diff --git a/.cursor/rules/.gitkeep b/.cursor/rules/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/.cursor/rules/coding_conventions.mdc b/.cursor/rules/coding_conventions.mdc new file mode 100644 index 00000000000..fd454785f17 --- /dev/null +++ b/.cursor/rules/coding_conventions.mdc @@ -0,0 +1,6 @@ +--- +description: Coding Conventions +globs: *.go +alwaysApply: false +--- +@docs/agents/CodingConventions.md \ No newline at end of file diff --git a/.cursor/rules/core.mdc b/.cursor/rules/core.mdc new file mode 100644 index 00000000000..f20bcd2705d --- /dev/null +++ b/.cursor/rules/core.mdc @@ -0,0 +1,8 @@ +--- +description: Cursor Operational Doctrine +globs: +alwaysApply: true +--- +# Cursor Operational Doctrine + +@docs/agents/OperationalDoctrine.md \ No newline at end of file diff --git a/.cursor/rules/godocs.mdc b/.cursor/rules/godocs.mdc new file mode 100644 index 00000000000..04152e02229 --- /dev/null +++ b/.cursor/rules/godocs.mdc @@ -0,0 +1,6 @@ +--- +description: +globs: *.go +alwaysApply: false +--- +@docs/agents/GoDocs.md \ No newline at end of file diff --git a/.custom-gcl.yml b/.custom-gcl.yml new file mode 100644 index 00000000000..e78bdeb309d --- /dev/null +++ b/.custom-gcl.yml @@ -0,0 +1,12 @@ +# The version of golangci-lint used to build the custom binary +version: v1.63.4 + +# The name of the custom binary +name: custom-gcl + +# The directory path used to store the custom binary. +destination: ./tools/ + +plugins: + - module: 'github.com/onflow/flow-go' + path: ./tools/structwrite diff --git a/.dockerignore b/.dockerignore index 6b1dfb9d54b..5df7ecc1862 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,3 +1,7 @@ +# Do not copy the user's project git directory into the container +# it's not needed to build the project, and causes issues when working within git worktrees +.git/ + integration/localnet/bootstrap/ integration/localnet/profiler/ integration/localnet/data/ diff --git a/.gemini/settings.json b/.gemini/settings.json new file mode 100644 index 00000000000..9e1e6acc978 --- /dev/null +++ b/.gemini/settings.json @@ -0,0 +1 @@ +{ "contextFileName": "AGENTS.md" } \ No newline at end of file diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index a446dc86c50..00000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Number of days of inactivity before an issue becomes stale -daysUntilStale: 90 -# Number of days of inactivity before a stale issue is closed -daysUntilClose: 7 -# Issues with these labels will never be considered stale -exemptLabels: - - Preserve - - Idea -# Label to use when marking an issue as stale -staleLabel: Stale -# Comment to post when marking an issue as stale. Set to `false` to disable -markComment: > - This issue has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. Thank you - for your contributions. -# Comment to post when closing a stale issue. 
Set to `false` to disable -closeComment: false diff --git a/.github/workflows/actions/test-monitor-process-results/action.yml b/.github/workflows/actions/test-monitor-process-results/action.yml new file mode 100644 index 00000000000..7565e6ab20e --- /dev/null +++ b/.github/workflows/actions/test-monitor-process-results/action.yml @@ -0,0 +1,55 @@ +name: Test Monitor - Process Results + +description: Custom action that's used in multiple Flaky Test Monitor jobs to process test results and upload them to BigQuery + +inputs: + service_account: + description: 'The GCP Service Account' + required: true + workload_identity_provider: + description: 'The GCP Workload Identity Provider' + required: true + +runs: + using : "composite" + steps: + - name: Get commit date + id: commit_date + run: echo "::set-output name=date::$(git show --no-patch --no-notes --pretty='%cI' $COMMIT_SHA)" + shell: bash + + - name: Get job run date + id: job_run_date + run: echo "::set-output name=date::$(TZ=":UTC" date -Iseconds)" + shell: bash + + - name: Process test results + run: cat test-output | go run tools/test_monitor/level1/process_summary1_results.go + env: + JOB_STARTED: ${{ steps.job_run_date.outputs.date }} + COMMIT_DATE: ${{ steps.commit_date.outputs.date }} + shell: bash + + - name: Google auth + id: auth + uses: google-github-actions/auth@v2 + with: + service_account: ${{ inputs.service_account }} + token_format: 'access_token' + workload_identity_provider: ${{ inputs.workload_identity_provider }} + + - name: 'Set up Cloud SDK' + uses: 'google-github-actions/setup-gcloud@v2' + + - name: Upload results to BigQuery (skipped tests) + uses: nick-fields/retry@v3 + with: + timeout_minutes: 1 + max_attempts: 3 + command: bq load --source_format=NEWLINE_DELIMITED_JSON $BIGQUERY_DATASET.$BIGQUERY_TABLE $SKIPPED_TESTS_FILE tools/test_monitor/schemas/skipped_tests_schema.json + - name: Upload results to BigQuery (test run) + uses: nick-fields/retry@v3 + with: + timeout_minutes: 2 + max_attempts: 3 + command: bq load --source_format=NEWLINE_DELIMITED_JSON $BIGQUERY_DATASET.$BIGQUERY_TABLE2 $RESULTS_FILE tools/test_monitor/schemas/test_results_schema.json diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml deleted file mode 100644 index ada29474be7..00000000000 --- a/.github/workflows/bench.yml +++ /dev/null @@ -1,99 +0,0 @@ -name: Benchstat -on: - pull_request: - branches: - - master - - "auto-cadence-upgrade/**" - - "feature/**" - - "v[0-9]+.[0-9]+" - paths: - - ".github/workflows/bench.yml" - - "fvm/**" - - "engine/execution/**" - - "go.sum" - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} - cancel-in-progress: true - -jobs: - benchstat: - name: Performance regression check - runs-on: ubuntu-latest - # Check if the event is not triggered by a fork - # peter-evans/find-comment@v1 does not work on forks. - # see https://github.com/peter-evans/create-pull-request/blob/main/docs/concepts-guidelines.md#restrictions-on-repository-forks for details. - # Ideally we would like to still run the benchmark on forks, but we can't do that with the current setup. 
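For reference, the two date steps in the `test-monitor-process-results` action above use the legacy `::set-output` command, which GitHub has deprecated in favor of the `$GITHUB_OUTPUT` file. A sketch of the equivalent step body follows; this alternative form is not what this diff ships, and only the `date` output name and `COMMIT_SHA` variable are taken from the action above:

```bash
# Hypothetical $GITHUB_OUTPUT form of the action's "Get commit date" step.
echo "date=$(git show --no-patch --no-notes --pretty='%cI' "$COMMIT_SHA")" >> "$GITHUB_OUTPUT"
```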
-    if: github.event.pull_request.head.repo.full_name == github.repository
-    continue-on-error: true
-    steps:
-      - name: Set benchmark repetitions
-        # reducing repetition will speed up execution,
-        # but will be more inaccurate at detecting change
-        run: echo "::set-output name=benchmark_repetitions::7"
-        id: settings
-
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-
-      - name: Setup go
-        uses: actions/setup-go@v3
-        with:
-          go-version: "1.19"
-          cache: true
-
-      - name: Build relic
-        run: make crypto_setup_gopath
-
-      - name: Run benchmark on current branch
-        run: |
-          (for i in {1..${{ steps.settings.outputs.benchmark_repetitions }}}; do go test ./fvm ./engine/execution/computation --bench . --tags relic -shuffle=on --benchmem --run ^$; done) | tee new.txt
-
-      - name: Checkout base branch
-        run: git checkout ${{ github.event.pull_request.base.sha }}
-
-      - name: Run benchmark on base branch
-        run: |
-          (for i in {1..${{ steps.settings.outputs.benchmark_repetitions }}}; do go test ./fvm ./engine/execution/computation --bench . --tags relic -shuffle=on --benchmem --run ^$; done) | tee old.txt
-
-      # see https://trstringer.com/github-actions-multiline-strings/ to see why this part is complex
-      - name: Use benchstat for comparison
-        run: |
-          export PATH=$PATH:$(go env GOPATH)/bin
-          go install golang.org/x/perf/cmd/benchstat@91a04616dc65ba76dbe9e5cf746b923b1402d303
-          echo "BENCHSTAT<<EOF" >> $GITHUB_ENV
-          echo "$(benchstat -html -sort delta old.txt new.txt | sed '//d' | sed 's/<!doctype html>//g')" >> $GITHUB_ENV
-          echo "EOF" >> $GITHUB_ENV
-
-      - name: Find existing comment on PR
-        uses: peter-evans/find-comment@v1
-        id: fc
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          comment-author: "github-actions[bot]"
-          body-includes: "## FVM [Benchstat](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) comparison"
-
-      - name: Create or update comment
-        uses: peter-evans/create-or-update-comment@v1
-        with:
-          comment-id: ${{ steps.fc.outputs.comment-id }}
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            ## FVM [Benchstat](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) comparison
-
-            This branch with compared with the base branch ${{ github.event.pull_request.base.label }} commit ${{ github.event.pull_request.base.sha }}
-
-            The command `(for i in {1..${{ steps.settings.outputs.benchmark_repetitions }}}; do go test ./fvm ./engine/execution/computation --bench . --tags relic -shuffle=on --benchmem --run ^$; done)` was used.
-
-            <details>
-              <summary>Collapsed results for better readability</summary>
-              <p>
-
-            ${{ env.BENCHSTAT }}
-
-            </p>
-            </details>
-
-          edit-mode: replace
diff --git a/.github/workflows/builds.yml b/.github/workflows/builds.yml
deleted file mode 100644
index 94120bdf62c..00000000000
--- a/.github/workflows/builds.yml
+++ /dev/null
@@ -1,118 +0,0 @@
-# This workflow is used to build and push one-off images for specific node types. This is useful
-# when deploying hotfixes or any time a change is not needed for all node roles.
-name: Build Node Docker Images - -on: - workflow_dispatch: - inputs: - tag: - type: string - description: 'Git tag/commit' - required: true - docker_tag: - type: string - description: 'Docker tag' - required: true - # GHA doesn't support multi-selects, so simulating it with one boolean for each option - build_access: - type: boolean - description: 'Access' - required: false - build_collection: - type: boolean - description: 'Collection' - required: false - build_consensus: - type: boolean - description: 'Consensus' - required: false - build_execution: - type: boolean - description: 'Execution' - required: false - build_verification: - type: boolean - description: 'Verification' - required: false - build_observer: - type: boolean - description: 'Observer' - required: false - include_without_netgo: - type: boolean - description: 'Build `without_netgo` images' - required: false - -jobs: - # matrix_builder generates a matrix that includes the roles selected in the input - matrix_builder: - name: Setup build jobs - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.generate.outputs.matrix }} - steps: - - id: generate - run: | - roles=() - if [[ "${{ inputs.build_access }}" = "true" ]]; then - roles+=( "access" ) - fi - if [[ "${{ inputs.build_collection }}" = "true" ]]; then - roles+=( "collection" ) - fi - if [[ "${{ inputs.build_consensus }}" = "true" ]]; then - roles+=( "consensus" ) - fi - if [[ "${{ inputs.build_execution }}" = "true" ]]; then - roles+=( "execution" ) - fi - if [[ "${{ inputs.build_verification }}" = "true" ]]; then - roles+=( "verification" ) - fi - if [[ "${{ inputs.build_observer }}" = "true" ]]; then - roles+=( "observer" ) - fi - rolesJSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${roles[@]}") - echo "matrix={\"role\":$(echo $rolesJSON)}" >> $GITHUB_OUTPUT - docker-push: - name: ${{ matrix.role }} images - runs-on: ubuntu-latest - needs: matrix_builder - - # setup jobs for each role - strategy: - fail-fast: false - matrix: ${{ fromJson(needs.matrix_builder.outputs.matrix) }} - - steps: - - name: Setup Go - uses: actions/setup-go@v2 - with: - go-version: '1.19' - - name: Checkout repo - uses: actions/checkout@v2 - with: - ref: ${{ inputs.tag }} - # Provide Google Service Account credentials to Github Action, allowing interaction with the Google Container Registry - # Logging in as github-actions@dl-flow.iam.gserviceaccount.com - - name: Docker login - uses: docker/login-action@v1 - with: - registry: gcr.io - username: _json_key - password: ${{ secrets.GCR_SERVICE_KEY }} - - - name: Build/Push ${{ matrix.role }} images - env: - IMAGE_TAG: ${{ inputs.docker_tag }} - GITHUB_CREDS: "machine github.com login ${{ secrets.REPO_SYNC_USER }} password ${{ secrets.REPO_SYNC }}" - run: | - make docker-build-${{ matrix.role }} docker-push-${{ matrix.role }} - - - name: Build/Push ${{ matrix.role }} without_netgo images - if: ${{ inputs.include_without_netgo }} - env: - IMAGE_TAG: ${{ inputs.docker_tag }} - GITHUB_CREDS: "machine github.com login ${{ secrets.REPO_SYNC_USER }} password ${{ secrets.REPO_SYNC }}" - run: | - make docker-build-${{ matrix.role }}-without-netgo docker-push-${{ matrix.role }}-without-netgo diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml deleted file mode 100644 index eb28e840078..00000000000 --- a/.github/workflows/cd.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: CD - -on: - push: - tags: - - '*' - - "!daily-*" - -jobs: - docker-push: - name: Push to container registry - runs-on: ubuntu-latest - steps: - - name: 
Setup Go - uses: actions/setup-go@v2 - with: - go-version: '1.19' - - name: Checkout repo - uses: actions/checkout@v2 - - name: Build relic - run: make crypto_setup_gopath - # Provide Google Service Account credentials to Github Action, allowing interaction with the Google Container Registry - # Logging in as github-actions@dl-flow.iam.gserviceaccount.com - - name: Docker login - uses: docker/login-action@v1 - with: - registry: gcr.io - username: _json_key - password: ${{ secrets.GCR_SERVICE_KEY }} - - name: Docker build - run: | - make docker-build-flow - make docker-build-flow-without-netgo - - name: Docker push - run: | - make docker-push-flow - make docker-push-flow-without-netgo diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 08832eab401..57487842c47 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,89 +14,189 @@ on: - 'auto-cadence-upgrade/**' - 'feature/**' - 'v[0-9]+.[0-9]+' + merge_group: + branches: + - master env: - GO_VERSION: 1.19 + GO_VERSION: "1.25" -concurrency: +concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} cancel-in-progress: true jobs: + build-linter: + name: Build Custom Linter + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Cache custom linter binary + id: cache-linter + uses: actions/cache@v3 + with: + # Key should change whenever implementation (tools/structwrite), or compilation config (.custom-gcl.yml) changes + # When the key is different, it is a cache miss, and the custom linter binary is recompiled + # We include the SHA in the hash key because: + # - cache keys are branch/reference-scoped, with some exceptions (see https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache) + # - (we believe) cache keys for a repo share one namespace (sort of implied by https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/caching-dependencies-to-speed-up-workflows#matching-a-cache-key) + # - (we believe) the same cache being written by two different branches may cause contention, + # as a result of the shared namespace and branch-scoped permissions + key: custom-linter-${{ env.GO_VERSION }}-${{ runner.os }}-${{ hashFiles('.custom-gcl.yml', 'tools/structwrite/**') }}-${{ github.sha }} + # If a matching cache item from a different branch exists, and we have permission to access it, use it. + restore-keys: | + custom-linter-${{ env.GO_VERSION }}-${{ runner.os }}-${{ hashFiles('.custom-gcl.yml', 'tools/structwrite/**') }} + path: tools/custom-gcl # path defined in .custom-gcl.yml + lookup-only: 'true' # if already cached, don't download here + # We install the non-custom golangci-lint binary using the golangci-lint action. + # The action is set up to always install and run the linter - there isn't a way to only install. + # We provide args to disable all linters which results in the step immediately failing. 
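For local debugging, the same custom binary that this job caches can be produced by hand. A minimal sketch, assuming a stock golangci-lint v1.63.x is already on the PATH and the repository root is the working directory; `golangci-lint custom` reads the `.custom-gcl.yml` shown earlier in this diff and writes the plugin-bundled binary to `./tools/custom-gcl`:

```bash
# Hedged local reproduction of the build-linter job.
golangci-lint custom              # reads .custom-gcl.yml; emits ./tools/custom-gcl
./tools/custom-gcl run -v ./...   # run the bundled linters roughly as CI does
```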
+ - name: Install golangci-lint + if: steps.cache-linter.outputs.cache-hit != 'true' + uses: golangci/golangci-lint-action@v6 + continue-on-error: true # after installation (what we care about), this step will fail - this line allows workflow to continue + with: + # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. + version: v1.63 + args: "--no-config --disable-all" # set args so that no linters are actually run + - name: Build custom linter binary + if: steps.cache-linter.outputs.cache-hit != 'true' + run: | + golangci-lint custom + golangci: strategy: fail-fast: false matrix: - dir: [./, ./integration/, ./crypto/, ./insecure/] + dir: [./, ./integration/, ./insecure/] name: Lint runs-on: ubuntu-latest + needs: build-linter # must wait for custom linter binary to be available steps: - name: Checkout repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 + timeout-minutes: 10 # fail fast. sometimes this step takes an extremely long time with: go-version: ${{ env.GO_VERSION }} cache: true - - name: Build relic - run: make crypto_setup_gopath + - name: Restore custom linter binary from cache + id: cache-linter + uses: actions/cache@v3 + with: + # See "Cache custom linter binary" job for information about the key structure + key: custom-linter-${{ env.GO_VERSION }}-${{ runner.os }}-${{ hashFiles('.custom-gcl.yml', 'tools/structwrite/**') }}-${{ github.sha }} + # If a matching cache item from a different branch exists, and we have permission to access it, use it. + restore-keys: | + custom-linter-${{ env.GO_VERSION }}-${{ runner.os }}-${{ hashFiles('.custom-gcl.yml', 'tools/structwrite/**') }} + path: tools/custom-gcl + # We are using the cache to share data between the build-linter job and the 3 lint jobs + # If there is a cache miss, it likely means either the build-linter job failed or the cache entry was evicted + # We expect this to happen very infrequently. If it does happen, the workflow needs to be manually retried. + fail-on-cache-miss: 'true' - name: Run go generate - run: go generate + run: go generate ./... working-directory: ${{ matrix.dir }} + # The golangci-lint action has a configuration where it searches for a binary named + # "golangci-lint" in the path rather than downloading. + # Below we rename our binary to this expected canonical name, and add it to the path. + - name: Rename custom linter binary + run: mv ./tools/custom-gcl ./tools/golangci-lint + - name: Make custom linter binary executable + run: chmod +x ./tools/golangci-lint + - name: Add custom linter binary to path + run: echo "$(pwd)/tools" >> $GITHUB_PATH - name: Run golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v6 with: - # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. 
- version: v1.49 - args: -v --build-tags relic + install-mode: 'none' # looks for binary in path rather than downloading + args: "-v" working-directory: ${{ matrix.dir }} - # https://github.com/golangci/golangci-lint-action/issues/244 - skip-cache: true tidy: name: Tidy runs-on: ubuntu-latest steps: - name: Checkout repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 + + - name: Setup private build environment + if: ${{ vars.PRIVATE_BUILDS_SUPPORTED == 'true' }} + uses: ./actions/private-setup + with: + cadence_deploy_key: ${{ secrets.CADENCE_DEPLOY_KEY }} + - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 + timeout-minutes: 10 # fail fast. sometimes this step takes an extremely long time with: go-version: ${{ env.GO_VERSION }} cache: true - name: Run tidy run: make tidy - - name: Emulator no relic check - run: make emulator-norelic-check + - name: code sanity check + run: make code-sanity-check - shell-check: - name: ShellCheck + create-dynamic-test-matrix: + name: Create Dynamic Test Matrix runs-on: ubuntu-latest + outputs: + dynamic-matrix: ${{ steps.set-test-matrix.outputs.dynamicMatrix }} steps: - - name: Checkout repo - uses: actions/checkout@v3 - - name: Run ShellCheck - uses: ludeeus/action-shellcheck@203a3fd018dfe73f8ae7e3aa8da2c149a5f41c33 - with: - scandir: './crypto' - ignore: 'relic' + - name: Checkout repo + uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + timeout-minutes: 10 # fail fast. sometimes this step takes an extremely long time + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Set Test Matrix + id: set-test-matrix + run: go run tools/test_matrix_generator/matrix.go - create-dynamic-test-matrix: - name: Create Dynamic Test Matrix + create-insecure-dynamic-test-matrix: + name: Create Dynamic Unit Test Insecure Package Matrix runs-on: ubuntu-latest outputs: dynamic-matrix: ${{ steps.set-test-matrix.outputs.dynamicMatrix }} steps: - name: Checkout repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 + timeout-minutes: 10 # fail fast. sometimes this step takes an extremely long time with: go-version: ${{ env.GO_VERSION }} cache: true - name: Set Test Matrix id: set-test-matrix - run: go run utils/test_matrix/test_matrix.go access admin cmd consensus engine fvm ledger module network utils + run: go run tools/test_matrix_generator/matrix.go -c insecure + + create-integration-dynamic-test-matrix: + name: Create Dynamic Integration Test Package Matrix + runs-on: ubuntu-latest + outputs: + dynamic-matrix: ${{ steps.set-test-matrix.outputs.dynamicMatrix }} + steps: + - name: Checkout repo + uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + timeout-minutes: 10 # fail fast. 
sometimes this step takes an extremely long time + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Set Test Matrix + id: set-test-matrix + run: go run tools/test_matrix_generator/matrix.go -c integration unit-test: name: Unit Tests (${{ matrix.targets.name }}) @@ -105,156 +205,290 @@ jobs: fail-fast: false matrix: targets: ${{ fromJSON(needs.create-dynamic-test-matrix.outputs.dynamic-matrix)}} - # need to set image explicitly due to GitHub logging issue as described in https://github.com/onflow/flow-go/pull/3087#issuecomment-1234383202 - runs-on: ubuntu-20.04 + ## need to set image explicitly due to GitHub logging issue as described in https://github.com/onflow/flow-go/pull/3087#issuecomment-1234383202 + runs-on: ${{ matrix.targets.runner }} steps: - name: Checkout repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 + + - name: Setup private build environment + if: ${{ vars.PRIVATE_BUILDS_SUPPORTED == 'true' }} + uses: ./actions/private-setup + with: + cadence_deploy_key: ${{ secrets.CADENCE_DEPLOY_KEY }} + - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 + timeout-minutes: 10 # fail fast. sometimes this step takes an extremely long time with: go-version: ${{ env.GO_VERSION }} cache: true - - name: Setup tests (${{ matrix.targets.name }} + - name: Setup tests (${{ matrix.targets.name }}) run: VERBOSE=1 make -e GO_TEST_PACKAGES="${{ matrix.targets.packages }}" install-tools - name: Run tests (${{ matrix.targets.name }}) - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: - timeout_minutes: 25 - max_attempts: 3 + timeout_minutes: 35 + max_attempts: 5 command: VERBOSE=1 make -e GO_TEST_PACKAGES="${{ matrix.targets.packages }}" test - # TODO(rbtz): re-enable when we fix exisiting races. #env: # RACE_DETECTOR: 1 - name: Upload coverage report - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v5 + timeout-minutes: 1 + continue-on-error: true with: - file: ./coverage.txt + files: ./coverage.txt flags: unittests name: codecov-umbrella + token: ${{ secrets.CODECOV_TOKEN }} - unit-test-modules: - name: Unit Tests (Modules) + unit-test-insecure: + name: Unit Tests Insecure (${{ matrix.targets.name }}) + needs: create-insecure-dynamic-test-matrix strategy: fail-fast: false matrix: - include: - - name: crypto - make1: -C crypto setup - make2: unittest - retries: 1 - race: 1 - - name: insecure - make1: install-tools - make2: test - retries: 3 - race: 1 - - name: integration - make1: install-tools - make2: test - retries: 3 - race: 0 - runs-on: ubuntu-latest + targets: ${{ fromJSON(needs.create-insecure-dynamic-test-matrix.outputs.dynamic-matrix)}} + ## need to set image explicitly due to GitHub logging issue as described in https://github.com/onflow/flow-go/pull/3087#issuecomment-1234383202 + runs-on: ${{ matrix.targets.runner }} + steps: + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Setup private build environment + if: ${{ vars.PRIVATE_BUILDS_SUPPORTED == 'true' }} + uses: ./actions/private-setup + with: + cadence_deploy_key: ${{ secrets.CADENCE_DEPLOY_KEY }} + + - name: Setup Go + uses: actions/setup-go@v5 + timeout-minutes: 10 # fail fast. 
sometimes this step takes an extremely long time
+        with:
+          go-version: ${{ env.GO_VERSION }}
+          cache: true
+      - name: Setup tests (${{ matrix.targets.name }})
+        run: VERBOSE=1 make -e GO_TEST_PACKAGES="${{ matrix.targets.packages }}" install-tools
+      - name: Run tests (${{ matrix.targets.name }})
+        uses: nick-fields/retry@v3
+        with:
+          timeout_minutes: 35
+          max_attempts: 5
+          command: VERBOSE=1 make -C ./insecure -e GO_TEST_PACKAGES="${{ matrix.targets.packages }}" test
+      # TODO(rbtz): re-enable when we fix existing races.
+      #env:
+      #  RACE_DETECTOR: 1
+      - name: Upload coverage report
+        uses: codecov/codecov-action@v5
+        timeout-minutes: 1
+        continue-on-error: true
+        with:
+          files: ./coverage.txt
+          flags: unittests
+          name: codecov-umbrella
+          token: ${{ secrets.CODECOV_TOKEN }}
+
+  docker-build:
+    name: Docker Build
+    runs-on: buildjet-16vcpu-ubuntu-2204
+    env:
+      CADENCE_DEPLOY_KEY: ${{ secrets.CADENCE_DEPLOY_KEY }}
     steps:
       - name: Checkout repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
+        with:
+          # all tags are needed for integration tests
+          fetch-depth: 0
+
+      - name: Setup private build environment
+        if: ${{ vars.PRIVATE_BUILDS_SUPPORTED == 'true' }}
+        uses: ./actions/private-setup
+        with:
+          cadence_deploy_key: ${{ secrets.CADENCE_DEPLOY_KEY }}
+
       - name: Setup Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
+        timeout-minutes: 10 # fail fast. sometimes this step takes an extremely long time
         with:
           go-version: ${{ env.GO_VERSION }}
           cache: true
-      - name: Setup tests (${{ matrix.name }})
-        run: make ${{ matrix.make1 }}
-      - name: Run tests (${{ matrix.name }})
+
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ vars.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+        if: ${{ (github.event_name == 'merge_group' || (github.event.pull_request && (github.event.pull_request.author_association == 'MEMBER' || github.event.pull_request.author_association == 'COLLABORATOR'))) }}
+        # this docker auth is exclusively for higher rate limits. continue unauthenticated if it fails
+        continue-on-error: true
+
+
+      - name: Docker build
         env:
-          RACE_DETECTOR: ${{ matrix.race }}
-        uses: nick-fields/retry@v2
+          CADENCE_DEPLOY_KEY: ${{ secrets.CADENCE_DEPLOY_KEY }}
+        run: make docker-native-build-flow docker-native-build-flow-corrupt
+      - name: Save Docker images
+        run: |
+          docker save \
+          gcr.io/flow-container-registry/access:latest \
+          gcr.io/flow-container-registry/collection:latest \
+          gcr.io/flow-container-registry/consensus:latest \
+          gcr.io/flow-container-registry/execution:latest \
+          gcr.io/flow-container-registry/ghost:latest \
+          gcr.io/flow-container-registry/observer:latest \
+          gcr.io/flow-container-registry/verification:latest \
+          gcr.io/flow-container-registry/access-corrupted:latest \
+          gcr.io/flow-container-registry/execution-corrupted:latest \
+          gcr.io/flow-container-registry/verification-corrupted:latest > flow-docker-images.tar
+      - name: Cache Docker images
+        uses: actions/cache@v4
+        with:
+          path: flow-docker-images.tar
+          # use the workflow run id as part of the cache key to ensure these docker images will only be used for a single workflow run
+          key: flow-docker-images-${{ hashFiles('**/Dockerfile') }}-${{ github.run_id }}
+
+  integration-test-others:
+    name: Integration Tests Others (${{ matrix.targets.name }})
+    needs: create-integration-dynamic-test-matrix
+    strategy:
+      fail-fast: false
+      matrix:
+        targets: ${{ fromJSON(needs.create-integration-dynamic-test-matrix.outputs.dynamic-matrix)}}
+    ## need to set image explicitly due to GitHub logging issue as described in https://github.com/onflow/flow-go/pull/3087#issuecomment-1234383202
+    runs-on: ${{ matrix.targets.runner }}
+
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v4
+
+      - name: Setup private build environment
+        if: ${{ vars.PRIVATE_BUILDS_SUPPORTED == 'true' }}
+        uses: ./actions/private-setup
         with:
-          timeout_minutes: 25
-          max_attempts: ${{ matrix.retries }}
-          # run `make2` target inside each module's root
-          command: VERBOSE=1 make -C ${{ matrix.name }} ${{ matrix.make2 }}
+          cadence_deploy_key: ${{ secrets.CADENCE_DEPLOY_KEY }}
+
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        timeout-minutes: 10 # fail fast. sometimes this step takes an extremely long time
+        with:
+          go-version: ${{ env.GO_VERSION }}
+          cache: true
+      - name: Setup tests (${{ matrix.targets.name }})
+        run: VERBOSE=1 make -e GO_TEST_PACKAGES="${{ matrix.targets.packages }}" install-tools
+      - name: Run tests (${{ matrix.targets.name }})
+        uses: nick-fields/retry@v3
+        with:
+          timeout_minutes: 35
+          max_attempts: 5
+          command: VERBOSE=1 make -C ./integration -e GO_TEST_PACKAGES="${{ matrix.targets.packages }}" test
+      # TODO(rbtz): re-enable when we fix existing races.
+ #env: + # RACE_DETECTOR: 1 - name: Upload coverage report - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v5 + timeout-minutes: 1 + continue-on-error: true with: - file: ./coverage.txt + files: ./coverage.txt flags: unittests name: codecov-umbrella + token: ${{ secrets.CODECOV_TOKEN }} integration-test: name: Integration Tests + needs: docker-build strategy: fail-fast: false matrix: - make: - - make -C integration access-tests - - make -C integration bft-tests - - make -C integration collection-tests - - make -C integration consensus-tests - - make -C integration epochs-tests - - make -C integration execution-tests - - make -C integration ghost-tests - - make -C integration mvp-tests - - make -C integration network-tests - - make -C integration verification-tests - - make -C integration upgrades-tests - runs-on: ubuntu-latest + include: + - name: Access Cohort1 Integration Tests + make: make -C integration access-cohort1-tests + runner: buildjet-4vcpu-ubuntu-2204 + - name: Access Cohort2 Integration Tests + make: make -C integration access-cohort2-tests + runner: ubuntu-latest + - name: Access Cohort3 Integration Tests + make: make -C integration access-cohort3-tests + runner: ubuntu-latest + - name: Access Cohort4 Integration Tests + make: make -C integration access-cohort4-tests + runner: ubuntu-latest + # test suite has single test which is flaky and needs to be fixed - reminder here to put it back when it's fixed +# - name: BFT (Framework) Integration Tests +# make: make -C integration bft-framework-tests +# runner: ubuntu-latest + - name: BFT (Protocol) Integration Tests + make: make -C integration bft-protocol-tests + runner: buildjet-8vcpu-ubuntu-2204 + - name: BFT (Gossipsub) Integration Tests + make: make -C integration bft-gossipsub-tests + runner: ubuntu-latest + - name: Collection Integration Tests + make: make -C integration collection-tests + runner: ubuntu-latest + - name: Consensus Integration Tests + make: make -C integration consensus-tests + runner: ubuntu-latest + - name: Epoch Cohort1 Integration Tests + make: make -C integration epochs-cohort1-tests + runner: buildjet-8vcpu-ubuntu-2204 + - name: Epoch Cohort2 Integration Tests + make: make -C integration epochs-cohort2-tests + runner: buildjet-4vcpu-ubuntu-2204 + - name: Execution Integration Tests + make: make -C integration execution-tests + runner: ubuntu-latest + - name: Ghost Integration Tests + make: make -C integration ghost-tests + runner: ubuntu-latest + - name: MVP Integration Tests + make: make -C integration mvp-tests + runner: ubuntu-latest + - name: Network Integration Tests + make: make -C integration network-tests + runner: ubuntu-latest + - name: Verification Integration Tests + make: make -C integration verification-tests + runner: ubuntu-latest + - name: Upgrade Integration Tests + make: make -C integration upgrades-tests + runner: ubuntu-latest + runs-on: ${{ matrix.runner }} steps: - name: Checkout repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + # all tags are needed for integration tests + fetch-depth: 0 + + - name: Setup private build environment + if: ${{ vars.PRIVATE_BUILDS_SUPPORTED == 'true' }} + uses: ./actions/private-setup + with: + cadence_deploy_key: ${{ secrets.CADENCE_DEPLOY_KEY }} + - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 + timeout-minutes: 10 # fail fast. 
sometimes this step takes an extremely long time with: go-version: ${{ env.GO_VERSION }} cache: true - - name: Build relic - run: make crypto_setup_gopath - - name: Docker build - run: make docker-build-flow docker-build-flow-corrupt - - name: Run tests + - name: Load cached Docker images + uses: actions/cache@v4 + with: + path: flow-docker-images.tar + # use the same cache key as the docker-build job + key: flow-docker-images-${{ hashFiles('**/Dockerfile') }}-${{ github.run_id }} + - name: Load Docker images + run: docker load -i flow-docker-images.tar + - name: Run tests (${{ matrix.name }}) # TODO(rbtz): re-enable when we fix exisiting races. #env: # RACE_DETECTOR: 1 - uses: nick-fields/retry@v2 + uses: nick-fields/retry@v3 with: - timeout_minutes: 25 - max_attempts: 3 + timeout_minutes: 35 + max_attempts: 5 command: VERBOSE=1 ${{ matrix.make }} - - localnet-test: - name: Localnet Compatibility Tests With Flow-CLI Client and Observer - strategy: - fail-fast: false - runs-on: ubuntu-latest - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: ${{ env.GO_VERSION }} - cache: true - - name: Build relic and other tools - run: make install-tools - - name: Install Flow Client In Docker - # This proved to be more reliable than installing it locally. - run: cd integration/localnet && sh client/client.sh - - name: Set up Localnet - run: bash -c 'cd integration/localnet/ && make -e OBSERVER=2 bootstrap && make start-flow' - - name: Ensure Observer is started - run: docker ps -f name=localnet_observer_1_1 | grep localnet_observer - - name: Get Client Version ensuring the client is provisioned - run: docker run --network host localnet-client /go/flow -f /go/flow-localnet.json -n observer version - - name: Wait for a default waiting period until a clean state - # This will not cause flakiness. 
- # The waiting time is a reasonable time to expect an observer to be responsive - run: sleep 10 - - name: Get Status ensuring the access endpoint is online - run: docker run --network host localnet-client /go/flow -f /go/flow-localnet.json -n access status - - name: Wait for finalized blocks and check them - run: docker run --network host localnet-client /go/flow -f /go/flow-localnet.json -n observer blocks get latest - - name: Wait for finalized blocks and check them with Observer - run: sleep 5 && docker run --network host localnet-client /go/flow -f /go/flow-localnet.json -n access blocks get latest && docker run --network host localnet-client /go/flow -f /go/flow-localnet.json -n observer blocks get latest - - name: Stop localnet - run: bash -c 'cd integration/localnet/ && make stop' diff --git a/.github/workflows/flaky-test-debug.yml b/.github/workflows/flaky-test-debug.yml deleted file mode 100644 index 3a5b47e2c2f..00000000000 --- a/.github/workflows/flaky-test-debug.yml +++ /dev/null @@ -1,225 +0,0 @@ -name: Flaky Test Debug - -on: - push: - branches: - - '**/*flaky-test-debug*' -env: - GO_VERSION: 1.19 - -#concurrency: -# group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} -# cancel-in-progress: true - -jobs: - golangci: - strategy: - fail-fast: false - matrix: - dir: [./, ./integration/, ./crypto/, ./insecure/] - name: Lint - runs-on: ubuntu-latest - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: ${{ env.GO_VERSION }} - cache: true - - name: Build relic - run: make crypto_setup_gopath - - name: Run go generate - run: go generate - working-directory: ${{ matrix.dir }} - - name: Run golangci-lint - uses: golangci/golangci-lint-action@v3 - with: - # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. 
- version: v1.49 - args: -v --build-tags relic - working-directory: ${{ matrix.dir }} - # https://github.com/golangci/golangci-lint-action/issues/244 - skip-cache: true - - tidy: - name: Tidy - runs-on: ubuntu-latest - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: ${{ env.GO_VERSION }} - cache: true - - name: Run tidy - run: make tidy - - name: Emulator no relic check - run: make emulator-norelic-check - - # shell-check: - # name: ShellCheck - # runs-on: ubuntu-latest - # steps: - # - name: Checkout repo - # uses: actions/checkout@v3 - # - name: Run ShellCheck - # uses: ludeeus/action-shellcheck@203a3fd018dfe73f8ae7e3aa8da2c149a5f41c33 - # with: - # scandir: './crypto' - # ignore: 'relic' - - create-dynamic-test-matrix: - name: Create Dynamic Test Matrix - runs-on: ubuntu-latest - outputs: - dynamic-matrix: ${{ steps.set-test-matrix.outputs.dynamicMatrix }} - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: ${{ env.GO_VERSION }} - cache: true - - name: Set Test Matrix - id: set-test-matrix - # modify which module to unit test by changing the argument to test_matrix.go - run: go run utils/test_matrix/test_matrix.go network - # run: go run utils/test_matrix/test_matrix.go access admin cmd consensus engine fvm ledger module network utils - - unit-test: - name: Unit Tests (${{ matrix.targets.name }}) - needs: create-dynamic-test-matrix - strategy: - fail-fast: false - matrix: - targets: ${{ fromJSON(needs.create-dynamic-test-matrix.outputs.dynamic-matrix)}} - # need to set image explicitly due to GitHub logging issue as described in https://github.com/onflow/flow-go/pull/3087#issuecomment-1234383202 - runs-on: ubuntu-20.04 - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: ${{ env.GO_VERSION }} - cache: true - - name: Run tests (${{ matrix.targets.name }}) - if: github.actor != 'bors[bot]' - run: make -e GO_TEST_PACKAGES="${{ matrix.targets.packages }}" ci - # TODO(rbtz): re-enable when we fix exisiting races. 
- #env: - # RACE_DETECTOR: 1 - - name: Run tests (Bors) - if: github.actor == 'bors[bot]' - uses: nick-invision/retry@v2 - with: - timeout_minutes: 25 - max_attempts: 3 - command: make -e GO_TEST_PACKAGES="${{ matrix.targets.packages }}" ci - - name: Upload coverage report - uses: codecov/codecov-action@v3 - with: - file: ./coverage.txt - flags: unittests - name: codecov-umbrella - - # unit-test-modules: - # name: Unit Tests (Modules) - # strategy: - # fail-fast: false - # matrix: - # include: - # - name: crypto - # make1: -C crypto setup - # make2: unittest - # retries: 1 - # race: 1 - # - name: insecure - # make1: install-tools - # make2: test - # retries: 3 - # race: 1 - # - name: integration - # make1: install-tools - # make2: test - # retries: 3 - # race: 0 - # runs-on: ubuntu-latest - # steps: - # - name: Checkout repo - # uses: actions/checkout@v3 - # - name: Setup Go - # uses: actions/setup-go@v3 - # with: - # go-version: ${{ env.GO_VERSION }} - # cache: true - # - name: Run tests (${{ matrix.name }}) - # if: github.actor != 'bors[bot]' - # env: - # RACE_DETECTOR: ${{ matrix.race }} - # # run `make1` target before running `make2` target inside each module's root - # run: | - # make ${{ matrix.make1 }} - # make -C ${{ matrix.name }} ${{ matrix.make2 }} - # - name: Run tests (Bors) - # if: github.actor == 'bors[bot]' - # uses: nick-invision/retry@v2 - # with: - # timeout_minutes: 25 - # max_attempts: ${{ matrix.retries }} - # command: | - # make ${{ matrix.make1 }} - # make -C ${{ matrix.name }} ${{ matrix.make2 }} - # - name: Upload coverage report - # uses: codecov/codecov-action@v3 - # with: - # file: ./coverage.txt - # flags: unittests - # name: codecov-umbrella - - integration-test: - name: Integration Tests - strategy: - fail-fast: false - matrix: - # modify which integration module to test by adding / removing the matrix targets - make: -# - make -C integration access-tests -# - make -C integration bft-tests -# - make -C integration collection-tests -# - make -C integration consensus-tests - - make -C integration epochs-tests -# - make -C integration execution-tests -# - make -C integration ghost-tests -# - make -C integration mvp-tests -# - make -C integration network-tests -# - make -C integration verification-tests - runs-on: ubuntu-latest - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: ${{ env.GO_VERSION }} - cache: true - - name: Build relic - run: make crypto_setup_gopath - - name: Docker build - run: make docker-build-flow docker-build-flow-corrupt - - name: Run tests - if: github.actor != 'bors[bot]' - run: VERBOSE=1 ${{ matrix.make }} - # TODO(rbtz): re-enable when we fix existing races. - #env: - # RACE_DETECTOR: 1 - - name: Run tests (Bors) - if: github.actor == 'bors[bot]' - uses: nick-invision/retry@v2 - with: - timeout_minutes: 15 - max_attempts: 2 - command: ${{ matrix.make }} diff --git a/.github/workflows/flaky-test-monitor.yml b/.github/workflows/flaky-test-monitor.yml new file mode 100644 index 00000000000..627762e4395 --- /dev/null +++ b/.github/workflows/flaky-test-monitor.yml @@ -0,0 +1,185 @@ +# This workflow runs all skipped (flaky) and non-skipped (regular) tests and generates a summary that's uploaded to BigQuery. 
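Taken together with the composite action earlier in this diff, the monitor's data flow is roughly: run a test target with JSON output, summarize it, then upload to BigQuery. A hedged end-to-end sketch stitched from the run commands in these files; the package list is illustrative, and the env values mirror the workflow's defaults:

```bash
# 1. Run a test target with JSON output, capturing everything to a file.
JSON_OUTPUT=true make -es GO_TEST_PACKAGES="./module/..." unittest-main > test-output

# 2. Summarize the raw output; the level-1 processor reads these env vars.
export JOB_STARTED="$(TZ=":UTC" date -Iseconds)"
export COMMIT_DATE="$(git show --no-patch --no-notes --pretty='%cI' HEAD)"
cat test-output | go run tools/test_monitor/level1/process_summary1_results.go

# 3. Upload the generated files to BigQuery (CI wraps this in retries).
bq load --source_format=NEWLINE_DELIMITED_JSON "$BIGQUERY_DATASET.$BIGQUERY_TABLE" \
  "$SKIPPED_TESTS_FILE" tools/test_monitor/schemas/skipped_tests_schema.json
```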
+ +name: Flaky Test Monitor + +on: + schedule: + - cron: '0 */2 * * *' # every 2 hours + push: + paths: + - '.github/workflows/flaky-test-monitor.yml' +permissions: + id-token: write + contents: read + +env: + GO_VERSION: "1.25" + BIGQUERY_DATASET: dev_src_flow_test_metrics + BIGQUERY_TABLE: skipped_tests + BIGQUERY_TABLE2: test_results + SKIPPED_TESTS_FILE: skipped-tests + RESULTS_FILE: test-results + COMMIT_SHA: ${{ github.sha }} + RUN_ID: ${{ github.run_id }} + JSON_OUTPUT: true + VERBOSE: true + TEST_FLAKY: true + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} + cancel-in-progress: true + +jobs: + create-dynamic-test-matrix: + name: Create Dynamic Test Matrix + runs-on: ubuntu-latest + outputs: + dynamic-matrix: ${{ steps.set-test-matrix.outputs.dynamicMatrix }} + steps: + - name: Checkout repo + uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Set Test Matrix + id: set-test-matrix + run: go run utils/test_matrix/test_matrix.go admin cmd consensus engine fvm ledger module network/test network/p2p utils + + unit-test: + name: Unit Tests (${{ matrix.targets.name }}) + needs: create-dynamic-test-matrix + strategy: + fail-fast: false + matrix: + targets: ${{ fromJSON(needs.create-dynamic-test-matrix.outputs.dynamic-matrix)}} + # need to set image explicitly due to GitHub logging issue as described in https://github.com/onflow/flow-go/pull/3087#issuecomment-1234383202 + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Setup tests (${{ matrix.targets.name }}) + run: make -e GO_TEST_PACKAGES="${{ matrix.targets.packages }}" install-tools + - name: Run tests (${{ matrix.targets.name }}) + run: make -es GO_TEST_PACKAGES="${{ matrix.targets.packages }}" unittest-main > test-output + timeout-minutes: 100 + # test run should continue even if there are failed tests + continue-on-error: true + - name: Process test results + env: + TEST_CATEGORY: unit + uses: ./.github/workflows/actions/test-monitor-process-results + with: + service_account: ${{ secrets.FLAKY_TEST_SERVICE_ACCOUNT }} + workload_identity_provider: ${{ secrets.FLAKY_TEST_WORKLOAD_IDENTITY_PROVIDER }} + + unit-test-modules: + name: Unit Tests (Modules) + strategy: + fail-fast: false + matrix: + include: + - name: crypto + setup: noop + race: 1 + test_category: unit-crypto + - name: insecure + setup: install-tools + race: 0 + test_category: unit-insecure + - name: integration + setup: install-tools + race: 0 + test_category: unit-integration + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: true + - name: Setup tests (${{ matrix.name }}) + run: make ${{ matrix.setup }} + - name: Run tests (${{ matrix.name }}) + env: + RACE_DETECTOR: ${{ matrix.race }} + run: make -es -C ${{ matrix.name }} test > test-output + timeout-minutes: 100 + continue-on-error: true + - name: Process test results (${{ matrix.name }}) + env: + TEST_CATEGORY: ${{ matrix.test_category }} + uses: ./.github/workflows/actions/test-monitor-process-results + with: + service_account: ${{ secrets.FLAKY_TEST_SERVICE_ACCOUNT }} + workload_identity_provider: ${{ secrets.FLAKY_TEST_WORKLOAD_IDENTITY_PROVIDER }} + + integration-test: + name: Integration 
Tests
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - target: access-tests
+            test_category: integration-access
+          - target: bft-protocol-tests
+            test_category: integration-bft-protocol
+          - target: bft-framework-tests
+            test_category: integration-bft-framework
+          - target: bft-gossipsub-tests
+            test_category: integration-bft-gossipsub
+          - target: collection-tests
+            test_category: integration-collection
+          - target: consensus-tests
+            test_category: integration-consensus
+          - target: epochs-cohort1-tests
+            test_category: integration-epochs
+          - target: epochs-cohort2-tests
+            test_category: integration-epochs
+          - target: execution-tests
+            test_category: integration-execution
+          - target: ghost-tests
+            test_category: integration-ghost
+          - target: mvp-tests
+            test_category: integration-mvp
+          - target: network-tests
+            test_category: integration-network
+          - target: verification-tests
+            test_category: integration-verification
+          - target: upgrades-tests
+            test_category: integration-upgrades
+
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v4
+        with:
+          # all tags are needed for integration tests
+          fetch-depth: 0
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+          cache: true
+      - name: Docker build
+        run: make docker-native-build-flow docker-native-build-flow-corrupt
+      - name: Run tests
+        run: make -es -C integration ${{ matrix.target }} > test-output
+        timeout-minutes: 100
+        continue-on-error: true
+      - name: Process test results
+        env:
+          TEST_CATEGORY: ${{ matrix.test_category }}
+        uses: ./.github/workflows/actions/test-monitor-process-results
+        with:
+          service_account: ${{ secrets.FLAKY_TEST_SERVICE_ACCOUNT }}
+          workload_identity_provider: ${{ secrets.FLAKY_TEST_WORKLOAD_IDENTITY_PROVIDER }}
+
diff --git a/.github/workflows/image_builds.yml b/.github/workflows/image_builds.yml
new file mode 100644
index 00000000000..acece5df001
--- /dev/null
+++ b/.github/workflows/image_builds.yml
@@ -0,0 +1,204 @@
+name: Build & Promote Docker Images to Public Registry
+on:
+  workflow_dispatch:
+    inputs:
+      secure-build:
+        description: "Execute secure build for private dependencies. If set to true, the tag must be present on the private repo."
+        required: true
+        default: "false"
+        type: choice
+        options:
+          - "false"
+          - "true"
+      tag:
+        description: 'Git tag to build, tag, and push docker image'
+        required: true
+        type: string
+
+env:
+  GO_VERSION: "1.25"
+  PRIVATE_REGISTRY_HOST: us-central1-docker.pkg.dev
+
+jobs:
+  # This job is responsible for building docker images using flow-go and pushing them to the private registry.
+  # It uses a matrix strategy to handle the builds for different roles in parallel.
+  # The environment is set to 'container builds', which provides the necessary secrets for pushing to the private registry.
+  public-build:
+    if: ${{ github.event.inputs.secure-build == 'false' }}
+    name: Execute public repo build & push to private artifact registry
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        # We specify all of the potential build commands for each role.
+        # This allows us to build and push all images in parallel, reducing the overall build time.
+        # The matrix is defined to include all roles & image types that we want to build and push.
+        # These commands are targets defined in the Makefile of the repository.
+ build_command: + # access Build Commands + - docker-build-access-with-adx docker-push-access-with-adx + - docker-build-access-without-adx docker-push-access-without-adx + - docker-build-access-without-netgo-without-adx docker-push-access-without-netgo-without-adx + - docker-cross-build-access-arm docker-push-access-arm + + # collection Build Commands + - docker-build-collection-with-adx docker-push-collection-with-adx + - docker-build-collection-without-adx docker-push-collection-without-adx + - docker-build-collection-without-netgo-without-adx docker-push-collection-without-netgo-without-adx + - docker-cross-build-collection-arm docker-push-collection-arm + + # consensus Build Commands + - docker-build-consensus-with-adx docker-push-consensus-with-adx + - docker-build-consensus-without-adx docker-push-consensus-without-adx + - docker-build-consensus-without-netgo-without-adx docker-push-consensus-without-netgo-without-adx + - docker-cross-build-consensus-arm docker-push-consensus-arm + + # execution Build Commands + - docker-build-execution-with-adx docker-push-execution-with-adx + - docker-build-execution-without-adx docker-push-execution-without-adx + - docker-build-execution-without-netgo-without-adx docker-push-execution-without-netgo-without-adx + - docker-cross-build-execution-arm docker-push-execution-arm + + # observer Build Commands + - docker-build-observer-with-adx docker-push-observer-with-adx + - docker-build-observer-without-adx docker-push-observer-without-adx + - docker-build-observer-without-netgo-without-adx docker-push-observer-without-netgo-without-adx + - docker-cross-build-observer-arm docker-push-observer-arm + + # verification Build Commands + - docker-build-verification-with-adx docker-push-verification-with-adx + - docker-build-verification-without-adx docker-push-verification-without-adx + - docker-build-verification-without-netgo-without-adx docker-push-verification-without-netgo-without-adx + - docker-cross-build-verification-arm docker-push-verification-arm + + environment: container builds + steps: + - name: Setup Go + uses: actions/setup-go@v4 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Checkout Public flow-go repo + uses: actions/checkout@v3 + with: + fetch-depth: 0 + repository: onflow/flow-go + ref: ${{ inputs.tag }} + + - name: Authenticate with Docker Registry + uses: google-github-actions/auth@v1 + with: + credentials_json: ${{ secrets.GCP_CREDENTIALS_FOR_PRIVATE_REGISTRY }} + + - name: Setup Google Cloud Authentication + run: gcloud auth configure-docker ${{ env.PRIVATE_REGISTRY_HOST }} + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Execute ${{ matrix.build_command }} command to build and push images + env: + IMAGE_TAG: ${{ inputs.tag }} + CONTAINER_REGISTRY: ${{ vars.PRIVATE_REGISTRY }} + run: | + make ${{ matrix.build_command }} CONTAINER_REGISTRY=${CONTAINER_REGISTRY} + + secure-build: + # This job is responsible for executing secure builds for private dependencies & pushing them to the private registry. + # It uses a matrix strategy to handle the builds for different roles in parallel. + # The environment is set to 'secure builds' to ensure that the builds are gated and only approved images are deployed. + # The job is triggered only if the 'secure-build' input is set to 'true'. + # The job uses an action to execute a cross-repo workflow that builds and pushes the images to the private registry. 
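Each matrix entry above expands to a plain `make` invocation, so a single variant can be reproduced outside CI, as sketched below. The tag and registry values are illustrative; the real job takes them from the `tag` dispatch input and the `PRIVATE_REGISTRY` repository variable:

```bash
# Hedged sketch of one matrix leg of the public-build job.
export IMAGE_TAG=v0.33.1                                                # illustrative
export CONTAINER_REGISTRY=us-central1-docker.pkg.dev/example-proj/flow  # illustrative
make docker-build-access-with-adx docker-push-access-with-adx \
  CONTAINER_REGISTRY="${CONTAINER_REGISTRY}"
```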
+ name: Execute secure build & push to private registry + runs-on: ubuntu-latest + if: ${{ github.event.inputs.secure-build == 'true' }} + strategy: + fail-fast: false + matrix: + role: [access, collection, consensus, execution, observer, verification] + environment: secure builds + steps: + - uses: actions/create-github-app-token@v2 + id: app-token + with: + app-id: ${{ secrets.DEPLOYMENT_APP_ID }} + private-key: ${{ secrets.DEPLOYMENT_APP_PRIVATE_KEY }} + owner: ${{ github.repository_owner }} + + - uses: convictional/trigger-workflow-and-wait@v1.6.1 + with: + client_payload: '{"role": "${{ matrix.role }}", "tag": "${{ inputs.tag }}"}' + github_token: ${{ steps.app-token.outputs.token }} + owner: 'onflow' + repo: ${{ secrets.SECURE_BUILDS_REPO }} + ref: master-private + workflow_file_name: 'secure_build.yml' + + promote-to-partner-registry: + # This job promotes container images from the private registry to the partner registry. + # As of right now, the only role being promoted to the partner registry is 'access'. + # It uses a matrix strategy to handle the promotion of images for different roles in parallel. + # The environments defined for each role are used to gate the promotion process. + # This ensures that only approved images are deployed to the partner registry. + name: Promote Images to Partner Registry + runs-on: ubuntu-latest + needs: [public-build, secure-build] + # This job will only run if the previous jobs were successful and not cancelled. + # It checks the results of both the public and secure builds to ensure that at least one of them succeeded. + if: | + ${{ !cancelled() }} && + ${{ needs.public-build.result != 'failure' || needs.secure-build.result != 'failure' }} + strategy: + fail-fast: false + matrix: + role: [access] + environment: ${{ matrix.role }} image promotion to partner registry + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Promote ${{ matrix.role }} + uses: ./actions/promote-images + with: + gcp_credentials: ${{ secrets.PARTNER_REGISTRY_PROMOTION_SECRET }} + private_registry: ${{ vars.PRIVATE_REGISTRY }} + private_registry_host: ${{ env.PRIVATE_REGISTRY_HOST }} + promotion_registry: ${{ vars.PARTNER_REGISTRY }} + role: ${{ matrix.role }} + tags: "${{ inputs.tag }},${{ inputs.tag }}-without-adx,${{ inputs.tag }}-without-netgo-without-adx,${{ inputs.tag }}-arm" + + promote-to-public-registry: + # This job promotes container images for various roles from a private registry to a public registry. + # It uses a matrix strategy to handle the promotion of images for different roles in parallel. + # The environments defined for each role are used to gate the promotion process. + # This ensures that only approved images are deployed to the public registry. + name: Promote Images to Public Registry + runs-on: ubuntu-latest + needs: [public-build, secure-build] + # This job will only run if the previous jobs were successful and not cancelled. + # It checks the results of both the public and secure builds to ensure that at least one of them succeeded. 
+ if: | + ${{ !cancelled() }} && + ${{ needs.public-build.result != 'failure' || needs.secure-build.result != 'failure' }} + strategy: + fail-fast: false + matrix: + role: [access, collection, consensus, execution, observer, verification] + environment: ${{ matrix.role }} image promotion to public registry + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Promote ${{ matrix.role }} + uses: ./actions/promote-images + with: + gcp_credentials: ${{ secrets.PUBLIC_REGISTRY_PROMOTION_SECRET }} + private_registry: ${{ vars.PRIVATE_REGISTRY }} + private_registry_host: ${{ env.PRIVATE_REGISTRY_HOST }} + promotion_registry: ${{ vars.PUBLIC_REGISTRY }} + role: ${{ matrix.role }} + tags: "${{ inputs.tag }},${{ inputs.tag }}-without-adx,${{ inputs.tag }}-without-netgo-without-adx,${{ inputs.tag }}-arm" + diff --git a/.github/workflows/semver-tags.yaml b/.github/workflows/semver-tags.yaml new file mode 100644 index 00000000000..41ebe625744 --- /dev/null +++ b/.github/workflows/semver-tags.yaml @@ -0,0 +1,22 @@ +name: Verify Tag + +on: + push: + tags: + - '*' + +jobs: + SemVer-Check: + runs-on: ubuntu-latest + steps: + - name: Check if tag is SemVer compliant + # the tag should be in semver format, but can optionally be prepended by "any_text_with_slashes/" and "v" + # valid examples crypto/v0.24.5-fvm, tools/flaky_test_monitor/v0.23.5, v0.23.5, 0.23.5-fvm + run: | + TAG_NAME=${GITHUB_REF#refs/tags/} + if [[ "${TAG_NAME}" =~ ^(.+\/)*v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-((0|[1-9][0-9]*|[0-9]*[a-zA-Z-][0-9a-zA-Z-]*)(\.(0|[1-9][0-9]*|[0-9]*[a-zA-Z-][0-9a-zA-Z-]*))*))?(\+([0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*))?$ ]]; then + echo "Tag $TAG_NAME is SemVer compliant" + else + echo "Tag $TAG_NAME is not SemVer compliant" + exit 1 + fi diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 00000000000..363e079a0bb --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,20 @@ +name: 'Mark and close stale issues' +on: + schedule: + - cron: '30 1 * * *' + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v10 + with: + days-before-issue-stale: 90 + days-before-issue-close: 7 + exempt-issue-labels: 'Preserve,Idea' + exempt-pr-labels: 'Preserve' + stale-issue-label: 'Stale' + stale-issue-message: > + This issue has been automatically marked as stale because it has not had + recent activity. It will be closed if no further activity occurs. Thank you + for your contributions. diff --git a/.github/workflows/test-monitor-flaky.yml b/.github/workflows/test-monitor-flaky.yml deleted file mode 100644 index fcf215b734e..00000000000 --- a/.github/workflows/test-monitor-flaky.yml +++ /dev/null @@ -1,87 +0,0 @@ -# This workflow runs ALL tests, including all tests that are skipped because they are flaky, as well as all the normal (non-skipped) tests. -# This workflow is run less frequently because running flaky tests is problematic and causes errors. 
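The tag pattern in `semver-tags.yaml` above can be sanity-checked locally. A small sketch using the same regex, the sample tags from the file's comment, and one deliberately invalid tag:

```bash
# Exercise the SemVer check from semver-tags.yaml against sample tags.
RE='^(.+\/)*v?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-((0|[1-9][0-9]*|[0-9]*[a-zA-Z-][0-9a-zA-Z-]*)(\.(0|[1-9][0-9]*|[0-9]*[a-zA-Z-][0-9a-zA-Z-]*))*))?(\+([0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*))?$'
for TAG_NAME in crypto/v0.24.5-fvm tools/flaky_test_monitor/v0.23.5 v0.23.5 0.23.5-fvm not_semver; do
  if [[ "$TAG_NAME" =~ $RE ]]; then
    echo "Tag $TAG_NAME is SemVer compliant"
  else
    echo "Tag $TAG_NAME is not SemVer compliant"
  fi
done
```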
- -name: Test Monitor - Flaky - -on: - schedule: - - cron: '0 */12 * * *' # every 12 hours - push: - paths: - - 'tools/test_monitor/**' - -env: - BIGQUERY_DATASET: production_src_flow_test_metrics - BIGQUERY_TABLE: test_results - GO_VERSION: 1.19 - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} - cancel-in-progress: true - -jobs: - flaky-test-run: - name: Test Monitor - Flaky Tests Run - strategy: - fail-fast: false - matrix: - test-category: - - unit - - unit-crypto - - unit-insecure - - unit-integration - - integration-bft - - integration-mvp - - integration-ghost - - integration-network - - integration-epochs - - integration-access - - integration-collection - - integration-consensus - - integration-execution - - integration-verification - env: - TEST_CATEGORY: ${{ matrix.test-category }} - COMMIT_SHA: ${{ github.sha }} - RUN_ID: ${{ github.run_id }} - RESULTS_FILE: test-results - runs-on: ubuntu-latest - steps: - - name: Get job run date - id: job_run_date - run: echo "::set-output name=date::$(TZ=":America/Los_Angeles" date -Iseconds)" - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v0 - with: - service_account_key: ${{ secrets.GCP_SA_KEY }} - - name: Setup Go - uses: actions/setup-go@v2 - with: - go-version: ${{ env.GO_VERSION }} - - name: Checkout repo - uses: actions/checkout@v2 - with: - ref: ${{ env.COMMIT_SHA }} - - name: Get commit date - id: commit_date - run: echo "::set-output name=date::$(git show --no-patch --no-notes --pretty='%cI' $COMMIT_SHA)" - - name: Run tests - continue-on-error: true - run: ./tools/test_monitor/run-tests.sh - env: - TEST_FLAKY: true - JSON_OUTPUT: true - RACE_DETECTOR: 1 - - name: Print test results - run: cat test-output - - name: Process test results - run: cat test-output | go run tools/test_monitor/level1/process_summary1_results.go - env: - JOB_STARTED: ${{ steps.job_run_date.outputs.date }} - COMMIT_DATE: ${{ steps.commit_date.outputs.date }} - - name: Upload results to BigQuery - uses: nick-fields/retry@v2 - with: - timeout_minutes: 2 - max_attempts: 3 - command: bq load --source_format=NEWLINE_DELIMITED_JSON $BIGQUERY_DATASET.$BIGQUERY_TABLE $RESULTS_FILE tools/test_monitor/schemas/test_results_schema.json diff --git a/.github/workflows/test-monitor-regular-skipped.yml b/.github/workflows/test-monitor-regular-skipped.yml deleted file mode 100644 index 74736a00431..00000000000 --- a/.github/workflows/test-monitor-regular-skipped.yml +++ /dev/null @@ -1,97 +0,0 @@ -# This workflow -# 1) runs all non-skipped (regular) tests and generates a summary. -# 2) generates a report of all skipped tests (e.g. due to flakiness). 
- -name: Test Monitor - Regular and Skipped - -on: - schedule: - - cron: '0 */2 * * *' # every 2 hours - push: - paths: - - 'tools/test_monitor/**' - -env: - BIGQUERY_DATASET: production_src_flow_test_metrics - BIGQUERY_TABLE: skipped_tests - BIGQUERY_TABLE2: test_results - GO_VERSION: 1.19 - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} - cancel-in-progress: true - -jobs: - regular-skipped-test-run: - name: Test Monitor - Regular, Skipped Tests Run - strategy: - fail-fast: false - matrix: - test-category: - - unit - - unit-crypto - - unit-insecure - - unit-integration - - integration-bft - - integration-mvp - - integration-ghost - - integration-network - - integration-epochs - - integration-access - - integration-collection - - integration-consensus - - integration-execution - - integration-verification - env: - TEST_CATEGORY: ${{ matrix.test-category }} - COMMIT_SHA: ${{ github.sha }} - RUN_ID: ${{ github.run_id }} - SKIPPED_TESTS_FILE: skipped-tests - RESULTS_FILE: test-results - runs-on: ubuntu-latest - steps: - - name: Get job run date - id: job_run_date - run: echo "::set-output name=date::$(TZ=":America/Los_Angeles" date -Iseconds)" - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v0 - with: - service_account_key: ${{ secrets.GCP_SA_KEY }} - - name: Setup Go - uses: actions/setup-go@v2 - with: - go-version: ${{ env.GO_VERSION }} - - name: Checkout repo - uses: actions/checkout@v2 - with: - ref: ${{ env.COMMIT_SHA }} - - name: Get commit date - id: commit_date - run: echo "::set-output name=date::$(git show --no-patch --no-notes --pretty='%cI' $COMMIT_SHA)" - - name: Run tests - uses: nick-fields/retry@v2 - with: - timeout_minutes: 60 - max_attempts: 5 - command: ./tools/test_monitor/run-tests.sh - env: - JSON_OUTPUT: true - - name: Print test results - run: cat test-output - - name: Process test results - run: cat test-output | go run tools/test_monitor/level1/process_summary1_results.go - env: - JOB_STARTED: ${{ steps.job_run_date.outputs.date }} - COMMIT_DATE: ${{ steps.commit_date.outputs.date }} - - name: Upload results to BigQuery (skipped tests) - uses: nick-fields/retry@v2 - with: - timeout_minutes: 1 - max_attempts: 3 - command: bq load --source_format=NEWLINE_DELIMITED_JSON $BIGQUERY_DATASET.$BIGQUERY_TABLE $SKIPPED_TESTS_FILE tools/test_monitor/schemas/skipped_tests_schema.json - - name: Upload results to BigQuery (test run) - uses: nick-fields/retry@v2 - with: - timeout_minutes: 2 - max_attempts: 3 - command: bq load --source_format=NEWLINE_DELIMITED_JSON $BIGQUERY_DATASET.$BIGQUERY_TABLE2 $RESULTS_FILE tools/test_monitor/schemas/test_results_schema.json diff --git a/.github/workflows/tools.yml b/.github/workflows/tools.yml index 2e297adb6ff..9bca236ceda 100644 --- a/.github/workflows/tools.yml +++ b/.github/workflows/tools.yml @@ -13,46 +13,64 @@ on: required: false type: boolean +env: + GO_VERSION: "1.25" + jobs: build-publish: name: Build boot tools runs-on: ubuntu-latest steps: + - name: Print all input variables + run: echo '${{ toJson(inputs) }}' | jq - id: auth uses: google-github-actions/auth@v1 with: - credentials_json: ${{ secrets.GCR_SERVICE_KEY }} # TODO: we need a new key to allow uploads - - name: Setup Go - uses: actions/setup-go@v2 - with: - go-version: '1.19' + credentials_json: ${{ secrets.GCR_SERVICE_KEY }} - name: Set up Google Cloud SDK uses: google-github-actions/setup-gcloud@v1 with: project_id: flow + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ 
env.GO_VERSION }} - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: + # to accurately get the version tag + fetch-depth: 0 ref: ${{ inputs.tag }} - - name: Build relic - run: make crypto_setup_gopath - name: Build and upload boot-tools + env: + CADENCE_DEPLOY_KEY: ${{ secrets.CADENCE_DEPLOY_KEY }} run: | make tool-bootstrap tool-transit mkdir boot-tools mv bootstrap transit boot-tools/ + sha256sum boot-tools/bootstrap > boot-tools/bootstrap.sha256sum + cat boot-tools/bootstrap.sha256sum + sha256sum boot-tools/transit > boot-tools/transit.sha256sum + cat boot-tools/transit.sha256sum tar -czf boot-tools.tar ./boot-tools/ gsutil cp boot-tools.tar gs://flow-genesis-bootstrap/tools/${{ inputs.tag }}/boot-tools.tar - name: Build and upload util + env: + CADENCE_DEPLOY_KEY: ${{ secrets.CADENCE_DEPLOY_KEY }} run: | make tool-util - tar -czf util.tar util + sha256sum util > util.sha256sum + cat util.sha256sum + tar -czf util.tar util util.sha256sum gsutil cp util.tar gs://flow-genesis-bootstrap/tools/${{ inputs.tag }}/util.tar - name: Promote boot-tools run: | if [[ "${{ inputs.promote }}" = true ]]; then echo "promoting boot-tools.tar" gsutil cp boot-tools.tar gs://flow-genesis-bootstrap/boot-tools.tar + SUMMARY=$'# Tool Build and Upload Summary \n Your tools were uploaded to the following GCS objects \n * Boot Tools gs://flow-genesis-bootstrap/boot-tools.tar \n * Util util.tar gs://flow-genesis-bootstrap/tools/${{ inputs.tag }}/util.tar' else echo "not promoting boot-tools.tar" + SUMMARY=$'# Tool Build and Upload Summary \n Your tools were uploaded to the following GCS objects \n * Boot Tools gs://flow-genesis-bootstrap/tools/${{ inputs.tag }}/boot-tools.tar \n * Util util.tar gs://flow-genesis-bootstrap/tools/${{ inputs.tag }}/util.tar' fi + echo "$SUMMARY" >> $GITHUB_STEP_SUMMARY diff --git a/.gitignore b/.gitignore index 472cc944ee4..f1a940e2ecc 100644 --- a/.gitignore +++ b/.gitignore @@ -3,12 +3,11 @@ /cmd/consensus/consensus /cmd/execution/execution /cmd/verification/verification -/cmd/testclient/testclient /cmd/util/util /cmd/bootstrap/bootstrap -# crypto relic folder -crypto/relic/ +# Test output of bootstrapping CLI +cmd/bootstrap/bootstrap-example # Test binary, build with `go test -c` *.test @@ -44,6 +43,12 @@ flowdb .idea .vscode *.code-workspace +# ignore all files in the .cursor directory, except for the rules directory +.cursor/* +!.cursor/rules/ +!.cursor/rules/* +# but do ignore rule files matching private-* +.cursor/rules/private-* git language/tools/vscode-extension/cadence-*.vsix language/tools/vscode-extension/out/* @@ -68,6 +73,11 @@ go.work.sum # Ledger checkpoint status files **/checkpoint_status.json +**/export_report.json # Local testing result files tps-results*.json + +# Custom golangci-lint build +custom-gcl +tools/custom-gcl diff --git a/.golangci.yml b/.golangci.yml index 0b984c2ac90..d718c52eb68 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,9 +3,21 @@ run: linters-settings: goimports: + # enforced by linter # put imports beginning with prefix after 3rd-party packages; # it's a comma-separated list of prefixes - local-prefixes: github.com/onflow/flow-go + local-prefixes: github.com/onflow/flow-go/ + + # used to generate canonical import ordering + # not enforced by linter + gci: + sections: + - standard # 1. standard library + - default # 2. external packages + - prefix(github.com/onflow/) # 3. org packages + - prefix(github.com/onflow/flow-go/) # 4.
project packages + skip-generated: true + custom-order: true gosec: # To select a subset of rules to run. @@ -18,19 +30,40 @@ linters-settings: - G503 - G505 + staticcheck: + # Disable SA1019 to allow use of deprecated label + checks: ["all", "-SA1019"] + + custom: + structwrite: + type: module + description: "disallow struct field writes outside constructor" + original-url: "github.com/onflow/flow-go/tools/structwrite" + linters: enable: - goimports - gosec + - structwrite issues: exclude-rules: - path: _test\.go # disable some linters on test files linters: - unused + - structwrite + - path: 'consensus/hotstuff/helper/*' # disable some linters on test helper files + linters: + - structwrite + - path: 'utils/unittest/*' # disable some linters on test files + linters: + - structwrite + - path: 'engine/execution/testutil/*' # disable some linters on test files + linters: + - structwrite # typecheck currently not handling the way we do function inheritance well # disabling for now - - path: 'cmd/access/node_build/*' + - path: 'cmd/access/node_build/*' linters: - typecheck - path: 'cmd/observer/node_builder/*' diff --git a/.mockery.yaml b/.mockery.yaml new file mode 100644 index 00000000000..a99b7c9e56d --- /dev/null +++ b/.mockery.yaml @@ -0,0 +1,113 @@ +dir: "{{.InterfaceDir}}/mock" +outpkg: "mock" +filename: "{{.InterfaceName | snakecase}}.go" +mockname: "{{.InterfaceName}}" + +all: True +with-expecter: False +include-auto-generated: False +disable-func-mocks: True +fail-on-missing: True + +# Suppress warnings +issue-845-fix: True +disable-version-string: True +resolve-type-alias: False + +packages: + github.com/onflow/flow-go/access: + github.com/onflow/flow-go/access/validator: + github.com/onflow/flow-go/cmd/util/ledger/reporters: + github.com/onflow/flow-go/consensus/hotstuff: + config: + dir: "consensus/hotstuff/mocks" + outpkg: "mocks" + github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages: + github.com/onflow/flow-go/engine/access/rest/common/models: + github.com/onflow/flow-go/engine/access/rest/websockets: + github.com/onflow/flow-go/engine/access/rest/websockets/data_providers: + github.com/onflow/flow-go/engine/access/rpc/backend/accounts/provider: + github.com/onflow/flow-go/engine/access/rpc/backend/events/provider: + github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator: + github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages: + github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider: + github.com/onflow/flow-go/engine/access/rpc/backend/transactions/retrier: + github.com/onflow/flow-go/engine/access/rpc/connection: + github.com/onflow/flow-go/engine/access/state_stream: + github.com/onflow/flow-go/engine/access/subscription: + github.com/onflow/flow-go/engine/access/subscription/tracker: + github.com/onflow/flow-go/engine/access/wrapper: + config: + dir: "engine/access/mock" + github.com/onflow/flow-go/engine/collection: + github.com/onflow/flow-go/engine/collection/epochmgr: + github.com/onflow/flow-go/engine/collection/rpc: + github.com/onflow/flow-go/engine/common/follower: + interfaces: + complianceCore: + config: + exported: true + mockname: "{{.InterfaceName | firstUpper}}" + github.com/onflow/flow-go/engine/common/follower/cache: + github.com/onflow/flow-go/engine/consensus: + github.com/onflow/flow-go/engine/consensus/approvals: + github.com/onflow/flow-go/engine/execution: +
github.com/onflow/flow-go/engine/execution/computation: + github.com/onflow/flow-go/engine/execution/computation/computer: + github.com/onflow/flow-go/engine/execution/computation/query: + github.com/onflow/flow-go/engine/execution/ingestion/uploader: + github.com/onflow/flow-go/engine/execution/provider: + github.com/onflow/flow-go/engine/execution/state: + github.com/onflow/flow-go/engine/protocol: + github.com/onflow/flow-go/engine/verification/fetcher: + github.com/onflow/flow-go/fvm: + github.com/onflow/flow-go/fvm/environment: + github.com/onflow/flow-go/fvm/storage/snapshot: + github.com/onflow/flow-go/insecure: + github.com/onflow/flow-go/ledger: + github.com/onflow/flow-go/model/fingerprint: + github.com/onflow/flow-go/module: + github.com/onflow/flow-go/module/component: + github.com/onflow/flow-go/module/execution: + github.com/onflow/flow-go/module/executiondatasync/execution_data: + github.com/onflow/flow-go/module/executiondatasync/optimistic_sync: + config: + all: False + interfaces: + Core: + github.com/onflow/flow-go/module/executiondatasync/tracker: + github.com/onflow/flow-go/module/forest: + github.com/onflow/flow-go/module/mempool: + github.com/onflow/flow-go/module/mempool/consensus/mock_interfaces: + config: + dir: "module/mempool/consensus/mock" + github.com/onflow/flow-go/module/state_synchronization: + github.com/onflow/flow-go/module/state_synchronization/requester: + github.com/onflow/flow-go/network: + github.com/onflow/flow-go/network/alsp: + github.com/onflow/flow-go/network/p2p: + github.com/onflow/flow-go/state/cluster: + github.com/onflow/flow-go/state/protocol: + github.com/onflow/flow-go/state/protocol/events: + github.com/onflow/flow-go/state/protocol/protocol_state: + github.com/onflow/flow-go/state/protocol/protocol_state/mock_interfaces: + config: + dir: "state/protocol/protocol_state/mock" + github.com/onflow/flow-go/state/protocol/protocol_state/epochs: + github.com/onflow/flow-go/state/protocol/protocol_state/epochs/mock_interfaces: + config: + dir: "state/protocol/protocol_state/epochs/mock" + github.com/onflow/flow-go/storage: + + # external libraries + github.com/onflow/flow-go-sdk/access: + config: + dir: "integration/benchmark/mock" + interfaces: + Client: + + github.com/onflow/crypto: + config: + dir: "module/mock" + interfaces: + PublicKey: diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000000..466257e0103 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,169 @@ +# AGENTS.md + +This file provides guidance to AI Agents when working with code in this repository. + +## Agents Docs + +- Coding Conventions: @docs/agents/CodingConventions.md +- Agents Directive: @docs/agents/OperationalDoctrine.md +- GoDocs: @docs/agents/GoDocs.md + +## Behavior Guidance + +### Git Operations + +- By default, use origin/master as the base for new branches. +- Create branches using the following naming scheme: + ``` + [user]/[github issue number]-[brief description in kebab-case] + ``` +- Git commit messages should be a brief, single-line message. + +### Github Operations + +#### Creating PRs +- If there is an associated issue AND the PR will complete the issue, add a "closes" statement as the first line in the description + e.g. `Closes: #[Issue Number]` +- Include a high-level overview of the problem and changes in the PR. Be concise. DO NOT add tons of unnecessary detail or boilerplate. +- Check available labels and suggest any that seem appropriate. Ask user to confirm.
+- If the branch was created from a branch other than master, update the base branch used for the PR to the correct branch. + +#### Creating or Commenting on Issues +- Include a note that the message was produced in collaboration with [your agent name - e.g. claude, gemini, cursor, etc]. + +### Answering Questions +- When asked a question, consider the answer and perform any exploration of the codebase required to provide a quality answer. +- DO NOT attempt to write or modify code. Simply answer the question. + +### Communication +- Be direct and straightforward. +- DO NOT be overly dramatic or jump to conclusions. e.g. don't say "Critical Memory Safety Issue Found" unless you are certain that is true. If you are not certain, then frame it as "Potential Memory Issue Found". +- DO NOT be sycophantic or use unnecessary flattery. Avoid phrases like "You're absolutely right". + +## Development Commands + +### Building and Testing +- `make test` - Run unit test suite +- `make integration-test` - Run integration test suite (requires Docker) +- `make docker-native-build-flow` - Build Docker image for all node types +- `make docker-native-build-$ROLE` - Build Docker image for specific node role (collection, consensus, access, execution, verification) +- `make docker-native-build-access-binary` - Build native access node binary + +### Code Quality and Generation +- `make lint` - Run linter (includes tidy and custom checks) +- `make lint-new` - Run linter for files changed since master +- `make fix-lint` - Automatically fix linting issues +- `make fix-lint-new` - Automatically fix linting issues for files changed since master +- `make fix-imports` - Automatically fix imports +- `make fix-imports-new` - Automatically fix imports for files changed since master +- `make vet` - Run go vet +- `make vet-new` - Run go vet for files changed since master +- `make generate` - Run all code generators (proto, mocks, fvm wrappers) +- `make generate-mocks` - Generate mocks for unit tests +- `make generate-proto` - Generate protobuf stubs +- `make tidy` - Run go mod tidy + +`lint`, `vet`, `fix-lint`, and `fix-imports` support passing `LINT_PATH`, which sets the path used by golangci-lint +- `make lint -e LINT_PATH=./path/to/lint/...` - Run linter for a specific module + +### Dependency Management +- `make install-tools` - Install all required development tools +- `make install-mock-generators` - Install mock generation tools + +## Architecture Overview + +Flow is a multi-node blockchain protocol implementing a byzantine fault-tolerant consensus mechanism. The architecture follows a data flow graph pattern where components are processing vertices connected by message-passing edges.
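To make the data flow graph framing above concrete, here is a minimal, hypothetical sketch of a processing vertex connected by message-passing edges. The types and channel wiring are illustrative only and are not flow-go's actual engine API.

```golang
package main

import "fmt"

// message stands in for a protocol message travelling along an edge of the
// data flow graph. All names here are hypothetical, for illustration only.
type message struct {
	payload string
}

// vertex is a hypothetical processing vertex: it consumes messages from an
// inbound edge, transforms them, and forwards results on an outbound edge.
type vertex struct {
	in  <-chan message
	out chan<- message
}

// run processes inbound messages until the done channel is closed.
func (v *vertex) run(done <-chan struct{}) {
	for {
		select {
		case <-done:
			return
		case msg := <-v.in:
			v.out <- message{payload: "processed: " + msg.payload}
		}
	}
}

func main() {
	done := make(chan struct{})
	in, out := make(chan message, 1), make(chan message, 1)
	v := &vertex{in: in, out: out}
	go v.run(done)

	in <- message{payload: "block proposal"}
	fmt.Println((<-out).payload) // processed: block proposal
	close(done)
}
```

In the real codebase these vertices are the engines under `/engine/`, and the "edges" are the networking and internal message queues, but the shape of the pattern is the same.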
+ +### Node Types +- **Access Node** (`/cmd/access/`) - Public API gateway, transaction submission and execution +- **Collection Node** (`/cmd/collection/`) - Transaction batching into collections +- **Consensus Node** (`/cmd/consensus/`) - Block production and consensus using Jolteon (HotStuff derivative) +- **Execution Node** (`/cmd/execution/`) - Transaction execution and state management +- **Verification Node** (`/cmd/verification/`) - Execution result verification +- **Observer Node** (`/cmd/observer/`) - Read-only network participant + +### Core Components + +#### Consensus (HotStuff/Jolteon) +- **Location**: `/consensus/hotstuff/` +- **Algorithm**: Jolteon protocol (HotStuff derivative with 2-chain finality rule) +- Uses BFT consensus with deterministic finality +- Implements pipelined block production and finalization + +#### Networking +- **Location**: `/network/` +- **Protocols**: LibP2P-based with GossipSub for broadcast, unicast for direct communication +- **Security**: Application Layer Spam Prevention (ALSP), peer scoring, RPC inspection + +#### Storage +- **Location**: `/storage/` +- **Backend**: Badger key-value store with custom indices + +#### Execution +- **Location**: `/fvm/` (Flow Virtual Machine) +- **Language**: Cadence smart contract language + +#### State Management +- **Location**: `/ledger/` +- **Structure**: Merkle trie for cryptographic state verification + +### Component Interface Pattern +All major processing components implement the `Component` interface from `/module/component/component.go`. This ensures consistent lifecycle management and graceful shutdown patterns across the codebase (a hedged sketch of this lifecycle shape follows the guidelines below). + +### Error Handling Philosophy +Flow uses a high-assurance approach where: +- All inputs are considered potentially byzantine +- Error classification is context-dependent (same error can be benign or an exception based on caller context) +- No code path is safe unless explicitly proven and documented +- Comprehensive error wrapping for debugging (avoid `fmt.Errorf`, use `irrecoverable` package for exceptions) +- NEVER log and continue on a best-effort basis. ALWAYS explicitly handle errors. + +## Development Guidelines + +### Code Organization +- Follow the existing module structure in `/module/`, `/engine/`, `/model/` +- Use dependency injection patterns for component composition +- Implement proper interfaces before concrete types +- Follow Go naming conventions and the project's coding style in `/docs/CodingConventions.md` + +### Testing +- Unit tests should be co-located with the code they test +- Integration tests go in `/integration/tests/` +- Use mock generators: run `make generate-mocks` after interface changes +- Follow the existing pattern of `*_test.go` files +- Use fixtures, defined in `/utils/unittest/`, for realistic test data +- Some tests may be flaky. If unrelated tests fail, try them again before debugging.
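As promised above, here is a hedged sketch of the ready/done lifecycle shape that the Component interface pattern implies. The authoritative interface lives in `/module/component/component.go` and is richer (for example, it integrates with the `irrecoverable` context), so treat the names below as illustrative assumptions rather than the actual API.

```golang
package main

import "fmt"

// ReadyDoneAware mirrors the lifecycle shape described above: Ready() closes
// once startup completes, Done() closes once shutdown completes. This is a
// sketch; the authoritative definitions live in flow-go's module packages.
type ReadyDoneAware interface {
	Ready() <-chan struct{}
	Done() <-chan struct{}
}

// worker is a hypothetical component following that lifecycle.
type worker struct {
	ready chan struct{}
	done  chan struct{}
	quit  chan struct{}
}

var _ ReadyDoneAware = (*worker)(nil)

func newWorker() *worker {
	w := &worker{
		ready: make(chan struct{}),
		done:  make(chan struct{}),
		quit:  make(chan struct{}),
	}
	go func() {
		defer close(w.done) // signal graceful shutdown completion
		close(w.ready)      // startup finished; signal readiness
		<-w.quit            // block until shutdown is requested
	}()
	return w
}

func (w *worker) Ready() <-chan struct{} { return w.ready }
func (w *worker) Done() <-chan struct{}  { return w.done }
func (w *worker) Stop()                  { close(w.quit) }

func main() {
	w := newWorker()
	<-w.Ready()
	fmt.Println("component ready")
	w.Stop()
	<-w.Done()
	fmt.Println("component shut down gracefully")
}
```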
+ +### Build System +- Uses Make and Go modules +- Docker-based builds for consistency +- Cross-compilation support for different architectures +- CGO_ENABLED=1 required due to cryptography dependencies + +### Linting and Code Quality +- Uses golangci-lint with custom configurations (`.golangci.yml`) +- Custom linters for Flow-specific conventions (struct write checking) +- Revive configuration for additional style checks +- Security checks for cryptographic misuse (gosec) + +### Key Directories +- `/access/` - Access API shared logic and types +- `/cmd/` - Node executables and main packages +- `/engine/` - Core protocol engines (consensus, collection, execution, etc.) +- `/model/` - Data structures and protocol messages +- `/module/` - Reusable components and utilities +- `/network/` - Networking layer and P2P protocols +- `/storage/` - Data persistence layer +- `/fvm/` - Flow Virtual Machine +- `/ledger/` - State management and Merkle tries +- `/crypto/` - Cryptographic primitives +- `/utils/` - General utilities + +### Special Considerations +- Byzantine fault tolerance is a core design principle +- Cryptographic operations require careful handling (see crypto library docs) +- Performance is critical - prefer efficient data structures and algorithms +- Network messages must be authenticated and validated +- State consistency is paramount - use proper synchronization primitives + +This codebase implements a production blockchain protocol with high security and performance requirements. Changes should be made carefully with thorough testing and consideration of byzantine failure modes. \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000000..2ffd9ffc87d --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +@./AGENTS.md \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS index b5bebe956e5..c00cc40e9ed 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,55 +1,2 @@ -# Collection Stream -/cmd/collection/** @jordanschalm -/engine/collection/** @jordanschalm -/protobuf/services/collection/** @jordanschalm - -# Consensus Stream -/cmd/consensus/** @AlexHentschel @durkmurder @jordanschalm -/engine/consensus/** @AlexHentschel @durkmurder @jordanschalm - -# Execution Stream -/cmd/execution/** @ramtinms -/engine/execution/** @ramtinms - -# Access Stream -/access/** @peterargue -/cmd/access/** @peterargue -/cmd/observer/** @peterargue -/engine/access/** @peterargue - -# Verification Stream -/cmd/verification/** @ramtinms @yhassanzadeh13 -/engine/verification/** @ramtinms @yhassanzadeh13 -/module/chunking/** @ramtinms -/integration/tests/verification @ramtinms @yhassanzadeh13 - -# Ledger Stream -/ledger/** @ramtinms @AlexHentschel - -# FVM Stream -/fvm/** @ramtinms @janezpodhostnik @pattyshack - -# Networking Stream -/network/** @yhassanzadeh13 - -# Cryptography Stream -/crypto/** @tarakby - -# Bootstrap and transit scripts -/cmd/bootstrap/** @zhangchiqing - -# Dev Tools Stream -.github/workflows/** @gomisha -/insecure/** @gomisha @yhassanzadeh13 -/integration/benchnet2/** @gomisha -/tools/test_monitor/** @gomisha - -# Performance Stream -/integration/benchmark/** @SaveTheRbtz @gomisha -/integration/localnet/** @SaveTheRbtz -/module/profiler/** @SaveTheRbtz @pattyshack -/module/tracer.go @SaveTheRbtz @pattyshack - -# Execution Sync -/module/executiondatasync/** @peterargue -/module/state_synchronization/** @peterargue +# Protocol owners are not generally differentiated by sub-area for simplicity +/** @onflow/flow-core-protocol diff --git
a/CodingConventions.md b/CodingConventions.md index c8915e0b7b6..8c68256f6d2 100644 --- a/CodingConventions.md +++ b/CodingConventions.md @@ -91,7 +91,7 @@ happy path is either Benign failure cases are represented as typed sentinel errors ([basic errors](https://pkg.go.dev/errors#New) and [higher-level errors](https://dev.to/tigorlazuardi/go-creating-custom-error-wrapper-and-do-proper-error-equality-check-11k7)), so we can do type checks. - 2. _exception: the error a potential symptom of internal state corruption_. + 2. _exception: the error is a potential symptom of internal state corruption_. For example, a failed sanity check. In this case, the error is most likely fatal. <br /><br /> @@ -102,16 +102,76 @@ happy path is either Therefore, changing the set of specified sentinel errors is generally considered a breaking API change. -2. **All errors beyond the specified, benign sentinel errors ere considered unexpected failures, i.e. a symptom for potential state corruption.** +2. **All errors beyond the specified, benign sentinel errors are considered unexpected failures, i.e. a symptom of potential state corruption.** * We employ a fundamental principle of [High Assurance Software Engineering](https://www.researchgate.net/publication/228563190_High_Assurance_Software_Development), where we treat everything beyond the known benign errors as critical failures. In unexpected failure cases, we assume that the vertex's in-memory state has been broken and proper functioning is no longer guaranteed. The only safe route of recovery is to restart the vertex from a previously persisted, safe state. Per convention, a vertex should throw any unexpected exceptions using the related [irrecoverable context](https://github.com/onflow/flow-go/blob/277b6515add6136946913747efebd508f0419a25/module/irrecoverable/irrecoverable.go). - * Many components in our BFT system can return benign errors (type (i)) and exceptions (type (ii)) - * Use the special `irrecoverable.exception` [error type](https://github.com/onflow/flow-go/blob/master/module/irrecoverable/exception.go#L7-L26) to denote an unexpected error (and strip any sentinel information from the error stack) - - -3. _Optional Simplification for components that solely return benign errors._ + * Many components in our BFT system can return benign errors (type (i)) and irrecoverable exceptions (type (ii)) + +3. **Whether a particular error is benign or an exception depends on the caller's context. Errors _cannot_ be categorized as benign or exception based on their type alone.** + + ![Error Handling](/docs/images/ErrorHandling.png) + + * For example, consider `storage.ErrNotFound` that could be returned by the storage layer when querying a block by ID + (method [`Headers.ByBlockID(flow.Identifier)`](https://github.com/onflow/flow-go/blob/a918616c7b541b772c254e7eaaae3573561e6c0a/storage/headers.go#L15-L18)). + In many cases, `storage.ErrNotFound` is expected, for instance if a node is receiving a new block proposal and checks whether the parent has already been ingested + or needs to be requested from a different node. In contrast, if we are querying a block that we know is already finalized and the storage returns a `storage.ErrNotFound`, + something is badly broken. + * Use the special `irrecoverable.exception` [error type](https://github.com/onflow/flow-go/blob/master/module/irrecoverable/exception.go#L7-L26) + to denote an unexpected error (and strip any sentinel information from the error stack).
+ + This is for any scenario when a higher-level function is interpreting a sentinel returned from a lower-level function as an exception. + To construct an example, let's look at our `storage.Blocks` API, which has a [`ByHeight` method](https://github.com/onflow/flow-go/blob/a918616c7b541b772c254e7eaaae3573561e6c0a/storage/blocks.go#L24-L26) + to retrieve _finalized_ blocks by height. The following could be a hypothetical implementation: + ```golang + // ByHeight retrieves the finalized block for the given height. + // From the perspective of the storage layer, the following errors are benign: + // - storage.ErrNotFound if no finalized block at the given height is known + func ByHeight(height uint64) (*flow.Block, error) { + // Step 1: retrieve the ID of the finalized block for the given height. We expect + // `storage.ErrNotFound` during normal operations, if no block at height has been + // finalized. We just bubble this sentinel up, as it already has the expected type. + blockID, err := retrieveBlockIdByHeight(height) + if err != nil { + return nil, fmt.Errorf("could not query block by height: %w", err) + } + + // Step 2: retrieve full block by ID. Function `retrieveBlockByID` returns + // `storage.ErrNotFound` in case no block with the given ID is known. In other parts + // of the code that also use `retrieveBlockByID`, this would be expected during normal + // operations. However, here we are querying a block, which the storage layer has + // already indexed. Failure to retrieve the block implies our storage is corrupted. + block, err := retrieveBlockByID(blockID) + if err != nil { + // We cannot bubble up `storage.ErrNotFound` as this would hide this irrecoverable + // failure behind a benign sentinel error. We use the `Exception` wrapper, which + // also implements the error `interface` but provides no `Unwrap` method. Thereby, + // the `err`'s sentinel type is hidden from upstream type checks, and consequently + // classified as unexpected, i.e. irrecoverable exceptions. + return nil, irrecoverable.NewExceptionf("storage inconsistency, failed to "+ + "retrieve full block for indexed and finalized block %x: %w", blockID, err) + } + return block, nil + } + ``` + Functions **may** use `irrecoverable.NewExceptionf` when: + - they are interpreting any error returned from a 3rd party module as unexpected + - they are reacting to an unexpected condition internal to their stack frame and returning a generic error + + Functions **must** use `irrecoverable.NewExceptionf` when: + - they are interpreting any documented sentinel error returned from a flow-go module as unexpected + + For brief illustration, let us consider some function body, in which there are multiple subsequent calls to other lower-level functions. + In most scenarios, a particular sentinel type is either always or never expected during normal operations. If it is expected, + then the sentinel type should be documented. If it is consistently not expected, the error should _not_ be mentioned in the + function's godoc. In the absence of positive affirmation that `error` is an expected and benign sentinel, the error is to be + treated as an irrecoverable exception. So if a sentinel type `T` is consistently not expected throughout the function's body, make + sure the sentinel `T` is not mentioned in the function's godoc. The latter is fully sufficient to classify `T` as an irrecoverable + exception. + + +4.
_Optional Simplification for components that solely return benign errors._ * In this case, you _can_ use untyped errors to represent benign error cases (e.g. using `fmt.Errorf`). * By using untyped errors, the code would be _breaking with our best practice guideline_ that benign errors should be represented as typed sentinel errors. Therefore, whenever all returned errors are benign, please clearly document this _for each public function individually_. @@ -160,7 +220,8 @@ For example, a statement like the following would be sufficient: * Handle errors at a level, where you still have enough context to decide whether the error is expected during normal operations. * Errors of unexpected types are indicators that the node's internal state might be corrupted. - - Use the special `irrecoverable.exception` [error type](https://github.com/onflow/flow-go/blob/master/module/irrecoverable/exception.go#L7-L26) at the point an unexpected error is being returned, or when an error returned from another function is interpreted as unexpected + + ### Anti-Pattern Continuing on a best-effort basis is not an option, i.e. the following is an anti-pattern in the context of Flow: diff --git a/Makefile b/Makefile index 5e55f9fe57b..510254b94c0 100644 --- a/Makefile +++ b/Makefile @@ -10,9 +10,6 @@ VERSION := $(shell git describe --tags --abbrev=2 --match "v*" --match "secure-c # dynamically split up CI jobs into smaller jobs that can be run in parallel GO_TEST_PACKAGES := ./... -FLOW_GO_TAG := v0.28.15 - - # Image tag: if image tag is not set, set it with version (or short commit if empty) ifeq (${IMAGE_TAG},) IMAGE_TAG := ${VERSION} @@ -22,12 +19,14 @@ ifeq (${IMAGE_TAG},) IMAGE_TAG := ${SHORT_COMMIT} endif -IMAGE_TAG_NO_NETGO := $(IMAGE_TAG)-without_netgo +IMAGE_TAG_NO_ADX := $(IMAGE_TAG)-without-adx +IMAGE_TAG_NO_NETGO_NO_ADX := $(IMAGE_TAG)-without-netgo-without-adx +IMAGE_TAG_ARM := $(IMAGE_TAG)-arm # Name of the cover profile COVER_PROFILE := coverage.txt # Disable go sum database lookup for private repos -GOPRIVATE=github.com/dapperlabs/* +GOPRIVATE=github.com/onflow/*-internal # OS UNAME := $(shell uname) @@ -42,37 +41,46 @@ K8S_YAMLS_LOCATION_STAGING=./k8s/staging export CONTAINER_REGISTRY := gcr.io/flow-container-registry export DOCKER_BUILDKIT := 1 -# setup the crypto package under the GOPATH: needed to test packages importing flow-go/crypto -.PHONY: crypto_setup_gopath -crypto_setup_gopath: - bash crypto_setup.sh +# set `CRYPTO_FLAG` when building natively (not cross-compiling) +include crypto_adx_flag.mk + +# needed for CI +.PHONY: noop +noop: + @echo "This is a no-op target" cmd/collection/collection: - go build -o cmd/collection/collection cmd/collection/main.go + CGO_CFLAGS=$(CRYPTO_FLAG) go build -o cmd/collection/collection cmd/collection/main.go cmd/util/util: - go build -o cmd/util/util --tags relic cmd/util/main.go + CGO_CFLAGS=$(CRYPTO_FLAG) go build -o cmd/util/util cmd/util/main.go .PHONY: update-core-contracts-version update-core-contracts-version: + # updates the core-contracts version in all of the go.mod files + # usage example: CC_VERSION=0.16.0 make update-core-contracts-version ./scripts/update-core-contracts.sh $(CC_VERSION) + make tidy -############################################################################################ -# CAUTION: DO NOT MODIFY THESE TARGETS!
DOING SO WILL BREAK THE FLAKY TEST MONITOR +.PHONY: update-cadence-version +update-cadence-version: + # updates the cadence version in all of the go.mod files + # usage example: CC_VERSION=0.16.0 make update-cadence-version + ./scripts/update-cadence.sh $(CC_VERSION) + make tidy .PHONY: unittest-main unittest-main: - # test all packages with Relic library enabled - go test $(if $(VERBOSE),-v,) -coverprofile=$(COVER_PROFILE) -covermode=atomic $(if $(RACE_DETECTOR),-race,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) --tags relic $(GO_TEST_PACKAGES) + # test all packages + CGO_CFLAGS=$(CRYPTO_FLAG) go test $(if $(VERBOSE),-v,) -coverprofile=$(COVER_PROFILE) -covermode=atomic $(if $(RACE_DETECTOR),-race,) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(GO_TEST_PACKAGES) .PHONY: install-mock-generators install-mock-generators: cd ${GOPATH}; \ - go install github.com/vektra/mockery/v2@v2.21.4; \ - go install github.com/golang/mock/mockgen@v1.6.0; + go install github.com/vektra/mockery/v2@v2.53.5; .PHONY: install-tools -install-tools: crypto_setup_gopath check-go-version install-mock-generators +install-tools: check-go-version install-mock-generators cd ${GOPATH}; \ go install github.com/golang/protobuf/protoc-gen-go@v1.3.2; \ go install github.com/uber/prototool/cmd/prototool@v1.9.0; \ @@ -80,30 +88,40 @@ install-tools: crypto_setup_gopath check-go-version install-mock-generators go install golang.org/x/tools/cmd/stringer@master; .PHONY: verify-mocks -verify-mocks: generate-mocks +verify-mocks: tidy generate-mocks git diff --exit-code -############################################################################################ - -.PHONY: emulator-norelic-check -emulator-norelic-check: - # test the fvm package compiles with Relic library disabled (required for the emulator build) - cd ./fvm && go test ./... -run=NoTestHasThisPrefix +.SILENT: go-math-rand-check +go-math-rand-check: + # check that the insecure math/rand Go package isn't used by production code. + # `exclude` should only specify non-production code (test, bench, ...). + # If this check fails, try updating your code by using: + # - "crypto/rand" or "flow-go/utils/rand" for non-deterministic randomness + # - "onflow/crypto/random" for deterministic randomness + grep --include=\*.go \ + --exclude=*test* --exclude=*helper* --exclude=*example* --exclude=*fixture* --exclude=*benchmark* --exclude=*profiler* \ + --exclude-dir=*test* --exclude-dir=*helper* --exclude-dir=*example* --exclude-dir=*fixture* --exclude-dir=*benchmark* --exclude-dir=*profiler* --exclude-dir=*emulator* -rnw '"math/rand"'; \ + if [ $$?
-ne 1 ]; then \ + echo "[Error] Go production code should not use math/rand package"; exit 1; \ + fi + +.PHONY: code-sanity-check +code-sanity-check: go-math-rand-check .PHONY: fuzz-fvm fuzz-fvm: # run fuzz tests in the fvm package - cd ./fvm && go test -fuzz=Fuzz -run ^$$ --tags relic + cd ./fvm && CGO_CFLAGS=$(CRYPTO_FLAG) go test -fuzz=Fuzz -run ^$$ .PHONY: test test: verify-mocks unittest-main .PHONY: integration-test -integration-test: docker-build-flow +integration-test: docker-native-build-flow $(MAKE) -C integration integration-test .PHONY: benchmark -benchmark: docker-build-flow +benchmark: docker-native-build-flow $(MAKE) -C integration benchmark .PHONY: coverage @@ -119,8 +137,8 @@ endif .PHONY: generate-openapi generate-openapi: - swagger-codegen generate -l go -i https://raw.githubusercontent.com/onflow/flow/master/openapi/access.yaml -D packageName=models,modelDocs=false,models -o engine/access/rest/models; - go fmt ./engine/access/rest/models + swagger-codegen generate -l go -i https://raw.githubusercontent.com/onflow/flow/master/openapi/access.yaml -D packageName=models,modelDocs=false,models -o engine/access/rest/http/models; + go fmt ./engine/access/rest/http/models .PHONY: generate generate: generate-proto generate-mocks generate-fvm-env-wrappers @@ -131,65 +149,12 @@ generate-proto: .PHONY: generate-fvm-env-wrappers generate-fvm-env-wrappers: - go run ./fvm/environment/generate-wrappers fvm/environment/parse_restricted_checker.go + CGO_CFLAGS=$(CRYPTO_FLAG) go run ./fvm/environment/generate-wrappers fvm/environment/parse_restricted_checker.go .PHONY: generate-mocks generate-mocks: install-mock-generators - mockery --name '(Connector|PingInfoProvider)' --dir=network/p2p --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" - mockgen -destination=storage/mocks/storage.go -package=mocks github.com/onflow/flow-go/storage Blocks,Headers,Payloads,Collections,Commits,Events,ServiceEvents,TransactionResults - mockgen -destination=module/mocks/network.go -package=mocks github.com/onflow/flow-go/module Local,Requester - mockgen -destination=network/mocknetwork/mock_network.go -package=mocknetwork github.com/onflow/flow-go/network Network - mockery --name='.*' --dir=integration/benchmark/mocksiface --case=underscore --output="integration/benchmark/mock" --outpkg="mock" - mockery --name=ExecutionDataStore --dir=module/executiondatasync/execution_data --case=underscore --output="./module/executiondatasync/execution_data/mock" --outpkg="mock" - mockery --name=Downloader --dir=module/executiondatasync/execution_data --case=underscore --output="./module/executiondatasync/execution_data/mock" --outpkg="mock" - mockery --name 'ExecutionDataRequester' --dir=module/state_synchronization --case=underscore --output="./module/state_synchronization/mock" --outpkg="state_synchronization" - mockery --name 'ExecutionState' --dir=engine/execution/state --case=underscore --output="engine/execution/state/mock" --outpkg="mock" - mockery --name 'BlockComputer' --dir=engine/execution/computation/computer --case=underscore --output="engine/execution/computation/computer/mock" --outpkg="mock" - mockery --name 'ComputationManager' --dir=engine/execution/computation --case=underscore --output="engine/execution/computation/mock" --outpkg="mock" - mockery --name 'EpochComponentsFactory' --dir=engine/collection/epochmgr --case=underscore --output="engine/collection/epochmgr/mock" --outpkg="mock" - mockery --name 'Backend' --dir=engine/collection/rpc --case=underscore 
--output="engine/collection/rpc/mock" --outpkg="mock" - mockery --name 'ProviderEngine' --dir=engine/execution/provider --case=underscore --output="engine/execution/provider/mock" --outpkg="mock" - (cd ./crypto && mockery --name 'PublicKey' --case=underscore --output="../module/mock" --outpkg="mock") - mockery --name '.*' --dir=state/cluster --case=underscore --output="state/cluster/mock" --outpkg="mock" - mockery --name '.*' --dir=module --case=underscore --tags="relic" --output="./module/mock" --outpkg="mock" - mockery --name '.*' --dir=module/mempool --case=underscore --output="./module/mempool/mock" --outpkg="mempool" - mockery --name '.*' --dir=module/component --case=underscore --output="./module/component/mock" --outpkg="component" - mockery --name '.*' --dir=network --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" - mockery --name '.*' --dir=storage --case=underscore --output="./storage/mock" --outpkg="mock" - mockery --name '.*' --dir="state/protocol" --case=underscore --output="state/protocol/mock" --outpkg="mock" - mockery --name '.*' --dir="state/protocol/events" --case=underscore --output="./state/protocol/events/mock" --outpkg="mock" - mockery --name '.*' --dir=engine/execution/computation/computer --case=underscore --output="./engine/execution/computation/computer/mock" --outpkg="mock" - mockery --name '.*' --dir=engine/execution/state --case=underscore --output="./engine/execution/state/mock" --outpkg="mock" - mockery --name '.*' --dir=engine/collection --case=underscore --output="./engine/collection/mock" --outpkg="mock" - mockery --name 'complianceCore' --dir=engine/common/follower --exported --case=underscore --output="./engine/common/follower/mock" --outpkg="mock" - mockery --name '.*' --dir=engine/common/follower/cache --case=underscore --output="./engine/common/follower/cache/mock" --outpkg="mock" - mockery --name '.*' --dir=engine/consensus --case=underscore --output="./engine/consensus/mock" --outpkg="mock" - mockery --name '.*' --dir=engine/consensus/approvals --case=underscore --output="./engine/consensus/approvals/mock" --outpkg="mock" - rm -rf ./fvm/mock - mockery --name '.*' --dir=fvm --case=underscore --output="./fvm/mock" --outpkg="mock" - rm -rf ./fvm/environment/mock - mockery --name '.*' --dir=fvm/environment --case=underscore --output="./fvm/environment/mock" --outpkg="mock" - mockery --name '.*' --dir=ledger --case=underscore --output="./ledger/mock" --outpkg="mock" - mockery --name 'ViolationsConsumer' --dir=network/slashing --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" - mockery --name '.*' --dir=network/p2p/ --case=underscore --output="./network/p2p/mock" --outpkg="mockp2p" - mockery --name 'Vertex' --dir="./module/forest" --case=underscore --output="./module/forest/mock" --outpkg="mock" - mockery --name '.*' --dir="./consensus/hotstuff" --case=underscore --output="./consensus/hotstuff/mocks" --outpkg="mocks" - mockery --name '.*' --dir="./engine/access/wrapper" --case=underscore --output="./engine/access/mock" --outpkg="mock" - mockery --name 'API' --dir="./access" --case=underscore --output="./access/mock" --outpkg="mock" - mockery --name 'API' --dir="./engine/protocol" --case=underscore --output="./engine/protocol/mock" --outpkg="mock" - mockery --name 'API' --dir="./engine/access/state_stream" --case=underscore --output="./engine/access/state_stream/mock" --outpkg="mock" - mockery --name 'ConnectionFactory' --dir="./engine/access/rpc/backend" --case=underscore 
--output="./engine/access/rpc/backend/mock" --outpkg="mock" - mockery --name 'IngestRPC' --dir="./engine/execution/ingestion" --case=underscore --tags relic --output="./engine/execution/ingestion/mock" --outpkg="mock" - mockery --name '.*' --dir=model/fingerprint --case=underscore --output="./model/fingerprint/mock" --outpkg="mock" - mockery --name 'ExecForkActor' --structname 'ExecForkActorMock' --dir=module/mempool/consensus/mock/ --case=underscore --output="./module/mempool/consensus/mock/" --outpkg="mock" - mockery --name '.*' --dir=engine/verification/fetcher/ --case=underscore --output="./engine/verification/fetcher/mock" --outpkg="mockfetcher" - mockery --name '.*' --dir=./cmd/util/ledger/reporters --case=underscore --output="./cmd/util/ledger/reporters/mock" --outpkg="mock" - mockery --name 'Storage' --dir=module/executiondatasync/tracker --case=underscore --output="module/executiondatasync/tracker/mock" --outpkg="mocktracker" - - #temporarily make insecure/ a non-module to allow mockery to create mocks - mv insecure/go.mod insecure/go2.mod - mockery --name '.*' --dir=insecure/ --case=underscore --output="./insecure/mock" --outpkg="mockinsecure" - mv insecure/go2.mod insecure/go.mod + mockery --config .mockery.yaml + cd insecure; mockery --config .mockery.yaml # this ensures there is no unused dependency being added by accident .PHONY: tidy @@ -197,19 +162,46 @@ tidy: go mod tidy -v cd integration; go mod tidy -v cd crypto; go mod tidy -v - cd cmd/testclient; go mod tidy -v cd insecure; go mod tidy -v git diff --exit-code +# Builds a custom version of the golangci-lint binary which includes custom plugins +tools/custom-gcl: tools/structwrite .custom-gcl.yml + golangci-lint custom + .PHONY: lint -lint: tidy +lint: tools/custom-gcl # revive -config revive.toml -exclude storage/ledger/trie ./... - golangci-lint run -v --build-tags relic ./... + ./tools/custom-gcl run -v $(or $(LINT_PATH),./...) + +.PHONY: lint-new +lint-new: tools/custom-gcl + ./tools/custom-gcl run -v --new-from-rev=master .PHONY: fix-lint -fix-lint: +fix-lint: tools/custom-gcl # revive -config revive.toml -exclude storage/ledger/trie ./... - golangci-lint run -v --build-tags relic --fix ./... + ./tools/custom-gcl run -v --fix $(or $(LINT_PATH),./...) + +.PHONY: fix-lint-new +fix-lint-new: tools/custom-gcl + ./tools/custom-gcl run -v --fix --new-from-rev=master + +.PHONY: fix-imports +fix-imports: tools/custom-gcl + ./tools/custom-gcl run --enable-only=gci --fix $(or $(LINT_PATH),./...) + +.PHONY: fix-imports-new +fix-imports-new: tools/custom-gcl + ./tools/custom-gcl run --enable-only=gci --fix --new-from-rev=master + +.PHONY: vet +vet: tools/custom-gcl + ./tools/custom-gcl run --enable-only=govet $(or $(LINT_PATH),./...) 
+ +.PHONY: vet-new +vet-new: tools/custom-gcl + ./tools/custom-gcl run --enable-only=govet --new-from-rev=master # Runs unit tests with different list of packages as passed by CI so they run in parallel .PHONY: ci @@ -217,11 +209,11 @@ ci: install-tools test # Runs integration tests .PHONY: ci-integration -ci-integration: crypto_setup_gopath - $(MAKE) -C integration ci-integration-test +ci-integration: + $(MAKE) -C integration integration-test # Runs benchmark tests -# NOTE: we do not need `docker-build-flow` as this is run as a separate step +# NOTE: we do not need `docker-native-build-flow` as this is run as a separate step # on Teamcity .PHONY: ci-benchmark ci-benchmark: install-tools @@ -239,7 +231,6 @@ docker-ci: # Runs integration tests in Docker (for mac) .PHONY: docker-ci-integration docker-ci-integration: - rm -rf crypto/relic docker run \ --env DOCKER_API_VERSION='1.39' \ --network host \ @@ -250,162 +241,332 @@ docker-ci-integration: -w "/go/flow" "$(CONTAINER_REGISTRY)/golang-cmake:v0.0.7" \ make ci-integration -.PHONY: docker-build-collection -docker-build-collection: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/collection --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target production \ - --secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \ +# only works on Debian +.SILENT: install-cross-build-tools +install-cross-build-tools: + if [ "$(UNAME)" = "Debian" ] ; then \ + apt-get update && apt-get -y install apt-utils gcc-aarch64-linux-gnu ; \ + elif [ "$(UNAME)" = "Linux" ] ; then \ + apt-get update && apt-get -y install apt-utils gcc-aarch64-linux-gnu ; \ + else \ + echo "this target only works on Debian or Linux, host runs on" $(UNAME) ; \ + fi + +.PHONY: docker-native-build-collection +docker-native-build-collection: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/collection --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \ - -t "$(CONTAINER_REGISTRY)/collection:latest" -t "$(CONTAINER_REGISTRY)/collection:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG)" -t "$(CONTAINER_REGISTRY)/collection:$(FLOW_GO_TAG)" . - -.PHONY: docker-build-collection-without-netgo -docker-build-collection-without-netgo: - docker build -f cmd/Dockerfile --build-arg TAGS=relic --build-arg TARGET=./cmd/collection --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO) --build-arg GOARCH=$(GOARCH) --target production \ - --secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \ - --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO)" \ - -t "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG_NO_NETGO)" . - -.PHONY: docker-build-collection-debug -docker-build-collection-debug: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/collection --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target debug \ - -t "$(CONTAINER_REGISTRY)/collection-debug:latest" -t "$(CONTAINER_REGISTRY)/collection-debug:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/collection-debug:$(IMAGE_TAG)" . 
- -.PHONY: docker-build-consensus -docker-build-consensus: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/consensus --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target production \ - --secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \ + -t "$(CONTAINER_REGISTRY)/collection:latest" \ + -t "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG)" . + +.PHONY: docker-build-collection-with-adx +docker-build-collection-with-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/collection --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=amd64 --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG)" \ + -t "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG)" . + +.PHONY: docker-build-collection-without-adx +docker-build-collection-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/collection --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_ADX) --build-arg GOARCH=amd64 --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG_NO_ADX)" . + +.PHONY: docker-build-collection-without-netgo-without-adx +docker-build-collection-without-netgo-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/collection --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO_NO_ADX) --build-arg GOARCH=amd64 --build-arg TAGS="" --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG_NO_NETGO_NO_ADX)" . + +.PHONY: docker-cross-build-collection-arm +docker-cross-build-collection-arm: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/collection --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_ARM) --build-arg CC=aarch64-linux-gnu-gcc --build-arg GOARCH=arm64 --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_ARM)" \ + -t "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG_ARM)" . + +.PHONY: docker-native-build-collection-debug +docker-native-build-collection-debug: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/collection --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target debug \ + -t "$(CONTAINER_REGISTRY)/collection-debug:latest" \ + -t "$(CONTAINER_REGISTRY)/collection-debug:$(IMAGE_TAG)" . 
+ +.PHONY: docker-native-build-consensus +docker-native-build-consensus: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/consensus --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \ - -t "$(CONTAINER_REGISTRY)/consensus:latest" -t "$(CONTAINER_REGISTRY)/consensus:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG)" -t "$(CONTAINER_REGISTRY)/consensus:$(FLOW_GO_TAG)" . - -.PHONY: docker-build-consensus-without-netgo -docker-build-consensus-without-netgo: - docker build -f cmd/Dockerfile --build-arg TAGS=relic --build-arg TARGET=./cmd/consensus --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO) --build-arg GOARCH=$(GOARCH) --target production \ - --secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \ - --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO)" \ - -t "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG_NO_NETGO)" . - -.PHONY: docker-build-consensus-debug -docker-build-consensus-debug: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/consensus --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target debug \ - -t "$(CONTAINER_REGISTRY)/consensus-debug:latest" -t "$(CONTAINER_REGISTRY)/consensus-debug:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/consensus-debug:$(IMAGE_TAG)" . - -.PHONY: docker-build-execution -docker-build-execution: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target production \ - --secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \ + -t "$(CONTAINER_REGISTRY)/consensus:latest" \ + -t "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG)" . + +.PHONY: docker-build-consensus-with-adx +docker-build-consensus-with-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/consensus --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=amd64 --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG)" \ + -t "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG)" . + +.PHONY: docker-build-consensus-without-adx +docker-build-consensus-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/consensus --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_ADX) --build-arg GOARCH=amd64 --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG_NO_ADX)" . 
+
+.PHONY: docker-build-consensus-without-netgo-without-adx
+docker-build-consensus-without-netgo-without-adx:
+	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/consensus --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO_NO_ADX) --build-arg GOARCH=amd64 --build-arg TAGS="" --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \
+		--secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \
+		--label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO_NO_ADX)" \
+		-t "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG_NO_NETGO_NO_ADX)" .
+
+.PHONY: docker-cross-build-consensus-arm
+docker-cross-build-consensus-arm:
+	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/consensus --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_ARM) --build-arg GOARCH=arm64 --build-arg CC=aarch64-linux-gnu-gcc --target production \
+		--secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \
+		--label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG_ARM}" \
+		-t "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG_ARM)" .
+
+
+.PHONY: docker-native-build-consensus-debug
+docker-native-build-consensus-debug:
+	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/consensus --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target debug \
+	-t "$(CONTAINER_REGISTRY)/consensus-debug:latest" \
+	-t "$(CONTAINER_REGISTRY)/consensus-debug:$(IMAGE_TAG)" .
+
+.PHONY: docker-native-build-execution
+docker-native-build-execution:
+	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \
+		--secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \
 	--label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \
-	-t "$(CONTAINER_REGISTRY)/execution:latest" -t "$(CONTAINER_REGISTRY)/execution:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG)" -t "$(CONTAINER_REGISTRY)/execution:$(FLOW_GO_TAG)" .
-
-.PHONY: docker-build-execution-without-netgo
-docker-build-execution-without-netgo:
-	docker build -f cmd/Dockerfile --build-arg TAGS=relic --build-arg TARGET=./cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO) --build-arg GOARCH=$(GOARCH) --target production \
-	--secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \
-	--label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO)" \
-	-t "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG_NO_NETGO)" .
-
-.PHONY: docker-build-execution-debug
-docker-build-execution-debug:
-	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target debug \
-	-t "$(CONTAINER_REGISTRY)/execution-debug:latest" -t "$(CONTAINER_REGISTRY)/execution-debug:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/execution-debug:$(IMAGE_TAG)" .
+	-t "$(CONTAINER_REGISTRY)/execution:latest" \
+	-t "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG)" .
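The `docker-cross-build-*-arm` targets cross-compile from an amd64 host: `GOARCH=arm64` retargets the Go toolchain, and `CC=aarch64-linux-gnu-gcc` points cgo at an aarch64 cross C compiler, which the builder stage of `cmd/Dockerfile` is assumed to provide. A usage sketch (registry value hypothetical):

    # produce an arm64 consensus image on an amd64 build host; the tag carries the ARM suffix
    make docker-cross-build-consensus-arm CONTAINER_REGISTRY=registry.example.com/flow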
+ +.PHONY: docker-build-execution-with-adx +docker-build-execution-with-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=amd64 --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG)" \ + -t "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG)" . + +.PHONY: docker-build-execution-without-adx +docker-build-execution-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_ADX) --build-arg GOARCH=amd64 --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG_NO_ADX)" . + +.PHONY: docker-build-execution-without-netgo-without-adx +docker-build-execution-without-netgo-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO_NO_ADX) --build-arg GOARCH=amd64 --build-arg TAGS="" --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG_NO_NETGO_NO_ADX)" . + +.PHONY: docker-cross-build-execution-arm +docker-cross-build-execution-arm: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_ARM) --build-arg GOARCH=arm64 --build-arg CC=aarch64-linux-gnu-gcc --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG_ARM}" \ + -t "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG_ARM)" . + +.PHONY: docker-native-build-execution-debug +docker-native-build-execution-debug: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target debug \ + -t "$(CONTAINER_REGISTRY)/execution-debug:latest" \ + -t "$(CONTAINER_REGISTRY)/execution-debug:$(IMAGE_TAG)" . # build corrupt execution node for BFT testing -.PHONY: docker-build-execution-corrupt -docker-build-execution-corrupt: +.PHONY: docker-native-build-execution-corrupt +docker-native-build-execution-corrupt: # temporarily make insecure/ a non-module to allow Docker to use corrupt builders there ./insecure/cmd/mods_override.sh - docker build -f cmd/Dockerfile --build-arg TARGET=./insecure/cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target production \ + docker build -f cmd/Dockerfile --build-arg TARGET=./insecure/cmd/execution --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \ - -t "$(CONTAINER_REGISTRY)/execution-corrupted:latest" -t "$(CONTAINER_REGISTRY)/execution-corrupted:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/execution-corrupted:$(IMAGE_TAG)" . 
+ --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + -t "$(CONTAINER_REGISTRY)/execution-corrupted:latest" \ + -t "$(CONTAINER_REGISTRY)/execution-corrupted:$(IMAGE_TAG)" . ./insecure/cmd/mods_restore.sh -.PHONY: docker-build-verification -docker-build-verification: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target production \ - --secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \ +.PHONY: docker-native-build-verification +docker-native-build-verification: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \ - -t "$(CONTAINER_REGISTRY)/verification:latest" -t "$(CONTAINER_REGISTRY)/verification:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG)" -t "$(CONTAINER_REGISTRY)/verification:$(FLOW_GO_TAG)" . - -.PHONY: docker-build-verification-without-netgo -docker-build-verification-without-netgo: - docker build -f cmd/Dockerfile --build-arg TAGS=relic --build-arg TARGET=./cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO) --build-arg GOARCH=$(GOARCH) --target production \ - --secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \ - --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO)" \ - -t "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG_NO_NETGO)" . - -.PHONY: docker-build-verification-debug -docker-build-verification-debug: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target debug \ - -t "$(CONTAINER_REGISTRY)/verification-debug:latest" -t "$(CONTAINER_REGISTRY)/verification-debug:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/verification-debug:$(IMAGE_TAG)" . + -t "$(CONTAINER_REGISTRY)/verification:latest" \ + -t "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG)" . + +.PHONY: docker-build-verification-with-adx +docker-build-verification-with-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=amd64 --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG)" \ + -t "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG)" . + +.PHONY: docker-build-verification-without-adx +docker-build-verification-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_ADX) --build-arg GOARCH=amd64 --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG_NO_ADX)" . 
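The corrupt-node targets (execution above; verification and access below) temporarily turn `insecure/` into a non-module via `mods_override.sh` so Docker can build the corrupt builders, then restore it with `mods_restore.sh`. If the docker build step fails, the restore step never runs, so the working tree can be left in the overridden state; a defensive invocation sketch (the `||` fallback is a suggestion, not part of the targets):

    # restore the module layout even when the build fails
    make docker-native-build-execution-corrupt || ./insecure/cmd/mods_restore.sh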
+ +.PHONY: docker-build-verification-without-netgo-without-adx +docker-build-verification-without-netgo-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO_NO_ADX) --build-arg GOARCH=amd64 --build-arg TAGS="" --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG_NO_NETGO_NO_ADX)" . + +.PHONY: docker-cross-build-verification-arm +docker-cross-build-verification-arm: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_ARM) --build-arg GOARCH=arm64 --build-arg CC=aarch64-linux-gnu-gcc --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG_ARM}" \ + -t "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG_ARM)" . + +.PHONY: docker-native-build-verification-debug +docker-native-build-verification-debug: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target debug \ + -t "$(CONTAINER_REGISTRY)/verification-debug:latest" \ + -t "$(CONTAINER_REGISTRY)/verification-debug:$(IMAGE_TAG)" . # build corrupt verification node for BFT testing -.PHONY: docker-build-verification-corrupt -docker-build-verification-corrupt: +.PHONY: docker-native-build-verification-corrupt +docker-native-build-verification-corrupt: # temporarily make insecure/ a non-module to allow Docker to use corrupt builders there ./insecure/cmd/mods_override.sh - docker build -f cmd/Dockerfile --build-arg TARGET=./insecure/cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target production \ + docker build -f cmd/Dockerfile --build-arg TARGET=./insecure/cmd/verification --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \ - -t "$(CONTAINER_REGISTRY)/verification-corrupted:latest" -t "$(CONTAINER_REGISTRY)/verification-corrupted:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/verification-corrupted:$(IMAGE_TAG)" . + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + -t "$(CONTAINER_REGISTRY)/verification-corrupted:latest" \ + -t "$(CONTAINER_REGISTRY)/verification-corrupted:$(IMAGE_TAG)" . 
./insecure/cmd/mods_restore.sh -.PHONY: docker-build-access -docker-build-access: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target production \ - --secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \ +.PHONY: docker-native-build-access +docker-native-build-access: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \ - -t "$(CONTAINER_REGISTRY)/access:latest" -t "$(CONTAINER_REGISTRY)/access:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG)" -t "$(CONTAINER_REGISTRY)/access:$(FLOW_GO_TAG)" . - -.PHONY: docker-build-access-without-netgo -docker-build-access-without-netgo: - docker build -f cmd/Dockerfile --build-arg TAGS=relic --build-arg TARGET=./cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO) --build-arg GOARCH=$(GOARCH) --target production \ - --secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \ - --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO)" \ - -t "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG_NO_NETGO)" . - -.PHONY: docker-build-access-debug -docker-build-access-debug: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target debug \ - -t "$(CONTAINER_REGISTRY)/access-debug:latest" -t "$(CONTAINER_REGISTRY)/access-debug:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/access-debug:$(IMAGE_TAG)" . + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + -t "$(CONTAINER_REGISTRY)/access:latest" \ + -t "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG)" . + +.PHONY: docker-build-access-with-adx +docker-build-access-with-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=amd64 --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG)" \ + -t "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG)" . + +.PHONY: docker-build-access-without-adx +docker-build-access-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_ADX) --build-arg GOARCH=amd64 --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG_NO_ADX)" . + +.PHONY: docker-build-access-without-netgo-without-adx +docker-build-access-without-netgo-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO_NO_ADX) --build-arg GOARCH=amd64 --build-arg TAGS="" --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG_NO_NETGO_NO_ADX)" . 
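Note that only the `docker-native-build-*` targets apply the `:latest` tag alongside `$(IMAGE_TAG)`; the `-with-adx`, `-without-adx`, `-without-netgo-without-adx`, and `-arm` variants each apply a single variant-specific tag. A quick way to check what a build produced (registry value hypothetical):

    make docker-native-build-access CONTAINER_REGISTRY=registry.example.com/flow
    docker image ls registry.example.com/flow/access   # expect both :latest and the $(IMAGE_TAG) tag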
+
+.PHONY: docker-cross-build-access-arm
+docker-cross-build-access-arm:
+	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_ARM) --build-arg GOARCH=arm64 --build-arg CC=aarch64-linux-gnu-gcc --target production \
+		--secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \
+		--label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG_ARM}" \
+		-t "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG_ARM)" .
+
+
+.PHONY: docker-native-build-access-debug
+docker-native-build-access-debug:
+	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target debug \
+		--secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \
+		-t "$(CONTAINER_REGISTRY)/access-debug:latest" \
+		-t "$(CONTAINER_REGISTRY)/access-debug:$(IMAGE_TAG)" .
 
 # build corrupt access node for BFT testing
-.PHONY: docker-build-access-corrupt
-docker-build-access-corrupt:
+.PHONY: docker-native-build-access-corrupt
+docker-native-build-access-corrupt:
 	#temporarily make insecure/ a non-module to allow Docker to use corrupt builders there
 	./insecure/cmd/mods_override.sh
-	docker build -f cmd/Dockerfile --build-arg TARGET=./insecure/cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target production \
+	docker build -f cmd/Dockerfile --build-arg TARGET=./insecure/cmd/access --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \
+		--secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \
 	--label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \
-	-t "$(CONTAINER_REGISTRY)/access-corrupted:latest" -t "$(CONTAINER_REGISTRY)/access-corrupted:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/access-corrupted:$(IMAGE_TAG)" .
+	-t "$(CONTAINER_REGISTRY)/access-corrupted:latest" \
+	-t "$(CONTAINER_REGISTRY)/access-corrupted:$(IMAGE_TAG)" .
 	./insecure/cmd/mods_restore.sh
 
-.PHONY: docker-build-observer
-docker-build-observer:
-	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/observer --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target production \
-	--secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \
+# build a binary to run on bare metal without using Docker.
+# the binary is copied out of the access image and written to ./flow_access_node
+.PHONY: docker-native-build-access-binary
+docker-native-build-access-binary: docker-native-build-access
+	docker create --name extract "$(CONTAINER_REGISTRY)/access:latest"
+	docker cp extract:/bin/app ./flow_access_node
+	docker rm extract
+
+.PHONY: docker-native-build-observer
+docker-native-build-observer:
+	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/observer --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \
+		--secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \
 	--label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \
-	-t "$(CONTAINER_REGISTRY)/observer:latest" -t "$(CONTAINER_REGISTRY)/observer:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG)" .
- -.PHONY: docker-build-observer-without-netgo -docker-build-observer-without-netgo: - docker build -f cmd/Dockerfile --build-arg TAGS=relic --build-arg TARGET=./cmd/observer --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO) --build-arg GOARCH=$(GOARCH) --target production \ - --secret id=git_creds,env=GITHUB_CREDS --build-arg GOPRIVATE=$(GOPRIVATE) \ - --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO)" \ - -t "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG_NO_NETGO)" . - - -.PHONY: docker-build-ghost -docker-build-ghost: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/ghost --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target production \ + -t "$(CONTAINER_REGISTRY)/observer:latest" \ + -t "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG)" . + +.PHONY: docker-build-observer-with-adx +docker-build-observer-with-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/observer --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=amd64 --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG)" \ + -t "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG)" . + +.PHONY: docker-build-observer-without-adx +docker-build-observer-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/observer --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_ADX) --build-arg GOARCH=amd64 --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG_NO_ADX)" . + +.PHONY: docker-build-observer-without-netgo-without-adx +docker-build-observer-without-netgo-without-adx: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/observer --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_NO_NETGO_NO_ADX) --build-arg GOARCH=amd64 --build-arg TAGS="" --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=$(IMAGE_TAG_NO_NETGO_NO_ADX)" \ + -t "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG_NO_NETGO_NO_ADX)" . + +.PHONY: docker-cross-build-observer-arm +docker-cross-build-observer-arm: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/observer --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG_ARM) --build-arg GOARCH=arm64 --build-arg CC=aarch64-linux-gnu-gcc --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG_ARM}" \ + -t "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG_ARM)" . + + +.PHONY: docker-native-build-ghost +docker-native-build-ghost: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/ghost --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \ - -t "$(CONTAINER_REGISTRY)/ghost:latest" -t "$(CONTAINER_REGISTRY)/ghost:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/ghost:$(IMAGE_TAG)" . 
+ --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \ + -t "$(CONTAINER_REGISTRY)/ghost:latest" \ + -t "$(CONTAINER_REGISTRY)/ghost:$(IMAGE_TAG)" . -.PHONY: docker-build-ghost-debug -docker-build-ghost-debug: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/ghost --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --target debug \ - -t "$(CONTAINER_REGISTRY)/ghost-debug:latest" -t "$(CONTAINER_REGISTRY)/ghost-debug:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/ghost-debug:$(IMAGE_TAG)" . +.PHONY: docker-native-build-ghost-debug +docker-native-build-ghost-debug: + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/ghost --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(IMAGE_TAG) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target debug \ + -t "$(CONTAINER_REGISTRY)/ghost-debug:latest" \ + -t "$(CONTAINER_REGISTRY)/ghost-debug:$(IMAGE_TAG)" . PHONY: docker-build-bootstrap docker-build-bootstrap: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/bootstrap --build-arg GOARCH=$(GOARCH) --target production \ + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/bootstrap --build-arg GOARCH=$(GOARCH) --build-arg VERSION=$(IMAGE_TAG) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY \ --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \ - -t "$(CONTAINER_REGISTRY)/bootstrap:latest" -t "$(CONTAINER_REGISTRY)/bootstrap:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/bootstrap:$(IMAGE_TAG)" . + -t "$(CONTAINER_REGISTRY)/bootstrap:latest" \ + -t "$(CONTAINER_REGISTRY)/bootstrap:$(IMAGE_TAG)" . PHONY: tool-bootstrap tool-bootstrap: docker-build-bootstrap @@ -413,127 +574,174 @@ tool-bootstrap: docker-build-bootstrap .PHONY: docker-build-bootstrap-transit docker-build-bootstrap-transit: - docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/bootstrap/transit --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(VERSION) --build-arg GOARCH=$(GOARCH) --no-cache \ + docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/bootstrap/transit --build-arg COMMIT=$(COMMIT) --build-arg VERSION=$(VERSION) --build-arg GOARCH=$(GOARCH) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --no-cache \ --target production \ - -t "$(CONTAINER_REGISTRY)/bootstrap-transit:latest" -t "$(CONTAINER_REGISTRY)/bootstrap-transit:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/bootstrap-transit:$(IMAGE_TAG)" . + --secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY \ + -t "$(CONTAINER_REGISTRY)/bootstrap-transit:latest" \ + -t "$(CONTAINER_REGISTRY)/bootstrap-transit:$(IMAGE_TAG)" . PHONY: tool-transit tool-transit: docker-build-bootstrap-transit docker container create --name transit $(CONTAINER_REGISTRY)/bootstrap-transit:latest;docker container cp transit:/bin/app ./transit;docker container rm transit -.PHONY: docker-build-loader -docker-build-loader: - docker build -f ./integration/benchmark/cmd/manual/Dockerfile --build-arg TARGET=./benchmark/cmd/manual --target production \ +.PHONY: docker-native-build-loader +docker-native-build-loader: + docker build -f ./integration/benchmark/cmd/manual/Dockerfile --build-arg TARGET=./benchmark/cmd/manual --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \ --label "git_commit=${COMMIT}" --label "git_tag=${IMAGE_TAG}" \ - -t "$(CONTAINER_REGISTRY)/loader:latest" -t "$(CONTAINER_REGISTRY)/loader:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/loader:$(IMAGE_TAG)" . 
+	-t "$(CONTAINER_REGISTRY)/loader:latest" \
+	-t "$(CONTAINER_REGISTRY)/loader:$(IMAGE_TAG)" .
+
+.PHONY: docker-native-build-flow
+docker-native-build-flow: docker-native-build-collection docker-native-build-consensus docker-native-build-execution docker-native-build-verification docker-native-build-access docker-native-build-observer docker-native-build-ghost
+
+.PHONY: docker-build-flow-with-adx
+docker-build-flow-with-adx: docker-build-collection-with-adx docker-build-consensus-with-adx docker-build-execution-with-adx docker-build-verification-with-adx docker-build-access-with-adx docker-build-observer-with-adx
+
+.PHONY: docker-build-flow-without-adx
+docker-build-flow-without-adx: docker-build-collection-without-adx docker-build-consensus-without-adx docker-build-execution-without-adx docker-build-verification-without-adx docker-build-access-without-adx docker-build-observer-without-adx
 
-.PHONY: docker-build-flow
-docker-build-flow: docker-build-collection docker-build-consensus docker-build-execution docker-build-verification docker-build-access docker-build-observer docker-build-ghost
+.PHONY: docker-build-flow-without-netgo-without-adx
+docker-build-flow-without-netgo-without-adx: docker-build-collection-without-netgo-without-adx docker-build-consensus-without-netgo-without-adx docker-build-execution-without-netgo-without-adx docker-build-verification-without-netgo-without-adx docker-build-access-without-netgo-without-adx docker-build-observer-without-netgo-without-adx
 
-.PHONY: docker-build-flow-without-netgo
-docker-build-flow-without-netgo: docker-build-collection-without-netgo docker-build-consensus-without-netgo docker-build-execution-without-netgo docker-build-verification-without-netgo docker-build-access-without-netgo docker-build-observer-without-netgo
+# in this target, images are arm64 (aarch64) and are built with `netgo` and with `adx`.
+# other arm64 images can be built without `netgo` or without `adx`.
+.PHONY: docker-cross-build-flow-arm
+docker-cross-build-flow-arm: docker-cross-build-collection-arm docker-cross-build-consensus-arm docker-cross-build-execution-arm docker-cross-build-verification-arm docker-cross-build-access-arm docker-cross-build-observer-arm
 
-.PHONY: docker-build-flow-corrupt
-docker-build-flow-corrupt: docker-build-execution-corrupt docker-build-verification-corrupt docker-build-access-corrupt
+.PHONY: docker-native-build-flow-corrupt
+docker-native-build-flow-corrupt: docker-native-build-execution-corrupt docker-native-build-verification-corrupt docker-native-build-access-corrupt
 
-.PHONY: docker-build-benchnet
-docker-build-benchnet: docker-build-flow docker-build-loader
+.PHONY: docker-native-build-benchnet
+docker-native-build-benchnet: docker-native-build-flow docker-native-build-loader
 
-.PHONY: docker-push-collection
-docker-push-collection:
-	docker push "$(CONTAINER_REGISTRY)/collection:$(SHORT_COMMIT)"
+.PHONY: docker-push-collection-with-adx
+docker-push-collection-with-adx:
 	docker push "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG)"
-	docker push "$(CONTAINER_REGISTRY)/collection:$(FLOW_GO_TAG)"
 
-.PHONY: docker-push-collection-without-netgo
-docker-push-collection-without-netgo:
-	docker push "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG_NO_NETGO)"
+.PHONY: docker-push-collection-without-adx
+docker-push-collection-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG_NO_ADX)"
+
+.PHONY: docker-push-collection-without-netgo-without-adx
+docker-push-collection-without-netgo-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG_NO_NETGO_NO_ADX)"
+
+.PHONY: docker-push-collection-arm
+docker-push-collection-arm:
+	docker push "$(CONTAINER_REGISTRY)/collection:$(IMAGE_TAG_ARM)"
 
 .PHONY: docker-push-collection-latest
-docker-push-collection-latest: docker-push-collection
+docker-push-collection-latest: docker-push-collection-with-adx
 	docker push "$(CONTAINER_REGISTRY)/collection:latest"
 
-.PHONY: docker-push-consensus
-docker-push-consensus:
-	docker push "$(CONTAINER_REGISTRY)/consensus:$(SHORT_COMMIT)"
+.PHONY: docker-push-consensus-with-adx
+docker-push-consensus-with-adx:
 	docker push "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG)"
-	docker push "$(CONTAINER_REGISTRY)/consensus:$(FLOW_GO_TAG)"
 
-.PHONY: docker-push-consensus-without-netgo
-docker-push-consensus-without-netgo:
-	docker push "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG_NO_NETGO)"
+.PHONY: docker-push-consensus-without-adx
+docker-push-consensus-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG_NO_ADX)"
+
+.PHONY: docker-push-consensus-without-netgo-without-adx
+docker-push-consensus-without-netgo-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG_NO_NETGO_NO_ADX)"
+
+.PHONY: docker-push-consensus-arm
+docker-push-consensus-arm:
+	docker push "$(CONTAINER_REGISTRY)/consensus:$(IMAGE_TAG_ARM)"
 
 .PHONY: docker-push-consensus-latest
-docker-push-consensus-latest: docker-push-consensus
+docker-push-consensus-latest: docker-push-consensus-with-adx
 	docker push "$(CONTAINER_REGISTRY)/consensus:latest"
 
-.PHONY: docker-push-execution
-docker-push-execution:
-	docker push "$(CONTAINER_REGISTRY)/execution:$(SHORT_COMMIT)"
+.PHONY: docker-push-execution-with-adx
+docker-push-execution-with-adx:
 	docker push "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG)"
-	docker push "$(CONTAINER_REGISTRY)/execution:$(FLOW_GO_TAG)"
 
 .PHONY: docker-push-execution-corrupt
 docker-push-execution-corrupt:
-	docker push "$(CONTAINER_REGISTRY)/execution-corrupted:$(SHORT_COMMIT)"
 	docker push "$(CONTAINER_REGISTRY)/execution-corrupted:$(IMAGE_TAG)"
 
+.PHONY: docker-push-execution-without-adx
+docker-push-execution-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG_NO_ADX)"
+
+.PHONY: docker-push-execution-without-netgo-without-adx
+docker-push-execution-without-netgo-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG_NO_NETGO_NO_ADX)"
 
-.PHONY: docker-push-execution-without-netgo
-docker-push-execution-without-netgo:
-	docker push "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG_NO_NETGO)"
+.PHONY: docker-push-execution-arm
+docker-push-execution-arm:
+	docker push "$(CONTAINER_REGISTRY)/execution:$(IMAGE_TAG_ARM)"
 
 .PHONY: docker-push-execution-latest
-docker-push-execution-latest: docker-push-execution
+docker-push-execution-latest: docker-push-execution-with-adx
 	docker push "$(CONTAINER_REGISTRY)/execution:latest"
 
-.PHONY: docker-push-verification
-docker-push-verification:
-	docker push "$(CONTAINER_REGISTRY)/verification:$(SHORT_COMMIT)"
+.PHONY: docker-push-verification-with-adx
+docker-push-verification-with-adx:
 	docker push "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG)"
-	docker push "$(CONTAINER_REGISTRY)/verification:$(FLOW_GO_TAG)"
+
+.PHONY: docker-push-verification-without-adx
+docker-push-verification-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG_NO_ADX)"
 
 .PHONY: docker-push-verification-corrupt
 docker-push-verification-corrupt:
-	docker push "$(CONTAINER_REGISTRY)/verification-corrupted:$(SHORT_COMMIT)"
 	docker push "$(CONTAINER_REGISTRY)/verification-corrupted:$(IMAGE_TAG)"
 
-.PHONY: docker-push-verification-without-netgo
-docker-push-verification-without-netgo:
-	docker push "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG_NO_NETGO)"
+.PHONY: docker-push-verification-without-netgo-without-adx
+docker-push-verification-without-netgo-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG_NO_NETGO_NO_ADX)"
+
+.PHONY: docker-push-verification-arm
+docker-push-verification-arm:
+	docker push "$(CONTAINER_REGISTRY)/verification:$(IMAGE_TAG_ARM)"
 
 .PHONY: docker-push-verification-latest
-docker-push-verification-latest: docker-push-verification
+docker-push-verification-latest: docker-push-verification-with-adx
 	docker push "$(CONTAINER_REGISTRY)/verification:latest"
 
-.PHONY: docker-push-access
-docker-push-access:
-	docker push "$(CONTAINER_REGISTRY)/access:$(SHORT_COMMIT)"
+.PHONY: docker-push-access-with-adx
+docker-push-access-with-adx:
 	docker push "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG)"
-	docker push "$(CONTAINER_REGISTRY)/access:$(FLOW_GO_TAG)"
+
+.PHONY: docker-push-access-without-adx
+docker-push-access-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG_NO_ADX)"
 
 .PHONY: docker-push-access-corrupt
 docker-push-access-corrupt:
-	docker push "$(CONTAINER_REGISTRY)/access-corrupted:$(SHORT_COMMIT)"
 	docker push "$(CONTAINER_REGISTRY)/access-corrupted:$(IMAGE_TAG)"
 
-.PHONY: docker-push-access-without-netgo
-docker-push-access-without-netgo:
-	docker push "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG_NO_NETGO)"
+.PHONY: docker-push-access-without-netgo-without-adx
+docker-push-access-without-netgo-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG_NO_NETGO_NO_ADX)"
+
+.PHONY: docker-push-access-arm
+docker-push-access-arm:
+	docker push "$(CONTAINER_REGISTRY)/access:$(IMAGE_TAG_ARM)"
 
 .PHONY: docker-push-access-latest
-docker-push-access-latest: docker-push-access
+docker-push-access-latest: docker-push-access-with-adx
 	docker push "$(CONTAINER_REGISTRY)/access:latest"
-
-.PHONY: docker-push-observer
-docker-push-observer:
-	docker push "$(CONTAINER_REGISTRY)/observer:$(SHORT_COMMIT)"
+
+.PHONY: docker-push-observer-with-adx
+docker-push-observer-with-adx:
 	docker push "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG)"
 
-.PHONY: docker-push-observer-without-netgo
-docker-push-observer-without-netgo:
-	docker push "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG_NO_NETGO)"
+.PHONY: docker-push-observer-without-adx
+docker-push-observer-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG_NO_ADX)"
+
+.PHONY: docker-push-observer-without-netgo-without-adx
+docker-push-observer-without-netgo-without-adx:
+	docker push "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG_NO_NETGO_NO_ADX)"
+
+.PHONY: docker-push-observer-arm
+docker-push-observer-arm:
+	docker push "$(CONTAINER_REGISTRY)/observer:$(IMAGE_TAG_ARM)"
 
 .PHONY: docker-push-observer-latest
-docker-push-observer-latest: docker-push-observer
+docker-push-observer-latest: docker-push-observer-with-adx
@@ -541,7 +749,6 @@ docker-push-observer-latest: docker-push-observer
 
 .PHONY: docker-push-ghost
 docker-push-ghost:
-	docker push "$(CONTAINER_REGISTRY)/ghost:$(SHORT_COMMIT)"
 	docker push "$(CONTAINER_REGISTRY)/ghost:$(IMAGE_TAG)"
 
 .PHONY: docker-push-ghost-latest
@@ -550,18 +757,23 @@ docker-push-ghost-latest: docker-push-ghost
 
 .PHONY: docker-push-loader
 docker-push-loader:
-	docker push "$(CONTAINER_REGISTRY)/loader:$(SHORT_COMMIT)"
 	docker push "$(CONTAINER_REGISTRY)/loader:$(IMAGE_TAG)"
 
 .PHONY: docker-push-loader-latest
 docker-push-loader-latest: docker-push-loader
 	docker push "$(CONTAINER_REGISTRY)/loader:latest"
 
-.PHONY: docker-push-flow
-docker-push-flow: docker-push-collection docker-push-consensus docker-push-execution docker-push-verification docker-push-access docker-push-observer
+.PHONY: docker-push-flow-with-adx
+docker-push-flow-with-adx: docker-push-collection-with-adx docker-push-consensus-with-adx docker-push-execution-with-adx docker-push-verification-with-adx docker-push-access-with-adx docker-push-observer-with-adx
+
+.PHONY: docker-push-flow-without-adx
+docker-push-flow-without-adx: docker-push-collection-without-adx docker-push-consensus-without-adx docker-push-execution-without-adx docker-push-verification-without-adx docker-push-access-without-adx docker-push-observer-without-adx
+
+.PHONY: docker-push-flow-without-netgo-without-adx
+docker-push-flow-without-netgo-without-adx: docker-push-collection-without-netgo-without-adx docker-push-consensus-without-netgo-without-adx docker-push-execution-without-netgo-without-adx docker-push-verification-without-netgo-without-adx docker-push-access-without-netgo-without-adx docker-push-observer-without-netgo-without-adx
 
-.PHONY: docker-push-flow-without-netgo
-docker-push-flow-without-netgo: docker-push-collection-without-netgo docker-push-consensus-without-netgo docker-push-execution-without-netgo docker-push-verification-without-netgo docker-push-access-without-netgo docker-push-observer-without-netgo
+.PHONY: docker-push-flow-arm
+docker-push-flow-arm: docker-push-collection-arm docker-push-consensus-arm docker-push-execution-arm docker-push-verification-arm docker-push-access-arm docker-push-observer-arm
 
 .PHONY: docker-push-flow-latest
 docker-push-flow-latest: docker-push-collection-latest docker-push-consensus-latest docker-push-execution-latest docker-push-verification-latest docker-push-access-latest docker-push-observer-latest
@@ -608,8 +820,10 @@ docker-all-tools: tool-util tool-remove-execution-fork
 
 PHONY: docker-build-util
 docker-build-util:
-	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/util --build-arg GOARCH=$(GOARCH) --target production \
-	-t "$(CONTAINER_REGISTRY)/util:latest" -t "$(CONTAINER_REGISTRY)/util:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/util:$(IMAGE_TAG)" .
+	docker build -f cmd/Dockerfile --build-arg TARGET=./cmd/util --build-arg GOARCH=$(GOARCH) --build-arg VERSION=$(IMAGE_TAG) --build-arg CGO_FLAG=$(DISABLE_ADX) --target production \
+		--secret id=cadence_deploy_key,env=CADENCE_DEPLOY_KEY --build-arg GOPRIVATE=$(GOPRIVATE) \
+	-t "$(CONTAINER_REGISTRY)/util:latest" \
+	-t "$(CONTAINER_REGISTRY)/util:$(IMAGE_TAG)" .
 
 PHONY: tool-util
 tool-util: docker-build-util
@@ -617,8 +831,9 @@ tool-util: docker-build-util
 
 PHONY: docker-build-remove-execution-fork
 docker-build-remove-execution-fork:
-	docker build -f cmd/Dockerfile --ssh default --build-arg TARGET=./cmd/util/cmd/remove-execution-fork --build-arg GOARCH=$(GOARCH) --target production \
-	-t "$(CONTAINER_REGISTRY)/remove-execution-fork:latest" -t "$(CONTAINER_REGISTRY)/remove-execution-fork:$(SHORT_COMMIT)" -t "$(CONTAINER_REGISTRY)/remove-execution-fork:$(IMAGE_TAG)" .
+	docker build -f cmd/Dockerfile --ssh default --build-arg TARGET=./cmd/util/cmd/remove-execution-fork --build-arg GOARCH=$(GOARCH) --build-arg VERSION=$(IMAGE_TAG) --build-arg CGO_FLAG=$(CRYPTO_FLAG) --target production \
+	-t "$(CONTAINER_REGISTRY)/remove-execution-fork:latest" \
+	-t "$(CONTAINER_REGISTRY)/remove-execution-fork:$(IMAGE_TAG)" .
 
 PHONY: tool-remove-execution-fork
 tool-remove-execution-fork: docker-build-remove-execution-fork
diff --git a/NOTICE b/NOTICE
index e0890e1be36..0c334d1c385 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,5 +1,5 @@
 Flow-go
-Copyright 2019-2020 Dapper Labs, Inc.
+Copyright 2019-2024 Flow Foundation.
 
-This product includes software developed at Dapper Labs, Inc. (https://www.dapperlabs.com/).
+This product includes software developed at Flow Foundation (https://flow.com/flow-foundation).
diff --git a/README.md b/README.md
index 39bd7a13e3e..333cbfc7bd2 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@ digital assets that power them. Read more about it [here](https://github.com/onf
 
 ## Documentation
 
-You can find an overview of the Flow architecture on the [documentation website](https://www.onflow.org/primer).
+You can find an overview of the Flow architecture on the [documentation website](https://www.flow.com/primer).
 
 Development on Flow is divided into work streams. Each work stream has a home directory containing high-level
 documentation for the stream, as well as links to documentation for relevant components used by that work stream.
@@ -54,8 +54,7 @@ The following table lists all work streams and links to their home directory and
 ## Installation
 
 - Clone this repository
-- Install [Go](https://golang.org/doc/install) (Flow supports Go 1.18 and later)
-- Install [CMake](https://cmake.org/install/), which is used for building the crypto library
+- Install [Go](https://golang.org/doc/install) (Flow requires Go 1.25 or later)
 - Install [Docker](https://docs.docker.com/get-docker/), which is used for running a local network and integration tests
 - Make sure the [`GOPATH`](https://golang.org/cmd/go/#hdr-GOPATH_environment_variable) and `GOBIN` environment variables
   are set, and `GOBIN` is added to your path:
@@ -75,12 +74,6 @@ The following table lists all work streams and links to their home directory and
 
 At this point, you should be ready to build, test, and run Flow! 🎉
 
-Note: Whenever the crypto module version imported by "go.mod" is updated to a version that was never locally imported before, the crypto dependency needs to be set-up. If not, you should notice errors about "relic" or "crypto". Run the following command to set-up the new module version:
-
-```bash
-make crypto_setup_gopath
-```
-
 ## Development Workflow
 
 ### Testing
@@ -107,14 +100,29 @@ The recommended way to build and run Flow for local development is using Docker.
 
 Build a Docker image for all nodes:
 
 ```bash
-make docker-build-flow
+make docker-native-build-flow
 ```
 
 Build a Docker image for a particular node role (replace `$ROLE` with `collection`, `consensus`, etc.):
 
 ```bash
-make docker-build-$ROLE
+make docker-native-build-$ROLE
+```
+
+#### Building a binary for the access node
+
+Build the binary for an access node that can be run directly on the machine without using Docker.
+
+```bash
+make docker-native-build-access-binary
 ```
+
+_This builds a binary for a Linux/x86_64 machine._
+
+The make command generates a binary called `flow_access_node`.
+
+### Importing the module
+
+When importing the `github.com/onflow/flow-go` module in your Go project, testing or building your project may require setting extra Go flags because the module requires [cgo](https://pkg.go.dev/cmd/cgo). In particular, `CGO_ENABLED` must be set to `1` if `cgo` isn't enabled by default. This constraint comes from the underlying cryptography library. Refer to the [crypto repository build](https://github.com/onflow/crypto?tab=readme-ov-file#build) for more details.
 
 ### Local Network
 
@@ -148,3 +156,39 @@ Generate mocks used for unit tests:
 
 ```bash
 make generate-mocks
 ```
+
+### Mocks
+
+We use `github.com/vektra/mockery` for mocking interfaces within tests. The configuration is in `.mockery.yaml`.
+
+#### Adding and updating packages
+
+You can add new packages by their fully qualified name, e.g.:
+```
+github.com/onflow/flow-go/module/execution:
+```
+
+This adds all interfaces within the `module/execution/` package (non-recursively).
+
+#### Mocking functions
+
+Mockery dropped support for generating function mocks. Instead, you can use this pattern:
+
+1. Create a `mock_interfaces` directory in the package where the function mock exists.
+2. Add a file that mocks the function. For example, this mocks `StateMachineEventsTelemetryFactory(candidateView uint64) protocol_state.StateMachineTelemetryConsumer`:
+```golang
+package mockinterfaces
+
+import "github.com/onflow/flow-go/state/protocol/protocol_state"
+
+// StateMachineEventsTelemetryFactory allows creating a mock for the telemetry factory callback
+type StateMachineEventsTelemetryFactory interface {
+	Execute(candidateView uint64) protocol_state.StateMachineTelemetryConsumer
+}
+```
+3. Add the package to `.mockery.yaml`. Note: specify the directory where you want the mock to be placed.
+```
+  github.com/onflow/flow-go/state/protocol/protocol_state/mock_interfaces:
+    config:
+      dir: "state/protocol/protocol_state/mock"
+```
diff --git a/SECURITY.md b/SECURITY.md
index 6b370e9060b..2a38679616c 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -9,4 +9,4 @@ If you care about making a difference, please follow the guidelines below.
# **Guidelines For Responsible Disclosure** -We ask that all researchers adhere to these guidelines [here](https://docs.onflow.org/bounties/responsible-disclosure/) +We ask that all researchers adhere to these guidelines [here](https://flow.com/flow-responsible-disclosure) diff --git a/access/api.go b/access/api.go index 4188c04c1c4..7595371835a 100644 --- a/access/api.go +++ b/access/api.go @@ -3,18 +3,110 @@ package access import ( "context" - "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" - "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/engine/access/subscription" + accessmodel "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" ) +type AccountsAPI interface { + GetAccount(ctx context.Context, address flow.Address) (*flow.Account, error) + GetAccountAtLatestBlock(ctx context.Context, address flow.Address) (*flow.Account, error) + GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) + + GetAccountBalanceAtLatestBlock(ctx context.Context, address flow.Address) (uint64, error) + GetAccountBalanceAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (uint64, error) + + GetAccountKeyAtLatestBlock(ctx context.Context, address flow.Address, keyIndex uint32) (*flow.AccountPublicKey, error) + GetAccountKeyAtBlockHeight(ctx context.Context, address flow.Address, keyIndex uint32, height uint64) (*flow.AccountPublicKey, error) + GetAccountKeysAtLatestBlock(ctx context.Context, address flow.Address) ([]flow.AccountPublicKey, error) + GetAccountKeysAtBlockHeight(ctx context.Context, address flow.Address, height uint64) ([]flow.AccountPublicKey, error) +} + +type EventsAPI interface { + GetEventsForHeightRange( + ctx context.Context, + eventType string, + startHeight, + endHeight uint64, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) ([]flow.BlockEvents, error) + + GetEventsForBlockIDs( + ctx context.Context, + eventType string, + blockIDs []flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) ([]flow.BlockEvents, error) +} + +type ScriptsAPI interface { + ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, arguments [][]byte) ([]byte, error) + ExecuteScriptAtBlockHeight(ctx context.Context, blockHeight uint64, script []byte, arguments [][]byte) ([]byte, error) + ExecuteScriptAtBlockID(ctx context.Context, blockID flow.Identifier, script []byte, arguments [][]byte) ([]byte, error) +} + +type TransactionsAPI interface { + SendTransaction(ctx context.Context, tx *flow.TransactionBody) error + + GetTransaction(ctx context.Context, id flow.Identifier) (*flow.TransactionBody, error) + GetTransactionsByBlockID(ctx context.Context, blockID flow.Identifier) ([]*flow.TransactionBody, error) + + GetTransactionResult(ctx context.Context, txID flow.Identifier, blockID flow.Identifier, collectionID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) + GetTransactionResultByIndex(ctx context.Context, blockID flow.Identifier, index uint32, encodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) + GetTransactionResultsByBlockID(ctx context.Context, blockID flow.Identifier, encodingVersion entities.EventEncodingVersion) ([]*accessmodel.TransactionResult, error) + + GetSystemTransaction(ctx context.Context, txID flow.Identifier, blockID flow.Identifier) (*flow.TransactionBody, error) + 
GetSystemTransactionResult(ctx context.Context, txID flow.Identifier, blockID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) +} + +type TransactionStreamAPI interface { + // SubscribeTransactionStatuses subscribes to transaction status updates for a given transaction ID. Monitoring starts + // from the latest block to obtain the current transaction status. If the transaction is already in the final state + // ([flow.TransactionStatusSealed] or [flow.TransactionStatusExpired]), all statuses will be prepared and sent to the client + // sequentially. If the transaction is not in the final state, the subscription will stream status updates until the transaction + // reaches the final state. Once a final state is reached, the subscription will automatically terminate. + // + // Parameters: + // - ctx: Context to manage the subscription's lifecycle, including cancellation. + // - txID: The unique identifier of the transaction to monitor. + // - requiredEventEncodingVersion: The version of event encoding required for the subscription. + SubscribeTransactionStatuses( + ctx context.Context, + txID flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) subscription.Subscription + + // SendAndSubscribeTransactionStatuses sends a transaction to the execution node and subscribes to its status updates. + // Monitoring begins from the reference block saved in the transaction itself and streams status updates until the transaction + // reaches the final state ([flow.TransactionStatusSealed] or [flow.TransactionStatusExpired]). Once the final status has been reached, the subscription + // automatically terminates. + // + // Parameters: + // - ctx: The context to manage the transaction sending and subscription lifecycle, including cancellation. + // - tx: The transaction body to be sent and monitored. + // - requiredEventEncodingVersion: The version of event encoding required for the subscription. + // + // If the transaction cannot be sent, the subscription will fail and return a failed subscription. + SendAndSubscribeTransactionStatuses( + ctx context.Context, + tx *flow.TransactionBody, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) subscription.Subscription +} + // API provides all public-facing functionality of the Flow Access API. 
type API interface { + AccountsAPI + EventsAPI + ScriptsAPI + TransactionsAPI + TransactionStreamAPI + Ping(ctx context.Context) error - GetNetworkParameters(ctx context.Context) NetworkParameters - GetNodeVersionInfo(ctx context.Context) (*NodeVersionInfo, error) + GetNetworkParameters(ctx context.Context) accessmodel.NetworkParameters + GetNodeVersionInfo(ctx context.Context) (*accessmodel.NodeVersionInfo, error) GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flow.Header, flow.BlockStatus, error) GetBlockHeaderByHeight(ctx context.Context, height uint64) (*flow.Header, flow.BlockStatus, error) @@ -25,90 +117,155 @@ type API interface { GetBlockByID(ctx context.Context, id flow.Identifier) (*flow.Block, flow.BlockStatus, error) GetCollectionByID(ctx context.Context, id flow.Identifier) (*flow.LightCollection, error) - - SendTransaction(ctx context.Context, tx *flow.TransactionBody) error - GetTransaction(ctx context.Context, id flow.Identifier) (*flow.TransactionBody, error) - GetTransactionsByBlockID(ctx context.Context, blockID flow.Identifier) ([]*flow.TransactionBody, error) - GetTransactionResult(ctx context.Context, id flow.Identifier, blockID flow.Identifier, collectionID flow.Identifier) (*TransactionResult, error) - GetTransactionResultByIndex(ctx context.Context, blockID flow.Identifier, index uint32) (*TransactionResult, error) - GetTransactionResultsByBlockID(ctx context.Context, blockID flow.Identifier) ([]*TransactionResult, error) - - GetAccount(ctx context.Context, address flow.Address) (*flow.Account, error) - GetAccountAtLatestBlock(ctx context.Context, address flow.Address) (*flow.Account, error) - GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) - - ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, arguments [][]byte) ([]byte, error) - ExecuteScriptAtBlockHeight(ctx context.Context, blockHeight uint64, script []byte, arguments [][]byte) ([]byte, error) - ExecuteScriptAtBlockID(ctx context.Context, blockID flow.Identifier, script []byte, arguments [][]byte) ([]byte, error) - - GetEventsForHeightRange(ctx context.Context, eventType string, startHeight, endHeight uint64) ([]flow.BlockEvents, error) - GetEventsForBlockIDs(ctx context.Context, eventType string, blockIDs []flow.Identifier) ([]flow.BlockEvents, error) + GetFullCollectionByID(ctx context.Context, id flow.Identifier) (*flow.Collection, error) GetLatestProtocolStateSnapshot(ctx context.Context) ([]byte, error) + GetProtocolStateSnapshotByBlockID(ctx context.Context, blockID flow.Identifier) ([]byte, error) + GetProtocolStateSnapshotByHeight(ctx context.Context, blockHeight uint64) ([]byte, error) GetExecutionResultForBlockID(ctx context.Context, blockID flow.Identifier) (*flow.ExecutionResult, error) GetExecutionResultByID(ctx context.Context, id flow.Identifier) (*flow.ExecutionResult, error) -} -// TODO: Combine this with flow.TransactionResult? -type TransactionResult struct { - Status flow.TransactionStatus - StatusCode uint - Events []flow.Event - ErrorMessage string - BlockID flow.Identifier - TransactionID flow.Identifier - CollectionID flow.Identifier - BlockHeight uint64 -} + // SubscribeBlocksFromStartBlockID subscribes to the finalized or sealed blocks starting at the requested + // start block id, up until the latest available block. Once the latest is + // reached, the stream will remain open and responses are sent for each new + // block as it becomes available. 
+	//
+	// Each block is filtered by the provided block status, and only
+	// those blocks that match the status are returned.
+	//
+	// Parameters:
+	// - ctx: Context for the operation.
+	// - startBlockID: The identifier of the starting block.
+	// - blockStatus: The status of the block, which must be either BlockStatusSealed or BlockStatusFinalized.
+	//
+	// If invalid parameters are supplied, SubscribeBlocksFromStartBlockID returns a failed subscription.
+	SubscribeBlocksFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) subscription.Subscription
 
-func TransactionResultToMessage(result *TransactionResult) *access.TransactionResultResponse {
-	return &access.TransactionResultResponse{
-		Status:        entities.TransactionStatus(result.Status),
-		StatusCode:    uint32(result.StatusCode),
-		ErrorMessage:  result.ErrorMessage,
-		Events:        convert.EventsToMessages(result.Events),
-		BlockId:       result.BlockID[:],
-		TransactionId: result.TransactionID[:],
-		CollectionId:  result.CollectionID[:],
-		BlockHeight:   result.BlockHeight,
-	}
-}
+	// SubscribeBlocksFromStartHeight subscribes to the finalized or sealed blocks starting at the requested
+	// start block height, up until the latest available block. Once the latest is
+	// reached, the stream will remain open and responses are sent for each new
+	// block as it becomes available.
+	//
+	// Each block is filtered by the provided block status, and only
+	// those blocks that match the status are returned.
+	//
+	// Parameters:
+	// - ctx: Context for the operation.
+	// - startHeight: The height of the starting block.
+	// - blockStatus: The status of the block, which must be either BlockStatusSealed or BlockStatusFinalized.
+	//
+	// If invalid parameters are supplied, SubscribeBlocksFromStartHeight returns a failed subscription.
+	SubscribeBlocksFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) subscription.Subscription
 
-func TransactionResultsToMessage(results []*TransactionResult) *access.TransactionResultsResponse {
-	messages := make([]*access.TransactionResultResponse, len(results))
-	for i, result := range results {
-		messages[i] = TransactionResultToMessage(result)
-	}
+	// SubscribeBlocksFromLatest subscribes to the finalized or sealed blocks starting at the latest sealed block,
+	// up until the latest available block. Once the latest is
+	// reached, the stream will remain open and responses are sent for each new
+	// block as it becomes available.
+	//
+	// Each block is filtered by the provided block status, and only
+	// those blocks that match the status are returned.
+	//
+	// Parameters:
+	// - ctx: Context for the operation.
+	// - blockStatus: The status of the block, which must be either BlockStatusSealed or BlockStatusFinalized.
+	//
+	// If invalid parameters are supplied, SubscribeBlocksFromLatest returns a failed subscription.
+	SubscribeBlocksFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription
 
-	return &access.TransactionResultsResponse{
-		TransactionResults: messages,
-	}
-}
+	// SubscribeBlockHeadersFromStartBlockID streams finalized or sealed block headers starting at the requested
+	// start block id, up until the latest available block header. Once the latest is
+	// reached, the stream will remain open and responses are sent for each new
+	// block header as it becomes available.
+ //
+ // Each block header is filtered by the provided block status, and only
+ // those block headers that match the status are returned.
+ //
+ // Parameters:
+ // - ctx: Context for the operation.
+ // - startBlockID: The identifier of the starting block.
+ // - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+ //
+ // If invalid parameters are supplied, SubscribeBlockHeadersFromStartBlockID returns a failed subscription.
+ SubscribeBlockHeadersFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) subscription.Subscription

-func MessageToTransactionResult(message *access.TransactionResultResponse) *TransactionResult {
-
- return &TransactionResult{
- Status: flow.TransactionStatus(message.Status),
- StatusCode: uint(message.StatusCode),
- ErrorMessage: message.ErrorMessage,
- Events: convert.MessagesToEvents(message.Events),
- BlockID: flow.HashToID(message.BlockId),
- TransactionID: flow.HashToID(message.TransactionId),
- CollectionID: flow.HashToID(message.CollectionId),
- BlockHeight: message.BlockHeight,
- }
-}
+ // SubscribeBlockHeadersFromStartHeight streams finalized or sealed block headers starting at the requested
+ // start block height, up until the latest available block header. Once the latest is
+ // reached, the stream will remain open and responses are sent for each new
+ // block header as it becomes available.
+ //
+ // Each block header is filtered by the provided block status, and only
+ // those block headers that match the status are returned.
+ //
+ // Parameters:
+ // - ctx: Context for the operation.
+ // - startHeight: The height of the starting block.
+ // - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+ //
+ // If invalid parameters are supplied, SubscribeBlockHeadersFromStartHeight returns a failed subscription.
+ SubscribeBlockHeadersFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) subscription.Subscription

-// NetworkParameters contains the network-wide parameters for the Flow blockchain.
-type NetworkParameters struct {
- ChainID flow.ChainID
-}
+ // SubscribeBlockHeadersFromLatest streams finalized or sealed block headers starting at the latest sealed block,
+ // up until the latest available block header. Once the latest is
+ // reached, the stream will remain open and responses are sent for each new
+ // block header as it becomes available.
+ //
+ // Each block header is filtered by the provided block status, and only
+ // those block headers that match the status are returned.
+ //
+ // Parameters:
+ // - ctx: Context for the operation.
+ // - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+ //
+ // If invalid parameters are supplied, SubscribeBlockHeadersFromLatest returns a failed subscription.
+ SubscribeBlockHeadersFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription
+
+ // Subscribe digests
+
+ // SubscribeBlockDigestsFromStartBlockID streams finalized or sealed lightweight blocks starting at the requested
+ // start block id, up until the latest available block. Once the latest is
+ // reached, the stream will remain open and responses are sent for each new
+ // block as it becomes available.
+ //
+ // Each lightweight block is filtered by the provided block status, and only
+ // those blocks that match the status are returned.
+ //
+ // Parameters:
+ // - ctx: Context for the operation.
+ // - startBlockID: The identifier of the starting block.
+ // - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+ //
+ // If invalid parameters are supplied, SubscribeBlockDigestsFromStartBlockID returns a failed subscription.
+ SubscribeBlockDigestsFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) subscription.Subscription
+
+ // SubscribeBlockDigestsFromStartHeight streams finalized or sealed lightweight blocks starting at the requested
+ // start block height, up until the latest available block. Once the latest is
+ // reached, the stream will remain open and responses are sent for each new
+ // block as it becomes available.
+ //
+ // Each lightweight block is filtered by the provided block status, and only
+ // those blocks that match the status are returned.
+ //
+ // Parameters:
+ // - ctx: Context for the operation.
+ // - startHeight: The height of the starting block.
+ // - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+ //
+ // If invalid parameters are supplied, SubscribeBlockDigestsFromStartHeight returns a failed subscription.
+ SubscribeBlockDigestsFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) subscription.Subscription

-// NodeVersionInfo contains information about node, such as semver, commit, sporkID, protocolVersion, etc
-type NodeVersionInfo struct {
- Semver string
- Commit string
- SporkId flow.Identifier
- ProtocolVersion uint64
+ // SubscribeBlockDigestsFromLatest streams finalized or sealed lightweight blocks starting at the latest sealed block,
+ // up until the latest available block. Once the latest is
+ // reached, the stream will remain open and responses are sent for each new
+ // block as it becomes available.
+ //
+ // Each lightweight block is filtered by the provided block status, and only
+ // those blocks that match the status are returned.
+ //
+ // Parameters:
+ // - ctx: Context for the operation.
+ // - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+ //
+ // If invalid parameters are supplied, SubscribeBlockDigestsFromLatest returns a failed subscription.
+ SubscribeBlockDigestsFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription
 }
diff --git a/access/errors.go b/access/errors.go
deleted file mode 100644
index e23c7a7347b..00000000000
--- a/access/errors.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package access
-
-import (
- "errors"
- "fmt"
-
- "github.com/onflow/flow-go/model/flow"
-)
-
-// ErrUnknownReferenceBlock indicates that a transaction references an unknown block.
-var ErrUnknownReferenceBlock = errors.New("unknown reference block")
-
-// IncompleteTransactionError indicates that a transaction is missing one or more required fields.
-type IncompleteTransactionError struct {
- MissingFields []string
-}
-
-func (e IncompleteTransactionError) Error() string {
- return fmt.Sprintf("transaction is missing required fields: %s", e.MissingFields)
-}
-
-// ExpiredTransactionError indicates that a transaction has expired.
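
The Subscribe* methods added to the API interface above all return a subscription.Subscription. As a hedged sketch of client-side consumption (assuming the Subscription interface in engine/access/subscription exposes Channel() <-chan interface{} and Err() error, which this diff does not show), a caller might drain a block stream like so:

```go
package example

import (
	"context"
	"fmt"

	"github.com/onflow/flow-go/access"
	"github.com/onflow/flow-go/model/flow"
)

// drainSealedBlocks is a hypothetical helper: it consumes a block
// subscription until the channel closes, then reports the terminal error.
func drainSealedBlocks(ctx context.Context, api access.API) error {
	sub := api.SubscribeBlocksFromLatest(ctx, flow.BlockStatusSealed)
	for data := range sub.Channel() {
		block, ok := data.(*flow.Block)
		if !ok {
			return fmt.Errorf("unexpected payload type %T", data)
		}
		fmt.Printf("received sealed block %s\n", block.ID())
	}
	// Err is non-nil when the stream terminated abnormally, for example
	// when invalid parameters produced a failed subscription.
	return sub.Err()
}
```
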
-type ExpiredTransactionError struct { - RefHeight, FinalHeight uint64 -} - -func (e ExpiredTransactionError) Error() string { - return fmt.Sprintf("transaction is expired: ref_height=%d final_height=%d", e.RefHeight, e.FinalHeight) -} - -// InvalidScriptError indicates that a transaction contains an invalid Cadence script. -type InvalidScriptError struct { - ParserErr error -} - -func (e InvalidScriptError) Error() string { - return fmt.Sprintf("failed to parse transaction Cadence script: %s", e.ParserErr) -} - -func (e InvalidScriptError) Unwrap() error { - return e.ParserErr -} - -// InvalidGasLimitError indicates that a transaction specifies a gas limit that exceeds the maximum. -type InvalidGasLimitError struct { - Maximum uint64 - Actual uint64 -} - -func (e InvalidGasLimitError) Error() string { - return fmt.Sprintf("transaction gas limit (%d) is not in the acceptable range (min: 1, max: %d)", e.Actual, e.Maximum) -} - -// InvalidAddressError indicates that a transaction references an invalid flow Address -// in either the Authorizers or Payer field. -type InvalidAddressError struct { - Address flow.Address -} - -func (e InvalidAddressError) Error() string { - return fmt.Sprintf("invalid address: %s", e.Address) -} - -// DuplicatedSignatureError indicates that two signatures havs been provided for a key (combination of account and key index) -type DuplicatedSignatureError struct { - Address flow.Address - KeyIndex uint64 -} - -func (e DuplicatedSignatureError) Error() string { - return fmt.Sprintf("duplicated signature for key (address: %s, index: %d)", e.Address.String(), e.KeyIndex) -} - -// InvalidSignatureError indicates that a transaction contains a signature -// with a wrong format. -type InvalidSignatureError struct { - Signature flow.TransactionSignature -} - -func (e InvalidSignatureError) Error() string { - return fmt.Sprintf("invalid signature: %s", e.Signature) -} - -// InvalidTxByteSizeError indicates that a transaction byte size exceeds the maximum. 
-type InvalidTxByteSizeError struct { - Maximum uint64 - Actual uint64 -} - -func (e InvalidTxByteSizeError) Error() string { - return fmt.Sprintf("transaction byte size (%d) exceeds the maximum byte size allowed for a transaction (%d)", e.Actual, e.Maximum) -} diff --git a/access/handler.go b/access/handler.go deleted file mode 100644 index 404bfa81318..00000000000 --- a/access/handler.go +++ /dev/null @@ -1,696 +0,0 @@ -package access - -import ( - "context" - - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/timestamppb" - - "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/consensus/hotstuff/signature" - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" -) - -type Handler struct { - api API - chain flow.Chain - signerIndicesDecoder hotstuff.BlockSignerDecoder - finalizedHeaderCache module.FinalizedHeaderCache - me module.Local -} - -// HandlerOption is used to hand over optional constructor parameters -type HandlerOption func(*Handler) - -var _ access.AccessAPIServer = (*Handler)(nil) - -func NewHandler(api API, chain flow.Chain, finalizedHeader module.FinalizedHeaderCache, me module.Local, options ...HandlerOption) *Handler { - h := &Handler{ - api: api, - chain: chain, - finalizedHeaderCache: finalizedHeader, - me: me, - signerIndicesDecoder: &signature.NoopBlockSignerDecoder{}, - } - for _, opt := range options { - opt(h) - } - return h -} - -// Ping the Access API server for a response. -func (h *Handler) Ping(ctx context.Context, _ *access.PingRequest) (*access.PingResponse, error) { - err := h.api.Ping(ctx) - if err != nil { - return nil, err - } - - return &access.PingResponse{}, nil -} - -// GetNodeVersionInfo gets node version information such as semver, commit, sporkID, protocolVersion, etc -func (h *Handler) GetNodeVersionInfo( - ctx context.Context, - _ *access.GetNodeVersionInfoRequest, -) (*access.GetNodeVersionInfoResponse, error) { - nodeVersionInfo, err := h.api.GetNodeVersionInfo(ctx) - if err != nil { - return nil, err - } - - return &access.GetNodeVersionInfoResponse{ - Info: &entities.NodeVersionInfo{ - Semver: nodeVersionInfo.Semver, - Commit: nodeVersionInfo.Commit, - SporkId: nodeVersionInfo.SporkId[:], - ProtocolVersion: nodeVersionInfo.ProtocolVersion, - }, - }, nil -} - -func (h *Handler) GetNetworkParameters( - ctx context.Context, - _ *access.GetNetworkParametersRequest, -) (*access.GetNetworkParametersResponse, error) { - params := h.api.GetNetworkParameters(ctx) - - return &access.GetNetworkParametersResponse{ - ChainId: string(params.ChainID), - }, nil -} - -// GetLatestBlockHeader gets the latest sealed block header. -func (h *Handler) GetLatestBlockHeader( - ctx context.Context, - req *access.GetLatestBlockHeaderRequest, -) (*access.BlockHeaderResponse, error) { - header, status, err := h.api.GetLatestBlockHeader(ctx, req.GetIsSealed()) - if err != nil { - return nil, err - } - return h.blockHeaderResponse(header, status) -} - -// GetBlockHeaderByHeight gets a block header by height. 
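
The typed validation errors that this diff removes from access/errors.go follow Go's standard errors.As matching pattern. A minimal, self-contained sketch of how a caller distinguishes them (the InvalidGasLimitError definition is copied from the removed file so the snippet compiles on its own):

```go
package example

import (
	"errors"
	"fmt"
)

// InvalidGasLimitError mirrors the type removed above; it is reproduced
// here only to keep the sketch self-contained.
type InvalidGasLimitError struct {
	Maximum uint64
	Actual  uint64
}

func (e InvalidGasLimitError) Error() string {
	return fmt.Sprintf("transaction gas limit (%d) is not in the acceptable range (min: 1, max: %d)", e.Actual, e.Maximum)
}

// classify branches on the concrete error type using errors.As, which
// also matches the error when it is wrapped further up the call stack.
func classify(err error) string {
	var gasErr InvalidGasLimitError
	if errors.As(err, &gasErr) {
		return fmt.Sprintf("gas limit too high: got %d, max %d", gasErr.Actual, gasErr.Maximum)
	}
	return "other error"
}
```
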
-func (h *Handler) GetBlockHeaderByHeight( - ctx context.Context, - req *access.GetBlockHeaderByHeightRequest, -) (*access.BlockHeaderResponse, error) { - header, status, err := h.api.GetBlockHeaderByHeight(ctx, req.GetHeight()) - if err != nil { - return nil, err - } - return h.blockHeaderResponse(header, status) -} - -// GetBlockHeaderByID gets a block header by ID. -func (h *Handler) GetBlockHeaderByID( - ctx context.Context, - req *access.GetBlockHeaderByIDRequest, -) (*access.BlockHeaderResponse, error) { - id, err := convert.BlockID(req.GetId()) - if err != nil { - return nil, err - } - header, status, err := h.api.GetBlockHeaderByID(ctx, id) - if err != nil { - return nil, err - } - return h.blockHeaderResponse(header, status) -} - -// GetLatestBlock gets the latest sealed block. -func (h *Handler) GetLatestBlock( - ctx context.Context, - req *access.GetLatestBlockRequest, -) (*access.BlockResponse, error) { - block, status, err := h.api.GetLatestBlock(ctx, req.GetIsSealed()) - if err != nil { - return nil, err - } - return h.blockResponse(block, req.GetFullBlockResponse(), status) -} - -// GetBlockByHeight gets a block by height. -func (h *Handler) GetBlockByHeight( - ctx context.Context, - req *access.GetBlockByHeightRequest, -) (*access.BlockResponse, error) { - block, status, err := h.api.GetBlockByHeight(ctx, req.GetHeight()) - if err != nil { - return nil, err - } - return h.blockResponse(block, req.GetFullBlockResponse(), status) -} - -// GetBlockByID gets a block by ID. -func (h *Handler) GetBlockByID( - ctx context.Context, - req *access.GetBlockByIDRequest, -) (*access.BlockResponse, error) { - id, err := convert.BlockID(req.GetId()) - if err != nil { - return nil, err - } - block, status, err := h.api.GetBlockByID(ctx, id) - if err != nil { - return nil, err - } - return h.blockResponse(block, req.GetFullBlockResponse(), status) -} - -// GetCollectionByID gets a collection by ID. -func (h *Handler) GetCollectionByID( - ctx context.Context, - req *access.GetCollectionByIDRequest, -) (*access.CollectionResponse, error) { - metadata := h.buildMetadataResponse() - - id, err := convert.CollectionID(req.GetId()) - if err != nil { - return nil, err - } - - col, err := h.api.GetCollectionByID(ctx, id) - if err != nil { - return nil, err - } - - colMsg, err := convert.LightCollectionToMessage(col) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - return &access.CollectionResponse{ - Collection: colMsg, - Metadata: metadata, - }, nil -} - -// SendTransaction submits a transaction to the network. -func (h *Handler) SendTransaction( - ctx context.Context, - req *access.SendTransactionRequest, -) (*access.SendTransactionResponse, error) { - metadata := h.buildMetadataResponse() - - txMsg := req.GetTransaction() - - tx, err := convert.MessageToTransaction(txMsg, h.chain) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - err = h.api.SendTransaction(ctx, &tx) - if err != nil { - return nil, err - } - - txID := tx.ID() - - return &access.SendTransactionResponse{ - Id: txID[:], - Metadata: metadata, - }, nil -} - -// GetTransaction gets a transaction by ID. 
-func (h *Handler) GetTransaction( - ctx context.Context, - req *access.GetTransactionRequest, -) (*access.TransactionResponse, error) { - metadata := h.buildMetadataResponse() - - id, err := convert.TransactionID(req.GetId()) - if err != nil { - return nil, err - } - - tx, err := h.api.GetTransaction(ctx, id) - if err != nil { - return nil, err - } - - return &access.TransactionResponse{ - Transaction: convert.TransactionToMessage(*tx), - Metadata: metadata, - }, nil -} - -// GetTransactionResult gets a transaction by ID. -func (h *Handler) GetTransactionResult( - ctx context.Context, - req *access.GetTransactionRequest, -) (*access.TransactionResultResponse, error) { - metadata := h.buildMetadataResponse() - - transactionID, err := convert.TransactionID(req.GetId()) - if err != nil { - return nil, err - } - - blockId := flow.ZeroID - requestBlockId := req.GetBlockId() - if requestBlockId != nil { - blockId, err = convert.BlockID(requestBlockId) - if err != nil { - return nil, err - } - } - - collectionId := flow.ZeroID - requestCollectionId := req.GetCollectionId() - if requestCollectionId != nil { - collectionId, err = convert.CollectionID(requestCollectionId) - if err != nil { - return nil, err - } - } - - result, err := h.api.GetTransactionResult(ctx, transactionID, blockId, collectionId) - if err != nil { - return nil, err - } - - message := TransactionResultToMessage(result) - message.Metadata = metadata - - return message, nil -} - -func (h *Handler) GetTransactionResultsByBlockID( - ctx context.Context, - req *access.GetTransactionsByBlockIDRequest, -) (*access.TransactionResultsResponse, error) { - metadata := h.buildMetadataResponse() - - id, err := convert.BlockID(req.GetBlockId()) - if err != nil { - return nil, err - } - - results, err := h.api.GetTransactionResultsByBlockID(ctx, id) - if err != nil { - return nil, err - } - - message := TransactionResultsToMessage(results) - message.Metadata = metadata - - return message, nil -} - -func (h *Handler) GetTransactionsByBlockID( - ctx context.Context, - req *access.GetTransactionsByBlockIDRequest, -) (*access.TransactionsResponse, error) { - metadata := h.buildMetadataResponse() - - id, err := convert.BlockID(req.GetBlockId()) - if err != nil { - return nil, err - } - - transactions, err := h.api.GetTransactionsByBlockID(ctx, id) - if err != nil { - return nil, err - } - - return &access.TransactionsResponse{ - Transactions: convert.TransactionsToMessages(transactions), - Metadata: metadata, - }, nil -} - -// GetTransactionResultByIndex gets a transaction at a specific index for in a block that is executed, -// pending or finalized transactions return errors -func (h *Handler) GetTransactionResultByIndex( - ctx context.Context, - req *access.GetTransactionByIndexRequest, -) (*access.TransactionResultResponse, error) { - metadata := h.buildMetadataResponse() - - blockID, err := convert.BlockID(req.GetBlockId()) - if err != nil { - return nil, err - } - - result, err := h.api.GetTransactionResultByIndex(ctx, blockID, req.GetIndex()) - if err != nil { - return nil, err - } - - message := TransactionResultToMessage(result) - message.Metadata = metadata - - return message, nil -} - -// GetAccount returns an account by address at the latest sealed block. 
-func (h *Handler) GetAccount( - ctx context.Context, - req *access.GetAccountRequest, -) (*access.GetAccountResponse, error) { - metadata := h.buildMetadataResponse() - - address := flow.BytesToAddress(req.GetAddress()) - - account, err := h.api.GetAccount(ctx, address) - if err != nil { - return nil, err - } - - accountMsg, err := convert.AccountToMessage(account) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - return &access.GetAccountResponse{ - Account: accountMsg, - Metadata: metadata, - }, nil -} - -// GetAccountAtLatestBlock returns an account by address at the latest sealed block. -func (h *Handler) GetAccountAtLatestBlock( - ctx context.Context, - req *access.GetAccountAtLatestBlockRequest, -) (*access.AccountResponse, error) { - metadata := h.buildMetadataResponse() - - address, err := convert.Address(req.GetAddress(), h.chain) - if err != nil { - return nil, err - } - - account, err := h.api.GetAccountAtLatestBlock(ctx, address) - if err != nil { - return nil, err - } - - accountMsg, err := convert.AccountToMessage(account) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - return &access.AccountResponse{ - Account: accountMsg, - Metadata: metadata, - }, nil -} - -func (h *Handler) GetAccountAtBlockHeight( - ctx context.Context, - req *access.GetAccountAtBlockHeightRequest, -) (*access.AccountResponse, error) { - metadata := h.buildMetadataResponse() - - address, err := convert.Address(req.GetAddress(), h.chain) - if err != nil { - return nil, err - } - - account, err := h.api.GetAccountAtBlockHeight(ctx, address, req.GetBlockHeight()) - if err != nil { - return nil, err - } - - accountMsg, err := convert.AccountToMessage(account) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - return &access.AccountResponse{ - Account: accountMsg, - Metadata: metadata, - }, nil -} - -// ExecuteScriptAtLatestBlock executes a script at a the latest block. -func (h *Handler) ExecuteScriptAtLatestBlock( - ctx context.Context, - req *access.ExecuteScriptAtLatestBlockRequest, -) (*access.ExecuteScriptResponse, error) { - metadata := h.buildMetadataResponse() - - script := req.GetScript() - arguments := req.GetArguments() - - value, err := h.api.ExecuteScriptAtLatestBlock(ctx, script, arguments) - if err != nil { - return nil, err - } - - return &access.ExecuteScriptResponse{ - Value: value, - Metadata: metadata, - }, nil -} - -// ExecuteScriptAtBlockHeight executes a script at a specific block height. -func (h *Handler) ExecuteScriptAtBlockHeight( - ctx context.Context, - req *access.ExecuteScriptAtBlockHeightRequest, -) (*access.ExecuteScriptResponse, error) { - metadata := h.buildMetadataResponse() - - script := req.GetScript() - arguments := req.GetArguments() - blockHeight := req.GetBlockHeight() - - value, err := h.api.ExecuteScriptAtBlockHeight(ctx, blockHeight, script, arguments) - if err != nil { - return nil, err - } - - return &access.ExecuteScriptResponse{ - Value: value, - Metadata: metadata, - }, nil -} - -// ExecuteScriptAtBlockID executes a script at a specific block ID. 
-func (h *Handler) ExecuteScriptAtBlockID( - ctx context.Context, - req *access.ExecuteScriptAtBlockIDRequest, -) (*access.ExecuteScriptResponse, error) { - metadata := h.buildMetadataResponse() - - script := req.GetScript() - arguments := req.GetArguments() - blockID := convert.MessageToIdentifier(req.GetBlockId()) - - value, err := h.api.ExecuteScriptAtBlockID(ctx, blockID, script, arguments) - if err != nil { - return nil, err - } - - return &access.ExecuteScriptResponse{ - Value: value, - Metadata: metadata, - }, nil -} - -// GetEventsForHeightRange returns events matching a query. -func (h *Handler) GetEventsForHeightRange( - ctx context.Context, - req *access.GetEventsForHeightRangeRequest, -) (*access.EventsResponse, error) { - metadata := h.buildMetadataResponse() - - eventType, err := convert.EventType(req.GetType()) - if err != nil { - return nil, err - } - - startHeight := req.GetStartHeight() - endHeight := req.GetEndHeight() - - results, err := h.api.GetEventsForHeightRange(ctx, eventType, startHeight, endHeight) - if err != nil { - return nil, err - } - - resultEvents, err := blockEventsToMessages(results) - if err != nil { - return nil, err - } - return &access.EventsResponse{ - Results: resultEvents, - Metadata: metadata, - }, nil -} - -// GetEventsForBlockIDs returns events matching a set of block IDs. -func (h *Handler) GetEventsForBlockIDs( - ctx context.Context, - req *access.GetEventsForBlockIDsRequest, -) (*access.EventsResponse, error) { - metadata := h.buildMetadataResponse() - - eventType, err := convert.EventType(req.GetType()) - if err != nil { - return nil, err - } - - blockIDs, err := convert.BlockIDs(req.GetBlockIds()) - if err != nil { - return nil, err - } - - results, err := h.api.GetEventsForBlockIDs(ctx, eventType, blockIDs) - if err != nil { - return nil, err - } - - resultEvents, err := blockEventsToMessages(results) - if err != nil { - return nil, err - } - - return &access.EventsResponse{ - Results: resultEvents, - Metadata: metadata, - }, nil -} - -// GetLatestProtocolStateSnapshot returns the latest serializable Snapshot -func (h *Handler) GetLatestProtocolStateSnapshot(ctx context.Context, req *access.GetLatestProtocolStateSnapshotRequest) (*access.ProtocolStateSnapshotResponse, error) { - metadata := h.buildMetadataResponse() - - snapshot, err := h.api.GetLatestProtocolStateSnapshot(ctx) - if err != nil { - return nil, err - } - - return &access.ProtocolStateSnapshotResponse{ - SerializedSnapshot: snapshot, - Metadata: metadata, - }, nil -} - -// GetExecutionResultForBlockID returns the latest received execution result for the given block ID. -// AN might receive multiple receipts with conflicting results for unsealed blocks. -// If this case happens, since AN is not able to determine which result is the correct one until the block is sealed, it has to pick one result to respond to this query. For now, we return the result from the latest received receipt. 
-func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *access.GetExecutionResultForBlockIDRequest) (*access.ExecutionResultForBlockIDResponse, error) { - metadata := h.buildMetadataResponse() - - blockID := convert.MessageToIdentifier(req.GetBlockId()) - - result, err := h.api.GetExecutionResultForBlockID(ctx, blockID) - if err != nil { - return nil, err - } - - return executionResultToMessages(result, metadata) -} - -func (h *Handler) blockResponse(block *flow.Block, fullResponse bool, status flow.BlockStatus) (*access.BlockResponse, error) { - metadata := h.buildMetadataResponse() - - signerIDs, err := h.signerIndicesDecoder.DecodeSignerIDs(block.Header) - if err != nil { - return nil, err // the block was retrieved from local storage - so no errors are expected - } - - var msg *entities.Block - if fullResponse { - msg, err = convert.BlockToMessage(block, signerIDs) - if err != nil { - return nil, err - } - } else { - msg = convert.BlockToMessageLight(block) - } - - return &access.BlockResponse{ - Block: msg, - BlockStatus: entities.BlockStatus(status), - Metadata: metadata, - }, nil -} - -func (h *Handler) blockHeaderResponse(header *flow.Header, status flow.BlockStatus) (*access.BlockHeaderResponse, error) { - metadata := h.buildMetadataResponse() - - signerIDs, err := h.signerIndicesDecoder.DecodeSignerIDs(header) - if err != nil { - return nil, err // the block was retrieved from local storage - so no errors are expected - } - - msg, err := convert.BlockHeaderToMessage(header, signerIDs) - if err != nil { - return nil, err - } - - return &access.BlockHeaderResponse{ - Block: msg, - BlockStatus: entities.BlockStatus(status), - Metadata: metadata, - }, nil -} - -// buildMetadataResponse builds and returns the metadata response object. 
-func (h *Handler) buildMetadataResponse() *entities.Metadata { - lastFinalizedHeader := h.finalizedHeaderCache.Get() - blockId := lastFinalizedHeader.ID() - nodeId := h.me.NodeID() - - return &entities.Metadata{ - LatestFinalizedBlockId: blockId[:], - LatestFinalizedHeight: lastFinalizedHeader.Height, - NodeId: nodeId[:], - } -} - -func executionResultToMessages(er *flow.ExecutionResult, metadata *entities.Metadata) (*access.ExecutionResultForBlockIDResponse, error) { - execResult, err := convert.ExecutionResultToMessage(er) - if err != nil { - return nil, err - } - return &access.ExecutionResultForBlockIDResponse{ - ExecutionResult: execResult, - Metadata: metadata, - }, nil -} - -func blockEventsToMessages(blocks []flow.BlockEvents) ([]*access.EventsResponse_Result, error) { - results := make([]*access.EventsResponse_Result, len(blocks)) - - for i, block := range blocks { - event, err := blockEventsToMessage(block) - if err != nil { - return nil, err - } - results[i] = event - } - - return results, nil -} - -func blockEventsToMessage(block flow.BlockEvents) (*access.EventsResponse_Result, error) { - eventMessages := make([]*entities.Event, len(block.Events)) - for i, event := range block.Events { - eventMessages[i] = convert.EventToMessage(event) - } - timestamp := timestamppb.New(block.BlockTimestamp) - return &access.EventsResponse_Result{ - BlockId: block.BlockID[:], - BlockHeight: block.BlockHeight, - BlockTimestamp: timestamp, - Events: eventMessages, - }, nil -} - -// WithBlockSignerDecoder configures the Handler to decode signer indices -// via the provided hotstuff.BlockSignerDecoder -func WithBlockSignerDecoder(signerIndicesDecoder hotstuff.BlockSignerDecoder) func(*Handler) { - return func(handler *Handler) { - handler.signerIndicesDecoder = signerIndicesDecoder - } -} diff --git a/access/legacy/convert/convert.go b/access/legacy/convert/convert.go index ebb52d2fe14..c9e7b6caf05 100644 --- a/access/legacy/convert/convert.go +++ b/access/legacy/convert/convert.go @@ -4,74 +4,77 @@ import ( "errors" "fmt" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" accessproto "github.com/onflow/flow/protobuf/go/flow/legacy/access" entitiesproto "github.com/onflow/flow/protobuf/go/flow/legacy/entities" - "google.golang.org/protobuf/types/known/timestamppb" - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine/common/rpc/convert" + accessmodel "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" ) var ErrEmptyMessage = errors.New("protobuf message is empty") func MessageToTransaction(m *entitiesproto.Transaction, chain flow.Chain) (flow.TransactionBody, error) { + var t flow.TransactionBody if m == nil { - return flow.TransactionBody{}, ErrEmptyMessage + return t, ErrEmptyMessage } - t := flow.NewTransactionBody() - + tb := flow.NewTransactionBodyBuilder() proposalKey := m.GetProposalKey() if proposalKey != nil { proposalAddress, err := convert.Address(proposalKey.GetAddress(), chain) if err != nil { - return *t, err + return t, err } - t.SetProposalKey(proposalAddress, uint64(proposalKey.GetKeyId()), proposalKey.GetSequenceNumber()) + tb.SetProposalKey(proposalAddress, proposalKey.GetKeyId(), proposalKey.GetSequenceNumber()) } payer := m.GetPayer() if payer != nil { payerAddress, err := convert.Address(payer, chain) if err != nil { - return *t, err + return t, err } - t.SetPayer(payerAddress) + tb.SetPayer(payerAddress) } for _, authorizer := range 
m.GetAuthorizers() { authorizerAddress, err := convert.Address(authorizer, chain) if err != nil { - return *t, err + return t, err } - t.AddAuthorizer(authorizerAddress) + tb.AddAuthorizer(authorizerAddress) } for _, sig := range m.GetPayloadSignatures() { addr, err := convert.Address(sig.GetAddress(), chain) if err != nil { - return *t, err + return t, err } - t.AddPayloadSignature(addr, uint64(sig.GetKeyId()), sig.GetSignature()) + tb.AddPayloadSignature(addr, sig.GetKeyId(), sig.GetSignature()) } for _, sig := range m.GetEnvelopeSignatures() { addr, err := convert.Address(sig.GetAddress(), chain) if err != nil { - return *t, err + return t, err } - t.AddEnvelopeSignature(addr, uint64(sig.GetKeyId()), sig.GetSignature()) + tb.AddEnvelopeSignature(addr, sig.GetKeyId(), sig.GetSignature()) } - t.SetScript(m.GetScript()) - t.SetArguments(m.GetArguments()) - t.SetReferenceBlockID(flow.HashToID(m.GetReferenceBlockId())) - t.SetGasLimit(m.GetGasLimit()) + transactionBody, err := tb.SetScript(m.GetScript()). + SetArguments(m.GetArguments()). + SetReferenceBlockID(flow.HashToID(m.GetReferenceBlockId())). + SetComputeLimit(m.GetGasLimit()). + Build() + if err != nil { + return t, fmt.Errorf("could not build transaction body: %w", err) + } - return *t, nil + return *transactionBody, nil } func TransactionToMessage(tb flow.TransactionBody) *entitiesproto.Transaction { @@ -119,7 +122,7 @@ func TransactionToMessage(tb flow.TransactionBody) *entitiesproto.Transaction { } } -func TransactionResultToMessage(result access.TransactionResult) *accessproto.TransactionResultResponse { +func TransactionResultToMessage(result accessmodel.TransactionResult) *accessproto.TransactionResultResponse { return &accessproto.TransactionResultResponse{ Status: entitiesproto.TransactionStatus(result.Status), StatusCode: uint32(result.StatusCode), @@ -131,22 +134,17 @@ func TransactionResultToMessage(result access.TransactionResult) *accessproto.Tr func BlockHeaderToMessage(h *flow.Header) (*entitiesproto.BlockHeader, error) { id := h.ID() - t := timestamppb.New(h.Timestamp) - return &entitiesproto.BlockHeader{ Id: id[:], ParentId: h.ParentID[:], Height: h.Height, - Timestamp: t, + Timestamp: convert.BlockTimestamp2ProtobufTime(h.Timestamp), }, nil } func BlockToMessage(h *flow.Block) (*entitiesproto.Block, error) { id := h.ID() - parentID := h.Header.ParentID - t := timestamppb.New(h.Header.Timestamp) - cg := make([]*entitiesproto.CollectionGuarantee, len(h.Payload.Guarantees)) for i, g := range h.Payload.Guarantees { cg[i] = collectionGuaranteeToMessage(g) @@ -159,22 +157,20 @@ func BlockToMessage(h *flow.Block) (*entitiesproto.Block, error) { bh := entitiesproto.Block{ Id: id[:], - Height: h.Header.Height, - ParentId: parentID[:], - Timestamp: t, + Height: h.Height, + ParentId: h.ParentID[:], + Timestamp: convert.BlockTimestamp2ProtobufTime(h.Timestamp), CollectionGuarantees: cg, BlockSeals: seals, - Signatures: [][]byte{h.Header.ParentVoterSigData}, + Signatures: [][]byte{h.ParentVoterSigData}, } return &bh, nil } func collectionGuaranteeToMessage(g *flow.CollectionGuarantee) *entitiesproto.CollectionGuarantee { - id := g.ID() - return &entitiesproto.CollectionGuarantee{ - CollectionId: id[:], + CollectionId: IdentifierToMessage(g.CollectionID), Signatures: [][]byte{g.Signature}, } } diff --git a/access/legacy/handler.go b/access/legacy/handler.go index 48f4efc911d..8a504680d01 100644 --- a/access/legacy/handler.go +++ b/access/legacy/handler.go @@ -3,6 +3,7 @@ package handler import ( "context" + 
"github.com/onflow/flow/protobuf/go/flow/entities" accessproto "github.com/onflow/flow/protobuf/go/flow/legacy/access" entitiesproto "github.com/onflow/flow/protobuf/go/flow/legacy/entities" "google.golang.org/grpc/codes" @@ -189,7 +190,13 @@ func (h *Handler) GetTransactionResult( ) (*accessproto.TransactionResultResponse, error) { id := convert.MessageToIdentifier(req.GetId()) - result, err := h.api.GetTransactionResult(ctx, id, flow.ZeroID, flow.ZeroID) + result, err := h.api.GetTransactionResult( + ctx, + id, + flow.ZeroID, + flow.ZeroID, + entities.EventEncodingVersion_JSON_CDC_V0, + ) if err != nil { return nil, err } @@ -313,7 +320,7 @@ func (h *Handler) GetEventsForHeightRange( startHeight := req.GetStartHeight() endHeight := req.GetEndHeight() - results, err := h.api.GetEventsForHeightRange(ctx, eventType, startHeight, endHeight) + results, err := h.api.GetEventsForHeightRange(ctx, eventType, startHeight, endHeight, entities.EventEncodingVersion_JSON_CDC_V0) if err != nil { return nil, err } @@ -331,7 +338,7 @@ func (h *Handler) GetEventsForBlockIDs( eventType := req.GetType() blockIDs := convert.MessagesToIdentifiers(req.GetBlockIds()) - results, err := h.api.GetEventsForBlockIDs(ctx, eventType, blockIDs) + results, err := h.api.GetEventsForBlockIDs(ctx, eventType, blockIDs, entities.EventEncodingVersion_JSON_CDC_V0) if err != nil { return nil, err } diff --git a/access/mock/accounts_api.go b/access/mock/accounts_api.go new file mode 100644 index 00000000000..8362f73238e --- /dev/null +++ b/access/mock/accounts_api.go @@ -0,0 +1,295 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// AccountsAPI is an autogenerated mock type for the AccountsAPI type +type AccountsAPI struct { + mock.Mock +} + +// GetAccount provides a mock function with given fields: ctx, address +func (_m *AccountsAPI) GetAccount(ctx context.Context, address flow.Address) (*flow.Account, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + + var r0 *flow.Account + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (*flow.Account, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) *flow.Account); ok { + r0 = rf(ctx, address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Account) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountAtBlockHeight provides a mock function with given fields: ctx, address, height +func (_m *AccountsAPI) GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) { + ret := _m.Called(ctx, address, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountAtBlockHeight") + } + + var r0 *flow.Account + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) (*flow.Account, error)); ok { + return rf(ctx, address, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) *flow.Account); ok { + r0 = rf(ctx, address, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Account) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, height) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// GetAccountAtLatestBlock provides a mock function with given fields: ctx, address +func (_m *AccountsAPI) GetAccountAtLatestBlock(ctx context.Context, address flow.Address) (*flow.Account, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountAtLatestBlock") + } + + var r0 *flow.Account + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (*flow.Account, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) *flow.Account); ok { + r0 = rf(ctx, address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Account) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountBalanceAtBlockHeight provides a mock function with given fields: ctx, address, height +func (_m *AccountsAPI) GetAccountBalanceAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (uint64, error) { + ret := _m.Called(ctx, address, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtBlockHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) (uint64, error)); ok { + return rf(ctx, address, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) uint64); ok { + r0 = rf(ctx, address, height) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountBalanceAtLatestBlock provides a mock function with given fields: ctx, address +func (_m *AccountsAPI) GetAccountBalanceAtLatestBlock(ctx context.Context, address flow.Address) (uint64, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtLatestBlock") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (uint64, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) uint64); ok { + r0 = rf(ctx, address) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeyAtBlockHeight provides a mock function with given fields: ctx, address, keyIndex, height +func (_m *AccountsAPI) GetAccountKeyAtBlockHeight(ctx context.Context, address flow.Address, keyIndex uint32, height uint64) (*flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, keyIndex, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtBlockHeight") + } + + var r0 *flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, uint64) (*flow.AccountPublicKey, error)); ok { + return rf(ctx, address, keyIndex, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, uint64) *flow.AccountPublicKey); ok { + r0 = rf(ctx, address, keyIndex, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint32, uint64) error); ok { + r1 = rf(ctx, address, keyIndex, height) + } else { + r1 
= ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeyAtLatestBlock provides a mock function with given fields: ctx, address, keyIndex +func (_m *AccountsAPI) GetAccountKeyAtLatestBlock(ctx context.Context, address flow.Address, keyIndex uint32) (*flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, keyIndex) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtLatestBlock") + } + + var r0 *flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32) (*flow.AccountPublicKey, error)); ok { + return rf(ctx, address, keyIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32) *flow.AccountPublicKey); ok { + r0 = rf(ctx, address, keyIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint32) error); ok { + r1 = rf(ctx, address, keyIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeysAtBlockHeight provides a mock function with given fields: ctx, address, height +func (_m *AccountsAPI) GetAccountKeysAtBlockHeight(ctx context.Context, address flow.Address, height uint64) ([]flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtBlockHeight") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) ([]flow.AccountPublicKey, error)); ok { + return rf(ctx, address, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) []flow.AccountPublicKey); ok { + r0 = rf(ctx, address, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeysAtLatestBlock provides a mock function with given fields: ctx, address +func (_m *AccountsAPI) GetAccountKeysAtLatestBlock(ctx context.Context, address flow.Address) ([]flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtLatestBlock") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) ([]flow.AccountPublicKey, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) []flow.AccountPublicKey); ok { + r0 = rf(ctx, address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewAccountsAPI creates a new instance of AccountsAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAccountsAPI(t interface { + mock.TestingT + Cleanup(func()) +}) *AccountsAPI { + mock := &AccountsAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/access/mock/api.go b/access/mock/api.go index b3a91590f80..158945f9a55 100644 --- a/access/mock/api.go +++ b/access/mock/api.go @@ -1,15 +1,18 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
+// Code generated by mockery. DO NOT EDIT. package mock import ( context "context" - access "github.com/onflow/flow-go/access" - flow "github.com/onflow/flow-go/model/flow" + entities "github.com/onflow/flow/protobuf/go/flow/entities" mock "github.com/stretchr/testify/mock" + + modelaccess "github.com/onflow/flow-go/model/access" + + subscription "github.com/onflow/flow-go/engine/access/subscription" ) // API is an autogenerated mock type for the API type @@ -21,6 +24,10 @@ type API struct { func (_m *API) ExecuteScriptAtBlockHeight(ctx context.Context, blockHeight uint64, script []byte, arguments [][]byte) ([]byte, error) { ret := _m.Called(ctx, blockHeight, script, arguments) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockHeight") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64, []byte, [][]byte) ([]byte, error)); ok { @@ -47,6 +54,10 @@ func (_m *API) ExecuteScriptAtBlockHeight(ctx context.Context, blockHeight uint6 func (_m *API) ExecuteScriptAtBlockID(ctx context.Context, blockID flow.Identifier, script []byte, arguments [][]byte) ([]byte, error) { ret := _m.Called(ctx, blockID, script, arguments) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockID") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, []byte, [][]byte) ([]byte, error)); ok { @@ -73,6 +84,10 @@ func (_m *API) ExecuteScriptAtBlockID(ctx context.Context, blockID flow.Identifi func (_m *API) ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, arguments [][]byte) ([]byte, error) { ret := _m.Called(ctx, script, arguments) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtLatestBlock") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte) ([]byte, error)); ok { @@ -99,6 +114,10 @@ func (_m *API) ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, ar func (_m *API) GetAccount(ctx context.Context, address flow.Address) (*flow.Account, error) { ret := _m.Called(ctx, address) + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (*flow.Account, error)); ok { @@ -125,6 +144,10 @@ func (_m *API) GetAccount(ctx context.Context, address flow.Address) (*flow.Acco func (_m *API) GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) { ret := _m.Called(ctx, address, height) + if len(ret) == 0 { + panic("no return value specified for GetAccountAtBlockHeight") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) (*flow.Account, error)); ok { @@ -151,6 +174,10 @@ func (_m *API) GetAccountAtBlockHeight(ctx context.Context, address flow.Address func (_m *API) GetAccountAtLatestBlock(ctx context.Context, address flow.Address) (*flow.Account, error) { ret := _m.Called(ctx, address) + if len(ret) == 0 { + panic("no return value specified for GetAccountAtLatestBlock") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (*flow.Account, error)); ok { @@ -173,10 +200,190 @@ func (_m *API) GetAccountAtLatestBlock(ctx context.Context, address flow.Address return r0, r1 } +// GetAccountBalanceAtBlockHeight provides a mock function with given fields: ctx, address, height +func (_m *API) GetAccountBalanceAtBlockHeight(ctx context.Context, address 
flow.Address, height uint64) (uint64, error) { + ret := _m.Called(ctx, address, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtBlockHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) (uint64, error)); ok { + return rf(ctx, address, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) uint64); ok { + r0 = rf(ctx, address, height) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountBalanceAtLatestBlock provides a mock function with given fields: ctx, address +func (_m *API) GetAccountBalanceAtLatestBlock(ctx context.Context, address flow.Address) (uint64, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtLatestBlock") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (uint64, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) uint64); ok { + r0 = rf(ctx, address) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeyAtBlockHeight provides a mock function with given fields: ctx, address, keyIndex, height +func (_m *API) GetAccountKeyAtBlockHeight(ctx context.Context, address flow.Address, keyIndex uint32, height uint64) (*flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, keyIndex, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtBlockHeight") + } + + var r0 *flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, uint64) (*flow.AccountPublicKey, error)); ok { + return rf(ctx, address, keyIndex, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, uint64) *flow.AccountPublicKey); ok { + r0 = rf(ctx, address, keyIndex, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint32, uint64) error); ok { + r1 = rf(ctx, address, keyIndex, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeyAtLatestBlock provides a mock function with given fields: ctx, address, keyIndex +func (_m *API) GetAccountKeyAtLatestBlock(ctx context.Context, address flow.Address, keyIndex uint32) (*flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, keyIndex) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtLatestBlock") + } + + var r0 *flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32) (*flow.AccountPublicKey, error)); ok { + return rf(ctx, address, keyIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32) *flow.AccountPublicKey); ok { + r0 = rf(ctx, address, keyIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint32) error); ok { + r1 = rf(ctx, address, keyIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeysAtBlockHeight provides a mock function 
with given fields: ctx, address, height +func (_m *API) GetAccountKeysAtBlockHeight(ctx context.Context, address flow.Address, height uint64) ([]flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtBlockHeight") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) ([]flow.AccountPublicKey, error)); ok { + return rf(ctx, address, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) []flow.AccountPublicKey); ok { + r0 = rf(ctx, address, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeysAtLatestBlock provides a mock function with given fields: ctx, address +func (_m *API) GetAccountKeysAtLatestBlock(ctx context.Context, address flow.Address) ([]flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtLatestBlock") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) ([]flow.AccountPublicKey, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) []flow.AccountPublicKey); ok { + r0 = rf(ctx, address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetBlockByHeight provides a mock function with given fields: ctx, height func (_m *API) GetBlockByHeight(ctx context.Context, height uint64) (*flow.Block, flow.BlockStatus, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for GetBlockByHeight") + } + var r0 *flow.Block var r1 flow.BlockStatus var r2 error @@ -210,6 +417,10 @@ func (_m *API) GetBlockByHeight(ctx context.Context, height uint64) (*flow.Block func (_m *API) GetBlockByID(ctx context.Context, id flow.Identifier) (*flow.Block, flow.BlockStatus, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetBlockByID") + } + var r0 *flow.Block var r1 flow.BlockStatus var r2 error @@ -243,6 +454,10 @@ func (_m *API) GetBlockByID(ctx context.Context, id flow.Identifier) (*flow.Bloc func (_m *API) GetBlockHeaderByHeight(ctx context.Context, height uint64) (*flow.Header, flow.BlockStatus, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByHeight") + } + var r0 *flow.Header var r1 flow.BlockStatus var r2 error @@ -276,6 +491,10 @@ func (_m *API) GetBlockHeaderByHeight(ctx context.Context, height uint64) (*flow func (_m *API) GetBlockHeaderByID(ctx context.Context, id flow.Identifier) (*flow.Header, flow.BlockStatus, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByID") + } + var r0 *flow.Header var r1 flow.BlockStatus var r2 error @@ -309,6 +528,10 @@ func (_m *API) GetBlockHeaderByID(ctx context.Context, id flow.Identifier) (*flo func (_m *API) GetCollectionByID(ctx context.Context, id flow.Identifier) (*flow.LightCollection, error) { ret := _m.Called(ctx, id) + 
if len(ret) == 0 { + panic("no return value specified for GetCollectionByID") + } + var r0 *flow.LightCollection var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.LightCollection, error)); ok { @@ -331,25 +554,29 @@ func (_m *API) GetCollectionByID(ctx context.Context, id flow.Identifier) (*flow return r0, r1 } -// GetEventsForBlockIDs provides a mock function with given fields: ctx, eventType, blockIDs -func (_m *API) GetEventsForBlockIDs(ctx context.Context, eventType string, blockIDs []flow.Identifier) ([]flow.BlockEvents, error) { - ret := _m.Called(ctx, eventType, blockIDs) +// GetEventsForBlockIDs provides a mock function with given fields: ctx, eventType, blockIDs, requiredEventEncodingVersion +func (_m *API) GetEventsForBlockIDs(ctx context.Context, eventType string, blockIDs []flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) ([]flow.BlockEvents, error) { + ret := _m.Called(ctx, eventType, blockIDs, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetEventsForBlockIDs") + } var r0 []flow.BlockEvents var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, []flow.Identifier) ([]flow.BlockEvents, error)); ok { - return rf(ctx, eventType, blockIDs) + if rf, ok := ret.Get(0).(func(context.Context, string, []flow.Identifier, entities.EventEncodingVersion) ([]flow.BlockEvents, error)); ok { + return rf(ctx, eventType, blockIDs, requiredEventEncodingVersion) } - if rf, ok := ret.Get(0).(func(context.Context, string, []flow.Identifier) []flow.BlockEvents); ok { - r0 = rf(ctx, eventType, blockIDs) + if rf, ok := ret.Get(0).(func(context.Context, string, []flow.Identifier, entities.EventEncodingVersion) []flow.BlockEvents); ok { + r0 = rf(ctx, eventType, blockIDs, requiredEventEncodingVersion) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]flow.BlockEvents) } } - if rf, ok := ret.Get(1).(func(context.Context, string, []flow.Identifier) error); ok { - r1 = rf(ctx, eventType, blockIDs) + if rf, ok := ret.Get(1).(func(context.Context, string, []flow.Identifier, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, eventType, blockIDs, requiredEventEncodingVersion) } else { r1 = ret.Error(1) } @@ -357,25 +584,29 @@ func (_m *API) GetEventsForBlockIDs(ctx context.Context, eventType string, block return r0, r1 } -// GetEventsForHeightRange provides a mock function with given fields: ctx, eventType, startHeight, endHeight -func (_m *API) GetEventsForHeightRange(ctx context.Context, eventType string, startHeight uint64, endHeight uint64) ([]flow.BlockEvents, error) { - ret := _m.Called(ctx, eventType, startHeight, endHeight) +// GetEventsForHeightRange provides a mock function with given fields: ctx, eventType, startHeight, endHeight, requiredEventEncodingVersion +func (_m *API) GetEventsForHeightRange(ctx context.Context, eventType string, startHeight uint64, endHeight uint64, requiredEventEncodingVersion entities.EventEncodingVersion) ([]flow.BlockEvents, error) { + ret := _m.Called(ctx, eventType, startHeight, endHeight, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetEventsForHeightRange") + } var r0 []flow.BlockEvents var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64) ([]flow.BlockEvents, error)); ok { - return rf(ctx, eventType, startHeight, endHeight) + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64, entities.EventEncodingVersion) ([]flow.BlockEvents, error)); ok { 
+ return rf(ctx, eventType, startHeight, endHeight, requiredEventEncodingVersion) } - if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64) []flow.BlockEvents); ok { - r0 = rf(ctx, eventType, startHeight, endHeight) + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64, entities.EventEncodingVersion) []flow.BlockEvents); ok { + r0 = rf(ctx, eventType, startHeight, endHeight, requiredEventEncodingVersion) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]flow.BlockEvents) } } - if rf, ok := ret.Get(1).(func(context.Context, string, uint64, uint64) error); ok { - r1 = rf(ctx, eventType, startHeight, endHeight) + if rf, ok := ret.Get(1).(func(context.Context, string, uint64, uint64, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, eventType, startHeight, endHeight, requiredEventEncodingVersion) } else { r1 = ret.Error(1) } @@ -387,6 +618,10 @@ func (_m *API) GetEventsForHeightRange(ctx context.Context, eventType string, st func (_m *API) GetExecutionResultByID(ctx context.Context, id flow.Identifier) (*flow.ExecutionResult, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetExecutionResultByID") + } + var r0 *flow.ExecutionResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.ExecutionResult, error)); ok { @@ -413,6 +648,10 @@ func (_m *API) GetExecutionResultByID(ctx context.Context, id flow.Identifier) ( func (_m *API) GetExecutionResultForBlockID(ctx context.Context, blockID flow.Identifier) (*flow.ExecutionResult, error) { ret := _m.Called(ctx, blockID) + if len(ret) == 0 { + panic("no return value specified for GetExecutionResultForBlockID") + } + var r0 *flow.ExecutionResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.ExecutionResult, error)); ok { @@ -435,10 +674,44 @@ func (_m *API) GetExecutionResultForBlockID(ctx context.Context, blockID flow.Id return r0, r1 } +// GetFullCollectionByID provides a mock function with given fields: ctx, id +func (_m *API) GetFullCollectionByID(ctx context.Context, id flow.Identifier) (*flow.Collection, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetFullCollectionByID") + } + + var r0 *flow.Collection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.Collection, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *flow.Collection); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Collection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetLatestBlock provides a mock function with given fields: ctx, isSealed func (_m *API) GetLatestBlock(ctx context.Context, isSealed bool) (*flow.Block, flow.BlockStatus, error) { ret := _m.Called(ctx, isSealed) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlock") + } + var r0 *flow.Block var r1 flow.BlockStatus var r2 error @@ -472,6 +745,10 @@ func (_m *API) GetLatestBlock(ctx context.Context, isSealed bool) (*flow.Block, func (_m *API) GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flow.Header, flow.BlockStatus, error) { ret := _m.Called(ctx, isSealed) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + var r0 *flow.Header var r1 flow.BlockStatus var r2 error @@ -505,6 +782,10 @@ 
func (_m *API) GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flow.H func (_m *API) GetLatestProtocolStateSnapshot(ctx context.Context) ([]byte, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetLatestProtocolStateSnapshot") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]byte, error)); ok { @@ -528,33 +809,41 @@ func (_m *API) GetLatestProtocolStateSnapshot(ctx context.Context) ([]byte, erro } // GetNetworkParameters provides a mock function with given fields: ctx -func (_m *API) GetNetworkParameters(ctx context.Context) access.NetworkParameters { +func (_m *API) GetNetworkParameters(ctx context.Context) modelaccess.NetworkParameters { ret := _m.Called(ctx) - var r0 access.NetworkParameters - if rf, ok := ret.Get(0).(func(context.Context) access.NetworkParameters); ok { + if len(ret) == 0 { + panic("no return value specified for GetNetworkParameters") + } + + var r0 modelaccess.NetworkParameters + if rf, ok := ret.Get(0).(func(context.Context) modelaccess.NetworkParameters); ok { r0 = rf(ctx) } else { - r0 = ret.Get(0).(access.NetworkParameters) + r0 = ret.Get(0).(modelaccess.NetworkParameters) } return r0 } // GetNodeVersionInfo provides a mock function with given fields: ctx -func (_m *API) GetNodeVersionInfo(ctx context.Context) (*access.NodeVersionInfo, error) { +func (_m *API) GetNodeVersionInfo(ctx context.Context) (*modelaccess.NodeVersionInfo, error) { ret := _m.Called(ctx) - var r0 *access.NodeVersionInfo + if len(ret) == 0 { + panic("no return value specified for GetNodeVersionInfo") + } + + var r0 *modelaccess.NodeVersionInfo var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*access.NodeVersionInfo, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context) (*modelaccess.NodeVersionInfo, error)); ok { return rf(ctx) } - if rf, ok := ret.Get(0).(func(context.Context) *access.NodeVersionInfo); ok { + if rf, ok := ret.Get(0).(func(context.Context) *modelaccess.NodeVersionInfo); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.NodeVersionInfo) + r0 = ret.Get(0).(*modelaccess.NodeVersionInfo) } } @@ -567,10 +856,134 @@ func (_m *API) GetNodeVersionInfo(ctx context.Context) (*access.NodeVersionInfo, return r0, r1 } +// GetProtocolStateSnapshotByBlockID provides a mock function with given fields: ctx, blockID +func (_m *API) GetProtocolStateSnapshotByBlockID(ctx context.Context, blockID flow.Identifier) ([]byte, error) { + ret := _m.Called(ctx, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByBlockID") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) ([]byte, error)); ok { + return rf(ctx, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) []byte); ok { + r0 = rf(ctx, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProtocolStateSnapshotByHeight provides a mock function with given fields: ctx, blockHeight +func (_m *API) GetProtocolStateSnapshotByHeight(ctx context.Context, blockHeight uint64) ([]byte, error) { + ret := _m.Called(ctx, blockHeight) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByHeight") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, 
uint64) ([]byte, error)); ok { + return rf(ctx, blockHeight) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) []byte); ok { + r0 = rf(ctx, blockHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, blockHeight) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransaction provides a mock function with given fields: ctx, txID, blockID +func (_m *API) GetSystemTransaction(ctx context.Context, txID flow.Identifier, blockID flow.Identifier) (*flow.TransactionBody, error) { + ret := _m.Called(ctx, txID, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransaction") + } + + var r0 *flow.TransactionBody + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier) (*flow.TransactionBody, error)); ok { + return rf(ctx, txID, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier) *flow.TransactionBody); ok { + r0 = rf(ctx, txID, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionBody) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.Identifier) error); ok { + r1 = rf(ctx, txID, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransactionResult provides a mock function with given fields: ctx, txID, blockID, encodingVersion +func (_m *API) GetSystemTransactionResult(ctx context.Context, txID flow.Identifier, blockID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, txID, blockID, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransactionResult") + } + + var r0 *modelaccess.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) (*modelaccess.TransactionResult, error)); ok { + return rf(ctx, txID, blockID, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) *modelaccess.TransactionResult); ok { + r0 = rf(ctx, txID, blockID, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*modelaccess.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, txID, blockID, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetTransaction provides a mock function with given fields: ctx, id func (_m *API) GetTransaction(ctx context.Context, id flow.Identifier) (*flow.TransactionBody, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetTransaction") + } + var r0 *flow.TransactionBody var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.TransactionBody, error)); ok { @@ -593,25 +1006,29 @@ func (_m *API) GetTransaction(ctx context.Context, id flow.Identifier) (*flow.Tr return r0, r1 } -// GetTransactionResult provides a mock function with given fields: ctx, id, blockID, collectionID -func (_m *API) GetTransactionResult(ctx context.Context, id flow.Identifier, blockID flow.Identifier, collectionID flow.Identifier) (*access.TransactionResult, error) { - ret := _m.Called(ctx, id, blockID, collectionID) +// GetTransactionResult provides a mock function with 
given fields: ctx, txID, blockID, collectionID, encodingVersion +func (_m *API) GetTransactionResult(ctx context.Context, txID flow.Identifier, blockID flow.Identifier, collectionID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, txID, blockID, collectionID, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionResult") + } - var r0 *access.TransactionResult + var r0 *modelaccess.TransactionResult var r1 error - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier) (*access.TransactionResult, error)); ok { - return rf(ctx, id, blockID, collectionID) + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) (*modelaccess.TransactionResult, error)); ok { + return rf(ctx, txID, blockID, collectionID, encodingVersion) } - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier) *access.TransactionResult); ok { - r0 = rf(ctx, id, blockID, collectionID) + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) *modelaccess.TransactionResult); ok { + r0 = rf(ctx, txID, blockID, collectionID, encodingVersion) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.TransactionResult) + r0 = ret.Get(0).(*modelaccess.TransactionResult) } } - if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier) error); ok { - r1 = rf(ctx, id, blockID, collectionID) + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, txID, blockID, collectionID, encodingVersion) } else { r1 = ret.Error(1) } @@ -619,25 +1036,29 @@ func (_m *API) GetTransactionResult(ctx context.Context, id flow.Identifier, blo return r0, r1 } -// GetTransactionResultByIndex provides a mock function with given fields: ctx, blockID, index -func (_m *API) GetTransactionResultByIndex(ctx context.Context, blockID flow.Identifier, index uint32) (*access.TransactionResult, error) { - ret := _m.Called(ctx, blockID, index) +// GetTransactionResultByIndex provides a mock function with given fields: ctx, blockID, index, encodingVersion +func (_m *API) GetTransactionResultByIndex(ctx context.Context, blockID flow.Identifier, index uint32, encodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, blockID, index, encodingVersion) - var r0 *access.TransactionResult + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultByIndex") + } + + var r0 *modelaccess.TransactionResult var r1 error - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint32) (*access.TransactionResult, error)); ok { - return rf(ctx, blockID, index) + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint32, entities.EventEncodingVersion) (*modelaccess.TransactionResult, error)); ok { + return rf(ctx, blockID, index, encodingVersion) } - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint32) *access.TransactionResult); ok { - r0 = rf(ctx, blockID, index) + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint32, entities.EventEncodingVersion) *modelaccess.TransactionResult); ok { + r0 = rf(ctx, blockID, index, encodingVersion) } else { if ret.Get(0) != nil { - r0 
= ret.Get(0).(*access.TransactionResult) + r0 = ret.Get(0).(*modelaccess.TransactionResult) } } - if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint32) error); ok { - r1 = rf(ctx, blockID, index) + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint32, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, blockID, index, encodingVersion) } else { r1 = ret.Error(1) } @@ -645,25 +1066,29 @@ func (_m *API) GetTransactionResultByIndex(ctx context.Context, blockID flow.Ide return r0, r1 } -// GetTransactionResultsByBlockID provides a mock function with given fields: ctx, blockID -func (_m *API) GetTransactionResultsByBlockID(ctx context.Context, blockID flow.Identifier) ([]*access.TransactionResult, error) { - ret := _m.Called(ctx, blockID) +// GetTransactionResultsByBlockID provides a mock function with given fields: ctx, blockID, encodingVersion +func (_m *API) GetTransactionResultsByBlockID(ctx context.Context, blockID flow.Identifier, encodingVersion entities.EventEncodingVersion) ([]*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, blockID, encodingVersion) - var r0 []*access.TransactionResult + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultsByBlockID") + } + + var r0 []*modelaccess.TransactionResult var r1 error - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) ([]*access.TransactionResult, error)); ok { - return rf(ctx, blockID) + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) ([]*modelaccess.TransactionResult, error)); ok { + return rf(ctx, blockID, encodingVersion) } - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) []*access.TransactionResult); ok { - r0 = rf(ctx, blockID) + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) []*modelaccess.TransactionResult); ok { + r0 = rf(ctx, blockID, encodingVersion) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*access.TransactionResult) + r0 = ret.Get(0).([]*modelaccess.TransactionResult) } } - if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { - r1 = rf(ctx, blockID) + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, blockID, encodingVersion) } else { r1 = ret.Error(1) } @@ -675,6 +1100,10 @@ func (_m *API) GetTransactionResultsByBlockID(ctx context.Context, blockID flow. 
func (_m *API) GetTransactionsByBlockID(ctx context.Context, blockID flow.Identifier) ([]*flow.TransactionBody, error) { ret := _m.Called(ctx, blockID) + if len(ret) == 0 { + panic("no return value specified for GetTransactionsByBlockID") + } + var r0 []*flow.TransactionBody var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) ([]*flow.TransactionBody, error)); ok { @@ -701,6 +1130,10 @@ func (_m *API) GetTransactionsByBlockID(ctx context.Context, blockID flow.Identi func (_m *API) Ping(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Ping") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -711,10 +1144,34 @@ func (_m *API) Ping(ctx context.Context) error { return r0 } +// SendAndSubscribeTransactionStatuses provides a mock function with given fields: ctx, tx, requiredEventEncodingVersion +func (_m *API) SendAndSubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody, requiredEventEncodingVersion entities.EventEncodingVersion) subscription.Subscription { + ret := _m.Called(ctx, tx, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for SendAndSubscribeTransactionStatuses") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, *flow.TransactionBody, entities.EventEncodingVersion) subscription.Subscription); ok { + r0 = rf(ctx, tx, requiredEventEncodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + // SendTransaction provides a mock function with given fields: ctx, tx func (_m *API) SendTransaction(ctx context.Context, tx *flow.TransactionBody) error { ret := _m.Called(ctx, tx) + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *flow.TransactionBody) error); ok { r0 = rf(ctx, tx) @@ -725,13 +1182,212 @@ func (_m *API) SendTransaction(ctx context.Context, tx *flow.TransactionBody) er return r0 } -type mockConstructorTestingTNewAPI interface { - mock.TestingT - Cleanup(func()) +// SubscribeBlockDigestsFromLatest provides a mock function with given fields: ctx, blockStatus +func (_m *API) SubscribeBlockDigestsFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription { + ret := _m.Called(ctx, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromLatest") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.BlockStatus) subscription.Subscription); ok { + r0 = rf(ctx, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeBlockDigestsFromStartBlockID provides a mock function with given fields: ctx, startBlockID, blockStatus +func (_m *API) SubscribeBlockDigestsFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) subscription.Subscription { + ret := _m.Called(ctx, startBlockID, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromStartBlockID") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.BlockStatus) subscription.Subscription); ok { + r0 = rf(ctx, startBlockID, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + 
return r0 +} + +// SubscribeBlockDigestsFromStartHeight provides a mock function with given fields: ctx, startHeight, blockStatus +func (_m *API) SubscribeBlockDigestsFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) subscription.Subscription { + ret := _m.Called(ctx, startHeight, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromStartHeight") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.BlockStatus) subscription.Subscription); ok { + r0 = rf(ctx, startHeight, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeBlockHeadersFromLatest provides a mock function with given fields: ctx, blockStatus +func (_m *API) SubscribeBlockHeadersFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription { + ret := _m.Called(ctx, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromLatest") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.BlockStatus) subscription.Subscription); ok { + r0 = rf(ctx, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeBlockHeadersFromStartBlockID provides a mock function with given fields: ctx, startBlockID, blockStatus +func (_m *API) SubscribeBlockHeadersFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) subscription.Subscription { + ret := _m.Called(ctx, startBlockID, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromStartBlockID") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.BlockStatus) subscription.Subscription); ok { + r0 = rf(ctx, startBlockID, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeBlockHeadersFromStartHeight provides a mock function with given fields: ctx, startHeight, blockStatus +func (_m *API) SubscribeBlockHeadersFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) subscription.Subscription { + ret := _m.Called(ctx, startHeight, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromStartHeight") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.BlockStatus) subscription.Subscription); ok { + r0 = rf(ctx, startHeight, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeBlocksFromLatest provides a mock function with given fields: ctx, blockStatus +func (_m *API) SubscribeBlocksFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription { + ret := _m.Called(ctx, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromLatest") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.BlockStatus) subscription.Subscription); ok { + r0 = rf(ctx, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeBlocksFromStartBlockID provides a mock function with given fields: ctx, startBlockID, blockStatus 
+func (_m *API) SubscribeBlocksFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) subscription.Subscription { + ret := _m.Called(ctx, startBlockID, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromStartBlockID") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.BlockStatus) subscription.Subscription); ok { + r0 = rf(ctx, startBlockID, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeBlocksFromStartHeight provides a mock function with given fields: ctx, startHeight, blockStatus +func (_m *API) SubscribeBlocksFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) subscription.Subscription { + ret := _m.Called(ctx, startHeight, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromStartHeight") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.BlockStatus) subscription.Subscription); ok { + r0 = rf(ctx, startHeight, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeTransactionStatuses provides a mock function with given fields: ctx, txID, requiredEventEncodingVersion +func (_m *API) SubscribeTransactionStatuses(ctx context.Context, txID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) subscription.Subscription { + ret := _m.Called(ctx, txID, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for SubscribeTransactionStatuses") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) subscription.Subscription); ok { + r0 = rf(ctx, txID, requiredEventEncodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 } // NewAPI creates a new instance of API. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAPI(t mockConstructorTestingTNewAPI) *API { +// The first argument is typically a *testing.T value. +func NewAPI(t interface { + mock.TestingT + Cleanup(func()) +}) *API { mock := &API{} mock.Mock.Test(t) diff --git a/access/mock/events_api.go b/access/mock/events_api.go new file mode 100644 index 00000000000..010a35a8910 --- /dev/null +++ b/access/mock/events_api.go @@ -0,0 +1,91 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + entities "github.com/onflow/flow/protobuf/go/flow/entities" + + mock "github.com/stretchr/testify/mock" +) + +// EventsAPI is an autogenerated mock type for the EventsAPI type +type EventsAPI struct { + mock.Mock +} + +// GetEventsForBlockIDs provides a mock function with given fields: ctx, eventType, blockIDs, requiredEventEncodingVersion +func (_m *EventsAPI) GetEventsForBlockIDs(ctx context.Context, eventType string, blockIDs []flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) ([]flow.BlockEvents, error) { + ret := _m.Called(ctx, eventType, blockIDs, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetEventsForBlockIDs") + } + + var r0 []flow.BlockEvents + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, []flow.Identifier, entities.EventEncodingVersion) ([]flow.BlockEvents, error)); ok { + return rf(ctx, eventType, blockIDs, requiredEventEncodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, string, []flow.Identifier, entities.EventEncodingVersion) []flow.BlockEvents); ok { + r0 = rf(ctx, eventType, blockIDs, requiredEventEncodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.BlockEvents) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, []flow.Identifier, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, eventType, blockIDs, requiredEventEncodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetEventsForHeightRange provides a mock function with given fields: ctx, eventType, startHeight, endHeight, requiredEventEncodingVersion +func (_m *EventsAPI) GetEventsForHeightRange(ctx context.Context, eventType string, startHeight uint64, endHeight uint64, requiredEventEncodingVersion entities.EventEncodingVersion) ([]flow.BlockEvents, error) { + ret := _m.Called(ctx, eventType, startHeight, endHeight, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetEventsForHeightRange") + } + + var r0 []flow.BlockEvents + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64, entities.EventEncodingVersion) ([]flow.BlockEvents, error)); ok { + return rf(ctx, eventType, startHeight, endHeight, requiredEventEncodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64, entities.EventEncodingVersion) []flow.BlockEvents); ok { + r0 = rf(ctx, eventType, startHeight, endHeight, requiredEventEncodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.BlockEvents) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, uint64, uint64, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, eventType, startHeight, endHeight, requiredEventEncodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEventsAPI creates a new instance of EventsAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
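As a usage sketch, not part of the generated file: this is how a mockery mock like the one above is typically driven from a test. The event type, heights, and empty return value are placeholder assumptions; only the method signature and constructor come from the generated code.

```go
package access_test // hypothetical test package

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	accessmock "github.com/onflow/flow-go/access/mock"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow/protobuf/go/flow/entities"
)

func TestGetEventsForHeightRange(t *testing.T) {
	// The constructor registers a cleanup that asserts all expectations were met.
	api := accessmock.NewEventsAPI(t)

	// Program one expected call; mock.Anything matches any context value.
	api.On("GetEventsForHeightRange",
		mock.Anything, "flow.AccountCreated", uint64(1), uint64(10),
		entities.EventEncodingVersion_CCF_V0,
	).Return([]flow.BlockEvents{}, nil).Once()

	events, err := api.GetEventsForHeightRange(
		context.Background(), "flow.AccountCreated", 1, 10,
		entities.EventEncodingVersion_CCF_V0,
	)
	require.NoError(t, err)
	require.Empty(t, events)
}
```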
+func NewEventsAPI(t interface { + mock.TestingT + Cleanup(func()) +}) *EventsAPI { + mock := &EventsAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/access/mock/scripts_api.go b/access/mock/scripts_api.go new file mode 100644 index 00000000000..9c7b113c73b --- /dev/null +++ b/access/mock/scripts_api.go @@ -0,0 +1,119 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// ScriptsAPI is an autogenerated mock type for the ScriptsAPI type +type ScriptsAPI struct { + mock.Mock +} + +// ExecuteScriptAtBlockHeight provides a mock function with given fields: ctx, blockHeight, script, arguments +func (_m *ScriptsAPI) ExecuteScriptAtBlockHeight(ctx context.Context, blockHeight uint64, script []byte, arguments [][]byte) ([]byte, error) { + ret := _m.Called(ctx, blockHeight, script, arguments) + + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockHeight") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, []byte, [][]byte) ([]byte, error)); ok { + return rf(ctx, blockHeight, script, arguments) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, []byte, [][]byte) []byte); ok { + r0 = rf(ctx, blockHeight, script, arguments) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, []byte, [][]byte) error); ok { + r1 = rf(ctx, blockHeight, script, arguments) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecuteScriptAtBlockID provides a mock function with given fields: ctx, blockID, script, arguments +func (_m *ScriptsAPI) ExecuteScriptAtBlockID(ctx context.Context, blockID flow.Identifier, script []byte, arguments [][]byte) ([]byte, error) { + ret := _m.Called(ctx, blockID, script, arguments) + + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockID") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, []byte, [][]byte) ([]byte, error)); ok { + return rf(ctx, blockID, script, arguments) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, []byte, [][]byte) []byte); ok { + r0 = rf(ctx, blockID, script, arguments) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, []byte, [][]byte) error); ok { + r1 = rf(ctx, blockID, script, arguments) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecuteScriptAtLatestBlock provides a mock function with given fields: ctx, script, arguments +func (_m *ScriptsAPI) ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, arguments [][]byte) ([]byte, error) { + ret := _m.Called(ctx, script, arguments) + + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtLatestBlock") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte) ([]byte, error)); ok { + return rf(ctx, script, arguments) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte) []byte); ok { + r0 = rf(ctx, script, arguments) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, [][]byte) error); ok { + r1 = rf(ctx, script, arguments) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
NewScriptsAPI creates a new instance of ScriptsAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewScriptsAPI(t interface { + mock.TestingT + Cleanup(func()) +}) *ScriptsAPI { + mock := &ScriptsAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/access/mock/transaction_stream_api.go b/access/mock/transaction_stream_api.go new file mode 100644 index 00000000000..9968f9b71f8 --- /dev/null +++ b/access/mock/transaction_stream_api.go @@ -0,0 +1,73 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + entities "github.com/onflow/flow/protobuf/go/flow/entities" + + mock "github.com/stretchr/testify/mock" + + subscription "github.com/onflow/flow-go/engine/access/subscription" +) + +// TransactionStreamAPI is an autogenerated mock type for the TransactionStreamAPI type +type TransactionStreamAPI struct { + mock.Mock +} + +// SendAndSubscribeTransactionStatuses provides a mock function with given fields: ctx, tx, requiredEventEncodingVersion +func (_m *TransactionStreamAPI) SendAndSubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody, requiredEventEncodingVersion entities.EventEncodingVersion) subscription.Subscription { + ret := _m.Called(ctx, tx, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for SendAndSubscribeTransactionStatuses") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, *flow.TransactionBody, entities.EventEncodingVersion) subscription.Subscription); ok { + r0 = rf(ctx, tx, requiredEventEncodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeTransactionStatuses provides a mock function with given fields: ctx, txID, requiredEventEncodingVersion +func (_m *TransactionStreamAPI) SubscribeTransactionStatuses(ctx context.Context, txID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) subscription.Subscription { + ret := _m.Called(ctx, txID, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for SubscribeTransactionStatuses") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) subscription.Subscription); ok { + r0 = rf(ctx, txID, requiredEventEncodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// NewTransactionStreamAPI creates a new instance of TransactionStreamAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionStreamAPI(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionStreamAPI { + mock := &TransactionStreamAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/access/mock/transactions_api.go b/access/mock/transactions_api.go new file mode 100644 index 00000000000..d3bef687ab1 --- /dev/null +++ b/access/mock/transactions_api.go @@ -0,0 +1,261 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + entities "github.com/onflow/flow/protobuf/go/flow/entities" + + mock "github.com/stretchr/testify/mock" + + modelaccess "github.com/onflow/flow-go/model/access" +) + +// TransactionsAPI is an autogenerated mock type for the TransactionsAPI type +type TransactionsAPI struct { + mock.Mock +} + +// GetSystemTransaction provides a mock function with given fields: ctx, txID, blockID +func (_m *TransactionsAPI) GetSystemTransaction(ctx context.Context, txID flow.Identifier, blockID flow.Identifier) (*flow.TransactionBody, error) { + ret := _m.Called(ctx, txID, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransaction") + } + + var r0 *flow.TransactionBody + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier) (*flow.TransactionBody, error)); ok { + return rf(ctx, txID, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier) *flow.TransactionBody); ok { + r0 = rf(ctx, txID, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionBody) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.Identifier) error); ok { + r1 = rf(ctx, txID, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransactionResult provides a mock function with given fields: ctx, txID, blockID, encodingVersion +func (_m *TransactionsAPI) GetSystemTransactionResult(ctx context.Context, txID flow.Identifier, blockID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, txID, blockID, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransactionResult") + } + + var r0 *modelaccess.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) (*modelaccess.TransactionResult, error)); ok { + return rf(ctx, txID, blockID, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) *modelaccess.TransactionResult); ok { + r0 = rf(ctx, txID, blockID, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*modelaccess.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, txID, blockID, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransaction provides a mock function with given fields: ctx, id +func (_m *TransactionsAPI) GetTransaction(ctx context.Context, id flow.Identifier) (*flow.TransactionBody, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetTransaction") + } + + var r0 *flow.TransactionBody + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.TransactionBody, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *flow.TransactionBody); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionBody) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionResult provides a mock function with given fields: 
ctx, txID, blockID, collectionID, encodingVersion +func (_m *TransactionsAPI) GetTransactionResult(ctx context.Context, txID flow.Identifier, blockID flow.Identifier, collectionID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, txID, blockID, collectionID, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionResult") + } + + var r0 *modelaccess.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) (*modelaccess.TransactionResult, error)); ok { + return rf(ctx, txID, blockID, collectionID, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) *modelaccess.TransactionResult); ok { + r0 = rf(ctx, txID, blockID, collectionID, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*modelaccess.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.Identifier, flow.Identifier, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, txID, blockID, collectionID, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionResultByIndex provides a mock function with given fields: ctx, blockID, index, encodingVersion +func (_m *TransactionsAPI) GetTransactionResultByIndex(ctx context.Context, blockID flow.Identifier, index uint32, encodingVersion entities.EventEncodingVersion) (*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, blockID, index, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultByIndex") + } + + var r0 *modelaccess.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint32, entities.EventEncodingVersion) (*modelaccess.TransactionResult, error)); ok { + return rf(ctx, blockID, index, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint32, entities.EventEncodingVersion) *modelaccess.TransactionResult); ok { + r0 = rf(ctx, blockID, index, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*modelaccess.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint32, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, blockID, index, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionResultsByBlockID provides a mock function with given fields: ctx, blockID, encodingVersion +func (_m *TransactionsAPI) GetTransactionResultsByBlockID(ctx context.Context, blockID flow.Identifier, encodingVersion entities.EventEncodingVersion) ([]*modelaccess.TransactionResult, error) { + ret := _m.Called(ctx, blockID, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultsByBlockID") + } + + var r0 []*modelaccess.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) ([]*modelaccess.TransactionResult, error)); ok { + return rf(ctx, blockID, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) []*modelaccess.TransactionResult); ok { + r0 = rf(ctx, blockID, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).([]*modelaccess.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, blockID, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionsByBlockID provides a mock function with given fields: ctx, blockID +func (_m *TransactionsAPI) GetTransactionsByBlockID(ctx context.Context, blockID flow.Identifier) ([]*flow.TransactionBody, error) { + ret := _m.Called(ctx, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionsByBlockID") + } + + var r0 []*flow.TransactionBody + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) ([]*flow.TransactionBody, error)); ok { + return rf(ctx, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) []*flow.TransactionBody); ok { + r0 = rf(ctx, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*flow.TransactionBody) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *TransactionsAPI) SendTransaction(ctx context.Context, tx *flow.TransactionBody) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.TransactionBody) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTransactionsAPI creates a new instance of TransactionsAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionsAPI(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionsAPI { + mock := &TransactionsAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/access/ratelimit/limiter.go b/access/ratelimit/limiter.go new file mode 100644 index 00000000000..280f9d773aa --- /dev/null +++ b/access/ratelimit/limiter.go @@ -0,0 +1,24 @@ +package ratelimit + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// RateLimiter is an interface for checking if an address is rate limited. +// By convention, the address used is the payer field of a transaction. +// This rate limiter is applied when a transaction is first received by a +// node, meaning that if a transaction is rate-limited it will be dropped. 
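A concrete limiter honoring this contract is out of scope for this diff, but as a hedged sketch, one could keep a token bucket per payer address using golang.org/x/time/rate. The TokenBucketLimiter name and the rate and burst parameters below are invented for illustration; only the IsRateLimited method comes from the interface declared just below.

```go
package ratelimit

import (
	"sync"

	"golang.org/x/time/rate"

	"github.com/onflow/flow-go/model/flow"
)

// TokenBucketLimiter is a hypothetical RateLimiter keeping one token
// bucket per payer address.
type TokenBucketLimiter struct {
	mu       sync.Mutex
	limiters map[flow.Address]*rate.Limiter
	limit    rate.Limit // sustained transactions per second per payer
	burst    int        // tolerated burst size per payer
}

func NewTokenBucketLimiter(limit rate.Limit, burst int) *TokenBucketLimiter {
	return &TokenBucketLimiter{
		limiters: make(map[flow.Address]*rate.Limiter),
		limit:    limit,
		burst:    burst,
	}
}

// IsRateLimited reports whether the payer's bucket is empty; a limited
// transaction would be dropped on receipt, as described above.
func (l *TokenBucketLimiter) IsRateLimited(address flow.Address) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	lim, ok := l.limiters[address]
	if !ok {
		lim = rate.NewLimiter(l.limit, l.burst)
		l.limiters[address] = lim
	}
	return !lim.Allow()
}
```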
+type RateLimiter interface { + // IsRateLimited returns true if the address is rate limited + IsRateLimited(address flow.Address) bool +} + +type NoopLimiter struct{} + +func NewNoopLimiter() *NoopLimiter { + return &NoopLimiter{} +} + +func (l *NoopLimiter) IsRateLimited(address flow.Address) bool { + return false +} diff --git a/access/utils/cadence.go b/access/utils/cadence.go new file mode 100644 index 00000000000..90199d01f79 --- /dev/null +++ b/access/utils/cadence.go @@ -0,0 +1,20 @@ +package utils + +import ( + "errors" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/json" +) + +func EncodeArgs(argValues []cadence.Value) ([][]byte, error) { + args := make([][]byte, len(argValues)) + for i, arg := range argValues { + var err error + args[i], err = json.Encode(arg) + if err != nil { + return nil, errors.New("could not encode cadence value: " + err.Error()) + } + } + return args, nil +} diff --git a/access/validator.go b/access/validator.go deleted file mode 100644 index 2d87604a27a..00000000000 --- a/access/validator.go +++ /dev/null @@ -1,303 +0,0 @@ -package access - -import ( - "errors" - "fmt" - - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/state" - - "github.com/onflow/cadence/runtime/parser" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol" -) - -type Blocks interface { - HeaderByID(id flow.Identifier) (*flow.Header, error) - FinalizedHeader() (*flow.Header, error) -} - -type ProtocolStateBlocks struct { - state protocol.State -} - -func NewProtocolStateBlocks(state protocol.State) *ProtocolStateBlocks { - return &ProtocolStateBlocks{state: state} -} - -func (b *ProtocolStateBlocks) HeaderByID(id flow.Identifier) (*flow.Header, error) { - header, err := b.state.AtBlockID(id).Head() - if err != nil { - if errors.Is(err, state.ErrUnknownSnapshotReference) { - return nil, nil - } - - return nil, err - } - - return header, nil -} - -func (b *ProtocolStateBlocks) FinalizedHeader() (*flow.Header, error) { - return b.state.Final().Head() -} - -type TransactionValidationOptions struct { - Expiry uint - ExpiryBuffer uint - AllowEmptyReferenceBlockID bool - AllowUnknownReferenceBlockID bool - MaxGasLimit uint64 - CheckScriptsParse bool - MaxTransactionByteSize uint64 - MaxCollectionByteSize uint64 -} - -type TransactionValidator struct { - blocks Blocks // for looking up blocks to check transaction expiry - chain flow.Chain // for checking validity of addresses - options TransactionValidationOptions - serviceAccountAddress flow.Address -} - -func NewTransactionValidator( - blocks Blocks, - chain flow.Chain, - options TransactionValidationOptions, -) *TransactionValidator { - return &TransactionValidator{ - blocks: blocks, - chain: chain, - options: options, - serviceAccountAddress: chain.ServiceAddress(), - } -} - -func (v *TransactionValidator) Validate(tx *flow.TransactionBody) (err error) { - err = v.checkTxSizeLimit(tx) - if err != nil { - return err - } - - err = v.checkMissingFields(tx) - if err != nil { - return err - } - - err = v.checkGasLimit(tx) - if err != nil { - return err - } - - err = v.checkExpiry(tx) - if err != nil { - return err - } - - err = v.checkCanBeParsed(tx) - if err != nil { - return err - } - - err = v.checkAddresses(tx) - if err != nil { - return err - } - - err = v.checkSignatureFormat(tx) - if err != nil { - return err - } - - err = v.checkSignatureDuplications(tx) - if err != nil { - return err - } - - // TODO replace checkSignatureFormat by verifying the account/payer 
signatures - - return nil -} - -func (v *TransactionValidator) checkTxSizeLimit(tx *flow.TransactionBody) error { - txSize := uint64(tx.ByteSize()) - // first check compatibility to collection byte size - // this guarantees liveness - if txSize >= v.options.MaxCollectionByteSize { - return InvalidTxByteSizeError{ - Actual: txSize, - Maximum: v.options.MaxCollectionByteSize, - } - } - // this logic need the reason we don't greenlist the service account against the collection size - // limits is we can't verify the signature here yet. - if tx.Payer == v.serviceAccountAddress { - return nil - } - if txSize > v.options.MaxTransactionByteSize { - return InvalidTxByteSizeError{ - Actual: txSize, - Maximum: v.options.MaxTransactionByteSize, - } - } - return nil -} - -func (v *TransactionValidator) checkMissingFields(tx *flow.TransactionBody) error { - missingFields := tx.MissingFields() - - if v.options.AllowEmptyReferenceBlockID { - missingFields = remove(missingFields, flow.TransactionFieldRefBlockID.String()) - } - - if len(missingFields) > 0 { - return IncompleteTransactionError{MissingFields: missingFields} - } - - return nil -} - -func (v *TransactionValidator) checkGasLimit(tx *flow.TransactionBody) error { - // if service account is the payer of the transaction accepts any gas limit - // note that even though we don't enforce any limit here, exec node later - // enforce a max value for any transaction - if tx.Payer == v.serviceAccountAddress { - return nil - } - if tx.GasLimit > v.options.MaxGasLimit || tx.GasLimit == 0 { - return InvalidGasLimitError{ - Actual: tx.GasLimit, - Maximum: v.options.MaxGasLimit, - } - } - - return nil -} - -// checkExpiry checks whether a transaction's reference block ID is -// valid. Returns nil if the reference is valid, returns an error if the -// reference is invalid or we failed to check it. 
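Although the validator file is deleted here (it is relocated under access/validator), the expiry arithmetic described above deserves a worked example. A minimal, self-contained sketch with assumed configuration values, mirroring the checkExpiry logic that follows:

```go
package main

import "fmt"

// Assumed configuration, standing in for TransactionValidationOptions.
const (
	expiry       uint = 600 // reference block expires after this many blocks
	expiryBuffer uint = 30  // also refuse transactions this close to expiring
)

// isExpired mirrors checkExpiry: diff saturates at zero when the
// reference block is ahead of the finalized block, and the transaction
// is rejected once diff exceeds expiry - expiryBuffer.
func isExpired(refHeight, finalHeight uint64) bool {
	var diff uint64
	if finalHeight > refHeight {
		diff = finalHeight - refHeight
	}
	return uint(diff) > expiry-expiryBuffer
}

func main() {
	fmt.Println(isExpired(1000, 1570)) // false: diff = 570 is still within 600 - 30
	fmt.Println(isExpired(1000, 1571)) // true: diff = 571 > 570
}
```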
-func (v *TransactionValidator) checkExpiry(tx *flow.TransactionBody) error { - if tx.ReferenceBlockID == flow.ZeroID && v.options.AllowEmptyReferenceBlockID { - return nil - } - - // look up the reference block - ref, err := v.blocks.HeaderByID(tx.ReferenceBlockID) - if err != nil { - return fmt.Errorf("could not get reference block: %w", err) - } - - if ref == nil { - // the transaction references an unknown block - at this point we decide - // whether to consider it expired based on configuration - if v.options.AllowUnknownReferenceBlockID { - return nil - } - - return ErrUnknownReferenceBlock - } - - // get the latest finalized block we know about - final, err := v.blocks.FinalizedHeader() - if err != nil { - return fmt.Errorf("could not get finalized header: %w", err) - } - - diff := final.Height - ref.Height - // check for overflow - if ref.Height > final.Height { - diff = 0 - } - - // discard transactions that are expired, or that will expire sooner than - // our configured buffer allows - if uint(diff) > v.options.Expiry-v.options.ExpiryBuffer { - return ExpiredTransactionError{ - RefHeight: ref.Height, - FinalHeight: final.Height, - } - } - - return nil -} - -func (v *TransactionValidator) checkCanBeParsed(tx *flow.TransactionBody) error { - if v.options.CheckScriptsParse { - _, err := parser.ParseProgram(nil, tx.Script, parser.Config{}) - if err != nil { - return InvalidScriptError{ParserErr: err} - } - } - - return nil -} - -func (v *TransactionValidator) checkAddresses(tx *flow.TransactionBody) error { - - for _, address := range append(tx.Authorizers, tx.Payer) { - // we check whether this is a valid output of the address generator - if !v.chain.IsValid(address) { - return InvalidAddressError{Address: address} - } - } - - return nil -} - -// every key (account, key index combination) can only be used once for signing -func (v *TransactionValidator) checkSignatureDuplications(tx *flow.TransactionBody) error { - type uniqueKey struct { - address flow.Address - index uint64 - } - observedSigs := make(map[uniqueKey]bool) - for _, sig := range append(tx.PayloadSignatures, tx.EnvelopeSignatures...) { - if observedSigs[uniqueKey{sig.Address, sig.KeyIndex}] { - return DuplicatedSignatureError{Address: sig.Address, KeyIndex: sig.KeyIndex} - } - observedSigs[uniqueKey{sig.Address, sig.KeyIndex}] = true - } - return nil -} - -func (v *TransactionValidator) checkSignatureFormat(tx *flow.TransactionBody) error { - - for _, signature := range append(tx.PayloadSignatures, tx.EnvelopeSignatures...) { - // check the format of the signature is valid. - // a valid signature is an ECDSA signature of either P-256 or secp256k1 curve. - ecdsaSignature := signature.Signature - - // check if the signature could be a P-256 signature - valid, err := crypto.SignatureFormatCheck(crypto.ECDSAP256, ecdsaSignature) - if err != nil { - return fmt.Errorf("could not check the signature format (%s): %w", signature, err) - } - if valid { - continue - } - - // check if the signature could be a secp256k1 signature - valid, err = crypto.SignatureFormatCheck(crypto.ECDSASecp256k1, ecdsaSignature) - if err != nil { - return fmt.Errorf("could not check the signature format (%s): %w", signature, err) - } - if valid { - continue - } - - return InvalidSignatureError{Signature: signature} - } - - return nil -} - -func remove(s []string, r string) []string { - for i, v := range s { - if v == r { - return append(s[:i], s[i+1:]...) 
- } - } - return s +} diff --git a/access/validator/errors.go b/access/validator/errors.go new file mode 100644 index 00000000000..5542f275ac3 --- /dev/null +++ b/access/validator/errors.go @@ -0,0 +1,145 @@ +package validator + +import ( + "errors" + "fmt" + + "github.com/onflow/cadence" + + "github.com/onflow/flow-go/model/flow" +) + +// ErrUnknownReferenceBlock indicates that a transaction references an unknown block. +var ErrUnknownReferenceBlock = errors.New("unknown reference block") + +// IndexReporterNotInitialized is returned when indexReporter is nil because +// execution data syncing and indexing is disabled. +var IndexReporterNotInitialized = errors.New("index reporter not initialized") + +// IncompleteTransactionError indicates that a transaction is missing one or more required fields. +type IncompleteTransactionError struct { + MissingFields []string +} + +func (e IncompleteTransactionError) Error() string { + return fmt.Sprintf("transaction is missing required fields: %s", e.MissingFields) +} + +// ExpiredTransactionError indicates that a transaction has expired. +type ExpiredTransactionError struct { + RefHeight, FinalHeight uint64 +} + +func (e ExpiredTransactionError) Error() string { + return fmt.Sprintf("transaction is expired: ref_height=%d final_height=%d", e.RefHeight, e.FinalHeight) +} + +// InvalidScriptError indicates that a transaction contains an invalid Cadence script. +type InvalidScriptError struct { + ParserErr error +} + +func (e InvalidScriptError) Error() string { + return fmt.Sprintf("failed to parse transaction Cadence script: %s", e.ParserErr) +} + +func (e InvalidScriptError) Unwrap() error { + return e.ParserErr +} + +// InvalidGasLimitError indicates that a transaction specifies a gas limit that exceeds the maximum. +type InvalidGasLimitError struct { + Maximum uint64 + Actual uint64 +} + +func (e InvalidGasLimitError) Error() string { + return fmt.Sprintf("transaction gas limit (%d) is not in the acceptable range (min: 1, max: %d)", e.Actual, e.Maximum) +} + +// InvalidAddressError indicates that a transaction references an invalid flow Address +// in either the Authorizers or Payer field. +type InvalidAddressError struct { + Address flow.Address +} + +func (e InvalidAddressError) Error() string { + return fmt.Sprintf("invalid address: %s", e.Address) +} + +// DuplicatedSignatureError indicates that two signatures have been provided for a key (combination of account and key index). +type DuplicatedSignatureError struct { + Address flow.Address + KeyIndex uint32 +} + +func (e DuplicatedSignatureError) Error() string { + return fmt.Sprintf("duplicated signature for key (address: %s, index: %d)", e.Address.String(), e.KeyIndex) +} + +// InvalidRawSignatureError indicates that a transaction contains a cryptographic raw signature +// with an invalid format. +type InvalidRawSignatureError struct { + Signature flow.TransactionSignature +} + +func (e InvalidRawSignatureError) Error() string { + return fmt.Sprintf("the cryptographic signature within the transaction signature has an invalid format: %s", e.Signature) +}
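Because the validation errors in this file are plain Go types (plus one sentinel value), callers can branch on them with errors.Is and errors.As. A hypothetical consumer sketch; only the error types themselves come from this file:

```go
package validator_test

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/access/validator"
)

// classify shows how a caller might map validation failures to
// user-facing categories.
func classify(err error) string {
	var expired validator.ExpiredTransactionError
	var gas validator.InvalidGasLimitError
	switch {
	case err == nil:
		return "valid"
	case errors.Is(err, validator.ErrUnknownReferenceBlock):
		return "unknown reference block"
	case errors.As(err, &expired):
		return fmt.Sprintf("expired: ref height %d, final height %d",
			expired.RefHeight, expired.FinalHeight)
	case errors.As(err, &gas):
		return fmt.Sprintf("gas limit %d not in [1, %d]", gas.Actual, gas.Maximum)
	default:
		return "other validation failure"
	}
}
```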
+type InvalidAuthenticationSchemeFormatError struct { + Signature flow.TransactionSignature +} + +func (e InvalidAuthenticationSchemeFormatError) Error() string { + return fmt.Sprintf("the transaction signature has invalid extension data: %s", e.Signature) +} + +// InvalidTxByteSizeError indicates that a transaction byte size exceeds the maximum. +type InvalidTxByteSizeError struct { + Maximum uint64 + Actual uint64 +} + +func (e InvalidTxByteSizeError) Error() string { + return fmt.Sprintf("transaction byte size (%d) exceeds the maximum byte size allowed for a transaction (%d)", e.Actual, e.Maximum) +} + +type InvalidTxRateLimitedError struct { + Payer flow.Address +} + +func (e InvalidTxRateLimitedError) Error() string { + return fmt.Sprintf("transaction rate limited for payer (%s)", e.Payer) +} + +type InsufficientBalanceError struct { + Payer flow.Address + RequiredBalance cadence.UFix64 +} + +func (e InsufficientBalanceError) Error() string { + return fmt.Sprintf("transaction payer (%s) has insufficient balance to pay transaction fee. "+ + "Required balance: (%s). ", e.Payer, e.RequiredBalance.String()) +} + +func IsInsufficientBalanceError(err error) bool { + var balanceError InsufficientBalanceError + return errors.As(err, &balanceError) +} + +// IndexedHeightFarBehindError indicates that a node is far behind on indexing. +type IndexedHeightFarBehindError struct { + SealedHeight uint64 + IndexedHeight uint64 +} + +func (e IndexedHeightFarBehindError) Error() string { + return fmt.Sprintf("the difference between the latest sealed height (%d) and indexed height (%d) exceeds the maximum gap allowed", + e.SealedHeight, e.IndexedHeight) +} diff --git a/access/validator/mock/blocks.go b/access/validator/mock/blocks.go new file mode 100644 index 00000000000..358938db0ba --- /dev/null +++ b/access/validator/mock/blocks.go @@ -0,0 +1,145 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// Blocks is an autogenerated mock type for the Blocks type +type Blocks struct { + mock.Mock +} + +// FinalizedHeader provides a mock function with no fields +func (_m *Blocks) FinalizedHeader() (*flow.Header, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalizedHeader") + } + + var r0 *flow.Header + var r1 error + if rf, ok := ret.Get(0).(func() (*flow.Header, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *flow.Header); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Header) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeaderByID provides a mock function with given fields: id +func (_m *Blocks) HeaderByID(id flow.Identifier) (*flow.Header, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for HeaderByID") + } + + var r0 *flow.Header + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Header, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.Header); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Header) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IndexedHeight provides a mock function with no fields +func (_m *Blocks) IndexedHeight() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IndexedHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SealedHeader provides a mock function with no fields +func (_m *Blocks) SealedHeader() (*flow.Header, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SealedHeader") + } + + var r0 *flow.Header + var r1 error + if rf, ok := ret.Get(0).(func() (*flow.Header, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *flow.Header); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Header) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewBlocks creates a new instance of Blocks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewBlocks(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *Blocks {
+	mock := &Blocks{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/access/validator/validator.go b/access/validator/validator.go
new file mode 100644
index 00000000000..3e3368ce3d4
--- /dev/null
+++ b/access/validator/validator.go
@@ -0,0 +1,540 @@
+package validator
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/rs/zerolog/log"
+
+	"github.com/onflow/cadence"
+	jsoncdc "github.com/onflow/cadence/encoding/json"
+	"github.com/onflow/cadence/parser"
+	"github.com/onflow/crypto"
+	"github.com/onflow/flow-core-contracts/lib/go/templates"
+
+	"github.com/onflow/flow-go/access/ratelimit"
+	cadenceutils "github.com/onflow/flow-go/access/utils"
+	"github.com/onflow/flow-go/fvm"
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/execution"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/module/state_synchronization"
+	"github.com/onflow/flow-go/state"
+	"github.com/onflow/flow-go/state/protocol"
+)
+
+// DefaultSealedIndexedHeightThreshold is the default number of blocks between sealed and indexed height.
+// This sets a limit on how far into the past the payer validator will allow for checking the payer's balance.
+const DefaultSealedIndexedHeightThreshold = 30
+
+type Blocks interface {
+	HeaderByID(id flow.Identifier) (*flow.Header, error)
+	FinalizedHeader() (*flow.Header, error)
+	SealedHeader() (*flow.Header, error)
+	IndexedHeight() (uint64, error)
+}
+
+type ProtocolStateBlocks struct {
+	state         protocol.State
+	indexReporter state_synchronization.IndexReporter
+}
+
+func NewProtocolStateBlocks(state protocol.State, indexReporter state_synchronization.IndexReporter) *ProtocolStateBlocks {
+	return &ProtocolStateBlocks{
+		state:         state,
+		indexReporter: indexReporter,
+	}
+}
+
+func (b *ProtocolStateBlocks) HeaderByID(id flow.Identifier) (*flow.Header, error) {
+	header, err := b.state.AtBlockID(id).Head()
+	if err != nil {
+		if errors.Is(err, state.ErrUnknownSnapshotReference) {
+			return nil, nil
+		}
+
+		return nil, err
+	}
+
+	return header, nil
+}
+
+func (b *ProtocolStateBlocks) FinalizedHeader() (*flow.Header, error) {
+	return b.state.Final().Head()
+}
+
+func (b *ProtocolStateBlocks) SealedHeader() (*flow.Header, error) {
+	return b.state.Sealed().Head()
+}
+
+// IndexedHeight returns the highest indexed height by calling the corresponding function of indexReporter.
+// Expected errors during normal operation:
+// - IndexReporterNotInitialized - index reporter was not initialized.
+func (b *ProtocolStateBlocks) IndexedHeight() (uint64, error) {
+	if b.indexReporter != nil {
+		return b.indexReporter.HighestIndexedHeight()
+	}
+	return 0, IndexReporterNotInitialized
+}
+
+// PayerBalanceMode represents the mode for checking the payer's balance
+// when validating transactions. It controls whether and how the balance
+// check is performed during transaction validation.
+//
+// There are a few modes available:
+//
+// - `Disabled` - Balance checking is completely disabled. No checks are
+// performed to verify if the payer has sufficient balance to cover the
+// transaction fees.
+// - `WarnCheck` - Balance is checked, and a warning is logged if the payer
+// does not have enough balance. The transaction is still accepted and
+// processed regardless of the check result.
+// - `EnforceCheck` - Balance is checked, and the transaction is rejected if +// the payer does not have sufficient balance to cover the transaction fees. +type PayerBalanceMode int + +const ( + // Disabled indicates that payer balance checking is turned off. + Disabled PayerBalanceMode = iota + + // WarnCheck logs a warning if the payer's balance is insufficient, but does not prevent the transaction from being accepted. + WarnCheck + + // EnforceCheck prevents the transaction from being accepted if the payer's balance is insufficient to cover transaction fees. + EnforceCheck +) + +func ParsePayerBalanceMode(s string) (PayerBalanceMode, error) { + switch s { + case Disabled.String(): + return Disabled, nil + case WarnCheck.String(): + return WarnCheck, nil + case EnforceCheck.String(): + return EnforceCheck, nil + default: + return 0, errors.New("invalid payer balance mode") + } +} + +func (m PayerBalanceMode) String() string { + switch m { + case Disabled: + return "disabled" + case WarnCheck: + return "warn" + case EnforceCheck: + return "enforce" + default: + return "" + } +} + +type TransactionValidationOptions struct { + Expiry uint + ExpiryBuffer uint + AllowEmptyReferenceBlockID bool + AllowUnknownReferenceBlockID bool + MaxGasLimit uint64 + CheckScriptsParse bool + MaxTransactionByteSize uint64 + MaxCollectionByteSize uint64 + CheckPayerBalanceMode PayerBalanceMode +} + +type ValidationStep struct { + check func(*flow.TransactionBody) error + failReason string +} + +// TransactionValidator implements transaction validation logic for Access and Collection Nodes. +// NOTE: This validation logic is a simplified interim approach: Collection/Access Nodes cannot reliably validate transaction signatures or payer balance. +// The long-term design for extending validation to cover these cases is described in the Sweet Onion Plan +// (https://flowfoundation.notion.site/Sweet-Onion-Plan-eae4db664feb459598879b49ccf2aa85). 
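+//
+// A minimal construction-and-use sketch (the fixture values are hypothetical;
+// see validator_test.go for a complete setup):
+//
+//	options := TransactionValidationOptions{
+//		MaxTransactionByteSize: flow.DefaultMaxTransactionByteSize,
+//		MaxCollectionByteSize:  flow.DefaultMaxCollectionByteSize,
+//		CheckPayerBalanceMode:  WarnCheck,
+//	}
+//	v, err := NewTransactionValidator(blocks, chain, txMetrics, options, scriptExecutor)
+//	if err != nil {
+//		return err
+//	}
+//	err = v.Validate(ctx, &txBody)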
+type TransactionValidator struct { + blocks Blocks // for looking up blocks to check transaction expiry + chain flow.Chain // for checking validity of addresses + options TransactionValidationOptions + serviceAccountAddress flow.Address + limiter ratelimit.RateLimiter + scriptExecutor execution.ScriptExecutor + verifyPayerBalanceScript []byte + transactionValidationMetrics module.TransactionValidationMetrics + + validationSteps []ValidationStep +} + +func NewTransactionValidator( + blocks Blocks, + chain flow.Chain, + transactionValidationMetrics module.TransactionValidationMetrics, + options TransactionValidationOptions, + executor execution.ScriptExecutor, +) (*TransactionValidator, error) { + if options.CheckPayerBalanceMode != Disabled && executor == nil { + return nil, errors.New("transaction validator cannot use checkPayerBalance with nil executor") + } + + env := systemcontracts.SystemContractsForChain(chain.ChainID()).AsTemplateEnv() + + txValidator := &TransactionValidator{ + blocks: blocks, + chain: chain, + options: options, + serviceAccountAddress: chain.ServiceAddress(), + limiter: ratelimit.NewNoopLimiter(), + scriptExecutor: executor, + verifyPayerBalanceScript: templates.GenerateVerifyPayerBalanceForTxExecution(env), + transactionValidationMetrics: transactionValidationMetrics, + } + + txValidator.initValidationSteps() + + return txValidator, nil +} + +func NewTransactionValidatorWithLimiter( + blocks Blocks, + chain flow.Chain, + options TransactionValidationOptions, + transactionValidationMetrics module.TransactionValidationMetrics, + rateLimiter ratelimit.RateLimiter, +) *TransactionValidator { + txValidator := &TransactionValidator{ + blocks: blocks, + chain: chain, + options: options, + serviceAccountAddress: chain.ServiceAddress(), + limiter: rateLimiter, + transactionValidationMetrics: transactionValidationMetrics, + } + + txValidator.initValidationSteps() + + return txValidator +} + +func (v *TransactionValidator) initValidationSteps() { + v.validationSteps = []ValidationStep{ + // rate limit transactions for specific payers. + // a short term solution to prevent attacks that send too many failed transactions + // if a transaction is from a payer that should be rate limited, all the following + // checks will be skipped + {v.checkRateLimitPayer, metrics.InvalidTransactionRateLimit}, + {v.checkTxSizeLimit, metrics.InvalidTransactionByteSize}, + {v.checkMissingFields, metrics.IncompleteTransaction}, + {v.checkGasLimit, metrics.InvalidGasLimit}, + {v.checkExpiry, metrics.ExpiredTransaction}, + {v.checkCanBeParsed, metrics.InvalidScript}, + {v.checkAddresses, metrics.InvalidAddresses}, + {v.checkSignatureFormat, metrics.InvalidSignature}, + {v.checkSignatureDuplications, metrics.DuplicatedSignature}, + } +} + +func (v *TransactionValidator) Validate(ctx context.Context, tx *flow.TransactionBody) (err error) { + for _, step := range v.validationSteps { + if err = step.check(tx); err != nil { + v.transactionValidationMetrics.TransactionValidationFailed(step.failReason) + return err + } + } + + err = v.checkSufficientBalanceToPayForTransaction(ctx, tx) + if err != nil { + // we only return InsufficientBalanceError as it's a client-side issue + // that requires action from a user. Other errors (e.g. parsing errors) + // are 'internal' and related to script execution process. they shouldn't + // prevent the transaction from proceeding. 
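+		// (behavior depends on the configured mode: with EnforceCheck the
+		// insufficient-balance error below is returned to the caller, while with
+		// WarnCheck it is only logged and the transaction is still accepted)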
+		if IsInsufficientBalanceError(err) {
+			v.transactionValidationMetrics.TransactionValidationFailed(metrics.InsufficientBalance)
+
+			if v.options.CheckPayerBalanceMode == EnforceCheck {
+				log.Warn().Err(err).Str("transactionID", tx.ID().String()).Str("payerAddress", tx.Payer.String()).Msg("enforce check error")
+				return err
+			}
+		}
+
+		// log and ignore all other errors
+		v.transactionValidationMetrics.TransactionValidationSkipped()
+		log.Info().Err(err).Msg("check payer validation skipped due to error")
+	}
+
+	// TODO replace checkSignatureFormat by verifying the account/payer signatures
+
+	v.transactionValidationMetrics.TransactionValidated()
+
+	return nil
+}
+
+func (v *TransactionValidator) checkRateLimitPayer(tx *flow.TransactionBody) error {
+	if v.limiter.IsRateLimited(tx.Payer) {
+		return InvalidTxRateLimitedError{
+			Payer: tx.Payer,
+		}
+	}
+	return nil
+}
+
+func (v *TransactionValidator) checkTxSizeLimit(tx *flow.TransactionBody) error {
+	txSize := uint64(tx.ByteSize())
+	// first check compatibility to collection byte size
+	// this guarantees liveness
+	if txSize >= v.options.MaxCollectionByteSize {
+		return InvalidTxByteSizeError{
+			Actual:  txSize,
+			Maximum: v.options.MaxCollectionByteSize,
+		}
+	}
+	// the reason we don't green-list the service account against the collection size
+	// limit is that we can't verify the signature here yet.
+	if tx.Payer == v.serviceAccountAddress {
+		return nil
+	}
+	if txSize > v.options.MaxTransactionByteSize {
+		return InvalidTxByteSizeError{
+			Actual:  txSize,
+			Maximum: v.options.MaxTransactionByteSize,
+		}
+	}
+	return nil
+}
+
+func (v *TransactionValidator) checkMissingFields(tx *flow.TransactionBody) error {
+	missingFields := tx.MissingFields()
+
+	if v.options.AllowEmptyReferenceBlockID {
+		missingFields = remove(missingFields, flow.TransactionFieldRefBlockID.String())
+	}
+
+	if len(missingFields) > 0 {
+		return IncompleteTransactionError{MissingFields: missingFields}
+	}
+
+	return nil
+}
+
+func (v *TransactionValidator) checkGasLimit(tx *flow.TransactionBody) error {
+	// if the service account is the payer of the transaction, accept any gas limit.
+	// note that even though we don't enforce any limit here, the execution node
+	// later enforces a max value for any transaction
+	if tx.Payer == v.serviceAccountAddress {
+		return nil
+	}
+	if tx.GasLimit > v.options.MaxGasLimit || tx.GasLimit == 0 {
+		return InvalidGasLimitError{
+			Actual:  tx.GasLimit,
+			Maximum: v.options.MaxGasLimit,
+		}
+	}
+
+	return nil
+}
+
+// checkExpiry checks whether a transaction's reference block ID is
+// valid. Returns nil if the reference is valid, returns an error if the
+// reference is invalid or we failed to check it.
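+//
+// For example, with Expiry=600 and ExpiryBuffer=30 (illustrative numbers, not
+// the configured defaults), a transaction whose reference block is more than
+// 570 blocks behind the latest finalized block is rejected.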
+func (v *TransactionValidator) checkExpiry(tx *flow.TransactionBody) error { + if tx.ReferenceBlockID == flow.ZeroID && v.options.AllowEmptyReferenceBlockID { + return nil + } + + // look up the reference block + ref, err := v.blocks.HeaderByID(tx.ReferenceBlockID) + if err != nil { + return fmt.Errorf("could not get reference block: %w", err) + } + + if ref == nil { + // the transaction references an unknown block - at this point we decide + // whether to consider it expired based on configuration + if v.options.AllowUnknownReferenceBlockID { + return nil + } + + return ErrUnknownReferenceBlock + } + + // get the latest finalized block we know about + final, err := v.blocks.FinalizedHeader() + if err != nil { + return fmt.Errorf("could not get finalized header: %w", err) + } + + diff := final.Height - ref.Height + // check for overflow + if ref.Height > final.Height { + diff = 0 + } + + // discard transactions that are expired, or that will expire sooner than + // our configured buffer allows + if uint(diff) > v.options.Expiry-v.options.ExpiryBuffer { + return ExpiredTransactionError{ + RefHeight: ref.Height, + FinalHeight: final.Height, + } + } + + return nil +} + +func (v *TransactionValidator) checkCanBeParsed(tx *flow.TransactionBody) (err error) { + defer func() { + if r := recover(); r != nil { + if panicErr, ok := r.(error); ok { + err = InvalidScriptError{ParserErr: panicErr} + } else { + err = InvalidScriptError{ParserErr: fmt.Errorf("non-error-typed panic: %v", r)} + } + } + }() + if v.options.CheckScriptsParse { + _, parseErr := parser.ParseProgram(nil, tx.Script, parser.Config{}) + if parseErr != nil { + return InvalidScriptError{ParserErr: parseErr} + } + } + + return nil +} + +func (v *TransactionValidator) checkAddresses(tx *flow.TransactionBody) error { + for _, address := range append(tx.Authorizers, tx.Payer) { + // we check whether this is a valid output of the address generator + if !v.chain.IsValid(address) { + return InvalidAddressError{Address: address} + } + } + + return nil +} + +// every key (account, key index combination) can only be used once for signing +func (v *TransactionValidator) checkSignatureDuplications(tx *flow.TransactionBody) error { + type uniqueKey struct { + address flow.Address + index uint32 + } + observedSigs := make(map[uniqueKey]bool) + for _, sig := range append(tx.PayloadSignatures, tx.EnvelopeSignatures...) { + if observedSigs[uniqueKey{sig.Address, sig.KeyIndex}] { + return DuplicatedSignatureError{Address: sig.Address, KeyIndex: sig.KeyIndex} + } + observedSigs[uniqueKey{sig.Address, sig.KeyIndex}] = true + } + return nil +} + +func (v *TransactionValidator) checkSignatureFormat(tx *flow.TransactionBody) error { + for _, signature := range tx.PayloadSignatures { + valid, _ := signature.ValidateExtensionDataAndReconstructMessage(tx.PayloadMessage()) + if !valid { + return InvalidAuthenticationSchemeFormatError{Signature: signature} + } + } + + for _, signature := range tx.EnvelopeSignatures { + valid, _ := signature.ValidateExtensionDataAndReconstructMessage(tx.EnvelopeMessage()) + if !valid { + return InvalidAuthenticationSchemeFormatError{Signature: signature} + } + } + + for _, signature := range append(tx.PayloadSignatures, tx.EnvelopeSignatures...) { + // check the format of the signature is valid. + // a valid signature is an ECDSA signature of either P-256 or secp256k1 curve. 
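+		// (both curves use a raw r||s signature encoding; SignatureFormatCheck is
+		// only a sanity check on the encoding, not a cryptographic verification)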
+ ecdsaSignature := signature.Signature + + // check if the signature could be a P-256 signature + valid, err := crypto.SignatureFormatCheck(crypto.ECDSAP256, ecdsaSignature) + if err != nil { + return fmt.Errorf("could not check the signature format (%s): %w", signature, err) + } + if valid { + continue + } + + // check if the signature could be a secp256k1 signature + valid, err = crypto.SignatureFormatCheck(crypto.ECDSASecp256k1, ecdsaSignature) + if err != nil { + return fmt.Errorf("could not check the signature format (%s): %w", signature, err) + } + if valid { + continue + } + + return InvalidRawSignatureError{Signature: signature} + } + + return nil +} + +func (v *TransactionValidator) checkSufficientBalanceToPayForTransaction(ctx context.Context, tx *flow.TransactionBody) error { + if v.options.CheckPayerBalanceMode == Disabled { + return nil + } + + header, err := v.blocks.SealedHeader() + if err != nil { + return fmt.Errorf("could not fetch block header: %w", err) + } + + indexedHeight, err := v.blocks.IndexedHeight() + if err != nil { + return fmt.Errorf("could not get indexed height: %w", err) + } + + // we use latest indexed block to get the most up-to-date state data available for executing scripts. + // check here to make sure indexing is within an acceptable tolerance of sealing to avoid issues + // if indexing falls behind + sealedHeight := header.Height + if indexedHeight < sealedHeight-DefaultSealedIndexedHeightThreshold { + return IndexedHeightFarBehindError{SealedHeight: sealedHeight, IndexedHeight: indexedHeight} + } + + payerAddress := cadence.NewAddress(tx.Payer) + inclusionEffort := cadence.UFix64(tx.InclusionEffort()) + gasLimit := cadence.UFix64(tx.GasLimit) + + args, err := cadenceutils.EncodeArgs([]cadence.Value{payerAddress, inclusionEffort, gasLimit}) + if err != nil { + return fmt.Errorf("failed to encode cadence args for script executor: %w", err) + } + + result, err := v.scriptExecutor.ExecuteAtBlockHeight(ctx, v.verifyPayerBalanceScript, args, indexedHeight) + if err != nil { + return fmt.Errorf("script finished with error: %w", err) + } + + value, err := jsoncdc.Decode(nil, result) + if err != nil { + return fmt.Errorf("could not decode result value returned by script executor: %w", err) + } + + canExecuteTransaction, requiredBalance, _, err := fvm.DecodeVerifyPayerBalanceResult(value) + if err != nil { + return fmt.Errorf("could not parse cadence value returned by script executor: %w", err) + } + + // return no error if payer has sufficient balance + if canExecuteTransaction { + return nil + } + + return InsufficientBalanceError{Payer: tx.Payer, RequiredBalance: requiredBalance} +} + +func remove(s []string, r string) []string { + for i, v := range s { + if v == r { + return append(s[:i], s[i+1:]...) 
+ } + } + return s +} diff --git a/access/validator/validator_test.go b/access/validator/validator_test.go new file mode 100644 index 00000000000..9ef1bb17c97 --- /dev/null +++ b/access/validator/validator_test.go @@ -0,0 +1,309 @@ +package validator_test + +import ( + "context" + "encoding/hex" + "errors" + "testing" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + jsoncdc "github.com/onflow/cadence/encoding/json" + + "github.com/onflow/flow-go/access/validator" + validatormock "github.com/onflow/flow-go/access/validator/mock" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + execmock "github.com/onflow/flow-go/module/execution/mock" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestTransactionValidatorSuite(t *testing.T) { + suite.Run(t, new(TransactionValidatorSuite)) +} + +type TransactionValidatorSuite struct { + suite.Suite + blocks *validatormock.Blocks + header *flow.Header + chain flow.Chain + validatorOptions validator.TransactionValidationOptions + metrics module.TransactionValidationMetrics +} + +func (s *TransactionValidatorSuite) SetupTest() { + s.metrics = metrics.NewNoopCollector() + s.blocks = validatormock.NewBlocks(s.T()) + assert.NotNil(s.T(), s.blocks) + + s.header = unittest.BlockHeaderFixture() + assert.NotNil(s.T(), s.header) + + s.blocks. + On("HeaderByID", mock.Anything). + Return(s.header, nil) + + s.blocks. + On("FinalizedHeader"). + Return(s.header, nil) + + s.blocks. + On("SealedHeader"). + Return(s.header, nil) + + s.chain = flow.Testnet.Chain() + s.validatorOptions = validator.TransactionValidationOptions{ + CheckPayerBalanceMode: validator.EnforceCheck, + MaxTransactionByteSize: flow.DefaultMaxTransactionByteSize, + MaxCollectionByteSize: flow.DefaultMaxCollectionByteSize, + } +} + +var verifyPayerBalanceResultType = cadence.NewStructType( + common.StringLocation("test"), + "VerifyPayerBalanceResult", + []cadence.Field{ + { + Identifier: fvm.VerifyPayerBalanceResultTypeCanExecuteTransactionFieldName, + Type: cadence.BoolType, + }, + { + Identifier: fvm.VerifyPayerBalanceResultTypeRequiredBalanceFieldName, + Type: cadence.UFix64Type, + }, + { + Identifier: fvm.VerifyPayerBalanceResultTypeMaximumTransactionFeesFieldName, + Type: cadence.UFix64Type, + }, + }, + nil, +) + +func (s *TransactionValidatorSuite) TestTransactionValidator_ScriptExecutorInternalError() { + scriptExecutor := execmock.NewScriptExecutor(s.T()) + assert.NotNil(s.T(), scriptExecutor) + + s.blocks. + On("IndexedHeight"). + Return(s.header.Height, nil) + + scriptExecutor. + On("ExecuteAtBlockHeight", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil, errors.New("script executor internal error")). 
+		Once()
+
+	validator, err := validator.NewTransactionValidator(s.blocks, s.chain, s.metrics, s.validatorOptions, scriptExecutor)
+	assert.NoError(s.T(), err)
+	assert.NotNil(s.T(), validator)
+
+	txBody := unittest.TransactionBodyFixture()
+
+	err = validator.Validate(context.Background(), &txBody)
+	assert.NoError(s.T(), err)
+}
+
+func (s *TransactionValidatorSuite) TestTransactionValidator_SufficientBalance() {
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+
+	canExecuteTransaction := cadence.Bool(true)
+	requiredBalance := cadence.UFix64(1000)
+	maximumTransactionFees := cadence.UFix64(1000)
+	fields := []cadence.Value{canExecuteTransaction, requiredBalance, maximumTransactionFees}
+
+	actualResponseValue := cadence.NewStruct(fields).WithType(verifyPayerBalanceResultType)
+	actualResponse, err := jsoncdc.Encode(actualResponseValue)
+	assert.NoError(s.T(), err)
+
+	s.blocks.
+		On("IndexedHeight").
+		Return(s.header.Height, nil)
+
+	scriptExecutor.
+		On("ExecuteAtBlockHeight", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+		Return(actualResponse, nil).
+		Once()
+
+	validator, err := validator.NewTransactionValidator(s.blocks, s.chain, s.metrics, s.validatorOptions, scriptExecutor)
+	assert.NoError(s.T(), err)
+	assert.NotNil(s.T(), validator)
+
+	txBody := unittest.TransactionBodyFixture()
+
+	err = validator.Validate(context.Background(), &txBody)
+	assert.NoError(s.T(), err)
+}
+
+func (s *TransactionValidatorSuite) TestTransactionValidator_InsufficientBalance() {
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+
+	canExecuteTransaction := cadence.Bool(false)
+	requiredBalance := cadence.UFix64(1000)
+	maximumTransactionFees := cadence.UFix64(1000)
+	fields := []cadence.Value{canExecuteTransaction, requiredBalance, maximumTransactionFees}
+
+	actualResponseValue := cadence.NewStruct(fields).WithType(verifyPayerBalanceResultType)
+	actualResponse, err := jsoncdc.Encode(actualResponseValue)
+	assert.NoError(s.T(), err)
+
+	s.blocks.
+		On("IndexedHeight").
+		Return(s.header.Height, nil)
+
+	scriptExecutor.
+		On("ExecuteAtBlockHeight", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+		Return(actualResponse, nil).Twice()
+
+	actualAccountResponse, err := unittest.AccountFixture()
+	assert.NoError(s.T(), err)
+	assert.NotNil(s.T(), actualAccountResponse)
+
+	validateTx := func() error {
+		txBody := unittest.TransactionBodyFixture()
+		validator, err := validator.NewTransactionValidator(s.blocks, s.chain, s.metrics, s.validatorOptions, scriptExecutor)
+		assert.NoError(s.T(), err)
+		assert.NotNil(s.T(), validator)
+
+		return validator.Validate(context.Background(), &txBody)
+	}
+
+	s.Run("with enforce check", func() {
+		err := validateTx()
+
+		expectedError := validator.InsufficientBalanceError{
+			Payer:           unittest.AddressFixture(),
+			RequiredBalance: requiredBalance,
+		}
+		assert.ErrorIs(s.T(), err, expectedError)
+	})
+
+	s.Run("with warn check", func() {
+		s.validatorOptions.CheckPayerBalanceMode = validator.WarnCheck
+		err := validateTx()
+		assert.NoError(s.T(), err)
+	})
+}
+
+func (s *TransactionValidatorSuite) TestTransactionValidator_SealedIndexedHeightThresholdLimit() {
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+
+	// set the indexed height to lag behind the sealed height by more than the
+	// allowed DefaultSealedIndexedHeightThreshold
+	indexedHeight := s.header.Height - 40
+
+	s.blocks.
+		On("IndexedHeight").
+		Return(indexedHeight, nil)
+
+	validator, err := validator.NewTransactionValidator(s.blocks, s.chain, s.metrics, s.validatorOptions, scriptExecutor)
+	assert.NoError(s.T(), err)
+	assert.NotNil(s.T(), validator)
+
+	txBody := unittest.TransactionBodyFixture()
+
+	err = validator.Validate(context.Background(), &txBody)
+	assert.NoError(s.T(), err)
+}
+
+func (s *TransactionValidatorSuite) TestTransactionValidator_SignatureValidation() {
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+
+	// set the indexed height to lag behind the sealed height by more than the
+	// allowed DefaultSealedIndexedHeightThreshold
+	indexedHeight := s.header.Height - 40
+
+	s.blocks.
+		On("IndexedHeight").
+		Return(indexedHeight, nil)
+
+	validator, err := validator.NewTransactionValidator(s.blocks, s.chain, s.metrics, s.validatorOptions, scriptExecutor)
+	assert.NoError(s.T(), err)
+	assert.NotNil(s.T(), validator)
+
+	// valid format signature
+	ecdsaSignatureFixtureStr := "0f9c37c155fbb656d2049a8349a0fcd2dedfe27d1588c2f635f60df2052141c17f2e195790c55ce42c4f73b5cc7e194060fee641818e8d640e69f92d4777518d"
+	ecdsaSignatureFixture, err := hex.DecodeString(ecdsaSignatureFixtureStr)
+	require.NoError(s.T(), err)
+
+	address := unittest.AddressFixture()
+	transactionBody := flow.TransactionBody{
+		Script: []byte("some script"),
+		Arguments: [][]byte{
+			[]byte("arg1"),
+		},
+		ReferenceBlockID: flow.HashToID([]byte("some block id")),
+		GasLimit:         1000,
+		Payer:            address,
+		ProposalKey: flow.ProposalKey{
+			Address:        address,
+			KeyIndex:       0,
+			SequenceNumber: 0,
+		},
+		Authorizers: []flow.Address{
+			address,
+		},
+		PayloadSignatures: []flow.TransactionSignature{
+			{
+				Address:       address,
+				KeyIndex:      0,
+				Signature:     ecdsaSignatureFixture,
+				SignerIndex:   0,
+				ExtensionData: unittest.RandomBytes(3),
+			},
+		},
+		EnvelopeSignatures: []flow.TransactionSignature{
+			{
+				Address:       address,
+				KeyIndex:      1,
+				Signature:     ecdsaSignatureFixture,
+				SignerIndex:   0,
+				ExtensionData: unittest.RandomBytes(3),
+			},
+		},
+	}
+
+	// detailed cases of signature validation are tested in model/flow/transaction_test.go
+	cases := []struct {
+		payloadSigExtensionData  []byte
+		EnvelopeSigExtensionData []byte
+		shouldError              bool
+	}{
+		{
+			// happy path
+			payloadSigExtensionData:  nil,
+			EnvelopeSigExtensionData: nil,
+			shouldError:              false,
+		},
+		{
+			// invalid payload signature extension data
+			payloadSigExtensionData:  []byte{10},
+			EnvelopeSigExtensionData: nil,
+			shouldError:              true,
+		}, {
+			// invalid envelope signature extension data
+			payloadSigExtensionData:  nil,
+			EnvelopeSigExtensionData: []byte{10},
+			shouldError:              true,
+		}, {
+			// invalid payload and envelope signature extension data
+			payloadSigExtensionData:  []byte{10},
+			EnvelopeSigExtensionData: []byte{10},
+			shouldError:              true,
+		},
+	}
+	// test all cases
+	for _, c := range cases {
+		transactionBody.PayloadSignatures[0].ExtensionData = c.payloadSigExtensionData
+		transactionBody.EnvelopeSignatures[0].ExtensionData = c.EnvelopeSigExtensionData
+		err = validator.Validate(context.Background(), &transactionBody)
+		if c.shouldError {
+			assert.Error(s.T(), err)
+		} else {
+			assert.NoError(s.T(), err)
+		}
+	}
+}
diff --git a/actions/private-setup/action.yml b/actions/private-setup/action.yml
new file mode 100644
index 00000000000..9598ebd9f35
--- /dev/null
+++ b/actions/private-setup/action.yml
@@ -0,0 +1,29 @@
+name: "Private Build Setup"
+description: "Checks and configures the environment for building private dependencies"
+inputs:
+  cadence_deploy_key:
+    description: "Deploy Key for Private Cadence Repo"
+    required: true
+
go_private_value: + description: "The value for GOPRIVATE" + required: false + default: "github.com/onflow/*-internal" +runs: + using: "composite" + steps: + - name: Load deploy key + uses: webfactory/ssh-agent@v0.5.3 + with: + ssh-private-key: "${{ inputs.cadence_deploy_key }}" + known-hosts: "github.com" + + - name: Configure git for SSH + shell: bash + run: | + git config --global url."git@github.com:".insteadOf "https://github.com/" + + - name: Configure GOPRIVATE env + shell: bash + run: | + echo "GOPRIVATE=${{ inputs.go_private_value }}" >> $GITHUB_ENV + diff --git a/actions/promote-images/action.yml b/actions/promote-images/action.yml new file mode 100644 index 00000000000..e230f3e8b6d --- /dev/null +++ b/actions/promote-images/action.yml @@ -0,0 +1,71 @@ +name: Promote Image to another Registry +description: Pull image from private registry and push to another registry + +inputs: + gcp_credentials: + description: 'GCP Credentials JSON' + required: true + private_registry: + description: 'Private container registry URL' + required: true + private_registry_host: + description: 'Private Google Artifact Registry hostname' + required: true + promotion_registry: + description: 'Registry to promote images to' + required: true + role: + description: 'Role to promote' + required: true + tags: + description: 'Comma-separated list of tags to use' + required: true + +runs: + using: "composite" + steps: + - name: Authenticate with Google Cloud + uses: google-github-actions/auth@v1 + with: + credentials_json: ${{ inputs.gcp_credentials }} + + - name: Set up Google Cloud SDK + uses: google-github-actions/setup-gcloud@v1 + + - name: Authenticate with Private Docker Registry + run: | + gcloud auth configure-docker ${{ inputs.private_registry_host }} + shell: bash + + - name: Pull and Tag Images + shell: bash + run: | + # Convert comma-separated tags input into an array + IFS=',' read -ra TAGS <<< "${{ inputs.tags }}" + + # Loop through each tag and pull the image from the private registry, then tag it for the registry to promote to + for TAG in "${TAGS[@]}"; do + IMAGE_PRIVATE="${{ inputs.private_registry }}/${{ inputs.role }}:${TAG}" + IMAGE_PROMOTION="${{ inputs.promotion_registry }}/${{ inputs.role }}:${TAG}" + echo "Processing ${IMAGE_PRIVATE} -> ${IMAGE_PROMOTION}" + docker pull "${IMAGE_PRIVATE}" + docker tag "${IMAGE_PRIVATE}" "${IMAGE_PROMOTION}" + done + + - name: Authenticate with registry to promote to + run: | + gcloud auth configure-docker + shell: bash + + - name: Push Images to registry to promote to + shell: bash + run: | + # Convert comma-separated tags input into an array + IFS=',' read -ra TAGS <<< "${{ inputs.tags }}" + # Loop through each tag and push the image to the promotion registry + for TAG in "${TAGS[@]}"; do + IMAGE_PROMOTION="${{ inputs.promotion_registry }}/${{ inputs.role }}:${TAG}" + echo "Pushing Image ${IMAGE_PROMOTION} to Public registry" + docker push "${IMAGE_PROMOTION}" + done + diff --git a/admin/README.md b/admin/README.md index 05d9901f9f4..25632dbf1dd 100644 --- a/admin/README.md +++ b/admin/README.md @@ -21,11 +21,6 @@ libp2p, badger, and other golog-based libraries: curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "set-golog-level", "data": "debug"}' ``` -### To turn on profiler -``` -curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "set-profiler-enabled", "data": true}' -``` - ### To get the latest finalized block ``` curl localhost:9002/admin/run_command -H 
'Content-Type: application/json' -d '{"commandName": "read-blocks", "data": { "block": "final" }}'
@@ -51,6 +46,17 @@ curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"
 curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "get-transactions", "data": { "start-height": 340, "end-height": 343 }}'
 ```
 
+### To get blocks for ranges (works for any node type; for the block payload, only the collection IDs are printed)
+```
+curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "read-range-blocks", "data": { "start-height": 105172044, "end-height": 105172047 }}'
+```
+
+### To get cluster blocks for ranges (only available on collection nodes; only the transaction IDs are printed)
+
+```
+curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "read-range-cluster-blocks", "data": { "chain-id": "cluster-576-e8af4702d837acb77868a95f61eb212f90b14c6b7d61c89f48949fd27d1a269b", "start-height": 25077, "end-height": 25080 }}'
+```
+
 ### To get execution data for a block by execution_data_id (only available execution nodes and access nodes with execution sync enabled)
 ```
 curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "read-execution-data", "data": { "execution_data_id": "2fff2b05e7226c58e3c14b3549ab44a354754761c5baa721ea0d1ea26d069dc4" }}'
@@ -71,6 +77,7 @@ curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"
 ```
 curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "set-config", "data": {"consensus-required-approvals-for-sealing": 1}}'
 ```
+TODO remove
 #### Example: set block rate delay to 750ms
 ```
 curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "set-config", "data": {"hotstuff-block-rate-delay": "750ms"}}'
@@ -88,3 +95,39 @@ curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"
 ```
 curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "stop-at-height", "data": { "height": 1111, "crash": false }}'
 ```
+
+### Trigger checkpoint creation on execution nodes
+```
+curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "trigger-checkpoint"}'
+```
+
+### Add/remove/get addresses to rate-limit payers from adding transactions to collection nodes' mempool
+```
+curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "ingest-tx-rate-limit", "data": { "command": "add", "addresses": "a08d349e8037d6e5,e6765c6113547fb7" }}'
+curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "ingest-tx-rate-limit", "data": { "command": "remove", "addresses": "a08d349e8037d6e5,e6765c6113547fb7" }}'
+curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "ingest-tx-rate-limit", "data": { "command": "get" }}'
+curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "ingest-tx-rate-limit", "data": { "command": "get_config" }}'
+curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "ingest-tx-rate-limit", "data": { "command": "set_config", "limit": 1, "burst": 1 }}'
+```
+
+### To create a protocol snapshot for the latest checkpoint (execution node only)
+```
+curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": 
"protocol-snapshot"}' +curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "protocol-snapshot", "data": { "blocks-to-skip": 10 }}' +``` + +### To backfill transaction error messages +``` +curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "backfill-tx-error-messages", "data": { "start-height": 340, "end-height": 343, "execution-node-ids":["ec7b934df29248d574ae1cc33ae77f22f0fcf96a79e009224c46374d1837824e", "8cbdc8d24a28899a33140cb68d4146cd6f2f6c18c57f54c299f26351d126919e"] }}' +``` + +### Trigger chunk data pack pebble database checkpoint +``` +curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "create-chunk-data-packs-checkpoint" }' +``` + +### Trigger pebble protocol database checkpoints +Useful for reading protocol state data from the checkpoints using the read-badger util without stopping the node process. +``` +curl localhost:9002/admin/run_command -H 'Content-Type: application/json' -d '{"commandName": "create-pebble-checkpoint" }' +``` diff --git a/admin/admin/admin.pb.go b/admin/admin/admin.pb.go index c886224d85c..97279dc1ac4 100644 --- a/admin/admin/admin.pb.go +++ b/admin/admin/admin.pb.go @@ -7,12 +7,13 @@ package admin import ( + reflect "reflect" + sync "sync" + _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" structpb "google.golang.org/protobuf/types/known/structpb" - reflect "reflect" - sync "sync" ) const ( diff --git a/admin/admin/admin_grpc.pb.go b/admin/admin/admin_grpc.pb.go index 43c25676ec7..4ca02408fe7 100644 --- a/admin/admin/admin_grpc.pb.go +++ b/admin/admin/admin_grpc.pb.go @@ -4,6 +4,7 @@ package admin import ( context "context" + grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/admin/command_runner.go b/admin/command_runner.go index c827fb5ff4c..a16c0085ff0 100644 --- a/admin/command_runner.go +++ b/admin/command_runner.go @@ -239,6 +239,7 @@ func (r *CommandRunner) runAdminServer(ctx irrecoverable.SignalerContext) error for _, name := range []string{"allocs", "block", "goroutine", "heap", "mutex", "threadcreate"} { mux.HandleFunc(fmt.Sprintf("/debug/pprof/%s", name), pprof.Handler(name).ServeHTTP) } + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) mux.HandleFunc("/debug/pprof/trace", pprof.Trace) httpServer := &http.Server{ @@ -283,8 +284,16 @@ func (r *CommandRunner) runAdminServer(ctx irrecoverable.SignalerContext) error defer shutdownCancel() if err := httpServer.Shutdown(shutdownCtx); err != nil { - r.logger.Err(err).Msg("failed to shutdown http server") - ctx.Throw(err) + if errors.Is(err, context.DeadlineExceeded) { + r.logger.Warn().Err(err).Msg("failed to shutdown http server gracefully; forcing close") + if closeErr := httpServer.Close(); closeErr != nil { + r.logger.Err(closeErr).Msg("failed to force close http server") + ctx.Throw(closeErr) + } + } else { + r.logger.Err(err).Msg("failed to shutdown http server") + ctx.Throw(err) + } } } }() diff --git a/admin/commands/collection/tx_rate_limiter.go b/admin/commands/collection/tx_rate_limiter.go new file mode 100644 index 00000000000..c767f080156 --- /dev/null +++ b/admin/commands/collection/tx_rate_limiter.go @@ -0,0 +1,122 @@ +package collection + +import ( + "context" + "fmt" + + "github.com/rs/zerolog/log" + "golang.org/x/time/rate" + + 
"github.com/onflow/flow-go/admin" + "github.com/onflow/flow-go/admin/commands" + "github.com/onflow/flow-go/engine/collection/ingest" +) + +var _ commands.AdminCommand = (*TxRateLimitCommand)(nil) + +// TxRateLimitCommand will adjust the transaction ingest rate limiter. +type TxRateLimitCommand struct { + limiter *ingest.AddressRateLimiter +} + +type TxRateLimitCommandAddress struct { + Addresses []string +} + +func NewTxRateLimitCommand(limiter *ingest.AddressRateLimiter) *TxRateLimitCommand { + return &TxRateLimitCommand{ + limiter: limiter, + } +} + +func (s *TxRateLimitCommand) Handler(_ context.Context, req *admin.CommandRequest) (interface{}, error) { + input, ok := req.Data.(map[string]interface{}) + if !ok { + return admin.NewInvalidAdminReqFormatError("expected { \"command\": \"add|remove|get|get_config|set_config\", \"addresses\": \"addresses\""), nil + } + + command, ok := input["command"] + if !ok { + return admin.NewInvalidAdminReqErrorf("the \"command\" field is empty, must be one of add|remove|get|get_config|set_config"), nil + } + + cmd, ok := command.(string) + if !ok { + return admin.NewInvalidAdminReqErrorf("the \"command\" field is not string, must be one of add|remove|get|get_config|set_config"), nil + } + + if cmd == "get" { + list := s.limiter.GetAddresses() + return fmt.Sprintf("rate limited list contains a total of %d addresses: %v", len(list), list), nil + } + + if cmd == "add" || cmd == "remove" { + result, ok := input["addresses"] + if !ok { + return admin.NewInvalidAdminReqErrorf("the \"addresses\" field is empty, must be hex formated addresses, can be splitted by \",\""), nil + } + addresses, ok := result.(string) + if !ok { + return admin.NewInvalidAdminReqErrorf("the \"addresses\" field is not string, must be hex formated addresses, can be splitted by \",\""), nil + } + + log.Info().Msgf("admintool %v addresses: %v", cmd, addresses) + + resp, err := s.AddOrRemove(cmd, addresses) + if err != nil { + return nil, err + } + return resp, nil + } + + if cmd == "get_config" { + limit, burst := s.limiter.GetLimitConfig() + return fmt.Sprintf("limit: %v, burst: %v", limit, burst), nil + } + + if cmd == "set_config" { + dataLimit, limit_ok := input["limit"] + dataBurst, burst_ok := input["burst"] + if !burst_ok || !limit_ok { + return admin.NewInvalidAdminReqErrorf("the \"limit\" or \"burst\" field is empty, must be number"), nil + } + limit, ok := dataLimit.(float64) + if !ok { + return admin.NewInvalidAdminReqErrorf("the \"limit\" field is not number: %v", dataLimit), nil + } + + burst, ok := dataBurst.(float64) + if !ok { + return admin.NewInvalidAdminReqErrorf("the \"burst\" field is not number: %v", dataBurst), nil + } + + oldLimit, oldBurst := s.limiter.GetLimitConfig() + log.Info().Msgf("admintool set_config limit: %v, burst: %v, old limit: %v, old burst: %v", limit, burst, oldLimit, oldBurst) + s.limiter.SetLimitConfig(rate.Limit(limit), int(burst)) + return fmt.Sprintf("succesfully set limit %v, burst %v", limit, burst), nil + } + + return fmt.Sprintf( + "invalid command field (%s), must be either \"add\" or \"remove\" or \"get\" or \"get_config\" or \"set_config\"", + cmd), nil +} + +func (s *TxRateLimitCommand) Validator(req *admin.CommandRequest) error { + return nil +} + +func (s *TxRateLimitCommand) AddOrRemove(command string, addresses string) (string, error) { + addrList, err := ingest.ParseAddresses(addresses) + if err != nil { + return "", err + } + + if command == "add" { + ingest.AddAddresses(s.limiter, addrList) + return fmt.Sprintf("added %d 
addresses", len(addrList)), nil + } + + // command == "remove" + ingest.RemoveAddresses(s.limiter, addrList) + return fmt.Sprintf("removed %d addresses", len(addrList)), nil +} diff --git a/admin/commands/common/read_protocol_state_blocks.go b/admin/commands/common/read_protocol_state_blocks.go index 06c80693331..288b3cf010a 100644 --- a/admin/commands/common/read_protocol_state_blocks.go +++ b/admin/commands/common/read_protocol_state_blocks.go @@ -122,7 +122,7 @@ func (r *ReadProtocolStateBlocksCommand) Handler(_ context.Context, req *admin.C } result = append(result, block) - firstHeight := int64(block.Header.Height) + firstHeight := int64(block.Height) for height := firstHeight - 1; height >= 0 && height > firstHeight-int64(data.numBlocksToQuery); height-- { block, err = r.getBlockByHeight(uint64(height)) diff --git a/admin/commands/common/read_protocol_state_blocks_test.go b/admin/commands/common/read_protocol_state_blocks_test.go index bbfd414a8ac..e17e579c3a0 100644 --- a/admin/commands/common/read_protocol_state_blocks_test.go +++ b/admin/commands/common/read_protocol_state_blocks_test.go @@ -55,28 +55,28 @@ func (suite *ReadProtocolStateBlocksSuite) SetupTest() { var blocks []*flow.Block - genesis := unittest.GenesisFixture() + genesis := unittest.Block.Genesis(flow.Emulator) blocks = append(blocks, genesis) - sealed := unittest.BlockWithParentFixture(genesis.Header) + sealed := unittest.BlockWithParentFixture(genesis.ToHeader()) blocks = append(blocks, sealed) - final := unittest.BlockWithParentFixture(sealed.Header) + final := unittest.BlockWithParentFixture(sealed.ToHeader()) blocks = append(blocks, final) - final = unittest.BlockWithParentFixture(final.Header) + final = unittest.BlockWithParentFixture(final.ToHeader()) blocks = append(blocks, final) - final = unittest.BlockWithParentFixture(final.Header) + final = unittest.BlockWithParentFixture(final.ToHeader()) blocks = append(blocks, final) suite.allBlocks = blocks suite.sealed = sealed suite.final = final - suite.state.On("Final").Return(createSnapshot(final.Header)) - suite.state.On("Sealed").Return(createSnapshot(sealed.Header)) + suite.state.On("Final").Return(createSnapshot(final.ToHeader())) + suite.state.On("Sealed").Return(createSnapshot(sealed.ToHeader())) suite.state.On("AtBlockID", mock.Anything).Return( func(blockID flow.Identifier) protocol.Snapshot { for _, block := range blocks { if block.ID() == blockID { - return createSnapshot(block.Header) + return createSnapshot(block.ToHeader()) } } return invalid.NewSnapshot(fmt.Errorf("invalid block ID: %v", blockID)) @@ -86,7 +86,7 @@ func (suite *ReadProtocolStateBlocksSuite) SetupTest() { func(height uint64) protocol.Snapshot { if int(height) < len(blocks) { block := blocks[height] - return createSnapshot(block.Header) + return createSnapshot(block.ToHeader()) } return invalid.NewSnapshot(fmt.Errorf("invalid height: %v", height)) }, diff --git a/admin/commands/execution/stop_at_height.go b/admin/commands/execution/stop_at_height.go index b39b03e904e..fd88a8c4f10 100644 --- a/admin/commands/execution/stop_at_height.go +++ b/admin/commands/execution/stop_at_height.go @@ -7,7 +7,7 @@ import ( "github.com/onflow/flow-go/admin" "github.com/onflow/flow-go/admin/commands" - "github.com/onflow/flow-go/engine/execution/ingestion" + "github.com/onflow/flow-go/engine/execution/ingestion/stop" ) var _ commands.AdminCommand = (*StopAtHeightCommand)(nil) @@ -15,11 +15,11 @@ var _ commands.AdminCommand = (*StopAtHeightCommand)(nil) // StopAtHeightCommand will send a signal to 
engine to stop/crash EN
 // at given height
 type StopAtHeightCommand struct {
-	stopControl *ingestion.StopControl
+	stopControl *stop.StopControl
 }
 
 // NewStopAtHeightCommand creates a new StopAtHeightCommand object
-func NewStopAtHeightCommand(sah *ingestion.StopControl) *StopAtHeightCommand {
+func NewStopAtHeightCommand(sah *stop.StopControl) *StopAtHeightCommand {
 	return &StopAtHeightCommand{
 		stopControl: sah,
 	}
@@ -36,13 +36,22 @@ type StopAtHeightReq struct {
 func (s *StopAtHeightCommand) Handler(_ context.Context, req *admin.CommandRequest) (interface{}, error) {
 	sah := req.ValidatorData.(StopAtHeightReq)
 
-	oldHeight, oldCrash, err := s.stopControl.SetStopHeight(sah.height, sah.crash)
+	oldParams := s.stopControl.GetStopParameters()
+	newParams := stop.StopParameters{
+		StopBeforeHeight: sah.height,
+		ShouldCrash:      sah.crash,
+	}
+
+	err := s.stopControl.SetStopParameters(newParams)
 	if err != nil {
 		return nil, err
 	}
 
-	log.Info().Msgf("admintool: EN will stop at height %d and crash: %t, previous values: %d %t", sah.height, sah.crash, oldHeight, oldCrash)
+	log.Info().
+		Interface("newParams", newParams).
+		Interface("oldParams", oldParams).
+		Msgf("admintool: new EN stop parameters set")
 
 	return "ok", nil
 }
diff --git a/admin/commands/execution/stop_at_height_test.go b/admin/commands/execution/stop_at_height_test.go
index 961d19ee452..f78858d97c7 100644
--- a/admin/commands/execution/stop_at_height_test.go
+++ b/admin/commands/execution/stop_at_height_test.go
@@ -3,12 +3,15 @@ package execution
 import (
 	"context"
 	"testing"
+	"time"
 
 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/onflow/flow-go/admin"
-	"github.com/onflow/flow-go/engine/execution/ingestion"
+	"github.com/onflow/flow-go/engine"
+	"github.com/onflow/flow-go/engine/execution/ingestion/stop"
+	"github.com/onflow/flow-go/model/flow"
 )
 
 func TestCommandParsing(t *testing.T) {
@@ -88,7 +91,18 @@ func TestCommandParsing(t *testing.T) {
 
 func TestCommandsSetsValues(t *testing.T) {
 
-	stopControl := ingestion.NewStopControl(zerolog.Nop(), false, 0)
+	stopControl := stop.NewStopControl(
+		engine.NewUnit(),
+		time.Second,
+		zerolog.Nop(),
+		nil,
+		nil,
+		nil,
+		nil,
+		&flow.Header{HeaderBody: flow.HeaderBody{Height: 1}},
+		false,
+		false,
+	)
 
 	cmd := NewStopAtHeightCommand(stopControl)
 
@@ -102,9 +116,9 @@ func TestCommandsSetsValues(t *testing.T) {
 	_, err := cmd.Handler(context.TODO(), req)
 	require.NoError(t, err)
 
-	height, crash := stopControl.GetStopHeight()
+	s := stopControl.GetStopParameters()
 
-	require.Equal(t, stopControl.GetState(), ingestion.StopControlSet)
-	require.Equal(t, uint64(37), height)
-	require.Equal(t, true, crash)
+	require.NotNil(t, s)
+	require.Equal(t, uint64(37), s.StopBeforeHeight)
+	require.Equal(t, true, s.ShouldCrash)
 }
diff --git a/admin/commands/state_synchronization/execute_script.go b/admin/commands/state_synchronization/execute_script.go
new file mode 100644
index 00000000000..237bb3907a7
--- /dev/null
+++ b/admin/commands/state_synchronization/execute_script.go
@@ -0,0 +1,98 @@
+package state_synchronization
+
+import (
+	"context"
+	"encoding/json"
+	"strconv"
+
+	"github.com/onflow/flow-go/admin"
+	"github.com/onflow/flow-go/admin/commands"
+	"github.com/onflow/flow-go/module/execution"
+)
+
+var _ commands.AdminCommand = (*ExecuteScriptCommand)(nil)
+
+type scriptData struct {
+	height    uint64
+	script    []byte
+	arguments [][]byte
+}
+
+type ExecuteScriptCommand struct {
+	scriptExecutor execution.ScriptExecutor
+}
+
+func (e *ExecuteScriptCommand) Handler(ctx 
context.Context, req *admin.CommandRequest) (interface{}, error) {
+	d := req.ValidatorData.(*scriptData)
+
+	result, err := e.scriptExecutor.ExecuteAtBlockHeight(context.Background(), d.script, d.arguments, d.height)
+	if err != nil {
+		return nil, err
+	}
+
+	return string(result), nil
+}
+
+// Validator validates the request.
+// Returns admin.InvalidAdminReqError for invalid/malformed requests.
+func (e *ExecuteScriptCommand) Validator(req *admin.CommandRequest) error {
+	input, ok := req.Data.(map[string]interface{})
+	if !ok {
+		return admin.NewInvalidAdminReqFormatError("expected map[string]any")
+	}
+
+	heightRaw, ok := input["height"]
+	if !ok {
+		return admin.NewInvalidAdminReqFormatError("missing required field 'height'")
+	}
+
+	scriptRaw, ok := input["script"]
+	if !ok {
+		return admin.NewInvalidAdminReqFormatError("missing required field 'script'")
+	}
+
+	argsRaw, ok := input["args"]
+	if !ok {
+		return admin.NewInvalidAdminReqFormatError("missing required field 'args'")
+	}
+
+	heightStr, ok := heightRaw.(string)
+	if !ok {
+		return admin.NewInvalidAdminReqFormatError("'height' must be string")
+	}
+
+	height, err := strconv.ParseUint(heightStr, 10, 64)
+	if err != nil {
+		return admin.NewInvalidAdminReqFormatError("'height' must be valid uint64 value", err)
+	}
+
+	scriptStr, ok := scriptRaw.(string)
+	if !ok {
+		return admin.NewInvalidAdminReqFormatError("'script' must be string")
+	}
+
+	argsStr, ok := argsRaw.(string)
+	if !ok {
+		return admin.NewInvalidAdminReqFormatError("'args' must be string")
+	}
+
+	args := make([][]byte, 0)
+	err = json.Unmarshal([]byte(argsStr), &args)
+	if err != nil {
+		return admin.NewInvalidAdminReqFormatError("'args' not valid JSON", err)
+	}
+
+	req.ValidatorData = &scriptData{
+		height:    height,
+		script:    []byte(scriptStr),
+		arguments: args,
+	}
+
+	return nil
+}
+
+func NewExecuteScriptCommand(scripts execution.ScriptExecutor) commands.AdminCommand {
+	return &ExecuteScriptCommand{
+		scripts,
+	}
+}
diff --git a/admin/commands/state_synchronization/read_execution_data.go b/admin/commands/state_synchronization/read_execution_data.go
index 04268cd6f89..5b3e4a98f75 100644
--- a/admin/commands/state_synchronization/read_execution_data.go
+++ b/admin/commands/state_synchronization/read_execution_data.go
@@ -24,7 +24,7 @@ type ReadExecutionDataCommand struct {
 func (r *ReadExecutionDataCommand) Handler(ctx context.Context, req *admin.CommandRequest) (interface{}, error) {
 	data := req.ValidatorData.(*requestData)
 
-	ed, err := r.executionDataStore.GetExecutionData(ctx, data.rootID)
+	ed, err := r.executionDataStore.Get(ctx, data.rootID)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get execution data: %w", err)
diff --git a/admin/commands/storage/backfill_tx_error_messages.go b/admin/commands/storage/backfill_tx_error_messages.go
new file mode 100644
index 00000000000..48ea8ff6de8
--- /dev/null
+++ b/admin/commands/storage/backfill_tx_error_messages.go
@@ -0,0 +1,220 @@
+package storage
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/admin"
+	"github.com/onflow/flow-go/admin/commands"
+	"github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/filter"
+	"github.com/onflow/flow-go/module/util"
+	"github.com/onflow/flow-go/state/protocol"
+)
+
+var _ commands.AdminCommand = (*BackfillTxErrorMessagesCommand)(nil)
+
+// backfillTxErrorMessagesRequest represents the input parameters for
+// backfilling transaction 
diff --git a/admin/commands/storage/backfill_tx_error_messages.go b/admin/commands/storage/backfill_tx_error_messages.go new file mode 100644 index 00000000000..48ea8ff6de8 --- /dev/null +++ b/admin/commands/storage/backfill_tx_error_messages.go @@ -0,0 +1,220 @@ +package storage + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/admin" + "github.com/onflow/flow-go/admin/commands" + "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/state/protocol" +) + +var _ commands.AdminCommand = (*BackfillTxErrorMessagesCommand)(nil) + +// backfillTxErrorMessagesRequest represents the input parameters for +// backfilling transaction error messages. +type backfillTxErrorMessagesRequest struct { + startHeight uint64 // Start height from which to begin backfilling. + endHeight uint64 // End height up to which backfilling is performed. + executionNodeIds flow.IdentitySkeletonList // List of execution node IDs to be used for backfilling. +} + +// BackfillTxErrorMessagesCommand executes a command to backfill +// transaction error messages by fetching them from execution nodes. +type BackfillTxErrorMessagesCommand struct { + log zerolog.Logger + state protocol.State + txErrorMessagesCore *tx_error_messages.TxErrorMessagesCore +} + +// NewBackfillTxErrorMessagesCommand creates a new instance of BackfillTxErrorMessagesCommand +func NewBackfillTxErrorMessagesCommand( + log zerolog.Logger, + state protocol.State, + txErrorMessagesCore *tx_error_messages.TxErrorMessagesCore, +) commands.AdminCommand { + return &BackfillTxErrorMessagesCommand{ + log: log.With().Str("command", "backfill-tx-error-messages").Logger(), + state: state, + txErrorMessagesCore: txErrorMessagesCore, + } +} + +// Validator validates the input for the backfill command. The input is validated +// for field types, boundaries, and coherence of start and end heights. +// +// Expected errors during normal operation: +// - admin.InvalidAdminReqError - if 'start-height' is greater than 'end-height', +// if the input format is invalid, or if an invalid execution node ID is provided. +func (b *BackfillTxErrorMessagesCommand) Validator(request *admin.CommandRequest) error { + input, ok := request.Data.(map[string]interface{}) + if !ok { + return admin.NewInvalidAdminReqFormatError("expected map[string]any") + } + + data := &backfillTxErrorMessagesRequest{} + + rootHeight := b.state.Params().SealedRoot().Height + data.startHeight = rootHeight // Default value + + sealed, err := b.state.Sealed().Head() + if err != nil { + return fmt.Errorf("failed to lookup sealed header: %w", err) + } + + lastSealedHeight := sealed.Height + if startHeightIn, ok := input["start-height"]; ok { + startHeight, err := parseN(startHeightIn) + if err != nil { + return admin.NewInvalidAdminReqErrorf("invalid 'start-height' field: %w", err) + } + + if startHeight > lastSealedHeight { + return admin.NewInvalidAdminReqErrorf( + "'start-height' %d must not be greater than latest sealed block %d", + startHeight, + lastSealedHeight, + ) + } + + if startHeight < rootHeight { + return admin.NewInvalidAdminReqErrorf( + "'start-height' %d must not be less than root block %d", + startHeight, + rootHeight, + ) + } + + data.startHeight = startHeight + } + + data.endHeight = lastSealedHeight // Default value + if endHeightIn, ok := input["end-height"]; ok { + endHeight, err := parseN(endHeightIn) + if err != nil { + return admin.NewInvalidAdminReqErrorf("invalid 'end-height' field: %w", err) + } + + if endHeight > lastSealedHeight { + return admin.NewInvalidAdminReqErrorf( + "'end-height' %d must not be greater than latest sealed block %d", + endHeight, + lastSealedHeight, + ) + } + + data.endHeight = endHeight + } + + if data.endHeight < data.startHeight { + return admin.NewInvalidAdminReqErrorf( + "'start-height' %d must not be greater than 'end-height' %d", + data.startHeight, + data.endHeight, + ) + } + + identities, err := b.state.Final().Identities(filter.HasRole[flow.Identity](flow.RoleExecution)) + if err != nil { + return fmt.Errorf("failed to retrieve execution IDs: %w", err) + } + + if executionNodeIdsIn, ok := input["execution-node-ids"]; ok { + executionNodeIds, err := b.parseExecutionNodeIds(executionNodeIdsIn, identities) + if err != nil { + return err + } + data.executionNodeIds = executionNodeIds + } else { + // if no execution node IDs are provided, the command will use all valid execution nodes + data.executionNodeIds = identities.ToSkeleton() + } + + request.ValidatorData = data + + return nil +} + +// Handler performs the backfilling operation by fetching missing transaction +// error messages for blocks within the specified height range. Uses execution nodes +// from data.executionNodeIds if available, otherwise defaults to all valid execution nodes. +// +// No errors are expected during normal operation. +func (b *BackfillTxErrorMessagesCommand) Handler(ctx context.Context, request *admin.CommandRequest) (interface{}, error) { + if b.txErrorMessagesCore == nil { + return nil, fmt.Errorf("failed to backfill, could not get transaction error messages storage") + } + + data := request.ValidatorData.(*backfillTxErrorMessagesRequest) + + total := data.endHeight - data.startHeight + 1 + progress := util.LogProgress(b.log, + util.DefaultLogProgressConfig("backfilling", int(total)), + ) + + b.log.Info(). + Uint64("start_height", data.startHeight). + Uint64("end_height", data.endHeight). + Uint64("blocks", total). + Msg("starting backfill") + + for height := data.startHeight; height <= data.endHeight; height++ { + header, err := b.state.AtHeight(height).Head() + if err != nil { + return nil, fmt.Errorf("failed to get block header: %w", err) + } + + blockID := header.ID() + err = b.txErrorMessagesCore.FetchErrorMessagesByENs(ctx, blockID, data.executionNodeIds) + if err != nil { + return nil, fmt.Errorf("error encountered while processing transaction result error messages for block %d: %w", height, err) + } + + progress(1) + } + + return nil, nil +}
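To make the accepted input shape concrete, here is a sketch of a full backfill request. All three fields are optional: `start-height` defaults to the sealed root height, `end-height` to the latest sealed height, and `execution-node-ids` to all execution nodes at the final block. The command name is an assumption (registration is not part of this diff), and the node ID is a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	payload, err := json.Marshal(map[string]any{
		"commandName": "backfill-tx-error-messages", // assumed registration name
		"data": map[string]any{
			"start-height": 1000,
			"end-height":   2000,
			// placeholder node ID, for illustration only
			"execution-node-ids": []string{"1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```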
+// parseExecutionNodeIds converts a list of node IDs from input to flow.IdentitySkeletonList. +// Returns an error if the IDs are invalid or empty. +// +// Expected errors during normal operation: +// - admin.InvalidAdminReqParameterError - if execution-node-ids is empty or has an invalid format. +func (b *BackfillTxErrorMessagesCommand) parseExecutionNodeIds(executionNodeIdsIn interface{}, allIdentities flow.IdentityList) (flow.IdentitySkeletonList, error) { + var ids flow.IdentityList + switch executionNodeIds := executionNodeIdsIn.(type) { + case []any: + if len(executionNodeIds) == 0 { + return nil, admin.NewInvalidAdminReqParameterError("execution-node-ids", "must be a non-empty list of strings", executionNodeIdsIn) + } + + idStrings := make([]string, len(executionNodeIds)) + for i, id := range executionNodeIds { + idStr, ok := id.(string) + if !ok { + return nil, admin.NewInvalidAdminReqParameterError("execution-node-ids", "must be a list of strings", executionNodeIdsIn) + } + idStrings[i] = idStr + } + + requestedENIdentifiers, err := flow.IdentifierListFromHex(idStrings) + if err != nil { + return nil, admin.NewInvalidAdminReqParameterError("execution-node-ids", err.Error(), executionNodeIdsIn) + } + + for _, enId := range requestedENIdentifiers { + id, exists := allIdentities.ByNodeID(enId) + if !exists { + return nil, admin.NewInvalidAdminReqParameterError("execution-node-ids", "could not find execution node by provided id", enId) + } + ids = append(ids, id) + } + default: + return nil, admin.NewInvalidAdminReqParameterError("execution-node-ids", "must be a list of strings", executionNodeIdsIn) + } + + return ids.ToSkeleton(), nil +} diff --git a/admin/commands/storage/backfill_tx_error_messages_test.go b/admin/commands/storage/backfill_tx_error_messages_test.go new file mode 100644 index 00000000000..4fa11fecb1e --- /dev/null +++ b/admin/commands/storage/backfill_tx_error_messages_test.go @@ -0,0 +1,547 @@ +package storage + +import ( + "context" + "fmt" + "os" + "testing" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/admin" + "github.com/onflow/flow-go/admin/commands" + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages" + accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/invalid" + protocolmock "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" + unittestMocks "github.com/onflow/flow-go/utils/unittest/mocks" +) + +const expectedErrorMsg = "expected test error" + +type BackfillTxErrorMessagesSuite struct { + suite.Suite + + command commands.AdminCommand + + log zerolog.Logger + state *protocolmock.State + snapshot *protocolmock.Snapshot + params *protocolmock.Params + + txErrorMessages *storagemock.TransactionResultErrorMessages + transactionResults *storagemock.LightTransactionResults + receipts *storagemock.ExecutionReceipts + headers *storagemock.Headers + + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + txResultsIndex *index.TransactionResultsIndex + + execClient *accessmock.ExecutionAPIClient + + connFactory *connectionmock.ConnectionFactory + allENIDs 
flow.IdentityList + + backend *backend.Backend + txResultErrorMessagesCore *tx_error_messages.TxErrorMessagesCore + + blockHeadersMap map[uint64]*flow.Header + + nodeRootBlock *flow.Block + sealedBlock *flow.Block + blockCount int +} + +func TestBackfillTxErrorMessages(t *testing.T) { + t.Parallel() + suite.Run(t, new(BackfillTxErrorMessagesSuite)) +} + +func (suite *BackfillTxErrorMessagesSuite) SetupTest() { + suite.log = zerolog.New(os.Stderr) + + suite.state = new(protocolmock.State) + suite.headers = new(storagemock.Headers) + suite.receipts = new(storagemock.ExecutionReceipts) + suite.transactionResults = storagemock.NewLightTransactionResults(suite.T()) + suite.txErrorMessages = new(storagemock.TransactionResultErrorMessages) + suite.reporter = syncmock.NewIndexReporter(suite.T()) + + suite.indexReporter = index.NewReporter() + err := suite.indexReporter.Initialize(suite.reporter) + suite.Require().NoError(err) + suite.txResultsIndex = index.NewTransactionResultsIndex(suite.indexReporter, suite.transactionResults) + + suite.execClient = new(accessmock.ExecutionAPIClient) + + suite.blockCount = 5 + suite.blockHeadersMap = make(map[uint64]*flow.Header, suite.blockCount) + + suite.nodeRootBlock = unittest.Block.Genesis(flow.Emulator) + suite.blockHeadersMap[suite.nodeRootBlock.Height] = suite.nodeRootBlock.ToHeader() + + parent := suite.nodeRootBlock.ToHeader() + + for i := 1; i <= suite.blockCount; i++ { + block := unittest.BlockWithParentFixture(parent) + // update for next iteration + parent = block.ToHeader() + suite.blockHeadersMap[block.Height] = block.ToHeader() + suite.sealedBlock = block + } + + suite.params = protocolmock.NewParams(suite.T()) + suite.params.On("SealedRoot").Return( + func() *flow.Header { + return suite.nodeRootBlock.ToHeader() + }, nil) + suite.state.On("Params").Return(suite.params, nil).Maybe() + + suite.snapshot = createSnapshot(suite.T(), suite.sealedBlock.ToHeader()) + suite.state.On("Sealed").Return(suite.snapshot) + suite.state.On("Final").Return(suite.snapshot) + + suite.state.On("AtHeight", mock.Anything).Return( + func(height uint64) protocol.Snapshot { + if int(height) < len(suite.blockHeadersMap) { + header := suite.blockHeadersMap[height] + return createSnapshot(suite.T(), header) + } + return invalid.NewSnapshot(fmt.Errorf("invalid height: %v", height)) + }, + ) + + // Mock the protocol snapshot to return fixed execution node IDs. 
+ suite.allENIDs = unittest.IdentityListFixture(1, unittest.WithRole(flow.RoleExecution)) + suite.snapshot.On("Identities", mock.Anything).Return( + func(flow.IdentityFilter[flow.Identity]) (flow.IdentityList, error) { + return suite.allENIDs, nil + }, nil).Maybe() + + // create a mock connection factory + suite.connFactory = connectionmock.NewConnectionFactory(suite.T()) + + executionNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + suite.log, + suite.state, + suite.receipts, + nil, + nil, + ) + + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + suite.log, + suite.txErrorMessages, + suite.txResultsIndex, + suite.connFactory, + node_communicator.NewNodeCommunicator(false), + executionNodeIdentitiesProvider, + ) + + suite.txResultErrorMessagesCore = tx_error_messages.NewTxErrorMessagesCore( + suite.log, + errorMessageProvider, + suite.txErrorMessages, + executionNodeIdentitiesProvider, + ) + + suite.command = NewBackfillTxErrorMessagesCommand( + suite.log, + suite.state, + suite.txResultErrorMessagesCore, + ) +} + +// TestValidateInvalidFormat validates that invalid input formats trigger appropriate error responses. +// It tests several invalid cases such as: +// - Invalid "start-height" and "end-height" fields where values are in an incorrect format or out of valid ranges. +// - Invalid combinations of "start-height" and "end-height" where logical constraints are violated. +// - Invalid types for "execution-node-ids" which must be a list of strings, and invalid node IDs. +func (suite *BackfillTxErrorMessagesSuite) TestValidateInvalidFormat() { + // invalid start-height + suite.Run("invalid start-height field", func() { + err := suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{ + "start-height": "123", + }, + }) + suite.Error(err) + suite.Equal(err, admin.NewInvalidAdminReqErrorf( + "invalid 'start-height' field: %w", + fmt.Errorf("invalid value for \"n\": %v", 0))) + }) + + // invalid start-height, start-height is greater than latest sealed block + suite.Run("start-height is greater than latest sealed block", func() { + startHeight := 100 + err := suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{ + "start-height": float64(startHeight), + }, + }) + suite.Error(err) + suite.Equal(err, admin.NewInvalidAdminReqErrorf( + "'start-height' %d must not be greater than latest sealed block %d", startHeight, suite.sealedBlock.Height)) + }) + + // invalid start-height, start-height is less than root block + suite.Run("start-height is less than root block", func() { + suite.nodeRootBlock.HeaderBody = suite.blockHeadersMap[2].HeaderBody // mock sealed root block to height 2 + + startHeight := 1 + err := suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{ + "start-height": float64(startHeight), + }, + }) + suite.Error(err) + suite.Equal(err, admin.NewInvalidAdminReqErrorf( + "'start-height' %d must not be less than root block %d", startHeight, suite.nodeRootBlock.Height)) + + suite.nodeRootBlock.HeaderBody = suite.blockHeadersMap[0].HeaderBody // mock sealed root block back to height 0 + }) + + // invalid end-height + suite.Run("invalid end-height field", func() { + err := suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{ + "end-height": "123", + }, + }) + suite.Error(err) + suite.Equal(err, admin.NewInvalidAdminReqErrorf( + "invalid 'end-height' field: %w", + fmt.Errorf("invalid value for \"n\": %v", 0))) + }) + + // end-height is greater than latest 
sealed block + suite.Run("invalid end-height is greater than latest sealed block", func() { + endHeight := 100 + err := suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{ + "start-height": float64(1), // raw json parses to float64 + "end-height": float64(endHeight), // raw json parses to float64 + "execution-node-ids": []any{suite.allENIDs[0].NodeID.String()}, + }, + }) + suite.Error(err) + suite.Equal(err, admin.NewInvalidAdminReqErrorf( + "'end-height' %d must not be greater than latest sealed block %d", + endHeight, + suite.sealedBlock.Height, + )) + }) + + suite.Run("invalid combination of start-height and end-height fields", func() { + startHeight := 3 + endHeight := 1 + err := suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{ + "start-height": float64(startHeight), // raw json parses to float64 + "end-height": float64(endHeight), // raw json parses to float64 + }, + }) + suite.Error(err) + suite.Equal(err, admin.NewInvalidAdminReqErrorf( + "'start-height' %d must not be greater than 'end-height' %d", startHeight, endHeight)) + }) + + // invalid execution-node-ids param + suite.Run("invalid execution-node-ids field", func() { + // invalid type + err := suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{ + "execution-node-ids": []int{1, 2, 3}, + }, + }) + suite.Error(err) + suite.Equal(err, admin.NewInvalidAdminReqParameterError( + "execution-node-ids", "must be a list of strings", []int{1, 2, 3})) + + // invalid type + err = suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{ + "execution-node-ids": "123", + }, + }) + suite.Error(err) + suite.Equal(err, admin.NewInvalidAdminReqParameterError( + "execution-node-ids", "must be a list of strings", "123")) + + // invalid execution node id + invalidENID := unittest.IdentifierFixture() + err = suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{ + "start-height": float64(1), // raw json parses to float64 + "end-height": float64(4), // raw json parses to float64 + "execution-node-ids": []any{invalidENID.String()}, + }, + }) + suite.Error(err) + suite.Equal(err, admin.NewInvalidAdminReqParameterError( + "execution-node-ids", "could not find execution node by provided id", invalidENID.String())) + }) +}
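The repeated `raw json parses to float64` comments above reflect a property of `encoding/json` that every validator in this package has to deal with: when decoding into `map[string]interface{}`, all JSON numbers arrive as `float64`. A minimal demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var req map[string]interface{}
	if err := json.Unmarshal([]byte(`{"start-height": 1, "end-height": 3}`), &req); err != nil {
		panic(err)
	}

	v := req["start-height"]
	fmt.Printf("%T %v\n", v, v) // float64 1

	// A direct uint64 assertion fails, which is why the validators go through
	// a conversion helper instead of asserting the integer type.
	_, ok := v.(uint64)
	fmt.Println(ok) // false
}
```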
+ +// TestValidateValidFormat verifies that valid input parameters result in no validation errors +// in the command validator. +// It tests various valid cases, such as: +// - Default parameters (start-height, end-height, execution-node-ids) are used. +// - Provided parameters (start-height, end-height, execution-node-ids) values are within expected ranges. +func (suite *BackfillTxErrorMessagesSuite) TestValidateValidFormat() { + // start-height and end-height are not provided, the root block and the latest sealed block + // will be used as the start and end heights respectively. + // execution-node-ids is not provided, all valid execution nodes will be used. + suite.Run("happy case, all default parameters", func() { + err := suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{}, + }) + suite.NoError(err) + }) + + // all parameters are provided + suite.Run("happy case, all parameters are provided", func() { + err := suite.command.Validator(&admin.CommandRequest{ + Data: map[string]interface{}{ + "start-height": float64(1), // raw json parses to float64 + "end-height": float64(3), // raw json parses to float64 + "execution-node-ids": []any{suite.allENIDs[0].NodeID.String()}, + }, + }) + suite.NoError(err) + }) +} + +// TestHandleBackfillTxErrorMessages exercises the transaction error message backfill logic for different scenarios. +// It validates behavior when transaction error messages exist or do not exist in the database, handling both default and custom parameters. +func (suite *BackfillTxErrorMessagesSuite) TestHandleBackfillTxErrorMessages() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // default parameters + req := &admin.CommandRequest{ + Data: map[string]interface{}{}, + } + suite.Require().NoError(suite.command.Validator(req)) + + suite.Run("happy case, all default parameters, tx error messages do not exist in db", func() { + // Create a mock execution client to simulate communication with execution nodes. + suite.connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &unittestMocks.MockCloser{}, nil) + + for i := suite.nodeRootBlock.Height; i <= suite.blockHeadersMap[uint64(suite.blockCount)].Height; i++ { + blockId := suite.blockHeadersMap[i].ID() + + // Mock the existence check: no error messages are stored for this block yet. + suite.txErrorMessages.On("Exists", blockId).Return(false, nil).Once() + + results := suite.generateResultsForBlock() + + // Mock the execution node API calls to fetch the error messages. + suite.mockTransactionErrorMessagesResponseByBlockID(blockId, results) + + // Setup mock storing the transaction error message after retrieving the failed result. + suite.mockStoreTxErrorMessages(blockId, results, suite.allENIDs[0].NodeID) + } + + _, err := suite.command.Handler(ctx, req) + suite.Require().NoError(err) + suite.assertAllExpectations() + }) + + suite.Run("happy case, all default parameters, tx error messages exist in db", func() { + for i := suite.nodeRootBlock.Height; i <= suite.blockHeadersMap[uint64(suite.blockCount)].Height; i++ { + blockId := suite.blockHeadersMap[i].ID() + + // Mock the existence check: error messages for this block are already stored. + suite.txErrorMessages.On("Exists", blockId).Return(true, nil).Once() + } + + _, err := suite.command.Handler(ctx, req) + suite.Require().NoError(err) + suite.assertAllExpectations() + }) + + suite.Run("happy case, all custom parameters, tx error messages do not exist in db", func() { + // custom parameters + startHeight := 1 + endHeight := 4 + + suite.allENIDs = unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleExecution)) + + executorID := suite.allENIDs[1].NodeID + req = &admin.CommandRequest{ + Data: map[string]interface{}{ + "start-height": float64(startHeight), // raw json parses to float64 + "end-height": float64(endHeight), // raw json parses to float64 + "execution-node-ids": []any{executorID.String()}, + }, + } + suite.Require().NoError(suite.command.Validator(req)) + + // Create a mock execution client to simulate communication with execution nodes. + suite.connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &unittestMocks.MockCloser{}, nil) + + for i := startHeight; i <= endHeight; i++ { + blockId := suite.blockHeadersMap[uint64(i)].ID() + + // Mock the existence check: no error messages are stored for this block yet. + suite.txErrorMessages.On("Exists", blockId).Return(false, nil).Once() + + results := suite.generateResultsForBlock() + + // Mock the execution node API calls to fetch the error messages. + suite.mockTransactionErrorMessagesResponseByBlockID(blockId, results) + + // Setup mock storing the transaction error message after retrieving the failed result. + suite.mockStoreTxErrorMessages(blockId, results, executorID) + } + + _, err := suite.command.Handler(ctx, req) + suite.Require().NoError(err) + suite.assertAllExpectations() + }) +} + +// TestHandleBackfillTxErrorMessagesErrors tests various error scenarios for the +// Handler method of BackfillTxErrorMessagesCommand to ensure proper error handling +// when prerequisites or dependencies are not met. +// +// It tests error cases such as: +// - Handling of nil txErrorMessagesCore dependency. +// - Failure when retrieving block headers. +func (suite *BackfillTxErrorMessagesSuite) TestHandleBackfillTxErrorMessagesErrors() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + suite.Run("error when txErrorMessagesCore is nil", func() { + req := &admin.CommandRequest{Data: map[string]interface{}{}} + command := NewBackfillTxErrorMessagesCommand( + suite.log, + suite.state, + nil, + ) + suite.Require().NoError(command.Validator(req)) + + _, err := command.Handler(ctx, req) + assert.Error(suite.T(), err) + assert.Contains(suite.T(), err.Error(), "failed to backfill, could not get transaction error messages storage") + }) + + suite.Run("error when failing to retrieve block header", func() { + req := &admin.CommandRequest{ + Data: map[string]interface{}{ + "start-height": float64(1), // raw json parses to float64 + }, + } + suite.Require().NoError(suite.command.Validator(req)) + + snapNotFound := protocolmock.NewSnapshot(suite.T()) + snapNotFound.On("Head").Return(nil, storage.ErrNotFound).Once() + + suite.state.On("AtHeight", uint64(1)).Return(snapNotFound).Unset() + suite.state.On("AtHeight", uint64(1)).Return(snapNotFound).Once() + + _, err := suite.command.Handler(ctx, req) + assert.Error(suite.T(), err) + assert.Contains(suite.T(), err.Error(), "failed to get block header") + }) +} + +// generateResultsForBlock generates mock transaction results for a block. +// It creates a mix of failed and non-failed transaction results to simulate different transaction outcomes. +func (suite *BackfillTxErrorMessagesSuite) generateResultsForBlock() []flow.LightTransactionResult { + results := make([]flow.LightTransactionResult, 0) + + for i := 0; i < 5; i++ { + results = append(results, flow.LightTransactionResult{ + TransactionID: unittest.IdentifierFixture(), + Failed: i%2 == 0, // create a mix of failed and non-failed transactions + ComputationUsed: 0, + }) + } + + return results +} + +// mockTransactionErrorMessagesResponseByBlockID mocks the response of transaction error messages +// by block ID for failed transactions. It simulates API calls that retrieve error messages from execution nodes. 
+func (suite *BackfillTxErrorMessagesSuite) mockTransactionErrorMessagesResponseByBlockID( + blockID flow.Identifier, + results []flow.LightTransactionResult, +) { + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockID[:], + } + + exeErrMessagesResp := &execproto.GetTransactionErrorMessagesResponse{} + for i, result := range results { + r := result + if r.Failed { + errMsg := fmt.Sprintf("%s.%s", expectedErrorMsg, r.TransactionID) + exeErrMessagesResp.Results = append(exeErrMessagesResp.Results, &execproto.GetTransactionErrorMessagesResponse_Result{ + TransactionId: r.TransactionID[:], + ErrorMessage: errMsg, + Index: uint32(i), + }) + } + } + + suite.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(exeErrMessagesResp, nil). + Once() +} + +// mockStoreTxErrorMessages mocks the process of storing transaction error messages in the database +// after retrieving the results of failed transactions. +func (suite *BackfillTxErrorMessagesSuite) mockStoreTxErrorMessages( + blockID flow.Identifier, + results []flow.LightTransactionResult, + executorID flow.Identifier, +) { + var txErrorMessages []flow.TransactionResultErrorMessage + + for i, result := range results { + r := result + if r.Failed { + errMsg := fmt.Sprintf("%s.%s", expectedErrorMsg, r.TransactionID) + + txErrorMessages = append(txErrorMessages, + flow.TransactionResultErrorMessage{ + TransactionID: result.TransactionID, + ErrorMessage: errMsg, + Index: uint32(i), + ExecutorID: executorID, + }) + } + } + + suite.txErrorMessages.On("Store", blockID, txErrorMessages).Return(nil).Once() +} + +// assertAllExpectations asserts that all the expectations set on various mocks are met, +// ensuring the test results are valid. 
+func (suite *BackfillTxErrorMessagesSuite) assertAllExpectations() { + suite.snapshot.AssertExpectations(suite.T()) + suite.state.AssertExpectations(suite.T()) + suite.headers.AssertExpectations(suite.T()) + suite.execClient.AssertExpectations(suite.T()) + suite.transactionResults.AssertExpectations(suite.T()) + suite.txErrorMessages.AssertExpectations(suite.T()) +} diff --git a/admin/commands/storage/helper.go b/admin/commands/storage/helper.go index 9474f85131f..e8166cb731a 100644 --- a/admin/commands/storage/helper.go +++ b/admin/commands/storage/helper.go @@ -5,6 +5,7 @@ import ( "math" "strings" + "github.com/onflow/flow-go/admin" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" ) @@ -90,3 +91,69 @@ func getBlockHeader(state protocol.State, req *blocksRequest) (*flow.Header, err return nil, fmt.Errorf("invalid request type: %v", req.requestType) } } + +// parseHeightRangeRequestData parses the 'start-height' and 'end-height' fields shared by the height-range admin commands. +// Returns admin.InvalidAdminReqError for invalid inputs. +func parseHeightRangeRequestData(req *admin.CommandRequest) (*heightRangeReqData, error) { + input, ok := req.Data.(map[string]interface{}) + if !ok { + return nil, admin.NewInvalidAdminReqFormatError("missing 'data' field") + } + + startHeight, err := findUint64(input, "start-height") + if err != nil { + return nil, fmt.Errorf("invalid start-height: %w", err) + } + + endHeight, err := findUint64(input, "end-height") + if err != nil { + return nil, fmt.Errorf("invalid end-height: %w", err) + } + + if endHeight < startHeight { + return nil, admin.NewInvalidAdminReqErrorf("end-height %v should not be smaller than start-height %v", endHeight, startHeight) + } + + return &heightRangeReqData{ + startHeight: startHeight, + endHeight: endHeight, + }, nil +} + +// parseString extracts the given string field from the request data. +// Returns admin.InvalidAdminReqError for invalid inputs. +func parseString(req *admin.CommandRequest, field string) (string, error) { + input, ok := req.Data.(map[string]interface{}) + if !ok { + return "", admin.NewInvalidAdminReqFormatError("missing 'data' field") + } + fieldValue, err := findString(input, field) + if err != nil { + return "", admin.NewInvalidAdminReqErrorf("missing %v field", field) + } + return fieldValue, nil +} + +// findUint64 returns the value of the given field as a uint64. +// Returns admin.InvalidAdminReqError for invalid inputs +func findUint64(input map[string]interface{}, field string) (uint64, error) { + data, ok := input[field] + if !ok { + return 0, admin.NewInvalidAdminReqErrorf("missing required field '%s'", field) + } + val, err := parseN(data) + if err != nil { + return 0, admin.NewInvalidAdminReqErrorf("invalid 'n' field: %w", err) + } + + return uint64(val), nil +} + +// findString returns the lowercased, whitespace-trimmed value of the given string field. +// Returns admin.InvalidAdminReqError for invalid inputs +func findString(input map[string]interface{}, field string) (string, error) { + data, ok := input[field] + if !ok { + return "", admin.NewInvalidAdminReqErrorf("missing required field '%s'", field) + } + + str, ok := data.(string) + if !ok { + return "", admin.NewInvalidAdminReqErrorf("field '%s' is not string", field) + } + + return strings.ToLower(strings.TrimSpace(str)), nil +}
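The helpers just added to helper.go give the height-range admin commands a shared request shape. As a sketch, a get-transactions request (the registered command name is an assumption) would look like the following; heights can be sent as plain JSON numbers because `findUint64` converts the decoded `float64` values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	payload, err := json.Marshal(map[string]any{
		"commandName": "get-transactions", // assumed registration name
		"data": map[string]any{
			"start-height": 100,
			"end-height":   199, // inclusive, so this covers 100 blocks
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```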
diff --git a/admin/commands/storage/pebble_checkpoint.go b/admin/commands/storage/pebble_checkpoint.go new file mode 100644 index 00000000000..64a2274610b --- /dev/null +++ b/admin/commands/storage/pebble_checkpoint.go @@ -0,0 +1,59 @@ +package storage + +import ( + "context" + "fmt" + "path" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/admin" + "github.com/onflow/flow-go/admin/commands" +) + +var _ commands.AdminCommand = (*PebbleDBCheckpointCommand)(nil) + +// PebbleDBCheckpointCommand creates a checkpoint of the pebble database for querying the data +// while keeping the node alive. +type PebbleDBCheckpointCommand struct { + checkpointDir string + dbname string // dbname is for logging purposes only + pebbleDB *pebble.DB +} + +func NewPebbleDBCheckpointCommand(checkpointDir string, dbname string, pebbleDB *pebble.DB) *PebbleDBCheckpointCommand { + return &PebbleDBCheckpointCommand{ + checkpointDir: checkpointDir, + dbname: dbname, + pebbleDB: pebbleDB, + } +} + +func (c *PebbleDBCheckpointCommand) Handler(ctx context.Context, req *admin.CommandRequest) (interface{}, error) { + log.Info().Msgf("admintool: creating %v database checkpoint", c.dbname) + + targetDir := nextTmpFolder(c.checkpointDir) + + log.Info().Msgf("admintool: creating %v database checkpoint at: %v", c.dbname, targetDir) + + err := c.pebbleDB.Checkpoint(targetDir) + if err != nil { + return nil, admin.NewInvalidAdminReqErrorf("failed to create %v pebbledb checkpoint at %v: %w", c.dbname, targetDir, err) + } + + log.Info().Msgf("admintool: successfully created %v database checkpoint at: %v", c.dbname, targetDir) + + return fmt.Sprintf("successfully created %v db checkpoint at %v", c.dbname, targetDir), nil +} + +func (c *PebbleDBCheckpointCommand) Validator(req *admin.CommandRequest) error { + return nil +} + +func nextTmpFolder(dir string) string { + // use timestamp as folder name + folderName := time.Now().Format("2006-01-02_15-04-05") + return path.Join(dir, folderName) +} diff --git a/admin/commands/storage/read_blocks.go b/admin/commands/storage/read_blocks.go index 3405a5be6e2..b05c06721a8 100644 --- a/admin/commands/storage/read_blocks.go +++ b/admin/commands/storage/read_blocks.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + "github.com/rs/zerolog/log" + "github.com/onflow/flow-go/admin" "github.com/onflow/flow-go/admin/commands" "github.com/onflow/flow-go/model/flow" @@ -28,6 +30,8 @@ func (r *ReadBlocksCommand) Handler(ctx context.Context, req *admin.CommandReque var result []*flow.Block var blockID flow.Identifier + log.Info().Str("module", "admin-tool").Msgf("read blocks, data: %v", data) + if header, err := getBlockHeader(r.state, data.blocksRequest); err != nil { return nil, fmt.Errorf("failed to get block header: %w", err) } else { @@ -40,10 +44,10 @@ func (r *ReadBlocksCommand) Handler(ctx context.Context, req *admin.CommandReque return nil, fmt.Errorf("failed to get block by ID: %w", err) } result = append(result, block) - if block.Header.Height == 0 { + if block.Height == 0 { break } - blockID = block.Header.ParentID + blockID = block.ParentID } return commands.ConvertToInterfaceList(result) diff --git a/admin/commands/storage/read_blocks_test.go b/admin/commands/storage/read_blocks_test.go index 334260fe1a8..e48a69b91d8 100644 --- a/admin/commands/storage/read_blocks_test.go +++ b/admin/commands/storage/read_blocks_test.go @@ -38,14 +38,14 @@ func TestReadBlocks(t *testing.T) { suite.Run(t, new(ReadBlocksSuite)) } -func createSnapshot(head *flow.Header) protocol.Snapshot { - snapshot := &protocolmock.Snapshot{} +func createSnapshot(t *testing.T, head *flow.Header) *protocolmock.Snapshot { + snapshot := protocolmock.NewSnapshot(t) snapshot.On("Head").Return( func() *flow.Header { return head }, nil, - ) + ).Maybe() return snapshot } @@ -55,28 +55,28 @@ func (suite *ReadBlocksSuite) SetupTest() { var blocks []*flow.Block - genesis := unittest.GenesisFixture() + genesis := unittest.Block.Genesis(flow.Emulator) blocks = append(blocks, genesis) - sealed := unittest.BlockWithParentFixture(genesis.Header) + sealed := unittest.BlockWithParentFixture(genesis.ToHeader()) blocks = append(blocks, sealed) - final := unittest.BlockWithParentFixture(sealed.Header) + final := unittest.BlockWithParentFixture(sealed.ToHeader()) blocks = append(blocks, final) - final = unittest.BlockWithParentFixture(final.Header) + final = unittest.BlockWithParentFixture(final.ToHeader()) blocks = append(blocks, final) - final = unittest.BlockWithParentFixture(final.Header) + final = unittest.BlockWithParentFixture(final.ToHeader()) blocks = append(blocks, final) suite.allBlocks = blocks suite.sealed = sealed suite.final = final - suite.state.On("Final").Return(createSnapshot(final.Header)) - suite.state.On("Sealed").Return(createSnapshot(sealed.Header)) + suite.state.On("Final").Return(createSnapshot(suite.T(), final.ToHeader())) + suite.state.On("Sealed").Return(createSnapshot(suite.T(), sealed.ToHeader())) suite.state.On("AtBlockID", mock.Anything).Return( func(blockID flow.Identifier) protocol.Snapshot { for _, block := range blocks { if block.ID() == blockID { - return createSnapshot(block.Header) + return createSnapshot(suite.T(), block.ToHeader()) } } return invalid.NewSnapshot(fmt.Errorf("invalid block ID: %v", blockID)) @@ -86,7 +86,7 @@ func (suite *ReadBlocksSuite) SetupTest() { func(height uint64) protocol.Snapshot { if int(height) < len(blocks) { block := blocks[height] - return createSnapshot(block.Header) + return createSnapshot(suite.T(), block.ToHeader()) } return invalid.NewSnapshot(fmt.Errorf("invalid height: %v", height)) }, diff --git a/admin/commands/storage/read_protocol_snapshot.go b/admin/commands/storage/read_protocol_snapshot.go new file mode 100644 index 00000000000..738e6409936 --- /dev/null +++ b/admin/commands/storage/read_protocol_snapshot.go @@ -0,0 +1,121 @@ +package storage + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/admin" + "github.com/onflow/flow-go/admin/commands" + "github.com/onflow/flow-go/cmd/util/common" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/logging" +) + +var _ commands.AdminCommand = (*ProtocolSnapshotCommand)(nil) + +type protocolSnapshotData struct { + blocksToSkip uint +} + +// ProtocolSnapshotCommand is a command that generates a protocol snapshot for a checkpoint (usually the latest checkpoint). +// This command is only available on execution nodes. +type ProtocolSnapshotCommand struct { + logger zerolog.Logger + state protocol.State + headers storage.Headers + seals storage.Seals + checkpointDir string // the directory where the checkpoint is stored +} + +func NewProtocolSnapshotCommand( + logger zerolog.Logger, + state protocol.State, + headers storage.Headers, + seals storage.Seals, + checkpointDir string, +) *ProtocolSnapshotCommand { + return &ProtocolSnapshotCommand{ + logger: logger, + state: state, + headers: headers, + seals: seals, + checkpointDir: checkpointDir, + } +} + +func (s *ProtocolSnapshotCommand) Handler(_ context.Context, req *admin.CommandRequest) (interface{}, error) { + validated, ok := req.ValidatorData.(*protocolSnapshotData) + if !ok { + return nil, fmt.Errorf("failed to parse validator data") + } + + blocksToSkip := validated.blocksToSkip + + s.logger.Info().Uint("blocksToSkip", blocksToSkip).Msg("admintool: generating protocol snapshot") + + snapshot, sealedHeight, commit, checkpointFile, err := common.GenerateProtocolSnapshotForCheckpoint( + s.logger, s.state, s.headers, s.seals, s.checkpointDir, blocksToSkip) + if err != nil { + return nil, fmt.Errorf("could not generate protocol snapshot for checkpoint, checkpointDir %v: %w", + s.checkpointDir, err) + } + + header, err := snapshot.Head() + if err != nil { + return nil, fmt.Errorf("could not get header from snapshot: %w", err) + } + + serializable, err := inmem.FromSnapshot(snapshot) + if err != nil { + return nil, fmt.Errorf("could not convert snapshot to serializable: %w", err) + } + + s.logger.Info(). + Uint64("finalized_height", header.Height). // finalized height + Hex("finalized_block_id", logging.Entity(header)). + Uint64("sealed_height", sealedHeight). + Hex("sealed_commit", commit[:]). // not the commit for the finalized height, but for the sealed height + Str("checkpoint_file", checkpointFile). + Uint("blocks_to_skip", blocksToSkip). + Msg("admintool: protocol snapshot generated successfully") + + return commands.ConvertToMap(protocolSnapshotResponse{ + Snapshot: serializable.Encodable(), + Checkpoint: checkpointFile, + }) +} + +type protocolSnapshotResponse struct { + Snapshot inmem.EncodableSnapshot `json:"snapshot"` + Checkpoint string `json:"checkpoint"` +} + +func (s *ProtocolSnapshotCommand) Validator(req *admin.CommandRequest) error { + // blocksToSkip is the number of blocks to skip when iterating the sealed heights to find the state commitment + // in the checkpoint file. + // default is 0 + validated := &protocolSnapshotData{ + blocksToSkip: uint(0), + } + + input, ok := req.Data.(map[string]interface{}) + if ok { + data, ok := input["blocks-to-skip"] + + if ok { + n, ok := data.(float64) + if !ok { + return fmt.Errorf("could not parse blocks-to-skip: %v", data) + } + validated.blocksToSkip = uint(n) + } + } + + req.ValidatorData = validated + + return nil +} diff --git a/admin/commands/storage/read_range_blocks.go b/admin/commands/storage/read_range_blocks.go new file mode 100644 index 00000000000..cc4d00d6354 --- /dev/null +++ b/admin/commands/storage/read_range_blocks.go @@ -0,0 +1,51 @@ +package storage + +import ( + "context" + + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/admin" + "github.com/onflow/flow-go/admin/commands" + "github.com/onflow/flow-go/cmd/util/cmd/read-light-block" + "github.com/onflow/flow-go/storage" +) + +var _ commands.AdminCommand = (*ReadRangeBlocksCommand)(nil) + +// 10001 instead of 10000, because 10000 won't allow a range from 10000 to 20000, +// which is easier to type than [10001, 20000] +const Max_Range_Block_Limit = uint64(10001) + +type ReadRangeBlocksCommand struct { + blocks storage.Blocks +} + +func NewReadRangeBlocksCommand(blocks storage.Blocks) commands.AdminCommand { + return &ReadRangeBlocksCommand{ + blocks: blocks, + } +} + +func (c *ReadRangeBlocksCommand) Handler(ctx context.Context, req *admin.CommandRequest) (interface{}, error) { + reqData, err := parseHeightRangeRequestData(req) + if err != nil { + return nil, err + } + + log.Info().Str("module", "admin-tool").Msgf("read range blocks, data: %v", reqData) + + if reqData.Range() > Max_Range_Block_Limit { + return nil, admin.NewInvalidAdminReqErrorf("getting more than %v blocks at a time could impact the node's performance and is not allowed", Max_Range_Block_Limit) + } + + lights, err := read.ReadLightBlockByHeightRange(c.blocks, reqData.startHeight, reqData.endHeight) + if err != nil { + return nil, err + } + return commands.ConvertToInterfaceList(lights) +} + +func (c *ReadRangeBlocksCommand) Validator(req *admin.CommandRequest) error { + return nil +}
diff --git a/admin/commands/storage/read_range_cluster_blocks.go b/admin/commands/storage/read_range_cluster_blocks.go new file mode 100644 index 00000000000..b0e41b86fe8 --- /dev/null +++ b/admin/commands/storage/read_range_cluster_blocks.go @@ -0,0 +1,67 @@ +package storage + +import ( + "context" + "fmt" + + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/admin" + "github.com/onflow/flow-go/admin/commands" + "github.com/onflow/flow-go/cmd/util/cmd/read-light-block" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" +) + +var _ commands.AdminCommand = (*ReadRangeClusterBlocksCommand)(nil) + +// 10001 instead of 10000, because 10000 won't allow a range from 10000 to 20000, +// which is easier to type than [10001, 20000] +const Max_Range_Cluster_Block_Limit = uint64(10001) + +type ReadRangeClusterBlocksCommand struct { + db storage.DB + headers *store.Headers + payloads *store.ClusterPayloads +} + +func NewReadRangeClusterBlocksCommand(db storage.DB, headers *store.Headers, payloads *store.ClusterPayloads) commands.AdminCommand { + return &ReadRangeClusterBlocksCommand{ + db: db, + headers: headers, + payloads: payloads, + } +} + +func (c *ReadRangeClusterBlocksCommand) Handler(ctx context.Context, req *admin.CommandRequest) (interface{}, error) { + chainID, err := parseString(req, "chain-id") + if err != nil { + return nil, err + } + + reqData, err := parseHeightRangeRequestData(req) + if err != nil { + return nil, err + } + + log.Info().Str("module", "admin-tool").Msgf("read range cluster blocks, data: %v", reqData) + + if reqData.Range() > Max_Range_Cluster_Block_Limit { + return nil, admin.NewInvalidAdminReqErrorf("getting more than %v blocks at a time could impact the node's performance and is not allowed", Max_Range_Cluster_Block_Limit) + } + + clusterBlocks := store.NewClusterBlocks( + c.db, flow.ChainID(chainID), c.headers, c.payloads, + ) + + lights, err := read.ReadClusterLightBlockByHeightRange(clusterBlocks, reqData.startHeight, reqData.endHeight) + if err != nil { + return nil, fmt.Errorf("could not get cluster blocks for chain ID %v: %w", chainID, err) + } + return commands.ConvertToInterfaceList(lights) +} + +func (c *ReadRangeClusterBlocksCommand) Validator(req *admin.CommandRequest) error { + return nil +} diff --git a/admin/commands/storage/read_results_test.go b/admin/commands/storage/read_results_test.go index 44937aaadf8..e78d4a6b5b2 100644 --- a/admin/commands/storage/read_results_test.go +++ b/admin/commands/storage/read_results_test.go @@ -49,12 +49,12 @@ func (suite *ReadResultsSuite) SetupTest() { var blocks []*flow.Block var results []*flow.ExecutionResult - genesis := unittest.GenesisFixture() - genesisResult := unittest.ExecutionResultFixture(unittest.WithBlock(genesis)) + genesis := unittest.Block.Genesis(flow.Emulator) + genesisResult := unittest.ExecutionResultFixture(unittest.WithBlock(genesis), unittest.WithPreviousResultID(flow.ZeroID)) blocks = append(blocks, genesis) results = append(results, genesisResult) - sealed := unittest.BlockWithParentFixture(genesis.Header) + sealed := unittest.BlockWithParentFixture(genesis.ToHeader()) sealedResult := unittest.ExecutionResultFixture( unittest.WithBlock(sealed), unittest.WithPreviousResult(*genesisResult), @@ -62,7 +62,7 @@ blocks = append(blocks, sealed) results = append(results, sealedResult) - final := unittest.BlockWithParentFixture(sealed.Header) + final := unittest.BlockWithParentFixture(sealed.ToHeader()) finalResult := unittest.ExecutionResultFixture( unittest.WithBlock(final), unittest.WithPreviousResult(*sealedResult), @@ -70,7 +70,7 @@ blocks = append(blocks, final) results = append(results, finalResult) - final = unittest.BlockWithParentFixture(final.Header) + final = unittest.BlockWithParentFixture(final.ToHeader()) finalResult = unittest.ExecutionResultFixture( unittest.WithBlock(final), unittest.WithPreviousResult(*finalResult), @@ -78,7 +78,7 @@ blocks = append(blocks, final) results = append(results, finalResult) - final = unittest.BlockWithParentFixture(final.Header) + final = unittest.BlockWithParentFixture(final.ToHeader()) finalResult = unittest.ExecutionResultFixture( unittest.WithBlock(final), unittest.WithPreviousResult(*finalResult), @@ -93,13 +93,13 @@ suite.finalResult = finalResult suite.sealedResult = sealedResult - suite.state.On("Final").Return(createSnapshot(final.Header)) - suite.state.On("Sealed").Return(createSnapshot(sealed.Header)) + suite.state.On("Final").Return(createSnapshot(suite.T(), final.ToHeader())) + suite.state.On("Sealed").Return(createSnapshot(suite.T(), sealed.ToHeader())) suite.state.On("AtBlockID", mock.Anything).Return( func(blockID flow.Identifier) protocol.Snapshot { for _, block := range blocks { if block.ID() == blockID { - return createSnapshot(block.Header) + return createSnapshot(suite.T(), block.ToHeader()) } } return invalid.NewSnapshot(fmt.Errorf("invalid block ID: %v", blockID)) @@ -109,7 +109,7 @@ func (suite *ReadResultsSuite) SetupTest() { func(height uint64) protocol.Snapshot { if int(height) < len(blocks) { block := blocks[height] - return createSnapshot(block.Header) + return createSnapshot(suite.T(), block.ToHeader()) } return invalid.NewSnapshot(fmt.Errorf("invalid height: %v", height)) }, diff --git a/admin/commands/storage/read_transactions.go b/admin/commands/storage/read_transactions.go index 8c38a8edc98..386c429d509 100644 --- a/admin/commands/storage/read_transactions.go +++ b/admin/commands/storage/read_transactions.go @@ -14,14 +14,15 @@ import ( var _ commands.AdminCommand = (*GetTransactionsCommand)(nil) -// max number of block height to query transactions from -var MAX_HEIGHT_RANGE = uint64(1000) - -type getTransactionsReqData struct { +type heightRangeReqData struct { startHeight uint64 endHeight uint64 } +func (d heightRangeReqData) Range() uint64 { + return d.endHeight - d.startHeight + 1 +} + type GetTransactionsCommand struct { state protocol.State payloads storage.Payloads @@ -37,7 +38,12 @@ func NewGetTransactionsCommand(state protocol.State, payloads storage.Payloads, } func (c *GetTransactionsCommand) Handler(ctx context.Context, req *admin.CommandRequest) (interface{}, error) { - data := req.ValidatorData.(*getTransactionsReqData) + data := req.ValidatorData.(*heightRangeReqData) + + limit := uint64(10001) + if data.Range() > limit { + return nil, admin.NewInvalidAdminReqErrorf("getting transactions for more than %v blocks at a time could impact the node's performance and is not allowed", limit) + } finder := &transactions.Finder{ State: c.state, @@ -55,50 +61,13 @@ func (c *GetTransactionsCommand) Handler(ctx context.Context, req *admin.Command return commands.ConvertToInterfaceList(blocks) } -// Returns admin.InvalidAdminReqError for invalid inputs -func findUint64(input map[string]interface{}, field string) (uint64, error) { - data, ok := input[field] - if !ok { - return 0, 
admin.NewInvalidAdminReqErrorf("missing required field '%s'", field) - } - val, err := parseN(data) - if err != nil { - return 0, admin.NewInvalidAdminReqErrorf("invalid 'n' field: %w", err) - } - - return uint64(val), nil -} - // Validator validates the request. // Returns admin.InvalidAdminReqError for invalid/malformed requests. func (c *GetTransactionsCommand) Validator(req *admin.CommandRequest) error { - input, ok := req.Data.(map[string]interface{}) - if !ok { - return admin.NewInvalidAdminReqFormatError("expected map[string]any") - } - - startHeight, err := findUint64(input, "start-height") - if err != nil { - return err - } - - endHeight, err := findUint64(input, "end-height") + data, err := parseHeightRangeRequestData(req) if err != nil { return err } - - if endHeight < startHeight { - return admin.NewInvalidAdminReqErrorf("endHeight %v should not be smaller than startHeight %v", endHeight, startHeight) - } - - if endHeight-startHeight+1 > MAX_HEIGHT_RANGE { - return admin.NewInvalidAdminReqErrorf("getting transactions for more than %v blocks at a time might have an impact to node's performance and is not allowed", MAX_HEIGHT_RANGE) - } - - req.ValidatorData = &getTransactionsReqData{ - startHeight: startHeight, - endHeight: endHeight, - } - + req.ValidatorData = data return nil } diff --git a/admin/commands/storage/read_transactions_test.go b/admin/commands/storage/read_transactions_test.go index 664a27e065c..2a3917c48e9 100644 --- a/admin/commands/storage/read_transactions_test.go +++ b/admin/commands/storage/read_transactions_test.go @@ -1,6 +1,7 @@ package storage import ( + "context" "fmt" "testing" @@ -14,13 +15,18 @@ func TestReadTransactionsRangeTooWide(t *testing.T) { data := map[string]interface{}{ "start-height": float64(1), - "end-height": float64(1001), + "end-height": float64(10002), } - err := c.Validator(&admin.CommandRequest{ + + req := &admin.CommandRequest{ Data: data, - }) + } + err := c.Validator(req) + require.NoError(t, err) + + _, err = c.Handler(context.Background(), req) require.Error(t, err) - require.Contains(t, fmt.Sprintf("%v", err), "more than 1000 blocks") + require.Contains(t, fmt.Sprintf("%v", err), "more than 10001 blocks") } func TestReadTransactionsRangeInvalid(t *testing.T) { diff --git a/apiproxy/access_api_proxy.go b/apiproxy/access_api_proxy.go deleted file mode 100644 index 8e0b781af5e..00000000000 --- a/apiproxy/access_api_proxy.go +++ /dev/null @@ -1,468 +0,0 @@ -package apiproxy - -import ( - "context" - "fmt" - "sync" - "time" - - "google.golang.org/grpc/connectivity" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/status" - - "github.com/onflow/flow/protobuf/go/flow/access" - - "github.com/onflow/flow-go/engine/access/rpc/backend" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/grpcutils" -) - -// NewFlowAccessAPIRouter creates a backend access API that forwards some requests to an upstream node. -// It is used by Observer services, Blockchain Data Service, etc. -// Make sure that this is just for observation and not a staked participant in the flow network. -// This means that observers see a copy of the data but there is no interaction to ensure integrity from the root block. 
-func NewFlowAccessAPIRouter(accessNodeAddressAndPort flow.IdentityList, timeout time.Duration) (*FlowAccessAPIRouter, error) { - ret := &FlowAccessAPIRouter{} - err := ret.upstream.setFlowAccessAPI(accessNodeAddressAndPort, timeout) - if err != nil { - return nil, err - } - return ret, nil -} - -// setFlowAccessAPI sets a backend access API that forwards some requests to an upstream node. -// It is used by Observer services, Blockchain Data Service, etc. -// Make sure that this is just for observation and not a staked participant in the flow network. -// This means that observers see a copy of the data but there is no interaction to ensure integrity from the root block. -func (ret *FlowAccessAPIForwarder) setFlowAccessAPI(accessNodeAddressAndPort flow.IdentityList, timeout time.Duration) error { - ret.timeout = timeout - ret.ids = accessNodeAddressAndPort - ret.upstream = make([]access.AccessAPIClient, accessNodeAddressAndPort.Count()) - ret.connections = make([]*grpc.ClientConn, accessNodeAddressAndPort.Count()) - for i, identity := range accessNodeAddressAndPort { - // Store the faultTolerantClient setup parameters such as address, public, key and timeout, so that - // we can refresh the API on connection loss - ret.ids[i] = identity - - // We fail on any single error on startup, so that - // we identify bootstrapping errors early - err := ret.reconnectingClient(i) - if err != nil { - return err - } - } - - ret.roundRobin = 0 - return nil -} - -// FlowAccessAPIRouter is a structure that represents the routing proxy algorithm. -// It splits requests between a local and a remote API service. -type FlowAccessAPIRouter struct { - access.AccessAPIServer - upstream FlowAccessAPIForwarder -} - -// SetLocalAPI sets the local backend that responds to block related calls -// Everything else is forwarded to a selected upstream node -func (h *FlowAccessAPIRouter) SetLocalAPI(local access.AccessAPIServer) { - h.AccessAPIServer = local -} - -// reconnectingClient returns an active client, or -// creates one, if the last one is not ready anymore. 
-func (h *FlowAccessAPIForwarder) reconnectingClient(i int) error { - timeout := h.timeout - - if h.connections[i] == nil || h.connections[i].GetState() != connectivity.Ready { - identity := h.ids[i] - var connection *grpc.ClientConn - var err error - if identity.NetworkPubKey == nil { - connection, err = grpc.Dial( - identity.Address, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), - grpc.WithInsecure(), //nolint:staticcheck - backend.WithClientUnaryInterceptor(timeout)) - if err != nil { - return err - } - } else { - tlsConfig, err := grpcutils.DefaultClientTLSConfig(identity.NetworkPubKey) - if err != nil { - return fmt.Errorf("failed to get default TLS client config using public flow networking key %s %w", identity.NetworkPubKey.String(), err) - } - - connection, err = grpc.Dial( - identity.Address, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), - grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), - backend.WithClientUnaryInterceptor(timeout)) - if err != nil { - return fmt.Errorf("cannot connect to %s %w", identity.Address, err) - } - } - connection.Connect() - time.Sleep(1 * time.Second) - state := connection.GetState() - if state != connectivity.Ready && state != connectivity.Connecting { - return fmt.Errorf("%v", state) - } - h.connections[i] = connection - h.upstream[i] = access.NewAccessAPIClient(connection) - } - - return nil -} - -// faultTolerantClient implements an upstream connection that reconnects on errors -// a reasonable amount of time. -func (h *FlowAccessAPIForwarder) faultTolerantClient() (access.AccessAPIClient, error) { - if h.upstream == nil || len(h.upstream) == 0 { - return nil, status.Errorf(codes.Unimplemented, "method not implemented") - } - - // Reasoning: A retry count of three gives an acceptable 5% failure ratio from a 37% failure ratio. - // A bigger number is problematic due to the DNS resolve and connection times, - // plus the need to log and debug each individual connection failure. - // - // This reasoning eliminates the need of making this parameter configurable. - // The logic works rolling over a single connection as well making clean code. - const retryMax = 3 - - h.lock.Lock() - defer h.lock.Unlock() - - var err error - for i := 0; i < retryMax; i++ { - h.roundRobin++ - h.roundRobin = h.roundRobin % len(h.upstream) - err = h.reconnectingClient(h.roundRobin) - if err != nil { - continue - } - state := h.connections[h.roundRobin].GetState() - if state != connectivity.Ready && state != connectivity.Connecting { - continue - } - return h.upstream[h.roundRobin], nil - } - - return nil, status.Errorf(codes.Unavailable, err.Error()) -} - -// Ping pings the service. It is special in the sense that it responds successful, -// only if all underlying services are ready. 
-func (h *FlowAccessAPIRouter) Ping(context context.Context, req *access.PingRequest) (*access.PingResponse, error) { - return h.AccessAPIServer.Ping(context, req) -} - -func (h *FlowAccessAPIRouter) GetLatestBlockHeader(context context.Context, req *access.GetLatestBlockHeaderRequest) (*access.BlockHeaderResponse, error) { - return h.AccessAPIServer.GetLatestBlockHeader(context, req) -} - -func (h *FlowAccessAPIRouter) GetBlockHeaderByID(context context.Context, req *access.GetBlockHeaderByIDRequest) (*access.BlockHeaderResponse, error) { - return h.AccessAPIServer.GetBlockHeaderByID(context, req) -} - -func (h *FlowAccessAPIRouter) GetBlockHeaderByHeight(context context.Context, req *access.GetBlockHeaderByHeightRequest) (*access.BlockHeaderResponse, error) { - return h.AccessAPIServer.GetBlockHeaderByHeight(context, req) -} - -func (h *FlowAccessAPIRouter) GetLatestBlock(context context.Context, req *access.GetLatestBlockRequest) (*access.BlockResponse, error) { - return h.AccessAPIServer.GetLatestBlock(context, req) -} - -func (h *FlowAccessAPIRouter) GetBlockByID(context context.Context, req *access.GetBlockByIDRequest) (*access.BlockResponse, error) { - return h.AccessAPIServer.GetBlockByID(context, req) -} - -func (h *FlowAccessAPIRouter) GetBlockByHeight(context context.Context, req *access.GetBlockByHeightRequest) (*access.BlockResponse, error) { - return h.AccessAPIServer.GetBlockByHeight(context, req) -} - -func (h *FlowAccessAPIRouter) GetCollectionByID(context context.Context, req *access.GetCollectionByIDRequest) (*access.CollectionResponse, error) { - return h.AccessAPIServer.GetCollectionByID(context, req) -} - -func (h *FlowAccessAPIRouter) SendTransaction(context context.Context, req *access.SendTransactionRequest) (*access.SendTransactionResponse, error) { - return h.upstream.SendTransaction(context, req) -} - -func (h *FlowAccessAPIRouter) GetTransaction(context context.Context, req *access.GetTransactionRequest) (*access.TransactionResponse, error) { - return h.upstream.GetTransaction(context, req) -} - -func (h *FlowAccessAPIRouter) GetTransactionResult(context context.Context, req *access.GetTransactionRequest) (*access.TransactionResultResponse, error) { - return h.upstream.GetTransactionResult(context, req) -} - -func (h *FlowAccessAPIRouter) GetTransactionResultByIndex(context context.Context, req *access.GetTransactionByIndexRequest) (*access.TransactionResultResponse, error) { - return h.upstream.GetTransactionResultByIndex(context, req) -} - -func (h *FlowAccessAPIRouter) GetAccount(context context.Context, req *access.GetAccountRequest) (*access.GetAccountResponse, error) { - return h.upstream.GetAccount(context, req) -} - -func (h *FlowAccessAPIRouter) GetAccountAtLatestBlock(context context.Context, req *access.GetAccountAtLatestBlockRequest) (*access.AccountResponse, error) { - return h.upstream.GetAccountAtLatestBlock(context, req) -} - -func (h *FlowAccessAPIRouter) GetAccountAtBlockHeight(context context.Context, req *access.GetAccountAtBlockHeightRequest) (*access.AccountResponse, error) { - return h.upstream.GetAccountAtBlockHeight(context, req) -} - -func (h *FlowAccessAPIRouter) ExecuteScriptAtLatestBlock(context context.Context, req *access.ExecuteScriptAtLatestBlockRequest) (*access.ExecuteScriptResponse, error) { - return h.upstream.ExecuteScriptAtLatestBlock(context, req) -} - -func (h *FlowAccessAPIRouter) ExecuteScriptAtBlockID(context context.Context, req *access.ExecuteScriptAtBlockIDRequest) (*access.ExecuteScriptResponse, error) { - return 
h.upstream.ExecuteScriptAtBlockID(context, req) -} - -func (h *FlowAccessAPIRouter) ExecuteScriptAtBlockHeight(context context.Context, req *access.ExecuteScriptAtBlockHeightRequest) (*access.ExecuteScriptResponse, error) { - return h.upstream.ExecuteScriptAtBlockHeight(context, req) -} - -func (h *FlowAccessAPIRouter) GetEventsForHeightRange(context context.Context, req *access.GetEventsForHeightRangeRequest) (*access.EventsResponse, error) { - return h.upstream.GetEventsForHeightRange(context, req) -} - -func (h *FlowAccessAPIRouter) GetEventsForBlockIDs(context context.Context, req *access.GetEventsForBlockIDsRequest) (*access.EventsResponse, error) { - return h.upstream.GetEventsForBlockIDs(context, req) -} - -func (h *FlowAccessAPIRouter) GetNetworkParameters(context context.Context, req *access.GetNetworkParametersRequest) (*access.GetNetworkParametersResponse, error) { - return h.AccessAPIServer.GetNetworkParameters(context, req) -} - -func (h *FlowAccessAPIRouter) GetLatestProtocolStateSnapshot(context context.Context, req *access.GetLatestProtocolStateSnapshotRequest) (*access.ProtocolStateSnapshotResponse, error) { - return h.AccessAPIServer.GetLatestProtocolStateSnapshot(context, req) -} - -func (h *FlowAccessAPIRouter) GetExecutionResultForBlockID(context context.Context, req *access.GetExecutionResultForBlockIDRequest) (*access.ExecutionResultForBlockIDResponse, error) { - return h.upstream.GetExecutionResultForBlockID(context, req) -} - -// FlowAccessAPIForwarder forwards all requests to a set of upstream access nodes or observers -type FlowAccessAPIForwarder struct { - lock sync.Mutex - roundRobin int - ids flow.IdentityList - upstream []access.AccessAPIClient - connections []*grpc.ClientConn - timeout time.Duration -} - -// Ping pings the service. It is special in the sense that it responds successful, -// only if all underlying services are ready. 
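The forwarder methods below all route through `faultTolerantClient()`, whose body falls outside this hunk. A minimal sketch of the round-robin selection implied by the struct fields above (`lock`, `roundRobin`, `upstream`, `connections`); the connectivity check is an assumption for illustration, not the actual implementation:

```go
package apiproxy

import (
	"fmt"
	"sync"

	"github.com/onflow/flow/protobuf/go/flow/access"
	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// roundRobinClient sketches one plausible shape of faultTolerantClient:
// rotate through the upstream clients under the lock and return the first
// one whose underlying gRPC connection looks usable.
func roundRobinClient(
	lock *sync.Mutex,
	roundRobin *int,
	upstream []access.AccessAPIClient,
	connections []*grpc.ClientConn,
) (access.AccessAPIClient, error) {
	lock.Lock()
	defer lock.Unlock()
	for range upstream {
		*roundRobin = (*roundRobin + 1) % len(upstream)
		state := connections[*roundRobin].GetState()
		if state == connectivity.Ready || state == connectivity.Idle {
			return upstream[*roundRobin], nil
		}
	}
	return nil, fmt.Errorf("no healthy upstream client available")
}
```

The repeated `// This is a passthrough request` bodies below then reduce to: pick a client, forward the call, and surface any selection error to the caller.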
-func (h *FlowAccessAPIForwarder) Ping(context context.Context, req *access.PingRequest) (*access.PingResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.Ping(context, req) -} - -func (h *FlowAccessAPIForwarder) GetLatestBlockHeader(context context.Context, req *access.GetLatestBlockHeaderRequest) (*access.BlockHeaderResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetLatestBlockHeader(context, req) -} - -func (h *FlowAccessAPIForwarder) GetBlockHeaderByID(context context.Context, req *access.GetBlockHeaderByIDRequest) (*access.BlockHeaderResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetBlockHeaderByID(context, req) -} - -func (h *FlowAccessAPIForwarder) GetBlockHeaderByHeight(context context.Context, req *access.GetBlockHeaderByHeightRequest) (*access.BlockHeaderResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetBlockHeaderByHeight(context, req) -} - -func (h *FlowAccessAPIForwarder) GetLatestBlock(context context.Context, req *access.GetLatestBlockRequest) (*access.BlockResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetLatestBlock(context, req) -} - -func (h *FlowAccessAPIForwarder) GetBlockByID(context context.Context, req *access.GetBlockByIDRequest) (*access.BlockResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetBlockByID(context, req) -} - -func (h *FlowAccessAPIForwarder) GetBlockByHeight(context context.Context, req *access.GetBlockByHeightRequest) (*access.BlockResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetBlockByHeight(context, req) -} - -func (h *FlowAccessAPIForwarder) GetCollectionByID(context context.Context, req *access.GetCollectionByIDRequest) (*access.CollectionResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetCollectionByID(context, req) -} - -func (h *FlowAccessAPIForwarder) SendTransaction(context context.Context, req *access.SendTransactionRequest) (*access.SendTransactionResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.SendTransaction(context, req) -} - -func (h *FlowAccessAPIForwarder) GetTransaction(context context.Context, req *access.GetTransactionRequest) (*access.TransactionResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetTransaction(context, req) -} - -func (h *FlowAccessAPIForwarder) GetTransactionResult(context context.Context, req *access.GetTransactionRequest) (*access.TransactionResultResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetTransactionResult(context, req) -} - -func (h 
*FlowAccessAPIForwarder) GetTransactionResultByIndex(context context.Context, req *access.GetTransactionByIndexRequest) (*access.TransactionResultResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetTransactionResultByIndex(context, req) -} - -func (h *FlowAccessAPIForwarder) GetAccount(context context.Context, req *access.GetAccountRequest) (*access.GetAccountResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetAccount(context, req) -} - -func (h *FlowAccessAPIForwarder) GetAccountAtLatestBlock(context context.Context, req *access.GetAccountAtLatestBlockRequest) (*access.AccountResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetAccountAtLatestBlock(context, req) -} - -func (h *FlowAccessAPIForwarder) GetAccountAtBlockHeight(context context.Context, req *access.GetAccountAtBlockHeightRequest) (*access.AccountResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetAccountAtBlockHeight(context, req) -} - -func (h *FlowAccessAPIForwarder) ExecuteScriptAtLatestBlock(context context.Context, req *access.ExecuteScriptAtLatestBlockRequest) (*access.ExecuteScriptResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.ExecuteScriptAtLatestBlock(context, req) -} - -func (h *FlowAccessAPIForwarder) ExecuteScriptAtBlockID(context context.Context, req *access.ExecuteScriptAtBlockIDRequest) (*access.ExecuteScriptResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.ExecuteScriptAtBlockID(context, req) -} - -func (h *FlowAccessAPIForwarder) ExecuteScriptAtBlockHeight(context context.Context, req *access.ExecuteScriptAtBlockHeightRequest) (*access.ExecuteScriptResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.ExecuteScriptAtBlockHeight(context, req) -} - -func (h *FlowAccessAPIForwarder) GetEventsForHeightRange(context context.Context, req *access.GetEventsForHeightRangeRequest) (*access.EventsResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetEventsForHeightRange(context, req) -} - -func (h *FlowAccessAPIForwarder) GetEventsForBlockIDs(context context.Context, req *access.GetEventsForBlockIDsRequest) (*access.EventsResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetEventsForBlockIDs(context, req) -} - -func (h *FlowAccessAPIForwarder) GetNetworkParameters(context context.Context, req *access.GetNetworkParametersRequest) (*access.GetNetworkParametersResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetNetworkParameters(context, req) -} - -func (h *FlowAccessAPIForwarder) GetLatestProtocolStateSnapshot(context context.Context, req *access.GetLatestProtocolStateSnapshotRequest) 
(*access.ProtocolStateSnapshotResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetLatestProtocolStateSnapshot(context, req) -} - -func (h *FlowAccessAPIForwarder) GetExecutionResultForBlockID(context context.Context, req *access.GetExecutionResultForBlockIDRequest) (*access.ExecutionResultForBlockIDResponse, error) { - // This is a passthrough request - upstream, err := h.faultTolerantClient() - if err != nil { - return nil, err - } - return upstream.GetExecutionResultForBlockID(context, req) -} diff --git a/apiproxy/access_api_proxy_test.go b/apiproxy/access_api_proxy_test.go deleted file mode 100644 index 85be5054c09..00000000000 --- a/apiproxy/access_api_proxy_test.go +++ /dev/null @@ -1,272 +0,0 @@ -package apiproxy - -import ( - "context" - "fmt" - "net" - "testing" - "time" - - "github.com/onflow/flow/protobuf/go/flow/access" - "google.golang.org/grpc" - grpcinsecure "google.golang.org/grpc/credentials/insecure" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/grpcutils" -) - -// Methodology -// -// We test the proxy and fall-over logic to reach basic coverage. -// -// * Basic coverage means that all conditional checks happen once but only once. -// * We embrace the simplest adequate solution to reduce engineering cost. -// * Any use cases requiring multiple conditionals exercised in a row are considered ignorable due to cost constraints. - -// TestNetE2E tests the basic unix network first -func TestNetE2E(t *testing.T) { - done := make(chan int) - // Bring up 1st upstream server - charlie1, err := makeFlowLite("/tmp/TestProxyE2E1", done) - if err != nil { - t.Fatal(err) - } - // Wait until proxy call passes - err = callFlowLite("/tmp/TestProxyE2E1") - if err != nil { - t.Fatal(err) - } - // Bring up 2nd upstream server - charlie2, err := makeFlowLite("/tmp/TestProxyE2E2", done) - if err != nil { - t.Fatal(err) - } - // Both proxy calls should pass - err = callFlowLite("/tmp/TestProxyE2E1") - if err != nil { - t.Fatal(err) - } - err = callFlowLite("/tmp/TestProxyE2E2") - if err != nil { - t.Fatal(err) - } - // Stop 1st upstream server - _ = charlie1.Close() - // Proxy call falls through - err = callFlowLite("/tmp/TestProxyE2E1") - if err == nil { - t.Fatal(fmt.Errorf("backend still active after close")) - } - // Stop 2nd upstream server - _ = charlie2.Close() - // System errors out on shut down servers - err = callFlowLite("/tmp/TestProxyE2E1") - if err == nil { - t.Fatal(fmt.Errorf("backend still active after close")) - } - err = callFlowLite("/tmp/TestProxyE2E2") - if err == nil { - t.Fatal(fmt.Errorf("backend still active after close")) - } - // wait for all - <-done - <-done -} - -// TestgRPCE2E tests whether gRPC works -func TestGRPCE2E(t *testing.T) { - done := make(chan int) - // Bring up 1st upstream server - charlie1, _, err := newFlowLite("unix", "/tmp/TestProxyE2E1", done) - if err != nil { - t.Fatal(err) - } - // Wait until proxy call passes - err = openFlowLite("/tmp/TestProxyE2E1") - if err != nil { - t.Fatal(err) - } - // Bring up 2nd upstream server - charlie2, _, err := newFlowLite("unix", "/tmp/TestProxyE2E2", done) - if err != nil { - t.Fatal(err) - } - // Both proxy calls should pass - err = openFlowLite("/tmp/TestProxyE2E1") - if err != nil { - t.Fatal(err) - } - err = openFlowLite("/tmp/TestProxyE2E2") - if err != nil { - t.Fatal(err) - } - // Stop 1st upstream server - charlie1.Stop() - // Proxy call falls through - err = 
openFlowLite("/tmp/TestProxyE2E1") - if err == nil { - t.Fatal(fmt.Errorf("backend still active after close")) - } - // Stop 2nd upstream server - charlie2.Stop() - // System errors out on shut down servers - err = openFlowLite("/tmp/TestProxyE2E1") - if err == nil { - t.Fatal(fmt.Errorf("backend still active after close")) - } - err = openFlowLite("/tmp/TestProxyE2E2") - if err == nil { - t.Fatal(fmt.Errorf("backend still active after close")) - } - // wait for all - <-done - <-done -} - -// TestNewFlowCachedAccessAPIProxy tests the round robin end to end -func TestNewFlowCachedAccessAPIProxy(t *testing.T) { - done := make(chan int) - - // Bring up 1st upstream server - charlie1, _, err := newFlowLite("tcp", "127.0.0.1:11634", done) - if err != nil { - t.Fatal(err) - } - - // Prepare a proxy that fails due to the second connection being idle - l := flow.IdentityList{{Address: "127.0.0.1:11634"}, {Address: "127.0.0.1:11635"}} - c := FlowAccessAPIForwarder{} - err = c.setFlowAccessAPI(l, time.Second) - if err == nil { - t.Fatal(fmt.Errorf("should not start with one connection ready")) - } - - // Bring up 2nd upstream server - charlie2, _, err := newFlowLite("tcp", "127.0.0.1:11635", done) - if err != nil { - t.Fatal(err) - } - - background := context.Background() - - // Prepare a proxy - l = flow.IdentityList{{Address: "127.0.0.1:11634"}, {Address: "127.0.0.1:11635"}} - c = FlowAccessAPIForwarder{} - err = c.setFlowAccessAPI(l, time.Second) - if err != nil { - t.Fatal(err) - } - - // Wait until proxy call passes - _, err = c.Ping(background, &access.PingRequest{}) - if err != nil { - t.Fatal(err) - } - - // Wait until proxy call passes - _, err = c.Ping(background, &access.PingRequest{}) - if err != nil { - t.Fatal(err) - } - - // Wait until proxy call passes - _, err = c.Ping(background, &access.PingRequest{}) - if err != nil { - t.Fatal(err) - } - - charlie1.Stop() - charlie2.Stop() - - // Wait until proxy call fails - _, err = c.Ping(background, &access.PingRequest{}) - if err == nil { - t.Fatal(fmt.Errorf("should fail on no connections")) - } - - <-done - <-done -} - -func makeFlowLite(address string, done chan int) (net.Listener, error) { - l, err := net.Listen("unix", address) - if err != nil { - return nil, err - } - - go func(done chan int) { - for { - c, err := l.Accept() - if err != nil { - break - } - - b := make([]byte, 3) - _, _ = c.Read(b) - _, _ = c.Write(b) - _ = c.Close() - } - done <- 1 - }(done) - return l, err -} - -func callFlowLite(address string) error { - c, err := net.Dial("unix", address) - if err != nil { - return err - } - o := []byte("abc") - _, _ = c.Write(o) - i := make([]byte, 3) - _, _ = c.Read(i) - if string(o) != string(i) { - return fmt.Errorf("no match") - } - _ = c.Close() - _ = MockFlowAccessAPI{} - return err -} - -func newFlowLite(network string, address string, done chan int) (*grpc.Server, *net.Listener, error) { - l, err := net.Listen(network, address) - if err != nil { - return nil, nil, err - } - s := grpc.NewServer() - go func(done chan int) { - access.RegisterAccessAPIServer(s, MockFlowAccessAPI{}) - _ = s.Serve(l) - done <- 1 - }(done) - return s, &l, nil -} - -func openFlowLite(address string) error { - c, err := grpc.Dial( - "unix://"+address, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), - grpc.WithTransportCredentials(grpcinsecure.NewCredentials())) - if err != nil { - return err - } - a := access.NewAccessAPIClient(c) - - background := context.Background() - - _, err = a.Ping(background, 
&access.PingRequest{}) - if err != nil { - return err - } - - return nil -} - -type MockFlowAccessAPI struct { - access.AccessAPIServer -} - -// Ping is used to check if the access node is alive and healthy. -func (p MockFlowAccessAPI) Ping(context.Context, *access.PingRequest) (*access.PingResponse, error) { - return &access.PingResponse{}, nil -} diff --git a/bors.toml b/bors.toml index acb31db6ed9..4366b20e275 100644 --- a/bors.toml +++ b/bors.toml @@ -17,10 +17,13 @@ status = [ "Unit Tests (utils)", "Unit Tests (others)", "Integration Tests (make -C integration access-tests)", - "Integration Tests (make -C integration bft-tests)", + "Integration Tests (make -C integration bft-framework-tests)", + "Integration Tests (make -C integration bft-gossipsub-tests)", + "Integration Tests (make -C integration bft-protocol-tests)", "Integration Tests (make -C integration collection-tests)", "Integration Tests (make -C integration consensus-tests)", - "Integration Tests (make -C integration epochs-tests)", + "Integration Tests (make -C integration epochs-cohort1-tests)", + "Integration Tests (make -C integration epochs-cohort2-tests)", "Integration Tests (make -C integration execution-tests)", "Integration Tests (make -C integration ghost-tests)", "Integration Tests (make -C integration mvp-tests)", diff --git a/cmd/Dockerfile b/cmd/Dockerfile index fc4bcf7badb..b0e5398fb36 100644 --- a/cmd/Dockerfile +++ b/cmd/Dockerfile @@ -3,10 +3,10 @@ #################################### ## (1) Setup the build environment -FROM golang:1.19-bullseye AS build-setup +FROM golang:1.25-bookworm AS build-setup RUN apt-get update -RUN apt-get -y install cmake zip +RUN apt-get -y install zip apt-utils gcc-aarch64-linux-gnu ## (2) Setup crypto dependencies FROM build-setup AS build-env @@ -23,10 +23,12 @@ ENV GOPRIVATE= COPY . . -RUN --mount=type=cache,sharing=locked,target=/go/pkg/mod \ - --mount=type=cache,target=/root/.cache/go-build \ - --mount=type=secret,id=git_creds,dst=/root/.netrc \ - make crypto_setup_gopath +# Update the git config to use SSH rather than HTTPS for clones +RUN git config --global url.git@github.com:.insteadOf https://github.com/ +RUN mkdir ~/.ssh + +# Add GitHub's host key to known_hosts to avoid prompts or failures during host key checking +RUN ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts #################################### ## (3) Build the production app binary FROM build-env as build-production WORKDIR /app ARG GOARCH=amd64 - # TAGS can be overridden to modify the go build tags (e.g. build without netgo) -ARG TAGS="relic,netgo" +ARG TAGS="netgo,osusergo" +# The CC flag can be overridden to specify a C compiler +ARG CC="" +# CGO_FLAG enables ADX instructions by default; override it to build without ADX +ARG CGO_FLAG="" # Keep Go's build cache between builds. 
# https://github.com/golang/go/issues/27719#issuecomment-514747274 RUN --mount=type=cache,sharing=locked,target=/go/pkg/mod \ --mount=type=cache,target=/root/.cache/go-build \ - --mount=type=secret,id=git_creds,dst=/root/.netrc \ - CGO_ENABLED=1 GOOS=linux go build --tags "${TAGS}" -ldflags "-extldflags -static \ + --mount=type=secret,id=cadence_deploy_key \ + # We start an SSH agent so we can safely pass in a deploy key for cloning dependencies + # We explicitly use ";" rather than && so the build still proceeds safely if the key is unavailable + eval `ssh-agent -s` && printf "%s\n" "$(cat /run/secrets/cadence_deploy_key)" | ssh-add - ; \ + CGO_ENABLED=1 GOOS=linux GOARCH=${GOARCH} CC="${CC}" CGO_CFLAGS="${CGO_FLAG}" go build --tags "${TAGS}" -ldflags "-extldflags -static \ -X 'github.com/onflow/flow-go/cmd/build.commit=${COMMIT}' -X 'github.com/onflow/flow-go/cmd/build.semver=${VERSION}'" \ -o ./app ${TARGET} @@ -61,17 +69,23 @@ ENTRYPOINT ["/bin/app"] FROM build-env as build-debug WORKDIR /app ARG GOARCH=amd64 +ARG CC="" +ARG CGO_FLAG="" RUN --mount=type=ssh \ --mount=type=cache,sharing=locked,target=/go/pkg/mod \ --mount=type=cache,target=/root/.cache/go-build \ - CGO_ENABLED=1 GOOS=linux go build --tags "relic,netgo" -ldflags "-extldflags -static \ + --mount=type=secret,id=cadence_deploy_key \ + # We start an SSH agent so we can safely pass in a deploy key for cloning dependencies + # We explicitly use ";" rather than && so the build still proceeds safely if the key is unavailable + eval `ssh-agent -s` && printf "%s\n" "$(cat /run/secrets/cadence_deploy_key)" | ssh-add - ; \ + CGO_ENABLED=1 GOOS=linux GOARCH=${GOARCH} CC="${CC}" CGO_CFLAGS="${CGO_FLAG}" go build --tags "netgo" -ldflags "-extldflags -static \ -X 'github.com/onflow/flow-go/cmd/build.commit=${COMMIT}' -X 'github.com/onflow/flow-go/cmd/build.semver=${VERSION}'" \ -gcflags="all=-N -l" -o ./app ${TARGET} RUN chmod a+x /app/app ## (4) Add the statically linked debug binary to a distroless image configured for debugging -FROM golang:1.19-bullseye as debug +FROM golang:1.25-bookworm as debug RUN go install github.com/go-delve/delve/cmd/dlv@latest diff --git a/cmd/access/README.md b/cmd/access/README.md index 5a6681902f3..2da7fb4a050 100644 --- a/cmd/access/README.md +++ b/cmd/access/README.md @@ -88,4 +88,4 @@ This helps identify nodes in the system which are unreachable. 
### Access node sequence diagram -![Access node sequence diagram](/docs/AccessNodeSequenceDiagram.png) +![Access node sequence diagram](/docs/images/AccessNodeSequenceDiagram.png) diff --git a/cmd/access/main.go b/cmd/access/main.go index 1deac0311ce..6b0dfa9e02c 100644 --- a/cmd/access/main.go +++ b/cmd/access/main.go @@ -1,6 +1,8 @@ package main import ( + "context" + "github.com/onflow/flow-go/cmd" nodebuilder "github.com/onflow/flow-go/cmd/access/node_builder" "github.com/onflow/flow-go/model/flow" @@ -24,5 +26,5 @@ func main() { if err != nil { builder.Logger.Fatal().Err(err).Send() } - node.Run() + node.Run(context.Background()) } diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 66355eaed39..81d618a6c33 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -4,27 +4,31 @@ import ( "context" "errors" "fmt" + "math" "os" + "path" "path/filepath" "strings" "time" - badger "github.com/ipfs/go-ds-badger2" + "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/routing" + "github.com/onflow/crypto" + "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" "github.com/spf13/pflag" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/go-bitswap" - + txvalidator "github.com/onflow/flow-go/access/validator" "github.com/onflow/flow-go/admin/commands" stateSyncCommands "github.com/onflow/flow-go/admin/commands/state_synchronization" storageCommands "github.com/onflow/flow-go/admin/commands/storage" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/build" "github.com/onflow/flow-go/consensus" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/committees" @@ -34,55 +38,89 @@ import ( hotstuffvalidator "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/verification" recovery "github.com/onflow/flow-go/consensus/recovery/protocol" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/index" "github.com/onflow/flow-go/engine/access/ingestion" + "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages" pingeng "github.com/onflow/flow-go/engine/access/ping" + "github.com/onflow/flow-go/engine/access/rest" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/engine/access/rest/websockets" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + rpcConnection "github.com/onflow/flow-go/engine/access/rpc/connection" "github.com/onflow/flow-go/engine/access/state_stream" + statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/engine/access/subscription" + subscriptiontracker "github.com/onflow/flow-go/engine/access/subscription/tracker" followereng "github.com/onflow/flow-go/engine/common/follower" "github.com/onflow/flow-go/engine/common/requester" + commonrpc 
"github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/stop" synceng "github.com/onflow/flow-go/engine/common/synchronization" + "github.com/onflow/flow-go/engine/common/version" + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/engine/execution/computation/query" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/complete/wal" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/chainsync" - modulecompliance "github.com/onflow/flow-go/module/compliance" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/execution" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + execdatacache "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/executiondatasync/pruner" + edstorage "github.com/onflow/flow-go/module/executiondatasync/storage" + "github.com/onflow/flow-go/module/executiondatasync/tracker" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/id" + "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/metrics/unstaked" "github.com/onflow/flow-go/module/state_synchronization" + "github.com/onflow/flow-go/module/state_synchronization/indexer" edrequester "github.com/onflow/flow-go/module/state_synchronization/requester" "github.com/onflow/flow-go/network" + alspmgr "github.com/onflow/flow-go/network/alsp/manager" netcache "github.com/onflow/flow-go/network/cache" "github.com/onflow/flow-go/network/channels" cborcodec "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/blob" + p2pbuilder "github.com/onflow/flow-go/network/p2p/builder" + p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config" "github.com/onflow/flow-go/network/p2p/cache" + "github.com/onflow/flow-go/network/p2p/conduit" "github.com/onflow/flow-go/network/p2p/connection" "github.com/onflow/flow-go/network/p2p/dht" - "github.com/onflow/flow-go/network/p2p/middleware" - "github.com/onflow/flow-go/network/p2p/p2pbuilder" - p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" - "github.com/onflow/flow-go/network/p2p/p2pbuilder/inspector" - "github.com/onflow/flow-go/network/p2p/subscription" - "github.com/onflow/flow-go/network/p2p/tracer" + networkingsubscription "github.com/onflow/flow-go/network/p2p/subscription" "github.com/onflow/flow-go/network/p2p/translator" "github.com/onflow/flow-go/network/p2p/unicast/protocols" relaynet "github.com/onflow/flow-go/network/relay" "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/topology" + "github.com/onflow/flow-go/network/underlay" "github.com/onflow/flow-go/network/validator" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" + statedatastore "github.com/onflow/flow-go/state/protocol/datastore" "github.com/onflow/flow-go/storage" bstorage 
"github.com/onflow/flow-go/storage/badger" + pstorage "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/grpcutils" ) @@ -105,34 +143,53 @@ import ( // For a node running as a standalone process, the config fields will be populated from the command line params, // while for a node running as a library, the config fields are expected to be initialized by the caller. type AccessNodeConfig struct { - supportsObserver bool // True if this is an Access node that supports observers and consensus follower engines - collectionGRPCPort uint - executionGRPCPort uint - pingEnabled bool - nodeInfoFile string - apiRatelimits map[string]int - apiBurstlimits map[string]int - rpcConf rpc.Config - stateStreamConf state_stream.Config - stateStreamFilterConf map[string]int - ExecutionNodeAddress string // deprecated - HistoricalAccessRPCs []access.AccessAPIClient - logTxTimeToFinalized bool - logTxTimeToExecuted bool - logTxTimeToFinalizedExecuted bool - retryEnabled bool - rpcMetricsEnabled bool - executionDataSyncEnabled bool - executionDataDir string - executionDataStartHeight uint64 - executionDataConfig edrequester.ExecutionDataConfig - PublicNetworkConfig PublicNetworkConfig + supportsObserver bool // True if this is an Access node that supports observers and consensus follower engines + pingEnabled bool + nodeInfoFile string + apiRatelimits map[string]int + apiBurstlimits map[string]int + rpcConf rpc.Config + stateStreamConf statestreambackend.Config + stateStreamFilterConf map[string]int + ExecutionNodeAddress string // deprecated + HistoricalAccessRPCs []access.AccessAPIClient + logTxTimeToFinalized bool + logTxTimeToExecuted bool + logTxTimeToFinalizedExecuted bool + logTxTimeToSealed bool + retryEnabled bool + rpcMetricsEnabled bool + executionDataSyncEnabled bool + publicNetworkExecutionDataEnabled bool + executionDataPrunerHeightRangeTarget uint64 + executionDataPrunerThreshold uint64 + executionDataPruningInterval time.Duration + executionDataDir string + executionDataStartHeight uint64 + executionDataConfig edrequester.ExecutionDataConfig + PublicNetworkConfig PublicNetworkConfig + TxResultCacheSize uint + executionDataIndexingEnabled bool + registersDBPath string + checkpointFile string + scriptExecutorConfig query.QueryConfig + scriptExecMinBlock uint64 + scriptExecMaxBlock uint64 + registerCacheType string + registerCacheSize uint + programCacheSize uint + checkPayerBalanceMode string + versionControlEnabled bool + storeTxResultErrorMessages bool + stopControlEnabled bool + registerDBPruneThreshold uint64 + scheduledCallbacksEnabled bool } type PublicNetworkConfig struct { // NetworkKey crypto.PublicKey // TODO: do we need a different key for the public network? 
BindAddress string - Network network.Network + Network network.EngineRegistry Metrics module.NetworkMetrics } @@ -140,51 +197,76 @@ type PublicNetworkConfig struct { func DefaultAccessNodeConfig() *AccessNodeConfig { homedir, _ := os.UserHomeDir() return &AccessNodeConfig{ - supportsObserver: false, - collectionGRPCPort: 9000, - executionGRPCPort: 9000, + supportsObserver: false, rpcConf: rpc.Config{ - UnsecureGRPCListenAddr: "0.0.0.0:9000", - SecureGRPCListenAddr: "0.0.0.0:9001", - HTTPListenAddr: "0.0.0.0:8000", - RESTListenAddr: "", - CollectionAddr: "", - HistoricalAccessAddrs: "", - CollectionClientTimeout: 3 * time.Second, - ExecutionClientTimeout: 3 * time.Second, - ConnectionPoolSize: backend.DefaultConnectionPoolSize, - MaxHeightRange: backend.DefaultMaxHeightRange, - PreferredExecutionNodeIDs: nil, - FixedExecutionNodeIDs: nil, - ArchiveAddressList: nil, - MaxMsgSize: grpcutils.DefaultMaxMsgSize, + UnsecureGRPCListenAddr: "0.0.0.0:9000", + SecureGRPCListenAddr: "0.0.0.0:9001", + HTTPListenAddr: "0.0.0.0:8000", + CollectionAddr: "", + HistoricalAccessAddrs: "", + BackendConfig: backend.Config{ + AccessConfig: rpcConnection.DefaultAccessConfig(), + CollectionConfig: rpcConnection.DefaultCollectionConfig(), + ExecutionConfig: rpcConnection.DefaultExecutionConfig(), + ConnectionPoolSize: backend.DefaultConnectionPoolSize, + MaxHeightRange: events.DefaultMaxHeightRange, + PreferredExecutionNodeIDs: nil, + FixedExecutionNodeIDs: nil, + CircuitBreakerConfig: rpcConnection.CircuitBreakerConfig{ + Enabled: false, + RestoreTimeout: 60 * time.Second, + MaxFailures: 5, + MaxRequests: 1, + }, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + }, + RestConfig: rest.Config{ + ListenAddress: "", + WriteTimeout: rest.DefaultWriteTimeout, + ReadTimeout: rest.DefaultReadTimeout, + IdleTimeout: rest.DefaultIdleTimeout, + MaxRequestSize: commonrpc.DefaultAccessMaxRequestSize, + MaxResponseSize: commonrpc.DefaultAccessMaxResponseSize, + }, + DeprecatedMaxMsgSize: 0, + CompressorName: grpcutils.NoCompressor, + WebSocketConfig: websockets.NewDefaultWebsocketConfig(), + EnableWebSocketsStreamAPI: true, }, - stateStreamConf: state_stream.Config{ - MaxExecutionDataMsgSize: grpcutils.DefaultMaxMsgSize, - ExecutionDataCacheSize: state_stream.DefaultCacheSize, - ClientSendTimeout: state_stream.DefaultSendTimeout, - ClientSendBufferSize: state_stream.DefaultSendBufferSize, - MaxGlobalStreams: state_stream.DefaultMaxGlobalStreams, + stateStreamConf: statestreambackend.Config{ + MaxExecutionDataMsgSize: commonrpc.DefaultAccessMaxResponseSize, + ExecutionDataCacheSize: subscription.DefaultCacheSize, + ClientSendTimeout: subscription.DefaultSendTimeout, + ClientSendBufferSize: subscription.DefaultSendBufferSize, + MaxGlobalStreams: subscription.DefaultMaxGlobalStreams, EventFilterConfig: state_stream.DefaultEventFilterConfig, + RegisterIDsRequestLimit: state_stream.DefaultRegisterIDsRequestLimit, + ResponseLimit: subscription.DefaultResponseLimit, + HeartbeatInterval: subscription.DefaultHeartbeatInterval, }, stateStreamFilterConf: nil, ExecutionNodeAddress: "localhost:9000", logTxTimeToFinalized: false, logTxTimeToExecuted: false, logTxTimeToFinalizedExecuted: false, + logTxTimeToSealed: false, pingEnabled: false, retryEnabled: false, 
rpcMetricsEnabled: false, nodeInfoFile: "", apiRatelimits: nil, apiBurstlimits: nil, + TxResultCacheSize: 0, PublicNetworkConfig: PublicNetworkConfig{ BindAddress: cmd.NotSet, Metrics: metrics.NewNoopCollector(), }, - executionDataSyncEnabled: true, - executionDataDir: filepath.Join(homedir, ".flow", "execution_data"), - executionDataStartHeight: 0, + executionDataSyncEnabled: true, + publicNetworkExecutionDataEnabled: false, + executionDataDir: filepath.Join(homedir, ".flow", "execution_data"), + executionDataStartHeight: 0, executionDataConfig: edrequester.ExecutionDataConfig{ InitialBlockHeight: 0, MaxSearchAhead: edrequester.DefaultMaxSearchAhead, @@ -193,6 +275,24 @@ func DefaultAccessNodeConfig() *AccessNodeConfig { RetryDelay: edrequester.DefaultRetryDelay, MaxRetryDelay: edrequester.DefaultMaxRetryDelay, }, + executionDataIndexingEnabled: false, + executionDataPrunerHeightRangeTarget: 0, + executionDataPrunerThreshold: pruner.DefaultThreshold, + executionDataPruningInterval: pruner.DefaultPruningInterval, + registersDBPath: filepath.Join(homedir, ".flow", "execution_state"), + checkpointFile: cmd.NotSet, + scriptExecutorConfig: query.NewDefaultConfig(), + scriptExecMinBlock: 0, + scriptExecMaxBlock: math.MaxUint64, + registerCacheType: pstorage.CacheTypeTwoQueue.String(), + registerCacheSize: 0, + programCacheSize: 0, + checkPayerBalanceMode: txvalidator.Disabled.String(), + versionControlEnabled: true, + storeTxResultErrorMessages: false, + stopControlEnabled: false, + registerDBPruneThreshold: 0, + scheduledCallbacksEnabled: fvm.DefaultScheduledCallbacksEnabled, } } @@ -204,26 +304,53 @@ type FlowAccessNodeBuilder struct { *AccessNodeConfig // components - FollowerState protocol.FollowerState - SyncCore *chainsync.Core - RpcEng *rpc.Engine - FollowerDistributor *consensuspubsub.FollowerDistributor - CollectionRPC access.AccessAPIClient - TransactionTimings *stdmap.TransactionTimings - CollectionsToMarkFinalized *stdmap.Times - CollectionsToMarkExecuted *stdmap.Times - BlocksToMarkExecuted *stdmap.Times - TransactionMetrics module.TransactionMetrics - AccessMetrics module.AccessMetrics - PingMetrics module.PingMetrics - Committee hotstuff.DynamicCommittee - Finalized *flow.Header - Pending []*flow.Header - FollowerCore module.HotStuffFollower - Validator hotstuff.Validator - ExecutionDataDownloader execution_data.Downloader - ExecutionDataRequester state_synchronization.ExecutionDataRequester - ExecutionDataStore execution_data.ExecutionDataStore + FollowerState protocol.FollowerState + SyncCore *chainsync.Core + RpcEng *rpc.Engine + FollowerDistributor *consensuspubsub.FollowerDistributor + CollectionRPC access.AccessAPIClient + TransactionTimings *stdmap.TransactionTimings + CollectionsToMarkFinalized *stdmap.Times + CollectionsToMarkExecuted *stdmap.Times + BlocksToMarkExecuted *stdmap.Times + BlockTransactions *stdmap.IdentifierMap + TransactionMetrics *metrics.TransactionCollector + TransactionValidationMetrics *metrics.TransactionValidationCollector + RestMetrics *metrics.RestCollector + AccessMetrics module.AccessMetrics + PingMetrics module.PingMetrics + Committee hotstuff.DynamicCommittee + Finalized *flow.Header // latest finalized block that the node knows of at startup time + Pending []*flow.ProposalHeader + FollowerCore module.HotStuffFollower + Validator hotstuff.Validator + ExecutionDataDownloader execution_data.Downloader + PublicBlobService network.BlobService + ExecutionDataRequester state_synchronization.ExecutionDataRequester + ExecutionDataStore 
execution_data.ExecutionDataStore + ExecutionDataBlobstore blobs.Blobstore + ExecutionDataCache *execdatacache.ExecutionDataCache + ExecutionIndexer *indexer.Indexer + ExecutionIndexerCore *indexer.IndexerCore + ScriptExecutor *backend.ScriptExecutor + RegistersAsyncStore *execution.RegistersAsyncStore + Reporter *index.Reporter + EventsIndex *index.EventsIndex + TxResultsIndex *index.TransactionResultsIndex + IndexerDependencies *cmd.DependencyList + collectionExecutedMetric module.CollectionExecutedMetric + ExecutionDataPruner *pruner.Pruner + ExecutionDatastoreManager edstorage.DatastoreManager + ExecutionDataTracker tracker.Storage + VersionControl *version.VersionControl + StopControl *stop.StopControl + + // storage + events storage.Events + lightTransactionResults storage.LightTransactionResults + transactionResultErrorMessages storage.TransactionResultErrorMessages + transactions storage.Transactions + collections storage.Collections // The sync engine participants provider is the libp2p peer store for the access node // which is not available until after the network has started. @@ -235,7 +362,19 @@ type FlowAccessNodeBuilder struct { RequestEng *requester.Engine FollowerEng *followereng.ComplianceEngine SyncEng *synceng.Engine - StateStreamEng *state_stream.Engine + StateStreamEng *statestreambackend.Engine + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer + stateStreamGrpcServer *grpcserver.GrpcServer + + stateStreamBackend *statestreambackend.StateStreamBackend + nodeBackend *backend.Backend + + ExecNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider + TxResultErrorMessagesCore *tx_error_messages.TxErrorMessagesCore + txResultErrorMessageProvider error_messages.Provider } func (builder *FlowAccessNodeBuilder) buildFollowerState() *FlowAccessNodeBuilder { @@ -305,7 +444,7 @@ func (builder *FlowAccessNodeBuilder) buildFollowerCore() *FlowAccessNodeBuilder builder.Component("follower core", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // create a finalizer that will handle updating the protocol // state when the follower detects newly finalized blocks - final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, builder.FollowerState, node.Tracer) + final := finalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, builder.FollowerState, node.Tracer) packer := signature.NewConsensusSigDataPacker(builder.Committee) // initialize the verifier for the protocol consensus @@ -314,10 +453,11 @@ func (builder *FlowAccessNodeBuilder) buildFollowerCore() *FlowAccessNodeBuilder followerCore, err := consensus.NewFollower( node.Logger, + node.Metrics.Mempool, node.Storage.Headers, final, builder.FollowerDistributor, - node.RootBlock.Header, + node.FinalizedRootBlock.ToHeader(), node.RootQC, builder.Finalized, builder.Pending, @@ -357,17 +497,18 @@ func (builder *FlowAccessNodeBuilder) buildFollowerEngine() *FlowAccessNodeBuild builder.FollowerEng, err = followereng.NewComplianceLayer( node.Logger, - node.Network, + node.EngineRegistry, node.Me, node.Metrics.Engine, node.Storage.Headers, builder.Finalized, core, - followereng.WithComplianceConfigOpt(modulecompliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold)), + node.ComplianceConfig, ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) } + builder.FollowerDistributor.AddOnBlockFinalizedConsumer(builder.FollowerEng.OnFinalizedBlock) return builder.FollowerEng, nil }) @@ 
-377,21 +518,27 @@ func (builder *FlowAccessNodeBuilder) buildFollowerEngine() *FlowAccessNodeBuild func (builder *FlowAccessNodeBuilder) buildSyncEngine() *FlowAccessNodeBuilder { builder.Component("sync engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + spamConfig, err := synceng.NewSpamDetectionConfig() + if err != nil { + return nil, fmt.Errorf("could not initialize spam detection config: %w", err) + } sync, err := synceng.New( node.Logger, node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, node.State, node.Storage.Blocks, builder.FollowerEng, builder.SyncCore, builder.SyncEngineParticipantsProviderFactory(), + spamConfig, ) if err != nil { return nil, fmt.Errorf("could not create synchronization engine: %w", err) } builder.SyncEng = sync + builder.FollowerDistributor.AddFinalizationConsumer(sync) return builder.SyncEng, nil }) @@ -412,47 +559,58 @@ func (builder *FlowAccessNodeBuilder) BuildConsensusFollower() *FlowAccessNodeBu return builder } -func (builder *FlowAccessNodeBuilder) BuildExecutionDataRequester() *FlowAccessNodeBuilder { - var ds *badger.Datastore +func (builder *FlowAccessNodeBuilder) BuildExecutionSyncComponents() *FlowAccessNodeBuilder { var bs network.BlobService - var processedBlockHeight storage.ConsumerProgress - var processedNotifications storage.ConsumerProgress + var processedBlockHeight storage.ConsumerProgressInitializer + var processedNotifications storage.ConsumerProgressInitializer var bsDependable *module.ProxiedReadyDoneAware var execDataDistributor *edrequester.ExecutionDataDistributor + var execDataCacheBackend *herocache.BlockExecutionData + var executionDataStoreCache *execdatacache.ExecutionDataCache + + // setup dependency chain to ensure indexer starts after the requester + requesterDependable := module.NewProxiedReadyDoneAware() + builder.IndexerDependencies.Add(requesterDependable) + + executionDataPrunerEnabled := builder.executionDataPrunerHeightRangeTarget != 0 builder. AdminCommand("read-execution-data", func(config *cmd.NodeConfig) commands.AdminCommand { return stateSyncCommands.NewReadExecutionDataCommand(builder.ExecutionDataStore) }). - Module("execution data datastore and blobstore", func(node *cmd.NodeConfig) error { - datastoreDir := filepath.Join(builder.executionDataDir, "blobstore") - err := os.MkdirAll(datastoreDir, 0700) - if err != nil { - return err - } + Module("transactions and collections storage", func(node *cmd.NodeConfig) error { + transactions := store.NewTransactions(node.Metrics.Cache, node.ProtocolDB) + collections := store.NewCollections(node.ProtocolDB, transactions) + builder.transactions = transactions + builder.collections = collections - ds, err = badger.NewDatastore(datastoreDir, &badger.DefaultOptions) + return nil + }). + Module("execution data datastore and blobstore", func(node *cmd.NodeConfig) error { + var err error + builder.ExecutionDatastoreManager, err = edstorage.CreateDatastoreManager( + node.Logger, builder.executionDataDir) if err != nil { - return err + return fmt.Errorf("could not create execution data datastore manager: %w", err) } - builder.ShutdownFunc(func() error { - if err := ds.Close(); err != nil { - return fmt.Errorf("could not close execution data datastore: %w", err) - } - return nil - }) + builder.ShutdownFunc(builder.ExecutionDatastoreManager.Close) return nil }). 
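The `ProxiedReadyDoneAware` created above (`requesterDependable`, added to `builder.IndexerDependencies`) lets the indexer declare a startup dependency on the requester before the requester is constructed. A self-contained sketch of the pattern, assuming the channel-based `Ready()`/`Done()` lifecycle interface used throughout flow-go; names here are illustrative, not the actual `module` implementation:

```go
package lifecycle

// ReadyDoneAware mirrors the lifecycle interface flow-go components expose
// (assumed from its usage in this file).
type ReadyDoneAware interface {
	Ready() <-chan struct{}
	Done() <-chan struct{}
}

// Proxied stands in for a component that does not exist yet. Dependents can
// take Ready() now; it only unblocks once the real component is injected via
// Init and becomes ready itself.
type Proxied struct {
	init  chan struct{}
	inner ReadyDoneAware
}

func NewProxied() *Proxied { return &Proxied{init: make(chan struct{})} }

// Init injects the real component; call exactly once.
func (p *Proxied) Init(inner ReadyDoneAware) {
	p.inner = inner
	close(p.init)
}

func (p *Proxied) Ready() <-chan struct{} {
	out := make(chan struct{})
	go func() {
		<-p.init          // wait until the real component is constructed
		<-p.inner.Ready() // then until it reports ready
		close(out)
	}()
	return out
}

func (p *Proxied) Done() <-chan struct{} {
	out := make(chan struct{})
	go func() {
		<-p.init
		<-p.inner.Done()
		close(out)
	}()
	return out
}
```

The same mechanism backs `bsDependable` below, so the peer manager can wait on the blob service without a direct construction-order dependency.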
Module("processed block height consumer progress", func(node *cmd.NodeConfig) error { - // uses the datastore's DB - processedBlockHeight = bstorage.NewConsumerProgress(ds.DB, module.ConsumeProgressExecutionDataRequesterBlockHeight) + // Note: progress is stored in the datastore's DB since that is where the jobqueue + // writes execution data to. + db := builder.ExecutionDatastoreManager.DB() + + processedBlockHeight = store.NewConsumerProgress(db, module.ConsumeProgressExecutionDataRequesterBlockHeight) return nil }). Module("processed notifications consumer progress", func(node *cmd.NodeConfig) error { - // uses the datastore's DB - processedNotifications = bstorage.NewConsumerProgress(ds.DB, module.ConsumeProgressExecutionDataRequesterNotification) + // Note: progress is stored in the datastore's DB since that is where the jobqueue + // writes execution data to. + db := builder.ExecutionDatastoreManager.DB() + processedNotifications = store.NewConsumerProgress(db, module.ConsumeProgressExecutionDataRequesterNotification) return nil }). Module("blobservice peer manager dependencies", func(node *cmd.NodeConfig) error { @@ -461,12 +619,32 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionDataRequester() *FlowAccessN return nil }). Module("execution datastore", func(node *cmd.NodeConfig) error { - blobstore := blobs.NewBlobstore(ds) - builder.ExecutionDataStore = execution_data.NewExecutionDataStore(blobstore, execution_data.DefaultSerializer) + builder.ExecutionDataBlobstore = blobs.NewBlobstore(builder.ExecutionDatastoreManager.Datastore()) + builder.ExecutionDataStore = execution_data.NewExecutionDataStore(builder.ExecutionDataBlobstore, execution_data.DefaultSerializer) return nil }). - Component("execution data service", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + Module("execution data cache", func(node *cmd.NodeConfig) error { + var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() + if builder.HeroCacheMetricsEnable { + heroCacheCollector = metrics.AccessNodeExecutionDataCacheMetrics(builder.MetricsRegisterer) + } + + execDataCacheBackend = herocache.NewBlockExecutionData(builder.stateStreamConf.ExecutionDataCacheSize, builder.Logger, heroCacheCollector) + + // Execution Data cache that uses a blobstore as the backend (instead of a downloader) + // This ensures that it simply returns a not found error if the blob doesn't exist + // instead of attempting to download it from the network. + executionDataStoreCache = execdatacache.NewExecutionDataCache( + builder.ExecutionDataStore, + builder.Storage.Headers, + builder.Storage.Seals, + builder.Storage.Results, + execDataCacheBackend, + ) + return nil + }). + Component("execution data service", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { opts := []network.BlobServiceOption{ blob.WithBitswapOptions( // Only allow block requests from staked ENs and ANs @@ -479,8 +657,12 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionDataRequester() *FlowAccessN ), } + if !builder.BitswapReprovideEnabled { + opts = append(opts, blob.WithReprovideInterval(-1)) + } + var err error - bs, err = node.Network.RegisterBlobService(channels.ExecutionDataService, ds, opts...) + bs, err = node.EngineRegistry.RegisterBlobService(channels.ExecutionDataService, builder.ExecutionDatastoreManager.Datastore(), opts...) 
if err != nil { return nil, fmt.Errorf("could not register blob service: %w", err) } @@ -490,17 +672,43 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionDataRequester() *FlowAccessN // to be ready before starting bsDependable.Init(bs) - builder.ExecutionDataDownloader = execution_data.NewDownloader(bs) + var downloaderOpts []execution_data.DownloaderOption + + if executionDataPrunerEnabled { + sealed, err := node.State.Sealed().Head() + if err != nil { + return nil, fmt.Errorf("cannot get the sealed block: %w", err) + } + + trackerDir := filepath.Join(builder.executionDataDir, "tracker") + builder.ExecutionDataTracker, err = tracker.OpenStorage( + trackerDir, + sealed.Height, + node.Logger, + tracker.WithPruneCallback(func(c cid.Cid) error { + // TODO: use a proper context here + return builder.ExecutionDataBlobstore.DeleteBlob(context.TODO(), c) + }), + ) + if err != nil { + return nil, fmt.Errorf("failed to create execution data tracker: %w", err) + } + + downloaderOpts = []execution_data.DownloaderOption{ + execution_data.WithExecutionDataTracker(builder.ExecutionDataTracker, node.Storage.Headers), + } + } + builder.ExecutionDataDownloader = execution_data.NewDownloader(bs, downloaderOpts...) return builder.ExecutionDataDownloader, nil }). Component("execution data requester", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // Validation of the start block height needs to be done after loading state if builder.executionDataStartHeight > 0 { - if builder.executionDataStartHeight <= builder.RootBlock.Header.Height { + if builder.executionDataStartHeight <= builder.FinalizedRootBlock.Height { return nil, fmt.Errorf( "execution data start block height (%d) must be greater than the root block height (%d)", - builder.executionDataStartHeight, builder.RootBlock.Header.Height) + builder.executionDataStartHeight, builder.FinalizedRootBlock.Height) } latestSeal, err := builder.State.Sealed().Head() @@ -522,29 +730,290 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionDataRequester() *FlowAccessN // requester expects the initial last processed height, which is the first height - 1 builder.executionDataConfig.InitialBlockHeight = builder.executionDataStartHeight - 1 } else { - builder.executionDataConfig.InitialBlockHeight = builder.RootBlock.Header.Height + builder.executionDataConfig.InitialBlockHeight = builder.SealedRootBlock.Height } execDataDistributor = edrequester.NewExecutionDataDistributor() - builder.ExecutionDataRequester = edrequester.New( + // Execution Data cache with a downloader as the backend. This is used by the requester + // to download and cache execution data for each block. It shares a cache backend instance + // with the datastore implementation. 
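+ // (Note: this cache and the blobstore-backed executionDataStoreCache above
+ // share the same herocache backend, execDataCacheBackend: an entry the
+ // requester downloads here is immediately visible to local reads without a
+ // second copy, and both views share the backend's capacity and eviction.
+ // Only the miss path differs: download from the network here, versus a
+ // not-found error there.)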
+ executionDataCache := execdatacache.NewExecutionDataCache( + builder.ExecutionDataDownloader, + builder.Storage.Headers, + builder.Storage.Seals, + builder.Storage.Results, + execDataCacheBackend, + ) + + r, err := edrequester.New( builder.Logger, metrics.NewExecutionDataRequesterCollector(), builder.ExecutionDataDownloader, + executionDataCache, processedBlockHeight, processedNotifications, builder.State, builder.Storage.Headers, - builder.Storage.Results, - builder.Storage.Seals, builder.executionDataConfig, + execDataDistributor, ) + if err != nil { + return nil, fmt.Errorf("failed to create execution data requester: %w", err) + } + builder.ExecutionDataRequester = r builder.FollowerDistributor.AddOnBlockFinalizedConsumer(builder.ExecutionDataRequester.OnBlockFinalized) - builder.ExecutionDataRequester.AddOnExecutionDataReceivedConsumer(execDataDistributor.OnExecutionDataReceived) + + // add requester into ReadyDoneAware dependency passed to indexer. This allows the indexer + // to wait for the requester to be ready before starting. + requesterDependable.Init(builder.ExecutionDataRequester) return builder.ExecutionDataRequester, nil + }). + Component("execution data pruner", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + if !executionDataPrunerEnabled { + return &module.NoopReadyDoneAware{}, nil + } + + var prunerMetrics module.ExecutionDataPrunerMetrics = metrics.NewNoopCollector() + if node.MetricsEnabled { + prunerMetrics = metrics.NewExecutionDataPrunerCollector() + } + + var err error + builder.ExecutionDataPruner, err = pruner.NewPruner( + node.Logger, + prunerMetrics, + builder.ExecutionDataTracker, + pruner.WithPruneCallback(func(ctx context.Context) error { + return builder.ExecutionDatastoreManager.CollectGarbage(ctx) + }), + pruner.WithHeightRangeTarget(builder.executionDataPrunerHeightRangeTarget), + pruner.WithThreshold(builder.executionDataPrunerThreshold), + pruner.WithPruningInterval(builder.executionDataPruningInterval), + ) + if err != nil { + return nil, fmt.Errorf("failed to create execution data pruner: %w", err) + } + + builder.ExecutionDataPruner.RegisterHeightRecorder(builder.ExecutionDataDownloader) + + return builder.ExecutionDataPruner, nil + }) + + if builder.publicNetworkExecutionDataEnabled { + var publicBsDependable *module.ProxiedReadyDoneAware + + builder.Module("public blobservice peer manager dependencies", func(node *cmd.NodeConfig) error { + publicBsDependable = module.NewProxiedReadyDoneAware() + builder.PeerManagerDependencies.Add(publicBsDependable) + return nil + }) + builder.Component("public network execution data service", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + opts := []network.BlobServiceOption{ + blob.WithBitswapOptions( + bitswap.WithTracer( + blob.NewTracer(node.Logger.With().Str("public_blob_service", channels.PublicExecutionDataService.String()).Logger()), + ), + ), + blob.WithParentBlobService(bs), + } + + net := builder.AccessNodeConfig.PublicNetworkConfig.Network + + var err error + builder.PublicBlobService, err = net.RegisterBlobService(channels.PublicExecutionDataService, builder.ExecutionDatastoreManager.Datastore(), opts...) 
+ if err != nil { + return nil, fmt.Errorf("could not register blob service: %w", err) + } + + // add blobservice into ReadyDoneAware dependency passed to peer manager + // this starts the blob service and configures peer manager to wait for the blobservice + // to be ready before starting + publicBsDependable.Init(builder.PublicBlobService) + return &module.NoopReadyDoneAware{}, nil }) + } + + if builder.executionDataIndexingEnabled { + var indexedBlockHeight storage.ConsumerProgressInitializer + + builder. + AdminCommand("execute-script", func(config *cmd.NodeConfig) commands.AdminCommand { + return stateSyncCommands.NewExecuteScriptCommand(builder.ScriptExecutor) + }). + Module("indexed block height consumer progress", func(node *cmd.NodeConfig) error { + // Note: progress is stored in the MAIN db since that is where indexed execution data is stored. + indexedBlockHeight = store.NewConsumerProgress(builder.ProtocolDB, module.ConsumeProgressExecutionDataIndexerBlockHeight) + return nil + }). + Module("transaction results storage", func(node *cmd.NodeConfig) error { + builder.lightTransactionResults = store.NewLightTransactionResults(node.Metrics.Cache, node.ProtocolDB, bstorage.DefaultCacheSize) + return nil + }). + DependableComponent("execution data indexer", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + // Note: using a DependableComponent here to ensure that the indexer does not block + // other components from starting while bootstrapping the register db since it may + // take hours to complete. + + pdb, err := pstorage.OpenRegisterPebbleDB( + node.Logger.With().Str("pebbledb", "registers").Logger(), + builder.registersDBPath) + if err != nil { + return nil, fmt.Errorf("could not open registers db: %w", err) + } + builder.ShutdownFunc(func() error { + return pdb.Close() + }) + + bootstrapped, err := pstorage.IsBootstrapped(pdb) + if err != nil { + return nil, fmt.Errorf("could not check if registers db is bootstrapped: %w", err) + } + + if !bootstrapped { + checkpointFile := builder.checkpointFile + if checkpointFile == cmd.NotSet { + checkpointFile = path.Join(builder.BootstrapDir, bootstrap.PathRootCheckpoint) + } + + // currently, the checkpoint must be from the root block. + // read the root hash from the provided checkpoint and verify it matches the + // state commitment from the root snapshot. 
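+ // In other words: the ledger root hash embedded in the checkpoint must
+ // equal RootSeal.FinalState, the state commitment recorded for the sealed
+ // root block; bootstrapping the register db from a mismatched checkpoint
+ // would index the wrong execution state.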
+ err := wal.CheckpointHasRootHash( + node.Logger, + "", // checkpoint file already full path + checkpointFile, + ledger.RootHash(node.RootSeal.FinalState), + ) + if err != nil { + return nil, fmt.Errorf("could not verify checkpoint file: %w", err) + } + + checkpointHeight := builder.SealedRootBlock.Height + + if builder.SealedRootBlock.ID() != builder.RootSeal.BlockID { + return nil, fmt.Errorf("mismatching sealed root block and root seal: %v != %v", + builder.SealedRootBlock.ID(), builder.RootSeal.BlockID) + } + + rootHash := ledger.RootHash(builder.RootSeal.FinalState) + bootstrap, err := pstorage.NewRegisterBootstrap(pdb, checkpointFile, checkpointHeight, rootHash, builder.Logger) + if err != nil { + return nil, fmt.Errorf("could not create registers bootstrap: %w", err) + } + + // TODO: find a way to hook a context up to this to allow a graceful shutdown + workerCount := 10 + err = bootstrap.IndexCheckpointFile(context.Background(), workerCount) + if err != nil { + return nil, fmt.Errorf("could not load checkpoint file: %w", err) + } + } + + registers, err := pstorage.NewRegisters(pdb, builder.registerDBPruneThreshold) + if err != nil { + return nil, fmt.Errorf("could not create registers storage: %w", err) + } + + if builder.registerCacheSize > 0 { + cacheType, err := pstorage.ParseCacheType(builder.registerCacheType) + if err != nil { + return nil, fmt.Errorf("could not parse register cache type: %w", err) + } + cacheMetrics := metrics.NewCacheCollector(builder.RootChainID) + registersCache, err := pstorage.NewRegistersCache(registers, cacheType, builder.registerCacheSize, cacheMetrics) + if err != nil { + return nil, fmt.Errorf("could not create registers cache: %w", err) + } + builder.Storage.RegisterIndex = registersCache + } else { + builder.Storage.RegisterIndex = registers + } + + indexerDerivedChainData, queryDerivedChainData, err := builder.buildDerivedChainData() + if err != nil { + return nil, fmt.Errorf("could not create derived chain data: %w", err) + } + + indexerCore, err := indexer.New( + builder.Logger, + metrics.NewExecutionStateIndexerCollector(), + notNil(builder.ProtocolDB), + notNil(builder.Storage.RegisterIndex), + notNil(builder.Storage.Headers), + notNil(builder.events), + notNil(builder.collections), + notNil(builder.transactions), + notNil(builder.lightTransactionResults), + builder.RootChainID.Chain(), + indexerDerivedChainData, + notNil(builder.collectionExecutedMetric), + node.StorageLockMgr, + ) + if err != nil { + return nil, err + } + builder.ExecutionIndexerCore = indexerCore + + // execution state worker uses a jobqueue to process new execution data and indexes it by using the indexer. 
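To unpack that comment, here is a minimal, self-contained sketch of the jobqueue-style consumer it describes: persist the highest fully processed height, process newly available heights strictly in order, and resume from the stored height after a restart. All names are illustrative; the actual wiring via `indexer.NewIndexer` follows below:

```go
package jobqueue

import "fmt"

// progress persists the highest fully processed height, the role played by
// the ConsumerProgress stores in this file.
type progress interface {
	ProcessedIndex() (uint64, error)
	SetProcessedIndex(uint64) error
}

// worker consumes heights one at a time, in order, surviving restarts.
type worker struct {
	progress progress
	process  func(height uint64) error // e.g. index execution data for a height
}

// OnHeightAvailable is invoked when execution data up to `available` exists,
// the role of the distributor notification above.
func (w *worker) OnHeightAvailable(available uint64) error {
	last, err := w.progress.ProcessedIndex()
	if err != nil {
		return err
	}
	for h := last + 1; h <= available; h++ {
		if err := w.process(h); err != nil {
			return fmt.Errorf("processing height %d: %w", h, err)
		}
		// advance only after the height is fully processed, so a crash
		// re-processes at most one height
		if err := w.progress.SetProcessedIndex(h); err != nil {
			return err
		}
	}
	return nil
}
```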
+ builder.ExecutionIndexer, err = indexer.NewIndexer( + builder.Logger, + registers.FirstHeight(), + registers, + indexerCore, + executionDataStoreCache, + builder.ExecutionDataRequester.HighestConsecutiveHeight, + indexedBlockHeight, + ) + if err != nil { + return nil, err + } + + if executionDataPrunerEnabled { + builder.ExecutionDataPruner.RegisterHeightRecorder(builder.ExecutionIndexer) + } + + // setup requester to notify indexer when new execution data is received + execDataDistributor.AddOnExecutionDataReceivedConsumer(builder.ExecutionIndexer.OnExecutionData) + + // create the script execution module. this depends on the indexer being initialized and + // the register storage being bootstrapped + scripts := execution.NewScripts( + builder.Logger, + metrics.NewExecutionCollector(builder.Tracer), + builder.RootChainID, + computation.NewProtocolStateWrapper(builder.State), + builder.Storage.Headers, + builder.ExecutionIndexerCore.RegisterValue, + builder.scriptExecutorConfig, + queryDerivedChainData, + builder.programCacheSize > 0, + ) + + err = builder.ScriptExecutor.Initialize(builder.ExecutionIndexer, scripts, builder.VersionControl) + if err != nil { + return nil, err + } + + err = builder.Reporter.Initialize(builder.ExecutionIndexer) + if err != nil { + return nil, err + } + + err = builder.RegistersAsyncStore.Initialize(registers) + if err != nil { + return nil, err + } + + if builder.stopControlEnabled { + builder.StopControl.RegisterHeightRecorder(builder.ExecutionIndexer) + } + + return builder.ExecutionIndexer, nil + }, builder.IndexerDependencies) + } if builder.stateStreamConf.ListenAddr != "" { builder.Component("exec state stream engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { @@ -556,34 +1025,80 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionDataRequester() *FlowAccessN builder.stateStreamConf.MaxAddresses = value case "Contracts": builder.stateStreamConf.MaxContracts = value + case "AccountAddresses": + builder.stateStreamConf.MaxAccountAddress = value } } builder.stateStreamConf.RpcMetricsEnabled = builder.rpcMetricsEnabled - var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() - if builder.HeroCacheMetricsEnable { - heroCacheCollector = metrics.AccessNodeExecutionDataCacheMetrics(builder.MetricsRegisterer) + highestAvailableHeight, err := builder.ExecutionDataRequester.HighestConsecutiveHeight() + if err != nil { + return nil, fmt.Errorf("could not get highest consecutive height: %w", err) } + broadcaster := engine.NewBroadcaster() - stateStreamEng, err := state_stream.NewEng( + eventQueryMode, err := query_mode.ParseIndexQueryMode(builder.rpcConf.BackendConfig.EventQueryMode) + if err != nil { + return nil, fmt.Errorf("could not parse event query mode: %w", err) + } + + // use the events index for events if enabled and the node is configured to use it for + // regular event queries + useIndex := builder.executionDataIndexingEnabled && + eventQueryMode != query_mode.IndexQueryModeExecutionNodesOnly + + executionDataTracker := subscriptiontracker.NewExecutionDataTracker( + builder.Logger, + node.State, + builder.executionDataConfig.InitialBlockHeight, + node.Storage.Headers, + broadcaster, + highestAvailableHeight, + builder.EventsIndex, + useIndex, + ) + + builder.stateStreamBackend, err = statestreambackend.New( node.Logger, - builder.stateStreamConf, - builder.ExecutionDataStore, node.State, node.Storage.Headers, node.Storage.Seals, node.Storage.Results, + builder.ExecutionDataStore, + executionDataStoreCache, + 
builder.RegistersAsyncStore, + builder.EventsIndex, + useIndex, + int(builder.stateStreamConf.RegisterIDsRequestLimit), + subscription.NewSubscriptionHandler( + builder.Logger, + broadcaster, + builder.stateStreamConf.ClientSendTimeout, + builder.stateStreamConf.ResponseLimit, + builder.stateStreamConf.ClientSendBufferSize, + ), + executionDataTracker, + ) + if err != nil { + return nil, fmt.Errorf("could not create state stream backend: %w", err) + } + + stateStreamEng, err := statestreambackend.NewEng( + node.Logger, + builder.stateStreamConf, + executionDataStoreCache, + node.Storage.Headers, node.RootChainID, - builder.apiRatelimits, - builder.apiBurstlimits, - heroCacheCollector, + builder.stateStreamGrpcServer, + builder.stateStreamBackend, ) if err != nil { return nil, fmt.Errorf("could not create state stream engine: %w", err) } builder.StateStreamEng = stateStreamEng - execDataDistributor.AddOnExecutionDataReceivedConsumer(builder.StateStreamEng.OnExecutionData) + // setup requester to notify ExecutionDataTracker when new execution data is received + execDataDistributor.AddOnExecutionDataReceivedConsumer(builder.stateStreamBackend.OnExecutionData) return builder.StateStreamEng, nil }) @@ -592,6 +1107,34 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionDataRequester() *FlowAccessN return builder } +// buildDerivedChainData creates the derived chain data for the indexer and the query engine +// If program caching is disabled, the function will return nil for the indexer cache, and a +// derived chain data object for the query engine cache. +func (builder *FlowAccessNodeBuilder) buildDerivedChainData() ( + indexerCache *derived.DerivedChainData, + queryCache *derived.DerivedChainData, + err error, +) { + cacheSize := builder.programCacheSize + + // the underlying cache requires size > 0. no data will be written so 1 is fine. + if cacheSize == 0 { + cacheSize = 1 + } + + derivedChainData, err := derived.NewDerivedChainData(cacheSize) + if err != nil { + return nil, nil, err + } + + // writes are done by the indexer. using a nil value effectively disables writes to the cache. 
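To spell out the contract the comment above describes (and the branch below implements): with program caching disabled, the indexer receives nil and never writes, while the query engine still receives a size-1 cache so reads go through a single code path. A hypothetical helper stating that contract (illustration only, not repo code):

```go
package derivedcache

// derivedChainDataContract reports what each consumer receives for a given
// configured program cache size, mirroring buildDerivedChainData below:
//   - size == 0: indexer gets nil (writes disabled), query engine gets a
//     size-1 cache that stays effectively empty
//   - size  > 0: both sides share one cache of the configured size
func derivedChainDataContract(programCacheSize uint) (indexerGetsCache, queryGetsCache, shared bool) {
	if programCacheSize == 0 {
		return false, true, false
	}
	return true, true, true
}
```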
+ if builder.programCacheSize == 0 { + return nil, derivedChainData, nil + } + + return derivedChainData, derivedChainData, nil +} + func FlowAccessNode(nodeBuilder *cmd.FlowNodeBuilder) *FlowAccessNodeBuilder { dist := consensuspubsub.NewFollowerDistributor() dist.AddProposalViolationConsumer(notifications.NewSlashingViolationsConsumer(nodeBuilder.Logger)) @@ -599,11 +1142,11 @@ func FlowAccessNode(nodeBuilder *cmd.FlowNodeBuilder) *FlowAccessNodeBuilder { AccessNodeConfig: DefaultAccessNodeConfig(), FlowNodeBuilder: nodeBuilder, FollowerDistributor: dist, + IndexerDependencies: cmd.NewDependencyList(), } } func (builder *FlowAccessNodeBuilder) ParseFlags() error { - builder.BaseFlags() builder.extraFlags() @@ -615,53 +1158,360 @@ func (builder *FlowAccessNodeBuilder) extraFlags() { builder.ExtraFlags(func(flags *pflag.FlagSet) { defaultConfig := DefaultAccessNodeConfig() - flags.UintVar(&builder.collectionGRPCPort, "collection-ingress-port", defaultConfig.collectionGRPCPort, "the grpc ingress port for all collection nodes") - flags.UintVar(&builder.executionGRPCPort, "execution-ingress-port", defaultConfig.executionGRPCPort, "the grpc ingress port for all execution nodes") - flags.StringVarP(&builder.rpcConf.UnsecureGRPCListenAddr, "rpc-addr", "r", defaultConfig.rpcConf.UnsecureGRPCListenAddr, "the address the unsecured gRPC server listens on") - flags.StringVar(&builder.rpcConf.SecureGRPCListenAddr, "secure-rpc-addr", defaultConfig.rpcConf.SecureGRPCListenAddr, "the address the secure gRPC server listens on") - flags.StringVar(&builder.stateStreamConf.ListenAddr, "state-stream-addr", defaultConfig.stateStreamConf.ListenAddr, "the address the state stream server listens on (if empty the server will not be started)") + flags.UintVar(&builder.rpcConf.BackendConfig.CollectionConfig.GRPCPort, + "collection-ingress-port", + defaultConfig.rpcConf.BackendConfig.CollectionConfig.GRPCPort, + "the grpc ingress port for all collection nodes") + flags.UintVar(&builder.rpcConf.BackendConfig.ExecutionConfig.GRPCPort, + "execution-ingress-port", + defaultConfig.rpcConf.BackendConfig.ExecutionConfig.GRPCPort, + "the grpc ingress port for all execution nodes") + flags.StringVarP(&builder.rpcConf.UnsecureGRPCListenAddr, + "rpc-addr", + "r", + defaultConfig.rpcConf.UnsecureGRPCListenAddr, + "the address the unsecured gRPC server listens on") + flags.StringVar(&builder.rpcConf.SecureGRPCListenAddr, + "secure-rpc-addr", + defaultConfig.rpcConf.SecureGRPCListenAddr, + "the address the secure gRPC server listens on") + flags.StringVar(&builder.stateStreamConf.ListenAddr, + "state-stream-addr", + defaultConfig.stateStreamConf.ListenAddr, + "the address the state stream server listens on (if empty the server will not be started)") flags.StringVarP(&builder.rpcConf.HTTPListenAddr, "http-addr", "h", defaultConfig.rpcConf.HTTPListenAddr, "the address the http proxy server listens on") - flags.StringVar(&builder.rpcConf.RESTListenAddr, "rest-addr", defaultConfig.rpcConf.RESTListenAddr, "the address the REST server listens on (if empty the REST server will not be started)") - flags.StringVarP(&builder.rpcConf.CollectionAddr, "static-collection-ingress-addr", "", defaultConfig.rpcConf.CollectionAddr, "the address (of the collection node) to send transactions to") - flags.StringVarP(&builder.ExecutionNodeAddress, "script-addr", "s", defaultConfig.ExecutionNodeAddress, "the address (of the execution node) forward the script to") - flags.StringSliceVar(&builder.rpcConf.ArchiveAddressList, "archive-address-list", 
defaultConfig.rpcConf.ArchiveAddressList, "the list of address of the archive node to forward the script queries to") - flags.StringVarP(&builder.rpcConf.HistoricalAccessAddrs, "historical-access-addr", "", defaultConfig.rpcConf.HistoricalAccessAddrs, "comma separated rpc addresses for historical access nodes") - flags.DurationVar(&builder.rpcConf.CollectionClientTimeout, "collection-client-timeout", defaultConfig.rpcConf.CollectionClientTimeout, "grpc client timeout for a collection node") - flags.DurationVar(&builder.rpcConf.ExecutionClientTimeout, "execution-client-timeout", defaultConfig.rpcConf.ExecutionClientTimeout, "grpc client timeout for an execution node") - flags.UintVar(&builder.rpcConf.ConnectionPoolSize, "connection-pool-size", defaultConfig.rpcConf.ConnectionPoolSize, "maximum number of connections allowed in the connection pool, size of 0 disables the connection pooling, and anything less than the default size will be overridden to use the default size") - flags.UintVar(&builder.rpcConf.MaxMsgSize, "rpc-max-message-size", grpcutils.DefaultMaxMsgSize, "the maximum message size in bytes for messages sent or received over grpc") - flags.UintVar(&builder.rpcConf.MaxHeightRange, "rpc-max-height-range", defaultConfig.rpcConf.MaxHeightRange, "maximum size for height range requests") - flags.StringSliceVar(&builder.rpcConf.PreferredExecutionNodeIDs, "preferred-execution-node-ids", defaultConfig.rpcConf.PreferredExecutionNodeIDs, "comma separated list of execution nodes ids to choose from when making an upstream call e.g. b4a4dbdcd443d...,fb386a6a... etc.") - flags.StringSliceVar(&builder.rpcConf.FixedExecutionNodeIDs, "fixed-execution-node-ids", defaultConfig.rpcConf.FixedExecutionNodeIDs, "comma separated list of execution nodes ids to choose from when making an upstream call if no matching preferred execution id is found e.g. b4a4dbdcd443d...,fb386a6a... 
etc.") + flags.StringVar(&builder.rpcConf.RestConfig.ListenAddress, + "rest-addr", + defaultConfig.rpcConf.RestConfig.ListenAddress, + "the address the REST server listens on (if empty the REST server will not be started)") + flags.DurationVar(&builder.rpcConf.RestConfig.WriteTimeout, + "rest-write-timeout", + defaultConfig.rpcConf.RestConfig.WriteTimeout, + "timeout to use when writing REST response") + flags.DurationVar(&builder.rpcConf.RestConfig.ReadTimeout, + "rest-read-timeout", + defaultConfig.rpcConf.RestConfig.ReadTimeout, + "timeout to use when reading REST request headers") + flags.DurationVar(&builder.rpcConf.RestConfig.IdleTimeout, "rest-idle-timeout", defaultConfig.rpcConf.RestConfig.IdleTimeout, "idle timeout for REST connections") + flags.Int64Var(&builder.rpcConf.RestConfig.MaxRequestSize, + "rest-max-request-size", + defaultConfig.rpcConf.RestConfig.MaxRequestSize, + "the maximum request size in bytes for payload sent over REST server") + flags.Int64Var(&builder.rpcConf.RestConfig.MaxResponseSize, + "rest-max-response-size", + defaultConfig.rpcConf.RestConfig.MaxResponseSize, + "the maximum response size in bytes for payload sent from REST server") + flags.StringVarP(&builder.rpcConf.CollectionAddr, + "static-collection-ingress-addr", + "", + defaultConfig.rpcConf.CollectionAddr, + "the address (of the collection node) to send transactions to") + flags.StringVarP(&builder.ExecutionNodeAddress, + "script-addr", + "s", + defaultConfig.ExecutionNodeAddress, + "the address (of the execution node) forward the script to") + flags.StringVarP(&builder.rpcConf.HistoricalAccessAddrs, + "historical-access-addr", + "", + defaultConfig.rpcConf.HistoricalAccessAddrs, + "comma separated rpc addresses for historical access nodes") + flags.DurationVar(&builder.rpcConf.BackendConfig.CollectionConfig.Timeout, + "collection-client-timeout", + defaultConfig.rpcConf.BackendConfig.CollectionConfig.Timeout, + "grpc client timeout for a collection node") + flags.DurationVar(&builder.rpcConf.BackendConfig.ExecutionConfig.Timeout, + "execution-client-timeout", + defaultConfig.rpcConf.BackendConfig.ExecutionConfig.Timeout, + "grpc client timeout for an execution node") + flags.UintVar(&builder.rpcConf.BackendConfig.ConnectionPoolSize, + "connection-pool-size", + defaultConfig.rpcConf.BackendConfig.ConnectionPoolSize, + "maximum number of connections allowed in the connection pool, size of 0 disables the connection pooling, and anything less than the default size will be overridden to use the default size") + flags.UintVar(&builder.rpcConf.DeprecatedMaxMsgSize, + "rpc-max-message-size", + defaultConfig.rpcConf.DeprecatedMaxMsgSize, + "[deprecated] the maximum message size in bytes for messages sent or received over grpc") + flags.UintVar(&builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize, + "rpc-max-request-message-size", + defaultConfig.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize, + "the maximum request message size in bytes for request messages received over grpc by the server") + flags.UintVar(&builder.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize, + "rpc-max-response-message-size", + defaultConfig.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize, + "the maximum message size in bytes for response messages sent over grpc by the server") + flags.UintVar(&builder.rpcConf.BackendConfig.CollectionConfig.MaxRequestMsgSize, + "rpc-max-collection-request-message-size", + defaultConfig.rpcConf.BackendConfig.CollectionConfig.MaxRequestMsgSize, + "the maximum request message 
size in bytes for request messages sent over grpc to collection nodes") + flags.UintVar(&builder.rpcConf.BackendConfig.CollectionConfig.MaxResponseMsgSize, + "rpc-max-collection-response-message-size", + defaultConfig.rpcConf.BackendConfig.CollectionConfig.MaxResponseMsgSize, + "the maximum message size in bytes for response messages received over grpc from collection nodes") + flags.UintVar(&builder.rpcConf.BackendConfig.ExecutionConfig.MaxRequestMsgSize, + "rpc-max-execution-request-message-size", + defaultConfig.rpcConf.BackendConfig.ExecutionConfig.MaxRequestMsgSize, + "the maximum request message size in bytes for request messages sent over grpc to execution nodes") + flags.UintVar(&builder.rpcConf.BackendConfig.ExecutionConfig.MaxResponseMsgSize, + "rpc-max-execution-response-message-size", + defaultConfig.rpcConf.BackendConfig.ExecutionConfig.MaxResponseMsgSize, + "the maximum message size in bytes for response messages received over grpc from execution nodes") + flags.UintVar(&builder.rpcConf.BackendConfig.MaxHeightRange, + "rpc-max-height-range", + defaultConfig.rpcConf.BackendConfig.MaxHeightRange, + "maximum size for height range requests") + flags.StringSliceVar(&builder.rpcConf.BackendConfig.PreferredExecutionNodeIDs, + "preferred-execution-node-ids", + defaultConfig.rpcConf.BackendConfig.PreferredExecutionNodeIDs, + "comma separated list of execution nodes ids to choose from when making an upstream call e.g. b4a4dbdcd443d...,fb386a6a... etc.") + flags.StringSliceVar(&builder.rpcConf.BackendConfig.FixedExecutionNodeIDs, + "fixed-execution-node-ids", + defaultConfig.rpcConf.BackendConfig.FixedExecutionNodeIDs, + "comma separated list of execution nodes ids to choose from when making an upstream call if no matching preferred execution id is found e.g. b4a4dbdcd443d...,fb386a6a... etc.") + flags.StringVar(&builder.rpcConf.CompressorName, + "grpc-compressor", + defaultConfig.rpcConf.CompressorName, + "name of grpc compressor that will be used for requests to other nodes. 
One of (gzip, snappy, deflate)") flags.BoolVar(&builder.logTxTimeToFinalized, "log-tx-time-to-finalized", defaultConfig.logTxTimeToFinalized, "log transaction time to finalized") flags.BoolVar(&builder.logTxTimeToExecuted, "log-tx-time-to-executed", defaultConfig.logTxTimeToExecuted, "log transaction time to executed") - flags.BoolVar(&builder.logTxTimeToFinalizedExecuted, "log-tx-time-to-finalized-executed", defaultConfig.logTxTimeToFinalizedExecuted, "log transaction time to finalized and executed") - flags.BoolVar(&builder.pingEnabled, "ping-enabled", defaultConfig.pingEnabled, "whether to enable the ping process that pings all other peers and report the connectivity to metrics") + flags.BoolVar(&builder.logTxTimeToFinalizedExecuted, + "log-tx-time-to-finalized-executed", + defaultConfig.logTxTimeToFinalizedExecuted, + "log transaction time to finalized and executed") + flags.BoolVar(&builder.logTxTimeToSealed, + "log-tx-time-to-sealed", + defaultConfig.logTxTimeToSealed, + "log transaction time to sealed") + flags.BoolVar(&builder.pingEnabled, + "ping-enabled", + defaultConfig.pingEnabled, + "whether to enable the ping process that pings all other peers and reports the connectivity to metrics") flags.BoolVar(&builder.retryEnabled, "retry-enabled", defaultConfig.retryEnabled, "whether to enable the retry mechanism at the access node level") flags.BoolVar(&builder.rpcMetricsEnabled, "rpc-metrics-enabled", defaultConfig.rpcMetricsEnabled, "whether to enable the rpc metrics") - flags.StringVarP(&builder.nodeInfoFile, "node-info-file", "", defaultConfig.nodeInfoFile, "full path to a json file which provides more details about nodes when reporting its reachability metrics") + flags.UintVar(&builder.TxResultCacheSize, "transaction-result-cache-size", defaultConfig.TxResultCacheSize, "transaction result cache size (disabled by default, i.e. 0)") + flags.StringVarP(&builder.nodeInfoFile, + "node-info-file", + "", + defaultConfig.nodeInfoFile, + "full path to a json file which provides more details about nodes when reporting its reachability metrics") flags.StringToIntVar(&builder.apiRatelimits, "api-rate-limits", defaultConfig.apiRatelimits, "per second rate limits for Access API methods e.g. Ping=300,GetTransaction=500 etc.") flags.StringToIntVar(&builder.apiBurstlimits, "api-burst-limits", defaultConfig.apiBurstlimits, "burst limits for Access API methods e.g. Ping=100,GetTransaction=100 etc.") flags.BoolVar(&builder.supportsObserver, "supports-observer", defaultConfig.supportsObserver, "true if this staked access node supports observer or follower connections") flags.StringVar(&builder.PublicNetworkConfig.BindAddress, "public-network-address", defaultConfig.PublicNetworkConfig.BindAddress, "staked access node's public network bind address") - + flags.BoolVar(&builder.rpcConf.BackendConfig.CircuitBreakerConfig.Enabled, + "circuit-breaker-enabled", + defaultConfig.rpcConf.BackendConfig.CircuitBreakerConfig.Enabled, + "specifies whether the circuit breaker is enabled for collection and execution API clients.") + flags.DurationVar(&builder.rpcConf.BackendConfig.CircuitBreakerConfig.RestoreTimeout, + "circuit-breaker-restore-timeout", + defaultConfig.rpcConf.BackendConfig.CircuitBreakerConfig.RestoreTimeout, + "duration after which the circuit breaker will restore the connection to the client after closing it due to failures. 
Default value is 60s") + flags.Uint32Var(&builder.rpcConf.BackendConfig.CircuitBreakerConfig.MaxFailures, + "circuit-breaker-max-failures", + defaultConfig.rpcConf.BackendConfig.CircuitBreakerConfig.MaxFailures, + "maximum number of failed calls to the client that will cause the circuit breaker to close the connection. Default value is 5") + flags.Uint32Var(&builder.rpcConf.BackendConfig.CircuitBreakerConfig.MaxRequests, + "circuit-breaker-max-requests", + defaultConfig.rpcConf.BackendConfig.CircuitBreakerConfig.MaxRequests, + "maximum number of requests to check if connection restored after timeout. Default value is 1") + flags.BoolVar(&builder.versionControlEnabled, + "version-control-enabled", + defaultConfig.versionControlEnabled, + "whether to enable the version control feature. Default value is true") + flags.BoolVar(&builder.stopControlEnabled, + "stop-control-enabled", + defaultConfig.stopControlEnabled, + "whether to enable the stop control feature. Default value is false") + flags.BoolVar(&builder.scheduledCallbacksEnabled, + "scheduled-callbacks-enabled", + defaultConfig.scheduledCallbacksEnabled, + "whether to include scheduled callback transactions in system collections.") // ExecutionDataRequester config - flags.BoolVar(&builder.executionDataSyncEnabled, "execution-data-sync-enabled", defaultConfig.executionDataSyncEnabled, "whether to enable the execution data sync protocol") + flags.BoolVar(&builder.executionDataSyncEnabled, + "execution-data-sync-enabled", + defaultConfig.executionDataSyncEnabled, + "whether to enable the execution data sync protocol") + flags.BoolVar(&builder.publicNetworkExecutionDataEnabled, + "public-network-execution-data-sync-enabled", + defaultConfig.publicNetworkExecutionDataEnabled, + "[experimental] whether to enable the execution data sync protocol on public network") flags.StringVar(&builder.executionDataDir, "execution-data-dir", defaultConfig.executionDataDir, "directory to use for Execution Data database") - flags.Uint64Var(&builder.executionDataStartHeight, "execution-data-start-height", defaultConfig.executionDataStartHeight, "height of first block to sync execution data from when starting with an empty Execution Data database") - flags.Uint64Var(&builder.executionDataConfig.MaxSearchAhead, "execution-data-max-search-ahead", defaultConfig.executionDataConfig.MaxSearchAhead, "max number of heights to search ahead of the lowest outstanding execution data height") - flags.DurationVar(&builder.executionDataConfig.FetchTimeout, "execution-data-fetch-timeout", defaultConfig.executionDataConfig.FetchTimeout, "initial timeout to use when fetching execution data from the network. timeout increases using an incremental backoff until execution-data-max-fetch-timeout. e.g. 30s") - flags.DurationVar(&builder.executionDataConfig.MaxFetchTimeout, "execution-data-max-fetch-timeout", defaultConfig.executionDataConfig.MaxFetchTimeout, "maximum timeout to use when fetching execution data from the network e.g. 300s") - flags.DurationVar(&builder.executionDataConfig.RetryDelay, "execution-data-retry-delay", defaultConfig.executionDataConfig.RetryDelay, "initial delay for exponential backoff when fetching execution data fails e.g. 10s") - flags.DurationVar(&builder.executionDataConfig.MaxRetryDelay, "execution-data-max-retry-delay", defaultConfig.executionDataConfig.MaxRetryDelay, "maximum delay for exponential backoff when fetching execution data fails e.g. 
5m") + flags.Uint64Var(&builder.executionDataStartHeight, + "execution-data-start-height", + defaultConfig.executionDataStartHeight, + "height of first block to sync execution data from when starting with an empty Execution Data database") + flags.Uint64Var(&builder.executionDataConfig.MaxSearchAhead, + "execution-data-max-search-ahead", + defaultConfig.executionDataConfig.MaxSearchAhead, + "max number of heights to search ahead of the lowest outstanding execution data height") + flags.DurationVar(&builder.executionDataConfig.FetchTimeout, + "execution-data-fetch-timeout", + defaultConfig.executionDataConfig.FetchTimeout, + "initial timeout to use when fetching execution data from the network. timeout increases using an incremental backoff until execution-data-max-fetch-timeout. e.g. 30s") + flags.DurationVar(&builder.executionDataConfig.MaxFetchTimeout, + "execution-data-max-fetch-timeout", + defaultConfig.executionDataConfig.MaxFetchTimeout, + "maximum timeout to use when fetching execution data from the network e.g. 300s") + flags.DurationVar(&builder.executionDataConfig.RetryDelay, + "execution-data-retry-delay", + defaultConfig.executionDataConfig.RetryDelay, + "initial delay for exponential backoff when fetching execution data fails e.g. 10s") + flags.DurationVar(&builder.executionDataConfig.MaxRetryDelay, + "execution-data-max-retry-delay", + defaultConfig.executionDataConfig.MaxRetryDelay, + "maximum delay for exponential backoff when fetching execution data fails e.g. 5m") + + var builderexecutionDataDBMode string + flags.StringVar(&builderexecutionDataDBMode, + "execution-data-db", + "pebble", + "[deprecated] the DB type for execution datastore") + + flags.Uint64Var(&builder.executionDataPrunerHeightRangeTarget, + "execution-data-height-range-target", + defaultConfig.executionDataPrunerHeightRangeTarget, + "number of blocks of Execution Data to keep on disk. older data is pruned") + flags.Uint64Var(&builder.executionDataPrunerThreshold, + "execution-data-height-range-threshold", + defaultConfig.executionDataPrunerThreshold, + "number of unpruned blocks of Execution Data beyond the height range target to allow before pruning") + flags.DurationVar(&builder.executionDataPruningInterval, + "execution-data-pruning-interval", + defaultConfig.executionDataPruningInterval, + "duration after which the pruner tries to prune execution data. The default value is 10 minutes") // Execution State Streaming API flags.Uint32Var(&builder.stateStreamConf.ExecutionDataCacheSize, "execution-data-cache-size", defaultConfig.stateStreamConf.ExecutionDataCacheSize, "block execution data cache size") flags.Uint32Var(&builder.stateStreamConf.MaxGlobalStreams, "state-stream-global-max-streams", defaultConfig.stateStreamConf.MaxGlobalStreams, "global maximum number of concurrent streams") - flags.UintVar(&builder.stateStreamConf.MaxExecutionDataMsgSize, "state-stream-max-message-size", defaultConfig.stateStreamConf.MaxExecutionDataMsgSize, "maximum size for a gRPC message containing block execution data") - flags.DurationVar(&builder.stateStreamConf.ClientSendTimeout, "state-stream-send-timeout", defaultConfig.stateStreamConf.ClientSendTimeout, "maximum wait before timing out while sending a response to a streaming client e.g. 
30s") - flags.UintVar(&builder.stateStreamConf.ClientSendBufferSize, "state-stream-send-buffer-size", defaultConfig.stateStreamConf.ClientSendBufferSize, "maximum number of responses to buffer within a stream") - flags.StringToIntVar(&builder.stateStreamFilterConf, "state-stream-event-filter-limits", defaultConfig.stateStreamFilterConf, "event filter limits for ExecutionData SubscribeEvents API e.g. EventTypes=100,Addresses=100,Contracts=100 etc.") + flags.UintVar(&builder.stateStreamConf.MaxExecutionDataMsgSize, + "state-stream-max-message-size", + defaultConfig.stateStreamConf.MaxExecutionDataMsgSize, + "maximum size for a gRPC message containing block execution data") + flags.StringToIntVar(&builder.stateStreamFilterConf, + "state-stream-event-filter-limits", + defaultConfig.stateStreamFilterConf, + "event filter limits for ExecutionData SubscribeEvents API e.g. EventTypes=100,Addresses=100,Contracts=100 etc.") + flags.DurationVar(&builder.stateStreamConf.ClientSendTimeout, + "state-stream-send-timeout", + defaultConfig.stateStreamConf.ClientSendTimeout, + "maximum wait before timing out while sending a response to a streaming client e.g. 30s") + flags.UintVar(&builder.stateStreamConf.ClientSendBufferSize, + "state-stream-send-buffer-size", + defaultConfig.stateStreamConf.ClientSendBufferSize, + "maximum number of responses to buffer within a stream") + flags.Float64Var(&builder.stateStreamConf.ResponseLimit, + "state-stream-response-limit", + defaultConfig.stateStreamConf.ResponseLimit, + "max number of responses per second to send over streaming endpoints. this helps manage resources consumed by each client querying data not in the cache e.g. 3 or 0.5. 0 means no limit") + flags.Uint64Var(&builder.stateStreamConf.HeartbeatInterval, + "state-stream-heartbeat-interval", + defaultConfig.stateStreamConf.HeartbeatInterval, + "default interval in blocks at which heartbeat messages should be sent. applied when client did not specify a value.") + flags.Uint32Var(&builder.stateStreamConf.RegisterIDsRequestLimit, + "state-stream-max-register-values", + defaultConfig.stateStreamConf.RegisterIDsRequestLimit, + "maximum number of register ids to include in a single request to the GetRegisters endpoint") + + // Execution Data Indexer + flags.BoolVar(&builder.executionDataIndexingEnabled, + "execution-data-indexing-enabled", + defaultConfig.executionDataIndexingEnabled, + "whether to enable the execution data indexing") + flags.StringVar(&builder.registersDBPath, "execution-state-dir", defaultConfig.registersDBPath, "directory to use for execution-state database") + flags.StringVar(&builder.checkpointFile, "execution-state-checkpoint", defaultConfig.checkpointFile, "execution-state checkpoint file") + + flags.StringVar(&builder.rpcConf.BackendConfig.EventQueryMode, + "event-query-mode", + defaultConfig.rpcConf.BackendConfig.EventQueryMode, + "mode to use when querying events. one of [local-only, execution-nodes-only(default), failover]") + + flags.StringVar(&builder.rpcConf.BackendConfig.TxResultQueryMode, + "tx-result-query-mode", + defaultConfig.rpcConf.BackendConfig.TxResultQueryMode, + "mode to use when querying transaction results. 
one of [local-only, execution-nodes-only(default), failover]") + flags.BoolVar(&builder.storeTxResultErrorMessages, + "store-tx-result-error-messages", + defaultConfig.storeTxResultErrorMessages, + "whether to enable storing transaction error messages into the db") + // Script Execution + flags.StringVar(&builder.rpcConf.BackendConfig.ScriptExecutionMode, + "script-execution-mode", + defaultConfig.rpcConf.BackendConfig.ScriptExecutionMode, + "mode to use when executing scripts. one of (local-only, execution-nodes-only, failover, compare)") + flags.Uint64Var(&builder.scriptExecutorConfig.ComputationLimit, + "script-execution-computation-limit", + defaultConfig.scriptExecutorConfig.ComputationLimit, + "maximum number of computation units a locally executed script can use. default: 100000") + flags.IntVar(&builder.scriptExecutorConfig.MaxErrorMessageSize, + "script-execution-max-error-length", + defaultConfig.scriptExecutorConfig.MaxErrorMessageSize, + "maximum number of characters to include in error message strings. additional characters are truncated. default: 1000") + flags.DurationVar(&builder.scriptExecutorConfig.LogTimeThreshold, + "script-execution-log-time-threshold", + defaultConfig.scriptExecutorConfig.LogTimeThreshold, + "emit a log for any scripts that take over this threshold. default: 1s") + flags.DurationVar(&builder.scriptExecutorConfig.ExecutionTimeLimit, + "script-execution-timeout", + defaultConfig.scriptExecutorConfig.ExecutionTimeLimit, + "timeout value for locally executed scripts. default: 10s") + flags.Uint64Var(&builder.scriptExecMinBlock, + "script-execution-min-height", + defaultConfig.scriptExecMinBlock, + "lowest block height to allow for script execution. default: no limit") + flags.Uint64Var(&builder.scriptExecMaxBlock, + "script-execution-max-height", + defaultConfig.scriptExecMaxBlock, + "highest block height to allow for script execution. default: no limit") + flags.StringVar(&builder.registerCacheType, + "register-cache-type", + defaultConfig.registerCacheType, + "type of backend cache to use for registers (lru, arc, 2q)") + flags.UintVar(&builder.registerCacheSize, + "register-cache-size", + defaultConfig.registerCacheSize, + "number of registers to cache for script execution. default: 0 (no cache)") + flags.UintVar(&builder.programCacheSize, + "program-cache-size", + defaultConfig.programCacheSize, + "[experimental] number of blocks to cache for cadence programs. use 0 to disable cache. default: 0. Note: this is an experimental feature and may cause nodes to become unstable under certain workloads. Use with caution.") + + // Payer Balance + flags.StringVar(&builder.checkPayerBalanceMode, + "check-payer-balance-mode", + defaultConfig.checkPayerBalanceMode, + "flag for payer balance validation that specifies whether or not to enforce the balance check. one of [disabled(default), warn, enforce]") + + // Register DB Pruning + flags.Uint64Var(&builder.registerDBPruneThreshold, + "registerdb-pruning-threshold", + defaultConfig.registerDBPruneThreshold, + fmt.Sprintf("specifies the number of blocks below the latest stored block height to keep in register db. 
default: %d", defaultConfig.registerDBPruneThreshold)) + + // websockets config + flags.DurationVar( + &builder.rpcConf.WebSocketConfig.InactivityTimeout, + "websocket-inactivity-timeout", + defaultConfig.rpcConf.WebSocketConfig.InactivityTimeout, + "the duration a WebSocket connection can remain open without any active subscriptions before being automatically closed", + ) + flags.Uint64Var( + &builder.rpcConf.WebSocketConfig.MaxSubscriptionsPerConnection, + "websocket-max-subscriptions-per-connection", + defaultConfig.rpcConf.WebSocketConfig.MaxSubscriptionsPerConnection, + "the maximum number of active WebSocket subscriptions allowed per connection", + ) + flags.Float64Var( + &builder.rpcConf.WebSocketConfig.MaxResponsesPerSecond, + "websocket-max-responses-per-second", + defaultConfig.rpcConf.WebSocketConfig.MaxResponsesPerSecond, + fmt.Sprintf("the maximum number of responses that can be sent to a single client per second. Default: %f. if set to 0, no limit is applied to the number of responses per second.", defaultConfig.rpcConf.WebSocketConfig.MaxResponsesPerSecond), + ) + flags.BoolVar( + &builder.rpcConf.EnableWebSocketsStreamAPI, + "websockets-stream-api-enabled", + defaultConfig.rpcConf.EnableWebSocketsStreamAPI, + "whether to enable the WebSockets Stream API.", + ) }).ValidateFlags(func() error { if builder.supportsObserver && (builder.PublicNetworkConfig.BindAddress == cmd.NotSet || builder.PublicNetworkConfig.BindAddress == "") { return errors.New("public-network-address must be set if supports-observer is true") @@ -690,52 +1540,74 @@ func (builder *FlowAccessNodeBuilder) extraFlags() { if builder.stateStreamConf.ClientSendBufferSize == 0 { return errors.New("state-stream-send-buffer-size must be greater than 0") } - if len(builder.stateStreamFilterConf) > 3 { - return errors.New("state-stream-event-filter-limits must have at most 3 keys (EventTypes, Addresses, Contracts)") + if len(builder.stateStreamFilterConf) > 4 { + return errors.New("state-stream-event-filter-limits must have at most 3 keys (EventTypes, Addresses, Contracts, AccountAddresses)") } for key, value := range builder.stateStreamFilterConf { switch key { - case "EventTypes", "Addresses", "Contracts": + case "EventTypes", "Addresses", "Contracts", "AccountAddresses": if value <= 0 { return fmt.Errorf("state-stream-event-filter-limits %s must be greater than 0", key) } default: - return errors.New("state-stream-event-filter-limits may only contain the keys EventTypes, Addresses, Contracts") + return errors.New("state-stream-event-filter-limits may only contain the keys EventTypes, Addresses, Contracts, AccountAddresses") } } + if builder.stateStreamConf.ResponseLimit < 0 { + return errors.New("state-stream-response-limit must be greater than or equal to 0") + } + if builder.stateStreamConf.RegisterIDsRequestLimit <= 0 { + return errors.New("state-stream-max-register-values must be greater than 0") + } + } + if builder.rpcConf.BackendConfig.CircuitBreakerConfig.Enabled { + if builder.rpcConf.BackendConfig.CircuitBreakerConfig.MaxFailures == 0 { + return errors.New("circuit-breaker-max-failures must be greater than 0") + } + if builder.rpcConf.BackendConfig.CircuitBreakerConfig.MaxRequests == 0 { + return errors.New("circuit-breaker-max-requests must be greater than 0") + } + if builder.rpcConf.BackendConfig.CircuitBreakerConfig.RestoreTimeout <= 0 { + return errors.New("circuit-breaker-restore-timeout must be greater than 0") + } } - return nil - }) -} + if builder.checkPayerBalanceMode != 
txvalidator.Disabled.String() && !builder.executionDataIndexingEnabled { + return errors.New("execution-data-indexing-enabled must be set if check-payer-balance is enabled") + } -// initNetwork creates the network.Network implementation with the given metrics, middleware, initial list of network -// participants and topology used to choose peers from the list of participants. The list of participants can later be -// updated by calling network.SetIDs. -func (builder *FlowAccessNodeBuilder) initNetwork(nodeID module.Local, - networkMetrics module.NetworkCoreMetrics, - middleware network.Middleware, - topology network.Topology, - receiveCache *netcache.ReceiveCache, -) (*p2p.Network, error) { - - // creates network instance - net, err := p2p.NewNetwork(&p2p.NetworkParameters{ - Logger: builder.Logger, - Codec: cborcodec.NewCodec(), - Me: nodeID, - MiddlewareFactory: func() (network.Middleware, error) { return builder.Middleware, nil }, - Topology: topology, - SubscriptionManager: subscription.NewChannelSubscriptionManager(middleware), - Metrics: networkMetrics, - IdentityProvider: builder.IdentityProvider, - ReceiveCache: receiveCache, - }) - if err != nil { - return nil, fmt.Errorf("could not initialize network: %w", err) - } + if builder.rpcConf.RestConfig.MaxRequestSize <= 0 { + return errors.New("rest-max-request-size must be greater than 0") + } + if builder.rpcConf.RestConfig.MaxResponseSize <= 0 { + return errors.New("rest-max-response-size must be greater than 0") + } + if builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize <= 0 { + return errors.New("rpc-max-request-message-size must be greater than 0") + } + if builder.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize <= 0 { + return errors.New("rpc-max-response-message-size must be greater than 0") + } + if builder.rpcConf.BackendConfig.CollectionConfig.MaxRequestMsgSize <= 0 { + return errors.New("rpc-max-collection-request-message-size must be greater than 0") + } + if builder.rpcConf.BackendConfig.CollectionConfig.MaxResponseMsgSize <= 0 { + return errors.New("rpc-max-collection-response-message-size must be greater than 0") + } + if builder.rpcConf.BackendConfig.ExecutionConfig.MaxRequestMsgSize <= 0 { + return errors.New("rpc-max-execution-request-message-size must be greater than 0") + } + if builder.rpcConf.BackendConfig.ExecutionConfig.MaxResponseMsgSize <= 0 { + return errors.New("rpc-max-execution-response-message-size must be greater than 0") + } + + // indexing tx error messages is only supported when tx results are also indexed + if builder.storeTxResultErrorMessages && !builder.executionDataIndexingEnabled { + return errors.New("execution-data-indexing-enabled must be set if store-tx-result-error-messages is enabled") + } - return net, nil + return nil + }) } func publicNetworkMsgValidators(log zerolog.Logger, idProvider module.IdentityProvider, selfID flow.Identifier) []network.MessageValidator { @@ -761,40 +1633,39 @@ func (builder *FlowAccessNodeBuilder) InitIDProviders() { } builder.IDTranslator = translator.NewHierarchicalIDTranslator(idCache, translator.NewPublicNetworkIDTranslator()) - builder.NodeDisallowListDistributor = cmd.BuildDisallowListNotificationDisseminator(builder.DisallowListNotificationCacheSize, builder.MetricsRegisterer, builder.Logger, builder.MetricsEnabled) - // The following wrapper allows to disallow-list byzantine nodes via an admin command: // the wrapper overrides the 'Ejected' flag of disallow-listed nodes to true - disallowListWrapper, err := 
cache.NewNodeBlocklistWrapper(idCache, node.DB, builder.NodeDisallowListDistributor) + disallowListWrapper, err := cache.NewNodeDisallowListWrapper( + idCache, + node.ProtocolDB, + func() network.DisallowListNotificationConsumer { + return builder.NetworkUnderlay + }, + ) if err != nil { - return fmt.Errorf("could not initialize NodeBlockListWrapper: %w", err) + return fmt.Errorf("could not initialize NodeDisallowListWrapper: %w", err) } builder.IdentityProvider = disallowListWrapper // register the wrapper for dynamic configuration via admin command err = node.ConfigManager.RegisterIdentifierListConfig("network-id-provider-blocklist", - disallowListWrapper.GetBlocklist, disallowListWrapper.Update) + disallowListWrapper.GetDisallowList, disallowListWrapper.Update) if err != nil { - return fmt.Errorf("failed to register blocklist with config manager: %w", err) + return fmt.Errorf("failed to register disallow-list wrapper with config manager: %w", err) } builder.SyncEngineParticipantsProviderFactory = func() module.IdentifierProvider { return id.NewIdentityFilterIdentifierProvider( filter.And( - filter.HasRole(flow.RoleConsensus), - filter.Not(filter.HasNodeID(node.Me.NodeID())), - p2p.NotEjectedFilter, + filter.HasRole[flow.Identity](flow.RoleConsensus), + filter.Not(filter.HasNodeID[flow.Identity](node.Me.NodeID())), + filter.NotEjectedFilter, ), builder.IdentityProvider, ) } return nil }) - - builder.Component("disallow list notification distributor", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - // distributor is returned as a component to be started and stopped. - return builder.NodeDisallowListDistributor, nil - }) } func (builder *FlowAccessNodeBuilder) Initialize() error { @@ -806,7 +1677,7 @@ func (builder *FlowAccessNodeBuilder) Initialize() error { builder.EnqueueNetworkInit() builder.AdminCommand("get-transactions", func(conf *cmd.NodeConfig) commands.AdminCommand { - return storageCommands.NewGetTransactionsCommand(conf.State, conf.Storage.Payloads, conf.Storage.Collections) + return storageCommands.NewGetTransactionsCommand(conf.State, conf.Storage.Payloads, notNil(builder.collections)) }) // if this is an access node that supports public followers, enqueue the public network @@ -825,7 +1696,7 @@ func (builder *FlowAccessNodeBuilder) Initialize() error { builder.EnqueueTracer() builder.PreInit(cmd.DynamicStartPreInit) - builder.ValidateRootSnapshot(badgerState.ValidRootSnapshotContainsEntityExpiryRange) + builder.ValidateRootSnapshot(statedatastore.ValidRootSnapshotContainsEntityExpiryRange) return nil } @@ -833,21 +1704,53 @@ func (builder *FlowAccessNodeBuilder) Initialize() error { func (builder *FlowAccessNodeBuilder) enqueueRelayNetwork() { builder.Component("relay network", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { relayNet := relaynet.NewRelayNetwork( - node.Network, + node.EngineRegistry, builder.AccessNodeConfig.PublicNetworkConfig.Network, node.Logger, map[channels.Channel]channels.Channel{ channels.ReceiveBlocks: channels.PublicReceiveBlocks, }, ) - node.Network = relayNet + node.EngineRegistry = relayNet return relayNet, nil }) } func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { + var processedFinalizedBlockHeight storage.ConsumerProgressInitializer + var processedTxErrorMessagesBlockHeight storage.ConsumerProgressInitializer + + if builder.executionDataSyncEnabled { + builder.BuildExecutionSyncComponents() + } + + ingestionDependable := module.NewProxiedReadyDoneAware() + 
builder.IndexerDependencies.Add(ingestionDependable) + versionControlDependable := module.NewProxiedReadyDoneAware() + builder.IndexerDependencies.Add(versionControlDependable) + stopControlDependable := module.NewProxiedReadyDoneAware() + builder.IndexerDependencies.Add(stopControlDependable) + var lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + builder. BuildConsensusFollower(). + Module("normalize rpc message limits", func(node *cmd.NodeConfig) error { + // This needs to be the first module run so other modules can use the normalized values + // TODO: remove this module once the deprecated flag is removed + if builder.rpcConf.DeprecatedMaxMsgSize != 0 { + node.Logger.Warn().Msg("A deprecated flag was specified (--rpc-max-message-size). Use --rpc-max-request-message-size and --rpc-max-response-message-size instead. This flag will be removed in a future release.") + builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize = builder.rpcConf.DeprecatedMaxMsgSize + builder.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize = builder.rpcConf.DeprecatedMaxMsgSize + + builder.rpcConf.BackendConfig.CollectionConfig.MaxRequestMsgSize = commonrpc.DefaultMaxMsgSize // previous version used this default + builder.rpcConf.BackendConfig.CollectionConfig.MaxResponseMsgSize = builder.rpcConf.DeprecatedMaxMsgSize + + builder.rpcConf.BackendConfig.ExecutionConfig.MaxRequestMsgSize = commonrpc.DefaultMaxMsgSize // previous version used this default + builder.rpcConf.BackendConfig.ExecutionConfig.MaxResponseMsgSize = builder.rpcConf.DeprecatedMaxMsgSize + } + + return nil + }). Module("collection node client", func(node *cmd.NodeConfig) error { // collection node address is optional (if not specified, collection nodes will be chosen at random) if strings.TrimSpace(builder.rpcConf.CollectionAddr) == "" { @@ -861,9 +1764,12 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { collectionRPCConn, err := grpc.Dial( builder.rpcConf.CollectionAddr, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(builder.rpcConf.MaxMsgSize))), + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(int(builder.rpcConf.BackendConfig.CollectionConfig.MaxRequestMsgSize)), + grpc.MaxCallRecvMsgSize(int(builder.rpcConf.BackendConfig.CollectionConfig.MaxResponseMsgSize)), + ), grpc.WithTransportCredentials(insecure.NewCredentials()), - backend.WithClientUnaryInterceptor(builder.rpcConf.CollectionClientTimeout)) + rpcConnection.WithClientTimeoutOption(builder.rpcConf.BackendConfig.CollectionConfig.Timeout)) if err != nil { return err } @@ -878,9 +1784,24 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { } node.Logger.Info().Str("access_nodes", addr).Msg("historical access node addresses") + // maintain backwards compatibility with the deprecated flag + // TODO: remove this once the deprecated flag is removed + var callOpts []grpc.CallOption + if builder.rpcConf.DeprecatedMaxMsgSize == 0 { + callOpts = append(callOpts, + grpc.MaxCallSendMsgSize(int(builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize)), + grpc.MaxCallRecvMsgSize(int(builder.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize)), + ) + } else { + // only receive limit was enforced in previous versions. 
send used default (4mb) + callOpts = append(callOpts, + grpc.MaxCallRecvMsgSize(int(builder.rpcConf.DeprecatedMaxMsgSize)), + ) + } + historicalAccessRPCConn, err := grpc.Dial( addr, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(builder.rpcConf.MaxMsgSize))), + grpc.WithDefaultCallOptions(callOpts...), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return err @@ -890,32 +1811,62 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { return nil }). Module("transaction timing mempools", func(node *cmd.NodeConfig) error { - var err error - builder.TransactionTimings, err = stdmap.NewTransactionTimings(1500 * 300) // assume 1500 TPS * 300 seconds - if err != nil { - return err - } + builder.TransactionTimings = stdmap.NewTransactionTimings(1500 * 300) // assume 1500 TPS * 300 seconds + builder.CollectionsToMarkFinalized = stdmap.NewTimes(50 * 300) // assume 50 collection nodes * 300 seconds + builder.CollectionsToMarkExecuted = stdmap.NewTimes(50 * 300) // assume 50 collection nodes * 300 seconds + builder.BlockTransactions = stdmap.NewIdentifierMap(10000) + builder.BlocksToMarkExecuted = stdmap.NewTimes(1 * 300) // assume 1 block per second * 300 seconds - builder.CollectionsToMarkFinalized, err = stdmap.NewTimes(50 * 300) // assume 50 collection nodes * 300 seconds + return nil + }). + Module("transaction metrics", func(node *cmd.NodeConfig) error { + builder.TransactionMetrics = metrics.NewTransactionCollector( + node.Logger, + builder.TransactionTimings, + builder.logTxTimeToFinalized, + builder.logTxTimeToExecuted, + builder.logTxTimeToFinalizedExecuted, + builder.logTxTimeToSealed, + ) + return nil + }). + Module("transaction validation metrics", func(node *cmd.NodeConfig) error { + builder.TransactionValidationMetrics = metrics.NewTransactionValidationCollector() + return nil + }). + Module("rest metrics", func(node *cmd.NodeConfig) error { + m, err := metrics.NewRestCollector(router.URLToRoute, node.MetricsRegisterer) if err != nil { return err } - - builder.CollectionsToMarkExecuted, err = stdmap.NewTimes(50 * 300) // assume 50 collection nodes * 300 seconds + builder.RestMetrics = m + return nil + }). + Module("access metrics", func(node *cmd.NodeConfig) error { + builder.AccessMetrics = metrics.NewAccessCollector( + metrics.WithTransactionMetrics(builder.TransactionMetrics), + metrics.WithTransactionValidationMetrics(builder.TransactionValidationMetrics), + metrics.WithBackendScriptsMetrics(builder.TransactionMetrics), + metrics.WithRestMetrics(builder.RestMetrics), + ) + return nil + }). + Module("collection metrics", func(node *cmd.NodeConfig) error { + var err error + builder.collectionExecutedMetric, err = indexer.NewCollectionExecutedMetricImpl( + builder.Logger, + builder.AccessMetrics, + builder.CollectionsToMarkFinalized, + builder.CollectionsToMarkExecuted, + builder.BlocksToMarkExecuted, + builder.collections, + builder.Storage.Blocks, + builder.BlockTransactions, + ) if err != nil { return err } - builder.BlocksToMarkExecuted, err = stdmap.NewTimes(1 * 300) // assume 1 block per second * 300 seconds - return err - }). - Module("transaction metrics", func(node *cmd.NodeConfig) error { - builder.TransactionMetrics = metrics.NewTransactionCollector(builder.TransactionTimings, node.Logger, builder.logTxTimeToFinalized, - builder.logTxTimeToExecuted, builder.logTxTimeToFinalizedExecuted) - return nil - }). 
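The per-client message-size caps configured earlier translate into standard grpc-go call options, as the collection and historical-access clients above show. A minimal sketch of assembling such a connection (generic grpc-go usage, not the exact flow-go helper):

```go
package rpcclient

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialWithSizeCaps opens an insecure gRPC client connection whose request and
// response payload sizes are capped independently, matching the call options
// used for the collection and historical-access clients above.
func dialWithSizeCaps(addr string, maxRequest, maxResponse int) (*grpc.ClientConn, error) {
	return grpc.Dial(
		addr,
		grpc.WithDefaultCallOptions(
			grpc.MaxCallSendMsgSize(maxRequest),  // cap on outgoing request payloads
			grpc.MaxCallRecvMsgSize(maxResponse), // cap on incoming response payloads
		),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
}
```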
- Module("access metrics", func(node *cmd.NodeConfig) error { - builder.AccessMetrics = metrics.NewAccessCollector() return nil }). Module("ping metrics", func(node *cmd.NodeConfig) error { @@ -932,29 +1883,299 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { builder.rpcConf.TransportCredentials = credentials.NewTLS(tlsConfig) return nil }). - Component("RPC engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - engineBuilder, err := rpc.NewBuilder( + Module("creating grpc servers", func(node *cmd.NodeConfig) error { + builder.secureGrpcServer = grpcserver.NewGrpcServerBuilder( node.Logger, + builder.rpcConf.SecureGRPCListenAddr, + builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize, + builder.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, + builder.apiBurstlimits, + grpcserver.WithTransportCredentials(builder.rpcConf.TransportCredentials)).Build() + + builder.stateStreamGrpcServer = grpcserver.NewGrpcServerBuilder( + node.Logger, + builder.stateStreamConf.ListenAddr, + builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize, + builder.stateStreamConf.MaxExecutionDataMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, + builder.apiBurstlimits, + grpcserver.WithStreamInterceptor()).Build() + + if builder.rpcConf.UnsecureGRPCListenAddr != builder.stateStreamConf.ListenAddr { + builder.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(node.Logger, + builder.rpcConf.UnsecureGRPCListenAddr, + builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize, + builder.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, + builder.apiBurstlimits).Build() + } else { + builder.unsecureGrpcServer = builder.stateStreamGrpcServer + } + + return nil + }). + Module("backend script executor", func(node *cmd.NodeConfig) error { + builder.ScriptExecutor = backend.NewScriptExecutor(builder.Logger, builder.scriptExecMinBlock, builder.scriptExecMaxBlock) + return nil + }). + Module("async register store", func(node *cmd.NodeConfig) error { + builder.RegistersAsyncStore = execution.NewRegistersAsyncStore() + return nil + }). + Module("events storage", func(node *cmd.NodeConfig) error { + builder.events = store.NewEvents(node.Metrics.Cache, node.ProtocolDB) + return nil + }). + Module("reporter", func(node *cmd.NodeConfig) error { + builder.Reporter = index.NewReporter() + return nil + }). + Module("events index", func(node *cmd.NodeConfig) error { + builder.EventsIndex = index.NewEventsIndex(builder.Reporter, builder.events) + return nil + }). + Module("transaction result index", func(node *cmd.NodeConfig) error { + builder.TxResultsIndex = index.NewTransactionResultsIndex(builder.Reporter, builder.lightTransactionResults) + return nil + }). + Module("processed finalized block height consumer progress", func(node *cmd.NodeConfig) error { + processedFinalizedBlockHeight = store.NewConsumerProgress(builder.ProtocolDB, module.ConsumeProgressIngestionEngineBlockHeight) + return nil + }). 
+ Module("processed last full block height monotonic consumer progress", func(node *cmd.NodeConfig) error { + rootBlockHeight := node.State.Params().FinalizedRoot().Height + + progress, err := store.NewConsumerProgress(builder.ProtocolDB, module.ConsumeProgressLastFullBlockHeight).Initialize(rootBlockHeight) + if err != nil { + return err + } + + lastFullBlockHeight, err = counters.NewPersistentStrictMonotonicCounter(progress) + if err != nil { + return fmt.Errorf("failed to initialize monotonic consumer progress: %w", err) + } + + return nil + }). + Component("version control", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + if !builder.versionControlEnabled { + noop := &module.NoopReadyDoneAware{} + versionControlDependable.Init(noop) + return noop, nil + } + + nodeVersion, err := build.Semver() + if err != nil { + return nil, fmt.Errorf("could not load node version for version control. "+ + "version (%s) is not semver compliant: %w. Make sure a valid semantic version is provided in the VERSION environment variable", build.Version(), err) + } + + versionControl, err := version.NewVersionControl( + builder.Logger, + node.Storage.VersionBeacons, + nodeVersion, + builder.SealedRootBlock.Height, + builder.LastFinalizedHeader.Height, + ) + if err != nil { + return nil, fmt.Errorf("could not create version control: %w", err) + } + + // VersionControl needs to consume BlockFinalized events. + node.ProtocolEvents.AddConsumer(versionControl) + + builder.VersionControl = versionControl + versionControlDependable.Init(builder.VersionControl) + + return versionControl, nil + }). + Component("stop control", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + if !builder.stopControlEnabled { + noop := &module.NoopReadyDoneAware{} + stopControlDependable.Init(noop) + return noop, nil + } + + stopControl := stop.NewStopControl( + builder.Logger, + ) + + builder.VersionControl.AddVersionUpdatesConsumer(stopControl.OnVersionUpdate) + + builder.StopControl = stopControl + stopControlDependable.Init(builder.StopControl) + + return stopControl, nil + }). 
+ Component("RPC engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + config := builder.rpcConf + backendConfig := config.BackendConfig + accessMetrics := builder.AccessMetrics + cacheSize := int(backendConfig.ConnectionPoolSize) + + var connBackendCache *rpcConnection.Cache + var err error + if cacheSize > 0 { + connBackendCache, err = rpcConnection.NewCache(node.Logger, accessMetrics, cacheSize) + if err != nil { + return nil, fmt.Errorf("could not initialize connection cache: %w", err) + } + } + + connFactory := &rpcConnection.ConnectionFactoryImpl{ + AccessConfig: backendConfig.AccessConfig, + CollectionConfig: backendConfig.CollectionConfig, + ExecutionConfig: backendConfig.ExecutionConfig, + AccessMetrics: accessMetrics, + Log: node.Logger, + Manager: rpcConnection.NewManager( + node.Logger, + accessMetrics, + connBackendCache, + backendConfig.CircuitBreakerConfig, + config.CompressorName, + ), + } + + scriptExecMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.ScriptExecutionMode) + if err != nil { + return nil, fmt.Errorf("could not parse script execution mode: %w", err) + } + + eventQueryMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.EventQueryMode) + if err != nil { + return nil, fmt.Errorf("could not parse event query mode: %w", err) + } + if eventQueryMode == query_mode.IndexQueryModeCompare { + return nil, fmt.Errorf("event query mode 'compare' is not supported") + } + + broadcaster := engine.NewBroadcaster() + // create BlockTracker that will track for new blocks (finalized and sealed) and + // handles block-related operations. + blockTracker, err := subscriptiontracker.NewBlockTracker( node.State, - builder.rpcConf, - builder.CollectionRPC, - builder.HistoricalAccessRPCs, - node.Storage.Blocks, + builder.SealedRootBlock.Height, node.Storage.Headers, - node.Storage.Collections, - node.Storage.Transactions, + broadcaster, + ) + if err != nil { + return nil, fmt.Errorf("failed to initialize block tracker: %w", err) + } + txResultQueryMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.TxResultQueryMode) + if err != nil { + return nil, fmt.Errorf("could not parse transaction result query mode: %w", err) + } + if txResultQueryMode == query_mode.IndexQueryModeCompare { + return nil, fmt.Errorf("transaction result query mode 'compare' is not supported") + } + + // If execution data syncing and indexing is disabled, pass nil indexReporter + var indexReporter state_synchronization.IndexReporter + if builder.executionDataSyncEnabled && builder.executionDataIndexingEnabled { + indexReporter = builder.Reporter + } + + checkPayerBalanceMode, err := txvalidator.ParsePayerBalanceMode(builder.checkPayerBalanceMode) + if err != nil { + return nil, fmt.Errorf("could not parse payer balance mode: %w", err) + + } + + preferredENIdentifiers, err := flow.IdentifierListFromHex(backendConfig.PreferredExecutionNodeIDs) + if err != nil { + return nil, fmt.Errorf("failed to convert node id string to Flow Identifier for preferred EN map: %w", err) + } + + fixedENIdentifiers, err := flow.IdentifierListFromHex(backendConfig.FixedExecutionNodeIDs) + if err != nil { + return nil, fmt.Errorf("failed to convert node id string to Flow Identifier for fixed EN map: %w", err) + } + + builder.ExecNodeIdentitiesProvider = commonrpc.NewExecutionNodeIdentitiesProvider( + node.Logger, + node.State, node.Storage.Receipts, - node.Storage.Results, + preferredENIdentifiers, + fixedENIdentifiers, + ) + + nodeCommunicator := 
node_communicator.NewNodeCommunicator(backendConfig.CircuitBreakerConfig.Enabled) + builder.txResultErrorMessageProvider = error_messages.NewTxErrorMessageProvider( + node.Logger, + builder.transactionResultErrorMessages, // might be nil + notNil(builder.TxResultsIndex), + connFactory, + nodeCommunicator, + notNil(builder.ExecNodeIdentitiesProvider), + ) + + builder.nodeBackend, err = backend.New(backend.Params{ + State: node.State, + CollectionRPC: builder.CollectionRPC, // might be nil + HistoricalAccessNodes: notNil(builder.HistoricalAccessRPCs), + Blocks: node.Storage.Blocks, + Headers: node.Storage.Headers, + Collections: notNil(builder.collections), + Transactions: notNil(builder.transactions), + ExecutionReceipts: node.Storage.Receipts, + ExecutionResults: node.Storage.Results, + TxResultErrorMessages: builder.transactionResultErrorMessages, // might be nil + ChainID: node.RootChainID, + AccessMetrics: notNil(builder.AccessMetrics), + ConnFactory: connFactory, + RetryEnabled: builder.retryEnabled, + MaxHeightRange: backendConfig.MaxHeightRange, + Log: node.Logger, + SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, + Communicator: nodeCommunicator, + TxResultCacheSize: builder.TxResultCacheSize, + ScriptExecutor: notNil(builder.ScriptExecutor), + ScriptExecutionMode: scriptExecMode, + CheckPayerBalanceMode: checkPayerBalanceMode, + EventQueryMode: eventQueryMode, + BlockTracker: blockTracker, + SubscriptionHandler: subscription.NewSubscriptionHandler( + builder.Logger, + broadcaster, + builder.stateStreamConf.ClientSendTimeout, + builder.stateStreamConf.ResponseLimit, + builder.stateStreamConf.ClientSendBufferSize, + ), + EventsIndex: notNil(builder.EventsIndex), + TxResultQueryMode: txResultQueryMode, + TxResultsIndex: notNil(builder.TxResultsIndex), + LastFullBlockHeight: lastFullBlockHeight, + IndexReporter: indexReporter, + VersionControl: notNil(builder.VersionControl), + ExecNodeIdentitiesProvider: notNil(builder.ExecNodeIdentitiesProvider), + TxErrorMessageProvider: notNil(builder.txResultErrorMessageProvider), + MaxScriptAndArgumentSize: config.BackendConfig.AccessConfig.MaxRequestMsgSize, + ScheduledCallbacksEnabled: builder.scheduledCallbacksEnabled, + }) + if err != nil { + return nil, fmt.Errorf("could not initialize backend: %w", err) + } + + engineBuilder, err := rpc.NewBuilder( + node.Logger, + node.State, + config, node.RootChainID, - builder.TransactionMetrics, - builder.AccessMetrics, - builder.collectionGRPCPort, - builder.executionGRPCPort, - builder.retryEnabled, + notNil(builder.AccessMetrics), builder.rpcMetricsEnabled, - builder.apiRatelimits, - builder.apiBurstlimits, - builder.Me, + notNil(builder.Me), + notNil(builder.nodeBackend), + notNil(builder.nodeBackend), + notNil(builder.secureGrpcServer), + notNil(builder.unsecureGrpcServer), + notNil(builder.stateStreamBackend), + builder.stateStreamConf, + indexReporter, ) if err != nil { return nil, err @@ -975,40 +2196,58 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { var err error builder.RequestEng, err = requester.New( - node.Logger, + node.Logger.With().Str("entity", "collection").Logger(), node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, node.State, channels.RequestCollections, - filter.HasRole(flow.RoleCollection), - func() flow.Entity { return &flow.Collection{} }, + filter.HasRole[flow.Identity](flow.RoleCollection), + func() flow.Entity { return new(flow.Collection) }, ) if err != nil { return nil, fmt.Errorf("could not create requester engine: %w", 
err) } + if builder.storeTxResultErrorMessages { + builder.TxResultErrorMessagesCore = tx_error_messages.NewTxErrorMessagesCore( + node.Logger, + notNil(builder.txResultErrorMessageProvider), + builder.transactionResultErrorMessages, + notNil(builder.ExecNodeIdentitiesProvider), + ) + } + + collectionSyncer := ingestion.NewCollectionSyncer( + node.Logger, + notNil(builder.collectionExecutedMetric), + builder.RequestEng, + node.State, + node.Storage.Blocks, + notNil(builder.collections), + notNil(builder.transactions), + lastFullBlockHeight, + node.StorageLockMgr, + ) + builder.RequestEng.WithHandle(collectionSyncer.OnCollectionDownloaded) + builder.IngestEng, err = ingestion.New( node.Logger, - node.Network, + node.EngineRegistry, node.State, node.Me, - builder.RequestEng, node.Storage.Blocks, - node.Storage.Headers, - node.Storage.Collections, - node.Storage.Transactions, node.Storage.Results, node.Storage.Receipts, - builder.TransactionMetrics, - builder.CollectionsToMarkFinalized, - builder.CollectionsToMarkExecuted, - builder.BlocksToMarkExecuted, + processedFinalizedBlockHeight, + notNil(collectionSyncer), + notNil(builder.collectionExecutedMetric), + notNil(builder.TxResultErrorMessagesCore), ) if err != nil { return nil, err } - builder.RequestEng.WithHandle(builder.IngestEng.OnCollection) + ingestionDependable.Init(builder.IngestEng) builder.FollowerDistributor.AddOnBlockFinalizedConsumer(builder.IngestEng.OnFinalizedBlock) return builder.IngestEng, nil @@ -1018,8 +2257,50 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { // order for it to properly start and shut down, we should still return it as its own engine here, so it can // be handled by the scaffold. return builder.RequestEng, nil + }). + AdminCommand("backfill-tx-error-messages", func(config *cmd.NodeConfig) commands.AdminCommand { + return storageCommands.NewBackfillTxErrorMessagesCommand( + builder.Logger, + builder.State, + builder.TxResultErrorMessagesCore, + ) }) + if builder.storeTxResultErrorMessages { + builder. + Module("transaction result error messages storage", func(node *cmd.NodeConfig) error { + builder.transactionResultErrorMessages = store.NewTransactionResultErrorMessages( + node.Metrics.Cache, + node.ProtocolDB, + bstorage.DefaultCacheSize, + ) + return nil + }). + Module("processed error messages block height consumer progress", func(node *cmd.NodeConfig) error { + processedTxErrorMessagesBlockHeight = store.NewConsumerProgress( + builder.ProtocolDB, + module.ConsumeProgressEngineTxErrorMessagesBlockHeight, + ) + return nil + }). 
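The `builder.RequestEng.WithHandle(collectionSyncer.OnCollectionDownloaded)` wiring above follows a plain callback-registration pattern: the requester fetches entities from the network and hands each one to whatever handler was registered. A stripped-down sketch of the shape (hypothetical types, not the actual requester engine):

```go
package main

import "fmt"

// HandleFunc receives a downloaded entity together with the ID of the node it came from.
type HandleFunc func(originID string, entity interface{})

// Requester is a toy stand-in for the requester engine: it fetches entities
// (elided here) and delivers each one to the registered handler.
type Requester struct {
	handle HandleFunc
}

// WithHandle registers the callback invoked for every downloaded entity.
func (r *Requester) WithHandle(f HandleFunc) {
	r.handle = f
}

// deliver simulates an entity arriving from the network.
func (r *Requester) deliver(originID string, entity interface{}) {
	if r.handle != nil {
		r.handle(originID, entity)
	}
}

func main() {
	r := &Requester{}
	r.WithHandle(func(originID string, entity interface{}) {
		fmt.Printf("collection %v downloaded from %s\n", entity, originID)
	})
	r.deliver("node-1", "collection-abc")
}
```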
+ Component("transaction result error messages engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + engine, err := tx_error_messages.New( + node.Logger, + metrics.NewTransactionErrorMessagesCollector(), + node.State, + node.Storage.Headers, + processedTxErrorMessagesBlockHeight, + builder.TxResultErrorMessagesCore, + ) + if err != nil { + return nil, err + } + builder.FollowerDistributor.AddOnBlockFinalizedConsumer(engine.OnFinalizedBlock) + + return engine, nil + }) + } + if builder.supportsObserver { builder.Component("public sync request handler", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { syncRequestHandler, err := synceng.NewRequestHandlerEngine( @@ -1031,187 +2312,195 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { node.Storage.Blocks, builder.SyncCore, ) - if err != nil { return nil, fmt.Errorf("could not create public sync request handler: %w", err) } + builder.FollowerDistributor.AddFinalizationConsumer(syncRequestHandler) return syncRequestHandler, nil }) } - if builder.executionDataSyncEnabled { - builder.BuildExecutionDataRequester() - } + builder.Component("secure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.secureGrpcServer, nil + }) - builder.Component("ping engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - ping, err := pingeng.New( - node.Logger, - node.IdentityProvider, - node.IDTranslator, - node.Me, - builder.PingMetrics, - builder.pingEnabled, - builder.nodeInfoFile, - node.PingService, - ) + builder.Component("state stream unsecure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.stateStreamGrpcServer, nil + }) - if err != nil { - return nil, fmt.Errorf("could not create ping engine: %w", err) - } + if builder.rpcConf.UnsecureGRPCListenAddr != builder.stateStreamConf.ListenAddr { + builder.Component("unsecure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.unsecureGrpcServer, nil + }) + } - return ping, nil - }) + if builder.pingEnabled { + builder.Component("ping engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + ping, err := pingeng.New( + node.Logger, + node.IdentityProvider, + node.IDTranslator, + node.Me, + builder.PingMetrics, + builder.nodeInfoFile, + node.PingService, + ) + if err != nil { + return nil, fmt.Errorf("could not create ping engine: %w", err) + } + + return ping, nil + }) + } return builder.FlowNodeBuilder.Build() } // enqueuePublicNetworkInit enqueues the public network component initialized for the staked node func (builder *FlowAccessNodeBuilder) enqueuePublicNetworkInit() { - var libp2pNode p2p.LibP2PNode + var publicLibp2pNode p2p.LibP2PNode builder. Module("public network metrics", func(node *cmd.NodeConfig) error { builder.PublicNetworkConfig.Metrics = metrics.NewNetworkCollector(builder.Logger, metrics.WithNetworkPrefix("public")) return nil }). 
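The `AddOnBlockFinalizedConsumer` / `AddFinalizationConsumer` registrations above all use the same publish-subscribe shape: one distributor fans each finalization event out to every registered consumer. A hedged sketch of that pattern with invented names, not the real `FollowerDistributor`:

```go
package main

import "fmt"

// OnBlockFinalized is the callback signature consumers register.
type OnBlockFinalized func(height uint64)

// Distributor fans each finalization event out to all registered consumers.
type Distributor struct {
	consumers []OnBlockFinalized
}

func (d *Distributor) AddOnBlockFinalizedConsumer(c OnBlockFinalized) {
	d.consumers = append(d.consumers, c)
}

// OnFinalizedBlock is called once by the event source; every consumer sees the event.
func (d *Distributor) OnFinalizedBlock(height uint64) {
	for _, c := range d.consumers {
		c(height)
	}
}

func main() {
	d := &Distributor{}
	d.AddOnBlockFinalizedConsumer(func(h uint64) { fmt.Println("ingestion saw height", h) })
	d.AddOnBlockFinalizedConsumer(func(h uint64) { fmt.Println("error-message engine saw height", h) })
	d.OnFinalizedBlock(42)
}
```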
Component("public libp2p node", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - - libP2PFactory := builder.initPublicLibP2PFactory(builder.NodeConfig.NetworkKey, builder.PublicNetworkConfig.BindAddress, builder.PublicNetworkConfig.Metrics) - var err error - libp2pNode, err = libP2PFactory() + publicLibp2pNode, err = builder.initPublicLibp2pNode( + builder.NodeConfig.NetworkKey, + builder.PublicNetworkConfig.BindAddress, + builder.PublicNetworkConfig.Metrics) if err != nil { return nil, fmt.Errorf("could not create public libp2p node: %w", err) } - return libp2pNode, nil + return publicLibp2pNode, nil }). Component("public network", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { msgValidators := publicNetworkMsgValidators(node.Logger.With().Bool("public", true).Logger(), node.IdentityProvider, builder.NodeID) - - middleware := builder.initMiddleware(builder.NodeID, builder.PublicNetworkConfig.Metrics, libp2pNode, msgValidators...) - - // topology returns empty list since peers are not known upfront - top := topology.EmptyTopology{} - receiveCache := netcache.NewHeroReceiveCache(builder.NetworkReceivedMessageCacheSize, + receiveCache := netcache.NewHeroReceiveCache(builder.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, builder.Logger, - metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), p2p.PublicNetwork)) + metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) err := node.Metrics.Mempool.Register(metrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) if err != nil { return nil, fmt.Errorf("could not register networking receive cache metric: %w", err) } - net, err := builder.initNetwork(builder.Me, builder.PublicNetworkConfig.Metrics, middleware, top, receiveCache) + net, err := underlay.NewNetwork(&underlay.NetworkConfig{ + Logger: builder.Logger.With().Str("module", "public-network").Logger(), + Libp2pNode: publicLibp2pNode, + Codec: cborcodec.NewCodec(), + Me: builder.Me, + Topology: topology.EmptyTopology{}, // topology returns empty list since peers are not known upfront + Metrics: builder.PublicNetworkConfig.Metrics, + BitSwapMetrics: builder.Metrics.Bitswap, + IdentityProvider: builder.IdentityProvider, + ReceiveCache: receiveCache, + ConduitFactory: conduit.NewDefaultConduitFactory(), + SporkId: builder.SporkID, + UnicastMessageTimeout: underlay.DefaultUnicastTimeout, + IdentityTranslator: builder.IDTranslator, + AlspCfg: &alspmgr.MisbehaviorReportManagerConfig{ + Logger: builder.Logger, + SpamRecordCacheSize: builder.FlowConfig.NetworkConfig.AlspConfig.SpamRecordCacheSize, + SpamReportQueueSize: builder.FlowConfig.NetworkConfig.AlspConfig.SpamReportQueueSize, + DisablePenalty: builder.FlowConfig.NetworkConfig.AlspConfig.DisablePenalty, + HeartBeatInterval: builder.FlowConfig.NetworkConfig.AlspConfig.HearBeatInterval, + AlspMetrics: builder.Metrics.Network, + NetworkType: network.PublicNetwork, + HeroCacheMetricsFactory: builder.HeroCacheMetricsFactory(), + }, + SlashingViolationConsumerFactory: func(adapter network.ConduitAdapter) network.ViolationsConsumer { + return slashing.NewSlashingViolationsConsumer(builder.Logger, builder.Metrics.Network, adapter) + }, + }, underlay.WithMessageValidators(msgValidators...)) if err != nil { - return nil, err + return nil, fmt.Errorf("could not initialize network: %w", err) } + builder.NetworkUnderlay = net builder.AccessNodeConfig.PublicNetworkConfig.Network = net node.Logger.Info().Msgf("network will 
run on address: %s", builder.PublicNetworkConfig.BindAddress) return net, nil }). Component("public peer manager", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - return libp2pNode.PeerManagerComponent(), nil + return publicLibp2pNode.PeerManagerComponent(), nil }) } -// initPublicLibP2PFactory creates the LibP2P factory function for the given node ID and network key. -// The factory function is later passed into the initMiddleware function to eventually instantiate the p2p.LibP2PNode instance +// initPublicLibp2pNode initializes the public libp2p node for the public (unstaked) network. // The LibP2P host is created with the following options: // - DHT as server // - The address from the node config or the specified bind address as the listen address // - The passed in private key as the libp2p key // - No connection gater // - Default Flow libp2p pubsub options -func (builder *FlowAccessNodeBuilder) initPublicLibP2PFactory(networkKey crypto.PrivateKey, bindAddress string, networkMetrics module.LibP2PMetrics) p2p.LibP2PFactoryFunc { - return func() (p2p.LibP2PNode, error) { - connManager, err := connection.NewConnManager(builder.Logger, networkMetrics, builder.ConnectionManagerConfig) - if err != nil { - return nil, fmt.Errorf("could not create connection manager: %w", err) - } - - meshTracer := tracer.NewGossipSubMeshTracer( - builder.Logger, - networkMetrics, - builder.IdentityProvider, - builder.GossipSubConfig.LocalMeshLogInterval) - - // setup RPC inspectors - rpcInspectorBuilder := inspector.NewGossipSubInspectorBuilder(builder.Logger, builder.SporkID, builder.GossipSubConfig.RpcInspector) - rpcInspectorSuite, err := rpcInspectorBuilder. - SetPublicNetwork(p2p.PublicNetwork). - SetMetrics(&p2pconfig.MetricsConfig{ - HeroCacheFactory: builder.HeroCacheMetricsFactory(), - Metrics: builder.Metrics.Network, - }).Build() - if err != nil { - return nil, fmt.Errorf("failed to create gossipsub rpc inspectors for access node: %w", err) - } - - libp2pNode, err := p2pbuilder.NewNodeBuilder( - builder.Logger, - networkMetrics, - bindAddress, - networkKey, - builder.SporkID, - builder.LibP2PResourceManagerConfig). - SetBasicResolver(builder.Resolver). - SetSubscriptionFilter( - subscription.NewRoleBasedFilter( - flow.RoleAccess, builder.IdentityProvider, - ), - ). - SetConnectionManager(connManager). - SetRoutingSystem(func(ctx context.Context, h host.Host) (routing.Routing, error) { - return dht.NewDHT( - ctx, - h, - protocols.FlowPublicDHTProtocolID(builder.SporkID), - builder.Logger, - networkMetrics, - dht.AsServer(), - ) - }). - // disable connection pruning for the access node which supports the observer - SetPeerManagerOptions(connection.PruningDisabled, builder.PeerUpdateInterval). - SetStreamCreationRetryInterval(builder.UnicastCreateStreamRetryDelay). - SetGossipSubTracer(meshTracer). - SetGossipSubScoreTracerInterval(builder.GossipSubConfig.ScoreTracerInterval). - SetGossipSubRpcInspectorSuite(rpcInspectorSuite). - Build() - - if err != nil { - return nil, fmt.Errorf("could not build libp2p node for staked access node: %w", err) - } - - return libp2pNode, nil +// +// Args: +// - networkKey: The private key to use for the libp2p node +// +// - bindAddress: The address to bind the libp2p node to. +// - networkMetrics: The metrics collector for the network +// Returns: +// - The libp2p node instance for the public network. +// - Any error encountered during initialization. Any error should be considered fatal. 
+func (builder *FlowAccessNodeBuilder) initPublicLibp2pNode(networkKey crypto.PrivateKey, bindAddress string, networkMetrics module.LibP2PMetrics) (p2p.LibP2PNode, + error, +) { + connManager, err := connection.NewConnManager(builder.Logger, networkMetrics, &builder.FlowConfig.NetworkConfig.ConnectionManager) + if err != nil { + return nil, fmt.Errorf("could not create connection manager: %w", err) } + libp2pNode, err := p2pbuilder.NewNodeBuilder(builder.Logger, &builder.FlowConfig.NetworkConfig.GossipSub, &p2pbuilderconfig.MetricsConfig{ + HeroCacheFactory: builder.HeroCacheMetricsFactory(), + Metrics: networkMetrics, + }, + network.PublicNetwork, + bindAddress, + networkKey, + builder.SporkID, + builder.IdentityProvider, + &builder.FlowConfig.NetworkConfig.ResourceManager, + &p2pbuilderconfig.PeerManagerConfig{ + // TODO: eventually, we need pruning enabled even on public network. However, it needs a modified version of + // the peer manager that also operate on the public identities. + ConnectionPruning: connection.PruningDisabled, + UpdateInterval: builder.FlowConfig.NetworkConfig.PeerUpdateInterval, + ConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(), + }, + &p2p.DisallowListCacheConfig{ + MaxSize: builder.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, + Metrics: metrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), + }, + &p2pbuilderconfig.UnicastConfig{ + Unicast: builder.FlowConfig.NetworkConfig.Unicast, + }). + SetProtocolPeerCacheList(protocols.FlowProtocolID(builder.SporkID)). + SetBasicResolver(builder.Resolver). + SetSubscriptionFilter(networkingsubscription.NewRoleBasedFilter(flow.RoleAccess, builder.IdentityProvider)). + SetConnectionManager(connManager). + SetRoutingSystem(func(ctx context.Context, h host.Host) (routing.Routing, error) { + return dht.NewDHT(ctx, h, protocols.FlowPublicDHTProtocolID(builder.SporkID), builder.Logger, networkMetrics, dht.AsServer()) + }). + Build() + if err != nil { + return nil, fmt.Errorf("could not build libp2p node for staked access node: %w", err) + } + + return libp2pNode, nil } -// initMiddleware creates the network.Middleware implementation with the libp2p factory function, metrics, peer update -// interval, and validators. The network.Middleware is then passed into the initNetwork function. -func (builder *FlowAccessNodeBuilder) initMiddleware(nodeID flow.Identifier, - networkMetrics module.NetworkSecurityMetrics, - libp2pNode p2p.LibP2PNode, - validators ...network.MessageValidator, -) network.Middleware { - logger := builder.Logger.With().Bool("staked", false).Logger() - slashingViolationsConsumer := slashing.NewSlashingViolationsConsumer(logger, networkMetrics) - mw := middleware.NewMiddleware( - logger, - libp2pNode, - nodeID, - builder.Metrics.Bitswap, - builder.SporkID, - middleware.DefaultUnicastTimeout, - builder.IDTranslator, - builder.CodecFactory(), - slashingViolationsConsumer, - middleware.WithMessageValidators(validators...), // use default identifier provider - ) - builder.NodeDisallowListDistributor.AddConsumer(mw) - builder.Middleware = mw - return builder.Middleware +// notNil ensures that the input is not nil and returns it +// the usage is to ensure the dependencies are initialized before initializing a module. +// for instance, the IngestionEngine depends on storage.Collections, which is initialized in a +// different function, so we need to ensure that the storage.Collections is initialized before +// creating the IngestionEngine. 
+func notNil[T any](dep T) T {
+	if any(dep) == nil {
+		panic("dependency is nil")
+	}
+	return dep
 }
diff --git a/cmd/bootstrap/README.md b/cmd/bootstrap/README.md
index 9000f4d87f4..a0c7a242b19 100644
--- a/cmd/bootstrap/README.md
+++ b/cmd/bootstrap/README.md
@@ -3,9 +3,9 @@
 This package contains script for generating the bootstrap files needed to initialize the Flow network. The high-level bootstrapping process is described in [Notion](https://www.notion.so/dapperlabs/Flow-Bootstrapping-ce9d227f18a8410dbce74ed7d4ddee27).
 
-WARNING: These scripts use Go's crypto/rand package to generate seeds for private keys. Make sure you are running the bootstrap scripts on a machine that does provide proper a low-level implementation. See https://golang.org/pkg/crypto/rand/ for details.
+WARNING: These scripts use Go's crypto/rand package to generate seeds for private keys whenever seeds are not provided to the commands. Make sure you are running the bootstrap scripts on a machine that provides a cryptographically secure low-level RNG. See https://golang.org/pkg/crypto/rand/ for details.
 
-NOTE: Public and private keys are encoded in JSON files as base64 strings, not as hex, contrary to what might be expected.
+NOTE: Public and private keys are encoded in JSON files as hex strings.
 
 Code structure:
 * `cmd/bootstrap/cmd` contains CLI logic that can exit the program and read/write files. It also uses structures and data types that are purely relevant for CLI purposes, such as encoding, decoding, etc.
@@ -18,9 +18,9 @@ Code structure:
 The bootstrapping will generate the following information:
 
 #### Per node
-* Staking key (BLS key with curve BLS12-381)
-* Networking key (ECDSA key)
-* Random beacon key; _only_ for consensus nodes (BLS based on Joint-Feldman DKG for threshold signatures)
+* Staking private key (BLS key on curve BLS12-381)
+* Networking private key (ECDSA key on curve P-256)
+* Random beacon private key; _only_ for consensus nodes (BLS key on curve BLS12-381, used for BLS-based threshold signatures)
 
 #### Node Identities
 * List of all authorized Flow nodes
@@ -28,6 +28,7 @@ The bootstrapping will generate the following information:
   - node ID
   - node role
   - public staking key
+  - proof of possession of the staking private key
   - public networking key
   - weight
 
@@ -46,7 +47,7 @@ _Each cluster_ of collector nodes needs to have its own root Block and root QC
 
 # Usage
 
-`go run -tags relic ./cmd/bootstrap` prints usage information
+`go run ./cmd/bootstrap` prints usage information
 
 ## Phase 1: Generate networking and staking keys for partner nodes:
 
@@ -61,11 +62,12 @@ Values directly specified as command line parameters:
 Values can be specified as command line parameters:
 - seed for generating staking key (min 48 bytes in hex encoding)
 - seed for generating networking key (min 48 bytes in hex encoding)
-If seeds are not provided, the CLI will try to use the system's pseudo-random number generator (PRNG), e. g. `dev/urandom`. Make sure you are running the CLI on a hardware that has a cryptographically secure PRNG, or provide seeds generated on such a system.
+Provided seeds must be of high entropy, ideally generated by a cryptographically secure RNG.
+If seeds are not provided, the CLI will try to use the system's random number generator (RNG), e.g. `/dev/urandom`. Make sure you are running the CLI on hardware that has a cryptographically secure RNG.
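If you prefer to supply your own seeds rather than rely on the machine's RNG, something like the following generates a hex-encoded seed with Go's `crypto/rand`. This is a minimal sketch of my own; the 48-byte minimum is taken from the flag descriptions above:

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// 48 bytes is the documented minimum seed length for the key flags above.
	seed := make([]byte, 48)
	if _, err := rand.Read(seed); err != nil {
		log.Fatalf("failed to read random bytes: %v", err)
	}
	fmt.Println(hex.EncodeToString(seed))
}
```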
#### Example ```bash -go run -tags relic ./cmd/bootstrap key --address "example.com:1234" --role "consensus" -o ./bootstrap/partner-node-infos +go run ./cmd/bootstrap key --address "example.com:1234" --role "consensus" -o ./bootstrap/partner-node-infos ``` #### Generated output files @@ -76,7 +78,7 @@ go run -tags relic ./cmd/bootstrap key --address "example.com:1234" --role "cons file needs to be available to respective partner node at boot up (or recovery after crash) * file `<NodeID>.node-info.pub.json` - public information - - file needs to be delivered to Dapper Labs for Phase 2 of generating root information, + - file needs to be delivered to the Flow Foundation team for Phase 2 of generating root information, but is not required at node start @@ -89,6 +91,7 @@ Each input is a config file specified as a command line parameter: * parameter with the ID for the chain for the root block (`root-chain`) * parameter with the ID of the parent block for the root block (`root-parent`) * parameter with height of the root block to bootstrap from (`root-height`) +* parameter with view of the root block to bootstrap from (`root-view`) * parameter with state commitment for the initial execution state (`root-commit`) * `json` containing configuration for all Dapper-Controlled nodes (see `./example_files/node-config.json`) * folder containing the `<NodeID>.node-info.pub.json` files for _all_ partner nodes (see `.example_files/partner-node-infos`) @@ -97,17 +100,62 @@ Each input is a config file specified as a command line parameter: #### Example ```bash -go run -tags relic ./cmd/bootstrap finalize \ - --fast-kg \ - --root-chain main \ - --root-height 0 \ - --root-parent 0000000000000000000000000000000000000000000000000000000000000000 \ - --root-commit 4b8d01975cf0cd23e046b1fae36518e542f92a6e35bedd627c43da30f4ae761a \ - --config ./cmd/bootstrap/example_files/node-config.json \ - --partner-dir ./cmd/bootstrap/example_files/partner-node-infos \ - --partner-weights ./cmd/bootstrap/example_files/partner-weights.json \ - --epoch-counter 1 \ - -o ./bootstrap/root-infos +go run . genconfig \ + --address-format "%s%d-example.onflow.org:3569" \ + --access 2 \ + --collection 4 \ + --consensus 3 \ + --execution 2 \ + --verification 3 \ + --weight 100 \ + -o ./ \ + --config ./bootstrap-example/node-config.json + +``` + +```bash +go run . keygen \ + --machine-account \ + --config ./bootstrap-example/node-config.json \ + -o ./bootstrap-example/keys + +``` + +```bash +go run . rootblock \ + --root-chain bench \ + --root-height 0 \ + --root-parent 0000000000000000000000000000000000000000000000000000000000000000 \ + --root-view 0 \ + --epoch-counter 0 \ + --epoch-length 30000 \ + --epoch-staking-phase-length 20000 \ + --epoch-dkg-phase-length 2000 \ + --collection-clusters 1 \ + --protocol-version=0 \ + --use-default-epoch-timing \ + --epoch-commit-safety-threshold=1000 \ + --config ./bootstrap-example/node-config.json \ + -o ./bootstrap-example \ + --partner-dir ./example_files/partner-node-infos \ + --partner-weights ./example_files/partner-weights.json \ + --internal-priv-dir ./bootstrap-example/keys +``` + +```bash +go run . 
finalize \ + --config ./bootstrap-example/node-config.json \ + --partner-dir ./example_files/partner-node-infos \ + --partner-weights ./example_files/partner-weights.json \ + --internal-priv-dir ./bootstrap-example/keys/private-root-information \ + --dkg-data ./bootstrap-example/private-root-information/root-dkg-data.priv.json \ + --root-block ./bootstrap-example/public-root-information/root-block.json \ + --intermediary-bootstrapping-data ./bootstrap-example/public-root-information/intermediary-bootstrapping-data.json \ + --root-block-votes-dir ./bootstrap-example/public-root-information/root-block-votes/ \ + --root-commit 0000000000000000000000000000000000000000000000000000000000000000 \ + --genesis-token-supply="1000000000.0" \ + --service-account-public-key-json "{\"PublicKey\":\"R7MTEDdLclRLrj2MI1hcp4ucgRTpR15PCHAWLM5nks6Y3H7+PGkfZTP2di2jbITooWO4DD1yqaBSAVK8iQ6i0A==\",\"SignAlgo\":2,\"HashAlgo\":1,\"SeqNumber\":0,\"Weight\":1000}" \ + -o ./bootstrap-example ``` #### Generated output files @@ -153,7 +201,7 @@ go run -tags relic ./cmd/bootstrap finalize \ This generates the networking key used by observers to connect to the public libp2p network. It is a different key format than staked nodes and should only be used for Observers. ```bash -go run -tags relic ./cmd/bootstrap observer-network-key -f ./path/network-key +go run ./cmd/bootstrap observer-network-key -f ./path/network-key ``` This key must be kept secret as it's used to encrypt and sign network requests sent by the observers. diff --git a/cmd/bootstrap/cmd/access_keygen.go b/cmd/bootstrap/cmd/access_keygen.go index ecd0c5d3945..4e11ee16afc 100644 --- a/cmd/bootstrap/cmd/access_keygen.go +++ b/cmd/bootstrap/cmd/access_keygen.go @@ -13,9 +13,9 @@ import ( "strings" "time" + "github.com/onflow/crypto" "github.com/spf13/cobra" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/utils/grpcutils" ) diff --git a/cmd/bootstrap/cmd/block.go b/cmd/bootstrap/cmd/block.go index 0e9b3612559..f40ae15a25f 100644 --- a/cmd/bootstrap/cmd/block.go +++ b/cmd/bootstrap/cmd/block.go @@ -2,22 +2,109 @@ package cmd import ( "encoding/hex" + "fmt" "time" "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/signature" ) -func constructRootBlock(rootChain string, rootParent string, rootHeight uint64, rootTimestamp string) *flow.Block { - +// constructRootHeaderBody constructs a header body for the root block. +func constructRootHeaderBody(rootChain string, rootParent string, rootHeight uint64, rootView uint64, rootTimestamp string) (*flow.HeaderBody, error) { chainID := parseChainID(rootChain) parentID := parseParentID(rootParent) - height := rootHeight timestamp := parseRootTimestamp(rootTimestamp) - block := run.GenerateRootBlock(chainID, parentID, height, timestamp) + return run.GenerateRootHeaderBody(chainID, parentID, rootHeight, rootView, timestamp) +} + +// constructRootBlock constructs a valid root block based on the given header and protocol state ID for that block. 
+func constructRootBlock(rootHeaderBody *flow.HeaderBody, protocolStateID flow.Identifier) (*flow.Block, error) { + payload, err := flow.NewPayload( + flow.UntrustedPayload{ + Guarantees: nil, + Seals: nil, + Receipts: nil, + Results: nil, + ProtocolStateID: protocolStateID, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct payload: %w", err) + } + + block, err := flow.NewRootBlock( + flow.UntrustedBlock{ + HeaderBody: *rootHeaderBody, + Payload: *payload, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct root block: %w", err) + } + + return block, nil +} + +// constructRootEpochEvents constructs the epoch setup and commit events for the first epoch after spork. +func constructRootEpochEvents( + firstView uint64, + participants flow.IdentityList, + assignments flow.AssignmentList, + clusterQCs []*flow.QuorumCertificate, + dkgData dkg.ThresholdKeySet, + dkgIndexMap flow.DKGIndexMap, +) (*flow.EpochSetup, *flow.EpochCommit, error) { + epochSetup, err := flow.NewEpochSetup( + flow.UntrustedEpochSetup{ + Counter: flagEpochCounter, + FirstView: firstView, + DKGPhase1FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase - 1, + DKGPhase2FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*2 - 1, + DKGPhase3FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*3 - 1, + FinalView: firstView + flagNumViewsInEpoch - 1, + Participants: participants.Sort(flow.Canonical[flow.Identity]).ToSkeleton(), + Assignments: assignments, + RandomSource: GenerateRandomSeed(flow.EpochSetupRandomSourceLength), + TargetDuration: flagEpochTimingDuration, + TargetEndTime: rootEpochTargetEndTime(), + }, + ) + if err != nil { + return nil, nil, fmt.Errorf("could not construct epoch setup: %w", err) + } + + qcsWithSignerIDs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(clusterQCs)) + for i, clusterQC := range clusterQCs { + members := assignments[i] + signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members, clusterQC.SignerIndices) + if err != nil { + log.Fatal().Err(err).Msgf("could not decode signer IDs from clusterQC at index %v", i) + } + qcsWithSignerIDs = append(qcsWithSignerIDs, &flow.QuorumCertificateWithSignerIDs{ + View: clusterQC.View, + BlockID: clusterQC.BlockID, + SignerIDs: signerIDs, + SigData: clusterQC.SigData, + }) + } + + epochCommit, err := flow.NewEpochCommit( + flow.UntrustedEpochCommit{ + Counter: flagEpochCounter, + ClusterQCs: flow.ClusterQCVoteDatasFromQCs(qcsWithSignerIDs), + DKGGroupKey: dkgData.PubGroupKey, + DKGParticipantKeys: dkgData.PubKeyShares, + DKGIndexMap: dkgIndexMap, + }, + ) + if err != nil { + return nil, nil, fmt.Errorf("could not construct epoch commit: %w", err) + } - return block + return epochSetup, epochCommit, nil } func parseChainID(chainID string) flow.ChainID { @@ -26,6 +113,8 @@ func parseChainID(chainID string) flow.ChainID { return flow.Mainnet case "test": return flow.Testnet + case "preview": + return flow.Previewnet case "sandbox": return flow.Sandboxnet case "bench": diff --git a/cmd/bootstrap/cmd/check_machine_account.go b/cmd/bootstrap/cmd/check_machine_account.go index e2261012219..e622ca77d65 100644 --- a/cmd/bootstrap/cmd/check_machine_account.go +++ b/cmd/bootstrap/cmd/check_machine_account.go @@ -12,7 +12,9 @@ import ( sdk "github.com/onflow/flow-go-sdk" client "github.com/onflow/flow-go-sdk/access/grpc" + "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/util/cmd/common" model 
"github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/module/epochs" ) @@ -44,7 +46,10 @@ func checkMachineAccountRun(_ *cobra.Command, _ []string) { // read the private node information - used to get the role var nodeInfoPriv model.NodeInfoPriv - readJSON(filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeInfoPriv, nodeID)), &nodeInfoPriv) + err = common.ReadJSON(filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeInfoPriv, nodeID)), &nodeInfoPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } // read the machine account info file machineAccountInfo := readMachineAccountInfo(nodeID) @@ -58,12 +63,17 @@ func checkMachineAccountRun(_ *cobra.Command, _ []string) { log.Debug(). Str("machine_account_address", machineAccountInfo.Address). Str("machine_account_pub_key", fmt.Sprintf("%x", encodedRuntimeAccountPubKey(machineAccountPrivKey))). - Uint("key_index", machineAccountInfo.KeyIndex). + Uint32("key_index", machineAccountInfo.KeyIndex). Str("signing_algo", machineAccountInfo.SigningAlgorithm.String()). Str("hash_algo", machineAccountInfo.HashAlgorithm.String()). Msg("read machine account info from disk") - flowClient, err := client.NewClient(flagAccessAPIAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + flowClient, err := client.NewClient( + flagAccessAPIAddress, + client.WithGRPCDialOptions( + grpc.WithTransportCredentials(insecure.NewCredentials()), + ), + ) if err != nil { log.Fatal().Err(err).Msgf("could not connect to access API at address %s", flagAccessAPIAddress) } @@ -97,7 +107,10 @@ func readMachineAccountInfo(nodeID string) model.NodeMachineAccountInfo { var machineAccountInfo model.NodeMachineAccountInfo path := filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID)) - readJSON(path, &machineAccountInfo) + err := common.ReadJSON(path, &machineAccountInfo) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } return machineAccountInfo } diff --git a/cmd/bootstrap/cmd/clusters.go b/cmd/bootstrap/cmd/clusters.go deleted file mode 100644 index 078c74c08f2..00000000000 --- a/cmd/bootstrap/cmd/clusters.go +++ /dev/null @@ -1,99 +0,0 @@ -package cmd - -import ( - "github.com/onflow/flow-go/cmd/bootstrap/run" - model "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/cluster" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/assignment" - "github.com/onflow/flow-go/model/flow/factory" - "github.com/onflow/flow-go/model/flow/filter" -) - -// Construct cluster assignment with internal and partner nodes uniformly -// distributed across clusters. This function will produce the same cluster -// assignments for the same partner and internal lists, and the same seed. 
-func constructClusterAssignment(partnerNodes, internalNodes []model.NodeInfo, seed int64) (flow.AssignmentList, flow.ClusterList) { - - partners := model.ToIdentityList(partnerNodes).Filter(filter.HasRole(flow.RoleCollection)) - internals := model.ToIdentityList(internalNodes).Filter(filter.HasRole(flow.RoleCollection)) - nClusters := flagCollectionClusters - nCollectors := len(partners) + len(internals) - - // ensure we have at least as many collection nodes as clusters - if nCollectors < int(flagCollectionClusters) { - log.Fatal().Msgf("network bootstrap is configured with %d collection nodes, but %d clusters - must have at least one collection node per cluster", - nCollectors, flagCollectionClusters) - } - - // deterministically shuffle both collector lists based on the input seed - // by using a different seed each spork, we will have different clusters - // even with the same collectors - partners = partners.DeterministicShuffle(seed) - internals = internals.DeterministicShuffle(seed) - - identifierLists := make([]flow.IdentifierList, nClusters) - - // first, round-robin internal nodes into each cluster - for i, node := range internals { - identifierLists[i%len(identifierLists)] = append(identifierLists[i%len(identifierLists)], node.NodeID) - } - - // next, round-robin partner nodes into each cluster - for i, node := range partners { - identifierLists[i%len(identifierLists)] = append(identifierLists[i%len(identifierLists)], node.NodeID) - } - - assignments := assignment.FromIdentifierLists(identifierLists) - - collectors := append(partners, internals...) - clusters, err := factory.NewClusterList(assignments, collectors) - if err != nil { - log.Fatal().Err(err).Msg("could not create cluster list") - } - - return assignments, clusters -} - -func constructRootQCsForClusters( - clusterList flow.ClusterList, - nodeInfos []model.NodeInfo, - clusterBlocks []*cluster.Block, -) []*flow.QuorumCertificate { - - if len(clusterBlocks) != len(clusterList) { - log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)). - Msg("number of clusters needs to equal number of cluster blocks") - } - - qcs := make([]*flow.QuorumCertificate, len(clusterBlocks)) - for i, cluster := range clusterList { - signers := filterClusterSigners(cluster, nodeInfos) - - qc, err := run.GenerateClusterRootQC(signers, cluster, clusterBlocks[i]) - if err != nil { - log.Fatal().Err(err).Int("cluster index", i).Msg("generating collector cluster root QC failed") - } - qcs[i] = qc - } - - return qcs -} - -// Filters a list of nodes to include only nodes that will sign the QC for the -// given cluster. The resulting list of nodes is only nodes that are in the -// given cluster AND are not partner nodes (ie. we have the private keys). 
-func filterClusterSigners(cluster flow.IdentityList, nodeInfos []model.NodeInfo) []model.NodeInfo { - - var filtered []model.NodeInfo - for _, node := range nodeInfos { - _, isInCluster := cluster.ByNodeID(node.NodeID) - isNotPartner := node.Type() == model.NodeInfoTypePrivate - - if isInCluster && isNotPartner { - filtered = append(filtered, node) - } - } - - return filtered -} diff --git a/cmd/bootstrap/cmd/constraints.go b/cmd/bootstrap/cmd/constraints.go index e50867341e5..3fc3c757cad 100644 --- a/cmd/bootstrap/cmd/constraints.go +++ b/cmd/bootstrap/cmd/constraints.go @@ -8,17 +8,19 @@ import ( // ensureUniformNodeWeightsPerRole verifies that the following condition is satisfied for each role R: // * all node with role R must have the same weight +// The function assumes there is at least one node for each role. func ensureUniformNodeWeightsPerRole(allNodes flow.IdentityList) { // ensure all nodes of the same role have equal weight for _, role := range flow.Roles() { - withRole := allNodes.Filter(filter.HasRole(role)) - expectedWeight := withRole[0].Weight + withRole := allNodes.Filter(filter.HasRole[flow.Identity](role)) + // each role has at least one node so it's safe to access withRole[0] + expectedWeight := withRole[0].InitialWeight for _, node := range withRole { - if node.Weight != expectedWeight { + if node.InitialWeight != expectedWeight { log.Fatal().Msgf( "will not bootstrap configuration with non-equal weights\n"+ "found nodes with role %s and weight1=%d, weight2=%d", - role, expectedWeight, node.Weight) + role, expectedWeight, node.InitialWeight) } } } @@ -34,30 +36,4 @@ func checkConstraints(partnerNodes, internalNodes []model.NodeInfo) { all := append(partners, internals...) ensureUniformNodeWeightsPerRole(all) - - // check collection committee Byzantine threshold for each cluster - // for checking Byzantine constraints, the seed doesn't matter - _, clusters := constructClusterAssignment(partnerNodes, internalNodes, 0) - partnerCOLCount := uint(0) - internalCOLCount := uint(0) - for _, cluster := range clusters { - clusterPartnerCount := uint(0) - clusterInternalCount := uint(0) - for _, node := range cluster { - if _, exists := partners.ByNodeID(node.NodeID); exists { - clusterPartnerCount++ - } - if _, exists := internals.ByNodeID(node.NodeID); exists { - clusterInternalCount++ - } - } - if clusterInternalCount <= clusterPartnerCount*2 { - log.Fatal().Msgf( - "will not bootstrap configuration without Byzantine majority within cluster: "+ - "(partners=%d, internals=%d, min_internals=%d)", - clusterPartnerCount, clusterInternalCount, clusterPartnerCount*2+1) - } - partnerCOLCount += clusterPartnerCount - internalCOLCount += clusterInternalCount - } } diff --git a/cmd/bootstrap/cmd/db_encryption_key.go b/cmd/bootstrap/cmd/db_encryption_key.go index c99843e859b..897a7099c90 100644 --- a/cmd/bootstrap/cmd/db_encryption_key.go +++ b/cmd/bootstrap/cmd/db_encryption_key.go @@ -7,6 +7,7 @@ import ( "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" ) @@ -35,7 +36,7 @@ func dbEncryptionKeyRun(_ *cobra.Command, _ []string) { log = log.With().Str("path", dbEncryptionKeyPath).Logger() // check if the key already exists - exists, err := pathExists(path.Join(flagOutdir, dbEncryptionKeyPath)) + exists, err := common.PathExists(path.Join(flagOutdir, dbEncryptionKeyPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if db encryption key already exists") 
} @@ -50,5 +51,10 @@ func dbEncryptionKeyRun(_ *cobra.Command, _ []string) { } log.Info().Msg("generated db encryption key") - writeText(dbEncryptionKeyPath, dbEncryptionKey) + err = common.WriteText(dbEncryptionKeyPath, flagOutdir, dbEncryptionKey) + if err != nil { + log.Fatal().Err(err).Msg("failed to write file") + } + + log.Info().Msgf("wrote file %s/%s", flagOutdir, dbEncryptionKeyPath) } diff --git a/cmd/bootstrap/cmd/dkg.go b/cmd/bootstrap/cmd/dkg.go index d7069534e64..9914c02507d 100644 --- a/cmd/bootstrap/cmd/dkg.go +++ b/cmd/bootstrap/cmd/dkg.go @@ -3,51 +3,62 @@ package cmd import ( "fmt" + "github.com/onflow/crypto" + bootstrapDKG "github.com/onflow/flow-go/cmd/bootstrap/dkg" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/model/encodable" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol/inmem" ) -func runBeaconKG(nodes []model.NodeInfo) dkg.DKGData { +func runBeaconKG(nodes []model.NodeInfo) (dkg.ThresholdKeySet, flow.DKGIndexMap) { n := len(nodes) - log.Info().Msgf("read %v node infos for DKG", n) log.Debug().Msgf("will run DKG") - var dkgData dkg.DKGData - var err error - dkgData, err = bootstrapDKG.RandomBeaconKG(n, GenerateRandomSeed(crypto.SeedMinLenDKG)) + randomBeaconData, err := bootstrapDKG.RandomBeaconKG(n, GenerateRandomSeed(crypto.KeyGenSeedMinLen)) if err != nil { log.Fatal().Err(err).Msg("error running DKG") } log.Info().Msgf("finished running DKG") - pubKeyShares := make([]encodable.RandomBeaconPubKey, 0, len(dkgData.PubKeyShares)) - for _, pubKey := range dkgData.PubKeyShares { - pubKeyShares = append(pubKeyShares, encodable.RandomBeaconPubKey{PublicKey: pubKey}) - } - - privKeyShares := make([]encodable.RandomBeaconPrivKey, 0, len(dkgData.PrivKeyShares)) - for i, privKey := range dkgData.PrivKeyShares { + encodableParticipants := make([]inmem.ThresholdParticipant, 0, len(nodes)) + for i, privKey := range randomBeaconData.PrivKeyShares { nodeID := nodes[i].NodeID encKey := encodable.RandomBeaconPrivKey{PrivateKey: privKey} - privKeyShares = append(privKeyShares, encKey) + encodableParticipants = append(encodableParticipants, inmem.ThresholdParticipant{ + PrivKeyShare: encKey, + PubKeyShare: encodable.RandomBeaconPubKey{PublicKey: randomBeaconData.PubKeyShares[i]}, + NodeID: nodeID, + }) - writeJSON(fmt.Sprintf(model.PathRandomBeaconPriv, nodeID), encKey) + err = common.WriteJSON(fmt.Sprintf(model.PathRandomBeaconPriv, nodeID), flagOutdir, encKey) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, fmt.Sprintf(model.PathRandomBeaconPriv, nodeID)) + } + + indexMap := make(flow.DKGIndexMap, len(nodes)) + for i, node := range nodes { + indexMap[node.NodeID] = i } // write full DKG info that will be used to construct QC - writeJSON(model.PathRootDKGData, inmem.EncodableFullDKG{ + err = common.WriteJSON(model.PathRootDKGData, flagOutdir, inmem.ThresholdKeySet{ GroupKey: encodable.RandomBeaconPubKey{ - PublicKey: dkgData.PubGroupKey, + PublicKey: randomBeaconData.PubGroupKey, }, - PubKeyShares: pubKeyShares, - PrivKeyShares: privKeyShares, + Participants: encodableParticipants, }) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathRootDKGData) - return dkgData + return randomBeaconData, indexMap } diff --git 
a/cmd/bootstrap/cmd/final_list.go b/cmd/bootstrap/cmd/final_list.go deleted file mode 100644 index ac1b000876b..00000000000 --- a/cmd/bootstrap/cmd/final_list.go +++ /dev/null @@ -1,293 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - - "github.com/onflow/flow-go/cmd" - model "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/flow" -) - -var ( - flagStakingNodesPath string -) - -// finalListCmd represents the final list command -var finalListCmd = &cobra.Command{ - Use: "finallist", - Short: "generates a final list of nodes to be used for next network", - Long: "generates a final list of nodes to be used for next network after validating node data and matching against staking contract nodes ", - Run: finalList, -} - -func init() { - rootCmd.AddCommand(finalListCmd) - addFinalListFlags() -} - -func addFinalListFlags() { - // partner node info flag - finalListCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-infos", "", "path to a directory containing all parnter nodes details") - cmd.MarkFlagRequired(finalListCmd, "partner-infos") - - // internal/flow node info flag - finalListCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "flow-infos", "", "path to a directory containing all internal/flow nodes details") - cmd.MarkFlagRequired(finalListCmd, "flow-infos") - - // staking nodes dir containing staking nodes json - finalListCmd.Flags().StringVar(&flagStakingNodesPath, "staking-nodes", "", "path to a JSON file of all staking nodes") - cmd.MarkFlagRequired(finalListCmd, "staking-nodes") - - finalListCmd.Flags().UintVar(&flagCollectionClusters, "collection-clusters", 2, - "number of collection clusters") -} - -func finalList(cmd *cobra.Command, args []string) { - // read public partner node infos - log.Info().Msgf("reading partner public node information: %s", flagPartnerNodeInfoDir) - partnerNodes := assemblePartnerNodesWithoutWeight() - - // read internal private node infos - log.Info().Msgf("reading internal/flow private node information: %s", flagInternalNodePrivInfoDir) - internalNodes := assembleInternalNodesWithoutWeight() - - log.Info().Msg("checking constraints on consensus/cluster nodes") - checkConstraints(partnerNodes, internalNodes) - - // nodes which are registered on-chain - log.Info().Msgf("reading staking contract node information: %s", flagStakingNodesPath) - registeredNodes := readStakingContractDetails() - - // merge internal and partner node infos (from local files) - localNodes := mergeNodeInfos(internalNodes, partnerNodes) - - // reconcile nodes from staking contract nodes - validateNodes(localNodes, registeredNodes) - - // write node-config.json with the new list of nodes to be used for the `finalize` command - writeJSON(model.PathFinallist, model.ToPublicNodeInfoList(localNodes)) -} - -func validateNodes(localNodes []model.NodeInfo, registeredNodes []model.NodeInfo) { - // check node count - if len(localNodes) != len(registeredNodes) { - log.Error(). - Int("local", len(localNodes)). - Int("onchain", len(registeredNodes)). 
- Msg("onchain node count does not match local internal+partner node count") - } - - // check registered and local nodes to make sure node ID are not missing - validateNodeIDs(localNodes, registeredNodes) - - // print mismatching nodes - checkMismatchingNodes(localNodes, registeredNodes) - - // create map - localNodeMap := make(map[flow.Identifier]model.NodeInfo) - for _, node := range localNodes { - localNodeMap[node.NodeID] = node - } - - // check node type mismatch - for _, registeredNode := range registeredNodes { - - // win have matching node as we have a check before - matchingNode := localNodeMap[registeredNode.NodeID] - - // check node type and error if mismatch - if matchingNode.Role != registeredNode.Role { - log.Error(). - Str("registered node id", registeredNode.NodeID.String()). - Str("registered node role", registeredNode.Role.String()). - Str("local node", matchingNode.NodeID.String()). - Str("local node role", matchingNode.Role.String()). - Msg("node role does not match") - } - - if matchingNode.Address != registeredNode.Address { - log.Error(). - Str("registered node id", registeredNode.NodeID.String()). - Str("registered node address", registeredNode.Address). - Str("local node", matchingNode.NodeID.String()). - Str("local node address", matchingNode.Address). - Msg("node address does not match") - } - - // check address match - if matchingNode.Address != registeredNode.Address { - log.Warn(). - Str("registered node", registeredNode.NodeID.String()). - Str("node id", matchingNode.NodeID.String()). - Msg("address do not match") - } - - // flow localNodes contain private key info - if matchingNode.NetworkPubKey().String() != "" { - // check networking pubkey match - matchNodeKey := matchingNode.NetworkPubKey().String() - registeredNodeKey := registeredNode.NetworkPubKey().String() - - if matchNodeKey != registeredNodeKey { - log.Error(). - Str("registered network key", registeredNodeKey). - Str("network key", matchNodeKey). - Msg("networking keys do not match") - } - } - - // flow localNodes contain privatekey info - if matchingNode.StakingPubKey().String() != "" { - matchNodeKey := matchingNode.StakingPubKey().String() - registeredNodeKey := registeredNode.StakingPubKey().String() - - if matchNodeKey != registeredNodeKey { - log.Error(). - Str("registered staking key", registeredNodeKey). - Str("staking key", matchNodeKey). - Msg("staking keys do not match") - } - } - } -} - -// validateNodeIDs will go through both sets of nodes and ensure that no node-id -// are missing. It will log all missing node ID's and throw an error. -func validateNodeIDs(localNodes []model.NodeInfo, registeredNodes []model.NodeInfo) { - - // go through registered nodes - invalidStakingNodes := make([]model.NodeInfo, 0) - for _, node := range registeredNodes { - if node.NodeID.String() == "" { - - // we warn here but exit later - invalidStakingNodes = append(invalidStakingNodes, node) - log.Warn(). - Str("node-address", node.Address). - Msg("missing node-id from registered nodes") - } - } - - // go through local nodes - invalidNodes := make([]model.NodeInfo, 0) - for _, node := range localNodes { - if node.NodeID.String() == "" { - - // we warn here but exit later - invalidNodes = append(invalidNodes, node) - log.Warn(). - Str("node-address", node.Address). - Msg("missing node-id from local nodes") - } - } - - if len(invalidNodes) != 0 || len(invalidStakingNodes) != 0 { - log.Fatal().Msg("found missing nodes ids. 
fix and re-run") - } -} - -func checkMismatchingNodes(localNodes []model.NodeInfo, registeredNodes []model.NodeInfo) { - - localNodesByID := make(map[flow.Identifier]model.NodeInfo) - for _, node := range localNodes { - localNodesByID[node.NodeID] = node - } - - registeredNodesByID := make(map[flow.Identifier]model.NodeInfo) - for _, node := range registeredNodes { - registeredNodesByID[node.NodeID] = node - } - - // try match local nodes to registered nodes - invalidLocalNodes := make([]model.NodeInfo, 0) - for _, node := range localNodes { - if _, ok := registeredNodesByID[node.NodeID]; !ok { - log.Warn(). - Str("local-node-id", node.NodeID.String()). - Str("role", node.Role.String()). - Str("address", node.Address). - Msg("matching registered node not found for local node") - invalidLocalNodes = append(invalidLocalNodes, node) - } - } - - invalidRegisteredNodes := make([]model.NodeInfo, 0) - for _, node := range registeredNodes { - if _, ok := localNodesByID[node.NodeID]; !ok { - log.Warn(). - Str("registered-node-id", node.NodeID.String()). - Str("role", node.Role.String()). - Str("address", node.Address). - Msg("matching local node not found for local node") - invalidRegisteredNodes = append(invalidRegisteredNodes, node) - } - } - - if len(invalidLocalNodes) != 0 || len(invalidRegisteredNodes) != 0 { - log.Fatal().Msg("found missing mismatching nodes") - } -} - -func assembleInternalNodesWithoutWeight() []model.NodeInfo { - privInternals := readInternalNodes() - log.Info().Msgf("read %v internal private node-info files", len(privInternals)) - - var nodes []model.NodeInfo - for _, internal := range privInternals { - // check if address is valid format - validateAddressFormat(internal.Address) - - // validate every single internal node - nodeID := validateNodeID(internal.NodeID) - node := model.NewPrivateNodeInfo( - nodeID, - internal.Role, - internal.Address, - flow.DefaultInitialWeight, - internal.NetworkPrivKey, - internal.StakingPrivKey, - ) - - nodes = append(nodes, node) - } - - return nodes -} - -func assemblePartnerNodesWithoutWeight() []model.NodeInfo { - partners := readPartnerNodes() - log.Info().Msgf("read %v partner node configuration files", len(partners)) - return createPublicNodeInfo(partners) -} - -func readStakingContractDetails() []model.NodeInfo { - var stakingNodes []model.NodeInfoPub - readJSON(flagStakingNodesPath, &stakingNodes) - return createPublicNodeInfo(stakingNodes) -} - -func createPublicNodeInfo(nodes []model.NodeInfoPub) []model.NodeInfo { - var publicInfoNodes []model.NodeInfo - for _, n := range nodes { - validateAddressFormat(n.Address) - - // validate every single partner node - nodeID := validateNodeID(n.NodeID) - networkPubKey := validateNetworkPubKey(n.NetworkPubKey) - stakingPubKey := validateStakingPubKey(n.StakingPubKey) - - // all nodes should have equal weight - node := model.NewPublicNodeInfo( - nodeID, - n.Role, - n.Address, - flow.DefaultInitialWeight, - networkPubKey, - stakingPubKey, - ) - - publicInfoNodes = append(publicInfoNodes, node) - } - - return publicInfoNodes -} diff --git a/cmd/bootstrap/cmd/finalize.go b/cmd/bootstrap/cmd/finalize.go index 5d1eb74106a..74d526d2845 100644 --- a/cmd/bootstrap/cmd/finalize.go +++ b/cmd/bootstrap/cmd/finalize.go @@ -1,66 +1,57 @@ package cmd import ( - "encoding/binary" "encoding/hex" "encoding/json" "fmt" "path/filepath" "strings" + "time" "github.com/onflow/cadence" + "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/run" 
"github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" - "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/epochs" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/badger" + "github.com/onflow/flow-go/state/protocol/datastore" "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/protocol_state" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" "github.com/onflow/flow-go/utils/io" ) var ( flagConfig string flagInternalNodePrivInfoDir string - flagCollectionClusters uint flagPartnerNodeInfoDir string // Deprecated: use flagPartnerWeights instead - deprecatedFlagPartnerStakes string - flagPartnerWeights string - flagDKGDataPath string - flagRootBlock string - flagRootBlockVotesDir string - flagRootCommit string - flagProtocolVersion uint + deprecatedFlagPartnerStakes string + flagPartnerWeights string + flagDKGDataPath string + flagRootBlockPath string + flagRootCommit string + flagIntermediaryBootstrappingDataPath string + flagRootBlockVotesDir string + // optional flags for creating flagServiceAccountPublicKeyJSON string flagGenesisTokenSupply string - flagEpochCounter uint64 - flagNumViewsInEpoch uint64 - flagNumViewsInStakingAuction uint64 - flagNumViewsInDKGPhase uint64 - flagEpochCommitSafetyThreshold uint64 - - // this flag is used to seed the DKG, clustering and cluster QC generation - flagBootstrapRandomSeed []byte ) -// PartnerWeights is the format of the JSON file specifying partner node weights. 
-type PartnerWeights map[flow.Identifier]uint64
-
-// finalizeCmd represents the finalize command
+// finalizeCmd represents the finalize command
 var finalizeCmd = &cobra.Command{
 Use: "finalize",
 Short: "Finalize the bootstrapping process",
- Long: `Finalize the bootstrapping process, which includes running the DKG for the generation of the random beacon
+ Long: `Finalize the bootstrapping process, which includes generating the random beacon
 keys and generating the root block, QC, execution result and block seal.`,
 Run: finalize,
}
@@ -78,44 +69,29 @@ func addFinalizeCmdFlags() {
 "containing the output from the `keygen` command for internal nodes")
 finalizeCmd.Flags().StringVar(&flagPartnerNodeInfoDir, "partner-dir", "", "path to directory "+
 "containing one JSON file starting with node-info.pub.<NODE_ID>.json for every partner node (fields "+
- " in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey)")
+ " in the JSON file: Role, Address, NodeID, NetworkPubKey, StakingPubKey, StakingKeyPoP)")
 // Deprecated: remove this flag
 finalizeCmd.Flags().StringVar(&deprecatedFlagPartnerStakes, "partner-stakes", "", "deprecated: use partner-weights instead")
 finalizeCmd.Flags().StringVar(&flagPartnerWeights, "partner-weights", "", "path to a JSON file containing "+
 "a map from partner node's NodeID to their weight")
- finalizeCmd.Flags().StringVar(&flagDKGDataPath, "dkg-data", "", "path to a JSON file containing data as output from DKG process")
+ finalizeCmd.Flags().StringVar(&flagDKGDataPath, "dkg-data", "", "path to a JSON file containing data as output from the random beacon key generation")
+ finalizeCmd.Flags().StringVar(&flagRootCommit, "root-commit", "0000000000000000000000000000000000000000000000000000000000000000", "state commitment of root execution state")
 cmd.MarkFlagRequired(finalizeCmd, "config")
 cmd.MarkFlagRequired(finalizeCmd, "internal-priv-dir")
 cmd.MarkFlagRequired(finalizeCmd, "partner-dir")
 cmd.MarkFlagRequired(finalizeCmd, "partner-weights")
 cmd.MarkFlagRequired(finalizeCmd, "dkg-data")
+ cmd.MarkFlagRequired(finalizeCmd, "root-commit")
 // required parameters for generation of root block, root execution result and root block seal
- finalizeCmd.Flags().StringVar(&flagRootBlock, "root-block", "",
- "path to a JSON file containing root block")
+ finalizeCmd.Flags().StringVar(&flagRootBlockPath, "root-block", "", "path to a JSON file containing root block")
+ finalizeCmd.Flags().StringVar(&flagIntermediaryBootstrappingDataPath, "intermediary-bootstrapping-data", "", "path to a JSON file containing intermediary bootstrapping data generated by the rootblock command")
 finalizeCmd.Flags().StringVar(&flagRootBlockVotesDir, "root-block-votes-dir", "", "path to directory with votes for root block")
- finalizeCmd.Flags().StringVar(&flagRootCommit, "root-commit", "0000000000000000000000000000000000000000000000000000000000000000", "state commitment of root execution state")
- finalizeCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "epoch counter for the epoch beginning with the root block")
- finalizeCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 4000, "length of each epoch measured in views")
- finalizeCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 100, "length of the epoch staking phase measured in views")
- finalizeCmd.Flags().Uint64Var(&flagNumViewsInDKGPhase, "epoch-dkg-phase-length", 1000, "length of each DKG phase measured in views")
- finalizeCmd.Flags().Uint64Var(&flagEpochCommitSafetyThreshold,
"epoch-commit-safety-threshold", 500, "defines epoch commitment deadline") - finalizeCmd.Flags().BytesHexVar(&flagBootstrapRandomSeed, "random-seed", GenerateRandomSeed(flow.EpochSetupRandomSourceLength), "The seed used to for DKG, Clustering and Cluster QC generation") - finalizeCmd.Flags().UintVar(&flagProtocolVersion, "protocol-version", flow.DefaultProtocolVersion, "major software version used for the duration of this spork") cmd.MarkFlagRequired(finalizeCmd, "root-block") + cmd.MarkFlagRequired(finalizeCmd, "intermediary-bootstrapping-data") cmd.MarkFlagRequired(finalizeCmd, "root-block-votes-dir") - cmd.MarkFlagRequired(finalizeCmd, "root-commit") - cmd.MarkFlagRequired(finalizeCmd, "epoch-counter") - cmd.MarkFlagRequired(finalizeCmd, "epoch-length") - cmd.MarkFlagRequired(finalizeCmd, "epoch-staking-phase-length") - cmd.MarkFlagRequired(finalizeCmd, "epoch-dkg-phase-length") - cmd.MarkFlagRequired(finalizeCmd, "epoch-commit-safety-threshold") - cmd.MarkFlagRequired(finalizeCmd, "protocol-version") - - // optional parameters to influence various aspects of identity generation - finalizeCmd.Flags().UintVar(&flagCollectionClusters, "collection-clusters", 2, "number of collection clusters") // these two flags are only used when setup a network from genesis finalizeCmd.Flags().StringVar(&flagServiceAccountPublicKeyJSON, "service-account-public-key-json", @@ -137,38 +113,34 @@ func finalize(cmd *cobra.Command, args []string) { } } - // validate epoch configs - err := validateEpochConfig() + log.Info().Msg("collecting partner network and staking keys") + partnerNodes, err := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) if err != nil { - log.Fatal().Err(err).Msg("invalid or unsafe epoch commit threshold config") - } - - if len(flagBootstrapRandomSeed) != flow.EpochSetupRandomSourceLength { - log.Error().Int("expected", flow.EpochSetupRandomSourceLength).Int("actual", len(flagBootstrapRandomSeed)).Msg("random seed provided length is not valid") - return + log.Fatal().Err(err).Msg("failed to read full partner node infos") } - - log.Info().Str("seed", hex.EncodeToString(flagBootstrapRandomSeed)).Msg("deterministic bootstrapping random seed") - log.Info().Msg("") - - log.Info().Msg("collecting partner network and staking keys") - partnerNodes := readPartnerNodeInfos() log.Info().Msg("") log.Info().Msg("generating internal private networking and staking keys") - internalNodes := readInternalNodeInfos() + internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full internal node infos") + } + log.Info().Msg("") - log.Info().Msg("checking constraints on consensus/cluster nodes") + log.Info().Msg("checking constraints on consensus nodes") checkConstraints(partnerNodes, internalNodes) log.Info().Msg("") log.Info().Msg("assembling network and staking keys") - stakingNodes := mergeNodeInfos(internalNodes, partnerNodes) + stakingNodes, err := mergeNodeInfos(internalNodes, partnerNodes) + if err != nil { + log.Fatal().Err(err).Msgf("failed to merge internal and partner nodes: %v", err) + } log.Info().Msg("") // create flow.IdentityList representation of participant set - participants := model.ToIdentityList(stakingNodes).Sort(order.Canonical) + participants := bootstrap.Sort(stakingNodes, flow.Canonical[flow.Identity]) log.Info().Msg("reading root block data") block := readRootBlock() @@ -180,10 +152,13 @@ func finalize(cmd *cobra.Command, args []string) { 
log.Info().Msgf("received votes total: %v", len(votes)) - log.Info().Msg("reading dkg data") - dkgData := readDKGData() + log.Info().Msg("reading random beacon keys") + dkgData, _ := readRandomBeaconKeys() log.Info().Msg("") + log.Info().Msg("reading intermediary bootstrapping data") + intermediaryData := readIntermediaryBootstrappingData() + log.Info().Msg("constructing root QC") rootQC := constructRootQC( block, @@ -194,63 +169,63 @@ func finalize(cmd *cobra.Command, args []string) { ) log.Info().Msg("") - log.Info().Msg("computing collection node clusters") - clusterAssignmentSeed := binary.BigEndian.Uint64(flagBootstrapRandomSeed) - assignments, clusters := constructClusterAssignment(partnerNodes, internalNodes, int64(clusterAssignmentSeed)) - log.Info().Msg("") - - log.Info().Msg("constructing root blocks for collection node clusters") - clusterBlocks := run.GenerateRootClusterBlocks(flagEpochCounter, clusters) - log.Info().Msg("") - - log.Info().Msg("constructing root QCs for collection node clusters") - clusterQCs := constructRootQCsForClusters(clusters, internalNodes, clusterBlocks) - log.Info().Msg("") - // if no root commit is specified, bootstrap an empty execution state if flagRootCommit == "0000000000000000000000000000000000000000000000000000000000000000" { - generateEmptyExecutionState( - block.Header.ChainID, - flagBootstrapRandomSeed, - assignments, - clusterQCs, - dkgData, + commit := generateEmptyExecutionState( + block.ToHeader(), + intermediaryData.ExecutionStateConfig, participants, ) + flagRootCommit = hex.EncodeToString(commit[:]) } log.Info().Msg("constructing root execution result and block seal") - result, seal := constructRootResultAndSeal(flagRootCommit, block, participants, assignments, clusterQCs, dkgData) + result, seal := constructRootResultAndSeal(flagRootCommit, block, intermediaryData.RootEpochSetup, intermediaryData.RootEpochCommit) log.Info().Msg("") // construct serializable root protocol snapshot log.Info().Msg("constructing root protocol snapshot") - snapshot, err := inmem.SnapshotFromBootstrapStateWithParams(block, result, seal, rootQC, flagProtocolVersion, flagEpochCommitSafetyThreshold) + snapshot, err := inmem.SnapshotFromBootstrapStateWithParams( + block, + result, + seal, + rootQC, + func(epochStateID flow.Identifier) (protocol_state.KVStoreAPI, error) { + return kvstore.NewDefaultKVStore( + intermediaryData.FinalizationSafetyThreshold, + intermediaryData.EpochExtensionViewCount, + epochStateID) + }, + ) if err != nil { log.Fatal().Err(err).Msg("unable to generate root protocol snapshot") } // validate the generated root snapshot is valid verifyResultID := true - err = badger.IsValidRootSnapshot(snapshot, verifyResultID) + err = datastore.IsValidRootSnapshot(snapshot, verifyResultID) if err != nil { log.Fatal().Err(err).Msg("the generated root snapshot is invalid") } // validate the generated root snapshot QCs - err = badger.IsValidRootSnapshotQCs(snapshot) + err = datastore.IsValidRootSnapshotQCs(snapshot) if err != nil { log.Fatal().Err(err).Msg("root snapshot contains invalid QCs") } // write snapshot to disk - writeJSON(model.PathRootProtocolStateSnapshot, snapshot.Encodable()) + err = common.WriteJSON(model.PathRootProtocolStateSnapshot, flagOutdir, snapshot.Encodable()) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathRootProtocolStateSnapshot) log.Info().Msg("") // read snapshot and verify consistency rootSnapshot, err := 
loadRootProtocolSnapshot(model.PathRootProtocolStateSnapshot) if err != nil { - log.Fatal().Err(err).Msg("unable to load seralized root protocol") + log.Fatal().Err(err).Msg("unable to load serialized root protocol") } savedResult, savedSeal, err := rootSnapshot.SealedResult() @@ -272,13 +247,13 @@ func finalize(cmd *cobra.Command, args []string) { log.Info().Msg("saved result and seal are matching") - err = badger.IsValidRootSnapshot(rootSnapshot, verifyResultID) + err = datastore.IsValidRootSnapshot(rootSnapshot, verifyResultID) if err != nil { log.Fatal().Err(err).Msg("saved snapshot is invalid") } // validate the generated root snapshot QCs - err = badger.IsValidRootSnapshotQCs(snapshot) + err = datastore.IsValidRootSnapshotQCs(snapshot) if err != nil { log.Fatal().Err(err).Msg("root snapshot contains invalid QCs") } @@ -299,7 +274,7 @@ func finalize(cmd *cobra.Command, args []string) { log.Info().Msg("") // print count of all nodes - roleCounts := nodeCountByRole(stakingNodes) + roleCounts := common.NodeCountByRole(stakingNodes) log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", roleCounts[flow.RoleConsensus], flow.RoleConsensus.String())) log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", roleCounts[flow.RoleCollection], flow.RoleCollection.String())) log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", roleCounts[flow.RoleVerification], flow.RoleVerification.String())) @@ -312,7 +287,7 @@ func finalize(cmd *cobra.Command, args []string) { // readRootBlockVotes reads votes for root block func readRootBlockVotes() []*hotstuff.Vote { var votes []*hotstuff.Vote - files, err := filesInDir(flagRootBlockVotesDir) + files, err := common.FilesInDir(flagRootBlockVotesDir) if err != nil { log.Fatal().Err(err).Msg("could not read root block votes") } @@ -324,249 +299,85 @@ func readRootBlockVotes() []*hotstuff.Vote { // read file and append to partners var vote hotstuff.Vote - readJSON(f, &vote) + err = common.ReadJSON(f, &vote) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } + votes = append(votes, &vote) log.Info().Msgf("read vote %v for block %v from signerID %v", vote.ID(), vote.BlockID, vote.SignerID) } return votes } -// readPartnerNodeInfos returns a list of partner nodes after gathering weights -// and public key information from configuration files -func readPartnerNodeInfos() []model.NodeInfo { - partners := readPartnerNodes() - log.Info().Msgf("read %d partner node configuration files", len(partners)) - - var weights PartnerWeights - readJSON(flagPartnerWeights, &weights) - log.Info().Msgf("read %d weights for partner nodes", len(weights)) - - var nodes []model.NodeInfo - for _, partner := range partners { - // validate every single partner node - nodeID := validateNodeID(partner.NodeID) - networkPubKey := validateNetworkPubKey(partner.NetworkPubKey) - stakingPubKey := validateStakingPubKey(partner.StakingPubKey) - weight, valid := validateWeight(weights[partner.NodeID]) - if !valid { - log.Error().Msgf("weights: %v", weights) - log.Fatal().Msgf("partner node id %x has no weight", nodeID) - } - if weight != flow.DefaultInitialWeight { - log.Warn().Msgf("partner node (id=%x) has non-default weight (%d != %d)", partner.NodeID, weight, flow.DefaultInitialWeight) - } - - node := model.NewPublicNodeInfo( - nodeID, - partner.Role, - partner.Address, - weight, - networkPubKey.PublicKey, - stakingPubKey.PublicKey, - ) - nodes = append(nodes, node) - } - - return nodes -} - -// readPartnerNodes reads the partner node information -func 
readPartnerNodes() []model.NodeInfoPub { - var partners []model.NodeInfoPub - files, err := filesInDir(flagPartnerNodeInfoDir) - if err != nil { - log.Fatal().Err(err).Msg("could not read partner node infos") - } - for _, f := range files { - // skip files that do not include node-infos - if !strings.Contains(f, model.PathPartnerNodeInfoPrefix) { - continue - } - - // read file and append to partners - var p model.NodeInfoPub - readJSON(f, &p) - partners = append(partners, p) - } - return partners -} - -// readInternalNodeInfos returns a list of internal nodes after collecting weights -// from configuration files -func readInternalNodeInfos() []model.NodeInfo { - privInternals := readInternalNodes() - log.Info().Msgf("read %v internal private node-info files", len(privInternals)) - - weights := internalWeightsByAddress() - log.Info().Msgf("read %d weights for internal nodes", len(weights)) - - var nodes []model.NodeInfo - for _, internal := range privInternals { - // check if address is valid format - validateAddressFormat(internal.Address) - - // validate every single internal node - nodeID := validateNodeID(internal.NodeID) - weight, valid := validateWeight(weights[internal.Address]) - if !valid { - log.Error().Msgf("weights: %v", weights) - log.Fatal().Msgf("internal node %v has no weight. Did you forget to update the node address?", internal) - } - if weight != flow.DefaultInitialWeight { - log.Warn().Msgf("internal node (id=%x) has non-default weight (%d != %d)", internal.NodeID, weight, flow.DefaultInitialWeight) - } - - node := model.NewPrivateNodeInfo( - nodeID, - internal.Role, - internal.Address, - weight, - internal.NetworkPrivKey, - internal.StakingPrivKey, - ) - - nodes = append(nodes, node) - } - - return nodes -} - -// readInternalNodes reads our internal node private infos generated by -// `keygen` command and returns it -func readInternalNodes() []model.NodeInfoPriv { - var internalPrivInfos []model.NodeInfoPriv - - // get files in internal priv node infos directory - files, err := filesInDir(flagInternalNodePrivInfoDir) - if err != nil { - log.Fatal().Err(err).Msg("could not read partner node infos") - } - - // for each of the files - for _, f := range files { - // skip files that do not include node-infos - if !strings.Contains(f, model.PathPrivNodeInfoPrefix) { - continue - } - - // read file and append to partners - var p model.NodeInfoPriv - readJSON(f, &p) - internalPrivInfos = append(internalPrivInfos, p) - } - - return internalPrivInfos -} - -// internalWeightsByAddress returns a mapping of node address by weight for internal nodes -func internalWeightsByAddress() map[string]uint64 { - // read json - var configs []model.NodeConfig - readJSON(flagConfig, &configs) - log.Info().Interface("config", configs).Msgf("read internal node configurations") - - weights := make(map[string]uint64) - for _, config := range configs { - if _, ok := weights[config.Address]; !ok { - weights[config.Address] = config.Weight - } else { - log.Error().Msgf("duplicate internal node address %s", config.Address) - } - } - - return weights -} - // mergeNodeInfos merges the internal and partner nodes and checks if there are no // duplicate addresses or node Ids. // // IMPORTANT: node infos are returned in the canonical ordering, meaning this // is safe to use as the input to the DKG and protocol state. 
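
The refactored `mergeNodeInfos` below reports a duplicate address or node ID as a returned error instead of aborting via `log.Fatal`, leaving the exit decision to the caller. A small generic sketch of the same map-based uniqueness check, illustrative only and not the repository's code:

package main

import "fmt"

// checkUnique returns an error on the first repeated key, mirroring the
// duplicate-address and duplicate-node-ID loops in mergeNodeInfos.
func checkUnique[K comparable](keys []K) error {
	seen := make(map[K]struct{}, len(keys))
	for _, k := range keys {
		if _, ok := seen[k]; ok {
			return fmt.Errorf("duplicate key: %v", k)
		}
		seen[k] = struct{}{}
	}
	return nil
}

func main() {
	err := checkUnique([]string{"a.example.org:3569", "b.example.org:3569", "a.example.org:3569"})
	fmt.Println(err) // duplicate key: a.example.org:3569
}
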
-func mergeNodeInfos(internalNodes, partnerNodes []model.NodeInfo) []model.NodeInfo { +func mergeNodeInfos(internalNodes, partnerNodes []model.NodeInfo) ([]model.NodeInfo, error) { nodes := append(internalNodes, partnerNodes...) // test for duplicate Addresses addressLookup := make(map[string]struct{}) for _, node := range nodes { if _, ok := addressLookup[node.Address]; ok { - log.Fatal().Str("address", node.Address).Msg("duplicate node address") + return nil, fmt.Errorf("duplicate node address: %v", node.Address) } + addressLookup[node.Address] = struct{}{} } // test for duplicate node IDs idLookup := make(map[flow.Identifier]struct{}) for _, node := range nodes { if _, ok := idLookup[node.NodeID]; ok { - log.Fatal().Str("NodeID", node.NodeID.String()).Msg("duplicate node ID") + return nil, fmt.Errorf("duplicate node ID: %v", node.NodeID.String()) } + idLookup[node.NodeID] = struct{}{} } // sort nodes using the canonical ordering - nodes = model.Sort(nodes, order.Canonical) + nodes = model.Sort(nodes, flow.Canonical[flow.Identity]) - return nodes + return nodes, nil } // readRootBlock reads root block data from disc, this file needs to be prepared with // rootblock command func readRootBlock() *flow.Block { - rootBlock, err := utils.ReadRootBlock(flagRootBlock) + rootBlock, err := utils.ReadData[flow.Block](flagRootBlockPath) if err != nil { log.Fatal().Err(err).Msg("could not read root block data") } return rootBlock } -func readDKGData() dkg.DKGData { - encodableDKG, err := utils.ReadDKGData(flagDKGDataPath) +// readRandomBeaconKeys reads the threshold key data from disc. +// This file needs to be prepared with rootblock command +func readRandomBeaconKeys() (dkg.ThresholdKeySet, flow.DKGIndexMap) { + encodableDKG, err := utils.ReadData[inmem.ThresholdKeySet](flagDKGDataPath) if err != nil { - log.Fatal().Err(err).Msg("could not read DKG data") + log.Fatal().Err(err).Msg("loading threshold key data for Random Beacon failed") } - dkgData := dkg.DKGData{ + dkgData := dkg.ThresholdKeySet{ PrivKeyShares: nil, PubGroupKey: encodableDKG.GroupKey.PublicKey, PubKeyShares: nil, } - for _, pubKey := range encodableDKG.PubKeyShares { - dkgData.PubKeyShares = append(dkgData.PubKeyShares, pubKey.PublicKey) + indexMap := make(flow.DKGIndexMap, len(encodableDKG.Participants)) + for i, participant := range encodableDKG.Participants { + dkgData.PubKeyShares = append(dkgData.PubKeyShares, participant.PubKeyShare.PublicKey) + dkgData.PrivKeyShares = append(dkgData.PrivKeyShares, participant.PrivKeyShare.PrivateKey) + indexMap[participant.NodeID] = i } - for _, privKey := range encodableDKG.PrivKeyShares { - dkgData.PrivKeyShares = append(dkgData.PrivKeyShares, privKey.PrivateKey) - } - - return dkgData + return dkgData, indexMap } // Validation utility methods ------------------------------------------------ -func validateNodeID(nodeID flow.Identifier) flow.Identifier { - if nodeID == flow.ZeroID { - log.Fatal().Msg("NodeID must not be zero") - } - return nodeID -} - -func validateNetworkPubKey(key encodable.NetworkPubKey) encodable.NetworkPubKey { - if key.PublicKey == nil { - log.Fatal().Msg("NetworkPubKey must not be nil") - } - return key -} - -func validateStakingPubKey(key encodable.StakingPubKey) encodable.StakingPubKey { - if key.PublicKey == nil { - log.Fatal().Msg("StakingPubKey must not be nil") - } - return key -} - -func validateWeight(weight uint64) (uint64, bool) { - return weight, weight > 0 -} - // loadRootProtocolSnapshot loads the root protocol snapshot from disk func 
loadRootProtocolSnapshot(path string) (*inmem.Snapshot, error) { data, err := io.ReadFile(filepath.Join(flagOutdir, path)) @@ -583,15 +394,22 @@ func loadRootProtocolSnapshot(path string) (*inmem.Snapshot, error) { return inmem.SnapshotFromEncodable(snapshot), nil } +// readIntermediaryBootstrappingData reads intermediary bootstrapping data file from disk. +// This file needs to be prepared with rootblock command +func readIntermediaryBootstrappingData() *IntermediaryBootstrappingData { + intermediaryData, err := utils.ReadData[IntermediaryBootstrappingData](flagIntermediaryBootstrappingDataPath) + if err != nil { + log.Fatal().Err(err).Msg("could not read root epoch data") + } + return intermediaryData +} + // generateEmptyExecutionState generates a new empty execution state with the // given configuration. Sets the flagRootCommit variable for future reads. func generateEmptyExecutionState( - chainID flow.ChainID, - randomSource []byte, - assignments flow.AssignmentList, - clusterQCs []*flow.QuorumCertificate, - dkgData dkg.DKGData, - identities flow.IdentityList, + rootBlock *flow.Header, + epochConfig epochs.EpochConfig, + nodes []bootstrap.NodeInfo, ) (commit flow.StateCommitment) { log.Info().Msg("generating empty execution state") @@ -606,68 +424,62 @@ func generateEmptyExecutionState( log.Fatal().Err(err).Msg("invalid genesis token supply") } - cdcRandomSource, err := cadence.NewString(hex.EncodeToString(randomSource)) - if err != nil { - log.Fatal().Err(err).Msg("invalid random source") - } - - epochConfig := epochs.EpochConfig{ - EpochTokenPayout: cadence.UFix64(0), - RewardCut: cadence.UFix64(0), - CurrentEpochCounter: cadence.UInt64(flagEpochCounter), - NumViewsInEpoch: cadence.UInt64(flagNumViewsInEpoch), - NumViewsInStakingAuction: cadence.UInt64(flagNumViewsInStakingAuction), - NumViewsInDKGPhase: cadence.UInt64(flagNumViewsInDKGPhase), - NumCollectorClusters: cadence.UInt16(flagCollectionClusters), - FLOWsupplyIncreasePercentage: cadence.UFix64(0), - RandomSource: cdcRandomSource, - CollectorClusters: assignments, - ClusterQCs: clusterQCs, - DKGPubKeys: dkgData.PubKeyShares, - } - commit, err = run.GenerateExecutionState( filepath.Join(flagOutdir, model.DirnameExecutionState), serviceAccountPublicKey, - chainID.Chain(), + rootBlock.ChainID.Chain(), + fvm.WithRootBlock(rootBlock), fvm.WithInitialTokenSupply(cdcInitialTokenSupply), fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), fvm.WithEpochConfig(epochConfig), - fvm.WithIdentities(identities), + fvm.WithNodes(nodes), ) if err != nil { log.Fatal().Err(err).Msg("unable to generate execution state") } - flagRootCommit = hex.EncodeToString(commit[:]) log.Info().Msg("") - return + return commit } -// validateEpochConfig validates configuration of the epoch commitment deadline. -func validateEpochConfig() error { - chainID := parseChainID(flagRootChain) - dkgFinalView := flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*3 // 3 DKG phases - epochCommitDeadline := flagNumViewsInEpoch - flagEpochCommitSafetyThreshold - - defaultSafetyThreshold, err := protocol.DefaultEpochCommitSafetyThreshold(chainID) - if err != nil { - return fmt.Errorf("could not get default epoch commit safety threshold: %w", err) - } +// validateOrPopulateEpochTimingConfig validates the epoch timing config flags. 
In case the +// `flagUseDefaultEpochTargetEndTime` value has been set, the function derives the values for +// `flagEpochTimingRefCounter`, `flagEpochTimingDuration`, and `flagEpochTimingRefTimestamp` +// from the configuration. Otherwise, it enforces that compatible values for the respective parameters have been +// specified (and errors otherwise). Therefore, after `validateOrPopulateEpochTimingConfig` ran, +// the targeted end time for the epoch can be computed via `rootEpochTargetEndTime()`. +// You can either let the tool choose default values, or specify a value for each config. +func validateOrPopulateEpochTimingConfig() error { + // Default timing is intended for Benchnet, Localnet, etc. + // Manually specified timings for Mainnet, Testnet, Canary. + if flagUseDefaultEpochTargetEndTime { + // No other flags may be set + if !(flagEpochTimingRefTimestamp == 0 && flagEpochTimingDuration == 0 && flagEpochTimingRefCounter == 0) { + return fmt.Errorf("invalid epoch timing config: cannot specify ANY of --epoch-timing-ref-counter, --epoch-timing-ref-timestamp, or --epoch-timing-duration if using default timing config") + } + flagEpochTimingRefCounter = flagEpochCounter + flagEpochTimingDuration = flagNumViewsInEpoch + flagEpochTimingRefTimestamp = uint64(time.Now().Unix()) + flagNumViewsInEpoch + + // compute target end time for initial (root) epoch from flags: `TargetEndTime = RefTimestamp + (RootEpochCounter - RefEpochCounter) * Duration` + rootEpochTargetEndTimeUNIX := rootEpochTargetEndTime() + rootEpochTargetEndTime := time.Unix(int64(rootEpochTargetEndTimeUNIX), 0) + log.Info().Msgf("using default epoch timing config with root epoch target end time %s, which is in %s", rootEpochTargetEndTime, time.Until(rootEpochTargetEndTime)) + } else { + // All other flags must be set + // NOTE: it is valid for flagEpochTimingRefCounter to be set to 0 + if flagEpochTimingRefTimestamp == 0 || flagEpochTimingDuration == 0 { + return fmt.Errorf("invalid epoch timing config: must specify ALL of --epoch-timing-ref-counter, --epoch-timing-ref-timestamp, and --epoch-timing-duration") + } + if flagEpochCounter < flagEpochTimingRefCounter { + return fmt.Errorf("invalid epoch timing config: reference epoch counter must be less than or equal to root epoch counter") + } - // sanity check: the safety threshold is >= the default for the chain - if flagEpochCommitSafetyThreshold < defaultSafetyThreshold { - return fmt.Errorf("potentially unsafe epoch config: epoch commit safety threshold smaller than expected (%d < %d)", flagEpochCommitSafetyThreshold, defaultSafetyThreshold) - } - // sanity check: epoch commitment deadline cannot be before the DKG end - if epochCommitDeadline <= dkgFinalView { - return fmt.Errorf("invalid epoch config: the epoch commitment deadline (%d) is before the DKG final view (%d)", epochCommitDeadline, dkgFinalView) - } - // sanity check: the difference between DKG end and safety threshold is also >= the default safety threshold - if epochCommitDeadline-dkgFinalView < defaultSafetyThreshold { - return fmt.Errorf("potentially unsafe epoch config: time between DKG end and epoch commitment deadline is smaller than expected (%d-%d < %d)", - epochCommitDeadline, dkgFinalView, defaultSafetyThreshold) + // compute target end time for initial (root) epoch from flags: `TargetEndTime = RefTimestamp + (RootEpochCounter - RefEpochCounter) * Duration` + rootEpochTargetEndTimeUNIX := rootEpochTargetEndTime() + rootEpochTargetEndTime := time.Unix(int64(rootEpochTargetEndTimeUNIX), 0) + 
log.Info().Msgf("using user-specified epoch timing config with root epoch target end time %s, which is in %s", rootEpochTargetEndTime, time.Until(rootEpochTargetEndTime)) } return nil } diff --git a/cmd/bootstrap/cmd/finalize_test.go b/cmd/bootstrap/cmd/finalize_test.go index 816760540da..c70f53a50cf 100644 --- a/cmd/bootstrap/cmd/finalize_test.go +++ b/cmd/bootstrap/cmd/finalize_test.go @@ -2,39 +2,38 @@ package cmd import ( "encoding/hex" - "os" + "math/rand" "path/filepath" "regexp" "strings" "testing" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - utils "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) -const finalizeHappyPathLogs = "^deterministic bootstrapping random seed" + - "collecting partner network and staking keys" + +const finalizeHappyPathLogs = "collecting partner network and staking keys" + `read \d+ partner node configuration files` + `read \d+ weights for partner nodes` + "generating internal private networking and staking keys" + `read \d+ internal private node-info files` + `read internal node configurations` + `read \d+ weights for internal nodes` + - `checking constraints on consensus/cluster nodes` + + `checking constraints on consensus nodes` + `assembling network and staking keys` + `reading root block data` + `reading root block votes` + `read vote .*` + - `reading dkg data` + + `reading random beacon keys` + + `reading intermediary bootstrapping data` + `constructing root QC` + - `computing collection node clusters` + - `constructing root blocks for collection node clusters` + - `constructing root QCs for collection node clusters` + `constructing root execution result and block seal` + `constructing root protocol snapshot` + `wrote file \S+/root-protocol-state-snapshot.json` + @@ -52,7 +51,6 @@ const finalizeHappyPathLogs = "^deterministic bootstrapping random seed" + var finalizeHappyPathRegex = regexp.MustCompile(finalizeHappyPathLogs) func TestFinalize_HappyPath(t *testing.T) { - deterministicSeed := GenerateRandomSeed(flow.EpochSetupRandomSourceLength) rootCommit := unittest.StateCommitmentFixture() rootParent := unittest.StateCommitmentFixture() chainName := "main" @@ -71,147 +69,28 @@ func TestFinalize_HappyPath(t *testing.T) { flagRootChain = chainName flagRootParent = hex.EncodeToString(rootParent[:]) flagRootHeight = rootHeight - - // set deterministic bootstrapping seed - flagBootstrapRandomSeed = deterministicSeed - - // rootBlock will generate DKG and place it into bootDir/public-root-information - rootBlock(nil, nil) - - flagRootCommit = hex.EncodeToString(rootCommit[:]) - flagEpochCounter = epochCounter - flagNumViewsInEpoch = 100_000 - flagNumViewsInStakingAuction = 50_000 - flagNumViewsInDKGPhase = 2_000 - flagEpochCommitSafetyThreshold = 1_000 - flagRootBlock = filepath.Join(bootDir, model.PathRootBlockData) - flagDKGDataPath = filepath.Join(bootDir, model.PathRootDKGData) - flagRootBlockVotesDir = filepath.Join(bootDir, model.DirnameRootBlockVotes) - - hook := zeroLoggerHook{logs: &strings.Builder{}} - log = log.Hook(hook) - - finalize(nil, nil) - assert.Regexp(t, finalizeHappyPathRegex, hook.logs.String()) - hook.logs.Reset() - - // check if root protocol snapshot exists - snapshotPath := filepath.Join(bootDir, model.PathRootProtocolStateSnapshot) - 
assert.FileExists(t, snapshotPath) - }) -} - -func TestFinalize_Deterministic(t *testing.T) { - deterministicSeed := GenerateRandomSeed(flow.EpochSetupRandomSourceLength) - rootCommit := unittest.StateCommitmentFixture() - rootParent := unittest.StateCommitmentFixture() - chainName := "main" - rootHeight := uint64(1000) - epochCounter := uint64(0) - - utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { - - flagOutdir = bootDir - - flagConfig = configPath - flagPartnerNodeInfoDir = partnerDir - flagPartnerWeights = partnerWeights - flagInternalNodePrivInfoDir = internalPrivDir - + flagRootView = 1_000 flagRootCommit = hex.EncodeToString(rootCommit[:]) - flagRootParent = hex.EncodeToString(rootParent[:]) - flagRootChain = chainName - flagRootHeight = rootHeight flagEpochCounter = epochCounter flagNumViewsInEpoch = 100_000 flagNumViewsInStakingAuction = 50_000 flagNumViewsInDKGPhase = 2_000 - flagEpochCommitSafetyThreshold = 1_000 - - // set deterministic bootstrapping seed - flagBootstrapRandomSeed = deterministicSeed - - // rootBlock will generate DKG and place it into model.PathRootDKGData - rootBlock(nil, nil) - - flagRootBlock = filepath.Join(bootDir, model.PathRootBlockData) - flagDKGDataPath = filepath.Join(bootDir, model.PathRootDKGData) - flagRootBlockVotesDir = filepath.Join(bootDir, model.DirnameRootBlockVotes) + flagFinalizationSafetyThreshold = 1_000 + flagEpochExtensionViewCount = 100_000 + flagUseDefaultEpochTargetEndTime = true + flagEpochTimingRefCounter = 0 + flagEpochTimingRefTimestamp = 0 + flagEpochTimingDuration = 0 - hook := zeroLoggerHook{logs: &strings.Builder{}} - log = log.Hook(hook) - - finalize(nil, nil) - require.Regexp(t, finalizeHappyPathRegex, hook.logs.String()) - hook.logs.Reset() - - // check if root protocol snapshot exists - snapshotPath := filepath.Join(bootDir, model.PathRootProtocolStateSnapshot) - assert.FileExists(t, snapshotPath) - - // read snapshot - _, err := utils.ReadRootProtocolSnapshot(bootDir) - require.NoError(t, err) - - // delete snapshot file - err = os.Remove(snapshotPath) - require.NoError(t, err) - - finalize(nil, nil) - require.Regexp(t, finalizeHappyPathRegex, hook.logs.String()) - hook.logs.Reset() - - // check if root protocol snapshot exists - assert.FileExists(t, snapshotPath) - - // read snapshot - _, err = utils.ReadRootProtocolSnapshot(bootDir) - require.NoError(t, err) - - // ATTENTION: we can't use next statement because QC generation is not deterministic - // assert.Equal(t, firstSnapshot, secondSnapshot) - // Meaning we don't have a guarantee that with same input arguments we will get same QC. - // This doesn't mean that QC is invalid, but it will result in different structures, - // different QC => different service events => different result => different seal - // We need to use a different mechanism for comparing. - // ToDo: Revisit if this test case is valid at all. 
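
Returning to the epoch timing logic added to finalize.go above: both branches of `validateOrPopulateEpochTimingConfig` compute the root epoch's target end time via `TargetEndTime = RefTimestamp + (RootEpochCounter - RefEpochCounter) * Duration`, as the diff's comments state. A worked example with made-up flag values, not defaults from this repository:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical inputs, chosen only for illustration.
	const (
		refCounter   uint64 = 10               // --epoch-timing-ref-counter
		refTimestamp uint64 = 1_700_000_000    // --epoch-timing-ref-timestamp (Unix seconds)
		duration     uint64 = 7 * 24 * 60 * 60 // --epoch-timing-duration, one week per epoch
		rootCounter  uint64 = 12               // root epoch counter (flagEpochCounter)
	)
	// TargetEndTime = RefTimestamp + (RootEpochCounter - RefEpochCounter) * Duration
	targetEnd := refTimestamp + (rootCounter-refCounter)*duration
	// Two epochs after the reference timestamp: 1_700_000_000 + 2*604_800 = 1_701_209_600.
	fmt.Println(time.Unix(int64(targetEnd), 0).UTC())
}
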
- }) -} - -func TestFinalize_SameSeedDifferentStateCommits(t *testing.T) { - deterministicSeed := GenerateRandomSeed(flow.EpochSetupRandomSourceLength) - rootCommit := unittest.StateCommitmentFixture() - rootParent := unittest.StateCommitmentFixture() - chainName := "main" - rootHeight := uint64(1000) - epochCounter := uint64(0) - - utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { - - flagOutdir = bootDir - - flagConfig = configPath - flagPartnerNodeInfoDir = partnerDir - flagPartnerWeights = partnerWeights - flagInternalNodePrivInfoDir = internalPrivDir - - flagRootCommit = hex.EncodeToString(rootCommit[:]) - flagRootParent = hex.EncodeToString(rootParent[:]) - flagRootChain = chainName - flagRootHeight = rootHeight - flagEpochCounter = epochCounter - flagNumViewsInEpoch = 100_000 - flagNumViewsInStakingAuction = 50_000 - flagNumViewsInDKGPhase = 2_000 - flagEpochCommitSafetyThreshold = 1_000 - - // set deterministic bootstrapping seed - flagBootstrapRandomSeed = deterministicSeed + // KV store values (epoch extension view count and finalization safety threshold) must be explicitly set for mainnet + require.NoError(t, rootBlockCmd.Flags().Set("kvstore-finalization-safety-threshold", "1000")) + require.NoError(t, rootBlockCmd.Flags().Set("kvstore-epoch-extension-view-count", "100000")) // rootBlock will generate DKG and place it into bootDir/public-root-information - rootBlock(nil, nil) + rootBlock(rootBlockCmd, nil) - flagRootBlock = filepath.Join(bootDir, model.PathRootBlockData) + flagRootBlockPath = filepath.Join(bootDir, model.PathRootBlockData) + flagIntermediaryBootstrappingDataPath = filepath.Join(bootDir, model.PathIntermediaryBootstrappingData) flagDKGDataPath = filepath.Join(bootDir, model.PathRootDKGData) flagRootBlockVotesDir = filepath.Join(bootDir, model.DirnameRootBlockVotes) @@ -219,108 +98,127 @@ func TestFinalize_SameSeedDifferentStateCommits(t *testing.T) { log = log.Hook(hook) finalize(nil, nil) - require.Regexp(t, finalizeHappyPathRegex, hook.logs.String()) + assert.Regexp(t, finalizeHappyPathRegex, hook.logs.String()) hook.logs.Reset() // check if root protocol snapshot exists snapshotPath := filepath.Join(bootDir, model.PathRootProtocolStateSnapshot) assert.FileExists(t, snapshotPath) - - // read snapshot - snapshot1, err := utils.ReadRootProtocolSnapshot(bootDir) - require.NoError(t, err) - - // delete snapshot file - err = os.Remove(snapshotPath) - require.NoError(t, err) - - // change input state commitments - rootCommit2 := unittest.StateCommitmentFixture() - rootParent2 := unittest.StateCommitmentFixture() - flagRootCommit = hex.EncodeToString(rootCommit2[:]) - flagRootParent = hex.EncodeToString(rootParent2[:]) - - finalize(nil, nil) - require.Regexp(t, finalizeHappyPathRegex, hook.logs.String()) - hook.logs.Reset() - - // check if root protocol snapshot exists - assert.FileExists(t, snapshotPath) - - // read snapshot - snapshot2, err := utils.ReadRootProtocolSnapshot(bootDir) - require.NoError(t, err) - - // current epochs - currentEpoch1 := snapshot1.Epochs().Current() - currentEpoch2 := snapshot2.Epochs().Current() - - // check dkg - dkg1, err := currentEpoch1.DKG() - require.NoError(t, err) - dkg2, err := currentEpoch2.DKG() - require.NoError(t, err) - assert.Equal(t, dkg1, dkg2) - - // check clustering - clustering1, err := currentEpoch1.Clustering() - require.NoError(t, err) - clustering2, err := currentEpoch2.Clustering() - require.NoError(t, err) - assert.Equal(t, clustering1, 
clustering2) - - // verify random sources are same - randomSource1, err := currentEpoch1.RandomSource() - require.NoError(t, err) - randomSource2, err := currentEpoch2.RandomSource() - require.NoError(t, err) - assert.Equal(t, randomSource1, randomSource2) - assert.Equal(t, randomSource1, deterministicSeed) - assert.Equal(t, flow.EpochSetupRandomSourceLength, len(randomSource1)) }) } -func TestFinalize_InvalidRandomSeedLength(t *testing.T) { - rootCommit := unittest.StateCommitmentFixture() - rootParent := unittest.StateCommitmentFixture() - chainName := "main" - rootHeight := uint64(12332) - epochCounter := uint64(2) - - // set random seed with smaller length - deterministicSeed, err := hex.DecodeString("a12354a343234aa44bbb43") +func TestClusterAssignment(t *testing.T) { + tmp := flagCollectionClusters + flagCollectionClusters = 5 + // Happy path (limit set-up, can't have one less internal node) + partnersLen := 7 + internalLen := 22 + partners := unittest.NodeInfosFixture(partnersLen, unittest.WithRole(flow.RoleCollection)) + internals := unittest.NodeInfosFixture(internalLen, unittest.WithRole(flow.RoleCollection)) + + log := zerolog.Nop() + // should not error + _, clusters, err := common.ConstructClusterAssignment(log, model.ToIdentityList(partners), model.ToIdentityList(internals), int(flagCollectionClusters)) require.NoError(t, err) + require.True(t, checkClusterConstraint(clusters, partners, internals)) + + // unhappy Path + internals = internals[:21] // reduce one internal node + // should error + _, _, err = common.ConstructClusterAssignment(log, model.ToIdentityList(partners), model.ToIdentityList(internals), int(flagCollectionClusters)) + require.Error(t, err) + // revert the flag value + flagCollectionClusters = tmp +} - // invalid length execution logs - expectedLogs := regexp.MustCompile("random seed provided length is not valid") - - utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { - - flagOutdir = bootDir - - flagConfig = configPath - flagPartnerNodeInfoDir = partnerDir - flagPartnerWeights = partnerWeights - flagInternalNodePrivInfoDir = internalPrivDir +func TestEpochTimingConfig(t *testing.T) { + // Reset flags after test is completed + defer func(_flagDefault bool, _flagRefCounter, _flagRefTs, _flagDur uint64) { + flagUseDefaultEpochTargetEndTime = _flagDefault + flagEpochTimingRefCounter = _flagRefCounter + flagEpochTimingRefTimestamp = _flagRefTs + flagEpochTimingDuration = _flagDur + }(flagUseDefaultEpochTargetEndTime, flagEpochTimingRefCounter, flagEpochTimingRefTimestamp, flagEpochTimingDuration) + + flags := []*uint64{&flagEpochTimingRefCounter, &flagEpochTimingRefTimestamp, &flagEpochTimingDuration} + t.Run("if default is set, no other flag may be set", func(t *testing.T) { + flagUseDefaultEpochTargetEndTime = true + for _, flag := range flags { + *flag = rand.Uint64()%100 + 1 + err := validateOrPopulateEpochTimingConfig() + assert.Error(t, err) + *flag = 0 // set the flag back to 0 + } + err := validateOrPopulateEpochTimingConfig() + assert.NoError(t, err) + }) - flagRootCommit = hex.EncodeToString(rootCommit[:]) - flagRootParent = hex.EncodeToString(rootParent[:]) - flagRootChain = chainName - flagRootHeight = rootHeight - flagEpochCounter = epochCounter - flagNumViewsInEpoch = 100_000 - flagNumViewsInStakingAuction = 50_000 - flagNumViewsInDKGPhase = 2_000 - flagEpochCommitSafetyThreshold = 1_000 + t.Run("if default is not set, all other flags must be set", func(t *testing.T) { + 
flagUseDefaultEpochTargetEndTime = false
+ // First set all required flags and ensure validation passes
+ flagEpochTimingRefCounter = rand.Uint64() % flagEpochCounter
+ flagEpochTimingDuration = rand.Uint64()%100_000 + 1
+ flagEpochTimingRefTimestamp = rand.Uint64()
+
+ err := validateOrPopulateEpochTimingConfig()
+ assert.NoError(t, err)
+
+ // Next, check that validation fails if any one flag is not set
+ // NOTE: we do not include refCounter here, because it is allowed to be zero.
+ for _, flag := range []*uint64{&flagEpochTimingRefTimestamp, &flagEpochTimingDuration} {
+ *flag = 0
+ err := validateOrPopulateEpochTimingConfig()
+ assert.Error(t, err)
+ *flag = rand.Uint64()%100 + 1 // set the flag back to a non-zero value
+ }
+ })
+}

- // set deterministic bootstrapping seed
- flagBootstrapRandomSeed = deterministicSeed
+// checkClusterConstraint checks the number of internal/partner nodes in each cluster.
+// The identities in each cluster do not matter for this check.
+func checkClusterConstraint(clusters flow.ClusterList, partnersInfo []model.NodeInfo, internalsInfo []model.NodeInfo) bool {
+ partners := model.ToIdentityList(partnersInfo)
+ internals := model.ToIdentityList(internalsInfo)
+ for _, cluster := range clusters {
+ var clusterPartnerCount, clusterInternalCount int
+ for _, node := range cluster {
+ if _, exists := partners.ByNodeID(node.NodeID); exists {
+ clusterPartnerCount++
+ }
+ if _, exists := internals.ByNodeID(node.NodeID); exists {
+ clusterInternalCount++
+ }
+ }
+ if clusterInternalCount <= clusterPartnerCount*2 {
+ return false
+ }
+ }
+ return true
+}

- hook := zeroLoggerHook{logs: &strings.Builder{}}
- log = log.Hook(hook)
+func TestMergeNodeInfos(t *testing.T) {
+ partnersLen := 7
+ internalLen := 22
+ partners := unittest.NodeInfosFixture(partnersLen, unittest.WithRole(flow.RoleCollection))
+ internals := unittest.NodeInfosFixture(internalLen, unittest.WithRole(flow.RoleCollection))

- finalize(nil, nil)
- assert.Regexp(t, expectedLogs, hook.logs.String())
- hook.logs.Reset()
- })
+ // Check if there is no overlap, then should pass
+ merged, err := mergeNodeInfos(partners, internals)
+ require.NoError(t, err)
+ require.Len(t, merged, partnersLen+internalLen)
+
+ // Check if internals and partners have overlap, then should fail
+ internalAndPartnersHaveOverlap := append(partners, internals[0])
+ _, err = mergeNodeInfos(internalAndPartnersHaveOverlap, internals)
+ require.Error(t, err)
+
+ // Check if partners have overlap, then should fail
+ partnersHaveOverlap := append(partners, partners[0])
+ _, err = mergeNodeInfos(partnersHaveOverlap, internals)
+ require.Error(t, err)
+
+ // Check if internals have overlap, then should fail
+ internalsHaveOverlap := append(internals, internals[0])
+ _, err = mergeNodeInfos(partners, internalsHaveOverlap)
+ require.Error(t, err)
}
diff --git a/cmd/bootstrap/cmd/genconfig.go b/cmd/bootstrap/cmd/genconfig.go
index 404bd5e873e..f1902778f3a 100644
--- a/cmd/bootstrap/cmd/genconfig.go
+++ b/cmd/bootstrap/cmd/genconfig.go
@@ -5,6 +5,7 @@ import (
 "github.com/spf13/cobra"
+ "github.com/onflow/flow-go/cmd/util/cmd/common"
 model "github.com/onflow/flow-go/model/bootstrap"
 "github.com/onflow/flow-go/model/flow"
)
@@ -56,14 +57,18 @@ func genconfigCmdRun(_ *cobra.Command, _ []string) {
 configs = append(configs, createConf(flow.RoleVerification, i))
 }
- writeJSON(flagConfig, configs)
+ err := common.WriteJSON(flagConfig, flagOutdir, configs)
+ if err != nil {
+ log.Fatal().Err(err).Msg("failed to write json")
+ }
+ log.Info().Msgf("wrote file %s/%s",
flagOutdir, flagConfig) } // genconfigCmd represents the genconfig command var genconfigCmd = &cobra.Command{ Use: "genconfig", Short: "Generate node-config.json", - Long: "example: go run -tags relic ./cmd/bootstrap genconfig --address-format \"%s-%03d.devnet19.nodes.onflow.org:3569\" --access 2 --collection 3 --consensus 3 --execution 2 --verification 1 --weight 100", + Long: "example: go run ./cmd/bootstrap genconfig --address-format \"%s-%03d.devnet19.nodes.onflow.org:3569\" --access 2 --collection 3 --consensus 3 --execution 2 --verification 1 --weight 100", Run: genconfigCmdRun, } diff --git a/cmd/bootstrap/cmd/intermediary.go b/cmd/bootstrap/cmd/intermediary.go new file mode 100644 index 00000000000..00f498e6254 --- /dev/null +++ b/cmd/bootstrap/cmd/intermediary.go @@ -0,0 +1,33 @@ +package cmd + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/epochs" +) + +// IntermediaryBootstrappingData stores data which needs to be passed between the +// 2 steps of the bootstrapping process: `rootblock` and `finalize`. +// This structure is created in `rootblock`, written to disk, then read in `finalize`. +type IntermediaryBootstrappingData struct { + IntermediaryParamsData + IntermediaryEpochData +} + +// IntermediaryParamsData stores the subset of protocol.GlobalParams which can be independently configured +// by the network operator (i.e. which is not dependent on other bootstrapping artifacts, +// like the root block). +// This is used to pass data between the rootblock command and the finalize command. +type IntermediaryParamsData struct { + FinalizationSafetyThreshold uint64 + EpochExtensionViewCount uint64 +} + +// IntermediaryEpochData stores the root epoch and the epoch config for the execution state +// and to bootstrap the Protocol State. +// This is used to pass data between the rootblock command and the finalize command. +type IntermediaryEpochData struct { + // TODO remove redundant inclusion of the fields (currently storing them in cadence as well as in protocol-state representation). 
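	// Note, kept as a comment since this lands inside the new type: the fields
	// below reach disk as part of IntermediaryBootstrappingData. A sketch of the
	// assumed round trip; the finalize-side read appears in this diff as
	// readIntermediaryBootstrappingData, while the rootblock-side write is an
	// assumption:
	//
	//	rootblock: common.WriteJSON(model.PathIntermediaryBootstrappingData, flagOutdir, intermediaryData)
	//	finalize:  utils.ReadData[IntermediaryBootstrappingData](flagIntermediaryBootstrappingDataPath)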
+ ExecutionStateConfig epochs.EpochConfig + RootEpochSetup *flow.EpochSetup + RootEpochCommit *flow.EpochCommit +} diff --git a/cmd/bootstrap/cmd/key.go b/cmd/bootstrap/cmd/key.go index 3790f167d56..0e336c57ce3 100644 --- a/cmd/bootstrap/cmd/key.go +++ b/cmd/bootstrap/cmd/key.go @@ -2,18 +2,13 @@ package cmd import ( "fmt" - "net" - "strconv" - "github.com/onflow/flow-go/cmd" - "github.com/onflow/flow-go/cmd/bootstrap/utils" - p2putils "github.com/onflow/flow-go/network/p2p/utils" - - "github.com/multiformats/go-multiaddr" + "github.com/onflow/crypto" "github.com/spf13/cobra" - "github.com/onflow/flow-go/crypto" - + "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ) @@ -77,7 +72,7 @@ func keyCmdRun(_ *cobra.Command, _ []string) { // validate inputs role := validateRole(flagRole) - validateAddressFormat(flagAddress) + common.ValidateAddressFormat(log, flagAddress) // generate staking and network keys networkKey, stakingKey, secretsDBKey, err := generateKeys() @@ -99,10 +94,36 @@ func keyCmdRun(_ *cobra.Command, _ []string) { } // write files - writeText(model.PathNodeID, []byte(nodeInfo.NodeID.String())) - writeJSON(fmt.Sprintf(model.PathNodeInfoPriv, nodeInfo.NodeID), private) - writeText(fmt.Sprintf(model.PathSecretsEncryptionKey, nodeInfo.NodeID), secretsDBKey) - writeJSON(fmt.Sprintf(model.PathNodeInfoPub, nodeInfo.NodeID), nodeInfo.Public()) + err = common.WriteText(model.PathNodeID, flagOutdir, []byte(nodeInfo.NodeID.String())) + if err != nil { + log.Fatal().Err(err).Msg("failed to write file") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeID) + + privKeyPath := fmt.Sprintf(model.PathNodeInfoPriv, nodeInfo.NodeID) + err = common.WriteJSON(privKeyPath, flagOutdir, private) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, privKeyPath) + + secretsKeyPath := fmt.Sprintf(model.PathSecretsEncryptionKey, nodeInfo.NodeID) + err = common.WriteText(secretsKeyPath, flagOutdir, secretsDBKey) + if err != nil { + log.Fatal().Err(err).Msg("failed to write file") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, secretsKeyPath) + + public, err := nodeInfo.Public() + if err != nil { + log.Fatal().Err(err).Msg("could not access public keys") + } + pubNodeInfoPath := fmt.Sprintf(model.PathNodeInfoPub, nodeInfo.NodeID) + err = common.WriteJSON(pubNodeInfoPath, flagOutdir, public) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, pubNodeInfoPath) // write machine account info if role == flow.RoleCollection || role == flow.RoleConsensus { @@ -116,7 +137,12 @@ func keyCmdRun(_ *cobra.Command, _ []string) { log.Debug().Str("address", flagAddress).Msg("assembling machine account information") // write the public key to terminal for entry in Flow Port machineAccountPriv := assembleNodeMachineAccountKey(machineKey) - writeJSON(fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeInfo.NodeID), machineAccountPriv) + privateKeyPath := fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeInfo.NodeID) + err = common.WriteJSON(privateKeyPath, flagOutdir, machineAccountPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, privateKeyPath) } } @@ -166,27 +192,3 @@ func validateRole(role string) 
flow.Role { } return parsed } - -// validateAddressFormat validates the address provided by pretty much doing what the network layer would do before -// starting the node -func validateAddressFormat(address string) { - checkErr := func(err error) { - if err != nil { - log.Fatal().Err(err).Str("address", address).Msg("invalid address format.\n" + - `Address needs to be in the format hostname:port or ip:port e.g. "flow.com:3569"`) - } - } - - // split address into ip/hostname and port - ip, port, err := net.SplitHostPort(address) - checkErr(err) - - // check that port number is indeed a number - _, err = strconv.Atoi(port) - checkErr(err) - - // create a libp2p address from the ip and port - lp2pAddr := p2putils.MultiAddressStr(ip, port) - _, err = multiaddr.NewMultiaddr(lp2pAddr) - checkErr(err) -} diff --git a/cmd/bootstrap/cmd/key_test.go b/cmd/bootstrap/cmd/key_test.go index 6f61d78a755..14541f23ef9 100644 --- a/cmd/bootstrap/cmd/key_test.go +++ b/cmd/bootstrap/cmd/key_test.go @@ -10,7 +10,6 @@ import ( "testing" "time" - "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -124,11 +123,3 @@ func TestInvalidAddressSubprocess(t *testing.T) { return } } - -type zeroLoggerHook struct { - logs *strings.Builder -} - -func (h zeroLoggerHook) Run(_ *zerolog.Event, _ zerolog.Level, msg string) { - h.logs.WriteString(msg) -} diff --git a/cmd/bootstrap/cmd/keygen.go b/cmd/bootstrap/cmd/keygen.go index 62457fe4b56..ddb6435a9d5 100644 --- a/cmd/bootstrap/cmd/keygen.go +++ b/cmd/bootstrap/cmd/keygen.go @@ -5,11 +5,11 @@ import ( "io" "os" - "github.com/onflow/flow-go/cmd" - "github.com/onflow/flow-go/cmd/bootstrap/utils" - "github.com/spf13/cobra" + "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" ) @@ -22,7 +22,7 @@ var keygenCmd = &cobra.Command{ Long: `Generate Staking and Networking keys for a list of nodes provided by the flag '--config'`, Run: func(cmd *cobra.Command, args []string) { // check if out directory exists - exists, err := pathExists(flagOutdir) + exists, err := common.PathExists(flagOutdir) if err != nil { log.Error().Msg("could not check if directory exists") return @@ -49,12 +49,10 @@ var keygenCmd = &cobra.Command{ // write key files writeJSONFile := func(relativePath string, val interface{}) error { - writeJSON(relativePath, val) - return nil + return common.WriteJSON(relativePath, flagOutdir, val) } writeFile := func(relativePath string, data []byte) error { - writeText(relativePath, data) - return nil + return common.WriteText(relativePath, flagOutdir, data) } log.Info().Msg("writing internal private key files") @@ -85,13 +83,16 @@ var keygenCmd = &cobra.Command{ } // count roles - roleCounts := nodeCountByRole(nodes) + roleCounts := common.NodeCountByRole(nodes) for role, count := range roleCounts { log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", count, role.String())) } log.Info().Msg("generating node public information") - genNodePubInfo(nodes) + err = writeNodePubInfo(nodes) + if err != nil { + log.Fatal().Err(err).Msg("failed to generate nodes public info") + } }, } @@ -104,7 +105,7 @@ func init() { // optional parameters, used for generating machine account files keygenCmd.Flags().BoolVar(&flagDefaultMachineAccount, "machine-account", false, "whether or not to generate a default (same as networking key) machine account key file") - keygenCmd.Flags().StringVar(&flagRootChain, 
"root-chain", "local", "chain ID for the root block (can be 'main', 'test', 'sandbox', 'bench', or 'local'") + keygenCmd.Flags().StringVar(&flagRootChain, "root-chain", "local", "chain ID for the root block (can be 'main', 'test', 'sandbox', 'preview', 'bench', or 'local'") } // isEmptyDir returns True if the directory contains children @@ -122,10 +123,19 @@ func isEmptyDir(path string) (bool, error) { return false, err // Either not empty or error, suits both cases } -func genNodePubInfo(nodes []model.NodeInfo) { +func writeNodePubInfo(nodes []model.NodeInfo) error { pubNodes := make([]model.NodeInfoPub, 0, len(nodes)) for _, node := range nodes { - pubNodes = append(pubNodes, node.Public()) + pub, err := node.Public() + if err != nil { + return fmt.Errorf("failed to read public info: %w", err) + } + pubNodes = append(pubNodes, pub) + } + err := common.WriteJSON(model.PathInternalNodeInfosPub, flagOutdir, pubNodes) + if err != nil { + return err } - writeJSON(model.PathInternalNodeInfosPub, pubNodes) + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathInternalNodeInfosPub) + return nil } diff --git a/cmd/bootstrap/cmd/keys.go b/cmd/bootstrap/cmd/keys.go index 9624ade3a1a..e6e75391567 100644 --- a/cmd/bootstrap/cmd/keys.go +++ b/cmd/bootstrap/cmd/keys.go @@ -3,13 +3,13 @@ package cmd import ( "fmt" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" "github.com/onflow/flow-go/cmd/bootstrap/utils" - "github.com/onflow/flow-go/model/flow/order" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" @@ -21,7 +21,11 @@ import ( func genNetworkAndStakingKeys() []model.NodeInfo { var nodeConfigs []model.NodeConfig - readJSON(flagConfig, &nodeConfigs) + err := common.ReadJSON(flagConfig, &nodeConfigs) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } + nodes := len(nodeConfigs) log.Info().Msgf("read %v node configurations", nodes) @@ -49,10 +53,11 @@ func genNetworkAndStakingKeys() []model.NodeInfo { internalNodes = append(internalNodes, nodeInfo) } - return model.Sort(internalNodes, order.Canonical) + return model.Sort(internalNodes, flow.Canonical[flow.Identity]) } -func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto.PrivateKey) model.NodeInfo { +func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto.PrivateKey, +) model.NodeInfo { var err error nodeID, found := getNameID() if !found { @@ -63,11 +68,11 @@ func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto } log.Debug(). - Str("networkPubKey", pubKeyToString(networkKey.PublicKey())). - Str("stakingPubKey", pubKeyToString(stakingKey.PublicKey())). + Str("networkPubKey", networkKey.PublicKey().String()). + Str("stakingPubKey", stakingKey.PublicKey().String()). 
Msg("encoded public staking and network keys") - nodeInfo := model.NewPrivateNodeInfo( + nodeInfo, err := model.NewPrivateNodeInfo( nodeID, nodeConfig.Role, nodeConfig.Address, @@ -75,6 +80,9 @@ func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto networkKey, stakingKey, ) + if err != nil { + log.Fatal().Err(err).Msg("creating node info failed") + } return nodeInfo } diff --git a/cmd/bootstrap/cmd/machine_account.go b/cmd/bootstrap/cmd/machine_account.go index 898ac7b96d9..bc64a6b015c 100644 --- a/cmd/bootstrap/cmd/machine_account.go +++ b/cmd/bootstrap/cmd/machine_account.go @@ -5,10 +5,11 @@ import ( "path/filepath" "strings" + "github.com/onflow/crypto" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ioutils "github.com/onflow/flow-go/utils/io" @@ -52,7 +53,7 @@ func machineAccountRun(_ *cobra.Command, _ []string) { // check if node-machine-account-key.priv.json path exists machineAccountKeyPath := fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeID) - keyExists, err := pathExists(filepath.Join(flagOutdir, machineAccountKeyPath)) + keyExists, err := common.PathExists(filepath.Join(flagOutdir, machineAccountKeyPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if node-machine-account-key.priv.json exists") } @@ -63,7 +64,7 @@ func machineAccountRun(_ *cobra.Command, _ []string) { // check if node-machine-account-info.priv.json file exists in boostrap dir machineAccountInfoPath := fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID) - infoExists, err := pathExists(filepath.Join(flagOutdir, machineAccountInfoPath)) + infoExists, err := common.PathExists(filepath.Join(flagOutdir, machineAccountInfoPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if node-machine-account-info.priv.json exists") } @@ -80,7 +81,11 @@ func machineAccountRun(_ *cobra.Command, _ []string) { machineAccountInfo := assembleNodeMachineAccountInfo(machinePrivKey, flagMachineAccountAddress) // write machine account info - writeJSON(fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID), machineAccountInfo) + err = common.WriteJSON(fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID), flagOutdir, machineAccountInfo) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID)) } // readMachineAccountPriv reads the machine account private key files in the bootstrap dir @@ -88,7 +93,10 @@ func readMachineAccountKey(nodeID string) crypto.PrivateKey { var machineAccountPriv model.NodeMachineAccountKey path := filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeID)) - readJSON(path, &machineAccountPriv) + err := common.ReadJSON(path, &machineAccountPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } return machineAccountPriv.PrivateKey.PrivateKey } @@ -125,7 +133,11 @@ func validateMachineAccountAddress(addressStr string) error { return nil } if flow.Sandboxnet.Chain().IsValid(address) { - log.Warn().Msgf("Machine account address (%s) is **STAGINGNET** address - ensure this is desired before continuing", address) + log.Warn().Msgf("Machine account address (%s) is **SANDBOXNET** address - ensure this is desired before continuing", address) + return nil + } + if 
flow.Previewnet.Chain().IsValid(address) { + log.Warn().Msgf("Machine account address (%s) is **PREVIEWNET** address - ensure this is desired before continuing", address) return nil } if flow.Localnet.Chain().IsValid(address) { diff --git a/cmd/bootstrap/cmd/machine_account_key.go b/cmd/bootstrap/cmd/machine_account_key.go index 09a03f0b193..14bdef868df 100644 --- a/cmd/bootstrap/cmd/machine_account_key.go +++ b/cmd/bootstrap/cmd/machine_account_key.go @@ -4,12 +4,11 @@ import ( "fmt" "path" - "github.com/onflow/flow-go/crypto" - - "github.com/onflow/flow-go/cmd/bootstrap/utils" - + "github.com/onflow/crypto" "github.com/spf13/cobra" + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" ) @@ -39,7 +38,7 @@ func machineAccountKeyRun(_ *cobra.Command, _ []string) { // check if node-machine-account-key.priv.json path exists machineAccountKeyPath := fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeID) - keyExists, err := pathExists(path.Join(flagOutdir, machineAccountKeyPath)) + keyExists, err := common.PathExists(path.Join(flagOutdir, machineAccountKeyPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if node-machine-account-key.priv.json exists") } @@ -58,5 +57,9 @@ func machineAccountKeyRun(_ *cobra.Command, _ []string) { // also write the public key to terminal for entry in Flow Port machineAccountPriv := assembleNodeMachineAccountKey(machineKey) - writeJSON(machineAccountKeyPath, machineAccountPriv) + err = common.WriteJSON(machineAccountKeyPath, flagOutdir, machineAccountPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msg(fmt.Sprintf("wrote file %s/%s", flagOutdir, machineAccountKeyPath)) } diff --git a/cmd/bootstrap/cmd/machine_account_key_test.go b/cmd/bootstrap/cmd/machine_account_key_test.go index adcf45ea4b2..dfd93fcd5f6 100644 --- a/cmd/bootstrap/cmd/machine_account_key_test.go +++ b/cmd/bootstrap/cmd/machine_account_key_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" ioutils "github.com/onflow/flow-go/utils/io" @@ -49,7 +50,7 @@ func TestMachineAccountKeyFileExists(t *testing.T) { // read file priv key file before command var machineAccountPrivBefore model.NodeMachineAccountKey - readJSON(machineKeyFilePath, &machineAccountPrivBefore) + require.NoError(t, common.ReadJSON(machineKeyFilePath, &machineAccountPrivBefore)) // run command with flags machineAccountKeyRun(nil, nil) @@ -59,7 +60,7 @@ func TestMachineAccountKeyFileExists(t *testing.T) { // read machine account key file again var machineAccountPrivAfter model.NodeMachineAccountKey - readJSON(machineKeyFilePath, &machineAccountPrivAfter) + require.NoError(t, common.ReadJSON(machineKeyFilePath, &machineAccountPrivAfter)) // check if key was modified assert.Equal(t, machineAccountPrivBefore, machineAccountPrivAfter) diff --git a/cmd/bootstrap/cmd/machine_account_test.go b/cmd/bootstrap/cmd/machine_account_test.go index 5fab682e561..27631a3bddc 100644 --- a/cmd/bootstrap/cmd/machine_account_test.go +++ b/cmd/bootstrap/cmd/machine_account_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/bootstrap" model 
"github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" @@ -31,6 +32,7 @@ func TestMachineAccountHappyPath(t *testing.T) { flagRole = "consensus" flagAddress = "189.123.123.42:3869" addr, err := flow.Mainnet.Chain().AddressAtIndex(uint64(rand.Intn(1_000_000))) + t.Logf("address is %s", addr) require.NoError(t, err) flagMachineAccountAddress = addr.HexWithPrefix() @@ -114,14 +116,14 @@ func TestMachineAccountInfoFileExists(t *testing.T) { // read in info file var machineAccountInfoBefore model.NodeMachineAccountInfo - readJSON(machineInfoFilePath, &machineAccountInfoBefore) + require.NoError(t, common.ReadJSON(machineInfoFilePath, &machineAccountInfoBefore)) // run again and make sure info file was not changed machineAccountRun(nil, nil) require.Regexp(t, regex, hook.logs.String()) var machineAccountInfoAfter model.NodeMachineAccountInfo - readJSON(machineInfoFilePath, &machineAccountInfoAfter) + require.NoError(t, common.ReadJSON(machineInfoFilePath, &machineAccountInfoAfter)) assert.Equal(t, machineAccountInfoBefore, machineAccountInfoAfter) }) diff --git a/cmd/bootstrap/cmd/observer_network_key.go b/cmd/bootstrap/cmd/observer_network_key.go index 2b0fea31e3d..dfb6a2f609e 100644 --- a/cmd/bootstrap/cmd/observer_network_key.go +++ b/cmd/bootstrap/cmd/observer_network_key.go @@ -5,12 +5,12 @@ import ( "fmt" "os" + "github.com/onflow/crypto" "github.com/spf13/cobra" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" ) var ( @@ -48,7 +48,7 @@ func observerNetworkKeyRun(_ *cobra.Command, _ []string) { } // if the file already exists, exit - keyExists, err := pathExists(flagOutputFile) + keyExists, err := common.PathExists(flagOutputFile) if err != nil { log.Fatal().Err(err).Msgf("could not check if %s exists", flagOutputFile) } diff --git a/cmd/bootstrap/cmd/observer_network_key_test.go b/cmd/bootstrap/cmd/observer_network_key_test.go index 255979edba9..b0dd6c30a07 100644 --- a/cmd/bootstrap/cmd/observer_network_key_test.go +++ b/cmd/bootstrap/cmd/observer_network_key_test.go @@ -8,10 +8,10 @@ import ( "strings" "testing" + "github.com/onflow/crypto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/utils/io" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/cmd/bootstrap/cmd/partner_infos.go b/cmd/bootstrap/cmd/partner_infos.go index 4c9ded401c8..751dcbaea01 100644 --- a/cmd/bootstrap/cmd/partner_infos.go +++ b/cmd/bootstrap/cmd/partner_infos.go @@ -7,25 +7,28 @@ import ( "strings" "github.com/onflow/cadence" + "github.com/onflow/crypto" "github.com/spf13/cobra" client "github.com/onflow/flow-go-sdk/access/grpc" + "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/util/cmd/common" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcclient" ) const ( // Index of each field in the cadence NodeInfo as it corresponds to cadence.Struct.Fields // fields not needed are left out. 
- idField = iota - roleField - networkingAddressField - networkingKeyField - stakingKeyField + idField = "id" + roleField = "role" + networkingAddressField = "networkingAddress" + networkingKeyField = "networkingKey" + stakingKeyField = "stakingKey" + // PoP field isn't included because it is not stored on-chain ) const ( @@ -64,7 +67,7 @@ func populatePartnerInfosRun(_ *cobra.Command, _ []string) { flowClient := getFlowClient() - partnerWeights := make(PartnerWeights) + partnerWeights := make(common.PartnerWeights) skippedNodes := 0 numOfPartnerNodesByRole := map[flow.Role]int{ flow.RoleCollection: 0, @@ -116,12 +119,12 @@ func getFlowClient() *client.Client { insecureClient = false } - config, err := common.NewFlowClientConfig(flagANAddress, strings.TrimPrefix(flagANNetworkKey, "0x"), flow.ZeroID, insecureClient) + config, err := grpcclient.NewFlowClientConfig(flagANAddress, strings.TrimPrefix(flagANNetworkKey, "0x"), flow.ZeroID, insecureClient) if err != nil { log.Fatal().Err(err).Msgf("could not get flow client config with address (%s) and network key (%s)", flagANAddress, flagANNetworkKey) } - flowClient, err := common.FlowClient(config) + flowClient, err := grpcclient.FlowClient(config) if err != nil { log.Fatal().Err(err).Msgf("could not get flow client with address (%s) and network key (%s)", flagANAddress, flagANNetworkKey) } @@ -146,7 +149,8 @@ func executeGetProposedNodesInfosScript(ctx context.Context, client *client.Clie // parseNodeInfo convert node info retrieved from cadence script func parseNodeInfo(info cadence.Value) (*bootstrap.NodeInfoPub, error) { - fields := info.(cadence.Struct).Fields + fields := cadence.FieldsMappedByName(info.(cadence.Struct)) + nodeID, err := flow.HexStringToIdentifier(string(fields[idField].(cadence.String))) if err != nil { return nil, fmt.Errorf("failed to convert flow node ID from hex string to identifier (%s): %w", string(fields[idField].(cadence.String)), err) @@ -170,6 +174,8 @@ func parseNodeInfo(info cadence.Value) (*bootstrap.NodeInfoPub, error) { return nil, fmt.Errorf("failed to decode staking public key: %w", err) } + // PoP field isn't decoded because it is not stored on-chain + return &bootstrap.NodeInfoPub{ Role: flow.Role(fields[roleField].(cadence.UInt8)), Address: string(fields[networkingAddressField].(cadence.String)), @@ -203,12 +209,20 @@ func validateANNetworkKey(key string) error { // writeNodePubInfoFile writes the node-pub-info file func writeNodePubInfoFile(info *bootstrap.NodeInfoPub) { fileOutputPath := fmt.Sprintf(bootstrap.PathNodeInfoPub, info.NodeID) - writeJSON(fileOutputPath, info) + err := common.WriteJSON(fileOutputPath, flagOutdir, info) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, fileOutputPath) } // writePartnerWeightsFile writes the partner weights file -func writePartnerWeightsFile(partnerWeights PartnerWeights) { - writeJSON(bootstrap.FileNamePartnerWeights, partnerWeights) +func writePartnerWeightsFile(partnerWeights common.PartnerWeights) { + err := common.WriteJSON(bootstrap.FileNamePartnerWeights, flagOutdir, partnerWeights) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, bootstrap.FileNamePartnerWeights) } func printNodeCounts(numOfNodesByType map[flow.Role]int, totalNumOfPartnerNodes, skippedNodes int) { diff --git a/cmd/bootstrap/cmd/pull.go b/cmd/bootstrap/cmd/pull.go index fa595d15bd5..d2fd9abff26 100644 --- a/cmd/bootstrap/cmd/pull.go +++ 
b/cmd/bootstrap/cmd/pull.go @@ -1,21 +1,26 @@ package cmd import ( + "bytes" "context" "fmt" "path/filepath" "strings" + "sync" "time" "github.com/spf13/cobra" + "golang.org/x/sync/semaphore" "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/gcs" + "github.com/onflow/flow-go/cmd/bootstrap/utils" ) var ( - flagNetwork string - flagBucketName string + flagNetwork string + flagBucketName string + flagConcurrency int64 ) // pullCmd represents a command to pull parnter node details from the google @@ -37,6 +42,7 @@ func addPullCmdFlags() { cmd.MarkFlagRequired(pullCmd, "network") pullCmd.Flags().StringVar(&flagBucketName, "bucket", "flow-genesis-bootstrap", "google bucket name") + pullCmd.Flags().Int64Var(&flagConcurrency, "concurrency", 2, "concurrency limit") } // pull partner node info from google bucket @@ -62,15 +68,35 @@ func pull(cmd *cobra.Command, args []string) { } log.Info().Msgf("found %d files in google bucket", len(files)) + sem := semaphore.NewWeighted(flagConcurrency) + wait := sync.WaitGroup{} for _, file := range files { - if strings.Contains(file, "node-info.pub") { - fullOutpath := filepath.Join(flagOutdir, file) - log.Printf("downloading %s", file) - - err = bucket.DownloadFile(ctx, client, fullOutpath, file) - if err != nil { - log.Error().Msgf("error trying download google bucket file: %v", err) + wait.Add(1) + go func(file gcs.GCSFile) { + _ = sem.Acquire(ctx, 1) + defer func() { + sem.Release(1) + wait.Done() + }() + + if strings.Contains(file.Name, "node-info.pub") { + fullOutpath := filepath.Join(flagOutdir, file.Name) + + fmd5 := utils.CalcMd5(fullOutpath) + // only skip files that have an MD5 hash + if file.MD5 != nil && bytes.Equal(fmd5, file.MD5) { + log.Printf("skipping %s", file.Name) + return + } + + log.Printf("downloading %s", file.Name) + err = bucket.DownloadFile(ctx, client, fullOutpath, file.Name) + if err != nil { + log.Error().Msgf("error trying to download google bucket file: %v", err) + } } - } + }(file) } + + wait.Wait() } diff --git a/cmd/bootstrap/cmd/qc.go b/cmd/bootstrap/cmd/qc.go index 6e97363051b..1acb5fa51f7 100644 --- a/cmd/bootstrap/cmd/qc.go +++ b/cmd/bootstrap/cmd/qc.go @@ -5,6 +5,7 @@ import ( "path/filepath" "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" @@ -12,10 +13,10 @@ import ( ) // constructRootQC constructs root QC based on root block, votes and dkg info -func constructRootQC(block *flow.Block, votes []*model.Vote, allNodes, internalNodes []bootstrap.NodeInfo, dkgData dkg.DKGData) *flow.QuorumCertificate { +func constructRootQC(block *flow.Block, votes []*model.Vote, allNodes, internalNodes []bootstrap.NodeInfo, randomBeaconData dkg.ThresholdKeySet) *flow.QuorumCertificate { identities := bootstrap.ToIdentityList(allNodes) - participantData, err := run.GenerateQCParticipantData(allNodes, internalNodes, dkgData) + participantData, err := run.GenerateQCParticipantData(allNodes, internalNodes, randomBeaconData) if err != nil { log.Fatal().Err(err).Msg("failed to generate QC participant data") } @@ -35,8 +36,8 @@ func constructRootQC(block *flow.Block, votes []*model.Vote, allNodes, internalN } // NOTE: allNodes must be in the same order as when generating the DKG -func constructRootVotes(block *flow.Block, allNodes, internalNodes []bootstrap.NodeInfo, dkgData dkg.DKGData) { - participantData, err := run.GenerateQCParticipantData(allNodes, 
internalNodes, dkgData) +func constructRootVotes(block *flow.Block, allNodes, internalNodes []bootstrap.NodeInfo, randomBeaconData dkg.ThresholdKeySet) { + participantData, err := run.GenerateQCParticipantData(allNodes, internalNodes, randomBeaconData) if err != nil { log.Fatal().Err(err).Msg("failed to generate QC participant data") } @@ -48,6 +49,10 @@ func constructRootVotes(block *flow.Block, allNodes, internalNodes []bootstrap.N for _, vote := range votes { path := filepath.Join(bootstrap.DirnameRootBlockVotes, fmt.Sprintf(bootstrap.FilenameRootBlockVote, vote.SignerID)) - writeJSON(path, vote) + err = common.WriteJSON(path, flagOutdir, vote) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, path) } } diff --git a/cmd/bootstrap/cmd/rootblock.go b/cmd/bootstrap/cmd/rootblock.go index dd530f562d6..e16040e9434 100644 --- a/cmd/bootstrap/cmd/rootblock.go +++ b/cmd/bootstrap/cmd/rootblock.go @@ -1,14 +1,27 @@ package cmd import ( + "crypto/rand" "encoding/hex" + "fmt" + "strconv" "time" + "github.com/onflow/cadence" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/dkg" + "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/epochs" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/protocol_state" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" ) var ( @@ -16,6 +29,24 @@ var ( flagRootParent string flagRootHeight uint64 flagRootTimestamp string + flagRootView uint64 + // Deprecated: Replaced by ProtocolStateVersion + // Historically, this flag set a spork-scoped version number, by convention equal to the major software version. + // Now that we have HCUs which change the major software version mid-spork, this is no longer useful. 
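Stepping back to the pull.go hunk earlier in this diff: it bounds concurrent bucket downloads with a weighted semaphore plus a WaitGroup. A distilled, runnable sketch of that pattern follows; the file names and the sleep are stand-ins for `DownloadFile`. Note the design choice of releasing a permit only after a successful `Acquire`, which avoids releasing a permit that was never held:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(2) // matches the --concurrency default above
	var wg sync.WaitGroup

	for _, name := range []string{"a", "b", "c", "d"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			if err := sem.Acquire(ctx, 1); err != nil {
				return // context cancelled; no permit held, nothing to release
			}
			defer sem.Release(1)
			time.Sleep(10 * time.Millisecond) // stand-in for the download
			fmt.Println("downloaded", name)
		}(name)
	}
	wg.Wait() // block until every goroutine has finished
}
```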
+ deprecatedFlagProtocolVersion uint + flagKVStoreVersion string + flagFinalizationSafetyThreshold uint64 + flagEpochExtensionViewCount uint64 + flagCollectionClusters uint + flagEpochCounter uint64 + flagNumViewsInEpoch uint64 + flagNumViewsInStakingAuction uint64 + flagNumViewsInDKGPhase uint64 + // Epoch target end time config + flagUseDefaultEpochTargetEndTime bool + flagEpochTimingRefCounter uint64 + flagEpochTimingRefTimestamp uint64 + flagEpochTimingDuration uint64 ) // rootBlockCmd represents the rootBlock command @@ -49,21 +80,58 @@ func addRootBlockCmdFlags() { cmd.MarkFlagRequired(rootBlockCmd, "partner-dir") cmd.MarkFlagRequired(rootBlockCmd, "partner-weights") + // required parameters for generation of epoch setup and commit events + rootBlockCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "epoch counter for the epoch beginning with the root block") + rootBlockCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 4000, "length of each epoch measured in views") + rootBlockCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 100, "length of the epoch staking phase measured in views") + rootBlockCmd.Flags().Uint64Var(&flagNumViewsInDKGPhase, "epoch-dkg-phase-length", 1000, "length of each DKG phase measured in views") + + // optional parameters to influence various aspects of identity generation + rootBlockCmd.Flags().UintVar(&flagCollectionClusters, "collection-clusters", 2, "number of collection clusters") + + cmd.MarkFlagRequired(rootBlockCmd, "epoch-counter") + cmd.MarkFlagRequired(rootBlockCmd, "epoch-length") + cmd.MarkFlagRequired(rootBlockCmd, "epoch-staking-phase-length") + cmd.MarkFlagRequired(rootBlockCmd, "epoch-dkg-phase-length") + // required parameters for generation of root block, root execution result and root block seal - rootBlockCmd.Flags().StringVar(&flagRootChain, "root-chain", "local", "chain ID for the root block (can be 'main', 'test', 'sandbox', 'bench', or 'local'") + rootBlockCmd.Flags().StringVar(&flagRootChain, "root-chain", "local", "chain ID for the root block (can be 'main', 'test', 'sandbox', 'preview', 'bench', or 'local')") rootBlockCmd.Flags().StringVar(&flagRootParent, "root-parent", "0000000000000000000000000000000000000000000000000000000000000000", "ID for the parent of the root block") rootBlockCmd.Flags().Uint64Var(&flagRootHeight, "root-height", 0, "height of the root block") rootBlockCmd.Flags().StringVar(&flagRootTimestamp, "root-timestamp", time.Now().UTC().Format(time.RFC3339), "timestamp of the root block (RFC3339)") + rootBlockCmd.Flags().Uint64Var(&flagRootView, "root-view", 0, "view of the root block") + rootBlockCmd.Flags().UintVar(&deprecatedFlagProtocolVersion, "protocol-version", 0, "deprecated: this flag will be ignored and removed in a future release") + rootBlockCmd.Flags().Uint64Var(&flagFinalizationSafetyThreshold, "kvstore-finalization-safety-threshold", 0, "defines finalization safety threshold") + rootBlockCmd.Flags().Uint64Var(&flagEpochExtensionViewCount, "kvstore-epoch-extension-view-count", 0, "length of epoch extension in views, default is 100_000 which is approximately 1 day") + rootBlockCmd.Flags().StringVar(&flagKVStoreVersion, "kvstore-version", "default", + "protocol state KVStore version to initialize ('default' or an integer equal to a supported protocol version: '0', '1', '2', ...)") cmd.MarkFlagRequired(rootBlockCmd, "root-chain") cmd.MarkFlagRequired(rootBlockCmd, "root-parent") cmd.MarkFlagRequired(rootBlockCmd, "root-height") + 
cmd.MarkFlagRequired(rootBlockCmd, "root-view") - rootBlockCmd.Flags().BytesHexVar(&flagBootstrapRandomSeed, "random-seed", GenerateRandomSeed(flow.EpochSetupRandomSourceLength), "The seed used to for DKG, Clustering and Cluster QC generation") + // Epoch timing config - these values must be set identically to `EpochTimingConfig` in the FlowEpoch smart contract. + // See https://github.com/onflow/flow-core-contracts/blob/240579784e9bb8d97d91d0e3213614e25562c078/contracts/epochs/FlowEpoch.cdc#L259-L266 + // Must specify either: + // 1. --use-default-epoch-timing and no other `--epoch-timing*` flags + // 2. All `--epoch-timing*` flags except --use-default-epoch-timing + // + // Use Option 1 for Benchnet, Localnet, etc. + // Use Option 2 for Mainnet, Testnet, Canary. + rootBlockCmd.Flags().BoolVar(&flagUseDefaultEpochTargetEndTime, "use-default-epoch-timing", false, "whether to use the default target end time") + rootBlockCmd.Flags().Uint64Var(&flagEpochTimingRefCounter, "epoch-timing-ref-counter", 0, "the reference epoch for computing the root epoch's target end time") + rootBlockCmd.Flags().Uint64Var(&flagEpochTimingRefTimestamp, "epoch-timing-ref-timestamp", 0, "the end time of the reference epoch, specified in second-precision Unix time, to use to compute the root epoch's target end time") + rootBlockCmd.Flags().Uint64Var(&flagEpochTimingDuration, "epoch-timing-duration", 0, "the duration of each epoch in seconds, used to compute the root epoch's target end time") + + rootBlockCmd.MarkFlagsOneRequired("use-default-epoch-timing", "epoch-timing-ref-counter", "epoch-timing-ref-timestamp", "epoch-timing-duration") + rootBlockCmd.MarkFlagsRequiredTogether("epoch-timing-ref-counter", "epoch-timing-ref-timestamp", "epoch-timing-duration") + for _, flag := range []string{"epoch-timing-ref-counter", "epoch-timing-ref-timestamp", "epoch-timing-duration"} { + rootBlockCmd.MarkFlagsMutuallyExclusive("use-default-epoch-timing", flag) + } } func rootBlock(cmd *cobra.Command, args []string) { - // maintain backward compatibility with old flag name if deprecatedFlagPartnerStakes != "" { log.Warn().Msg("using deprecated flag --partner-stakes (use --partner-weights instead)") @@ -73,39 +141,194 @@ func rootBlock(cmd *cobra.Command, args []string) { log.Fatal().Msg("cannot use both --partner-stakes and --partner-weights flags (use only --partner-weights)") } } + if deprecatedFlagProtocolVersion != 0 { + log.Warn().Msg("using deprecated flag --protocol-version; please remove this flag from your workflow, it is ignored and will be removed in a future release") + } - if len(flagBootstrapRandomSeed) != flow.EpochSetupRandomSourceLength { - log.Error().Int("expected", flow.EpochSetupRandomSourceLength).Int("actual", len(flagBootstrapRandomSeed)).Msg("random seed provided length is not valid") - return + chainID := parseChainID(flagRootChain) + if (chainID == flow.Testnet || chainID == flow.Mainnet) && flagRootView == 0 { + log.Fatal().Msgf("--root-view must be non-zero on %q chain", flagRootChain) } - log.Info().Str("seed", hex.EncodeToString(flagBootstrapRandomSeed)).Msg("deterministic bootstrapping random seed") - log.Info().Msg("") + finalizationSet := cmd.Flags().Lookup("kvstore-finalization-safety-threshold").Changed + epochExtensionSet := cmd.Flags().Lookup("kvstore-epoch-extension-view-count").Changed + + // Warn if KV store values were not set on mainnet/testnet + if chainID == flow.Testnet || chainID == flow.Mainnet { + if !finalizationSet || !epochExtensionSet { + log.Fatal().Msgf( + "KV store values 
(epoch extension view count and finalization safety threshold) must be explicitly set on the %q chain", + flagRootChain, + ) + } + } else { + defaultEpochSafetyParams, err := protocol.DefaultEpochSafetyParams(chainID) + if err != nil { + log.Fatal().Err(err).Msg("could not get default epoch commit safety parameters") + } + + // Use default values for non-mainnet/testnet chains if not explicitly set + if !finalizationSet { + flagFinalizationSafetyThreshold = defaultEpochSafetyParams.FinalizationSafetyThreshold + } + if !epochExtensionSet { + flagEpochExtensionViewCount = defaultEpochSafetyParams.EpochExtensionViewCount + } + } + + // validate epoch configs + err := validateEpochConfig() + if err != nil { + log.Fatal().Err(err).Msg("invalid or unsafe config for finalization safety threshold") + } + err = validateOrPopulateEpochTimingConfig() + if err != nil { + log.Fatal().Err(err).Msg("invalid epoch timing config") + } + // Read partner nodes' information and internal nodes' information. + // With "internal nodes" we reference nodes whose private keys we have. In comparison, + // for "partner nodes" we generally do not have their keys. However, we allow some overlap, + // in that we tolerate a configuration where information about an "internal node" is also + // duplicated in the list of "partner nodes". log.Info().Msg("collecting partner network and staking keys") - partnerNodes := readPartnerNodeInfos() + rawPartnerNodes, err := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full partner node infos") + } log.Info().Msg("") log.Info().Msg("generating internal private networking and staking keys") - internalNodes := readInternalNodeInfos() + internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full internal node infos") + } + log.Info().Msg("") + // we now convert to the strict meaning of: "internal nodes" vs "partner nodes" + // • "internal nodes" we have the private keys for + // • "partner nodes" we don't have the keys for + // • both sets are disjoint (no common nodes) + log.Info().Msg("remove internal partner nodes") + partnerNodes := common.FilterInternalPartners(rawPartnerNodes, internalNodes) + log.Info().Msgf("removed %d internal partner nodes", len(rawPartnerNodes)-len(partnerNodes)) + log.Info().Msg("checking constraints on consensus nodes") checkConstraints(partnerNodes, internalNodes) log.Info().Msg("") log.Info().Msg("assembling network and staking keys") - stakingNodes := mergeNodeInfos(internalNodes, partnerNodes) - writeJSON(model.PathNodeInfosPub, model.ToPublicNodeInfoList(stakingNodes)) + stakingNodes, err := mergeNodeInfos(internalNodes, partnerNodes) + if err != nil { + log.Fatal().Err(err).Msg("failed to merge node infos") + } + publicInfo, err := model.ToPublicNodeInfoList(stakingNodes) + if err != nil { + log.Fatal().Err(err).Msg("failed to read public node info") + } + err = common.WriteJSON(model.PathNodeInfosPub, flagOutdir, publicInfo) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfosPub) log.Info().Msg("") log.Info().Msg("running DKG for consensus nodes") - dkgData := runBeaconKG(model.FilterByRole(stakingNodes, flow.RoleConsensus)) + randomBeaconData, dkgIndexMap := runBeaconKG(model.FilterByRole(stakingNodes, flow.RoleConsensus)) + log.Info().Msg("") + + // create 
flow.IdentityList representation of the participant set + participants := model.ToIdentityList(stakingNodes).Sort(flow.Canonical[flow.Identity]) + + log.Info().Msg("computing collection node clusters") + assignments, clusters, err := common.ConstructClusterAssignment(log, model.ToIdentityList(partnerNodes), model.ToIdentityList(internalNodes), int(flagCollectionClusters)) + if err != nil { + log.Fatal().Err(err).Msg("unable to generate cluster assignment") + } + log.Info().Msg("") + + log.Info().Msg("constructing root blocks for collection node clusters") + clusterBlocks := run.GenerateRootClusterBlocks(flagEpochCounter, clusters) + log.Info().Msg("") + + log.Info().Msg("constructing root QCs for collection node clusters") + clusterQCs := run.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) + log.Info().Msg("") + + log.Info().Msg("constructing root header") + headerBody, err := constructRootHeaderBody(flagRootChain, flagRootParent, flagRootHeight, flagRootView, flagRootTimestamp) + if err != nil { + log.Fatal().Err(err).Msg("failed to construct root header") + } + log.Info().Msg("") + + log.Info().Msg("constructing intermediary bootstrapping data") + epochSetup, epochCommit, err := constructRootEpochEvents(headerBody.View, participants, assignments, clusterQCs, randomBeaconData, dkgIndexMap) + if err != nil { + log.Fatal().Err(err).Msg("failed to construct root epoch events") + } + epochConfig := generateExecutionStateEpochConfig(epochSetup, clusterQCs, randomBeaconData) + intermediaryEpochData := IntermediaryEpochData{ + RootEpochSetup: epochSetup, + RootEpochCommit: epochCommit, + ExecutionStateConfig: epochConfig, + } + intermediaryParamsData := IntermediaryParamsData{ + FinalizationSafetyThreshold: flagFinalizationSafetyThreshold, + EpochExtensionViewCount: flagEpochExtensionViewCount, + } + intermediaryData := IntermediaryBootstrappingData{ + IntermediaryEpochData: intermediaryEpochData, + IntermediaryParamsData: intermediaryParamsData, + } + err = common.WriteJSON(model.PathIntermediaryBootstrappingData, flagOutdir, intermediaryData) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathIntermediaryBootstrappingData) log.Info().Msg("") log.Info().Msg("constructing root block") - block := constructRootBlock(flagRootChain, flagRootParent, flagRootHeight, flagRootTimestamp) - writeJSON(model.PathRootBlockData, block) + minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents(epochSetup, epochCommit) + if err != nil { + log.Fatal().Err(err).Msg("failed to construct epoch protocol state") + } + + var rootProtocolState protocol_state.KVStoreAPI + if flagKVStoreVersion != "default" { + kvStoreVersion, err := strconv.ParseUint(flagKVStoreVersion, 10, 64) + if err != nil { + log.Fatal().Err(err).Msgf("--kvstore-version must be a supported integer version number: (eg. '0', '1' or '2', etc.) 
got %s ", flagKVStoreVersion) + } + rootProtocolState, err = kvstore.NewKVStore( + kvStoreVersion, + flagFinalizationSafetyThreshold, + flagEpochExtensionViewCount, + minEpochStateEntry.ID(), + ) + if err != nil { + log.Fatal().Err(err).Msgf("failed to construct root kvstore with version: %d", kvStoreVersion) + } + } else { + rootProtocolState, err = kvstore.NewDefaultKVStore( + flagFinalizationSafetyThreshold, + flagEpochExtensionViewCount, + minEpochStateEntry.ID(), + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to construct default root kvstore") + } + } + block, err := constructRootBlock(headerBody, rootProtocolState.ID()) + if err != nil { + log.Fatal().Err(err).Msg("failed to construct root block") + } + err = common.WriteJSON(model.PathRootBlockData, flagOutdir, block) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathRootBlockData) log.Info().Msg("") log.Info().Msg("constructing and writing votes") @@ -113,7 +336,72 @@ func rootBlock(cmd *cobra.Command, args []string) { block, model.FilterByRole(stakingNodes, flow.RoleConsensus), model.FilterByRole(internalNodes, flow.RoleConsensus), - dkgData, + randomBeaconData, ) log.Info().Msg("") } + +// validateEpochConfig validates configuration of the epoch commitment deadline. +func validateEpochConfig() error { + chainID := parseChainID(flagRootChain) + dkgFinalView := flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*3 // 3 DKG phases + epochCommitDeadline := flagNumViewsInEpoch - flagFinalizationSafetyThreshold + + defaultEpochSafetyParams, err := protocol.DefaultEpochSafetyParams(chainID) + if err != nil { + return fmt.Errorf("could not get default epoch commit safety threshold: %w", err) + } + + // sanity check: the safety threshold is >= the default for the chain + if flagFinalizationSafetyThreshold < defaultEpochSafetyParams.FinalizationSafetyThreshold { + return fmt.Errorf("potentially unsafe epoch config: epoch commit safety threshold smaller than expected (%d < %d)", flagFinalizationSafetyThreshold, defaultEpochSafetyParams.FinalizationSafetyThreshold) + } + if flagEpochExtensionViewCount < defaultEpochSafetyParams.EpochExtensionViewCount { + return fmt.Errorf("potentially unsafe epoch config: epoch extension view count smaller than expected (%d < %d)", flagEpochExtensionViewCount, defaultEpochSafetyParams.EpochExtensionViewCount) + } + // sanity check: epoch commitment deadline cannot be before the DKG end + if epochCommitDeadline <= dkgFinalView { + return fmt.Errorf("invalid epoch config: the epoch commitment deadline (%d) is before the DKG final view (%d)", epochCommitDeadline, dkgFinalView) + } + // sanity check: the difference between DKG end and safety threshold is also >= the default safety threshold + if epochCommitDeadline-dkgFinalView < defaultEpochSafetyParams.FinalizationSafetyThreshold { + return fmt.Errorf("potentially unsafe epoch config: time between DKG end and epoch commitment deadline is smaller than expected (%d-%d < %d)", + epochCommitDeadline, dkgFinalView, defaultEpochSafetyParams.FinalizationSafetyThreshold) + } + return nil +} + +// generateExecutionStateEpochConfig generates epoch-related configuration used +// to generate an empty root execution state. This config is generated in the +// `rootblock` alongside the root epoch and root protocol state ID for consistency. 
+func generateExecutionStateEpochConfig( + epochSetup *flow.EpochSetup, + clusterQCs []*flow.QuorumCertificate, + dkgData dkg.ThresholdKeySet, +) epochs.EpochConfig { + + randomSource := make([]byte, flow.EpochSetupRandomSourceLength) + if _, err := rand.Read(randomSource); err != nil { + log.Fatal().Err(err).Msg("failed to generate a random source") + } + cdcRandomSource, err := cadence.NewString(hex.EncodeToString(randomSource)) + if err != nil { + log.Fatal().Err(err).Msg("invalid random source") + } + + epochConfig := epochs.EpochConfig{ + EpochTokenPayout: cadence.UFix64(0), + RewardCut: cadence.UFix64(0), + FLOWsupplyIncreasePercentage: cadence.UFix64(0), + CurrentEpochCounter: cadence.UInt64(epochSetup.Counter), + NumViewsInEpoch: cadence.UInt64(flagNumViewsInEpoch), + NumViewsInStakingAuction: cadence.UInt64(flagNumViewsInStakingAuction), + NumViewsInDKGPhase: cadence.UInt64(flagNumViewsInDKGPhase), + NumCollectorClusters: cadence.UInt16(flagCollectionClusters), + RandomSource: cdcRandomSource, + CollectorClusters: epochSetup.Assignments, + ClusterQCs: clusterQCs, + DKGPubKeys: encodable.WrapRandomBeaconPubKeys(dkgData.PubKeyShares), + } + return epochConfig +} diff --git a/cmd/bootstrap/cmd/rootblock_test.go b/cmd/bootstrap/cmd/rootblock_test.go index 09bc7d10305..c51c4f3d50d 100644 --- a/cmd/bootstrap/cmd/rootblock_test.go +++ b/cmd/bootstrap/cmd/rootblock_test.go @@ -2,29 +2,30 @@ package cmd import ( "encoding/hex" + "fmt" "os" + "os/exec" "path/filepath" "regexp" "strings" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/cmd/bootstrap/utils" model "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) -const rootBlockHappyPathLogs = "^deterministic bootstrapping random seed" + - "collecting partner network and staking keys" + +const rootBlockHappyPathLogs = "collecting partner network and staking keys" + `read \d+ partner node configuration files` + `read \d+ weights for partner nodes` + "generating internal private networking and staking keys" + `read \d+ internal private node-info files` + `read internal node configurations` + `read \d+ weights for internal nodes` + + `remove internal partner nodes` + + `removed 0 internal partner nodes` + `checking constraints on consensus nodes` + `assembling network and staking keys` + `wrote file \S+/node-infos.pub.json` + @@ -34,6 +35,14 @@ const rootBlockHappyPathLogs = "^deterministic bootstrapping random seed" + `finished running DKG` + `.+/random-beacon.priv.json` + `wrote file \S+/root-dkg-data.priv.json` + + `computing collection node clusters` + + `constructing root blocks for collection node clusters` + + `constructing root QCs for collection node clusters` + + `producing QC for cluster .*` + + `producing QC for cluster .*` + + `constructing root header` + + `constructing intermediary bootstrapping data` + + `wrote file \S+/intermediary-bootstrapping-data.json` + `constructing root block` + `wrote file \S+/root-block.json` + `constructing and writing votes` + @@ -41,93 +50,141 @@ const rootBlockHappyPathLogs = "^deterministic bootstrapping random seed" + var rootBlockHappyPathRegex = regexp.MustCompile(rootBlockHappyPathLogs) -func TestRootBlock_HappyPath(t *testing.T) { - deterministicSeed := GenerateRandomSeed(flow.EpochSetupRandomSourceLength) +// setupHappyPathFlags sets up all required flags for the root block happy path test. 
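Stepping back to generateExecutionStateEpochConfig above: the epoch random source is drawn from crypto/rand and handed to the FlowEpoch contract configuration as a hex-encoded cadence string. That handling can be exercised standalone; a minimal sketch using the same crypto/rand, hex, and cadence calls (the local length constant stands in for flow.EpochSetupRandomSourceLength, whose value lives in the flow-go model package):

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"

	"github.com/onflow/cadence"
)

// epochSetupRandomSourceLength is a stand-in for flow.EpochSetupRandomSourceLength.
const epochSetupRandomSourceLength = 16

func main() {
	// Draw the epoch random source from a cryptographically secure source.
	randomSource := make([]byte, epochSetupRandomSourceLength)
	if _, err := rand.Read(randomSource); err != nil {
		panic(err)
	}
	// Wrap the hex encoding as a cadence.String for the contract config.
	cdcRandomSource, err := cadence.NewString(hex.EncodeToString(randomSource))
	if err != nil {
		panic(err)
	}
	fmt.Println(cdcRandomSource)
}
```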
+func setupHappyPathFlags(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { rootParent := unittest.StateCommitmentFixture() - chainName := "main" - rootHeight := uint64(12332) + flagOutdir = bootDir + flagConfig = configPath + flagPartnerNodeInfoDir = partnerDir + flagPartnerWeights = partnerWeights + flagInternalNodePrivInfoDir = internalPrivDir + + flagRootParent = hex.EncodeToString(rootParent[:]) + flagRootChain = "main" + flagRootHeight = 12332 + flagRootView = 1000 + flagEpochCounter = 0 + flagNumViewsInEpoch = 100_000 + flagNumViewsInStakingAuction = 50_000 + flagNumViewsInDKGPhase = 2_000 + flagFinalizationSafetyThreshold = 1_000 + flagUseDefaultEpochTargetEndTime = true + flagEpochTimingRefCounter = 0 + flagEpochTimingRefTimestamp = 0 + flagEpochTimingDuration = 0 +} +// TestRootBlock_HappyPath verifies that the rootBlock function +// completes successfully with valid arguments and outputs +// logs matching the expected pattern. +func TestRootBlock_HappyPath(t *testing.T) { utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { + setupHappyPathFlags(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath) - flagOutdir = bootDir - - flagConfig = configPath - flagPartnerNodeInfoDir = partnerDir - flagPartnerWeights = partnerWeights - flagInternalNodePrivInfoDir = internalPrivDir - - flagRootParent = hex.EncodeToString(rootParent[:]) - flagRootChain = chainName - flagRootHeight = rootHeight - - // set deterministic bootstrapping seed - flagBootstrapRandomSeed = deterministicSeed + // KV store values (epoch extension view count and finalization safety threshold) must be explicitly set for mainnet + require.NoError(t, rootBlockCmd.Flags().Set("kvstore-finalization-safety-threshold", "1000")) + require.NoError(t, rootBlockCmd.Flags().Set("kvstore-epoch-extension-view-count", "100000")) hook := zeroLoggerHook{logs: &strings.Builder{}} log = log.Hook(hook) - rootBlock(nil, nil) - assert.Regexp(t, rootBlockHappyPathRegex, hook.logs.String()) + rootBlock(rootBlockCmd, nil) + require.Regexp(t, rootBlockHappyPathRegex, hook.logs.String()) hook.logs.Reset() // check if root protocol snapshot exists rootBlockDataPath := filepath.Join(bootDir, model.PathRootBlockData) - assert.FileExists(t, rootBlockDataPath) + require.FileExists(t, rootBlockDataPath) }) } -func TestRootBlock_Deterministic(t *testing.T) { - deterministicSeed := GenerateRandomSeed(flow.EpochSetupRandomSourceLength) - rootParent := unittest.StateCommitmentFixture() - chainName := "main" - rootHeight := uint64(1000) +// TestInvalidRootBlockView verifies that running +// rootBlock with an invalid root view (0) on "main" or "testnet" chains. +// The test runs in subprocesses because the tested code calls os.Exit. +func TestInvalidRootBlockView(t *testing.T) { + for _, chain := range []string{"main", "test"} { + t.Run("invalid root block view for "+chain, func(t *testing.T) { + expectedError := fmt.Sprintf("--root-view must be non-zero on %q chain", chain) + extraEnv := []string{"CHAIN=" + chain} + + runTestInSubprocessWithError( + t, + "TestInvalidRootBlockViewSubprocess", + expectedError, + extraEnv, + ) + }) + } +} - utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { +// TestInvalidRootBlockViewSubprocess runs in subprocess for invalid root view test on various chains +// This test only runs when invoked by TestInvalidRootBlockView as a sub-process. 
+func TestInvalidRootBlockViewSubprocess(t *testing.T) { + invalidRootBlockSubprocess(t, func() { + flagRootView = 0 + flagRootChain = os.Getenv("CHAIN") + }) +} - flagOutdir = bootDir +// TestInvalidKVStoreValues verifies that running +// rootBlock without explicitly set KV store values fails on the "main" and "test" chains. +// The test runs in subprocesses because the tested code calls os.Exit. +func TestInvalidKVStoreValues(t *testing.T) { + for _, chain := range []string{"main", "test"} { + t.Run("invalid kv store values for "+chain, func(t *testing.T) { + expectedError := fmt.Sprintf("KV store values (epoch extension view count and finalization safety threshold) must be explicitly set on the %q chain", chain) + extraEnv := []string{"CHAIN=" + chain} + + runTestInSubprocessWithError( + t, + "TestInvalidKVStoreValuesSubprocess", + expectedError, + extraEnv, + ) + }) + } +} - flagConfig = configPath - flagPartnerNodeInfoDir = partnerDir - flagPartnerWeights = partnerWeights - flagInternalNodePrivInfoDir = internalPrivDir +// TestInvalidKVStoreValuesSubprocess runs in a subprocess for the invalid KV store values test on various chains. +func TestInvalidKVStoreValuesSubprocess(t *testing.T) { + invalidRootBlockSubprocess(t, func() { + flagRootChain = os.Getenv("CHAIN") + }) +} - flagRootParent = hex.EncodeToString(rootParent[:]) - flagRootChain = chainName - flagRootHeight = rootHeight +// invalidRootBlockSubprocess is a reusable helper that runs rootBlock() with setup and logger hook, +// allowing the caller to override flags via the flagsModifier. +func invalidRootBlockSubprocess(t *testing.T, flagsModifier func()) { + if os.Getenv("FLAG_RUN_IN_SUBPROCESS_ONLY") != "1" { + return + } - // set deterministic bootstrapping seed - flagBootstrapRandomSeed = deterministicSeed + utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { + setupHappyPathFlags(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath) + + // Allow customization of flags before running rootBlock + if flagsModifier != nil { + flagsModifier() + } hook := zeroLoggerHook{logs: &strings.Builder{}} log = log.Hook(hook) - rootBlock(nil, nil) - require.Regexp(t, rootBlockHappyPathRegex, hook.logs.String()) - hook.logs.Reset() - - // check if root protocol snapshot exists - rootBlockDataPath := filepath.Join(bootDir, model.PathRootBlockData) - assert.FileExists(t, rootBlockDataPath) - - // read snapshot - firstRootBlockData, err := utils.ReadRootBlock(rootBlockDataPath) - require.NoError(t, err) - - // delete snapshot file - err = os.Remove(rootBlockDataPath) - require.NoError(t, err) - - rootBlock(nil, nil) - require.Regexp(t, rootBlockHappyPathRegex, hook.logs.String()) - hook.logs.Reset() + rootBlock(rootBlockCmd, nil) + }) +} - // check if root protocol snapshot exists - assert.FileExists(t, rootBlockDataPath) +// runTestInSubprocessWithError executes a test function in a subprocess, +// expecting it to fail with an error. It is used for testing code paths +// that call os.Exit. +func runTestInSubprocessWithError(t *testing.T, testName, expectedOutput string, extraEnv []string) { + cmd := exec.Command(os.Args[0], "-test.run="+testName) - // read snapshot - secondRootBlockData, err := utils.ReadRootBlock(rootBlockDataPath) - require.NoError(t, err) + env := append(os.Environ(), "FLAG_RUN_IN_SUBPROCESS_ONLY=1") + env = append(env, extraEnv...) 
+ cmd.Env = env - assert.Equal(t, firstRootBlockData, secondRootBlockData) - }) + output, err := cmd.CombinedOutput() + require.Error(t, err) + require.Contains(t, string(output), expectedOutput) } diff --git a/cmd/bootstrap/cmd/seal.go b/cmd/bootstrap/cmd/seal.go index 91533377a0e..235ea48fd2e 100644 --- a/cmd/bootstrap/cmd/seal.go +++ b/cmd/bootstrap/cmd/seal.go @@ -2,21 +2,17 @@ package cmd import ( "encoding/hex" + "time" "github.com/onflow/flow-go/cmd/bootstrap/run" - "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" - "github.com/onflow/flow-go/module/signature" ) func constructRootResultAndSeal( rootCommit string, block *flow.Block, - participants flow.IdentityList, - assignments flow.AssignmentList, - clusterQCs []*flow.QuorumCertificate, - dkgData dkg.DKGData, + epochSetup *flow.EpochSetup, + epochCommit *flow.EpochCommit, ) (*flow.ExecutionResult, *flow.Seal) { stateCommitBytes, err := hex.DecodeString(rootCommit) @@ -31,42 +27,10 @@ func constructRootResultAndSeal( Msg("root state commitment has incompatible length") } - firstView := block.Header.View - epochSetup := &flow.EpochSetup{ - Counter: flagEpochCounter, - FirstView: firstView, - FinalView: firstView + flagNumViewsInEpoch - 1, - DKGPhase1FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase - 1, - DKGPhase2FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*2 - 1, - DKGPhase3FinalView: firstView + flagNumViewsInStakingAuction + flagNumViewsInDKGPhase*3 - 1, - Participants: participants.Sort(order.Canonical), - Assignments: assignments, - RandomSource: flagBootstrapRandomSeed, - } - - qcsWithSignerIDs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(clusterQCs)) - for i, clusterQC := range clusterQCs { - members := assignments[i] - signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(members, clusterQC.SignerIndices) - if err != nil { - log.Fatal().Err(err).Msgf("could not decode signer IDs from clusterQC at index %v", i) - } - qcsWithSignerIDs = append(qcsWithSignerIDs, &flow.QuorumCertificateWithSignerIDs{ - View: clusterQC.View, - BlockID: clusterQC.BlockID, - SignerIDs: signerIDs, - SigData: clusterQC.SigData, - }) - } - - epochCommit := &flow.EpochCommit{ - Counter: flagEpochCounter, - ClusterQCs: flow.ClusterQCVoteDatasFromQCs(qcsWithSignerIDs), - DKGGroupKey: dkgData.PubGroupKey, - DKGParticipantKeys: dkgData.PubKeyShares, + result, err := run.GenerateRootResult(block, stateCommit, epochSetup, epochCommit) + if err != nil { + log.Fatal().Err(err).Msg("could not generate root result") } - - result := run.GenerateRootResult(block, stateCommit, epochSetup, epochCommit) seal, err := run.GenerateRootSeal(result) if err != nil { log.Fatal().Err(err).Msg("could not generate root seal") @@ -78,3 +42,22 @@ func constructRootResultAndSeal( return result, seal } + +// rootEpochTargetEndTime computes the target end time for the given epoch, using the given config. +// CAUTION: the variables `flagEpochTimingRefCounter`, `flagEpochTimingDuration`, and +// `flagEpochTimingRefTimestamp` must contain proper values. You can either specify a value for +// each config parameter or use the function `validateOrPopulateEpochTimingConfig()` to populate the variables +// from defaults. 
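The target-end-time arithmetic implemented by the function below is straightforward to check by hand: targetEndTime = refTimestamp + (epochCounter - refCounter) * duration. A runnable worked example; all numbers are illustrative and not taken from any real network:

```go
package main

import "fmt"

func main() {
	// Illustrative values only: epoch 8 is the reference epoch, each epoch
	// lasts one week, and we want the target end time of epoch 10.
	const (
		refCounter   = 8
		refTimestamp = 1_700_000_000 // Unix seconds when the reference epoch ends
		duration     = 604_800       // one week per epoch, in seconds
		epochCounter = 10
	)
	// Two epochs after the reference: 1_700_000_000 + 2*604_800.
	targetEndTime := uint64(refTimestamp + (epochCounter-refCounter)*duration)
	fmt.Println(targetEndTime) // 1701209600
}
```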
+func rootEpochTargetEndTime() uint64 { + if flagEpochTimingRefTimestamp == 0 || flagEpochTimingDuration == 0 { + panic("invalid epoch timing config: must specify ALL of --epoch-target-end-time-ref-counter, --epoch-target-end-time-ref-timestamp, and --epoch-target-end-time-duration") + } + if flagEpochCounter < flagEpochTimingRefCounter { + panic("invalid epoch timing config: reference epoch counter must be less than or equal to root epoch counter") + } + targetEndTime := flagEpochTimingRefTimestamp + (flagEpochCounter-flagEpochTimingRefCounter)*flagEpochTimingDuration + if targetEndTime <= uint64(time.Now().Unix()) { + panic("sanity check failed: root epoch target end time is before current time") + } + return targetEndTime +} diff --git a/cmd/bootstrap/cmd/util.go b/cmd/bootstrap/cmd/util.go index 0a8cba39e54..ac0b7358491 100644 --- a/cmd/bootstrap/cmd/util.go +++ b/cmd/bootstrap/cmd/util.go @@ -2,15 +2,9 @@ package cmd import ( "crypto/rand" - "encoding/json" - "fmt" - "os" - "path/filepath" + "strings" - "github.com/onflow/flow-go/crypto" - model "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/io" + "github.com/rs/zerolog" ) func GenerateRandomSeeds(n int, seedLen int) [][]byte { @@ -29,89 +23,13 @@ func GenerateRandomSeed(seedLen int) []byte { return seed } -func readJSON(path string, target interface{}) { - dat, err := io.ReadFile(path) - if err != nil { - log.Fatal().Err(err).Msg("cannot read json") - } - err = json.Unmarshal(dat, target) - if err != nil { - log.Fatal().Err(err).Msgf("cannot unmarshal json in file %s", path) - } -} - -func writeJSON(path string, data interface{}) { - bz, err := json.MarshalIndent(data, "", " ") - if err != nil { - log.Fatal().Err(err).Msg("cannot marshal json") - } - - writeText(path, bz) -} - -func writeText(path string, data []byte) { - path = filepath.Join(flagOutdir, path) - - err := os.MkdirAll(filepath.Dir(path), 0755) - if err != nil { - log.Fatal().Err(err).Msg("could not create output dir") - } - - err = os.WriteFile(path, data, 0644) - if err != nil { - log.Fatal().Err(err).Msg("could not write file") - } - - log.Info().Msgf("wrote file %v", path) -} - -func pubKeyToString(key crypto.PublicKey) string { - return fmt.Sprintf("%x", key.Encode()) -} - -func filesInDir(dir string) ([]string, error) { - exists, err := pathExists(dir) - if err != nil { - return nil, fmt.Errorf("could not check if dir exists: %w", err) - } - - if !exists { - return nil, fmt.Errorf("dir %v does not exist", dir) - } - - var files []string - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if !info.IsDir() { - files = append(files, path) - } - return nil - }) - return files, err +// zeroLoggerHook is a simple logger hook used for capturing log output +// from zerolog during tests. It writes all log messages to a provided +// strings.Builder so they can be inspected later. 
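To see how such a hook is wired into a logger for test assertions, a self-contained sketch; the captureHook type is a hypothetical mirror of the zeroLoggerHook defined below, and the zerolog calls are the library's standard API:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/rs/zerolog"
)

type captureHook struct{ logs *strings.Builder }

// Run satisfies zerolog.Hook: every message that passes through the logger
// is appended to the builder so a test can inspect it afterwards.
func (h captureHook) Run(_ *zerolog.Event, _ zerolog.Level, msg string) {
	h.logs.WriteString(msg)
}

func main() {
	hook := captureHook{logs: &strings.Builder{}}
	// Discard the formatted output; the hook still sees every message.
	log := zerolog.New(io.Discard).Hook(hook)
	log.Info().Msg("wrote file bootstrap/root-block.json")
	fmt.Println(strings.Contains(hook.logs.String(), "root-block.json")) // true
}
```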
+type zeroLoggerHook struct { + logs *strings.Builder } -// pathExists -func pathExists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func nodeCountByRole(nodes []model.NodeInfo) map[flow.Role]uint16 { - roleCounts := map[flow.Role]uint16{ - flow.RoleCollection: 0, - flow.RoleConsensus: 0, - flow.RoleExecution: 0, - flow.RoleVerification: 0, - flow.RoleAccess: 0, - } - for _, node := range nodes { - roleCounts[node.Role] = roleCounts[node.Role] + 1 - } - - return roleCounts +func (h zeroLoggerHook) Run(_ *zerolog.Event, _ zerolog.Level, msg string) { + h.logs.WriteString(msg) } diff --git a/cmd/bootstrap/dkg/dkg.go b/cmd/bootstrap/dkg/dkg.go index 3b65f44964a..79bacac26d9 100644 --- a/cmd/bootstrap/dkg/dkg.go +++ b/cmd/bootstrap/dkg/dkg.go @@ -3,21 +3,22 @@ package dkg import ( "fmt" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + model "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/module/signature" ) // RandomBeaconKG is centralized BLS threshold signature key generation. -func RandomBeaconKG(n int, seed []byte) (model.DKGData, error) { +func RandomBeaconKG(n int, seed []byte) (model.ThresholdKeySet, error) { if n == 1 { sk, pk, pkGroup, err := thresholdSignKeyGenOneNode(seed) if err != nil { - return model.DKGData{}, fmt.Errorf("Beacon KeyGen failed: %w", err) + return model.ThresholdKeySet{}, fmt.Errorf("Beacon KeyGen failed: %w", err) } - dkgData := model.DKGData{ + dkgData := model.ThresholdKeySet{ PrivKeyShares: sk, PubGroupKey: pkGroup, PubKeyShares: pk, @@ -25,19 +26,19 @@ func RandomBeaconKG(n int, seed []byte) (model.DKGData, error) { return dkgData, nil } - skShares, pkShares, pkGroup, err := crypto.BLSThresholdKeyGen(int(n), - signature.RandomBeaconThreshold(int(n)), seed) + skShares, pkShares, pkGroup, err := crypto.BLSThresholdKeyGen(n, + signature.RandomBeaconThreshold(n), seed) if err != nil { - return model.DKGData{}, fmt.Errorf("Beacon KeyGen failed: %w", err) + return model.ThresholdKeySet{}, fmt.Errorf("Beacon KeyGen failed: %w", err) } - dkgData := model.DKGData{ + randomBeaconData := model.ThresholdKeySet{ PrivKeyShares: skShares, PubGroupKey: pkGroup, PubKeyShares: pkShares, } - return dkgData, nil + return randomBeaconData, nil } // Beacon KG with one node diff --git a/cmd/bootstrap/dkg/dkg_test.go b/cmd/bootstrap/dkg/dkg_test.go index a5d5a56de18..326632dae80 100644 --- a/cmd/bootstrap/dkg/dkg_test.go +++ b/cmd/bootstrap/dkg/dkg_test.go @@ -3,14 +3,14 @@ package dkg import ( "testing" + "github.com/onflow/crypto" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/utils/unittest" ) func TestBeaconKG(t *testing.T) { - seed := unittest.SeedFixture(2 * crypto.SeedMinLenDKG) + seed := unittest.SeedFixture(2 * crypto.KeyGenSeedMinLen) // n = 0 _, err := RandomBeaconKG(0, seed) diff --git a/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.047e39d906e9ff961fa76cb5f479942d862c6cb1e768eb4525d6066cd707d595.json b/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.047e39d906e9ff961fa76cb5f479942d862c6cb1e768eb4525d6066cd707d595.json deleted file mode 100644 index 1c54ec0d6aa..00000000000 --- a/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.047e39d906e9ff961fa76cb5f479942d862c6cb1e768eb4525d6066cd707d595.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "Role": 
"consensus", - "Address": "example.com", - "NodeID": "047e39d906e9ff961fa76cb5f479942d862c6cb1e768eb4525d6066cd707d595", - "Weight": 0, - "NetworkPubKey": "6fSXKlHhPLMqWo3nhBsyjTZlxGBl5HC4ZqC7p1z4GEHx48UKnDaKdz0QdOxzSJP2G+P3bzDiJdLbvEd1CSvVgA==", - "StakingPubKey": "gdQQp6cbOzc/pnhOMl8mNQTAsbkuGs78Q72/zmhrAK+Ii2c/v04F9CDEo+FuVc0eALL/T0ioZwaTFCBO9+JRjfakqOiBCI9b7Xj4E8Dv4vBHDQyLOBBqXeA2VLAJYgFL" -} diff --git a/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.79a7f711ba44dfdcfe774769eb97af6af732337fbcf96b788dbe6c51d29c5ec6.json b/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.79a7f711ba44dfdcfe774769eb97af6af732337fbcf96b788dbe6c51d29c5ec6.json new file mode 100644 index 00000000000..60f83924e4d --- /dev/null +++ b/cmd/bootstrap/example_files/partner-node-infos/public-genesis-information/node-info.pub.79a7f711ba44dfdcfe774769eb97af6af732337fbcf96b788dbe6c51d29c5ec6.json @@ -0,0 +1,9 @@ +{ + "Role": "consensus", + "Address": "example.com:1234", + "NodeID": "79a7f711ba44dfdcfe774769eb97af6af732337fbcf96b788dbe6c51d29c5ec6", + "Weight": 0, + "NetworkPubKey": "0162f95166d39db8a6486625813019fcc8bb3d9439ad1e57b44f7bf01235cbcabd66411c3faa98de806e439cb4372275b76dcd3af7d384d24851cbae89f92cda", + "StakingPubKey": "84f806be7e4db914358e5b66a405244161ad5bfd87939b3a9b428a941baa6ae245d0d7a6cef684bd7168815fda5e9b6506b2cc87ec9c52576913d1990fd7c376fc2c6884247ff6a7c0c46ca143e3697422913d53c134b9534a199b7fc8f57d50", + "StakingPoP": "oEz2R3qe86/ZaRAemZfpdjcBZcOt7RHLjMhqjf7gg99XMsaLjmDma94Rr9ylciti" +} \ No newline at end of file diff --git a/cmd/bootstrap/gcs/gcs.go b/cmd/bootstrap/gcs/gcs.go index f14952f13df..457bf2f4aef 100644 --- a/cmd/bootstrap/gcs/gcs.go +++ b/cmd/bootstrap/gcs/gcs.go @@ -33,14 +33,19 @@ func (g *googleBucket) NewClient(ctx context.Context) (*storage.Client, error) { return client, nil } +type GCSFile struct { + Name string + MD5 []byte +} + // GetFiles returns a list of file names within the Google bucket -func (g *googleBucket) GetFiles(ctx context.Context, client *storage.Client, prefix, delimiter string) ([]string, error) { +func (g *googleBucket) GetFiles(ctx context.Context, client *storage.Client, prefix, delimiter string) ([]GCSFile, error) { it := client.Bucket(g.Name).Objects(ctx, &storage.Query{ Prefix: prefix, Delimiter: delimiter, }) - var files []string + var files []GCSFile for { attrs, err := it.Next() if err == iterator.Done { @@ -50,7 +55,10 @@ func (g *googleBucket) GetFiles(ctx context.Context, client *storage.Client, pre return nil, err } - files = append(files, attrs.Name) + files = append(files, GCSFile{ + Name: attrs.Name, + MD5: attrs.MD5, + }) } return files, nil @@ -88,15 +96,16 @@ func (g *googleBucket) DownloadFile(ctx context.Context, client *storage.Client, // UploadFile uploads a file to the google bucket func (g *googleBucket) UploadFile(ctx context.Context, client *storage.Client, destination, source string) error { - upload := client.Bucket(g.Name).Object(destination).NewWriter(ctx) - defer upload.Close() - + // Validate source file exists before creating GCS writer to avoid creating empty files file, err := os.Open(source) if err != nil { return fmt.Errorf("Error opening upload file: %w", err) } defer file.Close() + upload := client.Bucket(g.Name).Object(destination).NewWriter(ctx) + defer upload.Close() + _, err = io.Copy(upload, file) if err != nil { return fmt.Errorf("Error uploading file: %w", err) diff --git a/cmd/bootstrap/run/block.go b/cmd/bootstrap/run/block.go index 
d5a4a10a38d..297ff7cdcc0 100644 --- a/cmd/bootstrap/run/block.go +++ b/cmd/bootstrap/run/block.go @@ -1,32 +1,26 @@ package run import ( + "fmt" "time" "github.com/onflow/flow-go/model/flow" ) -func GenerateRootBlock(chainID flow.ChainID, parentID flow.Identifier, height uint64, timestamp time.Time) *flow.Block { - - payload := &flow.Payload{ - Guarantees: nil, - Seals: nil, - } - header := &flow.Header{ +func GenerateRootHeaderBody(chainID flow.ChainID, parentID flow.Identifier, height uint64, view uint64, timestamp time.Time) (*flow.HeaderBody, error) { + rootHeaderBody, err := flow.NewRootHeaderBody(flow.UntrustedHeaderBody{ ChainID: chainID, ParentID: parentID, Height: height, - PayloadHash: payload.Hash(), - Timestamp: timestamp, - View: 0, + Timestamp: uint64(timestamp.UnixMilli()), + View: view, ParentVoterIndices: nil, ParentVoterSigData: nil, ProposerID: flow.ZeroID, - ProposerSigData: nil, + }) + if err != nil { + return nil, fmt.Errorf("failed to generate root header body: %w", err) } - return &flow.Block{ - Header: header, - Payload: payload, - } + return rootHeaderBody, nil } diff --git a/cmd/bootstrap/run/cluster_block.go b/cmd/bootstrap/run/cluster_block.go index db9ee0b9caa..db3e0a9220c 100644 --- a/cmd/bootstrap/run/cluster_block.go +++ b/cmd/bootstrap/run/cluster_block.go @@ -16,7 +16,11 @@ func GenerateRootClusterBlocks(epoch uint64, clusters flow.ClusterList) []*clust panic(fmt.Sprintf("failed to get cluster by index: %v", i)) } - clusterBlocks[i] = clusterstate.CanonicalRootBlock(epoch, cluster) + rootBlock, err := clusterstate.CanonicalRootBlock(epoch, cluster) + if err != nil { + panic(fmt.Errorf("failed to get canonical root block: %w", err)) + } + clusterBlocks[i] = rootBlock } return clusterBlocks } diff --git a/cmd/bootstrap/run/cluster_qc.go b/cmd/bootstrap/run/cluster_qc.go index fa91e5cc4f8..185e4d43c2d 100644 --- a/cmd/bootstrap/run/cluster_qc.go +++ b/cmd/bootstrap/run/cluster_qc.go @@ -14,13 +14,15 @@ import ( "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/local" ) // GenerateClusterRootQC creates votes and generates a QC based on participant data -func GenerateClusterRootQC(signers []bootstrap.NodeInfo, allCommitteeMembers flow.IdentityList, clusterBlock *cluster.Block) (*flow.QuorumCertificate, error) { - clusterRootBlock := model.GenesisBlockFromFlow(clusterBlock.Header) +func GenerateClusterRootQC(signers []bootstrap.NodeInfo, allCommitteeMembers flow.IdentitySkeletonList, clusterBlock *cluster.Block) (*flow.QuorumCertificate, error) { + if !allCommitteeMembers.Sorted(flow.Canonical[flow.IdentitySkeleton]) { + return nil, fmt.Errorf("can't create root cluster QC: committee members are not sorted in canonical order") + } + clusterRootBlock := model.GenesisBlockFromFlow(clusterBlock.ToHeader()) // STEP 1: create votes for cluster root block votes, err := createRootBlockVotes(signers, clusterRootBlock) @@ -28,9 +30,21 @@ func GenerateClusterRootQC(signers []bootstrap.NodeInfo, allCommitteeMembers flo return nil, err } + // STEP 1.5: patch committee to include dynamic identities. This is a temporary measure until bootstrapping is refactored. + // We need a Committee for creating the cluster's root QC and the Committee requires dynamic identities to be instantiated. + // The clustering for the root block contains only static identities, since no state transitions have happened yet.
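[Illustrative aside, not part of the patch] GenerateClusterRootQC above now rejects committees that are not in canonical order. The shape of that precondition check can be mirrored with the standard library alone; the nodeID type and byte-wise comparator below are hypothetical stand-ins, not flow-go APIs:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// nodeID is a hypothetical stand-in for a 32-byte node identifier.
type nodeID [32]byte

// isCanonical reports whether ids are in non-decreasing byte order,
// mirroring the "sorted in canonical order" precondition above.
func isCanonical(ids []nodeID) bool {
	return sort.SliceIsSorted(ids, func(i, j int) bool {
		return bytes.Compare(ids[i][:], ids[j][:]) < 0
	})
}

func main() {
	a, b := nodeID{1}, nodeID{2}
	fmt.Println(isCanonical([]nodeID{a, b})) // true
	fmt.Println(isCanonical([]nodeID{b, a})) // false
}

The patch resumes below with the committee-patching loop that the STEP 1.5 comment describes.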
+ dynamicCommitteeMembers := make(flow.IdentityList, 0, len(allCommitteeMembers)) + for _, participant := range allCommitteeMembers { + dynamicCommitteeMembers = append(dynamicCommitteeMembers, &flow.Identity{ + IdentitySkeleton: *participant, + DynamicIdentity: flow.DynamicIdentity{ + EpochParticipationStatus: flow.EpochParticipationStatusActive, + }, + }) + } + // STEP 2: create VoteProcessor - ordered := allCommitteeMembers.Sort(order.Canonical) - committee, err := committees.NewStaticCommittee(ordered, flow.Identifier{}, nil, nil) + committee, err := committees.NewStaticCommittee(dynamicCommitteeMembers, flow.Identifier{}, nil, nil) if err != nil { return nil, err } @@ -80,7 +94,7 @@ func createRootBlockVotes(participants []bootstrap.NodeInfo, rootBlock *model.Bl if err != nil { return nil, fmt.Errorf("could not retrieve private keys for participant: %w", err) } - me, err := local.New(participant.Identity(), keys.StakingKey) + me, err := local.New(participant.Identity().IdentitySkeleton, keys.StakingKey) if err != nil { return nil, err } diff --git a/cmd/bootstrap/run/cluster_qc_test.go b/cmd/bootstrap/run/cluster_qc_test.go index 19a379d5b47..eb5e5cc69aa 100644 --- a/cmd/bootstrap/run/cluster_qc_test.go +++ b/cmd/bootstrap/run/cluster_qc_test.go @@ -2,6 +2,7 @@ package run import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -14,25 +15,21 @@ import ( func TestGenerateClusterRootQC(t *testing.T) { participants := createClusterParticipants(t, 3) - block := unittest.BlockFixture() - - block.Payload.Seals = nil - block.Payload.Guarantees = nil - block.Header.ParentID = flow.ZeroID - block.Header.View = 3 - block.Header.Height = 0 - block.Header.PayloadHash = block.Payload.Hash() - - clusterBlock := cluster.Block{ - Header: &flow.Header{ - ParentID: flow.ZeroID, - View: 42, + clusterBlock, err := cluster.NewRootBlock( + cluster.UntrustedBlock{ + HeaderBody: flow.HeaderBody{ + ChainID: flow.Emulator, + ParentID: flow.ZeroID, + Timestamp: uint64(time.Now().UnixMilli()), + View: 42, + }, + Payload: *cluster.NewEmptyPayload(flow.ZeroID), }, - } - payload := cluster.EmptyPayload(flow.ZeroID) - clusterBlock.SetPayload(payload) + ) + require.NoError(t, err) - _, err := GenerateClusterRootQC(participants, model.ToIdentityList(participants), &clusterBlock) + orderedParticipants := model.ToIdentityList(participants).Sort(flow.Canonical[flow.Identity]).ToSkeleton() + _, err = GenerateClusterRootQC(participants, orderedParticipants, clusterBlock) require.NoError(t, err) } @@ -44,14 +41,16 @@ func createClusterParticipants(t *testing.T, n int) []model.NodeInfo { participants := make([]model.NodeInfo, n) for i, id := range ids { - participants[i] = model.NewPrivateNodeInfo( + var err error + participants[i], err = model.NewPrivateNodeInfo( id.NodeID, id.Role, id.Address, - id.Weight, + id.InitialWeight, networkKeys[i], stakingKeys[i], ) + require.NoError(t, err) } return participants diff --git a/cmd/bootstrap/run/epochs.go b/cmd/bootstrap/run/epochs.go new file mode 100644 index 00000000000..b97c6b91b03 --- /dev/null +++ b/cmd/bootstrap/run/epochs.go @@ -0,0 +1,338 @@ +package run + +import ( + "encoding/hex" + "fmt" + + "github.com/onflow/cadence" + "github.com/onflow/crypto" + "github.com/rs/zerolog" + "golang.org/x/exp/slices" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/bootstrap" + model "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/cluster" + 
"github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/state/protocol/inmem" +) + +// GenerateRecoverEpochTxArgs generates the required transaction arguments for the `recoverEpoch` transaction. +// No errors are expected during normal operation. +func GenerateRecoverEpochTxArgs( + log zerolog.Logger, + internalNodePrivInfoDir string, + nodeConfigJson string, + collectionClusters int, + recoveryEpochCounter uint64, + rootChainID flow.ChainID, + numViewsInStakingAuction uint64, + numViewsInEpoch uint64, + recoveryEpochTargetDuration uint64, + unsafeAllowOverWrite bool, + excludeNodeIDs flow.IdentifierList, // applied as set-minus operation + includeNodeIDs flow.IdentifierList, // applied as set-union operation + snapshot *inmem.Snapshot, +) ([]cadence.Value, error) { + log.Info().Msg("collecting internal node network and staking keys") + internalNodes, err := common.ReadFullInternalNodeInfos(log, internalNodePrivInfoDir, nodeConfigJson) + if err != nil { + return nil, fmt.Errorf("failed to read full internal node infos: %w", err) + } + + epochProtocolState, err := snapshot.EpochProtocolState() + if err != nil { + return nil, fmt.Errorf("failed to get epoch protocol state from snapshot: %w", err) + } + currentEpochCommit := epochProtocolState.EpochCommit() + + return GenerateRecoverTxArgsWithDKG( + log, + internalNodes, + collectionClusters, + recoveryEpochCounter, + rootChainID, + numViewsInStakingAuction, + numViewsInEpoch, + recoveryEpochTargetDuration, + unsafeAllowOverWrite, + currentEpochCommit.DKGIndexMap, + currentEpochCommit.DKGParticipantKeys, + currentEpochCommit.DKGGroupKey, + excludeNodeIDs, + includeNodeIDs, + snapshot, + ) +} + +// GenerateRecoverTxArgsWithDKG generates the required transaction arguments for the `recoverEpoch` transaction. +// No errors are expected during normal operation. +func GenerateRecoverTxArgsWithDKG( + log zerolog.Logger, + internalNodes []bootstrap.NodeInfo, + collectionClusters int, + recoveryEpochCounter uint64, + rootChainID flow.ChainID, + numViewsInStakingAuction uint64, + numViewsInEpoch uint64, + recoveryEpochTargetDuration uint64, + unsafeAllowOverWrite bool, + dkgIndexMap flow.DKGIndexMap, + dkgParticipantKeys []crypto.PublicKey, + dkgGroupKey crypto.PublicKey, + excludeNodeIDs flow.IdentifierList, // applied as set-minus operation + includeNodeIDs flow.IdentifierList, // applied as set-union operation + snapshot *inmem.Snapshot, +) ([]cadence.Value, error) { + epoch, err := snapshot.Epochs().Current() + if err != nil { + return nil, fmt.Errorf("could not retrieve current epoch: %w", err) + } + currentEpochCounter := epoch.Counter() + if recoveryEpochCounter != currentEpochCounter+1 { + return nil, fmt.Errorf("invalid recovery epoch counter, expect %d", currentEpochCounter+1) + } + currentEpochPhase, err := snapshot.EpochPhase() + if err != nil { + return nil, fmt.Errorf("could not retrieve epoch phase for snapshot: %w", err) + } + if currentEpochPhase == flow.EpochPhaseCommitted { + return nil, fmt.Errorf("next epoch has been already committed, will not build recovery transaction") + } + + // including only (conjunction): + // * nodes authorized to actively participate in the current epoch (i.e. 
excluding ejected, joining or leaving nodes) + // * with _positive_ weight, + // * nodes that were not explicitly excluded + eligibleEpochIdentities, err := snapshot.Identities(filter.And( + filter.IsValidCurrentEpochParticipant, + filter.HasWeightGreaterThanZero[flow.Identity], + filter.Not(filter.HasNodeID[flow.Identity](excludeNodeIDs...)))) + if err != nil { + return nil, fmt.Errorf("failed to get valid protocol participants from snapshot: %w", err) + } + // We expect canonical ordering here, because the Identities are originating from a protocol state snapshot, + // which by protocol convention maintains the Identities in canonical order. Removing elements from a + // canonically-ordered list still retains canonical ordering. Sanity check to enforce this: + if !eligibleEpochIdentities.Sorted(flow.Canonical[flow.Identity]) { + return nil, fmt.Errorf("identities from snapshot not in canonical order") + } + // It would be contradictory if both `excludeNodeIDs` and `includeNodeIDs` contained the same ID. + // Specifically, we expect the set intersection between `excludeNodeIDs` and `includeNodeIDs` to + // be empty. To prevent first removing a node and then adding it back, we sanity-check consistency: + includeIDsLookup := includeNodeIDs.Lookup() + for _, id := range excludeNodeIDs { + if _, found := includeIDsLookup[id]; found { + return nil, fmt.Errorf("contradictory input: node ID %s is listed in both includeNodeIDs and excludeNodeIDs", id) + } + } + + // STEP I: compile Cluster Assignment, Cluster Root Blocks and QCs + // which are needed to initiate each cluster's consensus process + // ───────────────────────────────────────────────────────────────────────────────────────────── + // SHORTCUT, must be removed for full Collector Node decentralization. + // Currently, we are assuming access to a supermajority of Collector Nodes' private staking keys. + // In the future, this part needs to be refactored. At maturity, we need a supermajority of + // Collector nodes' votes on their respective cluster root block. The cluster root block can be + // deterministically generated by each Collector locally - though the root block for each cluster + // is different (for BFT reasons). Hence, the Collectors must be given the cluster-assignment + // upfront in order to generate their cluster's root block and vote for it. + // GenerateRecoverEpochTxArgs could aggregate the votes to a QC or alternatively the QC could + // be passed in as an input to GenerateRecoverEpochTxArgs (with vote collection and aggregation + // happening e.g. via a smart contract or mediated by the Flow Foundation). Either way, we + // need a QC for each cluster's root block in order to initiate each cluster's consensus process. + + // separate collector nodes by internal and partner nodes + collectors := eligibleEpochIdentities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) + internalCollectors := make(flow.IdentityList, 0) + partnerCollectors := make(flow.IdentityList, 0) + + internalNodesMap := make(map[flow.Identifier]struct{}) + for _, node := range internalNodes { + if !eligibleEpochIdentities.Exists(node.Identity()) { + log.Warn().Msgf("Internal node (role=%s id=%x addr=%s) found in the internal directory but does not exist in the most recent protocol state snapshot.
This node will be excluded from the recovery epoch.", node.Role, node.NodeID, node.Address) + continue + } + // only add nodes which exist in protocol state snapshot + internalNodesMap[node.NodeID] = struct{}{} + } + // Filter internalNodes so it only contains nodes which are valid for inclusion in the epoch. + // This is a safety measure: just in case subsequent functions don't properly account for additional nodes, + // we proactively remove them from consideration here. + internalNodes = slices.DeleteFunc(slices.Clone(internalNodes), func(info model.NodeInfo) bool { + _, isCurrentEligibleEpochParticipant := internalNodesMap[info.NodeID] + return !isCurrentEligibleEpochParticipant + }) + + for _, collector := range collectors { + if _, ok := internalNodesMap[collector.NodeID]; ok { + internalCollectors = append(internalCollectors, collector) + } else { + partnerCollectors = append(partnerCollectors, collector) + } + } + + log.Info().Msgf("partitioning %d partners + %d internal nodes into %d collector clusters", len(partnerCollectors), len(internalCollectors), collectionClusters) + assignments, clusters, err := common.ConstructClusterAssignment(log, partnerCollectors, internalCollectors, collectionClusters) + if err != nil { + return nil, fmt.Errorf("unable to generate cluster assignment: %w", err) + } + log.Info().Msg("") + + log.Info().Msg("constructing root blocks for collection node clusters") + clusterBlocks := GenerateRootClusterBlocks(recoveryEpochCounter, clusters) + log.Info().Msg("") + + log.Info().Msg("constructing root QCs for collection node clusters") + clusterQCs := ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) + log.Info().Msg("") + + // STEP II: Public Key Vector with Random Beacon public keys + // determines which consensus nodes can contribute to the Random Beacon during the Recovery Epoch + // ───────────────────────────────────────────────────────────────────────────────────────────────────────────────── + // Context: recovering from Epoch Fallback Mode requires that a sufficiently large fraction of consensus participants + // has valid random beacon keys (threshold signature scheme). The specific origin of those threshold keys is largely + // irrelevant. Running a centralized key generation process, using keys from an off-chain DKG, or reusing the random + // beacon keys from a prior epoch are all conceptually possible - provided the intersection between the consensus + // committee and the random beacon committee is large enough (for liveness). + // Implemented here: + // In a nutshell, we are carrying the current consensus and collector nodes forward into the next epoch (the Recovery + // Epoch). Removing or adding a small number of nodes here would be possible, but is not implemented at the moment. + // In all cases, a core requirement for liveness is: the fraction of consensus participants in the recovery epoch with + // a valid random beacon key should be significantly larger than the threshold of the threshold-cryptography scheme. + // The EFM Recovery State Machine will heuristically reject recovery attempts (specifically, reject EpochRecover Service + // events) when the intersection between consensus and random beacon committees is too small. + + // NOTE: The RecoveryEpoch will re-use the last successful DKG output. This means that the random beacon committee can be + // different from the consensus committee.
This could happen if the node was ejected from the consensus committee, but it still has to be + // included in the DKG committee since the threshold signature scheme operates on a pre-defined number of participants and cannot be changed. + dkgGroupKeyCdc, cdcErr := cadence.NewString(hex.EncodeToString(dkgGroupKey.Encode())) + if cdcErr != nil { + return nil, fmt.Errorf("failed to convert Random Beacon group key to cadence representation: %w", cdcErr) + } + + // copy DKG index map from the current epoch + dkgIndexMapPairs := make([]cadence.KeyValuePair, 0) + for nodeID, index := range dkgIndexMap { + dkgIndexMapPairs = append(dkgIndexMapPairs, cadence.KeyValuePair{ + Key: cadence.String(nodeID.String()), + Value: cadence.NewInt(index), + }) + } + // copy DKG public keys from the current epoch + dkgPubKeys := make([]cadence.Value, 0) + for k, dkgPubKey := range dkgParticipantKeys { + dkgPubKeyCdc, cdcErr := cadence.NewString(hex.EncodeToString(dkgPubKey.Encode())) + if cdcErr != nil { + return nil, fmt.Errorf("failed to convert public beacon key of participant %d to cadence representation: %w", k, cdcErr) + } + dkgPubKeys = append(dkgPubKeys, dkgPubKeyCdc) + } + // Compile list of NodeIDs that are allowed to participate in the recovery epoch: + // (i) eligible node IDs from the Epoch that the input `snapshot` is from + // (ii) node IDs (manually) specified in `includeNodeIDs` + // We use the set union to combine (i) and (ii). Important: the resulting list of node IDs must be canonically ordered! + nodeIds := make([]cadence.Value, 0) + unionIds := eligibleEpochIdentities.NodeIDs().Union(includeNodeIDs) + // CAUTION: unionIds may not be canonically ordered anymore, due to set union + for _, id := range unionIds.Sort(flow.IdentifierCanonical) { + nodeIdCdc, err := cadence.NewString(id.String()) + if err != nil { + return nil, fmt.Errorf("failed to convert node ID %s to cadence string: %w", id, err) + } + nodeIds = append(nodeIds, nodeIdCdc) + } + + clusterQCAddress := systemcontracts.SystemContractsForChain(rootChainID).ClusterQC.Address.String() + qcVoteData, err := common.ConvertClusterQcsCdc(clusterQCs, clusters, clusterQCAddress) + if err != nil { + return nil, fmt.Errorf("failed to convert cluster QCs to cadence type: %w", err) + } + + currEpochFinalView := epoch.FinalView() + currEpochTargetEndTime := epoch.TargetEndTime() + + // STEP III: compile list of arguments for `recoverEpoch` governance transaction.
+ // ─────────────────────────────────────────────────────────────────────────────── + // order of arguments is taken from the Cadence transaction defined in the core-contracts repo: https://github.com/onflow/flow-core-contracts/blob/807cf69d387d9a46b50fb4b8784a43ce9c2c0471/transactions/epoch/admin/recover_epoch.cdc#L16 + args := []cadence.Value{ + // recovery epoch counter + cadence.NewUInt64(recoveryEpochCounter), + // epoch start view + cadence.NewUInt64(currEpochFinalView + 1), + // staking phase end view + cadence.NewUInt64(currEpochFinalView + numViewsInStakingAuction), + // epoch end view + cadence.NewUInt64(currEpochFinalView + numViewsInEpoch), + // recovery epoch target duration + cadence.NewUInt64(recoveryEpochTargetDuration), + // target end time + cadence.NewUInt64(currEpochTargetEndTime + recoveryEpochTargetDuration), + // clusters, + common.ConvertClusterAssignmentsCdc(assignments), + // cluster qcVoteData + cadence.NewArray(qcVoteData), + // dkg pub keys + cadence.NewArray(dkgPubKeys), + // dkg group key, + dkgGroupKeyCdc, + // dkg index map + cadence.NewDictionary(dkgIndexMapPairs), + // node ids + cadence.NewArray(nodeIds), + // recover the network by initializing a new recover epoch which will increment the smart contract epoch counter + // or overwrite the epoch metadata for the current epoch + cadence.NewBool(unsafeAllowOverWrite), + } + + return args, nil +} + +// ConstructRootQCsForClusters constructs a root QC for each cluster in the list. +// Args: +// - log: the logger instance. +// - clusterList: list of clusters +// - nodeInfos: list of NodeInfos (must contain all internal nodes) +// - clusterBlocks: list of root blocks (one for each cluster) +// Returns: +// - []*flow.QuorumCertificate: the constructed root QCs, one per cluster. +func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate { + if len(clusterBlocks) != len(clusterList) { + log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)). + Msg("number of clusters needs to equal number of cluster blocks") + } + + qcs := make([]*flow.QuorumCertificate, len(clusterBlocks)) + for i, clusterMembers := range clusterList { + signers := filterClusterSigners(clusterMembers, nodeInfos) + log.Info().Msgf("producing QC for cluster (index: %d, size: %d) with %d internal signers", i, len(clusterMembers), len(signers)) + + qc, err := GenerateClusterRootQC(signers, clusterMembers, clusterBlocks[i]) + if err != nil { + log.Fatal().Err(err).Int("cluster index", i).Msg("generating collector cluster root QC failed") + } + qcs[i] = qc + } + + return qcs +} + +// Filters a list of nodes to include only nodes that will sign the QC for the +// given cluster. The resulting list of nodes is only nodes that are in the +// given cluster AND are not partner nodes (i.e. we have the private keys).
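[Illustrative aside, not part of the patch] The view arithmetic in the STEP III argument list above is worth sanity-checking in isolation: the recovery epoch starts one view after the current epoch's final view, and the staking-phase and epoch boundaries are offsets from that same final view. A self-contained sketch with made-up numbers (no flow-go dependency):

package main

import "fmt"

func main() {
	// Example values; in the real transaction these come from the snapshot and CLI flags.
	var (
		currEpochFinalView       uint64 = 100_000 // last view of the current epoch
		numViewsInStakingAuction uint64 = 100
		numViewsInEpoch          uint64 = 4_000
	)

	startView := currEpochFinalView + 1                             // recovery epoch start view
	stakingEndView := currEpochFinalView + numViewsInStakingAuction // staking phase end view
	endView := currEpochFinalView + numViewsInEpoch                 // recovery epoch end view

	fmt.Println(startView, stakingEndView, endView) // 100001 100100 104000
}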
+func filterClusterSigners(cluster flow.IdentitySkeletonList, nodeInfos []model.NodeInfo) []model.NodeInfo { + var filtered []model.NodeInfo + for _, node := range nodeInfos { + _, isInCluster := cluster.ByNodeID(node.NodeID) + isPrivateKeyAvailable := node.Type() == model.NodeInfoTypePrivate + + if isInCluster && isPrivateKeyAvailable { + filtered = append(filtered, node) + } + } + + return filtered +} diff --git a/cmd/bootstrap/run/epochs_test.go b/cmd/bootstrap/run/epochs_test.go new file mode 100644 index 00000000000..05615b7b830 --- /dev/null +++ b/cmd/bootstrap/run/epochs_test.go @@ -0,0 +1,107 @@ +package run + +import ( + "testing" + + "github.com/onflow/cadence" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "pgregory.net/rapid" + + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestGenerateRecoverTxArgsWithDKG_ExcludeIncludeParticipants tests that GenerateRecoverTxArgsWithDKG produces expected arguments +// for the recover epoch transaction when excluding and including recovery epoch participants. +// This test uses property-based testing to generate random combinations of participants to exclude and include, and checks that the +// generated arguments match the expected output. +// This test assumes that we include nodes that are not part of the protocol state and exclude nodes that are part of the protocol state. +// This test also verifies that the DKG index map contains all consensus nodes despite the exclusion and inclusion filters. +func TestGenerateRecoverTxArgsWithDKG_ExcludeIncludeParticipants(testifyT *testing.T) { + utils.RunWithSporkBootstrapDir(testifyT, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { + log := unittest.Logger() + internalNodes, err := common.ReadFullInternalNodeInfos(log, internalPrivDir, configPath) + require.NoError(testifyT, err) + partnerNodes, err := common.ReadFullPartnerNodeInfos(log, partnerWeights, partnerDir) + require.NoError(testifyT, err) + + allNodeIds := make(flow.IdentityList, 0) + for _, node := range append(internalNodes, partnerNodes...) { + allNodeIds = append(allNodeIds, node.Identity()) + } + + rootSnapshot := unittest.RootSnapshotFixture(allNodeIds) + allIdentities, err := rootSnapshot.Identities(filter.Any) + require.NoError(testifyT, err) + + rapid.Check(testifyT, func(t *rapid.T) { + numberOfNodesToInclude := rapid.IntRange(0, 3).Draw(t, "nodes-to-include") + numberOfNodesToExclude := rapid.UintRange(0, 3).Draw(t, "nodes-to-exclude") + + // we specifically omit collection nodes from the exclusion list since we have a specific + // check that there must be a valid cluster of collection nodes.
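[Illustrative aside, not part of the patch] The test body below exercises the exclude/include semantics documented in GenerateRecoverTxArgsWithDKG: `excludeNodeIDs` is applied as a set-minus, `includeNodeIDs` as a set-union, and the two inputs must be disjoint. The same logic in a stripped-down, stdlib-only form (string IDs stand in for flow.Identifier):

package main

import "fmt"

// recoveryParticipants applies the set-minus / set-union semantics described above.
func recoveryParticipants(all, exclude, include []string) ([]string, error) {
	excluded := make(map[string]struct{}, len(exclude))
	for _, id := range exclude {
		excluded[id] = struct{}{}
	}
	// contradiction check: an ID must not be both excluded and included
	for _, id := range include {
		if _, ok := excluded[id]; ok {
			return nil, fmt.Errorf("node ID %s is in both include and exclude lists", id)
		}
	}
	seen := make(map[string]struct{})
	var out []string
	for _, id := range all { // set-minus
		if _, ok := excluded[id]; !ok {
			out = append(out, id)
			seen[id] = struct{}{}
		}
	}
	for _, id := range include { // set-union
		if _, ok := seen[id]; !ok {
			out = append(out, id)
		}
	}
	return out, nil
}

func main() {
	ids, _ := recoveryParticipants([]string{"a", "b", "c"}, []string{"b"}, []string{"d"})
	fmt.Println(ids) // [a c d]
}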
+ excludedNodes, err := allIdentities.Filter( + filter.Not(filter.HasRole[flow.Identity](flow.RoleCollection))).Sample(numberOfNodesToExclude) + require.NoError(t, err) + excludeNodeIds := excludedNodes.NodeIDs() + // an eligible participant is a current epoch participant with a weight greater than zero that has not been explicitly excluded + eligibleEpochIdentities := allIdentities.Filter(filter.And( + filter.IsValidCurrentEpochParticipant, + filter.HasWeightGreaterThanZero[flow.Identity], + filter.Not(filter.HasNodeID[flow.Identity](excludeNodeIds...)))) + + expectedNodeIds := make(map[cadence.String]struct{}) + includeNodeIds := unittest.IdentifierListFixture(numberOfNodesToInclude) + for _, nodeID := range eligibleEpochIdentities.NodeIDs().Union(includeNodeIds) { + expectedNodeIds[cadence.String(nodeID.String())] = struct{}{} + } + + epochProtocolState, err := rootSnapshot.EpochProtocolState() + require.NoError(t, err) + currentEpochCommit := epochProtocolState.EpochCommit() + expectedDKGIndexMap := make(map[cadence.String]cadence.Int) + for nodeID, index := range currentEpochCommit.DKGIndexMap { + expectedDKGIndexMap[cadence.String(nodeID.String())] = cadence.NewInt(index) + } + + args, err := GenerateRecoverTxArgsWithDKG( + log, + internalNodes, + 2, // number of collection clusters + currentEpochCommit.Counter+1, + flow.Localnet, + 100, // staking auction length, in views + 4000, // recovery epoch length, in views + 60*60, // recovery epoch duration, in seconds + false, // unsafe overwrite + currentEpochCommit.DKGIndexMap, + currentEpochCommit.DKGParticipantKeys, + currentEpochCommit.DKGGroupKey, + excludeNodeIds, + includeNodeIds, + rootSnapshot, + ) + require.NoError(t, err) + + // dkg index map + dkgIndexMapArgPairs := args[10].(cadence.Dictionary).Pairs + assert.Equal(t, len(dkgIndexMapArgPairs), len(expectedDKGIndexMap)) + for _, pair := range dkgIndexMapArgPairs { + expectedIndex, ok := expectedDKGIndexMap[pair.Key.(cadence.String)] + require.True(t, ok) + require.Equal(t, expectedIndex, pair.Value.(cadence.Int)) + } + // node ids + nodeIDsArgValues := args[11].(cadence.Array).Values + assert.Equal(t, len(nodeIDsArgValues), len(expectedNodeIds)) + for _, nodeId := range nodeIDsArgValues { + _, ok := expectedNodeIds[nodeId.(cadence.String)] + require.True(t, ok) + } + }) + }) +} diff --git a/cmd/bootstrap/run/execution_state.go b/cmd/bootstrap/run/execution_state.go index 8520be8de99..c1896668c38 100644 --- a/cmd/bootstrap/run/execution_state.go +++ b/cmd/bootstrap/run/execution_state.go @@ -1,37 +1,24 @@ package run import ( + "fmt" "math" "github.com/rs/zerolog" "go.uber.org/atomic" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine/execution/state/bootstrap" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/ledger/common/pathfinder" "github.com/onflow/flow-go/ledger/complete" ledger "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" "github.com/onflow/flow-go/ledger/complete/wal" + bootstrapFilenames "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" ) -// NOTE: this is now unused and should become part of another tool. 
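[Illustrative aside, not part of the patch] The test above relies on pgregory.net/rapid to draw the exclude/include counts. For readers unfamiliar with the library, here is the same Check/Draw pattern in a self-contained property test; the property (a set-union size bound) is generic, not a flow-go invariant:

package example

import (
	"testing"

	"pgregory.net/rapid"
)

// TestUnionSizeBounds checks a simple property against randomly drawn inputs,
// mirroring how the epochs test draws its exclude/include counts.
func TestUnionSizeBounds(t *testing.T) {
	rapid.Check(t, func(t *rapid.T) {
		a := rapid.SliceOf(rapid.IntRange(0, 10)).Draw(t, "a")
		b := rapid.SliceOf(rapid.IntRange(0, 10)).Draw(t, "b")

		union := make(map[int]struct{})
		for _, x := range a {
			union[x] = struct{}{}
		}
		for _, x := range b {
			union[x] = struct{}{}
		}
		// the union can never exceed the combined size of both inputs
		if len(union) > len(a)+len(b) {
			t.Fatalf("union larger than sum of parts: %d > %d", len(union), len(a)+len(b))
		}
	})
}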
-func GenerateServiceAccountPrivateKey(seed []byte) (flow.AccountPrivateKey, error) { - priv, err := crypto.GeneratePrivateKey(crypto.ECDSASecp256k1, seed) - if err != nil { - return flow.AccountPrivateKey{}, err - } - - return flow.AccountPrivateKey{ - PrivateKey: priv, - SignAlgo: crypto.ECDSASecp256k1, - HashAlgo: hash.SHA2_256, - }, nil -} - func GenerateExecutionState( dbDir string, accountKey flow.AccountPublicKey, @@ -56,7 +43,7 @@ func GenerateExecutionState( return flow.DummyStateCommitment, err } - compactor, err := complete.NewCompactor(ledgerStorage, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(ledgerStorage, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metricsCollector) if err != nil { return flow.DummyStateCommitment, err } @@ -67,11 +54,30 @@ func GenerateExecutionState( <-compactor.Done() }() - return bootstrap.NewBootstrapper( - zerolog.Nop()).BootstrapLedger( - ledgerStorage, - accountKey, - chain, - bootstrapOptions..., - ) + stateCommitment, err := bootstrap. + NewBootstrapper(zerolog.Nop()). + BootstrapLedger( + ledgerStorage, + accountKey, + chain, + bootstrapOptions..., + ) + if err != nil { + return flow.DummyStateCommitment, err + } + + matchTrie, err := ledgerStorage.FindTrieByStateCommit(stateCommitment) + if err != nil { + return flow.DummyStateCommitment, err + } + if matchTrie == nil { + return flow.DummyStateCommitment, fmt.Errorf("bootstrapping failed to produce a checkpoint for trie %v", stateCommitment) + } + + err = wal.StoreCheckpointV6([]*trie.MTrie{matchTrie}, dbDir, bootstrapFilenames.FilenameWALRootCheckpoint, zerolog.Nop(), 1) + if err != nil { + return flow.DummyStateCommitment, fmt.Errorf("failed to store bootstrap checkpoint: %w", err) + } + + return stateCommitment, nil } diff --git a/cmd/bootstrap/run/execution_state_test.go b/cmd/bootstrap/run/execution_state_test.go index 37ab3a23126..569fcd8f695 100644 --- a/cmd/bootstrap/run/execution_state_test.go +++ b/cmd/bootstrap/run/execution_state_test.go @@ -5,6 +5,8 @@ import ( "path/filepath" "testing" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm" @@ -17,7 +19,7 @@ import ( func TestGenerateExecutionState(t *testing.T) { seed := make([]byte, 48) seed[0] = 1 - sk, err := GenerateServiceAccountPrivateKey(seed) + sk, err := generateServiceAccountPrivateKey(seed) require.NoError(t, err) pk := sk.PublicKey(42) @@ -34,3 +36,16 @@ func TestGenerateExecutionState(t *testing.T) { fmt.Printf("commit: %x\n", commit) fmt.Printf("a checkpoint file is generated at: %v\n", trieDir) } + +func generateServiceAccountPrivateKey(seed []byte) (flow.AccountPrivateKey, error) { + priv, err := crypto.GeneratePrivateKey(crypto.ECDSASecp256k1, seed) + if err != nil { + return flow.AccountPrivateKey{}, err + } + + return flow.AccountPrivateKey{ + PrivateKey: priv, + SignAlgo: crypto.ECDSASecp256k1, + HashAlgo: hash.SHA2_256, + }, nil +} diff --git a/cmd/bootstrap/run/qc.go b/cmd/bootstrap/run/qc.go index c07879eb446..fbd542e97d1 100644 --- a/cmd/bootstrap/run/qc.go +++ b/cmd/bootstrap/run/qc.go @@ -3,6 +3,7 @@ package run import ( "fmt" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/onflow/flow-go/consensus/hotstuff" @@ -12,7 +13,6 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/verification"
"github.com/onflow/flow-go/consensus/hotstuff/votecollector" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/model/flow" @@ -24,10 +24,26 @@ type Participant struct { RandomBeaconPrivKey crypto.PrivateKey } +// ParticipantData represents a subset of all consensus participants that contribute to some signing process (at the moment, we only use +// it for the contributors for the root QC). For mainnet, this a *strict subset* of all consensus participants: +// - In an early step during the bootstrapping process, every node operator locally generates votes for the root block from the nodes they +// operate. During the vote-generation step, (see function `constructRootVotes`), `Participants` represents only the operator's own +// nodes (generally a small minority of the entire consensus committee). +// - During a subsequent step of the bootstrapping process, the Flow Foundation collects a supermajority of votes from the consensus +// participants and constructs the root QC (see function `constructRootQC`). Here, `Participants` is only populated with +// the information of the internal nodes that the Flow Foundation runs, but not used. +// +// Furthermore, ParticipantData contains auxiliary data about the DKG to set up the random beacon. Note that the consensus committee 𝒞 and the +// DKG committee 𝒟 are generally _not_ identical. We explicitly want to support that _either_ set can have nodes that are not in +// the other set (formally 𝒟 \ 𝒞 ≠ ∅ and 𝒞 \ 𝒟 ≠ ∅). ParticipantData has no direct representation of the consensus committee 𝒞. type ParticipantData struct { + // Participants of the signing process: only members of the consensus committee can vote, i.e. contribute to the random + // beacon (formally Participants ⊊ 𝒞). However, we allow for consensus committee members that are _not_ part of the DKG + // committee 𝒟 (formally ∅ ≠ Participants \ 𝒟). Participants []Participant - Lookup map[flow.Identifier]flow.DKGParticipant - GroupKey crypto.PublicKey + + DKGCommittee map[flow.Identifier]flow.DKGParticipant // DKG committee 𝒟 (to set up the random beacon) + DKGGroupKey crypto.PublicKey // group key for the DKG committee 𝒟 } func (pd *ParticipantData) Identities() flow.IdentityList { @@ -38,6 +54,29 @@ func (pd *ParticipantData) Identities() flow.IdentityList { return bootstrap.ToIdentityList(nodes) } +// DKGData links nodes to their respective public DKG values. We cover the DKG committee 𝒟, meaning all nodes +// that were authorized to participate in the DKG (even if they did not participate or were unsuccessful). +// Specifically, the function returns: +// - indexMap: a bijective mapping from the node IDs of the DKG committee to {0, 1, …, n-1}, +// with n = |𝒟| the size of the DKG committee 𝒟. In a nutshell, for a nodeID `d`, integer value +// i := DKGIndexMap[d] denotes the index, by which the low-level cryptographic DKG protocol references +// the participant. For details, please see the documentation of [flow.DKGIndexMap]. +// - keyShares: holds the public key share for every member of the DKG committee 𝒟 (irrespective +// of successful participation). For a member of the DKG committee with nodeID `d`, the respective +// public key share is keyShares[DKGIndexMap[d]]. +// +// CAUTION: the returned DKG data may include identifiers for nodes which do not exist in the consensus committee +// and may NOT include entries for all nodes in the consensus committee. 
+func (pd *ParticipantData) DKGData() (indexMap flow.DKGIndexMap, keyShares []crypto.PublicKey) { + indexMap = make(flow.DKGIndexMap, len(pd.DKGCommittee)) + keyShares = make([]crypto.PublicKey, len(pd.DKGCommittee)) + for nodeID, participant := range pd.DKGCommittee { + indexMap[nodeID] = int(participant.Index) + keyShares[participant.Index] = participant.KeyShare + } + return indexMap, keyShares +} + // GenerateRootQC generates QC for root block, caller needs to provide votes for root QC and // participantData to build the QC. // NOTE: at the moment, we require private keys for one node because we are re-using the full business logic, @@ -53,14 +92,14 @@ func GenerateRootQC(block *flow.Block, votes []*model.Vote, participantData *Par error, // exception or could not construct qc ) { // create consensus committee's state - committee, err := committees.NewStaticCommittee(identities, flow.Identifier{}, participantData.Lookup, participantData.GroupKey) + committee, err := committees.NewStaticCommittee(identities, flow.Identifier{}, participantData.DKGCommittee, participantData.DKGGroupKey) if err != nil { return nil, nil, err } // STEP 1: create VoteProcessor var createdQC *flow.QuorumCertificate - hotBlock := model.GenesisBlockFromFlow(block.Header) + hotBlock := model.GenesisBlockFromFlow(block.ToHeader()) processor, err := votecollector.NewBootstrapCombinedVoteProcessor(zerolog.Logger{}, committee, hotBlock, func(qc *flow.QuorumCertificate) { createdQC = qc }) @@ -107,7 +146,7 @@ func GenerateRootQC(block *flow.Block, votes []*model.Vote, participantData *Par // GenerateRootBlockVotes generates votes for root block based on participantData func GenerateRootBlockVotes(block *flow.Block, participantData *ParticipantData) ([]*model.Vote, error) { - hotBlock := model.GenesisBlockFromFlow(block.Header) + hotBlock := model.GenesisBlockFromFlow(block.ToHeader()) n := len(participantData.Participants) fmt.Println("Number of staked consensus nodes: ", n) @@ -120,7 +159,7 @@ func GenerateRootBlockVotes(block *flow.Block, participantData *ParticipantData) if err != nil { return nil, fmt.Errorf("could not get private keys for participant: %w", err) } - me, err := local.New(p.Identity(), keys.StakingKey) + me, err := local.New(p.Identity().IdentitySkeleton, keys.StakingKey) if err != nil { return nil, err } @@ -145,30 +184,40 @@ func createValidator(committee hotstuff.DynamicCommittee) (hotstuff.Validator, e return hotstuffValidator, nil } -// GenerateQCParticipantData generates QC participant data used to create the -// random beacon and staking signatures on the QC. +// GenerateQCParticipantData assembles the private information of a subset (`internalNodes`) of the consensus +// committee (`allNodes`). // -// allNodes must be in the same order that was used when running the DKG. +// LIMITATION: this function only supports the 'trusted dealer' model, where for the consensus committee (`allNodes`) +// a trusted dealer generated the threshold-signature key (`dkgData` containing key shares and group key). Therefore, +// `allNodes` must be in the same order that was used when running the DKG.
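[Illustrative aside, not part of the patch] The 'trusted dealer' model referenced above reduces to a single threshold key generation call, as exercised by qc_test.go later in this patch. A sketch using onflow/crypto; the committee size, threshold value, and error handling are illustrative assumptions, not values prescribed by flow-go:

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/onflow/crypto"
)

func main() {
	n := 4 // size of the consensus committee at genesis

	// The dealer draws one seed and derives all key shares from it.
	seed := make([]byte, crypto.KeyGenSeedMinLen)
	if _, err := rand.Read(seed); err != nil {
		panic(err)
	}

	// Assumed threshold for illustration; flow-go derives it from the committee size.
	threshold := n / 2

	// One call yields every participant's private share, the matching public
	// shares, and the group public key that verifiers use.
	privShares, pubShares, groupKey, err := crypto.BLSThresholdKeyGen(n, threshold, seed)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(privShares), len(pubShares), groupKey != nil) // 4 4 true
}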
+func GenerateQCParticipantData(allNodes, internalNodes []bootstrap.NodeInfo, dkgData dkg.ThresholdKeySet) (*ParticipantData, error) { // stakingNodes can include external validators, so it can be longer than internalNodes if len(allNodes) < len(internalNodes) { return nil, fmt.Errorf("need at least as many staking public keys as private keys (pub=%d, priv=%d)", len(allNodes), len(internalNodes)) } - // length of DKG participants needs to match stakingNodes, since we run DKG for external and internal validators + // The following code is a SHORTCUT, assuming a trusted dealer at network genesis to initialize the Random Beacon + // key shares for the first epoch. In addition to the staking keys (which are generated by each node individually), + // the Random Beacon key shares are needed to sign the QC for the root block. + // On the one hand, a trusted dealer can generate the key shares nearly instantaneously, which significantly + // simplifies the coordination of the consensus committee prior to network genesis. On the other hand, there + // are centralization concerns with employing a trusted dealer. + // In the future, we want to re-use a DKG result from a prior Flow instance, instead of relying on a trusted dealer. + // However, when using DKG results later (or re-using them to recover from Epoch Fallback Mode), there is a chance + // that the DKG committee 𝒟 and the consensus committee 𝒞 might differ by some nodes. In this case, the following + // logic would need to be generalized. Note that the output struct `ParticipantData`, which we construct below, + // already handles cases with 𝒟 \ 𝒞 ≠ ∅ and/or 𝒞 \ 𝒟 ≠ ∅. + // + // However, the logic in this function only supports the trusted dealer model! + // For further details see issue (Epic) https://github.com/onflow/flow-go/issues/6214 if len(allNodes) != len(dkgData.PrivKeyShares) { - return nil, fmt.Errorf("need exactly the same number of staking public keys as DKG private participants") + return nil, fmt.Errorf("only trusted dealer for DKG supported: need exactly the same number of staking public keys as DKG private participants (all=%d, dkg=%d)", len(allNodes), len(dkgData.PrivKeyShares)) } - qcData := &ParticipantData{} - - participantLookup := make(map[flow.Identifier]flow.DKGParticipant) - // the index here is important - we assume allNodes is in the same order as the DKG - for i := 0; i < len(allNodes); i++ { + participantLookup := make(map[flow.Identifier]flow.DKGParticipant) + for i, node := range allNodes { // assign a node to a DKG data entry, using the canonical ordering - node := allNodes[i] participantLookup[node.NodeID] = flow.DKGParticipant{ KeyShare: dkgData.PubKeyShares[i], Index: uint(i), @@ -176,19 +225,18 @@ func GenerateQCParticipantData(allNodes, internalNodes []bootstrap.NodeInfo, dkg } // the QC will be signed by everyone in internalNodes + qcData := &ParticipantData{} for _, node := range internalNodes { - if node.NodeID == flow.ZeroID { return nil, fmt.Errorf("node id cannot be zero") } - if node.Weight == 0 { return nil, fmt.Errorf("node (id=%s) cannot have 0 weight", node.NodeID) } dkgParticipant, ok := participantLookup[node.NodeID] if !ok { - return nil, fmt.Errorf("nonexistannt node id (%x) in participant lookup", node.NodeID) + return nil, fmt.Errorf("nonexistent node id (%x) in participant lookup", node.NodeID) } dkgIndex := dkgParticipant.Index @@ -198,8 +246,8 @@ func GenerateQCParticipantData(allNodes, internalNodes []bootstrap.NodeInfo, dkg }) } - qcData.Lookup = participantLookup - qcData.GroupKey =
dkgData.PubGroupKey + qcData.DKGCommittee = participantLookup + qcData.DKGGroupKey = dkgData.PubGroupKey return qcData, nil } diff --git a/cmd/bootstrap/run/qc_test.go b/cmd/bootstrap/run/qc_test.go index 5deed36d1ed..203b54bc777 100644 --- a/cmd/bootstrap/run/qc_test.go +++ b/cmd/bootstrap/run/qc_test.go @@ -4,12 +4,11 @@ import ( "crypto/rand" "testing" + "github.com/onflow/crypto" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/utils/unittest" ) @@ -17,7 +16,7 @@ import ( func TestGenerateRootQC(t *testing.T) { participantData := createSignerData(t, 3) - block := unittest.GenesisFixture() + block := unittest.Block.Genesis(flow.Emulator) votes, err := GenerateRootBlockVotes(block, participantData) require.NoError(t, err) @@ -30,7 +29,7 @@ func TestGenerateRootQC(t *testing.T) { func TestGenerateRootQCWithSomeInvalidVotes(t *testing.T) { participantData := createSignerData(t, 10) - block := unittest.GenesisFixture() + block := unittest.Block.Genesis(flow.Emulator) votes, err := GenerateRootBlockVotes(block, participantData) require.NoError(t, err) @@ -45,12 +44,12 @@ func TestGenerateRootQCWithSomeInvalidVotes(t *testing.T) { } func createSignerData(t *testing.T, n int) *ParticipantData { - identities := unittest.IdentityListFixture(n).Sort(order.Canonical) + identities := unittest.IdentityListFixture(n).Sort(flow.Canonical[flow.Identity]) networkingKeys := unittest.NetworkingKeys(n) stakingKeys := unittest.StakingKeys(n) - seed := make([]byte, crypto.SeedMinLenDKG) + seed := make([]byte, crypto.KeyGenSeedMinLen) _, err := rand.Read(seed) require.NoError(t, err) randomBSKs, randomBPKs, groupKey, err := crypto.BLSThresholdKeyGen(n, @@ -70,14 +69,15 @@ func createSignerData(t *testing.T, n int) *ParticipantData { participantLookup[identity.NodeID] = lookupParticipant // add to participant list - nodeInfo := bootstrap.NewPrivateNodeInfo( + nodeInfo, err := bootstrap.NewPrivateNodeInfo( identity.NodeID, identity.Role, identity.Address, - identity.Weight, + identity.InitialWeight, networkingKeys[i], stakingKeys[i], ) + require.NoError(t, err) participants[i] = Participant{ NodeInfo: nodeInfo, RandomBeaconPrivKey: randomBSKs[i], @@ -86,8 +86,8 @@ func createSignerData(t *testing.T, n int) *ParticipantData { participantData := &ParticipantData{ Participants: participants, - Lookup: participantLookup, - GroupKey: groupKey, + DKGCommittee: participantLookup, + DKGGroupKey: groupKey, } return participantData diff --git a/cmd/bootstrap/run/result.go b/cmd/bootstrap/run/result.go index 3fc364fdeb4..85685eed3ba 100644 --- a/cmd/bootstrap/run/result.go +++ b/cmd/bootstrap/run/result.go @@ -1,6 +1,8 @@ package run import ( + "fmt" + "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" ) @@ -10,13 +12,17 @@ func GenerateRootResult( commit flow.StateCommitment, epochSetup *flow.EpochSetup, epochCommit *flow.EpochCommit, -) *flow.ExecutionResult { - - result := &flow.ExecutionResult{ +) (*flow.ExecutionResult, error) { + result, err := flow.NewRootExecutionResult(flow.UntrustedExecutionResult{ PreviousResultID: flow.ZeroID, BlockID: block.ID(), Chunks: chunks.ChunkListFromCommit(commit), ServiceEvents: []flow.ServiceEvent{epochSetup.ServiceEvent(), epochCommit.ServiceEvent()}, + ExecutionDataID: flow.ZeroID, + }) + if err != nil { + return nil, 
fmt.Errorf("could not build root execution result: %w", err) } - return result + + return result, nil } diff --git a/cmd/bootstrap/run/seal.go b/cmd/bootstrap/run/seal.go index de9e5cb75bf..009d01cff1f 100644 --- a/cmd/bootstrap/run/seal.go +++ b/cmd/bootstrap/run/seal.go @@ -6,15 +6,25 @@ import ( "github.com/onflow/flow-go/model/flow" ) +// GenerateRootSeal generates a root seal matching the input root result. +// The input is assumed to be a valid root result. +// No errors are expected during normal operation. func GenerateRootSeal(result *flow.ExecutionResult) (*flow.Seal, error) { finalState, err := result.FinalStateCommitment() if err != nil { return nil, fmt.Errorf("generating root seal failed: %w", err) } - seal := &flow.Seal{ - BlockID: result.BlockID, - ResultID: result.ID(), - FinalState: finalState, + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: result.BlockID, + ResultID: result.ID(), + FinalState: finalState, + AggregatedApprovalSigs: nil, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct seal: %w", err) } + return seal, nil } diff --git a/cmd/bootstrap/transit/cmd/crypto_test.go b/cmd/bootstrap/transit/cmd/crypto_test.go index 4e8bfddfc73..a5932c68a40 100644 --- a/cmd/bootstrap/transit/cmd/crypto_test.go +++ b/cmd/bootstrap/transit/cmd/crypto_test.go @@ -14,14 +14,12 @@ import ( const nodeID string = "0000000000000000000000000000000000000000000000000000000000000001" func TestEndToEnd(t *testing.T) { - // Create a temp directory to work as "bootstrap" bootdir := t.TempDir() t.Logf("Created dir %s", bootdir) // Create test files - //bootcmd.WriteText(filepath.Join(bootdir, bootstrap.PathNodeId), []byte(nodeID) randomBeaconPath := filepath.Join(bootdir, fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID)) err := os.MkdirAll(filepath.Dir(randomBeaconPath), 0755) if err != nil { @@ -46,9 +44,13 @@ func TestEndToEnd(t *testing.T) { t.Fatalf("Error wrapping files: %s", err) } + unWrappedFilePath := filepath.Join( + bootdir, + fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID), + ) // Client: // Unwrap files - err = unWrapFile(bootdir, nodeID) + err = unWrapFile(bootdir, nodeID, bootdir, unWrappedFilePath) if err != nil { t.Fatalf("Error unwrapping response: %s", err) } diff --git a/cmd/bootstrap/transit/cmd/flags.go b/cmd/bootstrap/transit/cmd/flags.go index c3733f63ea9..89eea52b283 100644 --- a/cmd/bootstrap/transit/cmd/flags.go +++ b/cmd/bootstrap/transit/cmd/flags.go @@ -9,7 +9,11 @@ var ( flagAccessAddress string flagNodeRole string flagTimeout time.Duration + flagConcurrency int64 - flagWrapID string // wrap ID - flagVoteFile string + flagWrapID string // wrap ID + flagVoteFile string + flagVoteFilePath string + flagNodeID string + flagOutputDir string ) diff --git a/cmd/bootstrap/transit/cmd/generate_root_block_vote.go b/cmd/bootstrap/transit/cmd/generate_root_block_vote.go index 89702a388fa..f04d535219c 100644 --- a/cmd/bootstrap/transit/cmd/generate_root_block_vote.go +++ b/cmd/bootstrap/transit/cmd/generate_root_block_vote.go @@ -26,6 +26,11 @@ var generateVoteCmd = &cobra.Command{ func init() { rootCmd.AddCommand(generateVoteCmd) + addGenerateVoteCmdFlags() +} + +func addGenerateVoteCmdFlags() { + generateVoteCmd.Flags().StringVarP(&flagOutputDir, "outputDir", "o", "", "ouput directory for vote files; if not set defaults to bootstrap directory") } func generateVote(c *cobra.Command, args []string) { @@ -47,8 +52,13 @@ func generateVote(c *cobra.Command, args []string) { } // load DKG private key - path := 
fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID) - data, err := io.ReadFile(filepath.Join(flagBootDir, path)) + path := filepath.Join(flagBootDir, fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID)) + // If output directory is specified, load the random beacon private key from there instead + if flagOutputDir != "" { + path = filepath.Join(flagOutputDir, bootstrap.FilenameRandomBeaconPriv) + } + + data, err := io.ReadFile(path) if err != nil { log.Fatal().Err(err).Msg("could not read DKG private key file") } @@ -60,11 +70,11 @@ } stakingPrivKey := nodeInfo.StakingPrivKey.PrivateKey - identity := &flow.Identity{ + identity := flow.IdentitySkeleton{ NodeID: nodeID, Address: nodeInfo.Address, Role: nodeInfo.Role, - Weight: flow.DefaultInitialWeight, + InitialWeight: flow.DefaultInitialWeight, StakingPubKey: stakingPrivKey.PublicKey(), NetworkPubKey: nodeInfo.NetworkPrivKey.PrivateKey.PublicKey(), } @@ -78,6 +88,12 @@ signer := verification.NewCombinedSigner(me, beaconKeyStore) path = filepath.Join(flagBootDir, bootstrap.PathRootBlockData) + + // If output directory is specified, use it for the root-block.json + if flagOutputDir != "" { + path = filepath.Join(flagOutputDir, "root-block.json") + } + data, err = io.ReadFile(path) if err != nil { log.Fatal().Err(err).Msg("could not read root block file") } @@ -89,14 +105,22 @@ log.Fatal().Err(err).Msg("could not unmarshal root block data") } - vote, err := signer.CreateVote(model.GenesisBlockFromFlow(rootBlock.Header)) + vote, err := signer.CreateVote(model.GenesisBlockFromFlow(rootBlock.ToHeader())) if err != nil { log.Fatal().Err(err).Msg("could not create vote for root block") } voteFile := fmt.Sprintf(bootstrap.PathNodeRootBlockVote, nodeID) - if err = io.WriteJSON(filepath.Join(flagBootDir, voteFile), vote); err != nil { + // By default, use the bootstrap directory for storing the vote file + voteFilePath := filepath.Join(flagBootDir, voteFile) + + // If output directory is specified, use it for the vote file path + if flagOutputDir != "" { + voteFilePath = filepath.Join(flagOutputDir, "root-block-vote.json") + } + + if err = io.WriteJSON(voteFilePath, vote); err != nil { log.Fatal().Err(err).Msg("could not write vote to file") } diff --git a/cmd/bootstrap/transit/cmd/prepare.go b/cmd/bootstrap/transit/cmd/prepare.go index c11e2e05314..f8d4944b4c2 100644 --- a/cmd/bootstrap/transit/cmd/prepare.go +++ b/cmd/bootstrap/transit/cmd/prepare.go @@ -21,6 +21,8 @@ func init() { func addPrepareCmdFlags() { prepareCmd.Flags().StringVarP(&flagNodeRole, "role", "r", "", `node role (can be "collection", "consensus", "execution", "verification" or "access")`) + prepareCmd.Flags().StringVarP(&flagNodeID, "nodeID", "n", "", "node id") + prepareCmd.Flags().StringVarP(&flagOutputDir, "outputDir", "o", "", "output directory") _ = prepareCmd.MarkFlagRequired("role") } @@ -38,12 +40,22 @@ func prepare(cmd *cobra.Command, args []string) { return } - nodeID, err := readNodeID() - if err != nil { - log.Fatal().Err(err).Msg("could not read node ID from file") + // Set the output directory from the flag or use the bootstrap directory + outputDir := flagOutputDir + if outputDir == "" { + outputDir = flagBootDir + } + + // Set the NodeID from the flag or read it from the file + nodeID := flagNodeID + if nodeID == "" { + nodeID, err = readNodeID() + if err != nil { + log.Fatal().Err(err).Msg("could not read node ID from file") + } } - err
= generateKeys(flagBootDir, nodeID) + err = generateKeys(outputDir, nodeID) if err != nil { log.Fatal().Err(err).Msg("failed to prepare") } diff --git a/cmd/bootstrap/transit/cmd/pull.go b/cmd/bootstrap/transit/cmd/pull.go index 9a2517803f4..9eb0a9861b5 100644 --- a/cmd/bootstrap/transit/cmd/pull.go +++ b/cmd/bootstrap/transit/cmd/pull.go @@ -1,16 +1,21 @@ package cmd import ( + "bytes" "context" "fmt" "io/fs" "path/filepath" "strings" + "sync" "time" "github.com/spf13/cobra" + "golang.org/x/sync/semaphore" "github.com/onflow/flow-go/cmd/bootstrap/gcs" + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ) @@ -32,6 +37,8 @@ func addPullCmdFlags() { pullCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team to access the Transit server") pullCmd.Flags().StringVarP(&flagNodeRole, "role", "r", "", `node role (can be "collection", "consensus", "execution", "verification" or "access")`) pullCmd.Flags().DurationVar(&flagTimeout, "timeout", time.Second*300, `timeout for pull`) + pullCmd.Flags().Int64Var(&flagConcurrency, "concurrency", 2, `concurrency limit for pull`) + pullCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket for pulling bootstrap files`) _ = pullCmd.MarkFlagRequired("token") _ = pullCmd.MarkFlagRequired("role") @@ -78,17 +85,34 @@ func pull(cmd *cobra.Command, args []string) { } log.Info().Msgf("found %d files in Google Bucket", len(files)) - // download found files + sem := semaphore.NewWeighted(flagConcurrency) + wait := sync.WaitGroup{} for _, file := range files { - fullOutpath := filepath.Join(flagBootDir, "public-root-information", filepath.Base(file)) - - log.Info().Str("source", file).Str("dest", fullOutpath).Msgf("downloading file from transit servers") - err = bucket.DownloadFile(ctx, client, fullOutpath, file) - if err != nil { - log.Fatal().Err(err).Msgf("could not download google bucket file") - } + wait.Add(1) + go func(file gcs.GCSFile) { + _ = sem.Acquire(ctx, 1) + defer func() { + sem.Release(1) + wait.Done() + }() + + fullOutpath := filepath.Join(flagBootDir, "public-root-information", filepath.Base(file.Name)) + fmd5 := utils.CalcMd5(fullOutpath) + // only skip files that have an MD5 hash + if len(file.MD5) > 0 && bytes.Equal(fmd5, file.MD5) { + log.Info().Str("source", file.Name).Str("dest", fullOutpath).Msgf("skipping existing file from transit servers") + return + } + log.Info().Str("source", file.Name).Str("dest", fullOutpath).Msgf("downloading file from transit servers") + // use a goroutine-local err: assigning the shared outer err from concurrent goroutines would be a data race + err := bucket.DownloadFile(ctx, client, fullOutpath, file.Name) + if err != nil { + log.Fatal().Err(err).Msgf("could not download google bucket file") + } + }(file) } + wait.Wait() + // download any extra files specific to node role extraFiles := getAdditionalFilesToDownload(role, nodeID) for _, file := range extraFiles { @@ -133,7 +157,8 @@ func pull(cmd *cobra.Command, args []string) { // unwrap consensus node role files if role == flow.RoleConsensus { - err = unWrapFile(flagBootDir, nodeID) + unWrappedFilePath := filepath.Join(flagBootDir, fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID)) + err = unWrapFile(flagBootDir, nodeID, flagBootDir, unWrappedFilePath) if err != nil { log.Fatal().Err(err).Msg("failed to pull") } diff --git a/cmd/bootstrap/transit/cmd/pull_root_block.go b/cmd/bootstrap/transit/cmd/pull_root_block.go index bc6539bc8ad..3cb645f5124 100644 ---
a/cmd/bootstrap/transit/cmd/pull_root_block.go +++ b/cmd/bootstrap/transit/cmd/pull_root_block.go @@ -25,6 +25,8 @@ func init() { func addPullRootBlockCmdFlags() { pullRootBlockCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team to access the Transit server") + pullRootBlockCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket for pulling root block`) + pullRootBlockCmd.Flags().StringVarP(&flagOutputDir, "outputDir", "o", "", "output directory for root block file; if not set defaults to bootstrap directory") _ = pullRootBlockCmd.MarkFlagRequired("token") } @@ -50,28 +52,45 @@ func pullRootBlock(c *cobra.Command, args []string) { log.Info().Msg("downloading root block") rootBlockFile := filepath.Join(flagToken, bootstrap.PathRootBlockData) - fullOutpath := filepath.Join(flagBootDir, bootstrap.PathRootBlockData) + fullRootBlockPath := filepath.Join(flagBootDir, bootstrap.PathRootBlockData) + if flagOutputDir != "" { + fullRootBlockPath = filepath.Join(flagOutputDir, "root-block.json") + } - log.Info().Str("source", rootBlockFile).Str("dest", fullOutpath).Msgf("downloading root block file from transit servers") - err = bucket.DownloadFile(ctx, client, fullOutpath, rootBlockFile) + log.Info().Str("source", rootBlockFile).Str("dest", fullRootBlockPath).Msgf("downloading root block file from transit servers") + err = bucket.DownloadFile(ctx, client, fullRootBlockPath, rootBlockFile) if err != nil { log.Fatal().Err(err).Msgf("could not download google bucket file") } - objectName := filepath.Join(flagToken, fmt.Sprintf(FilenameRandomBeaconCipher, nodeID)) - fullOutpath = filepath.Join(flagBootDir, filepath.Base(objectName)) + log.Info().Msg("successfully downloaded root block ") - log.Info().Msgf("downloading random beacon key: %s", objectName) + objectName := filepath.Join(flagToken, fmt.Sprintf(FilenameRandomBeaconCipher, nodeID)) - err = bucket.DownloadFile(ctx, client, fullOutpath, objectName) - if err != nil { - log.Fatal().Err(err).Msg("could not download file from google bucket") + // By default, use the bootstrap directory for the random beacon download & unwrapping + fullRandomBeaconPath := filepath.Join(flagBootDir, filepath.Base(objectName)) + unWrappedRandomBeaconPath := filepath.Join( + flagBootDir, + fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID), + ) + + // If output directory is specified, use it for the random beacon path + // this will set the path used to download the random beacon file and unwrap it + if flagOutputDir != "" { + fullRandomBeaconPath = filepath.Join(flagOutputDir, filepath.Base(objectName)) + unWrappedRandomBeaconPath = filepath.Join(flagOutputDir, bootstrap.FilenameRandomBeaconPriv) } - err = unWrapFile(flagBootDir, nodeID) + log.Info().Msgf("downloading random beacon key: %s", objectName) + + err = bucket.DownloadFile(ctx, client, fullRandomBeaconPath, objectName) if err != nil { - log.Fatal().Err(err).Msg("could not unwrap random beacon file") + log.Fatal().Err(err).Msg("could not download random beacon key file from google bucket") + } else { + err = unWrapFile(flagBootDir, nodeID, flagOutputDir, unWrappedRandomBeaconPath) + if err != nil { + log.Fatal().Err(err).Msg("could not unwrap random beacon file") + } + log.Info().Msg("successfully downloaded and unwrapped random beacon private key") } - - log.Info().Msg("successfully downloaded root block and random beacon key") } diff --git a/cmd/bootstrap/transit/cmd/push.go b/cmd/bootstrap/transit/cmd/push.go index 
e45b63d27f8..2d42b675489 100644 --- a/cmd/bootstrap/transit/cmd/push.go +++ b/cmd/bootstrap/transit/cmd/push.go @@ -29,6 +29,7 @@ func init() { func addPushCmdFlags() { pushCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team to access the Transit server") pushCmd.Flags().StringVarP(&flagNodeRole, "role", "r", "", `node role (can be "collection", "consensus", "execution", "verification" or "access")`) + pushCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket for uploading transit keys`) _ = pushCmd.MarkFlagRequired("token") } diff --git a/cmd/bootstrap/transit/cmd/push_root_block_vote.go b/cmd/bootstrap/transit/cmd/push_root_block_vote.go index d225b7cf8a5..51702ecd476 100644 --- a/cmd/bootstrap/transit/cmd/push_root_block_vote.go +++ b/cmd/bootstrap/transit/cmd/push_root_block_vote.go @@ -28,8 +28,11 @@ func addPushVoteCmdFlags() { defaultVoteFilePath := fmt.Sprintf(bootstrap.PathNodeRootBlockVote, "<node_id>") pushVoteCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team to access the Transit server") pushVoteCmd.Flags().StringVarP(&flagVoteFile, "vote-file", "v", "", fmt.Sprintf("path under bootstrap directory of the vote file to upload (default: %s)", defaultVoteFilePath)) + pushVoteCmd.Flags().StringVarP(&flagVoteFilePath, "vote-file-dir", "d", "", "directory for vote file to upload, ONLY for vote files outside the bootstrap directory") + pushVoteCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket for pushing root block vote files`) _ = pushVoteCmd.MarkFlagRequired("token") + pushVoteCmd.MarkFlagsMutuallyExclusive("vote-file", "vote-file-dir") } func pushVote(c *cobra.Command, args []string) { @@ -44,12 +47,22 @@ func pushVote(c *cobra.Command, args []string) { } voteFile := flagVoteFile + + // If --vote-file-dir is not specified, use the bootstrap directory + voteFilePath := filepath.Join(flagBootDir, voteFile) + + // if --vote-file is not specified, use default file name within bootstrap directory if voteFile == "" { voteFile = fmt.Sprintf(bootstrap.PathNodeRootBlockVote, nodeID) + voteFilePath = filepath.Join(flagBootDir, voteFile) + } + + // If vote-file-dir is specified, use it to construct the full path to the vote file (with default file name) + if flagVoteFilePath != "" { + voteFilePath = filepath.Join(flagVoteFilePath, "root-block-vote.json") } destination := filepath.Join(flagToken, fmt.Sprintf(bootstrap.FilenameRootBlockVote, nodeID)) - source := filepath.Join(flagBootDir, voteFile) log.Info().Msg("pushing root block vote") @@ -66,7 +79,7 @@ func pushVote(c *cobra.Command, args []string) { } defer client.Close() - err = bucket.UploadFile(ctx, client, destination, source) + err = bucket.UploadFile(ctx, client, destination, voteFilePath) if err != nil { log.Fatal().Err(err).Msg("failed to upload vote file") } diff --git a/cmd/bootstrap/transit/cmd/snapshot.go b/cmd/bootstrap/transit/cmd/snapshot.go index b918779bb7f..561a0c4de3a 100644 --- a/cmd/bootstrap/transit/cmd/snapshot.go +++ b/cmd/bootstrap/transit/cmd/snapshot.go @@ -10,6 +10,7 @@ import ( "google.golang.org/grpc/credentials/insecure" client "github.com/onflow/flow-go-sdk/access/grpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" @@ -53,7 +54,12 @@ func snapshot(cmd *cobra.Command, args []string) { } // create a flow client with given access address - flowClient, err := 
client.NewClient(flagAccessAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + flowClient, err := client.NewClient( + flagAccessAddress, + client.WithGRPCDialOptions( + grpc.WithTransportCredentials(insecure.NewCredentials()), + ), + ) if err != nil { log.Fatal().Err(err).Msg("could not create flow client") } @@ -71,10 +77,11 @@ func snapshot(cmd *cobra.Command, args []string) { } // check if given NodeID is part of the current or next epoch - currentIdentities, err := snapshot.Epochs().Current().InitialIdentities() + currentEpoch, err := snapshot.Epochs().Current() if err != nil { - log.Fatal().Err(err).Msg("could not get initial identities from current epoch") + log.Fatal().Err(err).Msg("could not get current epoch") } + currentIdentities := currentEpoch.InitialIdentities() if _, exists := currentIdentities.ByNodeID(nodeID); exists { err := ioutils.WriteFile(filepath.Join(flagBootDir, bootstrap.PathRootProtocolStateSnapshot), bytes) if err != nil { @@ -83,10 +90,11 @@ func snapshot(cmd *cobra.Command, args []string) { return } - nextIdentities, err := snapshot.Epochs().Next().InitialIdentities() + nextEpoch, err := snapshot.Epochs().NextCommitted() if err != nil { - log.Fatal().Err(err).Msg("could not get initial identities from next epoch") + log.Fatal().Err(err).Msg("could not get next committed epoch") } + nextIdentities := nextEpoch.InitialIdentities() if _, exists := nextIdentities.ByNodeID(nodeID); exists { err := ioutils.WriteFile(filepath.Join(flagBootDir, bootstrap.PathRootProtocolStateSnapshot), bytes) if err != nil { diff --git a/cmd/bootstrap/transit/cmd/upload_transit_keys.go b/cmd/bootstrap/transit/cmd/upload_transit_keys.go index d46c44b9657..7333d537d41 100644 --- a/cmd/bootstrap/transit/cmd/upload_transit_keys.go +++ b/cmd/bootstrap/transit/cmd/upload_transit_keys.go @@ -29,6 +29,7 @@ func init() { func addUploadTransitKeysCmdFlags() { pushTransitKeyCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team") + pushTransitKeyCmd.Flags().StringVarP(&flagBucketName, "bucket-name", "g", "flow-genesis-bootstrap", `bucket used for pushing transit keys`) err := pushTransitKeyCmd.MarkFlagRequired("token") if err != nil { log.Fatal().Err(err).Msg("failed to initialize") @@ -37,7 +38,6 @@ func addUploadTransitKeysCmdFlags() { // pushTransitKey uploads transit keys to the Flow server func pushTransitKey(_ *cobra.Command, _ []string) { - nodeIDString, err := readNodeID() if err != nil { log.Fatal().Err(err).Msg("could not read node ID") diff --git a/cmd/bootstrap/transit/cmd/utils.go b/cmd/bootstrap/transit/cmd/utils.go index 380646671c0..27c75b4dc22 100644 --- a/cmd/bootstrap/transit/cmd/utils.go +++ b/cmd/bootstrap/transit/cmd/utils.go @@ -36,7 +36,7 @@ var ( // commit and semver vars commit = build.Commit() - semver = build.Semver() + semver = build.Version() ) // readNodeID reads the NodeID file @@ -75,7 +75,6 @@ func getFileSHA256(file string) (string, error) { // moveFile moves a file from source to destination where src and dst are full paths including the filename func moveFile(src, dst string) error { - // check if source file exist if !ioutils.FileExists(src) { return fmt.Errorf("file not found: %s", src) @@ -153,14 +152,12 @@ func moveFile(src, dst string) error { return nil } -func unWrapFile(bootDir string, nodeID string) error { - +func unWrapFile(bootDir, nodeID, cipherTextDir, plaintextPath string) error { log.Info().Msg("decrypting Random Beacon key") pubKeyPath := filepath.Join(bootDir, fmt.Sprintf(FilenameTransitKeyPub, 
nodeID)) privKeyPath := filepath.Join(bootDir, fmt.Sprintf(FilenameTransitKeyPriv, nodeID)) - ciphertextPath := filepath.Join(bootDir, fmt.Sprintf(FilenameRandomBeaconCipher, nodeID)) - plaintextPath := filepath.Join(bootDir, fmt.Sprintf(bootstrap.PathRandomBeaconPriv, nodeID)) + ciphertextPath := filepath.Join(cipherTextDir, fmt.Sprintf(FilenameRandomBeaconCipher, nodeID)) ciphertext, err := ioutils.ReadFile(ciphertextPath) if err != nil { @@ -231,7 +228,6 @@ func wrapFile(bootDir string, nodeID string) error { // generateKeys creates the transit keypair and writes them to disk for later func generateKeys(bootDir string, nodeID string) error { - privPath := filepath.Join(bootDir, fmt.Sprintf(FilenameTransitKeyPriv, nodeID)) pubPath := filepath.Join(bootDir, fmt.Sprintf(FilenameTransitKeyPub, nodeID)) diff --git a/cmd/bootstrap/utils/file.go b/cmd/bootstrap/utils/file.go index b1c0585ba0e..b0d278b5249 100644 --- a/cmd/bootstrap/utils/file.go +++ b/cmd/bootstrap/utils/file.go @@ -7,7 +7,6 @@ import ( "github.com/onflow/flow-go/engine/common/rpc/convert" model "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol/inmem" io "github.com/onflow/flow-go/utils/io" ) @@ -32,30 +31,16 @@ func ReadRootProtocolSnapshot(bootDir string) (*inmem.Snapshot, error) { return snapshot, nil } -func ReadRootBlock(rootBlockDataPath string) (*flow.Block, error) { - bytes, err := io.ReadFile(rootBlockDataPath) +func ReadData[T any](path string) (*T, error) { + bytes, err := io.ReadFile(path) if err != nil { - return nil, fmt.Errorf("could not read root block: %w", err) + return nil, fmt.Errorf("could not read data from file: %w", err) } - var encodable flow.Block + var encodable T err = json.Unmarshal(bytes, &encodable) if err != nil { return nil, fmt.Errorf("could not unmarshal root block: %w", err) } return &encodable, nil } - -func ReadDKGData(dkgDataPath string) (*inmem.EncodableFullDKG, error) { - bytes, err := io.ReadFile(dkgDataPath) - if err != nil { - return nil, fmt.Errorf("could not read dkg data: %w", err) - } - - var encodable inmem.EncodableFullDKG - err = json.Unmarshal(bytes, &encodable) - if err != nil { - return nil, fmt.Errorf("could not unmarshal dkg data: %w", err) - } - return &encodable, nil -} diff --git a/cmd/bootstrap/utils/key_generation.go b/cmd/bootstrap/utils/key_generation.go index 8a8164ef862..7d82109309d 100644 --- a/cmd/bootstrap/utils/key_generation.go +++ b/cmd/bootstrap/utils/key_generation.go @@ -7,16 +7,17 @@ import ( gohash "hash" "io" - sdk "github.com/onflow/flow-go-sdk" - - "github.com/onflow/flow-go/model/encodable" - "golang.org/x/crypto/hkdf" + "github.com/onflow/crypto" + + sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/bootstrap" + model "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" ) @@ -127,7 +128,15 @@ func GenerateStakingKey(seed []byte) (crypto.PrivateKey, error) { } func GenerateStakingKeys(n int, seeds [][]byte) ([]crypto.PrivateKey, error) { - return GenerateKeys(crypto.BLSBLS12381, n, seeds) + keys := make([]crypto.PrivateKey, 0, n) + for i := 0; i < n; i++ { + key, err := GenerateStakingKey(seeds[i]) + if err != nil { + return nil, err + } + keys = append(keys, key) + } + return keys, nil } func GenerateKeys(algo crypto.SigningAlgorithm, n int, 
seeds [][]byte) ([]crypto.PrivateKey, error) { @@ -175,7 +184,7 @@ func WriteMachineAccountFiles(chainID flow.ChainID, nodeInfos []bootstrap.NodeIn // // for the machine account key, we keep track of the address index to map // the Flow address of the machine account to the key. - addressIndex := uint64(4) + addressIndex := uint64(systemcontracts.LastSystemAccountIndex) for _, nodeInfo := range nodeInfos { // retrieve private representation of the node @@ -297,3 +306,21 @@ func WriteStakingNetworkingKeyFiles(nodeInfos []bootstrap.NodeInfo, write WriteJ return nil } + +// WriteNodeInternalPubInfos writes the `node-internal-infos.pub.json` file. +// In a nutshell, this file contains the Role, address and weight for all authorized nodes. +func WriteNodeInternalPubInfos(nodeInfos []bootstrap.NodeInfo, write WriteJSONFileFunc) error { + configs := make([]model.NodeConfig, len(nodeInfos)) + for i, nodeInfo := range nodeInfos { + configs[i] = model.NodeConfig{ + Role: nodeInfo.Role, + Address: nodeInfo.Address, + Weight: nodeInfo.Weight, + } + } + err := write(bootstrap.PathNodeInfosPub, configs) + if err != nil { + return err + } + return nil +} diff --git a/cmd/bootstrap/utils/key_generation_test.go b/cmd/bootstrap/utils/key_generation_test.go index 299e3c919f6..a261c07238d 100644 --- a/cmd/bootstrap/utils/key_generation_test.go +++ b/cmd/bootstrap/utils/key_generation_test.go @@ -4,12 +4,13 @@ import ( "fmt" "testing" + "github.com/onflow/crypto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -62,7 +63,7 @@ func TestWriteMachineAccountFiles(t *testing.T) { expected := make(map[string]bootstrap.NodeMachineAccountInfo) for i, node := range nodes { // See comments in WriteMachineAccountFiles for why addresses take this form - addr, err := chain.AddressAtIndex(uint64(6 + i*2)) + addr, err := chain.AddressAtIndex(uint64(systemcontracts.LastSystemAccountIndex + (i+1)*2)) require.NoError(t, err) private, err := node.Private() require.NoError(t, err) diff --git a/cmd/bootstrap/utils/md5.go b/cmd/bootstrap/utils/md5.go new file mode 100644 index 00000000000..4d4bbe21046 --- /dev/null +++ b/cmd/bootstrap/utils/md5.go @@ -0,0 +1,25 @@ +package utils + +// The google storage API only provides md5 and crc32 hence overriding the linter flag for md5 +import ( + // #nosec + "crypto/md5" + "io" + "os" +) + +func CalcMd5(outpath string) []byte { + f, err := os.Open(outpath) + if err != nil { + return nil + } + defer f.Close() + + // #nosec + h := md5.New() + if _, err := io.Copy(h, f); err != nil { + return nil + } + + return h.Sum(nil) +} diff --git a/cmd/bootstrap/utils/node_info.go b/cmd/bootstrap/utils/node_info.go index 8cc45c4f26b..d8bc11b649e 100644 --- a/cmd/bootstrap/utils/node_info.go +++ b/cmd/bootstrap/utils/node_info.go @@ -16,11 +16,15 @@ import ( // also writes a map containing each of the nodes weights mapped by NodeID func WritePartnerFiles(nodeInfos []model.NodeInfo, bootDir string) (string, string, error) { - // convert to public nodeInfos and map stkes + // convert to public nodeInfos and create a map from nodeID to weight nodePubInfos := make([]model.NodeInfoPub, len(nodeInfos)) weights := make(map[flow.Identifier]uint64) for i, node := range nodeInfos { - nodePubInfos[i] = node.Public() + var err error 
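The generic `ReadData[T]` introduced in `cmd/bootstrap/utils/file.go` above collapses the former type-specific readers (`ReadRootBlock`, `ReadDKGData`) into one JSON loader. A sketch of how former call sites migrate; the file paths are hypothetical placeholders, and it assumes the types used by the removed readers remain exported:

```go
package example

import (
	"github.com/rs/zerolog/log"

	"github.com/onflow/flow-go/cmd/bootstrap/utils"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol/inmem"
)

// loadBootstrapData shows the former ReadRootBlock/ReadDKGData call sites
// collapsing into instantiations of the one generic helper.
func loadBootstrapData() (*flow.Block, *inmem.EncodableFullDKG) {
	// Before: block, err := utils.ReadRootBlock(path)
	block, err := utils.ReadData[flow.Block]("bootstrap/public-root-information/root-block.json")
	if err != nil {
		log.Fatal().Err(err).Msg("could not read root block")
	}
	// Before: dkg, err := utils.ReadDKGData(path)
	dkg, err := utils.ReadData[inmem.EncodableFullDKG]("bootstrap/random-beacon-full-dkg.json")
	if err != nil {
		log.Fatal().Err(err).Msg("could not read dkg data")
	}
	return block, dkg
}
```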
+ nodePubInfos[i], err = node.Public() + if err != nil { + return "", "", fmt.Errorf("could not read public info: %w", err) + } weights[node.NodeID] = node.Weight } @@ -105,38 +109,38 @@ func GenerateNodeInfos(consensus, collection, execution, verification, access in nodes := make([]model.NodeInfo, 0) - // CONSENSUS = 1 + // CONSENSUS consensusNodes := unittest.NodeInfosFixture(consensus, unittest.WithRole(flow.RoleConsensus), - unittest.WithWeight(flow.DefaultInitialWeight), + unittest.WithInitialWeight(flow.DefaultInitialWeight), ) nodes = append(nodes, consensusNodes...) - // COLLECTION = 1 + // COLLECTION collectionNodes := unittest.NodeInfosFixture(collection, unittest.WithRole(flow.RoleCollection), - unittest.WithWeight(flow.DefaultInitialWeight), + unittest.WithInitialWeight(flow.DefaultInitialWeight), ) nodes = append(nodes, collectionNodes...) - // EXECUTION = 1 + // EXECUTION executionNodes := unittest.NodeInfosFixture(execution, unittest.WithRole(flow.RoleExecution), - unittest.WithWeight(flow.DefaultInitialWeight), + unittest.WithInitialWeight(flow.DefaultInitialWeight), ) nodes = append(nodes, executionNodes...) - // VERIFICATION = 1 + // VERIFICATION verificationNodes := unittest.NodeInfosFixture(verification, unittest.WithRole(flow.RoleVerification), - unittest.WithWeight(flow.DefaultInitialWeight), + unittest.WithInitialWeight(flow.DefaultInitialWeight), ) nodes = append(nodes, verificationNodes...) - // ACCESS = 1 + // ACCESS accessNodes := unittest.NodeInfosFixture(access, unittest.WithRole(flow.RoleAccess), - unittest.WithWeight(flow.DefaultInitialWeight), + unittest.WithInitialWeight(flow.DefaultInitialWeight), ) nodes = append(nodes, accessNodes...) diff --git a/cmd/build/version.go b/cmd/build/version.go index e2d59b3f74d..d089c27f3fb 100644 --- a/cmd/build/version.go +++ b/cmd/build/version.go @@ -6,6 +6,13 @@ // go build -ldflags "-X github.com/onflow/flow-go/cmd/build.semver=v1.0.0" package build +import ( + "fmt" + "strings" + + smv "github.com/coreos/go-semver/semver" +) + // Default value for build-time-injected version strings. const undefined = "undefined" @@ -15,8 +22,8 @@ var ( commit string ) -// Semver returns the semantic version of this build. -func Semver() string { +// Version returns the raw version string of this build. +func Version() string { return semver } @@ -41,3 +48,40 @@ func init() { commit = undefined } } + +var UndefinedVersionError = fmt.Errorf("version is undefined") + +// Semver returns the semantic version of this build as a semver.Version +// if it is defined, or UndefinedVersionError otherwise. +// The version string is converted to a semver compliant one if it isn't already +// but this might fail if the version string is still not semver compliant. In that +// case, an error is returned. +func Semver() (*smv.Version, error) { + if !IsDefined(semver) { + return nil, UndefinedVersionError + } + ver, err := smv.NewVersion(makeSemverCompliant(semver)) + return ver, err +} + +// makeSemverCompliant converts a non-semver version string to a semver compliant one. +// This removes the leading 'v'. +// In the past we sometimes omitted the patch version, e.g. v1.0.0 became v1.0 so this +// also adds a 0 patch version if there's no patch version. 
+func makeSemverCompliant(version string) string { + if !IsDefined(version) { + return version + } + + // Remove the leading 'v' + version = strings.TrimPrefix(version, "v") + + // If there's no patch version, add .0 + parts := strings.SplitN(version, "-", 2) + if strings.Count(parts[0], ".") == 1 { + parts[0] = parts[0] + ".0" + } + + version = strings.Join(parts, "-") + return version +} diff --git a/cmd/build/version_test.go b/cmd/build/version_test.go new file mode 100644 index 00000000000..4f3232b56d2 --- /dev/null +++ b/cmd/build/version_test.go @@ -0,0 +1,26 @@ +package build + +import "testing" + +func TestMakeSemverV2Compliant(t *testing.T) { + testCases := []struct { + name string + input string + expected string + }{ + {"No hyphen", "v0.29", "0.29.0"}, + {"With hyphen", "v0.29.11-an-error-handling", "0.29.11-an-error-handling"}, + {"With hyphen no patch", "v0.29-an-error-handling", "0.29.0-an-error-handling"}, + {"All digits", "v0.29.1", "0.29.1"}, + {undefined, undefined, undefined}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + output := makeSemverCompliant(tc.input) + if output != tc.expected { + t.Errorf("Got %s; expected %s", output, tc.expected) + } + }) + } +} diff --git a/cmd/collection/main.go b/cmd/collection/main.go index c630d2dc7b3..59d64393a98 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -1,22 +1,22 @@ package main import ( + "context" + "errors" "fmt" "time" "github.com/spf13/pflag" + "golang.org/x/time/rate" client "github.com/onflow/flow-go-sdk/access/grpc" - "github.com/onflow/flow-go/cmd/util/cmd/common" - "github.com/onflow/flow-go/consensus/hotstuff/validator" - "github.com/onflow/flow-go/model/bootstrap" - modulecompliance "github.com/onflow/flow-go/module/compliance" - "github.com/onflow/flow-go/module/mempool/herocache" - "github.com/onflow/flow-go/module/mempool/queue" - "github.com/onflow/flow-go/utils/grpcutils" - sdkcrypto "github.com/onflow/flow-go-sdk/crypto" + + "github.com/onflow/flow-go/admin/commands" + collectionCommands "github.com/onflow/flow-go/admin/commands/collection" + storageCommands "github.com/onflow/flow-go/admin/commands/storage" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/consensus" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/committees" @@ -24,32 +24,42 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" "github.com/onflow/flow-go/consensus/hotstuff/pacemaker/timeout" hotsignature "github.com/onflow/flow-go/consensus/hotstuff/signature" + "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/verification" recovery "github.com/onflow/flow-go/consensus/recovery/protocol" "github.com/onflow/flow-go/engine/collection/epochmgr" "github.com/onflow/flow-go/engine/collection/epochmgr/factories" + "github.com/onflow/flow-go/engine/collection/events" "github.com/onflow/flow-go/engine/collection/ingest" "github.com/onflow/flow-go/engine/collection/pusher" "github.com/onflow/flow-go/engine/collection/rpc" followereng "github.com/onflow/flow-go/engine/common/follower" "github.com/onflow/flow-go/engine/common/provider" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" consync "github.com/onflow/flow-go/engine/common/synchronization" "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" 
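With `Semver()` now returning `(*semver.Version, error)` rather than a raw string, callers are expected to branch on `UndefinedVersionError` for binaries built without the version ldflags. A minimal sketch of the consuming side, assuming only the `build` package API shown above:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/cmd/build"
)

// printVersion falls back gracefully when no version string was injected
// at build time, and reports normalization failures separately.
func printVersion() {
	ver, err := build.Semver()
	if errors.Is(err, build.UndefinedVersionError) {
		fmt.Println("development build (no version injected)")
		return
	}
	if err != nil {
		// Defined but not semver-compliant even after normalization
		// (e.g. "v0.29" would have become "0.29.0").
		fmt.Printf("unparseable version %q: %v\n", build.Version(), err)
		return
	}
	fmt.Printf("flow-go %d.%d.%d\n", ver.Major, ver.Minor, ver.Patch)
}
```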
"github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" builder "github.com/onflow/flow-go/module/builder/collection" "github.com/onflow/flow-go/module/chainsync" + modulecompliance "github.com/onflow/flow-go/module/compliance" "github.com/onflow/flow-go/module/epochs" confinalizer "github.com/onflow/flow-go/module/finalizer/consensus" + "github.com/onflow/flow-go/module/grpcclient" "github.com/onflow/flow-go/module/mempool" epochpool "github.com/onflow/flow-go/module/mempool/epochs" + "github.com/onflow/flow-go/module/mempool/herocache" + "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/updatable_configs" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" "github.com/onflow/flow-go/state/protocol/events/gadgets" + "github.com/onflow/flow-go/storage/store" ) func main() { @@ -65,10 +75,11 @@ func main() { builderPayerRateLimitDryRun bool builderPayerRateLimit float64 builderUnlimitedPayers []string + builderPriorityPayers []string hotstuffMinTimeout time.Duration hotstuffTimeoutAdjustmentFactor float64 hotstuffHappyPathMaxRoundFailures uint64 - blockRateDelay time.Duration + hotstuffProposalDuration time.Duration startupTimeString string startupTime time.Time @@ -80,23 +91,30 @@ func main() { pools *epochpool.TransactionPools // epoch-scoped transaction pools followerDistributor *pubsub.FollowerDistributor + addressRateLimiter *ingest.AddressRateLimiter - push *pusher.Engine - ing *ingest.Engine - mainChainSyncCore *chainsync.Core - followerCore *hotstuff.FollowerLoop // follower hotstuff logic - followerEng *followereng.ComplianceEngine - colMetrics module.CollectionMetrics - err error + push *pusher.Engine + ing *ingest.Engine + mainChainSyncCore *chainsync.Core + followerCore *hotstuff.FollowerLoop // follower hotstuff logic + followerEng *followereng.ComplianceEngine + colMetrics module.CollectionMetrics + machineAccountMetrics module.MachineAccountMetrics + err error // epoch qc contract client - machineAccountInfo *bootstrap.NodeMachineAccountInfo - flowClientConfigs []*common.FlowClientConfig - insecureAccessAPI bool - accessNodeIDS []string - apiRatelimits map[string]int - apiBurstlimits map[string]int + machineAccountInfo *bootstrap.NodeMachineAccountInfo + flowClientConfigs []*grpcclient.FlowClientConfig + insecureAccessAPI bool + accessNodeIDS []string + apiRatelimits map[string]int + apiBurstlimits map[string]int + txRatelimits float64 + txBurstlimits int + txRatelimitPayers string + bySealingLagRateLimiterConfigGetter module.ReadonlySealingLagRateLimiterConfig ) + var deprecatedFlagBlockRateDelay time.Duration nodeBuilder := cmd.FlowNode(flow.RoleCollection.String()) nodeBuilder.ExtraFlags(func(flags *pflag.FlagSet) { @@ -104,8 +122,12 @@ func main() { "maximum number of transactions in the memory pool") flags.StringVarP(&rpcConf.ListenAddr, "ingress-addr", "i", "localhost:9000", "the address the ingress server listens on") - flags.UintVar(&rpcConf.MaxMsgSize, "rpc-max-message-size", grpcutils.DefaultMaxMsgSize, - "the maximum message size in bytes for messages sent or received over grpc") + flags.UintVar(&rpcConf.DeprecatedMaxMsgSize, "rpc-max-message-size", 0, + "[deprecated] the maximum message size in bytes for messages sent or received over grpc") + flags.UintVar(&rpcConf.MaxRequestMsgSize, 
"rpc-max-request-message-size", commonrpc.DefaultCollectionMaxRequestSize, + "the maximum request message size in bytes for request messages received over grpc by the server") + flags.UintVar(&rpcConf.MaxResponseMsgSize, "rpc-max-response-message-size", commonrpc.DefaultCollectionMaxResponseSize, + "the maximum message size in bytes for response messages sent over grpc by the server") flags.BoolVar(&rpcConf.RpcMetricsEnabled, "rpc-metrics-enabled", false, "whether to enable the rpc metrics") flags.Uint64Var(&ingestConf.MaxGasLimit, "ingest-max-gas-limit", flow.DefaultMaxTransactionGasLimit, @@ -114,7 +136,7 @@ func main() { "maximum per-transaction byte size") flags.Uint64Var(&ingestConf.MaxCollectionByteSize, "ingest-max-col-byte-size", flow.DefaultMaxCollectionByteSize, "maximum per-collection byte size") - flags.BoolVar(&ingestConf.CheckScriptsParse, "ingest-check-scripts-parse", true, + flags.BoolVar(&ingestConf.CheckScriptsParse, "ingest-check-scripts-parse", false, "whether we check that inbound transactions are parse-able") flags.UintVar(&ingestConf.ExpiryBuffer, "ingest-expiry-buffer", 30, "expiry buffer for inbound transactions") @@ -128,6 +150,8 @@ func main() { "rate limit for each payer (transactions/collection)") flags.StringSliceVar(&builderUnlimitedPayers, "builder-unlimited-payers", []string{}, // no unlimited payers "set of payer addresses which are omitted from rate limiting") + flags.StringSliceVar(&builderPriorityPayers, "builder-priority-payers", []string{}, // no priority payers + "set of payer addresses which are prioritized in tx selection algorithm") flags.UintVar(&maxCollectionSize, "builder-max-collection-size", flow.DefaultMaxCollectionSize, "maximum number of transactions in proposed collections") flags.Uint64Var(&maxCollectionByteSize, "builder-max-collection-byte-size", flow.DefaultMaxCollectionByteSize, @@ -137,17 +161,16 @@ func main() { // Collection Nodes use a lower min timeout than Consensus Nodes (1.5s vs 2.5s) because: // - they tend to have higher happy-path view rate, allowing a shorter timeout // - since they have smaller committees, 1-2 offline replicas has a larger negative impact, which is mitigating with a smaller timeout - flags.DurationVar(&hotstuffMinTimeout, "hotstuff-min-timeout", 1500*time.Millisecond, + flags.DurationVar(&hotstuffMinTimeout, "hotstuff-min-timeout", 1000*time.Millisecond, "the lower timeout bound for the hotstuff pacemaker, this is also used as initial timeout") flags.Float64Var(&hotstuffTimeoutAdjustmentFactor, "hotstuff-timeout-adjustment-factor", timeout.DefaultConfig.TimeoutAdjustmentFactor, "adjustment of timeout duration in case of time out event") flags.Uint64Var(&hotstuffHappyPathMaxRoundFailures, "hotstuff-happy-path-max-round-failures", timeout.DefaultConfig.HappyPathMaxRoundFailures, "number of failed rounds before first timeout increase") - flags.DurationVar(&blockRateDelay, "block-rate-delay", 250*time.Millisecond, - "the delay to broadcast block proposal in order to control block production rate") flags.Uint64Var(&clusterComplianceConfig.SkipNewProposalsThreshold, "cluster-compliance-skip-proposals-threshold", modulecompliance.DefaultConfig().SkipNewProposalsThreshold, "threshold at which new proposals are discarded rather than cached, if their height is this much above local finalized height (cluster compliance engine)") flags.StringVar(&startupTimeString, "hotstuff-startup-time", cmd.NotSet, "specifies date and time (in ISO 8601 format) after which the consensus participant may enter the first view (e.g 
1996-04-24T15:04:05-07:00)") flags.DurationVar(&hotstuffProposalDuration, "hotstuff-proposal-duration", time.Millisecond*250, "the target time between entering a view and broadcasting the proposal for that view (different and smaller than view time)") flags.Uint32Var(&maxCollectionRequestCacheSize, "max-collection-provider-cache-size", provider.DefaultEntityRequestCacheSize, "maximum number of collection requests to cache for collection provider") flags.UintVar(&collectionProviderWorkers, "collection-provider-workers", provider.DefaultRequestProviderWorkers, "number of workers to use for collection provider") // epoch qc contract flags @@ -156,6 +179,19 @@ func main() { flags.StringToIntVar(&apiRatelimits, "api-rate-limits", map[string]int{}, "per second rate limits for GRPC API methods e.g. Ping=300,SendTransaction=500 etc. note limits apply globally to all clients.") flags.StringToIntVar(&apiBurstlimits, "api-burst-limits", map[string]int{}, "burst limits for gRPC API methods e.g. Ping=100,SendTransaction=100 etc. note limits apply globally to all clients.") + // rate limiting for accounts, default is 2 transactions every 2.5 seconds + // Note: The rate limit configured for each node may differ from the effective network-wide rate limit + // for a given payer. In particular, the number of clusters and the message propagation factor will + // influence how the individual rate limit translates to a network-wide rate limit. + // For example, suppose we have 5 collection clusters and configure each Collection Node with a rate + // limit of 1 message per second. Then, the effective network-wide rate limit for a payer address would + // be *at least* 5 messages per second. + flags.Float64Var(&txRatelimits, "ingest-tx-rate-limits", 2.5, "per second rate limits for processing transactions for limited account") + flags.IntVar(&txBurstlimits, "ingest-tx-burst-limits", 2, "burst limits for processing transactions for limited account") + flags.StringVar(&txRatelimitPayers, "ingest-tx-rate-limit-payers", "", "comma separated list of accounts to apply rate limiting to") + + // deprecated flags + flags.DurationVar(&deprecatedFlagBlockRateDelay, "block-rate-delay", 0, "the delay to broadcast block proposal in order to control block production rate") }).ValidateFlags(func() error { if startupTimeString != cmd.NotSet { t, err := time.Parse(time.RFC3339, startupTimeString) @@ -164,6 +200,15 @@ } startupTime = t } + if deprecatedFlagBlockRateDelay > 0 { + nodeBuilder.Logger.Warn().Msg("A deprecated flag was specified (--block-rate-delay). This flag is deprecated as of v0.30 (Jun 2023), has no effect, and will eventually be removed.") + } + if rpcConf.MaxRequestMsgSize <= 0 { + return errors.New("rpc-max-request-message-size must be greater than 0") + } + if rpcConf.MaxResponseMsgSize <= 0 { + return errors.New("rpc-max-response-message-size must be greater than 0") + } return nil }) @@ -173,6 +218,26 @@ func main() { nodeBuilder. PreInit(cmd.DynamicStartPreInit). + Module("transaction rate limiter", func(node *cmd.NodeConfig) error { + // To be managed by admin tool, and used by ingestion engine + addressRateLimiter = ingest.NewAddressRateLimiter(rate.Limit(txRatelimits), txBurstlimits) + // read the rate limit addresses from flag and add to the rate limiter + addrs, err := ingest.ParseAddresses(txRatelimitPayers) + if err != nil { + return fmt.Errorf("could not parse rate limit addresses: %w", err) + } + ingest.AddAddresses(addressRateLimiter, addrs) + + return nil + }).
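The transaction rate limiter wired in above is a per-payer token bucket; as the flag comment explains, a per-node limit of r tx/s across C independent collection clusters gives a payer an effective network-wide floor of roughly C·r tx/s. A compact sketch of the per-address token-bucket shape, as a hypothetical mirror of `ingest.AddressRateLimiter` rather than its actual implementation:

```go
package example

import (
	"sync"

	"golang.org/x/time/rate"

	"github.com/onflow/flow-go/model/flow"
)

// addressLimiter rate-limits transaction processing per payer address.
type addressLimiter struct {
	mu       sync.Mutex
	limit    rate.Limit // e.g. 2.5 tx/s, the --ingest-tx-rate-limits default
	burst    int        // e.g. 2, the --ingest-tx-burst-limits default
	limiters map[flow.Address]*rate.Limiter
}

func newAddressLimiter(limit rate.Limit, burst int) *addressLimiter {
	return &addressLimiter{
		limit:    limit,
		burst:    burst,
		limiters: make(map[flow.Address]*rate.Limiter),
	}
}

// Allow reports whether a transaction from payer may be processed now.
// Only explicitly registered addresses are limited; all others pass.
func (a *addressLimiter) Allow(payer flow.Address) bool {
	a.mu.Lock()
	defer a.mu.Unlock()
	l, limited := a.limiters[payer]
	if !limited {
		return true
	}
	return l.Allow()
}

// Limit registers payer for rate limiting with a fresh token bucket.
func (a *addressLimiter) Limit(payer flow.Address) {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.limiters[payer] = rate.NewLimiter(a.limit, a.burst)
}
```

Plugging in the defaults: with 5 clusters each allowing 2.5 tx/s, a limited payer could still see on the order of 12.5 tx/s accepted network-wide, which is why the flag help frames the per-node value as a lower bound on the effective limit.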
+ AdminCommand("ingest-tx-rate-limit", func(node *cmd.NodeConfig) commands.AdminCommand { + return collectionCommands.NewTxRateLimitCommand(addressRateLimiter) + }). + AdminCommand("read-range-cluster-blocks", func(conf *cmd.NodeConfig) commands.AdminCommand { + clusterPayloads := store.NewClusterPayloads(&metrics.NoopCollector{}, conf.ProtocolDB) + headers := store.NewHeaders(&metrics.NoopCollector{}, conf.ProtocolDB) + return storageCommands.NewReadRangeClusterBlocksCommand(conf.ProtocolDB, headers, clusterPayloads) + }). Module("follower distributor", func(node *cmd.NodeConfig) error { followerDistributor = pubsub.NewFollowerDistributor() followerDistributor.AddProposalViolationConsumer(notifications.NewSlashingViolationsConsumer(node.Logger)) @@ -212,35 +277,76 @@ func main() { err := node.Metrics.Mempool.Register(metrics.ResourceTransaction, pools.CombinedSize) return err }). - Module("metrics", func(node *cmd.NodeConfig) error { + Module("machine account config", func(node *cmd.NodeConfig) error { + machineAccountInfo, err = cmd.LoadNodeMachineAccountInfoFile(node.BootstrapDir, node.NodeID) + return err + }). + Module("collection node metrics", func(node *cmd.NodeConfig) error { colMetrics = metrics.NewCollectionCollector(node.Tracer) return nil }). + Module("machine account metrics", func(node *cmd.NodeConfig) error { + machineAccountMetrics = metrics.NewMachineAccountCollector(node.MetricsRegisterer, machineAccountInfo.FlowAddress()) + return nil + }). Module("main chain sync core", func(node *cmd.NodeConfig) error { log := node.Logger.With().Str("sync_chain_id", node.RootChainID.String()).Logger() mainChainSyncCore, err = chainsync.New(log, node.SyncCoreConfig, metrics.NewChainSyncCollector(node.RootChainID), node.RootChainID) return err }). - Module("machine account config", func(node *cmd.NodeConfig) error { - machineAccountInfo, err = cmd.LoadNodeMachineAccountInfoFile(node.BootstrapDir, node.NodeID) - return err - }). Module("sdk client connection options", func(node *cmd.NodeConfig) error { anIDS, err := common.ValidateAccessNodeIDSFlag(accessNodeIDS, node.RootChainID, node.State.Sealed()) if err != nil { return fmt.Errorf("failed to validate flag --access-node-ids %w", err) } - flowClientConfigs, err = common.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed()) + flowClientConfigs, err = grpcclient.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed()) if err != nil { return fmt.Errorf("failed to prepare flow client connection configs for each access node id %w", err) } return nil }). 
+ Module("updatable collection rate limiting config", func(node *cmd.NodeConfig) error { + setter := updatable_configs.DefaultBySealingLagRateLimiterConfigs() + + // update the getter with the setter, so other modules can only get, but not set + bySealingLagRateLimiterConfigGetter = setter + + // admin tool is the only instance that have access to the setter interface, therefore, is + // the only module can change this config + err = node.ConfigManager.RegisterUintConfig("collection-builder-rate-limiter-min-sealing-lag", + setter.MinSealingLag, + setter.SetMinSealingLag) + if err != nil { + return err + } + err = node.ConfigManager.RegisterUintConfig("collection-builder-rate-limiter-max-sealing-lag", + setter.MaxSealingLag, + setter.SetMaxSealingLag) + if err != nil { + return err + } + err = node.ConfigManager.RegisterUintConfig("collection-builder-rate-limiter-halving-interval", + setter.HalvingInterval, + setter.SetHalvingInterval) + if err != nil { + return err + } + err = node.ConfigManager.RegisterUintConfig("collection-builder-rate-limiter-min-collection-size", + setter.MinCollectionSize, + setter.SetMinCollectionSize) + if err != nil { + return err + } + + // report the initial config value + colMetrics.CollectionMaxSize(maxCollectionSize) + return nil + }). Component("machine account config validator", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // @TODO use fallback logic for flowClient similar to DKG/QC contract clients - flowClient, err := common.FlowClient(flowClientConfigs[0]) + flowClient, err := grpcclient.FlowClient(flowClientConfigs[0]) if err != nil { return nil, fmt.Errorf("failed to get flow client connection option for access node (0): %s %w", flowClientConfigs[0].AccessAddress, err) } @@ -255,6 +361,7 @@ func main() { flowClient, flow.RoleCollection, *machineAccountInfo, + machineAccountMetrics, opts..., ) @@ -271,7 +378,7 @@ func main() { Component("follower core", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // create a finalizer for updating the protocol // state when the follower detects newly finalized blocks - finalizer := confinalizer.NewFinalizer(node.DB, node.Storage.Headers, followerState, node.Tracer) + finalizer := confinalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, followerState, node.Tracer) finalized, pending, err := recovery.FindLatest(node.State, node.Storage.Headers) if err != nil { return nil, fmt.Errorf("could not find latest finalized block and pending blocks to recover consensus follower: %w", err) @@ -279,10 +386,11 @@ func main() { // creates a consensus follower with noop consumer as the notifier followerCore, err = consensus.NewFollower( node.Logger, + node.Metrics.Mempool, node.Storage.Headers, finalizer, followerDistributor, - node.RootBlock.Header, + node.FinalizedRootBlock.ToHeader(), node.RootQC, finalized, pending, @@ -321,44 +429,51 @@ func main() { followerEng, err = followereng.NewComplianceLayer( node.Logger, - node.Network, + node.EngineRegistry, node.Me, node.Metrics.Engine, node.Storage.Headers, - node.FinalizedHeader, + node.LastFinalizedHeader, core, - followereng.WithComplianceConfigOpt(modulecompliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold)), + node.ComplianceConfig, ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) } + followerDistributor.AddOnBlockFinalizedConsumer(followerEng.OnFinalizedBlock) return followerEng, nil }). 
Component("main chain sync engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + spamConfig, err := consync.NewSpamDetectionConfig() + if err != nil { + return nil, fmt.Errorf("could not initialize spam detection config: %w", err) + } // create a block synchronization engine to handle follower getting out of sync sync, err := consync.New( node.Logger, node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, node.State, node.Storage.Blocks, followerEng, mainChainSyncCore, node.SyncEngineIdentifierProvider, + spamConfig, ) if err != nil { return nil, fmt.Errorf("could not create synchronization engine: %w", err) } + followerDistributor.AddFinalizationConsumer(sync) return sync, nil }). Component("ingestion engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { ing, err = ingest.New( node.Logger, - node.Network, + node.EngineRegistry, node.State, node.Metrics.Engine, node.Metrics.Mempool, @@ -367,10 +482,18 @@ func main() { node.RootChainID.Chain(), pools, ingestConf, + addressRateLimiter, ) return ing, err }). Component("transaction ingress rpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + // maintain backwards compatibility with the deprecated flag + if rpcConf.DeprecatedMaxMsgSize != 0 { + node.Logger.Warn().Msg("A deprecated flag was specified (--rpc-max-message-size). Use --rpc-max-request-message-size and --rpc-max-response-message-size instead. This flag will be removed in a future release.") + rpcConf.MaxRequestMsgSize = rpcConf.DeprecatedMaxMsgSize + rpcConf.MaxResponseMsgSize = rpcConf.DeprecatedMaxMsgSize + } + server := rpc.New( rpcConf, ing, @@ -394,17 +517,17 @@ func main() { collectionRequestQueue := queue.NewHeroStore(maxCollectionRequestCacheSize, node.Logger, collectionRequestMetrics) return provider.New( - node.Logger, + node.Logger.With().Str("entity", "collection").Logger(), node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, node.State, collectionRequestQueue, collectionProviderWorkers, channels.ProvideCollections, filter.And( - filter.HasWeight(true), - filter.HasRole(flow.RoleAccess, flow.RoleExecution), + filter.IsValidCurrentEpochParticipantOrJoining, + filter.HasRole[flow.Identity](flow.RoleAccess, flow.RoleExecution), ), retrieve, ) @@ -412,20 +535,18 @@ func main() { Component("pusher engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { push, err = pusher.New( node.Logger, - node.Network, + node.EngineRegistry, node.State, node.Metrics.Engine, - colMetrics, + node.Metrics.Mempool, node.Me, - node.Storage.Collections, - node.Storage.Transactions, ) return push, err }). 
// Epoch manager encapsulates and manages epoch-dependent engines as we // transition between epochs Component("epoch manager", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - clusterStateFactory, err := factories.NewClusterStateFactory(node.DB, node.Metrics.Cache, node.Tracer) + clusterStateFactory, err := factories.NewClusterStateFactory(node.ProtocolDB, node.StorageLockMgr, node.Metrics.Cache, node.Tracer) if err != nil { return nil, err } @@ -437,14 +558,23 @@ func main() { unlimitedPayers = append(unlimitedPayers, payerAddr) } + // convert hex string flag values to addresses + priorityPayers := make([]flow.Address, 0, len(builderPriorityPayers)) + for _, payerStr := range builderPriorityPayers { + payerAddr := flow.HexToAddress(payerStr) + priorityPayers = append(priorityPayers, payerAddr) + } + builderFactory, err := factories.NewBuilderFactory( - node.DB, + node.ProtocolDB, node.State, + node.StorageLockMgr, node.Storage.Headers, node.Tracer, colMetrics, push, node.Logger, + bySealingLagRateLimiterConfigGetter, builder.WithMaxCollectionSize(maxCollectionSize), builder.WithMaxCollectionByteSize(maxCollectionByteSize), builder.WithMaxCollectionTotalGas(maxCollectionTotalGas), @@ -452,6 +582,7 @@ func main() { builder.WithRateLimitDryRun(builderPayerRateLimitDryRun), builder.WithMaxPayerTransactionRate(builderPayerRateLimit), builder.WithUnlimitedPayers(unlimitedPayers...), + builder.WithPriorityPayers(priorityPayers...), ) if err != nil { return nil, err @@ -459,14 +590,13 @@ func main() { complianceEngineFactory, err := factories.NewComplianceEngineFactory( node.Logger, - node.Network, + node.EngineRegistry, node.Me, colMetrics, node.Metrics.Engine, node.Metrics.Mempool, node.State, - node.Storage.Transactions, - modulecompliance.WithSkipNewProposalsThreshold(clusterComplianceConfig.SkipNewProposalsThreshold), + clusterComplianceConfig, ) if err != nil { return nil, err @@ -480,7 +610,7 @@ func main() { syncFactory, err := factories.NewSyncEngineFactory( node.Logger, node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, ) if err != nil { @@ -492,7 +622,7 @@ func main() { } opts := []consensus.Option{ - consensus.WithBlockRateDelay(blockRateDelay), + consensus.WithStaticProposalDuration(hotstuffProposalDuration), consensus.WithMinTimeout(hotstuffMinTimeout), consensus.WithTimeoutAdjustmentFactor(hotstuffTimeoutAdjustmentFactor), consensus.WithHappyPathMaxRoundFailures(hotstuffHappyPathMaxRoundFailures), @@ -505,7 +635,8 @@ func main() { hotstuffFactory, err := factories.NewHotStuffFactory( node.Logger, node.Me, - node.DB, + node.ProtocolDB, + node.StorageLockMgr, node.State, node.Metrics.Engine, node.Metrics.Mempool, @@ -534,7 +665,7 @@ func main() { messageHubFactory := factories.NewMessageHubFactory( node.Logger, - node.Network, + node.EngineRegistry, node.Me, node.Metrics.Engine, node.State, @@ -555,6 +686,8 @@ func main() { heightEvents := gadgets.NewHeights() node.ProtocolEvents.AddConsumer(heightEvents) + clusterEvents := events.NewDistributor() + manager, err := epochmgr.New( node.Logger, node.Me, @@ -563,6 +696,7 @@ func main() { rootQCVoter, factory, heightEvents, + clusterEvents, ) if err != nil { return nil, fmt.Errorf("could not create epoch manager: %w", err) @@ -570,7 +704,7 @@ func main() { // register the manager for protocol events node.ProtocolEvents.AddConsumer(manager) - + clusterEvents.AddConsumer(node.LibP2PNode) return manager, err }) @@ -578,7 +712,7 @@ func main() { if err != nil { nodeBuilder.Logger.Fatal().Err(err).Send() } - 
node.Run() + node.Run(context.Background()) } // createQCContractClient creates QC contract client @@ -586,10 +720,7 @@ func createQCContractClient(node *cmd.NodeConfig, machineAccountInfo *bootstrap. var qcContractClient module.QCContractClient - contracts, err := systemcontracts.SystemContractsForChain(node.RootChainID) - if err != nil { - return nil, err - } + contracts := systemcontracts.SystemContractsForChain(node.RootChainID) qcContractAddress := contracts.ClusterQC.Address.Hex() // construct signer from private key @@ -604,17 +735,26 @@ func createQCContractClient(node *cmd.NodeConfig, machineAccountInfo *bootstrap. } // create actual qc contract client, all flags and machine account info file found - qcContractClient = epochs.NewQCContractClient(node.Logger, flowClient, anID, node.Me.NodeID(), machineAccountInfo.Address, machineAccountInfo.KeyIndex, qcContractAddress, txSigner) + qcContractClient = epochs.NewQCContractClient( + node.Logger, + flowClient, + anID, + node.Me.NodeID(), + machineAccountInfo.Address, + machineAccountInfo.KeyIndex, + qcContractAddress, + txSigner, + ) return qcContractClient, nil } // createQCContractClients creates priority ordered array of QCContractClient -func createQCContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*common.FlowClientConfig) ([]module.QCContractClient, error) { +func createQCContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*grpcclient.FlowClientConfig) ([]module.QCContractClient, error) { qcClients := make([]module.QCContractClient, 0) for _, opt := range flowClientOpts { - flowClient, err := common.FlowClient(opt) + flowClient, err := grpcclient.FlowClient(opt) if err != nil { return nil, fmt.Errorf("failed to create flow client for qc contract client with options: %s %w", flowClientOpts, err) } diff --git a/cmd/consensus/README.md b/cmd/consensus/README.md index 30cec968536..810ca67ce34 100644 --- a/cmd/consensus/README.md +++ b/cmd/consensus/README.md @@ -41,7 +41,7 @@ This document provides a high-level overview of the consensus node architecture. - **Seal**, also _Block Seal_ - an attestation of correct execution of a block in the blockchain, built by the consensus node after receiving the necessary execution receipts and result approvals. - **Header**, also _Block Header_ - a data structure containing the meta-data for a block, including the merkle root hash for the payload as well as the relevant consensus node signatures. - **Payload**, also _Block Payload_ - a list of entities included in a block, currently consisting of collection guarantees and block seals. -- **Index**, also _Payload Index_ - a list of entitie IDs included in a block, currently consising of a list of collection guarantee IDs and block seal IDs. +- **Index**, also _Payload Index_ - a list of entity IDs included in a block, currently consisting of a list of collection guarantee IDs and block seal IDs. - **Block** - the combination of a block header with a block contents, representing all of the data necessary to construct and validate the entirety of the block. @@ -106,7 +106,7 @@ When a block proposal is received, consensus node will first try to assemble all The `synchronization` engine is responsible for reactive synchronization of consensus nodes about the protocol state. -At regular interval, it will send synchronization requests (pings) to a random subset of consensus nodes, and receive synchonization responses (pongs) in return. 
If it detects a difference in finalized block height above a certain threshold, it will request the missing block heights. +At regular interval, it will send synchronization requests (pings) to a random subset of consensus nodes, and receive synchronization responses (pongs) in return. If it detects a difference in finalized block height above a certain threshold, it will request the missing block heights. Additionally, the synchronization engine provides the possibility to request blocks by specific identifier. This is used by the compliance engine to actively request missing blocks that are needed for the validation of another block. diff --git a/cmd/consensus/main.go b/cmd/consensus/main.go index b62d13c1172..2522e2c56f4 100644 --- a/cmd/consensus/main.go +++ b/cmd/consensus/main.go @@ -1,8 +1,7 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package main import ( + "context" "encoding/json" "errors" "fmt" @@ -14,12 +13,14 @@ import ( client "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go-sdk/crypto" + "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/consensus" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/blockproducer" "github.com/onflow/flow-go/consensus/hotstuff/committees" + "github.com/onflow/flow-go/consensus/hotstuff/cruisectl" "github.com/onflow/flow-go/consensus/hotstuff/notifications" "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" "github.com/onflow/flow-go/consensus/hotstuff/pacemaker/timeout" @@ -48,10 +49,10 @@ import ( builder "github.com/onflow/flow-go/module/builder/consensus" "github.com/onflow/flow-go/module/chainsync" chmodule "github.com/onflow/flow-go/module/chunks" - modulecompliance "github.com/onflow/flow-go/module/compliance" dkgmodule "github.com/onflow/flow-go/module/dkg" "github.com/onflow/flow-go/module/epochs" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" + "github.com/onflow/flow-go/module/grpcclient" "github.com/onflow/flow-go/module/mempool" consensusMempools "github.com/onflow/flow-go/module/mempool/consensus" "github.com/onflow/flow-go/module/mempool/stdmap" @@ -64,8 +65,9 @@ import ( "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" + "github.com/onflow/flow-go/state/protocol/datastore" "github.com/onflow/flow-go/state/protocol/events/gadgets" - "github.com/onflow/flow-go/storage" + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" bstorage "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/utils/io" ) @@ -73,59 +75,64 @@ import ( func main() { var ( - guaranteeLimit uint - resultLimit uint - approvalLimit uint - sealLimit uint - pendingReceiptsLimit uint - minInterval time.Duration - maxInterval time.Duration - maxSealPerBlock uint - maxGuaranteePerBlock uint - hotstuffMinTimeout time.Duration - hotstuffTimeoutAdjustmentFactor float64 - hotstuffHappyPathMaxRoundFailures uint64 - blockRateDelay time.Duration - chunkAlpha uint - requiredApprovalsForSealVerification uint - requiredApprovalsForSealConstruction uint - emergencySealing bool - dkgControllerConfig dkgmodule.ControllerConfig - dkgMessagingEngineConfig = dkgeng.DefaultMessagingEngineConfig() - startupTimeString string - startupTime time.Time + guaranteeLimit uint + resultLimit uint + approvalLimit uint + sealLimit uint + pendingReceiptsLimit uint + minInterval 
time.Duration + maxInterval time.Duration + maxSealPerBlock uint + maxGuaranteePerBlock uint + hotstuffMinTimeout time.Duration + hotstuffTimeoutAdjustmentFactor float64 + hotstuffHappyPathMaxRoundFailures uint64 + chunkAlpha uint + requiredApprovalsForSealVerification uint + requiredApprovalsForSealConstruction uint + emergencySealing bool + dkgMessagingEngineConfig = dkgeng.DefaultMessagingEngineConfig() + cruiseCtlConfig = cruisectl.DefaultConfig() + cruiseCtlFallbackProposalDurationFlag time.Duration + cruiseCtlMinViewDurationFlag time.Duration + cruiseCtlMaxViewDurationFlag time.Duration + cruiseCtlEnabledFlag bool + startupTimeString string + startupTime time.Time // DKG contract client machineAccountInfo *bootstrap.NodeMachineAccountInfo - flowClientConfigs []*common.FlowClientConfig + flowClientConfigs []*grpcclient.FlowClientConfig insecureAccessAPI bool accessNodeIDS []string - err error - mutableState protocol.ParticipantState - beaconPrivateKey *encodable.RandomBeaconPrivKey - guarantees mempool.Guarantees - receipts mempool.ExecutionTree - seals mempool.IncorporatedResultSeals - pendingReceipts mempool.PendingReceipts - receiptRequester *requester.Engine - syncCore *chainsync.Core - comp *compliance.Engine - hot module.HotStuff - conMetrics module.ConsensusMetrics - mainMetrics module.HotstuffMetrics - receiptValidator module.ReceiptValidator - chunkAssigner *chmodule.ChunkAssigner - followerDistributor *pubsub.FollowerDistributor - dkgBrokerTunnel *dkgmodule.BrokerTunnel - blockTimer protocol.BlockTimer - committee *committees.Consensus - epochLookup *epochs.EpochLookup - hotstuffModules *consensus.HotstuffModules - dkgState *bstorage.DKGState - safeBeaconKeys *bstorage.SafeBeaconPrivateKeys - getSealingConfigs module.SealingConfigsGetter + err error + mutableState protocol.ParticipantState + beaconPrivateKey *encodable.RandomBeaconPrivKey + guarantees mempool.Guarantees + receipts mempool.ExecutionTree + seals mempool.IncorporatedResultSeals + pendingReceipts mempool.PendingReceipts + receiptRequester *requester.Engine + syncCore *chainsync.Core + comp *compliance.Engine + hot module.HotStuff + conMetrics module.ConsensusMetrics + machineAccountMetrics module.MachineAccountMetrics + mainMetrics module.HotstuffMetrics + receiptValidator module.ReceiptValidator + chunkAssigner *chmodule.ChunkAssigner + followerDistributor *pubsub.FollowerDistributor + dkgBrokerTunnel *dkgmodule.BrokerTunnel + blockTimer protocol.BlockTimer + proposalDurProvider hotstuff.ProposalDurationProvider + committee *committees.Consensus + epochLookup *epochs.EpochLookup + hotstuffModules *consensus.HotstuffModules + myBeaconKeyStateMachine *bstorage.RecoverablePrivateBeaconKeyStateMachine + getSealingConfigs module.SealingConfigsGetter ) + var deprecatedFlagBlockRateDelay time.Duration nodeBuilder := cmd.FlowNode(flow.RoleConsensus.String()) nodeBuilder.ExtraFlags(func(flags *pflag.FlagSet) { @@ -140,23 +147,24 @@ func main() { flags.DurationVar(&maxInterval, "max-interval", 90*time.Second, "the maximum amount of time between two blocks") flags.UintVar(&maxSealPerBlock, "max-seal-per-block", 100, "the maximum number of seals to be included in a block") flags.UintVar(&maxGuaranteePerBlock, "max-guarantee-per-block", 100, "the maximum number of collection guarantees to be included in a block") - flags.DurationVar(&hotstuffMinTimeout, "hotstuff-min-timeout", 2500*time.Millisecond, "the lower timeout bound for the hotstuff pacemaker, this is also used as initial timeout") + 
flags.DurationVar(&hotstuffMinTimeout, "hotstuff-min-timeout", 1045*time.Millisecond, "the lower timeout bound for the hotstuff pacemaker, this is also used as initial timeout") flags.Float64Var(&hotstuffTimeoutAdjustmentFactor, "hotstuff-timeout-adjustment-factor", timeout.DefaultConfig.TimeoutAdjustmentFactor, "adjustment of timeout duration in case of time out event") flags.Uint64Var(&hotstuffHappyPathMaxRoundFailures, "hotstuff-happy-path-max-round-failures", timeout.DefaultConfig.HappyPathMaxRoundFailures, "number of failed rounds before first timeout increase") - flags.DurationVar(&blockRateDelay, "block-rate-delay", 500*time.Millisecond, "the delay to broadcast block proposal in order to control block production rate") + flags.DurationVar(&cruiseCtlFallbackProposalDurationFlag, "cruise-ctl-fallback-proposal-duration", cruiseCtlConfig.FallbackProposalDelay.Load(), "the proposal duration value to use when the controller is disabled, or in epoch fallback mode. In those modes, this value has the same as the old `--block-rate-delay`") + flags.DurationVar(&cruiseCtlMinViewDurationFlag, "cruise-ctl-min-view-duration", cruiseCtlConfig.MinViewDuration.Load(), "the lower bound of authority for the controller, when active. This is the smallest amount of time a view is allowed to take.") + flags.DurationVar(&cruiseCtlMaxViewDurationFlag, "cruise-ctl-max-view-duration", cruiseCtlConfig.MaxViewDuration.Load(), "the upper bound of authority for the controller when active. This is the largest amount of time a view is allowed to take.") + flags.BoolVar(&cruiseCtlEnabledFlag, "cruise-ctl-enabled", cruiseCtlConfig.Enabled.Load(), "whether the block time controller is enabled; when disabled, the FallbackProposalDelay is used") flags.UintVar(&chunkAlpha, "chunk-alpha", flow.DefaultChunkAssignmentAlpha, "number of verifiers that should be assigned to each chunk") flags.UintVar(&requiredApprovalsForSealVerification, "required-verification-seal-approvals", flow.DefaultRequiredApprovalsForSealValidation, "minimum number of approvals that are required to verify a seal") flags.UintVar(&requiredApprovalsForSealConstruction, "required-construction-seal-approvals", flow.DefaultRequiredApprovalsForSealConstruction, "minimum number of approvals that are required to construct a seal") flags.BoolVar(&emergencySealing, "emergency-sealing-active", flow.DefaultEmergencySealingActive, "(de)activation of emergency sealing") flags.BoolVar(&insecureAccessAPI, "insecure-access-api", false, "required if insecure GRPC connection should be used") flags.StringSliceVar(&accessNodeIDS, "access-node-ids", []string{}, fmt.Sprintf("array of access node IDs sorted in priority order where the first ID in this array will get the first connection attempt and each subsequent ID after serves as a fallback. Minimum length %d. Use '*' for all IDs in protocol state.", common.DefaultAccessNodeIDSMinimum)) - flags.DurationVar(&dkgControllerConfig.BaseStartDelay, "dkg-controller-base-start-delay", dkgmodule.DefaultBaseStartDelay, "used to define the range for jitter prior to DKG start (eg. 500µs) - the base value is scaled quadratically with the # of DKG participants") - flags.DurationVar(&dkgControllerConfig.BaseHandleFirstBroadcastDelay, "dkg-controller-base-handle-first-broadcast-delay", dkgmodule.DefaultBaseHandleFirstBroadcastDelay, "used to define the range for jitter prior to DKG handling the first broadcast messages (eg. 
50ms) - the base value is scaled quadratically with the # of DKG participants") - flags.DurationVar(&dkgControllerConfig.HandleSubsequentBroadcastDelay, "dkg-controller-handle-subsequent-broadcast-delay", dkgmodule.DefaultHandleSubsequentBroadcastDelay, "used to define the constant delay introduced prior to DKG handling subsequent broadcast messages (eg. 2s)") flags.DurationVar(&dkgMessagingEngineConfig.RetryBaseWait, "dkg-messaging-engine-retry-base-wait", dkgMessagingEngineConfig.RetryBaseWait, "the inter-attempt wait time for the first attempt (base of exponential retry)") flags.Uint64Var(&dkgMessagingEngineConfig.RetryMax, "dkg-messaging-engine-retry-max", dkgMessagingEngineConfig.RetryMax, "the maximum number of retry attempts for an outbound DKG message") flags.Uint64Var(&dkgMessagingEngineConfig.RetryJitterPercent, "dkg-messaging-engine-retry-jitter-percent", dkgMessagingEngineConfig.RetryJitterPercent, "the percentage of jitter to apply to each inter-attempt wait time") flags.StringVar(&startupTimeString, "hotstuff-startup-time", cmd.NotSet, "specifies date and time (in ISO 8601 format) after which the consensus participant may enter the first view (e.g 1996-04-24T15:04:05-07:00)") + flags.DurationVar(&deprecatedFlagBlockRateDelay, "block-rate-delay", 0, "[deprecated in v0.30; Jun 2023] Use `cruise-ctl-*` flags instead, this flag has no effect and will eventually be removed") }).ValidateFlags(func() error { nodeBuilder.Logger.Info().Str("startup_time_str", startupTimeString).Msg("got startup_time_str") if startupTimeString != cmd.NotSet { @@ -167,6 +175,23 @@ func main() { startupTime = t nodeBuilder.Logger.Info().Time("startup_time", startupTime).Msg("got startup_time") } + // convert local flag variables to atomic config variables, for dynamically updatable fields + if cruiseCtlEnabledFlag != cruiseCtlConfig.Enabled.Load() { + cruiseCtlConfig.Enabled.Store(cruiseCtlEnabledFlag) + } + if cruiseCtlFallbackProposalDurationFlag != cruiseCtlConfig.FallbackProposalDelay.Load() { + cruiseCtlConfig.FallbackProposalDelay.Store(cruiseCtlFallbackProposalDurationFlag) + } + if cruiseCtlMinViewDurationFlag != cruiseCtlConfig.MinViewDuration.Load() { + cruiseCtlConfig.MinViewDuration.Store(cruiseCtlMinViewDurationFlag) + } + if cruiseCtlMaxViewDurationFlag != cruiseCtlConfig.MaxViewDuration.Load() { + cruiseCtlConfig.MaxViewDuration.Store(cruiseCtlMaxViewDurationFlag) + } + // log a warning about deprecated flags + if deprecatedFlagBlockRateDelay > 0 { + nodeBuilder.Logger.Warn().Msg("A deprecated flag was specified (--block-rate-delay). This flag is deprecated as of v0.30 (Jun 2023), has no effect, and will eventually be removed.") + } return nil }) @@ -176,19 +201,27 @@ func main() { nodeBuilder. PreInit(cmd.DynamicStartPreInit). - ValidateRootSnapshot(badgerState.ValidRootSnapshotContainsEntityExpiryRange). + ValidateRootSnapshot(datastore.ValidRootSnapshotContainsEntityExpiryRange). + Module("machine account config", func(node *cmd.NodeConfig) error { + machineAccountInfo, err = cmd.LoadNodeMachineAccountInfoFile(node.BootstrapDir, node.NodeID) + return err + }). Module("consensus node metrics", func(node *cmd.NodeConfig) error { conMetrics = metrics.NewConsensusCollector(node.Tracer, node.MetricsRegisterer) return nil }). + Module("machine account metrics", func(node *cmd.NodeConfig) error { + machineAccountMetrics = metrics.NewMachineAccountCollector(node.MetricsRegisterer, machineAccountInfo.FlowAddress()) + return nil + }). 
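The ValidateFlags hook above copies plain CLI flag values into the atomic fields of cruiseCtlConfig, so the same fields can later be mutated at runtime by admin commands while consensus goroutines keep reading them lock-free. A minimal sketch of that pattern, assuming the config wraps go.uber.org/atomic values; the Config type and methods below are hypothetical stand-ins, not the actual cruisectl API:

package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

// Config holds dynamically updatable settings; atomic fields let admin
// commands mutate them while other goroutines read them concurrently.
type Config struct {
	Enabled               *atomic.Bool
	FallbackProposalDelay *atomic.Duration
}

// SetFallbackProposalDuration validates and stores a new value; readers
// observe it on their next Load without any additional locking.
func (c *Config) SetFallbackProposalDuration(d time.Duration) error {
	if d < 0 {
		return fmt.Errorf("proposal duration must be non-negative, got %s", d)
	}
	c.FallbackProposalDelay.Store(d)
	return nil
}

func main() {
	cfg := Config{
		Enabled:               atomic.NewBool(true),
		FallbackProposalDelay: atomic.NewDuration(250 * time.Millisecond),
	}
	// Mirror of the flag-to-config copy performed in ValidateFlags above.
	flagValue := 500 * time.Millisecond
	if flagValue != cfg.FallbackProposalDelay.Load() {
		cfg.FallbackProposalDelay.Store(flagValue)
	}
	fmt.Println(cfg.FallbackProposalDelay.Load()) // 500ms
}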
Module("dkg state", func(node *cmd.NodeConfig) error { - dkgState, err = bstorage.NewDKGState(node.Metrics.Cache, node.SecretsDB) + myBeaconKeyStateMachine, err = bstorage.NewRecoverableRandomBeaconStateMachine( + node.Metrics.Cache, + node.SecretsDB, + node.NodeID, + ) return err }). - Module("beacon keys", func(node *cmd.NodeConfig) error { - safeBeaconKeys = bstorage.NewSafeBeaconPrivateKeys(dkgState) - return nil - }). Module("updatable sealing config", func(node *cmd.NodeConfig) error { setter, err := updatable_configs.NewSealingConfigs( requiredApprovalsForSealConstruction, @@ -240,7 +273,7 @@ func main() { getSealingConfigs, conMetrics) - blockTimer, err = blocktimer.NewBlockTimer(minInterval, maxInterval) + blockTimer, err = blocktimer.NewBlockTimer(uint64(minInterval.Milliseconds()), uint64(maxInterval.Milliseconds())) if err != nil { return err } @@ -270,7 +303,7 @@ func main() { // their first beacon private key through the DKG in the EpochSetup phase // prior to their first epoch as network participant). - rootSnapshot := node.State.AtBlockID(node.RootBlock.ID()) + rootSnapshot := node.State.AtBlockID(node.FinalizedRootBlock.ID()) isSporkRoot, err := protocol.IsSporkRootSnapshot(rootSnapshot) if err != nil { return fmt.Errorf("could not check whether root snapshot is spork root: %w", err) @@ -290,11 +323,11 @@ func main() { return fmt.Errorf("could not load beacon key file: %w", err) } - rootEpoch := node.State.AtBlockID(node.RootBlock.ID()).Epochs().Current() - epochCounter, err := rootEpoch.Counter() + rootEpoch, err := rootSnapshot.Epochs().Current() if err != nil { - return fmt.Errorf("could not get root epoch counter: %w", err) + return fmt.Errorf("could not get root epoch: %w", err) } + rootEpochCounter := rootEpoch.Counter() // confirm the beacon key file matches the canonical public keys rootDKG, err := rootEpoch.DKG() @@ -312,22 +345,37 @@ func main() { myBeaconPublicKeyShare) } - // store my beacon key for the first epoch post-spork - err = dkgState.InsertMyBeaconPrivateKey(epochCounter, beaconPrivateKey.PrivateKey) - if err != nil && !errors.Is(err, storage.ErrAlreadyExists) { - return err + // store my beacon key for the first epoch post-spork (only if we haven't run this logic before, i.e. state machine is in initial state) + started, err := myBeaconKeyStateMachine.IsDKGStarted(rootEpochCounter) + if err != nil { + return fmt.Errorf("could not get DKG started flag for root epoch %d: %w", rootEpochCounter, err) } - // mark the root DKG as successful, so it is considered safe to use the key - err = dkgState.SetDKGEndState(epochCounter, flow.DKGEndStateSuccess) - if err != nil && !errors.Is(err, storage.ErrAlreadyExists) { - return err + if !started { + // store my beacon key for the first epoch post-spork + epochProtocolState, err := rootSnapshot.EpochProtocolState() + if err != nil { + return fmt.Errorf("could not get epoch protocol state for root snapshot: %w", err) + } + err = myBeaconKeyStateMachine.UpsertMyBeaconPrivateKey(rootEpochCounter, beaconPrivateKey.PrivateKey, epochProtocolState.EpochCommit()) + if err != nil { + return fmt.Errorf("could not upsert my beacon private key for root epoch %d: %w", rootEpochCounter, err) + } } return nil }). 
+ Module("my beacon key epoch recovery", func(node *cmd.NodeConfig) error { + myBeaconKeyRecovery, err := dkgmodule.NewBeaconKeyRecovery(node.Logger, node.Me, node.State, myBeaconKeyStateMachine) + if err != nil { + return fmt.Errorf("could not initialize my beacon key epoch recovery: %w", err) + } + // subscribe for protocol events to handle exiting EFM + node.ProtocolEvents.AddConsumer(myBeaconKeyRecovery) + return nil + }). Module("collection guarantees mempool", func(node *cmd.NodeConfig) error { - guarantees, err = stdmap.NewGuarantees(guaranteeLimit) - return err + guarantees = stdmap.NewGuarantees(guaranteeLimit) + return nil }). Module("execution receipts mempool", func(node *cmd.NodeConfig) error { receipts = consensusMempools.NewExecutionTree() @@ -343,10 +391,12 @@ func main() { // the chain of seals rawMempool := stdmap.NewIncorporatedResultSeals(sealLimit) multipleReceiptsFilterMempool := consensusMempools.NewIncorporatedResultSeals(rawMempool, node.Storage.Receipts) + seals, err = consensusMempools.NewExecStateForkSuppressor( multipleReceiptsFilterMempool, consensusMempools.LogForkAndCrash(node.Logger), - node.DB, + node.ProtocolDB, + node.StorageLockMgr, node.Logger, ) if err != nil { @@ -371,17 +421,13 @@ func main() { followerDistributor = pubsub.NewFollowerDistributor() return nil }). - Module("machine account config", func(node *cmd.NodeConfig) error { - machineAccountInfo, err = cmd.LoadNodeMachineAccountInfoFile(node.BootstrapDir, node.NodeID) - return err - }). Module("sdk client connection options", func(node *cmd.NodeConfig) error { anIDS, err := common.ValidateAccessNodeIDSFlag(accessNodeIDS, node.RootChainID, node.State.Sealed()) if err != nil { return fmt.Errorf("failed to validate flag --access-node-ids %w", err) } - flowClientConfigs, err = common.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed()) + flowClientConfigs, err = grpcclient.FlowClientConfigs(anIDS, insecureAccessAPI, node.State.Sealed()) if err != nil { return fmt.Errorf("failed to prepare flow client connection configs for each access node id %w", err) } @@ -390,7 +436,7 @@ func main() { }). Component("machine account config validator", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // @TODO use fallback logic for flowClient similar to DKG/QC contract clients - flowClient, err := common.FlowClient(flowClientConfigs[0]) + flowClient, err := grpcclient.FlowClient(flowClientConfigs[0]) if err != nil { return nil, fmt.Errorf("failed to get flow client connection option for access node (0): %s %w", flowClientConfigs[0].AccessAddress, err) } @@ -403,8 +449,9 @@ func main() { validator, err := epochs.NewMachineAccountConfigValidator( node.Logger, flowClient, - flow.RoleCollection, + flow.RoleConsensus, *machineAccountInfo, + machineAccountMetrics, opts..., ) return validator, err @@ -420,7 +467,7 @@ func main() { node.Metrics.Engine, node.Metrics.Mempool, sealingTracker, - node.Network, + node.EngineRegistry, node.Me, node.Storage.Headers, node.Storage.Payloads, @@ -432,6 +479,9 @@ func main() { seals, getSealingConfigs, ) + if err != nil { + return nil, fmt.Errorf("could not initialize sealing engine: %w", err) + } // subscribe for finalization events from hotstuff followerDistributor.AddOnBlockFinalizedConsumer(e.OnFinalizedBlock) @@ -441,14 +491,14 @@ func main() { }). 
Component("matching engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { receiptRequester, err = requester.New( - node.Logger, + node.Logger.With().Str("entity", "receipt").Logger(), node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, node.State, channels.RequestReceiptsByBlockID, - filter.HasRole(flow.RoleExecution), - func() flow.Entity { return &flow.ExecutionReceipt{} }, + filter.HasRole[flow.Identity](flow.RoleExecution), + func() flow.Entity { return new(flow.ExecutionReceipt) }, requester.WithRetryInitial(2*time.Second), requester.WithRetryMaximum(30*time.Second), ) @@ -474,7 +524,7 @@ func main() { e, err := matching.NewEngine( node.Logger, - node.Network, + node.EngineRegistry, node.Me, node.Metrics.Engine, node.Metrics.Mempool, @@ -507,7 +557,7 @@ func main() { ing, err := ingestion.New( node.Logger, node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, core, ) @@ -527,7 +577,7 @@ func main() { Component("hotstuff modules", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // initialize the block finalizer finalize := finalizer.NewFinalizer( - node.DB, + node.ProtocolDB.Reader(), node.Storage.Headers, mutableState, node.Tracer, @@ -543,7 +593,7 @@ func main() { // wrap Main consensus committee with metrics wrappedCommittee := committees.NewMetricsWrapper(committee, mainMetrics) // wrapper for measuring time spent determining consensus committee relations - beaconKeyStore := hotsignature.NewEpochAwareRandomBeaconKeyStore(epochLookup, safeBeaconKeys) + beaconKeyStore := hotsignature.NewEpochAwareRandomBeaconKeyStore(epochLookup, myBeaconKeyStateMachine) // initialize the combined signer for hotstuff var signer hotstuff.Signer @@ -567,10 +617,15 @@ func main() { ) notifier.AddParticipantConsumer(telemetryConsumer) + notifier.AddCommunicatorConsumer(telemetryConsumer) + notifier.AddFinalizationConsumer(telemetryConsumer) notifier.AddFollowerConsumer(followerDistributor) // initialize the persister - persist := persister.New(node.DB, node.RootChainID) + persist, err := persister.New(node.ProtocolDB, node.RootChainID, node.StorageLockMgr) + if err != nil { + return nil, err + } finalizedBlock, err := node.State.Final().Head() if err != nil { @@ -582,7 +637,7 @@ func main() { node.Storage.Headers, finalize, notifier, - node.RootBlock.Header, + node.FinalizedRootBlock.ToHeader(), node.RootQC, ) if err != nil { @@ -651,12 +706,55 @@ func main() { return util.MergeReadyDone(voteAggregator, timeoutAggregator), nil }). 
+ Component("block rate cruise control", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + livenessData, err := hotstuffModules.Persist.GetLivenessData() + if err != nil { + return nil, fmt.Errorf("could not load liveness data: %w", err) + } + ctl, err := cruisectl.NewBlockTimeController(node.Logger, metrics.NewCruiseCtlMetrics(), cruiseCtlConfig, node.State, livenessData.CurrentView) + if err != nil { + return nil, err + } + proposalDurProvider = ctl + hotstuffModules.Notifier.AddOnBlockIncorporatedConsumer(ctl.OnBlockIncorporated) + node.ProtocolEvents.AddConsumer(ctl) + + // set up admin commands for dynamically updating configs + err = node.ConfigManager.RegisterBoolConfig("cruise-ctl-enabled", cruiseCtlConfig.GetEnabled, cruiseCtlConfig.SetEnabled) + if err != nil { + return nil, err + } + err = node.ConfigManager.RegisterDurationConfig("cruise-ctl-fallback-proposal-duration", cruiseCtlConfig.GetFallbackProposalDuration, cruiseCtlConfig.SetFallbackProposalDuration) + if err != nil { + return nil, err + } + err = node.ConfigManager.RegisterDurationConfig("cruise-ctl-min-view-duration", cruiseCtlConfig.GetMinViewDuration, cruiseCtlConfig.SetMinViewDuration) + if err != nil { + return nil, err + } + err = node.ConfigManager.RegisterDurationConfig("cruise-ctl-max-view-duration", cruiseCtlConfig.GetMaxViewDuration, cruiseCtlConfig.SetMaxViewDuration) + if err != nil { + return nil, err + } + + return ctl, nil + }). Component("consensus participant", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + // create different epochs setups + mutableProtocolState := protocol_state.NewMutableProtocolState( + node.Logger, + node.Storage.EpochProtocolStateEntries, + node.Storage.ProtocolKVStore, + node.State.Params(), + node.Storage.Headers, + node.Storage.Results, + node.Storage.Setups, + node.Storage.EpochCommits, + ) // initialize the block builder var build module.Builder build, err = builder.NewBuilder( node.Metrics.Mempool, - node.DB, mutableState, node.Storage.Headers, node.Storage.Seals, @@ -664,6 +762,7 @@ func main() { node.Storage.Blocks, node.Storage.Results, node.Storage.Receipts, + mutableProtocolState, guarantees, seals, receipts, @@ -681,8 +780,7 @@ func main() { consensus.WithMinTimeout(hotstuffMinTimeout), consensus.WithTimeoutAdjustmentFactor(hotstuffTimeoutAdjustmentFactor), consensus.WithHappyPathMaxRoundFailures(hotstuffHappyPathMaxRoundFailures), - consensus.WithBlockRateDelay(blockRateDelay), - consensus.WithConfigRegistrar(node.ConfigManager), + consensus.WithProposalDurationProvider(proposalDurProvider), } if !startupTime.IsZero() { @@ -697,6 +795,7 @@ func main() { hot, err = consensus.NewParticipant( createLogger(node.Logger, node.RootChainID), mainMetrics, + node.Metrics.Mempool, build, finalizedBlock, pending, @@ -730,7 +829,7 @@ func main() { hot, hotstuffModules.VoteAggregator, hotstuffModules.TimeoutAggregator, - modulecompliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold), + node.ComplianceConfig, ) if err != nil { return nil, fmt.Errorf("could not initialize compliance core: %w", err) @@ -753,7 +852,7 @@ func main() { messageHub, err := message_hub.NewMessageHub( createLogger(node.Logger, node.RootChainID), node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, comp, hot, @@ -769,20 +868,27 @@ func main() { return messageHub, nil }). 
Component("sync engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + spamConfig, err := synceng.NewSpamDetectionConfig() + if err != nil { + return nil, fmt.Errorf("could not initialize spam detection config: %w", err) + } + sync, err := synceng.New( node.Logger, node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, node.State, node.Storage.Blocks, comp, syncCore, node.SyncEngineIdentifierProvider, + spamConfig, ) if err != nil { return nil, fmt.Errorf("could not initialize synchronization engine: %w", err) } + followerDistributor.AddFinalizationConsumer(sync) return sync, nil }). @@ -800,7 +906,7 @@ func main() { // exchange private DKG messages messagingEngine, err := dkgeng.NewMessagingEngine( node.Logger, - node.Network, + node.EngineRegistry, node.Me, dkgBrokerTunnel, node.Metrics.Mempool, @@ -830,13 +936,12 @@ func main() { node.Logger, node.Me, node.State, - dkgState, + myBeaconKeyStateMachine, dkgmodule.NewControllerFactory( node.Logger, node.Me, dkgContractClients, dkgBrokerTunnel, - dkgControllerConfig, ), viewsObserver, ) @@ -851,7 +956,7 @@ func main() { if err != nil { nodeBuilder.Logger.Fatal().Err(err).Send() } - node.Run() + node.Run(context.Background()) } func loadBeaconPrivateKey(dir string, myID flow.Identifier) (*encodable.RandomBeaconPrivKey, error) { @@ -873,10 +978,7 @@ func loadBeaconPrivateKey(dir string, myID flow.Identifier) (*encodable.RandomBe func createDKGContractClient(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClient *client.Client, anID flow.Identifier) (module.DKGContractClient, error) { var dkgClient module.DKGContractClient - contracts, err := systemcontracts.SystemContractsForChain(node.RootChainID) - if err != nil { - return nil, err - } + contracts := systemcontracts.SystemContractsForChain(node.RootChainID) dkgContractAddress := contracts.DKG.Address.Hex() // construct signer from private key @@ -905,11 +1007,11 @@ func createDKGContractClient(node *cmd.NodeConfig, machineAccountInfo *bootstrap } // createDKGContractClients creates an array dkgContractClient that is sorted by retry fallback priority -func createDKGContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*common.FlowClientConfig) ([]module.DKGContractClient, error) { +func createDKGContractClients(node *cmd.NodeConfig, machineAccountInfo *bootstrap.NodeMachineAccountInfo, flowClientOpts []*grpcclient.FlowClientConfig) ([]module.DKGContractClient, error) { dkgClients := make([]module.DKGContractClient, 0) for _, opt := range flowClientOpts { - flowClient, err := common.FlowClient(opt) + flowClient, err := grpcclient.FlowClient(opt) if err != nil { return nil, fmt.Errorf("failed to create flow client for dkg contract client with options: %s %w", flowClientOpts, err) } diff --git a/cmd/dynamic_startup.go b/cmd/dynamic_startup.go index a2c38f5bcc5..0704a10449b 100644 --- a/cmd/dynamic_startup.go +++ b/cmd/dynamic_startup.go @@ -3,116 +3,22 @@ package cmd import ( "context" "encoding/hex" - "encoding/json" "fmt" "path/filepath" "strconv" "strings" - "time" - "github.com/rs/zerolog" - "github.com/sethvargo/go-retry" + "github.com/onflow/crypto" - client "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go/cmd/util/cmd/common" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcclient" "github.com/onflow/flow-go/state/protocol" badgerstate 
"github.com/onflow/flow-go/state/protocol/badger" utilsio "github.com/onflow/flow-go/utils/io" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol/inmem" ) -const getSnapshotTimeout = 30 * time.Second - -// GetProtocolSnapshot callback that will get latest finalized protocol snapshot -type GetProtocolSnapshot func(ctx context.Context) (protocol.Snapshot, error) - -// GetSnapshot will attempt to get the latest finalized protocol snapshot with the given flow configs -func GetSnapshot(ctx context.Context, client *client.Client) (*inmem.Snapshot, error) { - ctx, cancel := context.WithTimeout(ctx, getSnapshotTimeout) - defer cancel() - - b, err := client.GetLatestProtocolStateSnapshot(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get latest finalized protocol state snapshot during pre-initialization: %w", err) - } - - var snapshotEnc inmem.EncodableSnapshot - err = json.Unmarshal(b, &snapshotEnc) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal protocol state snapshot: %w", err) - } - - snapshot := inmem.SnapshotFromEncodable(snapshotEnc) - return snapshot, nil -} - -// GetSnapshotAtEpochAndPhase will get the latest finalized protocol snapshot and check the current epoch and epoch phase. -// If we are past the target epoch and epoch phase we exit the retry mechanism immediately. -// If not check the snapshot at the specified interval until we reach the target epoch and phase. -func GetSnapshotAtEpochAndPhase(ctx context.Context, log zerolog.Logger, startupEpoch uint64, startupEpochPhase flow.EpochPhase, retryInterval time.Duration, getSnapshot GetProtocolSnapshot) (protocol.Snapshot, error) { - start := time.Now() - - log = log.With(). - Uint64("target_epoch_counter", startupEpoch). - Str("target_epoch_phase", startupEpochPhase.String()). - Logger() - - log.Info().Msg("starting dynamic startup - waiting until target epoch/phase to start...") - - var snapshot protocol.Snapshot - var err error - - backoff := retry.NewConstant(retryInterval) - err = retry.Do(ctx, backoff, func(ctx context.Context) error { - snapshot, err = getSnapshot(ctx) - if err != nil { - err = fmt.Errorf("failed to get protocol snapshot: %w", err) - log.Error().Err(err).Msg("could not get protocol snapshot") - return retry.RetryableError(err) - } - - // if we encounter any errors interpreting the snapshot something went wrong stop retrying - currEpochCounter, err := snapshot.Epochs().Current().Counter() - if err != nil { - return fmt.Errorf("failed to get the current epoch counter: %w", err) - } - - currEpochPhase, err := snapshot.Phase() - if err != nil { - return fmt.Errorf("failed to get the current epoch phase: %w", err) - } - - // check if we are in or past the target epoch and phase - if currEpochCounter > startupEpoch || (currEpochCounter == startupEpoch && currEpochPhase >= startupEpochPhase) { - log.Info(). - Dur("time-waiting", time.Since(start)). - Uint64("current-epoch", currEpochCounter). - Str("current-epoch-phase", currEpochPhase.String()). - Msg("finished dynamic startup - reached desired epoch and phase") - - return nil - } - - // wait then poll for latest snapshot again - log.Info(). - Dur("time-waiting", time.Since(start)). - Uint64("current-epoch", currEpochCounter). - Str("current-epoch-phase", currEpochPhase.String()). 
- Msgf("waiting for epoch %d and phase %s", startupEpoch, startupEpochPhase.String()) - - return retry.RetryableError(fmt.Errorf("dynamic startup epoch and epoch phase not reached")) - }) - if err != nil { - return nil, fmt.Errorf("failed to wait for target epoch and phase: %w", err) - } - - return snapshot, nil -} - // ValidateDynamicStartupFlags will validate flags necessary for dynamic node startup // - assert dynamic-startup-access-publickey is valid ECDSA_P256 public key hex // - assert dynamic-startup-access-address is not empty @@ -142,47 +48,61 @@ func ValidateDynamicStartupFlags(accessPublicKey, accessAddress string, startPha // DynamicStartPreInit is the pre-init func that will check if a node has already bootstrapped // from a root protocol snapshot. If not attempt to get a protocol snapshot where the following // conditions are met. -// 1. Target epoch < current epoch (in the past), set root snapshot to current snapshot -// 2. Target epoch == "current", wait until target phase == current phase before setting root snapshot -// 3. Target epoch > current epoch (in future), wait until target epoch and target phase is reached before +// 1. Target epoch < current epoch (in the past), set root snapshot to current snapshot +// 2. Target epoch == "current", wait until target phase == current phase before setting root snapshot +// 3. Target epoch > current epoch (in future), wait until target epoch and target phase is reached before +// // setting root snapshot func DynamicStartPreInit(nodeConfig *NodeConfig) error { ctx := context.Background() log := nodeConfig.Logger.With().Str("component", "dynamic-startup").Logger() - // skip dynamic startup if the protocol state is bootstrapped - isBootstrapped, err := badgerstate.IsBootstrapped(nodeConfig.DB) + // CASE 1: The state is already bootstrapped - nothing to do + isBootstrapped, err := badgerstate.IsBootstrapped(nodeConfig.ProtocolDB) if err != nil { return fmt.Errorf("could not check if state is boostrapped: %w", err) } if isBootstrapped { - log.Info().Msg("protocol state already bootstrapped, skipping dynamic startup") + log.Debug().Msg("protocol state already bootstrapped, skipping dynamic startup") return nil } - // skip dynamic startup if a root snapshot file is specified - this takes priority + // CASE 2: The state is not already bootstrapped. + // We will either bootstrap from a file or using Dynamic Startup. rootSnapshotPath := filepath.Join(nodeConfig.BootstrapDir, bootstrap.PathRootProtocolStateSnapshot) - if utilsio.FileExists(rootSnapshotPath) { - log.Info(). + rootSnapshotFileExists := utilsio.FileExists(rootSnapshotPath) + dynamicStartupFlagsSet := anyDynamicStartupFlagsAreSet(nodeConfig) + + // If the user has provided both a root snapshot file AND dynamic startup specification, return an error. + // Previously, the snapshot file would take precedence over the Dynamic Startup flags. + // This caused operators to inadvertently bootstrap from an old snapshot file when attempting to use Dynamic Startup. + // Therefore, we instead require the operator to explicitly choose one option or the other. + if rootSnapshotFileExists && dynamicStartupFlagsSet { + return fmt.Errorf("must specify either a root snapshot file (%s) or Dynamic Startup flags (--dynamic-startup-*) but not both", rootSnapshotPath) + } + + // CASE 2.1: Use the root snapshot file to bootstrap. + if rootSnapshotFileExists { + log.Debug(). Str("root_snapshot_path", rootSnapshotPath). 
Msg("protocol state is not bootstrapped, will bootstrap using configured root snapshot file, skipping dynamic startup") return nil } + // CASE 2.2: Use Dynamic Startup to bootstrap. + // get flow client with secure client connection to download protocol snapshot from access node - config, err := common.NewFlowClientConfig(nodeConfig.DynamicStartupANAddress, nodeConfig.DynamicStartupANPubkey, flow.ZeroID, false) + config, err := grpcclient.NewFlowClientConfig(nodeConfig.DynamicStartupANAddress, nodeConfig.DynamicStartupANPubkey, flow.ZeroID, false) if err != nil { return fmt.Errorf("failed to create flow client config for node dynamic startup pre-init: %w", err) } - - flowClient, err := common.FlowClient(config) + flowClient, err := grpcclient.FlowClient(config) if err != nil { return fmt.Errorf("failed to create flow client for node dynamic startup pre-init: %w", err) } - getSnapshotFunc := func(ctx context.Context) (protocol.Snapshot, error) { - return GetSnapshot(ctx, flowClient) + return common.GetSnapshot(ctx, flowClient) } // validate dynamic startup epoch flag @@ -190,7 +110,6 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { if err != nil { return fmt.Errorf("failed to validate flag --dynamic-start-epoch: %w", err) } - startupPhase := flow.GetEpochPhase(nodeConfig.DynamicStartupEpochPhase) // validate the rest of the dynamic startup flags @@ -199,7 +118,7 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { return err } - snapshot, err := GetSnapshotAtEpochAndPhase( + snapshot, err := common.GetSnapshotAtEpochAndPhase( ctx, log, startupEpoch, @@ -216,9 +135,19 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { return nil } +// anyDynamicStartupFlagsAreSet returns true if either the AN address or AN public key for Dynamic Startup are set. +// All other Dynamic Startup flags have default values (and aren't required) hence they aren't checked here. +// Both these flags must be set for Dynamic Startup to occur. 
+func anyDynamicStartupFlagsAreSet(config *NodeConfig) bool { + if len(config.DynamicStartupANAddress) > 0 || len(config.DynamicStartupANPubkey) > 0 { + return true + } + return false +} + // validateDynamicStartEpochFlags parses the start epoch flag and returns the uint64 value; // if epoch = current, return the current epoch counter -func validateDynamicStartEpochFlags(ctx context.Context, getSnapshot GetProtocolSnapshot, flagEpoch string) (uint64, error) { +func validateDynamicStartEpochFlags(ctx context.Context, getSnapshot common.GetProtocolSnapshot, flagEpoch string) (uint64, error) { // if flag is not `current` sentinel, it must be a specific epoch counter (uint64) if flagEpoch != "current" { @@ -235,10 +164,9 @@ return 0, fmt.Errorf("failed to get snapshot: %w", err) } - epochCounter, err := snapshot.Epochs().Current().Counter() + epoch, err := snapshot.Epochs().Current() if err != nil { - return 0, fmt.Errorf("failed to get current epoch counter: %w", err) + return 0, fmt.Errorf("failed to get current epoch: %w", err) } - - return epochCounter, nil + return epoch.Counter(), nil } diff --git a/cmd/dynamic_startup_test.go b/cmd/dynamic_startup_test.go index 775e8221fbf..a5a1f2bb1f3 100644 --- a/cmd/dynamic_startup_test.go +++ b/cmd/dynamic_startup_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" protocolmock "github.com/onflow/flow-go/state/protocol/mock" @@ -19,15 +20,16 @@ func dynamicJoinFlagsFixture() (string, string, flow.EpochPhase, uint64) { } func getMockSnapshot(t *testing.T, epochCounter uint64, phase flow.EpochPhase) *protocolmock.Snapshot { - currentEpoch := new(protocolmock.Epoch) + currentEpoch := new(protocolmock.CommittedEpoch) currentEpoch.On("Counter").Return(epochCounter, nil) epochQuery := mocks.NewEpochQuery(t, epochCounter) - epochQuery.Add(currentEpoch) + epochQuery.AddCommitted(currentEpoch) snapshot := new(protocolmock.Snapshot) snapshot.On("Epochs").Return(epochQuery) - snapshot.On("Phase").Return(phase, nil) + snapshot.On("EpochPhase").Return(phase, nil) + snapshot.On("Head").Return(unittest.BlockHeaderFixture(), nil) return snapshot } @@ -87,7 +89,7 @@ func TestGetSnapshotAtEpochAndPhase(t *testing.T) { _, _, targetPhase, targetEpoch := dynamicJoinFlagsFixture() // get snapshot - actualSnapshot, err := GetSnapshotAtEpochAndPhase( + actualSnapshot, err := common.GetSnapshotAtEpochAndPhase( context.Background(), unittest.Logger(), targetEpoch, @@ -113,7 +115,7 @@ _, _, targetPhase, targetEpoch := dynamicJoinFlagsFixture() // get snapshot - actualSnapshot, err := GetSnapshotAtEpochAndPhase( + actualSnapshot, err := common.GetSnapshotAtEpochAndPhase( context.Background(), unittest.Logger(), targetEpoch, @@ -143,7 +145,7 @@ _, _, targetPhase, _ := dynamicJoinFlagsFixture() // get snapshot - actualSnapshot, err := GetSnapshotAtEpochAndPhase( + actualSnapshot, err := common.GetSnapshotAtEpochAndPhase( context.Background(), unittest.Logger(), 5, diff --git a/cmd/execution/main.go b/cmd/execution/main.go index 58f10f7051c..c435823b029 100644 --- a/cmd/execution/main.go +++ b/cmd/execution/main.go @@ -1,6 +1,8 @@ package main import ( + "context" + "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/model/flow" ) @@ -19,5 +21,5 
@@ func main() { if err != nil { exeBuilder.FlowNodeBuilder.Logger.Fatal().Err(err).Send() } - node.Run() + node.Run(context.Background()) } diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index c12d233a65e..767975cae8e 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -13,11 +13,13 @@ import ( awsconfig "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/cockroachdb/pebble/v2" + "github.com/ipfs/boxo/bitswap" "github.com/ipfs/go-cid" - badger "github.com/ipfs/go-ds-badger2" + "github.com/onflow/cadence" "github.com/onflow/flow-core-contracts/lib/go/templates" - "github.com/onflow/go-bitswap" "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "github.com/shirou/gopsutil/v3/cpu" "github.com/shirou/gopsutil/v3/host" "github.com/shirou/gopsutil/v3/mem" @@ -28,6 +30,7 @@ import ( stateSyncCommands "github.com/onflow/flow-go/admin/commands/state_synchronization" storageCommands "github.com/onflow/flow-go/admin/commands/storage" uploaderCommands "github.com/onflow/flow-go/admin/commands/uploader" + "github.com/onflow/flow-go/cmd/build" "github.com/onflow/flow-go/consensus" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/committees" @@ -37,6 +40,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/verification" recovery "github.com/onflow/flow-go/consensus/recovery/protocol" + "github.com/onflow/flow-go/engine" followereng "github.com/onflow/flow-go/engine/common/follower" "github.com/onflow/flow-go/engine/common/provider" "github.com/onflow/flow-go/engine/common/requester" @@ -44,41 +48,55 @@ import ( "github.com/onflow/flow-go/engine/execution/checker" "github.com/onflow/flow-go/engine/execution/computation" "github.com/onflow/flow-go/engine/execution/computation/committer" + txmetrics "github.com/onflow/flow-go/engine/execution/computation/metrics" "github.com/onflow/flow-go/engine/execution/ingestion" + "github.com/onflow/flow-go/engine/execution/ingestion/fetcher" + "github.com/onflow/flow-go/engine/execution/ingestion/stop" "github.com/onflow/flow-go/engine/execution/ingestion/uploader" exeprovider "github.com/onflow/flow-go/engine/execution/provider" + exepruner "github.com/onflow/flow-go/engine/execution/pruner" "github.com/onflow/flow-go/engine/execution/rpc" + "github.com/onflow/flow-go/engine/execution/scripts" "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/engine/execution/state/bootstrap" + "github.com/onflow/flow-go/engine/execution/storehouse" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/systemcontracts" + ledgerpkg "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/pathfinder" ledger "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/wal" bootstrapFilenames "github.com/onflow/flow-go/model/bootstrap" + modelbootstrap "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/chainsync" - "github.com/onflow/flow-go/module/compliance" "github.com/onflow/flow-go/module/executiondatasync/execution_data" exedataprovider "github.com/onflow/flow-go/module/executiondatasync/provider" "github.com/onflow/flow-go/module/executiondatasync/pruner" + edstorage 
"github.com/onflow/flow-go/module/executiondatasync/storage" + execdatastorage "github.com/onflow/flow-go/module/executiondatasync/storage" "github.com/onflow/flow-go/module/executiondatasync/tracker" + "github.com/onflow/flow-go/module/finalizedreader" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p/blob" + "github.com/onflow/flow-go/network/underlay" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" storageerr "github.com/onflow/flow-go/storage" storage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + storagepebble "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/storage/store" ) const ( @@ -109,38 +127,54 @@ type ExecutionNode struct { builder *FlowNodeBuilder // This is needed for accessing the ShutdownFunc exeConf *ExecutionConfig - collector module.ExecutionMetrics - executionState state.ExecutionState - followerState protocol.FollowerState - committee hotstuff.DynamicCommittee - ledgerStorage *ledger.Ledger - events *storage.Events - serviceEvents *storage.ServiceEvents - txResults *storage.TransactionResults - results *storage.ExecutionResults - myReceipts *storage.MyExecutionReceipts - providerEngine *exeprovider.Engine + ingestionUnit *engine.Unit + + collector *metrics.ExecutionCollector + executionState state.ExecutionState + followerState protocol.FollowerState + committee hotstuff.DynamicCommittee + ledgerStorage *ledger.Ledger + registerStore *storehouse.RegisterStore + + // storage + events storageerr.Events + eventsReader storageerr.EventsReader + serviceEvents storageerr.ServiceEvents + txResults storageerr.TransactionResults + txResultsReader storageerr.TransactionResultsReader + results storageerr.ExecutionResults + resultsReader storageerr.ExecutionResultsReader + receipts storageerr.ExecutionReceipts + myReceipts storageerr.MyExecutionReceipts + commits storageerr.Commits + commitsReader storageerr.CommitsReader + collections storageerr.Collections + + chunkDataPackDB *pebble.DB + chunkDataPacks storageerr.ChunkDataPacks + providerEngine exeprovider.ProviderEngine checkerEng *checker.Engine syncCore *chainsync.Core syncEngine *synchronization.Engine followerCore *hotstuff.FollowerLoop // follower hotstuff logic followerEng *followereng.ComplianceEngine // to sync blocks from consensus nodes computationManager *computation.Manager - collectionRequester *requester.Engine - ingestionEng *ingestion.Engine + collectionRequester ingestion.CollectionRequester + scriptsEng *scripts.Engine followerDistributor *pubsub.FollowerDistributor checkAuthorizedAtBlock func(blockID flow.Identifier) (bool, error) diskWAL *wal.DiskWAL blockDataUploader *uploader.Manager executionDataStore execution_data.ExecutionDataStore - toTriggerCheckpoint *atomic.Bool // create the checkpoint trigger to be controlled by admin tool, and listened by the compactor - stopControl *ingestion.StopControl // stop the node at given block height - executionDataDatastore *badger.Datastore + toTriggerCheckpoint *atomic.Bool // create the checkpoint trigger to be controlled by admin tool, and listened by 
the compactor + stopControl *stop.StopControl // stop the node at given block height + executionDataDatastore execdatastorage.DatastoreManager executionDataPruner *pruner.Pruner executionDataBlobstore blobs.Blobstore executionDataTracker tracker.Storage blobService network.BlobService blobserviceDependable *module.ProxiedReadyDoneAware + metricsProvider txmetrics.TransactionExecutionMetricsProvider } func (builder *ExecutionNodeBuilder) LoadComponentsAndModules() { @@ -149,6 +183,7 @@ builder: builder.FlowNodeBuilder, exeConf: builder.exeConf, toTriggerCheckpoint: atomic.NewBool(false), + ingestionUnit: engine.NewUnit(), } builder.FlowNodeBuilder. @@ -164,22 +199,32 @@ AdminCommand("set-uploader-enabled", func(config *NodeConfig) commands.AdminCommand { return uploaderCommands.NewToggleUploaderCommand(exeNode.blockDataUploader) }). - AdminCommand("get-transactions", func(conf *NodeConfig) commands.AdminCommand { - return storageCommands.NewGetTransactionsCommand(conf.State, conf.Storage.Payloads, conf.Storage.Collections) + AdminCommand("protocol-snapshot", func(conf *NodeConfig) commands.AdminCommand { + return storageCommands.NewProtocolSnapshotCommand( + conf.Logger, + conf.State, + conf.Storage.Headers, + conf.Storage.Seals, + exeNode.exeConf.triedir, + ) }). + Module("load collections", exeNode.LoadCollections). Module("mutable follower state", exeNode.LoadMutableFollowerState). Module("system specs", exeNode.LoadSystemSpecs). Module("execution metrics", exeNode.LoadExecutionMetrics). Module("sync core", exeNode.LoadSyncCore). - Module("execution receipts storage", exeNode.LoadExecutionReceiptsStorage). + Module("execution storage", exeNode.LoadExecutionStorage). Module("follower distributor", exeNode.LoadFollowerDistributor). Module("authorization checking function", exeNode.LoadAuthorizationCheckingFunction). Module("execution data datastore", exeNode.LoadExecutionDataDatastore). Module("execution data getter", exeNode.LoadExecutionDataGetter). Module("blobservice peer manager dependencies", exeNode.LoadBlobservicePeerManagerDependencies). Module("bootstrap", exeNode.LoadBootstrapper). + Module("register store", exeNode.LoadRegisterStore). + AdminCommand("get-transactions", func(conf *NodeConfig) commands.AdminCommand { + return storageCommands.NewGetTransactionsCommand(conf.State, conf.Storage.Payloads, exeNode.collections) + }). Component("execution state ledger", exeNode.LoadExecutionStateLedger). - // TODO: Modules should be able to depends on components // Because all modules are always bootstrapped first, before components, // it's not possible to have a module depending on a Component. @@ -187,16 +232,29 @@ // I prefer to use dummy component now and keep the bootstrapping steps properly separated, // so it will be easier to follow and refactor later Component("execution state", exeNode.LoadExecutionState). + // Load the admin tool once the chunk data packs DB is initialized in execution state + AdminCommand("create-chunk-data-packs-checkpoint", func(config *NodeConfig) commands.AdminCommand { + // by default checkpoints will be created under "/data/chunk_data_packs_checkpoints_dir" + return storageCommands.NewPebbleDBCheckpointCommand(exeNode.exeConf.chunkDataPackCheckpointsDir, + "chunk_data_pack", exeNode.chunkDataPackDB) + }). Component("stop control", exeNode.LoadStopControl). 
Component("execution state ledger WAL compactor", exeNode.LoadExecutionStateLedgerWALCompactor). - Component("execution data pruner", exeNode.LoadExecutionDataPruner). + // disable execution data pruner for now, since storehouse is going to need the execution data + // for recovery, + // TODO: will re-visit this once storehouse has implemented new WAL for checkpoint file of + // payloadless trie. + // Component("execution data pruner", exeNode.LoadExecutionDataPruner). + Component("execution db pruner", exeNode.LoadExecutionDBPruner). Component("blob service", exeNode.LoadBlobService). Component("block data upload manager", exeNode.LoadBlockUploaderManager). Component("GCP block data uploader", exeNode.LoadGCPBlockDataUploader). Component("S3 block data uploader", exeNode.LoadS3BlockDataUploader). + Component("transaction execution metrics", exeNode.LoadTransactionExecutionMetrics). Component("provider engine", exeNode.LoadProviderEngine). Component("checker engine", exeNode.LoadCheckerEngine). Component("ingestion engine", exeNode.LoadIngestionEngine). + Component("scripts engine", exeNode.LoadScriptsEngine). Component("consensus committee", exeNode.LoadConsensusCommittee). Component("follower core", exeNode.LoadFollowerCore). Component("follower engine", exeNode.LoadFollowerEngine). @@ -206,6 +264,12 @@ func (builder *ExecutionNodeBuilder) LoadComponentsAndModules() { Component("grpc server", exeNode.LoadGrpcServer) } +func (exeNode *ExecutionNode) LoadCollections(node *NodeConfig) error { + transactions := store.NewTransactions(node.Metrics.Cache, node.ProtocolDB) + exeNode.collections = store.NewCollections(node.ProtocolDB, transactions) + return nil +} + func (exeNode *ExecutionNode) LoadMutableFollowerState(node *NodeConfig) error { // For now, we only support state implementations from package badger. 
// If we ever support different implementations, the following can be replaced by a type-aware factory @@ -241,9 +305,9 @@ func (exeNode *ExecutionNode) LoadExecutionMetrics(node *NodeConfig) error { // report the highest executed block height as soon as possible // this is guaranteed to exist because LoadBootstrapper has inserted // the root block as executed block - var height uint64 var blockID flow.Identifier - err := node.DB.View(procedure.GetHighestExecutedBlock(&height, &blockID)) + + err := operation.RetrieveExecutedBlock(node.ProtocolDB.Reader(), &blockID) if err != nil { // database has not been bootstrapped yet if errors.Is(err, storageerr.ErrNotFound) { @@ -252,7 +316,12 @@ func (exeNode *ExecutionNode) LoadExecutionMetrics(node *NodeConfig) error { return fmt.Errorf("could not get highest executed block: %w", err) } - exeNode.collector.ExecutionLastExecutedBlockHeight(height) + executed, err := node.Storage.Headers.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("could not get header by id: %v: %w", blockID, err) + } + + exeNode.collector.ExecutionLastExecutedBlockHeight(executed.Height) return nil } @@ -262,11 +331,26 @@ func (exeNode *ExecutionNode) LoadSyncCore(node *NodeConfig) error { return err } -func (exeNode *ExecutionNode) LoadExecutionReceiptsStorage( +func (exeNode *ExecutionNode) LoadExecutionStorage( node *NodeConfig, ) error { - exeNode.results = storage.NewExecutionResults(node.Metrics.Cache, node.DB) - exeNode.myReceipts = storage.NewMyExecutionReceipts(node.Metrics.Cache, node.DB, node.Storage.Receipts.(*storage.ExecutionReceipts)) + var err error + db := node.ProtocolDB + + exeNode.events = store.NewEvents(node.Metrics.Cache, db) + exeNode.serviceEvents = store.NewServiceEvents(node.Metrics.Cache, db) + exeNode.commits = store.NewCommits(node.Metrics.Cache, db) + exeNode.results = store.NewExecutionResults(node.Metrics.Cache, db) + exeNode.receipts = store.NewExecutionReceipts(node.Metrics.Cache, db, exeNode.results, storage.DefaultCacheSize) + exeNode.myReceipts = store.NewMyExecutionReceipts(node.Metrics.Cache, db, exeNode.receipts) + exeNode.txResults, err = store.NewTransactionResults(node.Metrics.Cache, db, exeNode.exeConf.transactionResultsCacheSize) + if err != nil { + return err + } + exeNode.eventsReader = exeNode.events + exeNode.commitsReader = exeNode.commits + exeNode.resultsReader = exeNode.results + exeNode.txResultsReader = exeNode.txResults return nil } @@ -302,8 +386,11 @@ func (exeNode *ExecutionNode) LoadBlobService( return nil, fmt.Errorf("allowed node ID %s is not an access node", id.NodeID.String()) } - if id.Ejected { - return nil, fmt.Errorf("allowed node ID %s is ejected", id.NodeID.String()) + if id.IsEjected() { + exeNode.builder.Logger.Warn(). + Str("node_id", idHex). + Msg("removing Access Node from the set of nodes authorized to request Execution Data, because it is ejected") + continue } allowedANs[anID] = true @@ -322,11 +409,19 @@ func (exeNode *ExecutionNode) LoadBlobService( ), } + if !node.BitswapReprovideEnabled { + opts = append(opts, blob.WithReprovideInterval(-1)) + } + if exeNode.exeConf.blobstoreRateLimit > 0 && exeNode.exeConf.blobstoreBurstLimit > 0 { opts = append(opts, blob.WithRateLimit(float64(exeNode.exeConf.blobstoreRateLimit), exeNode.exeConf.blobstoreBurstLimit)) } - bs, err := node.Network.RegisterBlobService(channels.ExecutionDataService, exeNode.executionDataDatastore, opts...) 
+ edsChannel := channels.ExecutionDataService + if node.ObserverMode { + edsChannel = channels.PublicExecutionDataService + } + bs, err := node.EngineRegistry.RegisterBlobService(edsChannel, exeNode.executionDataDatastore.Datastore(), opts...) if err != nil { return nil, fmt.Errorf("failed to register blob service: %w", err) } @@ -386,15 +481,16 @@ func (exeNode *ExecutionNode) LoadGCPBlockDataUploader( ) // Setting up RetryableUploader for GCP uploader + // deprecated retryableUploader := uploader.NewBadgerRetryableUploaderWrapper( asyncUploader, node.Storage.Blocks, - node.Storage.Commits, - node.Storage.Collections, + exeNode.commits, + exeNode.collections, exeNode.events, exeNode.results, exeNode.txResults, - storage.NewComputationResultUploadStatus(node.DB), + store.NewComputationResultUploadStatus(node.ProtocolDB), execution_data.NewDownloader(exeNode.blobService), exeNode.collector) if retryableUploader == nil { @@ -471,15 +567,48 @@ func (exeNode *ExecutionNode) LoadProviderEngine( exeNode.executionDataTracker, ) - vmCtx := fvm.NewContext(node.FvmOptions...) + // in case node.FvmOptions already set a logger, we don't want to override it + opts := append([]fvm.Option{ + fvm.WithLogger( + node.Logger.With().Str("module", "FVM").Logger(), + )}, + node.FvmOptions..., + ) + + opts = append(opts, + computation.DefaultFVMOptions( + node.RootChainID, + exeNode.exeConf.computationConfig.ExtensiveTracing, + exeNode.exeConf.scheduleCallbacksEnabled, + )..., + ) + + vmCtx := fvm.NewContext(opts...) + + var collector module.ExecutionMetrics + collector = exeNode.collector + if exeNode.exeConf.transactionExecutionMetricsEnabled { + // inject the transaction execution metrics + collector = exeNode.collector.WithTransactionCallback( + func(dur time.Duration, stats module.TransactionExecutionResultStats, info module.TransactionExecutionResultInfo) { + exeNode.metricsProvider.Collect( + info.BlockID, + info.BlockHeight, + txmetrics.TransactionExecutionMetrics{ + TransactionID: info.TransactionID, + ExecutionTime: dur, + ExecutionEffortWeights: stats.ComputationIntensities, + }) + }) + } ledgerViewCommitter := committer.NewLedgerViewCommitter(exeNode.ledgerStorage, node.Tracer) manager, err := computation.New( node.Logger, - exeNode.collector, + collector, node.Tracer, node.Me, - node.State, + computation.NewProtocolStateWrapper(node.State), vmCtx, ledgerViewCommitter, executionDataProvider, @@ -490,46 +619,52 @@ func (exeNode *ExecutionNode) LoadProviderEngine( } exeNode.computationManager = manager - var chunkDataPackRequestQueueMetrics module.HeroCacheMetrics = metrics.NewNoopCollector() - if node.HeroCacheMetricsEnable { - chunkDataPackRequestQueueMetrics = metrics.ChunkDataPackRequestQueueMetricsFactory(node.MetricsRegisterer) - } - chdpReqQueue := queue.NewHeroStore(exeNode.exeConf.chunkDataPackRequestsCacheSize, node.Logger, chunkDataPackRequestQueueMetrics) - exeNode.providerEngine, err = exeprovider.New( - node.Logger, - node.Tracer, - node.Network, - node.State, - exeNode.executionState, - exeNode.collector, - exeNode.checkAuthorizedAtBlock, - chdpReqQueue, - exeNode.exeConf.chunkDataPackRequestWorkers, - exeNode.exeConf.chunkDataPackQueryTimeout, - exeNode.exeConf.chunkDataPackDeliveryTimeout, - ) - if err != nil { - return nil, err + if node.ObserverMode { + exeNode.providerEngine = &exeprovider.NoopEngine{} + } else { + var chunkDataPackRequestQueueMetrics module.HeroCacheMetrics = metrics.NewNoopCollector() + if node.HeroCacheMetricsEnable { + chunkDataPackRequestQueueMetrics = 
metrics.ChunkDataPackRequestQueueMetricsFactory(node.MetricsRegisterer) + } + chdpReqQueue := queue.NewHeroStore(exeNode.exeConf.chunkDataPackRequestsCacheSize, node.Logger, chunkDataPackRequestQueueMetrics) + exeNode.providerEngine, err = exeprovider.New( + node.Logger, + node.Tracer, + node.EngineRegistry, + node.State, + exeNode.executionState, + exeNode.collector, + exeNode.checkAuthorizedAtBlock, + chdpReqQueue, + exeNode.exeConf.chunkDataPackRequestWorkers, + exeNode.exeConf.chunkDataPackQueryTimeout, + exeNode.exeConf.chunkDataPackDeliveryTimeout, + ) + if err != nil { + return nil, err + } } // Get latest executed block and a view at that block ctx := context.Background() - _, blockID, err := exeNode.executionState.GetHighestExecutedBlockID(ctx) + height, blockID, err := exeNode.executionState.GetLastExecutedBlockID(ctx) if err != nil { return nil, fmt.Errorf( - "cannot get the latest executed block id: %w", - err) + "cannot get the latest executed block id at height %v: %w", + height, err) } - stateCommit, err := exeNode.executionState.StateCommitmentByBlockID( - ctx, - blockID) + + blockSnapshot, _, err := exeNode.executionState.CreateStorageSnapshot(blockID) if err != nil { - return nil, fmt.Errorf( - "cannot get the state commitment at latest executed block id %s: %w", - blockID.String(), - err) + tries, _ := exeNode.ledgerStorage.Tries() + trieInfo := "empty" + if len(tries) > 0 { + trieInfo = fmt.Sprintf("length: %v, 1st: %v, last: %v", len(tries), tries[0].RootHash(), tries[len(tries)-1].RootHash()) + } + + return nil, fmt.Errorf("cannot create a storage snapshot at block %v at height %v, trie: %s: %w", blockID, + height, trieInfo, err) } - blockSnapshot := exeNode.executionState.NewStorageSnapshot(stateCommit) // Get the epoch counter from the smart contract at the last executed block. contractEpochCounter, err := getContractEpochCounter( @@ -538,25 +673,24 @@ func (exeNode *ExecutionNode) LoadProviderEngine( blockSnapshot) // Failing to fetch the epoch counter from the smart contract is a fatal error. if err != nil { - return nil, fmt.Errorf("cannot get epoch counter from the smart contract at block %s: %w", blockID.String(), err) + return nil, fmt.Errorf("cannot get epoch counter from the smart contract at block %s at height %v: %w", + blockID.String(), height, err) } - // Get the epoch counter form the protocol state, at the same block. - protocolStateEpochCounter, err := node.State. - AtBlockID(blockID). - Epochs(). - Current(). - Counter() - // Failing to fetch the epoch counter from the protocol state is a fatal error. + // Get the epoch counter from the protocol state, at the same block. + // Failing to fetch the epoch, or counter for the epoch, from the protocol state is a fatal error. + currentEpoch, err := node.State.AtBlockID(blockID).Epochs().Current() if err != nil { - return nil, fmt.Errorf("cannot get epoch counter from the protocol state at block %s: %w", blockID.String(), err) + return nil, fmt.Errorf("could not get current epoch at block %s: %w", blockID.String(), err) } + protocolStateEpochCounter := currentEpoch.Counter() l := node.Logger.With(). Str("component", "provider engine"). Uint64("contractEpochCounter", contractEpochCounter). Uint64("protocolStateEpochCounter", protocolStateEpochCounter). Str("blockID", blockID.String()). + Uint64("height", height). 
Logger() if contractEpochCounter != protocolStateEpochCounter { @@ -584,19 +718,14 @@ func (exeNode *ExecutionNode) LoadAuthorizationCheckingFunction( func (exeNode *ExecutionNode) LoadExecutionDataDatastore( node *NodeConfig, -) error { - datastoreDir := filepath.Join(exeNode.exeConf.executionDataDir, "blobstore") - err := os.MkdirAll(datastoreDir, 0700) - if err != nil { - return err - } - dsOpts := &badger.DefaultOptions - ds, err := badger.NewDatastore(datastoreDir, dsOpts) +) (err error) { + exeNode.executionDataDatastore, err = edstorage.CreateDatastoreManager( + node.Logger, exeNode.exeConf.executionDataDir) if err != nil { - return err + return fmt.Errorf("could not create execution data datastore manager: %w", err) } - exeNode.executionDataDatastore = ds - exeNode.builder.ShutdownFunc(ds.Close) + + exeNode.builder.ShutdownFunc(exeNode.executionDataDatastore.Close) return nil } @@ -607,7 +736,7 @@ func (exeNode *ExecutionNode) LoadBlobservicePeerManagerDependencies(node *NodeC } func (exeNode *ExecutionNode) LoadExecutionDataGetter(node *NodeConfig) error { - exeNode.executionDataBlobstore = blobs.NewBlobstore(exeNode.executionDataDatastore) + exeNode.executionDataBlobstore = blobs.NewBlobstore(exeNode.executionDataDatastore.Datastore()) exeNode.executionDataStore = execution_data.NewExecutionDataStore(exeNode.executionDataBlobstore, execution_data.DefaultSerializer) return nil } @@ -619,29 +748,63 @@ func (exeNode *ExecutionNode) LoadExecutionState( error, ) { - chunkDataPacks := storage.NewChunkDataPacks(node.Metrics.Cache, node.DB, node.Storage.Collections, exeNode.exeConf.chunkDataPackCacheSize) + chunkDataPackDB, err := storagepebble.SafeOpen( + node.Logger.With().Str("pebbledb", "cdp").Logger(), + exeNode.exeConf.chunkDataPackDir, + ) + if err != nil { + return nil, fmt.Errorf("could not open chunk data pack database: %w", err) + } - // Needed for gRPC server, make sure to assign to main scoped vars - exeNode.events = storage.NewEvents(node.Metrics.Cache, node.DB) - exeNode.serviceEvents = storage.NewServiceEvents(node.Metrics.Cache, node.DB) - exeNode.txResults = storage.NewTransactionResults(node.Metrics.Cache, node.DB, exeNode.exeConf.transactionResultsCacheSize) + exeNode.builder.ShutdownFunc(func() error { + if err := chunkDataPackDB.Close(); err != nil { + return fmt.Errorf("error closing chunk data pack database: %w", err) + } + return nil + }) + chunkDataPacks := store.NewChunkDataPacks(node.Metrics.Cache, + pebbleimpl.ToDB(chunkDataPackDB), exeNode.collections, exeNode.exeConf.chunkDataPackCacheSize) + + getLatestFinalized := func() (uint64, error) { + final, err := node.State.Final().Head() + if err != nil { + return 0, err + } + + return final.Height, nil + } + exeNode.chunkDataPackDB = chunkDataPackDB + exeNode.chunkDataPacks = chunkDataPacks + + // migrate execution data for last sealed and executed block exeNode.executionState = state.NewExecutionState( exeNode.ledgerStorage, - node.Storage.Commits, + exeNode.commits, node.Storage.Blocks, node.Storage.Headers, - node.Storage.Collections, chunkDataPacks, exeNode.results, exeNode.myReceipts, exeNode.events, exeNode.serviceEvents, exeNode.txResults, - node.DB, + node.ProtocolDB, + getLatestFinalized, node.Tracer, + exeNode.registerStore, + exeNode.exeConf.enableStorehouse, + node.StorageLockMgr, ) + height, _, err := exeNode.executionState.GetLastExecutedBlockID(context.Background()) + if err != nil { + return nil, fmt.Errorf("could not get last executed block: %w", err) + } + + log.Info().Msgf("execution state 
last executed block height: %v", height) + exeNode.collector.ExecutionLastExecutedBlockHeight(height) + return &module.NoopReadyDoneAware{}, nil } @@ -651,17 +814,124 @@ func (exeNode *ExecutionNode) LoadStopControl( module.ReadyDoneAware, error, ) { - lastExecutedHeight, _, err := exeNode.executionState.GetHighestExecutedBlockID(context.TODO()) + ver, err := build.Semver() + if err != nil { + err = fmt.Errorf("could not set semver version for stop control. "+ + "version %s is not semver compliant: %w", build.Version(), err) + + // The node would not know its own version. Without this, the node would not know + // how to react to version boundaries. + exeNode.builder.Logger. + Err(err). + Msg("error starting stop control") + + return nil, err + } + + latestFinalizedBlock, err := node.State.Final().Head() if err != nil { - return nil, fmt.Errorf("cannot get the latest executed block height for stop control: %w", err) + return nil, fmt.Errorf("could not get latest finalized block: %w", err) } - exeNode.stopControl = ingestion.NewStopControl( - exeNode.builder.Logger.With().Str("compontent", "stop_control").Logger(), + stopControl := stop.NewStopControl( + exeNode.ingestionUnit, + exeNode.exeConf.maxGracefulStopDuration, + exeNode.builder.Logger, + exeNode.executionState, + node.Storage.Headers, + node.Storage.VersionBeacons, + ver, + latestFinalizedBlock, + // TODO: rename to exeNode.exeConf.executionStopped to make it more consistent exeNode.exeConf.pauseExecution, - lastExecutedHeight) + true, + ) + // stopControl needs to consume BlockFinalized events. node.ProtocolEvents.AddConsumer(stopControl) - return &module.NoopReadyDoneAware{}, nil + exeNode.stopControl = stopControl + + return stopControl, nil +} + +func (exeNode *ExecutionNode) LoadRegisterStore( + node *NodeConfig, +) error { + if !exeNode.exeConf.enableStorehouse { + node.Logger.Info().Msg("register store disabled") + return nil + } + + node.Logger.Info(). + Str("pebble_db_path", exeNode.exeConf.registerDir).
+ Msg("register store enabled") + pebbledb, err := storagepebble.OpenRegisterPebbleDB( + node.Logger.With().Str("pebbledb", "registers").Logger(), + exeNode.exeConf.registerDir) + + if err != nil { + return fmt.Errorf("could not create disk register store: %w", err) + } + + // close pebble db on shut down + exeNode.builder.ShutdownFunc(func() error { + err := pebbledb.Close() + if err != nil { + return fmt.Errorf("could not close register store: %w", err) + } + return nil + }) + + bootstrapped, err := storagepebble.IsBootstrapped(pebbledb) + if err != nil { + return fmt.Errorf("could not check if registers db is bootstrapped: %w", err) + } + + node.Logger.Info().Msgf("register store bootstrapped: %v", bootstrapped) + + if !bootstrapped { + checkpointFile := path.Join(exeNode.exeConf.triedir, modelbootstrap.FilenameWALRootCheckpoint) + sealedRoot := node.State.Params().SealedRoot() + + rootSeal := node.State.Params().Seal() + + if sealedRoot.ID() != rootSeal.BlockID { + return fmt.Errorf("mismatching root seal and sealed root: %v != %v", sealedRoot.ID(), rootSeal.BlockID) + } + + checkpointHeight := sealedRoot.Height + rootHash := ledgerpkg.RootHash(rootSeal.FinalState) + + err = bootstrap.ImportRegistersFromCheckpoint(node.Logger, checkpointFile, checkpointHeight, rootHash, pebbledb, exeNode.exeConf.importCheckpointWorkerCount) + if err != nil { + return fmt.Errorf("could not import registers from checkpoint: %w", err) + } + } + diskStore, err := storagepebble.NewRegisters(pebbledb, storagepebble.PruningDisabled) + if err != nil { + return fmt.Errorf("could not create registers storage: %w", err) + } + + reader := finalizedreader.NewFinalizedReader(node.Storage.Headers, node.LastFinalizedHeader.Height) + node.ProtocolEvents.AddConsumer(reader) + notifier := storehouse.NewRegisterStoreMetrics(exeNode.collector) + + // report latest finalized and executed height as metrics + notifier.OnFinalizedAndExecutedHeightUpdated(diskStore.LatestHeight()) + + registerStore, err := storehouse.NewRegisterStore( + diskStore, + nil, // TODO: replace with real WAL + reader, + node.Logger, + notifier, + ) + if err != nil { + return err + } + + exeNode.registerStore = registerStore + return nil } func (exeNode *ExecutionNode) LoadExecutionStateLedger( @@ -698,6 +968,7 @@ func (exeNode *ExecutionNode) LoadExecutionStateLedgerWALCompactor( exeNode.exeConf.checkpointDistance, exeNode.exeConf.checkpointsToKeep, exeNode.toTriggerCheckpoint, // compactor will listen to the signal from admin tool for force triggering checkpointing + exeNode.collector, ) } @@ -749,18 +1020,46 @@ func (exeNode *ExecutionNode) LoadExecutionDataPruner( return exeNode.executionDataPruner, err } +func (exeNode *ExecutionNode) LoadExecutionDBPruner(node *NodeConfig) (module.ReadyDoneAware, error) { + cfg := exepruner.PruningConfig{ + Threshold: exeNode.exeConf.pruningConfigThreshold, + BatchSize: exeNode.exeConf.pruningConfigBatchSize, + SleepAfterEachBatchCommit: exeNode.exeConf.pruningConfigSleepAfterCommit, + SleepAfterEachIteration: exeNode.exeConf.pruningConfigSleepAfterIteration, + } + + return exepruner.NewChunkDataPackPruningEngine( + node.Logger, + exeNode.collector, + node.State, + node.ProtocolDB, + node.Storage.Headers, + exeNode.chunkDataPacks, + exeNode.results, + exeNode.chunkDataPackDB, + cfg, + ), nil +} + func (exeNode *ExecutionNode) LoadCheckerEngine( node *NodeConfig, ) ( module.ReadyDoneAware, error, ) { - exeNode.checkerEng = checker.New( + if !exeNode.exeConf.enableChecker { + node.Logger.Warn().Msgf("checker engine 
is disabled") + return &module.NoopReadyDoneAware{}, nil + } + + node.Logger.Info().Msgf("checker engine is enabled") + + core := checker.NewCore( node.Logger, node.State, exeNode.executionState, - node.Storage.Seals, ) + exeNode.checkerEng = checker.NewEngine(core) return exeNode.checkerEng, nil } @@ -770,52 +1069,105 @@ func (exeNode *ExecutionNode) LoadIngestionEngine( module.ReadyDoneAware, error, ) { + var colFetcher ingestion.CollectionFetcher var err error - exeNode.collectionRequester, err = requester.New(node.Logger, node.Metrics.Engine, node.Network, node.Me, node.State, - channels.RequestCollections, - filter.Any, - func() flow.Entity { return &flow.Collection{} }, - // we are manually triggering batches in execution, but lets still send off a batch once a minute, as a safety net for the sake of retries - requester.WithBatchInterval(exeNode.exeConf.requestInterval), - // consistency of collection can be checked by checking hash, and hash comes from trusted source (blocks from consensus follower) - // hence we not need to check origin - requester.WithValidateStaking(false), - ) - if err != nil { - return nil, fmt.Errorf("could not create requester engine: %w", err) + if node.ObserverMode { + anID, err := flow.HexStringToIdentifier(exeNode.exeConf.publicAccessID) + if err != nil { + return nil, fmt.Errorf("could not parse public access ID: %w", err) + } + + anNode, ok := exeNode.builder.IdentityProvider.ByNodeID(anID) + if !ok { + return nil, fmt.Errorf("could not find public access node with ID %s", anID) + } + + if anNode.Role != flow.RoleAccess { + return nil, fmt.Errorf("public access node with ID %s is not an access node", anID) + } + + if anNode.IsEjected() { + return nil, fmt.Errorf("public access node with ID %s is ejected", anID) + } + + accessFetcher, err := fetcher.NewAccessCollectionFetcher(node.Logger, anNode.Address, anNode.NetworkPubKey, anNode.NodeID, node.RootChainID.Chain()) + if err != nil { + return nil, fmt.Errorf("could not create access collection fetcher: %w", err) + } + colFetcher = accessFetcher + exeNode.collectionRequester = accessFetcher + } else { + reqEng, err := requester.New(node.Logger.With().Str("entity", "collection").Logger(), node.Metrics.Engine, node.EngineRegistry, node.Me, node.State, + channels.RequestCollections, + filter.Any, + func() flow.Entity { return new(flow.Collection) }, + // we are manually triggering batches in execution, but lets still send off a batch once a minute, as a safety net for the sake of retries + requester.WithBatchInterval(exeNode.exeConf.requestInterval), + // consistency of collection can be checked by checking hash, and hash comes from trusted source (blocks from consensus follower) + // hence we not need to check origin + requester.WithValidateStaking(false), + // we have observed execution nodes occasionally fail to retrieve collections using this engine, which can cause temporary execution halts + // setting a retry maximum of 10s results in a much faster recovery from these faults (default is 2m) + requester.WithRetryMaximum(10*time.Second), + ) + + if err != nil { + return nil, fmt.Errorf("could not create requester engine: %w", err) + } + + colFetcher = fetcher.NewCollectionFetcher(node.Logger, reqEng, node.State, exeNode.exeConf.onflowOnlyLNs) + exeNode.collectionRequester = reqEng } - exeNode.ingestionEng, err = ingestion.New( + _, core, err := ingestion.NewMachine( node.Logger, - node.Network, - node.Me, + node.ProtocolEvents, exeNode.collectionRequester, - node.State, + colFetcher, + 
node.Storage.Headers, node.Storage.Blocks, - node.Storage.Collections, - exeNode.events, - exeNode.serviceEvents, - exeNode.txResults, - exeNode.computationManager, - exeNode.providerEngine, + exeNode.collections, exeNode.executionState, + node.State, exeNode.collector, - node.Tracer, - exeNode.exeConf.extensiveLog, - exeNode.checkAuthorizedAtBlock, - exeNode.executionDataPruner, + exeNode.computationManager, + exeNode.providerEngine, exeNode.blockDataUploader, exeNode.stopControl, ) - // TODO: we should solve these mutual dependencies better - // => https://github.com/dapperlabs/flow-go/issues/4360 - exeNode.collectionRequester = exeNode.collectionRequester.WithHandle(exeNode.ingestionEng.OnCollection) + return core, err +} + +// create scripts engine for handling script execution +func (exeNode *ExecutionNode) LoadScriptsEngine(node *NodeConfig) (module.ReadyDoneAware, error) { + + exeNode.scriptsEng = scripts.New( + node.Logger, + exeNode.computationManager.QueryExecutor(), + exeNode.executionState, + ) - node.ProtocolEvents.AddConsumer(exeNode.ingestionEng) + return exeNode.scriptsEng, nil +} + +func (exeNode *ExecutionNode) LoadTransactionExecutionMetrics( + node *NodeConfig, +) (module.ReadyDoneAware, error) { + lastFinalizedHeader := node.LastFinalizedHeader + + metricsProvider := txmetrics.NewTransactionExecutionMetricsProvider( + node.Logger, + exeNode.executionState, + node.Storage.Headers, + lastFinalizedHeader.Height, + exeNode.exeConf.transactionExecutionMetricsBufferSize, + ) - return exeNode.ingestionEng, err + node.ProtocolEvents.AddConsumer(metricsProvider) + exeNode.metricsProvider = metricsProvider + return metricsProvider, nil } func (exeNode *ExecutionNode) LoadConsensusCommittee( @@ -845,23 +1197,22 @@ func (exeNode *ExecutionNode) LoadFollowerCore( ) { // create a finalizer that handles updating the protocol // state when the follower detects newly finalized blocks - final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, exeNode.followerState, node.Tracer) + final := finalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, exeNode.followerState, node.Tracer) finalized, pending, err := recovery.FindLatest(node.State, node.Storage.Headers) if err != nil { return nil, fmt.Errorf("could not find latest finalized block and pending blocks to recover consensus follower: %w", err) } - exeNode.followerDistributor.AddFinalizationConsumer(exeNode.checkerEng) - // creates a consensus follower with ingestEngine as the notifier // so that it gets notified upon each new finalized block exeNode.followerCore, err = consensus.NewFollower( node.Logger, + node.Metrics.Mempool, node.Storage.Headers, final, exeNode.followerDistributor, - node.RootBlock.Header, + node.FinalizedRootBlock.ToHeader(), node.RootQC, finalized, pending, @@ -906,17 +1257,18 @@ func (exeNode *ExecutionNode) LoadFollowerEngine( exeNode.followerEng, err = followereng.NewComplianceLayer( node.Logger, - node.Network, + node.EngineRegistry, node.Me, node.Metrics.Engine, node.Storage.Headers, - exeNode.builder.FinalizedHeader, + node.LastFinalizedHeader, core, - followereng.WithComplianceConfigOpt(compliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold)), + node.ComplianceConfig, ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) } + exeNode.followerDistributor.AddOnBlockFinalizedConsumer(exeNode.followerEng.OnFinalizedBlock) return exeNode.followerEng, nil } @@ -949,18 +1301,22 @@ func (exeNode *ExecutionNode) 
LoadReceiptProviderEngine( } receiptRequestQueue := queue.NewHeroStore(exeNode.exeConf.receiptRequestsCacheSize, node.Logger, receiptRequestQueueMetric) + engineRegister := node.EngineRegistry + if node.ObserverMode { + engineRegister = &underlay.NoopEngineRegister{} + } eng, err := provider.New( - node.Logger, + node.Logger.With().Str("entity", "receipt").Logger(), node.Metrics.Engine, - node.Network, + engineRegister, node.Me, node.State, receiptRequestQueue, exeNode.exeConf.receiptRequestWorkers, channels.ProvideReceiptsByBlockID, filter.And( - filter.HasWeight(true), - filter.HasRole(flow.RoleConsensus), + filter.IsValidCurrentEpochParticipantOrJoining, + filter.HasRole[flow.Identity](flow.RoleConsensus), ), retrieve, ) @@ -974,21 +1330,27 @@ func (exeNode *ExecutionNode) LoadSynchronizationEngine( error, ) { // initialize the synchronization engine - var err error + spamConfig, err := synchronization.NewSpamDetectionConfig() + if err != nil { + return nil, fmt.Errorf("could not initialize spam detection config: %w", err) + } + exeNode.syncEngine, err = synchronization.New( node.Logger, node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, node.State, node.Storage.Blocks, exeNode.followerEng, exeNode.syncCore, node.SyncEngineIdentifierProvider, + spamConfig, ) if err != nil { return nil, fmt.Errorf("could not initialize synchronization engine: %w", err) } + exeNode.followerDistributor.AddFinalizationConsumer(exeNode.syncEngine) return exeNode.syncEngine, nil } @@ -999,16 +1361,23 @@ func (exeNode *ExecutionNode) LoadGrpcServer( module.ReadyDoneAware, error, ) { + // maintain backwards compatibility with the deprecated flag + if exeNode.exeConf.rpcConf.DeprecatedMaxMsgSize != 0 { + node.Logger.Warn().Msg("A deprecated flag was specified (--rpc-max-message-size). Use --rpc-max-request-message-size and --rpc-max-response-message-size instead. This flag will be removed in a future release.") + exeNode.exeConf.rpcConf.MaxRequestMsgSize = exeNode.exeConf.rpcConf.DeprecatedMaxMsgSize + exeNode.exeConf.rpcConf.MaxResponseMsgSize = exeNode.exeConf.rpcConf.DeprecatedMaxMsgSize + } return rpc.New( node.Logger, exeNode.exeConf.rpcConf, - exeNode.ingestionEng, + exeNode.scriptsEng, node.Storage.Headers, node.State, - exeNode.events, - exeNode.results, - exeNode.txResults, - node.Storage.Commits, + exeNode.eventsReader, + exeNode.resultsReader, + exeNode.txResultsReader, + exeNode.commitsReader, + exeNode.metricsProvider, node.RootChainID, signature.NewBlockSignerDecoder(exeNode.committee), exeNode.exeConf.apiRatelimits, @@ -1021,13 +1390,29 @@ func (exeNode *ExecutionNode) LoadBootstrapper(node *NodeConfig) error { // check if the execution database already exists bootstrapper := bootstrap.NewBootstrapper(node.Logger) - commit, bootstrapped, err := bootstrapper.IsBootstrapped(node.DB) + // in order to support switching from badger to pebble in the middle of the spork, + // we will check if the execution database has been bootstrapped by reading the state from the badger db, + // and if not, bootstrap both the badger and pebble DBs. + commit, bootstrapped, err := bootstrapper.IsBootstrapped(node.ProtocolDB) if err != nil { return fmt.Errorf("could not query database to know whether database has been bootstrapped: %w", err) } + node.Logger.Info().Msgf("execution database bootstrapped: %v, commit: %v", bootstrapped, commit) + + // if the execution database does not exist, then we need to bootstrap it.
if !bootstrapped { + + err := wal.CheckpointHasRootHash( + node.Logger, + path.Join(node.BootstrapDir, bootstrapFilenames.DirnameExecutionState), + bootstrapFilenames.FilenameWALRootCheckpoint, + ledgerpkg.RootHash(node.RootSeal.FinalState), + ) + if err != nil { + return err + } + // when bootstrapping, the bootstrap folder must have a checkpoint file // we need to copy this file to the trie folder to restore the execution state. err = copyBootstrapState(node.BootstrapDir, exeNode.exeConf.triedir) @@ -1035,9 +1420,7 @@ func (exeNode *ExecutionNode) LoadBootstrapper(node *NodeConfig) error { return fmt.Errorf("could not load bootstrap state from checkpoint file: %w", err) } - // TODO: check that the checkpoint file contains the root block's statecommit hash - - err = bootstrapper.BootstrapExecutionDatabase(node.DB, node.RootSeal.FinalState, node.RootBlock.Header) + err = bootstrapper.BootstrapExecutionDatabase(node.StorageLockMgr, node.ProtocolDB, node.RootSeal) if err != nil { return fmt.Errorf("could not bootstrap execution database: %w", err) } @@ -1064,17 +1447,10 @@ func getContractEpochCounter( uint64, error, ) { - // Get the address of the FlowEpoch smart contract - sc, err := systemcontracts.SystemContractsForChain(vmCtx.Chain.ChainID()) - if err != nil { - return 0, fmt.Errorf("could not get system contracts: %w", err) - } - address := sc.Epoch.Address + sc := systemcontracts.SystemContractsForChain(vmCtx.Chain.ChainID()) // Generate the script to get the epoch counter from the FlowEpoch smart contract - scriptCode := templates.GenerateGetCurrentEpochCounterScript(templates.Environment{ - EpochAddress: address.Hex(), - }) + scriptCode := templates.GenerateGetCurrentEpochCounterScript(sc.AsTemplateEnv()) script := fvm.Script(scriptCode) // execute the script @@ -1089,8 +1465,8 @@ func getContractEpochCounter( return 0, fmt.Errorf("could not read epoch counter, script returned no value") } - epochCounter := output.Value.ToGoValue().(uint64) - return epochCounter, nil + epochCounter := output.Value.(cadence.UInt64) + return uint64(epochCounter), nil } // copy the checkpoint files from the bootstrap folder to the execution state folder @@ -1132,14 +1508,18 @@ func copyBootstrapState(dir, trie string) error { // copy from the bootstrap folder to the execution state folder from, to := path.Join(dir, bootstrapFilenames.DirnameExecutionState), trie - copiedFiles, err := wal.CopyCheckpointFile(filename, from, to) + + log.Info().Str("dir", dir).Str("trie", trie).
+ Msgf("linking checkpoint file %v from directory: %v, to: %v", filename, from, to) + + copiedFiles, err := wal.SoftlinkCheckpointFile(filename, from, to) if err != nil { - return fmt.Errorf("can not copy checkpoint file %s, from %s to %s", - filename, from, to) + return fmt.Errorf("can not link checkpoint file %s, from %s to %s, %w", + filename, from, to, err) } for _, newPath := range copiedFiles { - fmt.Printf("copied root checkpoint file from directory: %v, to: %v\n", from, newPath) + fmt.Printf("linked root checkpoint file from directory: %v, to: %v\n", from, newPath) } return nil diff --git a/cmd/execution_config.go b/cmd/execution_config.go index 860a5257593..308a17d94ef 100644 --- a/cmd/execution_config.go +++ b/cmd/execution_config.go @@ -1,8 +1,8 @@ package cmd import ( + "errors" "fmt" - "os" "path/filepath" "strings" "time" @@ -10,68 +10,100 @@ import ( "github.com/spf13/pflag" "github.com/onflow/flow-go/engine/common/provider" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/execution/computation" "github.com/onflow/flow-go/engine/execution/computation/query" + "github.com/onflow/flow-go/engine/execution/ingestion/stop" exeprovider "github.com/onflow/flow-go/engine/execution/provider" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/mempool" - "github.com/onflow/flow-go/utils/grpcutils" - - "github.com/onflow/flow-go/engine/execution/computation" + exepruner "github.com/onflow/flow-go/engine/execution/pruner" "github.com/onflow/flow-go/engine/execution/rpc" + "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/storage/derived" - storage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/mempool" + "github.com/onflow/flow-go/storage/store" ) // ExecutionConfig contains the configs for starting up execution nodes type ExecutionConfig struct { - rpcConf rpc.Config - triedir string - executionDataDir string - mTrieCacheSize uint32 - transactionResultsCacheSize uint - checkpointDistance uint - checkpointsToKeep uint - chunkDataPackCacheSize uint - chunkDataPackRequestsCacheSize uint32 - requestInterval time.Duration - extensiveLog bool - pauseExecution bool - chunkDataPackQueryTimeout time.Duration - chunkDataPackDeliveryTimeout time.Duration - enableBlockDataUpload bool - gcpBucketName string - s3BucketName string - apiRatelimits map[string]int - apiBurstlimits map[string]int - executionDataAllowedPeers string - executionDataPrunerHeightRangeTarget uint64 - executionDataPrunerThreshold uint64 - blobstoreRateLimit int - blobstoreBurstLimit int - chunkDataPackRequestWorkers uint + rpcConf rpc.Config + triedir string + executionDataDir string + registerDir string + mTrieCacheSize uint32 + transactionResultsCacheSize uint + checkpointDistance uint + checkpointsToKeep uint + chunkDataPackDir string + chunkDataPackCheckpointsDir string + chunkDataPackCacheSize uint + chunkDataPackRequestsCacheSize uint32 + requestInterval time.Duration + extensiveLog bool + pauseExecution bool + chunkDataPackQueryTimeout time.Duration + chunkDataPackDeliveryTimeout time.Duration + enableBlockDataUpload bool + gcpBucketName string + s3BucketName string + apiRatelimits map[string]int + apiBurstlimits map[string]int + executionDataAllowedPeers string + executionDataPrunerHeightRangeTarget uint64 + executionDataPrunerThreshold uint64 + blobstoreRateLimit int + blobstoreBurstLimit int + 
chunkDataPackRequestWorkers uint + maxGracefulStopDuration time.Duration + importCheckpointWorkerCount int + transactionExecutionMetricsEnabled bool + transactionExecutionMetricsBufferSize uint + scheduleCallbacksEnabled bool computationConfig computation.ComputationConfig receiptRequestWorkers uint // common provider engine workers receiptRequestsCacheSize uint32 // common provider engine cache size + + // This is included to temporarily work around an issue observed on a small number of ENs. + // It works around an issue where some collection nodes are not configured with enough + // file descriptors, causing connection failures. + onflowOnlyLNs bool + enableStorehouse bool + enableChecker bool + publicAccessID string + + pruningConfigThreshold uint64 + pruningConfigBatchSize uint + pruningConfigSleepAfterCommit time.Duration + pruningConfigSleepAfterIteration time.Duration } func (exeConf *ExecutionConfig) SetupFlags(flags *pflag.FlagSet) { - homedir, _ := os.UserHomeDir() - datadir := filepath.Join(homedir, ".flow", "execution") + datadir := "/data" flags.StringVarP(&exeConf.rpcConf.ListenAddr, "rpc-addr", "i", "localhost:9000", "the address the gRPC server listens on") - flags.UintVar(&exeConf.rpcConf.MaxMsgSize, "rpc-max-message-size", grpcutils.DefaultMaxMsgSize, "the maximum message size in bytes for messages sent or received over grpc") + flags.UintVar(&exeConf.rpcConf.DeprecatedMaxMsgSize, "rpc-max-message-size", 0, + "[deprecated] the maximum message size in bytes for messages sent or received over grpc") + flags.UintVar(&exeConf.rpcConf.MaxRequestMsgSize, "rpc-max-request-message-size", commonrpc.DefaultExecutionMaxRequestSize, + "the maximum request message size in bytes for request messages received over grpc by the server") + flags.UintVar(&exeConf.rpcConf.MaxResponseMsgSize, "rpc-max-response-message-size", commonrpc.DefaultExecutionMaxResponseSize, + "the maximum message size in bytes for response messages sent over grpc by the server") flags.BoolVar(&exeConf.rpcConf.RpcMetricsEnabled, "rpc-metrics-enabled", false, "whether to enable the rpc metrics") - flags.StringVar(&exeConf.triedir, "triedir", datadir, "directory to store the execution State") - flags.StringVar(&exeConf.executionDataDir, "execution-data-dir", filepath.Join(homedir, ".flow", "execution_data"), "directory to use for storing Execution Data") + flags.StringVar(&exeConf.triedir, "triedir", filepath.Join(datadir, "trie"), "directory to store the execution State") + flags.StringVar(&exeConf.executionDataDir, "execution-data-dir", filepath.Join(datadir, "execution_data"), "directory to use for storing Execution Data") + flags.StringVar(&exeConf.registerDir, "register-dir", filepath.Join(datadir, "register"), "directory to use for storing register data") flags.Uint32Var(&exeConf.mTrieCacheSize, "mtrie-cache-size", 500, "cache size for MTrie") flags.UintVar(&exeConf.checkpointDistance, "checkpoint-distance", 20, "number of WAL segments between checkpoints") flags.UintVar(&exeConf.checkpointsToKeep, "checkpoints-to-keep", 5, "number of recent checkpoints to keep (0 to keep all)") flags.UintVar(&exeConf.computationConfig.DerivedDataCacheSize, "cadence-execution-cache", derived.DefaultDerivedDataCacheSize, "cache size for Cadence execution") flags.BoolVar(&exeConf.computationConfig.ExtensiveTracing, "extensive-tracing", false, "adds high-overhead tracing to execution") -
flags.BoolVar(&exeConf.computationConfig.CadenceTracing, "cadence-tracing", false, "enables cadence runtime level tracing") - flags.UintVar(&exeConf.chunkDataPackCacheSize, "chdp-cache", storage.DefaultCacheSize, "cache size for chunk data packs") + flags.IntVar(&exeConf.computationConfig.MaxConcurrency, "computer-max-concurrency", 1, "set to greater than 1 to enable concurrent transaction execution") + flags.StringVar(&exeConf.chunkDataPackDir, "chunk-data-pack-dir", filepath.Join(datadir, "chunk_data_packs"), "directory to use for storing chunk data packs") + flags.StringVar(&exeConf.chunkDataPackCheckpointsDir, "chunk-data-pack-checkpoints-dir", filepath.Join(datadir, "chunk_data_packs_checkpoints_dir"), "directory to use for storing chunk data pack pebble database checkpoints for querying while the node is running") + flags.UintVar(&exeConf.chunkDataPackCacheSize, "chdp-cache", store.DefaultCacheSize, "cache size for chunk data packs") flags.Uint32Var(&exeConf.chunkDataPackRequestsCacheSize, "chdp-request-queue", mempool.DefaultChunkDataPackRequestQueueSize, "queue size for chunk data pack requests") flags.DurationVar(&exeConf.requestInterval, "request-interval", 60*time.Second, "the interval between requests for the requester engine") flags.Uint32Var(&exeConf.receiptRequestsCacheSize, "receipt-request-cache", provider.DefaultEntityRequestCacheSize, "queue size for entity requests at common provider engine") @@ -80,6 +112,8 @@ func (exeConf *ExecutionConfig) SetupFlags(flags *pflag.FlagSet) { "threshold for logging script execution") flags.DurationVar(&exeConf.computationConfig.QueryConfig.ExecutionTimeLimit, "script-execution-time-limit", query.DefaultExecutionTimeLimit, "script execution time limit") + flags.Uint64Var(&exeConf.computationConfig.QueryConfig.ComputationLimit, "script-execution-computation-limit", fvm.DefaultComputationLimit, + "script execution computation limit") flags.UintVar(&exeConf.transactionResultsCacheSize, "transaction-results-cache-size", 10000, "number of transaction results to be cached") flags.BoolVar(&exeConf.extensiveLog, "extensive-logging", false, "extensive logging logs tx contents and block headers") flags.DurationVar(&exeConf.chunkDataPackQueryTimeout, "chunk-data-pack-query-timeout", exeprovider.DefaultChunkDataPackQueryTimeout, "timeout duration to determine a chunk data pack query being slow") @@ -97,6 +131,30 @@ func (exeConf *ExecutionConfig) SetupFlags(flags *pflag.FlagSet) { flags.StringToIntVar(&exeConf.apiBurstlimits, "api-burst-limits", map[string]int{}, "burst limits for gRPC API methods e.g. Ping=100,ExecuteScriptAtBlockID=100 etc.
note limits apply globally to all clients.") flags.IntVar(&exeConf.blobstoreRateLimit, "blobstore-rate-limit", 0, "per second outgoing rate limit for Execution Data blobstore") flags.IntVar(&exeConf.blobstoreBurstLimit, "blobstore-burst-limit", 0, "outgoing burst limit for Execution Data blobstore") + flags.DurationVar(&exeConf.maxGracefulStopDuration, "max-graceful-stop-duration", stop.DefaultMaxGracefulStopDuration, "the maximum amount of time stop control will wait for ingestion engine to gracefully shut down before crashing") + flags.IntVar(&exeConf.importCheckpointWorkerCount, "import-checkpoint-worker-count", 10, "number of workers to import checkpoint file during bootstrap") + flags.BoolVar(&exeConf.transactionExecutionMetricsEnabled, "tx-execution-metrics", true, "enable collection of transaction execution metrics") + flags.UintVar(&exeConf.transactionExecutionMetricsBufferSize, "tx-execution-metrics-buffer-size", 200, "buffer size for transaction execution metrics. The buffer size is the number of blocks that are kept in memory by the metrics provider engine") + + var exeConfExecutionDataDBMode string + flags.StringVar(&exeConfExecutionDataDBMode, + "execution-data-db", + execution_data.ExecutionDataDBModePebble.String(), + "[deprecated] the DB type for the execution datastore") + + flags.BoolVar(&exeConf.onflowOnlyLNs, "temp-onflow-only-lns", false, "do not use unless required. forces node to only request collections from onflow collection nodes") + flags.BoolVar(&exeConf.enableStorehouse, "enable-storehouse", false, "enable storehouse to store registers on disk, default is false") + flags.BoolVar(&exeConf.enableChecker, "enable-checker", true, "enable checker to check the correctness of the execution result, default is true") + flags.BoolVar(&exeConf.scheduleCallbacksEnabled, "scheduled-callbacks-enabled", fvm.DefaultScheduledCallbacksEnabled, "enable execution of scheduled callbacks") + // deprecated. Retain it to prevent nodes that previously had this configuration from crashing.
+ var deprecatedEnableNewIngestionEngine bool + flags.BoolVar(&deprecatedEnableNewIngestionEngine, "enable-new-ingestion-engine", true, "enable new ingestion engine, default is true") + flags.StringVar(&exeConf.publicAccessID, "public-access-id", "", "public access ID for the node") + + flags.Uint64Var(&exeConf.pruningConfigThreshold, "pruning-config-threshold", exepruner.DefaultConfig.Threshold, "the number of blocks that we want to keep in the database, default 30 days") + flags.UintVar(&exeConf.pruningConfigBatchSize, "pruning-config-batch-size", exepruner.DefaultConfig.BatchSize, "the batch size is the number of blocks that we want to delete in one batch, default 1200") + flags.DurationVar(&exeConf.pruningConfigSleepAfterCommit, "pruning-config-sleep-after-commit", exepruner.DefaultConfig.SleepAfterEachBatchCommit, "sleep time after each batch commit, default 1s") + flags.DurationVar(&exeConf.pruningConfigSleepAfterIteration, "pruning-config-sleep-after-iteration", exepruner.DefaultConfig.SleepAfterEachIteration, "sleep time after each iteration, default max int64") } func (exeConf *ExecutionConfig) ValidateFlags() error { @@ -113,5 +171,11 @@ func (exeConf *ExecutionConfig) ValidateFlags() error { } } } + if exeConf.rpcConf.MaxRequestMsgSize == 0 { + return errors.New("rpc-max-request-message-size must be greater than 0") + } + if exeConf.rpcConf.MaxResponseMsgSize == 0 { + return errors.New("rpc-max-response-message-size must be greater than 0") + } return nil } diff --git a/cmd/ghost/main.go b/cmd/ghost/main.go index d49f11d9aca..42dfd4225d8 100644 --- a/cmd/ghost/main.go +++ b/cmd/ghost/main.go @@ -1,14 +1,16 @@ package main import ( + "context" + "github.com/spf13/pflag" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/engine/ghost/engine" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/validator" - "github.com/onflow/flow-go/utils/grpcutils" ) func main() { @@ -19,7 +21,7 @@ func main() { nodeBuilder := cmd.FlowNode("ghost") nodeBuilder.ExtraFlags(func(flags *pflag.FlagSet) { flags.StringVarP(&rpcConf.ListenAddr, "rpc-addr", "r", "localhost:9000", "the address the GRPC server listens on") - flags.UintVar(&rpcConf.MaxMsgSize, "rpc-max-message-size", grpcutils.DefaultMaxMsgSize, "the maximum message size in bytes for messages sent or received over grpc") + flags.UintVar(&rpcConf.MaxMsgSize, "rpc-max-message-size", rpc.DefaultMaxResponseMsgSize, "the maximum message size in bytes for messages sent or received over grpc") }) if err := nodeBuilder.Initialize(); err != nil { @@ -37,7 +39,7 @@ func main() { return nil }). Component("RPC engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - rpcEng, err := engine.New(node.Network, node.Logger, node.Me, node.State, rpcConf) + rpcEng, err := engine.New(node.EngineRegistry, node.Logger, node.Me, node.State, rpcConf) return rpcEng, err }) @@ -45,5 +47,5 @@ func main() { if err != nil { nodeBuilder.Logger.Fatal().Err(err).Send() } - node.Run() + node.Run(context.Background()) } diff --git a/cmd/node.go b/cmd/node.go index f17b8181f5c..3bccb2fd345 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -22,25 +22,61 @@ type Node interface { // Run initiates all common components (logger, database, protocol state etc.) // then starts each component. It also sets up a channel to gracefully shut // down each component if a SIGINT is received. - Run() + // The context can also be used to signal the node to shut down.
+ Run(ctx context.Context) } // FlowNodeImp is created by the FlowNodeBuilder with all components ready to be started. // The Run function starts all the components, and is blocked until either a termination // signal is received or an irrecoverable error is encountered. type FlowNodeImp struct { - component.Component + NodeImp *NodeConfig +} + +// NodeImp can be used to create a node instance from: +// - a logger: to be used during startup and shutdown +// - a component: that will be started with Run +// - a cleanup function: that will be called after the component has been stopped +// - a fatal error handler: to handle any error received from the component +type NodeImp struct { + component.Component logger zerolog.Logger postShutdown func() error fatalHandler func(error) } // NewNode returns a new node instance -func NewNode(component component.Component, cfg *NodeConfig, logger zerolog.Logger, cleanup func() error, handleFatal func(error)) Node { +func NewNode( + component component.Component, + cfg *NodeConfig, + logger zerolog.Logger, + cleanup func() error, + handleFatal func(error), +) Node { return &FlowNodeImp{ + NodeConfig: cfg, + NodeImp: NewBaseNode( + component, + logger.With(). + Str("node_role", cfg.BaseConfig.NodeRole). + Hex("spork_id", logging.ID(cfg.SporkID)). + Logger(), + cleanup, + handleFatal, + ), + } +} + +// NewBaseNode returns a new base node instance +func NewBaseNode( + component component.Component, + logger zerolog.Logger, + cleanup func() error, + handleFatal func(error), +) NodeImp { + return NodeImp{ Component: component, - NodeConfig: cfg, logger: logger, postShutdown: cleanup, fatalHandler: handleFatal, @@ -51,13 +87,10 @@ func NewNode(component component.Component, cfg *NodeConfig, logger zerolog.Logg // which point it gracefully shuts down. // Any unhandled irrecoverable errors thrown in child components will propagate up to here and // result in a fatal error. -func (node *FlowNodeImp) Run() { - // Cancelling this context notifies all child components that it's time to shutdown - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() +func (node *NodeImp) Run(ctx context.Context) { // Block until node is shutting down - err := node.run(ctx, cancel) + err := node.run(ctx) // Any error received is considered fatal. if err != nil { @@ -73,14 +106,18 @@ func (node *FlowNodeImp) Run() { node.logger.Error().Err(err).Msg("error encountered during cleanup") } - node.logger.Info().Msgf("%s node shutdown complete", node.BaseConfig.NodeRole) + node.logger.Info().Msg("node shutdown complete") } // run starts the node and blocks until a SIGINT/SIGTERM is received or an error is encountered. // It returns: // - nil if a termination signal is received, and all components have been gracefully stopped. -// - error if a irrecoverable error is received -func (node *FlowNodeImp) run(ctx context.Context, shutdown context.CancelFunc) error { +// - error if an irrecoverable error is received +func (node *NodeImp) run(ctx context.Context) error { + // Cancelling this context notifies all child components that it's time to shut down + ctx, shutdown := context.WithCancel(ctx) + defer shutdown() + // Components will pass unhandled irrecoverable errors to this channel via signalerCtx (or a // child context). Any errors received on this channel should halt the node.
signalerCtx, errChan := irrecoverable.WithSignaler(ctx) @@ -97,8 +134,7 @@ func (node *FlowNodeImp) run(ctx context.Context, shutdown context.CancelFunc) e select { case <-node.Ready(): node.logger.Info(). - Hex("spork_id", logging.ID(node.SporkID)). - Msgf("%s node startup complete", node.BaseConfig.NodeRole) + Msg("node startup complete") case <-ctx.Done(): } }() @@ -118,7 +154,7 @@ func (node *FlowNodeImp) run(ctx context.Context, shutdown context.CancelFunc) e // 3: Shut down // Send shutdown signal to components - node.logger.Info().Msgf("%s node shutting down", node.BaseConfig.NodeRole) + node.logger.Info().Msg("node shutting down") shutdown() // Block here until all components have stopped or an irrecoverable error is received. diff --git a/cmd/node_builder.go b/cmd/node_builder.go index 7083bdbc611..2721552642c 100644 --- a/cmd/node_builder.go +++ b/cmd/node_builder.go @@ -2,18 +2,19 @@ package cmd import ( "context" - "os" - "path/filepath" "time" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" madns "github.com/multiformats/go-multiaddr-dns" + "github.com/onflow/crypto" "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" "github.com/spf13/pflag" "github.com/onflow/flow-go/admin/commands" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/config" + "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" @@ -25,22 +26,19 @@ import ( "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/connection" - "github.com/onflow/flow-go/network/p2p/distributor" - "github.com/onflow/flow-go/network/p2p/dns" - "github.com/onflow/flow-go/network/p2p/middleware" - "github.com/onflow/flow-go/network/p2p/p2pbuilder" - "github.com/onflow/flow-go/network/p2p/unicast" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/events" + "github.com/onflow/flow-go/storage" bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/grpcutils" ) const NotSet = "not set" type BuilderFunc func(nodeConfig *NodeConfig) error -type ReadyDoneFactory func(node *NodeConfig) (module.ReadyDoneAware, error) + +// ReadyDoneFactory is a function that returns a ReadyDoneAware component or an error if +// the factory cannot create the component +type ReadyDoneFactory[Input any] func(input Input) (module.ReadyDoneAware, error) // NodeBuilder declares the initialization methods needed to bootstrap up a Flow node type NodeBuilder interface { @@ -80,7 +78,7 @@ type NodeBuilder interface { // The ReadyDoneFactory may return either a `Component` or `ReadyDoneAware` instance. // In both cases, the object is started according to its interface when the node is run, // and the node will wait for the component to exit gracefully. - Component(name string, f ReadyDoneFactory) NodeBuilder + Component(name string, f ReadyDoneFactory[*NodeConfig]) NodeBuilder // DependableComponent adds a new component to the node that conforms to the ReadyDoneAware // interface. The builder will wait until all of the components in the dependencies list are ready @@ -93,7 +91,7 @@ type NodeBuilder interface { // IMPORTANT: Dependable components are started in parallel with no guaranteed run order, so all // dependencies must be initialized outside of the ReadyDoneFactory, and their `Ready()` method // MUST be idempotent. 
- DependableComponent(name string, f ReadyDoneFactory, dependencies *DependencyList) NodeBuilder + DependableComponent(name string, f ReadyDoneFactory[*NodeConfig], dependencies *DependencyList) NodeBuilder // RestartableComponent adds a new component to the node that conforms to the ReadyDoneAware // interface, and calls the provided error handler when an irrecoverable error is encountered. @@ -101,7 +99,7 @@ type NodeBuilder interface { // can/should be independently restarted when an irrecoverable error is encountered. // // Any irrecoverable errors thrown by the component will be passed to the provided error handler. - RestartableComponent(name string, f ReadyDoneFactory, errorHandler component.OnError) NodeBuilder + RestartableComponent(name string, f ReadyDoneFactory[*NodeConfig], errorHandler component.OnError) NodeBuilder // ShutdownFunc adds a callback function that is called after all components have exited. // All shutdown functions are called regardless of errors returned by previous callbacks. Any @@ -140,7 +138,6 @@ type NodeBuilder interface { // For a node running as a standalone process, the config fields will be populated from the command line params, // while for a node running as a library, the config fields are expected to be initialized by the caller. type BaseConfig struct { - NetworkConfig nodeIDHex string AdminAddr string AdminCert string @@ -149,12 +146,15 @@ type BaseConfig struct { AdminMaxMsgSize uint BindAddr string NodeRole string + ObserverMode bool DynamicStartupANAddress string DynamicStartupANPubkey string DynamicStartupEpochPhase string DynamicStartupEpoch string DynamicStartupSleepInterval time.Duration datadir string + pebbleCheckpointsDir string + protocolDB storage.DB secretsdir string secretsDBEnabled bool InsecureSecretsDB bool @@ -168,7 +168,6 @@ type BaseConfig struct { MetricsEnabled bool guaranteesCacheSize uint receiptsCacheSize uint - db *badger.DB HeroCacheMetricsEnable bool SyncCoreConfig chainsync.Config CodecFactory func() network.Codec @@ -176,45 +175,18 @@ type BaseConfig struct { // ComplianceConfig configures either the compliance engine (consensus nodes) // or the follower engine (all other node roles) ComplianceConfig compliance.Config -} -type NetworkConfig struct { - // NetworkConnectionPruning determines whether connections to nodes - // that are not part of protocol state should be trimmed - // TODO: solely a fallback mechanism, can be removed upon reliable behavior in production. - NetworkConnectionPruning bool - // GossipSubConfig core gossipsub configuration. - GossipSubConfig *p2pbuilder.GossipSubConfig - // PreferredUnicastProtocols list of unicast protocols in preferred order - PreferredUnicastProtocols []string - NetworkReceivedMessageCacheSize uint32 - - PeerUpdateInterval time.Duration - UnicastMessageTimeout time.Duration - DNSCacheTTL time.Duration - LibP2PResourceManagerConfig *p2pbuilder.ResourceManagerConfig - ConnectionManagerConfig *connection.ManagerConfig - // UnicastCreateStreamRetryDelay initial delay used in the exponential backoff for create stream retries - UnicastCreateStreamRetryDelay time.Duration - // size of the queue for notifications about new peers in the disallow list. - DisallowListNotificationCacheSize uint32 - // UnicastRateLimitersConfig configuration for all unicast rate limiters. - UnicastRateLimitersConfig *UnicastRateLimitersConfig -} + // FlowConfig Flow configuration. 
+ FlowConfig config.FlowConfig -// UnicastRateLimitersConfig unicast rate limiter configuration for the message and bandwidth rate limiters. -type UnicastRateLimitersConfig struct { - // DryRun setting this to true will disable connection disconnects and gating when unicast rate limiters are configured - DryRun bool - // LockoutDuration the number of seconds a peer will be forced to wait before being allowed to successful reconnect to the node - // after being rate limited. - LockoutDuration time.Duration - // MessageRateLimit amount of unicast messages that can be sent by a peer per second. - MessageRateLimit int - // BandwidthRateLimit bandwidth size in bytes a peer is allowed to send via unicast streams per second. - BandwidthRateLimit int - // BandwidthBurstLimit bandwidth size in bytes a peer is allowed to send via unicast streams at once. - BandwidthBurstLimit int + // DhtSystemEnabled configures whether the DHT system is enabled on Access and Execution nodes. + DhtSystemEnabled bool + + // BitswapReprovideEnabled configures whether the Bitswap reprovide mechanism is enabled. + // This is only meaningful to Access and Execution nodes. + BitswapReprovideEnabled bool + + TransactionFeesDisabled bool } // NodeConfig contains all the derived parameters such the NodeID, private keys etc. and initialized instances of @@ -230,14 +202,15 @@ type NodeConfig struct { ConfigManager *updatable_configs.Manager MetricsRegisterer prometheus.Registerer Metrics Metrics - DB *badger.DB + ProtocolDB storage.DB SecretsDB *badger.DB Storage Storage + StorageLockMgr lockctx.Manager ProtocolEvents *events.Distributor State protocol.State Resolver madns.BasicResolver - Middleware network.Middleware - Network network.Network + EngineRegistry network.EngineRegistry + NetworkUnderlay network.Underlay ConduitFactory network.ConduitFactory PingService network.PingService MsgValidators []network.MessageValidator @@ -248,7 +221,7 @@ type NodeConfig struct { // list of dependencies for network peer manager startup PeerManagerDependencies *DependencyList // ReadyDoneAware implementation of the network middleware for DependableComponents - middlewareDependable *module.ProxiedReadyDoneAware + networkUnderlayDependable *module.ProxiedReadyDoneAware // ID providers IdentityProvider module.IdentityProvider @@ -265,60 +238,44 @@ type NodeConfig struct { // UnicastRateLimiterDistributor notifies consumers when a peer's unicast message is rate limited. UnicastRateLimiterDistributor p2p.UnicastRateLimiterDistributor - // NodeDisallowListDistributor notifies consumers of updates to disallow listing of nodes. - NodeDisallowListDistributor p2p.DisallowListNotificationDistributor } // StateExcerptAtBoot stores information about the root snapshot and latest finalized block for use in bootstrapping. 
type StateExcerptAtBoot struct { // properties of RootSnapshot for convenience - RootBlock *flow.Block - RootQC *flow.QuorumCertificate - RootResult *flow.ExecutionResult - RootSeal *flow.Seal - RootChainID flow.ChainID - SporkID flow.Identifier - // finalized block for use in bootstrapping - FinalizedHeader *flow.Header + // For a node bootstrapped with a root snapshot for the first block of a spork, + // FinalizedRootBlock and SealedRootBlock are the same block (special case of self-sealing block) + // For a node bootstrapped with a root snapshot for a block above the first block of a spork (dynamically bootstrapped), + // FinalizedRootBlock.Height > SealedRootBlock.Height + FinalizedRootBlock *flow.Block // The last finalized block when bootstrapped. + SealedRootBlock *flow.Block // The last sealed block when bootstrapped. + RootQC *flow.QuorumCertificate // QC for Finalized Root Block + RootResult *flow.ExecutionResult // Result for SealedRootBlock + RootSeal *flow.Seal // Seal for RootResult + RootChainID flow.ChainID + SporkID flow.Identifier + LastFinalizedHeader *flow.Header // last finalized header when the node boots up } func DefaultBaseConfig() *BaseConfig { - homedir, _ := os.UserHomeDir() - datadir := filepath.Join(homedir, ".flow", "database") + datadir := "/data/protocol" // NOTE: if the codec used in the network component is ever changed any code relying on // the message format specific to the codec must be updated. i.e: the AuthorizedSenderValidator. codecFactory := func() network.Codec { return cbor.NewCodec() } return &BaseConfig{ - NetworkConfig: NetworkConfig{ - UnicastCreateStreamRetryDelay: unicast.DefaultRetryDelay, - PeerUpdateInterval: connection.DefaultPeerUpdateInterval, - UnicastMessageTimeout: middleware.DefaultUnicastTimeout, - NetworkReceivedMessageCacheSize: p2p.DefaultReceiveCacheSize, - UnicastRateLimitersConfig: &UnicastRateLimitersConfig{ - DryRun: true, - LockoutDuration: 10, - MessageRateLimit: 0, - BandwidthRateLimit: 0, - BandwidthBurstLimit: middleware.LargeMsgMaxUnicastMsgSize, - }, - GossipSubConfig: p2pbuilder.DefaultGossipSubConfig(), - DNSCacheTTL: dns.DefaultTimeToLive, - LibP2PResourceManagerConfig: p2pbuilder.DefaultResourceManagerConfig(), - ConnectionManagerConfig: connection.DefaultConnManagerConfig(), - NetworkConnectionPruning: connection.PruningEnabled, - DisallowListNotificationCacheSize: distributor.DefaultDisallowListNotificationQueueCacheSize, - }, nodeIDHex: NotSet, AdminAddr: NotSet, AdminCert: NotSet, AdminKey: NotSet, AdminClientCAs: NotSet, - AdminMaxMsgSize: grpcutils.DefaultMaxMsgSize, + AdminMaxMsgSize: rpc.DefaultMaxResponseMsgSize, BindAddr: NotSet, + ObserverMode: false, BootstrapDir: "bootstrap", datadir: datadir, + protocolDB: nil, secretsdir: NotSet, secretsDBEnabled: true, level: "info", @@ -340,26 +297,28 @@ func DefaultBaseConfig() *BaseConfig { Duration: 10 * time.Second, }, - HeroCacheMetricsEnable: false, - SyncCoreConfig: chainsync.DefaultConfig(), - CodecFactory: codecFactory, - ComplianceConfig: compliance.DefaultConfig(), + HeroCacheMetricsEnable: false, + SyncCoreConfig: chainsync.DefaultConfig(), + CodecFactory: codecFactory, + ComplianceConfig: compliance.DefaultConfig(), + DhtSystemEnabled: true, + BitswapReprovideEnabled: true, } } // DependencyList is a slice of ReadyDoneAware implementations that are used by DependableComponent // to define the list of dependencies that must be ready before starting the component.
type DependencyList struct { - components []module.ReadyDoneAware + Components []module.ReadyDoneAware } func NewDependencyList(components ...module.ReadyDoneAware) *DependencyList { return &DependencyList{ - components: components, + Components: components, } } // Add adds a new ReadyDoneAware implementation to the list of dependencies. func (d *DependencyList) Add(component module.ReadyDoneAware) { - d.components = append(d.components, component) + d.Components = append(d.Components, component) } diff --git a/cmd/node_test.go b/cmd/node_test.go index a42de1f28db..e8fb48248f6 100644 --- a/cmd/node_test.go +++ b/cmd/node_test.go @@ -1,6 +1,7 @@ package cmd import ( + "context" "errors" "os" "syscall" @@ -42,7 +43,7 @@ func TestRunShutsDownCleanly(t *testing.T) { finished := make(chan struct{}) go func() { - node.Run() + node.Run(context.Background()) close(finished) }() @@ -62,6 +63,44 @@ func TestRunShutsDownCleanly(t *testing.T) { }, testLogger.logs) }) + t.Run("Run shuts down gracefully on context cancel", func(t *testing.T) { + testLogger.Reset() + manager := component.NewComponentManagerBuilder(). + AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + testLogger.Log("worker starting up") + ready() + testLogger.Log("worker startup complete") + + <-ctx.Done() + testLogger.Log("worker shutting down") + testLogger.Log("worker shutdown complete") + }). + Build() + node := NewNode(manager, nodeConfig, logger, postShutdown, fatalHandler) + + ctx, cancel := context.WithCancel(context.Background()) + + finished := make(chan struct{}) + go func() { + node.Run(ctx) + close(finished) + }() + + <-node.Ready() + + cancel() + + <-finished + + assert.Equal(t, []string{ + "worker starting up", + "worker startup complete", + "worker shutting down", + "worker shutdown complete", + "running cleanup", + }, testLogger.logs) + }) + t.Run("Run encounters error during postShutdown", func(t *testing.T) { testLogger.Reset() manager := component.NewComponentManagerBuilder(). 
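Aside: the "Run shuts down gracefully on context cancel" test added above exercises the new `Run(ctx)` entry point, where caller-side cancellation replaces SIGINT as the shutdown trigger. Below is a minimal sketch of the run-until-cancelled pattern this refactor implements; it is not flow-go code, and the `runnable` interface and `run` function are hypothetical stand-ins for `component.Component` and `NodeImp.run` in cmd/node.go.

```go
// Package noderun sketches the pattern: derive a local cancel from the
// caller's context, start the component, then block until either an
// irrecoverable error arrives or the context is cancelled.
package noderun

import (
	"context"
	"fmt"
)

// runnable is a hypothetical stand-in for flow-go's component.Component.
type runnable interface {
	Start(ctx context.Context) // begins the component's workers
	Done() <-chan struct{}     // closed once shutdown completes
}

// run blocks until the caller cancels ctx or an irrecoverable error arrives.
func run(ctx context.Context, c runnable, errChan <-chan error) error {
	// Cancelling this derived context tells all child components to shut
	// down, whether the trigger was a signal, a fatal error, or the caller.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	c.Start(ctx)

	select {
	case err := <-errChan:
		// Unhandled irrecoverable errors are fatal to the node.
		return fmt.Errorf("unhandled irrecoverable error: %w", err)
	case <-ctx.Done():
		// Caller cancelled (the new test path) or a termination signal
		// fired: wait for all components to finish shutting down.
		<-c.Done()
		return nil
	}
}
```

Deriving the local cancel from the caller's context is what lets OS signals, fatal errors, and library callers all share a single shutdown path, which is the behavior the new test asserts.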
@@ -82,7 +121,7 @@ func TestRunShutsDownCleanly(t *testing.T) { finished := make(chan struct{}) go func() { - node.Run() + node.Run(context.Background()) close(finished) }() @@ -123,7 +162,7 @@ func TestRunShutsDownCleanly(t *testing.T) { finished := make(chan struct{}) go func() { - node.Run() + node.Run(context.Background()) close(finished) }() @@ -157,7 +196,7 @@ func TestRunShutsDownCleanly(t *testing.T) { finished := make(chan struct{}) go func() { - node.Run() + node.Run(context.Background()) close(finished) }() @@ -191,7 +230,7 @@ func TestRunShutsDownCleanly(t *testing.T) { finished := make(chan struct{}) go func() { - node.Run() + node.Run(context.Background()) close(finished) }() diff --git a/cmd/observer/main.go b/cmd/observer/main.go index 96ec27bc5cc..bb84036fa9f 100644 --- a/cmd/observer/main.go +++ b/cmd/observer/main.go @@ -1,6 +1,8 @@ package main import ( + "context" + nodebuilder "github.com/onflow/flow-go/cmd/observer/node_builder" ) @@ -22,5 +24,5 @@ func main() { if err != nil { anb.Logger.Fatal().Err(err).Send() } - node.Run() + node.Run(context.Background()) } diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index 81c09e0c7c7..90deddace30 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -6,21 +6,26 @@ import ( "encoding/json" "errors" "fmt" + "math" "os" + "path" "path/filepath" "strings" "time" - badger "github.com/ipfs/go-ds-badger2" - dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p/core/host" + "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/routing" - "github.com/onflow/go-bitswap" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/spf13/pflag" + "google.golang.org/grpc/credentials" + "github.com/onflow/flow-go/admin/commands" + stateSyncCommands "github.com/onflow/flow-go/admin/commands/state_synchronization" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/build" "github.com/onflow/flow-go/consensus" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/committees" @@ -30,47 +35,72 @@ import ( hotstuffvalidator "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/verification" recovery "github.com/onflow/flow-go/consensus/recovery/protocol" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/access/apiproxy" + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/rest" + restapiproxy "github.com/onflow/flow-go/engine/access/rest/apiproxy" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/engine/access/rest/websockets" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + rpcConnection "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/access/state_stream" + statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/engine/access/subscription" + subscriptiontracker 
"github.com/onflow/flow-go/engine/access/subscription/tracker" "github.com/onflow/flow-go/engine/common/follower" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/stop" synceng "github.com/onflow/flow-go/engine/common/synchronization" - "github.com/onflow/flow-go/engine/protocol" + "github.com/onflow/flow-go/engine/common/version" + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/engine/execution/computation/query" + "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/complete/wal" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/chainsync" - "github.com/onflow/flow-go/module/compliance" + "github.com/onflow/flow-go/module/execution" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + execdatacache "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/executiondatasync/pruner" + edstorage "github.com/onflow/flow-go/module/executiondatasync/storage" + "github.com/onflow/flow-go/module/executiondatasync/tracker" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/local" + "github.com/onflow/flow-go/module/mempool/herocache" + "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/state_synchronization" + "github.com/onflow/flow-go/module/state_synchronization/indexer" edrequester "github.com/onflow/flow-go/module/state_synchronization/requester" consensus_follower "github.com/onflow/flow-go/module/upstream" "github.com/onflow/flow-go/network" + alspmgr "github.com/onflow/flow-go/network/alsp/manager" netcache "github.com/onflow/flow-go/network/cache" "github.com/onflow/flow-go/network/channels" - cborcodec "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/converter" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/blob" "github.com/onflow/flow-go/network/p2p/cache" - p2pdht "github.com/onflow/flow-go/network/p2p/dht" + "github.com/onflow/flow-go/network/p2p/conduit" "github.com/onflow/flow-go/network/p2p/keyutils" - "github.com/onflow/flow-go/network/p2p/middleware" - "github.com/onflow/flow-go/network/p2p/p2pbuilder" - p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" - "github.com/onflow/flow-go/network/p2p/p2pbuilder/inspector" - "github.com/onflow/flow-go/network/p2p/subscription" - "github.com/onflow/flow-go/network/p2p/tracer" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" "github.com/onflow/flow-go/network/p2p/translator" "github.com/onflow/flow-go/network/p2p/unicast/protocols" - "github.com/onflow/flow-go/network/p2p/utils" "github.com/onflow/flow-go/network/slashing" + "github.com/onflow/flow-go/network/underlay" "github.com/onflow/flow-go/network/validator" stateprotocol "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" @@ -78,6 +108,8 @@ import ( "github.com/onflow/flow-go/state/protocol/events/gadgets" "github.com/onflow/flow-go/storage" bstorage 
"github.com/onflow/flow-go/storage/badger" + pstorage "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/grpcutils" "github.com/onflow/flow-go/utils/io" ) @@ -102,22 +134,41 @@ import ( // For a node running as a standalone process, the config fields will be populated from the command line params, // while for a node running as a library, the config fields are expected to be initialized by the caller. type ObserverServiceConfig struct { - bootstrapNodeAddresses []string - bootstrapNodePublicKeys []string - observerNetworkingKeyPath string - bootstrapIdentities flow.IdentityList // the identity list of bootstrap peers the node uses to discover other nodes - apiRatelimits map[string]int - apiBurstlimits map[string]int - rpcConf rpc.Config - rpcMetricsEnabled bool - executionDataSyncEnabled bool - executionDataDir string - executionDataStartHeight uint64 - executionDataConfig edrequester.ExecutionDataConfig - apiTimeout time.Duration - upstreamNodeAddresses []string - upstreamNodePublicKeys []string - upstreamIdentities flow.IdentityList // the identity list of upstream peers the node uses to forward API requests to + observerNetworkingKeyPath string + bootstrapIdentities flow.IdentitySkeletonList // the identity list of bootstrap peers the node uses to discover other nodes + apiRatelimits map[string]int + apiBurstlimits map[string]int + rpcConf rpc.Config + rpcMetricsEnabled bool + registersDBPath string + checkpointFile string + stateStreamConf statestreambackend.Config + stateStreamFilterConf map[string]int + upstreamNodeAddresses []string + upstreamNodePublicKeys []string + upstreamIdentities flow.IdentitySkeletonList // the identity list of upstream peers the node uses to forward API requests to + scriptExecutorConfig query.QueryConfig + logTxTimeToFinalized bool + logTxTimeToExecuted bool + logTxTimeToFinalizedExecuted bool + logTxTimeToSealed bool + executionDataSyncEnabled bool + executionDataIndexingEnabled bool + executionDataPrunerHeightRangeTarget uint64 + executionDataPrunerThreshold uint64 + executionDataPruningInterval time.Duration + localServiceAPIEnabled bool + versionControlEnabled bool + stopControlEnabled bool + executionDataDir string + executionDataStartHeight uint64 + executionDataConfig edrequester.ExecutionDataConfig + scriptExecMinBlock uint64 + scriptExecMaxBlock uint64 + registerCacheType string + registerCacheSize uint + programCacheSize uint + registerDBPruneThreshold uint64 } // DefaultObserverServiceConfig defines all the default values for the ObserverServiceConfig @@ -125,39 +176,85 @@ func DefaultObserverServiceConfig() *ObserverServiceConfig { homedir, _ := os.UserHomeDir() return &ObserverServiceConfig{ rpcConf: rpc.Config{ - UnsecureGRPCListenAddr: "0.0.0.0:9000", - SecureGRPCListenAddr: "0.0.0.0:9001", - HTTPListenAddr: "0.0.0.0:8000", - RESTListenAddr: "", - CollectionAddr: "", - HistoricalAccessAddrs: "", - CollectionClientTimeout: 3 * time.Second, - ExecutionClientTimeout: 3 * time.Second, - MaxHeightRange: backend.DefaultMaxHeightRange, - PreferredExecutionNodeIDs: nil, - FixedExecutionNodeIDs: nil, - ArchiveAddressList: nil, - MaxMsgSize: grpcutils.DefaultMaxMsgSize, + UnsecureGRPCListenAddr: "0.0.0.0:9000", + SecureGRPCListenAddr: "0.0.0.0:9001", + HTTPListenAddr: "0.0.0.0:8000", + CollectionAddr: "", + HistoricalAccessAddrs: "", + BackendConfig: backend.Config{ + AccessConfig: rpcConnection.DefaultAccessConfig(), + CollectionConfig: rpcConnection.DefaultCollectionConfig(), // 
unused on observers + ExecutionConfig: rpcConnection.DefaultExecutionConfig(), // unused on observers + ConnectionPoolSize: backend.DefaultConnectionPoolSize, + MaxHeightRange: events.DefaultMaxHeightRange, + PreferredExecutionNodeIDs: nil, + FixedExecutionNodeIDs: nil, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly.String(), // default to ENs only for now + }, + RestConfig: rest.Config{ + ListenAddress: "", + WriteTimeout: rest.DefaultWriteTimeout, + ReadTimeout: rest.DefaultReadTimeout, + IdleTimeout: rest.DefaultIdleTimeout, + MaxRequestSize: commonrpc.DefaultAccessMaxRequestSize, + MaxResponseSize: commonrpc.DefaultAccessMaxResponseSize, + }, + DeprecatedMaxMsgSize: 0, + CompressorName: grpcutils.NoCompressor, + WebSocketConfig: websockets.NewDefaultWebsocketConfig(), + EnableWebSocketsStreamAPI: true, }, - rpcMetricsEnabled: false, - apiRatelimits: nil, - apiBurstlimits: nil, - bootstrapNodeAddresses: []string{}, - bootstrapNodePublicKeys: []string{}, - observerNetworkingKeyPath: cmd.NotSet, - executionDataSyncEnabled: false, - executionDataDir: filepath.Join(homedir, ".flow", "execution_data"), - executionDataStartHeight: 0, + stateStreamConf: statestreambackend.Config{ + MaxExecutionDataMsgSize: commonrpc.DefaultAccessMaxResponseSize, + ExecutionDataCacheSize: subscription.DefaultCacheSize, + ClientSendTimeout: subscription.DefaultSendTimeout, + ClientSendBufferSize: subscription.DefaultSendBufferSize, + MaxGlobalStreams: subscription.DefaultMaxGlobalStreams, + EventFilterConfig: state_stream.DefaultEventFilterConfig, + ResponseLimit: subscription.DefaultResponseLimit, + HeartbeatInterval: subscription.DefaultHeartbeatInterval, + RegisterIDsRequestLimit: state_stream.DefaultRegisterIDsRequestLimit, + }, + stateStreamFilterConf: nil, + rpcMetricsEnabled: false, + apiRatelimits: nil, + apiBurstlimits: nil, + observerNetworkingKeyPath: cmd.NotSet, + upstreamNodeAddresses: []string{}, + upstreamNodePublicKeys: []string{}, + registersDBPath: filepath.Join(homedir, ".flow", "execution_state"), + checkpointFile: cmd.NotSet, + scriptExecutorConfig: query.NewDefaultConfig(), + logTxTimeToFinalized: false, + logTxTimeToExecuted: false, + logTxTimeToFinalizedExecuted: false, + logTxTimeToSealed: false, + executionDataSyncEnabled: false, + executionDataIndexingEnabled: false, + executionDataPrunerHeightRangeTarget: 0, + executionDataPrunerThreshold: pruner.DefaultThreshold, + executionDataPruningInterval: pruner.DefaultPruningInterval, + localServiceAPIEnabled: false, + versionControlEnabled: true, + stopControlEnabled: false, + executionDataDir: filepath.Join(homedir, ".flow", "execution_data"), + executionDataStartHeight: 0, executionDataConfig: edrequester.ExecutionDataConfig{ InitialBlockHeight: 0, MaxSearchAhead: edrequester.DefaultMaxSearchAhead, FetchTimeout: edrequester.DefaultFetchTimeout, + MaxFetchTimeout: edrequester.DefaultMaxFetchTimeout, RetryDelay: edrequester.DefaultRetryDelay, MaxRetryDelay: edrequester.DefaultMaxRetryDelay, }, - apiTimeout: 3 * time.Second, - upstreamNodeAddresses: []string{}, - upstreamNodePublicKeys: []string{}, + scriptExecMinBlock: 0, + scriptExecMaxBlock: math.MaxUint64, + registerCacheType: pstorage.CacheTypeTwoQueue.String(), + registerCacheSize: 0, + programCacheSize: 0, + registerDBPruneThreshold: pruner.DefaultThreshold, } } 
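Since the observer can also run as a library, the defaults above are only a starting point: callers can adjust individual config fields through the Option functions accepted by NewFlowObserverServiceBuilder (both defined further down in this file). A minimal sketch of that pattern follows; WithUpstreamNodes is a hypothetical helper shown only to illustrate the shape, it is not part of this diff:

	// WithUpstreamNodes is a hypothetical Option that overrides the empty
	// upstream-node defaults set in DefaultObserverServiceConfig above.
	func WithUpstreamNodes(addresses []string, publicKeys []string) Option {
		return func(config *ObserverServiceConfig) {
			config.upstreamNodeAddresses = addresses
			config.upstreamNodePublicKeys = publicKeys
		}
	}

	// usage: builder := NewFlowObserverServiceBuilder(WithUpstreamNodes(addrs, keys))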
@@ -168,27 +265,63 @@ type ObserverServiceBuilder struct { *ObserverServiceConfig // components - LibP2PNode p2p.LibP2PNode - FollowerState stateprotocol.FollowerState - SyncCore *chainsync.Core - RpcEng *rpc.Engine - FollowerDistributor *pubsub.FollowerDistributor - Committee hotstuff.DynamicCommittee - Finalized *flow.Header - Pending []*flow.Header - FollowerCore module.HotStuffFollower - ExecutionDataDownloader execution_data.Downloader - ExecutionDataRequester state_synchronization.ExecutionDataRequester // for the observer, the sync engine participants provider is the libp2p peer store which is not + + LibP2PNode p2p.LibP2PNode + FollowerState stateprotocol.FollowerState + SyncCore *chainsync.Core + RpcEng *rpc.Engine + TransactionTimings *stdmap.TransactionTimings + FollowerDistributor *pubsub.FollowerDistributor + Committee hotstuff.DynamicCommittee + Finalized *flow.Header + Pending []*flow.ProposalHeader + FollowerCore module.HotStuffFollower + ExecutionIndexer *indexer.Indexer + ExecutionIndexerCore *indexer.IndexerCore + TxResultsIndex *index.TransactionResultsIndex + IndexerDependencies *cmd.DependencyList + VersionControl *version.VersionControl + StopControl *stop.StopControl + + ExecutionDataDownloader execution_data.Downloader + ExecutionDataRequester state_synchronization.ExecutionDataRequester + ExecutionDataStore execution_data.ExecutionDataStore + ExecutionDataBlobstore blobs.Blobstore + ExecutionDataPruner *pruner.Pruner + ExecutionDatastoreManager edstorage.DatastoreManager + ExecutionDataTracker tracker.Storage + + RegistersAsyncStore *execution.RegistersAsyncStore + Reporter *index.Reporter + EventsIndex *index.EventsIndex + ScriptExecutor *backend.ScriptExecutor + + // storage + events storage.Events + lightTransactionResults storage.LightTransactionResults + // available until after the network has started. Hence, a factory function that needs to be called just before // creating the sync engine SyncEngineParticipantsProviderFactory func() module.IdentifierProvider // engines - FollowerEng *follower.ComplianceEngine - SyncEng *synceng.Engine + FollowerEng *follower.ComplianceEngine + SyncEng *synceng.Engine + StateStreamEng *statestreambackend.Engine // Public network peerID peer.ID + + TransactionMetrics *metrics.TransactionCollector + RestMetrics *metrics.RestCollector + AccessMetrics module.AccessMetrics + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer + stateStreamGrpcServer *grpcserver.GrpcServer + + stateStreamBackend *statestreambackend.StateStreamBackend } // deriveBootstrapPeerIdentities derives the Flow Identity of the bootstrap peers from the parameters. 
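The fields in the struct above are populated lazily: throughout this file, each subsystem is registered on the builder as a named factory returning a module.ReadyDoneAware, and the builder starts components in registration order, using their Ready/Done channels to sequence startup and shutdown. A schematic sketch of that registration shape, as used repeatedly below (the component name and newExampleEngine constructor are illustrative only):

	builder.Component("example component", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
		// returning an error here aborts node startup; the returned component
		// is started once the components registered before it are ready
		eng, err := newExampleEngine(node.Logger) // hypothetical constructor
		if err != nil {
			return nil, fmt.Errorf("could not create example engine: %w", err)
		}
		return eng, nil
	})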
@@ -200,7 +333,7 @@ func (builder *ObserverServiceBuilder) deriveBootstrapPeerIdentities() error { return nil } - ids, err := BootstrapIdentities(builder.bootstrapNodeAddresses, builder.bootstrapNodePublicKeys) + ids, err := builder.DeriveBootstrapPeerIdentities() if err != nil { return fmt.Errorf("failed to derive bootstrap peer identities: %w", err) } @@ -228,7 +361,7 @@ func (builder *ObserverServiceBuilder) deriveUpstreamIdentities() error { return fmt.Errorf("number of addresses and keys provided for the boostrap nodes don't match") } - ids := make([]*flow.Identity, len(addresses)) + ids := make(flow.IdentitySkeletonList, len(addresses)) for i, address := range addresses { key := keys[i] @@ -240,7 +373,7 @@ func (builder *ObserverServiceBuilder) deriveUpstreamIdentities() error { } // create the identity of the peer by setting only the relevant fields - ids[i] = &flow.Identity{ + ids[i] = &flow.IdentitySkeleton{ NodeID: flow.ZeroID, // the NodeID is the hash of the staking key and for the public network it does not apply Address: address, Role: flow.RoleAccess, // the upstream node has to be an access node @@ -327,14 +460,15 @@ func (builder *ObserverServiceBuilder) buildFollowerCore() *ObserverServiceBuild builder.Component("follower core", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // create a finalizer that will handle updating the protocol // state when the follower detects newly finalized blocks - final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, builder.FollowerState, node.Tracer) + final := finalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, builder.FollowerState, node.Tracer) followerCore, err := consensus.NewFollower( node.Logger, + node.Metrics.Mempool, node.Storage.Headers, final, builder.FollowerDistributor, - node.RootBlock.Header, + node.FinalizedRootBlock.ToHeader(), node.RootQC, builder.Finalized, builder.Pending, @@ -377,18 +511,20 @@ func (builder *ObserverServiceBuilder) buildFollowerEngine() *ObserverServiceBui builder.FollowerEng, err = follower.NewComplianceLayer( node.Logger, - node.Network, + node.EngineRegistry, node.Me, node.Metrics.Engine, node.Storage.Headers, builder.Finalized, core, - follower.WithComplianceConfigOpt(compliance.WithSkipNewProposalsThreshold(builder.ComplianceConfig.SkipNewProposalsThreshold)), + builder.ComplianceConfig, follower.WithChannel(channels.PublicReceiveBlocks), ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) } + builder.FollowerDistributor. 
+ AddOnBlockFinalizedConsumer(builder.FollowerEng.OnFinalizedBlock) return builder.FollowerEng, nil }) @@ -398,21 +534,28 @@ func (builder *ObserverServiceBuilder) buildFollowerEngine() *ObserverServiceBui func (builder *ObserverServiceBuilder) buildSyncEngine() *ObserverServiceBuilder { builder.Component("sync engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + spamConfig, err := synceng.NewSpamDetectionConfig() + if err != nil { + return nil, fmt.Errorf("could not initialize spam detection config: %w", err) + } + sync, err := synceng.New( node.Logger, node.Metrics.Engine, - node.Network, + node.EngineRegistry, node.Me, node.State, node.Storage.Blocks, builder.FollowerEng, builder.SyncCore, builder.SyncEngineParticipantsProviderFactory(), + spamConfig, ) if err != nil { return nil, fmt.Errorf("could not create synchronization engine: %w", err) } builder.SyncEng = sync + builder.FollowerDistributor.AddFinalizationConsumer(sync) return builder.SyncEng, nil }) @@ -433,112 +576,6 @@ func (builder *ObserverServiceBuilder) BuildConsensusFollower() cmd.NodeBuilder return builder } -func (builder *ObserverServiceBuilder) BuildExecutionDataRequester() *ObserverServiceBuilder { - var ds *badger.Datastore - var bs network.BlobService - var processedBlockHeight storage.ConsumerProgress - var processedNotifications storage.ConsumerProgress - - builder. - Module("execution data datastore and blobstore", func(node *cmd.NodeConfig) error { - err := os.MkdirAll(builder.executionDataDir, 0700) - if err != nil { - return err - } - - ds, err = badger.NewDatastore(builder.executionDataDir, &badger.DefaultOptions) - if err != nil { - return err - } - - builder.ShutdownFunc(func() error { - if err := ds.Close(); err != nil { - return fmt.Errorf("could not close execution data datastore: %w", err) - } - return nil - }) - - return nil - }). - Module("processed block height consumer progress", func(node *cmd.NodeConfig) error { - // uses the datastore's DB - processedBlockHeight = bstorage.NewConsumerProgress(ds.DB, module.ConsumeProgressExecutionDataRequesterBlockHeight) - return nil - }). - Module("processed notifications consumer progress", func(node *cmd.NodeConfig) error { - // uses the datastore's DB - processedNotifications = bstorage.NewConsumerProgress(ds.DB, module.ConsumeProgressExecutionDataRequesterNotification) - return nil - }). - Component("execution data service", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - var err error - bs, err = node.Network.RegisterBlobService(channels.ExecutionDataService, ds, - blob.WithBitswapOptions( - bitswap.WithTracer( - blob.NewTracer(node.Logger.With().Str("blob_service", channels.ExecutionDataService.String()).Logger()), - ), - ), - ) - if err != nil { - return nil, fmt.Errorf("could not register blob service: %w", err) - } - - builder.ExecutionDataDownloader = execution_data.NewDownloader(bs) - - return builder.ExecutionDataDownloader, nil - }). 
- Component("execution data requester", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - // Validation of the start block height needs to be done after loading state - if builder.executionDataStartHeight > 0 { - if builder.executionDataStartHeight <= builder.RootBlock.Header.Height { - return nil, fmt.Errorf( - "execution data start block height (%d) must be greater than the root block height (%d)", - builder.executionDataStartHeight, builder.RootBlock.Header.Height) - } - - latestSeal, err := builder.State.Sealed().Head() - if err != nil { - return nil, fmt.Errorf("failed to get latest sealed height") - } - - // Note: since the root block of a spork is also sealed in the root protocol state, the - // latest sealed height is always equal to the root block height. That means that at the - // very beginning of a spork, this check will always fail. Operators should not specify - // an InitialBlockHeight when starting from the beginning of a spork. - if builder.executionDataStartHeight > latestSeal.Height { - return nil, fmt.Errorf( - "execution data start block height (%d) must be less than or equal to the latest sealed block height (%d)", - builder.executionDataStartHeight, latestSeal.Height) - } - - // executionDataStartHeight is provided as the first block to sync, but the - // requester expects the initial last processed height, which is the first height - 1 - builder.executionDataConfig.InitialBlockHeight = builder.executionDataStartHeight - 1 - } else { - builder.executionDataConfig.InitialBlockHeight = builder.RootBlock.Header.Height - } - - builder.ExecutionDataRequester = edrequester.New( - builder.Logger, - metrics.NewExecutionDataRequesterCollector(), - builder.ExecutionDataDownloader, - processedBlockHeight, - processedNotifications, - builder.State, - builder.Storage.Headers, - builder.Storage.Results, - builder.Storage.Seals, - builder.executionDataConfig, - ) - - builder.FollowerDistributor.AddOnBlockFinalizedConsumer(builder.ExecutionDataRequester.OnBlockFinalized) - - return builder.ExecutionDataRequester, nil - }) - - return builder -} - type Option func(*ObserverServiceConfig) func NewFlowObserverServiceBuilder(opts ...Option) *ObserverServiceBuilder { @@ -550,6 +587,7 @@ func NewFlowObserverServiceBuilder(opts ...Option) *ObserverServiceBuilder { ObserverServiceConfig: config, FlowNodeBuilder: cmd.FlowNode("observer"), FollowerDistributor: pubsub.NewFollowerDistributor(), + IndexerDependencies: cmd.NewDependencyList(), } anb.FollowerDistributor.AddProposalViolationConsumer(notifications.NewSlashingViolationsConsumer(anb.Logger)) // the observer gets a version of the root snapshot file that does not contain any node addresses @@ -559,7 +597,6 @@ func NewFlowObserverServiceBuilder(opts ...Option) *ObserverServiceBuilder { } func (builder *ObserverServiceBuilder) ParseFlags() error { - builder.BaseFlags() builder.extraFlags() @@ -571,35 +608,265 @@ func (builder *ObserverServiceBuilder) extraFlags() { builder.ExtraFlags(func(flags *pflag.FlagSet) { defaultConfig := DefaultObserverServiceConfig() - flags.StringVarP(&builder.rpcConf.UnsecureGRPCListenAddr, "rpc-addr", "r", defaultConfig.rpcConf.UnsecureGRPCListenAddr, "the address the unsecured gRPC server listens on") - flags.StringVar(&builder.rpcConf.SecureGRPCListenAddr, "secure-rpc-addr", defaultConfig.rpcConf.SecureGRPCListenAddr, "the address the secure gRPC server listens on") + flags.StringVarP(&builder.rpcConf.UnsecureGRPCListenAddr, + "rpc-addr", + "r", + defaultConfig.rpcConf.UnsecureGRPCListenAddr, + 
"the address the unsecured gRPC server listens on") + flags.StringVar(&builder.rpcConf.SecureGRPCListenAddr, + "secure-rpc-addr", + defaultConfig.rpcConf.SecureGRPCListenAddr, + "the address the secure gRPC server listens on") flags.StringVarP(&builder.rpcConf.HTTPListenAddr, "http-addr", "h", defaultConfig.rpcConf.HTTPListenAddr, "the address the http proxy server listens on") - flags.StringVar(&builder.rpcConf.RESTListenAddr, "rest-addr", defaultConfig.rpcConf.RESTListenAddr, "the address the REST server listens on (if empty the REST server will not be started)") - flags.UintVar(&builder.rpcConf.MaxMsgSize, "rpc-max-message-size", defaultConfig.rpcConf.MaxMsgSize, "the maximum message size in bytes for messages sent or received over grpc") - flags.UintVar(&builder.rpcConf.MaxHeightRange, "rpc-max-height-range", defaultConfig.rpcConf.MaxHeightRange, "maximum size for height range requests") - flags.StringToIntVar(&builder.apiRatelimits, "api-rate-limits", defaultConfig.apiRatelimits, "per second rate limits for Access API methods e.g. Ping=300,GetTransaction=500 etc.") - flags.StringToIntVar(&builder.apiBurstlimits, "api-burst-limits", defaultConfig.apiBurstlimits, "burst limits for Access API methods e.g. Ping=100,GetTransaction=100 etc.") - flags.StringVar(&builder.observerNetworkingKeyPath, "observer-networking-key-path", defaultConfig.observerNetworkingKeyPath, "path to the networking key for observer") - flags.StringSliceVar(&builder.bootstrapNodeAddresses, "bootstrap-node-addresses", defaultConfig.bootstrapNodeAddresses, "the network addresses of the bootstrap access node if this is an observer e.g. access-001.mainnet.flow.org:9653,access-002.mainnet.flow.org:9653") - flags.StringSliceVar(&builder.bootstrapNodePublicKeys, "bootstrap-node-public-keys", defaultConfig.bootstrapNodePublicKeys, "the networking public key of the bootstrap access node if this is an observer (in the same order as the bootstrap node addresses) e.g. \"d57a5e9c5.....\",\"44ded42d....\"") - flags.DurationVar(&builder.apiTimeout, "upstream-api-timeout", defaultConfig.apiTimeout, "tcp timeout for Flow API gRPC sockets to upstrem nodes") - flags.StringSliceVar(&builder.upstreamNodeAddresses, "upstream-node-addresses", defaultConfig.upstreamNodeAddresses, "the gRPC network addresses of the upstream access node. e.g. access-001.mainnet.flow.org:9000,access-002.mainnet.flow.org:9000") - flags.StringSliceVar(&builder.upstreamNodePublicKeys, "upstream-node-public-keys", defaultConfig.upstreamNodePublicKeys, "the networking public key of the upstream access node (in the same order as the upstream node addresses) e.g. 
\"d57a5e9c5.....\",\"44ded42d....\"") + flags.StringVar(&builder.rpcConf.RestConfig.ListenAddress, + "rest-addr", + defaultConfig.rpcConf.RestConfig.ListenAddress, + "the address the REST server listens on (if empty the REST server will not be started)") + flags.DurationVar(&builder.rpcConf.RestConfig.WriteTimeout, + "rest-write-timeout", + defaultConfig.rpcConf.RestConfig.WriteTimeout, + "timeout to use when writing REST response") + flags.DurationVar(&builder.rpcConf.RestConfig.ReadTimeout, + "rest-read-timeout", + defaultConfig.rpcConf.RestConfig.ReadTimeout, + "timeout to use when reading REST request headers") + flags.DurationVar(&builder.rpcConf.RestConfig.IdleTimeout, "rest-idle-timeout", defaultConfig.rpcConf.RestConfig.IdleTimeout, "idle timeout for REST connections") + flags.Int64Var(&builder.rpcConf.RestConfig.MaxRequestSize, + "rest-max-request-size", + defaultConfig.rpcConf.RestConfig.MaxRequestSize, + "the maximum request size in bytes for payload sent over REST server") + flags.Int64Var(&builder.rpcConf.RestConfig.MaxResponseSize, + "rest-max-response-size", + defaultConfig.rpcConf.RestConfig.MaxResponseSize, + "the maximum response size in bytes for payload sent from REST server") + flags.UintVar(&builder.rpcConf.DeprecatedMaxMsgSize, + "rpc-max-message-size", + defaultConfig.rpcConf.DeprecatedMaxMsgSize, + "[deprecated] the maximum message size in bytes for messages sent or received over grpc") + flags.UintVar(&builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize, + "rpc-max-request-message-size", + defaultConfig.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize, + "the maximum request message size in bytes for request messages received over grpc by the server") + flags.UintVar(&builder.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize, + "rpc-max-response-message-size", + defaultConfig.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize, + "the maximum message size in bytes for response messages sent over grpc by the server") + flags.UintVar(&builder.rpcConf.BackendConfig.ConnectionPoolSize, + "connection-pool-size", + defaultConfig.rpcConf.BackendConfig.ConnectionPoolSize, + "maximum number of connections allowed in the connection pool, size of 0 disables the connection pooling, and anything less than the default size will be overridden to use the default size") + flags.UintVar(&builder.rpcConf.BackendConfig.MaxHeightRange, + "rpc-max-height-range", + defaultConfig.rpcConf.BackendConfig.MaxHeightRange, + "maximum size for height range requests") + flags.StringToIntVar(&builder.apiRatelimits, + "api-rate-limits", + defaultConfig.apiRatelimits, + "per second rate limits for Access API methods e.g. Ping=300,GetTransaction=500 etc.") + flags.StringToIntVar(&builder.apiBurstlimits, + "api-burst-limits", + defaultConfig.apiBurstlimits, + "burst limits for Access API methods e.g. Ping=100,GetTransaction=100 etc.") + flags.StringVar(&builder.observerNetworkingKeyPath, + "observer-networking-key-path", + defaultConfig.observerNetworkingKeyPath, + "path to the networking key for observer") + flags.DurationVar(&builder.rpcConf.BackendConfig.AccessConfig.Timeout, + "upstream-api-timeout", + defaultConfig.rpcConf.BackendConfig.AccessConfig.Timeout, + "tcp timeout for Flow API gRPC sockets to upstrem nodes") + flags.StringSliceVar(&builder.upstreamNodeAddresses, + "upstream-node-addresses", + defaultConfig.upstreamNodeAddresses, + "the gRPC network addresses of the upstream access node. e.g. 
access-001.mainnet.flow.org:9000,access-002.mainnet.flow.org:9000") + flags.StringSliceVar(&builder.upstreamNodePublicKeys, + "upstream-node-public-keys", + defaultConfig.upstreamNodePublicKeys, + "the networking public key of the upstream access node (in the same order as the upstream node addresses) e.g. \"d57a5e9c5.....\",\"44ded42d....\"") + + flags.BoolVar(&builder.logTxTimeToFinalized, "log-tx-time-to-finalized", defaultConfig.logTxTimeToFinalized, "log transaction time to finalized") + flags.BoolVar(&builder.logTxTimeToExecuted, "log-tx-time-to-executed", defaultConfig.logTxTimeToExecuted, "log transaction time to executed") + flags.BoolVar(&builder.logTxTimeToFinalizedExecuted, + "log-tx-time-to-finalized-executed", + defaultConfig.logTxTimeToFinalizedExecuted, + "log transaction time to finalized and executed") + flags.BoolVar(&builder.logTxTimeToSealed, + "log-tx-time-to-sealed", + defaultConfig.logTxTimeToSealed, + "log transaction time to sealed") flags.BoolVar(&builder.rpcMetricsEnabled, "rpc-metrics-enabled", defaultConfig.rpcMetricsEnabled, "whether to enable the rpc metrics") + flags.BoolVar(&builder.executionDataIndexingEnabled, + "execution-data-indexing-enabled", + defaultConfig.executionDataIndexingEnabled, + "whether to enable the execution data indexing") + flags.BoolVar(&builder.versionControlEnabled, + "version-control-enabled", + defaultConfig.versionControlEnabled, + "whether to enable the version control feature. Default value is true") + flags.BoolVar(&builder.stopControlEnabled, + "stop-control-enabled", + defaultConfig.stopControlEnabled, + "whether to enable the stop control feature. Default value is false") + flags.BoolVar(&builder.localServiceAPIEnabled, "local-service-api-enabled", defaultConfig.localServiceAPIEnabled, "whether to use local indexed data for api queries") + flags.StringVar(&builder.registersDBPath, "execution-state-dir", defaultConfig.registersDBPath, "directory to use for execution-state database") + flags.StringVar(&builder.checkpointFile, "execution-state-checkpoint", defaultConfig.checkpointFile, "execution-state checkpoint file") + + var builderExecutionDataDBMode string + flags.StringVar(&builderExecutionDataDBMode, "execution-data-db", "pebble", "[deprecated] the DB type for execution datastore.") + + // Execution data pruner + flags.Uint64Var(&builder.executionDataPrunerHeightRangeTarget, + "execution-data-height-range-target", + defaultConfig.executionDataPrunerHeightRangeTarget, + "number of blocks of Execution Data to keep on disk. older data is pruned") + flags.Uint64Var(&builder.executionDataPrunerThreshold, + "execution-data-height-range-threshold", + defaultConfig.executionDataPrunerThreshold, + "number of unpruned blocks of Execution Data beyond the height range target to allow before pruning") + flags.DurationVar(&builder.executionDataPruningInterval, + "execution-data-pruning-interval", + defaultConfig.executionDataPruningInterval, + "duration after which the pruner tries to prune execution data. 
The default value is 10 minutes") // ExecutionDataRequester config - flags.BoolVar(&builder.executionDataSyncEnabled, "execution-data-sync-enabled", defaultConfig.executionDataSyncEnabled, "whether to enable the execution data sync protocol") - flags.StringVar(&builder.executionDataDir, "execution-data-dir", defaultConfig.executionDataDir, "directory to use for Execution Data database") - flags.Uint64Var(&builder.executionDataStartHeight, "execution-data-start-height", defaultConfig.executionDataStartHeight, "height of first block to sync execution data from when starting with an empty Execution Data database") - flags.Uint64Var(&builder.executionDataConfig.MaxSearchAhead, "execution-data-max-search-ahead", defaultConfig.executionDataConfig.MaxSearchAhead, "max number of heights to search ahead of the lowest outstanding execution data height") - flags.DurationVar(&builder.executionDataConfig.FetchTimeout, "execution-data-fetch-timeout", defaultConfig.executionDataConfig.FetchTimeout, "timeout to use when fetching execution data from the network e.g. 300s") - flags.DurationVar(&builder.executionDataConfig.RetryDelay, "execution-data-retry-delay", defaultConfig.executionDataConfig.RetryDelay, "initial delay for exponential backoff when fetching execution data fails e.g. 10s") - flags.DurationVar(&builder.executionDataConfig.MaxRetryDelay, "execution-data-max-retry-delay", defaultConfig.executionDataConfig.MaxRetryDelay, "maximum delay for exponential backoff when fetching execution data fails e.g. 5m") + flags.BoolVar(&builder.executionDataSyncEnabled, + "execution-data-sync-enabled", + defaultConfig.executionDataSyncEnabled, + "whether to enable the execution data sync protocol") + flags.StringVar(&builder.executionDataDir, + "execution-data-dir", + defaultConfig.executionDataDir, + "directory to use for Execution Data database") + flags.Uint64Var(&builder.executionDataStartHeight, + "execution-data-start-height", + defaultConfig.executionDataStartHeight, + "height of first block to sync execution data from when starting with an empty Execution Data database") + flags.Uint64Var(&builder.executionDataConfig.MaxSearchAhead, + "execution-data-max-search-ahead", + defaultConfig.executionDataConfig.MaxSearchAhead, + "max number of heights to search ahead of the lowest outstanding execution data height") + flags.DurationVar(&builder.executionDataConfig.FetchTimeout, + "execution-data-fetch-timeout", + defaultConfig.executionDataConfig.FetchTimeout, + "initial timeout to use when fetching execution data from the network. timeout increases using an incremental backoff until execution-data-max-fetch-timeout. e.g. 30s") + flags.DurationVar(&builder.executionDataConfig.MaxFetchTimeout, + "execution-data-max-fetch-timeout", + defaultConfig.executionDataConfig.MaxFetchTimeout, + "maximum timeout to use when fetching execution data from the network e.g. 300s") + flags.DurationVar(&builder.executionDataConfig.RetryDelay, + "execution-data-retry-delay", + defaultConfig.executionDataConfig.RetryDelay, + "initial delay for exponential backoff when fetching execution data fails e.g. 10s") + flags.DurationVar(&builder.executionDataConfig.MaxRetryDelay, + "execution-data-max-retry-delay", + defaultConfig.executionDataConfig.MaxRetryDelay, + "maximum delay for exponential backoff when fetching execution data fails e.g. 
5m") + + // Streaming API + flags.StringVar(&builder.stateStreamConf.ListenAddr, + "state-stream-addr", + defaultConfig.stateStreamConf.ListenAddr, + "the address the state stream server listens on (if empty the server will not be started)") + flags.Uint32Var(&builder.stateStreamConf.ExecutionDataCacheSize, + "execution-data-cache-size", + defaultConfig.stateStreamConf.ExecutionDataCacheSize, + "block execution data cache size") + flags.Uint32Var(&builder.stateStreamConf.MaxGlobalStreams, + "state-stream-global-max-streams", defaultConfig.stateStreamConf.MaxGlobalStreams, + "global maximum number of concurrent streams") + flags.UintVar(&builder.stateStreamConf.MaxExecutionDataMsgSize, + "state-stream-max-message-size", + defaultConfig.stateStreamConf.MaxExecutionDataMsgSize, + "maximum size for a gRPC message containing block execution data") + flags.StringToIntVar(&builder.stateStreamFilterConf, + "state-stream-event-filter-limits", + defaultConfig.stateStreamFilterConf, + "event filter limits for ExecutionData SubscribeEvents API e.g. EventTypes=100,Addresses=100,Contracts=100 etc.") + flags.DurationVar(&builder.stateStreamConf.ClientSendTimeout, + "state-stream-send-timeout", + defaultConfig.stateStreamConf.ClientSendTimeout, + "maximum wait before timing out while sending a response to a streaming client e.g. 30s") + flags.UintVar(&builder.stateStreamConf.ClientSendBufferSize, + "state-stream-send-buffer-size", + defaultConfig.stateStreamConf.ClientSendBufferSize, + "maximum number of responses to buffer within a stream") + flags.Float64Var(&builder.stateStreamConf.ResponseLimit, + "state-stream-response-limit", + defaultConfig.stateStreamConf.ResponseLimit, + "max number of responses per second to send over streaming endpoints. this helps manage resources consumed by each client querying data not in the cache e.g. 3 or 0.5. 0 means no limit") + flags.Uint64Var(&builder.stateStreamConf.HeartbeatInterval, + "state-stream-heartbeat-interval", + defaultConfig.stateStreamConf.HeartbeatInterval, + "default interval in blocks at which heartbeat messages should be sent. applied when client did not specify a value.") + flags.Uint32Var(&builder.stateStreamConf.RegisterIDsRequestLimit, + "state-stream-max-register-values", + defaultConfig.stateStreamConf.RegisterIDsRequestLimit, + "maximum number of register ids to include in a single request to the GetRegisters endpoint") + flags.StringVar(&builder.rpcConf.BackendConfig.EventQueryMode, + "event-query-mode", + defaultConfig.rpcConf.BackendConfig.EventQueryMode, + "mode to use when querying events. one of [local-only, execution-nodes-only(default), failover]") + flags.Uint64Var(&builder.scriptExecMinBlock, + "script-execution-min-height", + defaultConfig.scriptExecMinBlock, + "lowest block height to allow for script execution. default: no limit") + flags.Uint64Var(&builder.scriptExecMaxBlock, + "script-execution-max-height", + defaultConfig.scriptExecMaxBlock, + "highest block height to allow for script execution. default: no limit") + + flags.StringVar(&builder.registerCacheType, + "register-cache-type", + defaultConfig.registerCacheType, + "type of backend cache to use for registers (lru, arc, 2q)") + flags.UintVar(&builder.registerCacheSize, + "register-cache-size", + defaultConfig.registerCacheSize, + "number of registers to cache for script execution. default: 0 (no cache)") + flags.UintVar(&builder.programCacheSize, + "program-cache-size", + defaultConfig.programCacheSize, + "[experimental] number of blocks to cache for cadence programs. 
use 0 to disable cache. default: 0. Note: this is an experimental feature and may cause nodes to become unstable under certain workloads. Use with caution.") + + // Register DB Pruning + flags.Uint64Var(&builder.registerDBPruneThreshold, + "registerdb-pruning-threshold", + defaultConfig.registerDBPruneThreshold, + fmt.Sprintf("specifies the number of blocks below the latest stored block height to keep in register db. default: %d", defaultConfig.registerDBPruneThreshold)) + + // websockets config + flags.DurationVar( + &builder.rpcConf.WebSocketConfig.InactivityTimeout, + "websocket-inactivity-timeout", + defaultConfig.rpcConf.WebSocketConfig.InactivityTimeout, + "the duration a WebSocket connection can remain open without any active subscriptions before being automatically closed", + ) + flags.Uint64Var( + &builder.rpcConf.WebSocketConfig.MaxSubscriptionsPerConnection, + "websocket-max-subscriptions-per-connection", + defaultConfig.rpcConf.WebSocketConfig.MaxSubscriptionsPerConnection, + "the maximum number of active WebSocket subscriptions allowed per connection", + ) + flags.Float64Var( + &builder.rpcConf.WebSocketConfig.MaxResponsesPerSecond, + "websocket-max-responses-per-second", + defaultConfig.rpcConf.WebSocketConfig.MaxResponsesPerSecond, + fmt.Sprintf("the maximum number of responses that can be sent to a single client per second. Default: %f. if set to 0, no limit is applied to the number of responses per second.", defaultConfig.rpcConf.WebSocketConfig.MaxResponsesPerSecond), + ) + flags.BoolVar( + &builder.rpcConf.EnableWebSocketsStreamAPI, + "websockets-stream-api-enabled", + defaultConfig.rpcConf.EnableWebSocketsStreamAPI, + "whether to enable the WebSockets Stream API.", + ) }).ValidateFlags(func() error { if builder.executionDataSyncEnabled { if builder.executionDataConfig.FetchTimeout <= 0 { return errors.New("execution-data-fetch-timeout must be greater than 0") } + if builder.executionDataConfig.MaxFetchTimeout < builder.executionDataConfig.FetchTimeout { + return errors.New("execution-data-max-fetch-timeout must be greater than or equal to execution-data-fetch-timeout") + } if builder.executionDataConfig.RetryDelay <= 0 { return errors.New("execution-data-retry-delay must be greater than 0") } @@ -610,37 +877,40 @@ func (builder *ObserverServiceBuilder) extraFlags() { return errors.New("execution-data-max-search-ahead must be greater than 0") } } - return nil - }) -} + if builder.stateStreamConf.ListenAddr != "" { + if builder.stateStreamConf.ExecutionDataCacheSize == 0 { + return errors.New("execution-data-cache-size must be greater than 0") + } + if builder.stateStreamConf.ClientSendBufferSize == 0 { + return errors.New("state-stream-send-buffer-size must be greater than 0") + } + if len(builder.stateStreamFilterConf) > 4 { + return errors.New("state-stream-event-filter-limits must have at most 4 keys (EventTypes, Addresses, Contracts, AccountAddresses)") + } + for key, value := range builder.stateStreamFilterConf { + switch key { + case "EventTypes", "Addresses", "Contracts", "AccountAddresses": + if value <= 0 { + return fmt.Errorf("state-stream-event-filter-limits %s must be greater than 0", key) + } + default: + return errors.New("state-stream-event-filter-limits may only contain the keys EventTypes, Addresses, Contracts, AccountAddresses") + } + } + if builder.stateStreamConf.ResponseLimit < 0 { + return errors.New("state-stream-response-limit must be greater than or equal to 0") + } + if builder.stateStreamConf.RegisterIDsRequestLimit <= 0 { + return
errors.New("state-stream-max-register-values must be greater than 0") + } + } -// initNetwork creates the network.Network implementation with the given metrics, middleware, initial list of network -// participants and topology used to choose peers from the list of participants. The list of participants can later be -// updated by calling network.SetIDs. -func (builder *ObserverServiceBuilder) initNetwork(nodeID module.Local, - networkMetrics module.NetworkCoreMetrics, - middleware network.Middleware, - topology network.Topology, - receiveCache *netcache.ReceiveCache, -) (*p2p.Network, error) { - - // creates network instance - net, err := p2p.NewNetwork(&p2p.NetworkParameters{ - Logger: builder.Logger, - Codec: cborcodec.NewCodec(), - Me: nodeID, - MiddlewareFactory: func() (network.Middleware, error) { return builder.Middleware, nil }, - Topology: topology, - SubscriptionManager: subscription.NewChannelSubscriptionManager(middleware), - Metrics: networkMetrics, - IdentityProvider: builder.IdentityProvider, - ReceiveCache: receiveCache, - }) - if err != nil { - return nil, fmt.Errorf("could not initialize network: %w", err) - } + if builder.rpcConf.RestConfig.MaxRequestSize <= 0 { + return errors.New("rest-max-request-size must be greater than 0") + } - return net, nil + return nil + }) } func publicNetworkMsgValidators(log zerolog.Logger, idProvider module.IdentityProvider, selfID flow.Identifier) []network.MessageValidator { @@ -658,37 +928,6 @@ func publicNetworkMsgValidators(log zerolog.Logger, idProvider module.IdentityPr } } -// BootstrapIdentities converts the bootstrap node addresses and keys to a Flow Identity list where -// each Flow Identity is initialized with the passed address, the networking key -// and the Node ID set to ZeroID, role set to Access, 0 stake and no staking key. 
-func BootstrapIdentities(addresses []string, keys []string) (flow.IdentityList, error) { - if len(addresses) != len(keys) { - return nil, fmt.Errorf("number of addresses and keys provided for the boostrap nodes don't match") - } - - ids := make([]*flow.Identity, len(addresses)) - for i, address := range addresses { - bytes, err := hex.DecodeString(keys[i]) - if err != nil { - return nil, fmt.Errorf("failed to decode secured GRPC server public key hex %w", err) - } - - publicFlowNetworkingKey, err := crypto.DecodePublicKey(crypto.ECDSAP256, bytes) - if err != nil { - return nil, fmt.Errorf("failed to get public flow networking key could not decode public key bytes %w", err) - } - - // create the identity of the peer by setting only the relevant fields - ids[i] = &flow.Identity{ - NodeID: flow.ZeroID, // the NodeID is the hash of the staking key and for the public network it does not apply - Address: address, - Role: flow.RoleAccess, // the upstream node has to be an access node - NetworkPubKey: publicFlowNetworkingKey, - } - } - return ids, nil -} - func (builder *ObserverServiceBuilder) initNodeInfo() error { // use the networking key that was loaded from the configured file networkingKey, err := loadNetworkingKey(builder.observerNetworkingKeyPath) @@ -725,13 +964,17 @@ func (builder *ObserverServiceBuilder) InitIDProviders() { } builder.IDTranslator = translator.NewHierarchicalIDTranslator(idCache, translator.NewPublicNetworkIDTranslator()) - builder.NodeDisallowListDistributor = cmd.BuildDisallowListNotificationDisseminator(builder.DisallowListNotificationCacheSize, builder.MetricsRegisterer, builder.Logger, builder.MetricsEnabled) - // The following wrapper allows to black-list byzantine nodes via an admin command: // the wrapper overrides the 'Ejected' flag of disallow-listed nodes to true - builder.IdentityProvider, err = cache.NewNodeBlocklistWrapper(idCache, node.DB, builder.NodeDisallowListDistributor) + builder.IdentityProvider, err = cache.NewNodeDisallowListWrapper( + idCache, + node.ProtocolDB, + func() network.DisallowListNotificationConsumer { + return builder.NetworkUnderlay + }, + ) if err != nil { - return fmt.Errorf("could not initialize NodeBlockListWrapper: %w", err) + return fmt.Errorf("could not initialize NodeDisallowListWrapper: %w", err) } // use the default identifier provider @@ -748,7 +991,7 @@ func (builder *ObserverServiceBuilder) InitIDProviders() { if flowID, err := builder.IDTranslator.GetFlowID(pid); err != nil { // TODO: this is an instance of "log error and continue with best effort" anti-pattern - builder.Logger.Err(err).Str("peer", pid.String()).Msg("failed to translate to Flow ID") + builder.Logger.Debug().Str("peer", p2plogging.PeerId(pid)).Msg("failed to translate to Flow ID") } else { result = append(result, flowID) } @@ -760,11 +1003,6 @@ func (builder *ObserverServiceBuilder) InitIDProviders() { return nil }) - - builder.Component("disallow list notification distributor", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - // distributor is returned as a component to be started and stopped. 
- return builder.NodeDisallowListDistributor, nil - }) } func (builder *ObserverServiceBuilder) Initialize() error { @@ -790,8 +1028,6 @@ func (builder *ObserverServiceBuilder) Initialize() error { builder.enqueueConnectWithStakedAN() - builder.enqueueRPCServer() - if builder.BaseConfig.MetricsEnabled { builder.EnqueueMetricsServerInit() if err := builder.RegisterBadgerMetrics(); err != nil { @@ -814,10 +1050,10 @@ func (builder *ObserverServiceBuilder) validateParams() error { if len(builder.bootstrapIdentities) > 0 { return nil } - if len(builder.bootstrapNodeAddresses) == 0 { + if len(builder.BootstrapNodeAddresses) == 0 { return errors.New("no bootstrap node address provided") } - if len(builder.bootstrapNodeAddresses) != len(builder.bootstrapNodePublicKeys) { + if len(builder.BootstrapNodeAddresses) != len(builder.BootstrapNodePublicKeys) { return errors.New("number of bootstrap node addresses and public keys should match") } if len(builder.upstreamNodePublicKeys) > 0 && len(builder.upstreamNodeAddresses) != len(builder.upstreamNodePublicKeys) { @@ -826,89 +1062,13 @@ func (builder *ObserverServiceBuilder) validateParams() error { return nil } -// initPublicLibP2PFactory creates the LibP2P factory function for the given node ID and network key for the observer. -// The factory function is later passed into the initMiddleware function to eventually instantiate the p2p.LibP2PNode instance -// The LibP2P host is created with the following options: -// * DHT as client and seeded with the given bootstrap peers -// * The specified bind address as the listen address -// * The passed in private key as the libp2p key -// * No connection gater -// * No connection manager -// * No peer manager -// * Default libp2p pubsub options -func (builder *ObserverServiceBuilder) initPublicLibP2PFactory(networkKey crypto.PrivateKey) p2p.LibP2PFactoryFunc { - return func() (p2p.LibP2PNode, error) { - var pis []peer.AddrInfo - - for _, b := range builder.bootstrapIdentities { - pi, err := utils.PeerAddressInfo(*b) - - if err != nil { - return nil, fmt.Errorf("could not extract peer address info from bootstrap identity %v: %w", b, err) - } - - pis = append(pis, pi) - } - - meshTracer := tracer.NewGossipSubMeshTracer( - builder.Logger, - builder.Metrics.Network, - builder.IdentityProvider, - builder.GossipSubConfig.LocalMeshLogInterval) - - rpcInspectorSuite, err := inspector.NewGossipSubInspectorBuilder(builder.Logger, builder.SporkID, builder.GossipSubConfig.RpcInspector). - SetPublicNetwork(p2p.PublicNetwork). - SetMetrics(&p2pconfig.MetricsConfig{ - HeroCacheFactory: builder.HeroCacheMetricsFactory(), - Metrics: builder.Metrics.Network, - }).Build() - if err != nil { - return nil, fmt.Errorf("could not initialize gossipsub inspectors for observer node: %w", err) - } - - node, err := p2pbuilder.NewNodeBuilder( - builder.Logger, - builder.Metrics.Network, - builder.BaseConfig.BindAddr, - networkKey, - builder.SporkID, - builder.LibP2PResourceManagerConfig). - SetSubscriptionFilter( - subscription.NewRoleBasedFilter( - subscription.UnstakedRole, builder.IdentityProvider, - ), - ). - SetRoutingSystem(func(ctx context.Context, h host.Host) (routing.Routing, error) { - return p2pdht.NewDHT(ctx, h, protocols.FlowPublicDHTProtocolID(builder.SporkID), - builder.Logger, - builder.Metrics.Network, - p2pdht.AsClient(), - dht.BootstrapPeers(pis...), - ) - }). - SetStreamCreationRetryInterval(builder.UnicastCreateStreamRetryDelay). - SetGossipSubTracer(meshTracer). 
- SetGossipSubScoreTracerInterval(builder.GossipSubConfig.ScoreTracerInterval). - SetGossipSubRpcInspectorSuite(rpcInspectorSuite). - Build() - - if err != nil { - return nil, err - } - - builder.LibP2PNode = node - - return builder.LibP2PNode, nil - } -} - // initObserverLocal initializes the observer's ID, network key and network address // Currently, it reads a node-info.priv.json like any other node. // TODO: read the node ID from the special bootstrap files func (builder *ObserverServiceBuilder) initObserverLocal() func(node *cmd.NodeConfig) error { return func(node *cmd.NodeConfig) error { // for an observer, set the identity here explicitly since it will not be found in the protocol state - self := &flow.Identity{ + self := flow.IdentitySkeleton{ NodeID: node.NodeID, NetworkPubKey: node.NetworkKey.PublicKey(), StakingPubKey: nil, // no staking key needed for the observer @@ -929,120 +1089,973 @@ func (builder *ObserverServiceBuilder) initObserverLocal() func(node *cmd.NodeCo // Currently, the observer only runs the follower engine. func (builder *ObserverServiceBuilder) Build() (cmd.Node, error) { builder.BuildConsensusFollower() + if builder.executionDataSyncEnabled { - builder.BuildExecutionDataRequester() + builder.BuildExecutionSyncComponents() } + + builder.enqueueRPCServer() return builder.FlowNodeBuilder.Build() } -// enqueuePublicNetworkInit enqueues the observer network component initialized for the observer -func (builder *ObserverServiceBuilder) enqueuePublicNetworkInit() { - var libp2pNode p2p.LibP2PNode +func (builder *ObserverServiceBuilder) BuildExecutionSyncComponents() *ObserverServiceBuilder { + var ds datastore.Batching + var bs network.BlobService + var processedBlockHeight storage.ConsumerProgressInitializer + var processedNotifications storage.ConsumerProgressInitializer + var publicBsDependable *module.ProxiedReadyDoneAware + var execDataDistributor *edrequester.ExecutionDataDistributor + var execDataCacheBackend *herocache.BlockExecutionData + var executionDataStoreCache *execdatacache.ExecutionDataCache + + // setup dependency chain to ensure indexer starts after the requester + requesterDependable := module.NewProxiedReadyDoneAware() + builder.IndexerDependencies.Add(requesterDependable) + + executionDataPrunerEnabled := builder.executionDataPrunerHeightRangeTarget != 0 + builder. - Component("public libp2p node", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - libP2PFactory := builder.initPublicLibP2PFactory(node.NetworkKey) + AdminCommand("read-execution-data", func(config *cmd.NodeConfig) commands.AdminCommand { + return stateSyncCommands.NewReadExecutionDataCommand(builder.ExecutionDataStore) + }). 
+ Module("execution data datastore and blobstore", func(node *cmd.NodeConfig) error { + datastoreDir := filepath.Join(builder.executionDataDir, "blobstore") + err := os.MkdirAll(datastoreDir, 0700) + if err != nil { + return err + } - var err error - libp2pNode, err = libP2PFactory() + builder.ExecutionDatastoreManager, err = edstorage.NewPebbleDatastoreManager( + node.Logger.With().Str("pebbledb", "endata").Logger(), + datastoreDir, nil) if err != nil { - return nil, fmt.Errorf("could not create public libp2p node: %w", err) + return fmt.Errorf("could not create PebbleDatastoreManager for execution data: %w", err) } + ds = builder.ExecutionDatastoreManager.Datastore() + + builder.ShutdownFunc(func() error { + if err := builder.ExecutionDatastoreManager.Close(); err != nil { + return fmt.Errorf("could not close execution data datastore: %w", err) + } + return nil + }) + + return nil + }). + Module("processed block height consumer progress", func(node *cmd.NodeConfig) error { + // Note: progress is stored in the datastore's DB since that is where the jobqueue + // writes execution data to. + db := builder.ExecutionDatastoreManager.DB() - return libp2pNode, nil + processedBlockHeight = store.NewConsumerProgress(db, module.ConsumeProgressExecutionDataRequesterBlockHeight) + return nil }). - Component("public network", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - receiveCache := netcache.NewHeroReceiveCache(builder.NetworkReceivedMessageCacheSize, - builder.Logger, - metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), p2p.PublicNetwork)) + Module("processed notifications consumer progress", func(node *cmd.NodeConfig) error { + // Note: progress is stored in the datastore's DB since that is where the jobqueue + // writes execution data to. + db := builder.ExecutionDatastoreManager.DB() - err := node.Metrics.Mempool.Register(metrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) - if err != nil { - return nil, fmt.Errorf("could not register networking receive cache metric: %w", err) + processedNotifications = store.NewConsumerProgress(db, module.ConsumeProgressExecutionDataRequesterNotification) + return nil + }). + Module("blobservice peer manager dependencies", func(node *cmd.NodeConfig) error { + publicBsDependable = module.NewProxiedReadyDoneAware() + builder.PeerManagerDependencies.Add(publicBsDependable) + return nil + }). + Module("execution datastore", func(node *cmd.NodeConfig) error { + builder.ExecutionDataBlobstore = blobs.NewBlobstore(ds) + builder.ExecutionDataStore = execution_data.NewExecutionDataStore(builder.ExecutionDataBlobstore, execution_data.DefaultSerializer) + return nil + }). + Module("execution data cache", func(node *cmd.NodeConfig) error { + var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() + if builder.HeroCacheMetricsEnable { + heroCacheCollector = metrics.AccessNodeExecutionDataCacheMetrics(builder.MetricsRegisterer) } - msgValidators := publicNetworkMsgValidators(node.Logger, node.IdentityProvider, node.NodeID) + execDataCacheBackend = herocache.NewBlockExecutionData(builder.stateStreamConf.ExecutionDataCacheSize, builder.Logger, heroCacheCollector) - builder.initMiddleware(node.NodeID, libp2pNode, msgValidators...) + // Execution Data cache that uses a blobstore as the backend (instead of a downloader) + // This ensures that it simply returns a not found error if the blob doesn't exist + // instead of attempting to download it from the network. 
+ executionDataStoreCache = execdatacache.NewExecutionDataCache( + builder.ExecutionDataStore, + builder.Storage.Headers, + builder.Storage.Seals, + builder.Storage.Results, + execDataCacheBackend, + ) + + return nil + }). + Component("public execution data service", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + opts := []network.BlobServiceOption{ + blob.WithBitswapOptions( + bitswap.WithTracer( + blob.NewTracer(node.Logger.With().Str("public_blob_service", channels.PublicExecutionDataService.String()).Logger()), + ), + ), + } - // topology is nil since it is automatically managed by libp2p - net, err := builder.initNetwork(builder.Me, builder.Metrics.Network, builder.Middleware, nil, receiveCache) + var err error + bs, err = node.EngineRegistry.RegisterBlobService(channels.PublicExecutionDataService, ds, opts...) if err != nil { - return nil, err + return nil, fmt.Errorf("could not register blob service: %w", err) } - builder.Network = converter.NewNetwork(net, channels.SyncCommittee, channels.PublicSyncCommittee) + // add blobservice into ReadyDoneAware dependency passed to peer manager + // this starts the blob service and configures peer manager to wait for the blobservice + // to be ready before starting + publicBsDependable.Init(bs) - builder.Logger.Info().Msgf("network will run on address: %s", builder.BindAddr) + var downloaderOpts []execution_data.DownloaderOption - idEvents := gadgets.NewIdentityDeltas(builder.Middleware.UpdateNodeAddresses) - builder.ProtocolEvents.AddConsumer(idEvents) + if executionDataPrunerEnabled { + sealed, err := node.State.Sealed().Head() + if err != nil { + return nil, fmt.Errorf("cannot get the sealed block: %w", err) + } - return builder.Network, nil - }) -} + trackerDir := filepath.Join(builder.executionDataDir, "tracker") + builder.ExecutionDataTracker, err = tracker.OpenStorage( + trackerDir, + sealed.Height, + node.Logger, + tracker.WithPruneCallback(func(c cid.Cid) error { + // TODO: use a proper context here + return builder.ExecutionDataBlobstore.DeleteBlob(context.TODO(), c) + }), + ) + if err != nil { + return nil, fmt.Errorf("failed to create execution data tracker: %w", err) + } -// enqueueConnectWithStakedAN enqueues the upstream connector component which connects the libp2p host of the observer -// service with the AN. -// Currently, there is an issue with LibP2P stopping advertisements of subscribed topics if no peers are connected -// (https://github.com/libp2p/go-libp2p-pubsub/issues/442). This means that an observer could end up not being -// discovered by other observers if it subscribes to a topic before connecting to the AN. Hence, the need -// of an explicit connect to the AN before the node attempts to subscribe to topics. 
-func (builder *ObserverServiceBuilder) enqueueConnectWithStakedAN() { - builder.Component("upstream connector", func(_ *cmd.NodeConfig) (module.ReadyDoneAware, error) { - return consensus_follower.NewUpstreamConnector(builder.bootstrapIdentities, builder.LibP2PNode, builder.Logger), nil - }) -} + downloaderOpts = []execution_data.DownloaderOption{ + execution_data.WithExecutionDataTracker(builder.ExecutionDataTracker, node.Storage.Headers), + } + } -func (builder *ObserverServiceBuilder) enqueueRPCServer() { - builder.Component("RPC engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - engineBuilder, err := rpc.NewBuilder( - node.Logger, - node.State, - builder.rpcConf, - nil, - nil, - node.Storage.Blocks, - node.Storage.Headers, - node.Storage.Collections, - node.Storage.Transactions, - node.Storage.Receipts, - node.Storage.Results, - node.RootChainID, - nil, - nil, - 0, - 0, - false, - builder.rpcMetricsEnabled, - builder.apiRatelimits, + builder.ExecutionDataDownloader = execution_data.NewDownloader(bs, downloaderOpts...) + + return builder.ExecutionDataDownloader, nil + }). + Component("execution data requester", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + // Validation of the start block height needs to be done after loading state + if builder.executionDataStartHeight > 0 { + if builder.executionDataStartHeight <= builder.FinalizedRootBlock.Height { + return nil, fmt.Errorf( + "execution data start block height (%d) must be greater than the root block height (%d)", + builder.executionDataStartHeight, builder.FinalizedRootBlock.Height) + } + + latestSeal, err := builder.State.Sealed().Head() + if err != nil { + return nil, fmt.Errorf("failed to get latest sealed height") + } + + // Note: since the root block of a spork is also sealed in the root protocol state, the + // latest sealed height is always equal to the root block height. That means that at the + // very beginning of a spork, this check will always fail. Operators should not specify + // an InitialBlockHeight when starting from the beginning of a spork. + if builder.executionDataStartHeight > latestSeal.Height { + return nil, fmt.Errorf( + "execution data start block height (%d) must be less than or equal to the latest sealed block height (%d)", + builder.executionDataStartHeight, latestSeal.Height) + } + + // executionDataStartHeight is provided as the first block to sync, but the + // requester expects the initial last processed height, which is the first height - 1 + builder.executionDataConfig.InitialBlockHeight = builder.executionDataStartHeight - 1 + } else { + builder.executionDataConfig.InitialBlockHeight = builder.SealedRootBlock.Height + } + + execDataDistributor = edrequester.NewExecutionDataDistributor() + + // Execution Data cache with a downloader as the backend. This is used by the requester + // to download and cache execution data for each block. It shares a cache backend instance + // with the datastore implementation. 
+ executionDataCache := execdatacache.NewExecutionDataCache( + builder.ExecutionDataDownloader, + builder.Storage.Headers, + builder.Storage.Seals, + builder.Storage.Results, + execDataCacheBackend, + ) + + r, err := edrequester.New( + builder.Logger, + metrics.NewExecutionDataRequesterCollector(), + builder.ExecutionDataDownloader, + executionDataCache, + processedBlockHeight, + processedNotifications, + builder.State, + builder.Storage.Headers, + builder.executionDataConfig, + execDataDistributor, + ) + if err != nil { + return nil, fmt.Errorf("failed to create execution data requester: %w", err) + } + builder.ExecutionDataRequester = r + + builder.FollowerDistributor.AddOnBlockFinalizedConsumer(builder.ExecutionDataRequester.OnBlockFinalized) + + // add requester into ReadyDoneAware dependency passed to indexer. This allows the indexer + // to wait for the requester to be ready before starting. + requesterDependable.Init(builder.ExecutionDataRequester) + + return builder.ExecutionDataRequester, nil + }). + Component("execution data pruner", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + if !executionDataPrunerEnabled { + return &module.NoopReadyDoneAware{}, nil + } + + var prunerMetrics module.ExecutionDataPrunerMetrics = metrics.NewNoopCollector() + if node.MetricsEnabled { + prunerMetrics = metrics.NewExecutionDataPrunerCollector() + } + + var err error + builder.ExecutionDataPruner, err = pruner.NewPruner( + node.Logger, + prunerMetrics, + builder.ExecutionDataTracker, + pruner.WithPruneCallback(func(ctx context.Context) error { + return builder.ExecutionDatastoreManager.CollectGarbage(ctx) + }), + pruner.WithHeightRangeTarget(builder.executionDataPrunerHeightRangeTarget), + pruner.WithThreshold(builder.executionDataPrunerThreshold), + pruner.WithPruningInterval(builder.executionDataPruningInterval), + ) + if err != nil { + return nil, fmt.Errorf("failed to create execution data pruner: %w", err) + } + + builder.ExecutionDataPruner.RegisterHeightRecorder(builder.ExecutionDataDownloader) + + return builder.ExecutionDataPruner, nil + }) + if builder.executionDataIndexingEnabled { + var indexedBlockHeight storage.ConsumerProgressInitializer + + builder.Module("indexed block height consumer progress", func(node *cmd.NodeConfig) error { + // Note: progress is stored in the MAIN db since that is where indexed execution data is stored. + indexedBlockHeight = store.NewConsumerProgress(builder.ProtocolDB, module.ConsumeProgressExecutionDataIndexerBlockHeight) + return nil + }).Module("transaction results storage", func(node *cmd.NodeConfig) error { + builder.lightTransactionResults = store.NewLightTransactionResults(node.Metrics.Cache, node.ProtocolDB, bstorage.DefaultCacheSize) + return nil + }).DependableComponent("execution data indexer", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + // Note: using a DependableComponent here to ensure that the indexer does not block + // other components from starting while bootstrapping the register db since it may + // take hours to complete. 
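+			// DependableComponent defers a component's start until every entry in its
+			// DependencyList is ready. The entries are typically ProxiedReadyDoneAware
+			// placeholders that are initialized later with the real components, as done
+			// with requesterDependable above. A minimal sketch of the pattern (some names
+			// hypothetical):
+			//
+			//	dep := module.NewProxiedReadyDoneAware() // placeholder; not ready until Init is called
+			//	deps := new(cmd.DependencyList)          // assumed zero-value usable
+			//	deps.Add(dep)
+			//	builder.DependableComponent("consumer", newConsumer, deps) // starts only once dep is ready
+			//	dep.Init(producer) // dep now proxies producer's Ready()/Done()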
+ + pdb, err := pstorage.OpenRegisterPebbleDB( + node.Logger.With().Str("pebbledb", "registers").Logger(), + builder.registersDBPath) + if err != nil { + return nil, fmt.Errorf("could not open registers db: %w", err) + } + builder.ShutdownFunc(func() error { + return pdb.Close() + }) + + bootstrapped, err := pstorage.IsBootstrapped(pdb) + if err != nil { + return nil, fmt.Errorf("could not check if registers db is bootstrapped: %w", err) + } + + if !bootstrapped { + checkpointFile := builder.checkpointFile + if checkpointFile == cmd.NotSet { + checkpointFile = path.Join(builder.BootstrapDir, bootstrap.PathRootCheckpoint) + } + + // currently, the checkpoint must be from the root block. + // read the root hash from the provided checkpoint and verify it matches the + // state commitment from the root snapshot. + err := wal.CheckpointHasRootHash( + node.Logger, + "", // checkpoint file already full path + checkpointFile, + ledger.RootHash(node.RootSeal.FinalState), + ) + if err != nil { + return nil, fmt.Errorf("could not verify checkpoint file: %w", err) + } + + checkpointHeight := builder.SealedRootBlock.Height + + if builder.SealedRootBlock.ID() != builder.RootSeal.BlockID { + return nil, fmt.Errorf("mismatching sealed root block and root seal: %v != %v", + builder.SealedRootBlock.ID(), builder.RootSeal.BlockID) + } + + rootHash := ledger.RootHash(builder.RootSeal.FinalState) + bootstrap, err := pstorage.NewRegisterBootstrap(pdb, checkpointFile, checkpointHeight, rootHash, builder.Logger) + if err != nil { + return nil, fmt.Errorf("could not create registers bootstrap: %w", err) + } + + // TODO: find a way to hook a context up to this to allow a graceful shutdown + workerCount := 10 + err = bootstrap.IndexCheckpointFile(context.Background(), workerCount) + if err != nil { + return nil, fmt.Errorf("could not load checkpoint file: %w", err) + } + } + + registers, err := pstorage.NewRegisters(pdb, builder.registerDBPruneThreshold) + if err != nil { + return nil, fmt.Errorf("could not create registers storage: %w", err) + } + + if builder.registerCacheSize > 0 { + cacheType, err := pstorage.ParseCacheType(builder.registerCacheType) + if err != nil { + return nil, fmt.Errorf("could not parse register cache type: %w", err) + } + cacheMetrics := metrics.NewCacheCollector(builder.RootChainID) + registersCache, err := pstorage.NewRegistersCache(registers, cacheType, builder.registerCacheSize, cacheMetrics) + if err != nil { + return nil, fmt.Errorf("could not create registers cache: %w", err) + } + builder.Storage.RegisterIndex = registersCache + } else { + builder.Storage.RegisterIndex = registers + } + + indexerDerivedChainData, queryDerivedChainData, err := builder.buildDerivedChainData() + if err != nil { + return nil, fmt.Errorf("could not create derived chain data: %w", err) + } + + var collectionExecutedMetric module.CollectionExecutedMetric = metrics.NewNoopCollector() + indexerCore, err := indexer.New( + builder.Logger, + metrics.NewExecutionStateIndexerCollector(), + builder.ProtocolDB, + builder.Storage.RegisterIndex, + builder.Storage.Headers, + builder.events, + builder.Storage.Collections, + builder.Storage.Transactions, + builder.lightTransactionResults, + builder.RootChainID.Chain(), + indexerDerivedChainData, + collectionExecutedMetric, + node.StorageLockMgr, + ) + if err != nil { + return nil, err + } + builder.ExecutionIndexerCore = indexerCore + + // execution state worker uses a jobqueue to process new execution data and indexes it by using the indexer. 
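+			// In other words, each height becomes a job that is processed strictly in
+			// order: fetch the block's execution data from the cache, index it, then
+			// advance the consumer's progress. A simplified sketch of the worker loop
+			// (illustrative only; not the actual jobqueue API):
+			//
+			//	for h := lastIndexed + 1; h <= highestAvailable; h++ {
+			//		execData, err := executionDataStoreCache.ByBlockID(ctx, blockIDAt(h)) // accessor name assumed
+			//		if err != nil {
+			//			break // data not local yet; resume when notified
+			//		}
+			//		indexBlockData(execData) // registers, events, transaction results
+			//		lastIndexed = h
+			//	}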
+			builder.ExecutionIndexer, err = indexer.NewIndexer(
+				builder.Logger,
+				registers.FirstHeight(),
+				registers,
+				indexerCore,
+				executionDataStoreCache,
+				builder.ExecutionDataRequester.HighestConsecutiveHeight,
+				indexedBlockHeight,
+			)
+			if err != nil {
+				return nil, err
+			}
+
+			if executionDataPrunerEnabled {
+				builder.ExecutionDataPruner.RegisterHeightRecorder(builder.ExecutionIndexer)
+			}
+
+			// setup requester to notify indexer when new execution data is received
+			execDataDistributor.AddOnExecutionDataReceivedConsumer(builder.ExecutionIndexer.OnExecutionData)
+
+			err = builder.Reporter.Initialize(builder.ExecutionIndexer)
+			if err != nil {
+				return nil, err
+			}
+
+			// create script execution module; this depends on the indexer being initialized and
+			// the register storage having been bootstrapped
+			scripts := execution.NewScripts(
+				builder.Logger,
+				metrics.NewExecutionCollector(builder.Tracer),
+				builder.RootChainID,
+				computation.NewProtocolStateWrapper(builder.State),
+				builder.Storage.Headers,
+				builder.ExecutionIndexerCore.RegisterValue,
+				builder.scriptExecutorConfig,
+				queryDerivedChainData,
+				builder.programCacheSize > 0,
+			)
+
+			err = builder.ScriptExecutor.Initialize(builder.ExecutionIndexer, scripts, builder.VersionControl)
+			if err != nil {
+				return nil, err
+			}
+
+			err = builder.RegistersAsyncStore.Initialize(registers)
+			if err != nil {
+				return nil, err
+			}
+
+			if builder.stopControlEnabled {
+				builder.StopControl.RegisterHeightRecorder(builder.ExecutionIndexer)
+			}
+
+			return builder.ExecutionIndexer, nil
+		}, builder.IndexerDependencies)
+	}
+
+	if builder.stateStreamConf.ListenAddr != "" {
+		builder.Component("exec state stream engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
+			for key, value := range builder.stateStreamFilterConf {
+				switch key {
+				case "EventTypes":
+					builder.stateStreamConf.MaxEventTypes = value
+				case "Addresses":
+					builder.stateStreamConf.MaxAddresses = value
+				case "Contracts":
+					builder.stateStreamConf.MaxContracts = value
+				case "AccountAddresses":
+					builder.stateStreamConf.MaxAccountAddress = value
+				}
+			}
+			builder.stateStreamConf.RpcMetricsEnabled = builder.rpcMetricsEnabled
+
+			highestAvailableHeight, err := builder.ExecutionDataRequester.HighestConsecutiveHeight()
+			if err != nil {
+				return nil, fmt.Errorf("could not get highest consecutive height: %w", err)
+			}
+			broadcaster := engine.NewBroadcaster()
+
+			eventQueryMode, err := query_mode.ParseIndexQueryMode(builder.rpcConf.BackendConfig.EventQueryMode)
+			if err != nil {
+				return nil, fmt.Errorf("could not parse event query mode: %w", err)
+			}
+
+			// use the events index for events if enabled and the node is configured to use it for
+			// regular event queries
+			useIndex := builder.executionDataIndexingEnabled &&
+				eventQueryMode != query_mode.IndexQueryModeExecutionNodesOnly
+
+			executionDataTracker := subscriptiontracker.NewExecutionDataTracker(
+				builder.Logger,
+				node.State,
+				builder.executionDataConfig.InitialBlockHeight,
+				node.Storage.Headers,
+				broadcaster,
+				highestAvailableHeight,
+				builder.EventsIndex,
+				useIndex,
+			)
+
+			builder.stateStreamBackend, err = statestreambackend.New(
+				node.Logger,
+				node.State,
+				node.Storage.Headers,
+				node.Storage.Seals,
+				node.Storage.Results,
+				builder.ExecutionDataStore,
+				executionDataStoreCache,
+				builder.RegistersAsyncStore,
+				builder.EventsIndex,
+				useIndex,
+				int(builder.stateStreamConf.RegisterIDsRequestLimit),
+				subscription.NewSubscriptionHandler(
+					builder.Logger,
+					broadcaster,
+
builder.stateStreamConf.ClientSendTimeout, + builder.stateStreamConf.ResponseLimit, + builder.stateStreamConf.ClientSendBufferSize, + ), + executionDataTracker, + ) + if err != nil { + return nil, fmt.Errorf("could not create state stream backend: %w", err) + } + + stateStreamEng, err := statestreambackend.NewEng( + node.Logger, + builder.stateStreamConf, + executionDataStoreCache, + node.Storage.Headers, + node.RootChainID, + builder.stateStreamGrpcServer, + builder.stateStreamBackend, + ) + if err != nil { + return nil, fmt.Errorf("could not create state stream engine: %w", err) + } + builder.StateStreamEng = stateStreamEng + + // setup requester to notify ExecutionDataTracker when new execution data is received + execDataDistributor.AddOnExecutionDataReceivedConsumer(builder.stateStreamBackend.OnExecutionData) + + return builder.StateStreamEng, nil + }) + } + return builder +} + +// buildDerivedChainData creates the derived chain data for the indexer and the query engine +// If program caching is disabled, the function will return nil for the indexer cache, and a +// derived chain data object for the query engine cache. +func (builder *ObserverServiceBuilder) buildDerivedChainData() ( + indexerCache *derived.DerivedChainData, + queryCache *derived.DerivedChainData, + err error, +) { + cacheSize := builder.programCacheSize + + // the underlying cache requires size > 0. no data will be written so 1 is fine. + if cacheSize == 0 { + cacheSize = 1 + } + + derivedChainData, err := derived.NewDerivedChainData(cacheSize) + if err != nil { + return nil, nil, err + } + + // writes are done by the indexer. using a nil value effectively disables writes to the cache. + if builder.programCacheSize == 0 { + return nil, derivedChainData, nil + } + + return derivedChainData, derivedChainData, nil +} + +// enqueuePublicNetworkInit enqueues the observer network component initialized for the observer +func (builder *ObserverServiceBuilder) enqueuePublicNetworkInit() { + var publicLibp2pNode p2p.LibP2PNode + builder. + Component("public libp2p node", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + var err error + publicLibp2pNode, err = builder.BuildPublicLibp2pNode(builder.BaseConfig.BindAddr, builder.bootstrapIdentities) + if err != nil { + return nil, fmt.Errorf("could not build public libp2p node: %w", err) + } + + builder.LibP2PNode = publicLibp2pNode + + return publicLibp2pNode, nil + }). + Component("public network", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + receiveCache := netcache.NewHeroReceiveCache(builder.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, + builder.Logger, + metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) + + err := node.Metrics.Mempool.Register(metrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) + if err != nil { + return nil, fmt.Errorf("could not register networking receive cache metric: %w", err) + } + + net, err := underlay.NewNetwork(&underlay.NetworkConfig{ + Logger: builder.Logger.With().Str("component", "public-network").Logger(), + Codec: builder.CodecFactory(), + Me: builder.Me, + Topology: nil, // topology is nil since it is managed by libp2p; //TODO: can we set empty topology? 
+			Libp2pNode:            publicLibp2pNode,
+			Metrics:               builder.Metrics.Network,
+			BitSwapMetrics:        builder.Metrics.Bitswap,
+			IdentityProvider:      builder.IdentityProvider,
+			ReceiveCache:          receiveCache,
+			ConduitFactory:        conduit.NewDefaultConduitFactory(),
+			SporkId:               builder.SporkID,
+			UnicastMessageTimeout: underlay.DefaultUnicastTimeout,
+			IdentityTranslator:    builder.IDTranslator,
+			AlspCfg: &alspmgr.MisbehaviorReportManagerConfig{
+				Logger:                  builder.Logger,
+				SpamRecordCacheSize:     builder.FlowConfig.NetworkConfig.AlspConfig.SpamRecordCacheSize,
+				SpamReportQueueSize:     builder.FlowConfig.NetworkConfig.AlspConfig.SpamReportQueueSize,
+				DisablePenalty:          builder.FlowConfig.NetworkConfig.AlspConfig.DisablePenalty,
+				HeartBeatInterval:       builder.FlowConfig.NetworkConfig.AlspConfig.HearBeatInterval,
+				AlspMetrics:             builder.Metrics.Network,
+				HeroCacheMetricsFactory: builder.HeroCacheMetricsFactory(),
+				NetworkType:             network.PublicNetwork,
+			},
+			SlashingViolationConsumerFactory: func(adapter network.ConduitAdapter) network.ViolationsConsumer {
+				return slashing.NewSlashingViolationsConsumer(builder.Logger, builder.Metrics.Network, adapter)
+			},
+		}, underlay.WithMessageValidators(publicNetworkMsgValidators(node.Logger, node.IdentityProvider, node.NodeID)...))
+		if err != nil {
+			return nil, fmt.Errorf("could not initialize network: %w", err)
+		}
+
+		builder.NetworkUnderlay = net
+		builder.EngineRegistry = converter.NewNetwork(net, channels.SyncCommittee, channels.PublicSyncCommittee)
+
+		builder.Logger.Info().Msgf("network will run on address: %s", builder.BindAddr)
+
+		idEvents := gadgets.NewIdentityDeltas(builder.NetworkUnderlay.UpdateNodeAddresses)
+		builder.ProtocolEvents.AddConsumer(idEvents)
+
+		return builder.EngineRegistry, nil
+	})
+}
+
+// enqueueConnectWithStakedAN enqueues the upstream connector component which connects the libp2p host of the observer
+// service with the AN.
+// Currently, there is an issue with LibP2P stopping advertisements of subscribed topics if no peers are connected
+// (https://github.com/libp2p/go-libp2p-pubsub/issues/442). This means that an observer could end up not being
+// discovered by other observers if it subscribes to a topic before connecting to the AN. Hence the need for an
+// explicit connection to the AN before the node attempts to subscribe to topics.
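+//
+// The required ordering, as a sketch (standard go-libp2p calls; the variable names are
+// illustrative):
+//
+//	if err := host.Connect(ctx, accessNodeInfo); err != nil { /* handle */ } // connect to the AN first
+//	topic, _ := ps.Join(topicName) // only then join the topic,
+//	sub, _ := topic.Subscribe()    // so it is advertised to at least one peer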
+func (builder *ObserverServiceBuilder) enqueueConnectWithStakedAN() { + builder.Component("upstream connector", func(_ *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return consensus_follower.NewUpstreamConnector(builder.bootstrapIdentities, builder.LibP2PNode, builder.Logger), nil + }) +} + +func (builder *ObserverServiceBuilder) enqueueRPCServer() { + builder.Module("transaction metrics", func(node *cmd.NodeConfig) error { + builder.TransactionTimings = stdmap.NewTransactionTimings(1500 * 300) // assume 1500 TPS * 300 seconds + builder.TransactionMetrics = metrics.NewTransactionCollector( + node.Logger, + builder.TransactionTimings, + builder.logTxTimeToFinalized, + builder.logTxTimeToExecuted, + builder.logTxTimeToFinalizedExecuted, + builder.logTxTimeToSealed, + ) + return nil + }) + builder.Module("rest metrics", func(node *cmd.NodeConfig) error { + m, err := metrics.NewRestCollector(router.URLToRoute, node.MetricsRegisterer) + if err != nil { + return err + } + builder.RestMetrics = m + return nil + }) + builder.Module("access metrics", func(node *cmd.NodeConfig) error { + builder.AccessMetrics = metrics.NewAccessCollector( + metrics.WithTransactionMetrics(builder.TransactionMetrics), + metrics.WithBackendScriptsMetrics(builder.TransactionMetrics), + metrics.WithRestMetrics(builder.RestMetrics), + ) + return nil + }) + builder.Module("server certificate", func(node *cmd.NodeConfig) error { + // generate the server certificate that will be served by the GRPC server + x509Certificate, err := grpcutils.X509Certificate(node.NetworkKey) + if err != nil { + return err + } + tlsConfig := grpcutils.DefaultServerTLSConfig(x509Certificate) + builder.rpcConf.TransportCredentials = credentials.NewTLS(tlsConfig) + return nil + }) + builder.Module("creating grpc servers", func(node *cmd.NodeConfig) error { + if builder.rpcConf.DeprecatedMaxMsgSize != 0 { + node.Logger.Warn().Msg("A deprecated flag was specified (--rpc-max-message-size). Use --rpc-max-request-message-size and --rpc-max-response-message-size instead. 
This flag will be removed in a future release.") + builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize = builder.rpcConf.DeprecatedMaxMsgSize + builder.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize = builder.rpcConf.DeprecatedMaxMsgSize + } + + builder.secureGrpcServer = grpcserver.NewGrpcServerBuilder(node.Logger, + builder.rpcConf.SecureGRPCListenAddr, + builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize, + builder.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, builder.apiBurstlimits, + grpcserver.WithTransportCredentials(builder.rpcConf.TransportCredentials)).Build() + + builder.stateStreamGrpcServer = grpcserver.NewGrpcServerBuilder( + node.Logger, + builder.stateStreamConf.ListenAddr, + builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize, + builder.stateStreamConf.MaxExecutionDataMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, + builder.apiBurstlimits, + grpcserver.WithStreamInterceptor()).Build() + + if builder.rpcConf.UnsecureGRPCListenAddr != builder.stateStreamConf.ListenAddr { + builder.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(node.Logger, + builder.rpcConf.UnsecureGRPCListenAddr, + builder.rpcConf.BackendConfig.AccessConfig.MaxRequestMsgSize, + builder.rpcConf.BackendConfig.AccessConfig.MaxResponseMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, + builder.apiBurstlimits).Build() + } else { + builder.unsecureGrpcServer = builder.stateStreamGrpcServer + } + + return nil + }) + builder.Module("async register store", func(node *cmd.NodeConfig) error { + builder.RegistersAsyncStore = execution.NewRegistersAsyncStore() + return nil + }) + builder.Module("events storage", func(node *cmd.NodeConfig) error { + builder.events = store.NewEvents(node.Metrics.Cache, node.ProtocolDB) + return nil + }) + builder.Module("reporter", func(node *cmd.NodeConfig) error { + builder.Reporter = index.NewReporter() + return nil + }) + builder.Module("events index", func(node *cmd.NodeConfig) error { + builder.EventsIndex = index.NewEventsIndex(builder.Reporter, builder.events) + return nil + }) + builder.Module("transaction result index", func(node *cmd.NodeConfig) error { + builder.TxResultsIndex = index.NewTransactionResultsIndex(builder.Reporter, builder.lightTransactionResults) + return nil + }) + builder.Module("script executor", func(node *cmd.NodeConfig) error { + builder.ScriptExecutor = backend.NewScriptExecutor(builder.Logger, builder.scriptExecMinBlock, builder.scriptExecMaxBlock) + return nil + }) + + versionControlDependable := module.NewProxiedReadyDoneAware() + builder.IndexerDependencies.Add(versionControlDependable) + stopControlDependable := module.NewProxiedReadyDoneAware() + builder.IndexerDependencies.Add(stopControlDependable) + + builder.Component("version control", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + if !builder.versionControlEnabled { + noop := &module.NoopReadyDoneAware{} + versionControlDependable.Init(noop) + return noop, nil + } + + nodeVersion, err := build.Semver() + if err != nil { + return nil, fmt.Errorf("could not load node version for version control. "+ + "version (%s) is not semver compliant: %w. 
Make sure a valid semantic version is provided in the VERSION environment variable", build.Version(), err)
+		}
+
+		versionControl, err := version.NewVersionControl(
+			builder.Logger,
+			node.Storage.VersionBeacons,
+			nodeVersion,
+			builder.SealedRootBlock.Height,
+			builder.LastFinalizedHeader.Height,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("could not create version control: %w", err)
+		}
+
+		// VersionControl needs to consume BlockFinalized events.
+		node.ProtocolEvents.AddConsumer(versionControl)
+
+		builder.VersionControl = versionControl
+		versionControlDependable.Init(builder.VersionControl)
+
+		return versionControl, nil
+	})
+	builder.Component("stop control", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
+		if !builder.stopControlEnabled {
+			noop := &module.NoopReadyDoneAware{}
+			stopControlDependable.Init(noop)
+			return noop, nil
+		}
+
+		stopControl := stop.NewStopControl(
+			builder.Logger,
+		)
+
+		builder.VersionControl.AddVersionUpdatesConsumer(stopControl.OnVersionUpdate)
+
+		builder.StopControl = stopControl
+		stopControlDependable.Init(builder.StopControl)
+
+		return stopControl, nil
+	})
+
+	builder.Component("RPC engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
+		accessMetrics := builder.AccessMetrics
+		config := builder.rpcConf
+		backendConfig := config.BackendConfig
+		cacheSize := int(backendConfig.ConnectionPoolSize)
+
+		var connBackendCache *rpcConnection.Cache
+		var err error
+		if cacheSize > 0 {
+			connBackendCache, err = rpcConnection.NewCache(node.Logger, accessMetrics, cacheSize)
+			if err != nil {
+				return nil, fmt.Errorf("could not initialize connection cache: %w", err)
+			}
+		}
+
+		connFactory := &rpcConnection.ConnectionFactoryImpl{
+			AccessConfig:     backendConfig.AccessConfig,
+			CollectionConfig: backendConfig.CollectionConfig,
+			ExecutionConfig:  backendConfig.ExecutionConfig,
+			AccessMetrics:    accessMetrics,
+			Log:              node.Logger,
+			Manager: rpcConnection.NewManager(
+				node.Logger,
+				accessMetrics,
+				connBackendCache,
+				backendConfig.CircuitBreakerConfig,
+				config.CompressorName,
+			),
+		}
+
+		broadcaster := engine.NewBroadcaster()
+		// create BlockTracker, which tracks new blocks (finalized and sealed) and
+		// handles block-related operations.
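+		// For example, streaming endpoints resolve "latest" through the tracker, and the
+		// shared broadcaster wakes blocked subscribers when a new block arrives. Usage
+		// sketch (illustrative; the accessor below is assumed from the tracker interface):
+		//
+		//	height, err := blockTracker.GetHighestHeight(flow.BlockStatusFinalized)
+		//	broadcaster.Publish() // invoked on finalization to wake subscribers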
+ blockTracker, err := subscriptiontracker.NewBlockTracker( + node.State, + builder.SealedRootBlock.Height, + node.Storage.Headers, + broadcaster, + ) + if err != nil { + return nil, fmt.Errorf("failed to initialize block tracker: %w", err) + } + + // If execution data syncing and indexing is disabled, pass nil indexReporter + var indexReporter state_synchronization.IndexReporter + if builder.executionDataSyncEnabled && builder.executionDataIndexingEnabled { + indexReporter = builder.Reporter + } + + preferredENIdentifiers, err := flow.IdentifierListFromHex(backendConfig.PreferredExecutionNodeIDs) + if err != nil { + return nil, fmt.Errorf("failed to convert node id string to Flow Identifier for preferred EN map: %w", err) + } + + fixedENIdentifiers, err := flow.IdentifierListFromHex(backendConfig.FixedExecutionNodeIDs) + if err != nil { + return nil, fmt.Errorf("failed to convert node id string to Flow Identifier for fixed EN map: %w", err) + } + + scriptExecMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.ScriptExecutionMode) + if err != nil { + return nil, fmt.Errorf("could not parse script execution mode: %w", err) + } + + eventQueryMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.EventQueryMode) + if err != nil { + return nil, fmt.Errorf("could not parse event query mode: %w", err) + } + if eventQueryMode == query_mode.IndexQueryModeCompare { + return nil, fmt.Errorf("event query mode 'compare' is not supported") + } + + txResultQueryMode, err := query_mode.ParseIndexQueryMode(config.BackendConfig.TxResultQueryMode) + if err != nil { + return nil, fmt.Errorf("could not parse transaction result query mode: %w", err) + } + if txResultQueryMode == query_mode.IndexQueryModeCompare { + return nil, fmt.Errorf("transaction result query mode 'compare' is not supported") + } + + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + node.Logger, + node.State, + node.Storage.Receipts, + preferredENIdentifiers, + fixedENIdentifiers, + ) + + backendParams := backend.Params{ + State: node.State, + Blocks: node.Storage.Blocks, + Headers: node.Storage.Headers, + Collections: node.Storage.Collections, + Transactions: node.Storage.Transactions, + ExecutionReceipts: node.Storage.Receipts, + ExecutionResults: node.Storage.Results, + ChainID: node.RootChainID, + AccessMetrics: accessMetrics, + ConnFactory: connFactory, + RetryEnabled: false, + MaxHeightRange: backendConfig.MaxHeightRange, + Log: node.Logger, + SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, + Communicator: node_communicator.NewNodeCommunicator(backendConfig.CircuitBreakerConfig.Enabled), + BlockTracker: blockTracker, + ScriptExecutionMode: scriptExecMode, + EventQueryMode: eventQueryMode, + TxResultQueryMode: txResultQueryMode, + SubscriptionHandler: subscription.NewSubscriptionHandler( + builder.Logger, + broadcaster, + builder.stateStreamConf.ClientSendTimeout, + builder.stateStreamConf.ResponseLimit, + builder.stateStreamConf.ClientSendBufferSize, + ), + IndexReporter: indexReporter, + VersionControl: builder.VersionControl, + ExecNodeIdentitiesProvider: execNodeIdentitiesProvider, + MaxScriptAndArgumentSize: config.BackendConfig.AccessConfig.MaxRequestMsgSize, + } + + if builder.localServiceAPIEnabled { + backendParams.ScriptExecutionMode = query_mode.IndexQueryModeLocalOnly + backendParams.EventQueryMode = query_mode.IndexQueryModeLocalOnly + backendParams.TxResultsIndex = builder.TxResultsIndex + backendParams.EventsIndex = builder.EventsIndex + 
backendParams.ScriptExecutor = builder.ScriptExecutor + } + + accessBackend, err := backend.New(backendParams) + if err != nil { + return nil, fmt.Errorf("could not initialize backend: %w", err) + } + + observerCollector := metrics.NewObserverCollector() + restHandler, err := restapiproxy.NewRestProxyHandler( + accessBackend, + builder.upstreamIdentities, + connFactory, + builder.Logger, + observerCollector, + node.RootChainID.Chain()) + if err != nil { + return nil, err + } + + engineBuilder, err := rpc.NewBuilder( + node.Logger, + node.State, + config, + node.RootChainID, + accessMetrics, + builder.rpcMetricsEnabled, builder.Me, + accessBackend, + restHandler, + builder.secureGrpcServer, + builder.unsecureGrpcServer, + builder.stateStreamBackend, + builder.stateStreamConf, + indexReporter, ) if err != nil { return nil, err } // upstream access node forwarder - forwarder, err := apiproxy.NewFlowAccessAPIForwarder(builder.upstreamIdentities, builder.apiTimeout, builder.rpcConf.MaxMsgSize) + forwarder, err := apiproxy.NewFlowAccessAPIForwarder(builder.upstreamIdentities, connFactory) if err != nil { return nil, err } - proxy := &apiproxy.FlowAccessAPIRouter{ - Logger: builder.Logger, - Metrics: metrics.NewObserverCollector(), + rpcHandler := apiproxy.NewFlowAccessAPIRouter(apiproxy.Params{ + Log: builder.Logger, + Metrics: observerCollector, Upstream: forwarder, - Observer: protocol.NewHandler(protocol.New( - node.State, - node.Storage.Blocks, - node.Storage.Headers, - backend.NewNetworkAPI(node.State, node.RootChainID, backend.DefaultSnapshotHistoryLimit), - )), - } + Local: engineBuilder.DefaultHandler(hotsignature.NewBlockSignerDecoder(builder.Committee)), + UseIndex: builder.localServiceAPIEnabled, + }) // build the rpc engine builder.RpcEng, err = engineBuilder. - WithNewHandler(proxy). + WithRpcHandler(rpcHandler). WithLegacy(). Build() if err != nil { @@ -1051,29 +2064,21 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { builder.FollowerDistributor.AddOnBlockFinalizedConsumer(builder.RpcEng.OnFinalizedBlock) return builder.RpcEng, nil }) -} -// initMiddleware creates the network.Middleware implementation with the libp2p factory function, metrics, peer update -// interval, and validators. The network.Middleware is then passed into the initNetwork function. 
-func (builder *ObserverServiceBuilder) initMiddleware(nodeID flow.Identifier, - libp2pNode p2p.LibP2PNode, - validators ...network.MessageValidator, -) network.Middleware { - slashingViolationsConsumer := slashing.NewSlashingViolationsConsumer(builder.Logger, builder.Metrics.Network) - mw := middleware.NewMiddleware( - builder.Logger, - libp2pNode, nodeID, - builder.Metrics.Bitswap, - builder.SporkID, - middleware.DefaultUnicastTimeout, - builder.IDTranslator, - builder.CodecFactory(), - slashingViolationsConsumer, - middleware.WithMessageValidators(validators...), // use default identifier provider - ) - builder.NodeDisallowListDistributor.AddConsumer(mw) - builder.Middleware = mw - return builder.Middleware + // build secure grpc server + builder.Component("secure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.secureGrpcServer, nil + }) + + builder.Component("state stream unsecure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.stateStreamGrpcServer, nil + }) + + if builder.rpcConf.UnsecureGRPCListenAddr != builder.stateStreamConf.ListenAddr { + builder.Component("unsecure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.unsecureGrpcServer, nil + }) + } } func loadNetworkingKey(path string) (crypto.PrivateKey, error) { diff --git a/cmd/scaffold.go b/cmd/scaffold.go index 60cf15b80b9..142f2d55c6d 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -1,11 +1,11 @@ package cmd import ( + "context" "crypto/tls" "crypto/x509" "errors" "fmt" - "math/rand" "os" "runtime" "strings" @@ -14,20 +14,28 @@ import ( gcemd "cloud.google.com/go/compute/metadata" "github.com/dgraph-io/badger/v2" "github.com/hashicorp/go-multierror" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "github.com/spf13/pflag" "golang.org/x/time/rate" "google.golang.org/api/option" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/admin" "github.com/onflow/flow-go/admin/commands" "github.com/onflow/flow-go/admin/commands/common" storageCommands "github.com/onflow/flow-go/admin/commands/storage" "github.com/onflow/flow-go/cmd/build" + "github.com/onflow/flow-go/cmd/scaffold" + "github.com/onflow/flow-go/config" "github.com/onflow/flow-go/consensus/hotstuff/persister" - "github.com/onflow/flow-go/fvm" - "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/initialize" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" @@ -44,29 +52,38 @@ import ( "github.com/onflow/flow-go/module/updatable_configs" "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network" + alspmgr "github.com/onflow/flow-go/network/alsp/manager" netcache "github.com/onflow/flow-go/network/cache" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/converter" "github.com/onflow/flow-go/network/p2p" + p2pbuilder "github.com/onflow/flow-go/network/p2p/builder" + p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config" "github.com/onflow/flow-go/network/p2p/cache" "github.com/onflow/flow-go/network/p2p/conduit" + "github.com/onflow/flow-go/network/p2p/connection" + p2pdht "github.com/onflow/flow-go/network/p2p/dht" 
"github.com/onflow/flow-go/network/p2p/dns" - "github.com/onflow/flow-go/network/p2p/inspector/validation" - "github.com/onflow/flow-go/network/p2p/middleware" - "github.com/onflow/flow-go/network/p2p/p2pbuilder" - p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" + "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/p2p/ping" "github.com/onflow/flow-go/network/p2p/subscription" + "github.com/onflow/flow-go/network/p2p/translator" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/unicast/ratelimit" + "github.com/onflow/flow-go/network/p2p/utils" "github.com/onflow/flow-go/network/p2p/utils/ratelimiter" "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/topology" + "github.com/onflow/flow-go/network/underlay" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" + "github.com/onflow/flow-go/state/protocol/datastore" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/state/protocol/events/gadgets" "github.com/onflow/flow-go/storage" bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" sutil "github.com/onflow/flow-go/storage/util" "github.com/onflow/flow-go/utils/logging" ) @@ -94,12 +111,17 @@ type namedModuleFunc struct { name string } -type namedComponentFunc struct { - fn ReadyDoneFactory - name string +// NamedComponentFactory is wrapper for ReadyDoneFactory with additional fields: +// Name - name of the component +// ErrorHandler - error handler for the component +// Dependencies - list of dependencies for the component that should be ready before +// the component is started +type NamedComponentFactory[Input any] struct { + ComponentFactory ReadyDoneFactory[Input] + Name string - errorHandler component.OnError - dependencies *DependencyList + ErrorHandler component.OnError + Dependencies *DependencyList } // FlowNodeBuilder is the default builder struct used for all flow nodes @@ -115,7 +137,7 @@ type FlowNodeBuilder struct { *NodeConfig flags *pflag.FlagSet modules []namedModuleFunc - components []namedComponentFunc + components []NamedComponentFactory[*NodeConfig] postShutdownFns []func() error preInitFns []BuilderFunc postInitFns []BuilderFunc @@ -124,23 +146,37 @@ type FlowNodeBuilder struct { adminCommandBootstrapper *admin.CommandRunnerBootstrapper adminCommands map[string]func(config *NodeConfig) commands.AdminCommand componentBuilder component.ComponentManagerBuilder + BootstrapNodeAddresses []string + BootstrapNodePublicKeys []string } var _ NodeBuilder = (*FlowNodeBuilder)(nil) func (fnb *FlowNodeBuilder) BaseFlags() { + defaultFlowConfig, err := config.DefaultConfig() + if err != nil { + fnb.Logger.Fatal().Err(err).Msg("failed to initialize flow config") + } + + // initialize pflag set for Flow node + config.InitializePFlagSet(fnb.flags, defaultFlowConfig) + defaultConfig := DefaultBaseConfig() // bind configuration parameters fnb.flags.StringVar(&fnb.BaseConfig.nodeIDHex, "nodeid", defaultConfig.nodeIDHex, "identity of our node") fnb.flags.StringVar(&fnb.BaseConfig.BindAddr, "bind", defaultConfig.BindAddr, "address to bind on") fnb.flags.StringVarP(&fnb.BaseConfig.BootstrapDir, "bootstrapdir", "b", defaultConfig.BootstrapDir, "path to the bootstrap directory") - fnb.flags.StringVarP(&fnb.BaseConfig.datadir, "datadir", 
"d", defaultConfig.datadir, "directory to store the public database (protocol state)") + fnb.flags.StringVarP(&fnb.BaseConfig.datadir, "datadir", "d", defaultConfig.datadir, "directory to store the protocol database") + + var rejectPebbleDir rejectPebbleDirValue + fnb.flags.VarP(rejectPebbleDir, "pebble-dir", "", "DEPRECATED") + _ = fnb.flags.MarkHidden("pebble-dir") + + fnb.flags.StringVar(&fnb.BaseConfig.pebbleCheckpointsDir, "pebble-checkpoints-dir", defaultConfig.pebbleCheckpointsDir, "directory to store the checkpoints for the public pebble database (protocol state)") fnb.flags.StringVar(&fnb.BaseConfig.secretsdir, "secretsdir", defaultConfig.secretsdir, "directory to store private database (secrets)") fnb.flags.StringVarP(&fnb.BaseConfig.level, "loglevel", "l", defaultConfig.level, "level for logging output") fnb.flags.Uint32Var(&fnb.BaseConfig.debugLogLimit, "debug-log-limit", defaultConfig.debugLogLimit, "max number of debug/trace log events per second") - fnb.flags.DurationVar(&fnb.BaseConfig.PeerUpdateInterval, "peerupdate-interval", defaultConfig.PeerUpdateInterval, "how often to refresh the peer connections for the node") - fnb.flags.DurationVar(&fnb.BaseConfig.UnicastMessageTimeout, "unicast-timeout", defaultConfig.UnicastMessageTimeout, "how long a unicast transmission can take to complete") fnb.flags.UintVarP(&fnb.BaseConfig.metricsPort, "metricport", "m", defaultConfig.metricsPort, "port for /metrics endpoint") fnb.flags.BoolVar(&fnb.BaseConfig.profilerConfig.Enabled, "profiler-enabled", defaultConfig.profilerConfig.Enabled, "whether to enable the auto-profiler") fnb.flags.BoolVar(&fnb.BaseConfig.profilerConfig.UploaderEnabled, "profile-uploader-enabled", defaultConfig.profilerConfig.UploaderEnabled, @@ -166,76 +202,129 @@ func (fnb *FlowNodeBuilder) BaseFlags() { fnb.flags.StringVar(&fnb.BaseConfig.AdminClientCAs, "admin-client-certs", defaultConfig.AdminClientCAs, "admin client certs (for mutual TLS)") fnb.flags.UintVar(&fnb.BaseConfig.AdminMaxMsgSize, "admin-max-response-size", defaultConfig.AdminMaxMsgSize, "admin server max response size in bytes") - fnb.flags.Float64Var(&fnb.BaseConfig.LibP2PResourceManagerConfig.FileDescriptorsRatio, "libp2p-fd-ratio", defaultConfig.LibP2PResourceManagerConfig.FileDescriptorsRatio, "ratio of available file descriptors to be used by libp2p (in (0,1])") - fnb.flags.Float64Var(&fnb.BaseConfig.LibP2PResourceManagerConfig.MemoryLimitRatio, "libp2p-memory-limit", defaultConfig.LibP2PResourceManagerConfig.MemoryLimitRatio, "ratio of available memory to be used by libp2p (in (0,1])") - fnb.flags.IntVar(&fnb.BaseConfig.LibP2PResourceManagerConfig.PeerBaseLimitConnsInbound, "libp2p-inbound-conns-limit", defaultConfig.LibP2PResourceManagerConfig.PeerBaseLimitConnsInbound, "the maximum amount of allowed inbound connections per peer") - fnb.flags.IntVar(&fnb.BaseConfig.ConnectionManagerConfig.LowWatermark, "libp2p-connmgr-low", defaultConfig.ConnectionManagerConfig.LowWatermark, "low watermarking for libp2p connection manager") - fnb.flags.IntVar(&fnb.BaseConfig.ConnectionManagerConfig.HighWatermark, "libp2p-connmgr-high", defaultConfig.ConnectionManagerConfig.HighWatermark, "high watermarking for libp2p connection manager") - fnb.flags.DurationVar(&fnb.BaseConfig.ConnectionManagerConfig.GracePeriod, "libp2p-connmgr-grace", defaultConfig.ConnectionManagerConfig.GracePeriod, "grace period for libp2p connection manager") - fnb.flags.DurationVar(&fnb.BaseConfig.ConnectionManagerConfig.SilencePeriod, "libp2p-connmgr-silence", 
defaultConfig.ConnectionManagerConfig.SilencePeriod, "silence period for libp2p connection manager") - - fnb.flags.DurationVar(&fnb.BaseConfig.DNSCacheTTL, "dns-cache-ttl", defaultConfig.DNSCacheTTL, "time-to-live for dns cache") - fnb.flags.StringSliceVar(&fnb.BaseConfig.PreferredUnicastProtocols, "preferred-unicast-protocols", nil, "preferred unicast protocols in ascending order of preference") - fnb.flags.Uint32Var(&fnb.BaseConfig.NetworkReceivedMessageCacheSize, "networking-receive-cache-size", p2p.DefaultReceiveCacheSize, - "incoming message cache size at networking layer") - fnb.flags.BoolVar(&fnb.BaseConfig.NetworkConnectionPruning, "networking-connection-pruning", defaultConfig.NetworkConnectionPruning, "enabling connection trimming") - fnb.flags.BoolVar(&fnb.BaseConfig.GossipSubConfig.PeerScoring, "peer-scoring-enabled", defaultConfig.GossipSubConfig.PeerScoring, "enabling peer scoring on pubsub network") - fnb.flags.DurationVar(&fnb.BaseConfig.GossipSubConfig.LocalMeshLogInterval, "gossipsub-local-mesh-logging-interval", defaultConfig.GossipSubConfig.LocalMeshLogInterval, "logging interval for local mesh in gossipsub") - fnb.flags.DurationVar(&fnb.BaseConfig.GossipSubConfig.ScoreTracerInterval, "gossipsub-score-tracer-interval", defaultConfig.GossipSubConfig.ScoreTracerInterval, "logging interval for peer score tracer in gossipsub, set to 0 to disable") fnb.flags.UintVar(&fnb.BaseConfig.guaranteesCacheSize, "guarantees-cache-size", bstorage.DefaultCacheSize, "collection guarantees cache size") fnb.flags.UintVar(&fnb.BaseConfig.receiptsCacheSize, "receipts-cache-size", bstorage.DefaultCacheSize, "receipts cache size") + fnb.flags.BoolVar(&fnb.BaseConfig.DhtSystemEnabled, + "dht-enabled", + defaultConfig.DhtSystemEnabled, + "[experimental] whether to enable dht system. This is an experimental feature. Use with caution.") + fnb.flags.BoolVar(&fnb.BaseConfig.BitswapReprovideEnabled, + "bitswap-reprovide-enabled", + defaultConfig.BitswapReprovideEnabled, + "[experimental] whether to enable bitswap reproviding. This is an experimental feature. 
Use with caution.") + // dynamic node startup flags - fnb.flags.StringVar(&fnb.BaseConfig.DynamicStartupANPubkey, "dynamic-startup-access-publickey", "", "the public key of the trusted secure access node to connect to when using dynamic-startup, this access node must be staked") - fnb.flags.StringVar(&fnb.BaseConfig.DynamicStartupANAddress, "dynamic-startup-access-address", "", "the access address of the trusted secure access node to connect to when using dynamic-startup, this access node must be staked") - fnb.flags.StringVar(&fnb.BaseConfig.DynamicStartupEpochPhase, "dynamic-startup-epoch-phase", "EpochPhaseSetup", "the target epoch phase for dynamic startup <EpochPhaseStaking|EpochPhaseSetup|EpochPhaseCommitted") - fnb.flags.StringVar(&fnb.BaseConfig.DynamicStartupEpoch, "dynamic-startup-epoch", "current", "the target epoch for dynamic-startup, use \"current\" to start node in the current epoch") - fnb.flags.DurationVar(&fnb.BaseConfig.DynamicStartupSleepInterval, "dynamic-startup-sleep-interval", time.Minute, "the interval in which the node will check if it can start") + fnb.flags.StringVar(&fnb.BaseConfig.DynamicStartupANPubkey, + "dynamic-startup-access-publickey", + "", + "the public key of the trusted secure access node to connect to when using dynamic-startup, this access node must be staked") + fnb.flags.StringVar(&fnb.BaseConfig.DynamicStartupANAddress, + "dynamic-startup-access-address", + "", + "the access address of the trusted secure access node to connect to when using dynamic-startup, this access node must be staked") + fnb.flags.StringVar(&fnb.BaseConfig.DynamicStartupEpochPhase, + "dynamic-startup-epoch-phase", + "EpochPhaseSetup", + "the target epoch phase for dynamic startup <EpochPhaseStaking|EpochPhaseSetup|EpochPhaseCommitted") + fnb.flags.StringVar(&fnb.BaseConfig.DynamicStartupEpoch, + "dynamic-startup-epoch", + "current", + "the target epoch for dynamic-startup, use \"current\" to start node in the current epoch") + fnb.flags.DurationVar(&fnb.BaseConfig.DynamicStartupSleepInterval, + "dynamic-startup-sleep-interval", + time.Minute, + "the interval in which the node will check if it can start") fnb.flags.BoolVar(&fnb.BaseConfig.InsecureSecretsDB, "insecure-secrets-db", false, "allow the node to start up without an secrets DB encryption key") fnb.flags.BoolVar(&fnb.BaseConfig.HeroCacheMetricsEnable, "herocache-metrics-collector", false, "enables herocache metrics collection") // sync core flags - fnb.flags.DurationVar(&fnb.BaseConfig.SyncCoreConfig.RetryInterval, "sync-retry-interval", defaultConfig.SyncCoreConfig.RetryInterval, "the initial interval before we retry a sync request, uses exponential backoff") - fnb.flags.UintVar(&fnb.BaseConfig.SyncCoreConfig.Tolerance, "sync-tolerance", defaultConfig.SyncCoreConfig.Tolerance, "determines how big of a difference in block heights we tolerate before actively syncing with range requests") - fnb.flags.UintVar(&fnb.BaseConfig.SyncCoreConfig.MaxAttempts, "sync-max-attempts", defaultConfig.SyncCoreConfig.MaxAttempts, "the maximum number of attempts we make for each requested block/height before discarding") - fnb.flags.UintVar(&fnb.BaseConfig.SyncCoreConfig.MaxSize, "sync-max-size", defaultConfig.SyncCoreConfig.MaxSize, "the maximum number of blocks we request in the same block request message") - fnb.flags.UintVar(&fnb.BaseConfig.SyncCoreConfig.MaxRequests, "sync-max-requests", defaultConfig.SyncCoreConfig.MaxRequests, "the maximum number of requests we send during each scanning period") - - 
fnb.flags.Uint64Var(&fnb.BaseConfig.ComplianceConfig.SkipNewProposalsThreshold, "compliance-skip-proposals-threshold", defaultConfig.ComplianceConfig.SkipNewProposalsThreshold, "threshold at which new proposals are discarded rather than cached, if their height is this much above local finalized height") - - // unicast stream handler rate limits - fnb.flags.IntVar(&fnb.BaseConfig.UnicastRateLimitersConfig.MessageRateLimit, "unicast-message-rate-limit", defaultConfig.UnicastRateLimitersConfig.MessageRateLimit, "maximum number of unicast messages that a peer can send per second") - fnb.flags.IntVar(&fnb.BaseConfig.UnicastRateLimitersConfig.BandwidthRateLimit, "unicast-bandwidth-rate-limit", defaultConfig.UnicastRateLimitersConfig.BandwidthRateLimit, "bandwidth size in bytes a peer is allowed to send via unicast streams per second") - fnb.flags.IntVar(&fnb.BaseConfig.UnicastRateLimitersConfig.BandwidthBurstLimit, "unicast-bandwidth-burst-limit", defaultConfig.UnicastRateLimitersConfig.BandwidthBurstLimit, "bandwidth size in bytes a peer is allowed to send at one time") - fnb.flags.DurationVar(&fnb.BaseConfig.UnicastRateLimitersConfig.LockoutDuration, "unicast-rate-limit-lockout-duration", defaultConfig.UnicastRateLimitersConfig.LockoutDuration, "the number of seconds a peer will be forced to wait before being allowed to successful reconnect to the node after being rate limited") - fnb.flags.BoolVar(&fnb.BaseConfig.UnicastRateLimitersConfig.DryRun, "unicast-rate-limit-dry-run", defaultConfig.UnicastRateLimitersConfig.DryRun, "disable peer disconnects and connections gating when rate limiting peers") - - // gossipsub RPC control message validation limits used for validation configuration and rate limiting - fnb.flags.IntVar(&fnb.BaseConfig.GossipSubConfig.RpcInspector.ValidationInspectorConfigs.NumberOfWorkers, "gossipsub-rpc-validation-inspector-workers", defaultConfig.GossipSubConfig.RpcInspector.ValidationInspectorConfigs.NumberOfWorkers, "number of gossupsub RPC control message validation inspector component workers") - fnb.flags.Uint32Var(&fnb.BaseConfig.GossipSubConfig.RpcInspector.ValidationInspectorConfigs.CacheSize, "gossipsub-rpc-validation-inspector-cache-size", defaultConfig.GossipSubConfig.RpcInspector.ValidationInspectorConfigs.CacheSize, "cache size for gossipsub RPC validation inspector events worker pool queue.") - fnb.flags.StringToIntVar(&fnb.BaseConfig.GossipSubConfig.RpcInspector.ValidationInspectorConfigs.GraftLimits, "gossipsub-rpc-graft-limits", defaultConfig.GossipSubConfig.RpcInspector.ValidationInspectorConfigs.GraftLimits, fmt.Sprintf("discard threshold, safety and rate limits for gossipsub RPC GRAFT message validation e.g: %s=1000,%s=100,%s=1000", validation.DiscardThresholdMapKey, validation.SafetyThresholdMapKey, validation.RateLimitMapKey)) - fnb.flags.StringToIntVar(&fnb.BaseConfig.GossipSubConfig.RpcInspector.ValidationInspectorConfigs.PruneLimits, "gossipsub-rpc-prune-limits", defaultConfig.GossipSubConfig.RpcInspector.ValidationInspectorConfigs.PruneLimits, fmt.Sprintf("discard threshold, safety and rate limits for gossipsub RPC PRUNE message validation e.g: %s=1000,%s=20,%s=1000", validation.DiscardThresholdMapKey, validation.SafetyThresholdMapKey, validation.RateLimitMapKey)) - // gossipsub RPC control message metrics observer inspector configuration - fnb.flags.IntVar(&fnb.BaseConfig.GossipSubConfig.RpcInspector.MetricsInspectorConfigs.NumberOfWorkers, "gossipsub-rpc-metrics-inspector-workers", 
defaultConfig.GossipSubConfig.RpcInspector.MetricsInspectorConfigs.NumberOfWorkers, "cache size for gossipsub RPC metrics inspector events worker pool queue.")
-	fnb.flags.Uint32Var(&fnb.BaseConfig.GossipSubConfig.RpcInspector.MetricsInspectorConfigs.CacheSize, "gossipsub-rpc-metrics-inspector-cache-size", defaultConfig.GossipSubConfig.RpcInspector.MetricsInspectorConfigs.CacheSize, "cache size for gossipsub RPC metrics inspector events worker pool.")
-
-	// networking event notifications
-	fnb.flags.Uint32Var(&fnb.BaseConfig.GossipSubConfig.RpcInspector.GossipSubRPCInspectorNotificationCacheSize, "gossipsub-rpc-inspector-notification-cache-size", defaultConfig.GossipSubConfig.RpcInspector.GossipSubRPCInspectorNotificationCacheSize, "cache size for notification events from gossipsub rpc inspector")
-	fnb.flags.Uint32Var(&fnb.BaseConfig.DisallowListNotificationCacheSize, "disallow-list-notification-cache-size", defaultConfig.DisallowListNotificationCacheSize, "cache size for notification events from disallow list")
-
-	// unicast manager options
-	fnb.flags.DurationVar(&fnb.BaseConfig.UnicastCreateStreamRetryDelay, "unicast-manager-create-stream-retry-delay", defaultConfig.NetworkConfig.UnicastCreateStreamRetryDelay, "Initial delay between failing to establish a connection with another node and retrying. This delay increases exponentially (exponential backoff) with the number of subsequent failures to establish a connection.")
-}
+	fnb.flags.DurationVar(&fnb.BaseConfig.SyncCoreConfig.RetryInterval,
+		"sync-retry-interval",
+		defaultConfig.SyncCoreConfig.RetryInterval,
+		"the initial interval before we retry a sync request, uses exponential backoff")
+	fnb.flags.UintVar(&fnb.BaseConfig.SyncCoreConfig.Tolerance,
+		"sync-tolerance",
+		defaultConfig.SyncCoreConfig.Tolerance,
+		"determines how big of a difference in block heights we tolerate before actively syncing with range requests")
+	fnb.flags.UintVar(&fnb.BaseConfig.SyncCoreConfig.MaxAttempts,
+		"sync-max-attempts",
+		defaultConfig.SyncCoreConfig.MaxAttempts,
+		"the maximum number of attempts we make for each requested block/height before discarding")
+	fnb.flags.UintVar(&fnb.BaseConfig.SyncCoreConfig.MaxSize,
+		"sync-max-size",
+		defaultConfig.SyncCoreConfig.MaxSize,
+		"the maximum number of blocks we request in the same block request message")
+	fnb.flags.UintVar(&fnb.BaseConfig.SyncCoreConfig.MaxRequests,
+		"sync-max-requests",
+		defaultConfig.SyncCoreConfig.MaxRequests,
+		"the maximum number of requests we send during each scanning period")
+
+	fnb.flags.Uint64Var(&fnb.BaseConfig.ComplianceConfig.SkipNewProposalsThreshold,
+		"compliance-skip-proposals-threshold",
+		defaultConfig.ComplianceConfig.SkipNewProposalsThreshold,
+		"threshold at which new proposals are discarded rather than cached, if their height is this much above local finalized height")
+
+	// observer mode allows an unstaked execution node to fetch blocks from a public staked access node, and to execute blocks
+	fnb.flags.BoolVar(&fnb.BaseConfig.ObserverMode, "observer-mode", defaultConfig.ObserverMode, "whether the node is running in observer mode")
+	fnb.flags.StringSliceVar(&fnb.BootstrapNodePublicKeys,
+		"observer-mode-bootstrap-node-public-keys",
+		[]string{},
+		"the networking public key of the bootstrap access node if this is an observer (in the same order as the bootstrap node addresses) e.g.
\"d57a5e9c5.....\",\"44ded42d....\"") + fnb.flags.StringSliceVar(&fnb.BootstrapNodeAddresses, + "observer-mode-bootstrap-node-addresses", + []string{}, + "the network addresses of the bootstrap access node if this is an observer e.g. access-001.mainnet.flow.org:9653,access-002.mainnet.flow.org:9653") + + // TransactionFeesDisabled is a temporary convenience flag for easier testing of cadence compiler changes. This option should not be used if we need to disable fees on a network. + // To disable fees on a network, we need to set the fee price to 0.0. + fnb.flags.BoolVar(&fnb.TransactionFeesDisabled, + "disable-fees", + false, + "Disables calling the transaction fee deduction. This is only for testing purposes. To disable fees on a network it is better to set the fee price to 0.0 .") +} + +// TODO: remove after mainnet27 spork +// this struct is to reject the deprecated --pebble-dir flag +type rejectPebbleDirValue struct{} + +func (rejectPebbleDirValue) String() string { return "" } +func (rejectPebbleDirValue) Set(string) error { + return fmt.Errorf("the --pebble-dir flag is deprecated. Please remove the flag. " + + "Database will be stored in the location pointed by the --datadir flag which defaults to /data/protocol if not specified.") +} +func (rejectPebbleDirValue) Type() string { return "string" } func (fnb *FlowNodeBuilder) EnqueuePingService() { fnb.Component("ping service", func(node *NodeConfig) (module.ReadyDoneAware, error) { pingLibP2PProtocolID := protocols.PingProtocolId(node.SporkID) + var hotstuffViewFunc func() (uint64, error) + // Setup consensus nodes to report their HotStuff view + if fnb.BaseConfig.NodeRole == flow.RoleConsensus.String() { + hotstuffReader, err := persister.NewReader(node.ProtocolDB, node.RootChainID, node.StorageLockMgr) + if err != nil { + return nil, err + } + hotstuffViewFunc = func() (uint64, error) { + livenessData, err := hotstuffReader.GetLivenessData() + if err != nil { + return 0, fmt.Errorf("could not get liveness data: %w", err) + } + return livenessData.CurrentView, nil + } + } else { + // All other node roles do not report their hotstuff view + hotstuffViewFunc = func() (uint64, error) { + return 0, fmt.Errorf("hotstuff view reporting disabled") + } + } + // setup the Ping provider to return the software version and the sealed block height pingInfoProvider := &ping.InfoProvider{ SoftwareVersionFun: func() string { - return build.Semver() + return build.Version() }, SealedBlockHeightFun: func() (uint64, error) { head, err := node.State.Sealed().Head() @@ -244,28 +333,13 @@ func (fnb *FlowNodeBuilder) EnqueuePingService() { } return head.Height, nil }, - HotstuffViewFun: func() (uint64, error) { - return 0, fmt.Errorf("hotstuff view reporting disabled") - }, + HotstuffViewFun: hotstuffViewFunc, } - // only consensus roles will need to report hotstuff view - if fnb.BaseConfig.NodeRole == flow.RoleConsensus.String() { - // initialize the persister - persist := persister.New(node.DB, node.RootChainID) - - pingInfoProvider.HotstuffViewFun = func() (uint64, error) { - livenessData, err := persist.GetLivenessData() - if err != nil { - return 0, err - } - - return livenessData.CurrentView, nil - } + pingService, err := node.EngineRegistry.RegisterPingService(pingLibP2PProtocolID, pingInfoProvider) + if err != nil { + return nil, fmt.Errorf("could not register ping service: %w", err) } - - pingService, err := node.Network.RegisterPingService(pingLibP2PProtocolID, pingInfoProvider) - node.PingService = pingService return 
&module.NoopReadyDoneAware{}, err @@ -292,7 +366,7 @@ func (fnb *FlowNodeBuilder) EnqueueResolver() { node.Logger, fnb.Metrics.Network, cache, - dns.WithTTL(fnb.BaseConfig.DNSCacheTTL)) + dns.WithTTL(fnb.BaseConfig.FlowConfig.NetworkConfig.DNSCacheTTL)) fnb.Resolver = resolver return resolver, nil @@ -309,21 +383,21 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() { // setup default rate limiter options unicastRateLimiterOpts := []ratelimit.RateLimitersOption{ - ratelimit.WithDisabledRateLimiting(fnb.BaseConfig.UnicastRateLimitersConfig.DryRun), + ratelimit.WithDisabledRateLimiting(fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.DryRun), ratelimit.WithNotifier(fnb.UnicastRateLimiterDistributor), } // override noop unicast message rate limiter - if fnb.BaseConfig.UnicastRateLimitersConfig.MessageRateLimit > 0 { + if fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.MessageRateLimit > 0 { unicastMessageRateLimiter := ratelimiter.NewRateLimiter( - rate.Limit(fnb.BaseConfig.UnicastRateLimitersConfig.MessageRateLimit), - fnb.BaseConfig.UnicastRateLimitersConfig.MessageRateLimit, - fnb.BaseConfig.UnicastRateLimitersConfig.LockoutDuration, + rate.Limit(fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.MessageRateLimit), + fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.MessageRateLimit, + fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.LockoutDuration, ) unicastRateLimiterOpts = append(unicastRateLimiterOpts, ratelimit.WithMessageRateLimiter(unicastMessageRateLimiter)) // avoid connection gating and pruning during dry run - if !fnb.BaseConfig.UnicastRateLimitersConfig.DryRun { + if !fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.DryRun { f := rateLimiterPeerFilter(unicastMessageRateLimiter) // add IsRateLimited peerFilters to conn gater intercept secure peer and peer manager filters list // don't allow rate limited peers to establishing incoming connections @@ -334,16 +408,16 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() { } // override noop unicast bandwidth rate limiter - if fnb.BaseConfig.UnicastRateLimitersConfig.BandwidthRateLimit > 0 && fnb.BaseConfig.UnicastRateLimitersConfig.BandwidthBurstLimit > 0 { + if fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.BandwidthRateLimit > 0 && fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.BandwidthBurstLimit > 0 { unicastBandwidthRateLimiter := ratelimit.NewBandWidthRateLimiter( - rate.Limit(fnb.BaseConfig.UnicastRateLimitersConfig.BandwidthRateLimit), - fnb.BaseConfig.UnicastRateLimitersConfig.BandwidthBurstLimit, - fnb.BaseConfig.UnicastRateLimitersConfig.LockoutDuration, + rate.Limit(fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.BandwidthRateLimit), + fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.BandwidthBurstLimit, + fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.LockoutDuration, ) unicastRateLimiterOpts = append(unicastRateLimiterOpts, ratelimit.WithBandwidthRateLimiter(unicastBandwidthRateLimiter)) // avoid connection gating and pruning during dry run - if !fnb.BaseConfig.UnicastRateLimitersConfig.DryRun { + if !fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast.RateLimiter.DryRun { f := rateLimiterPeerFilter(unicastBandwidthRateLimiter) // add IsRateLimited peerFilters to conn gater intercept secure peer and peer manager filters list connGaterInterceptSecureFilters = append(connGaterInterceptSecureFilters, f) @@ -354,19 +428,20 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() { // setup unicast rate 
limiters
 unicastRateLimiters := ratelimit.NewRateLimiters(unicastRateLimiterOpts...)

- uniCfg := &p2pconfig.UnicastConfig{
- StreamRetryInterval: fnb.UnicastCreateStreamRetryDelay,
+ uniCfg := &p2pbuilderconfig.UnicastConfig{
+ Unicast: fnb.BaseConfig.FlowConfig.NetworkConfig.Unicast,
 RateLimiterDistributor: fnb.UnicastRateLimiterDistributor,
 }

- connGaterCfg := &p2pconfig.ConnectionGaterConfig{
+ connGaterCfg := &p2pbuilderconfig.ConnectionGaterConfig{
 InterceptPeerDialFilters: connGaterPeerDialFilters,
 InterceptSecuredFilters: connGaterInterceptSecureFilters,
 }

- peerManagerCfg := &p2pconfig.PeerManagerConfig{
- ConnectionPruning: fnb.NetworkConnectionPruning,
- UpdateInterval: fnb.PeerUpdateInterval,
+ peerManagerCfg := &p2pbuilderconfig.PeerManagerConfig{
+ ConnectionPruning: fnb.FlowConfig.NetworkConfig.NetworkConnectionPruning,
+ UpdateInterval: fnb.FlowConfig.NetworkConfig.PeerUpdateInterval,
+ ConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(),
 }

 fnb.Component(LibP2PNodeComponent, func(node *NodeConfig) (module.ReadyDoneAware, error) {
@@ -375,13 +450,33 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() {
 myAddr = fnb.BaseConfig.BindAddr
 }

- libP2PNodeFactory := p2pbuilder.DefaultLibP2PNodeFactory(
- fnb.Logger,
+ if fnb.ObserverMode {
+ // observer mode only initializes the public libp2p node
+ ids, err := fnb.DeriveBootstrapPeerIdentities()
+ if err != nil {
+ return nil, fmt.Errorf("failed to derive bootstrap peer identities: %w", err)
+ }
+
+ publicLibp2pNode, err := fnb.BuildPublicLibp2pNode(myAddr, ids)
+ if err != nil {
+ return nil, fmt.Errorf("could not build public libp2p node: %w", err)
+ }
+ fnb.LibP2PNode = publicLibp2pNode
+
+ return publicLibp2pNode, nil
+ }
+
+ dhtActivationStatus, err := DhtSystemActivationStatus(fnb.NodeRole, fnb.DhtSystemEnabled)
+ if err != nil {
+ return nil, fmt.Errorf("could not determine dht activation status: %w", err)
+ }
+ builder, err := p2pbuilder.DefaultNodeBuilder(fnb.Logger,
 myAddr,
+ network.PrivateNetwork,
 fnb.NetworkKey,
 fnb.SporkID,
 fnb.IdentityProvider,
- &p2pconfig.MetricsConfig{
+ &p2pbuilderconfig.MetricsConfig{
 Metrics: fnb.Metrics.Network,
 HeroCacheFactory: fnb.HeroCacheMetricsFactory(),
 },
@@ -389,36 +484,53 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() {
 fnb.BaseConfig.NodeRole,
 connGaterCfg,
 peerManagerCfg,
- // run peer manager with the specified interval and let it also prune connections
- fnb.GossipSubConfig,
- fnb.LibP2PResourceManagerConfig,
+ &fnb.FlowConfig.NetworkConfig.GossipSub,
+ &fnb.FlowConfig.NetworkConfig.ResourceManager,
 uniCfg,
- )
+ &fnb.FlowConfig.NetworkConfig.ConnectionManager,
+ &p2p.DisallowListCacheConfig{
+ MaxSize: fnb.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize,
+ Metrics: metrics.DisallowListCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork),
+ },
+ dhtActivationStatus)
+ if err != nil {
+ return nil, fmt.Errorf("could not create libp2p node builder: %w", err)
+ }

- libp2pNode, err := libP2PNodeFactory()
+ libp2pNode, err := builder.Build()
 if err != nil {
- return nil, fmt.Errorf("failed to create libp2p node: %w", err)
+ return nil, fmt.Errorf("could not build libp2p node: %w", err)
 }
- fnb.LibP2PNode = libp2pNode

+ fnb.LibP2PNode = libp2pNode
 return libp2pNode, nil
 })

 fnb.Component(NetworkComponent, func(node *NodeConfig) (module.ReadyDoneAware, error) {
- cf := conduit.NewDefaultConduitFactory(fnb.Logger, fnb.Metrics.Network)
 fnb.Logger.Info().Hex("node_id", logging.ID(fnb.NodeID)).Msg("default conduit factory initiated")
- return
fnb.InitFlowNetworkWithConduitFactory(node, cf, unicastRateLimiters, peerManagerFilters)
+ return fnb.InitFlowNetworkWithConduitFactory(
+ node,
+ conduit.NewDefaultConduitFactory(),
+ unicastRateLimiters,
+ peerManagerFilters)
+ })
+
+ fnb.Module("epoch transition logger", func(node *NodeConfig) error {
+ node.ProtocolEvents.AddConsumer(events.NewEventLogger(node.Logger))
+ return nil
 })

- fnb.Module("middleware dependency", func(node *NodeConfig) error {
- fnb.middlewareDependable = module.NewProxiedReadyDoneAware()
- fnb.PeerManagerDependencies.Add(fnb.middlewareDependable)
+ fnb.Module("network underlay dependency", func(node *NodeConfig) error {
+ fnb.networkUnderlayDependable = module.NewProxiedReadyDoneAware()
+ fnb.PeerManagerDependencies.Add(fnb.networkUnderlayDependable)
 return nil
 })

 // peer manager won't be created until all PeerManagerDependencies are ready.
- fnb.DependableComponent("peer manager", func(node *NodeConfig) (module.ReadyDoneAware, error) {
- return fnb.LibP2PNode.PeerManagerComponent(), nil
- }, fnb.PeerManagerDependencies)
+ if !fnb.ObserverMode {
+ fnb.DependableComponent("peer manager", func(node *NodeConfig) (module.ReadyDoneAware, error) {
+ return fnb.LibP2PNode.PeerManagerComponent(), nil
+ }, fnb.PeerManagerDependencies)
+ }
 }

 // HeroCacheMetricsFactory returns a HeroCacheMetricsFactory based on the MetricsEnabled flag.
@@ -431,76 +543,172 @@ func (fnb *FlowNodeBuilder) HeroCacheMetricsFactory() metrics.HeroCacheMetricsFa
 return metrics.NewNoopHeroCacheMetricsFactory()
 }

-func (fnb *FlowNodeBuilder) InitFlowNetworkWithConduitFactory(node *NodeConfig, cf network.ConduitFactory, unicastRateLimiters *ratelimit.RateLimiters, peerManagerFilters []p2p.PeerFilter) (network.Network, error) {
- var mwOpts []middleware.MiddlewareOption
+// DeriveBootstrapPeerIdentities derives the Flow Identity of the bootstrap peers from the parameters.
+// These are the identities of the bootstrap access nodes, which also act as the DHT bootstrap servers.
+func (fnb *FlowNodeBuilder) DeriveBootstrapPeerIdentities() (flow.IdentitySkeletonList, error) {
+ ids, err := BootstrapIdentities(fnb.BootstrapNodeAddresses, fnb.BootstrapNodePublicKeys)
+ if err != nil {
+ return nil, fmt.Errorf("failed to derive bootstrap peer identities: %w", err)
+ }
+
+ return ids, nil
+}
+
+// BuildPublicLibp2pNode creates a libp2p node for the observer service in the public (unstaked) network.
+// The returned node is used in place of the private libp2p node when the builder runs in observer mode.
+// The LibP2P host is created with the following options:
+// * DHT as client and seeded with the given bootstrap peers
+// * The specified bind address as the listen address
+// * The passed in private key as the libp2p key
+// * No connection gater
+// * No connection manager
+// * No peer manager
+// * Default libp2p pubsub options.
+// Args:
+// - address: the bind address for the libp2p host
+// - bootstrapIdentities: the identities of the bootstrap access nodes used to seed the DHT
+// Returns:
+// - p2p.LibP2PNode: the libp2p node
+// - error: if any error occurs. Any error returned is considered irrecoverable.
+func (fnb *FlowNodeBuilder) BuildPublicLibp2pNode(address string, bootstrapIdentities flow.IdentitySkeletonList) (p2p.LibP2PNode, error) { + var pis []peer.AddrInfo + + for _, b := range bootstrapIdentities { + pi, err := utils.PeerAddressInfo(*b) + if err != nil { + return nil, fmt.Errorf("could not extract peer address info from bootstrap identity %v: %w", b, err) + } + + pis = append(pis, pi) + } + + node, err := p2pbuilder.NewNodeBuilder( + fnb.Logger, + &fnb.FlowConfig.NetworkConfig.GossipSub, + &p2pbuilderconfig.MetricsConfig{ + HeroCacheFactory: fnb.HeroCacheMetricsFactory(), + Metrics: fnb.Metrics.Network, + }, + network.PublicNetwork, + address, + fnb.NetworkKey, + fnb.SporkID, + fnb.IdentityProvider, + &fnb.FlowConfig.NetworkConfig.ResourceManager, + p2pbuilderconfig.PeerManagerDisableConfig(), // disable peer manager for observer node. + &p2p.DisallowListCacheConfig{ + MaxSize: fnb.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, + Metrics: metrics.DisallowListCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PublicNetwork), + }, + &p2pbuilderconfig.UnicastConfig{ + Unicast: fnb.FlowConfig.NetworkConfig.Unicast, + }). + SetProtocolPeerCacheList(protocols.FlowProtocolID(fnb.SporkID)). + SetSubscriptionFilter( + subscription.NewRoleBasedFilter( + subscription.UnstakedRole, fnb.IdentityProvider, + ), + ). + SetRoutingSystem(func(ctx context.Context, h host.Host) (routing.Routing, error) { + return p2pdht.NewDHT(ctx, h, protocols.FlowPublicDHTProtocolID(fnb.SporkID), + fnb.Logger, + fnb.Metrics.Network, + p2pdht.AsClient(), + dht.BootstrapPeers(pis...), + ) + }). + Build() + + if err != nil { + return nil, fmt.Errorf("could not initialize libp2p node for observer: %w", err) + } + return node, nil +} + +func (fnb *FlowNodeBuilder) InitFlowNetworkWithConduitFactory( + node *NodeConfig, + cf network.ConduitFactory, + unicastRateLimiters *ratelimit.RateLimiters, + peerManagerFilters []p2p.PeerFilter) (network.EngineRegistry, error) { + + var networkOptions []underlay.NetworkOption if len(fnb.MsgValidators) > 0 { - mwOpts = append(mwOpts, middleware.WithMessageValidators(fnb.MsgValidators...)) + networkOptions = append(networkOptions, underlay.WithMessageValidators(fnb.MsgValidators...)) } // by default if no rate limiter configuration was provided in the CLI args the default // noop rate limiter will be used. - mwOpts = append(mwOpts, middleware.WithUnicastRateLimiters(unicastRateLimiters)) + networkOptions = append(networkOptions, underlay.WithUnicastRateLimiters(unicastRateLimiters)) - mwOpts = append(mwOpts, - middleware.WithPreferredUnicastProtocols(protocols.ToProtocolNames(fnb.PreferredUnicastProtocols)), + networkOptions = append(networkOptions, + underlay.WithPreferredUnicastProtocols(protocols.ToProtocolNames(fnb.FlowConfig.NetworkConfig.PreferredUnicastProtocols)...), ) - // peerManagerFilters are used by the peerManager via the middleware to filter peers from the topology. + // peerManagerFilters are used by the peerManager via the network to filter peers from the topology. 
if len(peerManagerFilters) > 0 { - mwOpts = append(mwOpts, middleware.WithPeerManagerFilters(peerManagerFilters)) + networkOptions = append(networkOptions, underlay.WithPeerManagerFilters(peerManagerFilters...)) } - slashingViolationsConsumer := slashing.NewSlashingViolationsConsumer(fnb.Logger, fnb.Metrics.Network) - mw := middleware.NewMiddleware( + receiveCache := netcache.NewHeroReceiveCache(fnb.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, fnb.Logger, - fnb.LibP2PNode, - fnb.Me.NodeID(), - fnb.Metrics.Bitswap, - fnb.SporkID, - fnb.BaseConfig.UnicastMessageTimeout, - fnb.IDTranslator, - fnb.CodecFactory(), - slashingViolationsConsumer, - mwOpts...) - fnb.NodeDisallowListDistributor.AddConsumer(mw) - fnb.Middleware = mw - - subscriptionManager := subscription.NewChannelSubscriptionManager(fnb.Middleware) - - receiveCache := netcache.NewHeroReceiveCache(fnb.NetworkReceivedMessageCacheSize, - fnb.Logger, - metrics.NetworkReceiveCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), p2p.PrivateNetwork)) + metrics.NetworkReceiveCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork)) err := node.Metrics.Mempool.Register(metrics.ResourceNetworkingReceiveCache, receiveCache.Size) if err != nil { return nil, fmt.Errorf("could not register networking receive cache metric: %w", err) } + networkType := network.PrivateNetwork + if fnb.ObserverMode { + // observer mode uses public network + networkType = network.PublicNetwork + } + // creates network instance - net, err := p2p.NewNetwork(&p2p.NetworkParameters{ - Logger: fnb.Logger, - Codec: fnb.CodecFactory(), - Me: fnb.Me, - MiddlewareFactory: func() (network.Middleware, error) { return fnb.Middleware, nil }, - Topology: topology.NewFullyConnectedTopology(), - SubscriptionManager: subscriptionManager, - Metrics: fnb.Metrics.Network, - IdentityProvider: fnb.IdentityProvider, - ReceiveCache: receiveCache, - Options: []p2p.NetworkOptFunction{p2p.WithConduitFactory(cf)}, - }) + net, err := underlay.NewNetwork(&underlay.NetworkConfig{ + Logger: fnb.Logger, + Libp2pNode: fnb.LibP2PNode, + Codec: fnb.CodecFactory(), + Me: fnb.Me, + SporkId: fnb.SporkID, + Topology: topology.NewFullyConnectedTopology(), + Metrics: fnb.Metrics.Network, + BitSwapMetrics: fnb.Metrics.Bitswap, + IdentityProvider: fnb.IdentityProvider, + ReceiveCache: receiveCache, + ConduitFactory: cf, + UnicastMessageTimeout: fnb.FlowConfig.NetworkConfig.Unicast.MessageTimeout, + IdentityTranslator: fnb.IDTranslator, + AlspCfg: &alspmgr.MisbehaviorReportManagerConfig{ + Logger: fnb.Logger, + SpamRecordCacheSize: fnb.FlowConfig.NetworkConfig.AlspConfig.SpamRecordCacheSize, + SpamReportQueueSize: fnb.FlowConfig.NetworkConfig.AlspConfig.SpamReportQueueSize, + DisablePenalty: fnb.FlowConfig.NetworkConfig.AlspConfig.DisablePenalty, + HeartBeatInterval: fnb.FlowConfig.NetworkConfig.AlspConfig.HearBeatInterval, + AlspMetrics: fnb.Metrics.Network, + HeroCacheMetricsFactory: fnb.HeroCacheMetricsFactory(), + NetworkType: networkType, + }, + SlashingViolationConsumerFactory: func(adapter network.ConduitAdapter) network.ViolationsConsumer { + return slashing.NewSlashingViolationsConsumer(fnb.Logger, fnb.Metrics.Network, adapter) + }, + }, networkOptions...) 
if err != nil { return nil, fmt.Errorf("could not initialize network: %w", err) } - fnb.Network = net + if node.ObserverMode { + fnb.EngineRegistry = converter.NewNetwork(net, channels.SyncCommittee, channels.PublicSyncCommittee) + } else { + fnb.EngineRegistry = net // setting network as the fnb.Network for the engine-level components + } + fnb.NetworkUnderlay = net // setting network as the fnb.Underlay for the lower-level components - // register middleware's ReadyDoneAware interface so other components can depend on it for startup - if fnb.middlewareDependable != nil { - fnb.middlewareDependable.Init(fnb.Middleware) + // register network ReadyDoneAware interface so other components can depend on it for startup + if fnb.networkUnderlayDependable != nil { + fnb.networkUnderlayDependable.Init(fnb.NetworkUnderlay) } - idEvents := gadgets.NewIdentityDeltas(fnb.Middleware.UpdateNodeAddresses) + idEvents := gadgets.NewIdentityDeltas(net.UpdateNodeAddresses) fnb.ProtocolEvents.AddConsumer(idEvents) return net, nil @@ -580,15 +788,28 @@ func (fnb *FlowNodeBuilder) ParseAndPrintFlags() error { // parse configuration parameters pflag.Parse() - // print all flags - log := fnb.Logger.Info() + configOverride, err := config.BindPFlags(&fnb.BaseConfig.FlowConfig, fnb.flags) + if err != nil { + return err + } + + if configOverride { + fnb.Logger.Info().Str("config-file", fnb.FlowConfig.ConfigFile).Msg("configuration file updated") + } - pflag.VisitAll(func(flag *pflag.Flag) { - log = log.Str(flag.Name, flag.Value.String()) - }) + if err = fnb.BaseConfig.FlowConfig.Validate(); err != nil { + fnb.Logger.Fatal().Err(err).Msg("flow configuration validation failed") + } - log.Msg("flags loaded") + info := fnb.Logger.Error() + noPrint := config.LogConfig(info, fnb.flags) + fnb.flags.VisitAll(func(flag *pflag.Flag) { + if _, ok := noPrint[flag.Name]; !ok { + info.Str(flag.Name, fmt.Sprintf("%v", flag.Value)) + } + }) + info.Msg("configuration loaded (logged as error for visibility)") return fnb.extraFlagsValidation() } @@ -603,7 +824,7 @@ func (fnb *FlowNodeBuilder) ValidateFlags(f func() error) NodeBuilder { } func (fnb *FlowNodeBuilder) PrintBuildVersionDetails() { - fnb.Logger.Info().Str("version", build.Semver()).Str("commit", build.Commit()).Msg("build details") + fnb.Logger.Info().Str("version", build.Version()).Str("commit", build.Commit()).Msg("build details") } func (fnb *FlowNodeBuilder) initNodeInfo() error { @@ -621,13 +842,48 @@ func (fnb *FlowNodeBuilder) initNodeInfo() error { return fmt.Errorf("failed to load private node info: %w", err) } + fnb.StakingKey = info.StakingPrivKey.PrivateKey + + if fnb.ObserverMode { + // observer mode uses a network private key with different format than the staked node, + // so it has to load the network private key from a separate file + networkingPrivateKey, err := LoadNetworkPrivateKey(fnb.BaseConfig.BootstrapDir, nodeID) + if err != nil { + return fmt.Errorf("failed to load networking private key: %w", err) + } + + peerID, err := peerIDFromNetworkKey(networkingPrivateKey) + if err != nil { + return fmt.Errorf("could not get peer ID from network key: %w", err) + } + + // public node ID for observer is derived from peer ID which is derived from network key + pubNodeID, err := translator.NewPublicNetworkIDTranslator().GetFlowID(peerID) + if err != nil { + return fmt.Errorf("could not get flow node ID: %w", err) + } + + fnb.NodeID = pubNodeID + fnb.NetworkKey = networkingPrivateKey + + return nil + } + fnb.NodeID = nodeID fnb.NetworkKey = 
info.NetworkPrivKey.PrivateKey
- fnb.StakingKey = info.StakingPrivKey.PrivateKey

 return nil
}

+func peerIDFromNetworkKey(privateKey crypto.PrivateKey) (peer.ID, error) {
+ pubKey, err := keyutils.LibP2PPublicKeyFromFlow(privateKey.PublicKey())
+ if err != nil {
+ return "", fmt.Errorf("could not load libp2p public key: %w", err)
+ }
+
+ return peer.IDFromPublicKey(pubKey)
+}
+
 func (fnb *FlowNodeBuilder) initLogger() error {
 // configure logger with standard level, node ID and UTC timestamp
 zerolog.TimeFieldFormat = time.RFC3339Nano
@@ -722,11 +978,11 @@ func (fnb *FlowNodeBuilder) initMetrics() error {
 // metrics enabled, report node info metrics as post init event
 fnb.PostInit(func(nodeConfig *NodeConfig) error {
 nodeInfoMetrics := metrics.NewNodeInfoCollector()
- protocolVersion, err := fnb.RootSnapshot.Params().ProtocolVersion()
+ pstate, err := nodeConfig.State.Final().ProtocolState()
 if err != nil {
- return fmt.Errorf("could not query root snapshoot protocol version: %w", err)
+ return fmt.Errorf("could not get protocol state: %w", err)
 }
- nodeInfoMetrics.NodeInfo(build.Semver(), build.Commit(), nodeConfig.SporkID.String(), protocolVersion)
+ nodeInfoMetrics.NodeInfo(build.Version(), build.Commit(), nodeConfig.SporkID.String(), pstate.GetProtocolStateVersion())
 return nil
 })
 }
@@ -754,7 +1010,7 @@ func (fnb *FlowNodeBuilder) createGCEProfileUploader(client *gcemd.Client, opts
 ProjectID: projectID,
 ChainID: chainID,
 Role: fnb.NodeConfig.NodeRole,
- Version: build.Semver(),
+ Version: build.Version(),
 Commit: build.Commit(),
 Instance: instance,
 }
@@ -838,57 +1094,27 @@ func (fnb *FlowNodeBuilder) initProfiler() error {
 return nil
 }

-func (fnb *FlowNodeBuilder) initDB() error {
-
- // if a db has been passed in, use that instead of creating one
- if fnb.BaseConfig.db != nil {
- fnb.DB = fnb.BaseConfig.db
+// create the protocol db
+func (fnb *FlowNodeBuilder) initProtocolDB() error {
+ // if the protocol DB is already set, use it
+ // the protocol DB might have been set by the follower engine
+ if fnb.BaseConfig.protocolDB != nil {
+ fnb.ProtocolDB = fnb.BaseConfig.protocolDB
 return nil
 }

- // Pre-create DB path (Badger creates only one-level dirs)
- err := os.MkdirAll(fnb.BaseConfig.datadir, 0700)
+ pebbleDB, closer, err := scaffold.InitPebbleDB(fnb.Logger.With().Str("pebbledb", "protocol").Logger(), fnb.BaseConfig.datadir)
 if err != nil {
- return fmt.Errorf("could not create datadir (path: %s): %w", fnb.BaseConfig.datadir, err)
- }
-
- log := sutil.NewLogger(fnb.Logger)
-
- // we initialize the database with options that allow us to keep the maximum
- // item size in the trie itself (up to 1MB) and where we keep all level zero
- // tables in-memory as well; this slows down compaction and increases memory
- // usage, but it improves overall performance and disk i/o
- opts := badger.
- DefaultOptions(fnb.BaseConfig.datadir).
- WithKeepL0InMemory(true).
- WithLogger(log).
-
- // the ValueLogFileSize option specifies how big the value of a
- // key-value pair is allowed to be saved into badger.
- // exceeding this limit, will fail with an error like this:
- // could not store data: Value with size <xxxx> exceeded 1073741824 limit
- // Maximum value size is 10G, needed by execution node
- // TODO: finding a better max value for each node type
- WithValueLogFileSize(128 << 23).
- WithValueLogMaxEntries(100000) // Default is 1000000 - - publicDB, err := bstorage.InitPublic(opts) - if err != nil { - return fmt.Errorf("could not open public db: %w", err) + return err } - fnb.DB = publicDB - - fnb.ShutdownFunc(func() error { - if err := fnb.DB.Close(); err != nil { - return fmt.Errorf("error closing protocol database: %w", err) - } - return nil - }) - fnb.Component("badger log cleaner", func(node *NodeConfig) (module.ReadyDoneAware, error) { - return bstorage.NewCleaner(node.Logger, node.DB, node.Metrics.CleanCollector, flow.DefaultValueLogGCWaitDuration), nil + fnb.AdminCommand("create-pebble-checkpoint", func(config *NodeConfig) commands.AdminCommand { + // by default checkpoints will be created under "/data/protocol_pebble_checkpoints" + return storageCommands.NewPebbleDBCheckpointCommand(config.pebbleCheckpointsDir, "protocol", pebbleDB) }) + fnb.ProtocolDB = pebbleimpl.ToDB(pebbleDB) + fnb.ShutdownFunc(closer.Close) return nil } @@ -909,9 +1135,9 @@ func (fnb *FlowNodeBuilder) initSecretsDB() error { return fmt.Errorf("could not create secrets db dir (path: %s): %w", fnb.BaseConfig.secretsdir, err) } - log := sutil.NewLogger(fnb.Logger) - - opts := badger.DefaultOptions(fnb.BaseConfig.secretsdir).WithLogger(log) + opts := badger.DefaultOptions(fnb.BaseConfig.secretsdir). + WithLogger(sutil.NewLogger( + fnb.Logger.With().Str("badgerdb", "secret").Logger())) // NOTE: SN nodes need to explicitly set --insecure-secrets-db to true in order to // disable secrets database encryption @@ -948,94 +1174,122 @@ func (fnb *FlowNodeBuilder) initSecretsDB() error { return nil } -func (fnb *FlowNodeBuilder) initStorage() error { +// initStorageLockManager initializes the lock manager used by the storage layer. +// This manager must be a process-wide singleton. 
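+// No errors are expected during normal operation.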
+func (fnb *FlowNodeBuilder) initStorageLockManager() error {
+ if fnb.StorageLockMgr != nil {
+ fnb.Logger.Warn().Msgf("storage lock manager already initialized, skipping re-initialization; this should only happen in test cases")
+ return nil
+ }

- // in order to void long iterations with big keys when initializing with an
- // already populated database, we bootstrap the initial maximum key size
- // upon starting
- err := operation.RetryOnConflict(fnb.DB.Update, func(tx *badger.Txn) error {
- return operation.InitMax(tx)
- })
- if err != nil {
- return fmt.Errorf("could not initialize max tracker: %w", err)
- }
-
- headers := bstorage.NewHeaders(fnb.Metrics.Cache, fnb.DB)
- guarantees := bstorage.NewGuarantees(fnb.Metrics.Cache, fnb.DB, fnb.BaseConfig.guaranteesCacheSize)
- seals := bstorage.NewSeals(fnb.Metrics.Cache, fnb.DB)
- results := bstorage.NewExecutionResults(fnb.Metrics.Cache, fnb.DB)
- receipts := bstorage.NewExecutionReceipts(fnb.Metrics.Cache, fnb.DB, results, fnb.BaseConfig.receiptsCacheSize)
- index := bstorage.NewIndex(fnb.Metrics.Cache, fnb.DB)
- payloads := bstorage.NewPayloads(fnb.DB, index, guarantees, seals, receipts, results)
- blocks := bstorage.NewBlocks(fnb.DB, headers, payloads)
- qcs := bstorage.NewQuorumCertificates(fnb.Metrics.Cache, fnb.DB, bstorage.DefaultCacheSize)
- transactions := bstorage.NewTransactions(fnb.Metrics.Cache, fnb.DB)
- collections := bstorage.NewCollections(fnb.DB, transactions)
- setups := bstorage.NewEpochSetups(fnb.Metrics.Cache, fnb.DB)
- epochCommits := bstorage.NewEpochCommits(fnb.Metrics.Cache, fnb.DB)
- statuses := bstorage.NewEpochStatuses(fnb.Metrics.Cache, fnb.DB)
- commits := bstorage.NewCommits(fnb.Metrics.Cache, fnb.DB)
- versionBeacons := bstorage.NewVersionBeacons(fnb.DB)
+ fnb.StorageLockMgr = storage.MakeSingletonLockManager()
+ return nil
+}
+
+func (fnb *FlowNodeBuilder) initStorage() error {
+ headers := store.NewHeaders(fnb.Metrics.Cache, fnb.ProtocolDB)
+ guarantees := store.NewGuarantees(fnb.Metrics.Cache, fnb.ProtocolDB, fnb.BaseConfig.guaranteesCacheSize,
+ store.DefaultCacheSize)
+ seals := store.NewSeals(fnb.Metrics.Cache, fnb.ProtocolDB)
+ results := store.NewExecutionResults(fnb.Metrics.Cache, fnb.ProtocolDB)
+ receipts := store.NewExecutionReceipts(fnb.Metrics.Cache, fnb.ProtocolDB, results, fnb.BaseConfig.receiptsCacheSize)
+ index := store.NewIndex(fnb.Metrics.Cache, fnb.ProtocolDB)
+ payloads := store.NewPayloads(fnb.ProtocolDB, index, guarantees, seals, receipts, results)
+ blocks := store.NewBlocks(fnb.ProtocolDB, headers, payloads)
+ qcs := store.NewQuorumCertificates(fnb.Metrics.Cache, fnb.ProtocolDB, store.DefaultCacheSize)
+ transactions := store.NewTransactions(fnb.Metrics.Cache, fnb.ProtocolDB)
+ collections := store.NewCollections(fnb.ProtocolDB, transactions)
+ setups := store.NewEpochSetups(fnb.Metrics.Cache, fnb.ProtocolDB)
+ epochCommits := store.NewEpochCommits(fnb.Metrics.Cache, fnb.ProtocolDB)
+ protocolState := store.NewEpochProtocolStateEntries(fnb.Metrics.Cache, setups, epochCommits, fnb.ProtocolDB,
+ store.DefaultEpochProtocolStateCacheSize, store.DefaultProtocolStateIndexCacheSize)
+ protocolKVStores := store.NewProtocolKVStore(fnb.Metrics.Cache, fnb.ProtocolDB,
+ store.DefaultProtocolKVStoreCacheSize, store.DefaultProtocolKVStoreByBlockIDCacheSize)
+ versionBeacons := store.NewVersionBeacons(fnb.ProtocolDB)

 fnb.Storage = Storage{
- Headers: headers,
- Guarantees: guarantees,
- Receipts: receipts,
- Results: results,
- Seals: seals,
- Index: index,
- Payloads: payloads,
- Blocks:
blocks,
- QuorumCertificates: qcs,
- Transactions: transactions,
- Collections: collections,
- Setups: setups,
- EpochCommits: epochCommits,
- VersionBeacons: versionBeacons,
- Statuses: statuses,
- Commits: commits,
+ Headers: headers,
+ Guarantees: guarantees,
+ Seals: seals,
+ Index: index,
+ Payloads: payloads,
+ Blocks: blocks,
+ QuorumCertificates: qcs,
+ Transactions: transactions,
+ Collections: collections,
+ Setups: setups,
+ EpochCommits: epochCommits,
+ VersionBeacons: versionBeacons,
+ EpochProtocolStateEntries: protocolState,
+ ProtocolKVStore: protocolKVStores,
+
+ Results: results,
+ Receipts: receipts,
 }

 return nil
}

func (fnb *FlowNodeBuilder) InitIDProviders() {
- fnb.Component("disallow list notification distributor", func(node *NodeConfig) (module.ReadyDoneAware, error) {
- // distributor is returned as a component to be started and stopped.
- if fnb.NodeDisallowListDistributor == nil {
- return nil, fmt.Errorf("disallow list notification distributor has not been set")
- }
- return fnb.NodeDisallowListDistributor, nil
- })
 fnb.Module("id providers", func(node *NodeConfig) error {
 idCache, err := cache.NewProtocolStateIDCache(node.Logger, node.State, node.ProtocolEvents)
 if err != nil {
 return fmt.Errorf("could not initialize ProtocolStateIDCache: %w", err)
 }
- node.IDTranslator = idCache
-
- fnb.NodeDisallowListDistributor = BuildDisallowListNotificationDisseminator(fnb.DisallowListNotificationCacheSize, fnb.MetricsRegisterer, fnb.Logger, fnb.MetricsEnabled)

 // The following wrapper allows to disallow-list byzantine nodes via an admin command:
 // the wrapper overrides the 'Ejected' flag of disallow-listed nodes to true
- disallowListWrapper, err := cache.NewNodeBlocklistWrapper(idCache, node.DB, fnb.NodeDisallowListDistributor)
+ disallowListWrapper, err := cache.NewNodeDisallowListWrapper(
+ idCache,
+ node.ProtocolDB,
+ func() network.DisallowListNotificationConsumer {
+ return fnb.NetworkUnderlay
+ },
+ )
 if err != nil {
- return fmt.Errorf("could not initialize NodeBlockListWrapper: %w", err)
+ return fmt.Errorf("could not initialize NodeDisallowListWrapper: %w", err)
 }
 node.IdentityProvider = disallowListWrapper

+ if node.ObserverMode {
+ // the identifier provider decides which nodes to connect to when syncing blocks;
+ // in observer mode, the peers must be specific public access nodes,
+ // rather than the staked consensus nodes.
+ idTranslator, factory, err := CreatePublicIDTranslatorAndIdentifierProvider(
+ fnb.Logger,
+ fnb.NetworkKey,
+ fnb.SporkID,
+ // fnb.LibP2PNode is not created until EnqueueNetworkInit is called,
+ // so we pass a function that will return the LibP2PNode when called.
+ func() p2p.LibP2PNode { + return fnb.LibP2PNode + }, + idCache, + ) + if err != nil { + return fmt.Errorf("could not initialize public ID translator and identifier provider: %w", err) + } + + fnb.IDTranslator = idTranslator + fnb.SyncEngineIdentifierProvider = factory() + + return nil + } + + node.IDTranslator = idCache + // register the disallow list wrapper for dynamic configuration via admin command err = node.ConfigManager.RegisterIdentifierListConfig("network-id-provider-blocklist", - disallowListWrapper.GetBlocklist, disallowListWrapper.Update) + disallowListWrapper.GetDisallowList, disallowListWrapper.Update) if err != nil { - return fmt.Errorf("failed to register blocklist with config manager: %w", err) + return fmt.Errorf("failed to register disallow-list wrapper with config manager: %w", err) } node.SyncEngineIdentifierProvider = id.NewIdentityFilterIdentifierProvider( filter.And( - filter.HasRole(flow.RoleConsensus), - filter.Not(filter.HasNodeID(node.Me.NodeID())), - p2p.NotEjectedFilter, + filter.HasRole[flow.Identity](flow.RoleConsensus), + filter.Not(filter.HasNodeID[flow.Identity](node.Me.NodeID())), + filter.NotEjectedFilter, ), node.IdentityProvider, ) @@ -1046,7 +1300,7 @@ func (fnb *FlowNodeBuilder) InitIDProviders() { func (fnb *FlowNodeBuilder) initState() error { fnb.ProtocolEvents = events.NewDistributor() - isBootStrapped, err := badgerState.IsBootstrapped(fnb.DB) + isBootStrapped, err := badgerState.IsBootstrapped(fnb.ProtocolDB) if err != nil { return fmt.Errorf("failed to determine whether database contains bootstrapped state: %w", err) } @@ -1055,7 +1309,8 @@ func (fnb *FlowNodeBuilder) initState() error { fnb.Logger.Info().Msg("opening already bootstrapped protocol state") state, err := badgerState.OpenState( fnb.Metrics.Compliance, - fnb.DB, + fnb.ProtocolDB, + fnb.StorageLockMgr, fnb.Storage.Headers, fnb.Storage.Seals, fnb.Storage.Results, @@ -1063,7 +1318,8 @@ func (fnb *FlowNodeBuilder) initState() error { fnb.Storage.QuorumCertificates, fnb.Storage.Setups, fnb.Storage.EpochCommits, - fnb.Storage.Statuses, + fnb.Storage.EpochProtocolStateEntries, + fnb.Storage.ProtocolKVStore, fnb.Storage.VersionBeacons, ) if err != nil { @@ -1072,11 +1328,7 @@ func (fnb *FlowNodeBuilder) initState() error { fnb.State = state // set root snapshot field - rootBlock, err := state.Params().Root() - if err != nil { - return fmt.Errorf("could not get root block from protocol state: %w", err) - } - + rootBlock := state.Params().FinalizedRoot() rootSnapshot := state.AtBlockID(rootBlock.ID()) if err := fnb.setRootSnapshot(rootSnapshot); err != nil { return err @@ -1107,7 +1359,8 @@ func (fnb *FlowNodeBuilder) initState() error { fnb.State, err = badgerState.Bootstrap( fnb.Metrics.Compliance, - fnb.DB, + fnb.ProtocolDB, + fnb.StorageLockMgr, fnb.Storage.Headers, fnb.Storage.Seals, fnb.Storage.Results, @@ -1115,7 +1368,8 @@ func (fnb *FlowNodeBuilder) initState() error { fnb.Storage.QuorumCertificates, fnb.Storage.Setups, fnb.Storage.EpochCommits, - fnb.Storage.Statuses, + fnb.Storage.EpochProtocolStateEntries, + fnb.Storage.ProtocolKVStore, fnb.Storage.VersionBeacons, fnb.RootSnapshot, options..., @@ -1127,8 +1381,10 @@ func (fnb *FlowNodeBuilder) initState() error { fnb.Logger.Info(). Hex("root_result_id", logging.Entity(fnb.RootResult)). Hex("root_state_commitment", fnb.RootSeal.FinalState[:]). - Hex("root_block_id", logging.Entity(fnb.RootBlock)). - Uint64("root_block_height", fnb.RootBlock.Header.Height). 
+ Hex("finalized_root_block_id", logging.Entity(fnb.FinalizedRootBlock)). + Uint64("finalized_root_block_height", fnb.FinalizedRootBlock.Height). + Hex("sealed_root_block_id", logging.Entity(fnb.SealedRootBlock)). + Uint64("sealed_root_block_height", fnb.SealedRootBlock.Height). Msg("protocol state bootstrapped") } @@ -1143,13 +1399,22 @@ func (fnb *FlowNodeBuilder) initState() error { if err != nil { return fmt.Errorf("could not get last finalized block header: %w", err) } - fnb.NodeConfig.FinalizedHeader = lastFinalized + fnb.NodeConfig.LastFinalizedHeader = lastFinalized + + lastSealed, err := fnb.State.Sealed().Head() + if err != nil { + return fmt.Errorf("could not get last sealed block header: %w", err) + } fnb.Logger.Info(). - Hex("root_block_id", logging.Entity(fnb.RootBlock)). - Uint64("root_block_height", fnb.RootBlock.Header.Height). - Hex("finalized_block_id", logging.Entity(lastFinalized)). - Uint64("finalized_block_height", lastFinalized.Height). + Hex("last_finalized_block_id", logging.Entity(lastFinalized)). + Uint64("last_finalized_block_height", lastFinalized.Height). + Hex("last_sealed_block_id", logging.Entity(lastSealed)). + Uint64("last_sealed_block_height", lastSealed.Height). + Hex("finalized_root_block_id", logging.Entity(fnb.FinalizedRootBlock)). + Uint64("finalized_root_block_height", fnb.FinalizedRootBlock.Height). + Hex("sealed_root_block_id", logging.Entity(fnb.SealedRootBlock)). + Uint64("sealed_root_block_height", fnb.SealedRootBlock.Height). Msg("successfully opened protocol state") return nil @@ -1160,7 +1425,7 @@ func (fnb *FlowNodeBuilder) setRootSnapshot(rootSnapshot protocol.Snapshot) erro var err error // validate the root snapshot QCs - err = badgerState.IsValidRootSnapshotQCs(rootSnapshot) + err = datastore.IsValidRootSnapshotQCs(rootSnapshot) if err != nil { return fmt.Errorf("failed to validate root snapshot QCs: %w", err) } @@ -1185,31 +1450,58 @@ func (fnb *FlowNodeBuilder) setRootSnapshot(rootSnapshot protocol.Snapshot) erro return fmt.Errorf("failed to read root sealing segment: %w", err) } - fnb.RootBlock = sealingSegment.Highest() + fnb.FinalizedRootBlock = sealingSegment.Highest() + fnb.SealedRootBlock = sealingSegment.Sealed() fnb.RootQC, err = fnb.RootSnapshot.QuorumCertificate() if err != nil { return fmt.Errorf("failed to read root QC: %w", err) } - fnb.RootChainID = fnb.RootBlock.Header.ChainID - fnb.SporkID, err = fnb.RootSnapshot.Params().SporkID() - if err != nil { - return fmt.Errorf("failed to read spork ID: %w", err) - } + fnb.RootChainID = fnb.FinalizedRootBlock.ChainID + fnb.SporkID = fnb.RootSnapshot.Params().SporkID() return nil } func (fnb *FlowNodeBuilder) initLocal() error { + // NodeID has been set in initNodeInfo + myID := fnb.NodeID + if fnb.ObserverMode { + nodeID, err := flow.HexStringToIdentifier(fnb.BaseConfig.nodeIDHex) + if err != nil { + return fmt.Errorf("could not parse node ID from string (id: %v): %w", fnb.BaseConfig.nodeIDHex, err) + } + info, err := LoadPrivateNodeInfo(fnb.BaseConfig.BootstrapDir, nodeID) + if err != nil { + return fmt.Errorf("could not load private node info: %w", err) + } + + if info.Role != flow.RoleExecution { + return fmt.Errorf("observer mode is only available for execution nodes") + } + + id := flow.IdentitySkeleton{ + // observer mode uses the node id derived from the network key, + // rather than the node id from the node info file + NodeID: myID, + Address: info.Address, + Role: info.Role, + InitialWeight: 0, + NetworkPubKey: fnb.NetworkKey.PublicKey(), + StakingPubKey: 
fnb.StakingKey.PublicKey(), + } + fnb.Me, err = local.New(id, fnb.StakingKey) + if err != nil { + return fmt.Errorf("could not initialize local: %w", err) + } + + return nil + } + // Verify that my ID (as given in the configuration) is known to the network // (i.e. protocol state). There are two cases that will cause the following error: // 1) used the wrong node id, which is not part of the identity list of the finalized state // 2) the node id is a new one for a new spork, but the bootstrap data has not been updated. - myID, err := flow.HexStringToIdentifier(fnb.BaseConfig.nodeIDHex) - if err != nil { - return fmt.Errorf("could not parse node identifier: %w", err) - } - self, err := fnb.State.Final().Identity(myID) if err != nil { return fmt.Errorf("node identity not found in the identity list of the finalized state (id: %v): %w", myID, err) @@ -1219,11 +1511,7 @@ func (fnb *FlowNodeBuilder) initLocal() error { // We enforce this strictly for MainNet. For other networks (e.g. TestNet or BenchNet), we // are lenient, to allow ghost node to run as any role. if self.Role.String() != fnb.BaseConfig.NodeRole { - rootBlockHeader, err := fnb.State.Params().Root() - if err != nil { - return fmt.Errorf("could not get root block from protocol state: %w", err) - } - + rootBlockHeader := fnb.State.Params().FinalizedRoot() if rootBlockHeader.ChainID == flow.Mainnet { return fmt.Errorf("running as incorrect role, expected: %v, actual: %v, exiting", self.Role.String(), @@ -1244,7 +1532,7 @@ func (fnb *FlowNodeBuilder) initLocal() error { return fmt.Errorf("configured staking key does not match protocol state") } - fnb.Me, err = local.New(self, fnb.StakingKey) + fnb.Me, err = local.New(self.IdentitySkeleton, fnb.StakingKey) if err != nil { return fmt.Errorf("could not initialize local: %w", err) } @@ -1253,27 +1541,16 @@ func (fnb *FlowNodeBuilder) initLocal() error { } func (fnb *FlowNodeBuilder) initFvmOptions() { - blockFinder := environment.NewBlockFinder(fnb.Storage.Headers) - vmOpts := []fvm.Option{ - fvm.WithChain(fnb.RootChainID.Chain()), - fvm.WithBlocks(blockFinder), - fvm.WithAccountStorageLimit(true), - } - if fnb.RootChainID == flow.Testnet || fnb.RootChainID == flow.Sandboxnet || fnb.RootChainID == flow.Mainnet { - vmOpts = append(vmOpts, - fvm.WithTransactionFeesEnabled(true), - ) - } - if fnb.RootChainID == flow.Testnet || fnb.RootChainID == flow.Sandboxnet || fnb.RootChainID == flow.Localnet || fnb.RootChainID == flow.Benchnet { - vmOpts = append(vmOpts, - fvm.WithContractDeploymentRestricted(false), - ) - } - fnb.FvmOptions = vmOpts + fnb.FvmOptions = initialize.InitFvmOptions( + fnb.RootChainID, + fnb.Storage.Headers, + fnb.BaseConfig.TransactionFeesDisabled, + ) } // handleModules initializes the given module. func (fnb *FlowNodeBuilder) handleModule(v namedModuleFunc) error { + fnb.Logger.Info().Str("module", v.name).Msg("module initialization started") err := v.fn(fnb.NodeConfig) if err != nil { return fmt.Errorf("module %s initialization failed: %w", v.name, err) @@ -1294,10 +1571,20 @@ func (fnb *FlowNodeBuilder) handleModules() error { return nil } -// handleComponents registers the component's factory method with the ComponentManager to be run +func (fnb *FlowNodeBuilder) handleComponents() error { + AddWorkersFromComponents(fnb.Logger, fnb.NodeConfig, fnb.componentBuilder, fnb.components) + return nil +} + +// AddWorkersFromComponents registers the component's factory method with the ComponentManager to be run // when the node starts. 
// It uses signal channels to ensure that components are started serially. -func (fnb *FlowNodeBuilder) handleComponents() error { +func AddWorkersFromComponents[Input any]( + log zerolog.Logger, + input Input, + componentBuilder component.ComponentManagerBuilder, + components []NamedComponentFactory[Input], +) { // The parent/started channels are used to enforce serial startup. // - parent is the started channel of the previous component. // - when a component is ready, it closes its started channel by calling the provided callback. @@ -1308,27 +1595,22 @@ func (fnb *FlowNodeBuilder) handleComponents() error { parent := make(chan struct{}) close(parent) - var err error - asyncComponents := []namedComponentFunc{} + asyncComponents := []NamedComponentFactory[Input]{} // Run all components - for _, f := range fnb.components { + for _, f := range components { // Components with explicit dependencies are not started serially - if f.dependencies != nil { + if f.Dependencies != nil { asyncComponents = append(asyncComponents, f) continue } started := make(chan struct{}) - if f.errorHandler != nil { - err = fnb.handleRestartableComponent(f, parent, func() { close(started) }) + if f.ErrorHandler != nil { + componentBuilder.AddWorker(WorkerFromRestartableComponent(log, input, f, parent, func() { close(started) })) } else { - err = fnb.handleComponent(f, parent, func() { close(started) }) - } - - if err != nil { - return fmt.Errorf("could not handle component %s: %w", f.name, err) + componentBuilder.AddWorker(WorkerFromComponent(log, input, f, parent, func() { close(started) })) } parent = started @@ -1337,17 +1619,12 @@ func (fnb *FlowNodeBuilder) handleComponents() error { // Components with explicit dependencies are run asynchronously, which means dependencies in // the dependency list must be initialized outside of the component factory. for _, f := range asyncComponents { - fnb.Logger.Debug().Str("component", f.name).Int("dependencies", len(f.dependencies.components)).Msg("handling component asynchronously") - err = fnb.handleComponent(f, util.AllReady(f.dependencies.components...), func() {}) - if err != nil { - return fmt.Errorf("could not handle dependable component %s: %w", f.name, err) - } + log.Debug().Str("component", f.Name).Int("dependencies", len(f.Dependencies.Components)).Msg("handling component asynchronously") + componentBuilder.AddWorker(WorkerFromComponent(log, input, f, util.AllReady(f.Dependencies.Components...), func() {})) } - - return nil } -// handleComponent constructs a component using the provided ReadyDoneFactory, and registers a +// WorkerFromComponent constructs a component using the provided ReadyDoneFactory, and registers a // worker with the ComponentManager to be run when the node is started. // // The ComponentManager starts all workers in parallel. Since some components have non-idempotent @@ -1360,23 +1637,27 @@ func (fnb *FlowNodeBuilder) handleComponents() error { // using their ReadyDoneAware interface. After components are updated to use the idempotent // ReadyDoneAware interface and explicitly wait for their dependencies to be ready, we can remove // this channel chaining. 
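+// Any error returned by the component factory is treated as irrecoverable and is thrown to the
+// worker's signaler context.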
-func (fnb *FlowNodeBuilder) handleComponent(v namedComponentFunc, dependencies <-chan struct{}, started func()) error { +func WorkerFromComponent[Input any](log zerolog.Logger, input Input, v NamedComponentFactory[Input], dependencies <-chan struct{}, started func()) component.ComponentWorker { // Add a closure that starts the component when the node is started, and then waits for it to exit // gracefully. // Startup for all components will happen in parallel, and components can use their dependencies' // ReadyDoneAware interface to wait until they are ready. - fnb.componentBuilder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + return func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { // wait for the dependencies to be ready before starting if err := util.WaitClosed(ctx, dependencies); err != nil { return } - logger := fnb.Logger.With().Str("component", v.name).Logger() + logger := log.With().Str("component", v.Name).Logger() + logger.Info().Msg("component initialization started") // First, build the component using the factory method. - readyAware, err := v.fn(fnb.NodeConfig) + readyAware, err := v.ComponentFactory(input) if err != nil { - ctx.Throw(fmt.Errorf("component %s initialization failed: %w", v.name, err)) + ctx.Throw(fmt.Errorf("component %s initialization failed: %w", v.Name, err)) + } + if readyAware == nil { + ctx.Throw(fmt.Errorf("component %s initialization failed: nil component", v.Name)) } logger.Info().Msg("component initialization complete") @@ -1412,20 +1693,24 @@ func (fnb *FlowNodeBuilder) handleComponent(v namedComponentFunc, dependencies < // Finally, wait until component has finished shutting down. <-readyAware.Done() logger.Info().Msg("component shutdown complete") - }) - - return nil + } } -// handleRestartableComponent constructs a component using the provided ReadyDoneFactory, and +// WorkerFromRestartableComponent constructs a component using the provided ReadyDoneFactory, and // registers a worker with the ComponentManager to be run when the node is started. // // Restartable Components are components that can be restarted after successfully handling // an irrecoverable error. // // Any irrecoverable errors thrown by the component will be passed to the provided error handler. 
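+// Since the component factory may be invoked again on each restart, it must be safe to call
+// multiple times.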
-func (fnb *FlowNodeBuilder) handleRestartableComponent(v namedComponentFunc, parentReady <-chan struct{}, started func()) error { - fnb.componentBuilder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { +func WorkerFromRestartableComponent[Input any]( + log zerolog.Logger, + input Input, + v NamedComponentFactory[Input], + parentReady <-chan struct{}, + started func(), +) component.ComponentWorker { + return func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { // wait for the previous component to be ready before starting if err := util.WaitClosed(ctx, parentReady); err != nil { return @@ -1440,11 +1725,12 @@ func (fnb *FlowNodeBuilder) handleRestartableComponent(v namedComponentFunc, par // from within the componentFactory started() - log := fnb.Logger.With().Str("component", v.name).Logger() + log := log.With().Str("component", v.Name).Logger() // This may be called multiple times if the component is restarted componentFactory := func() (component.Component, error) { - c, err := v.fn(fnb.NodeConfig) + log.Info().Msg("component initialization started") + c, err := v.ComponentFactory(input) if err != nil { return nil, err } @@ -1463,15 +1749,13 @@ func (fnb *FlowNodeBuilder) handleRestartableComponent(v namedComponentFunc, par return c.(component.Component), nil } - err := component.RunComponent(ctx, componentFactory, v.errorHandler) + err := component.RunComponent(ctx, componentFactory, v.ErrorHandler) if err != nil && !errors.Is(err, ctx.Err()) { - ctx.Throw(fmt.Errorf("component %s encountered an unhandled irrecoverable error: %w", v.name, err)) + ctx.Throw(fmt.Errorf("component %s encountered an unhandled irrecoverable error: %w", v.Name, err)) } log.Info().Msg("component shutdown complete") - }) - - return nil + } } // ExtraFlags enables binding additional flags beyond those defined in BaseConfig. @@ -1506,10 +1790,10 @@ func (fnb *FlowNodeBuilder) AdminCommand(command string, f func(config *NodeConf // The ReadyDoneFactory may return either a `Component` or `ReadyDoneAware` instance. // In both cases, the object is started when the node is run, and the node will wait for the // component to exit gracefully. -func (fnb *FlowNodeBuilder) Component(name string, f ReadyDoneFactory) NodeBuilder { - fnb.components = append(fnb.components, namedComponentFunc{ - fn: f, - name: name, +func (fnb *FlowNodeBuilder) Component(name string, f ReadyDoneFactory[*NodeConfig]) NodeBuilder { + fnb.components = append(fnb.components, NamedComponentFactory[*NodeConfig]{ + ComponentFactory: f, + Name: name, }) return fnb } @@ -1525,26 +1809,26 @@ func (fnb *FlowNodeBuilder) Component(name string, f ReadyDoneFactory) NodeBuild // IMPORTANT: Dependable components are started in parallel with no guaranteed run order, so all // dependencies must be initialized outside of the ReadyDoneFactory, and their `Ready()` method // MUST be idempotent. -func (fnb *FlowNodeBuilder) DependableComponent(name string, f ReadyDoneFactory, dependencies *DependencyList) NodeBuilder { +func (fnb *FlowNodeBuilder) DependableComponent(name string, f ReadyDoneFactory[*NodeConfig], dependencies *DependencyList) NodeBuilder { // Note: dependencies are passed as a struct to allow updating the list after calling this method. 
// Passing a slice instead would result in out of sync metadata since slices are passed by reference
- fnb.components = append(fnb.components, namedComponentFunc{
- fn: f,
- name: name,
- dependencies: dependencies,
+ fnb.components = append(fnb.components, NamedComponentFactory[*NodeConfig]{
+ ComponentFactory: f,
+ Name: name,
+ Dependencies: dependencies,
 })
 return fnb
}

// OverrideComponent adds given builder function to the components set of the node builder. If a builder function with that name
// already exists, it will be overridden.
-func (fnb *FlowNodeBuilder) OverrideComponent(name string, f ReadyDoneFactory) NodeBuilder {
+func (fnb *FlowNodeBuilder) OverrideComponent(name string, f ReadyDoneFactory[*NodeConfig]) NodeBuilder {
 for i := 0; i < len(fnb.components); i++ {
- if fnb.components[i].name == name {
+ if fnb.components[i].Name == name {
 // found component with the name, override it.
- fnb.components[i] = namedComponentFunc{
- fn: f,
- name: name,
+ fnb.components[i] = NamedComponentFactory[*NodeConfig]{
+ ComponentFactory: f,
+ Name: name,
 }

 return fnb
@@ -1568,11 +1852,11 @@ func (fnb *FlowNodeBuilder) OverrideComponent(name string, f ReadyDoneFactory) N
// Note: The ReadyDoneFactory method may be called multiple times if the component is restarted.
//
// Any irrecoverable errors thrown by the component will be passed to the provided error handler.
-func (fnb *FlowNodeBuilder) RestartableComponent(name string, f ReadyDoneFactory, errorHandler component.OnError) NodeBuilder {
- fnb.components = append(fnb.components, namedComponentFunc{
- fn: f,
- name: name,
- errorHandler: errorHandler,
+func (fnb *FlowNodeBuilder) RestartableComponent(name string, f ReadyDoneFactory[*NodeConfig], errorHandler component.OnError) NodeBuilder {
+ fnb.components = append(fnb.components, NamedComponentFactory[*NodeConfig]{
+ ComponentFactory: f,
+ Name: name,
+ ErrorHandler: errorHandler,
 })
 return fnb
}
@@ -1620,11 +1904,29 @@ func WithBindAddress(bindAddress string) Option {
 }
}

-func WithDataDir(dataDir string) Option {
+// WithProtocolDir sets the protocol data directory for the database.
+// It will be ignored if WithProtocolDB is used
+func WithProtocolDir(dataDir string) Option {
 return func(config *BaseConfig) {
- if config.db == nil {
- config.datadir = dataDir
+ if config.protocolDB != nil {
+ log.Warn().Msgf("ignoring data directory %s as storage database is already set", dataDir)
+ return
 }
+
+ config.datadir = dataDir
+ }
+}
+
+// WithProtocolDB sets the storage database instance.
+// If used, then the WithProtocolDir option will be ignored
+func WithProtocolDB(db storage.DB) Option {
+ return func(config *BaseConfig) {
+ if config.datadir != "" && config.datadir != NotSet {
+ log.Warn().Msgf("ignoring previously set data directory %v since the storage database is provided directly", config.datadir)
+ config.datadir = ""
+ }
+
+ config.protocolDB = db
 }
}

@@ -1658,14 +1960,6 @@ func WithLogLevel(level string) Option {
 }
}

-// WithDB takes precedence over WithDataDir and datadir will be set to empty if DB is set using this option
-func WithDB(db *badger.DB) Option {
- return func(config *BaseConfig) {
- config.db = db
- config.datadir = ""
- }
-}
-
// FlowNode creates a new Flow node builder with the given name.
func FlowNode(role string, opts ...Option) *FlowNodeBuilder {
 config := DefaultBaseConfig()
@@ -1732,6 +2026,8 @@ func (fnb *FlowNodeBuilder) RegisterDefaultAdminCommands() {
 return common.NewListConfigCommand(config.ConfigManager)
 }).AdminCommand("read-blocks", func(config *NodeConfig) commands.AdminCommand {
 return storageCommands.NewReadBlocksCommand(config.State, config.Storage.Blocks)
+ }).AdminCommand("read-range-blocks", func(conf *NodeConfig) commands.AdminCommand {
+ return storageCommands.NewReadRangeBlocksCommand(conf.Storage.Blocks)
 }).AdminCommand("read-results", func(config *NodeConfig) commands.AdminCommand {
 return storageCommands.NewReadResultsCommand(config.State, config.Storage.Results)
 }).AdminCommand("read-seals", func(config *NodeConfig) commands.AdminCommand {
@@ -1758,10 +2054,6 @@ func (fnb *FlowNodeBuilder) Build() (Node, error) {
}

func (fnb *FlowNodeBuilder) onStart() error {
-
- // seed random generator
- rand.Seed(time.Now().UnixNano())
-
 // init nodeinfo by reading the private bootstrap file if not already set
 if fnb.NodeID == flow.ZeroID {
 if err := fnb.initNodeInfo(); err != nil {
@@ -1773,7 +2065,11 @@ func (fnb *FlowNodeBuilder) onStart() error {
 return err
 }

- if err := fnb.initDB(); err != nil {
+ if err := fnb.initStorageLockManager(); err != nil {
+ return err
+ }
+
+ if err := fnb.initProtocolDB(); err != nil {
 return err
 }

@@ -1861,3 +2157,31 @@ func (fnb *FlowNodeBuilder) extraFlagsValidation() error {
 }
 return nil
}
+
+// DhtSystemActivationStatus parses the given role string and returns the corresponding DHT system activation status.
+// Args:
+// - roleStr: the role string to parse.
+// - enabled: whether the DHT system is configured to be enabled. Only meaningful for access and execution nodes.
+// Returns:
+// - DhtSystemActivation: the corresponding DHT system activation status.
+// - error: if the role string is invalid, returns an error.
+func DhtSystemActivationStatus(roleStr string, enabled bool) (p2pbuilder.DhtSystemActivation, error) {
+ if roleStr == "ghost" {
+ // "ghost" is not a parseable role, so we short-circuit instead of parsing it
+ return p2pbuilder.DhtSystemDisabled, nil
+ }
+
+ role, err := flow.ParseRole(roleStr)
+ if err != nil {
+ // the ghost case has already returned above, so any parse failure here is a genuine error
+ return p2pbuilder.DhtSystemDisabled, fmt.Errorf("could not parse node role: %w", err)
+ }
+
+ // Only access and execution nodes need to run a DHT, which is used by bitswap.
+ // Access nodes also run a DHT on the public network for peer discovery of unstaked nodes.
+ if role != flow.RoleAccess && role != flow.RoleExecution {
+ return p2pbuilder.DhtSystemDisabled, nil
+ }
+
+ return p2pbuilder.DhtSystemActivation(enabled), nil
+}
diff --git a/cmd/scaffold/pebble_db.go b/cmd/scaffold/pebble_db.go
new file mode 100644
index 00000000000..e620c467d86
--- /dev/null
+++ b/cmd/scaffold/pebble_db.go
@@ -0,0 +1,35 @@
+package scaffold
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/cockroachdb/pebble/v2"
+ "github.com/rs/zerolog"
+
+ pebblestorage "github.com/onflow/flow-go/storage/pebble"
+)
+
+func InitPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, io.Closer, error) {
+ // a database directory must be provided to initialize the pebble DB.
+ // since a default directory is configured for the pebble DB, this check
+ // should never fail in practice; it is merely a sanity check
+ if dir == "not set" {
+ return nil, nil, fmt.Errorf("missing required flag '--pebble-dir'")
+ }
+
+ // Pre-create DB path
+ err := os.MkdirAll(dir, 0700)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not create pebble db (path: %s): %w", dir, err)
+ }
+
+ db, err := pebblestorage.SafeOpen(logger, dir)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not open newly created pebble db (path: %s): %w", dir, err)
+ }
+
+ return db, db, nil
+}
diff --git a/cmd/scaffold/pebble_db_test.go b/cmd/scaffold/pebble_db_test.go
new file mode 100644
index 00000000000..6c49490e81e
--- /dev/null
+++ b/cmd/scaffold/pebble_db_test.go
@@ -0,0 +1,25 @@
+package scaffold_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/flow-go/cmd"
+ "github.com/onflow/flow-go/cmd/scaffold"
+ "github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestInitPebbleDB(t *testing.T) {
+ unittest.RunWithTempDir(t, func(dir string) {
+ _, closer, err := scaffold.InitPebbleDB(unittest.Logger(), dir)
+ require.NoError(t, err)
+ require.NoError(t, closer.Close())
+ })
+}
+
+func TestInitPebbleDBDirNotSet(t *testing.T) {
+ _, _, err := scaffold.InitPebbleDB(unittest.Logger(), cmd.NotSet)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "missing required flag")
+}
diff --git a/cmd/scaffold_test.go b/cmd/scaffold_test.go
index e37f708f190..b63eb68f22a 100644
--- a/cmd/scaffold_test.go
+++ b/cmd/scaffold_test.go
@@ -28,6 +28,7 @@ import (
 "github.com/onflow/flow-go/module/component"
 "github.com/onflow/flow-go/module/irrecoverable"
 "github.com/onflow/flow-go/module/profiler"
+ p2pbuilder "github.com/onflow/flow-go/network/p2p/builder"
 "github.com/onflow/flow-go/utils/unittest"
)
@@ -42,8 +43,7 @@ func TestLoadSecretsEncryptionKey(t *testing.T) {
 t.Run("should return ErrNotExist if file doesn't exist", func(t *testing.T) {
 require.NoFileExists(t, path)
 _, err := loadSecretsEncryptionKey(dir, myID)
- assert.Error(t, err)
- assert.True(t, errors.Is(err, os.ErrNotExist))
+ assert.ErrorIs(t, err, os.ErrNotExist)
 })

 t.Run("should return key and no error if file exists", func(t *testing.T) {
@@ -64,7 +64,7 @@ func TestLoadSecretsEncryptionKey(t *testing.T) {
// Test the components are started in the correct order, and are run serially
func TestComponentsRunSerially(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
- signalerCtx, _ := irrecoverable.WithSignaler(ctx)
+ signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)

 nb := FlowNode("scaffold test")
 nb.componentBuilder = component.NewComponentManagerBuilder()
@@ -165,7 +165,7 @@ func TestPostShutdown(t *testing.T) {
func TestOverrideComponent(t *testing.T) {
 ctx, cancel
:= context.WithCancel(context.Background()) - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) nb := FlowNode("scaffold test") nb.componentBuilder = component.NewComponentManagerBuilder() @@ -225,7 +225,7 @@ func TestOverrideComponent(t *testing.T) { func TestOverrideModules(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) nb := FlowNode("scaffold test") nb.componentBuilder = component.NewComponentManagerBuilder() @@ -283,7 +283,7 @@ func TestOverrideModules(t *testing.T) { type testComponentDefinition struct { name string - factory ReadyDoneFactory + factory ReadyDoneFactory[*NodeConfig] errorHandler component.OnError } @@ -602,7 +602,7 @@ func TestDependableComponentWaitForDependencies(t *testing.T) { func testDependableComponentWaitForDependencies(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) nb := FlowNode("scaffold test") nb.componentBuilder = component.NewComponentManagerBuilder() @@ -729,3 +729,88 @@ type mockRoundTripper struct { func (m *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { return m.DoFunc(req) } + +// TestDhtSystemActivationStatus tests that the DHT system activation status is correctly +// determined based on the role string. +// This test is not exhaustive, but should cover the most common cases. +func TestDhtSystemActivationStatus(t *testing.T) { + tests := []struct { + name string + roleStr string + enabled bool + expected p2pbuilder.DhtSystemActivation + expectErr bool + }{ + { + name: "ghost role returns disabled", + roleStr: "ghost", + enabled: true, + expected: p2pbuilder.DhtSystemDisabled, + expectErr: false, + }, + { + name: "access role returns enabled", + roleStr: "access", + enabled: true, + expected: p2pbuilder.DhtSystemEnabled, + expectErr: false, + }, + { + name: "execution role returns enabled", + roleStr: "execution", + enabled: true, + expected: p2pbuilder.DhtSystemEnabled, + expectErr: false, + }, + { + name: "access role with disabled returns disabled", + roleStr: "access", + enabled: false, + expected: p2pbuilder.DhtSystemDisabled, + expectErr: false, + }, + { + name: "execution role with disabled returns disabled", + roleStr: "execution", + enabled: false, + expected: p2pbuilder.DhtSystemDisabled, + expectErr: false, + }, + { + name: "collection role returns disabled", + roleStr: "collection", + enabled: true, + expected: p2pbuilder.DhtSystemDisabled, + expectErr: false, + }, + { + name: "consensus role returns disabled", + roleStr: "consensus", + enabled: true, + expected: p2pbuilder.DhtSystemDisabled, + expectErr: false, + }, + { + name: "verification nodes return disabled", + roleStr: "verification", + enabled: true, + expected: p2pbuilder.DhtSystemDisabled, + expectErr: false, + }, + { + name: "invalid role returns error", + roleStr: "invalidRole", + enabled: true, + expected: p2pbuilder.DhtSystemDisabled, + expectErr: true, + }, // Add more test cases for other roles, if needed. 
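+ // suggested additional case: the ghost branch short-circuits before the enabled
+ // flag is consulted, so ghost should stay disabled even when DHT is enabled=false.
+ {
+ name: "ghost role with disabled returns disabled",
+ roleStr: "ghost",
+ enabled: false,
+ expected: p2pbuilder.DhtSystemDisabled,
+ expectErr: false,
+ },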
+ } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := DhtSystemActivationStatus(tt.roleStr, tt.enabled) + require.Equal(t, tt.expectErr, err != nil, "unexpected error status") + require.Equal(t, tt.expected, result, "unexpected activation status") + }) + } +} diff --git a/cmd/testclient/README.md b/cmd/testclient/README.md deleted file mode 100644 index 16f853c39fa..00000000000 --- a/cmd/testclient/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Flow Test Client - -This package implements a client for testing Flow by submitting transactions -to an ingress GRPC server. - -## Usage - -### Collection Nodes - -To start 3 connected collection nodes, use the following commands: - -```shell script -./collection \ - --entries collection-0000000000000000000000000000000000000000000000000000000000000001@localhost:8001=1000,collection-0000000000000000000000000000000000000000000000000000000000000002@localhost:8002=1000,collection-0000000000000000000000000000000000000000000000000000000000000003@localhost:8003=1000 \ - --loglevel debug \ - --nodeid 0000000000000000000000000000000000000000000000000000000000000001 \ - --datadir ./data1 \ - --ingress-addr localhost:9001 -``` - -```shell script -./collection \ - --entries collection-0000000000000000000000000000000000000000000000000000000000000001@localhost:8001=1000,collection-0000000000000000000000000000000000000000000000000000000000000002@localhost:8002=1000,collection-0000000000000000000000000000000000000000000000000000000000000003@localhost:8003=1000 \ - --loglevel debug \ - --nodeid 0000000000000000000000000000000000000000000000000000000000000002 \ - --datadir ./data2 \ - --ingress-addr localhost:9002 -``` - -```shell script -./collection \ - --entries collection-0000000000000000000000000000000000000000000000000000000000000001@localhost:8001=1000,collection-0000000000000000000000000000000000000000000000000000000000000002@localhost:8002=1000,collection-0000000000000000000000000000000000000000000000000000000000000003@localhost:8003=1000 \ - --loglevel debug \ - --nodeid 0000000000000000000000000000000000000000000000000000000000000003 \ - --datadir ./data3 \ - --ingress-addr localhost:9003 -``` - -This starts 3 collection nodes, each connected to one another. 
diff --git a/cmd/testclient/go.mod b/cmd/testclient/go.mod deleted file mode 100644 index 0a02e69ad42..00000000000 --- a/cmd/testclient/go.mod +++ /dev/null @@ -1,31 +0,0 @@ -module github.com/onflow/flow-go/cmd/testclient - -go 1.19 - -require ( - github.com/onflow/flow-go-sdk v0.4.1 - github.com/spf13/pflag v1.0.5 - google.golang.org/grpc v1.52.3 -) - -require ( - github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/ethereum/go-ethereum v1.9.9 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/onflow/cadence v0.4.0 // indirect - github.com/onflow/flow/protobuf/go/flow v0.1.5-0.20200601215056-34a11def1d6b // indirect - github.com/pkg/errors v0.8.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/raviqqe/hamt v0.0.0-20190615202029-864fb7caef85 // indirect - github.com/rivo/uniseg v0.1.0 // indirect - github.com/segmentio/fasthash v1.0.2 // indirect - github.com/stretchr/testify v1.5.1 // indirect - golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect - google.golang.org/protobuf v1.28.1 // indirect - gopkg.in/yaml.v2 v2.2.4 // indirect -) diff --git a/cmd/testclient/go.sum b/cmd/testclient/go.sum deleted file mode 100644 index dd3500c37d1..00000000000 --- a/cmd/testclient/go.sum +++ /dev/null @@ -1,262 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/antlr/antlr4 v0.0.0-20191217191749-ff67971f8580/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= -github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f h1:0cEys61Sr2hUBEXfNV8eyQP01oZuBgoMeHunebPirK8= -github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= -github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= -github.com/c-bata/go-prompt v0.2.3/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= -github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.9.9 h1:jnoBvjH8aMH++iH14XmiJdAsnRcmZUM+B5fsnEZBVE0= -github.com/ethereum/go-ethereum 
v1.9.9/go.mod h1:a9TqabFudpDu1nucId+k9S8R9whYaHnGBLKFouA5EAo= -github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fxamacker/cbor/v2 v2.2.0 h1:6eXqdDDe588rSYAi1HfZKbx6YYQO4mxQ9eC6xYpU/JQ= -github.com/fxamacker/cbor/v2 v2.2.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.5/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= -github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= -github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381 h1:bqDmpDG49ZRnB5PcgP0RXtQvnMSgIF14M7CBd2shtXs= -github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onflow/cadence v0.4.0 h1:oAKY/HclZZhc5wJgJwdPjWXJuC5IjuuHHVAAq3S7AHI= -github.com/onflow/cadence v0.4.0/go.mod h1:gaPtSctdMzT5NAoJgzsRuwUkdgRswVHsRXFNNmCTn3I= -github.com/onflow/flow-go-sdk v0.4.1 h1:YZex1yeLnCwKCx7sfmVcmDP8+sO3gJeHQxL1karvtHQ= -github.com/onflow/flow-go-sdk v0.4.1/go.mod h1:/wblHrPtccdgRA/Zk2EWqTEI4RDtGB8r0peI5zLkFMo= -github.com/onflow/flow/protobuf/go/flow v0.1.5-0.20200601215056-34a11def1d6b 
h1:n/KYBKS3/m5g0GNXWLueVUgwcI51XgAXlFBqox2c1uA= -github.com/onflow/flow/protobuf/go/flow v0.1.5-0.20200601215056-34a11def1d6b/go.mod h1:kRugbzZjwQqvevJhrnnCFMJZNmoSJmxlKt6hTGXZojM= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= -github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/term v0.0.0-20190109203006-aa71e9d9e942/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/raviqqe/hamt v0.0.0-20190615202029-864fb7caef85 h1:FG/cFwuZM0j3eEBI5jkkYRn6RufVzcvtTXN+YFHWJjI= -github.com/raviqqe/hamt v0.0.0-20190615202029-864fb7caef85/go.mod h1:I9elsTaXMhu41qARmzefHy7v2KmAV2TB1yH4E+nBSf0= -github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= -github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= -github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= -github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/segmentio/fasthash v1.0.1/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= -github.com/segmentio/fasthash v1.0.2 h1:86fGDl2hB+iSHYlccB/FP9qRGvLNuH/fhEEFn6gnQUs= -github.com/segmentio/fasthash v1.0.2/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= -github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= -github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= -github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ= -google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= -gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/cmd/testclient/main.go b/cmd/testclient/main.go deleted file mode 100644 index ce42309c78e..00000000000 --- a/cmd/testclient/main.go +++ /dev/null @@ -1,105 +0,0 @@ -package main - -import ( - "context" - "crypto/rand" - "os" - "os/signal" - "time" - - 
"github.com/spf13/pflag" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - sdk "github.com/onflow/flow-go-sdk" - "github.com/onflow/flow-go-sdk/client" - "github.com/onflow/flow-go-sdk/crypto" -) - -var ( - targetAddr string - txPerSec int -) - -func main() { - pflag.StringVarP(&targetAddr, "target-address", "t", "localhost:9001", "address of the collection node to connect to") - pflag.IntVarP(&txPerSec, "transaction-rate", "r", 1, "number of transactions to send per second") - - pflag.Parse() - - c, err := client.New(targetAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - panic(err) - } - - // Generate key - seed := make([]byte, crypto.MinSeedLength) - _, err = rand.Read(seed) - if err != nil { - panic(err) - } - - sk, err := crypto.GeneratePrivateKey(crypto.ECDSA_P256, seed) - if err != nil { - panic(err) - } - - accountKey := sdk.NewAccountKey(). - FromPrivateKey(sk). - SetHashAlgo(crypto.SHA3_256). - SetWeight(sdk.AccountKeyWeightThreshold) - - signer, err := crypto.NewInMemorySigner(sk, accountKey.HashAlgo) - if err != nil { - panic(err) - } - - addr := sdk.NewAddressGenerator(sdk.Testnet).NextAddress() - - sig := make(chan os.Signal, 1) - signal.Notify(sig, os.Interrupt) - - nonce := uint64(0) - for { - - select { - - case <-time.After(time.Second / time.Duration(txPerSec)): - - nonce++ - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - latest, err := c.GetLatestBlockHeader(ctx, false) - if err != nil { - panic(err) - } - - tx := sdk.NewTransaction(). - SetScript([]byte(` - transaction { - prepare(signer: AuthAccount) { log(signer.address) } - } - `)). - SetGasLimit(100). - SetProposalKey(addr, accountKey.Index, nonce). - SetReferenceBlockID(latest.ID). - SetPayer(addr). 
- AddAuthorizer(addr) - - err = tx.SignEnvelope(addr, 1, signer) - if err != nil { - panic(err) - } - - err = c.SendTransaction(ctx, *tx) - if err != nil { - panic(err) - } - - cancel() - - case <-sig: - os.Exit(0) - } - } -} diff --git a/cmd/util/cmd/addresses/cmd.go b/cmd/util/cmd/addresses/cmd.go new file mode 100644 index 00000000000..ffef57366e6 --- /dev/null +++ b/cmd/util/cmd/addresses/cmd.go @@ -0,0 +1,50 @@ +package addresses + +import ( + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/model/flow" +) + +var ( + flagChain string + flagCount int + flagSeparator string +) + +var Cmd = &cobra.Command{ + Use: "addresses", + Short: "generate addresses for a chain", + Run: run, +} + +func init() { + Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name") + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().IntVar(&flagCount, "count", 1, "Count") + _ = Cmd.MarkFlagRequired("count") + + Cmd.Flags().StringVar(&flagSeparator, "separator", ",", "Separator to use between addresses") +} + +func run(*cobra.Command, []string) { + chain := flow.ChainID(flagChain).Chain() + + generator := chain.NewAddressGenerator() + + for i := 0; i < flagCount; i++ { + address, err := generator.NextAddress() + if err != nil { + log.Fatal().Err(err).Msg("failed to generate address") + } + + str := address.Hex() + + if i > 0 { + print(flagSeparator) + } + print(str) + } +} diff --git a/cmd/util/cmd/atree_inlined_status/atree_inlined_status_test.go b/cmd/util/cmd/atree_inlined_status/atree_inlined_status_test.go new file mode 100644 index 00000000000..3a73ab5c212 --- /dev/null +++ b/cmd/util/cmd/atree_inlined_status/atree_inlined_status_test.go @@ -0,0 +1,133 @@ +package atree_inlined_status + +import ( + crand "crypto/rand" + "math/rand" + "testing" + + "github.com/fxamacker/cbor/v2" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" +) + +func TestCheckAtreeInlinedStatus(t *testing.T) { + const nWorkers = 10 + + t.Run("no payloads", func(t *testing.T) { + var payloads []*ledger.Payload + atreeInlinedPayloadCount, atreeNonInlinedPayloadCount, err := checkAtreeInlinedStatus(payloads, nWorkers) + require.NoError(t, err) + require.Equal(t, 0, atreeInlinedPayloadCount) + require.Equal(t, 0, atreeNonInlinedPayloadCount) + }) + + t.Run("payloads no goroutine", func(t *testing.T) { + payloadCount := rand.Intn(50) + testCheckAtreeInlinedStatus(t, payloadCount, nWorkers) + }) + + t.Run("payloads using goroutine", func(t *testing.T) { + payloadCount := rand.Intn(numOfPayloadPerJob) + numOfPayloadPerJob + testCheckAtreeInlinedStatus(t, payloadCount, nWorkers) + }) +} + +func testCheckAtreeInlinedStatus(t *testing.T, payloadCount int, nWorkers int) { + atreeNoninlinedPayloadCount := rand.Intn(payloadCount + 1) + atreeInlinedPayloadCount := payloadCount - atreeNoninlinedPayloadCount + + payloads := make([]*ledger.Payload, 0, payloadCount) + for i := 0; i < atreeInlinedPayloadCount; i++ { + key := getRandomKey() + value := getAtreeInlinedPayload(t) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + for i := 0; i < atreeNoninlinedPayloadCount; i++ { + key := getRandomKey() + value := getAtreeNoninlinedPayload(t) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + rand.Shuffle(len(payloads), func(i, j int) { + payloads[i], payloads[j] = payloads[j], payloads[i] + }) + + gotAtreeInlinedPayloadCount, gotAtreeNoninlinedPayloadCount, err := checkAtreeInlinedStatus(payloads, nWorkers) + 
require.NoError(t, err) + require.Equal(t, atreeNoninlinedPayloadCount, gotAtreeNoninlinedPayloadCount) + require.Equal(t, atreeInlinedPayloadCount, gotAtreeInlinedPayloadCount) +} + +func getAtreeNoninlinedPayload(t *testing.T) []byte { + num := rand.Uint64() + encodedNum, err := cbor.Marshal(num) + require.NoError(t, err) + + data := []byte{ + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + } + + return append(data, encodedNum...) +} + +func getAtreeInlinedPayload(t *testing.T) []byte { + num := rand.Uint64() + encodedNum, err := cbor.Marshal(num) + require.NoError(t, err) + + data := []byte{ + // version + 0x10, + // flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + } + + return append(data, encodedNum...) +} + +func getRandomKey() ledger.Key { + var address [8]byte + _, err := crand.Read(address[:]) + if err != nil { + panic(err) + } + + var key [9]byte + key[0] = flow.SlabIndexPrefix + _, err = crand.Read(key[1:]) + if err != nil { + panic(err) + } + + return ledger.Key{ + KeyParts: []ledger.KeyPart{ + {Type: uint16(0), Value: address[:]}, + {Type: uint16(2), Value: key[:]}, + }} +} diff --git a/cmd/util/cmd/atree_inlined_status/cmd.go b/cmd/util/cmd/atree_inlined_status/cmd.go new file mode 100644 index 00000000000..f55dc0be107 --- /dev/null +++ b/cmd/util/cmd/atree_inlined_status/cmd.go @@ -0,0 +1,296 @@ +package atree_inlined_status + +import ( + "context" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" + + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" +) + +var ( + flagOutputDirectory string + flagPayloads string + flagState string + flagStateCommitment string + flagNumOfPayloadToSample int + flagNWorker int +) + +var Cmd = &cobra.Command{ + Use: "atree-inlined-status", + Short: "Check if atree payloads are inlined in given state", + Run: run, +} + +const ( + ReporterName = "atree-inlined-status" + + numOfPayloadPerJob = 1_000 +) + +func init() { + + Cmd.Flags().StringVar( + &flagPayloads, + "payloads", + "", + "Input payload file name", + ) + + Cmd.Flags().StringVar( + &flagState, + "state", + "", + "Input state file name", + ) + + Cmd.Flags().StringVar( + &flagStateCommitment, + "state-commitment", + "", + "Input state commitment", + ) + + Cmd.Flags().StringVar( + &flagOutputDirectory, + "output-directory", + "", + "Output directory", + ) + + _ = Cmd.MarkFlagRequired("output-directory") + + Cmd.Flags().IntVar( + &flagNWorker, + "n-workers", + 8, + "number of workers to use", + ) + + Cmd.Flags().IntVar( + &flagNumOfPayloadToSample, + "n-payloads", + -1, + "number of payloads to sample for inlined status (sample all payloads by default)", + ) +} + +func run(*cobra.Command, []string) { + + if flagPayloads == "" && flagState == "" { + log.Fatal().Msg("Either --payloads or --state must be provided") + } else if flagPayloads != "" && flagState != "" { + log.Fatal().Msg("Only one of --payloads or --state must be provided") + } + if flagState != "" && flagStateCommitment == "" { + log.Fatal().Msg("--state-commitment must be provided when 
--state is provided") + } + + if flagNumOfPayloadToSample == 0 { + log.Fatal().Msg("--n-payloads must be either > 0 or -1 (check all payloads)") + } + + rw := reporters.NewReportFileWriterFactory(flagOutputDirectory, log.Logger). + ReportWriter(ReporterName) + defer rw.Close() + + var payloads []*ledger.Payload + var err error + + if flagPayloads != "" { + log.Info().Msgf("Reading payloads from %s", flagPayloads) + + _, payloads, err = util.ReadPayloadFile(log.Logger, flagPayloads) + if err != nil { + log.Fatal().Err(err).Msg("failed to read payloads") + } + } else { + log.Info().Msgf("Reading trie %s", flagStateCommitment) + + stateCommitment := util.ParseStateCommitment(flagStateCommitment) + payloads, err = util.ReadTrieForPayloads(flagState, stateCommitment) + if err != nil { + log.Fatal().Err(err).Msg("failed to read state") + } + } + + totalPayloadCount := len(payloads) + samplePayloadCount := len(payloads) + + if flagNumOfPayloadToSample > 0 && flagNumOfPayloadToSample < len(payloads) { + samplePayloadCount = flagNumOfPayloadToSample + } + + payloadsToSample := payloads + + if samplePayloadCount < totalPayloadCount { + atreePayloadCount := 0 + i := 0 + for ; atreePayloadCount < samplePayloadCount; i++ { + registerID, _, err := convert.PayloadToRegister(payloads[i]) + if err != nil { + log.Fatal().Err(err).Msg("failed to convert payload to register") + } + + if flow.IsSlabIndexKey(registerID.Key) { + atreePayloadCount++ + } + } + + payloadsToSample = payloads[:i] + } + + atreeInlinedPayloadCount, atreeNonInlinedPayloadCount, err := checkAtreeInlinedStatus(payloadsToSample, flagNWorker) + if err != nil { + log.Fatal().Err(err).Msg("failed to check atree inlined status") + } + + rw.Write(stateStatus{ + InputPayloadFile: flagPayloads, + InputState: flagState, + InputStateCommitment: flagStateCommitment, + TotalPayloadCount: len(payloads), + SamplePayloadCount: len(payloadsToSample), + AtreeInlinedPayloadCount: atreeInlinedPayloadCount, + AtreeNonInlinedPayloadCount: atreeNonInlinedPayloadCount, + }) +} + +func checkAtreeInlinedStatus(payloads []*ledger.Payload, nWorkers int) ( + atreeInlinedPayloadCount int, + atreeNonInlinedPayloadCount int, + err error, +) { + + if len(payloads)/numOfPayloadPerJob < nWorkers { + nWorkers = len(payloads) / numOfPayloadPerJob + } + + log.Info().Msgf("checking atree payload inlined status...") + + if nWorkers <= 1 { + // Skip goroutine to avoid overhead + for _, p := range payloads { + isAtreeSlab, isInlined, err := util.IsPayloadAtreeInlined(p) + if err != nil { + return 0, 0, err + } + + if !isAtreeSlab { + continue + } + + if isInlined { + atreeInlinedPayloadCount++ + } else { + atreeNonInlinedPayloadCount++ + } + } + return + } + + type job struct { + payloads []*ledger.Payload + } + + type result struct { + atreeInlinedPayloadCount int + atreeNonInlinedPayloadCount int + } + + numOfJobs := (len(payloads) + numOfPayloadPerJob - 1) / numOfPayloadPerJob + + jobs := make(chan job, numOfJobs) + + results := make(chan result, numOfJobs) + + g, ctx := errgroup.WithContext(context.Background()) + + // Launch goroutine to check atree register inlined state + for i := 0; i < nWorkers; i++ { + g.Go(func() error { + for job := range jobs { + var result result + + for _, p := range job.payloads { + isAtreeSlab, isInlined, err := util.IsPayloadAtreeInlined(p) + if err != nil { + return err + } + + if !isAtreeSlab { + continue + } + + if isInlined { + result.atreeInlinedPayloadCount++ + } else { + result.atreeNonInlinedPayloadCount++ + } + } + + select { + case 
results <- result: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil + }) + } + + // Launch goroutine to wait for workers and close output channel + go func() { + _ = g.Wait() + close(results) + }() + + // Send job to jobs channel + payloadStartIndex := 0 + for { + if payloadStartIndex == len(payloads) { + close(jobs) + break + } + + endIndex := payloadStartIndex + numOfPayloadPerJob + if endIndex > len(payloads) { + endIndex = len(payloads) + } + + jobs <- job{payloads: payloads[payloadStartIndex:endIndex]} + + payloadStartIndex = endIndex + } + + // Gather results + for result := range results { + atreeInlinedPayloadCount += result.atreeInlinedPayloadCount + atreeNonInlinedPayloadCount += result.atreeNonInlinedPayloadCount + } + + log.Info().Msgf("waiting for goroutines...") + + if err := g.Wait(); err != nil { + return 0, 0, err + } + + return atreeInlinedPayloadCount, atreeNonInlinedPayloadCount, nil +} + +type stateStatus struct { + InputPayloadFile string `json:",omitempty"` + InputState string `json:",omitempty"` + InputStateCommitment string `json:",omitempty"` + TotalPayloadCount int + SamplePayloadCount int + AtreeInlinedPayloadCount int + AtreeNonInlinedPayloadCount int +} diff --git a/cmd/util/cmd/bootstrap-execution-state-payloads/cmd.go b/cmd/util/cmd/bootstrap-execution-state-payloads/cmd.go new file mode 100644 index 00000000000..6d76bc5b408 --- /dev/null +++ b/cmd/util/cmd/bootstrap-execution-state-payloads/cmd.go @@ -0,0 +1,90 @@ +package addresses + +import ( + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +var ( + flagChain string + flagOutputPayloadFileName string +) + +var Cmd = &cobra.Command{ + Use: "bootstrap-execution-state-payloads", + Short: "generate payloads for execution state of bootstrapped chain", + Run: run, +} + +func init() { + Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name") + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().StringVar( + &flagOutputPayloadFileName, + "output-filename", + "", + "Output payload file name") + _ = Cmd.MarkFlagRequired("output-filename") + +} + +func run(*cobra.Command, []string) { + + chain := flow.ChainID(flagChain).Chain() + + log.Info().Msgf("creating payloads for chain %s", chain) + + ctx := fvm.NewContext( + fvm.WithChain(chain), + ) + + vm := fvm.NewVirtualMachine() + + storageSnapshot := snapshot.MapStorageSnapshot{} + + bootstrapProcedure := fvm.Bootstrap( + unittest.ServiceAccountPublicKey, + fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply), + ) + + executionSnapshot, _, err := vm.Run( + ctx, + bootstrapProcedure, + storageSnapshot, + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to run bootstrap procedure") + } + + payloads := make([]*ledger.Payload, 0, len(executionSnapshot.WriteSet)) + + for registerID, registerValue := range executionSnapshot.WriteSet { + payloadKey := convert.RegisterIDToLedgerKey(registerID) + payload := ledger.NewPayload(payloadKey, registerValue) + payloads = append(payloads, payload) + } + + log.Info().Msgf("writing payloads to %s", flagOutputPayloadFileName) + + numOfPayloadWritten, err := util.CreatePayloadFile( + log.Logger, + flagOutputPayloadFileName, + payloads, + nil, + false, + ) + if err != nil { + 
log.Fatal().Err(err).Msg("failed to create payloads") + } + + log.Info().Msgf("wrote %d payloads", numOfPayloadWritten) +} diff --git a/cmd/util/cmd/check-storage/cmd.go b/cmd/util/cmd/check-storage/cmd.go new file mode 100644 index 00000000000..c63f64deda6 --- /dev/null +++ b/cmd/util/cmd/check-storage/cmd.go @@ -0,0 +1,644 @@ +package check_storage + +import ( + "context" + "fmt" + "strings" + + "github.com/onflow/atree" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/crypto/hash" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/runtime" + + "github.com/onflow/flow-go/cmd/util/ledger/migrations" + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/emulator/state" + storageState "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" + moduleUtil "github.com/onflow/flow-go/module/util" +) + +var ( + flagPayloads string + flagState string + flagStateCommitment string + flagOutputDirectory string + flagChain string + flagNWorker int + flagHasAccountFormatV1 bool + flagHasAccountFormatV2 bool + flagIsAccountStatusV4 bool + flagCheckContract bool +) + +var ( + evmAccount flow.Address + evmStorageIDKeys = []string{ + state.AccountsStorageIDKey, + state.CodesStorageIDKey, + } +) + +var Cmd = &cobra.Command{ + Use: "check-storage", + Short: "Check storage health", + Run: run, +} + +const ( + ReporterName = "storage-health" +) + +func init() { + + Cmd.Flags().StringVar( + &flagPayloads, + "payloads", + "", + "Input payload file name", + ) + + Cmd.Flags().StringVar( + &flagState, + "state", + "", + "Input state file name", + ) + + Cmd.Flags().StringVar( + &flagStateCommitment, + "state-commitment", + "", + "Input state commitment", + ) + + Cmd.Flags().StringVar( + &flagOutputDirectory, + "output-directory", + "", + "Output directory", + ) + + _ = Cmd.MarkFlagRequired("output-directory") + + Cmd.Flags().IntVar( + &flagNWorker, + "n-workers", + 10, + "number of workers to use", + ) + + Cmd.Flags().StringVar( + &flagChain, + "chain", + "", + "Chain name", + ) + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().BoolVar( + &flagHasAccountFormatV1, + "account-format-v1", + false, + "State contains accounts in v1 format", + ) + + Cmd.Flags().BoolVar( + &flagHasAccountFormatV2, + "account-format-v2", + true, + "State contains accounts in v2 format", + ) + + Cmd.Flags().BoolVar( + &flagIsAccountStatusV4, + "account-status-v4", + false, + "State is migrated to account status v4 format", + ) + + Cmd.Flags().BoolVar( + &flagCheckContract, + "check-contract", + false, + "check stored contract", + ) +} + +func run(*cobra.Command, []string) { + + chainID := flow.ChainID(flagChain) + // Validate chain ID + _ = chainID.Chain() + + if flagPayloads == "" && flagState == "" { + log.Fatal().Msg("Either --payloads or --state must be provided") + } else if flagPayloads != "" && flagState != "" { + log.Fatal().Msg("Only one of --payloads or --state must be provided") + } + if flagState != "" && flagStateCommitment == "" { + log.Fatal().Msg("--state-commitment must be provided when --state is provided") + } + if !flagHasAccountFormatV1 && !flagHasAccountFormatV2 { + log.Fatal().Msg("both of 
--account-format-v1 and --account-format-v2, or one of them, must be true")
+	}
+
+	// Get EVM account by chain
+	evmAccount = systemcontracts.SystemContractsForChain(chainID).EVMStorage.Address
+
+	// Create report in JSONL format
+	rw := reporters.NewReportFileWriterFactoryWithFormat(flagOutputDirectory, log.Logger, reporters.ReportFormatJSONL).
+		ReportWriter(ReporterName)
+	defer rw.Close()
+
+	var payloads []*ledger.Payload
+	var err error
+
+	// Read payloads from payload file or checkpoint file
+
+	if flagPayloads != "" {
+		log.Info().Msgf("Reading payloads from %s", flagPayloads)
+
+		_, payloads, err = util.ReadPayloadFile(log.Logger, flagPayloads)
+		if err != nil {
+			log.Fatal().Err(err).Msg("failed to read payloads")
+		}
+	} else {
+		log.Info().Msgf("Reading trie %s", flagStateCommitment)
+
+		stateCommitment := util.ParseStateCommitment(flagStateCommitment)
+		payloads, err = util.ReadTrieForPayloads(flagState, stateCommitment)
+		if err != nil {
+			log.Fatal().Err(err).Msg("failed to read state")
+		}
+	}
+
+	log.Info().Msgf("Grouping %d payloads by accounts ...", len(payloads))
+
+	// Group payloads by accounts
+
+	payloadAccountGrouping := util.GroupPayloadsByAccount(log.Logger, payloads, flagNWorker)
+
+	log.Info().Msgf(
+		"Creating registers from grouped payloads (%d) ...",
+		len(payloads),
+	)
+
+	registersByAccount, err := util.NewByAccountRegistersFromPayloadAccountGrouping(payloadAccountGrouping, flagNWorker)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to create ByAccount registers from payloads")
+	}
+
+	accountCount := registersByAccount.AccountCount()
+
+	log.Info().Msgf(
+		"Created registers from payloads (%d accounts, %d payloads)",
+		accountCount,
+		len(payloads),
+	)
+
+	failedAccountAddresses, err := checkStorageHealth(registersByAccount, flagNWorker, rw)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to check storage health")
+	}
+
+	if len(failedAccountAddresses) == 0 {
+		log.Info().Msgf("All %d accounts are healthy", accountCount)
+		return
+	}
+
+	log.Info().Msgf(
+		"%d out of %d accounts reported storage health check issues. 
See report %s for more details.", + len(failedAccountAddresses), + accountCount, + ReporterName, + ) + + log.Info().Msgf("Accounts with storage health issues:") + for _, address := range failedAccountAddresses { + log.Info().Msgf(" %x", []byte(address)) + } +} + +func checkStorageHealth( + registersByAccount *registers.ByAccount, + nWorkers int, + rw reporters.ReportWriter, +) (failedAccountAddresses []string, err error) { + + accountCount := registersByAccount.AccountCount() + + nWorkers = min(accountCount, nWorkers) + + log.Info().Msgf("Checking storage health of %d accounts using %d workers ...", accountCount, nWorkers) + + logAccount := moduleUtil.LogProgress( + log.Logger, + moduleUtil.DefaultLogProgressConfig( + "processing account group", + accountCount, + ), + ) + + if nWorkers <= 1 { + // Skip goroutine to avoid overhead + err = registersByAccount.ForEachAccount( + func(accountRegisters *registers.AccountRegisters) error { + defer logAccount(1) + + accountStorageIssues := checkAccountStorageHealth(accountRegisters, nWorkers) + + if len(accountStorageIssues) > 0 { + failedAccountAddresses = append(failedAccountAddresses, accountRegisters.Owner()) + + for _, issue := range accountStorageIssues { + rw.Write(issue) + } + } + + return nil + }) + + return failedAccountAddresses, err + } + + type job struct { + accountRegisters *registers.AccountRegisters + } + + type result struct { + owner string + issues []accountStorageIssue + } + + jobs := make(chan job, nWorkers) + + results := make(chan result, nWorkers) + + g, ctx := errgroup.WithContext(context.Background()) + + // Launch goroutine to check account storage health + for i := 0; i < nWorkers; i++ { + g.Go(func() error { + for job := range jobs { + issues := checkAccountStorageHealth(job.accountRegisters, nWorkers) + + result := result{ + owner: job.accountRegisters.Owner(), + issues: issues, + } + + select { + case results <- result: + case <-ctx.Done(): + return ctx.Err() + } + + logAccount(1) + } + return nil + }) + } + + // Launch goroutine to wait for workers to finish and close results (output) channel + go func() { + defer close(results) + err = g.Wait() + }() + + // Launch goroutine to send job to jobs channel and close jobs (input) channel + go func() { + defer close(jobs) + + err = registersByAccount.ForEachAccount( + func(accountRegisters *registers.AccountRegisters) error { + jobs <- job{accountRegisters: accountRegisters} + return nil + }) + if err != nil { + log.Err(err).Msgf("failed to iterate accounts by registersByAccount") + } + }() + + // Gather results + for result := range results { + if len(result.issues) > 0 { + failedAccountAddresses = append(failedAccountAddresses, result.owner) + for _, issue := range result.issues { + rw.Write(issue) + } + } + } + + return failedAccountAddresses, err +} + +func checkAccountStorageHealth(accountRegisters *registers.AccountRegisters, nWorkers int) []accountStorageIssue { + owner := accountRegisters.Owner() + + address, err := common.BytesToAddress([]byte(owner)) + if err != nil { + return []accountStorageIssue{ + { + Address: address.Hex(), + Kind: storageErrorKindString[otherErrorKind], + Msg: err.Error(), + }} + } + + if isEVMAccount(address) { + return checkEVMAccountStorageHealth(address, accountRegisters) + } + + var issues []accountStorageIssue + + // Check atree storage health + + ledger := ®isters.ReadOnlyLedger{Registers: accountRegisters} + var config runtime.StorageConfig + storage := runtime.NewStorage(ledger, nil, config) + + // Check account format against 
specified flags.
+	err = checkAccountFormat(
+		ledger,
+		address,
+		flagHasAccountFormatV1,
+		flagHasAccountFormatV2,
+	)
+	if err != nil {
+		issues = append(
+			issues,
+			accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[storageFormatErrorKind],
+				Msg:     err.Error(),
+			},
+		)
+		return issues
+	}
+
+	inter, err := interpreter.NewInterpreter(
+		nil,
+		nil,
+		&interpreter.Config{
+			Storage: storage,
+		},
+	)
+	if err != nil {
+		issues = append(
+			issues,
+			accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[otherErrorKind],
+				Msg:     err.Error(),
+			},
+		)
+		return issues
+	}
+
+	err = util.CheckStorageHealth(inter, address, storage, accountRegisters, common.AllStorageDomains, nWorkers)
+	if err != nil {
+		issues = append(
+			issues,
+			accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[cadenceAtreeStorageErrorKind],
+				Msg:     err.Error(),
+			},
+		)
+	}
+
+	if flagIsAccountStatusV4 {
+		// Validate account public key storage
+		err = migrations.ValidateAccountPublicKeyV4(address, accountRegisters)
+		if err != nil {
+			issues = append(
+				issues,
+				accountStorageIssue{
+					Address: address.Hex(),
+					Kind:    storageErrorKindString[accountKeyErrorKind],
+					Msg:     err.Error(),
+				},
+			)
+		}
+
+		// Check account public keys
+		err = checkAccountPublicKeys(address, accountRegisters)
+		if err != nil {
+			issues = append(
+				issues,
+				accountStorageIssue{
+					Address: address.Hex(),
+					Kind:    storageErrorKindString[accountKeyErrorKind],
+					Msg:     err.Error(),
+				},
+			)
+		}
+	}
+
+	if flagCheckContract {
+		// Check contracts
+		err = checkContracts(address, accountRegisters)
+		if err != nil {
+			issues = append(
+				issues,
+				accountStorageIssue{
+					Address: address.Hex(),
+					Kind:    storageErrorKindString[contractErrorKind],
+					Msg:     err.Error(),
+				},
+			)
+		}
+	}
+
+	return issues
+}
+
+type storageErrorKind int
+
+const (
+	otherErrorKind storageErrorKind = iota
+	cadenceAtreeStorageErrorKind
+	evmAtreeStorageErrorKind
+	storageFormatErrorKind
+	accountKeyErrorKind
+	contractErrorKind
+)
+
+var storageErrorKindString = map[storageErrorKind]string{
+	otherErrorKind:               "error_check_storage_failed",
+	cadenceAtreeStorageErrorKind: "error_cadence_atree_storage",
+	evmAtreeStorageErrorKind:     "error_evm_atree_storage",
+	storageFormatErrorKind:       "error_storage_format",
+	accountKeyErrorKind:          "error_account_public_key",
+	contractErrorKind:            "error_contract",
+}
+
+type accountStorageIssue struct {
+	Address string
+	Kind    string
+	Msg     string
+}
+
+func hasDomainRegister(ledger atree.Ledger, address common.Address) (bool, error) {
+	for _, domain := range common.AllStorageDomains {
+		value, err := ledger.GetValue(address[:], []byte(domain.Identifier()))
+		if err != nil {
+			return false, err
+		}
+		if len(value) > 0 {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func hasAccountRegister(ledger atree.Ledger, address common.Address) (bool, error) {
+	value, err := ledger.GetValue(address[:], []byte(runtime.AccountStorageKey))
+	if err != nil {
+		return false, err
+	}
+	return len(value) > 0, nil
+}
+
+func checkAccountFormat(
+	ledger atree.Ledger,
+	address common.Address,
+	expectV1 bool,
+	expectV2 bool,
+) error {
+	// Skip empty address because it doesn't have any account or domain registers.
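+	// Note on terminology: "format v1" keeps an account's storage under
+	// per-domain registers, while "format v2" keeps it under a single account
+	// register (runtime.AccountStorageKey); a healthy account is expected to
+	// use exactly one of the two layouts, which is what this check verifies.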
+	if len(address) == 0 || address == common.ZeroAddress {
+		return nil
+	}
+
+	foundDomainRegister, err := hasDomainRegister(ledger, address)
+	if err != nil {
+		return err
+	}
+
+	foundAccountRegister, err := hasAccountRegister(ledger, address)
+	if err != nil {
+		return err
+	}
+
+	if !foundAccountRegister && !foundDomainRegister {
+		return fmt.Errorf("found neither domain nor account registers")
+	}
+
+	if foundAccountRegister && foundDomainRegister {
+		return fmt.Errorf("found both domain and account registers")
+	}
+
+	if foundAccountRegister && !expectV2 {
+		return fmt.Errorf("found account in format v2 while only accounts in format v1 are expected")
+	}
+
+	if foundDomainRegister && !expectV1 {
+		return fmt.Errorf("found account in format v1 while only accounts in format v2 are expected")
+	}
+
+	return nil
+}
+
+func checkAccountPublicKeys(
+	address common.Address,
+	accountRegisters *registers.AccountRegisters,
+) error {
+	// Skip empty address because it doesn't have any account status registers.
+	if len(address) == 0 || address == common.ZeroAddress {
+		return nil
+	}
+
+	accounts := newAccounts(accountRegisters)
+
+	keyCount, err := accounts.GetAccountPublicKeyCount(flow.BytesToAddress(address.Bytes()))
+	if err != nil {
+		return err
+	}
+
+	// Check keys
+	for keyIndex := range keyCount {
+		_, err = accounts.GetAccountPublicKey(flow.BytesToAddress(address.Bytes()), keyIndex)
+		if err != nil {
+			return err
+		}
+	}
+
+	// NOTE: no need to check unreachable keys here because they are checked in ValidateAccountPublicKeyV4().
+
+	return nil
+}
+
+func checkContracts(
+	address common.Address,
+	accountRegisters *registers.AccountRegisters,
+) error {
+	if len(address) == 0 || address == common.ZeroAddress {
+		return nil
+	}
+
+	accounts := newAccounts(accountRegisters)
+
+	contractNames, err := accounts.GetContractNames(flow.BytesToAddress(address.Bytes()))
+	if err != nil {
+		return err
+	}
+
+	// Check each contract
+	contractRegisterKeys := make(map[string]bool)
+	for _, contractName := range contractNames {
+		contractRegisterKeys[flow.ContractKey(contractName)] = true
+
+		_, err = accounts.GetContract(contractName, flow.BytesToAddress(address.Bytes()))
+		if err != nil {
+			return err
+		}
+	}
+
+	// Check unreachable contract registers
+	err = accountRegisters.ForEachKey(func(key string) error {
+		if strings.HasPrefix(key, flow.CodeKeyPrefix) && !contractRegisterKeys[key] {
+			return fmt.Errorf("found unreachable contract register %s, contract names %v", key, contractNames)
+		}
+		return nil
+	})
+
+	return err
+}
+
+func newAccounts(accountRegisters *registers.AccountRegisters) environment.Accounts {
+	// Create a new transaction state with a dummy hasher
+	// because we do not need spock proofs for migrations.
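+	// (dummyHasher, defined below, satisfies hash.Hasher with no-op methods;
+	// this check only reads registers, so the SPoCK state hash is never consumed.)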
+ transactionState := storageState.NewTransactionStateFromExecutionState( + storageState.NewExecutionStateWithSpockStateHasher( + registers.StorageSnapshot{ + Registers: accountRegisters, + }, + storageState.DefaultParameters(), + func() hash.Hasher { + return dummyHasher{} + }, + ), + ) + return environment.NewAccounts(transactionState) +} + +type dummyHasher struct{} + +func (d dummyHasher) Algorithm() hash.HashingAlgorithm { return hash.UnknownHashingAlgorithm } +func (d dummyHasher) Size() int { return 0 } +func (d dummyHasher) ComputeHash([]byte) hash.Hash { return nil } +func (d dummyHasher) Write([]byte) (int, error) { return 0, nil } +func (d dummyHasher) SumHash() hash.Hash { return nil } +func (d dummyHasher) Reset() {} diff --git a/cmd/util/cmd/check-storage/evm_account_storage_health.go b/cmd/util/cmd/check-storage/evm_account_storage_health.go new file mode 100644 index 00000000000..1a791438afa --- /dev/null +++ b/cmd/util/cmd/check-storage/evm_account_storage_health.go @@ -0,0 +1,517 @@ +package check_storage + +import ( + "bytes" + "cmp" + "fmt" + "slices" + + "github.com/onflow/cadence/interpreter" + "golang.org/x/exp/maps" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/runtime" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm/evm/emulator/state" + "github.com/onflow/flow-go/model/flow" +) + +var ( + compareSlabID = func(a, b atree.SlabID) int { + return a.Compare(b) + } + + equalSlabID = func(a, b atree.SlabID) bool { + return a.Compare(b) == 0 + } +) + +// checkEVMAccountStorageHealth checks storage health of cadence-atree +// registers and evm-atree registers in evm account. +func checkEVMAccountStorageHealth( + address common.Address, + accountRegisters *registers.AccountRegisters, +) []accountStorageIssue { + var issues []accountStorageIssue + + ledger := NewReadOnlyLedgerWithAtreeRegisterReadSet(accountRegisters) + + // Check health of cadence-atree registers. + issues = append( + issues, + checkCadenceAtreeRegistersInEVMAccount(address, ledger)..., + ) + + // Check health of evm-atree registers. + issues = append( + issues, + checkEVMAtreeRegistersInEVMAccount(address, ledger)..., + ) + + // Check unreferenced atree registers. + // If any atree registers are not accessed during health check of + // cadence-atree and evm-atree registers, these atree registers are + // unreferenced. + issues = append( + issues, + checkUnreferencedAtreeRegisters(address, ledger, accountRegisters)..., + ) + + return issues +} + +// checkCadenceAtreeRegistersInEVMAccount checks health of cadence-atree registers. +func checkCadenceAtreeRegistersInEVMAccount( + address common.Address, + ledger atree.Ledger, +) []accountStorageIssue { + var issues []accountStorageIssue + + storage := runtime.NewStorage(ledger, nil, runtime.StorageConfig{}) + + inter, err := interpreter.NewInterpreter( + nil, + nil, + &interpreter.Config{ + Storage: storage, + }, + ) + if err != nil { + issues = append( + issues, + accountStorageIssue{ + Address: address.Hex(), + Kind: storageErrorKindString[otherErrorKind], + Msg: fmt.Sprintf("failed to create interpreter for cadence registers: %s", err), + }, + ) + return issues + } + + // Load Cadence domains storage map, so atree slab iterator can traverse connected slabs from loaded root slab. + // NOTE: don't preload all atree slabs in evm account because evm-atree registers require evm-atree decoder. 
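+	// (The final false argument asks GetDomainStorageMap not to create a missing
+	// domain map, so this only loads existing domain root slabs for the
+	// CheckHealth call below.)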
+ + for _, domain := range common.AllStorageDomains { + _ = storage.GetDomainStorageMap(inter, address, domain, false) + } + + err = storage.CheckHealth() + if err != nil { + issues = append( + issues, + accountStorageIssue{ + Address: address.Hex(), + Kind: storageErrorKindString[cadenceAtreeStorageErrorKind], + Msg: fmt.Sprintf("cadence-atree registers health check failed in evm account: %s", err), + }) + } + + return issues +} + +// checkEVMAtreeRegistersInEVMAccount checks health of evm-atree registers. +func checkEVMAtreeRegistersInEVMAccount( + address common.Address, + ledger atree.Ledger, +) []accountStorageIssue { + var issues []accountStorageIssue + + baseStorage := atree.NewLedgerBaseStorage(ledger) + + storage, err := state.NewPersistentSlabStorage(baseStorage) + if err != nil { + issues = append( + issues, + accountStorageIssue{ + Address: address.Hex(), + Kind: storageErrorKindString[evmAtreeStorageErrorKind], + Msg: fmt.Sprintf("failed to create atree.PersistentSlabStorage for evm registers: %s", err), + }) + return issues + } + + domainSlabIDs := make(map[string]atree.SlabID) + + // Load evm domain root slabs. + for _, domain := range evmStorageIDKeys { + rawDomainSlabID, err := ledger.GetValue(address[:], []byte(domain)) + if err != nil { + issues = append(issues, accountStorageIssue{ + Address: address.Hex(), + Kind: storageErrorKindString[evmAtreeStorageErrorKind], + Msg: fmt.Sprintf("failed to get evm domain %s raw slab ID: %s", domain, err), + }) + continue + } + + if len(rawDomainSlabID) == 0 { + continue + } + + domainSlabID, err := atree.NewSlabIDFromRawBytes(rawDomainSlabID) + if err != nil { + issues = append(issues, accountStorageIssue{ + Address: address.Hex(), + Kind: storageErrorKindString[evmAtreeStorageErrorKind], + Msg: fmt.Sprintf("failed to convert evm domain %s raw slab ID %x to atree slab ID: %s", domain, rawDomainSlabID, err), + }) + continue + } + + // Retrieve evm domain storage register so slab iterator can traverse connected slabs from root slab. + + _, found, err := storage.Retrieve(domainSlabID) + if err != nil { + issues = append(issues, accountStorageIssue{ + Address: address.Hex(), + Kind: storageErrorKindString[evmAtreeStorageErrorKind], + Msg: fmt.Sprintf("failed to retrieve evm domain %s root slab %s: %s", domain, domainSlabID, err), + }) + continue + } + if !found { + issues = append(issues, accountStorageIssue{ + Address: address.Hex(), + Kind: storageErrorKindString[evmAtreeStorageErrorKind], + Msg: fmt.Sprintf("failed to find evm domain %s root slab %s", domain, domainSlabID), + }) + continue + } + + domainSlabIDs[domain] = domainSlabID + } + + if len(domainSlabIDs) == 0 { + return issues + } + + // Get evm storage slot slab IDs. + storageSlotSlabIDs, storageSlotIssues := getStorageSlotRootSlabIDs( + address, + domainSlabIDs[state.AccountsStorageIDKey], + storage) + + issues = append(issues, storageSlotIssues...) + + // Load evm storage slot slabs so slab iterator can traverse connected slabs in storage health check. 
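+	// (Retrieve presumably caches each retrieved root slab in the
+	// PersistentSlabStorage, making it reachable for the health check below.)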
+	for _, id := range storageSlotSlabIDs {
+		_, found, err := storage.Retrieve(id)
+		if err != nil {
+			issues = append(issues, accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+				Msg:     fmt.Sprintf("failed to retrieve evm storage slot %s: %s", id, err),
+			})
+			continue
+		}
+		if !found {
+			issues = append(issues, accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+				Msg:     fmt.Sprintf("failed to find evm storage slot %s", id),
+			})
+		}
+	}
+
+	// Expected root slabs include domain root slabs and storage slot root slabs.
+
+	expectedRootSlabIDs := make([]atree.SlabID, 0, len(domainSlabIDs)+len(storageSlotSlabIDs))
+	expectedRootSlabIDs = append(expectedRootSlabIDs, maps.Values(domainSlabIDs)...)
+	expectedRootSlabIDs = append(expectedRootSlabIDs, storageSlotSlabIDs...)
+
+	issues = append(
+		issues,
+		// Check storage health of evm-atree registers
+		checkHealthWithExpectedRootSlabIDs(address, storage, expectedRootSlabIDs)...,
+	)
+
+	return issues
+}
+
+// getStorageSlotRootSlabIDs returns evm storage slot root slab IDs.
+func getStorageSlotRootSlabIDs(
+	address common.Address,
+	accountStorageRootSlabID atree.SlabID,
+	storage *atree.PersistentSlabStorage,
+) ([]atree.SlabID, []accountStorageIssue) {
+
+	if accountStorageRootSlabID == atree.SlabIDUndefined {
+		return nil, nil
+	}
+
+	var issues []accountStorageIssue
+
+	// Load account storage map
+	m, err := atree.NewMapWithRootID(storage, accountStorageRootSlabID, atree.NewDefaultDigesterBuilder())
+	if err != nil {
+		issues = append(
+			issues,
+			accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+				Msg:     fmt.Sprintf("failed to load evm storage slot %s: %s", accountStorageRootSlabID, err),
+			})
+		return nil, issues
+	}
+
+	storageSlotRootSlabIDs := make(map[atree.SlabID]struct{})
+
+	// Iterate accounts in account storage map to get storage slot collection ID.
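+	// (Each entry maps an EVM address (ByteStringValue) to an encoded account
+	// (ByteStringValue); a decoded account's CollectionID, when non-empty, is the
+	// raw slab ID of that account's storage-slot collection root.)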
+	err = m.IterateReadOnly(func(key, value atree.Value) (bool, error) {
+		// Check address type
+		acctAddress, ok := key.(state.ByteStringValue)
+		if !ok {
+			issues = append(
+				issues,
+				accountStorageIssue{
+					Address: address.Hex(),
+					Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+					Msg:     fmt.Sprintf("expect evm account address as ByteStringValue, got %T", key),
+				})
+			return true, nil
+		}
+
+		// Check encoded account type
+		encodedAccount, ok := value.(state.ByteStringValue)
+		if !ok {
+			issues = append(
+				issues,
+				accountStorageIssue{
+					Address: address.Hex(),
+					Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+					Msg:     fmt.Sprintf("expect evm account as ByteStringValue, got %T", value),
+				})
+			return true, nil
+		}
+
+		// Decode account
+		acct, err := state.DecodeAccount(encodedAccount.Bytes())
+		if err != nil {
+			issues = append(
+				issues,
+				accountStorageIssue{
+					Address: address.Hex(),
+					Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+					Msg:     fmt.Sprintf("failed to decode account %x in evm account storage: %s", acctAddress.Bytes(), err),
+				})
+			return true, nil
+		}
+
+		storageSlotCollectionID := acct.CollectionID
+
+		if len(storageSlotCollectionID) == 0 {
+			return true, nil
+		}
+
+		storageSlotSlabID, err := atree.NewSlabIDFromRawBytes(storageSlotCollectionID)
+		if err != nil {
+			issues = append(
+				issues,
+				accountStorageIssue{
+					Address: address.Hex(),
+					Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+					Msg:     fmt.Sprintf("failed to convert storage slot collection ID %x to atree slab ID: %s", storageSlotCollectionID, err),
+				})
+			return true, nil
+		}
+
+		// Check storage slot is not double referenced.
+		if _, ok := storageSlotRootSlabIDs[storageSlotSlabID]; ok {
+			issues = append(
+				issues,
+				accountStorageIssue{
+					Address: address.Hex(),
+					Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+					Msg:     fmt.Sprintf("found storage slot collection %x referenced by multiple accounts", storageSlotCollectionID),
+				})
+		}
+
+		storageSlotRootSlabIDs[storageSlotSlabID] = struct{}{}
+
+		return true, nil
+	})
+	if err != nil {
+		issues = append(
+			issues,
+			accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+				Msg:     fmt.Sprintf("failed to iterate EVM account storage map: %s", err),
+			})
+	}
+
+	return maps.Keys(storageSlotRootSlabIDs), issues
+}
+
+// checkHealthWithExpectedRootSlabIDs checks atree registers in storage (loaded and connected registers).
+func checkHealthWithExpectedRootSlabIDs(
+	address common.Address,
+	storage *atree.PersistentSlabStorage,
+	expectedRootSlabIDs []atree.SlabID,
+) []accountStorageIssue {
+	var issues []accountStorageIssue
+
+	// Check atree storage health
+	rootSlabIDSet, err := atree.CheckStorageHealth(storage, -1)
+	if err != nil {
+		issues = append(
+			issues,
+			accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+				Msg:     fmt.Sprintf("evm atree storage check failed: %s", err),
+			})
+		return issues
+	}
+
+	// Check if returned root slab IDs match expected root slab IDs.
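+	// (A reported root missing from the expected set presumably indicates an
+	// orphaned root slab; an expected root that was not reported suggests a
+	// broken or unregistered reference.)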
+
+	rootSlabIDs := maps.Keys(rootSlabIDSet)
+	slices.SortFunc(rootSlabIDs, compareSlabID)
+
+	slices.SortFunc(expectedRootSlabIDs, compareSlabID)
+
+	if !slices.EqualFunc(expectedRootSlabIDs, rootSlabIDs, equalSlabID) {
+		issues = append(
+			issues,
+			accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+				Msg:     fmt.Sprintf("root slabs %v from storage health check != expected root slabs %v", rootSlabIDs, expectedRootSlabIDs),
+			})
+	}
+
+	return issues
+}
+
+// checkUnreferencedAtreeRegisters checks if all atree registers in the account have been read through the ledger.
+func checkUnreferencedAtreeRegisters(
+	address common.Address,
+	ledger *ReadOnlyLedgerWithAtreeRegisterReadSet,
+	accountRegisters *registers.AccountRegisters,
+) []accountStorageIssue {
+	var issues []accountStorageIssue
+
+	allAtreeRegisterIDs, err := getAtreeRegisterIDsFromRegisters(accountRegisters)
+	if err != nil {
+		issues = append(
+			issues,
+			accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[otherErrorKind],
+				Msg:     fmt.Sprintf("failed to get atree register IDs from account registers: %s", err),
+			})
+		return issues
+	}
+
+	// Check for unreferenced atree slabs by verifying that all atree slabs in accountRegisters were read
+	// during the storage health check for evm-atree and cadence-atree registers.
+
+	if ledger.GetAtreeRegisterReadCount() == len(allAtreeRegisterIDs) {
+		return issues
+	}
+
+	if ledger.GetAtreeRegisterReadCount() > len(allAtreeRegisterIDs) {
+		issues = append(
+			issues,
+			accountStorageIssue{
+				Address: address.Hex(),
+				Kind:    storageErrorKindString[otherErrorKind],
+				Msg: fmt.Sprintf("%d atree registers were read > %d atree registers in evm account",
+					ledger.GetAtreeRegisterReadCount(),
+					len(allAtreeRegisterIDs)),
+			})
+		return issues
+	}
+
+	unreferencedAtreeRegisterIDs := make([]flow.RegisterID, 0, len(allAtreeRegisterIDs)-ledger.GetAtreeRegisterReadCount())
+
+	for _, id := range allAtreeRegisterIDs {
+		if !ledger.IsAtreeRegisterRead(id) {
+			unreferencedAtreeRegisterIDs = append(unreferencedAtreeRegisterIDs, id)
+		}
+	}
+
+	slices.SortFunc(unreferencedAtreeRegisterIDs, func(a, b flow.RegisterID) int {
+		return cmp.Compare(a.Key, b.Key)
+	})
+
+	issues = append(issues, accountStorageIssue{
+		Address: address.Hex(),
+		Kind:    storageErrorKindString[evmAtreeStorageErrorKind],
+		Msg: fmt.Sprintf(
+			"number of read atree slabs %d != number of atree slabs in storage %d: unreferenced atree registers %v",
+			ledger.GetAtreeRegisterReadCount(),
+			len(allAtreeRegisterIDs),
+			unreferencedAtreeRegisterIDs,
+		),
+	})
+
+	return issues
+}
+
+func getAtreeRegisterIDsFromRegisters(registers registers.Registers) ([]flow.RegisterID, error) {
+	registerIDs := make([]flow.RegisterID, 0, registers.Count())
+
+	err := registers.ForEach(func(owner string, key string, _ []byte) error {
+		if !flow.IsSlabIndexKey(key) {
+			return nil
+		}
+
+		registerIDs = append(
+			registerIDs,
+			flow.NewRegisterID(flow.BytesToAddress([]byte(owner)), key),
+		)
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return registerIDs, nil
+}
+
+func isEVMAccount(owner common.Address) bool {
+	return bytes.Equal(owner[:], evmAccount[:])
+}
+
+type ReadOnlyLedgerWithAtreeRegisterReadSet struct {
+	*registers.ReadOnlyLedger
+	atreeRegistersReadSet map[flow.RegisterID]struct{}
+}
+
+var _ atree.Ledger = &ReadOnlyLedgerWithAtreeRegisterReadSet{}
+
+func NewReadOnlyLedgerWithAtreeRegisterReadSet(
+	accountRegisters *registers.AccountRegisters,
+)
*ReadOnlyLedgerWithAtreeRegisterReadSet {
+	return &ReadOnlyLedgerWithAtreeRegisterReadSet{
+		ReadOnlyLedger:        &registers.ReadOnlyLedger{Registers: accountRegisters},
+		atreeRegistersReadSet: make(map[flow.RegisterID]struct{}),
+	}
+}
+
+func (l *ReadOnlyLedgerWithAtreeRegisterReadSet) GetValue(address, key []byte) (value []byte, err error) {
+	value, err = l.ReadOnlyLedger.GetValue(address, key)
+	if err != nil {
+		return nil, err
+	}
+
+	if flow.IsSlabIndexKey(string(key)) {
+		registerID := flow.NewRegisterID(flow.BytesToAddress(address), string(key))
+		l.atreeRegistersReadSet[registerID] = struct{}{}
+	}
+	return value, nil
+}
+
+func (l *ReadOnlyLedgerWithAtreeRegisterReadSet) GetAtreeRegisterReadCount() int {
+	return len(l.atreeRegistersReadSet)
+}
+
+func (l *ReadOnlyLedgerWithAtreeRegisterReadSet) IsAtreeRegisterRead(id flow.RegisterID) bool {
+	_, ok := l.atreeRegistersReadSet[id]
+	return ok
+}
diff --git a/cmd/util/cmd/check-storage/evm_account_storage_health_test.go b/cmd/util/cmd/check-storage/evm_account_storage_health_test.go
new file mode 100644
index 00000000000..aa8cf3a3f1f
--- /dev/null
+++ b/cmd/util/cmd/check-storage/evm_account_storage_health_test.go
@@ -0,0 +1,158 @@
+package check_storage
+
+import (
+	"strconv"
+	"testing"
+
+	"github.com/holiman/uint256"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/exp/maps"
+
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/onflow/atree"
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/interpreter"
+	"github.com/onflow/cadence/runtime"
+
+	"github.com/onflow/flow-go/cmd/util/ledger/util"
+	"github.com/onflow/flow-go/cmd/util/ledger/util/registers"
+	"github.com/onflow/flow-go/fvm/evm/emulator/state"
+	"github.com/onflow/flow-go/fvm/evm/testutils"
+	"github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/convert"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+func TestEVMAccountStorageHealth(t *testing.T) {
+	address := common.Address{1}
+
+	t.Run("has storage slot", func(t *testing.T) {
+		led := createPayloadLedger()
+
+		createEVMStorage(t, led, address)
+
+		createCadenceStorage(t, led, address)
+
+		payloads := maps.Values(led.Payloads)
+
+		accountRegisters, err := registers.NewAccountRegistersFromPayloads(string(address[:]), payloads)
+		require.NoError(t, err)
+
+		issues := checkEVMAccountStorageHealth(
+			address,
+			accountRegisters,
+		)
+		require.Equal(t, 0, len(issues))
+	})
+
+	t.Run("unreferenced slabs", func(t *testing.T) {
+		led := createPayloadLedger()
+
+		createEVMStorage(t, led, address)
+
+		createCadenceStorage(t, led, address)
+
+		payloads := maps.Values(led.Payloads)
+
+		// Add unreferenced slabs
+		slabIndex, err := led.AllocateSlabIndexFunc(address[:])
+		require.NoError(t, err)
+
+		registerID := flow.NewRegisterID(
+			flow.BytesToAddress(address[:]),
+			string(atree.SlabIndexToLedgerKey(slabIndex)))
+
+		unreferencedPayload := ledger.NewPayload(
+			convert.RegisterIDToLedgerKey(registerID),
+			ledger.Value([]byte{1}))
+
+		payloads = append(payloads, unreferencedPayload)
+
+		accountRegisters, err := registers.NewAccountRegistersFromPayloads(string(address[:]), payloads)
+		require.NoError(t, err)
+
+		issues := checkEVMAccountStorageHealth(
+			address,
+			accountRegisters,
+		)
+		require.Equal(t, 1, len(issues))
+		require.Equal(t, storageErrorKindString[evmAtreeStorageErrorKind], issues[0].Kind)
+		require.Contains(t, issues[0].Msg, "unreferenced atree registers")
+	})
+}
+
+func createEVMStorage(t *testing.T, ledger
atree.Ledger, address common.Address) { + view, err := state.NewBaseView(ledger, flow.BytesToAddress(address[:])) + require.NoError(t, err) + + // Create an account without storage slot + addr1 := testutils.RandomCommonAddress(t) + + err = view.CreateAccount(addr1, uint256.NewInt(1), 2, []byte("ABC"), gethCommon.Hash{3, 4, 5}) + require.NoError(t, err) + + // Create an account with storage slot + addr2 := testutils.RandomCommonAddress(t) + + err = view.CreateAccount(addr2, uint256.NewInt(6), 7, []byte("DEF"), gethCommon.Hash{8, 9, 19}) + require.NoError(t, err) + + slot := types.SlotAddress{ + Address: addr2, + Key: testutils.RandomCommonHash(t), + } + + err = view.UpdateSlot(slot, testutils.RandomCommonHash(t)) + require.NoError(t, err) + + err = view.Commit() + require.NoError(t, err) +} + +func createCadenceStorage(t *testing.T, ledger atree.Ledger, address common.Address) { + + storage := runtime.NewStorage(ledger, nil, runtime.StorageConfig{}) + + inter, err := interpreter.NewInterpreter( + nil, + nil, + &interpreter.Config{ + Storage: storage, + }, + ) + require.NoError(t, err) + + // Create storage and public domains + for _, domain := range []common.StorageDomain{ + common.StorageDomainPathStorage, + common.StorageDomainPathPublic, + } { + storageDomain := storage.GetDomainStorageMap(inter, address, domain, true) + + // Create large domain map so there are more than one atree registers under the hood. + for i := 0; i < 100; i++ { + domainStr := domain.Identifier() + key := interpreter.StringStorageMapKey(domainStr + "_key_" + strconv.Itoa(i)) + value := interpreter.NewUnmeteredStringValue(domainStr + "_value_" + strconv.Itoa(i)) + storageDomain.SetValue(inter, key, value) + } + } + + // Commit domain data + err = storage.Commit(inter, false) + require.NoError(t, err) +} + +func createPayloadLedger() *util.PayloadsLedger { + nextSlabIndex := atree.SlabIndex{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1} + + return &util.PayloadsLedger{ + Payloads: make(map[flow.RegisterID]*ledger.Payload), + AllocateSlabIndexFunc: func([]byte) (atree.SlabIndex, error) { + var slabIndex atree.SlabIndex + slabIndex, nextSlabIndex = nextSlabIndex, nextSlabIndex.Next() + return slabIndex, nil + }, + } +} diff --git a/cmd/util/cmd/checkpoint-collect-stats/account_stats.go b/cmd/util/cmd/checkpoint-collect-stats/account_stats.go new file mode 100644 index 00000000000..2fc2a0bd1d2 --- /dev/null +++ b/cmd/util/cmd/checkpoint-collect-stats/account_stats.go @@ -0,0 +1,101 @@ +package checkpoint_collect_stats + +import ( + "cmp" + "slices" + + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" +) + +type accountFormat uint8 + +const ( + accountFormatUnknown accountFormat = iota + accountFormatV1 + accountFormatV2 +) + +func (format accountFormat) MarshalJSON() ([]byte, error) { + switch format { + case accountFormatV1: + return []byte("\"v1\""), nil + + case accountFormatV2: + return []byte("\"v2\""), nil + + default: + return []byte("\"unknown\""), nil + } +} + +type AccountStats struct { + stats + FormatV1Count int `json:"account_format_v1_count"` + FormatV2Count int `json:"account_format_v2_count"` + ServiceAccount *AccountInfo `json:"service_account,omitempty"` + EVMAccount *AccountInfo `json:"evm_account,omitempty"` + TopN []*AccountInfo `json:"largest_accounts"` +} + +type AccountInfo struct { + Address string `json:"address"` + Format accountFormat `json:"account_format"` + PayloadCount uint64 `json:"payload_count"` + PayloadSize 
uint64 `json:"payload_size"` +} + +func getAccountStatus( + chainID flow.ChainID, + accounts map[string]*AccountInfo, +) AccountStats { + accountsSlice := make([]*AccountInfo, 0, len(accounts)) + accountSizesSlice := make([]float64, 0, len(accounts)) + + var accountFormatV1Count, accountFormatV2Count int + + for _, acct := range accounts { + accountsSlice = append(accountsSlice, acct) + accountSizesSlice = append(accountSizesSlice, float64(acct.PayloadSize)) + + switch acct.Format { + case accountFormatV1: + accountFormatV1Count++ + + case accountFormatV2: + accountFormatV2Count++ + + default: + if acct.Address != "" { + log.Info().Msgf("found account without account register nor domain register: %x", acct.Address) + } + } + } + + // Sort accounts by payload size in descending order + slices.SortFunc(accountsSlice, func(a, b *AccountInfo) int { + return cmp.Compare(b.PayloadSize, a.PayloadSize) + }) + + stats := getValueStats(accountSizesSlice, percentiles) + + evmAccountAddress := systemcontracts.SystemContractsForChain(chainID).EVMStorage.Address + + serviceAccountAddress := serviceAccountAddressForChain(chainID) + + return AccountStats{ + stats: stats, + FormatV1Count: accountFormatV1Count, + FormatV2Count: accountFormatV2Count, + ServiceAccount: accounts[string(serviceAccountAddress[:])], + EVMAccount: accounts[string(evmAccountAddress[:])], + TopN: accountsSlice[:flagTopN], + } +} + +func serviceAccountAddressForChain(chainID flow.ChainID) flow.Address { + sc := systemcontracts.SystemContractsForChain(chainID) + return sc.FlowServiceAccount.Address +} diff --git a/cmd/util/cmd/checkpoint-collect-stats/cmd.go b/cmd/util/cmd/checkpoint-collect-stats/cmd.go index cf74b467758..b5fa913e76e 100644 --- a/cmd/util/cmd/checkpoint-collect-stats/cmd.go +++ b/cmd/util/cmd/checkpoint-collect-stats/cmd.go @@ -1,14 +1,14 @@ package checkpoint_collect_stats import ( - "bufio" - "encoding/json" + "cmp" + "encoding/hex" "math" - "os" - "path/filepath" + "slices" "strings" + "sync" - "github.com/montanaflynn/stats" + "github.com/onflow/cadence/common" "github.com/pkg/profile" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -17,9 +17,14 @@ import ( "github.com/onflow/atree" + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/fvm/evm/emulator/state" + "github.com/onflow/flow-go/fvm/evm/handler" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/pathfinder" "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" "github.com/onflow/flow-go/ledger/complete/wal" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" @@ -27,61 +32,286 @@ import ( ) var ( - flagCheckpointDir string - flagOutputDir string - flagMemProfile bool + flagCheckpointDir string + flagStateCommitment string + flagPayloads string + flagOutputDir string + flagChain string + flagTopN int + flagMemProfile bool ) +const ( + ledgerStatsReportName = "ledger-stats" + accountStatsReportName = "account-stats" +) + +const ( + // NOTE: this constant is defined in github.com/onflow/cadence/runtime/storage.go + // Use this contant directly from cadence runtime package after dependency is updated. 
+ AccountStorageKey = "stored" +) + +const ( + domainTypePrefix = "domain " + payloadChannelBufferSize = 100_000 + initialAccountMapSize = 5_000_000 +) + +const ( + // EVM register keys from fvm/evm/handler/blockHashList.go + blockHashListMetaKey = "BlockHashListMeta" + blockHashListBucketKeyPrefix = "BlockHashListBucket" +) + +// percentiles are Tukey's seven-number summary (without +// the 0 and 100 because min and max are always included). +var percentiles = []float64{12.5, 25.0, 50.0, 75.0, 87.5} + var Cmd = &cobra.Command{ Use: "checkpoint-collect-stats", - Short: "collects stats on tries stored in a checkpoint", - Run: run, + Short: "collects stats on tries stored in a checkpoint, or payloads from a payloads file", + Long: `checkpoint-collect-stats collects stats on tries stored in a checkpoint, or payloads from a payloads file. +Two kinds of input data are supported: +- checkpoint file(s) ("--checkpoint-dir" with optional "--state-commitment"), or +- payloads file ("--payload-filename")`, + Run: run, } func init() { Cmd.Flags().StringVar(&flagCheckpointDir, "checkpoint-dir", "", "Directory to load checkpoint files from") - _ = Cmd.MarkFlagRequired("checkpoint-dir") + + // state-commitment is optional. + // When provided, this program only gathers stats on trie with matching state commitment. + Cmd.Flags().StringVar(&flagStateCommitment, "state-commitment", "", + "Trie state commitment") + + Cmd.Flags().StringVar(&flagPayloads, "payload-filename", "", + "Payloads file name to load payloads from") Cmd.Flags().StringVar(&flagOutputDir, "output-dir", "", "Directory to write checkpoint stats to") _ = Cmd.MarkFlagRequired("output-dir") + Cmd.Flags().IntVar(&flagTopN, "top-n", 10, + "number of largest payloads or accounts to report") + + Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name") + _ = Cmd.MarkFlagRequired("chain") + Cmd.Flags().BoolVar(&flagMemProfile, "mem-profile", false, "Enable memory profiling") } -type Stats struct { - LedgerStats *complete.LedgerStats +type LedgerStats struct { + LedgerStats *complete.LedgerStats `json:",omitempty"` PayloadStats *PayloadStats } type PayloadStats struct { + TotalPayloadCount uint64 `json:"total_payload_count"` TotalPayloadSize uint64 `json:"total_payload_size"` TotalPayloadValueSize uint64 `json:"total_payload_value_size"` StatsByTypes []RegisterStatsByTypes `json:"stats_by_types"` + TopN []PayloadInfo `json:"largest_payloads"` } type RegisterStatsByTypes struct { - Type string `json:"type"` - Counts uint64 `json:"counts"` - ValueSizeTotal float64 `json:"value_size_total"` - ValueSizeMin float64 `json:"value_size_min"` - ValueSize25thPercentile float64 `json:"value_size_25th_percentile"` - ValueSizeMedian float64 `json:"value_size_median"` - ValueSize75thPercentile float64 `json:"value_size_75th_percentile"` - ValueSize95thPercentile float64 `json:"value_size_95th_percentile"` - ValueSizeMax float64 `json:"value_size_max"` + Type string `json:"type"` + stats + SubTypes []RegisterStatsByTypes `json:"subtypes,omitempty"` +} + +type PayloadInfo struct { + Address string `json:"address"` + Key string `json:"key"` + Type string `json:"type"` + Size uint64 `json:"size"` } type sizesByType map[string][]float64 func run(*cobra.Command, []string) { + if flagPayloads == "" && flagCheckpointDir == "" { + log.Fatal().Msg("Either --payload-filename or --checkpoint-dir must be provided") + } + if flagPayloads != "" && flagCheckpointDir != "" { + log.Fatal().Msg("Only one of --payload-filename or --checkpoint-dir must be provided") + } + if 
flagCheckpointDir == "" && flagStateCommitment != "" { + log.Fatal().Msg("--checkpont-dir must be provided when --state-commitment is provided") + } + + chainID := flow.ChainID(flagChain) + // Validate chain ID + _ = chainID.Chain() + if flagMemProfile { defer profile.Start(profile.MemProfile).Stop() } + payloadChannel := make(chan *ledger.Payload, payloadChannelBufferSize) + + ledgerStatsChannel := make(chan *complete.LedgerStats, 1) + + // Load execution state and retrieve payloads async + go getPayloadsAsync(payloadChannel, ledgerStatsChannel) + + var totalPayloadCount, totalPayloadSize, totalPayloadValueSize uint64 + + largestPayloads := util.NewTopN[PayloadInfo]( + flagTopN, + func(a, b PayloadInfo) bool { + return a.Size < b.Size + }, + ) + + valueSizesByType := make(sizesByType, 0) + + accounts := make(map[string]*AccountInfo, initialAccountMapSize) + + // Process payloads until payloadChannel is closed + for p := range payloadChannel { + key, err := p.Key() + if err != nil { + log.Fatal().Err(err).Msg("cannot load a key") + } + + address := key.KeyParts[0].Value + + size := p.Size() + value := p.Value() + valueSize := value.Size() + + // Update total payload size and count + totalPayloadSize += uint64(size) + totalPayloadValueSize += uint64(valueSize) + totalPayloadCount++ + + // Update payload sizes by type + typ := getRegisterType(key) + valueSizesByType[typ] = append(valueSizesByType[typ], float64(valueSize)) + + // Update top N largest payloads + _, _ = largestPayloads.Add( + PayloadInfo{ + Address: hex.EncodeToString(address), + Key: hex.EncodeToString(key.KeyParts[1].Value), + Type: typ, + Size: uint64(valueSize), + }) + + // Update accounts + account, exist := accounts[string(address)] + if !exist { + account = &AccountInfo{ + Address: hex.EncodeToString(address), + } + accounts[string(address)] = account + } + account.PayloadCount++ + account.PayloadSize += uint64(size) + + // Update account format + if isAccountRegister(key) { + if account.Format == accountFormatV1 { + log.Error().Msgf("found account register while domain register exists for %x", address) + } else { + account.Format = accountFormatV2 + } + } else if isDomainRegister(key) { + if account.Format == accountFormatV2 { + log.Error().Msgf("found domain register while account register exists for %x", address) + } else { + account.Format = accountFormatV1 + } + } + } + + // At this point, all payload are processed. 
+ + ledgerStats := <-ledgerStatsChannel + + var wg sync.WaitGroup + wg.Add(2) + + // Collect and write ledger stats + go func() { + defer wg.Done() + + statsByTypes := getRegisterStats(valueSizesByType) + + // Sort top N largest payloads by payload size in descending order + slices.SortFunc(largestPayloads.Tree, func(a, b PayloadInfo) int { + return cmp.Compare(b.Size, a.Size) + }) + + stats := &LedgerStats{ + LedgerStats: ledgerStats, + PayloadStats: &PayloadStats{ + TotalPayloadCount: totalPayloadCount, + TotalPayloadSize: totalPayloadSize, + TotalPayloadValueSize: totalPayloadValueSize, + StatsByTypes: statsByTypes, + TopN: largestPayloads.Tree, + }, + } + + writeStats(ledgerStatsReportName, stats) + }() + + // Collect and write account stats + go func() { + defer wg.Done() + + acctStats := getAccountStatus(chainID, accounts) + + writeStats(accountStatsReportName, acctStats) + }() + + wg.Wait() +} + +func getPayloadsAsync( + payloadChannel chan<- *ledger.Payload, + ledgerStatsChannel chan<- *complete.LedgerStats, +) { + defer close(payloadChannel) + defer close(ledgerStatsChannel) + + payloadCallback := func(payload *ledger.Payload) { + payloadChannel <- payload + } + + useCheckpointFile := flagPayloads == "" + + if useCheckpointFile { + ledgerStatsChannel <- getPayloadStatsFromCheckpoint(payloadCallback) + } else { + getPayloadStatsFromPayloadFile(payloadCallback) + } +} + +func getPayloadStatsFromPayloadFile(payloadCallBack func(payload *ledger.Payload)) { + memAllocBefore := debug.GetHeapAllocsBytes() + log.Info().Msgf("loading payloads from %v", flagPayloads) + + _, payloads, err := util.ReadPayloadFile(log.Logger, flagPayloads) + if err != nil { + log.Fatal().Err(err).Msg("failed to read payloads") + } + + memAllocAfter := debug.GetHeapAllocsBytes() + log.Info().Msgf("%d payloads are loaded, mem usage: %d", len(payloads), memAllocAfter-memAllocBefore) + + for _, p := range payloads { + payloadCallBack(p) + } +} + +func getPayloadStatsFromCheckpoint(payloadCallBack func(payload *ledger.Payload)) *complete.LedgerStats { memAllocBefore := debug.GetHeapAllocsBytes() log.Info().Msgf("loading checkpoint(s) from %v", flagCheckpointDir) @@ -93,7 +323,7 @@ func run(*cobra.Command, []string) { if err != nil { log.Fatal().Err(err).Msg("cannot create ledger from write-a-head logs and checkpoints") } - compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), complete.DefaultCacheSize, math.MaxInt, 1, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), complete.DefaultCacheSize, math.MaxInt, 1, atomic.NewBool(false), &metrics.NoopCollector{}) if err != nil { log.Fatal().Err(err).Msg("cannot create compactor") } @@ -106,142 +336,170 @@ func run(*cobra.Command, []string) { memAllocAfter := debug.GetHeapAllocsBytes() log.Info().Msgf("the checkpoint is loaded, mem usage: %d", memAllocAfter-memAllocBefore) - var totalPayloadSize, totalPayloadValueSize uint64 - var value ledger.Value - var key ledger.Key - var size, valueSize int - - valueSizesByType := make(sizesByType, 0) - ledgerStats, err := led.CollectStats(func(p *ledger.Payload) { - key, err = p.Key() - if err != nil { - log.Fatal().Err(err).Msg("cannot load a key") - } - - size = p.Size() - value = p.Value() - valueSize = value.Size() - totalPayloadSize += uint64(size) - totalPayloadValueSize += uint64(valueSize) - valueSizesByType[getType(key)] = append(valueSizesByType[getType(key)], float64(valueSize)) - }) + var tries []*trie.MTrie - statsByTypes := make([]RegisterStatsByTypes, 0) - for 
t, values := range valueSizesByType { + if flagStateCommitment != "" { + stateCommitment := util.ParseStateCommitment(flagStateCommitment) - sum, err := stats.Sum(values) + t, err := led.FindTrieByStateCommit(stateCommitment) if err != nil { - log.Fatal().Err(err).Msg("cannot compute the sum of values") + log.Fatal().Err(err).Msgf("failed to find trie with state commitment %x", stateCommitment) } - - min, err := stats.Min(values) - if err != nil { - log.Fatal().Err(err).Msg("cannot compute the min of values") + if t == nil { + log.Fatal().Msgf("no trie with state commitment %x", stateCommitment) } - percentile25, err := stats.Percentile(values, 25) + tries = append(tries, t) + } else { + ts, err := led.Tries() if err != nil { - log.Fatal().Err(err).Msg("cannot compute the 25th percentile of values") + log.Fatal().Err(err).Msg("failed to get tries") } - median, err := stats.Median(values) - if err != nil { - log.Fatal().Err(err).Msg("cannot compute the median of values") - } + tries = append(tries, ts...) + } - percentile75, err := stats.Percentile(values, 75) - if err != nil { - log.Fatal().Err(err).Msg("cannot compute the 75th percentile of values") - } + log.Info().Msgf("collecting stats on %d tries", len(tries)) - percentile95, err := stats.Percentile(values, 95) - if err != nil { - log.Fatal().Err(err).Msg("cannot compute the 95th percentile of values") - } + ledgerStats, err := complete.CollectStats(tries, payloadCallBack) + if err != nil { + log.Fatal().Err(err).Msg("failed to collect stats") + } - max, err := stats.Max(values) - if err != nil { - log.Fatal().Err(err).Msg("cannot compute the max of values") + return ledgerStats +} + +func getRegisterStats(valueSizesByType sizesByType) []RegisterStatsByTypes { + domainStats := make([]RegisterStatsByTypes, 0, len(common.AllStorageDomains)) + var allDomainSizes []float64 + + statsByTypes := make([]RegisterStatsByTypes, 0, len(valueSizesByType)) + for t, values := range valueSizesByType { + + stats := RegisterStatsByTypes{ + Type: t, + stats: getValueStats(values, percentiles), } - statsByTypes = append(statsByTypes, - RegisterStatsByTypes{ - Type: t, - Counts: uint64(len(values)), - ValueSizeTotal: sum, - ValueSizeMin: min, - ValueSize25thPercentile: percentile25, - ValueSizeMedian: median, - ValueSize75thPercentile: percentile75, - ValueSize95thPercentile: percentile95, - ValueSizeMax: max, - }) + if isDomainType(t) { + domainStats = append(domainStats, stats) + allDomainSizes = append(allDomainSizes, values...) 
+ } else { + statsByTypes = append(statsByTypes, stats) + } } - if err != nil { - log.Fatal().Err(err).Msg("failed to collect stats") + allDomainStats := RegisterStatsByTypes{ + Type: "domain", + stats: getValueStats(allDomainSizes, percentiles), + SubTypes: domainStats, } - stats := &Stats{ - LedgerStats: ledgerStats, - PayloadStats: &PayloadStats{ - TotalPayloadSize: totalPayloadSize, - TotalPayloadValueSize: totalPayloadValueSize, - StatsByTypes: statsByTypes, - }, - } + statsByTypes = append(statsByTypes, allDomainStats) - path := filepath.Join(flagOutputDir, "ledger.stats.json") + // Sort domain stats by payload count in descending order + slices.SortFunc(allDomainStats.SubTypes, func(a, b RegisterStatsByTypes) int { + return cmp.Compare(b.Count, a.Count) + }) - fi, err := os.Create(path) - if err != nil { - log.Fatal().Err(err).Msg("failed to create path") - } - defer fi.Close() + // Sort stats by payload count in descending order + slices.SortFunc(statsByTypes, func(a, b RegisterStatsByTypes) int { + return cmp.Compare(b.Count, a.Count) + }) + + return statsByTypes +} - writer := bufio.NewWriter(fi) - defer writer.Flush() +func writeStats(reportName string, stats interface{}) { + rw := reporters.NewReportFileWriterFactory(flagOutputDir, log.Logger). + ReportWriter(reportName) + defer rw.Close() - encoder := json.NewEncoder(writer) + rw.Write(stats) +} - err = encoder.Encode(stats) - if err != nil { - log.Fatal().Err(err).Msg("could not json encode ledger stats") +func isDomainType(typ string) bool { + return strings.HasPrefix(typ, domainTypePrefix) +} + +func isDomainRegister(key ledger.Key) bool { + k := key.KeyParts[1].Value + kstr := string(k) + for _, storageDomain := range common.AllStorageDomains { + if storageDomain.Identifier() == kstr { + return true + } } + return false } -func getType(key ledger.Key) string { +func isAccountRegister(key ledger.Key) bool { + k := key.KeyParts[1].Value + kstr := string(k) + return kstr == AccountStorageKey +} + +func getRegisterType(key ledger.Key) string { k := key.KeyParts[1].Value kstr := string(k) if atree.LedgerKeyIsSlabKey(kstr) { - return "slab" + return "atree slab" + } + + _, isDomain := common.AllStorageDomainsByIdentifier[kstr] + if isDomain { + return domainTypePrefix + kstr } switch kstr { - case "storage": - return "account's cadence storage domain map" - case "private": - return "account's cadence private domain map" - case "public": - return "account's cadence public domain map" - case "contract": - return "account's cadence contract domain map" + case AccountStorageKey: + return "account" case flow.ContractNamesKey: return "contract names" case flow.AccountStatusKey: return "account status" - case "uuid": - return "uuid generator state" - case "account_address_state": + case flow.AddressStateKey: return "address generator state" + case state.AccountsStorageIDKey: + return "account storage ID" + case state.CodesStorageIDKey: + return "code storage ID" + case handler.BlockStoreLatestBlockKey: + return "latest block" + case handler.BlockStoreLatestBlockProposalKey: + return "latest block proposal" } + // other fvm registers + if kstr == "uuid" || strings.HasPrefix(kstr, "uuid_") { + return "uuid generator state" + } if strings.HasPrefix(kstr, "public_key_") { - return "public key" + return "legacy public key" + } + if kstr == "apk_0" { + return "account public key 0" + } + if strings.HasPrefix(kstr, flow.BatchPublicKeyRegisterKeyPrefix) { + return "batch public key" + } + if strings.HasPrefix(kstr, 
flow.SequenceNumberRegisterKeyPrefix) { + return "sequence number" } if strings.HasPrefix(kstr, flow.CodeKeyPrefix) { return "contract content" } + + // other evm registers + if strings.HasPrefix(kstr, blockHashListBucketKeyPrefix) { + return "block hash list bucket" + } + if strings.HasPrefix(kstr, blockHashListMetaKey) { + return "block hash list meta" + } + + log.Warn().Msgf("unknown payload key: %s", kstr) + return "others" } diff --git a/cmd/util/cmd/checkpoint-collect-stats/stats_utils.go b/cmd/util/cmd/checkpoint-collect-stats/stats_utils.go new file mode 100644 index 00000000000..a45bea98219 --- /dev/null +++ b/cmd/util/cmd/checkpoint-collect-stats/stats_utils.go @@ -0,0 +1,58 @@ +package checkpoint_collect_stats + +import ( + "cmp" + "slices" + + statslib "github.com/montanaflynn/stats" + "github.com/rs/zerolog/log" +) + +type percentileValue struct { + Percentile float64 `json:"percentile"` + Value float64 `json:"value"` +} + +type stats struct { + Count uint64 `json:"count"` + Sum float64 `json:"sum"` + Min float64 `json:"min"` + Percentiles []percentileValue + Max float64 `json:"max"` +} + +func getValueStats(values []float64, percentiles []float64) stats { + if len(values) == 0 { + return stats{} + } + + describe, err := statslib.Describe(values, true, &percentiles) + if err != nil { + log.Fatal().Err(err).Msg("cannot describe values") + } + + sum, err := statslib.Sum(values) + if err != nil { + log.Fatal().Err(err).Msg("cannot compute sum of values") + } + + percentileValues := make([]percentileValue, len(describe.DescriptionPercentiles)) + for i, pv := range describe.DescriptionPercentiles { + percentileValues[i] = percentileValue{ + Percentile: pv.Percentile, + Value: pv.Value, + } + } + + slices.SortFunc(percentileValues, func(a, b percentileValue) int { + return cmp.Compare(a.Percentile, b.Percentile) + }) + + return stats{ + Count: uint64(len(values)), + Sum: sum, + Min: describe.Min, + Max: describe.Max, + Percentiles: percentileValues, + } +} diff --git a/cmd/util/cmd/checkpoint-list-tries/cmd.go b/cmd/util/cmd/checkpoint-list-tries/cmd.go index 26e5ca01c8b..830075bc5c8 100644 --- a/cmd/util/cmd/checkpoint-list-tries/cmd.go +++ b/cmd/util/cmd/checkpoint-list-tries/cmd.go @@ -29,7 +29,7 @@ func init() { func run(*cobra.Command, []string) { log.Info().Msgf("loading checkpoint %v", flagCheckpoint) - tries, err := wal.LoadCheckpoint(flagCheckpoint, &log.Logger) + tries, err := wal.LoadCheckpoint(flagCheckpoint, log.Logger) if err != nil { log.Fatal().Err(err).Msg("error while loading checkpoint") } diff --git a/cmd/util/cmd/checkpoint-trie-stats/cmd.go b/cmd/util/cmd/checkpoint-trie-stats/cmd.go new file mode 100644 index 00000000000..327a4cf037b --- /dev/null +++ b/cmd/util/cmd/checkpoint-trie-stats/cmd.go @@ -0,0 +1,113 @@ +package checkpoint_trie_stats + +import ( + "errors" + "fmt" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/ledger/complete/mtrie/node" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" + "github.com/onflow/flow-go/ledger/complete/wal" +) + +var ( + flagCheckpoint string + flagTrieIndex int +) + +var Cmd = &cobra.Command{ + Use: "checkpoint-trie-stats", + Short: "List the trie node count by types in a checkpoint, show total payload size", + Run: run, +} + +func init() { + + Cmd.Flags().StringVar(&flagCheckpoint, "checkpoint", "", + "checkpoint file to read") + _ = Cmd.MarkFlagRequired("checkpoint") + Cmd.Flags().IntVar(&flagTrieIndex, "trie-index", 0, "trie index to read, 0 
being the first trie, -1 is the last trie")
+
+}
+
+func run(*cobra.Command, []string) {
+
+	log.Info().Msgf("loading checkpoint %v, reading %v-th trie", flagCheckpoint, flagTrieIndex)
+	res, err := scanCheckpoint(flagCheckpoint, flagTrieIndex, log.Logger)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to scan checkpoint")
+	}
+	log.Info().
+		Str("TrieRootHash", res.trieRootHash).
+		Int("InterimNodeCount", res.interimNodeCount).
+		Int("LeafNodeCount", res.leafNodeCount).
+		Int("TotalPayloadSize", res.totalPayloadSize).
+		Msgf("successfully scanned checkpoint %v", flagCheckpoint)
+}
+
+type result struct {
+	trieRootHash     string
+	interimNodeCount int
+	leafNodeCount    int
+	totalPayloadSize int
+}
+
+func readTrie(tries []*trie.MTrie, index int) (*trie.MTrie, error) {
+	if len(tries) == 0 {
+		return nil, errors.New("no tries available")
+	}
+
+	if index < -len(tries) || index >= len(tries) {
+		return nil, fmt.Errorf("index %d out of range", index)
+	}
+
+	if index < 0 {
+		return tries[len(tries)+index], nil
+	}
+
+	return tries[index], nil
+}
+
+func scanCheckpoint(checkpoint string, trieIndex int, log zerolog.Logger) (result, error) {
+	tries, err := wal.LoadCheckpoint(checkpoint, log)
+	if err != nil {
+		return result{}, fmt.Errorf("error while loading checkpoint: %w", err)
+	}
+
+	log.Info().
+		Int("total_tries", len(tries)).
+		Msg("checkpoint loaded")
+
+	t, err := readTrie(tries, trieIndex)
+	if err != nil {
+		return result{}, fmt.Errorf("error while reading trie: %w", err)
+	}
+
+	log.Info().Msgf("trie loaded, root hash: %v", t.RootHash())
+
+	res := &result{
+		trieRootHash:     t.RootHash().String(),
+		interimNodeCount: 0,
+		leafNodeCount:    0,
+		totalPayloadSize: 0,
+	}
+	processNode := func(n *node.Node) error {
+		if n.IsLeaf() {
+			res.leafNodeCount++
+			res.totalPayloadSize += n.Payload().Size()
+		} else {
+			res.interimNodeCount++
+		}
+		return nil
+	}
+
+	err = trie.TraverseNodes(t, processNode)
+	if err != nil {
+		return result{}, fmt.Errorf("failed to traverse the trie: %w", err)
+	}
+
+	return *res, nil
+}
diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go
new file mode 100644
index 00000000000..d5217dcb19d
--- /dev/null
+++ b/cmd/util/cmd/common/clusters.go
@@ -0,0 +1,186 @@
+package common
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/cadence"
+	cdcCommon "github.com/onflow/cadence/common"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/assignment"
+	"github.com/onflow/flow-go/model/flow/factory"
+	"github.com/onflow/flow-go/model/flow/filter"
+	"github.com/onflow/flow-go/module/signature"
+)
+
+// ConstructClusterAssignment generates a partially randomized collector cluster assignment with internal and partner nodes.
+// The number of nodes in each cluster is deterministic and only depends on the number of clusters
+// and the number of nodes. The distribution of internal and partner nodes is also deterministic
+// and only depends on the number of clusters and nodes.
+// The identity of internal and partner nodes in each cluster is non-deterministic and is randomized
+// using system entropy.
+// The function guarantees a specific constraint when partitioning the nodes into clusters:
+// each cluster must contain strictly more than 2/3 internal nodes. If the constraint can't be
+// satisfied, an exception is returned.
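+// For example, a cluster that receives 3 internal and 1 partner collector satisfies
+// the constraint (3 > 2*1), while 2 internal and 1 partner does not (2 > 2*1 fails);
+// this matches the constraint[i] > 0 check in the implementation below.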
+// Note that if an exception is returned with a certain number of internal/partner nodes, there is no chance
+// that the assignment will succeed by re-running the function without increasing the ratio of internal nodes.
+// Args:
+// - log: the logger instance.
+// - partnerNodes: identity list of partner nodes.
+// - internalNodes: identity list of internal nodes.
+// - numCollectionClusters: the number of clusters to generate.
+// Returns:
+// - flow.AssignmentList: the generated assignment list.
+// - flow.ClusterList: the generated collection cluster list.
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes flow.IdentityList, numCollectionClusters int) (flow.AssignmentList, flow.ClusterList, error) {
+
+	partnerCollectors := partnerNodes.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
+	internalCollectors := internalNodes.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
+	nCollectors := len(partnerCollectors) + len(internalCollectors)
+
+	// ensure we have at least as many collection nodes as clusters
+	if nCollectors < int(numCollectionClusters) {
+		log.Fatal().Msgf("network bootstrap is configured with %d collection nodes, but %d clusters - must have at least one collection node per cluster",
+			nCollectors, numCollectionClusters)
+	}
+
+	// shuffle both collector lists based on a non-deterministic algorithm
+	partnerCollectors, err := partnerCollectors.Shuffle()
+	if err != nil {
+		log.Fatal().Err(err).Msg("could not shuffle partners")
+	}
+	internalCollectors, err = internalCollectors.Shuffle()
+	if err != nil {
+		log.Fatal().Err(err).Msg("could not shuffle internals")
+	}
+
+	// capture first reference weight to validate that all collectors have equal weight
+	refWeight := internalCollectors[0].InitialWeight
+
+	identifierLists := make([]flow.IdentifierList, numCollectionClusters)
+	// array to track the 2/3 internal-nodes constraint (internal_nodes > 2 * partner_nodes)
+	constraint := make([]int, numCollectionClusters)
+
+	// first, round-robin internal nodes into each cluster
+	for i, node := range internalCollectors {
+		if node.InitialWeight != refWeight {
+			return nil, nil, fmt.Errorf("current implementation requires all collectors (partner & internal nodes) to have equal weight")
+		}
+		clusterIndex := i % numCollectionClusters
+		identifierLists[clusterIndex] = append(identifierLists[clusterIndex], node.NodeID)
+		constraint[clusterIndex] += 1
+	}
+
+	// next, round-robin partner nodes into each cluster
+	for i, node := range partnerCollectors {
+		if node.InitialWeight != refWeight {
+			return nil, nil, fmt.Errorf("current implementation requires all collectors (partner & internal nodes) to have equal weight")
+		}
+		clusterIndex := i % numCollectionClusters
+		identifierLists[clusterIndex] = append(identifierLists[clusterIndex], node.NodeID)
+		constraint[clusterIndex] -= 2
+	}
+
+	// check the 2/3 constraint: for every cluster `i`, constraint[i] must be strictly positive
+	for i := 0; i < numCollectionClusters; i++ {
+		if constraint[i] <= 0 {
+			return nil, nil, errors.New("there are not enough internal nodes to have strictly more than 2/3 internal nodes in each cluster")
+		}
+	}
+
+	assignments := assignment.FromIdentifierLists(identifierLists)
+
+	collectors := append(partnerCollectors, internalCollectors...)
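+	// collectors now contains every collector exactly once; NewClusterList
+	// presumably validates the assignments against this full skeleton list.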
+ clusters, err := factory.NewClusterList(assignments, collectors.ToSkeleton()) + if err != nil { + log.Fatal().Err(err).Msg("could not create cluster list") + } + + return assignments, clusters, nil +} + +// ConvertClusterAssignmentsCdc converts golang cluster assignments type to Cadence type `[[String]]`. +func ConvertClusterAssignmentsCdc(assignments flow.AssignmentList) cadence.Array { + stringArrayType := cadence.NewVariableSizedArrayType(cadence.StringType) + + assignmentsCdc := make([]cadence.Value, len(assignments)) + for i, asmt := range assignments { + vals := make([]cadence.Value, asmt.Len()) + for j, nodeID := range asmt { + vals[j] = cadence.String(nodeID.String()) + } + assignmentsCdc[i] = cadence.NewArray(vals). + WithType(stringArrayType) + } + + return cadence.NewArray(assignmentsCdc). + WithType(cadence.NewVariableSizedArrayType(stringArrayType)) +} + +// ConvertClusterQcsCdc converts cluster QCs from `QuorumCertificate` type to `ClusterQCVoteData` type. +// Args: +// - qcs: list of quorum certificates. +// - clusterList: the list of cluster lists each used to generate one of the quorum certificates in qcs. +// - flowClusterQCAddress: the FlowClusterQC contract address where the ClusterQCVoteData type is defined. +// +// Returns: +// - []cadence.Value: cadence representation of the list of cluster qcs. +// - error: error if the cluster qcs and cluster lists don't match in size or +// signature indices decoding fails. +func ConvertClusterQcsCdc(qcs []*flow.QuorumCertificate, clusterList flow.ClusterList, flowClusterQCAddress string) ([]cadence.Value, error) { + voteDataType := newClusterQCVoteDataCdcType(flowClusterQCAddress) + qcVoteData := make([]cadence.Value, len(qcs)) + for i, qc := range qcs { + c, ok := clusterList.ByIndex(uint(i)) + if !ok { + return nil, fmt.Errorf("could not get cluster list for cluster index %v", i) + } + voterIds, err := signature.DecodeSignerIndicesToIdentifiers(c.NodeIDs(), qc.SignerIndices) + if err != nil { + return nil, fmt.Errorf("could not decode signer indices: %w", err) + } + cdcVoterIds := make([]cadence.Value, len(voterIds)) + for i, id := range voterIds { + cdcVoterIds[i] = cadence.String(id.String()) + } + + qcVoteData[i] = cadence.NewStruct([]cadence.Value{ + // aggregatedSignature + cadence.String(hex.EncodeToString(qc.SigData)), + // Node IDs of signers + cadence.NewArray(cdcVoterIds).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + }).WithType(voteDataType) + + } + + return qcVoteData, nil +} + +// newClusterQCVoteDataCdcType returns the FlowClusterQC cadence struct type. +func newClusterQCVoteDataCdcType(clusterQcAddress string) *cadence.StructType { + + // FlowClusterQC.ClusterQCVoteData + address, _ := cdcCommon.HexToAddress(clusterQcAddress) + location := cdcCommon.NewAddressLocation(nil, address, "FlowClusterQC") + + return cadence.NewStructType( + location, + "FlowClusterQC.ClusterQCVoteData", + []cadence.Field{ + { + Identifier: "aggregatedSignature", + Type: cadence.StringType, + }, + { + Identifier: "voterIDs", + Type: cadence.NewVariableSizedArrayType(cadence.StringType), + }, + }, + nil, + ) +} diff --git a/cmd/util/cmd/common/flags.go b/cmd/util/cmd/common/flags.go new file mode 100644 index 00000000000..355051c1d6c --- /dev/null +++ b/cmd/util/cmd/common/flags.go @@ -0,0 +1,13 @@ +package common + +import ( + "github.com/spf13/cobra" +) + +const DefaultProtocolDBDir = "/var/flow/data/protocol" + +// InitDataDirFlag initializes the --datadir flag on the given command. 
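+// Typical usage (hypothetical sketch) from a command's init():
+//
+//	var flagDataDir string
+//	InitDataDirFlag(Cmd, &flagDataDir)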
+func InitDataDirFlag(cmd *cobra.Command, dataDirFlag *string) {
+	cmd.PersistentFlags().StringVarP(dataDirFlag, "datadir", "d", DefaultProtocolDBDir,
+		"directory of the protocol database")
+}
diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go
new file mode 100644
index 00000000000..b5f1254cf87
--- /dev/null
+++ b/cmd/util/cmd/common/node_info.go
@@ -0,0 +1,247 @@
+package common
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/bootstrap"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// ReadFullPartnerNodeInfos reads partner node info and partner weight information from the specified paths and constructs
+// a list of full bootstrap.NodeInfo for each partner node.
+// Args:
+// - log: logger used to log debug information.
+// - partnerWeightsPath: path to the partner weights configuration file.
+// - partnerNodeInfoDir: directory containing the partner node configuration files.
+// Returns:
+// - []bootstrap.NodeInfo: the generated node info list. (public information, private keys not set)
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func ReadFullPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInfoDir string) ([]bootstrap.NodeInfo, error) {
+	partners, err := ReadPartnerNodeInfos(partnerNodeInfoDir)
+	if err != nil {
+		return nil, err
+	}
+	log.Info().Msgf("read %d partner node configuration files", len(partners))
+
+	weights, err := ReadPartnerWeights(partnerWeightsPath)
+	if err != nil {
+		return nil, err
+	}
+	log.Info().Msgf("read %d weights for partner nodes", len(weights))
+
+	var nodes []bootstrap.NodeInfo
+	for _, partner := range partners {
+		// validate every single partner node
+		err = ValidateNodeID(partner.NodeID)
+		if err != nil {
+			return nil, fmt.Errorf("invalid node ID: %s", partner.NodeID)
+		}
+		err = ValidateNetworkPubKey(partner.NetworkPubKey)
+		if err != nil {
+			return nil, fmt.Errorf("invalid network public key: %s", partner.NetworkPubKey)
+		}
+		err = ValidateStakingPubKey(partner.StakingPubKey)
+		if err != nil {
+			return nil, fmt.Errorf("invalid staking public key: %s", partner.StakingPubKey)
+		}
+
+		weight := weights[partner.NodeID]
+		if valid := ValidateWeight(weight); !valid {
+			return nil, fmt.Errorf("invalid partner weight %v: %d", partner.NodeID, weight)
+		}
+
+		if weight != flow.DefaultInitialWeight {
+			log.Warn().Msgf("partner node (id=%x) has non-default weight (%d != %d)", partner.NodeID, weight, flow.DefaultInitialWeight)
+		}
+
+		node := bootstrap.NewPublicNodeInfo(
+			partner.NodeID,
+			partner.Role,
+			partner.Address,
+			weight,
+			partner.NetworkPubKey.PublicKey,
+			partner.StakingPubKey.PublicKey,
+			partner.StakingPoP.Signature,
+		)
+		nodes = append(nodes, node)
+	}
+
+	return nodes, nil
+}
+
+// ReadPartnerWeights reads the partner weights configuration file and returns a list of PartnerWeights.
+// Args:
+// - partnerWeightsPath: path to the partner weights configuration file.
+// Returns:
+// - PartnerWeights: map from NodeID → node's weight
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func ReadPartnerWeights(partnerWeightsPath string) (PartnerWeights, error) {
+	var weights PartnerWeights
+
+	err := ReadJSON(partnerWeightsPath, &weights)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read partner weights json: %w", err)
+	}
+	return weights, nil
+}
+
+// ReadPartnerNodeInfos reads the partner node info files from the given directory and returns a list of bootstrap.NodeInfoPub.
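+// Only files whose names contain the partner node-info prefix
+// (bootstrap.PathPartnerNodeInfoPrefix) are read; all other files in the
+// directory are skipped.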
+// Args:
+// - partnerNodeInfoDir: directory containing the partner node configuration files.
+// Returns:
+// - []bootstrap.NodeInfoPub: the generated partner node info list.
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func ReadPartnerNodeInfos(partnerNodeInfoDir string) ([]bootstrap.NodeInfoPub, error) {
+	var partners []bootstrap.NodeInfoPub
+	files, err := FilesInDir(partnerNodeInfoDir)
+	if err != nil {
+		return nil, fmt.Errorf("could not read partner node infos: %w", err)
+	}
+	for _, f := range files {
+		// skip files that do not include node-infos
+		if !strings.Contains(f, bootstrap.PathPartnerNodeInfoPrefix) {
+			continue
+		}
+		// read file and append to partners
+		var p bootstrap.NodeInfoPub
+		err = ReadJSON(f, &p)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read node info: %w", err)
+		}
+		partners = append(partners, p)
+	}
+	return partners, nil
+}
+
+// ReadFullInternalNodeInfos reads internal node info and internal node weight information from the specified paths and constructs
+// a list of full bootstrap.NodeInfo for each internal node.
+// Args:
+// - log: logger used to log debug information.
+// - internalNodePrivInfoDir: path to internal nodes private info.
+// - internalWeightsConfig: path to the internal weights configuration file.
+// Returns:
+// - []bootstrap.NodeInfo: the generated node info list. Caution: contains private keys!
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func ReadFullInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, internalWeightsConfig string) ([]bootstrap.NodeInfo, error) {
+	privInternals, err := ReadInternalNodeInfos(internalNodePrivInfoDir)
+	if err != nil {
+		return nil, err
+	}
+
+	log.Info().Msgf("read %d internal private node-info files", len(privInternals))
+
+	weights := internalWeightsByAddress(log, internalWeightsConfig)
+	log.Info().Msgf("read %d weights for internal nodes", len(weights))
+
+	var nodes []bootstrap.NodeInfo
+	for i, internal := range privInternals {
+		// check if address is valid format
+		ValidateAddressFormat(log, internal.Address)
+
+		// validate every single internal node
+		err := ValidateNodeID(internal.NodeID)
+		if err != nil {
+			return nil, fmt.Errorf("invalid internal node ID: %s", internal.NodeID)
+		}
+		weight := weights[internal.Address]
+
+		if valid := ValidateWeight(weight); !valid {
+			return nil, fmt.Errorf("invalid internal weight %v: %d", internal.NodeID, weight)
+		}
+		if weight != flow.DefaultInitialWeight {
+			log.Warn().Msgf("internal node (id=%x) has non-default weight (%d != %d)", internal.NodeID, weight, flow.DefaultInitialWeight)
+		}
+
+		node, err := bootstrap.NewPrivateNodeInfo(
+			internal.NodeID,
+			internal.Role,
+			internal.Address,
+			weight,
+			internal.NetworkPrivKey.PrivateKey,
+			internal.StakingPrivKey.PrivateKey,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("failed to build private node info at index %d: %w", i, err)
+		}
+
+		nodes = append(nodes, node)
+	}
+
+	return nodes, nil
+}
+
+// ReadInternalNodeInfos reads our internal node private infos generated by the `keygen` command and returns them.
+// Args:
+// - internalNodePrivInfoDir: path to internal nodes private info.
+// Returns:
+// - []bootstrap.NodeInfoPriv: the generated private node info list. Caution: contains private keys!
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
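+//
+// Usage sketch (the directory path is an assumption for illustration):
+//
+//	infos, err := ReadInternalNodeInfos("./bootstrap/private-root-information")
+//	if err != nil {
+//		return err
+//	}
+//	// infos contain staking and network private keys - handle with care.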
+func ReadInternalNodeInfos(internalNodePrivInfoDir string) ([]bootstrap.NodeInfoPriv, error) {
+	var internalPrivInfos []bootstrap.NodeInfoPriv
+
+	// get files in internal priv node infos directory
+	files, err := FilesInDir(internalNodePrivInfoDir)
+	if err != nil {
+		return nil, fmt.Errorf("could not read internal node infos: %w", err)
+	}
+
+	// for each of the files
+	for _, f := range files {
+		// skip files that do not include node-infos
+		if !strings.Contains(f, bootstrap.PathPrivNodeInfoPrefix) {
+			continue
+		}
+
+		// read file and append to the internal private node infos
+		var p bootstrap.NodeInfoPriv
+		err = ReadJSON(f, &p)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read json: %w", err)
+		}
+		internalPrivInfos = append(internalPrivInfos, p)
+	}
+
+	return internalPrivInfos, nil
+}
+
+// internalWeightsByAddress returns a mapping from node address to weight for internal nodes
+func internalWeightsByAddress(log zerolog.Logger, config string) map[string]uint64 {
+	// read json
+	var configs []bootstrap.NodeConfig
+	err := ReadJSON(config, &configs)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to read json")
+	}
+	log.Info().Interface("config", configs).Msgf("read internal node configurations")
+
+	weights := make(map[string]uint64)
+	for _, config := range configs {
+		if _, ok := weights[config.Address]; !ok {
+			weights[config.Address] = config.Weight
+		} else {
+			log.Error().Msgf("duplicate internal node address %s", config.Address)
+		}
+	}
+
+	return weights
+}
+
+// FilterInternalPartners returns the `partners`, dropping any entries that are also in `internal`.
+// Formally, this function implements the set difference `partners \ internal`.
+func FilterInternalPartners(partners []bootstrap.NodeInfo, internal []bootstrap.NodeInfo) []bootstrap.NodeInfo {
+	lookup := make(map[flow.Identifier]struct{})
+	for _, node := range internal {
+		lookup[node.NodeID] = struct{}{}
+	}
+
+	var filtered []bootstrap.NodeInfo
+	for _, node := range partners {
+		if _, ok := lookup[node.NodeID]; !ok {
+			filtered = append(filtered, node)
+		}
+	}
+	return filtered
+}
diff --git a/cmd/util/cmd/common/snapshot.go b/cmd/util/cmd/common/snapshot.go
new file mode 100644
index 00000000000..db9d0ed245a
--- /dev/null
+++ b/cmd/util/cmd/common/snapshot.go
@@ -0,0 +1,142 @@
+package common
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/rs/zerolog"
+	"github.com/sethvargo/go-retry"
+
+	"github.com/onflow/flow-go-sdk/access/grpc"
+
+	"github.com/onflow/flow-go/utils/logging"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/inmem"
+)
+
+const getSnapshotTimeout = 30 * time.Second
+
+// GetProtocolSnapshot is a callback that will get the latest finalized protocol snapshot.
+type GetProtocolSnapshot func(ctx context.Context) (protocol.Snapshot, error)
+
+// GetSnapshot will attempt to get the latest finalized protocol snapshot with the given flow configs.
+func GetSnapshot(ctx context.Context, client *grpc.Client) (*inmem.Snapshot, error) {
+	ctx, cancel := context.WithTimeout(ctx, getSnapshotTimeout)
+	defer cancel()
+
+	b, err := client.GetLatestProtocolStateSnapshot(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get latest finalized protocol state snapshot during pre-initialization: %w", err)
+	}
+
+	var snapshotEnc inmem.EncodableSnapshot
+	err = json.Unmarshal(b, &snapshotEnc)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal protocol state snapshot: %w", err)
+	}
+
+	snapshot := inmem.SnapshotFromEncodable(snapshotEnc)
+	return snapshot, nil
+}
+
+// GetSnapshotAtEpochAndPhase will get the latest finalized protocol snapshot and check the current epoch and epoch phase.
+// If we are past the target epoch and epoch phase, we exit the retry mechanism immediately.
+// If not, we poll the snapshot at the specified interval until we reach the target epoch and phase.
+// Args:
+// - ctx: context used when getting the snapshot from the network.
+// - log: the logger
+// - startupEpoch: the desired epoch in which to take a snapshot for startup.
+// - startupEpochPhase: the desired epoch phase in which to take a snapshot for startup.
+// - retryInterval: sleep interval used to retry getting the snapshot from the network in our desired epoch and epoch phase.
+// - getSnapshot: func used to get the snapshot.
+// Returns:
+// - protocol.Snapshot: the protocol snapshot.
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func GetSnapshotAtEpochAndPhase(ctx context.Context, log zerolog.Logger, startupEpoch uint64, startupEpochPhase flow.EpochPhase, retryInterval time.Duration, getSnapshot GetProtocolSnapshot) (protocol.Snapshot, error) {
+	start := time.Now()
+
+	log = log.With().
+		Uint64("target_epoch_counter", startupEpoch).
+		Str("target_epoch_phase", startupEpochPhase.String()).
+		Logger()
+
+	log.Info().Msg("starting dynamic startup - waiting until target epoch/phase to start...")
+
+	var snapshot protocol.Snapshot
+	var err error
+
+	backoff := retry.NewConstant(retryInterval)
+	err = retry.Do(ctx, backoff, func(ctx context.Context) error {
+		snapshot, err = getSnapshot(ctx)
+		if err != nil {
+			err = fmt.Errorf("failed to get protocol snapshot: %w", err)
+			log.Error().Err(err).Msg("could not get protocol snapshot")
+			return retry.RetryableError(err)
+		}
+
+		// if we encounter any errors interpreting the snapshot, something went wrong - stop retrying
+		currEpoch, err := snapshot.Epochs().Current()
+		if err != nil {
+			return fmt.Errorf("failed to get current epoch: %w", err)
+		}
+		currEpochCounter := currEpoch.Counter()
+
+		currEpochPhase, err := snapshot.EpochPhase()
+		if err != nil {
+			return fmt.Errorf("failed to get the current epoch phase: %w", err)
+		}
+
+		if shouldStartAtEpochPhase(currEpochCounter, startupEpoch, currEpochPhase, startupEpochPhase) {
+			head, err := snapshot.Head()
+			if err != nil {
+				return fmt.Errorf("could not get Dynamic Startup snapshot header: %w", err)
+			}
+			log.Info().
+				Dur("time_waiting", time.Since(start)).
+				Uint64("current_epoch", currEpochCounter).
+				Str("current_epoch_phase", currEpochPhase.String()).
+				Hex("finalized_root_block_id", logging.ID(head.ID())).
+				Uint64("finalized_block_height", head.Height).
+				Msg("finished dynamic startup - reached desired epoch and phase")
+
+			return nil
+		}
+
+		// wait, then poll for the latest snapshot again
+		log.Info().
+			Dur("time_waiting", time.Since(start)).
+			Uint64("current_epoch", currEpochCounter).
+			Str("current_epoch_phase", currEpochPhase.String()).
+			Msgf("waiting for epoch %d and phase %s", startupEpoch, startupEpochPhase.String())
+
+		return retry.RetryableError(fmt.Errorf("dynamic startup epoch and epoch phase not reached"))
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to wait for target epoch and phase: %w", err)
+	}
+
+	return snapshot, nil
+}
+
+// shouldStartAtEpochPhase determines whether Dynamic Startup should start up the node, based on a
+// target epoch/phase and a current epoch/phase.
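+//
+// Illustrative cases (assuming the usual phase ordering Staking < Setup < Committed):
+//   - currentEpoch > targetEpoch: start, regardless of phase.
+//   - currentEpoch < targetEpoch: do not start, regardless of phase.
+//   - currentEpoch == targetEpoch, targetPhase = Setup, currentPhase = Committed:
+//     start, since Committed >= Setup.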
+func shouldStartAtEpochPhase(currentEpoch, targetEpoch uint64, currentPhase, targetPhase flow.EpochPhase) bool { + // if the current epoch is after the target epoch, start up regardless of phase + if currentEpoch > targetEpoch { + return true + } + // if the current epoch is before the target epoch, do not start up regardless of phase + if currentEpoch < targetEpoch { + return false + } + // if the target phase is EpochPhaseFallback, only start up if the current phase exactly matches + if targetPhase == flow.EpochPhaseFallback { + return currentPhase == flow.EpochPhaseFallback + } + // for any other target phase, start up if current phase is >= target + return currentPhase >= targetPhase +} diff --git a/cmd/util/cmd/common/state.go b/cmd/util/cmd/common/state.go index 16d5295a729..859c4834666 100644 --- a/cmd/util/cmd/common/state.go +++ b/cmd/util/cmd/common/state.go @@ -3,33 +3,36 @@ package common import ( "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/state/protocol" protocolbadger "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) -func InitProtocolState(db *badger.DB, storages *storage.All) (protocol.State, error) { +func OpenProtocolState(lockManager lockctx.Manager, db storage.DB, storages *store.All) (protocol.State, error) { metrics := &metrics.NoopCollector{} protocolState, err := protocolbadger.OpenState( metrics, db, + lockManager, storages.Headers, storages.Seals, storages.Results, storages.Blocks, storages.QuorumCertificates, - storages.Setups, + storages.EpochSetups, storages.EpochCommits, - storages.Statuses, + storages.EpochProtocolStateEntries, + storages.ProtocolKVStore, storages.VersionBeacons, ) if err != nil { - return nil, fmt.Errorf("could not init protocol state: %w", err) + return nil, fmt.Errorf("could not open protocol state: %w", err) } return protocolState, nil diff --git a/cmd/util/cmd/common/storage.go b/cmd/util/cmd/common/storage.go index 0fa58a9cf28..13cd2170f98 100644 --- a/cmd/util/cmd/common/storage.go +++ b/cmd/util/cmd/common/storage.go @@ -1,46 +1,92 @@ package common import ( - "github.com/dgraph-io/badger/v2" + "fmt" + "github.com/rs/zerolog/log" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" storagebadger "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + pebblestorage "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/storage/store" ) -func InitStorage(datadir string) *badger.DB { - return InitStorageWithTruncate(datadir, false) -} - -func InitStorageWithTruncate(datadir string, truncate bool) *badger.DB { - opts := badger. - DefaultOptions(datadir). - WithKeepL0InMemory(true). - WithLogger(nil). 
-		WithTruncate(truncate)
+func InitStorage(datadir string) (storage.DB, error) {
+	ok, err := IsBadgerFolder(datadir)
+	if err != nil {
+		return nil, err
+	}
+	if ok {
+		return nil, fmt.Errorf("badger db is no longer supported for protocol data, datadir: %v", datadir)
+	}
 
-	db, err := badger.Open(opts)
+	ok, err = IsPebbleFolder(datadir)
 	if err != nil {
-		log.Fatal().Err(err).Msg("could not open key-value store")
+		return nil, err
+	}
+	if !ok {
+		return nil, fmt.Errorf("could not determine storage type (not badger, nor pebble) for directory %s", datadir)
 	}
 
-	// in order to void long iterations with big keys when initializing with an
-	// already populated database, we bootstrap the initial maximum key size
-	// upon starting
-	err = operation.RetryOnConflict(db.Update, func(tx *badger.Txn) error {
-		return operation.InitMax(tx)
-	})
+	db, err := pebblestorage.ShouldOpenDefaultPebbleDB(log.Logger, datadir)
 	if err != nil {
-		log.Fatal().Err(err).Msg("could not initialize max tracker")
+		return nil, fmt.Errorf("could not open pebble db at %s: %w", datadir, err)
 	}
+	log.Info().Msgf("using pebble db at %s", datadir)
+	return pebbleimpl.ToDB(db), nil
+}
-
-	return db
+// IsBadgerFolder checks if the given directory is a badger folder.
+// It returns an error if the folder is empty or does not exist.
+// It returns an error if the folder is not empty but is missing some required badger files.
+func IsBadgerFolder(dataDir string) (bool, error) {
+	return storagebadger.IsBadgerFolder(dataDir)
 }
 
-func InitStorages(db *badger.DB) *storage.All {
+// IsPebbleFolder checks if the given directory is a pebble folder.
+// It returns an error if the folder is empty or does not exist.
+// It returns an error if the folder is not empty but is missing some required pebble files.
+func IsPebbleFolder(dataDir string) (bool, error) {
+	return pebblestorage.IsPebbleFolder(dataDir)
+}
+
+func InitStorages(db storage.DB) *store.All {
 	metrics := &metrics.NoopCollector{}
+	return store.InitAll(metrics, db)
+}
+
+// WithStorage opens the protocol database at the given datadir, runs the given
+// function with it, and closes the database when the function returns.
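+//
+// Usage sketch (flag plumbing is assumed for illustration):
+//
+//	err := WithStorage(flagDatadir, func(db storage.DB) error {
+//		storages := InitStorages(db)
+//		// ... use storages while the database is open ...
+//		return nil
+//	})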
+func WithStorage(datadir string, f func(storage.DB) error) error {
+	log.Info().Msgf("opening protocol db at datadir: %v", datadir)
+
+	ok, err := IsPebbleFolder(datadir)
+	if err != nil {
+		return fmt.Errorf("failed to check whether the folder stores pebble data: %w", err)
+	}
+
+	if !ok {
+		ok, err := IsBadgerFolder(datadir)
+		if err != nil {
+			return fmt.Errorf("failed to check whether the folder stores badger data: %w", err)
+		}
+		if ok {
+			return fmt.Errorf("badger db is no longer supported for protocol data, datadir: %v", datadir)
+		}
+
+		return fmt.Errorf("the given datadir does not contain a valid pebble db: %v", datadir)
+	}
+
+	db, err := pebblestorage.ShouldOpenDefaultPebbleDB(log.Logger, datadir)
+	if err != nil {
+		return fmt.Errorf("cannot open pebble db at %v: %w", datadir, err)
+	}
+
+	defer db.Close()
+
+	log.Info().Msg("opened pebble protocol database")
 
-	return storagebadger.InitAll(metrics, db)
+	return f(pebbleimpl.ToDB(db))
 }
diff --git a/cmd/util/cmd/common/transactions.go b/cmd/util/cmd/common/transactions.go
index 15f6c3746fb..e765e01d326 100644
--- a/cmd/util/cmd/common/transactions.go
+++ b/cmd/util/cmd/common/transactions.go
@@ -3,14 +3,16 @@ package common
 import (
 	"fmt"
 
+	"github.com/onflow/flow-core-contracts/lib/go/templates"
+
 	"github.com/onflow/flow-go/fvm/systemcontracts"
 	"github.com/onflow/flow-go/model/flow"
 )
 
 const (
 	getInfoForProposedNodesScript = `
-	import FlowIDTableStaking from 0x%s
-	pub fun main(): [FlowIDTableStaking.NodeInfo] {
+	import "FlowIDTableStaking"
+	access(all) fun main(): [FlowIDTableStaking.NodeInfo] {
 		let nodeIDs = FlowIDTableStaking.getProposedNodeIDs()
 
 		var infos: [FlowIDTableStaking.NodeInfo] = []
@@ -26,11 +28,12 @@ const (
 
 // GetNodeInfoForProposedNodesScript returns a script that will return an array of FlowIDTableStaking.NodeInfo for each
 // node in the proposed table.
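+// For example (hypothetical usage): GetNodeInfoForProposedNodesScript("mainnet")
+// builds the chain ID "flow-mainnet" and resolves the `FlowIDTableStaking`
+// import via that chain's contracts template environment.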
 func GetNodeInfoForProposedNodesScript(network string) ([]byte, error) {
-	contracts, err := systemcontracts.SystemContractsForChain(flow.ChainID(fmt.Sprintf("flow-%s", network)))
-	if err != nil {
-		return nil, fmt.Errorf("failed to get system contracts for network (%s): %w", network, err)
-	}
+	contracts := systemcontracts.SystemContractsForChain(flow.ChainID(fmt.Sprintf("flow-%s", network)))
 
-	//NOTE: The FlowIDTableStaking contract is deployed to the same account as the Epoch contract
-	return []byte(fmt.Sprintf(getInfoForProposedNodesScript, contracts.Epoch.Address)), nil
+	return []byte(
+		templates.ReplaceAddresses(
+			getInfoForProposedNodesScript,
+			contracts.AsTemplateEnv(),
+		),
+	), nil
 }
diff --git a/cmd/util/cmd/common/utils.go b/cmd/util/cmd/common/utils.go
new file mode 100644
index 00000000000..f5b9570071e
--- /dev/null
+++ b/cmd/util/cmd/common/utils.go
@@ -0,0 +1,180 @@
+package common
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"os"
+	"path/filepath"
+	"strconv"
+
+	"github.com/multiformats/go-multiaddr"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/crypto"
+
+	"github.com/onflow/flow-go/model/bootstrap"
+	"github.com/onflow/flow-go/model/encodable"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/network/p2p/utils"
+	"github.com/onflow/flow-go/utils/io"
+)
+
+// FilesInDir returns the paths of all regular files in the given directory (recursively).
+func FilesInDir(dir string) ([]string, error) {
+	exists, err := PathExists(dir)
+	if err != nil {
+		return nil, fmt.Errorf("could not check if dir exists: %w", err)
+	}
+
+	if !exists {
+		return nil, fmt.Errorf("dir %v does not exist", dir)
+	}
+
+	var files []string
+	err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+		// propagate walk errors; info is nil when err is non-nil
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			files = append(files, path)
+		}
+		return nil
+	})
+	return files, err
+}
+
+// PathExists returns true if the given path exists, and false if it does not.
+func PathExists(path string) (bool, error) {
+	_, err := os.Stat(path)
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func ReadJSON(path string, target interface{}) error {
+	dat, err := io.ReadFile(path)
+	if err != nil {
+		return fmt.Errorf("cannot read json: %w", err)
+	}
+	err = json.Unmarshal(dat, target)
+	if err != nil {
+		return fmt.Errorf("cannot unmarshal json in file %s: %w", path, err)
+	}
+	return nil
+}
+
+func WriteJSON(path string, out string, data interface{}) error {
+	bz, err := json.MarshalIndent(data, "", "  ")
+	if err != nil {
+		return fmt.Errorf("cannot marshal json: %w", err)
+	}
+
+	return WriteText(path, out, bz)
+}
+
+func WriteText(path string, out string, data []byte) error {
+	path = filepath.Join(out, path)
+
+	err := os.MkdirAll(filepath.Dir(path), 0755)
+	if err != nil {
+		return fmt.Errorf("could not create output dir: %w", err)
+	}
+
+	err = os.WriteFile(path, data, 0644)
+	if err != nil {
+		return fmt.Errorf("could not write file: %w", err)
+	}
+	return nil
+}
+
+func PubKeyToString(key crypto.PublicKey) string {
+	return fmt.Sprintf("%x", key.Encode())
+}
+
+func NodeCountByRole(nodes []bootstrap.NodeInfo) map[flow.Role]uint16 {
+	roleCounts := map[flow.Role]uint16{
+		flow.RoleCollection:   0,
+		flow.RoleConsensus:    0,
+		flow.RoleExecution:    0,
+		flow.RoleVerification: 0,
+		flow.RoleAccess:       0,
+	}
+	for _, node := range nodes {
+		roleCounts[node.Role] = roleCounts[node.Role] + 1
+	}
+
+	return roleCounts
+}
+
+// ValidateAddressFormat validates the provided address by performing the same
+// checks the network layer performs before starting the node.
+func ValidateAddressFormat(log zerolog.Logger, address string) {
+	checkErr := func(err error) {
+		if err != nil {
+			log.Fatal().Err(err).Str("address", address).Msg("invalid address format.\n" +
+				`Address needs to be in the format hostname:port or ip:port e.g. "flow.com:3569"`)
+		}
+	}
+
+	// split address into ip/hostname and port
+	ip, port, err := net.SplitHostPort(address)
+	checkErr(err)
+
+	// check that the port number is indeed a number
+	_, err = strconv.Atoi(port)
+	checkErr(err)
+
+	// create a libp2p address from the ip and port
+	lp2pAddr := utils.MultiAddressStr(ip, port)
+	_, err = multiaddr.NewMultiaddr(lp2pAddr)
+	checkErr(err)
+}
+
+// ValidateNodeID returns an error if the node ID is zero.
+// Args:
+// - nodeID: the node ID to validate.
+// Returns:
+// - error: if the node ID is the zero value.
+func ValidateNodeID(nodeID flow.Identifier) error {
+	if nodeID == flow.ZeroID {
+		return fmt.Errorf("NodeID must not be zero")
+	}
+	return nil
+}
+
+// ValidateNetworkPubKey returns an error if the network public key is nil.
+// Args:
+// - key: the public key.
+// Returns:
+// - error: if the network key is nil.
+func ValidateNetworkPubKey(key encodable.NetworkPubKey) error {
+	if key.PublicKey == nil {
+		return fmt.Errorf("network public key must not be nil")
+	}
+	return nil
+}
+
+// ValidateStakingPubKey returns an error if the staking key is nil.
+// Args:
+// - key: the public key.
+// Returns:
+// - error: if the staking key is nil.
+func ValidateStakingPubKey(key encodable.StakingPubKey) error {
+	if key.PublicKey == nil {
+		return fmt.Errorf("staking public key must not be nil")
+	}
+	return nil
+}
+
+// ValidateWeight returns true if the weight is greater than 0.
+// Args:
+// - weight: the weight to check.
+// Returns:
+// - bool: true if the weight is greater than 0.
+func ValidateWeight(weight uint64) bool {
+	return weight > 0
+}
+
+// PartnerWeights is the format of the JSON file specifying partner node weights.
+type PartnerWeights map[flow.Identifier]uint64
diff --git a/cmd/util/cmd/common/validation.go b/cmd/util/cmd/common/validation.go
index 6e974b7d1d1..8e86a34be80 100644
--- a/cmd/util/cmd/common/validation.go
+++ b/cmd/util/cmd/common/validation.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 
 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/grpcclient"
 	"github.com/onflow/flow-go/state/protocol"
 )
 
@@ -31,7 +32,7 @@ func validateFlags(accessNodeIDS []string, snapshot protocol.Snapshot) ([]flow.I
 	}
 
 	if accessNodeIDS[0] == "*" {
-		anIDS, err := DefaultAccessNodeIDS(snapshot)
+		anIDS, err := grpcclient.DefaultAccessNodeIDS(snapshot)
 		if err != nil {
 			return nil, fmt.Errorf("failed to get default access node ids %w", err)
 		}
@@ -59,7 +60,7 @@ func validateFlagsMainNet(accessNodeIDS []string) ([]flow.Identifier, error) {
 
 // convertIDS converts a list of access node id hex strings to flow.Identifier
 func convertIDS(accessNodeIDS []string) ([]flow.Identifier, error) {
-	anIDS, err := FlowIDFromHexString(accessNodeIDS...)
+	anIDS, err := grpcclient.FlowIDFromHexString(accessNodeIDS...)
if err != nil { return nil, fmt.Errorf("failed to convert access node ID(s) into flow identifier(s) %w", err) } diff --git a/cmd/util/cmd/db-migration/cmd.go b/cmd/util/cmd/db-migration/cmd.go new file mode 100644 index 00000000000..a96d74cc980 --- /dev/null +++ b/cmd/util/cmd/db-migration/cmd.go @@ -0,0 +1,93 @@ +package db + +import ( + "fmt" + "time" + + "github.com/docker/go-units" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/storage/migration" +) + +var ( + flagBadgerDBdir string + flagPebbleDBdir string + flagBatchByteSize int + flagReaderCount int + flagWriterCount int + flagReaderShardPrefixBytes int + flagValidationMode string + flagValidationOnly bool +) + +var Cmd = &cobra.Command{ + Use: "db-migration", + Short: "copy badger db to pebble db", + RunE: run, +} + +func init() { + Cmd.Flags().StringVar(&flagBadgerDBdir, "badgerdir", "", "BadgerDB Dir to copy data from") + _ = Cmd.MarkFlagRequired("badgerdir") + + Cmd.Flags().StringVar(&flagPebbleDBdir, "pebbledir", "", "PebbleDB Dir to copy data to") + _ = Cmd.MarkFlagRequired("pebbledir") + + Cmd.Flags().IntVar(&flagBatchByteSize, "batch_byte_size", migration.DefaultMigrationConfig.BatchByteSize, + "the batch size in bytes to use for migration (32MB by default)") + + Cmd.Flags().IntVar(&flagReaderCount, "reader_count", migration.DefaultMigrationConfig.ReaderWorkerCount, + "the number of reader workers to use for migration") + + Cmd.Flags().IntVar(&flagWriterCount, "writer_count", migration.DefaultMigrationConfig.WriterWorkerCount, + "the number of writer workers to use for migration") + + Cmd.Flags().IntVar(&flagReaderShardPrefixBytes, "reader_shard_prefix_bytes", migration.DefaultMigrationConfig.ReaderShardPrefixBytes, + "the number of prefix bytes used to assign iterator workload") + + Cmd.Flags().StringVar(&flagValidationMode, "validation_mode", string(migration.DefaultMigrationConfig.ValidationMode), + "the validation mode to use for migration (partial or full, default is partial)") + + Cmd.Flags().BoolVar(&flagValidationOnly, "validation_only", false, + "if set, only validate the data in the badger db without copying it to pebble db. "+ + "Note: this will not copy any data to pebble db, and will not create any pebble db files.") +} + +func run(*cobra.Command, []string) error { + lg := log.With(). + Str("badger_db_dir", flagBadgerDBdir). + Str("pebble_db_dir", flagPebbleDBdir). + Str("batch_byte_size", units.HumanSize(float64(flagBatchByteSize))). + Int("reader_count", flagReaderCount). + Int("writer_count", flagWriterCount). + Int("reader_shard_prefix_bytes", flagReaderShardPrefixBytes). + Str("validation_mode", flagValidationMode). + Bool("validation_only", flagValidationOnly). 
+ Logger() + + validationMode, err := migration.ParseValidationModeValid(flagValidationMode) + if err != nil { + return fmt.Errorf("invalid validation mode: %w", err) + } + + lg.Info().Msgf("starting migration from badger db to pebble db") + start := time.Now() + err = migration.RunMigrationAndCompaction(flagBadgerDBdir, flagPebbleDBdir, migration.MigrationConfig{ + BatchByteSize: flagBatchByteSize, + ReaderWorkerCount: flagReaderCount, + WriterWorkerCount: flagWriterCount, + ReaderShardPrefixBytes: flagReaderShardPrefixBytes, + ValidationMode: validationMode, + ValidationOnly: flagValidationOnly, + }) + + if err != nil { + return fmt.Errorf("migration failed: %w", err) + } + + lg.Info().Msgf("migration completed successfully in %s", time.Since(start).String()) + + return nil +} diff --git a/cmd/util/cmd/debug-script/cmd.go b/cmd/util/cmd/debug-script/cmd.go new file mode 100644 index 00000000000..a1ce10361f6 --- /dev/null +++ b/cmd/util/cmd/debug-script/cmd.go @@ -0,0 +1,144 @@ +package debug_tx + +import ( + "context" + "os" + + "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/onflow/flow/protobuf/go/flow/executiondata" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/debug" +) + +// use the following command to forward port 9000 from the EN to localhost:9001 +// `gcloud compute ssh '--ssh-flag=-A' --no-user-output-enabled --tunnel-through-iap migrationmainnet1-execution-001 --project flow-multi-region -- -NL 9001:localhost:9000` + +var ( + flagAccessAddress string + flagExecutionAddress string + flagBlockID string + flagChain string + flagScript string + flagUseExecutionDataAPI bool +) + +var Cmd = &cobra.Command{ + Use: "debug-script", + Short: "debug a script", + Run: run, +} + +func init() { + + Cmd.Flags().StringVar( + &flagChain, + "chain", + "", + "Chain name", + ) + + Cmd.Flags().StringVar(&flagAccessAddress, "access-address", "", "address of the access node") + _ = Cmd.MarkFlagRequired("access-address") + + Cmd.Flags().StringVar(&flagExecutionAddress, "execution-address", "", "address of the execution node") + _ = Cmd.MarkFlagRequired("execution-address") + + Cmd.Flags().StringVar(&flagBlockID, "block-id", "", "block ID") + _ = Cmd.MarkFlagRequired("block-id") + + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().StringVar(&flagScript, "script", "", "path to script") + _ = Cmd.MarkFlagRequired("script") + + Cmd.Flags().BoolVar(&flagUseExecutionDataAPI, "use-execution-data-api", false, "use the execution data API") +} + +func run(*cobra.Command, []string) { + + chainID := flow.ChainID(flagChain) + chain := chainID.Chain() + + code, err := os.ReadFile(flagScript) + if err != nil { + log.Fatal().Err(err).Msgf("failed to read script from file %s", flagScript) + } + + log.Info().Msg("Fetching block header ...") + + accessConn, err := grpc.NewClient( + flagAccessAddress, + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to create access connection") + } + defer accessConn.Close() + + accessClient := access.NewAccessAPIClient(accessConn) + + blockID, err := flow.HexStringToIdentifier(flagBlockID) + if err != nil { + log.Fatal().Err(err).Msg("failed to parse block ID") + } + + header, err := debug.GetAccessAPIBlockHeader(accessClient, 
context.Background(), blockID)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to fetch block header")
+	}
+
+	blockHeight := header.Height
+
+	log.Info().Msgf(
+		"Fetched block header: %s (height %d)",
+		header.ID(),
+		blockHeight,
+	)
+
+	var snap snapshot.StorageSnapshot
+
+	if flagUseExecutionDataAPI {
+		executionDataClient := executiondata.NewExecutionDataAPIClient(accessConn)
+		snap, err = debug.NewExecutionDataStorageSnapshot(executionDataClient, nil, blockHeight)
+		if err != nil {
+			log.Fatal().Err(err).Msg("failed to create storage snapshot")
+		}
+	} else {
+		executionConn, err := grpc.NewClient(
+			flagExecutionAddress,
+			grpc.WithTransportCredentials(insecure.NewCredentials()),
+		)
+		if err != nil {
+			log.Fatal().Err(err).Msg("failed to create execution connection")
+		}
+		defer executionConn.Close()
+
+		executionClient := execution.NewExecutionAPIClient(executionConn)
+		snap, err = debug.NewExecutionNodeStorageSnapshot(executionClient, nil, blockID)
+		if err != nil {
+			log.Fatal().Err(err).Msg("failed to create storage snapshot")
+		}
+	}
+
+	debugger := debug.NewRemoteDebugger(chain, log.Logger)
+
+	// TODO: add support for arguments
+	var arguments [][]byte
+
+	result, scriptErr, processErr := debugger.RunScript(code, arguments, snap, header)
+
+	if scriptErr != nil {
+		log.Fatal().Err(scriptErr).Msg("script error")
+	}
+	if processErr != nil {
+		log.Fatal().Err(processErr).Msg("process error")
+	}
+	log.Info().Msgf("result: %s", result)
+}
diff --git a/cmd/util/cmd/debug-tx/cmd.go b/cmd/util/cmd/debug-tx/cmd.go
new file mode 100644
index 00000000000..e3f4bab59d5
--- /dev/null
+++ b/cmd/util/cmd/debug-tx/cmd.go
@@ -0,0 +1,438 @@
+package debug_tx
+
+import (
+	"cmp"
+	"context"
+	"encoding/csv"
+	"encoding/hex"
+	"fmt"
+	"os"
+
+	client "github.com/onflow/flow-go-sdk/access/grpc"
+	"github.com/onflow/flow/protobuf/go/flow/execution"
+	"github.com/onflow/flow/protobuf/go/flow/executiondata"
+	"github.com/rs/zerolog/log"
+	"github.com/spf13/cobra"
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	"golang.org/x/exp/slices"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+
+	sdk "github.com/onflow/flow-go-sdk"
+
+	"github.com/onflow/flow-go/fvm"
+	"github.com/onflow/flow-go/fvm/storage/snapshot"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/grpcclient"
+	"github.com/onflow/flow-go/module/trace"
+	"github.com/onflow/flow-go/utils/debug"
+)
+
+// use the following command to forward port 9000 from the EN to localhost:9001
+// `gcloud compute ssh '--ssh-flag=-A' --no-user-output-enabled --tunnel-through-iap migrationmainnet1-execution-001 --project flow-multi-region -- -NL 9001:localhost:9000`
+
+var (
+	flagAccessAddress       string
+	flagExecutionAddress    string
+	flagChain               string
+	flagComputeLimit        uint64
+	flagProposalKeySeq      uint64
+	flagUseExecutionDataAPI bool
+	flagDumpRegisters       bool
+	flagTracePath           string
+)
+
+var Cmd = &cobra.Command{
+	Use:   "debug-tx",
+	Short: "debug a transaction",
+	Run:   run,
+}
+
+func init() {
+
+	Cmd.Flags().StringVar(
+		&flagChain,
+		"chain",
+		"",
+		"Chain name",
+	)
+	_ = Cmd.MarkFlagRequired("chain")
+
+	Cmd.Flags().StringVar(&flagAccessAddress, "access-address", "", "address of the access node")
+	_ = Cmd.MarkFlagRequired("access-address")
+
+	Cmd.Flags().StringVar(&flagExecutionAddress, "execution-address", "", "address of the execution node")
+	_ = Cmd.MarkFlagRequired("execution-address")
+
+	Cmd.Flags().Uint64Var(&flagComputeLimit, "compute-limit", 9999, "transaction
compute limit") + + Cmd.Flags().Uint64Var(&flagProposalKeySeq, "proposal-key-seq", 0, "proposal key sequence number") + + Cmd.Flags().BoolVar(&flagUseExecutionDataAPI, "use-execution-data-api", false, "use the execution data API") + + Cmd.Flags().BoolVar(&flagDumpRegisters, "dump-registers", false, "dump registers") + + Cmd.Flags().StringVar(&flagTracePath, "trace", "", "enable tracing to given path") +} + +func run(_ *cobra.Command, args []string) { + + log.Info().Msgf("Starting transaction debugger ... %v", args) + + chainID := flow.ChainID(flagChain) + chain := chainID.Chain() + + config, err := grpcclient.NewFlowClientConfig(flagAccessAddress, "", flow.ZeroID, true) + if err != nil { + log.Fatal().Err(err).Msg("failed to create flow client config") + } + + flowClient, err := grpcclient.FlowClient(config) + if err != nil { + log.Fatal().Err(err).Msg("failed to create client") + } + + for _, rawTxID := range args { + txID, err := flow.HexStringToIdentifier(rawTxID) + if err != nil { + log.Fatal().Err(err).Msg("failed to parse transaction ID") + } + + runTransactionID(txID, flowClient, chain) + } +} + +func runTransactionID(txID flow.Identifier, flowClient *client.Client, chain flow.Chain) { + log.Info().Msgf("Fetching transaction result for %s ...", txID) + + txResult, err := flowClient.GetTransactionResult(context.Background(), sdk.Identifier(txID)) + if err != nil { + log.Fatal().Err(err).Msg("failed to fetch transaction result") + } + + blockID := flow.Identifier(txResult.BlockID) + blockHeight := txResult.BlockHeight + + log.Info().Msgf( + "Fetched transaction result: %s at block %s (height %d)", + txResult.Status, + blockID, + blockHeight, + ) + + log.Info().Msg("Fetching transactions of block ...") + + txsResult, err := flowClient.GetTransactionsByBlockID(context.Background(), sdk.Identifier(blockID)) + if err != nil { + log.Fatal().Err(err).Msg("failed to fetch transactions of block") + } + + for _, blockTx := range txsResult { + log.Info().Msgf("Block transaction: %s", blockTx.ID()) + } + + log.Info().Msg("Fetching block header ...") + + header, err := debug.GetAccessAPIBlockHeader(flowClient.RPCClient(), context.Background(), blockID) + if err != nil { + log.Fatal().Err(err).Msg("failed to fetch block header") + } + + log.Info().Msgf( + "Fetched block header: %s (height %d)", + header.ID(), + header.Height, + ) + + var remoteSnapshot snapshot.StorageSnapshot + + if flagUseExecutionDataAPI { + accessConn, err := grpc.NewClient( + flagAccessAddress, + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to create access connection") + } + defer accessConn.Close() + + executionDataClient := executiondata.NewExecutionDataAPIClient(accessConn) + + // The execution data API provides the *resulting* data, + // so fetch the data for the parent block for the *initial* data. 
+ remoteSnapshot, err = debug.NewExecutionDataStorageSnapshot(executionDataClient, nil, blockHeight-1) + if err != nil { + log.Fatal().Err(err).Msg("failed to create storage snapshot") + } + } else { + executionConn, err := grpc.NewClient( + flagExecutionAddress, + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to create execution connection") + } + defer executionConn.Close() + + executionClient := execution.NewExecutionAPIClient(executionConn) + + remoteSnapshot, err = debug.NewExecutionNodeStorageSnapshot(executionClient, nil, blockID) + if err != nil { + log.Fatal().Err(err).Msg("failed to create storage snapshot") + } + } + + blockSnapshot := newBlockSnapshot(remoteSnapshot) + + var fvmOptions []fvm.Option + + if flagTracePath != "" { + + var traceFile *os.File + if flagTracePath == "-" { + traceFile = os.Stdout + } else { + traceFile, err = os.Create(flagTracePath) + if err != nil { + log.Fatal().Err(err).Msg("failed to create trace file") + } + defer traceFile.Close() + } + + exporter, err := stdouttrace.New( + stdouttrace.WithWriter(traceFile), + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to create trace exporter") + } + + tracer, err := trace.NewTracerWithExporter( + log.Logger, + "debug-tx", + flagChain, + trace.SensitivityCaptureAll, + exporter, + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to create tracer") + } + + span, _ := tracer.StartTransactionSpan(context.TODO(), txID, "") + defer span.End() + + fvmOptions = append( + fvmOptions, + fvm.WithTracer(tracer), + fvm.WithSpan(span), + ) + } + + debugger := debug.NewRemoteDebugger( + chain, + log.Logger, + fvmOptions..., + ) + + for _, blockTx := range txsResult { + blockTxID := flow.Identifier(blockTx.ID()) + + isDebuggedTx := blockTxID == txID + + dumpRegisters := flagDumpRegisters && isDebuggedTx + + runTransaction( + debugger, + blockTxID, + flowClient, + blockSnapshot, + header, + dumpRegisters, + ) + + if isDebuggedTx { + break + } + } +} + +func runTransaction( + debugger *debug.RemoteDebugger, + txID flow.Identifier, + flowClient *client.Client, + blockSnapshot *blockSnapshot, + header *flow.Header, + dumpRegisters bool, +) { + + log.Info().Msgf("Fetching transaction %s ...", txID) + + tx, err := flowClient.GetTransaction(context.Background(), sdk.Identifier(txID)) + if err != nil { + log.Fatal().Err(err).Msg("Failed to fetch transaction") + } + + log.Info().Msgf("Fetched transaction: %s", tx.ID()) + + log.Info().Msgf("Debugging transaction %s ...", tx.ID()) + + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript(tx.Script). + SetComputeLimit(flagComputeLimit). 
+ SetPayer(flow.Address(tx.Payer)) + + for _, argument := range tx.Arguments { + txBodyBuilder.AddArgument(argument) + } + + for _, authorizer := range tx.Authorizers { + txBodyBuilder.AddAuthorizer(flow.Address(authorizer)) + } + + proposalKeySequenceNumber := tx.ProposalKey.SequenceNumber + if flagProposalKeySeq != 0 { + proposalKeySequenceNumber = flagProposalKeySeq + } + + txBodyBuilder.SetProposalKey( + flow.Address(tx.ProposalKey.Address), + tx.ProposalKey.KeyIndex, + proposalKeySequenceNumber, + ) + + txBody, err := txBodyBuilder.Build() + if err != nil { + log.Fatal().Err(err).Msg("Failed to build transaction body") + } + + resultSnapshot, txErr, processErr := debugger.RunTransaction( + txBody, + blockSnapshot, + header, + ) + if processErr != nil { + log.Fatal().Err(processErr).Msg("Failed to process transaction") + } + + if txErr != nil { + log.Err(txErr).Msg("Transaction failed") + } else { + log.Info().Msg("Transaction succeeded") + } + + updatedRegisters := resultSnapshot.UpdatedRegisters() + for _, updatedRegister := range updatedRegisters { + blockSnapshot.Set( + updatedRegister.Key, + updatedRegister.Value, + ) + } + + if dumpRegisters { + dumpReadRegisters(txID, resultSnapshot.ReadRegisterIDs()) + dumpUpdatedRegisters(txID, updatedRegisters) + } +} + +func dumpReadRegisters(txID flow.Identifier, readRegisterIDs []flow.RegisterID) { + filename := fmt.Sprintf("%s.reads.csv", txID) + file, err := os.Create(filename) + if err != nil { + log.Fatal().Err(err).Msgf("Failed to create reads file: %s", filename) + } + defer file.Close() + + sortRegisterIDs(readRegisterIDs) + + writer := csv.NewWriter(file) + defer writer.Flush() + + err = writer.Write([]string{"RegisterID"}) + if err != nil { + log.Fatal().Err(err).Msg("Failed to write header") + } + + for _, readRegisterID := range readRegisterIDs { + err = writer.Write([]string{ + readRegisterID.String(), + }) + if err != nil { + log.Fatal().Err(err).Msgf("Failed to write read register: %s", readRegisterID) + } + } +} + +func dumpUpdatedRegisters(txID flow.Identifier, updatedRegisters []flow.RegisterEntry) { + filename := fmt.Sprintf("%s.updates.csv", txID) + file, err := os.Create(filename) + if err != nil { + log.Fatal().Err(err).Msgf("Failed to create writes file: %s", filename) + } + defer file.Close() + + sortRegisterEntries(updatedRegisters) + + writer := csv.NewWriter(file) + defer writer.Flush() + + err = writer.Write([]string{"RegisterID", "Value"}) + if err != nil { + log.Fatal().Err(err).Msg("Failed to write header") + } + + for _, updatedRegister := range updatedRegisters { + err = writer.Write([]string{ + updatedRegister.Key.String(), + hex.EncodeToString(updatedRegister.Value), + }) + if err != nil { + log.Fatal().Err(err).Msgf("Failed to write updated register: %s", updatedRegister) + } + } +} + +func compareRegisterIDs(a flow.RegisterID, b flow.RegisterID) int { + return cmp.Or( + cmp.Compare(a.Owner, b.Owner), + cmp.Compare(a.Key, b.Key), + ) +} + +func sortRegisterIDs(registerIDs []flow.RegisterID) { + slices.SortFunc(registerIDs, func(a, b flow.RegisterID) int { + return compareRegisterIDs(a, b) + }) +} + +func sortRegisterEntries(registerEntries []flow.RegisterEntry) { + slices.SortFunc(registerEntries, func(a, b flow.RegisterEntry) int { + return compareRegisterIDs(a.Key, b.Key) + }) +} + +type blockSnapshot struct { + cache *debug.InMemoryRegisterCache + backing snapshot.StorageSnapshot +} + +var _ snapshot.StorageSnapshot = (*blockSnapshot)(nil) + +func newBlockSnapshot(backing snapshot.StorageSnapshot) 
*blockSnapshot { + cache := debug.NewInMemoryRegisterCache() + return &blockSnapshot{ + cache: cache, + backing: backing, + } +} + +func (s *blockSnapshot) Get(id flow.RegisterID) (flow.RegisterValue, error) { + data, found := s.cache.Get(id.Key, id.Owner) + if found { + return data, nil + } + + return s.backing.Get(id) +} + +func (s *blockSnapshot) Set(id flow.RegisterID, value flow.RegisterValue) { + s.cache.Set(id.Key, id.Owner, value) +} diff --git a/cmd/util/cmd/diff-states/cmd.go b/cmd/util/cmd/diff-states/cmd.go new file mode 100644 index 00000000000..62e8ce2ce7b --- /dev/null +++ b/cmd/util/cmd/diff-states/cmd.go @@ -0,0 +1,742 @@ +package diff_states + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "slices" + + "github.com/dustin/go-humanize/english" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" + + "github.com/onflow/flow-go/cmd/util/ledger/migrations" + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" + moduleUtil "github.com/onflow/flow-go/module/util" +) + +var ( + flagOutputDirectory string + flagPayloads1 string + flagPayloads2 string + flagState1 string + flagState2 string + flagStateCommitment1 string + flagStateCommitment2 string + flagMode string + flagAlwaysDiffValues bool + flagExcludeRandomBeaconHistory bool + flagNWorker int + flagChain string +) + +var Cmd = &cobra.Command{ + Use: "diff-states", + Short: "Compares the given states", + Run: run, +} + +const ReporterName = "state-diff" + +type state uint8 + +const ( + oldState state = 1 + newState state = 2 +) + +func init() { + + // Input 1 + + Cmd.Flags().StringVar( + &flagPayloads1, + "payloads-1", + "", + "Input payload file name 1", + ) + + Cmd.Flags().StringVar( + &flagState1, + "state-1", + "", + "Input state file name 1", + ) + Cmd.Flags().StringVar( + &flagStateCommitment1, + "state-commitment-1", + "", + "Input state commitment 1", + ) + + // Input 2 + + Cmd.Flags().StringVar( + &flagPayloads2, + "payloads-2", + "", + "Input payload file name 2", + ) + + Cmd.Flags().StringVar( + &flagState2, + "state-2", + "", + "Input state file name 2", + ) + + Cmd.Flags().StringVar( + &flagStateCommitment2, + "state-commitment-2", + "", + "Input state commitment 2", + ) + + // Other + + Cmd.Flags().StringVar( + &flagOutputDirectory, + "output-directory", + "", + "Output directory", + ) + _ = Cmd.MarkFlagRequired("output-directory") + + Cmd.Flags().StringVar( + &flagMode, + "mode", + "values", + "one of 'values', 'accounts', or 'raw'; to diff values, accounts, or raw bytes. default is 'values'", + ) + + Cmd.Flags().BoolVar( + &flagAlwaysDiffValues, + "always-diff-values", + false, + "always diff on value level. 
useful for testing iteration, e.g. by diffing the same state.",
+	)
+
+	Cmd.Flags().IntVar(
+		&flagNWorker,
+		"n-worker",
+		10,
+		"number of workers to use",
+	)
+
+	Cmd.Flags().StringVar(
+		&flagChain,
+		"chain",
+		"",
+		"Chain name",
+	)
+	_ = Cmd.MarkFlagRequired("chain")
+
+	Cmd.Flags().BoolVar(
+		&flagExcludeRandomBeaconHistory,
+		"exclude-randombeaconhistory",
+		false,
+		"exclude random beacon history",
+	)
+}
+
+const (
+	randomBeaconHistoryDomain    = common.StorageDomainContract
+	randomBeaconHistoryDomainKey = interpreter.StringStorageMapKey("RandomBeaconHistory")
+)
+
+type mode uint8
+
+const (
+	modeValues mode = iota
+	modeAccounts
+	modeRaw
+)
+
+var modeByName = map[string]mode{
+	"values":   modeValues,
+	"accounts": modeAccounts,
+	"raw":      modeRaw,
+}
+
+func run(*cobra.Command, []string) {
+
+	chainID := flow.ChainID(flagChain)
+	// Validate chain ID
+	_ = chainID.Chain()
+
+	if flagPayloads1 == "" && flagState1 == "" {
+		log.Fatal().Msg("Either --payloads-1 or --state-1 must be provided")
+	} else if flagPayloads1 != "" && flagState1 != "" {
+		log.Fatal().Msg("Only one of --payloads-1 or --state-1 must be provided")
+	}
+	if flagState1 != "" && flagStateCommitment1 == "" {
+		log.Fatal().Msg("--state-commitment-1 must be provided when --state-1 is provided")
+	}
+
+	if flagPayloads2 == "" && flagState2 == "" {
+		log.Fatal().Msg("Either --payloads-2 or --state-2 must be provided")
+	} else if flagPayloads2 != "" && flagState2 != "" {
+		log.Fatal().Msg("Only one of --payloads-2 or --state-2 must be provided")
+	}
+	if flagState2 != "" && flagStateCommitment2 == "" {
+		log.Fatal().Msg("--state-commitment-2 must be provided when --state-2 is provided")
+	}
+
+	mode, ok := modeByName[flagMode]
+	if !ok {
+		modeNames := make([]string, 0, len(modeByName))
+		for name := range modeByName {
+			modeNames = append(modeNames, fmt.Sprintf("%q", name))
+		}
+		log.Fatal().Msgf(
+			"--mode must be one of %s",
+			english.OxfordWordSeries(modeNames, "or"),
+		)
+	}
+
+	if flagExcludeRandomBeaconHistory {
+		log.Info().Msg("--exclude-randombeaconhistory is set to exclude random beacon history")
+	}
+
+	var acctsToSkipForCadenceValueDiff []string
+
+	// Skip the EVM storage account when diffing Cadence values.
+	if mode == modeValues {
+		systemContracts := systemcontracts.SystemContractsForChain(chainID)
+
+		acctsToSkipForCadenceValueDiff = append(
+			acctsToSkipForCadenceValueDiff,
+			flow.AddressToRegisterOwner(systemContracts.EVMStorage.Address),
+		)
+	}
+
+	rw := reporters.NewReportFileWriterFactoryWithFormat(flagOutputDirectory, log.Logger, reporters.ReportFormatJSONL).
+		ReportWriter(ReporterName)
+	defer rw.Close()
+
+	var registers1, registers2 *registers.ByAccount
+	{
+		// Load payloads and create registers.
+		// Define in a block so that the memory is released after the registers are created.
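+		// (Once this block exits, the payload slices become unreachable and can be
+		// garbage-collected; only the register maps built from them are retained.)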
+ payloads1, payloads2 := loadPayloads() + + payloadCount1 := len(payloads1) + payloadCount2 := len(payloads2) + if payloadCount1 != payloadCount2 { + log.Warn().Msgf( + "Payloads files have different number of payloads: %d vs %d", + payloadCount1, + payloadCount2, + ) + } + + registers1, registers2 = payloadsToRegisters(payloads1, payloads2) + + accountCount1 := registers1.AccountCount() + accountCount2 := registers2.AccountCount() + if accountCount1 != accountCount2 { + log.Warn().Msgf( + "Registers have different number of accounts: %d vs %d", + accountCount1, + accountCount2, + ) + } + } + + err := diff(registers1, registers2, chainID, rw, flagNWorker, mode, acctsToSkipForCadenceValueDiff) + if err != nil { + log.Warn().Err(err).Msgf("failed to diff registers") + } +} + +func loadPayloads() (payloads1, payloads2 []*ledger.Payload) { + + log.Info().Msg("Loading payloads") + + var group errgroup.Group + + group.Go(func() (err error) { + if flagPayloads1 != "" { + _, payloads1, err = util.ReadPayloadFile(log.Logger, flagPayloads1) + } else { + log.Info().Msg("Reading first trie") + + stateCommitment := util.ParseStateCommitment(flagStateCommitment1) + payloads1, err = util.ReadTrieForPayloads(flagState1, stateCommitment) + } + return + }) + + group.Go(func() (err error) { + if flagPayloads2 != "" { + _, payloads2, err = util.ReadPayloadFile(log.Logger, flagPayloads2) + } else { + log.Info().Msg("Reading second trie") + + stateCommitment := util.ParseStateCommitment(flagStateCommitment2) + payloads2, err = util.ReadTrieForPayloads(flagState2, stateCommitment) + } + return + }) + + err := group.Wait() + if err != nil { + log.Fatal().Err(err).Msg("failed to read payloads") + } + + log.Info().Msg("Finished loading payloads") + + return +} + +func payloadsToRegisters(payloads1, payloads2 []*ledger.Payload) (registers1, registers2 *registers.ByAccount) { + + log.Info().Msg("Creating registers from payloads") + + var group errgroup.Group + + group.Go(func() (err error) { + log.Info().Msgf("Creating registers from first payloads (%d)", len(payloads1)) + + registers1, err = registers.NewByAccountFromPayloads(payloads1) + + log.Info().Msgf( + "Created %d registers from payloads (%d accounts)", + registers1.Count(), + registers1.AccountCount(), + ) + + return + }) + + group.Go(func() (err error) { + log.Info().Msgf("Creating registers from second payloads (%d)", len(payloads2)) + + registers2, err = registers.NewByAccountFromPayloads(payloads2) + + log.Info().Msgf( + "Created %d registers from payloads (%d accounts)", + registers2.Count(), + registers2.AccountCount(), + ) + + return + }) + + err := group.Wait() + if err != nil { + log.Fatal().Err(err).Msg("failed to create registers from payloads") + } + + log.Info().Msg("Finished creating registers from payloads") + + return +} + +var accountsDiffer = errors.New("accounts differ") + +func diffAccount( + owner string, + accountRegisters1 *registers.AccountRegisters, + accountRegisters2 *registers.AccountRegisters, + chainID flow.ChainID, + rw reporters.ReportWriter, + mode mode, + acctsToSkip []string, + isValueIncludedFunc migrations.IsValueIncludedFunc, +) (err error) { + + diffValues := flagAlwaysDiffValues + + err = accountRegisters1.ForEach(func(owner, key string, value1 []byte) error { + var value2 []byte + value2, err = accountRegisters2.Get(owner, key) + if err != nil { + return err + } + + if !bytes.Equal(value1, value2) { + + if mode == modeRaw { + rw.Write(rawDiff{ + Owner: owner, + Key: key, + Value1: value1, + Value2: value2, + }) + } 
else { + // stop on first difference in accounts + return accountsDiffer + } + } + + return nil + }) + if err != nil { + accountsDiffer := errors.Is(err, accountsDiffer) + if !accountsDiffer { + return err + } + + switch mode { + case modeRaw: + // NO-OP + case modeAccounts: + rw.Write(accountDiff{ + Owner: owner, + }) + case modeValues: + diffValues = true + } + } + + if diffValues && !slices.Contains(acctsToSkip, owner) { + address, err := common.BytesToAddress([]byte(owner)) + if err != nil { + return err + } + + migrations.NewCadenceValueDiffReporter( + address, + chainID, + rw, + true, + flagNWorker/2, + ).DiffStates( + accountRegisters1, + accountRegisters2, + common.AllStorageDomains, + isValueIncludedFunc, + ) + } + + return nil +} + +func diff( + registers1 *registers.ByAccount, + registers2 *registers.ByAccount, + chainID flow.ChainID, + rw reporters.ReportWriter, + nWorkers int, + mode mode, + acctsToSkip []string, +) error { + log.Info().Msgf("Diffing %d accounts", registers1.AccountCount()) + + randomBeaconHistoryAddress := randomBeaconHistoryAddressForChain(chainID) + + if registers1.AccountCount() < nWorkers { + nWorkers = registers1.AccountCount() + } + + logAccount := moduleUtil.LogProgress( + log.Logger, + moduleUtil.DefaultLogProgressConfig( + "processing account group", + registers1.AccountCount(), + ), + ) + + isValueIncludedFunc := alwaysIncludeValue + if flagExcludeRandomBeaconHistory { + isValueIncludedFunc = excludeRandomBeaconHistory(randomBeaconHistoryAddress) + } + + if nWorkers <= 1 { + foundAccountCountInRegisters2 := 0 + + _ = registers1.ForEachAccount(func(accountRegisters1 *registers.AccountRegisters) (err error) { + owner := accountRegisters1.Owner() + + if !registers2.HasAccountOwner(owner) { + rw.Write(accountMissing{ + Owner: owner, + State: int(newState), + }) + + return nil + } + + foundAccountCountInRegisters2++ + + accountRegisters2 := registers2.AccountRegisters(owner) + + err = diffAccount( + owner, + accountRegisters1, + accountRegisters2, + chainID, + rw, + mode, + acctsToSkip, + isValueIncludedFunc, + ) + if err != nil { + log.Warn().Err(err).Msgf("failed to diff account %x", []byte(owner)) + } + + logAccount(1) + + return nil + }) + + if foundAccountCountInRegisters2 < registers2.AccountCount() { + _ = registers2.ForEachAccount(func(accountRegisters2 *registers.AccountRegisters) error { + owner := accountRegisters2.Owner() + if !registers1.HasAccountOwner(owner) { + rw.Write(accountMissing{ + Owner: owner, + State: int(oldState), + }) + } + return nil + }) + } + + return nil + } + + type job struct { + owner string + accountRegisters1 *registers.AccountRegisters + accountRegisters2 *registers.AccountRegisters + } + + type result struct { + owner string + err error + } + + jobs := make(chan job, nWorkers) + + results := make(chan result, nWorkers) + + g, ctx := errgroup.WithContext(context.Background()) + + // Launch goroutines to diff accounts + for i := 0; i < nWorkers; i++ { + g.Go(func() (err error) { + for job := range jobs { + err := diffAccount( + job.owner, + job.accountRegisters1, + job.accountRegisters2, + chainID, + rw, + mode, + acctsToSkip, + isValueIncludedFunc, + ) + + select { + case results <- result{owner: job.owner, err: err}: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil + }) + } + + // Launch goroutine to wait for workers and close result channel + go func() { + _ = g.Wait() + close(results) + }() + + // Launch goroutine to send account registers to jobs channel + go func() { + defer close(jobs) + + 
foundAccountCountInRegisters2 := 0 + + _ = registers1.ForEachAccount(func(accountRegisters1 *registers.AccountRegisters) (err error) { + owner := accountRegisters1.Owner() + if !registers2.HasAccountOwner(owner) { + rw.Write(accountMissing{ + Owner: owner, + State: int(newState), + }) + + return nil + } + + foundAccountCountInRegisters2++ + + accountRegisters2 := registers2.AccountRegisters(owner) + + jobs <- job{ + owner: owner, + accountRegisters1: accountRegisters1, + accountRegisters2: accountRegisters2, + } + + return nil + }) + + if foundAccountCountInRegisters2 < registers2.AccountCount() { + _ = registers2.ForEachAccount(func(accountRegisters2 *registers.AccountRegisters) (err error) { + owner := accountRegisters2.Owner() + if !registers1.HasAccountOwner(owner) { + rw.Write(accountMissing{ + Owner: owner, + State: int(oldState), + }) + } + return nil + }) + } + }() + + // Gather results + for result := range results { + logAccount(1) + if result.err != nil { + log.Warn().Err(result.err).Msgf("failed to diff account %x", []byte(result.owner)) + } + } + + log.Info().Msgf("Finished diffing accounts, waiting for goroutines...") + + if err := g.Wait(); err != nil { + return err + } + + return nil +} + +type rawDiff struct { + Owner string + Key string + Value1 []byte + Value2 []byte +} + +var _ json.Marshaler = rawDiff{} + +func (e rawDiff) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Kind string `json:"kind"` + Owner string `json:"owner"` + Key string `json:"key"` + Value1 string `json:"value1"` + Value2 string `json:"value2"` + }{ + Kind: "raw-diff", + Owner: hex.EncodeToString([]byte(e.Owner)), + Key: hex.EncodeToString([]byte(e.Key)), + Value1: hex.EncodeToString(e.Value1), + Value2: hex.EncodeToString(e.Value2), + }) +} + +type accountDiff struct { + Owner string +} + +var _ json.Marshaler = accountDiff{} + +func (e accountDiff) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Kind string `json:"kind"` + Owner string `json:"owner"` + }{ + Kind: "account-diff", + Owner: hex.EncodeToString([]byte(e.Owner)), + }) +} + +type accountMissing struct { + Owner string + State int +} + +var _ json.Marshaler = accountMissing{} + +func (e accountMissing) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Kind string `json:"kind"` + Owner string `json:"owner"` + State int `json:"state"` + }{ + Kind: "account-missing", + Owner: hex.EncodeToString([]byte(e.Owner)), + State: e.State, + }) +} + +type countDiff struct { + Owner string + State1 int + State2 int +} + +var _ json.Marshaler = countDiff{} + +func (e countDiff) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Kind string `json:"kind"` + Owner string `json:"owner"` + State1 int `json:"state1"` + State2 int `json:"state2"` + }{ + Kind: "count-diff", + Owner: hex.EncodeToString([]byte(e.Owner)), + State1: e.State1, + State2: e.State2, + }) +} + +func isRandomBeaconHistory(randomBeaconHistoryAddress, address common.Address, domain common.StorageDomain, key any) bool { + if randomBeaconHistoryAddress.Compare(address) != 0 { + return false + } + + if domain != randomBeaconHistoryDomain { + return false + } + + switch key := key.(type) { + case interpreter.StringAtreeValue: + return interpreter.StringStorageMapKey(key) == randomBeaconHistoryDomainKey + + case interpreter.StringStorageMapKey: + return key == randomBeaconHistoryDomainKey + + default: + return false + } +} + +func randomBeaconHistoryAddressForChain(chainID flow.ChainID) common.Address { + sc := 
systemcontracts.SystemContractsForChain(chainID) + return common.Address(sc.RandomBeaconHistory.Address) +} + +func excludeRandomBeaconHistory(randomBeaconHistoryAddress common.Address) migrations.IsValueIncludedFunc { + return func(address common.Address, domain common.StorageDomain, key any) bool { + foundRandomBeaconHistory := isRandomBeaconHistory(randomBeaconHistoryAddress, address, domain, key) + + if foundRandomBeaconHistory { + log.Info().Msgf("excluding random beacon history in account %s, domain %s, key %v", address, domain.Identifier(), key) + } + + return !foundRandomBeaconHistory + } +} + +func alwaysIncludeValue(common.Address, common.StorageDomain, any) bool { + return true +} diff --git a/cmd/util/cmd/diff-states/diff_states_test.go b/cmd/util/cmd/diff-states/diff_states_test.go new file mode 100644 index 00000000000..e8b85770451 --- /dev/null +++ b/cmd/util/cmd/diff-states/diff_states_test.go @@ -0,0 +1,171 @@ +package diff_states + +import ( + "encoding/json" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func newPayload(owner flow.Address, key string, value []byte) *ledger.Payload { + registerID := flow.NewRegisterID(owner, key) + ledgerKey := convert.RegisterIDToLedgerKey(registerID) + return ledger.NewPayload(ledgerKey, value) +} + +func TestDiffStates(t *testing.T) { + + unittest.RunWithTempDir(t, func(datadir string) { + + file1 := filepath.Join(datadir, "first.payloads") + file2 := filepath.Join(datadir, "second.payloads") + + payloads1 := []*ledger.Payload{ + // account 1 + newPayload(flow.Address{1}, "a", []byte{2}), + newPayload(flow.Address{1}, "b", []byte{3}), + newPayload(flow.Address{1}, "c", []byte{4}), + // account 2 + newPayload(flow.Address{2}, "d", []byte{5}), + } + payloads2 := []*ledger.Payload{ + // account 1, different values for key b and c + newPayload(flow.Address{1}, "a", []byte{2}), + newPayload(flow.Address{1}, "b", []byte{5}), + newPayload(flow.Address{1}, "c", []byte{6}), + // account 3, missing in payloads1 + newPayload(flow.Address{3}, "d", []byte{6}), + // account 4, missing in payloads1 + newPayload(flow.Address{4}, "e", []byte{7}), + } + + numOfPayloadWritten, err := util.CreatePayloadFile(zerolog.Nop(), file1, payloads1, nil, false) + require.NoError(t, err) + require.Equal(t, len(payloads1), numOfPayloadWritten) + + numOfPayloadWritten, err = util.CreatePayloadFile(zerolog.Nop(), file2, payloads2, nil, false) + require.NoError(t, err) + require.Equal(t, len(payloads2), numOfPayloadWritten) + + test := func(t *testing.T, mode string) []string { + + Cmd.SetArgs([]string{ + "--payloads-1", file1, + "--payloads-2", file2, + "--chain", string(flow.Emulator), + "--output-directory", datadir, + "--mode", mode, + }) + + err = Cmd.Execute() + require.NoError(t, err) + + var reportPath string + err = filepath.Walk( + datadir, + func(path string, info fs.FileInfo, err error) error { + if path != datadir && info.IsDir() { + return filepath.SkipDir + } + if strings.HasPrefix(filepath.Base(path), ReporterName) { + reportPath = path + return filepath.SkipAll + } + return err + }, + ) + require.NoError(t, err) + require.NotEmpty(t, reportPath) + + report, err := os.Open(reportPath) + 
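Aside (editor's sketch, not part of the patch): the report being opened here is a JSONL stream of kind-tagged records, matching the MarshalJSON implementations in cmd.go above. A minimal standalone consumer might look as follows; the report path is hypothetical, and json.Decoder handles concatenated JSON values directly, so no line splitting is needed.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Open("diff-report.jsonl") // hypothetical report path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	dec := json.NewDecoder(f)
	for {
		// Every record carries a "kind" discriminator; unknown fields are ignored.
		var rec struct {
			Kind  string `json:"kind"`
			Owner string `json:"owner"`
			State int    `json:"state"`
		}
		err := dec.Decode(&rec)
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		switch rec.Kind {
		case "account-missing":
			fmt.Printf("account %s is missing from state %d\n", rec.Owner, rec.State)
		default: // raw-diff, account-diff, count-diff
			fmt.Printf("%s in account %s\n", rec.Kind, rec.Owner)
		}
	}
}
```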
require.NoError(t, err) + + var msgs []string + decoder := json.NewDecoder(report) + for { + var msg json.RawMessage + err = decoder.Decode(&msg) + if err == io.EOF { + break + } + require.NoError(t, err) + + msgs = append(msgs, string(msg)) + } + + return msgs + } + + t.Run("raw", func(t *testing.T) { + + msgs := test(t, "raw") + + assert.Equal(t, 5, len(msgs)) + assert.Containsf(t, + msgs, + `{"kind":"account-missing","owner":"0200000000000000","state":2}`, + "diff report contains account-missing for 0200000000000000", + ) + assert.Containsf(t, + msgs, + `{"kind":"account-missing","owner":"0300000000000000","state":1}`, + "diff report contains account-missing for 0300000000000000", + ) + assert.Containsf(t, + msgs, + `{"kind":"account-missing","owner":"0400000000000000","state":1}`, + "diff report contains account-missing for 0400000000000000", + ) + assert.Containsf(t, + msgs, + `{"kind":"raw-diff","owner":"0100000000000000","key":"62","value1":"03","value2":"05"}`, + "diff report contains raw-diff", + ) + assert.Containsf(t, + msgs, + `{"kind":"raw-diff","owner":"0100000000000000","key":"63","value1":"04","value2":"06"}`, + "diff report contains raw-diff", + ) + }) + + t.Run("accounts", func(t *testing.T) { + + msgs := test(t, "accounts") + + assert.Equal(t, 4, len(msgs)) + assert.Containsf(t, + msgs, + `{"kind":"account-missing","owner":"0200000000000000","state":2}`, + "diff report contains account-missing for 0200000000000000", + ) + assert.Containsf(t, + msgs, + `{"kind":"account-missing","owner":"0300000000000000","state":1}`, + "diff report contains account-missing for 0300000000000000", + ) + assert.Containsf(t, + msgs, + `{"kind":"account-missing","owner":"0400000000000000","state":1}`, + "diff report contains account-missing for 0400000000000000", + ) + assert.Containsf(t, + msgs, + `{"kind":"account-diff","owner":"0100000000000000"}`, + "diff report contains account-diff", + ) + }) + }) +} diff --git a/cmd/util/cmd/diffkeys/cmd.go b/cmd/util/cmd/diffkeys/cmd.go new file mode 100644 index 00000000000..e79c09ac948 --- /dev/null +++ b/cmd/util/cmd/diffkeys/cmd.go @@ -0,0 +1,507 @@ +package diffkeys + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/onflow/cadence/common" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" + + "github.com/onflow/flow-go/cmd/util/ledger/migrations" + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" + moduleUtil "github.com/onflow/flow-go/module/util" +) + +var ( + flagOutputDirectory string + flagPayloadsV3 string + flagPayloadsV4 string + flagStateV3 string + flagStateV4 string + flagStateCommitmentV3 string + flagStateCommitmentV4 string + flagNWorker int + flagChain string +) + +var Cmd = &cobra.Command{ + Use: "diff-keys", + Short: "Compare account public keys in the given state-v3 with state-v4 and output to JSONL file (empty file if no difference)", + Run: run, +} + +const ReporterName = "key-diff" + +type stateType uint8 + +const ( + oldState stateType = 1 + newState stateType = 2 +) + +func init() { + + // Input with account public keys in v3 format + + Cmd.Flags().StringVar( + &flagPayloadsV3, + "payloads-v3", + "", + "Input payload file name with account public keys in v3 format", + ) + + Cmd.Flags().StringVar( + &flagStateV3, + "state-v3", + "", + "Input state file name with 
account public keys in v3 format", + ) + Cmd.Flags().StringVar( + &flagStateCommitmentV3, + "state-commitment-v3", + "", + "Input state commitment for state-v3", + ) + + // Input with account public keys in v4 format + + Cmd.Flags().StringVar( + &flagPayloadsV4, + "payloads-v4", + "", + "Input payload file name with account public keys in v4 format", + ) + + Cmd.Flags().StringVar( + &flagStateV4, + "state-v4", + "", + "Input state file name with account public keys in v4 format", + ) + + Cmd.Flags().StringVar( + &flagStateCommitmentV4, + "state-commitment-v4", + "", + "Input state commitment for state-v4", + ) + + // Other + + Cmd.Flags().StringVar( + &flagOutputDirectory, + "output-directory", + "", + "Output directory", + ) + _ = Cmd.MarkFlagRequired("output-directory") + + Cmd.Flags().IntVar( + &flagNWorker, + "n-worker", + 10, + "number of workers to use", + ) + + Cmd.Flags().StringVar( + &flagChain, + "chain", + "", + "Chain name", + ) + _ = Cmd.MarkFlagRequired("chain") +} + +func run(*cobra.Command, []string) { + + chainID := flow.ChainID(flagChain) + // Validate chain ID + _ = chainID.Chain() + + if flagPayloadsV3 == "" && flagStateV3 == "" { + log.Fatal().Msg("Either --payloads-v3 or --state-v3 must be provided") + } else if flagPayloadsV3 != "" && flagStateV3 != "" { + log.Fatal().Msg("Only one of --payloads-v3 or --state-v3 must be provided") + } + if flagStateV3 != "" && flagStateCommitmentV3 == "" { + log.Fatal().Msg("--state-commitment-v3 must be provided when --state-v3 is provided") + } + + if flagPayloadsV4 == "" && flagStateV4 == "" { + log.Fatal().Msg("Either --payloads-v4 or --state-v4 must be provided") + } else if flagPayloadsV4 != "" && flagStateV4 != "" { + log.Fatal().Msg("Only one of --payloads-v4 or --state-v4 must be provided") + } + if flagStateV4 != "" && flagStateCommitmentV4 == "" { + log.Fatal().Msg("--state-commitment-v4 must be provided when --state-v4 is provided") + } + + rw := reporters.NewReportFileWriterFactoryWithFormat(flagOutputDirectory, log.Logger, reporters.ReportFormatJSONL). + ReportWriter(ReporterName) + defer rw.Close() + + var registersV3, registersV4 *registers.ByAccount + { + // Load payloads and create registers. + // Define in a block, so that the memory is released after the registers are created. 
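Aside (editor's sketch, not part of the patch): the input validation in run() above applies the same rule to each state version: exactly one of a payloads file or a state directory, plus a state commitment whenever a trie is read. Factored out, with the helper name and suffix parameter purely illustrative, it could look like this:

```go
package main

import "fmt"

// validateStateInput enforces the rules used by run() above for one input:
// exactly one of payloads/state, and a commitment whenever state is given.
func validateStateInput(payloads, state, stateCommitment, suffix string) error {
	switch {
	case payloads == "" && state == "":
		return fmt.Errorf("either --payloads-%s or --state-%s must be provided", suffix, suffix)
	case payloads != "" && state != "":
		return fmt.Errorf("only one of --payloads-%s or --state-%s must be provided", suffix, suffix)
	case state != "" && stateCommitment == "":
		return fmt.Errorf("--state-commitment-%s must be provided when --state-%s is provided", suffix, suffix)
	}
	return nil
}

func main() {
	// Both inputs set for v3: rejected.
	fmt.Println(validateStateInput("payloads.bin", "/data/state", "", "v3"))
}
```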
+ payloadsV3, payloadsV4 := loadPayloads() + + registersV3, registersV4 = payloadsToRegisters(payloadsV3, payloadsV4) + + accountCountV3 := registersV3.AccountCount() + accountCountV4 := registersV4.AccountCount() + if accountCountV3 != accountCountV4 { + log.Warn().Msgf( + "Registers have different number of accounts: %d vs %d", + accountCountV3, + accountCountV4, + ) + } + } + + err := diff(registersV3, registersV4, chainID, rw, flagNWorker) + if err != nil { + log.Warn().Err(err).Msgf("failed to diff registers") + } +} + +func loadPayloads() (payloads1, payloads2 []*ledger.Payload) { + + log.Info().Msg("Loading payloads") + + var group errgroup.Group + + group.Go(func() (err error) { + if flagPayloadsV3 != "" { + log.Info().Msgf("Loading v3 payloads from file at %v", flagPayloadsV3) + + _, payloads1, err = util.ReadPayloadFile(log.Logger, flagPayloadsV3) + if err != nil { + err = fmt.Errorf("failed to load v3 payload file: %w", err) + } + } else { + log.Info().Msgf("Reading v3 trie with state commitment %s", flagStateCommitmentV3) + + stateCommitment := util.ParseStateCommitment(flagStateCommitmentV3) + payloads1, err = util.ReadTrieForPayloads(flagStateV3, stateCommitment) + if err != nil { + err = fmt.Errorf("failed to load v3 trie: %w", err) + } + } + return + }) + + group.Go(func() (err error) { + if flagPayloadsV4 != "" { + log.Info().Msgf("Loading v4 payloads from file at %v", flagPayloadsV4) + + _, payloads2, err = util.ReadPayloadFile(log.Logger, flagPayloadsV4) + if err != nil { + err = fmt.Errorf("failed to load v4 payload file: %w", err) + } + } else { + log.Info().Msgf("Reading v4 trie with state commitment %s", flagStateCommitmentV4) + + stateCommitment := util.ParseStateCommitment(flagStateCommitmentV4) + payloads2, err = util.ReadTrieForPayloads(flagStateV4, stateCommitment) + if err != nil { + err = fmt.Errorf("failed to load v4 trie: %w", err) + } + } + return + }) + + err := group.Wait() + if err != nil { + log.Fatal().Err(err).Msg("failed to read payloads") + } + + log.Info().Msg("Finished loading payloads") + + return +} + +func payloadsToRegisters(payloads1, payloads2 []*ledger.Payload) (registers1, registers2 *registers.ByAccount) { + + log.Info().Msg("Creating registers from payloads") + + var group errgroup.Group + + group.Go(func() (err error) { + log.Info().Msgf("Creating registers from v3 payloads (%d)", len(payloads1)) + + registers1, err = registers.NewByAccountFromPayloads(payloads1) + if err != nil { + return fmt.Errorf("failed to create registers from v3 payloads: %w", err) + } + + log.Info().Msgf( + "Created %d registers from payloads (%d accounts)", + registers1.Count(), + registers1.AccountCount(), + ) + + return + }) + + group.Go(func() (err error) { + log.Info().Msgf("Creating registers from v4 payloads (%d)", len(payloads2)) + + registers2, err = registers.NewByAccountFromPayloads(payloads2) + if err != nil { + return fmt.Errorf("failed to create registers from v4 payloads: %w", err) + } + + log.Info().Msgf( + "Created %d registers from payloads (%d accounts)", + registers2.Count(), + registers2.AccountCount(), + ) + + return + }) + + err := group.Wait() + if err != nil { + log.Fatal().Err(err).Msg("failed to create registers from payloads") + } + + log.Info().Msg("Finished creating registers from payloads") + + return +} + +func diff( + registersV3 *registers.ByAccount, + registersV4 *registers.ByAccount, + chainID flow.ChainID, + rw reporters.ReportWriter, + nWorkers int, +) error { + log.Info().Msgf("Diffing accounts: v3 count %d, v4 count %d", 
registersV3.AccountCount(), registersV4.AccountCount()) + + if registersV3.AccountCount() < nWorkers { + nWorkers = registersV3.AccountCount() + } + + logAccount := moduleUtil.LogProgress( + log.Logger, + moduleUtil.DefaultLogProgressConfig( + "processing account group", + registersV3.AccountCount(), + ), + ) + + if nWorkers <= 1 { + foundAccountCountInRegistersV4 := 0 + + _ = registersV3.ForEachAccount(func(accountRegistersV3 *registers.AccountRegisters) (err error) { + owner := accountRegistersV3.Owner() + + if !registersV4.HasAccountOwner(owner) { + rw.Write(accountMissing{ + Owner: owner, + State: int(newState), + }) + + return nil + } + + foundAccountCountInRegistersV4++ + + accountRegistersV4 := registersV4.AccountRegisters(owner) + + err = diffAccount( + owner, + accountRegistersV3, + accountRegistersV4, + chainID, + rw, + ) + if err != nil { + log.Warn().Err(err).Msgf("failed to diff account %x", []byte(owner)) + } + + logAccount(1) + + return nil + }) + + if foundAccountCountInRegistersV4 < registersV4.AccountCount() { + + log.Warn().Msgf("finding missing accounts that exist in v4, but are missing in v3, count: %v", registersV4.AccountCount()-foundAccountCountInRegistersV4) + + _ = registersV4.ForEachAccount(func(accountRegistersV4 *registers.AccountRegisters) error { + owner := accountRegistersV4.Owner() + if !registersV3.HasAccountOwner(owner) { + rw.Write(accountMissing{ + Owner: owner, + State: int(oldState), + }) + } + return nil + }) + } + + return nil + } + + type job struct { + owner string + accountRegistersV3 *registers.AccountRegisters + accountRegistersV4 *registers.AccountRegisters + } + + type result struct { + owner string + err error + } + + jobs := make(chan job, nWorkers) + + results := make(chan result, nWorkers) + + g, ctx := errgroup.WithContext(context.Background()) + + // Launch goroutines to diff accounts + for i := 0; i < nWorkers; i++ { + g.Go(func() (err error) { + for job := range jobs { + err := diffAccount( + job.owner, + job.accountRegistersV3, + job.accountRegistersV4, + chainID, + rw, + ) + + select { + case results <- result{owner: job.owner, err: err}: + case <-ctx.Done(): + return ctx.Err() + } + } + return nil + }) + } + + // Launch goroutine to wait for workers and close result channel + go func() { + _ = g.Wait() + close(results) + }() + + // Launch goroutine to send account registers to jobs channel + go func() { + defer close(jobs) + + foundAccountCountInRegistersV4 := 0 + + _ = registersV3.ForEachAccount(func(accountRegistersV3 *registers.AccountRegisters) (err error) { + owner := accountRegistersV3.Owner() + if !registersV4.HasAccountOwner(owner) { + rw.Write(accountMissing{ + Owner: owner, + State: int(newState), + }) + + return nil + } + + foundAccountCountInRegistersV4++ + + accountRegistersV4 := registersV4.AccountRegisters(owner) + + jobs <- job{ + owner: owner, + accountRegistersV3: accountRegistersV3, + accountRegistersV4: accountRegistersV4, + } + + return nil + }) + + if foundAccountCountInRegistersV4 < registersV4.AccountCount() { + + log.Warn().Msgf("finding missing accounts that exist in v4, but are missing in v3, count: %v", registersV4.AccountCount()-foundAccountCountInRegistersV4) + + _ = registersV4.ForEachAccount(func(accountRegistersV4 *registers.AccountRegisters) (err error) { + owner := accountRegistersV4.Owner() + if !registersV3.HasAccountOwner(owner) { + rw.Write(accountMissing{ + Owner: owner, + State: int(oldState), + }) + } + return nil + }) + } + }() + + // Gather results + for result := range results { + 
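+ // The results channel is closed only after every worker has returned, so this + // loop ends once all jobs are processed; per-account failures are logged as + // warnings rather than aborting the whole diff.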
logAccount(1) + if result.err != nil { + log.Warn().Err(result.err).Msgf("failed to diff account %x", []byte(result.owner)) + } + } + + if err := g.Wait(); err != nil { + return err + } + + log.Info().Msgf("Finished diffing accounts") + + return nil +} + +func diffAccount( + owner string, + accountRegistersV3 *registers.AccountRegisters, + accountRegistersV4 *registers.AccountRegisters, + chainID flow.ChainID, + rw reporters.ReportWriter, +) (err error) { + address, err := common.BytesToAddress([]byte(owner)) + if err != nil { + return err + } + + migrations.NewAccountKeyDiffReporter( + address, + chainID, + rw, + ).DiffKeys( + accountRegistersV3, + accountRegistersV4, + ) + + return nil +} + +type accountMissing struct { + Owner string + State int +} + +var _ json.Marshaler = accountMissing{} + +func (e accountMissing) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Kind string `json:"kind"` + Owner string `json:"owner"` + State int `json:"state"` + }{ + Kind: "account-missing", + Owner: hex.EncodeToString([]byte(e.Owner)), + State: e.State, + }) +} diff --git a/cmd/util/cmd/epochs/cmd/deploy.go b/cmd/util/cmd/epochs/cmd/deploy.go index 32e3da4acf3..1f85a836624 100644 --- a/cmd/util/cmd/epochs/cmd/deploy.go +++ b/cmd/util/cmd/epochs/cmd/deploy.go @@ -121,7 +121,10 @@ func deployRun(cmd *cobra.Command, args []string) { func getDeployEpochTransactionArguments(snapshot *inmem.Snapshot) []cadence.Value { // current epoch - currentEpoch := snapshot.Epochs().Current() + currentEpoch, err := snapshot.Epochs().Current() + if err != nil { + log.Fatal().Err(err).Msgf("could not get current epoch") + } head, err := snapshot.Head() if err != nil { @@ -131,25 +134,13 @@ func getDeployEpochTransactionArguments(snapshot *inmem.Snapshot) []cadence.Valu epochContractName := systemcontracts.ContractNameEpoch // current epoch counter - currentEpochCounter, err := currentEpoch.Counter() - if err != nil { - log.Fatal().Err(err).Msgf("could not get `currentEpochCounter` from snapshot") - } + currentEpochCounter := currentEpoch.Counter() // get final view from snapshot - finalView, err := currentEpoch.FinalView() - if err != nil { - log.Fatal().Err(err).Msgf("could not get `finalView` for current epoch from snapshot") - } + finalView := currentEpoch.FinalView() - dkgPhase1FinalView, err := currentEpoch.DKGPhase1FinalView() - if err != nil { - log.Fatal().Err(err).Msgf("could not get `dkgPhase1FinalView` from snapshot") - } - dkgPhase2FinalView, err := currentEpoch.DKGPhase2FinalView() - if err != nil { - log.Fatal().Err(err).Msgf("could not get `dkgPhase2FinalView` from snapshot") - } + dkgPhase1FinalView := currentEpoch.DKGPhase1FinalView() + dkgPhase2FinalView := currentEpoch.DKGPhase2FinalView() numViewsInEpoch := (finalView + 1) - head.View numViewsInDKGPhase := dkgPhase2FinalView - dkgPhase1FinalView + 1 @@ -163,10 +154,7 @@ func getDeployEpochTransactionArguments(snapshot *inmem.Snapshot) []cadence.Valu numCollectorClusters := len(clustering) // random source - randomSource, err := currentEpoch.RandomSource() - if err != nil { - log.Fatal().Err(err).Msgf("could not get `randomSource` for current epoch from snapshot") - } + randomSource := currentEpoch.RandomSource() return convertDeployEpochTransactionArguments( epochContractName, @@ -254,19 +242,17 @@ func getDeployEpochTransactionText(snapshot *inmem.Snapshot) []byte { // root chain id and system contractsRegister chainID := head.ChainID - systemContracts, err := systemcontracts.SystemContractsForChain(chainID) - if err != nil { - 
log.Fatal().Err(err).Str("chain_id", chainID.String()).Msgf("could not get system contracts for chainID") - } + systemContracts := systemcontracts.SystemContractsForChain(chainID) + + env := systemContracts.AsTemplateEnv() + env.FungibleTokenAddress = flagFungibleTokenAddress + env.FlowTokenAddress = flagFlowTokenAddress + env.IDTableAddress = flagIDTableAddress + env.FlowFeesAddress = flagFlowFeesAddress // epoch contract name and get code for contract epochContractCode := contracts.FlowEpoch( - flagFungibleTokenAddress, - flagFlowTokenAddress, - flagIDTableAddress, - systemContracts.ClusterQC.Address.Hex(), - systemContracts.DKG.Address.Hex(), - flagFlowFeesAddress, + env, ) // convert the epoch contract code to an [UInt8] literal string that can be diff --git a/cmd/util/cmd/epochs/cmd/move_machine_acct.go b/cmd/util/cmd/epochs/cmd/move_machine_acct.go index 7f51867693b..f3443d63c5c 100644 --- a/cmd/util/cmd/epochs/cmd/move_machine_acct.go +++ b/cmd/util/cmd/epochs/cmd/move_machine_acct.go @@ -72,7 +72,7 @@ func moveMachineAcctRun(cmd *cobra.Command, args []string) { } // identities with machine accounts - machineAcctIdentities := identities.Filter(filter.HasRole(flow.RoleCollection, flow.RoleConsensus)) + machineAcctIdentities := identities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection, flow.RoleConsensus)) machineAcctFiles, err := os.ReadDir(flagMachineAccountsSrcDir) if err != nil { diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go new file mode 100644 index 00000000000..fb7392b10e9 --- /dev/null +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -0,0 +1,196 @@ +package cmd + +import ( + "context" + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" + epochcmdutil "github.com/onflow/flow-go/cmd/util/cmd/epochs/utils" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcclient" + "github.com/onflow/flow-go/state/protocol/inmem" +) + +// generateRecoverEpochTxArgsCmd represents a command to generate the data needed to submit an epoch-recovery transaction +// to the network when it is in EFM (epoch fallback mode). +// EFM can be exited only by a special service event, EpochRecover, which initially originates from a manual service account transaction. +// The full epoch data must be generated manually and submitted with this transaction in order for an +// EpochRecover event to be emitted. This command retrieves the current protocol state identities, computes the cluster assignment using those +// identities, generates the cluster QCs and retrieves the Random Beacon key vector of the last successful epoch. +// This recovery process has some constraints: +// - The RecoveryEpoch must have exactly the same consensus committee as participated in the most recent successful DKG. +// - The RecoveryEpoch must contain enough "internal" collection nodes so that all clusters contain a supermajority of "internal" collection nodes (same constraint as sporks) +var ( + generateRecoverEpochTxArgsCmd = &cobra.Command{ + Use: "efm-recover-tx-args", + Short: "Generates recover epoch transaction arguments", + Long: ` +Generates transaction arguments for the epoch recovery transaction. +The epoch recovery transaction is used to recover from any failure in the epoch transition process without requiring a spork. 
+This recovery process has some constraints: +- The RecoveryEpoch must have exactly the same consensus committee as participated in the most recent successful DKG. +- The RecoveryEpoch must contain enough "internal" collection nodes so that all clusters contain a supermajority of "internal" collection nodes (same constraint as sporks) +`, + Run: generateRecoverEpochTxArgs(getSnapshot), + } + + flagOut string + flagAnAddress string + flagAnPubkey string + flagAnInsecure bool + flagInternalNodePrivInfoDir string + flagNodeConfigJson string + flagCollectionClusters int + flagNumViewsInEpoch uint64 + flagNumViewsInStakingAuction uint64 + flagEpochCounter uint64 + flagRecoveryEpochTargetDuration uint64 + flagUnsafeAllowOverWrite bool + flagRootChainID string + flagExcludeNodeIDs []string + flagIncludeNodeIDs []string +) + +func init() { + rootCmd.AddCommand(generateRecoverEpochTxArgsCmd) + err := addGenerateRecoverEpochTxArgsCmdFlags() + if err != nil { + panic(err) + } +} + +func addGenerateRecoverEpochTxArgsCmdFlags() error { + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagOut, "out", "", "file to write tx args output") + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagAnAddress, "access-address", "", "the address of the access node used to retrieve the protocol snapshot") + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagRootChainID, "root-chain-id", "", "the root chain id") + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagAnPubkey, "access-network-key", "", "the network key of the access node used for client connections in hex string format") + generateRecoverEpochTxArgsCmd.Flags().BoolVar(&flagAnInsecure, "insecure", false, "set to true if the protocol snapshot should be retrieved from the insecure AN endpoint") + generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 0, + "number of collection clusters") + // Required parameters for network configuration and generation of root node identities. + // It is expected that these flags may point to a directory which includes extra node identities + // which are not allowed to participate in the recovery epoch. These identities will be excluded by this tool. + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagNodeConfigJson, "config", "", + "path to a JSON file containing multiple node configurations (fields Role, Address, Weight).") + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory containing the output from the `keygen` command for internal nodes."+ + " It is allowed for this directory to include extra internal nodes which are not currently eligible epoch participants (they will be excluded from the recovery epoch).") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 0, "length of each epoch measured in views") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 0, "length of the epoch staking phase measured in views") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagRecoveryEpochTargetDuration, "epoch-timing-recovery-duration", 0, "the target duration of the recovery epoch, in seconds") + // The following option allows the RecoveryEpoch specified by this command to overwrite an epoch which already exists in the smart contract. 
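Aside (editor's sketch, not part of the patch): the MarkFlagRequired calls a little further down repeat the same check-and-wrap block per flag; the same effect can be had with a small loop. markRequired is a hypothetical helper, not part of the patch; cobra's MarkFlagRequired returns an error exactly as used here.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// markRequired marks each named flag as required, wrapping the error so the
// failing flag name survives into the caller's log message.
func markRequired(cmd *cobra.Command, names ...string) error {
	for _, name := range names {
		if err := cmd.MarkFlagRequired(name); err != nil {
			return fmt.Errorf("failed to mark %s flag as required: %w", name, err)
		}
	}
	return nil
}

func main() {
	cmd := &cobra.Command{Use: "demo"}
	cmd.Flags().String("access-address", "", "access node address")
	if err := markRequired(cmd, "access-address", "root-chain-id"); err != nil {
		fmt.Println(err) // "root-chain-id" was never defined, so this reports it
	}
}
```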
+ // This is needed only if a previous recoverEpoch transaction was submitted and a race condition occurred such that: + // - the RecoveryEpoch in the admin transaction was accepted by the smart contract + // - the RecoveryEpoch service event (after sealing latency) was rejected by the Protocol State + generateRecoverEpochTxArgsCmd.Flags().BoolVar(&flagUnsafeAllowOverWrite, "unsafe-overwrite-epoch-data", false, "set to true if the resulting transaction is allowed to overwrite an already specified epoch in the smart contract.") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEpochCounter, "recovery-epoch-counter", 0, "the epoch counter for the recovery epoch") + generateRecoverEpochTxArgsCmd.Flags().StringArrayVar(&flagExcludeNodeIDs, "exclude-node-ids", nil, "list of node IDs to exclude from the recovery epoch (if they exist)") + generateRecoverEpochTxArgsCmd.Flags().StringArrayVar(&flagIncludeNodeIDs, "include-node-ids", nil, "list of node IDs to include in the recovery epoch") + + err := generateRecoverEpochTxArgsCmd.MarkFlagRequired("access-address") + if err != nil { + return fmt.Errorf("failed to mark access-address flag as required") + } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-length") + if err != nil { + return fmt.Errorf("failed to mark epoch-length flag as required") + } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-staking-phase-length") + if err != nil { + return fmt.Errorf("failed to mark epoch-staking-phase-length flag as required") + } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("collection-clusters") + if err != nil { + return fmt.Errorf("failed to mark collection-clusters flag as required") + } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-timing-recovery-duration") + if err != nil { + return fmt.Errorf("failed to mark epoch-timing-recovery-duration flag as required") + } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("root-chain-id") + if err != nil { + return fmt.Errorf("failed to mark root-chain-id flag as required") + } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("recovery-epoch-counter") + if err != nil { + return fmt.Errorf("failed to mark recovery-epoch-counter flag as required") + } + return nil +} + +func getSnapshot() *inmem.Snapshot { + // get flow client with secure client connection to download protocol snapshot from access node + config, err := grpcclient.NewFlowClientConfig(flagAnAddress, flagAnPubkey, flow.ZeroID, flagAnInsecure) + if err != nil { + log.Fatal().Err(err).Msg("failed to create flow client config") + } + + flowClient, err := grpcclient.FlowClient(config) + if err != nil { + log.Fatal().Err(err).Msg("failed to create flow client") + } + + snapshot, err := common.GetSnapshot(context.Background(), flowClient) + if err != nil { + log.Fatal().Err(err).Msg("failed to get protocol snapshot from access node") + } + + return snapshot +} + +// generateRecoverEpochTxArgs generates recover epoch transaction arguments from a root protocol state snapshot and writes them to a JSON file +func generateRecoverEpochTxArgs(getSnapshot func() *inmem.Snapshot) func(cmd *cobra.Command, args []string) { + return func(cmd *cobra.Command, args []string) { + excludeNodeIDs, err := flow.IdentifierListFromHex(flagExcludeNodeIDs) + if err != nil { + log.Fatal().Err(err).Msg("failed to parse node IDs provided via 'exclude-node-ids'") + } + includeNodeIDs, err := flow.IdentifierListFromHex(flagIncludeNodeIDs) + if err != nil { + log.Fatal().Err(err).Msg("failed to parse node IDs for additional inclusion provided via 
'include-node-ids'") + } + + // generate transaction arguments + txArgs, err := run.GenerateRecoverEpochTxArgs( + log, + flagInternalNodePrivInfoDir, + flagNodeConfigJson, + flagCollectionClusters, + flagEpochCounter, + flow.ChainID(flagRootChainID), + flagNumViewsInStakingAuction, + flagNumViewsInEpoch, + flagRecoveryEpochTargetDuration, + flagUnsafeAllowOverWrite, + excludeNodeIDs, + includeNodeIDs, + getSnapshot(), + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to generate recover epoch transaction arguments") + } + // encode to JSON + encodedTxArgs, err := epochcmdutil.EncodeArgs(txArgs) + if err != nil { + log.Fatal().Err(err).Msg("could not encode recover epoch transaction arguments") + } + + if flagOut == "" { + // write JSON args to stdout + _, err = cmd.OutOrStdout().Write(encodedTxArgs) + if err != nil { + log.Fatal().Err(err).Msg("could not write jsoncdc encoded arguments") + } + } else { + // write JSON args to file specified by flag + err := os.WriteFile(flagOut, encodedTxArgs, 0644) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("could not write jsoncdc encoded arguments to file %s", flagOut)) + } + log.Info().Msgf("wrote transaction args to output file %s", flagOut) + } + } +} diff --git a/cmd/util/cmd/epochs/cmd/recover_test.go b/cmd/util/cmd/epochs/cmd/recover_test.go new file mode 100644 index 00000000000..8cc552a81e3 --- /dev/null +++ b/cmd/util/cmd/epochs/cmd/recover_test.go @@ -0,0 +1,132 @@ +package cmd + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestRecoverEpochHappyPath ensures recover epoch transaction arguments are generated as expected. +func TestRecoverEpochHappyPath(t *testing.T) { + // tests that given the root snapshot, the command + // writes the expected arguments to stdout. + utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { + internalNodes, err := common.ReadFullInternalNodeInfos(log, internalPrivDir, configPath) + require.NoError(t, err) + partnerNodes, err := common.ReadFullPartnerNodeInfos(log, partnerWeights, partnerDir) + require.NoError(t, err) + + allNodeIds := make(flow.IdentityList, 0) + allNodeIdsCdc := make(map[cadence.String]*flow.Identity) + for _, node := range append(internalNodes, partnerNodes...) 
{ + allNodeIds = append(allNodeIds, node.Identity()) + allNodeIdsCdc[cadence.String(node.Identity().NodeID.String())] = node.Identity() + } + + // create a root snapshot + rootSnapshot := unittest.RootSnapshotFixture(allNodeIds) + snapshotFn := func() *inmem.Snapshot { return rootSnapshot } + + // get expected dkg information + currentEpoch, err := rootSnapshot.Epochs().Current() + require.NoError(t, err) + currentEpochDKG, err := currentEpoch.DKG() + require.NoError(t, err) + expectedDKGPubKeys := make(map[cadence.String]struct{}) + expectedDKGGroupKey := cadence.String(hex.EncodeToString(currentEpochDKG.GroupKey().Encode())) + for _, id := range allNodeIds { + if id.GetRole() == flow.RoleConsensus { + dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(id.GetNodeID()) + require.NoError(t, keyShareErr) + expectedDKGPubKeys[cadence.String(hex.EncodeToString(dkgPubKey.Encode()))] = struct{}{} + } + } + + // run command with overwritten stdout + stdout := bytes.NewBuffer(nil) + generateRecoverEpochTxArgsCmd.SetOut(stdout) + + flagInternalNodePrivInfoDir = internalPrivDir + flagNodeConfigJson = configPath + flagCollectionClusters = 2 + flagEpochCounter = 2 + flagRootChainID = flow.Localnet.String() + flagNumViewsInStakingAuction = 100 + flagNumViewsInEpoch = 4000 + + generateRecoverEpochTxArgs(snapshotFn)(generateRecoverEpochTxArgsCmd, nil) + + // read output from stdout + var outputTxArgs []interface{} + err = json.NewDecoder(stdout).Decode(&outputTxArgs) + require.NoError(t, err) + + // verify each argument + decodedValues := unittest.InterfaceToCdcValues(t, outputTxArgs) + currEpoch, err := rootSnapshot.Epochs().Current() + require.NoError(t, err) + finalView := currEpoch.FinalView() + currEpochTargetEndTime := currEpoch.TargetEndTime() + + // epoch counter + require.Equal(t, cadence.NewUInt64(flagEpochCounter), decodedValues[0]) + // epoch start view + require.Equal(t, cadence.NewUInt64(finalView+1), decodedValues[1]) + // staking phase end view + require.Equal(t, cadence.NewUInt64(finalView+flagNumViewsInStakingAuction), decodedValues[2]) + // epoch end view + require.Equal(t, cadence.NewUInt64(finalView+flagNumViewsInEpoch), decodedValues[3]) + // target duration + require.Equal(t, cadence.NewUInt64(flagRecoveryEpochTargetDuration), decodedValues[4]) + // target end time + require.Equal(t, cadence.NewUInt64(currEpochTargetEndTime+flagRecoveryEpochTargetDuration), decodedValues[5]) + // clusters: we cannot guarantee order of the cluster when we generate the test fixtures + // so, we ensure each cluster member is part of the full set of node ids + for _, cluster := range decodedValues[6].(cadence.Array).Values { + for _, nodeId := range cluster.(cadence.Array).Values { + _, ok := allNodeIdsCdc[nodeId.(cadence.String)] + require.True(t, ok) + } + } + // qcVoteData: we cannot guarantee order of the cluster when we generate the test fixtures + // so, we ensure each voter id that participated in a qc vote exists and is a collection node + for _, voteData := range decodedValues[7].(cadence.Array).Values { + fields := cadence.FieldsMappedByName(voteData.(cadence.Struct)) + for _, voterId := range fields["voterIDs"].(cadence.Array).Values { + id, ok := allNodeIdsCdc[voterId.(cadence.String)] + require.True(t, ok) + require.Equal(t, flow.RoleCollection, id.Role) + } + } + // dkg pub keys + for _, dkgPubKey := range decodedValues[8].(cadence.Array).Values { + _, ok := expectedDKGPubKeys[dkgPubKey.(cadence.String)] + require.True(t, ok) + } + // dkg group key + require.Equal(t, expectedDKGGroupKey, 
decodedValues[9].(cadence.String)) + // dkg index map + for _, pair := range decodedValues[10].(cadence.Dictionary).Pairs { + _, ok := allNodeIdsCdc[pair.Key.(cadence.String)] + require.True(t, ok) + } + // node ids + for _, nodeId := range decodedValues[11].(cadence.Array).Values { + _, ok := allNodeIdsCdc[nodeId.(cadence.String)] + require.True(t, ok) + } + // unsafeAllowOverWrite + require.Equal(t, cadence.NewBool(false), decodedValues[12]) + }) +} diff --git a/cmd/util/cmd/epochs/cmd/reset.go b/cmd/util/cmd/epochs/cmd/reset.go index 2a1469dab35..1ca172d625f 100644 --- a/cmd/util/cmd/epochs/cmd/reset.go +++ b/cmd/util/cmd/epochs/cmd/reset.go @@ -94,29 +94,21 @@ func resetRun(cmd *cobra.Command, args []string) { // extractResetEpochArgs extracts the required transaction arguments for the `resetEpoch` transaction func extractResetEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { - // get current epoch - epoch := snapshot.Epochs().Current() + epoch, err := snapshot.Epochs().Current() + if err != nil { + log.Fatal().Err(err).Msg("could not get current epoch") + } // Note: The epochCounter value expected by the smart contract is the epoch being // replaced, which is one less than the epoch beginning after the spork. - epochCounter, err := epoch.Counter() - if err != nil { - log.Fatal().Err(err).Msg("could not get epoch counter") - } - epochCounter = epochCounter - 1 + epochCounter := epoch.Counter() - 1 // read random source from epoch - randomSource, err := epoch.RandomSource() - if err != nil { - log.Fatal().Err(err).Msg("could not get random source from epoch") - } + randomSource := epoch.RandomSource() // read first view - firstView, err := epoch.FirstView() - if err != nil { - log.Fatal().Err(err).Msg("could not get first view from epoch") - } + firstView := epoch.FirstView() // determine staking auction end view based on dkg timing stakingEndView, err := getStakingAuctionEndView(epoch) @@ -125,10 +117,7 @@ func extractResetEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { } // read final view - finalView, err := epoch.FinalView() - if err != nil { - log.Fatal().Err(err).Msg("could not get final view from epoch") - } + finalView := epoch.FinalView() return convertResetEpochArgs(epochCounter, randomSource, firstView, stakingEndView, finalView) } @@ -142,15 +131,9 @@ func extractResetEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { // ^ ^ ^-dkgPhase2FinalView // | `-dkgPhase1FinalView // `-stakingEndView -func getStakingAuctionEndView(epoch protocol.Epoch) (uint64, error) { - dkgPhase1FinalView, err := epoch.DKGPhase1FinalView() - if err != nil { - return 0, err - } - dkgPhase2FinalView, err := epoch.DKGPhase2FinalView() - if err != nil { - return 0, err - } +func getStakingAuctionEndView(epoch protocol.CommittedEpoch) (uint64, error) { + dkgPhase1FinalView := epoch.DKGPhase1FinalView() + dkgPhase2FinalView := epoch.DKGPhase2FinalView() // sanity check if dkgPhase1FinalView >= dkgPhase2FinalView { diff --git a/cmd/util/cmd/epochs/cmd/reset_test.go b/cmd/util/cmd/epochs/cmd/reset_test.go index 25983e5cf61..30e7d0178f2 100644 --- a/cmd/util/cmd/epochs/cmd/reset_test.go +++ b/cmd/util/cmd/epochs/cmd/reset_test.go @@ -11,9 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/cadence" - jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/utils/unittest" @@ -50,7 +47,7 @@ func TestReset_LocalSnapshot(t 
*testing.T) { // compare to expected values expectedArgs := extractResetEpochArgs(rootSnapshot) - verifyArguments(t, expectedArgs, outputTxArgs) + unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) }) }) @@ -98,7 +95,7 @@ func TestReset_BucketSnapshot(t *testing.T) { rootSnapshot, err := getSnapshotFromBucket(fmt.Sprintf(rootSnapshotBucketURL, flagBucketNetworkName)) require.NoError(t, err) expectedArgs := extractResetEpochArgs(rootSnapshot) - verifyArguments(t, expectedArgs, outputTxArgs) + unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) }) // should output arguments to stdout, including specified payout @@ -120,7 +117,7 @@ func TestReset_BucketSnapshot(t *testing.T) { rootSnapshot, err := getSnapshotFromBucket(fmt.Sprintf(rootSnapshotBucketURL, flagBucketNetworkName)) require.NoError(t, err) expectedArgs := extractResetEpochArgs(rootSnapshot) - verifyArguments(t, expectedArgs, outputTxArgs) + unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) }) // with a missing snapshot, should log an error @@ -139,22 +136,6 @@ func TestReset_BucketSnapshot(t *testing.T) { }) } -func verifyArguments(t *testing.T, expected []cadence.Value, actual []interface{}) { - - for index, arg := range actual { - - // marshal to bytes - bz, err := json.Marshal(arg) - require.NoError(t, err) - - // parse cadence value - decoded, err := jsoncdc.Decode(nil, bz) - require.NoError(t, err) - - assert.Equal(t, expected[index], decoded) - } -} - func writeRootSnapshot(bootDir string, snapshot *inmem.Snapshot) error { rootSnapshotPath := filepath.Join(bootDir, bootstrap.PathRootProtocolStateSnapshot) return writeJSON(rootSnapshotPath, snapshot.Encodable()) diff --git a/cmd/util/cmd/epochs/cmd/templates.go b/cmd/util/cmd/epochs/cmd/templates.go index 7b05a95bb5f..85ab8baa212 100644 --- a/cmd/util/cmd/epochs/cmd/templates.go +++ b/cmd/util/cmd/epochs/cmd/templates.go @@ -21,7 +21,7 @@ transaction(name: String, FLOWsupplyIncreasePercentage: UFix64, randomSource: String) { - prepare(signer: AuthAccount) { + prepare(signer: auth(AddContract) &Account) { let currentBlock = getCurrentBlock() diff --git a/cmd/util/cmd/exec-data-json-export/block_exporter.go b/cmd/util/cmd/exec-data-json-export/block_exporter.go index 2e178d08af6..32efa2e78a5 100644 --- a/cmd/util/cmd/exec-data-json-export/block_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/block_exporter.go @@ -12,7 +12,8 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) type blockSummary struct { @@ -32,91 +33,97 @@ type blockSummary struct { // ExportBlocks exports blocks (note this only export blocks of the main chain and doesn't export forks) func ExportBlocks(blockID flow.Identifier, dbPath string, outputPath string) (flow.StateCommitment, error) { + var stateCommitment flow.StateCommitment // traverse backward from the given block (parent block) and fetch by blockHash - db := common.InitStorage(dbPath) - defer db.Close() - - cacheMetrics := &metrics.NoopCollector{} - headers := badger.NewHeaders(cacheMetrics, db) - index := badger.NewIndex(cacheMetrics, db) - guarantees := badger.NewGuarantees(cacheMetrics, db, badger.DefaultCacheSize) - seals := badger.NewSeals(cacheMetrics, db) - results := badger.NewExecutionResults(cacheMetrics, db) - receipts := badger.NewExecutionReceipts(cacheMetrics, db, results, 
badger.DefaultCacheSize) - payloads := badger.NewPayloads(db, index, guarantees, seals, receipts, results) - blocks := badger.NewBlocks(db, headers, payloads) - commits := badger.NewCommits(&metrics.NoopCollector{}, db) - - activeBlockID := blockID - outputFile := filepath.Join(outputPath, "blocks.jsonl") - - fi, err := os.Create(outputFile) - if err != nil { - return flow.DummyStateCommitment, fmt.Errorf("could not create block output file %w", err) - } - defer fi.Close() - - blockWriter := bufio.NewWriter(fi) - defer blockWriter.Flush() - - for { - header, err := headers.ByBlockID(activeBlockID) + err := common.WithStorage(dbPath, func(db storage.DB) error { + + cacheMetrics := &metrics.NoopCollector{} + headers := store.NewHeaders(cacheMetrics, db) + index := store.NewIndex(cacheMetrics, db) + guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize, store.DefaultCacheSize) + seals := store.NewSeals(cacheMetrics, db) + results := store.NewExecutionResults(cacheMetrics, db) + receipts := store.NewExecutionReceipts(cacheMetrics, db, results, store.DefaultCacheSize) + payloads := store.NewPayloads(db, index, guarantees, seals, receipts, results) + blocks := store.NewBlocks(db, headers, payloads) + commits := store.NewCommits(&metrics.NoopCollector{}, db) + + activeBlockID := blockID + outputFile := filepath.Join(outputPath, "blocks.jsonl") + + fi, err := os.Create(outputFile) if err != nil { - // no more header is available - break + return fmt.Errorf("could not create block output file %w", err) } - - block, err := blocks.ByID(activeBlockID) - if err != nil { - // log.Fatal().Err(err).Msg("could not load block") - break - } - - cols := make([]string, 0) - for _, g := range block.Payload.Guarantees { - cols = append(cols, hex.EncodeToString(g.CollectionID[:])) + defer fi.Close() + + blockWriter := bufio.NewWriter(fi) + defer blockWriter.Flush() + + for { + header, err := headers.ByBlockID(activeBlockID) + if err != nil { + // no more header is available + break + } + + block, err := blocks.ByID(activeBlockID) + if err != nil { + // log.Fatal().Err(err).Msg("could not load block") + break + } + + cols := make([]string, 0) + for _, g := range block.Payload.Guarantees { + cols = append(cols, hex.EncodeToString(g.CollectionID[:])) + } + + seals := make([]string, 0) + sealsResults := make([]string, 0) + sealsStates := make([]string, 0) + for _, s := range block.Payload.Seals { + seals = append(seals, hex.EncodeToString(s.BlockID[:])) + sealsResults = append(sealsResults, hex.EncodeToString(s.ResultID[:])) + sealsStates = append(sealsStates, hex.EncodeToString(s.FinalState[:])) + } + + b := blockSummary{ + BlockID: hex.EncodeToString(activeBlockID[:]), + BlockHeight: header.Height, + ParentBlockID: hex.EncodeToString(header.ParentID[:]), + ParentVoterIndices: hex.EncodeToString(header.ParentVoterIndices), + ParentVoterSigData: hex.EncodeToString(header.ParentVoterSigData), + ProposerID: hex.EncodeToString(header.ProposerID[:]), + Timestamp: time.UnixMilli(int64(header.Timestamp)).UTC(), + CollectionIDs: cols, + SealedBlocks: seals, + SealedResults: sealsResults, + SealedFinalStates: sealsStates, + } + + jsonData, err := json.Marshal(b) + if err != nil { + return fmt.Errorf("could not create a json obj for a block: %w", err) + } + _, err = blockWriter.WriteString(string(jsonData) + "\n") + if err != nil { + return fmt.Errorf("could not write block json to the file: %w", err) + } + blockWriter.Flush() + + activeBlockID = header.ParentID } - seals := make([]string, 0) - 
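Aside (editor's sketch, not part of the patch): the refactor running through these exporters replaces the open/defer-close pair with common.WithStorage, which scopes the database handle to a callback so cleanup cannot be forgotten. The shape of that pattern, in generic form with illustrative names:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// withResource opens a resource, hands it to fn, and always closes it on the
// way out, mirroring the shape of common.WithStorage used in this patch.
func withResource[T io.Closer](open func() (T, error), fn func(T) error) error {
	r, err := open()
	if err != nil {
		return err
	}
	defer r.Close()
	return fn(r)
}

func main() {
	err := withResource(
		func() (*os.File, error) { return os.Open("blocks.jsonl") }, // hypothetical input
		func(f *os.File) error {
			info, err := f.Stat()
			if err != nil {
				return err
			}
			fmt.Println("size:", info.Size())
			return nil
		},
	)
	if err != nil {
		fmt.Println("error:", err)
	}
}
```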
sealsResults := make([]string, 0) - sealsStates := make([]string, 0) - for _, s := range block.Payload.Seals { - seals = append(seals, hex.EncodeToString(s.BlockID[:])) - sealsResults = append(sealsResults, hex.EncodeToString(s.ResultID[:])) - sealsStates = append(sealsStates, hex.EncodeToString(s.FinalState[:])) - } - - b := blockSummary{ - BlockID: hex.EncodeToString(activeBlockID[:]), - BlockHeight: header.Height, - ParentBlockID: hex.EncodeToString(header.ParentID[:]), - ParentVoterIndices: hex.EncodeToString(header.ParentVoterIndices), - ParentVoterSigData: hex.EncodeToString(header.ParentVoterSigData), - ProposerID: hex.EncodeToString(header.ProposerID[:]), - Timestamp: header.Timestamp, - CollectionIDs: cols, - SealedBlocks: seals, - SealedResults: sealsResults, - SealedFinalStates: sealsStates, - } - - jsonData, err := json.Marshal(b) - if err != nil { - return flow.DummyStateCommitment, fmt.Errorf("could not create a json obj for a block: %w", err) - } - _, err = blockWriter.WriteString(string(jsonData) + "\n") + state, err := commits.ByBlockID(blockID) if err != nil { - return flow.DummyStateCommitment, fmt.Errorf("could not write block json to the file: %w", err) + return fmt.Errorf("could not find state commitment for this block: %w", err) } - blockWriter.Flush() - - activeBlockID = header.ParentID - } - - state, err := commits.ByBlockID(blockID) + stateCommitment = state + return nil + }) if err != nil { - return flow.DummyStateCommitment, fmt.Errorf("could not find state commitment for this block: %w", err) + return flow.DummyStateCommitment, err } - return state, nil + return stateCommitment, nil } diff --git a/cmd/util/cmd/exec-data-json-export/cmd.go b/cmd/util/cmd/exec-data-json-export/cmd.go index 5832b380d07..32de9c4bf89 100644 --- a/cmd/util/cmd/exec-data-json-export/cmd.go +++ b/cmd/util/cmd/exec-data-json-export/cmd.go @@ -6,6 +6,7 @@ import ( "github.com/rs/zerolog/log" "github.com/spf13/cobra" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" ) @@ -36,9 +37,7 @@ func init() { "Block hash (hex-encoded, 64 characters)") _ = Cmd.MarkFlagRequired("block-hash") - Cmd.Flags().StringVar(&flagDatadir, "datadir", "", - "directory that stores the protocol state") - _ = Cmd.MarkFlagRequired("datadir") + common.InitDataDirFlag(Cmd, &flagDatadir) Cmd.Flags().StringVar(&flagStateCommitment, "state-commitment", "", "state commitment (hex-encoded, 64 characters)") diff --git a/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go b/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go index 68fbc9f4070..3e3d6971a51 100644 --- a/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/delta_snapshot_exporter.go @@ -11,8 +11,9 @@ import ( "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/store" ) type dSnapshot struct { @@ -24,71 +25,72 @@ type dSnapshot struct { func ExportDeltaSnapshots(blockID flow.Identifier, dbPath string, outputPath string) error { // traverse backward from the given block (parent block) and fetch by blockHash - db := common.InitStorage(dbPath) - defer db.Close() + return common.WithStorage(dbPath, func(db storage.DB) error { - cacheMetrics := 
&metrics.NoopCollector{} - headers := badger.NewHeaders(cacheMetrics, db) + cacheMetrics := &metrics.NoopCollector{} + headers := store.NewHeaders(cacheMetrics, db) - activeBlockID := blockID - outputFile := filepath.Join(outputPath, "delta.jsonl") + activeBlockID := blockID + outputFile := filepath.Join(outputPath, "delta.jsonl") - fi, err := os.Create(outputFile) - if err != nil { - return fmt.Errorf("could not create delta snapshot output file %w", err) - } - defer fi.Close() - - writer := bufio.NewWriter(fi) - defer writer.Flush() - - for { - header, err := headers.ByBlockID(activeBlockID) + fi, err := os.Create(outputFile) if err != nil { - // no more header is available - return nil + return fmt.Errorf("could not create delta snapshot output file %w", err) } + defer fi.Close() - var snap []*snapshot.ExecutionSnapshot - err = db.View(operation.RetrieveExecutionStateInteractions(activeBlockID, &snap)) - if err != nil { - return fmt.Errorf("could not load delta snapshot: %w", err) - } + writer := bufio.NewWriter(fi) + defer writer.Flush() - if len(snap) < 1 { - // end of snapshots - return nil - } - m, err := json.Marshal(snap[0].UpdatedRegisters()) - if err != nil { - return fmt.Errorf("could not load delta snapshot: %w", err) - } + for { + header, err := headers.ByBlockID(activeBlockID) + if err != nil { + // no more header is available + return nil + } - reads := make([]string, 0) - for _, r := range snap[0].ReadSet { + var snap []*snapshot.ExecutionSnapshot + err = operation.RetrieveExecutionStateInteractions(db.Reader(), activeBlockID, &snap) + if err != nil { + return fmt.Errorf("could not load delta snapshot: %w", err) + } - json, err := json.Marshal(r) + if len(snap) < 1 { + // end of snapshots + return nil + } + m, err := json.Marshal(snap[0].UpdatedRegisters()) if err != nil { - return fmt.Errorf("could not create a json obj for a read registerID: %w", err) + return fmt.Errorf("could not load delta snapshot: %w", err) } - reads = append(reads, string(json)) - } - data := dSnapshot{ - DeltaJSONStr: string(m), - Reads: reads, - } + reads := make([]string, 0) + for _, r := range snap[0].ReadSet { - jsonData, err := json.Marshal(data) - if err != nil { - return fmt.Errorf("could not create a json obj for a delta snapshot: %w", err) - } - _, err = writer.WriteString(string(jsonData) + "\n") - if err != nil { - return fmt.Errorf("could not write delta snapshot json to the file: %w", err) + json, err := json.Marshal(r) + if err != nil { + return fmt.Errorf("could not create a json obj for a read registerID: %w", err) + } + reads = append(reads, string(json)) + } + + data := dSnapshot{ + DeltaJSONStr: string(m), + Reads: reads, + } + + jsonData, err := json.Marshal(data) + if err != nil { + return fmt.Errorf("could not create a json obj for a delta snapshot: %w", err) + } + _, err = writer.WriteString(string(jsonData) + "\n") + if err != nil { + return fmt.Errorf("could not write delta snapshot json to the file: %w", err) + } + writer.Flush() + + activeBlockID = header.ParentID } - writer.Flush() - activeBlockID = header.ParentID - } + }) } diff --git a/cmd/util/cmd/exec-data-json-export/event_exporter.go b/cmd/util/cmd/exec-data-json-export/event_exporter.go index a47c8d4a01d..600f4d45af3 100644 --- a/cmd/util/cmd/exec-data-json-export/event_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/event_exporter.go @@ -11,7 +11,8 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - 
"github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) type event struct { @@ -28,56 +29,57 @@ type event struct { func ExportEvents(blockID flow.Identifier, dbPath string, outputPath string) error { // traverse backward from the given block (parent block) and fetch by blockHash - db := common.InitStorage(dbPath) - defer db.Close() + return common.WithStorage(dbPath, func(db storage.DB) error { - cacheMetrics := &metrics.NoopCollector{} - headers := badger.NewHeaders(cacheMetrics, db) - events := badger.NewEvents(cacheMetrics, db) - activeBlockID := blockID + cacheMetrics := &metrics.NoopCollector{} + headers := store.NewHeaders(cacheMetrics, db) + events := store.NewEvents(cacheMetrics, db) + activeBlockID := blockID - outputFile := filepath.Join(outputPath, "events.jsonl") - fi, err := os.Create(outputFile) - if err != nil { - return fmt.Errorf("could not create event output file %w", err) - } - defer fi.Close() - - eventWriter := bufio.NewWriter(fi) - defer eventWriter.Flush() - - for { - header, err := headers.ByBlockID(activeBlockID) + outputFile := filepath.Join(outputPath, "events.jsonl") + fi, err := os.Create(outputFile) if err != nil { - // no more header is available - return nil + return fmt.Errorf("could not create event output file %w", err) } + defer fi.Close() - evs, err := events.ByBlockID(activeBlockID) - if err != nil { - return fmt.Errorf("could not fetch events %w", err) - } + eventWriter := bufio.NewWriter(fi) + defer eventWriter.Flush() - for _, ev := range evs { - e := event{ - TxID: hex.EncodeToString(ev.TransactionID[:]), - TxIndex: ev.TransactionIndex, - EventIndex: ev.EventIndex, - EventType: string(ev.Type), - PayloadHex: hex.EncodeToString(ev.Payload), - BlockID: hex.EncodeToString(activeBlockID[:]), - BlockHeight: header.Height, - } - jsonData, err := json.Marshal(e) + for { + header, err := headers.ByBlockID(activeBlockID) if err != nil { - return fmt.Errorf("could not create a json obj for an event: %w", err) + // no more header is available + return nil } - _, err = eventWriter.WriteString(string(jsonData) + "\n") + + evs, err := events.ByBlockID(activeBlockID) if err != nil { - return fmt.Errorf("could not write event json to the file: %w", err) + return fmt.Errorf("could not fetch events %w", err) + } + + for _, ev := range evs { + e := event{ + TxID: hex.EncodeToString(ev.TransactionID[:]), + TxIndex: ev.TransactionIndex, + EventIndex: ev.EventIndex, + EventType: string(ev.Type), + PayloadHex: hex.EncodeToString(ev.Payload), + BlockID: hex.EncodeToString(activeBlockID[:]), + BlockHeight: header.Height, + } + jsonData, err := json.Marshal(e) + if err != nil { + return fmt.Errorf("could not create a json obj for an event: %w", err) + } + _, err = eventWriter.WriteString(string(jsonData) + "\n") + if err != nil { + return fmt.Errorf("could not write event json to the file: %w", err) + } + eventWriter.Flush() } - eventWriter.Flush() + activeBlockID = header.ParentID } - activeBlockID = header.ParentID - } + + }) } diff --git a/cmd/util/cmd/exec-data-json-export/ledger_exporter.go b/cmd/util/cmd/exec-data-json-export/ledger_exporter.go index ee8573d8963..a9d75734d9b 100644 --- a/cmd/util/cmd/exec-data-json-export/ledger_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/ledger_exporter.go @@ -35,7 +35,7 @@ func ExportLedger(ledgerPath string, targetstate string, outputPath string) erro return fmt.Errorf("cannot create ledger from write-a-head logs and checkpoints: %w", err) } - 
compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), complete.DefaultCacheSize, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), complete.DefaultCacheSize, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), &metrics.NoopCollector{}) if err != nil { return fmt.Errorf("cannot create compactor: %w", err) } diff --git a/cmd/util/cmd/exec-data-json-export/result_exporter.go b/cmd/util/cmd/exec-data-json-export/result_exporter.go index df187a9aa87..6afefd7ee9a 100644 --- a/cmd/util/cmd/exec-data-json-export/result_exporter.go +++ b/cmd/util/cmd/exec-data-json-export/result_exporter.go @@ -11,7 +11,8 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) type result struct { @@ -26,65 +27,66 @@ type result struct { func ExportResults(blockID flow.Identifier, dbPath string, outputPath string) error { // traverse backward from the given block (parent block) and fetch by blockHash - db := common.InitStorage(dbPath) - defer db.Close() + return common.WithStorage(dbPath, func(db storage.DB) error { - cacheMetrics := &metrics.NoopCollector{} - headers := badger.NewHeaders(cacheMetrics, db) - results := badger.NewExecutionResults(cacheMetrics, db) - activeBlockID := blockID + cacheMetrics := &metrics.NoopCollector{} + headers := store.NewHeaders(cacheMetrics, db) + results := store.NewExecutionResults(cacheMetrics, db) + activeBlockID := blockID - outputFile := filepath.Join(outputPath, "results.jsonl") - fi, err := os.Create(outputFile) - if err != nil { - return fmt.Errorf("could not create exec results output file %w", err) - } - defer fi.Close() - - resultWriter := bufio.NewWriter(fi) - defer resultWriter.Flush() - - for { - header, err := headers.ByBlockID(activeBlockID) + outputFile := filepath.Join(outputPath, "results.jsonl") + fi, err := os.Create(outputFile) if err != nil { - // no more header is available - return nil + return fmt.Errorf("could not create exec results output file %w", err) } + defer fi.Close() - res, err := results.ByBlockID(activeBlockID) - if err != nil { - return fmt.Errorf("could not fetch events %w", err) - } + resultWriter := bufio.NewWriter(fi) + defer resultWriter.Flush() - chunks := make([]string, 0) - for _, c := range res.Chunks { - cid := c.ID() - chunks = append(chunks, hex.EncodeToString(cid[:])) - } + for { + header, err := headers.ByBlockID(activeBlockID) + if err != nil { + // no more header is available + return nil + } - resID := res.ID() - finalState, err := res.FinalStateCommitment() - if err != nil { - return fmt.Errorf("export result error: %w", err) - } - e := result{ - ResultID: hex.EncodeToString(resID[:]), - PreviousResultID: hex.EncodeToString(res.PreviousResultID[:]), - FinalStateCommit: hex.EncodeToString(finalState[:]), - BlockID: hex.EncodeToString(activeBlockID[:]), - Chunks: chunks, - } + res, err := results.ByBlockID(activeBlockID) + if err != nil { + return fmt.Errorf("could not fetch events %w", err) + } - jsonData, err := json.Marshal(e) - if err != nil { - return fmt.Errorf("could not create a json obj for an result: %w", err) - } - _, err = resultWriter.WriteString(string(jsonData) + "\n") - if err != nil { - return fmt.Errorf("could not write result json to the file: %w", err) + chunks := make([]string, 0) + for _, 
c := range res.Chunks {
+			cid := c.ID()
+			chunks = append(chunks, hex.EncodeToString(cid[:]))
+		}
+
+		resID := res.ID()
+		finalState, err := res.FinalStateCommitment()
+		if err != nil {
+			return fmt.Errorf("export result error: %w", err)
+		}
+		e := result{
+			ResultID:         hex.EncodeToString(resID[:]),
+			PreviousResultID: hex.EncodeToString(res.PreviousResultID[:]),
+			FinalStateCommit: hex.EncodeToString(finalState[:]),
+			BlockID:          hex.EncodeToString(activeBlockID[:]),
+			Chunks:           chunks,
+		}
+
+		jsonData, err := json.Marshal(e)
+		if err != nil {
+			return fmt.Errorf("could not create a json obj for a result: %w", err)
+		}
+		_, err = resultWriter.WriteString(string(jsonData) + "\n")
+		if err != nil {
+			return fmt.Errorf("could not write result json to the file: %w", err)
+		}
+		resultWriter.Flush()
+
+		activeBlockID = header.ParentID
+	}
-		resultWriter.Flush()
-		activeBlockID = header.ParentID
-	}
+	})
 }
diff --git a/cmd/util/cmd/exec-data-json-export/transaction_exporter.go b/cmd/util/cmd/exec-data-json-export/transaction_exporter.go
index 2fc112b9d8f..5c8f937437d 100644
--- a/cmd/util/cmd/exec-data-json-export/transaction_exporter.go
+++ b/cmd/util/cmd/exec-data-json-export/transaction_exporter.go
@@ -11,7 +11,7 @@ import (
 	"github.com/onflow/flow-go/cmd/util/cmd/common"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/metrics"
-	"github.com/onflow/flow-go/storage/badger"
+	"github.com/onflow/flow-go/storage/store"
 )
 
 // TODO add status, events as repeated, gas used, ErrorMessage , register touches
@@ -28,7 +28,7 @@ type transactionInContext struct {
 	GasLimit               uint64 `json:"gas_limit"`
 	PayerAddressHex        string `json:"payer_address_hex"`
 	ProposalKeyAddressHex  string `json:"proposal_key_address_hex"`
-	ProposalKeyID          uint64 `json:"proposal_key_id"`
+	ProposalKeyID          uint32 `json:"proposal_key_id"`
 	ProposalSequenceNumber uint64 `json:"proposal_sequence_number"`
 	AuthorizersAddressHex  []string `json:"authorizers_address_hex"`
 	EnvelopeSize           int    `json:"envelope_size"`
@@ -42,20 +42,23 @@ type transactionInContext struct {
 func ExportExecutedTransactions(blockID flow.Identifier, dbPath string, outputPath string) error {
 
 	// traverse backward from the given block (parent block) and fetch by blockHash
-	db := common.InitStorage(dbPath)
+	db, err := common.InitStorage(dbPath)
+	if err != nil {
+		return fmt.Errorf("could not initialize storage: %w", err)
+	}
 	defer db.Close()
 
 	cacheMetrics := &metrics.NoopCollector{}
-	index := badger.NewIndex(cacheMetrics, db)
-	guarantees := badger.NewGuarantees(cacheMetrics, db, badger.DefaultCacheSize)
-	seals := badger.NewSeals(cacheMetrics, db)
-	results := badger.NewExecutionResults(cacheMetrics, db)
-	receipts := badger.NewExecutionReceipts(cacheMetrics, db, results, badger.DefaultCacheSize)
-	transactions := badger.NewTransactions(cacheMetrics, db)
-	headers := badger.NewHeaders(cacheMetrics, db)
-	payloads := badger.NewPayloads(db, index, guarantees, seals, receipts, results)
-	blocks := badger.NewBlocks(db, headers, payloads)
-	collections := badger.NewCollections(db, transactions)
+	index := store.NewIndex(cacheMetrics, db)
+	guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize, store.DefaultCacheSize)
+	seals := store.NewSeals(cacheMetrics, db)
+	results := store.NewExecutionResults(cacheMetrics, db)
+	receipts := store.NewExecutionReceipts(cacheMetrics, db, results, store.DefaultCacheSize)
+	transactions := store.NewTransactions(cacheMetrics, db)
+	headers := store.NewHeaders(cacheMetrics, db)
+	payloads := store.NewPayloads(db,
index, guarantees, seals, receipts, results) + blocks := store.NewBlocks(db, headers, payloads) + collections := store.NewCollections(db, transactions) activeBlockID := blockID outputFile := filepath.Join(outputPath, "transactions.jsonl") diff --git a/cmd/util/cmd/execution-data-blobstore/cmd/get.go b/cmd/util/cmd/execution-data-blobstore/cmd/get.go index 0a1c7f70e4c..e18e9476d6b 100644 --- a/cmd/util/cmd/execution-data-blobstore/cmd/get.go +++ b/cmd/util/cmd/execution-data-blobstore/cmd/get.go @@ -45,7 +45,7 @@ func run(*cobra.Command, []string) { edID := flow.HashToID(b) - ed, err := eds.GetExecutionData(context.Background(), edID) + ed, err := eds.Get(context.Background(), edID) if err != nil { logger.Fatal().Err(err).Msg("failed to get execution data") } diff --git a/cmd/util/cmd/execution-data-blobstore/cmd/root.go b/cmd/util/cmd/execution-data-blobstore/cmd/root.go index ff81c5d4123..3de711d0683 100644 --- a/cmd/util/cmd/execution-data-blobstore/cmd/root.go +++ b/cmd/util/cmd/execution-data-blobstore/cmd/root.go @@ -5,7 +5,7 @@ import ( "os" "github.com/ipfs/go-datastore" - badger "github.com/ipfs/go-ds-badger2" + pebbleds "github.com/ipfs/go-ds-pebble" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -32,10 +32,10 @@ func Execute() { } func initBlobstore() (blobs.Blobstore, datastore.Batching) { - ds, err := badger.NewDatastore(flagBlobstoreDir, &badger.DefaultOptions) + ds, err := pebbleds.NewDatastore(flagBlobstoreDir, nil) if err != nil { - log.Fatal().Err(err).Msg("could not init badger datastore") + log.Fatal().Err(err).Msg("could not init pebble datastore") } blobstore := blobs.NewBlobstore(ds) diff --git a/cmd/util/cmd/execution-state-extract/cmd.go b/cmd/util/cmd/execution-state-extract/cmd.go index c8519b015ad..34375a1c025 100644 --- a/cmd/util/cmd/execution-state-extract/cmd.go +++ b/cmd/util/cmd/execution-state-extract/cmd.go @@ -3,44 +3,56 @@ package extract import ( "encoding/hex" "fmt" + "os" "path" + "runtime/pprof" + "strings" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" + common2 "github.com/onflow/flow-go/cmd/util/common" + "github.com/onflow/flow-go/cmd/util/ledger/migrations" + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/complete/wal" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var ( - flagExecutionStateDir string - flagOutputDir string - flagBlockHash string - flagStateCommitment string - flagDatadir string - flagChain string - flagNoMigration bool - flagNoReport bool - flagNWorker int + flagExecutionStateDir string + flagOutputDir string + flagBlockHash string + flagStateCommitment string + flagDatadir string + flagChain string + flagNWorker int + flagNoMigration bool + flagMigration string + flagNoReport bool + flagAllowPartialStateFromPayloads bool + flagSortPayloads bool + flagPrune bool + flagInputPayloadFileName string + flagOutputPayloadFileName string + flagOutputPayloadByAddresses string + flagCPUProfile string + flagZeroMigration bool + flagValidate bool ) -func getChain(chainName string) (chain flow.Chain, err error) { - defer func() { - if 
r := recover(); r != nil { - err = fmt.Errorf("invalid chain: %s", r) - } - }() - chain = flow.ChainID(chainName).Chain() - return -} - var Cmd = &cobra.Command{ Use: "execution-state-extract", Short: "Reads WAL files and generates the checkpoint containing state commitment for given block hash", - Run: run, + RunE: runE, } func init() { @@ -61,43 +73,130 @@ func init() { Cmd.Flags().StringVar(&flagBlockHash, "block-hash", "", "Block hash (hex-encoded, 64 characters)") - Cmd.Flags().StringVar(&flagDatadir, "datadir", "", - "directory that stores the protocol state") + common.InitDataDirFlag(Cmd, &flagDatadir) Cmd.Flags().BoolVar(&flagNoMigration, "no-migration", false, "don't migrate the state") + Cmd.Flags().BoolVar(&flagZeroMigration, "estimate-migration-duration", false, + "run zero migrations to get minimum duration needed by migrations (load execution state, group payloads by account, iterate account payloads, create trie from payload, and generate checkpoint)") + + Cmd.Flags().StringVar(&flagMigration, "migration", "", "migration name") + Cmd.Flags().BoolVar(&flagNoReport, "no-report", false, "don't report the state") Cmd.Flags().IntVar(&flagNWorker, "n-migrate-worker", 10, "number of workers to migrate payload concurrently") + + Cmd.Flags().BoolVar(&flagAllowPartialStateFromPayloads, "allow-partial-state-from-payload-file", false, + "allow input payload file containing partial state (e.g. not all accounts)") + + Cmd.Flags().BoolVar(&flagSortPayloads, "sort-payloads", true, + "sort payloads (generate deterministic output; disable only for development purposes)") + + Cmd.Flags().BoolVar(&flagPrune, "prune", false, + "prune the state (for development purposes)") + + // If specified, the state will consist of payloads from the given input payload file. + // If not specified, then the state will be extracted from the latest checkpoint file. + // This flag can be used to reduce total duration of migrations when state extraction involves + // multiple migrations because it helps avoid repeatedly reading from checkpoint file to rebuild trie. + // The input payload file must be created by state extraction running with either + // flagOutputPayloadFileName or flagOutputPayloadByAddresses. + Cmd.Flags().StringVar( + &flagInputPayloadFileName, + "input-payload-filename", + "", + "input payload file", + ) + + Cmd.Flags().StringVar( + &flagOutputPayloadFileName, + "output-payload-filename", + "", + "output payload file", + ) + + Cmd.Flags().StringVar( + // Extract payloads of specified addresses (comma separated list of hex-encoded addresses) + // to file specified by --output-payload-filename. + // If no address is specified (empty string) then this flag is ignored. 
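To make the address-filter flag concrete: the comma-separated hex list is normalized into a set of owners before export. The command delegates this to `common2.ParseOwners` (the `cmd/util/common` package); the stand-alone sketch below is a hypothetical equivalent that only illustrates the idea, with the 8-byte length check reflecting Flow's address size.

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// parseOwnerAddresses is a hypothetical stand-in for common2.ParseOwners:
// it turns a comma-separated list of hex-encoded addresses into a set.
func parseOwnerAddresses(commaSeparated string) (map[string]struct{}, error) {
	owners := make(map[string]struct{})
	if commaSeparated == "" {
		return owners, nil // empty string: the flag is ignored
	}
	for _, s := range strings.Split(commaSeparated, ",") {
		s = strings.TrimPrefix(strings.TrimSpace(s), "0x")
		b, err := hex.DecodeString(s)
		if err != nil {
			return nil, fmt.Errorf("invalid hex address %q: %w", s, err)
		}
		if len(b) != 8 { // Flow addresses are 8 bytes
			return nil, fmt.Errorf("unexpected address length %d for %q", len(b), s)
		}
		owners[string(b)] = struct{}{}
	}
	return owners, nil
}
```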
+ &flagOutputPayloadByAddresses, + "extract-payloads-by-address", + "", + "extract payloads of addresses (comma separated hex-encoded addresses) to file specified by output-payload-filename", + ) + + Cmd.Flags().StringVar(&flagCPUProfile, "cpu-profile", "", + "enable CPU profiling") + + Cmd.Flags().BoolVar(&flagValidate, "validate-public-key-migration", false, + "validate migrated account public keys") } -func run(*cobra.Command, []string) { - var stateCommitment flow.StateCommitment +func runE(*cobra.Command, []string) error { + if flagCPUProfile != "" { + f, err := os.Create(flagCPUProfile) + if err != nil { + return fmt.Errorf("could not create CPU profile: %w", err) + } + + err = pprof.StartCPUProfile(f) + if err != nil { + return fmt.Errorf("could not start CPU profile: %w", err) + } + + defer pprof.StopCPUProfile() + } + + err := os.MkdirAll(flagOutputDir, 0755) + if err != nil { + return fmt.Errorf("cannot create output directory %s: %w", flagOutputDir, err) + } + + if flagNoMigration && flagZeroMigration { + return fmt.Errorf("cannot run the command with both --no-migration and --estimate-migration-duration flags, one of them or none of them should be provided") + } if len(flagBlockHash) > 0 && len(flagStateCommitment) > 0 { - log.Fatal().Msg("cannot run the command with both block hash and state commitment as inputs, only one of them should be provided") - return + return fmt.Errorf("cannot run the command with both block hash and state commitment as inputs, only one of them should be provided") } + if len(flagBlockHash) == 0 && len(flagStateCommitment) == 0 && len(flagInputPayloadFileName) == 0 { + return fmt.Errorf("--block-hash or --state-commitment or --input-payload-filename must be specified") + } + + if len(flagInputPayloadFileName) > 0 && (len(flagBlockHash) > 0 || len(flagStateCommitment) > 0) { + return fmt.Errorf("--input-payload-filename cannot be used with --block-hash or --state-commitment") + } + + // When flagOutputPayloadByAddresses is specified, flagOutputPayloadFileName is required. 
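runE performs its flag validation inline; the same rules read more clearly when extracted into a pure function. The struct and function names below are illustrative only, not part of the tool.

```go
import "fmt"

// extractFlags mirrors the mutually-exclusive inputs runE checks.
type extractFlags struct {
	BlockHash, StateCommitment, InputPayloadFile string
	NoMigration, ZeroMigration                   bool
}

func validateExtractFlags(f extractFlags) error {
	if f.NoMigration && f.ZeroMigration {
		return fmt.Errorf("cannot use both --no-migration and --estimate-migration-duration")
	}
	if f.BlockHash != "" && f.StateCommitment != "" {
		return fmt.Errorf("only one of --block-hash and --state-commitment may be provided")
	}
	if f.BlockHash == "" && f.StateCommitment == "" && f.InputPayloadFile == "" {
		return fmt.Errorf("--block-hash, --state-commitment, or --input-payload-filename must be specified")
	}
	if f.InputPayloadFile != "" && (f.BlockHash != "" || f.StateCommitment != "") {
		return fmt.Errorf("--input-payload-filename cannot be combined with --block-hash or --state-commitment")
	}
	return nil
}
```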
+ if len(flagOutputPayloadFileName) == 0 && len(flagOutputPayloadByAddresses) > 0 { + return fmt.Errorf("--extract-payloads-by-address requires --output-payload-filename to be specified") + } + + var stateCommitment flow.StateCommitment + if len(flagBlockHash) > 0 { blockID, err := flow.HexStringToIdentifier(flagBlockHash) if err != nil { - log.Fatal().Err(err).Msg("malformed block hash") + return fmt.Errorf("malformed block hash: %w", err) } log.Info().Msgf("extracting state by block ID: %v", blockID) - db := common.InitStorage(flagDatadir) - defer db.Close() - - cache := &metrics.NoopCollector{} - commits := badger.NewCommits(cache, db) + err = common.WithStorage(flagDatadir, func(db storage.DB) error { + cache := &metrics.NoopCollector{} + commits := store.NewCommits(cache, db) - stateCommitment, err = getStateCommitment(commits, blockID) + stateCommitment, err = commits.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("cannot get state commitment for block %v: %w", blockID, err) + } + return nil + }) if err != nil { - log.Fatal().Err(err).Msgf("cannot get state commitment for block %v", blockID) + return fmt.Errorf("cannot initialize storage with datadir %s: %w", flagDatadir, err) } } @@ -105,77 +204,350 @@ func run(*cobra.Command, []string) { var err error stateCommitmentBytes, err := hex.DecodeString(flagStateCommitment) if err != nil { - log.Fatal().Err(err).Msg("cannot get decode the state commitment") + return fmt.Errorf("cannot decode the state commitment: %w", err) } stateCommitment, err = flow.ToStateCommitment(stateCommitmentBytes) if err != nil { - log.Fatal().Err(err).Msg("invalid state commitment length") + return fmt.Errorf("invalid state commitment length: %w", err) } log.Info().Msgf("extracting state by state commitment: %x", stateCommitment) } - if len(flagBlockHash) == 0 && len(flagStateCommitment) == 0 { - log.Fatal().Msg("no --block-hash or --state-commitment was specified") + if len(flagInputPayloadFileName) > 0 { + if _, err := os.Stat(flagInputPayloadFileName); os.IsNotExist(err) { + return fmt.Errorf("payload input file %s doesn't exist", flagInputPayloadFileName) + } + + partialState, err := util.IsPayloadFilePartialState(flagInputPayloadFileName) + if err != nil { + return fmt.Errorf("cannot get flag from payload input file %s: %w", flagInputPayloadFileName, err) + } + + // Check if payload file contains partial state and is allowed by --allow-partial-state-from-payload-file. 
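Distilled from the block-hash branch above, a sketch of the lookup as a small helper, using the same storage APIs this diff introduces (`common.WithStorage`, `store.NewCommits`); treat it as a reading aid rather than the tool's actual code layout.

```go
import (
	"fmt"

	"github.com/onflow/flow-go/cmd/util/cmd/common"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/store"
)

// stateCommitmentForBlock resolves the state commitment indexed for a
// block, as runE does for the --block-hash input path.
func stateCommitmentForBlock(datadir string, blockID flow.Identifier) (flow.StateCommitment, error) {
	var sc flow.StateCommitment
	err := common.WithStorage(datadir, func(db storage.DB) error {
		commits := store.NewCommits(&metrics.NoopCollector{}, db)
		var err error
		sc, err = commits.ByBlockID(blockID)
		if err != nil {
			return fmt.Errorf("cannot get state commitment for block %v: %w", blockID, err)
		}
		return nil
	})
	return sc, err
}
```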
+ if !flagAllowPartialStateFromPayloads && partialState { + return fmt.Errorf("payload input file %s contains partial state, please specify --allow-partial-state-from-payload-file", flagInputPayloadFileName) + } + + msg := "input payloads represent " + if partialState { + msg += "partial state" + } else { + msg += "complete state" + } + if flagAllowPartialStateFromPayloads { + msg += ", and --allow-partial-state-from-payload-file is specified" + } else { + msg += ", and --allow-partial-state-from-payload-file is NOT specified" + } + log.Info().Msg(msg) } - log.Info().Msgf("Extracting state from %s, exporting root checkpoint to %s, version: %v", - flagExecutionStateDir, - path.Join(flagOutputDir, bootstrap.FilenameWALRootCheckpoint), - 6, - ) + if len(flagOutputPayloadFileName) > 0 { + if _, err := os.Stat(flagOutputPayloadFileName); os.IsExist(err) { + return fmt.Errorf("payload output file %s exists", flagOutputPayloadFileName) + } + } - log.Info().Msgf("Block state commitment: %s from %v, output dir: %s", - hex.EncodeToString(stateCommitment[:]), - flagExecutionStateDir, - flagOutputDir) + var exportPayloadsForOwners map[string]struct{} - // err := ensureCheckpointFileExist(flagExecutionStateDir) - // if err != nil { - // log.Fatal().Err(err).Msgf("cannot ensure checkpoint file exist in folder %v", flagExecutionStateDir) - // } + if len(flagOutputPayloadByAddresses) > 0 { + var err error + exportPayloadsForOwners, err = common2.ParseOwners(strings.Split(flagOutputPayloadByAddresses, ",")) + if err != nil { + return fmt.Errorf("failed to parse addresses: %w", err) + } + } - chain, err := getChain(flagChain) + // Validate chain ID + chain := flow.ChainID(flagChain).Chain() + + if flagNoReport { + log.Warn().Msgf("--no-report flag is deprecated") + } + + var inputMsg string + if len(flagInputPayloadFileName) > 0 { + // Input is payloads + inputMsg = fmt.Sprintf("reading payloads from %s", flagInputPayloadFileName) + } else { + // Input is execution state + inputMsg = fmt.Sprintf("reading block state commitment %s from %s", + hex.EncodeToString(stateCommitment[:]), + flagExecutionStateDir, + ) + + err := ensureCheckpointFileExist(flagExecutionStateDir) + if err != nil { + log.Error().Err(err).Msgf("cannot ensure checkpoint file exist in folder %v", flagExecutionStateDir) + } + + } + + var outputMsg string + if len(flagOutputPayloadFileName) > 0 { + // Output is payload file + if len(exportPayloadsForOwners) == 0 { + outputMsg = fmt.Sprintf("exporting all payloads to %s", flagOutputPayloadFileName) + } else { + outputMsg = fmt.Sprintf( + "exporting payloads for owners %v to %s", + common2.OwnersToString(exportPayloadsForOwners), + flagOutputPayloadFileName, + ) + } + } else { + // Output is checkpoint files + outputMsg = fmt.Sprintf( + "exporting root checkpoint to %s, version: %d", + path.Join(flagOutputDir, bootstrap.FilenameWALRootCheckpoint), + 6, + ) + } + + log.Info().Msgf("state extraction plan: %s, %s", inputMsg, outputMsg) + + // Extract state and create checkpoint files without migration. 
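The partial-state guard above can be summarized as one function; a sketch reusing `util.IsPayloadFilePartialState` from this diff (the helper the command itself calls), with the surrounding logging omitted.

```go
import (
	"fmt"
	"os"

	"github.com/onflow/flow-go/cmd/util/ledger/util"
)

// checkPayloadInput rejects partial-state input unless explicitly allowed.
func checkPayloadInput(fileName string, allowPartial bool) error {
	if _, err := os.Stat(fileName); os.IsNotExist(err) {
		return fmt.Errorf("payload input file %s doesn't exist", fileName)
	}
	// Payload files record whether they hold complete or partial state.
	partial, err := util.IsPayloadFilePartialState(fileName)
	if err != nil {
		return fmt.Errorf("cannot get flag from payload input file %s: %w", fileName, err)
	}
	if partial && !allowPartial {
		return fmt.Errorf("payload input file %s contains partial state, please specify --allow-partial-state-from-payload-file", fileName)
	}
	return nil
}
```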
+ if flagNoMigration && + len(flagInputPayloadFileName) == 0 && + len(flagOutputPayloadFileName) == 0 { + + exportedState, err := extractStateToCheckpointWithoutMigration( + log.Logger, + flagExecutionStateDir, + flagOutputDir, + stateCommitment) + if err != nil { + return fmt.Errorf("error extracting state for commitment %s: %w", stateCommitment, err) + } + + reportExtraction(stateCommitment, exportedState) + return nil + } + + if flagZeroMigration { + newStateCommitment, err := emptyMigration( + log.Logger, + flagExecutionStateDir, + flagOutputDir, + stateCommitment) + if err != nil { + return fmt.Errorf("error extracting state for commitment %s: %w", stateCommitment, err) + } + if stateCommitment != flow.StateCommitment(newStateCommitment) { + return fmt.Errorf("empty migration failed: state commitments are different: %v != %s", stateCommitment, newStateCommitment) + } + return nil + } + + var extractor extractor + if len(flagInputPayloadFileName) > 0 { + extractor = newPayloadFileExtractor(log.Logger, flagInputPayloadFileName) + } else { + extractor = newExecutionStateExtractor(log.Logger, flagExecutionStateDir, stateCommitment) + } + + // Extract payloads. + + payloadsFromPartialState, payloads, err := extractor.extract() if err != nil { - log.Fatal().Err(err).Msgf("invalid chain name") + return fmt.Errorf("error extracting payloads: %w", err) + } + + log.Info().Msgf("extracted %d payloads", len(payloads)) + + // Migrate payloads. + + if !flagNoMigration { + var migs []migrations.NamedMigration + + if len(flagMigration) > 0 { + switch flagMigration { + case "add-migrationmainnet-keys": + migs = append(migs, addMigrationMainnetKeysMigration(log.Logger, flagOutputDir, flagNWorker, chain.ChainID())...) + default: + return fmt.Errorf("unknown migration: %s", flagMigration) + } + } + + migs = append( + migs, + migrations.NamedMigration{ + Name: "account-public-key-deduplication", + Migrate: migrations.NewAccountBasedMigration( + log.Logger, + flagNWorker, + []migrations.AccountBasedMigration{ + migrations.NewAccountPublicKeyDeduplicationMigration( + chain.ChainID(), + flagOutputDir, + flagValidate, + reporters.NewReportFileWriterFactory(flagOutputDir, log.Logger), + ), + migrations.NewAccountUsageMigration( + reporters.NewReportFileWriterFactoryWithFormat(flagOutputDir, log.Logger, reporters.ReportFormatCSV), + ), + }, + ), + }, + ) + + migration := newMigration(log.Logger, migs, flagNWorker) + + payloads, err = migration(payloads) + if err != nil { + return fmt.Errorf("error migrating payloads: %w", err) + } + + log.Info().Msgf("migrated %d payloads", len(payloads)) } - err = extractExecutionState( - flagExecutionStateDir, - stateCommitment, - flagOutputDir, + // Export migrated payloads. + + var exporter exporter + if len(flagOutputPayloadFileName) > 0 { + exporter = newPayloadFileExporter( + log.Logger, + flagNWorker, + flagOutputPayloadFileName, + exportPayloadsForOwners, + flagSortPayloads, + ) + } else { + exporter = newCheckpointFileExporter( + log.Logger, + flagOutputDir, + ) + } + + log.Info().Msgf("exporting %d payloads", len(payloads)) + + exportedState, err := exporter.export(payloadsFromPartialState, payloads) + if err != nil { + return fmt.Errorf("error exporting migrated payloads: %w", err) + } + + log.Info().Msgf("exported %d payloads", len(payloads)) + + reportExtraction(stateCommitment, exportedState) + return nil +} + +func reportExtraction(loadedState flow.StateCommitment, exportedState ledger.State) { + // Create export reporter. 
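The extractor selection embedded in runE above reduces to a few lines; a same-package sketch using the constructors defined in execution_state_extract.go later in this diff.

```go
// Assumes the extract package's own types from this diff
// (extractor, newPayloadFileExtractor, newExecutionStateExtractor).
func chooseExtractor(
	logger zerolog.Logger,
	inputPayloadFile string,
	executionStateDir string,
	sc flow.StateCommitment,
) extractor {
	if inputPayloadFile != "" {
		// Payload-file input takes precedence; it skips trie reconstruction.
		return newPayloadFileExtractor(logger, inputPayloadFile)
	}
	return newExecutionStateExtractor(logger, executionStateDir, sc)
}
```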
+ reporter := reporters.NewExportReporter( log.Logger, - chain, - !flagNoMigration, - !flagNoReport, - flagNWorker, + func() flow.StateCommitment { return loadedState }, ) + err := reporter.Report(nil, exportedState) if err != nil { - log.Fatal().Err(err).Msgf("error extracting the execution state: %s", err.Error()) + log.Error().Err(err).Msgf("can not generate report for migrated state: %v", exportedState) } + + log.Info().Msgf( + "New state commitment for the exported state is: %s (base64: %s)", + exportedState.String(), + exportedState.Base64(), + ) +} + +func extractStateToCheckpointWithoutMigration( + logger zerolog.Logger, + executionStateDir string, + outputDir string, + stateCommitment flow.StateCommitment, +) (ledger.State, error) { + // Load state for given state commitment + newTrie, err := util.ReadTrie(executionStateDir, stateCommitment) + if err != nil { + return ledger.DummyState, fmt.Errorf("failed to load state: %w", err) + } + + // Create checkpoint files + return createCheckpoint(logger, newTrie, outputDir, bootstrap.FilenameWALRootCheckpoint) } -// func ensureCheckpointFileExist(dir string) error { -// checkpoints, err := wal.Checkpoints(dir) -// if err != nil { -// return fmt.Errorf("could not find checkpoint files: %v", err) -// } -// -// if len(checkpoints) != 0 { -// log.Info().Msgf("found checkpoint %v files: %v", len(checkpoints), checkpoints) -// return nil -// } -// -// has, err := wal.HasRootCheckpoint(dir) -// if err != nil { -// return fmt.Errorf("could not check has root checkpoint: %w", err) -// } -// -// if has { -// log.Info().Msg("found root checkpoint file") -// return nil -// } -// -// return fmt.Errorf("no checkpoint file was found, no root checkpoint file was found") -// } +func emptyMigration( + logger zerolog.Logger, + executionStateDir string, + outputDir string, + stateCommitment flow.StateCommitment, +) (ledger.State, error) { + + log.Info().Msgf("Loading state with commitment %s", stateCommitment) + + // Load state for given state commitment + trie, err := util.ReadTrie(executionStateDir, stateCommitment) + if err != nil { + return ledger.DummyState, fmt.Errorf("failed to load state: %w", err) + } + + log.Info().Msgf("Getting payloads from loaded state") + + // Get payloads from trie. 
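For reference, the no-migration checkpoint path above can be driven programmatically the same way the tests later in this diff do. A usage sketch, with placeholder paths and commitment:

```go
// Usage sketch (all values are placeholders): extract state at a known
// commitment without running migrations, mirroring the tests below.
Cmd.SetArgs([]string{
	"--execution-state-dir", "/var/flow/execution",
	"--output-dir", "/var/flow/extracted",
	"--state-commitment", "<hex-encoded 64-character commitment>",
	"--no-migration",
	"--chain", flow.Emulator.Chain().String(),
})
if err := Cmd.Execute(); err != nil {
	log.Fatal().Err(err).Msg("state extraction failed")
}
```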
+ payloads := trie.AllPayloads() + + log.Info().Msgf("Migrating %d payloads", len(payloads)) + + // Migrate payloads (migration is no-op) + migs := []migrations.NamedMigration{ + { + Name: "empty migration", + Migrate: func(*registers.ByAccount) error { + return nil + }, + }, + } + + migration := newMigration(log.Logger, migs, flagNWorker) + + migratedPayloads, err := migration(payloads) + if err != nil { + return ledger.DummyState, fmt.Errorf("failed to migrate payloads: %w", err) + } + + log.Info().Msgf("Migrated %d payloads", len(migratedPayloads)) + + // Create trie from migrated payloads + migratedTrie, err := createTrieFromPayloads(log.Logger, payloads) + if err != nil { + return ledger.DummyState, fmt.Errorf("failed to create new trie from migrated payloads: %w", err) + } + + log.Info().Msgf("Created trie from migrated payloads with commitment %s", migratedTrie.RootHash()) + + // Create checkpoint files + newState, err := createCheckpoint(logger, migratedTrie, outputDir, bootstrap.FilenameWALRootCheckpoint) + if err != nil { + return ledger.DummyState, fmt.Errorf("failed to create checkpoint: %w", err) + } + + log.Info().Msgf("Created checkpoint") + + return newState, nil +} + +func ensureCheckpointFileExist(dir string) error { + checkpoints, err := wal.Checkpoints(dir) + if err != nil { + return fmt.Errorf("could not find checkpoint files: %v", err) + } + + if len(checkpoints) != 0 { + log.Info().Msgf("found checkpoint %v files: %v", len(checkpoints), checkpoints) + return nil + } + + has, err := wal.HasRootCheckpoint(dir) + if err != nil { + return fmt.Errorf("could not check has root checkpoint: %w", err) + } + + if has { + log.Info().Msg("found root checkpoint file") + return nil + } + + return fmt.Errorf("no checkpoint file was found, no root checkpoint file was found in %v, check the --execution-state-dir flag", dir) +} diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract.go b/cmd/util/cmd/execution-state-extract/execution_state_extract.go index 613e34c2326..8923d985a16 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract.go @@ -1,143 +1,395 @@ package extract import ( + "encoding/json" "fmt" - "math" + "os" + "time" "github.com/rs/zerolog" - "go.uber.org/atomic" + "github.com/rs/zerolog/log" + "golang.org/x/sync/errgroup" + "github.com/onflow/flow-go-sdk/crypto" + + migrators "github.com/onflow/flow-go/cmd/util/ledger/migrations" "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/hash" "github.com/onflow/flow-go/ledger/common/pathfinder" "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" "github.com/onflow/flow-go/ledger/complete/wal" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" ) -func getStateCommitment(commits storage.Commits, blockHash flow.Identifier) (flow.StateCommitment, error) { - return commits.ByBlockID(blockHash) +type extractor interface { + extract() (partialState bool, payloads []*ledger.Payload, err error) } -func extractExecutionState( - dir string, - targetHash flow.StateCommitment, - outputDir string, - log zerolog.Logger, - chain flow.Chain, - migrate bool, - report bool, - nWorker int, // number of concurrent worker to migation payloads +type 
payloadFileExtractor struct { + logger zerolog.Logger + fileName string +} + +func newPayloadFileExtractor( + logger zerolog.Logger, + fileName string, +) *payloadFileExtractor { + return &payloadFileExtractor{ + logger: logger, + fileName: fileName, + } +} + +func (e *payloadFileExtractor) extract() (bool, []*ledger.Payload, error) { + return util.ReadPayloadFile(e.logger, e.fileName) +} + +type executionStateExtractor struct { + logger zerolog.Logger + dir string + stateCommitment flow.StateCommitment +} + +func newExecutionStateExtractor( + logger zerolog.Logger, + executionStateDir string, + stateCommitment flow.StateCommitment, +) *executionStateExtractor { + return &executionStateExtractor{ + logger: logger, + dir: executionStateDir, + stateCommitment: stateCommitment, + } +} + +func (e *executionStateExtractor) extract() (bool, []*ledger.Payload, error) { + payloads, err := util.ReadTrieForPayloads(e.dir, e.stateCommitment) + if err != nil { + return false, nil, err + } + + return false, payloads, nil +} + +type exporter interface { + export(partialState bool, payloads []*ledger.Payload) (ledger.State, error) +} + +type payloadFileExporter struct { + logger zerolog.Logger + nWorker int + fileName string + addressFilters map[string]struct{} + sortPayloads bool +} + +func newPayloadFileExporter( + logger zerolog.Logger, + nWorker int, + fileName string, + addressFilters map[string]struct{}, + sortPayloads bool, +) *payloadFileExporter { + return &payloadFileExporter{ + logger: logger, + nWorker: nWorker, + fileName: fileName, + addressFilters: addressFilters, + sortPayloads: sortPayloads, + } +} + +func (e *payloadFileExporter) export( + partialState bool, + payloads []*ledger.Payload, +) (ledger.State, error) { + + var group errgroup.Group + + var migratedState ledger.State + + // Need to use a copy of payloads when creating new trie in goroutine + // because payloads are sorted in createPayloadFile(). + copiedPayloads := make([]*ledger.Payload, len(payloads)) + copy(copiedPayloads, payloads) + + // Launch goroutine to get root hash of trie from exported payloads + group.Go(func() error { + newTrie, err := createTrieFromPayloads(log.Logger, copiedPayloads) + if err != nil { + return err + } + + migratedState = ledger.State(newTrie.RootHash()) + return nil + }) + + // Export payloads to payload file + err := e.createPayloadFile(partialState, payloads) + if err != nil { + return ledger.DummyState, err + } + + err = group.Wait() + if err != nil { + return ledger.DummyState, err + } + + return migratedState, nil +} + +func (e *payloadFileExporter) createPayloadFile( + partialState bool, + payloads []*ledger.Payload, ) error { + if e.sortPayloads { + e.logger.Info().Msgf("sorting %d payloads", len(payloads)) + + // Sort payloads to produce deterministic payload file with + // same sequence of payloads inside. 
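The concurrency pattern in `payloadFileExporter.export` above is worth isolating: hash a copy of the payloads in a goroutine while the file write runs, then join both. A generic sketch (names are illustrative; assumes the same package for `createTrieFromPayloads`):

```go
func hashWhileWriting(
	payloads []*ledger.Payload,
	write func([]*ledger.Payload) error,
) (ledger.State, error) {
	var g errgroup.Group
	var state ledger.State

	// The writer may sort its slice in place, so hash from a copy.
	copied := make([]*ledger.Payload, len(payloads))
	copy(copied, payloads)

	g.Go(func() error {
		t, err := createTrieFromPayloads(zerolog.Nop(), copied)
		if err != nil {
			return err
		}
		state = ledger.State(t.RootHash())
		return nil
	})

	if err := write(payloads); err != nil {
		return ledger.DummyState, err
	}
	if err := g.Wait(); err != nil {
		return ledger.DummyState, err
	}
	return state, nil
}
```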
+		payloads = util.SortPayloadsByAddress(payloads, e.nWorker)
+
+		log.Info().Msgf("sorted %d payloads", len(payloads))
+	}
 
-	log.Info().Msg("init WAL")
+	log.Info().Msgf("creating payloads file %s", e.fileName)
 
-	diskWal, err := wal.NewDiskWAL(
-		log,
-		nil,
-		metrics.NewNoopCollector(),
-		dir,
-		complete.DefaultCacheSize,
-		pathfinder.PathByteSize,
-		wal.SegmentSize,
+	exportedPayloadCount, err := util.CreatePayloadFile(
+		e.logger,
+		e.fileName,
+		payloads,
+		e.addressFilters,
+		partialState,
 	)
 	if err != nil {
-		return fmt.Errorf("cannot create disk WAL: %w", err)
+		return fmt.Errorf("cannot generate payloads file: %w", err)
 	}
 
-	log.Info().Msg("init ledger")
+	e.logger.Info().Msgf("exported %d payloads out of %d payloads", exportedPayloadCount, len(payloads))
+
+	return nil
+}
+
+type checkpointFileExporter struct {
+	logger    zerolog.Logger
+	outputDir string
+}
+
+func newCheckpointFileExporter(
+	logger zerolog.Logger,
+	outputDir string,
+) *checkpointFileExporter {
+	return &checkpointFileExporter{
+		logger:    logger,
+		outputDir: outputDir,
+	}
+}
 
-	led, err := complete.NewLedger(
-		diskWal,
-		complete.DefaultCacheSize,
-		&metrics.NoopCollector{},
-		log,
-		complete.DefaultPathFinderVersion)
+func (e *checkpointFileExporter) export(
+	_ bool,
+	payloads []*ledger.Payload,
+) (ledger.State, error) {
+	// Create trie
+	newTrie, err := createTrieFromPayloads(e.logger, payloads)
 	if err != nil {
-		return fmt.Errorf("cannot create ledger from write-a-head logs and checkpoints: %w", err)
+		return ledger.DummyState, err
 	}
 
-	const (
-		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
-		checkpointsToKeep  = 1
+	// Create checkpoint files
+	return createCheckpoint(
+		log.Logger,
+		newTrie,
+		e.outputDir,
+		bootstrap.FilenameWALRootCheckpoint,
 	)
+}
+
+func createCheckpoint(
+	log zerolog.Logger,
+	newTrie *trie.MTrie,
+	outputDir,
+	outputFile string,
+) (ledger.State, error) {
+	stateCommitment := ledger.State(newTrie.RootHash())
 
-	log.Info().Msg("init compactor")
+	log.Info().Msgf("successfully built new trie. NEW ROOT STATE COMMITMENT: %v", stateCommitment.String())
 
-	compactor, err := complete.NewCompactor(led, diskWal, log, complete.DefaultCacheSize, checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
+	err := os.MkdirAll(outputDir, os.ModePerm)
 	if err != nil {
-		return fmt.Errorf("cannot create compactor: %w", err)
+		return ledger.State(hash.DummyHash), fmt.Errorf("could not create output dir %s: %w", outputDir, err)
+	}
+
+	err = wal.StoreCheckpointV6Concurrently([]*trie.MTrie{newTrie}, outputDir, outputFile, log)
+
+	// Writing the checkpoint takes time; without relying on an exit code
+	// or stdout, we need a way to know when the copy is complete.
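Since completion is signaled through `checkpoint_status.json` rather than an exit code, external tooling can poll for that file. A hypothetical consumer sketch (the file name comes from `writeStatusFile` below; the polling interval is an arbitrary choice):

```go
import (
	"encoding/json"
	"errors"
	"os"
	"time"
)

// waitForCheckpointStatus polls until the status file appears, then
// reports whether the checkpoint write succeeded.
func waitForCheckpointStatus(path string, interval time.Duration) (bool, error) {
	for {
		data, err := os.ReadFile(path)
		if errors.Is(err, os.ErrNotExist) {
			time.Sleep(interval) // not written yet; keep waiting
			continue
		}
		if err != nil {
			return false, err
		}
		var status struct {
			Succeeded bool `json:"succeeded"`
		}
		if err := json.Unmarshal(data, &status); err != nil {
			return false, err
		}
		return status.Succeeded, nil
	}
}
```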
+ writeStatusFileErr := writeStatusFile("checkpoint_status.json", err) + if writeStatusFileErr != nil { + return ledger.State(hash.DummyHash), fmt.Errorf("failed to write checkpoint status file: %w", writeStatusFileErr) } - log.Info().Msgf("waiting for compactor to load checkpoint and WAL") + if err != nil { + return ledger.State(hash.DummyHash), fmt.Errorf("failed to store the checkpoint: %w", err) + } - <-compactor.Ready() + log.Info().Msgf("checkpoint file successfully stored at: %v %v", outputDir, outputFile) + return stateCommitment, nil +} - defer func() { - <-led.Done() - <-compactor.Done() - }() +func writeStatusFile(fileName string, e error) error { + checkpointStatus := map[string]bool{"succeeded": e == nil} + checkpointStatusJson, _ := json.MarshalIndent(checkpointStatus, "", " ") + err := os.WriteFile(fileName, checkpointStatusJson, 0644) + return err +} - var migrations []ledger.Migration - var preCheckpointReporters, postCheckpointReporters []ledger.Reporter - newState := ledger.State(targetHash) +func newMigration( + logger zerolog.Logger, + migrations []migrators.NamedMigration, + nWorker int, +) ledger.Migration { + return func(payloads []*ledger.Payload) ([]*ledger.Payload, error) { - if migrate { - // add migration here - migrations = []ledger.Migration{ - // the following migration calculate the storage usage and update the storage for each account - // mig.MigrateAccountUsage, + if len(migrations) == 0 { + return payloads, nil } - } - // generating reports at the end, so that the checkpoint file can be used - // for sporking as soon as it's generated. - if report { - log.Info().Msgf("preparing reporter files") - reportFileWriterFactory := reporters.NewReportFileWriterFactory(outputDir, log) - - preCheckpointReporters = []ledger.Reporter{ - // report epoch counter which is needed for finalizing root block - reporters.NewExportReporter(log, - chain, - func() flow.StateCommitment { return targetHash }, - ), + + payloadCount := len(payloads) + + payloadAccountGrouping := util.GroupPayloadsByAccount(logger, payloads, nWorker) + + logger.Info().Msgf( + "creating registers from grouped payloads (%d) ...", + payloadCount, + ) + + registersByAccount, err := util.NewByAccountRegistersFromPayloadAccountGrouping(payloadAccountGrouping, nWorker) + if err != nil { + return nil, err } - postCheckpointReporters = []ledger.Reporter{ - &reporters.AccountReporter{ - Log: log, - Chain: chain, - RWF: reportFileWriterFactory, - }, - reporters.NewFungibleTokenTracker(log, reportFileWriterFactory, chain, []string{reporters.FlowTokenTypeID(chain)}), - &reporters.AtreeReporter{ - Log: log, - RWF: reportFileWriterFactory, - }, + logger.Info().Msgf( + "created registers from payloads (%d accounts)", + registersByAccount.AccountCount(), + ) + + // Run all migrations on the registers + for index, migration := range migrations { + migrationStep := index + 1 + + logger.Info(). + Str("migration", migration.Name). + Msgf( + "migration %d/%d is underway", + migrationStep, + len(migrations), + ) + + start := time.Now() + err := migration.Migrate(registersByAccount) + elapsed := time.Since(start) + if err != nil { + return nil, fmt.Errorf( + "error applying migration %s (%d/%d): %w", + migration.Name, + migrationStep, + len(migrations), + err, + ) + } + + newPayloadCount := registersByAccount.Count() + + if payloadCount != newPayloadCount { + logger.Warn(). + Int("migration_step", migrationStep). + Int("expected_size", payloadCount). + Int("outcome_size", newPayloadCount). 
+ Msg("payload counts has changed during migration, make sure this is expected.") + } + + logger.Info(). + Str("timeTaken", elapsed.String()). + Str("migration", migration.Name). + Msgf( + "migration %d/%d is done", + migrationStep, + len(migrations), + ) + + payloadCount = newPayloadCount } + + logger.Info().Msg("creating new payloads from registers ...") + + newPayloads := registersByAccount.DestructIntoPayloads(nWorker) + + logger.Info().Msgf("created new payloads (%d) from registers", len(newPayloads)) + + return newPayloads, nil } +} - migratedState, err := led.ExportCheckpointAt( - newState, - migrations, - preCheckpointReporters, - postCheckpointReporters, - complete.DefaultPathFinderVersion, - outputDir, - bootstrap.FilenameWALRootCheckpoint, - ) +func createTrieFromPayloads(logger zerolog.Logger, payloads []*ledger.Payload) (*trie.MTrie, error) { + // get paths + paths, err := pathfinder.PathsFromPayloads(payloads, complete.DefaultPathFinderVersion) if err != nil { - return fmt.Errorf("cannot generate the output checkpoint: %w", err) + return nil, fmt.Errorf("cannot export checkpoint, can't construct paths: %w", err) } - log.Info().Msgf( - "New state commitment for the exported state is: %s (base64: %s)", - migratedState.String(), - migratedState.Base64(), - ) + logger.Info().Msgf("constructing a new trie with migrated payloads (count: %d)...", len(payloads)) - return nil + emptyTrie := trie.NewEmptyMTrie() + + derefPayloads := make([]ledger.Payload, len(payloads)) + for i, p := range payloads { + derefPayloads[i] = *p + } + + // no need to prune the data since it has already been prunned through migrations + const applyPruning = false + newTrie, _, err := trie.NewTrieWithUpdatedRegisters(emptyTrie, paths, derefPayloads, applyPruning) + if err != nil { + return nil, fmt.Errorf("constructing updated trie failed: %w", err) + } + + return newTrie, nil +} + +func addMigrationMainnetKeysMigration( + log zerolog.Logger, + outputDir string, + workerCount int, + chainID flow.ChainID, +) []migrators.NamedMigration { + + log.Info().Msg("initializing add-migrationmainnet-keys migrations ...") + + rwf := reporters.NewReportFileWriterFactory(outputDir, log) + + key, err := crypto.DecodePublicKeyHex(crypto.ECDSA_P256, "711d4cd9930d695ef5c79b668d321f92ba00ed8280fded52c0fa2b15501411d026fe6fb4be3ec894facd3a00f04e32e2db5f5696d3b2b3419e4fba89fb95dca8") + if err != nil { + panic("failed to decode key") + } + + namedMigrations := []migrators.NamedMigration{ + { + Name: "add-migrationmainnet-keys", + Migrate: migrators.NewAccountBasedMigration( + log, + workerCount, + []migrators.AccountBasedMigration{ + migrators.NewAddKeyMigration(chainID, key, rwf), + }, + ), + }, + } + + log.Info().Msg("initialized migrations") + + return namedMigrations } diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go b/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go index 1f770f12426..0131a250bc3 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract_test.go @@ -2,21 +2,30 @@ package extract import ( "crypto/rand" + "encoding/hex" "math" + "path/filepath" + "strings" "testing" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/require" "go.uber.org/atomic" - "github.com/onflow/flow-go/cmd/util/cmd/common" + runtimeCommon "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/cmd/util/ledger/util" "github.com/onflow/flow-go/ledger" 
"github.com/onflow/flow-go/ledger/common/pathfinder" "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/wal" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -29,29 +38,44 @@ func TestExtractExecutionState(t *testing.T) { metr := &metrics.NoopCollector{} t.Run("missing block->state commitment mapping", func(t *testing.T) { - withDirs(t, func(datadir, execdir, outdir string) { - db := common.InitStorage(datadir) - commits := badger.NewCommits(metr, db) + // Initialize a proper Badger database instead of using empty directory + db := unittest.PebbleDB(t, datadir) + defer db.Close() - _, err := getStateCommitment(commits, unittest.IdentifierFixture()) + // Convert to storage.DB interface + storageDB := pebbleimpl.ToDB(db) + commits := store.NewCommits(metr, storageDB) + + _, err := commits.ByBlockID(unittest.IdentifierFixture()) require.Error(t, err) }) }) t.Run("retrieves block->state mapping", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() withDirs(t, func(datadir, execdir, outdir string) { - db := common.InitStorage(datadir) - commits := badger.NewCommits(metr, db) + // Initialize a proper Badger database instead of using empty directory + db := unittest.PebbleDB(t, datadir) + defer db.Close() + + // Convert to storage.DB interface + storageDB := pebbleimpl.ToDB(db) + commits := store.NewCommits(metr, storageDB) blockID := unittest.IdentifierFixture() stateCommitment := unittest.StateCommitmentFixture() - err := commits.Store(blockID, stateCommitment) + err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return storageDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // Store the state commitment for the block ID + return operation.IndexStateCommitment(lctx, rw, blockID, stateCommitment) + }) + }) require.NoError(t, err) - retrievedStateCommitment, err := getStateCommitment(commits, blockID) + retrievedStateCommitment, err := commits.ByBlockID(blockID) require.NoError(t, err) require.Equal(t, stateCommitment, retrievedStateCommitment) }) @@ -59,31 +83,30 @@ func TestExtractExecutionState(t *testing.T) { t.Run("empty WAL doesn't find anything", func(t *testing.T) { withDirs(t, func(datadir, execdir, outdir string) { - err := extractExecutionState( - execdir, - unittest.StateCommitmentFixture(), - outdir, - zerolog.Nop(), - flow.Emulator.Chain(), - false, - false, - 10, - ) + extractor := newExecutionStateExtractor(zerolog.Nop(), execdir, unittest.StateCommitmentFixture()) + + partialState, payloads, err := extractor.extract() require.Error(t, err) + require.False(t, partialState) + require.Equal(t, 0, len(payloads)) }) }) t.Run("happy path", func(t *testing.T) { + lockManager := storage.NewTestingLockManager() withDirs(t, func(datadir, execdir, _ string) { - const ( checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation. 
checkpointsToKeep  = 1
 			)
 
-			db := common.InitStorage(datadir)
-			commits := badger.NewCommits(metr, db)
+			// Initialize a proper Pebble database instead of using an empty directory
+			db := unittest.PebbleDB(t, datadir)
+			defer db.Close()
+
+			// Convert to storage.DB interface
+			storageDB := pebbleimpl.ToDB(db)
 
 			// generate some oldLedger data
 			size := 10
@@ -92,13 +115,13 @@ func TestExtractExecutionState(t *testing.T) {
 			require.NoError(t, err)
 			f, err := complete.NewLedger(diskWal, size*10, metr, zerolog.Nop(), complete.DefaultPathFinderVersion)
 			require.NoError(t, err)
-			compactor, err := complete.NewCompactor(f, diskWal, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
+			compactor, err := complete.NewCompactor(f, diskWal, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), &metrics.NoopCollector{})
 			require.NoError(t, err)
 			<-compactor.Ready()
 
 			var stateCommitment = f.InitialState()
 
-			//saved data after updates
+			// saved data after updates
 			keysValuesByCommit := make(map[string]map[string]keyPair)
 			commitsByBlocks := make(map[flow.Identifier]ledger.State)
 			blocksInOrder := make([]flow.Identifier, size)
@@ -110,12 +133,17 @@ func TestExtractExecutionState(t *testing.T) {
 				require.NoError(t, err)
 
 				stateCommitment, _, err = f.Set(update)
-				//stateCommitment, err = f.UpdateRegisters(keys, values, stateCommitment)
+				// stateCommitment, err = f.UpdateRegisters(keys, values, stateCommitment)
 				require.NoError(t, err)
 
 				// generate random block and map it to state commitment
 				blockID := unittest.IdentifierFixture()
-				err = commits.Store(blockID, flow.StateCommitment(stateCommitment))
+
+				err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+					return storageDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return operation.IndexStateCommitment(lctx, rw, blockID, flow.StateCommitment(stateCommitment))
+					})
+				})
 				require.NoError(t, err)
 
 				data := make(map[string]keyPair, len(keys))
@@ -125,85 +153,459 @@ func TestExtractExecutionState(t *testing.T) {
 						value: values[j],
 					}
 				}
-
-				keysValuesByCommit[string(stateCommitment[:])] = data
+				keysValuesByCommit[stateCommitment.String()] = data
 				commitsByBlocks[blockID] = stateCommitment
 				blocksInOrder[i] = blockID
 			}
 
+			// wait for the ledger and compactor to finish
+			<-f.Done()
+			<-compactor.Done()
+
+			// extract the execution state
+			extractor := newExecutionStateExtractor(zerolog.Nop(), execdir, flow.StateCommitment(stateCommitment))
+
+			partialState, payloads, err := extractor.extract()
+			require.NoError(t, err)
+			require.False(t, partialState)
+			// Calculate expected number of payloads based on getSampleKeyValues logic
+			expectedPayloads := 2 + 4 + 2 + (7 * 10) // cases 0, 1, 2, and 7 default cases
+			require.Equal(t, expectedPayloads, len(payloads))
+
+			// verify the payloads
+			for _, payload := range payloads {
+				key, err := payload.Key()
+				require.NoError(t, err)
+
+				// Look for the key in all state commitments
+				found := false
+				for _, commitData := range keysValuesByCommit {
+					if kv, exist := commitData[key.String()]; exist {
+						require.Equal(t, kv.value, payload.Value())
+						found = true
+						break
+					}
+				}
+				require.True(t, found, "key %s not found in any state commitment", key.String())
+			}
+		})
+	})
+}
+
+// TestExtractPayloadsFromExecutionState tests state extraction with checkpoint as input and payload as output.
+func TestExtractPayloadsFromExecutionState(t *testing.T) { + metr := &metrics.NoopCollector{} + + const payloadFileName = "root.payload" + + t.Run("all payloads", func(t *testing.T) { + withDirs(t, func(_, execdir, outdir string) { + + const ( + checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation. + checkpointsToKeep = 1 + ) + + outputPayloadFileName := filepath.Join(outdir, payloadFileName) + + size := 10 + + diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), execdir, size, pathfinder.PathByteSize, wal.SegmentSize) + require.NoError(t, err) + f, err := complete.NewLedger(diskWal, size*10, metr, zerolog.Nop(), complete.DefaultPathFinderVersion) + require.NoError(t, err) + compactor, err := complete.NewCompactor(f, diskWal, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), &metrics.NoopCollector{}) + require.NoError(t, err) + <-compactor.Ready() + + var stateCommitment = f.InitialState() + + // Save generated data after updates + keysValues := make(map[string]keyPair) + + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + update, err := ledger.NewUpdate(stateCommitment, keys, values) + require.NoError(t, err) + + stateCommitment, _, err = f.Set(update) + require.NoError(t, err) + + for j, key := range keys { + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], + } + } + } + + <-f.Done() + <-compactor.Done() + + tries, err := f.Tries() + require.NoError(t, err) + + err = wal.StoreCheckpointV6SingleThread(tries, execdir, "checkpoint.00000001", zerolog.Nop()) + require.NoError(t, err) + + // Export all payloads + Cmd.SetArgs([]string{ + "--execution-state-dir", execdir, + "--output-dir", outdir, + "--state-commitment", hex.EncodeToString(stateCommitment[:]), + "--no-migration", + "--no-report", + "--output-payload-filename", outputPayloadFileName, + "--chain", flow.Emulator.Chain().String()}) + + err = Cmd.Execute() + require.NoError(t, err) + + // Verify exported payloads. + partialState, payloadsFromFile, err := util.ReadPayloadFile(zerolog.Nop(), outputPayloadFileName) + require.NoError(t, err) + require.Equal(t, len(keysValues), len(payloadsFromFile)) + require.False(t, partialState) + + for _, payloadFromFile := range payloadsFromFile { + k, err := payloadFromFile.Key() + require.NoError(t, err) + + kv, exist := keysValues[k.String()] + require.True(t, exist) + require.Equal(t, kv.value, payloadFromFile.Value()) + } + }) + }) + + t.Run("some payloads", func(t *testing.T) { + withDirs(t, func(_, execdir, outdir string) { + const ( + checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation. 
+ checkpointsToKeep = 1 + ) + + outputPayloadFileName := filepath.Join(outdir, payloadFileName) + + size := 10 + + diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), execdir, size, pathfinder.PathByteSize, wal.SegmentSize) + require.NoError(t, err) + f, err := complete.NewLedger(diskWal, size*10, metr, zerolog.Nop(), complete.DefaultPathFinderVersion) + require.NoError(t, err) + compactor, err := complete.NewCompactor(f, diskWal, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), &metrics.NoopCollector{}) + require.NoError(t, err) + <-compactor.Ready() + + var stateCommitment = f.InitialState() + + // Save generated data after updates + keysValues := make(map[string]keyPair) + + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + update, err := ledger.NewUpdate(stateCommitment, keys, values) + require.NoError(t, err) + + stateCommitment, _, err = f.Set(update) + require.NoError(t, err) + + for j, key := range keys { + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], + } + } + } + <-f.Done() <-compactor.Done() - err = db.Close() + tries, err := f.Tries() + require.NoError(t, err) + + err = wal.StoreCheckpointV6SingleThread(tries, execdir, "checkpoint.00000001", zerolog.Nop()) require.NoError(t, err) - //for blockID, stateCommitment := range commitsByBlocks { + const selectedAddressCount = 10 + selectedAddresses := make(map[string]struct{}) + selectedKeysValues := make(map[string]keyPair) + for k, kv := range keysValues { + owner := kv.key.KeyParts[0].Value + if len(owner) != runtimeCommon.AddressLength { + continue + } + + address, err := runtimeCommon.BytesToAddress(owner) + require.NoError(t, err) - for i, blockID := range blocksInOrder { + if len(selectedAddresses) < selectedAddressCount { + selectedAddresses[address.Hex()] = struct{}{} + } - stateCommitment := commitsByBlocks[blockID] + if _, exist := selectedAddresses[address.Hex()]; exist { + selectedKeysValues[k] = kv + } + } - //we need fresh output dir to prevent contamination - unittest.RunWithTempDir(t, func(outdir string) { + addresses := make([]string, 0, len(selectedAddresses)) + for address := range selectedAddresses { + addresses = append(addresses, address) + } - Cmd.SetArgs([]string{ - "--execution-state-dir", execdir, - "--output-dir", outdir, - "--state-commitment", stateCommitment.String(), - "--datadir", datadir, - "--no-migration", - "--no-report", - "--chain", flow.Emulator.Chain().String()}) + // Export selected payloads + Cmd.SetArgs([]string{ + "--execution-state-dir", execdir, + "--output-dir", outdir, + "--state-commitment", hex.EncodeToString(stateCommitment[:]), + "--no-migration", + "--no-report", + "--output-payload-filename", outputPayloadFileName, + "--extract-payloads-by-address", strings.Join(addresses, ","), + "--chain", flow.Emulator.Chain().String()}) + + err = Cmd.Execute() + require.NoError(t, err) - err := Cmd.Execute() - require.NoError(t, err) + // Verify exported payloads. 
+ partialState, payloadsFromFile, err := util.ReadPayloadFile(zerolog.Nop(), outputPayloadFileName) + require.NoError(t, err) + require.True(t, partialState) - diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), outdir, size, pathfinder.PathByteSize, wal.SegmentSize) - require.NoError(t, err) + nonGlobalPayloads := make([]*ledger.Payload, 0, len(selectedKeysValues)) + for _, payloadFromFile := range payloadsFromFile { + key, err := payloadFromFile.Key() + require.NoError(t, err) - storage, err := complete.NewLedger(diskWal, 1000, metr, zerolog.Nop(), complete.DefaultPathFinderVersion) - require.NoError(t, err) + owner := key.KeyParts[0].Value + if len(owner) > 0 { + nonGlobalPayloads = append(nonGlobalPayloads, payloadFromFile) + } + } - const ( - checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation. - checkpointsToKeep = 1 - ) - compactor, err := complete.NewCompactor(storage, diskWal, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) - require.NoError(t, err) + require.Equal(t, len(selectedKeysValues), len(nonGlobalPayloads)) - <-compactor.Ready() + for _, payloadFromFile := range nonGlobalPayloads { + k, err := payloadFromFile.Key() + require.NoError(t, err) - data := keysValuesByCommit[string(stateCommitment[:])] + kv, exist := selectedKeysValues[k.String()] + require.True(t, exist) + require.Equal(t, kv.value, payloadFromFile.Value()) + } + }) + }) +} - keys := make([]ledger.Key, 0, len(data)) - for _, v := range data { - keys = append(keys, v.key) +// TestExtractStateFromPayloads tests state extraction with payload as input. +func TestExtractStateFromPayloads(t *testing.T) { + + const payloadFileName = "root.payload" + + t.Run("create checkpoint", func(t *testing.T) { + withDirs(t, func(_, execdir, outdir string) { + size := 10 + + inputPayloadFileName := filepath.Join(execdir, payloadFileName) + + // Generate some data + keysValues := make(map[string]keyPair) + var payloads []*ledger.Payload + + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + for j, key := range keys { + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], } - query, err := ledger.NewQuery(stateCommitment, keys) - require.NoError(t, err) + payloads = append(payloads, ledger.NewPayload(key, values[j])) + } + } + + numOfPayloadWritten, err := util.CreatePayloadFile( + zerolog.Nop(), + inputPayloadFileName, + payloads, + nil, + false, + ) + require.NoError(t, err) + require.Equal(t, len(payloads), numOfPayloadWritten) + + // Export checkpoint file + Cmd.SetArgs([]string{ + "--execution-state-dir", execdir, + "--output-dir", outdir, + "--no-migration", + "--no-report", + "--state-commitment", "", + "--input-payload-filename", inputPayloadFileName, + "--output-payload-filename", "", + "--extract-payloads-by-address", "", + "--chain", flow.Emulator.Chain().String()}) + + err = Cmd.Execute() + require.NoError(t, err) + + tries, err := wal.OpenAndReadCheckpointV6(outdir, "root.checkpoint", zerolog.Nop()) + require.NoError(t, err) + require.Equal(t, 1, len(tries)) + + // Verify exported checkpoint + payloadsFromFile := tries[0].AllPayloads() + require.NoError(t, err) + require.Equal(t, len(keysValues), len(payloadsFromFile)) + + for _, payloadFromFile := range payloadsFromFile { + k, err := payloadFromFile.Key() + require.NoError(t, err) + + kv, exist := keysValues[k.String()] + require.True(t, exist) - registerValues, err := storage.Get(query) - //registerValues, err := 
mForest.Read([]byte(stateCommitment), keys) - require.NoError(t, err) + require.Equal(t, kv.value, payloadFromFile.Value()) + } + }) + + }) + + t.Run("create payloads", func(t *testing.T) { + withDirs(t, func(_, execdir, outdir string) { + inputPayloadFileName := filepath.Join(execdir, payloadFileName) + outputPayloadFileName := filepath.Join(outdir, "selected.payload") + + size := 10 - for i, key := range keys { - registerValue := registerValues[i] - require.Equal(t, data[key.String()].value, registerValue) + // Generate some data + keysValues := make(map[string]keyPair) + var payloads []*ledger.Payload + + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + for j, key := range keys { + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], } - //make sure blocks after this one are not in checkpoint - // ie - extraction stops after hitting right hash - for j := i + 1; j < len(blocksInOrder); j++ { + payloads = append(payloads, ledger.NewPayload(key, values[j])) + } + } + + numOfPayloadWritten, err := util.CreatePayloadFile( + zerolog.Nop(), + inputPayloadFileName, + payloads, + nil, + false, + ) + require.NoError(t, err) + require.Equal(t, len(payloads), numOfPayloadWritten) + + // Export all payloads + Cmd.SetArgs([]string{ + "--execution-state-dir", execdir, + "--output-dir", outdir, + "--no-migration", + "--no-report", + "--state-commitment", "", + "--input-payload-filename", inputPayloadFileName, + "--output-payload-filename", outputPayloadFileName, + "--extract-payloads-by-address", "", + "--chain", flow.Emulator.Chain().String()}) + + err = Cmd.Execute() + require.NoError(t, err) + + // Verify exported payloads. + partialState, payloadsFromFile, err := util.ReadPayloadFile(zerolog.Nop(), outputPayloadFileName) + require.NoError(t, err) + require.Equal(t, len(keysValues), len(payloadsFromFile)) + require.False(t, partialState) - query.SetState(commitsByBlocks[blocksInOrder[j]]) - _, err := storage.Get(query) - require.Error(t, err) + for _, payloadFromFile := range payloadsFromFile { + k, err := payloadFromFile.Key() + require.NoError(t, err) + + kv, exist := keysValues[k.String()] + require.True(t, exist) + + require.Equal(t, kv.value, payloadFromFile.Value()) + } + }) + }) + + t.Run("input is partial state", func(t *testing.T) { + withDirs(t, func(_, execdir, outdir string) { + size := 10 + + inputPayloadFileName := filepath.Join(execdir, payloadFileName) + outputPayloadFileName := filepath.Join(outdir, "selected.payload") + + // Generate some data + keysValues := make(map[string]keyPair) + var payloads []*ledger.Payload + + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + for j, key := range keys { + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], } - <-storage.Done() - <-compactor.Done() - }) + payloads = append(payloads, ledger.NewPayload(key, values[j])) + } + } + + // Create input payload file that represents partial state + numOfPayloadWritten, err := util.CreatePayloadFile( + zerolog.Nop(), + inputPayloadFileName, + payloads, + nil, + true, + ) + require.NoError(t, err) + require.Equal(t, len(payloads), numOfPayloadWritten) + + // Since input payload file is partial state, --allow-partial-state-from-payload-file must be specified. 
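+			// (Without this flag, extraction is expected to reject a partial-state input file.)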
+			Cmd.SetArgs([]string{
+				"--execution-state-dir", execdir,
+				"--output-dir", outdir,
+				"--no-migration",
+				"--no-report",
+				"--state-commitment", "",
+				"--input-payload-filename", inputPayloadFileName,
+				"--output-payload-filename", outputPayloadFileName,
+				"--extract-payloads-by-address", "",
+				"--allow-partial-state-from-payload-file",
+				"--chain", flow.Emulator.Chain().String()})
+
+			err = Cmd.Execute()
+			require.NoError(t, err)
+
+			// Verify exported payloads.
+			partialState, payloadsFromFile, err := util.ReadPayloadFile(zerolog.Nop(), outputPayloadFileName)
+			require.NoError(t, err)
+			require.Equal(t, len(keysValues), len(payloadsFromFile))
+			require.True(t, partialState)
+
+			for _, payloadFromFile := range payloadsFromFile {
+				k, err := payloadFromFile.Key()
+				require.NoError(t, err)
+
+				kv, exist := keysValues[k.String()]
+				require.True(t, exist)
+
+				require.Equal(t, kv.value, payloadFromFile.Value())
 			}
 		})
 	})
@@ -228,7 +630,8 @@ func getSampleKeyValues(i int) ([]ledger.Key, []ledger.Value) {
 		keys := make([]ledger.Key, 0)
 		values := make([]ledger.Value, 0)
 		for j := 0; j < 10; j++ {
-			address := make([]byte, 32)
+			// address := make([]byte, 32)
+			address := make([]byte, 8)
 			_, err := rand.Read(address)
 			if err != nil {
 				panic(err)
diff --git a/cmd/util/cmd/export-evm-state/cmd.go b/cmd/util/cmd/export-evm-state/cmd.go
new file mode 100644
index 00000000000..b06544da56f
--- /dev/null
+++ b/cmd/util/cmd/export-evm-state/cmd.go
@@ -0,0 +1,160 @@
+package evm_exporter
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/rs/zerolog/log"
+	"github.com/spf13/cobra"
+
+	"github.com/onflow/atree"
+
+	"github.com/onflow/flow-go/cmd/util/ledger/util"
+	"github.com/onflow/flow-go/fvm/evm"
+	"github.com/onflow/flow-go/fvm/evm/emulator/state"
+	"github.com/onflow/flow-go/fvm/evm/testutils"
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/convert"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+var (
+	flagChain             string
+	flagExecutionStateDir string
+	flagOutputDir         string
+	flagStateCommitment   string
+	flagEVMStateGobDir    string
+	flagEVMStateGobHeight uint64
+)
+
+var Cmd = &cobra.Command{
+	Use:   "export-evm-state",
+	Short: "exports evm state into several binary files",
+	Run:   run,
+}
+
+func init() {
+	Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name")
+	_ = Cmd.MarkFlagRequired("chain")
+
+	Cmd.Flags().StringVar(&flagExecutionStateDir, "execution-state-dir", "",
+		"Execution Node state dir (where WAL logs are written)")
+
+	Cmd.Flags().StringVar(&flagOutputDir, "output-dir", "",
+		"Directory to write new Execution State to")
+	_ = Cmd.MarkFlagRequired("output-dir")
+
+	Cmd.Flags().StringVar(&flagStateCommitment, "state-commitment", "",
+		"State commitment (hex-encoded, 64 characters)")
+
+	Cmd.Flags().StringVar(&flagEVMStateGobDir, "evm_state_gob_dir", "/var/flow/data/evm_state_gob",
+		"directory that stores the evm state gob files as checkpoint")
+
+	Cmd.Flags().Uint64Var(&flagEVMStateGobHeight, "evm_state_gob_height", 0,
+		"the flow height of the evm state gob files")
+}
+
+func run(*cobra.Command, []string) {
+	log.Info().Msg("start exporting evm state")
+	if flagExecutionStateDir != "" {
+		err := ExportEVMState(flagChain, flagExecutionStateDir, flagStateCommitment, flagOutputDir)
+		if err != nil {
+			log.Fatal().Err(err).Msg("cannot export evm state")
+		}
+	} else if flagEVMStateGobDir != "" {
+		err := ExportEVMStateFromGob(flagChain, flagEVMStateGobDir, flagEVMStateGobHeight, flagOutputDir)
+		if err != nil {
+			log.Fatal().Err(err).Msg("cannot export evm state from gob files")
+		}
+	}
+}
+
+// ExportEVMState exports the EVM state for the given chain from the execution state.
+func ExportEVMState(
+	chainName string,
+	ledgerPath string,
+	targetState string,
+	outputPath string) error {
+
+	chainID := flow.ChainID(chainName)
+
+	storageRoot := evm.StorageAccountAddress(chainID)
+	rootOwner := string(storageRoot.Bytes())
+
+	payloads, err := util.ReadTrieForPayloads(ledgerPath, util.ParseStateCommitment(targetState))
+	if err != nil {
+		return err
+	}
+
+	// filter payloads of evm storage
+	filteredPayloads := make(map[flow.RegisterID]*ledger.Payload)
+	for _, payload := range payloads {
+		registerID, _, err := convert.PayloadToRegister(payload)
+		if err != nil {
+			return fmt.Errorf("failed to convert payload to register: %w", err)
+		}
+		if registerID.Owner == rootOwner {
+			filteredPayloads[registerID] = payload
+		}
+	}
+
+	payloadsLedger := util.NewPayloadsLedger(filteredPayloads)
+
+	return ExportEVMStateFromPayloads(payloadsLedger, storageRoot, outputPath)
+}
+
+func ExportEVMStateFromGob(
+	chainName string,
+	evmStateGobDir string,
+	flowHeight uint64,
+	outputPath string) error {
+
+	valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(evmStateGobDir, flowHeight)
+	chainID := flow.ChainID(chainName)
+
+	storageRoot := evm.StorageAccountAddress(chainID)
+	valuesGob, err := testutils.DeserializeState(valueFileName)
+	if err != nil {
+		return err
+	}
+
+	allocatorGobs, err := testutils.DeserializeAllocator(allocatorFileName)
+	if err != nil {
+		return err
+	}
+
+	store := testutils.GetSimpleValueStorePopulated(valuesGob, allocatorGobs)
+
+	return ExportEVMStateFromPayloads(store, storageRoot, outputPath)
+}
+
+func ExportEVMStateFromPayloads(
+	ledger atree.Ledger,
+	storageRoot flow.Address,
+	outputPath string,
+) error {
+	exporter, err := state.NewExporter(ledger, storageRoot)
+	if err != nil {
+		return fmt.Errorf("failed to create exporter: %w", err)
+	}
+
+	if _, err := os.Stat(outputPath); os.IsNotExist(err) {
+		err := os.Mkdir(outputPath, os.ModePerm)
+		if err != nil {
+			return fmt.Errorf("failed to create path: %w", err)
+		}
+	}
+
+	err = exporter.ExportGob(outputPath)
+	if err != nil {
+		return fmt.Errorf("failed to export: %w", err)
+	}
+	return nil
+}
+
+func evmStateGobFileNamesByEndHeight(evmStateGobDir string, endHeight uint64) (string, string) {
+	valueFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("values-%d.gob", endHeight))
+	allocatorFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("allocators-%d.gob", endHeight))
+	return valueFileName, allocatorFileName
+}
diff --git a/cmd/util/cmd/export-json-transactions/cmd.go b/cmd/util/cmd/export-json-transactions/cmd.go
index 636c21754fa..c8ba51cf2b5 100644
--- a/cmd/util/cmd/export-json-transactions/cmd.go
+++ b/cmd/util/cmd/export-json-transactions/cmd.go
@@ -8,11 +8,13 @@ import (
 	"os"
 	"path/filepath"
 
+	"github.com/jordanschalm/lockctx"
 	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 
 	"github.com/onflow/flow-go/cmd/util/cmd/common"
 	"github.com/onflow/flow-go/cmd/util/cmd/export-json-transactions/transactions"
+	"github.com/onflow/flow-go/storage"
 )
 
 var flagDatadir string
@@ -21,7 +23,7 @@ var flagStartHeight uint64
 var flagEndHeight uint64
 
 // example:
-// ./util export-json-transactions --output-dir ./ --datadir /var/fow/data/protocol/ --start-height 2 --end-height 242
+// ./util export-json-transactions --output-dir ./ --start-height 2 --end-height 242
 var Cmd = &cobra.Command{
 	Use:   "export-json-transactions",
 	Short: "exports transactions into a json file",
@@ -29,8 +31,7 @@ var
Cmd = &cobra.Command{ } func init() { - Cmd.Flags().StringVar(&flagDatadir, "datadir", "/var/flow/data/protocol", - "the protocol state") + common.InitDataDirFlag(Cmd, &flagDatadir) Cmd.Flags().StringVar(&flagOutputDir, "output-dir", "", "Directory to write transactions JSON to") @@ -47,7 +48,8 @@ func init() { func run(*cobra.Command, []string) { log.Info().Msg("start exporting transactions") - err := ExportTransactions(flagDatadir, flagOutputDir, flagStartHeight, flagEndHeight) + lockManager := storage.MakeSingletonLockManager() + err := ExportTransactions(lockManager, flagDatadir, flagOutputDir, flagStartHeight, flagEndHeight) if err != nil { log.Fatal().Err(err).Msg("cannot get export transactions") } @@ -61,62 +63,62 @@ func writeJSONTo(writer io.Writer, jsonData []byte) error { // ExportTransactions exports transactions to JSON to the outputDir for height range specified by // startHeight and endHeight -func ExportTransactions(dataDir string, outputDir string, startHeight uint64, endHeight uint64) error { +func ExportTransactions(lockManager lockctx.Manager, dataDir string, outputDir string, startHeight uint64, endHeight uint64) error { // init dependencies - db := common.InitStorage(flagDatadir) - storages := common.InitStorages(db) - defer db.Close() - - state, err := common.InitProtocolState(db, storages) - if err != nil { - return fmt.Errorf("could not init protocol state: %w", err) - } - - // create finder - finder := &transactions.Finder{ - State: state, - Payloads: storages.Payloads, - Collections: storages.Collections, - } - - // create JSON file writer - outputFile := filepath.Join(outputDir, "transactions.json") - fi, err := os.Create(outputFile) - if err != nil { - return fmt.Errorf("could not create block output file %v, %w", outputFile, err) - } - defer fi.Close() - - blockWriter := bufio.NewWriter(fi) - - // build all blocks first before writing to disk - // TODO: if the height range is too high, consider streaming json writing for each block - blocks, err := finder.GetByHeightRange(startHeight, endHeight) - if err != nil { - return err - } - - log.Info().Msgf("exporting transactions for %v blocks", endHeight-startHeight+1) - - // converting all blocks into json - jsonData, err := json.Marshal(blocks) - if err != nil { - return fmt.Errorf("could not marshal JSON: %w", err) - } - - // writing to disk - err = writeJSONTo(blockWriter, jsonData) - if err != nil { - return fmt.Errorf("could not write json to %v: %w", outputDir, err) - } - - err = blockWriter.Flush() - if err != nil { - return fmt.Errorf("fail to flush block data: %w", err) - } - - log.Info().Msgf("successfully exported transaction data to %v", outputFile) - - return nil + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) + + state, err := common.OpenProtocolState(lockManager, db, storages) + if err != nil { + return fmt.Errorf("could not open protocol state: %w", err) + } + + // create finder + finder := &transactions.Finder{ + State: state, + Payloads: storages.Payloads, + Collections: storages.Collections, + } + + // create JSON file writer + outputFile := filepath.Join(outputDir, "transactions.json") + fi, err := os.Create(outputFile) + if err != nil { + return fmt.Errorf("could not create block output file %v, %w", outputFile, err) + } + defer fi.Close() + + blockWriter := bufio.NewWriter(fi) + + // build all blocks first before writing to disk + // TODO: if the height range is too high, consider streaming json writing for each block + blocks, err := 
finder.GetByHeightRange(startHeight, endHeight) + if err != nil { + return err + } + + log.Info().Msgf("exporting transactions for %v blocks", endHeight-startHeight+1) + + // converting all blocks into json + jsonData, err := json.Marshal(blocks) + if err != nil { + return fmt.Errorf("could not marshal JSON: %w", err) + } + + // writing to disk + err = writeJSONTo(blockWriter, jsonData) + if err != nil { + return fmt.Errorf("could not write json to %v: %w", outputDir, err) + } + + err = blockWriter.Flush() + if err != nil { + return fmt.Errorf("fail to flush block data: %w", err) + } + + log.Info().Msgf("successfully exported transaction data to %v", outputFile) + + return nil + }) } diff --git a/cmd/util/cmd/export-json-transactions/transactions/range_test.go b/cmd/util/cmd/export-json-transactions/transactions/range_test.go index f8bc27b177d..9d85f5c6232 100644 --- a/cmd/util/cmd/export-json-transactions/transactions/range_test.go +++ b/cmd/util/cmd/export-json-transactions/transactions/range_test.go @@ -4,18 +4,20 @@ import ( "fmt" "testing" - badger "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" "github.com/onflow/flow-go/utils/unittest" ) func TestFindBlockTransactions(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() // prepare two blocks // block 1 has 2 collections // block 2 has 1 collection @@ -23,48 +25,72 @@ func TestFindBlockTransactions(t *testing.T) { col2 := unittest.ClusterPayloadFixture(2) col3 := unittest.ClusterPayloadFixture(3) - b1 := unittest.BlockFixture() - b1.Payload.Guarantees = []*flow.CollectionGuarantee{ - &flow.CollectionGuarantee{ - CollectionID: col1.Collection.ID(), - ReferenceBlockID: col1.ReferenceBlockID, - }, - &flow.CollectionGuarantee{ - CollectionID: col2.Collection.ID(), - ReferenceBlockID: col2.ReferenceBlockID, - }, - } - b1.Header.PayloadHash = b1.Payload.Hash() - b1.Header.Height = 4 - - b2 := unittest.BlockFixture() - b2.Payload.Guarantees = []*flow.CollectionGuarantee{ - &flow.CollectionGuarantee{ - CollectionID: col3.Collection.ID(), - ReferenceBlockID: col3.ReferenceBlockID, - }, - } - b2.Header.PayloadHash = b2.Payload.Hash() - b1.Header.Height = 5 + b1 := unittest.BlockFixture( + unittest.Block.WithHeight(4), + unittest.Block.WithPayload( + flow.Payload{ + Guarantees: []*flow.CollectionGuarantee{ + &flow.CollectionGuarantee{ + CollectionID: col1.Collection.ID(), + ReferenceBlockID: col1.ReferenceBlockID, + }, + &flow.CollectionGuarantee{ + CollectionID: col2.Collection.ID(), + ReferenceBlockID: col2.ReferenceBlockID, + }, + }, + ProtocolStateID: unittest.IdentifierFixture(), + }, + ), + ) + + b2 := unittest.BlockFixture( + unittest.Block.WithHeight(5), + unittest.Block.WithPayload( + flow.Payload{ + Guarantees: []*flow.CollectionGuarantee{ + &flow.CollectionGuarantee{ + CollectionID: col3.Collection.ID(), + ReferenceBlockID: col3.ReferenceBlockID, + }, + }, + ProtocolStateID: unittest.IdentifierFixture(), + }, + ), + ) // prepare dependencies storages := common.InitStorages(db) payloads, collections := storages.Payloads, storages.Collections snap4 := &mock.Snapshot{} - snap4.On("Head").Return(b1.Header, nil) + 
snap4.On("Head").Return(b1.ToHeader(), nil) snap5 := &mock.Snapshot{} - snap5.On("Head").Return(b2.Header, nil) + snap5.On("Head").Return(b2.ToHeader(), nil) state := &mock.State{} state.On("AtHeight", uint64(4)).Return(snap4, nil) state.On("AtHeight", uint64(5)).Return(snap5, nil) // store into database - require.NoError(t, payloads.Store(b1.ID(), b1.Payload)) - require.NoError(t, payloads.Store(b2.ID(), b2.Payload)) + p1 := unittest.ProposalFromBlock(b1) + p2 := unittest.ProposalFromBlock(b2) + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := storages.Blocks.BatchStore(lctx, rw, p1) + if err != nil { + return err + } + + return storages.Blocks.BatchStore(lctx, rw, p2) + }) + }) + require.NoError(t, err) - require.NoError(t, collections.Store(&col1.Collection)) - require.NoError(t, collections.Store(&col2.Collection)) - require.NoError(t, collections.Store(&col3.Collection)) + _, err = collections.Store(&col1.Collection) + require.NoError(t, err) + _, err = collections.Store(&col2.Collection) + require.NoError(t, err) + _, err = collections.Store(&col3.Collection) + require.NoError(t, err) f := &Finder{ State: state, diff --git a/cmd/util/cmd/extract-payloads-by-address/cmd.go b/cmd/util/cmd/extract-payloads-by-address/cmd.go new file mode 100644 index 00000000000..031b8e15b0a --- /dev/null +++ b/cmd/util/cmd/extract-payloads-by-address/cmd.go @@ -0,0 +1,95 @@ +package extractpayloads + +import ( + "os" + "strings" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/util/common" + "github.com/onflow/flow-go/cmd/util/ledger/util" +) + +var ( + flagInputPayloadFileName string + flagOutputPayloadFileName string + flagAddresses string +) + +var Cmd = &cobra.Command{ + Use: "extract-payloads-by-address", + Short: "Read payload file and generate payload file containing payloads with specified addresses", + Run: run, +} + +func init() { + Cmd.Flags().StringVar( + &flagInputPayloadFileName, + "input-filename", + "", + "Input payload file name") + _ = Cmd.MarkFlagRequired("input-filename") + + Cmd.Flags().StringVar( + &flagOutputPayloadFileName, + "output-filename", + "", + "Output payload file name") + _ = Cmd.MarkFlagRequired("output-filename") + + Cmd.Flags().StringVar( + &flagAddresses, + "addresses", + "", + "extract payloads of addresses (comma separated hex-encoded addresses) to file specified by output-payload-filename", + ) + _ = Cmd.MarkFlagRequired("addresses") +} + +func run(*cobra.Command, []string) { + + if _, err := os.Stat(flagInputPayloadFileName); os.IsNotExist(err) { + log.Fatal().Msgf("Input file %s doesn't exist", flagInputPayloadFileName) + } + + if _, err := os.Stat(flagOutputPayloadFileName); os.IsExist(err) { + log.Fatal().Msgf("Output file %s exists", flagOutputPayloadFileName) + } + + owners, err := common.ParseOwners(strings.Split(flagAddresses, ",")) + if err != nil { + log.Fatal().Err(err).Msg("failed to parse addresses") + } + + log.Info().Msgf( + "extracting payloads with owners %s from %s to %s", + common.OwnersToString(owners), + flagInputPayloadFileName, + flagOutputPayloadFileName, + ) + + inputPayloadsFromPartialState, payloads, err := util.ReadPayloadFile(log.Logger, flagInputPayloadFileName) + if err != nil { + log.Fatal().Err(err).Msg("failed to read payloads") + } + + numOfPayloadWritten, err := util.CreatePayloadFile( + log.Logger, + flagOutputPayloadFileName, + payloads, + 
owners, + inputPayloadsFromPartialState, + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to create payloads file") + } + + log.Info().Msgf( + "extracted %d payloads with owners %s from %s to %s", + numOfPayloadWritten, + common.OwnersToString(owners), + flagInputPayloadFileName, + flagOutputPayloadFileName, + ) +} diff --git a/cmd/util/cmd/extract-payloads-by-address/extract_payloads_test.go b/cmd/util/cmd/extract-payloads-by-address/extract_payloads_test.go new file mode 100644 index 00000000000..697d983d1f9 --- /dev/null +++ b/cmd/util/cmd/extract-payloads-by-address/extract_payloads_test.go @@ -0,0 +1,261 @@ +package extractpayloads + +import ( + "crypto/rand" + "path/filepath" + "strings" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/utils/unittest" +) + +type keyPair struct { + key ledger.Key + value ledger.Value +} + +func TestExtractPayloads(t *testing.T) { + + t.Run("some payloads", func(t *testing.T) { + + unittest.RunWithTempDir(t, func(datadir string) { + + inputFile := filepath.Join(datadir, "input.payload") + outputFile := filepath.Join(datadir, "output.payload") + + size := 10 + + // Generate some data + keysValues := make(map[string]keyPair) + var payloads []*ledger.Payload + + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + for j, key := range keys { + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], + } + + payloads = append(payloads, ledger.NewPayload(key, values[j])) + } + } + + numOfPayloadWritten, err := util.CreatePayloadFile(zerolog.Nop(), inputFile, payloads, nil, false) + require.NoError(t, err) + require.Equal(t, len(payloads), numOfPayloadWritten) + + const selectedAddressCount = 10 + selectedAddresses := make(map[string]struct{}) + selectedKeysValues := make(map[string]keyPair) + for k, kv := range keysValues { + owner := kv.key.KeyParts[0].Value + if len(owner) != common.AddressLength { + continue + } + + address, err := common.BytesToAddress(owner) + require.NoError(t, err) + + if len(selectedAddresses) < selectedAddressCount { + selectedAddresses[address.Hex()] = struct{}{} + } + + if _, exist := selectedAddresses[address.Hex()]; exist { + selectedKeysValues[k] = kv + } + } + + addresses := make([]string, 0, len(selectedAddresses)) + for address := range selectedAddresses { + addresses = append(addresses, address) + } + + // Export selected payloads + Cmd.SetArgs([]string{ + "--input-filename", inputFile, + "--output-filename", outputFile, + "--addresses", strings.Join(addresses, ","), + }) + + err = Cmd.Execute() + require.NoError(t, err) + + // Verify exported payloads. 
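+			// The output of extract-payloads-by-address is marked as partial state, since it contains only the selected owners.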
+ partialState, payloadsFromFile, err := util.ReadPayloadFile(zerolog.Nop(), outputFile) + require.NoError(t, err) + require.True(t, partialState) + + nonGlobalPayloads := make([]*ledger.Payload, 0, len(selectedKeysValues)) + for _, payloadFromFile := range payloadsFromFile { + key, err := payloadFromFile.Key() + require.NoError(t, err) + + owner := key.KeyParts[0].Value + if len(owner) > 0 { + nonGlobalPayloads = append(nonGlobalPayloads, payloadFromFile) + } + } + + require.Equal(t, len(selectedKeysValues), len(nonGlobalPayloads)) + + for _, payloadFromFile := range nonGlobalPayloads { + k, err := payloadFromFile.Key() + require.NoError(t, err) + + kv, exist := selectedKeysValues[k.String()] + require.True(t, exist) + require.Equal(t, kv.value, payloadFromFile.Value()) + } + }) + }) + + t.Run("empty address", func(t *testing.T) { + + unittest.RunWithTempDir(t, func(datadir string) { + + inputFile := filepath.Join(datadir, "input.payload") + outputFile := filepath.Join(datadir, "output.payload") + + size := 10 + + // Generate some data + keysValues := make(map[string]keyPair) + var payloads []*ledger.Payload + + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + for j, key := range keys { + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], + } + + payloads = append(payloads, ledger.NewPayload(key, values[j])) + } + } + + numOfPayloadWritten, err := util.CreatePayloadFile(zerolog.Nop(), inputFile, payloads, nil, false) + require.NoError(t, err) + require.Equal(t, len(payloads), numOfPayloadWritten) + + // Export selected payloads + Cmd.SetArgs([]string{ + "--input-filename", inputFile, + "--output-filename", outputFile, + "--addresses", ",", + }) + + err = Cmd.Execute() + require.NoError(t, err) + + // Verify exported payloads. 
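+			// With no usable addresses selected, the output should contain only global (ownerless) payloads.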
+ partialState, payloadsFromFile, err := util.ReadPayloadFile(zerolog.Nop(), outputFile) + require.NoError(t, err) + require.True(t, partialState) + + var nonGlobalPayloads []*ledger.Payload + for _, payloadFromFile := range payloadsFromFile { + key, err := payloadFromFile.Key() + require.NoError(t, err) + + owner := key.KeyParts[0].Value + if len(owner) > 0 { + nonGlobalPayloads = append(nonGlobalPayloads, payloadFromFile) + } + } + + require.Equal(t, 0, len(nonGlobalPayloads)) + + }) + }) +} + +func getSampleKeyValues(i int) ([]ledger.Key, []ledger.Value) { + switch i { + case 0: + return []ledger.Key{getKey("", "uuid"), getKey("", "account_address_state")}, + []ledger.Value{[]byte{'1'}, []byte{'A'}} + case 1: + return []ledger.Key{getKey("ADDRESS", "public_key_count"), + getKey("ADDRESS", "public_key_0"), + getKey("ADDRESS", "exists"), + getKey("ADDRESS", "storage_used")}, + []ledger.Value{[]byte{1}, []byte("PUBLICKEYXYZ"), []byte{1}, []byte{100}} + case 2: + // TODO change the contract_names to CBOR encoding + return []ledger.Key{getKey("ADDRESS", "contract_names"), getKey("ADDRESS", "code.mycontract")}, + []ledger.Value{[]byte("mycontract"), []byte("CONTRACT Content")} + default: + keys := make([]ledger.Key, 0) + values := make([]ledger.Value, 0) + for j := 0; j < 10; j++ { + // address := make([]byte, 32) + address := make([]byte, 8) + _, err := rand.Read(address) + if err != nil { + panic(err) + } + keys = append(keys, getKey(string(address), "test")) + values = append(values, getRandomCadenceValue()) + } + return keys, values + } +} + +func getKey(owner, key string) ledger.Key { + return ledger.Key{KeyParts: []ledger.KeyPart{ + {Type: uint16(0), Value: []byte(owner)}, + {Type: uint16(2), Value: []byte(key)}, + }, + } +} + +func getRandomCadenceValue() ledger.Value { + + randomPart := make([]byte, 10) + _, err := rand.Read(randomPart) + if err != nil { + panic(err) + } + valueBytes := []byte{ + // magic prefix + 0x0, 0xca, 0xde, 0x0, 0x4, + // tag + 0xd8, 132, + // array, 5 items follow + 0x85, + + // tag + 0xd8, 193, + // UTF-8 string, length 4 + 0x64, + // t, e, s, t + 0x74, 0x65, 0x73, 0x74, + + // nil + 0xf6, + + // positive integer 1 + 0x1, + + // array, 0 items follow + 0x80, + + // UTF-8 string, length 10 + 0x6a, + 0x54, 0x65, 0x73, 0x74, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + } + + valueBytes = append(valueBytes, randomPart...) 
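+	// The fixed CBOR prefix plus the random suffix makes each generated value unique.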
+	return ledger.Value(valueBytes)
+}
diff --git a/cmd/util/cmd/find-inconsistent-result/cmd.go b/cmd/util/cmd/find-inconsistent-result/cmd.go
new file mode 100644
index 00000000000..16636dd5fc9
--- /dev/null
+++ b/cmd/util/cmd/find-inconsistent-result/cmd.go
@@ -0,0 +1,193 @@
+package find_inconsistent_result
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/spf13/cobra"
+
+	"github.com/onflow/flow-go/cmd/util/cmd/common"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/block_iterator/latest"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+)
+
+var NoMismatchFoundError = errors.New("no mismatch found")
+
+var (
+	flagDatadir     string
+	flagStartHeight uint64
+	flagEndHeight   uint64
+)
+
+var Cmd = &cobra.Command{
+	Use:   "find-inconsistent-result",
+	Short: "find the first block that produces inconsistent results",
+	Run:   run,
+}
+
+func init() {
+	common.InitDataDirFlag(Cmd, &flagDatadir)
+
+	Cmd.Flags().Uint64Var(&flagEndHeight, "end-height", 0, "the last block height to check for result consistency")
+	Cmd.Flags().Uint64Var(&flagStartHeight, "start-height", 0, "the first block height to check for result consistency")
+}
+
+func run(*cobra.Command, []string) {
+	lockManager := storage.MakeSingletonLockManager()
+	err := findFirstMismatch(flagDatadir, flagStartHeight, flagEndHeight, lockManager)
+	if err != nil {
+		if errors.Is(err, NoMismatchFoundError) {
+			fmt.Printf("no mismatch found: %v\n", err)
+		} else {
+			fmt.Printf("fatal: %v\n", err)
+		}
+	}
+}
+
+func findFirstMismatch(datadir string, startHeight, endHeight uint64, lockManager lockctx.Manager) error {
+	fmt.Printf("initializing database\n")
+	return common.WithStorage(datadir, func(db storage.DB) error {
+		headers, results, seals, state, err := createStorages(db, lockManager)
+		if err != nil {
+			return fmt.Errorf("could not create storages: %v", err)
+		}
+
+		c := &checker{
+			headers: headers,
+			results: results,
+			seals:   seals,
+			state:   state,
+		}
+
+		if startHeight == 0 {
+			startHeight = findRootBlockHeight(state)
+		}
+
+		if endHeight == 0 {
+			endHeight, err = latest.LatestSealedAndExecutedHeight(state, db)
+			if err != nil {
+				return fmt.Errorf("could not find last executed and sealed height: %v", err)
+			}
+		}
+
+		fmt.Printf("finding mismatch result between heights %v and %v\n", startHeight, endHeight)
+
+		mismatchHeight, err := c.FindFirstMismatchHeight(startHeight, endHeight)
+		if err != nil {
+			return fmt.Errorf("could not find first mismatch: %v", err)
+		}
+
+		fmt.Printf("first mismatch found at block %v\n", mismatchHeight)
+
+		blockID, err := findBlockIDByHeight(headers, mismatchHeight)
+		if err != nil {
+			return fmt.Errorf("could not find block id for height %v: %v", mismatchHeight, err)
+		}
+
+		fmt.Printf("mismatching block %v (id: %v)\n", mismatchHeight, blockID)
+
+		return nil
+	})
+}
+
+func createStorages(db storage.DB, lockManager lockctx.Manager) (
+	storage.Headers, storage.ExecutionResults, storage.Seals, protocol.State, error) {
+	storages := common.InitStorages(db)
+	state, err := common.OpenProtocolState(lockManager, db, storages)
+	if err != nil {
+		return nil, nil, nil, nil, fmt.Errorf("could not open protocol state: %v", err)
+	}
+
+	return storages.Headers, storages.Results, storages.Seals, state, nil
+}
+
+type checker struct {
+	headers storage.Headers
+	results storage.ExecutionResults
+	seals   storage.Seals
+	state   protocol.State
+}
+
+func (c *checker) FindFirstMismatchHeight(startHeight uint64, endHeight uint64) (uint64, error) {
+	low := startHeight
+	high := endHeight
+	firstMismatch := endHeight + 1 // Initialize to a value outside the range
+
+	for low <= high {
+		mid := low + (high-low)/2
+		match, err := c.CompareAtHeight(mid)
+		if err != nil {
+			return 0, err
+		}
+
+		if !match {
+			// Found a mismatch, update the first mismatch and search the lower half
+			firstMismatch = mid
+			high = mid - 1
+		} else {
+			// No mismatch, search the upper half
+			low = mid + 1
+		}
+	}
+
+	if firstMismatch > endHeight {
+		// No mismatch found within the range
+		return 0, fmt.Errorf("no mismatch found between heights %v and %v: %w", startHeight, endHeight, NoMismatchFoundError)
+	}
+
+	return firstMismatch, nil
+}
+
+func (c *checker) CompareAtHeight(height uint64) (bool, error) {
+	blockID, err := findBlockIDByHeight(c.headers, height)
+	if err != nil {
+		return false, fmt.Errorf("could not find block id for height %v: %w", height, err)
+	}
+
+	ownResultID, err := findOwnResultIDByBlockID(c.results, blockID)
+	if err != nil {
+		return false, fmt.Errorf("could not find own result for block %v: %w", blockID, err)
+	}
+
+	sealedResultID, err := findSealedResultIDByBlockHeight(c.seals, blockID)
+	if err != nil {
+		return false, fmt.Errorf("could not find sealed result for block %v: %w", blockID, err)
+	}
+
+	match := ownResultID == sealedResultID
+	if match {
+		fmt.Printf("block %v (id: %v) match: result %v\n", height, blockID, ownResultID)
+	} else {
+		fmt.Printf("block %v (id: %v) mismatch: own %v, sealed %v\n", height, blockID, ownResultID, sealedResultID)
+	}
+
+	return match, nil
+}
+
+func findRootBlockHeight(state protocol.State) uint64 {
+	return state.Params().SealedRoot().Height
+}
+
+func findBlockIDByHeight(headers storage.Headers, height uint64) (flow.Identifier, error) {
+	return headers.BlockIDByHeight(height)
+}
+
+func findOwnResultIDByBlockID(results storage.ExecutionResults, blockID flow.Identifier) (flow.Identifier, error) {
+	result, err := results.ByBlockID(blockID)
+	if err != nil {
+		return flow.Identifier{}, err
+	}
+	return result.ID(), nil
+}
+
+func findSealedResultIDByBlockHeight(seals storage.Seals, blockID flow.Identifier) (flow.Identifier, error) {
+	seal, err := seals.FinalizedSealForBlock(blockID)
+	if err != nil {
+		return flow.Identifier{}, err
+	}
+
+	return seal.ResultID, nil
+}
diff --git a/cmd/util/cmd/find-trie-root/cmd.go b/cmd/util/cmd/find-trie-root/cmd.go
new file mode 100644
index 00000000000..d50b4d9b3dd
--- /dev/null
+++ b/cmd/util/cmd/find-trie-root/cmd.go
@@ -0,0 +1,370 @@
+package find_trie_root
+
+import (
+	"encoding/hex"
+	"fmt"
+	"math"
+	"os"
+	"path/filepath"
+
+	prometheusWAL "github.com/onflow/wal/wal"
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	"github.com/spf13/cobra"
+
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/hash"
+	"github.com/onflow/flow-go/ledger/complete/wal"
+)
+
+var (
+	flagExecutionStateDir string
+	flagRootHash          string
+	flagFrom              int
+	flagTo                int
+	flagBackupDir         string
+	flagTrimAsLatestWAL   bool
+)
+
+// find trie root hash from the wal files.
+// useful for state extraction and rolling back executed height.
+// for instance, when extracting state for a target height, it requires that the wal files
+// have the trie root hash of the target block among the latest few records. If that is not the case,
+// then it is necessary to trim the wal files to the last record with the target trie root hash.
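+// (trimming rewrites the final segment so that replaying the WAL stops exactly at the target trie root.)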
+// in order to do that, this command can be used to find the trie root hash in the wal files,
+// and copy the wal that contains the trie root hash to a new directory and trim it to
+// have the target trie root hash as the last record.
+// after that, the new wal file can be used to extract the state for the target height.
+var Cmd = &cobra.Command{
+	Use:   "find-trie-root",
+	Short: "find trie root hash from the wal files",
+	Run:   run,
+}
+
+func init() {
+	Cmd.Flags().StringVar(&flagExecutionStateDir, "execution-state-dir", "/var/flow/data/execution",
+		"directory to the execution state")
+	_ = Cmd.MarkFlagRequired("execution-state-dir")
+
+	Cmd.Flags().StringVar(&flagRootHash, "root-hash", "",
+		"ledger root hash (hex-encoded, 64 characters)")
+	_ = Cmd.MarkFlagRequired("root-hash")
+
+	Cmd.Flags().IntVar(&flagFrom, "from", 0, "from segment")
+	Cmd.Flags().IntVar(&flagTo, "to", math.MaxInt32, "to segment")
+
+	Cmd.Flags().StringVar(&flagBackupDir, "backup-dir", "",
+		"directory for backup wal files. must not exist or be an empty folder. required when the --trim-as-latest-wal flag is set.")
+
+	Cmd.Flags().BoolVar(&flagTrimAsLatestWAL, "trim-as-latest-wal", false,
+		"trim the wal file to the last record with the target trie root hash")
+}
+
+func run(*cobra.Command, []string) {
+	rootHash, err := parseInput(flagRootHash)
+	if err != nil {
+		log.Fatal().Err(err).Msg("cannot parse input")
+	}
+
+	if flagExecutionStateDir == flagBackupDir {
+		log.Fatal().Msg("--backup-dir directory cannot be the same as the execution state directory")
+	}
+
+	// making sure the backup dir is empty
+	empty, err := checkFolderIsEmpty(flagBackupDir)
+	if err != nil {
+		log.Fatal().Msgf("--backup-dir directory %v must exist and be empty", flagBackupDir)
+	}
+
+	if !empty {
+		log.Fatal().Msgf("--backup-dir directory %v must be empty", flagBackupDir)
+	}
+
+	segment, offset, err := searchRootHashInSegments(rootHash, flagExecutionStateDir, flagFrom, flagTo)
+	if err != nil {
+		log.Fatal().Err(err).Msg("cannot find root hash in segments")
+	}
+
+	segmentFile := prometheusWAL.SegmentName(flagExecutionStateDir, segment)
+
+	log.Info().Msgf("found root hash in segment %d at offset %d, segment file: %v", segment, offset, segmentFile)
+
+	if !flagTrimAsLatestWAL {
+		log.Info().Msg("not trimming WAL. Exiting. To trim the WAL, use the --trim-as-latest-wal flag")
+		return
+	}
+
+	if len(flagBackupDir) == 0 {
+		log.Error().Msgf("--backup-dir directory is not provided")
+		return
+	}
+
+	// create a temporary folder in the backup folder to store the new segment file
+	tmpFolder := filepath.Join(flagBackupDir, "flow-last-segment-file")
+
+	log.Info().Msgf("creating temporary folder %v", tmpFolder)
+
+	err = os.Mkdir(tmpFolder, os.ModePerm)
+	if err != nil {
+		log.Fatal().Err(err).Msg("cannot create temporary folder")
+	}
+
+	defer func() {
+		log.Info().Msgf("removing temporary folder %v", tmpFolder)
+		err := os.RemoveAll(tmpFolder)
+		if err != nil {
+			log.Error().Err(err).Msg("cannot remove temporary folder")
+		}
+	}()
+
+	// generate a segment file to the temporary folder with the root hash as its last record
+	newSegmentFile, err := findRootHashAndCreateTrimmed(flagExecutionStateDir, segment, rootHash, tmpFolder)
+	if err != nil {
+		log.Fatal().Err(err).Msg("cannot copy WAL")
+	}
+
+	log.Info().Msgf("successfully copied WAL to the temporary folder %v", newSegmentFile)
+
+	// before replacing the last wal file with the newly generated one, back up the rolled-back wals,
+	// then move the last segment file to the execution state directory
+	err = backupRollbackedWALsAndMoveLastSegmentFile(
+		segment, flagExecutionStateDir, flagBackupDir, newSegmentFile)
+	if err != nil {
+		log.Fatal().Err(err).Msg("cannot backup rollbacked WALs")
+	}
+
+	log.Info().Msgf("successfully trimmed WAL %v to have the trie root hash %v as its last record, original wal files moved to %v",
+		segment, rootHash, flagBackupDir)
+}
+
+func parseInput(rootHashStr string) (ledger.RootHash, error) {
+	rootHashBytes, err := hex.DecodeString(rootHashStr)
+	if err != nil {
+		return ledger.RootHash(hash.DummyHash), fmt.Errorf("cannot decode root hash: %w", err)
+	}
+	rootHash, err := ledger.ToRootHash(rootHashBytes)
+	if err != nil {
+		return ledger.RootHash(hash.DummyHash), fmt.Errorf("invalid root hash: %w", err)
+	}
+	return rootHash, nil
+}
+
+func searchRootHashInSegments(
+	expectedHash ledger.RootHash,
+	dir string,
+	wantFrom, wantTo int,
+) (int, int64, error) {
+	lg := zerolog.New(os.Stderr).With().Timestamp().Logger()
+	from, to, err := prometheusWAL.Segments(dir)
+	if err != nil {
+		return 0, 0, fmt.Errorf("cannot get segments: %w", err)
+	}
+
+	if from < 0 {
+		return 0, 0, fmt.Errorf("no segments found in %s", dir)
+	}
+
+	if wantFrom > to {
+		return 0, 0, fmt.Errorf("from segment %d is greater than the last segment %d", wantFrom, to)
+	}
+
+	if wantTo < from {
+		return 0, 0, fmt.Errorf("to segment %d is less than the first segment %d", wantTo, from)
+	}
+
+	if wantFrom > from {
+		from = wantFrom
+	}
+
+	if wantTo < to {
+		to = wantTo
+	}
+
+	lg.Info().
+		Str("dir", dir).
+		Int("from", from).
+		Int("to", to).
+		Int("want-from", wantFrom).
+		Int("want-to", wantTo).
+		Msgf("searching for trie root hash %v in segments [%d,%d]", expectedHash, wantFrom, wantTo)
+
+	sr, err := prometheusWAL.NewSegmentsRangeReader(lg, prometheusWAL.SegmentRange{
+		Dir:   dir,
+		First: from,
+		Last:  to,
+	})
+
+	if err != nil {
+		return 0, 0, fmt.Errorf("cannot create WAL segments reader: %w", err)
+	}
+
+	defer sr.Close()
+
+	reader := prometheusWAL.NewReader(sr)
+
+	for reader.Next() {
+		record := reader.Record()
+		operation, _, update, err := wal.Decode(record)
+		if err != nil {
+			return 0, 0, fmt.Errorf("cannot decode LedgerWAL record: %w", err)
+		}
+
+		switch operation {
+		case wal.WALUpdate:
+			rootHash := update.RootHash
+
+			log.Debug().
+ Uint8("operation", uint8(operation)). + Str("root-hash", rootHash.String()). + Msg("found WALUpdate") + + if rootHash.Equals(expectedHash) { + log.Info().Msgf("found expected trie root hash %v", rootHash) + return reader.Segment(), reader.Offset(), nil + } + default: + } + + err = reader.Err() + if err != nil { + return 0, 0, fmt.Errorf("cannot read LedgerWAL: %w", err) + } + } + + return 0, 0, fmt.Errorf("finish reading all segment files from %d to %d, but not found", from, to) +} + +// findRootHashAndCreateTrimmed finds the root hash in the segment file from the given dir folder +// and creates a new segment file with the expected root hash as the last record in a temporary folder. +// it return the path to the new segment file. +func findRootHashAndCreateTrimmed( + dir string, segment int, expectedRoot ledger.RootHash, tmpFolder string) (string, error) { + // the new segment file will be created in the temporary folder + // and it's always 00000000 + newSegmentFile := prometheusWAL.SegmentName(tmpFolder, 0) + + log.Info().Msgf("writing new segment file to %v", newSegmentFile) + + writer, err := prometheusWAL.NewSize(log.Logger, nil, tmpFolder, wal.SegmentSize, false) + if err != nil { + return "", fmt.Errorf("cannot create writer WAL: %w", err) + } + + defer writer.Close() + + sr, err := prometheusWAL.NewSegmentsRangeReader(log.Logger, prometheusWAL.SegmentRange{ + Dir: dir, + First: segment, + Last: segment, + }) + if err != nil { + return "", fmt.Errorf("cannot create WAL segments reader: %w", err) + } + + defer sr.Close() + + reader := prometheusWAL.NewReader(sr) + + for reader.Next() { + record := reader.Record() + operation, _, update, err := wal.Decode(record) + if err != nil { + return "", fmt.Errorf("cannot decode LedgerWAL record: %w", err) + } + + switch operation { + case wal.WALUpdate: + + bytes := wal.EncodeUpdate(update) + _, err = writer.Log(bytes) + if err != nil { + return "", fmt.Errorf("cannot write LedgerWAL record: %w", err) + } + + rootHash := update.RootHash + + if rootHash.Equals(expectedRoot) { + log.Info().Msgf("found expected trie root hash %v, finish writing", rootHash) + return newSegmentFile, nil + } + default: + } + + err = reader.Err() + if err != nil { + return "", fmt.Errorf("cannot read LedgerWAL: %w", err) + } + } + + return "", fmt.Errorf("finish reading all segment files from %d to %d, but not found", segment, segment) +} + +func checkFolderIsEmpty(folderPath string) (bool, error) { + // Check if the folder exists + info, err := os.Stat(folderPath) + if err != nil { + if os.IsNotExist(err) { + log.Info().Msgf("folder %v does not exist, creating the folder", folderPath) + + // create the folder if not exist + err = os.MkdirAll(folderPath, os.ModePerm) + if err != nil { + return false, fmt.Errorf("Cannot create the folder.") + } + + return true, nil + } + return false, err + } + + // Check if the path is a directory + if !info.IsDir() { + return false, fmt.Errorf("The path is not a directory.") + } + + // Check if the folder is empty + files, err := os.ReadDir(folderPath) + if err != nil { + return false, fmt.Errorf("Cannot read the folder.") + } + + return len(files) == 0, nil +} + +// backup new wals before replacing +func backupRollbackedWALsAndMoveLastSegmentFile( + segment int, walDir, backupDir string, newSegmentFile string) error { + first, last, err := prometheusWAL.Segments(walDir) + if err != nil { + return fmt.Errorf("cannot get segments: %w", err) + } + + if segment < first { + return fmt.Errorf("segment %d is less than the first segment 
%d", segment, first) + } + + // backup all the segment files that have higher number than the given segment, including + // the segment file itself, since it will be replaced. + for i := segment; i <= last; i++ { + segmentFile := prometheusWAL.SegmentName(walDir, i) + backupFile := prometheusWAL.SegmentName(backupDir, i) + + log.Info().Msgf("backup segment file %s to %s, %v/%v", segmentFile, backupFile, i, last) + err := os.Rename(segmentFile, backupFile) + if err != nil { + return fmt.Errorf("cannot move segment file %s to %s: %w", segmentFile, backupFile, err) + } + } + + // after backup the segment files, replace the last segment file + segmentToBeReplaced := prometheusWAL.SegmentName(walDir, segment) + + log.Info().Msgf("moving segment file %s to %s", newSegmentFile, segmentToBeReplaced) + + err = os.Rename(newSegmentFile, segmentToBeReplaced) + if err != nil { + return fmt.Errorf("cannot move segment file %s to %s: %w", newSegmentFile, segmentToBeReplaced, err) + } + + return nil +} diff --git a/cmd/util/cmd/generate-authorization-fixes/cmd.go b/cmd/util/cmd/generate-authorization-fixes/cmd.go new file mode 100644 index 00000000000..a554a396445 --- /dev/null +++ b/cmd/util/cmd/generate-authorization-fixes/cmd.go @@ -0,0 +1,443 @@ +package generate_authorization_fixes + +import ( + "compress/gzip" + "encoding/json" + "io" + "os" + "strings" + "sync" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/sema" + "github.com/rs/zerolog/log" + "github.com/schollz/progressbar/v3" + "github.com/spf13/cobra" + + common2 "github.com/onflow/flow-go/cmd/util/common" + "github.com/onflow/flow-go/cmd/util/ledger/migrations" + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" +) + +var ( + flagPayloads string + flagState string + flagStateCommitment string + flagOutputDirectory string + flagChain string + flagLinkMigrationReport string + flagAddresses string +) + +var Cmd = &cobra.Command{ + Use: "generate-authorization-fixes", + Short: "generate authorization fixes for capability controllers", + Run: run, +} + +func init() { + + Cmd.Flags().StringVar( + &flagPayloads, + "payloads", + "", + "Input payload file name", + ) + + Cmd.Flags().StringVar( + &flagState, + "state", + "", + "Input state file name", + ) + + Cmd.Flags().StringVar( + &flagStateCommitment, + "state-commitment", + "", + "Input state commitment", + ) + + Cmd.Flags().StringVar( + &flagOutputDirectory, + "output-directory", + "", + "Output directory", + ) + + Cmd.Flags().StringVar( + &flagChain, + "chain", + "", + "Chain name", + ) + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().StringVar( + &flagLinkMigrationReport, + "link-migration-report", + "", + "Input link migration report file name", + ) + _ = Cmd.MarkFlagRequired("link-migration-report") + + Cmd.Flags().StringVar( + &flagAddresses, + "addresses", + "", + "only generate fixes for given accounts (comma-separated hex-encoded addresses)", + ) +} + +func run(*cobra.Command, []string) { + + var addressFilter map[common.Address]struct{} + + if len(flagAddresses) > 0 { + for _, hexAddr := range strings.Split(flagAddresses, ",") { + + hexAddr = strings.TrimSpace(hexAddr) + + if len(hexAddr) == 0 { + continue + } + + addr, err := common2.ParseAddress(hexAddr) + if err != nil { + log.Fatal().Err(err).Msgf("failed to parse address: %s", 
hexAddr)
+			}
+
+			if addressFilter == nil {
+				addressFilter = make(map[common.Address]struct{})
+			}
+			addressFilter[common.Address(addr)] = struct{}{}
+		}
+
+		addresses := make([]string, 0, len(addressFilter))
+		for addr := range addressFilter {
+			addresses = append(addresses, addr.HexWithPrefix())
+		}
+		log.Info().Msgf(
+			"Only generating fixes for %d accounts: %s",
+			len(addressFilter),
+			addresses,
+		)
+	}
+
+	if flagPayloads == "" && flagState == "" {
+		log.Fatal().Msg("Either --payloads or --state must be provided")
+	} else if flagPayloads != "" && flagState != "" {
+		log.Fatal().Msg("Only one of --payloads or --state must be provided")
+	}
+	if flagState != "" && flagStateCommitment == "" {
+		log.Fatal().Msg("--state-commitment must be provided when --state is provided")
+	}
+
+	rwf := reporters.NewReportFileWriterFactory(flagOutputDirectory, log.Logger)
+
+	chainID := flow.ChainID(flagChain)
+	// Validate chain ID
+	_ = chainID.Chain()
+
+	migratedPublicLinkSetChan := make(chan MigratedPublicLinkSet, 1)
+	go func() {
+		migratedPublicLinkSetChan <- readMigratedPublicLinkSet(
+			flagLinkMigrationReport,
+			addressFilter,
+		)
+	}()
+
+	registersByAccountChan := make(chan *registers.ByAccount, 1)
+	go func() {
+		registersByAccountChan <- loadRegistersByAccount()
+	}()
+
+	migratedPublicLinkSet := <-migratedPublicLinkSetChan
+	registersByAccount := <-registersByAccountChan
+
+	fixReporter := rwf.ReportWriter("authorization-fixes")
+	defer fixReporter.Close()
+
+	authorizationFixGenerator := &AuthorizationFixGenerator{
+		registersByAccount:    registersByAccount,
+		chainID:               chainID,
+		migratedPublicLinkSet: migratedPublicLinkSet,
+		reporter:              fixReporter,
+	}
+
+	log.Info().Msg("Generating authorization fixes ...")
+
+	if len(addressFilter) > 0 {
+		authorizationFixGenerator.generateFixesForAccounts(addressFilter)
+	} else {
+		authorizationFixGenerator.generateFixesForAllAccounts()
+	}
+}
+
+func loadRegistersByAccount() *registers.ByAccount {
+	// Read payloads from payload file or checkpoint file
+
+	var payloads []*ledger.Payload
+	var err error
+
+	if flagPayloads != "" {
+		log.Info().Msgf("Reading payloads from %s", flagPayloads)
+
+		_, payloads, err = util.ReadPayloadFile(log.Logger, flagPayloads)
+		if err != nil {
+			log.Fatal().Err(err).Msg("failed to read payloads")
+		}
+	} else {
+		log.Info().Msgf("Reading trie %s", flagStateCommitment)
+
+		stateCommitment := util.ParseStateCommitment(flagStateCommitment)
+		payloads, err = util.ReadTrieForPayloads(flagState, stateCommitment)
+		if err != nil {
+			log.Fatal().Err(err).Msg("failed to read state")
+		}
+	}
+
+	log.Info().Msgf("creating registers from payloads (%d)", len(payloads))
+
+	registersByAccount, err := registers.NewByAccountFromPayloads(payloads)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to create registers from payloads")
+	}
+	log.Info().Msgf(
+		"created %d registers from payloads (%d accounts)",
+		registersByAccount.Count(),
+		registersByAccount.AccountCount(),
+	)
+
+	return registersByAccount
+}
+
+func readMigratedPublicLinkSet(path string, addressFilter map[common.Address]struct{}) MigratedPublicLinkSet {
+
+	file, err := os.Open(path)
+	if err != nil {
+		log.Fatal().Err(err).Msgf("can't open link migration report: %s", path)
+	}
+	defer file.Close()
+
+	var reader io.Reader = file
+	if isGzip(file) {
+		reader, err = gzip.NewReader(file)
+		if err != nil {
+			log.Fatal().Err(err).Msgf("failed to create gzip reader for %s", path)
+		}
+	}
+
+	log.Info().Msgf("Reading link migration report from %s ...", path)
+
+	migratedPublicLinkSet, err :=
ReadMigratedPublicLinkSet(reader, addressFilter)
+	if err != nil {
+		log.Fatal().Err(err).Msgf("failed to read public link report: %s", path)
+	}
+
+	log.Info().Msgf("Read %d public link migration entries", len(migratedPublicLinkSet))
+
+	return migratedPublicLinkSet
+}
+
+func jsonEncodeAuthorization(authorization interpreter.Authorization) string {
+	switch authorization {
+	case interpreter.UnauthorizedAccess, interpreter.InaccessibleAccess:
+		return ""
+	default:
+		return string(authorization.ID())
+	}
+}
+
+type fixEntitlementsEntry struct {
+	CapabilityAddress common.Address
+	CapabilityID      uint64
+	ReferencedType    interpreter.StaticType
+	Authorization     interpreter.Authorization
+}
+
+var _ json.Marshaler = fixEntitlementsEntry{}
+
+func (e fixEntitlementsEntry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(struct {
+		CapabilityAddress string `json:"capability_address"`
+		CapabilityID      uint64 `json:"capability_id"`
+		ReferencedType    string `json:"referenced_type"`
+		Authorization     string `json:"authorization"`
+	}{
+		CapabilityAddress: e.CapabilityAddress.String(),
+		CapabilityID:      e.CapabilityID,
+		ReferencedType:    string(e.ReferencedType.ID()),
+		Authorization:     jsonEncodeAuthorization(e.Authorization),
+	})
+}
+
+type AuthorizationFixGenerator struct {
+	registersByAccount    *registers.ByAccount
+	chainID               flow.ChainID
+	migratedPublicLinkSet MigratedPublicLinkSet
+	reporter              reporters.ReportWriter
+}
+
+func (g *AuthorizationFixGenerator) generateFixesForAllAccounts() {
+	var wg sync.WaitGroup
+	progress := progressbar.Default(int64(g.registersByAccount.AccountCount()), "Processing:")
+
+	err := g.registersByAccount.ForEachAccount(func(accountRegisters *registers.AccountRegisters) error {
+		address := common.MustBytesToAddress([]byte(accountRegisters.Owner()))
+		wg.Add(1)
+		go func(address common.Address) {
+			defer wg.Done()
+			g.generateFixesForAccount(address)
+			_ = progress.Add(1)
+		}(address)
+		return nil
+	})
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to iterate accounts")
+	}
+
+	wg.Wait()
+	_ = progress.Finish()
+}
+
+func (g *AuthorizationFixGenerator) generateFixesForAccounts(addresses map[common.Address]struct{}) {
+	var wg sync.WaitGroup
+	progress := progressbar.Default(int64(len(addresses)), "Processing:")
+
+	for address := range addresses {
+		wg.Add(1)
+		go func(address common.Address) {
+			defer wg.Done()
+			g.generateFixesForAccount(address)
+			_ = progress.Add(1)
+		}(address)
+	}
+
+	wg.Wait()
+	_ = progress.Finish()
+}
+
+func (g *AuthorizationFixGenerator) generateFixesForAccount(address common.Address) {
+	mr, err := migrations.NewInterpreterMigrationRuntime(
+		g.registersByAccount,
+		g.chainID,
+		migrations.InterpreterMigrationRuntimeConfig{},
+	)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to create interpreter migration runtime")
+	}
+
+	capabilityControllerStorage := mr.Storage.GetDomainStorageMap(
+		mr.Interpreter,
+		address,
+		common.StorageDomainCapabilityController,
+		false,
+	)
+	if capabilityControllerStorage == nil {
+		return
+	}
+
+	iterator := capabilityControllerStorage.Iterator(nil)
+	for {
+		k, v := iterator.Next()
+
+		if k == nil || v == nil {
+			break
+		}
+
+		key, ok := k.(interpreter.Uint64AtreeValue)
+		if !ok {
+			log.Fatal().Msgf("unexpected key type: %T", k)
+		}
+
+		capabilityID := uint64(key)
+
+		value := interpreter.MustConvertUnmeteredStoredValue(v)
+
+		capabilityController, ok := value.(*interpreter.StorageCapabilityControllerValue)
+		if !ok {
+			continue
+		}
+
+		borrowType := capabilityController.BorrowType
+
+		switch borrowType.Authorization.(type) {
+		case interpreter.EntitlementSetAuthorization:
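+			// Entitled borrow type: emit a fix entry if this controller was migrated from a public link.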
g.maybeGenerateFixForEntitledCapabilityController( + address, + capabilityID, + borrowType, + ) + + case interpreter.Unauthorized: + // Already unauthorized, nothing to do + + case interpreter.Inaccessible: + log.Warn().Msgf( + "capability controller %d in account %s has borrow type with inaccessible authorization", + capabilityID, + address.HexWithPrefix(), + ) + + case interpreter.EntitlementMapAuthorization: + log.Warn().Msgf( + "capability controller %d in account %s has borrow type with entitlement map authorization", + capabilityID, + address.HexWithPrefix(), + ) + + default: + log.Warn().Msgf( + "capability controller %d in account %s has borrow type with unexpected authorization", + capabilityID, + address.HexWithPrefix(), + ) + } + } +} + +func newEntitlementSetAuthorizationFromTypeIDs( + typeIDs []common.TypeID, + setKind sema.EntitlementSetKind, +) interpreter.EntitlementSetAuthorization { + return interpreter.NewEntitlementSetAuthorization( + nil, + func() []common.TypeID { + return typeIDs + }, + len(typeIDs), + setKind, + ) +} + +func (g *AuthorizationFixGenerator) maybeGenerateFixForEntitledCapabilityController( + capabilityAddress common.Address, + capabilityID uint64, + borrowType *interpreter.ReferenceStaticType, +) { + // Only remove the authorization if the capability controller was migrated from a public link + _, ok := g.migratedPublicLinkSet[AccountCapabilityID{ + Address: capabilityAddress, + CapabilityID: capabilityID, + }] + if !ok { + return + } + + g.reporter.Write(fixEntitlementsEntry{ + CapabilityAddress: capabilityAddress, + CapabilityID: capabilityID, + ReferencedType: borrowType.ReferencedType, + Authorization: borrowType.Authorization, + }) +} + +func isGzip(file *os.File) bool { + return strings.HasSuffix(file.Name(), ".gz") +} diff --git a/cmd/util/cmd/generate-authorization-fixes/cmd_test.go b/cmd/util/cmd/generate-authorization-fixes/cmd_test.go new file mode 100644 index 00000000000..00d6b62d594 --- /dev/null +++ b/cmd/util/cmd/generate-authorization-fixes/cmd_test.go @@ -0,0 +1,267 @@ +package generate_authorization_fixes + +import ( + "fmt" + "testing" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/sema" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/util/ledger/migrations" + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func newBootstrapPayloads( + chainID flow.ChainID, + bootstrapProcedureOptions ...fvm.BootstrapProcedureOption, +) ([]*ledger.Payload, error) { + + ctx := fvm.NewContext( + fvm.WithChain(chainID.Chain()), + ) + + vm := fvm.NewVirtualMachine() + + storageSnapshot := snapshot.MapStorageSnapshot{} + + bootstrapProcedure := fvm.Bootstrap( + unittest.ServiceAccountPublicKey, + bootstrapProcedureOptions..., + ) + + executionSnapshot, _, err := vm.Run( + ctx, + bootstrapProcedure, + storageSnapshot, + ) + if err != nil { + return nil, err + } + + payloads := make([]*ledger.Payload, 0, len(executionSnapshot.WriteSet)) + + for registerID, registerValue := range 
executionSnapshot.WriteSet { + payloadKey := convert.RegisterIDToLedgerKey(registerID) + payload := ledger.NewPayload(payloadKey, registerValue) + payloads = append(payloads, payload) + } + + return payloads, nil +} + +type testReportWriter struct { + entries []any +} + +func (t *testReportWriter) Write(entry interface{}) { + t.entries = append(t.entries, entry) +} + +func (*testReportWriter) Close() { + // NO-OP +} + +var _ reporters.ReportWriter = &testReportWriter{} + +func TestGenerateAuthorizationFixes(t *testing.T) { + t.Parallel() + + // This test no longer works because publishing authorized capabilities is no longer allowed. + // The migration and test are kept for historical reasons. + + t.Skip() + + const chainID = flow.Emulator + chain := chainID.Chain() + + address, err := chain.AddressAtIndex(1000) + require.NoError(t, err) + + require.Equal(t, "bf519681cdb888b1", address.Hex()) + + log := zerolog.New(zerolog.NewTestWriter(t)) + + bootstrapPayloads, err := newBootstrapPayloads(chainID) + require.NoError(t, err) + + registersByAccount, err := registers.NewByAccountFromPayloads(bootstrapPayloads) + require.NoError(t, err) + + mr := migrations.NewBasicMigrationRuntime(registersByAccount) + + err = mr.Accounts.Create(nil, address) + require.NoError(t, err) + + expectedWriteAddresses := map[flow.Address]struct{}{ + address: {}, + } + + err = mr.Commit(expectedWriteAddresses, log) + require.NoError(t, err) + + const contractCode = ` + access(all) contract Test { + + access(all) entitlement E + + access(all) struct S {} + } + ` + + deployTX, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(` + transaction(code: String) { + prepare(signer: auth(Contracts) &Account) { + signer.contracts.add(name: "Test", code: code.utf8) + } + } + `)). + AddAuthorizer(address). + AddArgument(jsoncdc.MustEncode(cadence.String(contractCode))). + Build() + require.NoError(t, err) + + runDeployTx := migrations.NewTransactionBasedMigration( + deployTX, + chainID, + log, + expectedWriteAddresses, + ) + err = runDeployTx(registersByAccount) + require.NoError(t, err) + + setupTx, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf(` + import Test from %s + + transaction { + prepare(signer: auth(Storage, Capabilities) &Account) { + // Capability 1 was a public, unauthorized capability, which is now authorized. + // It should lose its entitlement + let cap1 = signer.capabilities.storage.issue<auth(Test.E) &Test.S>(/storage/s) + signer.capabilities.publish(cap1, at: /public/s1) + + // Capability 2 was a public, unauthorized capability, which is now authorized. + // It is currently only stored, nested, in storage, and is not published. + // It should lose its entitlement + let cap2 = signer.capabilities.storage.issue<auth(Test.E) &Test.S>(/storage/s) + signer.storage.save([cap2], to: /storage/caps2) + + // Capability 3 was a private, authorized capability. + // It is currently only stored, nested, in storage, and is not published. + // It should keep its entitlement + let cap3 = signer.capabilities.storage.issue<auth(Test.E) &Test.S>(/storage/s) + signer.storage.save([cap3], to: /storage/caps3) + + // Capability 4 was a private, authorized capability. + // It is currently both stored, nested, in storage, and is published. 
+ // It should keep its entitlement + let cap4 = signer.capabilities.storage.issue<auth(Test.E) &Test.S>(/storage/s) + signer.storage.save([cap4], to: /storage/caps4) + signer.capabilities.publish(cap4, at: /public/s4) + + // Capability 5 was a public, unauthorized capability, which is still unauthorized. + // It is currently both stored, nested, in storage, and is published. + // There is no need to fix it. + let cap5 = signer.capabilities.storage.issue<&Test.S>(/storage/s) + signer.storage.save([cap5], to: /storage/caps5) + signer.capabilities.publish(cap5, at: /public/s5) + } + } + `, + address.HexWithPrefix(), + ))). + AddAuthorizer(address). + Build() + require.NoError(t, err) + + runSetupTx := migrations.NewTransactionBasedMigration( + setupTx, + chainID, + log, + expectedWriteAddresses, + ) + err = runSetupTx(registersByAccount) + require.NoError(t, err) + + testContractLocation := common.AddressLocation{ + Address: common.Address(address), + Name: "Test", + } + + migratedPublicLinkSet := MigratedPublicLinkSet{ + { + Address: common.Address(address), + CapabilityID: 1, + }: {}, + { + Address: common.Address(address), + CapabilityID: 2, + }: {}, + { + Address: common.Address(address), + CapabilityID: 5, + }: {}, + } + + reporter := &testReportWriter{} + + generator := &AuthorizationFixGenerator{ + registersByAccount: registersByAccount, + chainID: chainID, + migratedPublicLinkSet: migratedPublicLinkSet, + reporter: reporter, + } + generator.generateFixesForAllAccounts() + + eTypeID := testContractLocation.TypeID(nil, "Test.E") + + assert.Equal(t, + []any{ + fixEntitlementsEntry{ + CapabilityAddress: common.Address(address), + CapabilityID: 1, + ReferencedType: interpreter.NewCompositeStaticTypeComputeTypeID( + nil, + testContractLocation, + "Test.S", + ), + Authorization: newEntitlementSetAuthorizationFromTypeIDs( + []common.TypeID{ + eTypeID, + }, + sema.Conjunction, + ), + }, + fixEntitlementsEntry{ + CapabilityAddress: common.Address(address), + CapabilityID: 2, + ReferencedType: interpreter.NewCompositeStaticTypeComputeTypeID( + nil, + testContractLocation, + "Test.S", + ), + Authorization: newEntitlementSetAuthorizationFromTypeIDs( + []common.TypeID{ + eTypeID, + }, + sema.Conjunction, + ), + }, + }, + reporter.entries, + ) +} diff --git a/cmd/util/cmd/generate-authorization-fixes/link_migration_report.go b/cmd/util/cmd/generate-authorization-fixes/link_migration_report.go new file mode 100644 index 00000000000..5a41dfc12e9 --- /dev/null +++ b/cmd/util/cmd/generate-authorization-fixes/link_migration_report.go @@ -0,0 +1,93 @@ +package generate_authorization_fixes + +import ( + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/onflow/cadence/common" +) + +// AccountCapabilityID is a capability ID in an account. +type AccountCapabilityID struct { + Address common.Address + CapabilityID uint64 +} + +// MigratedPublicLinkSet is a set of capability controller IDs which were migrated from public links. +type MigratedPublicLinkSet map[AccountCapabilityID]struct{} + +// ReadMigratedPublicLinkSet reads a link migration report from the given reader, +// and returns a set of all capability controller IDs which were migrated from public links. 
+// +// The report is expected to be a JSON array of objects with the following structure: +// +// [ +// {"kind":"link-migration-success","account_address":"0x1","path":"/public/foo","capability_id":1}, +// ] +func ReadMigratedPublicLinkSet( + reader io.Reader, + filter map[common.Address]struct{}, +) (MigratedPublicLinkSet, error) { + + set := MigratedPublicLinkSet{} + + dec := json.NewDecoder(reader) + + token, err := dec.Token() + if err != nil { + return nil, fmt.Errorf("failed to read token: %w", err) + } + if token != json.Delim('[') { + return nil, fmt.Errorf("expected start of array, got %s", token) + } + + for dec.More() { + var entry struct { + Kind string `json:"kind"` + Address string `json:"account_address"` + Path string `json:"path"` + CapabilityID uint64 `json:"capability_id"` + } + err := dec.Decode(&entry) + if err != nil { + return nil, fmt.Errorf("failed to decode entry: %w", err) + } + + if entry.Kind != "link-migration-success" { + continue + } + + if !strings.HasPrefix(entry.Path, "/public/") { + continue + } + + address, err := common.HexToAddress(entry.Address) + if err != nil { + return nil, fmt.Errorf("failed to parse address: %w", err) + } + + if filter != nil { + if _, ok := filter[address]; !ok { + continue + } + } + + accountCapabilityID := AccountCapabilityID{ + Address: address, + CapabilityID: entry.CapabilityID, + } + set[accountCapabilityID] = struct{}{} + } + + token, err = dec.Token() + if err != nil { + return nil, fmt.Errorf("failed to read token: %w", err) + } + if token != json.Delim(']') { + return nil, fmt.Errorf("expected end of array, got %s", token) + } + + return set, nil +} diff --git a/cmd/util/cmd/generate-authorization-fixes/link_migration_report_test.go b/cmd/util/cmd/generate-authorization-fixes/link_migration_report_test.go new file mode 100644 index 00000000000..872babc0fec --- /dev/null +++ b/cmd/util/cmd/generate-authorization-fixes/link_migration_report_test.go @@ -0,0 +1,70 @@ +package generate_authorization_fixes + +import ( + "strings" + "testing" + + "github.com/onflow/cadence/common" + "github.com/stretchr/testify/require" +) + +func TestReadPublicLinkMigrationReport(t *testing.T) { + t.Parallel() + + contents := ` + [ + {"kind":"link-migration-success","account_address":"0x1","path":"/public/foo","capability_id":1}, + {"kind":"link-migration-success","account_address":"0x2","path":"/private/bar","capability_id":2}, + {"kind":"link-migration-success","account_address":"0x3","path":"/public/baz","capability_id":3} + ] + ` + + t.Run("unfiltered", func(t *testing.T) { + t.Parallel() + + reader := strings.NewReader(contents) + + mapping, err := ReadMigratedPublicLinkSet(reader, nil) + require.NoError(t, err) + + require.Equal(t, + MigratedPublicLinkSet{ + { + Address: common.MustBytesToAddress([]byte{0x1}), + CapabilityID: 1, + }: struct{}{}, + { + Address: common.MustBytesToAddress([]byte{0x3}), + CapabilityID: 3, + }: struct{}{}, + }, + mapping, + ) + }) + + t.Run("filtered", func(t *testing.T) { + t.Parallel() + + address1 := common.MustBytesToAddress([]byte{0x1}) + + reader := strings.NewReader(contents) + + mapping, err := ReadMigratedPublicLinkSet( + reader, + map[common.Address]struct{}{ + address1: {}, + }, + ) + require.NoError(t, err) + + require.Equal(t, + MigratedPublicLinkSet{ + { + Address: address1, + CapabilityID: 1, + }: struct{}{}, + }, + mapping, + ) + }) +} diff --git a/cmd/util/cmd/leaders/cmd.go b/cmd/util/cmd/leaders/cmd.go new file mode 100644 index 00000000000..3ce01a728d7 --- /dev/null +++ 
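An aside for consumers of the authorization-fixes report produced by this command: based on the fixEntitlementsEntry.MarshalJSON implementation above, each reported fix serializes to a small JSON object. Below is a minimal sketch, in the same package, of how one entry encodes; the account address, contract name, and entitlement are hypothetical values mirroring the test fixture in cmd_test.go:

func ExampleFixEntitlementsEntryJSON() {
	// Hypothetical account 0x1 with a contract "Test" declaring
	// entitlement E and struct S (see cmd_test.go).
	address := common.MustBytesToAddress([]byte{0x1})
	location := common.AddressLocation{Address: address, Name: "Test"}

	entry := fixEntitlementsEntry{
		CapabilityAddress: address,
		CapabilityID:      1,
		ReferencedType: interpreter.NewCompositeStaticTypeComputeTypeID(
			nil,
			location,
			"Test.S",
		),
		Authorization: newEntitlementSetAuthorizationFromTypeIDs(
			[]common.TypeID{location.TypeID(nil, "Test.E")},
			sema.Conjunction,
		),
	}

	encoded, err := json.Marshal(entry)
	if err != nil {
		panic(err)
	}
	// Prints roughly the following (exact address and type-ID formatting
	// depends on the Cadence version):
	// {"capability_address":"0000000000000001","capability_id":1,
	//  "referenced_type":"A.0000000000000001.Test.S",
	//  "authorization":"A.0000000000000001.Test.E"}
	fmt.Println(string(encoded))
}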
b/cmd/util/cmd/leaders/cmd.go @@ -0,0 +1,96 @@ +package leaders + +import ( + "encoding/json" + "io" + "os" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/consensus/hotstuff/committees/leader" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol/inmem" +) + +var ( + flagSnapshotPath string + flagStartView uint64 + flagEndView uint64 +) + +var Cmd = &cobra.Command{ + Use: "leaders", + Short: "Get leader selection for a view range.", + Long: `Get leader selection for a view range in the current epoch for a provided snapshot. +Expects a valid protocol state snapshot JSON to be piped into STDIN, or provided as the snapshot-path flag. +Writes a JSON list of leaders for the given view range to STDOUT.`, + Run: run, +} + +func init() { + + Cmd.Flags().Uint64Var(&flagStartView, "start-view", 0, "the inclusive first view to get leader selection for") + Cmd.Flags().Uint64Var(&flagEndView, "end-view", 0, "the inclusive last view to get leader selection for") + Cmd.Flags().StringVar(&flagSnapshotPath, "snapshot-path", "", "the path to the snapshot to use (or pipe snapshot to stdin and omit this flag)") + cmd.MarkFlagRequired(Cmd, "start-view") + cmd.MarkFlagRequired(Cmd, "end-view") +} + +func run(*cobra.Command, []string) { + + // If a snapshot file is specified, read the file. Otherwise assume the snapshot is piped to stdin. + var inputBuffer io.Reader + if flagSnapshotPath != "" { + snapshotFile, err := os.Open(flagSnapshotPath) + if err != nil { + log.Fatal().Err(err).Msg("cannot open snapshot file") + } + inputBuffer = snapshotFile + } else { + inputBuffer = os.Stdin + } + + var snapshot inmem.EncodableSnapshot + err := json.NewDecoder(inputBuffer).Decode(&snapshot) + if err != nil { + log.Fatal().Err(err).Msg("Failed to read snapshot") + } + + snap := inmem.SnapshotFromEncodable(snapshot) + epoch, err := snap.Epochs().Current() + if err != nil { + log.Fatal().Err(err).Msg("Failed to read current epoch") + } + + // Should match https://github.com/onflow/flow-go/blob/48b6db32d4491903aa0ffa541377c8f239da3bcc/consensus/hotstuff/committees/consensus_committee.go#L74-L78 + selection, err := leader.SelectionForConsensus( + epoch.InitialIdentities(), + epoch.RandomSource(), + epoch.FirstView(), + epoch.FinalView(), + ) + if err != nil { + log.Fatal().Err(err).Msg("Failed to read current leader selection") + } + + type LeaderForView struct { + View uint64 + LeaderID flow.Identifier + } + + leaders := make([]LeaderForView, 0, flagEndView-flagStartView+1) + for view := flagStartView; view <= flagEndView; view++ { + leaderID, err := selection.LeaderForView(view) + if err != nil { + log.Fatal().Err(err).Msg("Failed to read leader for view") + } + leaders = append(leaders, LeaderForView{View: view, LeaderID: leaderID}) + } + + err = json.NewEncoder(os.Stdout).Encode(leaders) + if err != nil { + log.Fatal().Err(err).Msg("Failed to encode leaders") + } +} diff --git a/cmd/util/cmd/pebble-checkpoint/cmd.go b/cmd/util/cmd/pebble-checkpoint/cmd.go new file mode 100644 index 00000000000..247caa2612b --- /dev/null +++ b/cmd/util/cmd/pebble-checkpoint/cmd.go @@ -0,0 +1,54 @@ +package cmd + +import ( + "fmt" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/storage/pebble" +) + +var ( + flagPebbleDir string + flagOutput string +) + +// Note: Although checkpoint is fast to create, it is not free. 
When creating a checkpoint, the +// underlying pebble sstables are hard-linked to the checkpoint directory, which means the compaction +// process will not be able to delete the sstables until the checkpoint is deleted. This can lead to +// increased disk usage if checkpoints are created frequently without being cleaned up. +var Cmd = &cobra.Command{ + Use: "pebble-checkpoint", + Short: "Create a checkpoint from a Pebble database", + RunE: runE, +} + +func init() { + Cmd.Flags().StringVar(&flagPebbleDir, "pebbledir", "", + "directory containing the Pebble database") + _ = Cmd.MarkFlagRequired("pebbledir") + + Cmd.Flags().StringVar(&flagOutput, "output", "", + "output directory for the checkpoint") + _ = Cmd.MarkFlagRequired("output") +} + +func runE(*cobra.Command, []string) error { + log.Info().Msgf("creating checkpoint from Pebble database at %v to %v", flagPebbleDir, flagOutput) + + // Initialize Pebble DB + db, err := pebble.ShouldOpenDefaultPebbleDB(log.Logger, flagPebbleDir) + if err != nil { + return fmt.Errorf("failed to initialize Pebble database %v: %w", flagPebbleDir, err) + } + + // Create checkpoint + err = db.Checkpoint(flagOutput) + if err != nil { + return fmt.Errorf("failed to create checkpoint %v: %w", flagOutput, err) + } + + log.Info().Msgf("successfully created checkpoint at %v", flagOutput) + return nil +} diff --git a/cmd/util/cmd/read-badger/cmd/blocks.go b/cmd/util/cmd/read-badger/cmd/blocks.go index 5b04d34a965..1e60bf85487 100644 --- a/cmd/util/cmd/read-badger/cmd/blocks.go +++ b/cmd/util/cmd/read-badger/cmd/blocks.go @@ -1,43 +1,72 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagBlockID string +var flagBlockHeight uint64 func init() { rootCmd.AddCommand(blocksCmd) blocksCmd.Flags().StringVarP(&flagBlockID, "id", "i", "", "the id of the block") - _ = blocksCmd.MarkFlagRequired("id") + blocksCmd.Flags().Uint64Var(&flagBlockHeight, "height", 0, "Block height") } var blocksCmd = &cobra.Command{ Use: "blocks", - Short: "get a block by block ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() - - log.Info().Msgf("got flag block id: %s", flagBlockID) - blockID, err := flow.HexStringToIdentifier(flagBlockID) - if err != nil { - log.Error().Err(err).Msg("malformed block id") - return - } - - log.Info().Msgf("getting block by id: %v", blockID) - block, err := storages.Blocks.ByID(blockID) - if err != nil { - log.Error().Err(err).Msgf("could not get block with id: %v", blockID) - return - } - - common.PrettyPrintEntity(block) + Short: "get a block by block ID or height", + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + cacheMetrics := &metrics.NoopCollector{} + headers := store.NewHeaders(cacheMetrics, db) + index := store.NewIndex(cacheMetrics, db) + guarantees := store.NewGuarantees(cacheMetrics, db, store.DefaultCacheSize, store.DefaultCacheSize) + seals := store.NewSeals(cacheMetrics, db) + results := store.NewExecutionResults(cacheMetrics, db) + receipts := store.NewExecutionReceipts(cacheMetrics, db, results, store.DefaultCacheSize) + payloads := store.NewPayloads(db, index, guarantees, seals, receipts, results) + blocks := store.NewBlocks(db, headers, payloads) + + var block 
*flow.Block + var err error + + if flagBlockID != "" { + log.Info().Msgf("got flag block id: %s", flagBlockID) + blockID, err := flow.HexStringToIdentifier(flagBlockID) + if err != nil { + return fmt.Errorf("malformed block id: %w", err) + } + + log.Info().Msgf("getting block by id: %v", blockID) + + block, err = blocks.ByID(blockID) + if err != nil { + return fmt.Errorf("could not get block with id %v: %w", blockID, err) + } + } else if flagBlockHeight != 0 { + log.Info().Msgf("got flag block height: %d", flagBlockHeight) + + block, err = blocks.ByHeight(flagBlockHeight) + if err != nil { + return fmt.Errorf("could not get block with height %d: %w", flagBlockHeight, err) + } + } else { + return fmt.Errorf("provide either --id or --height") + } + + common.PrettyPrintEntity(block) + return nil + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/chunk_data_pack.go b/cmd/util/cmd/read-badger/cmd/chunk_data_pack.go index 0ac23aabe9c..72dcb9aeadb 100644 --- a/cmd/util/cmd/read-badger/cmd/chunk_data_pack.go +++ b/cmd/util/cmd/read-badger/cmd/chunk_data_pack.go @@ -1,11 +1,16 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagChunkID string @@ -13,31 +18,34 @@ var flagChunkID string func init() { rootCmd.AddCommand(chunkDataPackCmd) - chunkDataPackCmd.Flags().StringVarP(&flagChunkID, "chunk-id", "c", "", "the id of the chunk") - _ = chunkDataPackCmd.MarkFlagRequired("chunk-id") + chunkDataPackCmd.Flags().StringVarP(&flagChunkID, "id", "c", "", "the id of the chunk") + _ = chunkDataPackCmd.MarkFlagRequired("id") } var chunkDataPackCmd = &cobra.Command{ - Use: "chunk-data", + Use: "chunk-data-packs", Short: "get chunk data pack by chunk ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() - - log.Info().Msgf("got flag chunk id: %s", flagChunkID) - chunkID, err := flow.HexStringToIdentifier(flagChunkID) - if err != nil { - log.Error().Err(err).Msg("malformed chunk id") - return - } - - log.Info().Msgf("getting chunk data pack by chunk id: %v", chunkID) - chunkDataPack, err := storages.ChunkDataPacks.ByChunkID(chunkID) - if err != nil { - log.Error().Err(err).Msgf("could not get chunk data pack with chunk id: %v", chunkID) - return - } - - common.PrettyPrintEntity(chunkDataPack) + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + log.Info().Msgf("got flag chunk id: %s", flagChunkID) + chunkID, err := flow.HexStringToIdentifier(flagChunkID) + if err != nil { + return fmt.Errorf("malformed chunk id: %w", err) + } + + metrics := metrics.NewNoopCollector() + collections := store.NewCollections(db, store.NewTransactions(metrics, db)) + chunkDataPacks := store.NewChunkDataPacks(metrics, + db, collections, 1) + + log.Info().Msgf("getting chunk data pack by chunk id: %v", chunkID) + chunkDataPack, err := chunkDataPacks.ByChunkID(chunkID) + if err != nil { + return fmt.Errorf("could not get chunk data pack with chunk id: %v: %w", chunkID, err) + } + + common.PrettyPrintEntity(chunkDataPack) + return nil + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/cluster_blocks.go b/cmd/util/cmd/read-badger/cmd/cluster_blocks.go index 38fc43eaf59..6d094fd10ae 
100644 --- a/cmd/util/cmd/read-badger/cmd/cluster_blocks.go +++ b/cmd/util/cmd/read-badger/cmd/cluster_blocks.go @@ -1,13 +1,16 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagChainName string @@ -27,54 +30,52 @@ func init() { var clusterBlocksCmd = &cobra.Command{ Use: "cluster-blocks", Short: "get cluster blocks", - Run: func(cmd *cobra.Command, args []string) { - metrics := metrics.NewNoopCollector() - db := common.InitStorage(flagDatadir) - headers := badger.NewHeaders(metrics, db) - clusterPayloads := badger.NewClusterPayloads(metrics, db) - - // get chain id - log.Info().Msgf("got flag chain name: %s", flagChainName) - chainID := flow.ChainID(flagChainName) - clusterBlocks := badger.NewClusterBlocks(db, chainID, headers, clusterPayloads) - - if flagClusterBlockID != "" && flagHeight != 0 { - log.Error().Msg("provide either a --id or --height and not both") - return - } - - if flagClusterBlockID != "" { - log.Info().Msgf("got flag cluster block id: %s", flagClusterBlockID) - clusterBlockID, err := flow.HexStringToIdentifier(flagClusterBlockID) - if err != nil { - log.Error().Err(err).Msg("malformed cluster block id") - return - } + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + metrics := metrics.NewNoopCollector() + + headers := store.NewHeaders(metrics, db) + clusterPayloads := store.NewClusterPayloads(metrics, db) + + // get chain id + log.Info().Msgf("got flag chain name: %s", flagChainName) + chainID := flow.ChainID(flagChainName) + clusterBlocks := store.NewClusterBlocks(db, chainID, headers, clusterPayloads) - log.Info().Msgf("getting cluster block by id: %v", clusterBlockID) - clusterBlock, err := clusterBlocks.ByID(clusterBlockID) - if err != nil { - log.Error().Err(err).Msgf("could not get cluster block with id: %v", clusterBlockID) - return + if flagClusterBlockID != "" && flagHeight != 0 { + return fmt.Errorf("provide either a --id or --height and not both") } - common.PrettyPrint(clusterBlock) - return - } + if flagClusterBlockID != "" { + log.Info().Msgf("got flag cluster block id: %s", flagClusterBlockID) + clusterBlockID, err := flow.HexStringToIdentifier(flagClusterBlockID) + if err != nil { + return fmt.Errorf("malformed cluster block id: %w", err) + } - if flagClusterBlockID != "" { - log.Info().Msgf("getting cluster block by height: %v", flagHeight) - clusterBlock, err := clusterBlocks.ByHeight(flagHeight) - if err != nil { - log.Error().Err(err).Msgf("could not get cluster block with height: %v", flagHeight) - return + log.Info().Msgf("getting cluster block by id: %v", clusterBlockID) + clusterBlock, err := clusterBlocks.ProposalByID(clusterBlockID) + if err != nil { + return fmt.Errorf("could not get cluster block with id: %v, %w", clusterBlockID, err) + } + + common.PrettyPrint(clusterBlock) + return nil } - log.Info().Msgf("block id: %v", clusterBlock.ID()) - common.PrettyPrint(clusterBlock) - return - } + if flagHeight > 0 { + log.Info().Msgf("getting cluster block by height: %v", flagHeight) + clusterBlock, err := clusterBlocks.ProposalByHeight(flagHeight) + if err != nil { + return fmt.Errorf("could not get cluster block with height: %v, %w", flagHeight, err) + } + + 
log.Info().Msgf("block id: %v", clusterBlock.Block.ID()) + common.PrettyPrint(clusterBlock) + return nil + } - log.Error().Msg("provide either a --id or --height") + return fmt.Errorf("provide either a --id or --height") + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/collections.go b/cmd/util/cmd/read-badger/cmd/collections.go index 879454bac2d..b59817cad8e 100644 --- a/cmd/util/cmd/read-badger/cmd/collections.go +++ b/cmd/util/cmd/read-badger/cmd/collections.go @@ -1,11 +1,14 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" ) var flagCollectionID string @@ -23,60 +26,56 @@ func init() { var collectionsCmd = &cobra.Command{ Use: "collections", Short: "get collection by collection or transaction ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) - if flagCollectionID != "" { - log.Info().Msgf("got flag collection id: %s", flagCollectionID) - collectionID, err := flow.HexStringToIdentifier(flagCollectionID) - if err != nil { - log.Error().Err(err).Msg("malformed collection id") - return - } + if flagCollectionID != "" { + log.Info().Msgf("got flag collection id: %s", flagCollectionID) + collectionID, err := flow.HexStringToIdentifier(flagCollectionID) + if err != nil { + return fmt.Errorf("malformed collection id: %w", err) + } - log.Info().Msgf("getting collection by id: %v", collectionID) + log.Info().Msgf("getting collection by id: %v", collectionID) - // get only the light collection if specified - if flagLightCollection { - light, err := storages.Collections.LightByID(collectionID) + // get only the light collection if specified + if flagLightCollection { + light, err := storages.Collections.LightByID(collectionID) + if err != nil { + return fmt.Errorf("could not get collection with id %v: %w", collectionID, err) + } + common.PrettyPrintEntity(light) + return nil + } + + // otherwise get the full collection + fullCollection, err := storages.Collections.ByID(collectionID) if err != nil { - log.Error().Err(err).Msgf("could not get collection with id: %v", collectionID) - return + return fmt.Errorf("could not get collection: %w", err) } - common.PrettyPrintEntity(light) - return + common.PrettyPrintEntity(fullCollection) + return nil } - // otherwise get the full collection - fullCollection, err := storages.Collections.ByID(collectionID) - if err != nil { - log.Error().Err(err).Msgf("could not get collection ") - return - } - common.PrettyPrintEntity(fullCollection) - return - } + if flagTransactionID != "" { + log.Info().Msgf("got flag transaction id: %s", flagTransactionID) + transactionID, err := flow.HexStringToIdentifier(flagTransactionID) + if err != nil { + return fmt.Errorf("malformed transaction id, %w", err) + } - if flagTransactionID != "" { - log.Info().Msgf("got flag transaction id: %s", flagTransactionID) - transactionID, err := flow.HexStringToIdentifier(flagTransactionID) - if err != nil { - log.Error().Err(err).Msg("malformed transaction id") - return - } + log.Info().Msgf("getting collections by transaction id: %v", transactionID) + light, err := storages.Collections.LightByTransactionID(transactionID) + if err != nil { + return fmt.Errorf("could not get collections for transaction id %v: %w", 
transactionID, err) + } - log.Info().Msgf("getting collections by transaction id: %v", transactionID) - light, err := storages.Collections.LightByTransactionID(transactionID) - if err != nil { - log.Error().Err(err).Msgf("could not get collections for transaction id: %v", transactionID) - return + common.PrettyPrintEntity(light) + return nil } - common.PrettyPrintEntity(light) - return - } - - log.Error().Msg("must specify exactly one of --collection-id or --transaction-id") + return fmt.Errorf("must specify exactly one of --collection-id or --transaction-id") + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/commits.go b/cmd/util/cmd/read-badger/cmd/commits.go index c16572045da..8ec83b62543 100644 --- a/cmd/util/cmd/read-badger/cmd/commits.go +++ b/cmd/util/cmd/read-badger/cmd/commits.go @@ -1,10 +1,16 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) func init() { @@ -17,24 +23,25 @@ func init() { var commitsCmd = &cobra.Command{ Use: "commits", Short: "get commit by block ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() - - log.Info().Msgf("got flag block id: %s", flagBlockID) - blockID, err := flow.HexStringToIdentifier(flagBlockID) - if err != nil { - log.Error().Err(err).Msg("malformed block id") - return - } - - log.Info().Msgf("getting commit by block id: %v", blockID) - commit, err := storages.Commits.ByBlockID(blockID) - if err != nil { - log.Error().Err(err).Msgf("could not get commit for block id: %v", blockID) - return - } - - log.Info().Msgf("commit: %x", commit) + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + + commits := store.NewCommits(metrics.NewNoopCollector(), db) + + log.Info().Msgf("got flag block id: %s", flagBlockID) + blockID, err := flow.HexStringToIdentifier(flagBlockID) + if err != nil { + return fmt.Errorf("malformed block id: %w", err) + } + + log.Info().Msgf("getting commit by block id: %v", blockID) + commit, err := commits.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("could not get commit for block id: %v: %w", blockID, err) + } + + log.Info().Msgf("commit: %v", commit) + return nil + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/epoch_commit.go b/cmd/util/cmd/read-badger/cmd/epoch_commit.go index c689b951d66..23deb37105b 100644 --- a/cmd/util/cmd/read-badger/cmd/epoch_commit.go +++ b/cmd/util/cmd/read-badger/cmd/epoch_commit.go @@ -1,11 +1,14 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" ) var flagEpochCommitID string @@ -20,25 +23,25 @@ func init() { var epochCommitCmd = &cobra.Command{ Use: "epoch-commit", Short: "get epoch commit by ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() - - log.Info().Msgf("got flag commit id: %s", flagEpochCommitID) - commitID, err := flow.HexStringToIdentifier(flagEpochCommitID) - if err != nil { - log.Error().Err(err).Msg("malformed epoch commit id") - return - } - - log.Info().Msgf("getting epoch commit by id: %v", commitID) - epochCommit, err := storages.EpochCommits.ByID(commitID) - if err != nil { 
- log.Error().Err(err).Msgf("could not get epoch commit with id: %v", commitID) - return - } - - log.Info().Msgf("epoch commit id: %v", epochCommit.ID()) - common.PrettyPrint(epochCommit) + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) + + log.Info().Msgf("got flag commit id: %s", flagEpochCommitID) + commitID, err := flow.HexStringToIdentifier(flagEpochCommitID) + if err != nil { + return fmt.Errorf("malformed epoch commit id: %w", err) + } + + log.Info().Msgf("getting epoch commit by id: %v", commitID) + epochCommit, err := storages.EpochCommits.ByID(commitID) + if err != nil { + return fmt.Errorf("could not get epoch commit with id: %v: %w", commitID, err) + } + + log.Info().Msgf("epoch commit id: %v", epochCommit.ID()) + common.PrettyPrint(epochCommit) + return nil + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/epoch_protocol_state.go b/cmd/util/cmd/read-badger/cmd/epoch_protocol_state.go new file mode 100644 index 00000000000..0a7922e4cf3 --- /dev/null +++ b/cmd/util/cmd/read-badger/cmd/epoch_protocol_state.go @@ -0,0 +1,44 @@ +package cmd + +import ( + "fmt" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +func init() { + rootCmd.AddCommand(epochProtocolStateCmd) + + epochProtocolStateCmd.Flags().StringVarP(&flagBlockID, "block-id", "b", "", "the block id of which to query the protocol state") + _ = epochProtocolStateCmd.MarkFlagRequired("block-id") +} + +var epochProtocolStateCmd = &cobra.Command{ + Use: "epoch-protocol-state", + Short: "get epoch protocol state by block ID", + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) + + log.Info().Msgf("got flag block id: %s", flagBlockID) + blockID, err := flow.HexStringToIdentifier(flagBlockID) + if err != nil { + return fmt.Errorf("malformed block id: %w", err) + } + + log.Info().Msgf("getting protocol state by block id: %v", blockID) + protocolState, err := storages.EpochProtocolStateEntries.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("could not get protocol state for block id: %v: %w", blockID, err) + } + + common.PrettyPrint(protocolState) + return nil + }) + }, +} diff --git a/cmd/util/cmd/read-badger/cmd/epoch_statuses.go b/cmd/util/cmd/read-badger/cmd/epoch_statuses.go deleted file mode 100644 index 7d0cd055f03..00000000000 --- a/cmd/util/cmd/read-badger/cmd/epoch_statuses.go +++ /dev/null @@ -1,41 +0,0 @@ -package cmd - -import ( - "github.com/rs/zerolog/log" - "github.com/spf13/cobra" - - "github.com/onflow/flow-go/cmd/util/cmd/common" - "github.com/onflow/flow-go/model/flow" -) - -func init() { - rootCmd.AddCommand(epochStatusesCmd) - - epochStatusesCmd.Flags().StringVarP(&flagBlockID, "block-id", "b", "", "the block id of which to query the epoch status") - _ = epochStatusesCmd.MarkFlagRequired("block-id") -} - -var epochStatusesCmd = &cobra.Command{ - Use: "epoch-statuses", - Short: "get epoch statuses by block ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() - - log.Info().Msgf("got flag block id: %s", flagBlockID) - blockID, err := flow.HexStringToIdentifier(flagBlockID) - if err != nil { - log.Error().Err(err).Msg("malformed block id") - return - } - - log.Info().Msgf("getting epoch 
status by block id: %v", blockID) - epochStatus, err := storages.Statuses.ByBlockID(blockID) - if err != nil { - log.Error().Err(err).Msgf("could not get epoch status for block id: %v", blockID) - return - } - - common.PrettyPrint(epochStatus) - }, -} diff --git a/cmd/util/cmd/read-badger/cmd/events.go b/cmd/util/cmd/read-badger/cmd/events.go index 981d0a65727..6227ec7fa82 100644 --- a/cmd/util/cmd/read-badger/cmd/events.go +++ b/cmd/util/cmd/read-badger/cmd/events.go @@ -1,11 +1,16 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagEventType string @@ -23,79 +28,75 @@ func init() { var eventsCmd = &cobra.Command{ Use: "events", - Short: "Read events from badger", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() - - if flagEventType != "" && flagTransactionID != "" { - log.Error().Msg("provide only one of --transaction-id or --event-type") - return - } - - log.Info().Msgf("got flag block id: %s", flagBlockID) - blockID, err := flow.HexStringToIdentifier(flagBlockID) - if err != nil { - log.Error().Err(err).Msg("malformed block id") - return - } - - if flagTransactionID != "" { - log.Info().Msgf("got flag transaction id: %s", flagTransactionID) - transactionID, err := flow.HexStringToIdentifier(flagTransactionID) - if err != nil { - log.Error().Err(err).Msg("malformed transaction id") - return + Short: "Read events", + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + events := store.NewEvents(metrics.NewNoopCollector(), db) + + if flagEventType != "" && flagTransactionID != "" { + return fmt.Errorf("provide only one of --transaction-id or --event-type") } - log.Info().Msgf("getting events for block id: %v, transaction id: %v", blockID, transactionID) - events, err := storages.Events.ByBlockIDTransactionID(blockID, transactionID) + log.Info().Msgf("got flag block id: %s", flagBlockID) + blockID, err := flow.HexStringToIdentifier(flagBlockID) if err != nil { - log.Error().Err(err).Msgf("could not get events for block id: %v, transaction id: %v", blockID, transactionID) - return + return fmt.Errorf("malformed block id: %w", err) } - for _, event := range events { - common.PrettyPrint(event) - } - return - } - - if flagEventType != "" { - validEvents := map[string]bool{ - "flow.AccountCreated": true, - "flow.AccountUpdated": true, - "flow.EpochCommit": true, - "flow.EpochSetup": true, - } - if _, ok := validEvents[flagEventType]; ok { - log.Info().Msgf("getting events for block id: %v, event type: %s", blockID, flagEventType) - events, err := storages.Events.ByBlockIDEventType(blockID, flow.EventType(flagEventType)) + if flagTransactionID != "" { + log.Info().Msgf("got flag transaction id: %s", flagTransactionID) + transactionID, err := flow.HexStringToIdentifier(flagTransactionID) if err != nil { - log.Error().Err(err).Msgf("could not get events for block id: %v, event type: %s", blockID, flagEventType) - return + return fmt.Errorf("malformed transaction id: %w", err) + } + + log.Info().Msgf("getting events for block id: %v, transaction id: %v", blockID, transactionID) + events, err := events.ByBlockIDTransactionID(blockID, transactionID) + if err != nil { + return fmt.Errorf("could not get events for block id: 
%v, transaction id: %v: %w", blockID, transactionID, err) } for _, event := range events { common.PrettyPrint(event) } - return + return nil + } + + if flagEventType != "" { + validEvents := map[string]bool{ + "flow.AccountCreated": true, + "flow.AccountUpdated": true, + "flow.EpochCommit": true, + "flow.EpochSetup": true, + } + if _, ok := validEvents[flagEventType]; ok { + log.Info().Msgf("getting events for block id: %v, event type: %s", blockID, flagEventType) + events, err := events.ByBlockIDEventType(blockID, flow.EventType(flagEventType)) + if err != nil { + return fmt.Errorf("could not get events for block id: %v, event type: %s, %w", blockID, flagEventType, err) + } + + for _, event := range events { + common.PrettyPrint(event) + } + return nil + } + + return fmt.Errorf("not a valid event type: %s", flagEventType) + } + + // just fetch events for block + log.Info().Msgf("getting events for block id: %v", blockID) + evts, err := events.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("could not get events for block id: %v: %w", blockID, err) + } + + for _, event := range evts { + common.PrettyPrint(event) } - log.Fatal().Msgf("not a valid event type: %s", flagEventType) - return - } - - // just fetch events for block - log.Info().Msgf("getting events for block id: %v", blockID) - events, err := storages.Events.ByBlockID(blockID) - if err != nil { - log.Error().Err(err).Msgf("could not get events for block id: %v", blockID) - return - } - - for _, event := range events { - common.PrettyPrint(event) - } + return nil + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/guarantees.go b/cmd/util/cmd/read-badger/cmd/guarantees.go index 2a91e953342..14adf48588a 100644 --- a/cmd/util/cmd/read-badger/cmd/guarantees.go +++ b/cmd/util/cmd/read-badger/cmd/guarantees.go @@ -1,11 +1,14 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" ) func init() { @@ -18,24 +21,24 @@ func init() { var guaranteesCmd = &cobra.Command{ Use: "guarantees", Short: "get guarantees by collection ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() - - log.Info().Msgf("got flag collection id: %s", flagCollectionID) - collectionID, err := flow.HexStringToIdentifier(flagCollectionID) - if err != nil { - log.Error().Err(err).Msg("malformed collection idenitifer") - return - } - - log.Info().Msgf("getting guarantee by collection id: %v", collectionID) - guarantee, err := storages.Guarantees.ByCollectionID(collectionID) - if err != nil { - log.Error().Err(err).Msgf("could not get guarantee for collection id: %v", collectionID) - return - } - - common.PrettyPrintEntity(guarantee) + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) + + log.Info().Msgf("got flag collection id: %s", flagCollectionID) + collectionID, err := flow.HexStringToIdentifier(flagCollectionID) + if err != nil { + return fmt.Errorf("malformed collection identifier: %w", err) + } + + log.Info().Msgf("getting guarantee by collection id: %v", collectionID) + guarantee, err := storages.Guarantees.ByCollectionID(collectionID) + if err != nil { + return fmt.Errorf("could not get guarantee for collection id: %v: %w", collectionID, err) + } + + common.PrettyPrintEntity(guarantee) + return nil + }) }, } diff --git 
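Stepping back from the individual files: every read-badger command in this change follows the same refactor. Run becomes RunE, and the old InitStorages/defer db.Close() pair is replaced by common.WithStorage, which opens the database, invokes the closure, and closes the database when the closure returns, so errors propagate to cobra instead of being logged and swallowed. A minimal sketch of a new command written against this pattern; the "headers" command itself is hypothetical:

var headersCmd = &cobra.Command{
	Use:   "headers",
	Short: "get a header by block ID (illustrative skeleton)",
	RunE: func(cmd *cobra.Command, args []string) error {
		return common.WithStorage(flagDatadir, func(db storage.DB) error {
			storages := common.InitStorages(db)

			blockID, err := flow.HexStringToIdentifier(flagBlockID)
			if err != nil {
				return fmt.Errorf("malformed block id: %w", err)
			}

			// Returning a wrapped error lets cobra print it and exit
			// non-zero; no manual db.Close() is needed.
			header, err := storages.Headers.ByBlockID(blockID)
			if err != nil {
				return fmt.Errorf("could not get header for block id %v: %w", blockID, err)
			}

			common.PrettyPrintEntity(header)
			return nil
		})
	},
}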
a/cmd/util/cmd/read-badger/cmd/protocol_kvstore.go b/cmd/util/cmd/read-badger/cmd/protocol_kvstore.go new file mode 100644 index 00000000000..d434d6e1aaa --- /dev/null +++ b/cmd/util/cmd/read-badger/cmd/protocol_kvstore.go @@ -0,0 +1,67 @@ +package cmd + +import ( + "fmt" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +var flagDecodeData bool + +func init() { + rootCmd.AddCommand(protocolStateKVStore) + + protocolStateKVStore.Flags().StringVarP(&flagBlockID, "block-id", "b", "", "the block id of which to query the protocol state") + _ = protocolStateKVStore.MarkFlagRequired("block-id") + + protocolStateKVStore.Flags().BoolVar(&flagDecodeData, "decode", false, "whether to decode the data field") + _ = protocolStateKVStore.MarkFlagRequired("block-id") +} + +var protocolStateKVStore = &cobra.Command{ + Use: "protocol-kvstore", + Short: "get protocol state kvstore by block ID", + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) + + log.Info().Msgf("got flag block id: %s", flagBlockID) + blockID, err := flow.HexStringToIdentifier(flagBlockID) + if err != nil { + return fmt.Errorf("malformed block id: %w", err) + } + + log.Info().Msgf("getting protocol state kvstore by block id: %v", blockID) + protocolState, err := storages.ProtocolKVStore.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("could not get protocol state kvstore for block id: %v: %w", blockID, err) + } + if !flagDecodeData { + common.PrettyPrint(protocolState) + return nil + } + + kvstoreAPI, err := kvstore.VersionedDecode(protocolState.Version, protocolState.Data) + if err != nil { + return fmt.Errorf("could not decode protocol state kvstore for block id: %v: %w", blockID, err) + } + + var model any + switch kvstoreAPI.GetProtocolStateVersion() { + case 0: + model = kvstoreAPI.(*kvstore.Modelv0) + case 1: + model = kvstoreAPI.(*kvstore.Modelv1) + } + common.PrettyPrint(model) + return nil + }) + }, +} diff --git a/cmd/util/cmd/read-badger/cmd/receipts.go b/cmd/util/cmd/read-badger/cmd/receipts.go index 412b71b5efb..c6b86ef78c3 100644 --- a/cmd/util/cmd/read-badger/cmd/receipts.go +++ b/cmd/util/cmd/read-badger/cmd/receipts.go @@ -1,11 +1,16 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagReceiptID string @@ -20,49 +25,51 @@ func init() { var receiptsCmd = &cobra.Command{ Use: "receipts", Short: "get receipt by block or receipt ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + results := store.NewExecutionResults(metrics.NewNoopCollector(), db) + receipts := store.NewExecutionReceipts(metrics.NewNoopCollector(), db, results, 1) - if flagBlockID != "" { - log.Info().Msgf("got flag block id: %s", flagBlockID) - blockID, err := flow.HexStringToIdentifier(flagBlockID) - if err != nil { - log.Error().Err(err).Msg("malformed block id") - return - } + if flagBlockID != 
"" { + log.Info().Msgf("got flag block id: %s", flagBlockID) + blockID, err := flow.HexStringToIdentifier(flagBlockID) + if err != nil { + return fmt.Errorf("malformed block id: %w", err) + } - log.Info().Msgf("getting receipt by block id: %v", blockID) - receipts, err := storages.Receipts.ByBlockID(blockID) - if err != nil { - log.Error().Err(err).Msgf("could not get receipt for block id: %v", blockID) - } + log.Info().Msgf("getting receipt by block id: %v", blockID) + recs, err := receipts.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("could not get receipt for block id %v: %w", blockID, err) + } - if len(receipts) > 0 { - common.PrettyPrintEntity(receipts[0]) - } - return - } + if len(recs) == 0 { + log.Info().Msgf("no receipt found") + return nil + } - if flagReceiptID != "" { - log.Info().Msgf("got flag receipt id: %s", flagReceiptID) - receiptID, err := flow.HexStringToIdentifier(flagReceiptID) - if err != nil { - log.Error().Err(err).Msg("malformed receipt id") - return + common.PrettyPrintEntity(recs[0]) + return nil } - log.Info().Msgf("getting receipt by id: %v", receiptID) - receipt, err := storages.Receipts.ByID(receiptID) - if err != nil { - log.Error().Err(err).Msgf("could not get receipt with id: %v", receiptID) - return - } + if flagReceiptID != "" { + log.Info().Msgf("got flag receipt id: %s", flagReceiptID) + receiptID, err := flow.HexStringToIdentifier(flagReceiptID) + if err != nil { + return fmt.Errorf("malformed receipt id: %w", err) + } + + log.Info().Msgf("getting receipt by id: %v", receiptID) + receipt, err := receipts.ByID(receiptID) + if err != nil { + return fmt.Errorf("could not get receipt with id %v: %w", receiptID, err) + } - common.PrettyPrintEntity(receipt) - return - } + common.PrettyPrintEntity(receipt) + return nil + } - log.Error().Msg("missing flags: --block-id or --receipt-id") + return fmt.Errorf("missing flags: --block-id or --receipt-id") + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/results.go b/cmd/util/cmd/read-badger/cmd/results.go index 8c904ba12e6..6a14c3dd832 100644 --- a/cmd/util/cmd/read-badger/cmd/results.go +++ b/cmd/util/cmd/read-badger/cmd/results.go @@ -1,11 +1,16 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) var flagResultID string @@ -20,48 +25,54 @@ func init() { var resultsCmd = &cobra.Command{ Use: "results", Short: "get result by block or result ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + results := store.NewExecutionResults(metrics.NewNoopCollector(), db) + if flagBlockID != "" { + log.Info().Msgf("got flag block id: %s", flagBlockID) + blockID, err := flow.HexStringToIdentifier(flagBlockID) + if err != nil { + return fmt.Errorf("malformed block id: %w", err) + } - if flagBlockID != "" { - log.Info().Msgf("got flag block id: %s", flagBlockID) - blockID, err := flow.HexStringToIdentifier(flagBlockID) - if err != nil { - log.Error().Err(err).Msg("malformed block id") - return - } + log.Info().Msgf("getting result by block id: %v", blockID) + result, err := results.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("could not get result for block id %v: %w", blockID, 
err) + } - log.Info().Msgf("getting result by block id: %v", blockID) - result, err := storages.Results.ByBlockID(blockID) - if err != nil { - log.Error().Err(err).Msgf("could not get result for block id: %v", blockID) - return + common.PrettyPrintEntity(result) + // the result does not include the chunk ID, so we need to print it separately + printChunkID(result) + return nil } - common.PrettyPrintEntity(result) - return - } + if flagResultID != "" { + log.Info().Msgf("got flag result id: %s", flagResultID) + resultID, err := flow.HexStringToIdentifier(flagResultID) + if err != nil { + return fmt.Errorf("malformed result id: %w", err) + } - if flagResultID != "" { - log.Info().Msgf("got flag result id: %s", flagResultID) - resultID, err := flow.HexStringToIdentifier(flagResultID) - if err != nil { - log.Error().Err(err).Msg("malformed result id") - return - } + log.Info().Msgf("getting result by id: %v", resultID) + result, err := results.ByID(resultID) + if err != nil { + return fmt.Errorf("could not get result with id %v: %w", resultID, err) + } - log.Info().Msgf("getting result by id: %v", resultID) - result, err := storages.Results.ByID(resultID) - if err != nil { - log.Error().Err(err).Msgf("could not get result with id: %v", resultID) - return + common.PrettyPrintEntity(result) + // the result does not include the chunk ID, so we need to print it separately + printChunkID(result) + return nil } - common.PrettyPrintEntity(result) - return - } - - log.Error().Msg("missing flags: --block-id or --result-id") + return fmt.Errorf("missing flags: --block-id or --result-id") + }) }, } + +func printChunkID(result *flow.ExecutionResult) { + for i, chunk := range result.Chunks { + log.Info().Msgf("chunk index %d, id: %v", i, chunk.ID()) + } +} diff --git a/cmd/util/cmd/read-badger/cmd/root.go b/cmd/util/cmd/read-badger/cmd/root.go index 4497e86a71e..44a7471cd74 100644 --- a/cmd/util/cmd/read-badger/cmd/root.go +++ b/cmd/util/cmd/read-badger/cmd/root.go @@ -6,6 +6,8 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" + + "github.com/onflow/flow-go/cmd/util/cmd/common" ) var ( @@ -27,8 +29,7 @@ func Execute() { } func init() { - rootCmd.PersistentFlags().StringVarP(&flagDatadir, "datadir", "d", "/var/flow/data/protocol", "directory to the badger dababase") - _ = rootCmd.MarkPersistentFlagRequired("data-dir") + common.InitDataDirFlag(rootCmd, &flagDatadir) cobra.OnInitialize(initConfig) } diff --git a/cmd/util/cmd/read-badger/cmd/seals.go b/cmd/util/cmd/read-badger/cmd/seals.go index 475d4e5b72a..f6ebaa2e180 100644 --- a/cmd/util/cmd/read-badger/cmd/seals.go +++ b/cmd/util/cmd/read-badger/cmd/seals.go @@ -1,11 +1,14 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" ) var flagSealID string @@ -20,53 +23,49 @@ func init() { var sealsCmd = &cobra.Command{ Use: "seals", Short: "get seals by block or seal ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() - - if flagSealID != "" && flagBlockID != "" { - log.Error().Msg("provide one of the flags --id or --block-id") - return - } + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) - if flagSealID != "" { - log.Info().Msgf("got flag seal id: %s", flagSealID) - sealID, err := flow.HexStringToIdentifier(flagSealID) - if err 
!= nil { - log.Error().Err(err).Msg("malformed seal id") - return + if flagSealID != "" && flagBlockID != "" { + return fmt.Errorf("provide one of the flags --id or --block-id") } - log.Info().Msgf("getting seal by id: %v", sealID) - seal, err := storages.Seals.ByID(sealID) - if err != nil { - log.Error().Err(err).Msgf("could not get seal with id: %v", sealID) - return - } + if flagSealID != "" { + log.Info().Msgf("got flag seal id: %s", flagSealID) + sealID, err := flow.HexStringToIdentifier(flagSealID) + if err != nil { + return fmt.Errorf("malformed seal id: %w", err) + } - common.PrettyPrintEntity(seal) - return - } + log.Info().Msgf("getting seal by id: %v", sealID) + seal, err := storages.Seals.ByID(sealID) + if err != nil { + return fmt.Errorf("could not get seal with id: %v: %w", sealID, err) + } - if flagBlockID != "" { - log.Info().Msgf("got flag block id: %s", flagBlockID) - blockID, err := flow.HexStringToIdentifier(flagBlockID) - if err != nil { - log.Error().Err(err).Msg("malformed block id") - return + common.PrettyPrintEntity(seal) + return nil } - log.Info().Msgf("getting seal by block id: %v", blockID) - seal, err := storages.Seals.FinalizedSealForBlock(blockID) - if err != nil { - log.Error().Err(err).Msgf("could not get seal for block id: %v", blockID) - return - } + if flagBlockID != "" { + log.Info().Msgf("got flag block id: %s", flagBlockID) + blockID, err := flow.HexStringToIdentifier(flagBlockID) + if err != nil { + return fmt.Errorf("malformed block id: %w", err) + } + + log.Info().Msgf("getting seal by block id: %v", blockID) + seal, err := storages.Seals.FinalizedSealForBlock(blockID) + if err != nil { + return fmt.Errorf("could not get seal for block id: %v: %w", blockID, err) + } - common.PrettyPrintEntity(seal) - return - } + common.PrettyPrintEntity(seal) + return nil + } - log.Error().Msg("missing flags --id or --block-id") + return fmt.Errorf("missing flags --id or --block-id") + }) }, } diff --git a/cmd/util/cmd/read-badger/cmd/stats.go b/cmd/util/cmd/read-badger/cmd/stats.go new file mode 100644 index 00000000000..1dc03058ebb --- /dev/null +++ b/cmd/util/cmd/read-badger/cmd/stats.go @@ -0,0 +1,40 @@ +package cmd + +import ( + "fmt" + "runtime" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +func init() { + rootCmd.AddCommand(statsCmd) +} + +var statsCmd = &cobra.Command{ + Use: "stats", + Short: "get stats for the database, such as key count, total value size, min/max value size etc", + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(sdb storage.DB) error { + + numWorkers := runtime.NumCPU() + if numWorkers > 256 { + numWorkers = 256 + } + log.Info().Msgf("getting stats with %v workers", numWorkers) + + stats, err := operation.SummarizeKeysByFirstByteConcurrent(log.Logger, sdb.Reader(), numWorkers) + if err != nil { + return fmt.Errorf("failed to get stats: %w", err) + } + + operation.PrintStats(log.Logger, stats) + return nil + }) + }, +} diff --git a/cmd/util/cmd/read-badger/cmd/storages.go b/cmd/util/cmd/read-badger/cmd/storages.go index a477881f3f6..1d619dd05e2 100644 --- a/cmd/util/cmd/read-badger/cmd/storages.go +++ b/cmd/util/cmd/read-badger/cmd/storages.go @@ -1,14 +1 @@ package cmd - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/cmd/util/cmd/common" - "github.com/onflow/flow-go/storage" -) - -func InitStorages() 
(*storage.All, *badger.DB) {
-	db := common.InitStorage(flagDatadir)
-	storages := common.InitStorages(db)
-	return storages, db
-}
diff --git a/cmd/util/cmd/read-badger/cmd/transaction_results.go b/cmd/util/cmd/read-badger/cmd/transaction_results.go
index ebf604c687b..44285007cc7 100644
--- a/cmd/util/cmd/read-badger/cmd/transaction_results.go
+++ b/cmd/util/cmd/read-badger/cmd/transaction_results.go
@@ -1,11 +1,16 @@
 package cmd
 
 import (
+	"fmt"
+
 	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 
 	"github.com/onflow/flow-go/cmd/util/cmd/common"
 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/store"
 )
 
 func init() {
@@ -18,44 +23,47 @@ func init() {
 var transactionResultsCmd = &cobra.Command{
 	Use:   "transaction-results",
 	Short: "get transaction-result by block ID",
-	Run: func(cmd *cobra.Command, args []string) {
-		storages, db := InitStorages()
-		defer db.Close()
-
-		log.Info().Msgf("got flag block id: %s", flagBlockID)
-		blockID, err := flow.HexStringToIdentifier(flagBlockID)
-		if err != nil {
-			log.Error().Err(err).Msg("malformed block id")
-			return
-		}
-
-		log.Info().Msgf("getting transaction results by block id: %v", blockID)
-		block, err := storages.Blocks.ByID(blockID)
-		if err != nil {
-			log.Error().Err(err).Msg("could not get block with id: %w")
-			return
-		}
-
-		txIDs := make([]flow.Identifier, 0)
-
-		for _, guarantee := range block.Payload.Guarantees {
-			collection, err := storages.Collections.ByID(guarantee.CollectionID)
-			if err != nil {
-				log.Error().Err(err).Msgf("could not get collection with id: %v", guarantee.CollectionID)
-				return
-			}
-			for _, tx := range collection.Transactions {
-				txIDs = append(txIDs, tx.ID())
-			}
-		}
-		for _, txID := range txIDs {
-			transactionResult, err := storages.TransactionResults.ByBlockIDTransactionID(blockID, txID)
-			if err != nil {
-				log.Error().Err(err).Msgf("could not get transaction result for block id and transaction id: %v", txID)
-				return
-			}
-			common.PrettyPrintEntity(transactionResult)
-		}
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return common.WithStorage(flagDatadir, func(db storage.DB) error {
+			transactionResults, err := store.NewTransactionResults(metrics.NewNoopCollector(), db, 1)
+			if err != nil {
+				return err
+			}
+			storages := common.InitStorages(db)
+			log.Info().Msgf("got flag block id: %s", flagBlockID)
+			blockID, err := flow.HexStringToIdentifier(flagBlockID)
+			if err != nil {
+				return fmt.Errorf("malformed block id: %w", err)
+			}
+
+			log.Info().Msgf("getting transaction results by block id: %v", blockID)
+			block, err := storages.Blocks.ByID(blockID)
+			if err != nil {
+				return fmt.Errorf("could not get block with id: %w", err)
+			}
+
+			txIDs := make([]flow.Identifier, 0)
+
+			for _, guarantee := range block.Payload.Guarantees {
+				collection, err := storages.Collections.ByID(guarantee.CollectionID)
+				if err != nil {
+					return fmt.Errorf("could not get collection with id %v: %w", guarantee.CollectionID, err)
+				}
+				for _, tx := range collection.Transactions {
+					txIDs = append(txIDs, tx.ID())
+				}
+			}
+
+			for _, txID := range txIDs {
+				transactionResult, err := transactionResults.ByBlockIDTransactionID(blockID, txID)
+				if err != nil {
+					return fmt.Errorf("could not get transaction result for block id and transaction id: %v: %w", txID, err)
+				}
+				common.PrettyPrint(transactionResult)
+			}
+
+			return nil
+		})
 	},
 }
diff --git a/cmd/util/cmd/read-badger/cmd/transactions.go
b/cmd/util/cmd/read-badger/cmd/transactions.go index 9e954755109..c3e123d4808 100644 --- a/cmd/util/cmd/read-badger/cmd/transactions.go +++ b/cmd/util/cmd/read-badger/cmd/transactions.go @@ -1,11 +1,14 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" ) func init() { @@ -18,24 +21,24 @@ func init() { var transactionsCmd = &cobra.Command{ Use: "transactions", Short: "get transaction by ID", - Run: func(cmd *cobra.Command, args []string) { - storages, db := InitStorages() - defer db.Close() - - log.Info().Msgf("got flag transaction id: %s", flagTransactionID) - transactionID, err := flow.HexStringToIdentifier(flagTransactionID) - if err != nil { - log.Error().Err(err).Msg("malformed transaction id") - return - } - - log.Info().Msgf("getting transaction by id: %v", transactionID) - tx, err := storages.Transactions.ByID(transactionID) - if err != nil { - log.Error().Err(err).Msgf("could not get transaction with id: %v", transactionID) - return - } - - common.PrettyPrintEntity(tx) + RunE: func(cmd *cobra.Command, args []string) error { + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) + + log.Info().Msgf("got flag transaction id: %s", flagTransactionID) + transactionID, err := flow.HexStringToIdentifier(flagTransactionID) + if err != nil { + return fmt.Errorf("malformed transaction id: %w", err) + } + + log.Info().Msgf("getting transaction by id: %v", transactionID) + tx, err := storages.Transactions.ByID(transactionID) + if err != nil { + return fmt.Errorf("could not get transaction with id: %v: %w", transactionID, err) + } + + common.PrettyPrintEntity(tx) + return nil + }) }, } diff --git a/cmd/util/cmd/read-execution-state/list-accounts/cmd.go b/cmd/util/cmd/read-execution-state/list-accounts/cmd.go index 4a4ba7adbbf..ae335541d92 100644 --- a/cmd/util/cmd/read-execution-state/list-accounts/cmd.go +++ b/cmd/util/cmd/read-execution-state/list-accounts/cmd.go @@ -10,11 +10,11 @@ import ( "github.com/rs/zerolog/log" "github.com/spf13/cobra" - executionState "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/state" "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" "github.com/onflow/flow-go/ledger/common/pathfinder" "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/mtrie" @@ -78,7 +78,7 @@ func run(*cobra.Command, []string) { ldg := snapshot.NewReadFuncStorageSnapshot( func(id flow.RegisterID) (flow.RegisterValue, error) { - ledgerKey := executionState.RegisterIDToKey(id) + ledgerKey := convert.RegisterIDToLedgerKey(id) path, err := pathfinder.KeyToPath( ledgerKey, complete.DefaultPathFinderVersion) diff --git a/cmd/util/cmd/read-hotstuff/cmd/get_liveness.go b/cmd/util/cmd/read-hotstuff/cmd/get_liveness.go index c6eb12e2c43..ee90c5cbbad 100644 --- a/cmd/util/cmd/read-hotstuff/cmd/get_liveness.go +++ b/cmd/util/cmd/read-hotstuff/cmd/get_liveness.go @@ -5,6 +5,9 @@ import ( "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/consensus/hotstuff/persister" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" ) var GetLivenessCmd = &cobra.Command{ @@ -18,29 +21,31 @@ func init() { } func 
runGetLivenessData(*cobra.Command, []string) {
-	db := common.InitStorage(flagDatadir)
-	defer db.Close()
+	err := common.WithStorage(flagDatadir, func(db storage.DB) error {
+		lockManager := storage.NewTestingLockManager()
 
-	storages := common.InitStorages(db)
-	state, err := common.InitProtocolState(db, storages)
-	if err != nil {
-		log.Fatal().Err(err).Msg("could not init protocol state")
-	}
+		chainID := flow.ChainID(flagChain)
+		reader, err := persister.NewReader(db, chainID, lockManager)
+		if err != nil {
+			log.Fatal().Err(err).Msg("could not create reader from db")
+		}
 
-	rootBlock, err := state.Params().Root()
-	if err != nil {
-		log.Fatal().Err(err).Msgf("could not get root block")
-	}
+		log.Info().
+			Str("chain", flagChain).
+			Msg("getting hotstuff liveness data")
 
-	reader := NewHotstuffReader(db, rootBlock.ChainID)
+		livenessData, err := reader.GetLivenessData()
+		if err != nil {
+			log.Fatal().Err(err).Msg("could not get hotstuff liveness data")
+		}
 
-	log.Info().Msg("getting hotstuff liveness data")
+		log.Info().Msgf("successfully got hotstuff liveness data")
+		common.PrettyPrint(livenessData)
+		return nil
+	})
 
-	livenessData, err := reader.GetLivenessData()
 	if err != nil {
-		log.Fatal().Err(err).Msg("could not get hotstuff liveness data")
+		log.Error().Err(err).Msg("could not get hotstuff liveness data")
 	}
-
-	log.Info().Msgf("successfully get hotstuff liveness data")
-	common.PrettyPrint(livenessData)
 }
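Both hotstuff read commands funnel all of their work through `common.WithStorage`, which, judging from its usage here and in the read-badger commands, opens the database at the configured data directory, runs the closure, and closes the database afterwards. A rough sketch of that contract, with `openDB` standing in for whatever open routine the real helper actually uses (an assumption, not verified against the implementation in cmd/util/cmd/common):

// Sketch only: openDB is a hypothetical stand-in for the real open routine,
// and the Close call assumes the returned handle exposes one.
func WithStorage(datadir string, fn func(db storage.DB) error) error {
	db, err := openDB(datadir) // hypothetical open routine
	if err != nil {
		return fmt.Errorf("could not open database at %v: %w", datadir, err)
	}
	defer db.Close()
	return fn(db)
}

The value of the shape is that every command body becomes a closure returning an error, so the database is closed on every exit path instead of relying on each branch to clean up.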
+ Msg("getting hotstuff safety data") - reader := NewHotstuffReader(db, rootBlock.ChainID) + livenessData, err := reader.GetSafetyData() + if err != nil { + log.Fatal().Err(err).Msg("could not get hotstuff safety data") + } - log.Info().Msg("getting hotstuff safety data") + log.Info().Msgf("successfully get hotstuff safety data") + common.PrettyPrint(livenessData) + return nil + }) - livenessData, err := reader.GetSafetyData() if err != nil { - log.Fatal().Err(err).Msg("could not get hotstuff safety data") + log.Error().Err(err).Msg("could not get hotstuff safety data") } - - log.Info().Msgf("successfully get hotstuff safety data") - common.PrettyPrint(livenessData) } diff --git a/cmd/util/cmd/read-hotstuff/cmd/reader.go b/cmd/util/cmd/read-hotstuff/cmd/reader.go deleted file mode 100644 index dcfefe94f7a..00000000000 --- a/cmd/util/cmd/read-hotstuff/cmd/reader.go +++ /dev/null @@ -1,25 +0,0 @@ -package cmd - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/consensus/hotstuff/persister" - "github.com/onflow/flow-go/model/flow" -) - -// HotstuffReader exposes only the read-only parts of the Hotstuff Persister component. -// This is used to read information about the HotStuff instance's current state from CLI tools. -// CAUTION: the write functions are hidden here, because it is NOT SAFE to use them outside -// the Hotstuff state machine. -type HotstuffReader interface { - // GetLivenessData retrieves the latest persisted liveness data. - GetLivenessData() (*hotstuff.LivenessData, error) - // GetSafetyData retrieves the latest persisted safety data. - GetSafetyData() (*hotstuff.SafetyData, error) -} - -// NewHotstuffReader returns a new Persister, constrained to read-only operations. -func NewHotstuffReader(db *badger.DB, chainID flow.ChainID) HotstuffReader { - return persister.New(db, chainID) -} diff --git a/cmd/util/cmd/read-hotstuff/cmd/root.go b/cmd/util/cmd/read-hotstuff/cmd/root.go index 57dfcba65a6..8d982845a50 100644 --- a/cmd/util/cmd/read-hotstuff/cmd/root.go +++ b/cmd/util/cmd/read-hotstuff/cmd/root.go @@ -6,9 +6,12 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" + + "github.com/onflow/flow-go/cmd/util/cmd/common" ) var ( + flagChain string flagDatadir string ) @@ -27,8 +30,10 @@ func Execute() { } func init() { - rootCmd.PersistentFlags().StringVarP(&flagDatadir, "datadir", "d", "/var/flow/data/protocol", "directory to the badger dababase") - _ = rootCmd.MarkPersistentFlagRequired("datadir") + common.InitDataDirFlag(rootCmd, &flagDatadir) + + rootCmd.PersistentFlags().StringVar(&flagChain, "chain", "", "Chain name, e.g. 
diff --git a/cmd/util/cmd/read-light-block/read_light_block.go b/cmd/util/cmd/read-light-block/read_light_block.go
new file mode 100644
index 00000000000..facfafc1208
--- /dev/null
+++ b/cmd/util/cmd/read-light-block/read_light_block.go
@@ -0,0 +1,73 @@
+package read
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/onflow/flow-go/model/cluster"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+type ClusterLightBlock struct {
+	ID           flow.Identifier
+	Height       uint64
+	CollectionID flow.Identifier
+	Transactions []flow.Identifier
+}
+
+func ClusterBlockToLight(clusterBlock *cluster.Block) *ClusterLightBlock {
+	return &ClusterLightBlock{
+		ID:           clusterBlock.ID(),
+		Height:       clusterBlock.Height,
+		CollectionID: clusterBlock.Payload.Collection.ID(),
+		Transactions: clusterBlock.Payload.Collection.Light().Transactions,
+	}
+}
+
+func ReadClusterLightBlockByHeightRange(clusterBlocks storage.ClusterBlocks, startHeight uint64, endHeight uint64) ([]*ClusterLightBlock, error) {
+	blocks := make([]*ClusterLightBlock, 0)
+	for height := startHeight; height <= endHeight; height++ {
+		block, err := clusterBlocks.ProposalByHeight(height)
+		if err != nil {
+			if errors.Is(err, storage.ErrNotFound) {
+				break
+			}
+			return nil, fmt.Errorf("could not get cluster block by height %v: %w", height, err)
+		}
+		light := ClusterBlockToLight(&block.Block)
+		blocks = append(blocks, light)
+	}
+	return blocks, nil
+}
+
+type LightBlock struct {
+	ID          flow.Identifier
+	Height      uint64
+	Collections []flow.Identifier
+}
+
+func BlockToLight(block *flow.Block) *LightBlock {
+	return &LightBlock{
+		ID:          block.ID(),
+		Height:      block.Height,
+		Collections: flow.EntitiesToIDs(block.Payload.Guarantees),
+	}
+}
+
+func ReadLightBlockByHeightRange(blocks storage.Blocks, startHeight uint64, endHeight uint64) ([]*LightBlock, error) {
+	bs := make([]*LightBlock, 0)
+	for height := startHeight; height <= endHeight; height++ {
+		block, err := blocks.ByHeight(height)
+		if err != nil {
+			if errors.Is(err, storage.ErrNotFound) {
+				break
+			}
+
+			return nil, fmt.Errorf("could not get block by height %v: %w", height, err)
+		}
+		light := BlockToLight(block)
+		bs = append(bs, light)
+	}
+	return bs, nil
+}
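Both range readers stop quietly at the first missing height (storage.ErrNotFound breaks the loop) instead of treating it as an error, so a caller can pass a generous end height and simply gets everything finalized so far. A hypothetical caller might look like this; dumpLightBlocks is illustrative only and assumes a storage.Blocks instance already wired to an open database:

// dumpLightBlocks prints a compact summary of finalized blocks in a range.
// Heights past the last finalized block are skipped silently because
// ReadLightBlockByHeightRange stops at the first storage.ErrNotFound.
func dumpLightBlocks(blocks storage.Blocks, start, end uint64) error {
	lights, err := ReadLightBlockByHeightRange(blocks, start, end)
	if err != nil {
		return err
	}
	for _, lb := range lights {
		fmt.Printf("height=%d id=%v collections=%d\n", lb.Height, lb.ID, len(lb.Collections))
	}
	return nil
}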
diff --git a/cmd/util/cmd/read-light-block/read_light_block_test.go b/cmd/util/cmd/read-light-block/read_light_block_test.go
new file mode 100644
index 00000000000..84e5abfc002
--- /dev/null
+++ b/cmd/util/cmd/read-light-block/read_light_block_test.go
@@ -0,0 +1,74 @@
+package read
+
+import (
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestReadClusterRange(t *testing.T) {
+
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		chain := unittest.ClusterBlockFixtures(5)
+		parent, blocks := chain[0], chain[1:]
+
+		err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error {
+			// add parent as boundary
+			err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.IndexClusterBlockHeight(lctx, rw, parent.ChainID, parent.Height, parent.ID())
+			})
+			if err != nil {
+				return err
+			}
+
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.BootstrapClusterFinalizedHeight(lctx, rw, parent.ChainID, parent.Height)
+			})
+		})
+		require.NoError(t, err)
+
+		// add blocks
+		for _, block := range blocks {
+			err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error {
+				err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(block))
+				})
+				if err != nil {
+					return err
+				}
+
+				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return operation.FinalizeClusterBlock(lctx, rw, block.ID())
+				})
+			})
+			require.NoError(t, err)
+		}
+
+		clusterBlocks := store.NewClusterBlocks(
+			db,
+			blocks[0].ChainID,
+			store.NewHeaders(metrics.NewNoopCollector(), db),
+			store.NewClusterPayloads(metrics.NewNoopCollector(), db),
+		)
+
+		startHeight := blocks[0].Height
+		endHeight := startHeight + 10 // if the end height exceeds the last finalized height, only blocks up to the last finalized height are returned
+		lights, err := ReadClusterLightBlockByHeightRange(clusterBlocks, startHeight, endHeight)
+		require.NoError(t, err)
+
+		for i, light := range lights {
+			require.Equal(t, light.ID, blocks[i].ID())
+		}
+
+		require.Equal(t, len(blocks), len(lights))
+	})
+}
diff --git a/cmd/util/cmd/read-protocol-state/cmd/blocks.go b/cmd/util/cmd/read-protocol-state/cmd/blocks.go
index 8395a977969..4c2ef0d13bd 100644
--- a/cmd/util/cmd/read-protocol-state/cmd/blocks.go
+++ b/cmd/util/cmd/read-protocol-state/cmd/blocks.go
@@ -1,6 +1,7 @@
 package cmd
 
 import (
+	"errors"
 	"fmt"
 
 	"github.com/rs/zerolog/log"
@@ -13,16 +14,17 @@ import (
 )
 
 var (
-	flagHeight  uint64
-	flagBlockID string
-	flagFinal   bool
-	flagSealed  bool
+	flagHeight   uint64
+	flagBlockID  string
+	flagFinal    bool
+	flagSealed   bool
+	flagExecuted bool
 )
 
 var Cmd = &cobra.Command{
 	Use:   "blocks",
 	Short: "Read block from protocol state",
-	Run:   run,
+	RunE:  runE,
 }
 
 func init() {
@@ -39,17 +41,22 @@
 	Cmd.Flags().BoolVar(&flagSealed, "sealed", false,
 		"get sealed block")
+
+	Cmd.Flags().BoolVar(&flagExecuted, "executed", false,
+		"get last executed and sealed block (execution node only)")
 }
 
 type Reader struct {
-	state  protocol.State
-	blocks storage.Blocks
+	state   protocol.State
+	blocks  storage.Blocks
+	commits storage.Commits
 }
 
-func NewReader(state protocol.State, storages *storage.All) *Reader {
+func NewReader(state protocol.State, blocks storage.Blocks, commits storage.Commits) *Reader {
 	return &Reader{
-		state:  state,
-		blocks: storages.Blocks,
+		state:   state,
+		blocks:  blocks,
+		commits: commits,
 	}
 }
 
@@ -101,6 +108,16 @@ func (r *Reader) GetSealed() (*flow.Block, error) {
 	return block, nil
 }
 
+func (r *Reader) GetRoot() (*flow.Block, error) {
+	header := r.state.Params().SealedRoot()
+
+	block, err := r.getBlockByHeader(header)
+	if err != nil {
+		return nil, fmt.Errorf("could not get block by header: %w", err)
+	}
+	return block, nil
+}
+
 func (r *Reader) GetBlockByID(blockID flow.Identifier) (*flow.Block, error) {
 	header, err := r.state.AtBlockID(blockID).Head()
 	if err != nil {
@@ -114,62 +131,143 @@ func (r *Reader) GetBlockByID(blockID flow.Identifier) (*flow.Block, error) {
 	return block, nil
 }
 
-func run(*cobra.Command, []string) {
-	db := common.InitStorage(flagDatadir)
-	defer db.Close()
+// IsExecuted returns true if the block has been executed.
+// This only works on execution nodes.
+func (r *Reader) IsExecuted(blockID flow.Identifier) (bool, error) {
+	_, err := r.commits.ByBlockID(blockID)
+	if err == nil {
+		return true, nil
+	}
 
-	storages := common.InitStorages(db)
-	state, err := common.InitProtocolState(db, storages)
-	if err != nil {
-		log.Fatal().Err(err).Msg("could not init protocol state")
+	// a missing state commitment means the block has not been executed yet
+	if errors.Is(err, storage.ErrNotFound) {
+		return false, nil
 	}
 
-	reader := NewReader(state, storages)
+	return false, err
+}
+
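// IsExecuted above is the standard sentinel-error existence check: a nil
// error from commits.ByBlockID means the state commitment exists, a
// storage.ErrNotFound is a clean "not executed yet", and any other error is
// a real failure. The same pattern in isolation, where exists is a purely
// hypothetical helper shown for illustration, not part of this change:
func exists(lookup func() error) (bool, error) {
	err := lookup()
	if err == nil {
		return true, nil
	}
	if errors.Is(err, storage.ErrNotFound) {
		return false, nil
	}
	return false, err
}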
root block: %w", err) + } + + // find the last executed and sealed block + for h := sealed.Height; h >= root.Height; h-- { + block, err := reader.GetBlockByHeight(h) + if err != nil { + return fmt.Errorf("could not get block by height: %v: %w", h, err) + } + + executed, err := reader.IsExecuted(block.ID()) + if err != nil { + log.Fatal().Err(err).Msgf("could not check block executed or not: %v", h) + } + + if executed { + common.PrettyPrintEntity(block) + return nil + } + } + + return fmt.Errorf("could not find executed block") } - common.PrettyPrintEntity(block) - return + + return fmt.Errorf("missing flag, try --final or --sealed or --height or --executed or --block-id, note that only one flag can be used at a time") + }) +} + +func checkOnlyOneFlagIsUsed(height uint64, blockID string, final, sealed, executed bool) error { + flags := make([]string, 0, 5) + if height > 0 { + flags = append(flags, "height") + } + if blockID != "" { + flags = append(flags, "blockID") + } + if final { + flags = append(flags, "final") + } + if sealed { + flags = append(flags, "sealed") + } + if executed { + flags = append(flags, "executed") + } + + if len(flags) != 1 { + return fmt.Errorf("only one flag can be used at a time, used flags: %v", flags) } - log.Fatal().Msgf("missing flag, try --final or --sealed or --height or --block-id") + return nil } diff --git a/cmd/util/cmd/read-protocol-state/cmd/root.go b/cmd/util/cmd/read-protocol-state/cmd/root.go index ddbc7e51aab..a789a05b6ff 100644 --- a/cmd/util/cmd/read-protocol-state/cmd/root.go +++ b/cmd/util/cmd/read-protocol-state/cmd/root.go @@ -6,6 +6,8 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" + + "github.com/onflow/flow-go/cmd/util/cmd/common" ) var ( @@ -27,8 +29,7 @@ func Execute() { } func init() { - rootCmd.PersistentFlags().StringVarP(&flagDatadir, "datadir", "d", "/var/flow/data/protocol", "directory to the badger dababase") - _ = rootCmd.MarkPersistentFlagRequired("datadir") + common.InitDataDirFlag(rootCmd, &flagDatadir) cobra.OnInitialize(initConfig) } diff --git a/cmd/util/cmd/read-protocol-state/cmd/snapshot.go b/cmd/util/cmd/read-protocol-state/cmd/snapshot.go new file mode 100644 index 00000000000..add4b75093c --- /dev/null +++ b/cmd/util/cmd/read-protocol-state/cmd/snapshot.go @@ -0,0 +1,127 @@ +package cmd + +import ( + "fmt" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + commonFuncs "github.com/onflow/flow-go/cmd/util/common" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/storage" +) + +var flagCheckpointDir string +var flagCheckpointScanStep uint +var flagCheckpointScanEndHeight int64 + +var SnapshotCmd = &cobra.Command{ + Use: "snapshot", + Short: "Read snapshot from protocol state", + RunE: runSnapshotE, +} + +func init() { + rootCmd.AddCommand(SnapshotCmd) + + SnapshotCmd.Flags().Uint64Var(&flagHeight, "height", 0, + "Block height") + + SnapshotCmd.Flags().StringVar(&flagBlockID, "block-id", "", + "Block ID (hex-encoded, 64 characters)") + + SnapshotCmd.Flags().BoolVar(&flagFinal, "final", false, + "get finalized block") + + SnapshotCmd.Flags().BoolVar(&flagSealed, "sealed", false, + "get sealed block") + + SnapshotCmd.Flags().StringVar(&flagCheckpointDir, "checkpoint-dir", "", + "(execution node only) get snapshot from the latest checkpoint file in the given checkpoint directory") + + 
SnapshotCmd.Flags().UintVar(&flagCheckpointScanStep, "checkpoint-scan-step", 0, + "(execution node only) scan step for finding sealed height by checkpoint (use with --checkpoint-dir flag)") + + SnapshotCmd.Flags().Int64Var(&flagCheckpointScanEndHeight, "checkpoint-scan-end-height", -1, + "(execution node only) scan end height for finding sealed height by checkpoint (use with --checkpoint-dir flag)") +} + +func runSnapshotE(*cobra.Command, []string) error { + lockManager := storage.MakeSingletonLockManager() + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) + state, err := common.OpenProtocolState(lockManager, db, storages) + if err != nil { + return fmt.Errorf("could not init protocol state") + } + + var snapshot protocol.Snapshot + + if flagHeight > 0 { + log.Info().Msgf("get snapshot by height: %v", flagHeight) + snapshot = state.AtHeight(flagHeight) + } else if flagBlockID != "" { + log.Info().Msgf("get snapshot by block ID: %v", flagBlockID) + blockID := flow.MustHexStringToIdentifier(flagBlockID) + snapshot = state.AtBlockID(blockID) + } else if flagFinal { + log.Info().Msgf("get last finalized snapshot") + snapshot = state.Final() + } else if flagSealed { + log.Info().Msgf("get last sealed snapshot") + snapshot = state.Sealed() + } else if flagCheckpointDir != "" { + log.Info().Msgf("get snapshot for latest checkpoint in directory %v (step: %v, endHeight: %v)", + flagCheckpointDir, flagCheckpointScanStep, flagCheckpointScanEndHeight) + var protocolSnapshot protocol.Snapshot + var sealedHeight uint64 + var sealedCommit flow.StateCommitment + var checkpointFile string + if flagCheckpointScanEndHeight < 0 { + // using default end height which is the last sealed height + protocolSnapshot, sealedHeight, sealedCommit, checkpointFile, err = commonFuncs.GenerateProtocolSnapshotForCheckpoint( + log.Logger, state, storages.Headers, storages.Seals, flagCheckpointDir, flagCheckpointScanStep) + } else { + // using customized end height + protocolSnapshot, sealedHeight, sealedCommit, checkpointFile, err = commonFuncs.GenerateProtocolSnapshotForCheckpointWithHeights( + log.Logger, state, storages.Headers, storages.Seals, flagCheckpointDir, flagCheckpointScanStep, uint64(flagCheckpointScanEndHeight)) + } + if err != nil { + return fmt.Errorf("could not generate protocol snapshot for checkpoint in dir: %v: %w", flagCheckpointDir, err) + } + + snapshot = protocolSnapshot + log.Info().Msgf("snapshot found for checkpoint file %v, sealed height %v, commit %x", checkpointFile, sealedHeight, sealedCommit) + } + + head, err := snapshot.Head() + if err != nil { + return fmt.Errorf("fail to get block of snapshot: %w", err) + } + + log.Info().Msgf("creating snapshot for block height %v, id %v", head.Height, head.ID()) + + serializable, err := inmem.FromSnapshot(snapshot) + if err != nil { + return fmt.Errorf("fail to serialize snapshot: %w", err) + } + + sealingSegment, err := serializable.SealingSegment() + if err != nil { + return fmt.Errorf("could not get sealing segment: %w", err) + } + + log.Info().Msgf("snapshot created, sealed height %v, id %v", + sealingSegment.Sealed().Height, sealingSegment.Sealed().ID()) + + log.Info().Msgf("highest finalized height %v, id %v", + sealingSegment.Highest().Height, sealingSegment.Highest().ID()) + + encoded := serializable.Encodable() + common.PrettyPrint(encoded) + return nil + }) +} diff --git a/cmd/util/cmd/reindex/cmd/results.go b/cmd/util/cmd/reindex/cmd/results.go index aee5b711d5f..33fcd92b87c 100644 --- 
a/cmd/util/cmd/reindex/cmd/results.go +++ b/cmd/util/cmd/reindex/cmd/results.go @@ -1,10 +1,13 @@ package cmd import ( + "fmt" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/storage" ) func init() { @@ -14,42 +17,40 @@ func init() { var resultsCmd = &cobra.Command{ Use: "results", Short: "reindex sealed result IDs by block ID", - Run: func(cmd *cobra.Command, args []string) { - db := common.InitStorage(flagDatadir) - defer db.Close() - storages := common.InitStorages(db) - state, err := common.InitProtocolState(db, storages) - if err != nil { - log.Fatal().Err(err).Msg("could not init protocol state") - } - - results := storages.Results - blocks := storages.Blocks - - root, err := state.Params().Root() - if err != nil { - log.Fatal().Err(err).Msg("could not get root header from protocol state") - } - - final, err := state.Final().Head() - if err != nil { - log.Fatal().Err(err).Msg("could not get final header from protocol state") - } - - for h := root.Height + 1; h <= final.Height; h++ { - block, err := blocks.ByHeight(h) + RunE: func(cmd *cobra.Command, args []string) error { + lockManager := storage.MakeSingletonLockManager() + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) + state, err := common.OpenProtocolState(lockManager, db, storages) if err != nil { - log.Fatal().Err(err).Msgf("could not get block at height %d", h) + return fmt.Errorf("could not open protocol state: %w", err) } - for _, seal := range block.Payload.Seals { - err := results.Index(seal.BlockID, seal.ResultID) + results := storages.Results + blocks := storages.Blocks + + root := state.Params().FinalizedRoot() + final, err := state.Final().Head() + if err != nil { + return fmt.Errorf("could not get final header from protocol state: %w", err) + } + + for h := root.Height + 1; h <= final.Height; h++ { + block, err := blocks.ByHeight(h) if err != nil { - log.Fatal().Err(err).Msgf("could not index result ID at height %d", h) + return fmt.Errorf("could not get block at height %d: %w", h, err) + } + + for _, seal := range block.Payload.Seals { + err := results.Index(seal.BlockID, seal.ResultID) + if err != nil { + return fmt.Errorf("could not index result ID at height %d: %w", h, err) + } } } - } - log.Info().Uint64("start_height", root.Height).Uint64("end_height", final.Height).Msg("indexed execution results") + log.Info().Uint64("start_height", root.Height).Uint64("end_height", final.Height).Msg("indexed execution results") + return nil + }) }, } diff --git a/cmd/util/cmd/reindex/cmd/root.go b/cmd/util/cmd/reindex/cmd/root.go index 66a425faa91..7abc84f81f4 100644 --- a/cmd/util/cmd/reindex/cmd/root.go +++ b/cmd/util/cmd/reindex/cmd/root.go @@ -6,6 +6,8 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" + + "github.com/onflow/flow-go/cmd/util/cmd/common" ) var ( @@ -27,7 +29,7 @@ func Execute() { } func init() { - rootCmd.PersistentFlags().StringVarP(&flagDatadir, "datadir", "d", "/var/flow/data/protocol", "directory to the badger dababase") + common.InitDataDirFlag(rootCmd, &flagDatadir) _ = rootCmd.MarkPersistentFlagRequired("data-dir") cobra.OnInitialize(initConfig) diff --git a/cmd/util/cmd/remove-execution-fork/cmd/execution-fork.go b/cmd/util/cmd/remove-execution-fork/cmd/execution-fork.go index 5cb82c63dc5..d62dc9021eb 100644 --- a/cmd/util/cmd/remove-execution-fork/cmd/execution-fork.go +++ b/cmd/util/cmd/remove-execution-fork/cmd/execution-fork.go @@ -6,7 +6,7 @@ 
 import (
 	"github.com/onflow/flow-go/cmd/util/cmd/common"
 	"github.com/onflow/flow-go/storage"
-	"github.com/onflow/flow-go/storage/badger/operation"
+	"github.com/onflow/flow-go/storage/operation"
 )
 
 func run(*cobra.Command, []string) {
@@ -14,10 +14,15 @@
 		Str("datadir", flagDatadir).
 		Msg("flags")
 
-	db := common.InitStorage(flagDatadir)
+	db, err := common.InitStorage(flagDatadir)
+	if err != nil {
+		log.Fatal().Err(err).Msg("could not initialize storage")
+	}
 	defer db.Close()
 
-	err := db.Update(operation.RemoveExecutionForkEvidence())
+	err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		return operation.RemoveExecutionForkEvidence(rw.Writer())
+	})
 
 	// for testing purpose
 	// expectedSeals := unittest.IncorporatedResultSeal.Fixtures(2)
diff --git a/cmd/util/cmd/remove-execution-fork/cmd/root.go b/cmd/util/cmd/remove-execution-fork/cmd/root.go
index eeedaee03bd..9a037c718bb 100644
--- a/cmd/util/cmd/remove-execution-fork/cmd/root.go
+++ b/cmd/util/cmd/remove-execution-fork/cmd/root.go
@@ -6,13 +6,15 @@ import (
 
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
+
+	"github.com/onflow/flow-go/cmd/util/cmd/common"
 )
 
 var (
 	flagDatadir string
 )
 
-// run with `./remove-execution-fork --datadir /var/flow/data/protocol`
+// run with `./remove-execution-fork`
 var RootCmd = &cobra.Command{
 	Use:   "remove-execution-fork",
 	Short: "remove execution fork",
@@ -27,8 +29,7 @@ func Execute() {
 }
 
 func init() {
-	RootCmd.PersistentFlags().StringVarP(&flagDatadir, "datadir", "d", "/var/flow/data/protocol", "directory to the badger dababase")
-	_ = RootCmd.MarkPersistentFlagRequired("datadir")
+	common.InitDataDirFlag(RootCmd, &flagDatadir)
 
 	cobra.OnInitialize(initConfig)
 }
diff --git a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go
index e6886772dc6..c2fc729fa21 100644
--- a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go
+++ b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go
@@ -13,17 +13,20 @@ import (
 	"github.com/onflow/flow-go/state/protocol"
 	"github.com/onflow/flow-go/storage"
 	"github.com/onflow/flow-go/storage/badger"
+	"github.com/onflow/flow-go/storage/operation/pebbleimpl"
+	storagepebble "github.com/onflow/flow-go/storage/pebble"
+	"github.com/onflow/flow-go/storage/store"
 )
 
 var (
-	flagHeight  uint64
-	flagDataDir string
+	flagHeight           uint64
+	flagChunkDataPackDir string
 )
 
 var Cmd = &cobra.Command{
 	Use:   "rollback-executed-height",
 	Short: "Rollback the executed height",
-	Run:   run,
+	RunE:  runE,
 }
 
 func init() {
@@ -33,98 +36,125 @@
 		"the height of the block to update the highest executed height")
 	_ = Cmd.MarkFlagRequired("height")
 
-	Cmd.Flags().StringVar(&flagDataDir, "datadir", "",
-		"directory that stores the protocol state")
-	_ = Cmd.MarkFlagRequired("datadir")
+	common.InitDataDirFlag(Cmd, &flagDatadir)
+
+	Cmd.Flags().StringVar(&flagChunkDataPackDir, "chunk_data_pack_dir", "/var/flow/data/chunk_data_pack",
+		"directory that stores the chunk data packs")
+	_ = Cmd.MarkFlagRequired("chunk_data_pack_dir")
 }
 
-func run(*cobra.Command, []string) {
+func runE(*cobra.Command, []string) error {
+	lockManager := storage.MakeSingletonLockManager()
+
 	log.Info().
-		Str("datadir", flagDataDir).
+		Str("datadir", flagDatadir).
+		Str("chunk_data_pack_dir", flagChunkDataPackDir).
 		Uint64("height", flagHeight).
Msg("flags") if flagHeight == 0 { // this would be a mistake that the height flag is used but no height value // was specified, so the default value 0 is used. - log.Fatal().Msg("height must be above 0") + return fmt.Errorf("height must be above 0: %v", flagHeight) } - db := common.InitStorage(flagDataDir) - storages := common.InitStorages(db) - state, err := common.InitProtocolState(db, storages) - if err != nil { - log.Fatal().Err(err).Msg("could not init protocol states") - } + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) + state, err := common.OpenProtocolState(lockManager, db, storages) + if err != nil { + return fmt.Errorf("could not open protocol states: %w", err) + } - metrics := &metrics.NoopCollector{} - transactionResults := badger.NewTransactionResults(metrics, db, badger.DefaultCacheSize) - commits := badger.NewCommits(metrics, db) - chunkDataPacks := badger.NewChunkDataPacks(metrics, db, badger.NewCollections(db, badger.NewTransactions(metrics, db)), badger.DefaultCacheSize) - results := badger.NewExecutionResults(metrics, db) - receipts := badger.NewExecutionReceipts(metrics, db, results, badger.DefaultCacheSize) - myReceipts := badger.NewMyExecutionReceipts(metrics, db, receipts) - headers := badger.NewHeaders(metrics, db) - events := badger.NewEvents(metrics, db) - serviceEvents := badger.NewServiceEvents(metrics, db) - - writeBatch := badger.NewBatch(db) - - err = removeExecutionResultsFromHeight( - writeBatch, - state, - headers, - transactionResults, - commits, - chunkDataPacks, - results, - myReceipts, - events, - serviceEvents, - flagHeight+1) + metrics := &metrics.NoopCollector{} - if err != nil { - log.Fatal().Err(err).Msgf("could not remove result from height %v", flagHeight) - } - err = writeBatch.Flush() - if err != nil { - log.Fatal().Err(err).Msgf("could not flush write batch at %v", flagHeight) - } + transactionResults, err := store.NewTransactionResults(metrics, db, badger.DefaultCacheSize) + if err != nil { + return err + } + commits := store.NewCommits(metrics, db) + results := store.NewExecutionResults(metrics, db) + receipts := store.NewExecutionReceipts(metrics, db, results, badger.DefaultCacheSize) + myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) + headers := store.NewHeaders(metrics, db) + events := store.NewEvents(metrics, db) + serviceEvents := store.NewServiceEvents(metrics, db) + transactions := store.NewTransactions(metrics, db) + collections := store.NewCollections(db, transactions) + // require the chunk data pack data must exist before returning the storage module + chunkDataPacksPebbleDB, err := storagepebble.ShouldOpenDefaultPebbleDB( + log.Logger.With().Str("pebbledb", "cdp").Logger(), flagChunkDataPackDir) + if err != nil { + return fmt.Errorf("could not open chunk data pack DB at %v: %w", flagChunkDataPackDir, err) + } + chunkDataPacksDB := pebbleimpl.ToDB(chunkDataPacksPebbleDB) + chunkDataPacks := store.NewChunkDataPacks(metrics, chunkDataPacksDB, collections, 1000) + chunkBatch := chunkDataPacksDB.NewBatch() + defer chunkBatch.Close() + + writeBatch := db.NewBatch() + defer writeBatch.Close() + + err = removeExecutionResultsFromHeight( + writeBatch, + chunkBatch, + state, + transactionResults, + commits, + chunkDataPacks, + results, + myReceipts, + events, + serviceEvents, + flagHeight+1) - header, err := state.AtHeight(flagHeight).Head() - if err != nil { - log.Fatal().Err(err).Msgf("could not get block header at height %v", flagHeight) - } + if err != nil { 
+ return fmt.Errorf("could not remove result from height %v: %w", flagHeight, err) + } - err = headers.RollbackExecutedBlock(header) - if err != nil { - log.Fatal().Err(err).Msgf("could not roll back executed block at height %v", flagHeight) - } + // remove chunk data packs first, because otherwise the index to find chunk data pack will be removed. + err = chunkBatch.Commit() + if err != nil { + return fmt.Errorf("could not commit chunk batch at %v: %w", flagHeight, err) + } + + err = writeBatch.Commit() + if err != nil { + return fmt.Errorf("could not flush write batch at %v: %w", flagHeight, err) + } + + header, err := state.AtHeight(flagHeight).Head() + if err != nil { + return fmt.Errorf("could not get block header at height %v: %w", flagHeight, err) + } - log.Info().Msgf("executed height rolled back to %v", flagHeight) + err = headers.RollbackExecutedBlock(header) + if err != nil { + return fmt.Errorf("could not roll back executed block at height %v: %w", flagHeight, err) + } + + log.Info().Msgf("executed height rolled back to %v", flagHeight) + return nil + }) } // use badger instances directly instead of stroage interfaces so that the interface don't // need to include the Remove methods func removeExecutionResultsFromHeight( - writeBatch *badger.Batch, + writeBatch storage.Batch, + chunkBatch storage.Batch, protoState protocol.State, - headers *badger.Headers, - transactionResults *badger.TransactionResults, - commits *badger.Commits, - chunkDataPacks *badger.ChunkDataPacks, - results *badger.ExecutionResults, - myReceipts *badger.MyExecutionReceipts, - events *badger.Events, - serviceEvents *badger.ServiceEvents, + transactionResults storage.TransactionResults, + commits storage.Commits, + chunkDataPacks storage.ChunkDataPacks, + results storage.ExecutionResults, + myReceipts storage.MyExecutionReceipts, + events storage.Events, + serviceEvents storage.ServiceEvents, fromHeight uint64) error { log.Info().Msgf("removing results for blocks from height: %v", fromHeight) - root, err := protoState.Params().Root() - if err != nil { - return fmt.Errorf("could not get root: %w", err) - } + root := protoState.Params().FinalizedRoot() if fromHeight <= root.Height { return fmt.Errorf("can only remove results for block above root block. fromHeight: %v, rootHeight: %v", fromHeight, root.Height) @@ -151,7 +181,7 @@ func removeExecutionResultsFromHeight( blockID := head.ID() - err = removeForBlockID(writeBatch, headers, commits, transactionResults, results, chunkDataPacks, myReceipts, events, serviceEvents, blockID) + err = removeForBlockID(writeBatch, chunkBatch, commits, transactionResults, results, chunkDataPacks, myReceipts, events, serviceEvents, blockID) if err != nil { return fmt.Errorf("could not remove result for finalized block: %v, %w", blockID, err) } @@ -170,7 +200,7 @@ func removeExecutionResultsFromHeight( total = len(pendings) for _, pending := range pendings { - err = removeForBlockID(writeBatch, headers, commits, transactionResults, results, chunkDataPacks, myReceipts, events, serviceEvents, pending) + err = removeForBlockID(writeBatch, chunkBatch, commits, transactionResults, results, chunkDataPacks, myReceipts, events, serviceEvents, pending) if err != nil { return fmt.Errorf("could not remove result for pending block %v: %w", pending, err) @@ -190,15 +220,15 @@ func removeExecutionResultsFromHeight( // All data to be removed will be removed in a batch write. 
// It bubbles up any error encountered func removeForBlockID( - writeBatch *badger.Batch, - headers *badger.Headers, - commits *badger.Commits, - transactionResults *badger.TransactionResults, - results *badger.ExecutionResults, - chunks *badger.ChunkDataPacks, - myReceipts *badger.MyExecutionReceipts, - events *badger.Events, - serviceEvents *badger.ServiceEvents, + writeBatch storage.Batch, + chunkBatch storage.Batch, + commits storage.Commits, + transactionResults storage.TransactionResults, + results storage.ExecutionResults, + chunks storage.ChunkDataPacks, + myReceipts storage.MyExecutionReceipts, + events storage.Events, + serviceEvents storage.ServiceEvents, blockID flow.Identifier, ) error { result, err := results.ByBlockID(blockID) @@ -214,12 +244,7 @@ func removeForBlockID( for _, chunk := range result.Chunks { chunkID := chunk.ID() // remove chunk data pack - err := chunks.BatchRemove(chunkID, writeBatch) - if errors.Is(err, storage.ErrNotFound) { - log.Warn().Msgf("chunk %v not found for block %v", chunkID, blockID) - continue - } - + err := chunks.BatchRemove(chunkID, chunkBatch) if err != nil { return fmt.Errorf("could not remove chunk id %v for block id %v: %w", chunkID, blockID, err) } diff --git a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go index 475c22a606b..daab8797e86 100644 --- a/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go +++ b/cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height_test.go @@ -4,54 +4,71 @@ import ( "context" "testing" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/engine/execution/state/bootstrap" "github.com/onflow/flow-go/engine/execution/testutil" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/storage" bstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) // Test save block execution related data, then remove it, and then // save again should still work func TestReExecuteBlock(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() // bootstrap to init highest executed height bootstrapper := bootstrap.NewBootstrapper(unittest.Logger()) - genesis := unittest.BlockHeaderFixture() - err := bootstrapper.BootstrapExecutionDatabase(db, unittest.StateCommitmentFixture(), genesis) + genesis := unittest.BlockFixture() + rootSeal := unittest.Seal.Fixture(unittest.Seal.WithBlock(genesis.ToHeader())) + db := pebbleimpl.ToDB(pdb) + err := bootstrapper.BootstrapExecutionDatabase(lockManager, db, rootSeal) require.NoError(t, err) // create all modules metrics := &metrics.NoopCollector{} - headers := bstorage.NewHeaders(metrics, db) - txResults := bstorage.NewTransactionResults(metrics, db, bstorage.DefaultCacheSize) - commits := bstorage.NewCommits(metrics, db) - chunkDataPacks := bstorage.NewChunkDataPacks(metrics, db, bstorage.NewCollections(db, bstorage.NewTransactions(metrics, db)), bstorage.DefaultCacheSize) - results := bstorage.NewExecutionResults(metrics, db) - receipts 
:= bstorage.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) - myReceipts := bstorage.NewMyExecutionReceipts(metrics, db, receipts) - events := bstorage.NewEvents(metrics, db) - serviceEvents := bstorage.NewServiceEvents(metrics, db) - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) - - err = headers.Store(genesis) + all := store.InitAll(metrics, db) + headers := all.Headers + blocks := all.Blocks + txResults, err := store.NewTransactionResults(metrics, db, store.DefaultCacheSize) + require.NoError(t, err) + commits := store.NewCommits(metrics, db) + chunkDataPacks := store.NewChunkDataPacks(metrics, pebbleimpl.ToDB(pdb), store.NewCollections(db, store.NewTransactions(metrics, db)), store.DefaultCacheSize) + results := all.Results + receipts := all.Receipts + myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) + events := store.NewEvents(metrics, db) + serviceEvents := store.NewServiceEvents(metrics, db) + + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // By convention, root block has no proposer signature - implementation has to handle this edge case + return blocks.BatchStore(lctx, rw, &flow.Proposal{Block: *genesis, ProposerSigData: nil}) + }) + }) require.NoError(t, err) + getLatestFinalized := func() (uint64, error) { + return genesis.Height, nil + } + // create execution state module es := state.NewExecutionState( nil, commits, nil, headers, - collections, chunkDataPacks, results, myReceipts, @@ -59,26 +76,38 @@ func TestReExecuteBlock(t *testing.T) { serviceEvents, txResults, db, + getLatestFinalized, trace.NewNoopTracer(), + nil, + false, + lockManager, ) require.NotNil(t, es) computationResult := testutil.ComputationResultFixture(t) - header := computationResult.Block.Header + header := computationResult.Block.ToHeader() - err = headers.Store(header) + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, unittest.ProposalFromBlock(computationResult.Block)) + }) + }) require.NoError(t, err) // save execution results err = es.SaveExecutionResults(context.Background(), computationResult) require.NoError(t, err) - batch := bstorage.NewBatch(db) + batch := db.NewBatch() + defer batch.Close() + + chunkBatch := pebbleimpl.ToDB(pdb).NewBatch() + defer chunkBatch.Close() // remove execution results err = removeForBlockID( batch, - headers, + chunkBatch, commits, txResults, results, @@ -94,7 +123,7 @@ func TestReExecuteBlock(t *testing.T) { // remove again, to make sure missing entires are handled properly err = removeForBlockID( batch, - headers, + chunkBatch, commits, txResults, results, @@ -105,17 +134,22 @@ func TestReExecuteBlock(t *testing.T) { header.ID(), ) - err2 := batch.Flush() - require.NoError(t, err) + require.NoError(t, chunkBatch.Commit()) + err2 := batch.Commit() + require.NoError(t, err2) - batch = bstorage.NewBatch(db) + batch = db.NewBatch() + defer batch.Close() + + chunkBatch = pebbleimpl.ToDB(pdb).NewBatch() + defer chunkBatch.Close() // remove again after flushing err = removeForBlockID( batch, - headers, + chunkBatch, commits, txResults, results, @@ -125,9 +159,11 @@ func TestReExecuteBlock(t *testing.T) { serviceEvents, header.ID(), ) - err2 = batch.Flush() - require.NoError(t, 
err) + + require.NoError(t, chunkBatch.Commit()) + err2 = batch.Commit() + require.NoError(t, err2) // re execute result @@ -139,39 +175,54 @@ func TestReExecuteBlock(t *testing.T) { // Test save block execution related data, then remove it, and then // save again with different result should work func TestReExecuteBlockWithDifferentResult(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { // bootstrap to init highest executed height bootstrapper := bootstrap.NewBootstrapper(unittest.Logger()) - genesis := unittest.BlockHeaderFixture() - err := bootstrapper.BootstrapExecutionDatabase(db, unittest.StateCommitmentFixture(), genesis) + genesis := unittest.BlockFixture() + rootSeal := unittest.Seal.Fixture() + unittest.Seal.WithBlock(genesis.ToHeader())(rootSeal) + + db := pebbleimpl.ToDB(pdb) + err := bootstrapper.BootstrapExecutionDatabase(lockManager, db, rootSeal) require.NoError(t, err) // create all modules metrics := &metrics.NoopCollector{} + all := store.InitAll(metrics, db) + headers := all.Headers + blocks := all.Blocks + commits := store.NewCommits(metrics, db) + results := store.NewExecutionResults(metrics, db) + receipts := store.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) + myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) + events := store.NewEvents(metrics, db) + serviceEvents := store.NewServiceEvents(metrics, db) + transactions := store.NewTransactions(metrics, db) + collections := store.NewCollections(db, transactions) + chunkDataPacks := store.NewChunkDataPacks(metrics, pebbleimpl.ToDB(pdb), collections, bstorage.DefaultCacheSize) + txResults, err := store.NewTransactionResults(metrics, db, bstorage.DefaultCacheSize) + require.NoError(t, err) - headers := bstorage.NewHeaders(metrics, db) - txResults := bstorage.NewTransactionResults(metrics, db, bstorage.DefaultCacheSize) - commits := bstorage.NewCommits(metrics, db) - chunkDataPacks := bstorage.NewChunkDataPacks(metrics, db, bstorage.NewCollections(db, bstorage.NewTransactions(metrics, db)), bstorage.DefaultCacheSize) - results := bstorage.NewExecutionResults(metrics, db) - receipts := bstorage.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) - myReceipts := bstorage.NewMyExecutionReceipts(metrics, db, receipts) - events := bstorage.NewEvents(metrics, db) - serviceEvents := bstorage.NewServiceEvents(metrics, db) - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) - - err = headers.Store(genesis) + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // By convention, root block has no proposer signature - implementation has to handle this edge case + return blocks.BatchStore(lctx, rw, &flow.Proposal{Block: *genesis, ProposerSigData: nil}) + }) + }) require.NoError(t, err) + getLatestFinalized := func() (uint64, error) { + return genesis.Height, nil + } + // create execution state module es := state.NewExecutionState( nil, commits, nil, headers, - collections, chunkDataPacks, results, myReceipts, @@ -179,33 +230,45 @@ func TestReExecuteBlockWithDifferentResult(t *testing.T) { serviceEvents, txResults, db, + getLatestFinalized, trace.NewNoopTracer(), + nil, + false, + lockManager, ) require.NotNil(t, es) executableBlock := 
unittest.ExecutableBlockFixtureWithParent( nil, - genesis, + genesis.ToHeader(), &unittest.GenesisStateCommitment) - header := executableBlock.Block.Header + blockID := executableBlock.Block.ID() - err = headers.Store(header) + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, unittest.ProposalFromBlock(executableBlock.Block)) + }) + }) require.NoError(t, err) computationResult := testutil.ComputationResultFixture(t) computationResult.ExecutableBlock = executableBlock - computationResult.ExecutionReceipt.ExecutionResult.BlockID = header.ID() + computationResult.ExecutionReceipt.ExecutionResult.BlockID = blockID // save execution results err = es.SaveExecutionResults(context.Background(), computationResult) require.NoError(t, err) - batch := bstorage.NewBatch(db) + batch := db.NewBatch() + defer batch.Close() + + chunkBatch := db.NewBatch() + defer chunkBatch.Close() // remove execution results err = removeForBlockID( batch, - headers, + chunkBatch, commits, txResults, results, @@ -213,20 +276,24 @@ func TestReExecuteBlockWithDifferentResult(t *testing.T) { myReceipts, events, serviceEvents, - header.ID(), + blockID, ) - err2 := batch.Flush() - require.NoError(t, err) + require.NoError(t, chunkBatch.Commit()) + err2 := batch.Commit() require.NoError(t, err2) - batch = bstorage.NewBatch(db) + batch = db.NewBatch() + defer batch.Close() + + chunkBatch = db.NewBatch() + defer chunkBatch.Close() // remove again to test for duplicates handling err = removeForBlockID( batch, - headers, + chunkBatch, commits, txResults, results, @@ -234,17 +301,18 @@ func TestReExecuteBlockWithDifferentResult(t *testing.T) { myReceipts, events, serviceEvents, - header.ID(), + blockID, ) - err2 = batch.Flush() - require.NoError(t, err) + require.NoError(t, chunkBatch.Commit()) + + err2 = batch.Commit() require.NoError(t, err2) computationResult2 := testutil.ComputationResultFixture(t) computationResult2.ExecutableBlock = executableBlock - computationResult2.ExecutionResult.BlockID = header.ID() + computationResult2.ExecutionReceipt.ExecutionResult.BlockID = blockID // re execute result err = es.SaveExecutionResults(context.Background(), computationResult2) diff --git a/cmd/util/cmd/rollback-executed-height/cmd/root.go b/cmd/util/cmd/rollback-executed-height/cmd/root.go index f2940816fdf..94ab104318a 100644 --- a/cmd/util/cmd/rollback-executed-height/cmd/root.go +++ b/cmd/util/cmd/rollback-executed-height/cmd/root.go @@ -6,17 +6,19 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" + + "github.com/onflow/flow-go/cmd/util/cmd/common" ) var ( flagDatadir string ) -// run with `./rollback-executed-height --datadir /var/flow/data/protocol --height 100` +// run with `./rollback-executed-height --height 100` var rootCmd = &cobra.Command{ Use: "rollback-executed-height", Short: "rollback executed height", - Run: run, + RunE: runE, } func Execute() { @@ -27,7 +29,7 @@ func Execute() { } func init() { - rootCmd.PersistentFlags().StringVarP(&flagDatadir, "datadir", "d", "/var/flow/data/protocol", "directory to the badger dababase") + common.InitDataDirFlag(rootCmd, &flagDatadir) _ = rootCmd.MarkPersistentFlagRequired("datadir") cobra.OnInitialize(initConfig) diff --git a/cmd/util/cmd/root.go b/cmd/util/cmd/root.go index c2700d80bbd..2fc197470c8 100644 --- a/cmd/util/cmd/root.go +++ b/cmd/util/cmd/root.go @@ -3,32 +3,59 @@ package cmd import ( "fmt" "os" 
+ "time" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/spf13/viper" + "github.com/onflow/flow-go/cmd/util/cmd/addresses" + "github.com/onflow/flow-go/cmd/util/cmd/atree_inlined_status" + bootstrap_execution_state_payloads "github.com/onflow/flow-go/cmd/util/cmd/bootstrap-execution-state-payloads" + check_storage "github.com/onflow/flow-go/cmd/util/cmd/check-storage" checkpoint_collect_stats "github.com/onflow/flow-go/cmd/util/cmd/checkpoint-collect-stats" checkpoint_list_tries "github.com/onflow/flow-go/cmd/util/cmd/checkpoint-list-tries" + checkpoint_trie_stats "github.com/onflow/flow-go/cmd/util/cmd/checkpoint-trie-stats" + db_migration "github.com/onflow/flow-go/cmd/util/cmd/db-migration" + debug_script "github.com/onflow/flow-go/cmd/util/cmd/debug-script" + debug_tx "github.com/onflow/flow-go/cmd/util/cmd/debug-tx" + diff_states "github.com/onflow/flow-go/cmd/util/cmd/diff-states" + "github.com/onflow/flow-go/cmd/util/cmd/diffkeys" epochs "github.com/onflow/flow-go/cmd/util/cmd/epochs/cmd" export "github.com/onflow/flow-go/cmd/util/cmd/exec-data-json-export" edbs "github.com/onflow/flow-go/cmd/util/cmd/execution-data-blobstore/cmd" extract "github.com/onflow/flow-go/cmd/util/cmd/execution-state-extract" + evm_state_exporter "github.com/onflow/flow-go/cmd/util/cmd/export-evm-state" ledger_json_exporter "github.com/onflow/flow-go/cmd/util/cmd/export-json-execution-state" export_json_transactions "github.com/onflow/flow-go/cmd/util/cmd/export-json-transactions" + extractpayloads "github.com/onflow/flow-go/cmd/util/cmd/extract-payloads-by-address" + find_inconsistent_result "github.com/onflow/flow-go/cmd/util/cmd/find-inconsistent-result" + find_trie_root "github.com/onflow/flow-go/cmd/util/cmd/find-trie-root" + generate_authorization_fixes "github.com/onflow/flow-go/cmd/util/cmd/generate-authorization-fixes" + "github.com/onflow/flow-go/cmd/util/cmd/leaders" + pebble_checkpoint "github.com/onflow/flow-go/cmd/util/cmd/pebble-checkpoint" read_badger "github.com/onflow/flow-go/cmd/util/cmd/read-badger/cmd" read_execution_state "github.com/onflow/flow-go/cmd/util/cmd/read-execution-state" read_hotstuff "github.com/onflow/flow-go/cmd/util/cmd/read-hotstuff/cmd" read_protocol_state "github.com/onflow/flow-go/cmd/util/cmd/read-protocol-state/cmd" index_er "github.com/onflow/flow-go/cmd/util/cmd/reindex/cmd" rollback_executed_height "github.com/onflow/flow-go/cmd/util/cmd/rollback-executed-height/cmd" + run_script "github.com/onflow/flow-go/cmd/util/cmd/run-script" "github.com/onflow/flow-go/cmd/util/cmd/snapshot" - truncate_database "github.com/onflow/flow-go/cmd/util/cmd/truncate-database" + system_addresses "github.com/onflow/flow-go/cmd/util/cmd/system-addresses" + verify_evm_offchain_replay "github.com/onflow/flow-go/cmd/util/cmd/verify-evm-offchain-replay" + verify_execution_result "github.com/onflow/flow-go/cmd/util/cmd/verify_execution_result" + "github.com/onflow/flow-go/cmd/util/cmd/version" + "github.com/onflow/flow-go/module/profiler" ) var ( - flagLogLevel string + flagLogLevel string + flagProfilerEnabled bool + flagProfilerDir string + flagProfilerInterval time.Duration + flagProfilerDuration time.Duration ) var rootCmd = &cobra.Command{ @@ -36,6 +63,10 @@ var rootCmd = &cobra.Command{ Short: "Utility functions for a flow network", PersistentPreRun: func(cmd *cobra.Command, args []string) { setLogLevel() + err := initProfiler() + if err != nil { + log.Fatal().Err(err).Msg("could not initialize profiler") + } }, } @@ -51,8 +82,16 @@ func 
Execute() { func init() { rootCmd.PersistentFlags().StringVarP(&flagLogLevel, "loglevel", "l", "info", "log level (panic, fatal, error, warn, info, debug)") + rootCmd.PersistentFlags().BoolVar(&flagProfilerEnabled, "profiler-enabled", false, "whether to enable the auto-profiler") - log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) + rootCmd.PersistentFlags().StringVar(&flagProfilerDir, "profiler-dir", "profiler", "directory to create auto-profiler profiles") + rootCmd.PersistentFlags().DurationVar(&flagProfilerInterval, "profiler-interval", 1*time.Minute, "the interval between auto-profiler runs") + rootCmd.PersistentFlags().DurationVar(&flagProfilerDuration, "profiler-duration", 10*time.Second, "the duration to run the auto-profile for") + + log.Logger = log.Output(zerolog.ConsoleWriter{ + Out: os.Stderr, + TimeFormat: time.TimeOnly, + }) cobra.OnInitialize(initConfig) @@ -60,14 +99,16 @@ func init() { } func addCommands() { + rootCmd.AddCommand(version.Cmd) rootCmd.AddCommand(extract.Cmd) rootCmd.AddCommand(export.Cmd) rootCmd.AddCommand(checkpoint_list_tries.Cmd) + rootCmd.AddCommand(checkpoint_trie_stats.Cmd) rootCmd.AddCommand(checkpoint_collect_stats.Cmd) - rootCmd.AddCommand(truncate_database.Cmd) rootCmd.AddCommand(read_badger.RootCmd) rootCmd.AddCommand(read_protocol_state.RootCmd) rootCmd.AddCommand(ledger_json_exporter.Cmd) + rootCmd.AddCommand(leaders.Cmd) rootCmd.AddCommand(epochs.RootCmd) rootCmd.AddCommand(edbs.RootCmd) rootCmd.AddCommand(index_er.RootCmd) @@ -76,6 +117,25 @@ func addCommands() { rootCmd.AddCommand(snapshot.Cmd) rootCmd.AddCommand(export_json_transactions.Cmd) rootCmd.AddCommand(read_hotstuff.RootCmd) + rootCmd.AddCommand(addresses.Cmd) + rootCmd.AddCommand(bootstrap_execution_state_payloads.Cmd) + rootCmd.AddCommand(extractpayloads.Cmd) + rootCmd.AddCommand(find_inconsistent_result.Cmd) + rootCmd.AddCommand(diff_states.Cmd) + rootCmd.AddCommand(atree_inlined_status.Cmd) + rootCmd.AddCommand(find_trie_root.Cmd) + rootCmd.AddCommand(run_script.Cmd) + rootCmd.AddCommand(system_addresses.Cmd) + rootCmd.AddCommand(check_storage.Cmd) + rootCmd.AddCommand(debug_tx.Cmd) + rootCmd.AddCommand(debug_script.Cmd) + rootCmd.AddCommand(generate_authorization_fixes.Cmd) + rootCmd.AddCommand(evm_state_exporter.Cmd) + rootCmd.AddCommand(verify_execution_result.Cmd) + rootCmd.AddCommand(verify_evm_offchain_replay.Cmd) + rootCmd.AddCommand(pebble_checkpoint.Cmd) + rootCmd.AddCommand(db_migration.Cmd) + rootCmd.AddCommand(diffkeys.Cmd) } func initConfig() { @@ -101,3 +161,23 @@ func setLogLevel() { "\"error\", \"warn\", \"info\" or \"debug\"") } } + +func initProfiler() error { + uploader := &profiler.NoopUploader{} + profilerConfig := profiler.ProfilerConfig{ + Enabled: flagProfilerEnabled, + UploaderEnabled: false, + + Dir: flagProfilerDir, + Interval: flagProfilerInterval, + Duration: flagProfilerDuration, + } + + profiler, err := profiler.New(log.Logger, uploader, profilerConfig) + if err != nil { + return fmt.Errorf("could not initialize profiler: %w", err) + } + + <-profiler.Ready() + return nil +} diff --git a/cmd/util/cmd/run-script/cmd.go b/cmd/util/cmd/run-script/cmd.go new file mode 100644 index 00000000000..520440f1601 --- /dev/null +++ b/cmd/util/cmd/run-script/cmd.go @@ -0,0 +1,568 @@ +package run_script + +import ( + "context" + "errors" + "fmt" + "io" + "os" + + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + 
"github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/engine/access/rest" + "github.com/onflow/flow-go/engine/access/rest/websockets" + "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/ledger" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + modutil "github.com/onflow/flow-go/module/util" +) + +var ErrNotImplemented = errors.New("not implemented") + +var ( + flagPayloads string + flagState string + flagStateCommitment string + flagChain string + flagServe bool + flagPort int +) + +var Cmd = &cobra.Command{ + Use: "run-script", + Short: "run a script against the execution state", + Run: run, +} + +func init() { + + // Input 1 + + Cmd.Flags().StringVar( + &flagPayloads, + "payloads", + "", + "Input payload file name", + ) + + Cmd.Flags().StringVar( + &flagState, + "state", + "", + "Input state file name", + ) + Cmd.Flags().StringVar( + &flagStateCommitment, + "state-commitment", + "", + "Input state commitment", + ) + + Cmd.Flags().StringVar( + &flagChain, + "chain", + "", + "Chain name", + ) + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().BoolVar( + &flagServe, + "serve", + false, + "serve with an HTTP server", + ) + + Cmd.Flags().IntVar( + &flagPort, + "port", + 8000, + "port for HTTP server", + ) +} + +func run(*cobra.Command, []string) { + + if flagPayloads == "" && flagState == "" { + log.Fatal().Msg("Either --payloads or --state must be provided") + } else if flagPayloads != "" && flagState != "" { + log.Fatal().Msg("Only one of --payloads or --state must be provided") + } + if flagState != "" && flagStateCommitment == "" { + log.Fatal().Msg("--state-commitment must be provided when --state is provided") + } + + chainID := flow.ChainID(flagChain) + // Validate chain ID + chain := chainID.Chain() + + log.Info().Msg("loading state ...") + + var ( + err error + payloads []*ledger.Payload + ) + if flagPayloads != "" { + _, payloads, err = util.ReadPayloadFile(log.Logger, flagPayloads) + } else { + log.Info().Msg("reading trie") + + stateCommitment := util.ParseStateCommitment(flagStateCommitment) + payloads, err = util.ReadTrieForPayloads(flagState, stateCommitment) + } + if err != nil { + log.Fatal().Err(err).Msg("failed to read payloads") + } + + log.Info().Msgf("creating registers from payloads (%d)", len(payloads)) + + registersByAccount, err := registers.NewByAccountFromPayloads(payloads) + if err != nil { + log.Fatal().Err(err) + } + log.Info().Msgf( + "created %d registers from payloads (%d accounts)", + registersByAccount.Count(), + registersByAccount.AccountCount(), + ) + + options := computation.DefaultFVMOptions(chainID, false, false) + options = append( + options, + fvm.WithContractDeploymentRestricted(false), + fvm.WithContractRemovalRestricted(false), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithTransactionFeesEnabled(false), + ) + ctx := fvm.NewContext(options...) 
+ + storageSnapshot := registers.StorageSnapshot{ + Registers: registersByAccount, + } + + vm := fvm.NewVirtualMachine() + + if flagServe { + api := &api{ + chainID: chainID, + vm: vm, + ctx: ctx, + storageSnapshot: storageSnapshot, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + irrCtx, errCh := irrecoverable.WithSignaler(ctx) + go func() { + err := modutil.WaitError(errCh, ctx.Done()) + if err != nil { + log.Fatal().Err(err).Msg("server finished with error") + } + }() + + server, err := rest.NewServer( + irrCtx, + api, + rest.Config{ + ListenAddress: fmt.Sprintf(":%d", flagPort), + }, + log.Logger, + chain, + metrics.NewNoopCollector(), + nil, + backend.Config{}, + false, + websockets.NewDefaultWebsocketConfig(), + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to create server") + } + + log.Info().Msgf("serving on port %d", flagPort) + + err = server.ListenAndServe() + if err != nil { + log.Info().Msg("server stopped") + } + } else { + code, err := io.ReadAll(os.Stdin) + if err != nil { + log.Fatal().Msgf("failed to read script: %s", err) + } + + encodedResult, err := runScript(vm, ctx, storageSnapshot, code, nil) + if err != nil { + log.Fatal().Err(err).Msg("failed to run script") + } + + _, _ = os.Stdout.Write(encodedResult) + } +} + +func runScript( + vm *fvm.VirtualMachine, + ctx fvm.Context, + storageSnapshot snapshot.StorageSnapshot, + code []byte, + arguments [][]byte, +) ( + encodedResult []byte, + err error, +) { + _, res, err := vm.Run( + ctx, + fvm.Script(code).WithArguments(arguments...), + storageSnapshot, + ) + if err != nil { + return nil, err + } + + if res.Err != nil { + return nil, res.Err + } + + encoded, err := jsoncdc.Encode(res.Value) + if err != nil { + return nil, err + } + + return encoded, nil +} + +type api struct { + chainID flow.ChainID + vm *fvm.VirtualMachine + ctx fvm.Context + storageSnapshot registers.StorageSnapshot +} + +var _ access.API = &api{} + +func (*api) Ping(_ context.Context) error { + return nil +} + +func (a *api) GetNetworkParameters(_ context.Context) accessmodel.NetworkParameters { + return accessmodel.NetworkParameters{ + ChainID: a.chainID, + } +} + +func (*api) GetNodeVersionInfo(_ context.Context) (*accessmodel.NodeVersionInfo, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetLatestBlockHeader(_ context.Context, _ bool) (*flow.Header, flow.BlockStatus, error) { + return nil, flow.BlockStatusUnknown, errors.New("unimplemented") +} + +func (*api) GetBlockHeaderByHeight(_ context.Context, _ uint64) (*flow.Header, flow.BlockStatus, error) { + return nil, flow.BlockStatusUnknown, errors.New("unimplemented") +} + +func (*api) GetBlockHeaderByID(_ context.Context, _ flow.Identifier) (*flow.Header, flow.BlockStatus, error) { + return nil, flow.BlockStatusUnknown, errors.New("unimplemented") +} + +func (*api) GetLatestBlock(_ context.Context, _ bool) (*flow.Block, flow.BlockStatus, error) { + return nil, flow.BlockStatusUnknown, errors.New("unimplemented") +} + +func (*api) GetBlockByHeight(_ context.Context, _ uint64) (*flow.Block, flow.BlockStatus, error) { + return nil, flow.BlockStatusUnknown, errors.New("unimplemented") +} + +func (*api) GetBlockByID(_ context.Context, _ flow.Identifier) (*flow.Block, flow.BlockStatus, error) { + return nil, flow.BlockStatusUnknown, errors.New("unimplemented") +} + +func (*api) GetCollectionByID(_ context.Context, _ flow.Identifier) (*flow.LightCollection, error) { + return nil, errors.New("unimplemented") +} + +func (*api) 
GetFullCollectionByID(_ context.Context, _ flow.Identifier) (*flow.Collection, error) { + return nil, errors.New("unimplemented") +} + +func (*api) SendTransaction(_ context.Context, _ *flow.TransactionBody) error { + return errors.New("unimplemented") +} + +func (*api) GetTransaction(_ context.Context, _ flow.Identifier) (*flow.TransactionBody, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetTransactionsByBlockID(_ context.Context, _ flow.Identifier) ([]*flow.TransactionBody, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetTransactionResult( + _ context.Context, + _ flow.Identifier, + _ flow.Identifier, + _ flow.Identifier, + _ entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetTransactionResultByIndex( + _ context.Context, + _ flow.Identifier, + _ uint32, + _ entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetTransactionResultsByBlockID( + _ context.Context, + _ flow.Identifier, + _ entities.EventEncodingVersion, +) ([]*accessmodel.TransactionResult, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetSystemTransaction( + _ context.Context, + _ flow.Identifier, + _ flow.Identifier, +) (*flow.TransactionBody, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetSystemTransactionResult( + _ context.Context, + _ flow.Identifier, + _ flow.Identifier, + _ entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetAccount(_ context.Context, _ flow.Address) (*flow.Account, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetAccountAtLatestBlock(_ context.Context, _ flow.Address) (*flow.Account, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetAccountAtBlockHeight(_ context.Context, _ flow.Address, _ uint64) (*flow.Account, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetAccountBalanceAtLatestBlock(_ context.Context, _ flow.Address) (uint64, error) { + return 0, errors.New("unimplemented") +} + +func (*api) GetAccountBalanceAtBlockHeight( + _ context.Context, + _ flow.Address, + _ uint64, +) (uint64, error) { + return 0, errors.New("unimplemented") +} + +func (*api) GetAccountKeyAtLatestBlock( + _ context.Context, + _ flow.Address, + _ uint32, +) (*flow.AccountPublicKey, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetAccountKeyAtBlockHeight( + _ context.Context, + _ flow.Address, + _ uint32, + _ uint64, +) (*flow.AccountPublicKey, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetAccountKeysAtLatestBlock( + _ context.Context, + _ flow.Address, +) ([]flow.AccountPublicKey, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetAccountKeysAtBlockHeight( + _ context.Context, + _ flow.Address, + _ uint64, +) ([]flow.AccountPublicKey, error) { + return nil, errors.New("unimplemented") +} + +func (a *api) ExecuteScriptAtLatestBlock( + _ context.Context, + script []byte, + arguments [][]byte, +) ([]byte, error) { + return runScript( + a.vm, + a.ctx, + a.storageSnapshot, + script, + arguments, + ) +} + +func (*api) ExecuteScriptAtBlockHeight( + _ context.Context, + _ uint64, + _ []byte, + _ [][]byte, +) ([]byte, error) { + return nil, errors.New("unimplemented") +} + +func (*api) ExecuteScriptAtBlockID( + _ context.Context, + _ 
flow.Identifier, + _ []byte, + _ [][]byte, +) ([]byte, error) { + return nil, errors.New("unimplemented") +} + +func (a *api) GetEventsForHeightRange( + _ context.Context, + _ string, + _, _ uint64, + _ entities.EventEncodingVersion, +) ([]flow.BlockEvents, error) { + return nil, errors.New("unimplemented") +} + +func (a *api) GetEventsForBlockIDs( + _ context.Context, + _ string, + _ []flow.Identifier, + _ entities.EventEncodingVersion, +) ([]flow.BlockEvents, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetLatestProtocolStateSnapshot(_ context.Context) ([]byte, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetProtocolStateSnapshotByBlockID(_ context.Context, _ flow.Identifier) ([]byte, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetProtocolStateSnapshotByHeight(_ context.Context, _ uint64) ([]byte, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetExecutionResultForBlockID(_ context.Context, _ flow.Identifier) (*flow.ExecutionResult, error) { + return nil, errors.New("unimplemented") +} + +func (*api) GetExecutionResultByID(_ context.Context, _ flow.Identifier) (*flow.ExecutionResult, error) { + return nil, errors.New("unimplemented") +} + +func (*api) SubscribeBlocksFromStartBlockID( + _ context.Context, + _ flow.Identifier, + _ flow.BlockStatus, +) subscription.Subscription { + return nil +} + +func (*api) SubscribeBlocksFromStartHeight( + _ context.Context, + _ uint64, + _ flow.BlockStatus, +) subscription.Subscription { + return nil +} + +func (*api) SubscribeBlocksFromLatest( + _ context.Context, + _ flow.BlockStatus, +) subscription.Subscription { + return nil +} + +func (*api) SubscribeBlockHeadersFromStartBlockID( + _ context.Context, + _ flow.Identifier, + _ flow.BlockStatus, +) subscription.Subscription { + return nil +} + +func (*api) SubscribeBlockHeadersFromStartHeight( + _ context.Context, + _ uint64, + _ flow.BlockStatus, +) subscription.Subscription { + return nil +} + +func (*api) SubscribeBlockHeadersFromLatest( + _ context.Context, + _ flow.BlockStatus, +) subscription.Subscription { + return nil +} + +func (*api) SubscribeBlockDigestsFromStartBlockID( + _ context.Context, + _ flow.Identifier, + _ flow.BlockStatus, +) subscription.Subscription { + return nil +} + +func (*api) SubscribeBlockDigestsFromStartHeight( + _ context.Context, + _ uint64, + _ flow.BlockStatus, +) subscription.Subscription { + return nil +} + +func (*api) SubscribeBlockDigestsFromLatest( + _ context.Context, + _ flow.BlockStatus, +) subscription.Subscription { + return nil +} + +func (a *api) SubscribeTransactionStatuses( + _ context.Context, + _ flow.Identifier, + _ entities.EventEncodingVersion, +) subscription.Subscription { + return subscription.NewFailedSubscription(ErrNotImplemented, "failed to call SubscribeTransactionStatuses") +} + +func (a *api) SendAndSubscribeTransactionStatuses( + _ context.Context, + _ *flow.TransactionBody, + _ entities.EventEncodingVersion, +) subscription.Subscription { + return subscription.NewFailedSubscription(ErrNotImplemented, "failed to call SendAndSubscribeTransactionStatuses") +} diff --git a/cmd/util/cmd/snapshot/cmd.go b/cmd/util/cmd/snapshot/cmd.go index e45102ad09d..b384c81220f 100644 --- a/cmd/util/cmd/snapshot/cmd.go +++ b/cmd/util/cmd/snapshot/cmd.go @@ -1,6 +1,7 @@ package snapshot import ( + "fmt" "os" "path/filepath" @@ -10,6 +11,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/engine/common/rpc/convert" + 
"github.com/onflow/flow-go/storage" ) var ( @@ -29,45 +31,45 @@ var ( var Cmd = &cobra.Command{ Use: "snapshot", Short: "Retrieves a protocol state snapshot from the database, which can be used to instantiate another node", - Run: run, + RunE: runE, } func init() { - Cmd.Flags().StringVar(&flagDatadir, "datadir", "", - "directory that stores the protocol state") + common.InitDataDirFlag(Cmd, &flagDatadir) _ = Cmd.MarkFlagRequired("datadir") Cmd.Flags().Uint64Var(&flagHeight, "height", 0, "the height of the snapshot to retrieve") _ = Cmd.MarkFlagRequired("height") } -func run(*cobra.Command, []string) { +func runE(*cobra.Command, []string) error { + lockManager := storage.MakeSingletonLockManager() - db := common.InitStorage(flagDatadir) - defer db.Close() + return common.WithStorage(flagDatadir, func(db storage.DB) error { + storages := common.InitStorages(db) + state, err := common.OpenProtocolState(lockManager, db, storages) + if err != nil { + return fmt.Errorf("could not open protocol state: %w", err) + } - storages := common.InitStorages(db) - state, err := common.InitProtocolState(db, storages) - if err != nil { - log.Fatal().Err(err).Msg("could not init protocol state") - } + log := log.With().Uint64("block_height", flagHeight).Logger() - log := log.With().Uint64("block_height", flagHeight).Logger() + snap := state.AtHeight(flagHeight) + encoded, err := convert.SnapshotToBytes(snap) + if err != nil { + return fmt.Errorf("failed to encode snapshot: %w", err) + } - snap := state.AtHeight(flagHeight) - encoded, err := convert.SnapshotToBytes(snap) - if err != nil { - log.Fatal().Err(err).Msg("failed to encode snapshot") - } + dir := filepath.Join(".", "root-protocol-state-snapshot.json") - dir := filepath.Join(".", "root-protocol-state-snapshot.json") + log.Info().Msgf("going to write snapshot to %s", dir) + err = os.WriteFile(dir, encoded, 0600) + if err != nil { + return fmt.Errorf("failed to write snapshot: %w", err) + } - log.Info().Msgf("going to write snapshot to %s", dir) - err = os.WriteFile(dir, encoded, 0600) - if err != nil { - log.Fatal().Err(err).Msg("failed to write snapshot") - } - - log.Info().Msgf("successfully wrote snapshot to %s", dir) + log.Info().Msgf("successfully wrote snapshot to %s", dir) + return nil + }) } diff --git a/cmd/util/cmd/system-addresses/cmd.go b/cmd/util/cmd/system-addresses/cmd.go new file mode 100644 index 00000000000..fc8c83ffc87 --- /dev/null +++ b/cmd/util/cmd/system-addresses/cmd.go @@ -0,0 +1,62 @@ +package addresses + +import ( + "bytes" + "sort" + + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" +) + +var ( + flagChain string + flagSeparator string +) + +var Cmd = &cobra.Command{ + Use: "system-addresses", + Short: "print addresses of system contracts", + Run: run, +} + +func init() { + Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name") + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().StringVar(&flagSeparator, "separator", ",", "Separator to use between addresses") +} + +func run(*cobra.Command, []string) { + chainID := flow.ChainID(flagChain) + // validate + _ = chainID.Chain() + + systemContracts := systemcontracts.SystemContractsForChain(chainID) + + addressSet := map[flow.Address]struct{}{} + for _, contract := range systemContracts.All() { + addressSet[contract.Address] = struct{}{} + } + + addresses := make([]flow.Address, 0, len(addressSet)) + for address := range addressSet { + addresses = append(addresses, address) + } + + sort.Slice(addresses, 
func(i, j int) bool { + a := addresses[i] + b := addresses[j] + return bytes.Compare(a[:], b[:]) < 0 + }) + + for i, address := range addresses { + str := address.Hex() + + if i > 0 { + print(flagSeparator) + } + print(str) + } +} diff --git a/cmd/util/cmd/truncate-database/cmd.go b/cmd/util/cmd/truncate-database/cmd.go deleted file mode 100644 index d18cbe610c4..00000000000 --- a/cmd/util/cmd/truncate-database/cmd.go +++ /dev/null @@ -1,37 +0,0 @@ -package truncate_database - -import ( - "github.com/rs/zerolog/log" - - "github.com/spf13/cobra" - - "github.com/onflow/flow-go/cmd/util/cmd/common" -) - -var ( - flagDatadir string -) - -var Cmd = &cobra.Command{ - Use: "truncate-database", - Short: "Truncates protocol state database (Possible data loss!)", - Run: run, -} - -func init() { - - Cmd.Flags().StringVar(&flagDatadir, "datadir", "", - "directory that stores the protocol state") - _ = Cmd.MarkFlagRequired("datadir") - -} - -func run(*cobra.Command, []string) { - - log.Info().Msg("Opening database with truncate") - - db := common.InitStorageWithTruncate(flagDatadir, true) - defer db.Close() - - log.Info().Msg("Truncated") -} diff --git a/cmd/util/cmd/verify-evm-offchain-replay/main.go b/cmd/util/cmd/verify-evm-offchain-replay/main.go new file mode 100644 index 00000000000..c0c82c1a8c6 --- /dev/null +++ b/cmd/util/cmd/verify-evm-offchain-replay/main.go @@ -0,0 +1,88 @@ +package verify + +import ( + "fmt" + "strconv" + "strings" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/model/flow" +) + +var ( + flagDatadir string + flagExecutionDataDir string + flagEVMStateGobDir string + flagChain string + flagFromTo string + flagSaveEveryNBlocks uint64 +) + +// usage example +// +// ./util verify-evm-offchain-replay --chain flow-testnet --from_to 211176670-211177000 +// --execution_data_dir /var/flow/data/execution_data +var Cmd = &cobra.Command{ + Use: "verify-evm-offchain-replay", + Short: "verify evm offchain replay with execution data", + Run: run, +} + +func init() { + common.InitDataDirFlag(Cmd, &flagDatadir) + + Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name") + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().StringVar(&flagExecutionDataDir, "execution_data_dir", "/var/flow/data/execution_data", + "directory that stores the execution state") + + Cmd.Flags().StringVar(&flagFromTo, "from_to", "", + "the flow height range to verify blocks, i.e, 1-1000, 1000-2000, 2000-3000, etc.") + + Cmd.Flags().StringVar(&flagEVMStateGobDir, "evm_state_gob_dir", "/var/flow/data/evm_state_gob", + "directory that stores the evm state gob files as checkpoint") + + Cmd.Flags().Uint64Var(&flagSaveEveryNBlocks, "save_every", uint64(1_000_000), + "save the evm state gob files every N blocks") +} + +func run(*cobra.Command, []string) { + chainID := flow.ChainID(flagChain) + + from, to, err := parseFromTo(flagFromTo) + if err != nil { + log.Fatal().Err(err).Msg("could not parse from_to") + } + + err = Verify(log.Logger, from, to, chainID, flagDatadir, flagExecutionDataDir, flagEVMStateGobDir, flagSaveEveryNBlocks) + if err != nil { + log.Fatal().Err(err).Msg("could not verify height") + } +} + +func parseFromTo(fromTo string) (from, to uint64, err error) { + parts := strings.Split(fromTo, "-") + if len(parts) != 2 { + return 0, 0, fmt.Errorf("invalid format: expected 'from-to', got '%s'", fromTo) + } + + from, err = strconv.ParseUint(strings.TrimSpace(parts[0]), 10, 64) + if err != nil { + return 0, 0, 
fmt.Errorf("invalid 'from' value: %w", err) + } + + to, err = strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'to' value: %w", err) + } + + if from > to { + return 0, 0, fmt.Errorf("'from' value (%d) must be less than or equal to 'to' value (%d)", from, to) + } + + return from, to, nil +} diff --git a/cmd/util/cmd/verify-evm-offchain-replay/verify.go b/cmd/util/cmd/verify-evm-offchain-replay/verify.go new file mode 100644 index 00000000000..96eeeb3c9a3 --- /dev/null +++ b/cmd/util/cmd/verify-evm-offchain-replay/verify.go @@ -0,0 +1,169 @@ +package verify + +import ( + "fmt" + "io" + "os" + "path/filepath" + + pebbleds "github.com/ipfs/go-ds-pebble" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/fvm/evm/offchain/utils" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" +) + +// Verify verifies the offchain replay of EVM blocks from the given height range +// and updates the EVM state gob files with the latest state +func Verify( + log zerolog.Logger, + from uint64, + to uint64, + chainID flow.ChainID, + dataDir string, + executionDataDir string, + evmStateGobDir string, + saveEveryNBlocks uint64, +) error { + lg := log.With(). + Uint64("from", from).Uint64("to", to). + Str("chain", chainID.String()). + Str("dataDir", dataDir). + Str("executionDataDir", executionDataDir). + Str("evmStateGobDir", evmStateGobDir). + Uint64("saveEveryNBlocks", saveEveryNBlocks). + Logger() + + lg.Info().Msgf("verifying range from %d to %d", from, to) + + return common.WithStorage(dataDir, func(db storage.DB) error { + storages, executionDataStore, dsStore, err := initStorages(db, executionDataDir) + if err != nil { + return fmt.Errorf("could not initialize storages: %w", err) + } + defer dsStore.Close() + + var store *testutils.TestValueStore + + // root block require the account status registers to be saved + isRoot := utils.IsEVMRootHeight(chainID, from) + if isRoot { + store = testutils.GetSimpleValueStore() + } else { + prev := from - 1 + store, err = loadState(prev, evmStateGobDir) + if err != nil { + return fmt.Errorf("could not load EVM state from previous height %d: %w", prev, err) + } + } + + // save state every N blocks + onHeightReplayed := func(height uint64) error { + log.Info().Msgf("replayed height %d", height) + if height%saveEveryNBlocks == 0 { + err := saveState(store, height, evmStateGobDir) + if err != nil { + return err + } + } + return nil + } + + // replay blocks + err = utils.OffchainReplayBackwardCompatibilityTest( + log, + chainID, + from, + to, + storages.Headers, + storages.Results, + executionDataStore, + store, + onHeightReplayed, + ) + + if err != nil { + return err + } + + err = saveState(store, to, evmStateGobDir) + if err != nil { + return err + } + + lg.Info().Msgf("successfully verified range from %d to %d", from, to) + + return nil + }) +} + +func saveState(store *testutils.TestValueStore, height uint64, gobDir string) error { + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(gobDir, height) + values, allocators := store.Dump() + err := testutils.SerializeState(valueFileName, values) + if err != nil { + return err + } + err = testutils.SerializeAllocator(allocatorFileName, 
allocators) + if err != nil { + return err + } + + log.Info().Msgf("saved EVM state to %s and %s", valueFileName, allocatorFileName) + + return nil +} + +func loadState(height uint64, gobDir string) (*testutils.TestValueStore, error) { + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(gobDir, height) + values, err := testutils.DeserializeState(valueFileName) + if err != nil { + return nil, fmt.Errorf("could not deserialize state %v: %w", valueFileName, err) + } + + allocators, err := testutils.DeserializeAllocator(allocatorFileName) + if err != nil { + return nil, fmt.Errorf("could not deserialize allocator %v: %w", allocatorFileName, err) + } + store := testutils.GetSimpleValueStorePopulated(values, allocators) + + log.Info().Msgf("loaded EVM state for height %d from gob file %v", height, valueFileName) + return store, nil +} + +func initStorages(db storage.DB, executionDataDir string) ( + *store.All, + execution_data.ExecutionDataGetter, + io.Closer, + error, +) { + storages := common.InitStorages(db) + + datastoreDir := filepath.Join(executionDataDir, "blobstore") + err := os.MkdirAll(datastoreDir, 0700) + if err != nil { + return nil, nil, nil, err + } + ds, err := pebbleds.NewDatastore(datastoreDir, nil) + if err != nil { + return nil, nil, nil, err + } + + executionDataBlobstore := blobs.NewBlobstore(ds) + executionDataStore := execution_data.NewExecutionDataStore(executionDataBlobstore, execution_data.DefaultSerializer) + + return storages, executionDataStore, ds, nil +} + +func evmStateGobFileNamesByEndHeight(evmStateGobDir string, endHeight uint64) (string, string) { + valueFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("values-%d.gob", endHeight)) + allocatorFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("allocators-%d.gob", endHeight)) + return valueFileName, allocatorFileName +} diff --git a/cmd/util/cmd/verify_execution_result/cmd.go b/cmd/util/cmd/verify_execution_result/cmd.go new file mode 100644 index 00000000000..5d1c39009ca --- /dev/null +++ b/cmd/util/cmd/verify_execution_result/cmd.go @@ -0,0 +1,135 @@ +package verify + +import ( + "fmt" + "strconv" + "strings" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/engine/verification/verifier" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +var ( + flagLastK uint64 + flagDatadir string + flagChunkDataPackDir string + flagChain string + flagFromTo string + flagWorkerCount uint // number of workers to verify the blocks concurrently + flagStopOnMismatch bool + flagtransactionFeesDisabled bool + flagScheduledCallbacksEnabled bool +) + +// # verify the last 100 sealed blocks +// ./util verify_execution_result --chain flow-testnet --datadir /var/flow/data/protocol --chunk_data_pack_dir /var/flow/data/chunk_data_pack --lastk 100 +// # verify the blocks from height 2000 to 3000 +// ./util verify_execution_result --chain flow-testnet --datadir /var/flow/data/protocol --chunk_data_pack_dir /var/flow/data/chunk_data_pack --from_to 2000_3000 +var Cmd = &cobra.Command{ + Use: "verify-execution-result", + Short: "verify block execution by verifying all chunks in the result", + Run: run, +} + +func init() { + Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name") + _ = Cmd.MarkFlagRequired("chain") + + common.InitDataDirFlag(Cmd, &flagDatadir) + + Cmd.Flags().StringVar(&flagChunkDataPackDir, "chunk_data_pack_dir", 
"/var/flow/data/chunk_data_pack", + "directory that stores the protocol state") + _ = Cmd.MarkFlagRequired("chunk_data_pack_dir") + + Cmd.Flags().Uint64Var(&flagLastK, "lastk", 1, + "last k sealed blocks to verify") + + Cmd.Flags().StringVar(&flagFromTo, "from_to", "", + "the height range to verify blocks (inclusive), i.e, 1_1000, 1000_2000, 2000_3000, etc.") + + Cmd.Flags().UintVar(&flagWorkerCount, "worker_count", 1, + "number of workers to use for verification, default is 1") + + Cmd.Flags().BoolVar(&flagStopOnMismatch, "stop_on_mismatch", false, "stop verification on first mismatch") + + Cmd.Flags().BoolVar(&flagtransactionFeesDisabled, "fees_disabled", false, "disable transaction fees") + + Cmd.Flags().BoolVar(&flagScheduledCallbacksEnabled, "scheduled_callbacks_enabled", fvm.DefaultScheduledCallbacksEnabled, "enable scheduled callbacks") +} + +func run(*cobra.Command, []string) { + lockManager := storage.MakeSingletonLockManager() + chainID := flow.ChainID(flagChain) + _ = chainID.Chain() + + if flagWorkerCount < 1 { + log.Fatal().Msgf("worker count must be at least 1, but got %v", flagWorkerCount) + } + + lg := log.With(). + Str("chain", string(chainID)). + Str("datadir", flagDatadir). + Str("chunk_data_pack_dir", flagChunkDataPackDir). + Uint64("lastk", flagLastK). + Str("from_to", flagFromTo). + Uint("worker_count", flagWorkerCount). + Bool("stop_on_mismatch", flagStopOnMismatch). + Logger() + + // Log configuration before starting verification so users can cancel and restart with different values if needed + if !flagStopOnMismatch { + lg.Info().Msgf("note flag --stop_on_mismatch is false, so mismatches (if any) are logged but do not stop the verification") + lg.Info().Msgf("look for 'could not verify' in the log for any mismatch, or try again with --stop_on_mismatch true to stop on first mismatch") + } + + if flagFromTo != "" { + from, to, err := parseFromTo(flagFromTo) + if err != nil { + lg.Fatal().Err(err).Msg("could not parse from_to") + } + + lg.Info().Msgf("verifying range from %d to %d", from, to) + err = verifier.VerifyRange(lockManager, from, to, chainID, flagDatadir, flagChunkDataPackDir, flagWorkerCount, flagStopOnMismatch, flagtransactionFeesDisabled, flagScheduledCallbacksEnabled) + if err != nil { + lg.Fatal().Err(err).Msgf("could not verify range from %d to %d", from, to) + } + lg.Info().Msgf("finished verified range from %d to %d", from, to) + } else { + lg.Info().Msgf("verifying last %d sealed blocks", flagLastK) + err := verifier.VerifyLastKHeight(lockManager, flagLastK, chainID, flagDatadir, flagChunkDataPackDir, flagWorkerCount, flagStopOnMismatch, flagtransactionFeesDisabled, flagScheduledCallbacksEnabled) + if err != nil { + lg.Fatal().Err(err).Msg("could not verify last k height") + } + + lg.Info().Msgf("finished verified last %d sealed blocks", flagLastK) + } +} + +func parseFromTo(fromTo string) (from, to uint64, err error) { + parts := strings.Split(fromTo, "_") + if len(parts) != 2 { + return 0, 0, fmt.Errorf("invalid format: expected 'from_to', got '%s'", fromTo) + } + + from, err = strconv.ParseUint(strings.TrimSpace(parts[0]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'from' value: %w", err) + } + + to, err = strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'to' value: %w", err) + } + + if from > to { + return 0, 0, fmt.Errorf("'from' value (%d) must be less than or equal to 'to' value (%d)", from, to) + } + + return from, to, nil +} diff --git a/cmd/util/cmd/version/cmd.go 
new file mode 100644
index 00000000000..f68e273ae91
--- /dev/null
+++ b/cmd/util/cmd/version/cmd.go
@@ -0,0 +1,23 @@
+package version
+
+import (
+	"github.com/rs/zerolog/log"
+	"github.com/spf13/cobra"
+
+	"github.com/onflow/flow-go/cmd/build"
+)
+
+var Cmd = &cobra.Command{
+	Use:   "version",
+	Short: "Prints the version of the utils tool",
+	Run:   run,
+}
+
+func run(*cobra.Command, []string) {
+	version, err := build.Semver()
+	if err != nil {
+		log.Fatal().Err(err).Msg("could not get version")
+	}
+
+	log.Info().Msgf("utils version: %s", version.String())
+}
diff --git a/cmd/util/common/address.go b/cmd/util/common/address.go
new file mode 100644
index 00000000000..60060b1072d
--- /dev/null
+++ b/cmd/util/common/address.go
@@ -0,0 +1,60 @@
+package common
+
+import (
+	"encoding/hex"
+	"fmt"
+	"strings"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+func ParseOwners(hexAddresses []string) (map[string]struct{}, error) {
+	if len(hexAddresses) == 0 {
+		return nil, fmt.Errorf("at least one address must be provided")
+	}
+
+	addresses := make(map[string]struct{}, len(hexAddresses))
+	for _, hexAddr := range hexAddresses {
+		hexAddr = strings.TrimSpace(hexAddr)
+
+		if len(hexAddr) > 0 {
+			addr, err := ParseAddress(hexAddr)
+			if err != nil {
+				return nil, err
+			}
+
+			addresses[string(addr[:])] = struct{}{}
+		} else {
+			// global registers have an empty address
+			addresses[""] = struct{}{}
+		}
+	}
+
+	return addresses, nil
+}
+
+func ParseAddress(hexAddr string) (flow.Address, error) {
+	b, err := hex.DecodeString(hexAddr)
+	if err != nil {
+		return flow.Address{}, fmt.Errorf(
+			"address %s is not hex encoded: %w",
+			strings.TrimSpace(hexAddr),
+			err,
+		)
+	}
+
+	return flow.BytesToAddress(b), nil
+}
+
+func OwnersToString(owners map[string]struct{}) string {
+	var sb strings.Builder
+	index := 0
+	for owner := range owners {
+		if index > 0 {
+			sb.WriteRune(',')
+		}
+		_, _ = fmt.Fprintf(&sb, "%x", owner)
+		index++
+	}
+	return sb.String()
+}
diff --git a/cmd/util/common/checkpoint.go b/cmd/util/common/checkpoint.go
new file mode 100644
index 00000000000..a590081daed
--- /dev/null
+++ b/cmd/util/common/checkpoint.go
@@ -0,0 +1,196 @@
+package common
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/complete/wal"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+)
+
+// FindHeightsByCheckpoints finds the sealed height that produces the state commitment included in the checkpoint file.
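+// It scans finalized heights backwards from endHeight down to startHeight, checking every
+// (blocksToSkip+1)-th height until a block's sealed state commitment matches one of the
+// commitments in the checkpoint file.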
+func FindHeightsByCheckpoints(
+	logger zerolog.Logger,
+	headers storage.Headers,
+	seals storage.Seals,
+	checkpointFilePath string,
+	blocksToSkip uint,
+	startHeight uint64,
+	endHeight uint64,
+) (
+	uint64, // sealed height that produces the state commitment included in the checkpoint file
+	flow.StateCommitment, // the state commitment that matches the sealed height
+	uint64, // the finalized height that seals the sealed height
+	error,
+) {
+
+	// find all trie root hashes in the checkpoint file
+	dir, fileName := filepath.Split(checkpointFilePath)
+	hashes, err := wal.ReadTriesRootHash(logger, dir, fileName)
+	if err != nil {
+		return 0, flow.DummyStateCommitment, 0,
+			fmt.Errorf("could not read trie root hashes from checkpoint file %v: %w",
+				checkpointFilePath, err)
+	}
+
+	// convert all trie root hashes to state commitments
+	commitments := hashesToCommits(hashes)
+
+	commitMap := make(map[flow.StateCommitment]struct{}, len(commitments))
+	for _, commit := range commitments {
+		commitMap[commit] = struct{}{}
+	}
+
+	// Iterate backwards from the end height to the start height
+	// to find the block that produces a state commitment in the given list.
+	// It is safe to skip blocks in this linear search because we expect `commitments` to hold commits
+	// for a contiguous range of blocks (for correct operation we assume `blocksToSkip` is smaller than this range).
+	// The end height must be a sealed block.
+	step := blocksToSkip + 1
+	for height := endHeight; height >= startHeight; height -= uint64(step) {
+		finalizedID, err := headers.BlockIDByHeight(height)
+		if err != nil {
+			return 0, flow.DummyStateCommitment, 0,
+				fmt.Errorf("could not find block by height %v: %w", height, err)
+		}
+
+		// since height is a sealed block height, we must be able to find the seal for this block
+		finalizedSeal, err := seals.HighestInFork(finalizedID)
+		if err != nil {
+			return 0, flow.DummyStateCommitment, 0,
+				fmt.Errorf("could not find seal for block %v at height %v: %w", finalizedID, height, err)
+		}
+
+		commit := finalizedSeal.FinalState
+
+		_, ok := commitMap[commit]
+		if ok {
+			sealedBlock, err := headers.ByBlockID(finalizedSeal.BlockID)
+			if err != nil {
+				return 0, flow.DummyStateCommitment, 0,
+					fmt.Errorf("could not find block by ID %v: %w", finalizedSeal.BlockID, err)
+			}
+
+			log.Info().Msgf("successfully found block %v (%v) that seals block %v (%v) for commit %x in checkpoint file %v",
+				height, finalizedID,
+				sealedBlock.Height, finalizedSeal.BlockID,
+				commit, checkpointFilePath)
+
+			return sealedBlock.Height, commit, height, nil
+		}
+
+		if height < uint64(step) {
+			break
+		}
+	}
+
+	return 0, flow.DummyStateCommitment, 0,
+		fmt.Errorf("could not find commit within height range [%v,%v]", startHeight, endHeight)
+}
+
+// GenerateProtocolSnapshotForCheckpoint finds a sealed block that produces the state commitment contained in the latest
+// checkpoint file, and returns a protocol snapshot for the finalized block that seals the sealed block.
+// The returned protocol snapshot can be used, along with the latest checkpoint file, to dynamically bootstrap an execution node.
+//
+// When finding a sealed block, it iterates backwards through each sealed height from the last sealed height, and checks
+// whether the state commitment matches one of the state commitments contained in the checkpoint file.
+// However, this iteration can be slow; to speed it up, we can skip some blocks each time.
+// Since a checkpoint file usually contains 500 tries, which might cover around 250 blocks (assuming 2 tries per block), +// then skipping 10 blocks each time will still allow us to find the sealed block while not missing the height contained +// by the checkpoint file. +// So the blocksToSkip parameter is used to skip some blocks each time when iterating the sealed heights. +func GenerateProtocolSnapshotForCheckpoint( + logger zerolog.Logger, + state protocol.State, + headers storage.Headers, + seals storage.Seals, + checkpointDir string, + blocksToSkip uint, +) (protocol.Snapshot, uint64, flow.StateCommitment, string, error) { + // skip X blocks (i.e. 10) each time to find the block that produces the state commitment in the checkpoint file + // since a checkpoint file contains 500 tries, this allows us to find the block more efficiently + sealed, err := state.Sealed().Head() + if err != nil { + return nil, 0, flow.DummyStateCommitment, "", err + } + endHeight := sealed.Height + + return GenerateProtocolSnapshotForCheckpointWithHeights(logger, state, headers, seals, + checkpointDir, + blocksToSkip, + endHeight, + ) +} + +// findLatestCheckpointFilePath finds the latest checkpoint file in the given directory +// it returns the header file name of the latest checkpoint file +func findLatestCheckpointFilePath(checkpointDir string) (string, error) { + _, last, err := wal.ListCheckpoints(checkpointDir) + if err != nil { + return "", fmt.Errorf("could not list checkpoints in directory %v: %w", checkpointDir, err) + } + + fileName := wal.NumberToFilename(last) + if last < 0 { + fileName = "root.checkpoint" + } + + checkpointFilePath := filepath.Join(checkpointDir, fileName) + return checkpointFilePath, nil +} + +// GenerateProtocolSnapshotForCheckpointWithHeights does the same thing as GenerateProtocolSnapshotForCheckpoint +// except that it allows the caller to specify the end height of the sealed block that we iterate backwards from. +func GenerateProtocolSnapshotForCheckpointWithHeights( + logger zerolog.Logger, + state protocol.State, + headers storage.Headers, + seals storage.Seals, + checkpointDir string, + blocksToSkip uint, + endHeight uint64, +) (protocol.Snapshot, uint64, flow.StateCommitment, string, error) { + // Stop searching after 10,000 iterations or upon reaching the minimum height, whichever comes first. + startHeight := uint64(0) + // preventing startHeight from being negative + length := uint64(blocksToSkip+1) * 10000 + if endHeight > length { + startHeight = endHeight - length + } + + checkpointFilePath, err := findLatestCheckpointFilePath(checkpointDir) + if err != nil { + return nil, 0, flow.DummyStateCommitment, "", fmt.Errorf("could not find latest checkpoint file in directory %v: %w", checkpointDir, err) + } + + log.Info(). + Uint64("start_height", startHeight). + Uint64("end_height", endHeight). + Uint("blocksToSkip", blocksToSkip). 
+ Msgf("generating protocol snapshot for checkpoint file %v", checkpointFilePath) + // find the height of the finalized block that produces the state commitment contained in the checkpoint file + sealedHeight, commit, finalizedHeight, err := FindHeightsByCheckpoints(logger, headers, seals, checkpointFilePath, blocksToSkip, startHeight, endHeight) + if err != nil { + return nil, 0, flow.DummyStateCommitment, "", fmt.Errorf("could not find sealed height in range [%v:%v] (blocksToSkip: %v) by checkpoints: %w", + startHeight, endHeight, blocksToSkip, + err) + } + + snapshot := state.AtHeight(finalizedHeight) + return snapshot, sealedHeight, commit, checkpointFilePath, nil +} + +// hashesToCommits converts a list of ledger.RootHash to a list of flow.StateCommitment +func hashesToCommits(hashes []ledger.RootHash) []flow.StateCommitment { + commits := make([]flow.StateCommitment, len(hashes)) + for i, h := range hashes { + commits[i] = flow.StateCommitment(h) + } + return commits +} diff --git a/cmd/util/ledger/migrations/account.go b/cmd/util/ledger/migrations/account.go new file mode 100644 index 00000000000..be070b31e10 --- /dev/null +++ b/cmd/util/ledger/migrations/account.go @@ -0,0 +1,62 @@ +package migrations + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/model/flow" +) + +func NewAccountCreationMigration( + address flow.Address, + logger zerolog.Logger, +) RegistersMigration { + + return func(registersByAccount *registers.ByAccount) error { + + migrationRuntime := NewBasicMigrationRuntime(registersByAccount) + + // Check if the account already exists + exists, err := migrationRuntime.Accounts.Exists(address) + if err != nil { + return fmt.Errorf( + "failed to check if account %s exists: %w", + address, + err, + ) + } + + // If the account already exists, do nothing + if exists { + logger.Info().Msgf("account %s already exists", address) + return nil + } + + // Create the account + err = migrationRuntime.Accounts.Create(nil, address) + if err != nil { + return fmt.Errorf( + "failed to create account %s: %w", + address, + err, + ) + } + + logger.Info().Msgf("created account %s", address) + + // Commit the changes to the migrated registers + err = migrationRuntime.Commit( + map[flow.Address]struct{}{ + address: {}, + }, + logger, + ) + if err != nil { + return fmt.Errorf("failed to commit account creation: %w", err) + } + + return nil + } +} diff --git a/cmd/util/ledger/migrations/account_based_migration.go b/cmd/util/ledger/migrations/account_based_migration.go index 0172e04737f..b6b4f15b369 100644 --- a/cmd/util/ledger/migrations/account_based_migration.go +++ b/cmd/util/ledger/migrations/account_based_migration.go @@ -1,189 +1,339 @@ package migrations import ( + "container/heap" + "context" "fmt" + "io" + syncAtomic "sync/atomic" + "time" - "github.com/rs/zerolog/log" + "github.com/onflow/cadence/common" + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" - "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + moduleUtil "github.com/onflow/flow-go/module/util" ) -// PayloadToAccount takes a payload and return: -// - (address, true, nil) if the payload is for an account, the account address is returned -// - ("", false, nil) if the payload is not for an account -// - ("", false, err) if running into any exception 
-func PayloadToAccount(p ledger.Payload) (string, bool, error) { - k, err := p.Key() - if err != nil { - return "", false, fmt.Errorf("could not find key for payload: %w", err) - } - id, err := KeyToRegisterID(k) - if err != nil { - return "", false, fmt.Errorf("error converting key to register ID") - } - if len([]byte(id.Owner)) != flow.AddressLength { - return "", false, nil - } - return id.Owner, true, nil +// logTopNDurations is the number of longest migrations to log at the end of the migration +const logTopNDurations = 20 + +// AccountBasedMigration is an interface for migrations that migrate account by account +// concurrently getting all the payloads for each account at a time. +type AccountBasedMigration interface { + InitMigration( + log zerolog.Logger, + registersByAccount *registers.ByAccount, + nWorkers int, + ) error + MigrateAccount( + ctx context.Context, + address common.Address, + accountRegisters *registers.AccountRegisters, + ) error + io.Closer } -// PayloadGroup groups payloads by account. -// For global payloads, it's stored under NonAccountPayloads field -type PayloadGroup struct { - NonAccountPayloads []ledger.Payload - Accounts map[string][]ledger.Payload +// NewAccountBasedMigration creates a migration function that migrates the payloads +// account by account using the given migrations +// accounts are processed concurrently using the given number of workers +// but each account is processed sequentially by the given migrations in order. +// The migrations InitMigration function is called once before the migration starts +// And the Close function is called once after the migration finishes if the migration +// is a finisher. +func NewAccountBasedMigration( + log zerolog.Logger, + nWorker int, + migrations []AccountBasedMigration, +) RegistersMigration { + return func(registersByAccount *registers.ByAccount) error { + return MigrateByAccount( + log, + nWorker, + registersByAccount, + migrations, + ) + } } -// PayloadGrouping is a reducer function that adds the given payload to the corresponding -// group under its account -func PayloadGrouping(groups *PayloadGroup, payload ledger.Payload) (*PayloadGroup, error) { - address, isAccount, err := PayloadToAccount(payload) - if err != nil { - return nil, err +// MigrateByAccount takes migrations and all the registers, grouped by account, +// and returns the migrated registers. +func MigrateByAccount( + log zerolog.Logger, + nWorker int, + registersByAccount *registers.ByAccount, + migrations []AccountBasedMigration, +) error { + accountCount := registersByAccount.AccountCount() + + if accountCount == 0 { + return nil } - if isAccount { - groups.Accounts[address] = append(groups.Accounts[address], payload) - } else { - groups.NonAccountPayloads = append(groups.NonAccountPayloads, payload) + log.Info(). + Int("inner_migrations", len(migrations)). + Int("nWorker", nWorker). + Msgf("created account migrations") + + for migrationIndex, migration := range migrations { + logger := log.With(). + Int("migration_index", migrationIndex). 
+ Logger() + + err := migration.InitMigration( + logger, + registersByAccount, + nWorker, + ) + if err != nil { + return fmt.Errorf("could not init migration: %w", err) + } } - return groups, nil -} + err := withMigrations(log, migrations, func() error { + return MigrateAccountsConcurrently( + log, + migrations, + registersByAccount, + nWorker, + ) + }) -// AccountMigrator takes all the payloads that belong to the given account -// and return the migrated payloads -type AccountMigrator interface { - MigratePayloads(account string, payloads []ledger.Payload) ([]ledger.Payload, error) -} + log.Info(). + Int("account_count", accountCount). + Msgf("finished migrating registers") -// MigrateByAccount teaks a migrator function and all the payloads, and return the migrated payloads -func MigrateByAccount(migrator AccountMigrator, allPayloads []ledger.Payload, nWorker int) ( - []ledger.Payload, error) { - groups := &PayloadGroup{ - NonAccountPayloads: make([]ledger.Payload, 0), - Accounts: make(map[string][]ledger.Payload), + if err != nil { + return fmt.Errorf("could not migrate accounts: %w", err) } - log.Info().Msgf("start grouping for a total of %v payloads", len(allPayloads)) + return nil +} - var err error - logGrouping := util.LogProgress("grouping payload", len(allPayloads), &log.Logger) - for i, payload := range allPayloads { - groups, err = PayloadGrouping(groups, payload) - if err != nil { - return nil, err +// withMigrations calls the given function and then closes the given migrations. +func withMigrations( + log zerolog.Logger, + migrations []AccountBasedMigration, + f func() error, +) (err error) { + defer func() { + for migrationIndex, migration := range migrations { + log.Info(). + Int("migration_index", migrationIndex). + Type("migration", migration). + Msg("closing migration") + if cerr := migration.Close(); cerr != nil { + log.Err(cerr).Msg("error closing migration") + if err == nil { + // only set the error if it's not already set + // so that we don't overwrite the original error + err = cerr + } + } } - logGrouping(i) - } + }() - log.Info().Msgf("finish grouping for payloads by account: %v groups in total, %v NonAccountPayloads", - len(groups.Accounts), len(groups.NonAccountPayloads)) + return f() +} - // migrate the payloads under accounts - migrated, err := MigrateGroupConcurrently(migrator, groups.Accounts, nWorker) +// MigrateAccountsConcurrently migrate the registers in the given account groups. +// The registers in each account are processed sequentially by the given migrations in order. +func MigrateAccountsConcurrently( + log zerolog.Logger, + migrations []AccountBasedMigration, + registersByAccount *registers.ByAccount, + nWorker int, +) error { - if err != nil { - return nil, fmt.Errorf("could not migrate group: %w", err) - } + accountCount := registersByAccount.AccountCount() - log.Info().Msgf("finished migrating payloads for %v account", len(groups.Accounts)) + g, ctx := errgroup.WithContext(context.Background()) - // add the non accounts which don't need to be migrated - migrated = append(migrated, groups.NonAccountPayloads...) 
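+
+	// Fan-out/fan-in: one producer goroutine fills the jobs channel, nWorker worker
+	// goroutines drain it, and a single collector reads per-account durations from results.
+	// Both channels are buffered to the full account count, so sends never block.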
+ jobs := make(chan migrateAccountGroupJob, accountCount) + results := make(chan migrationDuration, accountCount) - log.Info().Msgf("finished migrating all account based payloads, total migrated payloads: %v", len(migrated)) + workersLeft := int64(nWorker) - return migrated, nil -} + for workerIndex := 0; workerIndex < nWorker; workerIndex++ { + g.Go(func() error { + defer func() { + if syncAtomic.AddInt64(&workersLeft, -1) == 0 { + close(results) + } + }() -// MigrateGroupSequentially migrate the payloads in the given payloadsByAccount map which -// using the migrator -func MigrateGroupSequentially( - migrator AccountMigrator, - payloadsByAccount map[string][]ledger.Payload, -) ( - []ledger.Payload, error) { + for job := range jobs { + start := time.Now() + + address := job.Address + accountRegisters := job.AccountRegisters + + // Only migrate accounts, not global registers + if !util.IsServiceLevelAddress(address) { + + for migrationIndex, migration := range migrations { + + err := migration.MigrateAccount(ctx, address, accountRegisters) + if err != nil { + log.Err(err). + Int("migration_index", migrationIndex). + Type("migration", migration). + Hex("address", address[:]). + Msg("could not migrate account") + return err + } + } + } - logAccount := util.LogProgress("processing account group", len(payloadsByAccount), &log.Logger) + migrationDuration := migrationDuration{ + Address: address, + Duration: time.Since(start), + RegisterCount: accountRegisters.Count(), + } - i := 0 - migrated := make([]ledger.Payload, 0) - for address, payloads := range payloadsByAccount { - accountMigrated, err := migrator.MigratePayloads(address, payloads) + select { + case <-ctx.Done(): + return ctx.Err() + case results <- migrationDuration: + } + } + + return nil + }) + } + + g.Go(func() error { + defer close(jobs) + + // TODO: maybe adjust, make configurable, or dependent on chain + const keepTopNAccountRegisters = 20 + largestAccountRegisters := util.NewTopN[*registers.AccountRegisters]( + keepTopNAccountRegisters, + func(a, b *registers.AccountRegisters) bool { + return a.Count() < b.Count() + }, + ) + + allAccountRegisters := make([]*registers.AccountRegisters, accountCount) + + smallerAccountRegisterIndex := keepTopNAccountRegisters + err := registersByAccount.ForEachAccount( + func(accountRegisters *registers.AccountRegisters) error { + + // Try to add the account registers to the top N largest account registers. + // If there is an "overflow" element (either the added element, or an existing element), + // add it to the account registers. + // This way we can process the largest account registers first, + // and do not need to sort all account registers. + + popped, didPop := largestAccountRegisters.Add(accountRegisters) + if didPop { + allAccountRegisters[smallerAccountRegisterIndex] = popped + smallerAccountRegisterIndex++ + } + + return nil + }, + ) if err != nil { - return nil, fmt.Errorf("could not migrate for account address %v: %w", address, err) + return fmt.Errorf("failed to get all account registers: %w", err) } - migrated = append(migrated, accountMigrated...) - logAccount(i) - i++ - } + // Add the largest account registers to the account registers. + // The elements in the top N largest account registers are returned in reverse order. 
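+		// NOTE: assuming util.NewTopN is backed by a min-heap, heap.Pop returns the
+		// smallest kept element first, so filling the slice from the back leaves the
+		// largest accounts at the front, letting them be scheduled first.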
+ for index := largestAccountRegisters.Len() - 1; index >= 0; index-- { + accountRegisters := heap.Pop(largestAccountRegisters).(*registers.AccountRegisters) + allAccountRegisters[index] = accountRegisters + } - return migrated, nil -} + for _, accountRegisters := range allAccountRegisters { + owner := accountRegisters.Owner() -type jobMigrateAccountGroup struct { - Account string - Payloads []ledger.Payload -} + address, err := common.BytesToAddress([]byte(owner)) + if err != nil { + return fmt.Errorf("failed to convert owner to address: %w", err) + } -type migrationResult struct { - Migrated []ledger.Payload - Err error -} + job := migrateAccountGroupJob{ + Address: address, + AccountRegisters: accountRegisters, + } -// MigrateGroupConcurrently migrate the payloads in the given payloadsByAccount map which -// using the migrator -// It's similar to MigrateGroupSequentially, except it will migrate different groups concurrently -func MigrateGroupConcurrently( - migrator AccountMigrator, - payloadsByAccount map[string][]ledger.Payload, - nWorker int, -) ( - []ledger.Payload, error) { - - jobs := make(chan jobMigrateAccountGroup, len(payloadsByAccount)) - go func() { - for account, payloads := range payloadsByAccount { - jobs <- jobMigrateAccountGroup{ - Account: account, - Payloads: payloads, + select { + case <-ctx.Done(): + return ctx.Err() + case jobs <- job: } } - close(jobs) - }() - resultCh := make(chan *migrationResult) - for i := 0; i < int(nWorker); i++ { - go func() { - for job := range jobs { - accountMigrated, err := migrator.MigratePayloads(job.Account, job.Payloads) - resultCh <- &migrationResult{ - Migrated: accountMigrated, - Err: err, - } - } - }() - } + return nil + }) // read job results - logAccount := util.LogProgress("processing account group", len(payloadsByAccount), &log.Logger) + logAccount := moduleUtil.LogProgress( + log, + moduleUtil.DefaultLogProgressConfig( + "processing account group", + accountCount, + ), + ) + + topDurations := util.NewTopN[migrationDuration]( + logTopNDurations, + func(duration migrationDuration, duration2 migrationDuration) bool { + return duration.Duration < duration2.Duration + }, + ) + + g.Go(func() error { + for duration := range results { + topDurations.Add(duration) + logAccount(1) + } - migrated := make([]ledger.Payload, 0) + return nil + }) - for i := 0; i < len(payloadsByAccount); i++ { - result := <-resultCh - if result.Err != nil { - return nil, fmt.Errorf("fail to migrate payload: %w", result.Err) - } + // make sure to exit all workers before returning from this function + // so that the migration can be closed properly + log.Info().Msg("waiting for migration workers to finish") + err := g.Wait() + if err != nil { + return fmt.Errorf("failed to migrate accounts: %w", err) + } + + log.Info(). + Array("top_longest_migrations", loggableMigrationDurations(topDurations)). + Msgf("Top longest migrations") + + return nil +} + +type migrateAccountGroupJob struct { + Address common.Address + AccountRegisters *registers.AccountRegisters +} + +type migrationDuration struct { + Address common.Address + Duration time.Duration + RegisterCount int +} - accountMigrated := result.Migrated - migrated = append(migrated, accountMigrated...) 
- logAccount(i) +func loggableMigrationDurations(durations *util.TopN[migrationDuration]) zerolog.LogArrayMarshaler { + array := zerolog.Arr() + + for index := durations.Len() - 1; index >= 0; index-- { + duration := heap.Pop(durations).(migrationDuration) + array = array.Str(fmt.Sprintf( + "%s [registers: %d]: %s", + duration.Address.Hex(), + duration.RegisterCount, + duration.Duration.String(), + )) } - return migrated, nil + return array } diff --git a/cmd/util/ledger/migrations/account_based_migration_test.go b/cmd/util/ledger/migrations/account_based_migration_test.go new file mode 100644 index 00000000000..c06a6a7f090 --- /dev/null +++ b/cmd/util/ledger/migrations/account_based_migration_test.go @@ -0,0 +1,164 @@ +package migrations + +import ( + "context" + "fmt" + "testing" + + "github.com/onflow/cadence/common" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" +) + +func accountStatusPayload(address common.Address) *ledger.Payload { + accountStatus := environment.NewAccountStatus() + + return ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) +} + +func TestErrorPropagation(t *testing.T) { + t.Parallel() + + log := zerolog.New(zerolog.NewTestWriter(t)) + + address, err := common.HexToAddress("0x1") + require.NoError(t, err) + + migrateWith := func(mig AccountBasedMigration) error { + + // at least one payload otherwise the migration will not get called + payloads := []*ledger.Payload{ + accountStatusPayload(address), + } + + registersByAccount, err := registers.NewByAccountFromPayloads(payloads) + if err != nil { + return fmt.Errorf("could not create registers by account: %w", err) + } + + return MigrateByAccount( + log, + 10, + registersByAccount, + []AccountBasedMigration{ + mig, + }, + ) + } + + t.Run("no err", func(t *testing.T) { + t.Parallel() + + err := migrateWith( + testMigration{}, + ) + require.NoError(t, err) + }) + + t.Run("err on close", func(t *testing.T) { + t.Parallel() + + desiredErr := fmt.Errorf("test close error") + err := migrateWith( + testMigration{ + CloseFN: func() error { + return desiredErr + }, + }, + ) + require.ErrorIs(t, err, desiredErr) + }) + + t.Run("err on init", func(t *testing.T) { + t.Parallel() + + desiredErr := fmt.Errorf("test init error") + err := migrateWith( + testMigration{ + InitMigrationFN: func( + log zerolog.Logger, + registersByAccount *registers.ByAccount, + nWorkers int, + ) error { + return desiredErr + }, + }, + ) + require.ErrorIs(t, err, desiredErr) + }) + + t.Run("err on migrate", func(t *testing.T) { + t.Parallel() + + desiredErr := fmt.Errorf("test migrate error") + err := migrateWith( + testMigration{ + MigrateAccountFN: func( + _ context.Context, + _ common.Address, + _ *registers.AccountRegisters, + ) error { + return desiredErr + }, + }, + ) + require.ErrorIs(t, err, desiredErr) + }) +} + +type testMigration struct { + InitMigrationFN func( + log zerolog.Logger, + registersByAccount *registers.ByAccount, + nWorkers int, + ) error + MigrateAccountFN func( + ctx context.Context, + address common.Address, + accountRegisters *registers.AccountRegisters, + ) error + CloseFN func() error +} + +var _ AccountBasedMigration = &testMigration{} + +func (t testMigration) 
InitMigration( + log zerolog.Logger, + registersByAccount *registers.ByAccount, + nWorkers int, +) error { + if t.InitMigrationFN != nil { + return t.InitMigrationFN(log, registersByAccount, nWorkers) + } + return nil +} + +func (t testMigration) MigrateAccount( + ctx context.Context, + address common.Address, + accountRegisters *registers.AccountRegisters, +) error { + + if t.MigrateAccountFN != nil { + return t.MigrateAccountFN(ctx, address, accountRegisters) + } + return nil +} + +func (t testMigration) Close() error { + if t.CloseFN != nil { + return t.CloseFN() + } + return nil +} diff --git a/cmd/util/ledger/migrations/account_key_deduplication_encoder.go b/cmd/util/ledger/migrations/account_key_deduplication_encoder.go new file mode 100644 index 00000000000..9944d0a301d --- /dev/null +++ b/cmd/util/ledger/migrations/account_key_deduplication_encoder.go @@ -0,0 +1,535 @@ +package migrations + +import ( + "encoding/binary" + "fmt" + "math" + + accountkeymetadata "github.com/onflow/flow-go/fvm/environment/account-key-metadata" + "github.com/onflow/flow-go/model/flow" +) + +const ( + lengthPrefixSize = 4 + runLengthSize = 2 +) + +// Account Public Key Weight and Revoked Status + +const ( + // maxRunLengthInEncodedStatusGroup (65535) is the max run length that + // can be stored in each RLE encoded status group. + maxRunLengthInEncodedStatusGroup = math.MaxUint16 + + // weightAndRevokedStatusSize (2) is the number of bytes used to store + // the weight and status together as a uint16: + // - the high bit is the revoked status + // - the remaining 15 bits is the weight (more than enough for its 0..1000 range) + weightAndRevokedStatusSize = 2 + + // weightAndRevokedStatusGroupSize (4) is the number of bytes used to store + // the uint16 run length and the uint16 representing weight and revoked status. + weightAndRevokedStatusGroupSize = runLengthSize + weightAndRevokedStatusSize + + // revokedMask is the bitmask for setting or getting the revoked flag stored + // as the high bit of a uint16. + revokedMask = 0x8000 + + // weightMask is the bitmask for getting the weight from the low 15 bits (fifteen bits) of + // the uint16 containing the unsigned 15-bit weight. + weightMask = 0x7fff +) + +type accountPublicKeyWeightAndRevokedStatus struct { + weight uint16 // Weight is 0-1000 + revoked bool +} + +// accountPublicKeyWeightAndRevokedStatus is encoded using RLE: +// - run length (2 bytes) +// - value (2 bytes): revoked status is the high bit and weight is the remaining 15 bits. +// NOTE: if number of elements in a run-length group exceeds maxRunLengthInEncodedStatusGroup, +// a new group is created with remaining run-length and the same weight and revoked status. 
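+//
+// Hypothetical example (not from the source): three keys with weight 1000 that are not
+// revoked, followed by one revoked key with weight 500, encode as two groups:
+//   {run length 3, value 0x03E8} -> bytes 0x00 0x03 0x03 0xE8
+//   {run length 1, value 0x81F4} -> bytes 0x00 0x01 0x81 0xF4 (revoked bit 0x8000 | 500)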
+func encodeAccountPublicKeyWeightsAndRevokedStatus(weightsAndRevoked []accountPublicKeyWeightAndRevokedStatus) ([]byte, error) {
+	if len(weightsAndRevoked) == 0 {
+		return nil, nil
+	}
+
+	buf := make([]byte, 0, len(weightsAndRevoked)*(weightAndRevokedStatusGroupSize))
+
+	off := 0
+	for i := 0; i < len(weightsAndRevoked); {
+		runLength := 1
+		value := weightsAndRevoked[i]
+		i++
+
+		// Find group boundary
+		for i < len(weightsAndRevoked) && runLength < maxRunLengthInEncodedStatusGroup && weightsAndRevoked[i] == value {
+			runLength++
+			i++
+		}
+
+		// Encode weight and revoked status group
+
+		buf = buf[:off+weightAndRevokedStatusGroupSize]
+
+		binary.BigEndian.PutUint16(buf[off:], uint16(runLength))
+		off += runLengthSize
+
+		weightAndRevoked := value.weight
+		if value.revoked {
+			weightAndRevoked |= revokedMask // Turn on high bit for revoked status
+		}
+
+		binary.BigEndian.PutUint16(buf[off:], weightAndRevoked)
+		off += weightAndRevokedStatusSize
+	}
+
+	return buf, nil
+}
+
+func decodeAccountPublicKeyWeightAndRevokedStatusGroups(b []byte) ([]accountPublicKeyWeightAndRevokedStatus, error) {
+	if len(b)%weightAndRevokedStatusGroupSize != 0 {
+		return nil, fmt.Errorf("failed to decode weight and revoked status: expect multiple of %d bytes, got %d", weightAndRevokedStatusGroupSize, len(b))
+	}
+
+	statuses := make([]accountPublicKeyWeightAndRevokedStatus, 0, len(b)/weightAndRevokedStatusGroupSize)
+
+	for i := 0; i < len(b); i += weightAndRevokedStatusGroupSize {
+		runLength := uint32(binary.BigEndian.Uint16(b[i:]))
+		weightAndRevoked := binary.BigEndian.Uint16(b[i+2 : i+4])
+
+		status := accountPublicKeyWeightAndRevokedStatus{
+			weight: weightAndRevoked & weightMask,
+			revoked: (weightAndRevoked & revokedMask) > 0,
+		}
+
+		for range runLength {
+			statuses = append(statuses, status)
+		}
+	}
+
+	return statuses, nil
+}
+
+// Account Public Key Index to Stored Public Key Index Mappings
+const (
+	storedKeyIndexSize = 4
+	mappingGroupSize = runLengthSize + storedKeyIndexSize
+	consecutiveGroupFlagMask = 0x8000
+	lengthMask = 0x7fff
+)
+
+// encodeAccountPublicKeyMapping encodes keyIndexMappings into concatenated run-length groups.
+// Each run-length group is encoded as:
+// - run length in the low 15 bits of a uint16, with the high bit flagging a consecutive group (2 bytes)
+// - stored key index (the start value for consecutive groups) as uint32 (4 bytes)
+// For example, an account has 8 account keys with 5 unique keys.
+// Unique key index mapping is {Key0, Key1, Key1, Key1, Key1, Key2, Key3, Key4}.
+// The example's encoded mapping would be:
+// { {run-length 1, value 0}, {run-length 4, value 1}, {consecutive-run-length 3, start-value 2}}
+func encodeAccountPublicKeyMapping(mapping []uint32) ([]byte, error) {
+	if len(mapping) == 0 {
+		return nil, nil
+	}
+
+	firstGroup := accountkeymetadata.NewMappingGroup(1, mapping[0], false)
+
+	if len(mapping) == 1 {
+		return firstGroup.Encode(), nil
+	}
+
+	groups := make([]*accountkeymetadata.MappingGroup, 0, len(mapping))
+	groups = append(groups, firstGroup)
+
+	lastGroup := firstGroup
+	for _, storedKeyIndex := range mapping[1:] {
+		if !lastGroup.TryMerge(storedKeyIndex) {
+			// Create and append new group
+			lastGroup = accountkeymetadata.NewMappingGroup(1, storedKeyIndex, false)
+			groups = append(groups, lastGroup)
+		}
+	}
+
+	return accountkeymetadata.MappingGroups(groups).Encode(), nil
+}
+
+func decodeAccountPublicKeyMapping(b []byte) ([]uint32, error) {
+	if len(b)%mappingGroupSize != 0 {
+		return nil, fmt.Errorf("failed to decode mappings: expect multiple of %d bytes, got %d", mappingGroupSize, len(b))
+	}
+
+	mapping := make([]uint32, 0, len(b)/mappingGroupSize)
+
+	for i := 0; i < len(b); i += mappingGroupSize {
+		runLength := binary.BigEndian.Uint16(b[i:])
+		storedKeyIndex := binary.BigEndian.Uint32(b[i+runLengthSize:])
+
+		if consecutiveBit := (runLength & consecutiveGroupFlagMask) >> 15; consecutiveBit == 1 {
+			runLength &= lengthMask
+
+			for i := range runLength {
+				mapping = append(mapping, storedKeyIndex+uint32(i))
+			}
+		} else {
+			for range runLength {
+				mapping = append(mapping, storedKeyIndex)
+			}
+		}
+	}
+
+	return mapping, nil
+}
+
+// Digest list
+
+const digestSize = 8
+
+// encodeDigestList encodes digests into concatenated big-endian uint64 values.
+func encodeDigestList(digests []uint64) []byte {
+	if len(digests) == 0 {
+		return nil
+	}
+	encodedDigestList := make([]byte, digestSize*len(digests))
+	off := 0
+	for _, digest := range digests {
+		binary.BigEndian.PutUint64(encodedDigestList[off:], digest)
+		off += digestSize
+	}
+	return encodedDigestList
+}
+
+func decodeDigestList(b []byte) ([]uint64, error) {
+	if len(b)%digestSize != 0 {
+		return nil, fmt.Errorf("failed to decode digest list: expect multiple of %d bytes, got %d", digestSize, len(b))
+	}
+
+	storedDigestCount := len(b) / digestSize
+
+	digests := make([]uint64, 0, storedDigestCount)
+
+	for i := 0; i < len(b); i += digestSize {
+		digests = append(digests, binary.BigEndian.Uint64(b[i:]))
+	}
+
+	return digests, nil
+}
+
+// Public Key Batch Register
+
+const (
+	maxEncodedKeySize = math.MaxUint8 // Encoded public key size is ~70 bytes
+)
+
+// PublicKeyBatch register contains up to maxPublicKeyCountInBatch encoded public keys.
+// Each public key is encoded as:
+// - length prefixed encoded public key
+func encodePublicKeysInBatches(encodedPublicKey [][]byte, maxPublicKeyCountInBatch int) ([][]byte, error) {
+	// Return early if there is only one encoded public key (first public key).
+	// First public key is stored in its own register, not in batch public key register.
+	if len(encodedPublicKey) <= 1 {
+		return nil, nil
+	}
+
+	// Reset first encoded public key to nil during encoding
+	// to avoid encoding first account public key in batch public key.
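+	// For example (illustrative only): with a batch size of 20 and 31 keys, batch 0
+	// holds keys 0..19 with key 0 written as a zero-length entry (keeping in-batch
+	// positions aligned with key indexes), and batch 1 holds keys 20..30.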
+ + firstEncodedPublicKey := encodedPublicKey[0] + defer func() { + encodedPublicKey[0] = firstEncodedPublicKey + }() + + encodedPublicKey[0] = nil + + values := make([][]byte, 0, len(encodedPublicKey)/maxPublicKeyCountInBatch+1) + + for i := 0; i < len(encodedPublicKey); { + batchCount := min(maxPublicKeyCountInBatch, len(encodedPublicKey)-i) + + encodedBatchPublicKey, err := encodeBatchPublicKey(encodedPublicKey[i : i+batchCount]) + if err != nil { + return nil, err + } + + values = append(values, encodedBatchPublicKey) + + i += batchCount + } + + return values, nil +} + +func encodeBatchPublicKey(encodedPublicKey [][]byte) ([]byte, error) { + + size := 0 + for _, encoded := range encodedPublicKey { + if len(encoded) > maxEncodedKeySize { + return nil, fmt.Errorf("encoded key size is %d bytes, exceeded max size %d", len(encoded), maxEncodedKeySize) + } + size += 1 + len(encoded) + } + + buf := make([]byte, size) + off := 0 + for _, encoded := range encodedPublicKey { + buf[off] = byte(len(encoded)) + off++ + + n := copy(buf[off:], encoded) + off += n + } + + return buf, nil +} + +func decodeBatchPublicKey(b []byte) ([][]byte, error) { + if len(b) == 0 { + return nil, nil + } + + encodedPublicKeys := make([][]byte, 0, maxPublicKeyCountInBatch) + + off := 0 + for off < len(b) { + size := int(b[off]) + off++ + + if off+size > len(b) { + return nil, fmt.Errorf("failed to decode batch public key: off %d + size %d out of bounds %d: %x", off, size, len(b), b) + } + + encodedPublicKey := b[off : off+size] + off += size + + encodedPublicKeys = append(encodedPublicKeys, encodedPublicKey) + } + + if off != len(b) { + return nil, fmt.Errorf("failed to decode batch public key: trailing data (%d bytes): %x", len(b)-off, b) + } + + return encodedPublicKeys, nil +} + +// Account Status register + +const ( + versionMask = 0xf0 + flagMask = 0x0f + deduplicatedAccountStatusV4VerionAndFlagByte = 0x41 + nondeduplicatedAccountStatusV4VerionAndFlagByte = 0x40 + accountStatusV4MinimumSize = 29 // Same size as account status v3 +) + +// encodeAccountStatusV4WithPublicKeyMetadata encodes public key metadata section +// in "a.s" register depending on deduplicated flag. +// +// With deduplicated flag, account status is encoded as: +// - account status v3 (29 bytes) +// - length prefixed list of account public key weight and revoked status starting from key index 1 +// - startKeyIndex (4 bytes) + length prefixed list of account public key index mappings to stored key index +// - startStoredKeyIndex (4 bytes) + length prefixed list of last N stored key digests +// +// Without deduplicated flag, account status is encoded as: +// - account status v3 (29 bytes) +// - length prefixed list of account public key weight and revoked status starting from key index 1 +// - startStoredKeyIndex (4 bytes) + length prefixed list of last N stored key digests +func encodeAccountStatusV4WithPublicKeyMetadata( + original []byte, + weightAndRevokedStatus []accountPublicKeyWeightAndRevokedStatus, + startKeyIndexForDigests uint32, + keyDigests []uint64, + startKeyIndexForMappings uint32, + accountPublicKeyMappings []uint32, + deduplicated bool, +) ([]byte, error) { + + // Return early if the original account status payload contains any optional fields. 
+ if len(original) != accountStatusV4MinimumSize { + return nil, fmt.Errorf("failed to encode account status payload: original payload has %d bytes, expect %d bytes", len(original), accountStatusV4MinimumSize) + } + + // Encode list of account public key weight and revoked status + encodedAccountPublicKeyWeightAndRevokedStatus, err := encodeAccountPublicKeyWeightsAndRevokedStatus(weightAndRevokedStatus) + if err != nil { + return nil, err + } + + // Encode list of key digests + encodedKeyDigests := encodeDigestList(keyDigests) + + // Encode mappings for deduplicated account public keys + var encodedAccountPublicKeyMapping []byte + if deduplicated { + encodedAccountPublicKeyMapping, err = encodeAccountPublicKeyMapping(accountPublicKeyMappings) + if err != nil { + return nil, err + } + } + + newAccountStatusPayloadSize := len(original) + + lengthPrefixSize + len(encodedAccountPublicKeyWeightAndRevokedStatus) + // length prefixed account public key weight and revoked status + 4 + // start stored key index for digests + lengthPrefixSize + len(encodedKeyDigests) // length prefixed digests + + if deduplicated { + newAccountStatusPayloadSize += 4 + // start key index for mapping + lengthPrefixSize + len(encodedAccountPublicKeyMapping) // used to retrieve account public key + } + + buf := make([]byte, newAccountStatusPayloadSize) + off := 0 + + // Append account status v4 version and flag + if deduplicated { + buf[0] = deduplicatedAccountStatusV4VerionAndFlagByte + } else { + buf[0] = nondeduplicatedAccountStatusV4VerionAndFlagByte + } + off++ + + // Append original content, except for the flag byte + n := copy(buf[off:], original[1:]) + off += n + + // Append length prefixed encoded revoked status + binary.BigEndian.PutUint32(buf[off:], uint32(len(encodedAccountPublicKeyWeightAndRevokedStatus))) + off += 4 + + n = copy(buf[off:], encodedAccountPublicKeyWeightAndRevokedStatus) + off += n + + if deduplicated { + // Append start key index for mapping + binary.BigEndian.PutUint32(buf[off:], startKeyIndexForMappings) + off += 4 + + // Append length prefixed account public key mapping + binary.BigEndian.PutUint32(buf[off:], uint32(len(encodedAccountPublicKeyMapping))) + off += 4 + + n = copy(buf[off:], encodedAccountPublicKeyMapping) + off += n + } + + // Append start key index for digests + binary.BigEndian.PutUint32(buf[off:], startKeyIndexForDigests) + off += 4 + + // Append length prefixed key digests + binary.BigEndian.PutUint32(buf[off:], uint32(len(encodedKeyDigests))) + off += 4 + + n = copy(buf[off:], encodedKeyDigests) + off += n + + return buf[:off], nil +} + +func decodeAccountStatusKeyMetadata(b []byte, deduplicated bool) ( + weightAndRevokedStatus []accountPublicKeyWeightAndRevokedStatus, + startKeyIndexForMapping uint32, + accountPublicKeyMappings []uint32, + startKeyIndexForDigests uint32, + digests []uint64, + err error, +) { + // Decode weight and revoked list + + var weightAndRevokedGroupsData []byte + weightAndRevokedGroupsData, b, err = parseNextLengthPrefixedData(b) + if err != nil { + err = fmt.Errorf("failed to decode AccountStatusV4: %w", err) + return + } + + weightAndRevokedStatus, err = decodeAccountPublicKeyWeightAndRevokedStatusGroups(weightAndRevokedGroupsData) + if err != nil { + err = fmt.Errorf("failed to decode weight and revoked status list: %w", err) + return + } + + // Decode account public key mapping if deduplication is on + + if deduplicated { + if len(b) < 4 { + err = fmt.Errorf("failed to decode AccountStatusV4: expect 4 bytes of start key index for mapping, 
got %d bytes", len(b)) + return + } + + startKeyIndexForMapping = binary.BigEndian.Uint32(b) + + b = b[4:] + + var mappingData []byte + mappingData, b, err = parseNextLengthPrefixedData(b) + if err != nil { + err = fmt.Errorf("failed to decode AccountStatusV4: %w", err) + return + } + + accountPublicKeyMappings, err = decodeAccountPublicKeyMapping(mappingData) + if err != nil { + err = fmt.Errorf("failed to decode account public key mappings: %w", err) + return + } + } + + // Decode digests list + + if len(b) < 4 { + err = fmt.Errorf("failed to decode AccountStatusV4: expect 4 bytes of start stored key index for digests, got %d bytes", len(b)) + return + } + + startKeyIndexForDigests = binary.BigEndian.Uint32(b) + b = b[4:] + + var digestsData []byte + digestsData, b, err = parseNextLengthPrefixedData(b) + if err != nil { + err = fmt.Errorf("failed to decode AccountStatusV4: %w", err) + return + } + + digests, err = decodeDigestList(digestsData) + if err != nil { + err = fmt.Errorf("failed to decode digests: %w", err) + return + } + + // Check trailing data + + if len(b) != 0 { + err = fmt.Errorf("failed to decode AccountStatusV4: got %d extra bytes", len(b)) + return + } + + return +} + +func parseNextLengthPrefixedData(b []byte) (next []byte, rest []byte, err error) { + if len(b) < lengthPrefixSize { + return nil, nil, fmt.Errorf("failed to decode data: expect at least 4 bytes, got %d bytes", len(b)) + } + + length := binary.BigEndian.Uint32(b[:lengthPrefixSize]) + + if len(b) < lengthPrefixSize+int(length) { + return nil, nil, fmt.Errorf("failed to decode data: expect at least %d bytes, got %d bytes", lengthPrefixSize+int(length), len(b)) + } + + b = b[lengthPrefixSize:] + return b[:length], b[length:], nil +} + +// Stored Public Key + +func encodeStoredPublicKeyFromAccountPublicKey(a flow.AccountPublicKey) ([]byte, error) { + storedPublicKey := flow.StoredPublicKey{ + PublicKey: a.PublicKey, + SignAlgo: a.SignAlgo, + HashAlgo: a.HashAlgo, + } + return flow.EncodeStoredPublicKey(storedPublicKey) +} diff --git a/cmd/util/ledger/migrations/account_key_deduplication_encoder_test.go b/cmd/util/ledger/migrations/account_key_deduplication_encoder_test.go new file mode 100644 index 00000000000..84f9a8b1d95 --- /dev/null +++ b/cmd/util/ledger/migrations/account_key_deduplication_encoder_test.go @@ -0,0 +1,553 @@ +package migrations + +import ( + "crypto/rand" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/environment" +) + +func TestAccountPublicKeyWeightsAndRevokedStatusSerizliation(t *testing.T) { + testcases := []struct { + name string + status []accountPublicKeyWeightAndRevokedStatus + expected []byte + }{ + { + name: "empty", + status: nil, + expected: nil, + }, + { + name: "one status", + status: []accountPublicKeyWeightAndRevokedStatus{ + {weight: 1000, revoked: false}, + }, + expected: []byte{0, 1, 0x03, 0xe8}, + }, + { + name: "multiple identical status", + status: []accountPublicKeyWeightAndRevokedStatus{ + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + }, + expected: []byte{0, 3, 0x83, 0xe8}, + }, + { + name: "different status", + status: []accountPublicKeyWeightAndRevokedStatus{ + {weight: 1, revoked: false}, + {weight: 2, revoked: false}, + {weight: 2, revoked: true}, + }, + expected: []byte{ + 0, 1, 0, 1, + 0, 1, 0, 2, + 0, 1, 0x80, 2, + }, + }, + { + name: "different status", + status: []accountPublicKeyWeightAndRevokedStatus{ + {weight: 1, revoked: false}, + {weight: 1, revoked: 
false}, + {weight: 2, revoked: true}, + }, + expected: []byte{ + 0, 2, 0, 1, + 0, 1, 0x80, 2, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + b, err := encodeAccountPublicKeyWeightsAndRevokedStatus(tc.status) + require.NoError(t, err) + require.Equal(t, tc.expected, b) + + decodedStatus, err := decodeAccountPublicKeyWeightAndRevokedStatusGroups(b) + require.NoError(t, err) + require.ElementsMatch(t, tc.status, decodedStatus) + }) + } + + t.Run("run length around max group count", func(t *testing.T) { + testcases := []struct { + name string + status accountPublicKeyWeightAndRevokedStatus + count uint32 + expected []byte + }{ + { + name: "run length maxRunLengthInEncodedStatusGroup - 1", + status: accountPublicKeyWeightAndRevokedStatus{weight: 1000, revoked: true}, + count: maxRunLengthInEncodedStatusGroup - 1, + expected: []byte{ + 0xff, 0xfe, 0x83, 0xe8, + }, + }, + { + name: "run length maxRunLengthInEncodedStatusGroup ", + status: accountPublicKeyWeightAndRevokedStatus{weight: 1000, revoked: true}, + count: maxRunLengthInEncodedStatusGroup, + expected: []byte{ + 0xff, 0xff, 0x83, 0xe8, + }, + }, + { + name: "run length maxRunLengthInEncodedStatusGroup + 1", + status: accountPublicKeyWeightAndRevokedStatus{weight: 1000, revoked: true}, + count: maxRunLengthInEncodedStatusGroup + 1, + expected: []byte{ + 0xff, 0xff, 0x83, 0xe8, + 0x00, 0x01, 0x83, 0xe8, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + status := make([]accountPublicKeyWeightAndRevokedStatus, tc.count) + for i := range len(status) { + status[i] = tc.status + } + + b, err := encodeAccountPublicKeyWeightsAndRevokedStatus(status) + require.NoError(t, err) + require.Equal(t, tc.expected, b) + + decodedStatus, err := decodeAccountPublicKeyWeightAndRevokedStatusGroups(b) + require.NoError(t, err) + require.ElementsMatch(t, status, decodedStatus) + }) + } + }) +} + +func TestMappingGroupSerialization(t *testing.T) { + testcases := []struct { + name string + mappings []uint32 + expected []byte + }{ + { + name: "1 group with run length 1", + mappings: []uint32{1}, + expected: []byte{ + 0, 1, 0, 0, 0, 1, + }, + }, + { + name: "2 groups with different run length", + mappings: []uint32{1, 1, 2}, + expected: []byte{ + 0, 2, 0, 0, 0, 1, + 0, 1, 0, 0, 0, 2, + }, + }, + { + name: "consecutive group count followed by regular group", + mappings: []uint32{1, 2, 2}, + expected: []byte{ + 0x80, 2, 0, 0, 0, 1, + 0, 1, 0, 0, 0, 2, + }, + }, + { + name: "group value not consecutive", + mappings: []uint32{1, 3}, + expected: []byte{ + 0, 1, 0, 0, 0, 1, + 0, 1, 0, 0, 0, 3, + }, + }, + { + name: "consecutive group with run length 2", + mappings: []uint32{1, 2}, + expected: []byte{ + 0x80, 2, 0, 0, 0, 1, + }, + }, + { + name: "consecutive group with run length 3", + mappings: []uint32{1, 2, 3}, + expected: []byte{ + 0x80, 3, 0, 0, 0, 1, + }, + }, + { + name: "consecutive group followed by non-consecutive group", + mappings: []uint32{1, 2, 2}, + expected: []byte{ + 0x80, 2, 0, 0, 0, 1, + 0, 1, 0, 0, 0, 2, + }, + }, + { + name: "consecutive group followed by consecutive group", + mappings: []uint32{1, 2, 2, 3}, + expected: []byte{ + 0x80, 2, 0, 0, 0, 1, + 0x80, 2, 0, 0, 0, 2, + }, + }, + { + name: "consecutive groups mixed with non-consecutive groups", + mappings: []uint32{1, 3, 4, 5, 5, 5, 5, 6, 7, 7}, + expected: []byte{ + 0, 1, 0, 0, 0, 1, + 0x80, 3, 0, 0, 0, 3, + 0, 3, 0, 0, 0, 5, + 0x80, 2, 0, 0, 0, 6, + 0, 1, 0, 0, 0, 7, + }, + }, + } + + for _, tc := range testcases { 
+ t.Run(tc.name, func(t *testing.T) { + b, err := encodeAccountPublicKeyMapping(tc.mappings) + require.NoError(t, err) + require.Equal(t, tc.expected, b) + + decodedMappings, err := decodeAccountPublicKeyMapping(b) + require.NoError(t, err) + require.ElementsMatch(t, tc.mappings, decodedMappings) + }) + } +} + +func TestDigestListSerialization(t *testing.T) { + testcases := []struct { + name string + digests []uint64 + expected []byte + }{ + { + name: "empty", + digests: nil, + expected: nil, + }, + { + name: "1 digest", + digests: []uint64{1}, + expected: []byte{ + 0, 0, 0, 0, 0, 0, 0, 1, + }, + }, + { + name: "2 digests", + digests: []uint64{1, 2}, + expected: []byte{ + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 2, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + b := encodeDigestList(tc.digests) + require.Equal(t, tc.expected, b) + + decodedDigests, err := decodeDigestList(b) + require.NoError(t, err) + require.ElementsMatch(t, tc.digests, decodedDigests) + }) + } +} + +func TestBatchPublicKeySerialization(t *testing.T) { + t.Run("empty", func(t *testing.T) { + b, err := encodePublicKeysInBatches(nil, maxPublicKeyCountInBatch) + require.NoError(t, err) + require.Empty(t, b) + + decodedPublicKeys, err := decodeBatchPublicKey(nil) + require.NoError(t, err) + require.Empty(t, decodedPublicKeys) + }) + + t.Run("1 public key", func(t *testing.T) { + encodedPublicKey := make([]byte, 73) + _, _ = rand.Read(encodedPublicKey) + + b, err := encodePublicKeysInBatches([][]byte{encodedPublicKey}, maxPublicKeyCountInBatch) + require.NoError(t, err) + require.Empty(t, b) + }) + + t.Run("2 public key", func(t *testing.T) { + encodedPublicKey1 := make([]byte, 73) + _, _ = rand.Read(encodedPublicKey1) + + encodedPublicKey2 := make([]byte, 80) + _, _ = rand.Read(encodedPublicKey2) + + encodedPublicKeys := [][]byte{encodedPublicKey1, encodedPublicKey2} + + b, err := encodePublicKeysInBatches(encodedPublicKeys, maxPublicKeyCountInBatch) + require.NoError(t, err) + require.True(t, len(b) == 1) + + decodedPublicKeys, err := decodeBatchPublicKey(b[0]) + require.NoError(t, err) + require.True(t, len(decodedPublicKeys) == 2) + require.Empty(t, decodedPublicKeys[0]) + require.Equal(t, encodedPublicKey2, decodedPublicKeys[1]) + }) + + t.Run("2 batches of public key", func(t *testing.T) { + encodedPublicKeys := make([][]byte, maxPublicKeyCountInBatch*1.5) + + for i := range len(encodedPublicKeys) { + encodedPublicKeys[i] = make([]byte, 70+i) + _, _ = rand.Read(encodedPublicKeys[i]) + } + + b, err := encodePublicKeysInBatches(encodedPublicKeys, maxPublicKeyCountInBatch) + require.NoError(t, err) + require.True(t, len(b) == 2) + + // Decode first batch + decodedPublicKeys, err := decodeBatchPublicKey(b[0]) + require.NoError(t, err) + require.True(t, len(decodedPublicKeys) == maxPublicKeyCountInBatch) + require.Empty(t, decodedPublicKeys[0]) + for i := 1; i < maxPublicKeyCountInBatch; i++ { + require.Equal(t, encodedPublicKeys[i], decodedPublicKeys[i]) + } + + // Decode second batch + decodedPublicKeys, err = decodeBatchPublicKey(b[1]) + require.NoError(t, err) + require.True(t, len(decodedPublicKeys) == len(encodedPublicKeys)-maxPublicKeyCountInBatch) + for i := range len(decodedPublicKeys) { + require.Equal(t, encodedPublicKeys[i+maxPublicKeyCountInBatch], decodedPublicKeys[i]) + } + }) +} + +func TestAccountStatusV4Serialization(t *testing.T) { + // NOTE: account status only contains key metadata + // if there are at least 2 account public keys. 
+ + testcases := []struct { + name string + deduplicated bool + accountPublicKeyCount uint32 + weightAndRevokedStatus []accountPublicKeyWeightAndRevokedStatus + startIndexForDigests uint32 + digests []uint64 + startIndexForMappings uint32 + accountPublicKeyMappings []uint32 + expected []byte + }{ + { + name: "not deduplicated with 2 account public key", + deduplicated: false, + accountPublicKeyCount: uint32(2), + weightAndRevokedStatus: []accountPublicKeyWeightAndRevokedStatus{ + {weight: 1000, revoked: false}, + }, + startIndexForDigests: uint32(0), + digests: []uint64{1, 2}, + expected: []byte{ + // Required Fields + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // init value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // init value for storage index + 0, 0, 0, 2, // init value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // init value for address id counter + // Optional Fields + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + }, + }, + { + name: "not deduplicated with 3 account public key", + deduplicated: false, + accountPublicKeyCount: uint32(3), + weightAndRevokedStatus: []accountPublicKeyWeightAndRevokedStatus{ + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + }, + startIndexForDigests: uint32(1), + digests: []uint64{2, 3}, + expected: []byte{ + // Required Fields + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // init value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // init value for storage index + 0, 0, 0, 3, // init value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // init value for address id counter + // Optional Fields + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + }, + { + name: "deduplicated with 2 account public key (1 stored key)", + deduplicated: true, + accountPublicKeyCount: uint32(2), + weightAndRevokedStatus: []accountPublicKeyWeightAndRevokedStatus{ + {weight: 1000, revoked: false}, + }, + startIndexForDigests: uint32(0), + digests: []uint64{1}, + startIndexForMappings: uint32(1), + accountPublicKeyMappings: []uint32{0}, + expected: []byte{ + // Required Fields + 0x41, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // init value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // init value for storage index + 0, 0, 0, 2, // init value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // init value for address id counter + // Optional Fields + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0, 1, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 8, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + }, + }, + { + name: "deduplicated with 3 account public key (2 stored keys)", + deduplicated: true, + accountPublicKeyCount: uint32(3), + weightAndRevokedStatus: []accountPublicKeyWeightAndRevokedStatus{ + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + }, + startIndexForDigests: uint32(0), + digests: []uint64{1, 2}, + startIndexForMappings: uint32(1), + accountPublicKeyMappings: []uint32{0, 1}, + 
expected: []byte{ + // Required Fields + 0x41, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // init value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // init value for storage index + 0, 0, 0, 3, // init value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // init value for address id counter + // Optional Fields + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0x80, 2, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + + s := environment.NewAccountStatus() + s.SetAccountPublicKeyCount(tc.accountPublicKeyCount) + + b, err := encodeAccountStatusV4WithPublicKeyMetadata( + s.ToBytes(), + tc.weightAndRevokedStatus, + tc.startIndexForDigests, + tc.digests, + tc.startIndexForMappings, + tc.accountPublicKeyMappings, + tc.deduplicated, + ) + require.NoError(t, err) + require.Equal(t, tc.expected, b) + + _, decodedWeightAndRevokedStatus, decodedStartIndexForDigests, decodedDigests, decodedStartIndexForMappings, decodedAccountPublicKeyMappings, err := decodeAccountStatusV4(b) + require.NoError(t, err) + require.ElementsMatch(t, tc.weightAndRevokedStatus, decodedWeightAndRevokedStatus) + require.Equal(t, tc.startIndexForDigests, decodedStartIndexForDigests) + require.ElementsMatch(t, tc.digests, decodedDigests) + require.Equal(t, tc.startIndexForMappings, decodedStartIndexForMappings) + require.ElementsMatch(t, tc.accountPublicKeyMappings, decodedAccountPublicKeyMappings) + + err = validateKeyMetadata( + tc.deduplicated, + tc.accountPublicKeyCount, + decodedWeightAndRevokedStatus, + decodedStartIndexForDigests, + decodedDigests, + decodedStartIndexForMappings, + decodedAccountPublicKeyMappings) + require.NoError(t, err) + }) + } +} + +func decodeAccountStatusV4(b []byte) ( + requiredFields []byte, + weightAndRevokedStatus []accountPublicKeyWeightAndRevokedStatus, + startKeyIndexForDigests uint32, + digests []uint64, + startKeyIndexForMapping uint32, + accountPublicKeyMappings []uint32, + err error, +) { + if len(b) < accountStatusV4MinimumSize { + return nil, nil, 0, nil, 0, nil, fmt.Errorf("failed to decode AccountStatusV4: expect at least %d byte, got %d bytes", accountStatusV4MinimumSize, len(b)) + } + + version, flag := b[0]&versionMask>>4, b[0]&flagMask + + if version != 4 { + return nil, nil, 0, nil, 0, nil, fmt.Errorf("failed to decode AccountStatusV4: expect version 4, got %d", version) + } + + if flag != 0 && flag != 1 { + return nil, nil, 0, nil, 0, nil, fmt.Errorf("failed to decode AccountStatusV4: expect flag 0 or 1, got %d", flag) + } + + deduplicated := flag == 1 + + requiredFields = append([]byte(nil), b[:accountStatusV4MinimumSize]...) + optionalFields := append([]byte(nil), b[accountStatusV4MinimumSize:]...) 
+ + accountStatus, err := environment.AccountStatusFromBytes(requiredFields) + if err != nil { + return nil, nil, 0, nil, 0, nil, err + } + + accountPublicKeyCount := accountStatus.AccountPublicKeyCount() + + if accountPublicKeyCount <= 1 { + if len(optionalFields) > 0 { + return nil, nil, 0, nil, 0, nil, fmt.Errorf("failed to decode AccountStatusV4: found optional fields when account public key count is %d", accountPublicKeyCount) + } + + if deduplicated { + return nil, nil, 0, nil, 0, nil, fmt.Errorf("failed to create AccountStatusV4: deduplication flag should be off when account public key is less than 2") + } + + return requiredFields, nil, 0, nil, 0, nil, err + } + + weightAndRevokedStatus, startKeyIndexForMapping, accountPublicKeyMappings, startKeyIndexForDigests, digests, err = decodeAccountStatusKeyMetadata(optionalFields, deduplicated) + + return +} diff --git a/cmd/util/ledger/migrations/account_key_deduplication_migration.go b/cmd/util/ledger/migrations/account_key_deduplication_migration.go new file mode 100644 index 00000000000..0ab3edca023 --- /dev/null +++ b/cmd/util/ledger/migrations/account_key_deduplication_migration.go @@ -0,0 +1,455 @@ +package migrations + +import ( + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "path" + "sync" + "time" + + "github.com/fxamacker/circlehash" + "github.com/rs/zerolog" + + "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/model/flow" +) + +// NOTE: The term "payload" and "register" are used interchangeably here. + +// Public key deduplication migration deduplicates public keys and migrates related payloads. +// Migration includes: +// - Optionally appending account public key metadata in "a.s" (account status) payload. +// - Weight and revoked status of each account public key, encoded using RLE to save space. +// The weight cannot be modified and revoke status is infrequently modified. +// - Key index mapping to stored key index mapping (only for accounts with duplication flag) +// encoded in RLE to save space. +// - Last N digests to detect duplicate keys being added at runtime (after migration and spork). +// - Renaming the payload key from "public_key_0" to "apk_0" without changing the payload value. +// - Migrating public keys from individual payloads to batch deduplicated public key payloads, +// starting from the second unique public key. +// - Migrating non-zero sequence number of account public key to its own payload. +// NOTE: We store and update the sequence number of each account public key in a separate payload +// to avoid blocking some use cases of concurrent execution. +// +// Using a data format (account public key metadata) that can detect duplicates and store deduplication data +// requires storing some related information (overhead) but in most cases the overhead is more than offset +// by deduplication. +// To avoid or reduce overhead, +// - migration only adds key metadata section to "a.s" payload for accounts with at least two keys. +// - migration only stores digests of the last N unique public keys (N=2 is good, using more wasn't always better). +// - migration only stores account public keys to stored public keys mappings if key deduplication occurred. 
+// +// More specifically: +// - For accounts with 0 public keys, migration skips them +// - For accounts with 1 public key, migration only renames the "public_key_0" payload to "apk_0" (no other changes) +// - For accounts with at least two keys, migration: +// * renames the "public_key_0" payload to "apk_0" +// * stores unique keys in batch public key payload, starting from public key 1 +// * stores non-zero sequence numbers in sequence number payloads +// * adds account key weights and revoked statuses to the key metadata section in key metadata section +// * adds digests of only the last N unique public keys in key metadata section (N=2 is the default) +// * adds account public key to unique key mappings if any key is deduplicated + +const ( + legacyAccountPublicKeyRegisterKeyPrefix = "public_key_" + legacyAccountPublicKeyRegisterKeyPattern = "public_key_%d" + legacyAccountPublicKey0RegisterKey = "public_key_0" +) + +const ( + maxPublicKeyCountInBatch = 20 // 20 public key payload is ~1420 bytes + maxStoredDigests = 2 // Account status payload stores up to 2 digests from last 2 stored keys. +) + +const ( + dummyDigest = uint64(0) +) + +// AccountPublicKeyDeduplicationMigration deduplicates account public keys, +// and migrates account status and account public key related payloads. +type AccountPublicKeyDeduplicationMigration struct { + log zerolog.Logger + chainID flow.ChainID + outputDir string + reporter reporters.ReportWriter + validationReporter reporters.ReportWriter + migrationResult migrationResult + accountMigrationResults []accountMigrationResult + resultLock sync.Mutex + validate bool +} + +var _ AccountBasedMigration = (*AccountPublicKeyDeduplicationMigration)(nil) + +func NewAccountPublicKeyDeduplicationMigration( + chainID flow.ChainID, + outputDir string, + validate bool, + rwf reporters.ReportWriterFactory, +) *AccountPublicKeyDeduplicationMigration { + + m := &AccountPublicKeyDeduplicationMigration{ + chainID: chainID, + reporter: rwf.ReportWriter("account-public-key-deduplication-migration_summary"), + outputDir: outputDir, + validate: validate, + } + + if validate { + m.validationReporter = rwf.ReportWriter("account-public-key-deduplication-validation") + } + + return m +} + +func (m *AccountPublicKeyDeduplicationMigration) InitMigration( + log zerolog.Logger, + registersByAccount *registers.ByAccount, + _ int, +) error { + m.log = log.With().Str("component", "DeduplicateAccountPublicKey").Logger() + m.accountMigrationResults = make([]accountMigrationResult, 0, registersByAccount.AccountCount()) + return nil +} + +func (m *AccountPublicKeyDeduplicationMigration) MigrateAccount( + _ context.Context, + address common.Address, + accountRegisters *registers.AccountRegisters, +) error { + beforeCount := accountRegisters.Count() + beforeSize := accountRegisters.PayloadSize() + + deduplicated, err := migrateAndDeduplicateAccountPublicKeys(m.log, accountRegisters) + if err != nil { + return fmt.Errorf("failed to migrate and deduplicate account public keys for account %x: %w", accountRegisters.Owner(), err) + } + + if m.validate { + err := ValidateAccountPublicKeyV4(address, accountRegisters) + if err != nil { + m.validationReporter.Write(validationError{ + Address: address.Hex(), + Msg: err.Error(), + }) + } + } + + afterCount := accountRegisters.Count() + afterSize := accountRegisters.PayloadSize() + + migrationResult := accountMigrationResult{ + address: hex.EncodeToString([]byte(accountRegisters.Owner())), + beforeCount: beforeCount, + beforeSize: beforeSize, + 
afterCount: afterCount, + afterSize: afterSize, + deduplicated: deduplicated, + } + + m.resultLock.Lock() + defer m.resultLock.Unlock() + + m.accountMigrationResults = append(m.accountMigrationResults, migrationResult) + + if deduplicated { + m.migrationResult.TotalDeduplicatedAccountCount++ + } else { + m.migrationResult.TotalUndeduplicatedAccountCount++ + } + + m.migrationResult.TotalSizeDelta += afterSize - beforeSize + m.migrationResult.TotalCountDelta += afterCount - beforeCount + + return nil +} + +func (m *AccountPublicKeyDeduplicationMigration) Close() error { + // Write migration summary + m.reporter.Write(m.migrationResult) + defer m.reporter.Close() + + // Write account migration results + fileName := path.Join(m.outputDir, fmt.Sprintf("%s_%d.csv", "account_public_key_deduplication_account_migration_results", int32(time.Now().Unix()))) + return writeAccountMigrationResults(fileName, m.accountMigrationResults) +} + +func migrateAndDeduplicateAccountPublicKeys( + log zerolog.Logger, + accountRegisters *registers.AccountRegisters, +) (deduplicated bool, _ error) { + + owner := accountRegisters.Owner() + + encodedAccountStatusV4, err := migrateAccountStatusToV4(log, accountRegisters, owner) + if err != nil { + return false, fmt.Errorf("failed to migrate account status from v3 to v4 for %x: %w", owner, err) + } + + accountStatusV4, err := environment.AccountStatusFromBytes(encodedAccountStatusV4) + if err != nil { + return false, fmt.Errorf("failed to create AccountStatus from migrated payload for %x: %w", owner, err) + } + + accountPublicKeyCount := accountStatusV4.AccountPublicKeyCount() + + if accountPublicKeyCount == 0 { + return false, nil + } + + if accountPublicKeyCount == 1 { + _, err := migrateAccountPublicKey0(accountRegisters, owner) + if err != nil { + return false, fmt.Errorf("failed to migrate account public key 0 for %x: %w", owner, err) + } + return false, nil + } + + encodedAccountPublicKey0, err := migrateAccountPublicKey0(accountRegisters, owner) + if err != nil { + return false, fmt.Errorf("failed to migrate account public key 0 for %x: %w", owner, err) + } + + return migrateAndDeduplicateAccountPublicKeysIfNeeded( + log, + accountRegisters, + owner, + accountPublicKeyCount, + encodedAccountPublicKey0, + ) +} + +func migrateAndDeduplicateAccountPublicKeysIfNeeded( + log zerolog.Logger, + accountRegisters *registers.AccountRegisters, + owner string, + accountPublicKeyCount uint32, + encodedAccountPublicKey0 []byte, +) ( + deduplicated bool, + err error, +) { + // TODO: maybe special case migration for accounts with 2 account public keys (16% of accounts) + + // accountPublicKeyWeightAndRevokedStatuses is ordered by account public key index, + // starting from account public key at index 1. + accountPublicKeyWeightAndRevokedStatuses := make([]accountPublicKeyWeightAndRevokedStatus, 0, accountPublicKeyCount-1) + + // Account public key deduplicator deduplicates keys. 
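+	// It tracks the unique encoded keys for this account and the mapping from each
+	// account public key index to its unique key index (see accountPublicKeyDeduplicator below).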
+ deduplicator := newAccountPublicKeyDeduplicator(owner, accountPublicKeyCount) + + // Add account public key 0 to deduplicator + err = deduplicator.addEncodedAccountPublicKey(0, encodedAccountPublicKey0) + if err != nil { + return false, fmt.Errorf("failed to add account public key at index %d for owner %x to deduplicator: %w", 0, owner, err) + } + + for keyIndex := uint32(1); keyIndex < accountPublicKeyCount; keyIndex++ { + + decodedAccountPublicKey, err := getAccountPublicKeyOrError(accountRegisters, owner, keyIndex) + if err != nil { + return false, fmt.Errorf("failed to decode public key at index %d for owner %x: %w", keyIndex, owner, err) + } + + err = deduplicator.addAccountPublicKey(keyIndex, decodedAccountPublicKey) + if err != nil { + return false, fmt.Errorf("failed to add account public key at index %d for owner %x to deduplicator: %w", keyIndex, owner, err) + } + + // Save weight and revoked status for account public key + accountPublicKeyWeightAndRevokedStatuses = append( + accountPublicKeyWeightAndRevokedStatuses, + accountPublicKeyWeightAndRevokedStatus{ + weight: uint16(decodedAccountPublicKey.Weight), + revoked: decodedAccountPublicKey.Revoked, + }, + ) + + // Migrate sequence number for account public key + // NOTE: sequence number is stored in its own payload, decoupled from public key. + err = migrateSeqNumberIfNeeded(accountRegisters, owner, keyIndex, decodedAccountPublicKey.SeqNumber) + if err != nil { + return false, fmt.Errorf("failed to migrate sequence number at index %d for owner %x: %w", keyIndex, owner, err) + } + + // Remove account public key from storage + // NOTE: account public key starting from key index 1 stores in batch. + err = removeAccountPublicKey(accountRegisters, owner, keyIndex) + if err != nil { + return false, fmt.Errorf("failed to remove public key at index %d for owner %x: %w", keyIndex, owner, err) + } + } + + shouldDeduplicate := deduplicator.hasDuplicateKey() + + encodedPublicKeys := deduplicator.uniqueKeys() + + // Migrate account status with account public key metadata + err = migrateAccountStatusWithPublicKeyMetadata( + log, + accountRegisters, + owner, + accountPublicKeyWeightAndRevokedStatuses, + encodedPublicKeys, + deduplicator.keyIndexMapping(), + shouldDeduplicate, + ) + if err != nil { + return false, fmt.Errorf("failed to migrate account status with key metadata for account %x: %w", owner, err) + } + + // Migrate account public key at index >= 1 + err = migrateAccountPublicKeysIfNeeded(accountRegisters, owner, encodedPublicKeys) + if err != nil { + return false, fmt.Errorf("failed to migrate account public keys in batches for account %x: %w", owner, err) + } + + return shouldDeduplicate, nil +} + +// accountPublicKeyDeduplicator deduplicates all account public keys (including account public key 0). +type accountPublicKeyDeduplicator struct { + owner string + + accountPublicKeyCount uint32 + + // uniqEncodedPublicKeysMap is used to deduplicate encoded public key. + uniqEncodedPublicKeysMap map[string]uint32 // key: encoded public key, value: index of uniqEncodedPublicKeys + + // uniqEncodedPublicKeys contains unique encoded public key. + // NOTE: First element is always encoded account public key 0. + uniqEncodedPublicKeys [][]byte + + // accountPublicKeyIndexMappings contains mapping of account public key index to unique public key index. + // NOTE: First mapping is always 0 (account public key index 0) to 0 (unique key index 0). 
+ accountPublicKeyIndexMappings []uint32 // index: account public key index, element: uniqEncodedPublicKeys index +} + +func newAccountPublicKeyDeduplicator(owner string, accountPublicKeyCount uint32) *accountPublicKeyDeduplicator { + return &accountPublicKeyDeduplicator{ + owner: owner, + accountPublicKeyCount: accountPublicKeyCount, + uniqEncodedPublicKeysMap: make(map[string]uint32), + uniqEncodedPublicKeys: make([][]byte, 0, accountPublicKeyCount), + accountPublicKeyIndexMappings: make([]uint32, accountPublicKeyCount), + } +} + +func (d *accountPublicKeyDeduplicator) addEncodedAccountPublicKey( + keyIndex uint32, + encodedAccountPublicKey []byte, +) error { + accountPublicKey0, err := flow.DecodeAccountPublicKey(encodedAccountPublicKey, keyIndex) + if err != nil { + return fmt.Errorf("failed to decode account public key %d for owner %x: %w", keyIndex, d.owner, err) + } + return d.addAccountPublicKey(keyIndex, accountPublicKey0) +} + +func (d *accountPublicKeyDeduplicator) addAccountPublicKey( + keyIndex uint32, + accountPublicKey flow.AccountPublicKey, +) error { + encodedPublicKey, err := encodeStoredPublicKeyFromAccountPublicKey(accountPublicKey) + if err != nil { + return fmt.Errorf("failed to encode stored public key at index %d for owner %x: %w", keyIndex, d.owner, err) + } + + if uniqKeyIndex, exists := d.uniqEncodedPublicKeysMap[string(encodedPublicKey)]; !exists { + // New key is unique + + // Append key to unique key list + d.uniqEncodedPublicKeys = append(d.uniqEncodedPublicKeys, encodedPublicKey) + + // Unique key index is the last key index in uniqEncodedPublicKeys. + uniqKeyIndex = uint32(len(d.uniqEncodedPublicKeys) - 1) + + // Append unique key index to account public key mappings + d.accountPublicKeyIndexMappings[keyIndex] = uniqKeyIndex + + d.uniqEncodedPublicKeysMap[string(encodedPublicKey)] = uniqKeyIndex + } else { + // New key is duplicate + d.accountPublicKeyIndexMappings[keyIndex] = uniqKeyIndex + } + + return nil +} + +func (d *accountPublicKeyDeduplicator) hasDuplicateKey() bool { + return d.accountPublicKeyCount > uint32(len(d.uniqEncodedPublicKeys)) +} + +func (d *accountPublicKeyDeduplicator) keyIndexMapping() []uint32 { + return d.accountPublicKeyIndexMappings +} + +func (d *accountPublicKeyDeduplicator) uniqueKeys() [][]byte { + return d.uniqEncodedPublicKeys +} + +func generateLastNPublicKeyDigests( + log zerolog.Logger, + owner string, + encodedPublicKeys [][]byte, + n int, + hashFunc func(b []byte, seed uint64) uint64, +) (int, []uint64) { + digestCount := min(len(encodedPublicKeys), n) + startIndex := len(encodedPublicKeys) - digestCount + digests := generatePublicKeyDigests(log, owner, encodedPublicKeys[startIndex:], hashFunc) + return startIndex, digests +} + +// generatePublicKeyDigests returns digests of encodedPublicKeys. 
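+// If two keys hash to the same digest for the same owner, all but the first occurrence
+// are recorded as dummyDigest, and a warning is logged for the colliding indexes.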
+func generatePublicKeyDigests( + log zerolog.Logger, + owner string, + encodedPublicKeys [][]byte, + hashFunc func(b []byte, seed uint64) uint64, +) (digests []uint64) { + if hashFunc == nil { + hashFunc = circlehash.Hash64 + } + + seed := binary.BigEndian.Uint64([]byte(owner)) + + digests = make([]uint64, len(encodedPublicKeys)) + + collisions := make(map[uint64][]int) + hasCollision := false + + for i, encodedPublicKey := range encodedPublicKeys { + digest := hashFunc(encodedPublicKey, seed) + + if _, exists := collisions[digest]; exists { + hasCollision = true + digests[i] = dummyDigest + } else { + digests[i] = digest + } + + collisions[digest] = append(collisions[digest], i) + } + + if hasCollision { + for digest, encodedPublicKeyIndexes := range collisions { + if len(encodedPublicKeyIndexes) > 1 { + log.Warn().Msgf("found digest collisions for account %x: digest %d, encoded public key indexes %v", owner, digest, encodedPublicKeyIndexes) + } + } + } + + return digests +} + +type validationError struct { + Address string + Msg string +} diff --git a/cmd/util/ledger/migrations/account_key_deduplication_migration_results.go b/cmd/util/ledger/migrations/account_key_deduplication_migration_results.go new file mode 100644 index 00000000000..f3ec9656b85 --- /dev/null +++ b/cmd/util/ledger/migrations/account_key_deduplication_migration_results.go @@ -0,0 +1,78 @@ +package migrations + +import ( + "cmp" + "encoding/csv" + "fmt" + "os" + "slices" + "strconv" +) + +type accountMigrationResult struct { + address string + beforeCount int + beforeSize int + afterCount int + afterSize int + deduplicated bool +} + +type migrationResult struct { + TotalDeduplicatedAccountCount int `json:"deduplicated_account"` + TotalUndeduplicatedAccountCount int `json:"undeduplicated_account"` + TotalCountDelta int `json:"register_count_delta"` + TotalSizeDelta int `json:"register_size_delta"` +} + +func writeAccountMigrationResults( + fileName string, + migrationResults []accountMigrationResult, +) error { + slices.SortFunc(migrationResults, func(a, b accountMigrationResult) int { + r := cmp.Compare(a.beforeCount, b.beforeCount) + if r != 0 { + return r + } + return cmp.Compare(a.address, b.address) + }) + + file, err := os.Create(fileName) + if err != nil { + return fmt.Errorf("failed to create account migration result file %s: %w", fileName, err) + } + defer file.Close() + + w := csv.NewWriter(file) + defer w.Flush() + + header := []string{ + "address", + "before_count", + "before_size", + "after_count", + "after_size", + "deduplicated", + } + + // Write header + err = w.Write(header) + if err != nil { + return fmt.Errorf("failed to write header to %s: %w", fileName, err) + } + + for _, migrationStats := range migrationResults { + data := []string{ + migrationStats.address, + strconv.Itoa(migrationStats.beforeCount), + strconv.Itoa(migrationStats.beforeSize), + strconv.Itoa(migrationStats.afterCount), + strconv.Itoa(migrationStats.afterSize), + strconv.FormatBool(migrationStats.deduplicated), + } + if err := w.Write(data); err != nil { + return fmt.Errorf("failed to write migration result for %s: %w", migrationStats.address, err) + } + } + return nil +} diff --git a/cmd/util/ledger/migrations/account_key_deduplication_migration_test.go b/cmd/util/ledger/migrations/account_key_deduplication_migration_test.go new file mode 100644 index 00000000000..86c40fdc66c --- /dev/null +++ b/cmd/util/ledger/migrations/account_key_deduplication_migration_test.go @@ -0,0 +1,648 @@ +package migrations + +import ( + "crypto/rand" 
+ "encoding/binary" + "fmt" + "testing" + + "github.com/fxamacker/circlehash" + "github.com/onflow/cadence/common" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestAccountPublicKeyDeduplicator(t *testing.T) { + pk1 := newAccountPublicKey(t, 1000) + pkb1, err := encodeStoredPublicKeyFromAccountPublicKey(pk1) + require.NoError(t, err) + + pk2 := newAccountPublicKey(t, 1000) + pkb2, err := encodeStoredPublicKeyFromAccountPublicKey(pk2) + require.NoError(t, err) + + pk3 := newAccountPublicKey(t, 1000) + pkb3, err := encodeStoredPublicKeyFromAccountPublicKey(pk3) + require.NoError(t, err) + + pk4 := newAccountPublicKey(t, 1000) + pkb4, err := encodeStoredPublicKeyFromAccountPublicKey(pk4) + require.NoError(t, err) + + pk5 := newAccountPublicKey(t, 1000) + pkb5, err := encodeStoredPublicKeyFromAccountPublicKey(pk5) + require.NoError(t, err) + + testcases := []struct { + name string + keys []flow.AccountPublicKey + encodedUniqueKeys [][]byte + mappings []uint32 + hasDeduplicate bool + }{ + { + name: "empty", + }, + { + name: "1 key", + keys: []flow.AccountPublicKey{pk1}, + encodedUniqueKeys: [][]byte{pkb1}, + mappings: []uint32{0}, + }, + { + name: "2 unique key", + keys: []flow.AccountPublicKey{pk1, pk2}, + encodedUniqueKeys: [][]byte{pkb1, pkb2}, + mappings: []uint32{0, 1}, + }, + { + name: "2 duplicate key", + keys: []flow.AccountPublicKey{pk1, pk1}, + encodedUniqueKeys: [][]byte{pkb1}, + mappings: []uint32{0, 0}, + hasDeduplicate: true, + }, + { + name: "5 keys with 1 unique keys", + keys: []flow.AccountPublicKey{pk1, pk1, pk1, pk1, pk1}, + encodedUniqueKeys: [][]byte{pkb1}, + mappings: []uint32{0, 0, 0, 0, 0}, + hasDeduplicate: true, + }, + { + name: "5 keys with 4 unique keys", + keys: []flow.AccountPublicKey{pk1, pk2, pk1, pk3, pk4}, + encodedUniqueKeys: [][]byte{pkb1, pkb2, pkb3, pkb4}, + mappings: []uint32{0, 1, 0, 2, 3}, + hasDeduplicate: true, + }, + { + name: "5 keys with 5 unique keys", + keys: []flow.AccountPublicKey{pk1, pk2, pk3, pk4, pk5}, + encodedUniqueKeys: [][]byte{pkb1, pkb2, pkb3, pkb4, pkb5}, + mappings: []uint32{0, 1, 2, 3, 4}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + deduplicator := newAccountPublicKeyDeduplicator("a", uint32(len(tc.keys))) + + for i, pk := range tc.keys { + err = deduplicator.addAccountPublicKey(uint32(i), pk) + require.NoError(t, err) + } + + require.Equal(t, tc.hasDeduplicate, deduplicator.hasDuplicateKey()) + + keys := deduplicator.uniqueKeys() + require.ElementsMatch(t, tc.encodedUniqueKeys, keys) + + mapping := deduplicator.keyIndexMapping() + require.ElementsMatch(t, tc.mappings, mapping) + }) + } +} + +func TestDigestForLastNEncodedPublicKeys(t *testing.T) { + var owner [8]byte + _, _ = rand.Read(owner[:]) + seed := binary.BigEndian.Uint64(owner[:]) + + type pkData struct { + pk flow.AccountPublicKey + pkb []byte + digest uint64 + } + + pks := make([]pkData, 3) + + for { + digestsMap := make(map[uint64]struct{}) + + for i := range len(pks) { + pk := newAccountPublicKey(t, 1000) + + pkb, err := encodeStoredPublicKeyFromAccountPublicKey(pk) + require.NoError(t, err) + + digest := circlehash.Hash64(pkb, seed) + + pks[i] = pkData{pk, pkb, digest} + digestsMap[digest] = struct{}{} + } + + if len(digestsMap) == len(pks) { + break + } + } + + testcases := []struct { + name string + encodedPublicKeys [][]byte + hashFunc func(b []byte, 
seed uint64) uint64 + n int + expectedStartIndex int + expectedDigests []uint64 + }{ + { + name: "empty", + n: 2, + }, + { + name: "1 key, min 2 keys", + n: 2, + encodedPublicKeys: [][]byte{pks[0].pkb}, + expectedStartIndex: 0, + expectedDigests: []uint64{pks[0].digest}, + }, + { + name: "2 key, min 2 keys", + n: 2, + encodedPublicKeys: [][]byte{pks[0].pkb, pks[1].pkb}, + expectedStartIndex: 0, + expectedDigests: []uint64{pks[0].digest, pks[1].digest}, + }, + { + name: "3 key, min 2 keys", + n: 2, + encodedPublicKeys: [][]byte{pks[0].pkb, pks[1].pkb, pks[2].pkb}, + expectedStartIndex: 1, + expectedDigests: []uint64{pks[1].digest, pks[2].digest}, + }, + { + name: "3 key, min 2 keys, collision", + n: 2, + hashFunc: func([]byte, uint64) uint64 { + return 0 + }, + encodedPublicKeys: [][]byte{pks[0].pkb, pks[1].pkb, pks[2].pkb}, + expectedStartIndex: 1, + expectedDigests: []uint64{0, dummyDigest}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + startIndex, digests := generateLastNPublicKeyDigests( + zerolog.Nop(), + string(owner[:]), + tc.encodedPublicKeys, + tc.n, + tc.hashFunc, + ) + require.Equal(t, tc.expectedStartIndex, startIndex) + require.ElementsMatch(t, tc.expectedDigests, digests) + }) + } +} + +func TestMigration(t *testing.T) { + const accountStatusMinSize = 29 + + t.Run("no account public key", func(t *testing.T) { + var owner [8]byte + _, _ = rand.Read(owner[:]) + + encodedAccountStatusV3 := newEncodedAccountStatusV3(0) + + accountRegisters := registers.NewAccountRegisters(string(owner[:])) + err := accountRegisters.Set(string(owner[:]), flow.AccountStatusKey, encodedAccountStatusV3) + require.NoError(t, err) + + deduplicated, err := migrateAndDeduplicateAccountPublicKeys( + zerolog.Nop(), + accountRegisters, + ) + require.NoError(t, err) + require.False(t, deduplicated) + + // Register after migration: + // - "a.s" + require.Equal(t, 1, accountRegisters.Count()) + + // Test "a.s" register + encodedAccountStatusV4, err := accountRegisters.Get(string(owner[:]), flow.AccountStatusKey) + require.NoError(t, err) + require.Equal(t, len(encodedAccountStatusV3), len(encodedAccountStatusV4)) + require.Equal(t, byte(0x40), encodedAccountStatusV4[0]) + require.Equal(t, encodedAccountStatusV3[1:], encodedAccountStatusV4[1:]) + + err = ValidateAccountPublicKeyV4(common.Address(owner), accountRegisters) + require.NoError(t, err) + }) + + t.Run("1 account public key without sequence number", func(t *testing.T) { + var owner [8]byte + _, _ = rand.Read(owner[:]) + + pk1 := newAccountPublicKey(t, 1000) + encodedPk1, err := flow.EncodeAccountPublicKey(pk1) + require.NoError(t, err) + + encodedAccountStatusV3 := newEncodedAccountStatusV3(1) + + accountRegisters := registers.NewAccountRegisters(string(owner[:])) + err = accountRegisters.Set(string(owner[:]), flow.AccountStatusKey, encodedAccountStatusV3) + require.NoError(t, err) + err = accountRegisters.Set(string(owner[:]), legacyAccountPublicKey0RegisterKey, encodedPk1) + require.NoError(t, err) + + deduplicated, err := migrateAndDeduplicateAccountPublicKeys( + zerolog.Nop(), + accountRegisters, + ) + require.NoError(t, err) + require.False(t, deduplicated) + + // Registers after migration: + // - "a.s" + // - "apk_0" + require.Equal(t, 2, accountRegisters.Count()) + + // Test "a.s" register + encodedAccountStatusV4, err := accountRegisters.Get(string(owner[:]), flow.AccountStatusKey) + require.NoError(t, err) + require.Equal(t, len(encodedAccountStatusV3), len(encodedAccountStatusV4)) + require.Equal(t, 
byte(0x40), encodedAccountStatusV4[0]) + require.Equal(t, encodedAccountStatusV3[1:], encodedAccountStatusV4[1:]) + + // Test "apk_0" register + encodedAccountPublicKey0, err := accountRegisters.Get(string(owner[:]), flow.AccountPublicKey0RegisterKey) + require.NoError(t, err) + require.Equal(t, encodedPk1, encodedAccountPublicKey0) + + err = ValidateAccountPublicKeyV4(common.Address(owner), accountRegisters) + require.NoError(t, err) + }) + + t.Run("1 account public key with sequence number", func(t *testing.T) { + var owner [8]byte + _, _ = rand.Read(owner[:]) + + pk1 := newAccountPublicKey(t, 1000) + pk1.SeqNumber = 1 + encodedPk1, err := flow.EncodeAccountPublicKey(pk1) + require.NoError(t, err) + + encodedAccountStatusV3 := newEncodedAccountStatusV3(1) + + accountRegisters := registers.NewAccountRegisters(string(owner[:])) + err = accountRegisters.Set(string(owner[:]), flow.AccountStatusKey, encodedAccountStatusV3) + require.NoError(t, err) + err = accountRegisters.Set(string(owner[:]), legacyAccountPublicKey0RegisterKey, encodedPk1) + require.NoError(t, err) + + deduplicated, err := migrateAndDeduplicateAccountPublicKeys( + zerolog.Nop(), + accountRegisters, + ) + require.NoError(t, err) + require.False(t, deduplicated) + + // Registers after migration: + // - "a.s" + // - "apk_0" + require.Equal(t, 2, accountRegisters.Count()) + + // Test "a.s" register + encodedAccountStatusV4, err := accountRegisters.Get(string(owner[:]), flow.AccountStatusKey) + require.NoError(t, err) + require.Equal(t, len(encodedAccountStatusV3), len(encodedAccountStatusV4)) + require.Equal(t, byte(0x40), encodedAccountStatusV4[0]) + require.Equal(t, encodedAccountStatusV3[1:], encodedAccountStatusV4[1:]) + + // Test "apk_0" register + encodedAccountPublicKey0, err := accountRegisters.Get(string(owner[:]), flow.AccountPublicKey0RegisterKey) + require.NoError(t, err) + require.Equal(t, encodedPk1, encodedAccountPublicKey0) + + err = ValidateAccountPublicKeyV4(common.Address(owner), accountRegisters) + require.NoError(t, err) + }) + + t.Run("2 unique account public key without sequence number", func(t *testing.T) { + var owner [8]byte + _, _ = rand.Read(owner[:]) + seed := binary.BigEndian.Uint64(owner[:]) + + pk1 := newAccountPublicKey(t, 1000) + encodedPk1, err := flow.EncodeAccountPublicKey(pk1) + require.NoError(t, err) + + encodedSpk1, err := encodeStoredPublicKeyFromAccountPublicKey(pk1) + require.NoError(t, err) + + digest1 := circlehash.Hash64(encodedSpk1, seed) + + pk2 := newAccountPublicKey(t, 1000) + encodedPk2, err := flow.EncodeAccountPublicKey(pk2) + require.NoError(t, err) + + encodedSpk2, err := encodeStoredPublicKeyFromAccountPublicKey(pk2) + require.NoError(t, err) + + digest2 := circlehash.Hash64(encodedSpk2, seed) + + encodedAccountStatusV3 := newEncodedAccountStatusV3(2) + + accountRegisters := registers.NewAccountRegisters(string(owner[:])) + err = accountRegisters.Set(string(owner[:]), flow.AccountStatusKey, encodedAccountStatusV3) + require.NoError(t, err) + err = accountRegisters.Set(string(owner[:]), legacyAccountPublicKey0RegisterKey, encodedPk1) + require.NoError(t, err) + err = accountRegisters.Set(string(owner[:]), fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedPk2) + require.NoError(t, err) + + deduplicated, err := migrateAndDeduplicateAccountPublicKeys( + zerolog.Nop(), + accountRegisters, + ) + require.NoError(t, err) + require.False(t, deduplicated) + + // Registers after migration: + // - "a.s" + // - "apk_0" + // - "pk_b0" + require.Equal(t, 3, 
accountRegisters.Count()) + + // Test "a.s" register + encodedAccountStatusV4, err := accountRegisters.Get(string(owner[:]), flow.AccountStatusKey) + require.NoError(t, err) + require.True(t, len(encodedAccountStatusV3) < len(encodedAccountStatusV4)) + require.Equal(t, byte(0x40), encodedAccountStatusV4[0]) + require.Equal(t, encodedAccountStatusV3[1:], encodedAccountStatusV4[1:len(encodedAccountStatusV3)]) + + _, weightAndRevokedStatus, startKeyIndexForDigests, digests, startKeyIndexForMapping, accountPublicKeyMappings, err := decodeAccountStatusV4(encodedAccountStatusV4) + require.NoError(t, err) + require.ElementsMatch(t, []accountPublicKeyWeightAndRevokedStatus{{1000, false}}, weightAndRevokedStatus) + require.Equal(t, uint32(0), startKeyIndexForDigests) + require.ElementsMatch(t, []uint64{digest1, digest2}, digests) + require.Equal(t, uint32(0), startKeyIndexForMapping) + require.Nil(t, accountPublicKeyMappings) + + // Test "apk_0" register + encodedAccountPublicKey0, err := accountRegisters.Get(string(owner[:]), flow.AccountPublicKey0RegisterKey) + require.NoError(t, err) + require.Equal(t, encodedPk1, encodedAccountPublicKey0) + + // Test "pk_b0" register + encodedBatchPublicKey0, err := accountRegisters.Get(string(owner[:]), fmt.Sprintf(flow.BatchPublicKeyRegisterKeyPattern, 0)) + require.NoError(t, err) + + encodedPks, err := decodeBatchPublicKey(encodedBatchPublicKey0) + require.NoError(t, err) + require.ElementsMatch(t, [][]byte{{}, encodedSpk2}, encodedPks) + + err = ValidateAccountPublicKeyV4(common.Address(owner), accountRegisters) + require.NoError(t, err) + }) + + t.Run("2 unique account public key with sequence number", func(t *testing.T) { + var owner [8]byte + _, _ = rand.Read(owner[:]) + seed := binary.BigEndian.Uint64(owner[:]) + + pk1 := newAccountPublicKey(t, 1000) + pk1.SeqNumber = 1 + encodedPk1, err := flow.EncodeAccountPublicKey(pk1) + require.NoError(t, err) + + encodedSpk1, err := encodeStoredPublicKeyFromAccountPublicKey(pk1) + require.NoError(t, err) + + digest1 := circlehash.Hash64(encodedSpk1, seed) + + pk2 := newAccountPublicKey(t, 1000) + pk2.SeqNumber = 2 + encodedPk2, err := flow.EncodeAccountPublicKey(pk2) + require.NoError(t, err) + + encodedSpk2, err := encodeStoredPublicKeyFromAccountPublicKey(pk2) + require.NoError(t, err) + + digest2 := circlehash.Hash64(encodedSpk2, seed) + + encodedAccountStatusV3 := newEncodedAccountStatusV3(2) + + accountRegisters := registers.NewAccountRegisters(string(owner[:])) + err = accountRegisters.Set(string(owner[:]), flow.AccountStatusKey, encodedAccountStatusV3) + require.NoError(t, err) + err = accountRegisters.Set(string(owner[:]), legacyAccountPublicKey0RegisterKey, encodedPk1) + require.NoError(t, err) + err = accountRegisters.Set(string(owner[:]), fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedPk2) + require.NoError(t, err) + + deduplicated, err := migrateAndDeduplicateAccountPublicKeys( + zerolog.Nop(), + accountRegisters, + ) + require.NoError(t, err) + require.False(t, deduplicated) + + // Registers after migration: + // - "a.s" + // - "apk_0" + // - "pk_b0" + // - "sn_1" + require.Equal(t, 4, accountRegisters.Count()) + + // Test "a.s" register + encodedAccountStatusV4, err := accountRegisters.Get(string(owner[:]), flow.AccountStatusKey) + require.NoError(t, err) + require.True(t, len(encodedAccountStatusV3) < len(encodedAccountStatusV4)) + require.Equal(t, byte(0x40), encodedAccountStatusV4[0]) + require.Equal(t, encodedAccountStatusV3[1:], 
encodedAccountStatusV4[1:len(encodedAccountStatusV3)])
+
+		_, weightAndRevokedStatus, startKeyIndexForDigests, digests, startKeyIndexForMapping, accountPublicKeyMappings, err := decodeAccountStatusV4(encodedAccountStatusV4)
+		require.NoError(t, err)
+		require.ElementsMatch(t, []accountPublicKeyWeightAndRevokedStatus{{1000, false}}, weightAndRevokedStatus)
+		require.Equal(t, uint32(0), startKeyIndexForDigests)
+		require.ElementsMatch(t, []uint64{digest1, digest2}, digests)
+		require.Equal(t, uint32(0), startKeyIndexForMapping)
+		require.Nil(t, accountPublicKeyMappings)
+
+		// Test "apk_0" register
+		encodedAccountPublicKey0, err := accountRegisters.Get(string(owner[:]), flow.AccountPublicKey0RegisterKey)
+		require.NoError(t, err)
+		require.Equal(t, encodedPk1, encodedAccountPublicKey0)
+
+		// Test "pk_b0" register
+		encodedBatchPublicKey0, err := accountRegisters.Get(string(owner[:]), fmt.Sprintf(flow.BatchPublicKeyRegisterKeyPattern, 0))
+		require.NoError(t, err)
+
+		encodedPks, err := decodeBatchPublicKey(encodedBatchPublicKey0)
+		require.NoError(t, err)
+		require.ElementsMatch(t, [][]byte{{}, encodedSpk2}, encodedPks)
+
+		// Test "sn_1" register
+		encodedSequenceNumber, err := accountRegisters.Get(string(owner[:]), fmt.Sprintf(flow.SequenceNumberRegisterKeyPattern, 1))
+		require.NoError(t, err)
+
+		seqNum, err := flow.DecodeSequenceNumber(encodedSequenceNumber)
+		require.NoError(t, err)
+		require.Equal(t, uint64(2), seqNum)
+
+		err = ValidateAccountPublicKeyV4(common.Address(owner), accountRegisters)
+		require.NoError(t, err)
+	})
+
+	t.Run("2 account public key (1 unique key) without sequence number", func(t *testing.T) {
+		var owner [8]byte
+		_, _ = rand.Read(owner[:])
+		seed := binary.BigEndian.Uint64(owner[:])
+
+		pk1 := newAccountPublicKey(t, 1000)
+		encodedPk1, err := flow.EncodeAccountPublicKey(pk1)
+		require.NoError(t, err)
+
+		encodedSpk1, err := encodeStoredPublicKeyFromAccountPublicKey(pk1)
+		require.NoError(t, err)
+
+		digest1 := circlehash.Hash64(encodedSpk1, seed)
+
+		encodedAccountStatusV3 := newEncodedAccountStatusV3(2)
+
+		accountRegisters := registers.NewAccountRegisters(string(owner[:]))
+		err = accountRegisters.Set(string(owner[:]), flow.AccountStatusKey, encodedAccountStatusV3)
+		require.NoError(t, err)
+		err = accountRegisters.Set(string(owner[:]), legacyAccountPublicKey0RegisterKey, encodedPk1)
+		require.NoError(t, err)
+		err = accountRegisters.Set(string(owner[:]), fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedPk1)
+		require.NoError(t, err)
+
+		deduplicated, err := migrateAndDeduplicateAccountPublicKeys(
+			zerolog.Nop(),
+			accountRegisters,
+		)
+		require.NoError(t, err)
+		require.True(t, deduplicated)
+
+		// Registers after migration:
+		// - "a.s"
+		// - "apk_0"
+		require.Equal(t, 2, accountRegisters.Count())
+
+		// Test "a.s" register
+		encodedAccountStatusV4, err := accountRegisters.Get(string(owner[:]), flow.AccountStatusKey)
+		require.NoError(t, err)
+		require.True(t, len(encodedAccountStatusV3) < len(encodedAccountStatusV4))
+		require.Equal(t, byte(0x41), encodedAccountStatusV4[0])
+		require.Equal(t, encodedAccountStatusV3[1:], encodedAccountStatusV4[1:len(encodedAccountStatusV3)])
+
+		_, weightAndRevokedStatus, startKeyIndexForDigests, digests, startKeyIndexForMapping, accountPublicKeyMappings, err := decodeAccountStatusV4(encodedAccountStatusV4)
+		require.NoError(t, err)
+		require.ElementsMatch(t, []accountPublicKeyWeightAndRevokedStatus{{1000, false}}, weightAndRevokedStatus)
+		require.Equal(t, uint32(0), startKeyIndexForDigests)
+		require.ElementsMatch(t, []uint64{digest1}, digests)
+		require.Equal(t, uint32(1), startKeyIndexForMapping)
+		require.ElementsMatch(t, []uint32{0}, accountPublicKeyMappings)
+
+		// Test "apk_0" register
+		encodedAccountPublicKey0, err := accountRegisters.Get(string(owner[:]), flow.AccountPublicKey0RegisterKey)
+		require.NoError(t, err)
+		require.Equal(t, encodedPk1, encodedAccountPublicKey0)
+
+		err = ValidateAccountPublicKeyV4(common.Address(owner), accountRegisters)
+		require.NoError(t, err)
+	})
+
+	t.Run("2 account public key (1 unique key) with sequence number", func(t *testing.T) {
+		var owner [8]byte
+		_, _ = rand.Read(owner[:])
+		seed := binary.BigEndian.Uint64(owner[:])
+
+		pk1 := newAccountPublicKey(t, 1000)
+		pk1.SeqNumber = 1
+		encodedPk1, err := flow.EncodeAccountPublicKey(pk1)
+		require.NoError(t, err)
+
+		encodedSpk1, err := encodeStoredPublicKeyFromAccountPublicKey(pk1)
+		require.NoError(t, err)
+
+		digest1 := circlehash.Hash64(encodedSpk1, seed)
+
+		encodedAccountStatusV3 := newEncodedAccountStatusV3(2)
+
+		accountRegisters := registers.NewAccountRegisters(string(owner[:]))
+		err = accountRegisters.Set(string(owner[:]), flow.AccountStatusKey, encodedAccountStatusV3)
+		require.NoError(t, err)
+		err = accountRegisters.Set(string(owner[:]), legacyAccountPublicKey0RegisterKey, encodedPk1)
+		require.NoError(t, err)
+		err = accountRegisters.Set(string(owner[:]), fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedPk1)
+		require.NoError(t, err)
+
+		deduplicated, err := migrateAndDeduplicateAccountPublicKeys(
+			zerolog.Nop(),
+			accountRegisters,
+		)
+		require.NoError(t, err)
+		require.True(t, deduplicated)
+
+		// Registers after migration:
+		// - "a.s"
+		// - "apk_0"
+		// - "sn_1"
+		require.Equal(t, 3, accountRegisters.Count())
+
+		// Test "a.s" register
+		encodedAccountStatusV4, err := accountRegisters.Get(string(owner[:]), flow.AccountStatusKey)
+		require.NoError(t, err)
+		require.True(t, len(encodedAccountStatusV3) < len(encodedAccountStatusV4))
+		require.Equal(t, byte(0x41), encodedAccountStatusV4[0])
+		require.Equal(t, encodedAccountStatusV3[1:], encodedAccountStatusV4[1:len(encodedAccountStatusV3)])
+
+		_, weightAndRevokedStatus, startKeyIndexForDigests, digests, startKeyIndexForMapping, accountPublicKeyMappings, err := decodeAccountStatusV4(encodedAccountStatusV4)
+		require.NoError(t, err)
+		require.ElementsMatch(t, []accountPublicKeyWeightAndRevokedStatus{{1000, false}}, weightAndRevokedStatus)
+		require.Equal(t, uint32(0), startKeyIndexForDigests)
+		require.ElementsMatch(t, []uint64{digest1}, digests)
+		require.Equal(t, uint32(1), startKeyIndexForMapping)
+		require.ElementsMatch(t, []uint32{0}, accountPublicKeyMappings)
+
+		// Test "apk_0" register
+		encodedAccountPublicKey0, err := accountRegisters.Get(string(owner[:]), flow.AccountPublicKey0RegisterKey)
+		require.NoError(t, err)
+		require.Equal(t, encodedPk1, encodedAccountPublicKey0)
+
+		// Test "sn_1" register
+		encodedSequenceNumber, err := accountRegisters.Get(string(owner[:]), fmt.Sprintf(flow.SequenceNumberRegisterKeyPattern, 1))
+		require.NoError(t, err)
+
+		seqNum, err := flow.DecodeSequenceNumber(encodedSequenceNumber)
+		require.NoError(t, err)
+		require.Equal(t, uint64(1), seqNum)
+
+		err = ValidateAccountPublicKeyV4(common.Address(owner), accountRegisters)
+		require.NoError(t, err)
+	})
+}
+
+func newAccountPublicKey(t *testing.T, weight int) flow.AccountPublicKey {
+	privateKey, err := unittest.AccountKeyDefaultFixture()
+	require.NoError(t, err)
+
+	return privateKey.PublicKey(weight)
+}
+
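+// exampleLastNDigests is an editor's sketch (not part of the test suite) showing
+// how generateLastNPublicKeyDigests windows the key list: with 3 keys and n=2,
+// only the digests of the last 2 keys are computed and the returned start index
+// is 1. The owner string must be at least 8 bytes because it seeds the hash.
+func exampleLastNDigests() {
+	keys := [][]byte{{0x01}, {0x02}, {0x03}}
+	startIndex, digests := generateLastNPublicKeyDigests(zerolog.Nop(), "owner123", keys, 2, nil)
+	fmt.Println(startIndex, len(digests)) // prints: 1 2
+}
+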
+func newEncodedAccountStatusV3(accountPublicKeyCount uint32) []byte { + b := []byte{ + 0, // initial empty flags + 0, 0, 0, 0, 0, 0, 0, 0, // init value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // init value for storage index + 0, 0, 0, 0, // init value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // init value for address id counter + } + + var encodedAccountPublicKeyCount [4]byte + binary.BigEndian.PutUint32(encodedAccountPublicKeyCount[:], accountPublicKeyCount) + + copy(b[17:], encodedAccountPublicKeyCount[:]) + + return b +} diff --git a/cmd/util/ledger/migrations/account_key_deduplication_migration_utils.go b/cmd/util/ledger/migrations/account_key_deduplication_migration_utils.go new file mode 100644 index 00000000000..bcefd0b75d1 --- /dev/null +++ b/cmd/util/ledger/migrations/account_key_deduplication_migration_utils.go @@ -0,0 +1,223 @@ +package migrations + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/model/flow" +) + +const ( + accountStatusV4WithNoDeduplicationFlag = 0x40 +) + +func getAccountRegisterOrError( + accountRegisters *registers.AccountRegisters, + owner string, + key string, +) ([]byte, error) { + value, err := accountRegisters.Get(owner, key) + if err != nil { + return nil, err + } + if len(value) == 0 { + return nil, fmt.Errorf("owner %x key %s register not found", owner, key) + } + return value, nil +} + +func getAccountPublicKeyOrError( + accountRegisters *registers.AccountRegisters, + owner string, + keyIndex uint32, +) (flow.AccountPublicKey, error) { + publicKeyRegisterKey := fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, keyIndex) + + encodedAccountPublicKey, err := getAccountRegisterOrError(accountRegisters, owner, publicKeyRegisterKey) + if err != nil { + return flow.AccountPublicKey{}, err + } + + decodedAccountPublicKey, err := flow.DecodeAccountPublicKey(encodedAccountPublicKey, keyIndex) + if err != nil { + return flow.AccountPublicKey{}, err + } + + return decodedAccountPublicKey, nil +} + +func removeAccountPublicKey( + accountRegisters *registers.AccountRegisters, + owner string, + keyIndex uint32, +) error { + publicKeyRegisterKey := fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, keyIndex) + return accountRegisters.Set(owner, publicKeyRegisterKey, nil) +} + +// migrateAccountStatusToV4 sets account status version to v4, stores account status +// in account status register, and returns updated account status. +func migrateAccountStatusToV4( + log zerolog.Logger, + accountRegisters *registers.AccountRegisters, + owner string, +) ([]byte, error) { + encodedAccountStatus, err := getAccountRegisterOrError(accountRegisters, owner, flow.AccountStatusKey) + if err != nil { + return nil, err + } + + if encodedAccountStatus[0] != 0 { + log.Warn().Msgf("%x account status flag is %d, flag will be reset during migration", owner, encodedAccountStatus[0]) + } + + // Update account status version and flag in place. + encodedAccountStatus[0] = accountStatusV4WithNoDeduplicationFlag + + // Set account status register + err = accountRegisters.Set(owner, flow.AccountStatusKey, encodedAccountStatus) + if err != nil { + return nil, err + } + + return encodedAccountStatus, nil +} + +// migrateAccountPublicKey0 renames public_key_0 to apk_0, stores renamed +// account public key, and returns raw data of account public key 0. 
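+// Illustration (editor's note): the rename is performed as a delete plus a write,
+// i.e. "public_key_0" is set to nil and the same encoded bytes are written under
+// "apk_0".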
+func migrateAccountPublicKey0(
+	accountRegisters *registers.AccountRegisters,
+	owner string,
+) ([]byte, error) {
+	encodedAccountPublicKey0, err := getAccountRegisterOrError(accountRegisters, owner, legacyAccountPublicKey0RegisterKey)
+	if err != nil {
+		return nil, err
+	}
+
+	// Rename public_key_0 register key to apk_0
+	err = accountRegisters.Set(owner, legacyAccountPublicKey0RegisterKey, nil)
+	if err != nil {
+		return nil, err
+	}
+	err = accountRegisters.Set(owner, flow.AccountPublicKey0RegisterKey, encodedAccountPublicKey0)
+	if err != nil {
+		return nil, err
+	}
+
+	return encodedAccountPublicKey0, nil
+}
+
+// migrateSeqNumberIfNeeded creates a sequence number register for the given
+// account public key if its sequence number is > 0.
+func migrateSeqNumberIfNeeded(
+	accountRegisters *registers.AccountRegisters,
+	owner string,
+	keyIndex uint32,
+	seqNumber uint64,
+) error {
+	if seqNumber == 0 {
+		return nil
+	}
+
+	seqNumberRegisterKey := fmt.Sprintf(flow.SequenceNumberRegisterKeyPattern, keyIndex)
+
+	encodedSeqNumber, err := flow.EncodeSequenceNumber(seqNumber)
+	if err != nil {
+		return err
+	}
+
+	return accountRegisters.Set(owner, seqNumberRegisterKey, encodedSeqNumber)
+}
+
+// migrateAccountStatusWithPublicKeyMetadata appends account public key metadata
+// to the account status register.
+func migrateAccountStatusWithPublicKeyMetadata(
+	log zerolog.Logger,
+	accountRegisters *registers.AccountRegisters,
+	owner string,
+	accountPublicKeyWeightAndRevokedStatus []accountPublicKeyWeightAndRevokedStatus,
+	encodedPublicKeys [][]byte,
+	keyIndexMappings []uint32,
+	deduplicated bool,
+) error {
+	encodedAccountStatus, err := getAccountRegisterOrError(accountRegisters, owner, flow.AccountStatusKey)
+	if err != nil {
+		return err
+	}
+
+	// After the migration and spork, the runtime needs to detect duplicate public keys
+	// being added and store them efficiently. The detection rate doesn't need to be 100%
+	// to be effective, and we don't want to read all existing keys or store all digests.
+	// We only need to compute and store the hash digests of the last N public keys added.
+	// For example, N=2 showed a good balance of tradeoffs in tests using a mainnet snapshot.
+	startIndexForDigests, digests := generateLastNPublicKeyDigests(log, owner, encodedPublicKeys, maxStoredDigests, nil)
+
+	// startIndexForMapping is the index of the first deduplicated public key.
+	// It is used to avoid unnecessary key index mapping overhead (both speed & storage).
+	startIndexForMapping := firstIndexOfDuplicateKeyInMappings(keyIndexMappings)
+
+	// keyIndexMappings contains stored key indexes, where slice position i
+	// corresponds to account public key index startIndexForMapping + i.
+	keyIndexMappings = keyIndexMappings[startIndexForMapping:]
+
+	newAccountStatus, err := encodeAccountStatusV4WithPublicKeyMetadata(
+		encodedAccountStatus,
+		accountPublicKeyWeightAndRevokedStatus,
+		uint32(startIndexForDigests),
+		digests,
+		uint32(startIndexForMapping),
+		keyIndexMappings,
+		deduplicated,
+	)
+	if err != nil {
+		return err
+	}
+
+	return accountRegisters.Set(owner, flow.AccountStatusKey, newAccountStatus)
+}
+
+// migrateAccountPublicKeysIfNeeded migrates account public keys at index >= 1 to batched public key registers.
+// NOTE:
+// - Key 0 in batch 0 is always empty since it corresponds to account public key 0, which is stored in its own register.
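+// - Example (illustrative, assuming a batch capacity of 3): unique keys
+//   [k0, k1, k2, k3] end up as apk_0 = k0, pk_b0 = [<empty>, k1, k2], and
+//   pk_b1 = [k3].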
+func migrateAccountPublicKeysIfNeeded(
+	accountRegisters *registers.AccountRegisters,
+	owner string,
+	encodedUniquePublicKeys [][]byte,
+) error {
+	// Return early if encodedUniquePublicKeys only contains public key 0 (which is always stored in register apk_0)
+	if len(encodedUniquePublicKeys) == 1 {
+		return nil
+	}
+
+	// Storing public keys in batches reduces the payload count and the number of reads needed for multiple public keys.
+	// About 65% of accounts have 1 public key, so the first public key is stored on its own (not batched) to avoid overhead.
+	// About 90% of accounts have fewer than 10 account public keys, so with batching 90% of accounts have at
+	// most 2 payloads for public keys (since the first key is always by itself to avoid overhead):
+	// - apk_0 payload for account public key 0, and
+	// - pk_b0 payload for the rest of the deduplicated public keys
+	encodedBatchPublicKeys, err := encodePublicKeysInBatches(encodedUniquePublicKeys, maxPublicKeyCountInBatch)
+	if err != nil {
+		return err
+	}
+
+	for batchIndex, encodedBatchPublicKey := range encodedBatchPublicKeys {
+		batchPublicKeyRegisterKey := fmt.Sprintf(flow.BatchPublicKeyRegisterKeyPattern, batchIndex)
+		err = accountRegisters.Set(owner, batchPublicKeyRegisterKey, encodedBatchPublicKey)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// firstIndexOfDuplicateKeyInMappings returns the index of the first account public key
+// whose stored key index differs from its own index (i.e. the first deduplicated key),
+// or len(accountPublicKeyMappings) if every key maps to itself.
+func firstIndexOfDuplicateKeyInMappings(accountPublicKeyMappings []uint32) int {
+	for keyIndex, storedKeyIndex := range accountPublicKeyMappings {
+		if uint32(keyIndex) != storedKeyIndex {
+			return keyIndex
+		}
+	}
+	return len(accountPublicKeyMappings)
+}
diff --git a/cmd/util/ledger/migrations/account_key_deduplication_migration_validation.go b/cmd/util/ledger/migrations/account_key_deduplication_migration_validation.go
new file mode 100644
index 00000000000..0cd8a9cc627
--- /dev/null
+++ b/cmd/util/ledger/migrations/account_key_deduplication_migration_validation.go
@@ -0,0 +1,300 @@
+package migrations
+
+import (
+	"fmt"
+	"maps"
+	"slices"
+	"strings"
+
+	"github.com/onflow/cadence/common"
+
+	"github.com/onflow/flow-go/cmd/util/ledger/util/registers"
+	"github.com/onflow/flow-go/fvm/environment"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// ValidateAccountPublicKeyV4 checks the register-level invariants of an account
+// after migration to account status v4.
+func ValidateAccountPublicKeyV4(
+	address common.Address,
+	accountRegisters *registers.AccountRegisters,
+) error {
+	// Skip empty address because it doesn't have an account status register.
+	if len(address) == 0 || address == common.ZeroAddress {
+		return nil
+	}
+
+	// Validate account status register
+	accountPublicKeyCount, storedKeyCount, deduplicated, err := validateAccountStatusV4Register(address, accountRegisters)
+	if err != nil {
+		return err
+	}
+
+	if storedKeyCount > accountPublicKeyCount {
+		return fmt.Errorf("number of stored keys shouldn't be greater than number of account keys, got %d stored keys, and %d account keys", storedKeyCount, accountPublicKeyCount)
+	}
+
+	if deduplicated && accountPublicKeyCount == storedKeyCount {
+		return fmt.Errorf("number of deduplicated stored keys shouldn't equal number of account keys, got %d stored keys, and %d account keys", storedKeyCount, accountPublicKeyCount)
+	}
+
+	// Find relevant registers
+	foundAccountPublicKey0 := false
+	sequenceNumberRegisters := make(map[string][]byte)
+	batchPublicKeyRegisters := make(map[string][]byte)
+
+	err = accountRegisters.ForEach(func(_ string, key string, value []byte) error {
+		if strings.HasPrefix(key, legacyAccountPublicKeyRegisterKeyPrefix) {
+			return fmt.Errorf("found legacy account public key register %s", key)
+		} else if key == flow.AccountPublicKey0RegisterKey {
+			foundAccountPublicKey0 = true
+		} else if strings.HasPrefix(key, flow.BatchPublicKeyRegisterKeyPrefix) {
+			batchPublicKeyRegisters[key] = value
+		} else if strings.HasPrefix(key, flow.SequenceNumberRegisterKeyPrefix) {
+			sequenceNumberRegisters[key] = value
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	if foundAccountPublicKey0 && accountPublicKeyCount == 0 {
+		return fmt.Errorf("found account public key 0 when account public key count is 0")
+	}
+
+	if !foundAccountPublicKey0 && accountPublicKeyCount > 0 {
+		return fmt.Errorf("no account public key 0 when account public key count is %d", accountPublicKeyCount)
+	}
+
+	if accountPublicKeyCount <= 1 {
+		if len(sequenceNumberRegisters) != 0 {
+			return fmt.Errorf("found %d sequence number registers when account public key count is %d", len(sequenceNumberRegisters), accountPublicKeyCount)
+		}
+		if len(batchPublicKeyRegisters) != 0 {
+			return fmt.Errorf("found %d batch public key registers when account public key count is %d", len(batchPublicKeyRegisters), accountPublicKeyCount)
+		}
+		return nil
+	}
+
+	// Check sequence number registers.
+	err = validateSequenceNumberRegisters(accountPublicKeyCount, sequenceNumberRegisters)
+	if err != nil {
+		return err
+	}
+
+	// Check batch public key registers.
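+	// Batch registers must be named contiguously (pk_b0, pk_b1, ...), every batch
+	// except the last must be full, and slot 0 of batch 0 must be empty because it
+	// mirrors account public key 0 stored in apk_0.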
+	return validateBatchPublicKeyRegisters(storedKeyCount, batchPublicKeyRegisters)
+}
+
+func validateAccountStatusV4Register(
+	address common.Address,
+	accountRegisters *registers.AccountRegisters,
+) (
+	accountPublicKeyCount uint32,
+	storedKeyCount uint32,
+	deduplicated bool,
+	err error,
+) {
+	owner := flow.AddressToRegisterOwner(flow.Address(address[:]))
+
+	encodedAccountStatus, err := accountRegisters.Get(owner, flow.AccountStatusKey)
+	if err != nil {
+		err = fmt.Errorf("failed to get account status register: %w", err)
+		return
+	}
+
+	if len(encodedAccountStatus) == 0 {
+		err = fmt.Errorf("account status register is empty")
+		return
+	}
+
+	accountStatus, err := environment.AccountStatusFromBytes(encodedAccountStatus)
+	if err != nil {
+		err = fmt.Errorf("failed to create account status from bytes %x: %w", encodedAccountStatus, err)
+		return
+	}
+
+	if accountStatus.Version() != 4 {
+		err = fmt.Errorf("account status version is %d", accountStatus.Version())
+		return
+	}
+
+	deduplicated = accountStatus.IsAccountKeyDeduplicated()
+
+	accountPublicKeyCount = accountStatus.AccountPublicKeyCount()
+
+	if accountPublicKeyCount <= 1 {
+		if len(encodedAccountStatus) != environment.AccountStatusMinSizeV4 {
+			err = fmt.Errorf("account status register size is %d, expect %d bytes", len(encodedAccountStatus), environment.AccountStatusMinSizeV4)
+			return
+		}
+		storedKeyCount = accountPublicKeyCount
+		return
+	}
+
+	weightAndRevokedStatus, startKeyIndexForMapping, accountPublicKeyMappings, startKeyIndexForDigests, digests, err := decodeAccountStatusKeyMetadata(
+		encodedAccountStatus[environment.AccountStatusMinSizeV4:],
+		deduplicated,
+	)
+	if err != nil {
+		err = fmt.Errorf("failed to parse key metadata %x: %w", encodedAccountStatus[environment.AccountStatusMinSizeV4:], err)
+		return
+	}
+
+	if !deduplicated && len(accountPublicKeyMappings) > 0 {
+		err = fmt.Errorf("expect no mapping when account is not deduplicated, got %v", accountPublicKeyMappings)
+		return
+	}
+
+	storedKeyCount = uint32(len(digests)) + startKeyIndexForDigests
+
+	err = validateKeyMetadata(
+		deduplicated,
+		accountPublicKeyCount,
+		weightAndRevokedStatus,
+		startKeyIndexForDigests,
+		digests,
+		startKeyIndexForMapping,
+		accountPublicKeyMappings,
+	)
+	if err != nil {
+		return
+	}
+
+	return
+}
+
+func validateSequenceNumberRegisters(
+	accountPublicKeyCount uint32,
+	sequenceNumberRegisters map[string][]byte,
+) error {
+	if len(sequenceNumberRegisters) > int(accountPublicKeyCount-1) {
+		return fmt.Errorf("found %d sequence number registers when account public key count is %d", len(sequenceNumberRegisters), accountPublicKeyCount)
+	}
+
+	for i := 1; i < int(accountPublicKeyCount); i++ {
+		key := fmt.Sprintf(flow.SequenceNumberRegisterKeyPattern, i)
+		encoded, exists := sequenceNumberRegisters[key]
+		if exists {
+			sequenceNumber, err := flow.DecodeSequenceNumber(encoded)
+			if err != nil {
+				return fmt.Errorf("failed to decode sequence number register %s, %x: %w", key, encoded, err)
+			}
+			if sequenceNumber == 0 {
+				return fmt.Errorf("found sequence number 0 in sequence number register %s", key)
+			}
+			delete(sequenceNumberRegisters, key)
+		}
+	}
+
+	if len(sequenceNumberRegisters) != 0 {
+		return fmt.Errorf("found %d unexpected sequence number registers: %+v", len(sequenceNumberRegisters), slices.Collect(maps.Keys(sequenceNumberRegisters)))
+	}
+
+	return nil
+}
+
+func validateBatchPublicKeyRegisters(
+	storedKeyCount uint32,
+	batchPublicKeyRegisters map[string][]byte,
+) error {
+	if storedKeyCount == 1 {
+		if len(batchPublicKeyRegisters) > 0 {
+			return fmt.Errorf("found %d batch public key payloads while stored key count is %d", len(batchPublicKeyRegisters), storedKeyCount)
+		}
+		return nil
+	}
+
+	keyCount := 0
+
+	for batchNum := range len(batchPublicKeyRegisters) {
+		key := fmt.Sprintf(flow.BatchPublicKeyRegisterKeyPattern, batchNum)
+
+		encoded, exists := batchPublicKeyRegisters[key]
+		if !exists {
+			return fmt.Errorf("failed to find batch public key %s", key)
+		}
+
+		encodedKeys, err := decodeBatchPublicKey(encoded)
+		if err != nil {
+			return fmt.Errorf("failed to decode batch public key register %s, %x: %w", key, encoded, err)
+		}
+
+		batchCount := len(encodedKeys)
+		keyCount += batchCount
+
+		if batchCount == 0 {
+			return fmt.Errorf("found batch public key %s with 0 keys", key)
+		}
+
+		if batchNum == 0 {
+			if len(encodedKeys[0]) != 0 {
+				return fmt.Errorf("found unexpected key 0 at batch 0: %x", encodedKeys[0])
+			}
+
+			if len(encodedKeys) == 1 {
+				return fmt.Errorf("found only key 0 in batch public key 0")
+			}
+
+			encodedKeys = encodedKeys[1:]
+		}
+
+		for _, encodedKey := range encodedKeys {
+			_, err = flow.DecodeStoredPublicKey(encodedKey)
+			if err != nil {
+				return fmt.Errorf("failed to decode stored public key %x in register %s: %w", encodedKey, key, err)
+			}
+		}
+
+		if batchCount < maxPublicKeyCountInBatch && batchNum != len(batchPublicKeyRegisters)-1 {
+			return fmt.Errorf("batch public key %s has fewer than the max count in a batch: got %d keys, %d batches in total", key, batchCount, len(batchPublicKeyRegisters))
+		}
+	}
+
+	if keyCount != int(storedKeyCount) {
+		return fmt.Errorf("found %d stored keys in batch public key registers vs stored key count %d in key metadata", keyCount, storedKeyCount)
+	}
+
+	return nil
+}
+
+func validateKeyMetadata(
+	deduplicated bool,
+	accountPublicKeyCount uint32,
+	weightAndRevokedStatus []accountPublicKeyWeightAndRevokedStatus,
+	startKeyIndexForDigests uint32,
+	digests []uint64,
+	startKeyIndexForMapping uint32,
+	accountPublicKeyMappings []uint32,
+) error {
+	if len(weightAndRevokedStatus) != int(accountPublicKeyCount)-1 {
+		return fmt.Errorf("found %d weight and revoked statuses, expect %d", len(weightAndRevokedStatus), accountPublicKeyCount-1)
+	}
+
+	if len(digests) > maxStoredDigests {
+		return fmt.Errorf("found %d digests, expect max %d digests", len(digests), maxStoredDigests)
+	}
+
+	if len(digests) > int(accountPublicKeyCount) {
+		return fmt.Errorf("found %d digests, expect no more digests than account public key count %d", len(digests), accountPublicKeyCount)
+	}
+
+	if int(startKeyIndexForDigests)+len(digests) > int(accountPublicKeyCount) {
+		return fmt.Errorf("found %d digests at start index %d, expect the range to stay within account public key count %d", len(digests), startKeyIndexForDigests, accountPublicKeyCount)
+	}
+
+	if deduplicated {
+		if int(startKeyIndexForMapping)+len(accountPublicKeyMappings) != int(accountPublicKeyCount) {
+			return fmt.Errorf("found %d mappings at start index %d, expect %d",
+				len(accountPublicKeyMappings),
+				startKeyIndexForMapping,
+				accountPublicKeyCount,
+			)
+		}
+	} else {
+		if len(accountPublicKeyMappings) > 0 {
+			return fmt.Errorf("found %d account public key mappings for non-deduplicated account, expect 0", len(accountPublicKeyMappings))
+		}
+	}
+
+	return nil
+}
diff --git a/cmd/util/ledger/migrations/account_key_diff.go b/cmd/util/ledger/migrations/account_key_diff.go
new file mode 100644
index 00000000000..63a16eaebe7
--- /dev/null
+++ b/cmd/util/ledger/migrations/account_key_diff.go
@@ -0,0 +1,273 @@
+package migrations + +import ( + "encoding/json" + "fmt" + + "github.com/onflow/cadence/common" + + "github.com/onflow/crypto/hash" + + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/model/flow" +) + +type accountKeyDiffKind int + +const ( + accountKeyCountDiff accountKeyDiffKind = iota + accountKeyDiff +) + +var accountKeyDiffKindString = map[accountKeyDiffKind]string{ + accountKeyCountDiff: "key_count_diff", + accountKeyDiff: "key_diff", +} + +type accountKeyDiffProblem struct { + Address string + KeyIndex uint32 + Kind string + Msg string +} + +type accountKeyDiffErrorKind int + +const ( + accountKeyCountErrorKind accountKeyDiffErrorKind = iota + accountKeyErrorKind +) + +var accountKeyDiffErrorKindString = map[accountKeyDiffErrorKind]string{ + accountKeyCountErrorKind: "error_get_key_count_failed", + accountKeyErrorKind: "error_get_key_failed", +} + +type accountKeyDiffError struct { + Address string + Kind string + Msg string +} + +type AccountKeyDiffReporter struct { + address common.Address + chainID flow.ChainID + reportWriter reporters.ReportWriter +} + +func NewAccountKeyDiffReporter( + address common.Address, + chainID flow.ChainID, + rw reporters.ReportWriter, +) *AccountKeyDiffReporter { + return &AccountKeyDiffReporter{ + address: address, + chainID: chainID, + reportWriter: rw, + } +} + +func (akd *AccountKeyDiffReporter) DiffKeys( + accountRegistersV3, accountRegistersV4 registers.Registers, +) { + if akd.address == common.ZeroAddress { + return + } + + accountsV3 := newAccountsWithAccountKeyV3(accountRegistersV3) + + accountsV4 := newAccountsWithAccountKeyV4(accountRegistersV4) + + countV3, err := accountsV3.getAccountPublicKeyCount(flow.Address(akd.address)) + if err != nil { + akd.reportWriter.Write( + accountKeyDiffError{ + Address: akd.address.Hex(), + Kind: accountKeyDiffErrorKindString[accountKeyCountErrorKind], + Msg: fmt.Sprintf("failed to get account public key count v3: %s", err), + }) + return + } + + countV4, err := accountsV4.getAccountPublicKeyCount(flow.Address(akd.address)) + if err != nil { + akd.reportWriter.Write( + accountKeyDiffError{ + Address: akd.address.Hex(), + Kind: accountKeyDiffErrorKindString[accountKeyCountErrorKind], + Msg: fmt.Sprintf("failed to get account public key count v4: %s", err), + }) + return + } + + if countV3 != countV4 { + akd.reportWriter.Write( + accountKeyDiffProblem{ + Address: akd.address.Hex(), + Kind: accountKeyDiffKindString[accountKeyCountDiff], + Msg: fmt.Sprintf("%d keys in v3, %d keys in v4", countV3, countV4), + }) + return + } + + for keyIndex := range countV3 { + keyV3, err := accountsV3.getAccountPublicKey(flow.Address(akd.address), keyIndex) + if err != nil { + akd.reportWriter.Write( + accountKeyDiffError{ + Address: akd.address.Hex(), + Kind: accountKeyDiffErrorKindString[accountKeyErrorKind], + Msg: fmt.Sprintf("failed to get account public key v3 at key index %d: %s", keyIndex, err), + }) + continue + } + + keyV4, err := accountsV4.getAccountPublicKey(flow.Address(akd.address), keyIndex) + if err != nil { + akd.reportWriter.Write( + accountKeyDiffError{ + Address: akd.address.Hex(), + Kind: accountKeyDiffErrorKindString[accountKeyErrorKind], + Msg: fmt.Sprintf("failed to get account public key v4 at key index %d: %s", keyIndex, err), + }) + continue + } + + err = equal(keyV3, keyV4) + if err != nil { + encodedKeyV3, _ 
:= json.Marshal(keyV3)
+			encodedKeyV4, _ := json.Marshal(keyV4)
+
+			akd.reportWriter.Write(
+				accountKeyDiffProblem{
+					Address:  akd.address.Hex(),
+					KeyIndex: keyIndex,
+					Kind:     accountKeyDiffKindString[accountKeyDiff],
+					Msg:      fmt.Sprintf("v3: %s, v4: %s: %s", encodedKeyV3, encodedKeyV4, err.Error()),
+				})
+		}
+	}
+}
+
+type accountsWithAccountKeysV3 struct {
+	a *environment.StatefulAccounts
+}
+
+func newAccountsWithAccountKeyV3(regs registers.Registers) *accountsWithAccountKeysV3 {
+	return &accountsWithAccountKeysV3{
+		a: newStatefulAccounts(regs),
+	}
+}
+
+func (akv3 *accountsWithAccountKeysV3) getAccountPublicKeyCount(address flow.Address) (uint32, error) {
+	id := flow.AccountStatusRegisterID(address)
+
+	statusBytes, err := akv3.a.GetValue(id)
+	if err != nil {
+		return 0, fmt.Errorf("failed to load account status for account %s: %w", address, err)
+	}
+	if len(statusBytes) == 0 {
+		return 0, fmt.Errorf("account status register is empty for account %s", address)
+	}
+
+	as, err := environment.AccountStatusFromBytes(statusBytes)
+	if err != nil {
+		return 0, fmt.Errorf("failed to create account status from bytes for account %s: %w", address, err)
+	}
+
+	return as.AccountPublicKeyCount(), nil
+}
+
+func (akv3 *accountsWithAccountKeysV3) getAccountPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) {
+	id := flow.RegisterID{
+		Owner: string(address.Bytes()),
+		Key:   fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, keyIndex),
+	}
+
+	publicKeyBytes, err := akv3.a.GetValue(id)
+	if err != nil {
+		return flow.AccountPublicKey{}, fmt.Errorf("failed to load account public key %d for account %s: %w", keyIndex, address, err)
+	}
+	if len(publicKeyBytes) == 0 {
+		return flow.AccountPublicKey{}, fmt.Errorf("account public key %d is empty for account %s", keyIndex, address)
+	}
+
+	key, err := flow.DecodeAccountPublicKey(publicKeyBytes, keyIndex)
+	if err != nil {
+		return flow.AccountPublicKey{}, fmt.Errorf("failed to decode account public key %d for account %s: %w", keyIndex, address, err)
+	}
+
+	return key, nil
+}
+
+type accountsWithAccountKeysV4 struct {
+	a *environment.StatefulAccounts
+}
+
+func newAccountsWithAccountKeyV4(regs registers.Registers) *accountsWithAccountKeysV4 {
+	return &accountsWithAccountKeysV4{
+		a: newStatefulAccounts(regs),
+	}
+}
+
+func (akv4 *accountsWithAccountKeysV4) getAccountPublicKeyCount(address flow.Address) (uint32, error) {
+	return akv4.a.GetAccountPublicKeyCount(address)
+}
+
+func (akv4 *accountsWithAccountKeysV4) getAccountPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) {
+	return akv4.a.GetAccountPublicKey(address, keyIndex)
+}
+
+func newStatefulAccounts(
+	regs registers.Registers,
+) *environment.StatefulAccounts {
+	// Create a new transaction state with a dummy hasher
+	// because we do not need spock proofs for migrations.
+	transactionState := state.NewTransactionStateFromExecutionState(
+		state.NewExecutionStateWithSpockStateHasher(
+			registers.StorageSnapshot{
+				Registers: regs,
+			},
+			state.DefaultParameters(),
+			func() hash.Hasher {
+				return dummyHasher{}
+			},
+		),
+	)
+	return environment.NewAccounts(transactionState)
+}
+
+// equal returns nil if keyV3 and keyV4 match in every compared field,
+// or an error naming the first field that differs.
+func equal(keyV3, keyV4 flow.AccountPublicKey) error {
+	if keyV3.Index != keyV4.Index {
+		return fmt.Errorf("account public key index differs: v3 %v, v4 %v", keyV3.Index, keyV4.Index)
+	}
+
+	if !keyV3.PublicKey.Equals(keyV4.PublicKey) {
+		return fmt.Errorf("account public key differs: v3 %v, v4 %v", keyV3.PublicKey, keyV4.PublicKey)
+	}
+
+	if keyV3.SignAlgo != keyV4.SignAlgo {
+		return fmt.Errorf("account public key sign algo differs: v3 %v, v4 %v", keyV3.SignAlgo, keyV4.SignAlgo)
+	}
+
+	if keyV3.HashAlgo != keyV4.HashAlgo {
+		return fmt.Errorf("account public key hash algo differs: v3 %v, v4 %v", keyV3.HashAlgo, keyV4.HashAlgo)
+	}
+
+	if keyV3.SeqNumber != keyV4.SeqNumber {
+		return fmt.Errorf("account public key sequence number differs: v3 %v, v4 %v", keyV3.SeqNumber, keyV4.SeqNumber)
+	}
+
+	if keyV3.Weight != keyV4.Weight {
+		return fmt.Errorf("account public key weight differs: v3 %v, v4 %v", keyV3.Weight, keyV4.Weight)
+	}
+
+	if keyV3.Revoked != keyV4.Revoked {
+		return fmt.Errorf("account public key revoked status differs: v3 %v, v4 %v", keyV3.Revoked, keyV4.Revoked)
+	}
+
+	return nil
+}
diff --git a/cmd/util/ledger/migrations/account_key_diff_test.go b/cmd/util/ledger/migrations/account_key_diff_test.go
new file mode 100644
index 00000000000..45ae3444bf8
--- /dev/null
+++ b/cmd/util/ledger/migrations/account_key_diff_test.go
@@ -0,0 +1,715 @@
+package migrations
+
+import (
+	"fmt"
+	"math"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/flow-go-sdk/crypto"
+
+	"github.com/onflow/flow-go/cmd/util/ledger/util/registers"
+	"github.com/onflow/flow-go/fvm/environment"
+	accountkeymetadata "github.com/onflow/flow-go/fvm/environment/account-key-metadata"
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/convert"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+func TestAccountPublicKeyDiff(t *testing.T) {
+	address := flow.BytesToAddress([]byte{0x01})
+	chainID := flow.Testnet
+
+	t.Run("0 key", func(t *testing.T) {
+		accountStatusV3Bytes := environment.NewAccountStatus().ToBytes()
+		accountStatusV3Bytes[0] = 0
+		registersV3, err := registers.NewByAccountFromPayloads([]*ledger.Payload{
+			newPayload(address, flow.AccountStatusKey, accountStatusV3Bytes),
+		})
+		require.NoError(t, err)
+
+		accountStatusV4Bytes := environment.NewAccountStatus().ToBytes()
+		registersV4, err := registers.NewByAccountFromPayloads([]*ledger.Payload{
+			newPayload(address, flow.AccountStatusKey, accountStatusV4Bytes),
+		})
+		require.NoError(t, err)
+
+		reportWriter := newMemoryReportWriter()
+
+		diffReporter := NewAccountKeyDiffReporter(common.Address(address), chainID, reportWriter)
+		diffReporter.DiffKeys(registersV3, registersV4)
+
+		// No diff
+		require.Equal(t, 0, len(reportWriter.data))
+	})
+
+	t.Run("1 key", func(t *testing.T) {
+		key0 := newAccountPublicKey(t, 1000)
+		storedKey0 := accountPublicKeyToStoredKey(key0)
+
+		encodedKey0, err := flow.EncodeAccountPublicKey(key0)
+		require.NoError(t, err)
+
+		encodedStoredKey0, err := flow.EncodeStoredPublicKey(storedKey0)
+		require.NoError(t, err)
+
+		accountStatusV3 := environment.NewAccountStatus()
+		accountStatusV3.SetAccountPublicKeyCount(1)
+		
accountStatusV3Bytes := accountStatusV3.ToBytes() + accountStatusV3Bytes[0] = 0 + + registersV3, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV3Bytes), + newPayload(address, legacyAccountPublicKey0RegisterKey, encodedKey0), + }) + require.NoError(t, err) + + accountStatusV4 := environment.NewAccountStatus() + // Append key 0 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key0.Revoked, + uint16(key0.Weight), + encodedStoredKey0, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + return nil, fmt.Errorf("don't expect getStoredKey called, got %d", i) + }, + ) + require.NoError(t, err) + + registersV4, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV4.ToBytes()), + newPayload(address, "apk_0", encodedKey0), + }) + require.NoError(t, err) + + reportWriter := newMemoryReportWriter() + + diffReporter := NewAccountKeyDiffReporter(common.Address(address), chainID, reportWriter) + diffReporter.DiffKeys(registersV3, registersV4) + + // No diff + require.Equal(t, 0, len(reportWriter.data)) + }) + + t.Run("2 keys", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + storedKey0 := accountPublicKeyToStoredKey(key0) + + encodedKey0, err := flow.EncodeAccountPublicKey(key0) + require.NoError(t, err) + + encodedStoredKey0, err := flow.EncodeStoredPublicKey(storedKey0) + require.NoError(t, err) + + key1 := newAccountPublicKey(t, 1) + storedKey1 := accountPublicKeyToStoredKey(key1) + + encodedKey1, err := flow.EncodeAccountPublicKey(key1) + require.NoError(t, err) + + encodedStoredKey1, err := flow.EncodeStoredPublicKey(storedKey1) + require.NoError(t, err) + + accountStatusV3 := environment.NewAccountStatus() + accountStatusV3.SetAccountPublicKeyCount(2) + accountStatusV3Bytes := accountStatusV3.ToBytes() + accountStatusV3Bytes[0] = 0 + + registersV3, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV3Bytes), + newPayload(address, legacyAccountPublicKey0RegisterKey, encodedKey0), + newPayload(address, fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedKey1), + }) + require.NoError(t, err) + + accountStatusV4 := environment.NewAccountStatus() + // Append key 0 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key0.Revoked, + uint16(key0.Weight), + encodedStoredKey0, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + return nil, fmt.Errorf("don't expect getStoredKey called, got %d", i) + }, + ) + require.NoError(t, err) + // Append key 1 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key1.Revoked, + uint16(key1.Weight), + encodedStoredKey1, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + if i == 0 { + return encodedStoredKey0, nil + } + return nil, fmt.Errorf("expect getStoredKey for key index 0, got %d", i) + }, + ) + require.NoError(t, err) + + registersV4, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV4.ToBytes()), + newPayload(address, "apk_0", encodedKey0), + newPayload(address, "pk_b0", newBatchPublicKey(t, []*flow.StoredPublicKey{nil, &storedKey1})), + }) + require.NoError(t, err) + + reportWriter := 
newMemoryReportWriter() + + diffReporter := NewAccountKeyDiffReporter(common.Address(address), chainID, reportWriter) + diffReporter.DiffKeys(registersV3, registersV4) + + // No diff + require.Equal(t, 0, len(reportWriter.data)) + }) + + t.Run("2 keys, diff for public key", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + storedKey0 := accountPublicKeyToStoredKey(key0) + + encodedKey0, err := flow.EncodeAccountPublicKey(key0) + require.NoError(t, err) + + encodedStoredKey0, err := flow.EncodeStoredPublicKey(storedKey0) + require.NoError(t, err) + + key1a := newAccountPublicKey(t, 1) + + encodedKey1a, err := flow.EncodeAccountPublicKey(key1a) + require.NoError(t, err) + + key1b := newAccountPublicKey(t, 1) + storedKey1b := accountPublicKeyToStoredKey(key1b) + + encodedStoredKey1b, err := flow.EncodeStoredPublicKey(storedKey1b) + require.NoError(t, err) + + accountStatusV3 := environment.NewAccountStatus() + accountStatusV3.SetAccountPublicKeyCount(2) + accountStatusV3Bytes := accountStatusV3.ToBytes() + accountStatusV3Bytes[0] = 0 + + registersV3, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV3Bytes), + newPayload(address, legacyAccountPublicKey0RegisterKey, encodedKey0), + newPayload(address, fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedKey1a), + }) + require.NoError(t, err) + + accountStatusV4 := environment.NewAccountStatus() + // Append key 0 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key0.Revoked, + uint16(key0.Weight), + encodedStoredKey0, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + return nil, fmt.Errorf("don't expect getStoredKey called, got %d", i) + }, + ) + require.NoError(t, err) + // Append key 1 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key1b.Revoked, + uint16(key1b.Weight), + encodedStoredKey1b, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + if i == 0 { + return encodedStoredKey0, nil + } + return nil, fmt.Errorf("expect getStoredKey for key index 0, got %d", i) + }, + ) + require.NoError(t, err) + + registersV4, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV4.ToBytes()), + newPayload(address, "apk_0", encodedKey0), + newPayload(address, "pk_b0", newBatchPublicKey(t, []*flow.StoredPublicKey{nil, &storedKey1b})), + }) + require.NoError(t, err) + + reportWriter := newMemoryReportWriter() + + diffReporter := NewAccountKeyDiffReporter(common.Address(address), chainID, reportWriter) + diffReporter.DiffKeys(registersV3, registersV4) + + require.Equal(t, 1, len(reportWriter.data)) + require.Contains(t, reportWriter.data[0].(accountKeyDiffProblem).Msg, "account public key diff") + }) + + t.Run("2 keys, diff for weight", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + storedKey0 := accountPublicKeyToStoredKey(key0) + + encodedKey0, err := flow.EncodeAccountPublicKey(key0) + require.NoError(t, err) + + encodedStoredKey0, err := flow.EncodeStoredPublicKey(storedKey0) + require.NoError(t, err) + + key1 := newAccountPublicKey(t, 1) + storedKey1 := accountPublicKeyToStoredKey(key1) + + encodedKey1, err := flow.EncodeAccountPublicKey(key1) + require.NoError(t, err) + + encodedStoredKey1, err := flow.EncodeStoredPublicKey(storedKey1) + require.NoError(t, err) + + accountStatusV3 := 
environment.NewAccountStatus() + accountStatusV3.SetAccountPublicKeyCount(2) + accountStatusV3Bytes := accountStatusV3.ToBytes() + accountStatusV3Bytes[0] = 0 + + registersV3, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV3Bytes), + newPayload(address, legacyAccountPublicKey0RegisterKey, encodedKey0), + newPayload(address, fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedKey1), + }) + require.NoError(t, err) + + accountStatusV4 := environment.NewAccountStatus() + // Append key 0 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key0.Revoked, + uint16(key0.Weight), + encodedStoredKey0, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + return nil, fmt.Errorf("don't expect getStoredKey called, got %d", i) + }, + ) + require.NoError(t, err) + // Append key 1 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key1.Revoked, + uint16(key1.Weight+1), + encodedStoredKey1, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + if i == 0 { + return encodedStoredKey0, nil + } + return nil, fmt.Errorf("expect getStoredKey for key index 0, got %d", i) + }, + ) + require.NoError(t, err) + + registersV4, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV4.ToBytes()), + newPayload(address, "apk_0", encodedKey0), + newPayload(address, "pk_b0", newBatchPublicKey(t, []*flow.StoredPublicKey{nil, &storedKey1})), + }) + require.NoError(t, err) + + reportWriter := newMemoryReportWriter() + + diffReporter := NewAccountKeyDiffReporter(common.Address(address), chainID, reportWriter) + diffReporter.DiffKeys(registersV3, registersV4) + + require.Equal(t, 1, len(reportWriter.data)) + require.Contains(t, reportWriter.data[0].(accountKeyDiffProblem).Msg, "weight") + }) + + t.Run("2 keys, diff for revoked status", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + storedKey0 := accountPublicKeyToStoredKey(key0) + + encodedKey0, err := flow.EncodeAccountPublicKey(key0) + require.NoError(t, err) + + encodedStoredKey0, err := flow.EncodeStoredPublicKey(storedKey0) + require.NoError(t, err) + + key1 := newAccountPublicKey(t, 1) + storedKey1 := accountPublicKeyToStoredKey(key1) + + encodedKey1, err := flow.EncodeAccountPublicKey(key1) + require.NoError(t, err) + + encodedStoredKey1, err := flow.EncodeStoredPublicKey(storedKey1) + require.NoError(t, err) + + accountStatusV3 := environment.NewAccountStatus() + accountStatusV3.SetAccountPublicKeyCount(2) + accountStatusV3Bytes := accountStatusV3.ToBytes() + accountStatusV3Bytes[0] = 0 + + registersV3, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV3Bytes), + newPayload(address, legacyAccountPublicKey0RegisterKey, encodedKey0), + newPayload(address, fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedKey1), + }) + require.NoError(t, err) + + accountStatusV4 := environment.NewAccountStatus() + // Append key 0 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key0.Revoked, + uint16(key0.Weight), + encodedStoredKey0, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + return nil, fmt.Errorf("don't expect getStoredKey called, got %d", i) + }, + ) + require.NoError(t, err) 
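+		// For every key after the first, AppendAccountPublicKeyMetadata receives a
+		// getStoredKey callback so it can fetch previously stored keys by index
+		// (only index 0 exists here), presumably for digest-based deduplication.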
+ // Append key 1 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + !key1.Revoked, + uint16(key1.Weight), + encodedStoredKey1, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + if i == 0 { + return encodedStoredKey0, nil + } + return nil, fmt.Errorf("expect getStoredKey for key index 0, got %d", i) + }, + ) + require.NoError(t, err) + + registersV4, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV4.ToBytes()), + newPayload(address, "apk_0", encodedKey0), + newPayload(address, "pk_b0", newBatchPublicKey(t, []*flow.StoredPublicKey{nil, &storedKey1})), + }) + require.NoError(t, err) + + reportWriter := newMemoryReportWriter() + + diffReporter := NewAccountKeyDiffReporter(common.Address(address), chainID, reportWriter) + diffReporter.DiffKeys(registersV3, registersV4) + + require.Equal(t, 1, len(reportWriter.data)) + require.Contains(t, reportWriter.data[0].(accountKeyDiffProblem).Msg, "revoked status") + }) + + t.Run("2 keys, diff for sequence number", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + storedKey0 := accountPublicKeyToStoredKey(key0) + + encodedKey0, err := flow.EncodeAccountPublicKey(key0) + require.NoError(t, err) + + encodedStoredKey0, err := flow.EncodeStoredPublicKey(storedKey0) + require.NoError(t, err) + + key1 := newAccountPublicKey(t, 1) + key1.SeqNumber = 1 + storedKey1 := accountPublicKeyToStoredKey(key1) + + encodedKey1, err := flow.EncodeAccountPublicKey(key1) + require.NoError(t, err) + + encodedStoredKey1, err := flow.EncodeStoredPublicKey(storedKey1) + require.NoError(t, err) + + accountStatusV3 := environment.NewAccountStatus() + accountStatusV3.SetAccountPublicKeyCount(2) + accountStatusV3Bytes := accountStatusV3.ToBytes() + accountStatusV3Bytes[0] = 0 + + registersV3, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV3Bytes), + newPayload(address, legacyAccountPublicKey0RegisterKey, encodedKey0), + newPayload(address, fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedKey1), + }) + require.NoError(t, err) + + accountStatusV4 := environment.NewAccountStatus() + // Append key 0 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key0.Revoked, + uint16(key0.Weight), + encodedStoredKey0, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + return nil, fmt.Errorf("don't expect getStoredKey called, got %d", i) + }, + ) + require.NoError(t, err) + // Append key 1 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key1.Revoked, + uint16(key1.Weight), + encodedStoredKey1, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + if i == 0 { + return encodedStoredKey0, nil + } + return nil, fmt.Errorf("expect getStoredKey for key index 0, got %d", i) + }, + ) + require.NoError(t, err) + + registersV4, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV4.ToBytes()), + newPayload(address, "apk_0", encodedKey0), + newPayload(address, "pk_b0", newBatchPublicKey(t, []*flow.StoredPublicKey{nil, &storedKey1})), + }) + require.NoError(t, err) + + reportWriter := newMemoryReportWriter() + + diffReporter := NewAccountKeyDiffReporter(common.Address(address), chainID, reportWriter) + 
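+		// DiffKeys decodes the keys from both register sets and writes one report
+		// entry per mismatch; only the revoked flag of key 1 differs here, so a
+		// single "revoked status" entry is expected below.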
diffReporter.DiffKeys(registersV3, registersV4) + + require.Equal(t, 1, len(reportWriter.data)) + require.Contains(t, reportWriter.data[0].(accountKeyDiffProblem).Msg, "sequence number") + }) + + t.Run("2 keys, diff for hash algo", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + storedKey0 := accountPublicKeyToStoredKey(key0) + + encodedKey0, err := flow.EncodeAccountPublicKey(key0) + require.NoError(t, err) + + encodedStoredKey0, err := flow.EncodeStoredPublicKey(storedKey0) + require.NoError(t, err) + + key1 := newAccountPublicKey(t, 1) + storedKey1 := accountPublicKeyToStoredKey(key1) + storedKey1.HashAlgo = crypto.SHA3_384 + + encodedKey1, err := flow.EncodeAccountPublicKey(key1) + require.NoError(t, err) + + encodedStoredKey1, err := flow.EncodeStoredPublicKey(storedKey1) + require.NoError(t, err) + + accountStatusV3 := environment.NewAccountStatus() + accountStatusV3.SetAccountPublicKeyCount(2) + accountStatusV3Bytes := accountStatusV3.ToBytes() + accountStatusV3Bytes[0] = 0 + + registersV3, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV3Bytes), + newPayload(address, legacyAccountPublicKey0RegisterKey, encodedKey0), + newPayload(address, fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedKey1), + }) + require.NoError(t, err) + + accountStatusV4 := environment.NewAccountStatus() + // Append key 0 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key0.Revoked, + uint16(key0.Weight), + encodedStoredKey0, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + return nil, fmt.Errorf("don't expect getStoredKey called, got %d", i) + }, + ) + require.NoError(t, err) + // Append key 1 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key1.Revoked, + uint16(key1.Weight), + encodedStoredKey1, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + if i == 0 { + return encodedStoredKey0, nil + } + return nil, fmt.Errorf("expect getStoredKey for key index 0, got %d", i) + }, + ) + require.NoError(t, err) + + registersV4, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV4.ToBytes()), + newPayload(address, "apk_0", encodedKey0), + newPayload(address, "pk_b0", newBatchPublicKey(t, []*flow.StoredPublicKey{nil, &storedKey1})), + }) + require.NoError(t, err) + + reportWriter := newMemoryReportWriter() + + diffReporter := NewAccountKeyDiffReporter(common.Address(address), chainID, reportWriter) + diffReporter.DiffKeys(registersV3, registersV4) + + require.Equal(t, 1, len(reportWriter.data)) + require.Contains(t, reportWriter.data[0].(accountKeyDiffProblem).Msg, "hash algo") + }) + + t.Run("2 duplicate keys", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + storedKey0 := accountPublicKeyToStoredKey(key0) + + encodedKey0, err := flow.EncodeAccountPublicKey(key0) + require.NoError(t, err) + + encodedStoredKey0, err := flow.EncodeStoredPublicKey(storedKey0) + require.NoError(t, err) + + key1 := key0 + key1.SeqNumber = 1 + storedKey1 := accountPublicKeyToStoredKey(key1) + + encodedKey1, err := flow.EncodeAccountPublicKey(key1) + require.NoError(t, err) + + encodedStoredKey1, err := flow.EncodeStoredPublicKey(storedKey1) + require.NoError(t, err) + + accountStatusV3 := environment.NewAccountStatus() + accountStatusV3.SetAccountPublicKeyCount(2) + 
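+		// key1 is a byte-for-byte duplicate of key0 (only its sequence number
+		// differs), so the v4 registers below store the key bytes once in "apk_0"
+		// and key 1 only gets its own sequence-number register "sn_1".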
accountStatusV3Bytes := accountStatusV3.ToBytes() + accountStatusV3Bytes[0] = 0 + + registersV3, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV3Bytes), + newPayload(address, fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 0), encodedKey0), + newPayload(address, fmt.Sprintf(legacyAccountPublicKeyRegisterKeyPattern, 1), encodedKey1), + }) + require.NoError(t, err) + + accountStatusV4 := environment.NewAccountStatus() + // Append key 0 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key0.Revoked, + uint16(key0.Weight), + encodedStoredKey0, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + return nil, fmt.Errorf("don't expect getStoredKey called, got %d", i) + }, + ) + require.NoError(t, err) + // Append key 1 + _, _, err = accountStatusV4.AppendAccountPublicKeyMetadata( + key1.Revoked, + uint16(key1.Weight), + encodedStoredKey1, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(i uint32) ([]byte, error) { + if i == 0 { + return encodedStoredKey0, nil + } + return nil, fmt.Errorf("expect getStoredKey for key index 0, got %d", i) + }, + ) + require.NoError(t, err) + + encodedSeqNumber, err := flow.EncodeSequenceNumber(key1.SeqNumber) + require.NoError(t, err) + + registersV4, err := registers.NewByAccountFromPayloads([]*ledger.Payload{ + newPayload(address, flow.AccountStatusKey, accountStatusV4.ToBytes()), + newPayload(address, "apk_0", encodedKey0), + newPayload(address, "sn_1", encodedSeqNumber), + }) + require.NoError(t, err) + + reportWriter := newMemoryReportWriter() + + diffReporter := NewAccountKeyDiffReporter(common.Address(address), chainID, reportWriter) + diffReporter.DiffKeys(registersV3, registersV4) + + // No diff + require.Equal(t, 0, len(reportWriter.data)) + }) +} + +func newPayload(owner flow.Address, key string, value []byte) *ledger.Payload { + registerID := flow.NewRegisterID(owner, key) + ledgerKey := convert.RegisterIDToLedgerKey(registerID) + return ledger.NewPayload(ledgerKey, value) +} + +type memoryReportWriter struct { + data []any +} + +func newMemoryReportWriter() *memoryReportWriter { + return &memoryReportWriter{} +} + +func (w *memoryReportWriter) Write(dataPoint interface{}) { + w.data = append(w.data, dataPoint) +} + +func (w *memoryReportWriter) Close() { +} + +func accountPublicKeyToStoredKey(apk flow.AccountPublicKey) flow.StoredPublicKey { + return flow.StoredPublicKey{ + PublicKey: apk.PublicKey, + SignAlgo: apk.SignAlgo, + HashAlgo: apk.HashAlgo, + } +} + +func newBatchPublicKey(t *testing.T, storedPublicKeys []*flow.StoredPublicKey) []byte { + var buf []byte + var err error + + for _, k := range storedPublicKeys { + var encodedKey []byte + + if k != nil { + encodedKey, err = flow.EncodeStoredPublicKey(*k) + require.NoError(t, err) + } + + b, err := encodeBatchedPublicKey(encodedKey) + require.NoError(t, err) + + buf = append(buf, b...) 
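+		// Each batch entry is length-prefixed by encodeBatchedPublicKey below;
+		// a nil stored key contributes a single zero byte, marking a key that
+		// lives in another register (e.g. key 0 in "apk_0").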
+ } + return buf +} + +func encodeBatchedPublicKey(encodedPublicKey []byte) ([]byte, error) { + const maxEncodedKeySize = math.MaxUint8 + + if len(encodedPublicKey) > maxEncodedKeySize { + return nil, fmt.Errorf("failed to encode batched public key: encoded key size is %d bytes, exceeded max size %d", len(encodedPublicKey), maxEncodedKeySize) + } + + buf := make([]byte, 1+len(encodedPublicKey)) + buf[0] = byte(len(encodedPublicKey)) + copy(buf[1:], encodedPublicKey) + + return buf, nil +} diff --git a/cmd/util/ledger/migrations/account_migration.go b/cmd/util/ledger/migrations/account_migration.go deleted file mode 100644 index 51c123712f1..00000000000 --- a/cmd/util/ledger/migrations/account_migration.go +++ /dev/null @@ -1,99 +0,0 @@ -package migrations - -import ( - "fmt" - - "github.com/rs/zerolog/log" - - "github.com/onflow/flow-go/fvm/environment" - "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/model/flow" -) - -func MigrateAccountUsage(payloads []ledger.Payload, nWorker int) ([]ledger.Payload, error) { - return MigrateByAccount(AccountUsageMigrator{}, payloads, nWorker) -} - -func payloadSize(key ledger.Key, payload ledger.Payload) (uint64, error) { - id, err := KeyToRegisterID(key) - if err != nil { - return 0, err - } - - return uint64(registerSize(id, payload)), nil -} - -func isAccountKey(key ledger.Key) bool { - return string(key.KeyParts[1].Value) == flow.AccountStatusKey -} - -type AccountUsageMigrator struct{} - -// AccountUsageMigrator iterate through each payload, and calculate the storage usage -// and update the accoutns status with the updated storage usage -func (m AccountUsageMigrator) MigratePayloads(account string, payloads []ledger.Payload) ([]ledger.Payload, error) { - var status *environment.AccountStatus - var statusIndex int - totalSize := uint64(0) - for i, payload := range payloads { - key, err := payload.Key() - if err != nil { - return nil, err - } - if isAccountKey(key) { - statusIndex = i - status, err = environment.AccountStatusFromBytes(payload.Value()) - if err != nil { - return nil, fmt.Errorf("could not parse account status: %w", err) - } - - } - - size, err := payloadSize(key, payload) - if err != nil { - return nil, err - } - totalSize += size - } - - err := compareUsage(status, totalSize) - if err != nil { - log.Error().Msgf("%v", err) - } - - if status == nil { - return nil, fmt.Errorf("could not find account status for account %v", account) - } - - // update storage used - status.SetStorageUsed(totalSize) - - newValue := status.ToBytes() - newPayload, err := newPayloadWithValue(payloads[statusIndex], newValue) - if err != nil { - return nil, fmt.Errorf("cannot create new payload with value: %w", err) - } - - payloads[statusIndex] = newPayload - - return payloads, nil -} - -func compareUsage(status *environment.AccountStatus, totalSize uint64) error { - oldSize := status.StorageUsed() - if oldSize != totalSize { - return fmt.Errorf("old size: %v, new size: %v", oldSize, totalSize) - } - return nil -} - -// newPayloadWithValue returns a new payload with the key from the given payload, and -// the value from the argument -func newPayloadWithValue(payload ledger.Payload, value ledger.Value) (ledger.Payload, error) { - key, err := payload.Key() - if err != nil { - return ledger.Payload{}, err - } - newPayload := ledger.NewPayload(key, payload.Value()) - return *newPayload, nil -} diff --git a/cmd/util/ledger/migrations/account_size_filter_migration.go b/cmd/util/ledger/migrations/account_size_filter_migration.go new file mode 100644 
index 00000000000..e63aae287ac --- /dev/null +++ b/cmd/util/ledger/migrations/account_size_filter_migration.go @@ -0,0 +1,93 @@ +package migrations + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" +) + +func NewAccountSizeFilterMigration( + maxAccountSize uint64, + exceptions map[string]struct{}, + log zerolog.Logger, +) RegistersMigration { + + if maxAccountSize == 0 { + return nil + } + + return func(registersByAccount *registers.ByAccount) error { + + type accountInfo struct { + count int + size uint64 + } + payloadCountByAddress := make(map[string]accountInfo) + + err := registersByAccount.ForEach(func(owner string, key string, value []byte) error { + + info := payloadCountByAddress[owner] + info.count++ + info.size += uint64(len(value)) + payloadCountByAddress[owner] = info + + return nil + }) + if err != nil { + return err + } + + for address, info := range payloadCountByAddress { + log.Debug().Msgf( + "address %x has %d payloads and a total size of %s", + address, + info.count, + ByteCountIEC(int64(info.size)), + ) + + if _, ok := exceptions[address]; !ok && info.size > maxAccountSize { + log.Warn().Msgf( + "dropping payloads of account %x. size of payloads %s exceeds max size %s", + address, + ByteCountIEC(int64(info.size)), + ByteCountIEC(int64(maxAccountSize)), + ) + } + } + + return registersByAccount.ForEachAccount( + func(accountRegisters *registers.AccountRegisters) error { + owner := accountRegisters.Owner() + + if _, ok := exceptions[owner]; ok { + return nil + } + + info := payloadCountByAddress[owner] + if info.size <= maxAccountSize { + return nil + } + + return accountRegisters.ForEach(func(owner, key string, _ []byte) error { + return accountRegisters.Set(owner, key, nil) + }) + }, + ) + } +} + +func ByteCountIEC(b int64) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := int64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.1f %ciB", + float64(b)/float64(div), "KMGTPE"[exp]) +} diff --git a/cmd/util/ledger/migrations/account_storage_migration.go b/cmd/util/ledger/migrations/account_storage_migration.go new file mode 100644 index 00000000000..7b80f7560d6 --- /dev/null +++ b/cmd/util/ledger/migrations/account_storage_migration.go @@ -0,0 +1,68 @@ +package migrations + +import ( + "fmt" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/model/flow" +) + +func NewAccountStorageMigration( + address common.Address, + log zerolog.Logger, + chainID flow.ChainID, + migrate func(*runtime.Storage, *interpreter.Interpreter) error, +) RegistersMigration { + + return func(registersByAccount *registers.ByAccount) error { + + // Create an interpreter migration runtime + migrationRuntime, err := NewInterpreterMigrationRuntime( + registersByAccount, + chainID, + InterpreterMigrationRuntimeConfig{}, + ) + if err != nil { + return fmt.Errorf("failed to create interpreter migration runtime: %w", err) + } + + // Run the migration + storage := migrationRuntime.Storage + inter := migrationRuntime.Interpreter + + err = migrate(storage, inter) + if err != nil { + return fmt.Errorf("failed to migrate storage: %w", err) + } + + // Commit the changes + err = storage.NondeterministicCommit(inter, false) + if err != nil { + return fmt.Errorf("failed to 
commit changes: %w", err)
+		}
+
+		// Check the health of the storage
+		err = storage.CheckHealth()
+		if err != nil {
+			log.Err(err).Msg("storage health check failed")
+		}
+
+		// Commit/finalize the transaction
+
+		expectedAddresses := map[flow.Address]struct{}{
+			flow.Address(address): {},
+		}
+
+		err = migrationRuntime.Commit(expectedAddresses, log)
+		if err != nil {
+			return fmt.Errorf("failed to commit: %w", err)
+		}
+
+		return nil
+	}
+}
diff --git a/cmd/util/ledger/migrations/add_key_migration.go b/cmd/util/ledger/migrations/add_key_migration.go
new file mode 100644
index 00000000000..2fcca8db493
--- /dev/null
+++ b/cmd/util/ledger/migrations/add_key_migration.go
@@ -0,0 +1,406 @@
+package migrations
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/crypto/hash"
+
+	"github.com/onflow/flow-go-sdk/crypto"
+
+	"github.com/onflow/flow-go/cmd/util/ledger/reporters"
+	"github.com/onflow/flow-go/cmd/util/ledger/util/registers"
+	"github.com/onflow/flow-go/fvm"
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// This migration is not safe to run on actual networks.
+// It is used to gain control over system accounts so that they can be tested
+// in a copied environment. When this is needed, it is best to create a branch
+// where this variable is set to true, and use that temporary branch to run migrations.
+const IAmSureIWantToRunThisMigration = false
+
+// The list of mainnet node addresses might need updating. Please check it and then set this to true.
+const IHaveCheckedTheMainnetNodeAddressesForCorrectness = false
+
+// AddKeyMigration adds a new key to the core contract accounts and mainnet node accounts
+type AddKeyMigration struct {
+	log zerolog.Logger
+
+	accountsToAddKeyTo map[common.Address]AddKeyMigrationAccountPublicKeyData
+	chainID            flow.ChainID
+
+	reporter reporters.ReportWriter
+}
+
+var _ AccountBasedMigration = (*AddKeyMigration)(nil)
+
+func NewAddKeyMigration(
+	chainID flow.ChainID,
+	key crypto.PublicKey,
+	rwf reporters.ReportWriterFactory,
+) *AddKeyMigration {
+	if !IAmSureIWantToRunThisMigration {
+		panic("Cannot run AddKeyMigration migration")
+	}
+
+	// this is a mainnet-only migration
+	if chainID != flow.Mainnet {
+		panic("AddKeyMigration migration only works on mainnet")
+	}
+
+	addresses := make(map[common.Address]AddKeyMigrationAccountPublicKeyData)
+	sc := systemcontracts.SystemContractsForChain(chainID).All()
+
+	for _, sc := range sc {
+		addresses[common.Address(sc.Address)] = AddKeyMigrationAccountPublicKeyData{
+			PublicKey: key,
+			HashAlgo:  hash.SHA3_256,
+		}
+	}
+
+	// add key to node accounts
+	for _, nodeAddress := range mainnetNodeAddresses() {
+
+		address, err := common.HexToAddress(nodeAddress)
+		if err != nil {
+			// should never happen
+			panic(fmt.Errorf("invalid node address %s: %w", nodeAddress, err))
+		}
+
+		addresses[address] = AddKeyMigrationAccountPublicKeyData{
+			PublicKey: key,
+			HashAlgo:  hash.SHA3_256,
+		}
+	}
+
+	return &AddKeyMigration{
+		accountsToAddKeyTo: addresses,
+		chainID:            chainID,
+		reporter:           rwf.ReportWriter("add-key-migration"),
+	}
+}
+
+type AddKeyMigrationAccountPublicKeyData struct {
+	PublicKey crypto.PublicKey
+	HashAlgo  hash.HashingAlgorithm
+}
+
+func (m *AddKeyMigration) InitMigration(
+	log zerolog.Logger,
+	_ *registers.ByAccount,
+	_ int,
+) error {
+	m.log = log.With().Str("component", "AddKeyMigration").Logger()
+	return nil
+}
+
+func (m *AddKeyMigration) Close() error {
+	return nil
+}
+
+func (m *AddKeyMigration) MigrateAccount(
+	_ context.Context,
+	address common.Address,
+	accountRegisters *registers.AccountRegisters,
+) error {
+	if !IAmSureIWantToRunThisMigration {
+		panic("Cannot run AddKeyMigration migration")
+	}
+
+	keyData, ok := m.accountsToAddKeyTo[address]
+	if !ok {
+		return nil
+	}
+
+	// Create all the runtime components we need for the migration
+	migrationRuntime, err := NewInterpreterMigrationRuntime(
+		accountRegisters,
+		m.chainID,
+		InterpreterMigrationRuntimeConfig{},
+	)
+
+	if err != nil {
+		return err
+	}
+
+	account, err := migrationRuntime.Accounts.Get(flow.ConvertAddress(address))
+	if err != nil {
+		return fmt.Errorf("could not find account at address %s: %w", address, err)
+	}
+	if len(account.Keys) == 0 {
+		// this is unexpected,
+		// all core contract accounts should have at least one key
+		m.log.Warn().
+			Str("address", address.String()).
+			Msg("account has no keys")
+	}
+
+	key := flow.AccountPublicKey{
+		PublicKey: keyData.PublicKey,
+		SignAlgo:  keyData.PublicKey.Algorithm(),
+		HashAlgo:  keyData.HashAlgo,
+		Weight:    fvm.AccountKeyWeightThreshold,
+	}
+
+	flowAddress := flow.ConvertAddress(address)
+
+	keyIndex, err := migrationRuntime.Accounts.GetAccountPublicKeyCount(flowAddress)
+	if err != nil {
+		return fmt.Errorf("failed to get public key count: %w", err)
+	}
+
+	err = migrationRuntime.Accounts.AppendAccountPublicKey(flowAddress, key)
+	if err != nil {
+		return fmt.Errorf("failed to append public key: %w", err)
+	}
+
+	// Finalize the transaction
+	result, err := migrationRuntime.TransactionState.FinalizeMainTransaction()
+	if err != nil {
+		return fmt.Errorf("failed to finalize main transaction: %w", err)
+	}
+
+	// Merge the changes into the registers
+	expectedAddresses := map[flow.Address]struct{}{
+		flow.Address(address): {},
+	}
+
+	err = registers.ApplyChanges(
+		accountRegisters,
+		result.WriteSet,
+		expectedAddresses,
+		m.log,
+	)
+	if err != nil {
+		return fmt.Errorf("failed to apply register changes: %w", err)
+	}
+
+	m.reporter.Write(keyAddedReport{
+		Address: address.Hex(),
+		Index:   keyIndex,
+		Key:     key.PublicKey.String(),
+	})
+
+	return nil
+}
+
+type keyAddedReport struct {
+	Address string `json:"address"`
+	Index   uint32 `json:"index"`
+	Key     string `json:"key"`
+}
+
+func mainnetNodeAddresses() []string {
+	if !IHaveCheckedTheMainnetNodeAddressesForCorrectness {
+		panic("please check that the addresses are correct, then set IHaveCheckedTheMainnetNodeAddressesForCorrectness to true")
+	}
+
+	return []string{
+		"731fff3c92213443", // "access-001.mainnet24.nodes.onflow.org:3569",
+		"812c23d21adf3efd", // "access-002.mainnet24.nodes.onflow.org:3569",
+		"8a06114bd65a40d4", // "access-003.mainnet24.nodes.onflow.org:3569",
+		"7835cda55ea44a6a", // "access-004.mainnet24.nodes.onflow.org:3569",
+		"05c726db6e2c35b5", // "access-005.mainnet24.nodes.onflow.org:3569",
+		"00523f97886d672f", // "access-006.mainnet24.nodes.onflow.org:3569",
+		"dfa97677e212fe54", // "access-007.mainnet24.nodes.onflow.org:3569",
+		"decc3024db84c4f2", // "access-008.mainnet24.nodes.onflow.org:3569",
+		"334b37c675df7ea8", // "access-009.mainnet24.nodes.onflow.org:3569",
+		"f4e71586ea8c469e", // "access-010.mainnet24.nodes.onflow.org:3569",
+		"8f930807301b124e", // "collection-001.mainnet24.nodes.onflow.org:3569",
+		"7da0d4e9b8e518f0", // "collection-002.mainnet24.nodes.onflow.org:3569",
+		"60de8343ed646cb8", // "collection-003.mainnet24.nodes.onflow.org:3569",
+		"1644512674297fcc", // "collection-004.mainnet24.nodes.onflow.org:3569",
+		"e4778dc8fcd77572", // "collection-005.mainnet24.nodes.onflow.org:3569",
"ef5dbf5130520b5b", // "collection-006.mainnet24.nodes.onflow.org:3569", + "1d6e63bfb8ac01e5", // "collection-007.mainnet24.nodes.onflow.org:3569", + "eac8a61dd61359c1", // "collection-008.mainnet24.nodes.onflow.org:3569", + "18fb7af35eed537f", // "collection-009.mainnet24.nodes.onflow.org:3569", + "13d1486a92682d56", // "collection-010.mainnet24.nodes.onflow.org:3569", + "e1e294841a9627e8", // "collection-011.mainnet24.nodes.onflow.org:3569", + "e8022abba5379d6d", // "collection-012.mainnet24.nodes.onflow.org:3569", + "1a31f6552dc997d3", // "collection-013.mainnet24.nodes.onflow.org:3569", + "111bc4cce14ce9fa", // "collection-014.mainnet24.nodes.onflow.org:3569", + "074fa1ff7848e39b", // "collection-015.mainnet24.nodes.onflow.org:3569", + "f0e9645d16f7bbbf", // "collection-016.mainnet24.nodes.onflow.org:3569", + "02dab8b39e09b101", // "collection-017.mainnet24.nodes.onflow.org:3569", + "09f08a2a528ccf28", // "collection-018.mainnet24.nodes.onflow.org:3569", + "fbc356c4da72c596", // "collection-019.mainnet24.nodes.onflow.org:3569", + "0d00d5358d5ba714", // "collection-020.mainnet24.nodes.onflow.org:3569", + "ff3309db05a5adaa", // "collection-021.mainnet24.nodes.onflow.org:3569", + "f4193b42c920d383", // "collection-022.mainnet24.nodes.onflow.org:3569", + "062ae7ac41ded93d", // "collection-023.mainnet24.nodes.onflow.org:3569", + "f18c220e2f618119", // "collection-024.mainnet24.nodes.onflow.org:3569", + "03bffee0a79f8ba7", // "collection-025.mainnet24.nodes.onflow.org:3569", + "0895cc796b1af58e", // "collection-026.mainnet24.nodes.onflow.org:3569", + "faa61097e3e4ff30", // "collection-027.mainnet24.nodes.onflow.org:3569", + "f346aea85c4545b5", // "collection-028.mainnet24.nodes.onflow.org:3569", + "01757246d4bb4f0b", // "collection-029.mainnet24.nodes.onflow.org:3569", + "0a5f40df183e3122", // "collection-030.mainnet24.nodes.onflow.org:3569", + "f86c9c3190c03b9c", // "collection-031.mainnet24.nodes.onflow.org:3569", + "0fca5993fe7f63b8", // "collection-032.mainnet24.nodes.onflow.org:3569", + "fdf9857d76816906", // "collection-033.mainnet24.nodes.onflow.org:3569", + "f6d3b7e4ba04172f", // "collection-034.mainnet24.nodes.onflow.org:3569", + "04e06b0a32fa1d91", // "collection-035.mainnet24.nodes.onflow.org:3569", + "0db2761c119413f5", // "collection-036.mainnet24.nodes.onflow.org:3569", + "ff81aaf2996a194b", // "collection-037.mainnet24.nodes.onflow.org:3569", + "f4ab986b55ef6762", // "collection-038.mainnet24.nodes.onflow.org:3569", + "06984485dd116ddc", // "collection-039.mainnet24.nodes.onflow.org:3569", + "f13e8127b3ae35f8", // "collection-040.mainnet24.nodes.onflow.org:3569", + "030d5dc93b503f46", // "collection-041.mainnet24.nodes.onflow.org:3569", + "08276f50f7d5416f", // "collection-042.mainnet24.nodes.onflow.org:3569", + "fa14b3be7f2b4bd1", // "collection-043.mainnet24.nodes.onflow.org:3569", + "f3f40d81c08af154", // "collection-044.mainnet24.nodes.onflow.org:3569", + "01c7d16f4874fbea", // "collection-045.mainnet24.nodes.onflow.org:3569", + "0aede3f684f185c3", // "collection-046.mainnet24.nodes.onflow.org:3569", + "f8de3f180c0f8f7d", // "collection-047.mainnet24.nodes.onflow.org:3569", + "0f78faba62b0d759", // "collection-048.mainnet24.nodes.onflow.org:3569", + "fd4b2654ea4edde7", // "collection-049.mainnet24.nodes.onflow.org:3569", + "f66114cd26cba3ce", // "collection-050.mainnet24.nodes.onflow.org:3569", + "0452c823ae35a970", // "collection-051.mainnet24.nodes.onflow.org:3569", + "f2914bd2f91ccbf2", // "collection-052.mainnet24.nodes.onflow.org:3569", + "00a2973c71e2c14c", // 
"collection-053.mainnet24.nodes.onflow.org:3569", + "0b88a5a5bd67bf65", // "collection-054.mainnet24.nodes.onflow.org:3569", + "ea7a05344adced20", // "collection-055.mainnet24.nodes.onflow.org:3569", + "1849d9dac222e79e", // "collection-056.mainnet24.nodes.onflow.org:3569", + "1363eb430ea799b7", // "collection-057.mainnet24.nodes.onflow.org:3569", + "e15037ad86599309", // "collection-058.mainnet24.nodes.onflow.org:3569", + "e8b0899239f8298c", // "collection-059.mainnet24.nodes.onflow.org:3569", + "1a83557cb1062332", // "collection-060.mainnet24.nodes.onflow.org:3569", + "11a967e57d835d1b", // "collection-061.mainnet24.nodes.onflow.org:3569", + "e39abb0bf57d57a5", // "collection-062.mainnet24.nodes.onflow.org:3569", + "143c7ea99bc20f81", // "collection-063.mainnet24.nodes.onflow.org:3569", + "e60fa247133c053f", // "collection-064.mainnet24.nodes.onflow.org:3569", + "ed2590dedfb97b16", // "collection-065.mainnet24.nodes.onflow.org:3569", + "1f164c30574771a8", // "collection-066.mainnet24.nodes.onflow.org:3569", + "5668f9ec131bd35f", // "collection-067.mainnet24.nodes.onflow.org:3569", + "a45b25029be5d9e1", // "collection-068.mainnet24.nodes.onflow.org:3569", + "af71179b5760a7c8", // "collection-069.mainnet24.nodes.onflow.org:3569", + "5d42cb75df9ead76", // "collection-070.mainnet24.nodes.onflow.org:3569", + "aae40ed7b121f552", // "collection-071.mainnet24.nodes.onflow.org:3569", + "58d7d23939dfffec", // "collection-072.mainnet24.nodes.onflow.org:3569", + "53fde0a0f55a81c5", // "collection-073.mainnet24.nodes.onflow.org:3569", + "a1ce3c4e7da48b7b", // "collection-074.mainnet24.nodes.onflow.org:3569", + "a82e8271c20531fe", // "collection-075.mainnet24.nodes.onflow.org:3569", + "5a1d5e9f4afb3b40", // "collection-076.mainnet24.nodes.onflow.org:3569", + "51376c06867e4569", // "collection-077.mainnet24.nodes.onflow.org:3569", + "a304b0e80e804fd7", // "collection-078.mainnet24.nodes.onflow.org:3569", + "54a2754a603f17f3", // "collection-079.mainnet24.nodes.onflow.org:3569", + "a691a9a4e8c11d4d", // "collection-080.mainnet24.nodes.onflow.org:3569", + "adbb9b3d24446364", // "collection-081.mainnet24.nodes.onflow.org:3569", + "5f8847d3acba69da", // "collection-082.mainnet24.nodes.onflow.org:3569", + "a94bc422fb930b58", // "collection-083.mainnet24.nodes.onflow.org:3569", + "5b7818cc736d01e6", // "collection-084.mainnet24.nodes.onflow.org:3569", + "50522a55bfe87fcf", // "collection-085.mainnet24.nodes.onflow.org:3569", + "a261f6bb37167571", // "collection-086.mainnet24.nodes.onflow.org:3569", + "55c7331959a92d55", // "collection-087.mainnet24.nodes.onflow.org:3569", + "a7f4eff7d15727eb", // "collection-088.mainnet24.nodes.onflow.org:3569", + "acdedd6e1dd259c2", // "collection-089.mainnet24.nodes.onflow.org:3569", + "5eed0180952c537c", // "collection-090.mainnet24.nodes.onflow.org:3569", + "570dbfbf2a8de9f9", // "collection-091.mainnet24.nodes.onflow.org:3569", + "a53e6351a273e347", // "collection-092.mainnet24.nodes.onflow.org:3569", + "ae1451c86ef69d6e", // "collection-093.mainnet24.nodes.onflow.org:3569", + "5c278d26e60897d0", // "collection-094.mainnet24.nodes.onflow.org:3569", + "4fe6f159994dcf2b", // "collection-095.mainnet24.nodes.onflow.org:3569", + "bdd52db711b3c595", // "collection-096.mainnet24.nodes.onflow.org:3569", + "b6ff1f2edd36bbbc", // "consensus-001.mainnet24.nodes.onflow.org:3569", + "44ccc3c055c8b102", // "consensus-002.mainnet24.nodes.onflow.org:3569", + "4d9eded676a6bf66", // "consensus-003.mainnet24.nodes.onflow.org:3569", + "bfad0238fe58b5d8", // 
"consensus-004.mainnet24.nodes.onflow.org:3569", + "b48730a132ddcbf1", // "consensus-005.mainnet24.nodes.onflow.org:3569", + "46b4ec4fba23c14f", // "consensus-006.mainnet24.nodes.onflow.org:3569", + "b11229edd49c996b", // "consensus-007.mainnet24.nodes.onflow.org:3569", + "ac6c7e47811ded23", // "consensus-008.mainnet24.nodes.onflow.org:3569", + "5e5fa2a909e3e79d", // "consensus-009.mainnet24.nodes.onflow.org:3569", + "57bf1c96b6425d18", // "consensus-010.mainnet24.nodes.onflow.org:3569", + "a58cc0783ebc57a6", // "consensus-011.mainnet24.nodes.onflow.org:3569", + "aea6f2e1f239298f", // "consensus-012.mainnet24.nodes.onflow.org:3569", + "5c952e0f7ac72331", // "consensus-013.mainnet24.nodes.onflow.org:3569", + "ab33ebad14787b15", // "consensus-014.mainnet24.nodes.onflow.org:3569", + "590037439c8671ab", // "consensus-015.mainnet24.nodes.onflow.org:3569", + "522a05da50030f82", // "consensus-016.mainnet24.nodes.onflow.org:3569", + "447e60e9c90705e3", // "consensus-017.mainnet24.nodes.onflow.org:3569", + "b2bde3189e2e6761", // "consensus-018.mainnet24.nodes.onflow.org:3569", + "408e3ff616d06ddf", // "consensus-019.mainnet24.nodes.onflow.org:3569", + "4ba40d6fda5513f6", // "consensus-020.mainnet24.nodes.onflow.org:3569", + "b997d18152ab1948", // "consensus-021.mainnet24.nodes.onflow.org:3569", + "4e3114233c14416c", // "consensus-022.mainnet24.nodes.onflow.org:3569", + "bc02c8cdb4ea4bd2", // "consensus-023.mainnet24.nodes.onflow.org:3569", + "b728fa54786f35fb", // "consensus-024.mainnet24.nodes.onflow.org:3569", + "451b26baf0913f45", // "consensus-025.mainnet24.nodes.onflow.org:3569", + "4cfb98854f3085c0", // "consensus-026.mainnet24.nodes.onflow.org:3569", + "bec8446bc7ce8f7e", // "consensus-027.mainnet24.nodes.onflow.org:3569", + "b5e276f20b4bf157", // "consensus-028.mainnet24.nodes.onflow.org:3569", + "47d1aa1c83b5fbe9", // "consensus-029.mainnet24.nodes.onflow.org:3569", + "b0776fbeed0aa3cd", // "consensus-030.mainnet24.nodes.onflow.org:3569", + "4244b35065f4a973", // "consensus-031.mainnet24.nodes.onflow.org:3569", + "496e81c9a971d75a", // "consensus-032.mainnet24.nodes.onflow.org:3569", + "bb5d5d27218fdde4", // "consensus-033.mainnet24.nodes.onflow.org:3569", + "cdc78f42b8c2ce90", // "consensus-034.mainnet24.nodes.onflow.org:3569", + "3ff453ac303cc42e", // "consensus-035.mainnet24.nodes.onflow.org:3569", + "34de6135fcb9ba07", // "consensus-036.mainnet24.nodes.onflow.org:3569", + "c6edbddb7447b0b9", // "consensus-037.mainnet24.nodes.onflow.org:3569", + "314b78791af8e89d", // "consensus-038.mainnet24.nodes.onflow.org:3569", + "c378a4979206e223", // "consensus-039.mainnet24.nodes.onflow.org:3569", + "c852960e5e839c0a", // "consensus-040.mainnet24.nodes.onflow.org:3569", + "01fb1c432a2fed81", // "execution-001.mainnet24.nodes.onflow.org:3569", + "17af7970b32be7e0", // "execution-002.mainnet24.nodes.onflow.org:3569", + "e009bcd2dd94bfc4", // "execution-003.mainnet24.nodes.onflow.org:3569", + "123a603c556ab57a", // "execution-004.mainnet24.nodes.onflow.org:3569", + "88cc9deda57e8478", // "verification-001.mainnet24.nodes.onflow.org:3569", + "191052a599efcb53", // "verification-002.mainnet24.nodes.onflow.org:3569", + "eb238e4b1111c1ed", // "verification-003.mainnet24.nodes.onflow.org:3569", + "e271935d327fcf89", // "verification-004.mainnet24.nodes.onflow.org:3569", + "10424fb3ba81c537", // "verification-005.mainnet24.nodes.onflow.org:3569", + "1b687d2a7604bb1e", // "verification-006.mainnet24.nodes.onflow.org:3569", + "e95ba1c4fefab1a0", // "verification-007.mainnet24.nodes.onflow.org:3569", + 
"1efd64669045e984", // "verification-008.mainnet24.nodes.onflow.org:3569", + "ecceb88818bbe33a", // "verification-009.mainnet24.nodes.onflow.org:3569", + "e7e48a11d43e9d13", // "verification-010.mainnet24.nodes.onflow.org:3569", + "15d756ff5cc097ad", // "verification-011.mainnet24.nodes.onflow.org:3569", + "1c37e8c0e3612d28", // "verification-012.mainnet24.nodes.onflow.org:3569", + "ee04342e6b9f2796", // "verification-013.mainnet24.nodes.onflow.org:3569", + "e52e06b7a71a59bf", // "verification-014.mainnet24.nodes.onflow.org:3569", + "171dda592fe45301", // "verification-015.mainnet24.nodes.onflow.org:3569", + "e0bb1ffb415b0b25", // "verification-016.mainnet24.nodes.onflow.org:3569", + "1288c315c9a5019b", // "verification-017.mainnet24.nodes.onflow.org:3569", + "19a2f18c05207fb2", // "verification-018.mainnet24.nodes.onflow.org:3569", + "eb912d628dde750c", // "verification-019.mainnet24.nodes.onflow.org:3569", + "1d52ae93daf7178e", // "verification-020.mainnet24.nodes.onflow.org:3569", + "ef61727d52091d30", // "verification-021.mainnet24.nodes.onflow.org:3569", + "e44b40e49e8c6319", // "verification-022.mainnet24.nodes.onflow.org:3569", + "16789c0a167269a7", // "verification-023.mainnet24.nodes.onflow.org:3569", + "e1de59a878cd3183", // "verification-024.mainnet24.nodes.onflow.org:3569", + "13ed8546f0333b3d", // "verification-025.mainnet24.nodes.onflow.org:3569", + "18c7b7df3cb64514", // "verification-026.mainnet24.nodes.onflow.org:3569", + "eaf46b31b4484faa", // "verification-027.mainnet24.nodes.onflow.org:3569", + "e314d50e0be9f52f", // "verification-028.mainnet24.nodes.onflow.org:3569", + "112709e08317ff91", // "verification-029.mainnet24.nodes.onflow.org:3569", + "1a0d3b794f9281b8", // "verification-030.mainnet24.nodes.onflow.org:3569", + "e83ee797c76c8b06", // "verification-031.mainnet24.nodes.onflow.org:3569", + "1f982235a9d3d322", // "verification-032.mainnet24.nodes.onflow.org:3569", + "edabfedb212dd99c", // "verification-033.mainnet24.nodes.onflow.org:3569", + "e681cc42eda8a7b5", // "verification-034.mainnet24.nodes.onflow.org:3569", + "14b210ac6556ad0b", // "verification-035.mainnet24.nodes.onflow.org:3569", + "6228c2c9fc1bbe7f", // "verification-036.mainnet24.nodes.onflow.org:3569", + "901b1e2774e5b4c1", // "verification-037.mainnet24.nodes.onflow.org:3569", + "9b312cbeb860cae8", // "verification-038.mainnet24.nodes.onflow.org:3569", + "6902f050309ec056", // "verification-039.mainnet24.nodes.onflow.org:3569", + "9ea435f25e219872", // "verification-040.mainnet24.nodes.onflow.org:3569", + "6c97e91cd6df92cc", // "verification-041.mainnet24.nodes.onflow.org:3569", + "71e9beb6835ee684", // "verification-042.mainnet24.nodes.onflow.org:3569", + "780900893cff5c01", // "verification-043.mainnet24.nodes.onflow.org:3569", + "8a3adc67b40156bf", // "verification-044.mainnet24.nodes.onflow.org:3569", + "65775723697e2849", // "verification-045.mainnet24.nodes.onflow.org:3569", + "97448bcde18022f7", // "verification-046.mainnet24.nodes.onflow.org:3569", + "60e24e6f8f3f7ad3", // "verification-047.mainnet24.nodes.onflow.org:3569", + "92d1928107c1706d", // "verification-048.mainnet24.nodes.onflow.org:3569", + "99fba018cb440e44", // "verification-049.mainnet24.nodes.onflow.org:3569", + "6bc87cf643ba04fa", // "verification-050.mainnet24.nodes.onflow.org:3569", + } +} diff --git a/cmd/util/ledger/migrations/add_key_migration_test.go b/cmd/util/ledger/migrations/add_key_migration_test.go new file mode 100644 index 00000000000..a66c174020d --- /dev/null +++ 
b/cmd/util/ledger/migrations/add_key_migration_test.go @@ -0,0 +1,19 @@ +package migrations + +import ( + "testing" +) + +func Test_fail_if_migration_enabled(t *testing.T) { + t.Parallel() + // prevent merging this to master branch if enabled + if IAmSureIWantToRunThisMigration { + t.Fail() + } + + // a reminder to set this back to false so that we have + // it prepared for next time. + if IHaveCheckedTheMainnetNodeAddressesForCorrectness { + t.Fail() + } +} diff --git a/cmd/util/ledger/migrations/cadence_value_diff.go b/cmd/util/ledger/migrations/cadence_value_diff.go new file mode 100644 index 00000000000..5254e983f8e --- /dev/null +++ b/cmd/util/ledger/migrations/cadence_value_diff.go @@ -0,0 +1,1023 @@ +package migrations + +import ( + "fmt" + "time" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + "github.com/rs/zerolog/log" + "golang.org/x/sync/errgroup" + + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/model/flow" +) + +type diffKind int + +const ( + storageMapExistDiffKind diffKind = iota // Storage map only exists in one state + storageMapKeyDiffKind // Storage map keys are different + storageMapValueDiffKind // Storage map values are different (only with verbose logging) + cadenceValueDiffKind // Cadence values are different + cadenceValueTypeDiffKind // Cadence value types are different + cadenceValueStaticTypeDiffKind // Cadence value static types are different +) + +var diffKindString = map[diffKind]string{ + storageMapExistDiffKind: "storage_map_exist_diff", + storageMapKeyDiffKind: "storage_map_key_diff", + storageMapValueDiffKind: "storage_map_value_diff", + cadenceValueDiffKind: "cadence_value_diff", + cadenceValueTypeDiffKind: "cadence_value_type_diff", + cadenceValueStaticTypeDiffKind: "cadence_value_static_type_diff", +} + +type diffErrorKind int + +const ( + abortErrorKind diffErrorKind = iota + storageMapKeyNotImplementingStorageMapKeyDiffErrorKind + cadenceValueNotImplementEquatableValueDiffErrorKind +) + +var diffErrorKindString = map[diffErrorKind]string{ + abortErrorKind: "error_diff_failed", + storageMapKeyNotImplementingStorageMapKeyDiffErrorKind: "error_storage_map_key_not_implementing_StorageMapKey", + cadenceValueNotImplementEquatableValueDiffErrorKind: "error_cadence_value_not_implementing_EquatableValue", +} + +type diffError struct { + Address string + Kind string + Msg string +} + +type diffProblem struct { + Address string + Domain string + Kind string + Msg string + Trace string `json:",omitempty"` +} + +type difference struct { + Address string + Domain string + Kind string + Msg string + Trace string `json:",omitempty"` + OldValue string `json:",omitempty"` + NewValue string `json:",omitempty"` + OldValueStaticType string `json:",omitempty"` + NewValueStaticType string `json:",omitempty"` +} + +const minLargeAccountRegisterCount = 1_000_000 + +type CadenceValueDiffReporter struct { + address common.Address + chainID flow.ChainID + reportWriter reporters.ReportWriter + verboseLogging bool + nWorkers int +} + +func NewCadenceValueDiffReporter( + address common.Address, + chainID flow.ChainID, + rw reporters.ReportWriter, + verboseLogging bool, + nWorkers int, +) *CadenceValueDiffReporter { + return &CadenceValueDiffReporter{ + address: address, + chainID: chainID, + reportWriter: rw, + verboseLogging: verboseLogging, + nWorkers: 
nWorkers, + } +} + +type IsValueIncludedFunc func(address common.Address, domain common.StorageDomain, key any) bool + +func (dr *CadenceValueDiffReporter) DiffStates( + oldRegs, newRegs registers.Registers, + domains []common.StorageDomain, + isValueIncluded IsValueIncludedFunc, +) { + + oldStorage := newReadonlyStorage(oldRegs) + + newStorage := newReadonlyStorage(newRegs) + + var loadAtreeStorageGroup errgroup.Group + + loadAtreeStorageGroup.Go(func() (err error) { + return util.LoadAtreeSlabsInStorage(oldStorage, oldRegs, dr.nWorkers) + }) + + err := util.LoadAtreeSlabsInStorage(newStorage, newRegs, dr.nWorkers) + if err != nil { + dr.reportWriter.Write( + diffError{ + Address: dr.address.Hex(), + Kind: diffErrorKindString[abortErrorKind], + Msg: fmt.Sprintf("failed to preload new atree registers: %s", err), + }) + return + } + + // Wait for old registers to be loaded in storage. + if err := loadAtreeStorageGroup.Wait(); err != nil { + dr.reportWriter.Write( + diffError{ + Address: dr.address.Hex(), + Kind: diffErrorKindString[abortErrorKind], + Msg: fmt.Sprintf("failed to preload old atree registers: %s", err), + }) + return + } + + // Skip goroutine overhead for smaller accounts + oldRuntime, err := newReadonlyStorageRuntimeWithStorage(oldStorage, oldRegs.Count()) + if err != nil { + dr.reportWriter.Write( + diffError{ + Address: dr.address.Hex(), + Kind: diffErrorKindString[abortErrorKind], + Msg: fmt.Sprintf("failed to create runtime for old registers: %s", err), + }) + return + } + + newRuntime, err := newReadonlyStorageRuntimeWithStorage(newStorage, newRegs.Count()) + if err != nil { + dr.reportWriter.Write( + diffError{ + Address: dr.address.Hex(), + Kind: diffErrorKindString[abortErrorKind], + Msg: fmt.Sprintf("failed to create runtime with new registers: %s", err), + }) + return + } + + for _, domain := range domains { + dr.diffDomain(oldRuntime, newRuntime, domain, isValueIncluded) + } +} + +func (dr *CadenceValueDiffReporter) diffDomain( + oldRuntime *readonlyStorageRuntime, + newRuntime *readonlyStorageRuntime, + domain common.StorageDomain, + isValueIncluded IsValueIncludedFunc, +) { + defer func() { + if r := recover(); r != nil { + dr.reportWriter.Write( + diffProblem{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffErrorKindString[abortErrorKind], + Msg: fmt.Sprintf( + "panic while diffing storage maps: %s", + r, + ), + }, + ) + } + }() + + oldStorageMap := oldRuntime.Storage.GetDomainStorageMap(oldRuntime.Interpreter, dr.address, domain, false) + newStorageMap := newRuntime.Storage.GetDomainStorageMap(newRuntime.Interpreter, dr.address, domain, false) + + if oldStorageMap == nil && newStorageMap == nil { + // No storage maps for this domain. 
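+		// Both lookups above pass false as the final (create-if-missing) argument,
+		// so an absent domain storage map shows up as nil here instead of being
+		// created on the fly.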
+		return
+	}
+
+	if oldStorageMap == nil && newStorageMap != nil {
+		dr.reportWriter.Write(
+			difference{
+				Address: dr.address.Hex(),
+				Domain:  domain.Identifier(),
+				Kind:    diffKindString[storageMapExistDiffKind],
+				Msg: fmt.Sprintf(
+					"old storage map doesn't exist, new storage map has %d elements with keys %v",
+					newStorageMap.Count(),
+					getStorageMapKeys(newStorageMap),
+				),
+			})
+
+		return
+	}
+
+	if oldStorageMap != nil && newStorageMap == nil {
+		dr.reportWriter.Write(
+			difference{
+				Address: dr.address.Hex(),
+				Domain:  domain.Identifier(),
+				Kind:    diffKindString[storageMapExistDiffKind],
+				Msg: fmt.Sprintf(
+					"new storage map doesn't exist, old storage map has %d elements with keys %v",
+					oldStorageMap.Count(),
+					getStorageMapKeys(oldStorageMap),
+				),
+			})
+
+		return
+	}
+
+	oldKeys := getStorageMapKeys(oldStorageMap)
+	newKeys := getStorageMapKeys(newStorageMap)
+
+	onlyOldKeys, onlyNewKeys, sharedKeys := diff(oldKeys, newKeys)
+
+	// Log keys only present in old storage map
+	if len(onlyOldKeys) > 0 {
+		dr.reportWriter.Write(
+			difference{
+				Address: dr.address.Hex(),
+				Domain:  domain.Identifier(),
+				Kind:    diffKindString[storageMapKeyDiffKind],
+				Msg: fmt.Sprintf(
+					"old storage map has %d elements with keys %v that are not present in new storage map",
+					len(onlyOldKeys),
+					onlyOldKeys,
+				),
+			})
+	}
+
+	// Log keys only present in new storage map
+	if len(onlyNewKeys) > 0 {
+		dr.reportWriter.Write(
+			difference{
+				Address: dr.address.Hex(),
+				Domain:  domain.Identifier(),
+				Kind:    diffKindString[storageMapKeyDiffKind],
+				Msg: fmt.Sprintf(
+					"new storage map has %d elements with keys %v that are not present in old storage map",
+					len(onlyNewKeys),
+					onlyNewKeys,
+				),
+			})
+	}
+
+	if len(sharedKeys) == 0 {
+		return
+	}
+
+	getValues := func(key any) (interpreter.Value, interpreter.Value, *util.Trace, bool) {
+
+		trace := util.NewTrace(fmt.Sprintf("%s[%v]", domain.Identifier(), key))
+
+		var mapKey interpreter.StorageMapKey
+
+		switch key := key.(type) {
+		case interpreter.StringAtreeValue:
+			mapKey = interpreter.StringStorageMapKey(key)
+
+		case interpreter.Uint64AtreeValue:
+			mapKey = interpreter.Uint64StorageMapKey(key)
+
+		case interpreter.StringStorageMapKey:
+			mapKey = key
+
+		case interpreter.Uint64StorageMapKey:
+			mapKey = key
+
+		default:
+			dr.reportWriter.Write(
+				diffProblem{
+					Address: dr.address.Hex(),
+					Domain:  domain.Identifier(),
+					Kind:    diffErrorKindString[storageMapKeyNotImplementingStorageMapKeyDiffErrorKind],
+					Trace:   trace.String(),
+					Msg: fmt.Sprintf(
+						"invalid storage map key %v (%T), expected interpreter.StorageMapKey",
+						key,
+						key,
+					),
+				})
+			return nil, nil, nil, false
+		}
+
+		oldValue := oldStorageMap.ReadValue(nil, mapKey)
+
+		newValue := newStorageMap.ReadValue(nil, mapKey)
+
+		return oldValue, newValue, trace, true
+	}
+
+	diffValues := func(
+		oldInterpreter *interpreter.Interpreter,
+		oldValue interpreter.Value,
+		newInterpreter *interpreter.Interpreter,
+		newValue interpreter.Value,
+		trace *util.Trace,
+	) {
+		hasDifference := dr.diffValues(
+			oldInterpreter,
+			oldValue,
+			newInterpreter,
+			newValue,
+			domain,
+			trace,
+		)
+		if hasDifference {
+			if dr.verboseLogging {
+				// Log potentially large values at top level only when verbose logging is enabled.
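+				// The nested element diffs have already been reported by dr.diffValues;
+				// this verbose entry additionally records the full top-level values.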
+				dr.reportWriter.Write(
+					difference{
+						Address:            dr.address.Hex(),
+						Domain:             domain.Identifier(),
+						Kind:               diffKindString[storageMapValueDiffKind],
+						Msg:                "storage map elements are different",
+						Trace:              trace.String(),
+						OldValue:           oldValue.String(),
+						NewValue:           newValue.String(),
+						OldValueStaticType: oldValue.StaticType(oldInterpreter).String(),
+						NewValueStaticType: newValue.StaticType(newInterpreter).String(),
+					})
+			}
+		}
+	}
+
+	startTime := time.Now()
+
+	isLargeAccount := oldRuntime.PayloadCount > minLargeAccountRegisterCount
+
+	if isLargeAccount {
+		log.Info().Msgf(
+			"Diffing %x storage domain containing %d elements (%d payloads) ...",
+			dr.address[:],
+			len(sharedKeys),
+			oldRuntime.PayloadCount,
+		)
+	}
+
+	// Diffing storage domain
+
+	for _, key := range sharedKeys {
+		if !isValueIncluded(dr.address, domain, key) {
+			continue
+		}
+		oldValue, newValue, trace, canDiff := getValues(key)
+		if canDiff {
+			diffValues(
+				oldRuntime.Interpreter,
+				oldValue,
+				newRuntime.Interpreter,
+				newValue,
+				trace,
+			)
+		}
+	}
+
+	if isLargeAccount {
+		log.Info().Msgf(
+			"Finished diffing %x storage domain containing %d elements (%d payloads) in %s",
+			dr.address[:],
+			len(sharedKeys),
+			oldRuntime.PayloadCount,
+			time.Since(startTime),
+		)
+	}
+}
+
+func (dr *CadenceValueDiffReporter) diffValues(
+	vInterpreter *interpreter.Interpreter,
+	v interpreter.Value,
+	otherInterpreter *interpreter.Interpreter,
+	other interpreter.Value,
+	domain common.StorageDomain,
+	trace *util.Trace,
+) (hasDifference bool) {
+	switch v := v.(type) {
+	case *interpreter.ArrayValue:
+		return dr.diffCadenceArrayValue(vInterpreter, v, otherInterpreter, other, domain, trace)
+
+	case *interpreter.CompositeValue:
+		return dr.diffCadenceCompositeValue(vInterpreter, v, otherInterpreter, other, domain, trace)
+
+	case *interpreter.DictionaryValue:
+		return dr.diffCadenceDictionaryValue(vInterpreter, v, otherInterpreter, other, domain, trace)
+
+	case *interpreter.SomeValue:
+		return dr.diffCadenceSomeValue(vInterpreter, v, otherInterpreter, other, domain, trace)
+
+	default:
+		return dr.diffEquatable(vInterpreter, v, otherInterpreter, other, domain, trace)
+	}
+}
+
+func (dr *CadenceValueDiffReporter) diffEquatable(
+	vInterpreter *interpreter.Interpreter,
+	v interpreter.Value,
+	otherInterpreter *interpreter.Interpreter,
+	other interpreter.Value,
+	domain common.StorageDomain,
+	trace *util.Trace,
+) (hasDifference bool) {
+
+	defer func() {
+		if r := recover(); r != nil {
+			dr.reportWriter.Write(
+				diffProblem{
+					Address: dr.address.Hex(),
+					Domain:  domain.Identifier(),
+					Kind:    diffErrorKindString[abortErrorKind],
+					Trace:   trace.String(),
+					Msg: fmt.Sprintf(
+						"panic while diffing values: %s",
+						r,
+					),
+				},
+			)
+		}
+	}()
+
+	oldValue, ok := v.(interpreter.EquatableValue)
+	if !ok {
+		// Report v itself here: oldValue is nil when the type assertion fails,
+		// so calling oldValue.String() would panic.
+		dr.reportWriter.Write(
+			diffProblem{
+				Address: dr.address.Hex(),
+				Domain:  domain.Identifier(),
+				Kind:    diffErrorKindString[cadenceValueNotImplementEquatableValueDiffErrorKind],
+				Trace:   trace.String(),
+				Msg: fmt.Sprintf(
+					"old value doesn't implement interpreter.EquatableValue: %s (%T)",
+					v.String(),
+					v,
+				),
+			})
+		return true
+	}
+
+	if !oldValue.Equal(nil, interpreter.EmptyLocationRange, other) {
+		dr.reportWriter.Write(
+			difference{
+				Address:            dr.address.Hex(),
+				Domain:             domain.Identifier(),
+				Kind:               diffKindString[cadenceValueDiffKind],
+				Msg:                fmt.Sprintf("values differ: %T vs %T", oldValue, other),
+				Trace:              trace.String(),
+				OldValue:           v.String(),
+				NewValue:           other.String(),
+				OldValueStaticType:
v.StaticType(vInterpreter).String(), + NewValueStaticType: other.StaticType(otherInterpreter).String(), + }) + return true + } + + return false +} + +func (dr *CadenceValueDiffReporter) diffCadenceSomeValue( + vInterpreter *interpreter.Interpreter, + v *interpreter.SomeValue, + otherInterpreter *interpreter.Interpreter, + other interpreter.Value, + domain common.StorageDomain, + trace *util.Trace, +) (hasDifference bool) { + + defer func() { + if r := recover(); r != nil { + dr.reportWriter.Write( + diffProblem{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffErrorKindString[abortErrorKind], + Trace: trace.String(), + Msg: fmt.Sprintf( + "panic while diffing some: %s", + r, + ), + }, + ) + } + }() + + otherSome, ok := other.(*interpreter.SomeValue) + if !ok { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueTypeDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf("types differ: %T != %T", v, other), + }) + return true + } + + innerValue := v.InnerValue() + + otherInnerValue := otherSome.InnerValue() + + return dr.diffValues( + vInterpreter, + innerValue, + otherInterpreter, + otherInnerValue, + domain, + trace, + ) +} + +func (dr *CadenceValueDiffReporter) diffCadenceArrayValue( + vInterpreter *interpreter.Interpreter, + v *interpreter.ArrayValue, + otherInterpreter *interpreter.Interpreter, + other interpreter.Value, + domain common.StorageDomain, + trace *util.Trace, +) (hasDifference bool) { + + defer func() { + if r := recover(); r != nil { + dr.reportWriter.Write( + diffProblem{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffErrorKindString[abortErrorKind], + Trace: trace.String(), + Msg: fmt.Sprintf( + "panic while diffing array: %s", + r, + ), + }, + ) + } + }() + + otherArray, ok := other.(*interpreter.ArrayValue) + if !ok { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueTypeDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf("types differ: %T != %T", v, other), + }) + return true + } + + if v.Type == nil && otherArray.Type != nil { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf("array static types differ: nil != %s", otherArray.Type), + }) + } + + if v.Type != nil && otherArray.Type == nil { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf("array static types differ: %s != nil", v.Type), + }) + } + + if v.Type != nil && otherArray.Type != nil && !v.Type.Equal(otherArray.Type) { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf("array static types differ: %s != %s", v.Type, otherArray.Type), + }) + } + + count := v.Count() + if count != otherArray.Count() { + hasDifference = true + + d := difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf("array counts differ: %d != %d", count, otherArray.Count()), + } + + if dr.verboseLogging { + 
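+			// Attach the full value dumps to the count mismatch only in
+			// verbose mode, since String() renders every element.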
d.OldValue = v.String() + d.NewValue = other.String() + } + + dr.reportWriter.Write(d) + } + + // Compare array elements + for i := 0; i < min(count, otherArray.Count()); i++ { + element := v.Get(vInterpreter, interpreter.EmptyLocationRange, i) + otherElement := otherArray.Get(otherInterpreter, interpreter.EmptyLocationRange, i) + + elementTrace := trace.Append(fmt.Sprintf("[%d]", i)) + elementHasDifference := dr.diffValues( + vInterpreter, + element, + otherInterpreter, + otherElement, + domain, + elementTrace, + ) + if elementHasDifference { + hasDifference = true + } + } + + return hasDifference +} + +func (dr *CadenceValueDiffReporter) diffCadenceCompositeValue( + vInterpreter *interpreter.Interpreter, + v *interpreter.CompositeValue, + otherInterpreter *interpreter.Interpreter, + other interpreter.Value, + domain common.StorageDomain, + trace *util.Trace, +) (hasDifference bool) { + + defer func() { + if r := recover(); r != nil { + dr.reportWriter.Write( + diffProblem{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffErrorKindString[abortErrorKind], + Trace: trace.String(), + Msg: fmt.Sprintf( + "panic while diffing composite: %s", + r, + ), + }, + ) + } + }() + + otherComposite, ok := other.(*interpreter.CompositeValue) + if !ok { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueTypeDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf("types differ: %T != %T", v, other), + }) + return true + } + + if !v.StaticType(vInterpreter).Equal(otherComposite.StaticType(otherInterpreter)) { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf( + "composite static types differ: %s != %s", + v.StaticType(vInterpreter), + otherComposite.StaticType(otherInterpreter)), + }) + } + + if v.Kind != otherComposite.Kind { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf( + "composite kinds differ: %d != %d", + v.Kind, + otherComposite.Kind, + ), + }) + } + + oldFieldNames := make([]string, 0, v.FieldCount()) + v.ForEachFieldName(func(fieldName string) bool { + oldFieldNames = append(oldFieldNames, fieldName) + return true + }) + + newFieldNames := make([]string, 0, otherComposite.FieldCount()) + otherComposite.ForEachFieldName(func(fieldName string) bool { + newFieldNames = append(newFieldNames, fieldName) + return true + }) + + onlyOldFieldNames, onlyNewFieldNames, sharedFieldNames := diff(oldFieldNames, newFieldNames) + + // Log field names only present in old composite value + if len(onlyOldFieldNames) > 0 { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf( + "old composite value has %d fields with keys %v, that are not present in new composite value", + len(onlyOldFieldNames), + onlyOldFieldNames, + ), + }) + } + + // Log field names only present in new composite value + if len(onlyNewFieldNames) > 0 { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueDiffKind], + Trace: 
trace.String(), + Msg: fmt.Sprintf( + "new composite value has %d fields with keys %v, that are not present in old composite value", + len(onlyNewFieldNames), + onlyNewFieldNames, + ), + }) + } + + // Compare fields in both composite values + for _, fieldName := range sharedFieldNames { + fieldValue := v.GetField(vInterpreter, fieldName) + otherFieldValue := otherComposite.GetField(otherInterpreter, fieldName) + + fieldTrace := trace.Append(fmt.Sprintf(".%s", fieldName)) + fieldHasDifference := dr.diffValues( + vInterpreter, + fieldValue, + otherInterpreter, + otherFieldValue, + domain, + fieldTrace, + ) + if fieldHasDifference { + hasDifference = true + } + } + + return hasDifference +} + +func (dr *CadenceValueDiffReporter) diffCadenceDictionaryValue( + vInterpreter *interpreter.Interpreter, + v *interpreter.DictionaryValue, + otherInterpreter *interpreter.Interpreter, + other interpreter.Value, + domain common.StorageDomain, + trace *util.Trace, +) (hasDifference bool) { + + defer func() { + if r := recover(); r != nil { + dr.reportWriter.Write( + diffProblem{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffErrorKindString[abortErrorKind], + Trace: trace.String(), + Msg: fmt.Sprintf( + "panic while diffing dictionary: %s", + r, + ), + }, + ) + } + }() + + otherDictionary, ok := other.(*interpreter.DictionaryValue) + if !ok { + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueTypeDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf("types differ: %T != %T", v, other), + }) + return true + } + + if !v.Type.Equal(otherDictionary.Type) { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueStaticTypeDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf( + "dict static types differ: %s != %s", + v.Type, + otherDictionary.Type), + }) + } + + oldKeys := make([]interpreter.Value, 0, v.Count()) + v.IterateKeys(vInterpreter, interpreter.EmptyLocationRange, func(key interpreter.Value) (resume bool) { + oldKeys = append(oldKeys, key) + return true + }) + + newKeys := make([]interpreter.Value, 0, otherDictionary.Count()) + otherDictionary.IterateKeys(otherInterpreter, interpreter.EmptyLocationRange, func(key interpreter.Value) (resume bool) { + newKeys = append(newKeys, key) + return true + }) + + onlyOldKeys := make([]interpreter.Value, 0, len(oldKeys)) + + // Compare elements in both dict values + + for _, key := range oldKeys { + valueTrace := trace.Append(fmt.Sprintf("[%v]", key)) + + oldValue, _ := v.Get(vInterpreter, interpreter.EmptyLocationRange, key) + + newValue, found := otherDictionary.Get(otherInterpreter, interpreter.EmptyLocationRange, key) + if !found { + onlyOldKeys = append(onlyOldKeys, key) + continue + } + + elementHasDifference := dr.diffValues( + vInterpreter, + oldValue, + otherInterpreter, + newValue, + domain, + valueTrace, + ) + if elementHasDifference { + hasDifference = true + } + } + + // Log keys only present in old dict value + + if len(onlyOldKeys) > 0 { + hasDifference = true + + dr.reportWriter.Write( + difference{ + Address: dr.address.Hex(), + Domain: domain.Identifier(), + Kind: diffKindString[cadenceValueDiffKind], + Trace: trace.String(), + Msg: fmt.Sprintf( + "old dict value has %d elements with keys %v, that are not present in new dict value", + len(onlyOldKeys), + onlyOldKeys, + ), + }) + } + + // Log keys only present in new dict value + + if 
len(oldKeys) != len(newKeys) || len(onlyOldKeys) > 0 {
+		onlyNewKeys := make([]interpreter.Value, 0, len(newKeys))
+
+		// find keys only present in new dict
+		for _, key := range newKeys {
+			found := v.ContainsKey(vInterpreter, interpreter.EmptyLocationRange, key)
+			if !found {
+				onlyNewKeys = append(onlyNewKeys, key)
+			}
+		}
+
+		if len(onlyNewKeys) > 0 {
+			hasDifference = true
+
+			dr.reportWriter.Write(
+				difference{
+					Address: dr.address.Hex(),
+					Domain:  domain.Identifier(),
+					Kind:    diffKindString[cadenceValueDiffKind],
+					Trace:   trace.String(),
+					Msg: fmt.Sprintf(
+						"new dict value has %d elements with keys %v, that are not present in old dict value",
+						len(onlyNewKeys),
+						onlyNewKeys,
+					),
+				})
+		}
+	}
+
+	return hasDifference
+}
+
+func getStorageMapKeys(storageMap *interpreter.DomainStorageMap) []any {
+	keys := make([]any, 0, storageMap.Count())
+
+	iter := storageMap.Iterator(nil)
+	for {
+		key := iter.NextKey()
+		if key == nil {
+			break
+		}
+		keys = append(keys, key)
+	}
+
+	return keys
+}
+
+// diff computes the set difference of two slices:
+// the elements only in old, the elements only in new,
+// and the elements shared by both.
+// For example, diff([a, b], [b, c]) returns ([a], [c], [b]).
+func diff[T comparable](old, new []T) (onlyOld, onlyNew, shared []T) {
+	onlyOld = make([]T, 0, len(old))
+	onlyNew = make([]T, 0, len(new))
+	shared = make([]T, 0, min(len(old), len(new)))
+
+	sharedNew := make([]bool, len(new))
+
+	for _, o := range old {
+		found := false
+
+		for i, n := range new {
+			if o == n {
+				shared = append(shared, o)
+				found = true
+				sharedNew[i] = true
+				break
+			}
+		}
+
+		if !found {
+			onlyOld = append(onlyOld, o)
+		}
+	}
+
+	for i, isShared := range sharedNew {
+		if !isShared {
+			onlyNew = append(onlyNew, new[i])
+		}
+	}
+
+	return
+}
+
+func newReadonlyStorage(regs registers.Registers) *runtime.Storage {
+	ledger := &registers.ReadOnlyLedger{Registers: regs}
+	config := runtime.StorageConfig{}
+	return runtime.NewStorage(ledger, nil, config)
+}
+
+type readonlyStorageRuntime struct {
+	Interpreter  *interpreter.Interpreter
+	Storage      *runtime.Storage
+	PayloadCount int
+}
+
+func newReadonlyStorageRuntimeWithStorage(storage *runtime.Storage, payloadCount int) (*readonlyStorageRuntime, error) {
+	inter, err := interpreter.NewInterpreter(
+		nil,
+		nil,
+		&interpreter.Config{
+			Storage: storage,
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &readonlyStorageRuntime{
+		Interpreter:  inter,
+		Storage:      storage,
+		PayloadCount: payloadCount,
+	}, nil
+}
diff --git a/cmd/util/ledger/migrations/cadence_value_diff_test.go b/cmd/util/ledger/migrations/cadence_value_diff_test.go
new file mode 100644
index 00000000000..50ba1460d27
--- /dev/null
+++ b/cmd/util/ledger/migrations/cadence_value_diff_test.go
@@ -0,0 +1,914 @@
+package migrations
+
+import (
+	"fmt"
+	"runtime"
+	"strconv"
+	"testing"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/interpreter"
+
+	"github.com/onflow/flow-go/cmd/util/ledger/util/registers"
+	"github.com/onflow/flow-go/fvm/environment"
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/convert"
+	"github.com/onflow/flow-go/model/flow"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestDiffCadenceValues(t *testing.T) {
+	t.Parallel()
+
+	address, err := common.HexToAddress("0x1")
+	require.NoError(t, err)
+
+	const domain = common.StorageDomainPathStorage
+
+	alwaysDiff := func(address common.Address, domain common.StorageDomain, key any) bool {
+		return true
+	}
+
+	t.Run("no diff", func(t *testing.T) {
+		t.Parallel()
+
+		writer := &testReportWriter{}
+
+		diffReporter := NewCadenceValueDiffReporter(address, flow.Emulator, writer, true, runtime.NumCPU())
+
+		diffReporter.DiffStates(
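+			// Both states are built by the same helper, so the reporter
+			// is expected to produce no entries.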
createTestRegisters(t, address, domain),
+			createTestRegisters(t, address, domain),
+			[]common.StorageDomain{domain},
+			alwaysDiff,
+		)
+		require.NoError(t, err)
+		require.Equal(t, 0, len(writer.entries))
+	})
+
+	t.Run("one storage map doesn't exist", func(t *testing.T) {
+		t.Parallel()
+
+		writer := &testReportWriter{}
+
+		diffReporter := NewCadenceValueDiffReporter(address, flow.Emulator, writer, true, runtime.NumCPU())
+
+		diffReporter.DiffStates(
+			createTestRegisters(t, address, domain),
+			registers.NewByAccount(),
+			[]common.StorageDomain{domain},
+			alwaysDiff,
+		)
+		require.NoError(t, err)
+		require.Equal(t, 1, len(writer.entries))
+
+		diff := writer.entries[0].(difference)
+		require.Equal(t, diffKindString[storageMapExistDiffKind], diff.Kind)
+		require.Equal(t, address.Hex(), diff.Address)
+		require.Equal(t, domain.Identifier(), diff.Domain)
+	})
+
+	t.Run("storage maps have different sets of keys", func(t *testing.T) {
+		t.Parallel()
+
+		writer := &testReportWriter{}
+
+		diffReporter := NewCadenceValueDiffReporter(address, flow.Emulator, writer, true, runtime.NumCPU())
+
+		diffReporter.DiffStates(
+			createTestRegisters(t, address, domain),
+			createStorageMapRegisters(t, address, domain, []string{"unique_key"}, []interpreter.Value{interpreter.UInt64Value(0)}),
+			[]common.StorageDomain{domain},
+			alwaysDiff,
+		)
+		require.NoError(t, err)
+
+		// 2 differences:
+		// - unique keys in old storage map
+		// - unique keys in new storage map
+		require.Equal(t, 2, len(writer.entries))
+
+		for _, entry := range writer.entries {
+			diff := entry.(difference)
+			require.Equal(t, diffKindString[storageMapKeyDiffKind], diff.Kind)
+			require.Equal(t, address.Hex(), diff.Address)
+			require.Equal(t, domain.Identifier(), diff.Domain)
+		}
+	})
+
+	t.Run("storage maps have overlapping keys", func(t *testing.T) {
+		t.Parallel()
+
+		writer := &testReportWriter{}
+
+		diffReporter := NewCadenceValueDiffReporter(address, flow.Emulator, writer, true, runtime.NumCPU())
+
+		diffReporter.DiffStates(
+			createStorageMapRegisters(t, address, domain, []string{"0", "1"}, []interpreter.Value{interpreter.UInt64Value(0), interpreter.UInt64Value(0)}),
+			createStorageMapRegisters(t, address, domain, []string{"2", "0"}, []interpreter.Value{interpreter.UInt64Value(0), interpreter.UInt64Value(0)}),
+			[]common.StorageDomain{domain},
+			alwaysDiff,
+		)
+		require.NoError(t, err)
+
+		// 2 entries:
+		// - unique keys in old storage map
+		// - unique keys in new storage map
+		require.Equal(t, 2, len(writer.entries))
+
+		for _, entry := range writer.entries {
+			diff := entry.(difference)
+			require.Equal(t, diffKindString[storageMapKeyDiffKind], diff.Kind)
+			require.Equal(t, address.Hex(), diff.Address)
+			require.Equal(t, domain.Identifier(), diff.Domain)
+		}
+	})
+
+	t.Run("storage maps have one different value", func(t *testing.T) {
+		t.Parallel()
+
+		writer := &testReportWriter{}
+
+		diffReporter := NewCadenceValueDiffReporter(address, flow.Emulator, writer, false, runtime.NumCPU())
+
+		diffReporter.DiffStates(
+			createStorageMapRegisters(t, address, domain, []string{"0", "1"}, []interpreter.Value{interpreter.UInt64Value(100), interpreter.UInt64Value(101)}),
+			createStorageMapRegisters(t, address, domain, []string{"0", "1"}, []interpreter.Value{interpreter.UInt64Value(111), interpreter.UInt64Value(101)}),
+			[]common.StorageDomain{domain},
+			alwaysDiff,
+		)
+		require.NoError(t, err)
+
+		// 1 entry:
+		// - different value
+		require.Equal(t, 1, len(writer.entries))
+
+		diff := writer.entries[0].(difference)
+		require.Equal(t,
diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain.Identifier(), diff.Domain) + require.Equal(t, "storage[0]", diff.Trace) + require.Equal(t, "100", diff.OldValue) + require.Equal(t, "111", diff.NewValue) + }) + + t.Run("storage maps have multiple different values", func(t *testing.T) { + t.Parallel() + + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, flow.Emulator, writer, false, runtime.NumCPU()) + + diffReporter.DiffStates( + createStorageMapRegisters(t, address, domain, []string{"0", "1"}, []interpreter.Value{interpreter.UInt64Value(100), interpreter.UInt64Value(101)}), + createStorageMapRegisters(t, address, domain, []string{"0", "1"}, []interpreter.Value{interpreter.UInt64Value(111), interpreter.UInt64Value(102)}), + []common.StorageDomain{domain}, + alwaysDiff, + ) + require.NoError(t, err) + + // 2 entries with 2 different values: + require.Equal(t, 2, len(writer.entries)) + + for _, entry := range writer.entries { + diff := entry.(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain.Identifier(), diff.Domain) + require.True(t, diff.Trace == "storage[0]" || diff.Trace == "storage[1]") + } + }) + + t.Run("nested array value has different elements", func(t *testing.T) { + t.Parallel() + + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, flow.Emulator, writer, false, runtime.NumCPU()) + + createRegisters := func(arrayValues []interpreter.Value) registers.Registers { + + // Create account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) + + // at least one payload otherwise the migration will not get called + registersByAccount, err := registers.NewByAccountFromPayloads( + []*ledger.Payload{ + accountStatusPayload, + }, + ) + require.NoError(t, err) + + mr, err := NewInterpreterMigrationRuntime( + registersByAccount.AccountRegisters(string(address[:])), + flow.Emulator, + InterpreterMigrationRuntimeConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetDomainStorageMap(mr.Interpreter, address, domain, true) + + nestedArray := interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeUInt64, + }, + address, + arrayValues..., + ) + + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(fmt.Sprintf("key_%d", storageMap.Count())), + interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + nestedArray, + ), + ) + + err = mr.Storage.NondeterministicCommit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + registers, err := registers.NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + return registers + } + + 
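+		// The old state stores [0, 2, 4] and the new state stores [1, 3, 5]
+		// inside the nested array, so each of the three shared indices should
+		// be reported as a separate difference.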
diffReporter.DiffStates( + createRegisters([]interpreter.Value{ + interpreter.UInt64Value(0), + interpreter.UInt64Value(2), + interpreter.UInt64Value(4), + }), + createRegisters([]interpreter.Value{ + interpreter.UInt64Value(1), + interpreter.UInt64Value(3), + interpreter.UInt64Value(5), + }), + []common.StorageDomain{domain}, + alwaysDiff, + ) + require.NoError(t, err) + + // 3 entries: + // - different value + require.Equal(t, 3, len(writer.entries)) + + for _, entry := range writer.entries { + diff := entry.(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain.Identifier(), diff.Domain) + require.True(t, diff.Trace == "storage[key_0][0][0]" || diff.Trace == "storage[key_0][0][1]" || diff.Trace == "storage[key_0][0][2]") + + switch diff.Trace { + case "storage[key_0][0][0]": + require.Equal(t, "0", diff.OldValue) + require.Equal(t, "1", diff.NewValue) + + case "storage[key_0][0][1]": + require.Equal(t, "2", diff.OldValue) + require.Equal(t, "3", diff.NewValue) + + case "storage[key_0][0][2]": + require.Equal(t, "4", diff.OldValue) + require.Equal(t, "5", diff.NewValue) + } + } + }) + + t.Run("nested dict value has different elements", func(t *testing.T) { + t.Parallel() + + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, flow.Emulator, writer, false, runtime.NumCPU()) + + createRegisters := func(dictValues []interpreter.Value) registers.Registers { + + // Create account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) + + // at least one payload otherwise the migration will not get called + registersByAccount, err := registers.NewByAccountFromPayloads( + []*ledger.Payload{ + accountStatusPayload, + }, + ) + require.NoError(t, err) + + mr, err := NewInterpreterMigrationRuntime( + registersByAccount.AccountRegisters(string(address[:])), + flow.Emulator, + InterpreterMigrationRuntimeConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetDomainStorageMap(mr.Interpreter, address, domain, true) + + nestedDict := interpreter.NewDictionaryValueWithAddress( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.DictionaryStaticType{ + KeyType: interpreter.PrimitiveStaticTypeAnyStruct, + ValueType: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + dictValues..., + ) + + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(fmt.Sprintf("key_%d", storageMap.Count())), + interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + nestedDict, + ), + ) + + err = mr.Storage.NondeterministicCommit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + registers, err := registers.NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + return registers + } + + diffReporter.DiffStates( + createRegisters( + 
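+				// Old state: {"dict_key_0": 0, "dict_key_1": 2};
+				// new state: {"dict_key_0": 1, "dict_key_1": 3}.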
[]interpreter.Value{interpreter.NewUnmeteredStringValue("dict_key_0"), + interpreter.UInt64Value(0), + interpreter.NewUnmeteredStringValue("dict_key_1"), + interpreter.UInt64Value(2), + }), + createRegisters( + []interpreter.Value{interpreter.NewUnmeteredStringValue("dict_key_0"), + interpreter.UInt64Value(1), + interpreter.NewUnmeteredStringValue("dict_key_1"), + interpreter.UInt64Value(3), + }), + []common.StorageDomain{domain}, + alwaysDiff, + ) + require.NoError(t, err) + + // 2 entries: + // - different value + require.Equal(t, 2, len(writer.entries)) + + for _, entry := range writer.entries { + diff := entry.(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain.Identifier(), diff.Domain) + require.True(t, diff.Trace == "storage[key_0][0][\"dict_key_0\"]" || diff.Trace == "storage[key_0][0][\"dict_key_1\"]") + + switch diff.Trace { + case "storage[key_0][0][\"dict_key_0\"]": + require.Equal(t, "0", diff.OldValue) + require.Equal(t, "1", diff.NewValue) + + case "storage[key_0][0][\"dict_key_1\"]": + require.Equal(t, "2", diff.OldValue) + require.Equal(t, "3", diff.NewValue) + } + } + }) + + t.Run("nested composite value has different elements", func(t *testing.T) { + t.Parallel() + + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, flow.Emulator, writer, false, runtime.NumCPU()) + + createRegisters := func(compositeFields []string, compositeValues []interpreter.Value) registers.Registers { + + // Create account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) + + // at least one payload otherwise the migration will not get called + registersByAccount, err := registers.NewByAccountFromPayloads( + []*ledger.Payload{ + accountStatusPayload, + }, + ) + require.NoError(t, err) + + mr, err := NewInterpreterMigrationRuntime( + registersByAccount.AccountRegisters(string(address[:])), + flow.Emulator, + InterpreterMigrationRuntimeConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetDomainStorageMap(mr.Interpreter, address, domain, true) + + var fields []interpreter.CompositeField + + for i, fieldName := range compositeFields { + fields = append(fields, interpreter.CompositeField{Name: fieldName, Value: compositeValues[i]}) + } + + nestedComposite := interpreter.NewCompositeValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + common.StringLocation("test"), + "Test", + common.CompositeKindStructure, + fields, + address, + ) + + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(fmt.Sprintf("key_%d", storageMap.Count())), + interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + nestedComposite, + ), + ) + + err = mr.Storage.NondeterministicCommit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + registers, err := 
registers.NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + return registers + } + + diffReporter.DiffStates( + createRegisters( + []string{ + "Field_0", + "Field_1", + }, + []interpreter.Value{ + interpreter.UInt64Value(0), + interpreter.UInt64Value(2), + }), + createRegisters( + []string{ + "Field_0", + "Field_1", + }, + []interpreter.Value{ + interpreter.UInt64Value(1), + interpreter.UInt64Value(3), + }), + []common.StorageDomain{domain}, + alwaysDiff, + ) + require.NoError(t, err) + + // 2 entries: + // - different value + require.Equal(t, 2, len(writer.entries)) + + for _, entry := range writer.entries { + diff := entry.(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain.Identifier(), diff.Domain) + require.True(t, diff.Trace == "storage[key_0][0].Field_0" || diff.Trace == "storage[key_0][0].Field_1") + + switch diff.Trace { + case "storage[key_0][0].Field_0": + require.Equal(t, "0", diff.OldValue) + require.Equal(t, "1", diff.NewValue) + + case "storage[key_0][0].Field_1": + require.Equal(t, "2", diff.OldValue) + require.Equal(t, "3", diff.NewValue) + } + } + }) + + t.Run("nested composite value has different elements with verbose logging", func(t *testing.T) { + t.Parallel() + + writer := &testReportWriter{} + + diffReporter := NewCadenceValueDiffReporter(address, flow.Emulator, writer, true, runtime.NumCPU()) + + createRegisters := func(compositeFields []string, compositeValues []interpreter.Value) registers.Registers { + + // Create account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) + + // at least one payload otherwise the migration will not get called + registersByAccount, err := registers.NewByAccountFromPayloads( + []*ledger.Payload{ + accountStatusPayload, + }, + ) + require.NoError(t, err) + + mr, err := NewInterpreterMigrationRuntime( + registersByAccount.AccountRegisters(string(address[:])), + flow.Emulator, + InterpreterMigrationRuntimeConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetDomainStorageMap(mr.Interpreter, address, domain, true) + + var fields []interpreter.CompositeField + + for i, fieldName := range compositeFields { + fields = append(fields, interpreter.CompositeField{Name: fieldName, Value: compositeValues[i]}) + } + + nestedComposite := interpreter.NewCompositeValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + common.StringLocation("test"), + "Test", + common.CompositeKindStructure, + fields, + address, + ) + + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(fmt.Sprintf("key_%d", storageMap.Count())), + interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + nestedComposite, + ), + ) + + err = mr.Storage.NondeterministicCommit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + registers, err := 
registers.NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + return registers + } + + diffReporter.DiffStates( + createRegisters( + []string{ + "Field_0", + "Field_1", + }, + []interpreter.Value{ + interpreter.UInt64Value(0), + interpreter.UInt64Value(2), + }), + createRegisters( + []string{ + "Field_0", + "Field_1", + }, + []interpreter.Value{ + interpreter.UInt64Value(1), + interpreter.UInt64Value(3), + }), + []common.StorageDomain{domain}, + alwaysDiff, + ) + require.NoError(t, err) + + // 3 entries: + // - 2 different values + // - verbose logging of storage map element + require.Equal(t, 3, len(writer.entries)) + + // Test 2 cadence value diff logs + for _, entry := range writer.entries[:2] { + diff := entry.(difference) + require.Equal(t, diffKindString[cadenceValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain.Identifier(), diff.Domain) + require.True(t, diff.Trace == "storage[key_0][0].Field_0" || diff.Trace == "storage[key_0][0].Field_1") + + switch diff.Trace { + case "storage[key_0][0].Field_0": + require.Equal(t, "0", diff.OldValue) + require.Equal(t, "1", diff.NewValue) + + case "storage[key_0][0].Field_1": + require.Equal(t, "2", diff.OldValue) + require.Equal(t, "3", diff.NewValue) + } + } + + // Test storage map value diff log (only with verbose logging) + diff := writer.entries[2].(difference) + require.Equal(t, diffKindString[storageMapValueDiffKind], diff.Kind) + require.Equal(t, address.Hex(), diff.Address) + require.Equal(t, domain.Identifier(), diff.Domain) + require.Equal(t, "storage[key_0]", diff.Trace) + require.Equal(t, "[S.test.Test(Field_1: 2, Field_0: 0)]", diff.OldValue) + require.Equal(t, "[S.test.Test(Field_1: 3, Field_0: 1)]", diff.NewValue) + }) +} + +func createStorageMapRegisters( + t *testing.T, + address common.Address, + domain common.StorageDomain, + keys []string, + values []interpreter.Value, +) registers.Registers { + + // Create account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) + + // at least one payload otherwise the migration will not get called + registersByAccount, err := registers.NewByAccountFromPayloads( + []*ledger.Payload{ + accountStatusPayload, + }, + ) + require.NoError(t, err) + + mr, err := NewInterpreterMigrationRuntime( + registersByAccount.AccountRegisters(string(address[:])), + flow.Emulator, + InterpreterMigrationRuntimeConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetDomainStorageMap(mr.Interpreter, address, domain, true) + + for i, k := range keys { + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(k), + values[i], + ) + } + + err = mr.Storage.NondeterministicCommit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + registers, err := registers.NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + return registers +} + +func createTestRegisters(t *testing.T, address common.Address, domain common.StorageDomain) registers.Registers { + + // Create 
account status payload + accountStatus := environment.NewAccountStatus() + accountStatusPayload := ledger.NewPayload( + convert.RegisterIDToLedgerKey( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + ), + accountStatus.ToBytes(), + ) + + registersByAccount, err := registers.NewByAccountFromPayloads( + []*ledger.Payload{ + accountStatusPayload, + }, + ) + require.NoError(t, err) + + mr, err := NewInterpreterMigrationRuntime( + registersByAccount, + flow.Emulator, + InterpreterMigrationRuntimeConfig{}, + ) + require.NoError(t, err) + + // Create new storage map + storageMap := mr.Storage.GetDomainStorageMap(mr.Interpreter, address, domain, true) + + // Add Cadence UInt64Value + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(strconv.FormatUint(storageMap.Count(), 10)), + interpreter.NewUnmeteredUInt64Value(1), + ) + + // Add Cadence SomeValue + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(strconv.FormatUint(storageMap.Count(), 10)), + interpreter.NewUnmeteredSomeValueNonCopying(interpreter.NewUnmeteredStringValue("InnerValueString")), + ) + + // Add Cadence ArrayValue + const arrayCount = 10 + i := uint64(0) + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(strconv.FormatUint(storageMap.Count(), 10)), + interpreter.NewArrayValueWithIterator( + mr.Interpreter, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + 0, + func() interpreter.Value { + if i == arrayCount { + return nil + } + v := interpreter.NewUnmeteredUInt64Value(i) + i++ + return v + }, + ), + ) + + // Add Cadence DictionaryValue + const dictCount = 10 + dictValues := make([]interpreter.Value, 0, dictCount*2) + for i := 0; i < dictCount; i++ { + k := interpreter.NewUnmeteredUInt64Value(uint64(i)) + v := interpreter.NewUnmeteredStringValue(fmt.Sprintf("value %d", i)) + dictValues = append(dictValues, k, v) + } + + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(strconv.FormatUint(storageMap.Count(), 10)), + interpreter.NewDictionaryValueWithAddress( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.DictionaryStaticType{ + KeyType: interpreter.PrimitiveStaticTypeUInt64, + ValueType: interpreter.PrimitiveStaticTypeString, + }, + address, + dictValues..., + ), + ) + + // Add Cadence CompositeValue + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(strconv.FormatUint(storageMap.Count(), 10)), + interpreter.NewCompositeValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + common.StringLocation("test"), + "Test", + common.CompositeKindStructure, + []interpreter.CompositeField{ + {Name: "field1", Value: interpreter.NewUnmeteredStringValue("value1")}, + {Name: "field2", Value: interpreter.NewUnmeteredStringValue("value2")}, + }, + address, + ), + ) + + // Add Cadence DictionaryValue with nested CadenceArray + nestedArrayValue := interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeUInt64, + }, + address, + interpreter.NewUnmeteredUInt64Value(0), + ) + + storageMap.WriteValue( + mr.Interpreter, + interpreter.StringStorageMapKey(strconv.FormatUint(storageMap.Count(), 10)), + interpreter.NewArrayValue( + mr.Interpreter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + nestedArrayValue, + ), + ) + + err = 
mr.Storage.NondeterministicCommit(mr.Interpreter, false) + require.NoError(t, err) + + // finalize the transaction + result, err := mr.TransactionState.FinalizeMainTransaction() + require.NoError(t, err) + + payloads := make([]*ledger.Payload, 0, len(result.WriteSet)) + for id, value := range result.WriteSet { + key := convert.RegisterIDToLedgerKey(id) + payloads = append(payloads, ledger.NewPayload(key, value)) + } + + registers, err := registers.NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + return registers +} diff --git a/cmd/util/ledger/migrations/deploy_migration.go b/cmd/util/ledger/migrations/deploy_migration.go new file mode 100644 index 00000000000..b149d27632a --- /dev/null +++ b/cmd/util/ledger/migrations/deploy_migration.go @@ -0,0 +1,54 @@ +package migrations + +import ( + "fmt" + + "github.com/onflow/cadence" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/model/flow" +) + +type Contract struct { + Name string + Code []byte +} + +func NewDeploymentMigration( + chainID flow.ChainID, + contract Contract, + authorizer flow.Address, + expectedWriteAddresses map[flow.Address]struct{}, + logger zerolog.Logger, +) RegistersMigration { + + script := []byte(` + transaction(name: String, code: String) { + prepare(signer: auth(AddContract) &Account) { + signer.contracts.add(name: name, code: code.utf8) + } + } + `) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(script). + AddArgument(jsoncdc.MustEncode(cadence.String(contract.Name))). + AddArgument(jsoncdc.MustEncode(cadence.String(contract.Code))). + SetPayer(authorizer). + AddAuthorizer(authorizer). + Build() + if err != nil { + return func(registersByAccount *registers.ByAccount) error { + return fmt.Errorf("failed to run transaction: %w", err) + } + } + + return NewTransactionBasedMigration( + txBody, + chainID, + logger, + expectedWriteAddresses, + ) +} diff --git a/cmd/util/ledger/migrations/deploy_migration_test.go b/cmd/util/ledger/migrations/deploy_migration_test.go new file mode 100644 index 00000000000..c867eee5d71 --- /dev/null +++ b/cmd/util/ledger/migrations/deploy_migration_test.go @@ -0,0 +1,178 @@ +package migrations + +import ( + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func newBootstrapPayloads( + chainID flow.ChainID, + bootstrapProcedureOptions ...fvm.BootstrapProcedureOption, +) ([]*ledger.Payload, error) { + + ctx := fvm.NewContext( + fvm.WithChain(chainID.Chain()), + ) + + vm := fvm.NewVirtualMachine() + + storageSnapshot := snapshot.MapStorageSnapshot{} + + bootstrapProcedure := fvm.Bootstrap( + unittest.ServiceAccountPublicKey, + bootstrapProcedureOptions..., + ) + + executionSnapshot, _, err := vm.Run( + ctx, + bootstrapProcedure, + storageSnapshot, + ) + if err != nil { + return nil, err + } + + payloads := make([]*ledger.Payload, 0, len(executionSnapshot.WriteSet)) + + for registerID, registerValue := range executionSnapshot.WriteSet { + payloadKey := convert.RegisterIDToLedgerKey(registerID) + payload := ledger.NewPayload(payloadKey, 
registerValue) + payloads = append(payloads, payload) + } + + return payloads, nil +} + +func TestDeploy(t *testing.T) { + t.Parallel() + + const chainID = flow.Emulator + + chain := chainID.Chain() + + const nWorker = 2 + + systemContracts := systemcontracts.SystemContractsForChain(chainID) + serviceAccountAddress := systemContracts.FlowServiceAccount.Address + fungibleTokenAddress := systemContracts.FungibleToken.Address + + targetAddress := serviceAccountAddress + + migration := NewDeploymentMigration( + chainID, + Contract{ + Name: "NewContract", + Code: []byte(fmt.Sprintf( + ` + import FungibleToken from %s + + access(all) + contract NewContract { + + access(all) + fun answer(): Int { + return 42 + } + } + `, + fungibleTokenAddress.HexWithPrefix(), + )), + }, + targetAddress, + map[flow.Address]struct{}{ + targetAddress: {}, + }, + zerolog.New(zerolog.NewTestWriter(t)), + ) + + bootstrapPayloads, err := newBootstrapPayloads(chainID) + require.NoError(t, err) + + filteredPayloads := make([]*ledger.Payload, 0, len(bootstrapPayloads)) + + // TODO: move to NewTransactionBasedMigration + + // Filter the bootstrapped payloads to only include the target account (service account) + // and the account where the fungible token is deployed + + for _, payload := range bootstrapPayloads { + registerID, _, err := convert.PayloadToRegister(payload) + require.NoError(t, err) + + if len(registerID.Owner) > 0 { + registerAddress := flow.Address([]byte(registerID.Owner)) + switch registerAddress { + case targetAddress, fungibleTokenAddress: + filteredPayloads = append(filteredPayloads, payload) + } + } else { + filteredPayloads = append(filteredPayloads, payload) + } + } + + registersByAccount, err := registers.NewByAccountFromPayloads(filteredPayloads) + require.NoError(t, err) + + err = migration(registersByAccount) + require.NoError(t, err) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf( + ` + import NewContract from %s + + transaction { + execute { + log(NewContract.answer()) + } + } + `, + targetAddress.HexWithPrefix(), + ))). + SetPayer(serviceAccountAddress). 
+ Build() + require.NoError(t, err) + + vm := fvm.NewVirtualMachine() + + storageSnapshot := snapshot.MapStorageSnapshot{} + + newPayloads := registersByAccount.DestructIntoPayloads(nWorker) + + for _, newPayload := range newPayloads { + registerID, registerValue, err := convert.PayloadToRegister(newPayload) + require.NoError(t, err) + + storageSnapshot[registerID] = registerValue + } + + ctx := fvm.NewContext( + fvm.WithChain(chain), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithCadenceLogging(true), + ) + + _, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + storageSnapshot, + ) + + require.NoError(t, err) + require.NoError(t, output.Err) + require.Len(t, output.Logs, 1) + require.Equal(t, "42", output.Logs[0]) +} diff --git a/cmd/util/ledger/migrations/filter_unreferenced_slabs_migration.go b/cmd/util/ledger/migrations/filter_unreferenced_slabs_migration.go new file mode 100644 index 00000000000..91f82e37f6e --- /dev/null +++ b/cmd/util/ledger/migrations/filter_unreferenced_slabs_migration.go @@ -0,0 +1,261 @@ +package migrations + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "path" + "sort" + "strings" + "sync" + "time" + + "github.com/onflow/atree" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" +) + +func registerFromSlabID(slabID atree.SlabID) (owner, key string) { + var address [8]byte + binary.BigEndian.PutUint64(address[:], slabID.AddressAsUint64()) + + index := slabID.Index() + + owner = string(address[:]) + + var sb strings.Builder + sb.WriteByte(flow.SlabIndexPrefix) + sb.Write(index[:]) + key = sb.String() + + return owner, key +} + +type FilterUnreferencedSlabsMigration struct { + log zerolog.Logger + rw reporters.ReportWriter + outputDir string + mutex sync.Mutex + filteredPayloads []*ledger.Payload + payloadsFile string + nWorkers int +} + +var _ AccountBasedMigration = &FilterUnreferencedSlabsMigration{} + +const filterUnreferencedSlabsName = "filter-unreferenced-slabs" + +func NewFilterUnreferencedSlabsMigration( + outputDir string, + rwf reporters.ReportWriterFactory, +) *FilterUnreferencedSlabsMigration { + return &FilterUnreferencedSlabsMigration{ + outputDir: outputDir, + rw: rwf.ReportWriter(filterUnreferencedSlabsName), + filteredPayloads: make([]*ledger.Payload, 0, 50_000), + } +} + +func (m *FilterUnreferencedSlabsMigration) InitMigration( + log zerolog.Logger, + _ *registers.ByAccount, + nWorkers int, +) error { + m.log = log. + With(). + Str("migration", filterUnreferencedSlabsName). 
+ Logger() + + m.nWorkers = nWorkers + + return nil +} + +func (m *FilterUnreferencedSlabsMigration) MigrateAccount( + _ context.Context, + address common.Address, + accountRegisters *registers.AccountRegisters, +) error { + + storage := runtime.NewStorage( + registers.ReadOnlyLedger{ + Registers: accountRegisters, + }, + nil, + runtime.StorageConfig{}, + ) + + inter, err := interpreter.NewInterpreter( + nil, + nil, + &interpreter.Config{ + Storage: storage, + }, + ) + if err != nil { + return fmt.Errorf("failed to create interpreter: %w", err) + } + + err = util.CheckStorageHealth(inter, address, storage, accountRegisters, common.AllStorageDomains, m.nWorkers) + if err == nil { + return nil + } + + // The storage health check failed. + // This can happen if there are unreferenced root slabs. + // In this case, we filter out the unreferenced root slabs and all slabs they reference from the payloads. + + var unreferencedRootSlabsErr runtime.UnreferencedRootSlabsError + if !errors.As(err, &unreferencedRootSlabsErr) { + return fmt.Errorf("storage health check failed: %w", err) + } + + // Create a set of unreferenced slabs: root slabs, and all slabs they reference. + + unreferencedSlabIDs := map[atree.SlabID]struct{}{} + for _, rootSlabID := range unreferencedRootSlabsErr.UnreferencedRootSlabIDs { + unreferencedSlabIDs[rootSlabID] = struct{}{} + + childReferences, _, err := storage.GetAllChildReferences(rootSlabID) + if err != nil { + return fmt.Errorf( + "failed to get all child references for root slab %s: %w", + rootSlabID, + err, + ) + } + + for _, childSlabID := range childReferences { + unreferencedSlabIDs[childSlabID] = struct{}{} + } + } + + // Filter out unreferenced slabs. + + filteredPayloads := make([]*ledger.Payload, 0, len(unreferencedSlabIDs)) + + m.log.Warn(). + Str("account", address.HexWithPrefix()). + Msgf("filtering %d unreferenced slabs", len(unreferencedSlabIDs)) + + var slabIDs []atree.SlabID + for storageID := range unreferencedSlabIDs { + slabIDs = append(slabIDs, storageID) + } + sort.Slice( + slabIDs, + func(i, j int) bool { + a := slabIDs[i] + b := slabIDs[j] + return a.Compare(b) < 0 + }, + ) + + for _, slabID := range slabIDs { + owner, key := registerFromSlabID(slabID) + + value, err := accountRegisters.Get(owner, key) + if err != nil { + return fmt.Errorf( + "failed to get register for slab %x/%x: %w", + owner, + slabID.Index(), + err, + ) + } + + err = accountRegisters.Set(owner, key, nil) + if err != nil { + return fmt.Errorf( + "failed to set register for slab %x/%x: %w", + owner, + slabID.Index(), + err, + ) + } + + ledgerKey := convert.RegisterIDToLedgerKey(flow.RegisterID{ + Owner: owner, + Key: key, + }) + payload := ledger.NewPayload(ledgerKey, value) + filteredPayloads = append(filteredPayloads, payload) + } + + m.rw.Write(unreferencedSlabs{ + Account: address.Hex(), + PayloadCount: len(filteredPayloads), + }) + + m.mergeFilteredPayloads(filteredPayloads) + + // Do NOT report the health check error here. + // The health check error is only reported if it is not due to unreferenced slabs. + // If it is due to unreferenced slabs, we filter them out and continue. + + return nil +} + +func (m *FilterUnreferencedSlabsMigration) mergeFilteredPayloads(payloads []*ledger.Payload) { + m.mutex.Lock() + defer m.mutex.Unlock() + + m.filteredPayloads = append(m.filteredPayloads, payloads...) 
+} + +func (m *FilterUnreferencedSlabsMigration) Close() error { + // close the report writer so it flushes to file + m.rw.Close() + + err := m.writeFilteredPayloads() + if err != nil { + return fmt.Errorf("failed to write filtered payloads to file: %w", err) + } + + return nil +} + +func (m *FilterUnreferencedSlabsMigration) writeFilteredPayloads() error { + + m.payloadsFile = path.Join( + m.outputDir, + fmt.Sprintf("filtered_%d.payloads", int32(time.Now().Unix())), + ) + + writtenPayloadCount, err := util.CreatePayloadFile( + m.log, + m.payloadsFile, + m.filteredPayloads, + nil, + true, + ) + + if err != nil { + return fmt.Errorf("failed to write all filtered payloads to file: %w", err) + } + + if writtenPayloadCount != len(m.filteredPayloads) { + return fmt.Errorf( + "failed to write all filtered payloads to file: expected %d, got %d", + len(m.filteredPayloads), + writtenPayloadCount, + ) + } + + return nil +} + +type unreferencedSlabs struct { + Account string `json:"account"` + PayloadCount int `json:"payload_count"` +} diff --git a/cmd/util/ledger/migrations/filter_unreferenced_slabs_migration_test.go b/cmd/util/ledger/migrations/filter_unreferenced_slabs_migration_test.go new file mode 100644 index 00000000000..43b10e012d6 --- /dev/null +++ b/cmd/util/ledger/migrations/filter_unreferenced_slabs_migration_test.go @@ -0,0 +1,273 @@ +package migrations + +import ( + "context" + "encoding/binary" + "fmt" + "sync" + "testing" + + "github.com/onflow/atree" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" +) + +type testReportWriterFactory struct { + lock sync.Mutex + reportWriters map[string]*testReportWriter +} + +func (f *testReportWriterFactory) ReportWriter(dataNamespace string) reporters.ReportWriter { + f.lock.Lock() + defer f.lock.Unlock() + + if f.reportWriters == nil { + f.reportWriters = make(map[string]*testReportWriter) + } + reportWriter := &testReportWriter{} + if _, ok := f.reportWriters[dataNamespace]; ok { + panic(fmt.Sprintf("report writer already exists for namespace %s", dataNamespace)) + } + f.reportWriters[dataNamespace] = reportWriter + return reportWriter +} + +type testReportWriter struct { + lock sync.Mutex + entries []any +} + +var _ reporters.ReportWriter = &testReportWriter{} + +func (r *testReportWriter) Write(entry any) { + r.lock.Lock() + defer r.lock.Unlock() + + r.entries = append(r.entries, entry) +} + +func (r *testReportWriter) Close() {} + +func TestFilterUnreferencedSlabs(t *testing.T) { + t.Parallel() + + // Arrange + + const chainID = flow.Emulator + chain := chainID.Chain() + + const nWorker = 2 + + testFlowAddress, err := chain.AddressAtIndex(1_000_000) + require.NoError(t, err) + + testAddress := common.Address(testFlowAddress) + + payloads := map[flow.RegisterID]*ledger.Payload{} + + payloadsLedger := util.NewPayloadsLedger(payloads) + + storageIndices := map[string]uint64{} + payloadsLedger.AllocateSlabIndexFunc = func(owner []byte) (atree.SlabIndex, error) { + var index atree.SlabIndex + + storageIndices[string(owner)]++ + + binary.BigEndian.PutUint64( + index[:], + 
storageIndices[string(owner)], + ) + + return index, nil + } + + storage := runtime.NewStorage(payloadsLedger, nil, runtime.StorageConfig{}) + + // {Int: Int} + dict1StaticType := interpreter.NewDictionaryStaticType( + nil, + interpreter.PrimitiveStaticTypeInt, + interpreter.PrimitiveStaticTypeInt, + ) + + inter, err := interpreter.NewInterpreter( + nil, + nil, + &interpreter.Config{ + Storage: storage, + }, + ) + require.NoError(t, err) + + dict1 := interpreter.NewDictionaryValueWithAddress( + inter, + interpreter.EmptyLocationRange, + dict1StaticType, + testAddress, + ) + + // Store another dictionary, with a nested array, in the account. + // It is not referenced through a storage map though. + + arrayStaticType := interpreter.NewVariableSizedStaticType(nil, interpreter.PrimitiveStaticTypeInt) + + dict2StaticType := interpreter.NewDictionaryStaticType( + nil, + interpreter.PrimitiveStaticTypeInt, + arrayStaticType, + ) + + dict2 := interpreter.NewDictionaryValueWithAddress( + inter, + interpreter.EmptyLocationRange, + dict2StaticType, + testAddress, + ) + + // Ensure the array is large enough to be stored in a separate slab + arrayCount := 100 + arrayValues := make([]interpreter.Value, arrayCount) + for i := 0; i < arrayCount; i++ { + arrayValues[i] = interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + } + + array := interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + arrayStaticType, + common.ZeroAddress, + arrayValues..., + ) + + dict2.Insert( + inter, interpreter.EmptyLocationRange, + interpreter.NewUnmeteredIntValueFromInt64(2), + array, + ) + + storageMap := storage.GetDomainStorageMap( + inter, + testAddress, + common.StorageDomainPathStorage, + true, + ) + + // Only insert first dictionary. + // Second dictionary is unreferenced. + + storageMap.SetValue( + inter, + interpreter.StringStorageMapKey("test"), + dict1, + ) + + err = storage.NondeterministicCommit(inter, false) + require.NoError(t, err) + + oldPayloads := make([]*ledger.Payload, 0, len(payloads)) + + for _, payload := range payloadsLedger.Payloads { + if len(payload.Value()) == 0 { + // Don't count empty slabs as result of inlining. 
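+			// (Inlining can leave registers with empty values behind; they no
+			// longer represent stored slabs.)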
+ continue + } + oldPayloads = append(oldPayloads, payload) + } + + // Storage has 4 non-empty payloads: + // - storage map + // - dict1 + // - dict2 + // - nested array in dict2 + const totalSlabCount = 4 + + require.Len(t, oldPayloads, totalSlabCount) + + // Act + + rwf := &testReportWriterFactory{} + migration := NewFilterUnreferencedSlabsMigration(t.TempDir(), rwf) + + log := zerolog.New(zerolog.NewTestWriter(t)) + + registersByAccount, err := registers.NewByAccountFromPayloads(oldPayloads) + require.NoError(t, err) + + err = migration.InitMigration(log, registersByAccount, 1) + require.NoError(t, err) + + accountRegisters := registersByAccount.AccountRegisters(string(testAddress[:])) + + err = migration.MigrateAccount( + context.Background(), + testAddress, + accountRegisters, + ) + require.NoError(t, err) + + err = migration.Close() + require.NoError(t, err) + + // Assert + + writer := rwf.reportWriters[filterUnreferencedSlabsName] + + expectedAddress := string(testAddress[:]) + expectedKeys := map[string]struct{}{ + string([]byte{flow.SlabIndexPrefix, 0, 0, 0, 0, 0, 0, 0, 2}): {}, + string([]byte{flow.SlabIndexPrefix, 0, 0, 0, 0, 0, 0, 0, 3}): {}, + } + + newPayloads := registersByAccount.DestructIntoPayloads(nWorker) + assert.Len(t, newPayloads, totalSlabCount-len(expectedKeys)) + + expectedFilteredPayloads := make([]*ledger.Payload, 0, len(expectedKeys)) + + for _, payload := range oldPayloads { + registerID, _, err := convert.PayloadToRegister(payload) + require.NoError(t, err) + + if registerID.Owner != expectedAddress { + continue + } + + if _, ok := expectedKeys[registerID.Key]; !ok { + continue + } + + expectedFilteredPayloads = append(expectedFilteredPayloads, payload) + } + + assert.Equal(t, + []any{ + unreferencedSlabs{ + Account: testAddress.Hex(), + PayloadCount: len(expectedFilteredPayloads), + }, + }, + writer.entries, + ) + assert.ElementsMatch(t, + expectedFilteredPayloads, + migration.filteredPayloads, + ) + + readIsPartial, readFilteredPayloads, err := util.ReadPayloadFile(log, migration.payloadsFile) + require.NoError(t, err) + assert.True(t, readIsPartial) + assert.ElementsMatch(t, + expectedFilteredPayloads, + readFilteredPayloads, + ) +} diff --git a/cmd/util/ledger/migrations/fix_broken_data_migration.go b/cmd/util/ledger/migrations/fix_broken_data_migration.go new file mode 100644 index 00000000000..56bfda49257 --- /dev/null +++ b/cmd/util/ledger/migrations/fix_broken_data_migration.go @@ -0,0 +1,281 @@ +package migrations + +import ( + "context" + "fmt" + "path" + "sort" + "sync" + "time" + + "github.com/onflow/cadence/interpreter" + "github.com/rs/zerolog" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" +) + +// ShouldFixBrokenCompositeKeyedDictionary returns true if the given value is a dictionary with a composite key type. +// +// It is useful for use with atree's PersistentSlabStorage.FixLoadedBrokenReferences. +// +// NOTE: The intended use case is to enable migration programs in onflow/flow-go to fix broken references. 
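+// For example, a stored dictionary with type {Test: Int}, where Test is a
+// composite type, qualifies, whereas one with type {Int: Int} does not.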
+// As of April 2024, only 10 registers in testnet (not mainnet) were found to have broken references, +// and they seem to have resulted from a bug that was fixed 2 years ago by https://github.com/onflow/cadence/pull/1565. +func ShouldFixBrokenCompositeKeyedDictionary(atreeValue atree.Value) bool { + orderedMap, ok := atreeValue.(*atree.OrderedMap) + if !ok { + return false + } + + dictionaryStaticType, ok := orderedMap.Type().(*interpreter.DictionaryStaticType) + if !ok { + return false + } + + _, ok = dictionaryStaticType.KeyType.(*interpreter.CompositeStaticType) + return ok +} + +type FixSlabsWithBrokenReferencesMigration struct { + log zerolog.Logger + rw reporters.ReportWriter + outputDir string + accountsToFix map[common.Address]struct{} + nWorkers int + mutex sync.Mutex + brokenPayloads []*ledger.Payload + payloadsFile string +} + +var _ AccountBasedMigration = &FixSlabsWithBrokenReferencesMigration{} + +const fixSlabsWithBrokenReferencesName = "fix-slabs-with-broken-references" + +func NewFixBrokenReferencesInSlabsMigration( + outputDir string, + rwf reporters.ReportWriterFactory, + accountsToFix map[common.Address]struct{}, +) *FixSlabsWithBrokenReferencesMigration { + return &FixSlabsWithBrokenReferencesMigration{ + outputDir: outputDir, + rw: rwf.ReportWriter(fixSlabsWithBrokenReferencesName), + accountsToFix: accountsToFix, + brokenPayloads: make([]*ledger.Payload, 0, 10), + } +} + +func (m *FixSlabsWithBrokenReferencesMigration) InitMigration( + log zerolog.Logger, + _ *registers.ByAccount, + nWorkers int, +) error { + m.log = log. + With(). + Str("migration", fixSlabsWithBrokenReferencesName). + Logger() + m.nWorkers = nWorkers + + return nil +} + +func (m *FixSlabsWithBrokenReferencesMigration) MigrateAccount( + _ context.Context, + address common.Address, + accountRegisters *registers.AccountRegisters, +) error { + + if _, exist := m.accountsToFix[address]; !exist { + return nil + } + + migrationRuntime := NewBasicMigrationRuntime(accountRegisters) + storage := migrationRuntime.Storage + + // Load all atree registers in storage + err := util.LoadAtreeSlabsInStorage(storage, accountRegisters, m.nWorkers) + if err != nil { + return err + } + + // Fix broken references + fixedStorageIDs, skippedStorageIDs, err := + storage.FixLoadedBrokenReferences(ShouldFixBrokenCompositeKeyedDictionary) + if err != nil { + return err + } + + if len(skippedStorageIDs) > 0 { + m.log.Warn(). + Str("account", address.HexWithPrefix()). + Msgf("skipped slabs with broken references: %v", skippedStorageIDs) + } + + if len(fixedStorageIDs) == 0 { + m.log.Warn(). + Str("account", address.HexWithPrefix()). + Msgf("did not fix any slabs with broken references") + + return nil + } + + m.log.Log(). + Str("account", address.HexWithPrefix()). 
+ Msgf("fixed %d slabs with broken references", len(fixedStorageIDs)) + + // Save broken payloads to save to payload file later + brokenPayloads, err := getAtreePayloadsByID(accountRegisters, fixedStorageIDs) + if err != nil { + return err + } + + m.mergeBrokenPayloads(brokenPayloads) + + err = storage.NondeterministicFastCommit(m.nWorkers) + if err != nil { + return fmt.Errorf("failed to commit storage: %w", err) + } + + // Commit/finalize the transaction + + expectedAddresses := map[flow.Address]struct{}{ + flow.Address(address): {}, + } + + err = migrationRuntime.Commit(expectedAddresses, m.log) + if err != nil { + return fmt.Errorf("failed to commit: %w", err) + } + + // Log fixed payloads + fixedPayloads, err := getAtreePayloadsByID(accountRegisters, fixedStorageIDs) + if err != nil { + return err + } + + m.rw.Write(fixedSlabsWithBrokenReferences{ + Account: address.Hex(), + BrokenPayloads: brokenPayloads, + FixedPayloads: fixedPayloads, + }) + + return nil +} + +func (m *FixSlabsWithBrokenReferencesMigration) mergeBrokenPayloads(payloads []*ledger.Payload) { + m.mutex.Lock() + defer m.mutex.Unlock() + + m.brokenPayloads = append(m.brokenPayloads, payloads...) +} + +func (m *FixSlabsWithBrokenReferencesMigration) Close() error { + // close the report writer so it flushes to file + m.rw.Close() + + err := m.writeBrokenPayloads() + if err != nil { + return fmt.Errorf("failed to write broken payloads to file: %w", err) + } + + return nil +} + +func (m *FixSlabsWithBrokenReferencesMigration) writeBrokenPayloads() error { + + m.payloadsFile = path.Join( + m.outputDir, + fmt.Sprintf("broken_%d.payloads", int32(time.Now().Unix())), + ) + + writtenPayloadCount, err := util.CreatePayloadFile( + m.log, + m.payloadsFile, + m.brokenPayloads, + nil, + true, + ) + + if err != nil { + return fmt.Errorf("failed to write all broken payloads to file: %w", err) + } + + if writtenPayloadCount != len(m.brokenPayloads) { + return fmt.Errorf( + "failed to write all broken payloads to file: expected %d, got %d", + len(m.brokenPayloads), + writtenPayloadCount, + ) + } + + return nil +} + +func getAtreePayloadsByID( + registers *registers.AccountRegisters, + ids map[atree.SlabID][]atree.SlabID, +) ( + []*ledger.Payload, + error, +) { + outputPayloads := make([]*ledger.Payload, 0, len(ids)) + + owner := registers.Owner() + + keys := make([]string, 0, len(ids)) + err := registers.ForEachKey(func(key string) error { + + if !flow.IsSlabIndexKey(key) { + return nil + } + + slabID := atree.NewSlabID( + atree.Address([]byte(owner)), + atree.SlabIndex([]byte(key[1:])), + ) + + _, ok := ids[slabID] + if !ok { + return nil + } + + keys = append(keys, key) + + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(keys) + + for _, key := range keys { + value, err := registers.Get(owner, key) + if err != nil { + return nil, err + } + + ledgerKey := convert.RegisterIDToLedgerKey(flow.RegisterID{ + Owner: owner, + Key: key, + }) + payload := ledger.NewPayload(ledgerKey, value) + outputPayloads = append(outputPayloads, payload) + } + + return outputPayloads, nil +} + +type fixedSlabsWithBrokenReferences struct { + Account string `json:"account"` + BrokenPayloads []*ledger.Payload `json:"broken_payloads"` + FixedPayloads []*ledger.Payload `json:"fixed_payloads"` +} diff --git a/cmd/util/ledger/migrations/fix_broken_data_migration_test.go b/cmd/util/ledger/migrations/fix_broken_data_migration_test.go new file mode 100644 index 00000000000..44be376a80a --- /dev/null +++ 
b/cmd/util/ledger/migrations/fix_broken_data_migration_test.go @@ -0,0 +1,212 @@ +package migrations + +import ( + "bytes" + "context" + "encoding/hex" + "testing" + + "github.com/onflow/cadence/common" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/ledger" +) + +func TestFixSlabsWithBrokenReferences(t *testing.T) { + t.Parallel() + + const nWorker = 2 + + rawAddress := mustDecodeHex("5e3448b3cffb97f2") + + address := common.MustBytesToAddress(rawAddress) + + ownerKey := ledger.KeyPart{Type: 0, Value: rawAddress} + + oldPayloads := []*ledger.Payload{ + // account status "a.s" register + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("612e73")}}), + mustDecodeHex("00000000000000083900000000000000090000000000000001"), + ), + + // storage domain register + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("73746f72616765")}}), + mustDecodeHex("0000000000000008"), + ), + + // public domain register + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("7075626c6963")}}), + mustDecodeHex("0000000000000007"), + ), + + // MapDataSlab [balance:1000.00089000 uuid:13797744] + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("240000000000000001")}}), + mustDecodeHex("008883d88483d8c082487e60df042a9c086869466c6f77546f6b656e6f466c6f77546f6b656e2e5661756c7402021b146e6a6a4c5eee08008883005b00000000000000100887f9d0544c60cbefe0afc51d7f46609b0000000000000002826762616c616e6365d8bc1b00000017487843a8826475756964d8a41a00d28970"), + ), + + // MapDataSlab [uuid:13799884 roles:StorageIDStorable({[94 52 72 179 207 251 151 242] [0 0 0 0 0 0 0 3]}) recipient:0x5e3448b3cffb97f2] + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("240000000000000002")}}), + mustDecodeHex("00c883d88483d8c0824848602d8056ff9d937046616e546f705065726d697373696f6e7746616e546f705065726d697373696f6e2e486f6c64657202031bb9d0e9f36650574100c883005b000000000000001820d6c23f2e85e694b0070dbc21a9822de5725916c4a005e99b0000000000000003826475756964d8a41a00d291cc8265726f6c6573d8ff505e3448b3cffb97f200000000000000038269726563697069656e74d883485e3448b3cffb97f2"), + ), + + // This slab contains broken references. 
+ // MapDataSlab [StorageIDStorable({[0 0 0 0 0 0 0 0] [0 0 0 0 0 0 0 45]}):Capability<&A.48602d8056ff9d93.FanTopPermission.Admin>(address: 0x48602d8056ff9d93, path: /private/FanTopAdmin)] + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("240000000000000003")}}), + mustDecodeHex("00c883d8d982d8d582d8c0824848602d8056ff9d937046616e546f705065726d697373696f6e7546616e546f705065726d697373696f6e2e526f6c65d8ddf6011b535c9de83a38cab000c883005b000000000000000856c1dcdf34d761b79b000000000000000182d8ff500000000000000000000000000000002dd8c983d8834848602d8056ff9d93d8c882026b46616e546f7041646d696ed8db82f4d8d582d8c0824848602d8056ff9d937046616e546f705065726d697373696f6e7646616e546f705065726d697373696f6e2e41646d696e"), + ), + + // MapDataSlab [resources:StorageIDStorable({[94 52 72 179 207 251 151 242] [0 0 0 0 0 0 0 5]}) uuid:15735719 address:0x5e3448b3cffb97f2] + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("240000000000000004")}}), + mustDecodeHex("00c883d88483d8c0824848602d8056ff9d937346616e546f705065726d697373696f6e563261781a46616e546f705065726d697373696f6e5632612e486f6c64657202031b5a99ef3adb06d40600c883005b00000000000000185c9fead93697b692967de568f789d3c2d5e974502c8b12e99b000000000000000382697265736f7572636573d8ff505e3448b3cffb97f20000000000000005826475756964d8a41a00f01ba7826761646472657373d883485e3448b3cffb97f2"), + ), + + // MapDataSlab ["admin":StorageIDStorable({[94 52 72 179 207 251 151 242] [0 0 0 0 0 0 0 6]})] + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("240000000000000005")}}), + mustDecodeHex("00c883d8d982d8d408d8dc82d8d40581d8d682d8c0824848602d8056ff9d937346616e546f705065726d697373696f6e563261781846616e546f705065726d697373696f6e5632612e526f6c65011b8059ccce9aa48cfb00c883005b00000000000000087a89c005baa53d9a9b000000000000000182d8876561646d696ed8ff505e3448b3cffb97f20000000000000006"), + ), + + // MapDataSlab [role:"admin" uuid:15735727] + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("240000000000000006")}}), + mustDecodeHex("008883d88483d8c0824848602d8056ff9d937346616e546f705065726d697373696f6e563261781946616e546f705065726d697373696f6e5632612e41646d696e02021b4fc212cd0f233183008883005b0000000000000010858862f5e3e45e48d2bf75097a8aaf819b00000000000000028264726f6c65d8876561646d696e826475756964d8a41a00f01baf"), + ), + + // MapDataSlab [ + // FanTopPermissionV2a:PathLink<&{A.48602d8056ff9d93.FanTopPermissionV2a.Receiver}>(/storage/FanTopPermissionV2a) + // flowTokenReceiver:PathLink<&{A.9a0766d93b6608b7.FungibleToken.Receiver}>(/storage/flowTokenVault) + // flowTokenBalance:PathLink<&{A.9a0766d93b6608b7.FungibleToken.Balance}>(/storage/flowTokenVault) + // FanTopPermission:PathLink<&{A.48602d8056ff9d93.FanTopPermission.Receiver}>(/storage/FanTopPermission)] + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("240000000000000007")}}), + 
mustDecodeHex("008883f6041bc576c5f201b94974008883005b00000000000000207971082fb163397089dbafb546246f429beff1dc622768dcb916d25455dc0be39b0000000000000004827346616e546f705065726d697373696f6e563261d8cb82d8c882017346616e546f705065726d697373696f6e563261d8db82f4d8dc82d8d40581d8d682d8c0824848602d8056ff9d937346616e546f705065726d697373696f6e563261781c46616e546f705065726d697373696f6e5632612e52656365697665728271666c6f77546f6b656e5265636569766572d8cb82d8c882016e666c6f77546f6b656e5661756c74d8db82f4d8dc82d8d582d8c082487e60df042a9c086869466c6f77546f6b656e6f466c6f77546f6b656e2e5661756c7481d8d682d8c082489a0766d93b6608b76d46756e6769626c65546f6b656e7646756e6769626c65546f6b656e2e52656365697665728270666c6f77546f6b656e42616c616e6365d8cb82d8c882016e666c6f77546f6b656e5661756c74d8db82f4d8dc82d8d582d8c082487e60df042a9c086869466c6f77546f6b656e6f466c6f77546f6b656e2e5661756c7481d8d682d8c082489a0766d93b6608b76d46756e6769626c65546f6b656e7546756e6769626c65546f6b656e2e42616c616e6365827046616e546f705065726d697373696f6ed8cb82d8c882017046616e546f705065726d697373696f6ed8db82f4d8dc82d8d40581d8d682d8c0824848602d8056ff9d937046616e546f705065726d697373696f6e781946616e546f705065726d697373696f6e2e5265636569766572"), + ), + + // MapDataSlab [ + // FanTopPermission:StorageIDStorable({[94 52 72 179 207 251 151 242] [0 0 0 0 0 0 0 2]}) + // FanTopPermissionV2a:StorageIDStorable({[94 52 72 179 207 251 151 242] [0 0 0 0 0 0 0 4]}) + // flowTokenVault:StorageIDStorable({[94 52 72 179 207 251 151 242] [0 0 0 0 0 0 0 1]})] + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: mustDecodeHex("240000000000000008")}}), + mustDecodeHex("008883f6031b7d303e276f3b803f008883005b00000000000000180a613a86f5856a480b3a715aa29b9876e5d7742a5a1df8e09b0000000000000003827046616e546f705065726d697373696f6ed8ff505e3448b3cffb97f20000000000000002827346616e546f705065726d697373696f6e563261d8ff505e3448b3cffb97f20000000000000004826e666c6f77546f6b656e5661756c74d8ff505e3448b3cffb97f20000000000000001"), + ), + } + + slabIndexWithBrokenReferences := mustDecodeHex("240000000000000003") + + slabWithBrokenReferences := ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: slabIndexWithBrokenReferences}}), + mustDecodeHex("00c883d8d982d8d582d8c0824848602d8056ff9d937046616e546f705065726d697373696f6e7546616e546f705065726d697373696f6e2e526f6c65d8ddf6011b535c9de83a38cab000c883005b000000000000000856c1dcdf34d761b79b000000000000000182d8ff500000000000000000000000000000002dd8c983d8834848602d8056ff9d93d8c882026b46616e546f7041646d696ed8db82f4d8d582d8c0824848602d8056ff9d937046616e546f705065726d697373696f6e7646616e546f705065726d697373696f6e2e41646d696e"), + ) + + fixedSlabWithBrokenReferences := ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: slabIndexWithBrokenReferences}}), + ledger.Value(mustDecodeHex("108883d8d982d8d582d8c0824848602d8056ff9d937046616e546f705065726d697373696f6e7546616e546f705065726d697373696f6e2e526f6c65d8ddf6001b535c9de83a38cab08300590000990000")), + ) + + // Account status register is updated to include address ID counter and new storage used. 
+ accountStatusRegisterID := mustDecodeHex("612e73") + updatedAccountStatusRegister := ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: accountStatusRegisterID}}), + []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0xba, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + ) + + expectedNewPayloads := make([]*ledger.Payload, len(oldPayloads)) + copy(expectedNewPayloads, oldPayloads) + + for i, payload := range expectedNewPayloads { + payloadKey, err := payload.Key() + require.NoError(t, err) + + key := payloadKey.KeyParts[1].Value + + if bytes.Equal(key, slabIndexWithBrokenReferences) { + expectedNewPayloads[i] = fixedSlabWithBrokenReferences + } else if bytes.Equal(key, accountStatusRegisterID) { + expectedNewPayloads[i] = updatedAccountStatusRegister + } + } + + rwf := &testReportWriterFactory{} + + log := zerolog.New(zerolog.NewTestWriter(t)) + + accountsToFix := map[common.Address]struct{}{ + address: {}, + } + + migration := NewFixBrokenReferencesInSlabsMigration(t.TempDir(), rwf, accountsToFix) + + registersByAccount, err := registers.NewByAccountFromPayloads(oldPayloads) + require.NoError(t, err) + + err = migration.InitMigration(log, registersByAccount, nWorker) + require.NoError(t, err) + + accountRegisters := registersByAccount.AccountRegisters(string(address[:])) + + err = migration.MigrateAccount( + context.Background(), + address, + accountRegisters, + ) + require.NoError(t, err) + + err = migration.Close() + require.NoError(t, err) + + newPayloads := registersByAccount.DestructIntoPayloads(nWorker) + + require.Equal(t, len(expectedNewPayloads), len(newPayloads)) + + for _, expected := range expectedNewPayloads { + k, _ := expected.Key() + rawExpectedKey := expected.EncodedKey() + + var found bool + for _, p := range newPayloads { + if bytes.Equal(rawExpectedKey, p.EncodedKey()) { + found = true + require.Equal(t, expected.Value(), p.Value(), k.String()) + break + } + } + require.True(t, found) + } + + writer := rwf.reportWriters[fixSlabsWithBrokenReferencesName] + assert.Equal(t, + []any{ + fixedSlabsWithBrokenReferences{ + Account: address.Hex(), + BrokenPayloads: []*ledger.Payload{slabWithBrokenReferences}, + FixedPayloads: []*ledger.Payload{fixedSlabWithBrokenReferences}, + }, + }, + writer.entries, + ) + + readIsPartial, readBrokenPayloads, err := util.ReadPayloadFile(log, migration.payloadsFile) + require.NoError(t, err) + assert.True(t, readIsPartial) + assert.Equal(t, []*ledger.Payload{slabWithBrokenReferences}, readBrokenPayloads) +} + +func mustDecodeHex(s string) []byte { + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} diff --git a/cmd/util/ledger/migrations/migrator_runtime.go b/cmd/util/ledger/migrations/migrator_runtime.go new file mode 100644 index 00000000000..946542ebc09 --- /dev/null +++ b/cmd/util/ledger/migrations/migrator_runtime.go @@ -0,0 +1,213 @@ +package migrations + +import ( + "fmt" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/stdlib" + "github.com/onflow/crypto/hash" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm" + evmStdlib "github.com/onflow/flow-go/fvm/evm/stdlib" + "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/systemcontracts" + 
"github.com/onflow/flow-go/model/flow" +) + +type BasicMigrationRuntime struct { + Registers registers.Registers + TransactionState state.NestedTransactionPreparer + Storage *runtime.Storage + AccountsLedger *util.AccountsAtreeLedger + Accounts environment.Accounts +} + +func (r *BasicMigrationRuntime) Commit(expectedAddresses map[flow.Address]struct{}, log zerolog.Logger) error { + + result, err := r.TransactionState.FinalizeMainTransaction() + if err != nil { + return fmt.Errorf("failed to finalize main transaction: %w", err) + } + + err = registers.ApplyChanges( + r.Registers, + result.WriteSet, + expectedAddresses, + log, + ) + if err != nil { + return fmt.Errorf("failed to apply changes: %w", err) + } + return nil +} + +type InterpreterMigrationRuntime struct { + *BasicMigrationRuntime + Interpreter *interpreter.Interpreter + ContractAdditionHandler stdlib.AccountContractAdditionHandler + ContractNamesProvider stdlib.AccountContractNamesProvider +} + +// InterpreterMigrationRuntimeConfig is used to configure the InterpreterMigrationRuntime. +// The code, contract names, and program loading functions can be nil, +// in which case program loading will be configured to use the default behavior, +// loading contracts from the given payloads. +// The listener function is optional and can be used to listen for program loading events. +type InterpreterMigrationRuntimeConfig struct { + GetCode util.GetContractCodeFunc + GetContractNames util.GetContractNamesFunc + GetOrLoadProgram util.GetOrLoadProgramFunc + GetOrLoadProgramListener util.GerOrLoadProgramListenerFunc +} + +func (c InterpreterMigrationRuntimeConfig) NewRuntimeInterface( + chainID flow.ChainID, + transactionState state.NestedTransactionPreparer, + accounts environment.Accounts, +) ( + runtime.Interface, + error, +) { + + getCodeFunc := func(location common.AddressLocation) ([]byte, error) { + // First, try to get the code from the provided function. + // If it is not provided, fall back to the default behavior, + // getting the code from the accounts. + + getCodeFunc := c.GetCode + if getCodeFunc != nil { + code, err := getCodeFunc(location) + if err != nil || code != nil { + // If the code was found, or if an error occurred, then return. + return code, err + } + } + + return accounts.GetContract( + location.Name, + flow.Address(location.Address), + ) + } + + getContractNames := c.GetContractNames + if getContractNames == nil { + getContractNames = accounts.GetContractNames + } + + getOrLoadProgram := c.GetOrLoadProgram + if getOrLoadProgram == nil { + var err error + getOrLoadProgram, err = util.NewProgramsGetOrLoadProgramFunc( + transactionState, + accounts, + ) + if err != nil { + return nil, err + } + } + + sc := systemcontracts.SystemContractsForChain(chainID) + + return util.NewMigrationRuntimeInterface( + chainID, + common.Address(sc.Crypto.Address), + getCodeFunc, + getContractNames, + getOrLoadProgram, + c.GetOrLoadProgramListener, + ), nil +} + +// NewBasicMigrationRuntime returns a basic runtime for migrations. +func NewBasicMigrationRuntime(regs registers.Registers) *BasicMigrationRuntime { + // Create a new transaction state with a dummy hasher + // because we do not need spock proofs for migrations. 
+ transactionState := state.NewTransactionStateFromExecutionState( + state.NewExecutionStateWithSpockStateHasher( + registers.StorageSnapshot{ + Registers: regs, + }, + state.DefaultParameters(), + func() hash.Hasher { + return dummyHasher{} + }, + ), + ) + accounts := environment.NewAccounts(transactionState) + + accountsAtreeLedger := util.NewAccountsAtreeLedger(accounts) + runtimeStorage := runtime.NewStorage(accountsAtreeLedger, nil, runtime.StorageConfig{}) + + return &BasicMigrationRuntime{ + Registers: regs, + TransactionState: transactionState, + Storage: runtimeStorage, + AccountsLedger: accountsAtreeLedger, + Accounts: accounts, + } +} + +// NewInterpreterMigrationRuntime returns a runtime for migrations that need an interpreter. +func NewInterpreterMigrationRuntime( + regs registers.Registers, + chainID flow.ChainID, + config InterpreterMigrationRuntimeConfig, +) ( + *InterpreterMigrationRuntime, + error, +) { + basicMigrationRuntime := NewBasicMigrationRuntime(regs) + + env := runtime.NewBaseInterpreterEnvironment(runtime.Config{}) + + runtimeInterface, err := config.NewRuntimeInterface( + chainID, + basicMigrationRuntime.TransactionState, + basicMigrationRuntime.Accounts, + ) + if err != nil { + return nil, fmt.Errorf("failed to create runtime interface: %w", err) + } + + evmStdlib.SetupEnvironment(env, nil, evm.ContractAccountAddress(chainID)) + + env.Configure( + runtimeInterface, + runtime.NewCodesAndPrograms(), + basicMigrationRuntime.Storage, + nil, + nil, + nil, + ) + + inter, err := interpreter.NewInterpreter( + nil, + nil, + env.InterpreterConfig, + ) + if err != nil { + return nil, err + } + + return &InterpreterMigrationRuntime{ + BasicMigrationRuntime: basicMigrationRuntime, + Interpreter: inter, + ContractAdditionHandler: env, + ContractNamesProvider: env, + }, nil +} + +type dummyHasher struct{} + +func (d dummyHasher) Algorithm() hash.HashingAlgorithm { return hash.UnknownHashingAlgorithm } +func (d dummyHasher) Size() int { return 0 } +func (d dummyHasher) ComputeHash([]byte) hash.Hash { return nil } +func (d dummyHasher) Write([]byte) (int, error) { return 0, nil } +func (d dummyHasher) SumHash() hash.Hash { return nil } +func (d dummyHasher) Reset() {} diff --git a/cmd/util/ledger/migrations/noop.go b/cmd/util/ledger/migrations/noop.go deleted file mode 100644 index 9bf548bb45b..00000000000 --- a/cmd/util/ledger/migrations/noop.go +++ /dev/null @@ -1,9 +0,0 @@ -package migrations - -import ( - "github.com/onflow/flow-go/ledger" -) - -func NoOpMigration(p []ledger.Payload) ([]ledger.Payload, error) { - return p, nil -} diff --git a/cmd/util/ledger/migrations/prune_migration.go b/cmd/util/ledger/migrations/prune_migration.go deleted file mode 100644 index 3b694965568..00000000000 --- a/cmd/util/ledger/migrations/prune_migration.go +++ /dev/null @@ -1,17 +0,0 @@ -package migrations - -import ( - "github.com/onflow/flow-go/ledger" -) - -// PruneMigration removes all the payloads with empty value -// this prunes the trie for values that has been deleted -func PruneMigration(payload []ledger.Payload) ([]ledger.Payload, error) { - newPayload := make([]ledger.Payload, 0, len(payload)) - for _, p := range payload { - if len(p.Value()) > 0 { - newPayload = append(newPayload, p) - } - } - return newPayload, nil -} diff --git a/cmd/util/ledger/migrations/storage_fees_migration.go b/cmd/util/ledger/migrations/storage_fees_migration.go deleted file mode 100644 index d55a725d90b..00000000000 --- a/cmd/util/ledger/migrations/storage_fees_migration.go +++ /dev/null @@ -1,62 
+0,0 @@ -package migrations - import ( - fvm "github.com/onflow/flow-go/fvm/environment" - "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/utils" - "github.com/onflow/flow-go/model/flow" -) - -// iterates through registers keeping a map of register sizes -// after it has reached the end it add storage used and storage capacity for each address -func StorageFeesMigration(payload []ledger.Payload) ([]ledger.Payload, error) { - storageUsed := make(map[string]uint64) - newPayload := make([]ledger.Payload, len(payload)) - - for i, p := range payload { - err := incrementStorageUsed(p, storageUsed) - if err != nil { - return nil, err - } - newPayload[i] = p - } - - for s, u := range storageUsed { - // this is the storage used by the storage_used register we are about to add - id := flow.NewRegisterID( - string(flow.BytesToAddress([]byte(s)).Bytes()), - "storage_used") - storageUsedByStorageUsed := fvm.RegisterSize(id, make([]byte, 8)) - u = u + uint64(storageUsedByStorageUsed) - - newPayload = append(newPayload, *ledger.NewPayload( - registerIDToKey(id), - utils.Uint64ToBinary(u), - )) - } - return newPayload, nil -} - -func incrementStorageUsed(p ledger.Payload, used map[string]uint64) error { - k, err := p.Key() - if err != nil { - return err - } - id, err := KeyToRegisterID(k) - if err != nil { - return err - } - if len([]byte(id.Owner)) != flow.AddressLength { - // not an address - return nil - } - if _, ok := used[id.Owner]; !ok { - used[id.Owner] = 0 - } - used[id.Owner] = used[id.Owner] + uint64(registerSize(id, p)) - return nil -} - -func registerSize(id flow.RegisterID, p ledger.Payload) int { - return fvm.RegisterSize(id, p.Value()) -} diff --git a/cmd/util/ledger/migrations/storage_used_migration.go b/cmd/util/ledger/migrations/storage_used_migration.go new file mode 100644 index 00000000000..8495b9002ab --- /dev/null +++ b/cmd/util/ledger/migrations/storage_used_migration.go @@ -0,0 +1,201 @@ +package migrations + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/onflow/flow-go/cmd/util/ledger/reporters" + + "github.com/onflow/cadence/common" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/model/flow" +) + +// Sequence number registers are created on demand to reduce register count, +// and they live in their own registers to avoid blocking concurrent execution. +// We also need to include sequence number register sizes in the storage used +// computation before the sequence number register is created (i.e., +// we update storage used when the account public key is created) to unblock +// some use cases of concurrent execution. +// +// In other words, +// - When an account public key is appended, the predefined sequence number register size +// is included in storage used. +// - When the sequence number register is later created, storage used isn't affected. +// +// To simplify computation and avoid blocking concurrent execution, the storage used +// computation always uses 1 to indicate the number of bytes used to store each +// sequence number's value.
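+// +// For example (illustrative numbers only, distilled from the tests below): for +// an account with 5 public keys, the computation reserves +// PredefinedSequenceNumberPayloadSize for key indices 1 through 4 up front. +// The first key's sequence number lives inside the first account public key +// register, so materializing the remaining sequence number registers later +// leaves storage used unchanged.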
+ +func predefinedSequenceNumberPayloadSizes(owner string, startKeyIndex uint32, endKeyIndex uint32) uint64 { + size := uint64(0) + for i := startKeyIndex; i < endKeyIndex; i++ { + size += environment.PredefinedSequenceNumberPayloadSize(flow.BytesToAddress([]byte(owner)), i) + } + return size +} + +// AccountUsageMigration iterates through each payload, calculates the storage usage, +// and updates the account status with the computed storage usage. It also upgrades the +// account status registers to the latest version. +type AccountUsageMigration struct { + log zerolog.Logger + rw reporters.ReportWriter +} + +var _ AccountBasedMigration = &AccountUsageMigration{} + +func NewAccountUsageMigration(rw reporters.ReportWriterFactory) *AccountUsageMigration { + return &AccountUsageMigration{ + rw: rw.ReportWriter("account-usage-migration"), + } +} + +func (m *AccountUsageMigration) InitMigration( + log zerolog.Logger, + _ *registers.ByAccount, + _ int, +) error { + m.log = log.With().Str("component", "AccountUsageMigration").Logger() + + csvReportHeader := []string{ + "address", + "old_storage_used", + "new_storage_used", + } + + m.rw.Write(csvReportHeader) + + return nil +} + +func (m *AccountUsageMigration) Close() error { + m.rw.Close() + return nil +} + +func (m *AccountUsageMigration) MigrateAccount( + _ context.Context, + address common.Address, + accountRegisters *registers.AccountRegisters, +) error { + + var status *environment.AccountStatus + var statusValue []byte + var accountPublicKeyCount uint32 + + actualUsed := uint64(0) + + // Find the account status register, + // and calculate the storage usage + err := accountRegisters.ForEach(func(owner, key string, value []byte) error { + + if strings.HasPrefix(key, flow.SequenceNumberRegisterKeyPrefix) { + // DO NOT include individual sequence number registers in storage used here. + // Instead, we include storage used for all account public keys at key index >= 1 + // later in this function. + return nil + } + + if key == flow.AccountStatusKey { + statusValue = value + + var err error + status, err = environment.AccountStatusFromBytes(value) + if err != nil { + return fmt.Errorf("could not parse account status: %w", err) + } + + accountPublicKeyCount = status.AccountPublicKeyCount() + } + + actualUsed += uint64(environment.RegisterSize( + flow.RegisterID{ + Owner: owner, + Key: key, + }, + value, + )) + + return nil + }) + if err != nil { + return fmt.Errorf( + "could not iterate through registers of account %s: %w", + address.HexWithPrefix(), + err, + ) + } + + if status == nil { + log.Error(). + Str("account", address.HexWithPrefix()). + Msgf("could not find account status register") + return fmt.Errorf("could not find account status register") + } + + // reading the status will upgrade the status to the latest version, so it might + // have a different size than the one in the register + newStatusValue := status.ToBytes() + statusSizeDiff := len(newStatusValue) - len(statusValue) + + // the status size diff should be added to the actual size + if statusSizeDiff < 0 { + if uint64(-statusSizeDiff) > actualUsed { + log.Error(). + Str("account", address.HexWithPrefix()).
+ Msgf("account storage used would be negative") + return fmt.Errorf("account storage used would be negative") + } + + actualUsed = actualUsed - uint64(-statusSizeDiff) + } else if statusSizeDiff > 0 { + actualUsed = actualUsed + uint64(statusSizeDiff) + } + + if accountPublicKeyCount > 1 { + // Include predefined sequence number payload size per key for all account public key at index >= 1. + // NOTE: sequence number for the first account public key is included in the + // first account public key register, so it doesn't need to be included here. + actualUsed += predefinedSequenceNumberPayloadSizes(string(address[:]), 1, accountPublicKeyCount) + } + + currentUsed := status.StorageUsed() + + // update storage used if the actual size is different from the size in the status register + // or if the status size is different. + if actualUsed != currentUsed || statusSizeDiff != 0 { + // update storage used + status.SetStorageUsed(actualUsed) + + err = accountRegisters.Set( + string(address[:]), + flow.AccountStatusKey, + status.ToBytes(), + ) + if err != nil { + return fmt.Errorf("could not update account status register: %w", err) + } + + m.rw.Write([]string{ + address.Hex(), + strconv.FormatUint(currentUsed, 10), + strconv.FormatUint(actualUsed, 10), + }) + } + + return nil +} + +// nolint:unused +type accountUsageMigrationReportData struct { + AccountAddress string + OldStorageUsed uint64 + NewStorageUsed uint64 +} diff --git a/cmd/util/ledger/migrations/storage_used_migration_test.go b/cmd/util/ledger/migrations/storage_used_migration_test.go new file mode 100644 index 00000000000..cd4ebb3ba4e --- /dev/null +++ b/cmd/util/ledger/migrations/storage_used_migration_test.go @@ -0,0 +1,224 @@ +package migrations + +import ( + "context" + "testing" + + "github.com/onflow/atree" + "github.com/onflow/cadence/common" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" +) + +func TestAccountStatusMigration(t *testing.T) { + t.Parallel() + + log := zerolog.New(zerolog.NewTestWriter(t)) + rwf := &testReportWriterFactory{} + + migration := NewAccountUsageMigration(rwf) + err := migration.InitMigration(log, nil, 0) + require.NoError(t, err) + + addressString := "0x1" + address, err := common.HexToAddress(addressString) + require.NoError(t, err) + ownerKey := ledger.KeyPart{Type: 0, Value: address.Bytes()} + + sizeOfTheStatusPayload := uint64( + environment.RegisterSize( + flow.AccountStatusRegisterID(flow.ConvertAddress(address)), + environment.NewAccountStatus().ToBytes(), + ), + ) + + migrate := func(oldPayloads []*ledger.Payload) ([]*ledger.Payload, error) { + + registersByAccount, err := registers.NewByAccountFromPayloads(oldPayloads) + require.NoError(t, err) + + err = migration.InitMigration(log, registersByAccount, 1) + require.NoError(t, err) + + accountRegisters := registersByAccount.AccountRegisters(string(address[:])) + + err = migration.MigrateAccount( + context.Background(), + address, + accountRegisters, + ) + if err != nil { + return nil, err + } + + err = migration.Close() + require.NoError(t, err) + + newPayloads := registersByAccount.DestructIntoPayloads(1) + + return newPayloads, nil + } + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + _, err := migrate([]*ledger.Payload{}) + require.Error(t, err) + }) + + t.Run("status register v1", func(t *testing.T) { + t.Parallel() + + 
accountPublicKeyCount := uint32(5) + statusPayloadAndSequenceNumberSize := sizeOfTheStatusPayload + + predefinedSequenceNumberPayloadSizes(string(address[:]), 1, accountPublicKeyCount) + + payloads := []*ledger.Payload{ + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: []byte(flow.AccountStatusKey)}}), + []byte{ + 0, // flags + 0, 0, 0, 0, 0, 0, 0, 7, // storage used + 0, 0, 0, 0, 0, 0, 0, 6, // storage index + 0, 0, 0, 0, 0, 0, 0, 5, // public key counts + }, + ), + } + + migrated, err := migrate(payloads) + require.NoError(t, err) + require.Len(t, migrated, 1) + + accountStatus, err := environment.AccountStatusFromBytes(migrated[0].Value()) + require.NoError(t, err) + + require.Equal(t, statusPayloadAndSequenceNumberSize, accountStatus.StorageUsed()) + require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, accountStatus.SlabIndex()) + require.Equal(t, accountPublicKeyCount, accountStatus.AccountPublicKeyCount()) + require.Equal(t, uint64(0), accountStatus.AccountIdCounter()) + }) + t.Run("status register v2", func(t *testing.T) { + t.Parallel() + + accountPublicKeyCount := uint32(5) + statusPayloadAndSequenceNumberSize := sizeOfTheStatusPayload + + predefinedSequenceNumberPayloadSizes(string(address[:]), 1, accountPublicKeyCount) + + payloads := []*ledger.Payload{ + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: []byte(flow.AccountStatusKey)}}), + []byte{ + 0, // flags + 0, 0, 0, 0, 0, 0, 0, 100, // storage used + 0, 0, 0, 0, 0, 0, 0, 6, // storage index + 0, 0, 0, 0, 0, 0, 0, 5, // public key counts + 0, 0, 0, 0, 0, 0, 0, 3, // account id counter + }, + ), + } + + migrated, err := migrate(payloads) + require.NoError(t, err) + require.Len(t, migrated, 1) + + accountStatus, err := environment.AccountStatusFromBytes(migrated[0].Value()) + require.NoError(t, err) + + require.Equal(t, statusPayloadAndSequenceNumberSize, accountStatus.StorageUsed()) + require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, accountStatus.SlabIndex()) + require.Equal(t, accountPublicKeyCount, accountStatus.AccountPublicKeyCount()) + require.Equal(t, uint64(3), accountStatus.AccountIdCounter()) + }) + + t.Run("status register v3", func(t *testing.T) { + t.Parallel() + + accountPublicKeyCount := uint32(5) + statusPayloadAndSequenceNumberSize := sizeOfTheStatusPayload + + predefinedSequenceNumberPayloadSizes(string(address[:]), 1, accountPublicKeyCount) + + payloads := []*ledger.Payload{ + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: []byte(flow.AccountStatusKey)}}), + []byte{ + 0, // flags + 0, 0, 0, 0, 0, 0, 0, byte(sizeOfTheStatusPayload), // storage used + 0, 0, 0, 0, 0, 0, 0, 6, // storage index + 0, 0, 0, 5, // public key counts + 0, 0, 0, 0, 0, 0, 0, 3, // account id counter + }, + ), + } + + migrated, err := migrate(payloads) + require.NoError(t, err) + require.Len(t, migrated, 1) + + accountStatus, err := environment.AccountStatusFromBytes(migrated[0].Value()) + require.NoError(t, err) + + require.Equal(t, statusPayloadAndSequenceNumberSize, accountStatus.StorageUsed()) + require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, accountStatus.SlabIndex()) + require.Equal(t, accountPublicKeyCount, accountStatus.AccountPublicKeyCount()) + require.Equal(t, uint64(3), accountStatus.AccountIdCounter()) + }) + + t.Run("data registers", func(t *testing.T) { + t.Parallel() + + accountPublicKeyCount := uint32(5) + statusPayloadAndSequenceNumberSize := sizeOfTheStatusPayload + +
predefinedSequenceNumberPayloadSizes(string(address[:]), 1, accountPublicKeyCount) + + payloads := []*ledger.Payload{ + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: []byte(flow.AccountStatusKey)}}), + []byte{ + 0, // flags + 0, 0, 0, 0, 0, 0, 0, 15, // storage used + 0, 0, 0, 0, 0, 0, 0, 6, // storage index + 0, 0, 0, 5, // public key counts + 0, 0, 0, 0, 0, 0, 0, 3, // account id counter + }, + ), + ledger.NewPayload( + ledger.NewKey([]ledger.KeyPart{ownerKey, {Type: 2, Value: []byte("1")}}), + make([]byte, 100), + ), + } + + migrated, err := migrate(payloads) + require.NoError(t, err) + require.Len(t, migrated, 2) + + var accountStatus *environment.AccountStatus + for _, payload := range migrated { + key, err := payload.Key() + require.NoError(t, err) + if string(key.KeyParts[1].Value) != flow.AccountStatusKey { + continue + } + + accountStatus, err = environment.AccountStatusFromBytes(payload.Value()) + require.NoError(t, err) + } + + dataRegisterSize := uint64(environment.RegisterSize( + flow.RegisterID{ + Owner: string(flow.ConvertAddress(address).Bytes()), + Key: "1", + }, + make([]byte, 100), + )) + + require.Equal(t, statusPayloadAndSequenceNumberSize+dataRegisterSize, accountStatus.StorageUsed()) + require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, accountStatus.SlabIndex()) + require.Equal(t, accountPublicKeyCount, accountStatus.AccountPublicKeyCount()) + require.Equal(t, uint64(3), accountStatus.AccountIdCounter()) + }) +} diff --git a/cmd/util/ledger/migrations/test-data/bootstrapped_v0.31/.gitignore b/cmd/util/ledger/migrations/test-data/bootstrapped_v0.31/.gitignore new file mode 100644 index 00000000000..0342671c3bc --- /dev/null +++ b/cmd/util/ledger/migrations/test-data/bootstrapped_v0.31/.gitignore @@ -0,0 +1,4 @@ +* + +!.gitignore +!00000000 diff --git a/cmd/util/ledger/migrations/test-data/bootstrapped_v0.31/00000000 b/cmd/util/ledger/migrations/test-data/bootstrapped_v0.31/00000000 new file mode 100644 index 00000000000..48a6440b89f Binary files /dev/null and b/cmd/util/ledger/migrations/test-data/bootstrapped_v0.31/00000000 differ diff --git a/cmd/util/ledger/migrations/test-data/cadence_values_migration/README.md b/cmd/util/ledger/migrations/test-data/cadence_values_migration/README.md new file mode 100644 index 00000000000..a2ccc17d37c --- /dev/null +++ b/cmd/util/ledger/migrations/test-data/cadence_values_migration/README.md @@ -0,0 +1,52 @@ +## Instructions for creating a snapshot + +To create a new snapshot for tests, follow the steps below. + +- Download a CLI version that uses a pre-1.0 Cadence version (e.g. CLI version `v1.5.0`) + +- Start the emulator with the `--persist` flag. + ```shell + flow emulator --persist + ``` + +- Create an account named `test`. + ```shell + flow accounts create + ``` + +- Update the `testAccountAddress` constant in the [cadence_values_migration_test.go](../../cadence_values_migration_test.go) + with the address of the account just created. + +- Deploy the `Test` contract. For that, first add the entries below to the `flow.json` + ```json + { + "contracts": { + "Test": "./test_contract.cdc" + }, + "deployments": { + "emulator": { + "test": [ + "Test" + ] + } + } + } + ``` + Then run: + ```shell + flow deploy + ``` + +- To store the values, run the transaction [store_transaction.cdc](store_transaction.cdc), + using the newly created `test` account as the signer.
+ ```shell + flow transactions send store_transaction.cdc --signer test + ``` + +- Create a snapshot of the emulator state using the REST API: + ```shell + curl -H "Content-type: application/x-www-form-urlencoded" -X POST http://localhost:8080/emulator/snapshots -d "name=test_snapshot" + ``` + +- The above will create a file named `snapshot.test_snapshot` in a directory called `flowdb`, where the flow project was initialized. + Copy it to this directory (`test-data/cadence_values_migration`) and rename it to `snapshot`. diff --git a/cmd/util/ledger/migrations/test-data/cadence_values_migration/snapshot_cadence_v0.42.6 b/cmd/util/ledger/migrations/test-data/cadence_values_migration/snapshot_cadence_v0.42.6 new file mode 100644 index 00000000000..55009f69289 Binary files /dev/null and b/cmd/util/ledger/migrations/test-data/cadence_values_migration/snapshot_cadence_v0.42.6 differ diff --git a/cmd/util/ledger/migrations/test-data/cadence_values_migration/store_transaction.cdc b/cmd/util/ledger/migrations/test-data/cadence_values_migration/store_transaction.cdc new file mode 100644 index 00000000000..56830184d1e --- /dev/null +++ b/cmd/util/ledger/migrations/test-data/cadence_values_migration/store_transaction.cdc @@ -0,0 +1,73 @@ +import Test from 0x01cf0e2f2f715450 + +transaction { + + prepare(acct: AuthAccount) { + acct.save("Cafe\u{0301}", to: /storage/string_value_1) + acct.save("Caf\u{00E9}", to: /storage/string_value_2) + acct.save(Type<AuthAccount>(), to: /storage/type_value) + + // String keys in dictionary + acct.save( + { + "Cafe\u{0301}": 1, + "H\u{00E9}llo": 2 + }, + to: /storage/dictionary_with_string_keys, + ) + + // Restricted typed keys in dictionary + acct.save( + { + Type<AnyStruct{Test.Bar, Test.Foo}>(): 1, + Type<AnyStruct{Test.Foo, Test.Bar, Test.Baz}>(): 2 + }, + to: /storage/dictionary_with_restricted_typed_keys, + ) + + // Capabilities and links + acct.save(<- Test.createR(), to: /storage/r) + + // Typed capability + var cap1: Capability<&Test.R>? = acct.link<&Test.R>(/public/linkR, target: /storage/r) + acct.save(cap1, to: /storage/capability) + + // Untyped capability + var cap2: Capability = acct.getCapability(/public/linkR) + acct.save(cap2, to: /storage/untyped_capability) + + // account-typed keys in dictionary + acct.save( + { + Type<AuthAccount>(): 1, + Type<AuthAccount.Capabilities>(): 2, + Type<AuthAccount.AccountCapabilities>(): 3, + Type<AuthAccount.StorageCapabilities>(): 4, + Type<AuthAccount.Contracts>(): 5, + Type<AuthAccount.Keys>(): 6, + Type<AuthAccount.Inbox>(): 7, + + Type<PublicAccount>(): 8, + + Type<AccountKey>(): 9 + }, + to: /storage/dictionary_with_account_type_keys, + ) + + // Entitlements. Both keys produce the same result, + // so having them both in the same dictionary would make one replace the other. + // Therefore, use two separate dictionaries.
+ acct.save( + { Type<&Test.R>(): "non_auth_ref" }, + to: /storage/dictionary_with_reference_typed_key + ) + acct.save( + { Type<auth &Test.R>(): "auth_ref" }, + to: /storage/dictionary_with_auth_reference_typed_key + ) + } + + execute { + log("Values successfully saved in storage") + } +} diff --git a/cmd/util/ledger/migrations/test-data/cadence_values_migration/test_contract.cdc b/cmd/util/ledger/migrations/test-data/cadence_values_migration/test_contract.cdc new file mode 100644 index 00000000000..1643a9f9c52 --- /dev/null +++ b/cmd/util/ledger/migrations/test-data/cadence_values_migration/test_contract.cdc @@ -0,0 +1,16 @@ +pub contract Test { + + pub struct interface Foo {} + + pub struct interface Bar {} + + pub struct interface Baz {} + + pub resource R { + pub fun foo() {} + } + + pub fun createR(): @R { + return <- create R() + } +} diff --git a/cmd/util/ledger/migrations/test-data/cadence_values_migration/test_contract_upgraded.cdc b/cmd/util/ledger/migrations/test-data/cadence_values_migration/test_contract_upgraded.cdc new file mode 100644 index 00000000000..ffd7971411f --- /dev/null +++ b/cmd/util/ledger/migrations/test-data/cadence_values_migration/test_contract_upgraded.cdc @@ -0,0 +1,21 @@ +// Unused import. But keep it here for testing the import resolving. +import NonFungibleToken from 0xf8d6e0586b0a20c7 + +access(all) contract Test { + + access(all) struct interface Foo {} + + access(all) struct interface Bar {} + + access(all) struct interface Baz {} + + access(all) entitlement E + + access(all) resource R { + access(E) fun foo() {} + } + + access(all) fun createR(): @R { + return <- create R() + } +} diff --git a/cmd/util/ledger/migrations/test-data/staged_contracts_migration/staged_contracts.csv b/cmd/util/ledger/migrations/test-data/staged_contracts_migration/staged_contracts.csv new file mode 100644 index 00000000000..a12235342e3 --- /dev/null +++ b/cmd/util/ledger/migrations/test-data/staged_contracts_migration/staged_contracts.csv @@ -0,0 +1,13 @@ +0x0000000000000001,Foo,access(all) contract Foo{} +0x0000000000000001,Bar,access(all) contract Bar{} +0x0000000000000002,MultilineContract," +import Foo from 0x01 + +access(all) +contract MultilineContract{ + init() { + var a = ""hello"" + } +} +" +0x0000000000000002,Baz,import Foo from 0x01 access(all) contract Baz{} diff --git a/cmd/util/ledger/migrations/test-data/staged_contracts_migration/staged_contracts_malformed.csv b/cmd/util/ledger/migrations/test-data/staged_contracts_migration/staged_contracts_malformed.csv new file mode 100644 index 00000000000..f975a6f8fd8 --- /dev/null +++ b/cmd/util/ledger/migrations/test-data/staged_contracts_migration/staged_contracts_malformed.csv @@ -0,0 +1,3 @@ +0x0000000000000001,Foo,access(all) contract Foo{} +0x0000000000000001,Bar +0x0000000000000002,Baz,import Foo from 0x01, access(all) contract Baz{} diff --git a/cmd/util/ledger/migrations/test-data/staged_contracts_migration/too_few_fields.csv b/cmd/util/ledger/migrations/test-data/staged_contracts_migration/too_few_fields.csv new file mode 100644 index 00000000000..a8b0abb1c4f --- /dev/null +++ b/cmd/util/ledger/migrations/test-data/staged_contracts_migration/too_few_fields.csv @@ -0,0 +1,3 @@ +0x0000000000000001,Foo +0x0000000000000001,Bar +0x0000000000000002,Baz diff --git a/cmd/util/ledger/migrations/transaction_migration.go b/cmd/util/ledger/migrations/transaction_migration.go new file mode 100644 index 00000000000..488ce1aeb63 --- /dev/null +++ b/cmd/util/ledger/migrations/transaction_migration.go @@ -0,0 +1,62 @@ 
+package migrations + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/model/flow" +) + +func NewTransactionBasedMigration( + tx *flow.TransactionBody, + chainID flow.ChainID, + logger zerolog.Logger, + expectedWriteAddresses map[flow.Address]struct{}, +) RegistersMigration { + return func(registersByAccount *registers.ByAccount) error { + + options := computation.DefaultFVMOptions(chainID, false, false) + options = append(options, + fvm.WithContractDeploymentRestricted(false), + fvm.WithContractRemovalRestricted(false), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithTransactionFeesEnabled(false)) + ctx := fvm.NewContext(options...) + + storageSnapshot := registers.StorageSnapshot{ + Registers: registersByAccount, + } + + vm := fvm.NewVirtualMachine() + + executionSnapshot, res, err := vm.Run( + ctx, + fvm.Transaction(tx, 0), + storageSnapshot, + ) + if err != nil { + return fmt.Errorf("failed to run transaction: %w", err) + } + + if res.Err != nil { + return fmt.Errorf("transaction failed: %w", res.Err) + } + + err = registers.ApplyChanges( + registersByAccount, + executionSnapshot.WriteSet, + expectedWriteAddresses, + logger, + ) + if err != nil { + return fmt.Errorf("failed to apply changes: %w", err) + } + + return nil + } +} diff --git a/cmd/util/ledger/migrations/utils.go b/cmd/util/ledger/migrations/utils.go index 506efe61db0..b6a30a35f86 100644 --- a/cmd/util/ledger/migrations/utils.go +++ b/cmd/util/ledger/migrations/utils.go @@ -1,91 +1,12 @@ package migrations import ( - "fmt" - - "github.com/onflow/atree" - - "github.com/onflow/flow-go/engine/execution/state" - "github.com/onflow/flow-go/fvm/environment" - "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" ) -func KeyToRegisterID(key ledger.Key) (flow.RegisterID, error) { - if len(key.KeyParts) != 2 || - key.KeyParts[0].Type != state.KeyPartOwner || - key.KeyParts[1].Type != state.KeyPartKey { - return flow.RegisterID{}, fmt.Errorf("key not in expected format %s", key.String()) - } - - return flow.NewRegisterID( - string(key.KeyParts[0].Value), - string(key.KeyParts[1].Value), - ), nil -} - -func registerIDToKey(registerID flow.RegisterID) ledger.Key { - newKey := ledger.Key{} - newKey.KeyParts = []ledger.KeyPart{ - { - Type: state.KeyPartOwner, - Value: []byte(registerID.Owner), - }, - { - Type: state.KeyPartKey, - Value: []byte(registerID.Key), - }, - } - return newKey -} - -type AccountsAtreeLedger struct { - Accounts environment.Accounts -} - -func NewAccountsAtreeLedger(accounts environment.Accounts) *AccountsAtreeLedger { - return &AccountsAtreeLedger{Accounts: accounts} -} - -var _ atree.Ledger = &AccountsAtreeLedger{} - -func (a *AccountsAtreeLedger) GetValue(owner, key []byte) ([]byte, error) { - v, err := a.Accounts.GetValue( - flow.NewRegisterID( - string(flow.BytesToAddress(owner).Bytes()), - string(key))) - if err != nil { - return nil, fmt.Errorf("getting value failed: %w", err) - } - return v, nil -} - -func (a *AccountsAtreeLedger) SetValue(owner, key, value []byte) error { - err := a.Accounts.SetValue( - flow.NewRegisterID( - string(flow.BytesToAddress(owner).Bytes()), - string(key)), - value) - if err != nil { - return fmt.Errorf("setting value failed: %w", err) - } - return nil -} - 
-func (a *AccountsAtreeLedger) ValueExists(owner, key []byte) (exists bool, err error) { - v, err := a.GetValue(owner, key) - if err != nil { - return false, fmt.Errorf("checking value existence failed: %w", err) - } - - return len(v) > 0, nil -} +type RegistersMigration func(registersByAccount *registers.ByAccount) error -// AllocateStorageIndex allocates new storage index under the owner accounts to store a new register -func (a *AccountsAtreeLedger) AllocateStorageIndex(owner []byte) (atree.StorageIndex, error) { - v, err := a.Accounts.AllocateStorageIndex(flow.BytesToAddress(owner)) - if err != nil { - return atree.StorageIndex{}, fmt.Errorf("storage address allocation failed: %w", err) - } - return v, nil +type NamedMigration struct { + Name string + Migrate RegistersMigration } diff --git a/cmd/util/ledger/reporters/account_reporter.go b/cmd/util/ledger/reporters/account_reporter.go index 9b4fe206f63..859bb32ca83 100644 --- a/cmd/util/ledger/reporters/account_reporter.go +++ b/cmd/util/ledger/reporters/account_reporter.go @@ -9,13 +9,14 @@ import ( "github.com/schollz/progressbar/v3" "github.com/onflow/cadence" + "github.com/onflow/cadence/common" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime/common" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" ) @@ -166,6 +167,7 @@ func newAccountDataProcessor( snapshot snapshot.StorageSnapshot, ) *balanceProcessor { bp := NewBalanceReporter(chain, snapshot) + sc := systemcontracts.SystemContractsForChain(bp.ctx.Chain.ChainID()) bp.logger = logger bp.rwa = rwa @@ -174,33 +176,30 @@ func newAccountDataProcessor( bp.balanceScript = []byte(fmt.Sprintf(` import FungibleToken from 0x%s import FlowToken from 0x%s - pub fun main(account: Address): UFix64 { + access(all) fun main(account: Address): UFix64 { let acct = getAccount(account) - let vaultRef = acct.getCapability(/public/flowTokenBalance) - .borrow<&FlowToken.Vault{FungibleToken.Balance}>() + let vaultRef = acct.capabilities.borrow<&FlowToken.Vault>(/public/flowTokenBalance) ?? panic("Could not borrow Balance reference to the Vault") return vaultRef.balance } - `, fvm.FungibleTokenAddress(bp.ctx.Chain), fvm.FlowTokenAddress(bp.ctx.Chain))) + `, sc.FungibleToken.Address.Hex(), sc.FlowToken.Address.Hex())) bp.fusdScript = []byte(fmt.Sprintf(` import FungibleToken from 0x%s import FUSD from 0x%s - pub fun main(address: Address): UFix64 { + access(all) fun main(address: Address): UFix64 { let account = getAccount(address) - let vaultRef = account.getCapability(/public/fusdBalance)! - .borrow<&FUSD.Vault{FungibleToken.Balance}>() + let vaultRef = account.capabilities.borrow<&FUSD.Vault>(/public/fusdBalance) ?? panic("Could not borrow Balance reference to the Vault") return vaultRef.balance } - `, fvm.FungibleTokenAddress(bp.ctx.Chain), "3c5959b568896393")) + `, sc.FungibleToken.Address.Hex(), "3c5959b568896393")) bp.momentsScript = []byte(` import TopShot from 0x0b2a3299cc857e29 - pub fun main(account: Address): Int { + access(all) fun main(account: Address): Int { let acct = getAccount(account) - let collectionRef = acct.getCapability(/public/MomentCollection) - .borrow<&{TopShot.MomentCollectionPublic}>()! + let collectionRef = acct.capabilities.borrow<&{TopShot.MomentCollectionPublic}>(/public/MomentCollection)! 
return collectionRef.getIDs().length } `) @@ -328,7 +327,7 @@ func (c *balanceProcessor) balance(address flow.Address) (uint64, bool, error) { var balance uint64 var hasVault bool if output.Err == nil && output.Value != nil { - balance = output.Value.ToGoValue().(uint64) + balance = uint64(output.Value.(cadence.UFix64)) hasVault = true } else { hasVault = false @@ -348,7 +347,7 @@ func (c *balanceProcessor) fusdBalance(address flow.Address) (uint64, error) { var balance uint64 if output.Err == nil && output.Value != nil { - balance = output.Value.ToGoValue().(uint64) + balance = uint64(output.Value.(cadence.UFix64)) } return balance, nil } @@ -399,7 +398,7 @@ func (c *balanceProcessor) ReadStored(address flow.Address, domain common.PathDo receiver, err := rt.ReadStored( addr, cadence.Path{ - Domain: domain.Identifier(), + Domain: domain, Identifier: id, }, ) diff --git a/cmd/util/ledger/reporters/atree_reporter.go b/cmd/util/ledger/reporters/atree_reporter.go index 6d1be625125..39c005dbc55 100644 --- a/cmd/util/ledger/reporters/atree_reporter.go +++ b/cmd/util/ledger/reporters/atree_reporter.go @@ -117,7 +117,7 @@ func getPayloadType(p *ledger.Payload) (payloadType, error) { } id := flow.NewRegisterID( - string(k.KeyParts[0].Value), + flow.BytesToAddress(k.KeyParts[0].Value), string(k.KeyParts[1].Value)) if id.IsInternalState() { return fvmPayloadType, nil diff --git a/cmd/util/ledger/reporters/export_reporter.go b/cmd/util/ledger/reporters/export_reporter.go index 9c69ebf7218..460c0ebe0dd 100644 --- a/cmd/util/ledger/reporters/export_reporter.go +++ b/cmd/util/ledger/reporters/export_reporter.go @@ -24,18 +24,15 @@ type ExportReport struct { // ExportReporter writes data that can be leveraged outside of extraction type ExportReporter struct { logger zerolog.Logger - chain flow.Chain getBeforeMigrationSCFunc GetStateCommitmentFunc } func NewExportReporter( logger zerolog.Logger, - chain flow.Chain, getBeforeMigrationSCFunc GetStateCommitmentFunc, ) *ExportReporter { return &ExportReporter{ logger: logger, - chain: chain, getBeforeMigrationSCFunc: getBeforeMigrationSCFunc, } } diff --git a/cmd/util/ledger/reporters/fungible_token_tracker.go b/cmd/util/ledger/reporters/fungible_token_tracker.go index f8f4755e5c8..44c5eef4fc6 100644 --- a/cmd/util/ledger/reporters/fungible_token_tracker.go +++ b/cmd/util/ledger/reporters/fungible_token_tracker.go @@ -9,24 +9,24 @@ import ( "github.com/rs/zerolog" "github.com/schollz/progressbar/v3" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" cadenceRuntime "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" - "github.com/onflow/flow-go/cmd/util/ledger/migrations" - "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/cmd/util/ledger/util" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" ) const FungibleTokenTrackerReportPrefix = "fungible_token_report" -var domains = []string{ - common.PathDomainPublic.Identifier(), - common.PathDomainPrivate.Identifier(), - common.PathDomainStorage.Identifier(), +var domains = []common.StorageDomain{ + common.StorageDomainPathPublic, + common.StorageDomainPathPrivate, + common.StorageDomainPathStorage, } // FungibleTokenTracker iterates through stored cadence values over all accounts and check for any @@ -41,7 +41,8 @@ type 
FungibleTokenTracker struct { } func FlowTokenTypeID(chain flow.Chain) string { - return fmt.Sprintf("A.%s.FlowToken.Vault", fvm.FlowTokenAddress(chain).Hex()) + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + return fmt.Sprintf("A.%s.FlowToken.Vault", sc.FlowToken.Address.Hex()) } func NewFungibleTokenTracker(logger zerolog.Logger, rwf ReportWriterFactory, chain flow.Chain, vaultTypeIDs []string) *FungibleTokenTracker { @@ -141,13 +142,19 @@ func (r *FungibleTokenTracker) worker( wg *sync.WaitGroup) { for j := range jobs { + inter, err := interpreter.NewInterpreter(nil, nil, &interpreter.Config{}) + if err != nil { + panic(err) + } + txnState := state.NewTransactionState( NewStorageSnapshotFromPayload(j.payloads), state.DefaultParameters()) accounts := environment.NewAccounts(txnState) storage := cadenceRuntime.NewStorage( - &migrations.AccountsAtreeLedger{Accounts: accounts}, + &util.AccountsAtreeLedger{Accounts: accounts}, nil, + cadenceRuntime.StorageConfig{}, ) owner, err := common.BytesToAddress(j.owner[:]) @@ -155,17 +162,17 @@ func (r *FungibleTokenTracker) worker( panic(err) } - inter, err := interpreter.NewInterpreter(nil, nil, &interpreter.Config{}) - if err != nil { - panic(err) - } - for _, domain := range domains { - storageMap := storage.GetStorageMap(owner, domain, true) + storageMap := storage.GetDomainStorageMap(inter, owner, domain, true) itr := storageMap.Iterator(inter) key, value := itr.Next() for value != nil { - r.iterateChildren(append([]string{domain}, key), j.owner, value) + identifier := string(key.(interpreter.StringAtreeValue)) + r.iterateChildren( + append([]string{domain.Identifier()}, identifier), + j.owner, + value, + ) key, value = itr.Next() } } @@ -197,9 +204,8 @@ func (r *FungibleTokenTracker) iterateChildren(tr trace, addr flow.Address, valu if _, ok := r.vaultTypeIDs[typeIDStr]; ok { b := uint64(compValue.GetField( inter, - interpreter.EmptyLocationRange, "balance", - ).(interpreter.UFix64Value)) + ).(interpreter.UFix64Value).UFix64Value) if b > 0 { r.rw.Write(TokenDataPoint{ Path: tr.String(), @@ -212,8 +218,13 @@ func (r *FungibleTokenTracker) iterateChildren(tr trace, addr flow.Address, valu // iterate over fields of the composite value (skip the ones that are not resource typed) compValue.ForEachField(inter, - func(key string, value interpreter.Value) { + func(key string, value interpreter.Value) (resume bool) { r.iterateChildren(append(tr, key), addr, value) - }) + + // continue iteration + return true + }, + interpreter.EmptyLocationRange, + ) } } diff --git a/cmd/util/ledger/reporters/fungible_token_tracker_test.go b/cmd/util/ledger/reporters/fungible_token_tracker_test.go index 60a3988299c..7e0227cebe4 100644 --- a/cmd/util/ledger/reporters/fungible_token_tracker_test.go +++ b/cmd/util/ledger/reporters/fungible_token_tracker_test.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/cmd/util/ledger/reporters" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -69,37 +70,46 @@ func TestFungibleTokenTracker(t *testing.T) { err = view.Merge(snapshot) require.NoError(t, err) + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + // deploy wrapper resource testContract := fmt.Sprintf(` import FungibleToken from 0x%s - pub contract WrappedToken { - pub resource WrappedVault { - pub var vault: @FungibleToken.Vault + 
access(all) + contract WrappedToken { + + access(all) + resource WrappedVault { - init(v: @FungibleToken.Vault) { + access(all) + var vault: @{FungibleToken.Vault} + + init(v: @{FungibleToken.Vault}) { self.vault <- v } - destroy() { - destroy self.vault - } } - pub fun CreateWrappedVault(inp: @FungibleToken.Vault): @WrappedToken.WrappedVault { + + access(all) + fun CreateWrappedVault(inp: @{FungibleToken.Vault}): @WrappedToken.WrappedVault { return <-create WrappedVault(v :<- inp) } - }`, fvm.FungibleTokenAddress(chain)) + }`, sc.FungibleToken.Address.Hex()) deployingTestContractScript := []byte(fmt.Sprintf(` transaction { - prepare(signer: AuthAccount) { + prepare(signer: auth(AddContract) &Account) { signer.contracts.add(name: "%s", code: "%s".decodeHex()) } } `, "WrappedToken", hex.EncodeToString([]byte(testContract)))) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript(deployingTestContractScript). - AddAuthorizer(chain.ServiceAddress()) + SetPayer(chain.ServiceAddress()). + AddAuthorizer(chain.ServiceAddress()). + Build() + require.NoError(t, err) tx := fvm.Transaction(txBody, 0) snapshot, output, err := vm.Run(ctx, tx, view) @@ -109,26 +119,34 @@ func TestFungibleTokenTracker(t *testing.T) { err = view.Merge(snapshot) require.NoError(t, err) - wrapTokenScript := []byte(fmt.Sprintf(` + wrapTokenScript := []byte(fmt.Sprintf( + ` import FungibleToken from 0x%s import FlowToken from 0x%s import WrappedToken from 0x%s transaction(amount: UFix64) { - prepare(signer: AuthAccount) { - let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + prepare(signer: auth(Storage) &Account) { + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner's Vault!") let sentVault <- vaultRef.withdraw(amount: amount) let wrappedFlow <- WrappedToken.CreateWrappedVault(inp :<- sentVault) - signer.save(<-wrappedFlow, to: /storage/wrappedToken) + signer.storage.save(<-wrappedFlow, to: /storage/wrappedToken) } - }`, fvm.FungibleTokenAddress(chain), fvm.FlowTokenAddress(chain), chain.ServiceAddress())) + }`, + sc.FungibleToken.Address.Hex(), + sc.FlowToken.Address.Hex(), + sc.FlowServiceAccount.Address.Hex(), + )) - txBody = flow.NewTransactionBody(). + txBody, err = flow.NewTransactionBodyBuilder(). SetScript(wrapTokenScript). + SetPayer(chain.ServiceAddress()). AddArgument(jsoncdc.MustEncode(cadence.UFix64(105))). - AddAuthorizer(chain.ServiceAddress()) + AddAuthorizer(chain.ServiceAddress()). 
+ Build() + require.NoError(t, err) tx = fvm.Transaction(txBody, 0) snapshot, output, err = vm.Run(ctx, tx, view) @@ -154,10 +172,11 @@ func TestFungibleTokenTracker(t *testing.T) { // wrappedToken require.True(t, strings.Contains(string(data), `{"path":"storage/wrappedToken/vault","address":"8c5303eaa26202d6","balance":105,"type_id":"A.7e60df042a9c0868.FlowToken.Vault"}`)) // flowTokenVaults - require.True(t, strings.Contains(string(data), `{"path":"storage/flowTokenVault","address":"8c5303eaa26202d6","balance":99999999999699895,"type_id":"A.7e60df042a9c0868.FlowToken.Vault"}`)) + require.True(t, strings.Contains(string(data), `{"path":"storage/flowTokenVault","address":"8c5303eaa26202d6","balance":99999999999599895,"type_id":"A.7e60df042a9c0868.FlowToken.Vault"}`)) require.True(t, strings.Contains(string(data), `{"path":"storage/flowTokenVault","address":"9a0766d93b6608b7","balance":100000,"type_id":"A.7e60df042a9c0868.FlowToken.Vault"}`)) require.True(t, strings.Contains(string(data), `{"path":"storage/flowTokenVault","address":"7e60df042a9c0868","balance":100000,"type_id":"A.7e60df042a9c0868.FlowToken.Vault"}`)) require.True(t, strings.Contains(string(data), `{"path":"storage/flowTokenVault","address":"912d5440f7e3769e","balance":100000,"type_id":"A.7e60df042a9c0868.FlowToken.Vault"}`)) + require.True(t, strings.Contains(string(data), `{"path":"storage/flowTokenVault","address":"754aed9de6197641","balance":100000,"type_id":"A.7e60df042a9c0868.FlowToken.Vault"}`)) // do not remove this line, see https://github.com/onflow/flow-go/pull/2237 t.Log("success") diff --git a/cmd/util/ledger/reporters/mock/get_state_commitment_func.go b/cmd/util/ledger/reporters/mock/get_state_commitment_func.go deleted file mode 100644 index a282b847b4c..00000000000 --- a/cmd/util/ledger/reporters/mock/get_state_commitment_func.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mock - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" -) - -// GetStateCommitmentFunc is an autogenerated mock type for the GetStateCommitmentFunc type -type GetStateCommitmentFunc struct { - mock.Mock -} - -// Execute provides a mock function with given fields: -func (_m *GetStateCommitmentFunc) Execute() flow.StateCommitment { - ret := _m.Called() - - var r0 flow.StateCommitment - if rf, ok := ret.Get(0).(func() flow.StateCommitment); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.StateCommitment) - } - } - - return r0 -} - -type mockConstructorTestingTNewGetStateCommitmentFunc interface { - mock.TestingT - Cleanup(func()) -} - -// NewGetStateCommitmentFunc creates a new instance of GetStateCommitmentFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGetStateCommitmentFunc(t mockConstructorTestingTNewGetStateCommitmentFunc) *GetStateCommitmentFunc { - mock := &GetStateCommitmentFunc{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/cmd/util/ledger/reporters/mock/report_writer.go b/cmd/util/ledger/reporters/mock/report_writer.go index 036cfcf1b9b..4f601f06d08 100644 --- a/cmd/util/ledger/reporters/mock/report_writer.go +++ b/cmd/util/ledger/reporters/mock/report_writer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -9,7 +9,7 @@ type ReportWriter struct { mock.Mock } -// Close provides a mock function with given fields: +// Close provides a mock function with no fields func (_m *ReportWriter) Close() { _m.Called() } @@ -19,13 +19,12 @@ func (_m *ReportWriter) Write(dataPoint interface{}) { _m.Called(dataPoint) } -type mockConstructorTestingTNewReportWriter interface { +// NewReportWriter creates a new instance of ReportWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReportWriter(t interface { mock.TestingT Cleanup(func()) -} - -// NewReportWriter creates a new instance of ReportWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewReportWriter(t mockConstructorTestingTNewReportWriter) *ReportWriter { +}) *ReportWriter { mock := &ReportWriter{} mock.Mock.Test(t) diff --git a/cmd/util/ledger/reporters/mock/report_writer_factory.go b/cmd/util/ledger/reporters/mock/report_writer_factory.go index 5cda1ee46ae..2177d4d3306 100644 --- a/cmd/util/ledger/reporters/mock/report_writer_factory.go +++ b/cmd/util/ledger/reporters/mock/report_writer_factory.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type ReportWriterFactory struct { func (_m *ReportWriterFactory) ReportWriter(dataNamespace string) reporters.ReportWriter { ret := _m.Called(dataNamespace) + if len(ret) == 0 { + panic("no return value specified for ReportWriter") + } + var r0 reporters.ReportWriter if rf, ok := ret.Get(0).(func(string) reporters.ReportWriter); ok { r0 = rf(dataNamespace) @@ -28,13 +32,12 @@ func (_m *ReportWriterFactory) ReportWriter(dataNamespace string) reporters.Repo return r0 } -type mockConstructorTestingTNewReportWriterFactory interface { +// NewReportWriterFactory creates a new instance of ReportWriterFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReportWriterFactory(t interface { mock.TestingT Cleanup(func()) -} - -// NewReportWriterFactory creates a new instance of ReportWriterFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewReportWriterFactory(t mockConstructorTestingTNewReportWriterFactory) *ReportWriterFactory { +}) *ReportWriterFactory { mock := &ReportWriterFactory{} mock.Mock.Test(t) diff --git a/cmd/util/ledger/reporters/reporter_output.go b/cmd/util/ledger/reporters/reporter_output.go index 837f334f263..22d529c8d46 100644 --- a/cmd/util/ledger/reporters/reporter_output.go +++ b/cmd/util/ledger/reporters/reporter_output.go @@ -2,6 +2,7 @@ package reporters import ( "bufio" + "encoding/csv" "encoding/json" "fmt" "os" @@ -12,7 +13,7 @@ import ( "github.com/rs/zerolog" ) -// TODO janezp: we should be able to swithch the report writer to write to a database. +// TODO janezp: we should be able to switch the report writer to write to a database. type ReportWriterFactory interface { ReportWriter(dataNamespace string) ReportWriter } @@ -21,34 +22,81 @@ type ReportFileWriterFactory struct { fileSuffix int32 outputDir string log zerolog.Logger + format ReportFormat } +type ReportFormat uint8 + +const ( + // ReportFormatJSONArray represents format encoded as JSON array at the top level. 
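+	// For example, two records are encoded as: [{"a":1},{"a":2}].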
+ ReportFormatJSONArray ReportFormat = iota + + // ReportFormatJSONL represents format encoded as JSONL. + // ReportFormatJSONL should be used when report might be large enough to + // crash tools like jq (if JSON array is used instead of JSONL). + ReportFormatJSONL + + ReportFormatCSV +) + func NewReportFileWriterFactory(outputDir string, log zerolog.Logger) *ReportFileWriterFactory { + return NewReportFileWriterFactoryWithFormat(outputDir, log, ReportFormatJSONArray) +} + +func NewReportFileWriterFactoryWithFormat(outputDir string, log zerolog.Logger, format ReportFormat) *ReportFileWriterFactory { return &ReportFileWriterFactory{ fileSuffix: int32(time.Now().Unix()), outputDir: outputDir, log: log, + format: format, } } func (r *ReportFileWriterFactory) Filename(dataNamespace string) string { - return path.Join(r.outputDir, fmt.Sprintf("%s_%d.json", dataNamespace, r.fileSuffix)) + switch r.format { + case ReportFormatJSONArray: + return path.Join(r.outputDir, fmt.Sprintf("%s_%d.json", dataNamespace, r.fileSuffix)) + + case ReportFormatJSONL: + return path.Join(r.outputDir, fmt.Sprintf("%s_%d.jsonl", dataNamespace, r.fileSuffix)) + + case ReportFormatCSV: + return path.Join(r.outputDir, fmt.Sprintf("%s_%d.csv", dataNamespace, r.fileSuffix)) + + default: + panic(fmt.Sprintf("unrecognized report format: %d", r.format)) + } } func (r *ReportFileWriterFactory) ReportWriter(dataNamespace string) ReportWriter { fn := r.Filename(dataNamespace) - return NewReportFileWriter(fn, r.log) + return NewReportFileWriter(fn, r.log, r.format) } var _ ReportWriterFactory = &ReportFileWriterFactory{} +const reportFileWriteBufferSize = 100 + +func NewReportFileWriter(fileName string, log zerolog.Logger, format ReportFormat) ReportWriter { + switch format { + case ReportFormatCSV: + return NewCSVReportFileWriter(fileName, log) + case ReportFormatJSONArray, ReportFormatJSONL: + return NewJSONReportFileWriter(fileName, log, format) + default: + panic(fmt.Sprintf("report format %d not supported", format)) + } +} + // ReportWriter writes data from reports type ReportWriter interface { Write(dataPoint interface{}) Close() } +// ReportNilWriter + // ReportNilWriter does nothing. 
Can be used as the final fallback writer type ReportNilWriter struct { } @@ -61,22 +109,23 @@ func (r ReportNilWriter) Write(_ interface{}) { func (r ReportNilWriter) Close() { } -var _ ReportWriter = &ReportFileWriter{} +// JSONReportFileWriter -type ReportFileWriter struct { +type JSONReportFileWriter struct { f *os.File fileName string wg *sync.WaitGroup writeChan chan interface{} writer *bufio.Writer log zerolog.Logger + format ReportFormat faulty bool firstWrite bool } -const reportFileWriteBufferSize = 100 +var _ ReportWriter = &JSONReportFileWriter{} -func NewReportFileWriter(fileName string, log zerolog.Logger) ReportWriter { +func NewJSONReportFileWriter(fileName string, log zerolog.Logger, format ReportFormat) ReportWriter { f, err := os.Create(fileName) if err != nil { log.Warn().Err(err).Msg("Error creating ReportFileWriter, defaulting to ReportNilWriter") @@ -85,27 +134,29 @@ func NewReportFileWriter(fileName string, log zerolog.Logger) ReportWriter { writer := bufio.NewWriter(f) - // open a json array - _, err = writer.WriteRune('[') + if format == ReportFormatJSONArray { + // Open top-level JSON array + _, err = writer.WriteRune('[') - if err != nil { - log.Warn().Err(err).Msg("Error opening json array") - // time to clean up - err = writer.Flush() if err != nil { - log.Error().Err(err).Msg("Error closing flushing writer") - panic(err) + log.Warn().Err(err).Msg("Error opening json array") + // time to clean up + err = writer.Flush() + if err != nil { + log.Error().Err(err).Msg("Error closing flushing writer") + panic(err) + } + + err = f.Close() + if err != nil { + log.Error().Err(err).Msg("Error closing report file") + panic(err) + } + return ReportNilWriter{} } - - err = f.Close() - if err != nil { - log.Error().Err(err).Msg("Error closing report file") - panic(err) - } - return ReportNilWriter{} } - fw := &ReportFileWriter{ + fw := &JSONReportFileWriter{ f: f, fileName: fileName, writer: writer, @@ -113,6 +164,7 @@ func NewReportFileWriter(fileName string, log zerolog.Logger) ReportWriter { firstWrite: true, writeChan: make(chan interface{}, reportFileWriteBufferSize), wg: &sync.WaitGroup{}, + format: format, } fw.wg.Add(1) @@ -127,11 +179,11 @@ func NewReportFileWriter(fileName string, log zerolog.Logger) ReportWriter { return fw } -func (r *ReportFileWriter) Write(dataPoint interface{}) { +func (r *JSONReportFileWriter) Write(dataPoint interface{}) { r.writeChan <- dataPoint } -func (r *ReportFileWriter) write(dataPoint interface{}) { +func (r *JSONReportFileWriter) write(dataPoint interface{}) { if r.faulty { return } @@ -141,12 +193,23 @@ func (r *ReportFileWriter) write(dataPoint interface{}) { r.faulty = true } - // delimit the json records with commas if !r.firstWrite { - _, err = r.writer.WriteRune(',') - if err != nil { - r.log.Warn().Err(err).Msg("Error Writing json to file") - r.faulty = true + switch r.format { + case ReportFormatJSONArray: + // delimit the json records with commas + _, err = r.writer.WriteRune(',') + if err != nil { + r.log.Warn().Err(err).Msg("Error writing JSON array delimiter to file") + r.faulty = true + } + + case ReportFormatJSONL: + // delimit the json records with line break + _, err = r.writer.WriteRune('\n') + if err != nil { + r.log.Warn().Err(err).Msg("Error Writing JSONL delimiter to file") + r.faulty = true + } } } else { r.firstWrite = false @@ -159,16 +222,20 @@ func (r *ReportFileWriter) write(dataPoint interface{}) { } } -func (r *ReportFileWriter) Close() { +func (r *JSONReportFileWriter) Close() { close(r.writeChan) 
r.wg.Wait() - _, err := r.writer.WriteRune(']') - if err != nil { - r.log.Warn().Err(err).Msg("Error finishing json array") - // nothing to do, we will be closing the file now + if r.format == ReportFormatJSONArray { + // Close top-level json array + _, err := r.writer.WriteRune(']') + if err != nil { + r.log.Warn().Err(err).Msg("Error finishing json array") + // nothing to do, we will be closing the file now + } } - err = r.writer.Flush() + + err := r.writer.Flush() if err != nil { r.log.Error().Err(err).Msg("Error closing flushing writer") panic(err) @@ -182,3 +249,83 @@ func (r *ReportFileWriter) Close() { r.log.Info().Str("filename", r.fileName).Msg("Created report file") } + +// CSVReportFileWriter + +type CSVReportFileWriter struct { + f *os.File + fileName string + wg *sync.WaitGroup + writeChan chan []string + writer *csv.Writer + log zerolog.Logger + faulty bool +} + +var _ ReportWriter = &CSVReportFileWriter{} + +func NewCSVReportFileWriter(fileName string, log zerolog.Logger) ReportWriter { + f, err := os.Create(fileName) + if err != nil { + log.Warn().Err(err).Msg("Error creating ReportFileWriter, defaulting to ReportNilWriter") + return ReportNilWriter{} + } + + writer := csv.NewWriter(f) + + fw := &CSVReportFileWriter{ + f: f, + fileName: fileName, + writer: writer, + log: log, + writeChan: make(chan []string, reportFileWriteBufferSize), + wg: &sync.WaitGroup{}, + } + + fw.wg.Add(1) + go func() { + + for d := range fw.writeChan { + fw.write(d) + } + fw.wg.Done() + }() + + return fw +} + +func (r *CSVReportFileWriter) Write(dataPoint interface{}) { + record, ok := dataPoint.([]string) + if !ok { + r.log.Warn().Msgf("cannot write %T to csv, skip this record", dataPoint) + return + } + + r.writeChan <- record +} + +func (r *CSVReportFileWriter) write(record []string) { + if r.faulty { + return + } + err := r.writer.Write(record) + if err != nil { + r.log.Warn().Err(err).Msg("error writing to csv file") + r.faulty = true + } +} + +func (r *CSVReportFileWriter) Close() { + close(r.writeChan) + r.wg.Wait() + + r.writer.Flush() + + err := r.f.Close() + if err != nil { + r.log.Error().Err(err).Msg("Error closing report file") + panic(err) + } + + r.log.Info().Str("filename", r.fileName).Msg("Created report file") +} diff --git a/cmd/util/ledger/reporters/reporter_output_test.go b/cmd/util/ledger/reporters/reporter_output_test.go index 34695fa1673..a443f770f13 100644 --- a/cmd/util/ledger/reporters/reporter_output_test.go +++ b/cmd/util/ledger/reporters/reporter_output_test.go @@ -1,6 +1,7 @@ package reporters_test import ( + "fmt" "os" "path" "sync" @@ -12,7 +13,7 @@ import ( "github.com/onflow/flow-go/cmd/util/ledger/reporters" ) -func TestReportFileWriter(t *testing.T) { +func TestReportFileWriterJSONArray(t *testing.T) { dir := t.TempDir() filename := path.Join(dir, "test.json") @@ -30,20 +31,20 @@ func TestReportFileWriter(t *testing.T) { } t.Run("Open & Close - empty json array", func(t *testing.T) { - rw := reporters.NewReportFileWriter(filename, log) + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatJSONArray) rw.Close() requireFileContains(t, "[]") }) t.Run("Open & Write One & Close - json array with one element", func(t *testing.T) { - rw := reporters.NewReportFileWriter(filename, log) + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatJSONArray) rw.Write(testData{TestField: "something"}) rw.Close() requireFileContains(t, "[{\"TestField\":\"something\"}]") }) t.Run("Open & Write Many & Close - json array with many elements", 
func(t *testing.T) { - rw := reporters.NewReportFileWriter(filename, log) + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatJSONArray) rw.Write(testData{TestField: "something0"}) rw.Write(testData{TestField: "something1"}) rw.Write(testData{TestField: "something2"}) @@ -55,7 +56,7 @@ func TestReportFileWriter(t *testing.T) { }) t.Run("Open & Write Many in threads & Close", func(t *testing.T) { - rw := reporters.NewReportFileWriter(filename, log) + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatJSONArray) wg := &sync.WaitGroup{} for i := 0; i < 3; i++ { @@ -74,3 +75,134 @@ func TestReportFileWriter(t *testing.T) { "[{\"TestField\":\"something\"},{\"TestField\":\"something\"},{\"TestField\":\"something\"}]") }) } + +func TestReportFileWriterJSONL(t *testing.T) { + dir := t.TempDir() + + filename := path.Join(dir, "test.jsonl") + log := zerolog.Logger{} + + requireFileContains := func(t *testing.T, expected string) { + dat, err := os.ReadFile(filename) + require.NoError(t, err) + + fmt.Printf("filename: %s\n", filename) + + require.Equal(t, []byte(expected), dat) + } + + type testData struct { + TestField string + } + + t.Run("Open & Close", func(t *testing.T) { + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatJSONL) + rw.Close() + + requireFileContains(t, "") + }) + + t.Run("Open & Write One & Close", func(t *testing.T) { + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatJSONL) + rw.Write(testData{TestField: "something"}) + rw.Close() + + requireFileContains(t, "{\"TestField\":\"something\"}") + }) + + t.Run("Open & Write Many & Close", func(t *testing.T) { + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatJSONL) + rw.Write(testData{TestField: "something0"}) + rw.Write(testData{TestField: "something1"}) + rw.Write(testData{TestField: "something2"}) + + rw.Close() + + requireFileContains(t, + "{\"TestField\":\"something0\"}\n{\"TestField\":\"something1\"}\n{\"TestField\":\"something2\"}") + }) + + t.Run("Open & Write Many in threads & Close", func(t *testing.T) { + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatJSONL) + + wg := &sync.WaitGroup{} + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + rw.Write(testData{TestField: "something"}) + wg.Done() + }() + } + + wg.Wait() + + rw.Close() + + requireFileContains(t, + "{\"TestField\":\"something\"}\n{\"TestField\":\"something\"}\n{\"TestField\":\"something\"}") + }) +} + +func TestReportFileWriterCSV(t *testing.T) { + dir := t.TempDir() + + filename := path.Join(dir, "test.csv") + log := zerolog.Logger{} + + requireFileContains := func(t *testing.T, expected string) { + dat, err := os.ReadFile(filename) + require.NoError(t, err) + require.Equal(t, []byte(expected), dat) + } + + type testData struct { + TestField string + } + + t.Run("Open & Close", func(t *testing.T) { + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatCSV) + rw.Close() + + requireFileContains(t, "") + }) + + t.Run("Open & Write One & Close", func(t *testing.T) { + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatCSV) + rw.Write([]string{"something", "or other"}) + rw.Close() + + requireFileContains(t, "something,or other\n") + }) + + t.Run("Open & Write Many & Close", func(t *testing.T) { + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatCSV) + rw.Write([]string{"something0"}) + rw.Write([]string{"something1"}) + rw.Write([]string{"something2"}) + + 
rw.Close() + + requireFileContains(t, + "something0\nsomething1\nsomething2\n") + }) + + t.Run("Open & Write Many in threads & Close", func(t *testing.T) { + rw := reporters.NewReportFileWriter(filename, log, reporters.ReportFormatCSV) + + wg := &sync.WaitGroup{} + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + rw.Write([]string{"something"}) + wg.Done() + }() + } + + wg.Wait() + + rw.Close() + + requireFileContains(t, + "something\nsomething\nsomething\n") + }) +} diff --git a/cmd/util/ledger/reporters/storage_snapshot.go b/cmd/util/ledger/reporters/storage_snapshot.go index b9ca42c1fe5..bf4698e0559 100644 --- a/cmd/util/ledger/reporters/storage_snapshot.go +++ b/cmd/util/ledger/reporters/storage_snapshot.go @@ -19,7 +19,7 @@ func NewStorageSnapshotFromPayload( } id := flow.NewRegisterID( - string(key.KeyParts[0].Value), + flow.BytesToAddress(key.KeyParts[0].Value), string(key.KeyParts[1].Value)) snapshot[id] = entry.Value() diff --git a/cmd/util/ledger/util/atree_util.go b/cmd/util/ledger/util/atree_util.go new file mode 100644 index 00000000000..838618c4fc5 --- /dev/null +++ b/cmd/util/ledger/util/atree_util.go @@ -0,0 +1,126 @@ +package util + +import ( + "fmt" + + "github.com/onflow/atree" + "github.com/onflow/cadence/interpreter" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/runtime" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" +) + +func IsPayloadAtreeInlined(payload *ledger.Payload) (isAtreeSlab bool, isInlined bool, err error) { + registerID, registerValue, err := convert.PayloadToRegister(payload) + if err != nil { + return false, false, fmt.Errorf("failed to convert payload to register: %w", err) + } + return IsRegisterAtreeInlined(registerID.Key, registerValue) +} + +func IsRegisterAtreeInlined(key string, value []byte) (isAtreeSlab bool, isInlined bool, err error) { + if !flow.IsSlabIndexKey(key) { + return false, false, nil + } + + // Check Atree register version + + head, err := newHeadFromData(value) + if err != nil { + return false, false, err + } + + version := head.version() + if version > maxSupportedVersion { + return false, false, fmt.Errorf("atree slab version %d, max supported version %d", version, maxSupportedVersion) + } + + return true, version == inlinedVersion, nil +} + +const ( + maskVersion byte = 0b1111_0000 + + noninlinedVersion = 0 + inlinedVersion = 1 + maxSupportedVersion = inlinedVersion +) + +type head [2]byte + +func newHeadFromData(data []byte) (head, error) { + if len(data) < 2 { + return head{}, fmt.Errorf("atree slab must be at least 2 bytes, got %d bytes", len(data)) + } + + return head{data[0], data[1]}, nil +} + +func (h *head) version() byte { + return (h[0] & maskVersion) >> 4 +} + +func getSlabIDsFromRegisters(registers registers.Registers) ([]atree.SlabID, error) { + storageIDs := make([]atree.SlabID, 0, registers.Count()) + + err := registers.ForEach(func(owner string, key string, _ []byte) error { + + if !flow.IsSlabIndexKey(key) { + return nil + } + + slabID := atree.NewSlabID( + atree.Address([]byte(owner)), + atree.SlabIndex([]byte(key[1:])), + ) + + storageIDs = append(storageIDs, slabID) + + return nil + }) + if err != nil { + return nil, err + } + + return storageIDs, nil +} + +func LoadAtreeSlabsInStorage( + storage *runtime.Storage, + registers registers.Registers, + nWorkers int, +) error { + + storageIDs, err := getSlabIDsFromRegisters(registers) + if err 
!= nil { + return err + } + + return storage.PersistentSlabStorage.BatchPreload(storageIDs, nWorkers) +} + +func CheckStorageHealth( + interpreter *interpreter.Interpreter, + address common.Address, + storage *runtime.Storage, + registers registers.Registers, + domains []common.StorageDomain, + nWorkers int, +) error { + + err := LoadAtreeSlabsInStorage(storage, registers, nWorkers) + if err != nil { + return err + } + + for _, domain := range domains { + _ = storage.GetDomainStorageMap(interpreter, address, domain, false) + } + + return storage.CheckHealth() +} diff --git a/cmd/util/ledger/util/migration_runtime_interface.go b/cmd/util/ledger/util/migration_runtime_interface.go new file mode 100644 index 00000000000..8d5fbbee21c --- /dev/null +++ b/cmd/util/ledger/util/migration_runtime_interface.go @@ -0,0 +1,132 @@ +package util + +import ( + "errors" + "fmt" + + "github.com/onflow/cadence/ast" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/runtime" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/model/flow" +) + +type GetContractCodeFunc func(location common.AddressLocation) ([]byte, error) + +type GetContractNamesFunc func(address flow.Address) ([]string, error) + +type GetOrLoadProgramFunc func( + location runtime.Location, + load func() (*runtime.Program, error), +) ( + *runtime.Program, + error, +) + +type GerOrLoadProgramListenerFunc func( + location runtime.Location, + program *runtime.Program, + err error, +) + +// MigrationRuntimeInterface is a runtime interface that can be used in migrations. +// It only allows parsing and checking of contracts. +type MigrationRuntimeInterface struct { + runtime.EmptyRuntimeInterface + chainID flow.ChainID + CryptoContractAddress common.Address + GetContractCodeFunc GetContractCodeFunc + GetContractNamesFunc GetContractNamesFunc + GetOrLoadProgramFunc GetOrLoadProgramFunc + GetOrLoadProgramListenerFunc GerOrLoadProgramListenerFunc +} + +var _ runtime.Interface = &MigrationRuntimeInterface{} + +func NewMigrationRuntimeInterface( + chainID flow.ChainID, + cryptoContractAddress common.Address, + getCodeFunc GetContractCodeFunc, + getContractNamesFunc GetContractNamesFunc, + getOrLoadProgramFunc GetOrLoadProgramFunc, + getOrLoadProgramListenerFunc GerOrLoadProgramListenerFunc, +) *MigrationRuntimeInterface { + return &MigrationRuntimeInterface{ + chainID: chainID, + CryptoContractAddress: cryptoContractAddress, + GetContractCodeFunc: getCodeFunc, + GetContractNamesFunc: getContractNamesFunc, + GetOrLoadProgramFunc: getOrLoadProgramFunc, + GetOrLoadProgramListenerFunc: getOrLoadProgramListenerFunc, + } +} + +func (m *MigrationRuntimeInterface) ResolveLocation( + identifiers []runtime.Identifier, + location runtime.Location, +) ([]runtime.ResolvedLocation, error) { + + return environment.ResolveLocation( + identifiers, + location, + m.GetContractNamesFunc, + m.CryptoContractAddress, + ) +} + +func (m *MigrationRuntimeInterface) GetCode(location runtime.Location) ([]byte, error) { + contractLocation, ok := location.(common.AddressLocation) + if !ok { + return nil, fmt.Errorf("GetCode failed: expected AddressLocation, got %T", location) + } + + return m.GetAccountContractCode(contractLocation) +} + +func (m *MigrationRuntimeInterface) GetAccountContractCode( + location common.AddressLocation, +) (code []byte, err error) { + getContractCode := m.GetContractCodeFunc + if getContractCode == nil { + return nil, 
fmt.Errorf("GetCodeFunc missing") + } + + return getContractCode(location) +} + +func (m *MigrationRuntimeInterface) GetOrLoadProgram( + location runtime.Location, + load func() (*runtime.Program, error), +) ( + program *runtime.Program, + err error, +) { + getOrLoadProgram := m.GetOrLoadProgramFunc + if getOrLoadProgram == nil { + return nil, errors.New("GetOrLoadProgramFunc missing") + } + + listener := m.GetOrLoadProgramListenerFunc + if listener != nil { + defer func() { + listener(location, program, err) + }() + } + + return getOrLoadProgram(location, load) +} + +func (m *MigrationRuntimeInterface) RecoverProgram( + program *ast.Program, + location common.Location, +) ([]byte, error) { + return environment.RecoverProgram(m.chainID, program, location) +} + +type migrationTransactionPreparer struct { + state.NestedTransactionPreparer + derived.DerivedTransactionPreparer +} diff --git a/cmd/util/ledger/util/nop_meter.go b/cmd/util/ledger/util/nop_meter.go new file mode 100644 index 00000000000..133466c112b --- /dev/null +++ b/cmd/util/ledger/util/nop_meter.go @@ -0,0 +1,51 @@ +package util + +import ( + "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/meter" +) + +// NopMeter is a meter that does nothing. It can be used in migrations. +type NopMeter struct{} + +func (n NopMeter) RunWithMeteringDisabled(f func()) {} + +func (n NopMeter) ComputationAvailable(_ common.ComputationUsage) bool { + return false +} + +func (n NopMeter) MeterComputation(_ common.ComputationUsage) error { + return nil +} + +func (n NopMeter) ComputationUsed() (uint64, error) { + return 0, nil +} + +func (n NopMeter) ComputationIntensities() meter.MeteredComputationIntensities { + return meter.MeteredComputationIntensities{} +} + +func (n NopMeter) MeterMemory(_ common.MemoryUsage) error { + return nil +} + +func (n NopMeter) MemoryUsed() (uint64, error) { + return 0, nil +} + +func (n NopMeter) MeterEmittedEvent(_ uint64) error { + return nil +} + +func (n NopMeter) TotalEmittedEventBytes() uint64 { + return 0 +} + +func (n NopMeter) InteractionUsed() (uint64, error) { + return 0, nil +} + +var _ environment.Meter = NopMeter{} diff --git a/cmd/util/ledger/util/payload_file.go b/cmd/util/ledger/util/payload_file.go new file mode 100644 index 00000000000..4b419d19736 --- /dev/null +++ b/cmd/util/ledger/util/payload_file.go @@ -0,0 +1,430 @@ +package util + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "math" + "os" + + "github.com/fxamacker/cbor/v2" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/complete/wal" +) + +const ( + defaultBufioWriteSize = 1024 * 32 + defaultBufioReadSize = 1024 * 32 + + payloadEncodingVersion = 1 +) + +const ( + PayloadFileVersionV1 uint16 = 0x01 + + encMagicBytesIndex = 0 + encMagicBytesSize = 2 + encVersionIndex = 2 + encVersionSize = 2 + encFlagIndex = 4 + encFlagLowByteIndex = 5 + encPartialStateFlagIndex = encFlagLowByteIndex + encFlagSize = 2 + headerSize = encMagicBytesSize + encVersionSize + encFlagSize + encPayloadCountSize = 8 + footerSize = encPayloadCountSize + crc32SumSize = 4 +) + +const ( + maskPartialState byte = 0b0000_0001 +) + +// newPayloadFileHeader() returns payload header, consisting of: +// - magic bytes (2 bytes) +// - version (2 bytes) +// - flags (2 bytes) +func newPayloadFileHeader(version uint16, partialState bool) []byte { + var header [headerSize]byte + + // Write magic bytes. 
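+	// (wal.MagicBytesPayloadHeader is a fixed 2-byte marker used to recognize payload files)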
+	binary.BigEndian.PutUint16(header[encMagicBytesIndex:], wal.MagicBytesPayloadHeader)
+
+	// Write version.
+	binary.BigEndian.PutUint16(header[encVersionIndex:], version)
+
+	// Write flag.
+	if partialState {
+		header[encPartialStateFlagIndex] |= maskPartialState
+	}
+
+	return header[:]
+}
+
+// parsePayloadFileHeader verifies magic bytes and version in payload header.
+func parsePayloadFileHeader(header []byte) (partialState bool, err error) {
+	if len(header) != headerSize {
+		return false, fmt.Errorf("can't parse payload header: got %d bytes, expected %d bytes", len(header), headerSize)
+	}
+
+	// Read magic bytes.
+	gotMagicBytes := binary.BigEndian.Uint16(header[encMagicBytesIndex:])
+	if gotMagicBytes != wal.MagicBytesPayloadHeader {
+		return false, fmt.Errorf("can't parse payload header: got magic bytes %d, expected %d", gotMagicBytes, wal.MagicBytesPayloadHeader)
+	}
+
+	// Read version.
+	gotVersion := binary.BigEndian.Uint16(header[encVersionIndex:])
+	if gotVersion != PayloadFileVersionV1 {
+		return false, fmt.Errorf("can't parse payload header: got version %d, expected %d", gotVersion, PayloadFileVersionV1)
+	}
+
+	// Read partial state flag.
+	partialState = header[encPartialStateFlagIndex]&maskPartialState != 0
+
+	return partialState, nil
+}
+
+// newPayloadFileFooter returns payload footer.
+// - payload count (8 bytes)
+func newPayloadFileFooter(payloadCount int) []byte {
+	var footer [footerSize]byte
+
+	binary.BigEndian.PutUint64(footer[:], uint64(payloadCount))
+
+	return footer[:]
+}
+
+// parsePayloadFooter returns payload count from footer.
+func parsePayloadFooter(footer []byte) (payloadCount int, err error) {
+	if len(footer) != footerSize {
+		return 0, fmt.Errorf("can't parse payload footer: got %d bytes, expected %d bytes", len(footer), footerSize)
+	}
+
+	count := binary.BigEndian.Uint64(footer)
+	if count > math.MaxInt {
+		return 0, fmt.Errorf("can't parse payload footer: got %d payload count, expected payload count < %d", count, math.MaxInt)
+	}
+
+	return int(count), nil
+}
+
+func CreatePayloadFile(
+	logger zerolog.Logger,
+	payloadFile string,
+	payloads []*ledger.Payload,
+	owners map[string]struct{},
+	inputPayloadsFromPartialState bool,
+) (int, error) {
+
+	partialState := inputPayloadsFromPartialState || len(owners) > 0
+
+	f, err := os.Create(payloadFile)
+	if err != nil {
+		return 0, fmt.Errorf("can't create %s: %w", payloadFile, err)
+	}
+	defer f.Close()
+
+	writer := bufio.NewWriterSize(f, defaultBufioWriteSize)
+	defer writer.Flush()
+
+	// TODO: replace CRC-32 checksum.
+	// For now, CRC-32 checksum is used because checkpoint files (~21GB input files) already use CRC-32 checksums.
+	// Additionally, the primary purpose of this intermediate payload file (since Feb 12, 2024) is to speed up
+	// development, testing, and troubleshooting by allowing a small subset of payloads to be extracted.
+	// However, we should replace it since it is inappropriate for large files, as already suggested at:
+	// - September 28, 2022: https://github.com/onflow/flow-go/issues/3302
+	// - September 26, 2022 (asked if SHA2 should replace CRC32): https://github.com/onflow/flow-go/pull/3273#discussion_r980433612
+	// - February 24, 2022 (TODO suggested BLAKE2, etc. to replace CRC32): https://github.com/onflow/flow-go/pull/1944
+	crc32Writer := wal.NewCRC32Writer(writer)
+
+	// Write header with magic bytes, version, and flags.
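+	// The header is written through crc32Writer so that the trailing CRC-32 checksum covers it as well.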
+	header := newPayloadFileHeader(PayloadFileVersionV1, partialState)
+
+	_, err = crc32Writer.Write(header)
+	if err != nil {
+		return 0, fmt.Errorf("can't write payload file header for %s: %w", payloadFile, err)
+	}
+
+	includeAllPayloads := len(owners) == 0
+
+	// Write payloads.
+	var writtenPayloadCount int
+	if includeAllPayloads {
+		writtenPayloadCount, err = writePayloads(logger, crc32Writer, payloads)
+	} else {
+		writtenPayloadCount, err = writeSelectedPayloads(logger, crc32Writer, payloads, owners)
+	}
+
+	if err != nil {
+		return 0, fmt.Errorf("can't write payload for %s: %w", payloadFile, err)
+	}
+
+	// Write footer with payload count.
+	footer := newPayloadFileFooter(writtenPayloadCount)
+
+	_, err = crc32Writer.Write(footer)
+	if err != nil {
+		return 0, fmt.Errorf("can't write payload footer for %s: %w", payloadFile, err)
+	}
+
+	// Write CRC32 sum for validation
+	var crc32buf [crc32SumSize]byte
+	binary.BigEndian.PutUint32(crc32buf[:], crc32Writer.Crc32())
+
+	_, err = writer.Write(crc32buf[:])
+	if err != nil {
+		return 0, fmt.Errorf("can't write CRC32 for %s: %w", payloadFile, err)
+	}
+
+	return writtenPayloadCount, nil
+}
+
+func writePayloads(logger zerolog.Logger, w io.Writer, payloads []*ledger.Payload) (int, error) {
+	logger.Info().Msgf("writing %d payloads to file", len(payloads))
+
+	enc := cbor.NewEncoder(w)
+
+	var payloadScratchBuffer [1024 * 2]byte
+	for _, p := range payloads {
+
+		buf := ledger.EncodeAndAppendPayloadWithoutPrefix(payloadScratchBuffer[:0], p, payloadEncodingVersion)
+
+		// Encode payload
+		err := enc.Encode(buf)
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	return len(payloads), nil
+}
+
+func writeSelectedPayloads(
+	logger zerolog.Logger,
+	w io.Writer,
+	payloads []*ledger.Payload,
+	owners map[string]struct{},
+) (int, error) {
+	logger.Info().Msgf("filtering %d payloads and writing selected payloads to file", len(payloads))
+
+	enc := cbor.NewEncoder(w)
+
+	var includedPayloadCount int
+	var payloadScratchBuffer [1024 * 2]byte
+	for _, p := range payloads {
+		include, err := includePayloadByOwners(p, owners)
+		if err != nil {
+			return 0, err
+		}
+		if !include {
+			continue
+		}
+
+		buf := ledger.EncodeAndAppendPayloadWithoutPrefix(payloadScratchBuffer[:0], p, payloadEncodingVersion)
+
+		// Encode payload
+		err = enc.Encode(buf)
+		if err != nil {
+			return 0, err
+		}
+
+		includedPayloadCount++
+	}
+
+	return includedPayloadCount, nil
+}
+
+func includePayloadByOwners(payload *ledger.Payload, owners map[string]struct{}) (bool, error) {
+	if len(owners) == 0 {
+		// Include all payloads
+		return true, nil
+	}
+
+	k, err := payload.Key()
+	if err != nil {
+		return false, fmt.Errorf("can't get key from payload: %w", err)
+	}
+
+	owner := string(k.KeyParts[0].Value)
+
+	// Always include payloads for global registers,
+	// i.e. with empty owner
+	if owner == "" {
+		return true, nil
+	}
+
+	_, ok := owners[owner]
+	return ok, nil
+}
+
+func ReadPayloadFile(logger zerolog.Logger, payloadFile string) (bool, []*ledger.Payload, error) {
+
+	fInfo, err := os.Stat(payloadFile)
+	if os.IsNotExist(err) {
+		return false, nil, fmt.Errorf("%s doesn't exist", payloadFile)
+	}
+
+	fsize := fInfo.Size()
+
+	f, err := os.Open(payloadFile)
+	if err != nil {
+		return false, nil, fmt.Errorf("can't open %s: %w", payloadFile, err)
+	}
+	defer f.Close()
+
+	partialState, payloadCount, err := readMetaDataFromPayloadFile(f)
+	if err != nil {
+		return false, nil, err
+	}
+
+	bufReader := bufio.NewReaderSize(f, defaultBufioReadSize)
+
+	crcReader := wal.NewCRC32Reader(bufReader)
+
+	// Skip header (processed already)
+	_, err = io.CopyN(io.Discard, crcReader, headerSize)
+	if err != nil {
+		return false, nil, fmt.Errorf("can't read and discard header: %w", err)
+	}
+
+	if partialState {
+		logger.Info().Msgf("reading %d payloads (partial state) from file", payloadCount)
+	} else {
+		logger.Info().Msgf("reading %d payloads from file", payloadCount)
+	}
+
+	encPayloadSize := fsize - headerSize - footerSize - crc32SumSize
+
+	// NOTE: We need to limit the amount of data the CBOR codec reads
+	// because the CBOR codec reads chunks of data under the hood for
+	// performance, and we don't want crcReader to process data
+	// containing the CRC-32 checksum.
+	dec := cbor.NewDecoder(io.LimitReader(crcReader, encPayloadSize))
+
+	payloads := make([]*ledger.Payload, payloadCount)
+
+	for i := 0; i < payloadCount; i++ {
+		var rawPayload []byte
+		err := dec.Decode(&rawPayload)
+		if err != nil {
+			return false, nil, fmt.Errorf("can't decode payload in CBOR: %w", err)
+		}
+
+		payload, err := ledger.DecodePayloadWithoutPrefix(rawPayload, true, payloadEncodingVersion)
+		if err != nil {
+			return false, nil, fmt.Errorf("can't decode payload 0x%x: %w", rawPayload, err)
+		}
+
+		payloads[i] = payload
+	}
+
+	// Skip footer (processed already)
+	_, err = io.CopyN(io.Discard, crcReader, footerSize)
+	if err != nil {
+		return false, nil, fmt.Errorf("can't read and discard footer: %w", err)
+	}
+
+	// Read CRC32
+	var crc32buf [crc32SumSize]byte
+	_, err = io.ReadFull(bufReader, crc32buf[:])
+	if err != nil {
+		return false, nil, fmt.Errorf("can't read CRC32: %w", err)
+	}
+
+	readCrc32 := binary.BigEndian.Uint32(crc32buf[:])
+
+	calculatedCrc32 := crcReader.Crc32()
+
+	if calculatedCrc32 != readCrc32 {
+		return false, nil, fmt.Errorf("payload file checksum failed! File contains %x but calculated crc32 is %x", readCrc32, calculatedCrc32)
+	}
+
+	// Verify EOF is reached
+	_, err = io.CopyN(io.Discard, bufReader, 1)
+	if err == nil || err != io.EOF {
+		return false, nil, fmt.Errorf("can't process payload file: found trailing data")
+	}
+
+	return partialState, payloads, nil
+}
+
+// readMetaDataFromPayloadFile reads metadata from the header and footer.
+// NOTE: readMetaDataFromPayloadFile resets the file offset to the start of the file on exit.
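+// Expected file layout (integers are big-endian):
+//
+//	magic (2B) | version (2B) | flags (2B) | CBOR-encoded payloads | payload count (8B) | CRC-32 sum (4B)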
+func readMetaDataFromPayloadFile(f *os.File) (partialState bool, payloadCount int, err error) { + defer func() { + _, seekErr := f.Seek(0, io.SeekStart) + if err == nil { + err = seekErr + } + }() + + // Seek to header + _, err = f.Seek(0, io.SeekStart) + if err != nil { + return false, 0, fmt.Errorf("can't seek to start of payload file: %w", err) + } + + var header [headerSize]byte + + // Read header + _, err = io.ReadFull(f, header[:]) + if err != nil { + return false, 0, fmt.Errorf("can't read payload header: %w", err) + } + + // Parse header + partialState, err = parsePayloadFileHeader(header[:]) + if err != nil { + return false, 0, err + } + + const footerOffset = footerSize + crc32SumSize + + // Seek to footer + _, err = f.Seek(-footerOffset, io.SeekEnd) + if err != nil { + return false, 0, fmt.Errorf("can't seek to payload footer: %w", err) + } + + var footer [footerSize]byte + + // Read footer + _, err = io.ReadFull(f, footer[:]) + if err != nil { + return false, 0, fmt.Errorf("can't read payload footer: %w", err) + } + + // Parse footer + payloadCount, err = parsePayloadFooter(footer[:]) + if err != nil { + return false, 0, err + } + + return partialState, payloadCount, nil +} + +func IsPayloadFilePartialState(payloadFile string) (bool, error) { + if _, err := os.Stat(payloadFile); os.IsNotExist(err) { + return false, fmt.Errorf("%s doesn't exist", payloadFile) + } + + f, err := os.Open(payloadFile) + if err != nil { + return false, fmt.Errorf("can't open %s: %w", payloadFile, err) + } + defer f.Close() + + var header [headerSize]byte + + // Read header + _, err = io.ReadFull(f, header[:]) + if err != nil { + return false, fmt.Errorf("can't read payload header: %w", err) + } + + return header[encPartialStateFlagIndex]&maskPartialState != 0, nil +} diff --git a/cmd/util/ledger/util/payload_file_test.go b/cmd/util/ledger/util/payload_file_test.go new file mode 100644 index 00000000000..4040c8707e9 --- /dev/null +++ b/cmd/util/ledger/util/payload_file_test.go @@ -0,0 +1,375 @@ +package util_test + +import ( + "bytes" + "crypto/rand" + "path/filepath" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/utils/unittest" +) + +type keyPair struct { + key ledger.Key + value ledger.Value +} + +func TestPayloadFile(t *testing.T) { + + const fileName = "root.payload" + + t.Run("without filter, input payloads represent partial state", func(t *testing.T) { + unittest.RunWithTempDir(t, func(datadir string) { + size := 10 + + payloadFileName := filepath.Join(datadir, fileName) + + // Generate some data + keysValues := make(map[string]keyPair) + var payloads []*ledger.Payload + + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + for j, key := range keys { + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], + } + + payloads = append(payloads, ledger.NewPayload(key, values[j])) + } + } + + numOfPayloadWritten, err := util.CreatePayloadFile( + zerolog.Nop(), + payloadFileName, + payloads, + nil, + true, // input payloads represent partial state + ) + require.NoError(t, err) + require.Equal(t, len(payloads), numOfPayloadWritten) + + partialState, err := util.IsPayloadFilePartialState(payloadFileName) + require.NoError(t, err) + require.True(t, partialState) + + partialState, payloadsFromFile, err := util.ReadPayloadFile(zerolog.Nop(), payloadFileName) + require.NoError(t, err) 
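+			// Round trip: every payload written must be read back unchanged.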
+ require.Equal(t, len(payloads), len(payloadsFromFile)) + require.True(t, partialState) + + for _, payloadFromFile := range payloadsFromFile { + k, err := payloadFromFile.Key() + require.NoError(t, err) + + kv, exist := keysValues[k.String()] + require.True(t, exist) + + require.Equal(t, kv.value, payloadFromFile.Value()) + } + }) + }) + t.Run("without filter", func(t *testing.T) { + unittest.RunWithTempDir(t, func(datadir string) { + size := 10 + + payloadFileName := filepath.Join(datadir, fileName) + + // Generate some data + keysValues := make(map[string]keyPair) + var payloads []*ledger.Payload + + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + for j, key := range keys { + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], + } + + payloads = append(payloads, ledger.NewPayload(key, values[j])) + } + } + + numOfPayloadWritten, err := util.CreatePayloadFile( + zerolog.Nop(), + payloadFileName, + payloads, + nil, + false, // input payloads represent entire state + ) + require.NoError(t, err) + require.Equal(t, len(payloads), numOfPayloadWritten) + + partialState, err := util.IsPayloadFilePartialState(payloadFileName) + require.NoError(t, err) + require.False(t, partialState) + + partialState, payloadsFromFile, err := util.ReadPayloadFile(zerolog.Nop(), payloadFileName) + require.NoError(t, err) + require.Equal(t, len(payloads), len(payloadsFromFile)) + require.False(t, partialState) + + for _, payloadFromFile := range payloadsFromFile { + k, err := payloadFromFile.Key() + require.NoError(t, err) + + kv, exist := keysValues[k.String()] + require.True(t, exist) + + require.Equal(t, kv.value, payloadFromFile.Value()) + } + }) + }) + + t.Run("with filter", func(t *testing.T) { + unittest.RunWithTempDir(t, func(datadir string) { + size := 10 + + payloadFileName := filepath.Join(datadir, fileName) + + // Generate some data + keysValues := make(map[string]keyPair) + var payloads []*ledger.Payload + + var globalRegisterCount int + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + for j, key := range keys { + + if len(key.KeyParts[0].Value) == 0 { + globalRegisterCount++ + } + + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], + } + + payloads = append(payloads, ledger.NewPayload(key, values[j])) + } + } + + const selectedAddressCount = 10 + selectedAddresses := make(map[common.Address]struct{}) + selectedKeysValues := make(map[string]keyPair) + for k, kv := range keysValues { + owner := kv.key.KeyParts[0].Value + if len(owner) != common.AddressLength { + continue + } + + address, err := common.BytesToAddress(owner) + require.NoError(t, err) + + if len(selectedAddresses) < selectedAddressCount { + selectedAddresses[address] = struct{}{} + } + + if _, exist := selectedAddresses[address]; exist { + selectedKeysValues[k] = kv + } + } + + addresses := make(map[string]struct{}, len(selectedAddresses)) + for address := range selectedAddresses { + addresses[string(address[:])] = struct{}{} + } + + numOfPayloadWritten, err := util.CreatePayloadFile( + zerolog.Nop(), + payloadFileName, + payloads, + addresses, + false, // input payloads represent entire state + ) + require.NoError(t, err) + require.Equal( + t, + len(selectedKeysValues)+globalRegisterCount, + numOfPayloadWritten, + ) + + partialState, err := util.IsPayloadFilePartialState(payloadFileName) + require.NoError(t, err) + require.True(t, partialState) + + partialState, payloadsFromFile, err := util.ReadPayloadFile(zerolog.Nop(), payloadFileName) + 
require.NoError(t, err) + require.True(t, partialState) + + nonGlobalPayloads := make([]*ledger.Payload, 0, len(selectedKeysValues)) + for _, payloadFromFile := range payloadsFromFile { + key, err := payloadFromFile.Key() + require.NoError(t, err) + + owner := key.KeyParts[0].Value + if len(owner) > 0 { + nonGlobalPayloads = append(nonGlobalPayloads, payloadFromFile) + } + } + + require.Equal(t, len(selectedKeysValues), len(nonGlobalPayloads)) + + for _, payloadFromFile := range nonGlobalPayloads { + k, err := payloadFromFile.Key() + require.NoError(t, err) + + kv, exist := selectedKeysValues[k.String()] + require.True(t, exist) + + require.Equal(t, kv.value, payloadFromFile.Value()) + } + }) + }) + + t.Run("no payloads found with filter", func(t *testing.T) { + + emptyAddress := common.Address{} + + unittest.RunWithTempDir(t, func(datadir string) { + size := 10 + + payloadFileName := filepath.Join(datadir, fileName) + + // Generate some data + keysValues := make(map[string]keyPair) + var payloads []*ledger.Payload + + var globalRegisterCount int + + for i := 0; i < size; i++ { + keys, values := getSampleKeyValues(i) + + for j, key := range keys { + if len(key.KeyParts[0].Value) == 0 { + globalRegisterCount++ + } + + if bytes.Equal(key.KeyParts[0].Value, emptyAddress[:]) { + continue + } + + keysValues[key.String()] = keyPair{ + key: key, + value: values[j], + } + + payloads = append(payloads, ledger.NewPayload(key, values[j])) + } + } + + numOfPayloadWritten, err := util.CreatePayloadFile( + zerolog.Nop(), + payloadFileName, + payloads, + map[string]struct{}{ + string(emptyAddress[:]): {}, + }, + false, + ) + require.NoError(t, err) + require.Equal(t, globalRegisterCount, numOfPayloadWritten) + + partialState, err := util.IsPayloadFilePartialState(payloadFileName) + require.NoError(t, err) + require.True(t, partialState) + + partialState, payloadsFromFile, err := util.ReadPayloadFile(zerolog.Nop(), payloadFileName) + require.NoError(t, err) + require.Equal(t, globalRegisterCount, len(payloadsFromFile)) + require.True(t, partialState) + }) + }) +} + +func getSampleKeyValues(i int) ([]ledger.Key, []ledger.Value) { + switch i { + case 0: + return []ledger.Key{getKey("", "uuid"), getKey("", "account_address_state")}, + []ledger.Value{[]byte{'1'}, []byte{'A'}} + case 1: + return []ledger.Key{getKey("ADDRESS", "public_key_count"), + getKey("ADDRESS", "apk_0"), + getKey("ADDRESS", "exists"), + getKey("ADDRESS", "storage_used")}, + []ledger.Value{[]byte{1}, []byte("PUBLICKEYXYZ"), []byte{1}, []byte{100}} + case 2: + // TODO change the contract_names to CBOR encoding + return []ledger.Key{getKey("ADDRESS", "contract_names"), getKey("ADDRESS", "code.mycontract")}, + []ledger.Value{[]byte("mycontract"), []byte("CONTRACT Content")} + default: + keys := make([]ledger.Key, 0) + values := make([]ledger.Value, 0) + for j := 0; j < 10; j++ { + // address := make([]byte, 32) + address := make([]byte, 8) + _, err := rand.Read(address) + if err != nil { + panic(err) + } + keys = append(keys, getKey(string(address), "test")) + values = append(values, getRandomCadenceValue()) + } + return keys, values + } +} + +func getKey(owner, key string) ledger.Key { + return ledger.Key{KeyParts: []ledger.KeyPart{ + {Type: uint16(0), Value: []byte(owner)}, + {Type: uint16(2), Value: []byte(key)}, + }, + } +} + +func getRandomCadenceValue() ledger.Value { + + randomPart := make([]byte, 10) + _, err := rand.Read(randomPart) + if err != nil { + panic(err) + } + valueBytes := []byte{ + // magic prefix + 0x0, 0xca, 0xde, 0x0, 
0x4,
+		// tag
+		0xd8, 132,
+		// array, 5 items follow
+		0x85,
+
+		// tag
+		0xd8, 193,
+		// UTF-8 string, length 4
+		0x64,
+		// t, e, s, t
+		0x74, 0x65, 0x73, 0x74,
+
+		// nil
+		0xf6,
+
+		// positive integer 1
+		0x1,
+
+		// array, 0 items follow
+		0x80,
+
+		// UTF-8 string, length 10
+		0x6a,
+		0x54, 0x65, 0x73, 0x74, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
+	}
+
+	valueBytes = append(valueBytes, randomPart...)
+	return ledger.Value(valueBytes)
+}
diff --git a/cmd/util/ledger/util/payload_grouping.go b/cmd/util/ledger/util/payload_grouping.go
new file mode 100644
index 00000000000..cd46cbd9f8f
--- /dev/null
+++ b/cmd/util/ledger/util/payload_grouping.go
@@ -0,0 +1,252 @@
+package util
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// minSizeForSplitSortingIntoGoroutines is the size below which the sorting
+// is not split into goroutines.
+const minSizeForSplitSortingIntoGoroutines = 100_000
+
+const estimatedNumOfAccount = 30_000_000
+
+// PayloadAccountGroup is a group of payloads belonging to a single account.
+type PayloadAccountGroup struct {
+	Address  flow.Address
+	Payloads []*ledger.Payload
+}
+
+// PayloadAccountGrouping is a grouping of payloads by account.
+type PayloadAccountGrouping struct {
+	payloads sortablePayloads
+	indexes  []int
+
+	current int
+}
+
+// Next returns the next account group. If there are no more account groups, it returns nil.
+// The zero address is used for global payloads and is not an actual account.
+func (g *PayloadAccountGrouping) Next() (*PayloadAccountGroup, error) {
+	if g.current == len(g.indexes) {
+		// reached the end
+		return nil, nil
+	}
+
+	accountStartIndex := g.indexes[g.current]
+	accountEndIndex := len(g.payloads)
+	if g.current != len(g.indexes)-1 {
+		accountEndIndex = g.indexes[g.current+1]
+	}
+	g.current++
+
+	address, err := g.payloads[accountStartIndex].Address()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get address from payload: %w", err)
+	}
+
+	return &PayloadAccountGroup{
+		Address:  address,
+		Payloads: g.payloads[accountStartIndex:accountEndIndex],
+	}, nil
+}
+
+// Len returns the number of accounts
+func (g *PayloadAccountGrouping) Len() int {
+	return len(g.indexes)
+}
+
+// AllPayloadsCount returns the number of payloads
+func (g *PayloadAccountGrouping) AllPayloadsCount() int {
+	return len(g.payloads)
+}
+
+// GroupPayloadsByAccount takes a list of payloads and groups them by account.
+// It uses nWorkers to sort the payloads by address and to find the start and end
+// indexes of each account.
+func GroupPayloadsByAccount(
+	log zerolog.Logger,
+	payloads []*ledger.Payload,
+	nWorkers int,
+) *PayloadAccountGrouping {
+	if len(payloads) == 0 {
+		return &PayloadAccountGrouping{}
+	}
+	p := sortablePayloads(payloads)
+
+	start := time.Now()
+	log.Info().
+		Int("payloads", len(payloads)).
+		Int("workers", nWorkers).
+		Msg("Sorting payloads by address")
+
+	// sort the payloads by address
+	sortPayloads(0, len(p), p, make(sortablePayloads, len(p)), nWorkers)
+	end := time.Now()
+
+	log.Info().
+		Int("payloads", len(payloads)).
+		Str("duration", end.Sub(start).Round(1*time.Second).String()).
+		Msg("Sorted. Finding account boundaries in sorted payloads")
+
+	start = time.Now()
+	// find the indexes of the payloads that start a new account
+	indexes := make([]int, 0, estimatedNumOfAccount)
+	for i := 0; i < len(p); {
+		indexes = append(indexes, i)
+		i = p.FindNextKeyIndexUntil(i, len(p))
+	}
+	end = time.Now()
+
+	log.Info().
+		Int("accounts", len(indexes)).
+		Str("duration", end.Sub(start).Round(1*time.Second).String()).
+		Msg("Done grouping payloads by account")
+
+	return &PayloadAccountGrouping{
+		payloads: p,
+		indexes:  indexes,
+	}
+}
+
+type sortablePayloads []*ledger.Payload
+
+func (s sortablePayloads) Len() int {
+	return len(s)
+}
+
+func (s sortablePayloads) Less(i, j int) bool {
+	return s.Compare(i, j) < 0
+}
+
+func (s sortablePayloads) Compare(i, j int) int {
+	a, err := s[i].Address()
+	if err != nil {
+		panic(err)
+	}
+
+	b, err := s[j].Address()
+	if err != nil {
+		panic(err)
+	}
+
+	return bytes.Compare(a[:], b[:])
+}
+
+func (s sortablePayloads) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s sortablePayloads) FindNextKeyIndexUntil(i int, upperBound int) int {
+	low := i
+	step := 1
+	for low+step < upperBound && s.Compare(low+step, i) == 0 {
+		low += step
+		step *= 2
+	}
+
+	high := low + step
+	if high > upperBound {
+		high = upperBound
+	}
+
+	for low < high {
+		mid := (low + high) / 2
+		if s.Compare(mid, i) == 0 {
+			low = mid + 1
+		} else {
+			high = mid
+		}
+	}
+
+	return low
+}
+
+// sortPayloads sorts the payloads in the range [i, j) using goroutines and merges
+// the results using the intermediate buffer. The goroutine allowance is the number
+// of goroutines that can be used for sorting. If the allowance is less than 2,
+// the payloads are sorted using the built-in sort.
+// The buffer must be of the same length as the source and can be disposed after.
+func sortPayloads(i, j int, source, buffer sortablePayloads, goroutineAllowance int) {
+	// if the length is less than 2, no need to sort
+	if j-i <= 1 {
+		return
+	}
+
+	// sort with the built-in sort if we are out of goroutine allowance,
+	// or if the length is less than minSizeForSplitSortingIntoGoroutines
+	if goroutineAllowance < 2 || j-i < minSizeForSplitSortingIntoGoroutines {
+		sort.Sort(source[i:j])
+		return
+	}
+
+	goroutineAllowance -= 2
+	allowance1 := goroutineAllowance / 2
+	allowance2 := goroutineAllowance - allowance1
+	mid := (i + j) / 2
+
+	wg := sync.WaitGroup{}
+	wg.Add(2)
+	go func() {
+		sortPayloads(i, mid, source, buffer, allowance1)
+		wg.Done()
+	}()
+	go func() {
+		sortPayloads(mid, j, source, buffer, allowance2)
+		wg.Done()
+	}()
+	wg.Wait()
+
+	mergeInto(source, buffer, i, mid, j)
+}
+
+func mergeInto(source, buffer sortablePayloads, i int, mid int, j int) {
+	left := i
+	right := mid
+	k := i
+	for left < mid && right < j {
+		// More elements in both partitions to process.
+		if source.Compare(left, right) <= 0 {
+			// Move left partition elements with the same address to buffer.
+			nextLeft := source.FindNextKeyIndexUntil(left, mid)
+			n := copy(buffer[k:], source[left:nextLeft])
+			left = nextLeft
+			k += n
+		} else {
+			// Move right partition elements with the same address to buffer.
+			nextRight := source.FindNextKeyIndexUntil(right, j)
+			n := copy(buffer[k:], source[right:nextRight])
+			right = nextRight
+			k += n
+		}
+	}
+	// At this point:
+	// - one partition is exhausted.
+	// - remaining elements in the other partition (already sorted) can be copied over.
+	if left < mid {
+		// Copy remaining elements in the left partition.
+		copy(buffer[k:], source[left:mid])
+	} else {
+		// Copy remaining elements in the right partition.
+		copy(buffer[k:], source[right:j])
+	}
+	// Copy merged buffer back to source.
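+	// Only the subrange [i, j) of the shared buffer was written by this merge;
+	// sibling goroutines merge other subranges concurrently, so only [i, j)
+	// is copied back into the source.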
+ copy(source[i:j], buffer[i:j]) +} + +func SortPayloadsByAddress(payloads []*ledger.Payload, nWorkers int) []*ledger.Payload { + p := sortablePayloads(payloads) + + // Sort the payloads by address + sortPayloads(0, len(p), p, make(sortablePayloads, len(p)), nWorkers) + + return p +} diff --git a/cmd/util/ledger/util/payload_grouping_test.go b/cmd/util/ledger/util/payload_grouping_test.go new file mode 100644 index 00000000000..13cd80af816 --- /dev/null +++ b/cmd/util/ledger/util/payload_grouping_test.go @@ -0,0 +1,197 @@ +package util_test + +import ( + "crypto/rand" + "encoding/hex" + rand2 "math/rand" + "runtime" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" +) + +func TestGroupPayloadsByAccount(t *testing.T) { + log := zerolog.New(zerolog.NewTestWriter(t)) + payloads := generateRandomPayloads(1000000) + tmp := make([]*ledger.Payload, len(payloads)) + copy(tmp, payloads) + + groups := util.GroupPayloadsByAccount(log, payloads, 0) + + require.Greater(t, groups.Len(), 1) +} + +func TestGroupPayloadsByAccountForDataRace(t *testing.T) { + log := zerolog.New(zerolog.NewTestWriter(t)) + + const accountSize = 4 + var payloads []*ledger.Payload + for i := 0; i < accountSize; i++ { + payloads = append(payloads, generateRandomPayloadsWithAddress(generateRandomAddress(), 100_000)...) + } + + const nWorkers = 8 + groups := util.GroupPayloadsByAccount(log, payloads, nWorkers) + require.Equal(t, accountSize, groups.Len()) +} + +func TestGroupPayloadsByAccountCompareResults(t *testing.T) { + log := zerolog.Nop() + payloads := generateRandomPayloads(1000000) + tmp1 := make([]*ledger.Payload, len(payloads)) + tmp2 := make([]*ledger.Payload, len(payloads)) + copy(tmp1, payloads) + copy(tmp2, payloads) + + groups1 := util.GroupPayloadsByAccount(log, tmp1, 0) + groups2 := util.GroupPayloadsByAccount(log, tmp2, runtime.NumCPU()) + + groups3 := map[flow.Address][]*ledger.Payload{} + for _, payload := range payloads { + key, err := payload.Key() + require.NoError(t, err) + registerID, err := convert.LedgerKeyToRegisterID(key) + require.NoError(t, err) + address := flow.BytesToAddress([]byte(registerID.Owner)) + require.NoError(t, err) + if _, ok := groups3[address]; !ok { + groups3[address] = []*ledger.Payload{} + } + groups3[address] = append(groups3[address], payload) + } + + require.Equal(t, groups1.Len(), groups2.Len()) + require.Equal(t, groups1.Len(), len(groups3)) + for { + group1, err1 := groups1.Next() + group2, err2 := groups2.Next() + + require.NoError(t, err1) + require.NoError(t, err2) + + if group1 == nil { + require.Nil(t, group2) + break + } + + require.Equal(t, group1.Address, group2.Address) + require.Equal(t, len(group1.Payloads), len(group2.Payloads)) + require.ElementsMatch(t, group1.Payloads, group2.Payloads) + require.Equal(t, len(group1.Payloads), len(groups3[group1.Address])) + require.ElementsMatch(t, group1.Payloads, groups3[group1.Address]) + } +} + +func BenchmarkGroupPayloadsByAccount(b *testing.B) { + log := zerolog.Nop() + payloads := generateRandomPayloads(10000000) + tmp := make([]*ledger.Payload, len(payloads)) + + bench := func(b *testing.B, nWorker int) func(b *testing.B) { + return func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + copy(tmp, payloads) + b.StartTimer() + util.GroupPayloadsByAccount(log, tmp, nWorker) + } + } + 
	}
+
+	b.Run("1 worker", bench(b, 1))
+	b.Run("2 worker", bench(b, 2))
+	b.Run("4 worker", bench(b, 4))
+	b.Run("8 worker", bench(b, 8))
+	b.Run("max worker", bench(b, runtime.NumCPU()))
+}
+
+// generateRandomPayloads generates n random payloads
+// with a random number of payloads per account (exponentially distributed)
+func generateRandomPayloads(n int) []*ledger.Payload {
+	const meanPayloadsPerAccount = 100
+	const minPayloadsPerAccount = 1
+
+	payloads := make([]*ledger.Payload, 0, n)
+
+	for i := 0; i < n; {
+
+		registersForAccount := minPayloadsPerAccount + int(rand2.ExpFloat64()*(meanPayloadsPerAccount-minPayloadsPerAccount))
+		if registersForAccount > n-i {
+			registersForAccount = n - i
+		}
+		i += registersForAccount
+
+		accountKey := generateRandomAccountKey()
+		for j := 0; j < registersForAccount; j++ {
+			payloads = append(payloads,
+				ledger.NewPayload(
+					accountKey,
+					[]byte(generateRandomString(10)),
+				))
+		}
+	}
+
+	return payloads
+}
+
+func generateRandomPayloadsWithAddress(address string, n int) []*ledger.Payload {
+	const meanPayloadsPerAccount = 100
+	const minPayloadsPerAccount = 1
+
+	payloads := make([]*ledger.Payload, 0, n)
+
+	for i := 0; i < n; {
+
+		registersForAccount := minPayloadsPerAccount + int(rand2.ExpFloat64()*(meanPayloadsPerAccount-minPayloadsPerAccount))
+		if registersForAccount > n-i {
+			registersForAccount = n - i
+		}
+		i += registersForAccount
+
+		accountKey := convert.RegisterIDToLedgerKey(flow.RegisterID{
+			Owner: address,
+			Key:   generateRandomString(10),
+		})
+		for j := 0; j < registersForAccount; j++ {
+			payloads = append(payloads,
+				ledger.NewPayload(
+					accountKey,
+					[]byte(generateRandomString(10)),
+				))
+		}
+	}
+
+	return payloads
+}
+
+func generateRandomAccountKey() ledger.Key {
+	return convert.RegisterIDToLedgerKey(flow.RegisterID{
+		Owner: generateRandomAddress(),
+		Key:   generateRandomString(10),
+	})
+}
+
+func generateRandomString(i int) string {
+	buf := make([]byte, i)
+	_, err := rand.Read(buf)
+	if err != nil {
+		panic(err)
+	}
+	return hex.EncodeToString(buf)
+}
+
+func generateRandomAddress() string {
+	buf := make([]byte, flow.AddressLength)
+	_, err := rand.Read(buf)
+	if err != nil {
+		panic(err)
+	}
+	return string(buf)
+}
diff --git a/cmd/util/ledger/util/programs.go b/cmd/util/ledger/util/programs.go
new file mode 100644
index 00000000000..9cc1e1387cd
--- /dev/null
+++ b/cmd/util/ledger/util/programs.go
@@ -0,0 +1,75 @@
+package util
+
+import (
+	"fmt"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/runtime"
+
+	"github.com/onflow/flow-go/fvm/environment"
+	"github.com/onflow/flow-go/fvm/storage"
+	"github.com/onflow/flow-go/fvm/storage/derived"
+	"github.com/onflow/flow-go/fvm/storage/state"
+	"github.com/onflow/flow-go/fvm/tracing"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+var _ storage.TransactionPreparer = migrationTransactionPreparer{}
+
+func NewProgramsGetOrLoadProgramFunc(
+	nestedTransactionPreparer state.NestedTransactionPreparer,
+	accounts environment.Accounts,
+) (GetOrLoadProgramFunc, error) {
+
+	derivedChainData, err := derived.NewDerivedChainData(derived.DefaultDerivedDataCacheSize)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create derived chain data: %w", err)
+	}
+
+	// The current block ID does not matter here; it is only used for the cross-block cache, which is not needed here.
+	derivedTransactionData := derivedChainData.
+		NewDerivedBlockDataForScript(flow.Identifier{}).
+ NewSnapshotReadDerivedTransactionData() + + programs := environment.NewPrograms( + tracing.NewTracerSpan(), + NopMeter{}, + environment.NoopMetricsReporter{}, + migrationTransactionPreparer{ + NestedTransactionPreparer: nestedTransactionPreparer, + DerivedTransactionPreparer: derivedTransactionData, + }, + accounts, + ) + + programErrors := map[common.Location]error{} + + return func( + location runtime.Location, + load func() (*runtime.Program, error), + ) ( + program *runtime.Program, + err error, + ) { + return programs.GetOrLoadProgram( + location, + func() (*runtime.Program, error) { + // If the program is already known to be invalid, + // then return the error immediately, + // without attempting to load the program again + if err, ok := programErrors[location]; ok { + return nil, err + } + + // Otherwise, load the program. + // If an error occurs, then record it for subsequent calls + program, err := load() + if err != nil { + programErrors[location] = err + } + + return program, err + }, + ) + }, nil +} diff --git a/cmd/util/ledger/util/registers/registers.go b/cmd/util/ledger/util/registers/registers.go new file mode 100644 index 00000000000..c16abefe5f9 --- /dev/null +++ b/cmd/util/ledger/util/registers/registers.go @@ -0,0 +1,408 @@ +package registers + +import ( + "fmt" + "sync" + + "github.com/onflow/atree" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" +) + +type ForEachCallback func(owner string, key string, value []byte) error + +type Registers interface { + Get(owner string, key string) ([]byte, error) + Set(owner string, key string, value []byte) error + ForEach(f ForEachCallback) error + Count() int +} + +// ByAccount represents the registers of all accounts +type ByAccount struct { + registers map[string]*AccountRegisters +} + +var _ Registers = &ByAccount{} + +func NewByAccount() *ByAccount { + return &ByAccount{ + registers: make(map[string]*AccountRegisters), + } +} + +const payloadsPerAccountEstimate = 200 + +func NewByAccountFromPayloads(payloads []*ledger.Payload) (*ByAccount, error) { + byAccount := &ByAccount{ + registers: make(map[string]*AccountRegisters, len(payloads)/payloadsPerAccountEstimate), + } + + for _, payload := range payloads { + registerID, registerValue, err := convert.PayloadToRegister(payload) + if err != nil { + return nil, fmt.Errorf("failed to convert payload to register: %w", err) + } + + err = byAccount.Set( + registerID.Owner, + registerID.Key, + registerValue, + ) + if err != nil { + return nil, fmt.Errorf("failed to set register: %w", err) + } + } + + return byAccount, nil +} + +func (b *ByAccount) Get(owner string, key string) ([]byte, error) { + accountRegisters, ok := b.registers[owner] + if !ok { + return nil, nil + } + return accountRegisters.registers[key], nil +} + +func (b *ByAccount) Set(owner string, key string, value []byte) error { + accountRegisters := b.AccountRegisters(owner) + accountRegisters.uncheckedSet(key, value) + return nil +} + +func (b *ByAccount) ForEach(f ForEachCallback) error { + for _, accountRegisters := range b.registers { + err := accountRegisters.ForEach(f) + if err != nil { + return err + } + } + return nil +} + +func (b *ByAccount) DestructIntoPayloads(nWorker int) []*ledger.Payload { + payloads := make([]*ledger.Payload, b.Count()) + + type job struct { + registers *AccountRegisters + payloads 
[]*ledger.Payload + } + + var wg sync.WaitGroup + + jobs := make(chan job, b.AccountCount()) + + worker := func() { + defer wg.Done() + for job := range jobs { + job.registers.insertIntoPayloads(job.payloads) + } + } + + for i := 0; i < nWorker; i++ { + wg.Add(1) + go worker() + } + + startOffset := 0 + for owner, accountRegisters := range b.registers { + + endOffset := startOffset + accountRegisters.Count() + accountPayloads := payloads[startOffset:endOffset] + + jobs <- job{ + registers: accountRegisters, + payloads: accountPayloads, + } + + // Remove the account from the map to reduce memory usage. + // The account registers are now stored in the payloads, + // so we don't need to keep them in the by-account registers. + // This allows GC to collect converted account registers during the loop. + delete(b.registers, owner) + + startOffset = endOffset + } + close(jobs) + + wg.Wait() + + return payloads +} + +func (b *ByAccount) ForEachAccount(f func(accountRegisters *AccountRegisters) error) error { + for _, accountRegisters := range b.registers { + err := f(accountRegisters) + if err != nil { + return err + } + } + return nil +} + +func (b *ByAccount) AccountCount() int { + return len(b.registers) +} + +func (b *ByAccount) HasAccountOwner(owner string) bool { + _, ok := b.registers[owner] + return ok +} + +func (b *ByAccount) AccountRegisters(owner string) *AccountRegisters { + accountRegisters, ok := b.registers[owner] + if !ok { + accountRegisters = NewAccountRegisters(owner) + b.registers[owner] = accountRegisters + } + return accountRegisters +} + +func (b *ByAccount) SetAccountRegisters(newAccountRegisters *AccountRegisters) *AccountRegisters { + owner := newAccountRegisters.Owner() + oldAccountRegisters := b.registers[owner] + b.registers[owner] = newAccountRegisters + return oldAccountRegisters +} + +func (b *ByAccount) Count() int { + // TODO: parallelize + count := 0 + for _, accountRegisters := range b.registers { + count += accountRegisters.Count() + } + return count +} + +// AccountRegisters represents the registers of an account +type AccountRegisters struct { + owner string + registers map[string][]byte +} + +func NewAccountRegisters(owner string) *AccountRegisters { + return &AccountRegisters{ + owner: owner, + registers: make(map[string][]byte), + } +} + +var _ Registers = &AccountRegisters{} + +func (a *AccountRegisters) Get(owner string, key string) ([]byte, error) { + if owner != a.owner { + return nil, fmt.Errorf("owner mismatch: expected %x, got %x", a.owner, owner) + } + return a.registers[key], nil +} + +func (a *AccountRegisters) Set(owner string, key string, value []byte) error { + if owner != a.owner { + return fmt.Errorf("owner mismatch: expected %x, got %x", a.owner, owner) + } + a.uncheckedSet(key, value) + return nil +} + +func (a *AccountRegisters) uncheckedSet(key string, value []byte) { + if len(value) == 0 { + delete(a.registers, key) + } else { + a.registers[key] = value + } +} + +func (a *AccountRegisters) ForEach(f ForEachCallback) error { + for key, value := range a.registers { + err := f(a.owner, key, value) + if err != nil { + return err + } + } + return nil +} + +func (a *AccountRegisters) Count() int { + return len(a.registers) +} + +func (a *AccountRegisters) Owner() string { + return a.owner +} + +func (a *AccountRegisters) Payloads() []*ledger.Payload { + payloads := make([]*ledger.Payload, a.Count()) + a.insertIntoPayloads(payloads) + return payloads +} + +// insertIntoPayloads inserts the registers into the given payloads slice. 
+// The payloads slice must have the same size as the number of registers. +func (a *AccountRegisters) insertIntoPayloads(payloads []*ledger.Payload) { + payloadCount := len(payloads) + registerCount := len(a.registers) + if payloadCount != registerCount { + panic(fmt.Errorf( + "given payloads slice has wrong size: got %d, expected %d", + payloadCount, + registerCount, + )) + } + + index := 0 + for key, value := range a.registers { + if len(value) == 0 { + panic(fmt.Errorf("unexpected empty register value: %x, %x", a.owner, key)) + } + + ledgerKey := convert.RegisterIDToLedgerKey(flow.RegisterID{ + Owner: a.owner, + Key: key, + }) + payload := ledger.NewPayload(ledgerKey, value) + payloads[index] = payload + index++ + } +} + +// Merge merges the registers from the other AccountRegisters into this AccountRegisters. +func (a *AccountRegisters) Merge(other *AccountRegisters) error { + for key, value := range other.registers { + _, ok := a.registers[key] + if ok { + return fmt.Errorf("key already exists: %s", key) + } + a.registers[key] = value + } + return nil +} + +func (a *AccountRegisters) ForEachKey(f func(key string) error) error { + for key := range a.registers { + err := f(key) + if err != nil { + return err + } + } + return nil +} + +func (a *AccountRegisters) PayloadSize() int { + size := 0 + for key, value := range a.registers { + registerKey := flow.RegisterID{ + Owner: a.owner, + Key: key, + } + size += environment.RegisterSize(registerKey, value) + } + return size +} + +func NewAccountRegistersFromPayloads(owner string, payloads []*ledger.Payload) (*AccountRegisters, error) { + accountRegisters := NewAccountRegisters(owner) + + for _, payload := range payloads { + registerID, registerValue, err := convert.PayloadToRegister(payload) + if err != nil { + return nil, fmt.Errorf("failed to convert payload to register: %w", err) + } + + err = accountRegisters.Set( + registerID.Owner, + registerID.Key, + registerValue, + ) + if err != nil { + return nil, fmt.Errorf("failed to set register: %w", err) + } + } + + return accountRegisters, nil +} + +// StorageSnapshot adapts Registers to the snapshot.StorageSnapshot interface +type StorageSnapshot struct { + Registers +} + +var _ snapshot.StorageSnapshot = StorageSnapshot{} + +func (s StorageSnapshot) Get(id flow.RegisterID) (flow.RegisterValue, error) { + return s.Registers.Get(id.Owner, id.Key) +} + +// ReadOnlyLedger adapts Registers to the atree.Ledger interface +type ReadOnlyLedger struct { + Registers +} + +var _ atree.Ledger = ReadOnlyLedger{} + +func (l ReadOnlyLedger) GetValue(address, key []byte) (value []byte, err error) { + owner := flow.AddressToRegisterOwner(flow.BytesToAddress(address)) + return l.Registers.Get(owner, string(key)) +} + +func (l ReadOnlyLedger) ValueExists(owner, key []byte) (exists bool, err error) { + value, err := l.Registers.Get(string(owner), string(key)) + if err != nil { + return false, err + } + return value != nil, nil +} + +func (l ReadOnlyLedger) SetValue(_, _, _ []byte) error { + panic("unexpected call of SetValue") +} + +func (l ReadOnlyLedger) AllocateSlabIndex(_ []byte) (atree.SlabIndex, error) { + panic("unexpected call of AllocateSlabIndex") +} + +// ApplyChanges applies the given changes to the given registers, +// and verifies that the changes are only for the expected addresses. 
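+//
+// A minimal usage sketch (illustrative; `registersByAccount`, `addr`, and
+// `logger` are assumed to exist at the call site):
+//
+//	changes := map[flow.RegisterID]flow.RegisterValue{
+//		{Owner: flow.AddressToRegisterOwner(addr), Key: "storage_used"}: {42},
+//	}
+//	err := ApplyChanges(
+//		registersByAccount,
+//		changes,
+//		map[flow.Address]struct{}{addr: {}},
+//		logger,
+//	)
+//
+// Passing nil for expectedChangeAddresses skips the address verification.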
+func ApplyChanges( + registers Registers, + changes map[flow.RegisterID]flow.RegisterValue, + expectedChangeAddresses map[flow.Address]struct{}, + logger zerolog.Logger, +) error { + + for registerID, newValue := range changes { + + if expectedChangeAddresses != nil { + ownerAddress := flow.BytesToAddress([]byte(registerID.Owner)) + + if _, ok := expectedChangeAddresses[ownerAddress]; !ok { + + expectedChangeAddressesArray := zerolog.Arr() + for expectedChangeAddress := range expectedChangeAddresses { + expectedChangeAddressesArray = + expectedChangeAddressesArray.Str(expectedChangeAddress.Hex()) + } + + // something was changed that does not belong to this account. Log it. + logger.Error(). + Str("key", registerID.String()). + Str("actual_address", ownerAddress.Hex()). + Array("expected_addresses", expectedChangeAddressesArray). + Hex("value", newValue). + Msg("key is part of the change set, but is for a different account") + } + } + + err := registers.Set(registerID.Owner, registerID.Key, newValue) + if err != nil { + return fmt.Errorf("failed to set register: %w", err) + } + } + + return nil +} diff --git a/cmd/util/ledger/util/registers/registers_test.go b/cmd/util/ledger/util/registers/registers_test.go new file mode 100644 index 00000000000..4b281ada493 --- /dev/null +++ b/cmd/util/ledger/util/registers/registers_test.go @@ -0,0 +1,486 @@ +package registers + +import ( + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" +) + +func newPayload(address flow.Address, key string, value []byte) *ledger.Payload { + ledgerKey := convert.RegisterIDToLedgerKey(flow.NewRegisterID(address, key)) + return ledger.NewPayload(ledgerKey, value) +} + +func TestNewByAccountFromPayloads(t *testing.T) { + t.Parallel() + + payloads := []*ledger.Payload{ + newPayload(flow.Address{2}, "d", []byte{5}), + newPayload(flow.Address{1}, "a", []byte{4}), + newPayload(flow.Address{2}, "c", []byte{6}), + newPayload(flow.Address{1}, "b", []byte{3}), + } + + byAccount, err := NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + assert.Equal(t, 4, byAccount.Count()) + assert.Equal(t, 2, byAccount.AccountCount()) + + value, err := byAccount.Get("\x01\x00\x00\x00\x00\x00\x00\x00", "a") + require.NoError(t, err) + assert.Equal(t, []byte{4}, value) + + value, err = byAccount.Get("\x01\x00\x00\x00\x00\x00\x00\x00", "b") + require.NoError(t, err) + assert.Equal(t, []byte{3}, value) + + value, err = byAccount.Get("\x02\x00\x00\x00\x00\x00\x00\x00", "c") + require.NoError(t, err) + assert.Equal(t, []byte{6}, value) + + value, err = byAccount.Get("\x02\x00\x00\x00\x00\x00\x00\x00", "d") + require.NoError(t, err) + assert.Equal(t, []byte{5}, value) +} + +func TestByAccount_ForEachAccount(t *testing.T) { + t.Parallel() + + payloads := []*ledger.Payload{ + newPayload(flow.Address{2}, "d", []byte{5}), + newPayload(flow.Address{1}, "a", []byte{4}), + newPayload(flow.Address{2}, "c", []byte{6}), + newPayload(flow.Address{1}, "b", []byte{3}), + newPayload(flow.Address{2}, "e", []byte{7}), + } + + byAccount, err := NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + var seen1, seen2 bool + + _ = byAccount.ForEachAccount(func(accountRegisters *AccountRegisters) error { + owner := accountRegisters.Owner() + switch owner { + case "\x01\x00\x00\x00\x00\x00\x00\x00": + require.False(t, seen1) + seen1 = true + 
assert.Equal(t, 2, accountRegisters.Count()) + + case "\x02\x00\x00\x00\x00\x00\x00\x00": + require.False(t, seen2) + seen2 = true + assert.Equal(t, 3, accountRegisters.Count()) + + default: + t.Fatalf("unexpected owner: %v", owner) + } + + return nil + }) + + assert.True(t, seen1) + assert.True(t, seen2) +} + +func TestByAccount_ForEach(t *testing.T) { + t.Parallel() + + payloads := []*ledger.Payload{ + newPayload(flow.Address{2}, "d", []byte{5}), + newPayload(flow.Address{1}, "a", []byte{4}), + newPayload(flow.Address{2}, "c", []byte{6}), + newPayload(flow.Address{1}, "b", []byte{3}), + newPayload(flow.Address{2}, "e", []byte{7}), + } + + byAccount, err := NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + type register struct { + owner string + key string + value []byte + } + + var registers []register + + _ = byAccount.ForEach(func(owner string, key string, value []byte) error { + registers = append(registers, register{ + owner: owner, + key: key, + value: value, + }) + + return nil + }) + + require.ElementsMatch(t, + []register{ + {"\x02\x00\x00\x00\x00\x00\x00\x00", "d", []byte{5}}, + {"\x01\x00\x00\x00\x00\x00\x00\x00", "a", []byte{4}}, + {"\x02\x00\x00\x00\x00\x00\x00\x00", "c", []byte{6}}, + {"\x01\x00\x00\x00\x00\x00\x00\x00", "b", []byte{3}}, + {"\x02\x00\x00\x00\x00\x00\x00\x00", "e", []byte{7}}, + }, + registers, + ) +} + +func TestByAccount_Set(t *testing.T) { + t.Parallel() + + byAccount := NewByAccount() + assert.Equal(t, 0, byAccount.AccountCount()) + assert.Equal(t, 0, byAccount.Count()) + + // 0x1, a = 5 + + err := byAccount.Set("\x01\x00\x00\x00\x00\x00\x00\x00", "a", []byte{5}) + require.NoError(t, err) + + assert.Equal(t, 1, byAccount.AccountCount()) + assert.Equal(t, 1, byAccount.Count()) + + value, err := byAccount.Get("\x01\x00\x00\x00\x00\x00\x00\x00", "a") + require.NoError(t, err) + assert.Equal(t, []byte{5}, value) + + // 0x1, b = 6 + + err = byAccount.Set("\x01\x00\x00\x00\x00\x00\x00\x00", "b", []byte{6}) + require.NoError(t, err) + + assert.Equal(t, 1, byAccount.AccountCount()) + assert.Equal(t, 2, byAccount.Count()) + + value, err = byAccount.Get("\x01\x00\x00\x00\x00\x00\x00\x00", "a") + require.NoError(t, err) + assert.Equal(t, []byte{5}, value) + + value, err = byAccount.Get("\x01\x00\x00\x00\x00\x00\x00\x00", "b") + require.NoError(t, err) + assert.Equal(t, []byte{6}, value) + + // 0x2, c = 7 + + err = byAccount.Set("\x02\x00\x00\x00\x00\x00\x00\x00", "c", []byte{7}) + require.NoError(t, err) + + assert.Equal(t, 2, byAccount.AccountCount()) + assert.Equal(t, 3, byAccount.Count()) + + value, err = byAccount.Get("\x01\x00\x00\x00\x00\x00\x00\x00", "a") + require.NoError(t, err) + assert.Equal(t, []byte{5}, value) + + value, err = byAccount.Get("\x01\x00\x00\x00\x00\x00\x00\x00", "b") + require.NoError(t, err) + assert.Equal(t, []byte{6}, value) + + value, err = byAccount.Get("\x02\x00\x00\x00\x00\x00\x00\x00", "c") + require.NoError(t, err) + assert.Equal(t, []byte{7}, value) +} + +func TestByAccount_DestructIntoPayloads(t *testing.T) { + t.Parallel() + + payloads := []*ledger.Payload{ + newPayload(flow.Address{2}, "d", []byte{5}), + newPayload(flow.Address{1}, "a", []byte{4}), + newPayload(flow.Address{2}, "c", []byte{6}), + newPayload(flow.Address{1}, "b", []byte{3}), + } + + byAccount, err := NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + newPayloads := byAccount.DestructIntoPayloads(2) + + assert.Equal(t, 0, byAccount.AccountCount()) + assert.Equal(t, 0, byAccount.Count()) + assert.ElementsMatch(t, payloads, 
newPayloads) +} + +func TestByAccount_SetAccountRegisters(t *testing.T) { + t.Parallel() + + byAccount := NewByAccount() + + accountRegisters1 := NewAccountRegisters("\x01\x00\x00\x00\x00\x00\x00\x00") + oldAccountRegisters := byAccount.SetAccountRegisters(accountRegisters1) + require.Nil(t, oldAccountRegisters) + + accountRegisters2 := NewAccountRegisters("\x01\x00\x00\x00\x00\x00\x00\x00") + oldAccountRegisters = byAccount.SetAccountRegisters(accountRegisters2) + require.Same(t, accountRegisters1, oldAccountRegisters) +} + +func TestAccountRegisters_Set(t *testing.T) { + t.Parallel() + + const owner1 = "\x01\x00\x00\x00\x00\x00\x00\x00" + const owner2 = "\x02\x00\x00\x00\x00\x00\x00\x00" + accountRegisters := NewAccountRegisters(owner1) + + err := accountRegisters.Set(owner1, "a", []byte{5}) + require.NoError(t, err) + + err = accountRegisters.Set(owner2, "a", []byte{5}) + require.ErrorContains(t, err, "owner mismatch") +} + +func TestAccountRegisters_Get(t *testing.T) { + t.Parallel() + + const owner1 = "\x01\x00\x00\x00\x00\x00\x00\x00" + const owner2 = "\x02\x00\x00\x00\x00\x00\x00\x00" + accountRegisters := NewAccountRegisters(owner1) + + err := accountRegisters.Set(owner1, "a", []byte{5}) + require.NoError(t, err) + + value, err := accountRegisters.Get(owner1, "a") + require.NoError(t, err) + assert.Equal(t, []byte{5}, value) + + _, err = accountRegisters.Get(owner2, "a") + require.ErrorContains(t, err, "owner mismatch") +} + +func TestNewAccountRegistersFromPayloads(t *testing.T) { + t.Parallel() + + const owner = "\x01\x00\x00\x00\x00\x00\x00\x00" + + payloads := []*ledger.Payload{ + newPayload(flow.Address{1}, "d", []byte{5}), + newPayload(flow.Address{1}, "a", []byte{4}), + newPayload(flow.Address{1}, "c", []byte{6}), + newPayload(flow.Address{1}, "b", []byte{3}), + } + + accountRegisters, err := NewAccountRegistersFromPayloads(owner, payloads) + require.NoError(t, err) + + assert.Equal(t, 4, accountRegisters.Count()) + + value, err := accountRegisters.Get(owner, "a") + require.NoError(t, err) + assert.Equal(t, []byte{4}, value) + + value, err = accountRegisters.Get(owner, "b") + require.NoError(t, err) + assert.Equal(t, []byte{3}, value) + + value, err = accountRegisters.Get(owner, "c") + require.NoError(t, err) + assert.Equal(t, []byte{6}, value) + + value, err = accountRegisters.Get(owner, "d") + require.NoError(t, err) + assert.Equal(t, []byte{5}, value) +} + +func TestAccountRegisters_ForEach(t *testing.T) { + t.Parallel() + + const owner = "\x01\x00\x00\x00\x00\x00\x00\x00" + + payloads := []*ledger.Payload{ + newPayload(flow.Address{1}, "d", []byte{5}), + newPayload(flow.Address{1}, "a", []byte{4}), + newPayload(flow.Address{1}, "c", []byte{6}), + newPayload(flow.Address{1}, "b", []byte{3}), + newPayload(flow.Address{1}, "e", []byte{7}), + } + + accountRegisters, err := NewAccountRegistersFromPayloads(owner, payloads) + require.NoError(t, err) + + type register struct { + owner string + key string + value []byte + } + + var registers []register + + _ = accountRegisters.ForEach(func(owner string, key string, value []byte) error { + registers = append(registers, register{ + owner: owner, + key: key, + value: value, + }) + + return nil + }) + + require.ElementsMatch(t, + []register{ + {"\x01\x00\x00\x00\x00\x00\x00\x00", "d", []byte{5}}, + {"\x01\x00\x00\x00\x00\x00\x00\x00", "a", []byte{4}}, + {"\x01\x00\x00\x00\x00\x00\x00\x00", "c", []byte{6}}, + {"\x01\x00\x00\x00\x00\x00\x00\x00", "b", []byte{3}}, + {"\x01\x00\x00\x00\x00\x00\x00\x00", "e", []byte{7}}, + }, + registers, 
+ ) +} + +func TestAccountRegisters_Payloads(t *testing.T) { + t.Parallel() + + const owner = "\x01\x00\x00\x00\x00\x00\x00\x00" + + payloads := []*ledger.Payload{ + newPayload(flow.Address{1}, "d", []byte{5}), + newPayload(flow.Address{1}, "a", []byte{4}), + newPayload(flow.Address{1}, "c", []byte{6}), + newPayload(flow.Address{1}, "b", []byte{3}), + newPayload(flow.Address{1}, "e", []byte{7}), + } + + accountRegisters, err := NewAccountRegistersFromPayloads(owner, payloads) + require.NoError(t, err) + + newPayloads := accountRegisters.Payloads() + require.ElementsMatch(t, payloads, newPayloads) +} + +func TestAccountRegisters_Merge(t *testing.T) { + t.Parallel() + + const owner = "\x01\x00\x00\x00\x00\x00\x00\x00" + + payloads1 := []*ledger.Payload{ + newPayload(flow.Address{1}, "d", []byte{5}), + newPayload(flow.Address{1}, "a", []byte{4}), + newPayload(flow.Address{1}, "c", []byte{6}), + } + + accountRegisters1, err := NewAccountRegistersFromPayloads(owner, payloads1) + require.NoError(t, err) + + payloads2 := []*ledger.Payload{ + newPayload(flow.Address{1}, "b", []byte{3}), + newPayload(flow.Address{1}, "e", []byte{7}), + } + + accountRegisters2, err := NewAccountRegistersFromPayloads(owner, payloads2) + require.NoError(t, err) + + err = accountRegisters1.Merge(accountRegisters2) + require.NoError(t, err) + + allPayloads := append(payloads1, payloads2...) + + newPayloads := accountRegisters1.Payloads() + require.ElementsMatch(t, allPayloads, newPayloads) +} + +func TestApplyChanges_ByAccount(t *testing.T) { + t.Parallel() + + const owner1 = "\x01\x00\x00\x00\x00\x00\x00\x00" + const owner2 = "\x02\x00\x00\x00\x00\x00\x00\x00" + + payloads := []*ledger.Payload{ + newPayload(flow.Address{1}, "d", []byte{5}), + newPayload(flow.Address{2}, "a", []byte{4}), + newPayload(flow.Address{1}, "c", []byte{6}), + newPayload(flow.Address{2}, "b", []byte{3}), + newPayload(flow.Address{1}, "e", []byte{7}), + } + + byAccount, err := NewByAccountFromPayloads(payloads) + require.NoError(t, err) + + changes := map[flow.RegisterID]flow.RegisterValue{ + {Owner: owner1, Key: "c"}: {8}, + {Owner: owner1, Key: "f"}: {9}, + {Owner: owner1, Key: "a"}: {10}, + {Owner: owner2, Key: "a"}: {11}, + } + + err = ApplyChanges( + byAccount, + changes, + map[flow.Address]struct{}{ + {1}: {}, + {2}: {}, + }, + zerolog.Nop(), + ) + require.NoError(t, err) + + newPayloads := byAccount.DestructIntoPayloads(2) + + require.ElementsMatch(t, + []*ledger.Payload{ + newPayload(flow.Address{1}, "d", []byte{5}), + newPayload(flow.Address{2}, "a", []byte{11}), + newPayload(flow.Address{1}, "c", []byte{8}), + newPayload(flow.Address{2}, "b", []byte{3}), + newPayload(flow.Address{1}, "e", []byte{7}), + newPayload(flow.Address{1}, "f", []byte{9}), + newPayload(flow.Address{1}, "a", []byte{10}), + }, + newPayloads, + ) +} + +func TestApplyChanges_AccountRegisters(t *testing.T) { + t.Parallel() + + const owner = "\x01\x00\x00\x00\x00\x00\x00\x00" + + payloads := []*ledger.Payload{ + newPayload(flow.Address{1}, "d", []byte{5}), + newPayload(flow.Address{1}, "a", []byte{4}), + newPayload(flow.Address{1}, "c", []byte{6}), + newPayload(flow.Address{1}, "b", []byte{3}), + newPayload(flow.Address{1}, "e", []byte{7}), + } + + accountRegisters, err := NewAccountRegistersFromPayloads(owner, payloads) + require.NoError(t, err) + + changes := map[flow.RegisterID]flow.RegisterValue{ + {Owner: owner, Key: "c"}: {8}, + {Owner: owner, Key: "f"}: {9}, + {Owner: owner, Key: "a"}: {10}, + } + + err = ApplyChanges( + accountRegisters, + changes, + 
map[flow.Address]struct{}{ + {1}: {}, + }, + zerolog.Nop(), + ) + require.NoError(t, err) + + newPayloads := accountRegisters.Payloads() + + require.ElementsMatch(t, + []*ledger.Payload{ + newPayload(flow.Address{1}, "d", []byte{5}), + newPayload(flow.Address{1}, "a", []byte{10}), + newPayload(flow.Address{1}, "c", []byte{8}), + newPayload(flow.Address{1}, "b", []byte{3}), + newPayload(flow.Address{1}, "e", []byte{7}), + newPayload(flow.Address{1}, "f", []byte{9}), + }, + newPayloads, + ) +} diff --git a/cmd/util/ledger/util/registers_util.go b/cmd/util/ledger/util/registers_util.go new file mode 100644 index 00000000000..9fe59dcc074 --- /dev/null +++ b/cmd/util/ledger/util/registers_util.go @@ -0,0 +1,121 @@ +package util + +import ( + "context" + "fmt" + "sync/atomic" + + "github.com/rs/zerolog/log" + "golang.org/x/sync/errgroup" + + "github.com/onflow/flow-go/cmd/util/ledger/util/registers" + "github.com/onflow/flow-go/model/flow" +) + +func NewByAccountRegistersFromPayloadAccountGrouping( + payloadAccountGrouping *PayloadAccountGrouping, + nWorker int, +) ( + *registers.ByAccount, + error, +) { + accountCount := payloadAccountGrouping.Len() + + if accountCount == 0 { + return registers.NewByAccount(), nil + } + + // Set nWorker to be the lesser of nWorker and accountCount + // but greater than 0. + nWorker = min(nWorker, accountCount) + nWorker = max(nWorker, 1) + + g, ctx := errgroup.WithContext(context.Background()) + + jobs := make(chan *PayloadAccountGroup, nWorker) + results := make(chan *registers.AccountRegisters, nWorker) + + g.Go(func() error { + defer close(jobs) + for { + payloadAccountGroup, err := payloadAccountGrouping.Next() + if err != nil { + return fmt.Errorf("failed to group payloads by account: %w", err) + } + + if payloadAccountGroup == nil { + return nil + } + + select { + case <-ctx.Done(): + return ctx.Err() + case jobs <- payloadAccountGroup: + } + } + }) + + workersLeft := int64(nWorker) + for i := 0; i < nWorker; i++ { + g.Go(func() error { + defer func() { + if atomic.AddInt64(&workersLeft, -1) == 0 { + close(results) + } + }() + + for payloadAccountGroup := range jobs { + + // Convert address to owner + payloadGroupOwner := flow.AddressToRegisterOwner(payloadAccountGroup.Address) + + accountRegisters, err := registers.NewAccountRegistersFromPayloads( + payloadGroupOwner, + payloadAccountGroup.Payloads, + ) + if err != nil { + return fmt.Errorf("failed to create account registers from payloads: %w", err) + } + select { + case <-ctx.Done(): + return ctx.Err() + case results <- accountRegisters: + } + } + + return nil + }) + } + + registersByAccount := registers.NewByAccount() + g.Go(func() error { + for accountRegisters := range results { + oldAccountRegisters := registersByAccount.SetAccountRegisters(accountRegisters) + if oldAccountRegisters != nil { + // TODO: check full migration logs to see if this edge case of multiple groups + // for an account still exists. If it still exists, create an issue to fix it. + // Otherwise, we can treat this as error and panic (instead of merging groups). + + // Account grouping should never create multiple groups for an account. + // In case it does anyway, merge the groups together, + // by merging the existing registers into the new ones. + + log.Warn().Msgf( + "account registers already exist for account %x. 
Merging %d existing registers into %d new",
+					accountRegisters.Owner(),
+					oldAccountRegisters.Count(),
+					accountRegisters.Count(),
+				)
+
+				err := accountRegisters.Merge(oldAccountRegisters)
+				if err != nil {
+					return fmt.Errorf("failed to merge account registers: %w", err)
+				}
+			}
+		}
+
+		return nil
+	})
+
+	return registersByAccount, g.Wait()
+}
diff --git a/cmd/util/ledger/util/state.go b/cmd/util/ledger/util/state.go
new file mode 100644
index 00000000000..7ef36270040
--- /dev/null
+++ b/cmd/util/ledger/util/state.go
@@ -0,0 +1,120 @@
+package util
+
+import (
+	"encoding/hex"
+	"fmt"
+	"math"
+
+	"github.com/rs/zerolog/log"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/pathfinder"
+	"github.com/onflow/flow-go/ledger/complete"
+	mtrie "github.com/onflow/flow-go/ledger/complete/mtrie/trie"
+	"github.com/onflow/flow-go/ledger/complete/wal"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+)
+
+func ReadTrie(dir string, targetHash flow.StateCommitment) (*mtrie.MTrie, error) {
+	log.Info().Msg("init WAL")
+
+	diskWal, err := wal.NewDiskWAL(
+		log.Logger,
+		nil,
+		metrics.NewNoopCollector(),
+		dir,
+		complete.DefaultCacheSize,
+		pathfinder.PathByteSize,
+		wal.SegmentSize,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("cannot create disk WAL: %w", err)
+	}
+
+	log.Info().Msg("init ledger")
+
+	led, err := complete.NewLedger(
+		diskWal,
+		complete.DefaultCacheSize,
+		&metrics.NoopCollector{},
+		log.Logger,
+		complete.DefaultPathFinderVersion)
+	if err != nil {
+		return nil, fmt.Errorf("cannot create ledger from write-ahead logs and checkpoints: %w", err)
+	}
+
+	const (
+		checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation.
+		checkpointsToKeep  = 1
+	)
+
+	log.Info().Msg("init compactor")
+
+	compactor, err := complete.NewCompactor(
+		led,
+		diskWal,
+		log.Logger,
+		complete.DefaultCacheSize,
+		checkpointDistance,
+		checkpointsToKeep,
+		atomic.NewBool(false),
+		&metrics.NoopCollector{},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("cannot create compactor: %w", err)
+	}
+
+	log.Info().Msg("waiting for compactor to load checkpoint and WAL")
+
+	<-compactor.Ready()
+
+	defer func() {
+		<-led.Done()
+		<-compactor.Done()
+	}()
+
+	state := ledger.State(targetHash)
+
+	trie, err := led.Trie(ledger.RootHash(state))
+	if err != nil {
+		s, err2 := led.MostRecentTouchedState()
+		if err2 != nil {
+			log.Error().Err(err2).
+				Msgf("cannot get most recently touched state in %v, check the --execution-state-dir flag", dir)
+		} else if s == ledger.State(mtrie.NewEmptyMTrie().RootHash()) {
+			log.Error().Msgf("cannot find any trie in folder %v. check the --execution-state-dir flag", dir)
+		} else {
+			log.Info().
+				Str("hash", s.String()).
+				Msg("Most recently touched state")
+		}
+		return nil, fmt.Errorf("cannot get trie at the given state commitment: %w", err)
+	}
+
+	return trie, nil
+}
+
+func ReadTrieForPayloads(dir string, targetHash flow.StateCommitment) ([]*ledger.Payload, error) {
+	trie, err := ReadTrie(dir, targetHash)
+	if err != nil {
+		return nil, err
+	}
+	return trie.AllPayloads(), nil
+}
+
+func ParseStateCommitment(stateCommitmentHex string) flow.StateCommitment {
+	stateCommitmentBytes, err := hex.DecodeString(stateCommitmentHex)
+	if err != nil {
+		log.Fatal().Err(err).Msg("cannot hex-decode the state commitment")
+	}
+
+	stateCommitment, err := flow.ToStateCommitment(stateCommitmentBytes)
+	if err != nil {
+		log.Fatal().Err(err).Msg("invalid state commitment length")
+	}
+
+	return stateCommitment
+}
diff --git a/cmd/util/ledger/util/topn.go b/cmd/util/ledger/util/topn.go
new file mode 100644
index 00000000000..25031974477
--- /dev/null
+++ b/cmd/util/ledger/util/topn.go
@@ -0,0 +1,64 @@
+package util
+
+import (
+	"container/heap"
+)
+
+// TopN keeps track of the top N elements.
+// Use Add to add elements to the list.
+type TopN[T any] struct {
+	Tree   []T
+	N      int
+	IsLess func(T, T) bool
+}
+
+func NewTopN[T any](n int, isLess func(T, T) bool) *TopN[T] {
+	return &TopN[T]{
+		Tree:   make([]T, 0, n),
+		N:      n,
+		IsLess: isLess,
+	}
+}
+
+func (h *TopN[T]) Len() int {
+	return len(h.Tree)
+}
+
+func (h *TopN[T]) Less(i, j int) bool {
+	a := h.Tree[i]
+	b := h.Tree[j]
+	return h.IsLess(a, b)
+}
+
+func (h *TopN[T]) Swap(i, j int) {
+	h.Tree[i], h.Tree[j] =
+		h.Tree[j], h.Tree[i]
+}
+
+func (h *TopN[T]) Push(x interface{}) {
+	h.Tree = append(h.Tree, x.(T))
+}
+
+func (h *TopN[T]) Pop() interface{} {
+	tree := h.Tree
+	count := len(tree)
+	lastIndex := count - 1
+	last := tree[lastIndex]
+	var empty T
+	tree[lastIndex] = empty
+	h.Tree = tree[0:lastIndex]
+	return last
+}
+
+// Add tries to add a value to the list.
+// If the list is already full, the smallest value (which may be the value
+// just added) is removed and returned together with true.
+// If the list is not full, it returns the zero value and false.
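+//
+// For example, with N = 3 and the values 5, 2, 3 already added, adding 1
+// immediately pops 1 back out (it is smaller than every retained element),
+// while adding 4 would pop the current minimum, 2.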
+func (h *TopN[T]) Add(value T) (popped T, didPop bool) { + heap.Push(h, value) + if h.Len() > h.N { + popped := heap.Pop(h).(T) + return popped, true + } + var empty T + return empty, false +} diff --git a/cmd/util/ledger/util/topn_test.go b/cmd/util/ledger/util/topn_test.go new file mode 100644 index 00000000000..cf32f9b425e --- /dev/null +++ b/cmd/util/ledger/util/topn_test.go @@ -0,0 +1,74 @@ +package util + +import ( + "container/heap" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTopN(t *testing.T) { + t.Parallel() + + topN := NewTopN( + 3, + func(a, b int) bool { + return a < b + }, + ) + + _, didPop := topN.Add(5) + assert.False(t, didPop) + assert.ElementsMatch(t, + []int{5}, + topN.Tree, + ) + + _, didPop = topN.Add(2) + assert.False(t, didPop) + assert.ElementsMatch(t, + []int{5, 2}, + topN.Tree, + ) + + _, didPop = topN.Add(3) + assert.False(t, didPop) + assert.ElementsMatch(t, + []int{5, 3, 2}, + topN.Tree, + ) + + popped, didPop := topN.Add(3) + assert.True(t, didPop) + assert.Equal(t, 2, popped) + assert.ElementsMatch(t, + []int{5, 3, 3}, + topN.Tree, + ) + + popped, didPop = topN.Add(1) + assert.True(t, didPop) + assert.Equal(t, 1, popped) + assert.ElementsMatch(t, + []int{5, 3, 3}, + topN.Tree, + ) + + popped, didPop = topN.Add(4) + assert.True(t, didPop) + assert.Equal(t, 3, popped) + assert.ElementsMatch(t, + []int{5, 4, 3}, + topN.Tree, + ) + + sorted := make([]int, len(topN.Tree)) + for index := topN.Len() - 1; index >= 0; index-- { + sorted[index] = heap.Pop(topN).(int) + } + assert.Equal(t, + []int{5, 4, 3}, + sorted, + ) + assert.Empty(t, topN.Tree) +} diff --git a/cmd/util/ledger/util/trace.go b/cmd/util/ledger/util/trace.go new file mode 100644 index 00000000000..1bddb921995 --- /dev/null +++ b/cmd/util/ledger/util/trace.go @@ -0,0 +1,39 @@ +package util + +import "strings" + +type Trace struct { + value string + parent *Trace +} + +func NewTrace(value string) *Trace { + return &Trace{ + value: value, + } +} + +func (t *Trace) Append(value string) *Trace { + return &Trace{ + value: value, + parent: t, + } +} + +func (t *Trace) String() string { + var sb strings.Builder + + var size int + var elements []*Trace + for current := t; current != nil; current = current.parent { + size += len(current.value) + elements = append(elements, current) + } + sb.Grow(size) + + for i := len(elements) - 1; i >= 0; i-- { + sb.WriteString(elements[i].value) + } + + return sb.String() +} diff --git a/cmd/util/ledger/util/trace_test.go b/cmd/util/ledger/util/trace_test.go new file mode 100644 index 00000000000..3b1fc1a468b --- /dev/null +++ b/cmd/util/ledger/util/trace_test.go @@ -0,0 +1,26 @@ +package util + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTrace(t *testing.T) { + t.Parallel() + + trace := NewTrace("foo") + assert.Equal(t, "foo", trace.String()) + + traceBar := trace.Append(".bar") + assert.Equal(t, "foo.bar", traceBar.String()) + + traceBarBaz := traceBar.Append(".baz") + assert.Equal(t, "foo.bar.baz", traceBarBaz.String()) + + traceQux := trace.Append(".qux") + assert.Equal(t, "foo.qux", traceQux.String()) + + traceQuux := traceQux.Append(".quux") + assert.Equal(t, "foo.qux.quux", traceQuux.String()) +} diff --git a/cmd/util/ledger/util/util.go b/cmd/util/ledger/util/util.go new file mode 100644 index 00000000000..62fcf601dc7 --- /dev/null +++ b/cmd/util/ledger/util/util.go @@ -0,0 +1,247 @@ +package util + +import ( + "database/sql" + "encoding/binary" + "encoding/hex" + "fmt" + "strconv" + "strings" + + 
"github.com/onflow/atree" + "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" +) + +func newRegisterID(owner []byte, key []byte) flow.RegisterID { + return flow.NewRegisterID(flow.BytesToAddress(owner), string(key)) +} + +type AccountsAtreeLedger struct { + Accounts environment.Accounts +} + +func NewAccountsAtreeLedger(accounts environment.Accounts) *AccountsAtreeLedger { + return &AccountsAtreeLedger{ + Accounts: accounts, + } +} + +var _ atree.Ledger = &AccountsAtreeLedger{} + +func (a *AccountsAtreeLedger) GetValue(owner, key []byte) ([]byte, error) { + if common.Address(owner) == common.ZeroAddress { + return nil, nil + } + + registerID := newRegisterID(owner, key) + v, err := a.Accounts.GetValue(registerID) + if err != nil { + return nil, fmt.Errorf("getting value failed: %w", err) + } + return v, nil +} + +func (a *AccountsAtreeLedger) SetValue(owner, key, value []byte) error { + registerID := newRegisterID(owner, key) + err := a.Accounts.SetValue(registerID, value) + if err != nil { + return fmt.Errorf("setting value failed: %w", err) + } + return nil +} + +func (a *AccountsAtreeLedger) ValueExists(owner, key []byte) (exists bool, err error) { + v, err := a.GetValue(owner, key) + if err != nil { + return false, fmt.Errorf("checking value existence failed: %w", err) + } + + return len(v) > 0, nil +} + +// AllocateSlabIndex allocates new storage index under the owner accounts to store a new register +func (a *AccountsAtreeLedger) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) { + v, err := a.Accounts.AllocateSlabIndex(flow.BytesToAddress(owner)) + if err != nil { + return atree.SlabIndex{}, fmt.Errorf("storage index allocation failed: %w", err) + } + return v, nil +} + +// IsServiceLevelAddress returns true if the given address is the service level address. +// Which means it's not an actual account but instead holds service lever registers. 
+func IsServiceLevelAddress(address common.Address) bool {
+	return address == common.ZeroAddress
+}
+
+func PayloadsFromEmulatorSnapshot(snapshotPath string) ([]*ledger.Payload, error) {
+	db, err := sql.Open("sqlite", snapshotPath)
+	if err != nil {
+		return nil, err
+	}
+
+	payloads, _, _, err := PayloadsAndAccountsFromEmulatorSnapshot(db)
+	return payloads, err
+}
+
+func PayloadsAndAccountsFromEmulatorSnapshot(db *sql.DB) (
+	[]*ledger.Payload,
+	map[flow.RegisterID]PayloadMetaInfo,
+	[]common.Address,
+	error,
+) {
+	rows, err := db.Query("SELECT key, value, version, height FROM ledger ORDER BY height DESC")
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	var payloads []*ledger.Payload
+	var accounts []common.Address
+	accountsSet := make(map[common.Address]struct{})
+
+	payloadSet := make(map[flow.RegisterID]PayloadMetaInfo)
+
+	for rows.Next() {
+		var hexKey, hexValue string
+		var height, version uint64
+
+		// Scan in the same order as the SELECT column list above.
+		err := rows.Scan(&hexKey, &hexValue, &version, &height)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+
+		key, err := hex.DecodeString(hexKey)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+
+		value, err := hex.DecodeString(hexValue)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+
+		registerId, address := registerIDKeyFromString(string(key))
+
+		if _, contains := accountsSet[address]; !contains {
+			accountsSet[address] = struct{}{}
+			accounts = append(accounts, address)
+		}
+
+		ledgerKey := convert.RegisterIDToLedgerKey(registerId)
+
+		payload := ledger.NewPayload(
+			ledgerKey,
+			value,
+		)
+
+		// Rows are ordered by height descending, so the first occurrence of a
+		// register is the latest one; older occurrences are skipped.
+		if _, ok := payloadSet[registerId]; ok {
+			continue
+		}
+
+		payloads = append(payloads, payload)
+		payloadSet[registerId] = PayloadMetaInfo{
+			Height:  height,
+			Version: version,
+		}
+	}
+
+	return payloads, payloadSet, accounts, nil
+}
+
+// registerIDKeyFromString is the inverse of the `flow.RegisterID.String()` method.
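+// The input is expected to have the form "<hex-encoded owner address>/<key>",
+// where the key is either "$<decimal slab index>" or "#<hex-encoded bytes>".
+// For example (illustrative), "ee82856bf20e2aa6/$1" decodes to the owner
+// address 0xee82856bf20e2aa6 and the 9-byte slab key '$' followed by the
+// 8-byte big-endian encoding of 1.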
+func registerIDKeyFromString(s string) (flow.RegisterID, common.Address) { + parts := strings.SplitN(s, "/", 2) + + owner := parts[0] + key := parts[1] + + address, err := common.HexToAddress(owner) + if err != nil { + panic(err) + } + + var decodedKey string + + switch key[0] { + case '$': + b := make([]byte, 9) + b[0] = '$' + + int64Value, err := strconv.ParseInt(key[1:], 10, 64) + if err != nil { + panic(err) + } + + binary.BigEndian.PutUint64(b[1:], uint64(int64Value)) + + decodedKey = string(b) + case '#': + decoded, err := hex.DecodeString(key[1:]) + if err != nil { + panic(err) + } + decodedKey = string(decoded) + default: + panic("Invalid register key") + } + + return flow.RegisterID{ + Owner: string(address.Bytes()), + Key: decodedKey, + }, + address +} + +type PayloadMetaInfo struct { + Height, Version uint64 +} + +// PayloadsLedger is a simple read/write in-memory atree.Ledger implementation +type PayloadsLedger struct { + Payloads map[flow.RegisterID]*ledger.Payload + + AllocateSlabIndexFunc func(owner []byte) (atree.SlabIndex, error) +} + +var _ atree.Ledger = &PayloadsLedger{} + +func NewPayloadsLedger(payloads map[flow.RegisterID]*ledger.Payload) *PayloadsLedger { + return &PayloadsLedger{ + Payloads: payloads, + } +} + +func (p *PayloadsLedger) GetValue(owner, key []byte) (value []byte, err error) { + registerID := newRegisterID(owner, key) + v, ok := p.Payloads[registerID] + if !ok { + return nil, nil + } + return v.Value(), nil +} + +func (p *PayloadsLedger) SetValue(owner, key, value []byte) (err error) { + registerID := newRegisterID(owner, key) + ledgerKey := convert.RegisterIDToLedgerKey(registerID) + p.Payloads[registerID] = ledger.NewPayload(ledgerKey, value) + return nil +} + +func (p *PayloadsLedger) ValueExists(owner, key []byte) (exists bool, err error) { + registerID := newRegisterID(owner, key) + _, ok := p.Payloads[registerID] + return ok, nil +} + +func (p *PayloadsLedger) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) { + if p.AllocateSlabIndexFunc != nil { + return p.AllocateSlabIndexFunc(owner) + } + + panic("AllocateSlabIndex not expected to be called") +} diff --git a/cmd/utils.go b/cmd/utils.go index bfc77542c8d..536483cabc2 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -1,20 +1,26 @@ package cmd import ( + "encoding/hex" "encoding/json" "fmt" "path/filepath" + "strings" "github.com/libp2p/go-libp2p/core/peer" - "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/mempool/queue" - "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/distributor" + "github.com/onflow/flow-go/network/p2p/cache" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" + "github.com/onflow/flow-go/network/p2p/translator" + "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/utils/io" ) @@ -48,6 +54,26 @@ func LoadPrivateNodeInfo(dir string, myID flow.Identifier) (*bootstrap.NodeInfoP return &info, err } +func LoadNetworkPrivateKey(dir string, myID flow.Identifier) (crypto.PrivateKey, error) { + path := filepath.Join(dir, fmt.Sprintf(filepath.Join(bootstrap.DirPrivateRoot, + "private-node-info_%v/network_private_key"), myID)) + data, err := io.ReadFile(path) + if 
err != nil {
+		return nil, fmt.Errorf("could not read network private key (path=%s): %w", path, err)
+	}
+
+	keyBytes, err := hex.DecodeString(strings.Trim(string(data), "\n "))
+	if err != nil {
+		return nil, fmt.Errorf("could not hex decode networking key (path=%s): %w", path, err)
+	}
+
+	networkingKey, err := crypto.DecodePrivateKey(crypto.ECDSASecp256k1, keyBytes)
+	if err != nil {
+		return nil, fmt.Errorf("could not decode networking key (path=%s): %w", path, err)
+	}
+	return networkingKey, nil
+}
+
 // loadSecretsEncryptionKey loads the encryption key for the secrets database.
 // If the file does not exist, returns os.ErrNotExist.
 func loadSecretsEncryptionKey(dir string, myID flow.Identifier) ([]byte, error) {
@@ -69,12 +95,77 @@ func rateLimiterPeerFilter(rateLimiter p2p.RateLimiter) p2p.PeerFilter {
 	}
 }
 
-// BuildDisallowListNotificationDisseminator builds the disallow list notification distributor.
-func BuildDisallowListNotificationDisseminator(size uint32, metricsRegistry prometheus.Registerer, logger zerolog.Logger, metricsEnabled bool) p2p.DisallowListNotificationDistributor {
-	heroStoreOpts := []queue.HeroStoreConfigOption{queue.WithHeroStoreSizeLimit(size)}
-	if metricsEnabled {
-		collector := metrics.DisallowListNotificationQueueMetricFactory(metricsRegistry)
-		heroStoreOpts = append(heroStoreOpts, queue.WithHeroStoreCollector(collector))
+// BootstrapIdentities converts the bootstrap node addresses and keys to a Flow Identity list where
+// each Flow Identity is initialized with the passed address, the networking key
+// and the Node ID set to ZeroID, role set to Access, 0 stake and no staking key.
+func BootstrapIdentities(addresses []string, keys []string) (flow.IdentitySkeletonList, error) {
+	if len(addresses) != len(keys) {
+		return nil, fmt.Errorf("number of addresses and keys provided for the bootstrap nodes does not match")
+	}
+
+	ids := make(flow.IdentitySkeletonList, len(addresses))
+	for i, address := range addresses {
+		bytes, err := hex.DecodeString(keys[i])
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode secured GRPC server public key hex: %w", err)
+		}
+
+		publicFlowNetworkingKey, err := crypto.DecodePublicKey(crypto.ECDSAP256, bytes)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get public flow networking key: could not decode public key bytes: %w", err)
+		}
+
+		// create the identity of the peer by setting only the relevant fields
+		ids[i] = &flow.IdentitySkeleton{
+			NodeID:        flow.ZeroID, // the NodeID is the hash of the staking key and for the public network it does not apply
+			Address:       address,
+			Role:          flow.RoleAccess, // the upstream node has to be an access node
+			NetworkPubKey: publicFlowNetworkingKey,
+		}
+	}
+	return ids, nil
+}
+
+func CreatePublicIDTranslatorAndIdentifierProvider(
+	logger zerolog.Logger,
+	networkKey crypto.PrivateKey,
+	sporkID flow.Identifier,
+	getLibp2pNode func() p2p.LibP2PNode,
+	idCache *cache.ProtocolStateIDCache,
+) (
+	p2p.IDTranslator,
+	func() module.IdentifierProvider,
+	error,
+) {
+	idTranslator := translator.NewHierarchicalIDTranslator(idCache, translator.NewPublicNetworkIDTranslator())
+
+	peerID, err := peerIDFromNetworkKey(networkKey)
+	if err != nil {
+		return nil, nil, fmt.Errorf("could not get peer ID from network key: %w", err)
 	}
-	return distributor.DefaultDisallowListNotificationDistributor(logger, heroStoreOpts...)
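+	// The identifier provider below resolves the peer list lazily on each
+	// call, so it reflects the currently connected libp2p peers rather than
+	// a snapshot taken at construction time.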
+ // use the default identifier provider + factory := func() module.IdentifierProvider { + return id.NewCustomIdentifierProvider(func() flow.IdentifierList { + pids := getLibp2pNode().GetPeersForProtocol(protocols.FlowProtocolID(sporkID)) + result := make(flow.IdentifierList, 0, len(pids)) + + for _, pid := range pids { + // exclude own Identifier + if pid == peerID { + continue + } + + if flowID, err := idTranslator.GetFlowID(pid); err != nil { + // TODO: this is an instance of "log error and continue with best effort" anti-pattern + logger.Debug().Str("peer", p2plogging.PeerId(pid)).Msg("failed to translate to Flow ID") + } else { + result = append(result, flowID) + } + } + + return result + }) + } + + return idTranslator, factory, nil } diff --git a/cmd/verification/main.go b/cmd/verification/main.go index 6c9fbdd50e3..10ba2ae2670 100644 --- a/cmd/verification/main.go +++ b/cmd/verification/main.go @@ -1,6 +1,8 @@ package main import ( + "context" + "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/model/flow" ) @@ -20,5 +22,5 @@ func main() { if err != nil { verificationBuilder.FlowNodeBuilder.Logger.Fatal().Err(err).Send() } - node.Run() + node.Run(context.Background()) } diff --git a/cmd/verification_builder.go b/cmd/verification_builder.go index f5acdf2641f..47dbfd153a8 100644 --- a/cmd/verification_builder.go +++ b/cmd/verification_builder.go @@ -18,6 +18,7 @@ import ( "github.com/onflow/flow-go/engine/common/follower" followereng "github.com/onflow/flow-go/engine/common/follower" commonsync "github.com/onflow/flow-go/engine/common/synchronization" + "github.com/onflow/flow-go/engine/execution/computation" "github.com/onflow/flow-go/engine/verification/assigner" "github.com/onflow/flow-go/engine/verification/assigner/blockconsumer" "github.com/onflow/flow-go/engine/verification/fetcher" @@ -29,7 +30,6 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/chainsync" "github.com/onflow/flow-go/module/chunks" - modulecompliance "github.com/onflow/flow-go/module/compliance" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/mempool/stdmap" @@ -37,7 +37,8 @@ import ( "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) type VerificationConfig struct { @@ -53,7 +54,8 @@ type VerificationConfig struct { blockWorkers uint64 // number of blocks processed in parallel. chunkWorkers uint64 // number of chunks processed in parallel. 
- stopAtHeight uint64 // height to stop the node on + stopAtHeight uint64 // height to stop the node on + scheduledCallbacksEnabled bool // enable execution of scheduled callbacks } type VerificationNodeBuilder struct { @@ -81,6 +83,7 @@ func (v *VerificationNodeBuilder) LoadFlags() { flags.Uint64Var(&v.verConf.blockWorkers, "block-workers", blockconsumer.DefaultBlockWorkers, "maximum number of blocks being processed in parallel") flags.Uint64Var(&v.verConf.chunkWorkers, "chunk-workers", chunkconsumer.DefaultChunkWorkers, "maximum number of execution nodes a chunk data pack request is dispatched to") flags.Uint64Var(&v.verConf.stopAtHeight, "stop-at-height", 0, "height to stop the node at (0 to disable)") + flags.BoolVar(&v.verConf.scheduledCallbacksEnabled, "scheduled-callbacks-enabled", fvm.DefaultScheduledCallbacksEnabled, "enable execution of scheduled callbacks") }) } @@ -88,11 +91,11 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { var ( followerState protocol.FollowerState - chunkStatuses *stdmap.ChunkStatuses // used in fetcher engine - chunkRequests *stdmap.ChunkRequests // used in requester engine - processedChunkIndex *badger.ConsumerProgress // used in chunk consumer - processedBlockHeight *badger.ConsumerProgress // used in block consumer - chunkQueue *badger.ChunksQueue // used in chunk consumer + chunkStatuses *stdmap.ChunkStatuses // used in fetcher engine + chunkRequests *stdmap.ChunkRequests // used in requester engine + processedChunkIndex storage.ConsumerProgressInitializer // used in chunk consumer + processedBlockHeight storage.ConsumerProgressInitializer // used in block consumer + chunkQueue storage.ChunksQueue // used in chunk consumer syncCore *chainsync.Core // used in follower engine assignerEngine *assigner.Engine // the assigner engine @@ -155,20 +158,26 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { return nil }). Module("processed chunk index consumer progress", func(node *NodeConfig) error { - processedChunkIndex = badger.NewConsumerProgress(node.DB, module.ConsumeProgressVerificationChunkIndex) + processedChunkIndex = store.NewConsumerProgress(node.ProtocolDB, module.ConsumeProgressVerificationChunkIndex) return nil }). Module("processed block height consumer progress", func(node *NodeConfig) error { - processedBlockHeight = badger.NewConsumerProgress(node.DB, module.ConsumeProgressVerificationBlockHeight) + processedBlockHeight = store.NewConsumerProgress(node.ProtocolDB, module.ConsumeProgressVerificationBlockHeight) return nil }). Module("chunks queue", func(node *NodeConfig) error { - chunkQueue = badger.NewChunkQueue(node.DB) - ok, err := chunkQueue.Init(chunkconsumer.DefaultJobIndex) + var ok bool + var err error + + queue := store.NewChunkQueue(node.Metrics.Cache, node.ProtocolDB) + ok, err = queue.Init(chunkconsumer.DefaultJobIndex) if err != nil { return fmt.Errorf("could not initialize default index in chunks queue: %w", err) } + chunkQueue = queue + node.Logger.Info().Msgf("chunks queue index has been initialized with protocol db batch updates") + node.Logger.Info(). Str("component", "node-builder"). Bool("init_to_default", ok). 
@@ -195,18 +204,33 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { []fvm.Option{fvm.WithLogger(node.Logger)}, node.FvmOptions..., ) + + // TODO(JanezP): cleanup creation of fvm context github.com/onflow/flow-go/issues/5249 + fvmOptions = append( + fvmOptions, + computation.DefaultFVMOptions( + node.RootChainID, + false, + v.verConf.scheduledCallbacksEnabled, + )..., + ) vmCtx := fvm.NewContext(fvmOptions...) + chunkVerifier := chunks.NewChunkVerifier(vm, vmCtx, node.Logger) - approvalStorage := badger.NewResultApprovals(node.Metrics.Cache, node.DB) + + approvalStorage := store.NewResultApprovals(node.Metrics.Cache, node.ProtocolDB, node.StorageLockMgr) + verifierEng, err = verifier.New( node.Logger, collector, node.Tracer, - node.Network, + node.EngineRegistry, node.State, node.Me, chunkVerifier, - approvalStorage) + approvalStorage, + node.StorageLockMgr, + ) return verifierEng, err }). Component("chunk consumer, requester, and fetcher engines", func(node *NodeConfig) (module.ReadyDoneAware, error) { @@ -215,7 +239,7 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { requesterEngine, err = requester.New( node.Logger, node.State, - node.Network, + node.EngineRegistry, node.Tracer, collector, chunkRequests, @@ -246,7 +270,7 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { v.verConf.stopAtHeight) // requester and fetcher engines are started by chunk consumer - chunkConsumer = chunkconsumer.NewChunkConsumer( + chunkConsumer, err = chunkconsumer.NewChunkConsumer( node.Logger, collector, processedChunkIndex, @@ -254,6 +278,10 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { fetcherEngine, v.verConf.chunkWorkers) + if err != nil { + return nil, fmt.Errorf("could not create chunk consumer: %w", err) + } + err = node.Metrics.Mempool.Register(metrics.ResourceChunkConsumer, chunkConsumer.Size) if err != nil { return nil, fmt.Errorf("could not register backend metric: %w", err) @@ -324,7 +352,7 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { Component("follower core", func(node *NodeConfig) (module.ReadyDoneAware, error) { // create a finalizer that handles updating the protocol // state when the follower detects newly finalized blocks - final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, followerState, node.Tracer) + final := finalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, followerState, node.Tracer) finalized, pending, err := recoveryprotocol.FindLatest(node.State, node.Storage.Headers) if err != nil { @@ -337,10 +365,11 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { // so that it gets notified upon each new finalized block followerCore, err = flowconsensus.NewFollower( node.Logger, + node.Metrics.Mempool, node.Storage.Headers, final, followerDistributor, - node.RootBlock.Header, + node.FinalizedRootBlock.ToHeader(), node.RootQC, finalized, pending, @@ -379,35 +408,44 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { followerEng, err = followereng.NewComplianceLayer( node.Logger, - node.Network, + node.EngineRegistry, node.Me, node.Metrics.Engine, node.Storage.Headers, - node.FinalizedHeader, + node.LastFinalizedHeader, core, - followereng.WithComplianceConfigOpt(modulecompliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold)), + node.ComplianceConfig, ) if err != nil { return nil, fmt.Errorf("could not create follower engine: %w", err) } + 
followerDistributor.AddOnBlockFinalizedConsumer(followerEng.OnFinalizedBlock)
 			return followerEng, nil
 		}).
 		Component("sync engine", func(node *NodeConfig) (module.ReadyDoneAware, error) {
+			spamConfig, err := commonsync.NewSpamDetectionConfig()
+			if err != nil {
+				return nil, fmt.Errorf("could not initialize spam detection config: %w", err)
+			}
+
 			sync, err := commonsync.New(
 				node.Logger,
 				node.Metrics.Engine,
-				node.Network,
+				node.EngineRegistry,
 				node.Me,
 				node.State,
 				node.Storage.Blocks,
 				followerEng,
 				syncCore,
 				node.SyncEngineIdentifierProvider,
+				spamConfig,
 			)
 			if err != nil {
 				return nil, fmt.Errorf("could not create synchronization engine: %w", err)
 			}
+
+			followerDistributor.AddFinalizationConsumer(sync)
+
 			return sync, nil
 		})
 }
diff --git a/config/README.md b/config/README.md
new file mode 100644
index 00000000000..65800c7d73d
--- /dev/null
+++ b/config/README.md
@@ -0,0 +1,125 @@
+## config
+config is a package that holds all configuration values for each Flow component. This package centralizes configuration management, providing access
+to the entire FlowConfig as well as utilities for adding a new config value, a corresponding CLI flag, and validation.
+
+### Package structure
+The root config package contains the FlowConfig struct and the default config file [default-config.yml](https://github.com/onflow/flow-go/blob/master/config/default-config.yml). The `default-config.yml` file is the default configuration that is loaded when the config package is initialized;
+it is a snapshot of all the configuration values defined for Flow.
+Each subpackage contains configuration structs and utilities for components and their related subcomponents, along with the CLI flags for each configuration value. The [netconf](https://github.com/onflow/flow-go/tree/master/network/netconf) package
+is a good example of this pattern. The network component is a large component made up of many other large components and subcomponents, and the configuration
+structs and CLI flags for all of these network-related components are defined in the netconf subpackage.
+
+### Overriding default values
+The entire default config can be overridden using the `--config-file` CLI flag. When set, the config package will attempt to parse the specified config file and override all the values
+it defines. A single default value can be overridden by setting the CLI flag for that specific config. For example, `--networking-connection-pruning=false` will override the default network connection pruning
+config to false.
+Override the entire config file:
+```shell
+go build -o flow-access-node ./cmd/access
+./flow-access-node --config-file=config/config.yml
+```
+Override a single configuration value:
+```shell
+go build -o flow-access-node ./cmd/access
+./flow-access-node --networking-connection-pruning=false
+```
+### Adding a new config value
+Adding a new config to the FlowConfig can be done in a few easy steps.
+
+The network package is a good example of how to add CLI flags and is used as the reference in the steps below.
+
+1. Create a new configuration package for the new configuration structs and CLI flags. Although it is encouraged to define all package configuration structs and CLI flags in a subpackage of the [config package](https://github.com/onflow/flow-go/tree/master/config),
+the configuration package can live anywhere. This configuration package will define the configuration structs and the CLI flags used to override them.
+    ```shell
+    mkdir example_config
+    ```
+    For the network package, the configuration subpackage lives in [network/netconf](https://github.com/onflow/flow-go/tree/master/network/netconf).
+
+2. Add a new CLI flag for the config value.
+    ```go
+    const workersCLIFlag = "app-workers"
+    flags.Int(workersCLIFlag, 1, "number of app workers")
+    ```
+
+    All network package CLI flags are defined in [network/netconf/flags.go](https://github.com/onflow/flow-go/blob/master/network/netconf/flags.go) in:
+    - the `const` block
+    - the `AllFlagNames` function
+    - the `InitializeNetworkFlags` function
+
+    `InitializeNetworkFlags` is used during initialization of all flags
+    in the `InitializePFlagSet` function in [config/base_flags.go](https://github.com/onflow/flow-go/blob/master/config/base_flags.go).
+
+3. Add the configuration as a new field to an existing configuration struct, or create a new configuration struct. Each configuration struct must be a field on the FlowConfig struct so that it is unmarshalled during configuration initialization.
+   Each field on a configuration struct must contain the following field tags.
+   1. `validate` - the validate tag is used to perform validation on struct fields using the [validator](https://github.com/go-playground/validator) package. In the example below you will notice
+      the `validate:"gt=0"` tag; this ensures that the value of `AppWorkers` is greater than 0. The top level `FlowConfig` struct has a Validate method that performs struct validation. This
+      validation is done with the validator package; each validate tag on every struct field and sub-struct field is validated, and validation errors are returned.
+   2. `mapstructure` - the [mapstructure](https://github.com/mitchellh/mapstructure) tag is used for unmarshalling and must match the CLI flag name defined in step 2, or else the field will not be set when the config is unmarshalled.
+    ```go
+    type MyComponentConfig struct {
+        AppWorkers int `validate:"gt=0" mapstructure:"app-workers"`
+    }
+    ```
+   It's important to make sure that each mapstructure field tag matches the corresponding property name in [config/default-config.yml](https://github.com/onflow/flow-go/blob/master/config/default-config.yml) to avoid parsing errors.
+
+   All network package configuration structs are defined in [network/netconf/config.go](https://github.com/onflow/flow-go/blob/master/network/netconf/config.go).
+
+4. Add the new configuration properties and default values to [config/default-config.yml](https://github.com/onflow/flow-go/blob/master/config/default-config.yml). Ensure that each new property added matches the mapstructure field tag of the corresponding configuration struct field.
+    ```yaml
+    config-file: "./default-config.yml"
+    network-config:
+      ...
+      my-component:
+        app-workers: 1
+    ```
+
+   All network package configuration values are defined under `network-config` in `default-config.yml`.
+
+5. If a new struct was created in step 3, add it as a new field to the `FlowConfig` struct in [config/config.go](https://github.com/onflow/flow-go/blob/master/config/config.go). In the previous steps we added a new config struct and a new property in [config/default-config.yml](https://github.com/onflow/flow-go/blob/master/config/default-config.yml) for the `my-component` struct. This property name
+   must match the mapstructure field tag for the struct when added to `FlowConfig`.
+    ```go
+    // FlowConfig Flow configuration.
+    type FlowConfig struct {
+        ConfigFile        string                       `validate:"filepath" mapstructure:"config-file"`
+        NetworkConfig     *network.Config              `mapstructure:"network-config"`
+        MyComponentConfig *mypackage.MyComponentConfig `mapstructure:"my-component"`
+    }
+    ```
+
+   The network package configuration struct, `NetworkConfig`, is already embedded as a field in the `FlowConfig` struct.
+   This means that new fields can be added to the network package configuration struct without having to update the `FlowConfig` struct.
+
+### Nested structs
+In an effort to keep the configuration yaml structure readable, some configuration properties are defined in nested structs. When this is the case, the mapstructure [squash](https://pkg.go.dev/github.com/mitchellh/mapstructure#example-Decode-EmbeddedStruct) tag must be used in the parent struct so that the corresponding nested struct is
+flattened before the configuration is unmarshalled. This is used in the network package, which is a collection of configuration structs nested in the `network.Config` struct:
+```go
+type Config struct {
+	UnicastConfig                 `mapstructure:",squash"`
+	p2pconf.ResourceManagerConfig `mapstructure:",squash"`
+	ConnectionManagerConfig       `mapstructure:",squash"`
+
+	p2pconf.GossipSubConfig `mapstructure:",squash"`
+	AlspConfig              `mapstructure:",squash"`
+	...
+}
+```
+Each nested struct must have the mapstructure squash tag so that the nested struct is flattened before unmarshalling.
+The nesting can be as deep as needed. For example, the `UnicastConfig` struct in the `Config` struct has a nested `UnicastRateLimitersConfig` struct that also uses the `squash` tag:
+
+```go
+type UnicastConfig struct {
+	// UnicastRateLimitersConfig configuration for all unicast rate limiters.
+	UnicastRateLimitersConfig `mapstructure:",squash"`
+	...
+}
+```
+
+### Setting Aliases
+Most configs are not defined on the top-level FlowConfig struct but on nested structs, under nested properties of the configuration yaml. When the default config is initially loaded, the underlying [viper](https://github.com/spf13/viper) config store
+stores each configuration value with a key that is prefixed with each parent property. For example, because `networking-connection-pruning` is found under the `network-config` property of the configuration yaml, the key used by the config store to
+store this config value is prefixed with `network-config`, e.g.
+```network-config.networking-connection-pruning```
+
+Later in the config process we bind the underlying config store to our pflag set; this allows us to override default values using CLI flags.
+At this point the underlying config store would have 2 separate keys, `networking-connection-pruning` and `network-config.networking-connection-pruning`, for the same configuration value, because the network prefix is not used for the CLI flags
+that override network configs. As a result, an alias must be set from `network-config.networking-connection-pruning` -> `networking-connection-pruning` so that both keys point to the value loaded from the CLI flag. See `SetAliases` in [network/netconf/flags.go](https://github.com/onflow/flow-go/blob/master/network/netconf/flags.go) in the network package for a reference.
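+
+For illustration, here is a minimal sketch of the aliasing mechanics, assuming the package's viper store and the key names from the example above (the authoritative alias list lives in `SetAliases` in [network/netconf/flags.go](https://github.com/onflow/flow-go/blob/master/network/netconf/flags.go)):
+```go
+import "github.com/spf13/viper"
+
+// registerPruningAlias is a hypothetical helper: it makes the file-derived key and
+// the CLI-flag key resolve to the same underlying configuration value.
+func registerPruningAlias(conf *viper.Viper) {
+	conf.RegisterAlias("network-config.networking-connection-pruning", "networking-connection-pruning")
+}
+```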
\ No newline at end of file
diff --git a/config/base_flags.go b/config/base_flags.go
new file mode 100644
index 00000000000..360c4af89b6
--- /dev/null
+++ b/config/base_flags.go
@@ -0,0 +1,23 @@
+package config
+
+import (
+	"github.com/spf13/pflag"
+
+	"github.com/onflow/flow-go/network/netconf"
+)
+
+const (
+	configFileFlagName = "config-file"
+)
+
+// InitializePFlagSet initializes all CLI flags for the Flow node base configuration on the provided pflag set.
+// Args:
+//
+//	*pflag.FlagSet: the pflag set of the Flow node.
+//	*FlowConfig: the config used to set default values on the flags
+//
+// Note: in subsequent PRs all flag initialization for the Flow node should be moved to this func.
+func InitializePFlagSet(flags *pflag.FlagSet, config *FlowConfig) {
+	flags.String(configFileFlagName, "", "provide a path to a Flow configuration file that will be used to set configuration values")
+	netconf.InitializeNetworkFlags(flags, config.NetworkConfig)
+}
diff --git a/config/config.go b/config/config.go
new file mode 100644
index 00000000000..625cfd65008
--- /dev/null
+++ b/config/config.go
@@ -0,0 +1,260 @@
+package config
+
+import (
+	"bytes"
+	_ "embed"
+	"errors"
+	"fmt"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/go-playground/validator/v10"
+	"github.com/mitchellh/mapstructure"
+	"github.com/rs/zerolog"
+	"github.com/spf13/pflag"
+	"github.com/spf13/viper"
+
+	"github.com/onflow/flow-go/network/netconf"
+)
+
+var (
+	conf     = viper.New()
+	validate *validator.Validate
+	//go:embed default-config.yml
+	configFile string
+
+	errPflagsNotParsed = errors.New("failed to bind flags to configuration values, pflags must be parsed before binding")
+)
+
+func init() {
+	initialize()
+}
+
+// FlowConfig Flow configuration.
+type FlowConfig struct {
+	// ConfigFile used to set a path to a config.yml file used to override the default-config.yml file.
+	ConfigFile    string          `validate:"filepath" mapstructure:"config-file"`
+	NetworkConfig *netconf.Config `mapstructure:"network-config"`
+}
+
+// Validate checks validity of the Flow config. Errors indicate that either the configuration is broken,
+// incompatible with the node's internal state, or that the node's internal state is corrupted. In all
+// cases, continuation is impossible.
+func (fc *FlowConfig) Validate() error {
+	err := validate.Struct(fc)
+	if err != nil {
+		if validationErrors, ok := err.(validator.ValidationErrors); ok {
+			return fmt.Errorf("failed to validate flow configuration: %w", validationErrors)
+		}
+		return fmt.Errorf("unexpected error encountered while validating flow configuration: %w", err)
+	}
+	return nil
+}
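+
+// Illustrative sketch (not part of the package API): with the `validate` field tags
+// above, an out-of-range value surfaces through Validate as wrapped
+// validator.ValidationErrors, e.g.:
+//
+//	cfg, _ := DefaultConfig()
+//	cfg.NetworkConfig.Unicast.RateLimiter.MessageRateLimit = -100
+//	err := cfg.Validate() // err wraps validator.ValidationErrors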
+
+// DefaultConfig initializes the flow configuration. All default values for the Flow
+// configuration are stored in the default-config.yml file. These values can be overridden
+// by node operators by setting the corresponding cli flag. DefaultConfig should be called
+// before any pflags are parsed; this allows the configuration to initialize with defaults
+// from default-config.yml.
+// Returns:
+//
+//	*FlowConfig: an instance of the Flow configuration fully initialized to the default values set in the config file
+//	error: if there is any error encountered while initializing the configuration, all errors are considered irrecoverable.
+func DefaultConfig() (*FlowConfig, error) {
+	var flowConfig FlowConfig
+	err := Unmarshall(&flowConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshall the Flow config: %w", err)
+	}
+	return &flowConfig, nil
+}
+
+// RawViperConfig returns the raw viper config store.
+// Returns:
+//
+//	*viper.Viper: the raw viper config store.
+func RawViperConfig() *viper.Viper {
+	return conf
+}
+
+// BindPFlags binds the configuration to the cli pflag set. This should be called
+// after all pflags have been parsed. If the --config-file flag has been set, the config will
+// be loaded from the specified config file.
+// Args:
+//
+//	c: The Flow configuration that will be used to unmarshall the configuration values into after binding pflags.
+//	This needs to be done because pflags may override a configuration value.
+//
+// Returns:
+//
+//	error: if there is any error encountered binding pflags or unmarshalling the config struct, all errors are considered irrecoverable.
+//	bool: true if the --config-file flag was set and the config file was loaded, false otherwise.
+//
+// Note: As configuration management is improved, this func should accept the entire Flow config as the arg to unmarshall new config values into.
+func BindPFlags(c *FlowConfig, flags *pflag.FlagSet) (bool, error) {
+	if !flags.Parsed() {
+		return false, errPflagsNotParsed
+	}
+
+	// update the config store values from the config file if the --config-file flag is set;
+	// if a config file is provided we will use values from the file and skip binding pflags
+	overridden, err := overrideConfigFile(flags)
+	if err != nil {
+		return false, err
+	}
+
+	if !overridden {
+		err = conf.BindPFlags(flags)
+		if err != nil {
+			return false, fmt.Errorf("failed to bind pflag set: %w", err)
+		}
+		setAliases()
+	}
+
+	err = Unmarshall(c)
+	if err != nil {
+		return false, fmt.Errorf("failed to unmarshall the Flow config: %w", err)
+	}
+
+	return overridden, nil
+}
+
+// Unmarshall unmarshalls the Flow configuration into the provided FlowConfig struct.
+// Args:
+//
+//	flowConfig: the flow config struct used for unmarshalling.
+//
+// Returns:
+//
+//	error: if there is any error encountered unmarshalling the configuration, all errors are considered irrecoverable.
+func Unmarshall(flowConfig *FlowConfig) error {
+	err := conf.Unmarshal(flowConfig, func(decoderConfig *mapstructure.DecoderConfig) {
+		// enforce all fields are set on the FlowConfig struct
+		decoderConfig.ErrorUnset = true
+		// currently the entire flow configuration has not been moved to this package;
+		// for now we allow keys in the config which are unused.
+		decoderConfig.ErrorUnused = false
+	})
+	if err != nil {
+		return fmt.Errorf("failed to unmarshal the Flow config: %w", err)
+	}
+	return nil
+}
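+
+// Illustrative sketch (not part of this file): the intended call order is
+// DefaultConfig, then flag parsing, then BindPFlags:
+//
+//	flowConfig, _ := DefaultConfig()                 // defaults from default-config.yml
+//	flags := pflag.NewFlagSet("node", pflag.ContinueOnError)
+//	InitializePFlagSet(flags, flowConfig)            // register CLI flags with those defaults
+//	_ = flags.Parse(os.Args[1:])                     // BindPFlags requires parsed flags
+//	overridden, err := BindPFlags(flowConfig, flags) // rebind and unmarshal any overrides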
+
+// LogConfig logs configuration keys and values if they were overridden with a config file.
+// It also returns a map of keys for which the values were set by a config file.
+//
+// Parameters:
+// - logger: *zerolog.Event to which the configuration keys and values will be logged.
+// - flags: *pflag.FlagSet containing the set flags.
+//
+// Returns:
+// - map[string]struct{}: map of keys for which the values were set by a config file.
+func LogConfig(logger *zerolog.Event, flags *pflag.FlagSet) map[string]struct{} {
+	keysToAvoid := make(map[string]struct{})
+
+	if flags.Lookup(configFileFlagName).Changed {
+		for _, key := range conf.AllKeys() {
+			logger.Str(key, fmt.Sprint(conf.Get(key)))
+			parts := strings.Split(key, ".")
+			if len(parts) == 2 {
+				keysToAvoid[parts[1]] = struct{}{}
+			} else {
+				keysToAvoid[key] = struct{}{}
+			}
+		}
+	}
+
+	return keysToAvoid
+}
+
+// setAliases sets aliases for config sub packages. This should be done directly after pflags are bound to the configuration store.
+// Upon initialization the conf will be loaded with the default config values; those values are then used as the default values for
+// all the CLI flags. The CLI flags are then bound to the configuration store, and at this point all aliases should be set if configuration
+// keys do not match the CLI flags 1:1, i.e.: networking-connection-pruning -> network-config.networking-connection-pruning. After aliases
+// are set, the conf store will override values with any CLI flag values that are set, as expected.
+func setAliases() {
+	err := netconf.SetAliases(conf)
+	if err != nil {
+		panic(fmt.Errorf("failed to set network aliases: %w", err))
+	}
+}
+
+// overrideConfigFile overrides the default config file by reading in the config file at the path set
+// by the --config-file flag in our viper config store.
+//
+// Returns:
+//
+//	error: if there is any error encountered while reading new config file, all errors are considered irrecoverable.
+//	bool: true if the config was overridden by the new config file, false otherwise or if an error is encountered reading the new config file.
+func overrideConfigFile(flags *pflag.FlagSet) (bool, error) {
+	configFileFlag := flags.Lookup(configFileFlagName)
+	if configFileFlag.Changed {
+		p := configFileFlag.Value.String()
+		dirPath, fileName := splitConfigPath(p)
+		conf.AddConfigPath(dirPath)
+		conf.SetConfigName(fileName)
+		err := conf.ReadInConfig()
+		if err != nil {
+			return false, fmt.Errorf("failed to read config file %s: %w", p, err)
+		}
+		if len(conf.AllKeys()) == 0 {
+			return false, fmt.Errorf("failed to read in config file: no config values found")
+		}
+		return true, nil
+	}
+	return false, nil
+}
+
+// splitConfigPath returns the directory and base name (without extension) of the config file from the provided path string.
+// If the file name does not match the expected pattern, the function panics.
+//
+// The expected pattern for file names is that they must consist of alphanumeric characters, hyphens, or underscores,
+// followed by a single dot and then the extension.
+//
+// Legitimate Inputs:
+// - /path/to/my_config.yaml
+// - /path/to/my-config123.yaml
+// - my-config.yaml (when in the current directory)
+//
+// Illegitimate Inputs:
+// - /path/to/my.config.yaml (contains multiple dots)
+// - /path/to/my config.yaml (contains spaces)
+// - /path/to/.config.yaml (does not have a file name before the dot)
+//
+// Args:
+// - path: The file path string to be split into directory and base name.
+//
+// Returns:
+// - The directory and base name without extension.
+//
+// Panics:
+// - If the file name does not match the expected pattern.
+func splitConfigPath(path string) (string, string) { + // Regex to match filenames like 'my_config.yaml' or 'my-config.yaml' but not 'my.config.yaml' + validFileNamePattern := regexp.MustCompile(`^[a-zA-Z0-9_-]+\.[a-zA-Z0-9]+$`) + + dir, name := filepath.Split(path) + + // Panic if the file name does not match the expected pattern + if !validFileNamePattern.MatchString(name) { + panic(fmt.Errorf("Invalid config file name '%s'. Expected pattern: alphanumeric, hyphens, or underscores followed by a single dot and extension", name)) + } + + // Extracting the base name without extension + baseName := strings.Split(name, ".")[0] + return dir, baseName +} + +func initialize() { + buf := bytes.NewBufferString(configFile) + conf.SetConfigType("yaml") + if err := conf.ReadConfig(buf); err != nil { + panic(fmt.Errorf("failed to initialize flow config failed to read in config file: %w", err)) + } + + // create validator, at this point you can register custom validation funcs + // struct tag translation etc. + validate = validator.New() +} diff --git a/config/config_test.go b/config/config_test.go new file mode 100644 index 00000000000..c52d7dac9bd --- /dev/null +++ b/config/config_test.go @@ -0,0 +1,226 @@ +package config + +import ( + "errors" + "fmt" + "os" + "reflect" + "strings" + "testing" + + "github.com/go-playground/validator/v10" + "github.com/spf13/pflag" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" + + "github.com/onflow/flow-go/network/netconf" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestBindPFlags ensures configuration is bound to the pflag set as expected and configuration values are overridden when set with CLI flags. +func TestBindPFlags(t *testing.T) { + t.Run("should override config values when any flag is set", func(t *testing.T) { + c := defaultConfig(t) + flags := testFlagSet(c) + err := flags.Set("networking-connection-pruning", "false") + require.NoError(t, err) + require.NoError(t, flags.Parse(nil)) + + configFileUsed, err := BindPFlags(c, flags) + require.NoError(t, err) + require.False(t, configFileUsed) + require.False(t, c.NetworkConfig.NetworkConnectionPruning) + }) + t.Run("should return an error if flags are not parsed", func(t *testing.T) { + c := defaultConfig(t) + flags := testFlagSet(c) + configFileUsed, err := BindPFlags(&FlowConfig{}, flags) + require.False(t, configFileUsed) + require.ErrorIs(t, err, errPflagsNotParsed) + }) +} + +// TestDefaultConfig ensures the default Flow config is created and returned without errors. +func TestDefaultConfig(t *testing.T) { + c := defaultConfig(t) + require.Equalf(t, "./default-config.yml", c.ConfigFile, "expected default config file to be used") + require.NoErrorf(t, c.Validate(), "unexpected error encountered validating default config") + unittest.IdentifierFixture() +} + +// TestFlowConfig_Validate ensures the Flow validate returns the expected number of validator.ValidationErrors when incorrect +// fields are set. +func TestFlowConfig_Validate(t *testing.T) { + c := defaultConfig(t) + // set invalid config values + c.NetworkConfig.Unicast.RateLimiter.MessageRateLimit = -100 + c.NetworkConfig.Unicast.RateLimiter.BandwidthRateLimit = -100 + err := c.Validate() + require.Error(t, err) + errs, ok := errors.Unwrap(err).(validator.ValidationErrors) + require.True(t, ok) + require.Len(t, errs, 2) +} + +// TestUnmarshall_UnsetFields ensures that if the config store has any missing config values an error is returned when the config is decoded into a Flow config. 
+func TestUnmarshall_UnsetFields(t *testing.T) {
+	conf = viper.New()
+	c := &FlowConfig{}
+	err := Unmarshall(c)
+	require.True(t, strings.Contains(err.Error(), "has unset fields"))
+}
+
+// Test_overrideConfigFile ensures configuration values can be overridden via the --config-file flag.
+func Test_overrideConfigFile(t *testing.T) {
+	t.Run("should override the default config if --config-file is set", func(t *testing.T) {
+		file, err := os.CreateTemp("", "config-*.yml")
+		require.NoError(t, err)
+		defer os.Remove(file.Name())
+
+		var data = fmt.Sprintf(`config-file: "%s"
+network-config:
+  networking-connection-pruning: false
+`, file.Name())
+		_, err = file.Write([]byte(data))
+		require.NoError(t, err)
+		c := defaultConfig(t)
+		flags := testFlagSet(c)
+		err = flags.Set(configFileFlagName, file.Name())
+
+		require.NoError(t, err)
+		overridden, err := overrideConfigFile(flags)
+		require.NoError(t, err)
+		require.True(t, overridden)
+
+		// ensure config values overridden with values from our inline config
+		require.Equal(t, conf.GetString(configFileFlagName), file.Name())
+		require.False(t, conf.GetBool("networking-connection-pruning"))
+	})
+	t.Run("should return an error for missing --config file", func(t *testing.T) {
+		c := defaultConfig(t)
+		flags := testFlagSet(c)
+		err := flags.Set(configFileFlagName, "./missing-config.yml")
+		require.NoError(t, err)
+		overridden, err := overrideConfigFile(flags)
+		require.Error(t, err)
+		require.False(t, overridden)
+	})
+	t.Run("should not attempt to override config if --config-file is not set", func(t *testing.T) {
+		c := defaultConfig(t)
+		flags := testFlagSet(c)
+		overridden, err := overrideConfigFile(flags)
+		require.NoError(t, err)
+		require.False(t, overridden)
+	})
+	t.Run("should return an error for file types other than .yml", func(t *testing.T) {
+		file, err := os.CreateTemp("", "config-*.json")
+		require.NoError(t, err)
+		defer os.Remove(file.Name())
+		c := defaultConfig(t)
+		flags := testFlagSet(c)
+		err = flags.Set(configFileFlagName, file.Name())
+		require.NoError(t, err)
+		overridden, err := overrideConfigFile(flags)
+		require.Error(t, err)
+		require.False(t, overridden)
+	})
+}
+
+// defaultConfig resets the config store and gets the default Flow config.
+func defaultConfig(t *testing.T) *FlowConfig {
+	initialize()
+	c, err := DefaultConfig()
+	require.NoError(t, err)
+	return c
+}
+
+func testFlagSet(c *FlowConfig) *pflag.FlagSet {
+	flags := pflag.NewFlagSet("test", pflag.PanicOnError)
+	// initialize default flags
+	InitializePFlagSet(flags, c)
+	return flags
+}
+
+// getAllYAMLKeys is a helper function that recursively extracts all keys from the YAML data.
+// The keys are returned in the format "prefix-key1-key2-...-keyN".
+// For example, if the YAML data is:
+//
+//	key1:
+//	  key2:
+//	    key3: value
+//	key4:
+//	  key5: value
+//
+// the function will return ["key1-key2-key3", "key4-key5"].
+// Args:
+// - data: the YAML data.
+// - prefix: the prefix to prepend to the keys.
+// Returns:
+// - the list of keys extracted from the YAML data.
+func getAllYAMLKeys(data interface{}, prefix string) []string {
+	var keys []string
+
+	switch v := data.(type) {
+	case map[interface{}]interface{}:
+		for key, value := range v {
+			fullKey := prefix + "-" + key.(string)
+			keys = append(keys, getAllYAMLKeys(value, fullKey)...)
+ } + case []interface{}: + for i, value := range v { + fullKey := prefix + "-" + strings.ToLower(strings.ReplaceAll(reflect.TypeOf(value).Name(), "_", "-")) + keys = append(keys, getAllYAMLKeys(value, fullKey+string(rune('a'+i)))...) + } + default: + keys = append(keys, strings.TrimRight(prefix, "-")) + } + + return keys +} + +// allResourceManagerFlagNames is a helper function that returns all libp2p-resource-manager flag names from default-config.yml. +func allResourceManagerFlagNames(t *testing.T) []string { + yamlFile, err := os.ReadFile("default-config.yml") + require.NoError(t, err, "failed to read YAML file") + + var config map[string]interface{} + err = yaml.Unmarshal(yamlFile, &config) + require.NoError(t, err, "failed to unmarshal YAML file") + + networkConfig, exists := config["network-config"].(map[interface{}]interface{}) + require.True(t, exists, "the key 'network-config' does not exist in the YAML file") + + resourceManagerConfig, exists := networkConfig["libp2p-resource-manager"].(map[interface{}]interface{}) + require.True(t, exists, "the key 'libp2p-resource-manager' does not exist in the YAML file") + + return getAllYAMLKeys(resourceManagerConfig, "libp2p-resource-manager") +} + +// TestAllFlagNames_AllResourceManagerFlags validates that AllFlagNames returns a complete list of flag names from default-config.yml that includes all libp2p-resource-manager flags. +func TestAllFlagNames_AllResourceManagerFlags(t *testing.T) { + allFlags := netconf.AllFlagNames() + for _, f := range allResourceManagerFlagNames(t) { + require.Containsf(t, allFlags, f, "the flag '%s' is missing from the list of all flags", f) + } +} + +// TestLoadLibP2PResourceManagerFlags validates that all libp2p-resource-manager flags from default-config.yml are settable by LoadLibP2PResourceManagerFlags. +func TestLoadLibP2PResourceManagerFlags(t *testing.T) { + // create an instance of Config to pass to the loader function + var config netconf.Config + + // initialize the FlagSet + flags := pflag.NewFlagSet(t.Name(), pflag.ContinueOnError) + + // load the flags using your function + netconf.LoadLibP2PResourceManagerFlags(flags, &config) + + // retrieve all flag names + flagNames := allResourceManagerFlagNames(t) + + // iterate over the flag names to ensure each one is settable + for _, flagName := range flagNames { + require.NotNil(t, flags.Lookup(flagName), "flag '%s' is not settable by LoadLibP2PResourceManagerFlags", flagName) + } +} diff --git a/config/default-config.yml b/config/default-config.yml new file mode 100644 index 00000000000..0d338492ab9 --- /dev/null +++ b/config/default-config.yml @@ -0,0 +1,679 @@ +config-file: "./default-config.yml" +# WARNING: Only modify the network configurations below if you fully understand their implications. +# Incorrect settings may lead to system instability, security vulnerabilities, or degraded performance. +# Make changes with caution and refer to the documentation for guidance. +# Network configuration. +network-config: + # Network Configuration + # Connection pruning determines whether connections to nodes + # that are not part of protocol state should be trimmed + networking-connection-pruning: true + # Preferred unicasts protocols list of unicast protocols in preferred order + preferred-unicast-protocols: [ ] + received-message-cache-size: 10_000 + peerupdate-interval: 10m + + dns-cache-ttl: 5m + # The size of the queue for notifications about new peers in the disallow list. 
+  disallow-list-notification-cache-size: 100
+  unicast:
+    rate-limiter:
+      # Setting this to true will disable connection disconnects and gating when unicast rate limiters are configured
+      dry-run: true
+      # The number of seconds a peer will be forced to wait before being allowed to successfully reconnect to the node after being rate limited
+      lockout-duration: 10s
+      # Number of unicast messages that can be sent by a peer per second
+      message-rate-limit: 0
+      # Bandwidth size in bytes a peer is allowed to send via unicast streams per second
+      bandwidth-rate-limit: 0
+      # Bandwidth size in bytes a peer is allowed to send via unicast streams at once
+      bandwidth-burst-limit: 1e9
+    manager:
+      # The minimum number of consecutive successful streams to reset the unicast stream creation retry budget from zero to the maximum default. If it is set to 100 for example, it
+      # means that if a peer has 100 consecutive successful streams to the remote peer, and the remote peer has a zero stream creation budget,
+      # the unicast stream creation retry budget for that remote peer will be reset to the maximum default.
+      stream-zero-retry-reset-threshold: 100
+      # The maximum number of retry attempts for creating a unicast stream to a remote peer before giving up. If it is set to 3 for example, it means that if a peer fails to create
+      # a unicast stream to a remote peer after 3 retries, the peer will give up and will not retry creating a unicast stream to that remote peer.
+      # When it is set to zero it means that the peer will not retry creating a unicast stream to a remote peer if it fails.
+      max-stream-creation-retry-attempt-times: 3
+      # The size of the dial config cache used to keep track of the dial config for each remote peer. The dial config is used to keep track of the dial retry budget for each remote peer.
+      # Recommended to set it to the maximum number of remote peers in the network.
+      dial-config-cache-size: 10_000
+      # Unicast create stream retry delay is the initial delay used in the exponential backoff for create stream retries
+      create-stream-retry-delay: 1s
+      message-timeout: 5s
+      # Enable stream protection for unicast streams; when enabled, all connections that are being established or
+      # have been already established for unicast streams are protected, meaning that they won't be closed by the connection manager.
+      # This is useful for preventing the connection manager from closing unicast streams that are being used by the application layer.
+      # However, it may interfere with the resource manager of libp2p, i.e., the connection manager may not be able to close connections
+      # that are not being used by the application layer while at the same time the node is running out of resources for new connections.
+      enable-stream-protection: true
+  # Resource manager config
+  libp2p-resource-manager:
+    # Maximum allowed fraction of memory to be allocated by the libp2p resources in [0,1];
+    # setting it to zero means no allocation of memory by libp2p, and libp2p will run with very low limits
+    memory-limit-ratio: 0.5 # flow default
+    # Maximum allowed fraction of file descriptors to be allocated by the libp2p resources in [0,1];
+    # setting it to zero means no allocation of file descriptors by libp2p, and libp2p will run with very low limits
+    file-descriptors-ratio: 0.2 # libp2p default
+    # limits override: any non-zero values for libp2p-resource-limit-override will override the default values of the libp2p resource limits.
+    limits-override:
+      system:
+        # maximum number of inbound system-wide streams, across all peers and protocols
+        # Note that streams are ephemeral and are created and destroyed intermittently.
+        streams-inbound: 15_000 # override
+        # maximum number of outbound system-wide streams, across all peers and protocols
+        # Note that streams are ephemeral and are created and destroyed intermittently.
+        streams-outbound: 15_000 # override
+        connections-inbound: 0 # no-override, use default
+        connections-outbound: 0 # no-override, use default
+        fd: 0 # no-override, use default
+        memory-bytes: 0 # no-override, use default
+      transient:
+        # maximum number of inbound transient streams, across all streams that are not yet fully opened and associated with a protocol.
+        # Note that streams are ephemeral and are created and destroyed intermittently.
+        streams-inbound: 15_000 # override
+        # maximum number of outbound transient streams, across all streams that are not yet fully opened and associated with a protocol.
+        # Note that streams are ephemeral and are created and destroyed intermittently.
+        streams-outbound: 15_000 # override
+        connections-inbound: 0 # no-override, use default
+        connections-outbound: 0 # no-override, use default
+        fd: 0 # no-override, use default
+        memory-bytes: 0 # no-override, use default
+      protocol:
+        # maximum number of inbound streams for each protocol across all peers; this is a per-protocol limit. We expect at least
+        # three protocols per node: gossipsub, unicast, and dht. Note that streams are ephemeral and are created and destroyed intermittently.
+        streams-inbound: 5000 # override
+        # maximum number of outbound streams for each protocol across all peers; this is a per-protocol limit. We expect at least
+        # three protocols per node: gossipsub, unicast, and dht. Note that streams are ephemeral and are created and destroyed intermittently.
+        streams-outbound: 5000 # override
+        connections-inbound: 0 # no-override, use default
+        connections-outbound: 0 # no-override, use default
+        fd: 0 # no-override, use default
+        memory-bytes: 0 # no-override, use default
+      peer:
+        # maximum number of inbound streams from each peer across all protocols.
+        streams-inbound: 1000 # override
+        # maximum number of outbound streams from each peer across all protocols.
+        streams-outbound: 1000 # override
+        connections-inbound: 0 # no-override, use default
+        connections-outbound: 0 # no-override, use default
+        fd: 0 # no-override, use default
+        memory-bytes: 0 # no-override, use default
+      peer-protocol:
+        # maximum number of inbound streams from each peer for each protocol.
+        streams-inbound: 500 # override
+        # maximum number of outbound streams from each peer for each protocol.
+        streams-outbound: 500 # override
+        connections-inbound: 0 # no-override, use default
+        connections-outbound: 0 # no-override, use default
+        fd: 0 # no-override, use default
+        memory-bytes: 0 # no-override, use default
+  connection-manager:
+    # HighWatermark and LowWatermark govern the number of connections maintained by the ConnManager.
+    # When the peer count exceeds the HighWatermark, peers are pruned (and
+    # their connections terminated) until only LowWatermark peers remain. In other words, whenever the
+    # peer count is x > HighWatermark, the ConnManager will prune x - LowWatermark peers.
+    # The pruning algorithm is as follows:
+    # 1. The ConnManager will not prune any peers that have been connected for less than GracePeriod.
+    # 2. The ConnManager will not prune any peers that are protected.
+    # 3. The ConnManager will sort the peers based on their number of streams and direction of connections, and
+    #    prune the peers with the least number of streams. If there are ties, the peer with the incoming connection
+    #    will be pruned. If both peers have incoming connections, and there are still ties, one of the peers will be
+    #    pruned at random.
+    # The algorithm implementation is in https://github.com/libp2p/go-libp2p/blob/master/p2p/net/connmgr/connmgr.go#L262-L318
+    # We assume the number of nodes is around 500, and each node is allowed to make at most 8 connections to each remote node;
+    # we hence set the high-watermark to 500 * 8 = 4000, and the low-watermark to 500 * (0.5 * 4) = 1000. This means that when the
+    # number of peers exceeds 4000, the connection manager will prune the peers with the least number of streams until the number of
+    # peers is reduced to 1000, assuming an average of 2 connections per peer.
+    high-watermark: 4000
+    low-watermark: 1000
+    # The silence period is a regular interval at which the connection manager checks for pruning peers if the number of peers exceeds the high-watermark;
+    # 10s is the default libp2p value.
+    silence-period: 10s
+    # The time to wait before a new connection is considered for pruning.
+    grace-period: 1m
+  # Gossipsub config
+  gossipsub:
+    rpc-inspector:
+      # The size of the queue for notifications about invalid RPC messages
+      notification-cache-size: 10_000
+      validation: # RPC control message validation inspector configs
+        inspection-queue:
+          # Rpc validation inspector number of pool workers
+          workers: 5
+          # The size of the queue used by worker pool for the control message validation inspector
+          queue-size: 100
+        publish-messages:
+          # The maximum number of messages in a single RPC message that are randomly sampled for async inspection.
+          # When the size of a single RPC message exceeds this threshold, a random sample is taken for inspection, but the RPC message is not truncated.
+          max-sample-size: 1000
+          # The threshold at which an error will be returned if the number of invalid RPC messages exceeds this value
+          error-threshold: 500
+        graft-and-prune:
+          # The maximum number of GRAFT or PRUNE messages in a single RPC message.
+          # When the total number of GRAFT or PRUNE messages in a single RPC message exceeds this threshold,
+          # a random sample of GRAFT or PRUNE messages will be taken and the RPC message will be truncated to this sample size.
+          message-count-threshold: 1000
+          # Maximum number of total duplicate topic ids in a single GRAFT or PRUNE message; ideally this should be 0, but we allow for some tolerance
+          # to avoid penalizing peers that are not malicious but are misbehaving due to bugs or other issues.
+          # A topic id is considered duplicate if it appears more than once in a single GRAFT or PRUNE message.
+          duplicate-topic-id-threshold: 50
+          # Maximum number of total invalid topic ids in GRAFTs/PRUNEs of a single RPC; ideally this should be 0, but we allow for some tolerance
+          # to avoid penalizing peers that are not malicious but are misbehaving due to bugs or other issues. Exceeding this threshold causes RPC inspection failure with an invalid control message notification (penalty).
+          invalid-topic-id-threshold: 50
+        ihave:
+          # The maximum allowed number of iHave messages in a single RPC message.
+          # Each iHave message represents the list of message ids. When the total number of iHave messages
+          # in a single RPC message exceeds this threshold, a random sample of iHave messages will be taken and the RPC message will be truncated to this sample size.
+          # The sample size is equal to the configured message-count-threshold.
+          message-count-threshold: 1000
+          # The maximum allowed number of message ids in a single iHave message.
+          # Each iHave message represents the list of message ids for a specific topic, and this parameter controls the maximum number of message ids
+          # that can be included in a single iHave message. When the total number of message ids in a single iHave message exceeds this threshold,
+          # a random sample of message ids will be taken and the iHave message will be truncated to this sample size.
+          # The sample size is equal to the configured message-id-count-threshold.
+          message-id-count-threshold: 1000
+          # The tolerance threshold for having duplicate topics in an iHave message under inspection.
+          # When the total number of duplicate topic ids in a single iHave message exceeds this threshold, the inspection of the message will fail.
+          # Note that a topic ID is counted as a duplicate only if it is repeated more than once.
+          duplicate-topic-id-threshold: 50
+          # Threshold of tolerance for having duplicate message IDs in a single iHave message under inspection.
+          # Ideally, an iHave message should not have any duplicate message IDs; hence a message id is considered duplicate when it is repeated more than once
+          # within the same iHave message. When the total number of duplicate message ids in a single iHave message exceeds this threshold, the inspection of the message will fail.
+          duplicate-message-id-threshold: 100
+          # Maximum number of total invalid topic ids in an IHAVE message on a single RPC; ideally this should be 0, but we allow for some tolerance
+          # to avoid penalizing peers that are not malicious but are misbehaving due to bugs or other issues. Exceeding this threshold causes RPC inspection failure with an invalid control message notification (penalty).
+          invalid-topic-id-threshold: 50
+        iwant:
+          # The maximum allowed number of iWant messages in a single RPC message.
+          # Each iWant message represents the list of message ids. When the total number of iWant messages
+          # in a single RPC message exceeds this threshold, a random sample of iWant messages will be taken and the RPC message will be truncated to this sample size.
+          # The sample size is equal to the configured message-count-threshold.
+          message-count-threshold: 1000
+          # The maximum allowed number of message ids in a single iWant message.
+          # Each iWant message represents the list of message ids for a specific topic, and this parameter controls the maximum number of message ids
+          # that can be included in a single iWant message. When the total number of message ids in a single iWant message exceeds this threshold,
+          # a random sample of message ids will be taken and the iWant message will be truncated to this sample size.
+          # The sample size is equal to the configured message-id-count-threshold.
+          message-id-count-threshold: 1000
+          # The allowed threshold of iWant messages received without a corresponding tracked iHave message that was sent.
+          # If the cache miss threshold is exceeded, an invalid control message notification is disseminated and the sender will be penalized.
+          cache-miss-threshold: 500
+          # The max allowed number of duplicate message ids in a single iWant message.
+          # Note that ideally there should be no duplicate message ids in a single iWant message, but
+          # we allow for some tolerance to avoid penalizing peers that are not malicious
+          duplicate-message-id-threshold: 100
+        cluster-prefixed-messages:
+          # Cluster prefixed control message validation configs
+          # The size of the cache used to track the amount of cluster prefixed topics received by peers
+          tracker-cache-size: 100
+          # The decay value used for the geometric decay of cache counters used to keep track of cluster prefixed topics received by peers
+          tracker-cache-decay: 0.99
+          # The upper bound on the amount of cluster prefixed control messages that will be processed
+          hard-threshold: 100
+      process:
+        inspection:
+          # Serves as a fail-safe mechanism to globally deactivate inspection logic. When this fail-safe is activated it disables all
+          # aspects of the inspection logic, irrespective of individual configurations like inspection.enable-graft, inspection.enable-prune, etc.
+          # Consequently, all metrics collection and logging related to the rpc and inspection will also be disabled.
+          # It is important to note that activating this fail-safe results in a comprehensive deactivation of inspection features.
+          # Please use this setting judiciously, considering its broad impact on the behavior of control message handling.
+          disabled: false
+          # Enables graft control message inspection.
+          enable-graft: true
+          # Enables prune control message inspection.
+          enable-prune: true
+          # Enables ihave control message inspection.
+          enable-ihave: true
+          # Enables iwant control message inspection.
+          enable-iwant: true
+          # Enables publish message inspection.
+          enable-publish: true
+          # When set to true, RPCs will be rejected from unstaked peers
+          reject-unstaked-peers: true
+        truncation:
+          # Serves as a fail-safe mechanism to globally deactivate truncation logic. When this fail-safe is activated it disables all
+          # aspects of the truncation logic, irrespective of individual configurations like truncation.enable-graft, truncation.enable-prune, etc.
+          # Consequently, all metrics collection and logging related to the rpc and truncation will also be disabled.
+          # It is important to note that activating this fail-safe results in a comprehensive deactivation of truncation features.
+          # Please use this setting judiciously, considering its broad impact on the behavior of control message handling.
+          disabled: false
+          # Enables graft control message truncation.
+          enable-graft: true
+          # Enables prune control message truncation.
+          enable-prune: true
+          # Enables ihave control message truncation.
+          enable-ihave: true
+          # Enables ihave message id truncation.
+          enable-ihave-message-id: true
+          # Enables iwant control message truncation.
+          enable-iwant: true
+          # Enables iwant message id truncation.
+          enable-iwant-message-id: true
+    rpc-tracer:
+      # The default interval at which the mesh tracer logs the mesh topology. This is used for debugging and forensics purposes.
+      # Note that we purposefully choose this logging interval high enough to avoid spamming the logs. Moreover, the
+      # mesh updates will be logged individually and separately. The logging interval is only used to log the mesh
+      # topology as a whole, especially when there are no updates to the mesh topology for a long time.
+      local-mesh-logging-interval: 1m
+ # Note that we purposefully choose this logging interval high enough to avoid spamming the logs.
+ score-tracer-interval: 1m
+ # The default RPC sent tracker cache size. The RPC sent tracker is used to track RPC control messages sent from the local node.
+ # Note: this cache size must be large enough to keep a history of sent messages in a reasonable time window of past history.
+ rpc-sent-tracker-cache-size: 1_000_000
+ # Cache size of the rpc sent tracker queue used for async tracking.
+ rpc-sent-tracker-queue-cache-size: 100_000
+ # Number of workers for rpc sent tracker worker pool.
+ rpc-sent-tracker-workers: 5
+ # Cache size of the gossipsub duplicate message tracker.
+ duplicate-message-tracker:
+ cache-size: 10_000
+ decay: .5
+ # When the counter is below this value, the decay function will not be called;
+ # instead, the counter will be set to 0. This is to prevent the counter from becoming a large number over time.
+ skip-decay-threshold: 0.1
+ # Whether GossipSub peer scoring is enabled. Peer scoring is enabled by default.
+ peer-scoring-enabled: true
+ scoring-parameters:
+ peer-scoring:
+ internal:
+ # The weight for app-specific scores.
+ # It is used to scale the app-specific scores to the same range as the other scores.
+ # At the current version, we don't distinguish between the app-specific scores
+ # and the other scores, so we set it to 1.
+ app-specific-score-weight: 1
+ # The default decay interval for the overall score of a peer at the GossipSub scoring
+ # system. We set it to 1 minute: not so short that a malicious node can quickly recover from a penalty,
+ # and not so long that a well-behaved node cannot recover from a penalty.
+ decay-interval: 1m
+ # The default decay to zero for the overall score of a peer at the GossipSub scoring system.
+ # It defines the maximum value below which a peer scoring counter is reset to zero.
+ # This is to prevent the counter from decaying to a very small value.
+ # The default value is 0.01, which means that a counter will be reset to zero if it decays to 0.01.
+ # When a counter hits the DecayToZero threshold, it means that the peer did not exhibit the behavior
+ # for a long time, and we can reset the counter.
+ decay-to-zero: 0.01
+ topic:
+ # This is the default value for the skip atomic validation flag for topics.
+ # We set it to true, which means gossipsub parameter validation will not fail if we leave some of the
+ # topic parameters at their default values, i.e., zero. This is because we are not setting all
+ # topic parameters in the current implementation.
+ skip-atomic-validation: true
+ # This value is applied to the square of the number of invalid message deliveries on a topic.
+ # It is used to penalize peers that send invalid messages. By an invalid message, we mean a message that is not signed by the
+ # publisher, or a message that is not signed by the peer that sent it.
+ # An invalid message can also be a self-origin message, i.e., the peer sees its own message bounced back to it.
+ # GossipSub has an edge case in which a peer may inadvertently request a self-origin message from a peer that it is connected to, through iHave-iWant messages; this is a
+ # false-positive edge case.
+ # We set it to -10e-4, which means that with around 1414 invalid
+ # message deliveries within a gossipsub heartbeat interval, the peer will be disconnected.
+ # Note that we intentionally set this threshold high to avoid false-positively penalizing nodes due to self-origin message requests by iHave-iWants (a known issue in gossipsub).
+ # The supporting math is as follows:
+ # - each staked (i.e., authorized) peer is rewarded by the fixed reward of 100 (i.e., DefaultStakedIdentityReward).
+ # - x invalid message deliveries will result in a penalty of x^2 * DefaultTopicInvalidMessageDeliveriesWeight, i.e., -x^2 * 10^-4.
+ # - the peer will be disconnected when its penalty reaches -100 (i.e., MaxAppSpecificPenalty).
+ # - so, the maximum number of invalid message deliveries that a peer can have before being disconnected is sqrt(200/10^-4) ~ 1414.
+ invalid-message-deliveries-weight: -10e-4
+ # The decay factor used to decay the number of invalid message deliveries.
+ # The total number of invalid message deliveries is multiplied by this factor at each heartbeat interval to
+ # decay the number of invalid message deliveries, and prevent the peer from being disconnected if it stops
+ # sending invalid messages. We set it to 0.5, which means that the number of invalid message deliveries will
+ # decay by 50% at each heartbeat interval.
+ # The decay heartbeats are defined by the heartbeat interval of the gossipsub scoring system, which is 1 minute (defaultDecayInterval).
+ # Note that we set the decay factor low so that the invalid message deliveries will be decayed fast enough to prevent the peer from being disconnected on moderate loads.
+ # This is to address the false-positive disconnections that we observed in the network due to the self-origin message requests by iHave-iWants (a known issue in gossipsub).
+ invalid-message-deliveries-decay: 0.5
+ # The default time in mesh quantum for the GossipSub scoring system. It is used to gauge
+ # a discrete time interval for the time in mesh counter. We set it to 1 hour, which means that for every complete hour a peer is
+ # in a topic mesh, the time in mesh counter will be incremented by 1 and counted towards the availability score of the peer in that topic mesh.
+ # The reason for setting it to 1 hour is that we want to reward peers that are in a topic mesh for a long time, and we want to avoid rewarding peers that
+ # are churners, i.e., peers that join and leave a topic mesh frequently.
+ time-in-mesh-quantum: 1h
+ # The default weight of a topic in the GossipSub scoring system.
+ # The overall score of a peer in a topic mesh is multiplied by the weight of the topic when calculating the overall score of the peer.
+ # We set it to 1.0, which means that the overall score of a peer in a topic mesh is not affected by the weight of the topic.
+ topic-weight: 1.0
+ # This is applied to the number of actual message deliveries in a topic mesh
+ # at each decay interval (i.e., defaultDecayInterval).
+ # It is used to decay the number of actual message deliveries, and prevents past message
+ # deliveries from affecting the current score of the peer.
+ # As the decay interval is 1 minute, we set it to 0.5, which means that the number of actual message
+ # deliveries will decay by 50% at each decay interval.
+ mesh-message-deliveries-decay: 0.5
+ # The maximum number of actual message deliveries in a topic
+ # mesh that is used to calculate the score of a peer in that topic mesh.
+ # We set it to 1000, which means that at most 1000 actual message deliveries in a
+ # topic mesh are counted towards the score of a peer in that topic mesh.
+ # This is to prevent the score of a peer in a topic mesh from being dominated by a large number of actual
+ # message deliveries, which would also affect the score of the peer in other topic meshes.
+ # When the total delivered messages in a topic mesh exceeds this value, the score of the peer in that topic
+ # mesh will not be affected by the actual message deliveries in that topic mesh.
+ # Moreover, this does not allow the peer to accumulate a large number of actual message deliveries in a topic mesh
+ # and then start under-performing in that topic mesh without being penalized.
+ mesh-message-deliveries-cap: 1000
+ # The threshold for the number of actual message deliveries in a
+ # topic mesh that is used to calculate the score of a peer in that topic mesh.
+ # If the number of actual message deliveries in a topic mesh is less than this value,
+ # the peer will be penalized by the square of the difference between the actual message deliveries and the threshold,
+ # i.e., -w * (actual - threshold)^2 where `actual` and `threshold` are the actual message deliveries and the
+ # threshold, respectively, and `w` is the weight (i.e., defaultTopicMeshMessageDeliveriesWeight).
+ # We set it to 0.1 * defaultTopicMeshMessageDeliveriesCap, which means that if a peer delivers less than 10% of the
+ # maximum number of actual message deliveries in a topic mesh, it will be considered as an under-performing peer
+ # in that topic mesh.
+ mesh-message-deliveries-threshold: 100
+ # The weight for applying a penalty when a peer is under-performing in a topic mesh.
+ # Upon every decay interval, if the number of actual message deliveries is less than the topic mesh message deliveries threshold
+ # (i.e., defaultTopicMeshMessageDeliveriesThreshold), the peer will be penalized by the square of the difference between the actual
+ # message deliveries and the threshold, multiplied by this weight, i.e., -w * (actual - threshold)^2 where w is the weight, and
+ # `actual` and `threshold` are the actual message deliveries and the threshold, respectively.
+ # We set this value to -0.05 * MaxAppSpecificReward / (defaultTopicMeshMessageDeliveriesThreshold^2). This guarantees that even if a peer
+ # is not delivering any message in a topic mesh, it will not be disconnected.
+ # Rather, part of the MaxAppSpecificReward that is awarded by our app-specific scoring function to all staked
+ # nodes by default will be withdrawn, and the peer will be slightly penalized. In other words, under-performing in a topic mesh
+ # will drop the overall score of a peer by 5% of the MaxAppSpecificReward that is awarded by our app-specific scoring function.
+ # It means that under-performing in a topic mesh will not cause a peer to be disconnected, but it will cause the peer to lose
+ # part of the MaxAppSpecificReward that is awarded by our app-specific scoring function.
+ # At this point, we do not want to disconnect a peer only because it is under-performing in a topic mesh as it might be
+ # causing a false positive network partition.
+ mesh-deliveries-weight: -0.0005
+ # The window size is the time interval during which we count a delivery of an already
+ # seen message towards the score of a peer in a topic mesh. The delivery is counted
+ # by GossipSub only if the previous sender of the message is different from the current sender.
+ # We set it to the decay interval of the GossipSub scoring system, which is 1 minute.
+ # It means that if a peer delivers a message that it has already seen less than one minute ago,
+ # the delivery will be counted towards the score of the peer in a topic mesh only if the previous sender of the message is different from the current sender.
+ # This also prevents replay attacks of messages that are older than one minute, as replayed messages will not
+ # be counted towards the actual message deliveries of a peer in a topic mesh.
+ mesh-message-deliveries-window: 1m
+ # The time interval that we wait after a new peer joins a topic mesh
+ # before we start counting the number of actual message deliveries of that peer in that topic mesh.
+ # We set it to 2 * defaultDecayInterval, which means that we wait for 2 decay intervals before we start counting
+ # the number of actual message deliveries of a peer in a topic mesh.
+ # With a default decay interval of 1 minute, it means that we wait for 2 minutes before we start counting the
+ # number of actual message deliveries of a peer in a topic mesh. This is to account for
+ # the time that it takes for a peer to start up and receive messages from other peers in the topic mesh.
+ mesh-message-delivery-activation: 2m
+ thresholds:
+ # When a peer's penalty drops below this threshold, no gossip
+ # is emitted towards that peer and gossip from that peer is ignored.
+ # Validation Constraint: GossipThreshold >= PublishThreshold && GossipThreshold < 0
+ # How we use it: As the current max penalty is -100, we set the threshold to -99
+ # so that all gossips to and from peers with penalty -100 are ignored.
+ gossip: -99
+ # When a peer's penalty drops below this threshold,
+ # self-published messages are not propagated towards this peer.
+ # Validation Constraint:
+ # PublishThreshold >= GraylistThreshold && PublishThreshold <= GossipThreshold && PublishThreshold < 0.
+ # How we use it: As the current max penalty is -100, we set the threshold to -99
+ # so that all penalized peers are deprived of receiving any published messages.
+ publish: -99
+ # When a peer's penalty drops below this threshold,
+ # the peer is graylisted, i.e., incoming RPCs from the peer are ignored.
+ # Validation Constraint:
+ # GraylistThreshold <= PublishThreshold && GraylistThreshold <= GossipThreshold && GraylistThreshold < 0
+ # How we use it: As the current max penalty is -100, we set the threshold to -99
+ # so that all penalized peers are graylisted.
+ graylist: -99
+ # When a peer sends us PX information with a prune, we only accept it
+ # and connect to the supplied peers if the originating peer's
+ # penalty exceeds this threshold.
+ # Validation Constraint: must be non-negative.
+ # How we use it: As the current max reward is 100, we set the threshold to 99
+ # so that we only receive supplied peers from well-behaved peers.
+ accept-px: 99
+ # When the median peer penalty in the mesh drops
+ # below this value, the peer may select more peers with penalty above the median
+ # to opportunistically graft on the mesh.
+ # Validation Constraint: must be non-negative.
+ # How we use it: We set it to 100 + 1 so that we only
+ # opportunistically graft peers that are not access nodes (i.e., with -1),
+ # or penalized peers (i.e., with -100).
+ opportunistic-graft: 101
+ behaviour:
+ # The threshold at which the behavior of a peer is considered as bad by GossipSub.
+ # Currently, misbehavior is defined as advertising an iHave without responding to the iWants (iHave broken promises), as well as attempting
+ # to GRAFT when the peer is subject to a PRUNE backoff, i.e., the local peer does not allow the peer to join the local topic mesh
+ # for a while, and the remote peer keeps attempting to GRAFT (aka GRAFT flood).
+ # When the misbehavior counter of a peer goes beyond this threshold, the peer is penalized by defaultBehaviorPenaltyWeight (see below) for the excess misbehavior.
+ #
+ # An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
+ # For iHave broken promises, the gossipsub scoring works as follows:
+ # It samples ONLY A SINGLE iHave out of the entire RPC.
+ # If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
+ #
+ # We set it to 10, meaning that we at most tolerate 10 such RPCs containing iHave broken promises. After that, the peer is penalized for every
+ # excess RPC containing iHave broken promises.
+ # The counter is also decayed by (0.99) every decay interval (defaultDecayInterval), i.e., every minute.
+ # Note that misbehaviors are counted by GossipSub across all topics (and this is different from the Application Layer Misbehaviors that we count through
+ # the ALSP system).
+ penalty-threshold: 1000
+ # The weight for applying a penalty when a peer's misbehavior goes beyond the threshold.
+ # Misbehavior of a peer at the gossipsub layer is defined as advertising an iHave without responding to the iWants (broken promises), as well as attempting
+ # to GRAFT when the peer is subject to a PRUNE backoff, i.e., the local peer does not allow the peer to join the local topic mesh.
+ # This is detected by the GossipSub scoring system, and the peer is penalized by defaultBehaviorPenaltyWeight.
+ #
+ # An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
+ # For iHave broken promises, the gossipsub scoring works as follows:
+ # It samples ONLY A SINGLE iHave out of the entire RPC.
+ # If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
+ #
+ # The penalty is applied to the square of the difference between the misbehavior counter and the threshold, i.e., -|w| * (misbehavior counter - threshold)^2.
+ # We set it to 0.01 * MaxAppSpecificPenalty, which means that misbehaving 10 times more than the threshold (i.e., 10 + 10) will cause the peer to lose
+ # its entire AppSpecificReward that is awarded by our app-specific scoring function to all staked (i.e., authorized) nodes by default.
+ # Moreover, as the MaxAppSpecificPenalty is -MaxAppSpecificReward, misbehaving sqrt(2) * 10 times more than the threshold will cause the peer score
+ # to be dropped below the MaxAppSpecificPenalty, which is also below the GraylistThreshold, and the peer will be graylisted (i.e., disconnected).
+ #
+ # The math is as follows: -|w| * (misbehavior - threshold)^2 = 0.01 * MaxAppSpecificPenalty * (misbehavior - threshold)^2 < 2 * MaxAppSpecificPenalty
+ # if misbehavior > threshold + sqrt(2) * 10.
+ # As shown above, with this choice of defaultBehaviorPenaltyWeight, misbehaving sqrt(2) * 10 times more than the threshold will cause the peer score
+ # to be dropped below the MaxAppSpecificPenalty, which is also below the GraylistThreshold, and the peer will be graylisted (i.e., disconnected). This weight
+ # is chosen in a way that with only a few misbehaviors more than the threshold, the peer will be graylisted. The rationale relies on the fact that
+ # the misbehavior counter is incremented by 1 for each RPC containing one or more broken promises. Hence, it is per RPC, and not per broken promise.
+ # Sending sqrt(2) * 10 RPCs with broken promises is a blatant misbehavior, and the peer should be graylisted. With a decay interval of 1 minute, and a decay value of
+ # 0.99, we expect a node graylisted due to broken promises to recover in about 527 minutes, i.e., (0.99)^x * (sqrt(2) * 10)^2 * MaxAppSpecificPenalty > GraylistThreshold
+ # where x is the number of decay intervals that the peer is graylisted. As MaxAppSpecificPenalty and GraylistThreshold are close, we can simplify the inequality
+ # to (0.99)^x * 200 < 1 --> (0.99)^x < 1/200 --> x > log(1/200) / log(0.99) --> x > 527.17 decay intervals, i.e., 527 minutes.
+ # Note that misbehaviors are counted by GossipSub across all topics (and this is different from the Application Layer Misbehaviors that we count through
+ # the ALSP system, which are reported by the engines).
+ penalty-weight: -0.01
+ # The decay interval for the misbehavior counter of a peer. The misbehavior counter is
+ # incremented by GossipSub for iHave broken promises or GRAFT flooding attacks (i.e., each GRAFT received from a remote peer while that peer is on a PRUNE backoff).
+ #
+ # An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
+ # For iHave broken promises, the gossipsub scoring works as follows:
+ # It samples ONLY A SINGLE iHave out of the entire RPC.
+ # If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
+ # This means that regardless of how many iHave broken promises an RPC contains, the misbehavior counter is incremented by 1.
+ # That is why we decay the misbehavior counter very slowly, as this counter indicates a severe misbehavior.
+ #
+ # The misbehavior counter is decayed per decay interval (i.e., defaultDecayInterval = 1 minute) by GossipSub.
+ # We set it to 0.99, which means that the misbehavior counter is decayed by 1% per decay interval.
+ # With the generous threshold that we set (i.e., defaultBehaviourPenaltyThreshold = 10), we regard peers going beyond the threshold as persistent misbehavers.
+ # We expect honest peers never to go beyond the threshold, and if they do, we expect them to go back below the threshold quickly.
+ #
+ # Note that misbehaviors are counted by GossipSub across all topics (and this is different from the Application Layer Misbehaviors that we count through
+ # the ALSP system, which is based on the engines' reports).
+ penalty-decay: 0.5
+ protocol:
+ # The max number of debug/trace log events per second.
+ # Logs emitted above this threshold are dropped.
+ max-debug-logs: 50
+ application-specific:
+ # This is the maximum penalty for severe offenses that we apply
+ # to a remote node score. The score mechanism of GossipSub in Flow is designed
+ # in a way that all other infractions are penalized with a fraction of this value.
+ # We have also set the other parameters such as GraylistThreshold,
+ # GossipThreshold, and PublishThreshold to be a bit higher than this,
+ # i.e., -100 + 1. This ensures that a node with a score of
+ # -100 will be graylisted (i.e., all incoming and outgoing RPCs
+ # are rejected) and will not be able to publish or gossip any messages.
+ max-app-specific-penalty: -100
+ min-app-specific-penalty: -1
+ # This is the penalty for an unknown identity. It is
+ # applied to the peer's score when the peer is not in the identity list.
+ unknown-identity-penalty: -100
+ # This is the penalty for an invalid subscription.
+ # It is applied to the peer's score when the peer subscribes to a topic that it is
+ # not authorized to subscribe to.
+ invalid-subscription-penalty: -100
+ # The penalty for duplicate messages detected by the gossipsub tracer for a peer.
+ # The penalty is multiplied by the current duplicate message count for a peer before it is applied to the application specific score.
+ duplicate-message-penalty: -10e-4
+ # The threshold at which the duplicate message count for a peer will result in the peer being penalized.
+ duplicate-message-threshold: 10e+4
+ # This is the reward for well-behaving staked peers.
+ # If a peer does not have any misbehavior record, e.g., invalid subscription,
+ # invalid message, etc., it will be rewarded with this score.
+ max-app-specific-reward: 100
+ # This is the reward for staked peers. It is applied
+ # to the peer's score when the peer does not have any misbehavior record, e.g.,
+ # invalid subscription, invalid message, etc. The purpose is to reward staked
+ # peers for their contribution to the network and prioritize them in neighbor selection.
+ staked-identity-reward: 100
+ scoring-registry:
+ # Defines the duration of time, after the node startup,
+ # during which the scoring registry remains inactive before penalizing nodes.
+ # Throughout this startup silence period, the application-specific penalty
+ # returned for all nodes will be 0, and any invalid control message notifications
+ # will be ignored. This configuration allows nodes to stabilize and initialize before
+ # applying penalties or processing invalid control message notifications.
+ startup-silence-duration: 1h
+ app-specific-score:
+ # Number of workers that asynchronously update the app-specific scores when they expire.
+ score-update-worker-num: 5
+ # Size of the queue used by the worker pool for the app-specific score update requests. The queue is used to buffer the app-specific score update requests
+ # before they are processed by the worker pool. The queue size must be larger than 10x the total number of peers in the network.
+ # The queue is deduplicated based on the peer ids, ensuring that there is only one app-specific score update request per peer in the queue.
+ score-update-request-queue-size: 10_000
+ # Score TTL is the time to live for the app-specific score. Once the score expires, a new request will be sent to the app-specific score provider to update the score.
+ # Until the score is updated, the previous score will be used.
+ score-ttl: 1m
+ # Size of the queue used by the score registry to buffer the invalid control message notifications before they are processed by the worker pool. The queue size must be larger than 10x the total number of peers in the network.
+ invalid-control-message-notification-queue-size: 10_000
+ spam-record-cache:
+ # Size of the cache used to track spam records at gossipsub. Each peer id is mapped to a spam record that keeps track of the spam score for that peer.
+ # The cache should be big enough to keep track of the entire network's size. Otherwise, the local node's view of the network will be incomplete due to cache eviction.
+ cache-size: 10_000
+ decay:
+ # Threshold level for the spam record penalty.
+ # At each evaluation period, when a node's penalty is below this value, the decay rate slows down, ensuring longer decay periods for malicious nodes and quicker decay for honest ones.
+ penalty-decay-slowdown-threshold: -99
+ # This setting adjusts the decay rate when a node's penalty falls below the threshold.
+ # The decay rate, ranging between 0 and 1, dictates how quickly penalties decrease: a higher rate results in slower decay.
+ # The decay calculation is multiplicative (newPenalty = decayRate * oldPenalty).
+ # The reduction factor increases the decay rate, thus decelerating the penalty reduction. For instance, with a 0.01 reduction factor,
+ # the decay rate increases by 0.01 at each evaluation interval when the penalty is below the threshold.
+ # Consequently, a decay rate of `x` diminishes the penalty to zero more rapidly than a rate of `x+0.01`.
+ penalty-decay-rate-reduction-factor: 0.01
+ # Defines the frequency for evaluating and potentially adjusting the decay process of a spam record.
+ # At each interval, the system assesses the current penalty of a node.
+ # If this penalty is below the defined threshold, the decay rate is modified according to the reduction factor, slowing down the penalty reduction process.
+ # This reassessment at regular intervals ensures that the decay rate is dynamically adjusted to reflect the node's ongoing behavior,
+ # maintaining a balance between penalizing malicious activity and allowing recovery for honest nodes.
+ penalty-decay-evaluation-period: 10m
+ # The minimum speed at which the spam penalty value of a peer is decayed.
+ # A spam record is initialized with a decay value between 0.5 and 0.7, and this value is then adjusted up to 0.99 on consecutive misbehaviors.
+ # The maximum decay value decays the penalty by 1% every second. The decay is applied geometrically, i.e., `newPenalty = oldPenalty * decay`; hence, a higher decay value
+ # indicates a lower decay speed, i.e., it takes more heartbeat intervals to decay a penalty back to zero when the decay value is high.
+ # Assume:
+ # penalty = -100 (the maximum application specific penalty is -100)
+ # skipDecayThreshold = -0.1
+ # It takes around 688 seconds for the penalty to decay to greater than -0.1, at which point it is set to 0:
+ # x * 0.99 ^ n > -0.1 (assuming negative x).
+ # 0.99 ^ n < -0.1 / x (dividing by the negative x flips the inequality).
+ # Now we can take the logarithm of both sides (with any base, but let's use base 10 for simplicity).
+ # log( 0.99 ^ n ) < log( -0.1 / x )
+ # Using the properties of logarithms, we can bring down the exponent:
+ # n * log( 0.99 ) < log( -0.1 / x )
+ # And finally, we can solve for n (log( 0.99 ) is negative, so the inequality flips again):
+ # n > log( -0.1 / x ) / log( 0.99 )
+ # We can plug in x = -100:
+ # n > log( -0.1 / -100 ) / log( 0.99 )
+ # n > log( 0.001 ) / log( 0.99 )
+ # n > -3 / log( 0.99 )
+ # n > 687.31
+ minimum-spam-penalty-decay-factor: 0.99
+ # The maximum rate at which the spam penalty value of a peer decays. The decay value increases
+ # during sustained malicious activity, leading to a slower recovery of the app-specific score for the penalized node. Conversely,
+ # the decay value decreases, allowing faster recoveries, when nodes exhibit fleeting misbehavior.
+ maximum-spam-penalty-decay-factor: 0.8
+ # When the negative penalty is above this value (i.e., closer to zero), the decay function will not be called;
+ # instead, the penalty will be set to 0. This is to prevent the penalty from keeping a small negative value for a long time.
+ skip-decay-threshold: -0.1
+ misbehaviour-penalties:
+ # The penalty applied to the application specific penalty when a peer conducts a graft misbehaviour.
+ graft: -10
+ # The penalty applied to the application specific penalty when a peer conducts a prune misbehaviour.
+ prune: -10
+ # The penalty applied to the application specific penalty when a peer conducts an iHave misbehaviour.
+ ihave: -10
+ # The penalty applied to the application specific penalty when a peer conducts an iWant misbehaviour.
+ iwant: -10
+ # The penalty applied to the application specific penalty when a peer conducts an RPC publish message misbehaviour.
+ publish: -10
+ # The factor used to reduce the penalty for control message misbehaviours on cluster prefixed topics. This allows a more lenient punishment for nodes
+ # that fall behind and may need to request old data.
+ cluster-prefixed-reduction-factor: 0.2
+ subscription-provider:
+ # The interval for updating the list of peers subscribed to each topic in gossipsub. This is used to keep track of subscription
+ # violations and penalize peers accordingly. The recommended value is on the order of a few minutes to avoid contention, as the operation
+ # reads all topics and all peers subscribed to each topic.
+ update-interval: 10m
+ # The size of the cache for keeping the list of all peers subscribed to each topic (same as the local node). This cache is the local node's
+ # view of the network and is used to detect subscription violations and penalize peers accordingly. It is recommended to be big enough to
+ # keep track of the entire network. Otherwise, the local node's view of the network will be incomplete due to cache eviction.
+ # The recommended size is 10x the number of peers in the network.
+ cache-size: 10000
+ # Enables or disables the libp2p peer gater.
+ peer-gater-enabled: false
+ # The per-IP decay for all counters tracked by the peer gater for a peer.
+ peer-gater-source-decay: 10m
+ # The priority topic delivery weights.
+ peer-gater-topic-delivery-weights-override: |
+ consensus-committee: 1.5, sync-committee: .75
+
+ # Application layer spam prevention
+ alsp-spam-record-cache-size: 1000
+ alsp-spam-report-queue-size: 10_000
+ alsp-disable-penalty: false
+ alsp-heart-beat-interval: 1s
+ # Base probability in [0,1] that's used in creating the final probability of creating a
+ # misbehavior report for a BatchRequest message. This is why the word "base" is used in the name of this field,
+ # since it's not the final probability and there are other factors that determine the final probability.
+ # The reason for this is that we want to increase the probability of creating a misbehavior report for a large batch.
+ # Create a misbehavior report for about 0.2% of BatchRequest messages for normal batch requests (i.e. not too large)
+ # and about 15% of BatchRequest messages for very large batch requests.
+ # The final batch request probability is calculated as follows:
+ # batchRequestBaseProb * (len(batchRequest.BlockIDs) + 1) / synccore.DefaultConfig().MaxSize
+ # Example 1 (small batch of block IDs): if the batch request is for 10 block IDs and batchRequestBaseProb is 0.01, then the probability of
+ # creating a misbehavior report is:
+ # batchRequestBaseProb * (10+1) / synccore.DefaultConfig().MaxSize
+ # = 0.01 * 11 / 64 = 0.00171875 = 0.171875%
+ # Example 2 (large batch of block IDs): if the batch request is for 1000 block IDs and batchRequestBaseProb is 0.01, then the probability of
+ # creating a misbehavior report is:
+ # batchRequestBaseProb * (1000+1) / synccore.DefaultConfig().MaxSize
+ # = 0.01 * 1001 / 64 = 0.15640625 = 15.640625%
+ alsp-sync-engine-batch-request-base-prob: 0.01
+ # Base probability in [0,1] that's used in creating the final probability of creating a
+ # misbehavior report for a RangeRequest message. This is why the word "base" is used in the name of this field,
+ # since it's not the final probability and there are other factors that determine the final probability.
+ # The reason for this is that we want to increase the probability of creating a misbehavior report for a large range.
+ # Create a misbehavior report for about 0.2% of RangeRequest messages for normal range requests (i.e. not too large)
+ # and about 15% of RangeRequest messages for very large range requests.
+ # The final probability is calculated as follows:
+ # rangeRequestBaseProb * ((rangeRequest.ToHeight-rangeRequest.FromHeight) + 1) / synccore.DefaultConfig().MaxSize
+ # Example 1 (small range): if the range request is for 10 blocks and rangeRequestBaseProb is 0.01, then the probability of
+ # creating a misbehavior report is:
+ # rangeRequestBaseProb * (10+1) / synccore.DefaultConfig().MaxSize
+ # = 0.01 * 11 / 64 = 0.00171875 = 0.171875%
+ # Example 2 (large range): if the range request is for 1000 blocks and rangeRequestBaseProb is 0.01, then the probability of
+ # creating a misbehavior report is:
+ # rangeRequestBaseProb * (1000+1) / synccore.DefaultConfig().MaxSize
+ # = 0.01 * 1001 / 64 = 0.15640625 = 15.640625%
+ alsp-sync-engine-range-request-base-prob: 0.01
+ # Probability in [0,1] of creating a misbehavior report for a SyncRequest message.
+ # Create a misbehavior report for 1% of SyncRequest messages.
+ alsp-sync-engine-sync-request-prob: 0.01
diff --git a/config/docs/resourceManager.MD b/config/docs/resourceManager.MD
new file mode 100644
index 00000000000..620e6ba3547
--- /dev/null
+++ b/config/docs/resourceManager.MD
@@ -0,0 +1,229 @@
+# libp2p Resource Manager Configuration in Flow Go
+## Table of Contents
+1. [Overview](#overview)
+2. [What are These Limits?](#what-are-these-limits)
+3. [How to Set Limits](#how-to-set-limits)
+   1. [In Configuration File (`default-config.yaml`)](#in-configuration-file-default-configyaml)
+   2. [Via Runtime Flags](#via-runtime-flags)
+4. [Importance of Each Resource Scope](#importance-of-each-resource-scope)
+   1. [What does each scope mean?](#what-does-each-scope-mean)
+   2. [Scope Hierarchy](#scope-hierarchy)
+   3. [On Transient Scope](#on-transient-scope)
+5. [Case Study: what do scopes mean in terms of one resource?](#case-study-what-do-scopes-mean-in-terms-of-one-resource)
+   1. [System Scope](#system-scope)
+   2. [Transient Scope](#transient-scope)
+   3. [Protocol Scope](#protocol-scope)
+   4. [Peer Scope](#peer-scope)
+   5. [Peer-Protocol Scope](#peer-protocol-scope)
+6. [Troubleshooting (For Flow Node Operators)](#troubleshooting-for-flow-node-operators)
+   1. [Observation](#observation)
+   2. [Excessive Streams Across All Protocols and Peers](#excessive-streams-across-all-protocols-and-peers)
+   3. [Excessive Streams in a Specific Protocol](#excessive-streams-in-a-specific-protocol)
+   4. [Excessive Streams from Individual Peers](#excessive-streams-from-individual-peers)
+   5. [Excessive Streams from a Specific Peer on a Specific Protocol](#excessive-streams-from-a-specific-peer-on-a-specific-protocol)
+7. [Wildcard: Increasing all limit overrides at scale](#wildcard-increasing-all-limit-overrides-at-scale)
+8. [References](#references)
+
+## Overview
+In Flow Go, the libp2p Resource Manager plays a crucial role in managing network resources effectively. This document provides guidance on setting various limits through configuration files and runtime flags, helping you optimize resource usage based on specific network conditions or protocol behaviors.
+
+### What are These Limits?
+The libp2p Resource Manager in Flow Go allows setting limits on different types of network resources like streams, connections, file descriptors, and memory. These limits are categorized under different scopes: `system`, `transient`, `protocol`, `peer`, and `peer-protocol`. Each scope serves a distinct purpose in resource management.
+
+### How to Set Limits
+
+#### In Configuration File (`default-config.yaml`)
+You can define these limits in the `default-config.yaml` file under the `libp2p-resource-manager` section. Each limit can be set for different scopes as shown:
+
+```yaml
+libp2p-resource-manager:
+  memory-limit-ratio: <value>
+  file-descriptors-ratio: <value>
+  limits-override:
+    <scope>:
+      streams-inbound: <value>
+      streams-outbound: <value>
+      ...
+```
+
+#### Via Runtime Flags
+Each limit can also be dynamically set using runtime flags in the format:
+`--libp2p-resource-manager-limits-override-<scope>-<limit>`
+
+For example:
+- To set inbound stream limits for the system scope: `--libp2p-resource-manager-limits-override-system-streams-inbound=<value>`
+- For outbound streams in the protocol scope: `--libp2p-resource-manager-limits-override-protocol-streams-outbound=<value>`
+
+**Exceptions:** The `memory-limit-ratio` and `file-descriptors-ratio` limits are set via the following flags, and both must be **between 0 and 1**:
+- `--libp2p-resource-manager-memory-limit-ratio=<value>`
+- `--libp2p-resource-manager-file-descriptors-ratio=<value>`
+- For example: `--libp2p-resource-manager-memory-limit-ratio=0.5` means that the memory limit for libp2p resources is set to 50% of the available memory, i.e.,
+  libp2p can take up to 50% of the available memory on the system.
+
+
+### Importance of Each Resource Scope
+In the libp2p Resource Manager, scopes are organized hierarchically; the `system`, `protocol`, `peer`, and `peer-protocol` scopes are arranged in _descending order of priority_.
+This means that the `system` scope has the highest priority, followed by the `protocol`, `peer`, and `peer-protocol` scopes.
+As we explain later in this documentation, the `transient` scope is a special case and does not strictly fit into the hierarchy of scopes.
+
+#### What does each scope mean?
+ - **System Scope**: sets the global limits for the entire system, ensuring overall stability and preventing resource hogging by any single component.
+ - **Transient Scope**: manages resources for partially established connections or streams and prevents resource drainage during the establishment phase.
+ Transient resources are those not yet fully established, such as a connection still being negotiated or a stream not yet fully opened. The transient scope is critical
+ for guarding against resource drainage during the establishment phase.
+ - **Protocol Scope**: sets limits for specific protocols (e.g., DHT, gossipsub) and prevents any single protocol from dominating resource usage. The protocol scope is essential for
+ protocol-specific resource tuning and preventing abuse by any single protocol.
+ - **Peer Scope**: manages resources used by individual (remote) peers on the local peer and prevents a single (remote) peer from exhausting the resources of the local peer. The peer scope is critical for preventing abuse by any single (remote) peer.
+ - **Peer-Protocol Scope**: sets limits for specific (remote) peers on specific protocols at the local peer and prevents any single (remote) peer from dominating resource usage on a specific protocol at the local peer. It also prevents a single protocol
+ from dominating the resource usage of a specific (remote) peer on the local peer among all the protocols the (remote) peer is participating in with the local peer.
+
+#### Scope Hierarchy
+The limits of higher-order scopes **cap** the limits of lower scopes:
+1. **System Scope vs. Protocol/Peer Scopes**:
+   - The system scope sets global limits. If the system scope has a lower limit than a protocol or peer scope, the system limit will be the effective constraint
+     because it's the upper bound for the entire system.
+   - For example, if the system scope has an inbound stream limit of 10,000 and a protocol scope has a limit of 15,000,
+     the effective limit will be 10,000 because the system scope's limit applies globally.
+
+2. **Protocol Scope vs. Peer Scope**:
+   - The protocol scope sets limits for specific protocols, while the peer scope sets limits for individual peers. These are independent of each other, but both are under the overarching system scope.
+   - A peer can't exceed the limits set by the protocol scope, and vice versa. They operate within their own contexts but under the maximum limits imposed by the system scope.
+
+It's essential to understand that the lowest limit in the hierarchy of applicable scopes will effectively be the operational limit.
+If the system inbound stream limit is lower than the protocol inbound stream limit, the system limit will effectively cap the maximum number of inbound streams, regardless of the higher limit set at the protocol level.
+Also, the limits of higher scopes do not replace the limits of lower scopes; rather, they add another layer of constraint.
+Each scope must independently satisfy its own limits without violating the limits of the scopes above it.
+When configuring limits, it's crucial to consider the hierarchical nature of these scopes.
+Ensure that the limits at lower scopes (like protocol or peer) are set within the bounds of higher scopes (like system) to maintain a coherent and effective resource management strategy.
+
+#### On Transient Scope
+The `transient` scope in the libp2p Resource Manager hierarchy has a specific and unique role.
+It is placed **alongside** other scopes like `system`, `protocol`, `peer`, and `peer-protocol`, but it serves a distinct purpose. Here's how the `transient` scope fits into the hierarchy:
+The `transient` scope is designed to manage resources for connections and streams that are in the process of being established but haven't yet been fully negotiated or associated with a specific peer or protocol.
+This includes streams that are awaiting protocol negotiation or connections that are in the initial stages of establishment.
+
+In terms of hierarchy, the `transient` scope is below `system`, but it is not strictly above or below other scopes like `protocol`.
+Instead, it operates more as a parallel scope that specifically handles resources in a temporary, intermediate state.
+While the `system` scope sets the global limits, the `transient` scope sets limits on resources that are not yet fully categorized into other specific scopes (like `peer` or `protocol`).
+The limits set in the `transient` scope are independent of those in the `protocol`, `peer`, and `peer-protocol` scopes but still operate under the overarching constraints of the `system` scope.
+Once a connection or stream transitions out of the `transient` state (i.e., when a protocol is negotiated, or a peer is identified), it then falls under the appropriate scope (such as `protocol` or `peer`) and adheres to the limits set within those scopes.
+The `transient` scope is critical for managing resources during the negotiation phase of connections and streams. It helps in protecting the system against resource exhaustion attacks that can occur at the initial stages of connection or stream establishment.
+
+**Example:** When the limit for system-wide connections is set lower than the limit for transient-wide connections in the libp2p Resource Manager, the system-wide limit effectively becomes the constraining factor.
+In this example, the system-wide connections limit acts as the global cap for all connections in the libp2p network, regardless of their state (established, transient, etc.).
+If this limit is lower than the transient-wide limit, it essentially restricts the total number of connections (including transient ones) to this lower system-wide limit.
+The transient-wide limit is intended to manage connections that are in the process of being fully established.
+
+### Case Study: what do scopes mean in terms of one resource?
+As an example, we study the default limits for "Streams Inbound/Outbound" at different scopes in the libp2p Resource Manager. The limits on other resources follow a similar pattern.
+Here's an explanation of what these default limits mean at each scope:
+
+### System Scope
+- **Streams Inbound/Outbound (e.g., 15,000)**:
+  - **Meaning**: This limit defines the maximum number of inbound and outbound streams that can be active across the entire system, regardless of the specific protocols or peers involved.
+  - **Implication**: It is a global cap ensuring that the total number of streams at any time does not exceed this limit, thus preventing system-wide resource exhaustion due to too many streams.
+
+### Transient Scope
+- **Streams Inbound/Outbound (e.g., 15,000)**:
+  - **Meaning**: This limit controls the number of streams in the transient state, i.e., streams that are being set up but not yet fully established or associated with a peer/protocol.
+  - **Implication**: It provides a buffer for handling stream negotiations, ensuring the system can manage a high volume of connections being initiated without overwhelming the resources during the setup phase.
+
+### Protocol Scope
+- **Streams Inbound/Outbound (e.g., 5,000)**:
+  - **Meaning**: This limit specifies the maximum number of inbound and outbound streams for each protocol. It applies to each protocol independently.
+  - **Implication**: It ensures that no single protocol can dominate the network's resources, maintaining a balance in resource allocation among various protocols.
+
+### Peer Scope
+- **Streams Inbound/Outbound (e.g., 1,000)**:
+  - **Meaning**: This sets the maximum number of inbound and outbound streams allowed per (remote) peer on the local peer.
+  - **Implication**: It restricts the resource usage by individual peers, ensuring no single (remote) peer can exhaust network resources with too many streams.

### Peer-Protocol Scope
+- **Streams Inbound/Outbound (e.g., 500)**:
+  - **Meaning**: This limit is the most granular, applying to streams from each (remote) peer for each protocol on the local peer.
+  - **Implication**: It offers fine-grained control, preventing any (remote) peer from using excessive resources in a specific protocol on the local peer, thus ensuring balanced resource use.
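+
+Putting the five scopes together, the stream-limit examples from this case study can be written as a single `limits-override` block. The following sketch is illustrative only: it reuses the example values above (not necessarily the shipped defaults) and shows how each lower scope stays within the bounds of the scopes above it:
+
+```yaml
+libp2p-resource-manager:
+  limits-override:
+    system:          # global upper bound for the whole node
+      streams-inbound: 15000
+      streams-outbound: 15000
+    transient:       # streams still being negotiated; capped by the system scope
+      streams-inbound: 15000
+      streams-outbound: 15000
+    protocol:        # per protocol; must fit within the system bound
+      streams-inbound: 5000
+      streams-outbound: 5000
+    peer:            # per remote peer
+      streams-inbound: 1000
+      streams-outbound: 1000
+    peer-protocol:   # per remote peer, per protocol
+      streams-inbound: 500
+      streams-outbound: 500
+```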
+
+## Troubleshooting (For Flow Node Operators)
+This troubleshooting guide is based on the case of excessive streams in the network. Similar guidelines can be applied to other resources as well.
+
+### Observation
+If you observe an excessive number of open streams (or open `goroutines` affiliated with a libp2p protocol) in your network,
+the appropriate action would be to adjust the stream limits within specific scopes, depending on the nature of the issue.
+
+### 1. Excessive Streams Across All Protocols and Peers
+- **Scope**: System Scope
+- **Limits to Adjust**:
+  - `streams-inbound`
+  - `streams-outbound`
+- **Reason**: The system scope applies globally across all peers and protocols. Adjusting these limits helps manage the overall number of streams in the network.
+
+### 2. Excessive Streams in a Specific Protocol
+- **Scope**: Protocol Scope
+- **Limits to Adjust**:
+  - `streams-inbound`
+  - `streams-outbound`
+- **Reason**: If a particular protocol (e.g., DHT, gossipsub) is opening too many streams, tightening limits in the protocol scope can restrict the resource usage by that specific protocol.
+
+### 3. Excessive Streams from Individual Peers
+- **Scope**: Peer Scope
+- **Limits to Adjust**:
+  - `streams-inbound`
+  - `streams-outbound`
+- **Reason**: When specific peers are opening too many streams, adjusting these limits can prevent any single peer from using an excessive number of streams.
+
+### 4. Excessive Streams from a Specific Peer on a Specific Protocol
+- **Scope**: Peer-Protocol Scope
+- **Limits to Adjust**:
+  - `streams-inbound`
+  - `streams-outbound`
+- **Reason**: This is the most granular level of control, where you can restrict stream usage for a specific protocol used by a specific peer.
+
+## Wildcard: Increasing all limit overrides at scale
+To preserve the hierarchy of scopes, you need to adjust the limits in each scope so that they don't exceed the limits of higher scopes.
+One easy way is to increase all limits by a certain factor across all scopes. For example, if you want to increase all limits by a factor of 1.5, you can do so by adjusting the flags for each limit within each scope.
+
+### System Scope
+1. **Streams Inbound/Outbound**
+   - `--libp2p-resource-manager-limits-override-system-streams-inbound=<1.5 * current value>`
+   - `--libp2p-resource-manager-limits-override-system-streams-outbound=<1.5 * current value>`
+2. **Connections Inbound/Outbound**
+   - `--libp2p-resource-manager-limits-override-system-connections-inbound=<1.5 * current value>`
+   - `--libp2p-resource-manager-limits-override-system-connections-outbound=<1.5 * current value>`
+3. **File Descriptors**
+   - `--libp2p-resource-manager-limits-override-system-fd=<1.5 * current value>`
+4. **Memory Bytes**
+   - `--libp2p-resource-manager-limits-override-system-memory-bytes=<1.5 * current value>`
+
+### Transient Scope
+1. **Streams Inbound/Outbound**
+   - `--libp2p-resource-manager-limits-override-transient-streams-inbound=<1.5 * current value>`
+   - `--libp2p-resource-manager-limits-override-transient-streams-outbound=<1.5 * current value>`
+2. **Connections Inbound/Outbound**
+   - `--libp2p-resource-manager-limits-override-transient-connections-inbound=<1.5 * current value>`
+   - `--libp2p-resource-manager-limits-override-transient-connections-outbound=<1.5 * current value>`
+3. **File Descriptors**
+   - `--libp2p-resource-manager-limits-override-transient-fd=<1.5 * current value>`
+4. **Memory Bytes**
+   - `--libp2p-resource-manager-limits-override-transient-memory-bytes=<1.5 * current value>`
+
+### Protocol Scope
+1. **Streams Inbound/Outbound**
+   - `--libp2p-resource-manager-limits-override-protocol-streams-inbound=<1.5 * current value>`
+   - `--libp2p-resource-manager-limits-override-protocol-streams-outbound=<1.5 * current value>`
+
+### Peer Scope
+1. **Streams Inbound/Outbound**
+   - `--libp2p-resource-manager-limits-override-peer-streams-inbound=<1.5 * current value>`
+   - `--libp2p-resource-manager-limits-override-peer-streams-outbound=<1.5 * current value>`
+
+### Peer-Protocol Scope
+1. **Streams Inbound/Outbound**
+   - `--libp2p-resource-manager-limits-override-peer-protocol-streams-inbound=<1.5 * current value>`
+   - `--libp2p-resource-manager-limits-override-peer-protocol-streams-outbound=<1.5 * current value>`
+
+### Notes
+- Replace `<1.5 * current value>` with the actual calculated value from `default-config.yaml`. For example, if the current system streams inbound limit is 10,000, the new value would be `--libp2p-resource-manager-limits-override-system-streams-inbound=15000`.
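+
+Since this involves recomputing quite a few flags, it can help to generate the new values mechanically instead of by hand. Below is a minimal sketch in Go (a hypothetical helper, not part of the repository); the flag names follow the pattern above, and the current values are placeholders that you should replace with the ones from your `default-config.yaml`:
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+	const factor = 1.5 // the scaling factor chosen above
+
+	// Placeholder current limits; read the real values off default-config.yaml.
+	limits := []struct {
+		name  string
+		value int
+	}{
+		{"system-streams-inbound", 10000},
+		{"system-streams-outbound", 10000},
+		{"transient-streams-inbound", 10000},
+		{"transient-streams-outbound", 10000},
+	}
+
+	// Print one override flag per limit, scaled by the chosen factor.
+	for _, l := range limits {
+		fmt.Printf("--libp2p-resource-manager-limits-override-%s=%d\n",
+			l.name, int(float64(l.value)*factor))
+	}
+}
+```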
+ + +# References +- https://github.com/libp2p/go-libp2p/blob/master/p2p/host/resource-manager/README.md \ No newline at end of file diff --git a/consensus/aggregators.go b/consensus/aggregators.go index ccdab038989..853bec81798 100644 --- a/consensus/aggregators.go +++ b/consensus/aggregators.go @@ -62,7 +62,7 @@ func NewTimeoutAggregator(log zerolog.Logger, ) (hotstuff.TimeoutAggregator, error) { timeoutCollectorFactory := timeoutcollector.NewTimeoutCollectorFactory(log, distributor, timeoutProcessorFactory) - collectors := timeoutaggregator.NewTimeoutCollectors(log, lowestRetainedView, timeoutCollectorFactory) + collectors := timeoutaggregator.NewTimeoutCollectors(log, hotstuffMetrics, lowestRetainedView, timeoutCollectorFactory) // initialize the timeout aggregator aggregator, err := timeoutaggregator.NewTimeoutAggregator( diff --git a/consensus/config.go b/consensus/config.go index 6c6716b142d..bb4c40d930b 100644 --- a/consensus/config.go +++ b/consensus/config.go @@ -5,8 +5,8 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" + "github.com/onflow/flow-go/consensus/hotstuff/pacemaker" "github.com/onflow/flow-go/consensus/hotstuff/pacemaker/timeout" - "github.com/onflow/flow-go/module/updatable_configs" ) // HotstuffModules is a helper structure to encapsulate dependencies to create @@ -25,14 +25,13 @@ type HotstuffModules struct { } type ParticipantConfig struct { - StartupTime time.Time // the time when consensus participant enters first view - TimeoutMinimum time.Duration // the minimum timeout for the pacemaker - TimeoutMaximum time.Duration // the maximum timeout for the pacemaker - TimeoutAdjustmentFactor float64 // the factor at which the timeout duration is adjusted - HappyPathMaxRoundFailures uint64 // number of failed rounds before first timeout increase - BlockRateDelay time.Duration // a delay to broadcast block proposal in order to control the block production rate - MaxTimeoutObjectRebroadcastInterval time.Duration // maximum interval for timeout object rebroadcast - Registrar updatable_configs.Registrar // optional: for registering HotStuff configs as dynamically configurable + StartupTime time.Time // the time when consensus participant enters first view + TimeoutMinimum time.Duration // the minimum timeout for the pacemaker + TimeoutMaximum time.Duration // the maximum timeout for the pacemaker + TimeoutAdjustmentFactor float64 // the factor at which the timeout duration is adjusted + HappyPathMaxRoundFailures uint64 // number of failed rounds before first timeout increase + MaxTimeoutObjectRebroadcastInterval time.Duration // maximum interval for timeout object rebroadcast + ProposalDurationProvider hotstuff.ProposalDurationProvider // a delay to broadcast block proposal in order to control the block production rate } func DefaultParticipantConfig() ParticipantConfig { @@ -42,9 +41,8 @@ func DefaultParticipantConfig() ParticipantConfig { TimeoutMaximum: time.Duration(defTimeout.MaxReplicaTimeout) * time.Millisecond, TimeoutAdjustmentFactor: defTimeout.TimeoutAdjustmentFactor, HappyPathMaxRoundFailures: defTimeout.HappyPathMaxRoundFailures, - BlockRateDelay: defTimeout.GetBlockRateDelay(), MaxTimeoutObjectRebroadcastInterval: time.Duration(defTimeout.MaxTimeoutObjectRebroadcastInterval) * time.Millisecond, - Registrar: nil, + ProposalDurationProvider: pacemaker.NoProposalDelay(), } return cfg } @@ -75,14 +73,14 @@ func WithHappyPathMaxRoundFailures(happyPathMaxRoundFailures uint64) Option { } } 
-func WithBlockRateDelay(delay time.Duration) Option { +func WithProposalDurationProvider(provider hotstuff.ProposalDurationProvider) Option { return func(cfg *ParticipantConfig) { - cfg.BlockRateDelay = delay + cfg.ProposalDurationProvider = provider } } -func WithConfigRegistrar(reg updatable_configs.Registrar) Option { +func WithStaticProposalDuration(dur time.Duration) Option { return func(cfg *ParticipantConfig) { - cfg.Registrar = reg + cfg.ProposalDurationProvider = pacemaker.NewStaticProposalDurationProvider(dur) } } diff --git a/consensus/follower.go b/consensus/follower.go index d155948833b..41bbe4eb8e1 100644 --- a/consensus/follower.go +++ b/consensus/follower.go @@ -23,13 +23,14 @@ import ( // // CAUTION: all pending blocks are required to be valid (guaranteed if the block passed the compliance layer) func NewFollower(log zerolog.Logger, + mempoolMetrics module.MempoolMetrics, headers storage.Headers, updater module.Finalizer, notifier hotstuff.FollowerConsumer, rootHeader *flow.Header, rootQC *flow.QuorumCertificate, finalized *flow.Header, - pending []*flow.Header, + pending []*flow.ProposalHeader, ) (*hotstuff.FollowerLoop, error) { forks, err := NewForks(finalized, headers, updater, notifier, rootHeader, rootQC) if err != nil { @@ -43,7 +44,7 @@ func NewFollower(log zerolog.Logger, } // initialize the follower loop - loop, err := hotstuff.NewFollowerLoop(log, forks) + loop, err := hotstuff.NewFollowerLoop(log, mempoolMetrics, forks) if err != nil { return nil, fmt.Errorf("could not create follower loop: %w", err) } diff --git a/consensus/follower_test.go b/consensus/follower_test.go index 7496c103658..104a593331e 100644 --- a/consensus/follower_test.go +++ b/consensus/follower_test.go @@ -17,6 +17,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" mockstorage "github.com/onflow/flow-go/storage/mock" @@ -62,7 +63,7 @@ type HotStuffFollowerSuite struct { rootHeader *flow.Header rootQC *flow.QuorumCertificate finalized *flow.Header - pending []*flow.Header + pending []*flow.ProposalHeader follower *hotstuff.FollowerLoop mockConsensus *MockConsensus @@ -74,7 +75,7 @@ type HotStuffFollowerSuite struct { // SetupTest initializes all the components needed for the Follower. 
// The follower itself is instantiated in method BeforeTest func (s *HotStuffFollowerSuite) SetupTest() { - identities := unittest.IdentityListFixture(4, unittest.WithRole(flow.RoleConsensus)) + identities := unittest.IdentityListFixture(4, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]) s.mockConsensus = &MockConsensus{identities: identities} // mock storage headers @@ -90,11 +91,13 @@ func (s *HotStuffFollowerSuite) SetupTest() { parentID, err := flow.HexStringToIdentifier("aa7693d498e9a087b1cadf5bfe9a1ff07829badc1915c210e482f369f9a00a70") require.NoError(s.T(), err) s.rootHeader = &flow.Header{ - ParentID: parentID, - Timestamp: time.Now().UTC(), - Height: 21053, - View: 52078, - ParentView: 52077, + HeaderBody: flow.HeaderBody{ + ParentID: parentID, + Timestamp: uint64(time.Now().UnixMilli()), + Height: 21053, + View: 52078, + ParentView: 52077, + }, } signerIndices, err := signature.EncodeSignersToIndices(identities.NodeIDs(), identities.NodeIDs()[:3]) @@ -108,7 +111,7 @@ func (s *HotStuffFollowerSuite) SetupTest() { // we start with the latest finalized block being the root block s.finalized = s.rootHeader // and no pending (unfinalized) block - s.pending = []*flow.Header{} + s.pending = []*flow.ProposalHeader{} } // BeforeTest instantiates and starts Follower @@ -116,6 +119,7 @@ func (s *HotStuffFollowerSuite) BeforeTest(suiteName, testName string) { var err error s.follower, err = consensus.NewFollower( zerolog.New(os.Stderr), + metrics.NewNoopCollector(), s.headers, s.finalizer, s.notifier, @@ -165,8 +169,7 @@ func (s *HotStuffFollowerSuite) TestOnBlockIncorporated() { rootBlockView := s.rootHeader.View child := s.mockConsensus.extendBlock(rootBlockView+2, s.rootHeader) grandChild := s.mockConsensus.extendBlock(child.View+2, child) - - certifiedChild := toCertifiedBlock(s.T(), child, grandChild.QuorumCertificate()) + certifiedChild := toCertifiedBlock(s.T(), child, grandChild.ParentQC()) blockIngested := make(chan struct{}) // close when child was ingested s.notifier.On("OnBlockIncorporated", blockWithID(child.ID())).Run(func(_ mock.Arguments) { close(blockIngested) @@ -203,14 +206,14 @@ func (s *HotStuffFollowerSuite) TestFollowerFinalizedBlock() { d := s.mockConsensus.extendBlock(c.View+1, c) // adding b should not advance finality - bCertified := toCertifiedBlock(s.T(), b, c.QuorumCertificate()) + bCertified := toCertifiedBlock(s.T(), b, c.ParentQC()) s.notifier.On("OnBlockIncorporated", blockWithID(b.ID())).Return().Once() s.follower.AddCertifiedBlock(bCertified) // adding the certified child of b should advance finality to b finalityAdvanced := make(chan struct{}) // close when finality has advanced to b - certifiedChild := toCertifiedBlock(s.T(), c, d.QuorumCertificate()) - s.notifier.On("OnBlockIncorporated", blockWithID(certifiedChild.ID())).Return().Once() + certifiedChild := toCertifiedBlock(s.T(), c, d.ParentQC()) + s.notifier.On("OnBlockIncorporated", blockWithID(certifiedChild.BlockID())).Return().Once() s.finalizer.On("MakeFinal", blockID(b.ID())).Return(nil).Once() s.notifier.On("OnFinalizedBlock", blockWithID(b.ID())).Run(func(_ mock.Arguments) { close(finalityAdvanced) @@ -277,13 +280,13 @@ func (s *HotStuffFollowerSuite) TestOutOfOrderBlocks() { // now we feed the blocks in some wild view order into the Follower // (Caution: we still have to make sure the parent is known, before we give its child to the Follower) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block03, block04.QuorumCertificate())) - 
s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block07, block08.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block11, block17.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block01, block02.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block05, block06.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block09, block10.QuorumCertificate())) - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block13, block14.QuorumCertificate())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block03, block04.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block07, block08.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block11, block17.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block01, block02.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block05, block06.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block09, block10.ParentQC())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block13, block14.ParentQC())) // Block 20 should now finalize the fork up to and including block13 finalityAdvanced := make(chan struct{}) // close when finality has advanced to b @@ -298,7 +301,7 @@ func (s *HotStuffFollowerSuite) TestOutOfOrderBlocks() { close(finalityAdvanced) }).Return(nil).Once() - s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block14, block20.QuorumCertificate())) + s.follower.AddCertifiedBlock(toCertifiedBlock(s.T(), block14, block20.ParentQC())) unittest.RequireCloseBefore(s.T(), finalityAdvanced, time.Second, "expect finality progress before timeout") } diff --git a/consensus/hotstuff/README.md b/consensus/hotstuff/README.md index c2ede0b97c9..cd13619bb08 100644 --- a/consensus/hotstuff/README.md +++ b/consensus/hotstuff/README.md @@ -148,7 +148,7 @@ For primary section, we use a randomized, weight-proportional selection. HotStuff's core logic is broken down into multiple components. The figure below illustrates the dependencies of the core components and information flow between these components. -![](/docs/ComponentInteraction.png) +![](/docs/images/ComponentInteraction.png) <!--- source: https://drive.google.com/file/d/1rZsYta0F9Uz5_HM84MlMmMbiR62YALX-/ --> * `MessageHub` is responsible for relaying HotStuff messages. Incoming messages are relayed to the respective modules depending on their message type. @@ -156,7 +156,7 @@ Outgoing messages are relayed to the committee though the networking layer via e * `compliance.Engine` is responsible for processing incoming blocks, caching if needed, validating, extending state and forwarding them to HotStuff for further processing. Note: The embedded `compliance.Core` component is responsible for business logic and maintaining state; `compliance.Engine` schedules work and manages worker threads for the `Core`. * `EventLoop` buffers all incoming events. It manages a single worker routine executing the EventHandler`'s logic. -* `EventHandler` orchestrates all HotStuff components and implements the [HotStuff's state machine](/docs/StateMachine.png). +* `EventHandler` orchestrates all HotStuff components and implements the [HotStuff's state machine](/docs/images/StateMachine.png). The event handler is designed to be executed single-threaded. * `SafetyRules` tracks the latest vote, the latest timeout and determines whether to vote for a block and if it's safe to timeout current round. 
* `Pacemaker` implements Jolteon's PaceMaker. It manages and updates a replica's local view and synchronizes it with other replicas. @@ -173,7 +173,7 @@ The event handler is designed to be executed single-threaded. To separate general graph-theoretical concepts from the concrete blockchain application, `LevelledForest` refers to blocks as graph `vertices` and to a block's view number as `level`. * `Validator` validates the HotStuff-relevant aspects of - - QC: total weight of all signers is more than 2/3 of committee weight, validity of signatures, view number is strictly monotonously increasing; + - QC: total weight of all signers is more than 2/3 of committee weight, validity of signatures, view number is strictly monotonically increasing; - TC: total weight of all signers is more than 2/3 of committee weight, validity of signatures, proof for entering view; - block proposal: from designated primary for the block's respective view, contains proposer's vote for its own block, QC in block is valid, a valid TC for the previous view is included if and only if the QC is not for the previous view; @@ -190,7 +190,7 @@ The event handler is designed to be executed single-threaded. We have translated the HotStuff protocol into the state machine shown below. The state machine is implemented in `EventHandler`. -![](/docs/StateMachine.png) +![](/docs/images/StateMachine.png) <!--- source: https://drive.google.com/file/d/1la4jxyaEJJfip7NCWBM9YBTz6PK4-N9e/ --> @@ -222,7 +222,7 @@ In contrast to HotStuff, Jolteon only allows a transition into view `V+1` after A central, non-trivial functionality of the PaceMaker is to _skip views_. Specifically, given a QC or TC with view `V`, the Pacemaker will skip ahead to view `V + 1` if `currentView ≤ V`. -![](/docs/PaceMaker.png) +![](/docs/images/PaceMaker.png) <!--- source: https://drive.google.com/file/d/1la4jxyaEJJfip7NCWBM9YBTz6PK4-N9e/ --> @@ -273,7 +273,7 @@ For completeness, we list the component implemented in each sub-folder below: The HotStuff state machine exposes some details about its internal progress as notification through the `hotstuff.Consumer`. The following figure depicts at which points notifications are emitted. -![](/docs/StateMachine_with_notifications.png) +![](/docs/images/StateMachine_with_notifications.png) <!--- source: https://drive.google.com/file/d/1la4jxyaEJJfip7NCWBM9YBTz6PK4-N9e/ --> We have implemented a telemetry system (`hotstuff.notifications.TelemetryConsumer`) which implements the `Consumer` interface. diff --git a/consensus/hotstuff/block_producer.go b/consensus/hotstuff/block_producer.go index 0721380f51f..0a63068b2eb 100644 --- a/consensus/hotstuff/block_producer.go +++ b/consensus/hotstuff/block_producer.go @@ -4,12 +4,24 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// BlockProducer builds a new block proposal by building a new block payload with the builder module, -// and uses VoteCollectorFactory to create a disposable VoteCollector for producing the proposal vote. -// BlockProducer assembles the new block proposal using the block payload, block header and the proposal vote. +// BlockProducer is responsible for producing new block proposals. It is a service component to HotStuff's +// main state machine (implemented in the EventHandler). The BlockProducer's central purpose is to mediate +// concurrent signing requests to its embedded `hotstuff.SafetyRules` during block production. The actual +// work of producing a block proposal is delegated to the embedded `module.Builder`.
+// +// Context: BlockProducer is part of the `hotstuff` package and can therefore be expected to comply with +// hotstuff-internal design patterns, such as there being a single dedicated thread executing the EventLoop, +// including EventHandler, SafetyRules, and BlockProducer. However, `module.Builder` lives in a different +// package! Therefore, we should make the least restrictive assumptions, and support concurrent signing requests +// within `module.Builder`. To minimize implementation dependencies and reduce the chance of safety-critical +// consensus bugs, BlockProducer wraps `SafetyRules` and mediates concurrent access. Furthermore, by supporting +// concurrent signing requests, we enable various optimizations of optimistic and/or upfront block production. type BlockProducer interface { // MakeBlockProposal builds a new HotStuff block proposal using the given view, - // the given quorum certificate for its parent and [optionally] a timeout certificate for last view(could be nil). - // No errors are expected during normal operation. - MakeBlockProposal(view uint64, qc *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*flow.Header, error) + // the given quorum certificate for its parent and [optionally] a timeout certificate for last view (could be nil). + // Error Returns: + // - model.NoVoteError if it is not safe for us to vote (our proposal includes our vote) + // for this view. This can happen if we have already proposed or timed out this view. + // - generic error in case of unexpected failure + MakeBlockProposal(view uint64, qc *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*flow.ProposalHeader, error) } diff --git a/consensus/hotstuff/blockproducer/block_producer.go b/consensus/hotstuff/blockproducer/block_producer.go index 74d01cc317d..9e956d28d4d 100644 --- a/consensus/hotstuff/blockproducer/block_producer.go +++ b/consensus/hotstuff/blockproducer/block_producer.go @@ -7,68 +7,75 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" ) -// BlockProducer is responsible for producing new block proposals +// BlockProducer is responsible for producing new block proposals. It is a service component to HotStuff's +// main state machine (implemented in the EventHandler). The BlockProducer's central purpose is to mediate +// concurrent signing requests to its embedded `hotstuff.SafetyRules` during block production. The actual +// work of producing a block proposal is delegated to the embedded `module.Builder`. +// +// Context: BlockProducer is part of the `hotstuff` package and can therefore be expected to comply with +// hotstuff-internal design patterns, such as there being a single dedicated thread executing the EventLoop, +// including EventHandler, SafetyRules, and BlockProducer. However, `module.Builder` lives in a different +// package! Therefore, we should make the least restrictive assumptions, and support concurrent signing requests +// within `module.Builder`. To minimize implementation dependencies and reduce the chance of safety-critical +// consensus bugs, BlockProducer wraps `SafetyRules` and mediates concurrent access. Furthermore, by supporting +// concurrent signing requests, we enable various optimizations of optimistic and/or upfront block production.
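//
// For illustration, a minimal sketch of the intended call pattern (hypothetical wiring; error
// handling is abbreviated, and `parentID`/`setHotstuffFields` stand in for the values prepared
// by MakeBlockProposal):
//
//	wrapper := newSafetyRulesConcurrencyWrapper(safetyRules)
//	proposal, err := builder.BuildOn(parentID, setHotstuffFields, wrapper.Sign) // Sign may run on a builder-internal goroutine
//	if err == nil && !wrapper.IsSigningComplete() {
//	    err = fmt.Errorf("builder returned, but the proposal was never signed")
//	}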
type BlockProducer struct { - signer hotstuff.Signer - committee hotstuff.Replicas - builder module.Builder + safetyRules hotstuff.SafetyRules + committee hotstuff.Replicas + builder module.Builder } var _ hotstuff.BlockProducer = (*BlockProducer)(nil) -// New creates a new BlockProducer which wraps the chain compliance layer block builder -// to provide hotstuff with block proposals. +// New creates a new BlockProducer, which mediates concurrent signing requests to the embedded +// `hotstuff.SafetyRules` during block production; the block construction itself is delegated to `module.Builder`. // No errors are expected during normal operation. -func New(signer hotstuff.Signer, committee hotstuff.Replicas, builder module.Builder) (*BlockProducer, error) { +func New(safetyRules hotstuff.SafetyRules, committee hotstuff.Replicas, builder module.Builder) (*BlockProducer, error) { bp := &BlockProducer{ - signer: signer, - committee: committee, - builder: builder, + safetyRules: safetyRules, + committee: committee, + builder: builder, } return bp, nil } // MakeBlockProposal builds a new HotStuff block proposal using the given view, // the given quorum certificate for its parent and [optionally] a timeout certificate for last view(could be nil). -// No errors are expected during normal operation. -func (bp *BlockProducer) MakeBlockProposal(view uint64, qc *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*flow.Header, error) { +// Error Returns: +// - model.NoVoteError if it is not safe for us to vote (our proposal includes our vote) +// for this view. This can happen if we have already proposed or timed out this view. +// - generic error in case of unexpected failure +func (bp *BlockProducer) MakeBlockProposal(view uint64, qc *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*flow.ProposalHeader, error) { // the custom functions allows us to set some custom fields on the block; // in hotstuff, we use this for view number and signature-related fields - setHotstuffFields := func(header *flow.Header) error { - header.View = view - header.ParentView = qc.View - header.ParentVoterIndices = qc.SignerIndices - header.ParentVoterSigData = qc.SigData - header.ProposerID = bp.committee.Self() - header.LastViewTC = lastViewTC - - // turn the header into a block header proposal as known by hotstuff - block := model.Block{ - BlockID: header.ID(), - View: view, - ProposerID: header.ProposerID, - QC: qc, - PayloadHash: header.PayloadHash, - Timestamp: header.Timestamp, - } - - // then sign the proposal - proposal, err := bp.signer.CreateProposal(&block) - if err != nil { - return fmt.Errorf("could not sign block proposal: %w", err) - } - - header.ProposerSigData = proposal.SigData + setHotstuffFields := func(headerBuilder *flow.HeaderBodyBuilder) error { + headerBuilder.WithView(view). + WithParentView(qc.View). + WithParentVoterIndices(qc.SignerIndices). + WithParentVoterSigData(qc.SigData). + WithProposerID(bp.committee.Self()).
+ WithLastViewTC(lastViewTC) return nil } - // retrieve a fully built block header from the builder - header, err := bp.builder.BuildOn(qc.BlockID, setHotstuffFields) + signer := newSafetyRulesConcurrencyWrapper(bp.safetyRules) + proposal, err := bp.builder.BuildOn( + qc.BlockID, + setHotstuffFields, // never returns an error + signer.Sign, // may return model.NoVoteError, which we handle below + ) if err != nil { - return nil, fmt.Errorf("could not build block proposal on top of %v: %w", qc.BlockID, err) + if model.IsNoVoteError(err) { + return nil, fmt.Errorf("unsafe to vote for own proposal on top of %x: %w", qc.BlockID, err) + } + return nil, irrecoverable.NewExceptionf("could not build block proposal on top of %v: %w", qc.BlockID, err) + } + if !signer.IsSigningComplete() { + return nil, fmt.Errorf("signer has not yet completed signing") } - return header, nil + return proposal, nil } diff --git a/consensus/hotstuff/blockproducer/metrics_wrapper.go b/consensus/hotstuff/blockproducer/metrics_wrapper.go index 004b238d3e1..8d13f4c3389 100644 --- a/consensus/hotstuff/blockproducer/metrics_wrapper.go +++ b/consensus/hotstuff/blockproducer/metrics_wrapper.go @@ -24,9 +24,9 @@ func NewMetricsWrapper(builder module.Builder, metrics module.HotstuffMetrics) * } } -func (w BlockBuilderMetricsWrapper) BuildOn(parentID flow.Identifier, setter func(*flow.Header) error) (*flow.Header, error) { +func (w BlockBuilderMetricsWrapper) BuildOn(parentID flow.Identifier, setter func(*flow.HeaderBodyBuilder) error, sign func(*flow.Header) ([]byte, error)) (*flow.ProposalHeader, error) { processStart := time.Now() - header, err := w.builder.BuildOn(parentID, setter) + header, err := w.builder.BuildOn(parentID, setter, sign) w.metrics.PayloadProductionDuration(time.Since(processStart)) return header, err } diff --git a/consensus/hotstuff/blockproducer/safety_rules_wrapper.go b/consensus/hotstuff/blockproducer/safety_rules_wrapper.go new file mode 100644 index 00000000000..dbcc254add8 --- /dev/null +++ b/consensus/hotstuff/blockproducer/safety_rules_wrapper.go @@ -0,0 +1,90 @@ +package blockproducer + +import ( + "fmt" + + "go.uber.org/atomic" + + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/model/flow" +) + +// safetyRulesConcurrencyWrapper wraps `hotstuff.SafetyRules` to allow its usage in concurrent environments. +// Correctness requirements: +// +// (i) The wrapper's Sign function is called exactly once (wrapper errors on repeated Sign calls) +// (ii) SafetyRules is not accessed outside the wrapper concurrently. The wrapper cannot enforce this. +// +// The correctness condition (ii) holds because there is a single dedicated thread executing the Event Loop, +// including the EventHandler, that also runs the logic of `BlockProducer.MakeBlockProposal`. +// +// Concurrency safety: +// +// (a) There is one dedicated thread executing the Event Loop, including the EventHandler, that also runs the logic of +// `BlockProducer.MakeBlockProposal`. Hence, while the 'Event Loop Thread' is in `MakeBlockProposal`, we are guaranteed +// the only interactions with `SafetyRules` are in `module.Builder.BuildOn` +// (b) The Event Loop Thread instantiates the variable `signingStatus`. Furthermore, the `signer` call first reads `signingStatus`. +// Therefore, all operations in the EventHandler prior to calling `Builder.BuildOn(..)` happen before the call to `signer`. 
+// Hence, it is guaranteed that the `signer` uses the most recent state of `SafetyRules`, even if `Sign` is executed by a +// different thread. +// (c) Just before the `signer` call returns, it writes `signingStatus`. Furthermore, the Event Loop Thread reads `signingStatus` +// right after the `Builder.BuildOn(..)` call returns. Thereby, the Event Loop Thread sees the most recent state of `SafetyRules` +// after completing the signing operation. +// +// With the transitivity of the 'Happens Before' relationship (-> go Memory Model https://go.dev/ref/mem#atomic), we have proven +// that concurrent access of the wrapped `safetyRules` is safe for the state transition: +// +// instantiate signingStatus to 0 ─► update signingStatus from 0 to 1 → signer → update signingStatus from 1 to 2 ─► confirm signingStatus has value 2 +// +// ╰──────────────┬───────────────╯ ╰──────────────────────────────────────┬─────────────────────────────────────╯ ╰────────────────┬────────────────╯ +// +// Event Loop Thread within the scope of Builder.BuildOn Event Loop Thread +// +// All state transitions _other_ than the one above yield exceptions without modifying `SafetyRules`. +type safetyRulesConcurrencyWrapper struct { + // signingStatus guarantees concurrency safety and encodes the progress of the signing process. + // We differentiate between three different states: + // - value 0: signing is not yet started + // - value 1: one thread has already entered the signing process, which is currently ongoing + // - value 2: the thread that set `signingStatus` to value 1 has completed the signing + signingStatus atomic.Uint32 + safetyRules hotstuff.SafetyRules +} + +func newSafetyRulesConcurrencyWrapper(safetyRules hotstuff.SafetyRules) *safetyRulesConcurrencyWrapper { + return &safetyRulesConcurrencyWrapper{safetyRules: safetyRules} +} + +// Sign signs the given unsignedHeader on the proposer's behalf and returns the resulting signature data. +// Safe under concurrent calls. Per convention, this method should be called exactly once. +// Only the first call will succeed, and subsequent calls error. The implementation is backed +// by `SafetyRules` and thereby guarantees consensus safety for signing block proposals. +// Error Returns: +// - model.NoVoteError if it is not safe for us to vote (our proposal includes our vote) +// for this view. This can happen if we have already proposed or timed out this view. +// - generic error in case of unexpected failure +func (w *safetyRulesConcurrencyWrapper) Sign(unsignedHeader *flow.Header) ([]byte, error) { + if !w.signingStatus.CompareAndSwap(0, 1) { // value of `signingStatus` is something other than 0 + return nil, fmt.Errorf("signer has already commenced signing; possibly repeated signer call") + } // signer is now in state 1, and this thread is the only one ever going to execute the following logic + + // signature for own block is structurally a vote + vote, err := w.safetyRules.SignOwnProposal(model.ProposalFromFlow(unsignedHeader)) + if err != nil { + return nil, fmt.Errorf("could not sign block proposal: %w", err) + } + // value of `signingStatus` is always 1, i.e. the following check always succeeds. + if !w.signingStatus.CompareAndSwap(1, 2) { // sanity check protects logic from future modifications accidentally breaking this invariant + panic("signer wrapper completed its work but encountered state other than 1") // never happens + } + return vote.SigData, nil +} + +// IsSigningComplete atomically checks whether the Sign logic has concluded, and returns true only in this case.
+// By reading the atomic `signingStatus` and confirming it has the expected value, it is guaranteed that any state +// changes of `safetyRules` that happened within `Sign` are visible to the Event Loop Thread. +// No errors expected during normal operations +func (w *safetyRulesConcurrencyWrapper) IsSigningComplete() bool { + return w.signingStatus.Load() == 2 +} diff --git a/consensus/hotstuff/committee.go b/consensus/hotstuff/committee.go index cac2e3a877e..caf2e0f0e34 100644 --- a/consensus/hotstuff/committee.go +++ b/consensus/hotstuff/committee.go @@ -78,8 +78,7 @@ type Replicas interface { // Returns the following expected errors for invalid inputs: // - model.ErrViewForUnknownEpoch if no epoch containing the given view is known // - // TODO: should return identity skeleton https://github.com/dapperlabs/flow-go/issues/6232 - IdentitiesByEpoch(view uint64) (flow.IdentityList, error) + IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) // IdentityByEpoch returns the full Identity for specified HotStuff participant. // The node must be a legitimate HotStuff participant with NON-ZERO WEIGHT at the specified block. @@ -92,8 +91,7 @@ type Replicas interface { // Returns the following expected errors for invalid inputs: // - model.ErrViewForUnknownEpoch if no epoch containing the given view is known // - // TODO: should return identity skeleton https://github.com/dapperlabs/flow-go/issues/6232 - IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.Identity, error) + IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) } // DynamicCommittee extends Replicas to provide the consensus committee for the purposes diff --git a/consensus/hotstuff/committees/cluster_committee.go b/consensus/hotstuff/committees/cluster_committee.go index 565261dd7ee..ba331574c76 100644 --- a/consensus/hotstuff/committees/cluster_committee.go +++ b/consensus/hotstuff/committees/cluster_committee.go @@ -20,18 +20,18 @@ import ( // implementation reference blocks on the cluster chain, which in turn reference // blocks on the main chain - this implementation manages that translation. 
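//
// For intuition (hypothetical identifiers): a committee query at a cluster block whose payload
// references main-chain block M is answered from the protocol state snapshot at M, whereas the
// cluster's root block (reference block ID flow.ZeroID) is answered from the initial cluster
// membership fixed by the epoch smart contract.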
type Cluster struct { - state protocol.State - payloads storage.ClusterPayloads - me flow.Identifier - // pre-computed leader selection for the full lifecycle of the cluster - selection *leader.LeaderSelection - // a filter that returns all members of the cluster committee allowed to vote - clusterMemberFilter flow.IdentityFilter - // initial set of cluster members, WITHOUT dynamic weight changes - // TODO: should use identity skeleton https://github.com/dapperlabs/flow-go/issues/6232 - initialClusterMembers flow.IdentityList - weightThresholdForQC uint64 // computed based on initial cluster committee weights - weightThresholdForTO uint64 // computed based on initial cluster committee weights + state protocol.State + payloads storage.ClusterPayloads + me flow.Identifier + selection *leader.LeaderSelection // pre-computed leader selection for the full lifecycle of the cluster + + clusterMembers flow.IdentitySkeletonList // cluster members in canonical order as specified by the epoch smart contract + clusterMemberFilter flow.IdentityFilter[flow.Identity] // filter that returns true for all members of the cluster committee allowed to vote + weightThresholdForQC uint64 // computed based on initial cluster committee weights + weightThresholdForTO uint64 // computed based on initial cluster committee weights + + // initialClusterIdentities lists full Identities for cluster members (in canonical order) at time of cluster initialization by Epoch smart contract + initialClusterIdentities flow.IdentityList } var _ hotstuff.Replicas = (*Cluster)(nil) @@ -41,29 +41,31 @@ func NewClusterCommittee( state protocol.State, payloads storage.ClusterPayloads, cluster protocol.Cluster, - epoch protocol.Epoch, + epoch protocol.CommittedEpoch, me flow.Identifier, ) (*Cluster, error) { - selection, err := leader.SelectionForCluster(cluster, epoch) if err != nil { return nil, fmt.Errorf("could not compute leader selection for cluster: %w", err) } - totalWeight := cluster.Members().TotalWeight() + initialClusterIdentities := votingClusterParticipants(cluster.Members()) // drops nodes with `InitialWeight=0` + initialClusterMembersSelector := initialClusterIdentities.Selector() // hence, any node accepted by this selector has `InitialWeight>0` + totalWeight := initialClusterIdentities.TotalWeight() + com := &Cluster{ state: state, payloads: payloads, me: me, selection: selection, - clusterMemberFilter: filter.And( - cluster.Members().Selector(), - filter.Not(filter.Ejected), - filter.HasWeight(true), + clusterMemberFilter: filter.And[flow.Identity]( + initialClusterMembersSelector, + filter.IsValidCurrentEpochParticipant, ), - initialClusterMembers: cluster.Members(), - weightThresholdForQC: WeightThresholdToBuildQC(totalWeight), - weightThresholdForTO: WeightThresholdToTimeout(totalWeight), + clusterMembers: initialClusterIdentities.ToSkeleton(), + initialClusterIdentities: initialClusterIdentities, + weightThresholdForQC: WeightThresholdToBuildQC(totalWeight), + weightThresholdForTO: WeightThresholdToTimeout(totalWeight), } return com, nil } @@ -74,18 +76,15 @@ func (c *Cluster) IdentitiesByBlock(blockID flow.Identifier) (flow.IdentityList, // blockID is a collection block not a block produced by consensus, // to query the identities from protocol state, we need to use the reference block id from the payload // - // first retrieve the cluster block payload + // first retrieve the cluster block's payload payload, err := c.payloads.ByBlockID(blockID) if err != nil { return nil, fmt.Errorf("could not get cluster 
payload: %w", err) } - // an empty reference block ID indicates a root block - isRootBlock := payload.ReferenceBlockID == flow.ZeroID - - // use the initial cluster members for root block - if isRootBlock { - return c.initialClusterMembers, nil + // An empty reference block ID indicates a root block. In this case, use the initial cluster members for root block + if isRootBlock := payload.ReferenceBlockID == flow.ZeroID; isRootBlock { + return c.initialClusterIdentities, nil } // otherwise use the snapshot given by the reference block @@ -94,19 +93,15 @@ func (c *Cluster) IdentitiesByBlock(blockID flow.Identifier) (flow.IdentityList, } func (c *Cluster) IdentityByBlock(blockID flow.Identifier, nodeID flow.Identifier) (*flow.Identity, error) { - - // first retrieve the cluster block payload + // first retrieve the cluster block's payload payload, err := c.payloads.ByBlockID(blockID) if err != nil { return nil, fmt.Errorf("could not get cluster payload: %w", err) } - // an empty reference block ID indicates a root block - isRootBlock := payload.ReferenceBlockID == flow.ZeroID - - // use the initial cluster members for root block - if isRootBlock { - identity, ok := c.initialClusterMembers.ByNodeID(nodeID) + // An empty reference block ID indicates a root block. In this case, use the initial cluster members for root block + if isRootBlock := payload.ReferenceBlockID == flow.ZeroID; isRootBlock { + identity, ok := c.initialClusterIdentities.ByNodeID(nodeID) if !ok { return nil, model.NewInvalidSignerErrorf("node %v is not an authorized hotstuff participant", nodeID) } @@ -127,11 +122,12 @@ func (c *Cluster) IdentityByBlock(blockID flow.Identifier, nodeID flow.Identifie return identity, nil } -// IdentitiesByEpoch returns the initial cluster members for this epoch. The view -// parameter is the view in the cluster consensus. Since clusters only exist for -// one epoch, we don't need to check the view. -func (c *Cluster) IdentitiesByEpoch(_ uint64) (flow.IdentityList, error) { - return c.initialClusterMembers, nil +// IdentitiesByEpoch returns the IdentitySkeletons of the cluster members in canonical order. +// This represents the cluster composition at the time the cluster was specified by the epoch smart +// contract (hence, we return IdentitySkeletons as opposed to full identities). Since clusters only +// exist for one epoch, we don't need to check the view. +func (c *Cluster) IdentitiesByEpoch(_ uint64) (flow.IdentitySkeletonList, error) { + return c.clusterMembers, nil } // IdentityByEpoch returns the node from the initial cluster members for this epoch. 
@@ -141,10 +137,10 @@ func (c *Cluster) IdentitiesByEpoch(_ uint64) (flow.IdentityList, error) { // Returns: // - model.InvalidSignerError if nodeID was not listed by the Epoch Setup event as an // authorized participant in this cluster -func (c *Cluster) IdentityByEpoch(_ uint64, nodeID flow.Identifier) (*flow.Identity, error) { - identity, ok := c.initialClusterMembers.ByNodeID(nodeID) +func (c *Cluster) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { + identity, ok := c.clusterMembers.ByNodeID(participantID) if !ok { - return nil, model.NewInvalidSignerErrorf("node %v is not an authorized hotstuff participant", nodeID) + return nil, model.NewInvalidSignerErrorf("node %v is not an authorized hotstuff participant", participantID) } return identity, nil } @@ -180,3 +176,38 @@ func (c *Cluster) Self() flow.Identifier { func (c *Cluster) DKG(_ uint64) (hotstuff.DKG, error) { panic("queried DKG of cluster committee") } + +// votingClusterParticipants extends the IdentitySkeletons of the cluster members to their full Identities +// at the time of cluster initialization by the EpochSetup event. +// IMPORTANT CONVENTIONS: +// 1. clusterMembers with zero `InitialWeight` are _not included_ as "contributing" cluster participants. +// In accordance with their zero weight, they cannot contribute to advancing the cluster consensus. +// For example, the consensus leader selection allows zero-weighted nodes among the weighted participants, +// but these nodes have zero probability of being selected as leader. Similarly, they cannot meaningfully contribute +// votes or Timeouts to QCs or TCs, due to their zero weight. Therefore, we do not consider them valid signers. +// 2. This operation maintains the relative order. In other words, if `clusterMembers` is in canonical order, +// then the output `IdentityList` is also canonically ordered. +// +// CONTEXT: The EpochSetup event contains the IdentitySkeletons for each cluster, thereby specifying cluster membership. +// While ejection status is not part of the EpochSetup event, we can supplement this information as follows: +// - Per convention, service events are delivered (asynchronously) in an *order-preserving* manner. Furthermore, +// node ejection is also mediated by system smart contracts and delivered via service events. +// - Therefore, the EpochSetup event contains the up-to-date snapshot of the cluster members. Any node ejection +// that happened before should be reflected in the EpochSetup event. Specifically, ejected nodes +// should no longer be listed in the EpochSetup event. Hence, when the EpochSetup event is emitted / processed, +// the participation status of all cluster members equals flow.EpochParticipationStatusActive.
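//
// Example (hypothetical weights): clusterMembers [(A, weight 100), (B, weight 0), (C, weight 50)] yield the
// voting participants [(A, 100), (C, 50)], both with EpochParticipationStatusActive; B is dropped because of
// its zero InitialWeight, and the relative (canonical) order of A and C is preserved.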
+func votingClusterParticipants(clusterMembers flow.IdentitySkeletonList) flow.IdentityList { + initialClusterIdentities := make(flow.IdentityList, 0, len(clusterMembers)) + for _, skeleton := range clusterMembers { + if skeleton.InitialWeight == 0 { + continue + } + initialClusterIdentities = append(initialClusterIdentities, &flow.Identity{ + IdentitySkeleton: *skeleton, + DynamicIdentity: flow.DynamicIdentity{ + EpochParticipationStatus: flow.EpochParticipationStatusActive, + }, + }) + } + return initialClusterIdentities +} diff --git a/consensus/hotstuff/committees/cluster_committee_test.go b/consensus/hotstuff/committees/cluster_committee_test.go index 83903d23c3d..1a199bd3557 100644 --- a/consensus/hotstuff/committees/cluster_committee_test.go +++ b/consensus/hotstuff/committees/cluster_committee_test.go @@ -12,7 +12,7 @@ import ( clusterstate "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/state/protocol" protocolmock "github.com/onflow/flow-go/state/protocol/mock" - "github.com/onflow/flow-go/state/protocol/seed" + "github.com/onflow/flow-go/state/protocol/prg" storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -24,7 +24,7 @@ type ClusterSuite struct { state *protocolmock.State snap *protocolmock.Snapshot cluster *protocolmock.Cluster - epoch *protocolmock.Epoch + epoch *protocolmock.CommittedEpoch payloads *storagemock.ClusterPayloads members flow.IdentityList @@ -43,22 +43,23 @@ func (suite *ClusterSuite) SetupTest() { suite.state = new(protocolmock.State) suite.snap = new(protocolmock.Snapshot) suite.cluster = new(protocolmock.Cluster) - suite.epoch = new(protocolmock.Epoch) + suite.epoch = new(protocolmock.CommittedEpoch) suite.payloads = new(storagemock.ClusterPayloads) suite.members = unittest.IdentityListFixture(5, unittest.WithRole(flow.RoleCollection)) suite.me = suite.members[0] counter := uint64(1) - suite.root = clusterstate.CanonicalRootBlock(counter, suite.members) + rootBlock, err := clusterstate.CanonicalRootBlock(counter, suite.members.ToSkeleton()) + suite.Require().NoError(err) + suite.root = rootBlock suite.cluster.On("EpochCounter").Return(counter) suite.cluster.On("Index").Return(uint(1)) - suite.cluster.On("Members").Return(suite.members) + suite.cluster.On("Members").Return(suite.members.ToSkeleton()) suite.cluster.On("RootBlock").Return(suite.root) suite.epoch.On("Counter").Return(counter, nil) - suite.epoch.On("RandomSource").Return(unittest.SeedFixture(seed.RandomSourceLength), nil) + suite.epoch.On("RandomSource").Return(unittest.SeedFixture(prg.RandomSourceLength)) - var err error suite.com, err = NewClusterCommittee( suite.state, suite.payloads, @@ -73,11 +74,11 @@ func (suite *ClusterSuite) SetupTest() { func (suite *ClusterSuite) TestThresholds() { threshold, err := suite.com.QuorumThresholdForView(rand.Uint64()) suite.Require().NoError(err) - suite.Assert().Equal(WeightThresholdToBuildQC(suite.members.TotalWeight()), threshold) + suite.Assert().Equal(WeightThresholdToBuildQC(suite.members.ToSkeleton().TotalWeight()), threshold) threshold, err = suite.com.TimeoutThresholdForView(rand.Uint64()) suite.Require().NoError(err) - suite.Assert().Equal(WeightThresholdToTimeout(suite.members.TotalWeight()), threshold) + suite.Assert().Equal(WeightThresholdToTimeout(suite.members.ToSkeleton().TotalWeight()), threshold) } // TestInvalidSigner tests that the InvalidSignerError sentinel is @@ -88,29 +89,29 @@ func (suite *ClusterSuite) TestInvalidSigner() { nonRootBlockID := 
unittest.IdentifierFixture() rootBlockID := suite.root.ID() - refID := unittest.IdentifierFixture() // reference block on main chain - payload := cluster.EmptyPayload(refID) // payload referencing main chain - rootPayload := cluster.EmptyPayload(flow.ZeroID) // root cluster block payload + refID := unittest.IdentifierFixture() // reference block on main chain + payload := cluster.NewEmptyPayload(refID) // payload referencing main chain + rootPayload := cluster.NewEmptyPayload(flow.ZeroID) // root cluster block payload - suite.payloads.On("ByBlockID", nonRootBlockID).Return(&payload, nil) - suite.payloads.On("ByBlockID", rootBlockID).Return(&rootPayload, nil) + suite.payloads.On("ByBlockID", nonRootBlockID).Return(payload, nil) + suite.payloads.On("ByBlockID", rootBlockID).Return(rootPayload, nil) // a real cluster member which continues to be a valid member realClusterMember := suite.members[1] - // a real cluster member which loses all its weight between cluster initialization + // a real cluster member which unstaked and is no longer active, between cluster initialization // and the test's reference block - realNoWeightClusterMember := suite.members[2] - realNoWeightClusterMember.Weight = 0 + realLeavingClusterMember := suite.members[2] + realLeavingClusterMember.EpochParticipationStatus = flow.EpochParticipationStatusLeaving // a real cluster member which is ejected between cluster initialization and // the test's reference block realEjectedClusterMember := suite.members[3] - realEjectedClusterMember.Ejected = true + realEjectedClusterMember.EpochParticipationStatus = flow.EpochParticipationStatusEjected realNonClusterMember := unittest.IdentityFixture(unittest.WithRole(flow.RoleCollection)) fakeID := unittest.IdentifierFixture() suite.state.On("AtBlockID", refID).Return(suite.snap) suite.snap.On("Identity", realClusterMember.NodeID).Return(realClusterMember, nil) - suite.snap.On("Identity", realNoWeightClusterMember.NodeID).Return(realNoWeightClusterMember, nil) + suite.snap.On("Identity", realLeavingClusterMember.NodeID).Return(realLeavingClusterMember, nil) suite.snap.On("Identity", realEjectedClusterMember.NodeID).Return(realEjectedClusterMember, nil) suite.snap.On("Identity", realNonClusterMember.NodeID).Return(realNonClusterMember, nil) suite.snap.On("Identity", fakeID).Return(nil, protocol.IdentityNotFoundError{}) @@ -130,6 +131,18 @@ ... }) }) + suite.Run("should return ErrInvalidSigner for existent but not active cluster member", func() { + suite.Run("non-root block", func() { + _, err := suite.com.IdentityByBlock(nonRootBlockID, realLeavingClusterMember.NodeID) + suite.Assert().True(model.IsInvalidSignerError(err)) + }) + suite.Run("by epoch", func() { + actual, err := suite.com.IdentityByEpoch(rand.Uint64(), realLeavingClusterMember.NodeID) + suite.Require().NoError(err) + suite.Assert().Equal(realLeavingClusterMember.IdentitySkeleton, *actual) + }) + }) + suite.Run("should return InvalidSignerError for existent non-cluster-member", func() { suite.Run("root block", func() { _, err := suite.com.IdentityByBlock(rootBlockID, realNonClusterMember.NodeID) @@ -146,12 +159,6 @@ }) suite.Run("should return ErrInvalidSigner for existent but ejected cluster member", func() { - // at the root block, the cluster member is not ejected yet - suite.Run("root block", func() { - actual, err := suite.com.IdentityByBlock(rootBlockID, realEjectedClusterMember.NodeID) - suite.Require().NoError(err) -
suite.Assert().Equal(realEjectedClusterMember, actual) - }) suite.Run("non-root block", func() { _, err := suite.com.IdentityByBlock(nonRootBlockID, realEjectedClusterMember.NodeID) suite.Assert().True(model.IsInvalidSignerError(err)) @@ -159,25 +166,7 @@ func (suite *ClusterSuite) TestInvalidSigner() { suite.Run("by epoch", func() { actual, err := suite.com.IdentityByEpoch(rand.Uint64(), realEjectedClusterMember.NodeID) suite.Assert().NoError(err) - suite.Assert().Equal(realEjectedClusterMember, actual) - }) - }) - - suite.Run("should return ErrInvalidSigner for existent but zero-weight cluster member", func() { - // at the root block, the cluster member has its initial weight - suite.Run("root block", func() { - actual, err := suite.com.IdentityByBlock(rootBlockID, realNoWeightClusterMember.NodeID) - suite.Require().NoError(err) - suite.Assert().Equal(realNoWeightClusterMember, actual) - }) - suite.Run("non-root block", func() { - _, err := suite.com.IdentityByBlock(nonRootBlockID, realNoWeightClusterMember.NodeID) - suite.Assert().True(model.IsInvalidSignerError(err)) - }) - suite.Run("by epoch", func() { - actual, err := suite.com.IdentityByEpoch(rand.Uint64(), realNoWeightClusterMember.NodeID) - suite.Require().NoError(err) - suite.Assert().Equal(realNoWeightClusterMember, actual) + suite.Assert().Equal(realEjectedClusterMember.IdentitySkeleton, *actual) }) }) @@ -195,7 +184,7 @@ func (suite *ClusterSuite) TestInvalidSigner() { suite.Run("by epoch", func() { actual, err := suite.com.IdentityByEpoch(rand.Uint64(), realClusterMember.NodeID) suite.Require().NoError(err) - suite.Assert().Equal(realClusterMember, actual) + suite.Assert().Equal(realClusterMember.IdentitySkeleton, *actual) }) }) } diff --git a/consensus/hotstuff/committees/consensus_committee.go b/consensus/hotstuff/committees/consensus_committee.go index cc29265e464..70c6a2ec54e 100644 --- a/consensus/hotstuff/committees/consensus_committee.go +++ b/consensus/hotstuff/committees/consensus_committee.go @@ -1,11 +1,10 @@ package committees import ( + "errors" "fmt" "sync" - "go.uber.org/atomic" - "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/committees/leader" "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -15,109 +14,107 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/events" - "github.com/onflow/flow-go/state/protocol/seed" ) -// staticEpochInfo contains leader selection and the initial committee for one epoch. -// This data structure must not be mutated after construction. -type staticEpochInfo struct { - firstView uint64 // first view of the epoch (inclusive) - finalView uint64 // final view of the epoch (inclusive) - randomSource []byte // random source of epoch - leaders *leader.LeaderSelection // pre-computed leader selection for the epoch - // TODO: should use identity skeleton https://github.com/dapperlabs/flow-go/issues/6232 - initialCommittee flow.IdentityList - initialCommitteeMap map[flow.Identifier]*flow.Identity - weightThresholdForQC uint64 // computed based on initial committee weights - weightThresholdForTO uint64 // computed based on initial committee weights - dkg hotstuff.DKG +// epochInfo caches data about one epoch that is pertinent to the consensus committee. +// Per protocol definition, membership in the consensus committee is granted for an entire +// epoch, because HotStuff requires that the leader selection is fork-independent. 
It is +// important to note that a consensus committee member retains its proposer view slots +// for the current epoch even if it is ejected. Nevertheless, proposals from ejected nodes +// will not be certified, because the nodes' epoch participation status is no longer active, +// and hence they are not voted for. +// The protocol convention implies that the leader selection is independent of the +// `DynamicIdentity` of the nodes, which can be updated throughout the epoch. The consensus +// committee is defined as the `Participants` in the EpochSetup event, filtered down to +// consensus nodes with _positive_ `InitialWeight`. +// Based on the same argument, the weight-thresholds for creating a valid QuorumCertificate +// and TimeoutCertificate are constant throughout an epoch. Together with the DKG, all +// this information is fully specified by the EpochSetup and EpochCommit events. Therefore, +// we can cache it here. +// CAUTION: epochInfo's LeaderSelection is the only field whose state may evolve over time. +// Guaranteeing concurrency safety is delegated to the higher-level logic. +type epochInfo struct { + *leader.LeaderSelection // pre-computed leader selection for the epoch + randomSeed []byte + initialCommittee flow.IdentitySkeletonList + initialCommitteeMap map[flow.Identifier]*flow.IdentitySkeleton + weightThresholdForQC uint64 // computed based on initial committee weights + weightThresholdForTO uint64 // computed based on initial committee weights + dkg hotstuff.DKG } -// newStaticEpochInfo returns the static epoch information from the epoch. -// This can be cached and used for all by-view queries for this epoch. -func newStaticEpochInfo(epoch protocol.Epoch) (*staticEpochInfo, error) { - firstView, err := epoch.FirstView() - if err != nil { - return nil, fmt.Errorf("could not get first view: %w", err) +// recomputeLeaderSelectionForExtendedViewRange re-computes the LeaderSelection field +// for the input epoch's entire view range, including the new extension. +// This must be called each time an extension is added to an epoch. +// This method is idempotent, i.e. repeated calls for the same final view are no-ops. +// Caution, not concurrency safe. +// No errors are expected during normal operation. +func (e *epochInfo) recomputeLeaderSelectionForExtendedViewRange(extension flow.EpochExtension) error { + // sanity check: ensure the final view of the current epoch monotonically increases + lastViewOfLeaderSelection := e.FinalView() + if extension.FinalView < lastViewOfLeaderSelection { + return fmt.Errorf("final view of epoch must increase monotonically, but would decrease from %d to %d", lastViewOfLeaderSelection, extension.FinalView) } - finalView, err := epoch.FinalView() - if err != nil { - return nil, fmt.Errorf("could not get final view: %w", err) + if extension.FinalView == lastViewOfLeaderSelection { + return nil } - randomSource, err := epoch.RandomSource() + + leaderSelection, err := leader.SelectionForConsensus(e.initialCommittee, e.randomSeed, e.FirstView(), extension.FinalView) if err != nil { - return nil, fmt.Errorf("could not get random source: %w", err) + return fmt.Errorf("could not re-compute leader selection for epoch after extension: %w", err) } - leaders, err := leader.SelectionForConsensus(epoch) + e.LeaderSelection = leaderSelection + return nil +} + +// newEpochInfo retrieves the committee information and computes leader selection. +// This can be cached and used for all by-view queries for this epoch. +// No errors are expected during normal operation.
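//
// Illustrative lifecycle (hypothetical wiring): when the next epoch is committed, the committee constructs
// this snapshot once, e.g. ei, err := newEpochInfo(nextCommittedEpoch), and caches it by epoch counter; all
// subsequent by-view queries for that epoch read from it. Only the embedded LeaderSelection may later grow,
// via recomputeLeaderSelectionForExtendedViewRange, and only while no later epoch has been committed.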
+func newEpochInfo(epoch protocol.CommittedEpoch) (*epochInfo, error) { + randomSeed := epoch.RandomSource() + firstView := epoch.FirstView() + finalView := epoch.FinalView() + initialIdentities := epoch.InitialIdentities() + leaders, err := leader.SelectionForConsensus(initialIdentities, randomSeed, firstView, finalView) if err != nil { return nil, fmt.Errorf("could not get leader selection: %w", err) } - initialIdentities, err := epoch.InitialIdentities() - if err != nil { - return nil, fmt.Errorf("could not initial identities: %w", err) - } - initialCommittee := initialIdentities.Filter(filter.IsVotingConsensusCommitteeMember) + + initialCommittee := initialIdentities.Filter(filter.IsConsensusCommitteeMember) dkg, err := epoch.DKG() if err != nil { return nil, fmt.Errorf("could not get dkg: %w", err) } totalWeight := initialCommittee.TotalWeight() - epochInfo := &staticEpochInfo{ - firstView: firstView, - finalView: finalView, - randomSource: randomSource, - leaders: leaders, + ei := &epochInfo{ + LeaderSelection: leaders, + randomSeed: randomSeed, initialCommittee: initialCommittee, initialCommitteeMap: initialCommittee.Lookup(), weightThresholdForQC: WeightThresholdToBuildQC(totalWeight), weightThresholdForTO: WeightThresholdToTimeout(totalWeight), dkg: dkg, } - return epochInfo, nil + return ei, nil } -// newEmergencyFallbackEpoch creates an artificial fallback epoch generated from -// the last committed epoch at the time epoch emergency fallback is triggered. -// The fallback epoch: -// * begins after the last committed epoch -// * lasts until the next spork (estimated 6 months) -// * has the same static committee as the last committed epoch -func newEmergencyFallbackEpoch(lastCommittedEpoch *staticEpochInfo) (*staticEpochInfo, error) { - - rng, err := seed.PRGFromRandomSource(lastCommittedEpoch.randomSource, seed.ProtocolConsensusLeaderSelection) - if err != nil { - return nil, fmt.Errorf("could not create rng from seed: %w", err) - } - leaders, err := leader.ComputeLeaderSelection(lastCommittedEpoch.finalView+1, rng, leader.EstimatedSixMonthOfViews, lastCommittedEpoch.initialCommittee) - if err != nil { - return nil, fmt.Errorf("could not compute leader selection for fallback epoch: %w", err) - } - epochInfo := &staticEpochInfo{ - firstView: lastCommittedEpoch.finalView + 1, - finalView: lastCommittedEpoch.finalView + leader.EstimatedSixMonthOfViews, - randomSource: lastCommittedEpoch.randomSource, - leaders: leaders, - initialCommittee: lastCommittedEpoch.initialCommittee, - initialCommitteeMap: lastCommittedEpoch.initialCommitteeMap, - weightThresholdForQC: lastCommittedEpoch.weightThresholdForQC, - weightThresholdForTO: lastCommittedEpoch.weightThresholdForTO, - dkg: lastCommittedEpoch.dkg, - } - return epochInfo, nil -} +// eventHandlerFunc holds an epoch-related ServiceEvent wrapped in a closure, which will perform +// the required local state changes upon execution. Pending eventHandlerFunc must be queued +// and processed by a *single* worker goroutine following exactly the order in which the +// epoch-related Service Events were delivered. +// No errors are expected under normal conditions. +type eventHandlerFunc func() error // Consensus represents the main committee for consensus nodes. The consensus // committee might be active for multiple successive epochs. 
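//
// For intuition: `epochs` holds one epochInfo per epoch counter. During regular operation the cache contains
// at most the previous, current, and next committed epoch, so by-view lookups scan no more than three entries.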
type Consensus struct { - state protocol.State // the protocol state - me flow.Identifier // the node ID of this node - mu sync.RWMutex // protects access to epochs - epochs map[uint64]*staticEpochInfo // cache of initial committee & leader selection per epoch - committedEpochsCh chan *flow.Header // protocol events for newly committed epochs (the first block of the epoch is passed over the channel) - epochEmergencyFallback chan struct{} // protocol event for epoch emergency fallback - isEpochFallbackHandled *atomic.Bool // ensure we only inject fallback epoch once - events.Noop // implements protocol.Consumer + state protocol.State // the protocol state + me flow.Identifier // the node ID of this node + mu sync.RWMutex // protects access to epochs + epochs map[uint64]*epochInfo // caching per epoch: consensus committee (immutable) & leader selection (extendable) + epochEvents chan eventHandlerFunc // order-preserving queue for relevant service events still pending processing + events.Noop // implements protocol.Consumer component.Component } @@ -126,14 +123,11 @@ var _ hotstuff.Replicas = (*Consensus)(nil) var _ hotstuff.DynamicCommittee = (*Consensus)(nil) func NewConsensusCommittee(state protocol.State, me flow.Identifier) (*Consensus, error) { - com := &Consensus{ - state: state, - me: me, - epochs: make(map[uint64]*staticEpochInfo), - committedEpochsCh: make(chan *flow.Header, 1), - epochEmergencyFallback: make(chan struct{}, 1), - isEpochFallbackHandled: atomic.NewBool(false), + state: state, + me: me, + epochs: make(map[uint64]*epochInfo), + epochEvents: make(chan eventHandlerFunc, 5), } com.Component = component.NewComponentManagerBuilder(). @@ -143,26 +137,36 @@ func NewConsensusCommittee(state protocol.State, me flow.Identifier) (*Consensus final := state.Final() // pre-compute leader selection for all presently relevant committed epochs - epochs := make([]protocol.Epoch, 0, 3) - // we always prepare the current epoch - epochs = append(epochs, final.Epochs().Current()) + epochs := make([]protocol.CommittedEpoch, 0, 3) // we prepare the previous epoch, if one exists - exists, err := protocol.PreviousEpochExists(final) + prev, err := final.Epochs().Previous() if err != nil { - return nil, fmt.Errorf("could not check previous epoch exists: %w", err) + if !errors.Is(err, protocol.ErrNoPreviousEpoch) { + return nil, irrecoverable.NewExceptionf("unexpected error while retrieving previous epoch: %w", err) + } + // `ErrNoPreviousEpoch` is an expected edge case during normal operations (e.g. 
we are in the first epoch after a spork) + // continue without the previous epoch + } else { // previous epoch was successfully retrieved + epochs = append(epochs, prev) } - if exists { - epochs = append(epochs, final.Epochs().Previous()) + + // we always prepare the current epoch + curr, err := final.Epochs().Current() + if err != nil { + return nil, fmt.Errorf("could not get current epoch: %w", err) } + epochs = append(epochs, curr) // we prepare the next epoch, if it is committed - phase, err := final.Phase() + next, err := final.Epochs().NextCommitted() if err != nil { - return nil, fmt.Errorf("could not check epoch phase: %w", err) - } - if phase == flow.EpochPhaseCommitted { - epochs = append(epochs, final.Epochs().Next()) + if !errors.Is(err, protocol.ErrNextEpochNotCommitted) { + return nil, irrecoverable.NewExceptionf("unexpected error retrieving next epoch: %w", err) + } + // receiving an `ErrNextEpochNotCommitted` is expected during the happy path + } else { // next epoch was successfully retrieved + epochs = append(epochs, next) } for _, epoch := range epochs { @@ -172,18 +176,6 @@ func NewConsensusCommittee(state protocol.State, me flow.Identifier) (*Consensus } } - // if epoch emergency fallback was triggered, inject the fallback epoch - triggered, err := state.Params().EpochFallbackTriggered() - if err != nil { - return nil, fmt.Errorf("could not check epoch fallback: %w", err) - } - if triggered { - err = com.onEpochEmergencyFallbackTriggered() - if err != nil { - return nil, fmt.Errorf("could not prepare emergency fallback epoch: %w", err) - } - } - return com, nil } @@ -226,12 +218,12 @@ func (c *Consensus) IdentityByBlock(blockID flow.Identifier, nodeID flow.Identif // - model.ErrViewForUnknownEpoch if no committed epoch containing the given view is known. // This is an expected error and must be handled. // - unspecific error in case of unexpected problems and bugs -func (c *Consensus) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { - epochInfo, err := c.staticEpochInfoByView(view) +func (c *Consensus) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { + epochInf, err := c.epochInfoByView(view) if err != nil { return nil, err } - return epochInfo.initialCommittee, nil + return epochInf.initialCommittee, nil } // IdentityByEpoch returns the identity for the given node ID, in the epoch which @@ -245,14 +237,14 @@ func (c *Consensus) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { // - model.InvalidSignerError if nodeID was not listed by the Epoch Setup event as an // authorized consensus participants. // - unspecific error in case of unexpected problems and bugs -func (c *Consensus) IdentityByEpoch(view uint64, nodeID flow.Identifier) (*flow.Identity, error) { - epochInfo, err := c.staticEpochInfoByView(view) +func (c *Consensus) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { + epochInf, err := c.epochInfoByView(view) if err != nil { return nil, err } - identity, ok := epochInfo.initialCommitteeMap[nodeID] + identity, ok := epochInf.initialCommitteeMap[participantID] if !ok { - return nil, model.NewInvalidSignerErrorf("id %v is not a valid node id", nodeID) + return nil, model.NewInvalidSignerErrorf("id %v is not a valid node id", participantID) } return identity, nil } @@ -264,17 +256,16 @@ func (c *Consensus) IdentityByEpoch(view uint64, nodeID flow.Identifier) (*flow. // This is an expected error and must be handled.
// - unspecific error in case of unexpected problems and bugs func (c *Consensus) LeaderForView(view uint64) (flow.Identifier, error) { - - epochInfo, err := c.staticEpochInfoByView(view) + epochInf, err := c.epochInfoByView(view) if err != nil { return flow.ZeroID, err } - leaderID, err := epochInfo.leaders.LeaderForView(view) + leaderID, err := epochInf.LeaderForView(view) if leader.IsInvalidViewError(err) { // an invalid view error indicates that no leader was computed for this view // this is a fatal internal error, because the view necessarily is within an // epoch for which we have pre-computed leader selection - return flow.ZeroID, fmt.Errorf("unexpected inconsistency in epoch view spans for view %d: %v", view, err) + return flow.ZeroID, irrecoverable.NewExceptionf("unexpected inconsistency in epoch view spans for view %d: %v", view, err) } if err != nil { return flow.ZeroID, err @@ -291,11 +282,11 @@ func (c *Consensus) LeaderForView(view uint64) (flow.Identifier, error) { // This is an expected error and must be handled. // - unspecific error in case of unexpected problems and bugs func (c *Consensus) QuorumThresholdForView(view uint64) (uint64, error) { - epochInfo, err := c.staticEpochInfoByView(view) + epochInf, err := c.epochInfoByView(view) if err != nil { return 0, err } - return epochInfo.weightThresholdForQC, nil + return epochInf.weightThresholdForQC, nil } func (c *Consensus) Self() flow.Identifier { @@ -306,11 +297,11 @@ func (c *Consensus) Self() flow.Identifier { // to safely immediately timeout for the current view. The weight threshold only // changes at epoch boundaries and is computed based on the initial committee weights. func (c *Consensus) TimeoutThresholdForView(view uint64) (uint64, error) { - epochInfo, err := c.staticEpochInfoByView(view) + epochInf, err := c.epochInfoByView(view) if err != nil { return 0, err } - return epochInfo.weightThresholdForTO, nil + return epochInf.weightThresholdForTO, nil } // DKG returns the DKG for epoch which includes the given view. @@ -320,19 +311,16 @@ func (c *Consensus) TimeoutThresholdForView(view uint64) (uint64, error) { // This is an expected error and must be handled. // - unspecific error in case of unexpected problems and bugs func (c *Consensus) DKG(view uint64) (hotstuff.DKG, error) { - epochInfo, err := c.staticEpochInfoByView(view) + epochInf, err := c.epochInfoByView(view) if err != nil { return nil, err } - return epochInfo.dkg, nil + return epochInf.dkg, nil } -// handleProtocolEvents processes queued Epoch events `EpochCommittedPhaseStarted` -// and `EpochEmergencyFallbackTriggered`. This function permanently utilizes a worker -// routine until the `Component` terminates. -// When we observe a new epoch being committed, we compute -// the leader selection and cache static info for the epoch. When we observe -// epoch emergency fallback being triggered, we inject a fallback epoch. +// handleProtocolEvents processes queued protocol events. +// When we are notified of a new protocol event, the consumer function enqueues an eventHandlerFunc +// in the events channel. This function then executes each event handler in the order they were emitted. 
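//
// Producer side, for intuition: the event consumer callbacks below defer the actual work by wrapping it
// in a closure, e.g.
//
//	c.epochEvents <- func() error { return c.handleEpochExtended(epochCounter, extension) }
//
// which this single worker then executes strictly in enqueue order.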
func (c *Consensus) handleProtocolEvents(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { ready() @@ -340,14 +328,8 @@ func (c *Consensus) handleProtocolEvents(ctx irrecoverable.SignalerContext, read select { case <-ctx.Done(): return - case block := <-c.committedEpochsCh: - epoch := c.state.AtBlockID(block.ID()).Epochs().Next() - _, err := c.prepareEpoch(epoch) - if err != nil { - ctx.Throw(err) - } - case <-c.epochEmergencyFallback: - err := c.onEpochEmergencyFallbackTriggered() + case handleEvent := <-c.epochEvents: + err := handleEvent() if err != nil { ctx.Throw(err) } @@ -355,80 +337,74 @@ func (c *Consensus) handleProtocolEvents(ctx irrecoverable.SignalerContext, read } } -// EpochCommittedPhaseStarted informs the `committee.Consensus` that the block starting the Epoch Committed Phase has been finalized. +// EpochCommittedPhaseStarted informs `committees.Consensus` that the first block in flow.EpochPhaseCommitted has been finalized. +// This event consumer function enqueues an event handler function for the single event handler thread to execute. func (c *Consensus) EpochCommittedPhaseStarted(_ uint64, first *flow.Header) { - c.committedEpochsCh <- first + c.epochEvents <- func() error { + return c.handleEpochCommittedPhaseStarted(first) + } } -// EpochEmergencyFallbackTriggered passes the protocol event to the worker thread. -func (c *Consensus) EpochEmergencyFallbackTriggered() { - c.epochEmergencyFallback <- struct{}{} +// EpochExtended informs `committees.Consensus` that a block including a new epoch extension has been finalized. +// This event consumer function enqueues an event handler function for the single event handler thread to execute. +func (c *Consensus) EpochExtended(epochCounter uint64, _ *flow.Header, extension flow.EpochExtension) { + c.epochEvents <- func() error { + return c.handleEpochExtended(epochCounter, extension) + } } -// onEpochEmergencyFallbackTriggered handles the protocol event for emergency epoch -// fallback mode being triggered. When this occurs, we inject a fallback epoch -// to the committee which extends the current epoch. -// This method must also be called on initialization, if emergency fallback mode -// was triggered in the past. +// handleEpochExtended executes all state changes required upon observing an EpochExtended event. +// This function conforms to eventHandlerFunc. +// When an extension is observed, we re-compute leader selection for the current epoch, taking into +// account the most recent extension (included as of refBlock). // No errors are expected during normal operation. -func (c *Consensus) onEpochEmergencyFallbackTriggered() error { +func (c *Consensus) handleEpochExtended(epochCounter uint64, extension flow.EpochExtension) error { + c.mu.Lock() + defer c.mu.Unlock() - // we respond to epoch fallback being triggered at most once, therefore - // the core logic is protected by an atomic bool. - // although it is only valid for epoch fallback to be triggered once per spork, - // we must account for repeated delivery of protocol events. 
- if !c.isEpochFallbackHandled.CompareAndSwap(false, true) { - return nil + epochInf, ok := c.epochs[epochCounter] + if !ok { + return fmt.Errorf("sanity check failed: current epoch committee info does not exist") } - - currentEpochCounter, err := c.state.Final().Epochs().Current().Counter() + // sanity check: we can only extend the current epoch, if the next epoch has not yet been committed: + if _, nextEpochCommitted := c.epochs[epochCounter+1]; nextEpochCommitted { + return fmt.Errorf("sanity check failed: attempting to extend epoch %d, but subsequent epoch %d is already committed", epochCounter, epochCounter+1) + } + err := epochInf.recomputeLeaderSelectionForExtendedViewRange(extension) if err != nil { - return fmt.Errorf("could not get current epoch counter: %w", err) + return fmt.Errorf("could not recompute leader selection for current epoch upon extension: %w", err) } + return nil +} - c.mu.RLock() - // sanity check: current epoch must be cached already - currentEpoch, ok := c.epochs[currentEpochCounter] - if !ok { - c.mu.RUnlock() - return fmt.Errorf("epoch fallback: could not find current epoch (counter=%d) info", currentEpochCounter) - } - // sanity check: next epoch must never be committed, therefore must not be cached - _, ok = c.epochs[currentEpochCounter+1] - c.mu.RUnlock() - if ok { - return fmt.Errorf("epoch fallback: next epoch (counter=%d) is cached contrary to expectation", currentEpochCounter+1) +// handleEpochCommittedPhaseStarted executes all state changes required upon observing an EpochCommittedPhaseStarted event. +// This function conforms to eventHandlerFunc. +// When the next epoch is committed, we compute leader selection for the epoch and cache it. +// No errors are expected during normal operation. +func (c *Consensus) handleEpochCommittedPhaseStarted(refBlock *flow.Header) error { + epoch, err := c.state.AtHeight(refBlock.Height).Epochs().NextCommitted() + if err != nil { // no expected errors since reference block is in EpochCommit phase + return fmt.Errorf("could not get next committed epoch: %w", err) } - - fallbackEpoch, err := newEmergencyFallbackEpoch(currentEpoch) + _, err = c.prepareEpoch(epoch) if err != nil { - return fmt.Errorf("could not construct fallback epoch: %w", err) + return fmt.Errorf("could not cache data for committed next epoch: %w", err) } - - // cache the epoch info - c.mu.Lock() - c.epochs[currentEpochCounter+1] = fallbackEpoch - c.mu.Unlock() - return nil } -// staticEpochInfoByView retrieves the previously cached static epoch info for -// the epoch which includes the given view. If no epoch is known for the given -// view, we will attempt to cache the next epoch. -// +// epochInfoByView retrieves the cached epoch info for the epoch which includes the given view. // Error returns: // - model.ErrViewForUnknownEpoch if no committed epoch containing the given view is known // - unspecific error in case of unexpected problems and bugs -func (c *Consensus) staticEpochInfoByView(view uint64) (*staticEpochInfo, error) { - +func (c *Consensus) epochInfoByView(view uint64) (*epochInfo, error) { // look for an epoch matching this view for which we have already pre-computed // leader selection. Epochs last ~500k views, so we find the epoch here 99.99% // of the time. Since epochs are long-lived and we only cache the most recent 3, // this linear map iteration is inexpensive. 
c.mu.RLock() for _, epoch := range c.epochs { - if epoch.firstView <= view && view <= epoch.finalView { + if epoch.FirstView() <= view && view <= epoch.FinalView() { c.mu.RUnlock() return epoch, nil } @@ -438,67 +414,58 @@ func (c *Consensus) staticEpochInfoByView(view uint64) (*staticEpochInfo, error) return nil, model.ErrViewForUnknownEpoch } -// prepareEpoch pre-computes and stores the static epoch information for the -// given epoch, including leader selection. Calling prepareEpoch multiple times -// for the same epoch returns cached static epoch information. +// prepareEpoch pre-computes and caches the epoch information for the given epoch, including leader selection. +// Calling prepareEpoch multiple times for the same epoch returns cached epoch information. // Input must be a committed epoch. // No errors are expected during normal operation. -func (c *Consensus) prepareEpoch(epoch protocol.Epoch) (*staticEpochInfo, error) { +func (c *Consensus) prepareEpoch(epoch protocol.CommittedEpoch) (*epochInfo, error) { + counter := epoch.Counter() - counter, err := epoch.Counter() - if err != nil { - return nil, fmt.Errorf("could not get counter for epoch to prepare: %w", err) - } + c.mu.Lock() + defer c.mu.Unlock() - // this is a no-op if we have already computed static info for this epoch - c.mu.RLock() - epochInfo, exists := c.epochs[counter] - c.mu.RUnlock() + // this is a no-op if we have already cached this epoch + epochInf, exists := c.epochs[counter] if exists { - return epochInfo, nil + return epochInf, nil } - epochInfo, err = newStaticEpochInfo(epoch) + epochInf, err := newEpochInfo(epoch) if err != nil { - return nil, fmt.Errorf("could not create static epoch info for epch %d: %w", counter, err) + return nil, fmt.Errorf("could not create epoch info for epoch %d: %w", counter, err) } // sanity check: ensure new epoch has contiguous views with the prior epoch - c.mu.RLock() prevEpochInfo, exists := c.epochs[counter-1] - c.mu.RUnlock() if exists { - if epochInfo.firstView != prevEpochInfo.finalView+1 { + if epochInf.FirstView() != prevEpochInfo.FinalView()+1 { return nil, fmt.Errorf("non-contiguous view ranges between consecutive epochs (epoch_%d=[%d,%d], epoch_%d=[%d,%d])", - counter-1, prevEpochInfo.firstView, prevEpochInfo.finalView, - counter, epochInfo.firstView, epochInfo.finalView) + counter-1, prevEpochInfo.FirstView(), prevEpochInfo.FinalView(), + counter, epochInf.FirstView(), epochInf.FinalView()) } } // cache the epoch info - c.mu.Lock() - defer c.mu.Unlock() - c.epochs[counter] = epochInfo - // now prune any old epochs, if we have exceeded our maximum of 3 - // if we have fewer than 3 epochs, this is a no-op + c.epochs[counter] = epochInf + // now prune any old epochs; if we have cached fewer than 3 epochs, this is a no-op c.pruneEpochInfo() - return epochInfo, nil + return epochInf, nil } // pruneEpochInfo removes any epochs older than the most recent 3. -// NOTE: Not safe for concurrent use - the caller must first acquire the lock. +// CAUTION: Not safe for concurrent use - the caller must first acquire the lock. 
func (c *Consensus) pruneEpochInfo() { // find the maximum counter, including the epoch we just computed - max := uint64(0) + maxCounter := uint64(0) for counter := range c.epochs { - if counter > max { - max = counter + if counter > maxCounter { + maxCounter = counter } } // remove any epochs which aren't within the most recent 3 for counter := range c.epochs { - if counter+3 <= max { + if counter+3 <= maxCounter { delete(c.epochs, counter) } } diff --git a/consensus/hotstuff/committees/consensus_committee_test.go b/consensus/hotstuff/committees/consensus_committee_test.go index b8d1f5bc415..1856388abcf 100644 --- a/consensus/hotstuff/committees/consensus_committee_test.go +++ b/consensus/hotstuff/committees/consensus_committee_test.go @@ -18,7 +18,6 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/protocol" protocolmock "github.com/onflow/flow-go/state/protocol/mock" - "github.com/onflow/flow-go/state/protocol/seed" "github.com/onflow/flow-go/utils/unittest" "github.com/onflow/flow-go/utils/unittest/mocks" ) @@ -33,37 +32,30 @@ type ConsensusSuite struct { // mocks state *protocolmock.State snapshot *protocolmock.Snapshot - params *protocolmock.Params epochs *mocks.EpochQuery // backend for mocked functions - phase flow.EpochPhase - epochFallbackTriggered bool - currentEpochCounter uint64 - myID flow.Identifier + phase flow.EpochPhase + currentEpochCounter uint64 + myID flow.Identifier committee *Consensus cancel context.CancelFunc } +// SetupTest instantiates mocks for a test case. +// By default, we start in the Staking phase with no epochs mocked; test cases must add their own epoch mocks. func (suite *ConsensusSuite) SetupTest() { suite.phase = flow.EpochPhaseStaking - suite.epochFallbackTriggered = false suite.currentEpochCounter = 1 suite.myID = unittest.IdentifierFixture() suite.state = new(protocolmock.State) suite.snapshot = new(protocolmock.Snapshot) - suite.params = new(protocolmock.Params) suite.epochs = mocks.NewEpochQuery(suite.T(), suite.currentEpochCounter) suite.state.On("Final").Return(suite.snapshot) - suite.state.On("Params").Return(suite.params) - suite.params.On("EpochFallbackTriggered").Return( - func() bool { return suite.epochFallbackTriggered }, - func() error { return nil }, - ) - suite.snapshot.On("Phase").Return( + suite.snapshot.On("EpochPhase").Return( func() flow.EpochPhase { return suite.phase }, func() error { return nil }, ) @@ -74,6 +66,7 @@ func (suite *ConsensusSuite) TearDownTest() { if suite.cancel != nil { suite.cancel() } + unittest.AssertClosesBefore(suite.T(), suite.committee.Done(), time.Second) } // CreateAndStartCommittee instantiates and starts the committee. @@ -92,15 +85,14 @@ func (suite *ConsensusSuite) CreateAndStartCommittee() { // CommitEpoch adds the epoch to the protocol state and mimics the protocol state // behaviour when committing an epoch, by sending the protocol event to the committee. 
-func (suite *ConsensusSuite) CommitEpoch(epoch protocol.Epoch) { +func (suite *ConsensusSuite) CommitEpoch(epoch protocol.CommittedEpoch) { firstBlockOfCommittedPhase := unittest.BlockHeaderFixture() - suite.state.On("AtBlockID", firstBlockOfCommittedPhase.ID()).Return(suite.snapshot) - suite.epochs.Add(epoch) + suite.state.On("AtHeight", firstBlockOfCommittedPhase.Height).Return(suite.snapshot) + suite.epochs.AddCommitted(epoch) suite.committee.EpochCommittedPhaseStarted(1, firstBlockOfCommittedPhase) // get the first view, to test when the epoch has been processed - firstView, err := epoch.FirstView() - require.NoError(suite.T(), err) + firstView := epoch.FirstView() // wait for the protocol event to be processed (async) assert.Eventually(suite.T(), func() bool { @@ -109,6 +101,24 @@ func (suite *ConsensusSuite) CommitEpoch(epoch protocol.Epoch) { }, time.Second, time.Millisecond) } +// AssertKnownViews asserts that no error is returned when querying identities by epoch for each of the input views. +func (suite *ConsensusSuite) AssertKnownViews(views ...uint64) { + for _, view := range views { + _, err := suite.committee.IdentitiesByEpoch(view) + suite.Assert().NoError(err) + } +} + +// AssertUnknownViews asserts that a model.ErrViewForUnknownEpoch sentinel +// is returned when querying identities by epoch for each of the input views. +func (suite *ConsensusSuite) AssertUnknownViews(views ...uint64) { + for _, view := range views { + _, err := suite.committee.IdentitiesByEpoch(view) + suite.Assert().Error(err) + suite.Assert().ErrorIs(err, model.ErrViewForUnknownEpoch) + } +} + // AssertStoredEpochCounterRange asserts that the cached epochs are for exactly // the given contiguous, inclusive counter range. // Eg. for the input (2,4), the committee must have epochs cached with counters 2,3,4 @@ -132,8 +142,8 @@ func (suite *ConsensusSuite) AssertStoredEpochCounterRange(from, to uint64) { // TestConstruction_CurrentEpoch tests construction with only a current epoch. // Only the current epoch should be cached after construction. func (suite *ConsensusSuite) TestConstruction_CurrentEpoch() { - curEpoch := newMockEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200, unittest.SeedFixture(32), true) - suite.epochs.Add(curEpoch) + curEpoch := newMockCommittedEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200) + suite.epochs.AddCommitted(curEpoch) suite.CreateAndStartCommittee() suite.Assert().Len(suite.committee.epochs, 1) @@ -143,10 +153,10 @@ // TestConstruction_PreviousEpoch tests construction with a previous epoch. // Both current and previous epoch should be cached after construction.
func (suite *ConsensusSuite) TestConstruction_PreviousEpoch() { - prevEpoch := newMockEpoch(suite.currentEpochCounter-1, unittest.IdentityListFixture(10), 1, 100, unittest.SeedFixture(32), true) - curEpoch := newMockEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200, unittest.SeedFixture(32), true) - suite.epochs.Add(prevEpoch) - suite.epochs.Add(curEpoch) + prevEpoch := newMockCommittedEpoch(suite.currentEpochCounter-1, unittest.IdentityListFixture(10), 1, 100) + curEpoch := newMockCommittedEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200) + suite.epochs.AddCommitted(prevEpoch) + suite.epochs.AddCommitted(curEpoch) suite.CreateAndStartCommittee() suite.Assert().Len(suite.committee.epochs, 2) @@ -157,10 +167,10 @@ func (suite *ConsensusSuite) TestConstruction_PreviousEpoch() { // Only the current epoch should be cached after construction. func (suite *ConsensusSuite) TestConstruction_UncommittedNextEpoch() { suite.phase = flow.EpochPhaseSetup - curEpoch := newMockEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200, unittest.SeedFixture(32), true) - nextEpoch := newMockEpoch(suite.currentEpochCounter+1, unittest.IdentityListFixture(10), 201, 300, unittest.SeedFixture(32), false) - suite.epochs.Add(curEpoch) - suite.epochs.Add(nextEpoch) + curEpoch := newMockCommittedEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200) + nextEpoch := newMockTentativeEpoch(suite.currentEpochCounter+1, unittest.IdentityListFixture(10)) + suite.epochs.AddCommitted(curEpoch) + suite.epochs.AddTentative(nextEpoch) suite.CreateAndStartCommittee() suite.Assert().Len(suite.committee.epochs, 1) @@ -170,10 +180,10 @@ func (suite *ConsensusSuite) TestConstruction_UncommittedNextEpoch() { // TestConstruction_CommittedNextEpoch tests construction with a committed next epoch. // Both current and next epochs should be cached after construction. func (suite *ConsensusSuite) TestConstruction_CommittedNextEpoch() { - curEpoch := newMockEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200, unittest.SeedFixture(32), true) - nextEpoch := newMockEpoch(suite.currentEpochCounter+1, unittest.IdentityListFixture(10), 201, 300, unittest.SeedFixture(32), true) - suite.epochs.Add(curEpoch) - suite.epochs.Add(nextEpoch) + curEpoch := newMockCommittedEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200) + nextEpoch := newMockCommittedEpoch(suite.currentEpochCounter+1, unittest.IdentityListFixture(10), 201, 300) + suite.epochs.AddCommitted(curEpoch) + suite.epochs.AddCommitted(nextEpoch) suite.phase = flow.EpochPhaseCommitted suite.CreateAndStartCommittee() @@ -181,36 +191,24 @@ func (suite *ConsensusSuite) TestConstruction_CommittedNextEpoch() { suite.AssertStoredEpochCounterRange(suite.currentEpochCounter, suite.currentEpochCounter+1) } -// TestConstruction_EpochFallbackTriggered tests construction when EECC has been triggered. -// Both current and the injected fallback epoch should be cached after construction. 
-func (suite *ConsensusSuite) TestConstruction_EpochFallbackTriggered() { - curEpoch := newMockEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200, unittest.SeedFixture(32), true) - suite.epochs.Add(curEpoch) - suite.epochFallbackTriggered = true - - suite.CreateAndStartCommittee() - suite.Assert().Len(suite.committee.epochs, 2) - suite.AssertStoredEpochCounterRange(suite.currentEpochCounter, suite.currentEpochCounter+1) -} - // TestProtocolEvents_CommittedEpoch tests that protocol events notifying of a newly // committed epoch are handled correctly. A committed epoch should be cached, and // repeated events should be no-ops. func (suite *ConsensusSuite) TestProtocolEvents_CommittedEpoch() { - curEpoch := newMockEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200, unittest.SeedFixture(32), true) - suite.epochs.Add(curEpoch) + curEpoch := newMockCommittedEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200) + suite.epochs.AddCommitted(curEpoch) suite.CreateAndStartCommittee() - nextEpoch := newMockEpoch(suite.currentEpochCounter+1, unittest.IdentityListFixture(10), 201, 300, unittest.SeedFixture(32), true) + nextEpoch := newMockCommittedEpoch(suite.currentEpochCounter+1, unittest.IdentityListFixture(10), 201, 300) firstBlockOfCommittedPhase := unittest.BlockHeaderFixture() - suite.state.On("AtBlockID", firstBlockOfCommittedPhase.ID()).Return(suite.snapshot) - suite.epochs.Add(nextEpoch) + suite.state.On("AtHeight", firstBlockOfCommittedPhase.Height).Return(suite.snapshot) + suite.epochs.AddCommitted(nextEpoch) suite.committee.EpochCommittedPhaseStarted(suite.currentEpochCounter, firstBlockOfCommittedPhase) // wait for the protocol event to be processed (async) assert.Eventually(suite.T(), func() bool { - _, err := suite.committee.IdentitiesByEpoch(unittest.Uint64InRange(201, 300)) + _, err := suite.committee.IdentitiesByEpoch(201) return err == nil }, 30*time.Second, 50*time.Millisecond) @@ -227,32 +225,90 @@ func (suite *ConsensusSuite) TestProtocolEvents_CommittedEpoch() { } -// TestProtocolEvents_EpochFallback tests that protocol events notifying of epoch -// fallback are handled correctly. Epoch fallback triggering should result in a -// fallback epoch being injected, and repeated events should be no-ops. -func (suite *ConsensusSuite) TestProtocolEvents_EpochFallback() { - curEpoch := newMockEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200, unittest.SeedFixture(32), true) - suite.epochs.Add(curEpoch) +// TestProtocolEvents_EpochExtended tests that protocol events notifying of an epoch extension are handled correctly. +// An EpochExtension event should result in a re-computation of the leader selection (including the new extension). +// Repeated events should be no-ops. 
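To make the re-computation concrete, here is a hedged sketch of what handling an extension amounts to, built only on the SelectionForConsensus signature introduced elsewhere in this diff; that the committee retains the epoch's identities, random source, and first view is an assumption about epochInfo's internals:

package sketch

import (
	"github.com/onflow/flow-go/consensus/hotstuff/committees/leader"
	"github.com/onflow/flow-go/model/flow"
)

// recomputeForExtension re-derives leader selection over the epoch's original
// first view through the extension's final view, with unchanged identities and
// random source, so views in [extension.FirstView, extension.FinalView] now
// have leaders assigned. This is an illustrative stand-in for
// recomputeLeaderSelectionForExtendedViewRange, not its actual implementation.
func recomputeForExtension(
	identities flow.IdentitySkeletonList,
	randomSource []byte,
	epochFirstView uint64,
	extension flow.EpochExtension,
) (*leader.LeaderSelection, error) {
	return leader.SelectionForConsensus(identities, randomSource, epochFirstView, extension.FinalView)
}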
+func (suite *ConsensusSuite) TestProtocolEvents_EpochExtended() { + curEpoch := newMockCommittedEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200) + suite.epochs.AddCommitted(curEpoch) suite.CreateAndStartCommittee() - suite.committee.EpochEmergencyFallbackTriggered() + suite.AssertUnknownViews(100, 201, 300, 301) + + extension := flow.EpochExtension{ + FirstView: 201, + FinalView: 300, + } + refBlock := unittest.BlockHeaderFixture() + addExtension(curEpoch, extension) + suite.state.On("AtHeight", refBlock.Height).Return(suite.snapshot) + + suite.committee.EpochExtended(suite.currentEpochCounter, refBlock, extension) // wait for the protocol event to be processed (async) require.Eventually(suite.T(), func() bool { - _, err := suite.committee.IdentitiesByEpoch(unittest.Uint64InRange(201, 300)) + _, err := suite.committee.IdentitiesByEpoch(extension.FirstView) return err == nil - }, 30*time.Second, 50*time.Millisecond) + }, time.Second, 50*time.Millisecond) - suite.Assert().Len(suite.committee.epochs, 2) - suite.AssertStoredEpochCounterRange(suite.currentEpochCounter, suite.currentEpochCounter+1) + // we should have the same number of cached epochs (an existing epoch has been extended) + suite.Assert().Len(suite.committee.epochs, 1) + suite.AssertStoredEpochCounterRange(suite.currentEpochCounter, suite.currentEpochCounter) // should handle multiple deliveries of the protocol event - suite.committee.EpochEmergencyFallbackTriggered() - suite.committee.EpochEmergencyFallbackTriggered() - suite.committee.EpochEmergencyFallbackTriggered() + suite.committee.EpochExtended(suite.currentEpochCounter, refBlock, extension) + suite.committee.EpochExtended(suite.currentEpochCounter, refBlock, extension) + suite.committee.EpochExtended(suite.currentEpochCounter, refBlock, extension) - suite.Assert().Len(suite.committee.epochs, 2) - suite.AssertStoredEpochCounterRange(suite.currentEpochCounter, suite.currentEpochCounter+1) + suite.Assert().Len(suite.committee.epochs, 1) + suite.AssertStoredEpochCounterRange(suite.currentEpochCounter, suite.currentEpochCounter) + // check the boundary values of the original epoch and the extension, plus a random view within the extension + suite.AssertKnownViews(101, 200, 201, unittest.Uint64InRange(201, 300), 300) + suite.AssertUnknownViews(100, 301) +} + +// TestProtocolEvents_EpochExtendedMultiple tests that protocol events notifying of an epoch extension are handled correctly. +// An EpochExtension event should result in a re-computation of the leader selection (including the new extension). +// The Committee should handle multiple subsequent, contiguous epoch extensions. +// Repeated events should be no-ops. +func (suite *ConsensusSuite) TestProtocolEvents_EpochExtendedMultiple() { + curEpoch := newMockCommittedEpoch(suite.currentEpochCounter, unittest.IdentityListFixture(10), 101, 200) + suite.epochs.AddCommitted(curEpoch) + + suite.CreateAndStartCommittee() + + expectedKnownViews := []uint64{101, unittest.Uint64InRange(101, 200), 200} + suite.AssertUnknownViews(100, 201, 300, 301) + suite.AssertKnownViews(expectedKnownViews...)
+ + // Add several extensions in series + for i := 0; i < 10; i++ { + finalView := curEpoch.FinalView() + extension := flow.EpochExtension{ + FirstView: finalView + 1, + FinalView: finalView + 100, + } + refBlock := unittest.BlockHeaderFixture() + addExtension(curEpoch, extension) + suite.state.On("AtHeight", refBlock.Height).Return(suite.snapshot) + + suite.committee.EpochExtended(suite.currentEpochCounter, refBlock, extension) + // wait for the protocol event to be processed (async) + require.Eventually(suite.T(), func() bool { + _, err := suite.committee.IdentitiesByEpoch(extension.FirstView) + return err == nil + }, time.Second, 50*time.Millisecond) + + // we should have the same number of cached epochs (an existing epoch has been extended) + suite.Assert().Len(suite.committee.epochs, 1) + suite.AssertStoredEpochCounterRange(suite.currentEpochCounter, suite.currentEpochCounter) + + // should respond to queries for view range of new extension + expectedKnownViews = append(expectedKnownViews, extension.FirstView, unittest.Uint64InRange(extension.FirstView, extension.FinalView), extension.FinalView) + suite.AssertKnownViews(expectedKnownViews...) + // should return sentinel for view outside extension + suite.AssertUnknownViews(100, extension.FinalView+1) + } } // TestIdentitiesByBlock tests retrieving committee members by block. @@ -262,21 +318,29 @@ func (suite *ConsensusSuite) TestIdentitiesByBlock() { t := suite.T() realIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) - zeroWeightConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus), unittest.WithWeight(0)) - ejectedConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus), unittest.WithEjected(true)) + joiningConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus), unittest.WithParticipationStatus(flow.EpochParticipationStatusJoining)) + leavingConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus), unittest.WithParticipationStatus(flow.EpochParticipationStatusLeaving)) + ejectedConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus), unittest.WithParticipationStatus(flow.EpochParticipationStatusEjected)) validNonConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) + validConsensusIdentities := []*flow.Identity{ + realIdentity, + joiningConsensusIdentity, + leavingConsensusIdentity, + validNonConsensusIdentity, + ejectedConsensusIdentity, + } fakeID := unittest.IdentifierFixture() blockID := unittest.IdentifierFixture() // create a mock epoch for leader selection setup in constructor - currEpoch := newMockEpoch(1, unittest.IdentityListFixture(10), 1, 100, unittest.SeedFixture(seed.RandomSourceLength), true) - suite.epochs.Add(currEpoch) + currEpoch := newMockCommittedEpoch(1, unittest.IdentityListFixture(10), 1, 100) + suite.epochs.AddCommitted(currEpoch) suite.state.On("AtBlockID", blockID).Return(suite.snapshot) - suite.snapshot.On("Identity", realIdentity.NodeID).Return(realIdentity, nil) - suite.snapshot.On("Identity", zeroWeightConsensusIdentity.NodeID).Return(zeroWeightConsensusIdentity, nil) - suite.snapshot.On("Identity", ejectedConsensusIdentity.NodeID).Return(ejectedConsensusIdentity, nil) - suite.snapshot.On("Identity", validNonConsensusIdentity.NodeID).Return(validNonConsensusIdentity, nil) + for _, identity := range validConsensusIdentities { + i := identity // copy + suite.snapshot.On("Identity", i.NodeID).Return(i, nil) +
} suite.snapshot.On("Identity", fakeID).Return(nil, protocol.IdentityNotFoundError{}) suite.CreateAndStartCommittee() @@ -287,8 +351,13 @@ func (suite *ConsensusSuite) TestIdentitiesByBlock() { }) t.Run("existent but non-committee-member identity should return InvalidSignerError", func(t *testing.T) { - t.Run("zero-weight consensus node", func(t *testing.T) { - _, err := suite.committee.IdentityByBlock(blockID, zeroWeightConsensusIdentity.NodeID) + t.Run("joining consensus node", func(t *testing.T) { + _, err := suite.committee.IdentityByBlock(blockID, joiningConsensusIdentity.NodeID) + require.True(t, model.IsInvalidSignerError(err)) + }) + + t.Run("leaving consensus node", func(t *testing.T) { + _, err := suite.committee.IdentityByBlock(blockID, leavingConsensusIdentity.NodeID) require.True(t, model.IsInvalidSignerError(err)) }) @@ -327,8 +396,10 @@ func (suite *ConsensusSuite) TestIdentitiesByEpoch() { // epoch 1 identities with varying conditions which would disqualify them // from committee participation realIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) - zeroWeightConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus), unittest.WithWeight(0)) - ejectedConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus), unittest.WithEjected(true)) + zeroWeightConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus), + unittest.WithInitialWeight(0)) + ejectedConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus), + unittest.WithParticipationStatus(flow.EpochParticipationStatusEjected)) validNonConsensusIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) epoch1Identities := flow.IdentityList{realIdentity, zeroWeightConsensusIdentity, ejectedConsensusIdentity, validNonConsensusIdentity} @@ -337,10 +408,10 @@ func (suite *ConsensusSuite) TestIdentitiesByEpoch() { epoch2Identities := flow.IdentityList{epoch2Identity} // create a mock epoch for leader selection setup in constructor - epoch1 := newMockEpoch(suite.currentEpochCounter, epoch1Identities, 1, 100, unittest.SeedFixture(seed.RandomSourceLength), true) + epoch1 := newMockCommittedEpoch(suite.currentEpochCounter, epoch1Identities, 1, 100) // initially epoch 2 is not committed - epoch2 := newMockEpoch(suite.currentEpochCounter+1, epoch2Identities, 101, 200, unittest.SeedFixture(seed.RandomSourceLength), true) - suite.epochs.Add(epoch1) + epoch2 := newMockCommittedEpoch(suite.currentEpochCounter+1, epoch2Identities, 101, 200) + suite.epochs.AddCommitted(epoch1) suite.CreateAndStartCommittee() @@ -356,11 +427,6 @@ func (suite *ConsensusSuite) TestIdentitiesByEpoch() { require.True(t, model.IsInvalidSignerError(err)) }) - t.Run("ejected consensus node", func(t *testing.T) { - _, err := suite.committee.IdentityByEpoch(unittest.Uint64InRange(1, 100), ejectedConsensusIdentity.NodeID) - require.True(t, model.IsInvalidSignerError(err)) - }) - t.Run("otherwise valid non-consensus node", func(t *testing.T) { _, err := suite.committee.IdentityByEpoch(unittest.Uint64InRange(1, 100), validNonConsensusIdentity.NodeID) require.True(t, model.IsInvalidSignerError(err)) @@ -370,13 +436,12 @@ func (suite *ConsensusSuite) TestIdentitiesByEpoch() { t.Run("should be able to retrieve real identity", func(t *testing.T) { actual, err := suite.committee.IdentityByEpoch(unittest.Uint64InRange(1, 100), realIdentity.NodeID) require.NoError(t, err) - require.Equal(t, realIdentity, actual) + require.Equal(t, 
realIdentity.IdentitySkeleton, *actual) }) t.Run("should return ErrViewForUnknownEpoch for view outside existing epoch", func(t *testing.T) { _, err := suite.committee.IdentityByEpoch(unittest.Uint64InRange(101, 1_000_000), epoch2Identity.NodeID) - require.Error(t, err) - require.True(t, errors.Is(err, model.ErrViewForUnknownEpoch)) + require.ErrorIs(t, err, model.ErrViewForUnknownEpoch) }) }) @@ -387,7 +452,7 @@ func (suite *ConsensusSuite) TestIdentitiesByEpoch() { t.Run("should be able to retrieve epoch 1 identity in epoch 1", func(t *testing.T) { actual, err := suite.committee.IdentityByEpoch(unittest.Uint64InRange(1, 100), realIdentity.NodeID) require.NoError(t, err) - require.Equal(t, realIdentity, actual) + require.Equal(t, realIdentity.IdentitySkeleton, *actual) }) t.Run("should be unable to retrieve epoch 1 identity in epoch 2", func(t *testing.T) { @@ -405,13 +470,12 @@ func (suite *ConsensusSuite) TestIdentitiesByEpoch() { t.Run("should be able to retrieve epoch 2 identity in epoch 2", func(t *testing.T) { actual, err := suite.committee.IdentityByEpoch(unittest.Uint64InRange(101, 200), epoch2Identity.NodeID) require.NoError(t, err) - require.Equal(t, epoch2Identity, actual) + require.Equal(t, epoch2Identity.IdentitySkeleton, *actual) }) t.Run("should return ErrViewForUnknownEpoch for view outside existing epochs", func(t *testing.T) { _, err := suite.committee.IdentityByEpoch(unittest.Uint64InRange(201, 1_000_000), epoch2Identity.NodeID) - require.Error(t, err) - require.True(t, errors.Is(err, model.ErrViewForUnknownEpoch)) + require.ErrorIs(t, err, model.ErrViewForUnknownEpoch) }) }) @@ -428,83 +492,79 @@ func (suite *ConsensusSuite) TestThresholds() { identities := unittest.IdentityListFixture(10) - prevEpoch := newMockEpoch(suite.currentEpochCounter-1, identities.Map(mapfunc.WithWeight(100)), 1, 100, unittest.SeedFixture(seed.RandomSourceLength), true) - currEpoch := newMockEpoch(suite.currentEpochCounter, identities.Map(mapfunc.WithWeight(200)), 101, 200, unittest.SeedFixture(32), true) - suite.epochs.Add(prevEpoch) - suite.epochs.Add(currEpoch) + prevEpoch := newMockCommittedEpoch(suite.currentEpochCounter-1, identities.Map(mapfunc.WithInitialWeight(100)), 1, 100) + currEpoch := newMockCommittedEpoch(suite.currentEpochCounter, identities.Map(mapfunc.WithInitialWeight(200)), 101, 200) + suite.epochs.AddCommitted(prevEpoch) + suite.epochs.AddCommitted(currEpoch) suite.CreateAndStartCommittee() t.Run("next epoch not ready", func(t *testing.T) { t.Run("previous epoch", func(t *testing.T) { threshold, err := suite.committee.QuorumThresholdForView(unittest.Uint64InRange(1, 100)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, WeightThresholdToBuildQC(1000), threshold) threshold, err = suite.committee.TimeoutThresholdForView(unittest.Uint64InRange(1, 100)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, WeightThresholdToTimeout(1000), threshold) }) t.Run("current epoch", func(t *testing.T) { threshold, err := suite.committee.QuorumThresholdForView(unittest.Uint64InRange(101, 200)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, WeightThresholdToBuildQC(2000), threshold) threshold, err = suite.committee.TimeoutThresholdForView(unittest.Uint64InRange(101, 200)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, WeightThresholdToTimeout(2000), threshold) }) t.Run("after current epoch - should return ErrViewForUnknownEpoch", func(t *testing.T) { // get threshold for view in next epoch when it is not set up yet _, err := 
suite.committee.QuorumThresholdForView(unittest.Uint64InRange(201, 300)) - assert.Error(t, err) - assert.True(t, errors.Is(err, model.ErrViewForUnknownEpoch)) + assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) _, err = suite.committee.TimeoutThresholdForView(unittest.Uint64InRange(201, 300)) - assert.Error(t, err) - assert.True(t, errors.Is(err, model.ErrViewForUnknownEpoch)) + assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) }) }) // now, add a valid next epoch - nextEpoch := newMockEpoch(suite.currentEpochCounter+1, identities.Map(mapfunc.WithWeight(300)), 201, 300, unittest.SeedFixture(seed.RandomSourceLength), true) + nextEpoch := newMockCommittedEpoch(suite.currentEpochCounter+1, identities.Map(mapfunc.WithInitialWeight(300)), 201, 300) suite.CommitEpoch(nextEpoch) t.Run("next epoch ready", func(t *testing.T) { t.Run("previous epoch", func(t *testing.T) { threshold, err := suite.committee.QuorumThresholdForView(unittest.Uint64InRange(1, 100)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, WeightThresholdToBuildQC(1000), threshold) threshold, err = suite.committee.TimeoutThresholdForView(unittest.Uint64InRange(1, 100)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, WeightThresholdToTimeout(1000), threshold) }) t.Run("current epoch", func(t *testing.T) { threshold, err := suite.committee.QuorumThresholdForView(unittest.Uint64InRange(101, 200)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, WeightThresholdToBuildQC(2000), threshold) threshold, err = suite.committee.TimeoutThresholdForView(unittest.Uint64InRange(101, 200)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, WeightThresholdToTimeout(2000), threshold) }) t.Run("next epoch", func(t *testing.T) { threshold, err := suite.committee.QuorumThresholdForView(unittest.Uint64InRange(201, 300)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, WeightThresholdToBuildQC(3000), threshold) threshold, err = suite.committee.TimeoutThresholdForView(unittest.Uint64InRange(201, 300)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, WeightThresholdToTimeout(3000), threshold) }) t.Run("beyond known epochs", func(t *testing.T) { // get threshold for view in next epoch when it is not set up yet _, err := suite.committee.QuorumThresholdForView(unittest.Uint64InRange(301, 10_000)) - assert.Error(t, err) - assert.True(t, errors.Is(err, model.ErrViewForUnknownEpoch)) + assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) _, err = suite.committee.TimeoutThresholdForView(unittest.Uint64InRange(301, 10_000)) - assert.Error(t, err) - assert.True(t, errors.Is(err, model.ErrViewForUnknownEpoch)) + assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) }) }) } @@ -517,10 +577,10 @@ func (suite *ConsensusSuite) TestLeaderForView() { identities := unittest.IdentityListFixture(10) - prevEpoch := newMockEpoch(suite.currentEpochCounter-1, identities, 1, 100, unittest.SeedFixture(seed.RandomSourceLength), true) - currEpoch := newMockEpoch(suite.currentEpochCounter, identities, 101, 200, unittest.SeedFixture(32), true) - suite.epochs.Add(currEpoch) - suite.epochs.Add(prevEpoch) + prevEpoch := newMockCommittedEpoch(suite.currentEpochCounter-1, identities, 1, 100) + currEpoch := newMockCommittedEpoch(suite.currentEpochCounter, identities, 101, 200) + suite.epochs.AddCommitted(currEpoch) + suite.epochs.AddCommitted(prevEpoch) suite.CreateAndStartCommittee() @@ -544,20 +604,19 @@ func (suite *ConsensusSuite) TestLeaderForView() { t.Run("after current epoch - should return 
ErrViewForUnknownEpoch", func(t *testing.T) { // get leader for view in next epoch when it is not set up yet _, err := suite.committee.LeaderForView(unittest.Uint64InRange(201, 300)) - assert.Error(t, err) - assert.True(t, errors.Is(err, model.ErrViewForUnknownEpoch)) + assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) }) }) // now, add a valid next epoch - nextEpoch := newMockEpoch(suite.currentEpochCounter+1, identities, 201, 300, unittest.SeedFixture(seed.RandomSourceLength), true) + nextEpoch := newMockCommittedEpoch(suite.currentEpochCounter+1, identities, 201, 300) suite.CommitEpoch(nextEpoch) t.Run("next epoch ready", func(t *testing.T) { t.Run("previous epoch", func(t *testing.T) { // get leader for view in previous epoch leaderID, err := suite.committee.LeaderForView(unittest.Uint64InRange(1, 100)) - require.Nil(t, err) + require.NoError(t, err) _, exists := identities.ByNodeID(leaderID) assert.True(t, exists) }) @@ -565,7 +624,7 @@ func (suite *ConsensusSuite) TestLeaderForView() { t.Run("current epoch", func(t *testing.T) { // get leader for view in current epoch leaderID, err := suite.committee.LeaderForView(unittest.Uint64InRange(101, 200)) - require.Nil(t, err) + require.NoError(t, err) _, exists := identities.ByNodeID(leaderID) assert.True(t, exists) }) @@ -573,15 +632,14 @@ func (suite *ConsensusSuite) TestLeaderForView() { t.Run("next epoch", func(t *testing.T) { // get leader for view in next epoch after it has been set up leaderID, err := suite.committee.LeaderForView(unittest.Uint64InRange(201, 300)) - require.Nil(t, err) + require.NoError(t, err) _, exists := identities.ByNodeID(leaderID) assert.True(t, exists) }) t.Run("beyond known epochs", func(t *testing.T) { _, err := suite.committee.LeaderForView(unittest.Uint64InRange(301, 1_000_000)) - assert.Error(t, err) - assert.True(t, errors.Is(err, model.ErrViewForUnknownEpoch)) + assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) }) }) } @@ -597,26 +655,23 @@ func TestRemoveOldEpochs(t *testing.T) { currentEpochCounter := firstEpochCounter epochFinalView := uint64(100) - epoch1 := newMockEpoch(currentEpochCounter, identities, 1, epochFinalView, unittest.SeedFixture(seed.RandomSourceLength), true) + epoch1 := newMockCommittedEpoch(currentEpochCounter, identities, 1, epochFinalView) // create mocks - state := new(protocolmock.State) - snapshot := new(protocolmock.Snapshot) - params := new(protocolmock.Params) + state := protocolmock.NewState(t) + snapshot := protocolmock.NewSnapshot(t) state.On("Final").Return(snapshot) - state.On("Params").Return(params) - params.On("EpochFallbackTriggered").Return(false, nil) epochQuery := mocks.NewEpochQuery(t, currentEpochCounter, epoch1) snapshot.On("Epochs").Return(epochQuery) currentEpochPhase := flow.EpochPhaseStaking - snapshot.On("Phase").Return( + snapshot.On("EpochPhase").Return( func() flow.EpochPhase { return currentEpochPhase }, func() error { return nil }, - ) + ).Maybe() com, err := NewConsensusCommittee(state, me) - require.Nil(t, err) + require.NoError(t, err) ctx, cancel, errCh := irrecoverable.WithSignallerAndCancel(context.Background()) com.Start(ctx) @@ -634,12 +689,12 @@ func TestRemoveOldEpochs(t *testing.T) { firstView := epochFinalView + 1 epochFinalView = epochFinalView + 100 currentEpochCounter++ - nextEpoch := newMockEpoch(currentEpochCounter, identities, firstView, epochFinalView, unittest.SeedFixture(seed.RandomSourceLength), true) - epochQuery.Add(nextEpoch) + nextEpoch := newMockCommittedEpoch(currentEpochCounter, identities, firstView, 
epochFinalView) + epochQuery.AddCommitted(nextEpoch) currentEpochPhase = flow.EpochPhaseCommitted firstBlockOfCommittedPhase := unittest.BlockHeaderFixture() - state.On("AtBlockID", firstBlockOfCommittedPhase.ID()).Return(snapshot) + state.On("AtHeight", firstBlockOfCommittedPhase.Height).Return(snapshot) com.EpochCommittedPhaseStarted(currentEpochCounter, firstBlockOfCommittedPhase) // wait for the protocol event to be processed (async) require.Eventually(t, func() bool { @@ -674,21 +729,29 @@ func TestRemoveOldEpochs(t *testing.T) { } } -// newMockEpoch returns a new mocked epoch with the given fields -func newMockEpoch(counter uint64, identities flow.IdentityList, firstView uint64, finalView uint64, seed []byte, committed bool) *protocolmock.Epoch { - - epoch := new(protocolmock.Epoch) - epoch.On("Counter").Return(counter, nil) - epoch.On("InitialIdentities").Return(identities, nil) - epoch.On("FirstView").Return(firstView, nil) - epoch.On("FinalView").Return(finalView, nil) - if committed { - // return nil error to indicate the epoch is committed - epoch.On("DKG").Return(nil, nil) - } else { - epoch.On("DKG").Return(nil, protocol.ErrNextEpochNotCommitted) - } +// addExtension adds the extension to the mocked epoch, by updating its final view. +func addExtension(epoch *protocolmock.CommittedEpoch, ext flow.EpochExtension) { + epoch.On("FinalView").Unset() + epoch.On("FinalView").Return(ext.FinalView) +} + +// newMockCommittedEpoch returns a new mocked committed epoch with the given fields +func newMockCommittedEpoch(counter uint64, identities flow.IdentityList, firstView uint64, finalView uint64) *protocolmock.CommittedEpoch { + epoch := new(protocolmock.CommittedEpoch) + epoch.On("Counter").Return(counter) + epoch.On("RandomSource").Return(unittest.RandomBytes(32)) + epoch.On("InitialIdentities").Return(identities.ToSkeleton()) + epoch.On("FirstView").Return(firstView) + epoch.On("FinalView").Return(finalView) + epoch.On("DKG").Return(nil, nil) + + return epoch +} - epoch.On("RandomSource").Return(seed, nil) +// newMockTentativeEpoch returns a new mocked tentative epoch with the given fields +func newMockTentativeEpoch(counter uint64, identities flow.IdentityList) *protocolmock.TentativeEpoch { + epoch := new(protocolmock.TentativeEpoch) + epoch.On("Counter").Return(counter) + epoch.On("InitialIdentities").Return(identities.ToSkeleton()) return epoch } diff --git a/consensus/hotstuff/committees/leader/cluster.go b/consensus/hotstuff/committees/leader/cluster.go index 2de6899d8d4..177cc18dea6 100644 --- a/consensus/hotstuff/committees/leader/cluster.go +++ b/consensus/hotstuff/committees/leader/cluster.go @@ -4,43 +4,37 @@ import ( "fmt" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/seed" + "github.com/onflow/flow-go/state/protocol/prg" ) // SelectionForCluster pre-computes and returns leaders for the given cluster -// committee in the given epoch. -func SelectionForCluster(cluster protocol.Cluster, epoch protocol.Epoch) (*LeaderSelection, error) { - +// committee in the given epoch. A cluster containing nodes with zero `InitialWeight` +// is an accepted input as long as there are nodes with positive weights. Zero-weight nodes +// have zero probability of being selected as leaders in accordance with their weight. 
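Before the implementation below, a brief note on the prg customizers it uses: deriving per-cluster PRGs from one epoch random source is a domain-separation scheme, so each cluster (and, with its own customizer, the consensus committee) gets an independent leader sequence. A hedged sketch using only the prg calls visible in this diff (the loop and the uint index type are assumptions about the API):

package sketch

import (
	"github.com/onflow/crypto/random"

	"github.com/onflow/flow-go/state/protocol/prg"
)

// clusterRNGs derives one independent PRG per cluster from a single epoch
// random source; the customizer acts as a domain-separation tag, so the
// streams for different cluster indices are independent of one another.
func clusterRNGs(randomSource []byte, nClusters uint) ([]random.Rand, error) {
	rngs := make([]random.Rand, 0, nClusters)
	for index := uint(0); index < nClusters; index++ {
		rng, err := prg.New(randomSource, prg.CollectorClusterLeaderSelection(index), nil)
		if err != nil {
			return nil, err
		}
		rngs = append(rngs, rng)
	}
	return rngs, nil
}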
+func SelectionForCluster(cluster protocol.Cluster, epoch protocol.CommittedEpoch) (*LeaderSelection, error) { // sanity check to ensure the cluster and epoch match - counter, err := epoch.Counter() - if err != nil { - return nil, fmt.Errorf("could not get epoch counter: %w", err) - } + counter := epoch.Counter() if counter != cluster.EpochCounter() { return nil, fmt.Errorf("inconsistent counter between epoch (%d) and cluster (%d)", counter, cluster.EpochCounter()) } - identities := cluster.Members() // get the random source of the current epoch - randomSeed, err := epoch.RandomSource() - if err != nil { - return nil, fmt.Errorf("could not get leader selection seed for cluster (index: %v) at epoch: %v: %w", cluster.Index(), counter, err) - } + randomSeed := epoch.RandomSource() // create random number generator from the seed and customizer - rng, err := seed.PRGFromRandomSource(randomSeed, seed.ProtocolCollectorClusterLeaderSelection(cluster.Index())) + rng, err := prg.New(randomSeed, prg.CollectorClusterLeaderSelection(cluster.Index()), nil) if err != nil { return nil, fmt.Errorf("could not create rng: %w", err) } - firstView := cluster.RootBlock().Header.View - // TODO what is a good value here? - finalView := firstView + EstimatedSixMonthOfViews + firstView := cluster.RootBlock().View + finalView := firstView + EstimatedSixMonthOfViews // TODO what is a good value here? + // ComputeLeaderSelection already handles zero-weight nodes with marginal overhead. leaders, err := ComputeLeaderSelection( firstView, rng, int(finalView-firstView+1), - identities, + cluster.Members().ToSkeleton(), ) return leaders, err } diff --git a/consensus/hotstuff/committees/leader/consensus.go b/consensus/hotstuff/committees/leader/consensus.go index c9ea12eeece..368682d262e 100644 --- a/consensus/hotstuff/committees/leader/consensus.go +++ b/consensus/hotstuff/committees/leader/consensus.go @@ -3,47 +3,37 @@ package leader import ( "fmt" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/seed" + "github.com/onflow/flow-go/state/protocol/prg" ) +// SelectionForConsensusFromEpoch returns the leader selection for the input epoch. +// See [SelectionForConsensus] for additional details. +func SelectionForConsensusFromEpoch(epoch protocol.CommittedEpoch) (*LeaderSelection, error) { + return SelectionForConsensus( + epoch.InitialIdentities(), + epoch.RandomSource(), + epoch.FirstView(), + epoch.FinalView(), + ) +} + // SelectionForConsensus pre-computes and returns leaders for the consensus committee // in the given epoch. The consensus committee spans multiple epochs and the leader // selection returned here is only valid for the input epoch, so it is necessary to // call this for each upcoming epoch. 
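As a usage illustration of the per-epoch contract just described, a hedged sketch (the function name and error wrapping are invented for illustration; the API calls are the ones defined in this diff):

package sketch

import (
	"fmt"

	"github.com/onflow/flow-go/consensus/hotstuff/committees/leader"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol"
)

// leaderForEpochView computes leader selection from one committed epoch and
// queries it; the selection only answers views inside that epoch's view range,
// so a caller tracking several epochs must compute one selection per epoch.
func leaderForEpochView(epoch protocol.CommittedEpoch, view uint64) (flow.Identifier, error) {
	selection, err := leader.SelectionForConsensusFromEpoch(epoch)
	if err != nil {
		return flow.ZeroID, fmt.Errorf("could not compute leader selection for epoch %d: %w", epoch.Counter(), err)
	}
	// views outside the epoch's [first, final] view range return an error
	return selection.LeaderForView(view)
}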
-func SelectionForConsensus(epoch protocol.Epoch) (*LeaderSelection, error) { - - // pre-compute leader selection for the epoch - identities, err := epoch.InitialIdentities() - if err != nil { - return nil, fmt.Errorf("could not get epoch initial identities: %w", err) - } - - // get the epoch source of randomness - randomSeed, err := epoch.RandomSource() - if err != nil { - return nil, fmt.Errorf("could not get epoch seed: %w", err) - } - // create random number generator from the seed and customizer - rng, err := seed.PRGFromRandomSource(randomSeed, seed.ProtocolConsensusLeaderSelection) +func SelectionForConsensus(initialIdentities flow.IdentitySkeletonList, randomSeed []byte, firstView, finalView uint64) (*LeaderSelection, error) { + rng, err := prg.New(randomSeed, prg.ConsensusLeaderSelection, nil) if err != nil { return nil, fmt.Errorf("could not create rng: %w", err) } - firstView, err := epoch.FirstView() - if err != nil { - return nil, fmt.Errorf("could not get epoch first view: %w", err) - } - finalView, err := epoch.FinalView() - if err != nil { - return nil, fmt.Errorf("could not get epoch final view: %w", err) - } - leaders, err := ComputeLeaderSelection( firstView, rng, int(finalView-firstView+1), // add 1 because both first/final view are inclusive - identities.Filter(filter.IsVotingConsensusCommitteeMember), + initialIdentities.Filter(filter.IsConsensusCommitteeMember), ) return leaders, err } diff --git a/consensus/hotstuff/committees/leader/leader_selection.go b/consensus/hotstuff/committees/leader/leader_selection.go index bc1936cc197..e820d1f617a 100644 --- a/consensus/hotstuff/committees/leader/leader_selection.go +++ b/consensus/hotstuff/committees/leader/leader_selection.go @@ -5,7 +5,8 @@ import ( "fmt" "math" - "github.com/onflow/flow-go/crypto/random" + "github.com/onflow/crypto/random" + "github.com/onflow/flow-go/model/flow" ) @@ -84,25 +85,28 @@ func (l LeaderSelection) newInvalidViewError(view uint64) InvalidViewError { // ComputeLeaderSelection pre-generates a certain number of leader selections, and returns a // leader selection instance for querying the leader indexes for certain views. -// firstView - the start view of the epoch, the generated leader selections start from this view. -// rng - the deterministic source of randoms -// count - the number of leader selections to be pre-generated and cached. -// identities - the identities that contain the weight info, which is used as probability for -// the identity to be selected as leader. +// Inputs: +// - firstView: the start view of the epoch, the generated leader selections start from this view. +// - rng: the deterministic source of randomness +// - count: the number of leader selections to be pre-generated and cached. +// - identities: the identities that contain the weight info, which is used as the probability for +// the identity to be selected as leader. +// +// Identities with `InitialWeight=0` are accepted as long as there are nodes with positive weights. +// Zero-weight nodes have zero probability of being selected as leaders in accordance with their weight.
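The comment above describes fitness-proportionate selection; since the prefix-sum-plus-binary-search construction below is easy to get subtly wrong, here is a self-contained toy version (math/rand and sort.Search stand in for the protocol PRG and the package's binarySearchStrictlyBigger; the weights and ratio check are illustrative):

package main

import (
	"fmt"
	"math/rand"
	"sort"
)

// pickWeighted returns an index with probability proportional to its weight:
// build cumulative sums, draw a value below the total, and find the first
// strictly larger prefix sum. Zero-weight entries occupy an empty interval
// of the cumulative range and can never be chosen.
func pickWeighted(rng *rand.Rand, weights []uint64) int {
	sums := make([]uint64, len(weights))
	var total uint64
	for i, w := range weights {
		total += w
		sums[i] = total
	}
	v := rng.Uint64() % total // value in [0, total); modulo bias is negligible for this toy
	return sort.Search(len(sums), func(i int) bool { return sums[i] > v })
}

func main() {
	rng := rand.New(rand.NewSource(1))
	counts := make([]int, 4)
	for i := 0; i < 60000; i++ {
		counts[pickWeighted(rng, []uint64{0, 1, 2, 3})]++
	}
	// index 0 has zero weight and is never selected; the rest appear roughly 1:2:3
	fmt.Println(counts)
}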
func ComputeLeaderSelection( firstView uint64, rng random.Rand, count int, - identities flow.IdentityList, + identities flow.IdentitySkeletonList, ) (*LeaderSelection, error) { - if count < 1 { return nil, fmt.Errorf("number of views must be positive (got %d)", count) } weights := make([]uint64, 0, len(identities)) for _, id := range identities { - weights = append(weights, id.Weight) + weights = append(weights, id.InitialWeight) } leaders, err := weightedRandomSelection(rng, count, weights) @@ -117,7 +121,7 @@ func ComputeLeaderSelection( }, nil } -// weightedRandomSelection - given a random source source and a given count, pre-generate the indices of leader. +// weightedRandomSelection - given a random source and a given count, pre-generate the indices of leader. // The chance to be selected as leader is proportional to its weight. // If an identity has 0 weight, it won't be selected as leader. // This algorithm is essentially Fitness proportionate selection: @@ -127,11 +131,9 @@ func weightedRandomSelection( count int, weights []uint64, ) ([]uint16, error) { - if len(weights) == 0 { return nil, fmt.Errorf("weights is empty") } - if len(weights) >= math.MaxUint16 { return nil, fmt.Errorf("number of possible leaders (%d) exceeds maximum (2^16-1)", len(weights)) } @@ -148,7 +150,6 @@ func weightedRandomSelection( cumsum += weight weightSums = append(weightSums, cumsum) } - if cumsum == 0 { return nil, fmt.Errorf("total weight must be greater than 0") } @@ -166,12 +167,12 @@ func weightedRandomSelection( return leaders, nil } -// binarySearchStriclyBigger finds the index of the first item in the given array that is +// binarySearchStrictlyBigger finds the index of the first item in the given array that is // strictly bigger to the given value. // There are a few assumptions on inputs: -// - `arr` must be non-empty -// - items in `arr` must be in non-decreasing order -// - `value` must be less than the last item in `arr` +// - `arr` must be non-empty +// - items in `arr` must be in monotonically increasing order (for indices i,j with i<j it must hold that arr[i] ≤ arr[j]) +// - `value` must be less than the last item in `arr` func binarySearchStrictlyBigger(value uint64, arr []uint64) int { left := 0 arrayLen := len(arr) diff --git a/consensus/hotstuff/committees/leader/leader_selection_test.go b/consensus/hotstuff/committees/leader/leader_selection_test.go index 7d580c76a6a..c391ea756a6 100644 --- a/consensus/hotstuff/committees/leader/leader_selection_test.go +++ b/consensus/hotstuff/committees/leader/leader_selection_test.go @@ -6,10 +6,10 @@ import ( "sort" "testing" + "github.com/onflow/crypto/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto/random" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/utils/unittest" @@ -23,9 +23,9 @@ var someSeed = []uint8{0x6A, 0x23, 0x41, 0xB7, 0x80, 0xE1, 0x64, 0x59, // We test that leader selection works for a committee of size one func TestSingleConsensusNode(t *testing.T) { - identity := unittest.IdentityFixture(unittest.WithWeight(8)) - rng := prg(t, someSeed) - selection, err := ComputeLeaderSelection(0, rng, 10, []*flow.Identity{identity}) + identity := unittest.IdentityFixture(unittest.WithInitialWeight(8)) + rng := getPRG(t, someSeed) + selection, err := ComputeLeaderSelection(0, rng, 10, flow.IdentitySkeletonList{&identity.IdentitySkeleton}) require.NoError(t, err) for i := uint64(0); i < 10; i++ { leaderID, err 
:= selection.LeaderForView(i) @@ -114,7 +114,7 @@ func bruteSearch(value uint64, arr []uint64) (int, error) { return 0, fmt.Errorf("not found") } -func prg(t testing.TB, seed []byte) random.Rand { +func getPRG(t testing.TB, seed []byte) random.Rand { rng, err := random.NewChacha20PRG(seed, []byte("random")) require.NoError(t, err) return rng @@ -126,16 +126,16 @@ func TestDeterministic(t *testing.T) { const N_VIEWS = 100 const N_NODES = 4 - identities := unittest.IdentityListFixture(N_NODES) + identities := unittest.IdentityListFixture(N_NODES).ToSkeleton() for i, identity := range identities { - identity.Weight = uint64(i + 1) + identity.InitialWeight = uint64(i + 1) } - rng := prg(t, someSeed) + rng := getPRG(t, someSeed) leaders1, err := ComputeLeaderSelection(0, rng, N_VIEWS, identities) require.NoError(t, err) - rng = prg(t, someSeed) + rng = getPRG(t, someSeed) leaders2, err := ComputeLeaderSelection(0, rng, N_VIEWS, identities) require.NoError(t, err) @@ -153,21 +153,21 @@ func TestDeterministic(t *testing.T) { func TestInputValidation(t *testing.T) { - rng := prg(t, someSeed) + rng := getPRG(t, someSeed) // should return an error if we request to compute leader selection for <1 views t.Run("epoch containing no views", func(t *testing.T) { count := 0 - _, err := ComputeLeaderSelection(0, rng, count, unittest.IdentityListFixture(4)) + _, err := ComputeLeaderSelection(0, rng, count, unittest.IdentityListFixture(4).ToSkeleton()) assert.Error(t, err) count = -1 - _, err = ComputeLeaderSelection(0, rng, count, unittest.IdentityListFixture(4)) + _, err = ComputeLeaderSelection(0, rng, count, unittest.IdentityListFixture(4).ToSkeleton()) assert.Error(t, err) }) // epoch with no possible leaders should return an error t.Run("epoch without participants", func(t *testing.T) { - identities := unittest.IdentityListFixture(0) + identities := unittest.IdentityListFixture(0).ToSkeleton() _, err := ComputeLeaderSelection(0, rng, 100, identities) assert.Error(t, err) }) @@ -176,14 +176,14 @@ func TestInputValidation(t *testing.T) { // test that requesting a view outside the given range returns an error func TestViewOutOfRange(t *testing.T) { - rng := prg(t, someSeed) + rng := getPRG(t, someSeed) firstView := uint64(100) finalView := uint64(200) - identities := unittest.IdentityListFixture(4) + identities := unittest.IdentityListFixture(4).ToSkeleton() leaders, err := ComputeLeaderSelection(firstView, rng, int(finalView-firstView+1), identities) - require.Nil(t, err) + require.NoError(t, err) // confirm the selection has first/final view we expect assert.Equal(t, firstView, leaders.FirstView()) @@ -192,9 +192,9 @@ func TestViewOutOfRange(t *testing.T) { // boundary views should not return error t.Run("boundary views", func(t *testing.T) { _, err = leaders.LeaderForView(firstView) - assert.Nil(t, err) + assert.NoError(t, err) _, err = leaders.LeaderForView(finalView) - assert.Nil(t, err) + assert.NoError(t, err) }) // views before first view should return error @@ -203,7 +203,7 @@ func TestViewOutOfRange(t *testing.T) { _, err = leaders.LeaderForView(before) assert.Error(t, err) - before = rand.Uint64() % firstView // random view before first view + before = uint64(rand.Intn(int(firstView))) // random view before first view _, err = leaders.LeaderForView(before) assert.Error(t, err) }) @@ -227,19 +227,19 @@ func TestDifferentSeedWillProduceDifferentSelection(t *testing.T) { identities := unittest.IdentityListFixture(N_NODES) for i, identity := range identities { - identity.Weight = uint64(i) + 
identity.InitialWeight = uint64(i) } - rng1 := prg(t, someSeed) + rng1 := getPRG(t, someSeed) seed2 := make([]byte, 32) seed2[0] = 8 - rng2 := prg(t, seed2) + rng2 := getPRG(t, seed2) - leaders1, err := ComputeLeaderSelection(0, rng1, N_VIEWS, identities) + leaders1, err := ComputeLeaderSelection(0, rng1, N_VIEWS, identities.ToSkeleton()) require.NoError(t, err) - leaders2, err := ComputeLeaderSelection(0, rng2, N_VIEWS, identities) + leaders2, err := ComputeLeaderSelection(0, rng2, N_VIEWS, identities.ToSkeleton()) require.NoError(t, err) diff := 0 @@ -262,14 +262,14 @@ func TestDifferentSeedWillProduceDifferentSelection(t *testing.T) { // The number of time being selected as leader might not exactly match their weight, but also // won't go too far from that. func TestLeaderSelectionAreWeighted(t *testing.T) { - rng := prg(t, someSeed) + rng := getPRG(t, someSeed) const N_VIEWS = 100000 const N_NODES = 4 - identities := unittest.IdentityListFixture(N_NODES) + identities := unittest.IdentityListFixture(N_NODES).ToSkeleton() for i, identity := range identities { - identity.Weight = uint64(i + 1) + identity.InitialWeight = uint64(i + 1) } leaders, err := ComputeLeaderSelection(0, rng, N_VIEWS, identities) @@ -287,7 +287,7 @@ func TestLeaderSelectionAreWeighted(t *testing.T) { for nodeID, selectedCount := range selected { identity, ok := identities.ByNodeID(nodeID) require.True(t, ok) - target := uint64(N_VIEWS) * identity.Weight / 10 + target := uint64(N_VIEWS) * identity.InitialWeight / 10 var diff uint64 if selectedCount > target { @@ -304,44 +304,45 @@ func TestLeaderSelectionAreWeighted(t *testing.T) { func BenchmarkLeaderSelection(b *testing.B) { - const N_VIEWS = 15000000 + const N_VIEWS = EstimatedSixMonthOfViews const N_NODES = 20 - identities := make([]*flow.Identity, 0, N_NODES) + identities := make(flow.IdentityList, 0, N_NODES) for i := 0; i < N_NODES; i++ { - identities = append(identities, unittest.IdentityFixture(unittest.WithWeight(uint64(i)))) + identities = append(identities, unittest.IdentityFixture(unittest.WithInitialWeight(uint64(i)))) } - rng := prg(b, someSeed) + skeletonIdentities := identities.ToSkeleton() + rng := getPRG(b, someSeed) for n := 0; n < b.N; n++ { - _, err := ComputeLeaderSelection(0, rng, N_VIEWS, identities) + _, err := ComputeLeaderSelection(0, rng, N_VIEWS, skeletonIdentities) require.NoError(b, err) } } func TestInvalidTotalWeight(t *testing.T) { - rng := prg(t, someSeed) - identities := unittest.IdentityListFixture(4, unittest.WithWeight(0)) - _, err := ComputeLeaderSelection(0, rng, 10, identities) + rng := getPRG(t, someSeed) + identities := unittest.IdentityListFixture(4, unittest.WithInitialWeight(0)) + _, err := ComputeLeaderSelection(0, rng, 10, identities.ToSkeleton()) require.Error(t, err) } func TestZeroWeightNodeWillNotBeSelected(t *testing.T) { // create 2 RNGs from the same seed - rng := prg(t, someSeed) - rng_copy := prg(t, someSeed) + rng := getPRG(t, someSeed) + rng_copy := getPRG(t, someSeed) // check that if there is some node with 0 weight, the selections for each view should be the same as // with no zero-weight nodes. 
t.Run("small dataset", func(t *testing.T) { const N_VIEWS = 100 - weightless := unittest.IdentityListFixture(5, unittest.WithWeight(0)) - weightful := unittest.IdentityListFixture(5) + weightless := unittest.IdentityListFixture(5, unittest.WithInitialWeight(0)).ToSkeleton() + weightful := unittest.IdentityListFixture(5).ToSkeleton() for i, identity := range weightful { - identity.Weight = uint64(i + 1) + identity.InitialWeight = uint64(i + 1) } identities := append(weightless, weightful...) @@ -365,26 +366,27 @@ func TestZeroWeightNodeWillNotBeSelected(t *testing.T) { }) t.Run("fuzzy set", func(t *testing.T) { - toolRng := prg(t, someSeed) + toolRng := getPRG(t, someSeed) // create 1002 nodes with all 0 weight - identities := unittest.IdentityListFixture(1002, unittest.WithWeight(0)) + fullIdentities := unittest.IdentityListFixture(1002, unittest.WithInitialWeight(0)) // create 2 nodes with 1 weight, and place them in between // index 233-777 n := toolRng.UintN(777-233) + 233 m := toolRng.UintN(777-233) + 233 - identities[n].Weight = 1 - identities[m].Weight = 1 + fullIdentities[n].InitialWeight = 1 + fullIdentities[m].InitialWeight = 1 - // the following code check the zero weight node should not be selected - weightful := identities.Filter(filter.HasWeight(true)) + // the following code checks that zero-weight nodes are not selected (selection probability is proportional to weight) + votingConsensusNodes := fullIdentities.Filter(filter.HasInitialWeight[flow.Identity](true)).ToSkeleton() + allEpochConsensusNodes := fullIdentities.ToSkeleton() // including zero-weight nodes count := 1000 - selectionFromAll, err := ComputeLeaderSelection(0, rng, count, identities) + selectionFromAll, err := ComputeLeaderSelection(0, rng, count, allEpochConsensusNodes) require.NoError(t, err) - selectionFromWeightful, err := ComputeLeaderSelection(0, rng_copy, count, weightful) + selectionFromWeightful, err := ComputeLeaderSelection(0, rng_copy, count, votingConsensusNodes) require.NoError(t, err) for i := 0; i < count; i++ { @@ -399,13 +401,13 @@ func TestZeroWeightNodeWillNotBeSelected(t *testing.T) { } t.Run("if there is only 1 node has weight, then it will be always be the leader and the only leader", func(t *testing.T) { - toolRng := prg(t, someSeed) + toolRng := getPRG(t, someSeed) - identities := unittest.IdentityListFixture(1000, unittest.WithWeight(0)) + identities := unittest.IdentityListFixture(1000, unittest.WithInitialWeight(0)).ToSkeleton() n := rng.UintN(1000) weight := n + 1 - identities[n].Weight = weight + identities[n].InitialWeight = weight onlyNodeWithWeight := identities[n] selections, err := ComputeLeaderSelection(0, toolRng, 1000, identities) diff --git a/consensus/hotstuff/committees/metrics_wrapper.go b/consensus/hotstuff/committees/metrics_wrapper.go index e1bdcbc059a..cbc0d333503 100644 --- a/consensus/hotstuff/committees/metrics_wrapper.go +++ b/consensus/hotstuff/committees/metrics_wrapper.go @@ -1,4 +1,3 @@ -// (c) 2020 Dapper Labs - ALL RIGHTS RESERVED package committees import ( @@ -43,14 +42,14 @@ func (w CommitteeMetricsWrapper) IdentityByBlock(blockID flow.Identifier, partic return identity, err } -func (w CommitteeMetricsWrapper) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { +func (w CommitteeMetricsWrapper) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { processStart := time.Now() identities, err := w.committee.IdentitiesByEpoch(view) w.metrics.CommitteeProcessingDuration(time.Since(processStart)) return identities, err } -func (w 
CommitteeMetricsWrapper) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.Identity, error) { +func (w CommitteeMetricsWrapper) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { processStart := time.Now() identity, err := w.committee.IdentityByEpoch(view, participantID) w.metrics.CommitteeProcessingDuration(time.Since(processStart)) diff --git a/consensus/hotstuff/committees/static.go b/consensus/hotstuff/committees/static.go index b95c6448dff..8bf0ab6cfe7 100644 --- a/consensus/hotstuff/committees/static.go +++ b/consensus/hotstuff/committees/static.go @@ -3,31 +3,28 @@ package committees import ( "fmt" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/state/protocol" ) -// NewStaticCommittee returns a new committee with a static participant set. -func NewStaticCommittee(participants flow.IdentityList, myID flow.Identifier, dkgParticipants map[flow.Identifier]flow.DKGParticipant, dkgGroupKey crypto.PublicKey) (*Static, error) { - - return NewStaticCommitteeWithDKG(participants, myID, staticDKG{ +func NewStaticReplicas(participants flow.IdentitySkeletonList, myID flow.Identifier, dkgParticipants map[flow.Identifier]flow.DKGParticipant, dkgGroupKey crypto.PublicKey) (*StaticReplicas, error) { + return NewStaticReplicasWithDKG(participants, myID, staticDKG{ dkgParticipants: dkgParticipants, dkgGroupKey: dkgGroupKey, }) } -// NewStaticCommitteeWithDKG returns a new committee with a static participant set. -func NewStaticCommitteeWithDKG(participants flow.IdentityList, myID flow.Identifier, dkg protocol.DKG) (*Static, error) { - valid := order.IdentityListCanonical(participants) +func NewStaticReplicasWithDKG(participants flow.IdentitySkeletonList, myID flow.Identifier, dkg protocol.DKG) (*StaticReplicas, error) { + valid := flow.IsIdentityListCanonical(participants) if !valid { return nil, fmt.Errorf("participants %v is not in Canonical order", participants) } - static := &Static{ + static := &StaticReplicas{ participants: participants, myID: myID, dkg: dkg, @@ -35,31 +32,41 @@ func NewStaticCommitteeWithDKG(participants flow.IdentityList, myID flow.Identif return static, nil } -// Static represents a committee with a static participant set. It is used for -// bootstrapping purposes. -type Static struct { - participants flow.IdentityList - myID flow.Identifier - dkg protocol.DKG +// NewStaticCommittee returns a new committee with a static participant set. +func NewStaticCommittee(participants flow.IdentityList, myID flow.Identifier, dkgParticipants map[flow.Identifier]flow.DKGParticipant, dkgGroupKey crypto.PublicKey) (*Static, error) { + return NewStaticCommitteeWithDKG(participants, myID, staticDKG{ + dkgParticipants: dkgParticipants, + dkgGroupKey: dkgGroupKey, + }) } -func (s Static) IdentitiesByBlock(_ flow.Identifier) (flow.IdentityList, error) { - return s.participants, nil -} +// NewStaticCommitteeWithDKG returns a new committee with a static participant set. 
+func NewStaticCommitteeWithDKG(participants flow.IdentityList, myID flow.Identifier, dkg protocol.DKG) (*Static, error) { + replicas, err := NewStaticReplicasWithDKG(participants.ToSkeleton(), myID, dkg) + if err != nil { + return nil, fmt.Errorf("could not create static replicas: %w", err) + } -func (s Static) IdentityByBlock(_ flow.Identifier, participantID flow.Identifier) (*flow.Identity, error) { - identity, ok := s.participants.ByNodeID(participantID) - if !ok { - return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID) + static := &Static{ + StaticReplicas: *replicas, + fullIdentities: participants, } - return identity, nil + return static, nil +} + +type StaticReplicas struct { + participants flow.IdentitySkeletonList + myID flow.Identifier + dkg protocol.DKG } -func (s Static) IdentitiesByEpoch(_ uint64) (flow.IdentityList, error) { - return s.participants, nil +var _ hotstuff.Replicas = (*StaticReplicas)(nil) + +func (s StaticReplicas) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { + return s.participants.ToSkeleton(), nil } -func (s Static) IdentityByEpoch(_ uint64, participantID flow.Identifier) (*flow.Identity, error) { +func (s StaticReplicas) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { identity, ok := s.participants.ByNodeID(participantID) if !ok { return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID) @@ -67,26 +74,47 @@ func (s Static) IdentityByEpoch(_ uint64, participantID flow.Identifier) (*flow. return identity, nil } -func (s Static) LeaderForView(_ uint64) (flow.Identifier, error) { +func (s StaticReplicas) LeaderForView(_ uint64) (flow.Identifier, error) { return flow.ZeroID, fmt.Errorf("invalid for static committee") } -func (s Static) QuorumThresholdForView(_ uint64) (uint64, error) { - return WeightThresholdToBuildQC(s.participants.TotalWeight()), nil +func (s StaticReplicas) QuorumThresholdForView(_ uint64) (uint64, error) { + return WeightThresholdToBuildQC(s.participants.ToSkeleton().TotalWeight()), nil } -func (s Static) TimeoutThresholdForView(_ uint64) (uint64, error) { - return WeightThresholdToTimeout(s.participants.TotalWeight()), nil +func (s StaticReplicas) TimeoutThresholdForView(_ uint64) (uint64, error) { + return WeightThresholdToTimeout(s.participants.ToSkeleton().TotalWeight()), nil } -func (s Static) Self() flow.Identifier { +func (s StaticReplicas) Self() flow.Identifier { return s.myID } -func (s Static) DKG(_ uint64) (hotstuff.DKG, error) { +func (s StaticReplicas) DKG(_ uint64) (hotstuff.DKG, error) { return s.dkg, nil } +// Static represents a committee with a static participant set. It is used for +// bootstrapping purposes. 
+type Static struct { + StaticReplicas + fullIdentities flow.IdentityList +} + +var _ hotstuff.DynamicCommittee = (*Static)(nil) + +func (s Static) IdentitiesByBlock(_ flow.Identifier) (flow.IdentityList, error) { + return s.fullIdentities, nil +} + +func (s Static) IdentityByBlock(_ flow.Identifier, participantID flow.Identifier) (*flow.Identity, error) { + identity, ok := s.fullIdentities.ByNodeID(participantID) + if !ok { + return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID) + } + return identity, nil +} + type staticDKG struct { dkgParticipants map[flow.Identifier]flow.DKGParticipant dkgGroupKey crypto.PublicKey @@ -119,3 +147,26 @@ func (s staticDKG) KeyShare(nodeID flow.Identifier) (crypto.PublicKey, error) { } return participant.KeyShare, nil } + +// KeyShares returns the public portions of all threshold key shares. Note that there might not +// exist a private key corresponding to each entry (e.g. if the respective node failed the DKG). +func (s staticDKG) KeyShares() []crypto.PublicKey { + participants := make([]crypto.PublicKey, len(s.dkgParticipants)) + for _, participant := range s.dkgParticipants { + participants[participant.Index] = participant.KeyShare + } + return participants +} + +// NodeID returns the node identifier for the given index. +// An exception is returned if the index is ≥ Size(). +// Intended for use outside the hotpath, with runtime +// scaling linearly in the number of DKG participants (ie. Size()) +func (s staticDKG) NodeID(index uint) (flow.Identifier, error) { + for nodeID, participant := range s.dkgParticipants { + if participant.Index == index { + return nodeID, nil + } + } + return flow.ZeroID, fmt.Errorf("index %d not found in DKG", index) +} diff --git a/consensus/hotstuff/consumer.go b/consensus/hotstuff/consumer.go index 0b76027b146..0e5acb7c4fa 100644 --- a/consensus/hotstuff/consumer.go +++ b/consensus/hotstuff/consumer.go @@ -21,7 +21,7 @@ type ProposalViolationConsumer interface { // Prerequisites: // Implementation must be concurrency safe; Non-blocking; // and must handle repetition of the same events (with some processing overhead). - OnInvalidBlockDetected(err model.InvalidProposalError) + OnInvalidBlockDetected(err flow.Slashable[model.InvalidProposalError]) // OnDoubleProposeDetected notifications are produced by the Finalization Logic // whenever a double block proposal (equivocation) was detected. @@ -60,7 +60,7 @@ type VoteAggregationViolationConsumer interface { // Prerequisites: // Implementation must be concurrency safe; Non-blocking; // and must handle repetition of the same events (with some processing overhead). - OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.Proposal) + OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.SignedProposal) } // TimeoutAggregationViolationConsumer consumes outbound notifications about Active Pacemaker violations specifically @@ -138,7 +138,7 @@ type ParticipantConsumer interface { // Prerequisites: // Implementation must be concurrency safe; Non-blocking; // and must handle repetition of the same events (with some processing overhead). - OnReceiveProposal(currentView uint64, proposal *model.Proposal) + OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) // OnReceiveQc notifications are produced by the EventHandler when it starts processing a // QuorumCertificate [QC] constructed by the node's internal vote aggregator. 
@@ -307,7 +307,7 @@ type CommunicatorConsumer interface { // Prerequisites: // Implementation must be concurrency safe; Non-blocking; // and must handle repetition of the same events (with some processing overhead). - OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) + OnOwnVote(vote *model.Vote, recipientID flow.Identifier) // OnOwnTimeout notifies about intent to broadcast the given timeout object(TO) to all actors of the consensus process. // Prerequisites: @@ -321,7 +321,7 @@ type CommunicatorConsumer interface { // Prerequisites: // Implementation must be concurrency safe; Non-blocking; // and must handle repetition of the same events (with some processing overhead). - OnOwnProposal(proposal *flow.Header, targetPublicationTime time.Time) + OnOwnProposal(proposal *flow.ProposalHeader, targetPublicationTime time.Time) } // FollowerConsumer consumes outbound notifications produced by consensus followers. diff --git a/consensus/hotstuff/cruisectl/README.md b/consensus/hotstuff/cruisectl/README.md new file mode 100644 index 00000000000..af0abd45a01 --- /dev/null +++ b/consensus/hotstuff/cruisectl/README.md @@ -0,0 +1,405 @@ +# Cruise Control: Automated Block Time Adjustment for Precise Epoch Switchover Timing + +# Overview + +## Context + +Epochs have a fixed length, measured in views. +The actual view rate of the network varies depending on network conditions, e.g. load, number of offline replicas, etc. +We would like for consensus nodes to observe the actual view rate of the committee, and adjust how quickly they proceed +through views accordingly, to target a desired weekly epoch switchover time. + +## High-Level Design + +The `BlockTimeController` observes the current view rate and adjusts the timing when the proposal should be released. +It is a [PID controller](https://en.wikipedia.org/wiki/PID_controller). The essential idea is to take into account the +current error, the rate of change of the error, and the cumulative error, when determining how much compensation to apply. +The compensation function $u[v]$ has three terms: + +- $P[v]$ compensates proportionally to the magnitude of the instantaneous error +- $I[v]$ compensates proportionally to the magnitude of the error and how long it has persisted +- $D[v]$ compensates proportionally to the rate of change of the error + + +📚 This document uses ideas from: + +- the paper [Fast self-tuning PID controller specially suited for mini robots](https://www.frba.utn.edu.ar/wp-content/uploads/2021/02/EWMA_PID_7-1.pdf) +- the ‘Leaky Integrator’ [[forum discussion](https://engineering.stackexchange.com/questions/29833/limiting-the-integral-to-a-time-window-in-pid-controller), [technical background](https://www.music.mcgill.ca/~gary/307/week2/node4.html)] + + +### Choice of Process Variable: Targeted Epoch Switchover Time + +The process variable is the variable which: + +- has a target desired value, or setpoint ($SP$) +- is successively measured by the controller to compute the error $e$ + +--- +👉 The `BlockTimeController` controls the progression through views, such that the epoch switchover happens at the intended point in time. We define: + +- $\gamma = k\cdot \tau_0$ is the remaining epoch duration of a hypothetical ideal system, where *all* remaining $k$ views of the epoch progress with the ideal view time $\tau_0$. 
+- The parameter $\tau_0$ is computed solely based on the Epoch configuration as + $\tau_0 := \frac{<{\rm total\ epoch\ time}>}{<{\rm total\ views\ in\ epoch}>}$ (for mainnet 22, Epoch 75, we have $\tau_0 \simeq$ 1250ms). +- $\Gamma$ is the *actual* time remaining until the desired epoch switchover. + +The error, which the controller should drive towards zero, is defined as: + +```math +e := \gamma - \Gamma +``` +--- + + +From our definition it follows that: + +- $e > 0$ implies that the estimated epoch switchover (assuming ideal system behaviour) happens too late. Therefore, to hit the desired epoch switchover time, the time we spend in views has to be *smaller* than $\tau_0$. +- $e < 0$ implies that we estimate the epoch switchover to happen too early. Therefore, we should slow down and spend more than $\tau_0$ in the following views. + +**Reasoning:** + +The desired idealized system behaviour would be a constant view duration $\tau_0$ throughout the entire epoch. + +However, in the real-world system we have disturbances (varying message relay times, slow or offline nodes, etc.) and measurement uncertainty (a node can only observe its local view times, but not the committee’s collective swarm behaviour). + +<img src='https://github.com/onflow/flow-go/blob/master/docs/images/CruiseControl_BlockTimeController/PID_controller_for_block-rate-delay.png' width='680'> + + +After a disturbance, we want the controller to drive the system back to a state where it can closely follow the ideal behaviour from there on. + +- Simulations have shown that this approach produces a *very* stable controller with the intended behaviour. + + **Controller driving $e := \gamma - \Gamma \rightarrow 0$** + - with the differential term set to $K_d=0$, the controller responds as expected with damped oscillatory behaviour + to a singular strong disturbance. Setting $K_d=3$ suppresses oscillations and the controller's performance improves as it responds more effectively.
+ + <img src='https://github.com/onflow/flow-go/blob/master/docs/images/CruiseControl_BlockTimeController/EpochSimulation_029.png' width='900'> + + <img src='https://github.com/onflow/flow-go/blob/master/docs/images/CruiseControl_BlockTimeController/EpochSimulation_030.png' width='900'> + + - controller very quickly compensates for moderate disturbances and observational noise in a well-behaved system: + + <img src='https://github.com/onflow/flow-go/blob/master/docs/images/CruiseControl_BlockTimeController/EpochSimulation_028.png' width='900'> + + - controller compensates for a massive anomaly (100s network partition) effectively: + + <img src='https://github.com/onflow/flow-go/blob/master/docs/images/CruiseControl_BlockTimeController/EpochSimulation_000.png' width='900'> + + - controller effectively stabilizes the system under continued larger disturbances (20% of consensus participants offline) and notable observational noise: + + <img src='https://github.com/onflow/flow-go/blob/master/docs/images/CruiseControl_BlockTimeController/EpochSimulation_005-0.png' width='900'> + + **References:** + + - statistical model for happy-path view durations: [ID controller for ``block-rate-delay``](https://www.notion.so/ID-controller-for-block-rate-delay-cc9c2d9785ac4708a37bb952557b5ef4?pvs=21) + - For a Python implementation with additional disturbances (offline nodes) and observational noise, see GitHub repo: [flow-internal/analyses/pacemaker_timing/2023-05_Blocktime_PID-controller](https://github.com/dapperlabs/flow-internal/tree/master/analyses/pacemaker_timing/2023-05_Blocktime_PID-controller) → [controller_tuning_v01.py](https://github.com/dapperlabs/flow-internal/blob/master/analyses/pacemaker_timing/2023-05_Blocktime_PID-controller/controller_tuning_v01.py) + +# Detailed PID controller specification + +Each consensus participant runs a local instance of the controller described below. Hence, all quantities are based on the node’s local observations. + +## Definitions + +**Observables** (quantities provided to the node or directly measurable by the node): + +- $v$ is the node’s current view +- ideal view time $\tau_0$ is computed solely based on the Epoch configuration: + $\tau_0 := \frac{<{\rm total\ epoch\ time}>}{<{\rm total\ views\ in\ epoch}>}$ (for mainnet 22, Epoch 75, we have $\tau_0 \simeq$ 1250ms). +- $t[v]$ is the time the node entered view $v$ +- $F[v]$ is the final view of the current epoch +- $T[v]$ is the target end time of the current epoch + +**Derived quantities** + +- remaining views of the epoch $k[v] := F[v] +1 - v$ +- time remaining until the desired epoch switchover $\Gamma[v] := T[v]-t[v]$ +- error $e[v] := \underbrace{k\cdot\tau_0}_{\gamma[v]} - \Gamma[v] = t[v] + k[v] \cdot\tau_0 - T[v]$ + +### Precise convention of View Timing + +Upon observing block `B` with view $v$, the controller updates its internal state. + +Note the '+1' term in the computation of the remaining views $k[v] := F[v] +1 - v$. This is related to our convention that the epoch begins (happy path) when observing the first block of the epoch. Only by observing this block do the nodes transition to the first view of the epoch. Up to that point, the consensus replicas remain in the last view of the previous epoch, in the state of `having processed the last block of the old epoch and voted for it` (happy path). Replicas remain in this state until they see a confirmation of the view (either QC or TC for the last view of the previous epoch).
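+To make the definitions above concrete, the following is a minimal, self-contained sketch of how a node could derive the instantaneous error from its local observables. It is illustrative only: names like `viewObservation` and `instantaneousError` are placeholders of ours, not the production API in `block_time_controller.go`.
+
+```golang
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// viewObservation bundles the observables from the Definitions above (illustrative names).
+type viewObservation struct {
+	v         uint64    // current view v
+	t         time.Time // t[v], time the node entered view v
+	finalView uint64    // F[v], final view of the current epoch
+	targetEnd time.Time // T[v], target end time of the current epoch
+	tau0      float64   // ideal view time τ_0, in seconds
+}
+
+// instantaneousError computes e[v] = k[v]·τ_0 - Γ[v] in seconds,
+// with k[v] = F[v] + 1 - v and Γ[v] = T[v] - t[v].
+func instantaneousError(o viewObservation) float64 {
+	k := float64(o.finalView + 1 - o.v)         // remaining views of the epoch
+	remaining := o.targetEnd.Sub(o.t).Seconds() // Γ[v], actual time remaining
+	return k*o.tau0 - remaining                 // e[v] > 0 ⇒ projected switchover is too late
+}
+
+func main() {
+	now := time.Now()
+	o := viewObservation{
+		v:         1000,
+		t:         now,
+		finalView: 1099,                       // k[v] = 100 views remaining
+		targetEnd: now.Add(125 * time.Second), // Γ[v] = 125s
+		tau0:      1.25,                       // τ_0 = 1250ms
+	}
+	fmt.Printf("e[v] = %+.3fs\n", instantaneousError(o)) // 100·1.25 - 125 = 0: exactly on schedule
+}
+```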
+ +<img src='https://github.com/onflow/flow-go/blob/master/docs/images/CruiseControl_BlockTimeController/ViewDurationConvention.png' width='600'> + +In accordance with this convention, observing the proposal for the last view of an epoch marks the start of the last view. By observing the proposal, nodes enter the last view, verify the block, and vote for it; the primary aggregates the votes and constructs the child (for the first view of the new epoch). The last view of the epoch ends when the child proposal is published. + +### Controller + +The goal of the controller is to drive the system towards an error of zero, i.e. $e[v] \rightarrow 0$. For a [PID controller](https://en.wikipedia.org/wiki/PID_controller), the output $u$ for view $v$ has the form: + +```math +u[v] = K_p \cdot e[v]+K_i \cdot \mathcal{I}[v] + K_d \cdot \Delta[v] +``` + +with error terms (computed from observations): + +- $e[v]$ representing the *instantaneous* error as of view $v$ + (commonly referred to as ‘proportional term’) +- $\mathcal{I}[v] = \sum_v e[v]$ the sum of the errors + (commonly referred to as ‘integral term’) +- $\Delta[v]=e[v]-e[v-1]$ the rate of change of the error + (commonly referred to as ‘derivative term’) + +and controller parameters (values derived from controller tuning): + +- $K_p$ is the proportional coefficient +- $K_i$ is the integral coefficient +- $K_d$ is the derivative coefficient + +## Measuring view duration + +Each consensus participant observes the error $e[v]$ based on its local view evolution. As the following figure illustrates, the view duration is highly variable on small time scales. + +![](/docs/images/CruiseControl_BlockTimeController/ViewRate.png) + +Therefore, we expect $e[v]$ to be highly variable as well. Furthermore, note that a node uses its local view transition times as an estimator for the collective behaviour of the entire committee. Therefore, there is also observational noise obfuscating the underlying collective behaviour. Hence, we expect notable noise. + +## Managing noise + +Noisy values for $e[v]$ also impact the derivative term $\Delta[v]$ and integral term $\mathcal{I}[v]$. This can impact the controller’s performance. + +### **Managing noise in the proportional term** + +An established approach for managing noise in observables is to use an [exponentially weighted moving average [EWMA]](https://en.wikipedia.org/wiki/Moving_average) instead of the instantaneous values. Specifically, let $\bar{e}[v]$ denote the EWMA of the instantaneous error, which is computed as follows: + +```math +\eqalign{ +\textnormal{initialization: }\quad \bar{e} :&= 0 \\ +\textnormal{update with instantaneous error\ } e[v]:\quad \bar{e}[v] &= \alpha \cdot e[v] + (1-\alpha)\cdot \bar{e}[v-1] +} +``` + +The parameter $\alpha$ relates to the averaging time window. Let $\alpha \equiv \frac{1}{N_\textnormal{ewma}}$ and consider that the input changes from $x_\textnormal{old}$ to $x_\textnormal{new}$ as a step function. Then $N_\textnormal{ewma}$ is the number of samples required to move the output average about 2/3 of the way from $x_\textnormal{old}$ to $x_\textnormal{new}$. + +see also [Python `Ewma` implementation](https://github.com/dapperlabs/flow-internal/blob/423d927421c073e4c3f66165d8f51b829925278f/analyses/pacemaker_timing/2023-05_Blocktime_PID-controller/controller_tuning_v01.py#L405-L431) + +### **Managing noise in the integral term** + +In particular, systematic observation bias is a problem, as it leads to a diverging integral term.
The commonly adopted approach is to use a ‘leaky integrator’ [[1](https://www.music.mcgill.ca/~gary/307/week2/node4.html), [2](https://engineering.stackexchange.com/questions/29833/limiting-the-integral-to-a-time-window-in-pid-controller)], which we denote as $\bar{\mathcal{I}}[v]$. + +```math +\eqalign{ +\textnormal{initialization: }\quad \bar{\mathcal{I}} :&= 0 \\ +\textnormal{update with instantaneous error\ } e[v]:\quad \bar{\mathcal{I}}[v] &= e[v] + (1-\lambda)\cdot\bar{\mathcal{I}}[v-1] +} +``` + +Intuitively, the loss factor $\lambda$ relates to the time window of the integrator. A factor of 0 means an infinite time horizon, while $\lambda =1$ makes the integrator only memorize the last input. Let $\lambda \equiv \frac{1}{N_\textnormal{itg}}$ and consider a constant input value $x$. Then $N_\textnormal{itg}$ relates to the number of past samples that the integrator remembers: + +- the integrator’s output will saturate at $x\cdot N_\textnormal{itg}$ +- an integrator initialized with 0 reaches 2/3 of the saturation value $x\cdot N_\textnormal{itg}$ after consuming $N_\textnormal{itg}$ inputs + +see also [Python `LeakyIntegrator` implementation](https://github.com/dapperlabs/flow-internal/blob/423d927421c073e4c3f66165d8f51b829925278f/analyses/pacemaker_timing/2023-05_Blocktime_PID-controller/controller_tuning_v01.py#L444-L468) + +### **Managing noise in the derivative term** + +Similarly to the proportional term, we apply an EWMA to the differential term and denote the averaged value as $\bar{\Delta}[v]$: + +```math +\eqalign{ +\textnormal{initialization: }\quad \bar{\Delta} :&= 0 \\ +\textnormal{update with instantaneous error\ } e[v]:\quad \bar{\Delta}[v] &= \bar{e}[v] - \bar{e}[v-1] +} +``` + +## Final formula for PID controller + +We have used a statistical model of the view duration extracted from mainnet 22 (Epoch 75) and manually added disturbances, observational noise, and systematic observational bias. + +The following parameters have proven to generate stable controller behaviour over a large variety of network conditions: + +--- +👉 The controller is given by + +```math +u[v] = K_p \cdot \bar{e}[v]+K_i \cdot \bar{\mathcal{I}}[v] + K_d \cdot \bar{\Delta}[v] +``` + +with parameters: + +- $K_p = 2.0$ +- $K_i = 0.6$ +- $K_d = 3.0$ +- $N_\textnormal{ewma} = 5$, i.e. $\alpha = \frac{1}{N_\textnormal{ewma}} = 0.2$ +- $N_\textnormal{itg} = 50$, i.e. $\lambda = \frac{1}{N_\textnormal{itg}} = 0.02$ + +The controller output $u[v]$ represents the amount of time by which the controller wishes to deviate from the ideal view duration $\tau_0$. In other words, the duration of view $v$ that the controller wants to set is +```math +\widehat{\tau}[v] = \tau_0 - u[v] +``` +--- + +### Limits of authority + +[Latest update: Crescendo Upgrade, June 2024] + +In general, there is no bound on the controller output $u$. However, it is important to limit the controller’s influence to keep $u$ within a sensible range. + +- upper bound on view duration $\widehat{\tau}[v]$ that we allow the controller to set: + + The current timeout threshold is set to 1045ms and the largest view duration we want to allow the controller to set is $\tau_\textrm{max}$ = 910ms. + Thereby, we have a buffer $\beta$ = 135ms remaining for message propagation and the replicas validating the proposal for view $v$. + + Note the subtle but important aspect: the primary for view $v$ controls the duration of view $v-1$.
This is because its proposal for view $v$ + contains the proof (Quorum Certificate [QC]) that view $v-1$ concluded on the happy path. By observing the QC for view $v-1$, nodes enter the + subsequent view $v$. + + +- lower bound on the view duration: + + Let $t_\textnormal{p}[v]$ denote the time when the primary for view $v$ has constructed its block proposal. + On the happy path, a replica concludes view $v-1$ and transitions to view $v$ when it observes the proposal for view $v$. + The duration $t_\textnormal{p}[v] - t[v-1]$ is the time the primary needs for observing the parent block (view $v-1$), collecting votes, + constructing a QC for view $v-1$, and subsequently building its own proposal for view $v$. This duration is the minimally required time to execute the protocol. + The controller can only *delay* broadcasting the block, + but it cannot release the block before $t_\textnormal{p}[v]$ simply because the proposal isn’t ready any earlier. + + + +👉 Let $\hat{t}[v]$ denote the time when the primary for view $v$ *broadcasts* its proposal. We assign: + +```math +\hat{t}[v] := \max\Big(t[v-1] +\min(\widehat{\tau}[v-1],\ \tau_\textrm{max}),\ t_\textnormal{p}[v]\Big) +``` +This equation guarantees that the controller does not drive consensus into a timeout, as long as broadcasting the block and its validation +together require less time than $\beta$. Currently, we have $\tau_\textrm{max}$ = 910ms as the upper bound for view durations that the controller can set. +In comparison, for HotStuff's timeout threshold we set $\texttt{hotstuff-min-timeout} = \tau_\textrm{max} + \beta$, with $\beta$ = 135ms. + + + +### Further reading + +- for the statistical model of the view duration, see [PID controller for ``block-rate-delay``](https://www.notion.so/ID-controller-for-block-rate-delay-cc9c2d9785ac4708a37bb952557b5ef4?pvs=21) +- for the simulation and controller tuning, see [flow-internal/analyses/pacemaker_timing/2023-05_Blocktime_PID-controller](https://github.com/dapperlabs/flow-internal/tree/master/analyses/pacemaker_timing/2023-05_Blocktime_PID-controller) → [controller_tuning_v01.py](https://github.com/dapperlabs/flow-internal/blob/master/analyses/pacemaker_timing/2023-05_Blocktime_PID-controller/controller_tuning_v01.py) +- The most recent parameter setting was derived here: + - [Cruise-Control headroom for speedups](https://www.notion.so/flowfoundation/Cruise-Control-headroom-for-speedups-46dc17e07ae14462b03341e4432a907d?pvs=4) contains the formal analysis and discusses the numerical results in detail + - Python code for figures and calculating the final parameter settings: [flow-internal/analyses/pacemaker_timing/2024-03_Block-timing-update](https://github.com/dapperlabs/flow-internal/tree/master/analyses/pacemaker_timing/2024-03_Block-timing-update) → [timeout-attacks.py](https://github.com/dapperlabs/flow-internal/blob/master/analyses/pacemaker_timing/2024-03_Block-timing-update/timeout-attacks.py) + + +## Edge Cases + +### A node is catching up + +When a node is catching up, it observes the blocks significantly later than they were published. In other words, from the perspective +of the node catching up, the blocks are too late. However, as it reaches the most recent blocks, the observed timing error also approaches zero +(assuming approximately correct block publication by the honest supermajority). Nevertheless, due to its biased error observations, the node +catching up could still try to compensate for the network being behind and publish its proposal as early as possible.
+ +**Assumption:** With only a smaller fraction of nodes being offline or catching up, the effect is expected to be small and easily compensated for by the supermajority of online nodes. + +### A node has a misconfigured clock + +Cap the maximum deviation from the default delay (this limits the general impact of error introduced by the `BlockTimeController`). The node with a misconfigured clock will contribute to the error in a limited way, but as long as the majority of nodes have an accurate clock, they will offset this error. + +**Assumption:** With only a smaller fraction of nodes having misconfigured clocks, the effect will be small enough to be easily compensated for by the supermajority of correct nodes. + +### Near epoch boundaries + +We might incorrectly compute a high error in the target view rate if the local current view and current epoch are not exactly synchronized. By default, they would not be, because `EpochTransition` events occur upon finalization, while the current view is updated as soon as a QC/TC is available. + +**Solution:** determine the epoch locally based on the view only; do not use the `EpochTransition` event. + +### EFM + +When the network is in EFM, epoch timing is disrupted anyway. The main thing we want to avoid is that the controller drives consensus into a timeout. +This is largely guaranteed due to the limits of authority. Beyond that, pretty much any block timing on the happy path is acceptable. +Though, the optimal solution would be a consistent view time throughout normal Epochs as well as EFM. + +# Implementation Aspects + +## Timing Reference Points + +<img src='https://github.com/user-attachments/assets/8c28d4d1-0b6c-4b6f-8413-c0753e7400f9' width='900'> + + +* Under the hood, the controller outputs the unconstrained view time $\widehat{\tau}[v]= \tau_0 - u[v]$ (of type [`time.Duration`](https://pkg.go.dev/time)), + which is wrapped into a [`happyPathBlockTime`](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/consensus/hotstuff/cruisectl/proposal_timing.go#L59-L74) + (👉 [code](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/consensus/hotstuff/cruisectl/block_time_controller.go#L402-L404)). + The [`happyPathBlockTime`](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/consensus/hotstuff/cruisectl/proposal_timing.go#L59-L74) + [applies the limits of authority](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/consensus/hotstuff/cruisectl/proposal_timing.go#L94), + and the resulting `ConstrainedBlockTime` is captured by the metric `Average Target View Time` (blue dotted curve in figure above). +* Taking a look at the [`hotstuff.EventHandler`](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/consensus/hotstuff/eventhandler/event_handler.go#L157-L171), + we can confirm that the `BlockTimeController` and the metric `Observed View Time` use practically the same reference time to determine view progression: + ```golang + func (e *EventHandler) OnReceiveProposal(proposal *model.Proposal) error { + ⋮ + + // store the block.
+ err := e.forks.AddValidatedBlock(block) + if err != nil { + return fmt.Errorf("cannot add proposal to forks (%x): %w", block.BlockID, err) + } + + _, err = e.paceMaker.ProcessQC(proposal.Block.QC) + if err != nil { + return fmt.Errorf("could not process QC for block %x: %w", block.BlockID, err) + } + + _, err = e.paceMaker.ProcessTC(proposal.LastViewTC) + if err != nil { + return fmt.Errorf("could not process TC for block %x: %w", block.BlockID, err) + } + + ⋮ + ``` + - The call to `forks.AddValidatedBlock` emits the `OnBlockIncorporated` notification for `block` with view $v$, which the `BlockTimeController` uses as [its starting point for the view](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/consensus/hotstuff/cruisectl/block_time_controller.go#L476-L481): + ```golang + TimedBlock{Block: block, TimeObserved: time.Now().UTC()} + ``` + So for the `BlockTimeController`, the start of view `v` is the `TimeObserved` for proposal with view `v`. + - Right after, the PaceMaker ingests the QC for view `v-1`, which is included in the proposal for view `v`. + The call to `paceMaker.ProcessQC` updates the metric `consensus_hotstuff_cur_view`, based on which we calculate `Observed Average (10m) View Time [s]` (blue solid curve in figure above). +* As the `TargetPublicationTime` for block `v+1`, the `BlockTimeController` [calculates](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/consensus/hotstuff/cruisectl/proposal_timing.go#L102-L112): + ```golang + targetPublicationTime := TimeObserved.Add(ConstrainedBlockTime) + ``` +* The [`EventHandler` triggers](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/consensus/hotstuff/eventhandler/event_handler.go#L390-L417) the + [computation](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/consensus/hotstuff/cruisectl/block_time_controller.go#L249-L262) of the + `Block Publication Delay` metric (dashed purple curve) right when it hands its proposal for view `v+1` to the [`MessageHub`](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/engine/consensus/message_hub/message_hub.go#L428-L434) + ```golang + publicationDelay := time.Until(targetPublicationTime) + if publicationDelay < 0 { + publicationDelay = 0 // Controller can only delay publication of proposal. Hence, the delay is lower-bounded by zero. + } + metrics.ProposalPublicationDelay(publicationDelay) + ``` +* The [`MessageHub` repeats exactly that computation of `publicationDelay`](https://github.com/onflow/flow-go/blob/d9f7522d6c502d7e148dab69c926279202677cf8/engine/consensus/message_hub/message_hub.go#L433-L437) + to determine how long it should sleep before broadcasting the proposal. + +**Estimator for consensus runtime**: +- The `Observed Average View Time` measurement starts when we see the parent block while `Block Publication Delay` starts to measure when the child block is constructed. + `Observed Average View Time` and `Block Publication Delay` use nearly identical temporal reference points to stop their respective measurement (discrepancy is smaller than 1ms based on prior benchmarks of the `EventHandler`). 
+ Therefore, the following is an estimator for how long it takes for the protocol to complete one view on the happy path (running as fast as it possibly can without any delays): + ```math + \texttt{Observed Average View Time} - \texttt{Block Publication Delay} + ``` +- There are some additional computation steps that we haven't accounted for, which could introduce errors. However, the `EventHandler` is very fast, with execution times of single-digit milliseconds in practice. + Hence, the uncertainty of this estimator is minimal (expected order of single milliseconds). + + +## Controller's observation of blocks (`OnBlockIncorporated`) must approximately match the time when other replicas observe the proposal + +On Testnet (`devnet51`), August 14-18, 2024, we observed the following discrepancy for a consensus committee of 7 nodes: +* There was a significant discrepancy between the `Observed Average View Time` and the `Average Target View Time`: + <img src='https://github.com/user-attachments/assets/3820497b-e134-4c50-8f8d-d710997ff646' width='700'> +* This resulted in the controller having reduced limits of authority: the limits of authority are computed based on the `Target View Time`, which the controller + set to the largest permitted value of 910ms (higher values were not allowed to prevent the controller from driving consensus into timeouts). However, in reality + the view progression was notably faster, meaning consensus would have tolerated larger delays without risk of timeouts. + +### Learning + +This discrepancy between what the controller was setting vs the network's real-world response was due to a systematic observation bias: +* For **small consensus committee sizes**, a node being selected as **leader for successive rounds** needs to be taken into account. In this scenario, + it is especially important that the leader "observes its own proposal" at approximately the same time as the other replicas receive it. +* Having the leader's cruise control "observe its own proposal" at the time when the proposal is constructed, before adding the controller's delay, would + introduce a significant observation bias. In this case, only the leader would assume that the view has started much earlier compared to the other replicas. + Therefore, it assumes it has much less time until other nodes would presumably trigger their timeout. Hence, the node erroneously restricts the possible delay + it can impose before broadcasting the child block. +* For larger consensus committee sizes, this systematic observation bias persists, but the probability of it occurring is much lower: the probability for the + same node to again be selected as leader for the subsequent round decreases with $\frac{1}{n}$, for $n$ the committee size. As the bias only affects the second of the two consecutive views, the overall impact of this + bias declines with increasing consensus committee size. + +## Initial Testing + +see [Cruise Control: Benchnet Testing Notes](https://www.notion.so/Cruise-Control-Benchnet-Testing-Notes-ea08f49ba9d24ce2a158fca9358966df?pvs=21) diff --git a/consensus/hotstuff/cruisectl/aggregators.go b/consensus/hotstuff/cruisectl/aggregators.go new file mode 100644 index 00000000000..4ea7cd7437c --- /dev/null +++ b/consensus/hotstuff/cruisectl/aggregators.go @@ -0,0 +1,130 @@ +package cruisectl + +import ( + "fmt" +) + +// Ewma implements the exponentially weighted moving average with smoothing factor α. +// The Ewma is a filter commonly applied to time-discrete signals.
Mathematically, +// it is represented by the recursive update formula +// +// value ← α·v + (1-α)·value +// +// where `v` is the next observation. Intuitively, the smoothing factor `α` relates to the +// time window of N observations that we average over. For example, let +// α ≡ 1/N and consider an input that suddenly changes from x to y as a step +// function. Then N is _roughly_ the number of samples required to move the output +// average about 2/3 of the way from x to y. +// For numeric stability, we require α to satisfy 0 < α < 1. +// Not concurrency safe. +type Ewma struct { + alpha float64 + value float64 +} + +// NewEwma instantiates a new exponentially weighted moving average. +// The smoothing factor `alpha` relates to the averaging time window. Let `alpha` ≡ 1/N and +// consider an input that suddenly changes from x to y as a step function. Then N is roughly +// the number of samples required to move the output average about 2/3 of the way from x to y. +// For numeric stability, we require `alpha` to satisfy 0 < `alpha` < 1. +func NewEwma(alpha, initialValue float64) (Ewma, error) { + if (alpha <= 0) || (1 <= alpha) { + return Ewma{}, fmt.Errorf("for numeric stability, we require the smoothing factor to satisfy 0 < alpha < 1") + } + return Ewma{ + alpha: alpha, + value: initialValue, + }, nil +} + +// AddRepeatedObservation adds k consecutive observations with the same value v. Returns the updated value. +func (e *Ewma) AddRepeatedObservation(v float64, k int) float64 { + // closed form for k consecutive updates with the same observation v: + // value ← r·value + v·(1-r) with r := (1-α)^k + r := powWithIntegerExponent(1.0-e.alpha, k) + e.value = r*e.value + v*(1.0-r) + return e.value +} + +// AddObservation adds the value `v` to the EWMA. Returns the updated value. +func (e *Ewma) AddObservation(v float64) float64 { + // Update formula: value ← α·v + (1-α)·value = value + α·(v - value) + e.value = e.value + e.alpha*(v-e.value) + return e.value +} + +func (e *Ewma) Value() float64 { + return e.value +} + +// LeakyIntegrator is a filter commonly applied to time-discrete signals. +// Intuitively, it sums values over a limited time window. This implementation is +// parameterized by the loss factor `β`: +// +// value ← v + (1-β)·value +// +// where `v` is the next observation. Intuitively, the loss factor `β` relates to the +// time window of N observations that we integrate over. For example, let β ≡ 1/N +// and consider a constant input x: +// - the integrator value will saturate at x·N +// - an integrator initialized at 0 reaches 2/3 of the saturation value after N samples +// +// For numeric stability, we require β to satisfy 0 < β < 1. +// Further details on Leaky Integrator: https://www.music.mcgill.ca/~gary/307/week2/node4.html +// Not concurrency safe. +type LeakyIntegrator struct { + feedbackCoef float64 // feedback coefficient := (1-β) + value float64 +} + +// NewLeakyIntegrator instantiates a new leaky integrator with loss factor `beta`, where +// `beta` relates to the window of N observations that we integrate over. For example, let +// `beta` ≡ 1/N and consider a constant input x. The integrator value will saturate at x·N. +// An integrator initialized at 0 reaches 2/3 of the saturation value after N samples. +// For numeric stability, we require `beta` to satisfy 0 < `beta` < 1.
+func NewLeakyIntegrator(beta, initialValue float64) (LeakyIntegrator, error) { + if (beta <= 0) || (1 <= beta) { + return LeakyIntegrator{}, fmt.Errorf("for numeric stability, we require the loss factor to satisfy 0 < beta < 1") + } + return LeakyIntegrator{ + feedbackCoef: 1.0 - beta, + value: initialValue, + }, nil +} + +// AddRepeatedObservation adds k consecutive observations with the same value v. Returns the updated value. +func (e *LeakyIntegrator) AddRepeatedObservation(v float64, k int) float64 { + // closed form for k consecutive updates with the same observation v: + // value ← r·value + v·(1-r)/(1-c) with c := (1-β) the feedback coefficient and r := c^k + r := powWithIntegerExponent(e.feedbackCoef, k) + e.value = r*e.value + v*(1.0-r)/(1.0-e.feedbackCoef) + return e.value +} + +// AddObservation adds the value `v` to the LeakyIntegrator. Returns the updated value. +func (e *LeakyIntegrator) AddObservation(v float64) float64 { + // Update formula: value ← v + feedbackCoef·value + // where feedbackCoef = (1-beta) + e.value = v + e.feedbackCoef*e.value + return e.value +} + +func (e *LeakyIntegrator) Value() float64 { + return e.value +} + +// powWithIntegerExponent implements exponentiation b^k optimized for integer k >= 1 +func powWithIntegerExponent(b float64, k int) float64 { + r := 1.0 + for { + if k&1 == 1 { + r *= b + } + k >>= 1 + if k == 0 { + break + } + b *= b + } + return r +} diff --git a/consensus/hotstuff/cruisectl/aggregators_test.go b/consensus/hotstuff/cruisectl/aggregators_test.go new file mode 100644 index 00000000000..d508290c814 --- /dev/null +++ b/consensus/hotstuff/cruisectl/aggregators_test.go @@ -0,0 +1,167 @@ +package cruisectl + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +// Test_Instantiation verifies successful instantiation of Ewma +func Test_EWMA_Instantiation(t *testing.T) { + w, err := NewEwma(0.5, 17.2) + require.NoError(t, err) + require.Equal(t, 17.2, w.Value()) +} + +// Test_EnforceNumericalBounds verifies that the constructor only accepts +// alpha values that satisfy 0 < alpha < 1 +func Test_EWMA_EnforceNumericalBounds(t *testing.T) { + for _, alpha := range []float64{-1, 0, 1, 2} { + _, err := NewEwma(alpha, 17.2) + require.Error(t, err) + } +} + +// Test_AddingObservations verifies correct numerics when adding a single value. +// Reference values were generated via python +func Test_EWMA_AddingObservations(t *testing.T) { + alpha := math.Pi / 7.0 + initialValue := 17.0 + w, err := NewEwma(alpha, initialValue) + require.NoError(t, err) + + v := w.AddObservation(6.0) + require.InEpsilon(t, 12.063211544358897, v, 1e-12) + require.InEpsilon(t, 12.063211544358897, w.Value(), 1e-12) + v = w.AddObservation(-1.16) + require.InEpsilon(t, 6.128648080841518, v, 1e-12) + require.InEpsilon(t, 6.128648080841518, w.Value(), 1e-12) + v = w.AddObservation(1.23) + require.InEpsilon(t, 3.9301399632281675, v, 1e-12) + require.InEpsilon(t, 3.9301399632281675, w.Value(), 1e-12) +} + +// Test_AddingRepeatedObservations verifies correct numerics when adding repeated observations.
+// Reference values were generated via python +func Test_EWMA_AddingRepeatedObservations(t *testing.T) { + alpha := math.Pi / 7.0 + initialValue := 17.0 + w, err := NewEwma(alpha, initialValue) + require.NoError(t, err) + + v := w.AddRepeatedObservation(6.0, 11) + require.InEpsilon(t, 6.015696509200239, v, 1e-12) + require.InEpsilon(t, 6.015696509200239, w.Value(), 1e-12) + v = w.AddRepeatedObservation(-1.16, 4) + require.InEpsilon(t, -0.49762458373978324, v, 1e-12) + require.InEpsilon(t, -0.49762458373978324, w.Value(), 1e-12) + v = w.AddRepeatedObservation(1.23, 1) + require.InEpsilon(t, 0.27773151632279214, v, 1e-12) + require.InEpsilon(t, 0.27773151632279214, w.Value(), 1e-12) +} + +// Test_AddingRepeatedObservations_selfConsistency applies a self-consistency check +// for repeated observations. +func Test_EWMA_AddingRepeatedObservations_selfConsistency(t *testing.T) { + alpha := math.Pi / 7.0 + initialValue := 17.0 + w1, err := NewEwma(alpha, initialValue) + require.NoError(t, err) + w2, err := NewEwma(alpha, initialValue) + require.NoError(t, err) + + for i := 7; i > 0; i-- { + w1.AddObservation(6.0) + } + v := w2.AddRepeatedObservation(6.0, 7) + require.InEpsilon(t, w1.Value(), v, 1e-12) + require.InEpsilon(t, w1.Value(), w2.Value(), 1e-12) + + for i := 4; i > 0; i-- { + w2.AddObservation(6.0) + } + v = w1.AddRepeatedObservation(6.0, 4) + require.InEpsilon(t, w2.Value(), v, 1e-12) + require.InEpsilon(t, w2.Value(), w1.Value(), 1e-12) +} + +// Test_LI_Instantiation verifies successful instantiation of LeakyIntegrator +func Test_LI_Instantiation(t *testing.T) { + li, err := NewLeakyIntegrator(0.5, 17.2) + require.NoError(t, err) + require.Equal(t, 17.2, li.Value()) +} + +// Test_EnforceNumericalBounds verifies that the constructor only accepts +// beta values that satisfy 0 < beta < 1 +func Test_LI_EnforceNumericalBounds(t *testing.T) { + for _, beta := range []float64{-1, 0, 1, 2} { + _, err := NewLeakyIntegrator(beta, 17.2) + require.Error(t, err) + } +} + +// Test_AddingObservations verifies correct numerics when adding a single value. +// Reference values were generated via python +func Test_LI_AddingObservations(t *testing.T) { + beta := math.Pi / 7.0 + initialValue := 17.0 + li, err := NewLeakyIntegrator(beta, initialValue) + require.NoError(t, err) + + v := li.AddObservation(6.0) + require.InEpsilon(t, 15.370417841281931, v, 1e-12) + require.InEpsilon(t, 15.370417841281931, li.Value(), 1e-12) + v = li.AddObservation(-1.16) + require.InEpsilon(t, 7.312190445170959, v, 1e-12) + require.InEpsilon(t, 7.312190445170959, li.Value(), 1e-12) + v = li.AddObservation(1.23) + require.InEpsilon(t, 5.260487047428308, v, 1e-12) + require.InEpsilon(t, 5.260487047428308, li.Value(), 1e-12) +} + +// Test_AddingRepeatedObservations verifies correct numerics when adding repeated observations.
+// Reference values were generated via python +func Test_LI_AddingRepeatedObservations(t *testing.T) { + beta := math.Pi / 7.0 + initialValue := 17.0 + li, err := NewLeakyIntegrator(beta, initialValue) + require.NoError(t, err) + + v := li.AddRepeatedObservation(6.0, 11) + require.InEpsilon(t, 13.374196472992809, v, 1e-12) + require.InEpsilon(t, 13.374196472992809, li.Value(), 1e-12) + v = li.AddRepeatedObservation(-1.16, 4) + require.InEpsilon(t, -1.1115419303895382, v, 1e-12) + require.InEpsilon(t, -1.1115419303895382, li.Value(), 1e-12) + v = li.AddRepeatedObservation(1.23, 1) + require.InEpsilon(t, 0.617316921420289, v, 1e-12) + require.InEpsilon(t, 0.617316921420289, li.Value(), 1e-12) + +} + +// Test_AddingRepeatedObservations_selfConsistency applies a self-consistency check +// for repeated observations. +func Test_LI_AddingRepeatedObservations_selfConsistency(t *testing.T) { + beta := math.Pi / 7.0 + initialValue := 17.0 + li1, err := NewLeakyIntegrator(beta, initialValue) + require.NoError(t, err) + li2, err := NewLeakyIntegrator(beta, initialValue) + require.NoError(t, err) + + for i := 7; i > 0; i-- { + li1.AddObservation(6.0) + } + v := li2.AddRepeatedObservation(6.0, 7) + require.InEpsilon(t, li1.Value(), v, 1e-12) + require.InEpsilon(t, li1.Value(), li2.Value(), 1e-12) + + for i := 4; i > 0; i-- { + li2.AddObservation(6.0) + } + v = li1.AddRepeatedObservation(6.0, 4) + require.InEpsilon(t, li2.Value(), v, 1e-12) + require.InEpsilon(t, li2.Value(), li1.Value(), 1e-12) +} diff --git a/consensus/hotstuff/cruisectl/block_time_controller.go b/consensus/hotstuff/cruisectl/block_time_controller.go new file mode 100644 index 00000000000..d4d8a5f642d --- /dev/null +++ b/consensus/hotstuff/cruisectl/block_time_controller.go @@ -0,0 +1,498 @@ +// Package cruisectl implements a "cruise control" system for Flow by adjusting +// nodes' latest ProposalTiming in response to changes in the measured view rate and +// target epoch switchover time. +// +// It uses a PID controller with the projected epoch switchover time as the process +// variable and the set-point computed using epoch length config. The error is +// the difference between the projected epoch switchover time, assuming an +// ideal view time τ, and the target epoch switchover time (based on a schedule). 
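+// In a nutshell, the controller computes the compensation u[v] = K_p·ē[v] + K_i·Ī[v] + K_d·Δ̄[v]
+// from the filtered error terms and targets a view duration of τ̂[v] = τ_0 - u[v], subject to
+// the limits of authority described in this package's README.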
+package cruisectl + +import ( + "errors" + "fmt" + "time" + + "github.com/rs/zerolog" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/events" +) + +// TimedBlock represents a block, with a timestamp recording when the BlockTimeController received the block +type TimedBlock struct { + Block *model.Block + TimeObserved time.Time // timestamp when BlockTimeController received the block, per convention in UTC +} + +// epochTiming encapsulates the timing information of one specific epoch: +type epochTiming struct { + firstView uint64 // first view of the epoch's view range + finalView uint64 // last view of the epoch's view range + targetDuration uint64 // desired total duration of the epoch in seconds + targetEndTime uint64 // target end time of the epoch, represented as Unix Time [seconds] +} + +// newEpochTiming queries the timing information from the given `epoch` and returns it as a new `epochTiming` instance. +func newEpochTiming(epoch protocol.CommittedEpoch) *epochTiming { + return &epochTiming{ + firstView: epoch.FirstView(), + finalView: epoch.FinalView(), + targetDuration: epoch.TargetDuration(), + targetEndTime: epoch.TargetEndTime(), + } +} + +// targetViewTime returns τ[v], the ideal, steady-state view time for the current epoch. +// For numerical stability, we avoid repetitive conversions between seconds and time.Duration. +// Instead, internally within the controller, we work with float64 in units of seconds. +func (epoch *epochTiming) targetViewTime() float64 { + return float64(epoch.targetDuration) / float64(epoch.finalView-epoch.firstView+1) +} + +// isFollowedBy determines whether nextEpoch is indeed the direct successor of the receiver, +// based on the view ranges of both epochs. +func (et *epochTiming) isFollowedBy(nextEpoch *epochTiming) bool { + return et.finalView+1 == nextEpoch.firstView +} + +// BlockTimeController dynamically adjusts the ProposalTiming of this node, +// based on the measured view rate of the consensus committee as a whole, in +// order to achieve a desired switchover time for each epoch. +// In a nutshell, the controller outputs the block time on the happy path, i.e. +// - Suppose the node is observing the parent block B0 at some time `x0`. +// - The controller determines the duration `d` of how much later the child block B1 +// should be observed by the committee. +// - The controller internally memorizes the latest B0 it has seen and outputs +// the tuple `(B0, x0, d)` +// +// This low-level controller output `(B0, x0, d)` is wrapped into a `ProposalTiming` +// interface, specifically `happyPathBlockTime` on the happy path. The purpose of the +// `ProposalTiming` wrapper is to translate the raw controller output into a form +// that is useful for the EventHandler. Edge cases, such as initialization or +// epoch fallback are implemented by other implementations of `ProposalTiming`. 
+type BlockTimeController struct {
+	component.Component
+	// protocol.Consumer consumes protocol state events
+	protocol.Consumer
+
+	config *Config
+
+	state   protocol.State
+	log     zerolog.Logger
+	metrics module.CruiseCtlMetrics
+
+	// currentEpochTiming holds the timing information for the current epoch
+	currentEpochTiming epochTiming
+	// nextEpochTiming holds the timing information for the next epoch, if it is committed
+	nextEpochTiming *epochTiming
+
+	// incorporatedBlocks queues OnBlockIncorporated notifications for subsequent processing by an internal worker routine.
+	// Channel capacity is small and if `incorporatedBlocks` is full we discard new blocks, because the timing targets
+	// from the controller only make sense if the node is not overloaded and is swiftly processing new blocks.
+	incorporatedBlocks chan TimedBlock
+
+	// epochEvents queues functors for processing epoch-related protocol events.
+	// Events will be processed in the order they are received (fifo).
+	epochEvents     chan func() error
+	proportionalErr Ewma
+	integralErr     LeakyIntegrator
+
+	// latestProposalTiming holds the ProposalTiming that the controller generated in response to processing the latest observation
+	latestProposalTiming *atomic.Pointer[ProposalTiming]
+}
+
+var _ hotstuff.ProposalDurationProvider = (*BlockTimeController)(nil)
+var _ protocol.Consumer = (*BlockTimeController)(nil)
+var _ component.Component = (*BlockTimeController)(nil)
+
+// NewBlockTimeController returns a new BlockTimeController.
+func NewBlockTimeController(log zerolog.Logger, metrics module.CruiseCtlMetrics, config *Config, state protocol.State, curView uint64) (*BlockTimeController, error) {
+	// Initial error must be 0 unless we are making assumptions about the prior history of the proportional error `e[v]`
+	initProptlErr, initItgErr, initDrivErr := .0, .0, .0
+	proportionalErr, err := NewEwma(config.alpha(), initProptlErr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize EWMA for computing the proportional error: %w", err)
+	}
+	integralErr, err := NewLeakyIntegrator(config.beta(), initItgErr)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize LeakyIntegrator for computing the integral error: %w", err)
+	}
+
+	ctl := &BlockTimeController{
+		Consumer:             events.NewNoop(),
+		config:               config,
+		log:                  log.With().Str("hotstuff", "cruise_ctl").Logger(),
+		metrics:              metrics,
+		state:                state,
+		incorporatedBlocks:   make(chan TimedBlock, 3),
+		epochEvents:          make(chan func() error, 20),
+		proportionalErr:      proportionalErr,
+		integralErr:          integralErr,
+		latestProposalTiming: atomic.NewPointer[ProposalTiming](nil), // set in initProposalTiming
+	}
+	ctl.Component = component.NewComponentManagerBuilder().
+		AddWorker(ctl.processEventsWorkerLogic).
+		Build()
+
+	// initialize state
+	err = ctl.initEpochTiming()
+	if err != nil {
+		return nil, fmt.Errorf("could not initialize epoch info: %w", err)
+	}
+	ctl.initProposalTiming(curView)
+
+	ctl.log.Debug().
+		Uint64("view", curView).
+		Msg("initialized BlockTimeController")
+	ctl.metrics.PIDError(initProptlErr, initItgErr, initDrivErr)
+	ctl.metrics.ControllerOutput(0)
+	ctl.metrics.TargetProposalDuration(0)
+
+	return ctl, nil
+}
+
+// initEpochTiming initializes the epochInfo state upon component startup.
+// No errors are expected during normal operation.
+func (ctl *BlockTimeController) initEpochTiming() error {
+	finalSnapshot := ctl.state.Final()
+
+	currentEpoch, err := finalSnapshot.Epochs().Current()
+	if err != nil {
+		return fmt.Errorf("could not retrieve current epoch: %w", err)
+	}
+	currentEpochTiming := newEpochTiming(currentEpoch)
+	ctl.currentEpochTiming = *currentEpochTiming
+
+	nextEpoch, err := finalSnapshot.Epochs().NextCommitted()
+	if err != nil {
+		if !errors.Is(err, protocol.ErrNextEpochNotCommitted) {
+			return irrecoverable.NewExceptionf("unexpected error retrieving next epoch: %w", err)
+		}
+		// receiving an `ErrNextEpochNotCommitted` is expected during the happy path
+	} else { // next epoch was successfully retrieved
+		ctl.nextEpochTiming = newEpochTiming(nextEpoch)
+		if !currentEpochTiming.isFollowedBy(ctl.nextEpochTiming) {
+			return fmt.Errorf("next epoch does not directly follow current epoch based on epoch timing")
+		}
+	}
+
+	return nil
+}
+
+// initProposalTiming initializes the ProposalTiming value upon startup.
+// CAUTION: Must be called after initEpochTiming.
+func (ctl *BlockTimeController) initProposalTiming(curView uint64) {
+	// When disabled, or in epoch fallback, use fallback timing (constant ProposalDuration)
+	if !ctl.config.Enabled.Load() {
+		ctl.storeProposalTiming(newFallbackTiming(curView, time.Now().UTC(), ctl.config.FallbackProposalDelay.Load()))
+		return
+	}
+	// Otherwise, before we observe any view changes, publish blocks immediately
+	ctl.storeProposalTiming(newPublishImmediately(curView, time.Now().UTC()))
+}
+
+// storeProposalTiming stores the latest ProposalTiming. Concurrency safe.
+func (ctl *BlockTimeController) storeProposalTiming(proposalTiming ProposalTiming) {
+	ctl.latestProposalTiming.Store(&proposalTiming)
+}
+
+// getProposalTiming returns the controller's latest ProposalTiming. Concurrency safe.
+func (ctl *BlockTimeController) getProposalTiming() ProposalTiming {
+	pt := ctl.latestProposalTiming.Load()
+	if pt == nil { // should never happen, as we always store non-nil instances of ProposalTiming. Though, this extra check makes `getProposalTiming` universal.
+		return nil
+	}
+	return *pt
+}
+
+// TargetPublicationTime is intended to be called by the EventHandler, whenever it
+// wants to publish a new proposal. The event handler inputs
+// - proposalView: the view it is proposing for,
+// - timeViewEntered: the time when the EventHandler entered this view
+// - parentBlockId: the ID of the parent block, which the EventHandler is building on
+//
+// TargetPublicationTime returns the time stamp when the new proposal should be broadcast.
+// For a given view where we are the primary, suppose the actual time we are done building our proposal is P:
+// - if P < TargetPublicationTime(..), then the EventHandler should wait until
+// `TargetPublicationTime` to broadcast the proposal
+// - if P >= TargetPublicationTime(..), then the EventHandler should immediately broadcast the proposal
+//
+// Note: Technically, our metrics capture the publication delay relative to this function's _latest_ call.
+// Currently, the EventHandler is the only caller of this function, and only calls it once per proposal.
+//
+// Concurrency safe.
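+//
+// Illustrative example: suppose TargetPublicationTime returns timeViewEntered+200ms,
+// and the primary finishes constructing its proposal 150ms after entering the view.
+// It then waits the remaining 50ms before broadcasting. Had construction taken
+// 250ms instead, the proposal would be broadcast immediately.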
+func (ctl *BlockTimeController) TargetPublicationTime(proposalView uint64, timeViewEntered time.Time, parentBlockId flow.Identifier) time.Time {
+	targetPublicationTime := ctl.getProposalTiming().TargetPublicationTime(proposalView, timeViewEntered, parentBlockId)
+
+	publicationDelay := time.Until(targetPublicationTime)
+	// targetPublicationTime should already account for the controller's upper limit of authority (longest view time
+	// the controller is allowed to select). However, targetPublicationTime is allowed to be in the past, if the
+	// controller wants to signal that the proposal should be published asap. We could hypothetically update a past
+	// targetPublicationTime to 'now' at every level in the code. However, this time stamp would move into the past
+	// immediately, and we would have to update the targetPublicationTime over and over. Instead, we just allow values
+	// in the past, thereby making repeated corrections unnecessary. In this model, the code _interpreting_ the value
+	// needs to apply the convention that a negative publicationDelay essentially means "no delay".
+	if publicationDelay < 0 {
+		publicationDelay = 0 // Controller can only delay publication of proposal. Hence, the delay is lower-bounded by zero.
+	}
+	ctl.metrics.ProposalPublicationDelay(publicationDelay)
+
+	return targetPublicationTime
+}
+
+// processEventsWorkerLogic is the logic for processing events received from other components.
+// This method should be executed by a dedicated worker routine (not concurrency safe).
+func (ctl *BlockTimeController) processEventsWorkerLogic(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+	ready()
+
+	done := ctx.Done()
+	for {
+		// Priority 1: epoch related protocol events.
+		select {
+		case processEvtFn := <-ctl.epochEvents:
+			err := processEvtFn()
+			if err != nil {
+				ctl.log.Err(err).Msgf("fatal error handling epoch related event")
+				ctx.Throw(err)
+			}
+		default:
+		}
+
+		// Priority 2: OnBlockIncorporated
+		select {
+		case <-done:
+			return
+		case block := <-ctl.incorporatedBlocks:
+			err := ctl.processIncorporatedBlock(block)
+			if err != nil {
+				ctl.log.Err(err).Msgf("fatal error handling OnBlockIncorporated data")
+				ctx.Throw(err)
+				return
+			}
+		case processEvtFn := <-ctl.epochEvents:
+			err := processEvtFn()
+			if err != nil {
+				ctl.log.Err(err).Msgf("fatal error handling epoch related event")
+				ctx.Throw(err)
+			}
+		}
+	}
+}
+
+// processIncorporatedBlock processes `OnBlockIncorporated` events from HotStuff.
+// Whenever the view changes, we:
+// - update epoch info, if this is the first observed view of a new epoch
+// - compute error terms, compensation function output, and new ProposalTiming
+// - compute a new projected epoch end time, assuming an ideal view rate
+//
+// No errors are expected during normal operation.
+func (ctl *BlockTimeController) processIncorporatedBlock(tb TimedBlock) error {
+	latest := ctl.getProposalTiming()
+	if tb.Block.View <= latest.ObservationView() { // we don't care about older blocks that are incorporated into the protocol state
+		return nil
+	}
+
+	err := ctl.checkForEpochTransition(tb)
+	if err != nil {
+		return fmt.Errorf("could not check for epoch transition: %w", err)
+	}
+
+	err = ctl.measureViewDuration(tb)
+	if err != nil {
+		return fmt.Errorf("could not measure view rate: %w", err)
+	}
+	return nil
+}
+
+// checkForEpochTransition updates the epochInfo to reflect an epoch transition if curView
+// being entered causes a transition to the next epoch. Otherwise, this is a no-op.
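+// For example, with the current epoch covering views [0, 3599] and a committed
+// next epoch covering views [3600, 7199], observing a block for view 3600 promotes
+// the next epoch's timing to `currentEpochTiming` and clears `nextEpochTiming`.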
+// No errors are expected during normal operation.
+func (ctl *BlockTimeController) checkForEpochTransition(tb TimedBlock) error {
+	view := tb.Block.View
+	if view <= ctl.currentEpochTiming.finalView { // prevalent case: we are still within the current epoch
+		return nil
+	}
+
+	// sanity checks, since we are beyond the final view of the most recently processed epoch:
+	if ctl.nextEpochTiming == nil { // next epoch timing not initialized
+		return fmt.Errorf("sanity check failed: cannot transition without next epoch timing initialized")
+	}
+	if !ctl.currentEpochTiming.isFollowedBy(ctl.nextEpochTiming) { // non-consecutive epochs
+		return fmt.Errorf("sanity check failed: invalid epoch transition: current epoch (final view: %d) is not followed by next epoch (first view: %d)",
+			ctl.currentEpochTiming.finalView, ctl.nextEpochTiming.firstView)
+	}
+	if view > ctl.nextEpochTiming.finalView { // the block's view should be within the upcoming epoch
+		return fmt.Errorf("sanity check failed: curView %d is beyond both current epoch (final view %d) and next epoch (final view %d)",
+			view, ctl.currentEpochTiming.finalView, ctl.nextEpochTiming.finalView)
+	}
+
+	ctl.currentEpochTiming = *ctl.nextEpochTiming
+	ctl.nextEpochTiming = nil
+
+	return nil
+}
+
+// measureViewDuration computes a new measurement of projected epoch switchover time and error for the newly entered view.
+// It updates the latest ProposalTiming based on the new error.
+// No errors are expected during normal operation.
+func (ctl *BlockTimeController) measureViewDuration(tb TimedBlock) error {
+	view := tb.Block.View
+	// if the controller is disabled, we don't update measurements and instead use a fallback timing
+	if !ctl.config.Enabled.Load() {
+		fallbackDelay := ctl.config.FallbackProposalDelay.Load()
+		ctl.storeProposalTiming(newFallbackTiming(view, tb.TimeObserved, fallbackDelay))
+		ctl.log.Debug().
+			Uint64("cur_view", view).
+			Dur("fallback_proposal_delay", fallbackDelay).
+			Msg("controller is disabled - using fallback timing")
+		return nil
+	}
+
+	previousProposalTiming := ctl.getProposalTiming()
+	previousPropErr := ctl.proportionalErr.Value()
+
+	// Compute the projected time still needed for the remaining views, assuming that we progress through the remaining views with
+	// the idealized target view time.
+	// Note the '+1' term in the computation of `viewDurationsRemaining`. This is related to our convention that the epoch begins
+	// (happy path) when observing the first block of the epoch. Only by observing this block do the nodes transition to the first
+	// view of the epoch. Up to that point, the consensus replicas remain in the last view of the previous epoch, in the state of
+	// "having processed the last block of the old epoch and voted for it" (happy path). Replicas remain in this state until they
+	// see a confirmation of the view (either QC or TC for the last view of the previous epoch).
+	// In accordance with this convention, observing the proposal for the last view of an epoch marks the start of the last view.
+	// By observing the proposal, nodes enter the last view, verify the block, vote for it, the primary aggregates the votes,
+	// constructs the child (for the first view of the new epoch). The last view of the epoch ends when the child proposal is published.
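+	//
+	// Worked example with illustrative numbers: suppose τ = 1s, k[v] = 600 views
+	// remain in the epoch, and Γ[v] = 590s remain until the target end time. Then
+	// e[v] = 600·1 - 590 = 10s: we are behind schedule, the controller output u
+	// becomes positive, and the targeted block time τ - u drops below 1s to catch up.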
+	tau := ctl.currentEpochTiming.targetViewTime()                                            // τ: idealized target view time in units of seconds
+	viewDurationsRemaining := ctl.currentEpochTiming.finalView + 1 - view                     // k[v]: views remaining in current epoch
+	durationRemaining := unix2time(ctl.currentEpochTiming.targetEndTime).Sub(tb.TimeObserved) // Γ[v] = T[v] - t[v], with t[v] ≡ tb.TimeObserved the time when observing the block that triggered the view change
+
+	// Compute the instantaneous error term e[v] = k[v]·τ - Γ[v], i.e. the projected difference from the target switchover,
+	// and update the PID controller's error terms. All UNITS in SECONDS.
+	instErr := float64(viewDurationsRemaining)*tau - durationRemaining.Seconds()
+	propErr := ctl.proportionalErr.AddObservation(instErr)
+	itgErr := ctl.integralErr.AddObservation(instErr)
+	drivErr := propErr - previousPropErr
+
+	// controller output u[v] in units of seconds
+	u := propErr*ctl.config.KP + itgErr*ctl.config.KI + drivErr*ctl.config.KD
+
+	// translate the controller output into the targeted block time for this observation
+	unconstrainedBlockTime := sec2dur(tau - u) // desired time between parent and child block, in units of seconds
+	proposalTiming := newHappyPathBlockTime(tb, unconstrainedBlockTime, ctl.config.TimingConfig)
+	constrainedBlockTime := proposalTiming.ConstrainedBlockTime()
+
+	ctl.log.Debug().
+		Uint64("last_observation", previousProposalTiming.ObservationView()).
+		Dur("duration_since_last_observation", tb.TimeObserved.Sub(previousProposalTiming.ObservationTime())).
+		Dur("projected_time_remaining", durationRemaining).
+		Uint64("view_durations_remaining", viewDurationsRemaining).
+		Float64("inst_err", instErr).
+		Float64("proportional_err", propErr).
+		Float64("integral_err", itgErr).
+		Float64("derivative_err", drivErr).
+		Dur("controller_output", sec2dur(u)).
+		Dur("unconstrained_block_time", unconstrainedBlockTime).
+		Dur("constrained_block_time", constrainedBlockTime).
+		Msg("measured error upon view change")
+
+	ctl.metrics.PIDError(propErr, itgErr, drivErr)
+	ctl.metrics.ControllerOutput(sec2dur(u))
+	ctl.metrics.TargetProposalDuration(proposalTiming.ConstrainedBlockTime())
+
+	ctl.storeProposalTiming(proposalTiming)
+	return nil
+}
+
+// processEpochExtended processes the EpochExtended notification, which the Protocol
+// State emits when we finalize the first block whose Protocol State further extends the current
+// epoch. The next epoch must not be committed at this point, because epoch extensions are only
+// added when the current epoch is nearing its end and there is no subsequent epoch that we
+// could transition into. Specifically, we memorize the updated timing information in the
+// BlockTimeController.
+// No errors are expected during normal operation.
+func (ctl *BlockTimeController) processEpochExtended(first *flow.Header) error {
+	currentEpoch, err := ctl.state.AtHeight(first.Height).Epochs().Current()
+	if err != nil {
+		return fmt.Errorf("could not get current epoch: %w", err)
+	}
+	currEpochTimingWithExtension := newEpochTiming(currentEpoch)
+
+	// sanity check: ensure the final view of the current epoch monotonically increases
+	if currEpochTimingWithExtension.finalView < ctl.currentEpochTiming.finalView {
+		return fmt.Errorf("final view of epoch must be monotonically increasing, but would decrease from %d to %d", ctl.currentEpochTiming.finalView, currEpochTimingWithExtension.finalView)
+	}
+
+	if currEpochTimingWithExtension.finalView == ctl.currentEpochTiming.finalView {
+		return nil
+	}
+
+	ctl.currentEpochTiming = *currEpochTimingWithExtension
+
+	return nil
+}
+
+// processEpochCommittedPhaseStarted processes the EpochCommittedPhaseStarted notification, which
+// the consensus component emits when we finalize the first block of the Epoch Committed phase.
+// Specifically, we memorize the next epoch's timing information in the BlockTimeController.
+// No errors are expected during normal operation.
+func (ctl *BlockTimeController) processEpochCommittedPhaseStarted(first *flow.Header) error {
+	snapshot := ctl.state.AtHeight(first.Height)
+	nextEpoch, err := snapshot.Epochs().NextCommitted()
+	if err != nil {
+		return fmt.Errorf("could not get next committed epoch: %w", err)
+	}
+	ctl.nextEpochTiming = newEpochTiming(nextEpoch)
+	if !ctl.currentEpochTiming.isFollowedBy(ctl.nextEpochTiming) {
+		return fmt.Errorf("next epoch (first view: %d) does not directly follow the current epoch (final view: %d)", ctl.nextEpochTiming.firstView, ctl.currentEpochTiming.finalView)
+	}
+	return nil
+}
+
+// OnBlockIncorporated listens to notifications from HotStuff about incorporating new blocks.
+// The event is queued for async processing by the worker. If the channel is full,
+// the event is discarded - since we are taking an average it doesn't matter if we
+// occasionally miss a sample.
+func (ctl *BlockTimeController) OnBlockIncorporated(block *model.Block) {
+	select {
+	case ctl.incorporatedBlocks <- TimedBlock{Block: block, TimeObserved: time.Now().UTC()}:
+	default:
+	}
+}
+
+// EpochExtended listens to `EpochExtended` protocol notifications. The notification is queued
+// for async processing by the worker. We must process _all_ `EpochExtended` notifications.
+func (ctl *BlockTimeController) EpochExtended(_ uint64, first *flow.Header, _ flow.EpochExtension) {
+	ctl.epochEvents <- func() error {
+		return ctl.processEpochExtended(first)
+	}
+}
+
+// EpochCommittedPhaseStarted ingests the respective protocol notifications. The notification is
+// queued for async processing by the worker. We must process _all_ `EpochCommittedPhaseStarted` notifications.
+func (ctl *BlockTimeController) EpochCommittedPhaseStarted(_ uint64, first *flow.Header) {
+	ctl.epochEvents <- func() error {
+		return ctl.processEpochCommittedPhaseStarted(first)
+	}
+}
+
+// time2unix converts a time.Time to UNIX time represented as a uint64.
+// Returned timestamp is precise to within one second of input.
+func time2unix(t time.Time) uint64 {
+	return uint64(t.Unix())
+}
+
+// unix2time converts a UNIX timestamp represented as a uint64 to a time.Time.
+func unix2time(unix uint64) time.Time {
+	return time.Unix(int64(unix), 0)
+}
+
+// sec2dur converts a floating-point number of seconds to a time.Duration.
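+// For example, sec2dur(1.25) yields 1250*time.Millisecond; sub-nanosecond fractions are truncated.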
+func sec2dur(sec float64) time.Duration { + return time.Duration(int64(sec * float64(time.Second))) +} diff --git a/consensus/hotstuff/cruisectl/block_time_controller_test.go b/consensus/hotstuff/cruisectl/block_time_controller_test.go new file mode 100644 index 00000000000..5fc053c1f21 --- /dev/null +++ b/consensus/hotstuff/cruisectl/block_time_controller_test.go @@ -0,0 +1,756 @@ +package cruisectl + +import ( + "context" + "fmt" + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + mockmodule "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/state/protocol/inmem" + mockprotocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +// BlockTimeControllerSuite encapsulates tests for the BlockTimeController. +type BlockTimeControllerSuite struct { + suite.Suite + + initialView uint64 + epochCounter uint64 + curEpochFirstView uint64 + curEpochFinalView uint64 + curEpochTargetDuration uint64 + curEpochTargetEndTime uint64 + epochFallbackTriggered bool + + metrics mockmodule.CruiseCtlMetrics + state mockprotocol.State + params mockprotocol.Params + snapshot mockprotocol.Snapshot + epochs mocks.EpochQuery + curEpoch mockprotocol.CommittedEpoch + epochProtocolState mockprotocol.EpochProtocolState + + config *Config + ctx irrecoverable.SignalerContext + cancel context.CancelFunc + ctl *BlockTimeController +} + +func TestBlockTimeController(t *testing.T) { + suite.Run(t, new(BlockTimeControllerSuite)) +} + +// EpochDurationSeconds returns the number of seconds in the epoch (1hr). +func (bs *BlockTimeControllerSuite) EpochDurationSeconds() uint64 { + return 60 * 60 +} + +// Helper function to allow an initial tick before checking the condition. +func (bs *BlockTimeControllerSuite) EventuallyWithDelay(t require.TestingT, condition func() bool, waitFor time.Duration, tick time.Duration) { + time.Sleep(tick) // initial delay + require.Eventually(t, condition, waitFor, tick) +} + +// SetupTest initializes mocks and default values. 
+func (bs *BlockTimeControllerSuite) SetupTest() { + bs.config = DefaultConfig() + bs.config.MaxViewDuration = atomic.NewDuration(2 * time.Second) + bs.config.Enabled.Store(true) + bs.initialView = 0 + bs.epochCounter = uint64(0) + bs.curEpochFirstView = uint64(0) + bs.curEpochFinalView = bs.EpochDurationSeconds() - 1 // 1 view/sec for 1hr epoch; term `-1` is needed because view 0 also takes 1 second + bs.curEpochTargetDuration = bs.EpochDurationSeconds() + bs.curEpochTargetEndTime = uint64(time.Now().Unix()) + bs.EpochDurationSeconds() + setupMocks(bs) +} + +func setupMocks(bs *BlockTimeControllerSuite) { + bs.metrics = *mockmodule.NewCruiseCtlMetrics(bs.T()) + bs.metrics.On("PIDError", mock.Anything, mock.Anything, mock.Anything).Maybe() + bs.metrics.On("TargetProposalDuration", mock.Anything).Maybe() + bs.metrics.On("ControllerOutput", mock.Anything).Maybe() + + bs.state = *mockprotocol.NewState(bs.T()) + bs.params = *mockprotocol.NewParams(bs.T()) + bs.snapshot = *mockprotocol.NewSnapshot(bs.T()) + bs.epochs = *mocks.NewEpochQuery(bs.T(), bs.epochCounter) + bs.curEpoch = *mockprotocol.NewCommittedEpoch(bs.T()) + bs.state.On("Final").Return(&bs.snapshot) + bs.state.On("AtHeight", mock.Anything).Return(&bs.snapshot).Maybe() + bs.state.On("Params").Return(&bs.params) + bs.epochProtocolState = *mockprotocol.NewEpochProtocolState(bs.T()) + bs.snapshot.On("EpochProtocolState").Return(&bs.epochProtocolState, nil) + bs.snapshot.On("EpochPhase").Return( + func() flow.EpochPhase { return bs.epochs.Phase() }, + func() error { return nil }) + bs.snapshot.On("Head").Return(unittest.BlockHeaderFixture(unittest.HeaderWithView(bs.initialView+11)), nil).Maybe() + bs.snapshot.On("Epochs").Return(&bs.epochs) + bs.curEpoch.On("Counter").Return(bs.epochCounter) + bs.curEpoch.On("FirstView").Return(bs.curEpochFirstView) + bs.curEpoch.On("FinalView").Return(bs.curEpochFinalView) + bs.curEpoch.On("TargetDuration").Return(bs.curEpochTargetDuration) + bs.curEpoch.On("TargetEndTime").Return(bs.curEpochTargetEndTime) + bs.epochs.AddCommitted(&bs.curEpoch) + + bs.ctx, bs.cancel = irrecoverable.NewMockSignalerContextWithCancel(bs.T(), context.Background()) +} + +// CreateAndStartController creates and starts the BlockTimeController. +// Should be called only once per test case. +func (bs *BlockTimeControllerSuite) CreateAndStartController() { + ctl, err := NewBlockTimeController(unittest.Logger(), &bs.metrics, bs.config, &bs.state, bs.initialView) + require.NoError(bs.T(), err) + bs.ctl = ctl + bs.ctl.Start(bs.ctx) + unittest.RequireCloseBefore(bs.T(), bs.ctl.Ready(), time.Second, "component did not start") +} + +// StopController stops the BlockTimeController. +func (bs *BlockTimeControllerSuite) StopController() { + bs.cancel() + unittest.RequireCloseBefore(bs.T(), bs.ctl.Done(), time.Second, "component did not stop") +} + +// AssertCorrectInitialization checks that the controller is configured as expected after construction. 
+func (bs *BlockTimeControllerSuite) AssertCorrectInitialization() { + // at initialization, controller should be set up to release blocks without delay + controllerTiming := bs.ctl.getProposalTiming() + now := time.Now().UTC() + + if !bs.ctl.config.Enabled.Load() { + // if controller is disabled, it should use fallback timing + assert.Equal(bs.T(), now.Add(bs.ctl.config.FallbackProposalDelay.Load()), controllerTiming.TargetPublicationTime(7, now, unittest.IdentifierFixture())) + } else { + // otherwise should publish immediately + assert.Equal(bs.T(), now, controllerTiming.TargetPublicationTime(7, now, unittest.IdentifierFixture())) + } + + // should initialize the current epoch timing + epoch := bs.ctl.currentEpochTiming + assert.Equal(bs.T(), bs.curEpochFirstView, epoch.firstView) + assert.Equal(bs.T(), bs.curEpochFinalView, epoch.finalView) + assert.Equal(bs.T(), bs.curEpochTargetDuration, epoch.targetDuration) + assert.Equal(bs.T(), bs.curEpochTargetEndTime, epoch.targetEndTime) + + // if next epoch is committed, final view should be set + if phase := bs.epochs.Phase(); phase == flow.EpochPhaseCommitted { + nextEpoch, err := bs.epochs.NextCommitted() + require.NoError(bs.T(), err) + finalView := nextEpoch.FinalView() + require.NotNil(bs.T(), bs.ctl.nextEpochTiming) + assert.Equal(bs.T(), finalView, bs.ctl.nextEpochTiming.finalView) + } else { + assert.Nil(bs.T(), bs.ctl.nextEpochTiming) + } + + // should create an initial measurement + assert.Equal(bs.T(), bs.initialView, controllerTiming.ObservationView()) + assert.WithinDuration(bs.T(), time.Now(), controllerTiming.ObservationTime(), time.Minute) + // errors should be initialized to zero + assert.Equal(bs.T(), float64(0), bs.ctl.proportionalErr.Value()) + assert.Equal(bs.T(), float64(0), bs.ctl.integralErr.Value()) +} + +// SanityCheckSubsequentMeasurements checks that two consecutive states of the BlockTimeController are different or equal and +// broadly reasonable. It does not assert exact values, because part of the measurements depend on timing in the worker. +func (bs *BlockTimeControllerSuite) SanityCheckSubsequentMeasurements(d1, d2 *controllerStateDigest, expectedEqual bool) { + if expectedEqual { + // later input should have left state invariant, including the Observation + assert.Equal(bs.T(), d1.latestProposalTiming.ObservationTime(), d2.latestProposalTiming.ObservationTime()) + assert.Equal(bs.T(), d1.latestProposalTiming.ObservationView(), d2.latestProposalTiming.ObservationView()) + // new measurement should have same error + assert.Equal(bs.T(), d1.proportionalErr.Value(), d2.proportionalErr.Value()) + assert.Equal(bs.T(), d1.integralErr.Value(), d2.integralErr.Value()) + } else { + // later input should have caused a new Observation to be recorded + assert.True(bs.T(), d1.latestProposalTiming.ObservationTime().Before(d2.latestProposalTiming.ObservationTime())) + // new measurement should have different error + assert.NotEqual(bs.T(), d1.proportionalErr.Value(), d2.proportionalErr.Value()) + assert.NotEqual(bs.T(), d1.integralErr.Value(), d2.integralErr.Value()) + } +} + +// PrintMeasurement prints the current state of the controller and the last measurement. 
+func (bs *BlockTimeControllerSuite) PrintMeasurement(parentBlockId flow.Identifier) {
+	ctl := bs.ctl
+	m := ctl.getProposalTiming()
+	tpt := m.TargetPublicationTime(m.ObservationView()+1, m.ObservationTime(), parentBlockId)
+	fmt.Printf("v=%d\tt=%s\tPD=%s\te_N=%.3f\tI_M=%.3f\n",
+		m.ObservationView(), m.ObservationTime(), tpt.Sub(m.ObservationTime()),
+		ctl.proportionalErr.Value(), ctl.integralErr.Value())
+}
+
+// TestStartStop tests that the component can be started and stopped gracefully.
+func (bs *BlockTimeControllerSuite) TestStartStop() {
+	bs.CreateAndStartController()
+	bs.StopController()
+}
+
+// TestInit_EpochStakingPhase tests initializing the component in the EpochStaking phase.
+// Measurement and epoch info should be initialized, next epoch final view should be nil.
+func (bs *BlockTimeControllerSuite) TestInit_EpochStakingPhase() {
+	bs.CreateAndStartController()
+	defer bs.StopController()
+	bs.AssertCorrectInitialization()
+}
+
+// TestInit_EpochSetupPhase tests initializing the component in the EpochSetup phase.
+// Measurement and epoch info should be initialized, next epoch final view should be set.
+func (bs *BlockTimeControllerSuite) TestInit_EpochSetupPhase() {
+	nextEpoch := mockprotocol.NewCommittedEpoch(bs.T())
+	nextEpoch.On("Counter").Return(bs.epochCounter + 1)
+	nextEpoch.On("FirstView").Return(bs.curEpochFinalView + 1)
+	nextEpoch.On("FinalView").Return(bs.curEpochFinalView * 2)
+	nextEpoch.On("TargetDuration").Return(bs.EpochDurationSeconds())
+	nextEpoch.On("TargetEndTime").Return(bs.curEpochTargetEndTime + bs.EpochDurationSeconds())
+	bs.epochs.AddCommitted(nextEpoch)
+
+	bs.CreateAndStartController()
+	defer bs.StopController()
+	bs.AssertCorrectInitialization()
+}
+
+// TestOnEpochExtended ensures that the epoch configuration is updated when EpochExtended events are processed.
+func (bs *BlockTimeControllerSuite) TestOnEpochExtended() {
+	bs.CreateAndStartController()
+	defer bs.StopController()
+
+	// create setup epoch fixture with extensions and transition into it
+	setupFixture := unittest.EpochSetupFixture()
+	setupFixture.Counter = bs.epochCounter + 1
+	setupFixture.FirstView = bs.curEpochFinalView + 1
+	setupFixture.FinalView = bs.curEpochFinalView * 2
+
+	extension := flow.EpochExtension{
+		FirstView: setupFixture.FirstView,
+		FinalView: setupFixture.FinalView,
+	}
+	commitFixture := unittest.EpochCommitFixture()
+
+	epoch := inmem.NewCommittedEpoch(setupFixture, commitFixture, []flow.EpochExtension{extension})
+	bs.epochs.AddCommitted(epoch)
+	bs.epochs.Transition()
+
+	header := unittest.BlockHeaderFixture()
+	bs.state.On("AtHeight", header.Height).Return(&bs.snapshot).Once()
+
+	bs.ctl.EpochExtended(bs.epochCounter, header, extension)
+
+	// Check component state after the epochEvents channel is empty, indicating the event has been processed.
+ bs.EventuallyWithDelay(bs.T(), func() bool { + return len(bs.ctl.epochEvents) == 0 + }, time.Second, 10*time.Millisecond) + + currentEpoch, err := bs.snapshot.Epochs().Current() + require.NoError(bs.T(), err) + extensionTargetTime := currentEpoch.TargetEndTime() + extensionTargetDuration := currentEpoch.TargetDuration() + extensionFinalView := currentEpoch.FinalView() + + assert.Equal(bs.T(), extensionTargetTime, bs.ctl.currentEpochTiming.targetEndTime) + assert.Equal(bs.T(), extensionTargetDuration, bs.ctl.currentEpochTiming.targetDuration) + assert.Equal(bs.T(), extensionFinalView, bs.ctl.currentEpochTiming.finalView) + + // duplicate events should be no-ops + for i := 0; i <= cap(bs.ctl.epochEvents); i++ { + bs.ctl.EpochExtended(bs.epochCounter, header, extension) + } + require.Eventually(bs.T(), func() bool { + return len(bs.ctl.epochEvents) == 0 + }, time.Second, time.Millisecond) + + assert.Equal(bs.T(), extensionTargetTime, bs.ctl.currentEpochTiming.targetEndTime) + assert.Equal(bs.T(), extensionTargetDuration, bs.ctl.currentEpochTiming.targetDuration) + assert.Equal(bs.T(), extensionFinalView, bs.ctl.currentEpochTiming.finalView) +} + +// TestOnEpochCommittedPhaseStarted ensures that the epoch info is updated when the next epoch is committed. +func (bs *BlockTimeControllerSuite) TestOnEpochCommittedPhaseStarted() { + nextEpoch := mockprotocol.NewCommittedEpoch(bs.T()) + nextEpoch.On("Counter").Return(bs.epochCounter + 1) + nextEpoch.On("FinalView").Return(bs.curEpochFinalView * 2) + nextEpoch.On("FirstView").Return(bs.curEpochFinalView + 1) + nextEpoch.On("TargetDuration").Return(bs.EpochDurationSeconds()) + nextEpoch.On("TargetEndTime").Return(bs.curEpochTargetEndTime + bs.EpochDurationSeconds()) + bs.epochs.AddCommitted(nextEpoch) + bs.CreateAndStartController() + defer bs.StopController() + header := unittest.BlockHeaderFixture() + bs.ctl.EpochCommittedPhaseStarted(bs.epochCounter, header) + require.Eventually(bs.T(), func() bool { + return bs.ctl.nextEpochTiming != nil + }, time.Second, time.Millisecond) + + assert.Equal(bs.T(), bs.curEpochFinalView+1, bs.ctl.nextEpochTiming.firstView) + assert.Equal(bs.T(), bs.curEpochFinalView*2, bs.ctl.nextEpochTiming.finalView) + assert.Equal(bs.T(), bs.curEpochTargetEndTime+bs.EpochDurationSeconds(), bs.ctl.nextEpochTiming.targetEndTime) + assert.Equal(bs.T(), bs.EpochDurationSeconds(), bs.ctl.nextEpochTiming.targetDuration) + + // duplicate events should be no-ops + for i := 0; i <= cap(bs.ctl.epochEvents); i++ { + bs.ctl.EpochCommittedPhaseStarted(bs.epochCounter, header) + } + + require.Eventually(bs.T(), func() bool { + return len(bs.ctl.epochEvents) == 0 + }, time.Second, time.Millisecond) + + assert.Equal(bs.T(), bs.curEpochFinalView+1, bs.ctl.nextEpochTiming.firstView) + assert.Equal(bs.T(), bs.curEpochFinalView*2, bs.ctl.nextEpochTiming.finalView) + assert.Equal(bs.T(), bs.curEpochTargetEndTime+bs.EpochDurationSeconds(), bs.ctl.nextEpochTiming.targetEndTime) + assert.Equal(bs.T(), bs.EpochDurationSeconds(), bs.ctl.nextEpochTiming.targetDuration) +} + +// TestOnBlockIncorporated_UpdateProposalDelay tests that a new measurement is taken and +// GetProposalTiming updated upon receiving an OnBlockIncorporated event. 
+func (bs *BlockTimeControllerSuite) TestOnBlockIncorporated_UpdateProposalDelay() { + bs.CreateAndStartController() + defer bs.StopController() + + initialControllerState := captureControllerStateDigest(bs.ctl) // copy initial controller state + initialProposalDelay := bs.ctl.getProposalTiming() + block := model.BlockFromFlow(unittest.BlockHeaderFixture(unittest.HeaderWithView(bs.initialView + 1))) + bs.ctl.OnBlockIncorporated(block) + require.Eventually(bs.T(), func() bool { + return bs.ctl.getProposalTiming().ObservationView() > bs.initialView + }, time.Second, time.Millisecond) + nextControllerState := captureControllerStateDigest(bs.ctl) + nextProposalDelay := bs.ctl.getProposalTiming() + + bs.SanityCheckSubsequentMeasurements(initialControllerState, nextControllerState, false) + // new measurement should update GetProposalTiming + now := time.Now().UTC() + assert.NotEqual(bs.T(), + initialProposalDelay.TargetPublicationTime(bs.initialView+2, now, unittest.IdentifierFixture()), + nextProposalDelay.TargetPublicationTime(bs.initialView+2, now, block.BlockID)) + + // duplicate events should be no-ops + for i := 0; i <= cap(bs.ctl.incorporatedBlocks); i++ { + bs.ctl.OnBlockIncorporated(block) + } + // wait for the channel to drain, since OnBlockIncorporated doesn't block on sending + require.Eventually(bs.T(), func() bool { + return len(bs.ctl.incorporatedBlocks) == 0 + }, time.Second, time.Millisecond) + + // state should be unchanged + finalControllerState := captureControllerStateDigest(bs.ctl) + bs.SanityCheckSubsequentMeasurements(nextControllerState, finalControllerState, true) + assert.Equal(bs.T(), nextProposalDelay, bs.ctl.getProposalTiming()) +} + +// TestEnableDisable tests that the controller responds to enabling and disabling. +func (bs *BlockTimeControllerSuite) TestEnableDisable() { + // start in a disabled state + err := bs.config.SetEnabled(false) + require.NoError(bs.T(), err) + bs.CreateAndStartController() + defer bs.StopController() + + now := time.Now() + + initialControllerState := captureControllerStateDigest(bs.ctl) + initialProposalDelay := bs.ctl.getProposalTiming() + // the initial proposal timing should use fallback timing + assert.Equal(bs.T(), now.Add(bs.ctl.config.FallbackProposalDelay.Load()), initialProposalDelay.TargetPublicationTime(bs.initialView+1, now, unittest.IdentifierFixture())) + + block := model.BlockFromFlow(unittest.BlockHeaderFixture(unittest.HeaderWithView(bs.initialView + 1))) + bs.ctl.OnBlockIncorporated(block) + require.Eventually(bs.T(), func() bool { + return bs.ctl.getProposalTiming().ObservationView() > bs.initialView + }, time.Second, time.Millisecond) + secondProposalDelay := bs.ctl.getProposalTiming() + + // new measurement should not change GetProposalTiming + assert.Equal(bs.T(), + initialProposalDelay.TargetPublicationTime(bs.initialView+2, now, unittest.IdentifierFixture()), + secondProposalDelay.TargetPublicationTime(bs.initialView+2, now, block.BlockID)) + + // now, enable the controller + err = bs.ctl.config.SetEnabled(true) + require.NoError(bs.T(), err) + + // send another block + block = model.BlockFromFlow(unittest.BlockHeaderFixture(unittest.HeaderWithView(bs.initialView + 2))) + bs.ctl.OnBlockIncorporated(block) + + bs.EventuallyWithDelay(bs.T(), func() bool { + return bs.ctl.getProposalTiming().ObservationView() > bs.initialView + }, time.Second, 10*time.Millisecond) + + thirdControllerState := captureControllerStateDigest(bs.ctl) + thirdProposalDelay := bs.ctl.getProposalTiming() + + // new measurement should change 
GetProposalTiming
+	bs.SanityCheckSubsequentMeasurements(initialControllerState, thirdControllerState, false)
+	assert.NotEqual(bs.T(),
+		initialProposalDelay.TargetPublicationTime(bs.initialView+3, now, unittest.IdentifierFixture()),
+		thirdProposalDelay.TargetPublicationTime(bs.initialView+3, now, block.BlockID))
+}
+
+// TestOnBlockIncorporated_EpochTransition_Enabled tests epoch transition with controller enabled.
+func (bs *BlockTimeControllerSuite) TestOnBlockIncorporated_EpochTransition_Enabled() {
+	err := bs.config.SetEnabled(true)
+	require.NoError(bs.T(), err)
+	bs.testOnBlockIncorporated_EpochTransition()
+}
+
+// TestOnBlockIncorporated_EpochTransition_Disabled tests epoch transition with controller disabled.
+func (bs *BlockTimeControllerSuite) TestOnBlockIncorporated_EpochTransition_Disabled() {
+	err := bs.config.SetEnabled(false)
+	require.NoError(bs.T(), err)
+	bs.testOnBlockIncorporated_EpochTransition()
+}
+
+// testOnBlockIncorporated_EpochTransition tests that a view change into the next epoch
+// updates the local state to reflect the new epoch.
+func (bs *BlockTimeControllerSuite) testOnBlockIncorporated_EpochTransition() {
+	nextEpoch := mockprotocol.NewCommittedEpoch(bs.T())
+	nextEpoch.On("Counter").Return(bs.epochCounter + 1)
+	nextEpoch.On("FinalView").Return(bs.curEpochFinalView * 2)
+	nextEpoch.On("FirstView").Return(bs.curEpochFinalView + 1)
+	nextEpoch.On("TargetDuration").Return(bs.EpochDurationSeconds()) // 1s/view
+	nextEpoch.On("TargetEndTime").Return(bs.curEpochTargetEndTime + bs.EpochDurationSeconds())
+	bs.epochs.AddCommitted(nextEpoch)
+	bs.CreateAndStartController()
+	defer bs.StopController()
+
+	initialControllerState := captureControllerStateDigest(bs.ctl)
+	bs.epochs.Transition()
+	timedBlock := makeTimedBlock(bs.curEpochFinalView+1, unittest.IdentifierFixture(), time.Now().UTC())
+	err := bs.ctl.processIncorporatedBlock(timedBlock)
+	require.NoError(bs.T(), err)
+	require.True(bs.T(), bs.ctl.getProposalTiming().ObservationView() > bs.initialView)
+	nextControllerState := captureControllerStateDigest(bs.ctl)
+
+	bs.SanityCheckSubsequentMeasurements(initialControllerState, nextControllerState, false)
+	// epoch boundaries should be updated
+	assert.Equal(bs.T(), bs.curEpochFinalView+1, bs.ctl.currentEpochTiming.firstView)
+	assert.Equal(bs.T(), bs.ctl.currentEpochTiming.finalView, bs.curEpochFinalView*2)
+	assert.Equal(bs.T(), bs.ctl.currentEpochTiming.targetEndTime, bs.curEpochTargetEndTime+bs.EpochDurationSeconds())
+	assert.Nil(bs.T(), bs.ctl.nextEpochTiming)
+}
+
+// TestProposalDelay_AfterTargetTransitionTime tests the behaviour of the controller
+// when we have passed the target end time for the current epoch.
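+// Once the target end time has passed, Γ[v] ≤ 0 while k[v]·τ > 0, so the error
+// e[v] = k[v]·τ - Γ[v] stays large and positive, and the controller keeps pushing
+// the targeted view time down.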
+// We should approach the min GetProposalTiming (increase view rate as much as possible)
+func (bs *BlockTimeControllerSuite) TestProposalDelay_AfterTargetTransitionTime() {
+	// we are near the end of the epoch in view terms
+	bs.initialView = uint64(float64(bs.curEpochFinalView) * .95)
+	bs.CreateAndStartController()
+	defer bs.StopController()
+
+	lastProposalDelay := float64(bs.EpochDurationSeconds()) // start with large dummy value
+	for view := bs.initialView + 1; view < bs.ctl.currentEpochTiming.finalView; view++ {
+		// we have passed the target end time of the epoch
+		receivedParentBlockAt := unix2time(bs.ctl.currentEpochTiming.targetEndTime + view)
+		timedBlock := makeTimedBlock(view, unittest.IdentifierFixture(), receivedParentBlockAt)
+		err := bs.ctl.measureViewDuration(timedBlock)
+		require.NoError(bs.T(), err)
+
+		// compute proposal delay:
+		pubTime := bs.ctl.getProposalTiming().TargetPublicationTime(view+1, time.Now().UTC(), timedBlock.Block.BlockID) // simulate building a child of `timedBlock`
+		delay := pubTime.Sub(receivedParentBlockAt)
+
+		assert.LessOrEqual(bs.T(), delay.Seconds(), lastProposalDelay)
+		lastProposalDelay = delay.Seconds()
+
+		// transition views until the end of the epoch, or for 100 views
+		if view-bs.initialView >= 100 {
+			break
+		}
+	}
+}
+
+// TestProposalDelay_BehindSchedule tests the behaviour of the controller when the
+// projected epoch switchover is LATER than the target switchover time, i.e.
+// we are behind schedule.
+// We should respond by lowering the GetProposalTiming (increasing view rate)
+func (bs *BlockTimeControllerSuite) TestProposalDelay_BehindSchedule() {
+	// we are 50% of the way through the epoch in view terms
+	bs.initialView = uint64(float64(bs.curEpochFinalView) * .5)
+	bs.CreateAndStartController()
+	defer bs.StopController()
+
+	lastProposalDelay := float64(bs.EpochDurationSeconds()) // start with large dummy value
+	idealEnteredViewTime := unix2time(bs.ctl.currentEpochTiming.targetEndTime - (bs.EpochDurationSeconds() / 2))
+
+	// 1s behind schedule
+	receivedParentBlockAt := idealEnteredViewTime.Add(time.Second)
+	for view := bs.initialView + 1; view < bs.ctl.currentEpochTiming.finalView; view++ {
+		// hold the instantaneous error constant for each view
+		receivedParentBlockAt = receivedParentBlockAt.Add(sec2dur(bs.ctl.currentEpochTiming.targetViewTime()))
+		timedBlock := makeTimedBlock(view, unittest.IdentifierFixture(), receivedParentBlockAt)
+		err := bs.ctl.measureViewDuration(timedBlock)
+		require.NoError(bs.T(), err)
+
+		// compute proposal delay:
+		pubTime := bs.ctl.getProposalTiming().TargetPublicationTime(view+1, time.Now().UTC(), timedBlock.Block.BlockID) // simulate building a child of `timedBlock`
+		delay := pubTime.Sub(receivedParentBlockAt)
+		// expecting decreasing GetProposalTiming
+		assert.LessOrEqual(bs.T(), delay.Seconds(), lastProposalDelay, "got non-decreasing delay on view %d (initial view: %d)", view, bs.initialView)
+		lastProposalDelay = delay.Seconds()
+
+		// transition views until the end of the epoch, or for 100 views
+		if view-bs.initialView >= 100 {
+			break
+		}
+	}
+}
+
+// TestProposalDelay_AheadOfSchedule tests the behaviour of the controller when the
+// projected epoch switchover is EARLIER than the target switchover time, i.e.
+// we are ahead of schedule.
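+// Being ahead of schedule means Γ[v] > k[v]·τ, hence e[v] < 0 and u < 0, which
+// lengthens the targeted block time τ - u.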
+// We should respond by increasing the GetProposalTiming (lowering view rate)
+func (bs *BlockTimeControllerSuite) TestProposalDelay_AheadOfSchedule() {
+	// we are 50% of the way through the epoch in view terms
+	bs.initialView = uint64(float64(bs.curEpochFinalView) * .5)
+	bs.CreateAndStartController()
+	defer bs.StopController()
+
+	lastProposalDelay := time.Duration(0) // start with small dummy value, as delays are expected to increase
+	idealEnteredViewTime := bs.ctl.currentEpochTiming.targetEndTime - (bs.EpochDurationSeconds() / 2)
+	// 1s ahead of schedule
+	receivedParentBlockAt := idealEnteredViewTime - 1
+	for view := bs.initialView + 1; view < bs.ctl.currentEpochTiming.finalView; view++ {
+		// hold the instantaneous error constant for each view
+		receivedParentBlockAt = receivedParentBlockAt + uint64(bs.ctl.currentEpochTiming.targetViewTime())
+		timedBlock := makeTimedBlock(view, unittest.IdentifierFixture(), unix2time(receivedParentBlockAt))
+		err := bs.ctl.measureViewDuration(timedBlock)
+		require.NoError(bs.T(), err)
+
+		// compute proposal delay:
+		pubTime := bs.ctl.getProposalTiming().TargetPublicationTime(view+1, time.Now().UTC(), timedBlock.Block.BlockID) // simulate building a child of `timedBlock`
+		delay := pubTime.Sub(unix2time(receivedParentBlockAt))
+
+		// expecting increasing GetProposalTiming
+		assert.GreaterOrEqual(bs.T(), delay, lastProposalDelay)
+		lastProposalDelay = delay
+
+		// transition views until the end of the epoch, or for 100 views
+		if view-bs.initialView >= 100 {
+			break
+		}
+	}
+}
+
+// TestMetricsWhenObservingBlock tests that when observing a new block the correct metrics are tracked.
+func (bs *BlockTimeControllerSuite) TestMetricsWhenObservingBlock() {
+	bs.metrics = *mockmodule.NewCruiseCtlMetrics(bs.T())
+	// should set metrics upon initialization
+	bs.metrics.On("PIDError", float64(0), float64(0), float64(0)).Once()
+	bs.metrics.On("TargetProposalDuration", time.Duration(0)).Once()
+	bs.metrics.On("ControllerOutput", time.Duration(0)).Once()
+	bs.CreateAndStartController()
+	defer bs.StopController()
+	bs.metrics.AssertExpectations(bs.T())
+
+	// we are at view 1 of the epoch, but the time is suddenly the target end time
+	enteredViewAt := bs.ctl.currentEpochTiming.targetEndTime
+	view := bs.initialView + 1
+	// we should observe a large error
+	bs.metrics.On("PIDError", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+		p := args[0].(float64)
+		i := args[1].(float64)
+		d := args[2].(float64)
+		assert.Greater(bs.T(), p, float64(0))
+		assert.Greater(bs.T(), i, float64(0))
+		assert.Greater(bs.T(), d, float64(0))
+	}).Once()
+	// should immediately use min proposal duration
+	bs.metrics.On("TargetProposalDuration", bs.config.MinViewDuration.Load()).Once()
+	// should have a large positive controller output (driving the targeted view time down)
+	bs.metrics.On("ControllerOutput", mock.Anything).Run(func(args mock.Arguments) {
+		output := args[0].(time.Duration)
+		assert.Greater(bs.T(), output, time.Duration(0))
+	}).Once()
+
+	timedBlock := makeTimedBlock(view, unittest.IdentifierFixture(), unix2time(enteredViewAt))
+	err := bs.ctl.measureViewDuration(timedBlock)
+	require.NoError(bs.T(), err)
+}
+
+// TestMetrics_TargetPublicationTime tests that when recalling the `TargetPublicationTime`,
+// the controller metrics capture the corresponding publication _delay_.
+func (bs *BlockTimeControllerSuite) TestMetrics_TargetPublicationTime() {
+	// epoch's first view is 0; it has 3600 views in total, with each view targeted to be 1s long
+	bs.initialView = 7                                    // controller is initialized for view 7 of Epoch
+	epochTargetStartTime := uint64(time.Now().Unix()) - 7 // this is in the range (-8s,-7s] before 'now', because we truncate fractions of a second
+	bs.curEpochTargetEndTime = epochTargetStartTime + bs.EpochDurationSeconds()
+	setupMocks(bs)
+
+	// In the tests, the controller starts at the _beginning_ of view 7, i.e. 7 views (0, 1, ...,6) have concluded
+	bs.CreateAndStartController()
+	defer bs.StopController()
+	// Sanity check that controller's notion of target view time is 1 second. Otherwise, our test is inconsistent.
+	assert.Equal(bs.T(), bs.ctl.currentEpochTiming.targetViewTime(), 1.0)
+
+	// Without any context, the controller should default to "publish immediately". Hence, when
+	// we recall `TargetPublicationTime`, the delay relative to 'now' should be very close to zero.
+	bs.metrics.On("ProposalPublicationDelay", inProximityOf(0, 50*time.Millisecond)).Once()
+	bs.ctl.TargetPublicationTime(0, time.Now(), flow.ZeroID)
+
+	// We assume block 7 arrives with perfect timing. When observing the proposal for view 7, the controller should have
+	// computed its first proper ProposalTiming.
+	timedBlock := makeTimedBlock(bs.initialView, unittest.IdentifierFixture(), unix2time(epochTargetStartTime).Add(7*time.Second))
+	assert.NoError(bs.T(), bs.ctl.measureViewDuration(timedBlock))
+	// As all the timing is perfect so far from the controller's point of view (mathematically, all errors are zero)
+	// the target publication time for the subsequent view 8 (=bs.initialView + 1) should be exactly epochTargetStartTime + 8s,
+	// because in total 8 views (0,..,7) have already concluded before.
+	expectedPublicationTime2 := unix2time(epochTargetStartTime).Add(8 * time.Second)
+
+	// Assuming this node is primary in view 8 (=bs.initialView + 1). It has just finished constructing its proposal
+	// and is computing when to publish it.
+	now := time.Now()
+	expectedDelay := expectedPublicationTime2.Sub(now) // approximate value that the metrics-component should capture as delay
+	bs.metrics.On("ProposalPublicationDelay", inProximityOf(expectedDelay, 100*time.Millisecond)).Once()
+	bs.ctl.TargetPublicationTime(bs.initialView+1, now, timedBlock.Block.BlockID)
+}
+
+// Test_vs_PythonSimulation performs a regression test. We implemented the controller in python
+// together with a statistical model for the view duration. We used the python implementation to tune
+// the PID controller parameters which we are using here.
+// In this test, we feed values pre-generated with the python simulation into the Go implementation
+// and compare the outputs to the pre-generated outputs from the python controller implementation.
+func (bs *BlockTimeControllerSuite) Test_vs_PythonSimulation() {
+	// PART 1: setup system to mirror python simulation
+	// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+	refT := time.Now().UTC()
+	refT = time.Date(refT.Year(), refT.Month(), refT.Day(), refT.Hour(), refT.Minute(), 0, 0, time.UTC) // truncate to past minute
+
+	totalEpochViews := 483000
+	bs.initialView = 0
+	bs.curEpochFirstView, bs.curEpochFinalView = uint64(0), uint64(totalEpochViews-1) // views [0, .., totalEpochViews-1]
+	bs.curEpochTargetDuration = 7 * 24 * 60 * 60                                      // 1 week in seconds
+	bs.curEpochTargetEndTime = time2unix(refT) + bs.curEpochTargetDuration            // now + 1 week
+
+	bs.config = &Config{
+		TimingConfig: TimingConfig{
+			FallbackProposalDelay: atomic.NewDuration(500 * time.Millisecond), // irrelevant for this test, as controller should never enter fallback mode
+			MinViewDuration:       atomic.NewDuration(470 * time.Millisecond),
+			MaxViewDuration:       atomic.NewDuration(2010 * time.Millisecond),
+			Enabled:               atomic.NewBool(true),
+		},
+		ControllerParams: ControllerParams{KP: 2.0, KI: 0.06, KD: 3.0, N_ewma: 5, N_itg: 50},
+	}
+
+	setupMocks(bs)
+	bs.CreateAndStartController()
+	defer bs.StopController()
+
+	// PART 2: timing generated from python simulation and corresponding controller response
+	// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+	ref := struct {
+		// targetViewTime is the idealized view duration of a perfect system.
+		// In the Python simulation, this is the array `EpochSimulation.ideal_view_time`
+		targetViewTime float64 // units: seconds
+
+		// observedMinViewTimes[i] is the minimal time required to execute the protocol for view i
+		// - Duration from the primary observing the parent block (indexed by i) to having its child proposal (block for view i+1) ready for publication.
+		// - This is the minimal time required to execute the protocol. Nodes can only delay their proposal but not progress any faster.
+		// - In the Python simulation, this is the array `EpochSimulation.min_view_times + EpochSimulation.observation_noise`,
+		//   which is returned by the function `EpochSimulation.current_view_observation()`
+		// Note that this is generally different from the time it takes the committee as a whole to transition
+		// through views. This is because the primary changes from view to view, and nodes observe blocks at slightly
+		// different times (small noise term). The real world (as well as the simulation) depends on the collective swarm
+		// behaviour of the consensus committee, which is not observable by nodes individually.
+		// In contrast, our `observedMinViewTimes` here contains an additional noise term, to emulate
+		// the observations of a node in the real world.
+		observedMinViewTimes []float64 // units: seconds
+
+		// controllerTargetedViewDuration[i] is the duration targeted by the Python controller:
+		// - measured from observing the parent until publishing the child block for view i+1
+		controllerTargetedViewDuration []float64 // units: seconds
+
+		// realWorldViewDuration[i] is the duration of the ith view for the entire committee.
+		// This value occurs in response to the controller output and is not observable by nodes individually.
+		// - In the Python simulation, this is the array `EpochSimulation.real_world_view_duration`,
+		//   which is recorded by the environment upon the call of `EpochSimulation.delay()`
+		realWorldViewDuration []float64 // units: seconds
+	}{
+		targetViewTime:                 1.2521739130434784,
+		observedMinViewTimes:           []float64{0.8139115907362099, 0.7093851608587579, 0.7370057913407495, 0.8378050305605419, 0.8221876685439506, 0.8129097289534515, 0.7835810854212116, 0.7419219104134447, 0.7122331139614623, 0.7263645183403751, 1.2481399484109290, 0.8741906105412369, 0.7082127929564489, 0.8175969272012624, 0.8040687048886446, 0.8163336940928989, 0.6354390018677689, 1.0568897015119771, 0.8283653995502240, 0.8649826738831023, 0.7249163864295024, 0.6572694879104934, 0.8796994117267707, 0.8251533370085626, 0.8383599333817994, 0.7561765091071196, 1.4239532706257330, 2.3848404271162811, 0.6997792104740760, 0.6783155065018911, 0.7397146999404549, 0.7568604144415827, 0.8224399309953295, 0.8635091458596464, 0.6292564656694590, 0.6399775559845721, 0.7551854294536755, 0.7493031513209824, 0.7916989850940226, 0.8584875376770561, 0.5733027665412744, 0.8190610271623866, 0.6664088123579012, 0.6856899641942998, 0.8235905136098289, 0.7673984464333541, 0.7514768668170753, 0.7145945518569533, 0.8076879859786521, 0.6890844388873341, 0.7782307638665685, 1.0031597171903470, 0.8056874789572074, 1.1894678554682030, 0.7751504335630999, 0.6598342159237116, 0.7198783916113262, 0.7231184452829420, 0.7291287772166142, 0.8941150065282033, 0.8216597987064465, 0.7074775436893693, 0.7886375844003763, 0.8028714839193359, 0.6473851384702657, 0.8247230728633490, 0.8268367270238434, 0.7776181863431995, 1.2870341252966155, 0.9022036087098005, 0.8608476621564736, 0.7448392402085238, 0.7030664985775897, 0.7343372879803260, 0.8501776646839836, 0.7949969493471933, 0.7030853022640485, 0.8506339844198412, 0.8520038195041865, 1.2159232403369129, 0.9501009619276108, 0.7063032843664507, 0.7676066345629766, 0.8050982844953996, 0.7460373897798731, 0.7531147127154058, 0.8276552672727131, 0.6777639708691676, 0.7759833549063068, 0.8861636486602165, 0.8272606701022402, 0.6742194284453155, 0.8270012408910985, 1.0799793512385585, 0.8343711941947437, 0.6424938240651709, 0.8314721058034046, 0.8687591599744876, 0.7681132139163648, 0.7993270549538212},
+		realWorldViewDuration:          []float64{1.2707067231074189, 1.3797713099533957, 1.1803368837187869, 1.0710943548975358, 1.3055277182347431, 1.3142312827952587, 1.2748087784689972, 1.2580713757160862, 1.2389594986278398, 1.2839951451881206, 0.8404551372521588, 1.7402295383244093, 1.2486807727203340, 1.1529076722170450, 1.2303564416007062, 1.1919067015405667, 1.4317417513319299, 0.8851802701506968, 1.4621618954558588, 1.2629599000198048, 1.3845528649513363, 1.3083813148510797, 1.0320875660949032, 1.2138806234836066, 1.2922205615230111, 1.3530469860253094, 1.5124780338765653, 2.4800000000000000, 0.8339877775027843, 0.7270580752471872, 0.8013511652567021, 0.7489973886099706, 0.9647668631144197, 1.4406086304771719, 1.6376005221775904, 1.3686144679115566, 1.2051140074616571, 1.2232170397428770, 1.1785015757024468, 1.2720488631325702, 1.4845607775546621, 1.0038608184511295, 1.4011693227324362, 1.2782420466946043, 1.0808595015305793, 1.2923716723984215, 1.2876404222029678, 1.3024029638718018, 1.1243308902566644, 1.3825311808461356, 1.1826028495527394, 1.0753560400260920, 1.4587594729770430, 1.3281281084314180, 1.1987898717701806, 1.3212567274973721, 1.2131355949220173, 1.2202213287069972, 1.2345177139086974, 1.1415707241388824, 1.2618615652263814, 1.3978228798726429, 1.1676202853133009, 1.2821402577607839, 1.4378331263208257, 1.0764974304705950, 1.1968636840861584, 1.3079197545950789, 1.3246769344178762, 1.0956265919521080, 1.3056225547363036, 1.3094504040915045, 1.2916519124885637, 1.2995343661957905, 1.0839793112463321, 1.2515453598485311, 1.3907042923175941, 1.1137329234266407, 1.2293962485228747, 1.4537855131563087, 1.1564260868809058, 1.2616419368628695, 1.1777963280146100, 1.2782540498222059, 1.2617698479511545, 1.2911000941099631, 1.1719344274281953, 1.3904853415093545, 1.1612440756337188, 1.1800751870755894, 1.2653752924717137, 1.3987404424771417, 1.1573292016433725, 1.2132227320045601, 1.2835627159341194, 1.3950341330597937, 1.0774862045842490, 1.2361956384863142, 1.3415505497959577, 1.1881870996394799},
+		controllerTargetedViewDuration: []float64{1.2521739130434784, 1.2325291342837938, 1.0924796023620962, 1.1315714628442570, 1.3109201861848649, 1.2904005140483341, 1.2408200617376139, 1.2143186827596988, 1.2001258197216824, 1.2059386524427240, 1.1687014183641575, 1.5938588248347272, 1.1735049856838198, 1.1322996720968055, 1.2010702989934061, 1.2193620268012733, 1.2847380812524840, 1.1111384877632171, 1.4676632072726421, 1.3127404884038874, 1.3036822199799039, 1.1627828776831781, 1.0686746584877680, 1.2585854668086294, 1.3196479113378341, 1.3040688380370420, 1.2092520716891777, 0.9174437864843878, 0.4700000000000000, 0.4700000000000000, 0.4700000000000000, 0.4700000000000000, 0.9677983536241768, 1.4594930877396231, 1.4883132720086421, 1.2213393879261234, 1.1167787676139602, 1.1527862655996910, 1.1844688515164143, 1.2712560882996764, 1.2769188516898307, 1.0483030535756364, 1.2667785513482170, 1.1360673946540731, 1.0930571503977162, 1.2553993593963664, 1.2412509734564154, 1.2173708810202102, 1.1668170515618597, 1.2919854192770974, 1.1785774891590928, 1.2397180299682444, 1.4349751903776191, 1.2686663464941463, 1.1793337443757632, 1.2094760506747269, 1.1283680467942478, 1.1456014869605273, 1.1695603482439110, 1.1883473989997737, 1.3102878097954334, 1.3326636354319201, 1.2033095908546276, 1.2765637682955560, 1.2533105511679674, 1.0561925258579383, 1.1944030230453759, 1.2584181515051163, 1.2181701773236133, 1.1427643645565180, 1.2912929540520488, 1.2606456249879283, 1.2079980980125691, 1.1582846527456185, 1.0914599072895725, 1.2436632334468321, 1.2659732625682767, 1.1373906460646186, 1.2636670215783354, 1.3065542716228340, 1.1145058661373550, 1.1821457478344533, 1.1686494999739092, 1.2421504164081945, 1.2292642544361261, 1.2247229593559099, 1.1857675147732030, 1.2627704665069508, 1.1302481979483210, 1.2027256964130453, 1.2826968566299934, 1.2903197193121982, 1.1497164007008540, 1.2248494620352162, 1.2695192555858241, 1.2492112043621006, 1.1006141873118667, 1.2513218024356318, 1.2846249908259910, 1.2077144025965167},
+	}
+
+	// PART 3: run controller and ensure output matches pre-generated controller response from python ref implementation
+	// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+	// sanity checks:
+	require.Equal(bs.T(), uint64(604800), bs.ctl.currentEpochTiming.targetEndTime-time2unix(refT), "Epoch should end 1 week from now, i.e. 604800s")
+	require.InEpsilon(bs.T(), ref.targetViewTime, bs.ctl.currentEpochTiming.targetViewTime(), 1e-15) // ideal view time
+	require.Equal(bs.T(), len(ref.observedMinViewTimes), len(ref.realWorldViewDuration))
+
+	// Notes:
+	// - We specifically make the first observation when the full time of the epoch still remains.
+ // The Python simulation we compare against proceeds exactly the same way. + // - we first make an observation before requesting the controller output. Thereby, we + // avoid artifacts of recalling a controller that was just initialized with fallback values. + // - we call `measureViewDuration(..)` (_not_ `processIncorporatedBlock(..)`) to avoid + // interfering with the deduplication logic. Here we want to test correct numerics. + // Correctness of the deduplication logic is verified in a different test. + observationTime := refT + + for v := 0; v < len(ref.observedMinViewTimes); v++ { + observedBlock := makeTimedBlock(uint64(v), unittest.IdentifierFixture(), observationTime) + err := bs.ctl.measureViewDuration(observedBlock) + require.NoError(bs.T(), err) + proposalTiming := bs.ctl.getProposalTiming() + tpt := proposalTiming.TargetPublicationTime(uint64(v+1), time.Now(), observedBlock.Block.BlockID) // value for `timeViewEntered` should be irrelevant here + + controllerTargetedViewDuration := tpt.Sub(observedBlock.TimeObserved).Seconds() + bs.T().Logf("%d: ctl=%f\tref=%f\tdiff=%f", v, controllerTargetedViewDuration, ref.controllerTargetedViewDuration[v], controllerTargetedViewDuration-ref.controllerTargetedViewDuration[v]) + require.InEpsilon(bs.T(), ref.controllerTargetedViewDuration[v], controllerTargetedViewDuration, 1e-5, "implementations deviate for view %d", v) + + observationTime = observationTime.Add(sec2dur(ref.realWorldViewDuration[v])) + } +} + +func makeTimedBlock(view uint64, parentID flow.Identifier, time time.Time) TimedBlock { + header := unittest.BlockHeaderFixture(unittest.HeaderWithView(view)) + header.ParentID = parentID + return TimedBlock{ + Block: model.BlockFromFlow(header), + TimeObserved: time, + } +} + +type controllerStateDigest struct { + proportionalErr Ewma + integralErr LeakyIntegrator + + // latestProposalTiming holds the ProposalTiming that the controller generated in response to processing the latest observation + latestProposalTiming ProposalTiming +} + +func captureControllerStateDigest(ctl *BlockTimeController) *controllerStateDigest { + return &controllerStateDigest{ + proportionalErr: ctl.proportionalErr, + integralErr: ctl.integralErr, + latestProposalTiming: ctl.getProposalTiming(), + } +} + +// inProximityOf returns a testify `argumentMatcher` that only accepts durations d, +// such that |d - t| ≤ ε, for specified constants targetValue t and acceptedDeviation ε. +func inProximityOf(targetValue, acceptedDeviation time.Duration) interface{} { + return mock.MatchedBy(func(duration time.Duration) bool { + e := targetValue.Seconds() - duration.Seconds() + return math.Abs(e) <= acceptedDeviation.Abs().Seconds() + }) +} diff --git a/consensus/hotstuff/cruisectl/config.go b/consensus/hotstuff/cruisectl/config.go new file mode 100644 index 00000000000..fa60a3ef191 --- /dev/null +++ b/consensus/hotstuff/cruisectl/config.go @@ -0,0 +1,135 @@ +package cruisectl + +import ( + "time" + + "go.uber.org/atomic" +) + +// DefaultConfig returns the default config for the BlockTimeController. +func DefaultConfig() *Config { + return &Config{ + TimingConfig{ + FallbackProposalDelay: atomic.NewDuration(250 * time.Millisecond), + MinViewDuration: atomic.NewDuration(125 * time.Millisecond), + MaxViewDuration: atomic.NewDuration(910 * time.Millisecond), + Enabled: atomic.NewBool(true), + }, + ControllerParams{ + N_ewma: 5, + N_itg: 50, + KP: 2.0, + KI: 0.6, + KD: 3.0, + }, + } +}
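A brief usage sketch of this configuration API (hypothetical caller, not part of the diff; assumes `fmt` and `time` are imported): because all timing limits are held in atomics, they can be adjusted at runtime through the setters defined further down in this file, without restarting the controller.

func exampleReconfigure() {
	cfg := DefaultConfig()
	_ = cfg.SetMaxViewDuration(800 * time.Millisecond) // tighten the upper limit of authority
	_ = cfg.SetEnabled(false)                          // disable responsive control; FallbackProposalDelay takes over
	fmt.Println(cfg.GetFallbackProposalDuration())     // 250ms, per the defaults above
}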
+// Config defines configuration for the BlockTimeController. +type Config struct { + TimingConfig + ControllerParams +} + +// TimingConfig specifies the BlockTimeController's limits of authority. +type TimingConfig struct { + // FallbackProposalDelay is the minimal block construction delay. When used, it behaves like the + // old command line flag `block-rate-delay`. Specifically, the primary measures the duration from + // starting to construct its proposal to the proposal being ready to be published. If this + // duration is _less_ than FallbackProposalDelay, the primary delays broadcasting its proposal + // by the remainder needed to reach `FallbackProposalDelay`. + // It is used: + // - when Enabled is false + // - when epoch fallback has been triggered + FallbackProposalDelay *atomic.Duration + + // MaxViewDuration is a hard maximum on the total view time targeted by ProposalTiming. + // If the BlockTimeController computes a larger desired ProposalTiming value + // based on the observed error and tuning, this value will be used instead. + MaxViewDuration *atomic.Duration + + // MinViewDuration is a hard minimum on the total view time targeted by ProposalTiming. + // If the BlockTimeController computes a smaller desired ProposalTiming value + // based on the observed error and tuning, this value will be used instead. + MinViewDuration *atomic.Duration + + // Enabled defines whether responsive control of the GetProposalTiming is enabled. + // When disabled, the FallbackProposalDelay is used. + Enabled *atomic.Bool +} + +// ControllerParams specifies the BlockTimeController's internal parameters. +type ControllerParams struct { + // N_ewma defines how historical measurements are incorporated into the EWMA for the proportional error term. + // Intuition: Suppose the input changes from x to y instantaneously: + // - N_ewma is the number of samples required to move the EWMA output about 2/3 of the way from x to y + // Per convention, this must be a _positive_ integer. + N_ewma uint + + // N_itg defines how historical measurements are incorporated into the integral error term. + // Intuition: For a constant error x: + // - the integrator value will saturate at `x•N_itg` + // - an integrator initialized at 0 reaches 2/3 of the saturation value after N_itg samples + // Per convention, this must be a _positive_ integer. + N_itg uint + + // KP, KI, KD are the coefficients to the PID controller and define its response. + // KP adjusts the proportional term (responds to the magnitude of error). + // KI adjusts the integral term (responds to the error sum over a recent time interval). + // KD adjusts the derivative term (responds to the rate of change, i.e. time derivative, of the error). + KP, KI, KD float64 +} + +// alpha returns α, the inclusion parameter for the error EWMA. See N_ewma for details. +func (c *ControllerParams) alpha() float64 { + return 1.0 / float64(c.N_ewma) +} + +// beta returns ß, the memory parameter of the leaky error integrator. See N_itg for details. +func (c *ControllerParams) beta() float64 { + return 1.0 / float64(c.N_itg) +}
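The sampling intuition documented on N_ewma and N_itg can be checked numerically. The following standalone sketch (illustrative only, not part of the diff) applies a step input x to both filters: the EWMA y ← y + α·(x−y) covers about 2/3 of the step after N_ewma samples, and the leaky integrator I ← I·(1−β) + x reaches about 2/3 of its saturation value x·N_itg after N_itg samples.

package main

import "fmt"

func main() {
	const nEwma, nItg = 5, 50 // matching DefaultConfig above
	alpha, beta := 1.0/nEwma, 1.0/nItg
	x := 1.0 // step input: the error jumps from 0 to 1 and stays there
	ewma, itg := 0.0, 0.0
	for i := 1; i <= nItg; i++ {
		ewma += alpha * (x - ewma) // EWMA update with inclusion parameter alpha
		itg = itg*(1-beta) + x     // leaky accumulation with memory parameter beta
		if i == nEwma {
			fmt.Printf("EWMA after %d samples: %.2f (about 2/3 of the step)\n", i, ewma)
		}
	}
	fmt.Printf("integrator after %d samples: %.1f (saturation value x*N_itg = %d; about 2/3 reached)\n", nItg, itg, nItg)
}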
+// GetFallbackProposalDuration returns the proposal duration used when Cruise Control is not active. +func (ctl TimingConfig) GetFallbackProposalDuration() time.Duration { + return ctl.FallbackProposalDelay.Load() +} + +// GetMaxViewDuration returns the max view duration returned by the controller. +func (ctl TimingConfig) GetMaxViewDuration() time.Duration { + return ctl.MaxViewDuration.Load() +} + +// GetMinViewDuration returns the min view duration returned by the controller. +func (ctl TimingConfig) GetMinViewDuration() time.Duration { + return ctl.MinViewDuration.Load() +} + +// GetEnabled returns whether the controller is enabled. +func (ctl TimingConfig) GetEnabled() bool { + return ctl.Enabled.Load() +} + +// SetFallbackProposalDuration sets the proposal duration used when Cruise Control is not active. +func (ctl TimingConfig) SetFallbackProposalDuration(dur time.Duration) error { + ctl.FallbackProposalDelay.Store(dur) + return nil +} + +// SetMaxViewDuration sets the max view duration returned by the controller. +func (ctl TimingConfig) SetMaxViewDuration(dur time.Duration) error { + ctl.MaxViewDuration.Store(dur) + return nil +} + +// SetMinViewDuration sets the min view duration returned by the controller. +func (ctl TimingConfig) SetMinViewDuration(dur time.Duration) error { + ctl.MinViewDuration.Store(dur) + return nil +} + +// SetEnabled sets whether the controller is enabled. +func (ctl TimingConfig) SetEnabled(enabled bool) error { + ctl.Enabled.Store(enabled) + return nil +} diff --git a/consensus/hotstuff/cruisectl/proposal_timing.go b/consensus/hotstuff/cruisectl/proposal_timing.go new file mode 100644 index 00000000000..8f6082ad252 --- /dev/null +++ b/consensus/hotstuff/cruisectl/proposal_timing.go @@ -0,0 +1,154 @@ +package cruisectl + +import ( + "time" + + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/model/flow" +) + +// ProposalTiming encapsulates the output of the BlockTimeController. On the happy path, +// the controller observes a block and generates a specific ProposalTiming in response. +// For the happy path, the ProposalTiming describes when the child proposal should be +// broadcast. +// However, observations other than blocks might also be used to instantiate ProposalTiming +// objects, e.g. controller instantiation, a disabled controller, etc. +// The purpose of ProposalTiming is to convert the controller output to timing information +// that the EventHandler understands. By convention, ProposalTiming should be treated as +// immutable. +type ProposalTiming interface { + hotstuff.ProposalDurationProvider + + // ObservationView returns the view of the observation that the controller + // processed when generating this ProposalTiming instance. + ObservationView() uint64 + + // ObservationTime returns the time when the controller received the + // observation leading to the generation of this ProposalTiming instance. + ObservationTime() time.Time +}
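As a consumer-side illustration of this interface (hypothetical helper, not the actual MessageHub logic): a component holding a ProposalTiming can query TargetPublicationTime and simply sleep away the remainder before broadcasting.

// waitToPublish is a hypothetical consumer of ProposalTiming: it blocks until the
// target publication time has passed and then hands off to the broadcast logic.
func waitToPublish(pt ProposalTiming, proposalView uint64, timeViewEntered time.Time, parentBlockID flow.Identifier) {
	target := pt.TargetPublicationTime(proposalView, timeViewEntered, parentBlockID)
	if remaining := time.Until(target); remaining > 0 {
		time.Sleep(remaining)
	}
	// ... broadcast the proposal here ...
}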
+/* *************************************** publishImmediately *************************************** */ + +// publishImmediately implements ProposalTiming: it returns the time when the view +// was entered as the TargetPublicationTime. By convention, publishImmediately should +// be treated as immutable. +type publishImmediately struct { + observationView uint64 + observationTime time.Time +} + +var _ ProposalTiming = (*publishImmediately)(nil) + +func newPublishImmediately(observationView uint64, observationTime time.Time) *publishImmediately { + return &publishImmediately{ + observationView: observationView, + observationTime: observationTime, + } +} + +func (pt *publishImmediately) TargetPublicationTime(_ uint64, timeViewEntered time.Time, _ flow.Identifier) time.Time { + return timeViewEntered +} +func (pt *publishImmediately) ObservationView() uint64 { return pt.observationView } +func (pt *publishImmediately) ObservationTime() time.Time { return pt.observationTime } +func (pt *publishImmediately) ConstrainedBlockTime() time.Duration { return 0 } + +/* *************************************** happyPathBlockTime *************************************** */ + +// happyPathBlockTime implements ProposalTiming for the happy path. Here, `TimedBlock` is the _latest_ block that the +// controller observed, and `unconstrainedBlockTime` the view duration the controller computed for the _child_ of this block. +// This type internally holds the _unconstrained_ view duration as computed by the BlockTimeController. Caution, +// no limits of authority have been applied to this value yet. The final controller output satisfying the limits of +// authority is computed by function `ConstrainedBlockTime()`. +// +// For a given view where we are the primary, suppose the parent block we are building on top of has been observed +// at time `t := TimedBlock.TimeObserved` and applying the limits of authority yields `d := ConstrainedBlockTime()`. +// Then, `TargetPublicationTime(..)` returns `t + d` as the target publication time for the child block. +// +// By convention, happyPathBlockTime should be treated as immutable. +// TODO: any additional logic for assisting the EventHandler in determining the applied delay should be added to the ControllerViewDuration +type happyPathBlockTime struct { + TimedBlock // latest block observed by the controller, including the time stamp when the controller received the block [UTC] + constrainedBlockTime time.Duration // block time _after_ applying limits of authority to unconstrainedBlockTime +} + +var _ ProposalTiming = (*happyPathBlockTime)(nil) + +// newHappyPathBlockTime instantiates a new happyPathBlockTime. Inputs: +// - `timedBlock` references the _published_ block with the highest view known to this node. +// On the consensus happy path, this node may construct the child block (iff it is the primary for +// view `timedBlock.Block.View` + 1). Note that the controller determines when to publish this child. +// In other words, when the primary determines at what future time to broadcast the child, the child +// has _not_ been published and the `timedBlock` references the parent on the happy path (or another +// earlier block on the unhappy path) +// - `unconstrainedBlockTime` is the delay, relative to `timedBlock.TimeObserved`, when the controller would +// like the child block to be published. Caution, no limits of authority have been applied to this value yet! +// - `timingConfig`, which defines the limits of authority for the controller. +// +// Within the constructor, we compute the block time τ on the happy path. I.e. how much later a _direct child_ +// of the `timedBlock` should be published (also accounting for the controller's limits of authority).
+func newHappyPathBlockTime(timedBlock TimedBlock, unconstrainedBlockTime time.Duration, timingConfig TimingConfig) *happyPathBlockTime { + return &happyPathBlockTime{ + TimedBlock: timedBlock, + constrainedBlockTime: min(max(unconstrainedBlockTime, timingConfig.MinViewDuration.Load()), timingConfig.MaxViewDuration.Load()), + } +} + +func (pt *happyPathBlockTime) ObservationView() uint64 { return pt.Block.View } +func (pt *happyPathBlockTime) ObservationTime() time.Time { return pt.TimeObserved } +func (pt *happyPathBlockTime) ConstrainedBlockTime() time.Duration { return pt.constrainedBlockTime } + +// TargetPublicationTime operates in two possible modes: +// 1. If `parentBlockId` matches our `TimedBlock`, i.e. the EventHandler is just building the child block, then +// we return `TimedBlock.TimeObserved + ConstrainedBlockTime` as the target publication time for the child block. +// 2. If `parentBlockId` does _not_ match our `TimedBlock`, the EventHandler should release the block immediately. +// This heuristic is based on the intuition that block time is expected to be very long when deviating from the happy path. +func (pt *happyPathBlockTime) TargetPublicationTime(proposalView uint64, timeViewEntered time.Time, parentBlockId flow.Identifier) time.Time { + if parentBlockId != pt.Block.BlockID { + return timeViewEntered // broadcast immediately + } + return pt.TimeObserved.Add(pt.ConstrainedBlockTime()) // happy path +} + +/* *************************************** fallbackTiming for EFM *************************************** */ + +// fallbackTiming implements ProposalTiming for the basic fallback: +// function `TargetPublicationTime(..)` always returns `timeViewEntered + defaultProposalDuration` +type fallbackTiming struct { + observationView uint64 + observationTime time.Time + defaultProposalDuration time.Duration +} + +var _ ProposalTiming = (*fallbackTiming)(nil) + +func newFallbackTiming(observationView uint64, observationTime time.Time, defaultProposalDuration time.Duration) *fallbackTiming { + return &fallbackTiming{ + observationView: observationView, + observationTime: observationTime, + defaultProposalDuration: defaultProposalDuration, + } +} + +func (pt *fallbackTiming) TargetPublicationTime(_ uint64, timeViewEntered time.Time, _ flow.Identifier) time.Time { + return timeViewEntered.Add(pt.defaultProposalDuration) +} +func (pt *fallbackTiming) ObservationView() uint64 { return pt.observationView } +func (pt *fallbackTiming) ObservationTime() time.Time { return pt.observationTime } + +/* *************************************** auxiliary functions *************************************** */ + +func min(d1, d2 time.Duration) time.Duration { + if d1 < d2 { + return d1 + } + return d2 +} + +func max(d1, d2 time.Duration) time.Duration { + if d1 > d2 { + return d1 + } + return d2 +} diff --git a/consensus/hotstuff/event_handler.go b/consensus/hotstuff/event_handler.go index a2134680389..6deda44eeca 100644 --- a/consensus/hotstuff/event_handler.go +++ b/consensus/hotstuff/event_handler.go @@ -39,7 +39,7 @@ type EventHandler interface { // consensus participant. // All inputs should be validated before feeding into this function. Assuming trusted data. // No errors are expected during normal operation. - OnReceiveProposal(proposal *model.Proposal) error + OnReceiveProposal(proposal *model.SignedProposal) error // OnLocalTimeout handles a local timeout event by creating a model.TimeoutObject and broadcasting it. // No errors are expected during normal operation.
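To make the happy-path arithmetic concrete, a small sketch using the types from proposal_timing.go above (hypothetical values; `fmt` and `time` imports assumed): with the default limits of authority of 125ms to 910ms per DefaultConfig, an unconstrained controller output of 1.2s is clamped to 910ms, so the direct child of the observed block targets publication at TimeObserved + 910ms, while building on any other parent publishes immediately.

func exampleHappyPathTiming(observed TimedBlock, cfg TimingConfig) {
	pt := newHappyPathBlockTime(observed, 1200*time.Millisecond, cfg) // clamped to cfg.MaxViewDuration
	// Mode 1: building the direct child of the observed block yields a delayed publication time.
	onPath := pt.TargetPublicationTime(observed.Block.View+1, time.Now(), observed.Block.BlockID)
	// Mode 2: a different parent (off the happy path) returns timeViewEntered, i.e. publish immediately.
	offPath := pt.TargetPublicationTime(observed.Block.View+1, time.Now(), flow.ZeroID)
	fmt.Println("on happy path:", onPath, "off happy path:", offPath)
}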
diff --git a/consensus/hotstuff/eventhandler/event_handler.go b/consensus/hotstuff/eventhandler/event_handler.go index c6f4acdb23a..0f5bbaaf61c 100644 --- a/consensus/hotstuff/eventhandler/event_handler.go +++ b/consensus/hotstuff/eventhandler/event_handler.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/logging" ) // EventHandler is the main handler for individual events that trigger state transition. @@ -134,7 +135,7 @@ func (e *EventHandler) OnReceiveTc(tc *flow.TimeoutCertificate) error { // consensus participant. // All inputs should be validated before feeding into this function. Assuming trusted data. // No errors are expected during normal operation. -func (e *EventHandler) OnReceiveProposal(proposal *model.Proposal) error { +func (e *EventHandler) OnReceiveProposal(proposal *model.SignedProposal) error { block := proposal.Block curView := e.paceMaker.CurView() log := e.log.With(). @@ -308,11 +309,15 @@ func (e *EventHandler) broadcastTimeoutObjectIfAuthorized() error { return nil } -// proposeForNewViewIfPrimary will only be called when we may able to propose a block, after processing a new event. +// proposeForNewViewIfPrimary will only be called when we may be able to propose a block, after processing a new event. // - after entering a new view as a result of processing a QC or TC, then we may propose for the newly entered view // - after receiving a proposal (but not changing view), if that proposal is referenced by our highest known QC, // and the proposal was previously unknown, then we can propose a block in the current view // +// Enforced INVARIANTS: +// - There will be at most one `OnOwnProposal` notification emitted for views where this node is the leader, and none +// if another node is the leader. This holds irrespective of restarts. Formally, this prevents proposal equivocation. +// // It reads the current view, and generates a proposal if we are the leader. // No errors are expected during normal operation. func (e *EventHandler) proposeForNewViewIfPrimary() error { @@ -330,16 +335,10 @@ func (e *EventHandler) proposeForNewViewIfPrimary() error { e.notifier.OnCurrentViewDetails(curView, finalizedView, currentLeader) - // check that I am the primary for this view and that I haven't already proposed; otherwise there is nothing to do + // check that I am the primary for this view if e.committee.Self() != currentLeader { return nil } - for _, b := range e.forks.GetBlocksForView(curView) { - if b.ProposerID == e.committee.Self() { - log.Debug().Msg("already proposed for current view") - return nil - } - } // attempt to generate proposal: newestQC := e.paceMaker.NewestQC() @@ -359,7 +358,7 @@ func (e *EventHandler) proposeForNewViewIfPrimary() error { // Sanity checks to make sure that resulting proposal is valid: // In its proposal, the leader for view N needs to present evidence that it has legitimately entered view N. // As evidence, we include a QC or TC for view N-1, which should always be available as the PaceMaker advances - to view N only after observing a QC or TC from view N-1. Moreover QC and TC are always processed together. As + to view N only after observing a QC or TC from view N-1. Moreover, QC and TC are always processed together. As EventHandler is strictly single-threaded without reentrancy, we must have a QC or TC for the prior view (curView-1).
// Failing one of these sanity checks is a symptom of state corruption or a severe implementation bug. if newestQC.View+1 != curView { @@ -377,29 +376,64 @@ lastViewTC = nil } + // Construct Own SignedProposal + // CAUTION, design constraints: + // (i) We cannot process our own proposal within the `EventHandler` right away. + // (ii) We cannot add our own proposal to Forks here right away. + // (iii) Metrics for the PaceMaker/CruiseControl assume that the EventHandler is the only caller of + // `TargetPublicationTime`. Technically, `TargetPublicationTime` records the publication delay + // relative to its _latest_ call. + // + // To satisfy all constraints, we construct the proposal here and query (once!) its `TargetPublicationTime`. Though, + // we do _not_ process our own blocks right away and instead ingest them into the EventHandler the same way as + // proposals from other consensus participants. Specifically, on the path through the HotStuff state machine leading + // to block construction, the node's own proposal is largely ephemeral. The proposal is handed to the `MessageHub` (via + // the `OnOwnProposal` notification including the `TargetPublicationTime`). The `MessageHub` waits until + // `TargetPublicationTime` and only then broadcasts the proposal and puts it into the EventLoop's queue + // for inbound blocks. This is exactly the same way as proposals from other nodes are ingested by the `EventHandler`, + // except that we are skipping the ComplianceEngine (assuming that our own proposals are protocol-compliant). + // + // Context: + // • On constraint (i): We want to support consensus committees only consisting of a *single* node. If the EventHandler + // internally processed the block right away via a direct message call, the call-stack would be ever-growing and + // the node would crash eventually (we experienced this with a very early HotStuff implementation). Specifically, + // if we wanted to process the block directly without taking a detour through the EventLoop's inbound queue, + // we would call `OnReceiveProposal` here. The function `OnReceiveProposal` would then end up calling + // `proposeForNewViewIfPrimary` (this function) to generate the next proposal, which again + // would result in calling `OnReceiveProposal` and so on and so forth until the call stack or memory limit is reached + // and the node crashes. This is only a problem for consensus committees of size 1. + // • On constraint (ii): When adding a proposal to Forks, Forks emits a `BlockIncorporatedEvent` notification, which + // is observed by Cruise Control and would change its state. However, note that Cruise Control is trying to estimate + // the point in time when _other_ nodes are observing the proposal. The time when we broadcast the proposal (i.e. + // `TargetPublicationTime`) is a reasonably good estimator, but *not* the time the proposer constructed the block + // (because there is potentially still a significant wait until `TargetPublicationTime`). + // + // The current approach is for a node to process its own proposals at the same time and through the same code path as + // proposals from other nodes. This satisfies constraints (i) and (ii) and generates very strong consistency from a + // software design perspective.
+ // Just hypothetically, if we changed Cruise Control to be notified about own block proposals _only_ when they are + // broadcast (satisfying constraint (ii) without relying on the EventHandler), then we could add a proposal to Forks + // here right away. Nevertheless, the restriction remains that we cannot process that proposal right away within the + // EventHandler and instead need to put it into the EventLoop's inbound queue to support consensus committees of size 1. flowProposal, err := e.blockProducer.MakeBlockProposal(curView, newestQC, lastViewTC) if err != nil { + if model.IsNoVoteError(err) { + log.Info().Err(err).Msg("aborting block proposal to prevent equivocation (likely re-entered proposal logic due to crash)") + return nil + } return fmt.Errorf("can not make block proposal for curView %v: %w", curView, err) } - proposedBlock := model.BlockFromFlow(flowProposal) // turn the signed flow header into a proposal - - // we want to store created proposal in forks to make sure that we don't create more proposals for - // current view. Due to asynchronous nature of our design it's possible that after creating proposal - // we will be asked to propose again for same view. - err = e.forks.AddValidatedBlock(proposedBlock) - if err != nil { - return fmt.Errorf("could not add newly created proposal (%v): %w", proposedBlock.BlockID, err) - } + targetPublicationTime := e.paceMaker.TargetPublicationTime(flowProposal.Header.View, start, flowProposal.Header.ParentID) // determine target publication time log.Debug(). - Uint64("block_view", proposedBlock.View). - Hex("block_id", proposedBlock.BlockID[:]). + Uint64("block_view", flowProposal.Header.View). + Time("target_publication", targetPublicationTime). + Hex("block_id", logging.ID(flowProposal.Header.ID())). Uint64("parent_view", newestQC.View). Hex("parent_id", newestQC.BlockID[:]). - Hex("signer", proposedBlock.ProposerID[:]). + Hex("signer", flowProposal.Header.ProposerID[:]). Msg("forwarding proposal to communicator for broadcasting") - // raise a notification with proposal (also triggers broadcast) - targetPublicationTime := start.Add(e.paceMaker.BlockRateDelay()) + // emit notification with own proposal (also triggers broadcast) e.notifier.OnOwnProposal(flowProposal, targetPublicationTime) return nil } @@ -408,7 +442,7 @@ func (e *EventHandler) proposeForNewViewIfPrimary() error { // It is called AFTER the block has been stored or found in Forks // It checks whether to vote for this block. // No errors are expected during normal operation. -func (e *EventHandler) processBlockForCurrentView(proposal *model.Proposal) error { +func (e *EventHandler) processBlockForCurrentView(proposal *model.SignedProposal) error { // sanity check that block is really for the current view: curView := e.paceMaker.CurView() block := proposal.Block @@ -443,7 +477,7 @@ func (e *EventHandler) processBlockForCurrentView(proposal *model.Proposal) erro // ownVote generates and forwards the own vote, if we decide to vote. // Any errors are potential symptoms of uncovered edge cases or corrupted internal state (fatal). // No errors are expected during normal operation. -func (e *EventHandler) ownVote(proposal *model.Proposal, curView uint64, nextLeader flow.Identifier) error { +func (e *EventHandler) ownVote(proposal *model.SignedProposal, curView uint64, nextLeader flow.Identifier) error { block := proposal.Block log := e.log.With(). Uint64("block_view", block.View). 
@@ -473,7 +507,6 @@ func (e *EventHandler) ownVote(proposal *model.Proposal, curView uint64, nextLea } log.Debug().Msg("forwarding vote to compliance engine") - // raise a notification to send vote - e.notifier.OnOwnVote(ownVote.BlockID, ownVote.View, ownVote.SigData, nextLeader) + e.notifier.OnOwnVote(ownVote, nextLeader) return nil } diff --git a/consensus/hotstuff/eventhandler/event_handler_test.go b/consensus/hotstuff/eventhandler/event_handler_test.go index aeec6da1101..b469a47142b 100644 --- a/consensus/hotstuff/eventhandler/event_handler_test.go +++ b/consensus/hotstuff/eventhandler/event_handler_test.go @@ -38,11 +38,13 @@ type TestPaceMaker struct { var _ hotstuff.PaceMaker = (*TestPaceMaker)(nil) -func NewTestPaceMaker(timeoutController *timeout.Controller, +func NewTestPaceMaker( + timeoutController *timeout.Controller, + proposalDelayProvider hotstuff.ProposalDurationProvider, notifier hotstuff.Consumer, persist hotstuff.Persister, ) *TestPaceMaker { - p, err := pacemaker.New(timeoutController, notifier, persist) + p, err := pacemaker.New(timeoutController, proposalDelayProvider, notifier, persist) if err != nil { panic(err) } @@ -74,18 +76,12 @@ func (p *TestPaceMaker) LastViewTC() *flow.TimeoutCertificate { // using a real pacemaker for testing event handler func initPaceMaker(t require.TestingT, ctx context.Context, livenessData *hotstuff.LivenessData) hotstuff.PaceMaker { notifier := &mocks.Consumer{} - tc, err := timeout.NewConfig( - time.Duration(minRepTimeout*1e6), - time.Duration(maxRepTimeout*1e6), - multiplicativeIncrease, - happyPathMaxRoundFailures, - 0, - time.Duration(maxRepTimeout*1e6)) + tc, err := timeout.NewConfig(time.Duration(minRepTimeout*1e6), time.Duration(maxRepTimeout*1e6), multiplicativeIncrease, happyPathMaxRoundFailures, time.Duration(maxRepTimeout*1e6)) require.NoError(t, err) persist := &mocks.Persister{} persist.On("PutLivenessData", mock.Anything).Return(nil).Maybe() persist.On("GetLivenessData").Return(livenessData, nil).Once() - pm := NewTestPaceMaker(timeout.NewController(tc), notifier, persist) + pm := NewTestPaceMaker(timeout.NewController(tc), pacemaker.NoProposalDelay(), notifier, persist) notifier.On("OnStartingTimeout", mock.Anything).Return() notifier.On("OnQcTriggeredViewChange", mock.Anything, mock.Anything, mock.Anything).Return() notifier.On("OnTcTriggeredViewChange", mock.Anything, mock.Anything, mock.Anything).Return() @@ -136,14 +132,14 @@ func NewSafetyRules(t *testing.T) *SafetyRules { // SafetyRules will not vote for any block, unless the blockID exists in votable map safetyRules.On("ProduceVote", mock.Anything, mock.Anything).Return( - func(block *model.Proposal, _ uint64) *model.Vote { + func(block *model.SignedProposal, _ uint64) *model.Vote { _, ok := safetyRules.votable[block.Block.BlockID] if !ok { return nil } return createVote(block.Block) }, - func(block *model.Proposal, _ uint64) error { + func(block *model.SignedProposal, _ uint64) error { _, ok := safetyRules.votable[block.Block.BlockID] if !ok { return model.NewNoVoteErrorf("block not found") @@ -183,7 +179,7 @@ func NewForks(t *testing.T, finalized uint64) *Forks { } f.On("AddValidatedBlock", mock.Anything).Return(func(proposal *model.Block) error { - log.Info().Msgf("forks.AddValidatedBlock received Proposal for view: %v, QC: %v\n", proposal.View, proposal.QC.View) + log.Info().Msgf("forks.AddValidatedBlock received Block proposal for view: %v, QC: %v\n", proposal.View, proposal.QC.View) return f.addProposal(proposal) }).Maybe() @@ -226,20 +222,31 @@ func 
NewForks(t *testing.T, finalized uint64) *Forks { return f } -// BlockProducer mock will always make a valid block +// BlockProducer mock will always make a valid block, exactly once per view. +// If it is requested to make a block twice for the same view, returns model.NoVoteError type BlockProducer struct { - proposerID flow.Identifier + proposerID flow.Identifier + producedBlockForView map[uint64]bool } -func (b *BlockProducer) MakeBlockProposal(view uint64, qc *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*flow.Header, error) { - return model.ProposalToFlow(&model.Proposal{ - Block: helper.MakeBlock( +func NewBlockProducer(proposerID flow.Identifier) *BlockProducer { + return &BlockProducer{ + proposerID: proposerID, + producedBlockForView: make(map[uint64]bool), + } +} + +func (b *BlockProducer) MakeBlockProposal(view uint64, qc *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*flow.ProposalHeader, error) { + if b.producedBlockForView[view] { + return nil, model.NewNoVoteErrorf("block already produced") + } + b.producedBlockForView[view] = true + return helper.SignedProposalToFlow(helper.MakeSignedProposal(helper.WithProposal( + helper.MakeProposal(helper.WithBlock(helper.MakeBlock( helper.WithBlockView(view), helper.WithBlockQC(qc), - helper.WithBlockProposer(b.proposerID), - ), - LastViewTC: lastViewTC, - }), nil + helper.WithBlockProposer(b.proposerID))), + helper.WithLastViewTC(lastViewTC))))), nil } func TestEventHandler(t *testing.T) { @@ -262,8 +269,8 @@ type EventHandlerSuite struct { initView uint64 // the current view at the beginning of the test case endView uint64 // the expected current view at the end of the test case - parentProposal *model.Proposal - votingProposal *model.Proposal + parentProposal *model.SignedProposal + votingProposal *model.SignedProposal qc *flow.QuorumCertificate tc *flow.TimeoutCertificate newview *model.NewViewEvent @@ -289,7 +296,7 @@ func (es *EventHandlerSuite) SetupTest() { es.forks = NewForks(es.T(), finalized) es.persist = mocks.NewPersister(es.T()) es.persist.On("PutStarted", mock.Anything).Return(nil).Maybe() - es.blockProducer = &BlockProducer{proposerID: es.committee.Self()} + es.blockProducer = NewBlockProducer(es.committee.Self()) es.safetyRules = NewSafetyRules(es.T()) es.notifier = mocks.NewConsumer(es.T()) es.notifier.On("OnEventProcessed").Maybe() @@ -426,7 +433,12 @@ func (es *EventHandlerSuite) TestOnReceiveProposal_Vote_NextLeader() { // proposal is safe to vote es.safetyRules.votable[proposal.Block.BlockID] = struct{}{} - es.notifier.On("OnOwnVote", proposal.Block.BlockID, proposal.Block.View, mock.Anything, mock.Anything).Once() + vote := &model.Vote{ + BlockID: proposal.Block.BlockID, + View: proposal.Block.View, + } + + es.notifier.On("OnOwnVote", vote, mock.Anything).Once() // vote should be created for this proposal err := es.eventhandler.OnReceiveProposal(proposal) @@ -442,7 +454,13 @@ func (es *EventHandlerSuite) TestOnReceiveProposal_Vote_NotNextLeader() { // proposal is safe to vote es.safetyRules.votable[proposal.Block.BlockID] = struct{}{} - es.notifier.On("OnOwnVote", proposal.Block.BlockID, mock.Anything, mock.Anything, mock.Anything).Once() + vote := &model.Vote{ + BlockID: proposal.Block.BlockID, + View: proposal.Block.View, + SignerID: flow.ZeroID, + } + + es.notifier.On("OnOwnVote", vote, mock.Anything).Once() // vote should be created for this proposal err := es.eventhandler.OnReceiveProposal(proposal) @@ -467,10 +485,10 @@ func (es *EventHandlerSuite) 
TestOnReceiveProposal_ProposeAfterReceivingQC() { es.committee.leaders[es.paceMaker.CurView()] = struct{}{} es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - header, ok := args[0].(*flow.Header) + proposal, ok := args[0].(*flow.ProposalHeader) require.True(es.T(), ok) // it should broadcast a header as the same as current view - require.Equal(es.T(), es.paceMaker.CurView(), header.View) + require.Equal(es.T(), es.paceMaker.CurView(), proposal.Header.View) }).Once() // processing this proposal shouldn't trigger view change since we have already seen QC. @@ -501,10 +519,10 @@ func (es *EventHandlerSuite) TestOnReceiveProposal_ProposeAfterReceivingTC() { es.committee.leaders[es.paceMaker.CurView()] = struct{}{} es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - header, ok := args[0].(*flow.Header) + proposal, ok := args[0].(*flow.ProposalHeader) require.True(es.T(), ok) // it should broadcast a header as the same as current view - require.Equal(es.T(), es.paceMaker.CurView(), header.View) + require.Equal(es.T(), es.paceMaker.CurView(), proposal.Header.View) }).Once() // processing this proposal shouldn't trigger view change, since we have already seen QC. @@ -605,10 +623,10 @@ func (es *EventHandlerSuite) TestOnReceiveQc_NextLeaderProposes() { require.NoError(es.T(), err) es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - header, ok := args[0].(*flow.Header) + proposal, ok := args[0].(*flow.ProposalHeader) require.True(es.T(), ok) // it should broadcast a header as the same as endView - require.Equal(es.T(), es.endView, header.View) + require.Equal(es.T(), es.endView, proposal.Header.View) }).Once() // after receiving proposal build QC and deliver it to event handler @@ -667,16 +685,16 @@ func (es *EventHandlerSuite) TestOnReceiveTc_NextLeaderProposes() { es.endView++ es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - header, ok := args[0].(*flow.Header) + proposal, ok := args[0].(*flow.ProposalHeader) require.True(es.T(), ok) // it should broadcast a header as the same as endView - require.Equal(es.T(), es.endView, header.View) + require.Equal(es.T(), es.endView, proposal.Header.View) // proposed block should contain valid newest QC and lastViewTC expectedNewestQC := es.paceMaker.NewestQC() - proposal := model.ProposalFromFlow(header) - require.Equal(es.T(), expectedNewestQC, proposal.Block.QC) - require.Equal(es.T(), es.paceMaker.LastViewTC(), proposal.LastViewTC) + hotstuffProposal := model.SignedProposalFromFlow(proposal) + require.Equal(es.T(), expectedNewestQC, hotstuffProposal.Block.QC) + require.Equal(es.T(), es.paceMaker.LastViewTC(), hotstuffProposal.LastViewTC) }).Once() err := es.eventhandler.OnReceiveTc(es.tc) @@ -770,6 +788,8 @@ func (es *EventHandlerSuite) Test100Timeout() { // TestLeaderBuild100Blocks tests scenario where leader builds 100 proposals one after another func (es *EventHandlerSuite) TestLeaderBuild100Blocks() { + require.Equal(es.T(), 1, len(es.forks.proposals), "expect Forks to contain only root block") + // I'm the leader for the first view es.committee.leaders[es.initView] = struct{}{} @@ -796,11 +816,15 @@ func (es *EventHandlerSuite) TestLeaderBuild100Blocks() { es.endView++ es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - header, ok := args[0].(*flow.Header) + ownProposal, ok := args[0].(*flow.ProposalHeader) require.True(es.T(), ok) 
- require.Equal(es.T(), proposal.Block.View+1, header.View) + require.Equal(es.T(), proposal.Block.View+1, ownProposal.Header.View) }).Once() - es.notifier.On("OnOwnVote", proposal.Block.BlockID, proposal.Block.View, mock.Anything, mock.Anything).Once() + vote := &model.Vote{ + View: proposal.Block.View, + BlockID: proposal.Block.BlockID, + } + es.notifier.On("OnOwnVote", vote, mock.Anything).Once() err := es.eventhandler.OnReceiveProposal(proposal) require.NoError(es.T(), err) @@ -809,7 +833,8 @@ func (es *EventHandlerSuite) TestLeaderBuild100Blocks() { } require.Equal(es.T(), es.endView, es.paceMaker.CurView(), "incorrect view change") - require.Equal(es.T(), totalView, (len(es.forks.proposals)-1)/2) + require.Equal(es.T(), totalView+1, len(es.forks.proposals), "expect Forks to contain root block + 100 proposed blocks") + es.notifier.AssertExpectations(es.T()) } // TestFollowerFollows100Blocks tests scenario where follower receives 100 proposals one after another @@ -887,10 +912,10 @@ func (es *EventHandlerSuite) TestCreateProposal_SanityChecks() { es.committee.leaders[tc.View+1] = struct{}{} es.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - header, ok := args[0].(*flow.Header) + proposal, ok := args[0].(*flow.ProposalHeader) require.True(es.T(), ok) // we need to make sure that produced proposal contains only QC even if there is TC for previous view as well - require.Nil(es.T(), header.LastViewTC) + require.Nil(es.T(), proposal.Header.LastViewTC) }).Once() err := es.eventhandler.OnReceiveTc(tc) @@ -1019,8 +1044,8 @@ func createQC(parent *model.Block) *flow.QuorumCertificate { qc := &flow.QuorumCertificate{ BlockID: parent.BlockID, View: parent.View, - SignerIndices: nil, - SigData: nil, + SignerIndices: unittest.SignerIndicesFixture(3), + SigData: unittest.SignatureFixture(), } return qc } @@ -1034,10 +1059,7 @@ func createVote(block *model.Block) *model.Vote { } } -func createProposal(view uint64, qcview uint64) *model.Proposal { +func createProposal(view uint64, qcview uint64) *model.SignedProposal { block := createBlockWithQC(view, qcview) - return &model.Proposal{ - Block: block, - SigData: nil, - } + return helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(block)))) } diff --git a/consensus/hotstuff/eventloop/event_loop.go b/consensus/hotstuff/eventloop/event_loop.go index 627a48e5e4a..de31b9b654d 100644 --- a/consensus/hotstuff/eventloop/event_loop.go +++ b/consensus/hotstuff/eventloop/event_loop.go @@ -22,7 +22,7 @@ import ( // it contains an attached insertionTime that is used to measure how long we have waited between queening proposal and // actually processing by `EventHandler`. type queuedProposal struct { - proposal *model.Proposal + proposal *model.SignedProposal insertionTime time.Time } @@ -32,6 +32,7 @@ type EventLoop struct { log zerolog.Logger eventHandler hotstuff.EventHandler metrics module.HotstuffMetrics + mempoolMetrics module.MempoolMetrics proposals chan queuedProposal newestSubmittedTc *tracker.NewestTCTracker newestSubmittedQc *tracker.NewestQCTracker @@ -46,19 +47,25 @@ var _ hotstuff.EventLoop = (*EventLoop)(nil) var _ component.Component = (*EventLoop)(nil) // NewEventLoop creates an instance of EventLoop. 
-func NewEventLoop(log zerolog.Logger, metrics module.HotstuffMetrics, eventHandler hotstuff.EventHandler, startTime time.Time) (*EventLoop, error) { +func NewEventLoop( + log zerolog.Logger, + metrics module.HotstuffMetrics, + mempoolMetrics module.MempoolMetrics, + eventHandler hotstuff.EventHandler, + startTime time.Time, +) (*EventLoop, error) { // we will use a buffered channel to avoid blocking of caller // we can't afford to drop messages since it undermines liveness, but we also want to avoid blocking of compliance // engine. We assume that we should be able to process proposals faster than compliance engine feeds them, worst case // we will fill the buffer and block compliance engine worker but that should happen only if compliance engine receives // large number of blocks in short period of time(when catching up for instance). - // TODO(active-pacemaker) add metrics for length of inbound channels proposals := make(chan queuedProposal, 1000) el := &EventLoop{ - log: log, + log: log.With().Str("component", "hotstuff.event_loop").Logger(), eventHandler: eventHandler, metrics: metrics, + mempoolMetrics: mempoolMetrics, proposals: proposals, tcSubmittedNotifier: engine.NewNotifier(), qcSubmittedNotifier: engine.NewNotifier(), @@ -256,7 +263,7 @@ func (el *EventLoop) loop(ctx context.Context) error { } // SubmitProposal pushes the received block to the proposals channel -func (el *EventLoop) SubmitProposal(proposal *model.Proposal) { +func (el *EventLoop) SubmitProposal(proposal *model.SignedProposal) { queueItem := queuedProposal{ proposal: proposal, insertionTime: time.Now(), @@ -266,7 +273,7 @@ func (el *EventLoop) SubmitProposal(proposal *model.Proposal) { case <-el.ComponentManager.ShutdownSignal(): return } - + el.mempoolMetrics.MempoolEntries(metrics.HotstuffEventTypeOnProposal, uint(len(el.proposals))) } // onTrustedQC pushes the received QC(which MUST be validated) to the quorumCertificates channel diff --git a/consensus/hotstuff/eventloop/event_loop_test.go b/consensus/hotstuff/eventloop/event_loop_test.go index 3f63b76f8d9..78dc5769dd2 100644 --- a/consensus/hotstuff/eventloop/event_loop_test.go +++ b/consensus/hotstuff/eventloop/event_loop_test.go @@ -46,13 +46,13 @@ func (s *EventLoopTestSuite) SetupTest() { log := zerolog.New(io.Discard) - eventLoop, err := NewEventLoop(log, metrics.NewNoopCollector(), s.eh, time.Time{}) + eventLoop, err := NewEventLoop(log, metrics.NewNoopCollector(), metrics.NewNoopCollector(), s.eh, time.Time{}) require.NoError(s.T(), err) s.eventLoop = eventLoop ctx, cancel := context.WithCancel(context.Background()) s.cancel = cancel - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx := irrecoverable.NewMockSignalerContext(s.T(), ctx) s.eventLoop.Start(signalerCtx) unittest.RequireCloseBefore(s.T(), s.eventLoop.Ready(), 100*time.Millisecond, "event loop not started") @@ -75,7 +75,7 @@ func (s *EventLoopTestSuite) TestReadyDone() { // Test_SubmitQC tests that submitted proposal is eventually sent to event handler for processing func (s *EventLoopTestSuite) Test_SubmitProposal() { - proposal := helper.MakeProposal() + proposal := helper.MakeSignedProposal() processed := atomic.NewBool(false) s.eh.On("OnReceiveProposal", proposal).Run(func(args mock.Arguments) { processed.Store(true) @@ -200,13 +200,14 @@ func TestEventLoop_Timeout(t *testing.T) { log := zerolog.New(io.Discard) - eventLoop, err := NewEventLoop(log, metrics.NewNoopCollector(), eh, time.Time{}) + metricsCollector := metrics.NewNoopCollector() + eventLoop, err := 
NewEventLoop(log, metricsCollector, metricsCollector, eh, time.Time{}) require.NoError(t, err) eh.On("TimeoutChannel").Return(time.After(100 * time.Millisecond)) ctx, cancel := context.WithCancel(context.Background()) - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) eventLoop.Start(signalerCtx) unittest.RequireCloseBefore(t, eventLoop.Ready(), 100*time.Millisecond, "event loop not stopped") @@ -228,7 +229,7 @@ func TestEventLoop_Timeout(t *testing.T) { go func() { defer wg.Done() for !processed.Load() { - eventLoop.SubmitProposal(helper.MakeProposal()) + eventLoop.SubmitProposal(helper.MakeSignedProposal()) } }() @@ -253,24 +254,25 @@ func TestReadyDoneWithStartTime(t *testing.T) { startTimeDuration := 2 * time.Second startTime := time.Now().Add(startTimeDuration) - eventLoop, err := NewEventLoop(log, metrics, eh, startTime) + eventLoop, err := NewEventLoop(log, metrics, metrics, eh, startTime) require.NoError(t, err) done := make(chan struct{}) - eh.On("OnReceiveProposal", mock.AnythingOfType("*model.Proposal")).Run(func(args mock.Arguments) { + eh.On("OnReceiveProposal", mock.AnythingOfType("*model.SignedProposal")).Run(func(args mock.Arguments) { require.True(t, time.Now().After(startTime)) close(done) }).Return(nil).Once() ctx, cancel := context.WithCancel(context.Background()) - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) eventLoop.Start(signalerCtx) unittest.RequireCloseBefore(t, eventLoop.Ready(), 100*time.Millisecond, "event loop not started") parentBlock := unittest.BlockHeaderFixture() - block := unittest.BlockHeaderWithParentFixture(parentBlock) - eventLoop.SubmitProposal(model.ProposalFromFlow(block)) + header := unittest.BlockHeaderWithParentFixture(parentBlock) + proposal := unittest.ProposalHeaderFromHeader(header) + eventLoop.SubmitProposal(model.SignedProposalFromFlow(proposal)) unittest.RequireCloseBefore(t, done, startTimeDuration+100*time.Millisecond, "proposal wasn't received") cancel() diff --git a/consensus/hotstuff/follower_loop.go b/consensus/hotstuff/follower_loop.go index 026b21edaee..0c82c5e7623 100644 --- a/consensus/hotstuff/follower_loop.go +++ b/consensus/hotstuff/follower_loop.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/logging" ) @@ -20,6 +21,7 @@ import ( type FollowerLoop struct { *component.ComponentManager log zerolog.Logger + mempoolMetrics module.MempoolMetrics certifiedBlocks chan *model.CertifiedBlock forks Forks } @@ -28,17 +30,17 @@ var _ component.Component = (*FollowerLoop)(nil) var _ module.HotStuffFollower = (*FollowerLoop)(nil) // NewFollowerLoop creates an instance of HotStuffFollower -func NewFollowerLoop(log zerolog.Logger, forks Forks) (*FollowerLoop, error) { +func NewFollowerLoop(log zerolog.Logger, mempoolMetrics module.MempoolMetrics, forks Forks) (*FollowerLoop, error) { // We can't afford to drop messages since it undermines liveness, but we also want to avoid blocking // the compliance layer. Generally, the follower loop should be able to process inbound blocks faster // than they pass through the compliance layer. Nevertheless, in the worst case we will fill the // channel and block the compliance layer's workers. 
Though, that should happen only if compliance // engine receives large number of blocks in short periods of time (e.g. when catching up). - // TODO(active-pacemaker) add metrics for length of inbound channels certifiedBlocks := make(chan *model.CertifiedBlock, 1000) fl := &FollowerLoop{ log: log.With().Str("hotstuff", "FollowerLoop").Logger(), + mempoolMetrics: mempoolMetrics, certifiedBlocks: certifiedBlocks, forks: forks, } @@ -76,8 +78,12 @@ func (fl *FollowerLoop) AddCertifiedBlock(certifiedBlock *model.CertifiedBlock) // the busy duration is measured as how long it takes from a block being // received to a block being handled by the event handler. busyDuration := time.Since(received) - fl.log.Debug().Hex("block_id", logging.ID(certifiedBlock.ID())). + + blocksQueued := uint(len(fl.certifiedBlocks)) + fl.mempoolMetrics.MempoolEntries(metrics.ResourceFollowerLoopCertifiedBlocksChannel, blocksQueued) + fl.log.Debug().Hex("block_id", logging.ID(certifiedBlock.BlockID())). Uint64("view", certifiedBlock.View()). + Uint("blocks_queued", blocksQueued). Dur("wait_time", busyDuration). Msg("wait time to queue inbound certified block") } @@ -100,9 +106,9 @@ case b := <-fl.certifiedBlocks: err := fl.forks.AddCertifiedBlock(b) if err != nil { // all errors are fatal - err = fmt.Errorf("finalization logic failes to process certified block %v: %w", b.ID(), err) + err = fmt.Errorf("finalization logic fails to process certified block %v: %w", b.BlockID(), err) fl.log.Error(). - Hex("block_id", logging.ID(b.ID())). + Hex("block_id", logging.ID(b.BlockID())). Uint64("view", b.View()). Err(err). Msg("irrecoverable follower loop error") diff --git a/consensus/hotstuff/forks.go b/consensus/hotstuff/forks.go index 5940eb35789..0a241326c9d 100644 --- a/consensus/hotstuff/forks.go +++ b/consensus/hotstuff/forks.go @@ -28,7 +28,7 @@ type Forks interface { // GetBlocksForView returns all known blocks for the given view GetBlocksForView(view uint64) []*model.Block - // GetBlock returns (BlockProposal, true) if the block with the specified + // GetBlock returns (*model.Block, true) if the block with the specified // id was found and (nil, false) otherwise. GetBlock(blockID flow.Identifier) (*model.Block, bool) diff --git a/consensus/hotstuff/forks/block_builder_test.go b/consensus/hotstuff/forks/block_builder_test.go index 03daec535c1..49cc14e5cdd 100644 --- a/consensus/hotstuff/forks/block_builder_test.go +++ b/consensus/hotstuff/forks/block_builder_test.go @@ -59,7 +59,7 @@ func (bb *BlockBuilder) GenesisBlock() *model.CertifiedBlock { // AddVersioned adds a block with the given qcView and blockView. // In addition, the version identifier of the QC embedded within the block // is specified by `qcVersion`. The version identifier for the block itself -// (primarily for emulating different payloads) is specified by `blockVersion`. +// (primarily for emulating different block ID) is specified by `blockVersion`. // [(◄3) 4] denotes a block of view 4, with a qc for view 3 // [(◄3) 4'] denotes a block of view 4 that is different than [(◄3) 4], with a qc for view 3 // [(◄3) 4'] can be created by AddVersioned(3, 4, 0, 1)
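For readers following these fixtures, a short usage sketch in the same notation (hypothetical test snippet; assumes this package's BlockBuilder constructor, `NewBlockBuilder()`, and testify's require, both as used in the tests below):

blocks, err := NewBlockBuilder().
	Add(1, 2).                // [◄(1) 2]
	Add(2, 3).                // [◄(2) 3]
	AddVersioned(2, 3, 0, 1). // [◄(2) 3'], a conflicting sibling at view 3
	Blocks()
require.NoError(t, err)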
@@ -94,21 +94,18 @@ func (bb *BlockBuilder) Proposals() ([]*model.Proposal, error) { if !ok { return nil, fmt.Errorf("test fail: no qc found for qc index: %v", bv.QCIndex()) } - payloadHash := makePayloadHash(bv.View, qc, bv.BlockVersion) var lastViewTC *flow.TimeoutCertificate if qc.View+1 != bv.View { lastViewTC = helper.MakeTC(helper.WithTCView(bv.View - 1)) } proposal := &model.Proposal{ Block: &model.Block{ - View: bv.View, - QC: qc, - PayloadHash: payloadHash, + View: bv.View, + QC: qc, }, LastViewTC: lastViewTC, - SigData: nil, } - proposal.Block.BlockID = makeBlockID(proposal.Block) + proposal.Block.BlockID = makeBlockID(proposal.Block, bv.BlockVersion) blocks = append(blocks, proposal) @@ -134,36 +131,28 @@ func (bb *BlockBuilder) Blocks() ([]*model.Block, error) { return toBlocks(proposals), nil } -func makePayloadHash(view uint64, qc *flow.QuorumCertificate, blockVersion int) flow.Identifier { +// makeBlockID creates a block identifier based on the block's view, QC, and block version. +// This is used to identify blocks uniquely in this specific test setup. +// ATTENTION: this should not be confused with the block ID used in production code, which is a collision-resistant hash +// of the full block content. +func makeBlockID(block *model.Block, blockVersion int) flow.Identifier { return flow.MakeID(struct { View uint64 QC *flow.QuorumCertificate BlockVersion uint64 }{ - View: view, - QC: qc, + View: block.View, + QC: block.QC, BlockVersion: uint64(blockVersion), }) } -func makeBlockID(block *model.Block) flow.Identifier { - return flow.MakeID(struct { - View uint64 - QC *flow.QuorumCertificate - PayloadHash flow.Identifier - }{ - View: block.View, - QC: block.QC, - PayloadHash: block.PayloadHash, - }) -} - // constructs the genesis block (identical for all calls) func makeGenesis() *model.CertifiedBlock { genesis := &model.Block{ View: 1, } - genesis.BlockID = makeBlockID(genesis) + genesis.BlockID = makeBlockID(genesis, 0) genesisQC := &flow.QuorumCertificate{ View: 1, diff --git a/consensus/hotstuff/forks/forks.go b/consensus/hotstuff/forks/forks.go index aa4db7f9853..13765e84921 100644 --- a/consensus/hotstuff/forks/forks.go +++ b/consensus/hotstuff/forks/forks.go @@ -44,7 +44,7 @@ func New(trustedRoot *model.CertifiedBlock, finalizationCallback module.Finalize // verify and add root block to levelled forest err := forks.EnsureBlockIsValidExtension(trustedRoot.Block) if err != nil { - return nil, fmt.Errorf("invalid root block %v: %w", trustedRoot.ID(), err) + return nil, fmt.Errorf("invalid root block %v: %w", trustedRoot.BlockID(), err) } forks.forest.AddVertex(ToBlockContainer2(trustedRoot.Block)) return &forks, nil @@ -74,7 +74,7 @@ func (f *Forks) FinalityProof() (*hotstuff.FinalityProof, bool) { return f.finalityProof, f.finalityProof != nil } -// GetBlock returns (BlockProposal, true) if the block with the specified +// GetBlock returns (*model.Block, true) if the block with the specified // id was found and (nil, false) otherwise.
func (f *Forks) GetBlock(blockID flow.Identifier) (*model.Block, bool) { blockContainer, hasBlock := f.forest.GetVertex(blockID) @@ -401,7 +401,7 @@ func (f *Forks) checkForAdvancingFinalization(certifiedBlock *model.CertifiedBlo parentBlock := parentVertex.(*BlockContainer).Block() // Note: we assume that all stored blocks pass Forks.EnsureBlockIsValidExtension(block); - // specifically, that Proposal's ViewNumber is strictly monotonically + // specifically, that block's ViewNumber is strictly monotonically // increasing which is enforced by LevelledForest.VerifyVertex(...) // We denote: // * a DIRECT 1-chain as '<-' diff --git a/consensus/hotstuff/forks/forks_test.go b/consensus/hotstuff/forks/forks_test.go index 9662533dd0d..c30d192ad50 100644 --- a/consensus/hotstuff/forks/forks_test.go +++ b/consensus/hotstuff/forks/forks_test.go @@ -44,7 +44,7 @@ func TestFinalize_Direct1Chain(t *testing.T) { Add(1, 2). Add(2, 3) blocks, err := builder.Blocks() - require.Nil(t, err) + require.NoError(t, err) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { forks, _ := newForks(t) @@ -88,7 +88,7 @@ func TestFinalize_Direct2Chain(t *testing.T) { Add(2, 3). Add(3, 4). Blocks() - require.Nil(t, err) + require.NoError(t, err) expectedFinalityProof := makeFinalityProof(t, blocks[0], blocks[1], blocks[2].QC) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { @@ -117,7 +117,7 @@ func TestFinalize_DirectIndirect2Chain(t *testing.T) { Add(2, 3). Add(3, 5). Blocks() - require.Nil(t, err) + require.NoError(t, err) expectedFinalityProof := makeFinalityProof(t, blocks[0], blocks[1], blocks[2].QC) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { @@ -146,7 +146,7 @@ func TestFinalize_IndirectDirect2Chain(t *testing.T) { Add(3, 5). Add(5, 7). Blocks() - require.Nil(t, err) + require.NoError(t, err) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { forks, _ := newForks(t) @@ -178,7 +178,7 @@ func TestFinalize_Direct2ChainOnIndirect(t *testing.T) { Add(6, 7). Add(7, 8). Blocks() - require.Nil(t, err) + require.NoError(t, err) expectedFinalityProof := makeFinalityProof(t, blocks[2], blocks[3], blocks[4].QC) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { @@ -209,7 +209,7 @@ func TestFinalize_Direct2ChainOnDirect(t *testing.T) { Add(4, 5). Add(5, 6). Blocks() - require.Nil(t, err) + require.NoError(t, err) expectedFinalityProof := makeFinalityProof(t, blocks[2], blocks[3], blocks[4].QC) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { @@ -240,7 +240,7 @@ func TestFinalize_Multiple2Chains(t *testing.T) { Add(3, 6). Add(3, 7). Blocks() - require.Nil(t, err) + require.NoError(t, err) expectedFinalityProof := makeFinalityProof(t, blocks[0], blocks[1], blocks[2].QC) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { @@ -261,7 +261,7 @@ func TestFinalize_Multiple2Chains(t *testing.T) { } // TestFinalize_OrphanedFork tests that we can finalize a block which causes a conflicting fork to be orphaned. -// We ingest the the following block tree: +// We ingest the following block tree: // // [◄(1) 2] [◄(2) 3] // [◄(2) 4] [◄(4) 5] [◄(5) 6] @@ -275,7 +275,7 @@ func TestFinalize_OrphanedFork(t *testing.T) { Add(4, 5). // [◄(4) 5] Add(5, 6). 
// [◄(5) 6] Blocks() - require.Nil(t, err) + require.NoError(t, err) expectedFinalityProof := makeFinalityProof(t, blocks[2], blocks[3], blocks[4].QC) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { @@ -311,7 +311,7 @@ func TestDuplication(t *testing.T) { Add(4, 5). Add(4, 5). Blocks() - require.Nil(t, err) + require.NoError(t, err) expectedFinalityProof := makeFinalityProof(t, blocks[1], blocks[3], blocks[5].QC) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { @@ -341,7 +341,7 @@ func TestIgnoreBlocksBelowFinalizedView(t *testing.T) { Add(3, 4). // [◄(3) 4] Add(1, 5) // [◄(1) 5] blocks, err := builder.Blocks() - require.Nil(t, err) + require.NoError(t, err) expectedFinalityProof := makeFinalityProof(t, blocks[0], blocks[1], blocks[2].QC) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { @@ -354,7 +354,7 @@ func TestIgnoreBlocksBelowFinalizedView(t *testing.T) { // sanity checks to confirm correct test setup requireLatestFinalizedBlock(t, forks, blocks[0]) requireFinalityProof(t, forks, expectedFinalityProof) - require.False(t, forks.IsKnownBlock(builder.GenesisBlock().ID())) + require.False(t, forks.IsKnownBlock(builder.GenesisBlock().BlockID())) // adding block [◄(1) 5]: note that QC is _below_ the pruning threshold, i.e. cannot resolve the parent // * Forks should store block, despite the parent already being pruned @@ -375,7 +375,7 @@ func TestIgnoreBlocksBelowFinalizedView(t *testing.T) { // sanity checks to confirm correct test setup requireLatestFinalizedBlock(t, forks, blocks[0]) requireFinalityProof(t, forks, expectedFinalityProof) - require.False(t, forks.IsKnownBlock(builder.GenesisBlock().ID())) + require.False(t, forks.IsKnownBlock(builder.GenesisBlock().BlockID())) // adding block [◄(1) 5]: note that QC is _below_ the pruning threshold, i.e. cannot resolve the parent // * Forks should store block, despite the parent already being pruned @@ -389,7 +389,7 @@ func TestIgnoreBlocksBelowFinalizedView(t *testing.T) { } // TestDoubleProposal tests that the DoubleProposal notification is emitted when two different -// blocks for the same view are added. We ingest the the following block tree: +// blocks for the same view are added. We ingest the following block tree: // // / [◄(1) 2] // [1] @@ -401,14 +401,14 @@ func TestDoubleProposal(t *testing.T) { Add(1, 2). // [◄(1) 2] AddVersioned(1, 2, 0, 1). // [◄(1) 2'] Blocks() - require.Nil(t, err) + require.NoError(t, err) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { forks, notifier := newForks(t) notifier.On("OnDoubleProposeDetected", blocks[1], blocks[0]).Once() err = addValidatedBlockToForks(forks, blocks) - require.Nil(t, err) + require.NoError(t, err) }) t.Run("consensus follower mode: ingest certified blocks", func(t *testing.T) { @@ -416,9 +416,9 @@ func TestDoubleProposal(t *testing.T) { notifier.On("OnDoubleProposeDetected", blocks[1], blocks[0]).Once() err = forks.AddCertifiedBlock(toCertifiedBlock(t, blocks[0])) // add [◄(1) 2] as certified block - require.Nil(t, err) + require.NoError(t, err) err = forks.AddCertifiedBlock(toCertifiedBlock(t, blocks[1])) // add [◄(1) 2'] as certified block - require.Nil(t, err) + require.NoError(t, err) }) } @@ -438,7 +438,7 @@ func TestConflictingQCs(t *testing.T) { Add(4, 6). // [◄(4) 6] AddVersioned(3, 5, 1, 0). 
// [◄(3') 5] Blocks() - require.Nil(t, err) + require.NoError(t, err) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { forks, notifier := newForks(t) @@ -460,7 +460,7 @@ } // TestConflictingFinalizedForks checks that finalizing 2 conflicting forks should return model.ByzantineThresholdExceededError -// We ingest the the following block tree: +// We ingest the following block tree: // // [◄(1) 2] [◄(2) 3] [◄(3) 4] [◄(4) 5] // [◄(2) 6] [◄(6) 7] [◄(7) 8] @@ -477,7 +477,7 @@ func TestConflictingFinalizedForks(t *testing.T) { Add(6, 7). Add(7, 8). // finalizes [◄(2) 6], conflicting with [◄(2) 3] Blocks() - require.Nil(t, err) + require.NoError(t, err) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { forks, _ := newForks(t) @@ -502,7 +502,7 @@ Add(1, 2). // we will skip this block [◄(1) 2] Add(2, 3). // [◄(2) 3] Blocks() - require.Nil(t, err) + require.NoError(t, err) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { forks, _ := newForks(t) @@ -530,7 +530,7 @@ Add(3, 4). // [◄(3) 4] Add(4, 5). // [◄(4) 5] Blocks() - require.Nil(t, err) + require.NoError(t, err) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { blocksAddedFirst := blocks[:3] // [◄(1) 2] [◄(2) 3] [◄(3) 4] @@ -543,7 +543,7 @@ // add first 3 blocks - should finalize [◄(1) 2] err = addValidatedBlockToForks(forks, blocksAddedFirst) - require.Nil(t, err) + require.NoError(t, err) // should be able to retrieve all stored blocks for _, block := range blocksAddedFirst { @@ -581,9 +581,9 @@ // add first blocks - should finalize [◄(1) 2] err := forks.AddCertifiedBlock(blocksAddedFirst[0]) - require.Nil(t, err) + require.NoError(t, err) err = forks.AddCertifiedBlock(blocksAddedFirst[1]) - require.Nil(t, err) + require.NoError(t, err) // should be able to retrieve all stored blocks for _, block := range blocksAddedFirst { @@ -620,14 +620,14 @@ Add(2, 4). // [◄(2) 4] AddVersioned(2, 4, 0, 1). // [◄(2) 4'] Blocks() - require.Nil(t, err) + require.NoError(t, err) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { forks, notifier := newForks(t) notifier.On("OnDoubleProposeDetected", blocks[2], blocks[1]).Once() err = addValidatedBlockToForks(forks, blocks) - require.Nil(t, err) + require.NoError(t, err) // expect 1 block at view 2 storedBlocks := forks.GetBlocksForView(2) @@ -649,11 +649,11 @@ notifier.On("OnDoubleProposeDetected", blocks[2], blocks[1]).Once() err := forks.AddCertifiedBlock(toCertifiedBlock(t, blocks[0])) - require.Nil(t, err) + require.NoError(t, err) err = forks.AddCertifiedBlock(toCertifiedBlock(t, blocks[1])) - require.Nil(t, err) + require.NoError(t, err) err = forks.AddCertifiedBlock(toCertifiedBlock(t, blocks[2])) - require.Nil(t, err) + require.NoError(t, err) // expect 1 block at view 2 storedBlocks := forks.GetBlocksForView(2) @@ -683,7 +683,7 @@ Add(2, 3). Add(3, 4) blocks, err := builder.Blocks() - require.Nil(t, err) + require.NoError(t, err) t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { notifier := &mocks.Consumer{} @@ -736,7 +736,7 @@ Add(11, 12).
// index 4: [◄(11) 12] Add(12, 22) // index 5: [◄(12) 22] blocks, err := builder.Blocks() - require.Nil(t, err) + require.NoError(t, err) // The Finality Proof should right away point to the _latest_ finalized block. Subsequently emitting // Finalization events for lower blocks is fine, because notifications are guaranteed to be @@ -791,7 +791,7 @@ func TestFinalizingMultipleBlocks(t *testing.T) { t.Run("consensus participant mode: ingest validated blocks", func(t *testing.T) { forks, finalizationCallback, notifier := setupForksAndAssertions() err = addValidatedBlockToForks(forks, blocks[:5]) // adding [◄(1) 2] [◄(2) 4] [◄(4) 6] [◄(6) 11] [◄(11) 12] - require.Nil(t, err) + require.NoError(t, err) requireOnlyGenesisBlockFinalized(t, forks) // finalization should still be at the genesis block require.NoError(t, forks.AddValidatedBlock(blocks[5])) // adding [◄(12) 22] should trigger finalization events @@ -807,7 +807,7 @@ func TestFinalizingMultipleBlocks(t *testing.T) { require.NoError(t, forks.AddCertifiedBlock(toCertifiedBlock(t, blocks[1]))) require.NoError(t, forks.AddCertifiedBlock(toCertifiedBlock(t, blocks[2]))) require.NoError(t, forks.AddCertifiedBlock(toCertifiedBlock(t, blocks[3]))) - require.Nil(t, err) + require.NoError(t, err) requireOnlyGenesisBlockFinalized(t, forks) // finalization should still be at the genesis block // adding certified block [◄(11) 12] ◄(12) should trigger finalization events @@ -831,7 +831,7 @@ func newForks(t *testing.T) (*Forks, *mocks.Consumer) { forks, err := New(genesisBQ, finalizationCallback, notifier) - require.Nil(t, err) + require.NoError(t, err) return forks, notifier } @@ -914,7 +914,7 @@ func toCertifiedBlock(t *testing.T, block *model.Block) *model.CertifiedBlock { BlockID: block.BlockID, } cb, err := model.NewCertifiedBlock(block, qc) - require.Nil(t, err) + require.NoError(t, err) return &cb } diff --git a/consensus/hotstuff/helper/block.go b/consensus/hotstuff/helper/block.go index a3fa6f6e2e7..e4461c904cc 100644 --- a/consensus/hotstuff/helper/block.go +++ b/consensus/hotstuff/helper/block.go @@ -12,12 +12,11 @@ import ( func MakeBlock(options ...func(*model.Block)) *model.Block { view := rand.Uint64() block := model.Block{ - View: view, - BlockID: unittest.IdentifierFixture(), - PayloadHash: unittest.IdentifierFixture(), - ProposerID: unittest.IdentifierFixture(), - Timestamp: time.Now().UTC(), - QC: MakeQC(WithQCView(view - 1)), + View: view, + BlockID: unittest.IdentifierFixture(), + ProposerID: unittest.IdentifierFixture(), + Timestamp: uint64(time.Now().UnixMilli()), + QC: MakeQC(WithQCView(view - 1)), } for _, option := range options { option(&block) @@ -56,10 +55,21 @@ func WithBlockQC(qc *flow.QuorumCertificate) func(*model.Block) { } } +func MakeSignedProposal(options ...func(*model.SignedProposal)) *model.SignedProposal { + proposal := &model.SignedProposal{ + Proposal: *MakeProposal(), + SigData: unittest.SignatureFixture(), + } + for _, option := range options { + option(proposal) + } + return proposal +} + func MakeProposal(options ...func(*model.Proposal)) *model.Proposal { proposal := &model.Proposal{ - Block: MakeBlock(), - SigData: unittest.SignatureFixture(), + Block: MakeBlock(), + LastViewTC: nil, } for _, option := range options { option(proposal) @@ -67,14 +77,20 @@ func MakeProposal(options ...func(*model.Proposal)) *model.Proposal { return proposal } +func WithProposal(proposal *model.Proposal) func(*model.SignedProposal) { + return func(signedProposal *model.SignedProposal) { + signedProposal.Proposal = *proposal 
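// Editorial note: this change splits the former Proposal type in two — model.Proposal
// now carries only Block + LastViewTC, while model.SignedProposal wraps a Proposal
// together with SigData. Hence WithSigData below now targets *model.SignedProposal.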
+ } +} + func WithBlock(block *model.Block) func(*model.Proposal) { return func(proposal *model.Proposal) { proposal.Block = block } } -func WithSigData(sigData []byte) func(*model.Proposal) { - return func(proposal *model.Proposal) { +func WithSigData(sigData []byte) func(*model.SignedProposal) { + return func(proposal *model.SignedProposal) { proposal.SigData = sigData } } @@ -84,3 +100,31 @@ func WithLastViewTC(lastViewTC *flow.TimeoutCertificate) func(*model.Proposal) { proposal.LastViewTC = lastViewTC } } + +// SignedProposalToFlow turns a HotStuff block proposal into a flow block proposal. +// +// CAUTION: This function is only suitable for TESTING purposes ONLY. +// In the conversion from `flow.Header` to HotStuff's `model.Block` we lose information +// (e.g. `ChainID` and `Height` are not included in `model.Block`) and hence the conversion +// is *not reversible*. This is on purpose, because we wanted to only expose data to +// HotStuff that HotStuff really needs. +func SignedProposalToFlow(proposal *model.SignedProposal) *flow.ProposalHeader { + block := proposal.Block + header := &flow.Header{ + HeaderBody: flow.HeaderBody{ + ParentID: block.QC.BlockID, + Timestamp: block.Timestamp, + View: block.View, + ParentView: block.QC.View, + ParentVoterIndices: block.QC.SignerIndices, + ParentVoterSigData: block.QC.SigData, + ProposerID: block.ProposerID, + LastViewTC: proposal.LastViewTC, + }, + } + + return &flow.ProposalHeader{ + Header: header, + ProposerSigData: proposal.SigData, + } +} diff --git a/consensus/hotstuff/helper/bls_key.go b/consensus/hotstuff/helper/bls_key.go index e455be5b296..215a5c87e28 100644 --- a/consensus/hotstuff/helper/bls_key.go +++ b/consensus/hotstuff/helper/bls_key.go @@ -4,9 +4,8 @@ import ( "crypto/rand" "testing" + "github.com/onflow/crypto" "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/crypto" ) func MakeBLSKey(t *testing.T) crypto.PrivateKey { diff --git a/consensus/hotstuff/helper/signature.go b/consensus/hotstuff/helper/signature.go index 01fef1a77e4..9c3e4382e79 100644 --- a/consensus/hotstuff/helper/signature.go +++ b/consensus/hotstuff/helper/signature.go @@ -4,8 +4,9 @@ import ( "github.com/stretchr/testify/mock" "go.uber.org/atomic" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/consensus/hotstuff/mocks" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/consensus/hotstuff/helper/timeout_certificate.go b/consensus/hotstuff/helper/timeout_certificate.go index 42ffe64d8d9..b214e2b4b64 100644 --- a/consensus/hotstuff/helper/timeout_certificate.go +++ b/consensus/hotstuff/helper/timeout_certificate.go @@ -9,16 +9,17 @@ import ( ) func MakeTC(options ...func(*flow.TimeoutCertificate)) *flow.TimeoutCertificate { - qc := MakeQC() + tcView := rand.Uint64() + qc := MakeQC(WithQCView(tcView - 1)) signerIndices := unittest.SignerIndicesFixture(3) highQCViews := make([]uint64, 3) for i := range highQCViews { highQCViews[i] = qc.View } tc := flow.TimeoutCertificate{ - View: rand.Uint64(), + View: tcView, NewestQC: qc, - NewestQCViews: []uint64{qc.View}, + NewestQCViews: highQCViews, SignerIndices: signerIndices, SigData: unittest.SignatureFixture(), } @@ -54,12 +55,18 @@ func WithTCHighQCViews(highQCViews []uint64) func(*flow.TimeoutCertificate) { } func TimeoutObjectFixture(opts ...func(TimeoutObject *hotstuff.TimeoutObject)) *hotstuff.TimeoutObject { + timeoutView := uint64(rand.Uint32()) + newestQC := MakeQC(WithQCView(timeoutView - 10)) + 
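// Editorial note: the views below are wired to be mutually consistent — newestQC sits
// well below timeoutView, and the LastViewTC constructed next has View == timeoutView-1
// carrying a QC no newer than newestQC — so the fixture resembles a timeout that a
// correct replica could actually have produced.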
timeout := &hotstuff.TimeoutObject{ - View: uint64(rand.Uint32()), - NewestQC: MakeQC(), - LastViewTC: MakeTC(), - SignerID: unittest.IdentifierFixture(), - SigData: unittest.RandomBytes(128), + View: timeoutView, + NewestQC: newestQC, + LastViewTC: MakeTC( + WithTCView(timeoutView-1), + WithTCNewestQC(MakeQC(WithQCView(newestQC.View))), + ), + SignerID: unittest.IdentifierFixture(), + SigData: unittest.RandomBytes(128), } for _, opt := range opts { @@ -70,8 +77,8 @@ func TimeoutObjectFixture(opts ...func(TimeoutObject *hotstuff.TimeoutObject)) * } func WithTimeoutObjectSignerID(signerID flow.Identifier) func(*hotstuff.TimeoutObject) { - return func(TimeoutObject *hotstuff.TimeoutObject) { - TimeoutObject.SignerID = signerID + return func(timeout *hotstuff.TimeoutObject) { + timeout.SignerID = signerID } } @@ -88,7 +95,7 @@ func WithTimeoutLastViewTC(lastViewTC *flow.TimeoutCertificate) func(*hotstuff.T } func WithTimeoutObjectView(view uint64) func(*hotstuff.TimeoutObject) { - return func(TimeoutObject *hotstuff.TimeoutObject) { - TimeoutObject.View = view + return func(timeout *hotstuff.TimeoutObject) { + timeout.View = view } } diff --git a/consensus/hotstuff/integration/connect_test.go b/consensus/hotstuff/integration/connect_test.go index a254e0f9f3c..cb5f1f33b2d 100644 --- a/consensus/hotstuff/integration/connect_test.go +++ b/consensus/hotstuff/integration/connect_test.go @@ -25,25 +25,25 @@ func Connect(t *testing.T, instances []*Instance) { *sender.notifier = *NewMockedCommunicatorConsumer() sender.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run( func(args mock.Arguments) { - header, ok := args[0].(*flow.Header) + proposal, ok := args[0].(*flow.ProposalHeader) require.True(t, ok) // sender should always have the parent sender.updatingBlocks.RLock() - _, exists := sender.headers[header.ParentID] + _, exists := sender.headers[proposal.Header.ParentID] sender.updatingBlocks.RUnlock() if !exists { - t.Fatalf("parent for proposal not found (sender: %x, parent: %x)", sender.localID, header.ParentID) + t.Fatalf("parent for proposal not found (sender: %x, parent: %x)", sender.localID, proposal.Header.ParentID) } // convert into proposal immediately - proposal := model.ProposalFromFlow(header) + hotstuffProposal := model.SignedProposalFromFlow(proposal) // store locally and loop back to engine for processing - sender.ProcessBlock(proposal) + sender.ProcessBlock(hotstuffProposal) // check if we should block the outgoing proposal - if sender.blockPropOut(proposal) { + if sender.blockPropOut(hotstuffProposal) { return } @@ -56,27 +56,20 @@ func Connect(t *testing.T, instances []*Instance) { } // check if we should block the incoming proposal - if receiver.blockPropIn(proposal) { + if receiver.blockPropIn(hotstuffProposal) { continue } - receiver.ProcessBlock(proposal) + receiver.ProcessBlock(hotstuffProposal) } }, ) - sender.notifier.On("OnOwnVote", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Run( + sender.notifier.On("OnOwnVote", mock.Anything, mock.Anything).Run( func(args mock.Arguments) { - blockID, ok := args[0].(flow.Identifier) + vote, ok := args[0].(*model.Vote) require.True(t, ok) - view, ok := args[1].(uint64) + recipientID, ok := args[1].(flow.Identifier) require.True(t, ok) - sigData, ok := args[2].([]byte) - require.True(t, ok) - recipientID, ok := args[3].(flow.Identifier) - require.True(t, ok) - // convert into vote - vote := model.VoteFromFlow(sender.localID, blockID, view, sigData) - // get the receiver receiver, exists := lookup[recipientID] if 
!exists { diff --git a/consensus/hotstuff/integration/defaults_test.go b/consensus/hotstuff/integration/defaults_test.go index 925fd3d2ec1..6cea7caf577 100644 --- a/consensus/hotstuff/integration/defaults_test.go +++ b/consensus/hotstuff/integration/defaults_test.go @@ -9,11 +9,13 @@ import ( func DefaultRoot() *flow.Header { header := &flow.Header{ - ChainID: "chain", - ParentID: flow.ZeroID, - Height: 0, + HeaderBody: flow.HeaderBody{ + ChainID: "chain", + ParentID: flow.ZeroID, + Height: 0, + Timestamp: uint64(time.Now().UnixMilli()), + }, PayloadHash: unittest.IdentifierFixture(), - Timestamp: time.Now().UTC(), } return header } diff --git a/consensus/hotstuff/integration/filters_test.go b/consensus/hotstuff/integration/filters_test.go index 8d6ac067f48..8bf12fc6d3b 100644 --- a/consensus/hotstuff/integration/filters_test.go +++ b/consensus/hotstuff/integration/filters_test.go @@ -8,7 +8,7 @@ import ( ) // VoteFilter is a filter function for dropping Votes. -// Return value `true` implies that the the given Vote should be +// Return value `true` implies that the given Vote should be // dropped, while `false` indicates that the Vote should be received. type VoteFilter func(*model.Vote) bool @@ -34,34 +34,34 @@ func BlockVotesBy(voterID flow.Identifier) VoteFilter { } // ProposalFilter is a filter function for dropping Proposals. -// Return value `true` implies that the the given Proposal should be -// dropped, while `false` indicates that the Proposal should be received. -type ProposalFilter func(*model.Proposal) bool +// Return value `true` implies that the given SignedProposal should be +// dropped, while `false` indicates that the SignedProposal should be received. +type ProposalFilter func(*model.SignedProposal) bool -func BlockNoProposals(*model.Proposal) bool { +func BlockNoProposals(*model.SignedProposal) bool { return false } -func BlockAllProposals(*model.Proposal) bool { +func BlockAllProposals(*model.SignedProposal) bool { return true } // BlockProposalRandomly drops proposals randomly with a probability of `dropProbability` ∈ [0,1] func BlockProposalRandomly(dropProbability float64) ProposalFilter { - return func(*model.Proposal) bool { + return func(*model.SignedProposal) bool { return rand.Float64() < dropProbability } } // BlockProposalsBy drops all proposals originating from the specified `proposerID` func BlockProposalsBy(proposerID flow.Identifier) ProposalFilter { - return func(proposal *model.Proposal) bool { + return func(proposal *model.SignedProposal) bool { return proposal.Block.ProposerID == proposerID } } // TimeoutObjectFilter is a filter function for dropping TimeoutObjects. -// Return value `true` implies that the the given TimeoutObject should be +// Return value `true` implies that the given TimeoutObject should be // dropped, while `false` indicates that the TimeoutObject should be received. 
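// Editorial sketch (assumed, mirroring the Vote/Proposal filters above): a concrete
// filter dropping all timeouts originating from one replica could look like
//
//	func BlockTimeoutObjectsBy(signerID flow.Identifier) TimeoutObjectFilter {
//		return func(timeout *model.TimeoutObject) bool {
//			return timeout.SignerID == signerID
//		}
//	}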
type TimeoutObjectFilter func(*model.TimeoutObject) bool diff --git a/consensus/hotstuff/integration/instance_test.go b/consensus/hotstuff/integration/instance_test.go index 469fe252d2a..3c6c9153f1c 100644 --- a/consensus/hotstuff/integration/instance_test.go +++ b/consensus/hotstuff/integration/instance_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/gammazero/workerpool" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -31,9 +32,8 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/voteaggregator" "github.com/onflow/flow-go/consensus/hotstuff/votecollector" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/engine/consensus/sealing/counters" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" @@ -59,7 +59,7 @@ type Instance struct { queue chan interface{} updatingBlocks sync.RWMutex headers map[flow.Identifier]*flow.Header - pendings map[flow.Identifier]*model.Proposal // indexed by parent ID + pendings map[flow.Identifier]*model.SignedProposal // indexed by parent ID // mocked dependencies committee *mocks.DynamicCommittee @@ -151,7 +151,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { stop: cfg.StopCondition, // instance data - pendings: make(map[flow.Identifier]*model.Proposal), + pendings: make(map[flow.Identifier]*model.SignedProposal), headers: make(map[flow.Identifier]*flow.Header), queue: make(chan interface{}, 1024), @@ -170,14 +170,14 @@ func NewInstance(t *testing.T, options ...Option) *Instance { // program the hotstuff committee state in.committee.On("IdentitiesByEpoch", mock.Anything).Return( - func(_ uint64) flow.IdentityList { - return in.participants + func(_ uint64) flow.IdentitySkeletonList { + return in.participants.ToSkeleton() }, nil, ) for _, participant := range in.participants { in.committee.On("IdentityByBlock", mock.Anything, participant.NodeID).Return(participant, nil) - in.committee.On("IdentityByEpoch", mock.Anything, participant.NodeID).Return(participant, nil) + in.committee.On("IdentityByEpoch", mock.Anything, participant.NodeID).Return(&participant.IdentitySkeleton, nil) } in.committee.On("Self").Return(in.localID) in.committee.On("LeaderForView", mock.Anything).Return( @@ -185,12 +185,12 @@ func NewInstance(t *testing.T, options ...Option) *Instance { return in.participants[int(view)%len(in.participants)].NodeID }, nil, ) - in.committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(in.participants.TotalWeight()), nil) - in.committee.On("TimeoutThresholdForView", mock.Anything).Return(committees.WeightThresholdToTimeout(in.participants.TotalWeight()), nil) + in.committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(in.participants.ToSkeleton().TotalWeight()), nil) + in.committee.On("TimeoutThresholdForView", mock.Anything).Return(committees.WeightThresholdToTimeout(in.participants.ToSkeleton().TotalWeight()), nil) // program the builder module behaviour - in.builder.On("BuildOn", mock.Anything, mock.Anything).Return( - func(parentID flow.Identifier, setter func(*flow.Header) error) *flow.Header { + in.builder.On("BuildOn", mock.Anything, mock.Anything, mock.Anything).Return( + func(parentID flow.Identifier, setter func(builder 
*flow.HeaderBodyBuilder) error, sign func(*flow.Header) ([]byte, error)) *flow.ProposalHeader { in.updatingBlocks.Lock() defer in.updatingBlocks.Unlock() @@ -198,19 +198,30 @@ func NewInstance(t *testing.T, options ...Option) *Instance { if !ok { return nil } - header := &flow.Header{ - ChainID: "chain", - ParentID: parentID, - ParentView: parent.View, - Height: parent.Height + 1, + headerBuilder := flow.NewHeaderBodyBuilder(). + WithChainID("chain"). + WithParentID(parentID). + WithParentView(parent.View). + WithHeight(parent.Height + 1). + WithTimestamp(uint64(time.Now().UnixMilli())) + require.NoError(t, setter(headerBuilder)) + headerBody, err := headerBuilder.Build() + require.NoError(t, err) + header, err := flow.NewHeader(flow.UntrustedHeader{ + HeaderBody: *headerBody, PayloadHash: unittest.IdentifierFixture(), - Timestamp: time.Now().UTC(), + }) + require.NoError(t, err) + sig, err := sign(header) + require.NoError(t, err) + proposal := &flow.ProposalHeader{ + Header: header, + ProposerSigData: sig, } - require.NoError(t, setter(header)) in.headers[header.ID()] = header - return header + return proposal }, - func(parentID flow.Identifier, setter func(*flow.Header) error) error { + func(parentID flow.Identifier, _ func(*flow.HeaderBodyBuilder) error, _ func(*flow.Header) ([]byte, error)) error { in.updatingBlocks.RLock() _, ok := in.headers[parentID] in.updatingBlocks.RUnlock() @@ -226,16 +237,6 @@ func NewInstance(t *testing.T, options ...Option) *Instance { in.persist.On("PutLivenessData", mock.Anything).Return(nil) // program the hotstuff signer behaviour - in.signer.On("CreateProposal", mock.Anything).Return( - func(block *model.Block) *model.Proposal { - proposal := &model.Proposal{ - Block: block, - SigData: nil, - } - return proposal - }, - nil, - ) in.signer.On("CreateVote", mock.Anything).Return( func(block *model.Block) *model.Vote { vote := &model.Vote{ @@ -275,7 +276,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { View: votes[0].View, BlockID: votes[0].BlockID, SignerIndices: signerIndices, - SigData: nil, + SigData: unittest.RandomBytes(msig.SigLen), } return qc }, @@ -290,23 +291,23 @@ func NewInstance(t *testing.T, options ...Option) *Instance { // program the hotstuff communicator behaviour in.notifier.On("OnOwnProposal", mock.Anything, mock.Anything).Run( func(args mock.Arguments) { - header, ok := args[0].(*flow.Header) + proposal, ok := args[0].(*flow.ProposalHeader) require.True(t, ok) // sender should always have the parent in.updatingBlocks.RLock() - _, exists := in.headers[header.ParentID] + _, exists := in.headers[proposal.Header.ParentID] in.updatingBlocks.RUnlock() if !exists { - t.Fatalf("parent for proposal not found parent: %x", header.ParentID) + t.Fatalf("parent for proposal not found parent: %x", proposal.Header.ParentID) } // convert into proposal immediately - proposal := model.ProposalFromFlow(header) + hotstuffProposal := model.SignedProposalFromFlow(proposal) // store locally and loop back to engine for processing - in.ProcessBlock(proposal) + in.ProcessBlock(hotstuffProposal) }, ) in.notifier.On("OnOwnTimeout", mock.Anything).Run(func(args mock.Arguments) { @@ -317,13 +318,9 @@ func NewInstance(t *testing.T, options ...Option) *Instance { ) // in case of single node setup we should just forward vote to our own node // for multi-node setup this method will be overridden - in.notifier.On("OnOwnVote", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - in.queue <- &model.Vote{ - View: 
args[1].(uint64), - BlockID: args[0].(flow.Identifier), - SignerID: in.localID, - SigData: args[2].([]byte), - } + in.notifier.On("OnOwnVote", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + vote := args[1].(*model.Vote) + in.queue <- vote }) // program the finalizer module behaviour @@ -364,12 +361,13 @@ func NewInstance(t *testing.T, options ...Option) *Instance { notifier.AddConsumer(logConsumer) notifier.AddConsumer(in.notifier) - // initialize the block producer - in.producer, err = blockproducer.New(in.signer, in.committee, in.builder) - require.NoError(t, err) - // initialize the finalizer - rootBlock := model.BlockFromFlow(cfg.Root) + var rootBlock *model.Block + if cfg.Root.ContainsParentQC() { + rootBlock = model.BlockFromFlow(cfg.Root) + } else { + rootBlock = model.GenesisBlockFromFlow(cfg.Root) + } signerIndices, err := msig.EncodeSignersToIndices(in.participants.NodeIDs(), in.participants.NodeIDs()) require.NoError(t, err, "could not encode signer indices") @@ -378,6 +376,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { View: rootBlock.View, BlockID: rootBlock.BlockID, SignerIndices: signerIndices, + SigData: unittest.RandomBytes(msig.SigLen), } certifiedRootBlock, err := model.NewCertifiedBlock(rootBlock, rootQC) require.NoError(t, err) @@ -391,7 +390,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { // initialize the pacemaker controller := timeout.NewController(cfg.Timeouts) - in.pacemaker, err = pacemaker.New(controller, notifier, in.persist) + in.pacemaker, err = pacemaker.New(controller, pacemaker.NoProposalDelay(), notifier, in.persist) require.NoError(t, err) // initialize the forks handler @@ -413,23 +412,32 @@ func NewInstance(t *testing.T, options ...Option) *Instance { in.queue <- qc } - minRequiredWeight := committees.WeightThresholdToBuildQC(uint64(in.participants.Count()) * weight) + minRequiredWeight := committees.WeightThresholdToBuildQC(uint64(len(in.participants)) * weight) voteProcessorFactory := mocks.NewVoteProcessorFactory(t) voteProcessorFactory.On("Create", mock.Anything, mock.Anything).Return( - func(log zerolog.Logger, proposal *model.Proposal) hotstuff.VerifyingVoteProcessor { + func(log zerolog.Logger, proposal *model.SignedProposal) hotstuff.VerifyingVoteProcessor { stakingSigAggtor := helper.MakeWeightedSignatureAggregator(weight) stakingSigAggtor.On("Verify", mock.Anything, mock.Anything).Return(nil).Maybe() - rbRector := helper.MakeRandomBeaconReconstructor(msig.RandomBeaconThreshold(int(in.participants.Count()))) + rbRector := helper.MakeRandomBeaconReconstructor(msig.RandomBeaconThreshold(len(in.participants))) rbRector.On("Verify", mock.Anything, mock.Anything).Return(nil).Maybe() - return votecollector.NewCombinedVoteProcessor( + processor := votecollector.NewCombinedVoteProcessor( log, proposal.Block, stakingSigAggtor, rbRector, onQCCreated, packer, minRequiredWeight, ) + + vote, err := proposal.ProposerVote() + require.NoError(t, err) + + err = processor.Process(vote) + if err != nil { + t.Fatalf("invalid vote for own proposal: %v", err) + } + return processor }, nil).Maybe() voteAggregationDistributor := pubsub.NewVoteAggregationDistributor() @@ -458,7 +466,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { // mock signature aggregator which doesn't perform any crypto operations and just tracks total weight aggregator := &mocks.TimeoutSignatureAggregator{} totalWeight := atomic.NewUint64(0) - newestView := counters.NewMonotonousCounter(0) + newestView := 
counters.NewMonotonicCounter(0) aggregator.On("View").Return(view).Maybe() aggregator.On("TotalWeight").Return(func() uint64 { return totalWeight.Load() @@ -468,7 +476,7 @@ func NewInstance(t *testing.T, options ...Option) *Instance { newestView.Set(newestQCView) identity, ok := in.participants.ByNodeID(signerID) require.True(t, ok) - return totalWeight.Add(identity.Weight) + return totalWeight.Add(identity.InitialWeight) }, nil, ).Maybe() aggregator.On("Aggregate", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return( @@ -502,7 +510,12 @@ func NewInstance(t *testing.T, options ...Option) *Instance { timeoutAggregationDistributor, timeoutProcessorFactory, ) - timeoutCollectors := timeoutaggregator.NewTimeoutCollectors(log, livenessData.CurrentView, timeoutCollectorFactory) + timeoutCollectors := timeoutaggregator.NewTimeoutCollectors( + log, + metricsCollector, + livenessData.CurrentView, + timeoutCollectorFactory, + ) // initialize the timeout aggregator in.timeoutAggregator, err = timeoutaggregator.NewTimeoutAggregator( @@ -525,6 +538,10 @@ func NewInstance(t *testing.T, options ...Option) *Instance { in.safetyRules, err = safetyrules.New(in.signer, in.persist, in.committee) require.NoError(t, err) + // initialize the block producer + in.producer, err = blockproducer.New(in.safetyRules, in.committee, in.builder) + require.NoError(t, err) + // initialize the event handler in.handler, err = eventhandler.NewEventHandler( log, @@ -546,13 +563,13 @@ func NewInstance(t *testing.T, options ...Option) *Instance { return &in } -func (in *Instance) Run() error { +func (in *Instance) Run(t *testing.T) error { ctx, cancel := context.WithCancel(context.Background()) defer func() { cancel() <-util.AllDone(in.voteAggregator, in.timeoutAggregator) }() - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) in.voteAggregator.Start(signalerCtx) in.timeoutAggregator.Start(signalerCtx) <-util.AllReady(in.voteAggregator, in.timeoutAggregator) @@ -595,7 +612,7 @@ func (in *Instance) Run() error { } case msg := <-in.queue: switch m := msg.(type) { - case *model.Proposal: + case *model.SignedProposal: // add block to aggregator in.voteAggregator.AddBlock(m) // then pass to event handler @@ -627,7 +644,7 @@ func (in *Instance) Run() error { } } -func (in *Instance) ProcessBlock(proposal *model.Proposal) { +func (in *Instance) ProcessBlock(proposal *model.SignedProposal) { in.updatingBlocks.Lock() defer in.updatingBlocks.Unlock() _, parentExists := in.headers[proposal.Block.QC.BlockID] @@ -635,7 +652,7 @@ func (in *Instance) ProcessBlock(proposal *model.Proposal) { if parentExists { next := proposal for next != nil { - in.headers[next.Block.BlockID] = model.ProposalToFlow(next) + in.headers[next.Block.BlockID] = helper.SignedProposalToFlow(next).Header in.queue <- next // keep processing the pending blocks diff --git a/consensus/hotstuff/integration/integration_test.go b/consensus/hotstuff/integration/integration_test.go index e2929777dee..d29ec533942 100644 --- a/consensus/hotstuff/integration/integration_test.go +++ b/consensus/hotstuff/integration/integration_test.go @@ -21,7 +21,6 @@ const safeTimeout = 2 * time.Second const happyPathMaxRoundFailures = 6 func TestSingleInstance(t *testing.T) { - // set up a single instance to run // NOTE: currently, the HotStuff logic will infinitely call back on itself // with a single instance, leading to a boundlessly growing call stack, @@ -33,7 +32,7 @@ func TestSingleInstance(t *testing.T) { ) // 
run the event handler until we reach a stop condition - err := in.Run() + err := in.Run(t) require.ErrorIs(t, err, errStopCondition, "should run until stop condition") // check if forks and pacemaker are in expected view state @@ -52,7 +51,7 @@ func TestThreeInstances(t *testing.T) { // generate three hotstuff participants participants := unittest.IdentityListFixture(num) root := DefaultRoot() - timeouts, err := timeout.NewConfig(safeTimeout, safeTimeout, 1.5, happyPathMaxRoundFailures, 0, safeTimeout) + timeouts, err := timeout.NewConfig(safeTimeout, safeTimeout, 1.5, happyPathMaxRoundFailures, safeTimeout) require.NoError(t, err) // set up three instances that are exactly the same @@ -79,7 +78,7 @@ func TestThreeInstances(t *testing.T) { for _, in := range instances { wg.Add(1) go func(in *Instance) { - err := in.Run() + err := in.Run(t) require.True(t, errors.Is(err, errStopCondition), "should run until stop condition") wg.Done() }(in) @@ -116,7 +115,7 @@ func TestSevenInstances(t *testing.T) { participants := unittest.IdentityListFixture(numPass + numFail) instances := make([]*Instance, 0, numPass+numFail) root := DefaultRoot() - timeouts, err := timeout.NewConfig(safeTimeout, safeTimeout, 1.5, happyPathMaxRoundFailures, 0, safeTimeout) + timeouts, err := timeout.NewConfig(safeTimeout, safeTimeout, 1.5, happyPathMaxRoundFailures, safeTimeout) require.NoError(t, err) // set up five instances that work fully @@ -152,7 +151,7 @@ func TestSevenInstances(t *testing.T) { for _, in := range instances { wg.Add(1) go func(in *Instance) { - err := in.Run() + err := in.Run(t) require.True(t, errors.Is(err, errStopCondition), "should run until stop condition") wg.Done() }(in) diff --git a/consensus/hotstuff/integration/liveness_test.go b/consensus/hotstuff/integration/liveness_test.go index 247957700d7..3d7c14c55f4 100644 --- a/consensus/hotstuff/integration/liveness_test.go +++ b/consensus/hotstuff/integration/liveness_test.go @@ -36,7 +36,7 @@ func Test2TimeoutOutof7Instances(t *testing.T) { participants := unittest.IdentityListFixture(healthyReplicas + notVotingReplicas) instances := make([]*Instance, 0, healthyReplicas+notVotingReplicas) root := DefaultRoot() - timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, 0, maxTimeoutRebroadcast) + timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast) require.NoError(t, err) // set up five instances that work fully @@ -73,8 +73,8 @@ func Test2TimeoutOutof7Instances(t *testing.T) { for _, in := range instances { wg.Add(1) go func(in *Instance) { - err := in.Run() - require.True(t, errors.Is(err, errStopCondition)) + err := in.Run(t) + require.ErrorIs(t, err, errStopCondition) wg.Done() }(in) } @@ -103,8 +103,7 @@ func Test2TimeoutOutof4Instances(t *testing.T) { participants := unittest.IdentityListFixture(healthyReplicas + replicasDroppingHappyPathMsgs) instances := make([]*Instance, 0, healthyReplicas+replicasDroppingHappyPathMsgs) root := DefaultRoot() - timeouts, err := timeout.NewConfig( - 10*time.Millisecond, 50*time.Millisecond, 1.5, happyPathMaxRoundFailures, 0, maxTimeoutRebroadcast) + timeouts, err := timeout.NewConfig(10*time.Millisecond, 50*time.Millisecond, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast) require.NoError(t, err) // set up two instances that work fully @@ -142,7 +141,7 @@ func Test2TimeoutOutof4Instances(t *testing.T) { for _, in := range instances { wg.Add(1) go func(in *Instance) { - err := in.Run() + err := in.Run(t) 
require.True(t, errors.Is(err, errStopCondition), "should run until stop condition") wg.Done() }(in) @@ -173,7 +172,7 @@ func Test1TimeoutOutof5Instances(t *testing.T) { participants := unittest.IdentityListFixture(healthyReplicas + blockedReplicas) instances := make([]*Instance, 0, healthyReplicas+blockedReplicas) root := DefaultRoot() - timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, 0, maxTimeoutRebroadcast) + timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast) require.NoError(t, err) // set up instances that work fully @@ -210,8 +209,8 @@ func Test1TimeoutOutof5Instances(t *testing.T) { for _, in := range instances { wg.Add(1) go func(in *Instance) { - err := in.Run() - require.True(t, errors.Is(err, errStopCondition)) + err := in.Run(t) + require.ErrorIs(t, err, errStopCondition) wg.Done() }(in) } @@ -270,7 +269,7 @@ func TestBlockDelayIsHigherThanTimeout(t *testing.T) { instances := make([]*Instance, 0, healthyReplicas+replicasNotGeneratingTimeouts) root := DefaultRoot() // set block rate delay to be bigger than minimal timeout - timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, pmTimeout*2, maxTimeoutRebroadcast) + timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, happyPathMaxRoundFailures, maxTimeoutRebroadcast) require.NoError(t, err) // set up 2 instances that fully work (incl. sending TimeoutObjects) @@ -307,8 +306,8 @@ func TestBlockDelayIsHigherThanTimeout(t *testing.T) { for _, in := range instances { wg.Add(1) go func(in *Instance) { - err := in.Run() - require.True(t, errors.Is(err, errStopCondition)) + err := in.Run(t) + require.ErrorIs(t, err, errStopCondition) wg.Done() }(in) } @@ -353,7 +352,7 @@ func TestAsyncClusterStartup(t *testing.T) { instances := make([]*Instance, 0, replicas) root := DefaultRoot() // set block rate delay to be bigger than minimal timeout - timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, 6, 0, maxTimeoutRebroadcast) + timeouts, err := timeout.NewConfig(pmTimeout, pmTimeout, 1.5, 6, maxTimeoutRebroadcast) require.NoError(t, err) // set up instances that work fully @@ -389,8 +388,8 @@ func TestAsyncClusterStartup(t *testing.T) { for _, in := range instances { wg.Add(1) go func(in *Instance) { - err := in.Run() - require.True(t, errors.Is(err, errStopCondition)) + err := in.Run(t) + require.ErrorIs(t, err, errStopCondition) wg.Done() }(in) } diff --git a/consensus/hotstuff/mocks/block_producer.go b/consensus/hotstuff/mocks/block_producer.go index b4060a723e7..af340b10071 100644 --- a/consensus/hotstuff/mocks/block_producer.go +++ b/consensus/hotstuff/mocks/block_producer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
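// Editorial note: the regenerated mocks below now panic with "no return value
// specified for <Method>" when a mocked method is called without a programmed
// return, so tests must always configure expectations. A minimal sketch, assuming
// standard testify usage (view, qc, lastViewTC, proposalHeader are test fixtures):
//
//	producer := mocks.NewBlockProducer(t)
//	producer.On("MakeBlockProposal", view, qc, lastViewTC).
//		Return(proposalHeader, nil) // proposalHeader: a *flow.ProposalHeader fixture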
package mocks @@ -14,19 +14,23 @@ type BlockProducer struct { } // MakeBlockProposal provides a mock function with given fields: view, qc, lastViewTC -func (_m *BlockProducer) MakeBlockProposal(view uint64, qc *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*flow.Header, error) { +func (_m *BlockProducer) MakeBlockProposal(view uint64, qc *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*flow.ProposalHeader, error) { ret := _m.Called(view, qc, lastViewTC) - var r0 *flow.Header + if len(ret) == 0 { + panic("no return value specified for MakeBlockProposal") + } + + var r0 *flow.ProposalHeader var r1 error - if rf, ok := ret.Get(0).(func(uint64, *flow.QuorumCertificate, *flow.TimeoutCertificate) (*flow.Header, error)); ok { + if rf, ok := ret.Get(0).(func(uint64, *flow.QuorumCertificate, *flow.TimeoutCertificate) (*flow.ProposalHeader, error)); ok { return rf(view, qc, lastViewTC) } - if rf, ok := ret.Get(0).(func(uint64, *flow.QuorumCertificate, *flow.TimeoutCertificate) *flow.Header); ok { + if rf, ok := ret.Get(0).(func(uint64, *flow.QuorumCertificate, *flow.TimeoutCertificate) *flow.ProposalHeader); ok { r0 = rf(view, qc, lastViewTC) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Header) + r0 = ret.Get(0).(*flow.ProposalHeader) } } @@ -39,13 +43,12 @@ func (_m *BlockProducer) MakeBlockProposal(view uint64, qc *flow.QuorumCertifica return r0, r1 } -type mockConstructorTestingTNewBlockProducer interface { +// NewBlockProducer creates a new instance of BlockProducer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockProducer(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlockProducer creates a new instance of BlockProducer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockProducer(t mockConstructorTestingTNewBlockProducer) *BlockProducer { +}) *BlockProducer { mock := &BlockProducer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/block_signer_decoder.go b/consensus/hotstuff/mocks/block_signer_decoder.go index e2a570264e8..518cfe567fb 100644 --- a/consensus/hotstuff/mocks/block_signer_decoder.go +++ b/consensus/hotstuff/mocks/block_signer_decoder.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -17,6 +17,10 @@ type BlockSignerDecoder struct { func (_m *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.IdentifierList, error) { ret := _m.Called(header) + if len(ret) == 0 { + panic("no return value specified for DecodeSignerIDs") + } + var r0 flow.IdentifierList var r1 error if rf, ok := ret.Get(0).(func(*flow.Header) (flow.IdentifierList, error)); ok { @@ -39,13 +43,12 @@ func (_m *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.Identif return r0, r1 } -type mockConstructorTestingTNewBlockSignerDecoder interface { +// NewBlockSignerDecoder creates a new instance of BlockSignerDecoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockSignerDecoder(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlockSignerDecoder creates a new instance of BlockSignerDecoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewBlockSignerDecoder(t mockConstructorTestingTNewBlockSignerDecoder) *BlockSignerDecoder { +}) *BlockSignerDecoder { mock := &BlockSignerDecoder{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/communicator_consumer.go b/consensus/hotstuff/mocks/communicator_consumer.go index e0a8f079200..bda84f4661e 100644 --- a/consensus/hotstuff/mocks/communicator_consumer.go +++ b/consensus/hotstuff/mocks/communicator_consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -18,7 +18,7 @@ type CommunicatorConsumer struct { } // OnOwnProposal provides a mock function with given fields: proposal, targetPublicationTime -func (_m *CommunicatorConsumer) OnOwnProposal(proposal *flow.Header, targetPublicationTime time.Time) { +func (_m *CommunicatorConsumer) OnOwnProposal(proposal *flow.ProposalHeader, targetPublicationTime time.Time) { _m.Called(proposal, targetPublicationTime) } @@ -27,18 +27,17 @@ func (_m *CommunicatorConsumer) OnOwnTimeout(timeout *model.TimeoutObject) { _m.Called(timeout) } -// OnOwnVote provides a mock function with given fields: blockID, view, sigData, recipientID -func (_m *CommunicatorConsumer) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { - _m.Called(blockID, view, sigData, recipientID) +// OnOwnVote provides a mock function with given fields: vote, recipientID +func (_m *CommunicatorConsumer) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { + _m.Called(vote, recipientID) } -type mockConstructorTestingTNewCommunicatorConsumer interface { +// NewCommunicatorConsumer creates a new instance of CommunicatorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCommunicatorConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewCommunicatorConsumer creates a new instance of CommunicatorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCommunicatorConsumer(t mockConstructorTestingTNewCommunicatorConsumer) *CommunicatorConsumer { +}) *CommunicatorConsumer { mock := &CommunicatorConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/consumer.go b/consensus/hotstuff/mocks/consumer.go index 23776596a43..e4efa650e95 100644 --- a/consensus/hotstuff/mocks/consumer.go +++ b/consensus/hotstuff/mocks/consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks @@ -33,7 +33,7 @@ func (_m *Consumer) OnDoubleProposeDetected(_a0 *model.Block, _a1 *model.Block) _m.Called(_a0, _a1) } -// OnEventProcessed provides a mock function with given fields: +// OnEventProcessed provides a mock function with no fields func (_m *Consumer) OnEventProcessed() { _m.Called() } @@ -44,7 +44,7 @@ func (_m *Consumer) OnFinalizedBlock(_a0 *model.Block) { } // OnInvalidBlockDetected provides a mock function with given fields: err -func (_m *Consumer) OnInvalidBlockDetected(err model.InvalidProposalError) { +func (_m *Consumer) OnInvalidBlockDetected(err flow.Slashable[model.InvalidProposalError]) { _m.Called(err) } @@ -54,7 +54,7 @@ func (_m *Consumer) OnLocalTimeout(currentView uint64) { } // OnOwnProposal provides a mock function with given fields: proposal, targetPublicationTime -func (_m *Consumer) OnOwnProposal(proposal *flow.Header, targetPublicationTime time.Time) { +func (_m *Consumer) OnOwnProposal(proposal *flow.ProposalHeader, targetPublicationTime time.Time) { _m.Called(proposal, targetPublicationTime) } @@ -63,9 +63,9 @@ func (_m *Consumer) OnOwnTimeout(timeout *model.TimeoutObject) { _m.Called(timeout) } -// OnOwnVote provides a mock function with given fields: blockID, view, sigData, recipientID -func (_m *Consumer) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { - _m.Called(blockID, view, sigData, recipientID) +// OnOwnVote provides a mock function with given fields: vote, recipientID +func (_m *Consumer) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { + _m.Called(vote, recipientID) } // OnPartialTc provides a mock function with given fields: currentView, partialTc @@ -79,7 +79,7 @@ func (_m *Consumer) OnQcTriggeredViewChange(oldView uint64, newView uint64, qc * } // OnReceiveProposal provides a mock function with given fields: currentView, proposal -func (_m *Consumer) OnReceiveProposal(currentView uint64, proposal *model.Proposal) { +func (_m *Consumer) OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) { _m.Called(currentView, proposal) } @@ -113,13 +113,12 @@ func (_m *Consumer) OnViewChange(oldView uint64, newView uint64) { _m.Called(oldView, newView) } -type mockConstructorTestingTNewConsumer interface { +// NewConsumer creates a new instance of Consumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewConsumer creates a new instance of Consumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConsumer(t mockConstructorTestingTNewConsumer) *Consumer { +}) *Consumer { mock := &Consumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/dkg.go b/consensus/hotstuff/mocks/dkg.go index 77ec3602d69..e39fc77f1c1 100644 --- a/consensus/hotstuff/mocks/dkg.go +++ b/consensus/hotstuff/mocks/dkg.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
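// Editorial note on the Consumer change above: OnInvalidBlockDetected now receives
// the error wrapped in flow.Slashable, which pairs the evidence with the peer it
// came from. A sketch of the presumed shape (field names are assumptions):
//
//	notifier.On("OnInvalidBlockDetected", mock.Anything).Run(func(args mock.Arguments) {
//		wrapped := args[0].(flow.Slashable[model.InvalidProposalError])
//		_ = wrapped.OriginID // assumed field: the reporting/origin node ID
//	})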
package mocks import ( - crypto "github.com/onflow/flow-go/crypto" + crypto "github.com/onflow/crypto" flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" @@ -14,10 +14,14 @@ type DKG struct { mock.Mock } -// GroupKey provides a mock function with given fields: +// GroupKey provides a mock function with no fields func (_m *DKG) GroupKey() crypto.PublicKey { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GroupKey") + } + var r0 crypto.PublicKey if rf, ok := ret.Get(0).(func() crypto.PublicKey); ok { r0 = rf() @@ -34,6 +38,10 @@ func (_m *DKG) GroupKey() crypto.PublicKey { func (_m *DKG) Index(nodeID flow.Identifier) (uint, error) { ret := _m.Called(nodeID) + if len(ret) == 0 { + panic("no return value specified for Index") + } + var r0 uint var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (uint, error)); ok { @@ -58,6 +66,10 @@ func (_m *DKG) Index(nodeID flow.Identifier) (uint, error) { func (_m *DKG) KeyShare(nodeID flow.Identifier) (crypto.PublicKey, error) { ret := _m.Called(nodeID) + if len(ret) == 0 { + panic("no return value specified for KeyShare") + } + var r0 crypto.PublicKey var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (crypto.PublicKey, error)); ok { @@ -80,10 +92,64 @@ func (_m *DKG) KeyShare(nodeID flow.Identifier) (crypto.PublicKey, error) { return r0, r1 } -// Size provides a mock function with given fields: +// KeyShares provides a mock function with no fields +func (_m *DKG) KeyShares() []crypto.PublicKey { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for KeyShares") + } + + var r0 []crypto.PublicKey + if rf, ok := ret.Get(0).(func() []crypto.PublicKey); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]crypto.PublicKey) + } + } + + return r0 +} + +// NodeID provides a mock function with given fields: index +func (_m *DKG) NodeID(index uint) (flow.Identifier, error) { + ret := _m.Called(index) + + if len(ret) == 0 { + panic("no return value specified for NodeID") + } + + var r0 flow.Identifier + var r1 error + if rf, ok := ret.Get(0).(func(uint) (flow.Identifier, error)); ok { + return rf(index) + } + if rf, ok := ret.Get(0).(func(uint) flow.Identifier); ok { + r0 = rf(index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + if rf, ok := ret.Get(1).(func(uint) error); ok { + r1 = rf(index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Size provides a mock function with no fields func (_m *DKG) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -94,13 +160,12 @@ func (_m *DKG) Size() uint { return r0 } -type mockConstructorTestingTNewDKG interface { +// NewDKG creates a new instance of DKG. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDKG(t interface { mock.TestingT Cleanup(func()) -} - -// NewDKG creates a new instance of DKG. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
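// Editorial note: the named mockConstructorTestingT* interfaces are replaced by an
// inline anonymous interface; any value providing mock.TestingT plus Cleanup —
// typically *testing.T — still satisfies it, so call sites are unchanged:
//
//	dkg := mocks.NewDKG(t)
//	dkg.On("Size").Return(uint(4)) // Size() uint, as generated above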
-func NewDKG(t mockConstructorTestingTNewDKG) *DKG { +}) *DKG { mock := &DKG{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/dynamic_committee.go b/consensus/hotstuff/mocks/dynamic_committee.go index 67acf8f8bcb..c2cb50f7da1 100644 --- a/consensus/hotstuff/mocks/dynamic_committee.go +++ b/consensus/hotstuff/mocks/dynamic_committee.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -18,6 +18,10 @@ type DynamicCommittee struct { func (_m *DynamicCommittee) DKG(view uint64) (hotstuff.DKG, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for DKG") + } + var r0 hotstuff.DKG var r1 error if rf, ok := ret.Get(0).(func(uint64) (hotstuff.DKG, error)); ok { @@ -44,6 +48,10 @@ func (_m *DynamicCommittee) DKG(view uint64) (hotstuff.DKG, error) { func (_m *DynamicCommittee) IdentitiesByBlock(blockID flow.Identifier) (flow.IdentityList, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for IdentitiesByBlock") + } + var r0 flow.IdentityList var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.IdentityList, error)); ok { @@ -67,19 +75,23 @@ func (_m *DynamicCommittee) IdentitiesByBlock(blockID flow.Identifier) (flow.Ide } // IdentitiesByEpoch provides a mock function with given fields: view -func (_m *DynamicCommittee) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { +func (_m *DynamicCommittee) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { ret := _m.Called(view) - var r0 flow.IdentityList + if len(ret) == 0 { + panic("no return value specified for IdentitiesByEpoch") + } + + var r0 flow.IdentitySkeletonList var r1 error - if rf, ok := ret.Get(0).(func(uint64) (flow.IdentityList, error)); ok { + if rf, ok := ret.Get(0).(func(uint64) (flow.IdentitySkeletonList, error)); ok { return rf(view) } - if rf, ok := ret.Get(0).(func(uint64) flow.IdentityList); ok { + if rf, ok := ret.Get(0).(func(uint64) flow.IdentitySkeletonList); ok { r0 = rf(view) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.IdentityList) + r0 = ret.Get(0).(flow.IdentitySkeletonList) } } @@ -96,6 +108,10 @@ func (_m *DynamicCommittee) IdentitiesByEpoch(view uint64) (flow.IdentityList, e func (_m *DynamicCommittee) IdentityByBlock(blockID flow.Identifier, participantID flow.Identifier) (*flow.Identity, error) { ret := _m.Called(blockID, participantID) + if len(ret) == 0 { + panic("no return value specified for IdentityByBlock") + } + var r0 *flow.Identity var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) (*flow.Identity, error)); ok { @@ -119,19 +135,23 @@ func (_m *DynamicCommittee) IdentityByBlock(blockID flow.Identifier, participant } // IdentityByEpoch provides a mock function with given fields: view, participantID -func (_m *DynamicCommittee) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.Identity, error) { +func (_m *DynamicCommittee) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { ret := _m.Called(view, participantID) - var r0 *flow.Identity + if len(ret) == 0 { + panic("no return value specified for IdentityByEpoch") + } + + var r0 *flow.IdentitySkeleton var r1 error - if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (*flow.Identity, error)); ok { + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (*flow.IdentitySkeleton, error)); ok { return rf(view, participantID) } - if rf, ok := ret.Get(0).(func(uint64, 
flow.Identifier) *flow.Identity); ok { + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) *flow.IdentitySkeleton); ok { r0 = rf(view, participantID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Identity) + r0 = ret.Get(0).(*flow.IdentitySkeleton) } } @@ -148,6 +168,10 @@ func (_m *DynamicCommittee) IdentityByEpoch(view uint64, participantID flow.Iden func (_m *DynamicCommittee) LeaderForView(view uint64) (flow.Identifier, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for LeaderForView") + } + var r0 flow.Identifier var r1 error if rf, ok := ret.Get(0).(func(uint64) (flow.Identifier, error)); ok { @@ -174,6 +198,10 @@ func (_m *DynamicCommittee) LeaderForView(view uint64) (flow.Identifier, error) func (_m *DynamicCommittee) QuorumThresholdForView(view uint64) (uint64, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for QuorumThresholdForView") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { @@ -194,10 +222,14 @@ func (_m *DynamicCommittee) QuorumThresholdForView(view uint64) (uint64, error) return r0, r1 } -// Self provides a mock function with given fields: +// Self provides a mock function with no fields func (_m *DynamicCommittee) Self() flow.Identifier { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Self") + } + var r0 flow.Identifier if rf, ok := ret.Get(0).(func() flow.Identifier); ok { r0 = rf() @@ -214,6 +246,10 @@ func (_m *DynamicCommittee) Self() flow.Identifier { func (_m *DynamicCommittee) TimeoutThresholdForView(view uint64) (uint64, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for TimeoutThresholdForView") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { @@ -234,13 +270,12 @@ func (_m *DynamicCommittee) TimeoutThresholdForView(view uint64) (uint64, error) return r0, r1 } -type mockConstructorTestingTNewDynamicCommittee interface { +// NewDynamicCommittee creates a new instance of DynamicCommittee. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDynamicCommittee(t interface { mock.TestingT Cleanup(func()) -} - -// NewDynamicCommittee creates a new instance of DynamicCommittee. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDynamicCommittee(t mockConstructorTestingTNewDynamicCommittee) *DynamicCommittee { +}) *DynamicCommittee { mock := &DynamicCommittee{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/event_handler.go b/consensus/hotstuff/mocks/event_handler.go index 8cfdbbb4317..2784ff3628a 100644 --- a/consensus/hotstuff/mocks/event_handler.go +++ b/consensus/hotstuff/mocks/event_handler.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
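Aside: the regenerated mocks add a `len(ret) == 0` guard to every value-returning method, so a mock invoked without a configured return value now panics with a descriptive message instead of silently handing back zero values. A minimal sketch of the resulting test ergonomics, assuming the standard testify workflow (the test name and package are illustrative):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/consensus/hotstuff/mocks"
)

func TestDKGSize(t *testing.T) {
	// NewDKG registers a cleanup hook that asserts all expectations on test exit.
	dkg := mocks.NewDKG(t)

	// Without this On/Return, dkg.Size() would now panic with
	// "no return value specified for Size" rather than return 0.
	dkg.On("Size").Return(uint(4))

	require.Equal(t, uint(4), dkg.Size())
}
```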
package mocks @@ -20,10 +20,14 @@ type EventHandler struct { mock.Mock } -// OnLocalTimeout provides a mock function with given fields: +// OnLocalTimeout provides a mock function with no fields func (_m *EventHandler) OnLocalTimeout() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OnLocalTimeout") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -38,6 +42,10 @@ func (_m *EventHandler) OnLocalTimeout() error { func (_m *EventHandler) OnPartialTcCreated(partialTC *hotstuff.PartialTcCreated) error { ret := _m.Called(partialTC) + if len(ret) == 0 { + panic("no return value specified for OnPartialTcCreated") + } + var r0 error if rf, ok := ret.Get(0).(func(*hotstuff.PartialTcCreated) error); ok { r0 = rf(partialTC) @@ -49,11 +57,15 @@ func (_m *EventHandler) OnPartialTcCreated(partialTC *hotstuff.PartialTcCreated) } // OnReceiveProposal provides a mock function with given fields: proposal -func (_m *EventHandler) OnReceiveProposal(proposal *model.Proposal) error { +func (_m *EventHandler) OnReceiveProposal(proposal *model.SignedProposal) error { ret := _m.Called(proposal) + if len(ret) == 0 { + panic("no return value specified for OnReceiveProposal") + } + var r0 error - if rf, ok := ret.Get(0).(func(*model.Proposal) error); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal) error); ok { r0 = rf(proposal) } else { r0 = ret.Error(0) @@ -66,6 +78,10 @@ func (_m *EventHandler) OnReceiveProposal(proposal *model.Proposal) error { func (_m *EventHandler) OnReceiveQc(qc *flow.QuorumCertificate) error { ret := _m.Called(qc) + if len(ret) == 0 { + panic("no return value specified for OnReceiveQc") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.QuorumCertificate) error); ok { r0 = rf(qc) @@ -80,6 +96,10 @@ func (_m *EventHandler) OnReceiveQc(qc *flow.QuorumCertificate) error { func (_m *EventHandler) OnReceiveTc(tc *flow.TimeoutCertificate) error { ret := _m.Called(tc) + if len(ret) == 0 { + panic("no return value specified for OnReceiveTc") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.TimeoutCertificate) error); ok { r0 = rf(tc) @@ -94,6 +114,10 @@ func (_m *EventHandler) OnReceiveTc(tc *flow.TimeoutCertificate) error { func (_m *EventHandler) Start(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -104,10 +128,14 @@ func (_m *EventHandler) Start(ctx context.Context) error { return r0 } -// TimeoutChannel provides a mock function with given fields: +// TimeoutChannel provides a mock function with no fields func (_m *EventHandler) TimeoutChannel() <-chan time.Time { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TimeoutChannel") + } + var r0 <-chan time.Time if rf, ok := ret.Get(0).(func() <-chan time.Time); ok { r0 = rf() @@ -120,13 +148,12 @@ func (_m *EventHandler) TimeoutChannel() <-chan time.Time { return r0 } -type mockConstructorTestingTNewEventHandler interface { +// NewEventHandler creates a new instance of EventHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventHandler(t interface { mock.TestingT Cleanup(func()) -} - -// NewEventHandler creates a new instance of EventHandler. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEventHandler(t mockConstructorTestingTNewEventHandler) *EventHandler { +}) *EventHandler { mock := &EventHandler{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/event_loop.go b/consensus/hotstuff/mocks/event_loop.go index a1425da0629..fd1fea6bd9e 100644 --- a/consensus/hotstuff/mocks/event_loop.go +++ b/consensus/hotstuff/mocks/event_loop.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -17,10 +17,14 @@ type EventLoop struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *EventLoop) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -68,10 +72,14 @@ func (_m *EventLoop) OnVoteProcessed(vote *model.Vote) { _m.Called(vote) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *EventLoop) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -90,17 +98,16 @@ func (_m *EventLoop) Start(_a0 irrecoverable.SignalerContext) { } // SubmitProposal provides a mock function with given fields: proposal -func (_m *EventLoop) SubmitProposal(proposal *model.Proposal) { +func (_m *EventLoop) SubmitProposal(proposal *model.SignedProposal) { _m.Called(proposal) } -type mockConstructorTestingTNewEventLoop interface { +// NewEventLoop creates a new instance of EventLoop. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventLoop(t interface { mock.TestingT Cleanup(func()) -} - -// NewEventLoop creates a new instance of EventLoop. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEventLoop(t mockConstructorTestingTNewEventLoop) *EventLoop { +}) *EventLoop { mock := &EventLoop{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/finalization_consumer.go b/consensus/hotstuff/mocks/finalization_consumer.go index 7780a5e1c79..9855d8c5fa5 100644 --- a/consensus/hotstuff/mocks/finalization_consumer.go +++ b/consensus/hotstuff/mocks/finalization_consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -22,13 +22,12 @@ func (_m *FinalizationConsumer) OnFinalizedBlock(_a0 *model.Block) { _m.Called(_a0) } -type mockConstructorTestingTNewFinalizationConsumer interface { +// NewFinalizationConsumer creates a new instance of FinalizationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFinalizationConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewFinalizationConsumer creates a new instance of FinalizationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewFinalizationConsumer(t mockConstructorTestingTNewFinalizationConsumer) *FinalizationConsumer { +}) *FinalizationConsumer { mock := &FinalizationConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/follower_consumer.go b/consensus/hotstuff/mocks/follower_consumer.go index 225459ffe15..307004cc8e1 100644 --- a/consensus/hotstuff/mocks/follower_consumer.go +++ b/consensus/hotstuff/mocks/follower_consumer.go @@ -1,10 +1,13 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks import ( - model "github.com/onflow/flow-go/consensus/hotstuff/model" + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + model "github.com/onflow/flow-go/consensus/hotstuff/model" ) // FollowerConsumer is an autogenerated mock type for the FollowerConsumer type @@ -28,17 +31,16 @@ func (_m *FollowerConsumer) OnFinalizedBlock(_a0 *model.Block) { } // OnInvalidBlockDetected provides a mock function with given fields: err -func (_m *FollowerConsumer) OnInvalidBlockDetected(err model.InvalidProposalError) { +func (_m *FollowerConsumer) OnInvalidBlockDetected(err flow.Slashable[model.InvalidProposalError]) { _m.Called(err) } -type mockConstructorTestingTNewFollowerConsumer interface { +// NewFollowerConsumer creates a new instance of FollowerConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFollowerConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewFollowerConsumer creates a new instance of FollowerConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewFollowerConsumer(t mockConstructorTestingTNewFollowerConsumer) *FollowerConsumer { +}) *FollowerConsumer { mock := &FollowerConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/forks.go b/consensus/hotstuff/mocks/forks.go index c14ece84bc5..1da09ab462e 100644 --- a/consensus/hotstuff/mocks/forks.go +++ b/consensus/hotstuff/mocks/forks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
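Note the signature change on OnInvalidBlockDetected above: the raw model.InvalidProposalError is now wrapped in flow.Slashable, pairing the error with the origin of the offending message so consumers can attribute blame. A hedged sketch of what a caller might look like, assuming Slashable's conventional OriginID/Message fields (the helper name is hypothetical):

```go
package example

import (
	"github.com/onflow/flow-go/consensus/hotstuff"
	"github.com/onflow/flow-go/consensus/hotstuff/model"
	"github.com/onflow/flow-go/model/flow"
)

// reportInvalidBlock shows the shape of the new callback argument: the
// invalid-proposal error travels together with the ID of the node that
// delivered it. Field names are assumed from flow.Slashable's usual layout.
func reportInvalidBlock(consumer hotstuff.FollowerConsumer, originID flow.Identifier, err model.InvalidProposalError) {
	consumer.OnInvalidBlockDetected(flow.Slashable[model.InvalidProposalError]{
		OriginID: originID, // the peer that sent the invalid proposal
		Message:  err,
	})
}
```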
package mocks @@ -20,6 +20,10 @@ type Forks struct { func (_m *Forks) AddCertifiedBlock(certifiedBlock *model.CertifiedBlock) error { ret := _m.Called(certifiedBlock) + if len(ret) == 0 { + panic("no return value specified for AddCertifiedBlock") + } + var r0 error if rf, ok := ret.Get(0).(func(*model.CertifiedBlock) error); ok { r0 = rf(certifiedBlock) @@ -34,6 +38,10 @@ func (_m *Forks) AddCertifiedBlock(certifiedBlock *model.CertifiedBlock) error { func (_m *Forks) AddValidatedBlock(proposal *model.Block) error { ret := _m.Called(proposal) + if len(ret) == 0 { + panic("no return value specified for AddValidatedBlock") + } + var r0 error if rf, ok := ret.Get(0).(func(*model.Block) error); ok { r0 = rf(proposal) @@ -44,10 +52,14 @@ func (_m *Forks) AddValidatedBlock(proposal *model.Block) error { return r0 } -// FinalityProof provides a mock function with given fields: +// FinalityProof provides a mock function with no fields func (_m *Forks) FinalityProof() (*hotstuff.FinalityProof, bool) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for FinalityProof") + } + var r0 *hotstuff.FinalityProof var r1 bool if rf, ok := ret.Get(0).(func() (*hotstuff.FinalityProof, bool)); ok { @@ -70,10 +82,14 @@ func (_m *Forks) FinalityProof() (*hotstuff.FinalityProof, bool) { return r0, r1 } -// FinalizedBlock provides a mock function with given fields: +// FinalizedBlock provides a mock function with no fields func (_m *Forks) FinalizedBlock() *model.Block { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for FinalizedBlock") + } + var r0 *model.Block if rf, ok := ret.Get(0).(func() *model.Block); ok { r0 = rf() @@ -86,10 +102,14 @@ func (_m *Forks) FinalizedBlock() *model.Block { return r0 } -// FinalizedView provides a mock function with given fields: +// FinalizedView provides a mock function with no fields func (_m *Forks) FinalizedView() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for FinalizedView") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -104,6 +124,10 @@ func (_m *Forks) FinalizedView() uint64 { func (_m *Forks) GetBlock(blockID flow.Identifier) (*model.Block, bool) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for GetBlock") + } + var r0 *model.Block var r1 bool if rf, ok := ret.Get(0).(func(flow.Identifier) (*model.Block, bool)); ok { @@ -130,6 +154,10 @@ func (_m *Forks) GetBlock(blockID flow.Identifier) (*model.Block, bool) { func (_m *Forks) GetBlocksForView(view uint64) []*model.Block { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for GetBlocksForView") + } + var r0 []*model.Block if rf, ok := ret.Get(0).(func(uint64) []*model.Block); ok { r0 = rf(view) @@ -142,13 +170,12 @@ func (_m *Forks) GetBlocksForView(view uint64) []*model.Block { return r0 } -type mockConstructorTestingTNewForks interface { +// NewForks creates a new instance of Forks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewForks(t interface { mock.TestingT Cleanup(func()) -} - -// NewForks creates a new instance of Forks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewForks(t mockConstructorTestingTNewForks) *Forks { +}) *Forks { mock := &Forks{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/on_qc_created.go b/consensus/hotstuff/mocks/on_qc_created.go deleted file mode 100644 index 90c370cb8fd..00000000000 --- a/consensus/hotstuff/mocks/on_qc_created.go +++ /dev/null @@ -1,34 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocks - -import ( - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) - -// OnQCCreated is an autogenerated mock type for the OnQCCreated type -type OnQCCreated struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0 -func (_m *OnQCCreated) Execute(_a0 *flow.QuorumCertificate) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewOnQCCreated interface { - mock.TestingT - Cleanup(func()) -} - -// NewOnQCCreated creates a new instance of OnQCCreated. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewOnQCCreated(t mockConstructorTestingTNewOnQCCreated) *OnQCCreated { - mock := &OnQCCreated{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/consensus/hotstuff/mocks/pace_maker.go b/consensus/hotstuff/mocks/pace_maker.go index 1ec28cf7d34..ca28c361a90 100644 --- a/consensus/hotstuff/mocks/pace_maker.go +++ b/consensus/hotstuff/mocks/pace_maker.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -19,24 +19,14 @@ type PaceMaker struct { mock.Mock } -// BlockRateDelay provides a mock function with given fields: -func (_m *PaceMaker) BlockRateDelay() time.Duration { +// CurView provides a mock function with no fields +func (_m *PaceMaker) CurView() uint64 { ret := _m.Called() - var r0 time.Duration - if rf, ok := ret.Get(0).(func() time.Duration); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(time.Duration) + if len(ret) == 0 { + panic("no return value specified for CurView") } - return r0 -} - -// CurView provides a mock function with given fields: -func (_m *PaceMaker) CurView() uint64 { - ret := _m.Called() - var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -47,10 +37,14 @@ func (_m *PaceMaker) CurView() uint64 { return r0 } -// LastViewTC provides a mock function with given fields: +// LastViewTC provides a mock function with no fields func (_m *PaceMaker) LastViewTC() *flow.TimeoutCertificate { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LastViewTC") + } + var r0 *flow.TimeoutCertificate if rf, ok := ret.Get(0).(func() *flow.TimeoutCertificate); ok { r0 = rf() @@ -63,10 +57,14 @@ func (_m *PaceMaker) LastViewTC() *flow.TimeoutCertificate { return r0 } -// NewestQC provides a mock function with given fields: +// NewestQC provides a mock function with no fields func (_m *PaceMaker) NewestQC() *flow.QuorumCertificate { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NewestQC") + } + var r0 *flow.QuorumCertificate if rf, ok := ret.Get(0).(func() *flow.QuorumCertificate); ok { r0 = rf() @@ -83,6 +81,10 @@ func (_m *PaceMaker) NewestQC() *flow.QuorumCertificate { func (_m *PaceMaker) ProcessQC(qc *flow.QuorumCertificate) (*model.NewViewEvent, error) { ret := _m.Called(qc) + if len(ret) == 0 { + panic("no return value specified for ProcessQC") + } + var r0 *model.NewViewEvent var r1 error if rf, ok := ret.Get(0).(func(*flow.QuorumCertificate) 
(*model.NewViewEvent, error)); ok { @@ -109,6 +111,10 @@ func (_m *PaceMaker) ProcessQC(qc *flow.QuorumCertificate) (*model.NewViewEvent, func (_m *PaceMaker) ProcessTC(tc *flow.TimeoutCertificate) (*model.NewViewEvent, error) { ret := _m.Called(tc) + if len(ret) == 0 { + panic("no return value specified for ProcessTC") + } + var r0 *model.NewViewEvent var r1 error if rf, ok := ret.Get(0).(func(*flow.TimeoutCertificate) (*model.NewViewEvent, error)); ok { @@ -136,10 +142,32 @@ func (_m *PaceMaker) Start(ctx context.Context) { _m.Called(ctx) } -// TimeoutChannel provides a mock function with given fields: +// TargetPublicationTime provides a mock function with given fields: proposalView, timeViewEntered, parentBlockId +func (_m *PaceMaker) TargetPublicationTime(proposalView uint64, timeViewEntered time.Time, parentBlockId flow.Identifier) time.Time { + ret := _m.Called(proposalView, timeViewEntered, parentBlockId) + + if len(ret) == 0 { + panic("no return value specified for TargetPublicationTime") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func(uint64, time.Time, flow.Identifier) time.Time); ok { + r0 = rf(proposalView, timeViewEntered, parentBlockId) + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// TimeoutChannel provides a mock function with no fields func (_m *PaceMaker) TimeoutChannel() <-chan time.Time { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TimeoutChannel") + } + var r0 <-chan time.Time if rf, ok := ret.Get(0).(func() <-chan time.Time); ok { r0 = rf() @@ -152,13 +180,12 @@ func (_m *PaceMaker) TimeoutChannel() <-chan time.Time { return r0 } -type mockConstructorTestingTNewPaceMaker interface { +// NewPaceMaker creates a new instance of PaceMaker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPaceMaker(t interface { mock.TestingT Cleanup(func()) -} - -// NewPaceMaker creates a new instance of PaceMaker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPaceMaker(t mockConstructorTestingTNewPaceMaker) *PaceMaker { +}) *PaceMaker { mock := &PaceMaker{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/packer.go b/consensus/hotstuff/mocks/packer.go index b9d7bb573cf..6adb8d9c701 100644 --- a/consensus/hotstuff/mocks/packer.go +++ b/consensus/hotstuff/mocks/packer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
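The PaceMaker diff above removes BlockRateDelay and introduces TargetPublicationTime, which derives a proposal's publication time from the proposal view, the moment the view was entered, and the parent block ID. A minimal sketch of stubbing it in a test; all concrete values below are placeholders:

```go
package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/consensus/hotstuff/mocks"
	"github.com/onflow/flow-go/model/flow"
)

func TestTargetPublicationTime(t *testing.T) {
	pm := mocks.NewPaceMaker(t)

	// Return a fixed publication time for view 7, regardless of the
	// entry time and parent block ID passed by the caller.
	target := time.Now().Add(500 * time.Millisecond)
	pm.On("TargetPublicationTime", uint64(7), mock.Anything, mock.Anything).Return(target)

	got := pm.TargetPublicationTime(7, time.Now(), flow.Identifier{})
	require.True(t, got.Equal(target))
}
```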
package mocks @@ -18,6 +18,10 @@ type Packer struct { func (_m *Packer) Pack(view uint64, sig *hotstuff.BlockSignatureData) ([]byte, []byte, error) { ret := _m.Called(view, sig) + if len(ret) == 0 { + panic("no return value specified for Pack") + } + var r0 []byte var r1 []byte var r2 error @@ -50,15 +54,19 @@ func (_m *Packer) Pack(view uint64, sig *hotstuff.BlockSignatureData) ([]byte, [ } // Unpack provides a mock function with given fields: signerIdentities, sigData -func (_m *Packer) Unpack(signerIdentities flow.IdentityList, sigData []byte) (*hotstuff.BlockSignatureData, error) { +func (_m *Packer) Unpack(signerIdentities flow.IdentitySkeletonList, sigData []byte) (*hotstuff.BlockSignatureData, error) { ret := _m.Called(signerIdentities, sigData) + if len(ret) == 0 { + panic("no return value specified for Unpack") + } + var r0 *hotstuff.BlockSignatureData var r1 error - if rf, ok := ret.Get(0).(func(flow.IdentityList, []byte) (*hotstuff.BlockSignatureData, error)); ok { + if rf, ok := ret.Get(0).(func(flow.IdentitySkeletonList, []byte) (*hotstuff.BlockSignatureData, error)); ok { return rf(signerIdentities, sigData) } - if rf, ok := ret.Get(0).(func(flow.IdentityList, []byte) *hotstuff.BlockSignatureData); ok { + if rf, ok := ret.Get(0).(func(flow.IdentitySkeletonList, []byte) *hotstuff.BlockSignatureData); ok { r0 = rf(signerIdentities, sigData) } else { if ret.Get(0) != nil { @@ -66,7 +74,7 @@ func (_m *Packer) Unpack(signerIdentities flow.IdentityList, sigData []byte) (*h } } - if rf, ok := ret.Get(1).(func(flow.IdentityList, []byte) error); ok { + if rf, ok := ret.Get(1).(func(flow.IdentitySkeletonList, []byte) error); ok { r1 = rf(signerIdentities, sigData) } else { r1 = ret.Error(1) @@ -75,13 +83,12 @@ func (_m *Packer) Unpack(signerIdentities flow.IdentityList, sigData []byte) (*h return r0, r1 } -type mockConstructorTestingTNewPacker interface { +// NewPacker creates a new instance of Packer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPacker(t interface { mock.TestingT Cleanup(func()) -} - -// NewPacker creates a new instance of Packer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPacker(t mockConstructorTestingTNewPacker) *Packer { +}) *Packer { mock := &Packer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/participant_consumer.go b/consensus/hotstuff/mocks/participant_consumer.go index 2d2b4141093..cfe7a4d4134 100644 --- a/consensus/hotstuff/mocks/participant_consumer.go +++ b/consensus/hotstuff/mocks/participant_consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks @@ -21,7 +21,7 @@ func (_m *ParticipantConsumer) OnCurrentViewDetails(currentView uint64, finalize _m.Called(currentView, finalizedView, currentLeader) } -// OnEventProcessed provides a mock function with given fields: +// OnEventProcessed provides a mock function with no fields func (_m *ParticipantConsumer) OnEventProcessed() { _m.Called() } @@ -42,7 +42,7 @@ func (_m *ParticipantConsumer) OnQcTriggeredViewChange(oldView uint64, newView u } // OnReceiveProposal provides a mock function with given fields: currentView, proposal -func (_m *ParticipantConsumer) OnReceiveProposal(currentView uint64, proposal *model.Proposal) { +func (_m *ParticipantConsumer) OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) { _m.Called(currentView, proposal) } @@ -76,13 +76,12 @@ func (_m *ParticipantConsumer) OnViewChange(oldView uint64, newView uint64) { _m.Called(oldView, newView) } -type mockConstructorTestingTNewParticipantConsumer interface { +// NewParticipantConsumer creates a new instance of ParticipantConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewParticipantConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewParticipantConsumer creates a new instance of ParticipantConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewParticipantConsumer(t mockConstructorTestingTNewParticipantConsumer) *ParticipantConsumer { +}) *ParticipantConsumer { mock := &ParticipantConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/persister.go b/consensus/hotstuff/mocks/persister.go index 668fbc6a2c3..49e3a4ca5f9 100644 --- a/consensus/hotstuff/mocks/persister.go +++ b/consensus/hotstuff/mocks/persister.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks @@ -12,10 +12,14 @@ type Persister struct { mock.Mock } -// GetLivenessData provides a mock function with given fields: +// GetLivenessData provides a mock function with no fields func (_m *Persister) GetLivenessData() (*hotstuff.LivenessData, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetLivenessData") + } + var r0 *hotstuff.LivenessData var r1 error if rf, ok := ret.Get(0).(func() (*hotstuff.LivenessData, error)); ok { @@ -38,10 +42,14 @@ func (_m *Persister) GetLivenessData() (*hotstuff.LivenessData, error) { return r0, r1 } -// GetSafetyData provides a mock function with given fields: +// GetSafetyData provides a mock function with no fields func (_m *Persister) GetSafetyData() (*hotstuff.SafetyData, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetSafetyData") + } + var r0 *hotstuff.SafetyData var r1 error if rf, ok := ret.Get(0).(func() (*hotstuff.SafetyData, error)); ok { @@ -68,6 +76,10 @@ func (_m *Persister) GetSafetyData() (*hotstuff.SafetyData, error) { func (_m *Persister) PutLivenessData(livenessData *hotstuff.LivenessData) error { ret := _m.Called(livenessData) + if len(ret) == 0 { + panic("no return value specified for PutLivenessData") + } + var r0 error if rf, ok := ret.Get(0).(func(*hotstuff.LivenessData) error); ok { r0 = rf(livenessData) @@ -82,6 +94,10 @@ func (_m *Persister) PutLivenessData(livenessData *hotstuff.LivenessData) error func (_m *Persister) PutSafetyData(safetyData *hotstuff.SafetyData) error { ret := _m.Called(safetyData) + if len(ret) == 0 { + panic("no return value specified for PutSafetyData") + } + var r0 error if rf, ok := ret.Get(0).(func(*hotstuff.SafetyData) error); ok { r0 = rf(safetyData) @@ -92,13 +108,12 @@ func (_m *Persister) PutSafetyData(safetyData *hotstuff.SafetyData) error { return r0 } -type mockConstructorTestingTNewPersister interface { +// NewPersister creates a new instance of Persister. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPersister(t interface { mock.TestingT Cleanup(func()) -} - -// NewPersister creates a new instance of Persister. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPersister(t mockConstructorTestingTNewPersister) *Persister { +}) *Persister { mock := &Persister{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/persister_reader.go b/consensus/hotstuff/mocks/persister_reader.go new file mode 100644 index 00000000000..54146f89594 --- /dev/null +++ b/consensus/hotstuff/mocks/persister_reader.go @@ -0,0 +1,87 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + hotstuff "github.com/onflow/flow-go/consensus/hotstuff" + mock "github.com/stretchr/testify/mock" +) + +// PersisterReader is an autogenerated mock type for the PersisterReader type +type PersisterReader struct { + mock.Mock +} + +// GetLivenessData provides a mock function with no fields +func (_m *PersisterReader) GetLivenessData() (*hotstuff.LivenessData, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLivenessData") + } + + var r0 *hotstuff.LivenessData + var r1 error + if rf, ok := ret.Get(0).(func() (*hotstuff.LivenessData, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *hotstuff.LivenessData); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*hotstuff.LivenessData) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSafetyData provides a mock function with no fields +func (_m *PersisterReader) GetSafetyData() (*hotstuff.SafetyData, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetSafetyData") + } + + var r0 *hotstuff.SafetyData + var r1 error + if rf, ok := ret.Get(0).(func() (*hotstuff.SafetyData, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *hotstuff.SafetyData); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*hotstuff.SafetyData) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewPersisterReader creates a new instance of PersisterReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPersisterReader(t interface { + mock.TestingT + Cleanup(func()) +}) *PersisterReader { + mock := &PersisterReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/hotstuff/mocks/proposal_duration_provider.go b/consensus/hotstuff/mocks/proposal_duration_provider.go new file mode 100644 index 00000000000..f1f5a4b6e2e --- /dev/null +++ b/consensus/hotstuff/mocks/proposal_duration_provider.go @@ -0,0 +1,48 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// ProposalDurationProvider is an autogenerated mock type for the ProposalDurationProvider type +type ProposalDurationProvider struct { + mock.Mock +} + +// TargetPublicationTime provides a mock function with given fields: proposalView, timeViewEntered, parentBlockId +func (_m *ProposalDurationProvider) TargetPublicationTime(proposalView uint64, timeViewEntered time.Time, parentBlockId flow.Identifier) time.Time { + ret := _m.Called(proposalView, timeViewEntered, parentBlockId) + + if len(ret) == 0 { + panic("no return value specified for TargetPublicationTime") + } + + var r0 time.Time + if rf, ok := ret.Get(0).(func(uint64, time.Time, flow.Identifier) time.Time); ok { + r0 = rf(proposalView, timeViewEntered, parentBlockId) + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// NewProposalDurationProvider creates a new instance of ProposalDurationProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewProposalDurationProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *ProposalDurationProvider { + mock := &ProposalDurationProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/consensus/hotstuff/mocks/proposal_violation_consumer.go b/consensus/hotstuff/mocks/proposal_violation_consumer.go index d775b3e923d..597b860eb08 100644 --- a/consensus/hotstuff/mocks/proposal_violation_consumer.go +++ b/consensus/hotstuff/mocks/proposal_violation_consumer.go @@ -1,10 +1,13 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks import ( - model "github.com/onflow/flow-go/consensus/hotstuff/model" + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + model "github.com/onflow/flow-go/consensus/hotstuff/model" ) // ProposalViolationConsumer is an autogenerated mock type for the ProposalViolationConsumer type @@ -18,17 +21,16 @@ func (_m *ProposalViolationConsumer) OnDoubleProposeDetected(_a0 *model.Block, _ } // OnInvalidBlockDetected provides a mock function with given fields: err -func (_m *ProposalViolationConsumer) OnInvalidBlockDetected(err model.InvalidProposalError) { +func (_m *ProposalViolationConsumer) OnInvalidBlockDetected(err flow.Slashable[model.InvalidProposalError]) { _m.Called(err) } -type mockConstructorTestingTNewProposalViolationConsumer interface { +// NewProposalViolationConsumer creates a new instance of ProposalViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProposalViolationConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewProposalViolationConsumer creates a new instance of ProposalViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewProposalViolationConsumer(t mockConstructorTestingTNewProposalViolationConsumer) *ProposalViolationConsumer { +}) *ProposalViolationConsumer { mock := &ProposalViolationConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/random_beacon_inspector.go b/consensus/hotstuff/mocks/random_beacon_inspector.go index ef53e9cebd4..1c08aab162c 100644 --- a/consensus/hotstuff/mocks/random_beacon_inspector.go +++ b/consensus/hotstuff/mocks/random_beacon_inspector.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
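Two brand-new mocks appear here: PersisterReader, a read-only view of the persister, and ProposalDurationProvider, carrying the TargetPublicationTime method factored out of PaceMaker. A hedged usage sketch for the reader mock, assuming hotstuff.LivenessData exposes a CurrentView field (the value 42 is a placeholder):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/consensus/hotstuff"
	"github.com/onflow/flow-go/consensus/hotstuff/mocks"
)

func TestGetLivenessData(t *testing.T) {
	reader := mocks.NewPersisterReader(t)

	// Stub the read path; like all regenerated mocks, an unconfigured
	// call to GetLivenessData would panic instead of returning nils.
	expected := &hotstuff.LivenessData{CurrentView: 42}
	reader.On("GetLivenessData").Return(expected, nil)

	data, err := reader.GetLivenessData()
	require.NoError(t, err)
	require.Equal(t, uint64(42), data.CurrentView)
}
```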
package mocks import ( - crypto "github.com/onflow/flow-go/crypto" + crypto "github.com/onflow/crypto" mock "github.com/stretchr/testify/mock" ) @@ -13,10 +13,14 @@ type RandomBeaconInspector struct { mock.Mock } -// EnoughShares provides a mock function with given fields: +// EnoughShares provides a mock function with no fields func (_m *RandomBeaconInspector) EnoughShares() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for EnoughShares") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -27,10 +31,14 @@ func (_m *RandomBeaconInspector) EnoughShares() bool { return r0 } -// Reconstruct provides a mock function with given fields: +// Reconstruct provides a mock function with no fields func (_m *RandomBeaconInspector) Reconstruct() (crypto.Signature, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Reconstruct") + } + var r0 crypto.Signature var r1 error if rf, ok := ret.Get(0).(func() (crypto.Signature, error)); ok { @@ -57,6 +65,10 @@ func (_m *RandomBeaconInspector) Reconstruct() (crypto.Signature, error) { func (_m *RandomBeaconInspector) TrustedAdd(signerIndex int, share crypto.Signature) (bool, error) { ret := _m.Called(signerIndex, share) + if len(ret) == 0 { + panic("no return value specified for TrustedAdd") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(int, crypto.Signature) (bool, error)); ok { @@ -81,6 +93,10 @@ func (_m *RandomBeaconInspector) TrustedAdd(signerIndex int, share crypto.Signat func (_m *RandomBeaconInspector) Verify(signerIndex int, share crypto.Signature) error { ret := _m.Called(signerIndex, share) + if len(ret) == 0 { + panic("no return value specified for Verify") + } + var r0 error if rf, ok := ret.Get(0).(func(int, crypto.Signature) error); ok { r0 = rf(signerIndex, share) @@ -91,13 +107,12 @@ func (_m *RandomBeaconInspector) Verify(signerIndex int, share crypto.Signature) return r0 } -type mockConstructorTestingTNewRandomBeaconInspector interface { +// NewRandomBeaconInspector creates a new instance of RandomBeaconInspector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRandomBeaconInspector(t interface { mock.TestingT Cleanup(func()) -} - -// NewRandomBeaconInspector creates a new instance of RandomBeaconInspector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRandomBeaconInspector(t mockConstructorTestingTNewRandomBeaconInspector) *RandomBeaconInspector { +}) *RandomBeaconInspector { mock := &RandomBeaconInspector{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/random_beacon_reconstructor.go b/consensus/hotstuff/mocks/random_beacon_reconstructor.go index 7cb4fe52c75..579c60aa510 100644 --- a/consensus/hotstuff/mocks/random_beacon_reconstructor.go +++ b/consensus/hotstuff/mocks/random_beacon_reconstructor.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks import ( - crypto "github.com/onflow/flow-go/crypto" + crypto "github.com/onflow/crypto" flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" @@ -14,10 +14,14 @@ type RandomBeaconReconstructor struct { mock.Mock } -// EnoughShares provides a mock function with given fields: +// EnoughShares provides a mock function with no fields func (_m *RandomBeaconReconstructor) EnoughShares() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for EnoughShares") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -28,10 +32,14 @@ func (_m *RandomBeaconReconstructor) EnoughShares() bool { return r0 } -// Reconstruct provides a mock function with given fields: +// Reconstruct provides a mock function with no fields func (_m *RandomBeaconReconstructor) Reconstruct() (crypto.Signature, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Reconstruct") + } + var r0 crypto.Signature var r1 error if rf, ok := ret.Get(0).(func() (crypto.Signature, error)); ok { @@ -58,6 +66,10 @@ func (_m *RandomBeaconReconstructor) Reconstruct() (crypto.Signature, error) { func (_m *RandomBeaconReconstructor) TrustedAdd(signerID flow.Identifier, sig crypto.Signature) (bool, error) { ret := _m.Called(signerID, sig) + if len(ret) == 0 { + panic("no return value specified for TrustedAdd") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, crypto.Signature) (bool, error)); ok { @@ -82,6 +94,10 @@ func (_m *RandomBeaconReconstructor) TrustedAdd(signerID flow.Identifier, sig cr func (_m *RandomBeaconReconstructor) Verify(signerID flow.Identifier, sig crypto.Signature) error { ret := _m.Called(signerID, sig) + if len(ret) == 0 { + panic("no return value specified for Verify") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Identifier, crypto.Signature) error); ok { r0 = rf(signerID, sig) @@ -92,13 +108,12 @@ func (_m *RandomBeaconReconstructor) Verify(signerID flow.Identifier, sig crypto return r0 } -type mockConstructorTestingTNewRandomBeaconReconstructor interface { +// NewRandomBeaconReconstructor creates a new instance of RandomBeaconReconstructor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRandomBeaconReconstructor(t interface { mock.TestingT Cleanup(func()) -} - -// NewRandomBeaconReconstructor creates a new instance of RandomBeaconReconstructor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRandomBeaconReconstructor(t mockConstructorTestingTNewRandomBeaconReconstructor) *RandomBeaconReconstructor { +}) *RandomBeaconReconstructor { mock := &RandomBeaconReconstructor{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/replicas.go b/consensus/hotstuff/mocks/replicas.go index 965031dafd2..37d946d69c9 100644 --- a/consensus/hotstuff/mocks/replicas.go +++ b/consensus/hotstuff/mocks/replicas.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks @@ -18,6 +18,10 @@ type Replicas struct { func (_m *Replicas) DKG(view uint64) (hotstuff.DKG, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for DKG") + } + var r0 hotstuff.DKG var r1 error if rf, ok := ret.Get(0).(func(uint64) (hotstuff.DKG, error)); ok { @@ -41,19 +45,23 @@ func (_m *Replicas) DKG(view uint64) (hotstuff.DKG, error) { } // IdentitiesByEpoch provides a mock function with given fields: view -func (_m *Replicas) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { +func (_m *Replicas) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { ret := _m.Called(view) - var r0 flow.IdentityList + if len(ret) == 0 { + panic("no return value specified for IdentitiesByEpoch") + } + + var r0 flow.IdentitySkeletonList var r1 error - if rf, ok := ret.Get(0).(func(uint64) (flow.IdentityList, error)); ok { + if rf, ok := ret.Get(0).(func(uint64) (flow.IdentitySkeletonList, error)); ok { return rf(view) } - if rf, ok := ret.Get(0).(func(uint64) flow.IdentityList); ok { + if rf, ok := ret.Get(0).(func(uint64) flow.IdentitySkeletonList); ok { r0 = rf(view) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.IdentityList) + r0 = ret.Get(0).(flow.IdentitySkeletonList) } } @@ -67,19 +75,23 @@ func (_m *Replicas) IdentitiesByEpoch(view uint64) (flow.IdentityList, error) { } // IdentityByEpoch provides a mock function with given fields: view, participantID -func (_m *Replicas) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.Identity, error) { +func (_m *Replicas) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { ret := _m.Called(view, participantID) - var r0 *flow.Identity + if len(ret) == 0 { + panic("no return value specified for IdentityByEpoch") + } + + var r0 *flow.IdentitySkeleton var r1 error - if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (*flow.Identity, error)); ok { + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (*flow.IdentitySkeleton, error)); ok { return rf(view, participantID) } - if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) *flow.Identity); ok { + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) *flow.IdentitySkeleton); ok { r0 = rf(view, participantID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Identity) + r0 = ret.Get(0).(*flow.IdentitySkeleton) } } @@ -96,6 +108,10 @@ func (_m *Replicas) IdentityByEpoch(view uint64, participantID flow.Identifier) func (_m *Replicas) LeaderForView(view uint64) (flow.Identifier, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for LeaderForView") + } + var r0 flow.Identifier var r1 error if rf, ok := ret.Get(0).(func(uint64) (flow.Identifier, error)); ok { @@ -122,6 +138,10 @@ func (_m *Replicas) LeaderForView(view uint64) (flow.Identifier, error) { func (_m *Replicas) QuorumThresholdForView(view uint64) (uint64, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for QuorumThresholdForView") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { @@ -142,10 +162,14 @@ func (_m *Replicas) QuorumThresholdForView(view uint64) (uint64, error) { return r0, r1 } -// Self provides a mock function with given fields: +// Self provides a mock function with no fields func (_m *Replicas) Self() flow.Identifier { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Self") + } + var r0 flow.Identifier if rf, ok := ret.Get(0).(func() 
flow.Identifier); ok { r0 = rf() @@ -162,6 +186,10 @@ func (_m *Replicas) Self() flow.Identifier { func (_m *Replicas) TimeoutThresholdForView(view uint64) (uint64, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for TimeoutThresholdForView") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { @@ -182,13 +210,12 @@ func (_m *Replicas) TimeoutThresholdForView(view uint64) (uint64, error) { return r0, r1 } -type mockConstructorTestingTNewReplicas interface { +// NewReplicas creates a new instance of Replicas. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReplicas(t interface { mock.TestingT Cleanup(func()) -} - -// NewReplicas creates a new instance of Replicas. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewReplicas(t mockConstructorTestingTNewReplicas) *Replicas { +}) *Replicas { mock := &Replicas{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/safety_rules.go b/consensus/hotstuff/mocks/safety_rules.go index dccb0b91ddc..1c76ba1d956 100644 --- a/consensus/hotstuff/mocks/safety_rules.go +++ b/consensus/hotstuff/mocks/safety_rules.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -19,6 +19,10 @@ type SafetyRules struct { func (_m *SafetyRules) ProduceTimeout(curView uint64, newestQC *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*model.TimeoutObject, error) { ret := _m.Called(curView, newestQC, lastViewTC) + if len(ret) == 0 { + panic("no return value specified for ProduceTimeout") + } + var r0 *model.TimeoutObject var r1 error if rf, ok := ret.Get(0).(func(uint64, *flow.QuorumCertificate, *flow.TimeoutCertificate) (*model.TimeoutObject, error)); ok { @@ -42,15 +46,19 @@ func (_m *SafetyRules) ProduceTimeout(curView uint64, newestQC *flow.QuorumCerti } // ProduceVote provides a mock function with given fields: proposal, curView -func (_m *SafetyRules) ProduceVote(proposal *model.Proposal, curView uint64) (*model.Vote, error) { +func (_m *SafetyRules) ProduceVote(proposal *model.SignedProposal, curView uint64) (*model.Vote, error) { ret := _m.Called(proposal, curView) + if len(ret) == 0 { + panic("no return value specified for ProduceVote") + } + var r0 *model.Vote var r1 error - if rf, ok := ret.Get(0).(func(*model.Proposal, uint64) (*model.Vote, error)); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal, uint64) (*model.Vote, error)); ok { return rf(proposal, curView) } - if rf, ok := ret.Get(0).(func(*model.Proposal, uint64) *model.Vote); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal, uint64) *model.Vote); ok { r0 = rf(proposal, curView) } else { if ret.Get(0) != nil { @@ -58,7 +66,7 @@ func (_m *SafetyRules) ProduceVote(proposal *model.Proposal, curView uint64) (*m } } - if rf, ok := ret.Get(1).(func(*model.Proposal, uint64) error); ok { + if rf, ok := ret.Get(1).(func(*model.SignedProposal, uint64) error); ok { r1 = rf(proposal, curView) } else { r1 = ret.Error(1) @@ -67,13 +75,42 @@ func (_m *SafetyRules) ProduceVote(proposal *model.Proposal, curView uint64) (*m return r0, r1 } -type mockConstructorTestingTNewSafetyRules interface { - mock.TestingT - Cleanup(func()) +// SignOwnProposal provides a mock function with given fields: unsignedProposal +func (_m *SafetyRules) 
SignOwnProposal(unsignedProposal *model.Proposal) (*model.Vote, error) { + ret := _m.Called(unsignedProposal) + + if len(ret) == 0 { + panic("no return value specified for SignOwnProposal") + } + + var r0 *model.Vote + var r1 error + if rf, ok := ret.Get(0).(func(*model.Proposal) (*model.Vote, error)); ok { + return rf(unsignedProposal) + } + if rf, ok := ret.Get(0).(func(*model.Proposal) *model.Vote); ok { + r0 = rf(unsignedProposal) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Vote) + } + } + + if rf, ok := ret.Get(1).(func(*model.Proposal) error); ok { + r1 = rf(unsignedProposal) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // NewSafetyRules creates a new instance of SafetyRules. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSafetyRules(t mockConstructorTestingTNewSafetyRules) *SafetyRules { +// The first argument is typically a *testing.T value. +func NewSafetyRules(t interface { + mock.TestingT + Cleanup(func()) +}) *SafetyRules { mock := &SafetyRules{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/signer.go b/consensus/hotstuff/mocks/signer.go index 49dc412d29e..dd6588feb91 100644 --- a/consensus/hotstuff/mocks/signer.go +++ b/consensus/hotstuff/mocks/signer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -15,36 +15,14 @@ type Signer struct { mock.Mock } -// CreateProposal provides a mock function with given fields: block -func (_m *Signer) CreateProposal(block *model.Block) (*model.Proposal, error) { - ret := _m.Called(block) - - var r0 *model.Proposal - var r1 error - if rf, ok := ret.Get(0).(func(*model.Block) (*model.Proposal, error)); ok { - return rf(block) - } - if rf, ok := ret.Get(0).(func(*model.Block) *model.Proposal); ok { - r0 = rf(block) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Proposal) - } - } - - if rf, ok := ret.Get(1).(func(*model.Block) error); ok { - r1 = rf(block) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // CreateTimeout provides a mock function with given fields: curView, newestQC, lastViewTC func (_m *Signer) CreateTimeout(curView uint64, newestQC *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*model.TimeoutObject, error) { ret := _m.Called(curView, newestQC, lastViewTC) + if len(ret) == 0 { + panic("no return value specified for CreateTimeout") + } + var r0 *model.TimeoutObject var r1 error if rf, ok := ret.Get(0).(func(uint64, *flow.QuorumCertificate, *flow.TimeoutCertificate) (*model.TimeoutObject, error)); ok { @@ -71,6 +49,10 @@ func (_m *Signer) CreateTimeout(curView uint64, newestQC *flow.QuorumCertificate func (_m *Signer) CreateVote(block *model.Block) (*model.Vote, error) { ret := _m.Called(block) + if len(ret) == 0 { + panic("no return value specified for CreateVote") + } + var r0 *model.Vote var r1 error if rf, ok := ret.Get(0).(func(*model.Block) (*model.Vote, error)); ok { @@ -93,13 +75,12 @@ func (_m *Signer) CreateVote(block *model.Block) (*model.Vote, error) { return r0, r1 } -type mockConstructorTestingTNewSigner interface { +// NewSigner creates a new instance of Signer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSigner(t interface { mock.TestingT Cleanup(func()) -} - -// NewSigner creates a new instance of Signer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSigner(t mockConstructorTestingTNewSigner) *Signer { +}) *Signer { mock := &Signer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/timeout_aggregation_consumer.go b/consensus/hotstuff/mocks/timeout_aggregation_consumer.go index c123201f956..75e16a933f3 100644 --- a/consensus/hotstuff/mocks/timeout_aggregation_consumer.go +++ b/consensus/hotstuff/mocks/timeout_aggregation_consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -50,13 +50,12 @@ func (_m *TimeoutAggregationConsumer) OnTimeoutProcessed(timeout *model.TimeoutO _m.Called(timeout) } -type mockConstructorTestingTNewTimeoutAggregationConsumer interface { +// NewTimeoutAggregationConsumer creates a new instance of TimeoutAggregationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutAggregationConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewTimeoutAggregationConsumer creates a new instance of TimeoutAggregationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTimeoutAggregationConsumer(t mockConstructorTestingTNewTimeoutAggregationConsumer) *TimeoutAggregationConsumer { +}) *TimeoutAggregationConsumer { mock := &TimeoutAggregationConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/timeout_aggregation_violation_consumer.go b/consensus/hotstuff/mocks/timeout_aggregation_violation_consumer.go index 552f8650f9f..71509a764d9 100644 --- a/consensus/hotstuff/mocks/timeout_aggregation_violation_consumer.go +++ b/consensus/hotstuff/mocks/timeout_aggregation_violation_consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -22,13 +22,12 @@ func (_m *TimeoutAggregationViolationConsumer) OnInvalidTimeoutDetected(err mode _m.Called(err) } -type mockConstructorTestingTNewTimeoutAggregationViolationConsumer interface { +// NewTimeoutAggregationViolationConsumer creates a new instance of TimeoutAggregationViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutAggregationViolationConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewTimeoutAggregationViolationConsumer creates a new instance of TimeoutAggregationViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTimeoutAggregationViolationConsumer(t mockConstructorTestingTNewTimeoutAggregationViolationConsumer) *TimeoutAggregationViolationConsumer { +}) *TimeoutAggregationViolationConsumer { mock := &TimeoutAggregationViolationConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/timeout_aggregator.go b/consensus/hotstuff/mocks/timeout_aggregator.go index 38d26732c6b..2273f157baf 100644 --- a/consensus/hotstuff/mocks/timeout_aggregator.go +++ b/consensus/hotstuff/mocks/timeout_aggregator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
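The SafetyRules diff above replaces the removed Signer.CreateProposal path: ProduceVote now consumes a *model.SignedProposal, and the new SignOwnProposal lets a leader sign its own unsigned proposal. A hypothetical helper sketching how the pieces might compose; the SignedProposal field names are assumptions, not taken from this diff:

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/consensus/hotstuff"
	"github.com/onflow/flow-go/consensus/hotstuff/model"
)

// signProposal asks SafetyRules to sign the leader's own unsigned
// proposal, then wraps proposal and signature into a SignedProposal.
// The struct literal assumes SignedProposal embeds Proposal alongside
// the signature bytes; treat it as a sketch, not the canonical layout.
func signProposal(sr hotstuff.SafetyRules, unsigned *model.Proposal) (*model.SignedProposal, error) {
	vote, err := sr.SignOwnProposal(unsigned)
	if err != nil {
		return nil, fmt.Errorf("could not sign own proposal: %w", err)
	}
	return &model.SignedProposal{
		Proposal: *unsigned,
		SigData:  vote.SigData,
	}, nil
}
```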
package mocks @@ -19,10 +19,14 @@ func (_m *TimeoutAggregator) AddTimeout(timeoutObject *model.TimeoutObject) { _m.Called(timeoutObject) } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *TimeoutAggregator) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -40,10 +44,14 @@ func (_m *TimeoutAggregator) PruneUpToView(lowestRetainedView uint64) { _m.Called(lowestRetainedView) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *TimeoutAggregator) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -61,13 +69,12 @@ func (_m *TimeoutAggregator) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewTimeoutAggregator interface { +// NewTimeoutAggregator creates a new instance of TimeoutAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutAggregator(t interface { mock.TestingT Cleanup(func()) -} - -// NewTimeoutAggregator creates a new instance of TimeoutAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTimeoutAggregator(t mockConstructorTestingTNewTimeoutAggregator) *TimeoutAggregator { +}) *TimeoutAggregator { mock := &TimeoutAggregator{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/timeout_collector.go b/consensus/hotstuff/mocks/timeout_collector.go index 260ad174450..399d8a33c95 100644 --- a/consensus/hotstuff/mocks/timeout_collector.go +++ b/consensus/hotstuff/mocks/timeout_collector.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -16,6 +16,10 @@ type TimeoutCollector struct { func (_m *TimeoutCollector) AddTimeout(timeoutObject *model.TimeoutObject) error { ret := _m.Called(timeoutObject) + if len(ret) == 0 { + panic("no return value specified for AddTimeout") + } + var r0 error if rf, ok := ret.Get(0).(func(*model.TimeoutObject) error); ok { r0 = rf(timeoutObject) @@ -26,10 +30,14 @@ func (_m *TimeoutCollector) AddTimeout(timeoutObject *model.TimeoutObject) error return r0 } -// View provides a mock function with given fields: +// View provides a mock function with no fields func (_m *TimeoutCollector) View() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for View") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -40,13 +48,12 @@ func (_m *TimeoutCollector) View() uint64 { return r0 } -type mockConstructorTestingTNewTimeoutCollector interface { +// NewTimeoutCollector creates a new instance of TimeoutCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollector(t interface { mock.TestingT Cleanup(func()) -} - -// NewTimeoutCollector creates a new instance of TimeoutCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewTimeoutCollector(t mockConstructorTestingTNewTimeoutCollector) *TimeoutCollector { +}) *TimeoutCollector { mock := &TimeoutCollector{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/timeout_collector_consumer.go b/consensus/hotstuff/mocks/timeout_collector_consumer.go index 629f33f9a14..48fb7c25404 100644 --- a/consensus/hotstuff/mocks/timeout_collector_consumer.go +++ b/consensus/hotstuff/mocks/timeout_collector_consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -40,13 +40,12 @@ func (_m *TimeoutCollectorConsumer) OnTimeoutProcessed(timeout *model.TimeoutObj _m.Called(timeout) } -type mockConstructorTestingTNewTimeoutCollectorConsumer interface { +// NewTimeoutCollectorConsumer creates a new instance of TimeoutCollectorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollectorConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewTimeoutCollectorConsumer creates a new instance of TimeoutCollectorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTimeoutCollectorConsumer(t mockConstructorTestingTNewTimeoutCollectorConsumer) *TimeoutCollectorConsumer { +}) *TimeoutCollectorConsumer { mock := &TimeoutCollectorConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/timeout_collector_factory.go b/consensus/hotstuff/mocks/timeout_collector_factory.go index fec262ab94e..9eac542523d 100644 --- a/consensus/hotstuff/mocks/timeout_collector_factory.go +++ b/consensus/hotstuff/mocks/timeout_collector_factory.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -16,6 +16,10 @@ type TimeoutCollectorFactory struct { func (_m *TimeoutCollectorFactory) Create(view uint64) (hotstuff.TimeoutCollector, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for Create") + } + var r0 hotstuff.TimeoutCollector var r1 error if rf, ok := ret.Get(0).(func(uint64) (hotstuff.TimeoutCollector, error)); ok { @@ -38,13 +42,12 @@ func (_m *TimeoutCollectorFactory) Create(view uint64) (hotstuff.TimeoutCollecto return r0, r1 } -type mockConstructorTestingTNewTimeoutCollectorFactory interface { +// NewTimeoutCollectorFactory creates a new instance of TimeoutCollectorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollectorFactory(t interface { mock.TestingT Cleanup(func()) -} - -// NewTimeoutCollectorFactory creates a new instance of TimeoutCollectorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTimeoutCollectorFactory(t mockConstructorTestingTNewTimeoutCollectorFactory) *TimeoutCollectorFactory { +}) *TimeoutCollectorFactory { mock := &TimeoutCollectorFactory{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/timeout_collectors.go b/consensus/hotstuff/mocks/timeout_collectors.go index 0a5a5c78731..c2f15d5b294 100644 --- a/consensus/hotstuff/mocks/timeout_collectors.go +++ b/consensus/hotstuff/mocks/timeout_collectors.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks @@ -16,6 +16,10 @@ type TimeoutCollectors struct { func (_m *TimeoutCollectors) GetOrCreateCollector(view uint64) (hotstuff.TimeoutCollector, bool, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for GetOrCreateCollector") + } + var r0 hotstuff.TimeoutCollector var r1 bool var r2 error @@ -50,13 +54,12 @@ func (_m *TimeoutCollectors) PruneUpToView(lowestRetainedView uint64) { _m.Called(lowestRetainedView) } -type mockConstructorTestingTNewTimeoutCollectors interface { +// NewTimeoutCollectors creates a new instance of TimeoutCollectors. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutCollectors(t interface { mock.TestingT Cleanup(func()) -} - -// NewTimeoutCollectors creates a new instance of TimeoutCollectors. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTimeoutCollectors(t mockConstructorTestingTNewTimeoutCollectors) *TimeoutCollectors { +}) *TimeoutCollectors { mock := &TimeoutCollectors{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/timeout_processor.go b/consensus/hotstuff/mocks/timeout_processor.go index bb601070560..c2c42a3b99b 100644 --- a/consensus/hotstuff/mocks/timeout_processor.go +++ b/consensus/hotstuff/mocks/timeout_processor.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -16,6 +16,10 @@ type TimeoutProcessor struct { func (_m *TimeoutProcessor) Process(timeout *model.TimeoutObject) error { ret := _m.Called(timeout) + if len(ret) == 0 { + panic("no return value specified for Process") + } + var r0 error if rf, ok := ret.Get(0).(func(*model.TimeoutObject) error); ok { r0 = rf(timeout) @@ -26,13 +30,12 @@ func (_m *TimeoutProcessor) Process(timeout *model.TimeoutObject) error { return r0 } -type mockConstructorTestingTNewTimeoutProcessor interface { +// NewTimeoutProcessor creates a new instance of TimeoutProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutProcessor(t interface { mock.TestingT Cleanup(func()) -} - -// NewTimeoutProcessor creates a new instance of TimeoutProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTimeoutProcessor(t mockConstructorTestingTNewTimeoutProcessor) *TimeoutProcessor { +}) *TimeoutProcessor { mock := &TimeoutProcessor{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/timeout_processor_factory.go b/consensus/hotstuff/mocks/timeout_processor_factory.go index 26c0952ba8a..074112dcc88 100644 --- a/consensus/hotstuff/mocks/timeout_processor_factory.go +++ b/consensus/hotstuff/mocks/timeout_processor_factory.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
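The other recurring addition is the `if len(ret) == 0 { panic(...) }` guard: calling a mocked method that has return values without first configuring an expectation now panics with a descriptive message instead of silently handing back zero values. A sketch of the resulting test pattern (hypothetical test; `TimeoutProcessor` is from the hunk above):

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/onflow/flow-go/consensus/hotstuff/mocks"
	"github.com/onflow/flow-go/consensus/hotstuff/model"
)

func TestTimeoutProcessorStub(t *testing.T) {
	proc := mocks.NewTimeoutProcessor(t)

	// Without this expectation, the Process call below would panic with
	// "no return value specified for Process" rather than returning nil.
	proc.On("Process", mock.Anything).Return(nil)

	if err := proc.Process(&model.TimeoutObject{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
```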
package mocks @@ -16,6 +16,10 @@ type TimeoutProcessorFactory struct { func (_m *TimeoutProcessorFactory) Create(view uint64) (hotstuff.TimeoutProcessor, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for Create") + } + var r0 hotstuff.TimeoutProcessor var r1 error if rf, ok := ret.Get(0).(func(uint64) (hotstuff.TimeoutProcessor, error)); ok { @@ -38,13 +42,12 @@ func (_m *TimeoutProcessorFactory) Create(view uint64) (hotstuff.TimeoutProcesso return r0, r1 } -type mockConstructorTestingTNewTimeoutProcessorFactory interface { +// NewTimeoutProcessorFactory creates a new instance of TimeoutProcessorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutProcessorFactory(t interface { mock.TestingT Cleanup(func()) -} - -// NewTimeoutProcessorFactory creates a new instance of TimeoutProcessorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTimeoutProcessorFactory(t mockConstructorTestingTNewTimeoutProcessorFactory) *TimeoutProcessorFactory { +}) *TimeoutProcessorFactory { mock := &TimeoutProcessorFactory{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/timeout_signature_aggregator.go b/consensus/hotstuff/mocks/timeout_signature_aggregator.go index 2ae0840efce..3c39fe843a9 100644 --- a/consensus/hotstuff/mocks/timeout_signature_aggregator.go +++ b/consensus/hotstuff/mocks/timeout_signature_aggregator.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks import ( - crypto "github.com/onflow/flow-go/crypto" + crypto "github.com/onflow/crypto" flow "github.com/onflow/flow-go/model/flow" hotstuff "github.com/onflow/flow-go/consensus/hotstuff" @@ -16,10 +16,14 @@ type TimeoutSignatureAggregator struct { mock.Mock } -// Aggregate provides a mock function with given fields: +// Aggregate provides a mock function with no fields func (_m *TimeoutSignatureAggregator) Aggregate() ([]hotstuff.TimeoutSignerInfo, crypto.Signature, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Aggregate") + } + var r0 []hotstuff.TimeoutSignerInfo var r1 crypto.Signature var r2 error @@ -51,10 +55,14 @@ func (_m *TimeoutSignatureAggregator) Aggregate() ([]hotstuff.TimeoutSignerInfo, return r0, r1, r2 } -// TotalWeight provides a mock function with given fields: +// TotalWeight provides a mock function with no fields func (_m *TimeoutSignatureAggregator) TotalWeight() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TotalWeight") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -69,6 +77,10 @@ func (_m *TimeoutSignatureAggregator) TotalWeight() uint64 { func (_m *TimeoutSignatureAggregator) VerifyAndAdd(signerID flow.Identifier, sig crypto.Signature, newestQCView uint64) (uint64, error) { ret := _m.Called(signerID, sig, newestQCView) + if len(ret) == 0 { + panic("no return value specified for VerifyAndAdd") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, crypto.Signature, uint64) (uint64, error)); ok { @@ -89,10 +101,14 @@ func (_m *TimeoutSignatureAggregator) VerifyAndAdd(signerID flow.Identifier, sig return r0, r1 } -// View provides a mock function with given fields: +// View provides a mock function with no fields func (_m *TimeoutSignatureAggregator) 
View() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for View") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -103,13 +119,12 @@ func (_m *TimeoutSignatureAggregator) View() uint64 { return r0 } -type mockConstructorTestingTNewTimeoutSignatureAggregator interface { +// NewTimeoutSignatureAggregator creates a new instance of TimeoutSignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTimeoutSignatureAggregator(t interface { mock.TestingT Cleanup(func()) -} - -// NewTimeoutSignatureAggregator creates a new instance of TimeoutSignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTimeoutSignatureAggregator(t mockConstructorTestingTNewTimeoutSignatureAggregator) *TimeoutSignatureAggregator { +}) *TimeoutSignatureAggregator { mock := &TimeoutSignatureAggregator{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/validator.go b/consensus/hotstuff/mocks/validator.go index d31e02dd1c9..18f60590d0e 100644 --- a/consensus/hotstuff/mocks/validator.go +++ b/consensus/hotstuff/mocks/validator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -16,11 +16,15 @@ type Validator struct { } // ValidateProposal provides a mock function with given fields: proposal -func (_m *Validator) ValidateProposal(proposal *model.Proposal) error { +func (_m *Validator) ValidateProposal(proposal *model.SignedProposal) error { ret := _m.Called(proposal) + if len(ret) == 0 { + panic("no return value specified for ValidateProposal") + } + var r0 error - if rf, ok := ret.Get(0).(func(*model.Proposal) error); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal) error); ok { r0 = rf(proposal) } else { r0 = ret.Error(0) @@ -33,6 +37,10 @@ func (_m *Validator) ValidateProposal(proposal *model.Proposal) error { func (_m *Validator) ValidateQC(qc *flow.QuorumCertificate) error { ret := _m.Called(qc) + if len(ret) == 0 { + panic("no return value specified for ValidateQC") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.QuorumCertificate) error); ok { r0 = rf(qc) @@ -47,6 +55,10 @@ func (_m *Validator) ValidateQC(qc *flow.QuorumCertificate) error { func (_m *Validator) ValidateTC(tc *flow.TimeoutCertificate) error { ret := _m.Called(tc) + if len(ret) == 0 { + panic("no return value specified for ValidateTC") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.TimeoutCertificate) error); ok { r0 = rf(tc) @@ -58,19 +70,23 @@ func (_m *Validator) ValidateTC(tc *flow.TimeoutCertificate) error { } // ValidateVote provides a mock function with given fields: vote -func (_m *Validator) ValidateVote(vote *model.Vote) (*flow.Identity, error) { +func (_m *Validator) ValidateVote(vote *model.Vote) (*flow.IdentitySkeleton, error) { ret := _m.Called(vote) - var r0 *flow.Identity + if len(ret) == 0 { + panic("no return value specified for ValidateVote") + } + + var r0 *flow.IdentitySkeleton var r1 error - if rf, ok := ret.Get(0).(func(*model.Vote) (*flow.Identity, error)); ok { + if rf, ok := ret.Get(0).(func(*model.Vote) (*flow.IdentitySkeleton, error)); ok { return rf(vote) } - if rf, ok := ret.Get(0).(func(*model.Vote) *flow.Identity); ok { + if rf, ok := ret.Get(0).(func(*model.Vote) *flow.IdentitySkeleton); ok { r0 = rf(vote) } else { if ret.Get(0) != nil 
{ - r0 = ret.Get(0).(*flow.Identity) + r0 = ret.Get(0).(*flow.IdentitySkeleton) } } @@ -83,13 +99,12 @@ func (_m *Validator) ValidateVote(vote *model.Vote) (*flow.Identity, error) { return r0, r1 } -type mockConstructorTestingTNewValidator interface { +// NewValidator creates a new instance of Validator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewValidator(t interface { mock.TestingT Cleanup(func()) -} - -// NewValidator creates a new instance of Validator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewValidator(t mockConstructorTestingTNewValidator) *Validator { +}) *Validator { mock := &Validator{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/verifier.go b/consensus/hotstuff/mocks/verifier.go index 3ba02ff54e1..38c1fa81ee7 100644 --- a/consensus/hotstuff/mocks/verifier.go +++ b/consensus/hotstuff/mocks/verifier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -14,11 +14,15 @@ type Verifier struct { } // VerifyQC provides a mock function with given fields: signers, sigData, view, blockID -func (_m *Verifier) VerifyQC(signers flow.IdentityList, sigData []byte, view uint64, blockID flow.Identifier) error { +func (_m *Verifier) VerifyQC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, blockID flow.Identifier) error { ret := _m.Called(signers, sigData, view, blockID) + if len(ret) == 0 { + panic("no return value specified for VerifyQC") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.IdentityList, []byte, uint64, flow.Identifier) error); ok { + if rf, ok := ret.Get(0).(func(flow.IdentitySkeletonList, []byte, uint64, flow.Identifier) error); ok { r0 = rf(signers, sigData, view, blockID) } else { r0 = ret.Error(0) @@ -28,11 +32,15 @@ func (_m *Verifier) VerifyQC(signers flow.IdentityList, sigData []byte, view uin } // VerifyTC provides a mock function with given fields: signers, sigData, view, highQCViews -func (_m *Verifier) VerifyTC(signers flow.IdentityList, sigData []byte, view uint64, highQCViews []uint64) error { +func (_m *Verifier) VerifyTC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, highQCViews []uint64) error { ret := _m.Called(signers, sigData, view, highQCViews) + if len(ret) == 0 { + panic("no return value specified for VerifyTC") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.IdentityList, []byte, uint64, []uint64) error); ok { + if rf, ok := ret.Get(0).(func(flow.IdentitySkeletonList, []byte, uint64, []uint64) error); ok { r0 = rf(signers, sigData, view, highQCViews) } else { r0 = ret.Error(0) @@ -42,11 +50,15 @@ func (_m *Verifier) VerifyTC(signers flow.IdentityList, sigData []byte, view uin } // VerifyVote provides a mock function with given fields: voter, sigData, view, blockID -func (_m *Verifier) VerifyVote(voter *flow.Identity, sigData []byte, view uint64, blockID flow.Identifier) error { +func (_m *Verifier) VerifyVote(voter *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error { ret := _m.Called(voter, sigData, view, blockID) + if len(ret) == 0 { + panic("no return value specified for VerifyVote") + } + var r0 error - if rf, ok := ret.Get(0).(func(*flow.Identity, []byte, uint64, flow.Identifier) error); ok { + if rf, ok := ret.Get(0).(func(*flow.IdentitySkeleton, []byte, uint64, flow.Identifier) error); 
ok { r0 = rf(voter, sigData, view, blockID) } else { r0 = ret.Error(0) @@ -55,13 +67,12 @@ func (_m *Verifier) VerifyVote(voter *flow.Identity, sigData []byte, view uint64 return r0 } -type mockConstructorTestingTNewVerifier interface { +// NewVerifier creates a new instance of Verifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVerifier(t interface { mock.TestingT Cleanup(func()) -} - -// NewVerifier creates a new instance of Verifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVerifier(t mockConstructorTestingTNewVerifier) *Verifier { +}) *Verifier { mock := &Verifier{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/verifying_vote_processor.go b/consensus/hotstuff/mocks/verifying_vote_processor.go index beaada561e3..105346f3a3e 100644 --- a/consensus/hotstuff/mocks/verifying_vote_processor.go +++ b/consensus/hotstuff/mocks/verifying_vote_processor.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -14,10 +14,14 @@ type VerifyingVoteProcessor struct { mock.Mock } -// Block provides a mock function with given fields: +// Block provides a mock function with no fields func (_m *VerifyingVoteProcessor) Block() *model.Block { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Block") + } + var r0 *model.Block if rf, ok := ret.Get(0).(func() *model.Block); ok { r0 = rf() @@ -34,6 +38,10 @@ func (_m *VerifyingVoteProcessor) Block() *model.Block { func (_m *VerifyingVoteProcessor) Process(vote *model.Vote) error { ret := _m.Called(vote) + if len(ret) == 0 { + panic("no return value specified for Process") + } + var r0 error if rf, ok := ret.Get(0).(func(*model.Vote) error); ok { r0 = rf(vote) @@ -44,10 +52,14 @@ func (_m *VerifyingVoteProcessor) Process(vote *model.Vote) error { return r0 } -// Status provides a mock function with given fields: +// Status provides a mock function with no fields func (_m *VerifyingVoteProcessor) Status() hotstuff.VoteCollectorStatus { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 hotstuff.VoteCollectorStatus if rf, ok := ret.Get(0).(func() hotstuff.VoteCollectorStatus); ok { r0 = rf() @@ -58,13 +70,12 @@ func (_m *VerifyingVoteProcessor) Status() hotstuff.VoteCollectorStatus { return r0 } -type mockConstructorTestingTNewVerifyingVoteProcessor interface { +// NewVerifyingVoteProcessor creates a new instance of VerifyingVoteProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVerifyingVoteProcessor(t interface { mock.TestingT Cleanup(func()) -} - -// NewVerifyingVoteProcessor creates a new instance of VerifyingVoteProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewVerifyingVoteProcessor(t mockConstructorTestingTNewVerifyingVoteProcessor) *VerifyingVoteProcessor { +}) *VerifyingVoteProcessor { mock := &VerifyingVoteProcessor{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/vote_aggregation_consumer.go b/consensus/hotstuff/mocks/vote_aggregation_consumer.go index 0ab7b7f53aa..23693e11797 100644 --- a/consensus/hotstuff/mocks/vote_aggregation_consumer.go +++ b/consensus/hotstuff/mocks/vote_aggregation_consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -31,7 +31,7 @@ func (_m *VoteAggregationConsumer) OnQcConstructedFromVotes(_a0 *flow.QuorumCert } // OnVoteForInvalidBlockDetected provides a mock function with given fields: vote, invalidProposal -func (_m *VoteAggregationConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.Proposal) { +func (_m *VoteAggregationConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.SignedProposal) { _m.Called(vote, invalidProposal) } @@ -40,13 +40,12 @@ func (_m *VoteAggregationConsumer) OnVoteProcessed(vote *model.Vote) { _m.Called(vote) } -type mockConstructorTestingTNewVoteAggregationConsumer interface { +// NewVoteAggregationConsumer creates a new instance of VoteAggregationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteAggregationConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewVoteAggregationConsumer creates a new instance of VoteAggregationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVoteAggregationConsumer(t mockConstructorTestingTNewVoteAggregationConsumer) *VoteAggregationConsumer { +}) *VoteAggregationConsumer { mock := &VoteAggregationConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/vote_aggregation_violation_consumer.go b/consensus/hotstuff/mocks/vote_aggregation_violation_consumer.go index c27e40c1513..9fb0ec4b1f3 100644 --- a/consensus/hotstuff/mocks/vote_aggregation_violation_consumer.go +++ b/consensus/hotstuff/mocks/vote_aggregation_violation_consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -23,17 +23,16 @@ func (_m *VoteAggregationViolationConsumer) OnInvalidVoteDetected(err model.Inva } // OnVoteForInvalidBlockDetected provides a mock function with given fields: vote, invalidProposal -func (_m *VoteAggregationViolationConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.Proposal) { +func (_m *VoteAggregationViolationConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.SignedProposal) { _m.Called(vote, invalidProposal) } -type mockConstructorTestingTNewVoteAggregationViolationConsumer interface { +// NewVoteAggregationViolationConsumer creates a new instance of VoteAggregationViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteAggregationViolationConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewVoteAggregationViolationConsumer creates a new instance of VoteAggregationViolationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewVoteAggregationViolationConsumer(t mockConstructorTestingTNewVoteAggregationViolationConsumer) *VoteAggregationViolationConsumer { +}) *VoteAggregationViolationConsumer { mock := &VoteAggregationViolationConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/vote_aggregator.go b/consensus/hotstuff/mocks/vote_aggregator.go index 78e0faee344..ae9cf3a7000 100644 --- a/consensus/hotstuff/mocks/vote_aggregator.go +++ b/consensus/hotstuff/mocks/vote_aggregator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -15,7 +15,7 @@ type VoteAggregator struct { } // AddBlock provides a mock function with given fields: block -func (_m *VoteAggregator) AddBlock(block *model.Proposal) { +func (_m *VoteAggregator) AddBlock(block *model.SignedProposal) { _m.Called(block) } @@ -24,10 +24,14 @@ func (_m *VoteAggregator) AddVote(vote *model.Vote) { _m.Called(vote) } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *VoteAggregator) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -41,11 +45,15 @@ func (_m *VoteAggregator) Done() <-chan struct{} { } // InvalidBlock provides a mock function with given fields: block -func (_m *VoteAggregator) InvalidBlock(block *model.Proposal) error { +func (_m *VoteAggregator) InvalidBlock(block *model.SignedProposal) error { ret := _m.Called(block) + if len(ret) == 0 { + panic("no return value specified for InvalidBlock") + } + var r0 error - if rf, ok := ret.Get(0).(func(*model.Proposal) error); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal) error); ok { r0 = rf(block) } else { r0 = ret.Error(0) @@ -59,10 +67,14 @@ func (_m *VoteAggregator) PruneUpToView(view uint64) { _m.Called(view) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *VoteAggregator) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -80,13 +92,12 @@ func (_m *VoteAggregator) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewVoteAggregator interface { +// NewVoteAggregator creates a new instance of VoteAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteAggregator(t interface { mock.TestingT Cleanup(func()) -} - -// NewVoteAggregator creates a new instance of VoteAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVoteAggregator(t mockConstructorTestingTNewVoteAggregator) *VoteAggregator { +}) *VoteAggregator { mock := &VoteAggregator{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/vote_collector.go b/consensus/hotstuff/mocks/vote_collector.go index 9126f896081..651dd1197ef 100644 --- a/consensus/hotstuff/mocks/vote_collector.go +++ b/consensus/hotstuff/mocks/vote_collector.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks @@ -18,6 +18,10 @@ type VoteCollector struct { func (_m *VoteCollector) AddVote(vote *model.Vote) error { ret := _m.Called(vote) + if len(ret) == 0 { + panic("no return value specified for AddVote") + } + var r0 error if rf, ok := ret.Get(0).(func(*model.Vote) error); ok { r0 = rf(vote) @@ -29,11 +33,15 @@ func (_m *VoteCollector) AddVote(vote *model.Vote) error { } // ProcessBlock provides a mock function with given fields: block -func (_m *VoteCollector) ProcessBlock(block *model.Proposal) error { +func (_m *VoteCollector) ProcessBlock(block *model.SignedProposal) error { ret := _m.Called(block) + if len(ret) == 0 { + panic("no return value specified for ProcessBlock") + } + var r0 error - if rf, ok := ret.Get(0).(func(*model.Proposal) error); ok { + if rf, ok := ret.Get(0).(func(*model.SignedProposal) error); ok { r0 = rf(block) } else { r0 = ret.Error(0) @@ -47,10 +55,14 @@ func (_m *VoteCollector) RegisterVoteConsumer(consumer hotstuff.VoteConsumer) { _m.Called(consumer) } -// Status provides a mock function with given fields: +// Status provides a mock function with no fields func (_m *VoteCollector) Status() hotstuff.VoteCollectorStatus { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 hotstuff.VoteCollectorStatus if rf, ok := ret.Get(0).(func() hotstuff.VoteCollectorStatus); ok { r0 = rf() @@ -61,10 +73,14 @@ func (_m *VoteCollector) Status() hotstuff.VoteCollectorStatus { return r0 } -// View provides a mock function with given fields: +// View provides a mock function with no fields func (_m *VoteCollector) View() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for View") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -75,13 +91,12 @@ func (_m *VoteCollector) View() uint64 { return r0 } -type mockConstructorTestingTNewVoteCollector interface { +// NewVoteCollector creates a new instance of VoteCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteCollector(t interface { mock.TestingT Cleanup(func()) -} - -// NewVoteCollector creates a new instance of VoteCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVoteCollector(t mockConstructorTestingTNewVoteCollector) *VoteCollector { +}) *VoteCollector { mock := &VoteCollector{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/vote_collector_consumer.go b/consensus/hotstuff/mocks/vote_collector_consumer.go index 5c5b064e975..25357617118 100644 --- a/consensus/hotstuff/mocks/vote_collector_consumer.go +++ b/consensus/hotstuff/mocks/vote_collector_consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -25,13 +25,12 @@ func (_m *VoteCollectorConsumer) OnVoteProcessed(vote *model.Vote) { _m.Called(vote) } -type mockConstructorTestingTNewVoteCollectorConsumer interface { +// NewVoteCollectorConsumer creates a new instance of VoteCollectorConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteCollectorConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewVoteCollectorConsumer creates a new instance of VoteCollectorConsumer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVoteCollectorConsumer(t mockConstructorTestingTNewVoteCollectorConsumer) *VoteCollectorConsumer { +}) *VoteCollectorConsumer { mock := &VoteCollectorConsumer{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/vote_collectors.go b/consensus/hotstuff/mocks/vote_collectors.go index 18ae2b9e18d..e7be4957fa0 100644 --- a/consensus/hotstuff/mocks/vote_collectors.go +++ b/consensus/hotstuff/mocks/vote_collectors.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -14,10 +14,14 @@ type VoteCollectors struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *VoteCollectors) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -34,6 +38,10 @@ func (_m *VoteCollectors) Done() <-chan struct{} { func (_m *VoteCollectors) GetOrCreateCollector(view uint64) (hotstuff.VoteCollector, bool, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for GetOrCreateCollector") + } + var r0 hotstuff.VoteCollector var r1 bool var r2 error @@ -68,10 +76,14 @@ func (_m *VoteCollectors) PruneUpToView(lowestRetainedView uint64) { _m.Called(lowestRetainedView) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *VoteCollectors) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -89,13 +101,12 @@ func (_m *VoteCollectors) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewVoteCollectors interface { +// NewVoteCollectors creates a new instance of VoteCollectors. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteCollectors(t interface { mock.TestingT Cleanup(func()) -} - -// NewVoteCollectors creates a new instance of VoteCollectors. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVoteCollectors(t mockConstructorTestingTNewVoteCollectors) *VoteCollectors { +}) *VoteCollectors { mock := &VoteCollectors{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/vote_consumer.go b/consensus/hotstuff/mocks/vote_consumer.go deleted file mode 100644 index c4065533800..00000000000 --- a/consensus/hotstuff/mocks/vote_consumer.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocks - -import ( - model "github.com/onflow/flow-go/consensus/hotstuff/model" - mock "github.com/stretchr/testify/mock" -) - -// VoteConsumer is an autogenerated mock type for the VoteConsumer type -type VoteConsumer struct { - mock.Mock -} - -// Execute provides a mock function with given fields: vote -func (_m *VoteConsumer) Execute(vote *model.Vote) { - _m.Called(vote) -} - -type mockConstructorTestingTNewVoteConsumer interface { - mock.TestingT - Cleanup(func()) -} - -// NewVoteConsumer creates a new instance of VoteConsumer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVoteConsumer(t mockConstructorTestingTNewVoteConsumer) *VoteConsumer { - mock := &VoteConsumer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/consensus/hotstuff/mocks/vote_processor.go b/consensus/hotstuff/mocks/vote_processor.go index f69c48bd7be..bcd9cc2377e 100644 --- a/consensus/hotstuff/mocks/vote_processor.go +++ b/consensus/hotstuff/mocks/vote_processor.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -18,6 +18,10 @@ type VoteProcessor struct { func (_m *VoteProcessor) Process(vote *model.Vote) error { ret := _m.Called(vote) + if len(ret) == 0 { + panic("no return value specified for Process") + } + var r0 error if rf, ok := ret.Get(0).(func(*model.Vote) error); ok { r0 = rf(vote) @@ -28,10 +32,14 @@ func (_m *VoteProcessor) Process(vote *model.Vote) error { return r0 } -// Status provides a mock function with given fields: +// Status provides a mock function with no fields func (_m *VoteProcessor) Status() hotstuff.VoteCollectorStatus { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 hotstuff.VoteCollectorStatus if rf, ok := ret.Get(0).(func() hotstuff.VoteCollectorStatus); ok { r0 = rf() @@ -42,13 +50,12 @@ func (_m *VoteProcessor) Status() hotstuff.VoteCollectorStatus { return r0 } -type mockConstructorTestingTNewVoteProcessor interface { +// NewVoteProcessor creates a new instance of VoteProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteProcessor(t interface { mock.TestingT Cleanup(func()) -} - -// NewVoteProcessor creates a new instance of VoteProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVoteProcessor(t mockConstructorTestingTNewVoteProcessor) *VoteProcessor { +}) *VoteProcessor { mock := &VoteProcessor{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/vote_processor_factory.go b/consensus/hotstuff/mocks/vote_processor_factory.go index 5b45997dbf5..7f7807272ec 100644 --- a/consensus/hotstuff/mocks/vote_processor_factory.go +++ b/consensus/hotstuff/mocks/vote_processor_factory.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks @@ -17,15 +17,19 @@ type VoteProcessorFactory struct { } // Create provides a mock function with given fields: log, proposal -func (_m *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.Proposal) (hotstuff.VerifyingVoteProcessor, error) { +func (_m *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error) { ret := _m.Called(log, proposal) + if len(ret) == 0 { + panic("no return value specified for Create") + } + var r0 hotstuff.VerifyingVoteProcessor var r1 error - if rf, ok := ret.Get(0).(func(zerolog.Logger, *model.Proposal) (hotstuff.VerifyingVoteProcessor, error)); ok { + if rf, ok := ret.Get(0).(func(zerolog.Logger, *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error)); ok { return rf(log, proposal) } - if rf, ok := ret.Get(0).(func(zerolog.Logger, *model.Proposal) hotstuff.VerifyingVoteProcessor); ok { + if rf, ok := ret.Get(0).(func(zerolog.Logger, *model.SignedProposal) hotstuff.VerifyingVoteProcessor); ok { r0 = rf(log, proposal) } else { if ret.Get(0) != nil { @@ -33,7 +37,7 @@ func (_m *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.Propo } } - if rf, ok := ret.Get(1).(func(zerolog.Logger, *model.Proposal) error); ok { + if rf, ok := ret.Get(1).(func(zerolog.Logger, *model.SignedProposal) error); ok { r1 = rf(log, proposal) } else { r1 = ret.Error(1) @@ -42,13 +46,12 @@ func (_m *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.Propo return r0, r1 } -type mockConstructorTestingTNewVoteProcessorFactory interface { +// NewVoteProcessorFactory creates a new instance of VoteProcessorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVoteProcessorFactory(t interface { mock.TestingT Cleanup(func()) -} - -// NewVoteProcessorFactory creates a new instance of VoteProcessorFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVoteProcessorFactory(t mockConstructorTestingTNewVoteProcessorFactory) *VoteProcessorFactory { +}) *VoteProcessorFactory { mock := &VoteProcessorFactory{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/weighted_signature_aggregator.go b/consensus/hotstuff/mocks/weighted_signature_aggregator.go index 185d680e244..106914d8ad5 100644 --- a/consensus/hotstuff/mocks/weighted_signature_aggregator.go +++ b/consensus/hotstuff/mocks/weighted_signature_aggregator.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks import ( - crypto "github.com/onflow/flow-go/crypto" + crypto "github.com/onflow/crypto" flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" @@ -14,10 +14,14 @@ type WeightedSignatureAggregator struct { mock.Mock } -// Aggregate provides a mock function with given fields: +// Aggregate provides a mock function with no fields func (_m *WeightedSignatureAggregator) Aggregate() (flow.IdentifierList, []byte, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Aggregate") + } + var r0 flow.IdentifierList var r1 []byte var r2 error @@ -49,10 +53,14 @@ func (_m *WeightedSignatureAggregator) Aggregate() (flow.IdentifierList, []byte, return r0, r1, r2 } -// TotalWeight provides a mock function with given fields: +// TotalWeight provides a mock function with no fields func (_m *WeightedSignatureAggregator) TotalWeight() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TotalWeight") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -67,6 +75,10 @@ func (_m *WeightedSignatureAggregator) TotalWeight() uint64 { func (_m *WeightedSignatureAggregator) TrustedAdd(signerID flow.Identifier, sig crypto.Signature) (uint64, error) { ret := _m.Called(signerID, sig) + if len(ret) == 0 { + panic("no return value specified for TrustedAdd") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, crypto.Signature) (uint64, error)); ok { @@ -91,6 +103,10 @@ func (_m *WeightedSignatureAggregator) TrustedAdd(signerID flow.Identifier, sig func (_m *WeightedSignatureAggregator) Verify(signerID flow.Identifier, sig crypto.Signature) error { ret := _m.Called(signerID, sig) + if len(ret) == 0 { + panic("no return value specified for Verify") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Identifier, crypto.Signature) error); ok { r0 = rf(signerID, sig) @@ -101,13 +117,12 @@ func (_m *WeightedSignatureAggregator) Verify(signerID flow.Identifier, sig cryp return r0 } -type mockConstructorTestingTNewWeightedSignatureAggregator interface { +// NewWeightedSignatureAggregator creates a new instance of WeightedSignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWeightedSignatureAggregator(t interface { mock.TestingT Cleanup(func()) -} - -// NewWeightedSignatureAggregator creates a new instance of WeightedSignatureAggregator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewWeightedSignatureAggregator(t mockConstructorTestingTNewWeightedSignatureAggregator) *WeightedSignatureAggregator { +}) *WeightedSignatureAggregator { mock := &WeightedSignatureAggregator{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/workerpool.go b/consensus/hotstuff/mocks/workerpool.go index faeeb74d433..447fc39bd43 100644 --- a/consensus/hotstuff/mocks/workerpool.go +++ b/consensus/hotstuff/mocks/workerpool.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mocks @@ -9,7 +9,7 @@ type Workerpool struct { mock.Mock } -// StopWait provides a mock function with given fields: +// StopWait provides a mock function with no fields func (_m *Workerpool) StopWait() { _m.Called() } @@ -19,13 +19,12 @@ func (_m *Workerpool) Submit(task func()) { _m.Called(task) } -type mockConstructorTestingTNewWorkerpool interface { +// NewWorkerpool creates a new instance of Workerpool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWorkerpool(t interface { mock.TestingT Cleanup(func()) -} - -// NewWorkerpool creates a new instance of Workerpool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewWorkerpool(t mockConstructorTestingTNewWorkerpool) *Workerpool { +}) *Workerpool { mock := &Workerpool{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/mocks/workers.go b/consensus/hotstuff/mocks/workers.go index ef6e359df4c..3d0a4b91696 100644 --- a/consensus/hotstuff/mocks/workers.go +++ b/consensus/hotstuff/mocks/workers.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mocks @@ -14,13 +14,12 @@ func (_m *Workers) Submit(task func()) { _m.Called(task) } -type mockConstructorTestingTNewWorkers interface { +// NewWorkers creates a new instance of Workers. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWorkers(t interface { mock.TestingT Cleanup(func()) -} - -// NewWorkers creates a new instance of Workers. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewWorkers(t mockConstructorTestingTNewWorkers) *Workers { +}) *Workers { mock := &Workers{} mock.Mock.Test(t) diff --git a/consensus/hotstuff/model/block.go b/consensus/hotstuff/model/block.go index 6c682514dfc..ba7fb806fc8 100644 --- a/consensus/hotstuff/model/block.go +++ b/consensus/hotstuff/model/block.go @@ -2,7 +2,6 @@ package model import ( "fmt" - "time" "github.com/onflow/flow-go/model/flow" ) @@ -10,23 +9,21 @@ import ( // Block is the HotStuff algorithm's concept of a block, which - in the bigger picture - corresponds // to the block header. type Block struct { - View uint64 - BlockID flow.Identifier - ProposerID flow.Identifier - QC *flow.QuorumCertificate - PayloadHash flow.Identifier - Timestamp time.Time + View uint64 + BlockID flow.Identifier + ProposerID flow.Identifier + QC *flow.QuorumCertificate + Timestamp uint64 // Unix milliseconds } // BlockFromFlow converts a flow header to a hotstuff block. func BlockFromFlow(header *flow.Header) *Block { block := Block{ - BlockID: header.ID(), - View: header.View, - QC: header.QuorumCertificate(), - ProposerID: header.ProposerID, - PayloadHash: header.PayloadHash, - Timestamp: header.Timestamp, + BlockID: header.ID(), + View: header.View, + QC: header.ParentQC(), + ProposerID: header.ProposerID, + Timestamp: header.Timestamp, } return &block @@ -36,12 +33,11 @@ func BlockFromFlow(header *flow.Header) *Block { // block based on the given header. 
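In `model.Block`, the `Timestamp` field changes from `time.Time` to `uint64` Unix milliseconds (and `PayloadHash` is dropped). Callers that still need a `time.Time` can convert; a hypothetical helper, assuming the timestamp fits into an int64:

```go
package example

import (
	"time"

	"github.com/onflow/flow-go/consensus/hotstuff/model"
)

// blockTime is a hypothetical helper (not part of this diff) converting the
// block's uint64 Unix-millisecond timestamp back into a time.Time.
func blockTime(b *model.Block) time.Time {
	return time.UnixMilli(int64(b.Timestamp)).UTC()
}
```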
func GenesisBlockFromFlow(header *flow.Header) *Block { genesis := &Block{ - BlockID: header.ID(), - View: header.View, - ProposerID: header.ProposerID, - QC: nil, - PayloadHash: header.PayloadHash, - Timestamp: header.Timestamp, + BlockID: header.ID(), + View: header.View, + ProposerID: header.ProposerID, + QC: nil, + Timestamp: header.Timestamp, } return genesis } @@ -69,10 +65,11 @@ func NewCertifiedBlock(block *Block, qc *flow.QuorumCertificate) (CertifiedBlock return CertifiedBlock{Block: block, CertifyingQC: qc}, nil } -// ID returns unique identifier for the block. +// BlockID returns a unique identifier for the block (the ID signed to produce a block vote). // To avoid repeated computation, we use value from the QC. -func (b *CertifiedBlock) ID() flow.Identifier { - return b.Block.BlockID +// CAUTION: This is not a cryptographic commitment for the CertifiedBlock model. +func (b *CertifiedBlock) BlockID() flow.Identifier { + return b.CertifyingQC.BlockID } // View returns view where the block was proposed. diff --git a/consensus/hotstuff/model/errors.go b/consensus/hotstuff/model/errors.go index 4244d0ac531..047e827090f 100644 --- a/consensus/hotstuff/model/errors.go +++ b/consensus/hotstuff/model/errors.go @@ -113,7 +113,7 @@ type MissingBlockError struct { } func (e MissingBlockError) Error() string { - return fmt.Sprintf("missing Proposal at view %d with ID %v", e.View, e.BlockID) + return fmt.Sprintf("missing block at view %d with ID %v", e.View, e.BlockID) } // IsMissingBlockError returns whether an error is MissingBlockError @@ -165,11 +165,11 @@ func (e InvalidTCError) Unwrap() error { // InvalidProposalError indicates that the proposal is invalid type InvalidProposalError struct { - InvalidProposal *Proposal + InvalidProposal *SignedProposal Err error } -func NewInvalidProposalErrorf(proposal *Proposal, msg string, args ...interface{}) error { +func NewInvalidProposalErrorf(proposal *SignedProposal, msg string, args ...interface{}) error { return InvalidProposalError{ InvalidProposal: proposal, Err: fmt.Errorf(msg, args...), @@ -513,7 +513,10 @@ func NewInvalidTimeoutErrorf(timeout *TimeoutObject, msg string, args ...interfa } func (e InvalidTimeoutError) Error() string { - return fmt.Sprintf("invalid timeout %x for view %d: %s", e.Timeout.ID(), e.Timeout.View, e.Err.Error()) + return fmt.Sprintf("invalid timeout: %s: %s", + e.Timeout.String(), + e.Err.Error(), + ) } // IsInvalidTimeoutError returns whether an error is InvalidTimeoutError diff --git a/consensus/hotstuff/model/proposal.go b/consensus/hotstuff/model/proposal.go index 6566de09a97..5decc58c015 100644 --- a/consensus/hotstuff/model/proposal.go +++ b/consensus/hotstuff/model/proposal.go @@ -1,54 +1,102 @@ package model import ( + "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" ) -// Proposal represent a new proposed block within HotStuff (and thus a -// a header in the bigger picture), signed by the proposer. +// Proposal represents a block proposal under construction. +// In order to decide whether a proposal is safe to sign, HotStuff's Safety Rules require +// proof that the leader entered the respective view in a protocol-compliant manner. Specifically, +// we require a TimeoutCertificate [TC] if and only if the QC in the block is _not_ for the +// immediately preceding view. Thereby we protect the consensus process from malicious leaders +// attempting to skip views that haven't concluded yet (a form of front-running attack). 
+// However, LastViewTC is only relevant until a QC is known that certifies the correctness of +// the block. Thereafter, the QC attests that honest consensus participants have confirmed the +// validity of the fork up to the latest certified block (including protocol-compliant view transitions). +// +// By explicitly differentiating the Proposal from the SignedProposal (extending Proposal by +// adding the proposer's signature), we can unify the algorithmic path of signing block proposals. +// This codifies the important aspect that a proposer's signature for their own block +// is conceptually also just a vote (we explicitly use that for aggregating votes, including the +// proposer's own vote to a QC). In order to express this conceptual equivalence in code, the +// voting logic in Safety Rules must also operate on an unsigned Proposal. +// +// TODO: atm, the flow.Header embeds the LastViewTC. However, for HotStuff we have `model.Block` +// and `model.Proposal`, where the latter was introduced when we added the PaceMaker to +// vanilla HotStuff. It would be more consistent if we added `LastViewTC` to `model.Block`, +// or even better, introduce an interface for HotStuff's notion of a block (exposing +// the fields in `model.Block` plus LastViewTC) type Proposal struct { Block *Block - SigData []byte LastViewTC *flow.TimeoutCertificate } +// SignedProposal represents a new proposed block within HotStuff (and thus +// a header in the bigger picture), signed by the proposer. +// +// CAUTION: the signature only covers the pair (Block.View, Block.BlockID). Therefore, only +// the data that is hashed into the BlockID is cryptographically secured by the proposer's +// signature. +// Specifically, the proposer's signature cannot be covered by the Block.BlockID, as the +// proposer _signs_ the Block.BlockID (otherwise we have a cyclic dependency). +type SignedProposal struct { + Proposal + SigData []byte +} + // ProposerVote extracts the proposer vote from the proposal -func (p *Proposal) ProposerVote() *Vote { - vote := Vote{ +// All errors indicate a valid Vote cannot be constructed from the receiver SignedProposal. +func (p *SignedProposal) ProposerVote() (*Vote, error) { + return NewVote(UntrustedVote{ View: p.Block.View, BlockID: p.Block.BlockID, SignerID: p.Block.ProposerID, SigData: p.SigData, + }) +} + +// SignedProposalFromFlow turns a flow header proposal into a hotstuff block type. +// Since not all header fields are exposed to HotStuff, this conversion is not reversible. +func SignedProposalFromFlow(p *flow.ProposalHeader) *SignedProposal { + proposal := SignedProposal{ + Proposal: Proposal{ + Block: BlockFromFlow(p.Header), + LastViewTC: p.Header.LastViewTC, + }, + SigData: p.ProposerSigData, } - return &vote + return &proposal } -// ProposalFromFlow turns a flow header into a hotstuff block type. -func ProposalFromFlow(header *flow.Header) *Proposal { - proposal := Proposal{ - Block: BlockFromFlow(header), - SigData: header.ProposerSigData, - LastViewTC: header.LastViewTC, +// TODO(malleability, #7311) clean up conversion functions and/or proposal types here +func SignedProposalFromBlock(p *flow.Proposal) *SignedProposal { + proposal := SignedProposal{ + Proposal: Proposal{ + Block: BlockFromFlow(p.Block.ToHeader()), + LastViewTC: p.Block.LastViewTC, + }, + SigData: p.ProposerSigData, } return &proposal } -// ProposalToFlow turns a block proposal into a flow header.
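`ProposerVote` now returns `(*Vote, error)`, since the vote is built through the validating `NewVote` constructor instead of a plain struct literal. A sketch of the updated call-site pattern (hypothetical wrapper, not from this diff):

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/consensus/hotstuff/model"
)

// proposerVoteOf is a hypothetical wrapper showing the new error-returning API.
func proposerVoteOf(proposal *model.SignedProposal) (*model.Vote, error) {
	vote, err := proposal.ProposerVote()
	if err != nil {
		return nil, fmt.Errorf("could not extract proposer vote: %w", err)
	}
	// The proposer's signature over (View, BlockID) is treated like any other
	// vote, e.g. when aggregating votes into a QC.
	return vote, nil
}
```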
-func ProposalToFlow(proposal *Proposal) *flow.Header { - - block := proposal.Block - header := &flow.Header{ - ParentID: block.QC.BlockID, - PayloadHash: block.PayloadHash, - Timestamp: block.Timestamp, - View: block.View, - ParentView: block.QC.View, - ParentVoterIndices: block.QC.SignerIndices, - ParentVoterSigData: block.QC.SigData, - ProposerID: block.ProposerID, - ProposerSigData: proposal.SigData, - LastViewTC: proposal.LastViewTC, +func SignedProposalFromClusterBlock(p *cluster.Proposal) *SignedProposal { + proposal := SignedProposal{ + Proposal: Proposal{ + Block: BlockFromFlow(p.Block.ToHeader()), + LastViewTC: p.Block.LastViewTC, + }, + SigData: p.ProposerSigData, } + return &proposal +} - return header +// ProposalFromFlow turns an unsigned flow header into an unsigned hotstuff block type. +func ProposalFromFlow(header *flow.Header) *Proposal { + proposal := Proposal{ + Block: BlockFromFlow(header), + LastViewTC: header.LastViewTC, + } + return &proposal } diff --git a/consensus/hotstuff/model/signature_data.go b/consensus/hotstuff/model/signature_data.go index 0eb6c0741ff..63f8003f6ad 100644 --- a/consensus/hotstuff/model/signature_data.go +++ b/consensus/hotstuff/model/signature_data.go @@ -2,9 +2,12 @@ package model import ( "bytes" + "fmt" + + "github.com/onflow/crypto" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encoding/rlp" + "github.com/onflow/flow-go/model/flow" ) // SigDataPacker implements logic for encoding/decoding SignatureData using RLP encoding. @@ -45,13 +48,17 @@ func (p *SigDataPacker) Decode(data []byte) (*SignatureData, error) { return &sigData, nil } -// UnpackRandomBeaconSig takes sigData previously packed by packer, -// decodes it and extracts random beacon signature. -// This function is side-effect free. It only ever returns a -// model.InvalidFormatError, which indicates an invalid encoding. -func UnpackRandomBeaconSig(sigData []byte) (crypto.Signature, error) { - // decode into typed data +// BeaconSignature extracts the source of randomness from the QC sigData. +// +// The sigData is an RLP encoded structure that is part of QuorumCertificate. +// The function only ever returns a model.InvalidFormatError, which indicates an +// invalid encoding. +func BeaconSignature(qc *flow.QuorumCertificate) ([]byte, error) { + // unpack sig data to extract random beacon signature packer := SigDataPacker{} - sig, err := packer.Decode(sigData) - return sig.ReconstructedRandomBeaconSig, err + sigData, err := packer.Decode(qc.SigData) + if err != nil { + return nil, fmt.Errorf("could not unpack block signature: %w", err) + } + return sigData.ReconstructedRandomBeaconSig, nil } diff --git a/consensus/hotstuff/model/timeout.go b/consensus/hotstuff/model/timeout.go index 51347e4b41f..5482069c7f9 100644 --- a/consensus/hotstuff/model/timeout.go +++ b/consensus/hotstuff/model/timeout.go @@ -1,12 +1,13 @@ package model import ( + "bytes" "fmt" "time" + "github.com/onflow/crypto" "github.com/rs/zerolog" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" ) @@ -36,6 +37,8 @@ type NewViewEvent TimerInfo // TimeoutObject represents intent of replica to leave its current view with a timeout. This concept is very similar to // HotStuff vote. Valid TimeoutObject is signed by staking key.
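`BeaconSignature` replaces `UnpackRandomBeaconSig`: callers now pass the whole QC rather than pre-extracted `sigData`, and receive the reconstructed random beacon signature, i.e. the block's source of randomness. A usage sketch (hypothetical wrapper around the function shown above):

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/consensus/hotstuff/model"
	"github.com/onflow/flow-go/model/flow"
)

// randomnessSource is a hypothetical wrapper extracting the source of
// randomness from a quorum certificate.
func randomnessSource(qc *flow.QuorumCertificate) ([]byte, error) {
	sig, err := model.BeaconSignature(qc)
	if err != nil { // only a model.InvalidFormatError, i.e. malformed encoding
		return nil, fmt.Errorf("invalid QC sig data: %w", err)
	}
	return sig, nil
}
```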
+// +//structwrite:immutable - mutations allowed only within the constructor type TimeoutObject struct { // View is the view number which is replica is timing out View uint64 @@ -56,11 +59,72 @@ type TimeoutObject struct { // `TimeoutObject` periodically based on some internal heuristic. Each time we attempt a re-broadcast, // the `TimeoutTick` is incremented. Incrementing the field prevents de-duplicated within the network layer, // which in turn guarantees quick delivery of the `TimeoutObject` after GST and facilitates recovery. - // This field is not part of timeout object ID. Thereby, two timeouts are identical if only they differ - // by their TimeoutTick value. TimeoutTick uint64 } +// UntrustedTimeoutObject is an untrusted input-only representation of a TimeoutObject, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedTimeoutObject should be validated and converted into +// a trusted TimeoutObject using the NewTimeoutObject constructor. +type UntrustedTimeoutObject TimeoutObject + +// NewTimeoutObject creates a new instance of TimeoutObject. +// Construction of a TimeoutObject is allowed only within the constructor. +// +// All errors indicate a valid TimeoutObject cannot be constructed from the input. +func NewTimeoutObject(untrusted UntrustedTimeoutObject) (*TimeoutObject, error) { + if untrusted.NewestQC == nil { + return nil, fmt.Errorf("newest QC must not be nil") + } + if untrusted.SignerID == flow.ZeroID { + return nil, fmt.Errorf("signer ID must not be zero") + } + if len(untrusted.SigData) == 0 { + return nil, fmt.Errorf("signature must not be empty") + } + if untrusted.View <= untrusted.NewestQC.View { + return nil, fmt.Errorf("TO's QC %d cannot be newer than the TO's view %d", untrusted.NewestQC.View, untrusted.View) + } + + // If a TC is included, the TC must be for the past round, no matter whether a QC + // for the last round is also included. In some edge cases, a node might observe + // _both_ QC and TC for the previous round, in which case it can include both. + if untrusted.LastViewTC != nil { + if untrusted.View != untrusted.LastViewTC.View+1 { + return nil, fmt.Errorf("invalid TC for non-previous view, expected view %d, got view %d", untrusted.View-1, untrusted.LastViewTC.View) + } + if untrusted.NewestQC.View < untrusted.LastViewTC.NewestQC.View { + return nil, fmt.Errorf("timeout.NewestQC is older (view=%d) than the QC in timeout.LastViewTC (view=%d)", untrusted.NewestQC.View, untrusted.LastViewTC.NewestQC.View) + } + } + // The TO must contain a proof that the sender legitimately entered View. Transitioning + // to round timeout.View is possible either by observing a QC or a TC for the previous round. + // If no QC is included, we require a TC to be present, which, by the check above, must be for + // the previous round. + lastViewSuccessful := untrusted.View == untrusted.NewestQC.View+1 + if !lastViewSuccessful { + // The TO's sender did _not_ observe a QC for round timeout.View-1. Hence, it should + // include a TC for the previous round. Otherwise, the TO is invalid.
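To make the proof-of-entry rule concrete: a timeout for view 10 whose newest QC is for view 8 must carry a TC for view 9, otherwise the constructor (concluded just below) rejects it. A sketch with hypothetical inputs:

```go
package example

import (
	"github.com/onflow/flow-go/consensus/hotstuff/model"
	"github.com/onflow/flow-go/model/flow"
)

// buildTimeout is a hypothetical wrapper; qc is assumed to be for view 8 and
// tc for view 9, matching the scenario described in the lead-in.
func buildTimeout(qc *flow.QuorumCertificate, tc *flow.TimeoutCertificate, signer flow.Identifier, sig []byte) (*model.TimeoutObject, error) {
	return model.NewTimeoutObject(model.UntrustedTimeoutObject{
		View:       10,
		NewestQC:   qc, // qc.View == 8: not the previous view, so a TC is required
		LastViewTC: tc, // must be for view 9, otherwise construction fails
		SignerID:   signer,
		SigData:    sig,
	})
}
```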
+		if untrusted.LastViewTC == nil {
+			return nil, fmt.Errorf("must include TC")
+		}
+	}
+
+	return &TimeoutObject{
+		View:        untrusted.View,
+		NewestQC:    untrusted.NewestQC,
+		LastViewTC:  untrusted.LastViewTC,
+		SignerID:    untrusted.SignerID,
+		SigData:     untrusted.SigData,
+		TimeoutTick: untrusted.TimeoutTick,
+	}, nil
+}
+
 // ID returns the TimeoutObject's identifier
 func (t *TimeoutObject) ID() flow.Identifier {
 	body := struct {
@@ -79,20 +143,40 @@ func (t *TimeoutObject) ID() flow.Identifier {
 	return flow.MakeID(body)
 }
 
+// Equals returns true if and only if the receiver TimeoutObject is equal to `other`. Nil values are supported.
+// It compares View, NewestQC, LastViewTC, SignerID and SigData, and is used to de-duplicate TimeoutObjects in the cache.
+// It excludes TimeoutTick: two TimeoutObjects with different TimeoutTick values are considered equivalent.
+func (t *TimeoutObject) Equals(other *TimeoutObject) bool {
+	// Shortcut if `t` and `other` point to the same object; covers case where both are nil.
+	if t == other {
+		return true
+	}
+	if t == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above)
+		return false
+	}
+	// both are not nil, so we can compare the fields
+	return t.View == other.View &&
+		t.NewestQC.Equals(other.NewestQC) &&
+		t.LastViewTC.Equals(other.LastViewTC) &&
+		t.SignerID == other.SignerID &&
+		bytes.Equal(t.SigData, other.SigData)
+}
+
+// String returns a partial string representation of the TimeoutObject,
+// including the signer ID, view, and the newest QC view.
 func (t *TimeoutObject) String() string {
 	return fmt.Sprintf(
-		"View: %d, HighestQC.View: %d, LastViewTC: %v, TimeoutTick: %d",
+		"Signer ID: %s, View: %d, NewestQC.View: %d",
+		t.SignerID.String(),
 		t.View,
 		t.NewestQC.View,
-		t.LastViewTC,
-		t.TimeoutTick,
 	)
 }
 
-// LogContext returns a `zerolog.Contex` including the most important properties of the TC:
-//   - view number that this TC is for
+// LogContext returns a `zerolog.Context` including the most important properties of the TO:
 //   - view and ID of the block that the included QC points to
 //   - number of times a re-broadcast of this timeout was attempted
+//   - view number that this TO is for
 //   - [optional] if the TO also includes a TC for the prior view, i.e. `LastViewTC` ≠ nil:
 //     the view of `LastViewTC` and the view that `LastViewTC.NewestQC` is for
 func (t *TimeoutObject) LogContext(logger zerolog.Logger) zerolog.Context {
diff --git a/consensus/hotstuff/model/timeout_bench_test.go b/consensus/hotstuff/model/timeout_bench_test.go
new file mode 100644
index 00000000000..a3d73a48652
--- /dev/null
+++ b/consensus/hotstuff/model/timeout_bench_test.go
@@ -0,0 +1,82 @@
+package model_test
+
+import (
+	"bytes"
+	"testing"
+
+	clone "github.com/huandu/go-clone/generic"
+
+	"github.com/onflow/flow-go/consensus/hotstuff/helper"
+	"github.com/onflow/flow-go/consensus/hotstuff/model"
+)
+
+// createDummyTimeoutObjects constructs two identical TimeoutObjects with entirely disjoint memory representations.
+func createDummyTimeoutObjects() (*model.TimeoutObject, *model.TimeoutObject) {
+	to := helper.TimeoutObjectFixture()
+	return to, clone.Clone(to)
+}
+
+// comparingUsingFieldsAndHashes evaluates equality of two TimeoutObjects using
+//   - field comparisons for primitive types
+//   - comparison of hashes for the embedded `NewestQC` and `LastViewTC` objects.
+func comparingUsingFieldsAndHashes(a, b *model.TimeoutObject) bool {
+	if a == b { // shortcut if the same pointer is provided
+		return true
+	}
+	if a == nil || b == nil {
+		return false
+	}
+	// both are not nil, so we can compare the fields
+	return a.View == b.View &&
+		a.NewestQC.ID() == b.NewestQC.ID() &&
+		a.LastViewTC.ID() == b.LastViewTC.ID() &&
+		a.SignerID == b.SignerID &&
+		bytes.Equal(a.SigData, b.SigData)
+}
+
+// BenchmarkTimeoutObjectEquals_CompareFieldsAndHashes benchmarks an `Equals` implementation that is based on
+// comparison of fields (for primitive types) and comparison of hashes (for `NewestQC` and `LastViewTC`).
+func BenchmarkTimeoutObjectEquals_CompareFieldsAndHashes(t *testing.B) {
+	a, b := createDummyTimeoutObjects()
+
+	t.ResetTimer()
+	for i := 0; i < t.N; i++ {
+		_ = comparingUsingFieldsAndHashes(a, b)
+	}
+}
+
+// comparingOnlyUsingFields evaluates equality of two TimeoutObjects using only field comparisons.
+func comparingOnlyUsingFields(a, b *model.TimeoutObject) bool {
+	if a == b { // shortcut if the same pointer is provided
+		return true
+	}
+	if a == nil || b == nil {
+		return false
+	}
+	return (a.View == b.View) &&
+		(a.SignerID == b.SignerID) &&
+		bytes.Equal(a.SigData, b.SigData) &&
+		a.NewestQC.Equals(b.NewestQC) &&
+		a.LastViewTC.Equals(b.LastViewTC)
+}
+
+// BenchmarkTimeoutObjectEquals_CompareFieldsOnly benchmarks an `Equals` implementation that is based
+// solely on comparison of fields (no serialization or hash computations).
+func BenchmarkTimeoutObjectEquals_CompareFieldsOnly(t *testing.B) {
+	a, b := createDummyTimeoutObjects()
+
+	t.ResetTimer()
+	for i := 0; i < t.N; i++ {
+		_ = comparingOnlyUsingFields(a, b)
+	}
+}
+
+// BenchmarkTimeoutObjectEquals benchmarks the `Equals` implementation provided by `TimeoutObject`.
+func BenchmarkTimeoutObjectEquals(t *testing.B) {
+	a, b := createDummyTimeoutObjects()
+
+	t.ResetTimer()
+	for i := 0; i < t.N; i++ {
+		_ = a.Equals(b)
+	}
+}
diff --git a/consensus/hotstuff/model/timeout_test.go b/consensus/hotstuff/model/timeout_test.go
new file mode 100644
index 00000000000..157761ab4fb
--- /dev/null
+++ b/consensus/hotstuff/model/timeout_test.go
@@ -0,0 +1,231 @@
+package model_test
+
+import (
+	"math/rand"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/consensus/hotstuff/helper"
+	"github.com/onflow/flow-go/consensus/hotstuff/model"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// TestTimeoutObject_Equals verifies the correctness of the Equals method on TimeoutObject.
+// It checks that TimeoutObjects are considered equal if and only if all fields except
+// TimeoutTick match. This test is crucial for ensuring that the deduplication logic works
+// correctly in the cache.
+//
+// Fields compared for equality:
+//   - View
+//   - NewestQC
+//   - LastViewTC
+//   - SignerID
+//   - SigData
+//
+// TimeoutTick is explicitly excluded from equality checks.
+func TestTimeoutObject_Equals(t *testing.T) {
+	// Create two TimeoutObjects with random but different values.
+	timeout1 := helper.TimeoutObjectFixture()
+	timeout2 := helper.TimeoutObjectFixture()
+	require.False(t, timeout1.Equals(timeout2), "Initially, all fields are different, so the objects should not be equal.")
+
+	// List of mutations to apply on timeout1 to gradually make it equal to timeout2
+	// (excluding TimeoutTick).
+	mutations := []func(){
+		func() {
+			timeout1.View = timeout2.View
+		}, func() {
+			timeout1.NewestQC = timeout2.NewestQC
+		}, func() {
+			timeout1.LastViewTC = timeout2.LastViewTC
+		}, func() {
+			timeout1.SignerID = timeout2.SignerID
+		}, func() {
+			timeout1.SigData = timeout2.SigData
+		},
+	}
+
+	// Shuffle the order of mutations
+	rand.Shuffle(len(mutations), func(i, j int) {
+		mutations[i], mutations[j] = mutations[j], mutations[i]
+	})
+
+	// Apply each mutation one at a time, except the last.
+	// After each step, the objects should still not be equal.
+	for _, mutation := range mutations[:len(mutations)-1] {
+		mutation()
+		require.False(t, timeout1.Equals(timeout2))
+	}
+
+	// Apply the final mutation; now all relevant fields should match, so the objects must be equal.
+	mutations[len(mutations)-1]()
+	require.True(t, timeout1.Equals(timeout2))
+
+	// Even if TimeoutTick differs, equality should still hold, since TimeoutTick is not relevant for equality.
+	timeout1.TimeoutTick = timeout2.TimeoutTick + 1
+	require.True(t, timeout1.Equals(timeout2))
+}
+
+// TestTimeoutObject_Equals_Nil verifies the behavior of the Equals method when either
+// or both of the receiver and the function input are nil.
+func TestTimeoutObject_Equals_Nil(t *testing.T) {
+	var nilTO *model.TimeoutObject
+	to := helper.TimeoutObjectFixture()
+	t.Run("nil receiver", func(t *testing.T) {
+		require.False(t, nilTO.Equals(to))
+	})
+	t.Run("nil input", func(t *testing.T) {
+		require.False(t, to.Equals(nilTO))
+	})
+	t.Run("both nil", func(t *testing.T) {
+		require.True(t, nilTO.Equals(nil))
+	})
+}
+
+// TestNewTimeoutObject verifies the behavior of the NewTimeoutObject constructor.
+// It ensures proper handling of both valid and invalid untrusted input fields.
+//
+// Test Cases:
+//
+// 1. Valid input:
+//   - Verifies that a properly populated UntrustedTimeoutObject results in a valid TimeoutObject.
+//
+// 2. Invalid input with nil NewestQC:
+//   - Ensures an error is returned when the NewestQC field is nil.
+//
+// 3. Invalid input with zero SignerID:
+//   - Ensures an error is returned when the SignerID is flow.ZeroID.
+//
+// 4. Invalid input with nil SigData:
+//   - Ensures an error is returned when the SigData field is nil.
+//
+// 5. Invalid input with empty SigData:
+//   - Ensures an error is returned when the SigData field is an empty byte slice.
+//
+// 6. Invalid input when View is lower than or equal to NewestQC.View:
+//   - Ensures an error is returned when the TimeoutObject's View is less than or equal to the included QC's View.
+//
+// 7. Invalid input when a TC is present but for the wrong view:
+//   - Ensures an error is returned when LastViewTC.View is not one less than the TimeoutObject's View.
+//
+// 8. Invalid input when the TC's QC is newer than the TimeoutObject's QC:
+//   - Ensures an error is returned when the TimeoutObject's NewestQC.View is older than LastViewTC.NewestQC.View.
+//
+// 9. Invalid input when LastViewTC is missing and the QC does not prove the previous round:
+//   - Ensures an error is returned when the TimeoutObject lacks both a QC for the previous round and a LastViewTC.
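The constructor test below exercises exactly these cases. For orientation, a hedged sketch of the intended call pattern; the `qc`, `signerID`, and `sig` values and the enclosing error-returning function are assumed for illustration:

// qc is assumed to certify view 100, so a timeout for view 101 needs no
// LastViewTC: the QC alone proves legitimate entry into view 101.
to, err := model.NewTimeoutObject(model.UntrustedTimeoutObject{
	View:     101,
	NewestQC: qc,
	SignerID: signerID,
	SigData:  sig,
})
if err != nil {
	return fmt.Errorf("could not construct timeout object: %w", err)
}
_ = to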
+func TestNewTimeoutObject(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + res, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*helper.TimeoutObjectFixture())) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with nil NewestQC", func(t *testing.T) { + to := helper.TimeoutObjectFixture() + to.NewestQC = nil + + res, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*to)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "newest QC must not be nil") + }) + + t.Run("invalid input with zero SignerID", func(t *testing.T) { + to := helper.TimeoutObjectFixture() + to.SignerID = flow.ZeroID + + res, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*to)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signer ID must not be zero") + }) + + t.Run("invalid input with nil SigData", func(t *testing.T) { + to := helper.TimeoutObjectFixture() + to.SigData = nil + + res, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*to)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signature must not be empty") + }) + + t.Run("invalid input with empty SigData", func(t *testing.T) { + to := helper.TimeoutObjectFixture() + to.SigData = []byte{} + + res, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*to)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signature must not be empty") + }) + + t.Run("invalid input when View <= NewestQC.View", func(t *testing.T) { + qc := helper.MakeQC(helper.WithQCView(100)) + res, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject( + *helper.TimeoutObjectFixture( + helper.WithTimeoutNewestQC(qc), + helper.WithTimeoutObjectView(100), // Equal to QC view + ), + )) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "TO's QC 100 cannot be newer than the TO's view 100") + }) + + t.Run("invalid input when LastViewTC.View is not View - 1", func(t *testing.T) { + tc := helper.MakeTC(helper.WithTCView(50)) + qc := helper.MakeQC(helper.WithQCView(40)) + + result, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject( + *helper.TimeoutObjectFixture( + helper.WithTimeoutObjectView(100), + helper.WithTimeoutNewestQC(qc), + helper.WithTimeoutLastViewTC(tc), + ), + ), + ) + require.Error(t, err) + require.Nil(t, result) + assert.Contains(t, err.Error(), "invalid TC for non-previous view") + }) + + t.Run("invalid input when TimeoutObject's QC is older than TC's QC", func(t *testing.T) { + tcQC := helper.MakeQC(helper.WithQCView(150)) + tc := helper.MakeTC(helper.WithTCNewestQC(tcQC), helper.WithTCView(99)) + + res, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject( + *helper.TimeoutObjectFixture( + helper.WithTimeoutObjectView(100), + helper.WithTimeoutLastViewTC(tc), + helper.WithTimeoutNewestQC(helper.MakeQC(helper.WithQCView(80))), // older than TC.NewestQC + ), + ), + ) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "timeout.NewestQC is older") + }) + + t.Run("invalid input when no QC for previous round and TC is missing", func(t *testing.T) { + qc := helper.MakeQC(helper.WithQCView(90)) + + res, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject( + *helper.TimeoutObjectFixture( + helper.WithTimeoutObjectView(100), + helper.WithTimeoutNewestQC(qc), + helper.WithTimeoutLastViewTC(nil), + ), + ), + ) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, 
err.Error(), "must include TC") + }) +} diff --git a/consensus/hotstuff/model/vote.go b/consensus/hotstuff/model/vote.go index eedf3c975ff..5046028f47f 100644 --- a/consensus/hotstuff/model/vote.go +++ b/consensus/hotstuff/model/vote.go @@ -1,11 +1,14 @@ package model import ( - "github.com/onflow/flow-go/crypto" + "fmt" + "github.com/onflow/flow-go/model/flow" ) // Vote is the HotStuff algorithm's concept of a vote for a block proposal. +// +//structwrite:immutable - mutations allowed only within the constructor type Vote struct { View uint64 BlockID flow.Identifier @@ -13,18 +16,43 @@ type Vote struct { SigData []byte } +// UntrustedVote is an untrusted input-only representation of an Vote, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedVote should be validated and converted into +// a trusted Vote using NewVote constructor. +type UntrustedVote Vote + +// NewVote creates a new instance of Vote. +// Construction Vote allowed only within the constructor +// +// All errors indicate a valid Vote cannot be constructed from the input. +func NewVote(untrusted UntrustedVote) (*Vote, error) { + if untrusted.BlockID == flow.ZeroID { + return nil, fmt.Errorf("BlockID must not be empty") + } + + if untrusted.SignerID == flow.ZeroID { + return nil, fmt.Errorf("SignerID must not be empty") + } + + if len(untrusted.SigData) == 0 { + return nil, fmt.Errorf("SigData must not be empty") + } + + return &Vote{ + View: untrusted.View, + BlockID: untrusted.BlockID, + SignerID: untrusted.SignerID, + SigData: untrusted.SigData, + }, nil +} + // ID returns the identifier for the vote. func (uv *Vote) ID() flow.Identifier { return flow.MakeID(uv) } - -// VoteFromFlow turns the vote parameters into a vote struct. -func VoteFromFlow(signerID flow.Identifier, blockID flow.Identifier, view uint64, sig crypto.Signature) *Vote { - vote := Vote{ - View: view, - BlockID: blockID, - SignerID: signerID, - SigData: sig, - } - return &vote -} diff --git a/consensus/hotstuff/model/vote_test.go b/consensus/hotstuff/model/vote_test.go new file mode 100644 index 00000000000..95f18550548 --- /dev/null +++ b/consensus/hotstuff/model/vote_test.go @@ -0,0 +1,11 @@ +package model_test + +import ( + "testing" + + "github.com/onflow/flow-go/utils/unittest" +) + +func TestVoteNonMalleable(t *testing.T) { + unittest.RequireEntityNonMalleable(t, unittest.VoteFixture()) +} diff --git a/consensus/hotstuff/notifications/log_consumer.go b/consensus/hotstuff/notifications/log_consumer.go index 4f97fb53343..acd4f16624a 100644 --- a/consensus/hotstuff/notifications/log_consumer.go +++ b/consensus/hotstuff/notifications/log_consumer.go @@ -46,16 +46,17 @@ func (lc *LogConsumer) OnFinalizedBlock(block *model.Block) { Msg("block finalized") } -func (lc *LogConsumer) OnInvalidBlockDetected(err model.InvalidProposalError) { - invalidBlock := err.InvalidProposal.Block +func (lc *LogConsumer) OnInvalidBlockDetected(err flow.Slashable[model.InvalidProposalError]) { + invalidBlock := err.Message.InvalidProposal.Block lc.log.Warn(). Str(logging.KeySuspicious, "true"). + Hex("origin_id", err.OriginID[:]). Uint64("block_view", invalidBlock.View). Hex("proposer_id", invalidBlock.ProposerID[:]). Hex("block_id", invalidBlock.BlockID[:]). Uint64("qc_block_view", invalidBlock.QC.View). Hex("qc_block_id", invalidBlock.QC.BlockID[:]). 
- Msgf("invalid block detected: %s", err.Error()) + Msgf("invalid block detected: %s", err.Message.Error()) } func (lc *LogConsumer) OnDoubleProposeDetected(block *model.Block, alt *model.Block) { @@ -68,7 +69,7 @@ func (lc *LogConsumer) OnDoubleProposeDetected(block *model.Block, alt *model.Bl Msg("double proposal detected") } -func (lc *LogConsumer) OnReceiveProposal(currentView uint64, proposal *model.Proposal) { +func (lc *LogConsumer) OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) { logger := lc.logBasicBlockData(lc.log.Debug(), proposal.Block). Uint64("cur_view", currentView) lastViewTC := proposal.LastViewTC @@ -196,7 +197,7 @@ func (lc *LogConsumer) OnInvalidVoteDetected(err model.InvalidVoteError) { Msgf("invalid vote detected: %s", err.Error()) } -func (lc *LogConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, proposal *model.Proposal) { +func (lc *LogConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, proposal *model.SignedProposal) { lc.log.Warn(). Str(logging.KeySuspicious, "true"). Uint64("vote_view", vote.View). @@ -209,10 +210,12 @@ func (lc *LogConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, proposal func (lc *LogConsumer) OnDoubleTimeoutDetected(timeout *model.TimeoutObject, alt *model.TimeoutObject) { lc.log.Warn(). Str(logging.KeySuspicious, "true"). + Hex("timeout_signer_id", logging.ID(timeout.SignerID)). Uint64("timeout_view", timeout.View). - Hex("signer_id", logging.ID(timeout.SignerID)). - Hex("timeout_id", logging.ID(timeout.ID())). - Hex("alt_id", logging.ID(alt.ID())). + Uint64("timeout_newest_qc_view", timeout.NewestQC.View). + Hex("alt_signer_id", logging.ID(alt.SignerID)). + Uint64("alt_view", alt.View). + Uint64("alt_newest_qc_view", alt.NewestQC.View). Msg("double timeout detected") } @@ -228,7 +231,6 @@ func (lc *LogConsumer) logBasicBlockData(loggerEvent *zerolog.Event, block *mode Uint64("block_view", block.View). Hex("block_id", logging.ID(block.BlockID)). Hex("proposer_id", logging.ID(block.ProposerID)). - Hex("payload_hash", logging.ID(block.PayloadHash)). Uint64("qc_view", block.QC.View). Hex("qc_block_id", logging.ID(block.QC.BlockID)) @@ -267,10 +269,10 @@ func (lc *LogConsumer) OnNewTcDiscovered(tc *flow.TimeoutCertificate) { Msg("new TC discovered") } -func (lc *LogConsumer) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { +func (lc *LogConsumer) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { lc.log.Debug(). - Hex("block_id", blockID[:]). - Uint64("block_view", view). + Hex("block_id", vote.BlockID[:]). + Uint64("block_view", vote.View). Hex("recipient_id", recipientID[:]). Msg("publishing HotStuff vote") } @@ -280,7 +282,8 @@ func (lc *LogConsumer) OnOwnTimeout(timeout *model.TimeoutObject) { log.Debug().Msg("publishing HotStuff timeout object") } -func (lc *LogConsumer) OnOwnProposal(header *flow.Header, targetPublicationTime time.Time) { +func (lc *LogConsumer) OnOwnProposal(proposal *flow.ProposalHeader, targetPublicationTime time.Time) { + header := proposal.Header lc.log.Debug(). Str("chain_id", header.ChainID.String()). Uint64("block_height", header.Height). @@ -288,7 +291,7 @@ func (lc *LogConsumer) OnOwnProposal(header *flow.Header, targetPublicationTime Hex("block_id", logging.Entity(header)). Hex("parent_id", header.ParentID[:]). Hex("payload_hash", header.PayloadHash[:]). - Time("timestamp", header.Timestamp). + Time("timestamp", time.UnixMilli(int64(header.Timestamp)).UTC()). 
Hex("parent_signer_indices", header.ParentVoterIndices). Time("target_publication_time", targetPublicationTime). Msg("publishing HotStuff block proposal") diff --git a/consensus/hotstuff/notifications/noop_consumer.go b/consensus/hotstuff/notifications/noop_consumer.go index 4ae0584a9d2..ab7e5dad28d 100644 --- a/consensus/hotstuff/notifications/noop_consumer.go +++ b/consensus/hotstuff/notifications/noop_consumer.go @@ -32,7 +32,7 @@ func (*NoopParticipantConsumer) OnEventProcessed() {} func (*NoopParticipantConsumer) OnStart(uint64) {} -func (*NoopParticipantConsumer) OnReceiveProposal(uint64, *model.Proposal) {} +func (*NoopParticipantConsumer) OnReceiveProposal(uint64, *model.SignedProposal) {} func (*NoopParticipantConsumer) OnReceiveQc(uint64, *flow.QuorumCertificate) {} @@ -85,11 +85,11 @@ type NoopCommunicatorConsumer struct{} var _ hotstuff.CommunicatorConsumer = (*NoopCommunicatorConsumer)(nil) -func (*NoopCommunicatorConsumer) OnOwnVote(flow.Identifier, uint64, []byte, flow.Identifier) {} +func (*NoopCommunicatorConsumer) OnOwnVote(*model.Vote, flow.Identifier) {} func (*NoopCommunicatorConsumer) OnOwnTimeout(*model.TimeoutObject) {} -func (*NoopCommunicatorConsumer) OnOwnProposal(*flow.Header, time.Time) {} +func (*NoopCommunicatorConsumer) OnOwnProposal(*flow.ProposalHeader, time.Time) {} // no-op implementation of hotstuff.VoteCollectorConsumer @@ -107,7 +107,8 @@ type NoopProposalViolationConsumer struct{} var _ hotstuff.ProposalViolationConsumer = (*NoopProposalViolationConsumer)(nil) -func (*NoopProposalViolationConsumer) OnInvalidBlockDetected(model.InvalidProposalError) {} +func (*NoopProposalViolationConsumer) OnInvalidBlockDetected(flow.Slashable[model.InvalidProposalError]) { +} func (*NoopProposalViolationConsumer) OnDoubleProposeDetected(*model.Block, *model.Block) {} @@ -115,7 +116,8 @@ func (*NoopProposalViolationConsumer) OnDoubleVotingDetected(*model.Vote, *model func (*NoopProposalViolationConsumer) OnInvalidVoteDetected(model.InvalidVoteError) {} -func (*NoopProposalViolationConsumer) OnVoteForInvalidBlockDetected(*model.Vote, *model.Proposal) {} +func (*NoopProposalViolationConsumer) OnVoteForInvalidBlockDetected(*model.Vote, *model.SignedProposal) { +} func (*NoopProposalViolationConsumer) OnDoubleTimeoutDetected(*model.TimeoutObject, *model.TimeoutObject) { } diff --git a/consensus/hotstuff/notifications/pubsub/communicator_distributor.go b/consensus/hotstuff/notifications/pubsub/communicator_distributor.go index 5e0604fa83c..e5057e6e848 100644 --- a/consensus/hotstuff/notifications/pubsub/communicator_distributor.go +++ b/consensus/hotstuff/notifications/pubsub/communicator_distributor.go @@ -31,11 +31,11 @@ func (d *CommunicatorDistributor) AddCommunicatorConsumer(consumer hotstuff.Comm d.consumers = append(d.consumers, consumer) } -func (d *CommunicatorDistributor) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { +func (d *CommunicatorDistributor) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { d.lock.RLock() defer d.lock.RUnlock() for _, s := range d.consumers { - s.OnOwnVote(blockID, view, sigData, recipientID) + s.OnOwnVote(vote, recipientID) } } @@ -47,7 +47,7 @@ func (d *CommunicatorDistributor) OnOwnTimeout(timeout *model.TimeoutObject) { } } -func (d *CommunicatorDistributor) OnOwnProposal(proposal *flow.Header, targetPublicationTime time.Time) { +func (d *CommunicatorDistributor) OnOwnProposal(proposal *flow.ProposalHeader, targetPublicationTime time.Time) { d.lock.RLock() defer 
d.lock.RUnlock() for _, s := range d.consumers { diff --git a/consensus/hotstuff/notifications/pubsub/participant_distributor.go b/consensus/hotstuff/notifications/pubsub/participant_distributor.go index f5047cd7a53..4285a96b258 100644 --- a/consensus/hotstuff/notifications/pubsub/participant_distributor.go +++ b/consensus/hotstuff/notifications/pubsub/participant_distributor.go @@ -46,7 +46,7 @@ func (d *ParticipantDistributor) OnStart(currentView uint64) { } } -func (d *ParticipantDistributor) OnReceiveProposal(currentView uint64, proposal *model.Proposal) { +func (d *ParticipantDistributor) OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) { d.lock.RLock() defer d.lock.RUnlock() for _, subscriber := range d.consumers { diff --git a/consensus/hotstuff/notifications/pubsub/proposal_violation_distributor.go b/consensus/hotstuff/notifications/pubsub/proposal_violation_distributor.go index b2ed5f533af..7b974a3269c 100644 --- a/consensus/hotstuff/notifications/pubsub/proposal_violation_distributor.go +++ b/consensus/hotstuff/notifications/pubsub/proposal_violation_distributor.go @@ -5,6 +5,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/model/flow" ) // ProposalViolationDistributor ingests notifications about HotStuff-protocol violations and @@ -28,7 +29,7 @@ func (d *ProposalViolationDistributor) AddProposalViolationConsumer(consumer hot d.consumers = append(d.consumers, consumer) } -func (d *ProposalViolationDistributor) OnInvalidBlockDetected(err model.InvalidProposalError) { +func (d *ProposalViolationDistributor) OnInvalidBlockDetected(err flow.Slashable[model.InvalidProposalError]) { d.lock.RLock() defer d.lock.RUnlock() for _, subscriber := range d.consumers { diff --git a/consensus/hotstuff/notifications/pubsub/vote_aggregation_violation_consumer.go b/consensus/hotstuff/notifications/pubsub/vote_aggregation_violation_consumer.go index d9d1e9baa26..7b75bd933e1 100644 --- a/consensus/hotstuff/notifications/pubsub/vote_aggregation_violation_consumer.go +++ b/consensus/hotstuff/notifications/pubsub/vote_aggregation_violation_consumer.go @@ -43,7 +43,7 @@ func (d *VoteAggregationViolationDistributor) OnInvalidVoteDetected(err model.In } } -func (d *VoteAggregationViolationDistributor) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.Proposal) { +func (d *VoteAggregationViolationDistributor) OnVoteForInvalidBlockDetected(vote *model.Vote, invalidProposal *model.SignedProposal) { d.lock.RLock() defer d.lock.RUnlock() for _, subscriber := range d.consumers { diff --git a/consensus/hotstuff/notifications/slashing_violation_consumer.go b/consensus/hotstuff/notifications/slashing_violation_consumer.go index 8b8b55ff886..7c447344f31 100644 --- a/consensus/hotstuff/notifications/slashing_violation_consumer.go +++ b/consensus/hotstuff/notifications/slashing_violation_consumer.go @@ -1,10 +1,13 @@ package notifications import ( + "time" + "github.com/rs/zerolog" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/logging" ) @@ -23,16 +26,16 @@ func NewSlashingViolationsConsumer(log zerolog.Logger) *SlashingViolationsConsum log: log, } } -func (c *SlashingViolationsConsumer) OnInvalidBlockDetected(err model.InvalidProposalError) { - block := err.InvalidProposal.Block +func (c *SlashingViolationsConsumer) OnInvalidBlockDetected(err 
flow.Slashable[model.InvalidProposalError]) {
+	block := err.Message.InvalidProposal.Block
 	c.log.Warn().
 		Bool(logging.KeySuspicious, true).
+		Hex("origin_id", err.OriginID[:]).
 		Hex("proposer_id", block.ProposerID[:]).
 		Uint64("block_view", block.View).
 		Hex("block_id", block.BlockID[:]).
-		Hex("block_payloadhash", block.PayloadHash[:]).
-		Time("block_timestamp", block.Timestamp).
-		Msg("OnInvalidBlockDetected")
+		Time("block_timestamp", time.UnixMilli(int64(block.Timestamp)).UTC()).
+		Msgf("OnInvalidBlockDetected: %s", err.Message.Error())
 }
 
 func (c *SlashingViolationsConsumer) OnDoubleVotingDetected(vote1 *model.Vote, vote2 *model.Vote) {
@@ -59,10 +62,12 @@ func (c *SlashingViolationsConsumer) OnInvalidVoteDetected(err model.InvalidVote
 func (c *SlashingViolationsConsumer) OnDoubleTimeoutDetected(timeout *model.TimeoutObject, altTimeout *model.TimeoutObject) {
 	c.log.Warn().
 		Bool(logging.KeySuspicious, true).
-		Hex("timeout_creator", timeout.SignerID[:]).
+		Hex("timeout_signer_id", timeout.SignerID[:]).
 		Uint64("timeout_view", timeout.View).
-		Hex("timeout_id1", logging.ID(timeout.ID())).
-		Hex("timeout_id2", logging.ID(altTimeout.ID())).
+		Uint64("timeout_newest_qc_view", timeout.NewestQC.View).
+		Hex("alt_signer_id", logging.ID(altTimeout.SignerID)).
+		Uint64("alt_view", altTimeout.View).
+		Uint64("alt_newest_qc_view", altTimeout.NewestQC.View).
 		Msg("OnDoubleTimeoutDetected")
 }
 
@@ -76,7 +81,7 @@ func (c *SlashingViolationsConsumer) OnInvalidTimeoutDetected(err model.InvalidT
 		Msg("OnInvalidTimeoutDetected")
 }
 
-func (c *SlashingViolationsConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, proposal *model.Proposal) {
+func (c *SlashingViolationsConsumer) OnVoteForInvalidBlockDetected(vote *model.Vote, proposal *model.SignedProposal) {
 	c.log.Warn().
 		Uint64("vote_view", vote.View).
 		Hex("voted_block_id", vote.BlockID[:]).
diff --git a/consensus/hotstuff/notifications/telemetry.go b/consensus/hotstuff/notifications/telemetry.go
index 7bbf57f79de..ed132bd5056 100644
--- a/consensus/hotstuff/notifications/telemetry.go
+++ b/consensus/hotstuff/notifications/telemetry.go
@@ -38,7 +38,10 @@ type TelemetryConsumer struct {
 	noPathLogger zerolog.Logger
 }
 
+// TelemetryConsumer implements consumers for _all happy-path_ notification interfaces of consensus/hotstuff:
 var _ hotstuff.ParticipantConsumer = (*TelemetryConsumer)(nil)
+var _ hotstuff.CommunicatorConsumer = (*TelemetryConsumer)(nil)
+var _ hotstuff.FinalizationConsumer = (*TelemetryConsumer)(nil)
 var _ hotstuff.VoteCollectorConsumer = (*TelemetryConsumer)(nil)
 var _ hotstuff.TimeoutCollectorConsumer = (*TelemetryConsumer)(nil)
 
@@ -57,14 +60,14 @@ func (t *TelemetryConsumer) OnStart(currentView uint64) {
 	t.pathHandler.NextStep().Msg("OnStart")
 }
 
-func (t *TelemetryConsumer) OnReceiveProposal(currentView uint64, proposal *model.Proposal) {
+func (t *TelemetryConsumer) OnReceiveProposal(currentView uint64, proposal *model.SignedProposal) {
 	block := proposal.Block
 	t.pathHandler.StartNextPath(currentView)
 	step := t.pathHandler.NextStep().
 		Uint64("block_view", block.View).
 		Hex("block_id", logging.ID(block.BlockID)).
 		Hex("block_proposer_id", logging.ID(block.ProposerID)).
-		Time("block_time", block.Timestamp).
+		Time("block_time", time.UnixMilli(int64(block.Timestamp)).UTC()).
 		Uint64("qc_view", block.QC.View).
Hex("qc_block_id", logging.ID(block.QC.BlockID)) @@ -166,24 +169,24 @@ func (t *TelemetryConsumer) OnTcTriggeredViewChange(oldView uint64, newView uint Msg("OnTcTriggeredViewChange") } -func (t *TelemetryConsumer) OnOwnVote(blockID flow.Identifier, view uint64, _ []byte, recipientID flow.Identifier) { +func (t *TelemetryConsumer) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { t.pathHandler.NextStep(). - Uint64("voted_block_view", view). - Hex("voted_block_id", logging.ID(blockID)). + Uint64("voted_block_view", vote.View). + Hex("voted_block_id", logging.ID(vote.BlockID)). Hex("recipient_id", logging.ID(recipientID)). Msg("OnOwnVote") } -func (t *TelemetryConsumer) OnOwnProposal(proposal *flow.Header, targetPublicationTime time.Time) { +func (t *TelemetryConsumer) OnOwnProposal(proposal *flow.ProposalHeader, targetPublicationTime time.Time) { step := t.pathHandler.NextStep(). - Uint64("block_view", proposal.View). - Hex("block_id", logging.ID(proposal.ID())). - Hex("block_proposer_id", logging.ID(proposal.ProposerID)). - Time("block_time", proposal.Timestamp). - Uint64("qc_view", proposal.ParentView). - Hex("qc_block_id", logging.ID(proposal.ParentID)). + Uint64("block_view", proposal.Header.View). + Hex("block_id", logging.ID(proposal.Header.ID())). + Hex("block_proposer_id", logging.ID(proposal.Header.ProposerID)). + Time("block_time", time.UnixMilli(int64(proposal.Header.Timestamp)).UTC()). + Uint64("qc_view", proposal.Header.ParentView). + Hex("qc_block_id", logging.ID(proposal.Header.ParentID)). Time("targetPublicationTime", targetPublicationTime) - lastViewTC := proposal.LastViewTC + lastViewTC := proposal.Header.LastViewTC if lastViewTC != nil { step. Uint64("last_view_tc_view", lastViewTC.View). diff --git a/consensus/hotstuff/pacemaker.go b/consensus/hotstuff/pacemaker.go index 66b8787b241..d7bafac4d63 100644 --- a/consensus/hotstuff/pacemaker.go +++ b/consensus/hotstuff/pacemaker.go @@ -50,6 +50,7 @@ type LivenessData struct { // // Not concurrency safe. type PaceMaker interface { + ProposalDurationProvider // CurView returns the current view. CurView() uint64 @@ -81,8 +82,25 @@ type PaceMaker interface { // be executed by the same goroutine that also calls the other business logic // methods, or concurrency safety has to be implemented externally. Start(ctx context.Context) +} + +// ProposalDurationProvider generates the target publication time for block proposals. +type ProposalDurationProvider interface { - // BlockRateDelay returns the minimal wait time for broadcasting a proposal, measured from - // the point in time when the primary (locally) enters the respective view. - BlockRateDelay() time.Duration + // TargetPublicationTime is intended to be called by the EventHandler, whenever it + // wants to publish a new proposal. The event handler inputs + // - proposalView: the view it is proposing for, + // - timeViewEntered: the time when the EventHandler entered this view + // - parentBlockId: the ID of the parent block, which the EventHandler is building on + // TargetPublicationTime returns the time stamp when the new proposal should be broadcasted. 
+ // For a given view where we are the primary, suppose the actual time we are done building our proposal is P: + // - if P < TargetPublicationTime(..), then the EventHandler should wait until + // `TargetPublicationTime` to broadcast the proposal + // - if P >= TargetPublicationTime(..), then the EventHandler should immediately broadcast the proposal + // + // Note: Technically, our metrics capture the publication delay relative to this function's _latest_ call. + // Currently, the EventHandler is the only caller of this function, and only calls it once. + // + // Concurrency safe. + TargetPublicationTime(proposalView uint64, timeViewEntered time.Time, parentBlockId flow.Identifier) time.Time } diff --git a/consensus/hotstuff/pacemaker/pacemaker.go b/consensus/hotstuff/pacemaker/pacemaker.go index 1e1959eeb60..ae62aee0ea2 100644 --- a/consensus/hotstuff/pacemaker/pacemaker.go +++ b/consensus/hotstuff/pacemaker/pacemaker.go @@ -27,14 +27,17 @@ import ( // // Not concurrency safe. type ActivePaceMaker struct { + hotstuff.ProposalDurationProvider + ctx context.Context timeoutControl *timeout.Controller - notifier hotstuff.Consumer + notifier hotstuff.ParticipantConsumer viewTracker viewTracker started bool } var _ hotstuff.PaceMaker = (*ActivePaceMaker)(nil) +var _ hotstuff.ProposalDurationProvider = (*ActivePaceMaker)(nil) // New creates a new ActivePaceMaker instance // - startView is the view for the pacemaker to start with. @@ -45,6 +48,7 @@ var _ hotstuff.PaceMaker = (*ActivePaceMaker)(nil) // * model.ConfigurationError if initial LivenessData is invalid func New( timeoutController *timeout.Controller, + proposalDurationProvider hotstuff.ProposalDurationProvider, notifier hotstuff.Consumer, persist hotstuff.Persister, recovery ...recoveryInformation, @@ -55,10 +59,11 @@ func New( } pm := &ActivePaceMaker{ - timeoutControl: timeoutController, - notifier: notifier, - viewTracker: vt, - started: false, + ProposalDurationProvider: proposalDurationProvider, + timeoutControl: timeoutController, + notifier: notifier, + viewTracker: vt, + started: false, } for _, recoveryAction := range recovery { err = recoveryAction(pm) @@ -85,9 +90,6 @@ func (p *ActivePaceMaker) LastViewTC() *flow.TimeoutCertificate { return p.viewT // To get the timeout for the next timeout, you need to call TimeoutChannel() again. func (p *ActivePaceMaker) TimeoutChannel() <-chan time.Time { return p.timeoutControl.Channel() } -// BlockRateDelay returns the delay for broadcasting its own proposals. -func (p *ActivePaceMaker) BlockRateDelay() time.Duration { return p.timeoutControl.BlockRateDelay() } - // ProcessQC notifies the pacemaker with a new QC, which might allow pacemaker to // fast-forward its view. In contrast to `ProcessTC`, this function does _not_ handle `nil` inputs. // No errors are expected, any error should be treated as exception @@ -171,7 +173,7 @@ type recoveryInformation func(p *ActivePaceMaker) error // WithQCs informs the PaceMaker about the given QCs. Old and nil QCs are accepted (no-op). func WithQCs(qcs ...*flow.QuorumCertificate) recoveryInformation { - // To avoid excessive data base writes during initialization, we pre-filter the newest QC + // To avoid excessive database writes during initialization, we pre-filter the newest QC // here and only hand that one to the viewTracker. For recovery, we allow the special case // of nil QCs, because the genesis block has no QC. 
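The `TargetPublicationTime` contract above translates into a simple caller pattern. A hedged sketch under the stated semantics; `pm`, `view`, `timeViewEntered`, `parentBlockID`, and the `broadcast` helper are assumed plumbing, not part of this change:

// If the proposal is ready before the target publication time, hold it
// back; otherwise broadcast immediately. `pm` is a hotstuff.PaceMaker.
target := pm.TargetPublicationTime(view, timeViewEntered, parentBlockID)
if wait := time.Until(target); wait > 0 {
	time.Sleep(wait) // illustrative; a real event loop would use a timer
}
broadcast(proposal) // assumed helper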
tracker := tracker.NewNewestQCTracker() diff --git a/consensus/hotstuff/pacemaker/pacemaker_test.go b/consensus/hotstuff/pacemaker/pacemaker_test.go index 58193e0bd50..7db14618460 100644 --- a/consensus/hotstuff/pacemaker/pacemaker_test.go +++ b/consensus/hotstuff/pacemaker/pacemaker_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -17,6 +18,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/consensus/hotstuff/pacemaker/timeout" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) const ( @@ -44,11 +46,12 @@ type ActivePaceMakerTestSuite struct { initialQC *flow.QuorumCertificate initialTC *flow.TimeoutCertificate - notifier *mocks.Consumer - persist *mocks.Persister - paceMaker *ActivePaceMaker - stop context.CancelFunc - timeoutConf timeout.Config + notifier *mocks.Consumer + proposalDurationProvider hotstuff.ProposalDurationProvider + persist *mocks.Persister + paceMaker *ActivePaceMaker + stop context.CancelFunc + timeoutConf timeout.Config } func (s *ActivePaceMakerTestSuite) SetupTest() { @@ -57,13 +60,7 @@ func (s *ActivePaceMakerTestSuite) SetupTest() { s.initialTC = nil var err error - s.timeoutConf, err = timeout.NewConfig( - time.Duration(minRepTimeout*1e6), - time.Duration(maxRepTimeout*1e6), - multiplicativeIncrease, - happyPathMaxRoundFailures, - 0, - time.Duration(maxRepTimeout*1e6)) + s.timeoutConf, err = timeout.NewConfig(time.Duration(minRepTimeout*1e6), time.Duration(maxRepTimeout*1e6), multiplicativeIncrease, happyPathMaxRoundFailures, time.Duration(maxRepTimeout*1e6)) require.NoError(s.T(), err) // init consumer for notifications emitted by PaceMaker @@ -82,7 +79,7 @@ func (s *ActivePaceMakerTestSuite) SetupTest() { s.persist.On("GetLivenessData").Return(livenessData, nil) // init PaceMaker and start - s.paceMaker, err = New(timeout.NewController(s.timeoutConf), s.notifier, s.persist) + s.paceMaker, err = New(timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.persist) require.NoError(s.T(), err) var ctx context.Context @@ -347,7 +344,7 @@ func (s *ActivePaceMakerTestSuite) Test_Initialization() { // test that the constructor finds the newest QC and TC s.Run("Random TCs and QCs combined", func() { pm, err := New( - timeout.NewController(s.timeoutConf), s.notifier, s.persist, + timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.persist, WithQCs(qcs...), WithTCs(tcs...), ) require.NoError(s.T(), err) @@ -367,7 +364,7 @@ func (s *ActivePaceMakerTestSuite) Test_Initialization() { tcs[45] = helper.MakeTC(helper.WithTCView(highestView+15), helper.WithTCNewestQC(QC(highestView+12))) pm, err := New( - timeout.NewController(s.timeoutConf), s.notifier, s.persist, + timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.persist, WithTCs(tcs...), WithQCs(qcs...), ) require.NoError(s.T(), err) @@ -387,7 +384,7 @@ func (s *ActivePaceMakerTestSuite) Test_Initialization() { tcs[45] = helper.MakeTC(helper.WithTCView(highestView+15), helper.WithTCNewestQC(QC(highestView+15))) pm, err := New( - timeout.NewController(s.timeoutConf), s.notifier, s.persist, + timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.persist, WithTCs(tcs...), WithQCs(qcs...), ) require.NoError(s.T(), err) @@ -403,11 +400,11 @@ func (s *ActivePaceMakerTestSuite) Test_Initialization() { // Verify that WithTCs still 
works correctly if no TCs are given: // the list of TCs is empty or all contained TCs are nil s.Run("Only nil TCs", func() { - pm, err := New(timeout.NewController(s.timeoutConf), s.notifier, s.persist, WithTCs()) + pm, err := New(timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.persist, WithTCs()) require.NoError(s.T(), err) require.Equal(s.T(), s.initialView, pm.CurView()) - pm, err = New(timeout.NewController(s.timeoutConf), s.notifier, s.persist, WithTCs(nil, nil, nil)) + pm, err = New(timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.persist, WithTCs(nil, nil, nil)) require.NoError(s.T(), err) require.Equal(s.T(), s.initialView, pm.CurView()) }) @@ -415,17 +412,29 @@ func (s *ActivePaceMakerTestSuite) Test_Initialization() { // Verify that WithQCs still works correctly if no QCs are given: // the list of QCs is empty or all contained QCs are nil s.Run("Only nil QCs", func() { - pm, err := New(timeout.NewController(s.timeoutConf), s.notifier, s.persist, WithQCs()) + pm, err := New(timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.persist, WithQCs()) require.NoError(s.T(), err) require.Equal(s.T(), s.initialView, pm.CurView()) - pm, err = New(timeout.NewController(s.timeoutConf), s.notifier, s.persist, WithQCs(nil, nil, nil)) + pm, err = New(timeout.NewController(s.timeoutConf), NoProposalDelay(), s.notifier, s.persist, WithQCs(nil, nil, nil)) require.NoError(s.T(), err) require.Equal(s.T(), s.initialView, pm.CurView()) }) } +// TestProposalDuration tests that the active pacemaker forwards proposal duration values from the provider. +func (s *ActivePaceMakerTestSuite) TestProposalDuration() { + proposalDurationProvider := NewStaticProposalDurationProvider(time.Millisecond * 500) + pm, err := New(timeout.NewController(s.timeoutConf), &proposalDurationProvider, s.notifier, s.persist) + require.NoError(s.T(), err) + + now := time.Now().UTC() + assert.Equal(s.T(), now.Add(time.Millisecond*500), pm.TargetPublicationTime(117, now, unittest.IdentifierFixture())) + proposalDurationProvider.dur = time.Second + assert.Equal(s.T(), now.Add(time.Second), pm.TargetPublicationTime(117, now, unittest.IdentifierFixture())) +} + func max(a uint64, values ...uint64) uint64 { for _, v := range values { if v > a { diff --git a/consensus/hotstuff/pacemaker/proposal_timing.go b/consensus/hotstuff/pacemaker/proposal_timing.go new file mode 100644 index 00000000000..7530b2aedcb --- /dev/null +++ b/consensus/hotstuff/pacemaker/proposal_timing.go @@ -0,0 +1,29 @@ +package pacemaker + +import ( + "time" + + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/model/flow" +) + +// StaticProposalDurationProvider is a hotstuff.ProposalDurationProvider which provides a static ProposalDuration. +// The constant dur represents the time to produce and broadcast the proposal (ProposalDuration), +// NOT the time for the entire view (ViewDuration). 
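Before the provider's definition below, a brief wiring sketch: how a static provider plugs into the updated `pacemaker.New` signature, mirroring the test setup above. The `conf`, `notifier`, and `persist` values are assumed:

// A fixed 250ms ProposalDuration; NoProposalDelay() would yield 0.
provider := pacemaker.NewStaticProposalDurationProvider(250 * time.Millisecond)
pm, err := pacemaker.New(timeout.NewController(conf), provider, notifier, persist)
if err != nil {
	return fmt.Errorf("could not construct pacemaker: %w", err)
}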
+type StaticProposalDurationProvider struct {
+	dur time.Duration
+}
+
+var _ hotstuff.ProposalDurationProvider = (*StaticProposalDurationProvider)(nil)
+
+// NewStaticProposalDurationProvider creates a StaticProposalDurationProvider with a fixed ProposalDuration.
+func NewStaticProposalDurationProvider(dur time.Duration) StaticProposalDurationProvider {
+	return StaticProposalDurationProvider{dur: dur}
+}
+
+func (p StaticProposalDurationProvider) TargetPublicationTime(_ uint64, timeViewEntered time.Time, _ flow.Identifier) time.Time {
+	return timeViewEntered.Add(p.dur)
+}
+
+// NoProposalDelay returns a StaticProposalDurationProvider with a zero ProposalDuration.
+func NoProposalDelay() StaticProposalDurationProvider {
+	return NewStaticProposalDurationProvider(0)
+}
diff --git a/consensus/hotstuff/pacemaker/timeout/config.go b/consensus/hotstuff/pacemaker/timeout/config.go
index 7d55a3ca1c9..1686521edc8 100644
--- a/consensus/hotstuff/pacemaker/timeout/config.go
+++ b/consensus/hotstuff/pacemaker/timeout/config.go
@@ -1,14 +1,9 @@
 package timeout
 
 import (
-	"fmt"
 	"time"
 
-	"github.com/rs/zerolog/log"
-	"go.uber.org/atomic"
-
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
-	"github.com/onflow/flow-go/module/updatable_configs"
 )
 
 // Config contains the configuration parameters for a Truncated Exponential Backoff,
@@ -30,8 +25,6 @@ type Config struct {
 	// HappyPathMaxRoundFailures is the number of rounds without progress where we still consider being
 	// on hot path of execution. After exceeding this value we will start increasing timeout values.
 	HappyPathMaxRoundFailures uint64
-	// BlockRateDelayMS is a delay to broadcast the proposal in order to control block production rate [MILLISECONDS]
-	BlockRateDelayMS *atomic.Float64
 	// MaxTimeoutObjectRebroadcastInterval is the maximum value for timeout object rebroadcast interval [MILLISECONDS]
 	MaxTimeoutObjectRebroadcastInterval float64
 }
@@ -51,17 +44,9 @@ func NewDefaultConfig() Config {
 	timeoutAdjustmentFactorFactor := 1.2
 	// after 6 successively failed rounds, the pacemaker leaves the hot path and starts increasing timeouts (recovery mode)
 	happyPathMaxRoundFailures := uint64(6)
-	blockRateDelay := 0 * time.Millisecond
 	maxRebroadcastInterval := 5 * time.Second
-	conf, err := NewConfig(
-		minReplicaTimeout+blockRateDelay,
-		maxReplicaTimeout,
-		timeoutAdjustmentFactorFactor,
-		happyPathMaxRoundFailures,
-		blockRateDelay,
-		maxRebroadcastInterval,
-	)
+	conf, err := NewConfig(minReplicaTimeout, maxReplicaTimeout, timeoutAdjustmentFactorFactor, happyPathMaxRoundFailures, maxRebroadcastInterval)
 	if err != nil {
 		// we check in a unit test that this does not happen
 		panic("Default config is not compliant with timeout Config requirements")
@@ -82,14 +67,7 @@ func NewDefaultConfig() Config {
 // Consistency requirement: must be non-negative
 //
 // Returns `model.ConfigurationError` if any of the consistency requirements is violated.
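For reference, a sketch of a call against the new five-parameter constructor that results from dropping `blockRateDelay`. The values loosely mirror the defaults above; the 60-second `maxReplicaTimeout` is purely illustrative:

conf, err := timeout.NewConfig(
	3*time.Second,  // minReplicaTimeout
	60*time.Second, // maxReplicaTimeout (illustrative)
	1.2,            // timeoutAdjustmentFactor, must be strictly > 1
	6,              // happyPathMaxRoundFailures
	5*time.Second,  // maxRebroadcastInterval, must be positive
)
if err != nil {
	// only model.ConfigurationError is expected here
	panic(err)
}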
-func NewConfig( - minReplicaTimeout time.Duration, - maxReplicaTimeout time.Duration, - timeoutAdjustmentFactor float64, - happyPathMaxRoundFailures uint64, - blockRateDelay time.Duration, - maxRebroadcastInterval time.Duration, -) (Config, error) { +func NewConfig(minReplicaTimeout time.Duration, maxReplicaTimeout time.Duration, timeoutAdjustmentFactor float64, happyPathMaxRoundFailures uint64, maxRebroadcastInterval time.Duration) (Config, error) { if minReplicaTimeout <= 0 { return Config{}, model.NewConfigurationErrorf("minReplicaTimeout must be a positive number[milliseconds]") } @@ -99,9 +77,6 @@ func NewConfig( if timeoutAdjustmentFactor <= 1 { return Config{}, model.NewConfigurationErrorf("timeoutAdjustmentFactor must be strictly bigger than 1") } - if err := validBlockRateDelay(blockRateDelay); err != nil { - return Config{}, err - } if maxRebroadcastInterval <= 0 { return Config{}, model.NewConfigurationErrorf("maxRebroadcastInterval must be a positive number [milliseconds]") } @@ -112,43 +87,6 @@ func NewConfig( TimeoutAdjustmentFactor: timeoutAdjustmentFactor, HappyPathMaxRoundFailures: happyPathMaxRoundFailures, MaxTimeoutObjectRebroadcastInterval: float64(maxRebroadcastInterval.Milliseconds()), - BlockRateDelayMS: atomic.NewFloat64(float64(blockRateDelay.Milliseconds())), } return tc, nil } - -// validBlockRateDelay validates a block rate delay config. -// Returns model.ConfigurationError for invalid config inputs. -func validBlockRateDelay(blockRateDelay time.Duration) error { - if blockRateDelay < 0 { - return model.NewConfigurationErrorf("blockRateDelay must be must be non-negative") - } - return nil -} - -// GetBlockRateDelay returns the block rate delay as a Duration. This is used by -// the dyamic config manager. -func (c *Config) GetBlockRateDelay() time.Duration { - ms := c.BlockRateDelayMS.Load() - return time.Millisecond * time.Duration(ms) -} - -// SetBlockRateDelay sets the block rate delay. It is used to modify this config -// value while HotStuff is running. -// Returns updatable_configs.ValidationError if the new value is invalid. -func (c *Config) SetBlockRateDelay(delay time.Duration) error { - if err := validBlockRateDelay(delay); err != nil { - if model.IsConfigurationError(err) { - return updatable_configs.NewValidationErrorf("invalid block rate delay: %w", err) - } - return fmt.Errorf("unexpected error validating block rate delay: %w", err) - } - // sanity check: log a warning if we set block rate delay above min timeout - // it is valid to want to do this, to significantly slow the block rate, but - // only in edge cases - if c.MinReplicaTimeout < float64(delay.Milliseconds()) { - log.Warn().Msgf("CAUTION: setting block rate delay to %s, above min timeout %dms - this will degrade performance!", delay.String(), int64(c.MinReplicaTimeout)) - } - c.BlockRateDelayMS.Store(float64(delay.Milliseconds())) - return nil -} diff --git a/consensus/hotstuff/pacemaker/timeout/config_test.go b/consensus/hotstuff/pacemaker/timeout/config_test.go index 4bacc678580..fe758dbd70d 100644 --- a/consensus/hotstuff/pacemaker/timeout/config_test.go +++ b/consensus/hotstuff/pacemaker/timeout/config_test.go @@ -11,37 +11,34 @@ import ( // TestConstructor tests that constructor performs needed checks and returns expected values depending on different inputs. 
func TestConstructor(t *testing.T) { - c, err := NewConfig(1200*time.Millisecond, 2000*time.Millisecond, 1.5, 3, time.Second, 2000*time.Millisecond) + c, err := NewConfig(1200*time.Millisecond, 2000*time.Millisecond, 1.5, 3, 2000*time.Millisecond) require.NoError(t, err) require.Equal(t, float64(1200), c.MinReplicaTimeout) require.Equal(t, float64(2000), c.MaxReplicaTimeout) require.Equal(t, float64(1.5), c.TimeoutAdjustmentFactor) require.Equal(t, uint64(3), c.HappyPathMaxRoundFailures) - require.Equal(t, float64(1000), c.BlockRateDelayMS.Load()) require.Equal(t, float64(2000), c.MaxTimeoutObjectRebroadcastInterval) // should not allow negative minReplicaTimeout - c, err = NewConfig(-1200*time.Millisecond, 2000*time.Millisecond, 1.5, 3, time.Second, 2000*time.Millisecond) + c, err = NewConfig(-1200*time.Millisecond, 2000*time.Millisecond, 1.5, 3, 2000*time.Millisecond) require.True(t, model.IsConfigurationError(err)) // should not allow 0 minReplicaTimeout - c, err = NewConfig(0, 2000*time.Millisecond, 1.5, 3, time.Second, 2000*time.Millisecond) + c, err = NewConfig(0, 2000*time.Millisecond, 1.5, 3, 2000*time.Millisecond) require.True(t, model.IsConfigurationError(err)) // should not allow maxReplicaTimeout < minReplicaTimeout - c, err = NewConfig(1200*time.Millisecond, 1000*time.Millisecond, 1.5, 3, time.Second, 2000*time.Millisecond) + c, err = NewConfig(1200*time.Millisecond, 1000*time.Millisecond, 1.5, 3, 2000*time.Millisecond) require.True(t, model.IsConfigurationError(err)) // should not allow timeoutIncrease to be 1.0 or smaller - c, err = NewConfig(1200*time.Millisecond, 2000*time.Millisecond, 1.0, 3, time.Second, 2000*time.Millisecond) + c, err = NewConfig(1200*time.Millisecond, 2000*time.Millisecond, 1.0, 3, 2000*time.Millisecond) require.True(t, model.IsConfigurationError(err)) - // should not allow blockRateDelay to be zero negative - c, err = NewConfig(1200*time.Millisecond, 2000*time.Millisecond, 1.5, 3, -1*time.Nanosecond, 2000*time.Millisecond) + // should accept only positive values for maxRebroadcastInterval + c, err = NewConfig(1200*time.Millisecond, 2000*time.Millisecond, 1.5, 3, 0) require.True(t, model.IsConfigurationError(err)) - - // should not allow maxRebroadcastInterval to be smaller than minReplicaTimeout - c, err = NewConfig(1200*time.Millisecond, 2000*time.Millisecond, 1.5, 3, -1*time.Nanosecond, 1000*time.Millisecond) + c, err = NewConfig(1200*time.Millisecond, 2000*time.Millisecond, 1.5, 3, -1000*time.Millisecond) require.True(t, model.IsConfigurationError(err)) } @@ -52,20 +49,4 @@ func TestDefaultConfig(t *testing.T) { require.Equal(t, float64(3000), c.MinReplicaTimeout) require.Equal(t, 1.2, c.TimeoutAdjustmentFactor) require.Equal(t, uint64(6), c.HappyPathMaxRoundFailures) - require.Equal(t, float64(0), c.BlockRateDelayMS.Load()) -} - -// Test_ConfigPassByValue tests timeout.Config can be passed by value -// without breaking the ability to update `BlockRateDelayMS` -func Test_ConfigPassByValue(t *testing.T) { - origConf := NewDefaultConfig() - err := origConf.SetBlockRateDelay(2227 * time.Millisecond) - require.NoError(t, err) - - copiedConf := origConf - require.Equal(t, float64(2227), copiedConf.BlockRateDelayMS.Load()) - - err = origConf.SetBlockRateDelay(1011 * time.Millisecond) - require.NoError(t, err) - require.Equal(t, float64(1011), copiedConf.BlockRateDelayMS.Load()) } diff --git a/consensus/hotstuff/pacemaker/timeout/controller.go b/consensus/hotstuff/pacemaker/timeout/controller.go index e162d5986ef..1b09cf8debf 100644 --- 
a/consensus/hotstuff/pacemaker/timeout/controller.go +++ b/consensus/hotstuff/pacemaker/timeout/controller.go @@ -147,8 +147,3 @@ func (t *Controller) OnProgressBeforeTimeout() { t.r-- } } - -// BlockRateDelay is a delay to broadcast the proposal in order to control block production rate -func (t *Controller) BlockRateDelay() time.Duration { - return time.Duration(t.cfg.BlockRateDelayMS.Load() * float64(time.Millisecond)) -} diff --git a/consensus/hotstuff/pacemaker/timeout/controller_test.go b/consensus/hotstuff/pacemaker/timeout/controller_test.go index 4db023dfcd0..be2b367f774 100644 --- a/consensus/hotstuff/pacemaker/timeout/controller_test.go +++ b/consensus/hotstuff/pacemaker/timeout/controller_test.go @@ -17,13 +17,7 @@ const ( ) func initTimeoutController(t *testing.T) *Controller { - tc, err := NewConfig( - time.Duration(minRepTimeout*1e6), - time.Duration(maxRepTimeout*1e6), - timeoutAdjustmentFactor, - happyPathMaxRoundFailures, - 0, - time.Duration(maxRepTimeout*1e6)) + tc, err := NewConfig(time.Duration(minRepTimeout*1e6), time.Duration(maxRepTimeout*1e6), timeoutAdjustmentFactor, happyPathMaxRoundFailures, time.Duration(maxRepTimeout*1e6)) if err != nil { t.Fail() } @@ -149,33 +143,3 @@ func Test_CombinedIncreaseDecreaseDynamics(t *testing.T) { testDynamicSequence([]bool{increase, decrease, increase, decrease, increase, decrease}) testDynamicSequence([]bool{increase, increase, increase, increase, increase, decrease}) } - -// Test_BlockRateDelay check that correct block rate delay is returned -func Test_BlockRateDelay(t *testing.T) { - c, err := NewConfig( - time.Duration(minRepTimeout*float64(time.Millisecond)), - time.Duration(maxRepTimeout*float64(time.Millisecond)), - timeoutAdjustmentFactor, - happyPathMaxRoundFailures, - time.Second, - time.Duration(maxRepTimeout*float64(time.Millisecond))) - if err != nil { - t.Fail() - } - tc := NewController(c) - assert.Equal(t, time.Second, tc.BlockRateDelay()) -} - -// Test_AdjustBlockRateDelayAtRuntime tests timeout.Config can be passed by value -// without breaking the ability to update `BlockRateDelayMS` -func Test_AdjustBlockRateDelayAtRuntime(t *testing.T) { - origConf := NewDefaultConfig() - require.NoError(t, origConf.SetBlockRateDelay(2227*time.Millisecond)) - - tc := NewController(origConf) // here, we pass the timeout.Config BY VALUE - assert.Equal(t, 2227*time.Millisecond, tc.BlockRateDelay()) - - // adjust BlockRateDelay on `origConf`, which should be reflected by the `timeout.Controller` - require.NoError(t, origConf.SetBlockRateDelay(1101*time.Millisecond)) - assert.Equal(t, 1101*time.Millisecond, tc.BlockRateDelay()) -} diff --git a/consensus/hotstuff/pacemaker/view_tracker.go b/consensus/hotstuff/pacemaker/view_tracker.go index b52822d0d5a..8c9d54ef40b 100644 --- a/consensus/hotstuff/pacemaker/view_tracker.go +++ b/consensus/hotstuff/pacemaker/view_tracker.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" ) @@ -31,10 +30,6 @@ func newViewTracker(persist hotstuff.Persister) (viewTracker, error) { return viewTracker{}, fmt.Errorf("could not load liveness data: %w", err) } - if livenessData.CurrentView < 1 { - return viewTracker{}, model.NewConfigurationErrorf("PaceMaker cannot start in view 0 (view zero is reserved for genesis block, which has no proposer)") - } - return viewTracker{ livenessData: *livenessData, persist: persist, @@ -118,15 +113,15 @@ func (vt *viewTracker) ProcessTC(tc 
*flow.TimeoutCertificate) (uint64, error) { } // updateLivenessData updates the current view, qc, tc. We want to avoid unnecessary database -// writes, which we enforce by requiring that the view number is STRICTLY monotonously increasing. +// writes, which we enforce by requiring that the view number is STRICTLY monotonically increasing. // Otherwise, an exception is returned. No errors are expected, any error should be treated as exception. func (vt *viewTracker) updateLivenessData(newView uint64, qc *flow.QuorumCertificate, tc *flow.TimeoutCertificate) error { if newView <= vt.livenessData.CurrentView { // This should never happen: in the current implementation, it is trivially apparent that // newView is _always_ larger than currentView. This check is to protect the code from // future modifications that violate the necessary condition for - // STRICTLY monotonously increasing view numbers. - return fmt.Errorf("cannot move from view %d to %d: currentView must be strictly monotonously increasing", + // STRICTLY monotonically increasing view numbers. + return fmt.Errorf("cannot move from view %d to %d: currentView must be strictly monotonically increasing", vt.livenessData.CurrentView, newView) } diff --git a/consensus/hotstuff/persister.go b/consensus/hotstuff/persister.go index eaed7fcba57..5e200149d20 100644 --- a/consensus/hotstuff/persister.go +++ b/consensus/hotstuff/persister.go @@ -1,23 +1,30 @@ package hotstuff -// Persister is responsible for persisting state we need to bootstrap after a -// restart or crash. +// Persister is responsible for persisting minimal critical safety and liveness data for HotStuff: +// specifically [hotstuff.LivenessData] and [hotstuff.SafetyData]. type Persister interface { - // GetSafetyData will retrieve last persisted safety data. - // During normal operations, no errors are expected. - GetSafetyData() (*SafetyData, error) + PersisterReader // PutSafetyData persists the last safety data. // This method blocks until `safetyData` was successfully persisted. // During normal operations, no errors are expected. PutSafetyData(safetyData *SafetyData) error - // GetLivenessData will retrieve last persisted liveness data. - // During normal operations, no errors are expected. - GetLivenessData() (*LivenessData, error) - // PutLivenessData persists the last liveness data. // This method blocks until `livenessData` was successfully persisted. // During normal operations, no errors are expected. PutLivenessData(livenessData *LivenessData) error } + +// PersisterReader exposes only the read-only parts of the Persister component. +// This is used to read information about the HotStuff instance's current state from other components. +// CAUTION: the write functions are hidden here, because it is NOT SAFE to use them outside the HotStuff state machine. +type PersisterReader interface { + // GetSafetyData will retrieve last persisted safety data. + // During normal operations, no errors are expected. + GetSafetyData() (*SafetyData, error) + + // GetLivenessData will retrieve last persisted liveness data. + // During normal operations, no errors are expected.
+ GetLivenessData() (*LivenessData, error) +} diff --git a/consensus/hotstuff/persister/persister.go b/consensus/hotstuff/persister/persister.go index f7b69575dac..178c7903256 100644 --- a/consensus/hotstuff/persister/persister.go +++ b/consensus/hotstuff/persister/persister.go @@ -1,38 +1,80 @@ package persister import ( - "github.com/dgraph-io/badger/v2" + "fmt" + + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" ) -// Persister can persist relevant information for hotstuff. -// Persister depends on protocol.State root snapshot bootstrapping to set initial values for -// SafetyData and LivenessData. These values must be initialized before first use of Persister. +// Persister is responsible for persisting minimal critical safety and liveness data for HotStuff: +// specifically [hotstuff.LivenessData] and [hotstuff.SafetyData]. +// Persister depends on protocol.State and cluster.State bootstrapping to set initial values for +// SafetyData and LivenessData, for each distinct chain ID. This bootstrapping must be complete +// before constructing a Persister instance with New (otherwise it will return an error). type Persister struct { - db *badger.DB - chainID flow.ChainID + db storage.DB + chainID flow.ChainID + lockManager lockctx.Manager } var _ hotstuff.Persister = (*Persister)(nil) +var _ hotstuff.PersisterReader = (*Persister)(nil) + +// New creates a new Persister. +// Persister depends on protocol.State and cluster.State bootstrapping to set initial values for +// SafetyData and LivenessData, for each distinct chain ID. This bootstrapping must be completed +// before first using a Persister instance. +func New(db storage.DB, chainID flow.ChainID, lockManager lockctx.Manager) (*Persister, error) { + err := ensureSafetyDataAndLivenessDataAreBootstrapped(db, chainID) + if err != nil { + return nil, fmt.Errorf("failed to check whether persister was properly bootstrapped: %w", err) + } -// New creates a new Persister using the injected data base to persist -// relevant hotstuff data. -func New(db *badger.DB, chainID flow.ChainID) *Persister { p := &Persister{ - db: db, - chainID: chainID, + db: db, + chainID: chainID, + lockManager: lockManager, + } + return p, nil +} + +// ensureSafetyDataAndLivenessDataAreBootstrapped checks if the safety and liveness data are +// bootstrapped for the given chain ID. If not, it returns an error. +// The Flow Protocol mandates that SafetyData and LivenessData are provided as part of the bootstrapping +// data. For a node, the SafetyData and LivenessData are among the most safety-critical data. We require +// the protocol.State's or cluster.State's bootstrapping logic to properly initialize these values in the +// database. +func ensureSafetyDataAndLivenessDataAreBootstrapped(db storage.DB, chainID flow.ChainID) error { + var safetyData hotstuff.SafetyData + err := operation.RetrieveSafetyData(db.Reader(), chainID, &safetyData) + if err != nil { + return fmt.Errorf("failed to retrieve safety data: %w", err) } - return p + + var livenessData hotstuff.LivenessData + err = operation.RetrieveLivenessData(db.Reader(), chainID, &livenessData) + if err != nil { + return fmt.Errorf("failed to retrieve liveness data: %w", err) + } + + return nil +} + +// NewReader returns a new Persister as a PersisterReader type (only read methods accessible).
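Illustrative aside: to make the intent of the read/write split concrete, below is a minimal sketch of a read-only consumer of the new PersisterReader interface. This is not part of the diff; the livenessReporter component is hypothetical, only hotstuff.PersisterReader and hotstuff.LivenessData come from the code above.

package example

import (
	"fmt"

	"github.com/onflow/flow-go/consensus/hotstuff"
)

// livenessReporter is a hypothetical component that inspects HotStuff's state.
// Handing it a hotstuff.PersisterReader (rather than the full Persister) makes
// it impossible for this component to corrupt the safety-critical data.
type livenessReporter struct {
	reader hotstuff.PersisterReader
}

// CurrentView reports the view HotStuff last persisted.
func (r *livenessReporter) CurrentView() (uint64, error) {
	liveness, err := r.reader.GetLivenessData()
	if err != nil {
		return 0, fmt.Errorf("could not read liveness data: %w", err)
	}
	return liveness.CurrentView, nil
}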
+func NewReader(db storage.DB, chainID flow.ChainID, lockManager lockctx.Manager) (hotstuff.PersisterReader, error) { + return New(db, chainID, lockManager) } // GetSafetyData will retrieve last persisted safety data. // During normal operations, no errors are expected. func (p *Persister) GetSafetyData() (*hotstuff.SafetyData, error) { var safetyData hotstuff.SafetyData - err := p.db.View(operation.RetrieveSafetyData(p.chainID, &safetyData)) + err := operation.RetrieveSafetyData(p.db.Reader(), p.chainID, &safetyData) return &safetyData, err } @@ -40,18 +82,26 @@ func (p *Persister) GetSafetyData() (*hotstuff.SafetyData, error) { // During normal operations, no errors are expected. func (p *Persister) GetLivenessData() (*hotstuff.LivenessData, error) { var livenessData hotstuff.LivenessData - err := p.db.View(operation.RetrieveLivenessData(p.chainID, &livenessData)) + err := operation.RetrieveLivenessData(p.db.Reader(), p.chainID, &livenessData) return &livenessData, err } // PutSafetyData persists the last safety data. // During normal operations, no errors are expected. func (p *Persister) PutSafetyData(safetyData *hotstuff.SafetyData) error { - return operation.RetryOnConflict(p.db.Update, operation.UpdateSafetyData(p.chainID, safetyData)) + return storage.WithLock(p.lockManager, storage.LockInsertSafetyData, func(lctx lockctx.Context) error { + return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertSafetyData(lctx, rw, p.chainID, safetyData) + }) + }) } // PutLivenessData persists the last liveness data. // During normal operations, no errors are expected. func (p *Persister) PutLivenessData(livenessData *hotstuff.LivenessData) error { - return operation.RetryOnConflict(p.db.Update, operation.UpdateLivenessData(p.chainID, livenessData)) + return storage.WithLock(p.lockManager, storage.LockInsertLivenessData, func(lctx lockctx.Context) error { + return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertLivenessData(lctx, rw, p.chainID, livenessData) + }) + }) } diff --git a/consensus/hotstuff/randombeacon_inspector.go b/consensus/hotstuff/randombeacon_inspector.go index ceca7c7ab82..85df7777b8d 100644 --- a/consensus/hotstuff/randombeacon_inspector.go +++ b/consensus/hotstuff/randombeacon_inspector.go @@ -1,7 +1,7 @@ package hotstuff import ( - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" ) // RandomBeaconInspector encapsulates all methods needed by a Hotstuff leader to validate the diff --git a/consensus/hotstuff/safety_rules.go b/consensus/hotstuff/safety_rules.go index d4db634c79a..7e6e29b392f 100644 --- a/consensus/hotstuff/safety_rules.go +++ b/consensus/hotstuff/safety_rules.go @@ -22,18 +22,26 @@ type SafetyData struct { // SafetyRules enforces all consensus rules that guarantee safety. It produces votes for // the given blocks or TimeoutObject for the given views, only if all safety rules are satisfied. +// In particular, SafetyRules guarantees a foundational security theorem for HotStuff (incl. +// the DiemBFT / Jolteon variant), which we also utilize outside of consensus (e.g. queuing pending +// blocks for execution, verification, sealing, etc.): +// +// THEOREM: For each view, there can be at most 1 certified block. +// +// Implementations are generally *not* concurrency safe. type SafetyRules interface { // ProduceVote takes a block proposal and current view, and decides whether to vote for the block.
- // Voting is deterministic meaning voting for same proposal will always result in the same vote. + // Voting is deterministic, i.e. voting for the same proposal will always result in the same vote. // Returns: // * (vote, nil): On the _first_ block for the current view that is safe to vote for. - // Subsequently, voter does _not_ vote for any _other_ block with the same (or lower) view. + // Subsequently, voter does _not_ vote for any _other_ block with the same (or lower) view. // SafetyRules internally caches and persists its latest vote. As long as the SafetyRules' internal // state remains unchanged, ProduceVote will return its cached vote for identical inputs. // * (nil, model.NoVoteError): If the safety module decides that it is not safe to vote for the given block. // This is a sentinel error and _expected_ during normal operation. // All other errors are unexpected and potential symptoms of uncovered edge cases or corrupted internal state (fatal). - ProduceVote(proposal *model.Proposal, curView uint64) (*model.Vote, error) + ProduceVote(proposal *model.SignedProposal, curView uint64) (*model.Vote, error) + // ProduceTimeout takes current view, highest locally known QC and TC (optional, must be nil if and // only if QC is for previous view) and decides whether to produce timeout for current view. // Returns: @@ -43,4 +51,19 @@ type SafetyRules interface { // normal operation, e.g. during the grace-period after Epoch switchover or after the replica self-ejected. // All other errors are unexpected and potential symptoms of uncovered edge cases or corrupted internal state (fatal). ProduceTimeout(curView uint64, newestQC *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) (*model.TimeoutObject, error) + + // SignOwnProposal takes an unsigned block proposal and produces a vote for it. Vote is a cryptographic commitment + // to the proposal. By adding the vote to an unsigned proposal, the caller constructs a signed block proposal. This + // method has to be used only by the leader, which must be the proposer of the block (or an exception is returned). + // Implementors must guarantee that: + // - vote on the proposal satisfies safety rules + // - maximum one proposal is signed per view + // Returns: + // * (vote, nil): the passed unsigned proposal is a valid one, and it's safe to make a proposal. + // Subsequently, leader does _not_ produce any _other_ proposal with the same (or lower) view. + // * (nil, model.NoVoteError): according to HotStuff's Safety Rules, it is not safe to sign the given proposal. + // This could happen because we have already proposed or timed out for the given view. + // This is a sentinel error and _expected_ during normal operation. + // All other errors are unexpected and potential symptoms of uncovered edge cases or corrupted internal state (fatal). + SignOwnProposal(unsignedProposal *model.Proposal) (*model.Vote, error) } diff --git a/consensus/hotstuff/safetyrules/safety_rules.go b/consensus/hotstuff/safetyrules/safety_rules.go index 5ce5da8198a..e231b47925a 100644 --- a/consensus/hotstuff/safetyrules/safety_rules.go +++ b/consensus/hotstuff/safetyrules/safety_rules.go @@ -14,11 +14,16 @@ import ( // SafetyRules relies on hotstuff.Persister to store latest state of hotstuff.SafetyData. // // The voting rules implemented by SafetyRules are: -// 1.
Replicas vote in strictly increasing views. At most one vote can be signed per view. +// Caution: The leader's block signature is formally a vote for their own proposal. +// 2. Each block has to include a TC or a QC from the previous view. +// a. [Happy path] If the previous view resulted in a QC then the proposer should include it in their block. // b. [Recovery path] If the previous round did *not* result in a QC, the leader of the -// subsequent round *must* include a valid TC for the previous round in its block. +// subsequent round *must* include a valid TC for the previous view in its block. +// +// Condition 1 guarantees a foundational security theorem for HotStuff (incl. the DiemBFT / Jolteon variant): +// +// THEOREM: For each view, there can be at most 1 certified block. // // NOT safe for concurrent use. type SafetyRules struct { @@ -39,9 +44,8 @@ func New( // get the last stored safety data safetyData, err := persist.GetSafetyData() if err != nil { - return nil, fmt.Errorf("could not recover safety data: %w", err) + return nil, fmt.Errorf("could not load safety data: %w", err) } - return &SafetyRules{ signer: signer, persist: persist, @@ -63,39 +67,86 @@ func New( // This is a sentinel error and _expected_ during normal operation. // // All other errors are unexpected and potential symptoms of uncovered edge cases or corrupted internal state (fatal). -func (r *SafetyRules) ProduceVote(proposal *model.Proposal, curView uint64) (*model.Vote, error) { +func (r *SafetyRules) ProduceVote(signedProposal *model.SignedProposal, curView uint64) (*model.Vote, error) { + return r.produceVote(&signedProposal.Proposal, curView) +} + +// produceVote implements the core Safety Rules to validate whether it is safe to vote. +// This method is to be used to vote for other leaders' blocks as well as this node's own proposals +// under construction. We explicitly codify the important aspect that a proposer's signature for their +// own block is conceptually also just a vote (we explicitly use that property when aggregating votes and +// including the proposer's own vote into a QC). In order to express this conceptual equivalence in code, the +// voting logic in Safety Rules must also operate on an unsigned Proposal. +// +// The curView is taken as input to ensure SafetyRules will only vote for proposals at current view and prevent double voting. +// Returns: +// - (vote, nil): On the _first_ block for the current view that is safe to vote for. +// Subsequently, voter does _not_ vote for any other block with the same (or lower) view. +// - (nil, model.NoVoteError): If the voter decides that it does not want to vote for the given block. +// This is a sentinel error and _expected_ during normal operation. +// +// All other errors are unexpected and potential symptoms of uncovered edge cases or corrupted internal state (fatal). 
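Illustrative aside: the sentinel-error contract documented above is the crux of this API for callers. A minimal sketch of a consumer follows; only ProduceVote and model.IsNoVoteError come from this codebase, the wrapper function itself is hypothetical and assumes the usual fmt and hotstuff/model imports.

// tryVote is a hypothetical caller that distinguishes the expected NoVoteError
// sentinel from fatal exceptions. On a benign "no vote" it returns (nil, nil).
func tryVote(sr hotstuff.SafetyRules, proposal *model.SignedProposal, curView uint64) (*model.Vote, error) {
	vote, err := sr.ProduceVote(proposal, curView)
	if err != nil {
		if model.IsNoVoteError(err) {
			// expected during normal operation, e.g. we already voted or timed out in this view
			return nil, nil
		}
		// all other errors are exceptions; continuing could compromise safety
		return nil, fmt.Errorf("unexpected error producing vote: %w", err)
	}
	return vote, nil
}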
+func (r *SafetyRules) produceVote(proposal *model.Proposal, curView uint64) (*model.Vote, error) { block := proposal.Block // sanity checks: if curView != block.View { return nil, fmt.Errorf("expecting block for current view %d, but block's view is %d", curView, block.View) } - err := r.IsSafeToVote(proposal) + err := r.isSafeToVote(proposal) if err != nil { return nil, fmt.Errorf("not safe to vote for proposal %x: %w", proposal.Block.BlockID, err) } - // we expect that only valid proposals are submitted for voting - // we need to make sure that proposer is not ejected to decide to vote or not - _, err = r.committee.IdentityByBlock(block.BlockID, block.ProposerID) - if model.IsInvalidSignerError(err) { - // the proposer must be ejected since the proposal has already been validated, - // which ensures that the proposer was a valid committee member at the start of the epoch - return nil, model.NewNoVoteErrorf("proposer ejected: %w", err) - } + currentLeader, err := r.committee.LeaderForView(block.View) if err != nil { - return nil, fmt.Errorf("internal error retrieving Identity of proposer %x at block %x: %w", block.ProposerID, block.BlockID, err) + return nil, fmt.Errorf("expect to have a valid leader for view %d: %w", curView, err) } - - // Do not produce a vote for blocks where we are not a valid committee member. - // HotStuff will ask for a vote for the first block of the next epoch, even if we - // have zero weight in the next epoch. Such vote can't be used to produce valid QCs. - _, err = r.committee.IdentityByBlock(block.BlockID, r.committee.Self()) - if model.IsInvalidSignerError(err) { - return nil, model.NewNoVoteErrorf("I am not authorized to vote for block %x: %w", block.BlockID, err) + // This sanity check confirms that the proposal is from the correct leader of this view. In case this sanity check + // fails, we return an exception, because the compliance layer should have verified this already. However, proposals + // from this node might not go through the compliance engine, yet must still be signed. Therefore, + // we still include this sanity check, but return an exception because signing a proposal should happen only for views + // where this node is actually the leader. + if block.ProposerID != currentLeader { + return nil, fmt.Errorf("incorrect proposal, as proposer %x is different from the leader %x for view %d", block.ProposerID, currentLeader, curView) } - if err != nil { - return nil, fmt.Errorf("could not get self identity: %w", err) + + // In case this node is the leader, we can skip the following checks. + // • If this node is ejected (check (ii) would fail), voting for any blocks or signing our own proposals causes no harm. + // This is because all other honest nodes should have terminated their connection to us, so we do not risk using + // up the networking bandwidth of honest nodes. This is relevant in case of self-ejection: a node + // operator suspecting their node's keys to be compromised can request that their node be ejected to prevent + // malicious actors impersonating their node, launching an attack on the network, and the stake being slashed. + // The self-ejection mechanism corresponds to key-revocation and reduces attack surface for the network and + // the node operator's stake. In case of self-ejection, a node is no longer part of the network, hence it cannot + // harm the network and is no longer subject to slashing for actions during the respective views. Therefore, + // voting or continuing to sign block proposals is of no concern.
+ // • In case this node is the leader, `block.ProposerID` and `r.committee.Self()` are identical. In other words, + // check (i) also verifies that this node itself is not ejected -- the same as check (ii). Hence, check + // (i) can also be skipped with the same reasoning. + if currentLeader != r.committee.Self() { + // (i): we need to make sure that proposer is not ejected to vote + _, err = r.committee.IdentityByBlock(block.BlockID, block.ProposerID) + if model.IsInvalidSignerError(err) { + // the proposer must be ejected since the proposal has already been validated, + // which ensures that the proposer was a valid committee member at the start of the epoch + return nil, model.NewNoVoteErrorf("proposer ejected: %w", err) + } + if err != nil { + return nil, fmt.Errorf("internal error retrieving Identity of proposer %x at block %x: %w", block.ProposerID, block.BlockID, err) + } + + // (ii) Do not produce a vote for blocks where we are not an active committee member. The HotStuff + // state machine may request to vote during grace periods outside the epochs where the node is + // authorized to actively participate. If we voted during those grace periods, we would needlessly + // waste network bandwidth, as such votes can't be used to produce valid QCs. + _, err = r.committee.IdentityByBlock(block.BlockID, r.committee.Self()) + if model.IsInvalidSignerError(err) { + return nil, model.NewNoVoteErrorf("I am not authorized to vote for block %x: %w", block.BlockID, err) + } + if err != nil { + return nil, fmt.Errorf("could not get self identity: %w", err) + } } vote, err := r.signer.CreateVote(block) @@ -130,12 +181,23 @@ func (r *SafetyRules) ProduceTimeout(curView uint64, newestQC *flow.QuorumCertif lastTimeout := r.safetyData.LastTimeout if lastTimeout != nil && lastTimeout.View == curView { // model.TimeoutObject is conceptually immutable, hence we construct an updated copy here, which allows us to increment TimeoutTick - updatedTimeout := *lastTimeout - updatedTimeout.TimeoutTick += 1 + updatedTimeout, err := model.NewTimeoutObject( + model.UntrustedTimeoutObject{ + View: lastTimeout.View, + NewestQC: lastTimeout.NewestQC, + LastViewTC: lastTimeout.LastViewTC, + SignerID: lastTimeout.SignerID, + SigData: lastTimeout.SigData, + TimeoutTick: lastTimeout.TimeoutTick + 1, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout object: %w", err) + } // persist updated TimeoutObject in `safetyData` and return it - r.safetyData.LastTimeout = &updatedTimeout - err := r.persist.PutSafetyData(r.safetyData) + r.safetyData.LastTimeout = updatedTimeout + err = r.persist.PutSafetyData(r.safetyData) if err != nil { return nil, fmt.Errorf("could not persist safety data: %w", err) } @@ -172,16 +234,39 @@ func (r *SafetyRules) ProduceTimeout(curView uint64, newestQC *flow.QuorumCertif return timeout, nil } -// IsSafeToVote checks if this proposal is valid in terms of voting rules, if voting for this proposal won't break safety rules. +// SignOwnProposal takes an unsigned block proposal and produces a vote for it. Vote is a cryptographic commitment +// to the proposal. By adding the vote to an unsigned proposal, the caller constructs a signed block proposal. This +// method has to be used only by the leader, which must be the proposer of the block (or an exception is returned).
+// Implementors must guarantee that: +// - vote on the proposal satisfies safety rules +// - maximum one proposal is signed per view +// Returns: +// - (vote, nil): the passed unsigned proposal is a valid one, and it's safe to make a proposal. +// Subsequently, leader does _not_ produce any _other_ proposal with the same (or lower) view. +// - (nil, model.NoVoteError): according to HotStuff's Safety Rules, it is not safe to sign the given proposal. +// This could happen because we have already proposed or timed out for the given view. +// This is a sentinel error and _expected_ during normal operation. +// +// All other errors are unexpected and potential symptoms of uncovered edge cases or corrupted internal state (fatal). +func (r *SafetyRules) SignOwnProposal(unsignedProposal *model.Proposal) (*model.Vote, error) { + // check that the block is created by us + if unsignedProposal.Block.ProposerID != r.committee.Self() { + return nil, fmt.Errorf("can't sign proposal for someone else's block") + } + + return r.produceVote(unsignedProposal, unsignedProposal.Block.View) +} + +// isSafeToVote checks whether this proposal is valid in terms of voting rules, i.e. whether voting for this proposal won't break safety rules. // Expected errors during normal operations: // - NoVoteError if replica already acted during this view (either voted or generated timeout) -func (r *SafetyRules) IsSafeToVote(proposal *model.Proposal) error { +func (r *SafetyRules) isSafeToVote(proposal *model.Proposal) error { blockView := proposal.Block.View err := r.validateEvidenceForEnteringView(blockView, proposal.Block.QC, proposal.LastViewTC) if err != nil { // As we are expecting the blocks to be pre-validated, any failure here is a symptom of an internal bug. - return fmt.Errorf("proposal failed consensus validity check") + return fmt.Errorf("proposal failed consensus validity check: %w", err) } // This check satisfies voting rule 1 diff --git a/consensus/hotstuff/safetyrules/safety_rules_test.go b/consensus/hotstuff/safetyrules/safety_rules_test.go index 2c2d9cc201a..eb94e92694f 100644 --- a/consensus/hotstuff/safetyrules/safety_rules_test.go +++ b/consensus/hotstuff/safetyrules/safety_rules_test.go @@ -31,7 +31,7 @@ type SafetyRulesTestSuite struct { suite.Suite bootstrapBlock *model.Block - proposal *model.Proposal + proposal *model.SignedProposal proposerIdentity *flow.Identity ourIdentity *flow.Identity signer *mocks.Signer @@ -50,18 +50,19 @@ func (s *SafetyRulesTestSuite) SetupTest() { // bootstrap at random bootstrapBlock s.bootstrapBlock = helper.MakeBlock(helper.WithBlockView(100)) - s.proposal = helper.MakeProposal( + s.proposal = helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), helper.WithBlockView(s.bootstrapBlock.View+1), helper.WithBlockProposer(s.proposerIdentity.NodeID)), - )) + )))) s.committee.On("Self").Return(s.ourIdentity.NodeID).Maybe() + s.committee.On("LeaderForView", mock.Anything).Return(s.proposerIdentity.NodeID, nil).Maybe() s.committee.On("IdentityByBlock", mock.Anything, s.ourIdentity.NodeID).Return(s.ourIdentity, nil).Maybe() s.committee.On("IdentityByBlock", s.proposal.Block.BlockID, s.proposal.Block.ProposerID).Return(s.proposerIdentity, nil).Maybe() - s.committee.On("IdentityByEpoch", mock.Anything, s.ourIdentity.NodeID).Return(s.ourIdentity, nil).Maybe() + s.committee.On("IdentityByEpoch", mock.Anything, s.ourIdentity.NodeID).Return(&s.ourIdentity.IdentitySkeleton, nil).Maybe() s.safetyData =
&hotstuff.SafetyData{ LockedOneChainView: s.bootstrapBlock.View, @@ -103,13 +104,13 @@ func (s *SafetyRulesTestSuite) TestProduceVote_ShouldVote() { helper.WithTCNewestQC(s.proposal.Block.QC)) // voting on proposal where last view ended with TC - proposalWithTC := helper.MakeProposal( + proposalWithTC := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), helper.WithBlockView(s.proposal.Block.View+2), helper.WithBlockProposer(s.proposerIdentity.NodeID))), - helper.WithLastViewTC(lastViewTC)) + helper.WithLastViewTC(lastViewTC)))) expectedSafetyData = &hotstuff.SafetyData{ LockedOneChainView: s.proposal.Block.QC.View, @@ -138,13 +139,13 @@ func (s *SafetyRulesTestSuite) TestProduceVote_IncludedQCHigherThanTCsQC() { helper.WithTCNewestQC(s.proposal.Block.QC)) // voting on proposal where last view ended with TC - proposalWithTC := helper.MakeProposal( + proposalWithTC := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.proposal.Block), helper.WithBlockView(s.proposal.Block.View+2), helper.WithBlockProposer(s.proposerIdentity.NodeID))), - helper.WithLastViewTC(lastViewTC)) + helper.WithLastViewTC(lastViewTC)))) expectedSafetyData := &hotstuff.SafetyData{ LockedOneChainView: proposalWithTC.Block.QC.View, @@ -195,7 +196,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_UpdateLockedOneChainView() { // TestProduceVote_InvalidCurrentView tests that no vote is created if `curView` has invalid values. // In particular, `SafetyRules` requires that: // - the block's view matches `curView` -// - that values for `curView` are monotonously increasing +// - that values for `curView` are monotonically increasing // // Failing any of these conditions is a symptom of an internal bug; hence `SafetyRules` should // _not_ return a `NoVoteError`. @@ -207,15 +208,15 @@ func (s *SafetyRulesTestSuite) TestProduceVote_InvalidCurrentView() { require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) }) - s.Run("view-not-monotonously-increasing", func() { + s.Run("view-not-monotonically-increasing", func() { // create block with view < HighestAcknowledgedView - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( func(block *model.Block) { block.QC = helper.MakeQC(helper.WithQCView(s.safetyData.HighestAcknowledgedView - 2)) }, - helper.WithBlockView(s.safetyData.HighestAcknowledgedView-1)))) + helper.WithBlockView(s.safetyData.HighestAcknowledgedView-1)))))) vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Nil(s.T(), vote) require.Error(s.T(), err) @@ -225,10 +226,43 @@ func (s *SafetyRulesTestSuite) TestProduceVote_InvalidCurrentView() { s.persister.AssertNotCalled(s.T(), "PutSafetyData") } +// TestProduceVote_CommitteeLeaderException verifies that SafetyRules handles unexpected error returns from +// the DynamicCommittee correctly. Specifically, generic exceptions and `model.ErrViewForUnknownEpoch` +// returned by the committee when requesting the leader for the block's view are propagated up the call stack. +// SafetyRules should *not* wrap unexpected exceptions into an expected NoVoteError.
+func (s *SafetyRulesTestSuite) TestProduceVote_CommitteeLeaderException() { + *s.committee = mocks.DynamicCommittee{} + for _, exception := range []error{ + errors.New("invalid-leader-identity"), + model.ErrViewForUnknownEpoch, + } { + s.committee.On("LeaderForView", s.proposal.Block.View).Return(nil, exception).Once() + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.Block.View) + require.Nil(s.T(), vote) + require.ErrorIs(s.T(), err, exception) + require.False(s.T(), model.IsNoVoteError(err)) + s.persister.AssertNotCalled(s.T(), "PutSafetyData") + } +} + +// TestProduceVote_DifferentProposerFromLeader tests that no vote is created if the proposer is different from the leader for +// the current view. This is byzantine behavior and should be handled by the compliance layer, but we nevertheless want a +// sanity check for other code paths, such as a leader voting on their own proposal. +func (s *SafetyRulesTestSuite) TestProduceVote_DifferentProposerFromLeader() { + s.proposal.Block.ProposerID = unittest.IdentifierFixture() // different proposer + vote, err := s.safety.ProduceVote(s.proposal, s.proposal.Block.View) + require.Error(s.T(), err) + require.False(s.T(), model.IsNoVoteError(err)) + require.Nil(s.T(), vote) + s.persister.AssertNotCalled(s.T(), "PutSafetyData") +} + // TestProduceVote_ProposerEjected tests that no vote is created if the block proposer is ejected func (s *SafetyRulesTestSuite) TestProduceVote_ProposerEjected() { *s.committee = mocks.DynamicCommittee{} + s.committee.On("Self").Return(s.ourIdentity.NodeID).Maybe() s.committee.On("IdentityByBlock", s.proposal.Block.BlockID, s.proposal.Block.ProposerID).Return(nil, model.NewInvalidSignerErrorf("node-ejected")).Once() + s.committee.On("LeaderForView", s.proposal.Block.View).Return(s.proposerIdentity.NodeID, nil).Once() vote, err := s.safety.ProduceVote(s.proposal, s.proposal.Block.View) require.Nil(s.T(), vote) @@ -242,6 +276,8 @@ func (s *SafetyRulesTestSuite) TestProduceVote_InvalidProposerIdentity() { *s.committee = mocks.DynamicCommittee{} exception := errors.New("invalid-signer-identity") + s.committee.On("Self").Return(s.ourIdentity.NodeID).Maybe() + s.committee.On("LeaderForView", s.proposal.Block.View).Return(s.proposerIdentity.NodeID, nil).Once() s.committee.On("IdentityByBlock", s.proposal.Block.BlockID, s.proposal.Block.ProposerID).Return(nil, exception).Once() vote, err := s.safety.ProduceVote(s.proposal, s.proposal.Block.View) @@ -258,8 +294,9 @@ func (s *SafetyRulesTestSuite) TestProduceVote_NodeEjected() { *s.committee = mocks.DynamicCommittee{} s.committee.On("Self").Return(s.ourIdentity.NodeID) - s.committee.On("IdentityByBlock", s.proposal.Block.BlockID, s.ourIdentity.NodeID).Return(nil, model.NewInvalidSignerErrorf("node-ejected")).Once() + s.committee.On("LeaderForView", s.proposal.Block.View).Return(s.proposerIdentity.NodeID, nil).Once() s.committee.On("IdentityByBlock", s.proposal.Block.BlockID, s.proposal.Block.ProposerID).Return(s.proposerIdentity, nil).Maybe() + s.committee.On("IdentityByBlock", s.proposal.Block.BlockID, s.ourIdentity.NodeID).Return(nil, model.NewInvalidSignerErrorf("node-ejected")).Once() vote, err := s.safety.ProduceVote(s.proposal, s.proposal.Block.View) require.Nil(s.T(), vote) @@ -274,6 +311,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_InvalidVoterIdentity() { *s.committee = mocks.DynamicCommittee{}
s.committee.On("Self").Return(s.ourIdentity.NodeID) exception := errors.New("invalid-signer-identity") + s.committee.On("LeaderForView", s.proposal.Block.View).Return(s.proposerIdentity.NodeID, nil).Once() s.committee.On("IdentityByBlock", s.proposal.Block.BlockID, s.proposal.Block.ProposerID).Return(s.proposerIdentity, nil).Maybe() s.committee.On("IdentityByBlock", s.proposal.Block.BlockID, s.ourIdentity.NodeID).Return(nil, exception).Once() @@ -324,12 +362,12 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { // a proposal which includes a QC for the previous round should not contain a TC s.Run("proposal-includes-last-view-qc-and-tc", func() { - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), helper.WithBlockView(s.bootstrapBlock.View+1))), - helper.WithLastViewTC(helper.MakeTC())) + helper.WithLastViewTC(helper.MakeTC())))) s.committee.On("IdentityByBlock", proposal.Block.BlockID, proposal.Block.ProposerID).Return(s.proposerIdentity, nil).Maybe() vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Error(s.T(), err) @@ -338,11 +376,11 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { }) s.Run("no-last-view-tc", func() { // create block where Block.View != Block.QC.View+1 and LastViewTC = nil - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), - helper.WithBlockView(s.bootstrapBlock.View+2)))) + helper.WithBlockView(s.bootstrapBlock.View+2)))))) vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) @@ -351,14 +389,14 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { s.Run("last-view-tc-invalid-view", func() { // create block where Block.View != Block.QC.View+1 and // Block.View != LastViewTC.View+1 - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), helper.WithBlockView(s.bootstrapBlock.View+2))), helper.WithLastViewTC( helper.MakeTC( - helper.WithTCView(s.bootstrapBlock.View)))) + helper.WithTCView(s.bootstrapBlock.View)))))) vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) @@ -368,7 +406,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { // create block where Block.View != Block.QC.View+1 and // Block.View == LastViewTC.View+1 and Block.QC.View >= Block.View // in this case block is not safe to extend since proposal includes QC which is newer than the proposal itself. 
- proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), @@ -378,7 +416,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { })), helper.WithLastViewTC( helper.MakeTC( - helper.WithTCView(s.bootstrapBlock.View+1)))) + helper.WithTCView(s.bootstrapBlock.View+1)))))) vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) @@ -390,7 +428,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { // in this case block is not safe to extend since proposal is built on top of QC, which is lower // than QC presented in LastViewTC. TONewestQC := helper.MakeQC(helper.WithQCView(s.bootstrapBlock.View + 1)) - proposal := helper.MakeProposal( + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), @@ -398,7 +436,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { helper.WithLastViewTC( helper.MakeTC( helper.WithTCView(s.bootstrapBlock.View+1), - helper.WithTCNewestQC(TONewestQC)))) + helper.WithTCNewestQC(TONewestQC)))))) vote, err := s.safety.ProduceVote(proposal, proposal.Block.View) require.Error(s.T(), err) require.False(s.T(), model.IsNoVoteError(err)) @@ -412,7 +450,7 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VotingOnInvalidProposals() { // TestProduceVote_VoteEquivocation tests the scenario where we try to vote twice in the same view. We require that the replica // follows these rules: // - replica votes once per view -// - replica votes in monotonously increasing views +// - replica votes in monotonically increasing views // // Voting twice per round on equivocating proposals is considered byzantine behavior. // Expect a `model.NoVoteError` sentinel in such scenario.
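Illustrative aside: a hedged sketch of the leader-side flow that the new SignOwnProposal API enables. Only the SafetyRules interface and model package come from this diff; the wrapper function is hypothetical, and the caller is assumed to attach the returned vote to construct the signed proposal.

// proposeForView is a hypothetical helper: SafetyRules enforces at most one
// signature per view, and the returned vote doubles as the proposer's block
// signature. On a benign NoVoteError it returns (nil, nil).
func proposeForView(sr hotstuff.SafetyRules, unsignedProposal *model.Proposal) (*model.Vote, error) {
	myVote, err := sr.SignOwnProposal(unsignedProposal)
	if err != nil {
		if model.IsNoVoteError(err) {
			// we already proposed or timed out in this view; signing again would equivocate
			return nil, nil
		}
		return nil, fmt.Errorf("unexpected error signing own proposal: %w", err)
	}
	return myVote, nil // caller attaches the vote to build the signed proposal
}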
@@ -426,18 +464,25 @@ func (s *SafetyRulesTestSuite) TestProduceVote_VoteEquivocation() { require.NotNil(s.T(), vote) require.Equal(s.T(), expectedVote, vote) - equivocatingProposal := helper.MakeProposal( + equivocatingProposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithParentBlock(s.bootstrapBlock), helper.WithBlockView(s.bootstrapBlock.View+1), helper.WithBlockProposer(s.proposerIdentity.NodeID)), - )) + )))) - // voting at same view(event different proposal) should result in NoVoteError + // voting at the same view (even a different proposal) should result in NoVoteError vote, err = s.safety.ProduceVote(equivocatingProposal, s.proposal.Block.View) require.True(s.T(), model.IsNoVoteError(err)) require.Nil(s.T(), vote) + + s.proposal.Block.ProposerID = s.ourIdentity.NodeID + + // proposing at the same view should result in NoVoteError since we have already voted + vote, err = s.safety.SignOwnProposal(&s.proposal.Proposal) + require.True(s.T(), model.IsNoVoteError(err)) + require.Nil(s.T(), vote) } // TestProduceVote_AfterTimeout tests a scenario where we first timeout for view and then try to produce a vote for @@ -475,6 +520,9 @@ func (s *SafetyRulesTestSuite) TestProduceTimeout_ShouldTimeout() { expectedTimeout := &model.TimeoutObject{ View: view, NewestQC: newestQC, + // don't care about actual data + SignerID: unittest.IdentifierFixture(), + SigData: unittest.SignatureFixture(), } expectedSafetyData := &hotstuff.SafetyData{ @@ -498,7 +546,7 @@ otherTimeout, err := s.safety.ProduceTimeout(view, newestQC, nil) require.NoError(s.T(), err) - require.Equal(s.T(), timeout.ID(), otherTimeout.ID()) + require.True(s.T(), timeout.Equals(otherTimeout)) require.Equal(s.T(), timeout.TimeoutTick+1, otherTimeout.TimeoutTick) // to create new TO we need to provide a TC @@ -707,6 +755,82 @@ func (s *SafetyRulesTestSuite) TestProduceTimeout_NodeEjected() { s.persister.AssertNotCalled(s.T(), "PutSafetyData") } +// TestSignOwnProposal tests a happy path scenario where the leader can sign their own proposal. +func (s *SafetyRulesTestSuite) TestSignOwnProposal() { + s.proposal.Block.ProposerID = s.ourIdentity.NodeID + expectedSafetyData := &hotstuff.SafetyData{ + LockedOneChainView: s.proposal.Block.QC.View, + HighestAcknowledgedView: s.proposal.Block.View, + } + expectedVote := makeVote(s.proposal.Block) + s.committee.On("LeaderForView").Unset() + s.committee.On("LeaderForView", s.proposal.Block.View).Return(s.ourIdentity.NodeID, nil).Once() + s.signer.On("CreateVote", s.proposal.Block).Return(expectedVote, nil).Once() + s.persister.On("PutSafetyData", expectedSafetyData).Return(nil).Once() + vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal) + require.NoError(s.T(), err) + require.Equal(s.T(), vote, expectedVote) +} + +// TestSignOwnProposal_ProposalNotSelf tests that we cannot sign a proposal that is not ours. We +// verify that SafetyRules returns an exception and not the benign sentinel error NoVoteError. +func (s *SafetyRulesTestSuite) TestSignOwnProposal_ProposalNotSelf() { + vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal) + require.Error(s.T(), err) + require.False(s.T(), model.IsNoVoteError(err)) + require.Nil(s.T(), vote) +} + +// TestSignOwnProposal_SelfInvalidLeader tests that we cannot sign a proposal if we are not the leader for the view. +// We verify that SafetyRules returns an exception and not the benign sentinel error NoVoteError.
+func (s *SafetyRulesTestSuite) TestSignOwnProposal_SelfInvalidLeader() { + s.proposal.Block.ProposerID = s.ourIdentity.NodeID + otherID := unittest.IdentifierFixture() + require.NotEqual(s.T(), otherID, s.ourIdentity.NodeID) + s.committee.On("LeaderForView").Unset() + s.committee.On("LeaderForView", s.proposal.Block.View).Return(otherID, nil).Once() + vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal) + require.Error(s.T(), err) + require.False(s.T(), model.IsNoVoteError(err)) + require.Nil(s.T(), vote) +} + +// TestSignOwnProposal_ProposalEquivocation verifies that SafetyRules will refuse to sign multiple proposals for the same view. +// We require that the leader complies with the following rules: +// - leader proposes once per view +// - leader's proposals follow safety rules +// +// Signing repeatedly for one view (either proposals or voting) can lead to equivocation (byzantine behavior). +// Expect a `model.NoVoteError` sentinel in such scenario. +func (s *SafetyRulesTestSuite) TestSignOwnProposal_ProposalEquivocation() { + s.proposal.Block.ProposerID = s.ourIdentity.NodeID + expectedSafetyData := &hotstuff.SafetyData{ + LockedOneChainView: s.proposal.Block.QC.View, + HighestAcknowledgedView: s.proposal.Block.View, + } + expectedVote := makeVote(s.proposal.Block) + s.committee.On("LeaderForView").Unset() + s.committee.On("LeaderForView", s.proposal.Block.View).Return(s.ourIdentity.NodeID, nil).Once() + s.signer.On("CreateVote", s.proposal.Block).Return(expectedVote, nil).Once() + s.persister.On("PutSafetyData", expectedSafetyData).Return(nil).Once() + + vote, err := s.safety.SignOwnProposal(&s.proposal.Proposal) + require.NoError(s.T(), err) + require.Equal(s.T(), expectedVote, vote) + + // signing same proposal again should return an error since we have already created a proposal for this view + vote, err = s.safety.SignOwnProposal(&s.proposal.Proposal) + require.Error(s.T(), err) + require.True(s.T(), model.IsNoVoteError(err)) + require.Nil(s.T(), vote) + + // voting for same view should also return an error since we have already proposed + vote, err = s.safety.ProduceVote(s.proposal, s.proposal.Block.View) + require.Error(s.T(), err) + require.True(s.T(), model.IsNoVoteError(err)) + require.Nil(s.T(), vote) +} + func makeVote(block *model.Block) *model.Vote { return &model.Vote{ BlockID: block.BlockID, diff --git a/consensus/hotstuff/signature.go b/consensus/hotstuff/signature.go index 0fc56748ab2..0deec84caa4 100644 --- a/consensus/hotstuff/signature.go +++ b/consensus/hotstuff/signature.go @@ -1,7 +1,8 @@ package hotstuff import ( - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/flow" ) @@ -174,5 +175,5 @@ type Packer interface { // It returns: // - (sigData, nil) if successfully unpacked the signature data // - (nil, model.InvalidFormatError) if failed to unpack the signature data - Unpack(signerIdentities flow.IdentityList, sigData []byte) (*BlockSignatureData, error) + Unpack(signerIdentities flow.IdentitySkeletonList, sigData []byte) (*BlockSignatureData, error) } diff --git a/consensus/hotstuff/signature/block_signer_decoder.go b/consensus/hotstuff/signature/block_signer_decoder.go index 46a2036c50a..b56d054bf4d 100644 --- a/consensus/hotstuff/signature/block_signer_decoder.go +++ b/consensus/hotstuff/signature/block_signer_decoder.go @@ -32,7 +32,7 @@ var _ hotstuff.BlockSignerDecoder = (*BlockSignerDecoder)(nil) // - state.ErrUnknownSnapshotReference if the input header is not a known incorporated block.
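Illustrative aside: a minimal sketch of consuming DecodeSignerIDs per the error contract documented above. Only the decoder interface, errors.Is, and state.ErrUnknownSnapshotReference appear in this diff; the wrapper function is hypothetical and assumes the usual errors, fmt, flow, hotstuff, and state imports.

// signerIDsForHeader is a hypothetical caller distinguishing the documented
// benign error from fatal exceptions.
func signerIDsForHeader(decoder hotstuff.BlockSignerDecoder, header *flow.Header) (flow.IdentifierList, error) {
	ids, err := decoder.DecodeSignerIDs(header)
	if err != nil {
		if errors.Is(err, state.ErrUnknownSnapshotReference) {
			// benign: the header references a block we have not incorporated yet
			return nil, fmt.Errorf("reference block unknown, retry later: %w", err)
		}
		return nil, fmt.Errorf("unexpected error decoding signer IDs: %w", err)
	}
	// by convention, ids is empty for root blocks (which carry no parent QC)
	return ids, nil
}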
func (b *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.IdentifierList, error) { // root block does not have signer indices - if header.ParentVoterIndices == nil && header.View == 0 { + if !header.ContainsParentQC() { return []flow.Identifier{}, nil } @@ -44,11 +44,12 @@ func (b *BlockSignerDecoder) DecodeSignerIDs(header *flow.Header) (flow.Identifi // try asking by parent ID // TODO: this assumes no identity table changes within epochs, must be changed for Dynamic Protocol State // See https://github.com/onflow/flow-go/issues/4085 - members, err = b.IdentitiesByBlock(header.ParentID) + byBlockMembers, err := b.IdentitiesByBlock(header.ParentID) if err != nil { return nil, fmt.Errorf("could not retrieve identities for block %x with QC view %d for parent %x: %w", header.ID(), header.ParentView, header.ParentID, err) // state.ErrUnknownSnapshotReference or exception } + members = byBlockMembers.ToSkeleton() } else { return nil, fmt.Errorf("unexpected error retrieving identities for block %v: %w", header.ID(), err) } diff --git a/consensus/hotstuff/signature/block_signer_decoder_test.go b/consensus/hotstuff/signature/block_signer_decoder_test.go index 78efb3005eb..d5fd0b8a18d 100644 --- a/consensus/hotstuff/signature/block_signer_decoder_test.go +++ b/consensus/hotstuff/signature/block_signer_decoder_test.go @@ -11,7 +11,6 @@ import ( hotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/utils/unittest" @@ -27,40 +26,42 @@ type blockSignerDecoderSuite struct { committee *hotstuff.DynamicCommittee decoder *BlockSignerDecoder - block flow.Block + block *flow.Block } func (s *blockSignerDecoderSuite) SetupTest() { // the default header fixture creates signerIDs for a committee of 10 nodes, so we prepare a committee same as that - s.allConsensus = unittest.IdentityListFixture(40, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + s.allConsensus = unittest.IdentityListFixture(40, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]) // mock consensus committee s.committee = hotstuff.NewDynamicCommittee(s.T()) - s.committee.On("IdentitiesByEpoch", mock.Anything).Return(s.allConsensus, nil).Maybe() + s.committee.On("IdentitiesByEpoch", mock.Anything).Return(s.allConsensus.ToSkeleton(), nil).Maybe() // prepare valid test block: voterIndices, err := signature.EncodeSignersToIndices(s.allConsensus.NodeIDs(), s.allConsensus.NodeIDs()) require.NoError(s.T(), err) s.block = unittest.BlockFixture() - s.block.Header.ParentVoterIndices = voterIndices + s.block.ParentVoterIndices = voterIndices s.decoder = NewBlockSignerDecoder(s.committee) } // Test_SuccessfulDecode tests happy path decoding func (s *blockSignerDecoderSuite) Test_SuccessfulDecode() { - ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + ids, err := s.decoder.DecodeSignerIDs(s.block.ToHeader()) require.NoError(s.T(), err) require.Equal(s.T(), s.allConsensus.NodeIDs(), ids) } // Test_RootBlock tests decoder accepts root block with empty signer indices func (s *blockSignerDecoderSuite) Test_RootBlock() { - s.block.Header.ParentVoterIndices = nil - s.block.Header.ParentVoterSigData = nil - s.block.Header.View = 0 + s.block.ParentVoterIndices = nil + s.block.ParentVoterSigData = nil + s.block.ParentView = 0 + s.block.ProposerID = flow.ZeroID + 
s.block.View = 0 - ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + ids, err := s.decoder.DecodeSignerIDs(s.block.ToHeader()) require.NoError(s.T(), err) require.Empty(s.T(), ids) } @@ -74,7 +75,7 @@ func (s *blockSignerDecoderSuite) Test_CommitteeException() { *s.committee = *hotstuff.NewDynamicCommittee(s.T()) s.committee.On("IdentitiesByEpoch", mock.Anything).Return(nil, exception) - ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + ids, err := s.decoder.DecodeSignerIDs(s.block.ToHeader()) require.Empty(s.T(), ids) require.NotErrorIs(s.T(), err, model.ErrViewForUnknownEpoch) require.False(s.T(), signature.IsInvalidSignerIndicesError(err)) @@ -86,7 +87,7 @@ func (s *blockSignerDecoderSuite) Test_CommitteeException() { s.committee.On("IdentitiesByEpoch", mock.Anything).Return(nil, model.ErrViewForUnknownEpoch) s.committee.On("IdentitiesByBlock", mock.Anything).Return(nil, exception) - ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + ids, err := s.decoder.DecodeSignerIDs(s.block.ToHeader()) require.Empty(s.T(), ids) require.NotErrorIs(s.T(), err, model.ErrViewForUnknownEpoch) require.False(s.T(), signature.IsInvalidSignerIndicesError(err)) @@ -98,10 +99,10 @@ func (s *blockSignerDecoderSuite) Test_CommitteeException() { // where the block is known - should return identities for block. func (s *blockSignerDecoderSuite) Test_UnknownEpoch_KnownBlock() { *s.committee = *hotstuff.NewDynamicCommittee(s.T()) - s.committee.On("IdentitiesByEpoch", s.block.Header.ParentView).Return(nil, model.ErrViewForUnknownEpoch) - s.committee.On("IdentitiesByBlock", s.block.Header.ParentID).Return(s.allConsensus, nil) + s.committee.On("IdentitiesByEpoch", s.block.ParentView).Return(nil, model.ErrViewForUnknownEpoch) + s.committee.On("IdentitiesByBlock", s.block.ParentID).Return(s.allConsensus, nil) - ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + ids, err := s.decoder.DecodeSignerIDs(s.block.ToHeader()) require.NoError(s.T(), err) require.Equal(s.T(), s.allConsensus.NodeIDs(), ids) } @@ -110,10 +111,10 @@ func (s *blockSignerDecoderSuite) Test_UnknownEpoch_KnownBlock() { // where the block is unknown - should propagate state.ErrUnknownSnapshotReference. func (s *blockSignerDecoderSuite) Test_UnknownEpoch_UnknownBlock() { *s.committee = *hotstuff.NewDynamicCommittee(s.T()) - s.committee.On("IdentitiesByEpoch", s.block.Header.ParentView).Return(nil, model.ErrViewForUnknownEpoch) - s.committee.On("IdentitiesByBlock", s.block.Header.ParentID).Return(nil, state.ErrUnknownSnapshotReference) + s.committee.On("IdentitiesByEpoch", s.block.ParentView).Return(nil, model.ErrViewForUnknownEpoch) + s.committee.On("IdentitiesByBlock", s.block.ParentID).Return(nil, state.ErrUnknownSnapshotReference) - ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + ids, err := s.decoder.DecodeSignerIDs(s.block.ToHeader()) require.ErrorIs(s.T(), err, state.ErrUnknownSnapshotReference) require.Empty(s.T(), ids) } @@ -122,8 +123,8 @@ func (s *blockSignerDecoderSuite) Test_UnknownEpoch_UnknownBlock() { // signature.InvalidSignerIndicesError if the signer indices in the provided header // are not a valid encoding. 
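Illustrative aside: the encode/decode cycle these tests exercise, as a hedged sketch. It uses only helpers appearing in this diff (signature.EncodeSignersToIndices, DecodeSignerIDs, signature.IsInvalidSignerIndicesError); the wrapper function and the assumption that the full committee signed are illustrative.

// encodeAndDecode is a hypothetical round trip: encode which committee members
// signed the parent QC into compact indices, then decode them back.
func encodeAndDecode(committee flow.IdentityList, block *flow.Block, decoder *BlockSignerDecoder) (flow.IdentifierList, error) {
	// here we assume, for illustration, that the entire committee signed
	indices, err := signature.EncodeSignersToIndices(committee.NodeIDs(), committee.NodeIDs())
	if err != nil {
		return nil, err
	}
	block.ParentVoterIndices = indices

	ids, err := decoder.DecodeSignerIDs(block.ToHeader())
	if signature.IsInvalidSignerIndicesError(err) {
		// the indices are not a valid encoding for this committee (e.g. wrong length)
		return nil, err
	}
	return ids, err
}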
func (s *blockSignerDecoderSuite) Test_InvalidIndices() { - s.block.Header.ParentVoterIndices = unittest.RandomBytes(1) - ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + s.block.ParentVoterIndices = unittest.RandomBytes(1) + ids, err := s.decoder.DecodeSignerIDs(s.block.ToHeader()) require.Empty(s.T(), ids) require.True(s.T(), signature.IsInvalidSignerIndicesError(err)) } @@ -137,16 +138,17 @@ func (s *blockSignerDecoderSuite) Test_EpochTransition() { // // Epoch 1 Epoch 2 // PARENT <- | -- B - blockView := s.block.Header.View - parentView := s.block.Header.ParentView - epoch1Committee := s.allConsensus - epoch2Committee := s.allConsensus.SamplePct(.8) + blockView := s.block.View + parentView := s.block.ParentView + epoch1Committee := s.allConsensus.ToSkeleton() + epoch2Committee, err := s.allConsensus.SamplePct(.8) + require.NoError(s.T(), err) *s.committee = *hotstuff.NewDynamicCommittee(s.T()) s.committee.On("IdentitiesByEpoch", parentView).Return(epoch1Committee, nil).Maybe() - s.committee.On("IdentitiesByEpoch", blockView).Return(epoch2Committee, nil).Maybe() + s.committee.On("IdentitiesByEpoch", blockView).Return(epoch2Committee.ToSkeleton(), nil).Maybe() - ids, err := s.decoder.DecodeSignerIDs(s.block.Header) + ids, err := s.decoder.DecodeSignerIDs(s.block.ToHeader()) require.NoError(s.T(), err) require.Equal(s.T(), epoch1Committee.NodeIDs(), ids) } diff --git a/consensus/hotstuff/signature/packer.go b/consensus/hotstuff/signature/packer.go index 4b6652ce66f..20f819569b9 100644 --- a/consensus/hotstuff/signature/packer.go +++ b/consensus/hotstuff/signature/packer.go @@ -69,7 +69,7 @@ func (p *ConsensusSigDataPacker) Pack(view uint64, sig *hotstuff.BlockSignatureD // It returns: // - (sigData, nil) if successfully unpacked the signature data // - (nil, model.InvalidFormatError) if failed to unpack the signature data -func (p *ConsensusSigDataPacker) Unpack(signerIdentities flow.IdentityList, sigData []byte) (*hotstuff.BlockSignatureData, error) { +func (p *ConsensusSigDataPacker) Unpack(signerIdentities flow.IdentitySkeletonList, sigData []byte) (*hotstuff.BlockSignatureData, error) { // decode into typed data data, err := p.Decode(sigData) // all potential error are of type `model.InvalidFormatError` if err != nil { diff --git a/consensus/hotstuff/signature/packer_test.go b/consensus/hotstuff/signature/packer_test.go index 862534d6eda..5ff63f77749 100644 --- a/consensus/hotstuff/signature/packer_test.go +++ b/consensus/hotstuff/signature/packer_test.go @@ -16,11 +16,11 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -func newPacker(identities flow.IdentityList) *ConsensusSigDataPacker { +func newPacker(identities flow.IdentitySkeletonList) *ConsensusSigDataPacker { // mock consensus committee committee := &mocks.DynamicCommittee{} committee.On("IdentitiesByEpoch", mock.Anything).Return( - func(_ uint64) flow.IdentityList { + func(_ uint64) flow.IdentitySkeletonList { return identities }, nil, @@ -29,7 +29,7 @@ func newPacker(identities flow.IdentityList) *ConsensusSigDataPacker { return NewConsensusSigDataPacker(committee) } -func makeBlockSigData(committee flow.IdentityList) *hotstuff.BlockSignatureData { +func makeBlockSigData(committee flow.IdentitySkeletonList) *hotstuff.BlockSignatureData { blockSigData := &hotstuff.BlockSignatureData{ StakingSigners: []flow.Identifier{ committee[0].NodeID, // A @@ -54,7 +54,7 @@ func makeBlockSigData(committee flow.IdentityList) *hotstuff.BlockSignatureData // aggregated random beacon sigs are from [D,F] func TestPackUnpack(t 
*testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]).ToSkeleton() view := rand.Uint64() blockSigData := makeBlockSigData(committee) @@ -100,9 +100,9 @@ func TestPackUnpack_EmptySigners(t *testing.T) { require.NoError(t, err) // create packer with a non-empty committee (honest node trying to decode the sig data) - committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() packer := newPacker(committee) - unpacked, err := packer.Unpack(make([]*flow.Identity, 0), sig) + unpacked, err := packer.Unpack(make(flow.IdentitySkeletonList, 0), sig) require.NoError(t, err) // check that the unpack data match with the original data @@ -117,7 +117,7 @@ func TestPackUnpack_EmptySigners(t *testing.T) { // it's able to pack and unpack func TestPackUnpackManyNodes(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(200, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(200, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := makeBlockSigData(committee) stakingSigners := make([]flow.Identifier, 0) @@ -161,7 +161,7 @@ func TestPackUnpackManyNodes(t *testing.T) { // if the sig data can not be decoded, return model.InvalidFormatError func TestFailToDecode(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := makeBlockSigData(committee) @@ -184,7 +184,7 @@ func TestFailToDecode(t *testing.T) { // if the signer IDs doesn't match, return InvalidFormatError func TestMismatchSignerIDs(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(9, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(9, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := makeBlockSigData(committee[:6]) @@ -216,7 +216,7 @@ func TestMismatchSignerIDs(t *testing.T) { // if sig type doesn't match, return InvalidFormatError func TestInvalidSigType(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(6, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := makeBlockSigData(committee) @@ -250,7 +250,7 @@ func TestInvalidSigType(t *testing.T) { // no random beacon signers func TestPackUnpackWithoutRBAggregatedSig(t *testing.T) { // prepare data for testing - committee := unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)) + committee := unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() view := rand.Uint64() blockSigData := &hotstuff.BlockSignatureData{ @@ -292,7 +292,7 @@ func TestPackUnpackWithoutRBAggregatedSig(t *testing.T) { // with different structure format, more specifically there is no difference between // nil and empty slices for RandomBeaconSigners and AggregatedRandomBeaconSig. 
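Illustrative aside: on the consumer side, the main effect of the Unpack signature change is that callers now pass the epoch-static identity skeletons (e.g. obtained via ToSkeleton() from a full flow.IdentityList, as the updated tests do). A hedged sketch follows; the wrapper is hypothetical, and model.IsInvalidFormatError is assumed to be the checker matching the documented model.InvalidFormatError sentinel.

// unpackSigData is a hypothetical caller of the updated Packer interface.
func unpackSigData(packer hotstuff.Packer, fullCommittee flow.IdentityList, rawSigData []byte) (*hotstuff.BlockSignatureData, error) {
	blockSigData, err := packer.Unpack(fullCommittee.ToSkeleton(), rawSigData)
	if err != nil {
		if model.IsInvalidFormatError(err) {
			// byzantine or corrupted input: the sig data does not decode against this committee
			return nil, err
		}
		return nil, fmt.Errorf("unexpected error unpacking signature data: %w", err)
	}
	return blockSigData, nil
}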
func TestPackWithoutRBAggregatedSig(t *testing.T) { - identities := unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)) + identities := unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)).ToSkeleton() committee := identities.NodeIDs() // prepare data for testing diff --git a/consensus/hotstuff/signature/randombeacon_inspector.go b/consensus/hotstuff/signature/randombeacon_inspector.go index 49d2b1ab50a..e6fa3a1bf0e 100644 --- a/consensus/hotstuff/signature/randombeacon_inspector.go +++ b/consensus/hotstuff/signature/randombeacon_inspector.go @@ -3,8 +3,9 @@ package signature import ( "fmt" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/module/signature" ) diff --git a/consensus/hotstuff/signature/randombeacon_inspector_test.go b/consensus/hotstuff/signature/randombeacon_inspector_test.go index 5784577f668..4f5bbc065d4 100644 --- a/consensus/hotstuff/signature/randombeacon_inspector_test.go +++ b/consensus/hotstuff/signature/randombeacon_inspector_test.go @@ -1,19 +1,17 @@ package signature import ( - "errors" - mrand "math/rand" + "math/rand" "sync" "testing" - "time" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/utils/unittest" ) @@ -24,6 +22,7 @@ func TestRandomBeaconInspector(t *testing.T) { type randomBeaconSuite struct { suite.Suite + rng *rand.Rand n int threshold int kmac hash.Hasher @@ -39,9 +38,9 @@ func (rs *randomBeaconSuite) SetupTest() { rs.threshold = signature.RandomBeaconThreshold(rs.n) // generate threshold keys - mrand.Seed(time.Now().UnixNano()) - seed := make([]byte, crypto.SeedMinLenDKG) - _, err := mrand.Read(seed) + rs.rng = unittest.GetPRG(rs.T()) + seed := make([]byte, crypto.KeyGenSeedMinLen) + _, err := rs.rng.Read(seed) require.NoError(rs.T(), err) rs.skShares, rs.pkShares, rs.pkGroup, err = crypto.BLSThresholdKeyGen(rs.n, rs.threshold, seed) require.NoError(rs.T(), err) @@ -57,7 +56,7 @@ func (rs *randomBeaconSuite) SetupTest() { for i := 0; i < rs.n; i++ { rs.signers = append(rs.signers, i) } - mrand.Shuffle(rs.n, func(i, j int) { + rs.rng.Shuffle(rs.n, func(i, j int) { rs.signers[i], rs.signers[j] = rs.signers[j], rs.signers[i] }) } @@ -166,7 +165,7 @@ func (rs *randomBeaconSuite) TestInvalidSignerIndex() { func (rs *randomBeaconSuite) TestInvalidSignature() { follower, err := NewRandomBeaconInspector(rs.pkGroup, rs.pkShares, rs.threshold, rs.thresholdSignatureMessage) require.NoError(rs.T(), err) - index := mrand.Intn(rs.n) // random signer + index := rs.rng.Intn(rs.n) // random signer share, err := rs.skShares[index].Sign(rs.thresholdSignatureMessage, rs.kmac) require.NoError(rs.T(), err) @@ -174,8 +173,7 @@ func (rs *randomBeaconSuite) TestInvalidSignature() { share[4] ^= 1 // Verify err = follower.Verify(index, share) - assert.Error(rs.T(), err) - assert.True(rs.T(), errors.Is(err, model.ErrInvalidSignature)) + assert.ErrorIs(rs.T(), err, model.ErrInvalidSignature) // restore share share[4] ^= 1 @@ -183,8 +181,7 @@ func (rs *randomBeaconSuite) TestInvalidSignature() { otherIndex := (index + 1) % len(rs.pkShares) // otherIndex is different than index // VerifyShare err = 
follower.Verify(otherIndex, share) - assert.Error(rs.T(), err) - assert.True(rs.T(), errors.Is(err, model.ErrInvalidSignature)) + assert.ErrorIs(rs.T(), err, model.ErrInvalidSignature) } func (rs *randomBeaconSuite) TestConstructorErrors() { diff --git a/consensus/hotstuff/signature/randombeacon_reconstructor.go b/consensus/hotstuff/signature/randombeacon_reconstructor.go index df18db1acf0..205657bb80e 100644 --- a/consensus/hotstuff/signature/randombeacon_reconstructor.go +++ b/consensus/hotstuff/signature/randombeacon_reconstructor.go @@ -3,9 +3,10 @@ package signature import ( "fmt" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" ) diff --git a/consensus/hotstuff/signature/randombeacon_reconstructor_test.go b/consensus/hotstuff/signature/randombeacon_reconstructor_test.go index ed69a2da37f..a36b7cd2e5a 100644 --- a/consensus/hotstuff/signature/randombeacon_reconstructor_test.go +++ b/consensus/hotstuff/signature/randombeacon_reconstructor_test.go @@ -13,8 +13,8 @@ import ( // TestRandomBeaconReconstructor_InvalidSignerID tests that RandomBeaconReconstructor doesn't forward calls to // RandomBeaconInspector if it fails to map signerID to signerIndex func TestRandomBeaconReconstructor_InvalidSignerID(t *testing.T) { - dkg := &mockhotstuff.DKG{} - inspector := &mockhotstuff.RandomBeaconInspector{} + dkg := mockhotstuff.NewDKG(t) + inspector := mockhotstuff.NewRandomBeaconInspector(t) reconstructor := NewRandomBeaconReconstructor(dkg, inspector) exception := errors.New("invalid-signer-id") t.Run("trustedAdd", func(t *testing.T) { @@ -31,7 +31,4 @@ func TestRandomBeaconReconstructor_InvalidSignerID(t *testing.T) { require.ErrorAs(t, err, &exception) inspector.AssertNotCalled(t, "Verify") }) - - dkg.AssertExpectations(t) - inspector.AssertExpectations(t) } diff --git a/consensus/hotstuff/signature/randombeacon_signer_store.go b/consensus/hotstuff/signature/randombeacon_signer_store.go index c0092ea289e..f0f993f7cbc 100644 --- a/consensus/hotstuff/signature/randombeacon_signer_store.go +++ b/consensus/hotstuff/signature/randombeacon_signer_store.go @@ -4,7 +4,8 @@ import ( "errors" "fmt" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/storage" ) @@ -35,15 +36,15 @@ func NewEpochAwareRandomBeaconKeyStore(epochLookup module.EpochLookup, keys stor // - (nil, module.ErrNoBeaconKeyForEpoch) if beacon key for epoch is unavailable // - (nil, error) if there is any exception func (s *EpochAwareRandomBeaconKeyStore) ByView(view uint64) (crypto.PrivateKey, error) { - epoch, err := s.epochLookup.EpochForViewWithFallback(view) + epoch, err := s.epochLookup.EpochForView(view) if err != nil { return nil, fmt.Errorf("could not get epoch by view %v: %w", view, err) } // When DKG has completed, - // - if a node successfully generated the DKG key, the valid private key will be stored in database. - // - if a node failed to generate the DKG key, we will save a record in database to indicate this - // node has no private key for this epoch. + // - if a node successfully generated the Random Beacon key, the valid private key will be stored in the database. + // - if a node failed to generate the Random Beacon key, we will save a record in the database to indicate this + // node has no private key for this epoch.
// Within the epoch, we can look up my random beacon private key for the epoch. There are 3 cases: // 1. DKG has completed, and the private key is stored in database, and we can retrieve it (happy path) // 2. DKG has completed, but we failed to generate a private key (unhappy path) diff --git a/consensus/hotstuff/signature/randombeacon_signer_store_test.go b/consensus/hotstuff/signature/randombeacon_signer_store_test.go index 87ceeb0a7fe..1d861fbb948 100644 --- a/consensus/hotstuff/signature/randombeacon_signer_store_test.go +++ b/consensus/hotstuff/signature/randombeacon_signer_store_test.go @@ -4,14 +4,13 @@ import ( "errors" "math/rand" "testing" - "time" + "github.com/onflow/crypto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/module" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/storage" @@ -31,7 +30,6 @@ func TestBeaconKeyStore(t *testing.T) { } func (suite *BeaconKeyStore) SetupTest() { - rand.Seed(time.Now().Unix()) suite.epochLookup = mockmodule.NewEpochLookup(suite.T()) suite.beaconKeys = mockstorage.NewSafeBeaconKeys(suite.T()) suite.store = NewEpochAwareRandomBeaconKeyStore(suite.epochLookup, suite.beaconKeys) @@ -42,7 +40,7 @@ func (suite *BeaconKeyStore) TestHappyPath() { view := rand.Uint64() epoch := rand.Uint64() expectedKey := unittest.KeyFixture(crypto.BLSBLS12381) - suite.epochLookup.On("EpochForViewWithFallback", view).Return(epoch, nil) + suite.epochLookup.On("EpochForView", view).Return(epoch, nil) suite.beaconKeys.On("RetrieveMyBeaconPrivateKey", epoch).Return(expectedKey, true, nil) key, err := suite.store.ByView(view) @@ -54,7 +52,7 @@ func (suite *BeaconKeyStore) TestHappyPath() { // model.ErrViewForUnknownEpoch it is propagated to the caller of ByView. 
func (suite *BeaconKeyStore) Test_EpochLookup_ViewForUnknownEpoch() { view := rand.Uint64() - suite.epochLookup.On("EpochForViewWithFallback", view).Return(uint64(0), model.ErrViewForUnknownEpoch) + suite.epochLookup.On("EpochForView", view).Return(uint64(0), model.ErrViewForUnknownEpoch) key, err := suite.store.ByView(view) require.ErrorIs(suite.T(), err, model.ErrViewForUnknownEpoch) @@ -66,7 +64,7 @@ func (suite *BeaconKeyStore) Test_EpochLookup_ViewForUnknownEpoch() { func (suite *BeaconKeyStore) Test_EpochLookup_UnexpectedError() { view := rand.Uint64() exception := errors.New("unexpected error") - suite.epochLookup.On("EpochForViewWithFallback", view).Return(uint64(0), exception) + suite.epochLookup.On("EpochForView", view).Return(uint64(0), exception) key, err := suite.store.ByView(view) require.ErrorIs(suite.T(), err, exception) @@ -78,7 +76,7 @@ func (suite *BeaconKeyStore) Test_EpochLookup_UnexpectedError() { func (suite *BeaconKeyStore) Test_BeaconKeys_Unsafe() { view := rand.Uint64() epoch := rand.Uint64() - suite.epochLookup.On("EpochForViewWithFallback", view).Return(epoch, nil) + suite.epochLookup.On("EpochForView", view).Return(epoch, nil) suite.beaconKeys.On("RetrieveMyBeaconPrivateKey", epoch).Return(nil, false, nil) key, err := suite.store.ByView(view) @@ -91,7 +89,7 @@ func (suite *BeaconKeyStore) Test_BeaconKeys_Unsafe() { func (suite *BeaconKeyStore) Test_BeaconKeys_NotFound() { view := rand.Uint64() epoch := rand.Uint64() - suite.epochLookup.On("EpochForViewWithFallback", view).Return(epoch, nil) + suite.epochLookup.On("EpochForView", view).Return(epoch, nil) suite.beaconKeys.On("RetrieveMyBeaconPrivateKey", epoch).Return(nil, false, storage.ErrNotFound) key, err := suite.store.ByView(view) @@ -106,7 +104,7 @@ func (suite *BeaconKeyStore) Test_BeaconKeys_NotFound() { func (suite *BeaconKeyStore) Test_BeaconKeys_NotFoundThenAvailable() { view := rand.Uint64() epoch := rand.Uint64() - suite.epochLookup.On("EpochForViewWithFallback", view).Return(epoch, nil) + suite.epochLookup.On("EpochForView", view).Return(epoch, nil) var retKey crypto.PrivateKey var retSafe bool diff --git a/consensus/hotstuff/signature/static_randombeacon_signer_store.go b/consensus/hotstuff/signature/static_randombeacon_signer_store.go index 44e2f0d5724..fc211dda390 100644 --- a/consensus/hotstuff/signature/static_randombeacon_signer_store.go +++ b/consensus/hotstuff/signature/static_randombeacon_signer_store.go @@ -1,7 +1,8 @@ package signature import ( - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/module" ) diff --git a/consensus/hotstuff/signature/weighted_signature_aggregator.go b/consensus/hotstuff/signature/weighted_signature_aggregator.go index 6730e30f6f9..7e111cff870 100644 --- a/consensus/hotstuff/signature/weighted_signature_aggregator.go +++ b/consensus/hotstuff/signature/weighted_signature_aggregator.go @@ -5,9 +5,10 @@ import ( "fmt" "sync" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/signature" ) @@ -68,7 +69,7 @@ func NewWeightedSignatureAggregator( idToInfo := make(map[flow.Identifier]signerInfo) for i, id := range ids { idToInfo[id.NodeID] = signerInfo{ - weight: id.Weight, + weight: id.InitialWeight, index: i, } } diff --git a/consensus/hotstuff/signature/weighted_signature_aggregator_test.go 
b/consensus/hotstuff/signature/weighted_signature_aggregator_test.go index d4b2c28b728..03942153fe5 100644 --- a/consensus/hotstuff/signature/weighted_signature_aggregator_test.go +++ b/consensus/hotstuff/signature/weighted_signature_aggregator_test.go @@ -5,12 +5,11 @@ import ( "sync" "testing" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" msig "github.com/onflow/flow-go/module/signature" @@ -118,7 +117,7 @@ func TestWeightedSignatureAggregator(t *testing.T) { // ignore weight as comparing against expected weight is not thread safe assert.NoError(t, err) }(i, sig) - expectedWeight += ids[i+subSet].Weight + expectedWeight += ids[i+subSet].InitialWeight } wg.Wait() @@ -138,7 +137,7 @@ func TestWeightedSignatureAggregator(t *testing.T) { for i, sig := range sigs[:subSet] { weight, err := aggregator.TrustedAdd(ids[i].NodeID, sig) assert.NoError(t, err) - expectedWeight += ids[i].Weight + expectedWeight += ids[i].InitialWeight assert.Equal(t, expectedWeight, weight) // test TotalWeight assert.Equal(t, expectedWeight, aggregator.TotalWeight()) @@ -182,7 +181,7 @@ func TestWeightedSignatureAggregator(t *testing.T) { // add signatures for i, sig := range sigs { weight, err := aggregator.TrustedAdd(ids[i].NodeID, sig) - expectedWeight += ids[i].Weight + expectedWeight += ids[i].InitialWeight assert.Equal(t, expectedWeight, weight) require.NoError(t, err) } @@ -264,7 +263,7 @@ func TestWeightedSignatureAggregator(t *testing.T) { for i, sig := range sigs { weight, err := aggregator.TrustedAdd(ids[i].NodeID, sig) require.NoError(t, err) - expectedWeight += ids[i].Weight + expectedWeight += ids[i].InitialWeight assert.Equal(t, expectedWeight, weight) } diff --git a/consensus/hotstuff/signer.go b/consensus/hotstuff/signer.go index 48d74c2bee4..94cbdc9b936 100644 --- a/consensus/hotstuff/signer.go +++ b/consensus/hotstuff/signer.go @@ -7,10 +7,6 @@ import ( // Signer is responsible for creating votes, proposals for a given block. type Signer interface { - // CreateProposal creates a proposal for the given block. No error returns - // are expected during normal operations (incl. presence of byz. actors). - CreateProposal(block *model.Block) (*model.Proposal, error) - // CreateVote creates a vote for the given block. No error returns are // expected during normal operations (incl. presence of byz. actors). 
CreateVote(block *model.Block) (*model.Vote, error) diff --git a/consensus/hotstuff/timeoutaggregator/timeout_aggregator.go b/consensus/hotstuff/timeoutaggregator/timeout_aggregator.go index 7d359257176..e5294e9b4ee 100644 --- a/consensus/hotstuff/timeoutaggregator/timeout_aggregator.go +++ b/consensus/hotstuff/timeoutaggregator/timeout_aggregator.go @@ -13,9 +13,9 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/notifications" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" - "github.com/onflow/flow-go/engine/consensus/sealing/counters" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/metrics" @@ -35,7 +35,7 @@ type TimeoutAggregator struct { log zerolog.Logger hotstuffMetrics module.HotstuffMetrics engineMetrics module.EngineMetrics - lowestRetainedView counters.StrictMonotonousCounter // lowest view, for which we still process timeouts + lowestRetainedView counters.StrictMonotonicCounter // lowest view, for which we still process timeouts collectors hotstuff.TimeoutCollectors queuedTimeoutsNotifier engine.Notifier enteringViewNotifier engine.Notifier @@ -64,7 +64,7 @@ func NewTimeoutAggregator(log zerolog.Logger, log: log.With().Str("component", "hotstuff.timeout_aggregator").Logger(), hotstuffMetrics: hotstuffMetrics, engineMetrics: engineMetrics, - lowestRetainedView: counters.NewMonotonousCounter(lowestRetainedView), + lowestRetainedView: counters.NewMonotonicCounter(lowestRetainedView), collectors: collectors, queuedTimeoutsNotifier: engine.NewNotifier(), enteringViewNotifier: engine.NewNotifier(), @@ -134,10 +134,13 @@ func (t *TimeoutAggregator) processQueuedTimeoutObjects(ctx context.Context) err t.engineMetrics.MessageHandled(metrics.EngineTimeoutAggregator, metrics.MessageTimeoutObject) if err != nil { - return fmt.Errorf("could not process pending TO %v: %w", timeoutObject.ID(), err) + return fmt.Errorf("could not process pending TO: %s: %w", + timeoutObject.String(), + err, + ) } - t.log.Info(). + t.log.Debug(). Uint64("view", timeoutObject.View). Hex("signer", timeoutObject.SignerID[:]). 
Msg("TimeoutObject processed successfully") diff --git a/consensus/hotstuff/timeoutaggregator/timeout_aggregator_test.go b/consensus/hotstuff/timeoutaggregator/timeout_aggregator_test.go index e8fd19b1bb8..ac570cf77a2 100644 --- a/consensus/hotstuff/timeoutaggregator/timeout_aggregator_test.go +++ b/consensus/hotstuff/timeoutaggregator/timeout_aggregator_test.go @@ -55,7 +55,7 @@ func (s *TimeoutAggregatorTestSuite) SetupTest() { require.NoError(s.T(), err) ctx, cancel := context.WithCancel(context.Background()) - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx := irrecoverable.NewMockSignalerContext(s.T(), ctx) s.stopAggregator = cancel s.aggregator.Start(signalerCtx) unittest.RequireCloseBefore(s.T(), s.aggregator.Ready(), 100*time.Millisecond, "should close before timeout") diff --git a/consensus/hotstuff/timeoutaggregator/timeout_collectors.go b/consensus/hotstuff/timeoutaggregator/timeout_collectors.go index 20369bc9485..31e83d10b21 100644 --- a/consensus/hotstuff/timeoutaggregator/timeout_collectors.go +++ b/consensus/hotstuff/timeoutaggregator/timeout_collectors.go @@ -7,6 +7,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool" ) @@ -15,23 +16,26 @@ import ( // particular view is lazy (instances are created on demand). // This structure is concurrently safe. // TODO: once VoteCollectors gets updated to stop managing worker pool we can merge VoteCollectors and TimeoutCollectors using generics -// TODO(active-pacemaker): add metrics for tracking size of collectors and active range type TimeoutCollectors struct { - log zerolog.Logger - lock sync.RWMutex - lowestRetainedView uint64 // lowest view, for which we still retain a TimeoutCollector and process timeouts - collectors map[uint64]hotstuff.TimeoutCollector // view -> TimeoutCollector - collectorFactory hotstuff.TimeoutCollectorFactory // factor for creating collectors + log zerolog.Logger + metrics module.HotstuffMetrics + lock sync.RWMutex + lowestRetainedView uint64 // lowest view, for which we still retain a TimeoutCollector and process timeouts + newestViewCachedCollector uint64 // highest view, for which we have created a TimeoutCollector + collectors map[uint64]hotstuff.TimeoutCollector // view -> TimeoutCollector + collectorFactory hotstuff.TimeoutCollectorFactory // factor for creating collectors } var _ hotstuff.TimeoutCollectors = (*TimeoutCollectors)(nil) -func NewTimeoutCollectors(log zerolog.Logger, lowestRetainedView uint64, collectorFactory hotstuff.TimeoutCollectorFactory) *TimeoutCollectors { +func NewTimeoutCollectors(log zerolog.Logger, metrics module.HotstuffMetrics, lowestRetainedView uint64, collectorFactory hotstuff.TimeoutCollectorFactory) *TimeoutCollectors { return &TimeoutCollectors{ - log: log.With().Str("component", "timeout_collectors").Logger(), - lowestRetainedView: lowestRetainedView, - collectors: make(map[uint64]hotstuff.TimeoutCollector), - collectorFactory: collectorFactory, + log: log.With().Str("component", "timeout_collectors").Logger(), + metrics: metrics, + lowestRetainedView: lowestRetainedView, + newestViewCachedCollector: lowestRetainedView, + collectors: make(map[uint64]hotstuff.TimeoutCollector), + collectorFactory: collectorFactory, } } @@ -70,8 +74,15 @@ func (t *TimeoutCollectors) GetOrCreateCollector(view uint64) (hotstuff.TimeoutC return clr, false, nil } t.collectors[view] = collector + if t.newestViewCachedCollector < view { + 
t.newestViewCachedCollector = view + } + lowestRetainedView := t.lowestRetainedView + numCollectors := len(t.collectors) + newestViewCachedCollector := t.newestViewCachedCollector t.lock.Unlock() + t.metrics.TimeoutCollectorsRange(lowestRetainedView, newestViewCachedCollector, numCollectors) t.log.Info().Uint64("view", view).Msg("timeout collector has been created") return collector, true, nil } @@ -97,13 +108,14 @@ func (t *TimeoutCollectors) getCollector(view uint64) (hotstuff.TimeoutCollector // kept and the method call is a NoOp. func (t *TimeoutCollectors) PruneUpToView(lowestRetainedView uint64) { t.lock.Lock() - defer t.lock.Unlock() if t.lowestRetainedView >= lowestRetainedView { + t.lock.Unlock() return } sizeBefore := len(t.collectors) if sizeBefore == 0 { t.lowestRetainedView = lowestRetainedView + t.lock.Unlock() return } @@ -124,11 +136,15 @@ func (t *TimeoutCollectors) PruneUpToView(lowestRetainedView uint64) { } from := t.lowestRetainedView t.lowestRetainedView = lowestRetainedView + numCollectors := len(t.collectors) + newestViewCachedCollector := t.newestViewCachedCollector + t.lock.Unlock() + t.metrics.TimeoutCollectorsRange(lowestRetainedView, newestViewCachedCollector, numCollectors) t.log.Debug(). Uint64("prior_lowest_retained_view", from). Uint64("lowest_retained_view", lowestRetainedView). Int("prior_size", sizeBefore). - Int("size", len(t.collectors)). + Int("size", numCollectors). Msgf("pruned timeout collectors") } diff --git a/consensus/hotstuff/timeoutaggregator/timeout_collectors_test.go b/consensus/hotstuff/timeoutaggregator/timeout_collectors_test.go index 66252c6e065..ef19cfce01d 100644 --- a/consensus/hotstuff/timeoutaggregator/timeout_collectors_test.go +++ b/consensus/hotstuff/timeoutaggregator/timeout_collectors_test.go @@ -17,6 +17,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/module/mempool" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" ) @@ -54,7 +55,7 @@ func (s *TimeoutCollectorsTestSuite) SetupTest() { } return fmt.Errorf("mocked collector %v not found: %w", view, factoryError) }).Maybe() - s.collectors = NewTimeoutCollectors(unittest.Logger(), s.lowestView, s.factoryMethod) + s.collectors = NewTimeoutCollectors(unittest.Logger(), metrics.NewNoopCollector(), s.lowestView, s.factoryMethod) } func (s *TimeoutCollectorsTestSuite) TearDownTest() { diff --git a/consensus/hotstuff/timeoutcollector/aggregation.go b/consensus/hotstuff/timeoutcollector/aggregation.go index 4a2c3ce5b2b..6d68c245707 100644 --- a/consensus/hotstuff/timeoutcollector/aggregation.go +++ b/consensus/hotstuff/timeoutcollector/aggregation.go @@ -4,11 +4,12 @@ import ( "fmt" "sync" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/consensus/hotstuff/verification" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/flow" msig "github.com/onflow/flow-go/module/signature" ) @@ -63,7 +64,7 @@ var _ hotstuff.TimeoutSignatureAggregator = (*TimeoutSignatureAggregator)(nil) // signature aggregation task in the protocol. 
func NewTimeoutSignatureAggregator( view uint64, // view for which we are aggregating signatures - ids flow.IdentityList, // list of all authorized signers + ids flow.IdentitySkeletonList, // list of all authorized signers dsTag string, // domain separation tag used by the signature ) (*TimeoutSignatureAggregator, error) { if len(ids) == 0 { @@ -81,7 +82,7 @@ func NewTimeoutSignatureAggregator( for _, id := range ids { idToInfo[id.NodeID] = signerInfo{ pk: id.StakingPubKey, - weight: id.Weight, + weight: id.InitialWeight, } } diff --git a/consensus/hotstuff/timeoutcollector/aggregation_test.go b/consensus/hotstuff/timeoutcollector/aggregation_test.go index 8adc1cacccc..93eb0774d0a 100644 --- a/consensus/hotstuff/timeoutcollector/aggregation_test.go +++ b/consensus/hotstuff/timeoutcollector/aggregation_test.go @@ -5,11 +5,10 @@ import ( "sync" "testing" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" - "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/consensus/hotstuff/verification" @@ -21,7 +20,7 @@ import ( // createAggregationData is a helper which creates fixture data for testing func createAggregationData(t *testing.T, signersNumber int) ( *TimeoutSignatureAggregator, - flow.IdentityList, + flow.IdentitySkeletonList, []crypto.PublicKey, []crypto.Signature, []hotstuff.TimeoutSignerInfo, @@ -37,14 +36,14 @@ func createAggregationData(t *testing.T, signersNumber int) ( hashers := make([]hash.Hasher, 0, signersNumber) // create keys, identities and signatures - ids := make([]*flow.Identity, 0, signersNumber) + ids := make(flow.IdentitySkeletonList, 0, signersNumber) pks := make([]crypto.PublicKey, 0, signersNumber) view := 10 + uint64(rand.Uint32()) for i := 0; i < signersNumber; i++ { - sk := unittest.PrivateKeyFixture(crypto.BLSBLS12381, crypto.KeyGenSeedMinLen) + sk := unittest.PrivateKeyFixture(crypto.BLSBLS12381) identity := unittest.IdentityFixture(unittest.WithStakingPubKey(sk.PublicKey())) // id - ids = append(ids, identity) + ids = append(ids, &identity.IdentitySkeleton) // keys newestQCView := uint64(rand.Intn(int(view))) msg := verification.MakeTimeoutMessage(view, newestQCView) @@ -71,13 +70,13 @@ func createAggregationData(t *testing.T, signersNumber int) ( func TestNewTimeoutSignatureAggregator(t *testing.T) { tag := "random_tag" - sk := unittest.PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen) + sk := unittest.PrivateKeyFixture(crypto.ECDSAP256) signer := unittest.IdentityFixture(unittest.WithStakingPubKey(sk.PublicKey())) // wrong key type - _, err := NewTimeoutSignatureAggregator(0, flow.IdentityList{signer}, tag) + _, err := NewTimeoutSignatureAggregator(0, flow.IdentitySkeletonList{&signer.IdentitySkeleton}, tag) require.Error(t, err) // empty signers - _, err = NewTimeoutSignatureAggregator(0, flow.IdentityList{}, tag) + _, err = NewTimeoutSignatureAggregator(0, flow.IdentitySkeletonList{}, tag) require.Error(t, err) } @@ -102,7 +101,7 @@ func TestTimeoutSignatureAggregator_HappyPath(t *testing.T) { // ignore weight as comparing against expected weight is not thread safe require.NoError(t, err) }(i, sig) - expectedWeight += ids[i+subSet].Weight + expectedWeight += ids[i+subSet].InitialWeight } wg.Wait() @@ -118,7 +117,7 @@ func TestTimeoutSignatureAggregator_HappyPath(t *testing.T) { for i, sig := range sigs[:subSet] { weight, err := 
aggregator.VerifyAndAdd(ids[i].NodeID, sig, signersData[i].NewestQCView) require.NoError(t, err) - expectedWeight += ids[i].Weight + expectedWeight += ids[i].InitialWeight require.Equal(t, expectedWeight, weight) // test TotalWeight require.Equal(t, expectedWeight, aggregator.TotalWeight()) @@ -154,7 +153,7 @@ func TestTimeoutSignatureAggregator_VerifyAndAdd(t *testing.T) { // add signatures for i, sig := range sigs { weight, err := aggregator.VerifyAndAdd(ids[i].NodeID, sig, signersInfo[i].NewestQCView) - expectedWeight += ids[i].Weight + expectedWeight += ids[i].InitialWeight require.Equal(t, expectedWeight, weight) require.NoError(t, err) } @@ -192,7 +191,7 @@ func TestTimeoutSignatureAggregator_Aggregate(t *testing.T) { var err error aggregator, ids, pks, sigs, signersInfo, msgs, hashers := createAggregationData(t, signersNum) // replace sig with random one - sk := unittest.PrivateKeyFixture(crypto.BLSBLS12381, crypto.KeyGenSeedMinLen) + sk := unittest.PrivateKeyFixture(crypto.BLSBLS12381) sigs[0], err = sk.Sign([]byte("dummy"), hashers[0]) require.NoError(t, err) @@ -205,7 +204,7 @@ func TestTimeoutSignatureAggregator_Aggregate(t *testing.T) { for i, sig := range sigs { weight, err := aggregator.VerifyAndAdd(ids[i].NodeID, sig, signersInfo[i].NewestQCView) if err == nil { - expectedWeight += ids[i].Weight + expectedWeight += ids[i].InitialWeight } require.Equal(t, expectedWeight, weight) } diff --git a/consensus/hotstuff/timeoutcollector/timeout_cache.go b/consensus/hotstuff/timeoutcollector/timeout_cache.go index e98df27d022..f2030ffb89e 100644 --- a/consensus/hotstuff/timeoutcollector/timeout_cache.go +++ b/consensus/hotstuff/timeoutcollector/timeout_cache.go @@ -20,9 +20,9 @@ var ( // view. The cache memorizes the order in which the timeouts were received. Timeouts // are de-duplicated based on the following rules: // - For each voter (i.e. SignerID), we store the _first_ timeout t0. -// - For any subsequent timeout t, we check whether t.ID() == t0.ID(). +// - For any subsequent timeout t, we check whether t equals t0. // If this is the case, we consider the timeout a duplicate and drop it. -// If t and t0 have different checksums, the voter is equivocating, and +// If t and t0 have different contents, the voter is equivocating, and // we return a model.DoubleTimeoutError. type TimeoutObjectsCache struct { lock sync.RWMutex @@ -57,16 +57,14 @@ func (vc *TimeoutObjectsCache) AddTimeoutObject(timeout *model.TimeoutObject) er // De-duplicated timeouts based on the following rules: // * For each voter (i.e. SignerID), we store the _first_ t0. - // * For any subsequent timeout t, we check whether t.ID() == t0.ID(). + // * For any subsequent timeout t, we check whether t equals t0. // If this is the case, we consider the timeout a duplicate and drop it. - // If t and t0 have different checksums, the voter is equivocating, and + // If t and t0 have different contents, the voter is equivocating, and // we return a model.DoubleTimeoutError. firstTimeout, exists := vc.timeouts[timeout.SignerID] if exists { vc.lock.Unlock() - // TODO: once we have signer indices, implement Equals methods for QC, TC - // and TimeoutObjects, to avoid the comparatively very expensive ID computation. 
- if firstTimeout.ID() != timeout.ID() { + if !firstTimeout.Equals(timeout) { return model.NewDoubleTimeoutErrorf(firstTimeout, timeout, "detected timeout equivocation by replica %x at view: %d", timeout.SignerID, vc.view) } return ErrRepeatedTimeout diff --git a/consensus/hotstuff/timeoutcollector/timeout_collector.go b/consensus/hotstuff/timeoutcollector/timeout_collector.go index 90541a1a0c1..a9ab959d4fa 100644 --- a/consensus/hotstuff/timeoutcollector/timeout_collector.go +++ b/consensus/hotstuff/timeoutcollector/timeout_collector.go @@ -8,7 +8,7 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/engine/consensus/sealing/counters" + "github.com/onflow/flow-go/module/counters" ) // TimeoutCollector implements logic for collecting timeout objects. Performs deduplication, caching and processing @@ -20,8 +20,8 @@ type TimeoutCollector struct { timeoutsCache *TimeoutObjectsCache // cache for tracking double timeout and timeout equivocation notifier hotstuff.TimeoutAggregationConsumer processor hotstuff.TimeoutProcessor - newestReportedQC counters.StrictMonotonousCounter // view of newest QC that was reported - newestReportedTC counters.StrictMonotonousCounter // view of newest TC that was reported + newestReportedQC counters.StrictMonotonicCounter // view of newest QC that was reported + newestReportedTC counters.StrictMonotonicCounter // view of newest TC that was reported } var _ hotstuff.TimeoutCollector = (*TimeoutCollector)(nil) @@ -40,8 +40,8 @@ func NewTimeoutCollector(log zerolog.Logger, notifier: notifier, timeoutsCache: NewTimeoutObjectsCache(view), processor: processor, - newestReportedQC: counters.NewMonotonousCounter(0), - newestReportedTC: counters.NewMonotonousCounter(0), + newestReportedQC: counters.NewMonotonicCounter(0), + newestReportedTC: counters.NewMonotonicCounter(0), } } @@ -64,12 +64,18 @@ func (c *TimeoutCollector) AddTimeout(timeout *model.TimeoutObject) error { c.notifier.OnDoubleTimeoutDetected(doubleTimeoutErr.FirstTimeout, doubleTimeoutErr.ConflictingTimeout) return nil } - return fmt.Errorf("internal error adding timeout %v to cache for view: %d: %w", timeout.ID(), timeout.View, err) + return fmt.Errorf("internal error adding timeout to cache: %s: %w", + timeout.String(), + err, + ) } err = c.processTimeout(timeout) if err != nil { - return fmt.Errorf("internal error processing TO %v for view: %d: %w", timeout.ID(), timeout.View, err) + return fmt.Errorf("internal error processing TO: %s: %w", + timeout.String(), + err, + ) } return nil } @@ -96,7 +102,7 @@ func (c *TimeoutCollector) processTimeout(timeout *model.TimeoutObject) error { // * Over larger time scales, the emitted events are for statistically increasing views. // * However, on short time scales there are _no_ monotonicity guarantees w.r.t. the views. // Explanation: - // While only QCs with strict monotonously increasing views pass the + // While only QCs with strict monotonically increasing views pass the // `if c.newestReportedQC.Set(timeout.NewestQC.View)` statement, we emit the notification in a separate // step. Therefore, emitting the notifications is subject to races, where on very short time-scales // the notifications can be out of order.
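The StrictMonotonicCounter rename above is behavior-preserving: judging from the `if c.newestReportedQC.Set(timeout.NewestQC.View)` usage in the comment, Set returns true only when the new value strictly exceeds the stored one, which is what lets the collector report each newest QC/TC view at most once. Below is a minimal illustrative sketch of that gating, assuming the counters package keeps this Set contract; it is not part of the diff.

package main

import (
	"fmt"

	"github.com/onflow/flow-go/module/counters"
)

func main() {
	newestReportedQC := counters.NewMonotonicCounter(0)
	for _, qcView := range []uint64{7, 5, 9, 9, 12} {
		// mirrors `if c.newestReportedQC.Set(timeout.NewestQC.View)` above:
		// 5 (stale) and the repeated 9 (duplicate) fail the strict-increase check
		if newestReportedQC.Set(qcView) {
			fmt.Printf("reporting newest QC view %d\n", qcView)
		}
	}
	// prints views 7, 9, 12
}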
diff --git a/consensus/hotstuff/timeoutcollector/timeout_collector_test.go b/consensus/hotstuff/timeoutcollector/timeout_collector_test.go index 3c83801cf72..f30b953c1cf 100644 --- a/consensus/hotstuff/timeoutcollector/timeout_collector_test.go +++ b/consensus/hotstuff/timeoutcollector/timeout_collector_test.go @@ -172,7 +172,6 @@ func (s *TimeoutCollectorTestSuite) TestAddTimeout_TONotifications() { expectedHighestQC := timeouts[len(timeouts)-1].NewestQC // shuffle timeouts in random order - rand.Seed(time.Now().UnixNano()) rand.Shuffle(len(timeouts), func(i, j int) { timeouts[i], timeouts[j] = timeouts[j], timeouts[i] }) diff --git a/consensus/hotstuff/timeoutcollector/timeout_processor.go b/consensus/hotstuff/timeoutcollector/timeout_processor.go index 60f0e785359..d266558d421 100644 --- a/consensus/hotstuff/timeoutcollector/timeout_processor.go +++ b/consensus/hotstuff/timeoutcollector/timeout_processor.go @@ -12,7 +12,6 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/consensus/hotstuff/tracker" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/signature" ) @@ -266,8 +265,8 @@ func (p *TimeoutProcessor) buildTC() (*flow.TimeoutCertificate, error) { // we need to canonically order the respective `newestQCView`, so we can properly map signer to `newestQCView` after decoding. // sort data in canonical order - slices.SortFunc(signersData, func(lhs, rhs hotstuff.TimeoutSignerInfo) bool { - return order.IdentifierCanonical(lhs.Signer, rhs.Signer) + slices.SortFunc(signersData, func(lhs, rhs hotstuff.TimeoutSignerInfo) int { + return flow.IdentifierCanonical(lhs.Signer, rhs.Signer) }) // extract signers and data separately @@ -291,13 +290,20 @@ func (p *TimeoutProcessor) buildTC() (*flow.TimeoutCertificate, error) { // than the data stored in `sigAggregator`. newestQC := p.newestQCTracker.NewestQC() - return &flow.TimeoutCertificate{ - View: p.view, - NewestQCViews: newestQCViews, - NewestQC: newestQC, - SignerIndices: signerIndices, - SigData: aggregatedSig, - }, nil + tc, err := flow.NewTimeoutCertificate( + flow.UntrustedTimeoutCertificate{ + View: p.view, + NewestQCViews: newestQCViews, + NewestQC: newestQC, + SignerIndices: signerIndices, + SigData: aggregatedSig, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout certificate: %w", err) + } + + return tc, nil } // signerIndicesFromIdentities encodes identities into signer indices. 
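The comparator change in buildTC follows the migration of slices.SortFunc from a less(a, b) bool predicate to a three-way cmp(a, b) int, with flow.IdentifierCanonical returning a negative/zero/positive result. Here is a self-contained sketch of the same canonical-ordering step; signerInfo and bytes.Compare are illustrative stand-ins for hotstuff.TimeoutSignerInfo and flow.IdentifierCanonical.

package main

import (
	"bytes"
	"fmt"
	"slices"
)

// signerInfo is a stand-in for hotstuff.TimeoutSignerInfo.
type signerInfo struct {
	Signer       [32]byte // stand-in for flow.Identifier
	NewestQCView uint64
}

func main() {
	signersData := []signerInfo{
		{Signer: [32]byte{0x0b}, NewestQCView: 100},
		{Signer: [32]byte{0x0a}, NewestQCView: 101},
	}
	// sort in canonical order so the NewestQCViews can be matched to signers
	// after decoding, as buildTC does via flow.IdentifierCanonical
	slices.SortFunc(signersData, func(lhs, rhs signerInfo) int {
		return bytes.Compare(lhs.Signer[:], rhs.Signer[:])
	})
	for _, s := range signersData {
		fmt.Printf("signer %x -> newest QC view %d\n", s.Signer[:1], s.NewestQCView)
	}
}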
diff --git a/consensus/hotstuff/timeoutcollector/timeout_processor_test.go b/consensus/hotstuff/timeoutcollector/timeout_processor_test.go index b37188c5857..8178ff35b5b 100644 --- a/consensus/hotstuff/timeoutcollector/timeout_processor_test.go +++ b/consensus/hotstuff/timeoutcollector/timeout_processor_test.go @@ -7,6 +7,7 @@ import ( "sync" "testing" + "github.com/onflow/crypto" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -20,9 +21,7 @@ import ( hotstuffvalidator "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/verification" "github.com/onflow/flow-go/consensus/hotstuff/votecollector" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/local" msig "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/utils/unittest" @@ -36,8 +35,8 @@ func TestTimeoutProcessor(t *testing.T) { type TimeoutProcessorTestSuite struct { suite.Suite - participants flow.IdentityList - signer *flow.Identity + participants flow.IdentitySkeletonList + signer *flow.IdentitySkeleton view uint64 sigWeight uint64 totalWeight atomic.Uint64 @@ -55,7 +54,7 @@ func (s *TimeoutProcessorTestSuite) SetupTest() { s.validator = mocks.NewValidator(s.T()) s.sigAggregator = mocks.NewTimeoutSignatureAggregator(s.T()) s.notifier = mocks.NewTimeoutCollectorConsumer(s.T()) - s.participants = unittest.IdentityListFixture(11, unittest.WithWeight(s.sigWeight)).Sort(order.Canonical) + s.participants = unittest.IdentityListFixture(11, unittest.WithInitialWeight(s.sigWeight)).Sort(flow.Canonical[flow.Identity]).ToSkeleton() s.signer = s.participants[0] s.view = (uint64)(rand.Uint32() + 100) s.totalWeight = *atomic.NewUint64(0) @@ -413,7 +412,7 @@ func (s *TimeoutProcessorTestSuite) TestProcess_ConcurrentCreatingTC() { }) } // don't care about actual data - s.sigAggregator.On("Aggregate").Return(signersData, crypto.Signature{}, nil) + s.sigAggregator.On("Aggregate").Return(signersData, unittest.SignatureFixture(), nil) var startupWg, shutdownWg sync.WaitGroup @@ -462,20 +461,19 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { // signers hold objects that are created with private key and can sign votes and proposals signers := make(map[flow.Identifier]*verification.StakingSigner) // prepare staking signers, each signer has its own private/public key pair + // identities must be in canonical order stakingSigners := unittest.IdentityListFixture(11, func(identity *flow.Identity) { stakingPriv := unittest.StakingPrivKeyFixture() identity.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(identity, stakingPriv) + me, err := local.New(identity.IdentitySkeleton, stakingPriv) require.NoError(t, err) signers[identity.NodeID] = verification.NewStakingSigner(me) - }) - // identities must be in canonical order - stakingSigners = stakingSigners.Sort(order.Canonical) + }).Sort(flow.Canonical[flow.Identity]) // utility function which generates a valid timeout for every signer - createTimeouts := func(participants flow.IdentityList, view uint64, newestQC *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) []*model.TimeoutObject { + createTimeouts := func(participants flow.IdentitySkeletonList, view uint64, newestQC *flow.QuorumCertificate, lastViewTC *flow.TimeoutCertificate) []*model.TimeoutObject { timeouts := make([]*model.TimeoutObject, 0, len(participants)) for _, signer := range 
participants { timeout, err := signers[signer.NodeID].CreateTimeout(view, newestQC, lastViewTC) @@ -491,20 +489,22 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { block := helper.MakeBlock(helper.WithBlockView(view-1), helper.WithBlockProposer(leader.NodeID)) + stakingSignersSkeleton := stakingSigners.ToSkeleton() + committee := mocks.NewDynamicCommittee(t) - committee.On("IdentitiesByEpoch", mock.Anything).Return(stakingSigners, nil) + committee.On("IdentitiesByEpoch", mock.Anything).Return(stakingSignersSkeleton, nil) committee.On("IdentitiesByBlock", mock.Anything).Return(stakingSigners, nil) - committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(stakingSigners.TotalWeight()), nil) - committee.On("TimeoutThresholdForView", mock.Anything).Return(committees.WeightThresholdToTimeout(stakingSigners.TotalWeight()), nil) + committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(stakingSignersSkeleton.TotalWeight()), nil) + committee.On("TimeoutThresholdForView", mock.Anything).Return(committees.WeightThresholdToTimeout(stakingSignersSkeleton.TotalWeight()), nil) // create first QC for view N-1, this will be our olderQC - olderQC := createRealQC(t, committee, stakingSigners, signers, block) + olderQC := createRealQC(t, committee, stakingSignersSkeleton, signers, block) // now create a second QC for view N, this will be our newest QC nextBlock := helper.MakeBlock( helper.WithBlockView(view), helper.WithBlockProposer(leader.NodeID), helper.WithBlockQC(olderQC)) - newestQC := createRealQC(t, committee, stakingSigners, signers, nextBlock) + newestQC := createRealQC(t, committee, stakingSignersSkeleton, signers, nextBlock) // At this point we have created two QCs for round N-1 and N. // Next step is create a TC for view N. @@ -523,7 +523,7 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { lastViewTC = tc } - aggregator, err := NewTimeoutSignatureAggregator(view, stakingSigners, msig.CollectorTimeoutTag) + aggregator, err := NewTimeoutSignatureAggregator(view, stakingSignersSkeleton, msig.CollectorTimeoutTag) require.NoError(t, err) notifier := mocks.NewTimeoutCollectorConsumer(t) @@ -533,7 +533,7 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { require.NoError(t, err) // last view was successful, no lastViewTC in this case - timeouts := createTimeouts(stakingSigners, view, olderQC, nil) + timeouts := createTimeouts(stakingSignersSkeleton, view, olderQC, nil) for _, timeout := range timeouts { err := processor.Process(timeout) require.NoError(t, err) @@ -544,7 +544,7 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { // at this point we have created QCs for view N-1 and N additionally a TC for view N, we can create TC for view N+1 // with timeout objects containing both QC and TC for view N - aggregator, err = NewTimeoutSignatureAggregator(view+1, stakingSigners, msig.CollectorTimeoutTag) + aggregator, err = NewTimeoutSignatureAggregator(view+1, stakingSignersSkeleton, msig.CollectorTimeoutTag) require.NoError(t, err) notifier = mocks.NewTimeoutCollectorConsumer(t) @@ -555,8 +555,8 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { // part of committee will use QC, another part TC, this will result in aggregated signature consisting // of two types of messages with views N-1 and N representing the newest QC known to replicas. 
- timeoutsWithQC := createTimeouts(stakingSigners[:len(stakingSigners)/2], view+1, newestQC, nil) - timeoutsWithTC := createTimeouts(stakingSigners[len(stakingSigners)/2:], view+1, olderQC, lastViewTC) + timeoutsWithQC := createTimeouts(stakingSignersSkeleton[:len(stakingSignersSkeleton)/2], view+1, newestQC, nil) + timeoutsWithTC := createTimeouts(stakingSignersSkeleton[len(stakingSignersSkeleton)/2:], view+1, olderQC, lastViewTC) timeouts = append(timeoutsWithQC, timeoutsWithTC...) for _, timeout := range timeouts { err := processor.Process(timeout) @@ -570,13 +570,14 @@ func TestTimeoutProcessor_BuildVerifyTC(t *testing.T) { func createRealQC( t *testing.T, committee hotstuff.DynamicCommittee, - signers flow.IdentityList, + signers flow.IdentitySkeletonList, signerObjects map[flow.Identifier]*verification.StakingSigner, block *model.Block, ) *flow.QuorumCertificate { leader := signers[0] - proposal, err := signerObjects[leader.NodeID].CreateProposal(block) + leaderVote, err := signerObjects[leader.NodeID].CreateVote(block) require.NoError(t, err) + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(block))), helper.WithSigData(leaderVote.SigData)) var createdQC *flow.QuorumCertificate onQCCreated := func(qc *flow.QuorumCertificate) { diff --git a/consensus/hotstuff/validator.go b/consensus/hotstuff/validator.go index 5bcc77f1810..ff40c550a2a 100644 --- a/consensus/hotstuff/validator.go +++ b/consensus/hotstuff/validator.go @@ -24,12 +24,12 @@ type Validator interface { // During normal operations, the following error returns are expected: // * model.InvalidProposalError if the block is invalid // * model.ErrViewForUnknownEpoch if the proposal refers unknown epoch - ValidateProposal(proposal *model.Proposal) error + ValidateProposal(proposal *model.SignedProposal) error // ValidateVote checks the validity of a vote. // Returns the full entity for the voter. 
During normal operations, // the following errors are expected: // * model.InvalidVoteError for invalid votes // * model.ErrViewForUnknownEpoch if the vote refers unknown epoch - ValidateVote(vote *model.Vote) (*flow.Identity, error) + ValidateVote(vote *model.Vote) (*flow.IdentitySkeleton, error) } diff --git a/consensus/hotstuff/validator/metrics_wrapper.go b/consensus/hotstuff/validator/metrics_wrapper.go index 127ca317094..5bd2aad9bec 100644 --- a/consensus/hotstuff/validator/metrics_wrapper.go +++ b/consensus/hotstuff/validator/metrics_wrapper.go @@ -40,14 +40,14 @@ func (w ValidatorMetricsWrapper) ValidateTC(tc *flow.TimeoutCertificate) error { return err } -func (w ValidatorMetricsWrapper) ValidateProposal(proposal *model.Proposal) error { +func (w ValidatorMetricsWrapper) ValidateProposal(proposal *model.SignedProposal) error { processStart := time.Now() err := w.validator.ValidateProposal(proposal) w.metrics.ValidatorProcessingDuration(time.Since(processStart)) return err } -func (w ValidatorMetricsWrapper) ValidateVote(vote *model.Vote) (*flow.Identity, error) { +func (w ValidatorMetricsWrapper) ValidateVote(vote *model.Vote) (*flow.IdentitySkeleton, error) { processStart := time.Now() identity, err := w.validator.ValidateVote(vote) w.metrics.ValidatorProcessingDuration(time.Since(processStart)) diff --git a/consensus/hotstuff/validator/validator.go b/consensus/hotstuff/validator/validator.go index b9cafdc5d89..54490b0f453 100644 --- a/consensus/hotstuff/validator/validator.go +++ b/consensus/hotstuff/validator/validator.go @@ -201,12 +201,16 @@ func (v *Validator) ValidateQC(qc *flow.QuorumCertificate) error { // - model.ErrViewForUnknownEpoch if the proposal refers unknown epoch // // Any other error should be treated as exception -func (v *Validator) ValidateProposal(proposal *model.Proposal) error { +func (v *Validator) ValidateProposal(proposal *model.SignedProposal) error { qc := proposal.Block.QC block := proposal.Block // validate the proposer's vote and get his identity - _, err := v.ValidateVote(proposal.ProposerVote()) + vote, err := proposal.ProposerVote() + if err != nil { + return fmt.Errorf("could not extract proposer vote from proposal: %w", err) + } + _, err = v.ValidateVote(vote) if model.IsInvalidVoteError(err) { return model.NewInvalidProposalErrorf(proposal, "invalid proposer signature: %w", err) } @@ -294,7 +298,7 @@ func (v *Validator) ValidateProposal(proposal *model.Proposal) error { // - model.ErrViewForUnknownEpoch if the vote refers unknown epoch // // Any other error should be treated as exception -func (v *Validator) ValidateVote(vote *model.Vote) (*flow.Identity, error) { +func (v *Validator) ValidateVote(vote *model.Vote) (*flow.IdentitySkeleton, error) { voter, err := v.committee.IdentityByEpoch(vote.View, vote.SignerID) if model.IsInvalidSignerError(err) { return nil, newInvalidVoteError(vote, err) } @@ -315,7 +319,7 @@ func (v *Validator) ValidateVote(vote *model.Vote) (*flow.Identity, error) { return nil, newInvalidVoteError(vote, err) } if errors.Is(err, model.ErrViewForUnknownEpoch) { - return nil, fmt.Errorf("no Epoch information availalbe for vote; symptom of internal bug or invalid bootstrapping information: %s", err.Error()) + return nil, fmt.Errorf("no Epoch information available for vote; symptom of internal bug or invalid bootstrapping information: %s", err.Error()) } return nil, fmt.Errorf("cannot verify signature for vote (%x): %w", vote.ID(), err) } diff --git a/consensus/hotstuff/validator/validator_test.go
b/consensus/hotstuff/validator/validator_test.go index ea41778c259..725aedac470 100644 --- a/consensus/hotstuff/validator/validator_test.go +++ b/consensus/hotstuff/validator/validator_test.go @@ -5,7 +5,6 @@ import ( "fmt" "math/rand" "testing" - "time" "github.com/onflow/flow-go/module/signature" @@ -31,14 +30,14 @@ type ProposalSuite struct { suite.Suite participants flow.IdentityList indices []byte - leader *flow.Identity + leader *flow.IdentitySkeleton finalized uint64 parent *model.Block block *model.Block - voters flow.IdentityList - proposal *model.Proposal + voters flow.IdentitySkeletonList + proposal *model.SignedProposal vote *model.Vote - voter *flow.Identity + voter *flow.IdentitySkeleton committee *mocks.Replicas verifier *mocks.Verifier validator *Validator @@ -46,10 +45,9 @@ type ProposalSuite struct { func (ps *ProposalSuite) SetupTest() { // the leader is a random node for now - rand.Seed(time.Now().UnixNano()) ps.finalized = uint64(rand.Uint32() + 1) - ps.participants = unittest.IdentityListFixture(8, unittest.WithRole(flow.RoleConsensus)) - ps.leader = ps.participants[0] + ps.participants = unittest.IdentityListFixture(8, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]) + ps.leader = &ps.participants[0].IdentitySkeleton // the parent is the last finalized block, followed directly by a block from the leader ps.parent = helper.MakeBlock( @@ -71,23 +69,25 @@ func (ps *ProposalSuite) SetupTest() { voterIDs, err := signature.DecodeSignerIndicesToIdentifiers(ps.participants.NodeIDs(), ps.block.QC.SignerIndices) require.NoError(ps.T(), err) - ps.voters = ps.participants.Filter(filter.HasNodeID(voterIDs...)) - ps.proposal = &model.Proposal{Block: ps.block} - ps.vote = ps.proposal.ProposerVote() + ps.voters = ps.participants.Filter(filter.HasNodeID[flow.Identity](voterIDs...)).ToSkeleton() + ps.proposal = helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(ps.block)))) + vote, err := ps.proposal.ProposerVote() + require.NoError(ps.T(), err) + ps.vote = vote ps.voter = ps.leader // set up the mocked hotstuff Replicas state ps.committee = &mocks.Replicas{} ps.committee.On("LeaderForView", ps.block.View).Return(ps.leader.NodeID, nil) - ps.committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(ps.participants.TotalWeight()), nil) + ps.committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(ps.participants.ToSkeleton().TotalWeight()), nil) ps.committee.On("IdentitiesByEpoch", mock.Anything).Return( - func(_ uint64) flow.IdentityList { - return ps.participants + func(_ uint64) flow.IdentitySkeletonList { + return ps.participants.ToSkeleton() }, nil, ) for _, participant := range ps.participants { - ps.committee.On("IdentityByEpoch", mock.Anything, participant.NodeID).Return(participant, nil) + ps.committee.On("IdentityByEpoch", mock.Anything, participant.NodeID).Return(&participant.IdentitySkeleton, nil) } // set up the mocked verifier @@ -154,7 +154,7 @@ func (ps *ProposalSuite) TestProposalWrongLeader() { // change the hotstuff.Replicas to return a different leader *ps.committee = mocks.Replicas{} ps.committee.On("LeaderForView", ps.block.View).Return(ps.participants[1].NodeID, nil) - for _, participant := range ps.participants { + for _, participant := range ps.participants.ToSkeleton() { ps.committee.On("IdentityByEpoch", mock.Anything, participant.NodeID).Return(participant, nil) } @@ -258,7 +258,7 @@ func (ps *ProposalSuite) 
TestProposalWithLastViewTC() {
	ps.committee.On("LeaderForView", mock.Anything).Return(ps.leader.NodeID, nil)
	ps.Run("happy-path", func() {
-		proposal := helper.MakeProposal(
+		proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
			helper.WithBlock(helper.MakeBlock(
				helper.WithBlockView(ps.block.View+2),
				helper.WithBlockProposer(ps.leader.NodeID),
@@ -269,14 +269,14 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
				helper.WithTCSigners(ps.indices),
				helper.WithTCView(ps.block.View+1),
				helper.WithTCNewestQC(ps.block.QC))),
-		)
+		)))
		ps.verifier.On("VerifyTC", ps.voters, []byte(proposal.LastViewTC.SigData),
			proposal.LastViewTC.View, proposal.LastViewTC.NewestQCViews).Return(nil).Once()
		err := ps.validator.ValidateProposal(proposal)
		require.NoError(ps.T(), err)
	})
	ps.Run("no-tc", func() {
-		proposal := helper.MakeProposal(
+		proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
			helper.WithBlock(helper.MakeBlock(
				helper.WithBlockView(ps.block.View+2),
				helper.WithBlockProposer(ps.leader.NodeID),
@@ -284,14 +284,14 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
				helper.WithBlockQC(ps.block.QC)),
			),
			// in this case proposal without LastViewTC is considered invalid
-		)
+		)))
		err := ps.validator.ValidateProposal(proposal)
		require.True(ps.T(), model.IsInvalidProposalError(err))
		ps.verifier.AssertNotCalled(ps.T(), "VerifyQC")
		ps.verifier.AssertNotCalled(ps.T(), "VerifyTC")
	})
	ps.Run("tc-for-wrong-view", func() {
-		proposal := helper.MakeProposal(
+		proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
			helper.WithBlock(helper.MakeBlock(
				helper.WithBlockView(ps.block.View+2),
				helper.WithBlockProposer(ps.leader.NodeID),
@@ -302,14 +302,14 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
				helper.WithTCSigners(ps.indices),
				helper.WithTCView(ps.block.View+10), // LastViewTC.View must be equal to Block.View-1
				helper.WithTCNewestQC(ps.block.QC))),
-		)
+		)))
		err := ps.validator.ValidateProposal(proposal)
		require.True(ps.T(), model.IsInvalidProposalError(err))
		ps.verifier.AssertNotCalled(ps.T(), "VerifyQC")
		ps.verifier.AssertNotCalled(ps.T(), "VerifyTC")
	})
	ps.Run("proposal-not-safe-to-extend", func() {
-		proposal := helper.MakeProposal(
+		proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
			helper.WithBlock(helper.MakeBlock(
				helper.WithBlockView(ps.block.View+2),
				helper.WithBlockProposer(ps.leader.NodeID),
@@ -321,14 +321,14 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
				helper.WithTCView(ps.block.View+1),
				// proposal is not safe to extend because included QC.View is higher than Block.QC.View
				helper.WithTCNewestQC(helper.MakeQC(helper.WithQCView(ps.block.View+1))))),
-		)
+		)))
		err := ps.validator.ValidateProposal(proposal)
		require.True(ps.T(), model.IsInvalidProposalError(err))
		ps.verifier.AssertNotCalled(ps.T(), "VerifyQC")
		ps.verifier.AssertNotCalled(ps.T(), "VerifyTC")
	})
	ps.Run("included-tc-highest-qc-not-highest", func() {
-		proposal := helper.MakeProposal(
+		proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
			helper.WithBlock(helper.MakeBlock(
				helper.WithBlockView(ps.block.View+2),
				helper.WithBlockProposer(ps.leader.NodeID),
@@ -340,7 +340,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
				helper.WithTCView(ps.block.View+1),
				helper.WithTCNewestQC(ps.block.QC),
			)),
-		)
+		)))
		ps.verifier.On("VerifyTC", ps.voters, []byte(proposal.LastViewTC.SigData),
			proposal.LastViewTC.View, mock.Anything).Return(nil).Once()
@@ -354,7 +354,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
		// TC is signed by only one signer - insufficient to reach weight threshold
		insufficientSignerIndices, err := signature.EncodeSignersToIndices(ps.participants.NodeIDs(), ps.participants.NodeIDs()[:1])
		require.NoError(ps.T(), err)
-		proposal := helper.MakeProposal(
+		proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
			helper.WithBlock(helper.MakeBlock(
				helper.WithBlockView(ps.block.View+2),
				helper.WithBlockProposer(ps.leader.NodeID),
@@ -366,7 +366,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
				helper.WithTCView(ps.block.View+1),
				helper.WithTCNewestQC(ps.block.QC),
			)),
-		)
+		)))
		err = ps.validator.ValidateProposal(proposal)
		require.True(ps.T(), model.IsInvalidProposalError(err) && model.IsInvalidTCError(err))
		ps.verifier.AssertNotCalled(ps.T(), "VerifyTC")
@@ -377,7 +377,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
			helper.WithQCView(ps.block.QC.View-1),
			helper.WithQCSigners(ps.indices))
-		proposal := helper.MakeProposal(
+		proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
			helper.WithBlock(helper.MakeBlock(
				helper.WithBlockView(ps.block.View+2),
				helper.WithBlockProposer(ps.leader.NodeID),
@@ -388,7 +388,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
				helper.WithTCSigners(ps.indices),
				helper.WithTCView(ps.block.View+1),
				helper.WithTCNewestQC(qc))),
-		)
+		)))
		ps.verifier.On("VerifyTC", ps.voters, []byte(proposal.LastViewTC.SigData),
			proposal.LastViewTC.View, proposal.LastViewTC.NewestQCViews).Return(nil).Once()
		ps.verifier.On("VerifyQC", ps.voters, qc.SigData,
@@ -401,7 +401,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
			helper.WithQCView(ps.block.QC.View-2),
			helper.WithQCSigners(ps.indices))
-		proposal := helper.MakeProposal(
+		proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
			helper.WithBlock(helper.MakeBlock(
				helper.WithBlockView(ps.block.View+2),
				helper.WithBlockProposer(ps.leader.NodeID),
@@ -412,7 +412,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
				helper.WithTCSigners(ps.indices),
				helper.WithTCView(ps.block.View+1),
				helper.WithTCNewestQC(newestQC))),
-		)
+		)))
		ps.verifier.On("VerifyTC", ps.voters, []byte(proposal.LastViewTC.SigData),
			proposal.LastViewTC.View, proposal.LastViewTC.NewestQCViews).Return(nil).Once()
		// Validating QC included in TC returns ErrViewForUnknownEpoch
@@ -425,7 +425,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
		require.NotErrorIs(ps.T(), err, model.ErrViewForUnknownEpoch)
	})
	ps.Run("included-tc-invalid-sig", func() {
-		proposal := helper.MakeProposal(
+		proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
			helper.WithBlock(helper.MakeBlock(
				helper.WithBlockView(ps.block.View+2),
				helper.WithBlockProposer(ps.leader.NodeID),
@@ -436,7 +436,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
				helper.WithTCSigners(ps.indices),
				helper.WithTCView(ps.block.View+1),
				helper.WithTCNewestQC(ps.block.QC))),
-		)
+		)))
		ps.verifier.On("VerifyTC", ps.voters, []byte(proposal.LastViewTC.SigData),
			proposal.LastViewTC.View, proposal.LastViewTC.NewestQCViews).Return(model.ErrInvalidSignature).Once()
		err := ps.validator.ValidateProposal(proposal)
@@ -445,7 +445,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
			proposal.LastViewTC.View, proposal.LastViewTC.NewestQCViews)
	})
	ps.Run("last-view-successful-but-includes-tc", func() {
-		proposal := helper.MakeProposal(
+		proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
			helper.WithBlock(helper.MakeBlock(
				helper.WithBlockView(ps.finalized+1),
				helper.WithBlockProposer(ps.leader.NodeID),
@@ -453,7 +453,7 @@ func (ps *ProposalSuite) TestProposalWithLastViewTC() {
				helper.WithParentBlock(ps.parent)),
			),
			helper.WithLastViewTC(helper.MakeTC()),
-		)
+		)))
		err := ps.validator.ValidateProposal(proposal)
		require.True(ps.T(), model.IsInvalidProposalError(err))
		ps.verifier.AssertNotCalled(ps.T(), "VerifyTC")
@@ -467,7 +467,7 @@ func TestValidateVote(t *testing.T) {
 type VoteSuite struct {
 	suite.Suite
-	signer   *flow.Identity
+	signer   *flow.IdentitySkeleton
 	block    *model.Block
 	vote     *model.Vote
 	verifier *mocks.Verifier
@@ -478,7 +478,7 @@ type VoteSuite struct {
 func (vs *VoteSuite) SetupTest() {
 	// create a random signing identity
-	vs.signer = unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus))
+	vs.signer = &unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)).IdentitySkeleton
 	// create a block that should be signed
 	vs.block = helper.MakeBlock()
@@ -572,8 +572,8 @@ func TestValidateQC(t *testing.T) {
 type QCSuite struct {
 	suite.Suite
-	participants flow.IdentityList
-	signers      flow.IdentityList
+	participants flow.IdentitySkeletonList
+	signers      flow.IdentitySkeletonList
 	block        *model.Block
 	qc           *flow.QuorumCertificate
 	committee    *mocks.Replicas
@@ -585,8 +585,8 @@ func (qs *QCSuite) SetupTest() {
 	// create a list of 10 nodes with 1-weight each
 	qs.participants = unittest.IdentityListFixture(10,
 		unittest.WithRole(flow.RoleConsensus),
-		unittest.WithWeight(1),
-	)
+		unittest.WithInitialWeight(1),
+	).Sort(flow.Canonical[flow.Identity]).ToSkeleton()
 	// signers are a qualified majority at 7
 	qs.signers = qs.participants[:7]
@@ -601,7 +601,7 @@ func (qs *QCSuite) SetupTest() {
 	// return the correct participants and identities from view state
 	qs.committee = &mocks.Replicas{}
 	qs.committee.On("IdentitiesByEpoch", mock.Anything).Return(
-		func(_ uint64) flow.IdentityList {
+		func(_ uint64) flow.IdentitySkeletonList {
 			return qs.participants
 		},
 		nil,
@@ -728,8 +728,8 @@ func TestValidateTC(t *testing.T) {
 type TCSuite struct {
 	suite.Suite
-	participants flow.IdentityList
-	signers      flow.IdentityList
+	participants flow.IdentitySkeletonList
+	signers      flow.IdentitySkeletonList
 	indices      []byte
 	block        *model.Block
 	tc           *flow.TimeoutCertificate
@@ -743,8 +743,8 @@ func (s *TCSuite) SetupTest() {
 	// create a list of 10 nodes with 1-weight each
 	s.participants = unittest.IdentityListFixture(10,
 		unittest.WithRole(flow.RoleConsensus),
-		unittest.WithWeight(1),
-	)
+		unittest.WithInitialWeight(1),
+	).Sort(flow.Canonical[flow.Identity]).ToSkeleton()
 	// signers are a qualified majority at 7
 	s.signers = s.participants[:7]
@@ -753,7 +753,6 @@ func (s *TCSuite) SetupTest() {
 	s.indices, err = signature.EncodeSignersToIndices(s.participants.NodeIDs(), s.signers.NodeIDs())
 	require.NoError(s.T(), err)
-	rand.Seed(time.Now().UnixNano())
 	view := uint64(int(rand.Uint32()) + len(s.participants))
 	highQCViews := make([]uint64, 0, len(s.signers))
@@ -778,7 +777,7 @@ func (s *TCSuite) SetupTest() {
 	// return the correct participants and identities from view state
 	s.committee = &mocks.DynamicCommittee{}
 	s.committee.On("IdentitiesByEpoch", mock.Anything, mock.Anything).Return(
-		func(view uint64) flow.IdentityList {
+		func(view uint64) flow.IdentitySkeletonList {
 			return s.participants
 		},
 		nil,
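The pattern repeated throughout these test updates wraps the old proposal fixture in a signed-proposal fixture, reflecting the split of `model.Proposal` into an unsigned proposal plus a `model.SignedProposal` carrying the proposer signature. A minimal sketch of the new construction, assuming only the `helper` options shown in the hunks above and a surrounding test context providing `validator`, `leaderID`, `view`, and `parentQC`:

```go
// Sketch only: how the updated tests build a SignedProposal fixture.
// helper.MakeSignedProposal wraps an unsigned Proposal; WithProposal supplies it.
proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(
	helper.WithBlock(helper.MakeBlock(
		helper.WithBlockView(view+1),       // view the proposal is made in
		helper.WithBlockProposer(leaderID), // must match the committee's leader for that view
		helper.WithBlockQC(parentQC),       // QC for the parent block
	)),
)))
err := validator.ValidateProposal(proposal) // SignedProposal is what the validator now accepts
```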
diff --git a/consensus/hotstuff/verification/combined_signer_v2.go b/consensus/hotstuff/verification/combined_signer_v2.go
index a7aa3fd6b5a..0879d9ee722 100644
--- a/consensus/hotstuff/verification/combined_signer_v2.go
+++ b/consensus/hotstuff/verification/combined_signer_v2.go
@@ -4,9 +4,10 @@ import (
 	"errors"
 	"fmt"
+	"github.com/onflow/crypto/hash"
+
 	"github.com/onflow/flow-go/consensus/hotstuff"
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
-	"github.com/onflow/flow-go/crypto/hash"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
 	msig "github.com/onflow/flow-go/module/signature"
@@ -51,29 +52,6 @@ func NewCombinedSigner(
 	return sc
 }
-// CreateProposal will create a proposal with a combined signature for the given block.
-func (c *CombinedSigner) CreateProposal(block *model.Block) (*model.Proposal, error) {
-
-	// check that the block is created by us
-	if block.ProposerID != c.staking.NodeID() {
-		return nil, fmt.Errorf("can't create proposal for someone else's block")
-	}
-
-	// create the signature data
-	sigData, err := c.genSigData(block)
-	if err != nil {
-		return nil, fmt.Errorf("signing my proposal failed: %w", err)
-	}
-
-	// create the proposal
-	proposal := &model.Proposal{
-		Block:   block,
-		SigData: sigData,
-	}
-
-	return proposal, nil
-}
-
 // CreateVote will create a vote with a combined signature for the given block.
 func (c *CombinedSigner) CreateVote(block *model.Block) (*model.Vote, error) {
@@ -83,12 +61,14 @@ func (c *CombinedSigner) CreateVote(block *model.Block) (*model.Vote, error) {
 		return nil, fmt.Errorf("could not create signature: %w", err)
 	}
-	// create the vote
-	vote := &model.Vote{
+	vote, err := model.NewVote(model.UntrustedVote{
 		View:     block.View,
 		BlockID:  block.BlockID,
 		SignerID: c.staking.NodeID(),
 		SigData:  sigData,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not create vote: %w", err)
 	}
 	return vote, nil
@@ -104,13 +84,20 @@ func (c *CombinedSigner) CreateTimeout(curView uint64, newestQC *flow.QuorumCert
 		return nil, fmt.Errorf("could not generate signature for timeout object at view %d: %w", curView, err)
 	}
-	timeout := &model.TimeoutObject{
-		View:       curView,
-		NewestQC:   newestQC,
-		LastViewTC: lastViewTC,
-		SignerID:   c.staking.NodeID(),
-		SigData:    sigData,
+	timeout, err := model.NewTimeoutObject(
+		model.UntrustedTimeoutObject{
+			View:        curView,
+			NewestQC:    newestQC,
+			LastViewTC:  lastViewTC,
+			SignerID:    c.staking.NodeID(),
+			SigData:     sigData,
+			TimeoutTick: 0,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct timeout object: %w", err)
 	}
+
 	return timeout, nil
 }
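All three signer implementations now build votes through `model.NewVote`, which validates an `UntrustedVote` before handing back a trusted `*model.Vote`. A hedged sketch of the pattern (the rejected-field behavior is inferred from the validating constructor; exact error messages are not shown in this diff):

```go
// Sketch: construct-then-validate replaces the bare struct literal.
vote, err := model.NewVote(model.UntrustedVote{
	View:     block.View,
	BlockID:  block.BlockID, // must be non-zero for the constructor to accept it
	SignerID: signerID,      // must be non-zero
	SigData:  sigData,       // must be non-empty
})
if err != nil {
	return nil, fmt.Errorf("could not create vote: %w", err)
}
```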
diff --git a/consensus/hotstuff/verification/combined_signer_v2_test.go b/consensus/hotstuff/verification/combined_signer_v2_test.go
index 6947a12acf1..ce721c9333c 100644
--- a/consensus/hotstuff/verification/combined_signer_v2_test.go
+++ b/consensus/hotstuff/verification/combined_signer_v2_test.go
@@ -3,14 +3,16 @@ package verification
 import (
 	"testing"
+	"github.com/onflow/crypto"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
+	"github.com/onflow/flow-go/consensus/hotstuff"
 	"github.com/onflow/flow-go/consensus/hotstuff/mocks"
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
+	"github.com/onflow/flow-go/consensus/hotstuff/safetyrules"
 	"github.com/onflow/flow-go/consensus/hotstuff/signature"
-	"github.com/onflow/flow-go/crypto"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/module/local"
@@ -28,23 +30,26 @@ func TestCombinedSignWithBeaconKey(t *testing.T) {
 	// prepare data
 	beaconKey := unittest.RandomBeaconPriv()
 	pk := beaconKey.PublicKey()
-	view := uint64(20)
+	proposerView := uint64(20)
-	fblock := unittest.BlockFixture()
-	fblock.Header.ProposerID = identities[0].NodeID
-	fblock.Header.View = view
-	block := model.BlockFromFlow(fblock.Header)
-	signerID := fblock.Header.ProposerID
+	proposerIdentity := identities[0]
+	fblock := unittest.BlockFixture(
+		unittest.Block.WithParent(unittest.IdentifierFixture(), proposerView-1, 0),
+		unittest.Block.WithProposerID(proposerIdentity.NodeID),
+		unittest.Block.WithLastViewTC(nil),
+	)
+	proposal := model.ProposalFromFlow(fblock.ToHeader())
+	signerID := fblock.ProposerID
 	beaconKeyStore := modulemock.NewRandomBeaconKeyStore(t)
-	beaconKeyStore.On("ByView", view).Return(beaconKey, nil)
+	beaconKeyStore.On("ByView", proposerView).Return(beaconKey, nil)
+	ourIdentity := unittest.IdentityFixture()
 	stakingPriv := unittest.StakingPrivKeyFixture()
-	nodeID := unittest.IdentityFixture()
-	nodeID.NodeID = signerID
-	nodeID.StakingPubKey = stakingPriv.PublicKey()
+	ourIdentity.NodeID = signerID
+	ourIdentity.StakingPubKey = stakingPriv.PublicKey()
-	me, err := local.New(nodeID, stakingPriv)
+	me, err := local.New(ourIdentity.IdentitySkeleton, stakingPriv)
 	require.NoError(t, err)
 	signer := NewCombinedSigner(me, beaconKeyStore)
@@ -53,19 +58,32 @@ func TestCombinedSignWithBeaconKey(t *testing.T) {
 	committee := &mocks.DynamicCommittee{}
 	committee.On("DKG", mock.Anything).Return(dkg, nil)
+	committee.On("Self").Return(me.NodeID())
+	committee.On("IdentityByBlock", fblock.ID(), fblock.ProposerID).Return(proposerIdentity, nil)
+	committee.On("LeaderForView", proposerView).Return(signerID, nil).Maybe()
 	packer := signature.NewConsensusSigDataPacker(committee)
 	verifier := NewCombinedVerifier(committee, packer)
-	// check that a created proposal can be verified by a verifier
-	proposal, err := signer.CreateProposal(block)
+	persist := mocks.NewPersister(t)
+	safetyData := &hotstuff.SafetyData{
+		LockedOneChainView:      fblock.ParentView,
+		HighestAcknowledgedView: fblock.ParentView,
+	}
+	persist.On("GetSafetyData", mock.Anything).Return(safetyData, nil).Once()
+	persist.On("PutSafetyData", mock.Anything).Return(nil)
+	safetyRules, err := safetyrules.New(signer, persist, committee)
+	require.NoError(t, err)
+
+	// check that the proposer's vote for their own block (i.e. the proposer signature in the header) passes verification
+	vote, err := safetyRules.SignOwnProposal(proposal)
 	require.NoError(t, err)
-	vote := proposal.ProposerVote()
-	err = verifier.VerifyVote(nodeID, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
+	err = verifier.VerifyVote(&ourIdentity.IdentitySkeleton, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
 	require.NoError(t, err)
 	// check that a created proposal's signature is a combined staking sig and random beacon sig
+	block := proposal.Block
 	msg := MakeVoteMessage(block.View, block.BlockID)
 	stakingSig, err := stakingPriv.Sign(msg, msig.NewBLSHasher(msig.ConsensusVoteTag))
 	require.NoError(t, err)
@@ -74,29 +92,29 @@ func TestCombinedSignWithBeaconKey(t *testing.T) {
 	require.NoError(t, err)
 	expectedSig := msig.EncodeDoubleSig(stakingSig, beaconSig)
-	require.Equal(t, expectedSig, proposal.SigData)
+	require.Equal(t, expectedSig, vote.SigData)
 	// vote should be valid
 	vote, err = signer.CreateVote(block)
 	require.NoError(t, err)
-	err = verifier.VerifyVote(nodeID, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
+	err = verifier.VerifyVote(&ourIdentity.IdentitySkeleton, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
 	require.NoError(t, err)
 	// vote on different block should be invalid
 	blockWrongID := *block
 	blockWrongID.BlockID[0]++
-	err = verifier.VerifyVote(nodeID, vote.SigData, blockWrongID.View, blockWrongID.BlockID)
+	err = verifier.VerifyVote(&ourIdentity.IdentitySkeleton, vote.SigData, blockWrongID.View, blockWrongID.BlockID)
 	require.ErrorIs(t, err, model.ErrInvalidSignature)
-	// vote with a wrong view should be invalid
+	// vote with a wrong proposerView should be invalid
 	blockWrongView := *block
 	blockWrongView.View++
-	err = verifier.VerifyVote(nodeID, vote.SigData, blockWrongID.View, blockWrongID.BlockID)
+	err = verifier.VerifyVote(&ourIdentity.IdentitySkeleton, vote.SigData, blockWrongID.View, blockWrongID.BlockID)
 	require.ErrorIs(t, err, model.ErrInvalidSignature)
 	// vote by different signer should be invalid
-	wrongVoter := identities[1]
+	wrongVoter := &identities[1].IdentitySkeleton
 	wrongVoter.StakingPubKey = unittest.StakingPrivKeyFixture().PublicKey()
 	err = verifier.VerifyVote(wrongVoter, vote.SigData, block.View, block.BlockID)
 	require.ErrorIs(t, err, model.ErrInvalidSignature)
@@ -104,7 +122,7 @@ func TestCombinedSignWithBeaconKey(t *testing.T) {
 	// vote with changed signature should be invalid
 	brokenSig := append([]byte{}, vote.SigData...) // copy
 	brokenSig[4]++
-	err = verifier.VerifyVote(nodeID, brokenSig, block.View, block.BlockID)
+	err = verifier.VerifyVote(&ourIdentity.IdentitySkeleton, brokenSig, block.View, block.BlockID)
 	require.ErrorIs(t, err, model.ErrInvalidSignature)
 	// Vote from a node that is _not_ part of the Random Beacon committee should be rejected.
@@ -112,7 +130,7 @@ func TestCombinedSignWithBeaconKey(t *testing.T) {
 	// as a sign of an invalid vote and wraps it into a `model.InvalidSignerError`.
 	*dkg = mocks.DKG{} // overwrite DKG mock with a new one
 	dkg.On("KeyShare", signerID).Return(nil, protocol.IdentityNotFoundError{NodeID: signerID})
-	err = verifier.VerifyVote(nodeID, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
+	err = verifier.VerifyVote(&ourIdentity.IdentitySkeleton, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
 	require.True(t, model.IsInvalidSignerError(err))
 }
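The expected-signature check in this test is worth calling out: a combined-signer vote is the concatenation of a staking signature and a random beacon signature. A condensed sketch, assuming the beacon signature uses the random beacon domain-separation tag (the tag is elided by the hunk above, so this is an assumption):

```go
// Sketch: rebuild the expected combined signature by hand and compare.
msg := MakeVoteMessage(block.View, block.BlockID)
stakingSig, err := stakingPriv.Sign(msg, msig.NewBLSHasher(msig.ConsensusVoteTag))
require.NoError(t, err)
beaconSig, err := beaconKey.Sign(msg, msig.NewBLSHasher(msig.RandomBeaconTag)) // tag assumed
require.NoError(t, err)
require.Equal(t, msig.EncodeDoubleSig(stakingSig, beaconSig), vote.SigData)
```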
@@ -122,22 +140,24 @@ func TestCombinedSignWithNoBeaconKey(t *testing.T) {
 	// prepare data
 	beaconKey := unittest.RandomBeaconPriv()
 	pk := beaconKey.PublicKey()
-	view := uint64(20)
+	proposerView := uint64(20)
-	fblock := unittest.BlockFixture()
-	fblock.Header.View = view
-	block := model.BlockFromFlow(fblock.Header)
-	signerID := fblock.Header.ProposerID
+	fblock := unittest.BlockFixture(
+		unittest.Block.WithParent(unittest.IdentifierFixture(), proposerView-1, 0),
+		unittest.Block.WithLastViewTC(nil),
+	)
+	proposal := model.ProposalFromFlow(fblock.ToHeader())
+	signerID := fblock.ProposerID
 	beaconKeyStore := modulemock.NewRandomBeaconKeyStore(t)
-	beaconKeyStore.On("ByView", view).Return(nil, module.ErrNoBeaconKeyForEpoch)
+	beaconKeyStore.On("ByView", proposerView).Return(nil, module.ErrNoBeaconKeyForEpoch)
+	ourIdentity := unittest.IdentityFixture()
 	stakingPriv := unittest.StakingPrivKeyFixture()
-	nodeID := unittest.IdentityFixture()
-	nodeID.NodeID = signerID
-	nodeID.StakingPubKey = stakingPriv.PublicKey()
+	ourIdentity.NodeID = signerID
+	ourIdentity.StakingPubKey = stakingPriv.PublicKey()
-	me, err := local.New(nodeID, stakingPriv)
+	me, err := local.New(ourIdentity.IdentitySkeleton, stakingPriv)
 	require.NoError(t, err)
 	signer := NewCombinedSigner(me, beaconKeyStore)
@@ -150,25 +170,38 @@ func TestCombinedSignWithNoBeaconKey(t *testing.T) {
 	// for this failed node, which can be used to verify signature from
 	// this failed node.
 	committee.On("DKG", mock.Anything).Return(dkg, nil)
+	committee.On("Self").Return(me.NodeID())
+	committee.On("IdentityByBlock", fblock.ID(), signerID).Return(ourIdentity, nil)
+	committee.On("LeaderForView", mock.Anything).Return(signerID, nil).Maybe()
 	packer := signature.NewConsensusSigDataPacker(committee)
 	verifier := NewCombinedVerifier(committee, packer)
-	proposal, err := signer.CreateProposal(block)
+	persist := mocks.NewPersister(t)
+	safetyData := &hotstuff.SafetyData{
+		LockedOneChainView:      fblock.ParentView,
+		HighestAcknowledgedView: fblock.ParentView,
+	}
+	persist.On("GetSafetyData", mock.Anything).Return(safetyData, nil).Once()
+	persist.On("PutSafetyData", mock.Anything).Return(nil)
+	safetyRules, err := safetyrules.New(signer, persist, committee)
+	require.NoError(t, err)
+
+	// check that the proposer's vote for their own block (i.e. the proposer signature in the header) passes verification
+	vote, err := safetyRules.SignOwnProposal(proposal)
 	require.NoError(t, err)
-	vote := proposal.ProposerVote()
-	err = verifier.VerifyVote(nodeID, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
+	err = verifier.VerifyVote(&ourIdentity.IdentitySkeleton, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
 	require.NoError(t, err)
 	// As the proposer does not have a Random Beacon Key, it should sign solely with its staking key.
 	// In this case, the SigData should be identical to the staking sig.
 	expectedStakingSig, err := stakingPriv.Sign(
-		MakeVoteMessage(block.View, block.BlockID),
+		MakeVoteMessage(proposal.Block.View, proposal.Block.BlockID),
 		msig.NewBLSHasher(msig.ConsensusVoteTag),
 	)
 	require.NoError(t, err)
-	require.Equal(t, expectedStakingSig, crypto.Signature(proposal.SigData))
+	require.Equal(t, expectedStakingSig, crypto.Signature(vote.SigData))
 }
 // Test_VerifyQC_EmptySigners checks that Verifier returns a `model.InsufficientSignaturesError`
@@ -200,7 +233,7 @@ func Test_VerifyQC_EmptySigners(t *testing.T) {
 	sigData, err := encoder.Encode(&emptySignersInput)
 	require.NoError(t, err)
-	err = verifier.VerifyQC([]*flow.Identity{}, sigData, block.View, block.BlockID)
+	err = verifier.VerifyQC(flow.IdentitySkeletonList{}, sigData, block.View, block.BlockID)
 	require.True(t, model.IsInsufficientSignaturesError(err))
 	err = verifier.VerifyQC(nil, sigData, block.View, block.BlockID)
@@ -218,7 +251,7 @@ func TestCombinedSign_BeaconKeyStore_ViewForUnknownEpoch(t *testing.T) {
 	nodeID := unittest.IdentityFixture()
 	nodeID.StakingPubKey = stakingPriv.PublicKey()
-	me, err := local.New(nodeID, stakingPriv)
+	me, err := local.New(nodeID.IdentitySkeleton, stakingPriv)
 	require.NoError(t, err)
 	signer := NewCombinedSigner(me, beaconKeyStore)
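Since `CreateProposal` no longer exists on the signers, these tests route proposer signing through `SafetyRules`, as the hunks above show. A condensed sketch of the wiring, using only names that appear in this diff:

```go
// Sketch: the proposer signature now comes from SafetyRules, not the signer directly.
safetyRules, err := safetyrules.New(signer, persist, committee)
require.NoError(t, err)

// SignOwnProposal consults the persisted safety data, then returns the proposer's
// vote for its own block; that vote's SigData is the header's proposer signature.
vote, err := safetyRules.SignOwnProposal(proposal)
require.NoError(t, err)
err = verifier.VerifyVote(&ourIdentity.IdentitySkeleton, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
require.NoError(t, err)
```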
diff --git a/consensus/hotstuff/verification/combined_signer_v3.go b/consensus/hotstuff/verification/combined_signer_v3.go
index 6ab6de760d5..8bca7934db9 100644
--- a/consensus/hotstuff/verification/combined_signer_v3.go
+++ b/consensus/hotstuff/verification/combined_signer_v3.go
@@ -4,9 +4,10 @@ import (
 	"errors"
 	"fmt"
+	"github.com/onflow/crypto/hash"
+
 	"github.com/onflow/flow-go/consensus/hotstuff"
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
-	"github.com/onflow/flow-go/crypto/hash"
 	"github.com/onflow/flow-go/model/encoding"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
@@ -52,29 +53,6 @@ func NewCombinedSignerV3(
 	return sc
 }
-// CreateProposal will create a proposal with a combined signature for the given block.
-func (c *CombinedSignerV3) CreateProposal(block *model.Block) (*model.Proposal, error) {
-
-	// check that the block is created by us
-	if block.ProposerID != c.staking.NodeID() {
-		return nil, fmt.Errorf("can't create proposal for someone else's block")
-	}
-
-	// create the signature data
-	sigData, err := c.genSigData(block)
-	if err != nil {
-		return nil, fmt.Errorf("signing my proposal failed: %w", err)
-	}
-
-	// create the proposal
-	proposal := &model.Proposal{
-		Block:   block,
-		SigData: sigData,
-	}
-
-	return proposal, nil
-}
-
 // CreateVote will create a vote with a combined signature for the given block.
 func (c *CombinedSignerV3) CreateVote(block *model.Block) (*model.Vote, error) {
@@ -84,12 +62,14 @@ func (c *CombinedSignerV3) CreateVote(block *model.Block) (*model.Vote, error) {
 		return nil, fmt.Errorf("could not create signature: %w", err)
 	}
-	// create the vote
-	vote := &model.Vote{
+	vote, err := model.NewVote(model.UntrustedVote{
 		View:     block.View,
 		BlockID:  block.BlockID,
 		SignerID: c.staking.NodeID(),
 		SigData:  sigData,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not create vote: %w", err)
 	}
 	return vote, nil
@@ -105,13 +85,20 @@ func (c *CombinedSignerV3) CreateTimeout(curView uint64, newestQC *flow.QuorumCe
 		return nil, fmt.Errorf("could not generate signature for timeout object at view %d: %w", curView, err)
 	}
-	timeout := &model.TimeoutObject{
-		View:       curView,
-		NewestQC:   newestQC,
-		LastViewTC: lastViewTC,
-		SignerID:   c.staking.NodeID(),
-		SigData:    sigData,
+	timeout, err := model.NewTimeoutObject(
+		model.UntrustedTimeoutObject{
+			View:        curView,
+			NewestQC:    newestQC,
+			LastViewTC:  lastViewTC,
+			SignerID:    c.staking.NodeID(),
+			SigData:     sigData,
+			TimeoutTick: 0,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct timeout object: %w", err)
 	}
+
 	return timeout, nil
 }
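Timeout objects follow the same construct-then-validate pattern in the v2, v3, and staking signers. A sketch of the shared shape, with `TimeoutTick` zeroed exactly as in the hunks:

```go
// Sketch: validated TimeoutObject construction shared by all signers.
timeout, err := model.NewTimeoutObject(model.UntrustedTimeoutObject{
	View:        curView,
	NewestQC:    newestQC,   // highest QC known to this replica
	LastViewTC:  lastViewTC, // non-nil only if the previous view ended in a timeout
	SignerID:    signerID,
	SigData:     sigData,
	TimeoutTick: 0, // a fresh timeout starts at tick zero
})
if err != nil {
	return nil, fmt.Errorf("could not construct timeout object: %w", err)
}
```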
diff --git a/consensus/hotstuff/verification/combined_signer_v3_test.go b/consensus/hotstuff/verification/combined_signer_v3_test.go
index 1a59d6d047a..c73999e0939 100644
--- a/consensus/hotstuff/verification/combined_signer_v3_test.go
+++ b/consensus/hotstuff/verification/combined_signer_v3_test.go
@@ -7,7 +7,7 @@ import (
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
-	"github.com/onflow/flow-go/crypto"
+	"github.com/onflow/crypto"
 	"github.com/onflow/flow-go/consensus/hotstuff"
 	"github.com/onflow/flow-go/consensus/hotstuff/mocks"
@@ -29,22 +29,24 @@ func TestCombinedSignWithBeaconKeyV3(t *testing.T) {
 	// prepare data
 	beaconKey := unittest.RandomBeaconPriv()
 	pk := beaconKey.PublicKey()
-	view := uint64(20)
+	proposerView := uint64(20)
-	fblock := unittest.BlockFixture()
-	fblock.Header.View = view
-	block := model.BlockFromFlow(fblock.Header)
-	signerID := fblock.Header.ProposerID
+	fblock := unittest.BlockFixture(
+		unittest.Block.WithParentView(proposerView-1),
+		unittest.Block.WithView(proposerView),
+	)
+	block := model.BlockFromFlow(fblock.ToHeader())
+	signerID := fblock.ProposerID
 	beaconKeyStore := modulemock.NewRandomBeaconKeyStore(t)
-	beaconKeyStore.On("ByView", view).Return(beaconKey, nil)
+	beaconKeyStore.On("ByView", proposerView).Return(beaconKey, nil)
 	stakingPriv := unittest.StakingPrivKeyFixture()
-	nodeID := unittest.IdentityFixture()
+	nodeID := &unittest.IdentityFixture().IdentitySkeleton
 	nodeID.NodeID = signerID
 	nodeID.StakingPubKey = stakingPriv.PublicKey()
-	me, err := local.New(nodeID, stakingPriv)
+	me, err := local.New(*nodeID, stakingPriv)
 	require.NoError(t, err)
 	signer := NewCombinedSignerV3(me, beaconKeyStore)
@@ -57,12 +59,11 @@ func TestCombinedSignWithBeaconKeyV3(t *testing.T) {
 	packer := signature.NewConsensusSigDataPacker(committee)
 	verifier := NewCombinedVerifierV3(committee, packer)
-	// check that a created proposal can be verified by a verifier
-	proposal, err := signer.CreateProposal(block)
+	// check that the proposer's vote for their own block (i.e. the proposer signature in the header) passes verification
+	vote, err := signer.CreateVote(block)
 	require.NoError(t, err)
-	vote := proposal.ProposerVote()
-	err = verifier.VerifyVote(nodeID, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
+	err = verifier.VerifyVote(nodeID, vote.SigData, block.View, block.BlockID)
 	require.NoError(t, err)
 	// check that a created proposal's signature is a combined staking sig and random beacon sig
@@ -72,14 +73,14 @@ func TestCombinedSignWithBeaconKeyV3(t *testing.T) {
 	require.NoError(t, err)
 	expectedSig := msig.EncodeSingleSig(encoding.SigTypeRandomBeacon, beaconSig)
-	require.Equal(t, expectedSig, proposal.SigData)
+	require.Equal(t, expectedSig, vote.SigData)
 	// Vote from a node that is _not_ part of the Random Beacon committee should be rejected.
 	// Specifically, we expect that the verifier recognizes the `protocol.IdentityNotFoundError`
 	// as a sign of an invalid vote and wraps it into a `model.InvalidSignerError`.
 	*dkg = mocks.DKG{} // overwrite DKG mock with a new one
 	dkg.On("KeyShare", signerID).Return(nil, protocol.IdentityNotFoundError{NodeID: signerID})
-	err = verifier.VerifyVote(nodeID, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
+	err = verifier.VerifyVote(nodeID, vote.SigData, block.View, block.BlockID)
 	require.True(t, model.IsInvalidSignerError(err))
 }
@@ -89,22 +90,24 @@ func TestCombinedSignWithNoBeaconKeyV3(t *testing.T) {
 	// prepare data
 	beaconKey := unittest.RandomBeaconPriv()
 	pk := beaconKey.PublicKey()
-	view := uint64(20)
+	proposerView := uint64(20)
-	fblock := unittest.BlockFixture()
-	fblock.Header.View = view
-	block := model.BlockFromFlow(fblock.Header)
-	signerID := fblock.Header.ProposerID
+	fblock := unittest.BlockFixture(
+		unittest.Block.WithParentView(proposerView-1),
+		unittest.Block.WithView(proposerView),
+	)
+	block := model.BlockFromFlow(fblock.ToHeader())
+	signerID := fblock.ProposerID
 	beaconKeyStore := modulemock.NewRandomBeaconKeyStore(t)
-	beaconKeyStore.On("ByView", view).Return(nil, module.ErrNoBeaconKeyForEpoch)
+	beaconKeyStore.On("ByView", proposerView).Return(nil, module.ErrNoBeaconKeyForEpoch)
 	stakingPriv := unittest.StakingPrivKeyFixture()
-	nodeID := unittest.IdentityFixture()
+	nodeID := &unittest.IdentityFixture().IdentitySkeleton
 	nodeID.NodeID = signerID
 	nodeID.StakingPubKey = stakingPriv.PublicKey()
-	me, err := local.New(nodeID, stakingPriv)
+	me, err := local.New(*nodeID, stakingPriv)
 	require.NoError(t, err)
 	signer := NewCombinedSignerV3(me, beaconKeyStore)
@@ -121,11 +124,11 @@ func TestCombinedSignWithNoBeaconKeyV3(t *testing.T) {
 	packer := signature.NewConsensusSigDataPacker(committee)
 	verifier := NewCombinedVerifierV3(committee, packer)
-	proposal, err := signer.CreateProposal(block)
+	// check that the proposer's vote for their own block (i.e. the proposer signature in the header) passes verification
+	vote, err := signer.CreateVote(block)
 	require.NoError(t, err)
-	vote := proposal.ProposerVote()
-	err = verifier.VerifyVote(nodeID, vote.SigData, proposal.Block.View, proposal.Block.BlockID)
+	err = verifier.VerifyVote(nodeID, vote.SigData, block.View, block.BlockID)
 	require.NoError(t, err)
 	// check that a created proposal's signature is a combined staking sig and random beacon sig
@@ -136,7 +139,7 @@ func TestCombinedSignWithNoBeaconKeyV3(t *testing.T) {
 	expectedSig := msig.EncodeSingleSig(encoding.SigTypeStaking, stakingSig)
 	// check the signature only has staking sig
-	require.Equal(t, expectedSig, proposal.SigData)
+	require.Equal(t, expectedSig, vote.SigData)
 }
 // Test_VerifyQC checks that a QC where either signer list is empty is rejected as invalid
@@ -161,7 +164,7 @@ func Test_VerifyQCV3(t *testing.T) {
 	stakingSigners := generateIdentitiesForPrivateKeys(t, privStakingKeys)
 	rbSigners := generateIdentitiesForPrivateKeys(t, privRbKeyShares)
 	registerPublicRbKeys(t, dkg, rbSigners.NodeIDs(), privRbKeyShares)
-	allSigners := append(append(flow.IdentityList{}, stakingSigners...), rbSigners...)
+	allSigners := append(append(flow.IdentityList{}, stakingSigners...), rbSigners...).ToSkeleton()
 	packedSigData := unittest.RandomBytes(1021)
 	unpackedSigData := hotstuff.BlockSignatureData{
@@ -272,7 +275,7 @@ func Test_VerifyQC_EmptySignersV3(t *testing.T) {
 	sigData, err := encoder.Encode(&emptySignersInput)
 	require.NoError(t, err)
-	err = verifier.VerifyQC([]*flow.Identity{}, sigData, block.View, block.BlockID)
+	err = verifier.VerifyQC(flow.IdentitySkeletonList{}, sigData, block.View, block.BlockID)
 	require.True(t, model.IsInsufficientSignaturesError(err))
 	err = verifier.VerifyQC(nil, sigData, block.View, block.BlockID)
@@ -290,7 +293,7 @@ func TestCombinedSign_BeaconKeyStore_ViewForUnknownEpochv3(t *testing.T) {
 	nodeID := unittest.IdentityFixture()
 	nodeID.StakingPubKey = stakingPriv.PublicKey()
-	me, err := local.New(nodeID, stakingPriv)
+	me, err := local.New(nodeID.IdentitySkeleton, stakingPriv)
 	require.NoError(t, err)
 	signer := NewCombinedSigner(me, beaconKeyStore)
@@ -339,7 +342,7 @@ func generateAggregatedSignature(t *testing.T, n int, msg []byte, tag string) ([
 // generateSignature creates a single private BLS 12-381 key, signs the provided `message`
 // using domain separation `tag`, and returns the private key and signature.
 func generateSignature(t *testing.T, message []byte, tag string) (crypto.PrivateKey, crypto.Signature) {
-	priv := unittest.PrivateKeyFixture(crypto.BLSBLS12381, crypto.KeyGenSeedMinLen)
+	priv := unittest.PrivateKeyFixture(crypto.BLSBLS12381)
 	sig, err := priv.Sign(message, msig.NewBLSHasher(tag))
 	require.NoError(t, err)
 	return priv, sig
diff --git a/consensus/hotstuff/verification/combined_verifier_v2.go b/consensus/hotstuff/verification/combined_verifier_v2.go
index ee67a4ea36a..73e1043cc11 100644
--- a/consensus/hotstuff/verification/combined_verifier_v2.go
+++ b/consensus/hotstuff/verification/combined_verifier_v2.go
@@ -1,15 +1,13 @@
-//go:build relic
-// +build relic
-
 package verification
 import (
 	"errors"
 	"fmt"
+	"github.com/onflow/crypto/hash"
+
 	"github.com/onflow/flow-go/consensus/hotstuff"
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
-	"github.com/onflow/flow-go/crypto/hash"
 	"github.com/onflow/flow-go/model/flow"
 	msig "github.com/onflow/flow-go/module/signature"
 	"github.com/onflow/flow-go/state/protocol"
@@ -53,7 +51,7 @@ func NewCombinedVerifier(committee hotstuff.Replicas, packer hotstuff.Packer) *C
 // - model.ErrViewForUnknownEpoch if no epoch containing the given view is known
 // - unexpected errors should be treated as symptoms of bugs or uncovered
 //   edge cases in the logic (i.e. as fatal)
-func (c *CombinedVerifier) VerifyVote(signer *flow.Identity, sigData []byte, view uint64, blockID flow.Identifier) error {
+func (c *CombinedVerifier) VerifyVote(signer *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error {
 	// create the to-be-signed message
 	msg := MakeVoteMessage(view, blockID)
@@ -120,7 +118,7 @@ func (c *CombinedVerifier) VerifyVote(signer *flow.Identity, sigData []byte, vie
 // - model.ErrInvalidSignature if a signature is invalid
 // - model.ErrViewForUnknownEpoch if no epoch containing the given view is known
 // - error if running into any unexpected exception (i.e. fatal error)
-func (c *CombinedVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, view uint64, blockID flow.Identifier) error {
+func (c *CombinedVerifier) VerifyQC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, blockID flow.Identifier) error {
 	dkg, err := c.committee.DKG(view)
 	if err != nil {
 		return fmt.Errorf("could not get dkg data: %w", err)
@@ -160,7 +158,7 @@ func (c *CombinedVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, v
 // - model.ErrInvalidSignature if a signature is invalid
 // - unexpected errors should be treated as symptoms of bugs or uncovered
 //   edge cases in the logic (i.e. as fatal)
-func (c *CombinedVerifier) VerifyTC(signers flow.IdentityList, sigData []byte, view uint64, highQCViews []uint64) error {
+func (c *CombinedVerifier) VerifyTC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, highQCViews []uint64) error {
 	stakingPks := signers.PublicStakingKeys()
 	return verifyTCSignatureManyMessages(stakingPks, sigData, view, highQCViews, c.timeoutObjectHasher)
 }
diff --git a/consensus/hotstuff/verification/combined_verifier_v3.go b/consensus/hotstuff/verification/combined_verifier_v3.go
index 8f5f9acd8f0..5aff5e352a3 100644
--- a/consensus/hotstuff/verification/combined_verifier_v3.go
+++ b/consensus/hotstuff/verification/combined_verifier_v3.go
@@ -1,16 +1,14 @@
-//go:build relic
-// +build relic
-
 package verification
 import (
 	"errors"
 	"fmt"
+	"github.com/onflow/crypto"
+	"github.com/onflow/crypto/hash"
+
 	"github.com/onflow/flow-go/consensus/hotstuff"
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
-	"github.com/onflow/flow-go/crypto"
-	"github.com/onflow/flow-go/crypto/hash"
 	"github.com/onflow/flow-go/model/encoding"
 	"github.com/onflow/flow-go/model/flow"
 	msig "github.com/onflow/flow-go/module/signature"
@@ -57,7 +55,7 @@ func NewCombinedVerifierV3(committee hotstuff.Replicas, packer hotstuff.Packer)
 //
 // This implementation already supports the case where the DKG committee is a
 // _strict subset_ of the full consensus committee.
-func (c *CombinedVerifierV3) VerifyVote(signer *flow.Identity, sigData []byte, view uint64, blockID flow.Identifier) error {
+func (c *CombinedVerifierV3) VerifyVote(signer *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error {
 	// create the to-be-signed message
 	msg := MakeVoteMessage(view, blockID)
@@ -125,7 +123,7 @@ func (c *CombinedVerifierV3) VerifyVote(signer *flow.Identity, sigData []byte, v
 //
 // This implementation already supports the case where the DKG committee is a
 // _strict subset_ of the full consensus committee.
-func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentityList, sigData []byte, view uint64, blockID flow.Identifier) error {
+func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, blockID flow.Identifier) error {
 	signerIdentities := signers.Lookup()
 	dkg, err := c.committee.DKG(view)
 	if err != nil {
@@ -176,7 +174,7 @@ func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentityList, sigData []byte,
 			if protocol.IsIdentityNotFound(err) {
 				return model.NewInvalidSignerErrorf("%v is not a random beacon participant: %w", signerID, err)
 			}
-			return fmt.Errorf("unexpected error retrieving dkg key share for signer %v: %w", signerID, err)
+			return fmt.Errorf("unexpected error retrieving Random Beacon key share for signer %v: %w", signerID, err)
 		}
 		beaconPubKeys = append(beaconPubKeys, keyShare)
 	}
@@ -227,7 +225,7 @@ func (c *CombinedVerifierV3) VerifyQC(signers flow.IdentityList, sigData []byte,
 // - model.ErrInvalidSignature if a signature is invalid
 // - unexpected errors should be treated as symptoms of bugs or uncovered
 //   edge cases in the logic (i.e. as fatal)
-func (c *CombinedVerifierV3) VerifyTC(signers flow.IdentityList, sigData []byte, view uint64, highQCViews []uint64) error {
+func (c *CombinedVerifierV3) VerifyTC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, highQCViews []uint64) error {
 	stakingPks := signers.PublicStakingKeys()
 	return verifyTCSignatureManyMessages(stakingPks, sigData, view, highQCViews, c.timeoutObjectHasher)
 }
diff --git a/consensus/hotstuff/verification/common.go b/consensus/hotstuff/verification/common.go
index 00d73a0caee..04c355f4390 100644
--- a/consensus/hotstuff/verification/common.go
+++ b/consensus/hotstuff/verification/common.go
@@ -1,14 +1,14 @@
 package verification
 import (
+	"encoding/binary"
 	"fmt"
+	"github.com/onflow/crypto"
+	"github.com/onflow/crypto/hash"
+
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
-	"github.com/onflow/flow-go/crypto"
-	"github.com/onflow/flow-go/crypto/hash"
 	"github.com/onflow/flow-go/model/flow"
-
-	"encoding/binary"
 )
 // MakeVoteMessage generates the message we have to sign in order to be able
diff --git a/consensus/hotstuff/verification/metrics_wrapper.go b/consensus/hotstuff/verification/metrics_wrapper.go
index 7c929e361ef..1f5b7691492 100644
--- a/consensus/hotstuff/verification/metrics_wrapper.go
+++ b/consensus/hotstuff/verification/metrics_wrapper.go
@@ -29,28 +29,6 @@ func NewMetricsWrapper(signer hotstuff.Signer, metrics module.HotstuffMetrics) *
 	}
 }
-// TODO: to be moved to VerifierMetricsWrapper
-// func (w SignerMetricsWrapper) VerifyVote(voter *flow.Identity, sigData []byte, block *model.Block) (bool, error) {
-// 	processStart := time.Now()
-// 	valid, err := w.signer.VerifyVote(voter, sigData, block)
-// 	w.metrics.SignerProcessingDuration(time.Since(processStart))
-// 	return valid, err
-// }
-//
-// func (w SignerMetricsWrapper) VerifyQC(signers flow.IdentityList, sigData []byte, block *model.Block) (bool, error) {
-// 	processStart := time.Now()
-// 	valid, err := w.signer.VerifyQC(signers, sigData, block)
-// 	w.metrics.SignerProcessingDuration(time.Since(processStart))
-// 	return valid, err
-// }
-
-func (w SignerMetricsWrapper) CreateProposal(block *model.Block) (*model.Proposal, error) {
-	processStart := time.Now()
-	proposal, err := w.signer.CreateProposal(block)
-	w.metrics.SignerProcessingDuration(time.Since(processStart))
-	return proposal, err
-}
-
 func (w SignerMetricsWrapper) CreateVote(block *model.Block) (*model.Vote, error) {
 	processStart := time.Now()
 	vote, err := w.signer.CreateVote(block)
@@ -66,10 +44,3 @@ func (w SignerMetricsWrapper) CreateTimeout(curView uint64,
 	w.metrics.SignerProcessingDuration(time.Since(processStart))
 	return timeout, err
 }
-
-// func (w SignerMetricsWrapper) CreateQC(votes []*model.Vote) (*flow.QuorumCertificate, error) {
-// 	processStart := time.Now()
-// 	qc, err := w.signer.CreateQC(votes)
-// 	w.metrics.SignerProcessingDuration(time.Since(processStart))
-// 	return qc, err
-// }
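The deleted TODO above sketched relocating verification timing into a dedicated wrapper. Purely hypothetical (no such type exists in this change set), such a wrapper would presumably mirror the signer wrapper's shape, updated for the new skeleton-identity signatures:

```go
// Hypothetical VerifierMetricsWrapper, mirroring SignerMetricsWrapper. Not part of this diff.
type VerifierMetricsWrapper struct {
	verifier hotstuff.Verifier
	metrics  module.HotstuffMetrics
}

func (w VerifierMetricsWrapper) VerifyVote(voter *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error {
	processStart := time.Now()
	err := w.verifier.VerifyVote(voter, sigData, view, blockID)
	// reusing the signer-duration metric here is an assumption; a dedicated
	// verifier metric may be preferable
	w.metrics.SignerProcessingDuration(time.Since(processStart))
	return err
}
```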
diff --git a/consensus/hotstuff/verification/staking_signer.go b/consensus/hotstuff/verification/staking_signer.go
index 91c35e1cddd..8e4c9bfcfd5 100644
--- a/consensus/hotstuff/verification/staking_signer.go
+++ b/consensus/hotstuff/verification/staking_signer.go
@@ -3,9 +3,10 @@ package verification
 import (
 	"fmt"
+	"github.com/onflow/crypto/hash"
+
 	"github.com/onflow/flow-go/consensus/hotstuff"
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
-	"github.com/onflow/flow-go/crypto/hash"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
 	msig "github.com/onflow/flow-go/module/signature"
@@ -39,29 +40,6 @@ func NewStakingSigner(
 	return sc
 }
-// CreateProposal will create a proposal with a staking signature for the given block.
-func (c *StakingSigner) CreateProposal(block *model.Block) (*model.Proposal, error) {
-
-	// check that the block is created by us
-	if block.ProposerID != c.signerID {
-		return nil, fmt.Errorf("can't create proposal for someone else's block")
-	}
-
-	// create the signature data
-	sigData, err := c.genSigData(block)
-	if err != nil {
-		return nil, fmt.Errorf("signing my proposal failed: %w", err)
-	}
-
-	// create the proposal
-	proposal := &model.Proposal{
-		Block:   block,
-		SigData: sigData,
-	}
-
-	return proposal, nil
-}
-
 // CreateVote will create a vote with a staking signature for the given block.
 func (c *StakingSigner) CreateVote(block *model.Block) (*model.Vote, error) {
@@ -71,12 +49,14 @@ func (c *StakingSigner) CreateVote(block *model.Block) (*model.Vote, error) {
 		return nil, fmt.Errorf("could not create signature: %w", err)
 	}
-	// create the vote
-	vote := &model.Vote{
+	vote, err := model.NewVote(model.UntrustedVote{
 		View:     block.View,
 		BlockID:  block.BlockID,
 		SignerID: c.signerID,
 		SigData:  sigData,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not create vote: %w", err)
 	}
 	return vote, nil
@@ -91,13 +71,20 @@ func (c *StakingSigner) CreateTimeout(curView uint64, newestQC *flow.QuorumCerti
 		return nil, fmt.Errorf("could not generate signature for timeout object at view %d: %w", curView, err)
 	}
-	timeout := &model.TimeoutObject{
-		View:       curView,
-		NewestQC:   newestQC,
-		LastViewTC: lastViewTC,
-		SignerID:   c.signerID,
-		SigData:    sigData,
+	timeout, err := model.NewTimeoutObject(
+		model.UntrustedTimeoutObject{
+			View:        curView,
+			NewestQC:    newestQC,
+			LastViewTC:  lastViewTC,
+			SignerID:    c.signerID,
+			SigData:     sigData,
+			TimeoutTick: 0,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct timeout object: %w", err)
 	}
+
 	return timeout, nil
 }
diff --git a/consensus/hotstuff/verification/staking_signer_test.go b/consensus/hotstuff/verification/staking_signer_test.go
index fc563266f92..6fc4d14fdc5 100644
--- a/consensus/hotstuff/verification/staking_signer_test.go
+++ b/consensus/hotstuff/verification/staking_signer_test.go
@@ -15,57 +15,6 @@ import (
 	"github.com/onflow/flow-go/utils/unittest"
 )
-// TestStakingSigner_CreateProposal verifies that StakingSigner can produce correctly signed proposal
-// that can be verified later using StakingVerifier.
-// Additionally, we check cases where errors during signing are happening.
-func TestStakingSigner_CreateProposal(t *testing.T) {
-	stakingPriv := unittest.StakingPrivKeyFixture()
-	signer := unittest.IdentityFixture()
-	signerID := signer.NodeID
-	signer.StakingPubKey = stakingPriv.PublicKey()
-
-	t.Run("invalid-signer-id", func(t *testing.T) {
-		me := &modulemock.Local{}
-		me.On("NodeID").Return(signerID)
-		signer := NewStakingSigner(me)
-
-		block := helper.MakeBlock()
-		proposal, err := signer.CreateProposal(block)
-		require.Error(t, err)
-		require.Nil(t, proposal)
-	})
-	t.Run("could-not-sign", func(t *testing.T) {
-		signException := errors.New("sign-exception")
-		me := &modulemock.Local{}
-		me.On("NodeID").Return(signerID)
-		me.On("Sign", mock.Anything, mock.Anything).Return(nil, signException).Once()
-		signer := NewStakingSigner(me)
-
-		block := helper.MakeBlock()
-		proposal, err := signer.CreateProposal(block)
-		require.ErrorAs(t, err, &signException)
-		require.Nil(t, proposal)
-	})
-	t.Run("created-proposal", func(t *testing.T) {
-		me, err := local.New(signer, stakingPriv)
-		require.NoError(t, err)
-
-		signerIdentity := unittest.IdentityFixture(unittest.WithNodeID(signerID),
-			unittest.WithStakingPubKey(stakingPriv.PublicKey()))
-
-		signer := NewStakingSigner(me)
-
-		block := helper.MakeBlock(helper.WithBlockProposer(signerID))
-		proposal, err := signer.CreateProposal(block)
-		require.NoError(t, err)
-		require.NotNil(t, proposal)
-
-		verifier := NewStakingVerifier()
-		err = verifier.VerifyVote(signerIdentity, proposal.SigData, proposal.Block.View, proposal.Block.BlockID)
-		require.NoError(t, err)
-	})
-}
-
 // TestStakingSigner_CreateVote verifies that StakingSigner can produce a correctly signed vote
 // that can be verified later using StakingVerifier.
 // Additionally, we check cases where errors during signing are happening.
@@ -83,16 +32,16 @@ func TestStakingSigner_CreateVote(t *testing.T) {
 		signer := NewStakingSigner(me)
 		block := helper.MakeBlock()
-		proposal, err := signer.CreateProposal(block)
+		proposal, err := signer.CreateVote(block)
 		require.ErrorAs(t, err, &signException)
 		require.Nil(t, proposal)
 	})
 	t.Run("created-vote", func(t *testing.T) {
-		me, err := local.New(signer, stakingPriv)
+		me, err := local.New(signer.IdentitySkeleton, stakingPriv)
 		require.NoError(t, err)
-		signerIdentity := unittest.IdentityFixture(unittest.WithNodeID(signerID),
-			unittest.WithStakingPubKey(stakingPriv.PublicKey()))
+		signerIdentity := &unittest.IdentityFixture(unittest.WithNodeID(signerID),
+			unittest.WithStakingPubKey(stakingPriv.PublicKey())).IdentitySkeleton
 		signer := NewStakingSigner(me)
@@ -114,7 +63,7 @@ func TestStakingSigner_VerifyQC(t *testing.T) {
 	sigData := unittest.RandomBytes(127)
 	verifier := NewStakingVerifier()
-	err := verifier.VerifyQC([]*flow.Identity{}, sigData, block.View, block.BlockID)
+	err := verifier.VerifyQC(flow.IdentitySkeletonList{}, sigData, block.View, block.BlockID)
 	require.True(t, model.IsInsufficientSignaturesError(err))
 	err = verifier.VerifyQC(nil, sigData, block.View, block.BlockID)
diff --git a/consensus/hotstuff/verification/staking_verifier.go b/consensus/hotstuff/verification/staking_verifier.go
index 60b2f45f4d5..d916adb09d3 100644
--- a/consensus/hotstuff/verification/staking_verifier.go
+++ b/consensus/hotstuff/verification/staking_verifier.go
@@ -1,14 +1,12 @@
-//go:build relic
-// +build relic
-
 package verification
 import (
 	"fmt"
+	"github.com/onflow/crypto/hash"
+
 	"github.com/onflow/flow-go/consensus/hotstuff"
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
-	"github.com/onflow/flow-go/crypto/hash"
 	"github.com/onflow/flow-go/model/flow"
 	msig "github.com/onflow/flow-go/module/signature"
 )
@@ -37,7 +35,7 @@ func NewStakingVerifier() *StakingVerifier {
 // - model.ErrInvalidSignature if the signature is invalid
 // - unexpected errors should be treated as symptoms of bugs or uncovered
 //   edge cases in the logic (i.e. as fatal)
-func (v *StakingVerifier) VerifyVote(signer *flow.Identity, sigData []byte, view uint64, blockID flow.Identifier) error {
+func (v *StakingVerifier) VerifyVote(signer *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error {
 	// create the to-be-signed message
 	msg := MakeVoteMessage(view, blockID)
@@ -65,7 +63,7 @@ func (v *StakingVerifier) VerifyVote(signer *flow.Identity, sigData []byte, view
 //   edge cases in the logic (i.e. as fatal)
 //
 // In the single verification case, `sigData` represents a single signature (`crypto.Signature`).
-func (v *StakingVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, view uint64, blockID flow.Identifier) error {
+func (v *StakingVerifier) VerifyQC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, blockID flow.Identifier) error {
 	msg := MakeVoteMessage(view, blockID)
 	err := verifyAggregatedSignatureOneMessage(signers.PublicStakingKeys(), sigData, v.stakingHasher, msg)
@@ -85,6 +83,6 @@ func (v *StakingVerifier) VerifyQC(signers flow.IdentityList, sigData []byte, vi
 // - model.ErrInvalidSignature if a signature is invalid
 // - unexpected errors should be treated as symptoms of bugs or uncovered
 //   edge cases in the logic (i.e. as fatal)
-func (v *StakingVerifier) VerifyTC(signers flow.IdentityList, sigData []byte, view uint64, highQCViews []uint64) error {
+func (v *StakingVerifier) VerifyTC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, highQCViews []uint64) error {
 	return verifyTCSignatureManyMessages(signers.PublicStakingKeys(), sigData, view, highQCViews, v.timeoutObjectHasher)
 }
diff --git a/consensus/hotstuff/verifier.go b/consensus/hotstuff/verifier.go
index 126ac7f78db..354b406cdab 100644
--- a/consensus/hotstuff/verifier.go
+++ b/consensus/hotstuff/verifier.go
@@ -38,7 +38,7 @@ type Verifier interface {
 	//    where querying of DKG might fail if no epoch containing the given view is known.
 	//  * unexpected errors should be treated as symptoms of bugs or uncovered
 	//    edge cases in the logic (i.e. as fatal)
-	VerifyVote(voter *flow.Identity, sigData []byte, view uint64, blockID flow.Identifier) error
+	VerifyVote(voter *flow.IdentitySkeleton, sigData []byte, view uint64, blockID flow.Identifier) error
 	// VerifyQC checks the cryptographic validity of a QC's `SigData` w.r.t. the
 	// given view and blockID. It is the responsibility of the calling code to ensure that
@@ -58,7 +58,7 @@ type Verifier interface {
 	//    where querying of DKG might fail if no epoch containing the given view is known.
 	//  * unexpected errors should be treated as symptoms of bugs or uncovered
 	//    edge cases in the logic (i.e. as fatal)
-	VerifyQC(signers flow.IdentityList, sigData []byte, view uint64, blockID flow.Identifier) error
+	VerifyQC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, blockID flow.Identifier) error
 	// VerifyTC checks cryptographic validity of the TC's `sigData` w.r.t. the
 	// given view. It is the responsibility of the calling code to ensure
@@ -69,5 +69,5 @@ type Verifier interface {
 	//  * model.ErrInvalidSignature if a signature is invalid
 	//  * unexpected errors should be treated as symptoms of bugs or uncovered
 	//    edge cases in the logic (i.e. as fatal)
-	VerifyTC(signers flow.IdentityList, sigData []byte, view uint64, highQCViews []uint64) error
+	VerifyTC(signers flow.IdentitySkeletonList, sigData []byte, view uint64, highQCViews []uint64) error
 }
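With all three `Verifier` methods now taking skeleton identities, callers that hold full `flow.Identity` values must down-convert. A sketch of a caller adapting, assuming an `IdentityList` named `signers` and a `verifier` in scope:

```go
// Sketch: adapting a caller to the IdentitySkeletonList-based interface.
skeletons := signers.ToSkeleton() // full identities -> skeletons, as the tests above do
if err := verifier.VerifyQC(skeletons, qc.SigData, qc.View, qc.BlockID); err != nil {
	// per the doc comments: model.ErrInvalidSignature => invalid QC,
	// model.ErrViewForUnknownEpoch => view cannot be mapped to an epoch,
	// anything else should be treated as fatal
	return fmt.Errorf("QC verification failed: %w", err)
}
// a single identity is passed to VerifyVote as &identity.IdentitySkeleton
```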
diff --git a/consensus/hotstuff/vote_aggregator.go b/consensus/hotstuff/vote_aggregator.go
index 14dc4f7dc2f..7c9bbcaad01 100644
--- a/consensus/hotstuff/vote_aggregator.go
+++ b/consensus/hotstuff/vote_aggregator.go
@@ -25,12 +25,12 @@ type VoteAggregator interface {
 	// CAUTION: we expect that the input block's validity has been confirmed prior to calling AddBlock,
 	// including the proposer's signature. Otherwise, VoteAggregator might crash or exhibit undefined
 	// behaviour.
-	AddBlock(block *model.Proposal)
+	AddBlock(block *model.SignedProposal)
 	// InvalidBlock notifies the VoteAggregator about an invalid proposal, so that it
 	// can process votes for the invalid block and slash the voters.
 	// No errors are expected during normal operations
-	InvalidBlock(block *model.Proposal) error
+	InvalidBlock(block *model.SignedProposal) error
 	// PruneUpToView deletes all votes _below_ the given view, as well as
 	// related indices. We only retain and process votes whose view is equal to or larger
diff --git a/consensus/hotstuff/vote_collector.go b/consensus/hotstuff/vote_collector.go
index 157ef5338a7..3a259808dc4 100644
--- a/consensus/hotstuff/vote_collector.go
+++ b/consensus/hotstuff/vote_collector.go
@@ -61,7 +61,7 @@ type VoteCollector interface {
 	// It returns nil if the block is valid.
 	// It returns model.InvalidProposalError if the block is invalid.
 	// It returns any other error if there is an exception while processing the block.
-	ProcessBlock(block *model.Proposal) error
+	ProcessBlock(block *model.SignedProposal) error
 	// AddVote adds a vote to the collector
 	// When enough votes have been added to produce a QC, the QC will be created asynchronously, and
@@ -116,5 +116,5 @@ type VoteProcessorFactory interface {
 	// Caller can be sure that proposal vote was successfully verified and processed.
 	// Expected error returns during normal operations:
 	//  * model.InvalidProposalError - proposal has invalid proposer vote
-	Create(log zerolog.Logger, proposal *model.Proposal) (VerifyingVoteProcessor, error)
+	Create(log zerolog.Logger, proposal *model.SignedProposal) (VerifyingVoteProcessor, error)
 }
diff --git a/consensus/hotstuff/vote_test.go b/consensus/hotstuff/vote_test.go
new file mode 100644
index 00000000000..6afedf98038
--- /dev/null
+++ b/consensus/hotstuff/vote_test.go
@@ -0,0 +1,85 @@
+package hotstuff
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/onflow/flow-go/consensus/hotstuff/model"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestNewVote verifies that NewVote correctly constructs a Vote from valid input
+// and returns an error when any required field is missing.
+// It covers:
+//   - valid vote creation
+//   - missing BlockID
+//   - missing SignerID
+//   - missing SigData
+func TestNewVote(t *testing.T) {
+	const validView = uint64(1)
+
+	t.Run("valid vote", func(t *testing.T) {
+		blockID := unittest.IdentifierFixture()
+		signerID := unittest.IdentifierFixture()
+		sigData := []byte{0, 1, 2}
+
+		uv := model.UntrustedVote{
+			View:     validView,
+			BlockID:  blockID,
+			SignerID: signerID,
+			SigData:  sigData,
+		}
+
+		v, err := model.NewVote(uv)
+		assert.NoError(t, err)
+		assert.NotNil(t, v)
+		assert.Equal(t, validView, v.View)
+		assert.Equal(t, blockID, v.BlockID)
+		assert.Equal(t, signerID, v.SignerID)
+		assert.Equal(t, sigData, v.SigData)
+	})
+
+	t.Run("empty BlockID", func(t *testing.T) {
+		uv := model.UntrustedVote{
+			View:     validView,
+			BlockID:  flow.ZeroID,
+			SignerID: unittest.IdentifierFixture(),
+			SigData:  []byte{0, 1, 2},
+		}
+
+		v, err := model.NewVote(uv)
+		assert.Error(t, err)
+		assert.Nil(t, v)
+		assert.Contains(t, err.Error(), "BlockID")
+	})
+
+	t.Run("empty SignerID", func(t *testing.T) {
+		uv := model.UntrustedVote{
+			View:     validView,
+			BlockID:  unittest.IdentifierFixture(),
+			SignerID: flow.ZeroID,
+			SigData:  []byte{0, 1, 2},
+		}
+
+		v, err := model.NewVote(uv)
+		assert.Error(t, err)
+		assert.Nil(t, v)
+		assert.Contains(t, err.Error(), "SignerID")
+	})
+
+	t.Run("empty SigData", func(t *testing.T) {
+		uv := model.UntrustedVote{
+			View:     validView,
+			BlockID:  unittest.IdentifierFixture(),
+			SignerID: unittest.IdentifierFixture(),
+			SigData:  nil,
+		}
+
+		v, err := model.NewVote(uv)
+		assert.Error(t, err)
+		assert.Nil(t, v)
+		assert.Contains(t, err.Error(), "SigData")
+	})
+}
"github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/util" ) // defaultVoteAggregatorWorkers number of workers to dispatch events for vote aggregators @@ -38,11 +39,11 @@ type VoteAggregator struct { hotstuffMetrics module.HotstuffMetrics engineMetrics module.EngineMetrics notifier hotstuff.VoteAggregationViolationConsumer - lowestRetainedView counters.StrictMonotonousCounter // lowest view, for which we still process votes + lowestRetainedView counters.StrictMonotonicCounter // lowest view, for which we still process votes collectors hotstuff.VoteCollectors queuedMessagesNotifier engine.Notifier finalizationEventsNotifier engine.Notifier - finalizedView counters.StrictMonotonousCounter // cache the last finalized view to queue up the pruning work, and unblock the caller who's delivering the finalization event. + finalizedView counters.StrictMonotonicCounter // cache the last finalized view to queue up the pruning work, and unblock the caller who's delivering the finalization event. queuedVotes *fifoqueue.FifoQueue queuedBlocks *fifoqueue.FifoQueue } @@ -79,8 +80,8 @@ func NewVoteAggregator( hotstuffMetrics: hotstuffMetrics, engineMetrics: engineMetrics, notifier: notifier, - lowestRetainedView: counters.NewMonotonousCounter(lowestRetainedView), - finalizedView: counters.NewMonotonousCounter(lowestRetainedView), + lowestRetainedView: counters.NewMonotonicCounter(lowestRetainedView), + finalizedView: counters.NewMonotonicCounter(lowestRetainedView), collectors: collectors, queuedVotes: queuedVotes, queuedBlocks: queuedBlocks, @@ -99,24 +100,36 @@ func NewVoteAggregator( aggregator.queuedMessagesProcessingLoop(ctx) }) } - componentBuilder.AddWorker(func(_ irrecoverable.SignalerContext, ready component.ReadyFunc) { + componentBuilder.AddWorker(func(parentCtx irrecoverable.SignalerContext, ready component.ReadyFunc) { // create new context which is not connected to parent // we need to ensure that our internal workers stop before asking // vote collectors to stop. We want to avoid delivering events to already stopped vote collectors ctx, cancel := context.WithCancel(context.Background()) - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx, errCh := irrecoverable.WithSignaler(ctx) + // start vote collectors collectors.Start(signalerCtx) - <-collectors.Ready() - ready() + // Handle the component lifecycle in a separate goroutine so we can capture any errors + // thrown during initialization in the main goroutine. + go func() { + if err := util.WaitClosed(parentCtx, collectors.Ready()); err == nil { + // only signal ready when collectors are ready, but always handle shutdown + ready() + } - // wait for internal workers to stop - wg.Wait() - // signal vote collectors to stop - cancel() - // wait for it to stop - <-collectors.Done() + // wait for internal workers to stop, then signal vote collectors to stop + wg.Wait() + cancel() + }() + + // since we are breaking the connection between parentCtx and signalerCtx, we need to + // explicitly rethrow any errors from signalerCtx to parentCtx, otherwise they are dropped. + // Handle errors in the main worker goroutine to guarantee that they are rethrown to the parent + // before the component is marked done. 
+ if err := util.WaitError(errCh, collectors.Done()); err != nil { + parentCtx.Throw(err) + } }) componentBuilder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { ready() @@ -156,7 +169,7 @@ func (va *VoteAggregator) processQueuedMessages(ctx context.Context) error { msg, ok := va.queuedBlocks.Pop() if ok { - block := msg.(*model.Proposal) + block := msg.(*model.SignedProposal) err := va.processQueuedBlock(block) if err != nil { return fmt.Errorf("could not process pending block %v: %w", block.Block.BlockID, err) @@ -200,7 +213,7 @@ func (va *VoteAggregator) processQueuedVote(vote *model.Vote) error { vote.View, err) } if created { - va.log.Info().Uint64("view", vote.View).Msg("vote collector is created by processing vote") + va.log.Debug().Uint64("view", vote.View).Msg("vote collector is created by processing vote") } err = collector.AddVote(vote) @@ -209,7 +222,7 @@ func (va *VoteAggregator) processQueuedVote(vote *model.Vote) error { vote.View, vote.BlockID, err) } - va.log.Info(). + va.log.Debug(). Uint64("view", vote.View). Hex("block_id", vote.BlockID[:]). Str("vote_id", vote.ID().String()). @@ -224,7 +237,7 @@ func (va *VoteAggregator) processQueuedVote(vote *model.Vote) error { // including the proposer's signature. Otherwise, VoteAggregator might crash or exhibit undefined // behaviour. // No errors are expected during normal operation. -func (va *VoteAggregator) processQueuedBlock(block *model.Proposal) error { +func (va *VoteAggregator) processQueuedBlock(block *model.SignedProposal) error { // check if the block is for a view that has already been pruned (and is thus stale) if block.Block.View < va.lowestRetainedView.Value() { return nil @@ -238,7 +251,7 @@ func (va *VoteAggregator) processQueuedBlock(block *model.Proposal) error { return fmt.Errorf("could not get or create collector for block %v: %w", block.Block.BlockID, err) } if created { - va.log.Info(). + va.log.Debug(). Uint64("view", block.Block.View). Hex("block_id", block.Block.BlockID[:]). Msg("vote collector is created by processing block") @@ -255,7 +268,7 @@ func (va *VoteAggregator) processQueuedBlock(block *model.Proposal) error { return fmt.Errorf("could not process block: %v, %w", block.Block.BlockID, err) } - va.log.Info(). + va.log.Debug(). Uint64("view", block.Block.View). Hex("block_id", block.Block.BlockID[:]). Msg("block has been processed successfully") @@ -293,7 +306,7 @@ func (va *VoteAggregator) AddVote(vote *model.Vote) { // CAUTION: we expect that the input block's validity has been confirmed prior to calling AddBlock, // including the proposer's signature. Otherwise, VoteAggregator might crash or exhibit undefined // behaviour. -func (va *VoteAggregator) AddBlock(block *model.Proposal) { +func (va *VoteAggregator) AddBlock(block *model.SignedProposal) { // It's ok to silently drop blocks in case our processing pipeline is full. // It means that we are probably catching up. if ok := va.queuedBlocks.Push(block); ok { @@ -306,7 +319,7 @@ func (va *VoteAggregator) AddBlock(block *model.Proposal) { // InvalidBlock notifies the VoteAggregator about an invalid proposal, so that it // can process votes for the invalid block and slash the voters. 
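The reworked worker above detaches the collectors' context from the parent so internal workers can be stopped first, which means errors on the detached signaler would otherwise be lost. A condensed sketch of the resulting pattern, using only the `util` and `irrecoverable` helpers named in the hunk:

```go
// Sketch: detached child lifecycle with manual error propagation to the parent.
ctx, cancel := context.WithCancel(context.Background()) // deliberately NOT derived from parentCtx
signalerCtx, errCh := irrecoverable.WithSignaler(ctx)
collectors.Start(signalerCtx)

go func() {
	// WaitClosed returns nil once Ready() closes; an error means the parent shut down first.
	if err := util.WaitClosed(parentCtx, collectors.Ready()); err == nil {
		ready()
	}
	wg.Wait() // internal workers must stop before the collectors are cancelled
	cancel()
}()

// errors thrown on signalerCtx do not reach parentCtx on their own; rethrow them
if err := util.WaitError(errCh, collectors.Done()); err != nil {
	parentCtx.Throw(err)
}
```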
// No errors are expected during normal operations -func (va *VoteAggregator) InvalidBlock(proposal *model.Proposal) error { +func (va *VoteAggregator) InvalidBlock(proposal *model.SignedProposal) error { slashingVoteConsumer := func(vote *model.Vote) { if proposal.Block.BlockID == vote.BlockID { va.notifier.OnVoteForInvalidBlockDetected(vote, proposal) diff --git a/consensus/hotstuff/voteaggregator/vote_aggregator_test.go b/consensus/hotstuff/voteaggregator/vote_aggregator_test.go index 006ab52b744..acc88729eb1 100644 --- a/consensus/hotstuff/voteaggregator/vote_aggregator_test.go +++ b/consensus/hotstuff/voteaggregator/vote_aggregator_test.go @@ -84,13 +84,13 @@ func (s *VoteAggregatorTestSuite) TestOnFinalizedBlock() { // an input to AddBlock (only expects _valid_ blocks per API contract). // The exception should be propagated to the VoteAggregator's internal `ComponentManager`. func (s *VoteAggregatorTestSuite) TestProcessInvalidBlock() { - block := helper.MakeProposal( + block := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal( helper.WithBlock( helper.MakeBlock( helper.WithBlockView(100), ), ), - ) + ))) processed := make(chan struct{}) collector := mocks.NewVoteCollector(s.T()) collector.On("ProcessBlock", block).Run(func(_ mock.Arguments) { diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v2.go b/consensus/hotstuff/votecollector/combined_vote_processor_v2.go index 69d6fb350af..c1b4edececa 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v2.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v2.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "github.com/onflow/crypto" "github.com/rs/zerolog" "go.uber.org/atomic" @@ -11,7 +12,6 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/consensus/hotstuff/verification" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" msig "github.com/onflow/flow-go/module/signature" ) @@ -55,21 +55,13 @@ func (f *combinedVoteProcessorFactoryBaseV2) Create(log zerolog.Logger, block *m return nil, fmt.Errorf("could not create aggregator for staking signatures at block %v: %w", block.BlockID, err) } - publicKeyShares := make([]crypto.PublicKey, 0, len(allParticipants)) dkg, err := f.committee.DKG(block.View) if err != nil { return nil, fmt.Errorf("could not get DKG info at block %v: %w", block.BlockID, err) } - for _, participant := range allParticipants { - pk, err := dkg.KeyShare(participant.NodeID) - if err != nil { - return nil, fmt.Errorf("could not get random beacon key share for %x at block %v: %w", participant.NodeID, block.BlockID, err) - } - publicKeyShares = append(publicKeyShares, pk) - } threshold := msig.RandomBeaconThreshold(int(dkg.Size())) - randomBeaconInspector, err := signature.NewRandomBeaconInspector(dkg.GroupKey(), publicKeyShares, threshold, msg) + randomBeaconInspector, err := signature.NewRandomBeaconInspector(dkg.GroupKey(), dkg.KeyShares(), threshold, msg) if err != nil { return nil, fmt.Errorf("could not create random beacon inspector at block %v: %w", block.BlockID, err) } @@ -235,7 +227,7 @@ func (p *CombinedVoteProcessorV2) Process(vote *model.Vote) error { // checking of conditions for building QC are satisfied totalWeight := p.stakingSigAggtor.TotalWeight() - p.log.Debug().Msgf("processed vote, total weight=(%d), required=(%d)", totalWeight, p.minRequiredWeight) + p.log.Debug().Msgf("processed vote with sig len %d, total weight=(%d), 
required=(%d)", len(vote.SigData), totalWeight, p.minRequiredWeight) if totalWeight < p.minRequiredWeight { return nil } @@ -316,10 +308,15 @@ func buildQCWithPackerAndSigData( return nil, fmt.Errorf("could not pack the block sig data: %w", err) } - return &flow.QuorumCertificate{ + qc, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ View: block.View, BlockID: block.BlockID, SignerIndices: signerIndices, SigData: sigData, - }, nil + }) + if err != nil { + return nil, fmt.Errorf("could not build quorum certificate: %w", err) + } + + return qc, nil } diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go index 63ee234d68a..cbb2b488d9e 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v2_test.go @@ -5,12 +5,13 @@ import ( "math/rand" "sync" "testing" - "time" + "github.com/onflow/crypto" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.uber.org/atomic" + "golang.org/x/exp/slices" "pgregory.net/rapid" bootstrapDKG "github.com/onflow/flow-go/cmd/bootstrap/dkg" @@ -19,18 +20,17 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/helper" mockhotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/consensus/hotstuff/safetyrules" "github.com/onflow/flow-go/consensus/hotstuff/signature" hsig "github.com/onflow/flow-go/consensus/hotstuff/signature" hotstuffvalidator "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/verification" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/local" modulemock "github.com/onflow/flow-go/module/mock" msig "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state/protocol/inmem" - "github.com/onflow/flow-go/state/protocol/seed" storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -58,7 +58,7 @@ func (s *CombinedVoteProcessorV2TestSuite) SetupTest() { s.reconstructor = &mockhotstuff.RandomBeaconReconstructor{} s.packer = &mockhotstuff.Packer{} - s.proposal = helper.MakeProposal() + s.proposal = helper.MakeSignedProposal() s.minRequiredShares = 9 // we require 9 RB shares to reconstruct signature s.rbSharesTotal = 0 @@ -120,7 +120,7 @@ func (s *CombinedVoteProcessorV2TestSuite) TestProcess_InvalidSignatureFormat() rapid.Check(s.T(), func(t *rapid.T) { // create a signature with invalid length vote := unittest.VoteForBlockFixture(s.proposal.Block, func(vote *model.Vote) { - vote.SigData = unittest.RandomBytes(generator.Draw(t, "sig-size").(int)) + vote.SigData = unittest.RandomBytes(generator.Draw(t, "sig-size")) }) err := s.processor.Process(vote) require.Error(s.T(), err) @@ -436,8 +436,8 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCCorrectness(testifyT *testing rapid.Check(testifyT, func(t *rapid.T) { // draw participants in range 1 <= participants <= maxParticipants - participants := rapid.Uint64Range(1, maxParticipants).Draw(t, "participants").(uint64) - beaconSignersCount := rapid.Uint64Range(participants/2+1, participants).Draw(t, "beaconSigners").(uint64) + participants := rapid.Uint64Range(1, maxParticipants).Draw(t, "participants") + beaconSignersCount := 
rapid.Uint64Range(participants/2+1, participants).Draw(t, "beaconSigners") stakingSignersCount := participants - beaconSignersCount require.Equal(t, participants, stakingSignersCount+beaconSignersCount) @@ -597,7 +597,6 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCCorrectness(testifyT *testing } // shuffle votes in random order - rand.Seed(time.Now().UnixNano()) rand.Shuffle(len(votes), func(i, j int) { votes[i], votes[j] = votes[j], votes[i] }) @@ -641,21 +640,21 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCCorrectness(testifyT *testing func TestCombinedVoteProcessorV2_PropertyCreatingQCLiveness(testifyT *testing.T) { rapid.Check(testifyT, func(t *rapid.T) { // draw beacon signers in range 1 <= beaconSignersCount <= 53 - beaconSignersCount := rapid.Uint64Range(1, 53).Draw(t, "beaconSigners").(uint64) + beaconSignersCount := rapid.Uint64Range(1, 53).Draw(t, "beaconSigners") // draw staking signers in range 0 <= stakingSignersCount <= 10 - stakingSignersCount := rapid.Uint64Range(0, 10).Draw(t, "stakingSigners").(uint64) + stakingSignersCount := rapid.Uint64Range(0, 10).Draw(t, "stakingSigners") stakingWeightRange, beaconWeightRange := rapid.Uint64Range(1, 10), rapid.Uint64Range(1, 10) minRequiredWeight := uint64(0) // draw weight for each signer randomly stakingSigners := unittest.IdentityListFixture(int(stakingSignersCount), func(identity *flow.Identity) { - identity.Weight = stakingWeightRange.Draw(t, identity.String()).(uint64) - minRequiredWeight += identity.Weight + identity.InitialWeight = stakingWeightRange.Draw(t, identity.String()) + minRequiredWeight += identity.InitialWeight }) beaconSigners := unittest.IdentityListFixture(int(beaconSignersCount), func(identity *flow.Identity) { - identity.Weight = beaconWeightRange.Draw(t, identity.String()).(uint64) - minRequiredWeight += identity.Weight + identity.InitialWeight = beaconWeightRange.Draw(t, identity.String()) + minRequiredWeight += identity.InitialWeight }) // proposing block @@ -728,14 +727,14 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCLiveness(testifyT *testing.T) // prepare votes for _, signer := range stakingSigners { vote := unittest.VoteForBlockFixture(processor.Block(), VoteWithStakingSig()) - vote.SignerID = signer.ID() - expectStakingAggregatorCalls(vote, signer.Weight) + vote.SignerID = signer.NodeID + expectStakingAggregatorCalls(vote, signer.InitialWeight) votes = append(votes, vote) } for _, signer := range beaconSigners { vote := unittest.VoteForBlockFixture(processor.Block(), VoteWithDoubleSig()) - vote.SignerID = signer.ID() - expectStakingAggregatorCalls(vote, signer.Weight) + vote.SignerID = signer.NodeID + expectStakingAggregatorCalls(vote, signer.InitialWeight) expectedSig := crypto.Signature(vote.SigData[msig.SigLen:]) reconstructor.On("Verify", vote.SignerID, expectedSig).Return(nil).Maybe() reconstructor.On("TrustedAdd", vote.SignerID, expectedSig).Run(func(args mock.Arguments) { @@ -745,7 +744,6 @@ func TestCombinedVoteProcessorV2_PropertyCreatingQCLiveness(testifyT *testing.T) } // shuffle votes in random order - rand.Seed(time.Now().UnixNano()) rand.Shuffle(len(votes), func(i, j int) { votes[i], votes[j] = votes[j], votes[i] }) @@ -786,7 +784,7 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { epochCounter := uint64(3) epochLookup := &modulemock.EpochLookup{} view := uint64(20) - epochLookup.On("EpochForViewWithFallback", view).Return(epochCounter, nil) + epochLookup.On("EpochForView", view).Return(epochCounter, nil) // all committee members run DKG dkgData, 
err := bootstrapDKG.RandomBeaconKG(11, unittest.RandomBytes(32)) @@ -800,6 +798,7 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { stakingSigners := unittest.IdentityListFixture(3) beaconSigners := unittest.IdentityListFixture(8) allIdentities := append(stakingSigners, beaconSigners...) + slices.SortFunc(allIdentities, flow.Canonical[flow.Identity]) // sort in place to avoid taking a copy. require.Equal(t, len(dkgData.PubKeyShares), len(allIdentities)) dkgParticipants := make(map[flow.Identifier]flow.DKGParticipant) // fill dkg participants data @@ -815,57 +814,61 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { identity.StakingPubKey = stakingPriv.PublicKey() keys := &storagemock.SafeBeaconKeys{} - // there is no DKG key for this epoch + // there is no Random Beacon key for this epoch keys.On("RetrieveMyBeaconPrivateKey", epochCounter).Return(nil, false, nil) beaconSignerStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys) - me, err := local.New(identity, stakingPriv) + me, err := local.New(identity.IdentitySkeleton, stakingPriv) require.NoError(t, err) signers[identity.NodeID] = verification.NewCombinedSigner(me, beaconSignerStore) } + dkgIndexMap := make(flow.DKGIndexMap) for _, identity := range beaconSigners { stakingPriv := unittest.StakingPrivKeyFixture() identity.StakingPubKey = stakingPriv.PublicKey() participantData := dkgParticipants[identity.NodeID] - dkgKey := encodable.RandomBeaconPrivKey{ PrivateKey: dkgData.PrivKeyShares[participantData.Index], } + dkgIndexMap[identity.NodeID] = int(participantData.Index) keys := &storagemock.SafeBeaconKeys{} - // there is DKG key for this epoch + // there is Random Beacon key for this epoch keys.On("RetrieveMyBeaconPrivateKey", epochCounter).Return(dkgKey, true, nil) beaconSignerStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys) - me, err := local.New(identity, stakingPriv) + me, err := local.New(identity.IdentitySkeleton, stakingPriv) require.NoError(t, err) signers[identity.NodeID] = verification.NewCombinedSigner(me, beaconSignerStore) } - leader := stakingSigners[0] + leader := allIdentities[0] block := helper.MakeBlock(helper.WithBlockView(view), - helper.WithBlockProposer(leader.NodeID)) - - inmemDKG, err := inmem.DKGFromEncodable(inmem.EncodableDKG{ - GroupKey: encodable.RandomBeaconPubKey{ - PublicKey: dkgData.PubGroupKey, - }, - Participants: dkgParticipants, + helper.WithBlockProposer(leader.NodeID), + helper.WithBlockQC(helper.MakeQC(helper.WithQCView(view-1)))) + proposal := helper.MakeProposal(helper.WithBlock(block)) + + inmemDKG := inmem.NewDKG(nil, &flow.EpochCommit{ + DKGGroupKey: dkgData.PubGroupKey, + DKGParticipantKeys: dkgData.PubKeyShares, + DKGIndexMap: dkgIndexMap, }) - require.NoError(t, err) committee := &mockhotstuff.DynamicCommittee{} - committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(allIdentities.TotalWeight()), nil) - committee.On("IdentitiesByEpoch", block.View).Return(allIdentities, nil) + committee.On("LeaderForView", block.View).Return(leader.NodeID, nil).Maybe() + committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(allIdentities.ToSkeleton().TotalWeight()), nil) + committee.On("IdentitiesByEpoch", block.View).Return(allIdentities.ToSkeleton(), nil) committee.On("IdentitiesByBlock", block.BlockID).Return(allIdentities, nil) + committee.On("IdentityByBlock", block.BlockID, leader.NodeID).Return(leader, nil) committee.On("DKG", 
block.View).Return(inmemDKG, nil) + committee.On("Self").Return(leader.NodeID) votes := make([]*model.Vote, 0, len(allIdentities)) @@ -878,7 +881,19 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { } // create and sign proposal - proposal, err := signers[leader.NodeID].CreateProposal(block) + persist := mockhotstuff.NewPersister(t) + safetyData := &hotstuff.SafetyData{ + LockedOneChainView: block.View - 1, + HighestAcknowledgedView: block.View - 1, + } + persist.On("GetSafetyData", mock.Anything).Return(safetyData, nil).Once() + persist.On("PutSafetyData", mock.Anything).Return(nil) + safetyRules, err := safetyrules.New(signers[leader.NodeID], persist, committee) + require.NoError(t, err) + vote, err := safetyRules.SignOwnProposal(proposal) + require.NoError(t, err) + signedProposal := helper.MakeSignedProposal(helper.WithProposal(proposal), helper.WithSigData(vote.SigData)) + require.NoError(t, err) qcCreated := false @@ -897,7 +912,7 @@ func TestCombinedVoteProcessorV2_BuildVerifyQC(t *testing.T) { } voteProcessorFactory := NewCombinedVoteProcessorFactory(committee, onQCCreated) - voteProcessor, err := voteProcessorFactory.Create(unittest.Logger(), proposal) + voteProcessor, err := voteProcessorFactory.Create(unittest.Logger(), signedProposal) require.NoError(t, err) // process votes by new leader, this will result in producing new QC @@ -942,16 +957,16 @@ func TestReadRandomSourceFromPackedQCV2(t *testing.T) { // create a packer committee := &mockhotstuff.DynamicCommittee{} committee.On("IdentitiesByBlock", block.BlockID).Return(allSigners, nil) - committee.On("IdentitiesByEpoch", block.View).Return(allSigners, nil) + committee.On("IdentitiesByEpoch", block.View).Return(allSigners.ToSkeleton(), nil) packer := signature.NewConsensusSigDataPacker(committee) qc, err := buildQCWithPackerAndSigData(packer, block, blockSigData) require.NoError(t, err) - randomSource, err := seed.FromParentQCSignature(qc.SigData) + randomSource, err := model.BeaconSignature(qc) require.NoError(t, err) - randomSourceAgain, err := seed.FromParentQCSignature(qc.SigData) + randomSourceAgain, err := model.BeaconSignature(qc) require.NoError(t, err) // verify the random source is deterministic diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v3.go b/consensus/hotstuff/votecollector/combined_vote_processor_v3.go index 1a2bdf72fee..e47234421be 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v3.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v3.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "github.com/onflow/crypto" "github.com/rs/zerolog" "go.uber.org/atomic" @@ -11,10 +12,11 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/consensus/hotstuff/verification" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encoding" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" msig "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/state/protocol" ) /* **************** Base-Factory for CombinedVoteProcessors ***************** */ @@ -47,39 +49,48 @@ func (f *combinedVoteProcessorFactoryBaseV3) Create(log zerolog.Logger, block *m // message that has to be verified against aggregated signature msg := verification.MakeVoteMessage(block.View, block.BlockID) - // prepare the staking public keys of participants - stakingKeys := make([]crypto.PublicKey, 0, len(allParticipants)) - for 
_, participant := range allParticipants { - stakingKeys = append(stakingKeys, participant.StakingPubKey) - } - - stakingSigAggtor, err := signature.NewWeightedSignatureAggregator(allParticipants, stakingKeys, msg, msig.ConsensusVoteTag) - if err != nil { - return nil, fmt.Errorf("could not create aggregator for staking signatures: %w", err) - } - dkg, err := f.committee.DKG(block.View) if err != nil { return nil, fmt.Errorf("could not get DKG info at block %v: %w", block.BlockID, err) } - // prepare the random beacon public keys of participants + // Prepare the staking public keys of participants. + // CAUTION: while every participant must have a staking key (hence len(allParticipants) == len(stakingKeys)) + // some consensus nodes might not be part of the Random Beacon Committee. + // - We use 𝒫 as shorthand notation of `allParticipants`, which is the set of all nodes that are authorized to vote for `block`. + // - The DKG committee 𝒟 is the set of parties that were authorized to participate in the DKG (happy path; or + // eligible to receive a private key share from an alternative source on the fallback path). + // With 𝓑 we denote the subset 𝓑 := (𝒟 ∩ 𝒫), i.e. all nodes that are authorized to vote for `block` _and_ are part of the + // DKG committee. Only for nodes ρ ∈ 𝓑, the method `dkg.KeyShare(ρ.NodeID)` will return a public key. Note that there might + // not exist a private key for ρ (e.g. if ρ failed the DKG), but `dkg.KeyShare(ρ.NodeID)` nevertheless returns a key. + stakingKeys := make([]crypto.PublicKey, 0, len(allParticipants)) + beaconParticipants := make(flow.IdentityList, 0, len(allParticipants)) beaconKeys := make([]crypto.PublicKey, 0, len(allParticipants)) for _, participant := range allParticipants { - pk, err := dkg.KeyShare(participant.NodeID) + stakingKeys = append(stakingKeys, participant.StakingPubKey) // all nodes have staking keys + pk, err := dkg.KeyShare(participant.NodeID) // but only a subset of nodes might have random beacon keys if err != nil { - return nil, fmt.Errorf("could not get random beacon key share for %x: %w", participant.NodeID, err) + if protocol.IsIdentityNotFound(err) { + continue + } + return nil, irrecoverable.NewException(fmt.Errorf("unexpected error retrieving random beacon key share for node %v: %w", participant.NodeID, err)) } + beaconParticipants = append(beaconParticipants, participant) beaconKeys = append(beaconKeys, pk) } - rbSigAggtor, err := signature.NewWeightedSignatureAggregator(allParticipants, beaconKeys, msg, msig.RandomBeaconTag) + stakingSigAggtor, err := signature.NewWeightedSignatureAggregator(allParticipants, stakingKeys, msg, msig.ConsensusVoteTag) + if err != nil { + return nil, fmt.Errorf("could not create aggregator for staking signatures: %w", err) + } + + beaconAggregator, err := signature.NewWeightedSignatureAggregator(beaconParticipants, beaconKeys, msg, msig.RandomBeaconTag) if err != nil { - return nil, fmt.Errorf("could not create aggregator for thershold signatures: %w", err) + return nil, fmt.Errorf("could not create aggregator for threshold signatures: %w", err) } threshold := msig.RandomBeaconThreshold(int(dkg.Size())) - randomBeaconInspector, err := signature.NewRandomBeaconInspector(dkg.GroupKey(), beaconKeys, threshold, msg) + randomBeaconInspector, err := signature.NewRandomBeaconInspector(dkg.GroupKey(), dkg.KeyShares(), threshold, msg) if err != nil { return nil, fmt.Errorf("could not create random beacon inspector: %w", err) } @@ -94,7 +105,7 @@ func (f *combinedVoteProcessorFactoryBaseV3) 
Create(log zerolog.Logger, block *m log: log.With().Hex("block_id", block.BlockID[:]).Logger(), block: block, stakingSigAggtor: stakingSigAggtor, - rbSigAggtor: rbSigAggtor, + rbSigAggtor: beaconAggregator, rbRector: rbRector, onQCCreated: f.onQCCreated, packer: f.packer, @@ -313,10 +324,15 @@ func (p *CombinedVoteProcessorV3) buildQC() (*flow.QuorumCertificate, error) { return nil, fmt.Errorf("could not pack the block sig data: %w", err) } - return &flow.QuorumCertificate{ + qc, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ View: p.block.View, BlockID: p.block.BlockID, SignerIndices: signerIndices, SigData: sigData, - }, nil + }) + if err != nil { + return nil, fmt.Errorf("could not build quorum certificate: %w", err) + } + + return qc, nil } diff --git a/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go b/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go index e3d370dfb4f..6afcf56392a 100644 --- a/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go +++ b/consensus/hotstuff/votecollector/combined_vote_processor_v3_test.go @@ -3,10 +3,11 @@ package votecollector import ( "errors" "math/rand" + "slices" "sync" "testing" - "time" + "github.com/onflow/crypto" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -22,13 +23,12 @@ import ( hsig "github.com/onflow/flow-go/consensus/hotstuff/signature" hotstuffvalidator "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/verification" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module/local" modulemock "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" - "github.com/onflow/flow-go/state/protocol/inmem" storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -59,7 +59,7 @@ func (s *CombinedVoteProcessorV3TestSuite) SetupTest() { s.rbSigAggregator = &mockhotstuff.WeightedSignatureAggregator{} s.reconstructor = &mockhotstuff.RandomBeaconReconstructor{} s.packer = &mockhotstuff.Packer{} - s.proposal = helper.MakeProposal() + s.proposal = helper.MakeSignedProposal() s.minRequiredShares = 9 // we require 9 RB shares to reconstruct signature s.thresholdTotalWeight, s.rbSharesTotal = atomic.Uint64{}, atomic.Uint64{} @@ -435,8 +435,8 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCCorrectness(testifyT *testing rapid.Check(testifyT, func(t *rapid.T) { // draw participants in range 1 <= participants <= maxParticipants - participants := rapid.Uint64Range(1, maxParticipants).Draw(t, "participants").(uint64) - beaconSignersCount := rapid.Uint64Range(participants/2+1, participants).Draw(t, "beaconSigners").(uint64) + participants := rapid.Uint64Range(1, maxParticipants).Draw(t, "participants") + beaconSignersCount := rapid.Uint64Range(participants/2+1, participants).Draw(t, "beaconSigners") stakingSignersCount := participants - beaconSignersCount require.Equal(t, participants, stakingSignersCount+beaconSignersCount) @@ -647,7 +647,6 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCCorrectness(testifyT *testing } // shuffle votes in random order - rand.Seed(time.Now().UnixNano()) rand.Shuffle(len(votes), func(i, j int) { votes[i], votes[j] = votes[j], votes[i] }) @@ -751,21 +750,21 @@ func 
TestCombinedVoteProcessorV3_OnlyRandomBeaconSigners(testifyT *testing.T) { func TestCombinedVoteProcessorV3_PropertyCreatingQCLiveness(testifyT *testing.T) { rapid.Check(testifyT, func(t *rapid.T) { // draw beacon signers in range 1 <= beaconSignersCount <= 53 - beaconSignersCount := rapid.Uint64Range(1, 53).Draw(t, "beaconSigners").(uint64) + beaconSignersCount := rapid.Uint64Range(1, 53).Draw(t, "beaconSigners") // draw staking signers in range 0 <= stakingSignersCount <= 10 - stakingSignersCount := rapid.Uint64Range(0, 10).Draw(t, "stakingSigners").(uint64) + stakingSignersCount := rapid.Uint64Range(0, 10).Draw(t, "stakingSigners") stakingWeightRange, beaconWeightRange := rapid.Uint64Range(1, 10), rapid.Uint64Range(1, 10) minRequiredWeight := uint64(0) // draw weight for each signer randomly stakingSigners := unittest.IdentityListFixture(int(stakingSignersCount), func(identity *flow.Identity) { - identity.Weight = stakingWeightRange.Draw(t, identity.String()).(uint64) - minRequiredWeight += identity.Weight + identity.InitialWeight = stakingWeightRange.Draw(t, identity.String()) + minRequiredWeight += identity.InitialWeight }) beaconSigners := unittest.IdentityListFixture(int(beaconSignersCount), func(identity *flow.Identity) { - identity.Weight = beaconWeightRange.Draw(t, identity.String()).(uint64) - minRequiredWeight += identity.Weight + identity.InitialWeight = beaconWeightRange.Draw(t, identity.String()) + minRequiredWeight += identity.InitialWeight }) // proposing block @@ -855,8 +854,8 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCLiveness(testifyT *testing.T) // prepare votes for _, signer := range stakingSigners { vote := unittest.VoteForBlockFixture(processor.Block(), unittest.VoteWithStakingSig()) - vote.SignerID = signer.ID() - weight := signer.Weight + vote.SignerID = signer.NodeID + weight := signer.InitialWeight expectedSig := crypto.Signature(vote.SigData[1:]) stakingAggregator.On("Verify", vote.SignerID, expectedSig).Return(nil).Maybe() stakingAggregator.On("TrustedAdd", vote.SignerID, expectedSig).Run(func(args mock.Arguments) { @@ -866,8 +865,8 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCLiveness(testifyT *testing.T) } for _, signer := range beaconSigners { vote := unittest.VoteForBlockFixture(processor.Block(), unittest.VoteWithBeaconSig()) - vote.SignerID = signer.ID() - weight := signer.Weight + vote.SignerID = signer.NodeID + weight := signer.InitialWeight expectedSig := crypto.Signature(vote.SigData[1:]) rbSigAggregator.On("Verify", vote.SignerID, expectedSig).Return(nil).Maybe() rbSigAggregator.On("TrustedAdd", vote.SignerID, expectedSig).Run(func(args mock.Arguments) { @@ -880,7 +879,6 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCLiveness(testifyT *testing.T) } // shuffle votes in random order - rand.Seed(time.Now().UnixNano()) rand.Shuffle(len(votes), func(i, j int) { votes[i], votes[j] = votes[j], votes[i] }) @@ -921,24 +919,33 @@ func TestCombinedVoteProcessorV3_PropertyCreatingQCLiveness(testifyT *testing.T) func TestCombinedVoteProcessorV3_BuildVerifyQC(t *testing.T) { epochCounter := uint64(3) epochLookup := &modulemock.EpochLookup{} - view := uint64(20) - epochLookup.On("EpochForViewWithFallback", view).Return(epochCounter, nil) + proposerView := uint64(20) + epochLookup.On("EpochForView", proposerView).Return(epochCounter, nil) - dkgData, err := bootstrapDKG.RandomBeaconKG(11, unittest.RandomBytes(32)) + dkgData, err := bootstrapDKG.RandomBeaconKG(9, unittest.RandomBytes(32)) require.NoError(t, err) // signers hold objects that 
are created with private key and can sign votes and proposals
	signers := make(map[flow.Identifier]*verification.CombinedSignerV3)

-	// prepare staking signers, each signer has it's own private/public key pair
-	// stakingSigners sign only with staking key, meaning they have failed DKG
+	// prepare consensus committee:
+	// * 3 signers that have the staking key, but failed DKG and therefore hold no Random Beacon key
+	// * 8 signers that have the staking key and have the Random Beacon key
+	// * 1 signer that was ejected from the committee but still took part in DKG.
+	// Total consensus committee is 11.
+	// Total random beacon committee is 9.
+	// This way, both the random beacon committee and the consensus committee contain nodes that are not part of the other committee,
+	// therefore forming a symmetric difference.
	stakingSigners := unittest.IdentityListFixture(3)
-	beaconSigners := unittest.IdentityListFixture(8)
-	allIdentities := append(stakingSigners, beaconSigners...)
-	require.Equal(t, len(dkgData.PubKeyShares), len(allIdentities))
+	beaconAndStakingSigners := unittest.IdentityListFixture(8)
+	onlyBeaconSigner := unittest.IdentityFixture()
+	beaconSigners := append(beaconAndStakingSigners, onlyBeaconSigner).Sort(flow.Canonical[flow.Identity])
+	allIdentities := append(stakingSigners, beaconAndStakingSigners...)
+	slices.SortFunc(allIdentities, flow.Canonical[flow.Identity]) // sort in place to avoid taking a copy.
+	require.Equal(t, len(dkgData.PubKeyShares), len(beaconSigners),
+		"require the most general case: consensus and random beacon committees form a symmetric difference")
	dkgParticipants := make(map[flow.Identifier]flow.DKGParticipant)
-	// fill dkg participants data
-	for index, identity := range allIdentities {
+	for index, identity := range beaconSigners {
		dkgParticipants[identity.NodeID] = flow.DKGParticipant{
			Index:    uint(index),
			KeyShare: dkgData.PubKeyShares[index],
@@ -950,18 +957,18 @@ func TestCombinedVoteProcessorV3_BuildVerifyQC(t *testing.T) {
		stakingPriv := unittest.StakingPrivKeyFixture()
		identity.StakingPubKey = stakingPriv.PublicKey()

		keys := &storagemock.SafeBeaconKeys{}
-		// there is no DKG key for this epoch
+		// there is no Random Beacon key for this epoch
		keys.On("RetrieveMyBeaconPrivateKey", epochCounter).Return(nil, false, nil)

		beaconSignerStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys)

-		me, err := local.New(identity, stakingPriv)
+		me, err := local.New(identity.IdentitySkeleton, stakingPriv)
		require.NoError(t, err)

		signers[identity.NodeID] = verification.NewCombinedSignerV3(me, beaconSignerStore)
	}

-	for _, identity := range beaconSigners {
+	for _, identity := range beaconAndStakingSigners {
		stakingPriv := unittest.StakingPrivKeyFixture()
		identity.StakingPubKey = stakingPriv.PublicKey()

@@ -972,49 +979,37 @@ func TestCombinedVoteProcessorV3_BuildVerifyQC(t *testing.T) {
		}

		keys := &storagemock.SafeBeaconKeys{}
-		// there is DKG key for this epoch
+		// there is a Random Beacon key for this epoch
		keys.On("RetrieveMyBeaconPrivateKey", epochCounter).Return(dkgKey, true, nil)

		beaconSignerStore := hsig.NewEpochAwareRandomBeaconKeyStore(epochLookup, keys)

-		me, err := local.New(identity, stakingPriv)
+		me, err := local.New(identity.IdentitySkeleton, stakingPriv)
		require.NoError(t, err)

		signers[identity.NodeID] = verification.NewCombinedSignerV3(me, beaconSignerStore)
	}

	leader := stakingSigners[0]
+	block := helper.MakeBlock(helper.WithBlockView(proposerView), helper.WithBlockProposer(leader.NodeID))

-	block := helper.MakeBlock(helper.WithBlockView(view),
-		helper.WithBlockProposer(leader.NodeID))
-
-
inmemDKG, err := inmem.DKGFromEncodable(inmem.EncodableDKG{ - GroupKey: encodable.RandomBeaconPubKey{ - PublicKey: dkgData.PubGroupKey, - }, - Participants: dkgParticipants, - }) + committee, err := committees.NewStaticCommittee(allIdentities, flow.ZeroID, dkgParticipants, dkgData.PubGroupKey) require.NoError(t, err) - committee := &mockhotstuff.DynamicCommittee{} - committee.On("IdentitiesByBlock", block.BlockID).Return(allIdentities, nil) - committee.On("IdentitiesByEpoch", block.View).Return(allIdentities, nil) - committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(allIdentities.TotalWeight()), nil) - committee.On("DKG", block.View).Return(inmemDKG, nil) - votes := make([]*model.Vote, 0, len(allIdentities)) // first staking signer will be leader collecting votes for proposal // prepare votes for every member of committee except leader - for _, signer := range allIdentities[1:] { + for _, signer := range allIdentities.Filter(filter.Not(filter.HasNodeID[flow.Identity](leader.NodeID))) { vote, err := signers[signer.NodeID].CreateVote(block) require.NoError(t, err) votes = append(votes, vote) } // create and sign proposal - proposal, err := signers[leader.NodeID].CreateProposal(block) + leaderVote, err := signers[leader.NodeID].CreateVote(block) require.NoError(t, err) + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(block))), helper.WithSigData(leaderVote.SigData)) qcCreated := false onQCCreated := func(qc *flow.QuorumCertificate) { diff --git a/consensus/hotstuff/votecollector/factory.go b/consensus/hotstuff/votecollector/factory.go index 2c515fc052c..29a5ef00abe 100644 --- a/consensus/hotstuff/votecollector/factory.go +++ b/consensus/hotstuff/votecollector/factory.go @@ -16,7 +16,7 @@ import ( // CAUTION: the baseFactory creates the VerifyingVoteProcessor for the given block. It // does _not_ check the proposer's vote for its own block. The API reflects this by // expecting a `model.Block` as input (which does _not_ contain the proposer vote) as -// opposed to `model.Proposal` (combines block with proposer's vote). +// opposed to `model.SignedProposal` (combines block with proposer's vote). // Therefore, baseFactory does _not_ implement `hotstuff.VoteProcessorFactory` by itself. // The VoteProcessorFactory adds the missing logic to verify the proposer's vote, by // wrapping the baseFactory (decorator pattern). @@ -40,13 +40,18 @@ var _ hotstuff.VoteProcessorFactory = (*VoteProcessorFactory)(nil) // A VerifyingVoteProcessor are only created for proposals with valid proposer votes. 
// Expected error returns during normal operations:
// * model.InvalidProposalError - proposal has invalid proposer vote
-func (f *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.Proposal) (hotstuff.VerifyingVoteProcessor, error) {
+func (f *VoteProcessorFactory) Create(log zerolog.Logger, proposal *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error) {
	processor, err := f.baseFactory(log, proposal.Block)
	if err != nil {
		return nil, fmt.Errorf("instantiating vote processor for block %v failed: %w", proposal.Block.BlockID, err)
	}
-	err = processor.Process(proposal.ProposerVote())
+	vote, err := proposal.ProposerVote()
+	if err != nil {
+		return nil, fmt.Errorf("could not get proposer vote from proposal: %w", err)
+	}
+
+	err = processor.Process(vote)
	if err != nil {
		if model.IsInvalidVoteError(err) {
			return nil, model.NewInvalidProposalErrorf(proposal, "invalid proposer vote: %w", err)
diff --git a/consensus/hotstuff/votecollector/factory_test.go b/consensus/hotstuff/votecollector/factory_test.go
index 9adeaef98f8..7102967e956 100644
--- a/consensus/hotstuff/votecollector/factory_test.go
+++ b/consensus/hotstuff/votecollector/factory_test.go
@@ -19,9 +19,11 @@ import (
 func TestVoteProcessorFactory_CreateWithValidProposal(t *testing.T) {
	mockedFactory := mockhotstuff.VoteProcessorFactory{}

-	proposal := helper.MakeProposal()
+	proposal := helper.MakeSignedProposal()
	mockedProcessor := &mockhotstuff.VerifyingVoteProcessor{}
-	mockedProcessor.On("Process", proposal.ProposerVote()).Return(nil).Once()
+	vote, err := proposal.ProposerVote()
+	require.NoError(t, err)
+	mockedProcessor.On("Process", vote).Return(nil).Once()
	mockedFactory.On("Create", unittest.Logger(), proposal).Return(mockedProcessor, nil).Once()

	voteProcessorFactory := &VoteProcessorFactory{
@@ -44,9 +46,11 @@ func TestVoteProcessorFactory_CreateWithInvalidVote(t *testing.T) {
	mockedFactory := mockhotstuff.VoteProcessorFactory{}

	t.Run("invalid-vote", func(t *testing.T) {
-		proposal := helper.MakeProposal()
+		proposal := helper.MakeSignedProposal()
		mockedProcessor := &mockhotstuff.VerifyingVoteProcessor{}
-		mockedProcessor.On("Process", proposal.ProposerVote()).Return(model.NewInvalidVoteErrorf(proposal.ProposerVote(), "")).Once()
+		vote, err := proposal.ProposerVote()
+		require.NoError(t, err)
+		mockedProcessor.On("Process", vote).Return(model.NewInvalidVoteErrorf(vote, "")).Once()
		mockedFactory.On("Create", unittest.Logger(), proposal).Return(mockedProcessor, nil).Once()

		voteProcessorFactory := &VoteProcessorFactory{
@@ -63,10 +67,12 @@ func TestVoteProcessorFactory_CreateWithInvalidVote(t *testing.T) {
		mockedProcessor.AssertExpectations(t)
	})
	t.Run("process-vote-exception", func(t *testing.T) {
-		proposal := helper.MakeProposal()
+		proposal := helper.MakeSignedProposal()
		mockedProcessor := &mockhotstuff.VerifyingVoteProcessor{}
		exception := errors.New("process-exception")
-		mockedProcessor.On("Process", proposal.ProposerVote()).Return(exception).Once()
+		vote, err := proposal.ProposerVote()
+		require.NoError(t, err)
+		mockedProcessor.On("Process", vote).Return(exception).Once()
		mockedFactory.On("Create", unittest.Logger(), proposal).Return(mockedProcessor, nil).Once()

@@ -93,7 +99,7 @@ func TestVoteProcessorFactory_CreateProcessException(t *testing.T) {
	mockedFactory := mockhotstuff.VoteProcessorFactory{}

-	proposal := helper.MakeProposal()
+	proposal := helper.MakeSignedProposal()
	exception := errors.New("create-exception")
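A compact, hedged reference for the two-step construction these tests migrate to (helper option names exactly as in this diff; the view value is illustrative and `mockedProcessor` stands for the mock used in the surrounding tests):

```go
// Sketch: build a SignedProposal via the nested helpers, then recover the
// proposer's vote. ProposerVote now returns an error that callers must check.
proposal := helper.MakeSignedProposal(
	helper.WithProposal(helper.MakeProposal(helper.WithBlock(
		helper.MakeBlock(helper.WithBlockView(100)),
	))),
)
vote, err := proposal.ProposerVote() // derives the proposer's vote from the proposal and its SigData
require.NoError(t, err)
mockedProcessor.On("Process", vote).Return(nil).Once() // expectations are set on the derived vote
```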
mockedFactory.On("Create", unittest.Logger(), proposal).Return(nil, exception).Once() diff --git a/consensus/hotstuff/votecollector/staking_vote_processor.go b/consensus/hotstuff/votecollector/staking_vote_processor.go index a470d97bc67..cd9814a6d96 100644 --- a/consensus/hotstuff/votecollector/staking_vote_processor.go +++ b/consensus/hotstuff/votecollector/staking_vote_processor.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "github.com/onflow/crypto" "github.com/rs/zerolog" "go.uber.org/atomic" @@ -11,7 +12,6 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/consensus/hotstuff/verification" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" msig "github.com/onflow/flow-go/module/signature" ) @@ -179,12 +179,17 @@ func (p *StakingVoteProcessor) buildQC() (*flow.QuorumCertificate, error) { return nil, fmt.Errorf("could not encode signer indices: %w", err) } - return &flow.QuorumCertificate{ + qc, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ View: p.block.View, BlockID: p.block.BlockID, SignerIndices: signerIndices, SigData: aggregatedStakingSig, - }, nil + }) + if err != nil { + return nil, fmt.Errorf("could not build quorum certificate: %w", err) + } + + return qc, nil } func (p *StakingVoteProcessor) signerIndicesFromIdentities(signerIDs flow.IdentifierList) ([]byte, error) { diff --git a/consensus/hotstuff/votecollector/staking_vote_processor_test.go b/consensus/hotstuff/votecollector/staking_vote_processor_test.go index b6efe8f93c4..1b096419c4d 100644 --- a/consensus/hotstuff/votecollector/staking_vote_processor_test.go +++ b/consensus/hotstuff/votecollector/staking_vote_processor_test.go @@ -5,6 +5,7 @@ import ( "sync" "testing" + "github.com/onflow/crypto" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -17,7 +18,6 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" hotstuffvalidator "github.com/onflow/flow-go/consensus/hotstuff/validator" "github.com/onflow/flow-go/consensus/hotstuff/verification" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/local" modulemock "github.com/onflow/flow-go/module/mock" @@ -250,8 +250,8 @@ func (s *StakingVoteProcessorTestSuite) TestProcess_ConcurrentCreatingQC() { func TestStakingVoteProcessorV2_BuildVerifyQC(t *testing.T) { epochCounter := uint64(3) epochLookup := &modulemock.EpochLookup{} - view := uint64(20) - epochLookup.On("EpochForViewWithFallback", view).Return(epochCounter, nil) + proposerView := uint64(20) + epochLookup.On("EpochForView", proposerView).Return(epochCounter, nil) // signers hold objects that are created with private key and can sign votes and proposals signers := make(map[flow.Identifier]*verification.StakingSigner) @@ -260,21 +260,19 @@ func TestStakingVoteProcessorV2_BuildVerifyQC(t *testing.T) { stakingPriv := unittest.StakingPrivKeyFixture() identity.StakingPubKey = stakingPriv.PublicKey() - me, err := local.New(identity, stakingPriv) + me, err := local.New(identity.IdentitySkeleton, stakingPriv) require.NoError(t, err) signers[identity.NodeID] = verification.NewStakingSigner(me) - }) + }).Sort(flow.Canonical[flow.Identity]) leader := stakingSigners[0] - - block := helper.MakeBlock(helper.WithBlockView(view), - helper.WithBlockProposer(leader.NodeID)) + block := helper.MakeBlock(helper.WithBlockView(proposerView), 
helper.WithBlockProposer(leader.NodeID)) committee := &mockhotstuff.DynamicCommittee{} - committee.On("IdentitiesByEpoch", block.View).Return(stakingSigners, nil) + committee.On("IdentitiesByEpoch", block.View).Return(stakingSigners.ToSkeleton(), nil) committee.On("IdentitiesByBlock", block.BlockID).Return(stakingSigners, nil) - committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(stakingSigners.TotalWeight()), nil) + committee.On("QuorumThresholdForView", mock.Anything).Return(committees.WeightThresholdToBuildQC(stakingSigners.ToSkeleton().TotalWeight()), nil) votes := make([]*model.Vote, 0, len(stakingSigners)) @@ -287,8 +285,10 @@ func TestStakingVoteProcessorV2_BuildVerifyQC(t *testing.T) { } // create and sign proposal - proposal, err := signers[leader.NodeID].CreateProposal(block) + leaderVote, err := signers[leader.NodeID].CreateVote(block) require.NoError(t, err) + proposal := helper.MakeSignedProposal(helper.WithProposal( + helper.MakeProposal(helper.WithBlock(block))), helper.WithSigData(leaderVote.SigData)) qcCreated := false onQCCreated := func(qc *flow.QuorumCertificate) { diff --git a/consensus/hotstuff/votecollector/statemachine.go b/consensus/hotstuff/votecollector/statemachine.go index d62159ea9ef..60558cf2aaf 100644 --- a/consensus/hotstuff/votecollector/statemachine.go +++ b/consensus/hotstuff/votecollector/statemachine.go @@ -18,7 +18,7 @@ var ( ) // VerifyingVoteProcessorFactory generates hotstuff.VerifyingVoteCollector instances -type VerifyingVoteProcessorFactory = func(log zerolog.Logger, proposal *model.Proposal) (hotstuff.VerifyingVoteProcessor, error) +type VerifyingVoteProcessorFactory = func(log zerolog.Logger, proposal *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error) // VoteCollector implements a state machine for transition between different states of vote collector type VoteCollector struct { @@ -175,7 +175,7 @@ func (m *VoteCollector) View() uint64 { // CachingVotes -> VerifyingVotes // CachingVotes -> Invalid // VerifyingVotes -> Invalid -func (m *VoteCollector) ProcessBlock(proposal *model.Proposal) error { +func (m *VoteCollector) ProcessBlock(proposal *model.SignedProposal) error { if proposal.Block.View != m.View() { return fmt.Errorf("this VoteCollector requires a proposal for view %d but received block %v with view %d", @@ -243,7 +243,7 @@ func (m *VoteCollector) RegisterVoteConsumer(consumer hotstuff.VoteConsumer) { // Error returns: // * ErrDifferentCollectorState if the VoteCollector's state is _not_ `CachingVotes` // * all other errors are unexpected and potential symptoms of internal bugs or state corruption (fatal) -func (m *VoteCollector) caching2Verifying(proposal *model.Proposal) error { +func (m *VoteCollector) caching2Verifying(proposal *model.SignedProposal) error { blockID := proposal.Block.BlockID newProc, err := m.createVerifyingProcessor(m.log, proposal) if err != nil { diff --git a/consensus/hotstuff/votecollector/statemachine_test.go b/consensus/hotstuff/votecollector/statemachine_test.go index 007dcce1fe2..1f6409c3136 100644 --- a/consensus/hotstuff/votecollector/statemachine_test.go +++ b/consensus/hotstuff/votecollector/statemachine_test.go @@ -51,7 +51,7 @@ func (s *StateMachineTestSuite) SetupTest() { s.mockedProcessors = make(map[flow.Identifier]*mocks.VerifyingVoteProcessor) s.notifier = mocks.NewVoteAggregationConsumer(s.T()) - s.factoryMethod = func(log zerolog.Logger, block *model.Proposal) (hotstuff.VerifyingVoteProcessor, error) { + s.factoryMethod = func(log 
zerolog.Logger, block *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error) { if processor, found := s.mockedProcessors[block.Block.BlockID]; found { return processor, nil } @@ -64,7 +64,7 @@ func (s *StateMachineTestSuite) SetupTest() { // prepareMockedProcessor prepares a mocked processor and stores it in map, later it will be used // to mock behavior of verifying vote processor. -func (s *StateMachineTestSuite) prepareMockedProcessor(proposal *model.Proposal) *mocks.VerifyingVoteProcessor { +func (s *StateMachineTestSuite) prepareMockedProcessor(proposal *model.SignedProposal) *mocks.VerifyingVoteProcessor { processor := &mocks.VerifyingVoteProcessor{} processor.On("Block").Return(func() *model.Block { return proposal.Block @@ -78,7 +78,7 @@ func (s *StateMachineTestSuite) prepareMockedProcessor(proposal *model.Proposal) // when proposal processing can possibly change state of collector func (s *StateMachineTestSuite) TestStatus_StateTransitions() { block := helper.MakeBlock(helper.WithBlockView(s.view)) - proposal := helper.MakeProposal(helper.WithBlock(block)) + proposal := helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(block)))) s.prepareMockedProcessor(proposal) // by default, we should create in caching status @@ -90,9 +90,7 @@ func (s *StateMachineTestSuite) TestStatus_StateTransitions() { require.Equal(s.T(), hotstuff.VoteCollectorStatusVerifying, s.collector.Status()) // after submitting double proposal we should transfer into invalid state - err = s.collector.ProcessBlock(helper.MakeProposal( - helper.WithBlock( - helper.MakeBlock(helper.WithBlockView(s.view))))) + err = s.collector.ProcessBlock(makeSignedProposalWithView(s.view)) require.NoError(s.T(), err) require.Equal(s.T(), hotstuff.VoteCollectorStatusInvalid, s.collector.Status()) } @@ -101,13 +99,14 @@ func (s *StateMachineTestSuite) TestStatus_StateTransitions() { // factory are handed through (potentially wrapped), but are not replaced. 
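To make the transition sequence exercised above easier to follow, here is a hedged sketch of the same flow outside the suite (status constants and the `makeSignedProposalWithView` helper as defined in this diff; `collector`, `t`, and `view` are assumed from the test context):

```go
// Caching -> Verifying -> Invalid, mirroring TestStatus_StateTransitions.
proposal := makeSignedProposalWithView(view)
require.Equal(t, hotstuff.VoteCollectorStatusCaching, collector.Status())

// the first proposal for the view switches the collector to verifying
require.NoError(t, collector.ProcessBlock(proposal))
require.Equal(t, hotstuff.VoteCollectorStatusVerifying, collector.Status())

// a second, conflicting proposal for the same view marks the collector invalid
require.NoError(t, collector.ProcessBlock(makeSignedProposalWithView(view)))
require.Equal(t, hotstuff.VoteCollectorStatusInvalid, collector.Status())
```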
func (s *StateMachineTestSuite) Test_FactoryErrorPropagation() { factoryError := errors.New("factory error") - factory := func(log zerolog.Logger, block *model.Proposal) (hotstuff.VerifyingVoteProcessor, error) { + factory := func(log zerolog.Logger, block *model.SignedProposal) (hotstuff.VerifyingVoteProcessor, error) { return nil, factoryError } s.collector.createVerifyingProcessor = factory // failing to create collector has to result in error and won't change state - err := s.collector.ProcessBlock(helper.MakeProposal(helper.WithBlock(helper.MakeBlock(helper.WithBlockView(s.view))))) + proposal := makeSignedProposalWithView(s.view) + err := s.collector.ProcessBlock(proposal) require.ErrorIs(s.T(), err, factoryError) require.Equal(s.T(), hotstuff.VoteCollectorStatusCaching, s.collector.Status()) } @@ -115,8 +114,8 @@ func (s *StateMachineTestSuite) Test_FactoryErrorPropagation() { // TestAddVote_VerifyingState tests that AddVote correctly process valid and invalid votes as well // as repeated, invalid and double votes in verifying state func (s *StateMachineTestSuite) TestAddVote_VerifyingState() { - block := helper.MakeBlock(helper.WithBlockView(s.view)) - proposal := helper.MakeProposal(helper.WithBlock(block)) + proposal := makeSignedProposalWithView(s.view) + block := proposal.Block processor := s.prepareMockedProcessor(proposal) err := s.collector.ProcessBlock(proposal) require.NoError(s.T(), err) @@ -203,8 +202,8 @@ func (s *StateMachineTestSuite) TestAddVote_VerifyingState() { // are sent to vote processor func (s *StateMachineTestSuite) TestProcessBlock_ProcessingOfCachedVotes() { votes := 10 - block := helper.MakeBlock(helper.WithBlockView(s.view)) - proposal := helper.MakeProposal(helper.WithBlock(block)) + proposal := makeSignedProposalWithView(s.view) + block := proposal.Block processor := s.prepareMockedProcessor(proposal) for i := 0; i < votes; i++ { vote := unittest.VoteForBlockFixture(block) @@ -226,11 +225,12 @@ func (s *StateMachineTestSuite) TestProcessBlock_ProcessingOfCachedVotes() { // Test_VoteProcessorErrorPropagation verifies that unexpected errors from the `VoteProcessor` // are propagated up the call stack (potentially wrapped), but are not replaced. func (s *StateMachineTestSuite) Test_VoteProcessorErrorPropagation() { - block := helper.MakeBlock(helper.WithBlockView(s.view)) - proposal := helper.MakeProposal(helper.WithBlock(block)) + proposal := makeSignedProposalWithView(s.view) + block := proposal.Block processor := s.prepareMockedProcessor(proposal) - err := s.collector.ProcessBlock(helper.MakeProposal(helper.WithBlock(block))) + err := s.collector.ProcessBlock(helper.MakeSignedProposal( + helper.WithProposal(helper.MakeProposal(helper.WithBlock(block))))) require.NoError(s.T(), err) unexpectedError := errors.New("some unexpected error") @@ -244,8 +244,8 @@ func (s *StateMachineTestSuite) Test_VoteProcessorErrorPropagation() { // in strict ordering of arrival. 
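A small, hedged sketch of the consumer contract referenced here: a consumer registered via `RegisterVoteConsumer` first receives all previously cached votes, then live votes, in strict arrival order (the slice-collecting consumer mirrors the suite's approach; `collector` is assumed from context):

```go
// Collect delivered votes; cached votes are replayed before new ones arrive,
// so the slice preserves the original arrival order.
var consumedVotes []*model.Vote
collector.RegisterVoteConsumer(func(vote *model.Vote) {
	consumedVotes = append(consumedVotes, vote)
})
```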
func (s *StateMachineTestSuite) RegisterVoteConsumer() { votes := 10 - block := helper.MakeBlock(helper.WithBlockView(s.view)) - proposal := helper.MakeProposal(helper.WithBlock(block)) + proposal := makeSignedProposalWithView(s.view) + block := proposal.Block processor := s.prepareMockedProcessor(proposal) expectedVotes := make([]*model.Vote, 0) for i := 0; i < votes; i++ { @@ -273,3 +273,7 @@ func (s *StateMachineTestSuite) RegisterVoteConsumer() { require.Equal(s.T(), expectedVotes, actualVotes) } + +func makeSignedProposalWithView(view uint64) *model.SignedProposal { + return helper.MakeSignedProposal(helper.WithProposal(helper.MakeProposal(helper.WithBlock(helper.MakeBlock(helper.WithBlockView(view)))))) +} diff --git a/consensus/hotstuff/votecollector/testutil.go b/consensus/hotstuff/votecollector/testutil.go index 26ea9b69547..4c9f2d288e2 100644 --- a/consensus/hotstuff/votecollector/testutil.go +++ b/consensus/hotstuff/votecollector/testutil.go @@ -1,13 +1,13 @@ package votecollector import ( + "github.com/onflow/crypto" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/consensus/hotstuff/helper" mockhotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" ) @@ -22,12 +22,12 @@ type VoteProcessorTestSuiteBase struct { stakingAggregator *mockhotstuff.WeightedSignatureAggregator minRequiredWeight uint64 - proposal *model.Proposal + proposal *model.SignedProposal } func (s *VoteProcessorTestSuiteBase) SetupTest() { s.stakingAggregator = &mockhotstuff.WeightedSignatureAggregator{} - s.proposal = helper.MakeProposal() + s.proposal = helper.MakeSignedProposal() // let's assume we have 19 nodes each with weight 100 s.sigWeight = 100 diff --git a/consensus/integration/blockordelay_test.go b/consensus/integration/blockordelay_test.go index e3fde5817f5..fceebc4c1ca 100644 --- a/consensus/integration/blockordelay_test.go +++ b/consensus/integration/blockordelay_test.go @@ -20,13 +20,13 @@ import ( func blockNodesFirstMessages(n uint64, denyList ...*Node) BlockOrDelayFunc { blackList := make(map[flow.Identifier]uint64, len(denyList)) for _, node := range denyList { - blackList[node.id.ID()] = n + blackList[node.id.NodeID] = n } lock := new(sync.Mutex) return func(channel channels.Channel, event interface{}, sender, receiver *Node) (bool, time.Duration) { // filter only consensus messages switch event.(type) { - case *messages.BlockProposal: + case *messages.Proposal: case *messages.BlockVote: case *messages.BlockResponse: case *messages.TimeoutObject: @@ -35,9 +35,9 @@ func blockNodesFirstMessages(n uint64, denyList ...*Node) BlockOrDelayFunc { } lock.Lock() defer lock.Unlock() - count, ok := blackList[receiver.id.ID()] + count, ok := blackList[receiver.id.NodeID] if ok && count > 0 { - blackList[receiver.id.ID()] = count - 1 + blackList[receiver.id.NodeID] = count - 1 return true, 0 } return false, 0 diff --git a/consensus/integration/epoch_test.go b/consensus/integration/epoch_test.go index aa41de368fe..079f6048f8a 100644 --- a/consensus/integration/epoch_test.go +++ b/consensus/integration/epoch_test.go @@ -8,19 +8,15 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/cmd/bootstrap/run" - "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/flow/mapfunc" - 
"github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" "github.com/onflow/flow-go/utils/unittest" ) -// should be able to reach consensus when identity table contains nodes with 0 weight. +// should be able to reach consensus when identity table contains nodes which are joining in next epoch. func TestUnweightedNode(t *testing.T) { - // stop after building 2 blocks to ensure we can tolerate 0-weight (joining next - // epoch) identities, but don't cross an epoch boundary // stop after building 2 blocks to ensure we can tolerate 0-weight (joining next // epoch) identities, but don't cross an epoch boundary stopper := NewStopper(2, 0) @@ -28,24 +24,25 @@ func TestUnweightedNode(t *testing.T) { rootSnapshot := createRootSnapshot(t, participantsData) consensusParticipants := NewConsensusParticipants(participantsData) - // add a consensus node to next epoch (it will have 0 weight in the current epoch) + // add a consensus node to next epoch (it will have `flow.EpochParticipationStatusJoining` status in the current epoch) nextEpochParticipantsData := createConsensusIdentities(t, 1) // epoch 2 identities includes: // * same collection node from epoch 1, so cluster QCs are consistent // * 1 new consensus node, joining at epoch 2 // * random nodes with other roles + currentEpochCollectionNodes, err := rootSnapshot.Identities(filter.HasRole[flow.Identity](flow.RoleCollection)) + require.NoError(t, err) nextEpochIdentities := unittest.CompleteIdentitySet( append( - rootSnapshot.Encodable().Identities.Filter(filter.HasRole(flow.RoleCollection)), + currentEpochCollectionNodes, nextEpochParticipantsData.Identities()...)..., ) - rootSnapshot = withNextEpoch( - rootSnapshot, - nextEpochIdentities, - nextEpochParticipantsData, - consensusParticipants, - 10_000, - ) + rootSnapshot = withNextEpoch(t, rootSnapshot, nextEpochIdentities, nextEpochParticipantsData, consensusParticipants, 10_000, func(block *flow.Block) *flow.QuorumCertificate { + return createRootQC(t, block, participantsData) + }) + encodableSnap := rootSnapshot.Encodable() + encodableSnap.QuorumCertificate = createRootQC(t, rootSnapshot.Encodable().SealingSegment.Highest(), participantsData) + rootSnapshot = inmem.SnapshotFromEncodable(encodableSnap) nodes, hub, runFor := createNodes(t, consensusParticipants, rootSnapshot, stopper) @@ -67,19 +64,16 @@ func TestStaticEpochTransition(t *testing.T) { rootSnapshot := createRootSnapshot(t, participantsData) consensusParticipants := NewConsensusParticipants(participantsData) - firstEpochCounter, err := rootSnapshot.Epochs().Current().Counter() + firstEpoch, err := rootSnapshot.Epochs().Current() require.NoError(t, err) + firstEpochCounter := firstEpoch.Counter() // set up next epoch beginning in 4 views, with same identities as first epoch nextEpochIdentities, err := rootSnapshot.Identities(filter.Any) require.NoError(t, err) - rootSnapshot = withNextEpoch( - rootSnapshot, - nextEpochIdentities, - participantsData, - consensusParticipants, - 4, - ) + rootSnapshot = withNextEpoch(t, rootSnapshot, nextEpochIdentities, participantsData, consensusParticipants, 4, func(block *flow.Block) *flow.QuorumCertificate { + return createRootQC(t, block, participantsData) + }) nodes, hub, runFor := createNodes(t, consensusParticipants, rootSnapshot, stopper) @@ -92,9 +86,9 @@ func TestStaticEpochTransition(t *testing.T) { // confirm that we have transitioned to the new epoch pstate := nodes[0].state - 
afterCounter, err := pstate.Final().Epochs().Current().Counter() + secondEpoch, err := pstate.Final().Epochs().Current() require.NoError(t, err) - assert.Equal(t, firstEpochCounter+1, afterCounter) + assert.Equal(t, firstEpochCounter+1, secondEpoch.Counter()) cleanupNodes(nodes) } @@ -104,13 +98,14 @@ func TestStaticEpochTransition(t *testing.T) { func TestEpochTransition_IdentitiesOverlap(t *testing.T) { // must finalize 8 blocks, we specify the epoch transition after 4 views stopper := NewStopper(8, 0) - privateNodeInfos := createPrivateNodeIdentities(4) + privateNodeInfos := createPrivateNodeIdentities(t, 4) firstEpochConsensusParticipants := completeConsensusIdentities(t, privateNodeInfos[:3]) rootSnapshot := createRootSnapshot(t, firstEpochConsensusParticipants) consensusParticipants := NewConsensusParticipants(firstEpochConsensusParticipants) - firstEpochCounter, err := rootSnapshot.Epochs().Current().Counter() + firstEpoch, err := rootSnapshot.Epochs().Current() require.NoError(t, err) + firstEpochCounter := firstEpoch.Counter() // set up next epoch with 1 new consensus nodes and 2 consensus nodes from first epoch // 1 consensus node is removed after the first epoch @@ -120,19 +115,15 @@ func TestEpochTransition_IdentitiesOverlap(t *testing.T) { removedIdentity := privateNodeInfos[0].Identity() newIdentity := privateNodeInfos[3].Identity() nextEpochIdentities := append( - firstEpochIdentities.Filter(filter.Not(filter.HasNodeID(removedIdentity.NodeID))), + firstEpochIdentities.Filter(filter.Not(filter.HasNodeID[flow.Identity](removedIdentity.NodeID))), newIdentity, ) - // generate new identities for next epoch, it will generate new DKG keys for random beacon participants + // generate new identities for next epoch, it will generate new Random Beacon keys for random beacon participants nextEpochParticipantData := completeConsensusIdentities(t, privateNodeInfos[1:]) - rootSnapshot = withNextEpoch( - rootSnapshot, - nextEpochIdentities, - nextEpochParticipantData, - consensusParticipants, - 4, - ) + rootSnapshot = withNextEpoch(t, rootSnapshot, nextEpochIdentities, nextEpochParticipantData, consensusParticipants, 4, func(block *flow.Block) *flow.QuorumCertificate { + return createRootQC(t, block, firstEpochConsensusParticipants) + }) nodes, hub, runFor := createNodes(t, consensusParticipants, rootSnapshot, stopper) @@ -145,9 +136,9 @@ func TestEpochTransition_IdentitiesOverlap(t *testing.T) { // confirm that we have transitioned to the new epoch pstate := nodes[0].state - afterCounter, err := pstate.Final().Epochs().Current().Counter() + secondEpoch, err := pstate.Final().Epochs().Current() require.NoError(t, err) - assert.Equal(t, firstEpochCounter+1, afterCounter) + assert.Equal(t, firstEpochCounter+1, secondEpoch.Counter()) cleanupNodes(nodes) } @@ -161,8 +152,9 @@ func TestEpochTransition_IdentitiesDisjoint(t *testing.T) { rootSnapshot := createRootSnapshot(t, firstEpochConsensusParticipants) consensusParticipants := NewConsensusParticipants(firstEpochConsensusParticipants) - firstEpochCounter, err := rootSnapshot.Epochs().Current().Counter() + firstEpoch, err := rootSnapshot.Epochs().Current() require.NoError(t, err) + firstEpochCounter := firstEpoch.Counter() // prepare a next epoch with a completely different consensus committee // (no overlapping consensus nodes) @@ -171,17 +163,13 @@ func TestEpochTransition_IdentitiesDisjoint(t *testing.T) { nextEpochParticipantData := createConsensusIdentities(t, 3) nextEpochIdentities := append( - 
firstEpochIdentities.Filter(filter.Not(filter.HasRole(flow.RoleConsensus))), // remove all consensus nodes - nextEpochParticipantData.Identities()..., // add new consensus nodes + firstEpochIdentities.Filter(filter.Not(filter.HasRole[flow.Identity](flow.RoleConsensus))), // remove all consensus nodes + nextEpochParticipantData.Identities()..., // add new consensus nodes ) - rootSnapshot = withNextEpoch( - rootSnapshot, - nextEpochIdentities, - nextEpochParticipantData, - consensusParticipants, - 4, - ) + rootSnapshot = withNextEpoch(t, rootSnapshot, nextEpochIdentities, nextEpochParticipantData, consensusParticipants, 4, func(block *flow.Block) *flow.QuorumCertificate { + return createRootQC(t, block, firstEpochConsensusParticipants) + }) nodes, hub, runFor := createNodes(t, consensusParticipants, rootSnapshot, stopper) @@ -194,68 +182,131 @@ func TestEpochTransition_IdentitiesDisjoint(t *testing.T) { // confirm that we have transitioned to the new epoch pstate := nodes[0].state - afterCounter, err := pstate.Final().Epochs().Current().Counter() + secondEpoch, err := pstate.Final().Epochs().Current() require.NoError(t, err) - assert.Equal(t, firstEpochCounter+1, afterCounter) + assert.Equal(t, firstEpochCounter+1, secondEpoch.Counter()) cleanupNodes(nodes) } // withNextEpoch adds a valid next epoch with the given identities to the input // snapshot. Also sets the length of the first (current) epoch to curEpochViews. +// NOTE: the input initial snapshot must be a spork root snapshot. // // We make the first (current) epoch start in committed phase so we can transition // to the next epoch upon reaching the appropriate view without any further changes // to the protocol state. func withNextEpoch( + t *testing.T, snapshot *inmem.Snapshot, nextEpochIdentities flow.IdentityList, nextEpochParticipantData *run.ParticipantData, participantsCache *ConsensusParticipants, curEpochViews uint64, + createQC func(block *flow.Block) *flow.QuorumCertificate, ) *inmem.Snapshot { + nextEpochIdentities = nextEpochIdentities.Sort(flow.Canonical[flow.Identity]) // convert to encodable representation for simple modification encodableSnapshot := snapshot.Encodable() + rootResult, rootSeal, err := snapshot.SealedResult() + require.NoError(t, err) + require.Len(t, encodableSnapshot.SealingSegment.Blocks, 1, "function `withNextEpoch` only works for spork-root/genesis snapshots") + + rootProtocolState := encodableSnapshot.SealingSegment.LatestProtocolStateEntry() + currEpochSetup := rootProtocolState.EpochEntry.CurrentEpochSetup + currEpochCommit := rootProtocolState.EpochEntry.CurrentEpochCommit + + // Set current epoch length + currEpochSetup.FinalView = currEpochSetup.FirstView + curEpochViews - 1 + + // Construct events for next epoch + nextEpochSetup := &flow.EpochSetup{ + Counter: currEpochSetup.Counter + 1, + FirstView: currEpochSetup.FinalView + 1, + FinalView: currEpochSetup.FinalView + 1 + 10_000, + RandomSource: unittest.SeedFixture(flow.EpochSetupRandomSourceLength), + Participants: nextEpochIdentities.ToSkeleton(), + Assignments: unittest.ClusterAssignment(1, nextEpochIdentities.ToSkeleton()), + } + dkgIndexMap, dkgParticipantKeys := nextEpochParticipantData.DKGData() + nextEpochCommit := &flow.EpochCommit{ + Counter: nextEpochSetup.Counter, + ClusterQCs: currEpochCommit.ClusterQCs, + DKGParticipantKeys: dkgParticipantKeys, + DKGGroupKey: nextEpochParticipantData.DKGGroupKey, + DKGIndexMap: dkgIndexMap, + } - nextEpochIdentities = nextEpochIdentities.Sort(order.Canonical) - - currEpoch := 
&encodableSnapshot.Epochs.Current // take pointer so assignments apply - currEpoch.FinalView = currEpoch.FirstView + curEpochViews - 1 // first epoch lasts curEpochViews - encodableSnapshot.Epochs.Next = &inmem.EncodableEpoch{ - Counter: currEpoch.Counter + 1, - FirstView: currEpoch.FinalView + 1, - FinalView: currEpoch.FinalView + 1 + 10000, - RandomSource: unittest.SeedFixture(flow.EpochSetupRandomSourceLength), - InitialIdentities: nextEpochIdentities, - // must include info corresponding to EpochCommit event, since we are - // starting in committed phase - Clustering: unittest.ClusterList(1, nextEpochIdentities), - Clusters: currEpoch.Clusters, - DKG: &inmem.EncodableDKG{ - GroupKey: encodable.RandomBeaconPubKey{ - PublicKey: nextEpochParticipantData.GroupKey, + // Construct the new min epoch state entry + minEpochStateEntry := &flow.MinEpochStateEntry{ + PreviousEpoch: rootProtocolState.EpochEntry.PreviousEpoch, + CurrentEpoch: flow.EpochStateContainer{ + SetupID: currEpochSetup.ID(), + CommitID: currEpochCommit.ID(), + ActiveIdentities: rootProtocolState.EpochEntry.CurrentEpoch.ActiveIdentities, + EpochExtensions: rootProtocolState.EpochEntry.CurrentEpoch.EpochExtensions, + }, + NextEpoch: &flow.EpochStateContainer{ + SetupID: nextEpochSetup.ID(), + CommitID: nextEpochCommit.ID(), + ActiveIdentities: flow.DynamicIdentityEntryListFromIdentities(nextEpochIdentities), + }, + EpochFallbackTriggered: false, + } + + // Construct the new epoch protocol state entry + epochStateEntry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: minEpochStateEntry, + PreviousEpochSetup: rootProtocolState.EpochEntry.PreviousEpochSetup, + PreviousEpochCommit: rootProtocolState.EpochEntry.PreviousEpochCommit, + CurrentEpochSetup: currEpochSetup, + CurrentEpochCommit: currEpochCommit, + NextEpochSetup: nextEpochSetup, + NextEpochCommit: nextEpochCommit, + }, + ) + require.NoError(t, err) + // Re-construct epoch protocol state with modified events (constructs ActiveIdentity fields) + epochRichProtocolState, err := flow.NewRichEpochStateEntry(epochStateEntry) + require.NoError(t, err) + + originalRootKVStore, err := snapshot.ProtocolState() + require.NoError(t, err) + + // Store the modified epoch protocol state entry and corresponding KV store entry + rootKVStore, err := kvstore.NewDefaultKVStore( + originalRootKVStore.GetFinalizationSafetyThreshold(), + originalRootKVStore.GetEpochExtensionViewCount(), + epochRichProtocolState.ID()) + require.NoError(t, err) + protocolVersion, encodedKVStore, err := rootKVStore.VersionedEncode() + require.NoError(t, err) + encodableSnapshot.SealingSegment.ProtocolStateEntries = map[flow.Identifier]*flow.ProtocolStateEntryWrapper{ + rootKVStore.ID(): { + KVStore: flow.PSKeyValueStoreData{ + Version: protocolVersion, + Data: encodedKVStore, }, - Participants: nextEpochParticipantData.Lookup, + EpochEntry: epochRichProtocolState, }, } - participantsCache.Update(encodableSnapshot.Epochs.Next.Counter, nextEpochParticipantData) - - // we must start the current epoch in committed phase so we can transition to the next epoch - encodableSnapshot.Phase = flow.EpochPhaseCommitted - encodableSnapshot.LatestSeal.ResultID = encodableSnapshot.LatestResult.ID() - - // set identities for root snapshot to include next epoch identities, - // since we are in committed phase - encodableSnapshot.Identities = append( - // all the current epoch identities - encodableSnapshot.Identities, - // and all the NEW identities in next epoch, with 0 weight - 
nextEpochIdentities. - Filter(filter.Not(filter.In(encodableSnapshot.Identities))). - Map(mapfunc.WithWeight(0))..., - ).Sort(order.Canonical) + // Since we modified the root protocol state, we need to update the root block's ProtocolStateID field. + encodableSnapshot.SealingSegment.Blocks[0].Block.Payload.ProtocolStateID = rootKVStore.ID() + rootBlock := encodableSnapshot.SealingSegment.Blocks[0].Block + // Since we changed the root block, we need to update the QC, root result, and root seal. + // rootResult and rootSeal are pointers, so mutations apply to Snapshot + rootResult.BlockID = rootBlock.ID() + rootSeal.ResultID = rootResult.ID() + rootSeal.BlockID = rootBlock.ID() + encodableSnapshot.SealingSegment.LatestSeals = map[flow.Identifier]flow.Identifier{ + rootBlock.ID(): rootSeal.ID(), + } + encodableSnapshot.QuorumCertificate = createQC(&rootBlock) + + participantsCache.Update(nextEpochSetup.Counter, nextEpochParticipantData) return inmem.SnapshotFromEncodable(encodableSnapshot) } diff --git a/consensus/integration/integration_test.go b/consensus/integration/integration_test.go index 6ba804d103d..ddc1c8cb6fc 100644 --- a/consensus/integration/integration_test.go +++ b/consensus/integration/integration_test.go @@ -24,6 +24,7 @@ func runNodes(signalerCtx irrecoverable.SignalerContext, nodes []*Node) { n.timeoutAggregator.Start(signalerCtx) n.compliance.Start(signalerCtx) n.messageHub.Start(signalerCtx) + n.sync.Start(signalerCtx) <-util.AllReady(n.committee, n.hot, n.voteAggregator, n.timeoutAggregator, n.compliance, n.sync, n.messageHub) }(n) } @@ -123,7 +124,7 @@ func chainViews(t *testing.T, node *Node) []uint64 { head, err := node.state.Final().Head() require.NoError(t, err) - for head != nil && head.View > 0 { + for head != nil && head.ContainsParentQC() { views = append(views, head.View) head, err = node.headers.ByBlockID(head.ParentID) require.NoError(t, err) @@ -156,14 +157,14 @@ func blockNothing(_ channels.Channel, _ interface{}, _, _ *Node) (bool, time.Dur func blockNodes(denyList ...*Node) BlockOrDelayFunc { denyMap := make(map[flow.Identifier]*Node, len(denyList)) for _, n := range denyList { - denyMap[n.id.ID()] = n + denyMap[n.id.NodeID] = n } // no concurrency protection needed as blackList is only read but not modified return func(channel channels.Channel, event interface{}, sender, receiver *Node) (bool, time.Duration) { - if _, ok := denyMap[sender.id.ID()]; ok { + if _, ok := denyMap[sender.id.NodeID]; ok { return true, 0 // block the message } - if _, ok := denyMap[receiver.id.ID()]; ok { + if _, ok := denyMap[receiver.id.NodeID]; ok { return true, 0 // block the message } return false, 0 // allow the message diff --git a/consensus/integration/network_test.go b/consensus/integration/network_test.go index dfa71c53066..79cb20ad2ee 100644 --- a/consensus/integration/network_test.go +++ b/consensus/integration/network_test.go @@ -6,11 +6,13 @@ import ( "time" "github.com/hashicorp/go-multierror" + "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" ) // TODO replace this type with `network/stub/hub.go` @@ -18,14 +20,16 @@ import ( // It maintains a set of network instances and enables them to directly exchange message // over the memory. 
type Hub struct { + log zerolog.Logger networks map[flow.Identifier]*Network filter BlockOrDelayFunc identities flow.IdentityList } // NewNetworkHub creates and returns a new Hub instance. -func NewNetworkHub() *Hub { +func NewNetworkHub(log zerolog.Logger) *Hub { return &Hub{ + log: log, networks: make(map[flow.Identifier]*Network), identities: flow.IdentityList{}, } @@ -42,6 +46,7 @@ func (h *Hub) WithFilter(filter BlockOrDelayFunc) *Hub { func (h *Hub) AddNetwork(originID flow.Identifier, node *Node) *Network { net := &Network{ ctx: context.Background(), + log: h.log.With().Str("network", originID.String()).Logger(), hub: h, originID: originID, conduits: make(map[channels.Channel]*Conduit), @@ -60,14 +65,15 @@ func (h *Hub) AddNetwork(originID flow.Identifier, node *Node) *Network { // all engine's events to others using an in-memory delivery mechanism. type Network struct { ctx context.Context + log zerolog.Logger hub *Hub node *Node originID flow.Identifier conduits map[channels.Channel]*Conduit - mocknetwork.Network + mocknetwork.EngineRegistry } -var _ network.Network = (*Network)(nil) +var _ network.EngineRegistry = (*Network)(nil) // Register registers an Engine of the attached node to the channel via a Conduit, and returns the // Conduit instance. @@ -84,7 +90,12 @@ func (n *Network) Register(channel channels.Channel, engine network.MessageProce go func() { for msg := range con.queue { go func(m message) { - _ = engine.Process(channel, m.originID, m.event) + internal, err := m.event.ToInternal() + if err != nil { + n.log.Err(err).Msgf("failed to convert UntrustedMessage %T to internal type", m.event) + return + } + _ = engine.Process(channel, m.originID, internal) }(msg) } }() @@ -135,7 +146,11 @@ func (n *Network) unicast(event interface{}, channel channels.Channel, targetID // no delay, push to the receiver's message queue right away if delay == 0 { - con.queue <- message{originID: n.originID, event: event} + msg, ok := event.(messages.UntrustedMessage) + if !ok { + return fmt.Errorf("invalid message type: expected messages.UntrustedMessage, got %T", event) + } + con.queue <- message{originID: n.originID, event: msg} return nil } @@ -143,7 +158,13 @@ func (n *Network) unicast(event interface{}, channel channels.Channel, targetID go func(delay time.Duration, senderID flow.Identifier, receiver *Conduit, event interface{}) { // sleep in order to simulate the network delay time.Sleep(delay) - con.queue <- message{originID: senderID, event: event} + msg, ok := event.(messages.UntrustedMessage) + if !ok { + err := fmt.Errorf("invalid message type: expected messages.UntrustedMessage, got %T", event) + n.log.Err(err).Msg("failed to push to the receiver's message queue") + return + } + con.queue <- message{originID: senderID, event: msg} }(delay, n.originID, con, event) return nil @@ -160,7 +181,11 @@ func (n *Network) publish(event interface{}, channel channels.Channel, targetIDs // Engines attached to the same channel on other nodes. The targeted nodes are selected based on the selector. // In this test helper implementation, multicast uses submit method under the hood. func (n *Network) multicast(event interface{}, channel channels.Channel, num uint, targetIDs ...flow.Identifier) error { - targetIDs = flow.Sample(num, targetIDs...) + var err error + targetIDs, err = flow.Sample(num, targetIDs...) + if err != nil { + return fmt.Errorf("sampling failed: %w", err) + } return n.submit(event, channel, targetIDs...) 
} @@ -219,5 +244,5 @@ func (c *Conduit) Close() error { type message struct { originID flow.Identifier - event interface{} + event messages.UntrustedMessage } diff --git a/consensus/integration/nodes_test.go b/consensus/integration/nodes_test.go index 0a11493182b..c355b3be8c3 100644 --- a/consensus/integration/nodes_test.go +++ b/consensus/integration/nodes_test.go @@ -3,13 +3,14 @@ package integration_test import ( "context" "fmt" + "io" "os" "sort" "testing" "time" - "github.com/dgraph-io/badger/v2" "github.com/gammazero/workerpool" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -28,18 +29,17 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/verification" "github.com/onflow/flow-go/consensus/hotstuff/voteaggregator" "github.com/onflow/flow-go/consensus/hotstuff/votecollector" - "github.com/onflow/flow-go/crypto" synceng "github.com/onflow/flow-go/engine/common/synchronization" "github.com/onflow/flow-go/engine/consensus/compliance" "github.com/onflow/flow-go/engine/consensus/message_hub" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/buffer" builder "github.com/onflow/flow-go/module/builder/consensus" synccore "github.com/onflow/flow-go/module/chainsync" + modulecompliance "github.com/onflow/flow-go/module/compliance" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/irrecoverable" @@ -55,9 +55,13 @@ import ( "github.com/onflow/flow-go/state/protocol/blocktimer" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/state/protocol/util" - storage "github.com/onflow/flow-go/storage/badger" + fstorage "github.com/onflow/flow-go/storage" storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -93,7 +97,7 @@ func NewConsensusParticipants(data *run.ParticipantData) *ConsensusParticipants beaconInfoByEpoch: map[uint64]RandomBeaconNodeInfo{ 1: { RandomBeaconPrivKey: participant.RandomBeaconPrivKey, - DKGParticipant: data.Lookup[participant.NodeID], + DKGParticipant: data.DKGCommittee[participant.NodeID], }, }, } @@ -116,7 +120,7 @@ func (p *ConsensusParticipants) Lookup(nodeID flow.Identifier) *ConsensusPartici // If this node was part of previous epoch it will get updated, if not created. 
func (p *ConsensusParticipants) Update(epochCounter uint64, data *run.ParticipantData) { for _, participant := range data.Participants { - dkgParticipant := data.Lookup[participant.NodeID] + dkgParticipant := data.DKGCommittee[participant.NodeID] entry, ok := p.lookup[participant.NodeID] if !ok { entry = ConsensusParticipant{ @@ -134,7 +138,8 @@ func (p *ConsensusParticipants) Update(epochCounter uint64, data *run.Participan } type Node struct { - db *badger.DB + db fstorage.DB + dbCloser io.Closer dbDir string index int log zerolog.Logger @@ -147,7 +152,7 @@ type Node struct { timeoutAggregator hotstuff.TimeoutAggregator messageHub *message_hub.MessageHub state *bprotocol.ParticipantState - headers *storage.Headers + headers fstorage.Headers net *Network } @@ -158,20 +163,12 @@ type epochInfo struct { } // buildEpochLookupList is a helper function which builds an auxiliary structure of epochs sorted by counter -func buildEpochLookupList(epochs ...protocol.Epoch) []epochInfo { +func buildEpochLookupList(epochs ...protocol.CommittedEpoch) []epochInfo { infos := make([]epochInfo, 0) for _, epoch := range epochs { - finalView, err := epoch.FinalView() - if err != nil { - continue - } - counter, err := epoch.Counter() - if err != nil { - continue - } infos = append(infos, epochInfo{ - finalView: finalView, - counter: counter, + finalView: epoch.FinalView(), + counter: epoch.Counter(), }) } sort.Slice(infos, func(i, j int) bool { @@ -187,14 +184,23 @@ func buildEpochLookupList(epochs ...protocol.Epoch) []epochInfo { // The list of created nodes, the common network hub, and a function which starts // all the nodes together, is returned. func createNodes(t *testing.T, participants *ConsensusParticipants, rootSnapshot protocol.Snapshot, stopper *Stopper) (nodes []*Node, hub *Hub, runFor func(time.Duration)) { - consensus, err := rootSnapshot.Identities(filter.HasRole(flow.RoleConsensus)) + consensus, err := rootSnapshot.Identities(filter.HasRole[flow.Identity](flow.RoleConsensus)) require.NoError(t, err) - epochViewLookup := buildEpochLookupList(rootSnapshot.Epochs().Current(), - rootSnapshot.Epochs().Next()) + var epochViewLookup []epochInfo + currentEpoch, err := rootSnapshot.Epochs().Current() + require.NoError(t, err) + // Whether there is a next committed epoch depends on the test. 
+ nextEpoch, err := rootSnapshot.Epochs().NextCommitted() + if err != nil { // the only acceptable error here is `protocol.ErrNextEpochNotCommitted` + require.ErrorIs(t, err, protocol.ErrNextEpochNotCommitted) + epochViewLookup = buildEpochLookupList(currentEpoch) + } else { + epochViewLookup = buildEpochLookupList(currentEpoch, nextEpoch) + } epochLookup := &mockmodule.EpochLookup{} - epochLookup.On("EpochForViewWithFallback", mock.Anything).Return( + epochLookup.On("EpochForView", mock.Anything).Return( func(view uint64) uint64 { for _, info := range epochViewLookup { if view <= info.finalView { @@ -210,7 +216,7 @@ func createNodes(t *testing.T, participants *ConsensusParticipants, rootSnapshot } }) - hub = NewNetworkHub() + hub = NewNetworkHub(unittest.Logger()) nodes = make([]*Node, 0, len(consensus)) for i, identity := range consensus { consensusParticipant := participants.Lookup(identity.NodeID) @@ -221,7 +227,7 @@ func createNodes(t *testing.T, participants *ConsensusParticipants, rootSnapshot // create a context which will be used for all nodes ctx, cancel := context.WithCancel(context.Background()) - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) // create a function to return which the test case can use to run the nodes for some maximum duration // and gracefully stop after. @@ -250,35 +256,50 @@ func createRootQC(t *testing.T, root *flow.Block, participantData *run.Participa // createRootBlockData creates genesis block with first epoch and real data node identities. // This function requires all participants to pass DKG process. -func createRootBlockData(participantData *run.ParticipantData) (*flow.Block, *flow.ExecutionResult, *flow.Seal) { - root := unittest.GenesisFixture() +func createRootBlockData(t *testing.T, participantData *run.ParticipantData) (*flow.Block, *flow.ExecutionResult, *flow.Seal) { + rootHeaderBody := unittest.Block.Genesis(flow.Emulator).HeaderBody consensusParticipants := participantData.Identities() // add other roles to create a complete identity list - participants := unittest.CompleteIdentitySet(consensusParticipants...) 
- participants.Sort(order.Canonical) - + participants := unittest.CompleteIdentitySet(consensusParticipants...).Sort(flow.Canonical[flow.Identity]) + dkgParticipants := participants.ToSkeleton().Filter(filter.IsConsensusCommitteeMember) dkgParticipantsKeys := make([]crypto.PublicKey, 0, len(consensusParticipants)) - for _, participant := range participants.Filter(filter.HasRole(flow.RoleConsensus)) { - dkgParticipantsKeys = append(dkgParticipantsKeys, participantData.Lookup[participant.NodeID].KeyShare) + dkgIndexMap := make(flow.DKGIndexMap) + for index, participant := range dkgParticipants { + dkgParticipantsKeys = append(dkgParticipantsKeys, participantData.DKGCommittee[participant.NodeID].KeyShare) + dkgIndexMap[participant.NodeID] = index } counter := uint64(1) setup := unittest.EpochSetupFixture( - unittest.WithParticipants(participants), + unittest.WithParticipants(participants.ToSkeleton()), unittest.SetupWithCounter(counter), - unittest.WithFirstView(root.Header.View), - unittest.WithFinalView(root.Header.View+1000), + unittest.WithFirstView(rootHeaderBody.View), + unittest.WithFinalView(rootHeaderBody.View+1000), ) commit := unittest.EpochCommitFixture( unittest.CommitWithCounter(counter), unittest.WithClusterQCsFromAssignments(setup.Assignments), func(commit *flow.EpochCommit) { - commit.DKGGroupKey = participantData.GroupKey + commit.DKGGroupKey = participantData.DKGGroupKey commit.DKGParticipantKeys = dkgParticipantsKeys + commit.DKGIndexMap = dkgIndexMap }, ) - + minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents(setup, commit) + require.NoError(t, err) + epochProtocolStateID := minEpochStateEntry.ID() + safetyParams, err := protocol.DefaultEpochSafetyParams(rootHeaderBody.ChainID) + require.NoError(t, err) + rootProtocolState, err := kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, epochProtocolStateID) + require.NoError(t, err) + root, err := flow.NewRootBlock( + flow.UntrustedBlock{ + HeaderBody: rootHeaderBody, + Payload: flow.Payload{ProtocolStateID: rootProtocolState.ID()}, + }, + ) + require.NoError(t, err) result := unittest.BootstrapExecutionResultFixture(root, unittest.GenesisStateCommitment) result.ServiceEvents = []flow.ServiceEvent{setup.ServiceEvent(), commit.ServiceEvent()} @@ -287,20 +308,21 @@ func createRootBlockData(participantData *run.ParticipantData) (*flow.Block, *fl return root, result, seal } -func createPrivateNodeIdentities(n int) []bootstrap.NodeInfo { - consensus := unittest.IdentityListFixture(n, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) +func createPrivateNodeIdentities(t *testing.T, n int) []bootstrap.NodeInfo { + consensus := unittest.IdentityListFixture(n, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]) infos := make([]bootstrap.NodeInfo, 0, n) for _, node := range consensus { networkPrivKey := unittest.NetworkingPrivKeyFixture() stakingPrivKey := unittest.StakingPrivKeyFixture() - nodeInfo := bootstrap.NewPrivateNodeInfo( + nodeInfo, err := bootstrap.NewPrivateNodeInfo( node.NodeID, node.Role, node.Address, - node.Weight, + node.InitialWeight, networkPrivKey, stakingPrivKey, ) + require.NoError(t, err) infos = append(infos, nodeInfo) } return infos @@ -308,7 +330,7 @@ func createPrivateNodeIdentities(n int) []bootstrap.NodeInfo { func createConsensusIdentities(t *testing.T, n int) *run.ParticipantData { // create n consensus node participants - consensus := createPrivateNodeIdentities(n) + consensus := 
createPrivateNodeIdentities(t, n) return completeConsensusIdentities(t, consensus) } @@ -319,8 +341,8 @@ func completeConsensusIdentities(t *testing.T, nodeInfos []bootstrap.NodeInfo) * participantData := &run.ParticipantData{ Participants: make([]run.Participant, 0, len(nodeInfos)), - Lookup: make(map[flow.Identifier]flow.DKGParticipant), - GroupKey: dkgData.PubGroupKey, + DKGCommittee: make(map[flow.Identifier]flow.DKGParticipant), + DKGGroupKey: dkgData.PubGroupKey, } for index, node := range nodeInfos { participant := run.Participant{ @@ -328,7 +350,7 @@ func completeConsensusIdentities(t *testing.T, nodeInfos []bootstrap.NodeInfo) * RandomBeaconPrivKey: dkgData.PrivKeyShares[index], } participantData.Participants = append(participantData.Participants, participant) - participantData.Lookup[node.NodeID] = flow.DKGParticipant{ + participantData.DKGCommittee[node.NodeID] = flow.DKGParticipant{ Index: uint(index), KeyShare: dkgData.PubKeyShares[index], } @@ -340,10 +362,10 @@ func completeConsensusIdentities(t *testing.T, nodeInfos []bootstrap.NodeInfo) * // createRootSnapshot creates root block, generates root QC and builds a root snapshot for // bootstrapping a node func createRootSnapshot(t *testing.T, participantData *run.ParticipantData) *inmem.Snapshot { - root, result, seal := createRootBlockData(participantData) + root, result, seal := createRootBlockData(t, participantData) rootQC := createRootQC(t, root, participantData) - rootSnapshot, err := inmem.SnapshotFromBootstrapState(root, result, seal, rootQC) + rootSnapshot, err := unittest.SnapshotFromBootstrapState(root, result, seal, rootQC) require.NoError(t, err) return rootSnapshot } @@ -359,35 +381,41 @@ func createNode( epochLookup module.EpochLookup, ) *Node { - db, dbDir := unittest.TempBadgerDB(t) + pdb, dbDir := unittest.TempPebbleDB(t) metricsCollector := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() - - headersDB := storage.NewHeaders(metricsCollector, db) - guaranteesDB := storage.NewGuarantees(metricsCollector, db, storage.DefaultCacheSize) - sealsDB := storage.NewSeals(metricsCollector, db) - indexDB := storage.NewIndex(metricsCollector, db) - resultsDB := storage.NewExecutionResults(metricsCollector, db) - receiptsDB := storage.NewExecutionReceipts(metricsCollector, db, resultsDB, storage.DefaultCacheSize) - payloadsDB := storage.NewPayloads(db, indexDB, guaranteesDB, sealsDB, receiptsDB, resultsDB) - blocksDB := storage.NewBlocks(db, headersDB, payloadsDB) - qcsDB := storage.NewQuorumCertificates(metricsCollector, db, storage.DefaultCacheSize) - setupsDB := storage.NewEpochSetups(metricsCollector, db) - commitsDB := storage.NewEpochCommits(metricsCollector, db) - statusesDB := storage.NewEpochStatuses(metricsCollector, db) - versionBeaconDB := storage.NewVersionBeacons(db) - consumer := events.NewDistributor() - - localID := identity.ID() + db := pebbleimpl.ToDB(pdb) + lockManager := fstorage.NewTestingLockManager() + + headersDB := store.NewHeaders(metricsCollector, db) + guaranteesDB := store.NewGuarantees(metricsCollector, db, store.DefaultCacheSize, store.DefaultCacheSize) + sealsDB := store.NewSeals(metricsCollector, db) + indexDB := store.NewIndex(metricsCollector, db) + resultsDB := store.NewExecutionResults(metricsCollector, db) + receiptsDB := store.NewExecutionReceipts(metricsCollector, db, resultsDB, store.DefaultCacheSize) + payloadsDB := store.NewPayloads(db, indexDB, guaranteesDB, sealsDB, receiptsDB, resultsDB) + blocksDB := store.NewBlocks(db, headersDB, payloadsDB) + qcsDB := 
store.NewQuorumCertificates(metricsCollector, db, store.DefaultCacheSize) + setupsDB := store.NewEpochSetups(metricsCollector, db) + commitsDB := store.NewEpochCommits(metricsCollector, db) + protocolStateDB := store.NewEpochProtocolStateEntries(metricsCollector, setupsDB, commitsDB, db, + store.DefaultEpochProtocolStateCacheSize, store.DefaultProtocolStateIndexCacheSize) + protocolKVStoreDB := store.NewProtocolKVStore(metricsCollector, db, + store.DefaultProtocolKVStoreCacheSize, store.DefaultProtocolKVStoreByBlockIDCacheSize) + versionBeaconDB := store.NewVersionBeacons(db) + protocolStateEvents := events.NewDistributor() + + localNodeID := identity.NodeID log := unittest.Logger().With(). Int("index", index). - Hex("node_id", localID[:]). + Hex("node_id", localNodeID[:]). Logger() state, err := bprotocol.Bootstrap( metricsCollector, db, + lockManager, headersDB, sealsDB, resultsDB, @@ -395,19 +423,20 @@ func createNode( qcsDB, setupsDB, commitsDB, - statusesDB, + protocolStateDB, + protocolKVStoreDB, versionBeaconDB, rootSnapshot, ) require.NoError(t, err) - blockTimer, err := blocktimer.NewBlockTimer(1*time.Millisecond, 90*time.Second) + blockTimer, err := blocktimer.NewBlockTimer(1, 90_000) require.NoError(t, err) fullState, err := bprotocol.NewFullConsensusState( log, tracer, - consumer, + protocolStateEvents, state, indexDB, payloadsDB, @@ -428,38 +457,61 @@ func createNode( counterConsumer := &CounterConsumer{ finalized: func(total uint) { - stopper.onFinalizedTotal(node.id.ID(), total) + stopper.onFinalizedTotal(node.id.NodeID, total) }, } // log with node index logConsumer := notifications.NewLogConsumer(log) - notifier := pubsub.NewDistributor() - notifier.AddConsumer(counterConsumer) - notifier.AddConsumer(logConsumer) + hotstuffDistributor := pubsub.NewDistributor() + hotstuffDistributor.AddConsumer(counterConsumer) + hotstuffDistributor.AddConsumer(logConsumer) - require.Equal(t, participant.nodeInfo.NodeID, localID) + require.Equal(t, participant.nodeInfo.NodeID, localNodeID) privateKeys, err := participant.nodeInfo.PrivateKeys() require.NoError(t, err) // make local - me, err := local.New(identity, privateKeys.StakingKey) + me, err := local.New(identity.IdentitySkeleton, privateKeys.StakingKey) require.NoError(t, err) // add a network for this node to the hub - net := hub.AddNetwork(localID, node) + net := hub.AddNetwork(localNodeID, node) guaranteeLimit, sealLimit := uint(1000), uint(1000) - guarantees, err := stdmap.NewGuarantees(guaranteeLimit) - require.NoError(t, err) + guarantees := stdmap.NewGuarantees(guaranteeLimit) receipts := consensusMempools.NewExecutionTree() seals := stdmap.NewIncorporatedResultSeals(sealLimit) + mutableProtocolState := protocol_state.NewMutableProtocolState( + log, + protocolStateDB, + protocolKVStoreDB, + state.Params(), + headersDB, + resultsDB, + setupsDB, + commitsDB, + ) + // initialize the block builder - build, err := builder.NewBuilder(metricsCollector, db, fullState, headersDB, sealsDB, indexDB, blocksDB, resultsDB, receiptsDB, - guarantees, consensusMempools.NewIncorporatedResultSeals(seals, receiptsDB), receipts, tracer) + build, err := builder.NewBuilder( + metricsCollector, + fullState, + headersDB, + sealsDB, + indexDB, + blocksDB, + resultsDB, + receiptsDB, + mutableProtocolState, + guarantees, + consensusMempools.NewIncorporatedResultSeals(seals, receiptsDB), + receipts, + tracer, + ) require.NoError(t, err) // initialize the pending blocks cache @@ -471,13 +523,12 @@ func createNode( rootQC, err := 
rootSnapshot.QuorumCertificate() require.NoError(t, err) - // selector := filter.HasRole(flow.RoleConsensus) - committee, err := committees.NewConsensusCommittee(state, localID) + committee, err := committees.NewConsensusCommittee(state, localNodeID) require.NoError(t, err) - consumer.AddConsumer(committee) + protocolStateEvents.AddConsumer(committee) // initialize the block finalizer - final := finalizer.NewFinalizer(db, headersDB, fullState, trace.NewNoopTracer()) + final := finalizer.NewFinalizer(db.Reader(), headersDB, fullState, trace.NewNoopTracer()) syncCore, err := synccore.New(log, synccore.DefaultConfig(), metricsCollector, rootHeader.ChainID) require.NoError(t, err) @@ -485,14 +536,14 @@ func createNode( voteAggregationDistributor := pubsub.NewVoteAggregationDistributor() voteAggregationDistributor.AddVoteAggregationConsumer(logConsumer) - forks, err := consensus.NewForks(rootHeader, headersDB, final, notifier, rootHeader, rootQC) + forks, err := consensus.NewForks(rootHeader, headersDB, final, hotstuffDistributor, rootHeader, rootQC) require.NoError(t, err) validator := consensus.NewValidator(metricsCollector, committee) require.NoError(t, err) keys := &storagemock.SafeBeaconKeys{} - // there is DKG key for this epoch + // there is Random Beacon key for this epoch keys.On("RetrieveMyBeaconPrivateKey", mock.Anything).Return( func(epochCounter uint64) crypto.PrivateKey { dkgInfo, ok := participant.beaconInfoByEpoch[epochCounter] @@ -512,7 +563,8 @@ func createNode( signer := verification.NewCombinedSigner(me, beaconKeyStore) - persist := persister.New(db, rootHeader.ChainID) + persist, err := persister.New(db, rootHeader.ChainID, lockManager) + require.NoError(t, err) livenessData, err := persist.GetLivenessData() require.NoError(t, err) @@ -548,7 +600,12 @@ func createNode( timeoutAggregationDistributor, timeoutProcessorFactory, ) - timeoutCollectors := timeoutaggregator.NewTimeoutCollectors(log, livenessData.CurrentView, timeoutCollectorsFactory) + timeoutCollectors := timeoutaggregator.NewTimeoutCollectors( + log, + metricsCollector, + livenessData.CurrentView, + timeoutCollectorsFactory, + ) timeoutAggregator, err := timeoutaggregator.NewTimeoutAggregator( log, @@ -563,7 +620,7 @@ func createNode( hotstuffModules := &consensus.HotstuffModules{ Forks: forks, Validator: validator, - Notifier: notifier, + Notifier: hotstuffDistributor, Committee: committee, Signer: signer, Persist: persist, @@ -577,9 +634,10 @@ func createNode( hot, err := consensus.NewParticipant( log, metricsCollector, + metricsCollector, build, rootHeader, - []*flow.Header{}, + []*flow.ProposalHeader{}, hotstuffModules, consensus.WithMinTimeout(hotstuffTimeout), func(cfg *consensus.ParticipantConfig) { @@ -595,7 +653,7 @@ func createNode( metricsCollector, metricsCollector, metricsCollector, - notifier, + hotstuffDistributor, tracer, headersDB, payloadsDB, @@ -606,6 +664,7 @@ func createNode( hot, voteAggregator, timeoutAggregator, + modulecompliance.DefaultConfig(), ) require.NoError(t, err) @@ -613,12 +672,15 @@ func createNode( require.NoError(t, err) identities, err := state.Final().Identities(filter.And( - filter.HasRole(flow.RoleConsensus), - filter.Not(filter.HasNodeID(me.NodeID())), + filter.HasRole[flow.Identity](flow.RoleConsensus), + filter.Not(filter.HasNodeID[flow.Identity](me.NodeID())), )) require.NoError(t, err) idProvider := id.NewFixedIdentifierProvider(identities.NodeIDs()) + spamConfig, err := synceng.NewSpamDetectionConfig() + require.NoError(t, err, "could not initialize spam detection 
config") + // initialize the synchronization engine sync, err := synceng.New( log, @@ -630,6 +692,7 @@ func createNode( comp, syncCore, idProvider, + spamConfig, func(cfg *synceng.Config) { // use a small pool and scan interval for sync engine cfg.ScanInterval = 500 * time.Millisecond @@ -652,8 +715,9 @@ func createNode( ) require.NoError(t, err) - notifier.AddConsumer(messageHub) + hotstuffDistributor.AddConsumer(messageHub) + node.dbCloser = db node.compliance = comp node.sync = sync node.state = fullState @@ -671,7 +735,7 @@ func createNode( func cleanupNodes(nodes []*Node) { for _, n := range nodes { - _ = n.db.Close() + _ = n.dbCloser.Close() _ = os.RemoveAll(n.dbDir) } } diff --git a/consensus/integration/signer_test.go b/consensus/integration/signer_test.go deleted file mode 100644 index ea443394d20..00000000000 --- a/consensus/integration/signer_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package integration_test - -import ( - "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/model/flow" -) - -type Signer struct { - localID flow.Identifier -} - -func (*Signer) CreateProposal(block *model.Block) (*model.Proposal, error) { - proposal := &model.Proposal{ - Block: block, - SigData: nil, - } - return proposal, nil -} -func (s *Signer) CreateVote(block *model.Block) (*model.Vote, error) { - vote := &model.Vote{ - View: block.View, - BlockID: block.BlockID, - SignerID: s.localID, - SigData: nil, - } - return vote, nil -} -func (*Signer) CreateQC(votes []*model.Vote) (*flow.QuorumCertificate, error) { - qc := &flow.QuorumCertificate{ - View: votes[0].View, - BlockID: votes[0].BlockID, - SignerIndices: nil, - SigData: nil, - } - return qc, nil -} - -func (*Signer) VerifyVote(voterID *flow.Identity, sigData []byte, block *model.Block) error { - return nil -} - -func (*Signer) VerifyQC(voters flow.IdentityList, sigData []byte, block *model.Block) error { - return nil -} diff --git a/consensus/integration/slow_test.go b/consensus/integration/slow_test.go index cff6ced64c3..3ac5226755c 100644 --- a/consensus/integration/slow_test.go +++ b/consensus/integration/slow_test.go @@ -105,8 +105,8 @@ func TestTimeoutRebroadcast(t *testing.T) { blockedTimeoutObjectsTracker := make(map[flow.Identifier]map[uint64]uint64) hub.WithFilter(func(channelID channels.Channel, event interface{}, sender, receiver *Node) (bool, time.Duration) { switch m := event.(type) { - case *messages.BlockProposal: - return m.Block.Header.View == 5, 0 // drop proposals only for view 5 + case *messages.Proposal: + return m.Block.View == 5, 0 // drop proposals only for view 5 case *messages.TimeoutObject: // drop first timeout object for every sender for every view lock.Lock() diff --git a/consensus/integration/stopper_test.go b/consensus/integration/stopper_test.go index 1c0978ddc18..ee66b2046b2 100644 --- a/consensus/integration/stopper_test.go +++ b/consensus/integration/stopper_test.go @@ -48,7 +48,7 @@ func NewStopper(finalizedCount uint, tolerate int) *Stopper { func (s *Stopper) AddNode(n *Node) { s.Lock() defer s.Unlock() - s.running[n.id.ID()] = struct{}{} + s.running[n.id.NodeID] = struct{}{} } // WithStopFunc adds a function to use to stop all nodes (typically the cancel function of the context used to start them). 
diff --git a/consensus/participant.go b/consensus/participant.go index 663da42ea16..0c39696dc54 100644 --- a/consensus/participant.go +++ b/consensus/participant.go @@ -27,9 +27,10 @@ import ( func NewParticipant( log zerolog.Logger, metrics module.HotstuffMetrics, + mempoolMetrics module.MempoolMetrics, builder module.Builder, finalized *flow.Header, - pending []*flow.Header, + pending []*flow.ProposalHeader, modules *HotstuffModules, options ...Option, ) (*eventloop.EventLoop, error) { @@ -58,29 +59,14 @@ func NewParticipant( } // initialize dynamically updatable timeout config - timeoutConfig, err := timeout.NewConfig( - cfg.TimeoutMinimum, - cfg.TimeoutMaximum, - cfg.TimeoutAdjustmentFactor, - cfg.HappyPathMaxRoundFailures, - cfg.BlockRateDelay, - cfg.MaxTimeoutObjectRebroadcastInterval, - ) + timeoutConfig, err := timeout.NewConfig(cfg.TimeoutMinimum, cfg.TimeoutMaximum, cfg.TimeoutAdjustmentFactor, cfg.HappyPathMaxRoundFailures, cfg.MaxTimeoutObjectRebroadcastInterval) if err != nil { return nil, fmt.Errorf("could not initialize timeout config: %w", err) } - // register as dynamically updatable via admin command - if cfg.Registrar != nil { - err = cfg.Registrar.RegisterDurationConfig("hotstuff-block-rate-delay", timeoutConfig.GetBlockRateDelay, timeoutConfig.SetBlockRateDelay) - if err != nil { - return nil, fmt.Errorf("failed to register block rate delay config: %w", err) - } - } - // initialize the pacemaker controller := timeout.NewController(timeoutConfig) - pacemaker, err := pacemaker.New(controller, modules.Notifier, modules.Persist, + pacemaker, err := pacemaker.New(controller, cfg.ProposalDurationProvider, modules.Notifier, modules.Persist, pacemaker.WithQCs(qcCollector.Retrieve()...), pacemaker.WithTCs(tcCollector.Retrieve()...), ) @@ -88,18 +74,18 @@ func NewParticipant( return nil, fmt.Errorf("could not initialize flow pacemaker: %w", err) } - // initialize block producer - producer, err := blockproducer.New(modules.Signer, modules.Committee, builder) - if err != nil { - return nil, fmt.Errorf("could not initialize block producer: %w", err) - } - // initialize the safetyRules safetyRules, err := safetyrules.New(modules.Signer, modules.Persist, modules.Committee) if err != nil { return nil, fmt.Errorf("could not initialize safety rules: %w", err) } + // initialize block producer + producer, err := blockproducer.New(safetyRules, modules.Committee, builder) + if err != nil { + return nil, fmt.Errorf("could not initialize block producer: %w", err) + } + // initialize the event handler eventHandler, err := eventhandler.NewEventHandler( log, @@ -116,7 +102,7 @@ func NewParticipant( } // initialize and return the event loop - loop, err := eventloop.NewEventLoop(log, metrics, eventHandler, cfg.StartupTime) + loop, err := eventloop.NewEventLoop(log, metrics, mempoolMetrics, eventHandler, cfg.StartupTime) if err != nil { return nil, fmt.Errorf("could not initialize event loop: %w", err) } @@ -200,12 +186,11 @@ func makeCertifiedRootBlock(header *flow.Header, qc *flow.QuorumCertificate) (mo // instead of having to distinguish between a genesis block without a qc // and a later-finalized root block where we can retrieve the qc. 
rootBlock := &model.Block{ - View: header.View, - BlockID: header.ID(), - ProposerID: header.ProposerID, - QC: nil, // QC is omitted - PayloadHash: header.PayloadHash, - Timestamp: header.Timestamp, + View: header.View, + BlockID: header.ID(), + ProposerID: header.ProposerID, + QC: nil, // QC is omitted + Timestamp: header.Timestamp, } return model.NewCertifiedBlock(rootBlock, qc) } diff --git a/consensus/recovery/cluster/state.go b/consensus/recovery/cluster/state.go index 7cc8446190d..d5e115ffbef 100644 --- a/consensus/recovery/cluster/state.go +++ b/consensus/recovery/cluster/state.go @@ -19,7 +19,7 @@ import ( // // Note: this is an expensive method, which is intended to help recover from a crash, e.g. help to // re-build the in-memory consensus state. -func FindLatest(state cluster.State, headers storage.Headers) (*flow.Header, []*flow.Header, error) { +func FindLatest(state cluster.State, headers storage.Headers) (*flow.Header, []*flow.ProposalHeader, error) { finalizedSnapshot := state.Final() // state snapshot at latest finalized block finalizedBlock, err := finalizedSnapshot.Head() // header of latest finalized block if err != nil { @@ -30,13 +30,13 @@ func FindLatest(state cluster.State, headers storage.Headers) (*flow.Header, []* return nil, nil, fmt.Errorf("could not get pending children: %w", err) } - pending := make([]*flow.Header, 0, len(pendingIDs)) + pending := make([]*flow.ProposalHeader, 0, len(pendingIDs)) for _, pendingID := range pendingIDs { - header, err := headers.ByBlockID(pendingID) + proposal, err := headers.ProposalByBlockID(pendingID) if err != nil { return nil, nil, fmt.Errorf("could not find pending child: %w", err) } - pending = append(pending, header) + pending = append(pending, proposal) } return finalizedBlock, pending, nil diff --git a/consensus/recovery/protocol/state.go b/consensus/recovery/protocol/state.go index 1bbc20b1bf1..52fbb25021e 100644 --- a/consensus/recovery/protocol/state.go +++ b/consensus/recovery/protocol/state.go @@ -19,7 +19,7 @@ import ( // // Note: this is an expensive method, which is intended to help recover from a crash, e.g. help to // re-build the in-memory consensus state.
-func FindLatest(state protocol.State, headers storage.Headers) (*flow.Header, []*flow.Header, error) { +func FindLatest(state protocol.State, headers storage.Headers) (*flow.Header, []*flow.ProposalHeader, error) { finalizedSnapshot := state.Final() // state snapshot at latest finalized block finalizedBlock, err := finalizedSnapshot.Head() // header of latest finalized block if err != nil { @@ -31,13 +31,13 @@ func FindLatest(state protocol.State, headers storage.Headers) (*flow.Header, [] } // retrieve the headers for each of the pending blocks - pending := make([]*flow.Header, 0, len(pendingIDs)) + pending := make([]*flow.ProposalHeader, 0, len(pendingIDs)) for _, pendingID := range pendingIDs { - pendingHeader, err := headers.ByBlockID(pendingID) + proposal, err := headers.ProposalByBlockID(pendingID) if err != nil { return nil, nil, fmt.Errorf("could not find pending block by ID: %w", err) } - pending = append(pending, pendingHeader) + pending = append(pending, proposal) } return finalizedBlock, pending, nil diff --git a/consensus/recovery/protocol/state_test.go b/consensus/recovery/protocol/state_test.go index d22b4ef53f9..790aa8751a4 100644 --- a/consensus/recovery/protocol/state_test.go +++ b/consensus/recovery/protocol/state_test.go @@ -4,7 +4,6 @@ import ( "context" "testing" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" recovery "github.com/onflow/flow-go/consensus/recovery/protocol" @@ -12,7 +11,8 @@ import ( "github.com/onflow/flow-go/module/metrics" protocol "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/util" - bstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -21,34 +21,37 @@ import ( func TestSaveBlockAsReplica(t *testing.T) { participants := unittest.IdentityListFixture(5, unittest.WithAllRoles()) rootSnapshot := unittest.RootSnapshotFixture(participants) + protocolState, err := rootSnapshot.ProtocolState() + require.NoError(t, err) + rootProtocolStateID := protocolState.ID() b0, err := rootSnapshot.Head() require.NoError(t, err) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { - b1 := unittest.BlockWithParentFixture(b0) - b1.SetPayload(flow.Payload{}) - - err = state.Extend(context.Background(), b1) + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { + b1 := unittest.BlockWithParentAndPayload( + b0, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + b1p := unittest.ProposalFromBlock(b1) + err = state.Extend(context.Background(), b1p) require.NoError(t, err) - b2 := unittest.BlockWithParentFixture(b1.Header) - b2.SetPayload(flow.Payload{}) - - err = state.Extend(context.Background(), b2) + b2 := unittest.BlockWithParentProtocolState(b1) + b2p := unittest.ProposalFromBlock(b2) + err = state.Extend(context.Background(), b2p) require.NoError(t, err) - b3 := unittest.BlockWithParentFixture(b2.Header) - b3.SetPayload(flow.Payload{}) - - err = state.Extend(context.Background(), b3) + b3 := unittest.BlockWithParentProtocolState(b2) + b3p := unittest.ProposalFromBlock(b3) + err = state.Extend(context.Background(), b3p) require.NoError(t, err) metrics := metrics.NewNoopCollector() - headers := bstorage.NewHeaders(metrics, db) + headers := store.NewHeaders(metrics, db) finalized, pending, err := recovery.FindLatest(state, headers) 
require.NoError(t, err) require.Equal(t, b0.ID(), finalized.ID(), "recover find latest returns inconsistent finalized block") // b1,b2,b3 are unfinalized (pending) blocks - require.Equal(t, []*flow.Header{b1.Header, b2.Header, b3.Header}, pending) + require.Equal(t, []*flow.ProposalHeader{b1p.ProposalHeader(), b2p.ProposalHeader(), b3p.ProposalHeader()}, pending) }) } diff --git a/consensus/recovery/recover.go b/consensus/recovery/recover.go index a470aedc3ce..5d2a33ff464 100644 --- a/consensus/recovery/recover.go +++ b/consensus/recovery/recover.go @@ -12,7 +12,7 @@ import ( // BlockScanner describes a function for ingesting pending blocks. // Any returned errors are considered fatal. -type BlockScanner func(proposal *model.Proposal) error +type BlockScanner func(proposal *model.SignedProposal) error // Recover is a utility method for recovering the HotStuff state after a restart. // It receives the list `pending` containing _all_ blocks that @@ -22,12 +22,12 @@ type BlockScanner func(proposal *model.Proposal) error // be listed before B, unless B's parent is the latest finalized block) // // CAUTION: all pending blocks are required to be valid (guaranteed if the block passed the compliance layer) -func Recover(log zerolog.Logger, pending []*flow.Header, scanners ...BlockScanner) error { +func Recover(log zerolog.Logger, pending []*flow.ProposalHeader, scanners ...BlockScanner) error { log.Info().Int("total", len(pending)).Msgf("recovery started") // add all pending blocks to forks for _, header := range pending { - proposal := model.ProposalFromFlow(header) // convert the header into a proposal + proposal := model.SignedProposalFromFlow(header) // convert the header into a proposal for _, s := range scanners { err := s(proposal) if err != nil { @@ -48,7 +48,7 @@ func Recover(log zerolog.Logger, pending []*flow.Header, scanners ...BlockScanne // finalized block. Caution, input blocks must be valid and in parent-first order // (unless parent is the latest finalized block). func ForksState(forks hotstuff.Forks) BlockScanner { - return func(proposal *model.Proposal) error { + return func(proposal *model.SignedProposal) error { err := forks.AddValidatedBlock(proposal.Block) if err != nil { return fmt.Errorf("could not add block %v to forks: %w", proposal.Block.BlockID, err) @@ -63,7 +63,7 @@ func ForksState(forks hotstuff.Forks) BlockScanner { // // Caution: input blocks must be valid. func VoteAggregatorState(voteAggregator hotstuff.VoteAggregator) BlockScanner { - return func(proposal *model.Proposal) error { + return func(proposal *model.SignedProposal) error { voteAggregator.AddBlock(proposal) return nil } @@ -72,7 +72,7 @@ func VoteAggregatorState(voteAggregator hotstuff.VoteAggregator) BlockScanner { // CollectParentQCs collects all parent QCs included in the blocks descending from the // latest finalized block. Caution, input blocks must be valid. func CollectParentQCs(collector Collector[*flow.QuorumCertificate]) BlockScanner { - return func(proposal *model.Proposal) error { + return func(proposal *model.SignedProposal) error { qc := proposal.Block.QC if qc != nil { collector.Append(qc) @@ -84,7 +84,7 @@ func CollectParentQCs(collector Collector[*flow.QuorumCertificate]) BlockScanner // CollectTCs collect all TCs included in the blocks descending from the // latest finalized block. Caution, input blocks must be valid. 
func CollectTCs(collector Collector[*flow.TimeoutCertificate]) BlockScanner { - return func(proposal *model.Proposal) error { + return func(proposal *model.SignedProposal) error { tc := proposal.LastViewTC if tc != nil { collector.Append(tc) diff --git a/consensus/recovery/recover_test.go b/consensus/recovery/recover_test.go index ac0fb0c3d4f..7f9c5dd4832 100644 --- a/consensus/recovery/recover_test.go +++ b/consensus/recovery/recover_test.go @@ -12,34 +12,34 @@ import ( func TestRecover(t *testing.T) { finalized := unittest.BlockHeaderFixture() - blocks := unittest.ChainFixtureFrom(100, finalized) - pending := make([]*flow.Header, 0) - for _, b := range blocks { - pending = append(pending, b.Header) + proposals := unittest.ProposalChainFixtureFrom(100, finalized) + pending := make([]*flow.ProposalHeader, 0) + for _, b := range proposals { + pending = append(pending, b.ProposalHeader()) } // Recover with `pending` blocks and record what blocks are forwarded to `onProposal` - recovered := make([]*model.Proposal, 0) - scanner := func(block *model.Proposal) error { + recovered := make([]*model.SignedProposal, 0) + scanner := func(block *model.SignedProposal) error { recovered = append(recovered, block) return nil } err := Recover(unittest.Logger(), pending, scanner) require.NoError(t, err) - // should forward blocks in exact order, just converting flow.Header to pending block + // should forward blocks in exact order, just converting flow.ProposalHeader to pending block require.Len(t, recovered, len(pending)) for i, r := range recovered { - require.Equal(t, model.ProposalFromFlow(pending[i]), r) + require.Equal(t, model.SignedProposalFromFlow(pending[i]), r) } } func TestRecoverEmptyInput(t *testing.T) { - scanner := func(block *model.Proposal) error { + scanner := func(block *model.SignedProposal) error { require.Fail(t, "no proposal expected") return nil } - err := Recover(unittest.Logger(), []*flow.Header{}, scanner) + err := Recover(unittest.Logger(), []*flow.ProposalHeader{}, scanner) require.NoError(t, err) } diff --git a/crypto/.dockerignore b/crypto/.dockerignore deleted file mode 100644 index 5c75f82093a..00000000000 --- a/crypto/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -relic/build diff --git a/crypto/Dockerfile b/crypto/Dockerfile deleted file mode 100644 index 37a0b373171..00000000000 --- a/crypto/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -# gcr.io/dl-flow/golang-cmake - -FROM golang:1.19-buster -RUN apt-get update -RUN apt-get -y install cmake zip -RUN go install github.com/axw/gocov/gocov@latest -RUN go install github.com/matm/gocov-html@latest -WORKDIR /go/src/flow diff --git a/crypto/Makefile b/crypto/Makefile deleted file mode 100644 index c66774e1033..00000000000 --- a/crypto/Makefile +++ /dev/null @@ -1,57 +0,0 @@ -# Name of the cover profile -COVER_PROFILE := cover.out - -IMAGE_TAG := v0.0.7 - -# allows CI to specify whether to have race detection on / off -ifeq ($(RACE_DETECTOR),1) - RACE_FLAG := -race -else - RACE_FLAG := -endif - -ADX_SUPPORT := $(shell if ([ -f "/proc/cpuinfo" ] && grep -q -e '^flags.*\badx\b' /proc/cpuinfo); then echo 1; else echo 0; fi) - -.PHONY: setup -setup: - go generate - -# test BLS-related functionalities requiring the Relic library (and hence relic Go build flag) -.PHONY: relic_tests -relic_tests: -ifeq ($(ADX_SUPPORT), 1) - go test -coverprofile=$(COVER_PROFILE) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) --tags relic $(if $(VERBOSE),-v,) -else - CGO_CFLAGS="-D__BLST_PORTABLE__" go test 
-coverprofile=$(COVER_PROFILE) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) --tags relic $(if $(VERBOSE),-v,) -endif - -# test all packages that do not require Relic library (all functionalities except the BLS-related ones) -.PHONY: non_relic_tests -non_relic_tests: -# root package without relic - go test -coverprofile=$(COVER_PROFILE) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(if $(VERBOSE),-v,) -# sub packages - go test -coverprofile=$(COVER_PROFILE) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(if $(VERBOSE),-v,) ./hash - go test -coverprofile=$(COVER_PROFILE) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(if $(VERBOSE),-v,) ./random - -############################################################################################ -# CAUTION: DO NOT MODIFY THIS TARGET! DOING SO WILL BREAK THE FLAKY TEST MONITOR - -# sets up the crypto module and runs all tests -.PHONY: test -test: setup unittest - -# runs the unit tests of the module (assumes the module was set up) -.PHONY: unittest -unittest: relic_tests non_relic_tests - -############################################################################################ - -.PHONY: docker-build -docker-build: - docker build -t gcr.io/dl-flow/golang-cmake:latest -t gcr.io/dl-flow/golang-cmake:$(IMAGE_TAG) . - -.PHONY: docker-push -docker-push: - docker push gcr.io/dl-flow/golang-cmake:latest - docker push "gcr.io/dl-flow/golang-cmake:$(IMAGE_TAG)" diff --git a/crypto/README.md b/crypto/README.md index 9f29ad03e16..3c9f7839d4a 100644 --- a/crypto/README.md +++ b/crypto/README.md @@ -1,174 +1,7 @@ # Flow Cryptography -This Go package provides the cryptography tools needed by the Flow blockchain. -Most of the primitives and protocols can be used in other projects and are not specific to Flow. - -Flow is an ongoing project, which means that new features will still be added and modifications will still be made to improve security and performance of the cryptography package. - -Notes: - - The package has been audited for security in January 2021 on [this version](https://github.com/onflow/flow-go/tree/2707acdabb851138e298b2d186e73f47df8a14dd). The package had a few improvements since. - - The package does not provide security against side channel or fault attacks. - -## Package import - -Cloning Flow repository and following the [installation steps](https://github.com/onflow/flow-go) builds the necessary tools to use Flow cryptography. - -If you wish to only import the Flow cryptography package into your Go project, please follow the following steps: - -- Get Flow cryptography package -``` -go get github.com/onflow/flow-go/crypto -``` -or simply import the package to your Go project - ``` -import "github.com/onflow/flow-go/crypto" -``` - -This is enough to run the package code for many functionalities. However, this isn't enough if BLS signature related functionalities are used. The BLS features rely on an extrnal C library ([Relic](https://github.com/relic-toolkit/relic)) for lower level mathematical operations. Building your project at this stage including BLS functionalities would result in build errors related to missing "relic" files. For instance: -``` -fatal error: 'relic.h' file not found -#include "relic.h" - ^~~~~~~~~ -``` - - An extra step is required to compile the external dependency (Relic) locally. - -- Install [CMake](https://cmake.org/install/), which is used for building the package. 
The build also requires [Git](http://git-scm.com/) and bash scripting. -- From the Go package directory in `$GOPATH/pkg/mod/github.com/onflow/flow-go/crypto@<version-tag>/`, build the package dependencies. `version-tag` is the imported package version. -For instance: -``` -cd $GOPATH/pkg/mod/github.com/onflow/flow-go/crypto@v0.25.0/ -go generate -``` - -Below is a bash script example to automate the above steps. The script can be copied into your Go project root directory. -It extracts the imported pacakage version from your project's go.mod file and performs the remaining steps. -```bash -#!/bin/bash - -# crypto package -PKG_NAME="github.com/onflow/flow-go/crypto" - -# go get the package -go get ${PKG_NAME} - -# go.mod -MOD_FILE="./go.mod" - -# the version of onflow/flow-go/crypto used in the project is read from the go.mod file -if [ -f "${MOD_FILE}" ] -then - # extract the version from the go.mod file - VERSION="$(grep ${PKG_NAME} < ${MOD_FILE} | cut -d' ' -f 2)" - # using the right version, get the package directory path - PKG_DIR="$(go env GOPATH)/pkg/mod/${PKG_NAME}@${VERSION}" -else - { echo "couldn't find go.mod file - make sure the script is in the project root directory"; exit 1; } -fi - -# grant permissions if not existant -if [[ ! -r ${PKG_DIR} || ! -w ${PKG_DIR} || ! -x ${PKG_DIR} ]]; then - sudo chmod -R 755 "${PKG_DIR}" -fi - -# get into the package directory and set up the external dependencies -( - cd "${PKG_DIR}" || { echo "cd into the GOPATH package folder failed"; exit 1; } - go generate -) -``` - - -Finally, when building your project and including any BLS functionality, adding a Go build tag to include the BLS files in the build is required. -The tag is not required when the package is used without BLS functions. It was introduced to avoid build errors when BLS (and therefore Relic) is not needed. - -``` -go build -tags=relic -``` - -## Algorithms - -### Hashing and Message Authentication Code: - -`crypto/hash` provides the hashing and MAC algorithms required for Flow. All algorithm implement the generic interface `Hasher`. All digests are of the generic type `Hash`. - - * SHA-3: 256 and 384 output sizes - * Legacy Kaccak: 256 output size - * SHA-2: 256 and 384 output sizes - * KMAC: 128 variant - -### Signature schemes - -All signature schemes use the generic interfaces of `PrivateKey` and `PublicKey`. All signatures are of the generic type `Signature`. - - * ECDSA - * public keys are compressed or uncompressed. - * ephemeral key is derived from the private key, hash and an external entropy using a CSPRNG (based on https://golang.org/pkg/crypto/ecdsa/). - * supports NIST P-256 (secp256r1) and secp256k1 curves. - - * BLS - * supports [BLS 12-381](https://electriccoin.co/blog/new-snark-curve/) curve. - * is implementing the minimal-signature-size variant: - signatures in G1 and public keys in G2. - * default set-up uses [compressed](https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-08.html#name-zcash-serialization-format-) G1/G2 points, - but uncompressed format is also supported. - * hashing to curve uses the [Simplified SWU map-to-curve](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-14#section-6.6.3). - * expanding the message in hash-to-curve uses a cSHAKE-based KMAC128 with a domain separation tag. - KMAC128 serves as an expand_message_xof function. - * this results in the full ciphersuite BLS_SIG_BLS12381G1_XOF:KMAC128_SSWU_RO_POP_ for signatures - and BLS_POP_BLS12381G1_XOF:KMAC128_SSWU_RO_POP_ for proofs of possession. 
- * signature verification includes the signature membership check in G1. - * public key membership check in G2 is provided outside of the signature verification. - * membership check in G1 is using [Bowe's fast check](https://eprint.iacr.org/2019/814.pdf), while membership check in G2 is using a simple scalar multiplication by the group order (both will be updated to use Scott's method) - * non-interactive aggregation of signatures, public keys and private keys. - * multi-signature verification of an aggregated signature of a single message under multiple public keys. - * multi-signature verification of an aggregated signature of multiple messages under multiple public keys. - * batch verification of multiple signatures of a single message under multiple - public keys: use a binary tree of aggregations to find the invalid signatures. - * SPoCK scheme based on BLS: verifies two signatures have been generated from the same message that is unknown to the verifier. - - * Future features: - * membership checks in G1/G2 using [Scott's method](https://eprint.iacr.org/2021/1130.pdf). - * support minimal-pubkey-size variant - -### PRNG - - * ChaCha20-based CSPRNG - -## Protocols - -### Threshold Signature - - * BLS-based threshold signature - * [non interactive](https://www.iacr.org/archive/pkc2003/25670031/25670031.pdf) threshold signature reconstruction. - * supports only BLS 12-381 curve with the same features above. - * (t+1) signatures are required to reconstruct the threshold signature. - * key generation (single dealer) to provide the set of keys. - * provides a stateless api and a stateful api. - - * Future features: - * support a partial signature reconstruction in the stateful api to avoid a long final reconstruction. - - -### Discrete-Log based distributed key generation - -All supported Distributed Key Generation protocols are [discrete log based](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.50.2737&rep=rep1&type=pdf) and are implemented for the same BLS setup on the BLS 12-381 curve. The protocols generate key sets for the BLS-based threshold signature. - - * Feldman VSS - * simple verifiable secret sharing with a single dealer. - * the library does not implement the communication channels between participants. The caller should implement the methods `PrivateSend` (1-to-1 messaging) and `Broadcast` (1-to-n messaging) - * 1-to-1 messaging must be a private channel, the caller must make sure the channel preserves confidentiality and authenticates the sender. - * 1-to-n broadcasting assumes all destination participants receive the same copy of the message. The channel should also authenticate the broadcaster. - * It is recommended that both communication channels are unique per protocol instance. This could be achieved by prepending the messages to send/broadcast by a unique protocol instance ID. - * Feldman VSS Qual. - * an extension of the simple Feldman VSS. - * implements a complaint mechanism to qualify/disqualify the dealer. - * Joint Feldman (Pedersen) - * distributed generation. - * based on multiple parallel instances of Feldman VSS Qual with multiple dealers. - * same assumptions about the communication channels as in Feldman VSS. - - +Note: This module has been deprecated. The latest supported version is `v0.25.0`. The module has since migrated to `github.com/onflow/crypto`; please use the new module instead. Version `v0.25.0` of this module is identical to version `v0.25.0` of the new module. +Files in this module have been removed starting from `v0.25.1`, now that the code lives in the `github.com/onflow/crypto` repository instead of `github.com/onflow/flow-go`.
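For projects still importing the deprecated path, the migration announced above is essentially an import-path change (shown here as a sketch; per the note, `v0.25.0` is the crossover tag that exists identically on both module paths):

```go
// Before: the deprecated package vendored inside flow-go
import "github.com/onflow/flow-go/crypto"

// After: the standalone successor module
import "github.com/onflow/crypto"
```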
diff --git a/crypto/bls.go b/crypto/bls.go deleted file mode 100644 index 1e009304fe2..00000000000 --- a/crypto/bls.go +++ /dev/null @@ -1,599 +0,0 @@ -//go:build relic -// +build relic - -package crypto - -// BLS signature scheme implementation using BLS12-381 curve -// ([zcash]https://electriccoin.co/blog/new-snark-curve/) -// Pairing, elliptic curve and modular arithmetic is using Relic library. -// This implementation does not include any security against side-channel attacks. - -// existing features: -// - the implementation variant is minimal-signature-size signatures: -// shorter signatures in G1, longer public keys in G2 -// - serialization of points on G1 and G2 is compressed ([zcash] -// https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-08.html#name-zcash-serialization-format-) -// - hashing to curve uses the Simplified SWU map-to-curve -// (https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-14#section-6.6.3) -// - expanding the message in hash-to-curve uses a cSHAKE-based KMAC128 with a domain separation tag. -// KMAC128 serves as an expand_message_xof function. -// - this results in the full ciphersuite BLS_SIG_BLS12381G1_XOF:KMAC128_SSWU_RO_POP_ for signatures -// and BLS_POP_BLS12381G1_XOF:KMAC128_SSWU_RO_POP_ for proofs of possession. -// - signature verification checks the membership of signature in G1. -// - the public key membership check in G2 is implemented separately from the signature verification. -// - membership check in G1 is implemented using fast Bowe's check (to be updated to Scott's check). -// - membership check in G2 is using a simple scalar multiplication with the group order (to be updated to Scott's check). -// - multi-signature tools are defined in bls_multisg.go -// - SPoCK scheme based on BLS: verifies two signatures have been generated from the same message, -// that is unknown to the verifier.
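The feature list above is exposed through a small public API. The following sketch shows the typical sign/verify flow under the old import path, using only entry points documented in this file and the README (the binary must be built with the `relic` tag, as the README instructions above describe; the same calls exist in the successor module):

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/onflow/flow-go/crypto" // deprecated path, kept here to match this module
)

func main() {
	// BLS key generation requires a seed of at least KeyGenSeedMinLen bytes.
	seed := make([]byte, crypto.KeyGenSeedMinLen)
	if _, err := rand.Read(seed); err != nil {
		panic(err)
	}
	sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed)
	if err != nil {
		panic(err)
	}

	// KMAC128-based hasher implementing expand_message_xof for hash-to-curve;
	// the domain tag follows the <protocol>-V<xx>-CS<yy>-with- convention.
	hasher := crypto.NewExpandMsgXOFKMAC128("example-V00-CS00-with-")

	msg := []byte("message to sign")
	sig, err := sk.Sign(msg, hasher)
	if err != nil {
		panic(err)
	}
	valid, err := sk.PublicKey().Verify(sig, msg, hasher)
	fmt.Println(valid, err) // true <nil>
}
```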
- -// future features: -// - membership checks G2 using Bowe's method (https://eprint.iacr.org/2019/814.pdf) -// - implement a G1/G2 swap (signatures on G2 and public keys on G1) - -// #cgo CFLAGS: -g -Wall -std=c99 -// #cgo LDFLAGS: -L${SRCDIR}/relic/build/lib -l relic_s -// #include "bls_include.h" -import "C" - -import ( - "bytes" - "crypto/sha256" - "errors" - "fmt" - - "golang.org/x/crypto/hkdf" - - "github.com/onflow/flow-go/crypto/hash" -) - -const ( - // BLS12-381 - // p size in bytes, where G1 is defined over the field Zp - fieldSize = 48 - // - // 1 for compressed, 0 for uncompressed - values should not be changed - uncompressed = 0 //nolint - compressed = 1 - // Points compression when serialized - serializationG1 = compressed - serializationG2 = compressed - // - // SignatureLenBLSBLS12381 is the size of G1 elements - SignatureLenBLSBLS12381 = fieldSize * (2 - serializationG1) // the length is divided by 2 if compression is on - PrKeyLenBLSBLS12381 = 32 - // PubKeyLenBLSBLS12381 is the size of G2 elements - PubKeyLenBLSBLS12381 = 2 * fieldSize * (2 - serializationG2) // the length is divided by 2 if compression is on - - // Hash to curve params - // expandMsgOutput is the output length of the expand_message step as required by the hash_to_curve algorithm - expandMsgOutput = 2 * (fieldSize + (securityBits / 8)) - // hash to curve suite ID of the form : CurveID_ || HashID_ || MapID_ || encodingVariant_ - h2cSuiteID = "BLS12381G1_XOF:KMAC128_SSWU_RO_" - // scheme implemented as a countermasure for rogue attacks of the form : SchemeTag_ - schemeTag = "POP_" - // Cipher suite used for BLS signatures of the form : BLS_SIG_ || h2cSuiteID || SchemeTag_ - blsSigCipherSuite = "BLS_SIG_" + h2cSuiteID + schemeTag - // Cipher suite used for BLS PoP of the form : BLS_POP_ || h2cSuiteID || SchemeTag_ - // The PoP cipher suite is guaranteed to be different than all signature ciphersuites - blsPOPCipherSuite = "BLS_POP_" + h2cSuiteID + schemeTag -) - -// blsBLS12381Algo, embeds SignAlgo -type blsBLS12381Algo struct { - // points to Relic context of BLS12-381 with all the parameters - context ctx - // the signing algo and parameters - algo SigningAlgorithm -} - -// BLS context on the BLS 12-381 curve -var blsInstance *blsBLS12381Algo - -// NewExpandMsgXOFKMAC128 returns a new expand_message_xof instance for -// the hash-to-curve function, hashing data to G1 on BLS12 381. -// This instance must only be used to generate signatures (and not PoP), -// because the internal ciphersuite is customized for signatures. It -// is guaranteed to be different than the expand_message_xof instance used -// to generate proofs of possession. -// -// KMAC128 is used as the underligned extendable-output function (xof) -// as required by https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-14#section-5.4.4. -// -// `domainTag` is a domain separation tag that defines the protocol and its subdomain. Such tag should be of the -// format: <protocol>-V<xx>-CS<yy>-with- where <protocol> is the name of the protocol, <xx> the protocol -// version number and <yy> the index of the ciphersuite in the protocol. -// The function suffixes the given `domainTag` by the BLS ciphersuite supported by the library. -// -// The returned instance is a `Hasher` and can be used to generate BLS signatures -// with the `Sign` method. 
-func NewExpandMsgXOFKMAC128(domainTag string) hash.Hasher { - // application tag is guaranteed to be different than the tag used - // to generate proofs of possession - // postfix the domain tag with the BLS ciphersuite - key := domainTag + blsSigCipherSuite - return internalExpandMsgXOFKMAC128(key) -} - -// returns an expand_message_xof instance for -// the hash-to-curve function, hashing data to G1 on BLS12 381. -// The key is used as a customizer rather than a MAC key. -func internalExpandMsgXOFKMAC128(key string) hash.Hasher { - // blsKMACFunction is the customizer used for KMAC in BLS - const blsKMACFunction = "H2C" - // the error is ignored as the parameter lengths are chosen to be in the correct range for kmac - // (tested by TestBLSBLS12381Hasher) - kmac, _ := hash.NewKMAC_128([]byte(key), []byte(blsKMACFunction), expandMsgOutput) - return kmac -} - -// checkBLSHasher asserts that the given `hasher` is not nil and -// has an output size of `expandMsgOutput`. Otherwise an error is returned: -// - nilHasherError if the hasher is nil -// - invalidHasherSizeError if the hasher's output size is not `expandMsgOutput` (128 bytes) -func checkBLSHasher(hasher hash.Hasher) error { - if hasher == nil { - return nilHasherError - } - if hasher.Size() != expandMsgOutput { - return invalidHasherSizeErrorf("hasher's size needs to be %d, got %d", expandMsgOutput, hasher.Size()) - } - return nil -} - -// Sign signs an array of bytes using the private key -// -// Signature is compressed [zcash] -// https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-08.html#name-zcash-serialization-format- -// The private key is read only. -// If the hasher used is KMAC128, the hasher is read only. -// It is recommended to use Sign with the hasher from NewExpandMsgXOFKMAC128. If not, the hasher used -// must expand the message to 1024 bits. It is also recommended to use a hasher -// with a domain separation tag. -// -// The function returns: -// - (false, nilHasherError) if a hasher is nil -// - (false, invalidHasherSizeError) if a hasher's output size is not 128 bytes -// - (signature, nil) otherwise -func (sk *prKeyBLSBLS12381) Sign(data []byte, kmac hash.Hasher) (Signature, error) { - // sanity check of input hasher - err := checkBLSHasher(kmac) - if err != nil { - return nil, err - } - - // hash the input to 128 bytes - h := kmac.ComputeHash(data) - - // set BLS context - blsInstance.reInit() - - s := make([]byte, SignatureLenBLSBLS12381) - C.bls_sign((*C.uchar)(&s[0]), - (*C.bn_st)(&sk.scalar), - (*C.uchar)(&h[0]), - (C.int)(len(h))) - return s, nil -} - -// Verify verifies a signature of a byte array using the public key and the input hasher. -// -// If the input signature slice has an invalid length or fails to deserialize into a curve -// subgroup point, the function returns false without an error. -// -// The function assumes the public key is in the valid G2 subgroup because -// all the package functions generating a BLS `PublicKey` include a G2-membership check. -// The public keys are not guaranteed to be non-identity, and therefore the function -// includes an identity comparison. Verifications against an identity public key -// are invalid to avoid equivocation issues. -// The signature membership check in G1 is included in the verification. -// -// If the hasher used is ExpandMsgXOFKMAC128, the hasher is read only. 
-// -// The function returns: -// - (false, nilHasherError) if a hasher is nil -// - (false, invalidHasherSizeError) if a hasher's output size is not 128 bytes -// - (false, error) if an unexpected error occurs -// - (validity, nil) otherwise -func (pk *pubKeyBLSBLS12381) Verify(s Signature, data []byte, kmac hash.Hasher) (bool, error) { - // check of input hasher - err := checkBLSHasher(kmac) - if err != nil { - return false, err - } - - // intialize BLS context - blsInstance.reInit() - - if len(s) != signatureLengthBLSBLS12381 { - return false, nil - } - - // hash the input to 128 bytes - h := kmac.ComputeHash(data) - - // check for identity public key - if pk.isIdentity { - return false, nil - } - - verif := C.bls_verify((*C.ep2_st)(&pk.point), - (*C.uchar)(&s[0]), - (*C.uchar)(&h[0]), - (C.int)(len(h))) - - switch verif { - case invalid: - return false, nil - case valid: - return true, nil - default: - return false, fmt.Errorf("signature verification failed") - } -} - -// 0xC0 is the header of the point at infinity serialization (either in G1 or G2) -const infinityPointHeader = 0xC0 - -var identityBLSSignature = append([]byte{infinityPointHeader}, make([]byte, signatureLengthBLSBLS12381-1)...) - -// IsBLSSignatureIdentity checks whether the input signature is -// the identity signature (point at infinity in G1). -// -// An identity signature is always an invalid signature even when -// verified against the identity public key. -// This identity check is useful when an aggregated signature is -// suspected to be equal to identity, which avoids failing the aggregated -// signature verification. -func IsBLSSignatureIdentity(s Signature) bool { - return bytes.Equal(s, identityBLSSignature) -} - -// generatePrivateKey deterministically generates a private key for BLS on BLS12-381 curve. -// The minimum size of the input seed is 32 bytes. -// -// It is recommended to use a secure crypto RNG to generate the seed. -// Otherwise, the seed must have enough entropy. -// -// The generated private key (resp. its corresponding public key) is guaranteed -// to not be equal to the identity element of Z_r (resp. G2). -func (a *blsBLS12381Algo) generatePrivateKey(ikm []byte) (PrivateKey, error) { - if len(ikm) < KeyGenSeedMinLen || len(ikm) > KeyGenSeedMaxLen { - return nil, invalidInputsErrorf( - "seed length should be at least %d bytes and at most %d bytes", - KeyGenSeedMinLen, KeyGenSeedMaxLen) - } - - // HKDF parameters - - // use SHA2-256 as the building block H in HKDF - hashFunction := sha256.New - // salt = H(UTF-8("BLS-SIG-KEYGEN-SALT-")) as per draft-irtf-cfrg-bls-signature-05 section 2.3. - saltString := "BLS-SIG-KEYGEN-SALT-" - hasher := hashFunction() - hasher.Write([]byte(saltString)) - salt := make([]byte, hasher.Size()) - hasher.Sum(salt[:0]) - - // L is the OKM length - // L = ceil((3 * ceil(log2(r))) / 16) which makes L (security_bits/8)-larger than r size - okmLength := (3 * PrKeyLenBLSBLS12381) / 2 - - // HKDF secret = IKM || I2OSP(0, 1) - secret := make([]byte, len(ikm)+1) - copy(secret, ikm) - defer overwrite(secret) // overwrite secret - // HKDF info = key_info || I2OSP(L, 2) - keyInfo := "" // use empty key diversifier. 
TODO: update header to accept input identifier - info := append([]byte(keyInfo), byte(okmLength>>8), byte(okmLength)) - - sk := newPrKeyBLSBLS12381(nil) - for { - // instantiate HKDF and extract L bytes - reader := hkdf.New(hashFunction, secret, salt, info) - okm := make([]byte, okmLength) - n, err := reader.Read(okm) - if err != nil || n != okmLength { - return nil, fmt.Errorf("key generation failed because of the HKDF reader, %d bytes were read: %w", - n, err) - } - defer overwrite(okm) // overwrite okm - - // map the bytes to a private key : SK = OS2IP(OKM) mod r - isZero := mapToZr(&sk.scalar, okm) - if !isZero { - return sk, nil - } - - // update salt = H(salt) - hasher.Reset() - hasher.Write(salt) - salt = hasher.Sum(salt[:0]) - } -} - -const invalidBLSSignatureHeader = byte(0xE0) - -// BLSInvalidSignature returns an invalid signature that fails when verified -// with any message and public key. -// -// The signature bytes represent an invalid serialization of a point which -// makes the verification fail early. The verification would return (false, nil). -func BLSInvalidSignature() Signature { - signature := make([]byte, SignatureLenBLSBLS12381) - signature[0] = invalidBLSSignatureHeader // invalid header as per C.ep_read_bin_compact - return signature -} - -// decodePrivateKey decodes a slice of bytes into a private key. -// It checks the scalar is non-zero and is less than the group order. -func (a *blsBLS12381Algo) decodePrivateKey(privateKeyBytes []byte) (PrivateKey, error) { - if len(privateKeyBytes) != prKeyLengthBLSBLS12381 { - return nil, invalidInputsErrorf("input length must be %d, got %d", - prKeyLengthBLSBLS12381, len(privateKeyBytes)) - } - sk := newPrKeyBLSBLS12381(nil) - - readScalar(&sk.scalar, privateKeyBytes) - if C.check_membership_Zr_star((*C.bn_st)(&sk.scalar)) == valid { - return sk, nil - } - - return nil, invalidInputsErrorf("the private key is not a valid BLS12-381 curve key") -} - -// decodePublicKey decodes a slice of bytes into a public key. -// This function includes a membership check in G2. -// -// Note the function does not reject the infinity point (identity element of G2). -// However, the comparison to identity is cached in the `PublicKey` structure for -// a faster check during signature verifications. Any verification against an identity -// public key outputs `false`. -func (a *blsBLS12381Algo) decodePublicKey(publicKeyBytes []byte) (PublicKey, error) { - if len(publicKeyBytes) != pubKeyLengthBLSBLS12381 { - return nil, invalidInputsErrorf("input length must be %d, got %d", - pubKeyLengthBLSBLS12381, len(publicKeyBytes)) - } - var pk pubKeyBLSBLS12381 - err := readPointG2(&pk.point, publicKeyBytes) - if err != nil { - return nil, fmt.Errorf("decode public key failed %w", err) - } - - // membership check in G2 - if C.check_membership_G2((*C.ep2_st)(&pk.point)) != valid { - return nil, invalidInputsErrorf("input key is infinity or does not encode a BLS12-381 point in the valid group") - } - - // check point is non-infinity and cache it - pk.isIdentity = (&pk.point).isInfinity() - - return &pk, nil -} - -// decodePublicKeyCompressed decodes a slice of bytes into a public key. 
-// since we use the compressed representation by default, this checks the default and delegates to decodePublicKeyCompressed -func (a *blsBLS12381Algo) decodePublicKeyCompressed(publicKeyBytes []byte) (PublicKey, error) { - if serializationG2 != compressed { - panic("library is not configured to use compressed public key serialization") - } - return a.decodePublicKey(publicKeyBytes) -} - -// prKeyBLSBLS12381 is the private key of BLS using BLS12_381, it implements PrivateKey -type prKeyBLSBLS12381 struct { - // public key - pk *pubKeyBLSBLS12381 - // private key data - scalar scalar -} - -// newPrKeyBLSBLS12381 creates a new BLS private key with the given scalar. -// If no scalar is provided, the function allocates an -// empty scalar. -func newPrKeyBLSBLS12381(x *scalar) *prKeyBLSBLS12381 { - var sk prKeyBLSBLS12381 - if x == nil { - // initialize the scalar - C.bn_new_wrapper((*C.bn_st)(&sk.scalar)) - } else { - // set the scalar - sk.scalar = *x - } - // the embedded public key is only computed when needed - return &sk -} - -// Algorithm returns the Signing Algorithm -func (sk *prKeyBLSBLS12381) Algorithm() SigningAlgorithm { - return BLSBLS12381 -} - -// Size returns the private key length in bytes -func (sk *prKeyBLSBLS12381) Size() int { - return PrKeyLenBLSBLS12381 -} - -// computePublicKey generates the public key corresponding to -// the input private key. The function makes sure the public key -// is valid in G2. -func (sk *prKeyBLSBLS12381) computePublicKey() { - var newPk pubKeyBLSBLS12381 - // compute public key pk = g2^sk - generatorScalarMultG2(&newPk.point, &sk.scalar) - - // cache the identity comparison - newPk.isIdentity = (&sk.scalar).isZero() - - sk.pk = &newPk -} - -// PublicKey returns the public key corresponding to the private key -func (sk *prKeyBLSBLS12381) PublicKey() PublicKey { - if sk.pk != nil { - return sk.pk - } - sk.computePublicKey() - return sk.pk -} - -// Encode returns a byte encoding of the private key. -// The encoding is a raw encoding in big endian padded to the group order -func (a *prKeyBLSBLS12381) Encode() []byte { - dest := make([]byte, prKeyLengthBLSBLS12381) - writeScalar(dest, &a.scalar) - return dest -} - -// Equals checks is two public keys are equal. -func (sk *prKeyBLSBLS12381) Equals(other PrivateKey) bool { - otherBLS, ok := other.(*prKeyBLSBLS12381) - if !ok { - return false - } - return sk.scalar.equals(&otherBLS.scalar) -} - -// String returns the hex string representation of the key. -func (sk *prKeyBLSBLS12381) String() string { - return fmt.Sprintf("%#x", sk.Encode()) -} - -// pubKeyBLSBLS12381 is the public key of BLS using BLS12_381, -// it implements PublicKey. -type pubKeyBLSBLS12381 struct { - // The package guarantees an instance is only created with a point - // on the correct G2 subgroup. No membership check is needed when the - // instance is used in any BLS function. - // However, an instance can be created with an infinity point. Although - // infinity is a valid G2 point, some BLS functions fail (return false) - // when used with an infinity point. The package caches the infinity - // comparison in pubKeyBLSBLS12381 for a faster check. The package makes - // sure the comparison is performed after an instance is created. - // - // public key G2 point - point pointG2 - // G2 identity check cache - isIdentity bool -} - -// newPubKeyBLSBLS12381 creates a new BLS public key with the given point. -// If no scalar is provided, the function allocates an -// empty scalar. 
-func newPubKeyBLSBLS12381(p *pointG2) *pubKeyBLSBLS12381 { - if p != nil { - key := &pubKeyBLSBLS12381{ - point: *p, - } - // cache the identity comparison for a faster check - // during signature verifications - key.isIdentity = p.isInfinity() - return key - } - return &pubKeyBLSBLS12381{} -} - -// Algorithm returns the Signing Algorithm -func (pk *pubKeyBLSBLS12381) Algorithm() SigningAlgorithm { - return BLSBLS12381 -} - -// Size returns the public key lengh in bytes -func (pk *pubKeyBLSBLS12381) Size() int { - return PubKeyLenBLSBLS12381 -} - -// EncodeCompressed returns a byte encoding of the public key. -// The encoding is a compressed encoding of the point -// [zcash] https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-08.html#name-zcash-serialization-format- -func (a *pubKeyBLSBLS12381) EncodeCompressed() []byte { - if serializationG2 != compressed { - panic("library is not configured to use compressed public key serialization") - } - return a.Encode() -} - -// Encode returns a byte encoding of the public key. -// Since we use a compressed encoding by default, this delegates to EncodeCompressed -func (a *pubKeyBLSBLS12381) Encode() []byte { - dest := make([]byte, pubKeyLengthBLSBLS12381) - writePointG2(dest, &a.point) - return dest -} - -// Equals checks is two public keys are equal -func (pk *pubKeyBLSBLS12381) Equals(other PublicKey) bool { - otherBLS, ok := other.(*pubKeyBLSBLS12381) - if !ok { - return false - } - return pk.point.equals(&otherBLS.point) -} - -// String returns the hex string representation of the key. -func (pk *pubKeyBLSBLS12381) String() string { - return fmt.Sprintf("%#x", pk.Encode()) -} - -// Get Macro definitions from the C layer as Cgo does not export macros -var signatureLengthBLSBLS12381 = int(C.get_signature_len()) -var pubKeyLengthBLSBLS12381 = int(C.get_pk_len()) -var prKeyLengthBLSBLS12381 = int(C.get_sk_len()) - -// init sets the context of BLS12-381 curve -func (a *blsBLS12381Algo) init() error { - // initializes relic context and sets the B12_381 parameters - if err := a.context.initContext(); err != nil { - return err - } - - // compare the Go and C layer constants as a sanity check - if signatureLengthBLSBLS12381 != SignatureLenBLSBLS12381 || - pubKeyLengthBLSBLS12381 != PubKeyLenBLSBLS12381 || - prKeyLengthBLSBLS12381 != PrKeyLenBLSBLS12381 { - return errors.New("BLS-12381 length settings in Go and C are not consistent, check hardcoded lengths and compressions") - } - return nil -} - -// set the context of BLS 12-381 curve in the lower C and Relic layers assuming the context -// was previously initialized with a call to init(). -// -// If the implementation evolves to support multiple contexts, -// reinit should be called at every blsBLS12381Algo operation. -func (a *blsBLS12381Algo) reInit() { - a.context.setContext() -} - -// This is only a TEST/DEBUG/BENCH function. -// It returns the hash to G1 point from a slice of 128 bytes -func mapToG1(data []byte) *pointG1 { - l := len(data) - var h pointG1 - C.map_to_G1((*C.ep_st)(&h), (*C.uchar)(&data[0]), (C.int)(l)) - return &h -} - -// This is only a TEST function. -// signWithXMDSHA256 signs a message using XMD_SHA256 as a hash to field. -// -// The function is in this file because cgo can't be used in go test files. -// TODO: implement a hasher for XMD SHA256 and use the `Sign` function. 
-func (sk *prKeyBLSBLS12381) signWithXMDSHA256(data []byte) Signature { - - dst := []byte("BLS_SIG_BLS12381G1_XMD:SHA-256_SSWU_RO_NUL_") - hash := make([]byte, expandMsgOutput) - // XMD using SHA256 - C.xmd_sha256((*C.uchar)(&hash[0]), - (C.int)(expandMsgOutput), - (*C.uchar)(&data[0]), (C.int)(len(data)), - (*C.uchar)(&dst[0]), (C.int)(len(dst))) - - // sign the hash - s := make([]byte, SignatureLenBLSBLS12381) - C.bls_sign((*C.uchar)(&s[0]), - (*C.bn_st)(&sk.scalar), - (*C.uchar)(&hash[0]), - (C.int)(len(hash))) - return s -} diff --git a/crypto/bls12381_hashtocurve.c b/crypto/bls12381_hashtocurve.c deleted file mode 100644 index 229f9c009de..00000000000 --- a/crypto/bls12381_hashtocurve.c +++ /dev/null @@ -1,338 +0,0 @@ -// +build relic - -#include "bls12381_utils.h" -#include "bls_include.h" - -extern prec_st* bls_prec; - -#if (hashToPoint== LOCAL_SSWU) - -// These constants are taken from https://github.com/kwantam/bls12-381_hash -// and converted to the Mongtomery domain. -// Copyright 2019 Riad S. Wahby -const uint64_t iso_Nx_data[ELLP_Nx_LEN][Fp_DIGITS] = { - {0x4d18b6f3af00131c, 0x19fa219793fee28c, 0x3f2885f1467f19ae, - 0x23dcea34f2ffb304, 0xd15b58d2ffc00054, 0x0913be200a20bef4,}, - {0x898985385cdbbd8b, 0x3c79e43cc7d966aa, 0x1597e193f4cd233a, - 0x8637ef1e4d6623ad, 0x11b22deed20d827b, 0x07097bc5998784ad,}, - {0xa542583a480b664b, 0xfc7169c026e568c6, 0x5ba2ef314ed8b5a6, - 0x5b5491c05102f0e7, 0xdf6e99707d2a0079, 0x0784151ed7605524,}, - {0x494e212870f72741, 0xab9be52fbda43021, 0x26f5577994e34c3d, - 0x049dfee82aefbd60, 0x65dadd7828505289, 0x0e93d431ea011aeb,}, - {0x90ee774bd6a74d45, 0x7ada1c8a41bfb185, 0x0f1a8953b325f464, - 0x104c24211be4805c, 0x169139d319ea7a8f, 0x09f20ead8e532bf6,}, - {0x6ddd93e2f43626b7, 0xa5482c9aa1ccd7bd, 0x143245631883f4bd, - 0x2e0a94ccf77ec0db, 0xb0282d480e56489f, 0x18f4bfcbb4368929,}, - {0x23c5f0c953402dfd, 0x7a43ff6958ce4fe9, 0x2c390d3d2da5df63, - 0xd0df5c98e1f9d70f, 0xffd89869a572b297, 0x1277ffc72f25e8fe,}, - {0x79f4f0490f06a8a6, 0x85f894a88030fd81, 0x12da3054b18b6410, - 0xe2a57f6505880d65, 0xbba074f260e400f1, 0x08b76279f621d028,}, - {0xe67245ba78d5b00b, 0x8456ba9a1f186475, 0x7888bff6e6b33bb4, - 0xe21585b9a30f86cb, 0x05a69cdcef55feee, 0x09e699dd9adfa5ac,}, - {0x0de5c357bff57107, 0x0a0db4ae6b1a10b2, 0xe256bb67b3b3cd8d, - 0x8ad456574e9db24f, 0x0443915f50fd4179, 0x098c4bf7de8b6375,}, - {0xe6b0617e7dd929c7, 0xfe6e37d442537375, 0x1dafdeda137a489e, - 0xe4efd1ad3f767ceb, 0x4a51d8667f0fe1cf, 0x054fdf4bbf1d821c,}, - {0x72db2a50658d767b, 0x8abf91faa257b3d5, 0xe969d6833764ab47, - 0x464170142a1009eb, 0xb14f01aadb30be2f, 0x18ae6a856f40715d,}, -}; - -const uint64_t iso_Ny_data[ELLP_Ny_LEN][Fp_DIGITS] = { - {0x2b567ff3e2837267, 0x1d4d9e57b958a767, 0xce028fea04bd7373, - 0xcc31a30a0b6cd3df, 0x7d7b18a682692693, 0x0d300744d42a0310,}, - {0x99c2555fa542493f, 0xfe7f53cc4874f878, 0x5df0608b8f97608a, - 0x14e03832052b49c8, 0x706326a6957dd5a4, 0x0a8dadd9c2414555,}, - {0x13d942922a5cf63a, 0x357e33e36e261e7d, 0xcf05a27c8456088d, - 0x0000bd1de7ba50f0, 0x83d0c7532f8c1fde, 0x13f70bf38bbf2905,}, - {0x5c57fd95bfafbdbb, 0x28a359a65e541707, 0x3983ceb4f6360b6d, - 0xafe19ff6f97e6d53, 0xb3468f4550192bf7, 0x0bb6cde49d8ba257,}, - {0x590b62c7ff8a513f, 0x314b4ce372cacefd, 0x6bef32ce94b8a800, - 0x6ddf84a095713d5f, 0x64eace4cb0982191, 0x0386213c651b888d,}, - {0xa5310a31111bbcdd, 0xa14ac0f5da148982, 0xf9ad9cc95423d2e9, - 0xaa6ec095283ee4a7, 0xcf5b1f022e1c9107, 0x01fddf5aed881793,}, - {0x65a572b0d7a7d950, 0xe25c2d8183473a19, 0xc2fcebe7cb877dbd, - 0x05b2d36c769a89b0, 0xba12961be86e9efb, 
0x07eb1b29c1dfde1f,}, - {0x93e09572f7c4cd24, 0x364e929076795091, 0x8569467e68af51b5, - 0xa47da89439f5340f, 0xf4fa918082e44d64, 0x0ad52ba3e6695a79,}, - {0x911429844e0d5f54, 0xd03f51a3516bb233, 0x3d587e5640536e66, - 0xfa86d2a3a9a73482, 0xa90ed5adf1ed5537, 0x149c9c326a5e7393,}, - {0x462bbeb03c12921a, 0xdc9af5fa0a274a17, 0x9a558ebde836ebed, - 0x649ef8f11a4fae46, 0x8100e1652b3cdc62, 0x1862bd62c291dacb,}, - {0x05c9b8ca89f12c26, 0x0194160fa9b9ac4f, 0x6a643d5a6879fa2c, - 0x14665bdd8846e19d, 0xbb1d0d53af3ff6bf, 0x12c7e1c3b28962e5,}, - {0xb55ebf900b8a3e17, 0xfedc77ec1a9201c4, 0x1f07db10ea1a4df4, - 0x0dfbd15dc41a594d, 0x389547f2334a5391, 0x02419f98165871a4,}, - {0xb416af000745fc20, 0x8e563e9d1ea6d0f5, 0x7c763e17763a0652, - 0x01458ef0159ebbef, 0x8346fe421f96bb13, 0x0d2d7b829ce324d2,}, - {0x93096bb538d64615, 0x6f2a2619951d823a, 0x8f66b3ea59514fa4, - 0xf563e63704f7092f, 0x724b136c4cf2d9fa, 0x046959cfcfd0bf49,}, - {0xea748d4b6e405346, 0x91e9079c2c02d58f, 0x41064965946d9b59, - 0xa06731f1d2bbe1ee, 0x07f897e267a33f1b, 0x1017290919210e5f,}, - {0x872aa6c17d985097, 0xeecc53161264562a, 0x07afe37afff55002, - 0x54759078e5be6838, 0xc4b92d15db8acca8, 0x106d87d1b51d13b9,}, -}; - -// sqrt_ration optimized for p mod 4 = 3. -// Check if (U/V) is a square, return 1 if yes, 0 otherwise -// If 1 is returned, out contains sqrt(U/V), -// otherwise out is sqrt(z*U/V) -// out should not be the same as U, or V -static int sqrt_ratio_3mod4(fp_t out, const fp_t u, const fp_t v) { - fp_t t0, t1, t2; - - fp_sqr(t1, v); // V^2 - fp_mul(t2, u, v); // U*V - fp_mul(t1, t1, t2); // U*V^3 - fp_exp(out, t1, &bls_prec->p_3div4); // (U*V^3)^((p-3)/4) - fp_mul(out, out, t2); // (U*V)*(U*V^3)^((p-3)/4) = U^((p+1)/4) * V^(3p-5)/4 - - fp_sqr(t0, out); // out^2 - fp_mul(t0, t0, v); // out^2 * V - - int res = 1; - if (fp_cmp(t0, u) != RLC_EQ) { // check whether U/V is a quadratic residue - fp_mul(out, out, bls_prec->sqrt_z); // sqrt(-z)*U*V(UV^3)^((p-3)/4) - res = 0; - } - - return res; -} - -// returns 1 if input is odd and 0 if input is even -static int sign_0(const fp_t in) { -#if FP_RDC == MONTY - bn_t tmp; - fp_prime_back(tmp, in); // TODO: entire reduction may not be needed to get the parity - return bn_is_even(tmp); -#endif - return in[0]&1; -} - -// Maps the field element t to a point p in E1(Fp) where E1: y^2 = g(x) = x^3 + a1*x + b1 -// using optimized non-constant-time Simplified SWU implementation (A.B = 0) -// Outout point p is in Jacobian coordinates to avoid extra inversions. 
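Why the square-root trick in `sqrt_ratio_3mod4` above works, spelled out with the Legendre symbol χ(a) = a^((p−1)/2) ∈ {±1}: the candidate computed by the function is c = u·v·(u·v³)^((p−3)/4), so

c²·v = u^((p+1)/2) · v^((3p−3)/2) = u·χ(u)·χ(v) = u·χ(u·v).

Since χ(u/v) = χ(u·v), the final comparison c²·v = u succeeds exactly when u/v is a quadratic residue, in which case c = √(u/v). Otherwise the code multiplies c by the precomputed √(−z), and c'²·v = −z·u·χ(u·v) = z·u, i.e. c' = √(z·u/v), matching the function's stated contract.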
-static inline void map_to_E1_osswu(ep_t p, const fp_t t) { - fp_t t0, t1, t2, t3, t4; - - // get the isogeny map coefficients - ctx_t* ctx = core_get(); - fp_t *a1 = &ctx->ep_iso.a; - fp_t *b1 = &ctx->ep_iso.b; - fp_t *z = &ctx->ep_map_u; - - // compute numerator and denominator of X0(t) = N / D - fp_sqr(t1, t); // t^2 - fp_mul(t1, t1, *z); // z * t^2 - fp_sqr(t2, t1); // z^2 * t^4 - fp_add(t2, t2, t1); // z * t^2 + z^2 * t^4 - fp_add(t3, t2, bls_prec->r); // z * t^2 + z^2 * t^4 + 1 - fp_mul(t3, t3, *b1); // N = b * (z * t^2 + z^2 * t^4 + 1) - - if (fp_is_zero(t2)) { - fp_copy(p->z, bls_prec->a1z); // D = a * z - } else { - fp_mul(p->z, t2, bls_prec->minus_a1); // D = - a * (z * t^2 + z^2 * t^4) - } - - // compute numerator and denominator of g(X0(t)) = U / V - // U = N^3 + a1 * N * D^2 + b1 * D^3 - // V = D^3 - fp_sqr(t2, t3); // N^2 - fp_sqr(t0, p->z); // D^2 - fp_mul(t4, *a1, t0); // a * D^2 - fp_add(t2, t4, t2); // N^2 + a * D^2 - fp_mul(t2, t3, t2); // N^3 + a * N * D^2 - fp_mul(t0, t0, p->z); // V = D^3 - fp_mul(t4, *b1, t0); // b * V = b * D^3 - fp_add(t2, t4, t2); // U = N^3 + a1 * N * D^2 + b1 * D^3 - - // compute sqrt(U/V) - int is_sqr = sqrt_ratio_3mod4(p->y, t2, t0); - if (is_sqr) { - fp_copy(p->x, t3); // x = N - } else { - fp_mul(p->x, t1, t3); // x = N * z * t^2 - fp_mul(t1, t1, t); // z * t^3 - fp_mul(p->y, p->y, t1); // y = z * t^3 * sqrt(r * U/V) where r is 1 or map coefficient z - } - - // negate y to be the same sign of t - if (sign_0(t) != sign_0(p->y)) { - fp_neg(p->y, p->y); // -y - } - - // convert (x/D, y) into Jacobian (X,Y,Z) where Z=D to avoid inversion. - // Z = D, X = x/D * D^2 = x*D , Y = y*D^3 - fp_mul(p->x, p->x, p->z); // X = N*D - fp_mul(p->y, p->y, t0); // Y = y*D^3 - // p->z is already equal to D - p->coord = JACOB; -} - -// This code is taken from https://github.com/kwantam/bls12-381_hash -// and adapted to use Relic modular arithemtic. -// Copyright 2019 Riad S. Wahby -static inline void hornerPolynomial(fp_t accumulator, const fp_t x, const int start_val, const fp_t fp_tmp[]) { - for (int i = start_val; i >= 0; --i) { - fp_mul(accumulator, accumulator, x); // acc *= x - fp_add(accumulator, accumulator, fp_tmp[i]); // acc += next_val - } -} - -// This code is taken from https://github.com/kwantam/bls12-381_hash -// and adapted to use Relic modular arithemtic. -// Copyright 2019 Riad S. Wahby -static inline void compute_map_zvals(fp_t out[], const fp_t inv[], const fp_t zv[], const unsigned len) { - for (unsigned i = 0; i < len; ++i) { - fp_mul(out[i], inv[i], zv[i]); - } -} - -// 11-isogeny map -// computes the mapping of p and stores the result in r -// -// This code is taken from https://github.com/kwantam/bls12-381_hash -// and adapted to use Relic modular arithemtic. The constant tables -// iso_D and iso_N were converted to the Montgomery domain. -// -// Copyright 2019 Riad S. Wahby -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at - -// http://www.apache.org/licenses/LICENSE-2.0 - -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
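For orientation before `eval_iso11`: the 11-isogeny is evaluated as a pair of rational maps. Writing the input point as (x, y), the output is

x' = N_x(x) / D_x(x), with deg N_x = 11 and deg D_x = 10;  y' = y · N_y(x) / D_y(x), with deg N_y = deg D_y = 15.

The numerator coefficient tables are the `iso_Nx`/`iso_Ny` constants above, while the denominator coefficients come from Relic (`iso->xd`, `iso->yd`, with `deg_dx = 10` and `deg_dy = 15` hardcoded in the function). All four polynomials are evaluated with Horner's rule in Jacobian coordinates to avoid field inversions.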
-static inline void eval_iso11(ep_t r, const ep_t p) { - fp_t fp_tmp[32]; - - // precompute even powers of Z up to Z^30 in fp_tmp[31]..fp_tmp[17] - fp_sqr(fp_tmp[31], p->z); // Z^2 - fp_sqr(fp_tmp[30], fp_tmp[31]); // Z^4 - fp_mul(fp_tmp[29], fp_tmp[30], fp_tmp[31]); // Z^6 - fp_sqr(fp_tmp[28], fp_tmp[30]); // Z^8 - fp_mul(fp_tmp[27], fp_tmp[28], fp_tmp[31]); // Z^10 - fp_sqr(fp_tmp[26], fp_tmp[29]); // Z^12 - fp_mul(fp_tmp[25], fp_tmp[26], fp_tmp[31]); // Z^14 - fp_sqr(fp_tmp[24], fp_tmp[28]); // Z^16 - fp_mul(fp_tmp[23], fp_tmp[24], fp_tmp[31]); // Z^18 - fp_sqr(fp_tmp[22], fp_tmp[27]); // Z^20 - fp_mul(fp_tmp[21], fp_tmp[22], fp_tmp[31]); // Z^22 - fp_sqr(fp_tmp[20], fp_tmp[26]); // Z^24 - fp_mul(fp_tmp[19], fp_tmp[20], fp_tmp[31]); // Z^26 - fp_sqr(fp_tmp[18], fp_tmp[25]); // Z^28 - fp_mul(fp_tmp[17], fp_tmp[18], fp_tmp[31]); // Z^30 - - // get isogeny map coefficients - iso_t iso = ep_curve_get_iso(); - // hardcode the constant to avoid warnings of gcc -Wstringop-overread - const int deg_dy = 15; // also equal to iso->deg_yd; - const int deg_dx = 10; // also equal to iso->deg_xd; - // TODO: get N coefficient from Relic and update N computations - - // y = Ny/Dy - // compute Dy - compute_map_zvals(fp_tmp, iso->yd, fp_tmp + 17, deg_dy); // k_(15-i) Z^(2i) - fp_add(fp_tmp[16], p->x, fp_tmp[deg_dy - 1]); // X + k_14 Z^2 - hornerPolynomial(fp_tmp[16], p->x, deg_dy - 2, fp_tmp); // Horner for the rest - fp_mul(fp_tmp[15], fp_tmp[16], fp_tmp[31]); // Dy * Z^2 - fp_mul(fp_tmp[15], fp_tmp[15], p->z); // Dy * Z^3 - - // compute Ny - compute_map_zvals(fp_tmp, bls_prec->iso_Ny, fp_tmp + 17, ELLP_Ny_LEN - 1); // k_(15-i) Z^(2i) - fp_mul(fp_tmp[16], p->x, bls_prec->iso_Ny[ELLP_Ny_LEN - 1]); // k_15 * X - fp_add(fp_tmp[16], fp_tmp[16], fp_tmp[ELLP_Ny_LEN - 2]); // k_15 * X + k_14 Z^2 - hornerPolynomial(fp_tmp[16], p->x, ELLP_Ny_LEN - 3, fp_tmp); // Horner for the rest - fp_mul(fp_tmp[16], fp_tmp[16], p->y); // Ny * Y - - // x = Nx/Dx - // compute Dx - compute_map_zvals(fp_tmp, iso->xd, fp_tmp + 22, deg_dx); // k_(10-i) Z^(2i) - fp_add(fp_tmp[14], p->x, fp_tmp[deg_dx - 1]); // X + k_9 Z^2 - hornerPolynomial(fp_tmp[14], p->x, deg_dx - 2, fp_tmp); // Horner for the rest - fp_mul(fp_tmp[14], fp_tmp[14], fp_tmp[31]); // Dx * Z^2 - - // compute Nx - compute_map_zvals(fp_tmp, bls_prec->iso_Nx, fp_tmp + 21, ELLP_Nx_LEN - 1); // k_(11-i) Z^(2i) - fp_mul(fp_tmp[13], p->x, bls_prec->iso_Nx[ELLP_Nx_LEN - 1]); // k_11 * X - fp_add(fp_tmp[13], fp_tmp[13], fp_tmp[ELLP_Nx_LEN - 2]); // k_11 * X + k_10 * Z^2 - hornerPolynomial(fp_tmp[13], p->x, ELLP_Nx_LEN - 3, fp_tmp); // Dy: Horner for the rest - - // compute the resulting point (Xo,Yo,Zo) - fp_mul(r->z, fp_tmp[14], fp_tmp[15]); // Zo = Dx Dy - fp_mul(r->x, fp_tmp[13], fp_tmp[15]); // Nx Dy - fp_mul(r->x, r->x, r->z); // Xo = Nx Dy Z - fp_sqr(fp_tmp[12], r->z); // Zo^2 - fp_mul(r->y, fp_tmp[16], fp_tmp[14]); // Ny Dx - fp_mul(r->y, r->y, fp_tmp[12]); // Yo = Ny Dx Zo^2 - r->coord = JACOB; -} - -// map an input point in E to a point in G1 by clearing the cofactor of G1 -static void clear_cofactor(ep_t out, const ep_t in) { - bn_t z; - bn_new(z); - fp_prime_get_par(z); - // compute 1-z - bn_neg(z, z); - bn_add_dig(z, z, 1); - ep_mul_dig(out, in, z->dp[0]); // z fits in 64 bits - bn_free(z); -} - -// construction 2 section 5 in in https://eprint.iacr.org/2019/403.pdf -// evaluate the optimized SSWU map twice, add resulting points, apply isogeny map, clear cofactor -// the result is stored in p -// msg is the input message to hash, must be at least 2*(FP_BYTES+16) = 
128 bytes -static void map_to_G1_local(ep_t p, const uint8_t *msg, int len) { - RLC_TRY { - if (len < 2*(Fp_BYTES+16)) { - RLC_THROW(ERR_NO_BUFFER); - } - - fp_t t1, t2; - bn_t tmp; - bn_new(tmp); - bn_read_bin(tmp, msg, len/2); - fp_prime_conv(t1, tmp); - bn_read_bin(tmp, msg + len/2, len - len/2); - fp_prime_conv(t2, tmp); - bn_free(tmp); - - ep_t p_temp; - ep_new(p_temp); - // first mapping - map_to_E1_osswu(p_temp, t1); // map to E1 - eval_iso11(p_temp, p_temp); // map to E - - // second mapping - map_to_E1_osswu(p, t2); // map to E1 - eval_iso11(p, p); // map to E - // sum - // TODO: implement point addition in E1 and apply the isogeny map only once. - // Gives 4% improvement for map-to-curve overall - ep_add_jacob(p, p, p_temp); - - // clear the cofactor - clear_cofactor(p, p); // map to G1 - ep_free(p_temp); - } - RLC_CATCH_ANY { - RLC_THROW(ERR_CAUGHT); - } -} -#endif - -// computes a hash of input data to G1 -// construction 2 from section 5 in https://eprint.iacr.org/2019/403.pdf -void map_to_G1(ep_t h, const byte* data, const int len) { - #if hashToPoint==LOCAL_SSWU - map_to_G1_local(h, data, len); - #elif hashToPoint==RELIC_SSWU - ep_map_from_field(h, data, len); - #endif -} diff --git a/crypto/bls12381_utils.c b/crypto/bls12381_utils.c deleted file mode 100644 index 19a1b730b5e..00000000000 --- a/crypto/bls12381_utils.c +++ /dev/null @@ -1,852 +0,0 @@ -// +build relic - -// this file contains utility functions for the curve BLS 12-381 -// these tools are shared by the BLS signature scheme, the BLS based threshold signature -// and the BLS distributed key generation protocols - -#include "bls12381_utils.h" -#include "bls_include.h" -#include "assert.h" - -// The functions are tested for ALLOC=AUTO (not for ALLOC=DYNAMIC) - -// return macro values to the upper Go Layer -int get_valid() { - return VALID; -} - -int get_invalid() { - return INVALID; -} - -void bn_new_wrapper(bn_t a) { - bn_new(a); -} - -// global variable of the pre-computed data -prec_st bls_prec_st; -prec_st* bls_prec = NULL; - -// required constants for the optimized SWU hash to curve -#if (hashToPoint == LOCAL_SSWU) -extern const uint64_t iso_Nx_data[ELLP_Nx_LEN][Fp_DIGITS]; -extern const uint64_t iso_Ny_data[ELLP_Ny_LEN][Fp_DIGITS]; -#endif - -#if (MEMBERSHIP_CHECK_G1 == BOWE) -extern const uint64_t beta_data[Fp_DIGITS]; -extern const uint64_t z2_1_by3_data[2]; -#endif - -// sets the global variable to input -void precomputed_data_set(const prec_st* p) { - bls_prec = (prec_st*)p; -} - -// Reads a prime field element from a digit vector in big endian format. -// There is no conversion to Montgomery domain in this function. 
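Stepping back to the hash-to-curve entry point: `map_to_G1_local` above implements construction 2, section 5 of https://eprint.iacr.org/2019/403.pdf. The 128-byte expanded message is split into halves t1 and t2, each reduced into Fp, and the output is

H = clear_cofactor( iso11(sswu(t1)) + iso11(sswu(t2)) ),

where `clear_cofactor` multiplies by the effective cofactor h_eff = 1 − z, z being the BLS12-381 curve parameter (which, as the code notes, fits in 64 bits).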
- #define fp_read_raw(a, data_pointer) dv_copy((a), (data_pointer), Fp_DIGITS) - -// pre-compute some data required for curve BLS12-381 -prec_st* init_precomputed_data_BLS12_381() { - bls_prec = &bls_prec_st; - ctx_t* ctx = core_get(); - - // (p-1)/2 - bn_div_dig(&bls_prec->p_1div2, &ctx->prime, 2); - #if (hashToPoint == LOCAL_SSWU) - // (p-3)/4 - bn_div_dig(&bls_prec->p_3div4, &bls_prec->p_1div2, 2); - // sqrt(-z) - fp_neg(bls_prec->sqrt_z, ctx->ep_map_u); - fp_srt(bls_prec->sqrt_z, bls_prec->sqrt_z); - // -a1 and a1*z - fp_neg(bls_prec->minus_a1, ctx->ep_iso.a); - fp_mul(bls_prec->a1z, ctx->ep_iso.a, ctx->ep_map_u); - - for (int i=0; i<ELLP_Nx_LEN; i++) - fp_read_raw(bls_prec->iso_Nx[i], iso_Nx_data[i]); - for (int i=0; i<ELLP_Ny_LEN; i++) - fp_read_raw(bls_prec->iso_Ny[i], iso_Ny_data[i]); - #endif - - #if (MEMBERSHIP_CHECK_G1 == BOWE) - bn_new(&bls_prec->beta); - bn_read_raw(&bls_prec->beta, beta_data, Fp_DIGITS); - bn_new(&bls_prec->z2_1_by3); - bn_read_raw(&bls_prec->z2_1_by3, z2_1_by3_data, 2); - #endif - - // Montgomery constant R - fp_set_dig(bls_prec->r, 1); - return bls_prec; -} - -// Initializes Relic context with BLS12-381 parameters -ctx_t* relic_init_BLS12_381() { - // check Relic was compiled with the right conf - assert(ALLOC == AUTO); - - // sanity check of Relic constants the package is relying on - assert(RLC_OK == RLC_EQ); - - // initialize relic core with a new context - ctx_t* bls_ctx = (ctx_t*) calloc(1, sizeof(ctx_t)); - if (!bls_ctx) return NULL; - core_set(bls_ctx); - if (core_init() != RLC_OK) return NULL; - - // init BLS curve - int ret = RLC_OK; - #if (FP_PRIME == 381) - ret = ep_param_set_any_pairf(); // sets B12_P381 if FP_PRIME = 381 in relic config - #else - ep_param_set(B12_P381); - ep2_curve_set_twist(EP_MTYPE); // Multiplicative twist - #endif - - if (ret != RLC_OK) return NULL; - return core_get(); -} - -// seeds relic PRG -void seed_relic(byte* seed, int len) { - #if RAND == HASHD - // instantiate a new DRBG - ctx_t *ctx = core_get(); - ctx->seeded = 0; - #endif - rand_seed(seed, len); -} - -// Exponentiation of a generic point p in G1 -void ep_mult(ep_t res, const ep_t p, const bn_t expo) { - // Using window NAF of size 2 - ep_mul_lwnaf(res, p, expo); -} - -// Exponentiation of generator g1 in G1 -// These two function are here for bench purposes only -void ep_mult_gen_bench(ep_t res, const bn_t expo) { - // Using precomputed table of size 4 - ep_mul_gen(res, (bn_st *)expo); -} - -void ep_mult_generic_bench(ep_t res, const bn_t expo) { - // generic point multiplication - ep_mult(res, &core_get()->ep_g, expo); -} - -// Exponentiation of a generic point p in G2 -void ep2_mult(ep2_t res, ep2_t p, bn_t expo) { - // Using window NAF of size 2 - ep2_mul_lwnaf(res, p, expo); -} - -// Exponentiation of fixed g2 in G2 -void ep2_mult_gen(ep2_t res, const bn_t expo) { - // Using precomputed table of size 4 - g2_mul_gen(res, (bn_st*)expo); -} - -// DEBUG printing functions -void bytes_print_(char* s, byte* data, int len) { - printf("[%s]:\n", s); - for (int i=0; i<len; i++) - printf("%02x,", data[i]); - printf("\n"); -} - -// DEBUG printing functions -void fp_print_(char* s, fp_st a) { - char* str = malloc(sizeof(char) * fp_size_str(a, 16)); - fp_write_str(str, 100, a, 16); - printf("[%s]:\n%s\n", s, str); - free(str); -} - -void bn_print_(char* s, bn_st *a) { - char* str = malloc(sizeof(char) * bn_size_str(a, 16)); - bn_write_str(str, 128, a, 16); - printf("[%s]:\n%s\n", s, str); - free(str); -} - -void ep_print_(char* s, ep_st* p) { - printf("[%s]:\n", s); - 
g1_print(p); -} - -void ep2_print_(char* s, ep2_st* p) { - printf("[%s]:\n", s); - g2_print(p); -} - -// generates a random number less than the order r -void bn_randZr_star(bn_t x) { - // reduce the modular reduction bias - const int seed_len = BITS_TO_BYTES(Fr_BITS + SEC_BITS); - byte seed[seed_len]; - rand_bytes(seed, seed_len); - bn_map_to_Zr_star(x, seed, seed_len); - rand_bytes(seed, seed_len); // overwrite seed -} - -// generates a random number less than the order r -void bn_randZr(bn_t x) { - // reduce the modular reduction bias - bn_new_size(x, BITS_TO_DIGITS(Fr_BITS + SEC_BITS)); - bn_rand(x, RLC_POS, Fr_BITS + SEC_BITS); - bn_mod(x, x, &core_get()->ep_r); -} - -// Reads a scalar from an array and maps it to Zr. -// The resulting scalar `a` satisfies 0 <= a < r. -// `len` must be less than BITS_TO_BYTES(RLC_BN_BITS). -// It returns VALID if scalar is zero and INVALID otherwise -int bn_map_to_Zr(bn_t a, const uint8_t* bin, int len) { - bn_t tmp; - bn_new(tmp); - bn_new_size(tmp, BYTES_TO_DIGITS(len)); - bn_read_bin(tmp, bin, len); - bn_mod(a, tmp, &core_get()->ep_r); - bn_rand(tmp, RLC_POS, len << 3); // overwrite tmp - bn_free(tmp); - if (bn_cmp_dig(a, 0) == RLC_EQ) { - return VALID; - } - return INVALID; -} - -// Reads a scalar from an array and maps it to Zr*. -// The resulting scalar `a` satisfies 0 < a < r. -// `len` must be less than BITS_TO_BYTES(RLC_BN_BITS) -void bn_map_to_Zr_star(bn_t a, const uint8_t* bin, int len) { - bn_t tmp; - bn_new(tmp); - bn_new_size(tmp, BYTES_TO_DIGITS(len)); - bn_read_bin(tmp, bin, len); - bn_t r_1; - bn_new(r_1); - bn_sub_dig(r_1, &core_get()->ep_r, 1); - bn_mod_basic(a,tmp,r_1); - bn_add_dig(a,a,1); - bn_rand(tmp, RLC_POS, len << 3); // overwrite tmp - bn_free(tmp); - bn_free(r_1); -} - -// returns the sign of y. -// 1 if y > (p - 1)/2 and 0 otherwise. -static int fp_get_sign(const fp_t y) { - bn_t bn_y; - bn_new(bn_y); - fp_prime_back(bn_y, y); - return bn_cmp(bn_y, &bls_prec->p_1div2) == RLC_GT; -} - -// ep_write_bin_compact exports a point a in E(Fp) to a buffer bin in a compressed or uncompressed form. -// len is the allocated size of the buffer bin. -// The serialization is following: -// https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-08.html#name-zcash-serialization-format-) -// The code is a modified version of Relic ep_write_bin -void ep_write_bin_compact(byte *bin, const ep_t a, const int len) { - const int G1_size = (G1_BYTES/(G1_SERIALIZATION+1)); - - if (len!=G1_size) { - RLC_THROW(ERR_NO_BUFFER); - return; - } - - if (ep_is_infty(a)) { - // set the infinity bit - bin[0] = (G1_SERIALIZATION << 7) | 0x40; - memset(bin+1, 0, G1_size-1); - return; - } - - RLC_TRY { - ep_t t; - ep_null(t); - ep_new(t); - ep_norm(t, a); - fp_write_bin(bin, Fp_BYTES, t->x); - - if (G1_SERIALIZATION == COMPRESSED) { - bin[0] |= (fp_get_sign(t->y) << 5); - } else { - fp_write_bin(bin + Fp_BYTES, Fp_BYTES, t->y); - } - ep_free(t); - } RLC_CATCH_ANY { - RLC_THROW(ERR_CAUGHT); - } - - bin[0] |= (G1_SERIALIZATION << 7); - } - -// fp_read_bin_safe is a modified version of Relic's (void fp_read_bin). -// It reads a field element from a buffer and makes sure the big number read can be -// written as a field element (is reduced modulo p). -// Unlike Relic's versions, the function does not reduce the read integer modulo p and does -// not throw an exception for an integer larger than p. The function returns RLC_OK if the input -// corresponds to a field element, and returns RLC_ERR otherwise. 
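The zcash serialization header used by `ep_write_bin_compact` above (and validated by the read functions below) packs three flags into the top bits of the first byte. A small Go sketch of the decoding, for illustration only:

```go
// decodeHeader extracts the zcash-format flag bits from the first byte of a
// serialized BLS12-381 point, mirroring the checks in ep_read_bin_compact.
func decodeHeader(serialized []byte) (compressed, infinity, ySign bool) {
	h := serialized[0]
	compressed = h&0x80 != 0 // bit 7: compression flag
	infinity = h&0x40 != 0   // bit 6: point at infinity (remaining bits/bytes must be zero)
	ySign = h&0x20 != 0      // bit 5: sign of y, only meaningful in compressed form
	return
}
```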
-static int fp_read_bin_safe(fp_t a, const uint8_t *bin, int len) { - if (len != Fp_BYTES) { - return RLC_ERR; - } - - int ret = RLC_ERR; - bn_t t; - bn_new(t); - bn_read_bin(t, bin, Fp_BYTES); - - // make sure read bn is reduced modulo p - // first check is sanity check, since current implementation of `bn_read_bin` insures - // output bn is positive - if (bn_sign(t) == RLC_NEG || bn_cmp(t, &core_get()->prime) != RLC_LT) { - goto out; - } - - if (bn_is_zero(t)) { - fp_zero(a); - } else { - if (t->used == 1) { - fp_prime_conv_dig(a, t->dp[0]); - } else { - fp_prime_conv(a, t); - } - } - ret = RLC_OK; -out: - bn_free(t); - return ret; -} - -// ep_read_bin_compact imports a point from a buffer in a compressed or uncompressed form. -// len is the size of the input buffer. -// -// The resulting point is guaranteed to be on the curve E1. -// The serialization follows: -// https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-08.html#name-zcash-serialization-format-) -// The code is a modified version of Relic ep_read_bin -// -// It returns RLC_OK if the inputs are valid (input buffer lengths are valid and coordinates correspond -// to a point on curve) and the execution completes, and RLC_ERR otherwise. -int ep_read_bin_compact(ep_t a, const byte *bin, const int len) { - // check the length - const int G1_size = (G1_BYTES/(G1_SERIALIZATION+1)); - if (len!=G1_size) { - return RLC_ERR; - } - - // check the compression bit - int compressed = bin[0] >> 7; - if ((compressed == 1) != (G1_SERIALIZATION == COMPRESSED)) { - return RLC_ERR; - } - - // check if the point is infinity - int is_infinity = bin[0] & 0x40; - if (is_infinity) { - // check if the remaining bits are cleared - if (bin[0] & 0x3F) { - return RLC_ERR; - } - for (int i=1; i<G1_size-1; i++) { - if (bin[i]) { - return RLC_ERR; - } - } - ep_set_infty(a); - return RLC_OK; - } - - // read the sign bit and check for consistency - int y_sign = (bin[0] >> 5) & 1; - if (y_sign && (!compressed)) { - return RLC_ERR; - } - - a->coord = BASIC; - fp_set_dig(a->z, 1); - // use a temporary buffer to mask the header bits and read a.x - byte temp[Fp_BYTES]; - memcpy(temp, bin, Fp_BYTES); - temp[0] &= 0x1F; - if (fp_read_bin_safe(a->x, temp, sizeof(temp)) != RLC_OK) { - return RLC_ERR; - } - - if (G1_SERIALIZATION == UNCOMPRESSED) { - if (fp_read_bin_safe(a->y, bin + Fp_BYTES, Fp_BYTES) != RLC_OK) { - return RLC_ERR; - } - // check read point is on curve - if (!ep_on_curve(a)) { - return RLC_ERR; - } - return RLC_OK; - } - fp_zero(a->y); - fp_set_bit(a->y, 0, y_sign); - if (ep_upk(a, a) == 1) { - // resulting point is guaranteed to be on curve - return RLC_OK; - } - return RLC_ERR; -} - - -// returns the sign of y. -// sign(y_0) if y_1 = 0, else sign(y_1) -static int fp2_get_sign(fp2_t y) { - if (fp_is_zero(y[1])) { // no need to convert back as the montgomery form of 0 is 0 - return fp_get_sign(y[0]); - } - return fp_get_sign(y[1]); -} - -// ep2_write_bin_compact exports a point in E(Fp^2) to a buffer in a compressed or uncompressed form. -// len is the allocated size of the buffer bin. 
-// The serialization follows: -// https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-08.html#name-zcash-serialization-format-) -// The code is a modified version of Relic ep2_write_bin -void ep2_write_bin_compact(byte *bin, const ep2_t a, const int len) { - ep2_t t; - ep2_null(t); - const int G2_size = (G2_BYTES/(G2_SERIALIZATION+1)); - - if (len!=G2_size) { - RLC_THROW(ERR_NO_BUFFER); - return; - } - - if (ep2_is_infty((ep2_st *)a)) { - // set the infinity bit - bin[0] = (G2_SERIALIZATION << 7) | 0x40; - memset(bin+1, 0, G2_size-1); - return; - } - - RLC_TRY { - ep2_new(t); - ep2_norm(t, (ep2_st *)a); - fp2_write_bin(bin, Fp2_BYTES, t->x, 0); - - if (G2_SERIALIZATION == COMPRESSED) { - bin[0] |= (fp2_get_sign(t->y) << 5); - } else { - fp2_write_bin(bin + Fp2_BYTES, Fp2_BYTES, t->y, 0); - } - } RLC_CATCH_ANY { - RLC_THROW(ERR_CAUGHT); - } - - bin[0] |= (G2_SERIALIZATION << 7); - ep2_free(t); -} - -// fp2_read_bin_safe is a modified version of Relic's (void fp2_read_bin). -// It reads an Fp^2 element from a buffer and makes sure the big numbers read can be -// written as field elements (are reduced modulo p). -// Unlike Relic's versions, the function does not reduce the read integers modulo p and does -// not throw an exception for integers larger than p. The function returns RLC_OK if the input -// corresponds to a field element in Fp^2, and returns RLC_ERR otherwise. -static int fp2_read_bin_safe(fp2_t a, const uint8_t *bin, int len) { - if (len != Fp2_BYTES) { - return RLC_ERR; - } - if (fp_read_bin_safe(a[0], bin, Fp_BYTES) != RLC_OK) { - return RLC_ERR; - } - if (fp_read_bin_safe(a[1], bin + Fp_BYTES, Fp_BYTES) != RLC_OK) { - return RLC_ERR; - } - return RLC_OK; -} - -// ep2_read_bin_compact imports a point from a buffer in a compressed or uncompressed form. -// The resulting point is guaranteed to be on curve E2. -// -// It returns RLC_OK if the inputs are valid (input buffer lengths are valid and read coordinates -// correspond to a point on curve) and the execution completes, and RLC_ERR otherwise.
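The read/write pairs above all rely on the same Zcash header layout: bit 7 of the first byte flags compression, bit 6 flags the point at infinity, bit 5 carries the sign of y (compressed form only), and the low 5 bits belong to the x coordinate. A small Go sketch of decoding just those header bits from a 48-byte compressed G1 encoding — illustrative only; the full point decoding stays in the C functions here.

```go
package main

import "fmt"

// decodeG1Header extracts the three Zcash serialization flags from the first
// byte of a 48-byte G1 encoding and masks them off to recover the x bytes.
func decodeG1Header(bin []byte) (compressed, infinity, ySign bool, xBytes []byte) {
	compressed = bin[0]&0x80 != 0 // bit 7: compression flag
	infinity = bin[0]&0x40 != 0   // bit 6: point at infinity
	ySign = bin[0]&0x20 != 0      // bit 5: sign of y (compressed only)
	xBytes = append([]byte(nil), bin...)
	xBytes[0] &= 0x1F // mask the three header bits to recover x
	return
}

func main() {
	// compressed encoding of the point at infinity: 0xc0 followed by zeros
	inf := make([]byte, 48)
	inf[0] = 0xC0
	c, i, s, _ := decodeG1Header(inf)
	fmt.Println(c, i, s) // true true false
}
```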
-// The code is a modified version of Relic ep2_read_bin -int ep2_read_bin_compact(ep2_t a, const byte *bin, const int len) { - // check the length - const int G2size = (G2_BYTES/(G2_SERIALIZATION+1)); - if (len!=G2size) { - return RLC_ERR; - } - - // check the compression bit - int compressed = bin[0] >> 7; - if ((compressed == 1) != (G2_SERIALIZATION == COMPRESSED)) { - return RLC_ERR; - } - - // check if the point is infinity - int is_infinity = bin[0] & 0x40; - if (is_infinity) { - // all the remaining bits and bytes need to be cleared - if (bin[0] & 0x3F) { - return RLC_ERR; - } - for (int i=1; i<G2size; i++) { - if (bin[i]) { - return RLC_ERR; - } - } - ep2_set_infty(a); - return RLC_OK; - } - - // read the sign bit and check for consistency - int y_sign = (bin[0] >> 5) & 1; - if (y_sign && (!compressed)) { - return RLC_ERR; - } - - a->coord = BASIC; - fp2_set_dig(a->z, 1); // a.z - // use a temporary buffer to mask the header bits and read a.x - byte temp[Fp2_BYTES]; - memcpy(temp, bin, Fp2_BYTES); - temp[0] &= 0x1F; // clear the header bits - if (fp2_read_bin_safe(a->x, temp, sizeof(temp)) != RLC_OK) { - return RLC_ERR; - } - - if (G2_SERIALIZATION == UNCOMPRESSED) { - if (fp2_read_bin_safe(a->y, bin + Fp2_BYTES, Fp2_BYTES) != RLC_OK){ - return RLC_ERR; - } - // check read point is on curve - if (!ep2_on_curve(a)) { - return RLC_ERR; - } - return RLC_OK; - } - - fp2_zero(a->y); - fp_set_bit(a->y[0], 0, y_sign); - fp_zero(a->y[1]); - if (ep2_upk(a, a) == 1) { - // resulting point is guaranteed to be on curve - return RLC_OK; - } - return RLC_ERR; -} - -// reads a scalar into a and checks it is a valid Zr element (a < r) -// returns RLC_OK if the scalar is valid and RLC_ERR otherwise. -int bn_read_Zr_bin(bn_t a, const uint8_t *bin, int len) { - if (len!=Fr_BYTES) { - return RLC_ERR; - } - bn_read_bin(a, bin, Fr_BYTES); - bn_t r; - bn_new(r); - g2_get_ord(r); - if (bn_cmp(a, r) == RLC_LT) { - return RLC_OK; - } - return RLC_ERR; -} - -// computes the sum of the array elements x and writes the sum in jointx -// the sum is computed in Zr -void bn_sum_vector(bn_t jointx, const bn_st* x, const int len) { - bn_t r; - bn_new(r); - g2_get_ord(r); - bn_set_dig(jointx, 0); - bn_new_size(jointx, BITS_TO_DIGITS(Fr_BITS+1)); - for (int i=0; i<len; i++) { - bn_add(jointx, jointx, &x[i]); - if (bn_cmp(jointx, r) == RLC_GT) - bn_sub(jointx, jointx, r); - } - bn_free(r); -} - -// computes the sum of the G2 array elements y and writes the sum in jointy -void ep2_sum_vector(ep2_t jointy, ep2_st* y, const int len){ - ep2_set_infty(jointy); - for (int i=0; i<len; i++){ - ep2_add_projc(jointy, jointy, &y[i]); - } - ep2_norm(jointy, jointy); // not necessary but left here to optimize the - // multiple pairing computations with the same - // public key -} - -// Verifies the validity of 2 SPoCK proofs and 2 public keys. -// Membership check in G1 of both proofs is verified in this function. -// Membership check in G2 of both keys is not verified in this function. -// The membership check in G2 is separated to allow optimizing multiple verifications -// using the same public keys.
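An aside before the SPoCK verification below: the conditional subtraction in bn_sum_vector above works because every input is assumed already reduced, so each partial sum stays below 2r and a single subtraction (no full division) restores the range. A Go sketch under that assumption follows; r is the standard BLS12-381 group order, and note the sketch compares with >= so the exact-r edge case also folds to zero.

```go
package main

import (
	"fmt"
	"math/big"
)

var r, _ = new(big.Int).SetString(
	"73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", 16)

// sumZr mirrors bn_sum_vector: every input is assumed reduced mod r, so
// after each addition the partial sum is below 2r and one conditional
// subtraction is enough to bring it back below r.
func sumZr(xs []*big.Int) *big.Int {
	joint := new(big.Int)
	for _, x := range xs {
		joint.Add(joint, x)
		if joint.Cmp(r) >= 0 { // >= also folds the exact-r case to 0
			joint.Sub(joint, r)
		}
	}
	return joint
}

func main() {
	a := new(big.Int).Sub(r, big.NewInt(1)) // r - 1
	b := big.NewInt(5)
	fmt.Println(sumZr([]*big.Int{a, b})) // (r-1+5) mod r = 4
}
```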
-int bls_spock_verify(const ep2_t pk1, const byte* sig1, const ep2_t pk2, const byte* sig2) { - ep_t elemsG1[2]; - ep2_t elemsG2[2]; - - // elemsG1[0] = s1 - ep_new(elemsG1[0]); - int read_ret = ep_read_bin_compact(elemsG1[0], sig1, SIGNATURE_LEN); - if (read_ret != RLC_OK) - return read_ret; - - // check s1 is in G1 - if (check_membership_G1(elemsG1[0]) != VALID) // only enabled if MEMBERSHIP_CHECK==1 - return INVALID; - - // elemsG1[1] = s2 - ep_new(elemsG1[1]); - read_ret = ep_read_bin_compact(elemsG1[1], sig2, SIGNATURE_LEN); - if (read_ret != RLC_OK) - return read_ret; - - // check s2 is in G1 - if (check_membership_G1(elemsG1[1]) != VALID) // only enabled if MEMBERSHIP_CHECK==1 - return INVALID; - - // elemsG2[1] = pk1 - ep2_new(elemsG2[1]); - ep2_copy(elemsG2[1], (ep2_st*)pk1); - - // elemsG2[0] = pk2 - ep2_new(elemsG2[0]); - ep2_copy(elemsG2[0], (ep2_st*)pk2); - -#if DOUBLE_PAIRING - // elemsG2[0] = -pk2 - ep2_neg(elemsG2[0], elemsG2[0]); - - fp12_t pair; - fp12_new(&pair); - // double pairing with Optimal Ate - pp_map_sim_oatep_k12(pair, (ep_t*)(elemsG1) , (ep2_t*)(elemsG2), 2); - - // compare the result to 1 - int res = fp12_cmp_dig(pair, 1); - fp12_free(&pair); - -#elif SINGLE_PAIRING - fp12_t pair1, pair2; - fp12_new(&pair1); fp12_new(&pair2); - pp_map_oatep_k12(pair1, elemsG1[0], elemsG2[0]); - pp_map_oatep_k12(pair2, elemsG1[1], elemsG2[1]); - - int res = fp12_cmp(pair1, pair2); - fp12_free(&pair1); fp12_free(&pair2); -#endif - ep_free(elemsG1[0]); - ep_free(elemsG1[1]); - ep2_free(elemsG2[0]); - ep2_free(elemsG2[1]); - - if (core_get()->code == RLC_OK) { - if (res == RLC_EQ) return VALID; - return INVALID; - } - return UNDEFINED; -} - -// Subtracts the sum of the G2 array elements y from an element x and writes the -// result in res -void ep2_subtract_vector(ep2_t res, ep2_t x, ep2_st* y, const int len){ - ep2_sum_vector(res, y, len); - ep2_neg(res, res); - ep2_add_projc(res, x, res); -} - -// computes the sum of the G1 array elements x and writes the sum in jointx -void ep_sum_vector(ep_t jointx, ep_st* x, const int len) { - ep_set_infty(jointx); - for (int i=0; i<len; i++){ - ep_add_jacob(jointx, jointx, &x[i]); - } -} - -// Computes the sum of the signatures (G1 elements) flattened in a single sigs array -// and writes the sum (G1 element) as bytes in dest. -// The function assumes sigs is correctly allocated with regards to len. -int ep_sum_vector_byte(byte* dest, const byte* sigs_bytes, const int len) { - int error = UNDEFINED; - - // temp variables - ep_t acc; - ep_new(acc); - ep_set_infty(acc); - ep_st* sigs = (ep_st*) malloc(len * sizeof(ep_st)); - if (!sigs) goto mem_error; - for (int i=0; i < len; i++) ep_new(sigs[i]); - - // import the points from the array - for (int i=0; i < len; i++) { - // deserialize each point from the input array - error = ep_read_bin_compact(&sigs[i], &sigs_bytes[SIGNATURE_LEN*i], SIGNATURE_LEN); - if (error != RLC_OK) { - goto out; - } - } - // sum the points - ep_sum_vector(acc, sigs, len); - // export the result - ep_write_bin_compact(dest, acc, SIGNATURE_LEN); - - error = VALID; -out: - // free the temp memory - ep_free(acc); - for (int i=0; i < len; i++) ep_free(sigs[i]); - free(sigs); -mem_error: - return error; -} - -// uses a simple scalar multiplication by G1's order -// to check whether a point on the curve E1 is in G1.
-int simple_subgroup_check_G1(const ep_t p){ - ep_t inf; - ep_new(inf); - // check p^order == infinity - // use basic double & add as lwnaf reduces the expo modulo r - ep_mul_basic(inf, p, &core_get()->ep_r); - if (!ep_is_infty(inf)){ - ep_free(inf); - return INVALID; - } - ep_free(inf); - return VALID; -} - -// uses a simple scalar multiplication by G1's order -// to check whether a point on the curve E2 is in G2. -int simple_subgroup_check_G2(const ep2_t p){ - ep2_t inf; - ep2_new(inf); - // check p^order == infinity - // use basic double & add as lwnaf reduces the expo modulo r - ep2_mul_basic(inf, (ep2_st*)p, &core_get()->ep_r); - if (!ep2_is_infty(inf)){ - ep2_free(inf); - return INVALID; - } - ep2_free(inf); - return VALID; -} - -#if (MEMBERSHIP_CHECK_G1 == BOWE) -// beta such that beta^3 == 1 mod p -// beta is in the Montgomery form -const uint64_t beta_data[Fp_DIGITS] = { - 0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, - 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741, -}; - - -// (z^2-1)/3 with z being the parameter of bls12-381 -const uint64_t z2_1_by3_data[2] = { - 0x0000000055555555, 0x396c8c005555e156 -}; - -// uses Bowe's check from section 3.2 from https://eprint.iacr.org/2019/814.pdf -// to check whether a point on the curve E1 is in G1. -int bowe_subgroup_check_G1(const ep_t p){ - if (ep_is_infty(p) == 1) - return VALID; - fp_t b; - dv_copy(b, beta_data, Fp_DIGITS); - ep_t sigma, sigma2, p_inv; - ep_new(sigma); - ep_new(sigma2); - ep_new(p_inv); - - // si(p) - ep_copy(sigma, p); - fp_mul(sigma[0].x, sigma[0].x, b); - // -si^2(p) - ep_copy(sigma2, sigma); - fp_mul(sigma2[0].x, sigma2[0].x, b); - fp_neg(sigma2[0].y, sigma2[0].y); - ep_dbl(sigma, sigma); - // -p - ep_copy(p_inv, p); - fp_neg(p_inv[0].y, p_inv[0].y); - // (z^2-1)/3 (2*si(p) - p - si^2(p)) - si^2(p) - ep_add(sigma, sigma, p_inv); - ep_add(sigma, sigma, sigma2); - // TODO: multiplication using a chain? 
- ep_mul_lwnaf(sigma, sigma, &bls_prec->z2_1_by3); - ep_add(sigma, sigma, sigma2); - - ep_free(sigma2); - ep_free(p_inv); - // check result against infinity - if (!ep_is_infty(sigma)){ - ep_free(sigma); - return INVALID; - } - ep_free(sigma); - return VALID; -} -#endif - -// generates a random point in G1 and stores it in p -void ep_rand_G1(ep_t p) { - // multiplies G1 generator by a random scalar - ep_rand(p); -} - -// generates a random point in E1\G1 and stores it in p -void ep_rand_G1complement(ep_t p) { - // generate a random point in E1 - p->coord = BASIC; - fp_set_dig(p->z, 1); - do { - fp_rand(p->x); // set x to a random field element - byte r; - rand_bytes(&r, 1); - fp_zero(p->y); - fp_set_bit(p->y, 0, r&1); // set y randomly to 0 or 1 - } - while (ep_upk(p, p) == 0); // make sure p is in E1 - - // map the point to E1\G1 by clearing the G1 order - ep_mul_basic(p, p, &core_get()->ep_r); - - assert(ep_on_curve(p)); // sanity check to make sure p is in E1 -} - -// generates a random point in G2 and stores it in p -void ep2_rand_G2(ep2_t p) { - // multiplies G2 generator by a random scalar - ep2_rand(p); -} - -// generates a random point in E2\G2 and stores it in p -void ep2_rand_G2complement(ep2_t p) { - // generate a random point in E2 - p->coord = BASIC; - fp_set_dig(p->z[0], 1); - fp_zero(p->z[1]); - do { - fp2_rand(p->x); // set x to a random field element - byte r; - rand_bytes(&r, 1); - fp2_zero(p->y); - fp_set_bit(p->y[0], 0, r&1); // set y randomly to 0 or 1 - } - while (ep2_upk(p, p) == 0); // make sure p is in E2 - - // map the point to E2\G2 by clearing the G2 order - ep2_mul_basic(p, p, &core_get()->ep_r); - - assert(ep2_on_curve(p)); // sanity check to make sure p is in E2 -} - -// This is a testing function. -// It wraps a call to a Relic macro since cgo can't call macros. -void xmd_sha256(uint8_t *hash, int len_hash, uint8_t *msg, int len_msg, uint8_t *dst, int len_dst){ - md_xmd_sh256(hash, len_hash, msg, len_msg, dst, len_dst); -} diff --git a/crypto/bls12381_utils.go b/crypto/bls12381_utils.go deleted file mode 100644 index 50676fc2c04..00000000000 --- a/crypto/bls12381_utils.go +++ /dev/null @@ -1,264 +0,0 @@ -//go:build relic -// +build relic - -package crypto - -// this file contains utility functions for the curve BLS 12-381 -// these tools are shared by the BLS signature scheme, the BLS based threshold signature -// and the BLS distributed key generation protocols - -// #cgo CFLAGS: -g -Wall -std=c99 -I${SRCDIR}/ -I${SRCDIR}/relic/build/include -I${SRCDIR}/relic/include -I${SRCDIR}/relic/include/low -// #cgo LDFLAGS: -L${SRCDIR}/relic/build/lib -l relic_s -// #include "bls12381_utils.h" -// #include "bls_include.h" -import "C" -import ( - "errors" -) - -// Go wrappers to Relic C types -// Relic is compiled with ALLOC=AUTO -type pointG1 C.ep_st -type pointG2 C.ep2_st -type scalar C.bn_st - -// context required for the BLS set-up -type ctx struct { - relicCtx *C.ctx_t - precCtx *C.prec_st -} - -// get some constants from the C layer -// (Cgo does not export C macros) -var valid = C.get_valid() -var invalid = C.get_invalid() - -// initContext sets relic BLS12-381 parameters and precomputes some data in the C layer -func (ct *ctx) initContext() error { - c := C.relic_init_BLS12_381() - if c == nil { - return errors.New("Relic core init failed") - } - ct.relicCtx = c - ct.precCtx = C.init_precomputed_data_BLS12_381() - return nil } - -// seeds the internal relic random function. -// relic context must be initialized before seeding.
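The complement samplers above draw a random x until the curve equation has a root, which is cheap on BLS12-381 because p = 3 (mod 4): a square root of a, when it exists, is a^((p+1)/4) mod p. Below is a self-contained Go sketch of that sampling loop for E1 (y^2 = x^3 + 4), without the final multiplication by r that moves the point into E1\G1 (that step needs full curve arithmetic); p is the standard base-field prime and randE1Point is an illustrative name (the seedRelic wrapper documented above follows below).

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

var p, _ = new(big.Int).SetString(
	"1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf"+
		"6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", 16)

// randE1Point mirrors the rejection loop of ep_rand_G1complement: draw a
// random x until x^3 + 4 is a square mod p, then recover y. Since
// p = 3 (mod 4), a square root, when it exists, is a^((p+1)/4) mod p.
func randE1Point() (x, y *big.Int, err error) {
	four := big.NewInt(4)
	e := new(big.Int).Add(p, big.NewInt(1))
	e.Rsh(e, 2) // (p+1)/4
	for {
		if x, err = rand.Int(rand.Reader, p); err != nil {
			return nil, nil, err
		}
		rhs := new(big.Int).Exp(x, big.NewInt(3), p)
		rhs.Add(rhs, four).Mod(rhs, p) // x^3 + 4 (curve E1: y^2 = x^3 + 4)
		y = new(big.Int).Exp(rhs, e, p)
		if new(big.Int).Exp(y, big.NewInt(2), p).Cmp(rhs) == 0 {
			return x, y, nil // y^2 = rhs: (x, y) is on E1
		}
	}
}

func main() {
	x, y, err := randE1Point()
	if err != nil {
		panic(err)
	}
	fmt.Println(x.BitLen() > 0, y.BitLen() >= 0)
}
```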
-func seedRelic(seed []byte) error { - if len(seed) < (securityBits / 8) { - return invalidInputsErrorf( - "seed length needs to be larger than %d", - securityBits/8) - } - if len(seed) > maxRelicPrgSeed { - return invalidInputsErrorf( - "seed length needs to be less than %x", - maxRelicPrgSeed) - } - C.seed_relic((*C.uchar)(&seed[0]), (C.int)(len(seed))) - return nil -} - -// setContext sets the context (previously initialized) of the C layer with -// pre-saved data. -func (ct *ctx) setContext() { - C.core_set(ct.relicCtx) - C.precomputed_data_set(ct.precCtx) -} - -// Exponentiation in G1 (scalar point multiplication) -func (p *pointG1) scalarMultG1(res *pointG1, expo *scalar) { - C.ep_mult((*C.ep_st)(res), (*C.ep_st)(p), (*C.bn_st)(expo)) -} - -// This function is for TEST only -// Exponentiation of g1 in G1 -func generatorScalarMultG1(res *pointG1, expo *scalar) { - C.ep_mult_gen_bench((*C.ep_st)(res), (*C.bn_st)(expo)) -} - -// This function is for TEST only -// Generic Exponentiation G1 -func genericScalarMultG1(res *pointG1, expo *scalar) { - C.ep_mult_generic_bench((*C.ep_st)(res), (*C.bn_st)(expo)) -} - -// Exponentiation of g2 in G2 -func generatorScalarMultG2(res *pointG2, expo *scalar) { - C.ep2_mult_gen((*C.ep2_st)(res), (*C.bn_st)(expo)) -} - -// comparison in Zr where r is the group order of G1/G2 -// (both scalars should be reduced mod r) -func (x *scalar) equals(other *scalar) bool { - return C.bn_cmp((*C.bn_st)(x), (*C.bn_st)(other)) == valid -} - -// comparison in G2 -func (p *pointG2) equals(other *pointG2) bool { - return C.ep2_cmp((*C.ep2_st)(p), (*C.ep2_st)(other)) == valid -} - -// Comparison to zero in Zr. -// Scalar must be already reduced modulo r -func (x *scalar) isZero() bool { - return C.bn_is_zero((*C.bn_st)(x)) == 1 -} - -// Comparison to point at infinity in G2. -func (p *pointG2) isInfinity() bool { - return C.ep2_is_infty((*C.ep2_st)(p)) == 1 -} - -// returns a random number in Zr -func randZr(x *scalar) { - C.bn_randZr((*C.bn_st)(x)) -} - -// returns a random non-zero number in Zr -func randZrStar(x *scalar) { - C.bn_randZr_star((*C.bn_st)(x)) -} - -// mapToZr reads a scalar from a slice of bytes and maps it to Zr. -// The resulting scalar `k` satisfies 0 <= k < r. -// It returns true if scalar is zero and false otherwise. 
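The wrappers in this file follow one convention: the C layer returns plain status ints (valid / invalid / undefined, exported through get_valid() and get_invalid() since cgo cannot read the macros), and Go translates them into errors at the boundary, as in the readPointG1/readPointG2 switches further below. An illustrative Go version of that translation follows; the constant values are placeholders, not the actual macro values.

```go
package main

import (
	"errors"
	"fmt"
)

// Placeholder status codes standing in for what C.get_valid() and
// C.get_invalid() return; anything else is treated as an internal failure.
const (
	valid   = 0 // placeholder for C.get_valid()
	invalid = 1 // placeholder for C.get_invalid()
)

// statusToError converts a C-layer status int into an idiomatic Go error,
// mirroring the switch pattern used by the point-reading wrappers.
func statusToError(status int, what string) error {
	switch status {
	case valid:
		return nil
	case invalid:
		return fmt.Errorf("input is not a valid %s", what)
	default:
		return errors.New("unexpected failure in the C layer")
	}
}

func main() {
	fmt.Println(statusToError(invalid, "G2 point"))
}
```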
-func mapToZr(x *scalar, src []byte) bool { - isZero := C.bn_map_to_Zr((*C.bn_st)(x), - (*C.uchar)(&src[0]), - (C.int)(len(src))) - return isZero == valid -} - -// writeScalar writes a scalar in a slice of bytes -func writeScalar(dest []byte, x *scalar) { - C.bn_write_bin((*C.uchar)(&dest[0]), - (C.ulong)(prKeyLengthBLSBLS12381), - (*C.bn_st)(x), - ) -} - -// readScalar reads a scalar from a slice of bytes -func readScalar(x *scalar, src []byte) { - C.bn_read_bin((*C.bn_st)(x), - (*C.uchar)(&src[0]), - (C.ulong)(len(src)), - ) -} - -// writePointG2 writes a G2 point in a slice of bytes -// The slice should be of size PubKeyLenBLSBLS12381 and the serialization will -// follow the Zcash format specified in draft-irtf-cfrg-pairing-friendly-curves -func writePointG2(dest []byte, a *pointG2) { - C.ep2_write_bin_compact((*C.uchar)(&dest[0]), - (*C.ep2_st)(a), - (C.int)(pubKeyLengthBLSBLS12381), - ) -} - -// writePointG1 writes a G1 point in a slice of bytes -// The slice should be of size SignatureLenBLSBLS12381 and the serialization will -// follow the Zcash format specified in draft-irtf-cfrg-pairing-friendly-curves -func writePointG1(dest []byte, a *pointG1) { - C.ep_write_bin_compact((*C.uchar)(&dest[0]), - (*C.ep_st)(a), - (C.int)(signatureLengthBLSBLS12381), - ) -} - -// readPointG2 reads a G2 point from a slice of bytes -// The slice is expected to be of size PubKeyLenBLSBLS12381 and the deserialization will -// follow the Zcash format specified in draft-irtf-cfrg-pairing-friendly-curves -func readPointG2(a *pointG2, src []byte) error { - switch C.ep2_read_bin_compact((*C.ep2_st)(a), - (*C.uchar)(&src[0]), - (C.int)(len(src))) { - case valid: - return nil - case invalid: - return invalidInputsErrorf("input is not a G2 point") - default: - return errors.New("reading a G2 point failed") - } -} - -// readPointG1 reads a G1 point from a slice of bytes -// The slice should be of size SignatureLenBLSBLS12381 and the deserialization will -// follow the Zcash format specified in draft-irtf-cfrg-pairing-friendly-curves -func readPointG1(a *pointG1, src []byte) error { - switch C.ep_read_bin_compact((*C.ep_st)(a), - (*C.uchar)(&src[0]), - (C.int)(len(src))) { - case valid: - return nil - case invalid: - return invalidInputsErrorf("input is not a G1 point") - default: - return errors.New("reading a G1 point failed") - } -} - -// checkMembershipG1 wraps a call to a subgroup check in G1 since cgo can't be used -// in go test files. -func checkMembershipG1(pt *pointG1) int { - return int(C.check_membership_G1((*C.ep_st)(pt))) -} - -// checkMembershipG2 wraps a call to a subgroup check in G2 since cgo can't be used -// in go test files. -func checkMembershipG2(pt *pointG2) int { - return int(C.check_membership_G2((*C.ep2_st)(pt))) -} - -// randPointG1 wraps a call to C since cgo can't be used in go test files. -// It generates a random point in G1 and stores it in input point. -func randPointG1(pt *pointG1) { - C.ep_rand_G1((*C.ep_st)(pt)) -} - -// randPointG1Complement wraps a call to C since cgo can't be used in go test files. -// It generates a random point in E1\G1 and stores it in input point. -func randPointG1Complement(pt *pointG1) { - C.ep_rand_G1complement((*C.ep_st)(pt)) -} - -// randPointG2 wraps a call to C since cgo can't be used in go test files. -// It generates a random point in G2 and stores it in input point. -func randPointG2(pt *pointG2) { - C.ep2_rand_G2((*C.ep2_st)(pt)) -} - -// randPointG2Complement wraps a call to C since cgo can't be used in go test files.
-// It generates a random point in E2\G2 and stores it in input point. -func randPointG2Complement(pt *pointG2) { - C.ep2_rand_G2complement((*C.ep2_st)(pt)) -} - -// This is only a TEST function. -// It hashes `data` to a G1 point using the tag `dst` and returns the G1 point serialization. -// The function uses xmd with SHA256 in the hash-to-field. -func hashToG1Bytes(data, dst []byte) []byte { - hash := make([]byte, expandMsgOutput) - - inputLength := len(data) - if len(data) == 0 { - data = make([]byte, 1) - } - - // XMD using SHA256 - C.xmd_sha256((*C.uchar)(&hash[0]), - (C.int)(expandMsgOutput), - (*C.uchar)(&data[0]), (C.int)(inputLength), - (*C.uchar)(&dst[0]), (C.int)(len(dst))) - - // map the hash to G1 - var point pointG1 - C.map_to_G1((*C.ep_st)(&point), (*C.uchar)(&hash[0]), (C.int)(len(hash))) - - // serialize the point - pointBytes := make([]byte, signatureLengthBLSBLS12381) - writePointG1(pointBytes, &point) - return pointBytes -} diff --git a/crypto/bls12381_utils.h b/crypto/bls12381_utils.h deleted file mode 100644 index 2c96503654c..00000000000 --- a/crypto/bls12381_utils.h +++ /dev/null @@ -1,143 +0,0 @@ -// +build relic - -// this file contains utility functions for the curve BLS 12-381 -// these tools are shared by the BLS signature scheme, the BLS based threshold signature -// and the BLS distributed key generation protocols - -#ifndef _REL_MISC_INCLUDE_H -#define _REL_MISC_INCLUDE_H - -#include "relic.h" - -typedef uint8_t byte; - -#define VALID RLC_OK -#define INVALID RLC_ERR -#define UNDEFINED (((VALID&1)^1) | ((INVALID&2)^2)) // different value than RLC_OK and RLC_ERR - -#define BITS_TO_BYTES(x) ((x+7)>>3) -#define BITS_TO_DIGITS(x) ((x+63)>>6) -#define BYTES_TO_DIGITS(x) ((x+7)>>3) -#define MIN(a,b) ((a)>(b)?(b):(a)) - -// Fields and Group serialization lengths -#define SEC_BITS 128 -#define Fp_BITS 381 -#define Fr_BITS 255 -#define Fp_BYTES BITS_TO_BYTES(Fp_BITS) -#define Fp2_BYTES (2*Fp_BYTES) -#define Fp_DIGITS BITS_TO_DIGITS(Fp_BITS) -#define Fr_BYTES BITS_TO_BYTES(Fr_BITS) - -#define G1_BYTES (2*Fp_BYTES) -#define G2_BYTES (2*Fp2_BYTES) - -// Compressed and uncompressed points -#define COMPRESSED 1 -#define UNCOMPRESSED 0 -#define G1_SERIALIZATION COMPRESSED -#define G2_SERIALIZATION COMPRESSED - -// Subgroup membership check method -#define EXP_ORDER 0 -#define BOWE 1 -#define MEMBERSHIP_CHECK_G1 BOWE -#define MEMBERSHIP_CHECK_G2 EXP_ORDER - - -// constants used in the optimized SWU hash to curve -#if (hashToPoint == LOCAL_SSWU) - #define ELLP_Nx_LEN 12 - #define ELLP_Dx_LEN 10 - #define ELLP_Ny_LEN 16 - #define ELLP_Dy_LEN 15 -#endif - - -// Structure of precomputed data -typedef struct prec_ { - #if (hashToPoint == LOCAL_SSWU) - // constants needed in optimized SSWU - bn_st p_3div4; - fp_st sqrt_z; - // related hardcoded constants for faster access, - // where a1 is the coefficient of isogenous curve E1 - fp_st minus_a1; - fp_st a1z; - // coefficients of the isogeny map - fp_st iso_Nx[ELLP_Nx_LEN]; - fp_st iso_Ny[ELLP_Ny_LEN]; - #endif - #if (MEMBERSHIP_CHECK_G1 == BOWE) - bn_st beta; - bn_st z2_1_by3; - #endif - // other field-related constants - bn_st p_1div2; - fp_t r; // Montgomery multiplication constant -} prec_st; - -// BLS based SPoCK -int bls_spock_verify(const ep2_t, const byte*, const ep2_t, const byte*); - -// hash to curve functions (functions in bls12381_hashtocurve.c) -void map_to_G1(ep_t, const byte*, const int); - -// Utility functions -int get_valid(); -int get_invalid(); -void bn_new_wrapper(bn_t a); - -ctx_t* relic_init_BLS12_381(); 
-prec_st* init_precomputed_data_BLS12_381(); -void precomputed_data_set(const prec_st* p); -void seed_relic(byte*, int); - -int ep_read_bin_compact(ep_t, const byte *, const int); -void ep_write_bin_compact(byte *, const ep_t, const int); -int ep2_read_bin_compact(ep2_t, const byte *, const int); -void ep2_write_bin_compact(byte *, const ep2_t, const int); -int bn_read_Zr_bin(bn_t, const uint8_t *, int ); - -void ep_mult_gen_bench(ep_t, const bn_t); -void ep_mult_generic_bench(ep_t, const bn_t); -void ep_mult(ep_t, const ep_t, const bn_t); -void ep2_mult_gen(ep2_t, const bn_t); - -void bn_randZr(bn_t); -void bn_randZr_star(bn_t); -int bn_map_to_Zr(bn_t, const uint8_t*, int); -void bn_map_to_Zr_star(bn_t, const uint8_t*, int); - -void bn_sum_vector(bn_t, const bn_st*, const int); -void ep_sum_vector(ep_t, ep_st*, const int); -void ep2_sum_vector(ep2_t, ep2_st*, const int); -int ep_sum_vector_byte(byte*, const byte*, const int); -void ep2_subtract_vector(ep2_t res, ep2_t x, ep2_st* y, const int len); - -// membership checks -int check_membership_G1(const ep_t); -int check_membership_G2(const ep2_t); -int check_membership_Zr_star(const bn_t); - -int simple_subgroup_check_G1(const ep_t); -int simple_subgroup_check_G2(const ep2_t); -void ep_rand_G1(ep_t); -void ep_rand_G1complement( ep_t); -void ep2_rand_G2(ep2_t); -void ep2_rand_G2complement( ep2_t); -#if (MEMBERSHIP_CHECK_G1 == BOWE) -int bowe_subgroup_check_G1(const ep_t); -#endif - -// utility testing function -void xmd_sha256(uint8_t *, int, uint8_t *, int, uint8_t *, int); - -// Debugging related functions -void bytes_print_(char*, byte*, int); -void fp_print_(char*, fp_t); -void bn_print_(char*, bn_st*); -void ep_print_(char*, ep_st*); -void ep2_print_(char*, ep2_st*); - -#endif \ No newline at end of file diff --git a/crypto/bls12381_utils_test.go b/crypto/bls12381_utils_test.go deleted file mode 100644 index f8278414e4a..00000000000 --- a/crypto/bls12381_utils_test.go +++ /dev/null @@ -1,191 +0,0 @@ -//go:build relic -// +build relic - -package crypto - -import ( - crand "crypto/rand" - "encoding/hex" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDeterministicKeyGen(t *testing.T) { - // 2 keys generated with the same seed should be equal - seed := make([]byte, KeyGenSeedMinLen) - n, err := crand.Read(seed) - require.Equal(t, n, KeyGenSeedMinLen) - require.NoError(t, err) - sk1, err := GeneratePrivateKey(BLSBLS12381, seed) - require.Nil(t, err) - sk2, err := GeneratePrivateKey(BLSBLS12381, seed) - require.Nil(t, err) - assert.True(t, sk1.Equals(sk2), "private keys should be equal") -} - -// test the deterministicity of the relic PRG (used by the DKG polynomials) -func TestPRGseeding(t *testing.T) { - blsInstance.reInit() - // 2 scalars generated with the same seed should be equal - seed := make([]byte, KeyGenSeedMinLen) - n, err := crand.Read(seed) - require.Equal(t, n, KeyGenSeedMinLen) - require.NoError(t, err) - // 1st scalar (wrapped in a private key) - err = seedRelic(seed) - require.Nil(t, err) - var sk1 prKeyBLSBLS12381 - randZr(&sk1.scalar) - // 2nd scalar (wrapped in a private key) - err = seedRelic(seed) - require.Nil(t, err) - var sk2 prKeyBLSBLS12381 - randZr(&sk2.scalar) - // compare the 2 scalars (by comparing the private keys) - assert.True(t, sk1.Equals(&sk2), "private keys should be equal") -} - -// G1 and G2 scalar multiplication -func BenchmarkScalarMultG1G2(b *testing.B) { - blsInstance.reInit() - seed := make([]byte, securityBits/8) - _, err := 
crand.Read(seed) - require.NoError(b, err) - _ = seedRelic(seed) - var expo scalar - randZr(&expo) - - // G1 generator multiplication - b.Run("G1 gen", func(b *testing.B) { - var res pointG1 - b.ResetTimer() - for i := 0; i < b.N; i++ { - generatorScalarMultG1(&res, &expo) - } - b.StopTimer() - }) - - // G1 base point multiplication - b.Run("G1 generic", func(b *testing.B) { - var res pointG1 - b.ResetTimer() - for i := 0; i < b.N; i++ { - genericScalarMultG1(&res, &expo) - } - b.StopTimer() - }) - - // G2 base point multiplication - b.Run("G2 gen", func(b *testing.B) { - var res pointG2 - b.ResetTimer() - for i := 0; i < b.N; i++ { - generatorScalarMultG2(&res, &expo) - } - b.StopTimer() - }) -} - -// Sanity-check of the map-to-G1 with regards to the IETF draft hash-to-curve -func TestMapToG1(t *testing.T) { - - // test vectors from https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-14#appendix-J.9.1 - dst := []byte("QUUX-V01-CS02-with-BLS12381G1_XMD:SHA-256_SSWU_RO_") - - msgs := [][]byte{ - []byte{}, - []byte("abc"), - []byte("abcdef0123456789"), - []byte("q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"), - []byte("a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), - } - - expectedPointString := []string{ - "052926add2207b76ca4fa57a8734416c8dc95e24501772c814278700eed6d1e4e8cf62d9c09db0fac349612b759e79a1", - "03567bc5ef9c690c2ab2ecdf6a96ef1c139cc0b2f284dca0a9a7943388a49a3aee664ba5379a7655d3c68900be2f6903", - "11e0b079dea29a68f0383ee94fed1b940995272407e3bb916bbf268c263ddd57a6a27200a784cbc248e84f357ce82d98", - "15f68eaa693b95ccb85215dc65fa81038d69629f70aeee0d0f677cf22285e7bf58d7cb86eefe8f2e9bc3f8cb84fac488", - "082aabae8b7dedb0e78aeb619ad3bfd9277a2f77ba7fad20ef6aabdc6c31d19ba5a6d12283553294c1825c4b3ca2dcfe", - } - - for i, msg := range msgs { - pointBytes := hashToG1Bytes(msg, dst) - - expectedPointBytes, err := hex.DecodeString(expectedPointString[i]) - require.NoError(t, err) - // skip comparing the first 3 bits that depend on the serialization scheme - pointBytes[0] = (expectedPointBytes[0] & 0xE0) | (pointBytes[0] & 0x1F) - assert.Equal(t, expectedPointBytes, pointBytes, "map to G1 should match the IETF draft test vector") - } -} - -// Hashing to G1 bench -func BenchmarkMapToG1(b *testing.B) { - blsInstance.reInit() - input := make([]byte, expandMsgOutput) - for i := 0; i < len(input); i++ { - input[i] = byte(i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - mapToG1(input) - } - b.StopTimer() -} - -// test subgroup membership check in G1 and G2 -func TestSubgroupCheck(t *testing.T) { - blsInstance.reInit() - // seed Relic PRG - seed := make([]byte, securityBits/8) - _, err := crand.Read(seed) - require.NoError(t, err) - _ = seedRelic(seed) - - t.Run("G1", func(t *testing.T) { - var p pointG1 - randPointG1(&p) // point in G1 - res := checkMembershipG1(&p) - assert.Equal(t, res, int(valid)) - randPointG1Complement(&p) // point in E1\G1 - res = checkMembershipG1(&p) - assert.Equal(t, res, int(invalid)) - }) - - 
t.Run("G2", func(t *testing.T) { - var p pointG2 - randPointG2(&p) // point in G2 - res := checkMembershipG2(&p) - assert.Equal(t, res, int(valid)) - randPointG2Complement(&p) // point in E2\G2 - res = checkMembershipG2(&p) - assert.Equal(t, res, int(invalid)) - }) -} - -// subgroup membership check bench -func BenchmarkSubgroupCheck(b *testing.B) { - blsInstance.reInit() - - b.Run("G1", func(b *testing.B) { - var p pointG1 - randPointG1(&p) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = checkMembershipG1(&p) // G1 - } - b.StopTimer() - }) - - b.Run("G2", func(b *testing.B) { - var p pointG2 - randPointG2(&p) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = checkMembershipG2(&p) // G2 - } - b.StopTimer() - }) -} diff --git a/crypto/bls_core.c b/crypto/bls_core.c deleted file mode 100644 index e6e5dca8a3e..00000000000 --- a/crypto/bls_core.c +++ /dev/null @@ -1,540 +0,0 @@ -// +build relic - -#include "bls_include.h" - -// this file is about the core functions required by the BLS signature scheme - -// The functions are tested for ALLOC=AUTO (not for ALLOC=DYNAMIC) - -// functions to export macros to the Go layer (because cgo does not import macros) -int get_signature_len() { - return SIGNATURE_LEN; -} - -int get_pk_len() { - return PK_LEN; -} - -int get_sk_len() { - return SK_LEN; -} - -// checks an input scalar a satisfies 0 < a < r -// where (r) is the order of G1/G2 -int check_membership_Zr_star(const bn_t a){ - if (bn_cmp(a, &core_get()->ep_r) != RLC_LT || bn_cmp_dig(a, 0) != RLC_GT) { - return INVALID; - } - return VALID; -} - -// Checks if input point p is in the subgroup G1. -// The function assumes the input is known to be on the curve E1. -int check_membership_G1(const ep_t p){ -#if MEMBERSHIP_CHECK - #if MEMBERSHIP_CHECK_G1 == EXP_ORDER - return simple_subgroup_check_G1(p); - #elif MEMBERSHIP_CHECK_G1 == BOWE - // section 3.2 from https://eprint.iacr.org/2019/814.pdf - return bowe_subgroup_check_G1(p); - #else - return UNDEFINED; - #endif -#endif - return VALID; -} - -// checks if input point s is on the curve E2 -// and is in the subgroup G2. -// -// membership check in G2 is using a scalar multiplication by the group order. -// TODO: switch to the faster Bowe check -int check_membership_G2(const ep2_t p){ -#if MEMBERSHIP_CHECK - // check p is on curve - if (!ep2_on_curve((ep2_st*)p)) - return INVALID; - // check p is in G2 - #if MEMBERSHIP_CHECK_G2 == EXP_ORDER - return simple_subgroup_check_G2(p); - #elif MEMBERSHIP_CHECK_G2 == BOWE - // TODO: implement Bowe's check - return UNDEFINED; - #else - return UNDEFINED; - #endif -#endif - return VALID; -} - -// Computes a BLS signature from a G1 point -static void bls_sign_ep(byte* s, const bn_t sk, const ep_t h) { - ep_t p; - ep_new(p); - // s = h^sk - ep_mult(p, h, sk); - ep_write_bin_compact(s, p, SIGNATURE_LEN); - ep_free(p); -} - -// Computes a BLS signature from a hash -void bls_sign(byte* s, const bn_t sk, const byte* data, const int len) { - ep_t h; - ep_new(h); - // hash to G1 - map_to_G1(h, data, len); - // s = h^sk - bls_sign_ep(s, sk, h); - ep_free(h); -} - -// Verifies a BLS signature (G1 point) against a public key (G2 point) -// and a message data. -// The signature and public key are assumed to be in G1 and G2 respectively. This -// function only checks the pairing equality. 
-static int bls_verify_ep(const ep2_t pk, const ep_t s, const byte* data, const int len) { - - ep_t elemsG1[2]; - ep2_t elemsG2[2]; - - // elemsG1[0] = s - ep_new(elemsG1[0]); - ep_copy(elemsG1[0], (ep_st*)s); - - // elemsG1[1] = h - ep_new(elemsG1[1]); - // hash to G1 - map_to_G1(elemsG1[1], data, len); - - // elemsG2[1] = pk - ep2_new(elemsG2[1]); - ep2_copy(elemsG2[1], (ep2_st*)pk); - ep2_new(&elemsG2[0]); - - int ret = UNDEFINED; - -#if DOUBLE_PAIRING - // elemsG2[0] = -g2 - ep2_neg(elemsG2[0], core_get()->ep2_g); // could be hardcoded - - fp12_t pair; - fp12_new(&pair); - // double pairing with Optimal Ate - pp_map_sim_oatep_k12(pair, (ep_t*)(elemsG1) , (ep2_t*)(elemsG2), 2); - - // compare the result to 1 - int res = fp12_cmp_dig(pair, 1); - -#elif SINGLE_PAIRING - fp12_t pair1, pair2; - fp12_new(&pair1); fp12_new(&pair2); - pp_map_oatep_k12(pair1, elemsG1[0], core_get()->ep2_g); - pp_map_oatep_k12(pair2, elemsG1[1], elemsG2[1]); - - int res = fp12_cmp(pair1, pair2); -#endif - if (core_get()->code == RLC_OK) { - if (res == RLC_EQ) { - ret = VALID; - goto out; - } else { - ret = INVALID; - goto out; - } - } - -out: - ep_free(elemsG1[0]); - ep_free(elemsG1[1]); - ep2_free(elemsG2[0]); - ep2_free(elemsG2[1]); - - return ret; -} - - -// Verifies the validity of an aggregated BLS signature under distinct messages. -// -// Each message is mapped to a set of public keys, so that the verification equation is -// optimized to compute one pairing per message. -// - sig is the signature. -// - nb_hashes is the number of the messages (hashes) in the map -// - hashes is pointer to all flattened hashes in order where the hash at index i has a byte length len_hashes[i], -// is mapped to pks_per_hash[i] public keys. -// - the keys are flattened in pks in the same hashes order. 
-// -// membership check of the signature in G1 is verified in this function -// membership check of pks in G2 is not verified in this function -// the membership check is separated to allow optimizing multiple verifications using the same pks -int bls_verifyPerDistinctMessage(const byte* sig, - const int nb_hashes, const byte* hashes, const uint32_t* len_hashes, - const uint32_t* pks_per_hash, const ep2_st* pks) { - - int ret = UNDEFINED; // return value - - ep_t* elemsG1 = (ep_t*)malloc((nb_hashes + 1) * sizeof(ep_t)); - if (!elemsG1) goto outG1; - ep2_t* elemsG2 = (ep2_t*)malloc((nb_hashes + 1) * sizeof(ep2_t)); - if (!elemsG2) goto outG2; - - for (int i=0; i < nb_hashes+1; i++) { - ep_new(elemsG1[i]); - ep2_new(elemsG2[i]); - } - - // elemsG1[0] = sig - ret = ep_read_bin_compact(elemsG1[0], sig, SIGNATURE_LEN); - if (ret != RLC_OK) goto out; - - // check s is in G1 - ret = check_membership_G1(elemsG1[0]); // only enabled if MEMBERSHIP_CHECK==1 - if (ret != VALID) goto out; - - // elemsG2[0] = -g2 - ep2_neg(elemsG2[0], core_get()->ep2_g); // could be hardcoded - - // map all hashes to G1 - int offset = 0; - for (int i=1; i < nb_hashes+1; i++) { - // elemsG1[i] = h - // hash to G1 - map_to_G1(elemsG1[i], &hashes[offset], len_hashes[i-1]); - offset += len_hashes[i-1]; - } - - // aggregate public keys mapping to the same hash - offset = 0; - for (int i=1; i < nb_hashes+1; i++) { - // elemsG2[i] = agg_pk[i] - ep2_sum_vector(elemsG2[i], (ep2_st*) &pks[offset] , pks_per_hash[i-1]); - offset += pks_per_hash[i-1]; - } - - fp12_t pair; - fp12_new(&pair); - // double pairing with Optimal Ate - pp_map_sim_oatep_k12(pair, (ep_t*)(elemsG1) , (ep2_t*)(elemsG2), nb_hashes+1); - - // compare the result to 1 - int cmp_res = fp12_cmp_dig(pair, 1); - - if (core_get()->code == RLC_OK) { - if (cmp_res == RLC_EQ) ret = VALID; - else ret = INVALID; - } else { - ret = UNDEFINED; - } - -out: - for (int i=0; i < nb_hashes+1; i++) { - ep_free(elemsG1[i]); - ep2_free(elemsG2[i]); - } - free(elemsG2); -outG2: - free(elemsG1); -outG1: - return ret; -} - - -// Verifies the validity of an aggregated BLS signature under distinct public keys. -// -// Each key is mapped to a set of messages, so that the verification equation is -// optimized to compute one pairing per public key. -// - nb_pks is the number of the public keys in the map. -// - pks is pointer to all pks in order where the key at index i -// is mapped to hashes_per_pk[i] hashes. -// - the messages (hashes) are flattened in hashes in the same public key order, -// each with a length in len_hashes. 
-// -// membership check of the signature in G1 is verified in this function -// membership check of pks in G2 is not verified in this function -// the membership check is separated to allow optimizing multiple verifications using the same pks -int bls_verifyPerDistinctKey(const byte* sig, - const int nb_pks, const ep2_st* pks, const uint32_t* hashes_per_pk, - const byte* hashes, const uint32_t* len_hashes){ - - int ret = UNDEFINED; // return value - - ep_t* elemsG1 = (ep_t*)malloc((nb_pks + 1) * sizeof(ep_t)); - if (!elemsG1) goto outG1; - ep2_t* elemsG2 = (ep2_t*)malloc((nb_pks + 1) * sizeof(ep2_t)); - if (!elemsG2) goto outG2; - for (int i=0; i < nb_pks+1; i++) { - ep_new(elemsG1[i]); - ep2_new(elemsG2[i]); - } - - // elemsG1[0] = s - ret = ep_read_bin_compact(elemsG1[0], sig, SIGNATURE_LEN); - if (ret != RLC_OK) goto out; - - // check s in G1 - ret = check_membership_G1(elemsG1[0]); // only enabled if MEMBERSHIP_CHECK==1 - if (ret != VALID) goto out; - - // elemsG2[0] = -g2 - ep2_neg(elemsG2[0], core_get()->ep2_g); // could be hardcoded - - // set the public keys - for (int i=1; i < nb_pks+1; i++) { - ep2_copy(elemsG2[i], (ep2_st*) &pks[i-1]); - } - - // map all hashes to G1 and aggregate the ones with the same public key - - // tmp_hashes is a temporary array of all hashes under a same key mapped to a G1 point. - // tmp_hashes size is set to the maximum possible size to minimize malloc calls. - int tmp_hashes_size = hashes_per_pk[0]; - for (int i=1; i<nb_pks; i++) - if (hashes_per_pk[i] > tmp_hashes_size) - tmp_hashes_size = hashes_per_pk[i]; - ep_st* tmp_hashes = (ep_st*)malloc(tmp_hashes_size * sizeof(ep_st)); - if (!tmp_hashes) { - ret = UNDEFINED; - goto out; - } - - // sum hashes under the same key - for (int i=0; i<tmp_hashes_size; i++) ep_new(&tmp_hashes[i]); - int data_offset = 0; - int index_offset = 0; - for (int i=1; i < nb_pks+1; i++) { - for (int j=0; j < hashes_per_pk[i-1]; j++) { - // map the hash to G1 - map_to_G1(&tmp_hashes[j], &hashes[data_offset], len_hashes[index_offset]); - data_offset += len_hashes[index_offset]; - index_offset++; - } - // aggregate all the points of the array - ep_sum_vector(elemsG1[i], tmp_hashes, hashes_per_pk[i-1]); - } - for (int i=0; i<tmp_hashes_size; i++) ep_free(&tmp_hashes[i]); - free(tmp_hashes); - - fp12_t pair; - fp12_new(&pair); - // double pairing with Optimal Ate - pp_map_sim_oatep_k12(pair, (ep_t*)(elemsG1) , (ep2_t*)(elemsG2), nb_pks+1); - - // compare the result to 1 - int cmp_res = fp12_cmp_dig(pair, 1); - - if (core_get()->code == RLC_OK) { - if (cmp_res == RLC_EQ) ret = VALID; - else ret = INVALID; - } else { - ret = UNDEFINED; - } - -out: - for (int i=0; i < nb_pks+1; i++) { - ep_free(elemsG1[i]); - ep2_free(elemsG2[i]); - } - free(elemsG2); -outG2: - free(elemsG1); -outG1: - return ret; -} - -// Verifies a BLS signature in a byte buffer. -// membership check of the signature in G1 is verified. -// membership check of pk in G2 is not verified in this function. -// the membership check in G2 is separated to allow optimizing multiple verifications using the same key. 
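Both per-distinct verifiers receive their inputs flattened into single buffers plus side arrays of counts and lengths. Before the definition that follows, here is a short Go sketch of the offset bookkeeping those C loops perform; the names are illustrative and the hash/key contents are stand-ins.

```go
package main

import "fmt"

// walkFlattened mirrors the layout used by bls_verifyPerDistinctMessage:
// all hashes are flattened into one buffer, lenHashes[i] is the byte length
// of hash i, and pksPerHash[i] says how many consecutive public keys verify
// against it. The loop recovers each (hash slice, key range) pair.
func walkFlattened(hashes []byte, lenHashes, pksPerHash []uint32) {
	dataOff, keyOff := 0, 0
	for i := range lenHashes {
		h := hashes[dataOff : dataOff+int(lenHashes[i])]
		fmt.Printf("hash %d = %x -> keys [%d, %d)\n",
			i, h, keyOff, keyOff+int(pksPerHash[i]))
		dataOff += int(lenHashes[i])
		keyOff += int(pksPerHash[i])
	}
}

func main() {
	hashes := []byte{0xaa, 0xbb, 0xcc} // two hashes: [aa bb] and [cc]
	walkFlattened(hashes, []uint32{2, 1}, []uint32{3, 1})
}
```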
-int bls_verify(const ep2_t pk, const byte* sig, const byte* data, const int len) { - ep_t s; - ep_new(s); - - // deserialize the signature into a curve point - int read_ret = ep_read_bin_compact(s, sig, SIGNATURE_LEN); - if (read_ret != RLC_OK) - return read_ret; - - // check s is in G1 - if (check_membership_G1(s) != VALID) // only enabled if MEMBERSHIP_CHECK==1 - return INVALID; - - return bls_verify_ep(pk, s, data, len); -} - -// binary tree structure to be used by bls_batch verify. -// Each node contains a signature and a public key, the signature (resp. the public key) -// being the aggregated signature of the two children's signature (resp. public keys). -// The leaves contain the initial signatures and public keys. -typedef struct st_node { - ep_st* sig; - ep2_st* pk; - struct st_node* left; - struct st_node* right; -} node; - -static node* new_node(const ep2_st* pk, const ep_st* sig){ - node* t = (node*) malloc(sizeof(node)); - if (t) { - t->pk = (ep2_st*)pk; - t->sig = (ep_st*)sig; - t->right = t->left = NULL; - } - return t; -} - -static void free_tree(node* root) { - if (!root) return; - - // only free pks and sigs of non-leafs, data of leafs are allocated - // as an entire array in `bls_batchVerify`. - if (root->left) { // no need to check the right child for the leaf check because - // the recursive build starts with the left side first - // relic free - if (root->sig) ep_free(root->sig); - if (root->pk) ep2_free(root->pk); - // pointer free - free(root->sig); - free(root->pk); - // free the children nodes - free_tree(root->left); - free_tree(root->right); - } - free(root); -} - -// builds a binary tree of aggregation of signatures and public keys recursively. -static node* build_tree(const int len, const ep2_st* pks, const ep_st* sigs) { - // check if a leaf is reached - if (len == 1) { - return new_node(&pks[0], &sigs[0]); // use the first element of the arrays - } - - // a leaf is not reached yet, - int right_len = len/2; - int left_len = len - right_len; - - // create a new node with new points - ep2_st* new_pk = (ep2_st*)malloc(sizeof(ep2_st)); - if (!new_pk) goto error; - ep_st* new_sig = (ep_st*)malloc(sizeof(ep_st)); - if (!new_sig) goto error_sig; - - node* t = new_node(new_pk, new_sig); - if (!t) goto error_node; - ep_new(t->sig); - ep2_new(t->pk); - - // build the tree in a top-down way - t->left = build_tree(left_len, &pks[0], &sigs[0]); - if (!t->left) { free_tree(t); goto error; } - - t->right = build_tree(right_len, &pks[left_len], &sigs[left_len]); - if (!t->right) { free_tree(t); goto error; } - // sum the children - ep_add_jacob(t->sig, t->left->sig, t->right->sig); - ep2_add_projc(t->pk, t->left->pk, t->right->pk); - return t; - -error_node: - free(new_sig); -error_sig: - free(new_pk); -error: - return NULL; -} - -// verify the binary tree and fill the results using recursive batch verifications. -static void bls_batchVerify_tree(const node* root, const int len, byte* results, - const byte* data, const int data_len) { - - // verify the aggregated signature against the aggregated public key. - int res = bls_verify_ep(root->pk, root->sig, data, data_len); - - // if the result is valid, all the subtree signatures are valid. 
- if (res == VALID) { - for (int i=0; i < len; i++) { - if (results[i] == UNDEFINED) results[i] = VALID; // do not overwrite invalid results - } - return; - } - - // check if root is a leaf - if (root->left == NULL) { // no need to check the right side - *results = INVALID; - return; - } - - // otherwise, at least one of the subtree signatures is invalid. - // use the binary tree structure to find the invalid signatures. - int right_len = len/2; - int left_len = len - right_len; - bls_batchVerify_tree(root->left, left_len, &results[0], data, data_len); - bls_batchVerify_tree(root->right, right_len, &results[left_len], data, data_len); -} - -// Batch verifies the validity of multiple BLS signatures of the -// same message under multiple public keys. -// -// - membership checks of all signatures are verified upfront. -// - use random coefficients for signatures and public keys at the same index. -// - optimize the verification by verifying an aggregated signature against an aggregated -// public key, and use a recursive verification to find invalid signatures. -void bls_batchVerify(const int sigs_len, byte* results, const ep2_st* pks_input, - const byte* sigs_bytes, const byte* data, const int data_len) { - - // initialize results to undefined - memset(results, UNDEFINED, sigs_len); - - // build the arrays of G1 and G2 elements to verify - ep2_st* pks = (ep2_st*) malloc(sigs_len * sizeof(ep2_st)); - if (!pks) return; - ep_st* sigs = (ep_st*) malloc(sigs_len * sizeof(ep_st)); - if (!sigs) goto out_sigs; - for (int i=0; i < sigs_len; i++) { - ep_new(sigs[i]); - ep2_new(pks[i]); - } - bn_t r; bn_new(r); - - for (int i=0; i < sigs_len; i++) { - // convert the signature points: - // - invalid points are stored as infinity points with an invalid result, so that - // the tree aggregations remain valid. - // - valid points are multiplied by a random scalar (same for public keys at same index) - // to make sure a signature at index (i) is verified against the public key at the same index. - int read_ret = ep_read_bin_compact(&sigs[i], &sigs_bytes[SIGNATURE_LEN*i], SIGNATURE_LEN); - if ( read_ret != RLC_OK || check_membership_G1(&sigs[i]) != VALID) { - if (read_ret == UNDEFINED) // unexpected error case - goto out; - // set signature as infinity and set result as invalid - ep_set_infty(&sigs[i]); - ep2_copy(&pks[i], (ep2_st*) &pks_input[i]); - results[i] = INVALID; - // multiply signatures and public keys at the same index by random coefficients - } else { - // random non-zero coefficient of at least 128 bits - bn_rand(r, RLC_POS, SEC_BITS); - bn_add_dig(r, r, 1); - ep_mul_lwnaf(&sigs[i], &sigs[i], r); - ep2_mul_lwnaf(&pks[i], (ep2_st*) &pks_input[i], r); - } - } - - // build a binary tree of aggregations - node* root = build_tree(sigs_len, &pks[0], &sigs[0]); - if (!root) goto out; - - // verify the binary tree and fill the results using batch verification - bls_batchVerify_tree(root, sigs_len, &results[0], data, data_len); - // free the allocated tree - free_tree(root); - -out: - bn_free(r); - for (int i=0; i < sigs_len; i++) { - ep_free(sigs[i]); - ep2_free(pks[i]); - } - free(sigs); -out_sigs: - free(pks); -} diff --git a/crypto/bls_crossBLST_test.go b/crypto/bls_crossBLST_test.go deleted file mode 100644 index 5ac9e996cc1..00000000000 --- a/crypto/bls_crossBLST_test.go +++ /dev/null @@ -1,223 +0,0 @@ -//go:build relic -// +build relic - -package crypto - -// This file contains tests against the library BLST (https://github.com/supranational/blst).
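The recursion in bls_batchVerify_tree above is worth seeing in isolation: one aggregate check clears a whole range, and only failing ranges are split, so k invalid signatures among n cost on the order of k*log(n) aggregate verifications instead of n individual ones. A Go sketch over an abstract range check follows; `ok` is an assumption standing in for the pairing verification of the range's aggregated signature (the cross-BLST test file comment continues below).

```go
package main

import "fmt"

// batchFind mirrors bls_batchVerify_tree: if the aggregate of a range
// verifies, mark the whole range valid; otherwise split in half (left half
// gets the extra element, as in the C code) and recurse down to leaves.
func batchFind(results []bool, ok func(lo, hi int) bool, lo, hi int) {
	if ok(lo, hi) {
		for i := lo; i < hi; i++ {
			results[i] = true
		}
		return
	}
	if hi-lo == 1 { // a leaf: this signature is invalid
		results[lo] = false
		return
	}
	mid := lo + (hi-lo+1)/2
	batchFind(results, ok, lo, mid)
	batchFind(results, ok, mid, hi)
}

func main() {
	valid := []bool{true, true, false, true, true}
	// ok stands in for verifying the aggregated signature of a range.
	ok := func(lo, hi int) bool {
		for i := lo; i < hi; i++ {
			if !valid[i] {
				return false
			}
		}
		return true
	}
	results := make([]bool, len(valid))
	batchFind(results, ok, 0, len(valid))
	fmt.Println(results) // [true true false true true]
}
```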
-// The purpose of these tests is to detect differences with a different implementation of BLS on the BLS12-381 -// curve since the BLS IETF draft (https://datatracker.ietf.org/doc/draft-irtf-cfrg-bls-signature/) doesn't -// provide extensive test vectors. -// -// This file also serves as a way to test the Flow crypto module against random input data -// generated by the "rapid" package. If the comparison against BLST is removed in the future, -// it is mandatory to add fuzzing-like tests using random inputs. -// -// A detected difference with the BLST library doesn't necessarily mean a bug or a non-standard implementation since -// both libraries might have made different choices. It is nevertheless a good flag for possible bugs or deviations -// from the standard as both libraries are being developed. - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - blst "github.com/supranational/blst/bindings/go" - "pgregory.net/rapid" -) - -// validPrivateKeyBytesFlow generates bytes of a valid private key in the Flow library -func validPrivateKeyBytesFlow(t *rapid.T) []byte { - seed := rapid.SliceOfN(rapid.Byte(), KeyGenSeedMinLen, KeyGenSeedMaxLen).Draw(t, "seed").([]byte) - sk, err := GeneratePrivateKey(BLSBLS12381, seed) - // TODO: require.NoError(t, err) seems to mess with rapid - if err != nil { - assert.FailNow(t, "failed key generation") - } - return sk.Encode() -} - -// validPublicKeyBytesFlow generates bytes of a valid public key in the Flow library -func validPublicKeyBytesFlow(t *rapid.T) []byte { - seed := rapid.SliceOfN(rapid.Byte(), KeyGenSeedMinLen, KeyGenSeedMaxLen).Draw(t, "seed").([]byte) - sk, err := GeneratePrivateKey(BLSBLS12381, seed) - require.NoError(t, err) - return sk.PublicKey().Encode() -} - -// validSignatureBytesFlow generates bytes of a valid signature in the Flow library -func validSignatureBytesFlow(t *rapid.T) []byte { - seed := rapid.SliceOfN(rapid.Byte(), KeyGenSeedMinLen, KeyGenSeedMaxLen).Draw(t, "seed").([]byte) - sk, err := GeneratePrivateKey(BLSBLS12381, seed) - require.NoError(t, err) - hasher := NewExpandMsgXOFKMAC128("random_tag") - message := rapid.SliceOfN(rapid.Byte(), 1, 1000).Draw(t, "msg").([]byte) - signature, err := sk.Sign(message, hasher) - require.NoError(t, err) - return signature -} - -// validPrivateKeyBytesBLST generates bytes of a valid private key in the BLST library -func validPrivateKeyBytesBLST(t *rapid.T) []byte { - randomSlice := rapid.SliceOfN(rapid.Byte(), KeyGenSeedMinLen, KeyGenSeedMaxLen) - ikm := randomSlice.Draw(t, "ikm").([]byte) - return blst.KeyGen(ikm).Serialize() -} - -// validPublicKeyBytesBLST generates bytes of a valid public key in the BLST library -func validPublicKeyBytesBLST(t *rapid.T) []byte { - ikm := rapid.SliceOfN(rapid.Byte(), KeyGenSeedMinLen, KeyGenSeedMaxLen).Draw(t, "ikm").([]byte) - blstS := blst.KeyGen(ikm) - blstG2 := new(blst.P2Affine).From(blstS) - return blstG2.Compress() -} - -// validSignatureBytesBLST generates bytes of a valid signature in the BLST library -func validSignatureBytesBLST(t *rapid.T) []byte { - ikm := rapid.SliceOfN(rapid.Byte(), KeyGenSeedMinLen, KeyGenSeedMaxLen).Draw(t, "ikm").([]byte) - blstS := blst.KeyGen(ikm[:]) - blstG1 := new(blst.P1Affine).From(blstS) - return blstG1.Compress() -} - -// testEncodeDecodePrivateKeyCrossBLST tests that encoding and decoding of private keys are consistent with BLST. -// This test assumes private key serialization is identical to the one in BLST.
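The helpers above and the tests below all follow the same property-based pattern from the rapid package: draw structured random bytes, run the operation under test, and assert an invariant. A minimal skeleton of that pattern, using the same pre-generics rapid API as this file (Draw returns interface{}); the encode/decode pair is a stand-in identity, not the crypto code.

```go
package crypto_test

import (
	"bytes"
	"testing"

	"pgregory.net/rapid"
)

// TestRoundTripSkeleton shows the property-based shape of the cross-BLST
// tests: generate input, apply the operation, check the invariant holds for
// every drawn value. The "operation" here is a placeholder copy.
func TestRoundTripSkeleton(t *testing.T) {
	rapid.Check(t, func(t *rapid.T) {
		in := rapid.SliceOfN(rapid.Byte(), 32, 32).Draw(t, "in").([]byte)
		out := append([]byte(nil), in...) // stand-in for encode->decode
		if !bytes.Equal(in, out) {
			t.Fatalf("round trip mismatch: %x != %x", in, out)
		}
	})
}
```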
-func testEncodeDecodePrivateKeyCrossBLST(t *rapid.T) { - randomSlice := rapid.SliceOfN(rapid.Byte(), prKeyLengthBLSBLS12381, prKeyLengthBLSBLS12381) - validSliceFlow := rapid.Custom(validPrivateKeyBytesFlow) - validSliceBLST := rapid.Custom(validPrivateKeyBytesBLST) - // skBytes are bytes of either a valid or a random private key - skBytes := rapid.OneOf(randomSlice, validSliceFlow, validSliceBLST).Example().([]byte) - - // check decoding results are consistent - skFlow, err := DecodePrivateKey(BLSBLS12381, skBytes) - var skBLST blst.Scalar - res := skBLST.Deserialize(skBytes) - - flowPass := err == nil - blstPass := res != nil - require.Equal(t, flowPass, blstPass, "deserialization of the private key %x differs", skBytes) - - // check private keys are equal - if blstPass && flowPass { - skFlowOutBytes := skFlow.Encode() - skBLSTOutBytes := skBLST.Serialize() - - assert.Equal(t, skFlowOutBytes, skBLSTOutBytes) - } -} - -// testEncodeDecodePublicKeyCrossBLST tests that encoding and decoding of public keys are consistent with BLST. -// This test assumes public key serialization is identical to the one in BLST. -func testEncodeDecodePublicKeyCrossBLST(t *rapid.T) { - randomSlice := rapid.SliceOfN(rapid.Byte(), PubKeyLenBLSBLS12381, PubKeyLenBLSBLS12381) - validSliceFlow := rapid.Custom(validPublicKeyBytesFlow) - validSliceBLST := rapid.Custom(validPublicKeyBytesBLST) - // pkBytes are bytes of either a valid or a random public key - pkBytes := rapid.OneOf(randomSlice, validSliceFlow, validSliceBLST).Example().([]byte) - - // check decoding results are consistent - pkFlow, err := DecodePublicKey(BLSBLS12381, pkBytes) - var pkBLST blst.P2Affine - res := pkBLST.Deserialize(pkBytes) - pkValidBLST := pkBLST.KeyValidate() - - flowPass := err == nil - blstPass := res != nil && pkValidBLST - require.Equal(t, flowPass, blstPass, "deserialization of pubkey %x differs", pkBytes) - - // check public keys are equal - if flowPass && blstPass { - pkFlowOutBytes := pkFlow.Encode() - pkBLSTOutBytes := pkBLST.Compress() - - assert.Equal(t, pkFlowOutBytes, pkBLSTOutBytes) - } -} - -// testEncodeDecodeSignatureCrossBLST tests that encoding and decoding of signatures are consistent with BLST. -// This test assumes signature serialization is identical to the one in BLST.
-func testEncodeDecodeSignatureCrossBLST(t *rapid.T) { - randomSlice := rapid.SliceOfN(rapid.Byte(), SignatureLenBLSBLS12381, SignatureLenBLSBLS12381) - validSignatureFlow := rapid.Custom(validSignatureBytesFlow) - validSignatureBLST := rapid.Custom(validSignatureBytesBLST) - // sigBytes are bytes of either a valid or a random signature - sigBytes := rapid.OneOf(randomSlice, validSignatureFlow, validSignatureBLST).Example().([]byte) - - // check decoding results are consistent - var pointFlow pointG1 - // here we test readPointG1 rather than the simple Signature type alias - err := readPointG1(&pointFlow, sigBytes) - flowPass := (err == nil) && (checkMembershipG1(&pointFlow) == int(valid)) - - var pointBLST blst.P1Affine - res := pointBLST.Uncompress(sigBytes) - // flow validation has no infinity rejection for G1 - blstPass := (res != nil) && pointBLST.SigValidate(false) - - require.Equal(t, flowPass, blstPass, "deserialization of signature %x differs", sigBytes) - - // check both signatures (G1 points) are equal - if flowPass && blstPass { - sigFlowOutBytes := make([]byte, signatureLengthBLSBLS12381) - writePointG1(sigFlowOutBytes, &pointFlow) - sigBLSTOutBytes := pointBLST.Compress() - - assert.Equal(t, sigFlowOutBytes, sigBLSTOutBytes) - } -} - -// testSignHashCrossBLST tests that signing a hashed message is consistent with BLST. -// -// The test assumes the used hash-to-field and map-to-curve are identical in the 2 signatures: -// - hash-to-field : use XMD_SHA256 in both signatures -// - map to curve : Flow and BLST use an SWU mapping consistent with the test vector in -// https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-14#appendix-J.9.1 -// (Flow map to curve is tested against the IETF draft in TestMapToG1, BLST map to curve is not -// tested in this repo) -// -// The test also assumes Flow signature serialization is identical to the one in BLST.
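Both libraries in the sign test below expand the message with expand_message_xmd from the hash-to-curve draft before mapping to the curve. Here is a sketch of that expansion with SHA-256 (block size 64 bytes, hash output 32 bytes), written from the draft's description; input-size limits (len(dst) <= 255, ell <= 255) are assumed rather than enforced, and the function name is illustrative.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// expandMsgXMD sketches expand_message_xmd (hash-to-curve draft) with
// SHA-256: derive b_0 from a zero-padded block, then chain b_i values,
// each domain-separated by a counter byte and DST_prime.
func expandMsgXMD(msg, dst []byte, outLen int) []byte {
	ell := (outLen + 31) / 32
	dstPrime := append(append([]byte(nil), dst...), byte(len(dst)))

	// b_0 = H(Z_pad || msg || I2OSP(outLen, 2) || 0x00 || DST_prime)
	h0 := sha256.New()
	h0.Write(make([]byte, 64)) // Z_pad: one zeroed hash block
	h0.Write(msg)
	h0.Write([]byte{byte(outLen >> 8), byte(outLen), 0x00})
	h0.Write(dstPrime)
	b0 := h0.Sum(nil)

	out := make([]byte, 0, ell*32)
	prev := make([]byte, 32) // b_(i-1), all-zero before the first round
	for i := 1; i <= ell; i++ {
		hi := sha256.New()
		x := make([]byte, 32)
		for j := range x { // strxor(b_0, b_(i-1))
			x[j] = b0[j] ^ prev[j]
		}
		hi.Write(x)
		hi.Write([]byte{byte(i)}) // I2OSP(i, 1)
		hi.Write(dstPrime)
		prev = hi.Sum(nil)
		out = append(out, prev...)
	}
	return out[:outLen]
}

func main() {
	dst := []byte("QUUX-V01-CS02-with-expander-SHA256-128")
	fmt.Printf("%x\n", expandMsgXMD([]byte("abc"), dst, 32))
}
```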
-func testSignHashCrossBLST(t *rapid.T) { - // decode the same private key bytes in both libraries - skBytes := rapid.Custom(validPrivateKeyBytesFlow).Example().([]byte) - - skFlow, err := DecodePrivateKey(BLSBLS12381, skBytes) - require.NoError(t, err) - var skBLST blst.Scalar - res := skBLST.Deserialize(skBytes) - require.NotNil(t, res) - - // generate two signatures using both libraries - blsCipher := []byte("BLS_SIG_BLS12381G1_XMD:SHA-256_SSWU_RO_NUL_") - message := rapid.SliceOfN(rapid.Byte(), 1, 1000).Example().([]byte) - - var sigBLST blst.P1Affine - sigBLST.Sign(&skBLST, message, blsCipher) - sigBytesBLST := sigBLST.Compress() - - skFlowBLS, ok := skFlow.(*prKeyBLSBLS12381) - require.True(t, ok, "incoherent key type assertion") - sigFlow := skFlowBLS.signWithXMDSHA256(message) - sigBytesFlow := sigFlow.Bytes() - - // check both signatures are equal - assert.Equal(t, sigBytesBLST, sigBytesFlow) -} - -func testKeyGenCrossBLST(t *rapid.T) { - seed := rapid.SliceOfN(rapid.Byte(), KeyGenSeedMinLen, KeyGenSeedMaxLen).Draw(t, "seed").([]byte) - - skFlow, err := GeneratePrivateKey(BLSBLS12381, seed) - if err != nil { - assert.FailNow(t, "failed key generation") - } - skBLST := blst.KeyGen(seed) - assert.Equal(t, skFlow.Encode(), skBLST.Serialize()) -} - -func TestAgainstBLST(t *testing.T) { - rapid.Check(t, testKeyGenCrossBLST) - rapid.Check(t, testEncodeDecodePrivateKeyCrossBLST) - rapid.Check(t, testEncodeDecodePublicKeyCrossBLST) - rapid.Check(t, testEncodeDecodeSignatureCrossBLST) - rapid.Check(t, testSignHashCrossBLST) -} diff --git a/crypto/bls_include.h b/crypto/bls_include.h deleted file mode 100644 index 016845719e1..00000000000 --- a/crypto/bls_include.h +++ /dev/null @@ -1,48 +0,0 @@ -// +build relic - -// this file is about the core functions required by the BLS signature scheme - -#ifndef _REL_BLS_INCLUDE_H -#define _REL_BLS_INCLUDE_H - -#include "relic.h" -#include "bls12381_utils.h" - -// Signature, Public key and Private key lengths -#define FULL_SIGNATURE_LEN G1_BYTES -#define FULL_PK_LEN G2_BYTES -#define SIGNATURE_LEN (FULL_SIGNATURE_LEN/(G1_SERIALIZATION+1)) -#define PK_LEN (FULL_PK_LEN/(G2_SERIALIZATION+1)) -#define SK_BITS (Fr_BITS) -#define SK_LEN BITS_TO_BYTES(SK_BITS) - -// Simultaneous Pairing in verification -#define DOUBLE_PAIRING 1 -#define SINGLE_PAIRING (DOUBLE_PAIRING^1) - -// Signature and public key membership check -#define MEMBERSHIP_CHECK 1 - -// algorithm choice for the hashing to G1 -// both methods are similar implementations of the same optimized SSWU -// but offer different timings.
-#define RELIC_SSWU 1 // relic library implementation
-#define LOCAL_SSWU 2 // local implementation
-#define hashToPoint LOCAL_SSWU
-
-// bls core (functions in bls_core.c)
-int get_signature_len();
-int get_pk_len();
-int get_sk_len();
-
-void bls_sign(byte*, const bn_t, const byte*, const int);
-int bls_verify(const ep2_t, const byte*, const byte*, const int);
-int bls_verifyPerDistinctMessage(const byte*, const int, const byte*, const uint32_t*,
-                                 const uint32_t*, const ep2_st*);
-int bls_verifyPerDistinctKey(const byte*,
-                             const int, const ep2_st*, const uint32_t*,
-                             const byte*, const uint32_t*);
-void bls_batchVerify(const int, byte*, const ep2_st*,
-                     const byte*, const byte*, const int);
-
-#endif
diff --git a/crypto/bls_multisig.go b/crypto/bls_multisig.go
deleted file mode 100644
index 1dfe29abc05..00000000000
--- a/crypto/bls_multisig.go
+++ /dev/null
@@ -1,581 +0,0 @@
-//go:build relic
-// +build relic
-
-package crypto
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/onflow/flow-go/crypto/hash"
-)
-
-// BLS multi-signature using the BLS12-381 curve
-// ([zcash] https://github.com/zkcrypto/pairing/blob/master/src/bls12_381/README.md#bls12-381)
-// Pairing, elliptic curve and modular arithmetic use the Relic library.
-// This implementation does not include any security against side-channel attacks.
-
-// existing features:
-// - the same BLS set-up as in bls.go
-// - use the proof of possession scheme (PoP) to protect against rogue public-key attacks
-// - non-interactive aggregation of private keys, public keys and signatures
-// - non-interactive subtraction of multiple public keys from an (aggregated) public key
-// - multi-signature verification of an aggregated signature of a single message
-//   under multiple public keys
-// - multi-signature verification of an aggregated signature of multiple messages under
-//   multiple public keys
-// - batch verification of multiple signatures of a single message under multiple
-//   public keys: uses a binary tree of aggregations to find the invalid signatures
-
-// #cgo CFLAGS: -g -Wall -std=c99
-// #cgo LDFLAGS: -L${SRCDIR}/relic/build/lib -l relic_s
-// #include "bls_include.h"
-import "C"
-
-// the PoP hasher, used to generate and verify PoPs
-// The key is based on blsPOPCipherSuite which guarantees
-// that hash_to_field of PoP is orthogonal to all hash_to_field functions
-// used for signatures.
-var popKMAC = internalExpandMsgXOFKMAC128(blsPOPCipherSuite)
-
-// BLSGeneratePOP returns a proof of possession (PoP) for the receiver private key.
-//
-// The KMAC hasher used in the function is guaranteed to be orthogonal to all hashers used
-// for signatures or SPoCK proofs in this package. This means a specific domain tag is used
-// to generate PoP and is not used by any other application.
-//
-// The function returns:
-//  - (nil, notBLSKeyError) if the input key is not of type BLS BLS12-381
-//  - (pop, nil) otherwise
-func BLSGeneratePOP(sk PrivateKey) (Signature, error) {
-	_, ok := sk.(*prKeyBLSBLS12381)
-	if !ok {
-		return nil, notBLSKeyError
-	}
-	// sign the public key
-	return sk.Sign(sk.PublicKey().Encode(), popKMAC)
-}
-
-// BLSVerifyPOP verifies a proof of possession (PoP) for the receiver public key.
-//
-// The function internally uses the same KMAC hasher used to generate the PoP.
-// The hasher is guaranteed to be orthogonal to any hasher used to generate signatures
-// or SPoCK proofs in this package.
-// Note that verifying a PoP against an identity public key fails.
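-//
-// A hypothetical usage sketch (illustrative only; assumes sk is a BLS12-381
-// private key):
-//
-//	pop, err := BLSGeneratePOP(sk)
-//	if err != nil {
-//		// sk is not a BLS BLS12-381 key
-//	}
-//	ok, err := BLSVerifyPOP(sk.PublicKey(), pop) // expect ok == true, err == nil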
-//
-// The function returns:
-//  - (false, notBLSKeyError) if the input key is not of type BLS BLS12-381
-//  - (validity, nil) otherwise
-func BLSVerifyPOP(pk PublicKey, s Signature) (bool, error) {
-	_, ok := pk.(*pubKeyBLSBLS12381)
-	if !ok {
-		return false, notBLSKeyError
-	}
-	// verify the signature against the public key
-	return pk.Verify(s, pk.Encode(), popKMAC)
-}
-
-// AggregateBLSSignatures aggregates multiple BLS signatures into one.
-//
-// Signatures could be generated from the same or distinct messages; they
-// could also be the aggregation of other signatures.
-// The order of the signatures in the slice does not matter since the aggregation
-// is commutative. The slice should not be empty.
-// No G1 membership check is performed on the input signatures.
-//
-// The function returns:
-//  - (nil, blsAggregateEmptyListError) if no signatures are provided (input slice is empty)
-//  - (nil, invalidSignatureError) if a deserialization of at least one signature fails (input is an invalid serialization of a
-//    compressed E1 element following [zcash]
-//    https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-08.html#name-zcash-serialization-format-).
-//    G1 membership is not checked.
-//  - (nil, error) if an unexpected error occurs
-//  - (aggregated_signature, nil) otherwise
-func AggregateBLSSignatures(sigs []Signature) (Signature, error) {
-	// set BLS context
-	blsInstance.reInit()
-
-	// check for empty list
-	if len(sigs) == 0 {
-		return nil, blsAggregateEmptyListError
-	}
-
-	// flatten the shares (required by the C layer)
-	flatSigs := make([]byte, 0, signatureLengthBLSBLS12381*len(sigs))
-	for i, sig := range sigs {
-		if len(sig) != signatureLengthBLSBLS12381 {
-			return nil, fmt.Errorf("signature at index %d has an invalid length: %w", i, invalidSignatureError)
-		}
-		flatSigs = append(flatSigs, sig...)
-	}
-	aggregatedSig := make([]byte, signatureLengthBLSBLS12381)
-
-	// add the points in the C layer
-	result := C.ep_sum_vector_byte(
-		(*C.uchar)(&aggregatedSig[0]),
-		(*C.uchar)(&flatSigs[0]),
-		(C.int)(len(sigs)))
-
-	switch result {
-	case valid:
-		return aggregatedSig, nil
-	case invalid:
-		return nil, invalidSignatureError
-	default:
-		return nil, fmt.Errorf("aggregating signatures failed")
-	}
-}
-
-// AggregateBLSPrivateKeys aggregates multiple BLS private keys into one.
-//
-// The order of the keys in the slice does not matter since the aggregation
-// is commutative. The slice should not be empty.
-// No check is performed on the input private keys.
-// Input or output private keys could be equal to the identity element (zero). Note that any
-// signature generated by the identity key is invalid (to avoid equivocation issues).
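-//
-// A hypothetical property sketch (illustrative only; sk1 and sk2 are assumed
-// BLS12-381 private keys):
-//
-//	aggSk, _ := AggregateBLSPrivateKeys([]PrivateKey{sk1, sk2})
-//	// a signature of msg by aggSk equals the aggregation of the two
-//	// individual signatures of msg by sk1 and sk2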
-//
-// The function returns:
-//  - (nil, notBLSKeyError) if at least one key is not of type BLS BLS12-381
-//  - (nil, blsAggregateEmptyListError) if no keys are provided (input slice is empty)
-//  - (aggregated_key, nil) otherwise
-func AggregateBLSPrivateKeys(keys []PrivateKey) (PrivateKey, error) {
-	// set BLS context
-	blsInstance.reInit()
-
-	// check for empty list
-	if len(keys) == 0 {
-		return nil, blsAggregateEmptyListError
-	}
-
-	scalars := make([]scalar, 0, len(keys))
-	for i, sk := range keys {
-		skBls, ok := sk.(*prKeyBLSBLS12381)
-		if !ok {
-			return nil, fmt.Errorf("key at index %d is invalid: %w", i, notBLSKeyError)
-		}
-		scalars = append(scalars, skBls.scalar)
-	}
-
-	var sum scalar
-	C.bn_new_wrapper((*C.bn_st)(&sum))
-	C.bn_sum_vector((*C.bn_st)(&sum), (*C.bn_st)(&scalars[0]),
-		(C.int)(len(scalars)))
-	return newPrKeyBLSBLS12381(&sum), nil
-}
-
-// AggregateBLSPublicKeys aggregates multiple BLS public keys into one.
-//
-// The order of the keys in the slice does not matter since the aggregation
-// is commutative. The slice should not be empty.
-// No check is performed on the input public keys. The input keys are guaranteed by
-// the package constructors to be on the G2 subgroup.
-// Input or output keys can be equal to the identity key. Note that any
-// signature verified against the identity key is invalid (to avoid equivocation issues).
-//
-// The function returns:
-//  - (nil, notBLSKeyError) if at least one key is not of type BLS BLS12-381
-//  - (nil, blsAggregateEmptyListError) if no keys are provided (input slice is empty)
-//  - (aggregated_key, nil) otherwise
-func AggregateBLSPublicKeys(keys []PublicKey) (PublicKey, error) {
-	// set BLS context
-	blsInstance.reInit()
-
-	// check for empty list
-	if len(keys) == 0 {
-		return nil, blsAggregateEmptyListError
-	}
-
-	points := make([]pointG2, 0, len(keys))
-	for i, pk := range keys {
-		pkBLS, ok := pk.(*pubKeyBLSBLS12381)
-		if !ok {
-			return nil, fmt.Errorf("key at index %d is invalid: %w", i, notBLSKeyError)
-		}
-		points = append(points, pkBLS.point)
-	}
-
-	var sum pointG2
-	C.ep2_sum_vector((*C.ep2_st)(&sum), (*C.ep2_st)(&points[0]),
-		(C.int)(len(points)))
-
-	sumKey := newPubKeyBLSBLS12381(&sum)
-	return sumKey, nil
-}
-
-// IdentityBLSPublicKey returns an identity public key which corresponds to the point
-// at infinity in G2 (identity element of G2).
-func IdentityBLSPublicKey() PublicKey {
-	// set BLS context
-	blsInstance.reInit()
-
-	identity := *newPubKeyBLSBLS12381(nil)
-	// set the point to infinity
-	C.ep2_set_infty((*C.ep2_st)(&identity.point))
-	identity.isIdentity = true
-	return &identity
-}
-
-// RemoveBLSPublicKeys removes multiple BLS public keys from a given (aggregated) public key.
-//
-// The common use case assumes the aggregated public key was initially formed using
-// the keys to be removed (directly or using other aggregated forms). However, the function
-// can still be called in different use cases.
-// The order of the keys to be removed in the slice does not matter since the removal
-// is commutative. The slice of keys to be removed can be empty.
-// No check is performed on the input public keys. The input keys are guaranteed by the
-// package constructors to be on the G2 subgroup.
-// Input or output keys can be equal to the identity key.
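-//
-// A hypothetical usage sketch (illustrative only; pkA and pkB are assumed
-// BLS12-381 public keys):
-//
-//	aggPk, _ := AggregateBLSPublicKeys([]PublicKey{pkA, pkB})
-//	onlyA, _ := RemoveBLSPublicKeys(aggPk, []PublicKey{pkB})
-//	// onlyA.Equals(pkA) is expected to be true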
-//
-// The function returns:
-//  - (nil, notBLSKeyError) if at least one input key is not of type BLS BLS12-381
-//  - (remaining_key, nil) otherwise
func RemoveBLSPublicKeys(aggKey PublicKey, keysToRemove []PublicKey) (PublicKey, error) {
-	// set BLS context
-	blsInstance.reInit()
-
-	aggPKBLS, ok := aggKey.(*pubKeyBLSBLS12381)
-	if !ok {
-		return nil, notBLSKeyError
-	}
-
-	pointsToSubtract := make([]pointG2, 0, len(keysToRemove))
-	for i, pk := range keysToRemove {
-		pkBLS, ok := pk.(*pubKeyBLSBLS12381)
-		if !ok {
-			return nil, fmt.Errorf("key at index %d is invalid: %w", i, notBLSKeyError)
-		}
-		pointsToSubtract = append(pointsToSubtract, pkBLS.point)
-	}
-
-	// check for empty list to avoid a cgo edge case
-	if len(keysToRemove) == 0 {
-		return aggKey, nil
-	}
-
-	var resultPoint pointG2
-	C.ep2_subtract_vector((*C.ep2_st)(&resultPoint), (*C.ep2_st)(&aggPKBLS.point),
-		(*C.ep2_st)(&pointsToSubtract[0]), (C.int)(len(pointsToSubtract)))
-
-	resultKey := newPubKeyBLSBLS12381(&resultPoint)
-	return resultKey, nil
-}
-
-// VerifyBLSSignatureOneMessage is a multi-signature verification that verifies a
-// BLS signature of a single message against multiple BLS public keys.
-//
-// The input signature could be generated by aggregating multiple signatures of the
-// message under multiple private keys. The public keys corresponding to the signing
-// private keys are passed as input to this function.
-// The caller must make sure the input public keys' proofs of possession have been
-// verified prior to calling this function (or that each input key is a sum of public keys
-// whose proofs of possession have been verified).
-//
-// The input hasher is the same hasher used to generate all initial signatures.
-// The order of the public keys in the slice does not matter.
-// Membership check is performed on the input signature but is not performed on the input
-// public keys (membership is guaranteed by using the package functions).
-// If the input public keys add up to the identity public key, the signature is invalid
-// to avoid signature equivocation issues.
-//
-// This is a special case function of VerifyBLSSignatureManyMessages, using a single
-// message and hasher.
-//
-// The function returns:
-//  - (false, nilHasherError) if hasher is nil
-//  - (false, invalidHasherSizeError) if hasher's output size is not 128 bytes
-//  - (false, notBLSKeyError) if at least one key is not of type pubKeyBLSBLS12381
-//  - (false, blsAggregateEmptyListError) if the input key slice is empty
-//  - (false, error) if an unexpected error occurs
-//  - (validity, nil) otherwise
-func VerifyBLSSignatureOneMessage(
-	pks []PublicKey, s Signature, message []byte, kmac hash.Hasher,
-) (bool, error) {
-	// public key list must be non empty, this is checked internally by AggregateBLSPublicKeys
-	aggPk, err := AggregateBLSPublicKeys(pks)
-	if err != nil {
-		return false, fmt.Errorf("verify signature one message failed: %w", err)
-	}
-	return aggPk.Verify(s, message, kmac)
-}
-
-// VerifyBLSSignatureManyMessages is a multi-signature verification that verifies a
-// BLS signature under multiple messages and public keys.
-//
-// The input signature could be generated by aggregating multiple signatures of distinct
-// messages under distinct private keys. The verification is performed against the message
-// at index (i) and the public key at the same index (i) of the input messages and public keys.
-// The hasher at index (i) is used to hash the message at index (i).
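-//
-// A hypothetical call shape (illustrative only; all input slices must have
-// the same length):
-//
-//	valid, err := VerifyBLSSignatureManyMessages(
-//		[]PublicKey{pkA, pkB}, aggSig,
-//		[][]byte{msg1, msg2},
-//		[]hash.Hasher{kmacA, kmacB})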
-//
-// Since the package only supports the Proof of Possession scheme, the function does not enforce
-// input messages to be distinct. Therefore, the caller must make sure the input public keys'
-// proofs of possession have been verified prior to calling this function (or that each input key
-// is a sum of public keys whose proofs of possession have been verified).
-//
-// The verification is optimized to compute one pairing per distinct message, or one pairing
-// per distinct key, whichever way requires fewer pairing calls. If all messages are the same, the
-// function has the same behavior as VerifyBLSSignatureOneMessage. If there is a single input message
-// and a single input public key, the function has the same behavior as pk.Verify.
-// Membership check is performed on the input signature.
-// In order to avoid equivocation issues, any identity public key results in the overall
-// signature being invalid.
-//
-// The function returns:
-//  - (false, nilHasherError) if a hasher is nil
-//  - (false, invalidHasherSizeError) if a hasher's output size is not 128 bytes
-//  - (false, notBLSKeyError) if at least one key is not a BLS BLS12-381 key
-//  - (false, invalidInputsError) if the number of keys does not match the number of messages and hashers
-//  - (false, blsAggregateEmptyListError) if the input key slice `pks` is empty
-//  - (false, error) if an unexpected error occurs
-//  - (validity, nil) otherwise
-func VerifyBLSSignatureManyMessages(
-	pks []PublicKey, s Signature, messages [][]byte, kmac []hash.Hasher,
-) (bool, error) {
-	// set BLS context
-	blsInstance.reInit()
-
-	// check signature length
-	if len(s) != signatureLengthBLSBLS12381 {
-		return false, nil
-	}
-	// check the list lengths
-	if len(pks) == 0 {
-		return false, fmt.Errorf("invalid list of public keys: %w", blsAggregateEmptyListError)
-	}
-	if len(pks) != len(messages) || len(kmac) != len(messages) {
-		return false, invalidInputsErrorf(
-			"input lists must be equal, messages are %d, keys are %d, hashers are %d",
-			len(messages),
-			len(pks),
-			len(kmac))
-	}
-
-	// compute the hashes
-	hashes := make([][]byte, 0, len(messages))
-	for i, k := range kmac {
-		if err := checkBLSHasher(k); err != nil {
-			return false, fmt.Errorf("hasher at index %d is invalid: %w", i, err)
-		}
-		hashes = append(hashes, k.ComputeHash(messages[i]))
-	}
-
-	// two maps to count the type (keys or messages) with the fewest distinct elements.
-	// mapPerHash maps hashes to keys while mapPerPk maps keys to hashes.
-	// The comparison of the maps' lengths minimizes the number of pairings to
-	// compute by aggregating either public keys or the message hashes in
-	// the verification equation.
-	mapPerHash := make(map[string][]pointG2)
-	mapPerPk := make(map[pointG2][][]byte)
-	// Note: mapPerPk is using a cgo structure as map keys which may lead to 2 equal public keys
-	// being considered distinct. This does not make the verification equation wrong but leads to
-	// computing extra pairings. This case is considered unlikely to happen since a caller is likely
-	// to use the same struct for the same public key.
-	// One way to fix this is to use the public key encoding as the map keys and store the "pointG2"
-	// structure with the map value, which adds more complexity and processing time.
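-	// Hypothetical worked example (illustrative only): with keys [A, B, A] and
-	// messages hashing to [h1, h1, h2], mapPerHash = {h1: [A, B], h2: [A]} and
-	// mapPerPk = {A: [h1, h2], B: [h1]} both have 2 entries; a tie falls
-	// through to the per-distinct-key branch below.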
-
-	// fill the 2 maps
-	for i, pk := range pks {
-		pkBLS, ok := pk.(*pubKeyBLSBLS12381)
-		if !ok {
-			return false, fmt.Errorf(
-				"public key at index %d is invalid: %w",
-				i, notBLSKeyError)
-		}
-		// reject the identity key
-		if pkBLS.isIdentity {
-			return false, nil
-		}
-
-		mapPerHash[string(hashes[i])] = append(mapPerHash[string(hashes[i])], pkBLS.point)
-		mapPerPk[pkBLS.point] = append(mapPerPk[pkBLS.point], hashes[i])
-	}
-
-	var verif (C.int)
-	// compare the lengths of the 2 maps
-	if len(mapPerHash) < len(mapPerPk) {
-		// aggregate keys per distinct hashes
-		// using the linearity of the pairing on the G2 variables.
-		flatDistinctHashes := make([]byte, 0)
-		lenHashes := make([]uint32, 0)
-		pkPerHash := make([]uint32, 0, len(mapPerHash))
-		allPks := make([]pointG2, 0)
-		for hash, pksVal := range mapPerHash {
-			flatDistinctHashes = append(flatDistinctHashes, []byte(hash)...)
-			lenHashes = append(lenHashes, uint32(len([]byte(hash))))
-			pkPerHash = append(pkPerHash, uint32(len(pksVal)))
-			allPks = append(allPks, pksVal...)
-		}
-		verif = C.bls_verifyPerDistinctMessage(
-			(*C.uchar)(&s[0]),
-			(C.int)(len(mapPerHash)),
-			(*C.uchar)(&flatDistinctHashes[0]),
-			(*C.uint32_t)(&lenHashes[0]),
-			(*C.uint32_t)(&pkPerHash[0]),
-			(*C.ep2_st)(&allPks[0]),
-		)
-
-	} else {
-		// aggregate hashes per distinct key
-		// using the linearity of the pairing on the G1 variables.
-		distinctPks := make([]pointG2, 0, len(mapPerPk))
-		hashPerPk := make([]uint32, 0, len(mapPerPk))
-		flatHashes := make([]byte, 0)
-		lenHashes := make([]uint32, 0)
-		for pk, hashesVal := range mapPerPk {
-			distinctPks = append(distinctPks, pk)
-			hashPerPk = append(hashPerPk, uint32(len(hashesVal)))
-			for _, h := range hashesVal {
-				flatHashes = append(flatHashes, h...)
-				lenHashes = append(lenHashes, uint32(len(h)))
-			}
-		}
-
-		verif = C.bls_verifyPerDistinctKey(
-			(*C.uchar)(&s[0]),
-			(C.int)(len(mapPerPk)),
-			(*C.ep2_st)(&distinctPks[0]),
-			(*C.uint32_t)(&hashPerPk[0]),
-			(*C.uchar)(&flatHashes[0]),
-			(*C.uint32_t)(&lenHashes[0]))
-	}
-
-	switch verif {
-	case invalid:
-		return false, nil
-	case valid:
-		return true, nil
-	default:
-		return false, fmt.Errorf("signature verification failed")
-	}
-}
-
-// BatchVerifyBLSSignaturesOneMessage is a batch verification of multiple
-// BLS signatures of a single message against multiple BLS public keys. It
-// is faster than verifying the signatures one by one.
-//
-// Each signature at index (i) of the input signature slice is verified against
-// the public key at the same index (i) in the input key slice.
-// The input hasher is the same hasher used to generate all signatures.
-// The returned boolean slice is such that the value at index (i) is true
-// if signature (i) verifies against public key (i), and false otherwise.
-//
-// The caller must make sure the input public keys' proofs of possession have been
-// verified prior to calling this function (or that each input key is a sum of public
-// keys whose proofs of possession have been verified).
-//
-// Membership checks are performed on the input signatures but are not performed
-// on the input public keys (which are guaranteed by the package to be on the correct
-// G2 subgroup).
-// In order to avoid equivocation issues, any identity public key results in the corresponding
-// signature being invalid.
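-//
-// A hypothetical usage sketch (illustrative only; pks, sigs, message and
-// kmac as described above):
-//
-//	oks, err := BatchVerifyBLSSignaturesOneMessage(pks, sigs, message, kmac)
-//	// on success, oks[i] reports whether sigs[i] is a valid signature of
-//	// message under pks[i]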
-//
-// The function returns:
-//  - ([]false, nilHasherError) if the hasher is nil
-//  - ([]false, invalidHasherSizeError) if the hasher's output size is not 128 bytes
-//  - ([]false, notBLSKeyError) if at least one key is not of type BLS BLS12-381
-//  - ([]false, invalidInputsError) if the number of keys does not match the number of signatures
-//  - ([]false, blsAggregateEmptyListError) if the input key slice is empty
-//  - ([]false, error) if an unexpected error occurs
-//  - ([]validity, nil) otherwise
-func BatchVerifyBLSSignaturesOneMessage(
-	pks []PublicKey, sigs []Signature, message []byte, kmac hash.Hasher,
-) ([]bool, error) {
-	// set BLS context
-	blsInstance.reInit()
-
-	// empty list check
-	if len(pks) == 0 {
-		return []bool{}, fmt.Errorf("invalid list of public keys: %w", blsAggregateEmptyListError)
-	}
-
-	if len(pks) != len(sigs) {
-		return []bool{}, invalidInputsErrorf(
-			"keys length %d and signatures length %d are mismatching",
-			len(pks),
-			len(sigs))
-	}
-
-	verifBool := make([]bool, len(sigs))
-	if err := checkBLSHasher(kmac); err != nil {
-		return verifBool, err
-	}
-
-	// an invalid signature with an incorrect header but correct length
-	invalidSig := make([]byte, signatureLengthBLSBLS12381)
-	invalidSig[0] = invalidBLSSignatureHeader // incorrect header
-
-	// flatten the shares (required by the C layer)
-	flatSigs := make([]byte, 0, signatureLengthBLSBLS12381*len(sigs))
-	pkPoints := make([]pointG2, 0, len(pks))
-
-	for i, pk := range pks {
-		pkBLS, ok := pk.(*pubKeyBLSBLS12381)
-		if !ok {
-			return verifBool, fmt.Errorf("key at index %d is invalid: %w", i, notBLSKeyError)
-		}
-		pkPoints = append(pkPoints, pkBLS.point)
-
-		if len(sigs[i]) != signatureLengthBLSBLS12381 || pkBLS.isIdentity {
-			// force the signature to be invalid by replacing it with an invalid array
-			// that fails the deserialization in C.ep_read_bin_compact
-			flatSigs = append(flatSigs, invalidSig...)
-		} else {
-			flatSigs = append(flatSigs, sigs[i]...)
-		}
-	}
-
-	// hash the input to 128 bytes
-	h := kmac.ComputeHash(message)
-	verifInt := make([]byte, len(verifBool))
-
-	C.bls_batchVerify(
-		(C.int)(len(verifInt)),
-		(*C.uchar)(&verifInt[0]),
-		(*C.ep2_st)(&pkPoints[0]),
-		(*C.uchar)(&flatSigs[0]),
-		(*C.uchar)(&h[0]),
-		(C.int)(len(h)),
-	)
-
-	for i, v := range verifInt {
-		if (C.int)(v) != valid && (C.int)(v) != invalid {
-			return verifBool, fmt.Errorf("batch verification failed")
-		}
-		verifBool[i] = ((C.int)(v) == valid)
-	}
-
-	return verifBool, nil
-}
-
-// blsAggregateEmptyListError is returned when a list of BLS objects (e.g. signatures or keys)
-// is empty or nil and thereby represents an invalid input.
-var blsAggregateEmptyListError = errors.New("list cannot be empty")
-
-// IsBLSAggregateEmptyListError checks if err is a `blsAggregateEmptyListError`.
-// blsAggregateEmptyListError is returned when a BLS aggregation function is called with
-// an empty list, which is not allowed in some aggregation cases to avoid signature equivocation
-// issues.
-func IsBLSAggregateEmptyListError(err error) bool {
-	return errors.Is(err, blsAggregateEmptyListError)
-}
-
-// notBLSKeyError is returned when a private or public key
-// used is not a BLS on BLS12-381 key.
-var notBLSKeyError = errors.New("input key has to be a BLS on BLS12-381 key")
-
-// IsNotBLSKeyError checks if err is a `notBLSKeyError`.
-// notBLSKeyError is returned when a private or public key
-// used is not a BLS on BLS12-381 key.
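-//
-// A hypothetical caller-side sketch (illustrative only):
-//
-//	if _, err := AggregateBLSPublicKeys(keys); IsNotBLSKeyError(err) {
-//		// at least one input key is not a BLS BLS12-381 key
-//	}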
-func IsNotBLSKeyError(err error) bool {
-	return errors.Is(err, notBLSKeyError)
-}
-
-// invalidSignatureError is returned when a signature input does not serialize to a
-// valid element on E1 of the BLS12-381 curve (but without checking the element is in subgroup G1).
-var invalidSignatureError = errors.New("input signature does not deserialize to an E1 element")
-
-// IsInvalidSignatureError checks if err is an `invalidSignatureError`.
-// invalidSignatureError is returned when a signature input does not serialize to a
-// valid element on E1 of the BLS12-381 curve (but without checking the element is in subgroup G1).
-func IsInvalidSignatureError(err error) bool {
-	return errors.Is(err, invalidSignatureError)
-}
diff --git a/crypto/bls_no_relic.go b/crypto/bls_no_relic.go
deleted file mode 100644
index fed6c216398..00000000000
--- a/crypto/bls_no_relic.go
+++ /dev/null
@@ -1,156 +0,0 @@
-//go:build !relic
-// +build !relic
-
-package crypto
-
-import (
-	"github.com/onflow/flow-go/crypto/hash"
-)
-
-// The functions below are the non-Relic versions of the public APIs
-// requiring the Relic library.
-// All BLS functionalities in the package require the Relic dependency,
-// and therefore the "relic" build tag.
-// Building without the "relic" tag is successful, but calling one of the
-// BLS functions results in a runtime panic. This allows projects depending on the
-// crypto library to build successfully with or without the "relic" tag.
-
-const relic_panic = "function is not supported when building without \"relic\" Go build tag"
-
-const (
-	SignatureLenBLSBLS12381 = 48
-)
-
-// bls.go functions
-func NewExpandMsgXOFKMAC128(tag string) hash.Hasher {
-	panic(relic_panic)
-}
-
-func BLSInvalidSignature() Signature {
-	panic(relic_panic)
-}
-
-// bls_multisig.go functions
-func BLSGeneratePOP(sk PrivateKey) (Signature, error) {
-	panic(relic_panic)
-}
-
-func BLSVerifyPOP(pk PublicKey, s Signature) (bool, error) {
-	panic(relic_panic)
-}
-
-func AggregateBLSSignatures(sigs []Signature) (Signature, error) {
-	panic(relic_panic)
-}
-
-func AggregateBLSPrivateKeys(keys []PrivateKey) (PrivateKey, error) {
-	panic(relic_panic)
-}
-
-func AggregateBLSPublicKeys(keys []PublicKey) (PublicKey, error) {
-	panic(relic_panic)
-}
-
-func IdentityBLSPublicKey() PublicKey {
-	panic(relic_panic)
-}
-
-func IsBLSAggregateEmptyListError(err error) bool {
-	panic(relic_panic)
-}
-
-func IsInvalidSignatureError(err error) bool {
-	panic(relic_panic)
-}
-
-func IsNotBLSKeyError(err error) bool {
-	panic(relic_panic)
-}
-
-func IsBLSSignatureIdentity(s Signature) bool {
-	panic(relic_panic)
-}
-
-func RemoveBLSPublicKeys(aggKey PublicKey, keysToRemove []PublicKey) (PublicKey, error) {
-	panic(relic_panic)
-}
-
-func VerifyBLSSignatureOneMessage(pks []PublicKey, s Signature,
-	message []byte, kmac hash.Hasher) (bool, error) {
-	panic(relic_panic)
-}
-
-func VerifyBLSSignatureManyMessages(pks []PublicKey, s Signature,
-	messages [][]byte, kmac []hash.Hasher) (bool, error) {
-	panic(relic_panic)
-}
-
-func BatchVerifyBLSSignaturesOneMessage(pks []PublicKey, sigs []Signature,
-	message []byte, kmac hash.Hasher) ([]bool, error) {
-	panic(relic_panic)
-}
-
-func SPOCKProve(sk PrivateKey, data []byte, kmac hash.Hasher) (Signature, error) {
-	panic(relic_panic)
-}
-
-func SPOCKVerifyAgainstData(pk PublicKey, proof Signature, data []byte, kmac hash.Hasher) (bool, error) {
-	panic(relic_panic)
-}
-
-func SPOCKVerify(pk1 PublicKey, proof1 Signature, pk2 PublicKey, proof2 Signature) (bool, error) {
-	panic(relic_panic)
-}
-
-// bls_threshold.go functions
-func NewBLSThresholdSignatureParticipant(
-	groupPublicKey PublicKey,
-	sharePublicKeys []PublicKey,
-	threshold int,
-	myIndex int,
-	myPrivateKey PrivateKey,
-	message []byte,
-	dsTag string,
-) (ThresholdSignatureParticipant, error) {
-	panic(relic_panic)
-}
-
-func NewBLSThresholdSignatureInspector(
-	groupPublicKey PublicKey,
-	sharePublicKeys []PublicKey,
-	threshold int,
-	message []byte,
-	dsTag string,
-) (ThresholdSignatureInspector, error) {
-	panic(relic_panic)
-}
-
-func BLSReconstructThresholdSignature(size int, threshold int,
-	shares []Signature, signers []int) (Signature, error) {
-	panic(relic_panic)
-}
-
-func EnoughShares(threshold int, sharesNumber int) (bool, error) {
-	panic(relic_panic)
-}
-
-func BLSThresholdKeyGen(size int, threshold int, seed []byte) ([]PrivateKey,
-	[]PublicKey, PublicKey, error) {
-	panic(relic_panic)
-}
-
-// dkg.go functions
-func NewFeldmanVSS(size int, threshold int, myIndex int,
-	processor DKGProcessor, dealerIndex int) (DKGState, error) {
-	panic(relic_panic)
-}
-
-func NewFeldmanVSSQual(size int, threshold int, myIndex int,
-	processor DKGProcessor, dealerIndex int) (DKGState, error) {
-	panic(relic_panic)
-}
-
-func NewJointFeldman(size int, threshold int, myIndex int,
-	processor DKGProcessor) (DKGState, error) {
-	panic(relic_panic)
-}
diff --git a/crypto/bls_no_relic_test.go b/crypto/bls_no_relic_test.go
deleted file mode 100644
index 47f8120060f..00000000000
--- a/crypto/bls_no_relic_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-//go:build !relic
-// +build !relic
-
-package crypto
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-// Test for all public APIs requiring the relic build tag.
-// These functions should panic if built without the relic tag.
-func TestNoRelicPanic(t *testing.T) {
-	assert.PanicsWithValue(t, relic_panic, func() { NewExpandMsgXOFKMAC128("") })
-	assert.PanicsWithValue(t, relic_panic, func() { BLSInvalidSignature() })
-	assert.PanicsWithValue(t, relic_panic, func() { BLSGeneratePOP(nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { BLSVerifyPOP(nil, nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { AggregateBLSSignatures(nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { AggregateBLSPrivateKeys(nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { AggregateBLSPublicKeys(nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { IdentityBLSPublicKey() })
-	assert.PanicsWithValue(t, relic_panic, func() { IsBLSAggregateEmptyListError(nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { IsInvalidSignatureError(nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { IsNotBLSKeyError(nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { IsBLSSignatureIdentity(nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { RemoveBLSPublicKeys(nil, nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { VerifyBLSSignatureOneMessage(nil, nil, nil, nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { VerifyBLSSignatureManyMessages(nil, nil, nil, nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { BatchVerifyBLSSignaturesOneMessage(nil, nil, nil, nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { SPOCKProve(nil, nil, nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { SPOCKVerify(nil, nil, nil, nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { SPOCKVerifyAgainstData(nil, nil, nil, nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { NewBLSThresholdSignatureParticipant(nil, nil, 0, 0, nil, nil, "") })
-	assert.PanicsWithValue(t, relic_panic, func() { NewBLSThresholdSignatureInspector(nil, nil, 0, nil, "") })
-	assert.PanicsWithValue(t, relic_panic, func() { BLSReconstructThresholdSignature(0, 0, nil, nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { EnoughShares(0, 0) })
-	assert.PanicsWithValue(t, relic_panic, func() { BLSThresholdKeyGen(0, 0, nil) })
-	assert.PanicsWithValue(t, relic_panic, func() { NewFeldmanVSS(0, 0, 0, nil, 0) })
-	assert.PanicsWithValue(t, relic_panic, func() { NewFeldmanVSSQual(0, 0, 0, nil, 0) })
-	assert.PanicsWithValue(t, relic_panic, func() { NewJointFeldman(0, 0, 0, nil) })
-}
diff --git a/crypto/bls_test.go b/crypto/bls_test.go
deleted file mode 100644
index adb02d02a29..00000000000
--- a/crypto/bls_test.go
+++ /dev/null
@@ -1,1125 +0,0 @@
-//go:build relic
-// +build relic
-
-package crypto
-
-import (
-	crand "crypto/rand"
-	"encoding/hex"
-	"fmt"
-	mrand "math/rand"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"github.com/onflow/flow-go/crypto/hash"
-)
-
-// TestBLSMainMethods is a sanity check of main signature scheme methods (keyGen, sign, verify)
-func TestBLSMainMethods(t *testing.T) {
-	// test the key generation seed lengths
-	testKeyGenSeed(t, BLSBLS12381, KeyGenSeedMinLen, KeyGenSeedMaxLen)
-	// test the consistency with different inputs
-	hasher := NewExpandMsgXOFKMAC128("test tag")
-	testGenSignVerify(t, BLSBLS12381, hasher)
-
-	// specific signature test for BLS:
-	// Test a signature with a point encoded with a coordinate x not reduced mod p.
-	// The same signature point with the x coordinate reduced passes verification.
-	// This test checks that:
-	//  - signature decoding handles input x-coordinates larger than p (doesn't result in an exception)
-	//  - signature decoding only accepts reduced x-coordinates to avoid signature malleability
-	t.Run("invalid x coordinate larger than p", func(t *testing.T) {
-		msg, err := hex.DecodeString("7f26ba692dc2da7ff828ef4675ff1cd6ab855fca0637b6dab295f1df8e51bc8bb1b8f0c6610aabd486cf1f098f2ddbc6691d94e10f928816f890a3d366ce46249836a595c7ea1828af52e899ba2ab627ab667113bb563918c5d5a787c414399487b4e3a7")
-		require.NoError(t, err)
-		validSig, err := hex.DecodeString("80b0cac2a0f4f8881913edf2b29065675dfed6f6f4e17e9b5d860a845d4e7d476b277d06a493b81482e63d8131f9f2fa")
-		require.NoError(t, err)
-		invalidSig, err := hex.DecodeString("9AB1DCACDA74DF22642F95A8F5DC123EC276227BE866915AC4B6DD2553FF736B89D37D0555E7B8143CE53D8131F99DA5")
-		require.NoError(t, err)
-		pkBytes, err := hex.DecodeString("a7ac85ac8ffd9d2611f73721a93ec92115f29d769dfa425fec2e2c26ab3e4e8089a961ab430639104262723e829b75e9190a05d8fc8d22a7ac78a18473cc3df146b5c4c9c8e46d5f208039384fe2fc018321f14c01641c3afff7558a2eb06463")
-		require.NoError(t, err)
-		pk, err := DecodePublicKey(BLSBLS12381, pkBytes)
-		require.NoError(t, err)
-		// sanity check of valid signature (P_x < p)
-		valid, err := pk.Verify(validSig, msg, hasher)
-		require.NoError(t, err)
-		require.True(t, valid)
-		// invalid signature (P'_x = P_x + p)
-		valid, err = pk.Verify(invalidSig, msg, hasher)
-		require.NoError(t, err)
-		assert.False(t, valid)
-	})
-
-	t.Run("private key equal to 1 and -1", func(t *testing.T) {
-		sk1Bytes := make([]byte, PrKeyLenBLSBLS12381)
-		sk1Bytes[PrKeyLenBLSBLS12381-1] = 1
-		sk1, err := DecodePrivateKey(BLSBLS12381, sk1Bytes)
-		require.NoError(t, err)
-
-		skMinus1Bytes := make([]byte, PrKeyLenBLSBLS12381)
-		copy(skMinus1Bytes, BLS12381Order)
-		skMinus1Bytes[PrKeyLenBLSBLS12381-1] -= 1
-		skMinus1, err := DecodePrivateKey(BLSBLS12381, skMinus1Bytes)
-		require.NoError(t, err)
-
-		for _, sk := range []PrivateKey{sk1, skMinus1} {
-			input := make([]byte, 100)
-			_, err = crand.Read(input)
-			require.NoError(t, err)
-			s, err := sk.Sign(input, hasher)
-			require.NoError(t, err)
-			pk := sk.PublicKey()
-
-			// test a valid signature
-			result, err := pk.Verify(s, input, hasher)
-			assert.NoError(t, err)
-			assert.True(t, result,
-				"Verification should succeed:\n signature:%s\n message:%x\n private key:%s", s, input, sk)
-		}
-	})
-}
-
-// Signing bench
-func BenchmarkBLSBLS12381Sign(b *testing.B) {
-	halg := NewExpandMsgXOFKMAC128("bench tag")
-	benchSign(b, BLSBLS12381, halg)
-}
-
-// Verifying bench
-func BenchmarkBLSBLS12381Verify(b *testing.B) {
-	halg := NewExpandMsgXOFKMAC128("bench tag")
-	benchVerify(b, BLSBLS12381, halg)
-}
-
-// utility function to generate a random BLS private key
-func randomSK(t *testing.T, rand *mrand.Rand) PrivateKey {
-	seed := make([]byte, KeyGenSeedMinLen)
-	n, err := rand.Read(seed)
-	require.Equal(t, n, KeyGenSeedMinLen)
-	require.NoError(t, err)
-	sk, err := GeneratePrivateKey(BLSBLS12381, seed)
-	require.NoError(t, err)
-	return sk
-}
-
-// utility function to generate a non-BLS private key
-func invalidSK(t *testing.T) PrivateKey {
-	seed := make([]byte, KeyGenSeedMinLen)
-	n, err := crand.Read(seed)
-	require.Equal(t, n, KeyGenSeedMinLen)
-	require.NoError(t, err)
-	sk, err := GeneratePrivateKey(ECDSAP256, seed)
-	require.NoError(t, err)
-	return sk
-}
-
-// BLS tests
-func TestBLSBLS12381Hasher(t *testing.T) {
-	rand := getPRG(t)
-	// generate a key pair
-	sk := randomSK(t, rand)
-	sig := make([]byte, SignatureLenBLSBLS12381)
-	msg := []byte("message")
-
-	// empty hasher
-	t.Run("Empty hasher", func(t *testing.T) {
-		_, err := sk.Sign(msg, nil)
-		assert.Error(t, err)
-		assert.True(t, IsNilHasherError(err))
-		_, err = sk.PublicKey().Verify(sig, msg, nil)
-		assert.Error(t, err)
-		assert.True(t, IsNilHasherError(err))
-	})
-
-	// short size hasher
-	t.Run("short size hasher", func(t *testing.T) {
-		s, err := sk.Sign(msg, hash.NewSHA2_256())
-		assert.Error(t, err)
-		assert.True(t, IsInvalidHasherSizeError(err))
-		assert.Nil(t, s)
-
-		valid, err := sk.PublicKey().Verify(sig, msg, hash.NewSHA2_256())
-		assert.Error(t, err)
-		assert.True(t, IsInvalidHasherSizeError(err))
-		assert.False(t, valid)
-	})
-
-	t.Run("NewExpandMsgXOFKMAC128 sanity check", func(t *testing.T) {
-		// test that the parameter lengths of NewExpandMsgXOFKMAC128 are in the correct range
-		// h would be nil if the kmac inputs were invalid
-		h := internalExpandMsgXOFKMAC128(blsSigCipherSuite)
-		assert.NotNil(t, h)
-	})
-
-	t.Run("constants sanity check", func(t *testing.T) {
-		// test that the ciphersuites exceed 16 bytes as per draft-irtf-cfrg-hash-to-curve
-		// The tags used by internalExpandMsgXOFKMAC128 are at least len(ciphersuite) long
-		assert.GreaterOrEqual(t, len(blsSigCipherSuite), 16)
-		assert.GreaterOrEqual(t, len(blsPOPCipherSuite), 16)
-	})
-
-	t.Run("orthogonal PoP and signature hashing", func(t *testing.T) {
-		data := []byte("random_data")
-		// empty tag hasher
-		sigKmac := NewExpandMsgXOFKMAC128("")
-		h1 := sigKmac.ComputeHash(data)
-
-		// PoP hasher
-		h2 := popKMAC.ComputeHash(data)
-		assert.NotEqual(t, h1, h2)
-	})
-
-}
-
-// TestBLSEncodeDecode tests encoding and decoding of BLS keys
-func TestBLSEncodeDecode(t *testing.T) {
-	// generic tests
-	testEncodeDecode(t, BLSBLS12381)
-
-	// specific tests for BLS
-
-	// zero private key
-	skBytes := make([]byte, PrKeyLenBLSBLS12381)
-	sk, err := DecodePrivateKey(BLSBLS12381, skBytes)
-	require.Error(t, err, "decoding identity private key should fail")
-	assert.True(t, IsInvalidInputsError(err))
-	assert.Nil(t, sk)
-
-	// identity public key
-	pkBytes := make([]byte, PubKeyLenBLSBLS12381)
-	pkBytes[0] = infinityPointHeader
-	pk, err := DecodePublicKey(BLSBLS12381, pkBytes)
-	require.NoError(t, err, "decoding identity public key should succeed")
-	assert.True(t, pk.Equals(IdentityBLSPublicKey()))
-
-	// invalid point
-	pkBytes = make([]byte, PubKeyLenBLSBLS12381)
-	pkBytes[0] = invalidBLSSignatureHeader
-	pk, err = DecodePublicKey(BLSBLS12381, pkBytes)
-	require.Error(t, err, "the key decoding should fail - key value is invalid")
-	assert.True(t, IsInvalidInputsError(err))
-	assert.Nil(t, pk)
-
-	// Test a public key serialization with a point encoded with a coordinate x with
-	// x[0] or x[1] not reduced mod p.
-	// The same public key point with x[0] and x[1] reduced passes decoding.
-	// This test checks that:
-	//  - public key decoding handles input x-coordinates with x[0] and x[1] larger than p (doesn't result in an exception)
-	//  - public key decoding only accepts reduced x[0] and x[1] to ensure key serialization uniqueness.
-	// Although uniqueness of public key representation isn't a security property, some implementations
-	// may implicitly rely on the property.
-
-	// valid pk with x[0] < p and x[1] < p
-	validPk, err := hex.DecodeString("818d72183e3e908af5bd6c2e37494c749b88f0396d3fbc2ba4d9ea28f1c50d1c6a540ec8fe06b6d860f72ec9363db3b8038360809700d36d761cb266af6babe9a069dc7364d3502e84536bd893d5f09ec2dd4f07cae1f8a178ffacc450f9b9a2")
-	require.NoError(t, err)
-	_, err = DecodePublicKey(BLSBLS12381, validPk)
-	assert.NoError(t, err)
-	// invalidPk1 with x[0]+p and the same x[1]
-	invalidPk1, err := hex.DecodeString("9B8E840277BE772540D913E47A94F94C00003BBE60C4CEEB0C0ABCC9E876034089000EC7AF5AB6D81AF62EC9363D5E63038360809700d36d761cb266af6babe9a069dc7364d3502e84536bd893d5f09ec2dd4f07cae1f8a178ffacc450f9b9a2")
-	require.NoError(t, err)
-	_, err = DecodePublicKey(BLSBLS12381, invalidPk1)
-	assert.Error(t, err)
-	// invalidPk2 with the same x[0] and x[1]+p
-	invalidPk2, err := hex.DecodeString("818d72183e3e908af5bd6c2e37494c749b88f0396d3fbc2ba4d9ea28f1c50d1c6a540ec8fe06b6d860f72ec9363db3b81D84726AD080BA07C1385A1CF2B758C104E127F8585862EDEB843E798A86E6C2E1894F067C35F8A132FEACC450F9644D")
-	require.NoError(t, err)
-	_, err = DecodePublicKey(BLSBLS12381, invalidPk2)
-	assert.Error(t, err)
-}
-
-// TestBLSEquals tests the Equals method for BLS keys
-func TestBLSEquals(t *testing.T) {
-	testEquals(t, BLSBLS12381, ECDSAP256)
-}
-
-// TestBLSUtils tests some utility functions
-func TestBLSUtils(t *testing.T) {
-	rand := getPRG(t)
-	// generate a key pair
-	sk := randomSK(t, rand)
-	// test Algorithm()
-	testKeysAlgorithm(t, sk, BLSBLS12381)
-	// test Size()
-	testKeySize(t, sk, PrKeyLenBLSBLS12381, PubKeyLenBLSBLS12381)
-}
-
-// BLS Proof of Possession test
-func TestBLSPOP(t *testing.T) {
-	rand := getPRG(t)
-	seed := make([]byte, KeyGenSeedMinLen)
-	input := make([]byte, 100)
-
-	t.Run("PoP tests", func(t *testing.T) {
-		loops := 10
-		for j := 0; j < loops; j++ {
-			n, err := rand.Read(seed)
-			require.Equal(t, n, KeyGenSeedMinLen)
-			require.NoError(t, err)
-			sk, err := GeneratePrivateKey(BLSBLS12381, seed)
-			require.NoError(t, err)
-			_, err = rand.Read(input)
-			require.NoError(t, err)
-			s, err := BLSGeneratePOP(sk)
-			require.NoError(t, err)
-			pk := sk.PublicKey()
-
-			// test a valid PoP
-			result, err := BLSVerifyPOP(pk, s)
-			require.NoError(t, err)
-			assert.True(t, result, "Verification should succeed:\n signature:%s\n private key:%s", s, sk)
-
-			// test with a valid but different key
-			seed[0] ^= 1
-			wrongSk, err := GeneratePrivateKey(BLSBLS12381, seed)
-			require.NoError(t, err)
-			result, err = BLSVerifyPOP(wrongSk.PublicKey(), s)
-			require.NoError(t, err)
-			assert.False(t, result, "Verification should fail:\n signature:%s\n private key:%s", s, sk)
-		}
-	})
-
-	t.Run("invalid inputs", func(t *testing.T) {
-		// ecdsa key
-		sk := invalidSK(t)
-		s, err := BLSGeneratePOP(sk)
-		assert.True(t, IsNotBLSKeyError(err))
-		assert.Nil(t, s)
-
-		s = make([]byte, SignatureLenBLSBLS12381)
-		result, err := BLSVerifyPOP(sk.PublicKey(), s)
-		assert.True(t, IsNotBLSKeyError(err))
-		assert.False(t, result)
-	})
-}
-
-// BLS multi-signature
-// signature aggregation sanity check
-//
-// Aggregate n signatures of the same message under different keys, and compare
-// it against the signature of the message under an aggregated private key.
-// Verify the aggregated signature using the multi-signature verification with
-// one message.
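-//
-// A hypothetical outline of the property exercised below (illustrative only):
-//
-//	aggSk, _ := AggregateBLSPrivateKeys(sks)
-//	expected, _ := aggSk.Sign(input, kmac)
-//	aggSig, _ := AggregateBLSSignatures(sigs) // sigs[i] signs input under sks[i]
-//	// aggSig equals expected and verifies under pks via
-//	// VerifyBLSSignatureOneMessage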
-func TestBLSAggregateSignatures(t *testing.T) {
-	rand := getPRG(t)
-	// random message
-	input := make([]byte, 100)
-	_, err := rand.Read(input)
-	require.NoError(t, err)
-	// hasher
-	kmac := NewExpandMsgXOFKMAC128("test tag")
-	// number of signatures to aggregate
-	sigsNum := mrand.Intn(100) + 1
-	sigs := make([]Signature, 0, sigsNum)
-	sks := make([]PrivateKey, 0, sigsNum)
-	pks := make([]PublicKey, 0, sigsNum)
-	var aggSig, expectedSig Signature
-
-	// create the signatures
-	for i := 0; i < sigsNum; i++ {
-		sk := randomSK(t, rand)
-		s, err := sk.Sign(input, kmac)
-		require.NoError(t, err)
-		sigs = append(sigs, s)
-		sks = append(sks, sk)
-		pks = append(pks, sk.PublicKey())
-	}
-
-	// all signatures are valid
-	t.Run("all valid signatures", func(t *testing.T) {
-		// aggregate private keys
-		aggSk, err := AggregateBLSPrivateKeys(sks)
-		require.NoError(t, err)
-		// assign the outer variables (avoid shadowing with `:=`, so later subtests can use them)
-		expectedSig, err = aggSk.Sign(input, kmac)
-		require.NoError(t, err)
-		// aggregate signatures
-		aggSig, err = AggregateBLSSignatures(sigs)
-		require.NoError(t, err)
-		// First check: check the signatures are equal
-		assert.Equal(t, aggSig, expectedSig,
-			"incorrect signature %s, should be %s, private keys are %s, input is %x",
-			aggSig, expectedSig, sks, input)
-		// Second check: verify the aggregated signature
-		valid, err := VerifyBLSSignatureOneMessage(pks, aggSig, input, kmac)
-		require.NoError(t, err)
-		assert.True(t, valid,
-			"Verification of %s failed, signature should be %s private keys are %s, input is %x",
-			aggSig, expectedSig, sks, input)
-	})
-
-	// check if one signature is not correct
-	t.Run("one invalid signature", func(t *testing.T) {
-		input[0] ^= 1
-		randomIndex := mrand.Intn(sigsNum)
-		sigs[randomIndex], err = sks[randomIndex].Sign(input, kmac)
-		input[0] ^= 1
-		aggSig, err = AggregateBLSSignatures(sigs)
-		require.NoError(t, err)
-		assert.NotEqual(t, aggSig, expectedSig,
-			"signature %s shouldn't be %s private keys are %s, input is %x",
-			aggSig, expectedSig, sks, input)
-		valid, err := VerifyBLSSignatureOneMessage(pks, aggSig, input, kmac)
-		require.NoError(t, err)
-		assert.False(t, valid,
-			"verification of signature %s should fail, it shouldn't be %s private keys are %s, input is %x",
-			aggSig, expectedSig, sks, input)
-		sigs[randomIndex], err = sks[randomIndex].Sign(input, kmac)
-		require.NoError(t, err)
-	})
-
-	// check if one of the public keys is not correct
-	t.Run("one invalid public key", func(t *testing.T) {
-		randomIndex := mrand.Intn(sigsNum)
-		newSk := randomSK(t, rand)
-		sks[randomIndex] = newSk
-		pks[randomIndex] = newSk.PublicKey()
-		aggSk, err := AggregateBLSPrivateKeys(sks)
-		require.NoError(t, err)
-		expectedSig, err = aggSk.Sign(input, kmac)
-		require.NoError(t, err)
-		assert.NotEqual(t, aggSig, expectedSig,
-			"signature %s shouldn't be %s, private keys are %s, input is %x, wrong key is of index %d",
-			aggSig, expectedSig, sks, input, randomIndex)
-		valid, err := VerifyBLSSignatureOneMessage(pks, aggSig, input, kmac)
-		require.NoError(t, err)
-		assert.False(t, valid,
-			"signature %s should fail, shouldn't be %s, private keys are %s, input is %x, wrong key is of index %d",
-			aggSig, expectedSig, sks, input, randomIndex)
-	})
-
-	t.Run("invalid inputs", func(t *testing.T) {
-		// test aggregating an empty signature list
-		aggSig, err = AggregateBLSSignatures(sigs[:0])
-		assert.Error(t, err)
-		assert.True(t, IsBLSAggregateEmptyListError(err))
-		assert.Nil(t, aggSig)
-
-		// test verification with an empty key list
-		result, err := VerifyBLSSignatureOneMessage(pks[:0], aggSig, input, kmac)
-		assert.Error(t, err)
-		assert.True(t, IsBLSAggregateEmptyListError(err))
-		assert.False(t, result)
-
-		// test with a signature of a wrong length
-		shortSig := sigs[0][:signatureLengthBLSBLS12381-1]
-		aggSig, err = AggregateBLSSignatures([]Signature{shortSig})
-		assert.Error(t, err)
-		assert.True(t, IsInvalidSignatureError(err))
-		assert.Nil(t, aggSig)
-
-		// test with an invalid signature of a correct length
-		invalidSig := BLSInvalidSignature()
-		aggSig, err = AggregateBLSSignatures([]Signature{invalidSig})
-		assert.Error(t, err)
-		assert.True(t, IsInvalidSignatureError(err))
-		assert.Nil(t, aggSig)
-
-		// test the empty key list
-		aggSk, err := AggregateBLSPrivateKeys(sks[:0])
-		assert.Error(t, err)
-		assert.True(t, IsBLSAggregateEmptyListError(err))
-		assert.Nil(t, aggSk)
-
-		// test with an invalid key type
-		sk := invalidSK(t)
-		aggSk, err = AggregateBLSPrivateKeys([]PrivateKey{sk})
-		assert.Error(t, err)
-		assert.True(t, IsNotBLSKeyError(err))
-		assert.Nil(t, aggSk)
-	})
-}
-
-// BLS multi-signature
-// public keys aggregation sanity check
-//
-// Aggregate n public keys and their respective private keys, and check that
-// the public key of the aggregated private key is equal to the aggregated
-// public key.
-func TestBLSAggregatePubKeys(t *testing.T) {
-	rand := getPRG(t)
-	// number of keys to aggregate
-	pkNum := mrand.Intn(100) + 1
-	pks := make([]PublicKey, 0, pkNum)
-	sks := make([]PrivateKey, 0, pkNum)
-
-	// create the keys
-	for i := 0; i < pkNum; i++ {
-		sk := randomSK(t, rand)
-		sks = append(sks, sk)
-		pks = append(pks, sk.PublicKey())
-	}
-
-	// consistent private and public key aggregation
-	t.Run("correctness check", func(t *testing.T) {
-		// aggregate private keys
-		aggSk, err := AggregateBLSPrivateKeys(sks)
-		require.NoError(t, err)
-		expectedPk := aggSk.PublicKey()
-		// aggregate public keys
-		aggPk, err := AggregateBLSPublicKeys(pks)
-		assert.NoError(t, err)
-		assert.True(t, expectedPk.Equals(aggPk),
-			"incorrect public key %s, should be %s, public keys are %s",
-			aggPk, expectedPk, pks)
-	})
-
-	// aggregate an empty list
-	t.Run("empty list", func(t *testing.T) {
-		// private keys
-		aggSk, err := AggregateBLSPrivateKeys(sks[:0])
-		assert.Error(t, err)
-		assert.True(t, IsBLSAggregateEmptyListError(err))
-		assert.Nil(t, aggSk)
-		// public keys
-		aggPk, err := AggregateBLSPublicKeys(pks[:0])
-		assert.Error(t, err)
-		assert.True(t, IsBLSAggregateEmptyListError(err))
-		assert.Nil(t, aggPk)
-	})
-
-	// aggregate a list that includes the identity key,
-	// to check that the identity key is indeed the identity element with regard to aggregation.
- t.Run("aggregate a list that includes the identity key", func(t *testing.T) { - // aggregate the identity key with a non identity key - keys := []PublicKey{pks[0], IdentityBLSPublicKey()} - aggPkWithIdentity, err := AggregateBLSPublicKeys(keys) - assert.NoError(t, err) - assert.True(t, aggPkWithIdentity.Equals(pks[0]), - "incorrect public key %s, should be %s", - aggPkWithIdentity, pks[0]) - }) - - t.Run("invalid inputs", func(t *testing.T) { - // empty list - aggPK, err := AggregateBLSPublicKeys(pks[:0]) - assert.Error(t, err) - assert.True(t, IsBLSAggregateEmptyListError(err)) - assert.Nil(t, aggPK) - - // test with an invalid key type - pk := invalidSK(t).PublicKey() - aggPK, err = AggregateBLSPublicKeys([]PublicKey{pk}) - assert.Error(t, err) - assert.True(t, IsNotBLSKeyError(err)) - assert.Nil(t, aggPK) - }) - - // check that the public key corresponding to the zero private key is indeed identity - // The package doesn't allow to generate a zero private key. One way to obtain a zero - // private key is via aggrgeting opposite private keys - t.Run("public key of zero private key", func(t *testing.T) { - // sk1 is group order of bls12-381 minus one - groupOrderMinus1 := []byte{0x73, 0xED, 0xA7, 0x53, 0x29, 0x9D, 0x7D, 0x48, 0x33, 0x39, - 0xD8, 0x08, 0x09, 0xA1, 0xD8, 0x05, 0x53, 0xBD, 0xA4, 0x02, 0xFF, 0xFE, - 0x5B, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00} - sk1, err := DecodePrivateKey(BLSBLS12381, groupOrderMinus1) - require.NoError(t, err) - // sk2 is 1 - one := make([]byte, PrKeyLenBLSBLS12381) - one[PrKeyLenBLSBLS12381-1] = 1 - sk2, err := DecodePrivateKey(BLSBLS12381, one) - require.NoError(t, err) - aggSK, err := AggregateBLSPrivateKeys([]PrivateKey{sk1, sk2}) - require.NoError(t, err) - assert.True(t, aggSK.PublicKey().Equals(IdentityBLSPublicKey())) - }) -} - -// BLS multi-signature -// public keys removal sanity check -func TestBLSRemovePubKeys(t *testing.T) { - rand := getPRG(t) - // number of keys to aggregate - pkNum := mrand.Intn(100) + 1 - pks := make([]PublicKey, 0, pkNum) - - // generate public keys - for i := 0; i < pkNum; i++ { - sk := randomSK(t, rand) - pks = append(pks, sk.PublicKey()) - } - // aggregate public keys - aggPk, err := AggregateBLSPublicKeys(pks) - require.NoError(t, err) - - // random number of keys to remove (at least one key is left) - pkToRemoveNum := mrand.Intn(pkNum) - expectedPatrialPk, err := AggregateBLSPublicKeys(pks[pkToRemoveNum:]) - require.NoError(t, err) - - // check correctness - t.Run("equality check", func(t *testing.T) { - partialPk, err := RemoveBLSPublicKeys(aggPk, pks[:pkToRemoveNum]) - require.NoError(t, err) - - BLSkey, ok := expectedPatrialPk.(*pubKeyBLSBLS12381) - require.True(t, ok) - - assert.True(t, BLSkey.Equals(partialPk), - "incorrect key %s, should be %s, keys are %s, index is %d", - partialPk, BLSkey, pks, pkToRemoveNum) - }) - - // remove an extra key and check inequality - t.Run("inequality check", func(t *testing.T) { - extraPk := randomSK(t, rand).PublicKey() - partialPk, err := RemoveBLSPublicKeys(aggPk, []PublicKey{extraPk}) - assert.NoError(t, err) - - BLSkey, ok := expectedPatrialPk.(*pubKeyBLSBLS12381) - require.True(t, ok) - assert.False(t, BLSkey.Equals(partialPk), - "incorrect key %s, should not be %s, keys are %s, index is %d, extra key is %s", - partialPk, BLSkey, pks, pkToRemoveNum, extraPk) - }) - - // specific test to remove all keys - t.Run("remove all keys", func(t *testing.T) { - identityPk, err := RemoveBLSPublicKeys(aggPk, pks) - require.NoError(t, err) - // identity public key is 
-		randomPk := randomSK(t, rand).PublicKey()
-		randomPkPlusIdentityPk, err := AggregateBLSPublicKeys([]PublicKey{randomPk, identityPk})
-		require.NoError(t, err)
-
-		BLSRandomPk, ok := randomPk.(*pubKeyBLSBLS12381)
-		require.True(t, ok)
-
-		assert.True(t, BLSRandomPk.Equals(randomPkPlusIdentityPk),
-			"incorrect key %s, should be the infinity point, keys are %s",
-			identityPk, pks)
-	})
-
-	// specific test with an empty slice of keys to remove
-	t.Run("remove empty list", func(t *testing.T) {
-		partialPk, err := RemoveBLSPublicKeys(aggPk, []PublicKey{})
-		require.NoError(t, err)
-
-		aggBLSkey, ok := aggPk.(*pubKeyBLSBLS12381)
-		require.True(t, ok)
-
-		assert.True(t, aggBLSkey.Equals(partialPk),
-			"incorrect key %s, should be %s",
-			partialPk, aggBLSkey)
-	})
-
-	t.Run("invalid inputs", func(t *testing.T) {
-		pk := invalidSK(t).PublicKey()
-		partialPk, err := RemoveBLSPublicKeys(pk, pks)
-		assert.Error(t, err)
-		assert.True(t, IsNotBLSKeyError(err))
-		assert.Nil(t, partialPk)
-
-		partialPk, err = RemoveBLSPublicKeys(aggPk, []PublicKey{pk})
-		assert.Error(t, err)
-		assert.True(t, IsNotBLSKeyError(err))
-		assert.Nil(t, partialPk)
-	})
-}
-
-// BLS multi-signature
-// batch verification
-//
-// Verify n signatures of the same message under different keys using the fast
-// batch verification technique, and compare the result to verifying each signature
-// separately.
-func TestBLSBatchVerify(t *testing.T) {
-	rand := getPRG(t)
-	// random message
-	input := make([]byte, 100)
-	_, err := rand.Read(input)
-	require.NoError(t, err)
-	// hasher
-	kmac := NewExpandMsgXOFKMAC128("test tag")
-	// number of signatures to aggregate
-	sigsNum := rand.Intn(100) + 2
-	sigs := make([]Signature, 0, sigsNum)
-	sks := make([]PrivateKey, 0, sigsNum)
-	pks := make([]PublicKey, 0, sigsNum)
-	expectedValid := make([]bool, 0, sigsNum)
-
-	// create the signatures
-	for i := 0; i < sigsNum; i++ {
-		sk := randomSK(t, rand)
-		s, err := sk.Sign(input, kmac)
-		require.NoError(t, err)
-		sigs = append(sigs, s)
-		sks = append(sks, sk)
-		pks = append(pks, sk.PublicKey())
-		expectedValid = append(expectedValid, true)
-	}
-
-	// all signatures are valid
-	t.Run("all signatures are valid", func(t *testing.T) {
-		valid, err := BatchVerifyBLSSignaturesOneMessage(pks, sigs, input, kmac)
-		require.NoError(t, err)
-		assert.Equal(t, valid, expectedValid,
-			"Verification of %s failed, private keys are %s, input is %x, results is %v",
-			sigs, sks, input, valid)
-	})
-
-	// one valid signature
-	t.Run("one valid signature", func(t *testing.T) {
-		valid, err := BatchVerifyBLSSignaturesOneMessage(pks[:1], sigs[:1], input, kmac)
-		require.NoError(t, err)
-		assert.Equal(t, valid, expectedValid[:1],
-			"Verification of %s failed, private keys are %s, input is %x, results is %v",
-			sigs, sks, input, valid)
-	})
-
-	// pick a random number of invalid signatures
-	invalidSigsNum := rand.Intn(sigsNum-1) + 1
-	// generate a random permutation of indices to pick the
-	// invalid signatures.
-	indices := make([]int, 0, sigsNum)
-	for i := 0; i < sigsNum; i++ {
-		indices = append(indices, i)
-	}
-	rand.Shuffle(sigsNum, func(i, j int) {
-		indices[i], indices[j] = indices[j], indices[i]
-	})
-
-	// some signatures are invalid
-	t.Run("some signatures are invalid", func(t *testing.T) {
-
-		for i := 0; i < invalidSigsNum; i++ { // alter invalidSigsNum random signatures
-			alterSignature(sigs[indices[i]])
-			expectedValid[indices[i]] = false
-		}
-
-		valid, err := BatchVerifyBLSSignaturesOneMessage(pks, sigs, input, kmac)
-		require.NoError(t, err)
-		assert.Equal(t, expectedValid, valid,
-			"Verification of %s failed\n private keys are %s\n input is %x\n results is %v",
-			sigs, sks, input, valid)
-	})
-
-	// all signatures are invalid
-	t.Run("all signatures are invalid", func(t *testing.T) {
-		for i := invalidSigsNum; i < sigsNum; i++ { // alter the remaining random signatures
-			alterSignature(sigs[indices[i]])
-			expectedValid[indices[i]] = false
-			if i%5 == 0 {
-				sigs[indices[i]] = sigs[indices[i]][:3] // test the short signatures
-			}
-		}
-
-		valid, err := BatchVerifyBLSSignaturesOneMessage(pks, sigs, input, kmac)
-		require.NoError(t, err)
-		assert.Equal(t, valid, expectedValid,
-			"Verification of %s failed, private keys are %s, input is %x, results is %v",
-			sigs, sks, input, valid)
-	})
-
-	// test the empty list case
-	t.Run("empty list", func(t *testing.T) {
-		valid, err := BatchVerifyBLSSignaturesOneMessage(pks[:0], sigs[:0], input, kmac)
-		require.Error(t, err)
-		assert.True(t, IsBLSAggregateEmptyListError(err))
-		assert.Equal(t, valid, []bool{},
-			"verification should fail with an empty key list, got %v", valid)
-	})
-
-	// test incorrect inputs
-	t.Run("inconsistent inputs", func(t *testing.T) {
-		valid, err := BatchVerifyBLSSignaturesOneMessage(pks[:len(pks)-1], sigs, input, kmac)
-		require.Error(t, err)
-		assert.True(t, IsInvalidInputsError(err))
-		assert.Equal(t, valid, []bool{},
-			"verification should fail with incorrect input lengths, got %v", valid)
-	})
-
-	// test wrong hasher
-	t.Run("invalid hasher", func(t *testing.T) {
-		for i := 0; i < sigsNum; i++ {
-			expectedValid[i] = false
-		}
-		valid, err := BatchVerifyBLSSignaturesOneMessage(pks, sigs, input, nil)
-		require.Error(t, err)
-		assert.True(t, IsNilHasherError(err))
-
-		assert.Equal(t, valid, expectedValid,
-			"verification should fail with a nil hasher, got %v", valid)
-	})
-
-	// test wrong key
-	t.Run("wrong key", func(t *testing.T) {
-		for i := 0; i < sigsNum; i++ {
-			expectedValid[i] = false
-		}
-		pks[0] = invalidSK(t).PublicKey()
-		valid, err := BatchVerifyBLSSignaturesOneMessage(pks, sigs, input, kmac)
-		require.Error(t, err)
-		assert.True(t, IsNotBLSKeyError(err))
-
-		assert.Equal(t, valid, expectedValid,
-			"verification should fail with an invalid key, got %v", valid)
-	})
-}
-
-// alterSignature alters or fixes a signature
-func alterSignature(s Signature) {
-	// this causes the signature to remain in G1 and be invalid,
-	// OR to be a non-point in G1 (either on the curve or not),
-	// which tests multiple error cases
-	s[10] ^= 1
-}
-
-// Batch verification bench in the happy path (all signatures are valid)
-// and the unhappy path (only one signature is invalid)
-func BenchmarkBatchVerify(b *testing.B) {
-	// random message
-	input := make([]byte, 100)
-	_, err := crand.Read(input)
-	require.NoError(b, err)
-	// hasher
-	kmac := NewExpandMsgXOFKMAC128("bench tag")
-	sigsNum := 100
-	sigs := make([]Signature, 0, sigsNum)
-	pks := make([]PublicKey, 0, sigsNum)
-	seed := make([]byte, KeyGenSeedMinLen)
-
-	// create the signatures
-	for i := 0; i < sigsNum; i++ {
-		_, err := crand.Read(seed)
-		require.NoError(b, err)
-		sk, err := GeneratePrivateKey(BLSBLS12381, seed)
-		require.NoError(b, err)
-		s, err := sk.Sign(input, kmac)
-		require.NoError(b, err)
-		sigs = append(sigs, s)
-		pks = append(pks, sk.PublicKey())
-	}
-
-	// Batch verify bench when all signatures are valid:
-	// (2) pairings for the batch verification compared to (2*n) pairings for the simple verification.
-	b.Run("happy path", func(b *testing.B) {
-		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
-			// all signatures are valid
-			_, err := BatchVerifyBLSSignaturesOneMessage(pks, sigs, input, kmac)
-			require.NoError(b, err)
-		}
-		b.StopTimer()
-	})
-
-	// Batch verify bench when some signatures are invalid:
-	// - if only one signature is invalid (a valid point in G1):
-	// less than (2*2*log(n)) pairings compared to (2*n) pairings for the simple verification.
-	// - if all signatures are invalid (valid points in G1):
-	// (2*2*(n-1)) pairings compared to (2*n) pairings for the simple verification.
-	b.Run("unhappy path", func(b *testing.B) {
-		// only one invalid signature
-		alterSignature(sigs[sigsNum/2])
-		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
-			// one signature is invalid
-			_, err := BatchVerifyBLSSignaturesOneMessage(pks, sigs, input, kmac)
-			require.NoError(b, err)
-		}
-		b.StopTimer()
-	})
-}
-
-// BLS multi-signature
-// signature aggregation sanity check
-//
-// Aggregate n signatures of distinct messages under different keys,
-// and verify the aggregated signature using the multi-signature verification with
-// many messages.
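As a minimal sketch of how these aggregation APIs compose (assuming the package context for types such as `PrivateKey`, `Signature` and `hash.Hasher`; the helper name `aggregateAndVerifyManyMessages` is hypothetical and not part of the package):

```go
// aggregateAndVerifyManyMessages signs distinct messages under different keys,
// aggregates all signatures into one, then verifies the aggregate with a
// single multi-message call.
func aggregateAndVerifyManyMessages(sks []PrivateKey, msgs [][]byte, kmacs []hash.Hasher) (bool, error) {
	sigs := make([]Signature, len(sks))
	pks := make([]PublicKey, len(sks))
	for i, sk := range sks {
		sig, err := sk.Sign(msgs[i], kmacs[i]) // one signature per (key, message) pair
		if err != nil {
			return false, err
		}
		sigs[i] = sig
		pks[i] = sk.PublicKey()
	}
	aggSig, err := AggregateBLSSignatures(sigs)
	if err != nil {
		return false, err
	}
	// one verification call covering all (key, message) pairs
	return VerifyBLSSignatureManyMessages(pks, aggSig, msgs, kmacs)
}
```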
-func TestBLSAggregateSignaturesManyMessages(t *testing.T) { - rand := getPRG(t) - - // number of signatures to aggregate - sigsNum := mrand.Intn(20) + 1 - sigs := make([]Signature, 0, sigsNum) - - // number of keys - keysNum := mrand.Intn(sigsNum) + 1 - sks := make([]PrivateKey, 0, keysNum) - // generate the keys - for i := 0; i < keysNum; i++ { - sk := randomSK(t, rand) - sks = append(sks, sk) - } - - // number of messages (could be larger or smaller than the number of keys) - msgsNum := mrand.Intn(sigsNum) + 1 - messages := make([][20]byte, msgsNum) - for i := 0; i < msgsNum; i++ { - _, err := rand.Read(messages[i][:]) - require.NoError(t, err) - } - - inputMsgs := make([][]byte, 0, sigsNum) - inputPks := make([]PublicKey, 0, sigsNum) - inputKmacs := make([]hash.Hasher, 0, sigsNum) - - // create the signatures - for i := 0; i < sigsNum; i++ { - kmac := NewExpandMsgXOFKMAC128("test tag") - // pick a key randomly from the list - skRand := mrand.Intn(keysNum) - sk := sks[skRand] - // pick a message randomly from the list - msgRand := mrand.Intn(msgsNum) - msg := messages[msgRand][:] - // generate a signature - s, err := sk.Sign(msg, kmac) - require.NoError(t, err) - // update signatures and api inputs - sigs = append(sigs, s) - inputPks = append(inputPks, sk.PublicKey()) - inputMsgs = append(inputMsgs, msg) - inputKmacs = append(inputKmacs, kmac) - } - var aggSig Signature - - t.Run("correctness check", func(t *testing.T) { - // aggregate signatures - var err error - aggSig, err = AggregateBLSSignatures(sigs) - require.NoError(t, err) - // Verify the aggregated signature - valid, err := VerifyBLSSignatureManyMessages(inputPks, aggSig, inputMsgs, inputKmacs) - require.NoError(t, err) - assert.True(t, valid, - "Verification of %s failed, should be valid, private keys are %s, inputs are %x, input public keys are %s", - aggSig, sks, inputMsgs, inputPks) - }) - - // check if one of the signatures is not correct - t.Run("one signature is invalid", func(t *testing.T) { - randomIndex := mrand.Intn(sigsNum) // pick a random signature - messages[0][0] ^= 1 // make sure the signature is different - var err error - sigs[randomIndex], err = sks[0].Sign(messages[0][:], inputKmacs[0]) - require.NoError(t, err) - messages[0][0] ^= 1 - aggSig, err = AggregateBLSSignatures(sigs) - require.NoError(t, err) - valid, err := VerifyBLSSignatureManyMessages(inputPks, aggSig, inputMsgs, inputKmacs) - require.NoError(t, err) - assert.False(t, valid, - "Verification of %s should fail, private keys are %s, inputs are %x, input public keys are %s", - aggSig, sks, inputMsgs, inputPks) - }) - - // test the empty keys case - t.Run("empty list", func(t *testing.T) { - valid, err := VerifyBLSSignatureManyMessages(inputPks[:0], aggSig, inputMsgs, inputKmacs) - assert.Error(t, err) - assert.True(t, IsBLSAggregateEmptyListError(err)) - assert.False(t, valid, - "verification should fail with an empty key list") - }) - - // test inconsistent input arrays - t.Run("inconsistent inputs", func(t *testing.T) { - // inconsistent lengths - valid, err := VerifyBLSSignatureManyMessages(inputPks, aggSig, inputMsgs[:sigsNum-1], inputKmacs) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.False(t, valid, "verification should fail with inconsistent messages and hashers") - - // empty key list - valid, err = VerifyBLSSignatureManyMessages(inputPks[:0], aggSig, inputMsgs, inputKmacs) - assert.Error(t, err) - assert.True(t, IsBLSAggregateEmptyListError(err)) - assert.False(t, valid, "verification should fail with 
an empty key list")
-
-		// nil hasher
-		tmp := inputKmacs[0]
-		inputKmacs[0] = nil
-		valid, err = VerifyBLSSignatureManyMessages(inputPks, aggSig, inputMsgs, inputKmacs)
-		assert.Error(t, err)
-		assert.True(t, IsNilHasherError(err))
-		assert.False(t, valid, "verification should fail with nil hasher")
-		inputKmacs[0] = tmp
-
-		// wrong key
-		tmpPK := inputPks[0]
-		inputPks[0] = invalidSK(t).PublicKey()
-		valid, err = VerifyBLSSignatureManyMessages(inputPks, aggSig, inputMsgs, inputKmacs)
-		assert.Error(t, err)
-		assert.True(t, IsNotBLSKeyError(err))
-		assert.False(t, valid, "verification should fail with a non-BLS key")
-		inputPks[0] = tmpPK
-	})
-}
-
-// TestBLSErrorTypes verifies the behavior of error-type-detecting functions
-// such as `IsInvalidInputsError`.
-func TestBLSErrorTypes(t *testing.T) {
-	t.Run("aggregateEmptyListError sanity", func(t *testing.T) {
-		err := blsAggregateEmptyListError
-		invInpError := invalidInputsErrorf("")
-		otherError := fmt.Errorf("some error")
-		assert.True(t, IsBLSAggregateEmptyListError(err))
-		assert.False(t, IsInvalidInputsError(err))
-		assert.False(t, IsBLSAggregateEmptyListError(invInpError))
-		assert.False(t, IsBLSAggregateEmptyListError(otherError))
-		assert.False(t, IsBLSAggregateEmptyListError(nil))
-	})
-
-	t.Run("notBLSKeyError sanity", func(t *testing.T) {
-		err := notBLSKeyError
-		invInpError := invalidInputsErrorf("")
-		otherError := fmt.Errorf("some error")
-		assert.True(t, IsNotBLSKeyError(err))
-		assert.False(t, IsInvalidInputsError(err))
-		assert.False(t, IsNotBLSKeyError(invInpError))
-		assert.False(t, IsNotBLSKeyError(otherError))
-		assert.False(t, IsNotBLSKeyError(nil))
-	})
-}
-
-// VerifyBLSSignatureManyMessages bench
-// Bench the slowest case where all messages and public keys are distinct:
-// (2*n) pairings without aggregation vs (n+1) pairings with aggregation.
-// The function is faster whenever there are redundant messages or public keys.
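To make the pairing counts above concrete: with the n = 100 distinct (key, message) pairs used in the benchmark below, verifying each signature separately costs 2*100 = 200 pairings, while the aggregated call costs 100 + 1 = 101 — close to a 2x saving, and the advantage grows whenever messages or public keys repeat.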
-func BenchmarkVerifySignatureManyMessages(b *testing.B) { - // inputs - sigsNum := 100 - inputKmacs := make([]hash.Hasher, 0, sigsNum) - sigs := make([]Signature, 0, sigsNum) - pks := make([]PublicKey, 0, sigsNum) - inputMsgs := make([][]byte, 0, sigsNum) - kmac := NewExpandMsgXOFKMAC128("bench tag") - seed := make([]byte, KeyGenSeedMinLen) - - // create the signatures - for i := 0; i < sigsNum; i++ { - input := make([]byte, 100) - _, err := crand.Read(input) - require.NoError(b, err) - - _, err = crand.Read(seed) - require.NoError(b, err) - sk, err := GeneratePrivateKey(BLSBLS12381, seed) - require.NoError(b, err) - s, err := sk.Sign(input, kmac) - require.NoError(b, err) - sigs = append(sigs, s) - pks = append(pks, sk.PublicKey()) - inputKmacs = append(inputKmacs, kmac) - inputMsgs = append(inputMsgs, input) - } - aggSig, err := AggregateBLSSignatures(sigs) - require.NoError(b, err) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := VerifyBLSSignatureManyMessages(pks, aggSig, inputMsgs, inputKmacs) - require.NoError(b, err) - } - b.StopTimer() -} - -// Bench of all aggregation functions -func BenchmarkAggregate(b *testing.B) { - seed := make([]byte, KeyGenSeedMinLen) - // random message - input := make([]byte, 100) - _, _ = crand.Read(input) - // hasher - kmac := NewExpandMsgXOFKMAC128("bench tag") - sigsNum := 1000 - sigs := make([]Signature, 0, sigsNum) - sks := make([]PrivateKey, 0, sigsNum) - pks := make([]PublicKey, 0, sigsNum) - - // create the signatures - for i := 0; i < sigsNum; i++ { - _, err := crand.Read(seed) - require.NoError(b, err) - sk, err := GeneratePrivateKey(BLSBLS12381, seed) - require.NoError(b, err) - s, err := sk.Sign(input, kmac) - if err != nil { - b.Fatal() - } - sigs = append(sigs, s) - sks = append(sks, sk) - pks = append(pks, sk.PublicKey()) - } - - // private keys - b.Run("PrivateKeys", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := AggregateBLSPrivateKeys(sks) - require.NoError(b, err) - } - b.StopTimer() - }) - - // public keys - b.Run("PublicKeys", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := AggregateBLSPublicKeys(pks) - require.NoError(b, err) - } - b.StopTimer() - }) - - // signatures - b.Run("Signatures", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := AggregateBLSSignatures(sigs) - require.NoError(b, err) - } - b.StopTimer() - }) -} - -func TestBLSIdentity(t *testing.T) { - rand := getPRG(t) - - var identitySig []byte - msg := []byte("random_message") - hasher := NewExpandMsgXOFKMAC128("") - - t.Run("identity signature comparison", func(t *testing.T) { - // verify that constructed identity signatures are recognized as such by IsBLSSignatureIdentity. - // construct identity signature by summing (aggregating) a random signature and its inverse. - - assert.True(t, IsBLSSignatureIdentity(identityBLSSignature)) - - // sum up a random signature and its inverse to get identity - sk := randomSK(t, rand) - sig, err := sk.Sign(msg, hasher) - require.NoError(t, err) - oppositeSig := make([]byte, signatureLengthBLSBLS12381) - copy(oppositeSig, sig) - oppositeSig[0] ^= 0x20 // flip the last 3rd bit to flip the point sign - aggSig, err := AggregateBLSSignatures([]Signature{sig, oppositeSig}) - require.NoError(t, err) - assert.True(t, IsBLSSignatureIdentity(aggSig)) - }) - - t.Run("verification with identity key", func(t *testing.T) { - // all verification methods should return (false, nil) when verified against - // an identity public key. 
- idPk := IdentityBLSPublicKey() - valid, err := idPk.Verify(identitySig, msg, hasher) - assert.NoError(t, err) - assert.False(t, valid) - - valid, err = VerifyBLSSignatureOneMessage([]PublicKey{idPk}, identitySig, msg, hasher) - assert.NoError(t, err) - assert.False(t, valid) - - valid, err = VerifyBLSSignatureManyMessages([]PublicKey{idPk}, identitySig, [][]byte{msg}, []hash.Hasher{hasher}) - assert.NoError(t, err) - assert.False(t, valid) - - validSlice, err := BatchVerifyBLSSignaturesOneMessage([]PublicKey{idPk}, []Signature{identitySig}, msg, hasher) - assert.NoError(t, err) - assert.False(t, validSlice[0]) - - valid, err = BLSVerifyPOP(idPk, identitySig) - assert.NoError(t, err) - assert.False(t, valid) - }) -} diff --git a/crypto/bls_thresholdsign.go b/crypto/bls_thresholdsign.go deleted file mode 100644 index 4256af84ab9..00000000000 --- a/crypto/bls_thresholdsign.go +++ /dev/null @@ -1,607 +0,0 @@ -//go:build relic -// +build relic - -package crypto - -// #cgo CFLAGS: -g -Wall -std=c99 -// #include "bls_thresholdsign_include.h" -import "C" - -import ( - "fmt" - "sync" - - "github.com/onflow/flow-go/crypto/hash" -) - -// BLS-based threshold signature on BLS 12-381 curve -// The BLS settings are the same as in the signature -// scheme defined in the package. - -// A threshold signature scheme allows any subset of (t+1) -// valid signature shares to reconstruct the threshold signature. -// Up to (t) shares do not reveal any information about the threshold -// signature. -// Although the API allows using arbitrary values of (t), -// the threshold signature scheme is secure in the presence of up to (t) -// malicious participants when (t < n/2). -// In order to optimize equally for unforgeability and robustness, -// the input threshold value (t) should be set to t = floor((n-1)/2). - -// The package offers two api for BLS threshold signature: -// - stateful api where a structure holds all information -// of the threshold signature protocols and is recommended -// to be used for safety and to reduce protocol inconsistencies. -// - stateless api with signature reconstruction. Verifying and storing -// the signature shares has to be managed outside of the library. - -// blsThresholdSignatureParticipant implements ThresholdSignatureParticipant -// based on the BLS signature scheme -type blsThresholdSignatureParticipant struct { - // embed the follower - *blsThresholdSignatureInspector - // the index of the current participant - myIndex int - // the current participant private key (a threshold KG output) - myPrivateKey PrivateKey -} - -// blsThresholdSignatureInspector implements ThresholdSignatureInspector -// based on the BLS signature scheme -type blsThresholdSignatureInspector struct { - // size of the group - size int - // the threshold t of the scheme where (t+1) shares are - // required to reconstruct a signature - threshold int - // the group public key (a threshold KG output) - groupPublicKey PublicKey - // the group public key shares (a threshold KG output) - publicKeyShares []PublicKey - // the hasher to be used for all signatures - hasher hash.Hasher - // the message to be signed. Signature shares and the threshold signature - // are verified against this message - message []byte - // the valid signature shares received from other participants - shares map[index]Signature - // the threshold signature. 
It is equal to nil if less than (t+1) shares are - // received - thresholdSignature Signature - // lock for atomic operations - lock sync.RWMutex -} - -// NewBLSThresholdSignatureParticipant creates a new instance of Threshold signature Participant using BLS. -// A participant is able to participate in a threshold signing protocol as well as following the -// protocol. -// -// A new instance is needed for each set of public keys and message. -// If the key set or message change, a new structure needs to be instantiated. -// Participants are defined by their public key share, and are indexed from 0 to n-1. The current -// participant is indexed by `myIndex` and holds the input private key -// where n is the length of the public key shares slice. -// -// The function returns -// - (nil, invalidInputsError) if: -// - n is not in [`ThresholdSignMinSize`, `ThresholdSignMaxSize`] -// - threshold value is not in interval [1, n-1] -// - input private key and public key at my index do not match -// - (nil, notBLSKeyError) if the private or at least one public key is not of type BLS BLS12-381. -// - (pointer, nil) otherwise -func NewBLSThresholdSignatureParticipant( - groupPublicKey PublicKey, - sharePublicKeys []PublicKey, - threshold int, - myIndex int, - myPrivateKey PrivateKey, - message []byte, - dsTag string, -) (*blsThresholdSignatureParticipant, error) { - - size := len(sharePublicKeys) - if myIndex >= size || myIndex < 0 { - return nil, invalidInputsErrorf( - "the current index must be between 0 and %d, got %d", - size-1, myIndex) - } - - // check private key is BLS key - if _, ok := myPrivateKey.(*prKeyBLSBLS12381); !ok { - return nil, fmt.Errorf("private key of participant %d is not valid: %w", myIndex, notBLSKeyError) - } - - // create the follower - follower, err := NewBLSThresholdSignatureInspector(groupPublicKey, sharePublicKeys, threshold, message, dsTag) - if err != nil { - return nil, fmt.Errorf("create a threshold signature follower failed: %w", err) - } - - // check the private key, index and corresponding public key are consistent - currentPublicKey := sharePublicKeys[myIndex] - if !myPrivateKey.PublicKey().Equals(currentPublicKey) { - return nil, invalidInputsErrorf("private key is not matching public key at index %d", myIndex) - } - - return &blsThresholdSignatureParticipant{ - blsThresholdSignatureInspector: follower, - myIndex: myIndex, // current participant index - myPrivateKey: myPrivateKey, // myPrivateKey is the current participant's own private key share - }, nil -} - -// NewBLSThresholdSignatureInspector creates a new instance of Threshold signature follower using BLS. -// It only allows following the threshold signing protocol . -// -// A new instance is needed for each set of public keys and message. -// If the key set or message change, a new structure needs to be instantiated. -// Participants are defined by their public key share, and are indexed from 0 to n-1 -// where n is the length of the public key shares slice. 
-//
-// The function returns
-//   - (nil, invalidInputsError) if:
-//   - n is not in [`ThresholdSignMinSize`, `ThresholdSignMaxSize`]
-//   - threshold value is not in interval [1, n-1]
-//   - (nil, notBLSKeyError) if at least one public key is not of type pubKeyBLSBLS12381
-//   - (pointer, nil) otherwise
-func NewBLSThresholdSignatureInspector(
-	groupPublicKey PublicKey,
-	sharePublicKeys []PublicKey,
-	threshold int,
-	message []byte,
-	dsTag string,
-) (*blsThresholdSignatureInspector, error) {
-
-	size := len(sharePublicKeys)
-	if size < ThresholdSignMinSize || size > ThresholdSignMaxSize {
-		return nil, invalidInputsErrorf(
-			"size should be between %d and %d, got %d",
-			ThresholdSignMinSize, ThresholdSignMaxSize, size)
-	}
-	if threshold >= size || threshold < MinimumThreshold {
-		return nil, invalidInputsErrorf(
-			"the threshold must be between %d and %d, got %d",
-			MinimumThreshold, size-1, threshold)
-	}
-
-	// check keys are BLS keys
-	for i, pk := range sharePublicKeys {
-		if _, ok := pk.(*pubKeyBLSBLS12381); !ok {
-			return nil, fmt.Errorf("key at index %d is invalid: %w", i, notBLSKeyError)
-		}
-	}
-	if _, ok := groupPublicKey.(*pubKeyBLSBLS12381); !ok {
-		return nil, fmt.Errorf("group key is invalid: %w", notBLSKeyError)
-	}
-
-	return &blsThresholdSignatureInspector{
-		size:               size,
-		threshold:          threshold,
-		message:            message,
-		hasher:             NewExpandMsgXOFKMAC128(dsTag),
-		shares:             make(map[index]Signature),
-		thresholdSignature: nil,
-		groupPublicKey:     groupPublicKey, // groupPublicKey is the group public key corresponding to the group secret key
-		publicKeyShares:    sharePublicKeys, // sharePublicKeys are the public key shares corresponding to the private key shares
-	}, nil
-}
-
-// SignShare generates a signature share using the current private key share.
-//
-// The function does not add the share to the internal pool of shares and does
-// not update the internal state.
-// This function is thread safe and non-blocking.
-//
-// The function returns
-//  - (nil, error) if an unexpected error occurs
-//  - (signature, nil) otherwise
-func (s *blsThresholdSignatureParticipant) SignShare() (Signature, error) {
-	share, err := s.myPrivateKey.Sign(s.message, s.hasher)
-	if err != nil {
-		return nil, fmt.Errorf("share signing failed: %w", err)
-	}
-	return share, nil
-}
-
-// validIndex returns an invalidInputsError if the given index is invalid, and nil otherwise.
-// This function is thread safe.
-func (s *blsThresholdSignatureInspector) validIndex(orig int) error {
-	if orig >= s.size || orig < 0 {
-		return invalidInputsErrorf(
-			"origin input is invalid, should be non-negative and less than %d, got %d",
-			s.size, orig)
-	}
-	return nil
-}
-
-// VerifyShare verifies the input signature against the stored message and stored
-// key at the input index.
-//
-// This function does not update the internal state and is thread-safe.
-// Returns:
-//  - (true, nil) if the signature is valid
-//  - (false, nil) if `orig` is valid but the signature share does not verify against
-//    the public key share and message.
-//  - (false, invalidInputsError) if `orig` is an invalid index value
-//  - (false, error) for all other unexpected errors
-func (s *blsThresholdSignatureInspector) VerifyShare(orig int, share Signature) (bool, error) {
-	// validate index
-	if err := s.validIndex(orig); err != nil {
-		return false, err
-	}
-	return s.publicKeyShares[orig].Verify(share, s.message, s.hasher)
-}
-
-// VerifyThresholdSignature verifies the input signature against the stored
-// message and stored group public key.
-//
-// This function does not update the internal state and is thread-safe.
-// Returns:
-//  - (true, nil) if the signature is valid
-//  - (false, nil) if signature is invalid
-//  - (false, error) for all other unexpected errors
-func (s *blsThresholdSignatureInspector) VerifyThresholdSignature(thresholdSignature Signature) (bool, error) {
-	return s.groupPublicKey.Verify(thresholdSignature, s.message, s.hasher)
-}
-
-// EnoughShares indicates whether enough shares have been accumulated in order to reconstruct
-// a group signature.
-//
-// This function is thread safe.
-// Returns:
-//  - true if and only if at least (threshold+1) shares were added
-func (s *blsThresholdSignatureInspector) EnoughShares() bool {
-	s.lock.RLock()
-	defer s.lock.RUnlock()
-
-	return s.enoughShares()
-}
-
-// non-thread-safe version of EnoughShares
-func (s *blsThresholdSignatureInspector) enoughShares() bool {
-	// len(s.shares) is always <= s.threshold + 1
-	return len(s.shares) == (s.threshold + 1)
-}
-
-// HasShare checks whether the internal map contains the share of the given index.
-// This function is thread safe and locks the internal state.
-// The function returns:
-//  - (false, invalidInputsError) if the index is invalid
-//  - (false, nil) if index is valid and share is not in the map
-//  - (true, nil) if index is valid and share is in the map
-func (s *blsThresholdSignatureInspector) HasShare(orig int) (bool, error) {
-	// validate index
-	if err := s.validIndex(orig); err != nil {
-		return false, err
-	}
-
-	s.lock.RLock()
-	defer s.lock.RUnlock()
-
-	return s.hasShare(index(orig)), nil
-}
-
-// non-thread-safe version of HasShare; assumes the input is valid
-func (s *blsThresholdSignatureInspector) hasShare(orig index) bool {
-	_, ok := s.shares[orig]
-	return ok
-}
-
-// TrustedAdd adds a signature share to the internal pool of shares
-// without verifying the signature against the message and the participant's
-// public key. This function is thread safe and locks the internal state.
-//
-// The share is only added if the signer index is valid and has not been
-// added yet. Moreover, the share is added only if not enough shares were collected.
-// The function returns:
-//  - (true, nil) if enough signature shares were already collected and no error occurred
-//  - (false, nil) if not enough shares were collected and no error occurred
-//  - (false, invalidInputsError) if index is invalid
-//  - (false, duplicatedSignerError) if a signature for the index was previously added
-func (s *blsThresholdSignatureInspector) TrustedAdd(orig int, share Signature) (bool, error) {
-	// validate index
-	if err := s.validIndex(orig); err != nil {
-		return false, err
-	}
-
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	if s.hasShare(index(orig)) {
-		return false, duplicatedSignerErrorf("share for %d was already added", orig)
-	}
-
-	if s.enoughShares() {
-		return true, nil
-	}
-	s.shares[index(orig)] = share
-	return s.enoughShares(), nil
-}
-
-// VerifyAndAdd verifies a signature share (same as `VerifyShare`),
-// and may or may not add the share to the local pool of shares.
-// This function is thread safe and locks the internal state.
-//
-// The share is only added if the signature is valid, the signer index is valid and has not been
-// added yet. Moreover, the share is added only if not enough shares were collected.
-// Boolean returns:
-//  - First boolean output is true if the share is valid and no error is returned, and false otherwise.
-// - Second boolean output is true if enough shares were collected and no error is returned, and false otherwise. -// -// Error returns: -// - invalidInputsError if input index is invalid. A signature that doesn't verify against the signer's -// public key is not considered an invalid input. -// - duplicatedSignerError if signer was already added. -// - other errors if an unexpected exception occurred. -func (s *blsThresholdSignatureInspector) VerifyAndAdd(orig int, share Signature) (bool, bool, error) { - // validate index - if err := s.validIndex(orig); err != nil { - return false, false, err - } - - s.lock.Lock() - defer s.lock.Unlock() - - // check share is new - if s.hasShare(index(orig)) { - return false, false, duplicatedSignerErrorf("share for %d was already added", orig) - } - - // verify the share - verif, err := s.publicKeyShares[index(orig)].Verify(share, s.message, s.hasher) - if err != nil { - return false, false, fmt.Errorf("verification of share failed: %w", err) - } - - enough := s.enoughShares() - if verif && !enough { - s.shares[index(orig)] = share - } - return verif, s.enoughShares(), nil -} - -// ThresholdSignature returns the threshold signature if the threshold was reached. -// The threshold signature is reconstructed only once is cached for subsequent calls. -// -// The function is thread-safe. -// Returns: -// - (signature, nil) if no error occurred -// - (nil, notEnoughSharesError) if not enough shares were collected -// - (nil, invalidSignatureError) if at least one collected share does not serialize to a valid BLS signature. -// - (nil, invalidInputsError) if the constructed signature failed to verify against the group public key and stored -// message. This post-verification is required for safety, as `TrustedAdd` allows adding invalid signatures. -// - (nil, error) for any other unexpected error. -func (s *blsThresholdSignatureInspector) ThresholdSignature() (Signature, error) { - s.lock.Lock() - defer s.lock.Unlock() - - // check cached thresholdSignature - if s.thresholdSignature != nil { - return s.thresholdSignature, nil - } - - // reconstruct the threshold signature - thresholdSignature, err := s.reconstructThresholdSignature() - if err != nil { - return nil, err - } - s.thresholdSignature = thresholdSignature - return thresholdSignature, nil -} - -// reconstructThresholdSignature reconstructs the threshold signature from at least (t+1) shares. -// Returns: -// - (signature, nil) if no error occurred -// - (nil, notEnoughSharesError) if not enough shares were collected -// - (nil, invalidSignatureError) if at least one collected share does not serialize to a valid BLS signature. -// - (nil, invalidInputsError) if the constructed signature failed to verify against the group public key and stored message. -// - (nil, error) for any other unexpected error. -func (s *blsThresholdSignatureInspector) reconstructThresholdSignature() (Signature, error) { - - if !s.enoughShares() { - return nil, notEnoughSharesErrorf("number of signature shares %d is not enough, %d are required", - len(s.shares), s.threshold+1) - } - thresholdSignature := make([]byte, signatureLengthBLSBLS12381) - - // prepare the C layer inputs - shares := make([]byte, 0, len(s.shares)*signatureLengthBLSBLS12381) - signers := make([]index, 0, len(s.shares)) - for index, share := range s.shares { - shares = append(shares, share...) 
- signers = append(signers, index) - } - - // set BLS settings - blsInstance.reInit() - - // Lagrange Interpolate at point 0 - result := C.G1_lagrangeInterpolateAtZero( - (*C.uchar)(&thresholdSignature[0]), - (*C.uchar)(&shares[0]), - (*C.uint8_t)(&signers[0]), (C.int)(s.threshold+1)) - - if result != valid { - return nil, invalidSignatureError - } - - // Verify the computed signature - verif, err := s.VerifyThresholdSignature(thresholdSignature) - if err != nil { - return nil, fmt.Errorf("internal error while verifying the threshold signature: %w", err) - } - if !verif { - return nil, invalidInputsErrorf( - "constructed threshold signature does not verify against the group public key, check shares and public key") - } - - return thresholdSignature, nil -} - -// BLSReconstructThresholdSignature is a stateless BLS api that takes a list of -// BLS signatures and their signers' indices and returns the threshold signature. -// -// size is the number of participants, it must be in the range [ThresholdSignMinSize..ThresholdSignMaxSize]. -// threshold is the threshold value, it must be in the range [MinimumThreshold..size-1]. -// The function does not check the validity of the shares, and does not check -// the validity of the resulting signature. -// BLSReconstructThresholdSignature returns: -// - (nil, error) if the inputs are not in the correct range, if the threshold is not reached -// - (nil, duplicatedSignerError) if input signers are not distinct. -// - (nil, invalidSignatureError) if at least one of the first (threshold+1) signatures. -// does not serialize to a valid E1 point. -// - (threshold_sig, nil) otherwise. -// -// If the number of shares reaches the required threshold, only the first threshold+1 shares -// are considered to reconstruct the signature. -func BLSReconstructThresholdSignature(size int, threshold int, - shares []Signature, signers []int) (Signature, error) { - // set BLS settings - blsInstance.reInit() - - if size < ThresholdSignMinSize || size > ThresholdSignMaxSize { - return nil, invalidInputsErrorf( - "size should be between %d and %d", - ThresholdSignMinSize, - ThresholdSignMaxSize) - } - if threshold >= size || threshold < MinimumThreshold { - return nil, invalidInputsErrorf( - "the threshold must be between %d and %d, got %d", - MinimumThreshold, size-1, - threshold) - } - - if len(shares) != len(signers) { - return nil, invalidInputsErrorf( - "the number of signature shares is not matching the number of signers") - } - - if len(shares) < threshold+1 { - return nil, invalidInputsErrorf( - "the number of signatures does not reach the threshold") - } - - // map to check signers are distinct - m := make(map[index]bool) - - // flatten the shares (required by the C layer) - flatShares := make([]byte, 0, signatureLengthBLSBLS12381*(threshold+1)) - indexSigners := make([]index, 0, threshold+1) - for i, share := range shares { - flatShares = append(flatShares, share...) 
- // check the index is valid - if signers[i] >= size || signers[i] < 0 { - return nil, invalidInputsErrorf( - "signer index #%d is invalid", i) - } - // check the index is new - if _, isSeen := m[index(signers[i])]; isSeen { - return nil, duplicatedSignerErrorf( - "%d is a duplicate signer", index(signers[i])) - } - m[index(signers[i])] = true - indexSigners = append(indexSigners, index(signers[i])) - } - - thresholdSignature := make([]byte, signatureLengthBLSBLS12381) - // Lagrange Interpolate at point 0 - if C.G1_lagrangeInterpolateAtZero( - (*C.uchar)(&thresholdSignature[0]), - (*C.uchar)(&flatShares[0]), - (*C.uint8_t)(&indexSigners[0]), (C.int)(threshold+1), - ) != valid { - return nil, invalidSignatureError - } - return thresholdSignature, nil -} - -// EnoughShares is a stateless function that takes the value of the threshold -// and a shares number and returns true if the shares number is enough -// to reconstruct a threshold signature. -// -// The function returns: -// - (false, invalidInputsErrorf) if input threshold is less than 1 -// - (false, nil) if threshold is valid but shares are not enough. -// - (true, nil) if the threshold is valid but shares are enough. -func EnoughShares(threshold int, sharesNumber int) (bool, error) { - if threshold < MinimumThreshold { - return false, invalidInputsErrorf( - "the threshold can't be smaller than %d, got %d", - MinimumThreshold, threshold) - } - return sharesNumber > threshold, nil -} - -// BLSThresholdKeyGen is a key generation for a BLS-based -// threshold signature scheme with a trusted dealer. -// -// The function returns : -// - (nil, nil, nil, invalidInputsErrorf) if: -// - n is not in [`ThresholdSignMinSize`, `ThresholdSignMaxSize`] -// - threshold value is not in interval [1, n-1] -// - (groupPrivKey, []pubKeyShares, groupPubKey, nil) otherwise -func BLSThresholdKeyGen(size int, threshold int, seed []byte) ([]PrivateKey, - []PublicKey, PublicKey, error) { - if size < ThresholdSignMinSize || size > ThresholdSignMaxSize { - return nil, nil, nil, invalidInputsErrorf( - "size should be between %d and %d, got %d", - ThresholdSignMinSize, - ThresholdSignMaxSize, - size) - } - if threshold >= size || threshold < MinimumThreshold { - return nil, nil, nil, invalidInputsErrorf( - "the threshold must be between %d and %d, got %d", - MinimumThreshold, - size-1, - threshold) - } - - // set BLS settings - blsInstance.reInit() - - // the scalars x and G2 points y - x := make([]scalar, size) - y := make([]pointG2, size) - var X0 pointG2 - - // seed relic - if err := seedRelic(seed); err != nil { - return nil, nil, nil, fmt.Errorf("seeding relic failed: %w", err) - } - // Generate a polynomial P in Zr[X] of degree t - a := make([]scalar, threshold+1) - randZrStar(&a[0]) // non-identity key - if threshold > 0 { - for i := 1; i < threshold; i++ { - randZr(&a[i]) - } - randZrStar(&a[threshold]) // enforce the polynomial degree - } - // compute the shares - for i := index(1); int(i) <= size; i++ { - C.Zr_polynomialImage( - (*C.bn_st)(&x[i-1]), - (*C.ep2_st)(&y[i-1]), - (*C.bn_st)(&a[0]), (C.int)(len(a)), - (C.uint8_t)(i), - ) - } - // group public key - generatorScalarMultG2(&X0, &a[0]) - // export the keys - skShares := make([]PrivateKey, size) - pkShares := make([]PublicKey, size) - var pkGroup PublicKey - for i := 0; i < size; i++ { - skShares[i] = newPrKeyBLSBLS12381(&x[i]) - pkShares[i] = newPubKeyBLSBLS12381(&y[i]) - } - pkGroup = newPubKeyBLSBLS12381(&X0) - - // public key shares and group public key - // are sampled uniformly at random. 
The probability of
-	// generating an identity key is therefore negligible.
-	return skShares, pkShares, pkGroup, nil
-}
diff --git a/crypto/bls_thresholdsign_core.c b/crypto/bls_thresholdsign_core.c
deleted file mode 100644
index dc57355df47..00000000000
--- a/crypto/bls_thresholdsign_core.c
+++ /dev/null
@@ -1,123 +0,0 @@
-// +build relic
-
-#include "bls_thresholdsign_include.h"
-
-// Computes the Lagrange coefficient L(i+1) at 0 with regard to the range [signers(0)+1..signers(t)+1]
-// and stores it in res, where t is the degree of the polynomial P
-static void Zr_lagrangeCoefficientAtZero(bn_t res, const int i, const uint8_t* signers, const int len){
-	// r is the order of G1 and G2
-	bn_t r, r_2;
-	bn_new(r);
-	g2_get_ord(r);
-	// (r-2) is needed to compute the inverse in Zr
-	// using Fermat's little theorem
-	bn_new(r_2);
-	bn_sub_dig(r_2, r, 2);
-	//#define MOD_METHOD MONTY
-	#define MOD_METHOD BASIC
-
-	#if MOD_METHOD == MONTY
-	bn_t u;
-	bn_new(u)
-	// Montgomery reduction constant
-	// TODO: hardcode u
-	bn_mod_pre_monty(u, r);
-	#endif
-
-	// temp buffers
-	bn_t acc, inv, base, numerator;
-	bn_new(inv);
-	bn_new(base);
-	bn_new_size(base, BITS_TO_DIGITS(Fr_BITS))
-	bn_new(acc);
-	bn_new(numerator);
-	bn_new_size(acc, BITS_TO_DIGITS(3*Fr_BITS));
-
-	// the accumulator of the Lagrange coefficient
-	// the sign (sign of acc) is equal to 1 if acc is positive, 0 otherwise
-	bn_set_dig(acc, 1);
-	int sign = 1;
-
-	// loops is the maximum number of loops that takes the accumulator to
-	// overflow modulo r, mainly the highest k such that fact(MAX_IND)/fact(MAX_IND-k) < r
-	const int loops = MAX_IND_LOOPS;
-	int k,j = 0;
-	while (j<len) {
-		bn_set_dig(base, 1);
-		bn_set_dig(numerator, 1);
-		for (k = j; j < MIN(len, k+loops); j++){
-			if (signers[j]==i)
-				continue;
-			if (signers[j]<i)
-				sign ^= 1;
-			bn_mul_dig(base, base, abs((int)signers[j]-i));
-			bn_mul_dig(numerator, numerator, signers[j]+1);
-		}
-		// compute the inverse using Fermat's little theorem
-		bn_mxp_slide(inv, base, r_2, r);
-		#if MOD_METHOD == MONTY
-		// convert to Montgomery domain
-		bn_mod_monty_conv(inv, inv, r);
-		bn_mod_monty_conv(numerator, numerator, r);
-		bn_mod_monty_conv(acc, acc, r);
-		// multiply
-		bn_mul(acc, acc, inv);
-		bn_mod_monty(acc, acc, r, u);
-		bn_mul(acc, acc, numerator);
-		bn_mod_monty(acc, acc, r, u);
-		bn_mod_monty_back(acc, acc, r);
-		#elif MOD_METHOD == BASIC
-		bn_mul(acc, acc, inv);
-		bn_mul(acc, acc, numerator);
-		bn_mod_basic(acc, acc, r);
-		#endif
-	}
-	if (sign) bn_copy(res, acc);
-	else bn_sub(res, r, acc);
-
-	// free the temp memory
-	bn_free(r);bn_free(r_2);
-	#if MOD_METHOD == MONTY
-	bn_free(&u);
-	#endif
-	bn_free(acc);
-	bn_free(inv);bn_free(base);
-	bn_free(numerator);
-}
-
-
-// Computes the Lagrange interpolation at zero LI(0) with regard to the points [signers(1)+1..signers(t+1)+1]
-// and their images [shares(1)..shares(t+1)], and stores the result in dest
-// len is the polynomial degree
-int G1_lagrangeInterpolateAtZero(byte* dest, const byte* shares, const uint8_t* signers, const int len) {
-	// computes Q(x) = A_0 + A_1*x + ...
+ A_n*x^n in G2 - // powers of x - bn_t bn_lagr_coef; - bn_new(bn_lagr_coef); - bn_new_size(bn_lagr_coef, BITS_TO_BYTES(Fr_BITS)); - - // temp variables - ep_t mult, acc, share; - ep_new(mult); - ep_new(acc); - ep_new(share); - ep_set_infty(acc); - - for (int i=0; i < len; i++) { - int read_ret = ep_read_bin_compact(share, &shares[SIGNATURE_LEN*i], SIGNATURE_LEN); - if (read_ret != RLC_OK) - return read_ret; - Zr_lagrangeCoefficientAtZero(bn_lagr_coef, signers[i], signers, len); - ep_mul_lwnaf(mult, share, bn_lagr_coef); - ep_add_jacob(acc, acc, mult); - } - // export the result - ep_write_bin_compact(dest, acc, SIGNATURE_LEN); - - // free the temp memory - ep2_free(acc); - ep2_free(mult); - ep2_free(share); - bn_free(bn_lagr_coef); - return VALID; -} diff --git a/crypto/bls_thresholdsign_include.h b/crypto/bls_thresholdsign_include.h deleted file mode 100644 index 7471e1a0a3d..00000000000 --- a/crypto/bls_thresholdsign_include.h +++ /dev/null @@ -1,15 +0,0 @@ -// +build relic - -#ifndef _REL_THRESHOLD_INCLUDE_H -#define _REL_THRESHOLD_INCLUDE_H - -#include "bls_include.h" - -// the highest k such that fact(MAX_IND)/fact(MAX_IND-k) < r -// (approximately Fr_bits/MAX_IND_BITS) -#define MAX_IND_LOOPS 32 - -int G1_lagrangeInterpolateAtZero(byte*, const byte* , const uint8_t*, const int); -extern void Zr_polynomialImage(bn_t out, ep2_t y, const bn_st* a, const int a_size, const byte x); - -#endif diff --git a/crypto/bls_thresholdsign_test.go b/crypto/bls_thresholdsign_test.go deleted file mode 100644 index 52e14785c9d..00000000000 --- a/crypto/bls_thresholdsign_test.go +++ /dev/null @@ -1,619 +0,0 @@ -//go:build relic -// +build relic - -package crypto - -import ( - crand "crypto/rand" - "fmt" - "sync" - "testing" - "time" - - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestBLSThresholdSignature(t *testing.T) { - // stateless API - t.Run("centralized_stateless_keygen", testCentralizedStatelessAPI) - // stateful API - t.Run("centralized_stateful_keygen", testCentralizedStatefulAPI) - t.Run("distributed_stateful_feldmanVSS_keygen", testDistributedStatefulAPI_FeldmanVSS) - t.Run("distributed_stateful_jointFeldman_keygen", testDistributedStatefulAPI_JointFeldman) // Flow Random beacon case -} - -const thresholdSignatureTag = "random tag" - -var thresholdSignatureMessage = []byte("random message") - -// centralized test of the stateful threshold signature using the threshold key generation. 
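Before the test itself, a minimal sketch of the stateful inspector flow it exercises (package context assumed; the helper name `collectAndReconstruct` is hypothetical and not part of the package):

```go
// collectAndReconstruct feeds externally received shares to the inspector
// until the threshold is reached, then reconstructs the group signature.
func collectAndReconstruct(groupPK PublicKey, pkShares []PublicKey, threshold int,
	msg []byte, tag string, incoming map[int]Signature) (Signature, error) {
	ts, err := NewBLSThresholdSignatureInspector(groupPK, pkShares, threshold, msg, tag)
	if err != nil {
		return nil, err
	}
	for signer, share := range incoming {
		// VerifyAndAdd errors on duplicate signers and invalid indices,
		// and silently skips shares that fail verification
		_, enough, err := ts.VerifyAndAdd(signer, share)
		if err != nil {
			return nil, err
		}
		if enough { // (threshold+1) valid shares collected
			break
		}
	}
	// ThresholdSignature reconstructs the signature, errors if the threshold
	// was not reached, and post-verifies against the group public key
	return ts.ThresholdSignature()
}
```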
-func testCentralizedStatefulAPI(t *testing.T) {
-	n := 10
-	for threshold := MinimumThreshold; threshold < n; threshold++ {
-		// generate threshold keys
-		rand := getPRG(t)
-		seed := make([]byte, SeedMinLenDKG)
-		_, err := rand.Read(seed)
-		require.NoError(t, err)
-		skShares, pkShares, pkGroup, err := BLSThresholdKeyGen(n, threshold, seed)
-		require.NoError(t, err)
-		// generate signature shares
-		signers := make([]int, 0, n)
-		// hasher
-		kmac := NewExpandMsgXOFKMAC128(thresholdSignatureTag)
-		// fill the signers list and shuffle it
-		for i := 0; i < n; i++ {
-			signers = append(signers, i)
-		}
-		rand.Shuffle(n, func(i, j int) {
-			signers[i], signers[j] = signers[j], signers[i]
-		})
-
-		t.Run("happy path", func(t *testing.T) {
-			// create the stateful threshold signer
-			ts, err := NewBLSThresholdSignatureInspector(pkGroup, pkShares, threshold, thresholdSignatureMessage, thresholdSignatureTag)
-			require.NoError(t, err)
-
-			// check EnoughShares
-			enough := ts.EnoughShares()
-			assert.False(t, enough)
-			var wg sync.WaitGroup
-			// create (t) signatures of the first randomly chosen signers
-			// (1 signature short of the threshold)
-			for j := 0; j < threshold; j++ {
-				wg.Add(1)
-				// test thread safety
-				go func(j int) {
-					defer wg.Done()
-					i := signers[j]
-					share, err := skShares[i].Sign(thresholdSignatureMessage, kmac)
-					require.NoError(t, err)
-					// VerifyShare
-					verif, err := ts.VerifyShare(i, share)
-					assert.NoError(t, err)
-					assert.True(t, verif, "signature should be valid")
-					// check HasShare is false
-					ok, err := ts.HasShare(i)
-					assert.NoError(t, err)
-					assert.False(t, ok)
-					// TrustedAdd
-					enough, err := ts.TrustedAdd(i, share)
-					assert.NoError(t, err)
-					assert.False(t, enough)
-					// check HasShare is true
-					ok, err = ts.HasShare(i)
-					assert.NoError(t, err)
-					assert.True(t, ok)
-					// check EnoughShares
-					assert.False(t, ts.EnoughShares(), "threshold shouldn't be reached")
-					// check ThresholdSignature
-					sig, err := ts.ThresholdSignature()
-					assert.Error(t, err)
-					assert.True(t, IsNotEnoughSharesError(err))
-					assert.Nil(t, sig)
-				}(j)
-			}
-			wg.Wait()
-			// add the last required signature to get (t+1) shares
-			i := signers[threshold]
-			share, err := skShares[i].Sign(thresholdSignatureMessage, kmac)
-			require.NoError(t, err)
-			verif, enough, err := ts.VerifyAndAdd(i, share)
-			assert.NoError(t, err)
-			assert.True(t, verif)
-			assert.True(t, enough)
-			// check EnoughShares
-			assert.True(t, ts.EnoughShares())
-
-			// add a share when threshold is reached
-			if threshold+1 < n {
-				i := signers[threshold+1]
-				share, err := skShares[i].Sign(thresholdSignatureMessage, kmac)
-				require.NoError(t, err)
-				// Trusted Add
-				enough, err := ts.TrustedAdd(i, share)
-				assert.NoError(t, err)
-				assert.True(t, enough)
-				// VerifyAndAdd
-				verif, enough, err := ts.VerifyAndAdd(i, share)
-				assert.NoError(t, err)
-				assert.True(t, verif)
-				assert.True(t, enough)
-			}
-			// reconstruct the threshold signature
-			thresholdsignature, err := ts.ThresholdSignature()
-			require.NoError(t, err)
-			// VerifyThresholdSignature
-			verif, err = ts.VerifyThresholdSignature(thresholdsignature)
-			require.NoError(t, err)
-			assert.True(t, verif)
-		})
-
-		t.Run("duplicate signer", func(t *testing.T) {
-			// create the stateful threshold signer
-			ts, err := NewBLSThresholdSignatureInspector(pkGroup, pkShares, threshold, thresholdSignatureMessage, thresholdSignatureTag)
-			require.NoError(t, err)
-
-			// Create a share and add it
-			i := rand.Intn(n)
-			share, err := skShares[i].Sign(thresholdSignatureMessage, kmac)
-			require.NoError(t,
err) - enough, err := ts.TrustedAdd(i, share) - assert.NoError(t, err) - assert.False(t, enough) - - // Add an existing share - - // VerifyAndAdd - verif, enough, err := ts.VerifyAndAdd(i, share) - assert.Error(t, err) - assert.True(t, IsDuplicatedSignerError(err)) - assert.False(t, verif) - assert.False(t, enough) - // TrustedAdd - enough, err = ts.TrustedAdd(i, share) - assert.Error(t, err) - assert.True(t, IsDuplicatedSignerError(err)) - assert.False(t, enough) - }) - - t.Run("Invalid index", func(t *testing.T) { - // create the stateful threshold signer - ts, err := NewBLSThresholdSignatureInspector(pkGroup, pkShares, threshold, thresholdSignatureMessage, thresholdSignatureTag) - require.NoError(t, err) - - share, err := skShares[0].Sign(thresholdSignatureMessage, kmac) - require.NoError(t, err) - // invalid index - invalidIndex := len(pkShares) + 1 - // VerifyShare - verif, err := ts.VerifyShare(invalidIndex, share) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.False(t, verif) - // TrustedAdd - enough, err := ts.TrustedAdd(invalidIndex, share) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.False(t, enough) - // VerifyAndAdd - verif, enough, err = ts.VerifyAndAdd(invalidIndex, share) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.False(t, verif) - assert.False(t, enough) - // HasShare - verif, err = ts.HasShare(invalidIndex) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.False(t, verif) - }) - - t.Run("invalid signature", func(t *testing.T) { - index := signers[0] - ts, err := NewBLSThresholdSignatureInspector(pkGroup, pkShares, threshold, thresholdSignatureMessage, thresholdSignatureTag) - require.NoError(t, err) - share, err := skShares[index].Sign(thresholdSignatureMessage, kmac) - require.NoError(t, err) - - // alter signature - invalid serialization - tmp := share[0] - share[0] = invalidBLSSignatureHeader - // VerifyShare - verif, err := ts.VerifyShare(index, share) - assert.NoError(t, err) - assert.False(t, verif) - // VerifyAndAdd - verif, enough, err := ts.VerifyAndAdd(index, share) - assert.NoError(t, err) - assert.False(t, verif) - assert.False(t, enough) - // check share was not added - verif, err = ts.HasShare(index) - assert.NoError(t, err) - assert.False(t, verif) - // restore share - share[0] = tmp - - // valid curve point but invalid signature - otherIndex := (index + 1) % n // otherIndex is different than index - // VerifyShare - verif, err = ts.VerifyShare(otherIndex, share) - assert.NoError(t, err) - assert.False(t, verif) - // VerifyAndAdd - verif, enough, err = ts.VerifyAndAdd(otherIndex, share) - assert.NoError(t, err) - assert.False(t, verif) - assert.False(t, enough) - // check share was not added - verif, err = ts.HasShare(otherIndex) - assert.NoError(t, err) - assert.False(t, verif) - - // trust add one invalid signature and check ThresholdSignature - tmp = share[0] - share[0] = invalidBLSSignatureHeader // alter the share - enough, err = ts.TrustedAdd(index, share) // invalid share - assert.NoError(t, err) - assert.False(t, enough) - for i := 1; i < threshold+1; i++ { // valid shares - index := signers[i] - valid, err := skShares[index].Sign(thresholdSignatureMessage, kmac) - require.NoError(t, err) - enough, err = ts.TrustedAdd(index, valid) - assert.NoError(t, err) - if i < threshold { - assert.False(t, enough) - } else { - assert.True(t, enough) - } - } - sig, err := ts.ThresholdSignature() - assert.Error(t, err) - assert.True(t, 
IsInvalidSignatureError(err)) - assert.Nil(t, sig) - share[0] = tmp // restore the share - }) - - t.Run("constructor errors", func(t *testing.T) { - // invalid keys size - index := rand.Intn(n) - pkSharesInvalid := make([]PublicKey, ThresholdSignMaxSize+1) - tsFollower, err := NewBLSThresholdSignatureInspector(pkGroup, pkSharesInvalid, threshold, thresholdSignatureMessage, thresholdSignatureTag) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.Nil(t, tsFollower) - // non BLS key share - seed := make([]byte, KeyGenSeedMinLen) - _, err = rand.Read(seed) - require.NoError(t, err) - skEcdsa, err := GeneratePrivateKey(ECDSAP256, seed) - require.NoError(t, err) - tmp := pkShares[0] - pkShares[0] = skEcdsa.PublicKey() - tsFollower, err = NewBLSThresholdSignatureInspector(pkGroup, pkShares, threshold, thresholdSignatureMessage, thresholdSignatureTag) - assert.Error(t, err) - assert.True(t, IsNotBLSKeyError(err)) - assert.Nil(t, tsFollower) - pkShares[0] = tmp // restore valid keys - // non BLS group key - tsFollower, err = NewBLSThresholdSignatureInspector(skEcdsa.PublicKey(), pkShares, threshold, thresholdSignatureMessage, thresholdSignatureTag) - assert.Error(t, err) - assert.True(t, IsNotBLSKeyError(err)) - assert.Nil(t, tsFollower) - // non BLS private key - tsParticipant, err := NewBLSThresholdSignatureParticipant(pkGroup, pkShares, threshold, index, skEcdsa, thresholdSignatureMessage, thresholdSignatureTag) - assert.Error(t, err) - assert.True(t, IsNotBLSKeyError(err)) - assert.Nil(t, tsParticipant) - // invalid current index - tsParticipant, err = NewBLSThresholdSignatureParticipant(pkGroup, pkShares, threshold, len(pkShares)+1, skShares[index], thresholdSignatureMessage, thresholdSignatureTag) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.Nil(t, tsParticipant) - // invalid threshold - tsFollower, err = NewBLSThresholdSignatureInspector(pkGroup, pkShares, len(pkShares)+1, thresholdSignatureMessage, thresholdSignatureTag) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.Nil(t, tsFollower) - // inconsistent private and public key - indexSwap := (index + 1) % n // indexSwap is different than index - pkShares[index], pkShares[indexSwap] = pkShares[indexSwap], pkShares[index] - tsParticipant, err = NewBLSThresholdSignatureParticipant(pkGroup, pkShares, len(pkShares)+1, index, skShares[index], thresholdSignatureMessage, thresholdSignatureTag) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.Nil(t, tsParticipant) - pkShares[index], pkShares[indexSwap] = pkShares[indexSwap], pkShares[index] // restore keys - }) - } -} - -// Distributed Threshold Signature stateful api test -// keys are generated using simple Feldman VSS -func testDistributedStatefulAPI_FeldmanVSS(t *testing.T) { - log.SetLevel(log.ErrorLevel) - log.Info("DKG starts") - gt = t - rand := getPRG(t) - // number of participants to test - n := 5 - lead := rand.Intn(n) // random - var sync sync.WaitGroup - chans := make([]chan *message, n) - processors := make([]testDKGProcessor, 0, n) - - // create n processors for all participants - for current := 0; current < n; current++ { - processors = append(processors, testDKGProcessor{ - current: current, - chans: chans, - protocol: dkgType, - }) - // create DKG in all participants - var err error - processors[current].dkg, err = NewFeldmanVSS(n, optimalThreshold(n), - current, &processors[current], lead) - require.NoError(t, err) - } - - // create the participant (buffered) 
communication channels
-	for i := 0; i < n; i++ {
-		chans[i] = make(chan *message, 2*n)
-	}
-	// start DKG in all participants
-	seed := make([]byte, SeedMinLenDKG)
-	read, err := rand.Read(seed)
-	require.Equal(t, read, SeedMinLenDKG)
-	require.NoError(t, err)
-	sync.Add(n)
-	for current := 0; current < n; current++ {
-		err := processors[current].dkg.Start(seed)
-		require.NoError(t, err)
-		go tsDkgRunChan(&processors[current], &sync, t, 2)
-	}
-
-	// synchronize the main thread to end DKG
-	sync.Wait()
-	for i := 1; i < n; i++ {
-		assert.True(t, processors[i].pk.Equals(processors[0].pk), "2 group public keys are mismatching")
-	}
-
-	// Start TS
-	log.Info("TS starts")
-	sync.Add(n)
-	for i := 0; i < n; i++ {
-		go tsRunChan(&processors[i], &sync, t)
-	}
-	// synchronize the main thread to end TS
-	sync.Wait()
-}
-
-// Distributed Threshold Signature stateful API test
-// keys are generated using Joint-Feldman
-func testDistributedStatefulAPI_JointFeldman(t *testing.T) {
-	log.SetLevel(log.ErrorLevel)
-	log.Info("DKG starts")
-	gt = t
-	rand := getPRG(t)
-	// number of participants to test
-	n := 5
-	for threshold := MinimumThreshold; threshold < n; threshold++ {
-		var sync sync.WaitGroup
-		chans := make([]chan *message, n)
-		processors := make([]testDKGProcessor, 0, n)
-
-		// create n processors for all participants
-		for current := 0; current < n; current++ {
-			processors = append(processors, testDKGProcessor{
-				current:  current,
-				chans:    chans,
-				protocol: dkgType,
-			})
-			// create DKG in all participants
-			var err error
-			processors[current].dkg, err = NewJointFeldman(n,
-				optimalThreshold(n), current, &processors[current])
-			require.NoError(t, err)
-		}
-
-		// create the participant (buffered) communication channels
-		for i := 0; i < n; i++ {
-			chans[i] = make(chan *message, 2*n)
-		}
-		// start DKG in all participants
-		seed := make([]byte, SeedMinLenDKG)
-		read, err := rand.Read(seed)
-		require.Equal(t, read, SeedMinLenDKG)
-		require.NoError(t, err)
-		sync.Add(n)
-		for current := 0; current < n; current++ {
-			err := processors[current].dkg.Start(seed)
-			require.NoError(t, err)
-			go tsDkgRunChan(&processors[current], &sync, t, 0)
-		}
-
-		// sync the 2 timeouts at all participants and start the next phase
-		for phase := 1; phase <= 2; phase++ {
-			sync.Wait()
-			sync.Add(n)
-			for current := 0; current < n; current++ {
-				go tsDkgRunChan(&processors[current], &sync, t, phase)
-			}
-		}
-
-		// synchronize the main thread to end DKG
-		sync.Wait()
-		for i := 1; i < n; i++ {
-			assert.True(t, processors[i].pk.Equals(processors[0].pk),
-				"2 group public keys are mismatching")
-		}
-
-		// Start TS
-		log.Info("TS starts")
-		sync.Add(n)
-		for current := 0; current < n; current++ {
-			go tsRunChan(&processors[current], &sync, t)
-		}
-		// synchronize the main thread to end TS
-		sync.Wait()
-	}
-}
-
-// This is a testing function
-// It simulates processing incoming messages by a participant during DKG
-// It assumes proc.dkg is already running
-func tsDkgRunChan(proc *testDKGProcessor,
-	sync *sync.WaitGroup, t *testing.T, phase int) {
-	for {
-		select {
-		case newMsg := <-proc.chans[proc.current]:
-			log.Debugf("%d Receiving DKG from %d:", proc.current, newMsg.orig)
-			if newMsg.channel == private {
-				err := proc.dkg.HandlePrivateMsg(newMsg.orig, newMsg.data)
-				require.Nil(t, err)
-			} else {
-				err := proc.dkg.HandleBroadcastMsg(newMsg.orig, newMsg.data)
-				require.Nil(t, err)
-			}
-
-		// if timeout, finalize DKG and create the threshold signer
-		case <-time.After(200 * time.Millisecond):
-			switch
phase {
-			case 0:
-				log.Infof("%d shares phase ended \n", proc.current)
-				err := proc.dkg.NextTimeout()
-				require.NoError(t, err)
-			case 1:
-				log.Infof("%d complaints phase ended \n", proc.current)
-				err := proc.dkg.NextTimeout()
-				require.NoError(t, err)
-			case 2:
-				log.Infof("%d dkg ended \n", proc.current)
-				sk, groupPK, nodesPK, err := proc.dkg.End()
-				require.NotNil(t, sk)
-				require.NotNil(t, groupPK)
-				require.NotNil(t, nodesPK)
-				require.Nil(t, err, "End dkg failed: %v\n", err)
-				proc.pk = groupPK
-				n := proc.dkg.Size()
-				proc.ts, err = NewBLSThresholdSignatureParticipant(groupPK, nodesPK, optimalThreshold(n), proc.current, sk, thresholdSignatureMessage, thresholdSignatureTag)
-				require.NoError(t, err)
-				// needed to test the stateless API
-				proc.keys = &statelessKeys{sk, groupPK, nodesPK}
-			}
-			sync.Done()
-			return
-		}
-	}
-}
-
-// This is a testing function using the stateful API
-// It simulates processing incoming messages by a participant during TS
-func tsRunChan(proc *testDKGProcessor, sync *sync.WaitGroup, t *testing.T) {
-	// Sign a share and broadcast it
-	sigShare, err := proc.ts.SignShare()
-	proc.protocol = tsType
-	if err != nil { // not using require.Nil for now
-		panic(fmt.Sprintf("%d couldn't sign", proc.current))
-	}
-	proc.Broadcast(sigShare)
-	for {
-		select {
-		case newMsg := <-proc.chans[proc.current]:
-			log.Debugf("%d Receiving TS from %d:", proc.current, newMsg.orig)
-			verif, enough, err := proc.ts.VerifyAndAdd(
-				newMsg.orig, newMsg.data)
-			require.NoError(t, err)
-			assert.True(t, verif,
-				"the signature share sent from %d to %d is not correct", newMsg.orig,
-				proc.current)
-			log.Info(enough)
-			if enough {
-				assert.Equal(t, enough, proc.ts.EnoughShares())
-				thresholdSignature, err := proc.ts.ThresholdSignature()
-				require.NoError(t, err)
-				verif, err = proc.ts.VerifyThresholdSignature(thresholdSignature)
-				require.NoError(t, err)
-				assert.True(t, verif, "the threshold signature is not correct")
-				if verif {
-					log.Infof("%d reconstructed a valid signature: %x\n", proc.current,
-						thresholdSignature)
-				}
-			}
-
-		// if timeout, finalize TS
-		case <-time.After(time.Second):
-			sync.Done()
-			return
-		}
-	}
-}
-
-// This structure holds the keys and is needed for the stateless test
-type statelessKeys struct {
-	// the current participant private key (a DKG output)
-	myPrivateKey PrivateKey
-	// the group public key (a DKG output)
-	groupPublicKey PublicKey
-	// the group public key shares (a DKG output)
-	publicKeyShares []PublicKey
-}
-
-// Centralized test of threshold signature protocol using the threshold key generation.
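For contrast with the stateful inspector shown earlier, a sketch of the stateless path the next test takes (package context assumed; the helper name `reconstructAndCheck` is hypothetical):

```go
// reconstructAndCheck hands (threshold+1) shares with their signer indices
// to the stateless reconstruction, then verifies the result itself: unlike
// the inspector, BLSReconstructThresholdSignature does not check share
// validity and does not post-verify the reconstructed signature.
func reconstructAndCheck(n, threshold int, shares []Signature, signers []int,
	groupPK PublicKey, msg []byte, kmac hash.Hasher) (bool, error) {
	thresholdSig, err := BLSReconstructThresholdSignature(n, threshold, shares, signers)
	if err != nil {
		return false, err // e.g. duplicated signers or too few shares
	}
	return groupPK.Verify(thresholdSig, msg, kmac)
}
```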
-func testCentralizedStatelessAPI(t *testing.T) {
-	rand := getPRG(t)
-	n := 10
-	for threshold := MinimumThreshold; threshold < n; threshold++ {
-		// generate threshold keys
-		seed := make([]byte, SeedMinLenDKG)
-		_, err := rand.Read(seed)
-		require.NoError(t, err)
-		skShares, pkShares, pkGroup, err := BLSThresholdKeyGen(n, threshold, seed)
-		require.NoError(t, err)
-		// signature hasher
-		kmac := NewExpandMsgXOFKMAC128(thresholdSignatureTag)
-		// generate signature shares
-		signShares := make([]Signature, 0, n)
-		signers := make([]int, 0, n)
-		// fill the signers list and shuffle it
-		for i := 0; i < n; i++ {
-			signers = append(signers, i)
-		}
-		rand.Shuffle(n, func(i, j int) {
-			signers[i], signers[j] = signers[j], signers[i]
-		})
-		// create (t+1) signatures of the first randomly chosen signers
-		for j := 0; j < threshold+1; j++ {
-			i := signers[j]
-			share, err := skShares[i].Sign(thresholdSignatureMessage, kmac)
-			require.NoError(t, err)
-			verif, err := pkShares[i].Verify(share, thresholdSignatureMessage, kmac)
-			require.NoError(t, err)
-			assert.True(t, verif, "signature share is not valid")
-			if verif {
-				signShares = append(signShares, share)
-			}
-		}
-		// reconstruct and test the threshold signature
-		thresholdSignature, err := BLSReconstructThresholdSignature(n, threshold, signShares, signers[:threshold+1])
-		require.NoError(t, err)
-		verif, err := pkGroup.Verify(thresholdSignature, thresholdSignatureMessage, kmac)
-		require.NoError(t, err)
-		assert.True(t, verif, "threshold signature is not valid")
-
-		// check failure with a random redundant signer
-		if threshold > 1 {
-			randomDuplicate := rand.Intn(int(threshold)) + 1 // 1 <= duplicate <= threshold
-			tmp := signers[randomDuplicate]
-			signers[randomDuplicate] = signers[0]
-			thresholdSignature, err = BLSReconstructThresholdSignature(n, threshold, signShares, signers[:threshold+1])
-			assert.Error(t, err)
-			assert.True(t, IsDuplicatedSignerError(err))
-			assert.Nil(t, thresholdSignature)
-			signers[randomDuplicate] = tmp
-		}
-
-		// check with an invalid signature (invalid serialization)
-		invalidSig := make([]byte, signatureLengthBLSBLS12381)
-		signShares[0] = invalidSig
-		thresholdSignature, err = BLSReconstructThresholdSignature(n, threshold, signShares, signers[:threshold+1])
-		assert.Error(t, err)
-		assert.True(t, IsInvalidSignatureError(err))
-		assert.Nil(t, thresholdSignature)
-	}
-}
-
-func BenchmarkSimpleKeyGen(b *testing.B) {
-	n := 60
-	seed := make([]byte, SeedMinLenDKG)
-	_, err := crand.Read(seed)
-	require.NoError(b, err)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		_, _, _, _ = BLSThresholdKeyGen(n, optimalThreshold(n), seed)
-	}
-	b.StopTimer()
-}
diff --git a/crypto/build_dependency.sh b/crypto/build_dependency.sh
deleted file mode 100644
index 4bfe99dbad2..00000000000
--- a/crypto/build_dependency.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash

-set -euo pipefail
-
-PKG_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-RELIC_DIR_NAME="relic"
-RELIC_DIR="${PKG_DIR}/${RELIC_DIR_NAME}"
-
-# grant permissions if not existent
-if [[ ! -r ${PKG_DIR} || ! -w ${PKG_DIR} || ! -x ${PKG_DIR} ]]; then
-	chmod -R 755 "${PKG_DIR}"
-fi
-
-rm -rf "${RELIC_DIR}"
-
-# relic version or tag
-relic_version="7d885d1ba34be61bf22190943a73549a910c1714"
-
-# clone a specific version of Relic without history if it's tagged.
-# git -c http.sslVerify=true clone --branch ${relic_version} --single-branch --depth 1 https://github.com/relic-toolkit/relic.git ${RELIC_DIR_NAME} || { echo "git clone failed"; exit 1; } - -# clone all the history if the version is only defined by a commit hash. -git -c http.sslVerify=true clone --branch main --single-branch https://github.com/relic-toolkit/relic.git ${RELIC_DIR_NAME} || { echo "git clone failed"; exit 1; } - -if [ -d "${RELIC_DIR}" ] -then - ( - cd ${RELIC_DIR_NAME} || { echo "cd relic failed"; exit 1; } - git checkout $relic_version - ) - # build relic - bash relic_build.sh -else - { echo "couldn't find relic directory"; exit 1; } -fi - diff --git a/crypto/common.go b/crypto/common.go deleted file mode 100644 index f476de92e3f..00000000000 --- a/crypto/common.go +++ /dev/null @@ -1,100 +0,0 @@ -package crypto - -import ( - "crypto/rand" - "errors" - "fmt" -) - -//revive:disable:var-naming - -// the `go generate` command requires bash scripting, `cmake` and `git`. -//go:generate bash ./build_dependency.sh - -const ( - // Minimum targeted bits of security. - // This is used as a reference but it doesn't mean all implemented primitives provide this minimum. - securityBits = 128 - - // keygen seed length conditions - // enforce seed to be at least double the security bits and have enough entropy. - // It is still recommended that the seed is generated using a secure RNG. - KeyGenSeedMinLen = 2 * (securityBits / 8) - KeyGenSeedMaxLen = 256 - - // max relic PRG seed length in bytes - maxRelicPrgSeed = 1 << 32 -) - -// TODO: update this code to make sure - // the function isn't removed by the compiler - // https://github.com/golang/go/issues/21865 -func overwrite(data []byte) { - _, err := rand.Read(data) // checking err is enough - if err != nil { - // zero the buffer if randomizing failed - for i := 0; i < len(data); i++ { - data[i] = 0 - } - } -} - -// invalidInputsError is an error returned when a crypto API receives invalid inputs. -// It allows a function caller to differentiate unexpected program errors from errors caused by - // invalid inputs. -type invalidInputsError struct { - error -} - -func (e invalidInputsError) Unwrap() error { - return e.error -} - -// invalidInputsErrorf constructs a new invalidInputsError -func invalidInputsErrorf(msg string, args ...interface{}) error { - return &invalidInputsError{ - error: fmt.Errorf(msg, args...), - } -} - -// IsInvalidInputsError checks if the input error is of an invalidInputsError type -// invalidInputsError is returned when the API is provided invalid inputs. -// Some specific errors are assigned specific sentinel errors for a simpler error check -// while the remaining input errors trigger an invalidInputsError. -func IsInvalidInputsError(err error) bool { - var target *invalidInputsError - return errors.As(err, &target) -} - -var nilHasherError = errors.New("hasher cannot be nil") - -// IsNilHasherError checks if the input error wraps a nilHasherError. -// nilHasherError is returned when a nil hasher is used. -func IsNilHasherError(err error) bool { - return errors.Is(err, nilHasherError) -}
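A hedged usage sketch of the error classifiers above. The GeneratePrivateKey entry point and the BLSBLS12381 constant are assumptions about the surrounding package, not shown in this diff:

package main

import (
	"fmt"

	"github.com/onflow/flow-go/crypto"
)

func main() {
	// deliberately shorter than KeyGenSeedMinLen to trigger an input error
	seed := make([]byte, 10)
	_, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed)
	if crypto.IsInvalidInputsError(err) {
		fmt.Println("caller mistake:", err) // fix the inputs, do not retry
	} else if err != nil {
		fmt.Println("unexpected failure:", err)
	}
}

The point of the typed errors is exactly this split: input errors are caller bugs, while anything else is an unexpected program error.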
-// invalidHasherSizeError is an error returned when a crypto API is called with a hasher -// with an output size not suited to the cryptographic operation. -type invalidHasherSizeError struct { - error -} - -func (e invalidHasherSizeError) Unwrap() error { - return e.error -} - -// invalidHasherSizeErrorf constructs a new invalidHasherSizeError -func invalidHasherSizeErrorf(msg string, args ...interface{}) error { - return &invalidHasherSizeError{ - error: fmt.Errorf(msg, args...), - } -} - -// IsInvalidHasherSizeError checks if the input error is of an invalidHasherSizeError type. -// invalidHasherSizeError is an error returned when a crypto API is called with a hasher -// with an output size not suited to the cryptographic operation. -func IsInvalidHasherSizeError(err error) bool { - var target *invalidHasherSizeError - return errors.As(err, &target) -} diff --git a/crypto/dkg.go b/crypto/dkg.go deleted file mode 100644 index 6e74f3d54a5..00000000000 --- a/crypto/dkg.go +++ /dev/null @@ -1,237 +0,0 @@ -package crypto - -import ( - "errors" - "fmt" -) - -// DKG stands for distributed key generation. In this library, DKG -// refers to discrete-log based protocols. -// The protocols implemented in the package for now generate keys for a BLS-based -// threshold signature scheme. -// BLS is used with the BLS12-381 curve. -// -// These protocols mainly generate a BLS key pair and share the secret key -// among (n) participants in a way that any (t+1) key shares allow reconstructing -// the initial key (and also reconstructing a BLS threshold signature under the initial key). -// Up to (t) shares don't reveal any information about the initial key (or a signature generated -// by that key). -// -// We refer to the initial key pair by group private and group public key. -// (t) is the threshold parameter. -// Flow uses DKG with the value t = floor((n-1)/2) to optimize for unforgeability and robustness -// of the threshold signature scheme using the output keys. -// -// Private keys are scalar in Zr, where r is the group order of G1/G2. -// Public keys are in G2. - -const ( - // DKG and Threshold Signatures - - // MinimumThreshold is the minimum value of the threshold parameter in all threshold-based protocols. - MinimumThreshold = 1 - // DKGMinSize is the minimum size of a group participating in a DKG protocol - DKGMinSize int = MinimumThreshold + 1 - // DKGMaxSize is the maximum size of a group participating in a DKG protocol - DKGMaxSize int = 254 - // SeedMinLenDKG is the minimum seed length required to participate in a DKG protocol - SeedMinLenDKG = securityBits / 8 - SeedMaxLenDKG = maxRelicPrgSeed -) - -type DKGState interface { - // Size returns the size of the DKG group n - Size() int - // Threshold returns the threshold value t - Threshold() int - // Start starts running a DKG in the current participant - Start(seed []byte) error - // HandleBroadcastMsg processes a new broadcasted message received by the current participant. - // orig is the message origin index - HandleBroadcastMsg(orig int, msg []byte) error - // HandlePrivateMsg processes a new private message received by the current participant. - // orig is the message origin index - HandlePrivateMsg(orig int, msg []byte) error - // End ends a DKG protocol in the current participant. - // It returns the finalized public data and participant private key share.
- // - the group public key corresponding to the group secret key - // - all the public key shares corresponding to the participants private - // key shares - // - the finalized private key which is the current participant's own private key share - End() (PrivateKey, PublicKey, []PublicKey, error) - // NextTimeout sets the next timeout of the protocol if any timeout applies. - // Some protocols could require more than one timeout - NextTimeout() error - // Running returns the running state of the DKG protocol - Running() bool - // ForceDisqualify forces a participant to get disqualified - // for a reason outside of the DKG protocol. - // The caller should make sure all honest participants call this function, - // otherwise, the protocol can be broken. - ForceDisqualify(participant int) error -} - -// dkgFailureError is an error returned when a participant -// detects a failure in the protocol and is not able to compute output keys. -// Such a failure can be local and only depends on the participant's view of what -// happened in the protocol. The error can only be returned using the End() function. -type dkgFailureError struct { - error -} - -// dkgFailureErrorf constructs a new dkgFailureError -func dkgFailureErrorf(msg string, args ...interface{}) error { - return &dkgFailureError{ - error: fmt.Errorf(msg, args...), - } -} - -// IsDKGFailureError checks if the input error is of a dkgFailureError type. -// dkgFailureError is an error returned when a participant -// detects a failure in the protocol and is not able to compute output keys. -func IsDKGFailureError(err error) bool { - var target *dkgFailureError - return errors.As(err, &target) -} - -type dkgInvalidStateTransitionError struct { - error -} - -func (e dkgInvalidStateTransitionError) Unwrap() error { - return e.error -} - -// dkgInvalidStateTransitionErrorf constructs a new dkgInvalidStateTransitionError -func dkgInvalidStateTransitionErrorf(msg string, args ...interface{}) error { - return &dkgInvalidStateTransitionError{ - error: fmt.Errorf(msg, args...), - } -}
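A hedged lifecycle sketch tying the DKGState interface together. Message transport and timeout scheduling are the caller's responsibility and are elided here; the two NextTimeout calls match the shares and complaints phases used by the Feldman VSS Qual and Joint-Feldman implementations below:

// runDKG drives one participant through a full protocol run.
func runDKG(dkg crypto.DKGState, seed []byte) (crypto.PrivateKey, crypto.PublicKey, []crypto.PublicKey, error) {
	if err := dkg.Start(seed); err != nil {
		return nil, nil, nil, err
	}
	// ... feed incoming messages via dkg.HandleBroadcastMsg / dkg.HandlePrivateMsg ...
	if err := dkg.NextTimeout(); err != nil { // end of the shares phase
		return nil, nil, nil, err
	}
	// ... keep feeding complaint messages ...
	if err := dkg.NextTimeout(); err != nil { // end of the complaints phase
		return nil, nil, nil, err
	}
	return dkg.End()
}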
-// IsDKGInvalidStateTransitionError checks if the input error is of a dkgInvalidStateTransitionError type. -// dkgInvalidStateTransitionError is returned when a caller -// triggers an invalid state transition in the local DKG instance. -// Such a failure can only happen if the API is misused by not respecting -// the state machine conditions. -func IsDKGInvalidStateTransitionError(err error) bool { - var target *dkgInvalidStateTransitionError - return errors.As(err, &target) -} - -// index is the node index type used as participants ID -type index byte - -// newDKGCommon initializes the common structure of DKG protocols -func newDKGCommon(size int, threshold int, myIndex int, - processor DKGProcessor, dealerIndex int) (*dkgCommon, error) { - if size < DKGMinSize || size > DKGMaxSize { - return nil, invalidInputsErrorf( - "size should be between %d and %d", - DKGMinSize, - DKGMaxSize) - } - - if myIndex >= size || dealerIndex >= size || myIndex < 0 || dealerIndex < 0 { - return nil, invalidInputsErrorf( - "indices of current and dealer nodes must be between 0 and %d, got %d and %d", - size-1, - myIndex, - dealerIndex) - } - - if threshold >= size || threshold < MinimumThreshold { - return nil, invalidInputsErrorf( - "the threshold must be between %d and %d, got %d", - MinimumThreshold, - size-1, - threshold) - } - - return &dkgCommon{ - size: size, - threshold: threshold, - myIndex: index(myIndex), - processor: processor, - }, nil -} - -// dkgCommon holds the common data of all DKG protocols -type dkgCommon struct { - size int - threshold int - myIndex index - // running is true when the DKG protocol is running, is false otherwise - running bool - // processes the action of the DKG interface outputs - processor DKGProcessor -} - -// Running returns the running state of the DKG protocol. -// The state is equal to true when the DKG protocol is running, and is equal to false otherwise. -func (s *dkgCommon) Running() bool { - return s.running -} - -// Size returns the size of the DKG group n -func (s *dkgCommon) Size() int { - return s.size -} - -// Threshold returns the threshold value t -func (s *dkgCommon) Threshold() int { - return s.threshold -} - -// NextTimeout sets the next protocol timeout if there is any. -// This function should be overwritten by any protocol that uses timeouts. -func (s *dkgCommon) NextTimeout() error { - return nil -} - -// dkgMsgTag is the type used to encode message tags -type dkgMsgTag byte - -const ( - feldmanVSSShare dkgMsgTag = iota - feldmanVSSVerifVec - feldmanVSSComplaint - feldmanVSSComplaintAnswer -) - -// DKGProcessor is an interface that implements the DKG output actions. -// -// An instance of a DKGProcessor is needed for each participant in order to -// participate in a DKG protocol -type DKGProcessor interface { - // PrivateSend sends a message to a destination over - // a private channel. The channel must preserve the - // confidentiality of the message and should authenticate - // the sender. - // It is recommended that the private channel is unique per - // protocol instance. This can be achieved by prepending all - // messages by a unique instance ID. - PrivateSend(dest int, data []byte) - // Broadcast broadcasts a message to all participants. - // This function assumes all participants have received the same message, - // failing to do so, the protocol can be broken. - // The broadcasted message is public and not confidential. - // The broadcasting channel should authenticate the sender. - // It is recommended that the broadcasting channel is unique per - // protocol instance. This can be achieved by prepending all - // messages by a unique instance ID. - Broadcast(data []byte) - // Disqualify flags that a participant is misbehaving and has been - // disqualified from the protocol. Such behavior deserves - // disqualification, as it is flagged to all honest participants in - // the protocol.
- // log describes the disqualification reason. - Disqualify(participant int, log string) - // FlagMisbehavior warns that a participant is misbehaving. - // Such behavior is not necessarily flagged to all participants and therefore - // the participant is not disqualified from the protocol. Other mechanisms - // outside DKG could be implemented to synchronize slashing the misbehaving - // participant by all participants, using the API `ForceDisqualify`. Failing to - // do so, the protocol can be broken. - // log describes the misbehavior. - FlagMisbehavior(participant int, log string) -} diff --git a/crypto/dkg_core.c b/crypto/dkg_core.c deleted file mode 100644 index 3a2bce01559..00000000000 --- a/crypto/dkg_core.c +++ /dev/null @@ -1,127 +0,0 @@ -// +build relic - -#include "dkg_include.h" - - -#define N_max 250 -#define N_bits_max 8 // log(250) -#define T_max ((N_max-1)/2) - -// computes P(x) = a_0 + a_1*x + .. + a_n x^n (mod r) -// r being the order of G1 -// writes P(x) in out and P(x).g2 in y if y is non NULL -// x being a small integer -void Zr_polynomialImage_export(byte* out, ep2_t y, const bn_st* a, const int a_size, const byte x){ - bn_t image; - bn_new(image); - Zr_polynomialImage(image, y, a, a_size, x); - // exports the result - const int out_size = Fr_BYTES; - bn_write_bin(out, out_size, image); - bn_free(image); -} - -// computes P(x) = a_0 + a_1*x + .. + a_n x^n (mod r) -// r being the order of G1 -// writes P(x) in out and P(x).g2 in y if y is non NULL -// x being a small integer -void Zr_polynomialImage(bn_t image, ep2_t y, const bn_st *a, const int a_size, const byte x){ - bn_t r; - bn_new(r); - g2_get_ord(r); - - // temp variables - bn_t acc; - bn_new(acc); - bn_new_size(acc, BITS_TO_DIGITS(Fr_BITS+8+1)); - bn_set_dig(acc, 0); - - for (int i=a_size-1; i >= 0; i--) { - bn_mul_dig(acc, acc, x); - // Use basic reduction as it's a 9-bit reduction - // in the worst case (|acc| < |r|+9) - bn_mod_basic(acc, acc, r); - bn_add(acc, acc, &a[i]); - } - // export the result - bn_mod_basic(image, acc, r); - - // compute y = P(x).g2 - if (y) g2_mul_gen(y, acc); - - bn_free(acc); - bn_free(r); -} - -// computes Q(x) = A_0 + A_1*x + ... + A_n*x^n in G2 -// and stores the point in y -// r is the order of G2 -static void G2_polynomialImage(ep2_t y, const ep2_st* A, const int len_A, - const byte x, const bn_t r){ - - bn_t bn_x; - bn_new(bn_x); - ep2_set_infty(y); - bn_set_dig(bn_x, x); - for (int i = len_A-1; i >= 0 ; i--) { - ep2_mul_lwnaf(y, y, bn_x); - ep2_add_projc(y, y, (ep2_st*)&A[i]); - } - - ep2_norm(y, y); // not necessary but left here to optimize the - // multiple pairing computations with the same public key - bn_free(bn_x); -} -
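For readers less used to the Relic API, here is a hedged Go rendering of the Horner evaluation performed by Zr_polynomialImage, using math/big in place of bn_t purely for illustration (r stands for the BLS12-381 group order):

import "math/big"

// polynomialImage computes P(x) = a_0 + a_1*x + ... + a_t*x^t (mod r)
// by Horner's rule, mirroring the C loop above.
func polynomialImage(a []*big.Int, x int64, r *big.Int) *big.Int {
	acc := new(big.Int)
	for i := len(a) - 1; i >= 0; i-- {
		acc.Mul(acc, big.NewInt(x)) // acc = acc*x
		acc.Add(acc, a[i])          // acc = acc + a_i
		acc.Mod(acc, r)             // reduce mod r
	}
	return acc
}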
-// compute the participants public keys from the verification vector -// y[i] = Q(i+1) for all participants i, with: -// Q(x) = A_0 + A_1*x + ... + A_n*x^n in G2 -void G2_polynomialImages(ep2_st *y, const int len_y, const ep2_st* A, const int len_A) { - // order r - bn_t r; - bn_new(r); - g2_get_ord(r); - for (byte i=0; i<len_y; i++) { - //y[i] = Q(i+1) - G2_polynomialImage(y+i , A, len_A, i+1, r); - } - bn_free(r); -} - -// export an array of ep2_st into an array of bytes - // the length matching is supposed to be checked -void ep2_vector_write_bin(byte* out, const ep2_st* A, const int len) { - const int size = (G2_BYTES/(G2_SERIALIZATION+1)); - byte* p = out; - for (int i=0; i<len; i++){ - ep2_write_bin_compact(p, &A[i], size); - p += size; - } -} - -// The function imports an array of ep2_st from an array of bytes -// the length matching is supposed to be already done. -// -// It returns RLC_OK if reading all the vector succeeded and RLC_ERR -// otherwise. -int ep2_vector_read_bin(ep2_st* A, const byte* src, const int len){ - const int size = (G2_BYTES/(G2_SERIALIZATION+1)); - byte* p = (byte*) src; - for (int i=0; i<len; i++){ - int read_ret = ep2_read_bin_compact(&A[i], p, size); // returns RLC_OK or RLC_ERR - if (read_ret != RLC_OK) - return read_ret; - p += size; - } - return RLC_OK; -} - -// returns 1 if g2^x = y, where g2 is the generator of G2 -// returns 0 otherwise -int verifyshare(const bn_t x, const ep2_t y) { - ep2_t res; - ep2_new(res); - g2_mul_gen(res, (bn_st*)x); - return (ep2_cmp(res, (ep2_st*)y) == RLC_EQ); -} - diff --git a/crypto/dkg_feldmanvss.go b/crypto/dkg_feldmanvss.go deleted file mode 100644 index 451c1b8a180..00000000000 --- a/crypto/dkg_feldmanvss.go +++ /dev/null @@ -1,457 +0,0 @@ -//go:build relic -// +build relic - -package crypto - -// #cgo CFLAGS: -g -Wall -std=c99 -// #include "dkg_include.h" -import "C" - -import ( - "fmt" -) - -// Implements Feldman Verifiable Secret Sharing using -// the BLS set up on the BLS12-381 curve. - -// The secret is a BLS private key generated by a single dealer -// (and hence this is a centralized generation). -// The dealer generates key shares for a BLS-based -// threshold signature scheme and distributes the shares over the (n) -// participants including itself. The participants validate their shares -// using a public verification vector shared by the dealer. - -// Private keys are scalar in Zr, where r is the group order of G1/G2 -// Public keys are in G2. - -// feldman VSS protocol, implements DKGState -type feldmanVSSstate struct { - // common DKG state - *dkgCommon - // participant index - dealerIndex index - // Polynomial P = a_0 + a_1*x + .. + a_t*x^t in Zr[X], the vector size is (t+1) - // a_0 is the group private key - a []scalar - // Public vector of the group, the vector size is (t+1) - // A_0 is the group public key - vA []pointG2 - vAReceived bool - // Private share of the current participant - x scalar - xReceived bool - // Public keys of the group participants, the vector size is (n) - y []pointG2 - // true if the private share is valid - validKey bool -} - -// NewFeldmanVSS creates a new instance of Feldman VSS protocol. -// -// An instance is run by a single participant and is usable for only one protocol.
-// In order to run the protocol again, a new instance needs to be created. -// -// The function returns: -// - (nil, InvalidInputsError) if: -// - size is not in [DKGMinSize, DKGMaxSize] -// - threshold is not in [MinimumThreshold, size-1] -// - myIndex is not in [0, size-1] -// - dealerIndex is not in [0, size-1] -// -// - (dkgInstance, nil) otherwise -func NewFeldmanVSS(size int, threshold int, myIndex int, - processor DKGProcessor, dealerIndex int) (DKGState, error) { - - common, err := newDKGCommon(size, threshold, myIndex, processor, dealerIndex) - if err != nil { - return nil, err - } - - fvss := &feldmanVSSstate{ - dkgCommon: common, - dealerIndex: index(dealerIndex), - } - fvss.init() - return fvss, nil -} - -func (s *feldmanVSSstate) init() { - // set the bls context - blsInstance.reInit() - s.running = false - s.y = nil - s.xReceived = false - s.vAReceived = false - C.bn_new_wrapper((*C.bn_st)(&s.x)) -} - -// Start triggers the protocol start for the current participant. -// If the current participant is the dealer, then the seed is used -// to generate the secret polynomial (including the group private key). -// If the current participant is not the dealer, the seed is ignored. -// -// The function returns: -// - dkgInvalidStateTransitionError if the DKG instance is already running. -// - error if an unexpected exception occurs -// - nil otherwise -func (s *feldmanVSSstate) Start(seed []byte) error { - if s.running { - return dkgInvalidStateTransitionErrorf("dkg is already running") - } - - s.running = true - // Generate shares if necessary - if s.dealerIndex == s.myIndex { - return s.generateShares(seed) - } - return nil -} - -// End finalizes the protocol in the current node. -// It returns the finalized public data and the participant's private key share: -// - the group public key corresponding to the group secret key -// - all the public key shares corresponding to the participants private -// key shares. -// - the finalized private key which is the current participant's own private key share -// -// The returned error is: -// - dkgInvalidStateTransitionError if the DKG instance was not running. -// - dkgFailureError if the private key and vector are inconsistent. -// - dkgFailureError if the public key share or group public key is identity. -// - nil otherwise. -func (s *feldmanVSSstate) End() (PrivateKey, PublicKey, []PublicKey, error) { - if !s.running { - return nil, nil, nil, dkgInvalidStateTransitionErrorf("dkg is not running") - } - s.running = false - if !s.validKey { - return nil, nil, nil, dkgFailureErrorf("received private key is invalid") - } - // private key of the current participant - x := newPrKeyBLSBLS12381(&s.x) - - // Group public key - Y := newPubKeyBLSBLS12381(&s.vA[0]) - - // The participants public keys - y := make([]PublicKey, s.size) - for i, p := range s.y { - y[i] = newPubKeyBLSBLS12381(&p) - } - - // check if current public key share or group public key is identity. - // In that case all signatures generated by the key are invalid (as stated by the BLS IETF draft) - // to avoid equivocation issues.
- if (&s.x).isZero() { - return nil, nil, nil, dkgFailureErrorf("received private key is identity and is therefore invalid") - } - if Y.isIdentity { - return nil, nil, nil, dkgFailureErrorf("group private key is identity and is therefore invalid") - } - return x, Y, y, nil -} - -const ( - shareSize = PrKeyLenBLSBLS12381 - // the actual verifVectorSize depends on the state and is: - // PubKeyLenBLSBLS12381*(t+1) - verifVectorSize = PubKeyLenBLSBLS12381 -) - -// HandleBroadcastMsg processes a new broadcasted message received by the current participant. -// `orig` is the message origin index. -// -// The function returns: -// - dkgInvalidStateTransitionError if the instance is not running -// - invalidInputsError if `orig` is not valid (in [0, size-1]) -// - nil otherwise -func (s *feldmanVSSstate) HandleBroadcastMsg(orig int, msg []byte) error { - if !s.running { - return dkgInvalidStateTransitionErrorf("dkg is not running") - } - if orig >= s.Size() || orig < 0 { - return invalidInputsErrorf( - "wrong origin input, should be less than %d, got %d", - s.Size(), - orig) - } - - // In case a message is received by the origin participant, - // the message is just ignored - if s.myIndex == index(orig) { - return nil - } - - if len(msg) == 0 { - s.processor.Disqualify(orig, "the received broadcast is empty") - return nil - } - - // msg = |tag| Data | - if dkgMsgTag(msg[0]) == feldmanVSSVerifVec { - s.receiveVerifVector(index(orig), msg[1:]) - } else { - s.processor.Disqualify(orig, - fmt.Sprintf("the broadcast header is invalid, got %d", - dkgMsgTag(msg[0]))) - } - return nil -} - -// HandlePrivateMsg processes a new private message received by the current participant. -// `orig` is the message origin index. -// -// The function returns: -// - dkgInvalidStateTransitionError if the instance is not running -// - invalidInputsError if `orig` is not valid (in [0, size-1]) -// - nil otherwise -func (s *feldmanVSSstate) HandlePrivateMsg(orig int, msg []byte) error { - if !s.running { - return dkgInvalidStateTransitionErrorf("dkg is not running") - } - - if orig >= s.Size() || orig < 0 { - return invalidInputsErrorf( - "wrong origin, should be non-negative and less than %d, got %d", - s.Size(), - orig) - } - - // In case a private message is received by the origin participant, - // the message is just ignored - if s.myIndex == index(orig) { - return nil - } - - // forward received message to receiveShare because private messages - // can only be private shares - // msg = |tag| Data | - s.receiveShare(index(orig), msg) - - return nil -} - -// ForceDisqualify forces a participant to get disqualified -// for a reason outside of the DKG protocol. -// The caller should make sure all honest participants call this function, -// otherwise, the protocol can be broken. -// -// The function returns: -// - dkgInvalidStateTransitionError if the instance is not running -// - invalidInputsError if `orig` is not valid (in [0, size-1]) -// - nil otherwise -func (s *feldmanVSSstate) ForceDisqualify(participant int) error { - if !s.running { - return dkgInvalidStateTransitionErrorf("dkg is not running") - } - if participant >= s.Size() || participant < 0 { - return invalidInputsErrorf( - "wrong origin input, should be less than %d, got %d", - s.Size(), - participant) - } - if index(participant) == s.dealerIndex { - s.validKey = false - } - return nil -} -
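The handlers above expect a fixed wire format. A hedged framing sketch of the dealer's two message types, matching the generateShares implementation below; the tag constants and size constants are the ones defined in this file:

func frameMessages(threshold int) (shareMsg, vecMsg []byte) {
	// private share message: | feldmanVSSShare | Fr-encoded share |
	shareMsg = make([]byte, 1+shareSize)
	shareMsg[0] = byte(feldmanVSSShare)
	// shareMsg[1:] is filled by zrPolynomialImage in generateShares

	// broadcast message: | feldmanVSSVerifVec | A_0 | ... | A_t |
	vecMsg = make([]byte, 1+verifVectorSize*(threshold+1))
	vecMsg[0] = byte(feldmanVSSVerifVec)
	// vecMsg[1:] is filled by writeVerifVector in generateShares
	return shareMsg, vecMsg
}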
-// generateShares is used by the dealer to generate the secret polynomial from the input seed -// and derive all private shares and public data. -func (s *feldmanVSSstate) generateShares(seed []byte) error { - err := seedRelic(seed) - if err != nil { - return fmt.Errorf("generating shares failed: %w", err) - } - - // Generate a polynomial P in Zr[X] of degree t - s.a = make([]scalar, s.threshold+1) - s.vA = make([]pointG2, s.threshold+1) - s.y = make([]pointG2, s.size) - // non-zero a[0] - group private key is not zero - randZrStar(&s.a[0]) - generatorScalarMultG2(&s.vA[0], &s.a[0]) - if s.threshold > 0 { - for i := 1; i < s.threshold; i++ { - C.bn_new_wrapper((*C.bn_st)(&s.a[i])) - randZr(&s.a[i]) - generatorScalarMultG2(&s.vA[i], &s.a[i]) - } - // non-zero a[t] to enforce the polynomial degree - randZrStar(&s.a[s.threshold]) - generatorScalarMultG2(&s.vA[s.threshold], &s.a[s.threshold]) - } - - // compute the shares - for i := index(1); int(i) <= s.size; i++ { - // the dealer's own share - if i-1 == s.myIndex { - xdata := make([]byte, shareSize) - zrPolynomialImage(xdata, s.a, i, &s.y[i-1]) - C.bn_read_bin((*C.bn_st)(&s.x), - (*C.uchar)(&xdata[0]), - PrKeyLenBLSBLS12381, - ) - continue - } - // the other participants' shares - data := make([]byte, shareSize+1) - data[0] = byte(feldmanVSSShare) - zrPolynomialImage(data[1:], s.a, i, &s.y[i-1]) - s.processor.PrivateSend(int(i-1), data) - } - // broadcast the vector - vectorSize := verifVectorSize * (s.threshold + 1) - data := make([]byte, vectorSize+1) - data[0] = byte(feldmanVSSVerifVec) - writeVerifVector(data[1:], s.vA) - s.processor.Broadcast(data) - - s.vAReceived = true - s.xReceived = true - s.validKey = true - return nil -} - -// receives a private share from the dealer -func (s *feldmanVSSstate) receiveShare(origin index, data []byte) { - // only accept private shares from the dealer. - if origin != s.dealerIndex { - return - } - - if s.xReceived { - s.processor.FlagMisbehavior(int(origin), "private share was already received") - return - } - - // at this point, tag the private message as received - s.xReceived = true - - // private message general check - // msg = |tag| Data | - if len(data) == 0 || dkgMsgTag(data[0]) != feldmanVSSShare { - s.validKey = false - s.processor.FlagMisbehavior(int(origin), - fmt.Sprintf("private share should be non-empty and first byte should be %d, received %#x", - feldmanVSSShare, data)) - return - } - - // consider the remaining data from message - data = data[1:] - - if (len(data)) != shareSize { - s.validKey = false - s.processor.FlagMisbehavior(int(origin), - fmt.Sprintf("invalid share size, expects %d, got %d", - shareSize, len(data))) - return - } - - // read the participant private share - if C.bn_read_Zr_bin((*C.bn_st)(&s.x), - (*C.uchar)(&data[0]), - PrKeyLenBLSBLS12381, - ) != valid { - s.validKey = false - s.processor.FlagMisbehavior(int(origin), - fmt.Sprintf("invalid share value %x", data)) - return - } - - if s.vAReceived { - s.validKey = s.verifyShare() - } -} - -// receives the public vector from the dealer -func (s *feldmanVSSstate) receiveVerifVector(origin index, data []byte) { - // only accept the verification vector from the dealer.
- if origin != s.dealerIndex { - return - } - - if s.vAReceived { - s.processor.FlagMisbehavior(int(origin), - "verification vector was already received") - return - } - - if verifVectorSize*(s.threshold+1) != len(data) { - s.vAReceived = true - s.validKey = false - s.processor.Disqualify(int(origin), - fmt.Sprintf("invalid verification vector size, expects %d, got %d", - verifVectorSize*(s.threshold+1), len(data))) - return - } - // read the verification vector - s.vA = make([]pointG2, s.threshold+1) - err := readVerifVector(s.vA, data) - if err != nil { - s.vAReceived = true - s.validKey = false - s.processor.Disqualify(int(origin), - fmt.Sprintf("reading the verification vector failed: %s", err)) - return - } - - s.y = make([]pointG2, s.size) - s.computePublicKeys() - - s.vAReceived = true - if s.xReceived { - s.validKey = s.verifyShare() - } -} - -// zrPolynomialImage computes P(x) = a_0 + a_1*x + .. + a_n*x^n (mod r) in Zr -// r being the order of G1 -// P(x) is written in dest, while g2^P(x) is written in y -// x being a small integer -func zrPolynomialImage(dest []byte, a []scalar, x index, y *pointG2) { - C.Zr_polynomialImage_export((*C.uchar)(&dest[0]), - (*C.ep2_st)(y), - (*C.bn_st)(&a[0]), (C.int)(len(a)), - (C.uint8_t)(x), - ) -} - -// writeVerifVector exports a vector A into an array of bytes -// assuming the array length matches the vector length -func writeVerifVector(dest []byte, A []pointG2) { - C.ep2_vector_write_bin((*C.uchar)(&dest[0]), - (*C.ep2_st)(&A[0]), - (C.int)(len(A)), - ) -} - -// readVerifVector imports A vector from an array of bytes, -// assuming the slice length matches the vector length -func readVerifVector(A []pointG2, src []byte) error { - read := C.ep2_vector_read_bin((*C.ep2_st)(&A[0]), - (*C.uchar)(&src[0]), - (C.int)(len(A))) - if read == valid { - return nil - } - // invalid A vector - return invalidInputsErrorf("the verification vector does not serialize G2 points") -} - -func (s *feldmanVSSstate) verifyShare() bool { - // check y[current] == x.G2 - return C.verifyshare((*C.bn_st)(&s.x), - (*C.ep2_st)(&s.y[s.myIndex])) == 1 -} - -// computePublicKeys extracts the participants public keys from the verification vector -// y[i] = Q(i+1) for all participants i, with: -// -// Q(x) = A_0 + A_1*x + ... + A_n*x^n in G2 -func (s *feldmanVSSstate) computePublicKeys() { - C.G2_polynomialImages( - (*C.ep2_st)(&s.y[0]), (C.int)(len(s.y)), - (*C.ep2_st)(&s.vA[0]), (C.int)(len(s.vA)), - ) -} diff --git a/crypto/dkg_feldmanvssq.go b/crypto/dkg_feldmanvssq.go deleted file mode 100644 index 335ce6fc86d..00000000000 --- a/crypto/dkg_feldmanvssq.go +++ /dev/null @@ -1,675 +0,0 @@ -//go:build relic -// +build relic -package crypto - -// #cgo CFLAGS: -g -Wall -std=c99 -// #include "dkg_include.h" -import "C" - -import ( - "fmt" -) - -// Implements Feldman Verifiable Secret Sharing using -// the BLS set up on the BLS12-381 curve. A complaint mechanism -// is added to qualify/disqualify the dealer if they misbehave. - -// The secret is a BLS private key generated by the dealer -// (and hence this is a centralized generation). -// The dealer generates key shares for a BLS-based -// threshold signature scheme and distributes the shares over the (n) -// participants including itself. The participants validate their shares -// using a public verification vector shared by the dealer and are able -// to broadcast complaints against a misbehaving dealer. - -// The dealer has the chance to avoid being disqualified by broadcasting -// a complaint answer.
The protocol ends with all honest participants -// reaching a consensus about the dealer qualification/disqualification. - -// Private keys are scalar in Zr, where r is the group order of G1/G2 -// Public keys are in G2. - -// feldman VSS protocol, with complaint mechanism, implements DKGState -type feldmanVSSQualState struct { - // feldmanVSSstate state - *feldmanVSSstate - // complaints received against the dealer: - // the key is the origin of the complaint - // a complaint will be created if a complaint message or an answer was - // broadcasted, a complaint will be checked only when both the - // complaint message and the answer were broadcasted - complaints map[index]*complaint - // is the dealer disqualified - disqualified bool - // Timeout to receive shares and verification vector - // - if a share is not received before this timeout a complaint will be formed - // - if the verification is not received before this timeout, - // dealer is disqualified - sharesTimeout bool - // Timeout to receive complaints - // all complaints received after this timeout are ignored - complaintsTimeout bool -} - -// these data are required to justify slashing -type complaint struct { - received bool - answerReceived bool - answer scalar -} - -// NewFeldmanVSSQual creates a new instance of a Feldman VSS protocol -// with a qualification mechanism. -// -// An instance is run by a single participant and is usable for only one protocol. -// In order to run the protocol again, a new instance needs to be created. -// -// The function returns: -// - (nil, InvalidInputsError) if: -// - size is not in [DKGMinSize, DKGMaxSize] -// - threshold is not in [MinimumThreshold, size-1] -// - myIndex is not in [0, size-1] -// - dealerIndex is not in [0, size-1] -// - (dkgInstance, nil) otherwise -func NewFeldmanVSSQual(size int, threshold int, myIndex int, - processor DKGProcessor, dealerIndex int) (DKGState, error) { - - common, err := newDKGCommon(size, threshold, myIndex, processor, dealerIndex) - if err != nil { - return nil, err - } - - fvss := &feldmanVSSstate{ - dkgCommon: common, - dealerIndex: index(dealerIndex), - } - fvssq := &feldmanVSSQualState{ - feldmanVSSstate: fvss, - disqualified: false, - } - fvssq.init() - return fvssq, nil -} - -func (s *feldmanVSSQualState) init() { - s.feldmanVSSstate.init() - s.complaints = make(map[index]*complaint) -} - -// NextTimeout sets the next protocol timeout -// This function needs to be called twice by every participant in -// the Feldman VSS Qual protocol. -// The first call is a timeout for sharing the private shares. -// The second call is a timeout for broadcasting the complaints. -// -// The returned error is: -// - dkgInvalidStateTransitionError if the DKG instance was not running. -// - dkgInvalidStateTransitionError if the DKG instance already called the 2 required timeouts. -// - nil otherwise. -func (s *feldmanVSSQualState) NextTimeout() error { - if !s.running { - return dkgInvalidStateTransitionErrorf("dkg protocol %d is not running", s.myIndex) - } - if s.complaintsTimeout { - return dkgInvalidStateTransitionErrorf("the next timeout should be to end DKG protocol") - } - - // if dealer is already disqualified, there is nothing to do - if s.disqualified { - if !s.sharesTimeout { - s.sharesTimeout = true - return nil - } else { - s.complaintsTimeout = true - return nil - } - } - - if !s.sharesTimeout { - s.setSharesTimeout() - return nil - } else { - s.setComplaintsTimeout() - return nil - } -} -
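Complaints and their answers follow an equally small wire format. A hedged sketch matching the handlers below; dealerIdx and complainerIdx are hypothetical variables, and indices fit in one byte since DKGMaxSize is at most 254:

func frameComplaint(dealerIdx, complainerIdx byte) (complaintMsg, answerMsg []byte) {
	// complaint broadcast: | feldmanVSSComplaint | complainee index |
	complaintMsg = []byte{byte(feldmanVSSComplaint), dealerIdx}

	// answer broadcast: | feldmanVSSComplaintAnswer | complainer index | revealed share |
	answerMsg = make([]byte, 1+complaintAnswerSize)
	answerMsg[0] = byte(feldmanVSSComplaintAnswer)
	answerMsg[1] = complainerIdx
	// answerMsg[2:] carries the complainer's share so every participant
	// can verify the answer against the public verification vector
	return complaintMsg, answerMsg
}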
-// End ends the protocol in the current participant. -// This is also a timeout for receiving all complaint answers. -// It returns the finalized public data and participant private key share: -// 1. the group public key corresponding to the group secret key -// 2. all the public key shares corresponding to the participants private key shares. -// 3. the finalized private key which is the current participant's own private key share -// 4. Error Returns: -// - dkgFailureError if the dealer was disqualified. -// - dkgFailureError if the public key share or group public key is identity. -// - dkgInvalidStateTransitionError if Start() was not called, or NextTimeout() was not called twice -// - nil otherwise. -func (s *feldmanVSSQualState) End() (PrivateKey, PublicKey, []PublicKey, error) { - if !s.running { - return nil, nil, nil, dkgInvalidStateTransitionErrorf("dkg protocol %d is not running", s.myIndex) - } - if !s.sharesTimeout || !s.complaintsTimeout { - return nil, nil, nil, - dkgInvalidStateTransitionErrorf("%d: two timeouts should be set before ending dkg", s.myIndex) - } - s.running = false - // check if a complaint has remained without an answer - // a dealer is disqualified if a complaint was never answered - if !s.disqualified { - for complainer, c := range s.complaints { - if c.received && !c.answerReceived { - s.disqualified = true - s.processor.Disqualify(int(s.dealerIndex), - fmt.Sprintf("complaint from %d was not answered", - complainer)) - break - } - } - } - - // If the dealer is disqualified, all keys are ignored - // otherwise, the keys are valid - if s.disqualified { - return nil, nil, nil, dkgFailureErrorf("dealer is disqualified") - } - - // private key of the current participant - x := newPrKeyBLSBLS12381(&s.x) - - // Group public key - Y := newPubKeyBLSBLS12381(&s.vA[0]) - - // The participants public keys - y := make([]PublicKey, s.size) - for i, p := range s.y { - y[i] = newPubKeyBLSBLS12381(&p) - } - - // check if current public key share or group public key is identity. - // In that case all signatures generated by the key are invalid (as stated by the BLS IETF - // draft) to avoid equivocation issues. - // TODO: update generateShares to make sure no public key share is identity AND - // update receiveVector function to disqualify the dealer if any public key share - // is identity, only when FeldmanVSSQ is not a building primitive of Joint-Feldman - if (&s.x).isZero() { - s.disqualified = true - return nil, nil, nil, dkgFailureErrorf("private key share is identity and therefore invalid") - } - if Y.isIdentity { - s.disqualified = true - return nil, nil, nil, dkgFailureErrorf("group private key is identity and is therefore invalid") - } - return x, Y, y, nil -} - -const ( - complaintSize = 1 - complaintAnswerSize = 1 + PrKeyLenBLSBLS12381 -) - -// HandleBroadcastMsg processes a new broadcasted message received by the current participant.
-// orig is the message origin index -// -// The function returns: -// - dkgInvalidStateTransitionError if the instance is not running -// - invalidInputsError if `orig` is not valid (in [0, size-1]) -// - nil otherwise -func (s *feldmanVSSQualState) HandleBroadcastMsg(orig int, msg []byte) error { - if !s.running { - return dkgInvalidStateTransitionErrorf("dkg is not running") - } - - if orig >= s.Size() || orig < 0 { - return invalidInputsErrorf( - "wrong origin input, should be less than %d, got %d", - s.Size(), - orig) - } - - // In case a message is received by the origin participant, - // the message is just ignored - if s.myIndex == index(orig) { - return nil - } - - // if dealer is already disqualified, ignore the message - if s.disqualified { - return nil - } - - if len(msg) == 0 { - if index(orig) == s.dealerIndex { - s.disqualified = true - } - s.processor.Disqualify(orig, "received broadcast is empty") - return nil - } - - switch dkgMsgTag(msg[0]) { - case feldmanVSSVerifVec: - s.receiveVerifVector(index(orig), msg[1:]) - case feldmanVSSComplaint: - s.receiveComplaint(index(orig), msg[1:]) - case feldmanVSSComplaintAnswer: - s.receiveComplaintAnswer(index(orig), msg[1:]) - default: - if index(orig) == s.dealerIndex { - s.disqualified = true - } - s.processor.Disqualify(orig, - fmt.Sprintf("invalid broadcast header, got %d", - dkgMsgTag(msg[0]))) - } - return nil -} - -// HandlePrivateMsg processes a new private message received by the current participant. -// orig is the message origin index. -// -// The function returns: -// - dkgInvalidStateTransitionError if the instance is not running -// - invalidInputsError if `orig` is not valid (in [0, size-1]) -// - nil otherwise -func (s *feldmanVSSQualState) HandlePrivateMsg(orig int, msg []byte) error { - if !s.running { - return dkgInvalidStateTransitionErrorf("dkg is not running") - } - if orig >= s.Size() || orig < 0 { - return invalidInputsErrorf( - "invalid origin, should be non-negative and less than %d, got %d", - s.Size(), - orig) - } - - // In case a private message is received by the origin participant, - // the message is just ignored - if s.myIndex == index(orig) { - return nil - } - - // if dealer is already disqualified, ignore the message - if s.disqualified { - return nil - } - - // forward the message to receiveShare because any private message - // has to be a private share - s.receiveShare(index(orig), msg) - - return nil -} - -// ForceDisqualify forces a participant to get disqualified -// for a reason outside of the DKG protocol. -// The caller should make sure all honest participants call this function, -// otherwise, the protocol can be broken. -// -// The function returns: -// - dkgInvalidStateTransitionError if the instance is not running -// - invalidInputsError if `orig` is not valid (in [0, size-1]) -// - nil otherwise -func (s *feldmanVSSQualState) ForceDisqualify(participant int) error { - if !s.running { - return dkgInvalidStateTransitionErrorf("dkg is not running") - } - if participant >= s.Size() || participant < 0 { - return invalidInputsErrorf( - "invalid origin input, should be less than %d, got %d", - s.Size(), participant) - } - if index(participant) == s.dealerIndex { - s.disqualified = true - } - return nil -} - -// The function does not check that the call respects the state machine -// transitions of feldmanVSSQual. The calling function must make sure this call -// is valid.
-func (s *feldmanVSSQualState) setSharesTimeout() { - s.sharesTimeout = true - // if verif vector is not received, disqualify the dealer - if !s.vAReceived { - s.disqualified = true - s.processor.Disqualify(int(s.dealerIndex), - "verification vector was not received") - return - } - // if share is not received, make a complaint - if !s.xReceived { - s.buildAndBroadcastComplaint() - } -} - -// The function does not check that the call respects the state machine -// transitions of feldmanVSSQual. The calling function must make sure this call -// is valid. -func (s *feldmanVSSQualState) setComplaintsTimeout() { - s.complaintsTimeout = true - // if more than t complaints are received, the dealer is disqualified - // regardless of the answers. - // (at this point, all answered complaints should have been already received) - // (i.e., there is no complaint with (!c.received && c.answerReceived)) - if len(s.complaints) > s.threshold { - s.disqualified = true - s.processor.Disqualify(int(s.dealerIndex), - fmt.Sprintf("there are %d complaints, they exceeded the threshold %d", - len(s.complaints), s.threshold)) - } -} - -func (s *feldmanVSSQualState) receiveShare(origin index, data []byte) { - // only accept private shares from the dealer. - if origin != s.dealerIndex { - return - } - - // check the share timeout - if s.sharesTimeout { - s.processor.FlagMisbehavior(int(origin), - "private share is received after the shares timeout") - return - } - - if s.xReceived { - s.processor.FlagMisbehavior(int(origin), - "private share was already received") - return - } - - // at this point, tag the private share as received - s.xReceived = true - - // private message general check - if len(data) == 0 || dkgMsgTag(data[0]) != feldmanVSSShare { - s.buildAndBroadcastComplaint() - s.processor.FlagMisbehavior(int(origin), - fmt.Sprintf("private share should be non-empty and first byte should be %d, received %#x", - feldmanVSSShare, data)) - return - } - - // consider the remaining data from message - data = data[1:] - - if (len(data)) != shareSize { - s.buildAndBroadcastComplaint() - s.processor.FlagMisbehavior(int(origin), - fmt.Sprintf("invalid share size, expects %d, got %d", - shareSize, len(data))) - return - } - // read the participant private share - if C.bn_read_Zr_bin((*C.bn_st)(&s.x), - (*C.uchar)(&data[0]), - PrKeyLenBLSBLS12381, - ) != valid { - s.buildAndBroadcastComplaint() - s.processor.FlagMisbehavior(int(origin), - fmt.Sprintf("invalid share value %x", data)) - return - } - - if s.vAReceived { - if !s.verifyShare() { - // otherwise, build a complaint - s.buildAndBroadcastComplaint() - } - } -} - -func (s *feldmanVSSQualState) receiveVerifVector(origin index, data []byte) { - // only accept the verification vector from the dealer.
- if origin != s.dealerIndex { - return - } - - // check the share timeout - if s.sharesTimeout { - s.processor.FlagMisbehavior(int(origin), - "verification vector received after the shares timeout") - return - } - - if s.vAReceived { - s.processor.FlagMisbehavior(int(origin), - "verification vector was already received") - return - } - s.vAReceived = true - - if len(data) != verifVectorSize*(s.threshold+1) { - s.disqualified = true - s.processor.Disqualify(int(origin), - fmt.Sprintf("invalid verification vector size, expects %d, got %d", - verifVectorSize*(s.threshold+1), len(data))) - return - } - // read the verification vector - s.vA = make([]pointG2, s.threshold+1) - err := readVerifVector(s.vA, data) - if err != nil { - s.disqualified = true - s.processor.Disqualify(int(origin), - fmt.Sprintf("reading the verification vector failed: %s", err)) - return - } - - s.y = make([]pointG2, s.size) - s.computePublicKeys() - - // check the (already) registered complaints - for complainer, c := range s.complaints { - if c.received && c.answerReceived { - if s.checkComplaint(complainer, c) { - s.disqualified = true - s.processor.Disqualify(int(s.dealerIndex), - fmt.Sprintf("verification vector received: a complaint answer to %d is invalid", - complainer)) - return - } - } - } - // check the private share - if s.xReceived { - if !s.verifyShare() { - s.buildAndBroadcastComplaint() - } - } -} - -// build a complaint against the dealer, add it to the local -// complaint map and broadcast it -func (s *feldmanVSSQualState) buildAndBroadcastComplaint() { - s.complaints[s.myIndex] = &complaint{ - received: true, - answerReceived: false, - } - data := []byte{byte(feldmanVSSComplaint), byte(s.dealerIndex)} - s.processor.Broadcast(data) -} - -// build a complaint answer, add it to the local -// complaint map and broadcast it -func (s *feldmanVSSQualState) buildAndBroadcastComplaintAnswer(complainee index) { - data := make([]byte, complaintAnswerSize+1) - data[0] = byte(feldmanVSSComplaintAnswer) - data[1] = byte(complainee) - zrPolynomialImage(data[2:], s.a, complainee+1, nil) - s.complaints[complainee].answerReceived = true - s.processor.Broadcast(data) -} - -// assuming a complaint and its answer were both received, this function returns: -// - false if the complaint answer is correct -// - true if the complaint answer is not correct -func (s *feldmanVSSQualState) checkComplaint(complainer index, c *complaint) bool { - // check y[complainer] == share.G2 - return C.verifyshare((*C.bn_st)(&c.answer), - (*C.ep2_st)(&s.y[complainer])) == 0 -} - -// data = |complainee| -func (s *feldmanVSSQualState) receiveComplaint(origin index, data []byte) { - // check the complaint timeout - if s.complaintsTimeout { - s.processor.FlagMisbehavior(int(origin), - "complaint received after the complaint timeout") - return - } - - if len(data) != complaintSize { - // only the dealer of the instance gets disqualified - if origin == s.dealerIndex { - s.disqualified = true - s.processor.Disqualify(int(origin), - fmt.Sprintf("invalid complaint size, expects %d, got %d", - complaintSize, len(data))) - } - return - } - - // the byte encodes the complainee - complainee := index(data[0]) - - // validate the complainee value - if int(complainee) >= s.size { - // only the dealer of the instance gets disqualified - if origin == s.dealerIndex { - s.disqualified = true - s.processor.Disqualify(int(origin), - fmt.Sprintf("invalid complainee, should be less than %d, got %d", - s.size, complainee)) - } - return - } - - // if the complaint is
coming from the dealer, ignore it - if origin == s.dealerIndex { - return - } - - // if the complainee is not the dealer, ignore the complaint - if complainee != s.dealerIndex { - return - } - - c, ok := s.complaints[origin] - // if the complaint is new, add it - if !ok { - s.complaints[origin] = &complaint{ - received: true, - answerReceived: false, - } - // if the current participant is the dealer (the complainee), prepare an answer - if s.myIndex == s.dealerIndex { - s.buildAndBroadcastComplaintAnswer(origin) - } - return - } - // complaint is not new in the map - // check if the complaint has been already received - if c.received { - s.processor.FlagMisbehavior(int(origin), - "complaint was already received") - return - } - c.received = true - // answerReceived flag check is a sanity check - if s.vAReceived && c.answerReceived && s.myIndex != s.dealerIndex { - s.disqualified = s.checkComplaint(origin, c) - if s.disqualified { - s.processor.Disqualify(int(s.dealerIndex), - fmt.Sprintf("complaint received: complaint answer to %d is invalid", - origin)) - } - return - } -} - -// answer = |complainer| private share | -func (s *feldmanVSSQualState) receiveComplaintAnswer(origin index, data []byte) { - // check for invalid answers - if origin != s.dealerIndex { - return - } - - // check the answer format - if len(data) != complaintAnswerSize { - s.disqualified = true - s.processor.Disqualify(int(s.dealerIndex), - fmt.Sprintf("the complaint answer has an invalid length, expects %d, got %d", - complaintAnswerSize, len(data))) - return - } - - // first byte encodes the complainer - complainer := index(data[0]) - if int(complainer) >= s.size { - s.disqualified = true - s.processor.Disqualify(int(origin), - fmt.Sprintf("complainer value is invalid, should be less than %d, got %d", - s.size, int(complainer))) - return - } - - c, ok := s.complaints[complainer] - // if the complaint is new, add it - if !ok { - s.complaints[complainer] = &complaint{ - received: false, - answerReceived: true, - } - - // read the complainer private share - C.bn_new_wrapper((*C.bn_st)(&s.complaints[complainer].answer)) - if C.bn_read_Zr_bin((*C.bn_st)(&s.complaints[complainer].answer), - (*C.uchar)(&data[1]), - PrKeyLenBLSBLS12381, - ) != valid { - s.disqualified = true - s.processor.Disqualify(int(s.dealerIndex), - fmt.Sprintf("invalid complaint answer value %x", data)) - return - } - return - } - // complaint is not new in the map - // check if the answer has been already received - if c.answerReceived { - s.processor.FlagMisbehavior(int(origin), - "complaint answer was already received") - return - } - c.answerReceived = true - - // flag check is a sanity check - if c.received { - // read the complainer private share - C.bn_new_wrapper((*C.bn_st)(&c.answer)) - if C.bn_read_Zr_bin((*C.bn_st)(&c.answer), - (*C.uchar)(&data[1]), - PrKeyLenBLSBLS12381, - ) != valid { - s.disqualified = true - s.processor.Disqualify(int(s.dealerIndex), - fmt.Sprintf("invalid complaint answer value %x", data)) - return - } - if s.vAReceived { - s.disqualified = s.checkComplaint(complainer, c) - if s.disqualified { - s.processor.Disqualify(int(s.dealerIndex), - fmt.Sprintf("complaint answer received: complaint answer to %d is invalid", - complainer)) - } - } - - // fix the share of the current participant if the complaint is invalid - if !s.disqualified && complainer == s.myIndex { - s.x = c.answer - } - } -} diff --git a/crypto/dkg_include.h b/crypto/dkg_include.h deleted file mode 100644 index 5e518300071..00000000000 --- a/crypto/dkg_include.h +++
/dev/null @@ -1,19 +0,0 @@ -// +build relic - -#ifndef _REL_DKG_INCLUDE_H -#define _REL_DKG_INCLUDE_H - -#include "bls12381_utils.h" - -// the highest index of a DKG participant -#define MAX_IND 255 -#define MAX_IND_BITS 8 - -void Zr_polynomialImage_export(byte* out, ep2_t y, const bn_st* a, const int a_size, const byte x); -void Zr_polynomialImage(bn_t out, ep2_t y, const bn_st* a, const int a_size, const byte x); -void G2_polynomialImages(ep2_st* y, const int len_y, const ep2_st* A, const int len_A); -void ep2_vector_write_bin(byte* out, const ep2_st* A, const int len); -int ep2_vector_read_bin(ep2_st* A, const byte* src, const int len); -int verifyshare(const bn_t x, const ep2_t y); - -#endif diff --git a/crypto/dkg_jointfeldman.go b/crypto/dkg_jointfeldman.go deleted file mode 100644 index 7b63f88e810..00000000000 --- a/crypto/dkg_jointfeldman.go +++ /dev/null @@ -1,341 +0,0 @@ -//go:build relic -// +build relic -package crypto - -// #cgo CFLAGS: -g -Wall -std=c99 -// #cgo LDFLAGS: -L${SRCDIR}/relic/build/lib -l relic_s -// #include "dkg_include.h" -import "C" - -import ( - "fmt" -) - -// Implements Joint Feldman (Pedersen) protocol using -// the BLS set up on the BLS12-381 curve. -// The protocol runs (n) parallel instances of Feldman vss with -// the complaints mechanism, each participant being a dealer -// once. - -// This is a fully distributed generation. The secret is a BLS -// private key generated jointly by all the participants. - -// (t) is the threshold parameter. Although the API allows using arbitrary values of (t), -// the DKG protocol is secure in the presence of up to (t) malicious participants -// when (t < n/2). -// Joint-Feldman is the protocol implemented in Flow, (t) being set to the maximum value -// t = floor((n-1)/2) to optimize for unforgeability and robustness of the threshold -// signature scheme using the output keys. - -// In each feldman VSS instance, the dealer generates a chunk of -// the private key of a BLS threshold signature scheme. -// Using the complaints mechanism, each dealer is qualified or disqualified -// from the protocol, and the overall key takes into account -// all chunks from qualified dealers. - -// Private keys are scalar in Zr, where r is the group order of G1/G2 -// Public keys are in G2. - -// Joint Feldman protocol, with complaint mechanism, implements DKGState -type JointFeldmanState struct { - *dkgCommon - // jointRunning is true if and only if all parallel Feldman vss protocols are running - jointRunning bool - // feldmanVSSQualState parallel states - fvss []feldmanVSSQualState - // is the group public key - jointPublicKey pointG2 - // Private share of the current participant - jointx scalar - // Public keys of the group participants, the vector size is (n) - jointy []pointG2 -} -
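A hedged construction sketch: every participant makes the same call, and the state internally runs n Feldman VSS Qual instances, one per dealer; the final private key share is the sum of the shares from all qualified dealers. proc is any DKGProcessor implementation, and the threshold follows the floor((n-1)/2) rule from the comments above:

func newJointDKG(n, myIndex int, proc crypto.DKGProcessor) (crypto.DKGState, error) {
	// t = floor((n-1)/2), the value used by Flow per the package docs
	return crypto.NewJointFeldman(n, (n-1)/2, myIndex, proc)
}

The returned instance is then driven exactly like any other DKGState: Start, the message handlers, two NextTimeout calls, and End.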
-// NewJointFeldman creates a new instance of a Joint Feldman protocol. -// -// size is the total number of participants (n). -// threshold is the threshold parameter (t). The DKG protocol is secure in the -// presence of up to (t) malicious participants when (t < n/2). -// myIndex is the index of the participant creating the new DKG instance. -// processor is the DKGProcessor instance required to connect the participant to the -// communication channels. -// -// An instance is run by a single participant and is usable for only one protocol. -// In order to run the protocol again, a new instance needs to be created. -// -// The function returns: -// - (nil, InvalidInputsError) if: -// - size is not in [DKGMinSize, DKGMaxSize] -// - threshold is not in [MinimumThreshold, size-1] -// - myIndex is not in [0, size-1] -// -// - (dkgInstance, nil) otherwise -func NewJointFeldman(size int, threshold int, myIndex int, - processor DKGProcessor) (DKGState, error) { - - common, err := newDKGCommon(size, threshold, myIndex, processor, 0) - if err != nil { - return nil, err - } - - jf := &JointFeldmanState{ - dkgCommon: common, - } - jf.init() - return jf, nil -} - -func (s *JointFeldmanState) init() { - s.fvss = make([]feldmanVSSQualState, s.size) - for i := 0; i < s.size; i++ { - fvss := &feldmanVSSstate{ - dkgCommon: s.dkgCommon, - dealerIndex: index(i), - } - s.fvss[i] = feldmanVSSQualState{ - feldmanVSSstate: fvss, - disqualified: false, - } - s.fvss[i].init() - } -} - -// Start triggers Joint Feldman protocol start for the current participant. -// The seed is used to generate the FVSS secret polynomial -// (including the instance group private key) when the current -// participant is the dealer. -// -// The returned error is: -// - dkgInvalidStateTransitionError if the DKG instance is already running. -// - error if an unexpected exception occurs -// - nil otherwise. -func (s *JointFeldmanState) Start(seed []byte) error { - if s.jointRunning { - return dkgInvalidStateTransitionErrorf("dkg is already running") - } - - for i := index(0); int(i) < s.size; i++ { - s.fvss[i].running = false - err := s.fvss[i].Start(seed) - if err != nil { - return fmt.Errorf("error when starting dkg: %w", err) - } - } - s.jointRunning = true - return nil -} - -// NextTimeout sets the next timeout of the protocol if any timeout applies. -// -// The returned error is: -// - dkgInvalidStateTransitionError if the DKG instance was not running. -// - dkgInvalidStateTransitionError if the DKG instance already called the 2 required timeouts. -// - nil otherwise. -func (s *JointFeldmanState) NextTimeout() error { - if !s.jointRunning { - return dkgInvalidStateTransitionErrorf("dkg protocol %d is not running", s.myIndex) - } - - for i := index(0); int(i) < s.size; i++ { - err := s.fvss[i].NextTimeout() - if err != nil { - return fmt.Errorf("next timeout failed: %w", err) - } - } - return nil -} - -// End ends the protocol in the current participant. -// It returns the finalized public data and participant private key share. -// - the group public key corresponding to the group secret key -// - all the public key shares corresponding to the participants private -// key shares. -// - the finalized private key which is the current participant's own private key share -// -// The returned error is: -// - dkgFailureError if the disqualified dealers exceeded the threshold -// - dkgFailureError if the public key share or group public key is identity. -// - dkgInvalidStateTransitionError if Start() was not called, or NextTimeout() was not called twice -// - nil otherwise.
-func (s *JointFeldmanState) End() (PrivateKey, PublicKey, []PublicKey, error) { - if !s.jointRunning { - return nil, nil, nil, dkgInvalidStateTransitionErrorf("dkg protocol %d is not running", s.myIndex) - } - - disqualifiedTotal := 0 - for i := 0; i < s.size; i++ { - // check previous timeouts were called - if !s.fvss[i].sharesTimeout || !s.fvss[i].complaintsTimeout { - return nil, nil, nil, - dkgInvalidStateTransitionErrorf("%d: two timeouts should be set before ending dkg", s.myIndex) - } - - // check if a complaint has remained without an answer - // a dealer is disqualified if a complaint was never answered - if !s.fvss[i].disqualified { - for complainer, c := range s.fvss[i].complaints { - if c.received && !c.answerReceived { - s.fvss[i].disqualified = true - s.processor.Disqualify(i, - fmt.Sprintf("complaint from %d was not answered", complainer)) - disqualifiedTotal++ - break - } - } - } else { - disqualifiedTotal++ - } - } - s.jointRunning = false - - // check failing dkg - if disqualifiedTotal > s.threshold || s.size-disqualifiedTotal <= s.threshold { - return nil, nil, nil, - dkgFailureErrorf( - "Joint-Feldman failed because the diqualified participants number is high: %d disqualified, threshold is %d, size is %d", - disqualifiedTotal, s.threshold, s.size) - } - - // wrap up the keys from qualified dealers - jointx, jointPublicKey, jointy := s.sumUpQualifiedKeys(s.size - disqualifiedTotal) - - // private key of the current participant - x := newPrKeyBLSBLS12381(jointx) - - // Group public key - Y := newPubKeyBLSBLS12381(jointPublicKey) - - // The participants public keys - y := make([]PublicKey, s.size) - for i, p := range jointy { - y[i] = newPubKeyBLSBLS12381(&p) - } - - // check if current public key share or group public key is identity. - // In that case all signatures generated by the current private key share or - // the group private key are invalid (as stated by the BLS IETF draft) - // to avoid equivocation issues. - // - // Assuming both private keys have entropy from at least one honest dealer, each private - // key is initially uniformly distributed over the 2^255 possible values. We can argue that - // the known uniformity-bias caused by malicious dealers in Joint-Feldman does not weaken - // the likelihood of generating an identity key to practical probabilities. 
- if (jointx).isZero() { - return nil, nil, nil, dkgFailureErrorf("private key share is identity and is therefore invalid") - } - if Y.isIdentity { - return nil, nil, nil, dkgFailureErrorf("group private key is identity and is therefore invalid") - } - return x, Y, y, nil -} - -// HandleBroadcastMsg processes a new broadcasted message received by the current participant -// orig is the message origin index -// -// The function returns: -// - dkgInvalidStateTransitionError if the instance is not running -// - invalidInputsError if `orig` is not valid (in [0, size-1]) -// - nil otherwise -func (s *JointFeldmanState) HandleBroadcastMsg(orig int, msg []byte) error { - if !s.jointRunning { - return dkgInvalidStateTransitionErrorf("dkg protocol %d is not running", s.myIndex) - } - for i := index(0); int(i) < s.size; i++ { - err := s.fvss[i].HandleBroadcastMsg(orig, msg) - if err != nil { - return fmt.Errorf("handle broadcast message failed: %w", err) - } - } - return nil -} - -// HandlePrivateMsg processes a new private message received by the current participant -// orig is the message origin index -// -// The function returns: -// - dkgInvalidStateTransitionError if the instance is not running -// - invalidInputsError if `orig` is not valid (in [0, size-1]) -// - nil otherwise -func (s *JointFeldmanState) HandlePrivateMsg(orig int, msg []byte) error { - if !s.jointRunning { - return dkgInvalidStateTransitionErrorf("dkg protocol %d is not running", s.myIndex) - } - for i := index(0); int(i) < s.size; i++ { - err := s.fvss[i].HandlePrivateMsg(orig, msg) - if err != nil { - return fmt.Errorf("handle private message failed: %w", err) - } - } - return nil -} - -// Running returns the running state of Joint Feldman protocol -func (s *JointFeldmanState) Running() bool { - return s.jointRunning -} - -// ForceDisqualify forces a participant to get disqualified -// for a reason outside of the DKG protocol -// The caller should make sure all honest participants call this function, -// otherwise, the protocol can be broken -// -// The function returns: -// - dkgInvalidStateTransitionError if the instance is not running -// - invalidInputsError if `orig` is not valid (in [0, size-1]) -// - nil otherwise -func (s *JointFeldmanState) ForceDisqualify(participant int) error { - if !s.jointRunning { - return dkgInvalidStateTransitionErrorf("dkg is not running") - } - // disqualify the participant in the fvss instance where they are a dealer - err := s.fvss[participant].ForceDisqualify(participant) - if err != nil { - return fmt.Errorf("force disqualify failed: %w", err) - } - return nil -} - -// sum up the 3 type of keys from all qualified dealers to end the protocol -func (s *JointFeldmanState) sumUpQualifiedKeys(qualified int) (*scalar, *pointG2, []pointG2) { - qualifiedx, qualifiedPubKey, qualifiedy := s.getQualifiedKeys(qualified) - - // sum up x - var jointx scalar - C.bn_new_wrapper((*C.bn_st)(&jointx)) - C.bn_sum_vector((*C.bn_st)(&jointx), (*C.bn_st)(&qualifiedx[0]), - (C.int)(qualified)) - // sum up Y - var jointPublicKey pointG2 - C.ep2_sum_vector((*C.ep2_st)(&jointPublicKey), - (*C.ep2_st)(&qualifiedPubKey[0]), (C.int)(qualified)) - // sum up []y - jointy := make([]pointG2, s.size) - for i := 0; i < s.size; i++ { - C.ep2_sum_vector((*C.ep2_st)(&jointy[i]), - (*C.ep2_st)(&qualifiedy[i][0]), (C.int)(qualified)) - } - return &jointx, &jointPublicKey, jointy -} - -// get the 3 type of keys from all qualified dealers -func (s *JointFeldmanState) getQualifiedKeys(qualified int) ([]scalar, []pointG2, 
[][]pointG2) { - qualifiedx := make([]scalar, 0, qualified) - qualifiedPubKey := make([]pointG2, 0, qualified) - qualifiedy := make([][]pointG2, s.size) - for i := 0; i < s.size; i++ { - qualifiedy[i] = make([]pointG2, 0, qualified) - } - - for i := 0; i < s.size; i++ { - if !s.fvss[i].disqualified { - qualifiedx = append(qualifiedx, s.fvss[i].x) - qualifiedPubKey = append(qualifiedPubKey, s.fvss[i].vA[0]) - for j := 0; j < s.size; j++ { - qualifiedy[j] = append(qualifiedy[j], s.fvss[i].y[j]) - } - } - } - return qualifiedx, qualifiedPubKey, qualifiedy -} diff --git a/crypto/dkg_test.go b/crypto/dkg_test.go deleted file mode 100644 index 3cc1d172cca..00000000000 --- a/crypto/dkg_test.go +++ /dev/null @@ -1,834 +0,0 @@ -//go:build relic -// +build relic - -package crypto - -import ( - crand "crypto/rand" - "fmt" - mrand "math/rand" - "sync" - "testing" - "time" - - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var gt *testing.T - -func TestDKG(t *testing.T) { - t.Run("FeldmanVSSSimple", testFeldmanVSSSimple) - t.Run("FeldmanVSSQual", testFeldmanVSSQual) - t.Run("JointFeldman", testJointFeldman) -} - -// optimal threshold (t) to allow the largest number of malicious participants (m) -// assuming the protocol requires: -// -// m<=t for unforgeability -// n-m>=t+1 for robustness -func optimalThreshold(size int) int { - return (size - 1) / 2 -} - -// Testing the happy path of Feldman VSS by simulating a network of n participants -func testFeldmanVSSSimple(t *testing.T) { - log.SetLevel(log.ErrorLevel) - - n := 4 - for threshold := MinimumThreshold; threshold < n; threshold++ { - t.Run(fmt.Sprintf("FeldmanVSS (n,t)=(%d,%d)", n, threshold), func(t *testing.T) { - dkgCommonTest(t, feldmanVSS, n, threshold, happyPath) - }) - } -} - -type testCase int - -const ( - happyPath testCase = iota - invalidShares - invalidVector - invalidComplaint - invalidComplaintAnswer - duplicatedMessages -) - -type behavior int - -const ( - honest behavior = iota - manyInvalidShares - fewInvalidShares - invalidVectorBroadcast - invalidComplaintBroadcast - timeoutedComplaintBroadcast - invalidSharesComplainTrigger - invalidComplaintAnswerBroadcast - duplicatedSendAndBroadcast -) - -// Testing Feldman VSS with the qualification system by simulating a network of n participants -func testFeldmanVSSQual(t *testing.T) { - log.SetLevel(log.ErrorLevel) - - n := 4 - // happy path, test multiple values of thresold - for threshold := MinimumThreshold; threshold < n; threshold++ { - t.Run(fmt.Sprintf("FeldmanVSSQual_(n,t)=(%d,%d)", n, threshold), func(t *testing.T) { - dkgCommonTest(t, feldmanVSSQual, n, threshold, happyPath) - }) - } - - // unhappy path, with focus on the optimal threshold value - n = 5 - threshold := optimalThreshold(n) - // unhappy path, with invalid shares - t.Run(fmt.Sprintf("FeldmanVSSQual_InvalidShares_(n,t)=(%d,%d)", n, threshold), func(t *testing.T) { - dkgCommonTest(t, feldmanVSSQual, n, threshold, invalidShares) - }) - // unhappy path, with invalid vector - t.Run(fmt.Sprintf("FeldmanVSSQual_InvalidVector_(n,t)=(%d,%d)", n, threshold), func(t *testing.T) { - dkgCommonTest(t, feldmanVSSQual, n, threshold, invalidVector) - }) - // unhappy paths with invalid complaints and complaint answers - // are only tested within joint feldman. 
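The tests below simulate the network with one buffered Go channel per participant; a condensed sketch of that pattern (testMsg, newNetwork and broadcast are illustrative names, not identifiers from the file):

type testMsg struct {
	orig int
	data []byte
}

// newNetwork creates one buffered channel per participant, sized so that
// senders never block during a test run.
func newNetwork(n int) []chan *testMsg {
	chans := make([]chan *testMsg, n)
	for i := range chans {
		chans[i] = make(chan *testMsg, 5*n)
	}
	return chans
}

// broadcast fans a message out to every participant except the sender.
func broadcast(chans []chan *testMsg, from int, data []byte) {
	for i := range chans {
		if i != from {
			chans[i] <- &testMsg{orig: from, data: data}
		}
	}
}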
-} - -// Testing JointFeldman by simulating a network of n participants -func testJointFeldman(t *testing.T) { - log.SetLevel(log.ErrorLevel) - - n := 4 - var threshold int - // happy path, test multiple values of thresold - for threshold = MinimumThreshold; threshold < n; threshold++ { - t.Run(fmt.Sprintf("JointFeldman_(n,t)=(%d,%d)", n, threshold), func(t *testing.T) { - dkgCommonTest(t, jointFeldman, n, threshold, happyPath) - }) - } - - // unhappy path, with focus on the optimal threshold value - n = 5 - threshold = optimalThreshold(n) - // unhappy path, with invalid shares - t.Run(fmt.Sprintf("JointFeldman_InvalidShares_(n,t)=(%d,%d)", n, threshold), func(t *testing.T) { - dkgCommonTest(t, jointFeldman, n, threshold, invalidShares) - }) - // unhappy path, with invalid vector - t.Run(fmt.Sprintf("JointFeldman_InvalidVector_(n,t)=(%d,%d)", n, threshold), func(t *testing.T) { - dkgCommonTest(t, jointFeldman, n, threshold, invalidVector) - }) - // unhappy path, with invalid complaints - t.Run(fmt.Sprintf("JointFeldman_InvalidComplaints_(n,t)=(%d,%d)", n, threshold), func(t *testing.T) { - dkgCommonTest(t, jointFeldman, n, threshold, invalidComplaint) - }) - // unhappy path, with invalid complaint answers - t.Run(fmt.Sprintf("JointFeldman_InvalidComplaintAnswers_(n,t)=(%d,%d)", n, threshold), func(t *testing.T) { - dkgCommonTest(t, jointFeldman, n, threshold, invalidComplaintAnswer) - }) - // unhappy path, with duplicated messages (all types) - t.Run(fmt.Sprintf("JointFeldman_DuplicatedMessages_(n,t)=(%d,%d)", n, threshold), func(t *testing.T) { - dkgCommonTest(t, jointFeldman, n, threshold, duplicatedMessages) - }) -} - -// Supported Key Generation protocols -const ( - feldmanVSS = iota - feldmanVSSQual - jointFeldman -) - -func newDKG(dkg int, size int, threshold int, myIndex int, - processor DKGProcessor, dealerIndex int) (DKGState, error) { - switch dkg { - case feldmanVSS: - return NewFeldmanVSS(size, threshold, myIndex, processor, dealerIndex) - case feldmanVSSQual: - return NewFeldmanVSSQual(size, threshold, myIndex, processor, dealerIndex) - case jointFeldman: - return NewJointFeldman(size, threshold, myIndex, processor) - default: - return nil, fmt.Errorf("non supported protocol") - } -} - -func dkgCommonTest(t *testing.T, dkg int, n int, threshold int, test testCase) { - gt = t - log.Info("DKG protocol set up") - - // create the participant channels - chans := make([]chan *message, n) - lateChansTimeout1 := make([]chan *message, n) - lateChansTimeout2 := make([]chan *message, n) - for i := 0; i < n; i++ { - chans[i] = make(chan *message, 5*n) - lateChansTimeout1[i] = make(chan *message, 5*n) - lateChansTimeout2[i] = make(chan *message, 5*n) - } - - // number of dealers in the protocol - var dealers int - if dkg == jointFeldman { - dealers = n - } else { - dealers = 1 - } - - // create n processors for all participants - processors := make([]testDKGProcessor, 0, n) - for current := 0; current < n; current++ { - list := make([]bool, dealers) - processors = append(processors, testDKGProcessor{ - current: current, - chans: chans, - lateChansTimeout1: lateChansTimeout1, - lateChansTimeout2: lateChansTimeout2, - protocol: dkgType, - malicious: honest, - disqualified: list, - }) - } - - // Update processors depending on the test - // - // r1 and r2 is the number of malicious participants, each group with a slight diffrent behavior. - // - r1 participants of indices 0 to r1-1 behave maliciously and will get disqualified by honest participants. 
- // - r2 participants of indices r1 to r1+r2-1 will behave maliciously at first but will recover and won't be - // disqualified by honest participants. The r2 participants may or may not obtain correct protocol results. - var r1, r2 int - // h is the index of the first honest participant. All participant with indices greater than or equal to h are honest. - // Checking the final protocol results is done for honest participants only. - // Whether the r2 participants belong to the honest participants or not depend on the malicious behavior (detailed below). - var h int - - switch test { - case happyPath: - // r1 = r2 = 0 - - case invalidShares: - r1 = mrand.Intn(dealers + 1) // dealers with invalid shares and will get disqualified - r2 = mrand.Intn(dealers - r1 + 1) // dealers with invalid shares but will recover - h = r1 - - var i int - for i = 0; i < r1; i++ { - processors[i].malicious = manyInvalidShares - } - for ; i < r1+r2; i++ { - processors[i].malicious = fewInvalidShares - } - t.Logf("%d participants will be disqualified, %d other participants will recover\n", r1, r2) - - case invalidVector: - r1 = 1 + mrand.Intn(dealers) // dealers with invalid vector and will get disqualified - h = r1 - - // in this case r2 = 0 - for i := 0; i < r1; i++ { - processors[i].malicious = invalidVectorBroadcast - } - t.Logf("%d participants will be disqualified\n", r1) - - case invalidComplaint: - r1 = 1 + mrand.Intn(dealers-1) // participants with invalid complaints and will get disqualified. - // r1>= 1 to have at least one malicious dealer, and r1<leadrers-1 to leave space for the trigger dealer below. - r2 = mrand.Intn(dealers - r1) // participants with timeouted complaints: they are considered qualified by honest participants - // but their results are invalid - h = r1 + r2 // r2 shouldn't be verified for protocol correctness - - for i := 0; i < r1; i++ { - processors[i].malicious = invalidComplaintBroadcast - } - for i := r1; i < r1+r2; i++ { - processors[i].malicious = timeoutedComplaintBroadcast - } - // The participant (r1+r2) will send wrong shares and cause the 0..r1+r2-1 dealers to send complaints. - // This participant doesn't risk getting disqualified as the complaints against them - // are invalid and won't count. The participant doesn't even answer the complaint. - processors[r1+r2].malicious = invalidSharesComplainTrigger - t.Logf("%d participants will be disqualified, %d other participants won't be disqualified.\n", r1, r2) - - case invalidComplaintAnswer: - r1 = 1 + mrand.Intn(dealers-1) // participants with invalid complaint answers and will get disqualified. - // r1>= 1 to have at least one malicious dealer, and r1<leadrers-1 to leave space for the complaint sender. - h = r1 - // the 0..r1-1 dealers will send invalid shares to n-1 to trigger complaints. - for i := 0; i < r1; i++ { - processors[i].malicious = invalidComplaintAnswerBroadcast - } - t.Logf("%d participants will be disqualified\n", r1) - case duplicatedMessages: - // r1 = r2 = 0 - // participant 0 will send duplicated shares, verif vector and complaint to all participants - processors[0].malicious = duplicatedSendAndBroadcast - // participant 1 is a complaint trigger, it sents a wrong share to 0 to trigger a complaint. - // it also sends duplicated complaint answers. 
- processors[1].malicious = invalidSharesComplainTrigger - - default: - panic("test case not supported") - } - - // number of participants to test - lead := 0 - var sync sync.WaitGroup - - // create DKG in all participants - for current := 0; current < n; current++ { - var err error - processors[current].dkg, err = newDKG(dkg, n, threshold, - current, &processors[current], lead) - require.NoError(t, err) - } - - phase := 0 - if dkg == feldmanVSS { // jump to the last phase since there is only one phase for feldmanVSS - phase = 2 - } - - // start DKG in all participants - // start listening on the channels - seed := make([]byte, SeedMinLenDKG) - sync.Add(n) - - log.Info("DKG protocol starts") - - for current := 0; current < n; current++ { - processors[current].startSync.Add(1) - go dkgRunChan(&processors[current], &sync, t, phase) - } - - for current := 0; current < n; current++ { - // start dkg in parallel - // ( one common PRG is used internally for all instances which causes a race - // in generating randoms and leads to non-deterministic keys. If deterministic keys - // are required, switch to sequential calls to dkg.Start() ) - go func(current int) { - _, err := crand.Read(seed) - require.NoError(t, err) - err = processors[current].dkg.Start(seed) - require.Nil(t, err) - processors[current].startSync.Done() // avoids reading messages when a dkg instance hasn't started yet - }(current) - } - phase++ - - // sync the two timeouts and start the next phase - for ; phase <= 2; phase++ { - sync.Wait() - // post processing required for timeout edge case tests - go timeoutPostProcess(processors, t, phase) - sync.Add(n) - for current := 0; current < n; current++ { - go dkgRunChan(&processors[current], &sync, t, phase) - } - } - - // synchronize the main thread to end all DKGs - sync.Wait() - - // assertions and results: - - // check the disqualified list for all non-disqualified participants - expected := make([]bool, dealers) - for i := 0; i < r1; i++ { - expected[i] = true - } - - for i := h; i < n; i++ { - t.Logf("participant %d is not disqualified, its disqualified list is:\n", i) - t.Log(processors[i].disqualified) - assert.Equal(t, expected, processors[i].disqualified) - } - // check if DKG is successful - if (dkg == jointFeldman && (r1 > threshold || (n-r1) <= threshold)) || - (dkg == feldmanVSSQual && r1 == 1) { // case of a single dealer - t.Logf("dkg failed, there are %d disqualified participants\n", r1) - // DKG failed, check for final errors - for i := r1; i < n; i++ { - err := processors[i].finalError - assert.Error(t, err) - assert.True(t, IsDKGFailureError(err)) - } - } else { - t.Logf("dkg succeeded, there are %d disqualified participants\n", r1) - // DKG has succeeded, check for final errors - for i := h; i < n; i++ { - assert.NoError(t, processors[i].finalError) - } - // DKG has succeeded, check the final keys - for i := h; i < n; i++ { - assert.True(t, processors[h].pk.Equals(processors[i].pk), - "2 group public keys are mismatching") - } - } - -} - -// time after which a silent channel causes switching to the next dkg phase -const phaseSwitchTimeout = 200 * time.Millisecond - -// This is a testing function -// It simulates processing incoming messages by a participant -// it assumes proc.dkg is already running -func dkgRunChan(proc *testDKGProcessor, - sync *sync.WaitGroup, t *testing.T, phase int) { - for { - select { - // if a message is received, handle it - case newMsg := <-proc.chans[proc.current]: - proc.startSync.Wait() // avoids reading a message when the receiving 
dkg instance - // hasn't started yet. - if newMsg.channel == private { - err := proc.dkg.HandlePrivateMsg(newMsg.orig, newMsg.data) - require.Nil(t, err) - } else { - err := proc.dkg.HandleBroadcastMsg(newMsg.orig, newMsg.data) - require.Nil(t, err) - } - // if no message is received by the channel, call the DKG timeout - case <-time.After(phaseSwitchTimeout): - proc.startSync.Wait() // avoids racing when starting isn't over yet - switch phase { - case 0: - log.Infof("%d shares phase ended\n", proc.current) - err := proc.dkg.NextTimeout() - require.Nil(t, err) - case 1: - log.Infof("%d complaints phase ended \n", proc.current) - err := proc.dkg.NextTimeout() - require.Nil(t, err) - case 2: - log.Infof("%d dkg ended \n", proc.current) - _, pk, _, err := proc.dkg.End() - proc.finalError = err - proc.pk = pk - } - sync.Done() - return - } - } -} - -// post processing required for some edge case tests -func timeoutPostProcess(processors []testDKGProcessor, t *testing.T, phase int) { - switch phase { - case 1: - for i := 0; i < len(processors); i++ { - go func(i int) { - for len(processors[0].lateChansTimeout1[i]) != 0 { - // to test timeouted messages, late messages are copied to the main channels - msg := <-processors[0].lateChansTimeout1[i] - processors[0].chans[i] <- msg - } - }(i) - } - case 2: - for i := 0; i < len(processors); i++ { - go func(i int) { - for len(processors[0].lateChansTimeout2[i]) != 0 { - // to test timeouted messages, late messages are copied to the main channels - msg := <-processors[0].lateChansTimeout2[i] - processors[0].chans[i] <- msg - } - }(i) - } - } -} - -// implements DKGProcessor interface -type testDKGProcessor struct { - // instnce of DKG - dkg DKGState - // index of the current participant in the protocol - current int - // group public key, output of DKG - pk PublicKey - // final disqualified list - disqualified []bool - // final output error of the DKG - finalError error - // type of malicious behavior - malicious behavior - // start DKG syncer - startSync sync.WaitGroup - - // main message channels - chans []chan *message - // extra channels for late messges with regards to the first timeout, and second timeout - lateChansTimeout1 []chan *message - lateChansTimeout2 []chan *message - // type of the protocol - protocol int - - // only used when testing the threshold signature stateful api - ts *blsThresholdSignatureParticipant - keys *statelessKeys -} - -const ( - dkgType int = iota - tsType -) - -const ( - broadcast int = iota - private -) - -type message struct { - orig int - protocol int - channel int - data []byte -} - -func (proc *testDKGProcessor) Disqualify(participant int, logInfo string) { - gt.Logf("%d disqualifies %d, %s\n", proc.current, participant, logInfo) - proc.disqualified[participant] = true -} - -func (proc *testDKGProcessor) FlagMisbehavior(participant int, logInfo string) { - gt.Logf("%d flags a misbehavior from %d: %s", proc.current, participant, logInfo) -} - -// This is a testing function -// it simulates sending a message from one participant to another -func (proc *testDKGProcessor) PrivateSend(dest int, data []byte) { - go func() { - log.Infof("%d sending to %d", proc.current, dest) - if proc.malicious == fewInvalidShares || proc.malicious == manyInvalidShares || - proc.malicious == invalidSharesComplainTrigger || proc.malicious == invalidComplaintAnswerBroadcast || - proc.malicious == duplicatedSendAndBroadcast { - proc.invalidShareSend(dest, data) - return - } - proc.honestSend(dest, data) - }() -} - -// This is a testing 
function -// it simulates sending a honest message from one participant to another -func (proc *testDKGProcessor) honestSend(dest int, data []byte) { - gt.Logf("%d honestly sending to %d:\n%x\n", proc.current, dest, data) - newMsg := &message{proc.current, proc.protocol, private, data} - proc.chans[dest] <- newMsg -} - -// This is a testing function -// it simulates sending a malicious message from one participant to another -// This function simulates the behavior of a malicious participant. -func (proc *testDKGProcessor) invalidShareSend(dest int, data []byte) { - - // check the behavior - var recipients int // number of recipients to send invalid shares to - switch proc.malicious { - case manyInvalidShares: - recipients = proc.dkg.Threshold() + 1 // t < recipients <= n - case fewInvalidShares: - recipients = proc.dkg.Threshold() // 0 <= recipients <= t - case invalidSharesComplainTrigger: - recipients = proc.current // equal to r1+r2, which causes all r1+r2 to complain - case invalidComplaintAnswerBroadcast: - recipients = 0 // treat this case separately as the complaint trigger is the participant n-1 - case duplicatedSendAndBroadcast: - proc.honestSend(dest, data) - proc.honestSend(dest, data) - return - default: - panic("invalid share send not supported") - } - - // copy of data - newData := make([]byte, len(data)) - copy(newData, data) - - newMsg := &message{proc.current, proc.protocol, private, newData} - originalMsg := &message{proc.current, proc.protocol, private, data} - - // check destination - if (dest < recipients) || (proc.current < recipients && dest < recipients+1) || - (proc.malicious == invalidComplaintAnswerBroadcast && dest == proc.dkg.Size()-1) { - // choose a random reason for an invalid share - coin := mrand.Intn(7) - gt.Logf("%d maliciously sending to %d, coin is %d\n", proc.current, dest, coin) - switch coin { - case 0: - // value doesn't match the verification vector - newMsg.data[8]++ - proc.chans[dest] <- newMsg - case 1: - // empty message - newMsg.data = newMsg.data[:0] - proc.chans[dest] <- newMsg - case 2: - // valid message length but invalid share length - newMsg.data = newMsg.data[:1] - proc.chans[dest] <- newMsg - case 3: - // invalid value - for i := 0; i < len(newMsg.data); i++ { - newMsg.data[i] = 0xFF - } - proc.chans[dest] <- newMsg - case 4: - // do not send the share at all - return - case 5: - // wrong header: will cause a complaint - newMsg.data[0] = byte(feldmanVSSVerifVec) - proc.chans[dest] <- newMsg - case 6: - // message will be sent after the shares timeout and will be considered late - // by the receiver. All late messages go into a separate channel and will be sent to - // the main channel after the shares timeout. 
- proc.lateChansTimeout1[dest] <- newMsg - return - } - - } else { - gt.Logf("turns out to be a honest send\n%x\n", data) - } - // honest send case: this is the only message sent - // malicious send case: this is a second correct send, to test the second message gets ignored - // by the receiver (sender has been tagged malicious after the first send) - proc.chans[dest] <- originalMsg - -} - -// This is a testing function -// it simulates broadcasting a message from one participant to all participants -func (proc *testDKGProcessor) Broadcast(data []byte) { - go func() { - log.Infof("%d Broadcasting:", proc.current) - - if data[0] == byte(feldmanVSSVerifVec) && proc.malicious == invalidVectorBroadcast { - proc.invalidVectorBroadcast(data) - } else if data[0] == byte(feldmanVSSComplaint) && - (proc.malicious == invalidComplaintBroadcast || proc.malicious == timeoutedComplaintBroadcast) { - proc.invalidComplaintBroadcast(data) - } else if data[0] == byte(feldmanVSSComplaintAnswer) && proc.malicious == invalidComplaintAnswerBroadcast { - proc.invalidComplaintAnswerBroadcast(data) - } else if proc.malicious == duplicatedSendAndBroadcast || - (data[0] == byte(feldmanVSSComplaintAnswer) && proc.malicious == invalidSharesComplainTrigger) { - // the complaint trigger also sends duplicated complaint answers - proc.honestBroadcast(data) - proc.honestBroadcast(data) - } else { - proc.honestBroadcast(data) - } - }() -} - -func (proc *testDKGProcessor) honestBroadcast(data []byte) { - gt.Logf("%d honestly broadcasting:\n%x\n", proc.current, data) - newMsg := &message{proc.current, proc.protocol, broadcast, data} - for i := 0; i < len(proc.chans); i++ { - if i != proc.current { - proc.chans[i] <- newMsg - } - } -} - -func (proc *testDKGProcessor) invalidVectorBroadcast(data []byte) { - newMsg := &message{proc.current, proc.protocol, broadcast, data} - - // choose a random reason of an invalid vector - coin := mrand.Intn(5) - gt.Logf("%d malicious vector broadcast, coin is %d\n", proc.current, coin) - switch coin { - case 0: - // invalid point serialization - newMsg.data[1] = 0xFF - case 1: - // invalid length - newMsg.data = newMsg.data[:5] - case 2: - // do not send the vector at all - return - case 3: - // wrong header - newMsg.data[0] = byte(feldmanVSSShare) - case 4: - // send the vector after the first timeout, equivalent to not sending at all - // as the vector should be ignored. 
- for i := 0; i < proc.dkg.Size(); i++ { - if i != proc.current { - proc.lateChansTimeout1[i] <- newMsg - } - } - return - } - gt.Logf("%x\n", newMsg.data) - for i := 0; i < proc.dkg.Size(); i++ { - if i != proc.current { - proc.chans[i] <- newMsg - } - } -} - -func (proc *testDKGProcessor) invalidComplaintBroadcast(data []byte) { - newMsg := &message{proc.current, proc.protocol, broadcast, data} - - if proc.malicious == invalidComplaintBroadcast { - - // choose a random reason for an invalid complaint - coin := mrand.Intn(2) - gt.Logf("%d malicious complaint broadcast, coin is %d\n", proc.current, coin) - switch coin { - case 0: - // invalid complainee - newMsg.data[1] = byte(proc.dkg.Size() + 1) - case 1: - // invalid length - newMsg.data = make([]byte, complaintSize+5) - copy(newMsg.data, data) - } - gt.Logf("%x\n", newMsg.data) - for i := 0; i < len(proc.chans); i++ { - if i != proc.current { - proc.chans[i] <- newMsg - } - } - } else if proc.malicious == timeoutedComplaintBroadcast { - gt.Logf("%d timeouted complaint broadcast\n", proc.current) - // send the complaint after the second timeout, equivalent to not sending at all - // as the complaint should be ignored. - for i := 0; i < len(proc.chans); i++ { - if i != proc.current { - proc.lateChansTimeout2[i] <- newMsg - } - } - return - } -} - -func (proc *testDKGProcessor) invalidComplaintAnswerBroadcast(data []byte) { - newMsg := &message{proc.current, proc.protocol, broadcast, data} - - // choose a random reason for an invalid complaint - coin := mrand.Intn(3) - gt.Logf("%d malicious complaint answer broadcast, coin is %d\n", proc.current, coin) - switch coin { - case 0: - // invalid complainee - newMsg.data[1] = byte(proc.dkg.Size() + 1) - case 1: - // invalid length - newMsg.data = make([]byte, complaintAnswerSize+5) - copy(newMsg.data, data) - case 2: - // no answer at all - return - } - //gt.Logf("%x\n", newMsg.data) - for i := 0; i < len(proc.chans); i++ { - if i != proc.current { - proc.chans[i] <- newMsg - } - } -} - -// implements a dummy DKGProcessor -type dummyTestDKGProcessor struct { -} - -func (proc dummyTestDKGProcessor) PrivateSend(int, []byte) {} -func (proc dummyTestDKGProcessor) Broadcast([]byte) {} -func (proc dummyTestDKGProcessor) Disqualify(int, string) {} -func (proc dummyTestDKGProcessor) FlagMisbehavior(int, string) {} - -func TestDKGErrorTypes(t *testing.T) { - t.Run("dkgFailureError sanity", func(t *testing.T) { - failureError := dkgFailureErrorf("some error") - invInpError := invalidInputsErrorf("") - otherError := fmt.Errorf("some error") - assert.True(t, IsDKGFailureError(failureError)) - assert.False(t, IsDKGFailureError(otherError)) - assert.False(t, IsDKGFailureError(invInpError)) - assert.False(t, IsDKGFailureError(nil)) - assert.False(t, IsInvalidInputsError(failureError)) - }) - - t.Run("dkgInvalidStateTransitionError sanity", func(t *testing.T) { - failureError := dkgInvalidStateTransitionErrorf("some error") - invInpError := invalidInputsErrorf("") - otherError := fmt.Errorf("some error") - assert.True(t, IsDKGInvalidStateTransitionError(failureError)) - assert.False(t, IsInvalidInputsError(failureError)) - assert.False(t, IsDKGInvalidStateTransitionError(invInpError)) - assert.False(t, IsDKGInvalidStateTransitionError(otherError)) - assert.False(t, IsDKGInvalidStateTransitionError(nil)) - }) -} - -func TestDKGTransitionErrors(t *testing.T) { - n := 5 - threshold := 3 - myIndex := 0 - dealer := 1 - seed := make([]byte, SeedMinLenDKG) - - t.Run("feldman VSS", func(t *testing.T) { - state, err := 
NewFeldmanVSS(n, threshold, myIndex, dummyTestDKGProcessor{}, dealer) - require.NoError(t, err) - // calls before start - err = state.ForceDisqualify(1) - assert.True(t, IsDKGInvalidStateTransitionError(err)) - err = state.HandlePrivateMsg(1, []byte{}) - assert.True(t, IsDKGInvalidStateTransitionError(err)) - err = state.HandleBroadcastMsg(1, []byte{}) - assert.True(t, IsDKGInvalidStateTransitionError(err)) - _, _, _, err = state.End() - assert.True(t, IsDKGInvalidStateTransitionError(err)) - }) - - t.Run("Feldman VSS Qualif and joint-Feldman ", func(t *testing.T) { - stateFVSSQ, err := NewFeldmanVSSQual(n, threshold, myIndex, dummyTestDKGProcessor{}, dealer) - require.NoError(t, err) - stateJF, err := NewJointFeldman(n, threshold, myIndex, dummyTestDKGProcessor{}) - require.NoError(t, err) - - for _, state := range []DKGState{stateFVSSQ, stateJF} { - // calls before start - err = state.ForceDisqualify(1) - assert.True(t, IsDKGInvalidStateTransitionError(err)) - err = state.HandlePrivateMsg(1, []byte{}) - assert.True(t, IsDKGInvalidStateTransitionError(err)) - err = state.HandleBroadcastMsg(1, []byte{}) - assert.True(t, IsDKGInvalidStateTransitionError(err)) - _, _, _, err = state.End() - assert.True(t, IsDKGInvalidStateTransitionError(err)) - err = state.NextTimeout() - assert.True(t, IsDKGInvalidStateTransitionError(err)) - // after start - err = state.Start(seed) - require.NoError(t, err) - _, _, _, err = state.End() - assert.True(t, IsDKGInvalidStateTransitionError(err)) - // after first timeout - err = state.NextTimeout() - require.NoError(t, err) - err = state.Start(seed) - assert.True(t, IsDKGInvalidStateTransitionError(err)) - _, _, _, err = state.End() - assert.True(t, IsDKGInvalidStateTransitionError(err)) - // after second timeout - err = state.NextTimeout() - require.NoError(t, err) - err = state.Start(seed) - assert.True(t, IsDKGInvalidStateTransitionError(err)) - err = state.NextTimeout() - assert.True(t, IsDKGInvalidStateTransitionError(err)) - // after end - _, _, _, err = state.End() - require.True(t, IsDKGFailureError(err)) - err = state.NextTimeout() - assert.True(t, IsDKGInvalidStateTransitionError(err)) - } - }) -} diff --git a/crypto/ecdsa.go b/crypto/ecdsa.go deleted file mode 100644 index dca3604570a..00000000000 --- a/crypto/ecdsa.go +++ /dev/null @@ -1,457 +0,0 @@ -package crypto - -// Elliptic Curve Digital Signature Algorithm is implemented as -// defined in FIPS 186-4 (although the hash functions implemented in this package are SHA2 and SHA3). - -// Most of the implementation is Go based and is not optimized for performance. - -// This implementation does not include any security against side-channel attacks. 
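Before the implementation details, a hedged usage sketch of this file's API as exercised by the package tests (exampleSignVerify is an illustrative name; assumes crand aliases "crypto/rand" and that "github.com/onflow/flow-go/crypto/hash" is imported):

// exampleSignVerify generates a P-256 key from a random seed, signs a
// message, and verifies the signature with the same hasher.
func exampleSignVerify(message []byte) (bool, error) {
	seed := make([]byte, KeyGenSeedMinLen)
	if _, err := crand.Read(seed); err != nil {
		return false, err
	}
	sk, err := GeneratePrivateKey(ECDSAP256, seed)
	if err != nil {
		return false, err
	}
	hasher := hash.NewSHA3_256()
	sig, err := sk.Sign(message, hasher)
	if err != nil {
		return false, err
	}
	// verification must use the same hashing algorithm that produced the signature
	return sk.PublicKey().Verify(sig, message, hasher)
}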
- -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/sha256" - "fmt" - "math/big" - - "github.com/btcsuite/btcd/btcec/v2" - "golang.org/x/crypto/hkdf" - - "github.com/onflow/flow-go/crypto/hash" -) - -const ( - // NIST P256 - SignatureLenECDSAP256 = 64 - PrKeyLenECDSAP256 = 32 - // PubKeyLenECDSAP256 is the size of uncompressed points on P256 - PubKeyLenECDSAP256 = 64 - - // SECG secp256k1 - SignatureLenECDSASecp256k1 = 64 - PrKeyLenECDSASecp256k1 = 32 - // PubKeyLenECDSASecp256k1 is the size of uncompressed points on secp256k1 - PubKeyLenECDSASecp256k1 = 64 -) - -// ecdsaAlgo embeds SignAlgo -type ecdsaAlgo struct { - // elliptic curve - curve elliptic.Curve - // the signing algo and parameters - algo SigningAlgorithm -} - -// ECDSA contexts for each supported curve -// -// NIST P-256 curve -var p256Instance *ecdsaAlgo - -// SECG secp256k1 curve https://www.secg.org/sec2-v2.pdf -var secp256k1Instance *ecdsaAlgo - -func bitsToBytes(bits int) int { - return (bits + 7) >> 3 -} - -// signHash returns the signature of the hash using the private key -// the signature is the concatenation bytes(r)||bytes(s) -// where r and s are padded to the curve order size -func (sk *prKeyECDSA) signHash(h hash.Hash) (Signature, error) { - r, s, err := ecdsa.Sign(rand.Reader, sk.goPrKey, h) - if err != nil { - return nil, fmt.Errorf("ECDSA Sign failed: %w", err) - } - rBytes := r.Bytes() - sBytes := s.Bytes() - Nlen := bitsToBytes((sk.alg.curve.Params().N).BitLen()) - signature := make([]byte, 2*Nlen) - // pad the signature with zeroes - copy(signature[Nlen-len(rBytes):], rBytes) - copy(signature[2*Nlen-len(sBytes):], sBytes) - return signature, nil -} - -// Sign signs an array of bytes -// -// The resulting signature is the concatenation bytes(r)||bytes(s), -// where r and s are padded to the curve order size. -// The private key is read only while sha2 and sha3 hashers are -// modified temporarily. -// -// The function returns: -// - (false, nilHasherError) if a hasher is nil -// - (false, invalidHasherSizeError) when the hasher's output size is less than the curve order (currently 32 bytes). -// - (nil, error) if an unexpected error occurs -// - (signature, nil) otherwise -func (sk *prKeyECDSA) Sign(data []byte, alg hash.Hasher) (Signature, error) { - if alg == nil { - return nil, nilHasherError - } - // check hasher's size is at least the curve order in bytes - Nlen := bitsToBytes((sk.alg.curve.Params().N).BitLen()) - if alg.Size() < Nlen { - return nil, invalidHasherSizeErrorf( - "hasher's size should be at least %d, got %d", Nlen, alg.Size()) - } - - h := alg.ComputeHash(data) - return sk.signHash(h) -} - -// verifyHash implements ECDSA signature verification -func (pk *pubKeyECDSA) verifyHash(sig Signature, h hash.Hash) (bool, error) { - Nlen := bitsToBytes((pk.alg.curve.Params().N).BitLen()) - - if len(sig) != 2*Nlen { - return false, nil - } - - var r big.Int - var s big.Int - r.SetBytes(sig[:Nlen]) - s.SetBytes(sig[Nlen:]) - return ecdsa.Verify(pk.goPubKey, h, &r, &s), nil -} - -// Verify verifies a signature of an input data under the public key. -// -// If the input signature slice has an invalid length or fails to deserialize into valid -// scalars, the function returns false without an error. -// -// Public keys are read only, sha2 and sha3 hashers are -// modified temporarily. -// -// The function returns: -// - (false, nilHasherError) if a hasher is nil -// - (false, invalidHasherSizeError) when the hasher's output size is less than the curve order (currently 32 bytes). 
-// - (false, error) if an unexpected error occurs -// - (validity, nil) otherwise -func (pk *pubKeyECDSA) Verify(sig Signature, data []byte, alg hash.Hasher) (bool, error) { - if alg == nil { - return false, nilHasherError - } - - // check hasher's size is at least the curve order in bytes - Nlen := bitsToBytes((pk.alg.curve.Params().N).BitLen()) - if alg.Size() < Nlen { - return false, invalidHasherSizeErrorf( - "hasher's size should be at least %d, got %d", Nlen, alg.Size()) - } - - h := alg.ComputeHash(data) - return pk.verifyHash(sig, h) -} - -// signatureFormatCheck verifies the format of a serialized signature, -// regardless of messages or public keys. -// If FormatCheck returns false then the input is not a valid ECDSA -// signature and will fail a verification against any message and public key. -func (a *ecdsaAlgo) signatureFormatCheck(sig Signature) bool { - N := a.curve.Params().N - Nlen := bitsToBytes(N.BitLen()) - - if len(sig) != 2*Nlen { - return false - } - - var r big.Int - var s big.Int - r.SetBytes(sig[:Nlen]) - s.SetBytes(sig[Nlen:]) - - if r.Sign() == 0 || s.Sign() == 0 { - return false - } - - if r.Cmp(N) >= 0 || s.Cmp(N) >= 0 { - return false - } - - // We could also check whether r and r+N are quadratic residues modulo (p) - // using Euler's criterion. - return true -} - -var one = new(big.Int).SetInt64(1) - -// goecdsaGenerateKey generates a public and private key pair -// for the crypto/ecdsa library using the input seed -func goecdsaGenerateKey(c elliptic.Curve, seed []byte) *ecdsa.PrivateKey { - k := new(big.Int).SetBytes(seed) - n := new(big.Int).Sub(c.Params().N, one) - k.Mod(k, n) - k.Add(k, one) - - priv := new(ecdsa.PrivateKey) - priv.PublicKey.Curve = c - priv.D = k - // public key is not computed - return priv -} - -// generatePrivateKey generates a private key for ECDSA -// deterministically using the input seed. -// -// It is recommended to use a secure crypto RNG to generate the seed. -// The seed must have enough entropy. 
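The derivation inside goecdsaGenerateKey above reduces an arbitrary byte string into the valid scalar range [1, N-1]; isolated as a sketch (scalarFromBytes is an illustrative name; assumes "crypto/elliptic" and "math/big" are imported):

// scalarFromBytes maps a byte string into [1, N-1] so the resulting
// scalar is always a valid, non-zero private key for the curve.
func scalarFromBytes(c elliptic.Curve, b []byte) *big.Int {
	one := big.NewInt(1)
	k := new(big.Int).SetBytes(b)
	nMinus1 := new(big.Int).Sub(c.Params().N, one)
	k.Mod(k, nMinus1) // k in [0, N-2]
	k.Add(k, one)     // k in [1, N-1], never zero
	return k
}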
-func (a *ecdsaAlgo) generatePrivateKey(seed []byte) (PrivateKey, error) { - if len(seed) < KeyGenSeedMinLen || len(seed) > KeyGenSeedMaxLen { - return nil, invalidInputsErrorf("seed byte length should be between %d and %d", - KeyGenSeedMinLen, KeyGenSeedMaxLen) - } - - // use HKDF to extract the seed entropy and expand it into key bytes - - // use SHA2-256 as the building block H in HKDF - hashFunction := sha256.New - salt := []byte("") // HKDF salt - info := []byte("") // HKDF info - // use extra 128 bits to reduce the modular reduction bias - Nlen := bitsToBytes((a.curve.Params().N).BitLen()) - okmLength := Nlen + (securityBits / 8) - - // instantiate HKDF and extract okm - reader := hkdf.New(hashFunction, seed, salt, info) - okm := make([]byte, okmLength) - n, err := reader.Read(okm) - if err != nil || n != okmLength { - return nil, fmt.Errorf("key generation failed because of the HKDF reader, %d bytes were read: %w", - n, err) - } - defer overwrite(okm) // overwrite okm - - sk := goecdsaGenerateKey(a.curve, okm) - return &prKeyECDSA{ - alg: a, - goPrKey: sk, - pubKey: nil, // public key is not computed - }, nil -} - -func (a *ecdsaAlgo) rawDecodePrivateKey(der []byte) (PrivateKey, error) { - n := a.curve.Params().N - nlen := bitsToBytes(n.BitLen()) - if len(der) != nlen { - return nil, invalidInputsErrorf("input has incorrect %s key size", a.algo) - } - var d big.Int - d.SetBytes(der) - - if d.Cmp(n) >= 0 { - return nil, invalidInputsErrorf("input is not a valid %s key", a.algo) - } - - priv := ecdsa.PrivateKey{ - D: &d, - } - priv.PublicKey.Curve = a.curve - - return &prKeyECDSA{ - alg: a, - goPrKey: &priv, - pubKey: nil, // public key is not computed - }, nil -} - -func (a *ecdsaAlgo) decodePrivateKey(der []byte) (PrivateKey, error) { - return a.rawDecodePrivateKey(der) -} - -func (a *ecdsaAlgo) rawDecodePublicKey(der []byte) (PublicKey, error) { - p := (a.curve.Params().P) - plen := bitsToBytes(p.BitLen()) - if len(der) != 2*plen { - return nil, invalidInputsErrorf("input has incorrect %s key size, got %d, expects %d", - a.algo, len(der), 2*plen) - } - var x, y big.Int - x.SetBytes(der[:plen]) - y.SetBytes(der[plen:]) - - // all the curves supported for now have a cofactor equal to 1, - // so that IsOnCurve guarantees the point is on the right subgroup. - if x.Cmp(p) >= 0 || y.Cmp(p) >= 0 || !a.curve.IsOnCurve(&x, &y) { - return nil, invalidInputsErrorf("input %x is not a valid %s key", der, a.algo) - } - - pk := ecdsa.PublicKey{ - Curve: a.curve, - X: &x, - Y: &y, - } - - return &pubKeyECDSA{a, &pk}, nil -} - -func (a *ecdsaAlgo) decodePublicKey(der []byte) (PublicKey, error) { - return a.rawDecodePublicKey(der) -} - -// decodePublicKeyCompressed returns a public key given the bytes of a compressed public key according to X9.62 section 4.3.6. 
-// this compressed representation uses an extra byte to disambiguate sign -func (a *ecdsaAlgo) decodePublicKeyCompressed(pkBytes []byte) (PublicKey, error) { - expectedLen := bitsToBytes(a.curve.Params().BitSize) + 1 - if len(pkBytes) != expectedLen { - return nil, invalidInputsErrorf(fmt.Sprintf("input length incompatible, expected %d, got %d", expectedLen, len(pkBytes))) - } - var goPubKey *ecdsa.PublicKey - - if a.curve == elliptic.P256() { - x, y := elliptic.UnmarshalCompressed(a.curve, pkBytes) - if x == nil { - return nil, invalidInputsErrorf("Key %x can't be interpreted as %v", pkBytes, a.algo.String()) - } - goPubKey = new(ecdsa.PublicKey) - goPubKey.Curve = a.curve - goPubKey.X = x - goPubKey.Y = y - - } else if a.curve == btcec.S256() { - pk, err := btcec.ParsePubKey(pkBytes) - if err != nil { - return nil, invalidInputsErrorf("Key %x can't be interpreted as %v", pkBytes, a.algo.String()) - } - // convert to a crypto/ecdsa key - goPubKey = pk.ToECDSA() - } else { - return nil, invalidInputsErrorf("the input curve is not supported") - } - return &pubKeyECDSA{a, goPubKey}, nil -} - -// prKeyECDSA is the private key of ECDSA, it implements the generic PrivateKey -type prKeyECDSA struct { - // the signature algo - alg *ecdsaAlgo - // ecdsa private key - goPrKey *ecdsa.PrivateKey - // public key - pubKey *pubKeyECDSA -} - -// Algorithm returns the algo related to the private key -func (sk *prKeyECDSA) Algorithm() SigningAlgorithm { - return sk.alg.algo -} - -// Size returns the length of the private key in bytes -func (sk *prKeyECDSA) Size() int { - return bitsToBytes((sk.alg.curve.Params().N).BitLen()) -} - -// PublicKey returns the public key associated to the private key -func (sk *prKeyECDSA) PublicKey() PublicKey { - // compute the public key once - if sk.pubKey == nil { - priv := sk.goPrKey - priv.PublicKey.X, priv.PublicKey.Y = priv.Curve.ScalarBaseMult(priv.D.Bytes()) - } - sk.pubKey = &pubKeyECDSA{ - alg: sk.alg, - goPubKey: &sk.goPrKey.PublicKey, - } - return sk.pubKey -} - -// given a private key (d), returns a raw encoding bytes(d) in big endian -// padded to the private key length -func (sk *prKeyECDSA) rawEncode() []byte { - skBytes := sk.goPrKey.D.Bytes() - Nlen := bitsToBytes((sk.alg.curve.Params().N).BitLen()) - skEncoded := make([]byte, Nlen) - // pad sk with zeroes - copy(skEncoded[Nlen-len(skBytes):], skBytes) - return skEncoded -} - -// Encode returns a byte representation of a private key. -// a simple raw byte encoding in big endian is used for all curves -func (sk *prKeyECDSA) Encode() []byte { - return sk.rawEncode() -} - -// Equals test the equality of two private keys -func (sk *prKeyECDSA) Equals(other PrivateKey) bool { - // check the key type - otherECDSA, ok := other.(*prKeyECDSA) - if !ok { - return false - } - // check the curve - if sk.alg.curve != otherECDSA.alg.curve { - return false - } - return sk.goPrKey.D.Cmp(otherECDSA.goPrKey.D) == 0 -} - -// String returns the hex string representation of the key. 
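A condensed sketch of the two decompression paths used by decodePublicKeyCompressed above: standard-library unmarshalling for P-256 and the btcec parser for secp256k1 (decompress is an illustrative name; assumes "crypto/ecdsa", "crypto/elliptic", "fmt" and btcec/v2 are imported):

// decompress parses a compressed point for either supported curve and
// returns it as a crypto/ecdsa public key.
func decompress(curve elliptic.Curve, pkBytes []byte) (*ecdsa.PublicKey, error) {
	if curve == elliptic.P256() {
		x, y := elliptic.UnmarshalCompressed(curve, pkBytes)
		if x == nil {
			// invalid encoding, or the point is not on the curve
			return nil, fmt.Errorf("invalid compressed P-256 point")
		}
		return &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, nil
	}
	// secp256k1 is not supported by crypto/elliptic's unmarshalling,
	// so the btcec parser is used instead.
	pk, err := btcec.ParsePubKey(pkBytes)
	if err != nil {
		return nil, err
	}
	return pk.ToECDSA(), nil
}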
-func (sk *prKeyECDSA) String() string { - return fmt.Sprintf("%#x", sk.Encode()) -} - -// pubKeyECDSA is the public key of ECDSA, it implements PublicKey -type pubKeyECDSA struct { - // the signature algo - alg *ecdsaAlgo - // public key data - goPubKey *ecdsa.PublicKey -} - -// Algorithm returns the the algo related to the private key -func (pk *pubKeyECDSA) Algorithm() SigningAlgorithm { - return pk.alg.algo -} - -// Size returns the length of the public key in bytes -func (pk *pubKeyECDSA) Size() int { - return 2 * bitsToBytes((pk.goPubKey.Params().P).BitLen()) -} - -// EncodeCompressed returns a compressed encoding according to X9.62 section 4.3.6. -// This compressed representation uses an extra byte to disambiguate parity. -// The expected input is a public key (x,y). -func (pk *pubKeyECDSA) EncodeCompressed() []byte { - return elliptic.MarshalCompressed(pk.goPubKey.Curve, pk.goPubKey.X, pk.goPubKey.Y) -} - -// given a public key (x,y), returns a raw uncompressed encoding bytes(x)||bytes(y) -// x and y are padded to the field size -func (pk *pubKeyECDSA) rawEncode() []byte { - xBytes := pk.goPubKey.X.Bytes() - yBytes := pk.goPubKey.Y.Bytes() - Plen := bitsToBytes((pk.alg.curve.Params().P).BitLen()) - pkEncoded := make([]byte, 2*Plen) - // pad the public key coordinates with zeroes - copy(pkEncoded[Plen-len(xBytes):], xBytes) - copy(pkEncoded[2*Plen-len(yBytes):], yBytes) - return pkEncoded -} - -// Encode returns a byte representation of a public key. -// a simple uncompressed raw encoding X||Y is used for all curves -// X and Y are the big endian byte encoding of the x and y coordinates of the public key -func (pk *pubKeyECDSA) Encode() []byte { - return pk.rawEncode() -} - -// Equals test the equality of two private keys -func (pk *pubKeyECDSA) Equals(other PublicKey) bool { - // check the key type - otherECDSA, ok := other.(*pubKeyECDSA) - if !ok { - return false - } - // check the curve - if pk.alg.curve != otherECDSA.alg.curve { - return false - } - return (pk.goPubKey.X.Cmp(otherECDSA.goPubKey.X) == 0) && - (pk.goPubKey.Y.Cmp(otherECDSA.goPubKey.Y) == 0) -} - -// String returns the hex string representation of the key. 
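One detail the raw encodings above depend on: big.Int.Bytes() drops leading zeros, so each scalar or coordinate is left-padded to a fixed width before concatenation. A sketch (leftPad is an illustrative name):

// leftPad copies b into the right end of a zero-filled buffer of the given
// size, restoring the fixed-width big-endian form that big.Int.Bytes() drops.
func leftPad(b []byte, size int) []byte {
	out := make([]byte, size)
	copy(out[size-len(b):], b)
	return out
}

// e.g. an uncompressed public key is leftPad(x.Bytes(), Plen) || leftPad(y.Bytes(), Plen)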
-func (pk *pubKeyECDSA) String() string { - return fmt.Sprintf("%#x", pk.Encode()) -} diff --git a/crypto/ecdsa_test.go b/crypto/ecdsa_test.go deleted file mode 100644 index 342162668cf..00000000000 --- a/crypto/ecdsa_test.go +++ /dev/null @@ -1,381 +0,0 @@ -//go:build !relic -// +build !relic - -package crypto - -import ( - "encoding/hex" - "testing" - - "crypto/elliptic" - crand "crypto/rand" - "math/big" - - "github.com/btcsuite/btcd/btcec/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/crypto/hash" -) - -var ecdsaCurves = []SigningAlgorithm{ - ECDSAP256, - ECDSASecp256k1, -} -var ecdsaPrKeyLen = map[SigningAlgorithm]int{ - ECDSAP256: PrKeyLenECDSAP256, - ECDSASecp256k1: PrKeyLenECDSASecp256k1, -} -var ecdsaPubKeyLen = map[SigningAlgorithm]int{ - ECDSAP256: PubKeyLenECDSAP256, - ECDSASecp256k1: PubKeyLenECDSASecp256k1, -} -var ecdsaSigLen = map[SigningAlgorithm]int{ - ECDSAP256: SignatureLenECDSAP256, - ECDSASecp256k1: SignatureLenECDSASecp256k1, -} - -// ECDSA tests -func TestECDSA(t *testing.T) { - - for _, curve := range ecdsaCurves { - t.Logf("Testing ECDSA for curve %s", curve) - // test key generation seed limits - testKeyGenSeed(t, curve, KeyGenSeedMinLen, KeyGenSeedMaxLen) - // test consistency - halg := hash.NewSHA3_256() - testGenSignVerify(t, curve, halg) - } -} - -type dummyHasher struct{ size int } - -func newDummyHasher(size int) hash.Hasher { return &dummyHasher{size} } -func (d *dummyHasher) Algorithm() hash.HashingAlgorithm { return hash.UnknownHashingAlgorithm } -func (d *dummyHasher) Size() int { return d.size } -func (d *dummyHasher) ComputeHash([]byte) hash.Hash { return make([]byte, d.size) } -func (d *dummyHasher) Write([]byte) (int, error) { return 0, nil } -func (d *dummyHasher) SumHash() hash.Hash { return make([]byte, d.size) } -func (d *dummyHasher) Reset() {} - -func TestECDSAHasher(t *testing.T) { - - for _, curve := range ecdsaCurves { - - // generate a key pair - seed := make([]byte, KeyGenSeedMinLen) - n, err := crand.Read(seed) - require.Equal(t, n, KeyGenSeedMinLen) - require.NoError(t, err) - sk, err := GeneratePrivateKey(curve, seed) - require.NoError(t, err) - sig := make([]byte, ecdsaSigLen[curve]) - - // empty hasher - t.Run("Empty hasher", func(t *testing.T) { - _, err := sk.Sign(seed, nil) - assert.Error(t, err) - assert.True(t, IsNilHasherError(err)) - _, err = sk.PublicKey().Verify(sig, seed, nil) - assert.Error(t, err) - assert.True(t, IsNilHasherError(err)) - }) - - // hasher with large output size - t.Run("large size hasher is accepted", func(t *testing.T) { - dummy := newDummyHasher(500) - _, err := sk.Sign(seed, dummy) - assert.NoError(t, err) - _, err = sk.PublicKey().Verify(sig, seed, dummy) - assert.NoError(t, err) - }) - - // hasher with small output size - t.Run("small size hasher is rejected", func(t *testing.T) { - dummy := newDummyHasher(31) // 31 is one byte less than the supported curves' order - _, err := sk.Sign(seed, dummy) - assert.Error(t, err) - assert.True(t, IsInvalidHasherSizeError(err)) - _, err = sk.PublicKey().Verify(sig, seed, dummy) - assert.Error(t, err) - assert.True(t, IsInvalidHasherSizeError(err)) - }) - } -} - -// Signing bench -func BenchmarkECDSAP256Sign(b *testing.B) { - halg := hash.NewSHA3_256() - benchSign(b, ECDSAP256, halg) -} - -// Verifying bench -func BenchmarkECDSAP256Verify(b *testing.B) { - halg := hash.NewSHA3_256() - benchVerify(b, ECDSAP256, halg) -} - -// Signing bench -func BenchmarkECDSASecp256k1Sign(b *testing.B) { - halg 
:= hash.NewSHA3_256() - benchSign(b, ECDSASecp256k1, halg) -} - -// Verifying bench -func BenchmarkECDSASecp256k1Verify(b *testing.B) { - halg := hash.NewSHA3_256() - benchVerify(b, ECDSASecp256k1, halg) -} - -// TestECDSAEncodeDecode tests encoding and decoding of ECDSA keys -func TestECDSAEncodeDecode(t *testing.T) { - for _, curve := range ecdsaCurves { - testEncodeDecode(t, curve) - } -} - -// TestECDSAEquals tests equal for ECDSA keys -func TestECDSAEquals(t *testing.T) { - for i, curve := range ecdsaCurves { - testEquals(t, curve, ecdsaCurves[i]^1) - } -} - -// TestECDSAUtils tests some utility functions -func TestECDSAUtils(t *testing.T) { - - for _, curve := range ecdsaCurves { - // generate a key pair - seed := make([]byte, KeyGenSeedMinLen) - n, err := crand.Read(seed) - require.Equal(t, n, KeyGenSeedMinLen) - require.NoError(t, err) - sk, err := GeneratePrivateKey(curve, seed) - require.NoError(t, err) - testKeysAlgorithm(t, sk, curve) - testKeySize(t, sk, ecdsaPrKeyLen[curve], ecdsaPubKeyLen[curve]) - } -} - -// TestScalarMult is a unit test of the scalar multiplication -// This is only a sanity check meant to make sure the curve implemented -// is checked against an independant test vector -func TestScalarMult(t *testing.T) { - secp256k1 := secp256k1Instance.curve - p256 := p256Instance.curve - genericMultTests := []struct { - curve elliptic.Curve - Px string - Py string - k string - Qx string - Qy string - }{ - { - secp256k1, - "858a2ea2498449acf531128892f8ee5eb6d10cfb2f7ebfa851def0e0d8428742", - "015c59492d794a4f6a3ab3046eecfc85e223d1ce8571aa99b98af6838018286e", - "6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95", - "fea24b9a6acdd97521f850e782ef4a24f3ef672b5cd51f824499d708bb0c744d", - "5f0b6db1a2c851cb2959fab5ed36ad377e8b53f1f43b7923f1be21b316df1ea1", - }, - { - p256, - "fa1a85f1ae436e9aa05baabe60eb83b2d7ff52e5766504fda4e18d2d25887481", - "f7cc347e1ac53f6720ffc511bfb23c2f04c764620be0baf8c44313e92d5404de", - "6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95", - "28a27fc352f315d5cc562cb0d97e5882b6393fd6571f7d394cc583e65b5c7ffe", - "4086d17a2d0d9dc365388c91ba2176de7acc5c152c1a8d04e14edc6edaebd772", - }, - } - - baseMultTests := []struct { - curve elliptic.Curve - k string - Qx string - Qy string - }{ - { - secp256k1, - "6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95", - "36f292f6c287b6e72ca8128465647c7f88730f84ab27a1e934dbd2da753930fa", - "39a09ddcf3d28fb30cc683de3fc725e095ec865c3d41aef6065044cb12b1ff61", - }, - { - p256, - "6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95", - "78a80dfe190a6068be8ddf05644c32d2540402ffc682442f6a9eeb96125d8681", - "3789f92cf4afabf719aaba79ecec54b27e33a188f83158f6dd15ecb231b49808", - }, - } - - t.Run("scalar mult check", func(t *testing.T) { - for _, test := range genericMultTests { - Px, _ := new(big.Int).SetString(test.Px, 16) - Py, _ := new(big.Int).SetString(test.Py, 16) - k, _ := new(big.Int).SetString(test.k, 16) - Qx, _ := new(big.Int).SetString(test.Qx, 16) - Qy, _ := new(big.Int).SetString(test.Qy, 16) - Rx, Ry := test.curve.ScalarMult(Px, Py, k.Bytes()) - assert.Equal(t, Rx.Cmp(Qx), 0) - assert.Equal(t, Ry.Cmp(Qy), 0) - } - }) - - t.Run("base scalar mult check", func(t *testing.T) { - for _, test := range baseMultTests { - k, _ := new(big.Int).SetString(test.k, 16) - Qx, _ := new(big.Int).SetString(test.Qx, 16) - Qy, _ := new(big.Int).SetString(test.Qy, 16) - // base mult - Rx, Ry := test.curve.ScalarBaseMult(k.Bytes()) - assert.Equal(t, Rx.Cmp(Qx), 0) - assert.Equal(t, 
Ry.Cmp(Qy), 0) - // generic mult with base point - Px := new(big.Int).Set(test.curve.Params().Gx) - Py := new(big.Int).Set(test.curve.Params().Gy) - Rx, Ry = test.curve.ScalarMult(Px, Py, k.Bytes()) - assert.Equal(t, Rx.Cmp(Qx), 0) - assert.Equal(t, Ry.Cmp(Qy), 0) - } - }) -} - -func TestSignatureFormatCheck(t *testing.T) { - - for _, curve := range ecdsaCurves { - t.Run("valid signature", func(t *testing.T) { - len := ecdsaSigLen[curve] - sig := Signature(make([]byte, len)) - _, err := crand.Read(sig) - require.NoError(t, err) - sig[len/2] = 0 // force s to be less than the curve order - sig[len-1] |= 1 // force s to be non zero - sig[0] = 0 // force r to be less than the curve order - sig[len/2-1] |= 1 // force r to be non zero - valid, err := SignatureFormatCheck(curve, sig) - assert.Nil(t, err) - assert.True(t, valid) - }) - - t.Run("invalid length", func(t *testing.T) { - len := ecdsaSigLen[curve] - shortSig := Signature(make([]byte, len/2)) - valid, err := SignatureFormatCheck(curve, shortSig) - assert.Nil(t, err) - assert.False(t, valid) - - longSig := Signature(make([]byte, len*2)) - valid, err = SignatureFormatCheck(curve, longSig) - assert.Nil(t, err) - assert.False(t, valid) - }) - - t.Run("zero values", func(t *testing.T) { - // signature with a zero s - len := ecdsaSigLen[curve] - sig0s := Signature(make([]byte, len)) - _, err := crand.Read(sig0s[:len/2]) - require.NoError(t, err) - - valid, err := SignatureFormatCheck(curve, sig0s) - assert.Nil(t, err) - assert.False(t, valid) - - // signature with a zero r - sig0r := Signature(make([]byte, len)) - _, err = crand.Read(sig0r[len/2:]) - require.NoError(t, err) - - valid, err = SignatureFormatCheck(curve, sig0r) - assert.Nil(t, err) - assert.False(t, valid) - }) - - t.Run("large values", func(t *testing.T) { - len := ecdsaSigLen[curve] - sigLargeS := Signature(make([]byte, len)) - _, err := crand.Read(sigLargeS[:len/2]) - require.NoError(t, err) - // make sure s is larger than the curve order - for i := len / 2; i < len; i++ { - sigLargeS[i] = 0xFF - } - - valid, err := SignatureFormatCheck(curve, sigLargeS) - assert.Nil(t, err) - assert.False(t, valid) - - sigLargeR := Signature(make([]byte, len)) - _, err = crand.Read(sigLargeR[len/2:]) - require.NoError(t, err) - // make sure s is larger than the curve order - for i := 0; i < len/2; i++ { - sigLargeR[i] = 0xFF - } - - valid, err = SignatureFormatCheck(curve, sigLargeR) - assert.Nil(t, err) - assert.False(t, valid) - }) - } -} - -func TestEllipticUnmarshalSecp256k1(t *testing.T) { - - testVectors := []string{ - "028b10bf56476bf7da39a3286e29df389177a2fa0fca2d73348ff78887515d8da1", // IsOnCurve for elliptic returns false - "03d39427f07f680d202fe8504306eb29041aceaf4b628c2c69b0ec248155443166", // odd, IsOnCurve for elliptic returns false - "0267d1942a6cbe4daec242ea7e01c6cdb82dadb6e7077092deb55c845bf851433e", // arith of sqrt in elliptic doesn't match secp256k1 - "0345d45eda6d087918b041453a96303b78c478dce89a4ae9b3c933a018888c5e06", // odd, arith of sqrt in elliptic doesn't match secp256k1 - } - - for _, testVector := range testVectors { - - // get the compressed bytes - publicBytes, err := hex.DecodeString(testVector) - require.NoError(t, err) - - // decompress, check that those are perfectly valid Secp256k1 public keys - retrieved, err := DecodePublicKeyCompressed(ECDSASecp256k1, publicBytes) - require.NoError(t, err) - - // check the compression is canonical by re-compressing to the same bytes - require.Equal(t, retrieved.EncodeCompressed(), publicBytes) - - // check that 
elliptic fails at decompressing them - x, y := elliptic.UnmarshalCompressed(btcec.S256(), publicBytes) - require.Nil(t, x) - require.Nil(t, y) - } -} - -func BenchmarkECDSADecode(b *testing.B) { - // random message - seed := make([]byte, 50) - _, _ = crand.Read(seed) - - for _, curve := range []SigningAlgorithm{ECDSASecp256k1, ECDSAP256} { - sk, _ := GeneratePrivateKey(curve, seed) - comp := sk.PublicKey().EncodeCompressed() - uncomp := sk.PublicKey().Encode() - - b.Run("compressed point on "+curve.String(), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := DecodePublicKeyCompressed(curve, comp) - require.NoError(b, err) - } - b.StopTimer() - }) - - b.Run("uncompressed point on "+curve.String(), func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := DecodePublicKey(curve, uncomp) - require.NoError(b, err) - } - b.StopTimer() - }) - } -} diff --git a/crypto/empty.go b/crypto/empty.go new file mode 100644 index 00000000000..5871506ee7e --- /dev/null +++ b/crypto/empty.go @@ -0,0 +1 @@ +package crypto diff --git a/crypto/go.mod b/crypto/go.mod index c7fe54f9ff5..d1ab85ff01a 100644 --- a/crypto/go.mod +++ b/crypto/go.mod @@ -1,24 +1,4 @@ +// Deprecated: The latest supported version is v0.25.0. The module then migrated to github.com/onflow/crypto. Use the new module github.com/onflow/crypto instead. module github.com/onflow/flow-go/crypto -go 1.19 - -require ( - github.com/btcsuite/btcd/btcec/v2 v2.2.1 - github.com/sirupsen/logrus v1.4.2 - github.com/stretchr/testify v1.8.0 - github.com/supranational/blst v0.3.10 - golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - gonum.org/v1/gonum v0.6.1 - pgregory.net/rapid v0.4.7 -) - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect - github.com/konsorten/go-windows-terminal-sequences v1.0.1 // indirect - github.com/kr/pretty v0.1.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) +go 1.25 diff --git a/crypto/go.sum b/crypto/go.sum index 19a05d05d6d..e69de29bb2d 100644 --- a/crypto/go.sum +++ b/crypto/go.sum @@ -1,59 +0,0 @@ -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= -github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= 
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/supranational/blst v0.3.10 h1:CMciDZ/h4pXDDXQASe8ZGTNKUiVNxVVA5hpci2Uuhuk= -github.com/supranational/blst v0.3.10/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.1 h1:/LSrTrgZtpbXyAR6+0e152SROCkJJSh7goYWVmdPFGc= -gonum.org/v1/gonum v0.6.1/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= 
-gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= -pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/crypto/hash/empty.go b/crypto/hash/empty.go new file mode 100644 index 00000000000..7adc22fef06 --- /dev/null +++ b/crypto/hash/empty.go @@ -0,0 +1 @@ +package hash diff --git a/crypto/hash/hash.go b/crypto/hash/hash.go deleted file mode 100644 index 31f9fd08c7a..00000000000 --- a/crypto/hash/hash.go +++ /dev/null @@ -1,45 +0,0 @@ -package hash - -import ( - "bytes" - "fmt" - "io" -) - -// Hash is the hash algorithms output types -type Hash []byte - -// Equal checks if a hash is equal to a given hash -func (h Hash) Equal(input Hash) bool { - return bytes.Equal(h, input) -} - -// Hex returns the hex string representation of the hash. -func (h Hash) Hex() string { - return fmt.Sprintf("%#x", []byte(h)) -} - -// String returns the hex string representation of the hash. -func (h Hash) String() string { - return h.Hex() -} - -// Hasher interface -type Hasher interface { - // Algorithm returns the hashing algorithm of the hasher. - Algorithm() HashingAlgorithm - // Size returns the hash output length in bytes. - Size() int - // ComputeHash returns the hash output regardless of the existing hash state. - // It may update the state or not depending on the implementation. Thread safety - // also depends on the implementation. - ComputeHash([]byte) Hash - // Write([]bytes) (using the io.Writer interface) adds more bytes to the - // current hash state. - io.Writer - // SumHash returns the hash output. - // It may update the state or not depending on the implementation. - SumHash() Hash - // Reset resets the hash state. 
- Reset() -} diff --git a/crypto/hash/hash_test.go b/crypto/hash/hash_test.go deleted file mode 100644 index 21c14134fde..00000000000 --- a/crypto/hash/hash_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package hash - -import ( - "crypto/rand" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/crypto/sha3" -) - -// Sanity check of SHA3_256 -func TestSanitySHA3_256(t *testing.T) { - input := []byte("test") - expected, _ := hex.DecodeString("36f028580bb02cc8272a9a020f4200e346e276ae664e45ee80745574e2f5ab80") - - alg := NewSHA3_256() - hash := alg.ComputeHash(input) - assert.Equal(t, Hash(expected), hash) -} - -// Sanity check of SHA3_384 -func TestSanitySHA3_384(t *testing.T) { - input := []byte("test") - expected, _ := hex.DecodeString("e516dabb23b6e30026863543282780a3ae0dccf05551cf0295178d7ff0f1b41eecb9db3ff219007c4e097260d58621bd") - - alg := NewSHA3_384() - hash := alg.ComputeHash(input) - assert.Equal(t, Hash(expected), hash) -} - -// Sanity check of SHA2_256 -func TestSanitySHA2_256(t *testing.T) { - input := []byte("test") - expected, _ := hex.DecodeString("9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08") - - alg := NewSHA2_256() - hash := alg.ComputeHash(input) - assert.Equal(t, Hash(expected), hash) -} - -// Sanity check of SHA2_384 -func TestSanitySHA2_384(t *testing.T) { - input := []byte("test") - expected, _ := hex.DecodeString("768412320f7b0aa5812fce428dc4706b3cae50e02a64caa16a782249bfe8efc4b7ef1ccb126255d196047dfedf17a0a9") - - alg := NewSHA2_384() - hash := alg.ComputeHash(input) - assert.Equal(t, Hash(expected), hash) -} - -// Sanity check of Keccak_256 -func TestSanityKeccak_256(t *testing.T) { - input := []byte("test") - expected, _ := hex.DecodeString("9c22ff5f21f0b81b113e63f7db6da94fedef11b2119b4088b89664fb9a3cb658") - - alg := NewKeccak_256() - hash := alg.ComputeHash(input) - assert.Equal(t, Hash(expected), hash) -} - -// Sanity checks of KMAC128 -// the test vector is taken from the NIST document -// https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Standards-and-Guidelines/documents/examples/Kmac_samples.pdf -func TestSanityKmac128(t *testing.T) { - - input := []byte{0x00, 0x01, 0x02, 0x03} - expected := []Hash{ - {0xE5, 0x78, 0x0B, 0x0D, 0x3E, 0xA6, 0xF7, 0xD3, 0xA4, 0x29, 0xC5, 0x70, 0x6A, 0xA4, 0x3A, 0x00, - 0xFA, 0xDB, 0xD7, 0xD4, 0x96, 0x28, 0x83, 0x9E, 0x31, 0x87, 0x24, 0x3F, 0x45, 0x6E, 0xE1, 0x4E}, - {0x3B, 0x1F, 0xBA, 0x96, 0x3C, 0xD8, 0xB0, 0xB5, 0x9E, 0x8C, 0x1A, 0x6D, 0x71, 0x88, 0x8B, 0x71, - 0x43, 0x65, 0x1A, 0xF8, 0xBA, 0x0A, 0x70, 0x70, 0xC0, 0x97, 0x9E, 0x28, 0x11, 0x32, 0x4A, 0xA5}, - } - key := []byte{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, - 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F} - customizers := [][]byte{ - []byte(""), - []byte("My Tagged Application"), - } - outputSize := 32 - - alg, err := NewKMAC_128(key, customizers[0], outputSize) - require.Nil(t, err) - _, _ = alg.Write(input[0:2]) - _, _ = alg.Write(input[2:]) - hash := alg.SumHash() - assert.Equal(t, expected[0], hash) - - for i := 0; i < len(customizers); i++ { - alg, err = NewKMAC_128(key, customizers[i], outputSize) - require.Nil(t, err) - hash = alg.ComputeHash(input) - assert.Equal(t, expected[i], hash) - } - - // test short key length - _, err = NewKMAC_128(key[:15], customizers[0], outputSize) - assert.Error(t, err) -} - -// TestHashersAPI 
tests the expected definition of the hashers APIs -func TestHashersAPI(t *testing.T) { - - newKmac128 := func() Hasher { - kmac, err := NewKMAC_128([]byte("test_key________"), []byte("test_custommizer"), 32) - if err != nil { - panic("new kmac hasher failed") - } - return kmac - } - - newHasherFunctions := [](func() Hasher){ - NewSHA2_256, - NewSHA2_384, - NewSHA3_256, - NewSHA3_384, - newKmac128, - NewKeccak_256, - } - - data := make([]byte, 1801) - _, err := rand.Read(data) - require.NoError(t, err) - - for _, newFunction := range newHasherFunctions { - // Reset should empty the state - h := newFunction() - expectedEmptyHash := h.SumHash() - _, _ = h.Write(data) - h.Reset() - emptyHash := h.SumHash() - assert.Equal(t, expectedEmptyHash, emptyHash) - - // SumHash on an empty state is equal to compute hash with empty data - emptyHash = h.ComputeHash(nil) - assert.Equal(t, expectedEmptyHash, emptyHash) - - // successive writes of data are equivalent to compute hash - // of the concatenated data - h = newFunction() - hash1 := h.ComputeHash(data) - - h.Reset() - _, _ = h.Write(data[:355]) - _, _ = h.Write(data[355:902]) - _, _ = h.Write(data[902:]) - hash2 := h.SumHash() - assert.Equal(t, hash1, hash2) - - // ComputeHash output does not depend on the hasher state - h = newFunction() - - _, _ = h.Write([]byte("dummy data")) - hash1 = h.ComputeHash(data) - assert.Equal(t, hash1, hash2) - } -} - -// TestSHA2 is a specific test of SHA2-256 and SHA2-384. -// It compares the hashes of random data of different lengths to -// the output of standard Go sha2. -func TestSHA2(t *testing.T) { - - t.Run("SHA2_256", func(t *testing.T) { - for i := 0; i < 5000; i++ { - value := make([]byte, i) - _, err := rand.Read(value) - require.NoError(t, err) - expected := sha256.Sum256(value) - - // test hash computation using the hasher - hasher := NewSHA2_256() - h := hasher.ComputeHash(value) - assert.Equal(t, expected[:], []byte(h)) - - // test hash computation using the light api - var res [HashLenSHA2_256]byte - ComputeSHA2_256(&res, value) - assert.Equal(t, expected[:], res[:]) - } - }) - - t.Run("SHA2_384", func(t *testing.T) { - for i := 0; i < 5000; i++ { - value := make([]byte, i) - _, err := rand.Read(value) - require.NoError(t, err) - expected := sha512.Sum384(value) - - hasher := NewSHA2_384() - h := hasher.ComputeHash(value) - assert.Equal(t, expected[:], []byte(h)) - } - }) -} - -// TestSHA3 is a specific test of SHA3-256 and SHA3-384. -// It compares the hashes of random data of different lengths to -// the output of standard Go sha3. -func TestSHA3(t *testing.T) { - t.Run("SHA3_256", func(t *testing.T) { - for i := 0; i < 5000; i++ { - value := make([]byte, i) - _, err := rand.Read(value) - require.NoError(t, err) - expected := sha3.Sum256(value) - - // test hash computation using the hasher - hasher := NewSHA3_256() - h := hasher.ComputeHash(value) - assert.Equal(t, expected[:], []byte(h)) - - // test hash computation using the light api - var res [HashLenSHA3_256]byte - ComputeSHA3_256(&res, value) - assert.Equal(t, expected[:], res[:]) - } - }) - - t.Run("SHA3_384", func(t *testing.T) { - for i := 0; i < 5000; i++ { - value := make([]byte, i) - _, err := rand.Read(value) - require.NoError(t, err) - expected := sha3.Sum384(value) - - hasher := NewSHA3_384() - h := hasher.ComputeHash(value) - assert.Equal(t, expected[:], []byte(h)) - } - }) -} - -// TestKeccak is a specific test of Keccak-256. -// It compares the hashes of random data of different lengths to -// the output of Go LegacyKeccak. 
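// Keccak-256 and SHA3-256 share the same sponge parameters (rate of 136
// bytes, 32-byte output) and differ only in the padding/domain-separation
// byte: 0x1 for legacy Keccak versus 0x6 for SHA-3 (see dsByteKeccak and
// dsByteSHA3 elsewhere in this package). A minimal illustration of that
// difference, assuming only the x/crypto/sha3 package already imported by
// this file:
//
//	k := sha3.NewLegacyKeccak256()
//	_, _ = k.Write([]byte("test"))
//	legacy := k.Sum(nil)               // Keccak-256 digest
//	std := sha3.Sum256([]byte("test")) // SHA3-256 digest
//	// legacy differs from std[:], despite identical rate and output length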
-func TestKeccak(t *testing.T) { - for i := 0; i < 5000; i++ { - value := make([]byte, i) - _, err := rand.Read(value) - require.NoError(t, err) - k := sha3.NewLegacyKeccak256() - k.Write(value) - expected := k.Sum(nil) - - // test hash computation using the hasher - hasher := NewKeccak_256() - h := hasher.ComputeHash(value) - assert.Equal(t, expected[:], []byte(h)) - } -} - -// Benchmark of all hashers' ComputeHash function -func BenchmarkComputeHash(b *testing.B) { - - m := make([]byte, 32) - _, err := rand.Read(m) - require.NoError(b, err) - - b.Run("SHA2_256", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - alg := NewSHA2_256() - _ = alg.ComputeHash(m) - } - b.StopTimer() - }) - - b.Run("SHA2_256_light", func(b *testing.B) { - var h [HashLenSHA2_256]byte - b.ResetTimer() - for i := 0; i < b.N; i++ { - ComputeSHA2_256(&h, m) - } - b.StopTimer() - }) - - b.Run("SHA2_384", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - alg := NewSHA2_384() - _ = alg.ComputeHash(m) - } - b.StopTimer() - }) - - b.Run("SHA3_256", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - alg := NewSHA3_256() - alg.ComputeHash(m) - } - b.StopTimer() - }) - - b.Run("SHA3_256_light", func(b *testing.B) { - var h [HashLenSHA3_256]byte - b.ResetTimer() - for i := 0; i < b.N; i++ { - ComputeSHA3_256(&h, m) - } - b.StopTimer() - }) - - b.Run("SHA3_384", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - alg := NewSHA3_384() - _ = alg.ComputeHash(m) - } - b.StopTimer() - }) - - b.Run("Keccak_256", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - alg := NewKeccak_256() - alg.ComputeHash(m) - } - b.StopTimer() - }) - - // KMAC128 with 128 bytes output - b.Run("KMAC128_128", func(b *testing.B) { - alg, _ := NewKMAC_128([]byte("bench_key________"), []byte("bench_custommizer"), 128) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = alg.ComputeHash(m) - } - b.StopTimer() - }) -} diff --git a/crypto/hash/keccak.go b/crypto/hash/keccak.go deleted file mode 100644 index 6c515c02d83..00000000000 --- a/crypto/hash/keccak.go +++ /dev/null @@ -1,204 +0,0 @@ -package hash - -// Size returns the output size of the hash function in bytes. -func (d *spongeState) Size() int { - return d.outputLen -} - -// Algorithm returns the hashing algorithm of the instance. -func (s *spongeState) Algorithm() HashingAlgorithm { - return s.algo -} - -// ComputeHash calculates and returns the digest of the input. -// It updates the state (and therefore not thread-safe) and doesn't allow -// further writing without calling Reset(). -func (s *spongeState) ComputeHash(data []byte) Hash { - s.Reset() - s.write(data) - return s.sum() -} - -// SumHash returns the digest of the data written to the state. -// It updates the state and doesn't allow further writing without -// calling Reset(). -func (s *spongeState) SumHash() Hash { - return s.sum() -} - -// Write absorbs more data into the hash's state. -// It returns the number of bytes written and never errors. -func (d *spongeState) Write(p []byte) (int, error) { - d.write(p) - return len(p), nil -} - -// The functions below were copied and modified from golang.org/x/crypto/sha3. -// -// Copyright (c) 2009 The Go Authors. All rights reserved. 
- -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: - -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. - -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -type spongeState struct { - // the hashing algorithm name - algo HashingAlgorithm - - a [25]uint64 // main state of the hash - storage storageBuf // constant size array - // `buf` is a sub-slice that points into `storage` using `bufIndex` and `bufSize`: - // - `bufIndex` is the index of the first element of buf - // - `bufSize` is the size of buf - bufIndex int - bufSize int - rate int // the number of bytes of state to use - // dsbyte contains the domain separation bits (if any are defined) - // and the first bit of the 10*1 padding. - // Using a little-endian bit-ordering convention, it is 0b01 for SHA-3 - // and not defined for legacy Keccak. - // The padding 10*1 is applied to pad the message to a multiple - // of the rate, which involves adding a "1" bit, zero or more "0" bits, and - // a final "1" bit. We merge the first "1" bit from the padding into dsbyte, - // ( giving 0b00000110 for SHA-3 and 0b00000001 for legacy Keccak) - // [1] https://keccak.team/sponge_duplex.html - // "The sponge and duplex constructions" - dsByte byte // the domain separation byte with one bit padding - outputLen int // the default output size in bytes -} - -const ( - // maxRate is the maximum size of the internal buffer. SHA3-256 - // currently needs the largest buffer among supported sponge-based - // algorithms. - maxRate = rateSHA3_256 - - // initialization value of the buffer index - bufNilValue = -1 -) - -// returns the current buf -func (d *spongeState) buf() []byte { - return d.storage.asBytes()[d.bufIndex : d.bufIndex+d.bufSize] -} - -// setBuf assigns `buf` (sub-slice of `storage`) to a sub-slice of `storage` -// defined by a starting index and size. -func (d *spongeState) setBuf(start, size int) { - d.bufIndex = start - d.bufSize = size -} - -// checks if `buf` is nil (not yet set) -func (d *spongeState) bufIsNil() bool { - return d.bufSize == bufNilValue -} - -// appendBuf appends a slice to `buf` (sub-slice of `storage`) -// The function assumes the appended buffer still fits into `storage`. 
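// That assumption holds for both callers: write appends at most
// rate-bufSize bytes before permuting, and padAndPermute appends a single
// byte only while bufSize < rate, with rate <= maxRate. A sketch of the
// backing buffer this relies on (storageBuf is defined elsewhere in this
// package; this exact definition is an assumption, not from this file):
//
//	// type storageBuf [maxRate]byte
//	// func (b *storageBuf) asBytes() []byte { return b[:] }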
-func (d *spongeState) appendBuf(slice []byte) { - copy(d.storage.asBytes()[d.bufIndex+d.bufSize:], slice) - d.bufSize += len(slice) -} - -// Reset clears the internal state. -func (d *spongeState) Reset() { - // Zero the permutation's state. - for i := range d.a { - d.a[i] = 0 - } - d.setBuf(0, 0) -} - -// permute applies the KeccakF-1600 permutation. -func (d *spongeState) permute() { - // xor the input into the state before applying the permutation. - xorIn(d, d.buf()) - d.setBuf(0, 0) - keccakF1600(&d.a) -} - -func (d *spongeState) write(p []byte) { - if d.bufIsNil() { - d.setBuf(0, 0) - } - - for len(p) > 0 { - if d.bufSize == 0 && len(p) >= d.rate { - // The fast path; absorb a full "rate" bytes of input and apply the permutation. - xorIn(d, p[:d.rate]) - p = p[d.rate:] - keccakF1600(&d.a) - } else { - // The slow path; buffer the input until we can fill the sponge, and then xor it in. - todo := d.rate - d.bufSize - if todo > len(p) { - todo = len(p) - } - d.appendBuf(p[:todo]) - p = p[todo:] - - // If the sponge is full, apply the permutation. - if d.bufSize == d.rate { - d.permute() - } - } - } -} - -// padAndPermute appends the domain separation bits in dsbyte, applies -// the multi-bitrate 10..1 padding rule, and permutes the state. -func (d *spongeState) padAndPermute() { - if d.bufIsNil() { - d.setBuf(0, 0) - } - // Pad this instance with dsbyte. We know that there's - // at least one byte of space in d.buf because, if it were full, - // permute would have been called to empty it. dsbyte also contains the - // first one bit for the padding. See the comment in the state struct. - d.appendBuf([]byte{d.dsByte}) - zerosStart := d.bufSize - d.setBuf(0, d.rate) - buf := d.buf() - for i := zerosStart; i < d.rate; i++ { - buf[i] = 0 - } - // This adds the final one bit for the padding. Because of the way that - // bits are numbered from the LSB upwards, the final bit is the MSB of - // the last byte. - buf[d.rate-1] ^= 0x80 - // Apply the permutation - d.permute() - d.setBuf(0, d.rate) -} - -// sum applies padding to the hash state and then squeezes out the desired -// number of output bytes. -func (d *spongeState) sum() []byte { - hash := make([]byte, d.outputLen) - d.padAndPermute() - copyOut(hash, d) - return hash -} diff --git a/crypto/hash/keccak.s b/crypto/hash/keccak.s deleted file mode 100644 index 01e35bb9a5c..00000000000 --- a/crypto/hash/keccak.s +++ /dev/null @@ -1,419 +0,0 @@ -// The functions below were copied from golang.org/x/crypto/sha3. -// -// Copyright (c) 2009 The Go Authors. All rights reserved. - -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: - -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. - -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -// +build amd64,!purego,gc - -// This code was translated into a form compatible with 6a from the public -// domain sources at https://github.com/gvanas/KeccakCodePackage - -// Offsets in state - -#define _ba (0*8) -#define _be (1*8) -#define _bi (2*8) -#define _bo (3*8) -#define _bu (4*8) -#define _ga (5*8) -#define _ge (6*8) -#define _gi (7*8) -#define _go (8*8) -#define _gu (9*8) -#define _ka (10*8) -#define _ke (11*8) -#define _ki (12*8) -#define _ko (13*8) -#define _ku (14*8) -#define _ma (15*8) -#define _me (16*8) -#define _mi (17*8) -#define _mo (18*8) -#define _mu (19*8) -#define _sa (20*8) -#define _se (21*8) -#define _si (22*8) -#define _so (23*8) -#define _su (24*8) - -// Temporary registers -#define rT1 AX - -// Round vars -#define rpState DI -#define rpStack SP - -#define rDa BX -#define rDe CX -#define rDi DX -#define rDo R8 -#define rDu R9 - -#define rBa R10 -#define rBe R11 -#define rBi R12 -#define rBo R13 -#define rBu R14 - -#define rCa SI -#define rCe BP -#define rCi rBi -#define rCo rBo -#define rCu R15 - -#define MOVQ_RBI_RCE MOVQ rBi, rCe -#define XORQ_RT1_RCA XORQ rT1, rCa -#define XORQ_RT1_RCE XORQ rT1, rCe -#define XORQ_RBA_RCU XORQ rBa, rCu -#define XORQ_RBE_RCU XORQ rBe, rCu -#define XORQ_RDU_RCU XORQ rDu, rCu -#define XORQ_RDA_RCA XORQ rDa, rCa -#define XORQ_RDE_RCE XORQ rDe, rCe - -#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ - /* Prepare round */ \ - MOVQ rCe, rDa; \ - ROLQ $1, rDa; \ - \ - MOVQ _bi(iState), rCi; \ - XORQ _gi(iState), rDi; \ - XORQ rCu, rDa; \ - XORQ _ki(iState), rCi; \ - XORQ _mi(iState), rDi; \ - XORQ rDi, rCi; \ - \ - MOVQ rCi, rDe; \ - ROLQ $1, rDe; \ - \ - MOVQ _bo(iState), rCo; \ - XORQ _go(iState), rDo; \ - XORQ rCa, rDe; \ - XORQ _ko(iState), rCo; \ - XORQ _mo(iState), rDo; \ - XORQ rDo, rCo; \ - \ - MOVQ rCo, rDi; \ - ROLQ $1, rDi; \ - \ - MOVQ rCu, rDo; \ - XORQ rCe, rDi; \ - ROLQ $1, rDo; \ - \ - MOVQ rCa, rDu; \ - XORQ rCi, rDo; \ - ROLQ $1, rDu; \ - \ - /* Result b */ \ - MOVQ _ba(iState), rBa; \ - MOVQ _ge(iState), rBe; \ - XORQ rCo, rDu; \ - MOVQ _ki(iState), rBi; \ - MOVQ _mo(iState), rBo; \ - MOVQ _su(iState), rBu; \ - XORQ rDe, rBe; \ - ROLQ $44, rBe; \ - XORQ rDi, rBi; \ - XORQ rDa, rBa; \ - ROLQ $43, rBi; \ - \ - MOVQ rBe, rCa; \ - MOVQ rc, rT1; \ - ORQ rBi, rCa; \ - XORQ rBa, rT1; \ - XORQ rT1, rCa; \ - MOVQ rCa, _ba(oState); \ - \ - XORQ rDu, rBu; \ - ROLQ $14, rBu; \ - MOVQ rBa, rCu; \ - ANDQ rBe, rCu; \ - XORQ rBu, rCu; \ - MOVQ rCu, _bu(oState); \ - \ - XORQ rDo, rBo; \ - ROLQ $21, rBo; \ - MOVQ rBo, rT1; \ - ANDQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _bi(oState); \ - \ - NOTQ rBi; \ - ORQ rBa, rBu; \ - ORQ rBo, rBi; \ - XORQ rBo, rBu; \ - XORQ rBe, rBi; \ - MOVQ rBu, _bo(oState); \ - MOVQ rBi, _be(oState); \ - B_RBI_RCE; \ - \ - /* Result g */ \ - MOVQ _gu(iState), rBe; \ - XORQ rDu, rBe; \ - MOVQ _ka(iState), rBi; \ - ROLQ $20, rBe; \ - 
XORQ rDa, rBi; \ - ROLQ $3, rBi; \ - MOVQ _bo(iState), rBa; \ - MOVQ rBe, rT1; \ - ORQ rBi, rT1; \ - XORQ rDo, rBa; \ - MOVQ _me(iState), rBo; \ - MOVQ _si(iState), rBu; \ - ROLQ $28, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ga(oState); \ - G_RT1_RCA; \ - \ - XORQ rDe, rBo; \ - ROLQ $45, rBo; \ - MOVQ rBi, rT1; \ - ANDQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _ge(oState); \ - G_RT1_RCE; \ - \ - XORQ rDi, rBu; \ - ROLQ $61, rBu; \ - MOVQ rBu, rT1; \ - ORQ rBa, rT1; \ - XORQ rBo, rT1; \ - MOVQ rT1, _go(oState); \ - \ - ANDQ rBe, rBa; \ - XORQ rBu, rBa; \ - MOVQ rBa, _gu(oState); \ - NOTQ rBu; \ - G_RBA_RCU; \ - \ - ORQ rBu, rBo; \ - XORQ rBi, rBo; \ - MOVQ rBo, _gi(oState); \ - \ - /* Result k */ \ - MOVQ _be(iState), rBa; \ - MOVQ _gi(iState), rBe; \ - MOVQ _ko(iState), rBi; \ - MOVQ _mu(iState), rBo; \ - MOVQ _sa(iState), rBu; \ - XORQ rDi, rBe; \ - ROLQ $6, rBe; \ - XORQ rDo, rBi; \ - ROLQ $25, rBi; \ - MOVQ rBe, rT1; \ - ORQ rBi, rT1; \ - XORQ rDe, rBa; \ - ROLQ $1, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ka(oState); \ - K_RT1_RCA; \ - \ - XORQ rDu, rBo; \ - ROLQ $8, rBo; \ - MOVQ rBi, rT1; \ - ANDQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _ke(oState); \ - K_RT1_RCE; \ - \ - XORQ rDa, rBu; \ - ROLQ $18, rBu; \ - NOTQ rBo; \ - MOVQ rBo, rT1; \ - ANDQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _ki(oState); \ - \ - MOVQ rBu, rT1; \ - ORQ rBa, rT1; \ - XORQ rBo, rT1; \ - MOVQ rT1, _ko(oState); \ - \ - ANDQ rBe, rBa; \ - XORQ rBu, rBa; \ - MOVQ rBa, _ku(oState); \ - K_RBA_RCU; \ - \ - /* Result m */ \ - MOVQ _ga(iState), rBe; \ - XORQ rDa, rBe; \ - MOVQ _ke(iState), rBi; \ - ROLQ $36, rBe; \ - XORQ rDe, rBi; \ - MOVQ _bu(iState), rBa; \ - ROLQ $10, rBi; \ - MOVQ rBe, rT1; \ - MOVQ _mi(iState), rBo; \ - ANDQ rBi, rT1; \ - XORQ rDu, rBa; \ - MOVQ _so(iState), rBu; \ - ROLQ $27, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ma(oState); \ - M_RT1_RCA; \ - \ - XORQ rDi, rBo; \ - ROLQ $15, rBo; \ - MOVQ rBi, rT1; \ - ORQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _me(oState); \ - M_RT1_RCE; \ - \ - XORQ rDo, rBu; \ - ROLQ $56, rBu; \ - NOTQ rBo; \ - MOVQ rBo, rT1; \ - ORQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _mi(oState); \ - \ - ORQ rBa, rBe; \ - XORQ rBu, rBe; \ - MOVQ rBe, _mu(oState); \ - \ - ANDQ rBa, rBu; \ - XORQ rBo, rBu; \ - MOVQ rBu, _mo(oState); \ - M_RBE_RCU; \ - \ - /* Result s */ \ - MOVQ _bi(iState), rBa; \ - MOVQ _go(iState), rBe; \ - MOVQ _ku(iState), rBi; \ - XORQ rDi, rBa; \ - MOVQ _ma(iState), rBo; \ - ROLQ $62, rBa; \ - XORQ rDo, rBe; \ - MOVQ _se(iState), rBu; \ - ROLQ $55, rBe; \ - \ - XORQ rDu, rBi; \ - MOVQ rBa, rDu; \ - XORQ rDe, rBu; \ - ROLQ $2, rBu; \ - ANDQ rBe, rDu; \ - XORQ rBu, rDu; \ - MOVQ rDu, _su(oState); \ - \ - ROLQ $39, rBi; \ - S_RDU_RCU; \ - NOTQ rBe; \ - XORQ rDa, rBo; \ - MOVQ rBe, rDa; \ - ANDQ rBi, rDa; \ - XORQ rBa, rDa; \ - MOVQ rDa, _sa(oState); \ - S_RDA_RCA; \ - \ - ROLQ $41, rBo; \ - MOVQ rBi, rDe; \ - ORQ rBo, rDe; \ - XORQ rBe, rDe; \ - MOVQ rDe, _se(oState); \ - S_RDE_RCE; \ - \ - MOVQ rBo, rDi; \ - MOVQ rBu, rDo; \ - ANDQ rBu, rDi; \ - ORQ rBa, rDo; \ - XORQ rBi, rDi; \ - XORQ rBo, rDo; \ - MOVQ rDi, _si(oState); \ - MOVQ rDo, _so(oState) \ - -// func keccakF1600(state *[25]uint64) -TEXT ·keccakF1600(SB), 0, $200-8 - MOVQ state+0(FP), rpState - - // Convert the user state into an internal state - NOTQ _be(rpState) - NOTQ _bi(rpState) - NOTQ _go(rpState) - NOTQ _ki(rpState) - NOTQ _mi(rpState) - NOTQ _sa(rpState) - - // Execute the KeccakF permutation - MOVQ _ba(rpState), rCa - MOVQ _be(rpState), rCe - MOVQ _bu(rpState), rCu - - XORQ 
_ga(rpState), rCa - XORQ _ge(rpState), rCe - XORQ _gu(rpState), rCu - - XORQ _ka(rpState), rCa - XORQ _ke(rpState), rCe - XORQ _ku(rpState), rCu - - XORQ _ma(rpState), rCa - XORQ _me(rpState), rCe - XORQ _mu(rpState), rCu - - XORQ _sa(rpState), rCa - XORQ _se(rpState), rCe - MOVQ _si(rpState), rDi - MOVQ _so(rpState), rDo - XORQ _su(rpState), rCu - - mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, 
XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) - - // Revert the internal state to the user state - NOTQ _be(rpState) - NOTQ _bi(rpState) - NOTQ _go(rpState) - NOTQ _ki(rpState) - NOTQ _mi(rpState) - NOTQ _sa(rpState) - - RET - \ No newline at end of file diff --git a/crypto/hash/keccakf.go b/crypto/hash/keccakf.go deleted file mode 100644 index 76d0b9a1a5d..00000000000 --- a/crypto/hash/keccakf.go +++ /dev/null @@ -1,439 +0,0 @@ -// The functions below were copied from golang.org/x/crypto/sha3. -// -// Copyright (c) 2009 The Go Authors. All rights reserved. - -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: - -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. - -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -//go:build !amd64 || purego || !gc -// +build !amd64 purego !gc - -package hash - -// rc stores the round constants for use in the ι step. -var rc = [24]uint64{ - 0x0000000000000001, - 0x0000000000008082, - 0x800000000000808A, - 0x8000000080008000, - 0x000000000000808B, - 0x0000000080000001, - 0x8000000080008081, - 0x8000000000008009, - 0x000000000000008A, - 0x0000000000000088, - 0x0000000080008009, - 0x000000008000000A, - 0x000000008000808B, - 0x800000000000008B, - 0x8000000000008089, - 0x8000000000008003, - 0x8000000000008002, - 0x8000000000000080, - 0x000000000000800A, - 0x800000008000000A, - 0x8000000080008081, - 0x8000000000008080, - 0x0000000080000001, - 0x8000000080008008, -} - -// keccakF1600 applies the Keccak permutation to a 1600b-wide -// state represented as a slice of 25 uint64s. -func keccakF1600(a *[25]uint64) { - // Implementation translated from Keccak-inplace.c - // in the keccak reference code. - var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64 - - for i := 0; i < 24; i += 4 { - // Combines the 5 steps in each round into 2 steps. - // Unrolls 4 rounds per loop and spreads some steps across rounds. 
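// Mapping to the Keccak step functions: the bc0..bc4 / d0..d4 block at the
// top of each unrolled round is θ (column parities C[x], then
// D[x] = C[x-1] XOR rot(C[x+1], 1)); the t<<r | t>>(64-r) expressions apply
// the ρ rotations fused with the π lane permutation (hence the shifting
// index patterns from round to round); the bcX ^ (bcY &^ bcZ) writes are χ;
// and XORing rc[i+n] into a[0] is ι.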
- - // Round 1 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[6] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[12] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[18] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[24] ^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i] - a[6] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[16] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[22] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[3] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[10] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[1] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[7] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[19] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[20] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[11] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[23] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[4] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[5] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[2] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[8] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[14] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[15] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - // Round 2 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[16] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[7] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[23] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[14] ^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1] - a[16] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[11] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[2] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[18] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[20] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[6] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[22] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[4] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[15] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ 
bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[1] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[8] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[24] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[10] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[12] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[3] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[19] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[5] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - // Round 3 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[11] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[22] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[8] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[19] ^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2] - a[11] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[1] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[12] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[23] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[15] = bc0 ^ (bc2 &^ bc1) - a[1] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[16] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[2] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[24] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[5] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[6] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[3] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[14] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[20] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[7] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[18] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[4] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[10] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - // Round 4 - bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20] - bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21] - bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22] - bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23] - bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24] - d0 = bc4 ^ (bc1<<1 | bc1>>63) - d1 = bc0 ^ (bc2<<1 | bc2>>63) - d2 = bc1 ^ (bc3<<1 | bc3>>63) - d3 = bc2 ^ (bc4<<1 | bc4>>63) - d4 = bc3 ^ (bc0<<1 | bc0>>63) - - bc0 = a[0] ^ d0 - t = a[1] ^ d1 - bc1 = t<<44 | t>>(64-44) - t = a[2] ^ d2 - bc2 = t<<43 | t>>(64-43) - t = a[3] ^ d3 - bc3 = t<<21 | t>>(64-21) - t = a[4] 
^ d4 - bc4 = t<<14 | t>>(64-14) - a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3] - a[1] = bc1 ^ (bc3 &^ bc2) - a[2] = bc2 ^ (bc4 &^ bc3) - a[3] = bc3 ^ (bc0 &^ bc4) - a[4] = bc4 ^ (bc1 &^ bc0) - - t = a[5] ^ d0 - bc2 = t<<3 | t>>(64-3) - t = a[6] ^ d1 - bc3 = t<<45 | t>>(64-45) - t = a[7] ^ d2 - bc4 = t<<61 | t>>(64-61) - t = a[8] ^ d3 - bc0 = t<<28 | t>>(64-28) - t = a[9] ^ d4 - bc1 = t<<20 | t>>(64-20) - a[5] = bc0 ^ (bc2 &^ bc1) - a[6] = bc1 ^ (bc3 &^ bc2) - a[7] = bc2 ^ (bc4 &^ bc3) - a[8] = bc3 ^ (bc0 &^ bc4) - a[9] = bc4 ^ (bc1 &^ bc0) - - t = a[10] ^ d0 - bc4 = t<<18 | t>>(64-18) - t = a[11] ^ d1 - bc0 = t<<1 | t>>(64-1) - t = a[12] ^ d2 - bc1 = t<<6 | t>>(64-6) - t = a[13] ^ d3 - bc2 = t<<25 | t>>(64-25) - t = a[14] ^ d4 - bc3 = t<<8 | t>>(64-8) - a[10] = bc0 ^ (bc2 &^ bc1) - a[11] = bc1 ^ (bc3 &^ bc2) - a[12] = bc2 ^ (bc4 &^ bc3) - a[13] = bc3 ^ (bc0 &^ bc4) - a[14] = bc4 ^ (bc1 &^ bc0) - - t = a[15] ^ d0 - bc1 = t<<36 | t>>(64-36) - t = a[16] ^ d1 - bc2 = t<<10 | t>>(64-10) - t = a[17] ^ d2 - bc3 = t<<15 | t>>(64-15) - t = a[18] ^ d3 - bc4 = t<<56 | t>>(64-56) - t = a[19] ^ d4 - bc0 = t<<27 | t>>(64-27) - a[15] = bc0 ^ (bc2 &^ bc1) - a[16] = bc1 ^ (bc3 &^ bc2) - a[17] = bc2 ^ (bc4 &^ bc3) - a[18] = bc3 ^ (bc0 &^ bc4) - a[19] = bc4 ^ (bc1 &^ bc0) - - t = a[20] ^ d0 - bc3 = t<<41 | t>>(64-41) - t = a[21] ^ d1 - bc4 = t<<2 | t>>(64-2) - t = a[22] ^ d2 - bc0 = t<<62 | t>>(64-62) - t = a[23] ^ d3 - bc1 = t<<55 | t>>(64-55) - t = a[24] ^ d4 - bc2 = t<<39 | t>>(64-39) - a[20] = bc0 ^ (bc2 &^ bc1) - a[21] = bc1 ^ (bc3 &^ bc2) - a[22] = bc2 ^ (bc4 &^ bc3) - a[23] = bc3 ^ (bc0 &^ bc4) - a[24] = bc4 ^ (bc1 &^ bc0) - } -} diff --git a/crypto/hash/keccakf_asm.go b/crypto/hash/keccakf_asm.go deleted file mode 100644 index 978d2b6c658..00000000000 --- a/crypto/hash/keccakf_asm.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && !purego && gc -// +build amd64,!purego,gc - -package hash - -// keccakF1600 is Keccak permutation function with -// a width of 1600 bits and 24 rounds. -// This function is implemented in keccakf_amd64.s. - -//go:noescape - -func keccakF1600(a *[25]uint64) diff --git a/crypto/hash/kmac.go b/crypto/hash/kmac.go deleted file mode 100644 index 8814fda4e99..00000000000 --- a/crypto/hash/kmac.go +++ /dev/null @@ -1,175 +0,0 @@ -package hash - -import ( - "encoding/binary" - "fmt" - - "golang.org/x/crypto/sha3" -) - -// implements the interface sha3.ShakeHash -type kmac128 struct { - // the output size of KMAC - outputSize int - // embeds ShakeHash - // stores the encoding of the function name and customization string - // Using the io.Writer interface changes the internal state - // of the KMAC - sha3.ShakeHash - // the block initialized by NewKMAC_128 - // stores the encoding of the key - initBlock []byte -} - -// the cSHAKE128 rate as defined in NIST SP 800-185 -const cSHAKE128BlockSize = 168 - -// NewKMAC_128 returns a new KMAC instance -// - key is the KMAC key (the key size is compared to the security level, although -// the parameter is used as a domain tag in Flow and not as a security key). -// - customizer is the customization string. It can be left empty if no customizer -// is required. 
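// - outputSize is the expected digest length in bytes.
//
// Per NIST SP 800-185, the value computed is
// KMAC128(K, X, L, S) = cSHAKE128(bytepad(encode_string(K), 168) || X || right_encode(L), "KMAC", S),
// which the implementation below mirrors: initBlock caches
// bytepad(encode_string(key), 168), and ComputeHash/SumHash append
// right_encode(outputSize*8) before squeezing. As a worked example of the
// encoding helpers, left_encode(168) yields the two bytes {0x01, 0xA8}.
// A minimal usage sketch (the key and customizer values are illustrative
// only):
//
//	kmac, err := NewKMAC_128([]byte("flow-example-key"), []byte("tag"), 32)
//	if err != nil {
//		// handle error
//	}
//	mac := kmac.ComputeHash([]byte("message")) // 32-byte tag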
-func NewKMAC_128(key []byte, customizer []byte, outputSize int) (Hasher, error) { - var k kmac128 - if outputSize < 0 { - return nil, - fmt.Errorf("kmac output cannot be negative, got %d", outputSize) - } - - // check the key size (required if the key is used as a security key) - if len(key) < KmacMinKeyLen { - return nil, - fmt.Errorf("kmac key size must be at least %d", KmacMinKeyLen) - } - - k.outputSize = outputSize - // initialize the cSHAKE128 instance - k.ShakeHash = sha3.NewCShake128([]byte("KMAC"), customizer) - - // store the encoding of the key - k.initBlock = bytepad(encodeString(key), cSHAKE128BlockSize) - _, _ = k.Write(k.initBlock) - return &k, nil -} - -func (k *kmac128) Algorithm() HashingAlgorithm { - return KMAC128 -} - -const maxEncodeLen = 9 - -// encode_string function as defined in NIST SP 800-185 (for value < 2^64) -func encodeString(s []byte) []byte { - // leftEncode returns max 9 bytes - out := make([]byte, 0, maxEncodeLen+len(s)) - out = append(out, leftEncode(uint64(len(s)*8))...) - out = append(out, s...) - return out -} - -// "left_encode" function as defined in NIST SP 800-185 (for value < 2^64) -// copied from golang.org/x/crypto/sha3 -// -// Copyright (c) 2009 The Go Authors. All rights reserved. - -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: - -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. - -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -func leftEncode(value uint64) []byte { - var b [maxEncodeLen]byte - binary.BigEndian.PutUint64(b[1:], value) - // Trim all but last leading zero bytes - i := byte(1) - for i < 8 && b[i] == 0 { - i++ - } - // Prepend number of encoded bytes - b[i-1] = maxEncodeLen - i - return b[i-1:] -} - -// bytepad function as defined in NIST SP 800-185 -// copied from golang.org/x/crypto/sha3 -// The caller must make sure parameter (w) is strictly positive. -// -// Copyright (c) 2009 The Go Authors. All rights reserved. -func bytepad(input []byte, w int) []byte { - // leftEncode always returns max 9 bytes - buf := make([]byte, 0, maxEncodeLen+len(input)+w) - buf = append(buf, leftEncode(uint64(w))...) - buf = append(buf, input...) 
- padlen := w - (len(buf) % w) - return append(buf, make([]byte, padlen)...) -} - -// "right_encode" function as defined in NIST SP 800-185 (for value < 2^64) -func rightEncode(value uint64) []byte { - var b [maxEncodeLen]byte - binary.BigEndian.PutUint64(b[:8], value) - // Trim all but last leading zero bytes - i := byte(0) - for i < 7 && b[i] == 0 { - i++ - } - // Append number of encoded bytes - b[8] = maxEncodeLen - 1 - i - return b[i:] -} - -// Reset resets the hash to initial state. -func (k *kmac128) Reset() { - k.ShakeHash.Reset() - _, _ = k.Write(k.initBlock) -} - -// ComputeHash computes the mac of the input data. -// It does not update the underlying hash state (the function is thread safe). -func (k *kmac128) ComputeHash(data []byte) Hash { - cshake := k.ShakeHash.Clone() - cshake.Reset() - _, _ = cshake.Write(k.initBlock) - _, _ = cshake.Write(data) - _, _ = cshake.Write(rightEncode(uint64(k.outputSize * 8))) - // read the cshake output - h := make([]byte, k.outputSize) - _, _ = cshake.Read(h) - return h -} - -// SumHash finalizes the mac computations and returns the output. -// It does not reset the state to allow further writing. -func (k *kmac128) SumHash() Hash { - cshake := k.ShakeHash.Clone() - _, _ = cshake.Write(rightEncode(uint64(k.outputSize * 8))) - // read the cshake output - h := make([]byte, k.outputSize) - _, _ = cshake.Read(h) - return h -} - -// Size returns the output length of the KMAC instance -func (k *kmac128) Size() int { - return k.outputSize -} diff --git a/crypto/hash/legacy_keccak.go b/crypto/hash/legacy_keccak.go deleted file mode 100644 index a333dcce00a..00000000000 --- a/crypto/hash/legacy_keccak.go +++ /dev/null @@ -1,19 +0,0 @@ -package hash - -const ( - rateKeccak_256 = 136 - - dsByteKeccak = byte(0x1) -) - -// NewKeccak_256 returns a new instance of legacy Keccak-256 hasher. -func NewKeccak_256() Hasher { - return &spongeState{ - algo: Keccak_256, - rate: rateKeccak_256, - dsByte: dsByteKeccak, - outputLen: HashLenKeccak_256, - bufIndex: bufNilValue, - bufSize: bufNilValue, - } -} diff --git a/crypto/hash/sha2.go b/crypto/hash/sha2.go deleted file mode 100644 index 3362face47a..00000000000 --- a/crypto/hash/sha2.go +++ /dev/null @@ -1,78 +0,0 @@ -package hash - -import ( - "crypto/sha256" - "crypto/sha512" - "hash" -) - -// sha2_256Algo -type sha2_256Algo struct { - hash.Hash -} - -// NewSHA2_256 returns a new instance of SHA2-256 hasher -func NewSHA2_256() Hasher { - return &sha2_256Algo{ - Hash: sha256.New()} -} - -func (s *sha2_256Algo) Algorithm() HashingAlgorithm { - return SHA2_256 -} - -// ComputeHash calculates and returns the SHA2-256 digest of the input. -// The function updates the state (and therefore not thread-safe) -// but does not reset the state to allow further writing. -func (s *sha2_256Algo) ComputeHash(data []byte) Hash { - s.Reset() - // `Write` delegates this call to sha256.digest's `Write` which does not return an error. - _, _ = s.Write(data) - return s.Sum(nil) -} - -// SumHash returns the SHA2-256 output. -// It does not reset the state to allow further writing. -func (s *sha2_256Algo) SumHash() Hash { - return s.Sum(nil) -} - -// sha2_384Algo -type sha2_384Algo struct { - hash.Hash -} - -// NewSHA2_384 returns a new instance of SHA2-384 hasher -func NewSHA2_384() Hasher { - return &sha2_384Algo{ - Hash: sha512.New384()} -} - -func (s *sha2_384Algo) Algorithm() HashingAlgorithm { - return SHA2_384 -} - -// ComputeHash calculates and returns the SHA2-384 digest of the input. 
-// It does not reset the state to allow further writing. -func (s *sha2_384Algo) ComputeHash(data []byte) Hash { - s.Reset() - // `Write` delegates this call to sha512.digest's `Write` which does not return an error. - _, _ = s.Write(data) - return s.Sum(nil) -} - -// SumHash returns the SHA2-384 output. -// It does not reset the state to allow further writing. -func (s *sha2_384Algo) SumHash() Hash { - return s.Sum(nil) -} - -// ComputeSHA2_256 computes the SHA2-256 (commonly known as SHA256) -// digest of data and copies the result to the result buffer. -// -// The function is not part of the Hasher API. It is a pure function -// for simple computation of a hash with minimal heap allocations. -func ComputeSHA2_256(result *[HashLenSHA2_256]byte, data []byte) { - hash := sha256.Sum256(data) - copy(result[:], hash[:]) -} diff --git a/crypto/hash/sha3.go b/crypto/hash/sha3.go deleted file mode 100644 index f5b6cd9fce4..00000000000 --- a/crypto/hash/sha3.go +++ /dev/null @@ -1,50 +0,0 @@ -package hash - -const ( - rateSHA3_256 = 136 - rateSHA3_384 = 104 - - dsByteSHA3 = byte(0x6) -) - -// NewSHA3_256 returns a new instance of SHA3-256 hasher. -func NewSHA3_256() Hasher { - return &spongeState{ - algo: SHA3_256, - rate: rateSHA3_256, - dsByte: dsByteSHA3, - outputLen: HashLenSHA3_256, - bufIndex: bufNilValue, - bufSize: bufNilValue, - } -} - -// NewSHA3_384 returns a new instance of SHA3-384 hasher. -func NewSHA3_384() Hasher { - return &spongeState{ - algo: SHA3_384, - rate: rateSHA3_384, - dsByte: dsByteSHA3, - outputLen: HashLenSHA3_384, - bufIndex: bufNilValue, - bufSize: bufNilValue, - } -} - -// ComputeSHA3_256 computes the SHA3-256 digest of data -// and copies the result to the result buffer. -// -// The function is not part of the Hasher API. It is a pure function -// for simple computation of a hash with minimal heap allocations. -func ComputeSHA3_256(result *[HashLenSHA3_256]byte, data []byte) { - state := &spongeState{ - rate: rateSHA3_256, - dsByte: dsByteSHA3, - outputLen: HashLenSHA3_256, - bufIndex: bufNilValue, - bufSize: bufNilValue, - } - state.write(data) - state.padAndPermute() - copyOut(result[:], state) -} diff --git a/crypto/hash/types.go b/crypto/hash/types.go deleted file mode 100644 index 709f8bdf364..00000000000 --- a/crypto/hash/types.go +++ /dev/null @@ -1,49 +0,0 @@ -package hash - -//revive:disable:var-naming - -// HashingAlgorithm is an identifier for a hashing algorithm. -type HashingAlgorithm int - -const ( - // Supported hashing algorithms - UnknownHashingAlgorithm HashingAlgorithm = iota - // SHA-2 - SHA2_256 - SHA2_384 - // SHA-3 - SHA3_256 - SHA3_384 - // KMAC (Keccak based MAC algorithm) - KMAC128 - // legacy Keccak - Keccak_256 -) - -// String returns the string representation of this hashing algorithm. -func (h HashingAlgorithm) String() string { - return [...]string{ - "UNKNOWN", - "SHA2_256", - "SHA2_384", - "SHA3_256", - "SHA3_384", - "KMAC128", - "Keccak_256"}[h] -} - -const ( - // minimum targeted bits of security - securityBits = 128 - - // Lengths of hash outputs in bytes - HashLenSHA2_256 = 32 - HashLenSHA2_384 = 48 - HashLenSHA3_256 = 32 - HashLenSHA3_384 = 48 - HashLenKeccak_256 = 32 - - // KMAC - // the minimum key length in bytes - KmacMinKeyLen = securityBits / 8 -) diff --git a/crypto/hash/xor_generic.go b/crypto/hash/xor_generic.go deleted file mode 100644 index 38f9d1863db..00000000000 --- a/crypto/hash/xor_generic.go +++ /dev/null @@ -1,64 +0,0 @@ -// The functions below were copied and modified from golang.org/x/crypto/sha3. 
-//
-// Copyright (c) 2009 The Go Authors. All rights reserved.
-
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-//go:build (!amd64 && !386 && !ppc64le) || purego
-// +build !amd64,!386,!ppc64le purego
-
-package hash
-
-import "encoding/binary"
-
-// A storageBuf is an aligned array of maxRate bytes.
-type storageBuf [maxRate]byte
-
-func (b *storageBuf) asBytes() *[maxRate]byte {
- return (*[maxRate]byte)(b)
-}
-
-// xorIn xors the bytes in buf into the state; it
-// makes no non-portable assumptions about memory layout
-// or alignment.
-func xorIn(d *spongeState, buf []byte) {
- n := len(buf) / 8
-
- for i := 0; i < n; i++ {
- a := binary.LittleEndian.Uint64(buf)
- d.a[i] ^= a
- buf = buf[8:]
- }
-}
-
-// copyOut copies uint64s to a byte buffer.
-func copyOut(b []byte, d *spongeState) {
- for i := 0; len(b) >= 8; i++ {
- binary.LittleEndian.PutUint64(b, d.a[i])
- b = b[8:]
- }
-}
diff --git a/crypto/hash/xor_unaligned.go b/crypto/hash/xor_unaligned.go
deleted file mode 100644
index 3b9446c3037..00000000000
--- a/crypto/hash/xor_unaligned.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// The functions below were copied and modified from golang.org/x/crypto/sha3.
-//
-// Copyright (c) 2009 The Go Authors. All rights reserved.
-
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-//go:build (amd64 || 386 || ppc64le) && !purego
-// +build amd64 386 ppc64le
-// +build !purego
-
-package hash
-
-import "unsafe"
-
-// A storageBuf is an aligned array of maxRate bytes.
-type storageBuf [maxRate / 8]uint64
-
-//go:nocheckptr ignore "pointer arithmetic result points to invalid allocation"
-func (b *storageBuf) asBytes() *[maxRate]byte {
- // re-using a trick from https://github.com/golang/go/blob/master/src/runtime/stubs.go#L178:
- // to hide the input pointer from escape analysis and avoid
- // an escape to the heap. The 0 xor tricks the escape analysis tool
- // into thinking "ptr" and "b" are not related.
- ptr := uintptr(unsafe.Pointer(b)) ^ 0 // nolint:staticcheck
- return (*[maxRate]byte)(unsafe.Pointer(ptr))
-}
-
-// xorIn uses unaligned reads and writes to update d.a to contain d.a
-// XOR buf.
-func xorIn(d *spongeState, buf []byte) {
- n := len(buf)
- bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8]
-
- d.a[0] ^= bw[0]
- d.a[1] ^= bw[1]
- d.a[2] ^= bw[2]
- d.a[3] ^= bw[3]
- d.a[4] ^= bw[4]
- d.a[5] ^= bw[5]
- d.a[6] ^= bw[6]
- d.a[7] ^= bw[7]
- d.a[8] ^= bw[8]
- d.a[9] ^= bw[9]
- d.a[10] ^= bw[10]
- d.a[11] ^= bw[11]
- d.a[12] ^= bw[12]
- if n >= 136 {
- d.a[13] ^= bw[13]
- d.a[14] ^= bw[14]
- d.a[15] ^= bw[15]
- d.a[16] ^= bw[16]
- }
-}
-
-func copyOut(buf []byte, d *spongeState) {
- ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
- copy(buf, ab[:])
-}
diff --git a/crypto/random/chacha20.go b/crypto/random/chacha20.go
deleted file mode 100644
index ae834057b81..00000000000
--- a/crypto/random/chacha20.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package random
-
-import (
- "encoding/binary"
- "fmt"
-
- "golang.org/x/crypto/chacha20"
-)
-
-// We use Chacha20 to build a cryptographically secure random number generator
-// based on the ChaCha algorithm.
-//
-// ChaCha is a stream cipher designed by Daniel J. Bernstein[^1], which we use as a PRG. It is
-// an improved variant of the Salsa20 cipher family.
-//
-// We use Chacha20 with a 256-bit key, a 96-bit stream identifier and a 32-bit counter,
-// as specified in RFC 8439 [^2].
-// The encryption key is used as the PRG seed while the stream identifier is used as a nonce
-// to customize the PRG. The PRG outputs are the successive encryptions of a constant message.
-//
-// A 32-bit counter over 64-byte blocks allows 256 GiB of output before cycling,
-// and the stream identifier allows 2^96 unique streams of output per seed.
-// It is the caller's responsibility to avoid the PRG output cycling.
-//
-// [^1]: D. J. Bernstein, [*ChaCha, a variant of Salsa20*](
-// https://cr.yp.to/chacha.html)
-//
-// [^2]: [RFC 8439: ChaCha20 and Poly1305 for IETF Protocols](
-// https://datatracker.ietf.org/doc/html/rfc8439)
-
-// The PRG core, implementing the randCore interface.
-type chachaCore struct {
- cipher chacha20.Cipher
-
- // empty message added to minimize allocations and buffer clearing
- emptyMessage [lenEmptyMessage]byte
-
- // Only used for State/Restore functionality
-
- // Counter of bytes encrypted so far by the stream cipher.
- // Note this is different from the internal 32-bit counter of the chacha state,
- // which counts the encrypted 512-bit blocks.
- bytesCounter uint64
- // initial seed
- seed [keySize]byte
- // initial customizer
- customizer [nonceSize]byte
-}
-
-// The main PRG, implementing the Rand interface.
-type chachaPRG struct {
- genericPRG
- core *chachaCore
-}
-
-const (
- keySize = chacha20.KeySize
- nonceSize = chacha20.NonceSize
-
- // Chacha20SeedLen is the seed length of the Chacha based PRG, it is fixed to 32 bytes.
- Chacha20SeedLen = keySize
- // Chacha20CustomizerMaxLen is the maximum length of the nonce used as a PRG customizer, it is fixed to 12 bytes.
- // Shorter customizers are padded by zeros to 12 bytes.
- Chacha20CustomizerMaxLen = nonceSize
-)
-
-// NewChacha20PRG returns a new Chacha20-based PRG, seeded with
-// the input seed (32 bytes) and a customizer (up to 12 bytes).
-//
-// It is recommended to sample the seed uniformly at random.
-// The function errors if the seed is different from 32 bytes,
-// or if the customizer is larger than 12 bytes.
-// Customizers shorter than 12 bytes are padded by zero bytes.
-func NewChacha20PRG(seed []byte, customizer []byte) (*chachaPRG, error) {
-
- // check the key size
- if len(seed) != Chacha20SeedLen {
- return nil, fmt.Errorf("chacha20 seed length should be %d, got %d", Chacha20SeedLen, len(seed))
- }
-
- // check the nonce size
- if len(customizer) > Chacha20CustomizerMaxLen {
- return nil, fmt.Errorf("chacha20 streamID should be less than %d bytes", Chacha20CustomizerMaxLen)
- }
-
- // init the state core
- var core chachaCore
- // core.bytesCounter is set to 0
- copy(core.seed[:], seed)
- copy(core.customizer[:], customizer) // pad the customizer with zero bytes when it's short
-
- // create the Chacha20 state, initialized with the seed as a key, and the customizer as a streamID.
- chacha, err := chacha20.NewUnauthenticatedCipher(core.seed[:], core.customizer[:])
- if err != nil {
- return nil, fmt.Errorf("chacha20 instance creation failed: %w", err)
- }
- core.cipher = *chacha
-
- prg := &chachaPRG{
- genericPRG: genericPRG{
- randCore: &core,
- },
- core: &core,
- }
- return prg, nil
-}
-
-const lenEmptyMessage = 64
-
-// Read pulls random bytes from the pseudo-random source.
-// The randoms are copied into the input buffer, the number of bytes read
-// is equal to the buffer input length.
-//
-// The stream cipher encrypts a stream of a constant message (empty for simplicity).
-func (c *chachaCore) Read(buffer []byte) {
- // message to encrypt
- var message []byte
-
- if len(buffer) <= lenEmptyMessage {
- // use a constant message (used for most of the calls)
- message = c.emptyMessage[:len(buffer)]
- } else {
- // when the buffer is large, use it as the message to encrypt,
- // but this requires clearing it first.
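// (Editor's note, added for clarity: XORKeyStream XORs the key stream into the
// message bytes, so zeroing the buffer first makes the output exactly the raw
// key stream, since keystream XOR 0 = keystream.)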
- for i := 0; i < len(buffer); i++ {
- buffer[i] = 0
- }
- message = buffer
- }
- c.cipher.XORKeyStream(buffer, message)
- // increase the counter
- c.bytesCounter += uint64(len(buffer))
-}
-
-// counter is stored over 8 bytes
-const counterBytesLen = 8
-
-// Store returns the internal state of the Chacha20 PRG.
-// This is used for serialization/deserialization purposes.
-func (c *chachaPRG) Store() []byte {
- bytes := make([]byte, 0, keySize+nonceSize+counterBytesLen)
- counter := make([]byte, counterBytesLen)
- binary.LittleEndian.PutUint64(counter, c.core.bytesCounter)
- // output is seed || streamID || counter
- bytes = append(bytes, c.core.seed[:]...)
- bytes = append(bytes, c.core.customizer[:]...)
- bytes = append(bytes, counter...)
- return bytes
-}
-
-// RestoreChacha20PRG creates a chacha20-based PRG from a previously stored state.
-// The created PRG is restored to the same state at which the previous PRG was stored.
-func RestoreChacha20PRG(stateBytes []byte) (*chachaPRG, error) {
- // input should be seed (32 bytes) || streamID (12 bytes) || bytesCounter (8 bytes)
- const expectedLen = keySize + nonceSize + counterBytesLen
-
- // check input length
- if len(stateBytes) != expectedLen {
- return nil, fmt.Errorf("rand state length should be %d bytes, got %d", expectedLen, len(stateBytes))
- }
-
- seed := stateBytes[:keySize]
- streamID := stateBytes[keySize : keySize+nonceSize]
- bytesCounter := binary.LittleEndian.Uint64(stateBytes[keySize+nonceSize:])
-
- // create the Chacha20 instance with seed and streamID
- chacha, err := chacha20.NewUnauthenticatedCipher(seed, streamID)
- if err != nil {
- return nil, fmt.Errorf("chacha20 instance creation failed: %w", err)
- }
- // set the block counter, each chacha internal block is 512 bits
- const bytesPerBlock = 512 >> 3
- blockCount := uint32(bytesCounter / bytesPerBlock)
- remainingBytes := bytesCounter % bytesPerBlock
- chacha.SetCounter(blockCount)
- // query the remaining bytes to catch up with the stored chacha state
- remainderStream := make([]byte, remainingBytes)
- chacha.XORKeyStream(remainderStream, remainderStream)
-
- core := &chachaCore{
- cipher: *chacha,
- bytesCounter: bytesCounter,
- }
- copy(core.seed[:], seed)
- copy(core.customizer[:], streamID)
-
- prg := &chachaPRG{
- genericPRG: genericPRG{
- randCore: core,
- },
- core: core,
- }
- return prg, nil
-}
diff --git a/crypto/random/empty.go b/crypto/random/empty.go
new file mode 100644
index 00000000000..eeab28a038e
--- /dev/null
+++ b/crypto/random/empty.go
@@ -0,0 +1 @@
+package random
diff --git a/crypto/random/rand.go b/crypto/random/rand.go
deleted file mode 100644
index 712ac0c03a8..00000000000
--- a/crypto/random/rand.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package random
-
-import (
- "encoding/binary"
- "fmt"
-)
-
-// Rand is a pseudo random number generator.
-// All methods update the internal state of the PRG,
-// which makes the PRGs implementing this interface
-// not concurrency-safe.
-type Rand interface {
- // Read fills the input slice with random bytes.
- Read([]byte)
-
- // UintN returns a random number between 0 (inclusive) and N (exclusive).
- UintN(uint64) uint64
-
- // Permutation returns a permutation of the set [0,n-1].
- // The theoretical output space grows very fast (n!), so the input (n) should be chosen carefully
- // to make sure the function output space covers a big chunk of the theoretical outputs.
- // The function errors if the parameter is a negative integer.
- Permutation(n int) ([]int, error)
-
- // SubPermutation returns the first m elements of a permutation of [0,n-1].
- // The theoretical output space can be large (n!/(n-m)!), so the inputs should be chosen carefully
- // to make sure the function output space covers a big chunk of the theoretical outputs.
- // The function errors if the parameter is a negative integer.
- SubPermutation(n int, m int) ([]int, error)
-
- // Shuffle permutes an ordered data structure of an arbitrary type in place. The main use-case is
- // permuting slice or array elements. (n) is the size of the data structure.
- // The theoretical output space grows very fast with the slice size (n!), so the input (n) should be chosen carefully
- // to make sure the function output space covers a big chunk of the theoretical outputs.
- // The function errors if any of the parameters is a negative integer.
- Shuffle(n int, swap func(i, j int)) error
-
- // Samples picks (m) random ordered elements of a data structure of an arbitrary type of total size (n). The (m) elements are placed
- // in the indices 0 to (m-1) with in-place swapping. The data structure ends up being a permutation of the initial (n) elements.
- // While the sampling of the (m) elements is pseudo-uniformly random, there is no guarantee about the uniformity of the permutation of
- // the (n) elements. The function Shuffle should be used in case the entire (n) elements need to be shuffled.
- // The main use-case of the data structure is a slice or array.
- // The theoretical output space grows very fast with the slice size (n!/(n-m)!), so the inputs should be chosen carefully
- // to make sure the function output space covers a big chunk of the theoretical outputs.
- // The function errors if any of the parameters is a negative integer.
- Samples(n int, m int, swap func(i, j int)) error
-
- // Store returns the internal state of the random generator.
- // The internal state can be used as a seed input for the function
- // Restore to restore an identical PRG (with the same internal state).
- Store() []byte
-}
-
-// randCore is a PRG providing the core Read function of a PRG.
-// All other Rand methods use the core Read method.
-//
-// In order to add a new Rand implementation,
-// it should be enough to implement randCore.
-type randCore interface {
- // Read fills the input slice with random bytes.
- Read([]byte)
-}
-
-// genericPRG implements all the Rand methods using the embedded randCore method.
-// All implementations of the Rand interface should embed the genericPRG struct.
-type genericPRG struct {
- randCore
- // buffer used by UintN function to avoid extra memory allocation
- uintnBuffer [8]byte
-}
-
-// UintN returns a uint64 pseudo-random number in [0,n-1],
-// using `p` as an entropy source.
-// The function panics if input `n` is zero.
-func (p *genericPRG) UintN(n uint64) uint64 {
- if n == 0 {
- panic("input to UintN can't be 0")
- }
- // the max returned random is n-1
- max := n - 1
- // count the size of max in bytes
- size := 0
- for tmp := max; tmp != 0; tmp >>= 8 {
- size++
- }
- // get the bit size of max
- mask := uint64(0)
- for max&mask != max {
- mask = (mask << 1) | 1
- }
-
- // For a better uniformity of the result, loop until a sample is less than or equal to `max`.
- // This means the function may take longer to output a random number.
- // Using the size of `max` in bits helps the loop end earlier.
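// Editor's worked example (not in the original source): for n = 1000, we get
// max = 999, size = 2 bytes, and mask = 0x3FF (10 bits). Each iteration below
// draws 10 effective bits, and a draw is rejected only when it falls in
// [1000,1023], i.e. with probability 24/1024 ≈ 2.3%, so the loop almost
// always terminates within one or two iterations.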
- // (a different approach would be to pull at least 128 bits from the random source
- // and use big number modular reduction by `n`)
- random := n
- for random > max {
- p.Read(p.uintnBuffer[:size]) // adjust to the size of max in bytes
- random = binary.LittleEndian.Uint64(p.uintnBuffer[:])
- random &= mask // adjust to the size of max in bits
- }
-
- return random
-}
-
-// Permutation returns a permutation of the set [0,n-1].
-// It implements Fisher-Yates Shuffle (inside-out variant) using `p` as a random source.
-// The output space grows very fast (n!), so the input `n` should be chosen carefully
-// to guarantee a good uniformity of the output.
-//
-// O(n) space and O(n) time.
-func (p *genericPRG) Permutation(n int) ([]int, error) {
- if n < 0 {
- return nil, fmt.Errorf("population size cannot be negative")
- }
- items := make([]int, n)
- for i := 0; i < n; i++ {
- j := p.UintN(uint64(i + 1))
- items[i] = items[j]
- items[j] = i
- }
- return items, nil
-}
-
-// SubPermutation returns the first `m` elements of a permutation of [0,n-1].
-//
-// It implements Fisher-Yates Shuffle using `p` as a source of randoms.
-//
-// O(n) space and O(n) time
-func (p *genericPRG) SubPermutation(n int, m int) ([]int, error) {
- if m < 0 {
- return nil, fmt.Errorf("sample size cannot be negative")
- }
- if n < m {
- return nil, fmt.Errorf("sample size (%d) cannot be larger than entire population (%d)", m, n)
- }
- // condition n >= 0 is enforced by function Permutation(n)
- items, _ := p.Permutation(n)
- return items[:m], nil
-}
-
-// Shuffle permutes the given slice in place.
-//
-// It implements Fisher-Yates Shuffle using `p` as a source of randoms.
-//
-// O(1) space and O(n) time
-func (p *genericPRG) Shuffle(n int, swap func(i, j int)) error {
- if n < 0 {
- return fmt.Errorf("population size cannot be negative")
- }
- return p.Samples(n, n, swap)
-}
-
-// Samples randomly picks m elements out of n elements and places them
-// in random order at indices [0,m-1], the swapping being implemented in place.
-//
-// It implements the first (m) elements of Fisher-Yates Shuffle using `p` as a source of randoms.
-//
-// O(1) space and O(m) time
-func (p *genericPRG) Samples(n int, m int, swap func(i, j int)) error {
- if m < 0 {
- return fmt.Errorf("sample size cannot be negative")
- }
- if n < m {
- return fmt.Errorf("sample size (%d) cannot be larger than entire population (%d)", m, n)
- }
- for i := 0; i < m; i++ {
- j := p.UintN(uint64(n - i))
- swap(i, i+int(j))
- }
- return nil
-}
diff --git a/crypto/random/rand_test.go b/crypto/random/rand_test.go
deleted file mode 100644
index 2f41d3c632c..00000000000
--- a/crypto/random/rand_test.go
+++ /dev/null
@@ -1,422 +0,0 @@
-package random
-
-import (
- "bytes"
- mrand "math/rand"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "golang.org/x/crypto/chacha20"
-)
-
-// sanity check for the underlying implementation of Chacha20
-// to make sure the implementation is compliant with RFC 7539.
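// Editor's addition: an illustrative usage sketch of the PRG API exercised by
// the tests below; it is not part of the original file, and the customizer
// string is a hypothetical placeholder.
//
//	func ExampleChacha20PRG() {
//		seed := make([]byte, Chacha20SeedLen) // 32 bytes, ideally from crypto/rand
//		if _, err := crand.Read(seed); err != nil { // crand = "crypto/rand"
//			panic(err)
//		}
//		prg, err := NewChacha20PRG(seed, []byte("my-protocol")) // customizer <= 12 bytes
//		if err != nil {
//			panic(err)
//		}
//		perm, _ := prg.Permutation(10) // pseudo-random permutation of [0,9]
//		state := prg.Store()           // seed || streamID || byte counter
//		restored, _ := RestoreChacha20PRG(state) // continues where prg left off
//		_, _ = perm, restored
//	}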
-func TestChacha20Compliance(t *testing.T) { - - t.Run("key and nonce length", func(t *testing.T) { - - assert.Equal(t, Chacha20SeedLen, 32) - assert.Equal(t, Chacha20CustomizerMaxLen, 12) - }) - - t.Run("RFC test vector", func(t *testing.T) { - - key := []byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - } - nonce := []byte{0, 0, 0, 0, 0, 0, 0, 0x4a, 0, 0, 0, 0} - counter := uint32(1) - plaintext := []byte{ - 0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74, 0x6c, - 0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39, 0x3a, 0x20, 0x49, 0x66, 0x20, 0x49, 0x20, 0x63, - 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6f, - 0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x74, 0x69, 0x70, 0x20, 0x66, 0x6f, 0x72, 0x20, - 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x2c, 0x20, 0x73, 0x75, 0x6e, 0x73, - 0x63, 0x72, 0x65, 0x65, 0x6e, 0x20, 0x77, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69, - } - ciphertext := []byte{ - 0x6e, 0x2e, 0x35, 0x9a, 0x25, 0x68, 0xf9, 0x80, 0x41, 0xba, 0x07, 0x28, 0xdd, 0x0d, 0x69, 0x81, - 0xe9, 0x7e, 0x7a, 0xec, 0x1d, 0x43, 0x60, 0xc2, 0x0a, 0x27, 0xaf, 0xcc, 0xfd, 0x9f, 0xae, 0x0b, - 0xf9, 0x1b, 0x65, 0xc5, 0x52, 0x47, 0x33, 0xab, 0x8f, 0x59, 0x3d, 0xab, 0xcd, 0x62, 0xb3, 0x57, - 0x16, 0x39, 0xd6, 0x24, 0xe6, 0x51, 0x52, 0xab, 0x8f, 0x53, 0x0c, 0x35, 0x9f, 0x08, 0x61, 0xd8, - 0x07, 0xca, 0x0d, 0xbf, 0x50, 0x0d, 0x6a, 0x61, 0x56, 0xa3, 0x8e, 0x08, 0x8a, 0x22, 0xb6, 0x5e, - 0x52, 0xbc, 0x51, 0x4d, 0x16, 0xcc, 0xf8, 0x06, 0x81, 0x8c, 0xe9, 0x1a, 0xb7, 0x79, 0x37, 0x36, - 0x5a, 0xf9, 0x0b, 0xbf, 0x74, 0xa3, 0x5b, 0xe6, 0xb4, 0x0b, 0x8e, 0xed, 0xf2, 0x78, 0x5e, 0x42, - } - - chacha, err := chacha20.NewUnauthenticatedCipher(key, nonce) - require.NoError(t, err) - chacha.SetCounter(counter) - chacha.XORKeyStream(plaintext, plaintext) - assert.Equal(t, plaintext, ciphertext) - - }) - - t.Run("invalid constructor inputs", func(t *testing.T) { - seed := make([]byte, Chacha20SeedLen+1) - customizer := make([]byte, Chacha20CustomizerMaxLen+1) - - // long seed - _, err := NewChacha20PRG(seed, customizer[:Chacha20CustomizerMaxLen]) - assert.Error(t, err) - // long nonce - _, err = NewChacha20PRG(seed[:Chacha20SeedLen], customizer) - assert.Error(t, err) - }) - - t.Run("short nonce", func(t *testing.T) { - seed := make([]byte, Chacha20SeedLen) - customizer := make([]byte, Chacha20CustomizerMaxLen) - - // short nonces should be accepted - _, err := NewChacha20PRG(seed, customizer[:Chacha20CustomizerMaxLen-1]) - assert.NoError(t, err) - _, err = NewChacha20PRG(seed, customizer[:0]) - assert.NoError(t, err) - }) -} - -func getPRG(t *testing.T) *mrand.Rand { - random := time.Now().UnixNano() - t.Logf("rng seed is %d", random) - rng := mrand.New(mrand.NewSource(random)) - return rng -} - -// The tests are targeting the PRG implementations in the package. -// For now, the tests are only used for Chacha20 PRG, but can be ported -// to test another PRG implementation. - -// Simple unit testing of UintN using a basic randomness test. -// It doesn't perform advanced statistical tests. 
-func TestUintN(t *testing.T) { - rand := getPRG(t) - seed := make([]byte, Chacha20SeedLen) - _, err := rand.Read(seed) - require.NoError(t, err) - customizer := make([]byte, Chacha20CustomizerMaxLen) - _, err = rand.Read(customizer) - require.NoError(t, err) - - rng, err := NewChacha20PRG(seed, customizer) - require.NoError(t, err) - - t.Run("basic uniformity", func(t *testing.T) { - - maxN := uint64(1000) - mod := mrand.Uint64() - var n, classWidth uint64 - if mod < maxN { // `mod` is too small so that we can consider `mod` classes - n = mod - classWidth = 1 - } else { // `mod` is big enough so that we can partition [0,mod-1] into `maxN` classes - n = maxN - mod = (mod / n) * n // adjust `mod` to make sure it is a multiple of n for a more accurate test - classWidth = mod / n - } - - uintNf := func() (uint64, error) { - return uint64(rng.UintN(mod)), nil - } - BasicDistributionTest(t, n, classWidth, uintNf) - - }) - - t.Run("zero n", func(t *testing.T) { - assert.Panics(t, func() { - rng.UintN(0) - }) - }) -} - -// Simple unit testing of SubPermutation using a basic randomness test. -// It doesn't perform advanced statistical tests. -// -// SubPermutation tests cover Permutation as well. -func TestSubPermutation(t *testing.T) { - rand := getPRG(t) - - seed := make([]byte, Chacha20SeedLen) - _, err := rand.Read(seed) - require.NoError(t, err) - customizer := make([]byte, Chacha20CustomizerMaxLen) - _, err = rand.Read(customizer) - require.NoError(t, err) - - rng, err := NewChacha20PRG(seed, customizer) - require.NoError(t, err) - - t.Run("basic randomness", func(t *testing.T) { - listSize := 100 - subsetSize := 20 - sampleSize := 85000 - // tests the subset sampling randomness - samplingDistribution := make([]float64, listSize) - // tests the subset ordering randomness (using a particular element testElement) - orderingDistribution := make([]float64, subsetSize) - testElement := rand.Intn(listSize) - - for i := 0; i < sampleSize; i++ { - shuffledlist, err := rng.SubPermutation(listSize, subsetSize) - require.NoError(t, err) - require.Equal(t, len(shuffledlist), subsetSize) - has := make(map[int]struct{}) - for j, e := range shuffledlist { - // check for repetition - _, ok := has[e] - require.False(t, ok, "duplicated item") - has[e] = struct{}{} - // fill the distribution - samplingDistribution[e] += 1.0 - if e == testElement { - orderingDistribution[j] += 1.0 - } - } - } - EvaluateDistributionUniformity(t, samplingDistribution) - EvaluateDistributionUniformity(t, orderingDistribution) - }) - - // Evaluate that - // - permuting an empty set returns an empty list - // - drawing a sample of size zero from a non-empty set returns an empty list - t.Run("empty sets", func(t *testing.T) { - - // verify that permuting an empty set returns an empty list - res, err := rng.SubPermutation(0, 0) - require.NoError(t, err) - assert.True(t, len(res) == 0) - - // verify that drawing a sample of size zero from a non-empty set returns an empty list - res, err = rng.SubPermutation(10, 0) - require.NoError(t, err) - assert.True(t, len(res) == 0) - }) - - t.Run("negative inputs", func(t *testing.T) { - res, err := rng.Permutation(-3) - require.Error(t, err) - assert.Nil(t, res) - - res, err = rng.SubPermutation(5, -3) - require.Error(t, err) - assert.Nil(t, res) - - res, err = rng.SubPermutation(-3, 5) - require.Error(t, err) - assert.Nil(t, res) - }) -} - -// Simple unit testing of Shuffle using a basic randomness test. -// It doesn't perform advanced statistical tests. 
-func TestShuffle(t *testing.T) { - rand := getPRG(t) - - seed := make([]byte, Chacha20SeedLen) - _, err := rand.Read(seed) - require.NoError(t, err) - customizer := make([]byte, Chacha20CustomizerMaxLen) - _, err = rand.Read(customizer) - require.NoError(t, err) - - rng, err := NewChacha20PRG(seed, customizer) - require.NoError(t, err) - - t.Run("basic uniformity", func(t *testing.T) { - listSize := 100 - sampleSize := 80000 - // the distribution of a particular element of the list, testElement - distribution := make([]float64, listSize) - testElement := rand.Intn(listSize) - // Slice to shuffle - list := make([]int, 0, listSize) - for i := 0; i < listSize; i++ { - list = append(list, i) - } - - shuffleAndCount := func(t *testing.T) { - err = rng.Shuffle(listSize, func(i, j int) { - list[i], list[j] = list[j], list[i] - }) - require.NoError(t, err) - has := make(map[int]struct{}) - for j, e := range list { - // check for repetition - _, ok := has[e] - require.False(t, ok, "duplicated item") - has[e] = struct{}{} - // fill the distribution - if e == testElement { - distribution[j] += 1.0 - } - } - } - - t.Run("shuffle a random permutation", func(t *testing.T) { - for k := 0; k < sampleSize; k++ { - shuffleAndCount(t) - } - EvaluateDistributionUniformity(t, distribution) - }) - - t.Run("shuffle a same permutation", func(t *testing.T) { - for k := 0; k < sampleSize; k++ { - // reinit the permutation to the same value - for i := 0; i < listSize; i++ { - list[i] = i - } - shuffleAndCount(t) - } - EvaluateDistributionUniformity(t, distribution) - }) - }) - - t.Run("empty slice", func(t *testing.T) { - emptySlice := make([]float64, 0) - err = rng.Shuffle(len(emptySlice), func(i, j int) { - emptySlice[i], emptySlice[j] = emptySlice[j], emptySlice[i] - }) - require.NoError(t, err) - assert.True(t, len(emptySlice) == 0) - }) - - t.Run("negative inputs", func(t *testing.T) { - emptySlice := make([]float64, 5) - err = rng.Shuffle(-3, func(i, j int) { - emptySlice[i], emptySlice[j] = emptySlice[j], emptySlice[i] - }) - require.Error(t, err) - }) -} - -func TestSamples(t *testing.T) { - rand := getPRG(t) - - seed := make([]byte, Chacha20SeedLen) - _, err := rand.Read(seed) - require.NoError(t, err) - customizer := make([]byte, Chacha20CustomizerMaxLen) - _, err = rand.Read(customizer) - require.NoError(t, err) - - rng, err := NewChacha20PRG(seed, customizer) - require.NoError(t, err) - - t.Run("basic uniformity", func(t *testing.T) { - listSize := 100 - samplesSize := 20 - sampleSize := 100000 - // tests the subset sampling randomness - samplingDistribution := make([]float64, listSize) - // tests the subset ordering randomness (using a particular element testElement) - orderingDistribution := make([]float64, samplesSize) - testElement := rand.Intn(listSize) - // Slice to shuffle - list := make([]int, 0, listSize) - for i := 0; i < listSize; i++ { - list = append(list, i) - } - - for i := 0; i < sampleSize; i++ { - err = rng.Samples(listSize, samplesSize, func(i, j int) { - list[i], list[j] = list[j], list[i] - }) - require.NoError(t, err) - has := make(map[int]struct{}) - for j, e := range list[:samplesSize] { - // check for repetition - _, ok := has[e] - require.False(t, ok, "duplicated item") - has[e] = struct{}{} - // fill the distribution - samplingDistribution[e] += 1.0 - if e == testElement { - orderingDistribution[j] += 1.0 - } - } - } - EvaluateDistributionUniformity(t, samplingDistribution) - EvaluateDistributionUniformity(t, orderingDistribution) - }) - - t.Run("zero edge cases", func(t 
*testing.T) {
- // Sampling from an empty set
- emptySlice := make([]float64, 0)
- err = rng.Samples(len(emptySlice), len(emptySlice), func(i, j int) {
- emptySlice[i], emptySlice[j] = emptySlice[j], emptySlice[i]
- })
- require.NoError(t, err)
- assert.True(t, len(emptySlice) == 0)
-
- // drawing a sample of size zero from a non-empty list should leave the original list unmodified
- constant := []float64{0, 1, 2, 3, 4, 5}
- fullSlice := append([]float64(nil), constant...) // copy, so the comparison with `constant` below is meaningful
- err = rng.Samples(len(fullSlice), 0, func(i, j int) { // modifies fullSlice in-place
- fullSlice[i], fullSlice[j] = fullSlice[j], fullSlice[i]
- })
- require.NoError(t, err)
- assert.Equal(t, constant, fullSlice)
- })
-
- t.Run("negative inputs", func(t *testing.T) {
- emptySlice := make([]float64, 5)
- err = rng.Samples(-3, 5, func(i, j int) {
- emptySlice[i], emptySlice[j] = emptySlice[j], emptySlice[i]
- })
- require.Error(t, err)
-
- err = rng.Samples(-5, 3, func(i, j int) {
- emptySlice[i], emptySlice[j] = emptySlice[j], emptySlice[i]
- })
- require.Error(t, err)
- })
-}
-
-// TestStateRestore tests the serialization and deserialization functions
-// Store and Restore
-func TestStateRestore(t *testing.T) {
- rand := getPRG(t)
-
- // generate a seed
- seed := make([]byte, Chacha20SeedLen)
- _, err := rand.Read(seed)
- require.NoError(t, err)
- customizer := make([]byte, Chacha20CustomizerMaxLen)
- _, err = rand.Read(customizer)
- require.NoError(t, err)
- t.Logf("seed is %x, customizer is %x\n", seed, customizer)
-
- // create an rng
- rng, err := NewChacha20PRG(seed, customizer)
- require.NoError(t, err)
-
- // evolve the internal state of the rng
- iterations := rand.Intn(1000)
- for i := 0; i < iterations; i++ {
- _ = rng.UintN(1024)
- }
- // get the internal state of the rng
- state := rng.Store()
-
- // check the state is deterministic
- stateClone := rng.Store()
- assert.True(t, bytes.Equal(state, stateClone), "Store is not deterministic")
-
- // check that Restore is the inverse of Store
- secondRng, err := RestoreChacha20PRG(state)
- require.NoError(t, err)
- assert.True(t, bytes.Equal(state, secondRng.Store()), "Store o Restore is not identity")
-
- // check that the two PRGs generate identical outputs
- iterations = rand.Intn(1000)
- for i := 0; i < iterations; i++ {
- rand1 := rng.UintN(1024)
- rand2 := secondRng.UintN(1024)
- assert.Equal(t, rand1, rand2, "the 2 rngs are not identical on round %d", i)
- }
-}
diff --git a/crypto/random/rand_utils.go b/crypto/random/rand_utils.go
deleted file mode 100644
index 49b33b50492..00000000000
--- a/crypto/random/rand_utils.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package random
-
-import (
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "gonum.org/v1/gonum/stat"
-)
-
-// BasicDistributionTest is a test function to run a basic statistic test on `randf` output.
-// `randf` is a function that outputs random integers.
-// It partitions all outputs into `n` continuous classes and computes the distribution
-// over the partition. Each class has a width of `classWidth`: the first class is [0..classWidth-1],
-// the second class is [classWidth..2*classWidth-1], etc.
-// It computes the frequency of outputs in the `n` classes and computes the
-// standard deviation of the frequencies.
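// Editor's worked example (not in the original source): with n = 4 classes and
// classWidth = 250, an output r = 612 is counted in class r/classWidth = 2,
// the class covering [500..749].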
-// A small standard deviation is a necessary condition for a uniform
-// distribution of `randf` (though it is not a guarantee of uniformity).
-func BasicDistributionTest(t *testing.T, n uint64, classWidth uint64, randf func() (uint64, error)) {
- // the sample size should ideally be a large multiple of `n`,
- // but if `n` is too small, we could use a small sample size so that the test
- // isn't too slow
- sampleSize := 1000 * n
- if n < 100 {
- sampleSize = (80000 / n) * n // highest multiple of n less than 80000
- }
- distribution := make([]float64, n)
- // populate the distribution
- for i := uint64(0); i < sampleSize; i++ {
- r, err := randf()
- require.NoError(t, err)
- if n*classWidth != 0 {
- require.Less(t, r, n*classWidth)
- }
- distribution[r/classWidth] += 1.0
- }
- EvaluateDistributionUniformity(t, distribution)
-}
-
-// EvaluateDistributionUniformity evaluates if the input distribution is close to uniform
-// through a quick basic test.
-// The test computes the standard deviation and checks it is small enough compared
-// to the distribution mean.
-func EvaluateDistributionUniformity(t *testing.T, distribution []float64) {
- tolerance := 0.05
- stdev := stat.StdDev(distribution, nil)
- mean := stat.Mean(distribution, nil)
- assert.Greater(t, tolerance*mean, stdev, fmt.Sprintf("basic randomness test failed: n: %d, stdev: %v, mean: %v", len(distribution), stdev, mean))
-}
diff --git a/crypto/relic_build.sh b/crypto/relic_build.sh
deleted file mode 100755
index 6cff3a6b478..00000000000
--- a/crypto/relic_build.sh
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-
-pushd "$DIR"
-
-# Ensure the directory is writeable
-chmod -R +w "$(pwd)"
-
-mkdir -p "$DIR/relic/build"
-pushd "$DIR/relic/build"
-
-
-# make cmake print its CC interpretation
-CMAKE_FILE="${DIR}/relic/CMakeLists.txt"
-# parameter expansion is not suitable here
-# shellcheck disable=SC2089
-CMAKE_PRINT_CC="message ( STATUS \"CC=\$ENV{CC}\" )"
-# Make the cmake run print its interpretation of CC
-echo "$CMAKE_PRINT_CC" >> "${CMAKE_FILE}"
-
-# Probe cmake's MakeFile generation and extract the CC version
-CMAKE_TEMP=$(mktemp)
-cmake .. > "$CMAKE_TEMP"
-CC_VAL="$(tail -n 5 "$CMAKE_TEMP" | grep -oE -m 1 'CC=.*$')"
-CC_VAL="${CC_VAL:3}"
-
-# de-mangle the CMakeLists file, using a temporary file for BSD compatibility
-sed '$d' ../CMakeLists.txt > "$CMAKE_TEMP"
-mv "$CMAKE_TEMP" ../CMakeLists.txt
-
-# default to which
-CC_VAL=${CC_VAL:-"$(which cc)"}
-CC_VERSION_STR="$($CC_VAL --version)"
-
-# we use uname to record which arch we are running on
-ARCH=$(uname -m 2>/dev/null || true)
-
-if [[ "$ARCH" =~ "x86_64" ]]; then
- # Compile as westmere arch to avoid cross-compilation issues on machines not supporting AVX extensions.
- # Relic performance as used in the flow crypto library is not impacted by whether it is compiled with "native" or "westmere", as proven by benchmark results.
- MARCH="-march=westmere" -elif [[ "$ARCH" =~ ^(arm64|armv7|armv7s)$ && "${CC_VERSION_STR[0]}" =~ (clang) ]]; then - # the "-march=native" option is not supported with clang on ARM - MARCH="" -else - MARCH="-march=native" -fi - -# Set RELIC config for Flow -COMP=(-DCFLAGS="-O3 -funroll-loops -fomit-frame-pointer ${MARCH} -mtune=native") -GENERAL=(-DTIMER=CYCLE -DCHECK=OFF -DVERBS=OFF) -LIBS=(-DSHLIB=OFF -DSTLIB=ON) -RAND=(-DRAND=HASHD -DSEED=) - -# -BN_REP=(-DALLOC=AUTO -DALIGN=1 -DWSIZE=64 -DBN_PRECI=1024 -DBN_MAGNI=DOUBLE) -ARITH=(-DARITH=EASY) -PRIME=(-DFP_PRIME=381) - -# -BN_METH=(-DBN_KARAT=0 -DBN_METHD="COMBA;COMBA;MONTY;SLIDE;BINAR;BASIC") -FP_METH=(-DFP_KARAT=0 -DFP_METHD="INTEG;INTEG;INTEG;MONTY;MONTY;JMPDS;SLIDE") -PRIMES=(-DFP_PMERS=OFF -DFP_QNRES=ON) -FPX_METH=(-DFPX_METHD="INTEG;INTEG;LAZYR") -EP_METH=(-DEP_MIXED=ON -DEP_PLAIN=OFF -DEP_ENDOM=ON -DEP_SUPER=OFF\ - -DEP_CTMAP=ON -DEP_METHD="JACOB;LWNAF;COMBS;INTER") -PP_METH=(-DPP_METHD="LAZYR;OATEP") - -# run cmake -cmake "${COMP[@]}" "${GENERAL[@]}" \ - "${LIBS[@]}" "${RAND[@]}" \ - "${BN_REP[@]}" "${ARITH[@]}" \ - "${PRIME[@]}" "${PRIMES[@]}" \ - "${EP_METH[@]}" \ - "${BN_METH[@]}" \ - "${FP_METH[@]}" \ - "${FPX_METH[@]}" \ - "${PP_METH[@]}" .. - - -# Compile the static library -make clean -make relic_s -j8 -rm -f CMakeCache.txt - -popd -popd diff --git a/crypto/sign.go b/crypto/sign.go deleted file mode 100644 index 68196acba2d..00000000000 --- a/crypto/sign.go +++ /dev/null @@ -1,230 +0,0 @@ -// Package crypto ... -package crypto - -import ( - "crypto/elliptic" - "fmt" - - "github.com/btcsuite/btcd/btcec/v2" - - "github.com/onflow/flow-go/crypto/hash" -) - -// revive:disable:var-naming - -// revive:enable - -// SigningAlgorithm is an identifier for a signing algorithm -// (and parameters if applicable) -type SigningAlgorithm int - -const ( - // Supported signing algorithms - UnknownSigningAlgorithm SigningAlgorithm = iota - // BLSBLS12381 is BLS on BLS 12-381 curve - BLSBLS12381 - // ECDSAP256 is ECDSA on NIST P-256 curve - ECDSAP256 - // ECDSASecp256k1 is ECDSA on secp256k1 curve - ECDSASecp256k1 -) - -// String returns the string representation of this signing algorithm. -func (f SigningAlgorithm) String() string { - return [...]string{"UNKNOWN", "BLS_BLS12381", "ECDSA_P256", "ECDSA_secp256k1"}[f] -} - -// Signature is a generic type, regardless of the signature scheme -type Signature []byte - -// Signer interface -type signer interface { - // generatePrivateKey generates a private key - generatePrivateKey([]byte) (PrivateKey, error) - // decodePrivateKey loads a private key from a byte array - decodePrivateKey([]byte) (PrivateKey, error) - // decodePublicKey loads a public key from a byte array - decodePublicKey([]byte) (PublicKey, error) - // decodePublicKeyCompressed loads a public key from a byte array representing a point in compressed form - decodePublicKeyCompressed([]byte) (PublicKey, error) -} - -// newNonRelicSigner returns a signer that does not depend on Relic library. 
-func newNonRelicSigner(algo SigningAlgorithm) (signer, error) {
- switch algo {
- case ECDSAP256:
- return p256Instance, nil
- case ECDSASecp256k1:
- return secp256k1Instance, nil
- default:
- return nil, invalidInputsErrorf("the signature scheme %s is not supported", algo)
- }
-}
-
-// Initialize the context of all algos not requiring Relic
-func initNonRelic() {
- // P-256
- p256Instance = &(ecdsaAlgo{
- curve: elliptic.P256(),
- algo: ECDSAP256,
- })
-
- // secp256k1
- secp256k1Instance = &(ecdsaAlgo{
- curve: btcec.S256(),
- algo: ECDSASecp256k1,
- })
-}
-
-// Signature format check for non-Relic algos (ECDSA)
-func signatureFormatCheckNonRelic(algo SigningAlgorithm, s Signature) (bool, error) {
- switch algo {
- case ECDSAP256:
- return p256Instance.signatureFormatCheck(s), nil
- case ECDSASecp256k1:
- return secp256k1Instance.signatureFormatCheck(s), nil
- default:
- return false, invalidInputsErrorf(
- "the signature scheme %s is not supported",
- algo)
- }
-}
-
-// SignatureFormatCheck verifies the format of a serialized signature,
-// regardless of messages or public keys.
-//
-// This function is only defined for ECDSA algos for now.
-//
-// If SignatureFormatCheck returns false then the input is not a valid
-// signature and will fail a verification against any message and public key.
-func SignatureFormatCheck(algo SigningAlgorithm, s Signature) (bool, error) {
- // For now, signatureFormatCheckNonRelic is only defined for non-Relic algos.
- return signatureFormatCheckNonRelic(algo, s)
-}
-
-// GeneratePrivateKey generates a private key of the algorithm using the entropy of the given seed.
-//
-// The seed minimum length is 32 bytes and it should have enough entropy.
-// It is recommended to use a secure crypto RNG to generate the seed.
-//
-// The function returns:
-//  - (nil, invalidInputsErrors) if the signing algorithm is not supported or
-//    if the seed length is not valid (less than 32 bytes or larger than 256 bytes)
-//  - (nil, error) if an unexpected error occurs
-//  - (sk, nil) if key generation was successful
-func GeneratePrivateKey(algo SigningAlgorithm, seed []byte) (PrivateKey, error) {
- signer, err := newSigner(algo)
- if err != nil {
- return nil, fmt.Errorf("key generation failed: %w", err)
- }
- return signer.generatePrivateKey(seed)
-}
-
-// DecodePrivateKey decodes an array of bytes into a private key of the given algorithm
-//
-// The function returns:
-//  - (nil, invalidInputsErrors) if the signing algorithm is not supported
-//  - (nil, invalidInputsErrors) if the input does not serialize a valid private key:
-//    - ECDSA: bytes(x) where bytes() is the big-endian encoding padded to the curve order.
-//    - BLS: bytes(x) where bytes() is the big-endian encoding padded to the order of BLS12-381.
-// for all algorithms supported, the input is the big-endian encoding
-// of the private scalar, less than the curve order and left-padded to 32 bytes
-//  - (nil, error) if an unexpected error occurs
-//  - (sk, nil) otherwise
-func DecodePrivateKey(algo SigningAlgorithm, data []byte) (PrivateKey, error) {
- signer, err := newSigner(algo)
- if err != nil {
- return nil, fmt.Errorf("decode private key failed: %w", err)
- }
- return signer.decodePrivateKey(data)
-}
-
-// DecodePublicKey decodes an array of bytes into a public key of the given algorithm
-//
-// The function returns:
-//  - (nil, invalidInputsErrors) if the signing algorithm is not supported
-//  - (nil, invalidInputsErrors) if the input does not serialize a valid public key:
-//    - ECDSA: bytes(x)||bytes(y) where bytes() is the big-endian encoding padded to the field size.
-//    - BLS: compressed serialization of a G2 point following https://www.ietf.org/archive/id/draft-irtf-cfrg-pairing-friendly-curves-08.html#name-zcash-serialization-format-
-//  - (nil, error) if an unexpected error occurs
-//  - (pk, nil) otherwise
-func DecodePublicKey(algo SigningAlgorithm, data []byte) (PublicKey, error) {
- signer, err := newSigner(algo)
- if err != nil {
- return nil, fmt.Errorf("decode public key failed: %w", err)
- }
- return signer.decodePublicKey(data)
-}
-
-// DecodePublicKeyCompressed decodes an array of bytes given in a compressed representation into a public key of the given algorithm.
-// Only ECDSA is supported (BLS uses the compressed serialization by default).
-//
-// The function returns:
-//  - (nil, invalidInputsErrors) if the signing algorithm is not supported (is not ECDSA)
-//  - (nil, invalidInputsErrors) if the input does not serialize a valid public key:
-//    - ECDSA: sign_byte||bytes(x) according to X9.62 section 4.3.6.
-//  - (nil, error) if an unexpected error occurs
-//  - (pk, nil) otherwise
-func DecodePublicKeyCompressed(algo SigningAlgorithm, data []byte) (PublicKey, error) {
- signer, err := newSigner(algo)
- if err != nil {
- return nil, fmt.Errorf("decode public key failed: %w", err)
- }
- return signer.decodePublicKeyCompressed(data)
-}
-
-// Signature type tools
-
-// Bytes returns a byte array of the signature data
-func (s Signature) Bytes() []byte {
- return s[:]
-}
-
-// String returns a String representation of the signature data
-func (s Signature) String() string {
- return fmt.Sprintf("%#x", s.Bytes())
-}
-
-// Key Pair
-
-// PrivateKey is an unspecified signature scheme private key
-type PrivateKey interface {
- // Algorithm returns the signing algorithm related to the private key.
- Algorithm() SigningAlgorithm
- // Size returns the key size in bytes.
- Size() int
- // String returns a hex representation of the key
- String() string
- // Sign generates a signature using the provided hasher.
- Sign([]byte, hash.Hasher) (Signature, error)
- // PublicKey returns the public key.
- PublicKey() PublicKey
- // Encode returns a bytes representation of the private key
- Encode() []byte
- // Equals returns true if the given PrivateKeys are equal. Keys are considered unequal if their algorithms are
- // unequal or if their encoded representations are unequal. If the encoding of either key fails, they are considered
- // unequal as well.
- Equals(PrivateKey) bool
-}
-
-// PublicKey is an unspecified signature scheme public key.
-type PublicKey interface {
- // Algorithm returns the signing algorithm related to the public key.
- Algorithm() SigningAlgorithm
- // Size returns the key size in bytes.
- Size() int
- // String returns a hex representation of the key
- String() string
- // Verify verifies a signature of an input message using the provided hasher.
- Verify(Signature, []byte, hash.Hasher) (bool, error)
- // Encode returns a bytes representation of the public key.
- Encode() []byte
- // EncodeCompressed returns a compressed byte representation of the public key.
- // The compressed serialization concept is generic to elliptic curves,
- // but we refer to individual curve parameters for details of the compressed format.
- EncodeCompressed() []byte
- // Equals returns true if the given PublicKeys are equal. Keys are considered unequal if their algorithms are
- // unequal or if their encoded representations are unequal. If the encoding of either key fails, they are considered
- // unequal as well.
- Equals(PublicKey) bool
-}
diff --git a/crypto/sign_norelic.go b/crypto/sign_norelic.go
deleted file mode 100644
index 7e6dd4c0d10..00000000000
--- a/crypto/sign_norelic.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build !relic
-// +build !relic
-
-package crypto
-
-// newSigner chooses and initializes a signature scheme
-func newSigner(algo SigningAlgorithm) (signer, error) {
- return newNonRelicSigner(algo)
-}
-
-func init() {
- initNonRelic()
-}
diff --git a/crypto/sign_relic.go b/crypto/sign_relic.go
deleted file mode 100644
index 980fca20c51..00000000000
--- a/crypto/sign_relic.go
+++ /dev/null
@@ -1,42 +0,0 @@
-//go:build relic
-// +build relic
-
-package crypto
-
-import (
- "fmt"
-)
-
-// newSigner chooses and initializes a signature scheme
-func newSigner(algo SigningAlgorithm) (signer, error) {
- // try Relic algos
- if signer := relicSigner(algo); signer != nil {
- return signer, nil
- }
- // return a non-Relic algo
- return newNonRelicSigner(algo)
-}
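// Editor's addition: an illustrative key generation / sign / verify sketch
// using the package API above; it is not part of the original file, and the
// seed handling is a hypothetical placeholder (a real caller should fill the
// seed from a secure RNG such as crypto/rand):
//
//	func ExampleSignVerify() {
//		seed := make([]byte, 32) // placeholder; fill from a secure RNG
//		sk, err := GeneratePrivateKey(ECDSAP256, seed)
//		if err != nil {
//			panic(err)
//		}
//		hasher := hash.NewSHA3_256()
//		sig, err := sk.Sign([]byte("message"), hasher)
//		if err != nil {
//			panic(err)
//		}
//		valid, err := sk.PublicKey().Verify(sig, []byte("message"), hasher)
//		_ = valid // true (with err == nil) for an honest signature
//	}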
-
-// relicSigner returns a signer that depends on the Relic library.
-func relicSigner(algo SigningAlgorithm) signer {
- if algo == BLSBLS12381 {
- return blsInstance
- }
- return nil
-}
-
-// Initialize Relic with the BLS context on BLS 12-381
-func init() {
- initRelic()
- initNonRelic()
-}
-
-// Initialize the context of all algos requiring Relic
-func initRelic() {
- blsInstance = &blsBLS12381Algo{
- algo: BLSBLS12381,
- }
- if err := blsInstance.init(); err != nil {
- panic(fmt.Sprintf("initialization of BLS failed: %s", err.Error()))
- }
-}
diff --git a/crypto/sign_test_utils.go b/crypto/sign_test_utils.go
deleted file mode 100644
index a98f7d0713b..00000000000
--- a/crypto/sign_test_utils.go
+++ /dev/null
@@ -1,369 +0,0 @@
-package crypto
-
-import (
- crand "crypto/rand"
- "fmt"
- mrand "math/rand"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/onflow/flow-go/crypto/hash"
-)
-
-func getPRG(t *testing.T) *mrand.Rand {
- random := time.Now().UnixNano()
- t.Logf("rng seed is %d", random)
- rng := mrand.New(mrand.NewSource(random))
- return rng
-}
-
-func TestKeyGenErrors(t *testing.T) {
- seed := make([]byte, 50)
- invalidSigAlgo := SigningAlgorithm(20)
- sk, err := GeneratePrivateKey(invalidSigAlgo, seed)
- assert.Nil(t, sk)
- assert.Error(t, err)
- assert.True(t, IsInvalidInputsError(err))
-}
-
-func TestHasherErrors(t *testing.T) {
- t.Run("nilHasher error sanity", func(t *testing.T) {
- err := nilHasherError
- invInpError := invalidInputsErrorf("")
- otherError := fmt.Errorf("some error")
- assert.True(t, IsNilHasherError(err))
- assert.False(t, IsInvalidInputsError(err))
- assert.False(t, IsNilHasherError(invInpError))
- assert.False(t, IsNilHasherError(otherError))
- assert.False(t, IsNilHasherError(nil))
- })
-
- t.Run("invalidHasherSize error sanity", func(t *testing.T) {
- err := invalidHasherSizeErrorf("")
- invInpError := invalidInputsErrorf("")
- otherError := fmt.Errorf("some error")
- assert.True(t, IsInvalidHasherSizeError(err))
- assert.False(t, IsInvalidInputsError(err))
- assert.False(t, IsInvalidHasherSizeError(invInpError))
- assert.False(t, IsInvalidHasherSizeError(otherError))
- assert.False(t, IsInvalidHasherSizeError(nil))
- })
-}
-
-// tests sign and verify are consistent for multiple generated keys and messages
-func testGenSignVerify(t *testing.T, salg SigningAlgorithm, halg hash.Hasher) {
- t.Logf("Testing Generation/Signature/Verification for %s", salg)
- // make sure the length is larger than the minimum seed lengths of all the signature algos
- seedMinLength := 48
- seed := make([]byte, seedMinLength)
- input := make([]byte, 100)
- rand := getPRG(t)
-
- loops := 50
- for j := 0; j < loops; j++ {
- n, err := rand.Read(seed)
- require.Equal(t, n, seedMinLength)
- require.NoError(t, err)
- sk, err := GeneratePrivateKey(salg, seed)
- require.NoError(t, err)
- _, err = rand.Read(input)
- require.NoError(t, err)
- s, err := sk.Sign(input, halg)
- require.NoError(t, err)
- pk := sk.PublicKey()
-
- // test a valid signature
- result, err := pk.Verify(s, input, halg)
- require.NoError(t, err)
- assert.True(t, result, fmt.Sprintf(
- "Verification should succeed:\n signature:%s\n message:%x\n private key:%s", s, input, sk))
-
- // test with a different message
- input[0] ^= 1
- result, err = pk.Verify(s, input, halg)
- require.NoError(t, err)
- assert.False(t, result, fmt.Sprintf(
- "Verification should fail:\n signature:%s\n message:%x\n private key:%s", s, input, sk))
- input[0] ^= 1
-
- // test with a valid but different key
- seed[0] ^= 1
- wrongSk, err := GeneratePrivateKey(salg, seed)
GeneratePrivateKey(salg, seed) - require.NoError(t, err) - result, err = wrongSk.PublicKey().Verify(s, input, halg) - require.NoError(t, err) - assert.False(t, result, fmt.Sprintf( - "Verification should fail:\n signature:%s\n message:%x\n private key:%s", s, input, sk)) - - // test a wrong signature length - invalidLen := rand.Intn(2 * len(s)) // try random invalid lengths - if invalidLen == len(s) { // map to an invalid length - invalidLen = 0 - } - invalidSig := make([]byte, invalidLen) - result, err = pk.Verify(invalidSig, input, halg) - require.NoError(t, err) - assert.False(t, result, fmt.Sprintf( - "Verification should fail:\n signature:%s\n with invalid length %d", invalidSig, invalidLen)) - } -} - -// tests the key generation constraints with regards to the input seed, mainly -// the seed length constraints and the result determinicity. -func testKeyGenSeed(t *testing.T, salg SigningAlgorithm, minLen int, maxLen int) { - t.Run("seed length check", func(t *testing.T) { - // valid seed lengths - seed := make([]byte, minLen) - _, err := GeneratePrivateKey(salg, seed) - assert.NoError(t, err) - if maxLen > 0 { - seed = make([]byte, maxLen) - _, err = GeneratePrivateKey(salg, seed) - assert.NoError(t, err) - } - // invalid seed lengths - seed = make([]byte, minLen-1) - _, err = GeneratePrivateKey(salg, seed) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - if maxLen > 0 { - seed = make([]byte, maxLen+1) - _, err = GeneratePrivateKey(salg, seed) - assert.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - } - }) - - t.Run("deterministic generation", func(t *testing.T) { - - // same seed results in the same key - seed := make([]byte, minLen) - read, err := crand.Read(seed) - require.Equal(t, read, minLen) - require.NoError(t, err) - sk1, err := GeneratePrivateKey(salg, seed) - require.NoError(t, err) - sk2, err := GeneratePrivateKey(salg, seed) - require.NoError(t, err) - assert.True(t, sk1.Equals(sk2)) - // different seed results in a different key - seed[0] ^= 1 // alter a seed bit - sk2, err = GeneratePrivateKey(salg, seed) - require.NoError(t, err) - assert.False(t, sk1.Equals(sk2)) - }) -} - -var BLS12381Order = []byte{0x73, 0xED, 0xA7, 0x53, 0x29, 0x9D, 0x7D, 0x48, 0x33, 0x39, - 0xD8, 0x08, 0x09, 0xA1, 0xD8, 0x05, 0x53, 0xBD, 0xA4, 0x02, 0xFF, 0xFE, - 0x5B, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01} - -func testEncodeDecode(t *testing.T, salg SigningAlgorithm) { - t.Logf("Testing encode/decode for %s", salg) - rand := getPRG(t) - // make sure the length is larger than minimum lengths of all the signaure algos - seedMinLength := 48 - - t.Run("happy path tests", func(t *testing.T) { - loops := 50 - for j := 0; j < loops; j++ { - // generate a private key - seed := make([]byte, seedMinLength) - read, err := rand.Read(seed) - require.Equal(t, read, seedMinLength) - require.NoError(t, err) - sk, err := GeneratePrivateKey(salg, seed) - assert.Nil(t, err, "the key generation failed") - seed[0] ^= 1 // alter the seed to get a new private key - distinctSk, err := GeneratePrivateKey(salg, seed) - require.NoError(t, err) - - // check private key encoding - skBytes := sk.Encode() - skCheck, err := DecodePrivateKey(salg, skBytes) - require.Nil(t, err, "the key decoding failed") - assert.True(t, sk.Equals(skCheck), "key equality check failed") - skCheckBytes := skCheck.Encode() - assert.Equal(t, skBytes, skCheckBytes, "keys should be equal") - distinctSkBytes := distinctSk.Encode() - assert.NotEqual(t, skBytes, distinctSkBytes, "keys should be different") - 
- // check public key encoding - pk := sk.PublicKey() - pkBytes := pk.Encode() - pkCheck, err := DecodePublicKey(salg, pkBytes) - require.Nil(t, err, "the key decoding failed") - assert.True(t, pk.Equals(pkCheck), "key equality check failed") - pkCheckBytes := pkCheck.Encode() - assert.Equal(t, pkBytes, pkCheckBytes, "keys should be equal") - distinctPkBytes := distinctSk.PublicKey().Encode() - assert.NotEqual(t, pkBytes, distinctPkBytes, "keys should be different") - - // same for the compressed encoding - pkComprBytes := pk.EncodeCompressed() - pkComprCheck, err := DecodePublicKeyCompressed(salg, pkComprBytes) - require.Nil(t, err, "the key decoding failed") - assert.True(t, pk.Equals(pkComprCheck), "key equality check failed") - pkCheckComprBytes := pkComprCheck.EncodeCompressed() - assert.Equal(t, pkComprBytes, pkCheckComprBytes, "keys should be equal") - distinctPkComprBytes := distinctSk.PublicKey().EncodeCompressed() - assert.NotEqual(t, pkComprBytes, distinctPkComprBytes, "keys should be different") - } - }) - - // test invalid private keys (equal to the curve group order) - t.Run("private keys equal to the group order", func(t *testing.T) { - groupOrder := make(map[SigningAlgorithm][]byte) - groupOrder[ECDSAP256] = []byte{255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255, - 255, 255, 255, 255, 255, 188, 230, 250, 173, 167, - 23, 158, 132, 243, 185, 202, 194, 252, 99, 37, 81} - - groupOrder[ECDSASecp256k1] = []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 254, 186, 174, 220, 230, - 175, 72, 160, 59, 191, 210, 94, 140, 208, 54, 65, 65} - - groupOrder[BLSBLS12381] = BLS12381Order - - sk, err := DecodePrivateKey(salg, groupOrder[salg]) - require.Error(t, err, "the key decoding should fail - private key value is too large") - assert.True(t, IsInvalidInputsError(err)) - assert.Nil(t, sk) - }) - - // test invalid private and public keys (invalid length) - t.Run("invalid key length", func(t *testing.T) { - // private key - skLens := make(map[SigningAlgorithm]int) - skLens[ECDSAP256] = PrKeyLenECDSAP256 - skLens[ECDSASecp256k1] = PrKeyLenECDSASecp256k1 - skLens[BLSBLS12381] = 32 - - bytes := make([]byte, skLens[salg]+1) - sk, err := DecodePrivateKey(salg, bytes) - require.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.Nil(t, sk) - - // public key - pkLens := make(map[SigningAlgorithm]int) - pkLens[ECDSAP256] = PubKeyLenECDSAP256 - pkLens[ECDSASecp256k1] = PubKeyLenECDSASecp256k1 - pkLens[BLSBLS12381] = 96 - - bytes = make([]byte, pkLens[salg]+1) - pk, err := DecodePublicKey(salg, bytes) - require.Error(t, err) - assert.True(t, IsInvalidInputsError(err)) - assert.Nil(t, pk) - }) -} - -func testEquals(t *testing.T, salg SigningAlgorithm, otherSigAlgo SigningAlgorithm) { - t.Logf("Testing Equals for %s", salg) - rand := getPRG(t) - // make sure the length is larger than minimum lengths of all the signaure algos - seedMinLength := 48 - - // generate a key pair - seed := make([]byte, seedMinLength) - n, err := rand.Read(seed) - require.Equal(t, n, seedMinLength) - require.NoError(t, err) - - // first pair - sk1, err := GeneratePrivateKey(salg, seed) - require.NoError(t, err) - pk1 := sk1.PublicKey() - - // second pair without changing the seed - sk2, err := GeneratePrivateKey(salg, seed) - require.NoError(t, err) - pk2 := sk2.PublicKey() - - // unrelated algo pair - sk3, err := GeneratePrivateKey(otherSigAlgo, seed) - require.NoError(t, err) - pk3 := sk3.PublicKey() - - // fourth pair with same algo but a different seed - seed[0] ^= 1 - sk4, 
err := GeneratePrivateKey(salg, seed) - require.NoError(t, err) - pk4 := sk4.PublicKey() - - // tests - assert.True(t, sk1.Equals(sk2), "key equality should return true") - assert.True(t, pk1.Equals(pk2), "key equality should return true") - assert.False(t, sk1.Equals(sk3), "key equality should return false") - assert.False(t, pk1.Equals(pk3), "key equality should return false") - assert.False(t, sk1.Equals(sk4), "key equality should return false") - assert.False(t, pk1.Equals(pk4), "key equality should return false") -} - -func testKeysAlgorithm(t *testing.T, sk PrivateKey, salg SigningAlgorithm) { - t.Logf("Testing key.Algorithm for %s", salg) - alg := sk.Algorithm() - assert.Equal(t, alg, salg) - alg = sk.PublicKey().Algorithm() - assert.Equal(t, alg, salg) -} - -func testKeySize(t *testing.T, sk PrivateKey, skLen int, pkLen int) { - t.Logf("Testing key.Size for %s", sk.Algorithm()) - size := sk.Size() - assert.Equal(t, size, skLen) - size = sk.PublicKey().Size() - assert.Equal(t, size, pkLen) -} - -func benchVerify(b *testing.B, algo SigningAlgorithm, halg hash.Hasher) { - seed := make([]byte, 48) - for j := 0; j < len(seed); j++ { - seed[j] = byte(j) - } - sk, err := GeneratePrivateKey(algo, seed) - require.NoError(b, err) - pk := sk.PublicKey() - - input := []byte("Bench input") - s, err := sk.Sign(input, halg) - require.NoError(b, err) - var result bool - - b.ResetTimer() - for i := 0; i < b.N; i++ { - result, err = pk.Verify(s, input, halg) - require.NoError(b, err) - } - // sanity check - require.True(b, result) - - b.StopTimer() -} - -func benchSign(b *testing.B, algo SigningAlgorithm, halg hash.Hasher) { - seed := make([]byte, 48) - for j := 0; j < len(seed); j++ { - seed[j] = byte(j) - } - sk, err := GeneratePrivateKey(algo, seed) - require.NoError(b, err) - - input := []byte("Bench input") - var signature []byte - - b.ResetTimer() - for i := 0; i < b.N; i++ { - signature, err = sk.Sign(input, halg) - require.NoError(b, err) - } - // sanity check - result, err := sk.PublicKey().Verify(signature, input, halg) - require.NoError(b, err) - require.True(b, result) - - b.StopTimer() -} diff --git a/crypto/spock.go b/crypto/spock.go deleted file mode 100644 index 2487f39ce1b..00000000000 --- a/crypto/spock.go +++ /dev/null @@ -1,106 +0,0 @@ -//go:build relic -// +build relic - -package crypto - -// SPoCK design based on the BLS signature scheme. -// BLS is using BLS12-381 curve and the same settings in bls.go. - -// #cgo CFLAGS: -g -Wall -std=c99 -// #cgo LDFLAGS: -L${SRCDIR}/relic/build/lib -l relic_s -// #include "bls_include.h" -import "C" -import ( - "fmt" - - "github.com/onflow/flow-go/crypto/hash" -) - -// SPOCKProve generates a spock poof for data under the private key sk. -// -// The function returns: -// - (false, nilHasherError) if the hasher is nil -// - (false, invalidHasherSiseError) if hasher's output size is not 128 bytes -// - (nil, notBLSKeyError) if input key is not a BLS key -// - (nil, error) if an unexpected error occurs -// - (proof, nil) otherwise -func SPOCKProve(sk PrivateKey, data []byte, kmac hash.Hasher) (Signature, error) { - if sk.Algorithm() != BLSBLS12381 { - return nil, notBLSKeyError - } - - // BLS signature of data - return sk.Sign(data, kmac) -} - -// SPOCKVerifyAgainstData verifies a SPoCK proof is generated from the given data -// and the prover's public key. -// -// This is a simple BLS signature verifictaion of the proof under the input data -// and public key. 
-// -// The function returns: -// - (false, notBLSKeyError) if input key is not a BLS key -// - (false, nilHasherError) if the hasher is nil -// - (false, invalidHasherSiseError) if hasher's output size is not 128 bytes -// - (false, error) if an unexpected error occurs -// - (validity, nil) otherwise -func SPOCKVerifyAgainstData(pk PublicKey, proof Signature, data []byte, kmac hash.Hasher) (bool, error) { - if pk.Algorithm() != BLSBLS12381 { - return false, notBLSKeyError - } - // BLS verification of data - return pk.Verify(proof, data, kmac) -} - -// SPOCKVerify checks whether two couples of (SPoCK proof, public key) are consistent. -// -// Two (SPoCK proof, public key) couples are consistent if there exists a message such -// that each proof could be generated from the message and the private key corresponding -// to the respective public key. -// -// If the input proof slices have an invalid length or fail to deserialize into curve -// points, the function returns false without an error. -// The proofs membership checks in G1 are included in the verifcation. -// -// The function does not check the public keys membership in G2 because it is -// guaranteed by the package. However, the caller must make sure each input public key has been -// verified against a proof of possession prior to calling this function. -// -// The function returns: -// - (false, notBLSKeyError) if at least one key is not a BLS key. -// - (false, error) if an unexpected error occurs. -// - (validity, nil) otherwise -func SPOCKVerify(pk1 PublicKey, proof1 Signature, pk2 PublicKey, proof2 Signature) (bool, error) { - blsPk1, ok1 := pk1.(*pubKeyBLSBLS12381) - blsPk2, ok2 := pk2.(*pubKeyBLSBLS12381) - if !(ok1 && ok2) { - return false, notBLSKeyError - } - - if len(proof1) != signatureLengthBLSBLS12381 || len(proof2) != signatureLengthBLSBLS12381 { - return false, nil - } - - // if pk1 and proof1 are identities of their respective groups, any couple (pk2, proof2) would - // verify the pairing equality which breaks the unforgeability of the SPoCK scheme. This edge case - // is avoided by not allowing an identity pk1. Similarly, an identity pk2 is not allowed. 
- if blsPk1.isIdentity || blsPk2.isIdentity { - return false, nil - } - - // verify the spock proof using the secret data - verif := C.bls_spock_verify((*C.ep2_st)(&blsPk1.point), - (*C.uchar)(&proof1[0]), - (*C.ep2_st)(&blsPk2.point), - (*C.uchar)(&proof2[0])) - - switch verif { - case invalid: - return false, nil - case valid: - return true, nil - default: - return false, fmt.Errorf("SPoCK verification failed") - } -} diff --git a/crypto/spock_test.go b/crypto/spock_test.go deleted file mode 100644 index 596968234e4..00000000000 --- a/crypto/spock_test.go +++ /dev/null @@ -1,185 +0,0 @@ -//go:build relic -// +build relic - -package crypto - -import ( - crand "crypto/rand" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestSPOCKProveVerifyAgainstData(t *testing.T) { - // test the consistency with different data - seed := make([]byte, KeyGenSeedMinLen) - data := make([]byte, 100) - - n, err := crand.Read(seed) - require.Equal(t, n, KeyGenSeedMinLen) - require.NoError(t, err) - sk, err := GeneratePrivateKey(BLSBLS12381, seed) - require.NoError(t, err) - _, err = crand.Read(data) - require.NoError(t, err) - - // generate a SPoCK proof - kmac := NewExpandMsgXOFKMAC128("spock test") - s, err := SPOCKProve(sk, data, kmac) - require.NoError(t, err) - pk := sk.PublicKey() - - // SPoCK verify against the data (happy path) - t.Run("correctness check", func(t *testing.T) { - result, err := SPOCKVerifyAgainstData(pk, s, data, kmac) - require.NoError(t, err) - assert.True(t, result, - "Verification should succeed:\n signature:%s\n message:%s\n private key:%s", s, data, sk) - }) - - // test with a different message (unhappy path) - t.Run("invalid message", func(t *testing.T) { - data[0] ^= 1 - result, err := SPOCKVerifyAgainstData(pk, s, data, kmac) - require.NoError(t, err) - assert.False(t, result, - "Verification should fail:\n signature:%s\n message:%s\n private key:%s", s, data, sk) - data[0] ^= 1 - }) - - // test with a valid but different key (unhappy path) - t.Run("invalid key", func(t *testing.T) { - seed[0] ^= 1 - wrongSk, err := GeneratePrivateKey(BLSBLS12381, seed) - require.NoError(t, err) - result, err := SPOCKVerifyAgainstData(wrongSk.PublicKey(), s, data, kmac) - require.NoError(t, err) - assert.False(t, result, - "Verification should fail:\n signature:%s\n message:%s\n private key:%s", s, data, sk) - }) - - // test with an invalid key type - t.Run("invalid key type", func(t *testing.T) { - wrongSk := invalidSK(t) - result, err := SPOCKVerifyAgainstData(wrongSk.PublicKey(), s, data, kmac) - require.Error(t, err) - assert.True(t, IsNotBLSKeyError(err)) - assert.False(t, result) - }) - - // test with an identity public key - t.Run("identity proof", func(t *testing.T) { - // verifying with a pair of (proof, publicKey) equal to (identity_signature, identity_key) should - // return false - identityProof := identityBLSSignature - result, err := SPOCKVerifyAgainstData(IdentityBLSPublicKey(), identityProof, data, kmac) - assert.NoError(t, err) - assert.False(t, result) - }) -} - -// tests of happy and unhappy paths of SPOCKVerify -func TestSPOCKProveVerify(t *testing.T) { - // test the consistency with different data - seed1 := make([]byte, KeyGenSeedMinLen) - seed2 := make([]byte, KeyGenSeedMinLen) - data := make([]byte, 100) - - // data - _, err := crand.Read(data) - require.NoError(t, err) - // sk1 - n, err := crand.Read(seed1) - require.Equal(t, n, KeyGenSeedMinLen) - require.NoError(t, err) - sk1, err := 
GeneratePrivateKey(BLSBLS12381, seed1) - require.NoError(t, err) - // sk2 - n, err = crand.Read(seed2) - require.Equal(t, n, KeyGenSeedMinLen) - require.NoError(t, err) - sk2, err := GeneratePrivateKey(BLSBLS12381, seed2) - require.NoError(t, err) - - // generate SPoCK proofs - kmac := NewExpandMsgXOFKMAC128("spock test") - pr1, err := SPOCKProve(sk1, data, kmac) - require.NoError(t, err) - pr2, err := SPOCKProve(sk2, data, kmac) - require.NoError(t, err) - - // SPoCK verify against the data, happy path - t.Run("correctness check", func(t *testing.T) { - result, err := SPOCKVerify(sk1.PublicKey(), pr1, sk2.PublicKey(), pr2) - require.NoError(t, err) - assert.True(t, result, - "Verification should succeed:\n proofs:%s\n %s\n private keys:%s\n %s\n data:%x", - pr1, pr2, sk1, sk2, data) - }) - - // test with a different message, verification should fail for proofs - // of different messages. - t.Run("inconsistent proofs", func(t *testing.T) { - data[0] ^= 1 // alter the data - pr2bis, err := SPOCKProve(sk2, data, kmac) - require.NoError(t, err) - result, err := SPOCKVerify(sk1.PublicKey(), pr1, sk2.PublicKey(), pr2bis) - require.NoError(t, err) - assert.False(t, result, - "Verification should fail:\n proofs:%s\n %s\n private keys:%s\n %s \n data:%x", - pr1, pr2bis, sk1, sk2, data) - data[0] ^= 1 // restore the data - }) - - // test with a different key, verification should fail if the public keys are not - // matching the private keys used to generate the proofs. - t.Run("invalid public key", func(t *testing.T) { - seed2[0] ^= 1 // alter the seed - sk2bis, err := GeneratePrivateKey(BLSBLS12381, seed2) - require.NoError(t, err) - result, err := SPOCKVerify(sk1.PublicKey(), pr1, sk2bis.PublicKey(), pr2) - require.NoError(t, err) - assert.False(t, result, - "Verification should succeed:\n proofs:%s\n %s\n private keys:%s\n %s \n data:%s", - pr1, pr2, sk1, sk2bis, data) - }) - - // test with an invalid key type - t.Run("invalid key type", func(t *testing.T) { - wrongSk := invalidSK(t) - - pr, err := SPOCKProve(wrongSk, data, kmac) - require.Error(t, err) - assert.True(t, IsNotBLSKeyError(err)) - assert.Nil(t, pr) - - result, err := SPOCKVerify(wrongSk.PublicKey(), pr1, sk2.PublicKey(), pr2) - require.Error(t, err) - assert.True(t, IsNotBLSKeyError(err)) - assert.False(t, result) - - result, err = SPOCKVerify(sk1.PublicKey(), pr1, wrongSk.PublicKey(), pr2) - require.Error(t, err) - assert.True(t, IsNotBLSKeyError(err)) - assert.False(t, result) - }) - - // test with identity public key and proof - t.Run("identity proof", func(t *testing.T) { - // verifying with either pair of (proof, publicKey) equal to (identity_signature, identity_key) should - // return falsen with any other (proof, key) pair. 
- identityProof := identityBLSSignature - result, err := SPOCKVerify(IdentityBLSPublicKey(), identityProof, sk2.PublicKey(), pr2) - assert.NoError(t, err) - assert.False(t, result) - - result, err = SPOCKVerify(sk1.PublicKey(), pr1, IdentityBLSPublicKey(), identityProof) - assert.NoError(t, err) - assert.False(t, result) - - result, err = SPOCKVerify(IdentityBLSPublicKey(), identityProof, IdentityBLSPublicKey(), identityProof) - assert.NoError(t, err) - assert.False(t, result) - }) -} diff --git a/crypto/thresholdsign.go b/crypto/thresholdsign.go deleted file mode 100644 index 2dae7061b76..00000000000 --- a/crypto/thresholdsign.go +++ /dev/null @@ -1,146 +0,0 @@ -package crypto - -import ( - "errors" - "fmt" -) - -// A threshold signature scheme allows any subset of (t+1) -// valid signature shares to reconstruct the threshold signature. -// Up to (t) shares do not reveal any information about the threshold -// signature. -// Although the API allows using arbitrary values of (t), -// the threshold signature scheme is secure in the presence of up to (t) -// malicious participants when (t < n/2). -// In order to optimize equally for unforgeability and robustness, -// the input threshold value (t) should be set to t = floor((n-1)/2). - -const ( - // ThresholdSignMinSize is the minimum size of a group participating in a threshold signature protocol - ThresholdSignMinSize = MinimumThreshold + 1 - // ThresholdSignMaxSize is the maximum size of a group participating in a threshold signature protocol - ThresholdSignMaxSize = DKGMaxSize -) - -// ThresholdSignatureInspector is an inspector of the threshold signature protocol. -// The interface only allows inspecting the threshold signing protocol without taking part in it. -type ThresholdSignatureInspector interface { - // VerifyShare verifies the input signature against the stored message and stored - // key at the input index. This function does not update the internal state. - // The function is thread-safe. - // Returns: - // - (true, nil) if the signature is valid - // - (false, nil) if `orig` is a valid index but the signature share is invalid - // - (false, InvalidInputsError) if `orig` is an invalid index value - // - (false, error) for all other unexpected errors - VerifyShare(orig int, share Signature) (bool, error) - - // VerifyThresholdSignature verifies the input signature against the stored - // message and stored group public key. It does not update the internal state. - // The function is thread-safe. - // Returns: - // - (true, nil) if the signature is valid - // - (false, nil) if the signature is invalid - // - (false, error) for all other unexpected errors - VerifyThresholdSignature(thresholdSignature Signature) (bool, error) - - // EnoughShares indicates whether enough shares have been accumulated in order to reconstruct - // a group signature. This function is thread safe and locks the internal state. - // Returns: - // - true if and only if at least (threshold+1) shares were added - EnoughShares() bool - - // TrustedAdd adds a signature share to the internal pool of shares - // without verifying the signature against the message and the participant's - // public key. This function is thread safe and locks the internal state. - // - // The share is only added if the signer index is valid and has not been - // added yet. Moreover, the share is added only if not enough shares were collected. 
- // The function returns: - // - (true, nil) if enough signature shares were already collected and no error occurred - // - (false, nil) if not enough shares were collected and no error occurred - // - (false, InvalidInputsError) if index is invalid - // - (false, duplicatedSignerError) if a signature for the index was previously added - TrustedAdd(orig int, share Signature) (bool, error) - - // VerifyAndAdd verifies a signature share (same as `VerifyShare`), - // and may or may not add the share to the local pool of shares. - // This function is thread safe and locks the internal state. - // - // The share is only added if the signature is valid, the signer index is valid and has not been - // added yet. Moreover, the share is added only if not enough shares were collected. - // Boolean returns: - // - First boolean output is true if the share is valid and no error is returned, and false otherwise. - // - Second boolean output is true if enough shares were collected and no error is returned, and false otherwise. - // Error returns: - // - invalidInputsError if input index is invalid. A signature that doesn't verify against the signer's - // public key is not considered an invalid input. - // - duplicatedSignerError if signer was already added. - // - other errors if an unexpected exception occurred. - VerifyAndAdd(orig int, share Signature) (bool, bool, error) - - // HasShare checks whether the internal map contains the share of the given index. - // This function is thread safe. - // The function errors with InvalidInputsError if the index is invalid. - HasShare(orig int) (bool, error) - - // ThresholdSignature returns the threshold signature if the threshold was reached. - // The threshold signature is reconstructed only once and is cached for subsequent calls. - // - // Returns: - // - (signature, nil) if no error occurred - // - (nil, notEnoughSharesError) if not enough shares were collected - // - (nil, invalidSignatureError) if at least one collected share does not serialize to a valid BLS signature. - // - (nil, invalidInputsError) if the constructed signature failed to verify against the group public key and stored message. This post-verification - // is required for safety, as `TrustedAdd` allows adding invalid signatures. - // - (nil, error) for any other unexpected error. - ThresholdSignature() (Signature, error) -} - -// ThresholdSignatureParticipant is a participant in a threshold signature protocol. -// A participant is able to participate in a threshold signing protocol as well as inspecting the -// protocol. -type ThresholdSignatureParticipant interface { - ThresholdSignatureInspector - // SignShare generates a signature share using the current private key share. - // - // The function does not add the share to the internal pool of shares and do - // not update the internal state. - // This function is thread safe - // No error is expected unless an unexpected exception occurs - SignShare() (Signature, error) -} - -// duplicatedSignerError is an error returned when TrustedAdd or VerifyAndAdd encounter -// a signature share that has been already added to the internal state. 
-type duplicatedSignerError struct {
-	error
-}
-
-// duplicatedSignerErrorf constructs a new duplicatedSignerError
-func duplicatedSignerErrorf(msg string, args ...interface{}) error {
-	return &duplicatedSignerError{error: fmt.Errorf(msg, args...)}
-}
-
-// IsDuplicatedSignerError checks if the input error is a duplicatedSignerError
-func IsDuplicatedSignerError(err error) bool {
-	var target *duplicatedSignerError
-	return errors.As(err, &target)
-}
-
-// notEnoughSharesError is an error returned when ThresholdSignature is called
-// and not enough shares have been collected.
-type notEnoughSharesError struct {
-	error
-}
-
-// notEnoughSharesErrorf constructs a new notEnoughSharesError
-func notEnoughSharesErrorf(msg string, args ...interface{}) error {
-	return &notEnoughSharesError{error: fmt.Errorf(msg, args...)}
-}
-
-// IsNotEnoughSharesError checks if the input error is a notEnoughSharesError
-func IsNotEnoughSharesError(err error) bool {
-	var target *notEnoughSharesError
-	return errors.As(err, &target)
-}
diff --git a/crypto_adx_flag.mk b/crypto_adx_flag.mk
new file mode 100644
index 00000000000..0d0d5ac7467
--- /dev/null
+++ b/crypto_adx_flag.mk
@@ -0,0 +1,29 @@
+# This file can be included by Makefiles in order to set the `CRYPTO_FLAG` automatically for
+# a native build (build and run on the same machine, NOT for cross-compilation).
+#
+# The `CRYPTO_FLAG` is a Go command flag that should be used when the target machine's CPU executing
+# the command may not support ADX instructions.
+# For new machines that support ADX instructions, the `CRYPTO_FLAG` is not needed (or can be set
+# to an empty string).
+
+# First, detect ADX support:
+# `ADX_SUPPORT` is 1 if ADX instructions are supported on the current machine and 0 otherwise.
+ifeq ($(shell uname -s),Linux)
+# detect ADX support on the CURRENT Linux machine
+	ADX_SUPPORT := $(shell if ([ -f "/proc/cpuinfo" ] && grep -q -e '^flags.*\badx\b' /proc/cpuinfo); then echo 1; else echo 0; fi)
+else
+# on non-Linux machines, set the flag to 1 by default
+	ADX_SUPPORT := 1
+endif
+
+DISABLE_ADX := "-O2 -D__BLST_PORTABLE__"
+
+# Then, set `CRYPTO_FLAG`:
+# the crypto package uses BLST source files underneath, which may use ADX instructions.
+ifeq ($(ADX_SUPPORT), 1)
+# if ADX instructions are supported on the current machine, the default is to use the fast ADX implementation
+	CRYPTO_FLAG := ""
+else
+# if ADX instructions aren't supported, this CGO flag selects a slower, non-ADX implementation
+	CRYPTO_FLAG := $(DISABLE_ADX)
+endif
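+
+# Usage sketch (illustrative only, not part of the detection logic above): a
+# Makefile that includes this file would typically pass the flag to CGO when
+# invoking the Go toolchain, e.g.
+#
+#    include crypto_adx_flag.mk
+#
+#    build:
+#        CGO_CFLAGS=$(CRYPTO_FLAG) go build ./...
+#
+# On machines without ADX support, this resolves to CGO_CFLAGS="-O2 -D__BLST_PORTABLE__",
+# which selects BLST's slower but portable (non-ADX) code path.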
diff --git a/crypto_setup.sh b/crypto_setup.sh
deleted file mode 100644
index e9789c74a23..00000000000
--- a/crypto_setup.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-
-#!/bin/bash
-
-# crypto package
-PKG_NAME="github.com/onflow/flow-go/crypto"
-
-# go.mod
-MOD_FILE="./go.mod"
-
-# the version of onflow/flow-go/crypto used in the project is read from the go.mod file
-if [ -f "${MOD_FILE}" ]
-then
-   # extract the imported version
-   VERSION="$(go list -f '{{.Version}}' -m ${PKG_NAME})"
-   # go get the package
-   go get "${PKG_NAME}@${VERSION}" || { echo "go get the package failed"; exit 1; }
-   # using the right version, get the package directory path
-   PKG_DIR="$(go env GOPATH)/pkg/mod/${PKG_NAME}@${VERSION}"
-else
-   { echo "couldn't find go.mod file - make sure the script is in the project root directory"; exit 1; }
-fi
-
-# grant permissions if not existant
-if [[ ! -r ${PKG_DIR} || ! -w ${PKG_DIR} || ! -x ${PKG_DIR} ]]; then
-   chmod -R 755 "${PKG_DIR}"
-fi
-
-# get into the package directory and set up the external dependencies
-(
-   cd "${PKG_DIR}" || { echo "cd into the GOPATH package folder failed"; exit 1; }
-   go generate
-)
diff --git a/deploy/systemd-docker/example-node-infos.pub.json b/deploy/systemd-docker/example-node-infos.pub.json
index cfba2da97fc..a60dcf48625 100644
--- a/deploy/systemd-docker/example-node-infos.pub.json
+++ b/deploy/systemd-docker/example-node-infos.pub.json
@@ -5,7 +5,8 @@
     "NodeID": "9bbea0644b5e91ec66b4afddef7083e896da545d530ee52b85c78afa88301556",
     "Weight": 1000,
     "NetworkPubKey": "...",
-    "StakingPubKey": "..."
+    "StakingPubKey": "...",
+    "StakingKeyPoP": "..."
   },
   {
     "Role": "consensus",
@@ -13,7 +14,8 @@
     "NodeID": "9bbea0644b5e91ec66b4afddef7083e896da545d530ee52b85c78afa88301556",
     "Weight": 1000,
     "NetworkPubKey": "...",
-    "StakingPubKey": "..."
+    "StakingPubKey": "...",
+    "StakingKeyPoP": "..."
   },
   {
     "Role": "execution",
@@ -21,7 +23,8 @@
     "NodeID": "9bbea0644b5e91ec66b4afddef7083e896da545d530ee52b85c78afa88301556",
     "Weight": 1000,
     "NetworkPubKey": "...",
-    "StakingPubKey": "..."
+    "StakingPubKey": "...",
+    "StakingKeyPoP": "..."
   },
   {
     "Role": "verification",
@@ -29,6 +32,7 @@
     "NodeID": "9bbea0644b5e91ec66b4afddef7083e896da545d530ee52b85c78afa88301556",
     "Weight": 1000,
     "NetworkPubKey": "...",
-    "StakingPubKey": "..."
+    "StakingPubKey": "...",
+    "StakingKeyPoP": "..."
 }
 ]
diff --git a/docs/RELEASE.md b/docs/RELEASE.md
new file mode 100644
index 00000000000..b1229ec8386
--- /dev/null
+++ b/docs/RELEASE.md
@@ -0,0 +1,183 @@
+# Development
+
+Please carefully read our [Coding Conventions](../CodingConventions.md).
+
+## Branching Policy
+
+**This convention is _binding_**<sup>⭒</sup> **for all PRs in this repo**, as
+deviations pose a significant risk to mainnet liveness. If you believe the branching convention could be improved,
+please prepare a concrete proposal and add an agenda item for
+a [Core Protocol Working Group](https://github.com/onflow/Flow-Working-Groups/tree/main/core_protocol_working_group) meeting.
+
+(⭒) There may be scenarios where the Branching Policy has its limitations, and one-off deviations could be beneficial.
+This includes accidental deviations that, once discovered, are not easily undone.
+These cases should be carefully tracked and require awareness and detailed coordination.
+It is the responsibility of team members working on branches that break the policy to ensure
+the _entire_ engineering team working on `flow-go` is informed and stays in the loop.
+
+### Overview
+Conceptually, we maintain four categories of branches:
+* **deployment branches** contain software versions that were/are/should be running on testnet and mainnet.
+  The naming convention is `v0.minor` and loosely follows semantic versioning. The major version is currently pinned to `0`,
+  as the Flow protocol is not yet fully implemented. The minor version increases at every Height-Coordinated-Upgrade [HCU] or spork (points of breaking downwards compatibility).
+  In a nutshell, you can think of the minor version as a version number for the overall protocol. Each time there is a change in the protocol, such that nodes
+  running the old and new version can't be mixed, the minor version increases and we create a new deployment branch
+  (usually from `master`).
+* `master` accumulates all revisions and features that are either fully downwards compatible or can be rolled out via an HCU.
+  Probably the majority of PRs can be merged directly to master or via a feature branch. Furthermore, our ability to roll out upgrades to the live network via HCUs will significantly increase over time.
+* Most **feature branches** implement protocol changes small enough to be deployed via an HCU (and hence can be merged to `master`).
+  However, for the foreseeable future, large and broadly-breaking improvements will be necessary for a fast evolution and
+  feature-completion of Flow. Such features should be kept in their individual feature branches.
+* The `spork` branch will be created in preparation for a new spork from the most recent `master`. Feature branches holding HCU-**in**compatible upgrades will be
+  consolidated on this `spork` branch. The spork branch is intended to be very short-lived and only exist for a few weeks right before the testnet spork.
+  When the Flow network has committed to the spork on a governance level (currently coordinated by the Flow Foundation),
+  the `spork` branch will be merged back to `master`.
+
+We purposefully continue using the 'spork' terminology here to describe a "severely breaking change".
+Specifically, a spork describes a deployment of new node software which requires the entire network to be stopped and rebooted.
+We only carry over a snapshot of the execution state but fully re-initialize the protocol state. Thereby, major upgrades of the protocol,
+including migrations of the entire execution state to new formats, become reasonably straightforward.
+Additional context on HCUs vs sporks can be found [here](https://developers.flow.com/networks/node-ops/node-operation/hcu#hcu-versus-spork).
+
+
+<img src='./docs/images/flow-go_branching-convention.png' width='350'>
+
+
+
+
+#### Motivations
+* Based on the decentralized nature of blockchain networks, continuous deployment is practically intractable (without compromising decentralization).
+Therefore, longer wait times for features until their deployment must be accounted for in the development process. This convention has been incrementally
+developed and refined based on the multi-year learnings of managing the deployment of the Flow network.
+* Deployment branches, `master` (containing a runnable snapshot of the most recent development state), and feature branches are intuitively exactly what you
+  would expect without further Flow-specific context. We just _extend_ the common convention by a few edge cases accounting for the upgrade constraints of a blockchain network:
+  * On a specific deployment branch, there can only be non-breaking changes. This is to prevent accidents where we roll out new node
+    software incrementally to one node after another, but then discover later that there is some specific case where the two versions don't work together and the network halts.
+  * Due to the limitations of upgrades that can be rolled out via HCUs, we have to separate spork-level features. That is the scenario we address with long-living
+    feature branches and the spork branch.
+* You may wonder why we don't just use a single spork branch right away, where `master` would contain all HCU-compatible upgrades and the spork branch would, _in addition_, also contain the HCU-**in**compatible upgrades.
+  We optimize for the case where the majority of changes are HCU-compatible, while we have very few HCU-incompatible features in the pipeline, which tend to go very deep into one specific area of the code base.
+  If we collected the HCU-incompatible features in a single 'spork' branch, we would need to merge master into that. That means an engineer would need to know all HCU-incompatible features
+  currently in the pipeline to resolve merge conflicts. In our model, where teams develop features in parallel, it has proven efficient for each team to maintain their own HCU-incompatible feature
+  branches and regularly merge the evolving `master` into them. As feature branches stay aligned with `master`, the last remaining step of merging all the feature branches together is usually not a big lift.
+* During the limited lifetime of the `spork` branch, the evolution of `master` should be very small. Generally, the engineering team is more focused on testing, a significant portion of features will have
+  completed their development in time for the spork, and new features start in their own branches anyway.
+
+
+### The [master](https://github.com/onflow/flow-go/tree/master) branch
+
+The `master` branch is intended to only contain features for the _immediately_ upcoming release.
+This is under the assumption that we have already committed to the type of upgrade (HCU vs Spork).
+
+**Generally, only HCU-compatible changes are allowed to be merged to `master`**. It is the responsibility of the developers to provide conclusive evidence of why
+their change can be deployed via HCU. For fully downwards-compatible changes, this explanation can be omitted. Otherwise, please provide a brief summary in your PR
+targeting master of why the code changes are HCU-compatible (for example, "only affects transaction execution and verification"). More details are provided in the
+[breaking change classification](#breaking-change-classifications) section below.
+
+
+### Deployment branches
+
+- For every HCU and spork, a new deployment branch will be created from master. This branch will be tagged and used to update testnet and then mainnet.
+- **Only non-breaking changes** can be committed to a deployment branch, such that nodes running the older and newer versions on this branch can be mixed.
+  In other words, a single deployment branch spans _all_ patches that are protocol-compatible without an HCU.
+- Each time there is a breaking change in the protocol (see [breaking change classification](#breaking-change-classifications) below),
+  the minor version increases and we create a new deployment branch from `master`.
+
+#### Naming Convention:
+The naming convention is `v0.minor`, with the major version currently pinned to `0`.
+The minor version increments at every Height-Coordinated-Upgrade [HCU] or spork (points of breaking downwards compatibility).
+An example is the branch `v0.33`. Extensions of the branch names are allowed but optional, for example including key differences
+such as the cryptographic stack (e.g. `v0.33-relic`), storage implementation (e.g. `v0.33-storehouse`), etc. Patch versions (e.g. `0.33.0`) are usually only included in tags and builds.
+
+
+### Feature branches
+- During development, all features should live on a feature branch.
+- For small features, this will be a simple working branch. These branches have the naming scheme `<contributor>/<issue #>-<short description>`, for example `kan/123-fix-known-issue`.
+- For larger features, this may be a shared feature branch with other team members. Feature branches have the naming scheme `feature/<name>`.
+
+### The `spork` branch
+
+The spork branch was specifically introduced for managing spork-level changes.
+Sporks are very far apart (targeted at 12-month intervals), so the `spork` branch is not needed for the time in between sporks.
+HCU-compatible features are merged to `master`, and HCU-incompatible features live in their own feature branches until shortly before the spork.
+In an ideal world, where we finalize the spork date a few months ahead of time and _always_ stick to it, the `spork` branch would not be needed at all.
+
+Despite our best efforts, occasionally circumstances arise that require us to postpone the spork just a few weeks before it was supposed to happen.
+In fact, given the magnitude and depth of changes being rolled out in a spork, needing to postpone on short notice is actually a reasonably plausible scenario.
+And the `spork` branch helps us to handle this scenario:
+
+Unless we are absolutely sure the spork is going to happen when planned, we limit `master` to HCU-compatible changes only. Thereby, we keep our options open
+to postpone the spork and, if needed, to continue with HCUs (e.g. for time-sensitive security fixes). However, we also need to consolidate our
+HCU-**in**compatible (major breaking) changes, resolve conflicts, and test, to keep our options open for actually sporking on the planned date,
+which we do on the `spork` branch.
+
+
+## Upgrade Path Eligibility
+
+- When a feature branch is ready to be merged, the desired upgrade path onto Mainnet must be determined (if any). The options are:
+  - Height Coordinated Upgrade (HCU)
+    - No protocol-level breaking changes
+    - No state migrations
+    - Changes to how Execution state/path are handled are allowed if they are
+      - Backwards compatible, or
+      - Brand new additions
+    - Resource optimizations are okay
+    - Cadence upgrades which could cause an execution state fork (likely any Cadence upgrade except for trivial changes)
+  - Spork
+    - Protocol level breaking change
+    - State migrations required
+- All HCU upgrades can go directly into the `master` branch
+- All spork upgrades must live on their own feature branch until the last HCU before the spork has been performed (usually approximately 1 month before the Spork).
+  - It is the responsibility of the DRI to keep this feature branch in a mergeable state.
+  - If the spork is scheduled to occur within a month, all the feature branches can be merged into `master`.
+    However, if the exact spork date has not been decided, then a special `spork` branch may be created from master to merge all the feature branches.
+    This is to consolidate all the feature branches while accommodating any additional HCUs that may occur between then and the spork date.
+  - Suggestion: once a sprint, merge `master` into the feature branch. More frequent merges are easier, as they avoid complex conflict resolutions.
+
+
+## End of Release Cycle
+
+- At the end of every release cycle, we will tag a commit that includes all desired features to be released
+- This commit will be tagged according to [semantic versioning guidelines](https://dapperlabs.notion.site/Changes-to-handling-git-tags-5e39af7c723a428a915bd88901fc1274)
+- Release notes must be written up, describing all features included in this tag
+
+## Benchmark Testing
+
+[Benchmarking](https://www.notion.so/Benchmarking-e3d89e3aadb44b0787da9bb7703b0dae?pvs=21)
+
+- All features on the release candidate tag must undergo testing on `benchmarknet`
+
+### Testnet
+
+- The current schedule is the Wednesday two weeks before the corresponding Mainnet spork
+- Features should aim to live on Testnet for at least two weeks before making it to Mainnet
+
+### Mainnet
+
+- Features must live on Testnet for two weeks before making it to Mainnet
+- The current schedule is the Wednesday two weeks after the Testnet Spork
+
+## Breaking Change Classifications
+
+### Acceptable Changes for HCU
+
+- All backward compatible changes
+- Breaking changes only pertaining to the execution of future transactions
+  - Many Cadence-related breaking changes would fall in this category
  - FVM changes may also fall here
+- Breaking changes only pertaining to the verification of future transactions
+
+### Spork-only changes
+
+- Any change that requires a state migration
+  - i.e. something changing in how the historical state will be read and interacted with
+- Any change that would break the communication contract between nodes
+  - e.g. Addition of a new REQUIRED field in a message structure
+  - Removal of a REQUIRED channel in libp2p
+  - Removal of a REQUIRED field in a message structure
+  - Generally, *all* the fields in our node-to-node messages are required.
+  - For BFT reasons we avoid optional fields, as they add surface for spamming attacks, impose additional consistency requirements, and they often add security vulnerabilities w.r.t. the message hash/ID.
+  - Most changes of the core protocol outside of execution, such as changes in the consensus algorithm, verification-and-sealing pipeline, or collector mechanics.
+    - For any protocol-related changes, you need to have a solid argument for why this change is non-breaking (absence of a counter-example is not sufficient).
+  - Changes in the [Service Events](https://www.notion.so/Service-Events-54e5edb7515445f293dff36ade910ad7?pvs=21) that are emitted by the Execution environment and ingested by the protocol
+  - Changes in the reading of Protocol state that are not backwards compatible
+    - e.g. If the way the node interprets identities loaded from storage is changed so that identities stored before an upgrade are no longer recognized after the upgrade, this is not acceptable for an HCU
diff --git a/docs/RecoverableRandomBeaconStateMachine.md b/docs/RecoverableRandomBeaconStateMachine.md
new file mode 100644
index 00000000000..927a31ad59e
--- /dev/null
+++ b/docs/RecoverableRandomBeaconStateMachine.md
@@ -0,0 +1,5 @@
+The `storage.RecoverableRandomBeaconStateMachine` formalizes the life-cycle of the Random Beacon keys for each epoch. On the happy path, each consensus participant for the next epoch takes part in a DKG to obtain a threshold key for participating in Flow's Random Beacon. After successfully finishing the DKG protocol, the node obtains a random beacon private key, which is stored in the database along with the DKG's current state, `flow.DKGStateCompleted`. If the DKG fails for any reason, the private key will be `nil` and the DKG's current state is set to `flow.DKGStateFailure`.
+In case the epoch switchover fails, the network goes into Epoch Fallback Mode [EFM]. The governance committee can recover the network via a special EpochRecover transaction, in which case the set of threshold keys is specified by the governance committee.
+The current implementation focuses on the scenario where the governance committee re-uses the threshold key set from the last successful epoch transition. While injecting other threshold keys into the nodes is conceptually possible and supported, the utilities for this recovery path are not yet implemented.
+
+[diagram](https://drive.google.com/file/d/1UnJLlTIs8IDOIHZhNUhXakeP_5Re10S4/view?usp=sharing)
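+
+The life-cycle above can be sketched in code as follows. This is a minimal illustration only: the two state names follow this document, while the storage type and helper names are simplifying assumptions, not the actual `storage.RecoverableRandomBeaconStateMachine` API.
+
+```go
+type DKGState int
+
+const (
+	DKGStateCompleted DKGState = iota // happy path: DKG finished, private key stored
+	DKGStateFailure                   // DKG failed: no private key for this epoch
+)
+
+// epochBeaconKey is a stand-in for the per-epoch database entry described above.
+type epochBeaconKey struct {
+	state      DKGState
+	privateKey []byte // nil whenever state == DKGStateFailure
+}
+
+// recoverFromEFM models the EpochRecover path: the governance committee injects
+// a threshold key, currently by re-using the key of the last successful epoch.
+func (k *epochBeaconKey) recoverFromEFM(committeeProvidedKey []byte) {
+	k.privateKey = committeeProvidedKey
+	k.state = DKGStateCompleted
+}
+```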
diff --git a/docs/agents/CodingConventions.md b/docs/agents/CodingConventions.md
new file mode 100644
index 00000000000..967592e3e0b
--- /dev/null
+++ b/docs/agents/CodingConventions.md
@@ -0,0 +1,312 @@
+# Coding Conventions
+
+## High-Assurance Software Engineering Principles
+
+Flow is a high-assurance software project where the cost of bugs that slip through can be catastrophically high. We consider all inputs to be potentially byzantine. This fundamentally shapes our approach to error handling and code correctness:
+
+### Inversion of Default Safety Assumptions
+- Traditional software engineering often assumes code paths are safe unless proven dangerous
+- In Flow, we invert this: **no code path is considered safe unless explicitly proven and documented to be safe**
+- The mere absence of known failure cases is NOT sufficient evidence of safety
+- We require conclusive arguments for why each code path will always behave correctly
+
+### Context-Dependent Error Classification
+
+A critical rule in Flow's error handling is that **the same error type can be benign in one context but an exception in another**. Error classification depends on the caller's context, not the error's type.
+
+Key principles:
+- An error type alone CANNOT determine whether it's benign or an exception
+- The caller's context and expectations determine the error's severity
+- The same error type may be handled differently in different contexts
+- Documentation *must* specify which errors are benign in which contexts
+
+Example of context-dependent error handling, where `storage.ErrNotFound` is _benign_:
+```go
+// We're checking if we need to request a block from another node.
+//
+// No errors are expected during normal operation.
+func (s *Synchronizer) checkBlockExists(blockID flow.Identifier) error {
+	_, err := s.storage.ByBlockID(blockID)
+	if errors.Is(err, storage.ErrNotFound) {
+		// Expected during normal operation - request block from peer.
+		return s.requestBlockFromPeer(blockID) // Expecting no errors from this call under normal operations
+	}
+	if err != nil {
+		// Other storage errors are unexpected
+		return fmt.Errorf("unexpected storage error: %w", err)
+	}
+	return nil
+}
+```
+
+However, in this context, the same `storage.ErrNotFound` is not expected during normal operations (we term unexpected errors "exceptions"):
+```go
+// We're trying to read a block we know was finalized.
+//
+// No errors are expected during normal operation.
+func (s *State) GetFinalizedBlock(height uint64) (*flow.Block, error) {
+	blockID, err := s.storage.FinalizedBlockID(height)
+	if err != nil {
+		return nil, fmt.Errorf("could not get finalized block ID: %w", err)
+	}
+
+	// At this point, we KNOW the block should exist
+	block, err := s.storage.ByBlockID(blockID)
+	if err != nil {
+		// Any error here (including ErrNotFound) indicates a bug or corruption
+		return nil, irrecoverable.NewExceptionf(
+			"storage corrupted - failed to get finalized block %v: %w",
+			blockID, err)
+	}
+	return block, nil
+}
+```
+
+### Rules for Error Classification
+
+1. **Documentation Requirements**
+   - Functions MUST document which error types are benign in their context
+   - Documentation MUST explain WHY an error is considered benign
+   - Absence of documentation means an error is treated as an exception
+
+2. **Error Propagation**
+   - When propagating errors, evaluate if they remain benign in the new context
+   - If a benign error from a lower layer indicates a critical failure in your context, wrap it as an exception
+   - Use `irrecoverable.NewExceptionf` when elevating a benign error to an exception
+
+3. **Testing Requirements**
+   - Tests MUST verify error handling in different contexts
+   - Test that benign errors in one context are properly elevated to exceptions in another
+   - Mock dependencies to test both benign and exceptional paths
+
+### Error Handling Philosophy
+- All errors are considered potentially fatal by default
+- Only explicitly documented benign errors are safe to recover from
+- For any undocumented error case, we must assume the execution state is corrupted
+- Recovery from undocumented errors requires node restart from the last known safe state
+- This conservative approach prioritizes safety over continuous operation
+
+Example of proper high-assurance error handling:
+```go
+func (e *engine) process(event interface{}) error {
+	// Step 1: type checking of input
+	switch v := event.(type) {
+	case *ValidEvent:
+		// explicitly documented safe path
+		return e.handleValidEvent(v)
+	default:
+		// undocumented event type - unsafe to proceed
+		return fmt.Errorf("unexpected event type %T: %w", event, ErrInvalidEventType)
+	}
+}
+
+func (e *engine) Submit(event interface{}) {
+	err := e.process(event)
+	if errors.Is(err, ErrInvalidEventType) {
+		// This is a documented benign error - safe to handle
+		metrics.InvalidEventsCounter.Inc()
+		return
+	}
+	if err != nil {
+		// Any other error is potentially fatal.
+		// We cannot prove it's safe to continue.
+		e.log.Fatal().Err(err).Msg("potentially corrupted state - must restart")
+		return
+	}
+}
+```
+
+## 1. Code Documentation
+- Every interface must have clear documentation
+- Copy and extend interface documentation in implementations
+- Include clear explanations for any deviations from conventions
+- Document all public functions individually
+- Document error handling strategies and expected error types
+
+Example of proper error documentation:
+```go
+// foo does abc.
+// Expected errors during normal operations:
+//   - ErrXFailed: if x failed
+func foo() error {
+	...
+	return fmt.Errorf("details about failure: %w", ErrXFailed)
+}
+```
+
+## 2. Code Structure
+- Follow the component-based architecture
+- Each component must implement the `Component` interface
+- Clearly differentiate between trusted (internal) and untrusted (external) inputs
+- Components should have dedicated worker pools, with proper resource management via worker limits
+- Proper state management and recovery (see the sketch below)
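+
+A minimal sketch of this structure (illustrative only: the worker-pool shape and names such as `exampleEngine` and `workerCount` are assumptions, not the canonical `Component` API):
+
+```go
+// exampleEngine sketches the component layout described above: a bounded,
+// dedicated worker pool consuming queued inputs, with failures handled
+// according to the error-handling rules of this document.
+type exampleEngine struct {
+	log      zerolog.Logger
+	messages chan interface{} // queued inputs, consumed by the workers
+}
+
+// newExampleEngine starts a fixed number of workers (resource limit).
+func newExampleEngine(log zerolog.Logger, workerCount int) *exampleEngine {
+	e := &exampleEngine{log: log, messages: make(chan interface{}, 100)}
+	for i := 0; i < workerCount; i++ {
+		go e.workerLoop()
+	}
+	return e
+}
+
+func (e *exampleEngine) workerLoop() {
+	for event := range e.messages {
+		if err := e.process(event); err != nil {
+			// prioritize safety: an undocumented error is treated as fatal
+			e.log.Fatal().Err(err).Msg("unexpected internal error")
+		}
+	}
+}
+```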
+## 3. Error Categories and Handling Philosophy
+
+### a. Benign Errors
+- Component remains fully functional despite the error
+- Expected during normal operations
+- Must be handled within the component
+- Must be documented in the component's context
+- Must be represented as typed sentinel errors
+- Cannot be represented by generic/untyped errors, unless explicitly documented as an optional simplification for components that solely return benign errors
+
+Example of proper benign error handling:
+```go
+// Expected errors during normal operations:
+//   - ErrXFailed: if x failed
+func benignErrorExample() error {
+	err := foo()
+	if err != nil {
+		return fmt.Errorf("failed to do foo: %w", err)
+	}
+	return nil
+}
+```
+
+### b. Exceptions
+- Potential symptoms of internal state corruption
+- Unexpected failures that may compromise component state
+- Should lead to component restart or node termination
+- Strongly encouraged to wrap with context when bubbling up
+
+Example of proper exception handling:
+```go
+err := foo()
+if errors.Is(err, ErrXFailed) {
+	// expected error
+	return
+}
+if err != nil {
+	log.Fatal().Err(err).Msg("unexpected internal error")
+	return
+}
+```
+
+### c. Sentinel Error Requirements
+- Must be properly typed
+- Must be documented in GoDoc
+- Must avoid generic error formats
+- Must always be wrapped with context when bubbling up the call stack
+- Must document all expected error types
+- Must be handled at the appropriate level where context is available
+- Must use proper error wrapping for stack traces
+
+Example of proper sentinel error definition and usage:
+```go
+var ErrXFailed = errors.New("x failed")
+
+// bar does ...
+// Expected error returns during normal operations:
+//   - ErrXFailed: if x failed
+func bar() error {
+	...
+	err := foo()
+	if err != nil {
+		return fmt.Errorf("failed to do foo: %w", err)
+	}
+	...
+}
+```
+
+## 4. Additional Best Practices
+- Prioritize safety over liveness
+- Don't continue on a "best-effort" basis when encountering unexpected errors
+- Testing Error Handling:
+  - Test both benign error cases and exceptions
+  - Must verify that documented sentinel errors are returned in their specified situations
+  - Must verify that unexpected errors (exceptions) from lower layers or their mocks are not misinterpreted as benign errors
+  - Verify proper error propagation
+  - Test component recovery from errors
+  - Validate error handling in both trusted and untrusted contexts (a test sketch follows the example below)
+
+Example of proper error handling in components:
+```go
+func (e *engine) process(event interface{}) error {
+	switch v := event.(type) {
+	...
+	default:
+		return fmt.Errorf("invalid input type %T: %w", event, InvalidMessageType)
+	}
+}
+
+func (e *engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error {
+	err := e.process(event)
+	if err != nil {
+		if errors.Is(err, InvalidMessageType) {
+			// this is EXPECTED during normal operations
+			return err
+		}
+		// this is unexpected during normal operations
+		e.log.Fatal().Err(err).Msg("unexpected internal error")
+	}
+	return nil
+}
+
+func (e *engine) ProcessLocal(event interface{}) {
+	err := e.process(event)
+	if err != nil {
+		if errors.Is(err, InvalidMessageType) {
+			// an invalid message from a LOCAL, trusted caller is a CRITICAL BUG
+		}
+		// this is unexpected during normal operations
+		e.log.Fatal().Err(err).Msg("unexpected internal error")
+	}
+}
+```
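+
+A sketch of such a test (illustrative assumptions: a testify-style mock for the lower layer, and helper names like `newTestEngine` and `validEvent`):
+
+```go
+func TestProcess_ErrorHandling(t *testing.T) {
+	e := newTestEngine(t)
+
+	// benign case: an invalid input type must surface the documented sentinel
+	err := e.process(struct{}{})
+	require.ErrorIs(t, err, InvalidMessageType)
+
+	// exceptional case: an unexpected failure from a mocked lower layer must
+	// NOT be misinterpreted as the benign sentinel
+	e.state.On("Process", mock.Anything).Return(fmt.Errorf("corrupted state"))
+	err = e.process(validEvent(t))
+	require.Error(t, err)
+	require.NotErrorIs(t, err, InvalidMessageType)
+}
+```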
+## 5. Anti-patterns to Avoid
+- Don't use generic error logging without proper handling
+- Don't swallow errors silently
+- Don't continue execution after unexpected errors
+- Don't use untyped errors unless explicitly documented as benign
+
+Example of an anti-pattern to avoid:
+```go
+// DON'T DO THIS:
+err := foo()
+if err != nil {
+	log.Error().Err(err).Msg("foo failed")
+	return
+}
+```
+
+Instead, implement proper error handling:
+```go
+func (e *engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) {
+	e.unit.Launch(func() {
+		err := e.process(event)
+		if errors.Is(err, InvalidMessageType) {
+			// invalid input: ignore or slash
+			return
+		}
+		if err != nil {
+			// unexpected input: for now we prioritize safety over liveness and just crash
+			// TODO: restart engine from known good state
+			e.log.Fatal().Err(err).Msg("unexpected internal error")
+		}
+	})
+}
+```
+
+## 6. Security Considerations
+- Treat all external inputs as potentially byzantine
+- Handle byzantine inputs gracefully
+- Prevent state corruption from malicious inputs
+- Use proper error types for security-related issues
+
+Example of handling untrusted inputs:
+```go
+func (e *engine) Submit(event interface{}) {
+	e.unit.Launch(func() {
+		err := e.process(event)
+		if errors.Is(err, InvalidMessageType) {
+			// invalid input from external source: ignore or slash
+			return
+		}
+		if err != nil {
+			// unexpected input: prioritize safety over liveness
+			e.log.Fatal().Err(err).Msg("unexpected internal error")
+		}
+	})
+}
+```
diff --git a/docs/agents/GoDocs.md b/docs/agents/GoDocs.md
new file mode 100644
index 00000000000..387893b0455
--- /dev/null
+++ b/docs/agents/GoDocs.md
@@ -0,0 +1,298 @@
+# Go Documentation Rule
+
+## General Guidance
+
+- Add godoc comments for all types, variables, constants, functions, and interfaces.
+- Begin with the name of the entity.
+- Use complete sentences.
+- **ALL** methods that return an error **MUST** document expected error conditions!
+- When updating existing code, if godocs exist, keep the existing content, improve the formatting, and expand it with additional details to conform with these rules.
+- If any details are unclear, **DO NOT make something up**. Add a TODO to fill in the missing details or ask the user for clarification.
+
+## Method Rules
+```go
+// MethodName performs a specific action or returns specific information.
+//
+// Returns: (only if additional interpretation of return values is needed beyond the method / function signature)
+//   - return1: description of non-obvious aspects
+//   - return2: description of non-obvious aspects
+//
+// Expected errors during normal operations:
+//   - ErrType1: when and why this error occurs
+//   - ErrType2: when and why this error occurs
+//   - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+//
+// Safe for concurrent access (default, may be omitted)
+// CAUTION: not concurrency safe! (if applicable, documentation is obligatory)
+```
+
+### Method Description
+  - First line must be a complete sentence describing what the method does
+  - Use present tense
+  - Start with the method name
+  - End with a period
+  - Prefer a concise description that naturally incorporates the meaning of parameters
+  - Example:
+    ```go
+    // ByBlockID returns the header with the given ID. It is available for finalized and ambiguous blocks.
+
+## Method Rules
+```go
+// MethodName performs a specific action or returns specific information.
+//
+// Returns: (only if additional interpretation of return values is needed beyond the method / function signature)
+//   - return1: description of non-obvious aspects
+//   - return2: description of non-obvious aspects
+//
+// Expected errors during normal operations:
+//   - ErrType1: when and why this error occurs
+//   - ErrType2: when and why this error occurs
+//   - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+//
+// Safe for concurrent access (default, may be omitted)
+// CAUTION: not concurrency safe! (if applicable, documentation is obligatory)
+```
+
+### Method Description
+  - First line must be a complete sentence describing what the method does
+  - Use present tense
+  - Start with the method name
+  - End with a period
+  - Prefer a concise description that naturally incorporates the meaning of parameters
+  - Example:
+    ```go
+    // ByBlockID returns the header with the given ID. It is available for finalized and ambiguous blocks.
+    // Error returns:
+    //   - ErrNotFound if no block header with the given ID exists
+    ByBlockID(blockID flow.Identifier) (*flow.Header, error)
+    ```
+
+### Parameters
+  - Only document parameters separately when they have non-obvious aspects:
+    - Complex constraints or requirements
+    - Special relationships with other parameters
+    - Formatting or validation rules
+  - Example:
+    ```go
+    // ValidateTransaction validates the transaction against the current state.
+    //
+    // Parameters:
+    //   - script: must be valid BPL-encoded script with max size of 64KB
+    //   - accounts: must contain at least one account with signing capability
+    ```
+
+### Returns
+  - Only document return values if there is **additional information** necessary to interpret the function's or method's return values, which is not apparent from the method signature
+  - When documenting non-error returns, be concise and focus only on non-obvious aspects:
+    ```go
+    // Example 1 - No return docs needed (self-explanatory):
+    // GetHeight returns the block's height.
+
+    // Example 2 - Additional context needed:
+    // GetPipeline returns the execution pipeline, or nil if not configured.
+
+    // Example 3 - Complex return value needs explanation:
+    // GetBlockStatus returns the block's current status.
+    // Returns:
+    //   - status: PENDING if still processing, FINALIZED if complete, INVALID if failed validation
+    ```
+  - Error returns documentation is mandatory (see section `Error Documentation` below)
+
+### Error Documentation
+  - Error classification is context-dependent: the same error type can be benign in one context but an exception in another
+  - **ALL** methods that return an error **MUST** document exhaustively all benign errors that can be returned (if there are any)
+  - Error documentation should be the last part of a method's or function's documentation
+  - Only document benign errors that are expected during normal operations
+  - Exceptions (unexpected errors) are not individually documented in the error section. Instead, we include the catch-all statement: `All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)`
+
+  Before documenting any error, verify:
+  - [ ] The error type exists in the codebase (for sentinel errors)
+  - [ ] The error is actually returned by the method
+  - [ ] The error handling matches the documented behavior
+  - [ ] The error is benign in this specific context
+  - [ ] If wrapping a sentinel error with fmt.Errorf, document the original sentinel error type
+  - [ ] The error documentation follows the standard format
+
+  Error documentation must follow this format:
+  ```go
+  // Expected errors during normal operations:
+  //   - ErrTypeName: when and why this error occurs (for sentinel errors)
+  //   - ErrWrapped: when wrapped via fmt.Errorf, document the original sentinel error
+  //   - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+  ```
+
+  For methods where all errors are exceptions:
+  ```go
+  // No errors are expected during normal operation.
+  ```
+
+  Common mistakes to avoid:
+  - Don't document errors that aren't returned
+  - Don't document generic fmt.Errorf errors unless they wrap a sentinel error
+  - Don't document exceptions (unexpected errors that may indicate bugs)
+  - Don't mix benign and exceptional errors without clear distinction
+  - Don't omit the catch-all statement about other errors
+  - Don't document implementation details that might change
+
+  Examples:
+  ```go
+  // Example 1: Method with sentinel errors
+  // GetBlock returns the block with the given ID.
+  // Expected errors during normal operations:
+  //   - ErrNotFound: when the block doesn't exist
+  //   - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+
+  // Example 2: Method wrapping a sentinel error
+  // ValidateTransaction validates the transaction against the current state.
+  // Expected errors during normal operations:
+  //   - ErrInvalidSignature: when the transaction signature is invalid (wrapped)
+  //   - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+
+  // Example 3: Method with only exceptional errors
+  // ProcessFinalizedBlock processes a block that is known to be finalized.
+  // No errors are expected during normal operation.
+
+  // Example 4: Method with context-dependent error handling
+  // ByBlockID returns the block with the given ID.
+  // Expected errors during normal operations:
+  //   - ErrNotFound: when requesting non-finalized blocks that don't exist
+  //   - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+  // Note: ErrNotFound is NOT expected when requesting finalized blocks
+  ```
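+
+The checklist item on wrapped sentinel errors is easiest to see in code. Below is a minimal sketch (`checkSignature` and the surrounding names are hypothetical, matching Example 2 above): the `%w` verb keeps the documented sentinel detectable via `errors.Is`, which is what the "(wrapped)" annotation promises.
+```go
+// ValidateTransaction validates the transaction against the current state.
+// Expected errors during normal operations:
+//   - ErrInvalidSignature: when the transaction signature is invalid (wrapped)
+//   - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+func ValidateTransaction(tx *Transaction) error {
+	// checkSignature is assumed to return ErrInvalidSignature on a bad signature;
+	// wrapping with %w preserves the sentinel for errors.Is checks by callers.
+	if err := checkSignature(tx); err != nil {
+		return fmt.Errorf("transaction failed signature check: %w", err)
+	}
+	return nil
+}
+```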
+
+### Concurrency Safety
+  - By default, we assume methods and functions to be concurrency safe
+  - Every struct and interface must explicitly state whether it is safe for concurrent access
+  - If not thread-safe, explain why
+  - For methods or functions that are not concurrency safe (deviating from the default), it is **mandatory** to diligently document this by including the following call-out:
+    ```go
+    // CAUTION: not concurrency safe!
+    ```
+  - If **all methods** of a struct or interface are thread-safe, only document this in the struct's or interface's godoc and mention that all methods are thread-safe (see the sketch below). Do not include the line in each method:
+    ```go
+    // Safe for concurrent access
+    ```
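+
+A minimal sketch of both call-outs on one hypothetical type (the names are illustrative, not from this codebase):
+```go
+// HeaderCache maintains an in-memory mapping from block IDs to headers.
+// Safe for concurrent access: all methods of HeaderCache are thread-safe,
+// except where explicitly documented otherwise.
+type HeaderCache struct {
+	mu      sync.RWMutex
+	headers map[flow.Identifier]*flow.Header
+}
+
+// ByID returns the cached header for the given block ID, and whether it was found.
+func (c *HeaderCache) ByID(blockID flow.Identifier) (*flow.Header, bool) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	header, found := c.headers[blockID]
+	return header, found
+}
+
+// Reset clears all cached headers.
+// CAUTION: not concurrency safe! Reset must only be called during
+// single-threaded initialization or teardown.
+func (c *HeaderCache) Reset() {
+	c.headers = make(map[flow.Identifier]*flow.Header)
+}
+```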
+// +// Expected errors during normal operations: +// - ErrInvalidReceipt: when the receipt is malformed +// - ErrDuplicateReceipt: when the receipt already exists +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +``` + +### Getter Method Example +```go +// Pipeline returns the pipeline associated with this execution result container. +// Returns nil if no pipeline is set. Safe for concurrent access +``` + +### Constructor Example +```go +// NewExecutionResultContainer creates a new instance of ExecutionResultContainer with the given result and pipeline. +// +// Expected Errors: +// - ErrInvalidBlock: when the block ID doesn't match the result's block ID +// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible) +``` + +## Interface Documentation +1. **Interface Description** + - Start with the interface name + - Describe the purpose and behavior of the interface + - Explain any invariants or guarantees the interface provides + - Explicitly state whether it is safe for concurrent access + - Example: + ```go + // Executor defines the interface for executing transactions. + // Implementations must guarantee thread-safety and handle byzantine inputs gracefully. + type Executor interface { + // ... methods ... + } + ``` + +2. **Interface Methods** + - Document each method in the interface + - Focus on the contract/behavior rather than implementation details + - Include error documentation for methods that return errors + - Ensure that the interface documentation is consistent with the structs' documentations implementing this interface + - Every sentinel error that can be returned by any of the implementations must also be documented by the interface. + - Example: + ```go + // Execute processes the given transaction and returns its execution result. + // The method must be idempotent and handle byzantine inputs gracefully. + // + // Expected Errors: + // - ErrInvalidTransaction: when the transaction is malformed + // - ErrExecutionFailed: when the transaction execution fails + Execute(tx *Transaction) (*Result, error) + ``` + +## Constants and Variables +1. **Constants** + - Document the purpose and usage of each constant + - Include any constraints or invariants + - Example: + ```go + // MaxBlockSize defines the maximum size of a block in bytes. + // This value must be a power of 2 and cannot be changed after initialization. + const MaxBlockSize = 1024 * 1024 + ``` + +2. **Variables** + - Document the purpose and lifecycle of each variable + - Include any thread-safety considerations + - Example: + ```go + // defaultConfig holds the default configuration for the system. + // This variable is read-only after initialization and safe for concurrent access. + var defaultConfig = &Config{ + // ... fields ... + } + ``` + +## Type Documentation +1. **Type Description** + - Start with the type name + - Describe the purpose and behavior of the type + - Include any invariants or guarantees + - Example: + ```go + // Block represents a block in the Flow blockchain. + // Blocks are immutable once created and contain a list of transactions. + // All exported methods are safe for concurrent access. + type Block struct { + // ... fields ... + } + ``` + +2. **Type Fields** + - Document each field with its purpose and constraints + - Include any thread-safety considerations + - Example: + ```go + type Block struct { + // Header contains the block's metadata and cryptographic commitments. 
+       // This field is immutable after block creation.
+       Header *BlockHeader
+
+       // Payload contains the block's transactions and execution results.
+       // This field is immutable after block creation.
+       Payload *BlockPayload
+
+       // Signature is the cryptographic signature of the block proposer.
+       // This field must be set before the block is considered valid.
+       Signature []byte
+     }
+     ```
+
+3. **Type Methods**
+   - Document each method following the method documentation rules
+   - Include error documentation for methods that return errors
+
+## Special Cases
+We have low-level storage functions in the packages `storage/operation` and `storage/procedure` that have
+specialized documentation requirements. For all files in these packages, meticulously follow the instructions
+in `storage/operation/Documentation-Guidelines.md`.
diff --git a/docs/agents/OperationalDoctrine.md b/docs/agents/OperationalDoctrine.md
new file mode 100644
index 00000000000..fc25b1bd299
--- /dev/null
+++ b/docs/agents/OperationalDoctrine.md
@@ -0,0 +1,40 @@
+# Agents Directive
+
+You are an AI with extensive expertise in byzantine-fault-tolerant, distributed software engineering. You will consider scalability, reliability, maintainability, and security in your recommendations.
+
+You are working in a pair-programming setting with a senior engineer. Their time is valuable, so work time-efficiently. They prefer an iterative working style, where you take one step at a time, confirm the direction is correct, and then proceed.
+Critically reflect on your work. Ask if you are not sure. Avoid confirmation bias: speak up (with short, concise reasoning followed by tangible suggestions) if something should be changed or approached differently in your opinion.
+
+## Primary directive
+
+Your peer's instructions, questions, and requests **always** take precedence over any general rules (such as the ones below).
+
+## Interactions with your peer
+- Never use apologies.
+- Acknowledge if you misunderstood something, and concisely summarize what you have learned.
+- Only when explicitly requested, provide feedback about your understanding of comments, documentation, or code.
+- Don't show or discuss the current implementation unless specifically requested.
+- State which files have been modified, and very briefly in which regard, but don't provide excerpts of the changes made.
+- Don't ask for confirmation of information already provided in the context.
+- Don't ask your peer to verify implementations that are visible in the provided context.
+- Always provide links to the real files, not just file names such as `x.md`.
+
+## Verify Information
+- Always verify information before presenting it. Do not make assumptions or speculate without clear evidence.
+- For all changes you make, review them in the broader context of the component you are modifying.
+  - internally, construct a correctness argument as evidence that the updated component will _always_ behave correctly
+  - memorize your correctness argument, but do not immediately include it in your response unless specifically requested by your peer
+
+## Software Design Approach
+- Leverage existing abstractions; refactor them judiciously.
+- Augment with tests, logging, and API exposition once the core business logic is robust.
+- Ensure new packages are modular, orthogonal, and future-proof.
+
+## No Inventions
+Don't invent changes other than what's explicitly requested.
+
+## No Unnecessary Updates
+- Don't remove unrelated code or functionalities.
+- Don't suggest updates or changes to files when there are no actual modifications needed. +- Don't suggest whitespace changes. + diff --git a/docs/AccessNodeSequenceDiagram.png b/docs/images/AccessNodeSequenceDiagram.png similarity index 100% rename from docs/AccessNodeSequenceDiagram.png rename to docs/images/AccessNodeSequenceDiagram.png diff --git a/docs/AssignmentCollectorTree_1.png b/docs/images/AssignmentCollectorTree_1.png similarity index 100% rename from docs/AssignmentCollectorTree_1.png rename to docs/images/AssignmentCollectorTree_1.png diff --git a/docs/AssignmentCollectorTree_2.png b/docs/images/AssignmentCollectorTree_2.png similarity index 100% rename from docs/AssignmentCollectorTree_2.png rename to docs/images/AssignmentCollectorTree_2.png diff --git a/docs/Chain_and_ExecutionResult_trees_A.png b/docs/images/Chain_and_ExecutionResult_trees_A.png similarity index 100% rename from docs/Chain_and_ExecutionResult_trees_A.png rename to docs/images/Chain_and_ExecutionResult_trees_A.png diff --git a/docs/Chain_and_ExecutionResult_trees_B.png b/docs/images/Chain_and_ExecutionResult_trees_B.png similarity index 100% rename from docs/Chain_and_ExecutionResult_trees_B.png rename to docs/images/Chain_and_ExecutionResult_trees_B.png diff --git a/docs/Chain_and_ExecutionResult_trees_C.png b/docs/images/Chain_and_ExecutionResult_trees_C.png similarity index 100% rename from docs/Chain_and_ExecutionResult_trees_C.png rename to docs/images/Chain_and_ExecutionResult_trees_C.png diff --git a/docs/Chain_and_ExecutionResult_trees_D.png b/docs/images/Chain_and_ExecutionResult_trees_D.png similarity index 100% rename from docs/Chain_and_ExecutionResult_trees_D.png rename to docs/images/Chain_and_ExecutionResult_trees_D.png diff --git a/docs/Chain_and_ExecutionResult_trees_E.png b/docs/images/Chain_and_ExecutionResult_trees_E.png similarity index 100% rename from docs/Chain_and_ExecutionResult_trees_E.png rename to docs/images/Chain_and_ExecutionResult_trees_E.png diff --git a/docs/ComponentInteraction.png b/docs/images/ComponentInteraction.png similarity index 100% rename from docs/ComponentInteraction.png rename to docs/images/ComponentInteraction.png diff --git a/docs/images/CruiseControl_BlockTimeController/EpochSimulation_000.png b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_000.png new file mode 100644 index 00000000000..d9d852d7228 Binary files /dev/null and b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_000.png differ diff --git a/docs/images/CruiseControl_BlockTimeController/EpochSimulation_005-0.png b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_005-0.png new file mode 100644 index 00000000000..550b82fc3ae Binary files /dev/null and b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_005-0.png differ diff --git a/docs/images/CruiseControl_BlockTimeController/EpochSimulation_005-1.png b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_005-1.png new file mode 100644 index 00000000000..e058fbbe775 Binary files /dev/null and b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_005-1.png differ diff --git a/docs/images/CruiseControl_BlockTimeController/EpochSimulation_028.png b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_028.png new file mode 100644 index 00000000000..50dd514b5a2 Binary files /dev/null and b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_028.png differ diff --git a/docs/images/CruiseControl_BlockTimeController/EpochSimulation_029.png 
b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_029.png new file mode 100644 index 00000000000..bcee262c740 Binary files /dev/null and b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_029.png differ diff --git a/docs/images/CruiseControl_BlockTimeController/EpochSimulation_030.png b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_030.png new file mode 100644 index 00000000000..83f8f5c3833 Binary files /dev/null and b/docs/images/CruiseControl_BlockTimeController/EpochSimulation_030.png differ diff --git a/docs/images/CruiseControl_BlockTimeController/PID_controller_for_block-rate-delay.png b/docs/images/CruiseControl_BlockTimeController/PID_controller_for_block-rate-delay.png new file mode 100644 index 00000000000..78b6cb680f8 Binary files /dev/null and b/docs/images/CruiseControl_BlockTimeController/PID_controller_for_block-rate-delay.png differ diff --git a/docs/images/CruiseControl_BlockTimeController/ViewDurationConvention.png b/docs/images/CruiseControl_BlockTimeController/ViewDurationConvention.png new file mode 100644 index 00000000000..41b2f4d6153 Binary files /dev/null and b/docs/images/CruiseControl_BlockTimeController/ViewDurationConvention.png differ diff --git a/docs/images/CruiseControl_BlockTimeController/ViewRate.png b/docs/images/CruiseControl_BlockTimeController/ViewRate.png new file mode 100644 index 00000000000..003de946b15 Binary files /dev/null and b/docs/images/CruiseControl_BlockTimeController/ViewRate.png differ diff --git a/docs/Emergency_Sealing.png b/docs/images/Emergency_Sealing.png similarity index 100% rename from docs/Emergency_Sealing.png rename to docs/images/Emergency_Sealing.png diff --git a/docs/images/ErrorHandling.png b/docs/images/ErrorHandling.png new file mode 100644 index 00000000000..2d75c31aca3 Binary files /dev/null and b/docs/images/ErrorHandling.png differ diff --git a/docs/ExecutionResultTrees.png b/docs/images/ExecutionResultTrees.png similarity index 100% rename from docs/ExecutionResultTrees.png rename to docs/images/ExecutionResultTrees.png diff --git a/docs/NotifierStateMachine.png b/docs/images/NotifierStateMachine.png similarity index 100% rename from docs/NotifierStateMachine.png rename to docs/images/NotifierStateMachine.png diff --git a/docs/NotifierUsagePattern.png b/docs/images/NotifierUsagePattern.png similarity index 100% rename from docs/NotifierUsagePattern.png rename to docs/images/NotifierUsagePattern.png diff --git a/docs/PaceMaker.png b/docs/images/PaceMaker.png similarity index 100% rename from docs/PaceMaker.png rename to docs/images/PaceMaker.png diff --git a/docs/StateMachine.png b/docs/images/StateMachine.png similarity index 100% rename from docs/StateMachine.png rename to docs/images/StateMachine.png diff --git a/docs/StateMachine_with_notifications.png b/docs/images/StateMachine_with_notifications.png similarity index 100% rename from docs/StateMachine_with_notifications.png rename to docs/images/StateMachine_with_notifications.png diff --git a/docs/VerifierAssignment.png b/docs/images/VerifierAssignment.png similarity index 100% rename from docs/VerifierAssignment.png rename to docs/images/VerifierAssignment.png diff --git a/docs/images/flow-go_branching-convention.png b/docs/images/flow-go_branching-convention.png new file mode 100644 index 00000000000..6f549b4b3f3 Binary files /dev/null and b/docs/images/flow-go_branching-convention.png differ diff --git a/engine/Readme.md b/engine/Readme.md index 8faebe0b332..421de13ca35 100644 --- a/engine/Readme.md +++ 
b/engine/Readme.md @@ -1,7 +1,6 @@ # Notifier - The Notifier implements the following state machine -![Notifier State Machine](/docs/NotifierStateMachine.png) +![Notifier State Machine](/docs/images/NotifierStateMachine.png) The intended usage pattern is: * there are goroutines, aka `Producer`s, that append work to a queue `pendingWorkQueue` @@ -10,7 +9,7 @@ The intended usage pattern is: * when they find that the `pendingWorkQueue` contains no more work, they go back to the notifier and await notification -![Notifier Usage Pattern](/docs/NotifierUsagePattern.png) +![Notifier Usage Pattern](/docs/images/NotifierUsagePattern.png) Note that the consumer / producer interact in a _different_ order with the `pendingWorkQueue` vs the `notifier`: * the producer first drops its work into the queue and subsequently sends the notification diff --git a/engine/access/access_test.go b/engine/access/access_test.go index 8aa301ba49b..f5620fafe66 100644 --- a/engine/access/access_test.go +++ b/engine/access/access_test.go @@ -3,14 +3,11 @@ package access_test import ( "context" "encoding/json" - "os" "testing" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/google/go-cmp/cmp" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" - entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities" - execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,34 +15,46 @@ import ( "github.com/stretchr/testify/suite" "google.golang.org/protobuf/testing/protocmp" - "github.com/onflow/flow-go/access" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/cmd/build" hsmock "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/access/ingestion" accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" - factorymock "github.com/onflow/flow-go/engine/access/rpc/backend/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + "github.com/onflow/flow-go/engine/access/subscription" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/module/state_synchronization/indexer" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - 
"github.com/onflow/flow-go/storage/util" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" "github.com/onflow/flow-go/utils/unittest/mocks" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities" + execproto "github.com/onflow/flow/protobuf/go/flow/execution" ) type Suite struct { @@ -58,7 +67,7 @@ type Suite struct { signerIndicesDecoder *hsmock.BlockSignerDecoder signerIds flow.IdentifierList log zerolog.Logger - net *mocknetwork.Network + net *mocknetwork.EngineRegistry request *mockmodule.Requester collClient *accessmock.AccessAPIClient execClient *accessmock.ExecutionAPIClient @@ -70,6 +79,9 @@ type Suite struct { metrics *metrics.NoopCollector finalizedHeaderCache module.FinalizedHeaderCache backend *backend.Backend + sporkID flow.Identifier + protocolStateVersion uint64 + lockManager lockctx.Manager } // TestAccess tests scenarios which exercise multiple API calls using both the RPC handler and the ingest engine @@ -79,11 +91,14 @@ func TestAccess(t *testing.T) { } func (suite *Suite) SetupTest() { - suite.log = zerolog.New(os.Stderr) - suite.net = new(mocknetwork.Network) + suite.lockManager = storage.NewTestingLockManager() + suite.log = unittest.Logger() + suite.net = new(mocknetwork.EngineRegistry) suite.state = new(protocol.State) suite.finalSnapshot = new(protocol.Snapshot) suite.sealedSnapshot = new(protocol.Snapshot) + suite.sporkID = unittest.IdentifierFixture() + suite.protocolStateVersion = unittest.Uint64InRange(10, 30) suite.rootBlock = unittest.BlockHeaderFixture(unittest.WithHeaderHeight(0)) suite.sealedBlock = suite.rootBlock @@ -106,15 +121,22 @@ func (suite *Suite) SetupTest() { nil, ).Maybe() + pstate := protocol.NewKVStoreReader(suite.T()) + pstate.On("GetProtocolStateVersion").Return(suite.protocolStateVersion, nil).Maybe() + suite.finalSnapshot.On("ProtocolState").Return(pstate, nil).Maybe() + suite.params = new(protocol.Params) - suite.params.On("Root").Return(suite.rootBlock, nil) + suite.params.On("FinalizedRoot").Return(suite.rootBlock, nil) + suite.params.On("SporkID").Return(suite.sporkID, nil) suite.params.On("SporkRootBlockHeight").Return(suite.rootBlock.Height, nil) + suite.params.On("SealedRoot").Return(suite.rootBlock, nil) suite.state.On("Params").Return(suite.params).Maybe() suite.collClient = new(accessmock.AccessAPIClient) suite.execClient = new(accessmock.ExecutionAPIClient) suite.request = new(mockmodule.Requester) suite.request.On("EntityByID", mock.Anything, mock.Anything) + suite.request.On("Force").Return() suite.me = new(mockmodule.Local) @@ -133,42 +155,51 @@ func (suite *Suite) SetupTest() { } func (suite *Suite) RunTest( - f func(handler *access.Handler, db *badger.DB, all *storage.All), + f func(handler *rpc.Handler, db storage.DB, all *store.All), ) { - unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) { - all := util.StorageLayer(suite.T(), db) + unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + all := store.InitAll(metrics.NewNoopCollector(), db) + + var err error + suite.backend, err = backend.New(backend.Params{ + State: suite.state, + CollectionRPC: suite.collClient, + Blocks: all.Blocks, + Headers: all.Headers, + Collections: all.Collections, + Transactions: all.Transactions, + ExecutionResults: all.Results, + ExecutionReceipts: all.Receipts, + ChainID: suite.chainID, 
+ AccessMetrics: suite.metrics, + MaxHeightRange: events.DefaultMaxHeightRange, + Log: suite.log, + SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + MaxScriptAndArgumentSize: commonrpc.DefaultAccessMaxRequestSize, + }) + require.NoError(suite.T(), err) - suite.backend = backend.New(suite.state, - suite.collClient, - nil, - all.Blocks, - all.Headers, - all.Collections, - all.Transactions, - all.Receipts, - all.Results, - suite.chainID, - suite.metrics, - nil, - false, - backend.DefaultMaxHeightRange, - nil, - nil, - suite.log, - backend.DefaultSnapshotHistoryLimit, - nil, + handler := rpc.NewHandler( + suite.backend, + suite.chainID.Chain(), + suite.finalizedHeaderCache, + suite.me, + subscription.DefaultMaxGlobalStreams, + rpc.WithBlockSignerDecoder(suite.signerIndicesDecoder), ) - handler := access.NewHandler(suite.backend, suite.chainID.Chain(), suite.finalizedHeaderCache, suite.me, access.WithBlockSignerDecoder(suite.signerIndicesDecoder)) f(handler, db, all) }) } func (suite *Suite) TestSendAndGetTransaction() { - suite.RunTest(func(handler *access.Handler, _ *badger.DB, _ *storage.All) { + suite.RunTest(func(handler *rpc.Handler, _ storage.DB, _ *store.All) { referenceBlock := unittest.BlockHeaderFixture() - transaction := unittest.TransactionFixture() - transaction.SetReferenceBlockID(referenceBlock.ID()) - + transaction := unittest.TransactionBodyFixture(unittest.WithReferenceBlock(referenceBlock.ID())) refSnapshot := new(protocol.Snapshot) suite.state. @@ -185,7 +216,7 @@ func (suite *Suite) TestSendAndGetTransaction() { Return(referenceBlock, nil). Once() - expected := convert.TransactionToMessage(transaction.TransactionBody) + expected := convert.TransactionToMessage(transaction) sendReq := &accessproto.SendTransactionRequest{ Transaction: expected, } @@ -217,11 +248,10 @@ func (suite *Suite) TestSendAndGetTransaction() { } func (suite *Suite) TestSendExpiredTransaction() { - suite.RunTest(func(handler *access.Handler, _ *badger.DB, _ *storage.All) { + suite.RunTest(func(handler *rpc.Handler, _ storage.DB, _ *store.All) { referenceBlock := suite.finalizedBlock + transaction := unittest.TransactionBodyFixture(unittest.WithReferenceBlock(referenceBlock.ID())) - transaction := unittest.TransactionFixture() - transaction.SetReferenceBlockID(referenceBlock.ID()) // create latest block that is past the expiry window latestBlock := unittest.BlockHeaderFixture() latestBlock.Height = referenceBlock.Height + flow.DefaultTransactionExpiry*2 @@ -237,11 +267,11 @@ func (suite *Suite) TestSendExpiredTransaction() { Return(referenceBlock, nil). 
Twice() - //Advancing final state to expire ref block + // Advancing final state to expire ref block suite.finalizedBlock = latestBlock req := &accessproto.SendTransactionRequest{ - Transaction: convert.TransactionToMessage(transaction.TransactionBody), + Transaction: convert.TransactionToMessage(transaction), } _, err := handler.SendTransaction(context.Background(), req) @@ -249,19 +279,15 @@ func (suite *Suite) TestSendExpiredTransaction() { }) } -type mockCloser struct{} - -func (mc *mockCloser) Close() error { return nil } - // TestSendTransactionToRandomCollectionNode tests that collection nodes are chosen from the appropriate cluster when // forwarding transactions by sending two transactions bound for two different collection clusters. func (suite *Suite) TestSendTransactionToRandomCollectionNode() { - unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) { + unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) // create a transaction referenceBlock := unittest.BlockHeaderFixture() - transaction := unittest.TransactionFixture() - transaction.SetReferenceBlockID(referenceBlock.ID()) + transaction := unittest.TransactionBodyFixture(unittest.WithReferenceBlock(referenceBlock.ID())) // setup the state and finalSnapshot mock expectations suite.state.On("AtBlockID", referenceBlock.ID()).Return(suite.finalSnapshot, nil) @@ -269,30 +295,30 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { // create storage metrics := metrics.NewNoopCollector() - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) + transactions := store.NewTransactions(metrics, db) + collections := store.NewCollections(db, transactions) // create collection node cluster count := 2 - collNodes := unittest.IdentityListFixture(count, unittest.WithRole(flow.RoleCollection)) + collNodes := unittest.IdentityListFixture(count, unittest.WithRole(flow.RoleCollection)).ToSkeleton() assignments := unittest.ClusterAssignment(uint(count), collNodes) clusters, err := factory.NewClusterList(assignments, collNodes) suite.Require().Nil(err) collNode1 := clusters[0][0] collNode2 := clusters[1][0] - epoch := new(protocol.Epoch) - suite.epochQuery.On("Current").Return(epoch) + epoch := new(protocol.CommittedEpoch) + suite.epochQuery.On("Current").Return(epoch, nil) epoch.On("Clustering").Return(clusters, nil) // create two transactions bound for each of the cluster cluster1 := clusters[0] - cluster1tx := unittest.AlterTransactionForCluster(transaction.TransactionBody, clusters, cluster1, func(transaction *flow.TransactionBody) {}) + cluster1tx := unittest.AlterTransactionForCluster(transaction, clusters, cluster1, func(transaction *flow.TransactionBody) {}) tx1 := convert.TransactionToMessage(cluster1tx) sendReq1 := &accessproto.SendTransactionRequest{ Transaction: tx1, } cluster2 := clusters[1] - cluster2tx := unittest.AlterTransactionForCluster(transaction.TransactionBody, clusters, cluster2, func(transaction *flow.TransactionBody) {}) + cluster2tx := unittest.AlterTransactionForCluster(transaction, clusters, cluster2, func(transaction *flow.TransactionBody) {}) tx2 := convert.TransactionToMessage(cluster2tx) sendReq2 := &accessproto.SendTransactionRequest{ Transaction: tx2, @@ -306,32 +332,28 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { col2ApiClient.On("SendTransaction", mock.Anything, sendReq2).Return(&sendResp, nil).Once() // create a mock connection factory - connFactory := 
new(factorymock.ConnectionFactory) - connFactory.On("GetAccessAPIClient", collNode1.Address).Return(col1ApiClient, &mockCloser{}, nil) - connFactory.On("GetAccessAPIClient", collNode2.Address).Return(col2ApiClient, &mockCloser{}, nil) - - backend := backend.New(suite.state, - nil, - nil, - nil, - nil, - collections, - transactions, - nil, - nil, - suite.chainID, - metrics, - connFactory, - false, - backend.DefaultMaxHeightRange, - nil, - nil, - suite.log, - backend.DefaultSnapshotHistoryLimit, - nil, - ) + connFactory := connectionmock.NewConnectionFactory(suite.T()) + connFactory.On("GetCollectionAPIClient", collNode1.Address, nil).Return(col1ApiClient, &mocks.MockCloser{}, nil) + connFactory.On("GetCollectionAPIClient", collNode2.Address, nil).Return(col2ApiClient, &mocks.MockCloser{}, nil) + + bnd, err := backend.New(backend.Params{State: suite.state, + Collections: collections, + Transactions: transactions, + ChainID: suite.chainID, + AccessMetrics: metrics, + ConnFactory: connFactory, + MaxHeightRange: events.DefaultMaxHeightRange, + Log: suite.log, + SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + MaxScriptAndArgumentSize: commonrpc.DefaultAccessMaxRequestSize, + }) + require.NoError(suite.T(), err) - handler := access.NewHandler(backend, suite.chainID.Chain(), suite.finalizedHeaderCache, suite.me) + handler := rpc.NewHandler(bnd, suite.chainID.Chain(), suite.finalizedHeaderCache, suite.me, subscription.DefaultMaxGlobalStreams) // Send transaction 1 resp, err := handler.SendTransaction(context.Background(), sendReq1) @@ -368,19 +390,36 @@ func (suite *Suite) TestSendTransactionToRandomCollectionNode() { } func (suite *Suite) TestGetBlockByIDAndHeight() { - suite.RunTest(func(handler *access.Handler, db *badger.DB, all *storage.All) { + suite.RunTest(func(handler *rpc.Handler, db storage.DB, all *store.All) { // test block1 get by ID block1 := unittest.BlockFixture() + proposal1 := unittest.ProposalFromBlock(block1) // test block2 get by height - block2 := unittest.BlockFixture() - block2.Header.Height = 2 - - require.NoError(suite.T(), all.Blocks.Store(&block1)) - require.NoError(suite.T(), all.Blocks.Store(&block2)) + block2 := unittest.BlockFixture( + unittest.Block.WithHeight(2), + ) + proposal2 := unittest.ProposalFromBlock(block2) + + err := unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + if err := all.Blocks.BatchStore(lctx, rw, proposal1); err != nil { + return err + } + if err := all.Blocks.BatchStore(lctx, rw, proposal2); err != nil { + return err + } + return nil + }) + }) + require.NoError(suite.T(), err) - // the follower logic should update height index on the block storage when a block is finalized - err := db.Update(operation.IndexBlockHeight(block2.Header.Height, block2.ID())) + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockFinalizeBlock, func(fctx lockctx.Context) error { + // the follower logic should update height index on the block storage when a block is finalized + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(fctx, rw, block2.Height, block2.ID()) + }) + }) 
require.NoError(suite.T(), err) assertHeaderResp := func( @@ -427,7 +466,7 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { require.Equal(suite.T(), expectedMessage, actual) } - suite.finalSnapshot.On("Head").Return(block1.Header, nil) + suite.finalSnapshot.On("Head").Return(block1.ToHeader(), nil) suite.Run("get header 1 by ID", func() { // get header by ID id := block1.ID() @@ -438,7 +477,7 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { resp, err := handler.GetBlockHeaderByID(context.Background(), req) // assert it is indeed block1 - assertHeaderResp(resp, err, block1.Header) + assertHeaderResp(resp, err, block1.ToHeader()) }) suite.Run("get block 1 by ID", func() { @@ -451,7 +490,7 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { resp, err := handler.GetBlockByID(context.Background(), req) - assertBlockResp(resp, err, &block1) + assertBlockResp(resp, err, block1) }) suite.Run("get block light 1 by ID", func() { @@ -463,48 +502,48 @@ func (suite *Suite) TestGetBlockByIDAndHeight() { resp, err := handler.GetBlockByID(context.Background(), req) - assertLightBlockResp(resp, err, &block1) + assertLightBlockResp(resp, err, block1) }) suite.Run("get header 2 by height", func() { // get header by height req := &accessproto.GetBlockHeaderByHeightRequest{ - Height: block2.Header.Height, + Height: block2.Height, } resp, err := handler.GetBlockHeaderByHeight(context.Background(), req) - assertHeaderResp(resp, err, block2.Header) + assertHeaderResp(resp, err, block2.ToHeader()) }) suite.Run("get block 2 by height", func() { // get block details by height req := &accessproto.GetBlockByHeightRequest{ - Height: block2.Header.Height, + Height: block2.Height, FullBlockResponse: true, } resp, err := handler.GetBlockByHeight(context.Background(), req) - assertBlockResp(resp, err, &block2) + assertBlockResp(resp, err, block2) }) suite.Run("get block 2 by height", func() { // get block details by height req := &accessproto.GetBlockByHeightRequest{ - Height: block2.Header.Height, + Height: block2.Height, } resp, err := handler.GetBlockByHeight(context.Background(), req) - assertLightBlockResp(resp, err, &block2) + assertLightBlockResp(resp, err, block2) }) }) } func (suite *Suite) TestGetExecutionResultByBlockID() { - suite.RunTest(func(handler *access.Handler, db *badger.DB, all *storage.All) { + suite.RunTest(func(handler *rpc.Handler, db storage.DB, all *store.All) { // test block1 get by ID nonexistingID := unittest.IdentifierFixture() @@ -547,15 +586,12 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { for i, serviceEvent := range executionResult.ServiceEvents { assert.Equal(suite.T(), serviceEvent.Type.String(), er.ServiceEvents[i].Type) event := serviceEvent.Event - marshalledEvent, err := json.Marshal(event) require.NoError(suite.T(), err) - assert.Equal(suite.T(), marshalledEvent, er.ServiceEvents[i].Payload) } parsedExecResult, err := convert.MessageToExecutionResult(resp.ExecutionResult) require.NoError(suite.T(), err) - assert.Equal(suite.T(), parsedExecResult, executionResult) assert.Equal(suite.T(), parsedExecResult.ID(), executionResult.ID()) } @@ -589,18 +625,17 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { // TestGetSealedTransaction tests that transactions status of transaction that belongs to a sealed block // is reported as sealed func (suite *Suite) TestGetSealedTransaction() { - unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) { - all := util.StorageLayer(suite.T(), db) - results := bstorage.NewExecutionResults(suite.metrics, db) - receipts := 
bstorage.NewExecutionReceipts(suite.metrics, db, results, bstorage.DefaultCacheSize) + unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + all := store.InitAll(metrics.NewNoopCollector(), db) enIdentities := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) enNodeIDs := enIdentities.NodeIDs() // create block -> collection -> transactions - block, collection := suite.createChain() + proposal, collection := suite.createChain() + block := proposal.Block // setup mocks - originID := unittest.IdentifierFixture() conduit := new(mocknetwork.Conduit) suite.net.On("Register", channels.ReceiveReceipts, mock.Anything).Return(conduit, nil). Once() @@ -616,65 +651,132 @@ func (suite *Suite) TestGetSealedTransaction() { } // generate receipts - executionReceipts := unittest.ReceiptsForBlockFixture(block, enNodeIDs) + executionReceipts := unittest.ReceiptsForBlockFixture(&block, enNodeIDs) // assume execution node returns an empty list of events suite.execClient.On("GetTransactionResult", mock.Anything, mock.Anything).Return(&exeEventResp, nil) // create a mock connection factory - connFactory := new(factorymock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) + connFactory := connectionmock.NewConnectionFactory(suite.T()) + connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mocks.MockCloser{}, nil) // initialize storage metrics := metrics.NewNoopCollector() - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) - collectionsToMarkFinalized, err := stdmap.NewTimes(100) + transactions := store.NewTransactions(metrics, db) + collections := store.NewCollections(db, transactions) + collectionsToMarkFinalized := stdmap.NewTimes(100) + collectionsToMarkExecuted := stdmap.NewTimes(100) + blocksToMarkExecuted := stdmap.NewTimes(100) + blockTransactions := stdmap.NewIdentifierMap(100) + + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + suite.log, + suite.state, + all.Receipts, + enNodeIDs, + nil, + ) + + bnd, err := backend.New(backend.Params{ + State: suite.state, + CollectionRPC: suite.collClient, + Blocks: all.Blocks, + Headers: all.Headers, + Collections: collections, + Transactions: transactions, + ExecutionReceipts: all.Receipts, + ExecutionResults: all.Results, + ChainID: suite.chainID, + AccessMetrics: suite.metrics, + ConnFactory: connFactory, + MaxHeightRange: events.DefaultMaxHeightRange, + Log: suite.log, + SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, + Communicator: node_communicator.NewNodeCommunicator(false), + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + ExecNodeIdentitiesProvider: execNodeIdentitiesProvider, + MaxScriptAndArgumentSize: commonrpc.DefaultAccessMaxRequestSize, + }) require.NoError(suite.T(), err) - collectionsToMarkExecuted, err := stdmap.NewTimes(100) + + handler := rpc.NewHandler(bnd, suite.chainID.Chain(), suite.finalizedHeaderCache, suite.me, subscription.DefaultMaxGlobalStreams) + + collectionExecutedMetric, err := indexer.NewCollectionExecutedMetricImpl( + suite.log, + metrics, + collectionsToMarkFinalized, + collectionsToMarkExecuted, + blocksToMarkExecuted, + collections, + all.Blocks, + blockTransactions, + ) + require.NoError(suite.T(), err) + + progress, err := 
store.NewConsumerProgress(db, module.ConsumeProgressLastFullBlockHeight).Initialize(suite.rootBlock.Height) require.NoError(suite.T(), err) - blocksToMarkExecuted, err := stdmap.NewTimes(100) + lastFullBlockHeight, err := counters.NewPersistentStrictMonotonicCounter(progress) require.NoError(suite.T(), err) - backend := backend.New(suite.state, - suite.collClient, - nil, + // create the ingest engine + processedHeight := store.NewConsumerProgress(db, module.ConsumeProgressIngestionEngineBlockHeight) + + collectionSyncer := ingestion.NewCollectionSyncer( + suite.log, + module.CollectionExecutedMetric(collectionExecutedMetric), + suite.request, + suite.state, all.Blocks, - all.Headers, collections, transactions, - receipts, - results, - suite.chainID, - suite.metrics, - connFactory, - false, - backend.DefaultMaxHeightRange, - nil, - enNodeIDs.Strings(), + lastFullBlockHeight, + suite.lockManager, + ) + + ingestEng, err := ingestion.New( suite.log, - backend.DefaultSnapshotHistoryLimit, + suite.net, + suite.state, + suite.me, + all.Blocks, + all.Results, + all.Receipts, + processedHeight, + collectionSyncer, + collectionExecutedMetric, nil, ) - - handler := access.NewHandler(backend, suite.chainID.Chain(), suite.finalizedHeaderCache, suite.me) - - // create the ingest engine - ingestEng, err := ingestion.New(suite.log, suite.net, suite.state, suite.me, suite.request, all.Blocks, all.Headers, collections, - transactions, results, receipts, metrics, collectionsToMarkFinalized, collectionsToMarkExecuted, blocksToMarkExecuted) require.NoError(suite.T(), err) // 1. Assume that follower engine updated the block storage and the protocol state. The block is reported as sealed - err = all.Blocks.Store(block) + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return all.Blocks.BatchStore(lctx, rw, proposal) + }) + }) + require.NoError(suite.T(), err) + + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockFinalizeBlock, func(fctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(fctx, rw, block.Height, block.ID()) + }) + }) require.NoError(suite.T(), err) - suite.sealedBlock = block.Header + + suite.sealedBlock = block.ToHeader() background, cancel := context.WithCancel(context.Background()) defer cancel() - ctx, _ := irrecoverable.WithSignaler(background) + ctx := irrecoverable.NewMockSignalerContext(suite.T(), background) ingestEng.Start(ctx) <-ingestEng.Ready() + defer func() { + cancel() + <-ingestEng.Done() + }() // 2. Ingest engine was notified by the follower engine about a new block. // Follower engine --> Ingest engine @@ -685,8 +787,12 @@ func (suite *Suite) TestGetSealedTransaction() { // 3. Request engine is used to request missing collection suite.request.On("EntityByID", collection.ID(), mock.Anything).Return() - // 4. Ingest engine receives the requested collection and all the execution receipts - ingestEng.OnCollection(originID, collection) + // 4. 
Indexer IndexCollection receives the requested collection and all the execution receipts + // Create a lock context for indexing + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertCollection, func(indexLctx lockctx.Context) error { + return indexer.IndexCollection(indexLctx, collection, collections, suite.log, module.CollectionExecutedMetric(collectionExecutedMetric)) + }) + require.NoError(suite.T(), err) for _, r := range executionReceipts { err = ingestEng.Process(channels.ReceiveReceipts, enNodeIDs[0], r) @@ -709,34 +815,43 @@ func (suite *Suite) TestGetSealedTransaction() { // TestGetTransactionResult tests different approaches to using the GetTransactionResult query, including using // transaction ID, block ID, and collection ID. func (suite *Suite) TestGetTransactionResult() { - unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) { - all := util.StorageLayer(suite.T(), db) - results := bstorage.NewExecutionResults(suite.metrics, db) - receipts := bstorage.NewExecutionReceipts(suite.metrics, db, results, bstorage.DefaultCacheSize) - + unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + all := store.InitAll(metrics.NewNoopCollector(), db) originID := unittest.IdentifierFixture() *suite.state = protocol.State{} // create block -> collection -> transactions - block, collection := suite.createChain() - blockNegative, collectionNegative := suite.createChain() + proposal, collection := suite.createChain() + block := proposal.Block + proposalNegative, collectionNegative := suite.createChain() + blockNegative := proposalNegative.Block blockId := block.ID() blockNegativeId := blockNegative.ID() finalSnapshot := new(protocol.Snapshot) - finalSnapshot.On("Head").Return(block.Header, nil) + finalSnapshot.On("Head").Return(suite.finalizedBlock, nil) suite.state.On("Params").Return(suite.params) suite.state.On("Final").Return(finalSnapshot) suite.state.On("Sealed").Return(suite.sealedSnapshot) - sealedBlock := unittest.GenesisFixture().Header + sealedBlock := unittest.Block.Genesis(flow.Emulator).ToHeader() // specifically for this test we will consider that sealed block is far behind finalized, so we get EXECUTED status suite.sealedSnapshot.On("Head").Return(sealedBlock, nil) - err := all.Blocks.Store(block) + err := unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return all.Blocks.BatchStore(lctx, rw, proposal) + }) + }) require.NoError(suite.T(), err) - err = all.Blocks.Store(blockNegative) + + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertBlock, func(lctx2 lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return all.Blocks.BatchStore(lctx2, rw, proposalNegative) + }) + }) require.NoError(suite.T(), err) suite.state.On("AtBlockID", blockId).Return(suite.sealedSnapshot) @@ -748,6 +863,9 @@ func (suite *Suite) TestGetTransactionResult() { allIdentities := append(colIdentities, enIdentities...) 
finalSnapshot.On("Identities", mock.Anything).Return(allIdentities, nil) + suite.state.On("AtBlockID", blockNegativeId).Return(suite.sealedSnapshot) + suite.sealedSnapshot.On("Identities", mock.Anything).Return(allIdentities, nil) + // assume execution node returns an empty list of events suite.execClient.On("GetTransactionResult", mock.Anything, mock.Anything).Return(&execproto.GetTransactionResultResponse{ Events: nil, @@ -759,48 +877,100 @@ func (suite *Suite) TestGetTransactionResult() { suite.request.On("Request", mock.Anything, mock.Anything).Return() // create a mock connection factory - connFactory := new(factorymock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) + connFactory := connectionmock.NewConnectionFactory(suite.T()) + connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mocks.MockCloser{}, nil) // initialize storage metrics := metrics.NewNoopCollector() - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) - err = collections.Store(collectionNegative) + transactions := store.NewTransactions(metrics, db) + collections := store.NewCollections(db, transactions) + _, err = collections.Store(collectionNegative) require.NoError(suite.T(), err) - collectionsToMarkFinalized, err := stdmap.NewTimes(100) + collectionsToMarkFinalized := stdmap.NewTimes(100) + collectionsToMarkExecuted := stdmap.NewTimes(100) + blocksToMarkExecuted := stdmap.NewTimes(100) + blockTransactions := stdmap.NewIdentifierMap(100) + + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + suite.log, + suite.state, + all.Receipts, + enNodeIDs, + nil, + ) + + bnd, err := backend.New(backend.Params{ + State: suite.state, + CollectionRPC: suite.collClient, + Blocks: all.Blocks, + Headers: all.Headers, + Collections: collections, + Transactions: transactions, + ExecutionReceipts: all.Receipts, + ExecutionResults: all.Results, + ChainID: suite.chainID, + AccessMetrics: suite.metrics, + ConnFactory: connFactory, + MaxHeightRange: events.DefaultMaxHeightRange, + Log: suite.log, + SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, + Communicator: node_communicator.NewNodeCommunicator(false), + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + ExecNodeIdentitiesProvider: execNodeIdentitiesProvider, + MaxScriptAndArgumentSize: commonrpc.DefaultAccessMaxRequestSize, + }) require.NoError(suite.T(), err) - collectionsToMarkExecuted, err := stdmap.NewTimes(100) + + handler := rpc.NewHandler(bnd, suite.chainID.Chain(), suite.finalizedHeaderCache, suite.me, subscription.DefaultMaxGlobalStreams) + + collectionExecutedMetric, err := indexer.NewCollectionExecutedMetricImpl( + suite.log, + metrics, + collectionsToMarkFinalized, + collectionsToMarkExecuted, + blocksToMarkExecuted, + collections, + all.Blocks, + blockTransactions, + ) require.NoError(suite.T(), err) - blocksToMarkExecuted, err := stdmap.NewTimes(100) + + processedHeightInitializer := store.NewConsumerProgress(db, module.ConsumeProgressIngestionEngineBlockHeight) + + lastFullBlockHeightProgress, err := store.NewConsumerProgress(db, module.ConsumeProgressLastFullBlockHeight). 
+ Initialize(suite.rootBlock.Height) require.NoError(suite.T(), err) - backend := backend.New(suite.state, - suite.collClient, - nil, + lastFullBlockHeight, err := counters.NewPersistentStrictMonotonicCounter(lastFullBlockHeightProgress) + require.NoError(suite.T(), err) + + collectionSyncer := ingestion.NewCollectionSyncer( + suite.log, + module.CollectionExecutedMetric(collectionExecutedMetric), + suite.request, + suite.state, all.Blocks, - all.Headers, collections, transactions, - receipts, - results, - suite.chainID, - suite.metrics, - connFactory, - false, - backend.DefaultMaxHeightRange, - nil, - enNodeIDs.Strings(), + lastFullBlockHeight, + suite.lockManager, + ) + + ingestEng, err := ingestion.New( suite.log, - backend.DefaultSnapshotHistoryLimit, + suite.net, + suite.state, + suite.me, + all.Blocks, + all.Results, + all.Receipts, + processedHeightInitializer, + collectionSyncer, + collectionExecutedMetric, nil, ) - - handler := access.NewHandler(backend, suite.chainID.Chain(), suite.finalizedHeaderCache, suite.me) - - // create the ingest engine - ingestEng, err := ingestion.New(suite.log, suite.net, suite.state, suite.me, suite.request, all.Blocks, all.Headers, collections, - transactions, results, receipts, metrics, collectionsToMarkFinalized, collectionsToMarkExecuted, blocksToMarkExecuted) require.NoError(suite.T(), err) background, cancel := context.WithCancel(context.Background()) @@ -825,16 +995,28 @@ func (suite *Suite) TestGetTransactionResult() { } ingestEng.OnFinalizedBlock(mb) - // Ingest engine receives the requested collection and all the execution receipts - ingestEng.OnCollection(originID, collection) + // Indexer IndexCollection receives the requested collection and all the execution receipts + // Create a lock context for indexing + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertCollection, func(indexLctx lockctx.Context) error { + return indexer.IndexCollection(indexLctx, collection, collections, suite.log, module.CollectionExecutedMetric(collectionExecutedMetric)) + }) + require.NoError(suite.T(), err) for _, r := range executionReceipts { err = ingestEng.Process(channels.ReceiveReceipts, enNodeIDs[0], r) require.NoError(suite.T(), err) } } - processExecutionReceipts(block, collection, enNodeIDs, originID, ingestEng) - processExecutionReceipts(blockNegative, collectionNegative, enNodeIDs, originID, ingestEng) + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockFinalizeBlock, func(fctx2 lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(fctx2, rw, block.Height, block.ID()) + }) + }) + require.NoError(suite.T(), err) + finalSnapshot.On("Head").Return(block.ToHeader(), nil) + + processExecutionReceipts(&block, collection, enNodeIDs, originID, ingestEng) + processExecutionReceipts(&blockNegative, collectionNegative, enNodeIDs, originID, ingestEng) txId := collection.Transactions[0].ID() collectionId := collection.ID() @@ -891,6 +1073,7 @@ func (suite *Suite) TestGetTransactionResult() { } resp, err := handler.GetTransactionResult(context.Background(), getReq) require.Error(suite.T(), err) + require.Contains(suite.T(), err.Error(), "failed to find: transaction not in block") require.Nil(suite.T(), resp) }) @@ -951,90 +1134,156 @@ func (suite *Suite) TestGetTransactionResult() { // TestExecuteScript tests the three execute Script related calls to make sure that the execution api is called with // the correct block id func (suite 
*Suite) TestExecuteScript() { - unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) { - all := util.StorageLayer(suite.T(), db) - transactions := bstorage.NewTransactions(suite.metrics, db) - collections := bstorage.NewCollections(db, transactions) - results := bstorage.NewExecutionResults(suite.metrics, db) - receipts := bstorage.NewExecutionReceipts(suite.metrics, db, results, bstorage.DefaultCacheSize) - + unittest.RunWithPebbleDB(suite.T(), func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + all := store.InitAll(metrics.NewNoopCollector(), db) identities := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) suite.sealedSnapshot.On("Identities", mock.Anything).Return(identities, nil) suite.finalSnapshot.On("Identities", mock.Anything).Return(identities, nil) // create a mock connection factory - connFactory := new(factorymock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) + connFactory := connectionmock.NewConnectionFactory(suite.T()) + connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mocks.MockCloser{}, nil) - suite.backend = backend.New(suite.state, - suite.collClient, - nil, - all.Blocks, - all.Headers, - collections, - transactions, - receipts, - results, - suite.chainID, - suite.metrics, - connFactory, - false, - backend.DefaultMaxHeightRange, - nil, - flow.IdentifierList(identities.NodeIDs()).Strings(), + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( suite.log, - backend.DefaultSnapshotHistoryLimit, + suite.state, + all.Receipts, nil, + identities.NodeIDs(), ) - handler := access.NewHandler(suite.backend, suite.chainID.Chain(), suite.finalizedHeaderCache, suite.me) + var err error + suite.backend, err = backend.New(backend.Params{ + State: suite.state, + CollectionRPC: suite.collClient, + Blocks: all.Blocks, + Headers: all.Headers, + Collections: all.Collections, + Transactions: all.Transactions, + ExecutionReceipts: all.Receipts, + ExecutionResults: all.Results, + ChainID: suite.chainID, + AccessMetrics: suite.metrics, + ConnFactory: connFactory, + MaxHeightRange: events.DefaultMaxHeightRange, + Log: suite.log, + SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit, + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ExecNodeIdentitiesProvider: execNodeIdentitiesProvider, + MaxScriptAndArgumentSize: commonrpc.DefaultAccessMaxRequestSize, + }) + require.NoError(suite.T(), err) + + handler := rpc.NewHandler(suite.backend, suite.chainID.Chain(), suite.finalizedHeaderCache, suite.me, subscription.DefaultMaxGlobalStreams) // initialize metrics related storage metrics := metrics.NewNoopCollector() - collectionsToMarkFinalized, err := stdmap.NewTimes(100) - require.NoError(suite.T(), err) - collectionsToMarkExecuted, err := stdmap.NewTimes(100) - require.NoError(suite.T(), err) - blocksToMarkExecuted, err := stdmap.NewTimes(100) + collectionsToMarkFinalized := stdmap.NewTimes(100) + collectionsToMarkExecuted := stdmap.NewTimes(100) + blocksToMarkExecuted := stdmap.NewTimes(100) + blockTransactions := stdmap.NewIdentifierMap(100) + + collectionExecutedMetric, err := indexer.NewCollectionExecutedMetricImpl( + suite.log, + metrics, + collectionsToMarkFinalized, + collectionsToMarkExecuted, + blocksToMarkExecuted, + 
all.Collections, + all.Blocks, + blockTransactions, + ) require.NoError(suite.T(), err) conduit := new(mocknetwork.Conduit) suite.net.On("Register", channels.ReceiveReceipts, mock.Anything).Return(conduit, nil). Once() - // create the ingest engine - ingestEng, err := ingestion.New(suite.log, suite.net, suite.state, suite.me, suite.request, all.Blocks, all.Headers, collections, - transactions, results, receipts, metrics, collectionsToMarkFinalized, collectionsToMarkExecuted, blocksToMarkExecuted) + + processedHeightInitializer := store.NewConsumerProgress(db, module.ConsumeProgressIngestionEngineBlockHeight) + + lastFullBlockHeightInitializer := store.NewConsumerProgress(db, module.ConsumeProgressLastFullBlockHeight) + lastFullBlockHeightProgress, err := lastFullBlockHeightInitializer.Initialize(suite.rootBlock.Height) + require.NoError(suite.T(), err) + + lastFullBlockHeight, err := counters.NewPersistentStrictMonotonicCounter(lastFullBlockHeightProgress) + require.NoError(suite.T(), err) + + collectionSyncer := ingestion.NewCollectionSyncer( + suite.log, + module.CollectionExecutedMetric(collectionExecutedMetric), + suite.request, + suite.state, + all.Blocks, + all.Collections, + all.Transactions, + lastFullBlockHeight, + suite.lockManager, + ) + + ingestEng, err := ingestion.New( + suite.log, + suite.net, + suite.state, + suite.me, + all.Blocks, + all.Results, + all.Receipts, + processedHeightInitializer, + collectionSyncer, + collectionExecutedMetric, + nil, + ) require.NoError(suite.T(), err) // create another block as a predecessor of the block created earlier prevBlock := unittest.BlockWithParentFixture(suite.finalizedBlock) // create a block and a seal pointing to that block - lastBlock := unittest.BlockWithParentFixture(prevBlock.Header) - err = all.Blocks.Store(lastBlock) + lastBlock := unittest.BlockWithParentFixture(prevBlock.ToHeader()) + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return all.Blocks.BatchStore(lctx, rw, unittest.ProposalFromBlock(lastBlock)) + }) + }) require.NoError(suite.T(), err) - err = db.Update(operation.IndexBlockHeight(lastBlock.Header.Height, lastBlock.ID())) + + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockFinalizeBlock, func(fctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(fctx, rw, lastBlock.Height, lastBlock.ID()) + }) + }) require.NoError(suite.T(), err) - //update latest sealed block - suite.sealedBlock = lastBlock.Header + // update latest sealed block + suite.sealedBlock = lastBlock.ToHeader() // create execution receipts for each of the execution node and the last block executionReceipts := unittest.ReceiptsForBlockFixture(lastBlock, identities.NodeIDs()) // notify the ingest engine about the receipts for _, r := range executionReceipts { - err = ingestEng.ProcessLocal(r) + err = ingestEng.Process(channels.ReceiveReceipts, unittest.IdentifierFixture(), r) require.NoError(suite.T(), err) } - err = all.Blocks.Store(prevBlock) - require.NoError(suite.T(), err) - err = db.Update(operation.IndexBlockHeight(prevBlock.Header.Height, prevBlock.ID())) + err = unittest.WithLocks(suite.T(), suite.lockManager, []string{storage.LockInsertBlock, storage.LockFinalizeBlock}, func(ctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + 
err := all.Blocks.BatchStore(ctx, rw, unittest.ProposalFromBlock(prevBlock)) + if err != nil { + return err + } + + return operation.IndexFinalizedBlockByHeight(ctx, rw, prevBlock.Height, prevBlock.ID()) + }) + }) require.NoError(suite.T(), err) // create execution receipts for each of the execution node and the previous block executionReceipts = unittest.ReceiptsForBlockFixture(prevBlock, identities.NodeIDs()) // notify the ingest engine about the receipts for _, r := range executionReceipts { - err = ingestEng.ProcessLocal(r) + err = ingestEng.Process(channels.ReceiveReceipts, unittest.IdentifierFixture(), r) require.NoError(suite.T(), err) } @@ -1111,7 +1360,7 @@ func (suite *Suite) TestExecuteScript() { expectedResp := setupExecClientMock(prevBlock.ID()) req := accessproto.ExecuteScriptAtBlockHeightRequest{ - BlockHeight: prevBlock.Header.Height, + BlockHeight: prevBlock.Height, Script: script, } actualResp, err := handler.ExecuteScriptAtBlockHeight(ctx, &req) @@ -1123,13 +1372,7 @@ func (suite *Suite) TestExecuteScript() { // TestAPICallNodeVersionInfo tests the GetNodeVersionInfo query and check response returns correct node version // information func (suite *Suite) TestAPICallNodeVersionInfo() { - suite.RunTest(func(handler *access.Handler, db *badger.DB, all *storage.All) { - sporkId := unittest.IdentifierFixture() - protocolVersion := uint(unittest.Uint64InRange(10, 30)) - - suite.params.On("SporkID").Return(sporkId, nil) - suite.params.On("ProtocolVersion").Return(protocolVersion, nil) - + suite.RunTest(func(handler *rpc.Handler, db storage.DB, all *store.All) { req := &accessproto.GetNodeVersionInfoRequest{} resp, err := handler.GetNodeVersionInfo(context.Background(), req) require.NoError(suite.T(), err) @@ -1137,10 +1380,11 @@ func (suite *Suite) TestAPICallNodeVersionInfo() { respNodeVersionInfo := resp.Info suite.Require().Equal(respNodeVersionInfo, &entitiesproto.NodeVersionInfo{ - Semver: build.Semver(), - Commit: build.Commit(), - SporkId: sporkId[:], - ProtocolVersion: uint64(protocolVersion), + Semver: build.Version(), + Commit: build.Commit(), + SporkId: suite.sporkID[:], + ProtocolVersion: 0, + ProtocolStateVersion: uint64(suite.protocolStateVersion), }) }) } @@ -1149,12 +1393,18 @@ func (suite *Suite) TestAPICallNodeVersionInfo() { // field in the response matches the finalized header from cache. It also tests that the LastFinalizedBlock field is // updated correctly when a block with a greater height is finalized. 
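The block writes in these migrated tests all follow one lock-protected batch pattern, visible in the hunks above. A minimal sketch of that pattern, assuming the test-scope names t, lockManager, db, blocks, and block from the surrounding test:

    // Acquire the insert and finalize locks, then store and finalize the block in one batch.
    err := unittest.WithLocks(t, lockManager, []string{storage.LockInsertBlock, storage.LockFinalizeBlock}, func(lctx lockctx.Context) error {
        return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
            // Store the proposal wrapping the block under the insert lock.
            if err := blocks.BatchStore(lctx, rw, unittest.ProposalFromBlock(block)); err != nil {
                return err
            }
            // Index the block as finalized at its height under the finalize lock.
            return operation.IndexFinalizedBlockByHeight(lctx, rw, block.Height, block.ID())
        })
    })
    require.NoError(t, err)

This replaces the single db.Update call the tests issued before the Badger-to-Pebble storage migration.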
func (suite *Suite) TestLastFinalizedBlockHeightResult() { - suite.RunTest(func(handler *access.Handler, db *badger.DB, all *storage.All) { + suite.RunTest(func(handler *rpc.Handler, db storage.DB, all *store.All) { block := unittest.BlockWithParentFixture(suite.finalizedBlock) - newFinalizedBlock := unittest.BlockWithParentFixture(block.Header) - - // store new block - require.NoError(suite.T(), all.Blocks.Store(block)) + proposal := unittest.ProposalFromBlock(block) + newFinalizedBlock := unittest.BlockWithParentFixture(block.ToHeader()) + + err := unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + // store new block + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return all.Blocks.BatchStore(lctx, rw, proposal) + }) + }) + require.NoError(suite.T(), err) assertFinalizedBlockHeader := func(resp *accessproto.BlockHeaderResponse, err error) { require.NoError(suite.T(), err) @@ -1178,18 +1428,18 @@ func (suite *Suite) TestLastFinalizedBlockHeightResult() { resp, err := handler.GetBlockHeaderByID(context.Background(), req) assertFinalizedBlockHeader(resp, err) - suite.finalizedBlock = newFinalizedBlock.Header + suite.finalizedBlock = newFinalizedBlock.ToHeader() resp, err = handler.GetBlockHeaderByID(context.Background(), req) assertFinalizedBlockHeader(resp, err) }) } -func (suite *Suite) createChain() (*flow.Block, *flow.Collection) { +func (suite *Suite) createChain() (*flow.Proposal, *flow.Collection) { collection := unittest.CollectionFixture(10) refBlockID := unittest.IdentifierFixture() // prepare cluster committee members - clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole(flow.RoleCollection)) + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) // guarantee signers must be cluster committee members, so that access will fetch collection from // the signers that are specified by guarantee.SignerIndices indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) @@ -1200,21 +1450,24 @@ func (suite *Suite) createChain() (*flow.Block, *flow.Collection) { ReferenceBlockID: refBlockID, SignerIndices: indices, } - block := unittest.BlockWithParentFixture(suite.finalizedBlock) - block.SetPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantee))) + block := unittest.BlockWithParentAndPayload( + suite.finalizedBlock, + unittest.PayloadFixture(unittest.WithGuarantees(guarantee)), + ) + proposal := unittest.ProposalFromBlock(block) cluster := new(protocol.Cluster) - cluster.On("Members").Return(clusterCommittee, nil) - epoch := new(protocol.Epoch) + cluster.On("Members").Return(clusterCommittee.ToSkeleton(), nil) + epoch := new(protocol.CommittedEpoch) epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) epochs := new(protocol.EpochQuery) - epochs.On("Current").Return(epoch) + epochs.On("Current").Return(epoch, nil) snap := new(protocol.Snapshot) snap.On("Epochs").Return(epochs).Maybe() snap.On("Params").Return(suite.params).Maybe() - snap.On("Head").Return(block.Header, nil).Maybe() + snap.On("Head").Return(block.ToHeader(), nil).Maybe() suite.state.On("AtBlockID", refBlockID).Return(snap) - return block, &collection + return proposal, &collection } diff --git a/engine/access/apiproxy/access_api_proxy.go b/engine/access/apiproxy/access_api_proxy.go index d72ec5bb5e2..79d7dc15fdd 100644 --- a/engine/access/apiproxy/access_api_proxy.go +++ 
b/engine/access/apiproxy/access_api_proxy.go @@ -2,42 +2,60 @@ package apiproxy import ( "context" - "fmt" - "sync" - "time" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/status" "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" - "github.com/onflow/flow-go/engine/access/rpc/backend" - "github.com/onflow/flow-go/engine/protocol" + "github.com/onflow/flow-go/engine/access/rpc" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/grpc/forwarder" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/utils/grpcutils" +) + +const ( + LocalApiService = "local" + UpstreamApiService = "upstream" ) // FlowAccessAPIRouter is a structure that represents the routing proxy algorithm. // It splits requests between a local and a remote API service. type FlowAccessAPIRouter struct { - Logger zerolog.Logger + logger zerolog.Logger + metrics *metrics.ObserverCollector + upstream *FlowAccessAPIForwarder + local *rpc.Handler + useIndex bool +} + +type Params struct { + Log zerolog.Logger Metrics *metrics.ObserverCollector Upstream *FlowAccessAPIForwarder - Observer *protocol.Handler + Local *rpc.Handler + UseIndex bool +} + +// NewFlowAccessAPIRouter creates FlowAccessAPIRouter instance +func NewFlowAccessAPIRouter(params Params) *FlowAccessAPIRouter { + h := &FlowAccessAPIRouter{ + logger: params.Log, + metrics: params.Metrics, + upstream: params.Upstream, + local: params.Local, + useIndex: params.UseIndex, + } + + return h } func (h *FlowAccessAPIRouter) log(handler, rpc string, err error) { code := status.Code(err) - h.Metrics.RecordRPC(handler, rpc, code) + h.metrics.RecordRPC(handler, rpc, code) - logger := h.Logger.With(). + logger := h.logger.With(). Str("handler", handler). Str("grpc_method", rpc). Str("grpc_code", code.String()). @@ -51,519 +69,793 @@ func (h *FlowAccessAPIRouter) log(handler, rpc string, err error) { logger.Info().Msg("request succeeded") } -// reconnectingClient returns an active client, or -// creates one, if the last one is not ready anymore. 
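The hand-rolled dialing and reconnection logic removed below is superseded by the shared connection.ConnectionFactory. A sketch of the replacement wiring, mirroring the updated proxy test later in this diff; the upstream address is a placeholder:

    // Build a connection factory whose Manager owns dialing, caching, and retry behavior.
    connectionFactory := &connection.ConnectionFactoryImpl{
        AccessMetrics: metrics.NewNoopCollector(),
        CollectionConfig: connection.Config{
            Timeout:            time.Second,
            MaxRequestMsgSize:  commonrpc.DefaultCollectionMaxRequestSize,
            MaxResponseMsgSize: commonrpc.DefaultCollectionMaxResponseSize,
        },
        Manager: connection.NewManager(
            unittest.Logger(),
            metrics.NewNoopCollector(),
            nil, // no connection cache
            connection.CircuitBreakerConfig{},
            grpcutils.NoCompressor,
        ),
    }
    // The forwarder borrows clients from the factory instead of dialing upstreams itself.
    upstreams := flow.IdentitySkeletonList{{Address: "access.example.org:9000"}} // placeholder address
    fwd, err := NewFlowAccessAPIForwarder(upstreams, connectionFactory)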
-func (h *FlowAccessAPIForwarder) reconnectingClient(i int) error { - timeout := h.timeout - - if h.connections[i] == nil || h.connections[i].GetState() != connectivity.Ready { - identity := h.ids[i] - var connection *grpc.ClientConn - var err error - if identity.NetworkPubKey == nil { - connection, err = grpc.Dial( - identity.Address, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(h.maxMsgSize))), - grpc.WithTransportCredentials(insecure.NewCredentials()), - backend.WithClientUnaryInterceptor(timeout)) - if err != nil { - return err - } - } else { - tlsConfig, err := grpcutils.DefaultClientTLSConfig(identity.NetworkPubKey) - if err != nil { - return fmt.Errorf("failed to get default TLS client config using public flow networking key %s %w", identity.NetworkPubKey.String(), err) - } - - connection, err = grpc.Dial( - identity.Address, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(h.maxMsgSize))), - grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), - backend.WithClientUnaryInterceptor(timeout)) - if err != nil { - return fmt.Errorf("cannot connect to %s %w", identity.Address, err) - } - } - connection.Connect() - time.Sleep(1 * time.Second) - state := connection.GetState() - if state != connectivity.Ready && state != connectivity.Connecting { - return fmt.Errorf("%v", state) - } - h.connections[i] = connection - h.upstream[i] = access.NewAccessAPIClient(connection) - } - - return nil -} - -// faultTolerantClient implements an upstream connection that reconnects on errors -// a reasonable amount of time. -func (h *FlowAccessAPIForwarder) faultTolerantClient() (access.AccessAPIClient, error) { - if h.upstream == nil || len(h.upstream) == 0 { - return nil, status.Errorf(codes.Unimplemented, "method not implemented") - } - - // Reasoning: A retry count of three gives an acceptable 5% failure ratio from a 37% failure ratio. - // A bigger number is problematic due to the DNS resolve and connection times, - // plus the need to log and debug each individual connection failure. - // - // This reasoning eliminates the need of making this parameter configurable. - // The logic works rolling over a single connection as well making clean code. - const retryMax = 3 - - h.lock.Lock() - defer h.lock.Unlock() - - var err error - for i := 0; i < retryMax; i++ { - h.roundRobin++ - h.roundRobin = h.roundRobin % len(h.upstream) - err = h.reconnectingClient(h.roundRobin) - if err != nil { - continue - } - state := h.connections[h.roundRobin].GetState() - if state != connectivity.Ready && state != connectivity.Connecting { - continue - } - return h.upstream[h.roundRobin], nil - } - - return nil, status.Errorf(codes.Unavailable, err.Error()) -} - // Ping pings the service. It is special in the sense that it responds successful, // only if all underlying services are ready. 
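Most account, script, event, and collection RPCs on the router below follow the same dispatch rule: when useIndex is set, the request is served by the local index-backed handler; otherwise it is forwarded upstream. Block and header reads are always served locally, transaction results still go upstream pending local error-message support, and subscriptions are always local. The shape, sketched once with placeholder names (getSomething, someRequest, and someResponse stand in for the per-method identifiers and protobuf messages):

    func (h *FlowAccessAPIRouter) getSomething(ctx context.Context, req *someRequest) (*someResponse, error) {
        if h.useIndex {
            // Serve from the local, index-backed handler and record the outcome.
            res, err := h.local.GetSomething(ctx, req)
            h.log(LocalApiService, "GetSomething", err)
            return res, err
        }
        // Otherwise forward the request to an upstream access node.
        res, err := h.upstream.GetSomething(ctx, req)
        h.log(UpstreamApiService, "GetSomething", err)
        return res, err
    }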
func (h *FlowAccessAPIRouter) Ping(context context.Context, req *access.PingRequest) (*access.PingResponse, error) { - h.log("observer", "Ping", nil) + h.log(LocalApiService, "Ping", nil) return &access.PingResponse{}, nil } func (h *FlowAccessAPIRouter) GetNodeVersionInfo(ctx context.Context, request *access.GetNodeVersionInfoRequest) (*access.GetNodeVersionInfoResponse, error) { - res, err := h.Observer.GetNodeVersionInfo(ctx, request) - h.log("observer", "GetNodeVersionInfo", err) + res, err := h.local.GetNodeVersionInfo(ctx, request) + h.log(LocalApiService, "GetNodeVersionInfo", err) return res, err } func (h *FlowAccessAPIRouter) GetLatestBlockHeader(context context.Context, req *access.GetLatestBlockHeaderRequest) (*access.BlockHeaderResponse, error) { - res, err := h.Observer.GetLatestBlockHeader(context, req) - h.log("observer", "GetLatestBlockHeader", err) + res, err := h.local.GetLatestBlockHeader(context, req) + h.log(LocalApiService, "GetLatestBlockHeader", err) return res, err } func (h *FlowAccessAPIRouter) GetBlockHeaderByID(context context.Context, req *access.GetBlockHeaderByIDRequest) (*access.BlockHeaderResponse, error) { - res, err := h.Observer.GetBlockHeaderByID(context, req) - h.log("observer", "GetBlockHeaderByID", err) + res, err := h.local.GetBlockHeaderByID(context, req) + h.log(LocalApiService, "GetBlockHeaderByID", err) return res, err } func (h *FlowAccessAPIRouter) GetBlockHeaderByHeight(context context.Context, req *access.GetBlockHeaderByHeightRequest) (*access.BlockHeaderResponse, error) { - res, err := h.Observer.GetBlockHeaderByHeight(context, req) - h.log("observer", "GetBlockHeaderByHeight", err) + res, err := h.local.GetBlockHeaderByHeight(context, req) + h.log(LocalApiService, "GetBlockHeaderByHeight", err) return res, err } func (h *FlowAccessAPIRouter) GetLatestBlock(context context.Context, req *access.GetLatestBlockRequest) (*access.BlockResponse, error) { - res, err := h.Observer.GetLatestBlock(context, req) - h.log("observer", "GetLatestBlock", err) + res, err := h.local.GetLatestBlock(context, req) + h.log(LocalApiService, "GetLatestBlock", err) return res, err } func (h *FlowAccessAPIRouter) GetBlockByID(context context.Context, req *access.GetBlockByIDRequest) (*access.BlockResponse, error) { - res, err := h.Observer.GetBlockByID(context, req) - h.log("observer", "GetBlockByID", err) + res, err := h.local.GetBlockByID(context, req) + h.log(LocalApiService, "GetBlockByID", err) return res, err } func (h *FlowAccessAPIRouter) GetBlockByHeight(context context.Context, req *access.GetBlockByHeightRequest) (*access.BlockResponse, error) { - res, err := h.Observer.GetBlockByHeight(context, req) - h.log("observer", "GetBlockByHeight", err) + res, err := h.local.GetBlockByHeight(context, req) + h.log(LocalApiService, "GetBlockByHeight", err) return res, err } func (h *FlowAccessAPIRouter) GetCollectionByID(context context.Context, req *access.GetCollectionByIDRequest) (*access.CollectionResponse, error) { - res, err := h.Upstream.GetCollectionByID(context, req) - h.log("upstream", "GetCollectionByID", err) + if h.useIndex { + res, err := h.local.GetCollectionByID(context, req) + h.log(LocalApiService, "GetCollectionByID", err) + return res, err + } + + res, err := h.upstream.GetCollectionByID(context, req) + h.log(UpstreamApiService, "GetCollectionByID", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetFullCollectionByID(context context.Context, req *access.GetFullCollectionByIDRequest) (*access.FullCollectionResponse, error) { + if 
h.useIndex { + res, err := h.local.GetFullCollectionByID(context, req) + h.log(LocalApiService, "GetFullCollectionByID", err) + return res, err + } + + res, err := h.upstream.GetFullCollectionByID(context, req) + h.log(UpstreamApiService, "GetFullCollectionByID", err) return res, err } func (h *FlowAccessAPIRouter) SendTransaction(context context.Context, req *access.SendTransactionRequest) (*access.SendTransactionResponse, error) { - res, err := h.Upstream.SendTransaction(context, req) - h.log("upstream", "SendTransaction", err) + res, err := h.upstream.SendTransaction(context, req) + h.log(UpstreamApiService, "SendTransaction", err) return res, err } func (h *FlowAccessAPIRouter) GetTransaction(context context.Context, req *access.GetTransactionRequest) (*access.TransactionResponse, error) { - res, err := h.Upstream.GetTransaction(context, req) - h.log("upstream", "GetTransaction", err) + if h.useIndex { + res, err := h.local.GetTransaction(context, req) + h.log(LocalApiService, "GetTransaction", err) + return res, err + } + + res, err := h.upstream.GetTransaction(context, req) + h.log(UpstreamApiService, "GetTransaction", err) return res, err } func (h *FlowAccessAPIRouter) GetTransactionResult(context context.Context, req *access.GetTransactionRequest) (*access.TransactionResultResponse, error) { - res, err := h.Upstream.GetTransactionResult(context, req) - h.log("upstream", "GetTransactionResult", err) + // TODO: add implementation for transaction error message before adding local impl + + res, err := h.upstream.GetTransactionResult(context, req) + h.log(UpstreamApiService, "GetTransactionResult", err) return res, err } func (h *FlowAccessAPIRouter) GetTransactionResultsByBlockID(context context.Context, req *access.GetTransactionsByBlockIDRequest) (*access.TransactionResultsResponse, error) { - res, err := h.Upstream.GetTransactionResultsByBlockID(context, req) - h.log("upstream", "GetTransactionResultsByBlockID", err) + // TODO: add implementation for transaction error message before adding local impl + + res, err := h.upstream.GetTransactionResultsByBlockID(context, req) + h.log(UpstreamApiService, "GetTransactionResultsByBlockID", err) return res, err } func (h *FlowAccessAPIRouter) GetTransactionsByBlockID(context context.Context, req *access.GetTransactionsByBlockIDRequest) (*access.TransactionsResponse, error) { - res, err := h.Upstream.GetTransactionsByBlockID(context, req) - h.log("upstream", "GetTransactionsByBlockID", err) + if h.useIndex { + res, err := h.local.GetTransactionsByBlockID(context, req) + h.log(LocalApiService, "GetTransactionsByBlockID", err) + return res, err + } + + res, err := h.upstream.GetTransactionsByBlockID(context, req) + h.log(UpstreamApiService, "GetTransactionsByBlockID", err) return res, err } func (h *FlowAccessAPIRouter) GetTransactionResultByIndex(context context.Context, req *access.GetTransactionByIndexRequest) (*access.TransactionResultResponse, error) { - res, err := h.Upstream.GetTransactionResultByIndex(context, req) - h.log("upstream", "GetTransactionResultByIndex", err) + // TODO: add implementation for transaction error message before adding local impl + + res, err := h.upstream.GetTransactionResultByIndex(context, req) + h.log(UpstreamApiService, "GetTransactionResultByIndex", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetSystemTransaction(context context.Context, req *access.GetSystemTransactionRequest) (*access.TransactionResponse, error) { + if h.useIndex { + res, err := h.local.GetSystemTransaction(context, req) + 
h.log(LocalApiService, "GetSystemTransaction", err) + return res, err + } + + res, err := h.upstream.GetSystemTransaction(context, req) + h.log(UpstreamApiService, "GetSystemTransaction", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetSystemTransactionResult(context context.Context, req *access.GetSystemTransactionResultRequest) (*access.TransactionResultResponse, error) { + res, err := h.upstream.GetSystemTransactionResult(context, req) + h.log(UpstreamApiService, "GetSystemTransactionResult", err) return res, err } func (h *FlowAccessAPIRouter) GetAccount(context context.Context, req *access.GetAccountRequest) (*access.GetAccountResponse, error) { - res, err := h.Upstream.GetAccount(context, req) - h.log("upstream", "GetAccount", err) + if h.useIndex { + res, err := h.local.GetAccount(context, req) + h.log(LocalApiService, "GetAccount", err) + return res, err + } + + res, err := h.upstream.GetAccount(context, req) + h.log(UpstreamApiService, "GetAccount", err) return res, err } func (h *FlowAccessAPIRouter) GetAccountAtLatestBlock(context context.Context, req *access.GetAccountAtLatestBlockRequest) (*access.AccountResponse, error) { - res, err := h.Upstream.GetAccountAtLatestBlock(context, req) - h.log("upstream", "GetAccountAtLatestBlock", err) + if h.useIndex { + res, err := h.local.GetAccountAtLatestBlock(context, req) + h.log(LocalApiService, "GetAccountAtLatestBlock", err) + return res, err + } + + res, err := h.upstream.GetAccountAtLatestBlock(context, req) + h.log(UpstreamApiService, "GetAccountAtLatestBlock", err) return res, err } func (h *FlowAccessAPIRouter) GetAccountAtBlockHeight(context context.Context, req *access.GetAccountAtBlockHeightRequest) (*access.AccountResponse, error) { - res, err := h.Upstream.GetAccountAtBlockHeight(context, req) - h.log("upstream", "GetAccountAtBlockHeight", err) + if h.useIndex { + res, err := h.local.GetAccountAtBlockHeight(context, req) + h.log(LocalApiService, "GetAccountAtBlockHeight", err) + return res, err + } + + res, err := h.upstream.GetAccountAtBlockHeight(context, req) + h.log(UpstreamApiService, "GetAccountAtBlockHeight", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetAccountBalanceAtLatestBlock(context context.Context, req *access.GetAccountBalanceAtLatestBlockRequest) (*access.AccountBalanceResponse, error) { + if h.useIndex { + res, err := h.local.GetAccountBalanceAtLatestBlock(context, req) + h.log(LocalApiService, "GetAccountBalanceAtLatestBlock", err) + return res, err + } + + res, err := h.upstream.GetAccountBalanceAtLatestBlock(context, req) + h.log(UpstreamApiService, "GetAccountBalanceAtLatestBlock", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetAccountBalanceAtBlockHeight(context context.Context, req *access.GetAccountBalanceAtBlockHeightRequest) (*access.AccountBalanceResponse, error) { + if h.useIndex { + res, err := h.local.GetAccountBalanceAtBlockHeight(context, req) + h.log(LocalApiService, "GetAccountBalanceAtBlockHeight", err) + return res, err + } + + res, err := h.upstream.GetAccountBalanceAtBlockHeight(context, req) + h.log(UpstreamApiService, "GetAccountBalanceAtBlockHeight", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetAccountKeyAtLatestBlock(context context.Context, req *access.GetAccountKeyAtLatestBlockRequest) (*access.AccountKeyResponse, error) { + if h.useIndex { + res, err := h.local.GetAccountKeyAtLatestBlock(context, req) + h.log(LocalApiService, "GetAccountKeyAtLatestBlock", err) + return res, err + } + + res, err := 
h.upstream.GetAccountKeyAtLatestBlock(context, req) + h.log(UpstreamApiService, "GetAccountKeyAtLatestBlock", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetAccountKeysAtLatestBlock(context context.Context, req *access.GetAccountKeysAtLatestBlockRequest) (*access.AccountKeysResponse, error) { + if h.useIndex { + res, err := h.local.GetAccountKeysAtLatestBlock(context, req) + h.log(LocalApiService, "GetAccountKeysAtLatestBlock", err) + return res, err + } + + res, err := h.upstream.GetAccountKeysAtLatestBlock(context, req) + h.log(UpstreamApiService, "GetAccountKeysAtLatestBlock", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetAccountKeyAtBlockHeight(context context.Context, req *access.GetAccountKeyAtBlockHeightRequest) (*access.AccountKeyResponse, error) { + if h.useIndex { + res, err := h.local.GetAccountKeyAtBlockHeight(context, req) + h.log(LocalApiService, "GetAccountKeyAtBlockHeight", err) + return res, err + } + + res, err := h.upstream.GetAccountKeyAtBlockHeight(context, req) + h.log(UpstreamApiService, "GetAccountKeyAtBlockHeight", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetAccountKeysAtBlockHeight(context context.Context, req *access.GetAccountKeysAtBlockHeightRequest) (*access.AccountKeysResponse, error) { + if h.useIndex { + res, err := h.local.GetAccountKeysAtBlockHeight(context, req) + h.log(LocalApiService, "GetAccountKeysAtBlockHeight", err) + return res, err + } + + res, err := h.upstream.GetAccountKeysAtBlockHeight(context, req) + h.log(UpstreamApiService, "GetAccountKeysAtBlockHeight", err) return res, err } func (h *FlowAccessAPIRouter) ExecuteScriptAtLatestBlock(context context.Context, req *access.ExecuteScriptAtLatestBlockRequest) (*access.ExecuteScriptResponse, error) { - res, err := h.Upstream.ExecuteScriptAtLatestBlock(context, req) - h.log("upstream", "ExecuteScriptAtLatestBlock", err) + if h.useIndex { + res, err := h.local.ExecuteScriptAtLatestBlock(context, req) + h.log(LocalApiService, "ExecuteScriptAtLatestBlock", err) + return res, err + } + + res, err := h.upstream.ExecuteScriptAtLatestBlock(context, req) + h.log(UpstreamApiService, "ExecuteScriptAtLatestBlock", err) return res, err } func (h *FlowAccessAPIRouter) ExecuteScriptAtBlockID(context context.Context, req *access.ExecuteScriptAtBlockIDRequest) (*access.ExecuteScriptResponse, error) { - res, err := h.Upstream.ExecuteScriptAtBlockID(context, req) - h.log("upstream", "ExecuteScriptAtBlockID", err) + if h.useIndex { + res, err := h.local.ExecuteScriptAtBlockID(context, req) + h.log(LocalApiService, "ExecuteScriptAtBlockID", err) + return res, err + } + + res, err := h.upstream.ExecuteScriptAtBlockID(context, req) + h.log(UpstreamApiService, "ExecuteScriptAtBlockID", err) return res, err } func (h *FlowAccessAPIRouter) ExecuteScriptAtBlockHeight(context context.Context, req *access.ExecuteScriptAtBlockHeightRequest) (*access.ExecuteScriptResponse, error) { - res, err := h.Upstream.ExecuteScriptAtBlockHeight(context, req) - h.log("upstream", "ExecuteScriptAtBlockHeight", err) + if h.useIndex { + res, err := h.local.ExecuteScriptAtBlockHeight(context, req) + h.log(LocalApiService, "ExecuteScriptAtBlockHeight", err) + return res, err + } + + res, err := h.upstream.ExecuteScriptAtBlockHeight(context, req) + h.log(UpstreamApiService, "ExecuteScriptAtBlockHeight", err) return res, err } func (h *FlowAccessAPIRouter) GetEventsForHeightRange(context context.Context, req *access.GetEventsForHeightRangeRequest) (*access.EventsResponse, error) { - res, err := 
h.Upstream.GetEventsForHeightRange(context, req) - h.log("upstream", "GetEventsForHeightRange", err) + if h.useIndex { + res, err := h.local.GetEventsForHeightRange(context, req) + h.log(LocalApiService, "GetEventsForHeightRange", err) + return res, err + } + + res, err := h.upstream.GetEventsForHeightRange(context, req) + h.log(UpstreamApiService, "GetEventsForHeightRange", err) return res, err } func (h *FlowAccessAPIRouter) GetEventsForBlockIDs(context context.Context, req *access.GetEventsForBlockIDsRequest) (*access.EventsResponse, error) { - res, err := h.Upstream.GetEventsForBlockIDs(context, req) - h.log("upstream", "GetEventsForBlockIDs", err) + if h.useIndex { + res, err := h.local.GetEventsForBlockIDs(context, req) + h.log(LocalApiService, "GetEventsForBlockIDs", err) + return res, err + } + + res, err := h.upstream.GetEventsForBlockIDs(context, req) + h.log(UpstreamApiService, "GetEventsForBlockIDs", err) return res, err } func (h *FlowAccessAPIRouter) GetNetworkParameters(context context.Context, req *access.GetNetworkParametersRequest) (*access.GetNetworkParametersResponse, error) { - res, err := h.Observer.GetNetworkParameters(context, req) - h.log("observer", "GetNetworkParameters", err) + res, err := h.local.GetNetworkParameters(context, req) + h.log(LocalApiService, "GetNetworkParameters", err) return res, err } func (h *FlowAccessAPIRouter) GetLatestProtocolStateSnapshot(context context.Context, req *access.GetLatestProtocolStateSnapshotRequest) (*access.ProtocolStateSnapshotResponse, error) { - res, err := h.Observer.GetLatestProtocolStateSnapshot(context, req) - h.log("observer", "GetLatestProtocolStateSnapshot", err) + res, err := h.local.GetLatestProtocolStateSnapshot(context, req) + h.log(LocalApiService, "GetLatestProtocolStateSnapshot", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetProtocolStateSnapshotByBlockID(context context.Context, req *access.GetProtocolStateSnapshotByBlockIDRequest) (*access.ProtocolStateSnapshotResponse, error) { + res, err := h.local.GetProtocolStateSnapshotByBlockID(context, req) + h.log(LocalApiService, "GetProtocolStateSnapshotByBlockID", err) + return res, err +} + +func (h *FlowAccessAPIRouter) GetProtocolStateSnapshotByHeight(context context.Context, req *access.GetProtocolStateSnapshotByHeightRequest) (*access.ProtocolStateSnapshotResponse, error) { + res, err := h.local.GetProtocolStateSnapshotByHeight(context, req) + h.log(LocalApiService, "GetProtocolStateSnapshotByHeight", err) return res, err } func (h *FlowAccessAPIRouter) GetExecutionResultForBlockID(context context.Context, req *access.GetExecutionResultForBlockIDRequest) (*access.ExecutionResultForBlockIDResponse, error) { - res, err := h.Upstream.GetExecutionResultForBlockID(context, req) - h.log("upstream", "GetExecutionResultForBlockID", err) + res, err := h.upstream.GetExecutionResultForBlockID(context, req) + h.log(UpstreamApiService, "GetExecutionResultForBlockID", err) return res, err } +func (h *FlowAccessAPIRouter) GetExecutionResultByID(context context.Context, req *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error) { + if h.useIndex { + res, err := h.local.GetExecutionResultByID(context, req) + h.log(LocalApiService, "GetExecutionResultByID", err) + return res, err + } + + res, err := h.upstream.GetExecutionResultByID(context, req) + h.log(UpstreamApiService, "GetExecutionResultByID", err) + return res, err +} + +func (h *FlowAccessAPIRouter) SubscribeBlocksFromStartBlockID(req 
*access.SubscribeBlocksFromStartBlockIDRequest, server access.AccessAPI_SubscribeBlocksFromStartBlockIDServer) error { + err := h.local.SubscribeBlocksFromStartBlockID(req, server) + h.log(LocalApiService, "SubscribeBlocksFromStartBlockID", err) + return err +} + +func (h *FlowAccessAPIRouter) SubscribeBlocksFromStartHeight(req *access.SubscribeBlocksFromStartHeightRequest, server access.AccessAPI_SubscribeBlocksFromStartHeightServer) error { + err := h.local.SubscribeBlocksFromStartHeight(req, server) + h.log(LocalApiService, "SubscribeBlocksFromStartHeight", err) + return err +} + +func (h *FlowAccessAPIRouter) SubscribeBlocksFromLatest(req *access.SubscribeBlocksFromLatestRequest, server access.AccessAPI_SubscribeBlocksFromLatestServer) error { + err := h.local.SubscribeBlocksFromLatest(req, server) + h.log(LocalApiService, "SubscribeBlocksFromLatest", err) + return err +} + +func (h *FlowAccessAPIRouter) SubscribeBlockHeadersFromStartBlockID(req *access.SubscribeBlockHeadersFromStartBlockIDRequest, server access.AccessAPI_SubscribeBlockHeadersFromStartBlockIDServer) error { + err := h.local.SubscribeBlockHeadersFromStartBlockID(req, server) + h.log(LocalApiService, "SubscribeBlockHeadersFromStartBlockID", err) + return err +} + +func (h *FlowAccessAPIRouter) SubscribeBlockHeadersFromStartHeight(req *access.SubscribeBlockHeadersFromStartHeightRequest, server access.AccessAPI_SubscribeBlockHeadersFromStartHeightServer) error { + err := h.local.SubscribeBlockHeadersFromStartHeight(req, server) + h.log(LocalApiService, "SubscribeBlockHeadersFromStartHeight", err) + return err +} + +func (h *FlowAccessAPIRouter) SubscribeBlockHeadersFromLatest(req *access.SubscribeBlockHeadersFromLatestRequest, server access.AccessAPI_SubscribeBlockHeadersFromLatestServer) error { + err := h.local.SubscribeBlockHeadersFromLatest(req, server) + h.log(LocalApiService, "SubscribeBlockHeadersFromLatest", err) + return err +} + +func (h *FlowAccessAPIRouter) SubscribeBlockDigestsFromStartBlockID(req *access.SubscribeBlockDigestsFromStartBlockIDRequest, server access.AccessAPI_SubscribeBlockDigestsFromStartBlockIDServer) error { + err := h.local.SubscribeBlockDigestsFromStartBlockID(req, server) + h.log(LocalApiService, "SubscribeBlockDigestsFromStartBlockID", err) + return err +} + +func (h *FlowAccessAPIRouter) SubscribeBlockDigestsFromStartHeight(req *access.SubscribeBlockDigestsFromStartHeightRequest, server access.AccessAPI_SubscribeBlockDigestsFromStartHeightServer) error { + err := h.local.SubscribeBlockDigestsFromStartHeight(req, server) + h.log(LocalApiService, "SubscribeBlockDigestsFromStartHeight", err) + return err +} + +func (h *FlowAccessAPIRouter) SubscribeBlockDigestsFromLatest(req *access.SubscribeBlockDigestsFromLatestRequest, server access.AccessAPI_SubscribeBlockDigestsFromLatestServer) error { + err := h.local.SubscribeBlockDigestsFromLatest(req, server) + h.log(LocalApiService, "SubscribeBlockDigestsFromLatest", err) + return err +} + +func (h *FlowAccessAPIRouter) SendAndSubscribeTransactionStatuses(req *access.SendAndSubscribeTransactionStatusesRequest, server access.AccessAPI_SendAndSubscribeTransactionStatusesServer) error { + // SendAndSubscribeTransactionStatuses is not implemented for observer yet + return status.Errorf(codes.Unimplemented, "method SendAndSubscribeTransactionStatuses not implemented") +} + // FlowAccessAPIForwarder forwards all requests to a set of upstream access nodes or observers type FlowAccessAPIForwarder struct { - lock sync.Mutex - roundRobin int - ids 
flow.IdentityList - upstream []access.AccessAPIClient - connections []*grpc.ClientConn - timeout time.Duration - maxMsgSize uint -} - -func NewFlowAccessAPIForwarder(identities flow.IdentityList, timeout time.Duration, maxMsgSize uint) (*FlowAccessAPIForwarder, error) { - forwarder := &FlowAccessAPIForwarder{maxMsgSize: maxMsgSize} - err := forwarder.setFlowAccessAPI(identities, timeout) - return forwarder, err -} - -// setFlowAccessAPI sets a backend access API that forwards some requests to an upstream node. -// It is used by Observer services, Blockchain Data Service, etc. -// Make sure that this is just for observation and not a staked participant in the flow network. -// This means that observers see a copy of the data but there is no interaction to ensure integrity from the root block. -func (ret *FlowAccessAPIForwarder) setFlowAccessAPI(accessNodeAddressAndPort flow.IdentityList, timeout time.Duration) error { - ret.timeout = timeout - ret.ids = accessNodeAddressAndPort - ret.upstream = make([]access.AccessAPIClient, accessNodeAddressAndPort.Count()) - ret.connections = make([]*grpc.ClientConn, accessNodeAddressAndPort.Count()) - for i, identity := range accessNodeAddressAndPort { - // Store the faultTolerantClient setup parameters such as address, public, key and timeout, so that - // we can refresh the API on connection loss - ret.ids[i] = identity - - // We fail on any single error on startup, so that - // we identify bootstrapping errors early - err := ret.reconnectingClient(i) - if err != nil { - return err - } - } - - ret.roundRobin = 0 - return nil + *forwarder.Forwarder +} + +func NewFlowAccessAPIForwarder(identities flow.IdentitySkeletonList, connectionFactory connection.ConnectionFactory) (*FlowAccessAPIForwarder, error) { + forwarder, err := forwarder.NewForwarder(identities, connectionFactory) + if err != nil { + return nil, err + } + + return &FlowAccessAPIForwarder{ + Forwarder: forwarder, + }, nil } // Ping pings the service. It is special in the sense that it responds successful, // only if all underlying services are ready. 
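Each passthrough method below now borrows a client from the shared connection manager: FaultTolerantClient returns the upstream client together with a closer, and the method must release the connection when the call returns. The shape, sketched once:

    // Borrow an upstream client; fail fast if no healthy upstream is available.
    upstream, closer, err := h.FaultTolerantClient()
    if err != nil {
        return nil, err
    }
    // Return the borrowed connection to the manager when the call completes.
    defer closer.Close()
    return upstream.Ping(ctx, req)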
func (h *FlowAccessAPIForwarder) Ping(context context.Context, req *access.PingRequest) (*access.PingResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.Ping(context, req) } func (h *FlowAccessAPIForwarder) GetNodeVersionInfo(context context.Context, req *access.GetNodeVersionInfoRequest) (*access.GetNodeVersionInfoResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetNodeVersionInfo(context, req) } func (h *FlowAccessAPIForwarder) GetLatestBlockHeader(context context.Context, req *access.GetLatestBlockHeaderRequest) (*access.BlockHeaderResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetLatestBlockHeader(context, req) } func (h *FlowAccessAPIForwarder) GetBlockHeaderByID(context context.Context, req *access.GetBlockHeaderByIDRequest) (*access.BlockHeaderResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetBlockHeaderByID(context, req) } func (h *FlowAccessAPIForwarder) GetBlockHeaderByHeight(context context.Context, req *access.GetBlockHeaderByHeightRequest) (*access.BlockHeaderResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetBlockHeaderByHeight(context, req) } func (h *FlowAccessAPIForwarder) GetLatestBlock(context context.Context, req *access.GetLatestBlockRequest) (*access.BlockResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetLatestBlock(context, req) } func (h *FlowAccessAPIForwarder) GetBlockByID(context context.Context, req *access.GetBlockByIDRequest) (*access.BlockResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetBlockByID(context, req) } func (h *FlowAccessAPIForwarder) GetBlockByHeight(context context.Context, req *access.GetBlockByHeightRequest) (*access.BlockResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetBlockByHeight(context, req) } func (h *FlowAccessAPIForwarder) GetCollectionByID(context context.Context, req *access.GetCollectionByIDRequest) (*access.CollectionResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetCollectionByID(context, req) } +func (h *FlowAccessAPIForwarder) GetFullCollectionByID(context context.Context, req *access.GetFullCollectionByIDRequest) 
(*access.FullCollectionResponse, error) { + // This is a passthrough request + upstream, closer, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + return upstream.GetFullCollectionByID(context, req) +} + func (h *FlowAccessAPIForwarder) SendTransaction(context context.Context, req *access.SendTransactionRequest) (*access.SendTransactionResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.SendTransaction(context, req) } func (h *FlowAccessAPIForwarder) GetTransaction(context context.Context, req *access.GetTransactionRequest) (*access.TransactionResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetTransaction(context, req) } func (h *FlowAccessAPIForwarder) GetTransactionResult(context context.Context, req *access.GetTransactionRequest) (*access.TransactionResultResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetTransactionResult(context, req) } +func (h *FlowAccessAPIForwarder) GetSystemTransaction(context context.Context, req *access.GetSystemTransactionRequest) (*access.TransactionResponse, error) { + // This is a passthrough request + upstream, closer, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + return upstream.GetSystemTransaction(context, req) +} + +func (h *FlowAccessAPIForwarder) GetSystemTransactionResult(context context.Context, req *access.GetSystemTransactionResultRequest) (*access.TransactionResultResponse, error) { + // This is a passthrough request + upstream, closer, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + return upstream.GetSystemTransactionResult(context, req) +} + func (h *FlowAccessAPIForwarder) GetTransactionResultByIndex(context context.Context, req *access.GetTransactionByIndexRequest) (*access.TransactionResultResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetTransactionResultByIndex(context, req) } func (h *FlowAccessAPIForwarder) GetTransactionResultsByBlockID(context context.Context, req *access.GetTransactionsByBlockIDRequest) (*access.TransactionResultsResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetTransactionResultsByBlockID(context, req) } func (h *FlowAccessAPIForwarder) GetTransactionsByBlockID(context context.Context, req *access.GetTransactionsByBlockIDRequest) (*access.TransactionsResponse, error) { - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetTransactionsByBlockID(context, req) } func (h *FlowAccessAPIForwarder) GetAccount(context context.Context, req *access.GetAccountRequest) (*access.GetAccountResponse, error) { // This is a passthrough 
request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetAccount(context, req) } func (h *FlowAccessAPIForwarder) GetAccountAtLatestBlock(context context.Context, req *access.GetAccountAtLatestBlockRequest) (*access.AccountResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetAccountAtLatestBlock(context, req) } func (h *FlowAccessAPIForwarder) GetAccountAtBlockHeight(context context.Context, req *access.GetAccountAtBlockHeightRequest) (*access.AccountResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetAccountAtBlockHeight(context, req) } +func (h *FlowAccessAPIForwarder) GetAccountBalanceAtLatestBlock(context context.Context, req *access.GetAccountBalanceAtLatestBlockRequest) (*access.AccountBalanceResponse, error) { + // This is a passthrough request + upstream, closer, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + return upstream.GetAccountBalanceAtLatestBlock(context, req) +} + +func (h *FlowAccessAPIForwarder) GetAccountBalanceAtBlockHeight(context context.Context, req *access.GetAccountBalanceAtBlockHeightRequest) (*access.AccountBalanceResponse, error) { + // This is a passthrough request + upstream, closer, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + return upstream.GetAccountBalanceAtBlockHeight(context, req) +} + +func (h *FlowAccessAPIForwarder) GetAccountKeyAtLatestBlock(context context.Context, req *access.GetAccountKeyAtLatestBlockRequest) (*access.AccountKeyResponse, error) { + // This is a passthrough request + upstream, closer, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + return upstream.GetAccountKeyAtLatestBlock(context, req) +} + +func (h *FlowAccessAPIForwarder) GetAccountKeysAtLatestBlock(context context.Context, req *access.GetAccountKeysAtLatestBlockRequest) (*access.AccountKeysResponse, error) { + // This is a passthrough request + upstream, closer, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + return upstream.GetAccountKeysAtLatestBlock(context, req) +} + +func (h *FlowAccessAPIForwarder) GetAccountKeyAtBlockHeight(context context.Context, req *access.GetAccountKeyAtBlockHeightRequest) (*access.AccountKeyResponse, error) { + // This is a passthrough request + upstream, closer, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + return upstream.GetAccountKeyAtBlockHeight(context, req) +} + +func (h *FlowAccessAPIForwarder) GetAccountKeysAtBlockHeight(context context.Context, req *access.GetAccountKeysAtBlockHeightRequest) (*access.AccountKeysResponse, error) { + // This is a passthrough request + upstream, closer, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + return upstream.GetAccountKeysAtBlockHeight(context, req) +} + func (h *FlowAccessAPIForwarder) ExecuteScriptAtLatestBlock(context context.Context, req *access.ExecuteScriptAtLatestBlockRequest) (*access.ExecuteScriptResponse, error) { // This is a passthrough request 
- upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.ExecuteScriptAtLatestBlock(context, req) } func (h *FlowAccessAPIForwarder) ExecuteScriptAtBlockID(context context.Context, req *access.ExecuteScriptAtBlockIDRequest) (*access.ExecuteScriptResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.ExecuteScriptAtBlockID(context, req) } func (h *FlowAccessAPIForwarder) ExecuteScriptAtBlockHeight(context context.Context, req *access.ExecuteScriptAtBlockHeightRequest) (*access.ExecuteScriptResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.ExecuteScriptAtBlockHeight(context, req) } func (h *FlowAccessAPIForwarder) GetEventsForHeightRange(context context.Context, req *access.GetEventsForHeightRangeRequest) (*access.EventsResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetEventsForHeightRange(context, req) } func (h *FlowAccessAPIForwarder) GetEventsForBlockIDs(context context.Context, req *access.GetEventsForBlockIDsRequest) (*access.EventsResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetEventsForBlockIDs(context, req) } func (h *FlowAccessAPIForwarder) GetNetworkParameters(context context.Context, req *access.GetNetworkParametersRequest) (*access.GetNetworkParametersResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetNetworkParameters(context, req) } func (h *FlowAccessAPIForwarder) GetLatestProtocolStateSnapshot(context context.Context, req *access.GetLatestProtocolStateSnapshotRequest) (*access.ProtocolStateSnapshotResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetLatestProtocolStateSnapshot(context, req) } func (h *FlowAccessAPIForwarder) GetExecutionResultForBlockID(context context.Context, req *access.GetExecutionResultForBlockIDRequest) (*access.ExecutionResultForBlockIDResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, closer, err := h.FaultTolerantClient() if err != nil { return nil, err } + defer closer.Close() return upstream.GetExecutionResultForBlockID(context, req) } + +func (h *FlowAccessAPIForwarder) GetExecutionResultByID(context context.Context, req *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error) { + // This is a passthrough request + upstream, closer, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + return upstream.GetExecutionResultByID(context, req) +} diff --git a/engine/access/apiproxy/access_api_proxy_test.go 
b/engine/access/apiproxy/access_api_proxy_test.go index 9f5a5aa74b8..e36040e39af 100644 --- a/engine/access/apiproxy/access_api_proxy_test.go +++ b/engine/access/apiproxy/access_api_proxy_test.go @@ -8,10 +8,15 @@ import ( "time" "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/stretchr/testify/assert" "google.golang.org/grpc" grpcinsecure "google.golang.org/grpc/credentials/insecure" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/grpc/forwarder" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/grpcutils" "github.com/onflow/flow-go/utils/unittest" ) @@ -134,44 +139,63 @@ func TestNewFlowCachedAccessAPIProxy(t *testing.T) { t.Fatal(err) } - // Prepare a proxy that fails due to the second connection being idle - l := flow.IdentityList{{Address: unittest.IPPort("11634")}, {Address: unittest.IPPort("11635")}} - c := FlowAccessAPIForwarder{} - err = c.setFlowAccessAPI(l, time.Second) - if err == nil { - t.Fatal(fmt.Errorf("should not start with one connection ready")) - } - // Bring up 2nd upstream server charlie2, _, err := newFlowLite("tcp", unittest.IPPort("11635"), done) if err != nil { t.Fatal(err) } - background := context.Background() + metrics := metrics.NewNoopCollector() - // Prepare a proxy - l = flow.IdentityList{{Address: unittest.IPPort("11634")}, {Address: unittest.IPPort("11635")}} - c = FlowAccessAPIForwarder{} - err = c.setFlowAccessAPI(l, time.Second) + // create the factory + connectionFactory := &connection.ConnectionFactoryImpl{ + // set metrics reporting + AccessMetrics: metrics, + CollectionConfig: connection.Config{ + Timeout: time.Second, + MaxRequestMsgSize: commonrpc.DefaultCollectionMaxRequestSize, + MaxResponseMsgSize: commonrpc.DefaultCollectionMaxResponseSize, + }, + Manager: connection.NewManager( + unittest.Logger(), + metrics, + nil, + connection.CircuitBreakerConfig{}, + grpcutils.NoCompressor, + ), + } + + // Prepare a proxy that fails due to the second connection being idle + l := flow.IdentitySkeletonList{{Address: unittest.IPPort("11634")}, {Address: unittest.IPPort("11635")}} + c := FlowAccessAPIForwarder{} + c.Forwarder, err = forwarder.NewForwarder(l, connectionFactory) if err != nil { t.Fatal(err) } + ctx := context.Background() + // Wait until proxy call passes - _, err = c.Ping(background, &access.PingRequest{}) + _, err = c.Ping(ctx, &access.PingRequest{}) if err != nil { t.Fatal(err) } + // get and close first connection + _, closer, err := c.Forwarder.FaultTolerantClient() + assert.NoError(t, err) + closer.Close() + + // the connection factory creates a new gRPC connection, since the previous one was closed above + // if creation fails, it should fall back to the second upstream connection // Wait until proxy call passes - _, err = c.Ping(background, &access.PingRequest{}) + _, err = c.Ping(ctx, &access.PingRequest{}) if err != nil { t.Fatal(err) } // Wait until proxy call passes - _, err = c.Ping(background, &access.PingRequest{}) + _, err = c.Ping(ctx, &access.PingRequest{}) if err != nil { t.Fatal(err) } @@ -180,7 +204,7 @@ func TestNewFlowCachedAccessAPIProxy(t *testing.T) { charlie2.Stop() // Wait until proxy call fails - _, err = c.Ping(background, &access.PingRequest{}) + _, err = c.Ping(ctx, &access.PingRequest{}) if err == nil { t.Fatal(fmt.Errorf("should fail on no connections")) } @@ -246,7 +270,7 @@ func newFlowLite(network string, address string, done chan int) (*grpc.Server, * func
openFlowLite(address string) error { c, err := grpc.Dial( "unix://"+address, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(commonrpc.DefaultAccessMaxResponseSize)), grpc.WithTransportCredentials(grpcinsecure.NewCredentials())) if err != nil { return err diff --git a/engine/access/handle_irrecoverable_state_test.go b/engine/access/handle_irrecoverable_state_test.go new file mode 100644 index 00000000000..5c7edf41451 --- /dev/null +++ b/engine/access/handle_irrecoverable_state_test.go @@ -0,0 +1,278 @@ +package access + +import ( + "context" + "fmt" + "io" + "testing" + "time" + + "github.com/antihax/optional" + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + + "github.com/onflow/crypto" + restclient "github.com/onflow/flow/openapi/go-client-generated" + + accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rest" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/engine/access/rest/websockets" + "github.com/onflow/flow-go/engine/access/rpc" + "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcserver" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + module "github.com/onflow/flow-go/module/mock" + mocknetwork "github.com/onflow/flow-go/network/mock" + protocol "github.com/onflow/flow-go/state/protocol/mock" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/grpcutils" + "github.com/onflow/flow-go/utils/unittest" +) + +// IrrecoverableStateTestSuite tests that the Access node indicates an inconsistent or corrupted node state +type IrrecoverableStateTestSuite struct { + suite.Suite + log zerolog.Logger + cancel context.CancelFunc + + state *protocol.State + snapshot *protocol.Snapshot + epochQuery *protocol.EpochQuery + net *mocknetwork.EngineRegistry + request *module.Requester + collClient *accessmock.AccessAPIClient + execClient *accessmock.ExecutionAPIClient + me *module.Local + chainID flow.ChainID + metrics *metrics.NoopCollector + rpcEng *rpc.Engine + publicKey crypto.PublicKey + + // storage + blocks *storagemock.Blocks + headers *storagemock.Headers + collections *storagemock.Collections + transactions *storagemock.Transactions + receipts *storagemock.ExecutionReceipts + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer +} + +func (suite *IrrecoverableStateTestSuite) SetupTest() { + suite.log = unittest.Logger() + suite.net = mocknetwork.NewEngineRegistry(suite.T()) + suite.state = protocol.NewState(suite.T()) + suite.snapshot = protocol.NewSnapshot(suite.T()) + + params := protocol.NewParams(suite.T()) + + suite.epochQuery = protocol.NewEpochQuery(suite.T()) + suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() + suite.state.On("Final").Return(suite.snapshot, nil).Maybe() +
suite.state.On("Params").Return(params, nil).Maybe() + suite.snapshot.On("Epochs").Return(suite.epochQuery).Maybe() + suite.blocks = storagemock.NewBlocks(suite.T()) + suite.headers = storagemock.NewHeaders(suite.T()) + suite.transactions = storagemock.NewTransactions(suite.T()) + suite.collections = storagemock.NewCollections(suite.T()) + suite.receipts = storagemock.NewExecutionReceipts(suite.T()) + + suite.collClient = accessmock.NewAccessAPIClient(suite.T()) + suite.execClient = accessmock.NewExecutionAPIClient(suite.T()) + + suite.request = module.NewRequester(suite.T()) + suite.me = module.NewLocal(suite.T()) + + accessIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) + suite.me. + On("NodeID"). + Return(accessIdentity.NodeID).Maybe() + + suite.chainID = flow.Testnet + suite.metrics = metrics.NewNoopCollector() + + config := rpc.Config{ + UnsecureGRPCListenAddr: unittest.DefaultAddress, + SecureGRPCListenAddr: unittest.DefaultAddress, + HTTPListenAddr: unittest.DefaultAddress, + RestConfig: rest.Config{ + ListenAddress: unittest.DefaultAddress, + }, + WebSocketConfig: websockets.NewDefaultWebsocketConfig(), + } + + // generate a server certificate that will be served by the GRPC server + networkingKey := unittest.NetworkingPrivKeyFixture() + x509Certificate, err := grpcutils.X509Certificate(networkingKey) + assert.NoError(suite.T(), err) + tlsConfig := grpcutils.DefaultServerTLSConfig(x509Certificate) + // set the transport credentials for the server to use + config.TransportCredentials = credentials.NewTLS(tlsConfig) + // save the public key to use later in tests later + suite.publicKey = networkingKey.PublicKey() + + suite.secureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.SecureGRPCListenAddr, + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + false, + nil, + nil, + grpcserver.WithTransportCredentials(config.TransportCredentials)).Build() + + suite.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.UnsecureGRPCListenAddr, + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + false, + nil, + nil).Build() + + blockHeader := unittest.BlockHeaderFixture() + suite.snapshot.On("Head").Return(blockHeader, nil).Once() + + bnd, err := backend.New(backend.Params{ + State: suite.state, + CollectionRPC: suite.collClient, + Blocks: suite.blocks, + Headers: suite.headers, + Collections: suite.collections, + Transactions: suite.transactions, + ChainID: suite.chainID, + AccessMetrics: suite.metrics, + MaxHeightRange: 0, + Log: suite.log, + SnapshotHistoryLimit: 0, + Communicator: node_communicator.NewNodeCommunicator(false), + BlockTracker: nil, + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + }) + suite.Require().NoError(err) + + stateStreamConfig := statestreambackend.Config{} + rpcEngBuilder, err := rpc.NewBuilder( + suite.log, + suite.state, + config, + suite.chainID, + suite.metrics, + false, + suite.me, + bnd, + bnd, + suite.secureGrpcServer, + suite.unsecureGrpcServer, + nil, + stateStreamConfig, + nil, + ) + assert.NoError(suite.T(), err) + suite.rpcEng, err = rpcEngBuilder.WithLegacy().Build() + assert.NoError(suite.T(), err) + + err = fmt.Errorf("inconsistent node's state") + + ctx, cancel := context.WithCancel(context.Background()) + suite.cancel = cancel + + signCtxErr := irrecoverable.NewExceptionf("failed to 
lookup sealed header: %w", err) + signalCtx := irrecoverable.NewMockSignalerContextExpectError(suite.T(), ctx, signCtxErr) + + suite.rpcEng.Start(signalCtx) + + suite.secureGrpcServer.Start(signalCtx) + suite.unsecureGrpcServer.Start(signalCtx) + + // wait for the servers to startup + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Ready(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Ready(), 2*time.Second) + + // wait for the engine to startup + unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Ready(), 2*time.Second) +} + +func (suite *IrrecoverableStateTestSuite) TearDownTest() { + suite.cancel() + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Done(), 2*time.Second) +} + +func TestIrrecoverableState(t *testing.T) { + suite.Run(t, new(IrrecoverableStateTestSuite)) +} + +// TestGRPCInconsistentNodeState tests the behavior when gRPC encounters an inconsistent node state. +func (suite *IrrecoverableStateTestSuite) TestGRPCInconsistentNodeState() { + err := fmt.Errorf("inconsistent node's state") + suite.snapshot.On("Head").Return(nil, err) + + conn, err := grpc.Dial( + suite.unsecureGrpcServer.GRPCAddress().String(), + grpc.WithTransportCredentials(insecure.NewCredentials())) + assert.NoError(suite.T(), err) + defer io.Closer(conn).Close() + + client := accessproto.NewAccessAPIClient(conn) + + req := &accessproto.GetAccountAtLatestBlockRequest{ + Address: unittest.AddressFixture().Bytes(), + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + actual, err := client.GetAccountAtLatestBlock(ctx, req) + suite.Require().Error(err) + suite.Require().Nil(actual) +} + +// TestRestInconsistentNodeState tests the behavior when the REST API encounters an inconsistent node state. +func (suite *IrrecoverableStateTestSuite) TestRestInconsistentNodeState() { + collections := unittest.CollectionListFixture(1) + block := unittest.BlockFixture( + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees(unittest.CollectionGuaranteesWithCollectionIDFixture(collections)...)), + ), + ) + suite.blocks.On("ByID", block.ID()).Return(block, nil) + suite.headers.On("BlockIDByHeight", block.Height).Return(block.ID(), nil) + + err := fmt.Errorf("inconsistent node's state") + suite.snapshot.On("Head").Return(nil, err) + + config := restclient.NewConfiguration() + config.BasePath = fmt.Sprintf("http://%s/v1", suite.rpcEng.RestApiAddress().String()) + client := restclient.NewAPIClient(config) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + actual, _, err := client.BlocksApi.BlocksIdGet(ctx, []string{block.ID().String()}, optionsForBlocksIdGetOpts()) + suite.Require().Error(err) + suite.Require().Nil(actual) +} + +// optionsForBlocksIdGetOpts returns options for the BlocksApi.BlocksIdGet function. 
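The two tests above exercise the signaler-context pattern these changes rely on: a component never returns a fatal error to its caller, it throws the error into an irrecoverable signaler context, and the test harness asserts that exactly the expected error was thrown. A minimal, self-contained sketch of the idea, using illustrative names (signalerCtx, errs) rather than the actual flow-go test helpers:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// signalerCtx mimics the idea behind irrecoverable.SignalerContext:
// components throw fatal errors out-of-band instead of returning them.
type signalerCtx struct {
	context.Context
	errs chan error // buffered channel capturing thrown errors
}

func (s signalerCtx) Throw(err error) { s.errs <- err }

func main() {
	ctx := signalerCtx{Context: context.Background(), errs: make(chan error, 1)}

	// a component hits an unrecoverable condition and throws
	go func() {
		ctx.Throw(fmt.Errorf("failed to lookup sealed header: %w", errors.New("inconsistent node's state")))
	}()

	// the test side asserts that exactly this error was thrown
	fmt.Println("component threw:", <-ctx.errs)
}
```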
+func optionsForBlocksIdGetOpts() *restclient.BlocksApiBlocksIdGetOpts { + return &restclient.BlocksApiBlocksIdGetOpts{ + Expand: optional.NewInterface([]string{router.ExpandableFieldPayload}), + Select_: optional.NewInterface([]string{"header.id"}), + } +} diff --git a/engine/access/index/event_index_test.go b/engine/access/index/event_index_test.go new file mode 100644 index 00000000000..e86e9594c65 --- /dev/null +++ b/engine/access/index/event_index_test.go @@ -0,0 +1,82 @@ +package index + +import ( + "bytes" + "math" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestGetEvents tests that GetEvents returns the events in the correct order +func TestGetEvents(t *testing.T) { + expectedEvents := make(flow.EventsList, 0, 6) + expectedEvents = append(expectedEvents, generateTxEvents(unittest.IdentifierFixture(), 0, 1)...) + expectedEvents = append(expectedEvents, generateTxEvents(unittest.IdentifierFixture(), 1, 3)...) + expectedEvents = append(expectedEvents, generateTxEvents(unittest.IdentifierFixture(), 2, 2)...) + + storedEvents := make([]flow.Event, len(expectedEvents)) + copy(storedEvents, expectedEvents) + + // sort events in storage order (by tx ID) + sort.Slice(storedEvents, func(i, j int) bool { + cmp := bytes.Compare(storedEvents[i].TransactionID[:], storedEvents[j].TransactionID[:]) + if cmp == 0 { + if storedEvents[i].TransactionIndex == storedEvents[j].TransactionIndex { + return storedEvents[i].EventIndex < storedEvents[j].EventIndex + } + return storedEvents[i].TransactionIndex < storedEvents[j].TransactionIndex + } + return cmp < 0 + }) + + events := storagemock.NewEvents(t) + header := unittest.BlockHeaderFixture() + + events.On("ByBlockID", mock.Anything).Return(func(blockID flow.Identifier) ([]flow.Event, error) { + return storedEvents, nil + }) + + eventsIndex := NewEventsIndex(NewReporter(), events) + err := eventsIndex.Initialize(&mockIndexReporter{}) + require.NoError(t, err) + + actualEvents, err := eventsIndex.ByBlockID(header.ID(), header.Height) + require.NoError(t, err) + + // output events should be in the same order as the expected events + assert.Len(t, actualEvents, len(expectedEvents)) + for i, event := range actualEvents { + assert.Equal(t, expectedEvents[i], event) + } +} + +func generateTxEvents(txID flow.Identifier, txIndex uint32, count int) flow.EventsList { + events := make(flow.EventsList, count) + for i := 0; i < count; i++ { + events[i] = flow.Event{ + Type: unittest.EventTypeFixture(flow.Localnet), + TransactionID: txID, + TransactionIndex: txIndex, + EventIndex: uint32(i), + } + } + return events +} + +type mockIndexReporter struct{} + +func (r *mockIndexReporter) LowestIndexedHeight() (uint64, error) { + return 0, nil +} + +func (r *mockIndexReporter) HighestIndexedHeight() (uint64, error) { + return math.MaxUint64, nil +} diff --git a/engine/access/index/events_index.go b/engine/access/index/events_index.go new file mode 100644 index 00000000000..bc669c79fb1 --- /dev/null +++ b/engine/access/index/events_index.go @@ -0,0 +1,75 @@ +package index + +import ( + "sort" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// EventsIndex implements a wrapper around `storage.Events` ensuring that needed data has been synced and is available to the client. 
+// Note: see the `Reporter` documentation for details on how it works +type EventsIndex struct { + *Reporter + events storage.Events +} + +func NewEventsIndex(reporter *Reporter, events storage.Events) *EventsIndex { + return &EventsIndex{ + Reporter: reporter, + events: events, + } +} + +// ByBlockID checks data availability and returns events for a block +// Expected errors: +// - indexer.ErrIndexNotInitialized if the `EventsIndex` has not been initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// - codes.NotFound if result cannot be provided by storage due to the absence of data. +func (e *EventsIndex) ByBlockID(blockID flow.Identifier, height uint64) ([]flow.Event, error) { + if err := e.checkDataAvailability(height); err != nil { + return nil, err + } + + events, err := e.events.ByBlockID(blockID) + if err != nil { + return nil, err + } + + // events are keyed/sorted by [blockID, txID, txIndex, eventIndex] + // we need to resort them by tx index then event index so the output is in execution order + sort.Slice(events, func(i, j int) bool { + if events[i].TransactionIndex == events[j].TransactionIndex { + return events[i].EventIndex < events[j].EventIndex + } + return events[i].TransactionIndex < events[j].TransactionIndex + }) + + return events, nil +} + +// ByBlockIDTransactionID checks data availability and returns events for the given block ID and transaction ID +// Expected errors: +// - indexer.ErrIndexNotInitialized if the `EventsIndex` has not been initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// - codes.NotFound if result cannot be provided by storage due to the absence of data. +func (e *EventsIndex) ByBlockIDTransactionID(blockID flow.Identifier, height uint64, transactionID flow.Identifier) ([]flow.Event, error) { + if err := e.checkDataAvailability(height); err != nil { + return nil, err + } + + return e.events.ByBlockIDTransactionID(blockID, transactionID) +} + +// ByBlockIDTransactionIndex checks data availability and returns events for the transaction at the given index in a given block +// Expected errors: +// - indexer.ErrIndexNotInitialized if the `EventsIndex` has not been initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// - codes.NotFound if result cannot be provided by storage due to the absence of data. +func (e *EventsIndex) ByBlockIDTransactionIndex(blockID flow.Identifier, height uint64, txIndex uint32) ([]flow.Event, error) { + if err := e.checkDataAvailability(height); err != nil { + return nil, err + } + + return e.events.ByBlockIDTransactionIndex(blockID, txIndex) +} diff --git a/engine/access/index/reporter.go b/engine/access/index/reporter.go new file mode 100644 index 00000000000..9bc2f2a1371 --- /dev/null +++ b/engine/access/index/reporter.go @@ -0,0 +1,106 @@ +package index + +import ( + "fmt" + + "go.uber.org/atomic" + + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/storage" + + "github.com/onflow/flow-go/module/state_synchronization" +) + +var _ state_synchronization.IndexReporter = (*Reporter)(nil) + +// Reporter implements a wrapper around `IndexReporter` ensuring that needed data has been synced and is available to the client. +// Note: `Reporter` is created with an empty underlying reporter for the following reason: +// When the index is initially bootstrapped, the indexer needs to load an execution state checkpoint from +// disk and index all the data. This process can take more than 1 hour on some systems.
Consequently, the Initialize +// pattern is implemented to enable the Access API to start up and serve queries before the index is fully ready. During +// the initialization phase, all calls to retrieve data from this struct should return indexer.ErrIndexNotInitialized. +// The caller is responsible for handling this error appropriately for the method. +type Reporter struct { + reporter *atomic.Pointer[state_synchronization.IndexReporter] +} + +func NewReporter() *Reporter { + return &Reporter{ + reporter: atomic.NewPointer[state_synchronization.IndexReporter](nil), + } +} + +// Initialize replaces a previously non-initialized reporter. Can be called once. +// No errors are expected during normal operations. +func (s *Reporter) Initialize(indexReporter state_synchronization.IndexReporter) error { + if s.reporter.CompareAndSwap(nil, &indexReporter) { + return nil + } + return fmt.Errorf("index reporter already initialized") +} + +// LowestIndexedHeight returns the lowest height indexed by the execution state indexer. +// Expected errors: +// - indexer.ErrIndexNotInitialized if the IndexReporter has not been initialized +func (s *Reporter) LowestIndexedHeight() (uint64, error) { + reporter, err := s.getReporter() + if err != nil { + return 0, err + } + + return reporter.LowestIndexedHeight() +} + +// HighestIndexedHeight returns the highest height indexed by the execution state indexer. +// Expected errors: +// - indexer.ErrIndexNotInitialized if the IndexReporter has not been initialized +func (s *Reporter) HighestIndexedHeight() (uint64, error) { + reporter, err := s.getReporter() + if err != nil { + return 0, err + } + + return reporter.HighestIndexedHeight() +} + +// checkDataAvailability checks the availability of data at the given height by comparing it with the highest and lowest +// indexed heights. If the height is beyond the indexed range, an error is returned. +// Expected errors: +// - indexer.ErrIndexNotInitialized if the `IndexReporter` has not been initialized +// - storage.ErrHeightNotIndexed if the block at the provided height is not indexed yet +// - all other errors are unexpected +func (s *Reporter) checkDataAvailability(height uint64) error { + reporter, err := s.getReporter() + if err != nil { + return err + } + + highestHeight, err := reporter.HighestIndexedHeight() + if err != nil { + return fmt.Errorf("could not get highest indexed height: %w", err) + } + if height > highestHeight { + return fmt.Errorf("%w: block not indexed yet", storage.ErrHeightNotIndexed) + } + + lowestHeight, err := reporter.LowestIndexedHeight() + if err != nil { + return fmt.Errorf("could not get lowest indexed height: %w", err) + } + if height < lowestHeight { + return fmt.Errorf("%w: block is before lowest indexed height", storage.ErrHeightNotIndexed) + } + + return nil +} + +// getReporter retrieves the current index reporter instance from the atomic pointer. 
+// Expected errors: + // - indexer.ErrIndexNotInitialized if the reporter is not initialized +func (s *Reporter) getReporter() (state_synchronization.IndexReporter, error) { + reporter := s.reporter.Load() + if reporter == nil { + return nil, indexer.ErrIndexNotInitialized + } + return *reporter, nil +} diff --git a/engine/access/index/transaction_results_indexer.go b/engine/access/index/transaction_results_indexer.go new file mode 100644 index 00000000000..62f980e78fc --- /dev/null +++ b/engine/access/index/transaction_results_indexer.go @@ -0,0 +1,59 @@ +package index + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// TransactionResultsIndex implements a wrapper around `storage.LightTransactionResult` ensuring that needed data has been synced and is available to the client. +// Note: see the `Reporter` documentation for details on how it works +type TransactionResultsIndex struct { + *Reporter + results storage.LightTransactionResults +} + +func NewTransactionResultsIndex(reporter *Reporter, results storage.LightTransactionResults) *TransactionResultsIndex { + return &TransactionResultsIndex{ + Reporter: reporter, + results: results, + } +} + +// ByBlockID checks data availability and returns all transaction results for a block +// Expected errors: +// - indexer.ErrIndexNotInitialized if the `TransactionResultsIndex` has not been initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// - codes.NotFound if result cannot be provided by storage due to the absence of data. +func (t *TransactionResultsIndex) ByBlockID(blockID flow.Identifier, height uint64) ([]flow.LightTransactionResult, error) { + if err := t.checkDataAvailability(height); err != nil { + return nil, err + } + + return t.results.ByBlockID(blockID) +} + +// ByBlockIDTransactionID checks data availability and returns the transaction result for the given block ID and transaction ID +// Expected errors: +// - indexer.ErrIndexNotInitialized if the `TransactionResultsIndex` has not been initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// - codes.NotFound if result cannot be provided by storage due to the absence of data. +func (t *TransactionResultsIndex) ByBlockIDTransactionID(blockID flow.Identifier, height uint64, txID flow.Identifier) (*flow.LightTransactionResult, error) { + if err := t.checkDataAvailability(height); err != nil { + return nil, err + } + + return t.results.ByBlockIDTransactionID(blockID, txID) +} + +// ByBlockIDTransactionIndex checks data availability and returns the transaction result for the given blockID and transaction index +// Expected errors: +// - indexer.ErrIndexNotInitialized if the `TransactionResultsIndex` has not been initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// - codes.NotFound when result cannot be provided by storage due to the absence of data.
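For callers of these index wrappers, the error contract documented above is the important part: sentinel errors distinguish "index not initialized yet" from "height outside the indexed range". A caller-side sketch using stand-in sentinels (the real ones are indexer.ErrIndexNotInitialized and storage.ErrHeightNotIndexed; fetchEvents is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// stand-ins for the sentinel errors referenced in the doc comments above
var (
	ErrIndexNotInitialized = errors.New("index not initialized")
	ErrHeightNotIndexed    = errors.New("height not indexed")
)

// fetchEvents is a hypothetical lookup that fails with one of the sentinels.
func fetchEvents(height uint64) ([]string, error) {
	return nil, fmt.Errorf("%w: block not indexed yet", ErrHeightNotIndexed)
}

func main() {
	_, err := fetchEvents(42)
	switch {
	case errors.Is(err, ErrIndexNotInitialized):
		fmt.Println("index still bootstrapping; fall back to an execution node")
	case errors.Is(err, ErrHeightNotIndexed):
		fmt.Println("height outside indexed range; retry later or fall back")
	case err != nil:
		fmt.Println("unexpected error:", err)
	}
}
```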
+func (t *TransactionResultsIndex) ByBlockIDTransactionIndex(blockID flow.Identifier, height uint64, index uint32) (*flow.LightTransactionResult, error) { + if err := t.checkDataAvailability(height); err != nil { + return nil, err + } + + return t.results.ByBlockIDTransactionIndex(blockID, index) +} diff --git a/engine/access/ingestion/collection_syncer.go b/engine/access/ingestion/collection_syncer.go new file mode 100644 index 00000000000..7e0375abcff --- /dev/null +++ b/engine/access/ingestion/collection_syncer.go @@ -0,0 +1,411 @@ +package ingestion + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +var ( + defaultMissingCollsForBlockThreshold = missingCollsForBlockThreshold + defaultMissingCollsForAgeThreshold uint64 = missingCollsForAgeThreshold +) + +// The CollectionSyncer type provides mechanisms for syncing and indexing data +// from the Flow blockchain into local storage. Specifically, it handles +// the retrieval and processing of collections and transactions that may +// have been missed due to network delays, restarts, or gaps in finalization. +// +// It is responsible for ensuring the local node has +// all collections associated with finalized blocks starting from the +// last fully synced height. It works by periodically scanning the finalized +// block range, identifying missing collections, and triggering requests +// to fetch them from the network. Once collections are retrieved, it +// ensures they are persisted in the local collection and transaction stores. +// +// The syncer maintains a persistent, strictly monotonic counter +// (`lastFullBlockHeight`) to track the highest finalized block for which +// all collections have been fully indexed. It uses this information to +// avoid redundant processing and to measure catch-up progress. +// +// It is meant to operate in a background goroutine as part of the +// node's ingestion pipeline. +type CollectionSyncer struct { + logger zerolog.Logger + collectionExecutedMetric module.CollectionExecutedMetric + + state protocol.State + requester module.Requester + + blocks storage.Blocks + collections storage.Collections + transactions storage.Transactions + + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + lockManager storage.LockManager +} + +// NewCollectionSyncer creates a new CollectionSyncer responsible for requesting, +// tracking, and indexing missing collections. 
+func NewCollectionSyncer( + logger zerolog.Logger, + collectionExecutedMetric module.CollectionExecutedMetric, + requester module.Requester, + state protocol.State, + blocks storage.Blocks, + collections storage.Collections, + transactions storage.Transactions, + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter, + lockManager storage.LockManager, +) *CollectionSyncer { + collectionExecutedMetric.UpdateLastFullBlockHeight(lastFullBlockHeight.Value()) + + return &CollectionSyncer{ + logger: logger, + state: state, + requester: requester, + blocks: blocks, + collections: collections, + transactions: transactions, + lastFullBlockHeight: lastFullBlockHeight, + collectionExecutedMetric: collectionExecutedMetric, + lockManager: lockManager, + } +} + +// RequestCollections continuously monitors and triggers collection sync operations. +// It handles collection catch-up on startup, periodic requests for missing collections, and full block height updates. +func (s *CollectionSyncer) RequestCollections(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + requestCtx, cancel := context.WithTimeout(ctx, collectionCatchupTimeout) + defer cancel() + + // on start-up, the AN downloads all missing collections so it can serve them to end users + err := s.requestMissingCollectionsBlocking(requestCtx) + if err != nil { + s.logger.Error().Err(err).Msg("error downloading missing collections") + } + ready() + + requestCollectionsTicker := time.NewTicker(missingCollsRequestInterval) + defer requestCollectionsTicker.Stop() + + // Collections are requested concurrently in this design. + // To maintain accurate progress tracking and avoid redundant requests, + // we periodically update the `lastFullBlockHeight` to reflect the latest + // finalized block with all collections successfully indexed. + updateLastFullBlockHeightTicker := time.NewTicker(fullBlockRefreshInterval) + defer updateLastFullBlockHeightTicker.Stop() + + for { + select { + case <-ctx.Done(): + return + + case <-requestCollectionsTicker.C: + err := s.requestMissingCollections() + if err != nil { + ctx.Throw(err) + } + + case <-updateLastFullBlockHeightTicker.C: + err := s.updateLastFullBlockHeight() + if err != nil { + ctx.Throw(err) + } + } + } +} + +// requestMissingCollections checks if missing collections should be requested based on configured +// block or age thresholds and triggers requests if needed. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) requestMissingCollections() error { + lastFullBlockHeight := s.lastFullBlockHeight.Value() + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return fmt.Errorf("failed to get finalized block: %w", err) + } + + collections, incompleteBlocksCount, err := s.findMissingCollections(lastFullBlockHeight) + if err != nil { + return err + } + + blocksThresholdReached := incompleteBlocksCount >= defaultMissingCollsForBlockThreshold + ageThresholdReached := lastFinalizedBlock.Height-lastFullBlockHeight > defaultMissingCollsForAgeThreshold + shouldRequest := blocksThresholdReached || ageThresholdReached + + if shouldRequest { + // warn log since generally this should not happen + s.logger.Warn(). + Uint64("finalized_height", lastFinalizedBlock.Height). + Uint64("last_full_blk_height", lastFullBlockHeight). + Int("missing_collection_blk_count", incompleteBlocksCount). + Int("missing_collection_count", len(collections)).
+ Msg("re-requesting missing collections") + + s.requestCollections(collections) + } + + return nil +} + +// requestMissingCollectionsBlocking requests and waits for all missing collections to be downloaded, +// blocking until either completion or context timeout. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) requestMissingCollectionsBlocking(ctx context.Context) error { + missingCollections, _, err := s.findMissingCollections(s.lastFullBlockHeight.Value()) + if err != nil { + return err + } + if len(missingCollections) == 0 { + s.logger.Info().Msg("skipping requesting missing collections. no missing collections found") + return nil + } + + s.requestCollections(missingCollections) + + collectionsToBeDownloaded := make(map[flow.Identifier]struct{}) + for _, collection := range missingCollections { + collectionsToBeDownloaded[collection.CollectionID] = struct{}{} + } + + collectionStoragePollTicker := time.NewTicker(collectionCatchupDBPollInterval) + defer collectionStoragePollTicker.Stop() + + // we want to wait for all collections to be downloaded so we poll local storage periodically to make sure each + // collection was successfully saved in the storage. + for len(collectionsToBeDownloaded) > 0 { + select { + case <-ctx.Done(): + return fmt.Errorf("failed to complete collection retrieval: %w", ctx.Err()) + + case <-collectionStoragePollTicker.C: + s.logger.Info(). + Int("total_missing_collections", len(collectionsToBeDownloaded)). + Msg("retrieving missing collections...") + + for collectionID := range collectionsToBeDownloaded { + downloaded, err := s.isCollectionInStorage(collectionID) + if err != nil { + return err + } + + if downloaded { + delete(collectionsToBeDownloaded, collectionID) + } + } + } + } + + s.logger.Info().Msg("collection catchup done") + return nil +} + +// findMissingCollections scans block heights from last known full block up to the latest finalized +// block and returns all missing collection along with the count of incomplete blocks. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) findMissingCollections(lastFullBlockHeight uint64) ([]*flow.CollectionGuarantee, int, error) { + // first block to look up collections at + firstBlockHeight := lastFullBlockHeight + 1 + + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return nil, 0, fmt.Errorf("failed to get finalized block: %w", err) + } + // last block to look up collections at + lastBlockHeight := lastFinalizedBlock.Height + + var missingCollections []*flow.CollectionGuarantee + var incompleteBlocksCount int + + for currBlockHeight := firstBlockHeight; currBlockHeight <= lastBlockHeight; currBlockHeight++ { + collections, err := s.findMissingCollectionsAtHeight(currBlockHeight) + if err != nil { + return nil, 0, err + } + + if len(collections) == 0 { + continue + } + + missingCollections = append(missingCollections, collections...) + incompleteBlocksCount += 1 + } + + return missingCollections, incompleteBlocksCount, nil +} + +// findMissingCollectionsAtHeight returns all missing collections for a specific block height. +// +// No errors are expected during normal operations. 
+func (s *CollectionSyncer) findMissingCollectionsAtHeight(height uint64) ([]*flow.CollectionGuarantee, error) { + block, err := s.blocks.ByHeight(height) + if err != nil { + return nil, fmt.Errorf("failed to retrieve block by height %d: %w", height, err) + } + + var missingCollections []*flow.CollectionGuarantee + for _, guarantee := range block.Payload.Guarantees { + inStorage, err := s.isCollectionInStorage(guarantee.CollectionID) + if err != nil { + return nil, err + } + + if !inStorage { + missingCollections = append(missingCollections, guarantee) + } + } + + return missingCollections, nil +} + +// isCollectionInStorage checks whether the given collection is present in local storage. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) isCollectionInStorage(collectionID flow.Identifier) (bool, error) { + _, err := s.collections.LightByID(collectionID) + if err == nil { + return true, nil + } + + if errors.Is(err, storage.ErrNotFound) { + return false, nil + } + + return false, fmt.Errorf("failed to retrieve collection %s: %w", collectionID.String(), err) +} + +// RequestCollectionsForBlock conditionally requests missing collections for a specific block height, +// skipping requests if the block is already below the known full block height. +func (s *CollectionSyncer) RequestCollectionsForBlock(height uint64, missingCollections []*flow.CollectionGuarantee) { + // skip requesting collections, if this block is below the last full block height. + // this means that either we have already received these collections, or the block + // may contain unverifiable guarantees (in case this node has just joined the network) + if height <= s.lastFullBlockHeight.Value() { + s.logger.Debug(). + Msg("skipping requesting collections for finalized block as its collections have already been retrieved") + return + } + + s.requestCollections(missingCollections) +} + +// requestCollections registers collection download requests in the requester engine and +// causes the requester to immediately dispatch requests. +func (s *CollectionSyncer) requestCollections(collections []*flow.CollectionGuarantee) { + for _, guarantee := range collections { + guarantors, err := protocol.FindGuarantors(s.state, guarantee) + if err != nil { + // failing to find guarantors for a guarantee contained in a finalized block is a fatal error + s.logger.Fatal().Err(err).Msgf("could not find guarantors for collection %v", guarantee.CollectionID) + } + s.requester.EntityByID(guarantee.CollectionID, filter.HasNodeID[flow.Identity](guarantors...)) + } + + if len(collections) > 0 { + s.requester.Force() + } +} + +// updateLastFullBlockHeight updates the next highest block height where all previous collections have been indexed. +// +// No errors are expected during normal operations.
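updateLastFullBlockHeight (below) advances a contiguous watermark: its helper findLowestBlockHeightWithMissingCollections walks upward from the last known full height and stops at the first height that is still missing collections, even if later heights happen to be complete. A toy sketch of that watermark semantics, with hypothetical names:

```go
package main

import "fmt"

// advanceWatermark mirrors the idea of findLowestBlockHeightWithMissingCollections:
// walk heights upward from the last known full height and stop at the first
// height that is still missing collections. Everything at or below the returned
// height is contiguously complete.
func advanceWatermark(lastFull, finalized uint64, complete func(uint64) bool) uint64 {
	watermark := lastFull
	for h := lastFull + 1; h <= finalized; h++ {
		if !complete(h) {
			return watermark // first gap found; never advance past it
		}
		watermark = h
	}
	return watermark
}

func main() {
	// toy availability: heights 1-3 complete, 4 missing, 5 complete
	complete := func(h uint64) bool { return h != 4 }
	fmt.Println(advanceWatermark(0, 5, complete)) // prints 3, not 5
}
```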
+func (s *CollectionSyncer) updateLastFullBlockHeight() error { + lastFullBlockHeight := s.lastFullBlockHeight.Value() + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return fmt.Errorf("failed to get finalized block: %w", err) + } + + // track the latest contiguous full height + newLastFullBlockHeight, err := s.findLowestBlockHeightWithMissingCollections(lastFullBlockHeight, lastFinalizedBlock.Height) + if err != nil { + return fmt.Errorf("failed to find last full block height: %w", err) + } + + // if more contiguous blocks are now complete, update db + if newLastFullBlockHeight > lastFullBlockHeight { + err := s.lastFullBlockHeight.Set(newLastFullBlockHeight) + if err != nil { + return fmt.Errorf("failed to update last full block height: %w", err) + } + + s.collectionExecutedMetric.UpdateLastFullBlockHeight(newLastFullBlockHeight) + + s.logger.Debug(). + Uint64("last_full_block_height", newLastFullBlockHeight). + Msg("updated last full block height counter") + } + + return nil +} + +// findLowestBlockHeightWithMissingCollections finds the next block height with missing collections, +// returning the latest contiguous height where all collections are present. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) findLowestBlockHeightWithMissingCollections( + lastKnownFullBlockHeight uint64, + finalizedBlockHeight uint64, +) (uint64, error) { + newLastFullBlockHeight := lastKnownFullBlockHeight + + for currBlockHeight := lastKnownFullBlockHeight + 1; currBlockHeight <= finalizedBlockHeight; currBlockHeight++ { + missingCollections, err := s.findMissingCollectionsAtHeight(currBlockHeight) + if err != nil { + return 0, err + } + + // return when we find the first block with missing collections + if len(missingCollections) > 0 { + return newLastFullBlockHeight, nil + } + + newLastFullBlockHeight = currBlockHeight + } + + return newLastFullBlockHeight, nil +} + +// OnCollectionDownloaded indexes and persists a downloaded collection. +// This is a callback intended to be used with the requester engine. 
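OnCollectionDownloaded (below) is the hand-off point between the requester engine and the syncer: the requester invokes it once an entity arrives from the network. A sketch of how such a callback is typically wired, with hypothetical stand-ins (Entity, requester, WithHandle are illustrative, not the flow-go API):

```go
package main

import "fmt"

// Entity stands in for flow.Entity; Collection for flow.Collection.
type Entity interface{ ID() string }

type Collection struct{ id string }

func (c *Collection) ID() string { return c.id }

// requester is a hypothetical stand-in for the requester engine: it remembers
// a handler and invokes it once a requested entity has been downloaded.
type requester struct{ handle func(originID string, entity Entity) }

func (r *requester) WithHandle(h func(string, Entity)) { r.handle = h }

func main() {
	r := &requester{}
	// wiring equivalent to registering OnCollectionDownloaded as the callback
	r.WithHandle(func(origin string, e Entity) {
		// in the syncer, this is where the collection is locked, indexed and persisted
		fmt.Printf("downloaded collection %s from %s\n", e.ID(), origin)
	})
	r.handle("node-1", &Collection{id: "coll-abc"})
}
```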
+func (s *CollectionSyncer) OnCollectionDownloaded(_ flow.Identifier, entity flow.Entity) { + collection, ok := entity.(*flow.Collection) + if !ok { + s.logger.Error().Msgf("invalid entity type (%T)", entity) + return + } + + // Create a lock context for indexing + lctx := s.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertCollection) + if err != nil { + // TODO(leo): should be using irrecoverable.Context + s.logger.Fatal().Err(err).Msg("could not acquire lock for collection indexing") + return + } + + err = indexer.IndexCollection(lctx, collection, s.collections, s.logger, s.collectionExecutedMetric) + if err != nil { + s.logger.Error().Err(err).Msg("could not index collection after it has been downloaded") + return + } +} diff --git a/engine/access/ingestion/engine.go b/engine/access/ingestion/engine.go index a4bc7ecb624..d544b3effd8 100644 --- a/engine/access/ingestion/engine.go +++ b/engine/access/ingestion/engine.go @@ -1,10 +1,7 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package ingestion import ( "context" - "errors" "fmt" "time" @@ -12,95 +9,105 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages" "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/module/jobqueue" + "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/logging" ) -// time to wait for the all the missing collections to be received at node startup -const collectionCatchupTimeout = 30 * time.Second +const ( + // time to wait for all the missing collections to be received at node startup + collectionCatchupTimeout = 30 * time.Second + + // time to poll the storage to check if missing collections have been received + collectionCatchupDBPollInterval = 10 * time.Millisecond -// time to poll the storage to check if missing collections have been received -const collectionCatchupDBPollInterval = 10 * time.Millisecond + // time to update the FullBlockHeight index + fullBlockRefreshInterval = 1 * time.Second -// time to update the FullBlockHeight index -const fullBlockUpdateInterval = 1 * time.Minute + // time to request missing collections from the network + missingCollsRequestInterval = 1 * time.Minute -// a threshold of number of blocks with missing collections beyond which collections should be re-requested -// this is to prevent spamming the collection nodes with request -const missingCollsForBlkThreshold = 100 + // a threshold of number of blocks with missing collections beyond which collections should be re-requested + // this is to prevent spamming the collection nodes with requests + missingCollsForBlockThreshold = 100 -// a threshold of block height beyond which
collections should be re-requested (regardless of the number of blocks for which collections are missing) + // this is to ensure that if a collection is missing for a long time (in terms of block height) it is eventually re-requested + missingCollsForAgeThreshold = 100 -// default queue capacity -const defaultQueueCapacity = 10_000 + // default queue capacity + defaultQueueCapacity = 10_000 -var defaultCollectionCatchupTimeout = collectionCatchupTimeout -var defaultCollectionCatchupDBPollInterval = collectionCatchupDBPollInterval -var defaultFullBlockUpdateInterval = fullBlockUpdateInterval -var defaultMissingCollsForBlkThreshold = missingCollsForBlkThreshold -var defaultMissingCollsForAgeThreshold = missingCollsForAgeThreshold + // processFinalizedBlocksWorkersCount defines the number of workers that + // concurrently process finalized blocks in the job queue. + processFinalizedBlocksWorkersCount = 1 + + // ensure blocks are processed sequentially by jobqueue + searchAhead = 1 +) // Engine represents the ingestion engine, used to funnel data from other nodes // to a centralized location that can be queried by a user +// +// No errors are expected during normal operation. type Engine struct { *component.ComponentManager messageHandler *engine.MessageHandler executionReceiptsNotifier engine.Notifier executionReceiptsQueue engine.MessageStore - finalizedBlockNotifier engine.Notifier - finalizedBlockQueue engine.MessageStore + // Job queue + finalizedBlockConsumer *jobqueue.ComponentConsumer + // Notifier for queue consumer + finalizedBlockNotifier engine.Notifier + + // txResultErrorMessagesChan is used to fetch and store transaction result error messages for blocks + txResultErrorMessagesChan chan flow.Identifier - log zerolog.Logger // used to log relevant actions with context - state protocol.State // used to access the protocol state - me module.Local // used to access local node information - request module.Requester // used to request collections + log zerolog.Logger // used to log relevant actions with context + state protocol.State // used to access the protocol state + me module.Local // used to access local node information // storage // FIX: remove direct DB access by substituting indexer module blocks storage.Blocks - headers storage.Headers - collections storage.Collections - transactions storage.Transactions executionReceipts storage.ExecutionReceipts maxReceiptHeight uint64 executionResults storage.ExecutionResults - // metrics - transactionMetrics module.TransactionMetrics - collectionsToMarkFinalized *stdmap.Times - collectionsToMarkExecuted *stdmap.Times - blocksToMarkExecuted *stdmap.Times + collectionSyncer *CollectionSyncer + // TODO: There's still a need for this metric to be in the ingestion engine rather than collection syncer. + // Maybe it is a good idea to split it up? + collectionExecutedMetric module.CollectionExecutedMetric + + txErrorMessagesCore *tx_error_messages.TxErrorMessagesCore } +var _ network.MessageProcessor = (*Engine)(nil) + // New creates a new access ingestion engine +// +// No errors are expected during normal operation.
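The constants processFinalizedBlocksWorkersCount = 1 and searchAhead = 1 above encode the design choice that finalized blocks are consumed strictly in height order: one worker, and the consumer never looks past the next unprocessed height. A toy model of that consumer shape, independent of the flow-go jobqueue package (all names here are illustrative):

```go
package main

import "fmt"

// consumer processes heights strictly sequentially: a single worker that never
// looks more than one job ahead of the last processed height (searchAhead = 1).
type consumer struct {
	processed uint64            // highest height fully processed
	jobs      map[uint64]string // available jobs keyed by height
}

// poke mimics the notifier signal: on every wake-up, consume as many
// contiguous heights as are available, in order.
func (c *consumer) poke(process func(uint64, string)) {
	for {
		job, ok := c.jobs[c.processed+1]
		if !ok {
			return // next height not available yet; wait for another signal
		}
		c.processed++
		process(c.processed, job)
	}
}

func main() {
	c := &consumer{jobs: map[uint64]string{1: "block-1", 2: "block-2", 4: "block-4"}}
	c.poke(func(h uint64, j string) { fmt.Println("processed", h, j) })
	// block-4 stays queued until height 3 arrives, preserving sequential order
}
```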
func New( log zerolog.Logger, - net network.Network, + net network.EngineRegistry, state protocol.State, me module.Local, - request module.Requester, blocks storage.Blocks, - headers storage.Headers, - collections storage.Collections, - transactions storage.Transactions, executionResults storage.ExecutionResults, executionReceipts storage.ExecutionReceipts, - transactionMetrics module.TransactionMetrics, - collectionsToMarkFinalized *stdmap.Times, - collectionsToMarkExecuted *stdmap.Times, - blocksToMarkExecuted *stdmap.Times, + finalizedProcessedHeight storage.ConsumerProgressInitializer, + collectionSyncer *CollectionSyncer, + collectionExecutedMetric module.CollectionExecutedMetric, + txErrorMessagesCore *tx_error_messages.TxErrorMessagesCore, ) (*Engine, error) { executionReceiptsRawQueue, err := fifoqueue.NewFifoQueue(defaultQueueCapacity) if err != nil { @@ -109,23 +116,9 @@ func New( executionReceiptsQueue := &engine.FifoMessageStore{FifoQueue: executionReceiptsRawQueue} - finalizedBlocksRawQueue, err := fifoqueue.NewFifoQueue(defaultQueueCapacity) - if err != nil { - return nil, fmt.Errorf("could not create finalized block queue: %w", err) - } - - finalizedBlocksQueue := &engine.FifoMessageStore{FifoQueue: finalizedBlocksRawQueue} - messageHandler := engine.NewMessageHandler( log, engine.NewNotifier(), - engine.Pattern{ - Match: func(msg *engine.Message) bool { - _, ok := msg.Payload.(*model.Block) - return ok - }, - Store: finalizedBlocksQueue, - }, engine.Pattern{ Match: func(msg *engine.Message) bool { _, ok := msg.Payload.(*flow.ExecutionReceipt) @@ -137,39 +130,67 @@ func New( // initialize the propagation engine with its dependencies e := &Engine{ - log: log.With().Str("engine", "ingestion").Logger(), - state: state, - me: me, - request: request, - blocks: blocks, - headers: headers, - collections: collections, - transactions: transactions, - executionResults: executionResults, - executionReceipts: executionReceipts, - maxReceiptHeight: 0, - transactionMetrics: transactionMetrics, - collectionsToMarkFinalized: collectionsToMarkFinalized, - collectionsToMarkExecuted: collectionsToMarkExecuted, - blocksToMarkExecuted: blocksToMarkExecuted, + log: log.With().Str("engine", "ingestion").Logger(), + state: state, + me: me, + blocks: blocks, + executionResults: executionResults, + executionReceipts: executionReceipts, + maxReceiptHeight: 0, + collectionExecutedMetric: collectionExecutedMetric, + finalizedBlockNotifier: engine.NewNotifier(), // queue / notifier for execution receipts executionReceiptsNotifier: engine.NewNotifier(), + txResultErrorMessagesChan: make(chan flow.Identifier, 1), executionReceiptsQueue: executionReceiptsQueue, + messageHandler: messageHandler, + txErrorMessagesCore: txErrorMessagesCore, + collectionSyncer: collectionSyncer, + } - // queue / notifier for finalized blocks - finalizedBlockNotifier: engine.NewNotifier(), - finalizedBlockQueue: finalizedBlocksQueue, + // jobqueue Jobs object that tracks finalized blocks by height. This is used by the finalizedBlockConsumer + // to get a sequential list of finalized blocks. finalizedBlockReader := jobqueue.NewFinalizedBlockReader(state, blocks) - messageHandler: messageHandler, + defaultIndex, err := e.defaultProcessedIndex() + if err != nil { + return nil, fmt.Errorf("could not read default finalized processed index: %w", err) + } + + // create a jobqueue that will process newly available finalized blocks.
The `finalizedBlockNotifier` is used to + signal new work, which is then handled by the `processFinalizedBlockJob` handler. + e.finalizedBlockConsumer, err = jobqueue.NewComponentConsumer( + e.log.With().Str("module", "ingestion_block_consumer").Logger(), + e.finalizedBlockNotifier.Channel(), + finalizedProcessedHeight, + finalizedBlockReader, + defaultIndex, + e.processFinalizedBlockJob, + processFinalizedBlocksWorkersCount, + searchAhead, + ) + if err != nil { + return nil, fmt.Errorf("error creating finalizedBlock jobqueue: %w", err) } // Add workers - e.ComponentManager = component.NewComponentManagerBuilder(). - AddWorker(e.processBackground). + builder := component.NewComponentManagerBuilder(). + AddWorker(e.collectionSyncer.RequestCollections). AddWorker(e.processExecutionReceipts). - AddWorker(e.processFinalizedBlocks). - Build() + AddWorker(e.runFinalizedBlockConsumer) + + // TODO: should we add a nil check for the collection syncer (as done below)? + + // If txErrorMessagesCore is provided, add a worker responsible for processing + // transaction result error messages by receipts. This worker listens for blocks + // containing execution receipts and processes any associated transaction result + // error messages. The worker is added only when error message processing is enabled. + if txErrorMessagesCore != nil { + builder.AddWorker(e.processTransactionResultErrorMessagesByReceipts) + } + + e.ComponentManager = builder.Build() // register engine with the execution receipt provider _, err = net.Register(channels.ReceiveReceipts, e) @@ -180,57 +201,51 @@ func New( return e, nil } -func (e *Engine) Start(parent irrecoverable.SignalerContext) { - err := e.initLastFullBlockHeightIndex() +// defaultProcessedIndex returns the last finalized block height from the protocol state. +// +// The finalizedBlockConsumer utilizes the returned height to fetch and consume block jobs from +// the jobs queue the first time it initializes. +// +// No errors are expected during normal operation. +func (e *Engine) defaultProcessedIndex() (uint64, error) { + final, err := e.state.Final().Head() if err != nil { - parent.Throw(fmt.Errorf("unexpected error initializing full block index: %w", err)) + return 0, fmt.Errorf("could not get finalized height: %w", err) } - - e.ComponentManager.Start(parent) + return final.Height, nil } -// initializeLastFullBlockHeightIndex initializes the index of full blocks -// (blocks for which we have ingested all collections) to the root block height. -// This means that the Access Node will ingest all collections for all blocks -// ingested after state bootstrapping is complete (all blocks received from the network). -// If the index has already been initialized, this is a no-op. -// No errors are expected during normal operation.
-func (e *Engine) initLastFullBlockHeightIndex() error { - rootBlock, err := e.state.Params().Root() - if err != nil { - return fmt.Errorf("failed to get root block: %w", err) - } - err = e.blocks.InsertLastFullBlockHeightIfNotExists(rootBlock.Height) - if err != nil { - return fmt.Errorf("failed to update last full block height during ingestion engine startup: %w", err) +// runFinalizedBlockConsumer runs the finalizedBlockConsumer component +func (e *Engine) runFinalizedBlockConsumer(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + e.finalizedBlockConsumer.Start(ctx) + + err := util.WaitClosed(ctx, e.finalizedBlockConsumer.Ready()) + if err == nil { + ready() } - return nil + <-e.finalizedBlockConsumer.Done() } -func (e *Engine) processBackground(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - // context with timeout - requestCtx, cancel := context.WithTimeout(ctx, defaultCollectionCatchupTimeout) - defer cancel() - - // request missing collections - err := e.requestMissingCollections(requestCtx) +// processFinalizedBlockJob is a handler function for processing finalized block jobs. +// It converts the job to a block, processes the block, and logs any errors encountered during processing. +func (e *Engine) processFinalizedBlockJob(ctx irrecoverable.SignalerContext, job module.Job, done func()) { + block, err := jobqueue.JobToBlock(job) if err != nil { - e.log.Error().Err(err).Msg("requesting missing collections failed") + ctx.Throw(fmt.Errorf("failed to convert job to block: %w", err)) } - ready() - ticker := time.NewTicker(defaultFullBlockUpdateInterval) - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - e.updateLastFullBlockReceivedIndex() - } + err = e.processFinalizedBlock(block) + if err == nil { + done() + return } + + e.log.Error().Err(err).Str("job_id", string(job.ID())).Msg("error during finalized block processing job") } +// processExecutionReceipts is responsible for processing the execution receipts. +// It listens for incoming execution receipts and processes them asynchronously. func (e *Engine) processExecutionReceipts(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { ready() notifier := e.executionReceiptsNotifier.Channel() @@ -250,6 +265,10 @@ func (e *Engine) processExecutionReceipts(ctx irrecoverable.SignalerContext, rea } } +// processAvailableExecutionReceipts processes available execution receipts in the queue and handles them. +// It continues processing until the context is canceled. +// +// No errors are expected during normal operation. func (e *Engine) processAvailableExecutionReceipts(ctx context.Context) error { for { select { @@ -267,46 +286,42 @@ func (e *Engine) processAvailableExecutionReceipts(ctx context.Context) error { if err := e.handleExecutionReceipt(msg.OriginID, receipt); err != nil { return err } - } + // Notify to fetch and store transaction result error messages for the block. + // If txErrorMessagesCore is enabled, the receipt's BlockID is sent to trigger + // transaction error message processing. This step is skipped if error message + // storage is not enabled. + if e.txErrorMessagesCore != nil { + e.txResultErrorMessagesChan <- receipt.BlockID + } + } } -func (e *Engine) processFinalizedBlocks(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { +// processTransactionResultErrorMessagesByReceipts handles error messages related to transaction +// results by reading from the error messages channel and processing them accordingly.
+// +// This function listens for messages on the txResultErrorMessagesChan channel and +// processes each transaction result error message as it arrives. +// +// No errors are expected during normal operation. +func (e *Engine) processTransactionResultErrorMessagesByReceipts(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { ready() - notifier := e.finalizedBlockNotifier.Channel() for { select { case <-ctx.Done(): return - case <-notifier: - _ = e.processAvailableFinalizedBlocks(ctx) - } - } -} - -func (e *Engine) processAvailableFinalizedBlocks(ctx context.Context) error { - for { - select { - case <-ctx.Done(): - return nil - default: - } - - msg, ok := e.finalizedBlockQueue.Get() - if !ok { - return nil - } - - hb := msg.Payload.(*model.Block) - blockID := hb.BlockID - - if err := e.processFinalizedBlock(blockID); err != nil { - e.log.Error().Err(err).Hex("block_id", blockID[:]).Msg("failed to process block") - continue + case blockID := <-e.txResultErrorMessagesChan: + err := e.txErrorMessagesCore.FetchErrorMessages(ctx, blockID) + if err != nil { + // TODO: we should revisit error handling here. + // Errors that come from querying the EN and possibly ExecutionNodesForBlockID should be logged and + // retried later, while others should cause an exception. + e.log.Error(). + Err(err). + Msg("error encountered while processing transaction result error messages by receipts") + } } - - e.trackFinalizedMetricForBlock(hb) } } @@ -325,65 +340,39 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { err := e.messageHandler.Process(originID, event) e.executionReceiptsNotifier.Notify() return err - case *model.Block: - err := e.messageHandler.Process(originID, event) - e.finalizedBlockNotifier.Notify() - return err default: return fmt.Errorf("invalid event type (%T)", event) } } -// SubmitLocal submits an event originating on the local node. -func (e *Engine) SubmitLocal(event interface{}) { - err := e.process(e.me.NodeID(), event) - if err != nil { - engine.LogError(e.log, err) - } -} - -// Submit submits the given event from the node with the given origin ID -// for processing in a non-blocking manner. It returns instantly and logs -// a potential processing error internally when done. -func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { - err := e.process(originID, event) - if err != nil { - engine.LogError(e.log, err) - } -} - -// ProcessLocal processes an event originating on the local node. -func (e *Engine) ProcessLocal(event interface{}) error { - return e.process(e.me.NodeID(), event) -} - // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. -func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { +func (e *Engine) Process(_ channels.Channel, originID flow.Identifier, event interface{}) error { return e.process(originID, event) } -// OnFinalizedBlock is called by the follower engine after a block has been finalized and the state has been updated -func (e *Engine) OnFinalizedBlock(hb *model.Block) { - _ = e.ProcessLocal(hb) +// OnFinalizedBlock is called by the follower engine after a block has been finalized and the state has been updated. +// Receives block finalized events from the finalization distributor and forwards them to the finalizedBlockConsumer. 
-// processBlock handles an incoming finalized block.
-func (e *Engine) processFinalizedBlock(blockID flow.Identifier) error {
-
-	// TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID
-	block, err := e.blocks.ByID(blockID)
-	if err != nil {
-		return fmt.Errorf("failed to lookup block: %w", err)
-	}
-
+// processFinalizedBlock handles an incoming finalized block.
+// It processes the block, indexes it for further processing, and requests missing collections if necessary.
+//
+// Expected errors during normal operation:
+// - storage.ErrNotFound - if the last full block height does not exist in the database.
+// - storage.ErrAlreadyExists - if a collection within the block or an execution result ID already exists in the database.
+// - generic error in case of unexpected failure from the database layer, or failure
+//   to decode an existing database value.
+func (e *Engine) processFinalizedBlock(block *flow.Block) error {
 	// FIX: we can't index guarantees here, as we might have more than one block
 	// with the same collection as long as it is not finalized
 	// TODO: substitute an indexer module as layer between engine and storage
 
 	// index the block storage with each of the collection guarantee
-	err = e.blocks.IndexBlockForCollections(block.Header.ID(), flow.GetIDs(block.Payload.Guarantees))
+	err := e.blocks.IndexBlockContainingCollectionGuarantees(block.ID(), flow.GetIDs(block.Payload.Guarantees))
 	if err != nil {
 		return fmt.Errorf("could not index block for collections: %w", err)
 	}
@@ -396,415 +385,23 @@ func (e *Engine) processFinalizedBlock(blockID flow.Identifier) error {
 		}
 	}
 
-	// skip requesting collections, if this block is below the last full block height
-	// this means that either we have already received these collections, or the block
-	// may contain unverifiable guarantees (in case this node has just joined the network)
-	lastFullBlockHeight, err := e.blocks.GetLastFullBlockHeight()
-	if err != nil {
-		return fmt.Errorf("could not get last full block height: %w", err)
-	}
-
-	if block.Header.Height <= lastFullBlockHeight {
-		e.log.Info().Msgf("skipping requesting collections for finalized block below last full block height (%d<=%d)", block.Header.Height, lastFullBlockHeight)
-		return nil
-	}
-
-	// queue requesting each of the collections from the collection node
-	e.requestCollectionsInFinalizedBlock(block.Payload.Guarantees)
+	e.collectionSyncer.RequestCollectionsForBlock(block.Height, block.Payload.Guarantees)
+	e.collectionExecutedMetric.BlockFinalized(block)
 
 	return nil
 }
 
-func (e *Engine) trackFinalizedMetricForBlock(hb *model.Block) {
-	// TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID
-	// retrieve the block
-	block, err := e.blocks.ByID(hb.BlockID)
-	if err != nil {
-		e.log.Warn().Err(err).Msg("could not track tx finalized metric: finalized block not found locally")
-		return
-	}
-
-	// TODO lookup actual finalization time by looking at the block finalizing `b`
-	now := time.Now().UTC()
-
-	// mark all transactions as finalized
-	// TODO: sample to reduce performance overhead
-	for _, g := range block.Payload.Guarantees {
-		l, err := e.collections.LightByID(g.CollectionID)
-		if errors.Is(err, storage.ErrNotFound) {
-			e.collectionsToMarkFinalized.Add(g.CollectionID, now)
-			continue
-		} else if err != nil {
-			e.log.Warn().Err(err).Str("collection_id", g.CollectionID.String()).
-				Msg("could not track tx finalized metric: finalized collection not found locally")
-			continue
-		}
-
-		for _, t := range l.Transactions {
-			e.transactionMetrics.TransactionFinalized(t, now)
-		}
-	}
-
-	if ti, found := e.blocksToMarkExecuted.ByID(hb.BlockID); found {
-		e.trackExecutedMetricForBlock(block, ti)
-		e.transactionMetrics.UpdateExecutionReceiptMaxHeight(block.Header.Height)
-		e.blocksToMarkExecuted.Remove(hb.BlockID)
-	}
-}
-
-func (e *Engine) handleExecutionReceipt(originID flow.Identifier, r *flow.ExecutionReceipt) error {
+// handleExecutionReceipt persists the execution receipt locally.
+// It stores the execution receipt (which also indexes it) and updates the collection executed metric.
+//
+// No errors are expected during normal operation.
+func (e *Engine) handleExecutionReceipt(_ flow.Identifier, r *flow.ExecutionReceipt) error {
 	// persist the execution receipt locally, storing will also index the receipt
 	err := e.executionReceipts.Store(r)
 	if err != nil {
 		return fmt.Errorf("failed to store execution receipt: %w", err)
 	}
 
-	e.trackExecutionReceiptMetrics(r)
+	e.collectionExecutedMetric.ExecutionReceiptReceived(r)
 
 	return nil
 }
-
-func (e *Engine) trackExecutionReceiptMetrics(r *flow.ExecutionReceipt) {
-	// TODO add actual execution time to execution receipt?
-	now := time.Now().UTC()
-
-	// retrieve the block
-	// TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID
-	b, err := e.blocks.ByID(r.ExecutionResult.BlockID)
-
-	if errors.Is(err, storage.ErrNotFound) {
-		e.blocksToMarkExecuted.Add(r.ExecutionResult.BlockID, now)
-		return
-	}
-
-	if err != nil {
-		e.log.Warn().Err(err).Msg("could not track tx executed metric: executed block not found locally")
-		return
-	}
-
-	e.transactionMetrics.UpdateExecutionReceiptMaxHeight(b.Header.Height)
-
-	e.trackExecutedMetricForBlock(b, now)
-}
-
-func (e *Engine) trackExecutedMetricForBlock(block *flow.Block, ti time.Time) {
-	// mark all transactions as executed
-	// TODO: sample to reduce performance overhead
-	for _, g := range block.Payload.Guarantees {
-		l, err := e.collections.LightByID(g.CollectionID)
-		if errors.Is(err, storage.ErrNotFound) {
-			e.collectionsToMarkExecuted.Add(g.CollectionID, ti)
-			continue
-		} else if err != nil {
-			e.log.Warn().Err(err).Str("collection_id", g.CollectionID.String()).
- Msg("could not track tx executed metric: executed collection not found locally") - continue - } - - for _, t := range l.Transactions { - e.transactionMetrics.TransactionExecuted(t, ti) - } - } -} - -// handleCollection handles the response of the a collection request made earlier when a block was received -func (e *Engine) handleCollection(originID flow.Identifier, entity flow.Entity) error { - - // convert the entity to a strictly typed collection - collection, ok := entity.(*flow.Collection) - if !ok { - return fmt.Errorf("invalid entity type (%T)", entity) - } - - light := collection.Light() - - if ti, found := e.collectionsToMarkFinalized.ByID(light.ID()); found { - for _, t := range light.Transactions { - e.transactionMetrics.TransactionFinalized(t, ti) - } - e.collectionsToMarkFinalized.Remove(light.ID()) - } - - if ti, found := e.collectionsToMarkExecuted.ByID(light.ID()); found { - for _, t := range light.Transactions { - e.transactionMetrics.TransactionExecuted(t, ti) - } - e.collectionsToMarkExecuted.Remove(light.ID()) - } - - // FIX: we can't index guarantees here, as we might have more than one block - // with the same collection as long as it is not finalized - - // store the light collection (collection minus the transaction body - those are stored separately) - // and add transaction ids as index - err := e.collections.StoreLightAndIndexByTransaction(&light) - if err != nil { - // ignore collection if already seen - if errors.Is(err, storage.ErrAlreadyExists) { - e.log.Debug(). - Hex("collection_id", logging.Entity(light)). - Msg("collection is already seen") - return nil - } - return err - } - - // now store each of the transaction body - for _, tx := range collection.Transactions { - err := e.transactions.Store(tx) - if err != nil { - return fmt.Errorf("could not store transaction (%x): %w", tx.ID(), err) - } - } - - return nil -} - -func (e *Engine) OnCollection(originID flow.Identifier, entity flow.Entity) { - err := e.handleCollection(originID, entity) - if err != nil { - e.log.Error().Err(err).Msg("could not handle collection") - return - } -} - -// requestMissingCollections requests missing collections for all blocks in the local db storage once at startup -func (e *Engine) requestMissingCollections(ctx context.Context) error { - - var startHeight, endHeight uint64 - - // get the height of the last block for which all collections were received - lastFullHeight, err := e.blocks.GetLastFullBlockHeight() - if err != nil { - return fmt.Errorf("failed to complete requests for missing collections: %w", err) - } - - // start from the next block - startHeight = lastFullHeight + 1 - - // end at the finalized block - finalBlk, err := e.state.Final().Head() - if err != nil { - return err - } - endHeight = finalBlk.Height - - e.log.Info(). - Uint64("start_height", startHeight). - Uint64("end_height", endHeight). 
- Msg("starting collection catchup") - - // collect all missing collection ids in a map - var missingCollMap = make(map[flow.Identifier]struct{}) - - // iterate through the complete chain and request the missing collections - for i := startHeight; i <= endHeight; i++ { - - // if deadline exceeded or someone cancelled the context - if ctx.Err() != nil { - return fmt.Errorf("failed to complete requests for missing collections: %w", ctx.Err()) - } - - missingColls, err := e.missingCollectionsAtHeight(i) - if err != nil { - return fmt.Errorf("failed to retreive missing collections by height %d during collection catchup: %w", i, err) - } - - // request the missing collections - e.requestCollectionsInFinalizedBlock(missingColls) - - // add them to the missing collection id map to track later - for _, cg := range missingColls { - missingCollMap[cg.CollectionID] = struct{}{} - } - } - - // if no collections were found to be missing we are done. - if len(missingCollMap) == 0 { - // nothing more to do - e.log.Info().Msg("no missing collections found") - return nil - } - - // the collection catchup needs to happen ASAP when the node starts up. Hence, force the requester to dispatch all request - e.request.Force() - - // track progress of retrieving all the missing collections by polling the db periodically - ticker := time.NewTicker(defaultCollectionCatchupDBPollInterval) - defer ticker.Stop() - - // while there are still missing collections, keep polling - for len(missingCollMap) > 0 { - select { - case <-ctx.Done(): - // context may have expired - return fmt.Errorf("failed to complete collection retreival: %w", ctx.Err()) - case <-ticker.C: - - // log progress - e.log.Info(). - Int("total_missing_collections", len(missingCollMap)). - Msg("retrieving missing collections...") - - var foundColls []flow.Identifier - // query db to find if collections are still missing - for collId := range missingCollMap { - found, err := e.lookupCollection(collId) - if err != nil { - return err - } - // if collection found in local db, remove it from missingColls later - if found { - foundColls = append(foundColls, collId) - } - } - - // update the missingColls list by removing collections that have now been received - for _, c := range foundColls { - delete(missingCollMap, c) - } - } - } - - e.log.Info().Msg("collection catchup done") - return nil -} - -// updateLastFullBlockReceivedIndex keeps the FullBlockHeight index upto date and requests missing collections if -// the number of blocks missing collection have reached the defaultMissingCollsForBlkThreshold value. 
-// (The FullBlockHeight index indicates that block for which all collections have been received) -func (e *Engine) updateLastFullBlockReceivedIndex() { - - logError := func(err error) { - e.log.Error().Err(err).Msg("failed to update the last full block height") - } - - lastFullHeight, err := e.blocks.GetLastFullBlockHeight() - if err != nil { - if !errors.Is(err, storage.ErrNotFound) { - logError(err) - return - } - // use the root height as the last full height - header, err := e.state.Params().Root() - if err != nil { - logError(err) - return - } - lastFullHeight = header.Height - } - - e.log.Debug().Uint64("last_full_block_height", lastFullHeight).Msg("updating LastFullBlockReceived index...") - - finalBlk, err := e.state.Final().Head() - if err != nil { - logError(err) - return - } - finalizedHeight := finalBlk.Height - - // track number of incomplete blocks - incompleteBlksCnt := 0 - - // track the latest contiguous full height - latestFullHeight := lastFullHeight - - // collect all missing collections - var allMissingColls []*flow.CollectionGuarantee - - // start from the next block till we either hit the finalized block or cross the max collection missing threshold - for i := lastFullHeight + 1; i <= finalizedHeight && incompleteBlksCnt < defaultMissingCollsForBlkThreshold; i++ { - - // find missing collections for block at height i - missingColls, err := e.missingCollectionsAtHeight(i) - if err != nil { - logError(err) - return - } - - // if there are missing collections - if len(missingColls) > 0 { - - // increment number of incomplete blocks - incompleteBlksCnt++ - - // collect the missing collections for requesting later - allMissingColls = append(allMissingColls, missingColls...) - - continue - } - - // if there are no missing collections so far, advance the latestFullHeight pointer - if incompleteBlksCnt == 0 { - latestFullHeight = i - } - } - - // if more contiguous blocks are now complete, update db - if latestFullHeight > lastFullHeight { - err = e.blocks.UpdateLastFullBlockHeight(latestFullHeight) - if err != nil { - logError(err) - return - } - } - - // additionally, if more than threshold blocks have missing collection OR collections are missing since defaultMissingCollsForAgeThreshold, re-request those collections - if incompleteBlksCnt >= defaultMissingCollsForBlkThreshold || (finalizedHeight-lastFullHeight) > uint64(defaultMissingCollsForAgeThreshold) { - // warn log since this should generally not happen - e.log.Warn(). - Int("missing_collection_blk_count", incompleteBlksCnt). - Int("threshold", defaultMissingCollsForBlkThreshold). - Uint64("last_full_blk_height", latestFullHeight). 
- Msg("re-requesting missing collections") - e.requestCollectionsInFinalizedBlock(allMissingColls) - } - - e.log.Debug().Uint64("last_full_blk_height", latestFullHeight).Msg("updated LastFullBlockReceived index") -} - -// missingCollectionsAtHeight returns all missing collection guarantees at a given height -func (e *Engine) missingCollectionsAtHeight(h uint64) ([]*flow.CollectionGuarantee, error) { - blk, err := e.blocks.ByHeight(h) - if err != nil { - return nil, fmt.Errorf("failed to retreive block by height %d: %w", h, err) - } - - var missingColls []*flow.CollectionGuarantee - for _, guarantee := range blk.Payload.Guarantees { - - collID := guarantee.CollectionID - found, err := e.lookupCollection(collID) - if err != nil { - return nil, err - } - if !found { - missingColls = append(missingColls, guarantee) - } - } - return missingColls, nil -} - -// lookupCollection looks up the collection from the collection db with collID -func (e *Engine) lookupCollection(collId flow.Identifier) (bool, error) { - _, err := e.collections.LightByID(collId) - if err == nil { - return true, nil - } - if errors.Is(err, storage.ErrNotFound) { - return false, nil - } - return false, fmt.Errorf("failed to retreive collection %s: %w", collId.String(), err) -} - -// requestCollectionsInFinalizedBlock registers collection requests with the requester engine -func (e *Engine) requestCollectionsInFinalizedBlock(missingColls []*flow.CollectionGuarantee) { - for _, cg := range missingColls { - // TODO: move this query out of for loop? - guarantors, err := protocol.FindGuarantors(e.state, cg) - if err != nil { - // failed to find guarantors for guarantees contained in a finalized block is fatal error - e.log.Fatal().Err(err).Msgf("could not find guarantors for guarantee %v", cg.ID()) - } - e.request.EntityByID(cg.ID(), filter.HasNodeID(guarantors...)) - } -} diff --git a/engine/access/ingestion/engine_test.go b/engine/access/ingestion/engine_test.go index 40beac9ffef..ece7712446a 100644 --- a/engine/access/ingestion/engine_test.go +++ b/engine/access/ingestion/engine_test.go @@ -2,13 +2,13 @@ package ingestion import ( "context" - "errors" "math/rand" "os" "sync" "testing" "time" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -17,19 +17,25 @@ import ( hotmodel "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" downloadermock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" - module "github.com/onflow/flow-go/module/mock" + modulemock "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/module/state_synchronization/indexer" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" protocol "github.com/onflow/flow-go/state/protocol/mock" - storerr "github.com/onflow/flow-go/storage" - storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + 
"github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" ) type Suite struct { @@ -42,306 +48,479 @@ type Suite struct { params *protocol.Params } - me *module.Local - request *module.Requester - provider *mocknetwork.Engine - blocks *storage.Blocks - headers *storage.Headers - collections *storage.Collections - transactions *storage.Transactions - receipts *storage.ExecutionReceipts - results *storage.ExecutionResults - seals *storage.Seals + me *modulemock.Local + net *mocknetwork.EngineRegistry + request *modulemock.Requester + obsIdentity *flow.Identity + provider *mocknetwork.Engine + blocks *storagemock.Blocks + headers *storagemock.Headers + collections *storagemock.Collections + transactions *storagemock.Transactions + receipts *storagemock.ExecutionReceipts + results *storagemock.ExecutionResults + seals *storagemock.Seals + + conduit *mocknetwork.Conduit downloader *downloadermock.Downloader sealedBlock *flow.Header finalizedBlock *flow.Header + log zerolog.Logger + blockMap map[uint64]*flow.Block + rootBlock *flow.Block - eng *Engine + collectionExecutedMetric *indexer.CollectionExecutedMetricImpl + + ctx context.Context cancel context.CancelFunc + + db storage.DB + dbDir string + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + lockManager lockctx.Manager } func TestIngestEngine(t *testing.T) { suite.Run(t, new(Suite)) } -func (suite *Suite) TearDownTest() { - suite.cancel() +// TearDownTest stops the engine and cleans up the db +func (s *Suite) TearDownTest() { + s.cancel() + err := os.RemoveAll(s.dbDir) + s.Require().NoError(err) } -func (suite *Suite) SetupTest() { - log := zerolog.New(os.Stderr) +func (s *Suite) SetupTest() { + s.log = unittest.Logger() + s.ctx, s.cancel = context.WithCancel(context.Background()) + db, dbDir := unittest.TempPebbleDB(s.T()) + s.db = pebbleimpl.ToDB(db) + s.dbDir = dbDir + s.lockManager = storage.NewTestingLockManager() - obsIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) + s.obsIdentity = unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) + s.blocks = storagemock.NewBlocks(s.T()) // mock out protocol state - suite.proto.state = new(protocol.FollowerState) - suite.proto.snapshot = new(protocol.Snapshot) - suite.proto.params = new(protocol.Params) - suite.finalizedBlock = unittest.BlockHeaderFixture(unittest.WithHeaderHeight(0)) - suite.proto.state.On("Identity").Return(obsIdentity, nil) - suite.proto.state.On("Final").Return(suite.proto.snapshot, nil) - suite.proto.state.On("Params").Return(suite.proto.params) - suite.proto.snapshot.On("Head").Return( + s.proto.state = new(protocol.FollowerState) + s.proto.snapshot = new(protocol.Snapshot) + s.proto.params = new(protocol.Params) + s.finalizedBlock = unittest.BlockHeaderFixture(unittest.WithHeaderHeight(0)) + s.proto.state.On("Identity").Return(s.obsIdentity, nil) + s.proto.state.On("Params").Return(s.proto.params) + s.proto.snapshot.On("Head").Return( func() *flow.Header { - return suite.finalizedBlock + return s.finalizedBlock }, nil, ).Maybe() - suite.me = new(module.Local) - suite.me.On("NodeID").Return(obsIdentity.NodeID) - - net := new(mocknetwork.Network) - conduit := new(mocknetwork.Conduit) - net.On("Register", channels.ReceiveReceipts, mock.Anything). 
+	s.me = modulemock.NewLocal(s.T())
+	s.me.On("NodeID").Return(s.obsIdentity.NodeID).Maybe()
+	s.net = mocknetwork.NewEngineRegistry(s.T())
+	conduit := mocknetwork.NewConduit(s.T())
+	s.net.On("Register", channels.ReceiveReceipts, mock.Anything).
 		Return(conduit, nil).
 		Once()
 
-	suite.request = new(module.Requester)
-
-	suite.provider = new(mocknetwork.Engine)
-	suite.blocks = new(storage.Blocks)
-	suite.headers = new(storage.Headers)
-	suite.collections = new(storage.Collections)
-	suite.transactions = new(storage.Transactions)
-	suite.receipts = new(storage.ExecutionReceipts)
-	suite.results = new(storage.ExecutionResults)
-	collectionsToMarkFinalized, err := stdmap.NewTimes(100)
-	require.NoError(suite.T(), err)
-	collectionsToMarkExecuted, err := stdmap.NewTimes(100)
-	require.NoError(suite.T(), err)
-	blocksToMarkExecuted, err := stdmap.NewTimes(100)
-	require.NoError(suite.T(), err)
-
-	eng, err := New(log, net, suite.proto.state, suite.me, suite.request, suite.blocks, suite.headers, suite.collections,
-		suite.transactions, suite.results, suite.receipts, metrics.NewNoopCollector(), collectionsToMarkFinalized, collectionsToMarkExecuted,
-		blocksToMarkExecuted)
-	require.NoError(suite.T(), err)
-
-	suite.blocks.On("GetLastFullBlockHeight").Once().Return(uint64(0), errors.New("do nothing"))
-
-	ctx, cancel := context.WithCancel(context.Background())
-	irrecoverableCtx, _ := irrecoverable.WithSignaler(ctx)
-	eng.ComponentManager.Start(irrecoverableCtx)
+	s.request = modulemock.NewRequester(s.T())
+	s.provider = mocknetwork.NewEngine(s.T())
+	s.blocks = storagemock.NewBlocks(s.T())
+	s.headers = storagemock.NewHeaders(s.T())
+	s.collections = new(storagemock.Collections)
+	s.receipts = new(storagemock.ExecutionReceipts)
+	s.transactions = new(storagemock.Transactions)
+	s.results = new(storagemock.ExecutionResults)
+	collectionsToMarkFinalized := stdmap.NewTimes(100)
+	collectionsToMarkExecuted := stdmap.NewTimes(100)
+	blocksToMarkExecuted := stdmap.NewTimes(100)
+	blockTransactions := stdmap.NewIdentifierMap(100)
+
+	s.proto.state.On("Identity").Return(s.obsIdentity, nil)
+	s.proto.state.On("Params").Return(s.proto.params)
+
+	blockCount := 5
+	s.blockMap = make(map[uint64]*flow.Block, blockCount)
+	s.rootBlock = unittest.Block.Genesis(flow.Emulator)
+	parent := s.rootBlock.ToHeader()
+
+	for i := 0; i < blockCount; i++ {
+		block := unittest.BlockWithParentFixture(parent)
+		// update for next iteration
+		parent = block.ToHeader()
+		s.blockMap[block.Height] = block
+	}
+	s.finalizedBlock = parent
+
+	s.blocks.On("ByHeight", mock.AnythingOfType("uint64")).Return(
+		mocks.ConvertStorageOutput(
+			mocks.StorageMapGetter(s.blockMap),
+			func(block *flow.Block) *flow.Block { return block },
+		),
+	).Maybe()
+
+	s.proto.snapshot.On("Head").Return(
+		func() *flow.Header {
+			return s.finalizedBlock
+		},
+		nil,
+	).Maybe()
+	s.proto.state.On("Final").Return(s.proto.snapshot, nil)
+
+	// Mock the finalized root block header with height 0.
+	header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(0))
+	s.proto.params.On("FinalizedRoot").Return(header, nil)
+
+	var err error
+	s.collectionExecutedMetric, err = indexer.NewCollectionExecutedMetricImpl(
+		s.log,
+		metrics.NewNoopCollector(),
+		collectionsToMarkFinalized,
+		collectionsToMarkExecuted,
+		blocksToMarkExecuted,
+		s.collections,
+		s.blocks,
+		blockTransactions,
+	)
+	require.NoError(s.T(), err)
+}
+
+// initEngineAndSyncer creates a new instance of the ingestion engine and the collection syncer.
+// It waits until the ingestion engine starts.
+func (s *Suite) initEngineAndSyncer(ctx irrecoverable.SignalerContext) (*Engine, *CollectionSyncer) {
+	processedHeightInitializer := store.NewConsumerProgress(s.db, module.ConsumeProgressIngestionEngineBlockHeight)
+
+	lastFullBlockHeight, err := store.NewConsumerProgress(s.db, module.ConsumeProgressLastFullBlockHeight).Initialize(s.finalizedBlock.Height)
+	require.NoError(s.T(), err)
+
+	s.lastFullBlockHeight, err = counters.NewPersistentStrictMonotonicCounter(lastFullBlockHeight)
+	require.NoError(s.T(), err)
+
+	syncer := NewCollectionSyncer(
+		s.log,
+		s.collectionExecutedMetric,
+		s.request,
+		s.proto.state,
+		s.blocks,
+		s.collections,
+		s.transactions,
+		s.lastFullBlockHeight,
+		s.lockManager,
+	)
+
+	eng, err := New(
+		s.log,
+		s.net,
+		s.proto.state,
+		s.me,
+		s.blocks,
+		s.results,
+		s.receipts,
+		processedHeightInitializer,
+		syncer,
+		s.collectionExecutedMetric,
+		nil,
+	)
+
+	require.NoError(s.T(), err)
+
+	eng.ComponentManager.Start(ctx)
 	<-eng.Ready()
 
-	suite.eng = eng
-	suite.cancel = cancel
+	return eng, syncer
 }
 
-// TestOnFinalizedBlock checks that when a block is received, a request for each individual collection is made
-func (suite *Suite) TestOnFinalizedBlock() {
-	suite.blocks.On("GetLastFullBlockHeight").Return(uint64(0), nil).Once()
+// mockCollectionsForBlock mocks collections for a block
+func (s *Suite) mockCollectionsForBlock(block *flow.Block) {
+	// we should query the block once and index the guarantee payload once
+	for _, g := range block.Payload.Guarantees {
+		collection := unittest.CollectionFixture(1)
+		light := collection.Light()
+		s.collections.On("LightByID", g.CollectionID).Return(light, nil).Twice()
+	}
+}
 
-	block := unittest.BlockFixture()
-	block.SetPayload(unittest.PayloadFixture(
-		unittest.WithGuarantees(unittest.CollectionGuaranteesFixture(4)...),
-		unittest.WithExecutionResults(unittest.ExecutionResultFixture()),
-	))
+// generateBlock prepares a block with a payload and the specified guarantee.SignerIndices
+func (s *Suite) generateBlock(clusterCommittee flow.IdentitySkeletonList, snap *protocol.Snapshot) *flow.Block {
+	block := unittest.BlockFixture(
+		unittest.Block.WithPayload(unittest.PayloadFixture(
+			unittest.WithGuarantees(unittest.CollectionGuaranteesFixture(4)...),
+			unittest.WithExecutionResults(unittest.ExecutionResultFixture()),
+			unittest.WithSeals(unittest.Seal.Fixture()),
+		)),
+	)
 
-	// prepare cluster committee members
-	clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole(flow.RoleCollection))
 	refBlockID := unittest.IdentifierFixture()
 	for _, guarantee := range block.Payload.Guarantees {
 		guarantee.ReferenceBlockID = refBlockID
 		// guarantee signers must be cluster committee members, so that access will fetch collection from
 		// the signers that are specified by guarantee.SignerIndices
 		indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs())
-		require.NoError(suite.T(), err)
+		require.NoError(s.T(), err)
 		guarantee.SignerIndices = indices
 	}
 
+	s.proto.state.On("AtBlockID", refBlockID).Return(snap)
+
+	return block
+}
+
+// TestOnFinalizedBlockSingle checks that when a block is received, a request for each individual collection is made
+func (s *Suite) TestOnFinalizedBlockSingle() {
+	cluster := new(protocol.Cluster)
+	epoch := new(protocol.CommittedEpoch)
+	epochs := new(protocol.EpochQuery)
+	snap := new(protocol.Snapshot)
+
+	epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil)
+	epochs.On("Current").Return(epoch, nil)
+	snap.On("Epochs").Return(epochs)
+
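An aside on the progress wiring used by initEngineAndSyncer above: the last-full-block height is stored in the database as consumer progress and wrapped in a strictly monotonic counter. A hedged sketch of that wiring (the calls mirror those in the test above; the claim that Set rejects non-increasing values is an assumption inferred from the type name):

package example

import (
	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/counters"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/store"
)

func initLastFullBlockHeight(db storage.DB, rootHeight uint64) (*counters.PersistentStrictMonotonicCounter, error) {
	// Initialize persists rootHeight as the starting value when no progress
	// has been stored yet, and returns the existing progress otherwise.
	progress, err := store.NewConsumerProgress(db, module.ConsumeProgressLastFullBlockHeight).Initialize(rootHeight)
	if err != nil {
		return nil, err
	}
	// The counter wraps the persisted progress; presumably Set only accepts
	// strictly increasing values, so the index can never move backwards.
	return counters.NewPersistentStrictMonotonicCounter(progress)
}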
+	// prepare cluster committee members
+	clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton()
+	cluster.On("Members").Return(clusterCommittee, nil)
+
+	irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx)
+	eng, _ := s.initEngineAndSyncer(irrecoverableCtx)
+
+	block := s.generateBlock(clusterCommittee, snap)
+	block.Height = s.finalizedBlock.Height + 1
+	s.blockMap[block.Height] = block
+	s.mockCollectionsForBlock(block)
+	s.finalizedBlock = block.ToHeader()
+
 	hotstuffBlock := hotmodel.Block{
 		BlockID: block.ID(),
 	}
 
-	// we should query the block once and index the guarantee payload once
-	suite.blocks.On("ByID", block.ID()).Return(&block, nil).Twice()
-	for _, g := range block.Payload.Guarantees {
-		collection := unittest.CollectionFixture(1)
-		light := collection.Light()
-		suite.collections.On("LightByID", g.CollectionID).Return(&light, nil).Twice()
+	// expect that the block storage is indexed with each of the collection guarantee
+	s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once()
+	for _, seal := range block.Payload.Seals {
+		s.results.On("Index", seal.BlockID, seal.ResultID).Return(nil).Once()
 	}
 
-	// expect that the block storage is indexed with each of the collection guarantee
-	suite.blocks.On("IndexBlockForCollections", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once()
+	missingCollectionCount := 4
+	wg := sync.WaitGroup{}
+	wg.Add(missingCollectionCount)
+	for _, cg := range block.Payload.Guarantees {
+		s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Run(func(args mock.Arguments) {
+			// Ensure the test does not complete its work faster than necessary
+			wg.Done()
+		}).Once()
+	}
+
+	// force should be called once
+	s.request.On("Force").Return().Once()
+
+	// process the block through the finalized callback
+	eng.OnFinalizedBlock(&hotstuffBlock)
+
+	unittest.RequireReturnsBefore(s.T(), wg.Wait, 100*time.Millisecond, "expect to process new block before timeout")
+
+	// assert that the block was retrieved and all collections were requested
+	s.headers.AssertExpectations(s.T())
+	s.request.AssertNumberOfCalls(s.T(), "EntityByID", len(block.Payload.Guarantees))
+	s.results.AssertNumberOfCalls(s.T(), "Index", len(block.Payload.Seals))
+}
+
+// TestOnFinalizedBlockSeveralBlocksAhead checks OnFinalizedBlock with a block several blocks newer than the last block processed
+func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() {
 	cluster := new(protocol.Cluster)
-	cluster.On("Members").Return(clusterCommittee, nil)
-	epoch := new(protocol.Epoch)
-	epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil)
+	epoch := new(protocol.CommittedEpoch)
 	epochs := new(protocol.EpochQuery)
-	epochs.On("Current").Return(epoch)
 	snap := new(protocol.Snapshot)
+
+	epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil)
+	epochs.On("Current").Return(epoch, nil)
 	snap.On("Epochs").Return(epochs)
-	suite.proto.state.On("AtBlockID", refBlockID).Return(snap)
-	suite.results.On("Index", mock.Anything, mock.Anything).Return(nil)
 
-	// for each of the guarantees, we should request the corresponding collection once
-	needed := make(map[flow.Identifier]struct{})
-	for _, guarantee := range block.Payload.Guarantees {
-		needed[guarantee.ID()] = struct{}{}
+	// prepare cluster committee members
+	clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton()
+	cluster.On("Members").Return(clusterCommittee, nil)
+
+	irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx)
+	eng, _ := s.initEngineAndSyncer(irrecoverableCtx)
+
+	newBlocksCount := 3
+	startHeight := s.finalizedBlock.Height + 1
+	blocks := make([]*flow.Block, newBlocksCount)
+
+	// generate the test blocks, cgs and collections
+	for i := 0; i < newBlocksCount; i++ {
+		block := s.generateBlock(clusterCommittee, snap)
+		block.Height = startHeight + uint64(i)
+		s.blockMap[block.Height] = block
+		blocks[i] = block
+		s.mockCollectionsForBlock(block)
+		s.finalizedBlock = block.ToHeader()
+	}
+
+	// latest of all the new blocks which are newer than the last block processed
+	latestBlock := blocks[2]
+
+	// block several blocks newer than the last block processed
+	hotstuffBlock := hotmodel.Block{
+		BlockID: latestBlock.ID(),
 	}
 
+	missingCollectionCountPerBlock := 4
 	wg := sync.WaitGroup{}
-	wg.Add(4)
+	wg.Add(missingCollectionCountPerBlock * newBlocksCount)
 
-	suite.request.On("EntityByID", mock.Anything, mock.Anything).Run(
-		func(args mock.Arguments) {
-			collID := args.Get(0).(flow.Identifier)
-			_, pending := needed[collID]
-			suite.Assert().True(pending, "collection should be pending (%x)", collID)
-			delete(needed, collID)
-			wg.Done()
-		},
-	)
+	// set expectations for every new block past the last processed one
+	for _, block := range blocks {
+		s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once()
 
-	// process the block through the finalized callback
-	suite.eng.OnFinalizedBlock(&hotstuffBlock)
-	suite.Assertions.Eventually(func() bool {
-		wg.Wait()
-		return true
-	}, time.Millisecond*20, time.Millisecond)
+		for _, cg := range block.Payload.Guarantees {
+			s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Run(func(args mock.Arguments) {
+				// Ensure the test does not complete its work faster than necessary, so we can check all expected results
+				wg.Done()
+			}).Once()
+		}
+		// force should be called once
+		s.request.On("Force").Return().Once()
 
-	// assert that the block was retrieved and all collections were requested
-	suite.headers.AssertExpectations(suite.T())
-	suite.request.AssertNumberOfCalls(suite.T(), "EntityByID", len(block.Payload.Guarantees))
-	suite.request.AssertNumberOfCalls(suite.T(), "Index", len(block.Payload.Seals))
+		for _, seal := range block.Payload.Seals {
+			s.results.On("Index", seal.BlockID, seal.ResultID).Return(nil).Once()
+		}
+	}
+
+	eng.OnFinalizedBlock(&hotstuffBlock)
+
+	unittest.RequireReturnsBefore(s.T(), wg.Wait, 100*time.Millisecond, "expect to process all blocks before timeout")
+
+	expectedEntityByIDCalls := 0
+	expectedIndexCalls := 0
+	for _, block := range blocks {
+		expectedEntityByIDCalls += len(block.Payload.Guarantees)
+		expectedIndexCalls += len(block.Payload.Seals)
+	}
+
+	s.headers.AssertExpectations(s.T())
+	s.blocks.AssertNumberOfCalls(s.T(), "IndexBlockContainingCollectionGuarantees", newBlocksCount)
+	s.request.AssertNumberOfCalls(s.T(), "EntityByID", expectedEntityByIDCalls)
+	s.results.AssertNumberOfCalls(s.T(), "Index", expectedIndexCalls)
 }
 
 // TestOnCollection checks that when a Collection is received, it is persisted
-func (suite *Suite) TestOnCollection() {
-	originID := unittest.IdentifierFixture()
+func (s *Suite) TestOnCollection() {
+	irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx)
+	s.initEngineAndSyncer(irrecoverableCtx)
+
collection := unittest.CollectionFixture(5) light := collection.Light() - // we should store the light collection and index its transactions - suite.collections.On("StoreLightAndIndexByTransaction", &light).Return(nil).Once() + // we should store the collection and index its transactions + s.collections.On("StoreAndIndexByTransaction", mock.Anything, &collection).Return(light, nil).Once() - // for each transaction in the collection, we should store it - needed := make(map[flow.Identifier]struct{}) - for _, txID := range light.Transactions { - needed[txID] = struct{}{} - } - suite.transactions.On("Store", mock.Anything).Return(nil).Run( - func(args mock.Arguments) { - tx := args.Get(0).(*flow.TransactionBody) - _, pending := needed[tx.ID()] - suite.Assert().True(pending, "tx not pending (%x)", tx.ID()) - }, - ) - - // process the block through the collection callback - suite.eng.OnCollection(originID, &collection) + // Create a lock context for indexing + err := unittest.WithLock(s.T(), s.lockManager, storage.LockInsertCollection, func(lctx lockctx.Context) error { + return indexer.IndexCollection(lctx, &collection, s.collections, s.log, s.collectionExecutedMetric) + }) + require.NoError(s.T(), err) - // check that the collection was stored and indexed, and we stored all transactions - suite.collections.AssertExpectations(suite.T()) - suite.transactions.AssertNumberOfCalls(suite.T(), "Store", len(collection.Transactions)) + // check that the collection was stored and indexed + s.collections.AssertExpectations(s.T()) } // TestExecutionReceiptsAreIndexed checks that execution receipts are properly indexed -func (suite *Suite) TestExecutionReceiptsAreIndexed() { +func (s *Suite) TestExecutionReceiptsAreIndexed() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) originID := unittest.IdentifierFixture() collection := unittest.CollectionFixture(5) light := collection.Light() - // we should store the light collection and index its transactions - suite.collections.On("StoreLightAndIndexByTransaction", &light).Return(nil).Once() - block := &flow.Block{ - Header: &flow.Header{Height: 0}, - Payload: &flow.Payload{Guarantees: []*flow.CollectionGuarantee{}}, - } - suite.blocks.On("ByID", mock.Anything).Return(block, nil) + // we should store the collection and index its transactions + s.collections.On("StoreAndIndexByTransaction", &collection).Return(light, nil).Once() + block := unittest.BlockFixture( + unittest.Block.WithHeight(0), + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees([]*flow.CollectionGuarantee{}...)), + ), + ) + s.blocks.On("ByID", mock.Anything).Return(block, nil) // for each transaction in the collection, we should store it needed := make(map[flow.Identifier]struct{}) for _, txID := range light.Transactions { needed[txID] = struct{}{} } - suite.transactions.On("Store", mock.Anything).Return(nil).Run( + s.transactions.On("Store", mock.Anything).Return(nil).Run( func(args mock.Arguments) { tx := args.Get(0).(*flow.TransactionBody) _, pending := needed[tx.ID()] - suite.Assert().True(pending, "tx not pending (%x)", tx.ID()) + s.Assert().True(pending, "tx not pending (%x)", tx.ID()) }, ) er1 := unittest.ExecutionReceiptFixture() er2 := unittest.ExecutionReceiptFixture() - suite.receipts.On("Store", mock.Anything).Return(nil) - suite.blocks.On("ByID", er1.ExecutionResult.BlockID).Return(nil, storerr.ErrNotFound) + s.receipts.On("Store", mock.Anything).Return(nil) + s.blocks.On("ByID", 
er1.ExecutionResult.BlockID).Return(nil, storage.ErrNotFound) - suite.receipts.On("Store", mock.Anything).Return(nil) - suite.blocks.On("ByID", er2.ExecutionResult.BlockID).Return(nil, storerr.ErrNotFound) + s.receipts.On("Store", mock.Anything).Return(nil) + s.blocks.On("ByID", er2.ExecutionResult.BlockID).Return(nil, storage.ErrNotFound) - err := suite.eng.handleExecutionReceipt(originID, er1) - require.NoError(suite.T(), err) + err := eng.handleExecutionReceipt(originID, er1) + require.NoError(s.T(), err) - err = suite.eng.handleExecutionReceipt(originID, er2) - require.NoError(suite.T(), err) + err = eng.handleExecutionReceipt(originID, er2) + require.NoError(s.T(), err) - suite.receipts.AssertExpectations(suite.T()) - suite.results.AssertExpectations(suite.T()) - suite.receipts.AssertExpectations(suite.T()) + s.receipts.AssertExpectations(s.T()) + s.results.AssertExpectations(s.T()) + s.receipts.AssertExpectations(s.T()) } -// TestOnCollection checks that when a duplicate collection is received, the node doesn't +// TestOnCollectionDuplicate checks that when a duplicate collection is received, the node doesn't // crash but just ignores its transactions. -func (suite *Suite) TestOnCollectionDuplicate() { +func (s *Suite) TestOnCollectionDuplicate() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + s.initEngineAndSyncer(irrecoverableCtx) - originID := unittest.IdentifierFixture() collection := unittest.CollectionFixture(5) light := collection.Light() - // we should store the light collection and index its transactions - suite.collections.On("StoreLightAndIndexByTransaction", &light).Return(storerr.ErrAlreadyExists).Once() - - // for each transaction in the collection, we should store it - needed := make(map[flow.Identifier]struct{}) - for _, txID := range light.Transactions { - needed[txID] = struct{}{} - } - suite.transactions.On("Store", mock.Anything).Return(nil).Run( - func(args mock.Arguments) { - tx := args.Get(0).(*flow.TransactionBody) - _, pending := needed[tx.ID()] - suite.Assert().True(pending, "tx not pending (%x)", tx.ID()) - }, - ) + // we should store the collection and index its transactions + s.collections.On("StoreAndIndexByTransaction", mock.Anything, &collection).Return(light, storage.ErrAlreadyExists).Once() - // process the block through the collection callback - suite.eng.OnCollection(originID, &collection) + // Create a lock context for indexing + err := unittest.WithLock(s.T(), s.lockManager, storage.LockInsertCollection, func(lctx lockctx.Context) error { + return indexer.IndexCollection(lctx, &collection, s.collections, s.log, s.collectionExecutedMetric) + }) + require.ErrorIs(s.T(), err, storage.ErrAlreadyExists) - // check that the collection was stored and indexed, and we stored all transactions - suite.collections.AssertExpectations(suite.T()) - suite.transactions.AssertNotCalled(suite.T(), "Store", "should not store any transactions") + // check that the collection was stored and indexed + s.collections.AssertExpectations(s.T()) } // TestRequestMissingCollections tests that the all missing collections are requested on the call to requestMissingCollections -func (suite *Suite) TestRequestMissingCollections() { +func (s *Suite) TestRequestMissingCollections() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + _, syncer := s.initEngineAndSyncer(irrecoverableCtx) blkCnt := 3 startHeight := uint64(1000) - blocks := make([]flow.Block, blkCnt) - heightMap := make(map[uint64]*flow.Block, blkCnt) // prepare 
cluster committee members - clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole(flow.RoleCollection)) + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() // generate the test blocks and collections var collIDs []flow.Identifier refBlockID := unittest.IdentifierFixture() for i := 0; i < blkCnt; i++ { - block := unittest.BlockFixture() - block.SetPayload(unittest.PayloadFixture( - unittest.WithGuarantees( - unittest.CollectionGuaranteesFixture(4, unittest.WithCollRef(refBlockID))...), - )) - // some blocks may not be present hence add a gap - height := startHeight + uint64(i) - block.Header.Height = height - blocks[i] = block - heightMap[height] = &block + block := unittest.BlockFixture( + // some blocks may not be present hence add a gap + unittest.Block.WithHeight(startHeight+uint64(i)), + unittest.Block.WithPayload(unittest.PayloadFixture( + unittest.WithGuarantees(unittest.CollectionGuaranteesFixture(4, unittest.WithCollRef(refBlockID))...)), + )) + s.blockMap[block.Height] = block + s.finalizedBlock = block.ToHeader() + for _, c := range block.Payload.Guarantees { collIDs = append(collIDs, c.CollectionID) c.ReferenceBlockID = refBlockID @@ -349,28 +528,16 @@ func (suite *Suite) TestRequestMissingCollections() { // guarantee signers must be cluster committee members, so that access will fetch collection from // the signers that are specified by guarantee.SignerIndices indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) - require.NoError(suite.T(), err) + require.NoError(s.T(), err) c.SignerIndices = indices } } - // setup the block storage mock - // each block should be queried by height - suite.blocks.On("ByHeight", mock.IsType(uint64(0))).Return( - func(h uint64) *flow.Block { - // simulate a db lookup - return heightMap[h] - }, - func(h uint64) error { - if _, ok := heightMap[h]; ok { - return nil - } - return storerr.ErrNotFound - }) // consider collections are missing for all blocks - suite.blocks.On("GetLastFullBlockHeight").Return(startHeight-1, nil) + err := s.lastFullBlockHeight.Set(startHeight - 1) + s.Require().NoError(err) + // consider the last test block as the head - suite.finalizedBlock = blocks[blkCnt-1].Header // p is the probability of not receiving the collection before the next poll and it // helps simulate the slow trickle of the requested collections being received @@ -381,7 +548,7 @@ func (suite *Suite) TestRequestMissingCollections() { // for the first lookup call for each collection, it will be reported as missing from db // for the subsequent calls, it will be reported as present with the probability p - suite.collections.On("LightByID", mock.Anything).Return( + s.collections.On("LightByID", mock.Anything).Return( func(cID flow.Identifier) *flow.LightCollection { return nil // the actual collection object return is never really read }, @@ -392,83 +559,86 @@ func (suite *Suite) TestRequestMissingCollections() { if rand.Float32() >= p { rcvdColl[cID] = struct{}{} } - return storerr.ErrNotFound + return storage.ErrNotFound }). 
// simulate some db i/o contention After(time.Millisecond * time.Duration(rand.Intn(5))) - // setup the requester engine mock + // set up the requester engine mock // entityByID should be called once per collection for _, c := range collIDs { - suite.request.On("EntityByID", c, mock.Anything).Return() + s.request.On("EntityByID", c, mock.Anything).Return() } // force should be called once - suite.request.On("Force").Return() + s.request.On("Force").Return() cluster := new(protocol.Cluster) cluster.On("Members").Return(clusterCommittee, nil) - epoch := new(protocol.Epoch) + epoch := new(protocol.CommittedEpoch) epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) epochs := new(protocol.EpochQuery) - epochs.On("Current").Return(epoch) + epochs.On("Current").Return(epoch, nil) snap := new(protocol.Snapshot) snap.On("Epochs").Return(epochs) - suite.proto.state.On("AtBlockID", refBlockID).Return(snap) + s.proto.state.On("AtBlockID", refBlockID).Return(snap) assertExpectations := func() { - suite.request.AssertExpectations(suite.T()) - suite.collections.AssertExpectations(suite.T()) - suite.proto.snapshot.AssertExpectations(suite.T()) - suite.blocks.AssertExpectations(suite.T()) + s.request.AssertExpectations(s.T()) + s.collections.AssertExpectations(s.T()) + s.proto.snapshot.AssertExpectations(s.T()) + s.blocks.AssertExpectations(s.T()) } // test 1 - collections are not received before timeout - suite.Run("timeout before all missing collections are received", func() { + s.Run("timeout before all missing collections are received", func() { // simulate that collection are never received p = 1 // timeout after 3 db polls - ctx, cancel := context.WithTimeout(context.Background(), 100*defaultCollectionCatchupDBPollInterval) + ctx, cancel := context.WithTimeout(context.Background(), 100*collectionCatchupDBPollInterval) defer cancel() - err := suite.eng.requestMissingCollections(ctx) + err := syncer.requestMissingCollectionsBlocking(ctx) - require.Error(suite.T(), err) - require.Contains(suite.T(), err.Error(), "context deadline exceeded") + require.Error(s.T(), err) + require.Contains(s.T(), err.Error(), "context deadline exceeded") assertExpectations() }) // test 2 - all collections are eventually received before the deadline - suite.Run("all missing collections are received", func() { + s.Run("all missing collections are received", func() { // 90% of the time, collections are reported as not received when the collection storage is queried p = 0.9 - ctx, cancel := context.WithTimeout(context.Background(), defaultCollectionCatchupTimeout) + ctx, cancel := context.WithTimeout(context.Background(), collectionCatchupTimeout) defer cancel() - err := suite.eng.requestMissingCollections(ctx) + err := syncer.requestMissingCollectionsBlocking(ctx) - require.NoError(suite.T(), err) - require.Len(suite.T(), rcvdColl, len(collIDs)) + require.NoError(s.T(), err) + require.Len(s.T(), rcvdColl, len(collIDs)) assertExpectations() }) } -// TestUpdateLastFullBlockReceivedIndex tests that UpdateLastFullBlockReceivedIndex function keeps the FullBlockIndex -// upto date and request collections if blocks with missing collections exceed the threshold. -func (suite *Suite) TestUpdateLastFullBlockReceivedIndex() { +// TestProcessBackgroundCalls tests that updateLastFullBlockHeight and checkMissingCollections +// function calls keep the FullBlockIndex up-to-date and request collections if blocks with missing +// collections exceed the threshold. 
+func (s *Suite) TestProcessBackgroundCalls() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + _, syncer := s.initEngineAndSyncer(irrecoverableCtx) + blkCnt := 3 collPerBlk := 10 startHeight := uint64(1000) - blocks := make([]flow.Block, blkCnt) - heightMap := make(map[uint64]*flow.Block, blkCnt) + blocks := make([]*flow.Block, blkCnt) collMap := make(map[flow.Identifier]*flow.LightCollection, blkCnt*collPerBlk) // prepare cluster committee members - clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole(flow.RoleCollection)) + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() refBlockID := unittest.IdentifierFixture() // generate the test blocks, cgs and collections @@ -476,7 +646,7 @@ func (suite *Suite) TestUpdateLastFullBlockReceivedIndex() { guarantees := make([]*flow.CollectionGuarantee, collPerBlk) for j := 0; j < collPerBlk; j++ { coll := unittest.CollectionFixture(2).Light() - collMap[coll.ID()] = &coll + collMap[coll.ID()] = coll cg := unittest.CollectionGuaranteeFixture(func(cg *flow.CollectionGuarantee) { cg.CollectionID = coll.ID() cg.ReferenceBlockID = refBlockID @@ -485,47 +655,30 @@ func (suite *Suite) TestUpdateLastFullBlockReceivedIndex() { // guarantee signers must be cluster committee members, so that access will fetch collection from // the signers that are specified by guarantee.SignerIndices indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) - require.NoError(suite.T(), err) + require.NoError(s.T(), err) cg.SignerIndices = indices guarantees[j] = cg } - block := unittest.BlockFixture() - block.SetPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantees...))) - // set the height - height := startHeight + uint64(i) - block.Header.Height = height + block := unittest.BlockFixture( + unittest.Block.WithHeight(startHeight+uint64(i)), + unittest.Block.WithPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantees...))), + ) + s.blockMap[block.Height] = block blocks[i] = block - heightMap[height] = &block + s.finalizedBlock = block.ToHeader() } - rootBlk := blocks[0] - rootBlkHeight := rootBlk.Header.Height - finalizedBlk := blocks[blkCnt-1] - finalizedHeight := finalizedBlk.Header.Height - - // setup the block storage mock - // each block should be queried by height - suite.blocks.On("ByHeight", mock.IsType(uint64(0))).Return( - func(h uint64) *flow.Block { - // simulate a db lookup - return heightMap[h] - }, - func(h uint64) error { - if _, ok := heightMap[h]; ok { - return nil - } - return storerr.ErrNotFound - }) + finalizedHeight := s.finalizedBlock.Height cluster := new(protocol.Cluster) cluster.On("Members").Return(clusterCommittee, nil) - epoch := new(protocol.Epoch) + epoch := new(protocol.CommittedEpoch) epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) epochs := new(protocol.EpochQuery) - epochs.On("Current").Return(epoch) + epochs.On("Current").Return(epoch, nil) snap := new(protocol.Snapshot) snap.On("Epochs").Return(epochs) - suite.proto.state.On("AtBlockID", refBlockID).Return(snap) + s.proto.state.On("AtBlockID", refBlockID).Return(snap) // blkMissingColl controls which collections are reported as missing by the collections storage mock blkMissingColl := make([]bool, blkCnt) @@ -533,148 +686,141 @@ func (suite *Suite) TestUpdateLastFullBlockReceivedIndex() { blkMissingColl[i] = false for _, cg := range blocks[i].Payload.Guarantees { j := i - 
suite.collections.On("LightByID", cg.CollectionID).Return( + s.collections.On("LightByID", cg.CollectionID).Return( func(cID flow.Identifier) *flow.LightCollection { return collMap[cID] }, func(cID flow.Identifier) error { if blkMissingColl[j] { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }) } } - var lastFullBlockHeight uint64 - var rtnErr error - suite.blocks.On("GetLastFullBlockHeight").Return( - func() uint64 { - return lastFullBlockHeight - }, - func() error { - return rtnErr - }) - - // consider the last test block as the head - suite.finalizedBlock = finalizedBlk.Header - - suite.Run("full block height index is created and advanced if not present", func() { - // simulate the absence of the full block height index - lastFullBlockHeight = 0 - rtnErr = storerr.ErrNotFound - suite.proto.params.On("Root").Return(rootBlk.Header, nil) - suite.blocks.On("UpdateLastFullBlockHeight", finalizedHeight).Return(nil).Once() - - suite.eng.updateLastFullBlockReceivedIndex() - - suite.blocks.AssertExpectations(suite.T()) - }) - - suite.Run("full block height index is advanced if newer full blocks are discovered", func() { - rtnErr = nil - block := blocks[1] - lastFullBlockHeight = block.Header.Height - suite.blocks.On("UpdateLastFullBlockHeight", finalizedHeight).Return(nil).Once() - - suite.eng.updateLastFullBlockReceivedIndex() - - suite.blocks.AssertExpectations(suite.T()) - }) - - suite.Run("full block height index is not advanced beyond finalized blocks", func() { - rtnErr = nil - lastFullBlockHeight = finalizedHeight - - suite.eng.updateLastFullBlockReceivedIndex() - suite.blocks.AssertExpectations(suite.T()) // not new call to UpdateLastFullBlockHeight should be made - }) + rootBlk := blocks[0] - suite.Run("missing collections are requested when count exceeds defaultMissingCollsForBlkThreshold", func() { - // root block is the last complete block - rtnErr = nil - lastFullBlockHeight = rootBlkHeight + // root block is the last complete block + err := s.lastFullBlockHeight.Set(rootBlk.Height) + s.Require().NoError(err) + s.Run("missing collections are requested when count exceeds defaultMissingCollsForBlockThreshold", func() { // lower the block threshold to request missing collections - defaultMissingCollsForBlkThreshold = 2 + defaultMissingCollsForBlockThreshold = 2 // mark all blocks beyond the root block as incomplete for i := 1; i < blkCnt; i++ { blkMissingColl[i] = true // setup receive engine expectations for _, cg := range blocks[i].Payload.Guarantees { - suite.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Once() + s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Once() } } + // force should be called once + s.request.On("Force").Return().Once() - suite.eng.updateLastFullBlockReceivedIndex() + err := syncer.requestMissingCollections() + s.Require().NoError(err) // assert that missing collections are requested - suite.request.AssertExpectations(suite.T()) + s.request.AssertExpectations(s.T()) // last full blk index is not advanced - suite.blocks.AssertExpectations(suite.T()) // no new call to UpdateLastFullBlockHeight should be made + s.blocks.AssertExpectations(s.T()) // no new call to UpdateLastFullBlockHeight should be made }) - suite.Run("missing collections are requested when count exceeds defaultMissingCollsForAgeThreshold", func() { - // root block is the last complete block - rtnErr = nil - lastFullBlockHeight = rootBlkHeight - + s.Run("missing collections are requested when count exceeds 
defaultMissingCollsForAgeThreshold", func() {
-		// root block is the last complete block
-		rtnErr = nil
-		lastFullBlockHeight = rootBlkHeight
-
+	s.Run("missing collections are requested when count exceeds defaultMissingCollsForAgeThreshold", func() {
 		// lower the height threshold to request missing collections
 		defaultMissingCollsForAgeThreshold = 1
 
 		// raise the block threshold to ensure it does not trigger missing collection request
-		defaultMissingCollsForBlkThreshold = blkCnt + 1
+		defaultMissingCollsForBlockThreshold = blkCnt + 1
 
 		// mark all blocks beyond the root block as incomplete
 		for i := 1; i < blkCnt; i++ {
 			blkMissingColl[i] = true
 			// setup receive engine expectations
 			for _, cg := range blocks[i].Payload.Guarantees {
-				suite.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Once()
+				s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Once()
 			}
 		}
+		// force should be called once
+		s.request.On("Force").Return().Once()
 
-		suite.eng.updateLastFullBlockReceivedIndex()
+		err := syncer.requestMissingCollections()
+		s.Require().NoError(err)
 
 		// assert that missing collections are requested
-		suite.request.AssertExpectations(suite.T())
+		s.request.AssertExpectations(s.T())
 
 		// last full blk index is not advanced
-		suite.blocks.AssertExpectations(suite.T()) // not new call to UpdateLastFullBlockHeight should be made
+		s.blocks.AssertExpectations(s.T()) // no new call to UpdateLastFullBlockHeight should be made
 	})
 
-	suite.Run("missing collections are not requested if defaultMissingCollsForBlkThreshold not reached", func() {
-		// root block is the last complete block
-		rtnErr = nil
-		lastFullBlockHeight = rootBlkHeight
-
+	s.Run("missing collections are not requested if defaultMissingCollsForBlockThreshold not reached", func() {
 		// raise the thresholds to avoid requesting missing collections
 		defaultMissingCollsForAgeThreshold = 3
-		defaultMissingCollsForBlkThreshold = 3
+		defaultMissingCollsForBlockThreshold = 3
 
 		// mark all blocks beyond the root block as incomplete
 		for i := 1; i < blkCnt; i++ {
 			blkMissingColl[i] = true
 		}
 
-		suite.eng.updateLastFullBlockReceivedIndex()
+		err := syncer.requestMissingCollections()
+		s.Require().NoError(err)
 
 		// assert that missing collections are not requested even though there are collections missing
-		suite.request.AssertExpectations(suite.T())
+		s.request.AssertExpectations(s.T())
 
 		// last full blk index is not advanced
-		suite.blocks.AssertExpectations(suite.T()) // not new call to UpdateLastFullBlockHeight should be made
+		s.blocks.AssertExpectations(s.T()) // no new call to UpdateLastFullBlockHeight should be made
+	})
+
+	// create new block
+	height := blocks[blkCnt-1].Height + 1
+	finalizedBlk := unittest.BlockFixture(
+		unittest.Block.WithHeight(height),
+	)
+	s.blockMap[height] = finalizedBlk
+
+	finalizedHeight = finalizedBlk.Height
+	s.finalizedBlock = finalizedBlk.ToHeader()
+
+	blockBeforeFinalized := blocks[blkCnt-1]
+
+	s.Run("full block height index is advanced if newer full blocks are discovered", func() {
+		// set lastFullBlockHeight to the block right before the new finalized block
+		err = s.lastFullBlockHeight.Set(blockBeforeFinalized.Height)
+		s.Require().NoError(err)
+
+		err = syncer.updateLastFullBlockHeight()
+		s.Require().NoError(err)
+		s.Require().Equal(finalizedHeight, s.lastFullBlockHeight.Value())
+		s.Require().NoError(err)
+
+		s.blocks.AssertExpectations(s.T())
+	})
+
+	s.Run("full block height index is not advanced beyond finalized blocks", func() {
+		err = syncer.updateLastFullBlockHeight()
+		s.Require().NoError(err)
+
+		s.Require().Equal(finalizedHeight, s.lastFullBlockHeight.Value())
+		s.blocks.AssertExpectations(s.T())
 	})
 }
 
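The subtests above exercise two independent re-request triggers: too many blocks with missing collections, or a full-block index lagging too far behind finalization. The decision reduces to a single predicate, sketched here with illustrative names (not the actual CollectionSyncer code, but grounded in the threshold check the old engine used):

package example

// shouldRerequest reports whether missing collections should be re-requested:
// either enough blocks are incomplete, or the last full block is too old
// relative to the latest finalized height.
func shouldRerequest(incompleteBlocks int, lastFullHeight, finalizedHeight uint64, blockThreshold, ageThreshold int) bool {
	return incompleteBlocks >= blockThreshold ||
		finalizedHeight-lastFullHeight > uint64(ageThreshold)
}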
irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) + // start then shut down the engine - unittest.AssertClosesBefore(suite.T(), suite.eng.Ready(), 10*time.Millisecond) - suite.cancel() - unittest.AssertClosesBefore(suite.T(), suite.eng.Done(), 10*time.Millisecond) + unittest.AssertClosesBefore(s.T(), eng.Ready(), 10*time.Millisecond) + s.cancel() + unittest.AssertClosesBefore(s.T(), eng.Done(), 10*time.Millisecond) - err := suite.eng.ProcessLocal(&flow.ExecutionReceipt{}) - suite.Assert().ErrorIs(err, component.ErrComponentShutdown) + err := eng.Process(channels.ReceiveReceipts, unittest.IdentifierFixture(), new(flow.ExecutionReceipt)) + s.Assert().ErrorIs(err, component.ErrComponentShutdown) } diff --git a/engine/access/ingestion/tx_error_messages/mock/requester.go b/engine/access/ingestion/tx_error_messages/mock/requester.go new file mode 100644 index 00000000000..a486cbf9b3e --- /dev/null +++ b/engine/access/ingestion/tx_error_messages/mock/requester.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// Requester is an autogenerated mock type for the Requester type +type Requester struct { + mock.Mock +} + +// Request provides a mock function with given fields: ctx +func (_m *Requester) Request(ctx context.Context) ([]flow.TransactionResultErrorMessage, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Request") + } + + var r0 []flow.TransactionResultErrorMessage + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]flow.TransactionResultErrorMessage, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []flow.TransactionResultErrorMessage); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.TransactionResultErrorMessage) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewRequester creates a new instance of Requester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRequester(t interface { + mock.TestingT + Cleanup(func()) }) *Requester { + mock := &Requester{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/ingestion/tx_error_messages/requester.go b/engine/access/ingestion/tx_error_messages/requester.go new file mode 100644 index 00000000000..a759cc9a6cb --- /dev/null +++ b/engine/access/ingestion/tx_error_messages/requester.go @@ -0,0 +1,184 @@ +package tx_error_messages + +import ( + "context" + "errors" + "fmt" + "time" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "github.com/sethvargo/go-retry" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/utils/logging" + + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" +) + +// Requester defines the interface for requesting transaction result error messages. +type Requester interface { + // Request fetches transaction result error messages.
+ // Expected errors: + // - context.Canceled: if the provided context was canceled before completion + // + // No other errors are expected during normal operations + Request(ctx context.Context) ([]flow.TransactionResultErrorMessage, error) +} + +// RequesterConfig contains the retry settings for the tx error messages fetch. +type RequesterConfig struct { + // RetryDelay is the initial delay used in the exponential backoff for retrying failed tx error message downloads. + RetryDelay time.Duration + // MaxRetryDelay is the maximum delay used in the exponential backoff for retrying failed tx error message downloads. + MaxRetryDelay time.Duration +} + +var _ Requester = (*RequesterImpl)(nil) + +type RequesterImpl struct { + logger zerolog.Logger + config *RequesterConfig + txErrorMessageProvider error_messages.Provider + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider + executionResult *flow.ExecutionResult +} + +func NewRequester( + logger zerolog.Logger, + config *RequesterConfig, + txErrorMessageProvider error_messages.Provider, + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider, + executionResult *flow.ExecutionResult, +) *RequesterImpl { + return &RequesterImpl{ + logger: logger, + config: config, + txErrorMessageProvider: txErrorMessageProvider, + execNodeIdentitiesProvider: execNodeIdentitiesProvider, + executionResult: executionResult, + } +} + +// Request fetches transaction error messages for the specific +// execution result this requester was configured with. +// +// Expected errors during normal operations: +// - context.DeadlineExceeded - if the context times out +// - context.Canceled - if the context was canceled +func (r *RequesterImpl) Request(ctx context.Context) ([]flow.TransactionResultErrorMessage, error) { + backoff := retry.NewExponential(r.config.RetryDelay) + backoff = retry.WithCappedDuration(r.config.MaxRetryDelay, backoff) + backoff = retry.WithJitterPercent(15, backoff) + + blockID := r.executionResult.BlockID + resultID := r.executionResult.ID() + + var errorMessages []flow.TransactionResultErrorMessage + + attempt := 0 + err := retry.Do(ctx, backoff, func(context.Context) error { + if attempt > 0 { + r.logger.Debug(). + Str("block_id", blockID.String()). + Str("result_id", resultID.String()). + Uint64("attempt", uint64(attempt)). + Msg("retrying download") + } + attempt++ + + var err error + errorMessages, err = r.request(ctx, blockID, resultID) + if err == nil { + return nil + } + + // retry if there are no acceptable ENs to download messages from at this point + if errors.Is(err, rpc.ErrNoENsFoundForExecutionResult) { + return retry.RetryableError(err) + } + + // retry any grpc error except context canceled and deadline exceeded + if status, ok := status.FromError(err); ok { + if status.Code() == codes.DeadlineExceeded || status.Code() == codes.Canceled { + return errors.Join(err, ctx.Err()) + } + + return retry.RetryableError(err) + } + + return err + }) + + if err != nil { + return nil, err + } + return errorMessages, nil +} + +// request retrieves transaction error messages for a given block and result ID +// by querying the appropriate execution nodes. It returns a slice of error +// messages or an error if the retrieval fails. +// +// Expected errors during normal operations: +// 1. rpc.ErrNoENsFoundForExecutionResult - if no execution nodes were found that produced +// the provided execution result and matched the operator's criteria +// 2.
status.Error - gRPC call failed, some of the possible codes are: +// - codes.NotFound - request cannot be served by EN because of absence of data. +// - codes.Unavailable - remote node is unavailable. +// - codes.Canceled - if ctx is canceled during request +func (r *RequesterImpl) request( + ctx context.Context, + blockID flow.Identifier, + resultID flow.Identifier, +) ([]flow.TransactionResultErrorMessage, error) { + execNodes, err := r.execNodeIdentitiesProvider.ExecutionNodesForResultID(blockID, resultID) + if err != nil { + r.logger.Error().Err(err). + Str("block_id", blockID.String()). + Str("result_id", resultID.String()). + Msg("failed to find execution nodes for specific result ID") + return nil, fmt.Errorf("could not find execution nodes for result %v in block %v: %w", resultID, blockID, err) + } + + r.logger.Debug(). + Hex("block_id", logging.ID(blockID)). + Msg("started downloading transaction error messages for block") + + req := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: convert.IdentifierToMessage(blockID), + } + + resp, execNode, err := r.txErrorMessageProvider.ErrorMessageByBlockIDFromAnyEN(ctx, execNodes, req) + if err != nil { + r.logger.Error().Err(err). + Msgf("failed to get transaction error messages from execution nodes for blockID: %s", blockID.String()) + return nil, err + } + + errorMessages := r.convertResponse(resp, execNode) + return errorMessages, nil +} + +// convertResponse converts the execution node response messages into flow.TransactionResultErrorMessage records, attributing each message to the responding execution node. +func (r *RequesterImpl) convertResponse( + responseMessages []*execproto.GetTransactionErrorMessagesResponse_Result, + execNode *flow.IdentitySkeleton, +) []flow.TransactionResultErrorMessage { + errorMessages := make([]flow.TransactionResultErrorMessage, 0, len(responseMessages)) + for _, value := range responseMessages { + errorMessage := flow.TransactionResultErrorMessage{ + ErrorMessage: value.ErrorMessage, + TransactionID: convert.MessageToIdentifier(value.TransactionId), + Index: value.Index, + ExecutorID: execNode.NodeID, + } + errorMessages = append(errorMessages, errorMessage) + } + + return errorMessages +} diff --git a/engine/access/ingestion/tx_error_messages/requester_test.go b/engine/access/ingestion/tx_error_messages/requester_test.go new file mode 100644 index 00000000000..45c6ed40efe --- /dev/null +++ b/engine/access/ingestion/tx_error_messages/requester_test.go @@ -0,0 +1,254 @@ +package tx_error_messages + +import ( + "context" + "errors" + "testing" + "time" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/index" + accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" + protocol "github.com/onflow/flow-go/state/protocol/mock" + storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +type RequesterSuite struct { + suite.Suite + + log zerolog.Logger + proto struct { + state *protocol.FollowerState + snapshot *protocol.Snapshot + params *protocol.Params
+ } + + receipts *storage.ExecutionReceipts + enNodeIDs flow.IdentityList + execClient *accessmock.ExecutionAPIClient + connFactory *connectionmock.ConnectionFactory + + rootBlock *flow.Block + finalizedBlock *flow.Header + + txErrorMessages *storage.TransactionResultErrorMessages + lightTxResults *storage.LightTransactionResults + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + txResultsIndex *index.TransactionResultsIndex +} + +func TestRequester(t *testing.T) { + suite.Run(t, new(RequesterSuite)) +} + +func (s *RequesterSuite) SetupTest() { + s.log = unittest.Logger() + s.proto.state = protocol.NewFollowerState(s.T()) + s.proto.snapshot = protocol.NewSnapshot(s.T()) + s.proto.params = protocol.NewParams(s.T()) + s.execClient = accessmock.NewExecutionAPIClient(s.T()) + s.connFactory = connectionmock.NewConnectionFactory(s.T()) + s.receipts = storage.NewExecutionReceipts(s.T()) + s.rootBlock = unittest.Block.Genesis(flow.Emulator) + s.finalizedBlock = unittest.BlockWithParentFixture(s.rootBlock.ToHeader()).ToHeader() + + s.txErrorMessages = storage.NewTransactionResultErrorMessages(s.T()) + s.lightTxResults = storage.NewLightTransactionResults(s.T()) + s.reporter = syncmock.NewIndexReporter(s.T()) + s.indexReporter = index.NewReporter() + err := s.indexReporter.Initialize(s.reporter) + s.Require().NoError(err) + s.txResultsIndex = index.NewTransactionResultsIndex(s.indexReporter, s.lightTxResults) + + s.proto.params.On("FinalizedRoot").Return(s.rootBlock.ToHeader(), nil) + s.proto.state.On("Params").Return(s.proto.params) + + s.proto.snapshot.On("Head").Return( + func() *flow.Header { + return s.finalizedBlock + }, + nil, + ).Maybe() + s.proto.state.On("Final").Return(s.proto.snapshot, nil) + + s.enNodeIDs = unittest.IdentityListFixture(1, unittest.WithRole(flow.RoleExecution)) +} + +func (s *RequesterSuite) TestRequest_HappyPath() { + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.proto.state, + s.receipts, + flow.IdentifierList{}, + s.enNodeIDs.NodeIDs(), + ) + + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + s.log, + s.txErrorMessages, + s.txResultsIndex, + s.connFactory, + node_communicator.NewNodeCommunicator(false), + execNodeIdentitiesProvider, + ) + + block := unittest.BlockWithParentFixture(s.finalizedBlock) + blockId := block.ID() + executionResult := &flow.ExecutionResult{ + BlockID: blockId, + Chunks: unittest.ChunkListFixture(1, blockId, unittest.StateCommitmentFixture()), + } + s.connFactory.On("GetExecutionAPIClient", mock.Anything).Return(s.execClient, &mockCloser{}, nil) + + // Mock the protocol snapshot to return fixed execution node IDs. + setupReceiptsForBlockWithResult(s.receipts, executionResult, s.enNodeIDs.NodeIDs()...) + s.proto.snapshot.On("Identities", mock.Anything).Return(s.enNodeIDs, nil) + + // Create mock transaction results with a mix of failed and non-failed transactions. + resultsByBlockID := mockTransactionResultsByBlock(5) + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockId[:], + } + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(createTransactionErrorMessagesResponse(resultsByBlockID), nil). 
+ Once() + + expectedErrorMessages := createExpectedTxErrorMessages(resultsByBlockID, s.enNodeIDs.NodeIDs()[0]) + config := &RequesterConfig{ + RetryDelay: 1 * time.Second, + MaxRetryDelay: 5 * time.Second, + } + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.NoError(s.T(), err) + require.ElementsMatch(s.T(), expectedErrorMessages, actualErrorMessages) +} + +func (s *RequesterSuite) TestRequest_ErrorCases() { + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.proto.state, + s.receipts, + flow.IdentifierList{}, + s.enNodeIDs.NodeIDs(), + ) + + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + s.log, + s.txErrorMessages, + s.txResultsIndex, + s.connFactory, + node_communicator.NewNodeCommunicator(false), + execNodeIdentitiesProvider, + ) + + block := unittest.BlockWithParentFixture(s.finalizedBlock) + blockId := block.ID() + executionResult := &flow.ExecutionResult{ + BlockID: blockId, + Chunks: unittest.ChunkListFixture(1, blockId, unittest.StateCommitmentFixture()), + } + config := &RequesterConfig{ + RetryDelay: 1 * time.Second, + MaxRetryDelay: 5 * time.Second, + } + + s.connFactory.On("GetExecutionAPIClient", mock.Anything).Return(s.execClient, &mockCloser{}, nil) + + // Mock the protocol snapshot to return fixed execution node IDs. + setupReceiptsForBlockWithResult(s.receipts, executionResult, s.enNodeIDs.NodeIDs()...) + s.proto.snapshot.On("Identities", mock.Anything).Return(s.enNodeIDs, nil) + + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockId[:], + } + + s.T().Run("Non-retryable error", func(t *testing.T) { + expectedError := errors.New("non-retryable error") + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(nil, expectedError). + Once() + + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.ErrorIs(s.T(), err, expectedError) + require.Nil(s.T(), actualErrorMessages) + }) + + s.T().Run("Non-retryable grpc DeadlineExceeded error", func(t *testing.T) { + expectedError := status.Error(codes.DeadlineExceeded, "deadline exceeded") + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(nil, expectedError). + Once() + + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.ErrorIs(s.T(), err, expectedError) + require.Nil(s.T(), actualErrorMessages) + }) + + s.T().Run("Non-retryable grpc Canceled error", func(t *testing.T) { + expectedError := status.Error(codes.Canceled, "context canceled") + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(nil, expectedError). + Once() + + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.ErrorIs(s.T(), err, expectedError) + require.Nil(s.T(), actualErrorMessages) + }) + + s.T().Run("Retryable ErrNoENsFoundForExecutionResult error", func(t *testing.T) { + // first time return retryable error + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). 
+ Return(nil, commonrpc.ErrNoENsFoundForExecutionResult). + Once() + + // second time return error messages + resultsByBlockID := mockTransactionResultsByBlock(5) + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(createTransactionErrorMessagesResponse(resultsByBlockID), nil). + Once() + + expectedErrorMessages := createExpectedTxErrorMessages(resultsByBlockID, s.enNodeIDs.NodeIDs()[0]) + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.NoError(s.T(), err) + require.ElementsMatch(s.T(), expectedErrorMessages, actualErrorMessages) + }) + + s.T().Run("Retryable valid grpc error", func(t *testing.T) { + // first time return retryable error + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(nil, status.Error(codes.NotFound, "not found")). + Once() + + // second time return error messages + resultsByBlockID := mockTransactionResultsByBlock(5) + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(createTransactionErrorMessagesResponse(resultsByBlockID), nil). + Once() + + expectedErrorMessages := createExpectedTxErrorMessages(resultsByBlockID, s.enNodeIDs.NodeIDs()[0]) + requester := NewRequester(s.log, config, errorMessageProvider, execNodeIdentitiesProvider, executionResult) + actualErrorMessages, err := requester.Request(context.Background()) + require.NoError(s.T(), err) + require.ElementsMatch(s.T(), expectedErrorMessages, actualErrorMessages) + }) +} diff --git a/engine/access/ingestion/tx_error_messages/tx_error_messages_core.go b/engine/access/ingestion/tx_error_messages/tx_error_messages_core.go new file mode 100644 index 00000000000..1ce681e051c --- /dev/null +++ b/engine/access/ingestion/tx_error_messages/tx_error_messages_core.go @@ -0,0 +1,133 @@ +package tx_error_messages + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" +) + +// TxErrorMessagesCore is responsible for managing transaction result error messages +// It handles both storage and retrieval of error messages +// from execution nodes. +type TxErrorMessagesCore struct { + log zerolog.Logger // used to log relevant actions with context + + txErrorMessageProvider error_messages.Provider + transactionResultErrorMessages storage.TransactionResultErrorMessages + execNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider +} + +// NewTxErrorMessagesCore creates a new instance of TxErrorMessagesCore. 
+func NewTxErrorMessagesCore( + log zerolog.Logger, + txErrorMessageProvider error_messages.Provider, + transactionResultErrorMessages storage.TransactionResultErrorMessages, + execNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider, +) *TxErrorMessagesCore { + return &TxErrorMessagesCore{ + log: log.With().Str("module", "tx_error_messages_core").Logger(), + txErrorMessageProvider: txErrorMessageProvider, + transactionResultErrorMessages: transactionResultErrorMessages, + execNodeIdentitiesProvider: execNodeIdentitiesProvider, + } +} + +// FetchErrorMessages processes transaction result error messages for a given block ID. +// It retrieves error messages from the txErrorMessageProvider if they do not already exist in storage. +// +// The function first checks if error messages for the given block ID are already present in storage. +// If they are not, it fetches the messages from execution nodes and stores them. +// +// Parameters: +// - ctx: The context for managing cancellation and deadlines during the operation. +// - blockID: The identifier of the block for which transaction result error messages need to be processed. +// +// No errors are expected during normal operation. +func (c *TxErrorMessagesCore) FetchErrorMessages(ctx context.Context, blockID flow.Identifier) error { + execNodes, err := c.execNodeIdentitiesProvider.ExecutionNodesForBlockID(ctx, blockID) + if err != nil { + c.log.Error().Err(err).Msg(fmt.Sprintf("failed to find execution nodes for block id: %s", blockID)) + return fmt.Errorf("could not find execution nodes for block: %w", err) + } + + return c.FetchErrorMessagesByENs(ctx, blockID, execNodes) +} + +// FetchErrorMessagesByENs fetches transaction result error messages for the given block from the +// provided execution nodes and stores them, unless they already exist in storage. +// +// No errors are expected during normal operation. +func (c *TxErrorMessagesCore) FetchErrorMessagesByENs( + ctx context.Context, + blockID flow.Identifier, + execNodes flow.IdentitySkeletonList, +) error { + exists, err := c.transactionResultErrorMessages.Exists(blockID) + if err != nil { + return fmt.Errorf("could not check existence of transaction result error messages: %w", err) + } + + if exists { + return nil + } + + // retrieves error messages from the txErrorMessageProvider if they do not already exist in storage + req := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: convert.IdentifierToMessage(blockID), + } + + c.log.Debug(). + Msgf("transaction error messages for block %s are being downloaded", blockID) + + resp, execNode, err := c.txErrorMessageProvider.ErrorMessageByBlockIDFromAnyEN(ctx, execNodes, req) + if err != nil { + c.log.Error().Err(err).Msg("failed to get transaction error messages from execution nodes") + return err + } + + if len(resp) > 0 { + err = c.storeTransactionResultErrorMessages(blockID, resp, execNode) + if err != nil { + return fmt.Errorf("could not store error messages (block: %s): %w", blockID, err) + } + } + + return nil +} + +// storeTransactionResultErrorMessages stores the transaction result error messages for a given block ID. +// +// Parameters: +// - blockID: The identifier of the block for which the error messages are to be stored. +// - errorMessagesResponses: A slice of responses containing the error messages to be stored. +// - execNode: The execution node associated with the error messages. +// +// No errors are expected during normal operation.
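For orientation, a minimal usage sketch of the core (a hedged illustration: log, errorMessageProvider, txErrorMessages, and execNodeIdentitiesProvider stand for values constructed as in initCore in the tests below; ctx and blockID are assumed to come from the caller):

core := NewTxErrorMessagesCore(log, errorMessageProvider, txErrorMessages, execNodeIdentitiesProvider)
// FetchErrorMessages is a no-op when the messages are already stored;
// any returned error is treated as an exception by the calling engine
if err := core.FetchErrorMessages(ctx, blockID); err != nil {
	return fmt.Errorf("could not process error messages for block %v: %w", blockID, err)
}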
+func (c *TxErrorMessagesCore) storeTransactionResultErrorMessages( + blockID flow.Identifier, + errorMessagesResponses []*execproto.GetTransactionErrorMessagesResponse_Result, + execNode *flow.IdentitySkeleton, +) error { + errorMessages := make([]flow.TransactionResultErrorMessage, 0, len(errorMessagesResponses)) + for _, value := range errorMessagesResponses { + errorMessage := flow.TransactionResultErrorMessage{ + ErrorMessage: value.ErrorMessage, + TransactionID: convert.MessageToIdentifier(value.TransactionId), + Index: value.Index, + ExecutorID: execNode.NodeID, + } + errorMessages = append(errorMessages, errorMessage) + } + + err := c.transactionResultErrorMessages.Store(blockID, errorMessages) + if err != nil { + return fmt.Errorf("failed to store transaction error messages: %w", err) + } + + return nil +} diff --git a/engine/access/ingestion/tx_error_messages/tx_error_messages_core_test.go b/engine/access/ingestion/tx_error_messages/tx_error_messages_core_test.go new file mode 100644 index 00000000000..04e0e8ac426 --- /dev/null +++ b/engine/access/ingestion/tx_error_messages/tx_error_messages_core_test.go @@ -0,0 +1,363 @@ +package tx_error_messages + +import ( + "context" + "fmt" + "testing" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/access/index" + accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" + protocol "github.com/onflow/flow-go/state/protocol/mock" + storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +const expectedErrorMsg = "expected test error" + +type TxErrorMessagesCoreSuite struct { + suite.Suite + + log zerolog.Logger + proto struct { + state *protocol.FollowerState + snapshot *protocol.Snapshot + params *protocol.Params + } + + receipts *storage.ExecutionReceipts + txErrorMessages *storage.TransactionResultErrorMessages + lightTxResults *storage.LightTransactionResults + + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + txResultsIndex *index.TransactionResultsIndex + + enNodeIDs flow.IdentityList + execClient *accessmock.ExecutionAPIClient + connFactory *connectionmock.ConnectionFactory + + blockMap map[uint64]*flow.Block + rootBlock *flow.Block + finalizedBlock *flow.Header + + ctx context.Context + cancel context.CancelFunc +} + +func TestTxErrorMessagesCore(t *testing.T) { + suite.Run(t, new(TxErrorMessagesCoreSuite)) +} + +// TearDownTest cancels the test context +func (s *TxErrorMessagesCoreSuite) TearDownTest() { + s.cancel() +} + +type mockCloser struct{} + +func (mc *mockCloser) Close() error { return nil } + +func (s *TxErrorMessagesCoreSuite) SetupTest() { + s.log = unittest.Logger() + s.ctx, s.cancel = context.WithCancel(context.Background()) + // mock out protocol state + s.proto.state = protocol.NewFollowerState(s.T()) + s.proto.snapshot = protocol.NewSnapshot(s.T()) + s.proto.params = protocol.NewParams(s.T()) +
s.execClient = accessmock.NewExecutionAPIClient(s.T()) + s.connFactory = connectionmock.NewConnectionFactory(s.T()) + s.receipts = storage.NewExecutionReceipts(s.T()) + s.txErrorMessages = storage.NewTransactionResultErrorMessages(s.T()) + s.rootBlock = unittest.Block.Genesis(flow.Emulator) + s.finalizedBlock = unittest.BlockWithParentFixture(s.rootBlock.ToHeader()).ToHeader() + + s.lightTxResults = storage.NewLightTransactionResults(s.T()) + s.reporter = syncmock.NewIndexReporter(s.T()) + s.indexReporter = index.NewReporter() + err := s.indexReporter.Initialize(s.reporter) + s.Require().NoError(err) + s.txResultsIndex = index.NewTransactionResultsIndex(s.indexReporter, s.lightTxResults) + + s.proto.state.On("Params").Return(s.proto.params) + + // Mock the finalized root block header with height 0. + s.proto.params.On("FinalizedRoot").Return(s.rootBlock.ToHeader(), nil) + + s.proto.snapshot.On("Head").Return( + func() *flow.Header { + return s.finalizedBlock + }, + nil, + ).Maybe() + s.proto.state.On("Final").Return(s.proto.snapshot, nil) + + // Create identities for 1 execution node. + s.enNodeIDs = unittest.IdentityListFixture(1, unittest.WithRole(flow.RoleExecution)) +} + +// TestHandleTransactionResultErrorMessages checks that transaction result error messages +// are properly fetched from the execution nodes, processed, and stored in the protocol database. +func (s *TxErrorMessagesCoreSuite) TestHandleTransactionResultErrorMessages() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + + block := unittest.BlockWithParentFixture(s.finalizedBlock) + blockId := block.ID() + + s.connFactory.On("GetExecutionAPIClient", mock.Anything).Return(s.execClient, &mockCloser{}, nil) + + // Mock the protocol snapshot to return fixed execution node IDs. + setupReceiptsForBlock(s.receipts, block, s.enNodeIDs.NodeIDs()[0]) + s.proto.snapshot.On("Identities", mock.Anything).Return(s.enNodeIDs, nil) + s.proto.state.On("AtBlockID", blockId).Return(s.proto.snapshot).Once() + + // Create mock transaction results with a mix of failed and non-failed transactions. + resultsByBlockID := mockTransactionResultsByBlock(5) + + // Prepare a request to fetch transaction error messages by block ID from execution nodes. + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockId[:], + } + + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(createTransactionErrorMessagesResponse(resultsByBlockID), nil). + Once() + + // 1. Mock the txErrorMessages storage to confirm that error messages do not exist yet. + s.txErrorMessages.On("Exists", blockId). + Return(false, nil).Once() + + // Prepare the expected transaction error messages that should be stored. + expectedStoreTxErrorMessages := createExpectedTxErrorMessages(resultsByBlockID, s.enNodeIDs.NodeIDs()[0]) + + // Mock the storage of the fetched error messages into the protocol database. + s.txErrorMessages.On("Store", blockId, expectedStoreTxErrorMessages). + Return(nil).Once() + + core := s.initCore() + err := core.FetchErrorMessages(irrecoverableCtx, blockId) + require.NoError(s.T(), err) + + // Verify that the mock expectations for storing the error messages were met. + s.txErrorMessages.AssertExpectations(s.T()) + s.proto.state.AssertExpectations(s.T()) + + // 2. Now simulate the second try when the error messages already exist in storage. + // Mock the txErrorMessages storage to confirm that error messages exist. + s.txErrorMessages.On("Exists", blockId).
+ Return(true, nil).Once() + s.proto.state.On("AtBlockID", blockId).Return(s.proto.snapshot).Once() + err = core.FetchErrorMessages(irrecoverableCtx, blockId) + require.NoError(s.T(), err) + + // Verify that no additional fetch or store calls were made, since the messages already exist. + s.txErrorMessages.AssertExpectations(s.T()) + s.execClient.AssertExpectations(s.T()) + s.proto.state.AssertExpectations(s.T()) +} + +// TestHandleTransactionResultErrorMessages_ErrorCases tests the error handling of +// the FetchErrorMessages function in the following cases: +// +// 1. Execution node fetch error: When fetching transaction error messages from the execution node fails, +// the function should return an appropriate error and no further actions should be taken. +// 2. Storage store error after fetching results: When fetching transaction error messages succeeds, +// but storing them in the storage fails, the function should return an error and no further actions should be taken. +func (s *TxErrorMessagesCoreSuite) TestHandleTransactionResultErrorMessages_ErrorCases() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + + block := unittest.BlockWithParentFixture(s.finalizedBlock) + blockId := block.ID() + + s.connFactory.On("GetExecutionAPIClient", mock.Anything).Return(s.execClient, &mockCloser{}, nil) + + // Mock the protocol snapshot to return fixed execution node IDs. + setupReceiptsForBlock(s.receipts, block, s.enNodeIDs.NodeIDs()[0]) + s.proto.snapshot.On("Identities", mock.Anything).Return(s.enNodeIDs, nil) + s.proto.state.On("AtBlockID", blockId).Return(s.proto.snapshot) + + s.Run("Execution node fetch error", func() { + // Mock the txErrorMessages storage to confirm that error messages do not exist yet. + s.txErrorMessages.On("Exists", blockId).Return(false, nil).Once() + + // Simulate an error when fetching transaction error messages from the execution node. + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockId[:], + } + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(nil, fmt.Errorf("execution node fetch error")).Once() + + core := s.initCore() + err := core.FetchErrorMessages(irrecoverableCtx, blockId) + + // Assert that the function returns an error due to the client fetch error. + require.Error(s.T(), err) + require.Contains(s.T(), err.Error(), "execution node fetch error") + + // Ensure that no further steps are taken after the client fetch error. + s.txErrorMessages.AssertNotCalled(s.T(), "Store", mock.Anything, mock.Anything) + }) + + s.Run("Storage error after fetching results", func() { + // Simulate successful fetching of transaction error messages but an error in storing them. + + // Mock the txErrorMessages storage to confirm that error messages do not exist yet. + s.txErrorMessages.On("Exists", blockId).Return(false, nil).Once() + + // Create mock transaction results with a mix of failed and non-failed transactions. + resultsByBlockID := mockTransactionResultsByBlock(5) + + // Prepare a request to fetch transaction error messages by block ID from execution nodes. + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockId[:], + } + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(createTransactionErrorMessagesResponse(resultsByBlockID), nil).Once() + + // Simulate an error when attempting to store the fetched transaction error messages in storage.
+ expectedStoreTxErrorMessages := createExpectedTxErrorMessages(resultsByBlockID, s.enNodeIDs.NodeIDs()[0]) + s.txErrorMessages.On("Store", blockId, expectedStoreTxErrorMessages). + Return(fmt.Errorf("storage error")).Once() + + core := s.initCore() + err := core.FetchErrorMessages(irrecoverableCtx, blockId) + + // Assert that the function returns an error due to the store error. + require.Error(s.T(), err) + require.Contains(s.T(), err.Error(), "storage error") + + // Ensure that storage existence check and transaction fetch were called before the store error. + s.txErrorMessages.AssertCalled(s.T(), "Exists", blockId) + s.execClient.AssertCalled(s.T(), "GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq) + }) +} + +// initCore creates a new instance of the transaction error messages core. +func (s *TxErrorMessagesCoreSuite) initCore() *TxErrorMessagesCore { + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.proto.state, + s.receipts, + flow.IdentifierList{}, + s.enNodeIDs.NodeIDs(), + ) + + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + s.log, + s.txErrorMessages, + s.txResultsIndex, + s.connFactory, + node_communicator.NewNodeCommunicator(false), + execNodeIdentitiesProvider, + ) + + core := NewTxErrorMessagesCore( + s.log, + errorMessageProvider, + s.txErrorMessages, + execNodeIdentitiesProvider, + ) + return core +} + +// createExpectedTxErrorMessages creates a list of expected transaction error messages based on transaction results +func createExpectedTxErrorMessages(resultsByBlockID []flow.LightTransactionResult, executionNode flow.Identifier) []flow.TransactionResultErrorMessage { + // Prepare the expected transaction error messages that should be stored. + var expectedStoreTxErrorMessages []flow.TransactionResultErrorMessage + + for i, result := range resultsByBlockID { + if result.Failed { + errMsg := fmt.Sprintf("%s.%s", expectedErrorMsg, result.TransactionID) + + expectedStoreTxErrorMessages = append(expectedStoreTxErrorMessages, + flow.TransactionResultErrorMessage{ + TransactionID: result.TransactionID, + ErrorMessage: errMsg, + Index: uint32(i), + ExecutorID: executionNode, + }) + } + } + + return expectedStoreTxErrorMessages +} + +// mockTransactionResultsByBlock creates mock transaction results with a mix of failed and non-failed transactions. +func mockTransactionResultsByBlock(count int) []flow.LightTransactionResult { + // Create mock transaction results with a mix of failed and non-failed transactions. + resultsByBlockID := make([]flow.LightTransactionResult, 0) + for i := 0; i < count; i++ { + resultsByBlockID = append(resultsByBlockID, flow.LightTransactionResult{ + TransactionID: unittest.IdentifierFixture(), + Failed: i%2 == 0, // create a mix of failed and non-failed transactions + ComputationUsed: 0, + }) + } + + return resultsByBlockID +} + +// setupReceiptsForBlock registers two mock execution receipts for a block, both sharing the +// same execution result and produced by the given execution node. +func setupReceiptsForBlock(receipts *storage.ExecutionReceipts, block *flow.Block, eNodeID flow.Identifier) { + receipt1 := unittest.ReceiptForBlockFixture(block) + receipt1.ExecutorID = eNodeID + receipt2 := unittest.ReceiptForBlockFixture(block) + receipt2.ExecutorID = eNodeID + receipt1.ExecutionResult = receipt2.ExecutionResult + + receiptsList := flow.ExecutionReceiptList{receipt1, receipt2} + + receipts. + On("ByBlockID", block.ID()).
+ Return(func(flow.Identifier) flow.ExecutionReceiptList { + return receiptsList + }, nil) +} + +// setupReceiptsForBlockWithResult sets up mock execution receipts for a block with a specific execution result +func setupReceiptsForBlockWithResult(receipts *storage.ExecutionReceipts, executionResult *flow.ExecutionResult, executorIDs ...flow.Identifier) { + receiptList := make(flow.ExecutionReceiptList, 0, len(executorIDs)) + for _, enID := range executorIDs { + receiptList = append(receiptList, unittest.ExecutionReceiptFixture( + unittest.WithResult(executionResult), + unittest.WithExecutorID(enID), + )) + } + + receipts. + On("ByBlockID", executionResult.BlockID). + Return(func(flow.Identifier) flow.ExecutionReceiptList { + return receiptList + }, nil) +} + +// createTransactionErrorMessagesResponse creates a GetTransactionErrorMessagesResponse, as an execution node would return it, based on the given results. +func createTransactionErrorMessagesResponse(resultsByBlockID []flow.LightTransactionResult) *execproto.GetTransactionErrorMessagesResponse { + exeErrMessagesResp := &execproto.GetTransactionErrorMessagesResponse{} + + for i, result := range resultsByBlockID { + if result.Failed { + errMsg := fmt.Sprintf("%s.%s", expectedErrorMsg, result.TransactionID) + exeErrMessagesResp.Results = append(exeErrMessagesResp.Results, &execproto.GetTransactionErrorMessagesResponse_Result{ + TransactionId: result.TransactionID[:], + ErrorMessage: errMsg, + Index: uint32(i), + }) + } + } + + return exeErrMessagesResp +} diff --git a/engine/access/ingestion/tx_error_messages/tx_error_messages_engine.go b/engine/access/ingestion/tx_error_messages/tx_error_messages_engine.go new file mode 100644 index 00000000000..b5629829c89 --- /dev/null +++ b/engine/access/ingestion/tx_error_messages/tx_error_messages_engine.go @@ -0,0 +1,203 @@ +package tx_error_messages + +import ( + "context" + "fmt" + "time" + + "github.com/rs/zerolog" + "github.com/sethvargo/go-retry" + + "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/jobqueue" + "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +const ( + // processTxErrorMessagesWorkersCount defines the number of workers that + // concurrently process transaction error messages in the job queue. + processTxErrorMessagesWorkersCount = 3 + + // defaultRetryDelay specifies the initial delay for the exponential backoff + // when the process of fetching transaction error messages fails. + // + // This delay increases with each retry attempt, up to the maximum defined by + // defaultMaxRetryDelay. + defaultRetryDelay = 1 * time.Second + + // defaultMaxRetryDelay specifies the maximum delay for the exponential backoff + // when the process of fetching transaction error messages fails. + // + // Once this delay is reached, the backoff will no longer increase with each retry. + defaultMaxRetryDelay = 5 * time.Minute +) + +// Engine represents the component responsible for managing and processing +// transaction result error messages. It fetches error messages from +// execution nodes, retries failed fetches, and ensures the messages are +// processed and stored for sealed blocks. +// +// No errors are expected during normal operation.
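A minimal sketch of how the retry constants above are composed with the go-retry package (the same chain of calls appears verbatim in processErrorMessagesForBlock further below; fetchOnce is a hypothetical stand-in for a single fetch attempt):

// exponential backoff starting at defaultRetryDelay, capped at defaultMaxRetryDelay,
// with 15% jitter so concurrent workers do not retry in lockstep
backoff := retry.NewExponential(defaultRetryDelay)
backoff = retry.WithCappedDuration(defaultMaxRetryDelay, backoff)
backoff = retry.WithJitterPercent(15, backoff)
err := retry.Do(ctx, backoff, func(ctx context.Context) error {
	// wrapping with RetryableError keeps retrying; returning the error unwrapped stops
	return retry.RetryableError(fetchOnce(ctx))
})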
+type Engine struct { + *component.ComponentManager + + log zerolog.Logger + metrics module.TransactionErrorMessagesMetrics + state protocol.State + headers storage.Headers + + // Job queue + txErrorMessagesConsumer *jobqueue.ComponentConsumer + // Notifiers for queue consumer + txErrorMessagesNotifier engine.Notifier + + txErrorMessagesCore *TxErrorMessagesCore // core logic for handling tx error messages +} + +// New creates a new Engine instance, initializing all necessary components +// for processing transaction result error messages. This includes setting +// up the job queue and the notifier for handling finalized blocks. +// +// No errors are expected during normal operation. +func New( + log zerolog.Logger, + metrics module.TransactionErrorMessagesMetrics, + state protocol.State, + headers storage.Headers, + txErrorMessagesProcessedHeight storage.ConsumerProgressInitializer, + txErrorMessagesCore *TxErrorMessagesCore, +) (*Engine, error) { + e := &Engine{ + log: log.With().Str("engine", "tx_error_messages_engine").Logger(), + metrics: metrics, + state: state, + headers: headers, + txErrorMessagesCore: txErrorMessagesCore, + txErrorMessagesNotifier: engine.NewNotifier(), + } + + // jobqueue Jobs object that tracks sealed blocks by height. This is used by the txErrorMessagesConsumer + // to get a sequential list of sealed blocks. + sealedBlockReader := jobqueue.NewSealedBlockHeaderReader(state, headers) + + var err error + // Create a job queue that will process error messages for new sealed blocks. + // It listens to block finalization events from `txErrorMessagesNotifier`, then checks if there + // are new sealed blocks with `sealedBlockReader`. If there are, it starts workers to process + // them with `processTxResultErrorMessagesJob`, which fetches transaction error messages. At most + // `processTxErrorMessagesWorkersCount` workers will be created for concurrent processing. + // When a sealed block's error messages have been processed, it updates and persists the highest consecutive + // processed height with `txErrorMessagesProcessedHeight`. That way, if the node crashes, + // it reads the `txErrorMessagesProcessedHeight` and resumes from `txErrorMessagesProcessedHeight + 1`. + // If the database is empty, rootHeight will be used to initialize the last processed height. + e.txErrorMessagesConsumer, err = jobqueue.NewComponentConsumer( + e.log.With().Str("engine", "tx_error_messages").Logger(), + e.txErrorMessagesNotifier.Channel(), + txErrorMessagesProcessedHeight, + sealedBlockReader, + e.state.Params().SealedRoot().Height, + e.processTxResultErrorMessagesJob, + processTxErrorMessagesWorkersCount, + 0, + ) + if err != nil { + return nil, fmt.Errorf("error creating transaction result error messages jobqueue: %w", err) + } + + e.metrics.TxErrorsInitialHeight(e.txErrorMessagesConsumer.LastProcessedIndex()) + + // Add workers + e.ComponentManager = component.NewComponentManagerBuilder(). + AddWorker(e.runTxResultErrorMessagesConsumer). + Build() + + return e, nil +} + +// processTxResultErrorMessagesJob processes a job for transaction error messages by +// converting the job to a block and processing error messages. If processing +// fails for all attempts, it logs the error.
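The notify-and-consume cycle described in the comment inside New can be pictured with this hedged sketch (engine.Notifier is used exactly as in this file; the worker body is illustrative only):

notifier := engine.NewNotifier()
go func() {
	// the channel carries at most one pending signal, so bursts of finalized
	// blocks coalesce into a single wake-up of the consumer
	for range notifier.Channel() {
		// ask the jobqueue for newly sealed heights and dispatch worker jobs
	}
}()
notifier.Notify() // what OnFinalizedBlock does; cheap to call on every finalized block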
+func (e *Engine) processTxResultErrorMessagesJob(ctx irrecoverable.SignalerContext, job module.Job, done func()) { + header, err := jobqueue.JobToBlockHeader(job) + if err != nil { + ctx.Throw(fmt.Errorf("failed to convert job to block: %w", err)) + } + + start := time.Now() + e.metrics.TxErrorsFetchStarted() + + err = e.processErrorMessagesForBlock(ctx, header.ID()) + + // use the last processed index to ensure the metrics reflect the highest _consecutive_ height. + // this makes it easier to see when downloading gets stuck at a height. + e.metrics.TxErrorsFetchFinished(time.Since(start), err == nil, e.txErrorMessagesConsumer.LastProcessedIndex()) + + if err == nil { + done() + return + } + + e.log.Error(). + Err(err). + Str("job_id", string(job.ID())). + Msg("error encountered while processing transaction result error messages job") +} + +// runTxResultErrorMessagesConsumer runs the txErrorMessagesConsumer component +func (e *Engine) runTxResultErrorMessagesConsumer(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + e.txErrorMessagesConsumer.Start(ctx) + + err := util.WaitClosed(ctx, e.txErrorMessagesConsumer.Ready()) + if err == nil { + ready() + + // In the case where this component is started for the first time after a spork, we need to + // manually trigger the first check since OnFinalizedBlock will never be called. + // If this is started on a live network, an early check will be a no-op. + e.txErrorMessagesNotifier.Notify() + } + + <-e.txErrorMessagesConsumer.Done() +} + +// OnFinalizedBlock is called by the follower engine after a block has been finalized and the state has been updated. +// Receives block finalized events from the finalization distributor and forwards them to the txErrorMessagesConsumer. +func (e *Engine) OnFinalizedBlock(*model.Block) { + e.txErrorMessagesNotifier.Notify() +} + +// processErrorMessagesForBlock processes transaction result error messages for a block. +// If the process fails, it will retry, using exponential backoff. +// +// No errors are expected during normal operation. +func (e *Engine) processErrorMessagesForBlock(ctx context.Context, blockID flow.Identifier) error { + backoff := retry.NewExponential(defaultRetryDelay) + backoff = retry.WithCappedDuration(defaultMaxRetryDelay, backoff) + backoff = retry.WithJitterPercent(15, backoff) + + attempt := 0 + return retry.Do(ctx, backoff, func(context.Context) error { + if attempt > 0 { + e.metrics.TxErrorsFetchRetried() + } + + err := e.txErrorMessagesCore.FetchErrorMessages(ctx, blockID) + if err != nil { + e.log.Debug(). + Err(err). + Str("block_id", blockID.String()). + Uint64("attempt", uint64(attempt)). + Msg("failed to fetch transaction result error messages.
will retry") + } + attempt++ + + return retry.RetryableError(err) + }) +} diff --git a/engine/access/ingestion/tx_error_messages/tx_error_messages_engine_test.go b/engine/access/ingestion/tx_error_messages/tx_error_messages_engine_test.go new file mode 100644 index 00000000000..7acc1f4ad01 --- /dev/null +++ b/engine/access/ingestion/tx_error_messages/tx_error_messages_engine_test.go @@ -0,0 +1,267 @@ +package tx_error_messages + +import ( + "context" + "os" + "sync" + "testing" + "time" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + hotmodel "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/engine/access/index" + accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +// TxErrorMessagesEngineSuite is a test suite for the transaction error messages engine. +// It sets up the necessary mocks and dependencies to test the functionality of +// handling transaction error messages. +type TxErrorMessagesEngineSuite struct { + suite.Suite + + log zerolog.Logger + metrics module.TransactionErrorMessagesMetrics + proto struct { + state *protocol.FollowerState + snapshot *protocol.Snapshot + params *protocol.Params + } + headers *storagemock.Headers + receipts *storagemock.ExecutionReceipts + txErrorMessages *storagemock.TransactionResultErrorMessages + lightTxResults *storagemock.LightTransactionResults + + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + txResultsIndex *index.TransactionResultsIndex + + enNodeIDs flow.IdentityList + execClient *accessmock.ExecutionAPIClient + connFactory *connectionmock.ConnectionFactory + + blockMap map[uint64]*flow.Block + rootBlock *flow.Block + sealedBlock *flow.Header + + db storage.DB + dbDir string + + ctx context.Context + cancel context.CancelFunc +} + +// TestTxErrorMessagesEngine runs the test suite for the transaction error messages engine. 
+func TestTxErrorMessagesEngine(t *testing.T) { + suite.Run(t, new(TxErrorMessagesEngineSuite)) +} + +// TearDownTest stops the engine and cleans up the db +func (s *TxErrorMessagesEngineSuite) TearDownTest() { + s.cancel() + err := os.RemoveAll(s.dbDir) + s.Require().NoError(err) +} + +func (s *TxErrorMessagesEngineSuite) SetupTest() { + s.log = unittest.Logger() + s.metrics = metrics.NewNoopCollector() + s.ctx, s.cancel = context.WithCancel(context.Background()) + pdb, dbDir := unittest.TempPebbleDB(s.T()) + s.db = pebbleimpl.ToDB(pdb) + s.dbDir = dbDir + // mock out protocol state + s.proto.state = protocol.NewFollowerState(s.T()) + s.proto.snapshot = protocol.NewSnapshot(s.T()) + s.proto.params = protocol.NewParams(s.T()) + s.execClient = accessmock.NewExecutionAPIClient(s.T()) + s.connFactory = connectionmock.NewConnectionFactory(s.T()) + s.headers = storagemock.NewHeaders(s.T()) + s.receipts = storagemock.NewExecutionReceipts(s.T()) + s.txErrorMessages = storagemock.NewTransactionResultErrorMessages(s.T()) + s.lightTxResults = storagemock.NewLightTransactionResults(s.T()) + s.reporter = syncmock.NewIndexReporter(s.T()) + s.indexReporter = index.NewReporter() + err := s.indexReporter.Initialize(s.reporter) + s.Require().NoError(err) + s.txResultsIndex = index.NewTransactionResultsIndex(s.indexReporter, s.lightTxResults) + + blockCount := 5 + s.blockMap = make(map[uint64]*flow.Block, blockCount) + s.rootBlock = unittest.Block.Genesis(flow.Emulator) + parent := s.rootBlock.ToHeader() + + for i := 0; i < blockCount; i++ { + block := unittest.BlockWithParentFixture(parent) + // update for next iteration + parent = block.ToHeader() + s.blockMap[block.Height] = block + } + + s.sealedBlock = parent + + s.headers.On("ByHeight", mock.AnythingOfType("uint64")).Return( + mocks.ConvertStorageOutput( + mocks.StorageMapGetter(s.blockMap), + func(block *flow.Block) *flow.Header { return block.ToHeader() }, + ), + ).Maybe() + + s.proto.state.On("Params").Return(s.proto.params) + + // Mock the finalized and sealed root block header with height 0. + s.proto.params.On("FinalizedRoot").Return(s.rootBlock.ToHeader(), nil) + s.proto.params.On("SealedRoot").Return(s.rootBlock.ToHeader(), nil) + + s.proto.snapshot.On("Head").Return( + func() *flow.Header { + return s.sealedBlock + }, + nil, + ).Maybe() + + s.proto.state.On("Sealed").Return(s.proto.snapshot, nil) + s.proto.state.On("Final").Return(s.proto.snapshot, nil) + + // Create identities for 1 execution node. + s.enNodeIDs = unittest.IdentityListFixture(1, unittest.WithRole(flow.RoleExecution)) +} + +// initEngine creates a new instance of the transaction error messages engine +// and waits for it to start. It initializes the engine with mocked components and state.
+func (s *TxErrorMessagesEngineSuite) initEngine(ctx irrecoverable.SignalerContext) *Engine { + processedTxErrorMessagesBlockHeight := store.NewConsumerProgress( + s.db, + module.ConsumeProgressEngineTxErrorMessagesBlockHeight, + ) + + execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.proto.state, + s.receipts, + s.enNodeIDs.NodeIDs(), + flow.IdentifierList{}, + ) + + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + s.log, + s.txErrorMessages, + s.txResultsIndex, + s.connFactory, + node_communicator.NewNodeCommunicator(false), + execNodeIdentitiesProvider, + ) + + txResultErrorMessagesCore := NewTxErrorMessagesCore( + s.log, + errorMessageProvider, + s.txErrorMessages, + execNodeIdentitiesProvider, + ) + + eng, err := New( + s.log, + s.metrics, + s.proto.state, + s.headers, + processedTxErrorMessagesBlockHeight, + txResultErrorMessagesCore, + ) + require.NoError(s.T(), err) + + eng.ComponentManager.Start(ctx) + <-eng.Ready() + + return eng +} + +// TestOnFinalizedBlockHandleTxErrorMessages tests the handling of transaction error messages +// when a new finalized block is processed. It verifies that the engine fetches transaction +// error messages from execution nodes and stores them in the database. +func (s *TxErrorMessagesEngineSuite) TestOnFinalizedBlockHandleTxErrorMessages() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + + block := unittest.BlockWithParentFixture(s.sealedBlock) + + s.blockMap[block.Height] = block + s.sealedBlock = block.ToHeader() + + hotstuffBlock := hotmodel.Block{ + BlockID: block.ID(), + } + + // mock the connection factory + s.connFactory.On("GetExecutionAPIClient", mock.Anything).Return(s.execClient, &mockCloser{}, nil) + + s.proto.snapshot.On("Identities", mock.Anything).Return(s.enNodeIDs, nil) + s.proto.state.On("AtBlockID", mock.Anything).Return(s.proto.snapshot) + + count := 6 + wg := sync.WaitGroup{} + wg.Add(count) + + for _, b := range s.blockMap { + blockID := b.ID() + + // Mock the protocol snapshot to return fixed execution node IDs. + setupReceiptsForBlock(s.receipts, b, s.enNodeIDs.NodeIDs()[0]) + + // Mock the txErrorMessages storage to confirm that error messages do not exist yet. + s.txErrorMessages.On("Exists", blockID). + Return(false, nil).Once() + + // Create mock transaction results with a mix of failed and non-failed transactions. + resultsByBlockID := mockTransactionResultsByBlock(5) + + // Prepare a request to fetch transaction error messages by block ID from execution nodes. + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockID[:], + } + + s.execClient.On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(createTransactionErrorMessagesResponse(resultsByBlockID), nil).Once() + + // Prepare the expected transaction error messages that should be stored. + expectedStoreTxErrorMessages := createExpectedTxErrorMessages(resultsByBlockID, s.enNodeIDs.NodeIDs()[0]) + + // Mock the storage of the fetched error messages into the protocol database. + s.txErrorMessages.On("Store", blockID, expectedStoreTxErrorMessages).Return(nil). + Run(func(args mock.Arguments) { + // Count down once the error messages for this block have been stored + wg.Done() + }).Once() + } + + eng := s.initEngine(irrecoverableCtx) + // process the block through the finalized callback + eng.OnFinalizedBlock(&hotstuffBlock) + + // Verify that all transaction error messages were processed within the timeout.
+ unittest.RequireReturnsBefore(s.T(), wg.Wait, 2*time.Second, "expect to process new block before timeout") + + // Ensure all expectations were met. + s.txErrorMessages.AssertExpectations(s.T()) + s.headers.AssertExpectations(s.T()) + s.proto.state.AssertExpectations(s.T()) + s.execClient.AssertExpectations(s.T()) +} diff --git a/engine/access/ingestion2/collection_syncer.go b/engine/access/ingestion2/collection_syncer.go new file mode 100644 index 00000000000..397c135b56a --- /dev/null +++ b/engine/access/ingestion2/collection_syncer.go @@ -0,0 +1,475 @@ +package ingestion2 + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/common/fifoqueue" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +const ( + // time to wait for all the missing collections to be received at node startup + collectionCatchupTimeout = 30 * time.Second + + // time to poll the storage to check if missing collections have been received + collectionCatchupDBPollInterval = 10 * time.Millisecond + + // time to request missing collections from the network + missingCollsRequestInterval = 1 * time.Minute + + // a threshold of number of blocks with missing collections beyond which collections should be re-requested + // this is to prevent spamming the collection nodes with requests + missingCollsForBlockThreshold = 100 + + // a threshold of block height beyond which collections should be re-requested (regardless of the number of blocks for which collections are missing) + // this is to ensure that if a collection is missing for a long time (in terms of block height) it is eventually re-requested + missingCollsForAgeThreshold = 100 + + // time to update the FullBlockHeight index + fullBlockRefreshInterval = 1 * time.Second +) + +var ( + // these values are overridden in tests, which is why they are declared as variables rather than constants + defaultMissingCollsForBlockThreshold = missingCollsForBlockThreshold + defaultMissingCollsForAgeThreshold uint64 = missingCollsForAgeThreshold +) + +// The CollectionSyncer type provides mechanisms for syncing and indexing data +// from the Flow blockchain into local storage. Specifically, it handles +// the retrieval and processing of collections and transactions that may +// have been missed due to network delays, restarts, or gaps in finalization. +// +// It is responsible for ensuring the local node has +// all collections associated with finalized blocks starting from the +// last fully synced height. It works by periodically scanning the finalized +// block range, identifying missing collections, and triggering requests +// to fetch them from the network. Once collections are retrieved, it +// ensures they are persisted in the local collection and transaction stores. +// +// The syncer maintains a persistent, strictly monotonic counter +// (`lastFullBlockHeight`) to track the highest finalized block for which +// all collections have been fully indexed. It uses this information to +// avoid redundant processing and to measure catch-up progress.
+// +// It is meant to operate in a background goroutine as part of the +// node's ingestion pipeline. +type CollectionSyncer struct { + logger zerolog.Logger + collectionExecutedMetric module.CollectionExecutedMetric + + state protocol.State + requester module.Requester + + // collections to be indexed + pendingCollections *engine.FifoMessageStore + pendingCollectionsHandler *engine.MessageHandler + + blocks storage.Blocks + collections storage.Collections + transactions storage.Transactions + + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + lockManager storage.LockManager +} + +// NewCollectionSyncer creates a new CollectionSyncer responsible for requesting, +// tracking, and indexing missing collections. +func NewCollectionSyncer( + logger zerolog.Logger, + collectionExecutedMetric module.CollectionExecutedMetric, + requester module.Requester, + state protocol.State, + blocks storage.Blocks, + collections storage.Collections, + transactions storage.Transactions, + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter, + lockManager storage.LockManager, +) (*CollectionSyncer, error) { + collectionExecutedMetric.UpdateLastFullBlockHeight(lastFullBlockHeight.Value()) + + collectionsQueue, err := fifoqueue.NewFifoQueue(defaultQueueCapacity) + if err != nil { + return nil, fmt.Errorf("could not create collections queue: %w", err) + } + + pendingCollections := &engine.FifoMessageStore{FifoQueue: collectionsQueue} + pendingCollectionsHandler := engine.NewMessageHandler( + logger, + engine.NewNotifier(), + engine.Pattern{ + Match: func(msg *engine.Message) bool { + _, ok := msg.Payload.(*flow.Collection) + return ok + }, + Store: pendingCollections, + }, + ) + + return &CollectionSyncer{ + logger: logger, + state: state, + requester: requester, + pendingCollectionsHandler: pendingCollectionsHandler, + pendingCollections: pendingCollections, + blocks: blocks, + collections: collections, + transactions: transactions, + lastFullBlockHeight: lastFullBlockHeight, + collectionExecutedMetric: collectionExecutedMetric, + lockManager: lockManager, + }, nil +} + +// StartWorkerLoop continuously monitors and triggers collection sync operations. +// It handles on startup collection catchup, periodic missing collection requests, and full block height updates. +func (s *CollectionSyncer) StartWorkerLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + requestCtx, cancel := context.WithTimeout(ctx, collectionCatchupTimeout) + defer cancel() + + // on start-up, AN wants to download all missing collections to serve it to end users + err := s.requestMissingCollectionsBlocking(requestCtx) + if err != nil { + s.logger.Error().Err(err).Msg("error downloading missing collections") + } + ready() + + requestCollectionsTicker := time.NewTicker(missingCollsRequestInterval) + defer requestCollectionsTicker.Stop() + + // Collections are requested concurrently in this design. + // To maintain accurate progress tracking and avoid redundant requests, + // we periodically update the `lastFullBlockHeight` to reflect the latest + // finalized block with all collections successfully indexed. 
+	updateLastFullBlockHeightTicker := time.NewTicker(fullBlockRefreshInterval)
+	defer updateLastFullBlockHeightTicker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+
+		case <-requestCollectionsTicker.C:
+			err := s.requestMissingCollections()
+			if err != nil {
+				ctx.Throw(err)
+			}
+
+		case <-updateLastFullBlockHeightTicker.C:
+			err := s.updateLastFullBlockHeight()
+			if err != nil {
+				ctx.Throw(err)
+			}
+
+		case <-s.pendingCollectionsHandler.GetNotifier():
+			msg, ok := s.pendingCollections.Get()
+			if !ok {
+				ctx.Throw(fmt.Errorf("could not get pending collection"))
+			}
+
+			collection, ok := msg.Payload.(*flow.Collection)
+			if !ok {
+				ctx.Throw(fmt.Errorf("could not cast pending collection to *flow.Collection. got: %T", msg.Payload))
+				return
+			}
+
+			// Create a lock context for indexing. The lock must be released at the
+			// end of each iteration rather than deferred: a defer inside this
+			// long-running loop would hold every acquired lock until the worker
+			// shuts down.
+			lctx := s.lockManager.NewContext()
+			err := lctx.AcquireLock(storage.LockInsertCollection)
+			if err != nil {
+				ctx.Throw(fmt.Errorf("could not acquire lock for collection indexing: %w", err))
+				return
+			}
+
+			err = indexer.IndexCollection(lctx, collection, s.collections, s.logger, s.collectionExecutedMetric)
+			lctx.Release()
+			if err != nil {
+				ctx.Throw(fmt.Errorf("error indexing collection: %w", err))
+				return
+			}
+		}
+	}
+}
+
+// requestMissingCollections checks whether missing collections should be requested based on the configured
+// block and age thresholds and triggers requests if needed.
+//
+// No errors are expected during normal operations.
+func (s *CollectionSyncer) requestMissingCollections() error {
+	lastFullBlockHeight := s.lastFullBlockHeight.Value()
+	lastFinalizedBlock, err := s.state.Final().Head()
+	if err != nil {
+		return fmt.Errorf("failed to get finalized block: %w", err)
+	}
+
+	collections, incompleteBlocksCount, err := s.findMissingCollections(lastFullBlockHeight)
+	if err != nil {
+		return err
+	}
+
+	blocksThresholdReached := incompleteBlocksCount >= defaultMissingCollsForBlockThreshold
+	ageThresholdReached := lastFinalizedBlock.Height-lastFullBlockHeight > defaultMissingCollsForAgeThreshold
+	shouldRequest := blocksThresholdReached || ageThresholdReached
+
+	if shouldRequest {
+		// warn log since generally this should not happen
+		s.logger.Warn().
+			Uint64("finalized_height", lastFinalizedBlock.Height).
+			Uint64("last_full_blk_height", lastFullBlockHeight).
+			Int("missing_collection_blk_count", incompleteBlocksCount).
+			Int("missing_collection_count", len(collections)).
+			Msg("re-requesting missing collections")
+
+		s.requestCollections(collections, false)
+	}
+
+	return nil
+}
+
+// requestMissingCollectionsBlocking requests and waits for all missing collections to be downloaded,
+// blocking until either completion or context timeout.
+//
+// No errors are expected during normal operations.
+func (s *CollectionSyncer) requestMissingCollectionsBlocking(ctx context.Context) error {
+	missingCollections, _, err := s.findMissingCollections(s.lastFullBlockHeight.Value())
+	if err != nil {
+		return err
+	}
+	if len(missingCollections) == 0 {
+		s.logger.Info().Msg("skipping requesting missing collections. 
no missing collections found") + return nil + } + + s.requestCollections(missingCollections, true) + + collectionsToBeDownloaded := make(map[flow.Identifier]struct{}) + for _, collection := range missingCollections { + collectionsToBeDownloaded[collection.CollectionID] = struct{}{} + } + + collectionStoragePollTicker := time.NewTicker(collectionCatchupDBPollInterval) + defer collectionStoragePollTicker.Stop() + + // we want to wait for all collections to be downloaded so we poll local storage periodically to make sure each + // collection was successfully saved in the storage. + for len(collectionsToBeDownloaded) > 0 { + select { + case <-ctx.Done(): + return fmt.Errorf("failed to complete collection retrieval: %w", ctx.Err()) + + case <-collectionStoragePollTicker.C: + s.logger.Info(). + Int("total_missing_collections", len(collectionsToBeDownloaded)). + Msg("retrieving missing collections...") + + for collectionID := range collectionsToBeDownloaded { + downloaded, err := s.isCollectionInStorage(collectionID) + if err != nil { + return err + } + + if downloaded { + delete(collectionsToBeDownloaded, collectionID) + } + } + } + } + + s.logger.Info().Msg("collection catchup done") + return nil +} + +// findMissingCollections scans block heights from last known full block up to the latest finalized +// block and returns all missing collection along with the count of incomplete blocks. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) findMissingCollections(lastFullBlockHeight uint64) ([]*flow.CollectionGuarantee, int, error) { + // first block to look up collections at + firstBlockHeight := lastFullBlockHeight + 1 + + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return nil, 0, fmt.Errorf("failed to get finalized block: %w", err) + } + // last block to look up collections at + lastBlockHeight := lastFinalizedBlock.Height + + var missingCollections []*flow.CollectionGuarantee + var incompleteBlocksCount int + + for currBlockHeight := firstBlockHeight; currBlockHeight <= lastBlockHeight; currBlockHeight++ { + collections, err := s.findMissingCollectionsAtHeight(currBlockHeight) + if err != nil { + return nil, 0, err + } + + if len(collections) == 0 { + continue + } + + missingCollections = append(missingCollections, collections...) + incompleteBlocksCount += 1 + } + + return missingCollections, incompleteBlocksCount, nil +} + +// findMissingCollectionsAtHeight returns all missing collections for a specific block height. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) findMissingCollectionsAtHeight(height uint64) ([]*flow.CollectionGuarantee, error) { + block, err := s.blocks.ByHeight(height) + if err != nil { + return nil, fmt.Errorf("failed to retrieve block by height %d: %w", height, err) + } + + var missingCollections []*flow.CollectionGuarantee + for _, guarantee := range block.Payload.Guarantees { + inStorage, err := s.isCollectionInStorage(guarantee.CollectionID) + if err != nil { + return nil, err + } + + if !inStorage { + missingCollections = append(missingCollections, guarantee) + } + } + + return missingCollections, nil +} + +// isCollectionInStorage checks whether the given collection is present in local storage. +// +// No errors are expected during normal operations. 
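
As background for the helper below: storage getters in this codebase signal absence with the storage.ErrNotFound sentinel, so a presence check has to treat exactly that error as "missing" and surface everything else. A minimal sketch of the idiom (the exists helper name is our own, not part of this PR):

    package example

    import (
        "errors"

        "github.com/onflow/flow-go/storage"
    )

    // exists interprets the error from a storage read: nil means present,
    // storage.ErrNotFound means absent, and any other error is a genuine
    // failure that must be propagated rather than swallowed.
    func exists(err error) (bool, error) {
        if err == nil {
            return true, nil
        }
        if errors.Is(err, storage.ErrNotFound) {
            return false, nil
        }
        return false, err
    }
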
+func (s *CollectionSyncer) isCollectionInStorage(collectionID flow.Identifier) (bool, error) { + _, err := s.collections.LightByID(collectionID) + if err == nil { + return true, nil + } + + if errors.Is(err, storage.ErrNotFound) { + return false, nil + } + + return false, fmt.Errorf("failed to retrieve collection %s: %w", collectionID.String(), err) +} + +// RequestCollectionsForBlock conditionally requests missing collections for a specific block height, +// skipping requests if the block is already below the known full block height. +func (s *CollectionSyncer) RequestCollectionsForBlock(height uint64, missingCollections []*flow.CollectionGuarantee) { + // skip requesting collections, if this block is below the last full block height. + // this means that either we have already received these collections, or the block + // may contain unverifiable guarantees (in case this node has just joined the network) + if height <= s.lastFullBlockHeight.Value() { + s.logger.Debug(). + Msg("skipping requesting collections for finalized block as its collections have been already retrieved") + return + } + + s.requestCollections(missingCollections, false) +} + +// requestCollections registers collection download requests in the requester engine, +// optionally forcing immediate dispatch. +func (s *CollectionSyncer) requestCollections(collections []*flow.CollectionGuarantee, immediately bool) { + for _, guarantee := range collections { + guarantors, err := protocol.FindGuarantors(s.state, guarantee) + if err != nil { + // failed to find guarantors for guarantees contained in a finalized block is fatal error + s.logger.Fatal().Err(err).Msgf("could not find guarantors for collection %v", guarantee.CollectionID) + } + s.requester.EntityByID(guarantee.CollectionID, filter.HasNodeID[flow.Identity](guarantors...)) + } + + if immediately { + s.requester.Force() + } +} + +// updateLastFullBlockHeight updates the next highest block height where all previous collections have been indexed. +// +// No errors are expected during normal operations. +func (s *CollectionSyncer) updateLastFullBlockHeight() error { + lastFullBlockHeight := s.lastFullBlockHeight.Value() + lastFinalizedBlock, err := s.state.Final().Head() + if err != nil { + return fmt.Errorf("failed to get finalized block: %w", err) + } + + // track the latest contiguous full height + newLastFullBlockHeight, err := s.findLowestBlockHeightWithMissingCollections(lastFullBlockHeight, lastFinalizedBlock.Height) + if err != nil { + return fmt.Errorf("failed to find last full block height: %w", err) + } + + // if more contiguous blocks are now complete, update db + if newLastFullBlockHeight > lastFullBlockHeight { + err := s.lastFullBlockHeight.Set(newLastFullBlockHeight) + if err != nil { + return fmt.Errorf("failed to update last full block height: %w", err) + } + + s.collectionExecutedMetric.UpdateLastFullBlockHeight(newLastFullBlockHeight) + + s.logger.Debug(). + Uint64("last_full_block_height", newLastFullBlockHeight). + Msg("updated last full block height counter") + } + + return nil +} + +// findLowestBlockHeightWithMissingCollections finds the next block height with missing collections, +// returning the latest contiguous height where all collections are present. +// +// No errors are expected during normal operations. 
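
To make the contract of the function below concrete: it advances a cursor height by height and stops at the first gap. Here is a self-contained sketch of the same logic over an in-memory map (names are illustrative, not from this PR): with last=10, final=15, and collections missing only at height 13, it returns 12.

    package example

    // lowestContiguous mirrors the scan below: walk the heights in
    // (last, final] and return the highest height h such that no height in
    // (last, h] has missing collections, stopping at the first gap.
    func lowestContiguous(last, final uint64, missingAt map[uint64]bool) uint64 {
        newLast := last
        for h := last + 1; h <= final; h++ {
            if missingAt[h] {
                return newLast // first gap found: do not advance past it
            }
            newLast = h
        }
        return newLast
    }
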
+func (s *CollectionSyncer) findLowestBlockHeightWithMissingCollections(
+	lastKnownFullBlockHeight uint64,
+	finalizedBlockHeight uint64,
+) (uint64, error) {
+	newLastFullBlockHeight := lastKnownFullBlockHeight
+
+	for currBlockHeight := lastKnownFullBlockHeight + 1; currBlockHeight <= finalizedBlockHeight; currBlockHeight++ {
+		missingCollections, err := s.findMissingCollectionsAtHeight(currBlockHeight)
+		if err != nil {
+			return 0, err
+		}
+
+		// return when we find the first block with missing collections
+		if len(missingCollections) > 0 {
+			return newLastFullBlockHeight, nil
+		}
+
+		newLastFullBlockHeight = currBlockHeight
+	}
+
+	return newLastFullBlockHeight, nil
+}
+
+// OnCollectionDownloaded enqueues a downloaded collection for indexing and persistence.
+// This function is a callback intended to be used by the requester engine.
+func (s *CollectionSyncer) OnCollectionDownloaded(id flow.Identifier, entity flow.Entity) {
+	err := s.pendingCollectionsHandler.Process(id, entity)
+	if err != nil {
+		// This is an unexpected error condition. The only expected error returned from Process
+		// is for an unexpected payload type. Since OnCollectionDownloaded is called from the requester engine,
+		// which is configured to only process collections, any error returned here indicates
+		// a bug or state corruption.
+		s.logger.Fatal().Err(err).Msg("failed to process pending collections")
+		return
+	}
+}
diff --git a/engine/access/ingestion2/engine.go b/engine/access/ingestion2/engine.go
new file mode 100644
index 00000000000..3c8e42e2fde
--- /dev/null
+++ b/engine/access/ingestion2/engine.go
@@ -0,0 +1,182 @@
+// Package ingestion2 implements a modular ingestion engine responsible for
+// orchestrating the processing of finalized blockchain data and receiving
+// execution receipts from the network.
+//
+// The Engine coordinates several internal workers, each dedicated to a specific task:
+// - Receiving and persisting execution receipts from the network.
+// - Subscribing to finalized block events.
+// - Synchronizing collections associated with finalized blocks.
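
Before the engine wiring that follows, note that all three workers listed above share one lifecycle shape. The sketch below condenses it using only the standard library; flow-go's irrecoverable.SignalerContext additionally lets a worker throw fatal errors, which is elided here.

    package example

    import "context"

    // workerLoop shows the shape shared by this package's workers: signal
    // readiness once initialization is complete, then react to notifications
    // until the component context is canceled.
    func workerLoop(ctx context.Context, ready func(), notifications <-chan struct{}) {
        ready() // the component counts as operational from here on
        for {
            select {
            case <-ctx.Done():
                return
            case <-notifications:
                // handle one unit of work: a receipt, a finalized block, ...
            }
        }
    }
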
+package ingestion2 + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/common/fifoqueue" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/storage" +) + +// defaultQueueCapacity is a capacity for the execution receipt message queue +const defaultQueueCapacity = 10_000 + +type Engine struct { + *component.ComponentManager + + log zerolog.Logger + + finalizedBlockProcessor *FinalizedBlockProcessor + collectionSyncer *CollectionSyncer + + messageHandler *engine.MessageHandler + executionReceiptsQueue *engine.FifoMessageStore + receipts storage.ExecutionReceipts + collectionExecutedMetric module.CollectionExecutedMetric +} + +var _ network.MessageProcessor = (*Engine)(nil) + +func New( + log zerolog.Logger, + net network.EngineRegistry, + finalizedBlockProcessor *FinalizedBlockProcessor, + collectionSyncer *CollectionSyncer, + receipts storage.ExecutionReceipts, + collectionExecutedMetric module.CollectionExecutedMetric, +) (*Engine, error) { + executionReceiptsRawQueue, err := fifoqueue.NewFifoQueue(defaultQueueCapacity) + if err != nil { + return nil, fmt.Errorf("could not create execution receipts queue: %w", err) + } + executionReceiptsQueue := &engine.FifoMessageStore{FifoQueue: executionReceiptsRawQueue} + messageHandler := engine.NewMessageHandler( + log, + engine.NewNotifier(), + engine.Pattern{ + Match: func(msg *engine.Message) bool { + _, ok := msg.Payload.(*flow.ExecutionReceipt) + return ok + }, + Store: executionReceiptsQueue, + }, + ) + + e := &Engine{ + log: log.With().Str("engine", "ingestion2").Logger(), + finalizedBlockProcessor: finalizedBlockProcessor, + collectionSyncer: collectionSyncer, + messageHandler: messageHandler, + executionReceiptsQueue: executionReceiptsQueue, + receipts: receipts, + collectionExecutedMetric: collectionExecutedMetric, + } + + // register our workers which are basically consumers of different kinds of data. + // engine notifies workers when new data is available so that they can start processing them. + builder := component.NewComponentManagerBuilder(). + AddWorker(e.messageHandlerLoop). + AddWorker(e.finalizedBlockProcessor.StartWorkerLoop). + AddWorker(e.collectionSyncer.StartWorkerLoop) + e.ComponentManager = builder.Build() + + // engine gets execution receipts from channels.ReceiveReceipts channel + _, err = net.Register(channels.ReceiveReceipts, e) + if err != nil { + return nil, fmt.Errorf("could not register engine in network to receive execution receipts: %w", err) + } + + return e, nil +} + +// Process processes the given event from the node with the given origin ID in +// a blocking manner. It returns the potential processing error when done. +// +// No errors are expected during normal operations. 
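
The Process method below is the enqueue half of a pattern used throughout this engine: network delivery stores a message and pokes a notifier, and a dedicated worker drains the queue. A generic, self-contained rendition of that pattern (our own illustration, not flow-go's engine.MessageHandler):

    package example

    type notifier chan struct{}

    // notify wakes the worker if it is idle; if a wake-up is already pending,
    // the send is dropped, so multiple enqueues coalesce into one notification.
    func (n notifier) notify() {
        select {
        case n <- struct{}{}:
        default:
        }
    }

    // drainLoop blocks on the notifier and, once woken, empties the queue
    // before waiting again, which is why coalesced notifications lose no work.
    func drainLoop(n notifier, pop func() (any, bool), handle func(any)) {
        for range n {
            for {
                msg, ok := pop()
                if !ok {
                    break
                }
                handle(msg)
            }
        }
    }
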
+func (e *Engine) Process(chanName channels.Channel, originID flow.Identifier, event interface{}) error { + select { + case <-e.ComponentManager.ShutdownSignal(): + return component.ErrComponentShutdown + default: + } + + //TODO: we don't need this type switch as message handler has this check under the hood + switch event.(type) { + case *flow.ExecutionReceipt: + err := e.messageHandler.Process(originID, event) + return err + default: + return fmt.Errorf("got invalid event type (%T) from %s channel", event, chanName) + } +} + +// messageHandlerLoop reacts to message handler notifications and processes available execution receipts +// once notification has arrived. +func (e *Engine) messageHandlerLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + + for { + select { + case <-ctx.Done(): + return + case <-e.messageHandler.GetNotifier(): + err := e.processAvailableExecutionReceipts(ctx) + if err != nil { + // if an error reaches this point, it is unexpected + ctx.Throw(err) + return + } + } + } +} + +// processAvailableExecutionReceipts processes available execution receipts in the queue and handles it. +// It continues processing until all enqueued receipts are handled or the context is canceled. +// +// No errors are expected during normal operations. +func (e *Engine) processAvailableExecutionReceipts(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return nil + default: + } + msg, ok := e.executionReceiptsQueue.Get() + if !ok { + return nil + } + + receipt := msg.Payload.(*flow.ExecutionReceipt) + if err := e.persistExecutionReceipt(receipt); err != nil { + return err + } + } +} + +// persistExecutionReceipt persists the execution receipt. +// +// No errors are expected during normal operations. +func (e *Engine) persistExecutionReceipt(receipt *flow.ExecutionReceipt) error { + // persist the execution receipt locally, storing will also index the receipt + err := e.receipts.Store(receipt) + if err != nil { + return fmt.Errorf("failed to store execution receipt: %w", err) + } + + e.collectionExecutedMetric.ExecutionReceiptReceived(receipt) + return nil +} + +// OnFinalizedBlock is called by the follower engine after a block has been finalized and the state has been updated. +// Receives block finalized events from the finalization distributor and forwards them to the consumer. 
+func (e *Engine) OnFinalizedBlock(_ *model.Block) { + e.finalizedBlockProcessor.Notify() +} diff --git a/engine/access/ingestion2/engine_test.go b/engine/access/ingestion2/engine_test.go new file mode 100644 index 00000000000..c7813d7b028 --- /dev/null +++ b/engine/access/ingestion2/engine_test.go @@ -0,0 +1,821 @@ +package ingestion2 + +import ( + "context" + "math/rand" + "os" + "sync" + "testing" + "time" + + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + hotmodel "github.com/onflow/flow-go/consensus/hotstuff/model" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" + downloadermock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/module/metrics" + modulemock "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/module/signature" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/network/channels" + mocknetwork "github.com/onflow/flow-go/network/mock" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +type Suite struct { + suite.Suite + + // protocol state + proto struct { + state *protocol.FollowerState + snapshot *protocol.Snapshot + params *protocol.Params + } + + me *modulemock.Local + net *mocknetwork.EngineRegistry + request *modulemock.Requester + obsIdentity *flow.Identity + provider *mocknetwork.Engine + blocks *storagemock.Blocks + headers *storagemock.Headers + collections *storagemock.Collections + transactions *storagemock.Transactions + receipts *storagemock.ExecutionReceipts + results *storagemock.ExecutionResults + seals *storagemock.Seals + + conduit *mocknetwork.Conduit + downloader *downloadermock.Downloader + sealedBlock *flow.Header + finalizedBlock *flow.Header + log zerolog.Logger + blockMap map[uint64]*flow.Block + rootBlock *flow.Block + + collectionExecutedMetric *indexer.CollectionExecutedMetricImpl + + ctx context.Context + cancel context.CancelFunc + + db storage.DB + dbDir string + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + lockManager lockctx.Manager +} + +func TestIngestEngine(t *testing.T) { + suite.Run(t, new(Suite)) +} + +// TearDownTest stops the engine and cleans up the db +func (s *Suite) TearDownTest() { + s.cancel() + err := os.RemoveAll(s.dbDir) + s.Require().NoError(err) +} + +func (s *Suite) SetupTest() { + s.log = unittest.Logger() + s.ctx, s.cancel = context.WithCancel(context.Background()) + pdb, dbDir := unittest.TempPebbleDB(s.T()) + s.db = pebbleimpl.ToDB(pdb) + s.dbDir = dbDir + s.lockManager = storage.NewTestingLockManager() + + s.obsIdentity = unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) + + s.blocks = storagemock.NewBlocks(s.T()) + // mock out protocol state + s.proto.state = new(protocol.FollowerState) + s.proto.snapshot = new(protocol.Snapshot) + s.proto.params = new(protocol.Params) + 
s.finalizedBlock = unittest.BlockHeaderFixture(unittest.WithHeaderHeight(0))
+	s.proto.state.On("Identity").Return(s.obsIdentity, nil)
+	s.proto.state.On("Params").Return(s.proto.params)
+	s.proto.snapshot.On("Head").Return(
+		func() *flow.Header {
+			return s.finalizedBlock
+		},
+		nil,
+	).Maybe()
+
+	s.me = modulemock.NewLocal(s.T())
+	s.me.On("NodeID").Return(s.obsIdentity.NodeID).Maybe()
+	s.net = mocknetwork.NewEngineRegistry(s.T())
+	conduit := mocknetwork.NewConduit(s.T())
+	s.net.On("Register", channels.ReceiveReceipts, mock.Anything).
+		Return(conduit, nil).
+		Once()
+	s.request = modulemock.NewRequester(s.T())
+	s.provider = mocknetwork.NewEngine(s.T())
+	s.blocks = storagemock.NewBlocks(s.T())
+	s.headers = storagemock.NewHeaders(s.T())
+	s.collections = new(storagemock.Collections)
+	s.receipts = new(storagemock.ExecutionReceipts)
+	s.transactions = new(storagemock.Transactions)
+	s.results = new(storagemock.ExecutionResults)
+	collectionsToMarkFinalized := stdmap.NewTimes(100)
+	collectionsToMarkExecuted := stdmap.NewTimes(100)
+	blocksToMarkExecuted := stdmap.NewTimes(100)
+	blockTransactions := stdmap.NewIdentifierMap(100)
+
+	blockCount := 5
+	s.blockMap = make(map[uint64]*flow.Block, blockCount)
+	s.rootBlock = unittest.Block.Genesis(flow.Emulator)
+	parent := s.rootBlock.ToHeader()
+
+	for i := 0; i < blockCount; i++ {
+		block := unittest.BlockWithParentFixture(parent)
+		// update for next iteration
+		parent = block.ToHeader()
+		s.blockMap[block.Height] = block
+	}
+	s.finalizedBlock = parent
+
+	s.blocks.On("ByHeight", mock.AnythingOfType("uint64")).Return(
+		mocks.ConvertStorageOutput(
+			mocks.StorageMapGetter(s.blockMap),
+			func(block *flow.Block) *flow.Block { return block },
+		),
+	).Maybe()
+
+	s.proto.state.On("Final").Return(s.proto.snapshot, nil)
+
+	// Mock the finalized root block header with height 0.
+	header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(0))
+	s.proto.params.On("FinalizedRoot").Return(header, nil)
+
+	var err error
+	s.collectionExecutedMetric, err = indexer.NewCollectionExecutedMetricImpl(
+		s.log,
+		metrics.NewNoopCollector(),
+		collectionsToMarkFinalized,
+		collectionsToMarkExecuted,
+		blocksToMarkExecuted,
+		s.collections,
+		s.blocks,
+		blockTransactions,
+	)
+	require.NoError(s.T(), err)
+}
+
+func (s *Suite) TestComponentShutdown() {
+	irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx)
+	eng, _ := s.initEngineAndSyncer(irrecoverableCtx)
+
+	// start then shut down the engine
+	unittest.AssertClosesBefore(s.T(), eng.Ready(), 10*time.Millisecond)
+	s.cancel()
+	unittest.AssertClosesBefore(s.T(), eng.Done(), 10*time.Millisecond)
+
+	err := eng.Process(channels.ReceiveReceipts, unittest.IdentifierFixture(), &flow.ExecutionReceipt{})
+	s.Assert().ErrorIs(err, component.ErrComponentShutdown)
+}
+
+// initEngineAndSyncer creates a new instance of the ingestion engine and the collection syncer.
+// It waits until the ingestion engine starts.
+func (s *Suite) initEngineAndSyncer(ctx irrecoverable.SignalerContext) (*Engine, *CollectionSyncer) { + processedHeightInitializer := store.NewConsumerProgress(s.db, module.ConsumeProgressIngestionEngineBlockHeight) + + lastFullBlockHeight, err := store.NewConsumerProgress(s.db, module.ConsumeProgressLastFullBlockHeight).Initialize(s.finalizedBlock.Height) + require.NoError(s.T(), err) + + s.lastFullBlockHeight, err = counters.NewPersistentStrictMonotonicCounter(lastFullBlockHeight) + require.NoError(s.T(), err) + + syncer, err := NewCollectionSyncer( + s.log, + s.collectionExecutedMetric, + module.Requester(s.request), + s.proto.state, + s.blocks, + s.collections, + s.transactions, + s.lastFullBlockHeight, + s.lockManager, + ) + require.NoError(s.T(), err) + + blockProcessor, err := NewFinalizedBlockProcessor( + s.log, + s.proto.state, + s.blocks, + s.results, + processedHeightInitializer, + syncer, + s.collectionExecutedMetric, + ) + require.NoError(s.T(), err) + + eng, err := New( + s.log, + s.net, + blockProcessor, + syncer, + s.receipts, + s.collectionExecutedMetric, + ) + + require.NoError(s.T(), err) + + eng.ComponentManager.Start(ctx) + <-eng.Ready() + + return eng, syncer +} + +// mockCollectionsForBlock mocks collections for block +func (s *Suite) mockCollectionsForBlock(block *flow.Block) { + // we should query the block once and index the guarantee payload once + for _, g := range block.Payload.Guarantees { + collection := unittest.CollectionFixture(1) + light := collection.Light() + s.collections.On("LightByID", g.CollectionID).Return(light, nil).Twice() + } +} + +// generateBlock prepares block with payload and specified guarantee.SignerIndices +func (s *Suite) generateBlock(clusterCommittee flow.IdentitySkeletonList, snap *protocol.Snapshot) *flow.Block { + block := unittest.BlockFixture( + unittest.Block.WithPayload(unittest.PayloadFixture( + unittest.WithGuarantees(unittest.CollectionGuaranteesFixture(4)...), + unittest.WithExecutionResults(unittest.ExecutionResultFixture()), + unittest.WithSeals(unittest.Seal.Fixture()), + )), + ) + + refBlockID := unittest.IdentifierFixture() + for _, guarantee := range block.Payload.Guarantees { + guarantee.ReferenceBlockID = refBlockID + // guarantee signers must be cluster committee members, so that access will fetch collection from + // the signers that are specified by guarantee.SignerIndices + indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) + require.NoError(s.T(), err) + guarantee.SignerIndices = indices + } + + s.proto.state.On("AtBlockID", refBlockID).Return(snap) + + return block +} + +// TestOnFinalizedBlock checks that when a block is received, a request for each individual collection is made +func (s *Suite) TestOnFinalizedBlockSingle() { + cluster := protocol.NewCluster(s.T()) + epoch := protocol.NewCommittedEpoch(s.T()) + epochs := protocol.NewEpochQuery(s.T()) + snap := protocol.NewSnapshot(s.T()) + + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs.On("Current").Return(epoch, nil) + snap.On("Epochs").Return(epochs) + + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() + cluster.On("Members").Return(clusterCommittee, nil) + + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) + + block := s.generateBlock(clusterCommittee, snap) + block.Height = 
s.finalizedBlock.Height + 1 + s.blockMap[block.Height] = block + s.mockCollectionsForBlock(block) + s.finalizedBlock = block.ToHeader() + + hotstuffBlock := hotmodel.Block{ + BlockID: block.ID(), + } + + // expect that the block storage is indexed with each of the collection guarantee + s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() + for _, seal := range block.Payload.Seals { + s.results.On("Index", seal.BlockID, seal.ResultID).Return(nil).Once() + } + + missingCollectionCount := 4 + wg := sync.WaitGroup{} + wg.Add(missingCollectionCount) + + for _, cg := range block.Payload.Guarantees { + s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Run(func(args mock.Arguments) { + // Ensure the test does not complete its work faster than necessary + wg.Done() + }).Once() + } + + // process the block through the finalized callback + eng.OnFinalizedBlock(&hotstuffBlock) + + unittest.RequireReturnsBefore(s.T(), wg.Wait, 100*time.Millisecond, "expect to process new block before timeout") + + // assert that the block was retrieved and all collections were requested + s.headers.AssertExpectations(s.T()) + s.request.AssertNumberOfCalls(s.T(), "EntityByID", len(block.Payload.Guarantees)) + s.results.AssertNumberOfCalls(s.T(), "Index", len(block.Payload.Seals)) +} + +// TestOnFinalizedBlockSeveralBlocksAhead checks OnFinalizedBlock with a block several blocks newer than the last block processed +func (s *Suite) TestOnFinalizedBlockSeveralBlocksAhead() { + cluster := protocol.NewCluster(s.T()) + epoch := protocol.NewCommittedEpoch(s.T()) + epochs := protocol.NewEpochQuery(s.T()) + snap := protocol.NewSnapshot(s.T()) + + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs.On("Current").Return(epoch, nil) + snap.On("Epochs").Return(epochs) + + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() + cluster.On("Members").Return(clusterCommittee, nil) + + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) + + newBlocksCount := 3 + startHeight := s.finalizedBlock.Height + 1 + blocks := make([]*flow.Block, newBlocksCount) + + // generate the test blocks, cgs and collections + for i := 0; i < newBlocksCount; i++ { + block := s.generateBlock(clusterCommittee, snap) + block.Height = startHeight + uint64(i) + s.blockMap[block.Height] = block + blocks[i] = block + s.mockCollectionsForBlock(block) + s.finalizedBlock = block.ToHeader() + } + + // latest of all the new blocks which are newer than the last block processed + latestBlock := blocks[2] + + // block several blocks newer than the last block processed + hotstuffBlock := hotmodel.Block{ + BlockID: latestBlock.ID(), + } + + missingCollectionCountPerBlock := 4 + wg := sync.WaitGroup{} + wg.Add(missingCollectionCountPerBlock * newBlocksCount) + + // expected all new blocks after last block processed + for _, block := range blocks { + s.blocks.On("IndexBlockContainingCollectionGuarantees", block.ID(), []flow.Identifier(flow.GetIDs(block.Payload.Guarantees))).Return(nil).Once() + + for _, cg := range block.Payload.Guarantees { + s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Run(func(args mock.Arguments) { + // Ensure the test does not complete its work faster than necessary, so we can check all expected results + wg.Done() + 
}).Once() + } + for _, seal := range block.Payload.Seals { + s.results.On("Index", seal.BlockID, seal.ResultID).Return(nil).Once() + } + } + + eng.OnFinalizedBlock(&hotstuffBlock) + + unittest.RequireReturnsBefore(s.T(), wg.Wait, 100*time.Millisecond, "expect to process all blocks before timeout") + + expectedEntityByIDCalls := 0 + expectedIndexCalls := 0 + for _, block := range blocks { + expectedEntityByIDCalls += len(block.Payload.Guarantees) + expectedIndexCalls += len(block.Payload.Seals) + } + + s.headers.AssertExpectations(s.T()) + s.blocks.AssertNumberOfCalls(s.T(), "IndexBlockContainingCollectionGuarantees", newBlocksCount) + s.request.AssertNumberOfCalls(s.T(), "EntityByID", expectedEntityByIDCalls) + s.results.AssertNumberOfCalls(s.T(), "Index", expectedIndexCalls) +} + +// TestOnCollection checks that when a Collection is received, it is persisted +func (s *Suite) TestOnCollection() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + s.initEngineAndSyncer(irrecoverableCtx) + + collection := unittest.CollectionFixture(5) + light := collection.Light() + + // we should store the collection and index its transactions + s.collections.On("StoreAndIndexByTransaction", mock.Anything, &collection).Return(light, nil).Once() + + // Create a lock context for indexing + err := unittest.WithLock(s.T(), s.lockManager, storage.LockInsertCollection, func(lctx lockctx.Context) error { + return indexer.IndexCollection(lctx, &collection, s.collections, s.log, s.collectionExecutedMetric) + }) + require.NoError(s.T(), err) + + // check that the collection was stored and indexed + s.collections.AssertExpectations(s.T()) +} + +// TestExecutionReceiptsAreIndexed checks that execution receipts are properly indexed +func (s *Suite) TestExecutionReceiptsAreIndexed() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + eng, _ := s.initEngineAndSyncer(irrecoverableCtx) + + collection := unittest.CollectionFixture(5) + light := collection.Light() + + // we should store the collection and index its transactions + s.collections.On("StoreAndIndexByTransaction", &collection).Return(light, nil).Once() + block := unittest.BlockFixture( + unittest.Block.WithHeight(0), + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees([]*flow.CollectionGuarantee{}...)), + ), + ) + s.blocks.On("ByID", mock.Anything).Return(block, nil) + + // for each transaction in the collection, we should store it + needed := make(map[flow.Identifier]struct{}) + for _, txID := range light.Transactions { + needed[txID] = struct{}{} + } + s.transactions.On("Store", mock.Anything).Return(nil).Run( + func(args mock.Arguments) { + tx := args.Get(0).(*flow.TransactionBody) + _, pending := needed[tx.ID()] + s.Assert().True(pending, "tx not pending (%x)", tx.ID()) + }, + ) + er1 := unittest.ExecutionReceiptFixture() + er2 := unittest.ExecutionReceiptFixture() + + s.receipts.On("Store", mock.Anything).Return(nil) + s.blocks.On("ByID", er1.ExecutionResult.BlockID).Return(nil, storage.ErrNotFound) + + s.receipts.On("Store", mock.Anything).Return(nil) + s.blocks.On("ByID", er2.ExecutionResult.BlockID).Return(nil, storage.ErrNotFound) + + err := eng.persistExecutionReceipt(er1) + require.NoError(s.T(), err) + + err = eng.persistExecutionReceipt(er2) + require.NoError(s.T(), err) + + s.receipts.AssertExpectations(s.T()) + s.results.AssertExpectations(s.T()) + s.receipts.AssertExpectations(s.T()) +} + +// TestOnCollectionDuplicate checks that when a duplicate collection is received, 
the node doesn't +// crash but just ignores its transactions. +func (s *Suite) TestOnCollectionDuplicate() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + s.initEngineAndSyncer(irrecoverableCtx) + collection := unittest.CollectionFixture(5) + + // we should store the collection and index its transactions + s.collections.On("StoreAndIndexByTransaction", mock.Anything, &collection).Return(nil, storage.ErrAlreadyExists).Once() + + // Create a lock context for indexing + err := unittest.WithLock(s.T(), s.lockManager, storage.LockInsertCollection, func(lctx lockctx.Context) error { + return indexer.IndexCollection(lctx, &collection, s.collections, s.log, s.collectionExecutedMetric) + }) + require.Error(s.T(), err) + require.ErrorIs(s.T(), err, storage.ErrAlreadyExists) + + // check that the collection was stored and indexed + s.collections.AssertExpectations(s.T()) +} + +// TestRequestMissingCollections tests that the all missing collections are requested on the call to requestMissingCollections +func (s *Suite) TestRequestMissingCollections() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + _, syncer := s.initEngineAndSyncer(irrecoverableCtx) + + blkCnt := 3 + startHeight := uint64(1000) + + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() + + // generate the test blocks and collections + var collIDs []flow.Identifier + refBlockID := unittest.IdentifierFixture() + for i := 0; i < blkCnt; i++ { + block := unittest.BlockFixture( + // some blocks may not be present hence add a gap + unittest.Block.WithHeight(startHeight+uint64(i)), + unittest.Block.WithPayload(unittest.PayloadFixture( + unittest.WithGuarantees(unittest.CollectionGuaranteesFixture(4, unittest.WithCollRef(refBlockID))...)), + )) + s.blockMap[block.Height] = block + s.finalizedBlock = block.ToHeader() + + for _, c := range block.Payload.Guarantees { + collIDs = append(collIDs, c.CollectionID) + c.ReferenceBlockID = refBlockID + + // guarantee signers must be cluster committee members, so that access will fetch collection from + // the signers that are specified by guarantee.SignerIndices + indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) + require.NoError(s.T(), err) + c.SignerIndices = indices + } + } + + // consider collections are missing for all blocks + err := s.lastFullBlockHeight.Set(startHeight - 1) + s.Require().NoError(err) + + // consider the last test block as the head + + // p is the probability of not receiving the collection before the next poll and it + // helps simulate the slow trickle of the requested collections being received + var p float32 + + // rcvdColl is the map simulating the collection storage key-values + rcvdColl := make(map[flow.Identifier]struct{}) + + // for the first lookup call for each collection, it will be reported as missing from db + // for the subsequent calls, it will be reported as present with the probability p + s.collections.On("LightByID", mock.Anything).Return( + func(cID flow.Identifier) *flow.LightCollection { + return nil // the actual collection object return is never really read + }, + func(cID flow.Identifier) error { + if _, ok := rcvdColl[cID]; ok { + return nil + } + if rand.Float32() >= p { + rcvdColl[cID] = struct{}{} + } + return storage.ErrNotFound + }). 
+		// simulate some db i/o contention
+		After(time.Millisecond * time.Duration(rand.Intn(5)))
+
+	// setup the requester engine mock
+	// EntityByID should be called once per collection
+	for _, c := range collIDs {
+		s.request.On("EntityByID", c, mock.Anything).Return()
+	}
+	// Force should be called once
+	s.request.On("Force").Return()
+
+	cluster := protocol.NewCluster(s.T())
+	cluster.On("Members").Return(clusterCommittee, nil)
+	epoch := protocol.NewCommittedEpoch(s.T())
+	epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil)
+	epochs := protocol.NewEpochQuery(s.T())
+	epochs.On("Current").Return(epoch, nil)
+	snap := protocol.NewSnapshot(s.T())
+	snap.On("Epochs").Return(epochs)
+	s.proto.state.On("AtBlockID", refBlockID).Return(snap)
+
+	assertExpectations := func() {
+		s.request.AssertExpectations(s.T())
+		s.collections.AssertExpectations(s.T())
+		s.proto.snapshot.AssertExpectations(s.T())
+		s.blocks.AssertExpectations(s.T())
+	}
+
+	// test 1 - collections are not received before timeout
+	s.Run("timeout before all missing collections are received", func() {
+
+		// simulate that collections are never received
+		p = 1
+
+		// time out after roughly 100 db poll intervals
+		ctx, cancel := context.WithTimeout(context.Background(), 100*collectionCatchupDBPollInterval)
+		defer cancel()
+
+		err := syncer.requestMissingCollectionsBlocking(ctx)
+
+		require.Error(s.T(), err)
+		require.Contains(s.T(), err.Error(), "context deadline exceeded")
+
+		assertExpectations()
+	})
+	// test 2 - all collections are eventually received before the deadline
+	s.Run("all missing collections are received", func() {
+
+		// 90% of the time, collections are reported as not received when the collection storage is queried
+		p = 0.9
+
+		ctx, cancel := context.WithTimeout(context.Background(), collectionCatchupTimeout)
+		defer cancel()
+
+		err := syncer.requestMissingCollectionsBlocking(ctx)
+
+		require.NoError(s.T(), err)
+		require.Len(s.T(), rcvdColl, len(collIDs))
+
+		assertExpectations()
+	})
+}
+
+// TestProcessBackgroundCalls tests that updateLastFullBlockHeight and checkMissingCollections
+// function calls keep the FullBlockIndex up-to-date and request collections if blocks with missing
+// collections exceed the threshold.
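
For reference while reading the test below: the re-request trigger it exercises reduces to a two-clause condition, paraphrased here with hypothetical parameter names (the real check lives in requestMissingCollections in collection_syncer.go):

    package example

    // shouldRequest re-requests collections when enough blocks are incomplete,
    // or when the full-height index lags too far behind finalization.
    func shouldRequest(incompleteBlocks int, finalized, lastFull uint64, blockThreshold int, ageThreshold uint64) bool {
        return incompleteBlocks >= blockThreshold || finalized-lastFull > ageThreshold
    }
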
+func (s *Suite) TestProcessBackgroundCalls() { + irrecoverableCtx := irrecoverable.NewMockSignalerContext(s.T(), s.ctx) + _, syncer := s.initEngineAndSyncer(irrecoverableCtx) + + blkCnt := 3 + collPerBlk := 10 + startHeight := uint64(1000) + blocks := make([]*flow.Block, blkCnt) + collMap := make(map[flow.Identifier]*flow.LightCollection, blkCnt*collPerBlk) + + // prepare cluster committee members + clusterCommittee := unittest.IdentityListFixture(32 * 4).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() + + refBlockID := unittest.IdentifierFixture() + // generate the test blocks, cgs and collections + for i := 0; i < blkCnt; i++ { + guarantees := make([]*flow.CollectionGuarantee, collPerBlk) + for j := 0; j < collPerBlk; j++ { + coll := unittest.CollectionFixture(2).Light() + collMap[coll.ID()] = coll + cg := unittest.CollectionGuaranteeFixture(func(cg *flow.CollectionGuarantee) { + cg.CollectionID = coll.ID() + cg.ReferenceBlockID = refBlockID + }) + + // guarantee signers must be cluster committee members, so that access will fetch collection from + // the signers that are specified by guarantee.SignerIndices + indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) + require.NoError(s.T(), err) + cg.SignerIndices = indices + guarantees[j] = cg + } + block := unittest.BlockFixture( + unittest.Block.WithHeight(startHeight+uint64(i)), + unittest.Block.WithPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantees...))), + ) + s.blockMap[block.Height] = block + blocks[i] = block + s.finalizedBlock = block.ToHeader() + } + + finalizedHeight := s.finalizedBlock.Height + + cluster := protocol.NewCluster(s.T()) + cluster.On("Members").Return(clusterCommittee, nil) + epoch := protocol.NewCommittedEpoch(s.T()) + epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) + epochs := protocol.NewEpochQuery(s.T()) + epochs.On("Current").Return(epoch, nil) + snap := protocol.NewSnapshot(s.T()) + snap.On("Epochs").Return(epochs) + s.proto.state.On("AtBlockID", refBlockID).Return(snap) + + // blkMissingColl controls which collections are reported as missing by the collections storage mock + blkMissingColl := make([]bool, blkCnt) + for i := 0; i < blkCnt; i++ { + blkMissingColl[i] = false + for _, cg := range blocks[i].Payload.Guarantees { + j := i + s.collections.On("LightByID", cg.CollectionID).Return( + func(cID flow.Identifier) *flow.LightCollection { + return collMap[cID] + }, + func(cID flow.Identifier) error { + if blkMissingColl[j] { + return storage.ErrNotFound + } + return nil + }) + } + } + + rootBlk := blocks[0] + + // root block is the last complete block + err := s.lastFullBlockHeight.Set(rootBlk.Height) + s.Require().NoError(err) + + s.Run("missing collections are requested when count exceeds defaultMissingCollsForBlockThreshold", func() { + // lower the block threshold to request missing collections + defaultMissingCollsForBlockThreshold = 2 + + // mark all blocks beyond the root block as incomplete + for i := 1; i < blkCnt; i++ { + blkMissingColl[i] = true + // setup receive engine expectations + for _, cg := range blocks[i].Payload.Guarantees { + s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Once() + } + } + + err := syncer.requestMissingCollections() + s.Require().NoError(err) + + // assert that missing collections are requested + s.request.AssertExpectations(s.T()) + + // last full blk index is not advanced + s.blocks.AssertExpectations(s.T()) // no new call to 
UpdateLastFullBlockHeight should be made + }) + + s.Run("missing collections are requested when count exceeds defaultMissingCollsForAgeThreshold", func() { + // lower the height threshold to request missing collections + defaultMissingCollsForAgeThreshold = 1 + + // raise the block threshold to ensure it does not trigger missing collection request + defaultMissingCollsForBlockThreshold = blkCnt + 1 + + // mark all blocks beyond the root block as incomplete + for i := 1; i < blkCnt; i++ { + blkMissingColl[i] = true + // setup receive engine expectations + for _, cg := range blocks[i].Payload.Guarantees { + s.request.On("EntityByID", cg.CollectionID, mock.Anything).Return().Once() + } + } + + err := syncer.requestMissingCollections() + s.Require().NoError(err) + + // assert that missing collections are requested + s.request.AssertExpectations(s.T()) + + // last full blk index is not advanced + s.blocks.AssertExpectations(s.T()) // not new call to UpdateLastFullBlockHeight should be made + }) + + s.Run("missing collections are not requested if defaultMissingCollsForBlockThreshold not reached", func() { + // raise the thresholds to avoid requesting missing collections + defaultMissingCollsForAgeThreshold = 3 + defaultMissingCollsForBlockThreshold = 3 + + // mark all blocks beyond the root block as incomplete + for i := 1; i < blkCnt; i++ { + blkMissingColl[i] = true + } + + err := syncer.requestMissingCollections() + s.Require().NoError(err) + + // assert that missing collections are not requested even though there are collections missing + s.request.AssertExpectations(s.T()) + + // last full blk index is not advanced + s.blocks.AssertExpectations(s.T()) // not new call to UpdateLastFullBlockHeight should be made + }) + + // create new block + height := blocks[blkCnt-1].Height + 1 + finalizedBlk := unittest.BlockFixture( + unittest.Block.WithHeight(height), + ) + s.blockMap[height] = finalizedBlk + + finalizedHeight = finalizedBlk.Height + s.finalizedBlock = finalizedBlk.ToHeader() + + blockBeforeFinalized := blocks[blkCnt-1] + + s.Run("full block height index is advanced if newer full blocks are discovered", func() { + // set lastFullBlockHeight to block + err = s.lastFullBlockHeight.Set(blockBeforeFinalized.Height) + s.Require().NoError(err) + + err = syncer.updateLastFullBlockHeight() + s.Require().NoError(err) + s.Require().Equal(finalizedHeight, s.lastFullBlockHeight.Value()) + s.Require().NoError(err) + + s.blocks.AssertExpectations(s.T()) + }) + + s.Run("full block height index is not advanced beyond finalized blocks", func() { + err = syncer.updateLastFullBlockHeight() + s.Require().NoError(err) + + s.Require().Equal(finalizedHeight, s.lastFullBlockHeight.Value()) + s.blocks.AssertExpectations(s.T()) + }) +} diff --git a/engine/access/ingestion2/finalized_block_processor.go b/engine/access/ingestion2/finalized_block_processor.go new file mode 100644 index 00000000000..78511dd9803 --- /dev/null +++ b/engine/access/ingestion2/finalized_block_processor.go @@ -0,0 +1,165 @@ +package ingestion2 + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/jobqueue" + "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +const ( + // finalizedBlockProcessorWorkerCount defines the 
number of workers that + // concurrently process finalized blocks in the job queue. + // MUST be 1 to ensure sequential processing + finalizedBlockProcessorWorkerCount = 1 + + // searchAhead is a number of blocks that should be processed ahead by jobqueue + // MUST be 1 to ensure sequential processing + searchAhead = 1 +) + +// FinalizedBlockProcessor handles processing of finalized blocks, +// including indexing and syncing of related collections and execution results. +// +// FinalizedBlockProcessor is designed to handle the ingestion of finalized Flow blocks +// in a scalable and decoupled manner. It uses a jobqueue.ComponentConsumer to consume +// and process finalized block jobs asynchronously. This design enables the processor +// to handle high-throughput block finalization events without blocking other parts +// of the system. +// +// The processor relies on a notifier (engine.Notifier) to signal when a new finalized +// block is available, which triggers the job consumer to process it. The actual +// processing involves indexing block-to-collection and block-to-execution-result +// mappings, as well as requesting the associated collections. +type FinalizedBlockProcessor struct { + log zerolog.Logger + + consumer *jobqueue.ComponentConsumer + consumerNotifier engine.Notifier + blocks storage.Blocks + + executionResults storage.ExecutionResults + + collectionSyncer *CollectionSyncer + collectionExecutedMetric module.CollectionExecutedMetric +} + +// NewFinalizedBlockProcessor creates and initializes a new FinalizedBlockProcessor, +// setting up job consumer infrastructure to handle finalized block processing. +// +// No errors are expected during normal operations. +func NewFinalizedBlockProcessor( + log zerolog.Logger, + state protocol.State, + blocks storage.Blocks, + executionResults storage.ExecutionResults, + finalizedProcessedHeight storage.ConsumerProgressInitializer, + syncer *CollectionSyncer, + collectionExecutedMetric module.CollectionExecutedMetric, +) (*FinalizedBlockProcessor, error) { + reader := jobqueue.NewFinalizedBlockReader(state, blocks) + finalizedBlock, err := state.Final().Head() + if err != nil { + return nil, fmt.Errorf("could not get finalized block header: %w", err) + } + + consumerNotifier := engine.NewNotifier() + processor := &FinalizedBlockProcessor{ + log: log, + blocks: blocks, + executionResults: executionResults, + consumerNotifier: consumerNotifier, + collectionSyncer: syncer, + collectionExecutedMetric: collectionExecutedMetric, + } + + processor.consumer, err = jobqueue.NewComponentConsumer( + log.With().Str("module", "ingestion_block_consumer").Logger(), + consumerNotifier.Channel(), + finalizedProcessedHeight, + reader, + finalizedBlock.Height, + processor.processFinalizedBlockJobCallback, + finalizedBlockProcessorWorkerCount, + searchAhead, + ) + if err != nil { + return nil, fmt.Errorf("error creating finalized block jobqueue: %w", err) + } + + return processor, nil +} + +// Notify notifies the processor that a new finalized block is available for processing. +func (p *FinalizedBlockProcessor) Notify() { + p.consumerNotifier.Notify() +} + +// StartWorkerLoop begins processing of finalized blocks and signals readiness when initialization is complete. 
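
Before StartWorkerLoop below: the finalizedBlockProcessorWorkerCount and searchAhead settings above pin the job consumer to strictly sequential processing. The toy below is our simplification of why that holds, not the jobqueue implementation: with a single worker and a search-ahead window of one, height h+1 is dispatched only after done() fires for height h.

    package example

    // consumeSequentially dispatches height h+1 only after done() has been
    // called for height h, mirroring a single-worker, searchAhead=1 consumer.
    func consumeSequentially(maxHeight uint64, process func(height uint64, done func())) {
        for h := uint64(1); h <= maxHeight; h++ {
            finished := make(chan struct{})
            process(h, func() { close(finished) }) // done() marks h complete
            <-finished                             // h+1 waits on h
        }
    }
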
+func (p *FinalizedBlockProcessor) StartWorkerLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + p.consumer.Start(ctx) + + err := util.WaitClosed(ctx, p.consumer.Ready()) + if err == nil { + ready() + } + + <-p.consumer.Done() +} + +// processFinalizedBlockJobCallback is a jobqueue callback that processes a finalized block job. +func (p *FinalizedBlockProcessor) processFinalizedBlockJobCallback( + ctx irrecoverable.SignalerContext, + job module.Job, + done func(), +) { + block, err := jobqueue.JobToBlock(job) + if err != nil { + ctx.Throw(fmt.Errorf("failed to convert job to block: %w", err)) + return + } + + err = p.indexFinalizedBlock(block) + if err != nil { + p.log.Error().Err(err). + Str("job_id", string(job.ID())). + Msg("unexpected error during finalized block processing job") + ctx.Throw(fmt.Errorf("failed to index finalized block: %w", err)) + return + } + + done() +} + +// indexFinalizedBlock indexes the given finalized block’s collection guarantees and execution results, +// and requests related collections from the syncer. +// +// No errors are expected during normal operations. +func (p *FinalizedBlockProcessor) indexFinalizedBlock(block *flow.Block) error { + err := p.blocks.IndexBlockContainingCollectionGuarantees(block.ID(), flow.GetIDs(block.Payload.Guarantees)) + if err != nil { + return fmt.Errorf("could not index block for collections: %w", err) + } + + // loop through seals and index ID -> result ID + for _, seal := range block.Payload.Seals { + err := p.executionResults.Index(seal.BlockID, seal.ResultID) + if err != nil { + return fmt.Errorf("could not index block for execution result: %w", err) + } + } + + p.collectionSyncer.RequestCollectionsForBlock(block.Height, block.Payload.Guarantees) + p.collectionExecutedMetric.BlockFinalized(block) + + return nil +} diff --git a/engine/access/integration_unsecure_grpc_server_test.go b/engine/access/integration_unsecure_grpc_server_test.go new file mode 100644 index 00000000000..13c35b52692 --- /dev/null +++ b/engine/access/integration_unsecure_grpc_server_test.go @@ -0,0 +1,376 @@ +package access + +import ( + "context" + "io" + "testing" + "time" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + executiondataproto "github.com/onflow/flow/protobuf/go/flow/executiondata" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/index" + accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rest/websockets" + "github.com/onflow/flow-go/engine/access/rpc" + "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/state_stream" + statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/engine/access/subscription/tracker" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/execution" + 
"github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/grpcserver" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/herocache" + "github.com/onflow/flow-go/module/metrics" + module "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/grpcutils" + "github.com/onflow/flow-go/utils/unittest" +) + +// SameGRPCPortTestSuite verifies both AccessAPI and ExecutionDataAPI client continue to work when configured +// on the same port +type SameGRPCPortTestSuite struct { + suite.Suite + state *protocol.State + snapshot *protocol.Snapshot + epochQuery *protocol.EpochQuery + log zerolog.Logger + net *network.EngineRegistry + request *module.Requester + collClient *accessmock.AccessAPIClient + execClient *accessmock.ExecutionAPIClient + me *module.Local + chainID flow.ChainID + metrics *metrics.NoopCollector + rpcEng *rpc.Engine + stateStreamEng *statestreambackend.Engine + executionDataTracker tracker.ExecutionDataTracker + + // storage + blocks *storagemock.Blocks + headers *storagemock.Headers + events *storagemock.Events + collections *storagemock.Collections + transactions *storagemock.Transactions + receipts *storagemock.ExecutionReceipts + seals *storagemock.Seals + results *storagemock.ExecutionResults + registers *execution.RegistersAsyncStore + + ctx irrecoverable.SignalerContext + cancel context.CancelFunc + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer + + bs blobs.Blobstore + eds execution_data.ExecutionDataStore + broadcaster *engine.Broadcaster + execDataCache *cache.ExecutionDataCache + execDataHeroCache *herocache.BlockExecutionData + + blockMap map[uint64]*flow.Block +} + +func (suite *SameGRPCPortTestSuite) SetupTest() { + suite.log = unittest.Logger() + suite.net = new(network.EngineRegistry) + suite.state = new(protocol.State) + suite.snapshot = new(protocol.Snapshot) + params := new(protocol.Params) + suite.registers = execution.NewRegistersAsyncStore() + + suite.epochQuery = new(protocol.EpochQuery) + suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() + suite.state.On("Final").Return(suite.snapshot, nil).Maybe() + suite.state.On("Params").Return(params) + suite.snapshot.On("Epochs").Return(suite.epochQuery).Maybe() + suite.blocks = new(storagemock.Blocks) + suite.headers = new(storagemock.Headers) + suite.events = new(storagemock.Events) + suite.transactions = new(storagemock.Transactions) + suite.collections = new(storagemock.Collections) + suite.receipts = new(storagemock.ExecutionReceipts) + suite.results = new(storagemock.ExecutionResults) + suite.seals = new(storagemock.Seals) + + suite.collClient = new(accessmock.AccessAPIClient) + suite.execClient = new(accessmock.ExecutionAPIClient) + + suite.request = new(module.Requester) + suite.request.On("EntityByID", mock.Anything, mock.Anything) + + suite.me = new(module.Local) + suite.eds = execution_data.NewExecutionDataStore(suite.bs, execution_data.DefaultSerializer) + + suite.broadcaster = engine.NewBroadcaster() + + suite.execDataHeroCache = herocache.NewBlockExecutionData(subscription.DefaultCacheSize, suite.log, metrics.NewNoopCollector()) + suite.execDataCache = 
cache.NewExecutionDataCache(suite.eds, suite.headers, suite.seals, suite.results, suite.execDataHeroCache) + + accessIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) + suite.me. + On("NodeID"). + Return(accessIdentity.NodeID) + + suite.chainID = flow.Testnet + suite.metrics = metrics.NewNoopCollector() + + config := rpc.Config{ + UnsecureGRPCListenAddr: unittest.DefaultAddress, + SecureGRPCListenAddr: unittest.DefaultAddress, + HTTPListenAddr: unittest.DefaultAddress, + WebSocketConfig: websockets.NewDefaultWebsocketConfig(), + } + + blockCount := 5 + suite.blockMap = make(map[uint64]*flow.Block, blockCount) + // generate blockCount consecutive blocks with associated seal, result and execution data + rootBlock := unittest.BlockFixture() + parent := rootBlock.ToHeader() + suite.blockMap[rootBlock.Height] = rootBlock + + for i := 0; i < blockCount; i++ { + block := unittest.BlockWithParentFixture(parent) + suite.blockMap[block.Height] = block + // advance the parent so the next block extends this one + parent = block.ToHeader() + } + + params.On("SporkID").Return(unittest.IdentifierFixture(), nil) + params.On("SporkRootBlockHeight").Return(rootBlock.Height, nil) + + // generate a server certificate that will be served by the GRPC server + networkingKey := unittest.NetworkingPrivKeyFixture() + x509Certificate, err := grpcutils.X509Certificate(networkingKey) + assert.NoError(suite.T(), err) + tlsConfig := grpcutils.DefaultServerTLSConfig(x509Certificate) + // set the transport credentials for the server to use + config.TransportCredentials = credentials.NewTLS(tlsConfig) + + suite.secureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.SecureGRPCListenAddr, + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + false, + nil, + nil, + grpcserver.WithTransportCredentials(config.TransportCredentials)).Build() + + suite.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.UnsecureGRPCListenAddr, + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + false, + nil, + nil).Build() + + block := unittest.BlockHeaderFixture() + suite.snapshot.On("Head").Return(block, nil) + + bnd, err := backend.New(backend.Params{ + State: suite.state, + CollectionRPC: suite.collClient, + Blocks: suite.blocks, + Headers: suite.headers, + Collections: suite.collections, + Transactions: suite.transactions, + ChainID: suite.chainID, + AccessMetrics: suite.metrics, + MaxHeightRange: 0, + Log: suite.log, + SnapshotHistoryLimit: 0, + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + }) + require.NoError(suite.T(), err) + + stateStreamConfig := statestreambackend.Config{} + // create rpc engine builder + rpcEngBuilder, err := rpc.NewBuilder( + suite.log, + suite.state, + config, + suite.chainID, + suite.metrics, + false, + suite.me, + bnd, + bnd, + suite.secureGrpcServer, + suite.unsecureGrpcServer, + nil, + stateStreamConfig, + nil, + ) + assert.NoError(suite.T(), err) + suite.rpcEng, err = rpcEngBuilder.WithLegacy().Build() + assert.NoError(suite.T(), err) + suite.ctx, suite.cancel = irrecoverable.NewMockSignalerContextWithCancel(suite.T(), context.Background()) + + suite.headers.On("BlockIDByHeight", mock.AnythingOfType("uint64")).Return( + func(height uint64) flow.Identifier { + if block, ok := suite.blockMap[height]; ok { + return block.ID() + } + return flow.ZeroID + }, + func(height
uint64) error { + if _, ok := suite.blockMap[height]; ok { + return nil + } + return storage.ErrNotFound + }, + ).Maybe() + + conf := statestreambackend.Config{ + ClientSendTimeout: subscription.DefaultSendTimeout, + ClientSendBufferSize: subscription.DefaultSendBufferSize, + } + + subscriptionHandler := subscription.NewSubscriptionHandler( + suite.log, + suite.broadcaster, + subscription.DefaultSendTimeout, + subscription.DefaultResponseLimit, + subscription.DefaultSendBufferSize, + ) + + eventIndexer := index.NewEventsIndex(index.NewReporter(), suite.events) + + suite.executionDataTracker = tracker.NewExecutionDataTracker( + suite.log, + suite.state, + rootBlock.Height, + suite.headers, + nil, + rootBlock.Height, + eventIndexer, + false, + ) + + stateStreamBackend, err := statestreambackend.New( + suite.log, + suite.state, + suite.headers, + suite.seals, + suite.results, + nil, + suite.execDataCache, + suite.registers, + eventIndexer, + false, + state_stream.DefaultRegisterIDsRequestLimit, + subscriptionHandler, + suite.executionDataTracker, + ) + assert.NoError(suite.T(), err) + + // create state stream engine + suite.stateStreamEng, err = statestreambackend.NewEng( + suite.log, + conf, + suite.execDataCache, + suite.headers, + suite.chainID, + suite.unsecureGrpcServer, + stateStreamBackend, + ) + assert.NoError(suite.T(), err) + + suite.rpcEng.Start(suite.ctx) + suite.stateStreamEng.Start(suite.ctx) + + suite.secureGrpcServer.Start(suite.ctx) + suite.unsecureGrpcServer.Start(suite.ctx) + + // wait for the servers to start up + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Ready(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Ready(), 2*time.Second) + + // wait for the rpc engine to start up + unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Ready(), 2*time.Second) + // wait for the state stream engine to start up + unittest.AssertClosesBefore(suite.T(), suite.stateStreamEng.Ready(), 2*time.Second) +} + +func (suite *SameGRPCPortTestSuite) TearDownTest() { + suite.cancel() + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.stateStreamEng.Done(), 2*time.Second) +} + +// TestEnginesOnTheSameGrpcPort verifies that both the AccessAPI and ExecutionDataAPI clients successfully connect and +// continue to work when configured on the same port. +func (suite *SameGRPCPortTestSuite) TestEnginesOnTheSameGrpcPort() { + ctx := context.Background() + + conn, err := grpc.Dial( + suite.unsecureGrpcServer.GRPCAddress().String(), + grpc.WithTransportCredentials(insecure.NewCredentials())) + assert.NoError(suite.T(), err) + closer := io.Closer(conn) + + suite.Run("happy path - grpc access api client can connect successfully", func() { + req := &accessproto.GetNetworkParametersRequest{} + + // expect 2 upstream calls + suite.execClient.On("GetNetworkParameters", mock.Anything, mock.Anything).Return(nil, nil).Twice() + suite.collClient.On("GetNetworkParameters", mock.Anything, mock.Anything).Return(nil, nil).Twice() + + client := suite.unsecureAccessAPIClient(conn) + + _, err := client.GetNetworkParameters(ctx, req) + assert.NoError(suite.T(), err, "failed to get network parameters") + }) + + suite.Run("happy path - grpc execution data api client can connect successfully", func() { + req :=
&executiondataproto.SubscribeEventsFromLatestRequest{} + + client := suite.unsecureExecutionDataAPIClient(conn) + + _, err := client.SubscribeEventsFromLatest(ctx, req) + assert.NoError(suite.T(), err, "failed to subscribe events") + }) + defer closer.Close() +} + +func TestSameGRPCTestSuite(t *testing.T) { + suite.Run(t, new(SameGRPCPortTestSuite)) +} + +// unsecureAccessAPIClient creates an unsecure grpc AccessAPI client +func (suite *SameGRPCPortTestSuite) unsecureAccessAPIClient(conn *grpc.ClientConn) accessproto.AccessAPIClient { + client := accessproto.NewAccessAPIClient(conn) + return client +} + +// unsecureExecutionDataAPIClient creates an unsecure ExecutionDataAPI client +func (suite *SameGRPCPortTestSuite) unsecureExecutionDataAPIClient(conn *grpc.ClientConn) executiondataproto.ExecutionDataAPIClient { + client := executiondataproto.NewExecutionDataAPIClient(conn) + return client +} diff --git a/engine/access/mock/access_api_client.go b/engine/access/mock/access_api_client.go index 234e4ffcdee..ee70f212d21 100644 --- a/engine/access/mock/access_api_client.go +++ b/engine/access/mock/access_api_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -28,6 +28,10 @@ func (_m *AccessAPIClient) ExecuteScriptAtBlockHeight(ctx context.Context, in *a _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockHeight") + } + var r0 *access.ExecuteScriptResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.ExecuteScriptAtBlockHeightRequest, ...grpc.CallOption) (*access.ExecuteScriptResponse, error)); ok { @@ -61,6 +65,10 @@ func (_m *AccessAPIClient) ExecuteScriptAtBlockID(ctx context.Context, in *acces _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockID") + } + var r0 *access.ExecuteScriptResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.ExecuteScriptAtBlockIDRequest, ...grpc.CallOption) (*access.ExecuteScriptResponse, error)); ok { @@ -94,6 +102,10 @@ func (_m *AccessAPIClient) ExecuteScriptAtLatestBlock(ctx context.Context, in *a _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtLatestBlock") + } + var r0 *access.ExecuteScriptResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.ExecuteScriptAtLatestBlockRequest, ...grpc.CallOption) (*access.ExecuteScriptResponse, error)); ok { @@ -127,6 +139,10 @@ func (_m *AccessAPIClient) GetAccount(ctx context.Context, in *access.GetAccount _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + var r0 *access.GetAccountResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountRequest, ...grpc.CallOption) (*access.GetAccountResponse, error)); ok { @@ -160,6 +176,10 @@ func (_m *AccessAPIClient) GetAccountAtBlockHeight(ctx context.Context, in *acce _ca = append(_ca, _va...) ret := _m.Called(_ca...) 
+ if len(ret) == 0 { + panic("no return value specified for GetAccountAtBlockHeight") + } + var r0 *access.AccountResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountAtBlockHeightRequest, ...grpc.CallOption) (*access.AccountResponse, error)); ok { @@ -193,6 +213,10 @@ func (_m *AccessAPIClient) GetAccountAtLatestBlock(ctx context.Context, in *acce _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetAccountAtLatestBlock") + } + var r0 *access.AccountResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountAtLatestBlockRequest, ...grpc.CallOption) (*access.AccountResponse, error)); ok { @@ -215,8 +239,8 @@ func (_m *AccessAPIClient) GetAccountAtLatestBlock(ctx context.Context, in *acce return r0, r1 } -// GetBlockByHeight provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetBlockByHeight(ctx context.Context, in *access.GetBlockByHeightRequest, opts ...grpc.CallOption) (*access.BlockResponse, error) { +// GetAccountBalanceAtBlockHeight provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetAccountBalanceAtBlockHeight(ctx context.Context, in *access.GetAccountBalanceAtBlockHeightRequest, opts ...grpc.CallOption) (*access.AccountBalanceResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -226,20 +250,24 @@ func (_m *AccessAPIClient) GetBlockByHeight(ctx context.Context, in *access.GetB _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.BlockResponse + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtBlockHeight") + } + + var r0 *access.AccountBalanceResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockByHeightRequest, ...grpc.CallOption) (*access.BlockResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountBalanceAtBlockHeightRequest, ...grpc.CallOption) (*access.AccountBalanceResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockByHeightRequest, ...grpc.CallOption) *access.BlockResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountBalanceAtBlockHeightRequest, ...grpc.CallOption) *access.AccountBalanceResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.BlockResponse) + r0 = ret.Get(0).(*access.AccountBalanceResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetBlockByHeightRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountBalanceAtBlockHeightRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) 
} else { r1 = ret.Error(1) @@ -248,8 +276,8 @@ func (_m *AccessAPIClient) GetBlockByHeight(ctx context.Context, in *access.GetB return r0, r1 } -// GetBlockByID provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetBlockByID(ctx context.Context, in *access.GetBlockByIDRequest, opts ...grpc.CallOption) (*access.BlockResponse, error) { +// GetAccountBalanceAtLatestBlock provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetAccountBalanceAtLatestBlock(ctx context.Context, in *access.GetAccountBalanceAtLatestBlockRequest, opts ...grpc.CallOption) (*access.AccountBalanceResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -259,20 +287,24 @@ func (_m *AccessAPIClient) GetBlockByID(ctx context.Context, in *access.GetBlock _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.BlockResponse + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtLatestBlock") + } + + var r0 *access.AccountBalanceResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockByIDRequest, ...grpc.CallOption) (*access.BlockResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountBalanceAtLatestBlockRequest, ...grpc.CallOption) (*access.AccountBalanceResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockByIDRequest, ...grpc.CallOption) *access.BlockResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountBalanceAtLatestBlockRequest, ...grpc.CallOption) *access.AccountBalanceResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.BlockResponse) + r0 = ret.Get(0).(*access.AccountBalanceResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetBlockByIDRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountBalanceAtLatestBlockRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -281,8 +313,8 @@ func (_m *AccessAPIClient) GetBlockByID(ctx context.Context, in *access.GetBlock return r0, r1 } -// GetBlockHeaderByHeight provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetBlockHeaderByHeight(ctx context.Context, in *access.GetBlockHeaderByHeightRequest, opts ...grpc.CallOption) (*access.BlockHeaderResponse, error) { +// GetAccountKeyAtBlockHeight provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetAccountKeyAtBlockHeight(ctx context.Context, in *access.GetAccountKeyAtBlockHeightRequest, opts ...grpc.CallOption) (*access.AccountKeyResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -292,20 +324,24 @@ func (_m *AccessAPIClient) GetBlockHeaderByHeight(ctx context.Context, in *acces _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.BlockHeaderResponse + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtBlockHeight") + } + + var r0 *access.AccountKeyResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockHeaderByHeightRequest, ...grpc.CallOption) (*access.BlockHeaderResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeyAtBlockHeightRequest, ...grpc.CallOption) (*access.AccountKeyResponse, error)); ok { return rf(ctx, in, opts...) 
} - if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockHeaderByHeightRequest, ...grpc.CallOption) *access.BlockHeaderResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeyAtBlockHeightRequest, ...grpc.CallOption) *access.AccountKeyResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.BlockHeaderResponse) + r0 = ret.Get(0).(*access.AccountKeyResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetBlockHeaderByHeightRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountKeyAtBlockHeightRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -314,8 +350,8 @@ func (_m *AccessAPIClient) GetBlockHeaderByHeight(ctx context.Context, in *acces return r0, r1 } -// GetBlockHeaderByID provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetBlockHeaderByID(ctx context.Context, in *access.GetBlockHeaderByIDRequest, opts ...grpc.CallOption) (*access.BlockHeaderResponse, error) { +// GetAccountKeyAtLatestBlock provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetAccountKeyAtLatestBlock(ctx context.Context, in *access.GetAccountKeyAtLatestBlockRequest, opts ...grpc.CallOption) (*access.AccountKeyResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -325,20 +361,24 @@ func (_m *AccessAPIClient) GetBlockHeaderByID(ctx context.Context, in *access.Ge _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.BlockHeaderResponse + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtLatestBlock") + } + + var r0 *access.AccountKeyResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockHeaderByIDRequest, ...grpc.CallOption) (*access.BlockHeaderResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeyAtLatestBlockRequest, ...grpc.CallOption) (*access.AccountKeyResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockHeaderByIDRequest, ...grpc.CallOption) *access.BlockHeaderResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeyAtLatestBlockRequest, ...grpc.CallOption) *access.AccountKeyResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.BlockHeaderResponse) + r0 = ret.Get(0).(*access.AccountKeyResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetBlockHeaderByIDRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountKeyAtLatestBlockRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) 
} else { r1 = ret.Error(1) @@ -347,8 +387,8 @@ func (_m *AccessAPIClient) GetBlockHeaderByID(ctx context.Context, in *access.Ge return r0, r1 } -// GetCollectionByID provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetCollectionByID(ctx context.Context, in *access.GetCollectionByIDRequest, opts ...grpc.CallOption) (*access.CollectionResponse, error) { +// GetAccountKeysAtBlockHeight provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetAccountKeysAtBlockHeight(ctx context.Context, in *access.GetAccountKeysAtBlockHeightRequest, opts ...grpc.CallOption) (*access.AccountKeysResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -358,20 +398,24 @@ func (_m *AccessAPIClient) GetCollectionByID(ctx context.Context, in *access.Get _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.CollectionResponse + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtBlockHeight") + } + + var r0 *access.AccountKeysResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetCollectionByIDRequest, ...grpc.CallOption) (*access.CollectionResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeysAtBlockHeightRequest, ...grpc.CallOption) (*access.AccountKeysResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetCollectionByIDRequest, ...grpc.CallOption) *access.CollectionResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeysAtBlockHeightRequest, ...grpc.CallOption) *access.AccountKeysResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.CollectionResponse) + r0 = ret.Get(0).(*access.AccountKeysResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetCollectionByIDRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountKeysAtBlockHeightRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -380,8 +424,8 @@ func (_m *AccessAPIClient) GetCollectionByID(ctx context.Context, in *access.Get return r0, r1 } -// GetEventsForBlockIDs provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetEventsForBlockIDs(ctx context.Context, in *access.GetEventsForBlockIDsRequest, opts ...grpc.CallOption) (*access.EventsResponse, error) { +// GetAccountKeysAtLatestBlock provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetAccountKeysAtLatestBlock(ctx context.Context, in *access.GetAccountKeysAtLatestBlockRequest, opts ...grpc.CallOption) (*access.AccountKeysResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -391,20 +435,24 @@ func (_m *AccessAPIClient) GetEventsForBlockIDs(ctx context.Context, in *access. _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.EventsResponse + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtLatestBlock") + } + + var r0 *access.AccountKeysResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetEventsForBlockIDsRequest, ...grpc.CallOption) (*access.EventsResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeysAtLatestBlockRequest, ...grpc.CallOption) (*access.AccountKeysResponse, error)); ok { return rf(ctx, in, opts...) 
} - if rf, ok := ret.Get(0).(func(context.Context, *access.GetEventsForBlockIDsRequest, ...grpc.CallOption) *access.EventsResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeysAtLatestBlockRequest, ...grpc.CallOption) *access.AccountKeysResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.EventsResponse) + r0 = ret.Get(0).(*access.AccountKeysResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetEventsForBlockIDsRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountKeysAtLatestBlockRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -413,8 +461,8 @@ func (_m *AccessAPIClient) GetEventsForBlockIDs(ctx context.Context, in *access. return r0, r1 } -// GetEventsForHeightRange provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetEventsForHeightRange(ctx context.Context, in *access.GetEventsForHeightRangeRequest, opts ...grpc.CallOption) (*access.EventsResponse, error) { +// GetBlockByHeight provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetBlockByHeight(ctx context.Context, in *access.GetBlockByHeightRequest, opts ...grpc.CallOption) (*access.BlockResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -424,20 +472,24 @@ func (_m *AccessAPIClient) GetEventsForHeightRange(ctx context.Context, in *acce _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.EventsResponse + if len(ret) == 0 { + panic("no return value specified for GetBlockByHeight") + } + + var r0 *access.BlockResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetEventsForHeightRangeRequest, ...grpc.CallOption) (*access.EventsResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockByHeightRequest, ...grpc.CallOption) (*access.BlockResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetEventsForHeightRangeRequest, ...grpc.CallOption) *access.EventsResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockByHeightRequest, ...grpc.CallOption) *access.BlockResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.EventsResponse) + r0 = ret.Get(0).(*access.BlockResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetEventsForHeightRangeRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetBlockByHeightRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) 
} else { r1 = ret.Error(1) @@ -446,8 +498,8 @@ func (_m *AccessAPIClient) GetEventsForHeightRange(ctx context.Context, in *acce return r0, r1 } -// GetExecutionResultForBlockID provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetExecutionResultForBlockID(ctx context.Context, in *access.GetExecutionResultForBlockIDRequest, opts ...grpc.CallOption) (*access.ExecutionResultForBlockIDResponse, error) { +// GetBlockByID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetBlockByID(ctx context.Context, in *access.GetBlockByIDRequest, opts ...grpc.CallOption) (*access.BlockResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -457,20 +509,24 @@ func (_m *AccessAPIClient) GetExecutionResultForBlockID(ctx context.Context, in _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.ExecutionResultForBlockIDResponse + if len(ret) == 0 { + panic("no return value specified for GetBlockByID") + } + + var r0 *access.BlockResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultForBlockIDRequest, ...grpc.CallOption) (*access.ExecutionResultForBlockIDResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockByIDRequest, ...grpc.CallOption) (*access.BlockResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultForBlockIDRequest, ...grpc.CallOption) *access.ExecutionResultForBlockIDResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockByIDRequest, ...grpc.CallOption) *access.BlockResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.ExecutionResultForBlockIDResponse) + r0 = ret.Get(0).(*access.BlockResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetExecutionResultForBlockIDRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetBlockByIDRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -479,8 +535,8 @@ func (_m *AccessAPIClient) GetExecutionResultForBlockID(ctx context.Context, in return r0, r1 } -// GetLatestBlock provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetLatestBlock(ctx context.Context, in *access.GetLatestBlockRequest, opts ...grpc.CallOption) (*access.BlockResponse, error) { +// GetBlockHeaderByHeight provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetBlockHeaderByHeight(ctx context.Context, in *access.GetBlockHeaderByHeightRequest, opts ...grpc.CallOption) (*access.BlockHeaderResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -490,20 +546,24 @@ func (_m *AccessAPIClient) GetLatestBlock(ctx context.Context, in *access.GetLat _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.BlockResponse + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByHeight") + } + + var r0 *access.BlockHeaderResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestBlockRequest, ...grpc.CallOption) (*access.BlockResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockHeaderByHeightRequest, ...grpc.CallOption) (*access.BlockHeaderResponse, error)); ok { return rf(ctx, in, opts...) 
} - if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestBlockRequest, ...grpc.CallOption) *access.BlockResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockHeaderByHeightRequest, ...grpc.CallOption) *access.BlockHeaderResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.BlockResponse) + r0 = ret.Get(0).(*access.BlockHeaderResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetLatestBlockRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetBlockHeaderByHeightRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -512,8 +572,8 @@ func (_m *AccessAPIClient) GetLatestBlock(ctx context.Context, in *access.GetLat return r0, r1 } -// GetLatestBlockHeader provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetLatestBlockHeader(ctx context.Context, in *access.GetLatestBlockHeaderRequest, opts ...grpc.CallOption) (*access.BlockHeaderResponse, error) { +// GetBlockHeaderByID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetBlockHeaderByID(ctx context.Context, in *access.GetBlockHeaderByIDRequest, opts ...grpc.CallOption) (*access.BlockHeaderResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -523,12 +583,16 @@ func (_m *AccessAPIClient) GetLatestBlockHeader(ctx context.Context, in *access. _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByID") + } + var r0 *access.BlockHeaderResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestBlockHeaderRequest, ...grpc.CallOption) (*access.BlockHeaderResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockHeaderByIDRequest, ...grpc.CallOption) (*access.BlockHeaderResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestBlockHeaderRequest, ...grpc.CallOption) *access.BlockHeaderResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockHeaderByIDRequest, ...grpc.CallOption) *access.BlockHeaderResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { @@ -536,7 +600,7 @@ func (_m *AccessAPIClient) GetLatestBlockHeader(ctx context.Context, in *access. } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetLatestBlockHeaderRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetBlockHeaderByIDRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -545,8 +609,8 @@ func (_m *AccessAPIClient) GetLatestBlockHeader(ctx context.Context, in *access. 
return r0, r1 } -// GetLatestProtocolStateSnapshot provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetLatestProtocolStateSnapshot(ctx context.Context, in *access.GetLatestProtocolStateSnapshotRequest, opts ...grpc.CallOption) (*access.ProtocolStateSnapshotResponse, error) { +// GetCollectionByID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetCollectionByID(ctx context.Context, in *access.GetCollectionByIDRequest, opts ...grpc.CallOption) (*access.CollectionResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -556,20 +620,24 @@ func (_m *AccessAPIClient) GetLatestProtocolStateSnapshot(ctx context.Context, i _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.ProtocolStateSnapshotResponse + if len(ret) == 0 { + panic("no return value specified for GetCollectionByID") + } + + var r0 *access.CollectionResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestProtocolStateSnapshotRequest, ...grpc.CallOption) (*access.ProtocolStateSnapshotResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetCollectionByIDRequest, ...grpc.CallOption) (*access.CollectionResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestProtocolStateSnapshotRequest, ...grpc.CallOption) *access.ProtocolStateSnapshotResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetCollectionByIDRequest, ...grpc.CallOption) *access.CollectionResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.ProtocolStateSnapshotResponse) + r0 = ret.Get(0).(*access.CollectionResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetLatestProtocolStateSnapshotRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetCollectionByIDRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -578,8 +646,8 @@ func (_m *AccessAPIClient) GetLatestProtocolStateSnapshot(ctx context.Context, i return r0, r1 } -// GetNetworkParameters provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetNetworkParameters(ctx context.Context, in *access.GetNetworkParametersRequest, opts ...grpc.CallOption) (*access.GetNetworkParametersResponse, error) { +// GetEventsForBlockIDs provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetEventsForBlockIDs(ctx context.Context, in *access.GetEventsForBlockIDsRequest, opts ...grpc.CallOption) (*access.EventsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -589,20 +657,24 @@ func (_m *AccessAPIClient) GetNetworkParameters(ctx context.Context, in *access. _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.GetNetworkParametersResponse + if len(ret) == 0 { + panic("no return value specified for GetEventsForBlockIDs") + } + + var r0 *access.EventsResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetNetworkParametersRequest, ...grpc.CallOption) (*access.GetNetworkParametersResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetEventsForBlockIDsRequest, ...grpc.CallOption) (*access.EventsResponse, error)); ok { return rf(ctx, in, opts...) 
} - if rf, ok := ret.Get(0).(func(context.Context, *access.GetNetworkParametersRequest, ...grpc.CallOption) *access.GetNetworkParametersResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetEventsForBlockIDsRequest, ...grpc.CallOption) *access.EventsResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.GetNetworkParametersResponse) + r0 = ret.Get(0).(*access.EventsResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetNetworkParametersRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetEventsForBlockIDsRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -611,8 +683,8 @@ func (_m *AccessAPIClient) GetNetworkParameters(ctx context.Context, in *access. return r0, r1 } -// GetNodeVersionInfo provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetNodeVersionInfo(ctx context.Context, in *access.GetNodeVersionInfoRequest, opts ...grpc.CallOption) (*access.GetNodeVersionInfoResponse, error) { +// GetEventsForHeightRange provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetEventsForHeightRange(ctx context.Context, in *access.GetEventsForHeightRangeRequest, opts ...grpc.CallOption) (*access.EventsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -622,20 +694,24 @@ func (_m *AccessAPIClient) GetNodeVersionInfo(ctx context.Context, in *access.Ge _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.GetNodeVersionInfoResponse + if len(ret) == 0 { + panic("no return value specified for GetEventsForHeightRange") + } + + var r0 *access.EventsResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetNodeVersionInfoRequest, ...grpc.CallOption) (*access.GetNodeVersionInfoResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetEventsForHeightRangeRequest, ...grpc.CallOption) (*access.EventsResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetNodeVersionInfoRequest, ...grpc.CallOption) *access.GetNodeVersionInfoResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetEventsForHeightRangeRequest, ...grpc.CallOption) *access.EventsResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.GetNodeVersionInfoResponse) + r0 = ret.Get(0).(*access.EventsResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetNodeVersionInfoRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetEventsForHeightRangeRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) 
} else { r1 = ret.Error(1) @@ -644,8 +720,8 @@ func (_m *AccessAPIClient) GetNodeVersionInfo(ctx context.Context, in *access.Ge return r0, r1 } -// GetTransaction provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetTransaction(ctx context.Context, in *access.GetTransactionRequest, opts ...grpc.CallOption) (*access.TransactionResponse, error) { +// GetExecutionResultByID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetExecutionResultByID(ctx context.Context, in *access.GetExecutionResultByIDRequest, opts ...grpc.CallOption) (*access.ExecutionResultByIDResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -655,20 +731,24 @@ func (_m *AccessAPIClient) GetTransaction(ctx context.Context, in *access.GetTra _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.TransactionResponse + if len(ret) == 0 { + panic("no return value specified for GetExecutionResultByID") + } + + var r0 *access.ExecutionResultByIDResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) (*access.TransactionResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultByIDRequest, ...grpc.CallOption) (*access.ExecutionResultByIDResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) *access.TransactionResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultByIDRequest, ...grpc.CallOption) *access.ExecutionResultByIDResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.TransactionResponse) + r0 = ret.Get(0).(*access.ExecutionResultByIDResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetExecutionResultByIDRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -677,8 +757,8 @@ func (_m *AccessAPIClient) GetTransaction(ctx context.Context, in *access.GetTra return r0, r1 } -// GetTransactionResult provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetTransactionResult(ctx context.Context, in *access.GetTransactionRequest, opts ...grpc.CallOption) (*access.TransactionResultResponse, error) { +// GetExecutionResultForBlockID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetExecutionResultForBlockID(ctx context.Context, in *access.GetExecutionResultForBlockIDRequest, opts ...grpc.CallOption) (*access.ExecutionResultForBlockIDResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -688,20 +768,24 @@ func (_m *AccessAPIClient) GetTransactionResult(ctx context.Context, in *access. _ca = append(_ca, _va...) ret := _m.Called(_ca...) 
- var r0 *access.TransactionResultResponse + if len(ret) == 0 { + panic("no return value specified for GetExecutionResultForBlockID") + } + + var r0 *access.ExecutionResultForBlockIDResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) (*access.TransactionResultResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultForBlockIDRequest, ...grpc.CallOption) (*access.ExecutionResultForBlockIDResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) *access.TransactionResultResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultForBlockIDRequest, ...grpc.CallOption) *access.ExecutionResultForBlockIDResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.TransactionResultResponse) + r0 = ret.Get(0).(*access.ExecutionResultForBlockIDResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetExecutionResultForBlockIDRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -710,8 +794,8 @@ func (_m *AccessAPIClient) GetTransactionResult(ctx context.Context, in *access. return r0, r1 } -// GetTransactionResultByIndex provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetTransactionResultByIndex(ctx context.Context, in *access.GetTransactionByIndexRequest, opts ...grpc.CallOption) (*access.TransactionResultResponse, error) { +// GetFullCollectionByID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetFullCollectionByID(ctx context.Context, in *access.GetFullCollectionByIDRequest, opts ...grpc.CallOption) (*access.FullCollectionResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -721,20 +805,24 @@ func (_m *AccessAPIClient) GetTransactionResultByIndex(ctx context.Context, in * _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.TransactionResultResponse + if len(ret) == 0 { + panic("no return value specified for GetFullCollectionByID") + } + + var r0 *access.FullCollectionResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionByIndexRequest, ...grpc.CallOption) (*access.TransactionResultResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetFullCollectionByIDRequest, ...grpc.CallOption) (*access.FullCollectionResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionByIndexRequest, ...grpc.CallOption) *access.TransactionResultResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetFullCollectionByIDRequest, ...grpc.CallOption) *access.FullCollectionResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.TransactionResultResponse) + r0 = ret.Get(0).(*access.FullCollectionResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetTransactionByIndexRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetFullCollectionByIDRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) 
} else { r1 = ret.Error(1) @@ -743,8 +831,8 @@ func (_m *AccessAPIClient) GetTransactionResultByIndex(ctx context.Context, in * return r0, r1 } -// GetTransactionResultsByBlockID provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetTransactionResultsByBlockID(ctx context.Context, in *access.GetTransactionsByBlockIDRequest, opts ...grpc.CallOption) (*access.TransactionResultsResponse, error) { +// GetLatestBlock provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetLatestBlock(ctx context.Context, in *access.GetLatestBlockRequest, opts ...grpc.CallOption) (*access.BlockResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -754,20 +842,24 @@ func (_m *AccessAPIClient) GetTransactionResultsByBlockID(ctx context.Context, i _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.TransactionResultsResponse + if len(ret) == 0 { + panic("no return value specified for GetLatestBlock") + } + + var r0 *access.BlockResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) (*access.TransactionResultsResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestBlockRequest, ...grpc.CallOption) (*access.BlockResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) *access.TransactionResultsResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestBlockRequest, ...grpc.CallOption) *access.BlockResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.TransactionResultsResponse) + r0 = ret.Get(0).(*access.BlockResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetLatestBlockRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -776,8 +868,8 @@ func (_m *AccessAPIClient) GetTransactionResultsByBlockID(ctx context.Context, i return r0, r1 } -// GetTransactionsByBlockID provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) GetTransactionsByBlockID(ctx context.Context, in *access.GetTransactionsByBlockIDRequest, opts ...grpc.CallOption) (*access.TransactionsResponse, error) { +// GetLatestBlockHeader provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetLatestBlockHeader(ctx context.Context, in *access.GetLatestBlockHeaderRequest, opts ...grpc.CallOption) (*access.BlockHeaderResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -787,20 +879,24 @@ func (_m *AccessAPIClient) GetTransactionsByBlockID(ctx context.Context, in *acc _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.TransactionsResponse + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + + var r0 *access.BlockHeaderResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) (*access.TransactionsResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestBlockHeaderRequest, ...grpc.CallOption) (*access.BlockHeaderResponse, error)); ok { return rf(ctx, in, opts...) 
} - if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) *access.TransactionsResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestBlockHeaderRequest, ...grpc.CallOption) *access.BlockHeaderResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.TransactionsResponse) + r0 = ret.Get(0).(*access.BlockHeaderResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetLatestBlockHeaderRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -809,8 +905,8 @@ func (_m *AccessAPIClient) GetTransactionsByBlockID(ctx context.Context, in *acc return r0, r1 } -// Ping provides a mock function with given fields: ctx, in, opts -func (_m *AccessAPIClient) Ping(ctx context.Context, in *access.PingRequest, opts ...grpc.CallOption) (*access.PingResponse, error) { +// GetLatestProtocolStateSnapshot provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetLatestProtocolStateSnapshot(ctx context.Context, in *access.GetLatestProtocolStateSnapshotRequest, opts ...grpc.CallOption) (*access.ProtocolStateSnapshotResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -820,20 +916,505 @@ func (_m *AccessAPIClient) Ping(ctx context.Context, in *access.PingRequest, opt _ca = append(_ca, _va...) ret := _m.Called(_ca...) - var r0 *access.PingResponse + if len(ret) == 0 { + panic("no return value specified for GetLatestProtocolStateSnapshot") + } + + var r0 *access.ProtocolStateSnapshotResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *access.PingRequest, ...grpc.CallOption) (*access.PingResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestProtocolStateSnapshotRequest, ...grpc.CallOption) (*access.ProtocolStateSnapshotResponse, error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *access.PingRequest, ...grpc.CallOption) *access.PingResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestProtocolStateSnapshotRequest, ...grpc.CallOption) *access.ProtocolStateSnapshotResponse); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*access.PingResponse) + r0 = ret.Get(0).(*access.ProtocolStateSnapshotResponse) } } - if rf, ok := ret.Get(1).(func(context.Context, *access.PingRequest, ...grpc.CallOption) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *access.GetLatestProtocolStateSnapshotRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetNetworkParameters provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetNetworkParameters(ctx context.Context, in *access.GetNetworkParametersRequest, opts ...grpc.CallOption) (*access.GetNetworkParametersResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for GetNetworkParameters") + } + + var r0 *access.GetNetworkParametersResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetNetworkParametersRequest, ...grpc.CallOption) (*access.GetNetworkParametersResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetNetworkParametersRequest, ...grpc.CallOption) *access.GetNetworkParametersResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.GetNetworkParametersResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetNetworkParametersRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetNodeVersionInfo provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetNodeVersionInfo(ctx context.Context, in *access.GetNodeVersionInfoRequest, opts ...grpc.CallOption) (*access.GetNodeVersionInfoResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetNodeVersionInfo") + } + + var r0 *access.GetNodeVersionInfoResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetNodeVersionInfoRequest, ...grpc.CallOption) (*access.GetNodeVersionInfoResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetNodeVersionInfoRequest, ...grpc.CallOption) *access.GetNodeVersionInfoResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.GetNodeVersionInfoResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetNodeVersionInfoRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProtocolStateSnapshotByBlockID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetProtocolStateSnapshotByBlockID(ctx context.Context, in *access.GetProtocolStateSnapshotByBlockIDRequest, opts ...grpc.CallOption) (*access.ProtocolStateSnapshotResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByBlockID") + } + + var r0 *access.ProtocolStateSnapshotResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetProtocolStateSnapshotByBlockIDRequest, ...grpc.CallOption) (*access.ProtocolStateSnapshotResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetProtocolStateSnapshotByBlockIDRequest, ...grpc.CallOption) *access.ProtocolStateSnapshotResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.ProtocolStateSnapshotResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetProtocolStateSnapshotByBlockIDRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProtocolStateSnapshotByHeight provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetProtocolStateSnapshotByHeight(ctx context.Context, in *access.GetProtocolStateSnapshotByHeightRequest, opts ...grpc.CallOption) (*access.ProtocolStateSnapshotResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByHeight") + } + + var r0 *access.ProtocolStateSnapshotResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetProtocolStateSnapshotByHeightRequest, ...grpc.CallOption) (*access.ProtocolStateSnapshotResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetProtocolStateSnapshotByHeightRequest, ...grpc.CallOption) *access.ProtocolStateSnapshotResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.ProtocolStateSnapshotResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetProtocolStateSnapshotByHeightRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransaction provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetSystemTransaction(ctx context.Context, in *access.GetSystemTransactionRequest, opts ...grpc.CallOption) (*access.TransactionResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransaction") + } + + var r0 *access.TransactionResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetSystemTransactionRequest, ...grpc.CallOption) (*access.TransactionResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetSystemTransactionRequest, ...grpc.CallOption) *access.TransactionResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetSystemTransactionRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransactionResult provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetSystemTransactionResult(ctx context.Context, in *access.GetSystemTransactionResultRequest, opts ...grpc.CallOption) (*access.TransactionResultResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransactionResult") + } + + var r0 *access.TransactionResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetSystemTransactionResultRequest, ...grpc.CallOption) (*access.TransactionResultResponse, error)); ok { + return rf(ctx, in, opts...) 
+ } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetSystemTransactionResultRequest, ...grpc.CallOption) *access.TransactionResultResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetSystemTransactionResultRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransaction provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetTransaction(ctx context.Context, in *access.GetTransactionRequest, opts ...grpc.CallOption) (*access.TransactionResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetTransaction") + } + + var r0 *access.TransactionResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) (*access.TransactionResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) *access.TransactionResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionResult provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetTransactionResult(ctx context.Context, in *access.GetTransactionRequest, opts ...grpc.CallOption) (*access.TransactionResultResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionResult") + } + + var r0 *access.TransactionResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) (*access.TransactionResultResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) *access.TransactionResultResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetTransactionRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionResultByIndex provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetTransactionResultByIndex(ctx context.Context, in *access.GetTransactionByIndexRequest, opts ...grpc.CallOption) (*access.TransactionResultResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultByIndex") + } + + var r0 *access.TransactionResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionByIndexRequest, ...grpc.CallOption) (*access.TransactionResultResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionByIndexRequest, ...grpc.CallOption) *access.TransactionResultResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetTransactionByIndexRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionResultsByBlockID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetTransactionResultsByBlockID(ctx context.Context, in *access.GetTransactionsByBlockIDRequest, opts ...grpc.CallOption) (*access.TransactionResultsResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultsByBlockID") + } + + var r0 *access.TransactionResultsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) (*access.TransactionResultsResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) *access.TransactionResultsResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResultsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionsByBlockID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetTransactionsByBlockID(ctx context.Context, in *access.GetTransactionsByBlockIDRequest, opts ...grpc.CallOption) (*access.TransactionsResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionsByBlockID") + } + + var r0 *access.TransactionsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) (*access.TransactionsResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) *access.TransactionsResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetTransactionsByBlockIDRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Ping provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) Ping(ctx context.Context, in *access.PingRequest, opts ...grpc.CallOption) (*access.PingResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Ping") + } + + var r0 *access.PingResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.PingRequest, ...grpc.CallOption) (*access.PingResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.PingRequest, ...grpc.CallOption) *access.PingResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.PingResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.PingRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SendAndSubscribeTransactionStatuses provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) SendAndSubscribeTransactionStatuses(ctx context.Context, in *access.SendAndSubscribeTransactionStatusesRequest, opts ...grpc.CallOption) (access.AccessAPI_SendAndSubscribeTransactionStatusesClient, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SendAndSubscribeTransactionStatuses") + } + + var r0 access.AccessAPI_SendAndSubscribeTransactionStatusesClient + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.SendAndSubscribeTransactionStatusesRequest, ...grpc.CallOption) (access.AccessAPI_SendAndSubscribeTransactionStatusesClient, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.SendAndSubscribeTransactionStatusesRequest, ...grpc.CallOption) access.AccessAPI_SendAndSubscribeTransactionStatusesClient); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPI_SendAndSubscribeTransactionStatusesClient) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.SendAndSubscribeTransactionStatusesRequest, ...grpc.CallOption) error); ok { r1 = rf(ctx, in, opts...) } else { r1 = ret.Error(1) @@ -853,6 +1434,10 @@ func (_m *AccessAPIClient) SendTransaction(ctx context.Context, in *access.SendT _ca = append(_ca, _va...) ret := _m.Called(_ca...) 
+ if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + var r0 *access.SendTransactionResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.SendTransactionRequest, ...grpc.CallOption) (*access.SendTransactionResponse, error)); ok { @@ -875,13 +1460,345 @@ func (_m *AccessAPIClient) SendTransaction(ctx context.Context, in *access.SendT return r0, r1 } -type mockConstructorTestingTNewAccessAPIClient interface { - mock.TestingT - Cleanup(func()) +// SubscribeBlockDigestsFromLatest provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) SubscribeBlockDigestsFromLatest(ctx context.Context, in *access.SubscribeBlockDigestsFromLatestRequest, opts ...grpc.CallOption) (access.AccessAPI_SubscribeBlockDigestsFromLatestClient, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromLatest") + } + + var r0 access.AccessAPI_SubscribeBlockDigestsFromLatestClient + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockDigestsFromLatestRequest, ...grpc.CallOption) (access.AccessAPI_SubscribeBlockDigestsFromLatestClient, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockDigestsFromLatestRequest, ...grpc.CallOption) access.AccessAPI_SubscribeBlockDigestsFromLatestClient); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPI_SubscribeBlockDigestsFromLatestClient) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.SubscribeBlockDigestsFromLatestRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeBlockDigestsFromStartBlockID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) SubscribeBlockDigestsFromStartBlockID(ctx context.Context, in *access.SubscribeBlockDigestsFromStartBlockIDRequest, opts ...grpc.CallOption) (access.AccessAPI_SubscribeBlockDigestsFromStartBlockIDClient, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromStartBlockID") + } + + var r0 access.AccessAPI_SubscribeBlockDigestsFromStartBlockIDClient + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockDigestsFromStartBlockIDRequest, ...grpc.CallOption) (access.AccessAPI_SubscribeBlockDigestsFromStartBlockIDClient, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockDigestsFromStartBlockIDRequest, ...grpc.CallOption) access.AccessAPI_SubscribeBlockDigestsFromStartBlockIDClient); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPI_SubscribeBlockDigestsFromStartBlockIDClient) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.SubscribeBlockDigestsFromStartBlockIDRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeBlockDigestsFromStartHeight provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) SubscribeBlockDigestsFromStartHeight(ctx context.Context, in *access.SubscribeBlockDigestsFromStartHeightRequest, opts ...grpc.CallOption) (access.AccessAPI_SubscribeBlockDigestsFromStartHeightClient, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromStartHeight") + } + + var r0 access.AccessAPI_SubscribeBlockDigestsFromStartHeightClient + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockDigestsFromStartHeightRequest, ...grpc.CallOption) (access.AccessAPI_SubscribeBlockDigestsFromStartHeightClient, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockDigestsFromStartHeightRequest, ...grpc.CallOption) access.AccessAPI_SubscribeBlockDigestsFromStartHeightClient); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPI_SubscribeBlockDigestsFromStartHeightClient) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.SubscribeBlockDigestsFromStartHeightRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeBlockHeadersFromLatest provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) SubscribeBlockHeadersFromLatest(ctx context.Context, in *access.SubscribeBlockHeadersFromLatestRequest, opts ...grpc.CallOption) (access.AccessAPI_SubscribeBlockHeadersFromLatestClient, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromLatest") + } + + var r0 access.AccessAPI_SubscribeBlockHeadersFromLatestClient + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockHeadersFromLatestRequest, ...grpc.CallOption) (access.AccessAPI_SubscribeBlockHeadersFromLatestClient, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockHeadersFromLatestRequest, ...grpc.CallOption) access.AccessAPI_SubscribeBlockHeadersFromLatestClient); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPI_SubscribeBlockHeadersFromLatestClient) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.SubscribeBlockHeadersFromLatestRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeBlockHeadersFromStartBlockID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) SubscribeBlockHeadersFromStartBlockID(ctx context.Context, in *access.SubscribeBlockHeadersFromStartBlockIDRequest, opts ...grpc.CallOption) (access.AccessAPI_SubscribeBlockHeadersFromStartBlockIDClient, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) 
+ ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromStartBlockID") + } + + var r0 access.AccessAPI_SubscribeBlockHeadersFromStartBlockIDClient + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockHeadersFromStartBlockIDRequest, ...grpc.CallOption) (access.AccessAPI_SubscribeBlockHeadersFromStartBlockIDClient, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockHeadersFromStartBlockIDRequest, ...grpc.CallOption) access.AccessAPI_SubscribeBlockHeadersFromStartBlockIDClient); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPI_SubscribeBlockHeadersFromStartBlockIDClient) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.SubscribeBlockHeadersFromStartBlockIDRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeBlockHeadersFromStartHeight provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) SubscribeBlockHeadersFromStartHeight(ctx context.Context, in *access.SubscribeBlockHeadersFromStartHeightRequest, opts ...grpc.CallOption) (access.AccessAPI_SubscribeBlockHeadersFromStartHeightClient, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromStartHeight") + } + + var r0 access.AccessAPI_SubscribeBlockHeadersFromStartHeightClient + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockHeadersFromStartHeightRequest, ...grpc.CallOption) (access.AccessAPI_SubscribeBlockHeadersFromStartHeightClient, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlockHeadersFromStartHeightRequest, ...grpc.CallOption) access.AccessAPI_SubscribeBlockHeadersFromStartHeightClient); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPI_SubscribeBlockHeadersFromStartHeightClient) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.SubscribeBlockHeadersFromStartHeightRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeBlocksFromLatest provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) SubscribeBlocksFromLatest(ctx context.Context, in *access.SubscribeBlocksFromLatestRequest, opts ...grpc.CallOption) (access.AccessAPI_SubscribeBlocksFromLatestClient, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromLatest") + } + + var r0 access.AccessAPI_SubscribeBlocksFromLatestClient + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlocksFromLatestRequest, ...grpc.CallOption) (access.AccessAPI_SubscribeBlocksFromLatestClient, error)); ok { + return rf(ctx, in, opts...) 
+ } + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlocksFromLatestRequest, ...grpc.CallOption) access.AccessAPI_SubscribeBlocksFromLatestClient); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPI_SubscribeBlocksFromLatestClient) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.SubscribeBlocksFromLatestRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeBlocksFromStartBlockID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) SubscribeBlocksFromStartBlockID(ctx context.Context, in *access.SubscribeBlocksFromStartBlockIDRequest, opts ...grpc.CallOption) (access.AccessAPI_SubscribeBlocksFromStartBlockIDClient, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromStartBlockID") + } + + var r0 access.AccessAPI_SubscribeBlocksFromStartBlockIDClient + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlocksFromStartBlockIDRequest, ...grpc.CallOption) (access.AccessAPI_SubscribeBlocksFromStartBlockIDClient, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlocksFromStartBlockIDRequest, ...grpc.CallOption) access.AccessAPI_SubscribeBlocksFromStartBlockIDClient); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPI_SubscribeBlocksFromStartBlockIDClient) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.SubscribeBlocksFromStartBlockIDRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeBlocksFromStartHeight provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) SubscribeBlocksFromStartHeight(ctx context.Context, in *access.SubscribeBlocksFromStartHeightRequest, opts ...grpc.CallOption) (access.AccessAPI_SubscribeBlocksFromStartHeightClient, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromStartHeight") + } + + var r0 access.AccessAPI_SubscribeBlocksFromStartHeightClient + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlocksFromStartHeightRequest, ...grpc.CallOption) (access.AccessAPI_SubscribeBlocksFromStartHeightClient, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.SubscribeBlocksFromStartHeightRequest, ...grpc.CallOption) access.AccessAPI_SubscribeBlocksFromStartHeightClient); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPI_SubscribeBlocksFromStartHeightClient) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.SubscribeBlocksFromStartHeightRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // NewAccessAPIClient creates a new instance of AccessAPIClient. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAccessAPIClient(t mockConstructorTestingTNewAccessAPIClient) *AccessAPIClient { +// The first argument is typically a *testing.T value. +func NewAccessAPIClient(t interface { + mock.TestingT + Cleanup(func()) +}) *AccessAPIClient { mock := &AccessAPIClient{} mock.Mock.Test(t) diff --git a/engine/access/mock/access_api_server.go b/engine/access/mock/access_api_server.go index 5515698eacd..c7af30bdae4 100644 --- a/engine/access/mock/access_api_server.go +++ b/engine/access/mock/access_api_server.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -19,6 +19,10 @@ type AccessAPIServer struct { func (_m *AccessAPIServer) ExecuteScriptAtBlockHeight(_a0 context.Context, _a1 *access.ExecuteScriptAtBlockHeightRequest) (*access.ExecuteScriptResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockHeight") + } + var r0 *access.ExecuteScriptResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.ExecuteScriptAtBlockHeightRequest) (*access.ExecuteScriptResponse, error)); ok { @@ -45,6 +49,10 @@ func (_m *AccessAPIServer) ExecuteScriptAtBlockHeight(_a0 context.Context, _a1 * func (_m *AccessAPIServer) ExecuteScriptAtBlockID(_a0 context.Context, _a1 *access.ExecuteScriptAtBlockIDRequest) (*access.ExecuteScriptResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockID") + } + var r0 *access.ExecuteScriptResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.ExecuteScriptAtBlockIDRequest) (*access.ExecuteScriptResponse, error)); ok { @@ -71,6 +79,10 @@ func (_m *AccessAPIServer) ExecuteScriptAtBlockID(_a0 context.Context, _a1 *acce func (_m *AccessAPIServer) ExecuteScriptAtLatestBlock(_a0 context.Context, _a1 *access.ExecuteScriptAtLatestBlockRequest) (*access.ExecuteScriptResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtLatestBlock") + } + var r0 *access.ExecuteScriptResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.ExecuteScriptAtLatestBlockRequest) (*access.ExecuteScriptResponse, error)); ok { @@ -97,6 +109,10 @@ func (_m *AccessAPIServer) ExecuteScriptAtLatestBlock(_a0 context.Context, _a1 * func (_m *AccessAPIServer) GetAccount(_a0 context.Context, _a1 *access.GetAccountRequest) (*access.GetAccountResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + var r0 *access.GetAccountResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountRequest) (*access.GetAccountResponse, error)); ok { @@ -123,6 +139,10 @@ func (_m *AccessAPIServer) GetAccount(_a0 context.Context, _a1 *access.GetAccoun func (_m *AccessAPIServer) GetAccountAtBlockHeight(_a0 context.Context, _a1 *access.GetAccountAtBlockHeightRequest) (*access.AccountResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetAccountAtBlockHeight") + } + var r0 *access.AccountResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountAtBlockHeightRequest) (*access.AccountResponse, error)); ok { @@ -149,6 +169,10 @@ func (_m *AccessAPIServer) GetAccountAtBlockHeight(_a0 context.Context, _a1 *acc func (_m 
*AccessAPIServer) GetAccountAtLatestBlock(_a0 context.Context, _a1 *access.GetAccountAtLatestBlockRequest) (*access.AccountResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetAccountAtLatestBlock") + } + var r0 *access.AccountResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountAtLatestBlockRequest) (*access.AccountResponse, error)); ok { @@ -171,10 +195,194 @@ func (_m *AccessAPIServer) GetAccountAtLatestBlock(_a0 context.Context, _a1 *acc return r0, r1 } +// GetAccountBalanceAtBlockHeight provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetAccountBalanceAtBlockHeight(_a0 context.Context, _a1 *access.GetAccountBalanceAtBlockHeightRequest) (*access.AccountBalanceResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtBlockHeight") + } + + var r0 *access.AccountBalanceResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountBalanceAtBlockHeightRequest) (*access.AccountBalanceResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountBalanceAtBlockHeightRequest) *access.AccountBalanceResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.AccountBalanceResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountBalanceAtBlockHeightRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountBalanceAtLatestBlock provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetAccountBalanceAtLatestBlock(_a0 context.Context, _a1 *access.GetAccountBalanceAtLatestBlockRequest) (*access.AccountBalanceResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtLatestBlock") + } + + var r0 *access.AccountBalanceResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountBalanceAtLatestBlockRequest) (*access.AccountBalanceResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountBalanceAtLatestBlockRequest) *access.AccountBalanceResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.AccountBalanceResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountBalanceAtLatestBlockRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeyAtBlockHeight provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetAccountKeyAtBlockHeight(_a0 context.Context, _a1 *access.GetAccountKeyAtBlockHeightRequest) (*access.AccountKeyResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtBlockHeight") + } + + var r0 *access.AccountKeyResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeyAtBlockHeightRequest) (*access.AccountKeyResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeyAtBlockHeightRequest) *access.AccountKeyResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.AccountKeyResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, 
*access.GetAccountKeyAtBlockHeightRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeyAtLatestBlock provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetAccountKeyAtLatestBlock(_a0 context.Context, _a1 *access.GetAccountKeyAtLatestBlockRequest) (*access.AccountKeyResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtLatestBlock") + } + + var r0 *access.AccountKeyResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeyAtLatestBlockRequest) (*access.AccountKeyResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeyAtLatestBlockRequest) *access.AccountKeyResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.AccountKeyResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountKeyAtLatestBlockRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeysAtBlockHeight provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetAccountKeysAtBlockHeight(_a0 context.Context, _a1 *access.GetAccountKeysAtBlockHeightRequest) (*access.AccountKeysResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtBlockHeight") + } + + var r0 *access.AccountKeysResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeysAtBlockHeightRequest) (*access.AccountKeysResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeysAtBlockHeightRequest) *access.AccountKeysResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.AccountKeysResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountKeysAtBlockHeightRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeysAtLatestBlock provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetAccountKeysAtLatestBlock(_a0 context.Context, _a1 *access.GetAccountKeysAtLatestBlockRequest) (*access.AccountKeysResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtLatestBlock") + } + + var r0 *access.AccountKeysResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeysAtLatestBlockRequest) (*access.AccountKeysResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetAccountKeysAtLatestBlockRequest) *access.AccountKeysResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.AccountKeysResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetAccountKeysAtLatestBlockRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetBlockByHeight provides a mock function with given fields: _a0, _a1 func (_m *AccessAPIServer) GetBlockByHeight(_a0 context.Context, _a1 *access.GetBlockByHeightRequest) (*access.BlockResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetBlockByHeight") + } + var r0 *access.BlockResponse var r1 error if rf, ok := 
ret.Get(0).(func(context.Context, *access.GetBlockByHeightRequest) (*access.BlockResponse, error)); ok { @@ -201,6 +409,10 @@ func (_m *AccessAPIServer) GetBlockByHeight(_a0 context.Context, _a1 *access.Get func (_m *AccessAPIServer) GetBlockByID(_a0 context.Context, _a1 *access.GetBlockByIDRequest) (*access.BlockResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetBlockByID") + } + var r0 *access.BlockResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockByIDRequest) (*access.BlockResponse, error)); ok { @@ -227,6 +439,10 @@ func (_m *AccessAPIServer) GetBlockByID(_a0 context.Context, _a1 *access.GetBloc func (_m *AccessAPIServer) GetBlockHeaderByHeight(_a0 context.Context, _a1 *access.GetBlockHeaderByHeightRequest) (*access.BlockHeaderResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByHeight") + } + var r0 *access.BlockHeaderResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockHeaderByHeightRequest) (*access.BlockHeaderResponse, error)); ok { @@ -253,6 +469,10 @@ func (_m *AccessAPIServer) GetBlockHeaderByHeight(_a0 context.Context, _a1 *acce func (_m *AccessAPIServer) GetBlockHeaderByID(_a0 context.Context, _a1 *access.GetBlockHeaderByIDRequest) (*access.BlockHeaderResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByID") + } + var r0 *access.BlockHeaderResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetBlockHeaderByIDRequest) (*access.BlockHeaderResponse, error)); ok { @@ -279,6 +499,10 @@ func (_m *AccessAPIServer) GetBlockHeaderByID(_a0 context.Context, _a1 *access.G func (_m *AccessAPIServer) GetCollectionByID(_a0 context.Context, _a1 *access.GetCollectionByIDRequest) (*access.CollectionResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetCollectionByID") + } + var r0 *access.CollectionResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetCollectionByIDRequest) (*access.CollectionResponse, error)); ok { @@ -305,6 +529,10 @@ func (_m *AccessAPIServer) GetCollectionByID(_a0 context.Context, _a1 *access.Ge func (_m *AccessAPIServer) GetEventsForBlockIDs(_a0 context.Context, _a1 *access.GetEventsForBlockIDsRequest) (*access.EventsResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetEventsForBlockIDs") + } + var r0 *access.EventsResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetEventsForBlockIDsRequest) (*access.EventsResponse, error)); ok { @@ -331,6 +559,10 @@ func (_m *AccessAPIServer) GetEventsForBlockIDs(_a0 context.Context, _a1 *access func (_m *AccessAPIServer) GetEventsForHeightRange(_a0 context.Context, _a1 *access.GetEventsForHeightRangeRequest) (*access.EventsResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetEventsForHeightRange") + } + var r0 *access.EventsResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetEventsForHeightRangeRequest) (*access.EventsResponse, error)); ok { @@ -353,10 +585,44 @@ func (_m *AccessAPIServer) GetEventsForHeightRange(_a0 context.Context, _a1 *acc return r0, r1 } +// GetExecutionResultByID provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetExecutionResultByID(_a0 context.Context, _a1 
*access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetExecutionResultByID") + } + + var r0 *access.ExecutionResultByIDResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultByIDRequest) *access.ExecutionResultByIDResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.ExecutionResultByIDResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetExecutionResultByIDRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetExecutionResultForBlockID provides a mock function with given fields: _a0, _a1 func (_m *AccessAPIServer) GetExecutionResultForBlockID(_a0 context.Context, _a1 *access.GetExecutionResultForBlockIDRequest) (*access.ExecutionResultForBlockIDResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetExecutionResultForBlockID") + } + var r0 *access.ExecutionResultForBlockIDResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultForBlockIDRequest) (*access.ExecutionResultForBlockIDResponse, error)); ok { @@ -379,10 +645,44 @@ func (_m *AccessAPIServer) GetExecutionResultForBlockID(_a0 context.Context, _a1 return r0, r1 } +// GetFullCollectionByID provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetFullCollectionByID(_a0 context.Context, _a1 *access.GetFullCollectionByIDRequest) (*access.FullCollectionResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetFullCollectionByID") + } + + var r0 *access.FullCollectionResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetFullCollectionByIDRequest) (*access.FullCollectionResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetFullCollectionByIDRequest) *access.FullCollectionResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.FullCollectionResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetFullCollectionByIDRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetLatestBlock provides a mock function with given fields: _a0, _a1 func (_m *AccessAPIServer) GetLatestBlock(_a0 context.Context, _a1 *access.GetLatestBlockRequest) (*access.BlockResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlock") + } + var r0 *access.BlockResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestBlockRequest) (*access.BlockResponse, error)); ok { @@ -409,6 +709,10 @@ func (_m *AccessAPIServer) GetLatestBlock(_a0 context.Context, _a1 *access.GetLa func (_m *AccessAPIServer) GetLatestBlockHeader(_a0 context.Context, _a1 *access.GetLatestBlockHeaderRequest) (*access.BlockHeaderResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + var r0 *access.BlockHeaderResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestBlockHeaderRequest) 
(*access.BlockHeaderResponse, error)); ok { @@ -435,6 +739,10 @@ func (_m *AccessAPIServer) GetLatestBlockHeader(_a0 context.Context, _a1 *access func (_m *AccessAPIServer) GetLatestProtocolStateSnapshot(_a0 context.Context, _a1 *access.GetLatestProtocolStateSnapshotRequest) (*access.ProtocolStateSnapshotResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetLatestProtocolStateSnapshot") + } + var r0 *access.ProtocolStateSnapshotResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetLatestProtocolStateSnapshotRequest) (*access.ProtocolStateSnapshotResponse, error)); ok { @@ -461,6 +769,10 @@ func (_m *AccessAPIServer) GetLatestProtocolStateSnapshot(_a0 context.Context, _ func (_m *AccessAPIServer) GetNetworkParameters(_a0 context.Context, _a1 *access.GetNetworkParametersRequest) (*access.GetNetworkParametersResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetNetworkParameters") + } + var r0 *access.GetNetworkParametersResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetNetworkParametersRequest) (*access.GetNetworkParametersResponse, error)); ok { @@ -487,6 +799,10 @@ func (_m *AccessAPIServer) GetNetworkParameters(_a0 context.Context, _a1 *access func (_m *AccessAPIServer) GetNodeVersionInfo(_a0 context.Context, _a1 *access.GetNodeVersionInfoRequest) (*access.GetNodeVersionInfoResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetNodeVersionInfo") + } + var r0 *access.GetNodeVersionInfoResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetNodeVersionInfoRequest) (*access.GetNodeVersionInfoResponse, error)); ok { @@ -509,10 +825,134 @@ func (_m *AccessAPIServer) GetNodeVersionInfo(_a0 context.Context, _a1 *access.G return r0, r1 } +// GetProtocolStateSnapshotByBlockID provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetProtocolStateSnapshotByBlockID(_a0 context.Context, _a1 *access.GetProtocolStateSnapshotByBlockIDRequest) (*access.ProtocolStateSnapshotResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByBlockID") + } + + var r0 *access.ProtocolStateSnapshotResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetProtocolStateSnapshotByBlockIDRequest) (*access.ProtocolStateSnapshotResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetProtocolStateSnapshotByBlockIDRequest) *access.ProtocolStateSnapshotResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.ProtocolStateSnapshotResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetProtocolStateSnapshotByBlockIDRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProtocolStateSnapshotByHeight provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetProtocolStateSnapshotByHeight(_a0 context.Context, _a1 *access.GetProtocolStateSnapshotByHeightRequest) (*access.ProtocolStateSnapshotResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByHeight") + } + + var r0 *access.ProtocolStateSnapshotResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, 
*access.GetProtocolStateSnapshotByHeightRequest) (*access.ProtocolStateSnapshotResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetProtocolStateSnapshotByHeightRequest) *access.ProtocolStateSnapshotResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.ProtocolStateSnapshotResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetProtocolStateSnapshotByHeightRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransaction provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetSystemTransaction(_a0 context.Context, _a1 *access.GetSystemTransactionRequest) (*access.TransactionResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransaction") + } + + var r0 *access.TransactionResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetSystemTransactionRequest) (*access.TransactionResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetSystemTransactionRequest) *access.TransactionResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetSystemTransactionRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransactionResult provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetSystemTransactionResult(_a0 context.Context, _a1 *access.GetSystemTransactionResultRequest) (*access.TransactionResultResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransactionResult") + } + + var r0 *access.TransactionResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetSystemTransactionResultRequest) (*access.TransactionResultResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetSystemTransactionResultRequest) *access.TransactionResultResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetSystemTransactionResultRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetTransaction provides a mock function with given fields: _a0, _a1 func (_m *AccessAPIServer) GetTransaction(_a0 context.Context, _a1 *access.GetTransactionRequest) (*access.TransactionResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetTransaction") + } + var r0 *access.TransactionResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionRequest) (*access.TransactionResponse, error)); ok { @@ -539,6 +979,10 @@ func (_m *AccessAPIServer) GetTransaction(_a0 context.Context, _a1 *access.GetTr func (_m *AccessAPIServer) GetTransactionResult(_a0 context.Context, _a1 *access.GetTransactionRequest) (*access.TransactionResultResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResult") + } + var r0 *access.TransactionResultResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, 
*access.GetTransactionRequest) (*access.TransactionResultResponse, error)); ok { @@ -565,6 +1009,10 @@ func (_m *AccessAPIServer) GetTransactionResult(_a0 context.Context, _a1 *access func (_m *AccessAPIServer) GetTransactionResultByIndex(_a0 context.Context, _a1 *access.GetTransactionByIndexRequest) (*access.TransactionResultResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultByIndex") + } + var r0 *access.TransactionResultResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionByIndexRequest) (*access.TransactionResultResponse, error)); ok { @@ -591,6 +1039,10 @@ func (_m *AccessAPIServer) GetTransactionResultByIndex(_a0 context.Context, _a1 func (_m *AccessAPIServer) GetTransactionResultsByBlockID(_a0 context.Context, _a1 *access.GetTransactionsByBlockIDRequest) (*access.TransactionResultsResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultsByBlockID") + } + var r0 *access.TransactionResultsResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionsByBlockIDRequest) (*access.TransactionResultsResponse, error)); ok { @@ -617,6 +1069,10 @@ func (_m *AccessAPIServer) GetTransactionResultsByBlockID(_a0 context.Context, _ func (_m *AccessAPIServer) GetTransactionsByBlockID(_a0 context.Context, _a1 *access.GetTransactionsByBlockIDRequest) (*access.TransactionsResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetTransactionsByBlockID") + } + var r0 *access.TransactionsResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.GetTransactionsByBlockIDRequest) (*access.TransactionsResponse, error)); ok { @@ -643,6 +1099,10 @@ func (_m *AccessAPIServer) GetTransactionsByBlockID(_a0 context.Context, _a1 *ac func (_m *AccessAPIServer) Ping(_a0 context.Context, _a1 *access.PingRequest) (*access.PingResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Ping") + } + var r0 *access.PingResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.PingRequest) (*access.PingResponse, error)); ok { @@ -665,10 +1125,32 @@ func (_m *AccessAPIServer) Ping(_a0 context.Context, _a1 *access.PingRequest) (* return r0, r1 } +// SendAndSubscribeTransactionStatuses provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) SendAndSubscribeTransactionStatuses(_a0 *access.SendAndSubscribeTransactionStatusesRequest, _a1 access.AccessAPI_SendAndSubscribeTransactionStatusesServer) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SendAndSubscribeTransactionStatuses") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*access.SendAndSubscribeTransactionStatusesRequest, access.AccessAPI_SendAndSubscribeTransactionStatusesServer) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // SendTransaction provides a mock function with given fields: _a0, _a1 func (_m *AccessAPIServer) SendTransaction(_a0 context.Context, _a1 *access.SendTransactionRequest) (*access.SendTransactionResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + var r0 *access.SendTransactionResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *access.SendTransactionRequest) (*access.SendTransactionResponse, 
error)); ok { @@ -691,13 +1173,174 @@ func (_m *AccessAPIServer) SendTransaction(_a0 context.Context, _a1 *access.Send return r0, r1 } -type mockConstructorTestingTNewAccessAPIServer interface { - mock.TestingT - Cleanup(func()) +// SubscribeBlockDigestsFromLatest provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) SubscribeBlockDigestsFromLatest(_a0 *access.SubscribeBlockDigestsFromLatestRequest, _a1 access.AccessAPI_SubscribeBlockDigestsFromLatestServer) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromLatest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*access.SubscribeBlockDigestsFromLatestRequest, access.AccessAPI_SubscribeBlockDigestsFromLatestServer) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SubscribeBlockDigestsFromStartBlockID provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) SubscribeBlockDigestsFromStartBlockID(_a0 *access.SubscribeBlockDigestsFromStartBlockIDRequest, _a1 access.AccessAPI_SubscribeBlockDigestsFromStartBlockIDServer) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromStartBlockID") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*access.SubscribeBlockDigestsFromStartBlockIDRequest, access.AccessAPI_SubscribeBlockDigestsFromStartBlockIDServer) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SubscribeBlockDigestsFromStartHeight provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) SubscribeBlockDigestsFromStartHeight(_a0 *access.SubscribeBlockDigestsFromStartHeightRequest, _a1 access.AccessAPI_SubscribeBlockDigestsFromStartHeightServer) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromStartHeight") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*access.SubscribeBlockDigestsFromStartHeightRequest, access.AccessAPI_SubscribeBlockDigestsFromStartHeightServer) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SubscribeBlockHeadersFromLatest provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) SubscribeBlockHeadersFromLatest(_a0 *access.SubscribeBlockHeadersFromLatestRequest, _a1 access.AccessAPI_SubscribeBlockHeadersFromLatestServer) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromLatest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*access.SubscribeBlockHeadersFromLatestRequest, access.AccessAPI_SubscribeBlockHeadersFromLatestServer) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SubscribeBlockHeadersFromStartBlockID provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) SubscribeBlockHeadersFromStartBlockID(_a0 *access.SubscribeBlockHeadersFromStartBlockIDRequest, _a1 access.AccessAPI_SubscribeBlockHeadersFromStartBlockIDServer) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromStartBlockID") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*access.SubscribeBlockHeadersFromStartBlockIDRequest, access.AccessAPI_SubscribeBlockHeadersFromStartBlockIDServer) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// 
SubscribeBlockHeadersFromStartHeight provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) SubscribeBlockHeadersFromStartHeight(_a0 *access.SubscribeBlockHeadersFromStartHeightRequest, _a1 access.AccessAPI_SubscribeBlockHeadersFromStartHeightServer) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromStartHeight") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*access.SubscribeBlockHeadersFromStartHeightRequest, access.AccessAPI_SubscribeBlockHeadersFromStartHeightServer) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SubscribeBlocksFromLatest provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) SubscribeBlocksFromLatest(_a0 *access.SubscribeBlocksFromLatestRequest, _a1 access.AccessAPI_SubscribeBlocksFromLatestServer) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromLatest") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*access.SubscribeBlocksFromLatestRequest, access.AccessAPI_SubscribeBlocksFromLatestServer) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SubscribeBlocksFromStartBlockID provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) SubscribeBlocksFromStartBlockID(_a0 *access.SubscribeBlocksFromStartBlockIDRequest, _a1 access.AccessAPI_SubscribeBlocksFromStartBlockIDServer) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromStartBlockID") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*access.SubscribeBlocksFromStartBlockIDRequest, access.AccessAPI_SubscribeBlocksFromStartBlockIDServer) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SubscribeBlocksFromStartHeight provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) SubscribeBlocksFromStartHeight(_a0 *access.SubscribeBlocksFromStartHeightRequest, _a1 access.AccessAPI_SubscribeBlocksFromStartHeightServer) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromStartHeight") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*access.SubscribeBlocksFromStartHeightRequest, access.AccessAPI_SubscribeBlocksFromStartHeightServer) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 } // NewAccessAPIServer creates a new instance of AccessAPIServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAccessAPIServer(t mockConstructorTestingTNewAccessAPIServer) *AccessAPIServer { +// The first argument is typically a *testing.T value. +func NewAccessAPIServer(t interface { + mock.TestingT + Cleanup(func()) +}) *AccessAPIServer { mock := &AccessAPIServer{} mock.Mock.Test(t) diff --git a/engine/access/mock/execution_api_client.go b/engine/access/mock/execution_api_client.go index 759ca90c81f..21dc0f71b8f 100644 --- a/engine/access/mock/execution_api_client.go +++ b/engine/access/mock/execution_api_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -27,6 +27,10 @@ func (_m *ExecutionAPIClient) ExecuteScriptAtBlockID(ctx context.Context, in *ex _ca = append(_ca, _va...) ret := _m.Called(_ca...) 
+ if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockID") + } + var r0 *execution.ExecuteScriptAtBlockIDResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.ExecuteScriptAtBlockIDRequest, ...grpc.CallOption) (*execution.ExecuteScriptAtBlockIDResponse, error)); ok { @@ -60,6 +64,10 @@ func (_m *ExecutionAPIClient) GetAccountAtBlockID(ctx context.Context, in *execu _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetAccountAtBlockID") + } + var r0 *execution.GetAccountAtBlockIDResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetAccountAtBlockIDRequest, ...grpc.CallOption) (*execution.GetAccountAtBlockIDResponse, error)); ok { @@ -93,6 +101,10 @@ func (_m *ExecutionAPIClient) GetBlockHeaderByID(ctx context.Context, in *execut _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByID") + } + var r0 *execution.BlockHeaderResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetBlockHeaderByIDRequest, ...grpc.CallOption) (*execution.BlockHeaderResponse, error)); ok { @@ -126,6 +138,10 @@ func (_m *ExecutionAPIClient) GetEventsForBlockIDs(ctx context.Context, in *exec _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetEventsForBlockIDs") + } + var r0 *execution.GetEventsForBlockIDsResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetEventsForBlockIDsRequest, ...grpc.CallOption) (*execution.GetEventsForBlockIDsResponse, error)); ok { @@ -159,6 +175,10 @@ func (_m *ExecutionAPIClient) GetLatestBlockHeader(ctx context.Context, in *exec _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + var r0 *execution.BlockHeaderResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetLatestBlockHeaderRequest, ...grpc.CallOption) (*execution.BlockHeaderResponse, error)); ok { @@ -192,6 +212,10 @@ func (_m *ExecutionAPIClient) GetRegisterAtBlockID(ctx context.Context, in *exec _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetRegisterAtBlockID") + } + var r0 *execution.GetRegisterAtBlockIDResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetRegisterAtBlockIDRequest, ...grpc.CallOption) (*execution.GetRegisterAtBlockIDResponse, error)); ok { @@ -214,6 +238,154 @@ func (_m *ExecutionAPIClient) GetRegisterAtBlockID(ctx context.Context, in *exec return r0, r1 } +// GetTransactionErrorMessage provides a mock function with given fields: ctx, in, opts +func (_m *ExecutionAPIClient) GetTransactionErrorMessage(ctx context.Context, in *execution.GetTransactionErrorMessageRequest, opts ...grpc.CallOption) (*execution.GetTransactionErrorMessageResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for GetTransactionErrorMessage") + } + + var r0 *execution.GetTransactionErrorMessageResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessageRequest, ...grpc.CallOption) (*execution.GetTransactionErrorMessageResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessageRequest, ...grpc.CallOption) *execution.GetTransactionErrorMessageResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionErrorMessageResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetTransactionErrorMessageRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionErrorMessageByIndex provides a mock function with given fields: ctx, in, opts +func (_m *ExecutionAPIClient) GetTransactionErrorMessageByIndex(ctx context.Context, in *execution.GetTransactionErrorMessageByIndexRequest, opts ...grpc.CallOption) (*execution.GetTransactionErrorMessageResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionErrorMessageByIndex") + } + + var r0 *execution.GetTransactionErrorMessageResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessageByIndexRequest, ...grpc.CallOption) (*execution.GetTransactionErrorMessageResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessageByIndexRequest, ...grpc.CallOption) *execution.GetTransactionErrorMessageResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionErrorMessageResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetTransactionErrorMessageByIndexRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionErrorMessagesByBlockID provides a mock function with given fields: ctx, in, opts +func (_m *ExecutionAPIClient) GetTransactionErrorMessagesByBlockID(ctx context.Context, in *execution.GetTransactionErrorMessagesByBlockIDRequest, opts ...grpc.CallOption) (*execution.GetTransactionErrorMessagesResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionErrorMessagesByBlockID") + } + + var r0 *execution.GetTransactionErrorMessagesResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessagesByBlockIDRequest, ...grpc.CallOption) (*execution.GetTransactionErrorMessagesResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessagesByBlockIDRequest, ...grpc.CallOption) *execution.GetTransactionErrorMessagesResponse); ok { + r0 = rf(ctx, in, opts...) 
+ } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionErrorMessagesResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetTransactionErrorMessagesByBlockIDRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionExecutionMetricsAfter provides a mock function with given fields: ctx, in, opts +func (_m *ExecutionAPIClient) GetTransactionExecutionMetricsAfter(ctx context.Context, in *execution.GetTransactionExecutionMetricsAfterRequest, opts ...grpc.CallOption) (*execution.GetTransactionExecutionMetricsAfterResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionExecutionMetricsAfter") + } + + var r0 *execution.GetTransactionExecutionMetricsAfterResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionExecutionMetricsAfterRequest, ...grpc.CallOption) (*execution.GetTransactionExecutionMetricsAfterResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionExecutionMetricsAfterRequest, ...grpc.CallOption) *execution.GetTransactionExecutionMetricsAfterResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionExecutionMetricsAfterResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetTransactionExecutionMetricsAfterRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetTransactionResult provides a mock function with given fields: ctx, in, opts func (_m *ExecutionAPIClient) GetTransactionResult(ctx context.Context, in *execution.GetTransactionResultRequest, opts ...grpc.CallOption) (*execution.GetTransactionResultResponse, error) { _va := make([]interface{}, len(opts)) @@ -225,6 +397,10 @@ func (_m *ExecutionAPIClient) GetTransactionResult(ctx context.Context, in *exec _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResult") + } + var r0 *execution.GetTransactionResultResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionResultRequest, ...grpc.CallOption) (*execution.GetTransactionResultResponse, error)); ok { @@ -258,6 +434,10 @@ func (_m *ExecutionAPIClient) GetTransactionResultByIndex(ctx context.Context, i _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultByIndex") + } + var r0 *execution.GetTransactionResultResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionByIndexRequest, ...grpc.CallOption) (*execution.GetTransactionResultResponse, error)); ok { @@ -291,6 +471,10 @@ func (_m *ExecutionAPIClient) GetTransactionResultsByBlockID(ctx context.Context _ca = append(_ca, _va...) ret := _m.Called(_ca...) 
+ if len(ret) == 0 { + panic("no return value specified for GetTransactionResultsByBlockID") + } + var r0 *execution.GetTransactionResultsResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionsByBlockIDRequest, ...grpc.CallOption) (*execution.GetTransactionResultsResponse, error)); ok { @@ -324,6 +508,10 @@ func (_m *ExecutionAPIClient) Ping(ctx context.Context, in *execution.PingReques _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for Ping") + } + var r0 *execution.PingResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.PingRequest, ...grpc.CallOption) (*execution.PingResponse, error)); ok { @@ -346,13 +534,12 @@ func (_m *ExecutionAPIClient) Ping(ctx context.Context, in *execution.PingReques return r0, r1 } -type mockConstructorTestingTNewExecutionAPIClient interface { +// NewExecutionAPIClient creates a new instance of ExecutionAPIClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionAPIClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewExecutionAPIClient creates a new instance of ExecutionAPIClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewExecutionAPIClient(t mockConstructorTestingTNewExecutionAPIClient) *ExecutionAPIClient { +}) *ExecutionAPIClient { mock := &ExecutionAPIClient{} mock.Mock.Test(t) diff --git a/engine/access/mock/execution_api_server.go b/engine/access/mock/execution_api_server.go index 32ff605850a..0f66134f6b1 100644 --- a/engine/access/mock/execution_api_server.go +++ b/engine/access/mock/execution_api_server.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
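The mocks regenerated in this diff (note the mockery version pin dropped from the generated-code header) now panic with "no return value specified for <Method>" whenever a mock method is invoked without a configured expectation, instead of silently returning zero values. A minimal sketch of a test against the regenerated `AccessAPIServer` mock; the import paths and aliases are assumptions inferred from this diff's file layout:

```go
package example_test

import (
	"context"
	"testing"

	mocktestify "github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	accessproto "github.com/onflow/flow/protobuf/go/flow/access"

	accessmock "github.com/onflow/flow-go/engine/access/mock"
)

func TestPingMock(t *testing.T) {
	// The constructor accepts any value satisfying mock.TestingT with Cleanup;
	// expectations are asserted automatically when the test finishes.
	server := accessmock.NewAccessAPIServer(t)

	// Without this expectation, the Ping call below would panic with
	// "no return value specified for Ping".
	server.On("Ping", mocktestify.Anything, mocktestify.Anything).
		Return(&accessproto.PingResponse{}, nil)

	resp, err := server.Ping(context.Background(), &accessproto.PingRequest{})
	require.NoError(t, err)
	require.NotNil(t, resp)
}
```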
package mock @@ -18,6 +18,10 @@ type ExecutionAPIServer struct { func (_m *ExecutionAPIServer) ExecuteScriptAtBlockID(_a0 context.Context, _a1 *execution.ExecuteScriptAtBlockIDRequest) (*execution.ExecuteScriptAtBlockIDResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockID") + } + var r0 *execution.ExecuteScriptAtBlockIDResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.ExecuteScriptAtBlockIDRequest) (*execution.ExecuteScriptAtBlockIDResponse, error)); ok { @@ -44,6 +48,10 @@ func (_m *ExecutionAPIServer) ExecuteScriptAtBlockID(_a0 context.Context, _a1 *e func (_m *ExecutionAPIServer) GetAccountAtBlockID(_a0 context.Context, _a1 *execution.GetAccountAtBlockIDRequest) (*execution.GetAccountAtBlockIDResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetAccountAtBlockID") + } + var r0 *execution.GetAccountAtBlockIDResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetAccountAtBlockIDRequest) (*execution.GetAccountAtBlockIDResponse, error)); ok { @@ -70,6 +78,10 @@ func (_m *ExecutionAPIServer) GetAccountAtBlockID(_a0 context.Context, _a1 *exec func (_m *ExecutionAPIServer) GetBlockHeaderByID(_a0 context.Context, _a1 *execution.GetBlockHeaderByIDRequest) (*execution.BlockHeaderResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByID") + } + var r0 *execution.BlockHeaderResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetBlockHeaderByIDRequest) (*execution.BlockHeaderResponse, error)); ok { @@ -96,6 +108,10 @@ func (_m *ExecutionAPIServer) GetBlockHeaderByID(_a0 context.Context, _a1 *execu func (_m *ExecutionAPIServer) GetEventsForBlockIDs(_a0 context.Context, _a1 *execution.GetEventsForBlockIDsRequest) (*execution.GetEventsForBlockIDsResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetEventsForBlockIDs") + } + var r0 *execution.GetEventsForBlockIDsResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetEventsForBlockIDsRequest) (*execution.GetEventsForBlockIDsResponse, error)); ok { @@ -122,6 +138,10 @@ func (_m *ExecutionAPIServer) GetEventsForBlockIDs(_a0 context.Context, _a1 *exe func (_m *ExecutionAPIServer) GetLatestBlockHeader(_a0 context.Context, _a1 *execution.GetLatestBlockHeaderRequest) (*execution.BlockHeaderResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + var r0 *execution.BlockHeaderResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetLatestBlockHeaderRequest) (*execution.BlockHeaderResponse, error)); ok { @@ -148,6 +168,10 @@ func (_m *ExecutionAPIServer) GetLatestBlockHeader(_a0 context.Context, _a1 *exe func (_m *ExecutionAPIServer) GetRegisterAtBlockID(_a0 context.Context, _a1 *execution.GetRegisterAtBlockIDRequest) (*execution.GetRegisterAtBlockIDResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetRegisterAtBlockID") + } + var r0 *execution.GetRegisterAtBlockIDResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetRegisterAtBlockIDRequest) (*execution.GetRegisterAtBlockIDResponse, error)); ok { @@ -170,10 +194,134 @@ func (_m *ExecutionAPIServer) GetRegisterAtBlockID(_a0 context.Context, _a1 *exe return r0, r1 } +// 
GetTransactionErrorMessage provides a mock function with given fields: _a0, _a1 +func (_m *ExecutionAPIServer) GetTransactionErrorMessage(_a0 context.Context, _a1 *execution.GetTransactionErrorMessageRequest) (*execution.GetTransactionErrorMessageResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionErrorMessage") + } + + var r0 *execution.GetTransactionErrorMessageResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessageRequest) (*execution.GetTransactionErrorMessageResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessageRequest) *execution.GetTransactionErrorMessageResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionErrorMessageResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetTransactionErrorMessageRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionErrorMessageByIndex provides a mock function with given fields: _a0, _a1 +func (_m *ExecutionAPIServer) GetTransactionErrorMessageByIndex(_a0 context.Context, _a1 *execution.GetTransactionErrorMessageByIndexRequest) (*execution.GetTransactionErrorMessageResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionErrorMessageByIndex") + } + + var r0 *execution.GetTransactionErrorMessageResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessageByIndexRequest) (*execution.GetTransactionErrorMessageResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessageByIndexRequest) *execution.GetTransactionErrorMessageResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionErrorMessageResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetTransactionErrorMessageByIndexRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionErrorMessagesByBlockID provides a mock function with given fields: _a0, _a1 +func (_m *ExecutionAPIServer) GetTransactionErrorMessagesByBlockID(_a0 context.Context, _a1 *execution.GetTransactionErrorMessagesByBlockIDRequest) (*execution.GetTransactionErrorMessagesResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionErrorMessagesByBlockID") + } + + var r0 *execution.GetTransactionErrorMessagesResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessagesByBlockIDRequest) (*execution.GetTransactionErrorMessagesResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionErrorMessagesByBlockIDRequest) *execution.GetTransactionErrorMessagesResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionErrorMessagesResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetTransactionErrorMessagesByBlockIDRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionExecutionMetricsAfter provides a mock function with given fields: _a0, 
_a1 +func (_m *ExecutionAPIServer) GetTransactionExecutionMetricsAfter(_a0 context.Context, _a1 *execution.GetTransactionExecutionMetricsAfterRequest) (*execution.GetTransactionExecutionMetricsAfterResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionExecutionMetricsAfter") + } + + var r0 *execution.GetTransactionExecutionMetricsAfterResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionExecutionMetricsAfterRequest) (*execution.GetTransactionExecutionMetricsAfterResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionExecutionMetricsAfterRequest) *execution.GetTransactionExecutionMetricsAfterResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionExecutionMetricsAfterResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetTransactionExecutionMetricsAfterRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetTransactionResult provides a mock function with given fields: _a0, _a1 func (_m *ExecutionAPIServer) GetTransactionResult(_a0 context.Context, _a1 *execution.GetTransactionResultRequest) (*execution.GetTransactionResultResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResult") + } + var r0 *execution.GetTransactionResultResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionResultRequest) (*execution.GetTransactionResultResponse, error)); ok { @@ -200,6 +348,10 @@ func (_m *ExecutionAPIServer) GetTransactionResult(_a0 context.Context, _a1 *exe func (_m *ExecutionAPIServer) GetTransactionResultByIndex(_a0 context.Context, _a1 *execution.GetTransactionByIndexRequest) (*execution.GetTransactionResultResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultByIndex") + } + var r0 *execution.GetTransactionResultResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionByIndexRequest) (*execution.GetTransactionResultResponse, error)); ok { @@ -226,6 +378,10 @@ func (_m *ExecutionAPIServer) GetTransactionResultByIndex(_a0 context.Context, _ func (_m *ExecutionAPIServer) GetTransactionResultsByBlockID(_a0 context.Context, _a1 *execution.GetTransactionsByBlockIDRequest) (*execution.GetTransactionResultsResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultsByBlockID") + } + var r0 *execution.GetTransactionResultsResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.GetTransactionsByBlockIDRequest) (*execution.GetTransactionResultsResponse, error)); ok { @@ -252,6 +408,10 @@ func (_m *ExecutionAPIServer) GetTransactionResultsByBlockID(_a0 context.Context func (_m *ExecutionAPIServer) Ping(_a0 context.Context, _a1 *execution.PingRequest) (*execution.PingResponse, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Ping") + } + var r0 *execution.PingResponse var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution.PingRequest) (*execution.PingResponse, error)); ok { @@ -274,13 +434,12 @@ func (_m *ExecutionAPIServer) Ping(_a0 context.Context, _a1 *execution.PingReque return r0, r1 } -type 
mockConstructorTestingTNewExecutionAPIServer interface { +// NewExecutionAPIServer creates a new instance of ExecutionAPIServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionAPIServer(t interface { mock.TestingT Cleanup(func()) -} - -// NewExecutionAPIServer creates a new instance of ExecutionAPIServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewExecutionAPIServer(t mockConstructorTestingTNewExecutionAPIServer) *ExecutionAPIServer { +}) *ExecutionAPIServer { mock := &ExecutionAPIServer{} mock.Mock.Test(t) diff --git a/engine/access/ping/engine.go b/engine/access/ping/engine.go index e85128fccdb..d1ecb567030 100644 --- a/engine/access/ping/engine.go +++ b/engine/access/ping/engine.go @@ -2,32 +2,43 @@ package ping import ( "context" - "encoding/binary" "time" "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" ) -// PingTimeout is maximum time to wait for a ping reply from a remote node -const PingTimeout = time.Second * 4 -const PingInterval = time.Minute +const ( + // PingTimeout is maximum time to wait for a ping reply from a remote node + PingTimeout = time.Second * 4 + + // PingInterval is the interval between pings to remote nodes + PingInterval = time.Minute + + // MaxConcurrentPings is the maximum number of ping requests that can be sent concurrently + MaxConcurrentPings = 100 + + // MaxJitter is the maximum time to pause between nodes during ping + MaxJitter = 5 * time.Second +) type Engine struct { - unit *engine.Unit + component.Component + log zerolog.Logger idProvider module.IdentityProvider idTranslator p2p.IDTranslator me module.Local metrics module.PingMetrics - pingEnabled bool pingService network.PingService nodeInfo map[flow.Identifier]string // additional details about a node such as operator name } @@ -38,97 +49,118 @@ func New( idTranslator p2p.IDTranslator, me module.Local, metrics module.PingMetrics, - pingEnabled bool, nodeInfoFile string, pingService network.PingService, ) (*Engine, error) { eng := &Engine{ - unit: engine.NewUnit(), log: log.With().Str("engine", "ping").Logger(), idProvider: idProvider, idTranslator: idTranslator, me: me, metrics: metrics, - pingEnabled: pingEnabled, pingService: pingService, } + eng.nodeInfo = eng.loadNodeInfo(nodeInfoFile) - // if a node info file is provided, it is read and the additional node information is reported as part of the ping metric - if nodeInfoFile != "" { - nodeInfo, err := readExtraNodeInfoJSON(nodeInfoFile) - if err != nil { - log.Error().Err(err).Str("node_info_file", nodeInfoFile).Msg("failed to read node info file") - } else { - eng.nodeInfo = nodeInfo - log.Debug().Str("node_info_file", nodeInfoFile).Msg("using node info file") - } - } else { + eng.Component = component.NewComponentManagerBuilder(). + AddWorker(eng.pingLoop). 
+ Build() + + return eng, nil +} + +func (e *Engine) loadNodeInfo(nodeInfoFile string) map[flow.Identifier]string { + if nodeInfoFile == "" { // initialize nodeInfo with an empty map - eng.nodeInfo = make(map[flow.Identifier]string) // the node info file is not mandatory and should not stop the Ping engine from running - log.Trace().Msg("no node info file specified") + e.log.Trace().Msg("no node info file specified") + return make(map[flow.Identifier]string) } - return eng, nil + nodeInfo, err := readExtraNodeInfoJSON(nodeInfoFile) + if err != nil { + e.log.Error().Err(err). + Str("node_info_file", nodeInfoFile). + Msg("failed to read node info file") + return make(map[flow.Identifier]string) + } + + e.log.Debug(). + Str("node_info_file", nodeInfoFile). + Msg("using node info file") + return nodeInfo } -// Ready returns a ready channel that is closed once the engine has fully -// started. For the ingestion engine, we consider the engine up and running -// upon initialization. -func (e *Engine) Ready() <-chan struct{} { - // only launch when ping is enabled - if e.pingEnabled { - e.unit.Launch(e.startPing) +func (e *Engine) pingLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ticker := time.NewTicker(PingInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + e.pingAllNodes(ctx) + } } - e.log.Info().Bool("ping enabled", e.pingEnabled).Msg("ping enabled") - return e.unit.Ready() } -// Done returns a done channel that is closed once the engine has fully stopped. -// For the ingestion engine, it only waits for all submit goroutines to end. -func (e *Engine) Done() <-chan struct{} { - return e.unit.Done() -} +func (e *Engine) pingAllNodes(ctx context.Context) { + start := time.Now() + e.log.Debug().Msg("pinging all nodes") -func (e *Engine) startPing() { + g := new(errgroup.Group) - e.unit.LaunchPeriodically(func() { - peers := e.idProvider.Identities(filter.Not(filter.HasNodeID(e.me.NodeID()))) + // restrict the number of concurrently running ping requests. + g.SetLimit(MaxConcurrentPings) - // for each peer, send a ping every ping interval - for _, peer := range peers { - peer := peer - pid := peer.ID() - delay := time.Duration(binary.BigEndian.Uint16(pid[:2])) % (PingInterval / time.Millisecond) - e.unit.LaunchAfter(delay, func() { - e.pingNode(peer) - }) - } - }, PingInterval, 0) + peers := e.idProvider.Identities(filter.Not(filter.HasNodeID[flow.Identity](e.me.NodeID()))) + for i, peer := range peers { + peer := peer + delay := makeJitter(i) + + g.Go(func() error { + select { + case <-ctx.Done(): + return nil + case <-time.After(delay): + } + + e.pingNode(ctx, peer) + return nil + }) + } + + _ = g.Wait() + + e.log.Debug(). + Dur("duration", time.Since(start)). + Int("node_count", len(peers)). 
+ Msg("finished pinging all nodes") } // pingNode pings the given peer and updates the metrics with the result and the additional node information -func (e *Engine) pingNode(peer *flow.Identity) { - pid, err := e.idTranslator.GetPeerID(peer.ID()) +func (e *Engine) pingNode(ctx context.Context, peer *flow.Identity) { + pid, err := e.idTranslator.GetPeerID(peer.NodeID) if err != nil { e.log.Error().Err(err).Str("peer", peer.String()).Msg("failed to get peer ID") return } - ctx, cancel := context.WithTimeout(context.Background(), PingTimeout) + ctx, cancel := context.WithTimeout(ctx, PingTimeout) defer cancel() // ping the node - resp, rtt, pingErr := e.pingService.Ping(ctx, pid) // ping will timeout in libp2p.PingTimeout seconds + resp, rtt, pingErr := e.pingService.Ping(ctx, pid) // ping will time out after PingTimeout if pingErr != nil { - e.log.Debug().Err(pingErr).Str("target", peer.ID().String()).Msg("failed to ping") + e.log.Debug().Err(pingErr).Str("target", peer.NodeID.String()).Msg("failed to ping") // report the rtt duration as negative to make it easier to distinguish between pingable and non-pingable nodes rtt = -1 } // get the additional info about the node - info := e.nodeInfo[peer.ID()] + info := e.nodeInfo[peer.NodeID] // update metric e.metrics.NodeReachable(peer, info, rtt) @@ -138,3 +170,9 @@ func (e *Engine) pingNode(peer *flow.Identity) { e.metrics.NodeInfo(peer, info, resp.Version, resp.BlockHeight, resp.HotstuffView) } } + +// makeJitter returns a jitter between 0 and MaxJitter +func makeJitter(offset int) time.Duration { + jitter := float64(MaxJitter) * float64(offset%MaxConcurrentPings) / float64(MaxConcurrentPings) + return time.Duration(jitter) +}
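The rewritten ping loop above replaces the old per-peer timer scheduling with a single errgroup whose `SetLimit` bounds concurrency, plus a deterministic per-peer jitter (`makeJitter`). A self-contained sketch of that bounded, jittered fan-out pattern; the constants and node count here are illustrative stand-ins, not the engine's values:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

const (
	maxConcurrent = 4               // stand-in for MaxConcurrentPings
	maxJitter     = 2 * time.Second // stand-in for MaxJitter
)

// jitter spreads the i-th request over [0, maxJitter), mirroring makeJitter.
func jitter(i int) time.Duration {
	return time.Duration(float64(maxJitter) * float64(i%maxConcurrent) / float64(maxConcurrent))
}

func main() {
	g := new(errgroup.Group)
	g.SetLimit(maxConcurrent) // at most maxConcurrent goroutines run at once

	for i := 0; i < 10; i++ {
		i := i // capture loop variable (pre-Go 1.22 idiom, as in the engine)
		g.Go(func() error {
			time.Sleep(jitter(i)) // pause before pinging, as the engine does
			fmt.Printf("pinging node %d\n", i)
			return nil
		})
	}

	// Errors are deliberately ignored, matching the engine's `_ = g.Wait()`.
	_ = g.Wait()
}
```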
diff --git a/engine/access/relay/engine.go b/engine/access/relay/engine.go deleted file mode 100644 index d277b102c0f..00000000000 --- a/engine/access/relay/engine.go +++ /dev/null @@ -1,113 +0,0 @@ -package relay - -import ( - "fmt" - - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" -) - -// Relay engine relays all the messages that are received to the given network for the corresponding channel -type Engine struct { - unit *engine.Unit // used to manage concurrency & shutdown - log zerolog.Logger // used to log relevant actions with context - conduits map[channels.Channel]network.Conduit // conduits for unstaked network -} - -func New( - log zerolog.Logger, - channelList channels.ChannelList, - net network.Network, - unstakedNet network.Network, -) (*Engine, error) { - e := &Engine{ - unit: engine.NewUnit(), - log: log.With().Str("engine", "relay").Logger(), - conduits: make(map[channels.Channel]network.Conduit), - } - - for _, channel := range channelList { - _, err := net.Register(channel, e) - if err != nil { - return nil, fmt.Errorf("could not register relay engine on channel: %w", err) - } - - conduit, err := unstakedNet.Register(channel, e) - if err != nil { - return nil, fmt.Errorf("could not register relay engine on unstaked network channel: %w", err) - } - e.conduits[channel] = conduit - } - - return e, nil -} - -// Ready returns a ready channel that is closed once the engine has fully -// started. -func (e *Engine) Ready() <-chan struct{} { - return e.unit.Ready() -} - -// Done returns a done channel that is closed once the engine has fully stopped. -func (e *Engine) Done() <-chan struct{} { - return e.unit.Done() -} - -// SubmitLocal submits an event originating on the local node. -func (e *Engine) SubmitLocal(event interface{}) { - e.unit.Launch(func() { - err := e.ProcessLocal(event) - if err != nil { - engine.LogError(e.log, err) - } - }) -} - -// ProcessLocal processes an event originating on the local node. -func (e *Engine) ProcessLocal(event interface{}) error { - return e.unit.Do(func() error { - return fmt.Errorf("relay engine does not process local events") - }) -} - -// Submit submits the given event from the node with the given origin ID -// for processing in a non-blocking manner. It returns instantly and logs -// a potential processing error internally when done. -func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { - e.unit.Launch(func() { - err := e.Process(channel, originID, event) - if err != nil { - engine.LogError(e.log, err) - } - }) -} - -// Process processes the given event from the node with the given origin ID -// in a blocking manner. It returns the potential processing error when -// done. -func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { - return e.unit.Do(func() error { - return e.process(channel, originID, event) - }) -} - -func (e *Engine) process(channel channels.Channel, originID flow.Identifier, event interface{}) error { - conduit, ok := e.conduits[channel] - - if !ok { - return fmt.Errorf("received message on unknown channel %s", channel) - } - - e.log.Trace().Interface("event", event).Str("channel", channel.String()).Str("originID", originID.String()).Msg("relaying message") - - // We use a dummy target ID here so that events are broadcast to the entire network - if err := conduit.Publish(event, flow.ZeroID); err != nil { - return fmt.Errorf("could not relay message: %w", err) - } - - return nil -} diff --git a/engine/access/relay/engine_test.go b/engine/access/relay/engine_test.go deleted file mode 100644 index 3afe442c093..00000000000 --- a/engine/access/relay/engine_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package relay - -import ( - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" - "github.com/onflow/flow-go/utils/unittest" -) - -type Suite struct { - suite.Suite - - engine *Engine - channels channels.ChannelList - conduits map[channels.Channel]*mocknetwork.Conduit -} - -func (suite *Suite) SetupTest() { - suite.channels = channels.ChannelList{ - channels.Channel("test-channel-1"), - } - net := new(mocknetwork.Network) - unstakedNet := new(mocknetwork.Network) - suite.conduits = make(map[channels.Channel]*mocknetwork.Conduit) - - for _, channel := range suite.channels { - con := new(mocknetwork.Conduit) - suite.conduits[channel] = con - net.On("Register", channel, mock.Anything).Return(new(mocknetwork.Conduit), nil).Once() - unstakedNet.On("Register", channel, mock.Anything).Return(con, nil).Once() - } - - eng, err := New( - zerolog.Logger{}, - suite.channels, - net, - unstakedNet, - ) - suite.Require().Nil(err) - - suite.engine = eng -} - -func TestRelayEngine(t *testing.T) { - suite.Run(t, new(Suite)) -} - -func getEvent() interface{} { - return struct { - foo string - }{ - foo: "bar", - } -} - -// TestHappyPath tests that the relay engine relays events for each -// channel that it was created
with -func (suite *Suite) TestHappyPath() { - for channel, conduit := range suite.conduits { - id := unittest.IdentifierFixture() - event := getEvent() - - conduit.On("Publish", event, flow.ZeroID).Return(nil).Once() - - err := suite.engine.Process(channel, id, event) - suite.Assert().Nil(err) - - conduit.AssertNumberOfCalls(suite.T(), "Publish", 1) - conduit.AssertExpectations(suite.T()) - } -} diff --git a/engine/access/relay/example_test.go b/engine/access/relay/example_test.go deleted file mode 100644 index 6574dce4567..00000000000 --- a/engine/access/relay/example_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package relay_test - -import ( - "fmt" - "math/rand" - - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/engine/access/relay" - splitterNetwork "github.com/onflow/flow-go/engine/common/splitter/network" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network/channels" - testnet "github.com/onflow/flow-go/utils/unittest/network" -) - -func Example() { - // create a mock network - net := testnet.NewNetwork() - - // create splitter network - logger := zerolog.Nop() - splitterNet := splitterNetwork.NewNetwork(net, logger) - - // generate a random origin ID - var id flow.Identifier - rand.Seed(0) - rand.Read(id[:]) - - // create engines - engineProcessFunc := func(engineName string) testnet.EngineProcessFunc { - return func(channel channels.Channel, originID flow.Identifier, event interface{}) error { - fmt.Printf("Engine %v received message: channel=%v, originID=%v, event=%v\n", engineName, channel, originID, event) - return nil - } - } - fooEngine := testnet.NewEngine().OnProcess(engineProcessFunc("Foo")) - barEngine := testnet.NewEngine().OnProcess(engineProcessFunc("Bar")) - - // register engines on the splitter network - fooChannel := channels.Channel("foo-channel") - barChannel := channels.Channel("bar-channel") - _, err := splitterNet.Register(fooChannel, fooEngine) - if err != nil { - fmt.Println(err) - } - _, err = splitterNet.Register(barChannel, barEngine) - if err != nil { - fmt.Println(err) - } - - // create another network that messages will be relayed to - relayNet := testnet.NewNetwork().OnPublish(func(channel channels.Channel, event interface{}, targetIDs ...flow.Identifier) error { - fmt.Printf("Message published to relay network: channel=%v, event=%v, targetIDs=%v\n", channel, event, targetIDs) - return nil - }) - - // create relay engine - channels := channels.ChannelList{fooChannel, barChannel} - _, err = relay.New(logger, channels, splitterNet, relayNet) - if err != nil { - fmt.Println(err) - } - - // send messages to network - err = net.Send(fooChannel, id, "foo") - if err != nil { - fmt.Println(err) - } - err = net.Send(barChannel, id, "bar") - if err != nil { - fmt.Println(err) - } - - // Unordered output: - // Message published to relay network: channel=foo-channel, event=foo, targetIDs=[0000000000000000000000000000000000000000000000000000000000000000] - // Engine Foo received message: channel=foo-channel, originID=0194fdc2fa2ffcc041d3ff12045b73c86e4ff95ff662a5eee82abdf44a2d0b75, event=foo - // Message published to relay network: channel=bar-channel, event=bar, targetIDs=[0000000000000000000000000000000000000000000000000000000000000000] - // Engine Bar received message: channel=bar-channel, originID=0194fdc2fa2ffcc041d3ff12045b73c86e4ff95ff662a5eee82abdf44a2d0b75, event=bar -} diff --git a/engine/access/rest/README.md b/engine/access/rest/README.md index fd7b970493d..746f9ec2415 100644 --- a/engine/access/rest/README.md +++ 
b/engine/access/rest/README.md @@ -5,17 +5,24 @@ the [Flow OpenAPI definition](https://github.com/onflow/flow/blob/master/openapi available on our [docs site](https://docs.onflow.org/http-api/). ## Packages - -- `rest`: The HTTP handlers for all the request, server generator and the select filter. -- `middleware`: The common [middlewares](https://github.com/gorilla/mux#middleware) that all request pass through. -- `models`: The generated models using openapi generators and implementation of model builders. -- `request`: Implementation of API requests that provide validation for input data and build request models. +- `rest`: The HTTP handlers for the server generator and the select filter, plus the implementation for handling local requests. +- `common`: Includes shared components for REST requests. + - `middleware`: The common [middlewares](https://github.com/gorilla/mux#middleware) that all requests pass through. + - `models`: The common generated models using openapi generators. +- `http`: Implements the core HTTP handling functionality for the access node. + - `models`: The generated models using openapi generators and the implementation of model builders. + - `request`: Implementation of API requests that provide validation for input data and build request models. + - `routes`: The HTTP handlers for all HTTP requests, with tests for each request. +- `router`: Implementation of building HTTP routers with common middleware and routes. +- `apiproxy`: Implementation of the proxy backend handler, which includes the local backend and forwards the methods that +can't be handled locally to an upstream using the gRPC API. This is used by observers that don't have all data in their +local db. ## Request lifecycle 1. Every incoming request passes through a common set of middlewares - logging middleware, query expandable and query select middleware defined in the middleware package. -2. Each request is then wrapped by our handler (`rest/handler.go`) and request input data is used to build the request +2. Each request is then wrapped by our handler (`rest/http/handler.go`) and request input data is used to build the request models defined in request package. 3. The request is then sent to the corresponding API handler based on the configuration in the router. 4. Each handler implements actions to perform the request (database lookups etc) and after the response is built using @@ -37,7 +44,7 @@ make generate-openapi ### Adding New API Endpoints -A new endpoint can be added by first implementing a new request handler, a request handle is a function in the rest +A new endpoint can be added by first implementing a new request handler; a request handler is a function in the routes package that complies with the function interface defined as: ```go @@ -48,6 +55,7 @@ generator models.LinkGenerator, ) (interface{}, error) ``` -That handler implementation needs to be added to the `router.go` with corresponding API endpoint and method. Adding a -new API endpoint also requires for a new request builder to be implemented and added in request package. Make sure to -not forget about adding tests for each of the API handler. +That handler implementation needs to be added to `router.go` with the corresponding API endpoint and method. If the data +is not available on observers, the method must be overridden in the backend handler `RestProxyHandler` so the request +is forwarded upstream. Adding a new API endpoint also requires a new request builder to be implemented and added in the +request package. Make sure to add tests for each API handler.
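As a concrete illustration, a handler for a hypothetical `GetFoo` endpoint would follow the same shape as the deleted `GetAccount` handler below. `GetFooRequest`, `req.ID`, `backend.GetFoo`, and `models.Foo` are made-up names, and the `http/models` and `http/request` import paths are assumptions based on the package layout described above:

```go
package routes

import (
	"github.com/onflow/flow-go/access"
	"github.com/onflow/flow-go/engine/access/rest/http/models"
	"github.com/onflow/flow-go/engine/access/rest/http/request"
)

// GetFoo validates the input via a request builder, queries the backend,
// and builds the response model.
func GetFoo(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) {
	req, err := r.GetFooRequest() // hypothetical request builder
	if err != nil {
		// NewBadRequestError is assumed available here, as in the old rest package
		return nil, NewBadRequestError(err)
	}

	foo, err := backend.GetFoo(r.Context(), req.ID) // hypothetical backend method
	if err != nil {
		return nil, err
	}

	var response models.Foo // hypothetical response model
	err = response.Build(foo, link, r.ExpandFields)
	return response, err
}
```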
diff --git a/engine/access/rest/accounts.go b/engine/access/rest/accounts.go deleted file mode 100644 index 36371bf6c57..00000000000 --- a/engine/access/rest/accounts.go +++ /dev/null @@ -1,33 +0,0 @@ -package rest - -import ( - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" -) - -// GetAccount handler retrieves account by address and returns the response -func GetAccount(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - req, err := r.GetAccountRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - // in case we receive special height values 'final' and 'sealed', fetch that height and overwrite request with it - if req.Height == request.FinalHeight || req.Height == request.SealedHeight { - header, _, err := backend.GetLatestBlockHeader(r.Context(), req.Height == request.SealedHeight) - if err != nil { - return nil, err - } - req.Height = header.Height - } - - account, err := backend.GetAccountAtBlockHeight(r.Context(), req.Address, req.Height) - if err != nil { - return nil, err - } - - var response models.Account - err = response.Build(account, link, r.ExpandFields) - return response, err -} diff --git a/engine/access/rest/accounts_test.go b/engine/access/rest/accounts_test.go deleted file mode 100644 index 61982ff5f9c..00000000000 --- a/engine/access/rest/accounts_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package rest - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - mocktestify "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/access/mock" - "github.com/onflow/flow-go/engine/access/rest/middleware" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -const expandableFieldKeys = "keys" -const expandableFieldContracts = "contracts" - -func accountURL(t *testing.T, address string, height string) string { - u, err := url.ParseRequestURI(fmt.Sprintf("/v1/accounts/%s", address)) - require.NoError(t, err) - q := u.Query() - - if height != "" { - q.Add("block_height", height) - } - - u.RawQuery = q.Encode() - return u.String() } - -func TestGetAccount(t *testing.T) { - backend := &mock.API{} - - t.Run("get by address at latest sealed block", func(t *testing.T) { - account := accountFixture(t) - var height uint64 = 100 - block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) - - req := getAccountRequest(t, account, sealedHeightQueryParam, expandableFieldKeys, expandableFieldContracts) - - backend.Mock. - On("GetLatestBlockHeader", mocktestify.Anything, true). - Return(block, flow.BlockStatusSealed, nil) - - backend.Mock. - On("GetAccountAtBlockHeight", mocktestify.Anything, account.Address, height). - Return(account, nil) - - expected := expectedExpandedResponse(account) - - assertOKResponse(t, req, expected, backend) - mocktestify.AssertExpectationsForObjects(t, backend) - }) - - t.Run("get by address at latest finalized block", func(t *testing.T) { - - var height uint64 = 100 - block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) - account := accountFixture(t) - - req := getAccountRequest(t, account, finalHeightQueryParam, expandableFieldKeys, expandableFieldContracts) - backend.Mock. - On("GetLatestBlockHeader", mocktestify.Anything, false).
- Return(block, flow.BlockStatusFinalized, nil) - backend.Mock. - On("GetAccountAtBlockHeight", mocktestify.Anything, account.Address, height). - Return(account, nil) - - expected := expectedExpandedResponse(account) - - assertOKResponse(t, req, expected, backend) - mocktestify.AssertExpectationsForObjects(t, backend) - }) - - t.Run("get by address at height", func(t *testing.T) { - var height uint64 = 1337 - account := accountFixture(t) - req := getAccountRequest(t, account, fmt.Sprintf("%d", height), expandableFieldKeys, expandableFieldContracts) - - backend.Mock. - On("GetAccountAtBlockHeight", mocktestify.Anything, account.Address, height). - Return(account, nil) - - expected := expectedExpandedResponse(account) - - assertOKResponse(t, req, expected, backend) - mocktestify.AssertExpectationsForObjects(t, backend) - }) - - t.Run("get by address at height condensed", func(t *testing.T) { - var height uint64 = 1337 - account := accountFixture(t) - req := getAccountRequest(t, account, fmt.Sprintf("%d", height)) - - backend.Mock. - On("GetAccountAtBlockHeight", mocktestify.Anything, account.Address, height). - Return(account, nil) - - expected := expectedCondensedResponse(account) - - assertOKResponse(t, req, expected, backend) - mocktestify.AssertExpectationsForObjects(t, backend) - }) - - t.Run("get invalid", func(t *testing.T) { - tests := []struct { - url string - out string - }{ - {accountURL(t, "123", ""), `{"code":400, "message":"invalid address"}`}, - {accountURL(t, unittest.AddressFixture().String(), "foo"), `{"code":400, "message":"invalid height format"}`}, - } - - for i, test := range tests { - req, _ := http.NewRequest("GET", test.url, nil) - rr, err := executeRequest(req, backend) - assert.NoError(t, err) - - assert.Equal(t, http.StatusBadRequest, rr.Code) - assert.JSONEq(t, test.out, rr.Body.String(), fmt.Sprintf("test #%d failed: %v", i, test)) - } - }) -} - -func expectedExpandedResponse(account *flow.Account) string { - return fmt.Sprintf(`{ - "address":"%s", - "balance":"100", - "keys":[ - { - "index":"0", - "public_key":"%s", - "signing_algorithm":"ECDSA_P256", - "hashing_algorithm":"SHA3_256", - "sequence_number":"0", - "weight":"1000", - "revoked":false - } - ], - "_links":{"_self":"/v1/accounts/%s" }, - "_expandable": {}, - "contracts": {"contract1":"Y29udHJhY3Qx", "contract2":"Y29udHJhY3Qy"} - }`, account.Address, account.Keys[0].PublicKey.String(), account.Address) -} - -func expectedCondensedResponse(account *flow.Account) string { - return fmt.Sprintf(`{ - "address":"%s", - "balance":"100", - "_links":{"_self":"/v1/accounts/%s" }, - "_expandable":{"contracts":"contracts", "keys":"keys"} - }`, account.Address, account.Address) -} - -func getAccountRequest(t *testing.T, account *flow.Account, height string, expandFields ...string) *http.Request { - req, err := http.NewRequest("GET", accountURL(t, account.Address.String(), height), nil) - if len(expandFields) > 0 { - fieldParam := strings.Join(expandFields, ",") - q := req.URL.Query() - q.Add(middleware.ExpandQueryParam, fieldParam) - req.URL.RawQuery = q.Encode() - } - require.NoError(t, err) - return req -} - -func accountFixture(t *testing.T) *flow.Account { - account, err := unittest.AccountFixture() - require.NoError(t, err) - return account -}
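The new `RestProxyHandler` below relies on struct embedding: it embeds the local `access.API` and a gRPC `Forwarder`, and overrides only the methods that must be served by an upstream access node, so any call it does not override falls through to the embedded local implementation. A toy, self-contained sketch of that override-by-embedding pattern; the interface and values are simplified stand-ins, not the real types:

```go
package main

import "fmt"

// API is a simplified stand-in for access.API.
type API interface {
	GetBlockCount() int
	GetNodeVersion() string
}

// localAPI is a stand-in for the observer's local backend.
type localAPI struct{}

func (localAPI) GetBlockCount() int     { return 42 }
func (localAPI) GetNodeVersion() string { return "local-v1" }

// proxy embeds the local API; calls without an override fall through to it.
type proxy struct {
	API
}

// GetNodeVersion overrides the embedded method, standing in for a method
// that must be forwarded to an upstream node.
func (p proxy) GetNodeVersion() string { return "upstream-v2" }

func main() {
	p := proxy{API: localAPI{}}
	fmt.Println(p.GetBlockCount())  // 42 — served by the embedded local API
	fmt.Println(p.GetNodeVersion()) // upstream-v2 — the overriding method
}
```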
diff --git a/engine/access/rest/apiproxy/rest_proxy_handler.go b/engine/access/rest/apiproxy/rest_proxy_handler.go new file mode 100644 index 00000000000..3b29778e0d0 --- /dev/null +++ b/engine/access/rest/apiproxy/rest_proxy_handler.go @@ -0,0 +1,455 @@ +package apiproxy + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + "google.golang.org/grpc/status" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/grpc/forwarder" + "github.com/onflow/flow-go/engine/common/rpc/convert" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" +) + +// RestProxyHandler is a structure that represents the proxy algorithm for the observer node. +// It includes the local backend and forwards the methods that can't be handled locally to an upstream using the gRPC API. +type RestProxyHandler struct { + access.API + *forwarder.Forwarder + Logger zerolog.Logger + Metrics metrics.ObserverMetrics + Chain flow.Chain +} + +// NewRestProxyHandler returns a new rest proxy handler for the observer node. +func NewRestProxyHandler( + api access.API, + identities flow.IdentitySkeletonList, + connectionFactory connection.ConnectionFactory, + log zerolog.Logger, + metrics metrics.ObserverMetrics, + chain flow.Chain, +) (*RestProxyHandler, error) { + forwarder, err := forwarder.NewForwarder( + identities, + connectionFactory, + ) + if err != nil { + return nil, fmt.Errorf("could not create REST forwarder: %w", err) + } + + restProxyHandler := &RestProxyHandler{ + Logger: log, + Metrics: metrics, + Chain: chain, + } + + restProxyHandler.API = api + restProxyHandler.Forwarder = forwarder + + return restProxyHandler, nil +} + +func (r *RestProxyHandler) log(handler, rpc string, err error) { + code := status.Code(err) + r.Metrics.RecordRPC(handler, rpc, code) + + logger := r.Logger.With(). + Str("handler", handler). + Str("rest_method", rpc). + Str("rest_code", code.String()). + Logger() + + if err != nil { + logger.Error().Err(err).Msg("request failed") + return + } + + logger.Info().Msg("request succeeded") +} + +// GetCollectionByID returns a collection by ID. +func (r *RestProxyHandler) GetCollectionByID(ctx context.Context, id flow.Identifier) (*flow.LightCollection, error) { + upstream, closer, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + + getCollectionByIDRequest := &accessproto.GetCollectionByIDRequest{ + Id: id[:], + } + + collectionResponse, err := upstream.GetCollectionByID(ctx, getCollectionByIDRequest) + r.log("upstream", "GetCollectionByID", err) + + if err != nil { + return nil, err + } + + transactions, err := convert.MessageToLightCollection(collectionResponse.Collection) + if err != nil { + return nil, err + } + + return transactions, nil +} + +// SendTransaction sends an already-created transaction. +func (r *RestProxyHandler) SendTransaction(ctx context.Context, tx *flow.TransactionBody) error { + upstream, closer, err := r.FaultTolerantClient() + if err != nil { + return err + } + defer closer.Close() + + transaction := convert.TransactionToMessage(*tx) + sendTransactionRequest := &accessproto.SendTransactionRequest{ + Transaction: transaction, + } + + _, err = upstream.SendTransaction(ctx, sendTransactionRequest) + r.log("upstream", "SendTransaction", err) + + return err +} + +// GetTransaction returns a transaction by ID.
+func (r *RestProxyHandler) GetTransaction(ctx context.Context, id flow.Identifier) (*flow.TransactionBody, error) { + upstream, closer, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + + getTransactionRequest := &accessproto.GetTransactionRequest{ + Id: id[:], + } + transactionResponse, err := upstream.GetTransaction(ctx, getTransactionRequest) + r.log("upstream", "GetTransaction", err) + + if err != nil { + return nil, err + } + + transactionBody, err := convert.MessageToTransaction(transactionResponse.Transaction, r.Chain) + if err != nil { + return nil, err + } + + return &transactionBody, nil +} + +// GetTransactionResult returns transaction result by the transaction ID. +func (r *RestProxyHandler) GetTransactionResult( + ctx context.Context, + id flow.Identifier, + blockID flow.Identifier, + collectionID flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + upstream, closer, err := r.FaultTolerantClient() + if err != nil { + + return nil, err + } + defer closer.Close() + + getTransactionResultRequest := &accessproto.GetTransactionRequest{ + Id: id[:], + BlockId: blockID[:], + CollectionId: collectionID[:], + EventEncodingVersion: requiredEventEncodingVersion, + } + + transactionResultResponse, err := upstream.GetTransactionResult(ctx, getTransactionResultRequest) + r.log("upstream", "GetTransactionResult", err) + + if err != nil { + return nil, err + } + + return convert.MessageToTransactionResult(transactionResultResponse) +} + +// GetAccountAtBlockHeight returns account by account address and block height. +func (r *RestProxyHandler) GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) { + upstream, closer, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + + getAccountAtBlockHeightRequest := &accessproto.GetAccountAtBlockHeightRequest{ + Address: address.Bytes(), + BlockHeight: height, + } + + accountResponse, err := upstream.GetAccountAtBlockHeight(ctx, getAccountAtBlockHeightRequest) + r.log("upstream", "GetAccountAtBlockHeight", err) + + if err != nil { + return nil, err + } + + return convert.MessageToAccount(accountResponse.Account) +} + +// GetAccountBalanceAtBlockHeight returns account balance by account address and block height. +func (r *RestProxyHandler) GetAccountBalanceAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (uint64, error) { + upstream, closer, err := r.FaultTolerantClient() + if err != nil { + return 0, err + } + defer closer.Close() + + getAccountBalanceAtBlockHeightRequest := &accessproto.GetAccountBalanceAtBlockHeightRequest{ + Address: address.Bytes(), + BlockHeight: height, + } + + accountBalanceResponse, err := upstream.GetAccountBalanceAtBlockHeight(ctx, getAccountBalanceAtBlockHeightRequest) + r.log("upstream", "GetAccountBalanceAtBlockHeight", err) + + if err != nil { + return 0, err + } + + return accountBalanceResponse.GetBalance(), nil + +} + +// GetAccountKeys returns account keys by account address and block height. 
+func (r *RestProxyHandler) GetAccountKeys(ctx context.Context, address flow.Address, height uint64) ([]flow.AccountPublicKey, error) {
+	upstream, closer, err := r.FaultTolerantClient()
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+
+	getAccountKeysAtBlockHeightRequest := &accessproto.GetAccountKeysAtBlockHeightRequest{
+		Address:     address.Bytes(),
+		BlockHeight: height,
+	}
+
+	accountKeyResponse, err := upstream.GetAccountKeysAtBlockHeight(ctx, getAccountKeysAtBlockHeightRequest)
+	r.log("upstream", "GetAccountKeysAtBlockHeight", err)
+
+	if err != nil {
+		return nil, err
+	}
+
+	accountKeys := make([]flow.AccountPublicKey, len(accountKeyResponse.GetAccountKeys()))
+	for i, key := range accountKeyResponse.GetAccountKeys() {
+		accountKey, err := convert.MessageToAccountKey(key)
+		if err != nil {
+			return nil, err
+		}
+
+		accountKeys[i] = *accountKey
+	}
+
+	return accountKeys, nil
+}
+
+// GetAccountKeyByIndex returns an account key by address, key index, and block height.
+func (r *RestProxyHandler) GetAccountKeyByIndex(ctx context.Context, address flow.Address, keyIndex uint32, height uint64) (*flow.AccountPublicKey, error) {
+	upstream, closer, err := r.FaultTolerantClient()
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+
+	getAccountKeyAtBlockHeightRequest := &accessproto.GetAccountKeyAtBlockHeightRequest{
+		Address:     address.Bytes(),
+		Index:       keyIndex,
+		BlockHeight: height,
+	}
+
+	accountKeyResponse, err := upstream.GetAccountKeyAtBlockHeight(ctx, getAccountKeyAtBlockHeightRequest)
+	r.log("upstream", "GetAccountKeyAtBlockHeight", err)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return convert.MessageToAccountKey(accountKeyResponse.AccountKey)
+}
+
+// ExecuteScriptAtLatestBlock executes a script at the latest block.
+func (r *RestProxyHandler) ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, arguments [][]byte) ([]byte, error) {
+	upstream, closer, err := r.FaultTolerantClient()
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+
+	executeScriptAtLatestBlockRequest := &accessproto.ExecuteScriptAtLatestBlockRequest{
+		Script:    script,
+		Arguments: arguments,
+	}
+	executeScriptAtLatestBlockResponse, err := upstream.ExecuteScriptAtLatestBlock(ctx, executeScriptAtLatestBlockRequest)
+	r.log("upstream", "ExecuteScriptAtLatestBlock", err)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return executeScriptAtLatestBlockResponse.Value, nil
+}
+
+// ExecuteScriptAtBlockHeight executes a script at the given block height.
+func (r *RestProxyHandler) ExecuteScriptAtBlockHeight(ctx context.Context, blockHeight uint64, script []byte, arguments [][]byte) ([]byte, error) {
+	upstream, closer, err := r.FaultTolerantClient()
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+
+	executeScriptAtBlockHeightRequest := &accessproto.ExecuteScriptAtBlockHeightRequest{
+		BlockHeight: blockHeight,
+		Script:      script,
+		Arguments:   arguments,
+	}
+	executeScriptAtBlockHeightResponse, err := upstream.ExecuteScriptAtBlockHeight(ctx, executeScriptAtBlockHeightRequest)
+	r.log("upstream", "ExecuteScriptAtBlockHeight", err)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return executeScriptAtBlockHeightResponse.Value, nil
+}
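Script arguments cross the wire as raw byte slices, so callers are expected to pass already-encoded Cadence values. An illustrative call, assuming the cadence package and its JSON-CDC encoder (imported here as jsoncdc), with a handler and ctx already in scope; the script itself is a toy example:

// Illustrative only: executing a read-only Cadence script at the latest block.
script := []byte(`access(all) fun main(a: Int): Int { return a * 2 }`)
arg, err := jsoncdc.Encode(cadence.NewInt(21)) // JSON-CDC encode the single argument
if err != nil {
	return err
}
value, err := handler.ExecuteScriptAtLatestBlock(ctx, script, [][]byte{arg})
if err != nil {
	return err
}
fmt.Printf("raw JSON-CDC result: %s\n", value) // result comes back as encoded bytes

+
+// ExecuteScriptAtBlockID executes a script at the given block ID.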
+func (r *RestProxyHandler) ExecuteScriptAtBlockID(ctx context.Context, blockID flow.Identifier, script []byte, arguments [][]byte) ([]byte, error) {
+	upstream, closer, err := r.FaultTolerantClient()
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+
+	executeScriptAtBlockIDRequest := &accessproto.ExecuteScriptAtBlockIDRequest{
+		BlockId:   blockID[:],
+		Script:    script,
+		Arguments: arguments,
+	}
+	executeScriptAtBlockIDResponse, err := upstream.ExecuteScriptAtBlockID(ctx, executeScriptAtBlockIDRequest)
+	r.log("upstream", "ExecuteScriptAtBlockID", err)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return executeScriptAtBlockIDResponse.Value, nil
+}
+
+// GetEventsForHeightRange returns events of the given type in the specified block height range.
+func (r *RestProxyHandler) GetEventsForHeightRange(
+	ctx context.Context,
+	eventType string,
+	startHeight, endHeight uint64,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) ([]flow.BlockEvents, error) {
+	upstream, closer, err := r.FaultTolerantClient()
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+
+	getEventsForHeightRangeRequest := &accessproto.GetEventsForHeightRangeRequest{
+		Type:                 eventType,
+		StartHeight:          startHeight,
+		EndHeight:            endHeight,
+		EventEncodingVersion: requiredEventEncodingVersion,
+	}
+	eventsResponse, err := upstream.GetEventsForHeightRange(ctx, getEventsForHeightRangeRequest)
+	r.log("upstream", "GetEventsForHeightRange", err)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return convert.MessagesToBlockEvents(eventsResponse.Results)
+}
+
+// GetEventsForBlockIDs returns events of the given type for the specified block IDs.
+func (r *RestProxyHandler) GetEventsForBlockIDs(
+	ctx context.Context,
+	eventType string,
+	blockIDs []flow.Identifier,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) ([]flow.BlockEvents, error) {
+	upstream, closer, err := r.FaultTolerantClient()
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+
+	blockIds := convert.IdentifiersToMessages(blockIDs)
+
+	getEventsForBlockIDsRequest := &accessproto.GetEventsForBlockIDsRequest{
+		Type:                 eventType,
+		BlockIds:             blockIds,
+		EventEncodingVersion: requiredEventEncodingVersion,
+	}
+	eventsResponse, err := upstream.GetEventsForBlockIDs(ctx, getEventsForBlockIDsRequest)
+	r.log("upstream", "GetEventsForBlockIDs", err)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return convert.MessagesToBlockEvents(eventsResponse.Results)
+}
+
+// GetExecutionResultForBlockID gets the execution result for the provided block ID.
+func (r *RestProxyHandler) GetExecutionResultForBlockID(ctx context.Context, blockID flow.Identifier) (*flow.ExecutionResult, error) {
+	upstream, closer, err := r.FaultTolerantClient()
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+
+	getExecutionResultForBlockID := &accessproto.GetExecutionResultForBlockIDRequest{
+		BlockId: blockID[:],
+	}
+	executionResultForBlockIDResponse, err := upstream.GetExecutionResultForBlockID(ctx, getExecutionResultForBlockID)
+	r.log("upstream", "GetExecutionResultForBlockID", err)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return convert.MessageToExecutionResult(executionResultForBlockIDResponse.ExecutionResult)
+}
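Callers select the payload encoding for returned events via the entities.EventEncodingVersion argument. An illustrative query over a height range, assuming a handler and ctx in scope; the event type string and height bounds are example values only:

// Illustrative only: fetching token-deposit events through the proxy.
events, err := handler.GetEventsForHeightRange(
	ctx,
	"A.7e60df042a9c0868.FlowToken.TokensDeposited", // fully-qualified event type (example)
	100, 200, // start and end heights, inclusive
	entities.EventEncodingVersion_JSON_CDC_V0, // request JSON-CDC encoded payloads
)
if err != nil {
	return err
}
for _, blockEvents := range events {
	fmt.Printf("height %d: %d events\n", blockEvents.BlockHeight, len(blockEvents.Events))
}

+
+// GetExecutionResultByID gets an execution result by its ID.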
+func (r *RestProxyHandler) GetExecutionResultByID(ctx context.Context, id flow.Identifier) (*flow.ExecutionResult, error) { + upstream, closer, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + defer closer.Close() + + executionResultByIDRequest := &accessproto.GetExecutionResultByIDRequest{ + Id: id[:], + } + + executionResultByIDResponse, err := upstream.GetExecutionResultByID(ctx, executionResultByIDRequest) + r.log("upstream", "GetExecutionResultByID", err) + + if err != nil { + return nil, err + } + + return convert.MessageToExecutionResult(executionResultByIDResponse.ExecutionResult) +} diff --git a/engine/access/rest/blocks.go b/engine/access/rest/blocks.go deleted file mode 100644 index e729f67a9bd..00000000000 --- a/engine/access/rest/blocks.go +++ /dev/null @@ -1,220 +0,0 @@ -package rest - -import ( - "context" - "fmt" - "net/http" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" - "github.com/onflow/flow-go/model/flow" -) - -// GetBlocksByIDs gets blocks by provided ID or list of IDs. -func GetBlocksByIDs(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - req, err := r.GetBlockByIDsRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - blocks := make([]*models.Block, len(req.IDs)) - for i, id := range req.IDs { - block, err := getBlock(forID(&id), r, backend, link) - if err != nil { - return nil, err - } - blocks[i] = block - } - - return blocks, nil -} - -func GetBlocksByHeight(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - req, err := r.GetBlockRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - if req.FinalHeight || req.SealedHeight { - block, err := getBlock(forFinalized(req.Heights[0]), r, backend, link) - if err != nil { - return nil, err - } - - return []*models.Block{block}, nil - } - - // if the query is /blocks/height=1000,1008,1049... 
- if req.HasHeights() { - blocks := make([]*models.Block, len(req.Heights)) - for i, h := range req.Heights { - block, err := getBlock(forHeight(h), r, backend, link) - if err != nil { - return nil, err - } - blocks[i] = block - } - - return blocks, nil - } - - // support providing end height as "sealed" or "final" - if req.EndHeight == request.FinalHeight || req.EndHeight == request.SealedHeight { - latest, _, err := backend.GetLatestBlock(r.Context(), req.EndHeight == request.SealedHeight) - if err != nil { - return nil, err - } - - req.EndHeight = latest.Header.Height // overwrite special value height with fetched - - if req.StartHeight > req.EndHeight { - return nil, NewBadRequestError(fmt.Errorf("start height must be less than or equal to end height")) - } - } - - blocks := make([]*models.Block, 0) - // start and end height inclusive - for i := req.StartHeight; i <= req.EndHeight; i++ { - block, err := getBlock(forHeight(i), r, backend, link) - if err != nil { - return nil, err - } - blocks = append(blocks, block) - } - - return blocks, nil -} - -// GetBlockPayloadByID gets block payload by ID -func GetBlockPayloadByID(r *request.Request, backend access.API, _ models.LinkGenerator) (interface{}, error) { - req, err := r.GetBlockPayloadRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - blkProvider := NewBlockProvider(backend, forID(&req.ID)) - blk, _, statusErr := blkProvider.getBlock(r.Context()) - if statusErr != nil { - return nil, statusErr - } - - var payload models.BlockPayload - err = payload.Build(blk.Payload) - if err != nil { - return nil, err - } - - return payload, nil -} - -func getBlock(option blockProviderOption, req *request.Request, backend access.API, link models.LinkGenerator) (*models.Block, error) { - // lookup block - blkProvider := NewBlockProvider(backend, option) - blk, blockStatus, err := blkProvider.getBlock(req.Context()) - if err != nil { - return nil, err - } - - // lookup execution result - // (even if not specified as expandable, since we need the execution result ID to generate its expandable link) - var block models.Block - executionResult, err := backend.GetExecutionResultForBlockID(req.Context(), blk.ID()) - if err != nil { - // handle case where execution result is not yet available - if se, ok := status.FromError(err); ok { - if se.Code() == codes.NotFound { - err := block.Build(blk, nil, link, blockStatus, req.ExpandFields) - if err != nil { - return nil, err - } - return &block, nil - } - } - return nil, err - } - - err = block.Build(blk, executionResult, link, blockStatus, req.ExpandFields) - if err != nil { - return nil, err - } - return &block, nil -} - -// blockProvider is a layer of abstraction on top of the backend access.API and provides a uniform way to -// look up a block or a block header either by ID or by height -type blockProvider struct { - id *flow.Identifier - height uint64 - latest bool - sealed bool - backend access.API -} - -type blockProviderOption func(blkProvider *blockProvider) - -func forID(id *flow.Identifier) blockProviderOption { - return func(blkProvider *blockProvider) { - blkProvider.id = id - } -} -func forHeight(height uint64) blockProviderOption { - return func(blkProvider *blockProvider) { - blkProvider.height = height - } -} - -func forFinalized(queryParam uint64) blockProviderOption { - return func(blkProvider *blockProvider) { - switch queryParam { - case request.SealedHeight: - blkProvider.sealed = true - fallthrough - case request.FinalHeight: - blkProvider.latest = true - } - } -} - 
-func NewBlockProvider(backend access.API, options ...blockProviderOption) *blockProvider { - blkProvider := &blockProvider{ - backend: backend, - } - - for _, o := range options { - o(blkProvider) - } - return blkProvider -} - -func (blkProvider *blockProvider) getBlock(ctx context.Context) (*flow.Block, flow.BlockStatus, error) { - if blkProvider.id != nil { - blk, _, err := blkProvider.backend.GetBlockByID(ctx, *blkProvider.id) - if err != nil { // unfortunately backend returns internal error status if not found - return nil, flow.BlockStatusUnknown, NewNotFoundError( - fmt.Sprintf("error looking up block with ID %s", blkProvider.id.String()), err, - ) - } - return blk, flow.BlockStatusUnknown, nil - } - - if blkProvider.latest { - blk, status, err := blkProvider.backend.GetLatestBlock(ctx, blkProvider.sealed) - if err != nil { - // cannot be a 'not found' error since final and sealed block should always be found - return nil, flow.BlockStatusUnknown, NewRestError(http.StatusInternalServerError, "block lookup failed", err) - } - return blk, status, nil - } - - blk, status, err := blkProvider.backend.GetBlockByHeight(ctx, blkProvider.height) - if err != nil { // unfortunately backend returns internal error status if not found - return nil, flow.BlockStatusUnknown, NewNotFoundError( - fmt.Sprintf("error looking up block at height %d", blkProvider.height), err, - ) - } - return blk, status, nil -} diff --git a/engine/access/rest/blocks_test.go b/engine/access/rest/blocks_test.go deleted file mode 100644 index 7f977b06d69..00000000000 --- a/engine/access/rest/blocks_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package rest - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/onflow/flow-go/engine/access/rest/request" - "github.com/onflow/flow-go/engine/access/rest/util" - - mocks "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/access/mock" - "github.com/onflow/flow-go/engine/access/rest/middleware" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -type testVector struct { - description string - request *http.Request - expectedStatus int - expectedResponse string -} - -// TestGetBlocks tests the get blocks by ID and get blocks by heights API -func TestGetBlocks(t *testing.T) { - backend := &mock.API{} - - blkCnt := 10 - blockIDs, heights, blocks, executionResults := generateMocks(backend, blkCnt) - - singleBlockExpandedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], true, flow.BlockStatusUnknown) - singleSealedBlockExpandedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], true, flow.BlockStatusSealed) - multipleBlockExpandedResponse := expectedBlockResponsesExpanded(blocks, executionResults, true, flow.BlockStatusUnknown) - multipleSealedBlockExpandedResponse := expectedBlockResponsesExpanded(blocks, executionResults, true, flow.BlockStatusSealed) - - singleBlockCondensedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], false, flow.BlockStatusUnknown) - multipleBlockCondensedResponse := expectedBlockResponsesExpanded(blocks, executionResults, false, flow.BlockStatusUnknown) - - invalidID := unittest.IdentifierFixture().String() - invalidHeight := fmt.Sprintf("%d", blkCnt+1) - - maxIDs := 
flow.IdentifierList(unittest.IdentifierListFixture(request.MaxBlockRequestHeightRange + 1)) - - testVectors := []testVector{ - { - description: "Get single expanded block by ID", - request: getByIDsExpandedURL(t, blockIDs[:1]), - expectedStatus: http.StatusOK, - expectedResponse: singleBlockExpandedResponse, - }, - { - description: "Get multiple expanded blocks by IDs", - request: getByIDsExpandedURL(t, blockIDs), - expectedStatus: http.StatusOK, - expectedResponse: multipleBlockExpandedResponse, - }, - { - description: "Get single condensed block by ID", - request: getByIDsCondensedURL(t, blockIDs[:1]), - expectedStatus: http.StatusOK, - expectedResponse: singleBlockCondensedResponse, - }, - { - description: "Get multiple condensed blocks by IDs", - request: getByIDsCondensedURL(t, blockIDs), - expectedStatus: http.StatusOK, - expectedResponse: multipleBlockCondensedResponse, - }, - { - description: "Get single expanded block by height", - request: getByHeightsExpandedURL(t, heights[:1]...), - expectedStatus: http.StatusOK, - expectedResponse: singleSealedBlockExpandedResponse, - }, - { - description: "Get multiple expanded blocks by heights", - request: getByHeightsExpandedURL(t, heights...), - expectedStatus: http.StatusOK, - expectedResponse: multipleSealedBlockExpandedResponse, - }, - { - description: "Get multiple expanded blocks by start and end height", - request: getByStartEndHeightExpandedURL(t, heights[0], heights[len(heights)-1]), - expectedStatus: http.StatusOK, - expectedResponse: multipleSealedBlockExpandedResponse, - }, - { - description: "Get block by ID not found", - request: getByIDsExpandedURL(t, []string{invalidID}), - expectedStatus: http.StatusNotFound, - expectedResponse: fmt.Sprintf(`{"code":404, "message":"error looking up block with ID %s"}`, invalidID), - }, - { - description: "Get block by height not found", - request: getByHeightsExpandedURL(t, invalidHeight), - expectedStatus: http.StatusNotFound, - expectedResponse: fmt.Sprintf(`{"code":404, "message":"error looking up block at height %s"}`, invalidHeight), - }, - { - description: "Get block by end height less than start height", - request: getByStartEndHeightExpandedURL(t, heights[len(heights)-1], heights[0]), - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400, "message": "start height must be less than or equal to end height"}`, - }, - { - description: "Get block by both heights and start and end height", - request: requestURL(t, nil, heights[len(heights)-1], heights[0], true, heights...), - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400, "message": "can only provide either heights or start and end height range"}`, - }, - { - description: "Get block with missing height param", - request: getByHeightsExpandedURL(t), // no height query param specified - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400, "message": "must provide either heights or start and end height range"}`, - }, - { - description: "Get block with missing height values", - request: getByHeightsExpandedURL(t, ""), // height query param specified with no value - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400, "message": "must provide either heights or start and end height range"}`, - }, - { - description: "Get block by more than maximum permissible number of IDs", - request: getByIDsCondensedURL(t, maxIDs.Strings()), // height query param specified with no value - expectedStatus: http.StatusBadRequest, - expectedResponse: fmt.Sprintf(`{"code":400, 
"message": "at most %d IDs can be requested at a time"}`, request.MaxBlockRequestHeightRange), - }, - } - - for _, tv := range testVectors { - responseRec, err := executeRequest(tv.request, backend) - assert.NoError(t, err) - require.Equal(t, tv.expectedStatus, responseRec.Code, "failed test %s: incorrect response code", tv.description) - actualResp := responseRec.Body.String() - require.JSONEq(t, tv.expectedResponse, actualResp, "Failed: %s: incorrect response body", tv.description) - } -} - -func requestURL(t *testing.T, ids []string, start string, end string, expandResponse bool, heights ...string) *http.Request { - u, _ := url.Parse("/v1/blocks") - q := u.Query() - - if len(ids) > 0 { - u, _ = url.Parse(u.String() + "/" + strings.Join(ids, ",")) - } - - if start != "" { - q.Add(startHeightQueryParam, start) - q.Add(endHeightQueryParam, end) - } - - if len(heights) > 0 { - heightsStr := strings.Join(heights, ",") - q.Add(heightQueryParam, heightsStr) - } - - if expandResponse { - var expands []string - expands = append(expands, ExpandableFieldPayload) - expands = append(expands, ExpandableExecutionResult) - expandsStr := strings.Join(expands, ",") - q.Add(middleware.ExpandQueryParam, expandsStr) - } - - u.RawQuery = q.Encode() - - req, err := http.NewRequest("GET", u.String(), nil) - require.NoError(t, err) - return req -} - -func getByIDsExpandedURL(t *testing.T, ids []string) *http.Request { - return requestURL(t, ids, "", "", true) -} - -func getByHeightsExpandedURL(t *testing.T, heights ...string) *http.Request { - return requestURL(t, nil, "", "", true, heights...) -} - -func getByStartEndHeightExpandedURL(t *testing.T, start, end string) *http.Request { - return requestURL(t, nil, start, end, true) -} - -func getByIDsCondensedURL(t *testing.T, ids []string) *http.Request { - return requestURL(t, ids, "", "", false) -} - -func generateMocks(backend *mock.API, count int) ([]string, []string, []*flow.Block, []*flow.ExecutionResult) { - blockIDs := make([]string, count) - heights := make([]string, count) - blocks := make([]*flow.Block, count) - executionResults := make([]*flow.ExecutionResult, count) - - for i := 0; i < count; i++ { - block := unittest.BlockFixture() - block.Header.Height = uint64(i) - blocks[i] = &block - blockIDs[i] = block.Header.ID().String() - heights[i] = fmt.Sprintf("%d", block.Header.Height) - - executionResult := unittest.ExecutionResultFixture() - executionResult.BlockID = block.ID() - executionResults[i] = executionResult - - backend.Mock.On("GetBlockByID", mocks.Anything, block.ID()).Return(&block, flow.BlockStatusSealed, nil) - backend.Mock.On("GetBlockByHeight", mocks.Anything, block.Header.Height).Return(&block, flow.BlockStatusSealed, nil) - backend.Mock.On("GetExecutionResultForBlockID", mocks.Anything, block.ID()).Return(executionResults[i], nil) - } - - // any other call to the backend should return a not found error - backend.Mock.On("GetBlockByID", mocks.Anything, mocks.Anything).Return(nil, flow.BlockStatusUnknown, status.Error(codes.NotFound, "not found")) - backend.Mock.On("GetBlockByHeight", mocks.Anything, mocks.Anything).Return(nil, flow.BlockStatusUnknown, status.Error(codes.NotFound, "not found")) - - return blockIDs, heights, blocks, executionResults -} - -func expectedBlockResponsesExpanded(blocks []*flow.Block, execResult []*flow.ExecutionResult, expanded bool, status flow.BlockStatus) string { - blockResponses := make([]string, len(blocks)) - for i, b := range blocks { - blockResponses[i] = expectedBlockResponse(b, execResult[i], 
expanded, status) - } - return fmt.Sprintf("[%s]", strings.Join(blockResponses, ",")) -} - -func expectedBlockResponse(block *flow.Block, execResult *flow.ExecutionResult, expanded bool, status flow.BlockStatus) string { - id := block.ID().String() - execResultID := execResult.ID().String() - execLink := fmt.Sprintf("/v1/execution_results/%s", execResultID) - blockLink := fmt.Sprintf("/v1/blocks/%s", id) - payloadLink := fmt.Sprintf("/v1/blocks/%s/payload", id) - blockStatus := status.String() - - timestamp := block.Header.Timestamp.Format(time.RFC3339Nano) - - if expanded { - return fmt.Sprintf(` - { - "header": { - "id": "%s", - "parent_id": "%s", - "height": "%d", - "timestamp": "%s", - "parent_voter_signature": "%s" - }, - "payload": { - "collection_guarantees": [], - "block_seals": [] - }, - "execution_result": %s, - "_expandable": {}, - "_links": { - "_self": "%s" - }, - "block_status": "%s" - }`, id, block.Header.ParentID.String(), block.Header.Height, timestamp, - util.ToBase64(block.Header.ParentVoterSigData), executionResultExpectedStr(execResult), blockLink, blockStatus) - } - - return fmt.Sprintf(` - { - "header": { - "id": "%s", - "parent_id": "%s", - "height": "%d", - "timestamp": "%s", - "parent_voter_signature": "%s" - }, - "_expandable": { - "payload": "%s", - "execution_result": "%s" - }, - "_links": { - "_self": "%s" - }, - "block_status": "%s" - }`, id, block.Header.ParentID.String(), block.Header.Height, timestamp, - util.ToBase64(block.Header.ParentVoterSigData), payloadLink, execLink, blockLink, blockStatus) -} diff --git a/engine/access/rest/collections.go b/engine/access/rest/collections.go deleted file mode 100644 index 807be2c0c41..00000000000 --- a/engine/access/rest/collections.go +++ /dev/null @@ -1,42 +0,0 @@ -package rest - -import ( - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" - "github.com/onflow/flow-go/model/flow" -) - -// GetCollectionByID retrieves a collection by ID and builds a response -func GetCollectionByID(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - req, err := r.GetCollectionRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - collection, err := backend.GetCollectionByID(r.Context(), req.ID) - if err != nil { - return nil, err - } - - // if we expand transactions in the query retrieve each transaction data - transactions := make([]*flow.TransactionBody, 0) - if req.ExpandsTransactions { - for _, tid := range collection.Transactions { - tx, err := backend.GetTransaction(r.Context(), tid) - if err != nil { - return nil, err - } - - transactions = append(transactions, tx) - } - } - - var response models.Collection - err = response.Build(collection, transactions, link, r.ExpandFields) - if err != nil { - return nil, err - } - - return response, nil -} diff --git a/engine/access/rest/collections_test.go b/engine/access/rest/collections_test.go deleted file mode 100644 index 3981541f3a7..00000000000 --- a/engine/access/rest/collections_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package rest - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - "testing" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/model/flow" - - "github.com/stretchr/testify/assert" - mocks "github.com/stretchr/testify/mock" - - "github.com/onflow/flow-go/access/mock" - "github.com/onflow/flow-go/utils/unittest" -) - -func getCollectionReq(id string, 
expandTransactions bool) *http.Request { - url := fmt.Sprintf("/v1/collections/%s", id) - if expandTransactions { - url = fmt.Sprintf("%s?expand=transactions", url) - } - - req, _ := http.NewRequest("GET", url, nil) - return req -} - -func TestGetCollections(t *testing.T) { - backend := &mock.API{} - - t.Run("get by ID", func(t *testing.T) { - inputs := []flow.LightCollection{ - unittest.CollectionFixture(1).Light(), - unittest.CollectionFixture(10).Light(), - unittest.CollectionFixture(100).Light(), - } - - for _, col := range inputs { - backend.Mock. - On("GetCollectionByID", mocks.Anything, col.ID()). - Return(&col, nil). - Once() - - txs := make([]string, len(col.Transactions)) - for i, tx := range col.Transactions { - txs[i] = fmt.Sprintf("\"/v1/transactions/%s\"", tx.String()) - } - transactionsStr := fmt.Sprintf("[%s]", strings.Join(txs, ",")) - - expected := fmt.Sprintf(`{ - "id":"%s", - "_links": { - "_self": "/v1/collections/%s" - }, - "_expandable": { - "transactions": %s - } - }`, col.ID(), col.ID(), transactionsStr) - - req := getCollectionReq(col.ID().String(), false) - assertOKResponse(t, req, expected, backend) - mocks.AssertExpectationsForObjects(t, backend) - } - }) - - t.Run("get by ID expand transactions", func(t *testing.T) { - col := unittest.CollectionFixture(3).Light() - - transactions := make([]flow.TransactionBody, len(col.Transactions)) - for i := range col.Transactions { - transactions[i] = unittest.TransactionBodyFixture() - col.Transactions[i] = transactions[i].ID() // overwrite tx ids - - backend.Mock. - On("GetTransaction", mocks.Anything, transactions[i].ID()). - Return(&transactions[i], nil). - Once() - } - - backend.Mock. - On("GetCollectionByID", mocks.Anything, col.ID()). - Return(&col, nil). - Once() - - req := getCollectionReq(col.ID().String(), true) - rr, err := executeRequest(req, backend) - assert.NoError(t, err) - - assert.Equal(t, http.StatusOK, rr.Code) - // really hacky but we can't build whole response since it's really complex - // so we just make sure the transactions are included and have defined values - // anyhow we already test transaction responses in transaction tests - var res map[string]interface{} - err = json.Unmarshal(rr.Body.Bytes(), &res) - assert.NoError(t, err) - resTx := res["transactions"].([]interface{}) - for i, r := range resTx { - c := r.(map[string]interface{}) - assert.Equal(t, transactions[i].ID().String(), c["id"]) - assert.NotNil(t, c["envelope_signatures"]) - } - - mocks.AssertExpectationsForObjects(t, backend) - }) - - t.Run("get by ID errors out", func(t *testing.T) { - testID := unittest.IdentifierFixture() - tests := []struct { - id string - mockValue *flow.LightCollection - mockErr error - response string - status int - }{{ - testID.String(), - nil, - status.Error(codes.NotFound, "not found"), - `{"code":404,"message":"Flow resource not found: not found"}`, - http.StatusNotFound, - }, { - "invalidID", - nil, - nil, - `{"code":400,"message":"invalid ID format"}`, - http.StatusBadRequest, - }, - { - unittest.IdentifierFixture().String(), - nil, - status.Errorf(codes.Internal, "block not found"), - `{"code":400,"message":"Invalid Flow request: block not found"}`, - http.StatusBadRequest, - }, - } - - for _, test := range tests { - id, err := flow.HexStringToIdentifier(test.id) - if err == nil { - // setup the backend mock ti return a not found error if this is a valid id - backend.Mock. - On("GetCollectionByID", mocks.Anything, id). 
-					Return(test.mockValue, test.mockErr)
-			}
-			req := getCollectionReq(test.id, false)
-			assertResponse(t, req, test.status, test.response, backend)
-		}
-	})
-}
diff --git a/engine/access/rest/error.go b/engine/access/rest/common/error.go
similarity index 98%
rename from engine/access/rest/error.go
rename to engine/access/rest/common/error.go
index 7403510ba55..d39ad03e193 100644
--- a/engine/access/rest/error.go
+++ b/engine/access/rest/common/error.go
@@ -1,4 +1,4 @@
-package rest
+package common
 
 import "net/http"
diff --git a/engine/access/rest/common/http_request_handler.go b/engine/access/rest/common/http_request_handler.go
new file mode 100644
index 00000000000..fe40f2a97c0
--- /dev/null
+++ b/engine/access/rest/common/http_request_handler.go
@@ -0,0 +1,152 @@
+package common
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/rs/zerolog"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine/access/rest/common/models"
+	fvmErrors "github.com/onflow/flow-go/fvm/errors"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// HttpHandler is a custom HTTP handler implementing shared handler functionality.
+// It allows easier handling of errors and responses, as it wraps the error- and
+// response-handling functionality outside of the endpoint handlers.
+type HttpHandler struct {
+	Logger zerolog.Logger
+	Chain  flow.Chain
+
+	MaxRequestSize  int64
+	MaxResponseSize int64
+}
+
+func NewHttpHandler(
+	logger zerolog.Logger,
+	chain flow.Chain,
+	maxRequestSize int64,
+	maxResponseSize int64,
+) *HttpHandler {
+	return &HttpHandler{
+		Logger:          logger,
+		Chain:           chain,
+		MaxRequestSize:  maxRequestSize,
+		MaxResponseSize: maxResponseSize,
+	}
+}
+
+// VerifyRequest acts as a wrapper for each request, providing common handling
+// functionality such as logging and error handling.
+func (h *HttpHandler) VerifyRequest(w http.ResponseWriter, r *http.Request) error {
+	// create a logger
+	errLog := h.Logger.With().Str("request_url", r.URL.String()).Logger()
+
+	// limit the request body size
+	r.Body = http.MaxBytesReader(w, r.Body, h.MaxRequestSize)
+	err := r.ParseForm()
+	if err != nil {
+		h.ErrorHandler(w, err, errLog)
+		return err
+	}
+	return nil
+}
+
+func (h *HttpHandler) ErrorHandler(w http.ResponseWriter, err error, errorLogger zerolog.Logger) {
+	// a REST status-type error should be returned with the status and user message provided
+	var statusErr StatusError
+	if errors.As(err, &statusErr) {
+		h.errorResponse(w, statusErr.Status(), statusErr.UserMessage(), errorLogger)
+		return
+	}
+
+	// handle cadence errors
+	cadenceError := fvmErrors.Find(err, fvmErrors.ErrCodeCadenceRunTimeError)
+	if cadenceError != nil {
+		msg := fmt.Sprintf("Cadence error: %s", cadenceError.Error())
+		h.errorResponse(w, http.StatusBadRequest, msg, errorLogger)
+		return
+	}
+
+	var sizeErr *http.MaxBytesError
+	if errors.As(err, &sizeErr) {
+		h.errorResponse(w, http.StatusRequestEntityTooLarge, "request size exceeds maximum allowed", errorLogger)
+		return
+	}
+
+	// handle grpc status errors returned from the backend calls; we forward the message to the client
+	if se, ok := status.FromError(err); ok {
+		if se.Code() == codes.NotFound {
+			msg := fmt.Sprintf("Flow resource not found: %s", se.Message())
+			h.errorResponse(w, http.StatusNotFound, msg, errorLogger)
+			return
+		}
+		if se.Code() == codes.InvalidArgument {
+			msg := fmt.Sprintf("Invalid Flow argument: %s", se.Message())
+			h.errorResponse(w, http.StatusBadRequest, msg, errorLogger)
+			return
+		}
+		if se.Code() == codes.Internal {
+			msg := fmt.Sprintf("Invalid Flow request: %s", se.Message())
+			h.errorResponse(w, http.StatusBadRequest, msg, errorLogger)
+			return
+		}
+		if se.Code() == codes.Unavailable {
+			msg := fmt.Sprintf("Failed to process request: %s", se.Message())
+			h.errorResponse(w, http.StatusServiceUnavailable, msg, errorLogger)
+			return
+		}
+	}
+
+	// stop going further - catch-all error
+	msg := "internal server error"
+	errorLogger.Error().Err(err).Msg(msg)
+	h.errorResponse(w, http.StatusInternalServerError, msg, errorLogger)
+}
+
+// JsonResponse builds a JSON response and sends it to the client
+func (h *HttpHandler) JsonResponse(w http.ResponseWriter, code int, response interface{}, errLogger zerolog.Logger) {
+	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
+
+	// serialize the response to JSON and handle errors
+	encodedResponse, err := json.MarshalIndent(response, "", "\t")
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		errLogger.Error().Err(err).Str("response", string(encodedResponse)).Msg("failed to indent response")
+		return
+	}
+
+	if len(encodedResponse) > int(h.MaxResponseSize) {
+		w.WriteHeader(http.StatusInternalServerError)
+		errLogger.Error().Int("response_size", len(encodedResponse)).Msg("response size exceeds maximum allowed")
+		return
+	}
+
+	w.WriteHeader(code)
+	// write the response to the response stream
+	_, err = w.Write(encodedResponse)
+	if err != nil {
+		errLogger.Error().Err(err).Str("response", string(encodedResponse)).Msg("failed to write http response")
+	}
+}
+
+// errorResponse sends an HTTP error response to the client with the given return code
+// and a model error with the given response message in the response body
+func (h *HttpHandler) errorResponse(
+	w http.ResponseWriter,
+	returnCode int,
+	responseMessage string,
+	logger zerolog.Logger,
+) {
+	// create the error response model
+	modelError := models.ModelError{
+		Code:    int32(returnCode),
+		Message: responseMessage,
+	}
+	h.JsonResponse(w, returnCode, modelError, logger)
+}
diff --git a/engine/access/rest/middleware/common_query_params.go b/engine/access/rest/common/middleware/common_query_params.go
similarity index 93%
rename from engine/access/rest/middleware/common_query_params.go
rename to engine/access/rest/common/middleware/common_query_params.go
index b81c828d91a..cfd9bf1bfde 100644
--- a/engine/access/rest/middleware/common_query_params.go
+++ b/engine/access/rest/common/middleware/common_query_params.go
@@ -8,7 +8,7 @@ import (
 )
 
 const ExpandQueryParam = "expand"
-const selectQueryParam = "select"
+const SelectQueryParam = "select"
 
 // commonQueryParamMiddleware generates a Middleware function that extracts the given query parameter from the request
 // and adds it to the request context as a key value pair with the key as the query param name.
@@ -37,7 +37,7 @@ func QueryExpandable() mux.MiddlewareFunc {
 
 // QuerySelect middleware extracts out the 'select' query param field if present in the request
 func QuerySelect() mux.MiddlewareFunc {
-	return commonQueryParamMiddleware(selectQueryParam)
+	return commonQueryParamMiddleware(SelectQueryParam)
 }
 
 func getField(req *http.Request, key string) ([]string, bool) {
@@ -54,5 +54,5 @@ func GetFieldsToExpand(req *http.Request) ([]string, bool) {
 }
 
 func GetFieldsToSelect(req *http.Request) ([]string, bool) {
-	return getField(req, selectQueryParam)
+	return getField(req, SelectQueryParam)
 }
diff --git a/engine/access/rest/middleware/common_query_params_test.go b/engine/access/rest/common/middleware/common_query_params_test.go
similarity index 97%
rename from engine/access/rest/middleware/common_query_params_test.go
rename to engine/access/rest/common/middleware/common_query_params_test.go
index c40a70e0783..a04882f7613 100644
--- a/engine/access/rest/middleware/common_query_params_test.go
+++ b/engine/access/rest/common/middleware/common_query_params_test.go
@@ -35,7 +35,7 @@ func TestCommonQueryParamMiddlewares(t *testing.T) {
 			query.Add(ExpandQueryParam, strings.Join(expandList, ","))
 		}
 		if len(selectList) > 0 {
-			query.Add(selectQueryParam, strings.Join(selectList, ","))
+			query.Add(SelectQueryParam, strings.Join(selectList, ","))
 		}
 
 		req.URL.RawQuery = query.Encode()
diff --git a/engine/access/rest/common/middleware/logging.go b/engine/access/rest/common/middleware/logging.go
new file mode 100644
index 00000000000..1f03d810cf1
--- /dev/null
+++ b/engine/access/rest/common/middleware/logging.go
@@ -0,0 +1,61 @@
+package middleware
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/gorilla/mux"
+	"github.com/rs/zerolog"
+)
+
+// LoggingMiddleware creates a middleware which adds a logger interceptor to each request to log the request method, uri,
+// duration and response code
+func LoggingMiddleware(logger zerolog.Logger) mux.MiddlewareFunc {
+	return func(inner http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			// record start time
+			start := time.Now()
+			// modify the writer
+			respWriter := newResponseWriter(w)
+			// continue to the next handler
+			inner.ServeHTTP(respWriter, req)
+			log := logger.Info()
+			log.Str("method", req.Method).
+				Str("uri", req.RequestURI).
+				Str("client_ip", req.RemoteAddr).
+				Str("user_agent", req.UserAgent()).
+				Dur("duration", time.Since(start)).
+				Int("response_code", respWriter.statusCode).
+				Msg("api")
+		})
+	}
+}
+
+// responseWriter is a wrapper around http.ResponseWriter and helps capture the response code
+type responseWriter struct {
+	http.ResponseWriter
+	statusCode int
+}
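Because LoggingMiddleware returns a mux.MiddlewareFunc, it plugs directly into a gorilla/mux router. An illustrative wiring sketch using only standard library and gorilla/mux names, with the log line contents inferred from the fields above:

// Illustrative only: attaching the middleware and exercising it with httptest.
router := mux.NewRouter()
router.Use(LoggingMiddleware(zerolog.New(os.Stdout)))
router.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusTeapot) // status is captured by the responseWriter wrapper
})

rec := httptest.NewRecorder()
router.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/ping", nil))
// The emitted log entry includes method=GET, uri=/ping and response_code=418.

+
+// Implementing http.Hijacker is necessary for using this middleware with gorilla websocket connections.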
+var _ http.Hijacker = (*responseWriter)(nil) + +func newResponseWriter(w http.ResponseWriter) *responseWriter { + return &responseWriter{w, http.StatusOK} +} + +func (rw *responseWriter) WriteHeader(code int) { + rw.statusCode = code + rw.ResponseWriter.WriteHeader(code) +} + +func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker, ok := rw.ResponseWriter.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("hijacking not supported") + } + return hijacker.Hijack() +} diff --git a/engine/access/rest/common/middleware/metrics.go b/engine/access/rest/common/middleware/metrics.go new file mode 100644 index 00000000000..54dd5dd2c6a --- /dev/null +++ b/engine/access/rest/common/middleware/metrics.go @@ -0,0 +1,28 @@ +package middleware + +import ( + "net/http" + + "github.com/slok/go-http-metrics/middleware" + "github.com/slok/go-http-metrics/middleware/std" + + "github.com/gorilla/mux" + + "github.com/onflow/flow-go/module" +) + +func MetricsMiddleware(restCollector module.RestMetrics) mux.MiddlewareFunc { + metricsMiddleware := middleware.New(middleware.Config{Recorder: restCollector}) + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // This is a custom metric being called on every http request + restCollector.AddTotalRequests(req.Context(), req.Method, req.URL.Path) + + // Modify the writer + respWriter := &responseWriter{w, http.StatusOK} + + // Record go-http-metrics/middleware metrics and continue to the next handler + std.Handler("", metricsMiddleware, next).ServeHTTP(respWriter, req) + }) + } +} diff --git a/engine/access/rest/middleware/request_attribute.go b/engine/access/rest/common/middleware/request_attribute.go similarity index 100% rename from engine/access/rest/middleware/request_attribute.go rename to engine/access/rest/common/middleware/request_attribute.go diff --git a/engine/access/rest/common/models/block.go b/engine/access/rest/common/models/block.go new file mode 100644 index 00000000000..f8e3398fc9e --- /dev/null +++ b/engine/access/rest/common/models/block.go @@ -0,0 +1,238 @@ +package models + +import ( + "time" + + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +const ExpandableFieldPayload = "payload" +const ExpandableExecutionResult = "execution_result" + +func NewBlock( + block *flow.Block, + execResult *flow.ExecutionResult, + link LinkGenerator, + blockStatus flow.BlockStatus, + expand map[string]bool, +) (*Block, error) { + self, err := SelfLink(block.ID(), link.BlockLink) + if err != nil { + return nil, err + } + + var result Block + result.Header = NewBlockHeader(block.ToHeader()) + + // add the payload to the response if it is specified as an expandable field + result.Expandable = &BlockExpandable{} + if expand[ExpandableFieldPayload] { + var payload BlockPayload + err := payload.Build(&block.Payload) + if err != nil { + return nil, err + } + result.Payload = &payload + } else { + // else add the payload expandable link + payloadExpandable, err := link.PayloadLink(block.ID()) + if err != nil { + return nil, err + } + result.Expandable.Payload = payloadExpandable + } + + // execution result might not yet exist + if execResult != nil { + // add the execution result to the response if it is specified as an expandable field + if expand[ExpandableExecutionResult] { + var exeResult ExecutionResult + err := exeResult.Build(execResult, link) + if err != nil { + return nil, err + } + result.ExecutionResult = 
&exeResult + } else { + // else add the execution result expandable link + executionResultExpandable, err := link.ExecutionResultLink(execResult.ID()) + if err != nil { + return nil, err + } + result.Expandable.ExecutionResult = executionResultExpandable + } + } + + result.Links = self + + var status BlockStatus + status.Build(blockStatus) + result.BlockStatus = &status + + return &result, nil +} + +func (b *Block) Build( + block *flow.Block, + execResult *flow.ExecutionResult, + link LinkGenerator, + blockStatus flow.BlockStatus, + expand map[string]bool, +) error { + self, err := SelfLink(block.ID(), link.BlockLink) + if err != nil { + return err + } + + var header BlockHeader + header.Build(block.ToHeader()) + b.Header = &header + + // add the payload to the response if it is specified as an expandable field + b.Expandable = &BlockExpandable{} + if expand[ExpandableFieldPayload] { + var payload BlockPayload + err := payload.Build(&block.Payload) + if err != nil { + return err + } + b.Payload = &payload + } else { + // else add the payload expandable link + payloadExpandable, err := link.PayloadLink(block.ID()) + if err != nil { + return err + } + b.Expandable.Payload = payloadExpandable + } + + // execution result might not yet exist + if execResult != nil { + // add the execution result to the response if it is specified as an expandable field + if expand[ExpandableExecutionResult] { + var exeResult ExecutionResult + err := exeResult.Build(execResult, link) + if err != nil { + return err + } + b.ExecutionResult = &exeResult + } else { + // else add the execution result expandable link + executionResultExpandable, err := link.ExecutionResultLink(execResult.ID()) + if err != nil { + return err + } + b.Expandable.ExecutionResult = executionResultExpandable + } + } + + b.Links = self + + var status BlockStatus + status.Build(blockStatus) + b.BlockStatus = &status + + return nil +} + +func (b *BlockStatus) Build(status flow.BlockStatus) { + switch status { + case flow.BlockStatusUnknown: + *b = BLOCK_UNKNOWN + case flow.BlockStatusFinalized: + *b = BLOCK_FINALIZED + case flow.BlockStatusSealed: + *b = BLOCK_SEALED + default: + *b = "" + } +} + +func (b *BlockPayload) Build(payload *flow.Payload) error { + var blockSeal BlockSeals + err := blockSeal.Build(payload.Seals) + if err != nil { + return err + } + b.BlockSeals = blockSeal + + var guarantees CollectionGuarantees + guarantees.Build(payload.Guarantees) + b.CollectionGuarantees = guarantees + + return nil +} + +func NewBlockHeader(header *flow.Header) *BlockHeader { + return &BlockHeader{ + Id: header.ID().String(), + ParentId: header.ParentID.String(), + Height: util.FromUint(header.Height), + Timestamp: time.UnixMilli(int64(header.Timestamp)).UTC(), + ParentVoterSignature: util.ToBase64(header.ParentVoterSigData), + } +} + +func (b *BlockHeader) Build(header *flow.Header) { + b.Id = header.ID().String() + b.ParentId = header.ParentID.String() + b.Height = util.FromUint(header.Height) + b.Timestamp = time.UnixMilli(int64(header.Timestamp)).UTC() + b.ParentVoterSignature = util.ToBase64(header.ParentVoterSigData) +} + +type BlockSeals []BlockSeal + +func (b *BlockSeals) Build(seals []*flow.Seal) error { + blkSeals := make([]BlockSeal, len(seals)) + for i, s := range seals { + var seal BlockSeal + err := seal.Build(s) + if err != nil { + return err + } + blkSeals[i] = seal + } + + *b = blkSeals + return nil +} + +func (b *BlockSeal) Build(seal *flow.Seal) error { + var aggregatedSigs AggregatedSignatures + 
aggregatedSigs.Build(seal.AggregatedApprovalSigs) + + b.BlockId = seal.BlockID.String() + b.ResultId = seal.ResultID.String() + b.FinalState = seal.FinalState.String() + b.AggregatedApprovalSignatures = aggregatedSigs + return nil +} + +type AggregatedSignatures []AggregatedSignature + +func (a *AggregatedSignatures) Build(signatures []flow.AggregatedSignature) { + response := make([]AggregatedSignature, len(signatures)) + for i, signature := range signatures { + var sig AggregatedSignature + sig.Build(signature) + response[i] = sig + } + + *a = response +} + +func (a *AggregatedSignature) Build(signature flow.AggregatedSignature) { + verifierSignatures := make([]string, len(signature.VerifierSignatures)) + for y, verifierSignature := range signature.VerifierSignatures { + verifierSignatures[y] = util.ToBase64(verifierSignature.Bytes()) + } + + signerIDs := make([]string, len(signature.SignerIDs)) + for j, signerID := range signature.SignerIDs { + signerIDs[j] = signerID.String() + } + + a.VerifierSignatures = verifierSignatures + a.SignerIds = signerIDs +} diff --git a/engine/access/rest/common/models/collection.go b/engine/access/rest/common/models/collection.go new file mode 100644 index 00000000000..07f92ce343f --- /dev/null +++ b/engine/access/rest/common/models/collection.go @@ -0,0 +1,63 @@ +package models + +import ( + "fmt" + + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +const ExpandsTransactions = "transactions" + +func (c *Collection) Build( + collection *flow.LightCollection, + txs []*flow.TransactionBody, + link LinkGenerator, + expand map[string]bool) error { + + self, err := SelfLink(collection.ID(), link.CollectionLink) + if err != nil { + return err + } + + var expandable CollectionExpandable + var transactions Transactions + if expand[ExpandsTransactions] { + transactions.Build(txs, link) + } else { + expandable.Transactions = make([]string, len(collection.Transactions)) + for i, tx := range collection.Transactions { + expandable.Transactions[i], err = link.TransactionLink(tx) + if err != nil { + return err + } + } + } + + c.Id = collection.ID().String() + c.Transactions = transactions + c.Links = self + c.Expandable = &expandable + + return nil +} + +func (c *CollectionGuarantee) Build(guarantee *flow.CollectionGuarantee) { + c.CollectionId = guarantee.CollectionID.String() + c.SignerIndices = fmt.Sprintf("%x", guarantee.SignerIndices) + c.Signature = util.ToBase64(guarantee.Signature.Bytes()) + c.ReferenceBlockID = guarantee.ReferenceBlockID.String() + c.ClusterChainID = guarantee.ClusterChainID.String() +} + +type CollectionGuarantees []CollectionGuarantee + +func (c *CollectionGuarantees) Build(guarantees []*flow.CollectionGuarantee) { + collGuarantees := make([]CollectionGuarantee, len(guarantees)) + for i, g := range guarantees { + var col CollectionGuarantee + col.Build(g) + collGuarantees[i] = col + } + *c = collGuarantees +} diff --git a/engine/access/rest/models/enums.go b/engine/access/rest/common/models/enums.go similarity index 78% rename from engine/access/rest/models/enums.go rename to engine/access/rest/common/models/enums.go index 6d9b0d33b10..375aefdc6b1 100644 --- a/engine/access/rest/models/enums.go +++ b/engine/access/rest/common/models/enums.go @@ -15,3 +15,9 @@ const ( SUCCESS_RESULT = SUCCESS_TransactionExecution FAILURE_RESULT = FAILURE_TransactionExecution ) + +const ( + BLOCK_UNKNOWN = UNKNOWN_BlockStatus + BLOCK_FINALIZED = FINALIZED_BlockStatus + BLOCK_SEALED = SEALED_BlockStatus +) diff 
--git a/engine/access/rest/common/models/event.go b/engine/access/rest/common/models/event.go new file mode 100644 index 00000000000..856f95c1a83 --- /dev/null +++ b/engine/access/rest/common/models/event.go @@ -0,0 +1,50 @@ +package models + +import ( + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +func (e *Event) Build(event flow.Event) { + e.Type_ = string(event.Type) + e.TransactionId = event.TransactionID.String() + e.TransactionIndex = util.FromUint(uint64(event.TransactionIndex)) + e.EventIndex = util.FromUint(uint64(event.EventIndex)) + e.Payload = util.ToBase64(event.Payload) +} + +type Events []Event + +func (e *Events) Build(events []flow.Event) { + evs := make([]Event, len(events)) + for i, ev := range events { + var event Event + event.Build(ev) + evs[i] = event + } + + *e = evs +} + +func (b *BlockEvents) Build(blockEvents flow.BlockEvents) { + b.BlockHeight = util.FromUint(blockEvents.BlockHeight) + b.BlockId = blockEvents.BlockID.String() + b.BlockTimestamp = blockEvents.BlockTimestamp + + var events Events + events.Build(blockEvents.Events) + b.Events = events +} + +type BlocksEvents []BlockEvents + +func (b *BlocksEvents) Build(blocksEvents []flow.BlockEvents) { + evs := make([]BlockEvents, 0) + for _, ev := range blocksEvents { + var blockEvent BlockEvents + blockEvent.Build(ev) + evs = append(evs, blockEvent) + } + + *b = evs +} diff --git a/engine/access/rest/common/models/execution_result.go b/engine/access/rest/common/models/execution_result.go new file mode 100644 index 00000000000..cd434fc8210 --- /dev/null +++ b/engine/access/rest/common/models/execution_result.go @@ -0,0 +1,51 @@ +package models + +import ( + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +func (e *ExecutionResult) Build( + exeResult *flow.ExecutionResult, + link LinkGenerator, +) error { + self, err := SelfLink(exeResult.ID(), link.ExecutionResultLink) + if err != nil { + return err + } + + events := make([]Event, len(exeResult.ServiceEvents)) + for i, e := range exeResult.ServiceEvents { + events[i] = Event{ + Type_: e.Type.String(), + } + } + + e.Id = exeResult.ID().String() + e.BlockId = exeResult.BlockID.String() + e.Events = events + e.Links = self + + e.PreviousResultId = exeResult.PreviousResultID.String() + + chunks := make([]Chunk, len(exeResult.Chunks)) + + for i, flowChunk := range exeResult.Chunks { + var chunk Chunk + chunk.Build(flowChunk) + chunks[i] = chunk + } + e.Chunks = chunks + return nil +} + +func (c *Chunk) Build(chunk *flow.Chunk) { + c.BlockId = chunk.BlockID.String() + c.Index = util.FromUint(chunk.Index) + c.CollectionIndex = util.FromUint(uint64(chunk.CollectionIndex)) + c.StartState = util.ToBase64(chunk.StartState[:]) + c.EndState = util.ToBase64(chunk.EndState[:]) + c.NumberOfTransactions = util.FromUint(chunk.NumberOfTransactions) + c.EventCollection = chunk.EventCollection.String() + c.TotalComputationUsed = util.FromUint(chunk.TotalComputationUsed) +} diff --git a/engine/access/rest/models/link.go b/engine/access/rest/common/models/link.go similarity index 100% rename from engine/access/rest/models/link.go rename to engine/access/rest/common/models/link.go diff --git a/engine/access/rest/common/models/mock/link_generator.go b/engine/access/rest/common/models/mock/link_generator.go new file mode 100644 index 00000000000..07740cff2e0 --- /dev/null +++ b/engine/access/rest/common/models/mock/link_generator.go @@ -0,0 +1,223 @@ +// Code generated by mockery. 
DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// LinkGenerator is an autogenerated mock type for the LinkGenerator type +type LinkGenerator struct { + mock.Mock +} + +// AccountLink provides a mock function with given fields: address +func (_m *LinkGenerator) AccountLink(address string) (string, error) { + ret := _m.Called(address) + + if len(ret) == 0 { + panic("no return value specified for AccountLink") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(string) (string, error)); ok { + return rf(address) + } + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(address) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockLink provides a mock function with given fields: id +func (_m *LinkGenerator) BlockLink(id flow.Identifier) (string, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for BlockLink") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (string, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) string); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CollectionLink provides a mock function with given fields: id +func (_m *LinkGenerator) CollectionLink(id flow.Identifier) (string, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for CollectionLink") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (string, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) string); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecutionResultLink provides a mock function with given fields: id +func (_m *LinkGenerator) ExecutionResultLink(id flow.Identifier) (string, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ExecutionResultLink") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (string, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) string); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PayloadLink provides a mock function with given fields: id +func (_m *LinkGenerator) PayloadLink(id flow.Identifier) (string, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for PayloadLink") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (string, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) string); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionLink provides a mock function with given fields: id +func (_m *LinkGenerator) TransactionLink(id flow.Identifier) (string, error) { + ret := 
_m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for TransactionLink") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (string, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) string); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionResultLink provides a mock function with given fields: id +func (_m *LinkGenerator) TransactionResultLink(id flow.Identifier) (string, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for TransactionResultLink") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (string, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) string); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewLinkGenerator creates a new instance of LinkGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLinkGenerator(t interface { + mock.TestingT + Cleanup(func()) +}) *LinkGenerator { + mock := &LinkGenerator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rest/models/model_aggregated_signature.go b/engine/access/rest/common/models/model_aggregated_signature.go similarity index 100% rename from engine/access/rest/models/model_aggregated_signature.go rename to engine/access/rest/common/models/model_aggregated_signature.go diff --git a/engine/access/rest/models/model_block.go b/engine/access/rest/common/models/model_block.go similarity index 81% rename from engine/access/rest/models/model_block.go rename to engine/access/rest/common/models/model_block.go index ded5008bcdc..f30836ea1e4 100644 --- a/engine/access/rest/models/model_block.go +++ b/engine/access/rest/common/models/model_block.go @@ -12,7 +12,7 @@ type Block struct { Header *BlockHeader `json:"header"` Payload *BlockPayload `json:"payload,omitempty"` ExecutionResult *ExecutionResult `json:"execution_result,omitempty"` - Expandable *BlockExpandable `json:"_expandable"` + Expandable *BlockExpandable `json:"_expandable,omitempty"` Links *Links `json:"_links,omitempty"` - BlockStatus string `json:"block_status"` + BlockStatus *BlockStatus `json:"block_status"` } diff --git a/engine/access/rest/models/model_block__expandable.go b/engine/access/rest/common/models/model_block__expandable.go similarity index 100% rename from engine/access/rest/models/model_block__expandable.go rename to engine/access/rest/common/models/model_block__expandable.go diff --git a/engine/access/rest/models/model_block_events.go b/engine/access/rest/common/models/model_block_events.go similarity index 100% rename from engine/access/rest/models/model_block_events.go rename to engine/access/rest/common/models/model_block_events.go diff --git a/engine/access/rest/models/model_block_header.go b/engine/access/rest/common/models/model_block_header.go similarity index 100% rename from engine/access/rest/models/model_block_header.go rename to engine/access/rest/common/models/model_block_header.go diff --git a/engine/access/rest/models/model_block_height.go 
b/engine/access/rest/common/models/model_block_height.go similarity index 100% rename from engine/access/rest/models/model_block_height.go rename to engine/access/rest/common/models/model_block_height.go diff --git a/engine/access/rest/models/model_block_payload.go b/engine/access/rest/common/models/model_block_payload.go similarity index 100% rename from engine/access/rest/models/model_block_payload.go rename to engine/access/rest/common/models/model_block_payload.go diff --git a/engine/access/rest/models/model_block_seal.go b/engine/access/rest/common/models/model_block_seal.go similarity index 100% rename from engine/access/rest/models/model_block_seal.go rename to engine/access/rest/common/models/model_block_seal.go diff --git a/engine/access/rest/common/models/model_block_status.go b/engine/access/rest/common/models/model_block_status.go new file mode 100644 index 00000000000..66e04825a42 --- /dev/null +++ b/engine/access/rest/common/models/model_block_status.go @@ -0,0 +1,19 @@ +/* + * Access API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 1.0.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +// BlockStatus : This value indicates the status of a block. +type BlockStatus string + +// List of BlockStatus +const ( + UNKNOWN_BlockStatus BlockStatus = "BLOCK_UNKNOWN" + FINALIZED_BlockStatus BlockStatus = "BLOCK_FINALIZED" + SEALED_BlockStatus BlockStatus = "BLOCK_SEALED" +) diff --git a/engine/access/rest/models/model_chunk.go b/engine/access/rest/common/models/model_chunk.go similarity index 100% rename from engine/access/rest/models/model_chunk.go rename to engine/access/rest/common/models/model_chunk.go diff --git a/engine/access/rest/models/model_collection.go b/engine/access/rest/common/models/model_collection.go similarity index 100% rename from engine/access/rest/models/model_collection.go rename to engine/access/rest/common/models/model_collection.go diff --git a/engine/access/rest/models/model_collection__expandable.go b/engine/access/rest/common/models/model_collection__expandable.go similarity index 100% rename from engine/access/rest/models/model_collection__expandable.go rename to engine/access/rest/common/models/model_collection__expandable.go diff --git a/engine/access/rest/common/models/model_collection_guarantee.go b/engine/access/rest/common/models/model_collection_guarantee.go new file mode 100644 index 00000000000..5426ad3f805 --- /dev/null +++ b/engine/access/rest/common/models/model_collection_guarantee.go @@ -0,0 +1,17 @@ +/* + * Access API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 1.0.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +type CollectionGuarantee struct { + CollectionId string `json:"collection_id"` + SignerIndices string `json:"signer_indices"` + Signature string `json:"signature"` + ReferenceBlockID string `json:"reference_block_id"` + ClusterChainID string `json:"cluster_chain_id"` +} diff --git a/engine/access/rest/models/model_error.go b/engine/access/rest/common/models/model_error.go similarity index 100% rename from engine/access/rest/models/model_error.go rename to engine/access/rest/common/models/model_error.go diff --git a/engine/access/rest/models/model_event.go b/engine/access/rest/common/models/model_event.go similarity index 100% rename from 
engine/access/rest/models/model_event.go rename to engine/access/rest/common/models/model_event.go diff --git a/engine/access/rest/models/model_execution_result.go b/engine/access/rest/common/models/model_execution_result.go similarity index 100% rename from engine/access/rest/models/model_execution_result.go rename to engine/access/rest/common/models/model_execution_result.go diff --git a/engine/access/rest/models/model_links.go b/engine/access/rest/common/models/model_links.go similarity index 100% rename from engine/access/rest/models/model_links.go rename to engine/access/rest/common/models/model_links.go diff --git a/engine/access/rest/models/model_proposal_key.go b/engine/access/rest/common/models/model_proposal_key.go similarity index 100% rename from engine/access/rest/models/model_proposal_key.go rename to engine/access/rest/common/models/model_proposal_key.go diff --git a/engine/access/rest/models/model_transaction.go b/engine/access/rest/common/models/model_transaction.go similarity index 100% rename from engine/access/rest/models/model_transaction.go rename to engine/access/rest/common/models/model_transaction.go diff --git a/engine/access/rest/models/model_transaction__expandable.go b/engine/access/rest/common/models/model_transaction__expandable.go similarity index 100% rename from engine/access/rest/models/model_transaction__expandable.go rename to engine/access/rest/common/models/model_transaction__expandable.go diff --git a/engine/access/rest/models/model_transaction_execution.go b/engine/access/rest/common/models/model_transaction_execution.go similarity index 100% rename from engine/access/rest/models/model_transaction_execution.go rename to engine/access/rest/common/models/model_transaction_execution.go diff --git a/engine/access/rest/models/model_transaction_result.go b/engine/access/rest/common/models/model_transaction_result.go similarity index 100% rename from engine/access/rest/models/model_transaction_result.go rename to engine/access/rest/common/models/model_transaction_result.go diff --git a/engine/access/rest/common/models/model_transaction_signature.go b/engine/access/rest/common/models/model_transaction_signature.go new file mode 100644 index 00000000000..a87c9777389 --- /dev/null +++ b/engine/access/rest/common/models/model_transaction_signature.go @@ -0,0 +1,16 @@ +/* + * Access API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 1.0.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +type TransactionSignature struct { + Address string `json:"address"` + KeyIndex string `json:"key_index"` + Signature string `json:"signature"` + ExtensionData string `json:"extension_data,omitempty"` +} diff --git a/engine/access/rest/models/model_transaction_status.go b/engine/access/rest/common/models/model_transaction_status.go similarity index 100% rename from engine/access/rest/models/model_transaction_status.go rename to engine/access/rest/common/models/model_transaction_status.go diff --git a/engine/access/rest/common/models/transaction.go b/engine/access/rest/common/models/transaction.go new file mode 100644 index 00000000000..89f7e85ef42 --- /dev/null +++ b/engine/access/rest/common/models/transaction.go @@ -0,0 +1,152 @@ +package models + +import ( + "github.com/onflow/flow-go/engine/access/rest/util" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" +) + +func (t *Transaction) 
Build(tx *flow.TransactionBody, txr *accessmodel.TransactionResult, link LinkGenerator) { + args := make([]string, len(tx.Arguments)) + for i, arg := range tx.Arguments { + args[i] = util.ToBase64(arg) + } + + auths := make([]string, len(tx.Authorizers)) + for i, auth := range tx.Authorizers { + auths[i] = auth.String() + } + + // if transaction result is provided then add that to the response, else add the result link to the expandable + t.Expandable = &TransactionExpandable{} + if txr != nil { + var txResult TransactionResult + txResult.Build(txr, tx.ID(), link) + t.Result = &txResult + } else { + resultLink, _ := link.TransactionResultLink(tx.ID()) + t.Expandable.Result = resultLink + } + + var payloadSigs TransactionSignatures + payloadSigs.Build(tx.PayloadSignatures) + + var envelopeSigs TransactionSignatures + envelopeSigs.Build(tx.EnvelopeSignatures) + + var proposalKey ProposalKey + proposalKey.Build(tx.ProposalKey) + + t.Id = tx.ID().String() + t.Script = util.ToBase64(tx.Script) + t.Arguments = args + t.ReferenceBlockId = tx.ReferenceBlockID.String() + t.GasLimit = util.FromUint(tx.GasLimit) + t.Payer = tx.Payer.String() + t.ProposalKey = &proposalKey + t.Authorizers = auths + t.PayloadSignatures = payloadSigs + t.EnvelopeSignatures = envelopeSigs + + self, _ := SelfLink(tx.ID(), link.TransactionLink) + t.Links = self +} + +type Transactions []Transaction + +func (t *Transactions) Build(transactions []*flow.TransactionBody, link LinkGenerator) { + txs := make([]Transaction, len(transactions)) + for i, tr := range transactions { + var tx Transaction + tx.Build(tr, nil, link) + txs[i] = tx + } + + *t = txs +} + +type TransactionSignatures []TransactionSignature + +func (t *TransactionSignatures) Build(signatures []flow.TransactionSignature) { + sigs := make([]TransactionSignature, len(signatures)) + for i, s := range signatures { + var sig TransactionSignature + sig.Build(s) + sigs[i] = sig + } + + *t = sigs +} + +func (t *TransactionSignature) Build(sig flow.TransactionSignature) { + t.Address = sig.Address.String() + t.KeyIndex = util.FromUint(sig.KeyIndex) + t.Signature = util.ToBase64(sig.Signature) + t.ExtensionData = util.ToBase64(sig.ExtensionData) +} + +func (t *TransactionResult) Build(txr *accessmodel.TransactionResult, txID flow.Identifier, link LinkGenerator) { + var status TransactionStatus + status.Build(txr.Status) + + var execution TransactionExecution + execution.Build(txr) + + var events Events + events.Build(txr.Events) + + if txr.BlockID != flow.ZeroID { // don't send back 0 ID + t.BlockId = txr.BlockID.String() + } + + if txr.CollectionID != flow.ZeroID { // don't send back 0 ID + t.CollectionId = txr.CollectionID.String() + } + + t.Status = &status + t.Execution = &execution + t.StatusCode = int32(txr.StatusCode) + t.ErrorMessage = txr.ErrorMessage + t.ComputationUsed = util.FromUint(uint64(0)) // todo: define this + t.Events = events + + self, _ := SelfLink(txID, link.TransactionResultLink) + t.Links = self +} + +func (t *TransactionStatus) Build(status flow.TransactionStatus) { + switch status { + case flow.TransactionStatusExpired: + *t = EXPIRED + case flow.TransactionStatusExecuted: + *t = EXECUTED + case flow.TransactionStatusFinalized: + *t = FINALIZED + case flow.TransactionStatusSealed: + *t = SEALED + case flow.TransactionStatusPending: + *t = PENDING + default: + *t = "" + } +} + +func (t *TransactionExecution) Build(result *accessmodel.TransactionResult) { + *t = PENDING_RESULT + + if result.Status == flow.TransactionStatusSealed && 
result.ErrorMessage == "" { + *t = SUCCESS_RESULT + } + if result.ErrorMessage != "" { + *t = FAILURE_RESULT + } + if result.Status == flow.TransactionStatusExpired { + *t = FAILURE_RESULT + } +} + +func (p *ProposalKey) Build(key flow.ProposalKey) { + p.Address = key.Address.String() + p.KeyIndex = util.FromUint(key.KeyIndex) + p.SequenceNumber = util.FromUint(key.SequenceNumber) +} diff --git a/engine/access/rest/common/parser/address.go b/engine/access/rest/common/parser/address.go new file mode 100644 index 00000000000..a2db50c4427 --- /dev/null +++ b/engine/access/rest/common/parser/address.go @@ -0,0 +1,26 @@ +package parser + +import ( + "fmt" + "regexp" + "strings" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" +) + +func ParseAddress(raw string, chain flow.Chain) (flow.Address, error) { + raw = strings.ReplaceAll(raw, "0x", "") // remove 0x prefix + + valid, _ := regexp.MatchString(`^[0-9a-fA-F]{16}$`, raw) + if !valid { + return flow.EmptyAddress, fmt.Errorf("invalid address") + } + + address, err := convert.HexToAddress(raw, chain) + if err != nil { + return flow.EmptyAddress, err + } + + return address, nil +} diff --git a/engine/access/rest/common/parser/address_test.go b/engine/access/rest/common/parser/address_test.go new file mode 100644 index 00000000000..95cc90dbdf0 --- /dev/null +++ b/engine/access/rest/common/parser/address_test.go @@ -0,0 +1,59 @@ +package parser + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/model/flow" +) + +func TestAddress_InvalidParse(t *testing.T) { + inputs := []string{ + "0x1", + "", + "foo", + "1", + "@", + "ead892083b3e2c61222", // too long + } + + chain := flow.Localnet.Chain() + for _, input := range inputs { + _, err := ParseAddress(input, chain) + assert.EqualError(t, err, "invalid address") + } +} + +func TestAddress_InvalidNetwork(t *testing.T) { + inputs := []string{ + "18eb4ee6b3c026d2", + "0x18eb4ee6b3c026d2", + } + + chain := flow.Localnet.Chain() + for _, input := range inputs { + _, err := ParseAddress(input, chain) + require.Error(t, err) + assert.Equal(t, codes.InvalidArgument, status.Code(err)) + } +} + +func TestAddress_ValidParse(t *testing.T) { + inputs := []string{ + "f8d6e0586b0a20c7", + "148602c0600814da", + "0x0b807ae5da6210df", + } + + chain := flow.Localnet.Chain() + for _, input := range inputs { + address, err := ParseAddress(input, chain) + require.NoError(t, err) + assert.Equal(t, strings.ReplaceAll(input, "0x", ""), address.String()) + } +} diff --git a/engine/access/rest/common/parser/arguments.go b/engine/access/rest/common/parser/arguments.go new file mode 100644 index 00000000000..493b1f9a0c2 --- /dev/null +++ b/engine/access/rest/common/parser/arguments.go @@ -0,0 +1,38 @@ +package parser + +import ( + "fmt" + + "github.com/onflow/flow-go/engine/access/rest/util" +) + +const maxArgumentsLength = 100 +const MaxAllowedScriptArguments = 100 + +type Arguments [][]byte + +func (a *Arguments) Parse(raw []string) error { + args := make([][]byte, 0) + for _, rawArg := range raw { + if rawArg == "" { // skip empty + continue + } + + arg, err := util.FromBase64(rawArg) + if err != nil { + return fmt.Errorf("invalid argument encoding: %w", err) + } + args = append(args, arg) + } + + if len(args) > maxArgumentsLength { + return fmt.Errorf("too many arguments. 
Maximum arguments allowed: %d", MaxAllowedScriptArguments) + } + + *a = args + return nil +} + +func (a Arguments) Flow() [][]byte { + return a +} diff --git a/engine/access/rest/request/arguments_test.go b/engine/access/rest/common/parser/arguments_test.go similarity index 85% rename from engine/access/rest/request/arguments_test.go rename to engine/access/rest/common/parser/arguments_test.go index bd1dfc677e1..5c8bd8250e4 100644 --- a/engine/access/rest/request/arguments_test.go +++ b/engine/access/rest/common/parser/arguments_test.go @@ -1,4 +1,4 @@ -package request +package parser import ( "fmt" @@ -21,13 +21,13 @@ func TestArguments_InvalidParse(t *testing.T) { assert.EqualError(t, err, "invalid argument encoding: illegal base64 data at input byte 0", a) } - tooLong := make([]string, maxAllowedScriptArguments+1) + tooLong := make([]string, MaxAllowedScriptArguments+1) for i := range tooLong { tooLong[i] = "dGVzdA==" } err := arguments.Parse(tooLong) - assert.EqualError(t, err, fmt.Sprintf("too many arguments. Maximum arguments allowed: %d", maxAllowedScriptArguments)) + assert.EqualError(t, err, fmt.Sprintf("too many arguments. Maximum arguments allowed: %d", MaxAllowedScriptArguments)) } func TestArguments_ValidParse(t *testing.T) { diff --git a/engine/access/rest/common/parser/block_status.go b/engine/access/rest/common/parser/block_status.go new file mode 100644 index 00000000000..efb34519894 --- /dev/null +++ b/engine/access/rest/common/parser/block_status.go @@ -0,0 +1,24 @@ +package parser + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" +) + +// Finalized and Sealed represents the status of a block. +// It is used in rest arguments to provide block status. +const ( + Finalized = "finalized" + Sealed = "sealed" +) + +func ParseBlockStatus(blockStatus string) (flow.BlockStatus, error) { + switch blockStatus { + case Finalized: + return flow.BlockStatusFinalized, nil + case Sealed: + return flow.BlockStatusSealed, nil + } + return flow.BlockStatusUnknown, fmt.Errorf("invalid 'block_status', must be '%s' or '%s'", Finalized, Sealed) +} diff --git a/engine/access/rest/common/parser/block_status_test.go b/engine/access/rest/common/parser/block_status_test.go new file mode 100644 index 00000000000..0bbaa30c56b --- /dev/null +++ b/engine/access/rest/common/parser/block_status_test.go @@ -0,0 +1,39 @@ +package parser + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/flow" +) + +// TestParseBlockStatus_Invalid tests the ParseBlockStatus function with invalid inputs. +// It verifies that for each invalid block status string, the function returns an error +// matching the expected error message format. +func TestParseBlockStatus_Invalid(t *testing.T) { + tests := []string{"unknown", "pending", ""} + expectedErr := fmt.Sprintf("invalid 'block_status', must be '%s' or '%s'", Finalized, Sealed) + + for _, input := range tests { + _, err := ParseBlockStatus(input) + assert.EqualError(t, err, expectedErr) + } +} + +// TestParseBlockStatus_Valid tests the ParseBlockStatus function with valid inputs. +// It ensures that the function returns the correct flow.BlockStatus for valid status +// strings "finalized" and "sealed" without errors. 
+func TestParseBlockStatus_Valid(t *testing.T) { + tests := map[string]flow.BlockStatus{ + Finalized: flow.BlockStatusFinalized, + Sealed: flow.BlockStatusSealed, + } + + for input, expectedStatus := range tests { + status, err := ParseBlockStatus(input) + assert.NoError(t, err) + assert.Equal(t, expectedStatus, status) + } +} diff --git a/engine/access/rest/common/parser/event_type.go b/engine/access/rest/common/parser/event_type.go new file mode 100644 index 00000000000..f1610a966d5 --- /dev/null +++ b/engine/access/rest/common/parser/event_type.go @@ -0,0 +1,50 @@ +package parser + +import ( + "fmt" + "regexp" +) + +type EventType string + +var basicEventRe = regexp.MustCompile(`[A-Z]\.[a-f0-9]{16}\.[\w+]*\.[\w+]*`) +var flowEventRe = regexp.MustCompile(`flow\.[\w]*`) + +func NewEventType(raw string) (EventType, error) { + if !basicEventRe.MatchString(raw) && !flowEventRe.MatchString(raw) { + return "", fmt.Errorf("invalid event type format") + } + return EventType(raw), nil +} + +func (e EventType) Flow() string { + return string(e) +} + +type EventTypes []EventType + +func NewEventTypes(raw []string) (EventTypes, error) { + eventTypes := make(EventTypes, 0) + uniqueTypes := make(map[string]bool) + for i, r := range raw { + eType, err := NewEventType(r) + if err != nil { + return nil, fmt.Errorf("error at index %d: %w", i, err) + } + + if !uniqueTypes[eType.Flow()] { + uniqueTypes[eType.Flow()] = true + eventTypes = append(eventTypes, eType) + } + } + + return eventTypes, nil +} + +func (e EventTypes) Flow() []string { + eventTypes := make([]string, len(e)) + for j, eType := range e { + eventTypes[j] = eType.Flow() + } + return eventTypes +} diff --git a/engine/access/rest/request/id.go b/engine/access/rest/common/parser/id.go similarity index 98% rename from engine/access/rest/request/id.go rename to engine/access/rest/common/parser/id.go index ba3c1200527..7b1436b4761 100644 --- a/engine/access/rest/request/id.go +++ b/engine/access/rest/common/parser/id.go @@ -1,4 +1,4 @@ -package request +package parser import ( "errors" diff --git a/engine/access/rest/request/id_test.go b/engine/access/rest/common/parser/id_test.go similarity index 98% rename from engine/access/rest/request/id_test.go rename to engine/access/rest/common/parser/id_test.go index 1096fdbe696..a663c915e7a 100644 --- a/engine/access/rest/request/id_test.go +++ b/engine/access/rest/common/parser/id_test.go @@ -1,4 +1,4 @@ -package request +package parser import ( "testing" diff --git a/engine/access/rest/common/parser/proposal_key.go b/engine/access/rest/common/parser/proposal_key.go new file mode 100644 index 00000000000..26aae6a7e50 --- /dev/null +++ b/engine/access/rest/common/parser/proposal_key.go @@ -0,0 +1,39 @@ +package parser + +import ( + "fmt" + + "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +type ProposalKey flow.ProposalKey + +func (p *ProposalKey) Parse(raw models.ProposalKey, chain flow.Chain) error { + address, err := ParseAddress(raw.Address, chain) + if err != nil { + return err + } + + keyIndex, err := util.ToUint32(raw.KeyIndex) + if err != nil { + return fmt.Errorf("invalid key index: %w", err) + } + + seqNumber, err := util.ToUint64(raw.SequenceNumber) + if err != nil { + return fmt.Errorf("invalid sequence number: %w", err) + } + + *p = ProposalKey(flow.ProposalKey{ + Address: address, + KeyIndex: keyIndex, + SequenceNumber: seqNumber, + }) + return nil +} + +func (p ProposalKey) 
Flow() flow.ProposalKey { + return flow.ProposalKey(p) +} diff --git a/engine/access/rest/common/parser/signature_test.go b/engine/access/rest/common/parser/signature_test.go new file mode 100644 index 00000000000..1bd22c4a1ca --- /dev/null +++ b/engine/access/rest/common/parser/signature_test.go @@ -0,0 +1,105 @@ +package parser + +import ( + "encoding/hex" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +func TestSignature_InvalidParse(t *testing.T) { + var signature Signature + + tests := []struct { + in string + err string + }{ + {"s", "invalid encoding"}, + {"", "missing value"}, + } + + for _, test := range tests { + err := signature.Parse(test.in) + assert.EqualError(t, err, test.err) + } + +} + +func TestSignature_ValidParse(t *testing.T) { + var signature Signature + err := signature.Parse("Mzg0MTQ5ODg4ZTg4MjRmYjMyNzM4MmM2ZWQ4ZjNjZjk1ODRlNTNlMzk4NGNhMDAxZmZjMjgwNzM4NmM0MzY3NTYxNmYwMTAwMTMzNDVkNjhmNzZkMmQ5YTBkYmI1MDA0MmEzOWRlOThlYzAzNTJjYTBkZWY3YjBlNjQ0YWJjOTQ=") + assert.NoError(t, err) + // todo test values +} + +func TestTransactionSignature_ValidParse(t *testing.T) { + var txSignature TransactionSignature + addr := "01cf0e2f2f715450" + sig := "c83665f5212fad065cd27d370ef80e5fbdd20cd57411af5c76076a15dced05ac6e6d9afa88cd7337bf9c869f6785ecc1c568ca593a99dfeec14e024c0cd78289" + sigHex, _ := hex.DecodeString(sig) + encodedSig := util.ToBase64(sigHex) + + extData := "f899a59caaf435410c08def0ecabfe372f7820ef71fb22bb82a0c8875499682d3a268e014e2e2254b8717b226368616c6c656e6765223a22306a7574344b2d334b696947307437432d776159767259447a6474765849594b323553414637767a5f4e493d222c226f726967696e223a2268747470733a2f2f74657374696e672e636f6d222c2274797065223a22776562617574686e2e676574227d" + extHex, _ := hex.DecodeString(extData) + encodedExt := util.ToBase64(extHex) + + err := txSignature.Parse(addr, "0", encodedSig, encodedExt, flow.Localnet.Chain()) + + assert.NoError(t, err) + assert.Equal(t, addr, txSignature.Address.String()) + assert.Equal(t, 0, txSignature.SignerIndex) + assert.Equal(t, uint32(0), txSignature.KeyIndex) + assert.Equal(t, sig, fmt.Sprintf("%x", txSignature.Signature)) +} + +func TestTransactionSignatures_ValidParse(t *testing.T) { + tests := []struct { + inAddresses []string + inSigs []string + inExtData []string + }{ + { + []string{"01cf0e2f2f715450"}, + []string{"c83665f5212fad065cd27d370ef80e5fbdd20cd57411af5c76076a15dced05ac6e6d9afa88cd7337bf9c869f6785ecc1c568ca593a99dfeec14e024c0cd78289"}, + []string{"f899a59caaf435410c08def0ecabfe372f7820ef71fb22bb82a0c8875499682d3a268e014e2e2254b8717b226368616c6c656e6765223a22306a7574344b2d334b696947307437432d776159767259447a6474765849594b323553414637767a5f4e493d222c226f726967696e223a2268747470733a2f2f74657374696e672e636f6d222c2274797065223a22776562617574686e2e676574227d"}, + }, + { + []string{"ee82856bf20e2aa6", "e03daebed8ca0615"}, + []string{"223665f5212fad065cd27d370ef80e5fbdd20cd57411af5c76076a15dced05ac6e6d9afa88cd7337bf9c869f6785ecc1c568ca593a99dfeec14e024c0cd78289", "5553665f5212fad065cd27d370ef80e5fbdd20cd57411af5c76076a15dced05ac6e6d9afa88cd7337bf9c869f6785ecc1c568ca593a99dfeec14e024c0cd7822"}, + []string{"", ""}, + }, + } + + var txSigantures TransactionSignatures + chain := flow.Localnet.Chain() + for _, test := range tests { + sigs := make([]models.TransactionSignature, len(test.inAddresses)) + for i, a := range test.inAddresses { + sigHex, _ := 
hex.DecodeString(test.inSigs[i]) + encodedSig := util.ToBase64(sigHex) + extHex, _ := hex.DecodeString(test.inExtData[i]) + encodedExt := util.ToBase64(extHex) + sigs[i].Signature = encodedSig + sigs[i].ExtensionData = encodedExt + sigs[i].KeyIndex = "0" + sigs[i].Address = a + } + + err := txSigantures.Parse(sigs, chain) + assert.NoError(t, err) + + assert.Equal(t, len(txSigantures), len(sigs)) + for i, sig := range sigs { + assert.Equal(t, sig.Address, txSigantures[i].Address.String()) + assert.Equal(t, 0, txSigantures[i].SignerIndex) + assert.Equal(t, uint32(0), txSigantures[i].KeyIndex) + assert.Equal(t, test.inSigs[i], fmt.Sprintf("%x", txSigantures[i].Signature)) + assert.Equal(t, test.inExtData[i], fmt.Sprintf("%x", txSigantures[i].ExtensionData)) + } + } +} diff --git a/engine/access/rest/common/parser/signatures.go b/engine/access/rest/common/parser/signatures.go new file mode 100644 index 00000000000..1c509427bb3 --- /dev/null +++ b/engine/access/rest/common/parser/signatures.go @@ -0,0 +1,116 @@ +package parser + +import ( + "fmt" + + "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +type TransactionSignature flow.TransactionSignature + +func (s *TransactionSignature) Parse( + rawAddress string, + rawKeyIndex string, + rawSignature string, + rawExtensionData string, + chain flow.Chain, +) error { + address, err := ParseAddress(rawAddress, chain) + if err != nil { + return err + } + + keyIndex, err := util.ToUint32(rawKeyIndex) + if err != nil { + return fmt.Errorf("invalid key index: %w", err) + } + + var signature Signature + err = signature.Parse(rawSignature) + if err != nil { + return fmt.Errorf("invalid signature: %w", err) + } + + var extensionData ExtensionData + err = extensionData.Parse(rawExtensionData) + if err != nil { + return fmt.Errorf("invalid extension data: %w", err) + } + + *s = TransactionSignature(flow.TransactionSignature{ + Address: address, + KeyIndex: keyIndex, + Signature: signature, + ExtensionData: extensionData, + }) + + return nil +} + +func (s TransactionSignature) Flow() flow.TransactionSignature { + return flow.TransactionSignature(s) +} + +type TransactionSignatures []TransactionSignature + +func (t *TransactionSignatures) Parse(rawSigs []models.TransactionSignature, chain flow.Chain) error { + signatures := make([]TransactionSignature, len(rawSigs)) + for i, sig := range rawSigs { + var signature TransactionSignature + err := signature.Parse(sig.Address, sig.KeyIndex, sig.Signature, sig.ExtensionData, chain) + if err != nil { + return err + } + signatures[i] = signature + } + + *t = signatures + return nil +} + +func (t TransactionSignatures) Flow() []flow.TransactionSignature { + sigs := make([]flow.TransactionSignature, len(t)) + for i, sig := range t { + sigs[i] = sig.Flow() + } + return sigs +} + +type Signature []byte + +func (s *Signature) Parse(raw string) error { + if raw == "" { + return fmt.Errorf("missing value") + } + + signatureBytes, err := util.FromBase64(raw) + if err != nil { + return fmt.Errorf("invalid encoding") + } + + *s = signatureBytes + return nil +} + +func (s Signature) Flow() []byte { + return s +} + +type ExtensionData []byte + +func (s *ExtensionData) Parse(raw string) error { + // Allow empty + extensionDataBytes, err := util.FromBase64(raw) + if err != nil { + return fmt.Errorf("invalid encoding") + } + + *s = extensionDataBytes + return nil +} + +func (s ExtensionData) Flow() []byte { + return s +} 
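As a minimal sketch of how the new parser types in this package compose, the snippet below (illustrative only, not part of this diff) parses the raw JSON fields of a transaction signature and converts the result back to the canonical flow type via Flow(). It assumes the Localnet chain and placeholder signature bytes, and uses only APIs shown in the hunks above (ParseAddress, TransactionSignature.Parse, util.ToBase64); parseSignature itself is a hypothetical helper.

package example

import (
	"fmt"

	"github.com/onflow/flow-go/engine/access/rest/common/parser"
	"github.com/onflow/flow-go/engine/access/rest/util"
	"github.com/onflow/flow-go/model/flow"
)

// parseSignature is a hypothetical helper, not part of this change set.
func parseSignature() (flow.TransactionSignature, error) {
	chain := flow.Localnet.Chain()

	// Raw values as they would arrive in a REST request body: a 16-character
	// hex address and a base64-encoded signature. The bytes are placeholders.
	rawAddress := "f8d6e0586b0a20c7"
	rawSignature := util.ToBase64([]byte{0xde, 0xad, 0xbe, 0xef})

	var sig parser.TransactionSignature
	// Key index is passed as a decimal string; extension data may be empty.
	if err := sig.Parse(rawAddress, "0", rawSignature, "", chain); err != nil {
		return flow.TransactionSignature{}, err
	}

	// Flow() converts the parsed wrapper back to the canonical flow type.
	result := sig.Flow()
	fmt.Printf("signer %s, key index %d\n", result.Address, result.KeyIndex)
	return result, nil
}

The same Parse-then-Flow() pairing is used by Arguments, ProposalKey, and Transaction elsewhere in this package, so a handler can validate every raw field the same way before touching the backend.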
diff --git a/engine/access/rest/common/parser/transaction.go b/engine/access/rest/common/parser/transaction.go new file mode 100644 index 00000000000..c7a44ec2a93 --- /dev/null +++ b/engine/access/rest/common/parser/transaction.go @@ -0,0 +1,133 @@ +package parser + +import ( + "fmt" + "io" + + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/http/models" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" +) + +const maxAuthorizers = 100 + +type Transaction flow.TransactionBody + +func (t *Transaction) Parse(raw io.Reader, chain flow.Chain) error { + var tx models.TransactionsBody + err := common.ParseBody(raw, &tx) + if err != nil { + return err + } + + if tx.ProposalKey == nil { + return fmt.Errorf("proposal key not provided") + } + if tx.Script == "" { + return fmt.Errorf("script not provided") + } + if tx.Payer == "" { + return fmt.Errorf("payer not provided") + } + if len(tx.Authorizers) > maxAuthorizers { + return fmt.Errorf("too many authorizers. Maximum authorizers allowed: %d", maxAuthorizers) + } + if len(tx.Arguments) > MaxAllowedScriptArguments { + return fmt.Errorf("too many arguments. Maximum arguments allowed: %d", MaxAllowedScriptArguments) + } + if tx.ReferenceBlockId == "" { + return fmt.Errorf("reference block not provided") + } + if len(tx.EnvelopeSignatures) == 0 { + return fmt.Errorf("envelope signatures not provided") + } + + var args Arguments + err = args.Parse(tx.Arguments) + if err != nil { + return err + } + + payer, err := ParseAddress(tx.Payer, chain) + if err != nil { + return fmt.Errorf("invalid payer: %w", err) + } + + auths := make([]flow.Address, len(tx.Authorizers)) + for i, auth := range tx.Authorizers { + a, err := ParseAddress(auth, chain) + if err != nil { + return err + } + + auths[i] = a + } + + var proposal ProposalKey + err = proposal.Parse(*tx.ProposalKey, chain) + if err != nil { + return err + } + + var payloadSigs TransactionSignatures + err = payloadSigs.Parse(tx.PayloadSignatures, chain) + if err != nil { + return err + } + + var envelopeSigs TransactionSignatures + err = envelopeSigs.Parse(tx.EnvelopeSignatures, chain) + if err != nil { + return err + } + + // script comes in as a base64 encoded string, decode base64 back to a string here + script, err := util.FromBase64(tx.Script) + if err != nil { + return fmt.Errorf("invalid transaction script encoding") + } + + var blockID ID + err = blockID.Parse(tx.ReferenceBlockId) + if err != nil { + return fmt.Errorf("invalid reference block ID: %w", err) + } + + gasLimit, err := util.ToUint64(tx.GasLimit) + if err != nil { + return fmt.Errorf("invalid gas limit: %w", err) + } + + flowTransaction, err := flow.NewTransactionBody(flow.UntrustedTransactionBody{ + ReferenceBlockID: blockID.Flow(), + Script: script, + Arguments: args.Flow(), + GasLimit: gasLimit, + ProposalKey: proposal.Flow(), + Payer: payer, + Authorizers: auths, + PayloadSignatures: payloadSigs.Flow(), + EnvelopeSignatures: envelopeSigs.Flow(), + }) + if err != nil { + return fmt.Errorf("could not construct transaction body: %w", err) + } + + // we use the gRPC method of converting the incoming transaction to a Flow transaction since + // it sets the signer_index appropriately. 
+ entityTransaction := convert.TransactionToMessage(*flowTransaction) + flowTx, err := convert.MessageToTransaction(entityTransaction, chain) + if err != nil { + return err + } + + *t = Transaction(flowTx) + + return nil +} + +func (t Transaction) Flow() flow.TransactionBody { + return flow.TransactionBody(t) +} diff --git a/engine/access/rest/common/parser/transaction_test.go b/engine/access/rest/common/parser/transaction_test.go new file mode 100644 index 00000000000..fe13c8bb902 --- /dev/null +++ b/engine/access/rest/common/parser/transaction_test.go @@ -0,0 +1,137 @@ +package parser + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func buildTransaction() map[string]interface{} { + tx := unittest.TransactionFixture() + tx.Arguments = [][]uint8{} + tx.PayloadSignatures = []flow.TransactionSignature{} + auth := make([]string, len(tx.Authorizers)) + for i, a := range tx.Authorizers { + auth[i] = a.String() + } + + return map[string]interface{}{ + "script": util.ToBase64(tx.Script), + "arguments": tx.Arguments, + "reference_block_id": tx.ReferenceBlockID.String(), + "gas_limit": fmt.Sprintf("%d", tx.GasLimit), + "payer": tx.Payer.String(), + "proposal_key": map[string]interface{}{ + "address": tx.ProposalKey.Address.String(), + "key_index": fmt.Sprintf("%d", tx.ProposalKey.KeyIndex), + "sequence_number": fmt.Sprintf("%d", tx.ProposalKey.SequenceNumber), + }, + "authorizers": auth, + "envelope_signatures": []map[string]interface{}{{ + "address": tx.EnvelopeSignatures[0].Address.String(), + "key_index": fmt.Sprintf("%d", tx.EnvelopeSignatures[0].KeyIndex), + "signature": util.ToBase64(tx.EnvelopeSignatures[0].Signature), + }}, + } +} + +func transactionToReader(tx map[string]interface{}) io.Reader { + res, _ := json.Marshal(tx) + return bytes.NewReader(res) +} + +func TestTransaction_InvalidParse(t *testing.T) { + tests := []struct { + inputField string + inputValue string + output string + }{ + {"script", "-1", "invalid transaction script encoding"}, + {"arguments", "-1", `request body contains an invalid value for the "arguments" field (at position 17)`}, + {"reference_block_id", "-1", "invalid reference block ID: invalid ID format"}, + {"gas_limit", "-1", "invalid gas limit: value must be an unsigned 64 bit integer"}, + {"payer", "-1", "invalid payer: invalid address"}, + {"authorizers", "-1", `request body contains an invalid value for the "authorizers" field (at position 34)`}, + {"proposal_key", "-1", `request body contains an invalid value for the "proposal_key" field (at position 288)`}, + {"envelope_signatures", "", `request body contains an invalid value for the "envelope_signatures" field (at position 75)`}, + {"envelope_signatures", "[]", `request body contains an invalid value for the "envelope_signatures" field (at position 77)`}, + } + + for _, test := range tests { + tx := buildTransaction() + tx[test.inputField] = test.inputValue + input := transactionToReader(tx) + + var transaction Transaction + err := transaction.Parse(input, flow.Testnet.Chain()) + + assert.EqualError(t, err, test.output) + } + + keyTests := []struct { + inputField string + inputValue string + output string + }{ + {"address", "-1", "invalid address"}, + {"key_index", "-1", `invalid key index: value must be an unsigned 32 bit integer`}, + {"sequence_number", "-1", "invalid sequence number: value must be 
an unsigned 64 bit integer"}, + } + + for _, test := range keyTests { + tx := buildTransaction() + tx["proposal_key"].(map[string]interface{})[test.inputField] = test.inputValue + input := transactionToReader(tx) + + var transaction Transaction + err := transaction.Parse(input, flow.Testnet.Chain()) + + assert.EqualError(t, err, test.output) + } + + sigTests := []struct { + inputField string + inputValue string + output string + }{ + {"address", "-1", "invalid address"}, + {"key_index", "-1", `invalid key index: value must be an unsigned 32 bit integer`}, + {"signature", "-1", "invalid signature: invalid encoding"}, + } + + for _, test := range sigTests { + tx := buildTransaction() + tx["envelope_signatures"].([]map[string]interface{})[0][test.inputField] = test.inputValue + input := transactionToReader(tx) + + var transaction Transaction + err := transaction.Parse(input, flow.Testnet.Chain()) + + assert.EqualError(t, err, test.output) + } +} + +func TestTransaction_ValidParse(t *testing.T) { + script := `access(all) fun main() {}` + tx := buildTransaction() + tx["script"] = util.ToBase64([]byte(script)) + input := transactionToReader(tx) + + var transaction Transaction + err := transaction.Parse(input, flow.Testnet.Chain()) + + assert.NoError(t, err) + assert.Equal(t, tx["payer"], transaction.Flow().Payer.String()) + assert.Equal(t, script, string(transaction.Flow().Script)) + assert.Equal(t, tx["reference_block_id"], transaction.Flow().ReferenceBlockID.String()) + assert.Equal(t, tx["gas_limit"], fmt.Sprint(transaction.Flow().GasLimit)) + assert.Equal(t, len(tx["authorizers"].([]string)), len(transaction.Flow().Authorizers)) +} diff --git a/engine/access/rest/common/request.go b/engine/access/rest/common/request.go new file mode 100644 index 00000000000..efabf0661ec --- /dev/null +++ b/engine/access/rest/common/request.go @@ -0,0 +1,87 @@ +package common + +import ( + "net/http" + "strings" + + "github.com/gorilla/mux" + + "github.com/onflow/flow-go/engine/access/rest/common/middleware" + "github.com/onflow/flow-go/model/flow" +) + +// Request a convenience wrapper around the http request to make it easy to read request query params +type Request struct { + *http.Request + ExpandFields map[string]bool + selectFields []string + Chain flow.Chain +} + +func (rd *Request) Expands(field string) bool { + return rd.ExpandFields[field] +} + +func (rd *Request) Selects() []string { + return rd.selectFields +} + +func (rd *Request) GetVar(name string) string { + vars := mux.Vars(rd.Request) + return vars[name] +} + +func (rd *Request) GetVars(name string) []string { + vars := mux.Vars(rd.Request) + return toStringArray(vars[name]) +} + +func (rd *Request) GetQueryParam(name string) string { + return rd.Request.URL.Query().Get(name) +} + +func (rd *Request) GetQueryParams(name string) []string { + param := rd.Request.URL.Query().Get(name) + return toStringArray(param) +} + +// Decorate takes http request and applies functions to produce our custom +// request object decorated with values we need +func Decorate(r *http.Request, chain flow.Chain) *Request { + decoratedReq := &Request{ + Request: r, + Chain: chain, + } + + if expandFields, found := middleware.GetFieldsToExpand(r); found { + decoratedReq.ExpandFields = SliceToMap(expandFields) + } + + if selectFields, found := middleware.GetFieldsToSelect(r); found { + decoratedReq.selectFields = selectFields + } + + return decoratedReq +} + +func toStringArray(in string) []string { + // currently, the swagger generated Go REST client is incorrectly 
doing a `fmt.Sprintf("%v", id)` for the id slice + // resulting in the client sending the ids in the format [id1 id2 id3...]. This is a temporary workaround to + // accommodate the client for now by doing a strings.Fields if commas are not present. + // Issue to to fix the client: https://github.com/onflow/flow/issues/698 + in = strings.TrimSuffix(in, "]") + in = strings.TrimPrefix(in, "[") + var out []string + + if len(in) == 0 { + return []string{} + } + + if strings.Contains(in, ",") { + out = strings.Split(in, ",") + } else { + out = strings.Fields(in) + } + + return out +} diff --git a/engine/access/rest/common/utils.go b/engine/access/rest/common/utils.go new file mode 100644 index 00000000000..6141c373433 --- /dev/null +++ b/engine/access/rest/common/utils.go @@ -0,0 +1,85 @@ +package common + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "strings" +) + +// SliceToMap converts a slice of strings into a map where each string +// in the slice becomes a key in the map with the value set to true. +func SliceToMap(values []string) map[string]bool { + valueMap := make(map[string]bool, len(values)) + for _, v := range values { + valueMap[v] = true + } + return valueMap +} + +// ParseBody parses the input data into the destination interface and returns any decoding errors +// updated to be more user-friendly. It also checks that there is exactly one json object in the input +func ParseBody(raw io.Reader, dst interface{}) error { + dec := json.NewDecoder(raw) + dec.DisallowUnknownFields() + + err := dec.Decode(&dst) + if err != nil { + var syntaxError *json.SyntaxError + var unmarshalTypeError *json.UnmarshalTypeError + + switch { + case errors.As(err, &syntaxError): + return fmt.Errorf("request body contains badly-formed JSON (at position %d)", syntaxError.Offset) + case errors.Is(err, io.ErrUnexpectedEOF): + return fmt.Errorf("request body contains badly-formed JSON") + case errors.As(err, &unmarshalTypeError): + return fmt.Errorf("request body contains an invalid value for the %q field (at position %d)", unmarshalTypeError.Field, unmarshalTypeError.Offset) + case strings.HasPrefix(err.Error(), "json: unknown field "): + fieldName := strings.TrimPrefix(err.Error(), "json: unknown field ") + return fmt.Errorf("request body contains unknown field %s", fieldName) + case errors.Is(err, io.EOF): + return fmt.Errorf("request body must not be empty") + default: + return err + } + } + + if dst == nil { + return fmt.Errorf("request body must not be empty") + } + + // verify the request contained exactly one json object + err = dec.Decode(&struct{}{}) + if err != io.EOF { + return fmt.Errorf("request body must only contain a single JSON object") + } + + return nil +} + +// ConvertInterfaceToArrayOfStrings converts a slice of interface{} to a slice of strings. +// +// No errors are expected during normal operations. +func ConvertInterfaceToArrayOfStrings(value interface{}) ([]string, error) { + if strSlice, ok := value.([]string); ok { + return strSlice, nil + } + + interfaceSlice, ok := value.([]interface{}) + if !ok { + return nil, fmt.Errorf("value must be an array. got %T", value) + } + + result := make([]string, len(interfaceSlice)) + for i, v := range interfaceSlice { + str, ok := v.(string) + if !ok { + return nil, fmt.Errorf("value must be an array of strings. 
got %T", v) + } + result[i] = str + } + + return result, nil +} diff --git a/engine/access/rest/common/utils_test.go b/engine/access/rest/common/utils_test.go new file mode 100644 index 00000000000..326e24f4d37 --- /dev/null +++ b/engine/access/rest/common/utils_test.go @@ -0,0 +1,124 @@ +package common + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_ParseBody(t *testing.T) { + + invalid := []struct { + in string + err string + }{ + {"{f}", "request body contains badly-formed JSON (at position 2)"}, + {"foo", "request body contains badly-formed JSON (at position 2)"}, + {"", "request body must not be empty"}, + {`{"foo": "bar"`, "request body contains badly-formed JSON"}, + {`{"foo": "bar" "foo2":"bar2"}`, "request body contains badly-formed JSON (at position 15)"}, + {`{"foo":"bar"}, {}`, "request body must only contain a single JSON object"}, + {`[][]`, "request body must only contain a single JSON object"}, + } + + for i, test := range invalid { + readerIn := strings.NewReader(test.in) + var out interface{} + err := ParseBody(readerIn, out) + assert.EqualError(t, err, test.err, fmt.Sprintf("test #%d failed", i)) + } + + type body struct { + Foo string + Bar bool + Zoo uint64 + } + var b body + err := ParseBody(strings.NewReader(`{ "foo": "test", "bar": true }`), &b) + assert.NoError(t, err) + assert.Equal(t, b.Bar, true) + assert.Equal(t, b.Foo, "test") + assert.Equal(t, b.Zoo, uint64(0)) + + err = ParseBody(strings.NewReader(`{ "foo": false }`), &b) + assert.EqualError(t, err, `request body contains an invalid value for the "Foo" field (at position 14)`) +} + +func TestConvertInterfaceToArrayOfStrings(t *testing.T) { + tests := []struct { + name string + input interface{} + expect []string + expectErr bool + }{ + { + name: "Valid slice of strings", + input: []string{"a", "b", "c"}, + expect: []string{"a", "b", "c"}, + expectErr: false, + }, + { + name: "Valid slice of interfaces containing strings", + input: []interface{}{"a", "b", "c"}, + expect: []string{"a", "b", "c"}, + expectErr: false, + }, + { + name: "Empty slice", + input: []interface{}{}, + expect: []string{}, + expectErr: false, + }, + { + name: "Array contains nil value", + input: []interface{}{"a", nil, "c"}, + expect: nil, + expectErr: true, + }, + { + name: "Mixed types in slice", + input: []interface{}{"a", 123, "c"}, + expect: nil, + expectErr: true, + }, + { + name: "Non-array input", + input: 42, + expect: nil, + expectErr: true, + }, + { + name: "Nil input", + input: nil, + expect: nil, + expectErr: true, + }, + { + name: "Slice with non-string interface values", + input: []interface{}{true, false}, + expect: nil, + expectErr: true, + }, + { + name: "Slice with nested slices", + input: []interface{}{[]string{"a"}}, + expect: nil, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := ConvertInterfaceToArrayOfStrings(tt.input) + if (err != nil) != tt.expectErr { + t.Fatalf("unexpected error status. got: %v, want error: %v", err, tt.expectErr) + } + if !reflect.DeepEqual(result, tt.expect) { + t.Fatalf("unexpected result. 
got: %v, want: %v", result, tt.expect) + } + }) + } +} diff --git a/engine/access/rest/events.go b/engine/access/rest/events.go deleted file mode 100644 index 2a79939bc21..00000000000 --- a/engine/access/rest/events.go +++ /dev/null @@ -1,56 +0,0 @@ -package rest - -import ( - "fmt" - - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" - - "github.com/onflow/flow-go/access" -) - -const blockQueryParam = "block_ids" -const eventTypeQuery = "type" - -// GetEvents for the provided block range or list of block IDs filtered by type. -func GetEvents(r *request.Request, backend access.API, _ models.LinkGenerator) (interface{}, error) { - req, err := r.GetEventsRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - // if the request has block IDs provided then return events for block IDs - var blocksEvents models.BlocksEvents - if len(req.BlockIDs) > 0 { - events, err := backend.GetEventsForBlockIDs(r.Context(), req.Type, req.BlockIDs) - if err != nil { - return nil, err - } - - blocksEvents.Build(events) - return blocksEvents, nil - } - - // if end height is provided with special values then load the height - if req.EndHeight == request.FinalHeight || req.EndHeight == request.SealedHeight { - latest, _, err := backend.GetLatestBlockHeader(r.Context(), req.EndHeight == request.SealedHeight) - if err != nil { - return nil, err - } - - req.EndHeight = latest.Height - // special check after we resolve special height value - if req.StartHeight > req.EndHeight { - return nil, NewBadRequestError(fmt.Errorf("current retrieved end height value is lower than start height")) - } - } - - // if request provided block height range then return events for that range - events, err := backend.GetEventsForHeightRange(r.Context(), req.Type, req.StartHeight, req.EndHeight) - if err != nil { - return nil, err - } - - blocksEvents.Build(events) - return blocksEvents, nil -} diff --git a/engine/access/rest/events_test.go b/engine/access/rest/events_test.go deleted file mode 100644 index 560ca224968..00000000000 --- a/engine/access/rest/events_test.go +++ /dev/null @@ -1,218 +0,0 @@ -package rest - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "testing" - "time" - - "github.com/onflow/flow-go/engine/access/rest/util" - - "github.com/onflow/flow-go/access/mock" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" - - mocks "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func TestGetEvents(t *testing.T) { - backend := &mock.API{} - events := generateEventsMocks(backend, 5) - - allBlockIDs := make([]string, len(events)) - for i, e := range events { - allBlockIDs[i] = e.BlockID.String() - } - startHeight := fmt.Sprintf("%d", events[0].BlockHeight) - endHeight := fmt.Sprintf("%d", events[len(events)-1].BlockHeight) - - testVectors := []testVector{ - // valid - { - description: "Get events for a single block by ID", - request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "", "", []string{events[0].BlockID.String()}), - expectedStatus: http.StatusOK, - expectedResponse: testBlockEventResponse([]flow.BlockEvents{events[0]}), - }, - { - description: "Get events by all block IDs", - request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "", "", allBlockIDs), - expectedStatus: http.StatusOK, - expectedResponse: testBlockEventResponse(events), - }, - { - description: "Get events for height range", - request: 
getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", startHeight, endHeight, nil), - expectedStatus: http.StatusOK, - expectedResponse: testBlockEventResponse(events), - }, - { - description: "Get invalid - invalid height format", - request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "0", "sealed", nil), - expectedStatus: http.StatusOK, - expectedResponse: testBlockEventResponse(events), - }, - // invalid - { - description: "Get invalid - missing all fields", - request: getEventReq(t, "", "", "", nil), - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400,"message":"must provide either block IDs or start and end height range"}`, - }, - { - description: "Get invalid - missing query event type", - request: getEventReq(t, "", "", "", []string{events[0].BlockID.String()}), - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400,"message":"event type must be provided"}`, - }, - { - description: "Get invalid - missing end height", - request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "100", "", nil), - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400,"message":"must provide either block IDs or start and end height range"}`, - }, - { - description: "Get invalid - start height bigger than end height", - request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "100", "50", nil), - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400,"message":"start height must be less than or equal to end height"}`, - }, - { - description: "Get invalid - too big interval", - request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "0", "5000", nil), - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400,"message":"height range 5000 exceeds maximum allowed of 250"}`, - }, - { - description: "Get invalid - can not provide all params", - request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "100", "120", []string{"10e782612a014b5c9c7d17994d7e67157064f3dd42fa92cd080bfb0fe22c3f71"}), - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400,"message":"can only provide either block IDs or start and end height range"}`, - }, - { - description: "Get invalid - invalid height format", - request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "foo", "120", nil), - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400,"message":"invalid start height: invalid height format"}`, - }, - { - description: "Get invalid - latest block smaller than start", - request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "100000", "sealed", nil), - expectedStatus: http.StatusBadRequest, - expectedResponse: `{"code":400,"message":"current retrieved end height value is lower than start height"}`, - }, - } - - for _, test := range testVectors { - t.Run(test.description, func(t *testing.T) { - assertResponse(t, test.request, test.expectedStatus, test.expectedResponse, backend) - }) - } - -} - -func getEventReq(t *testing.T, eventType string, start string, end string, blockIDs []string) *http.Request { - u, _ := url.Parse("/v1/events") - q := u.Query() - - if len(blockIDs) > 0 { - q.Add(blockQueryParam, strings.Join(blockIDs, ",")) - } - - if start != "" && end != "" { - q.Add(startHeightQueryParam, start) - q.Add(endHeightQueryParam, end) - } - - q.Add(eventTypeQuery, eventType) - - u.RawQuery = q.Encode() - - req, err := http.NewRequest("GET", u.String(), nil) - require.NoError(t, err) - - return req -} - -func generateEventsMocks(backend *mock.API, n int) []flow.BlockEvents { - events := make([]flow.BlockEvents, n) - ids := 
make([]flow.Identifier, n) - - for i := 0; i < n; i++ { - header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(uint64(i))) - ids[i] = header.ID() - - events[i] = unittest.BlockEventsFixture(header, 2) - - backend.Mock. - On("GetEventsForBlockIDs", mocks.Anything, mocks.Anything, []flow.Identifier{header.ID()}). - Return([]flow.BlockEvents{events[i]}, nil) - } - - backend.Mock. - On("GetEventsForBlockIDs", mocks.Anything, mocks.Anything, ids). - Return(events, nil) - - backend.Mock.On( - "GetEventsForHeightRange", - mocks.Anything, - mocks.Anything, - events[0].BlockHeight, - events[len(events)-1].BlockHeight, - ).Return(events, nil) - - latestBlock := unittest.BlockHeaderFixture() - latestBlock.Height = uint64(n - 1) - - // default not found - backend.Mock. - On("GetEventsForBlockIDs", mocks.Anything, mocks.Anything, mocks.Anything). - Return(nil, status.Error(codes.NotFound, "not found")) - - backend.Mock. - On("GetEventsForHeightRange", mocks.Anything, mocks.Anything). - Return(nil, status.Error(codes.NotFound, "not found")) - - backend.Mock. - On("GetLatestBlockHeader", mocks.Anything, true). - Return(latestBlock, flow.BlockStatusSealed, nil) - - return events -} - -func testBlockEventResponse(events []flow.BlockEvents) string { - res := make([]string, len(events)) - - for i, e := range events { - events := make([]string, len(e.Events)) - - for i, ev := range e.Events { - events[i] = fmt.Sprintf(`{ - "type": "%s", - "transaction_id": "%s", - "transaction_index": "%d", - "event_index": "%d", - "payload": "%s" - }`, ev.Type, ev.TransactionID, ev.TransactionIndex, ev.EventIndex, util.ToBase64(ev.Payload)) - } - - res[i] = fmt.Sprintf(`{ - "block_id": "%s", - "block_height": "%d", - "block_timestamp": "%s", - "events": [%s] - }`, - e.BlockID.String(), - e.BlockHeight, - e.BlockTimestamp.Format(time.RFC3339Nano), - strings.Join(events, ","), - ) - } - - return fmt.Sprintf(`[%s]`, strings.Join(res, ",")) -} diff --git a/engine/access/rest/execution_result.go b/engine/access/rest/execution_result.go deleted file mode 100644 index b0583d43b0d..00000000000 --- a/engine/access/rest/execution_result.go +++ /dev/null @@ -1,61 +0,0 @@ -package rest - -import ( - "fmt" - - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" -) - -// GetExecutionResultsByBlockIDs gets Execution Result payload by block IDs. -func GetExecutionResultsByBlockIDs(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - req, err := r.GetExecutionResultByBlockIDsRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - // for each block ID we retrieve execution result - results := make([]models.ExecutionResult, len(req.BlockIDs)) - for i, id := range req.BlockIDs { - res, err := backend.GetExecutionResultForBlockID(r.Context(), id) - if err != nil { - return nil, err - } - - var response models.ExecutionResult - err = response.Build(res, link) - if err != nil { - return nil, err - } - results[i] = response - } - - return results, nil -} - -// GetExecutionResultByID gets execution result by the ID. 
-func GetExecutionResultByID(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - req, err := r.GetExecutionResultRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - res, err := backend.GetExecutionResultByID(r.Context(), req.ID) - if err != nil { - return nil, err - } - - if res == nil { - err := fmt.Errorf("execution result with ID: %s not found", req.ID.String()) - return nil, NewNotFoundError(err.Error(), err) - } - - var response models.ExecutionResult - err = response.Build(res, link) - if err != nil { - return nil, err - } - - return response, nil -} diff --git a/engine/access/rest/handler.go b/engine/access/rest/handler.go deleted file mode 100644 index 028176fc9e0..00000000000 --- a/engine/access/rest/handler.go +++ /dev/null @@ -1,169 +0,0 @@ -package rest - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" - "github.com/onflow/flow-go/engine/access/rest/util" - fvmErrors "github.com/onflow/flow-go/fvm/errors" - "github.com/onflow/flow-go/model/flow" - - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/access" -) - -const MaxRequestSize = 2 << 20 // 2MB - -// ApiHandlerFunc is a function that contains endpoint handling logic, -// it fetches necessary resources and returns an error or response model. -type ApiHandlerFunc func( - r *request.Request, - backend access.API, - generator models.LinkGenerator, -) (interface{}, error) - -// Handler is custom http handler implementing custom handler function. -// Handler function allows easier handling of errors and responses as it -// wraps functionality for handling error and responses outside of endpoint handling. 
-type Handler struct { - logger zerolog.Logger - backend access.API - linkGenerator models.LinkGenerator - apiHandlerFunc ApiHandlerFunc - chain flow.Chain -} - -func NewHandler( - logger zerolog.Logger, - backend access.API, - handlerFunc ApiHandlerFunc, - generator models.LinkGenerator, - chain flow.Chain, -) *Handler { - return &Handler{ - logger: logger, - backend: backend, - apiHandlerFunc: handlerFunc, - linkGenerator: generator, - chain: chain, - } -} - -// ServerHTTP function acts as a wrapper to each request providing common handling functionality -// such as logging, error handling, request decorators -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // create a logger - errLog := h.logger.With().Str("request_url", r.URL.String()).Logger() - - // limit requested body size - r.Body = http.MaxBytesReader(w, r.Body, MaxRequestSize) - err := r.ParseForm() - if err != nil { - h.errorHandler(w, err, errLog) - return - } - - // create request decorator with parsed values - decoratedRequest := request.Decorate(r, h.chain) - - // execute handler function and check for error - response, err := h.apiHandlerFunc(decoratedRequest, h.backend, h.linkGenerator) - if err != nil { - h.errorHandler(w, err, errLog) - return - } - - // apply the select filter if any select fields have been specified - response, err = util.SelectFilter(response, decoratedRequest.Selects()) - if err != nil { - h.errorHandler(w, err, errLog) - return - } - - // write response to response stream - h.jsonResponse(w, http.StatusOK, response, errLog) -} - -func (h *Handler) errorHandler(w http.ResponseWriter, err error, errorLogger zerolog.Logger) { - // rest status type error should be returned with status and user message provided - var statusErr StatusError - if errors.As(err, &statusErr) { - h.errorResponse(w, statusErr.Status(), statusErr.UserMessage(), errorLogger) - return - } - - // handle cadence errors - cadenceError := fvmErrors.Find(err, fvmErrors.ErrCodeCadenceRunTimeError) - if cadenceError != nil { - msg := fmt.Sprintf("Cadence error: %s", cadenceError.Error()) - h.errorResponse(w, http.StatusBadRequest, msg, errorLogger) - return - } - - // handle grpc status error returned from the backend calls, we are forwarding the message to the client - if se, ok := status.FromError(err); ok { - if se.Code() == codes.NotFound { - msg := fmt.Sprintf("Flow resource not found: %s", se.Message()) - h.errorResponse(w, http.StatusNotFound, msg, errorLogger) - return - } - if se.Code() == codes.InvalidArgument { - msg := fmt.Sprintf("Invalid Flow argument: %s", se.Message()) - h.errorResponse(w, http.StatusBadRequest, msg, errorLogger) - return - } - if se.Code() == codes.Internal { - msg := fmt.Sprintf("Invalid Flow request: %s", se.Message()) - h.errorResponse(w, http.StatusBadRequest, msg, errorLogger) - return - } - } - - // stop going further - catch all error - msg := "internal server error" - errorLogger.Error().Err(err).Msg(msg) - h.errorResponse(w, http.StatusInternalServerError, msg, errorLogger) -} - -// jsonResponse builds a JSON response and send it to the client -func (h *Handler) jsonResponse(w http.ResponseWriter, code int, response interface{}, errLogger zerolog.Logger) { - w.Header().Set("Content-Type", "application/json; charset=UTF-8") - - // serialize response to JSON and handler errors - encodedResponse, err := json.MarshalIndent(response, "", "\t") - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - errLogger.Error().Err(err).Str("response", 
string(encodedResponse)).Msg("failed to indent response") - return - } - - w.WriteHeader(code) - // write response to response stream - _, err = w.Write(encodedResponse) - if err != nil { - errLogger.Error().Err(err).Str("response", string(encodedResponse)).Msg("failed to write http response") - } -} - -// errorResponse sends an HTTP error response to the client with the given return code -// and a model error with the given response message in the response body -func (h *Handler) errorResponse( - w http.ResponseWriter, - returnCode int, - responseMessage string, - logger zerolog.Logger, -) { - // create error response model - modelError := models.ModelError{ - Code: int32(returnCode), - Message: responseMessage, - } - h.jsonResponse(w, returnCode, modelError, logger) -} diff --git a/engine/access/rest/http/handler.go b/engine/access/rest/http/handler.go new file mode 100644 index 00000000000..eb634806f16 --- /dev/null +++ b/engine/access/rest/http/handler.go @@ -0,0 +1,80 @@ +package http + +import ( + "net/http" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +// ApiHandlerFunc is a function that contains endpoint handling logic; +// it fetches necessary resources and returns an error or response model. +type ApiHandlerFunc func( + r *common.Request, + backend access.API, + generator models.LinkGenerator, +) (interface{}, error) + +// Handler is a custom HTTP handler implementing a custom handler function. +// The handler function allows easier handling of errors and responses, as it +// wraps the error and response handling outside of the endpoint logic.
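The comment above describes the new Handler, which now embeds a shared common.HttpHandler instead of carrying its own logging, verification, and JSON plumbing as the deleted file did. A minimal, self-contained sketch of that embedding pattern, using illustrative names (BaseHandler, Endpoint) rather than the actual flow-go types:

```go
package handlers

import (
	"encoding/json"
	"log"
	"net/http"
)

// BaseHandler owns cross-cutting concerns shared by every endpoint.
type BaseHandler struct {
	MaxRequestSize int64
}

// VerifyRequest applies shared request checks, e.g. a body size limit.
func (b *BaseHandler) VerifyRequest(w http.ResponseWriter, r *http.Request) error {
	r.Body = http.MaxBytesReader(w, r.Body, b.MaxRequestSize)
	return r.ParseForm()
}

// JsonResponse serializes any response model with a status code.
func (b *BaseHandler) JsonResponse(w http.ResponseWriter, code int, v interface{}) {
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(code)
	if err := json.NewEncoder(w).Encode(v); err != nil {
		log.Printf("failed to write response: %v", err)
	}
}

// Endpoint embeds the base and supplies only endpoint-specific logic.
type Endpoint struct {
	BaseHandler
	Handle func(r *http.Request) (interface{}, error)
}

func (e *Endpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if err := e.VerifyRequest(w, r); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	response, err := e.Handle(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	e.JsonResponse(w, http.StatusOK, response)
}
```

The design payoff is visible in the hunk that follows: cross-cutting behavior lives once in the base, and each endpoint handler contributes only its ApiHandlerFunc-style logic.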
+type Handler struct { + *common.HttpHandler + backend access.API + linkGenerator models.LinkGenerator + apiHandlerFunc ApiHandlerFunc +} + +func NewHandler( + logger zerolog.Logger, + backend access.API, + handlerFunc ApiHandlerFunc, + generator models.LinkGenerator, + chain flow.Chain, + maxRequestSize int64, + maxResponseSize int64, +) *Handler { + handler := &Handler{ + backend: backend, + apiHandlerFunc: handlerFunc, + linkGenerator: generator, + HttpHandler: common.NewHttpHandler(logger, chain, maxRequestSize, maxResponseSize), + } + + return handler +} + +// ServeHTTP acts as a wrapper for each request, providing common handling functionality +// such as logging, error handling, and request decoration +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // create a logger + errLog := h.Logger.With().Str("request_url", r.URL.String()).Logger() + + err := h.VerifyRequest(w, r) + if err != nil { + return + } + decoratedRequest := common.Decorate(r, h.Chain) + + // execute handler function and check for error + response, err := h.apiHandlerFunc(decoratedRequest, h.backend, h.linkGenerator) + if err != nil { + h.ErrorHandler(w, err, errLog) + return + } + + // apply the select filter if any select fields have been specified + response, err = util.SelectFilter(response, decoratedRequest.Selects()) + if err != nil { + h.ErrorHandler(w, err, errLog) + return + } + + // write response to response stream + h.JsonResponse(w, http.StatusOK, response, errLog) +} diff --git a/engine/access/rest/http/models/account.go b/engine/access/rest/http/models/account.go new file mode 100644 index 00000000000..7f43b257d81 --- /dev/null +++ b/engine/access/rest/http/models/account.go @@ -0,0 +1,86 @@ +package models + +import ( + "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +const expandableKeys = "keys" +const expandableContracts = "contracts" + +func (a *Account) Build(flowAccount *flow.Account, link models.LinkGenerator, expand map[string]bool) error { + a.Address = flowAccount.Address.String() + a.Balance = util.FromUint(flowAccount.Balance) + a.Expandable = &AccountExpandable{} + + if expand[expandableKeys] { + var keys AccountKeys + keys.Build(flowAccount.Keys) + a.Keys = keys + } else { + a.Expandable.Keys = expandableKeys + } + + if expand[expandableContracts] { + contracts := make(map[string]string, len(flowAccount.Contracts)) + for name, code := range flowAccount.Contracts { + contracts[name] = util.ToBase64(code) + } + a.Contracts = contracts + } else { + a.Expandable.Contracts = expandableContracts + } + + var self models.Links + err := self.Build(link.AccountLink(a.Address)) + if err != nil { + return err + } + a.Links = &self + + return nil +} + +func (a *AccountPublicKey) Build(k flow.AccountPublicKey) { + sigAlgo := SigningAlgorithm(k.SignAlgo.String()) + hashAlgo := HashingAlgorithm(k.HashAlgo.String()) + + a.Index = util.FromUint(k.Index) + a.PublicKey = k.PublicKey.String() + a.SigningAlgorithm = &sigAlgo + a.HashingAlgorithm = &hashAlgo + a.SequenceNumber = util.FromUint(k.SeqNumber) + a.Weight = util.FromUint(uint64(k.Weight)) + a.Revoked = k.Revoked +} + +type AccountKeys []AccountPublicKey + +func (a *AccountKeys) Build(accountKeys []flow.AccountPublicKey) { + keys := make([]AccountPublicKey, len(accountKeys)) + for i, k := range accountKeys { + var key AccountPublicKey + key.Build(k) + keys[i] = key + } + + *a = keys +} + +// Build uses the
model AccountPublicKeys type for GetAccountKeys call +// AccountPublicKeys is an auto-generated type from the openapi spec +func (a *AccountPublicKeys) Build(accountKeys []flow.AccountPublicKey) { + keys := make([]AccountPublicKey, len(accountKeys)) + for i, k := range accountKeys { + var key AccountPublicKey + key.Build(k) + keys[i] = key + } + + a.Keys = keys +} + +func (b *AccountBalance) Build(balance uint64) { + b.Balance = util.FromUint(balance) +} diff --git a/engine/access/rest/models/model_account.go b/engine/access/rest/http/models/model_account.go similarity index 81% rename from engine/access/rest/models/model_account.go rename to engine/access/rest/http/models/model_account.go index bca079b86b2..822a059d965 100644 --- a/engine/access/rest/models/model_account.go +++ b/engine/access/rest/http/models/model_account.go @@ -8,6 +8,8 @@ */ package models +import "github.com/onflow/flow-go/engine/access/rest/common/models" + type Account struct { Address string `json:"address"` // Flow balance of the account. @@ -15,5 +17,5 @@ type Account struct { Keys []AccountPublicKey `json:"keys,omitempty"` Contracts map[string]string `json:"contracts,omitempty"` Expandable *AccountExpandable `json:"_expandable"` - Links *Links `json:"_links,omitempty"` + Links *models.Links `json:"_links,omitempty"` } diff --git a/engine/access/rest/models/model_account__expandable.go b/engine/access/rest/http/models/model_account__expandable.go similarity index 100% rename from engine/access/rest/models/model_account__expandable.go rename to engine/access/rest/http/models/model_account__expandable.go diff --git a/engine/access/rest/http/models/model_account_balance.go b/engine/access/rest/http/models/model_account_balance.go new file mode 100644 index 00000000000..7449e24d4cd --- /dev/null +++ b/engine/access/rest/http/models/model_account_balance.go @@ -0,0 +1,14 @@ +/* + * Access API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 1.0.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +type AccountBalance struct { + // Flow balance of the account. 
+ Balance string `json:"balance"` +} diff --git a/engine/access/rest/models/model_account_public_key.go b/engine/access/rest/http/models/model_account_public_key.go similarity index 100% rename from engine/access/rest/models/model_account_public_key.go rename to engine/access/rest/http/models/model_account_public_key.go diff --git a/engine/access/rest/http/models/model_account_public_keys.go b/engine/access/rest/http/models/model_account_public_keys.go new file mode 100644 index 00000000000..81bc0adbcb6 --- /dev/null +++ b/engine/access/rest/http/models/model_account_public_keys.go @@ -0,0 +1,13 @@ +/* + * Access API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 1.0.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +type AccountPublicKeys struct { + Keys []AccountPublicKey `json:"keys"` +} diff --git a/engine/access/rest/http/models/model_compatible_range.go b/engine/access/rest/http/models/model_compatible_range.go new file mode 100644 index 00000000000..1937299f7dd --- /dev/null +++ b/engine/access/rest/http/models/model_compatible_range.go @@ -0,0 +1,15 @@ +/* + * Access API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 1.0.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +// A compatible version range. +type CompatibleRange struct { + StartHeight string `json:"start_height"` + EndHeight string `json:"end_height"` +} diff --git a/engine/access/rest/models/model_hashing_algorithm.go b/engine/access/rest/http/models/model_hashing_algorithm.go similarity index 100% rename from engine/access/rest/models/model_hashing_algorithm.go rename to engine/access/rest/http/models/model_hashing_algorithm.go diff --git a/engine/access/rest/models/model_network_parameters.go b/engine/access/rest/http/models/model_network_parameters.go similarity index 100% rename from engine/access/rest/models/model_network_parameters.go rename to engine/access/rest/http/models/model_network_parameters.go diff --git a/engine/access/rest/http/models/model_node_version_info.go b/engine/access/rest/http/models/model_node_version_info.go new file mode 100644 index 00000000000..a010fb0df1d --- /dev/null +++ b/engine/access/rest/http/models/model_node_version_info.go @@ -0,0 +1,20 @@ +/* + * Access API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 1.0.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +type NodeVersionInfo struct { + Semver string `json:"semver"` + Commit string `json:"commit"` + SporkId string `json:"spork_id"` + ProtocolVersion string `json:"protocol_version"` + ProtocolStateVersion string `json:"protocol_state_version"` + SporkRootBlockHeight string `json:"spork_root_block_height"` + NodeRootBlockHeight string `json:"node_root_block_height"` + CompatibleRange *CompatibleRange `json:"compatible_range,omitempty"` +} diff --git a/engine/access/rest/models/model_signing_algorithm.go b/engine/access/rest/http/models/model_signing_algorithm.go similarity index 100% rename from engine/access/rest/models/model_signing_algorithm.go rename to engine/access/rest/http/models/model_signing_algorithm.go diff --git a/engine/access/rest/http/models/model_transactions_body.go 
b/engine/access/rest/http/models/model_transactions_body.go new file mode 100644 index 00000000000..59ad8318f61 --- /dev/null +++ b/engine/access/rest/http/models/model_transactions_body.go @@ -0,0 +1,26 @@ +/* + * Access API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 1.0.0 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package models + +import "github.com/onflow/flow-go/engine/access/rest/common/models" + +type TransactionsBody struct { + // Base64 encoded content of the Cadence script. + Script string `json:"script"` + // A list of arguments each encoded as Base64 passed in the [JSON-Cadence interchange format](https://docs.onflow.org/cadence/json-cadence-spec/). + Arguments []string `json:"arguments"` + ReferenceBlockId string `json:"reference_block_id"` + // The limit on the amount of computation a transaction is allowed to perform. + GasLimit string `json:"gas_limit"` + Payer string `json:"payer"` + ProposalKey *models.ProposalKey `json:"proposal_key"` + Authorizers []string `json:"authorizers"` + PayloadSignatures []models.TransactionSignature `json:"payload_signatures"` + EnvelopeSignatures []models.TransactionSignature `json:"envelope_signatures"` +} diff --git a/engine/access/rest/http/models/network.go b/engine/access/rest/http/models/network.go new file mode 100644 index 00000000000..66f42d3edb5 --- /dev/null +++ b/engine/access/rest/http/models/network.go @@ -0,0 +1,9 @@ +package models + +import ( + accessmodel "github.com/onflow/flow-go/model/access" +) + +func (t *NetworkParameters) Build(params *accessmodel.NetworkParameters) { + t.ChainId = params.ChainID.String() +} diff --git a/engine/access/rest/http/models/node_version_info.go b/engine/access/rest/http/models/node_version_info.go new file mode 100644 index 00000000000..7209fb9f076 --- /dev/null +++ b/engine/access/rest/http/models/node_version_info.go @@ -0,0 +1,23 @@ +package models + +import ( + "github.com/onflow/flow-go/engine/access/rest/util" + accessmodel "github.com/onflow/flow-go/model/access" +) + +func (t *NodeVersionInfo) Build(params *accessmodel.NodeVersionInfo) { + t.Semver = params.Semver + t.Commit = params.Commit + t.SporkId = params.SporkId.String() + t.ProtocolStateVersion = util.FromUint(params.ProtocolStateVersion) + t.ProtocolVersion = util.FromUint(params.ProtocolVersion) + t.SporkRootBlockHeight = util.FromUint(params.SporkRootBlockHeight) + t.NodeRootBlockHeight = util.FromUint(params.NodeRootBlockHeight) + + if params.CompatibleRange != nil { + t.CompatibleRange = &CompatibleRange{ + StartHeight: util.FromUint(params.CompatibleRange.StartHeight), + EndHeight: util.FromUint(params.CompatibleRange.EndHeight), + } + } +} diff --git a/engine/access/rest/http/request/create_transaction.go b/engine/access/rest/http/request/create_transaction.go new file mode 100644 index 00000000000..d3eaa55c527 --- /dev/null +++ b/engine/access/rest/http/request/create_transaction.go @@ -0,0 +1,34 @@ +package request + +import ( + "io" + + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/model/flow" +) + +type CreateTransaction struct { + Transaction flow.TransactionBody +} + +func CreateTransactionRequest(r *common.Request) (CreateTransaction, error) { + var req CreateTransaction + err := req.Build(r) + return req, err +} + +func (c *CreateTransaction) Build(r *common.Request) error { +
return c.Parse(r.Body, r.Chain) +} + +func (c *CreateTransaction) Parse(rawTransaction io.Reader, chain flow.Chain) error { + var tx parser.Transaction + err := tx.Parse(rawTransaction, chain) + if err != nil { + return err + } + + c.Transaction = tx.Flow() + return nil +} diff --git a/engine/access/rest/http/request/get_account.go b/engine/access/rest/http/request/get_account.go new file mode 100644 index 00000000000..52c2a9c1bf0 --- /dev/null +++ b/engine/access/rest/http/request/get_account.go @@ -0,0 +1,56 @@ +package request + +import ( + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/model/flow" +) + +const addressVar = "address" +const blockHeightQuery = "block_height" + +type GetAccount struct { + Address flow.Address + Height uint64 +} + +// GetAccountRequest extracts necessary variables and query parameters from the provided request, +// builds a GetAccount instance, and validates it. +// +// No errors are expected during normal operation. +func GetAccountRequest(r *common.Request) (GetAccount, error) { + var req GetAccount + err := req.Build(r) + return req, err +} + +func (g *GetAccount) Build(r *common.Request) error { + return g.Parse( + r.GetVar(addressVar), + r.GetQueryParam(blockHeightQuery), + r.Chain, + ) +} + +func (g *GetAccount) Parse(rawAddress string, rawHeight string, chain flow.Chain) error { + address, err := parser.ParseAddress(rawAddress, chain) + if err != nil { + return err + } + + var height Height + err = height.Parse(rawHeight) + if err != nil { + return err + } + + g.Address = address + g.Height = height.Flow() + + // default to last block + if g.Height == EmptyHeight { + g.Height = SealedHeight + } + + return nil +} diff --git a/engine/access/rest/http/request/get_account_balance.go b/engine/access/rest/http/request/get_account_balance.go new file mode 100644 index 00000000000..3a8cb0b6fb5 --- /dev/null +++ b/engine/access/rest/http/request/get_account_balance.go @@ -0,0 +1,57 @@ +package request + +import ( + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/model/flow" +) + +type GetAccountBalance struct { + Address flow.Address + Height uint64 +} + +// GetAccountBalanceRequest extracts necessary variables and query parameters from the provided request, +// builds a GetAccountBalance instance, and validates it. +// +// No errors are expected during normal operation. 
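GetAccount.Parse above maps an empty height to SealedHeight, so requests without a block_height default to the latest sealed block. A standalone sketch of this sentinel-height parsing, with assumed sentinel values (the real constants live in the request package's height.go, which this diff only renames):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// Assumed sentinel values for illustration only; the real constants are
// defined in height.go and may differ.
const (
	EmptyHeight  uint64 = math.MaxUint64
	SealedHeight uint64 = math.MaxUint64 - 1
	FinalHeight  uint64 = math.MaxUint64 - 2
)

// parseHeight maps "", "sealed", "final", or a decimal string to a height.
func parseHeight(raw string) (uint64, error) {
	switch raw {
	case "":
		return EmptyHeight, nil
	case "sealed":
		return SealedHeight, nil
	case "final":
		return FinalHeight, nil
	default:
		h, err := strconv.ParseUint(raw, 10, 64)
		if err != nil {
			return 0, fmt.Errorf("invalid height format")
		}
		return h, nil
	}
}

func main() {
	h, _ := parseHeight("")
	if h == EmptyHeight {
		h = SealedHeight // mirror the "default to last sealed block" rule
	}
	fmt.Println(h == SealedHeight) // true
}
```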
+func GetAccountBalanceRequest(r *common.Request) (GetAccountBalance, error) { + var req GetAccountBalance + err := req.Build(r) + return req, err +} + +func (g *GetAccountBalance) Build(r *common.Request) error { + return g.Parse( + r.GetVar(addressVar), + r.GetQueryParam(blockHeightQuery), + r.Chain, + ) +} + +func (g *GetAccountBalance) Parse( + rawAddress string, + rawHeight string, + chain flow.Chain, +) error { + address, err := parser.ParseAddress(rawAddress, chain) + if err != nil { + return err + } + + var height Height + err = height.Parse(rawHeight) + if err != nil { + return err + } + + g.Address = address + g.Height = height.Flow() + + // default to last block + if g.Height == EmptyHeight { + g.Height = SealedHeight + } + + return nil +} diff --git a/engine/access/rest/http/request/get_account_balance_test.go b/engine/access/rest/http/request/get_account_balance_test.go new file mode 100644 index 00000000000..4fd2b73c554 --- /dev/null +++ b/engine/access/rest/http/request/get_account_balance_test.go @@ -0,0 +1,53 @@ +package request + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/flow" +) + +func Test_GetAccountBalance_InvalidParse(t *testing.T) { + var getAccountBalance GetAccountBalance + + tests := []struct { + address string + height string + err string + }{ + {"", "", "invalid address"}, + {"f8d6e0586b0a20c7", "-1", "invalid height format"}, + } + + chain := flow.Localnet.Chain() + for i, test := range tests { + err := getAccountBalance.Parse(test.address, test.height, chain) + assert.EqualError(t, err, test.err, fmt.Sprintf("test #%d failed", i)) + } +} + +func Test_GetAccountBalance_ValidParse(t *testing.T) { + + var getAccountBalance GetAccountBalance + + addr := "f8d6e0586b0a20c7" + chain := flow.Localnet.Chain() + err := getAccountBalance.Parse(addr, "", chain) + assert.NoError(t, err) + assert.Equal(t, getAccountBalance.Address.String(), addr) + assert.Equal(t, getAccountBalance.Height, SealedHeight) + + err = getAccountBalance.Parse(addr, "100", chain) + assert.NoError(t, err) + assert.Equal(t, getAccountBalance.Height, uint64(100)) + + err = getAccountBalance.Parse(addr, sealed, chain) + assert.NoError(t, err) + assert.Equal(t, getAccountBalance.Height, SealedHeight) + + err = getAccountBalance.Parse(addr, final, chain) + assert.NoError(t, err) + assert.Equal(t, getAccountBalance.Height, FinalHeight) +} diff --git a/engine/access/rest/http/request/get_account_key.go b/engine/access/rest/http/request/get_account_key.go new file mode 100644 index 00000000000..79190b5980b --- /dev/null +++ b/engine/access/rest/http/request/get_account_key.go @@ -0,0 +1,71 @@ +package request + +import ( + "fmt" + + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +const indexVar = "index" + +type GetAccountKey struct { + Address flow.Address + Index uint32 + Height uint64 +} + +// GetAccountKeyRequest extracts necessary variables and query parameters from the provided request, +// builds a GetAccountKey instance, and validates it. +// +// No errors are expected during normal operation. 
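The invalid-parse tests above follow Go's table-driven idiom: a slice of input/expected-error rows driven through one loop body. A minimal standalone version of the pattern, with a hypothetical parseHeight helper standing in for the package's parsing code:

```go
package request_test

import (
	"fmt"
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
)

// parseHeight is a hypothetical stand-in for the package's height parsing.
func parseHeight(raw string) (uint64, error) {
	v, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid height format")
	}
	return v, nil
}

func Test_ParseHeight_Invalid(t *testing.T) {
	// one row per failure case; the loop body never changes
	tests := []struct {
		in  string
		err string
	}{
		{"-1", "invalid height format"},
		{"abc", "invalid height format"},
	}

	for i, test := range tests {
		_, err := parseHeight(test.in)
		assert.EqualError(t, err, test.err, fmt.Sprintf("test #%d failed", i))
	}
}
```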
+func GetAccountKeyRequest(r *common.Request) (GetAccountKey, error) { + var req GetAccountKey + err := req.Build(r) + return req, err +} + +func (g *GetAccountKey) Build(r *common.Request) error { + return g.Parse( + r.GetVar(addressVar), + r.GetVar(indexVar), + r.GetQueryParam(blockHeightQuery), + r.Chain, + ) +} + +func (g *GetAccountKey) Parse( + rawAddress string, + rawIndex string, + rawHeight string, + chain flow.Chain, +) error { + address, err := parser.ParseAddress(rawAddress, chain) + if err != nil { + return err + } + + index, err := util.ToUint32(rawIndex) + if err != nil { + return fmt.Errorf("invalid key index: %w", err) + } + + var height Height + err = height.Parse(rawHeight) + if err != nil { + return err + } + + g.Address = address + g.Index = index + g.Height = height.Flow() + + // default to last block + if g.Height == EmptyHeight { + g.Height = SealedHeight + } + + return nil +} diff --git a/engine/access/rest/http/request/get_account_key_test.go b/engine/access/rest/http/request/get_account_key_test.go new file mode 100644 index 00000000000..82c356c038e --- /dev/null +++ b/engine/access/rest/http/request/get_account_key_test.go @@ -0,0 +1,83 @@ +package request + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/flow" +) + +func Test_GetAccountKey_InvalidParse(t *testing.T) { + var getAccountKey GetAccountKey + + tests := []struct { + name string + address string + index string + height string + err string + }{ + { + "parse with invalid address", + "0xxxaddr", + "1", + "100", + "invalid address", + }, + { + "parse with invalid keyIndex", + "0xf8d6e0586b0a20c7", + "-1.2", + "100", + "invalid key index: value must be an unsigned 32 bit integer", + }, + { + "parse with invalid height", + "0xf8d6e0586b0a20c7", + "2", + "-100", + "invalid height format", + }, + } + + chain := flow.Localnet.Chain() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := getAccountKey.Parse(test.address, test.index, test.height, chain) + assert.EqualError(t, err, test.err) + }) + } +} + +func Test_GetAccountKey_ValidParse(t *testing.T) { + var getAccountKey GetAccountKey + + addr := "f8d6e0586b0a20c7" + keyIndex := "5" + height := "100" + chain := flow.Localnet.Chain() + err := getAccountKey.Parse(addr, keyIndex, height, chain) + assert.NoError(t, err) + assert.Equal(t, getAccountKey.Address.String(), addr) + assert.Equal(t, getAccountKey.Index, uint32(5)) + assert.Equal(t, getAccountKey.Height, uint64(100)) + + err = getAccountKey.Parse(addr, keyIndex, "", chain) + assert.NoError(t, err) + assert.Equal(t, getAccountKey.Address.String(), addr) + assert.Equal(t, getAccountKey.Index, uint32(5)) + assert.Equal(t, getAccountKey.Height, SealedHeight) + + err = getAccountKey.Parse(addr, keyIndex, "sealed", chain) + assert.NoError(t, err) + assert.Equal(t, getAccountKey.Address.String(), addr) + assert.Equal(t, getAccountKey.Index, uint32(5)) + assert.Equal(t, getAccountKey.Height, SealedHeight) + + err = getAccountKey.Parse(addr, keyIndex, "final", chain) + assert.NoError(t, err) + assert.Equal(t, getAccountKey.Address.String(), addr) + assert.Equal(t, getAccountKey.Index, uint32(5)) + assert.Equal(t, getAccountKey.Height, FinalHeight) +} diff --git a/engine/access/rest/http/request/get_account_keys.go b/engine/access/rest/http/request/get_account_keys.go new file mode 100644 index 00000000000..4dd75967fcb --- /dev/null +++ b/engine/access/rest/http/request/get_account_keys.go @@ -0,0 +1,57 @@ +package request + +import ( + 
"github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/model/flow" +) + +type GetAccountKeys struct { + Address flow.Address + Height uint64 +} + +// GetAccountKeysRequest extracts necessary variables and query parameters from the provided request, +// builds a GetAccountKeys instance, and validates it. +// +// No errors are expected during normal operation. +func GetAccountKeysRequest(r *common.Request) (GetAccountKeys, error) { + var req GetAccountKeys + err := req.Build(r) + return req, err +} + +func (g *GetAccountKeys) Build(r *common.Request) error { + return g.Parse( + r.GetVar(addressVar), + r.GetQueryParam(blockHeightQuery), + r.Chain, + ) +} + +func (g *GetAccountKeys) Parse( + rawAddress string, + rawHeight string, + chain flow.Chain, +) error { + address, err := parser.ParseAddress(rawAddress, chain) + if err != nil { + return err + } + + var height Height + err = height.Parse(rawHeight) + if err != nil { + return err + } + + g.Address = address + g.Height = height.Flow() + + // default to last block + if g.Height == EmptyHeight { + g.Height = SealedHeight + } + + return nil +} diff --git a/engine/access/rest/http/request/get_account_keys_test.go b/engine/access/rest/http/request/get_account_keys_test.go new file mode 100644 index 00000000000..d37b4f82ac6 --- /dev/null +++ b/engine/access/rest/http/request/get_account_keys_test.go @@ -0,0 +1,52 @@ +package request + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/flow" +) + +func Test_GetAccountKeys_InvalidParse(t *testing.T) { + var getAccountKeys GetAccountKeys + + tests := []struct { + address string + height string + err string + }{ + {"", "", "invalid address"}, + {"f8d6e0586b0a20c7", "-1", "invalid height format"}, + } + + chain := flow.Localnet.Chain() + for i, test := range tests { + err := getAccountKeys.Parse(test.address, test.height, chain) + assert.EqualError(t, err, test.err, fmt.Sprintf("test #%d failed", i)) + } +} + +func Test_GetAccountKeys_ValidParse(t *testing.T) { + var getAccountKeys GetAccountKeys + + addr := "f8d6e0586b0a20c7" + chain := flow.Localnet.Chain() + err := getAccountKeys.Parse(addr, "", chain) + assert.NoError(t, err) + assert.Equal(t, getAccountKeys.Address.String(), addr) + assert.Equal(t, getAccountKeys.Height, SealedHeight) + + err = getAccountKeys.Parse(addr, "100", chain) + assert.NoError(t, err) + assert.Equal(t, getAccountKeys.Height, uint64(100)) + + err = getAccountKeys.Parse(addr, sealed, chain) + assert.NoError(t, err) + assert.Equal(t, getAccountKeys.Height, SealedHeight) + + err = getAccountKeys.Parse(addr, final, chain) + assert.NoError(t, err) + assert.Equal(t, getAccountKeys.Height, FinalHeight) +} diff --git a/engine/access/rest/http/request/get_account_test.go b/engine/access/rest/http/request/get_account_test.go new file mode 100644 index 00000000000..89b3a9d1f8b --- /dev/null +++ b/engine/access/rest/http/request/get_account_test.go @@ -0,0 +1,44 @@ +package request + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/flow" +) + +func Test_GetAccount_InvalidParse(t *testing.T) { + var getAccount GetAccount + + tests := []struct { + address string + height string + err string + }{ + {"", "", "invalid address"}, + {"f8d6e0586b0a20c7", "-1", "invalid height format"}, + } + + chain := flow.Localnet.Chain() + for i, test := range tests { + err := 
getAccount.Parse(test.address, test.height, chain) + assert.EqualError(t, err, test.err, fmt.Sprintf("test #%d failed", i)) + } +} + +func Test_GetAccount_ValidParse(t *testing.T) { + var getAccount GetAccount + + addr := "f8d6e0586b0a20c7" + chain := flow.Localnet.Chain() + err := getAccount.Parse(addr, "", chain) + assert.NoError(t, err) + assert.Equal(t, getAccount.Address.String(), addr) + assert.Equal(t, getAccount.Height, SealedHeight) + + err = getAccount.Parse(addr, "100", chain) + assert.NoError(t, err) + assert.Equal(t, getAccount.Height, uint64(100)) +} diff --git a/engine/access/rest/http/request/get_block.go b/engine/access/rest/http/request/get_block.go new file mode 100644 index 00000000000..972cd2ee97b --- /dev/null +++ b/engine/access/rest/http/request/get_block.go @@ -0,0 +1,148 @@ +package request + +import ( + "fmt" + + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/model/flow" +) + +const heightQuery = "height" +const startHeightQuery = "start_height" +const endHeightQuery = "end_height" +const MaxBlockRequestHeightRange = 50 +const idParam = "id" + +type GetBlock struct { + Heights []uint64 + StartHeight uint64 + EndHeight uint64 + FinalHeight bool + SealedHeight bool +} + +// GetBlockRequest extracts necessary query parameters from the provided request, +// builds a GetBlock instance, and validates it. +// +// No errors are expected during normal operation. +func GetBlockRequest(r *common.Request) (GetBlock, error) { + var req GetBlock + err := req.Build(r) + return req, err +} + +func (g *GetBlock) Build(r *common.Request) error { + return g.Parse( + r.GetQueryParams(heightQuery), + r.GetQueryParam(startHeightQuery), + r.GetQueryParam(endHeightQuery), + ) +} + +func (g *GetBlock) HasHeights() bool { + return len(g.Heights) > 0 +} + +func (g *GetBlock) Parse(rawHeights []string, rawStart string, rawEnd string) error { + var height Height + err := height.Parse(rawStart) + if err != nil { + return err + } + g.StartHeight = height.Flow() + err = height.Parse(rawEnd) + if err != nil { + return err + } + g.EndHeight = height.Flow() + + var heights Heights + err = heights.Parse(rawHeights) + if err != nil { + return err + } + g.Heights = heights.Flow() + + // if both height and one or both of start and end height are provided + if len(g.Heights) > 0 && (g.StartHeight != EmptyHeight || g.EndHeight != EmptyHeight) { + return fmt.Errorf("can only provide either heights or start and end height range") + } + + // if neither height nor start and end height are provided + if len(heights) == 0 && (g.StartHeight == EmptyHeight || g.EndHeight == EmptyHeight) { + return fmt.Errorf("must provide either heights or start and end height range") + } + + if g.StartHeight > g.EndHeight { + return fmt.Errorf("start height must be less than or equal to end height") + } + // check whether the range exceeds the maximum, but only if the end height is not a special value, whose concrete height is not known yet + if g.EndHeight-g.StartHeight >= MaxBlockRequestHeightRange && g.EndHeight != FinalHeight && g.EndHeight != SealedHeight { + return fmt.Errorf("height range %d exceeds maximum allowed of %d", g.EndHeight-g.StartHeight, MaxBlockRequestHeightRange) + } + + if len(heights) > MaxBlockRequestHeightRange { + return fmt.Errorf("at most %d heights can be requested at a time", MaxBlockRequestHeightRange) + } + + // check that 'sealed' or 'final', when used, are the only values provided, since mixing them with explicit heights is not
encouraged + if len(heights) > 1 { + for _, h := range heights { + if h == Height(SealedHeight) || h == Height(FinalHeight) { + return fmt.Errorf("can not provide '%s' or '%s' values with other height values", final, sealed) + } + } + } else if len(heights) == 1 { + // if we have special values for heights set the booleans + g.FinalHeight = heights[0] == Height(FinalHeight) + g.SealedHeight = heights[0] == Height(SealedHeight) + } + + return nil +} + +type GetBlockByIDs struct { + IDs []flow.Identifier +} + +// GetBlockByIDsRequest extracts necessary variables from the provided request, +// builds a GetBlockByIDs instance, and validates it. +// +// No errors are expected during normal operation. +func GetBlockByIDsRequest(r *common.Request) (GetBlockByIDs, error) { + var req GetBlockByIDs + err := req.Build(r) + return req, err +} + +func (g *GetBlockByIDs) Build(r *common.Request) error { + return g.Parse( + r.GetVars(idParam), + ) +} + +func (g *GetBlockByIDs) Parse(rawIds []string) error { + var ids parser.IDs + err := ids.Parse(rawIds) + if err != nil { + return err + } + g.IDs = ids.Flow() + + return nil +} + +type GetBlockPayload struct { + GetByIDRequest +} + +// GetBlockPayloadRequest extracts necessary variables from the provided request, +// builds a GetBlockPayload instance, and validates it. +// +// No errors are expected during normal operation. +func GetBlockPayloadRequest(r *common.Request) (GetBlockPayload, error) { + var req GetBlockPayload + err := req.Build(r) + return req, err +} diff --git a/engine/access/rest/request/get_block_test.go b/engine/access/rest/http/request/get_block_test.go similarity index 100% rename from engine/access/rest/request/get_block_test.go rename to engine/access/rest/http/request/get_block_test.go diff --git a/engine/access/rest/http/request/get_collection.go b/engine/access/rest/http/request/get_collection.go new file mode 100644 index 00000000000..f7af68a6dcf --- /dev/null +++ b/engine/access/rest/http/request/get_collection.go @@ -0,0 +1,29 @@ +package request + +import ( + "github.com/onflow/flow-go/engine/access/rest/common" +) + +const ExpandsTransactions = "transactions" + +type GetCollection struct { + GetByIDRequest + ExpandsTransactions bool +} + +// GetCollectionRequest extracts necessary variables from the provided request, +// builds a GetCollection instance, and validates it. +// +// No errors are expected during normal operation. 
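The height-range validation in get_block.go above enforces three rules: explicit heights and a start/end range are mutually exclusive, one of the two forms must be present, and neither form may exceed MaxBlockRequestHeightRange (50). A self-contained sketch of those checks with a small truth table (the real Parse additionally resolves the 'sealed'/'final' sentinels):

```go
package main

import (
	"errors"
	"fmt"
)

const maxRange = 50 // mirrors MaxBlockRequestHeightRange

// validate reproduces the mutual-exclusion and range checks from the
// hunk above, with the sentinel-height handling omitted for brevity.
func validate(heights []uint64, start, end uint64, haveRange bool) error {
	if len(heights) > 0 && haveRange {
		return errors.New("can only provide either heights or start and end height range")
	}
	if len(heights) == 0 && !haveRange {
		return errors.New("must provide either heights or start and end height range")
	}
	if haveRange {
		if start > end {
			return errors.New("start height must be less than or equal to end height")
		}
		if end-start >= maxRange {
			return fmt.Errorf("height range %d exceeds maximum allowed of %d", end-start, maxRange)
		}
	}
	if len(heights) > maxRange {
		return fmt.Errorf("at most %d heights can be requested at a time", maxRange)
	}
	return nil
}

func main() {
	fmt.Println(validate(nil, 10, 20, true))         // <nil>: a valid range
	fmt.Println(validate([]uint64{5}, 10, 20, true)) // error: both forms given
	fmt.Println(validate(nil, 0, 100, true))         // error: range too large
}
```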
+func GetCollectionRequest(r *common.Request) (GetCollection, error) { + var req GetCollection + err := req.Build(r) + return req, err +} + +func (g *GetCollection) Build(r *common.Request) error { + err := g.GetByIDRequest.Build(r) + g.ExpandsTransactions = r.Expands(ExpandsTransactions) + + return err +} diff --git a/engine/access/rest/request/get_events.go b/engine/access/rest/http/request/get_events.go similarity index 77% rename from engine/access/rest/request/get_events.go rename to engine/access/rest/http/request/get_events.go index db4839343a1..dcb6eb0c15d 100644 --- a/engine/access/rest/request/get_events.go +++ b/engine/access/rest/http/request/get_events.go @@ -2,8 +2,9 @@ package request import ( "fmt" - "regexp" + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/model/flow" ) @@ -18,7 +19,17 @@ type GetEvents struct { BlockIDs []flow.Identifier } -func (g *GetEvents) Build(r *Request) error { +// GetEventsRequest extracts necessary variables from the provided request, +// builds a GetEvents instance, and validates it. +// +// No errors are expected during normal operation. +func GetEventsRequest(r *common.Request) (GetEvents, error) { + var req GetEvents + err := req.Build(r) + return req, err +} + +func (g *GetEvents) Build(r *common.Request) error { return g.Parse( r.GetQueryParam(eventTypeQuery), r.GetQueryParam(startHeightQuery), @@ -40,7 +51,7 @@ func (g *GetEvents) Parse(rawType string, rawStart string, rawEnd string, rawBlo } g.EndHeight = height.Flow() - var blockIDs IDs + var blockIDs parser.IDs err = blockIDs.Parse(rawBlockIDs) if err != nil { return err @@ -57,19 +68,15 @@ func (g *GetEvents) Parse(rawType string, rawStart string, rawEnd string, rawBlo return fmt.Errorf("must provide either block IDs or start and end height range") } - g.Type = rawType - if g.Type == "" { + if rawType == "" { return fmt.Errorf("event type must be provided") } - // match basic format A.address.contract.event (ignore err since regex will always compile) - basic, _ := regexp.MatchString(`[A-Z]\.[a-f0-9]{16}\.[\w+]*\.[\w+]*`, g.Type) - // match core events flow.event - core, _ := regexp.MatchString(`flow\.[\w]*`, g.Type) - - if !core && !basic { - return fmt.Errorf("invalid event type format") + eventType, err := parser.NewEventType(rawType) + if err != nil { + return err } + g.Type = eventType.Flow() // validate start end height option if g.StartHeight != EmptyHeight && g.EndHeight != EmptyHeight { diff --git a/engine/access/rest/request/get_events_test.go b/engine/access/rest/http/request/get_events_test.go similarity index 100% rename from engine/access/rest/request/get_events_test.go rename to engine/access/rest/http/request/get_events_test.go diff --git a/engine/access/rest/http/request/get_execution_result.go b/engine/access/rest/http/request/get_execution_result.go new file mode 100644 index 00000000000..4947cd8f07f --- /dev/null +++ b/engine/access/rest/http/request/get_execution_result.go @@ -0,0 +1,60 @@ +package request + +import ( + "fmt" + + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/model/flow" +) + +const idQuery = "id" + +type GetExecutionResultByBlockIDs struct { + BlockIDs []flow.Identifier +} + +// GetExecutionResultByBlockIDsRequest extracts necessary variables from the provided request, +// builds a GetExecutionResultByBlockIDs instance, and validates it. 
+// +// No errors are expected during normal operation. +func GetExecutionResultByBlockIDsRequest(r *common.Request) (GetExecutionResultByBlockIDs, error) { + var req GetExecutionResultByBlockIDs + err := req.Build(r) + return req, err +} + +func (g *GetExecutionResultByBlockIDs) Build(r *common.Request) error { + return g.Parse( + r.GetQueryParams(blockIDQuery), + ) +} + +func (g *GetExecutionResultByBlockIDs) Parse(rawIDs []string) error { + var ids parser.IDs + err := ids.Parse(rawIDs) + if err != nil { + return err + } + g.BlockIDs = ids.Flow() + + if len(g.BlockIDs) == 0 { + return fmt.Errorf("no block IDs provided") + } + + return nil +} + +type GetExecutionResult struct { + GetByIDRequest +} + +// GetExecutionResultRequest extracts necessary variables from the provided request, +// builds a GetExecutionResult instance, and validates it. +// +// No errors are expected during normal operation. +func GetExecutionResultRequest(r *common.Request) (GetExecutionResult, error) { + var req GetExecutionResult + err := req.Build(r) + return req, err +} diff --git a/engine/access/rest/http/request/get_script.go b/engine/access/rest/http/request/get_script.go new file mode 100644 index 00000000000..a01a025465a --- /dev/null +++ b/engine/access/rest/http/request/get_script.go @@ -0,0 +1,70 @@ +package request + +import ( + "fmt" + "io" + + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/model/flow" +) + +const blockIDQuery = "block_id" + +type GetScript struct { + BlockID flow.Identifier + BlockHeight uint64 + Script Script +} + +// GetScriptRequest extracts necessary variables from the provided request, +// builds a GetScript instance, and validates it. +// +// No errors are expected during normal operation. 
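Every request type in this refactor follows the same three-layer shape: a one-shot XxxRequest constructor, a Build method that pulls raw strings out of the common.Request, and a Parse method that validates them and is therefore unit-testable without any HTTP machinery. A minimal sketch with illustrative names (Request and GetWidget are not real flow-go types):

```go
package main

import (
	"errors"
	"fmt"
)

// Request stands in for common.Request: raw, untyped inputs from HTTP.
type Request struct {
	Vars map[string]string
}

type GetWidget struct {
	ID string
}

// GetWidgetRequest is the one-shot constructor: build and validate together.
func GetWidgetRequest(r *Request) (GetWidget, error) {
	var req GetWidget
	err := req.Build(r)
	return req, err
}

// Build only knows how to pull raw values out of the request...
func (g *GetWidget) Build(r *Request) error {
	return g.Parse(r.Vars["id"])
}

// ...while Parse owns validation, so it can be tested without HTTP.
func (g *GetWidget) Parse(rawID string) error {
	if rawID == "" {
		return errors.New("no ID provided")
	}
	g.ID = rawID
	return nil
}

func main() {
	req, err := GetWidgetRequest(&Request{Vars: map[string]string{"id": "42"}})
	fmt.Println(req.ID, err) // 42 <nil>
}
```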
+func GetScriptRequest(r *common.Request) (GetScript, error) { + var req GetScript + err := req.Build(r) + return req, err +} + +func (g *GetScript) Build(r *common.Request) error { + return g.Parse( + r.GetQueryParam(blockHeightQuery), + r.GetQueryParam(blockIDQuery), + r.Body, + ) +} + +func (g *GetScript) Parse(rawHeight string, rawID string, rawScript io.Reader) error { + var height Height + err := height.Parse(rawHeight) + if err != nil { + return err + } + g.BlockHeight = height.Flow() + + var id parser.ID + err = id.Parse(rawID) + if err != nil { + return err + } + g.BlockID = id.Flow() + + var script Script + err = script.Parse(rawScript) + if err != nil { + return err + } + g.Script = script + + // default to last sealed block + if g.BlockHeight == EmptyHeight && g.BlockID == flow.ZeroID { + g.BlockHeight = SealedHeight + } + + if g.BlockID != flow.ZeroID && g.BlockHeight != EmptyHeight { + return fmt.Errorf("can not provide both block ID and block height") + } + + return nil +} diff --git a/engine/access/rest/request/get_script_test.go b/engine/access/rest/http/request/get_script_test.go similarity index 94% rename from engine/access/rest/request/get_script_test.go rename to engine/access/rest/http/request/get_script_test.go index 99a14bf9f79..3b37a45eaa3 100644 --- a/engine/access/rest/request/get_script_test.go +++ b/engine/access/rest/http/request/get_script_test.go @@ -13,7 +13,7 @@ import ( func TestGetScript_InvalidParse(t *testing.T) { var getScript GetScript - validScript := fmt.Sprintf(`{ "script": "%s", "arguments": [] }`, util.ToBase64([]byte(`pub fun main() {}`))) + validScript := fmt.Sprintf(`{ "script": "%s", "arguments": [] }`, util.ToBase64([]byte(`access(all) fun main() {}`))) tests := []struct { height string id string @@ -36,7 +36,7 @@ func TestGetScript_InvalidParse(t *testing.T) { func TestGetScript_ValidParse(t *testing.T) { var getScript GetScript - source := "pub fun main() {}" + source := "access(all) fun main() {}" validScript := strings.NewReader(fmt.Sprintf(`{ "script": "%s", "arguments": [] }`, util.ToBase64([]byte(source)))) err := getScript.Parse("1", "", validScript) diff --git a/engine/access/rest/http/request/get_transaction.go b/engine/access/rest/http/request/get_transaction.go new file mode 100644 index 00000000000..0d5df1e541e --- /dev/null +++ b/engine/access/rest/http/request/get_transaction.go @@ -0,0 +1,88 @@ +package request + +import ( + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/model/flow" +) + +const resultExpandable = "result" +const blockIDQueryParam = "block_id" +const collectionIDQueryParam = "collection_id" + +type TransactionOptionals struct { + BlockID flow.Identifier + CollectionID flow.Identifier +} + +func (t *TransactionOptionals) Parse(r *common.Request) error { + var blockId parser.ID + err := blockId.Parse(r.GetQueryParam(blockIDQueryParam)) + if err != nil { + return err + } + t.BlockID = blockId.Flow() + + var collectionId parser.ID + err = collectionId.Parse(r.GetQueryParam(collectionIDQueryParam)) + if err != nil { + return err + } + t.CollectionID = collectionId.Flow() + + return nil +} + +type GetTransaction struct { + GetByIDRequest + TransactionOptionals + ExpandsResult bool +} + +// GetTransactionRequest extracts necessary variables from the provided request, +// builds a GetTransaction instance, and validates it. +// +// No errors are expected during normal operation. 
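Back in get_script.go above, Parse treats block ID and block height as mutually exclusive and falls back to the latest sealed block when neither is given. A small standalone illustration of that resolution rule (the blockRef type and its fields are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// blockRef is a hypothetical reduction of GetScript's two block inputs.
type blockRef struct {
	id     string // empty means "not set"
	height int64  // -1 means "not set"
}

// resolve applies the rule: ID and height are mutually exclusive,
// and the latest sealed block is the fallback.
func (b blockRef) resolve() (string, error) {
	switch {
	case b.id != "" && b.height >= 0:
		return "", errors.New("can not provide both block ID and block height")
	case b.id != "":
		return "block " + b.id, nil
	case b.height >= 0:
		return fmt.Sprintf("height %d", b.height), nil
	default:
		return "latest sealed block", nil
	}
}

func main() {
	for _, ref := range []blockRef{{"", -1}, {"abc", -1}, {"abc", 7}} {
		fmt.Println(ref.resolve())
	}
}
```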
+func GetTransactionRequest(r *common.Request) (GetTransaction, error) { + var req GetTransaction + err := req.Build(r) + return req, err +} + +func (g *GetTransaction) Build(r *common.Request) error { + err := g.TransactionOptionals.Parse(r) + if err != nil { + return err + } + + err = g.GetByIDRequest.Build(r) + g.ExpandsResult = r.Expands(resultExpandable) + + return err +} + +type GetTransactionResult struct { + GetByIDRequest + TransactionOptionals +} + +// GetTransactionResultRequest extracts necessary variables from the provided request, +// builds a GetTransactionResult instance, and validates it. +// +// No errors are expected during normal operation. +func GetTransactionResultRequest(r *common.Request) (GetTransactionResult, error) { + var req GetTransactionResult + err := req.Build(r) + return req, err +} + +func (g *GetTransactionResult) Build(r *common.Request) error { + err := g.TransactionOptionals.Parse(r) + if err != nil { + return err + } + + err = g.GetByIDRequest.Build(r) + + return err +} diff --git a/engine/access/rest/request/height.go b/engine/access/rest/http/request/height.go similarity index 100% rename from engine/access/rest/request/height.go rename to engine/access/rest/http/request/height.go diff --git a/engine/access/rest/request/height_test.go b/engine/access/rest/http/request/height_test.go similarity index 100% rename from engine/access/rest/request/height_test.go rename to engine/access/rest/http/request/height_test.go diff --git a/engine/access/rest/http/request/helpers.go b/engine/access/rest/http/request/helpers.go new file mode 100644 index 00000000000..faee5a437fd --- /dev/null +++ b/engine/access/rest/http/request/helpers.go @@ -0,0 +1,28 @@ +package request + +import ( + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/model/flow" +) + +type GetByIDRequest struct { + ID flow.Identifier +} + +func (g *GetByIDRequest) Build(r *common.Request) error { + return g.Parse( + r.GetVar(idQuery), + ) +} + +func (g *GetByIDRequest) Parse(rawID string) error { + var id parser.ID + err := id.Parse(rawID) + if err != nil { + return err + } + g.ID = id.Flow() + + return nil +} diff --git a/engine/access/rest/http/request/helpers_test.go b/engine/access/rest/http/request/helpers_test.go new file mode 100644 index 00000000000..de47bbb3959 --- /dev/null +++ b/engine/access/rest/http/request/helpers_test.go @@ -0,0 +1,21 @@ +package request + +import ( + "testing" + + "github.com/onflow/flow-go/utils/unittest" + + "github.com/stretchr/testify/assert" +) + +func Test_GetByID_Parse(t *testing.T) { + var getByID GetByIDRequest + + id := unittest.IdentifierFixture() + err := getByID.Parse(id.String()) + assert.NoError(t, err) + assert.Equal(t, getByID.ID, id) + + err = getByID.Parse("1") + assert.EqualError(t, err, "invalid ID format") +} diff --git a/engine/access/rest/http/request/script.go b/engine/access/rest/http/request/script.go new file mode 100644 index 00000000000..6050a4441dc --- /dev/null +++ b/engine/access/rest/http/request/script.go @@ -0,0 +1,44 @@ +package request + +import ( + "fmt" + "io" + + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/util" +) + +type scriptBody struct { + Script string `json:"script,omitempty"` + Arguments []string `json:"arguments,omitempty"` +} + +type Script struct { + Args parser.Arguments + Source []byte +} + 
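The Script type just introduced is populated from a JSON body containing a base64-encoded Cadence source plus base64-encoded JSON-Cadence arguments, as the script_test.go fixtures show. A client-side sketch of assembling such a body:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Cadence source and a JSON-Cadence argument, both sent base64-encoded,
	// matching the scriptBody struct and the script_test.go fixtures.
	source := `access(all) fun main(): Int { return 42 }`
	arg := `{"type": "String", "value": "hello"}`

	body, err := json.Marshal(map[string]interface{}{
		"script":    base64.StdEncoding.EncodeToString([]byte(source)),
		"arguments": []string{base64.StdEncoding.EncodeToString([]byte(arg))},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```

Note the Cadence fixtures in the renamed tests also move from the pre-1.0 `pub fun main()` syntax to `access(all) fun main()`.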
+func (s *Script) Parse(raw io.Reader) error { + var body scriptBody + err := common.ParseBody(raw, &body) + if err != nil { + return err + } + + source, err := util.FromBase64(body.Script) + if err != nil { + return fmt.Errorf("invalid script source encoding") + } + + var args parser.Arguments + err = args.Parse(body.Arguments) + if err != nil { + return err + } + + s.Source = source + s.Args = args + + return nil +} diff --git a/engine/access/rest/http/request/script_test.go b/engine/access/rest/http/request/script_test.go new file mode 100644 index 00000000000..ca5f6f7f7a8 --- /dev/null +++ b/engine/access/rest/http/request/script_test.go @@ -0,0 +1,48 @@ +package request + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/engine/access/rest/util" +) + +const validBody = "access(all) fun main() { }" + +var validBodyEncoded = util.ToBase64([]byte(validBody)) + +func TestScript_InvalidParse(t *testing.T) { + test := map[string]string{ + "": "request body must not be empty", + "foo": "request body contains badly-formed JSON (at position 2)", + `{ "script": "123", "arguments": [] }`: "invalid script source encoding", + fmt.Sprintf(`{ "script": "%s", "arguments": [123] }`, validBodyEncoded): `request body contains an invalid value for the "arguments" field (at position 69)`, + } + + for in, errOut := range test { + body := strings.NewReader(in) + var script Script + err := script.Parse(body) + assert.EqualError(t, err, errOut, in) + } +} + +func TestScript_ValidParse(t *testing.T) { + arg1 := []byte(`{"type": "String", "value": "hello" }`) + body := strings.NewReader(fmt.Sprintf( + `{ "script": "%s", "arguments": ["%s"] }`, + validBodyEncoded, + util.ToBase64(arg1), + )) + + var script Script + err := script.Parse(body) + + assert.NoError(t, err) + assert.Equal(t, 1, len(script.Args)) + assert.Equal(t, arg1, script.Args[0]) + assert.Equal(t, validBody, string(script.Source)) +} diff --git a/engine/access/rest/http/routes/account_balance.go b/engine/access/rest/http/routes/account_balance.go new file mode 100644 index 00000000000..44afc38f164 --- /dev/null +++ b/engine/access/rest/http/routes/account_balance.go @@ -0,0 +1,42 @@ +package routes + +import ( + "fmt" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/http/models" + "github.com/onflow/flow-go/engine/access/rest/http/request" +) + +// GetAccountBalance handler retrieves an account balance by address and block height and returns the response +func GetAccountBalance(r *common.Request, backend access.API, _ commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetAccountBalanceRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + // In case we receive special height values 'final' and 'sealed', + // fetch that height and overwrite request with it. 
+ isSealed := req.Height == request.SealedHeight + isFinal := req.Height == request.FinalHeight + if isFinal || isSealed { + header, _, err := backend.GetLatestBlockHeader(r.Context(), isSealed) + if err != nil { + err := fmt.Errorf("block with height: %d does not exist", req.Height) + return nil, common.NewNotFoundError(err.Error(), err) + } + req.Height = header.Height + } + + balance, err := backend.GetAccountBalanceAtBlockHeight(r.Context(), req.Address, req.Height) + if err != nil { + err = fmt.Errorf("failed to get account balance, reason: %w", err) + return nil, common.NewNotFoundError(err.Error(), err) + } + + var response models.AccountBalance + response.Build(balance) + return response, nil +} diff --git a/engine/access/rest/http/routes/account_balance_test.go b/engine/access/rest/http/routes/account_balance_test.go new file mode 100644 index 00000000000..df541547aae --- /dev/null +++ b/engine/access/rest/http/routes/account_balance_test.go @@ -0,0 +1,136 @@ +package routes_test + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + mocktestify "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestGetAccountBalance tests local getAccountBalance request. +// +// Runs the following tests: +// 1. Get account balance by address at latest sealed block. +// 2. Get account balance by address at latest finalized block. +// 3. Get account balance by address at height. +// 4. Get invalid account balance. +func TestGetAccountBalance(t *testing.T) { + backend := mock.NewAPI(t) + + t.Run("get balance by address at latest sealed block", func(t *testing.T) { + account := accountFixture(t) + var height uint64 = 100 + block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) + + req := getAccountBalanceRequest(t, account, router.SealedHeightQueryParam) + + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, true). + Return(block, flow.BlockStatusSealed, nil) + + backend.Mock. + On("GetAccountBalanceAtBlockHeight", mocktestify.Anything, account.Address, height). + Return(account.Balance, nil) + + expected := expectedAccountBalanceResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get balance by address at latest finalized block", func(t *testing.T) { + account := accountFixture(t) + var height uint64 = 100 + block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) + + req := getAccountBalanceRequest(t, account, router.FinalHeightQueryParam) + + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, false). + Return(block, flow.BlockStatusFinalized, nil) + + backend.Mock. + On("GetAccountBalanceAtBlockHeight", mocktestify.Anything, account.Address, height). + Return(account.Balance, nil) + + expected := expectedAccountBalanceResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get balance by address at height", func(t *testing.T) { + account := accountFixture(t) + var height uint64 = 1337 + req := getAccountBalanceRequest(t, account, fmt.Sprintf("%d", height)) + + backend.Mock. + On("GetAccountBalanceAtBlockHeight", mocktestify.Anything, account.Address, height). 
+ Return(account.Balance, nil) + + expected := expectedAccountBalanceResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get invalid", func(t *testing.T) { + tests := []struct { + url string + out string + }{ + {accountBalanceURL(t, "123", ""), `{"code":400, "message":"invalid address"}`}, + {accountBalanceURL(t, unittest.AddressFixture().String(), "foo"), `{"code":400, "message":"invalid height format"}`}, + } + + for i, test := range tests { + req, _ := http.NewRequest("GET", test.url, nil) + rr := router.ExecuteRequest(req, backend) + + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.JSONEq(t, test.out, rr.Body.String(), fmt.Sprintf("test #%d failed: %v", i, test)) + } + }) +} + +func accountBalanceURL(t *testing.T, address string, height string) string { + u, err := url.ParseRequestURI(fmt.Sprintf("/v1/accounts/%s/balance", address)) + require.NoError(t, err) + q := u.Query() + + if height != "" { + q.Add("block_height", height) + } + + u.RawQuery = q.Encode() + return u.String() +} + +func getAccountBalanceRequest(t *testing.T, account *flow.Account, height string) *http.Request { + req, err := http.NewRequest( + "GET", + accountBalanceURL(t, account.Address.String(), height), + nil, + ) + + require.NoError(t, err) + return req +} + +func expectedAccountBalanceResponse(account *flow.Account) string { + return fmt.Sprintf(` + { + "balance":"%d" + }`, + account.Balance, + ) +} diff --git a/engine/access/rest/http/routes/account_keys.go b/engine/access/rest/http/routes/account_keys.go new file mode 100644 index 00000000000..4b3a5647f79 --- /dev/null +++ b/engine/access/rest/http/routes/account_keys.go @@ -0,0 +1,73 @@ +package routes + +import ( + "fmt" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/http/request" + + "github.com/onflow/flow-go/engine/access/rest/http/models" +) + +// GetAccountKeyByIndex handler retrieves an account key by address and index and returns the response +func GetAccountKeyByIndex(r *common.Request, backend access.API, _ commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetAccountKeyRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + // In case we receive special height values 'final' and 'sealed', + // fetch that height and overwrite request with it. 
+	if req.Height == request.FinalHeight || req.Height == request.SealedHeight {
+		isSealed := req.Height == request.SealedHeight
+		header, _, err := backend.GetLatestBlockHeader(r.Context(), isSealed)
+		if err != nil {
+			err := fmt.Errorf("block with height: %d does not exist", req.Height)
+			return nil, common.NewNotFoundError(err.Error(), err)
+		}
+		req.Height = header.Height
+	}
+
+	accountKey, err := backend.GetAccountKeyAtBlockHeight(r.Context(), req.Address, req.Index, req.Height)
+	if err != nil {
+		err = fmt.Errorf("failed to get account key with index: %d, reason: %w", req.Index, err)
+		return nil, common.NewNotFoundError(err.Error(), err)
+	}
+
+	var response models.AccountPublicKey
+	response.Build(*accountKey)
+	return response, nil
+}
+
+// GetAccountKeys handler retrieves account keys by address and returns the response
+func GetAccountKeys(r *common.Request, backend access.API, _ commonmodels.LinkGenerator) (interface{}, error) {
+	req, err := request.GetAccountKeysRequest(r)
+	if err != nil {
+		return nil, common.NewBadRequestError(err)
+	}
+
+	// In case we receive special height values 'final' and 'sealed',
+	// fetch that height and overwrite request with it.
+	isSealed := req.Height == request.SealedHeight
+	isFinalized := req.Height == request.FinalHeight
+	if isFinalized || isSealed {
+		header, _, err := backend.GetLatestBlockHeader(r.Context(), isSealed)
+		if err != nil {
+			err := fmt.Errorf("block with height: %d does not exist", req.Height)
+			return nil, common.NewNotFoundError(err.Error(), err)
+		}
+		req.Height = header.Height
+	}
+
+	accountKeys, err := backend.GetAccountKeysAtBlockHeight(r.Context(), req.Address, req.Height)
+	if err != nil {
+		err = fmt.Errorf("failed to get account keys, reason: %w", err)
+		return nil, common.NewNotFoundError(err.Error(), err)
+	}
+
+	var response models.AccountPublicKeys
+	response.Build(accountKeys)
+	return response, nil
+}
diff --git a/engine/access/rest/http/routes/account_keys_test.go b/engine/access/rest/http/routes/account_keys_test.go
new file mode 100644
index 00000000000..6ad11bc89fc
--- /dev/null
+++ b/engine/access/rest/http/routes/account_keys_test.go
@@ -0,0 +1,472 @@
+package routes_test
+
+import (
+	"fmt"
+	"math"
+	"net/http"
+	"net/url"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	mocktestify "github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/crypto"
+	"github.com/onflow/crypto/hash"
+
+	"github.com/onflow/flow-go/access/mock"
+	"github.com/onflow/flow-go/engine/access/rest/router"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestGetAccountKeyByIndex tests local getAccountKeyByIndex request.
+//
+// Runs the following tests:
+// 1. Get key by address and index at latest sealed block.
+// 2. Get key by address and index at latest finalized block.
+// 3. Get missing key by address and index at latest sealed block.
+// 4. Get missing key by address and index at latest finalized block.
+// 5. Get key by address and index at height.
+// 6. Get key by address and index at missing block.
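+//
+// All cases go through the same endpoint (as built by accountKeyURL below):
+//
+//	GET /v1/accounts/{address}/keys/{index}?block_height=sealed
+//
+// where block_height is a concrete height or one of the special values
+// 'final' and 'sealed'.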
+func TestGetAccountKeyByIndex(t *testing.T) { + backend := mock.NewAPI(t) + + t.Run("get key by address and index at latest sealed block", func(t *testing.T) { + account := accountFixture(t) + var height uint64 = 100 + block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) + var keyIndex uint32 = 0 + keyByIndex := findAccountKeyByIndex(account.Keys, keyIndex) + + req := getAccountKeyByIndexRequest(t, account, "0", router.SealedHeightQueryParam) + + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, true). + Return(block, flow.BlockStatusSealed, nil) + + backend.Mock. + On("GetAccountKeyAtBlockHeight", mocktestify.Anything, account.Address, keyIndex, height). + Return(keyByIndex, nil) + + expected := expectedAccountKeyResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get key by address and index at latest finalized block", func(t *testing.T) { + account := accountFixture(t) + var height uint64 = 100 + block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) + var keyIndex uint32 = 0 + keyByIndex := findAccountKeyByIndex(account.Keys, keyIndex) + + req := getAccountKeyByIndexRequest(t, account, "0", router.FinalHeightQueryParam) + + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, false). + Return(block, flow.BlockStatusFinalized, nil) + + backend.Mock. + On("GetAccountKeyAtBlockHeight", mocktestify.Anything, account.Address, keyIndex, height). + Return(keyByIndex, nil) + + expected := expectedAccountKeyResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get missing key by address and index at latest sealed block", func(t *testing.T) { + account := accountFixture(t) + var height uint64 = 100 + index := "2" + block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) + + req := getAccountKeyByIndexRequest(t, account, index, router.SealedHeightQueryParam) + + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, true). + Return(block, flow.BlockStatusSealed, nil) + + var keyIndex uint32 = 2 + err := fmt.Errorf("failed to get account key with index: %d", keyIndex) + backend.Mock. + On("GetAccountKeyAtBlockHeight", mocktestify.Anything, account.Address, keyIndex, height). + Return(nil, err) + + statusCode := 404 + expected := fmt.Sprintf(` + { + "code": %d, + "message": "failed to get account key with index: %s, reason: failed to get account key with index: %s" + } + `, statusCode, index, index) + + router.AssertResponse(t, req, statusCode, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get missing key by address and index at latest finalized block", func(t *testing.T) { + account := accountFixture(t) + var height uint64 = 100 + index := "2" + block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) + + req := getAccountKeyByIndexRequest(t, account, index, router.FinalHeightQueryParam) + + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, false). + Return(block, flow.BlockStatusFinalized, nil) + + var keyIndex uint32 = 2 + err := fmt.Errorf("failed to get account key with index: %d", keyIndex) + backend.Mock. + On("GetAccountKeyAtBlockHeight", mocktestify.Anything, account.Address, keyIndex, height). 
+ Return(nil, err) + + statusCode := 404 + expected := fmt.Sprintf(` + { + "code": %d, + "message": "failed to get account key with index: %s, reason: failed to get account key with index: %s" + } + `, statusCode, index, index) + + router.AssertResponse(t, req, statusCode, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get key by address and index at height", func(t *testing.T) { + var height uint64 = 1337 + account := accountFixture(t) + req := getAccountKeyByIndexRequest(t, account, "0", "1337") + + var keyIndex uint32 = 0 + keyByIndex := findAccountKeyByIndex(account.Keys, keyIndex) + + backend.Mock. + On("GetAccountKeyAtBlockHeight", mocktestify.Anything, account.Address, keyIndex, height). + Return(keyByIndex, nil) + + expected := expectedAccountKeyResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get key by address and index at missing block", func(t *testing.T) { + backend := mock.NewAPI(t) + account := accountFixture(t) + const finalHeight uint64 = math.MaxUint64 - 2 + + req := getAccountKeyByIndexRequest(t, account, "0", router.FinalHeightQueryParam) + + err := fmt.Errorf("block with height: %d does not exist", finalHeight) + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, false). + Return(nil, flow.BlockStatusUnknown, err) + + statusCode := 404 + expected := fmt.Sprintf(` + { + "code": %d, + "message": "block with height: %d does not exist" + } + `, statusCode, finalHeight) + + router.AssertResponse(t, req, statusCode, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + tests := []struct { + name string + url string + out string + }{ + { + "get key with invalid address", + accountKeyURL(t, "123", "3", "100"), + `{"code":400, "message":"invalid address"}`, + }, + { + "get key with invalid index", + accountKeyURL( + t, + unittest.AddressFixture().String(), + "foo", + "100", + ), + `{"code":400, "message":"invalid key index: value must be an unsigned 32 bit integer"}`, + }, + { + "get key with invalid height", + accountKeyURL( + t, + unittest.AddressFixture().String(), + "2", + "-100", + ), + `{"code":400, "message":"invalid height format"}`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + req, _ := http.NewRequest("GET", test.url, nil) + rr := router.ExecuteRequest(req, backend) + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.JSONEq(t, test.out, rr.Body.String()) + }) + } +} + +// TestGetAccountKeys tests local getAccountKeys request. +// +// Runs the following tests: +// 1. Get keys by address at latest sealed block. +// 2. Get keys by address at latest finalized block. +// 3. Get keys by address at height. +// 4. Get key by address and index at missing block. +func TestGetAccountKeys(t *testing.T) { + backend := mock.NewAPI(t) + + t.Run("get keys by address at latest sealed block", func(t *testing.T) { + account := accountWithKeysFixture(t) + var height uint64 = 100 + block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) + + req := getAccountKeysRequest(t, account, router.SealedHeightQueryParam) + + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, true). + Return(block, flow.BlockStatusSealed, nil) + + backend.Mock. + On("GetAccountKeysAtBlockHeight", mocktestify.Anything, account.Address, height). 
+ Return(account.Keys, nil) + + expected := expectedAccountKeysResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get keys by address at latest finalized block", func(t *testing.T) { + account := accountWithKeysFixture(t) + var height uint64 = 100 + block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) + + req := getAccountKeysRequest(t, account, router.FinalHeightQueryParam) + + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, false). + Return(block, flow.BlockStatusFinalized, nil) + + backend.Mock. + On("GetAccountKeysAtBlockHeight", mocktestify.Anything, account.Address, height). + Return(account.Keys, nil) + + expected := expectedAccountKeysResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get keys by address at height", func(t *testing.T) { + var height uint64 = 1337 + account := accountWithKeysFixture(t) + req := getAccountKeysRequest(t, account, "1337") + + backend.Mock. + On("GetAccountKeysAtBlockHeight", mocktestify.Anything, account.Address, height). + Return(account.Keys, nil) + + expected := expectedAccountKeysResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get keys by address at missing block", func(t *testing.T) { + backend := mock.NewAPI(t) + account := accountWithKeysFixture(t) + const finalHeight uint64 = math.MaxUint64 - 2 + + req := getAccountKeysRequest(t, account, router.FinalHeightQueryParam) + + err := fmt.Errorf("block with height: %d does not exist", finalHeight) + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, false). 
+ Return(nil, flow.BlockStatusUnknown, err) + + statusCode := 404 + expected := fmt.Sprintf(` + { + "code": %d, + "message": "block with height: %d does not exist" + } + `, statusCode, finalHeight) + + router.AssertResponse(t, req, statusCode, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + tests := []struct { + name string + url string + out string + }{ + { + "get keys with invalid address", + accountKeysURL(t, "123", "100"), + `{"code":400, "message":"invalid address"}`, + }, + { + "get keys with invalid height", + accountKeysURL( + t, + unittest.AddressFixture().String(), + "-100", + ), + `{"code":400, "message":"invalid height format"}`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + req, _ := http.NewRequest("GET", test.url, nil) + rr := router.ExecuteRequest(req, backend) + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.JSONEq(t, test.out, rr.Body.String()) + }) + } +} + +func accountKeyURL(t *testing.T, address string, index string, height string) string { + u, err := url.ParseRequestURI( + fmt.Sprintf("/v1/accounts/%s/keys/%s", address, index), + ) + require.NoError(t, err) + q := u.Query() + + if height != "" { + q.Add("block_height", height) + } + + u.RawQuery = q.Encode() + return u.String() +} + +func accountKeysURL(t *testing.T, address string, height string) string { + u, err := url.ParseRequestURI( + fmt.Sprintf("/v1/accounts/%s/keys", address), + ) + require.NoError(t, err) + q := u.Query() + + if height != "" { + q.Add("block_height", height) + } + + u.RawQuery = q.Encode() + return u.String() +} + +func getAccountKeyByIndexRequest( + t *testing.T, + account *flow.Account, + index string, + height string, +) *http.Request { + req, err := http.NewRequest( + "GET", + accountKeyURL(t, account.Address.String(), index, height), + nil, + ) + require.NoError(t, err) + + return req +} + +func getAccountKeysRequest( + t *testing.T, + account *flow.Account, + height string, +) *http.Request { + req, err := http.NewRequest( + "GET", + accountKeysURL(t, account.Address.String(), height), + nil, + ) + require.NoError(t, err) + + return req +} + +func expectedAccountKeyResponse(account *flow.Account) string { + return fmt.Sprintf(` + { + "index":"0", + "public_key":"%s", + "signing_algorithm":"ECDSA_P256", + "hashing_algorithm":"SHA3_256", + "sequence_number":"0", + "weight":"1000", + "revoked":false + }`, + account.Keys[0].PublicKey.String(), + ) +} + +func expectedAccountKeysResponse(account *flow.Account) string { + return fmt.Sprintf(` + { + "keys":[ + { + "index":"0", + "public_key":"%s", + "signing_algorithm":"ECDSA_P256", + "hashing_algorithm":"SHA3_256", + "sequence_number":"0", + "weight":"1000", + "revoked":false + }, + { + "index":"1", + "public_key":"%s", + "signing_algorithm":"ECDSA_P256", + "hashing_algorithm":"SHA3_256", + "sequence_number":"0", + "weight":"500", + "revoked":false + } + ] + }`, + account.Keys[0].PublicKey.String(), + account.Keys[1].PublicKey.String(), + ) +} + +func findAccountKeyByIndex(keys []flow.AccountPublicKey, keyIndex uint32) *flow.AccountPublicKey { + for _, key := range keys { + if key.Index == keyIndex { + return &key + } + } + return &flow.AccountPublicKey{} +} + +func accountWithKeysFixture(t *testing.T) *flow.Account { + account, err := unittest.AccountFixture() + require.NoError(t, err) + + key2, err := unittest.AccountKeyFixture(128, crypto.ECDSAP256, hash.SHA3_256) + require.NoError(t, err) + + account.Keys = append(account.Keys, key2.PublicKey(500)) + 
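+	// the appended key's index defaults to 0, so set it to 1 explicitly;
+	// findAccountKeyByIndex and the expected key responses rely on this value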
account.Keys[1].Index = 1 + + return account +} diff --git a/engine/access/rest/http/routes/accounts.go b/engine/access/rest/http/routes/accounts.go new file mode 100644 index 00000000000..ade6736d4ac --- /dev/null +++ b/engine/access/rest/http/routes/accounts.go @@ -0,0 +1,35 @@ +package routes + +import ( + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/http/models" + "github.com/onflow/flow-go/engine/access/rest/http/request" +) + +// GetAccount handler retrieves account by address and returns the response +func GetAccount(r *common.Request, backend access.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetAccountRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + // in case we receive special height values 'final' and 'sealed', fetch that height and overwrite request with it + if req.Height == request.FinalHeight || req.Height == request.SealedHeight { + header, _, err := backend.GetLatestBlockHeader(r.Context(), req.Height == request.SealedHeight) + if err != nil { + return nil, err + } + req.Height = header.Height + } + + account, err := backend.GetAccountAtBlockHeight(r.Context(), req.Address, req.Height) + if err != nil { + return nil, err + } + + var response models.Account + err = response.Build(account, link, r.ExpandFields) + return response, err +} diff --git a/engine/access/rest/http/routes/accounts_test.go b/engine/access/rest/http/routes/accounts_test.go new file mode 100644 index 00000000000..983f13c3448 --- /dev/null +++ b/engine/access/rest/http/routes/accounts_test.go @@ -0,0 +1,185 @@ +package routes_test + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + mocktestify "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/common/middleware" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +const expandableFieldKeys = "keys" +const expandableFieldContracts = "contracts" + +func accountURL(t *testing.T, address string, height string) string { + u, err := url.ParseRequestURI(fmt.Sprintf("/v1/accounts/%s", address)) + require.NoError(t, err) + q := u.Query() + + if height != "" { + q.Add("block_height", height) + } + + u.RawQuery = q.Encode() + return u.String() +} + +// TestAccessGetAccount tests local getAccount request. +// +// Runs the following tests: +// 1. Get account by address at latest sealed block. +// 2. Get account by address at latest finalized block. +// 3. Get account by address at height. +// 4. Get account by address at height condensed. +// 5. Get invalid account. +func TestAccessGetAccount(t *testing.T) { + backend := &mock.API{} + + t.Run("get by address at latest sealed block", func(t *testing.T) { + account := accountFixture(t) + var height uint64 = 100 + block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) + + req := getAccountRequest(t, account, router.SealedHeightQueryParam, expandableFieldKeys, expandableFieldContracts) + + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, true). + Return(block, flow.BlockStatusSealed, nil) + + backend.Mock. + On("GetAccountAtBlockHeight", mocktestify.Anything, account.Address, height). 
+ Return(account, nil) + + expected := expectedExpandedResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get by address at latest finalized block", func(t *testing.T) { + + var height uint64 = 100 + block := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(height)) + account := accountFixture(t) + + req := getAccountRequest(t, account, router.FinalHeightQueryParam, expandableFieldKeys, expandableFieldContracts) + backend.Mock. + On("GetLatestBlockHeader", mocktestify.Anything, false). + Return(block, flow.BlockStatusFinalized, nil) + backend.Mock. + On("GetAccountAtBlockHeight", mocktestify.Anything, account.Address, height). + Return(account, nil) + + expected := expectedExpandedResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get by address at height", func(t *testing.T) { + var height uint64 = 1337 + account := accountFixture(t) + req := getAccountRequest(t, account, fmt.Sprintf("%d", height), expandableFieldKeys, expandableFieldContracts) + + backend.Mock. + On("GetAccountAtBlockHeight", mocktestify.Anything, account.Address, height). + Return(account, nil) + + expected := expectedExpandedResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get by address at height condensed", func(t *testing.T) { + var height uint64 = 1337 + account := accountFixture(t) + req := getAccountRequest(t, account, fmt.Sprintf("%d", height)) + + backend.Mock. + On("GetAccountAtBlockHeight", mocktestify.Anything, account.Address, height). + Return(account, nil) + + expected := expectedCondensedResponse(account) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) + + t.Run("get invalid", func(t *testing.T) { + tests := []struct { + url string + out string + }{ + {accountURL(t, "123", ""), `{"code":400, "message":"invalid address"}`}, + {accountURL(t, unittest.AddressFixture().String(), "foo"), `{"code":400, "message":"invalid height format"}`}, + } + + for i, test := range tests { + req, _ := http.NewRequest("GET", test.url, nil) + rr := router.ExecuteRequest(req, backend) + + assert.Equal(t, http.StatusBadRequest, rr.Code) + assert.JSONEq(t, test.out, rr.Body.String(), fmt.Sprintf("test #%d failed: %v", i, test)) + } + }) +} + +func expectedExpandedResponse(account *flow.Account) string { + return fmt.Sprintf(`{ + "address":"%s", + "balance":"100", + "keys":[ + { + "index":"0", + "public_key":"%s", + "signing_algorithm":"ECDSA_P256", + "hashing_algorithm":"SHA3_256", + "sequence_number":"0", + "weight":"1000", + "revoked":false + } + ], + "_links":{"_self":"/v1/accounts/%s" }, + "_expandable": {}, + "contracts": {"contract1":"Y29udHJhY3Qx", "contract2":"Y29udHJhY3Qy"} + }`, account.Address, account.Keys[0].PublicKey.String(), account.Address) +} + +func expectedCondensedResponse(account *flow.Account) string { + return fmt.Sprintf(`{ + "address":"%s", + "balance":"100", + "_links":{"_self":"/v1/accounts/%s" }, + "_expandable":{"contracts":"contracts", "keys":"keys"} + }`, account.Address, account.Address) +} + +func getAccountRequest(t *testing.T, account *flow.Account, height string, expandFields ...string) *http.Request { + req, err := http.NewRequest("GET", accountURL(t, account.Address.String(), height), nil) + if len(expandFields) > 0 { + fieldParam := 
strings.Join(expandFields, ",") + q := req.URL.Query() + q.Add(middleware.ExpandQueryParam, fieldParam) + req.URL.RawQuery = q.Encode() + } + + require.NoError(t, err) + return req +} + +func accountFixture(t *testing.T) *flow.Account { + account, err := unittest.AccountFixture() + require.NoError(t, err) + return account +} diff --git a/engine/access/rest/http/routes/blocks.go b/engine/access/rest/http/routes/blocks.go new file mode 100644 index 00000000000..f5baea84946 --- /dev/null +++ b/engine/access/rest/http/routes/blocks.go @@ -0,0 +1,223 @@ +package routes + +import ( + "context" + "fmt" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/model/flow" +) + +// GetBlocksByIDs gets blocks by provided ID or list of IDs. +func GetBlocksByIDs(r *common.Request, backend access.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetBlockByIDsRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + blocks := make([]*commonmodels.Block, len(req.IDs)) + + for i, id := range req.IDs { + block, err := getBlock(forID(&id), r, backend, link) + if err != nil { + return nil, err + } + blocks[i] = block + } + + return blocks, nil +} + +// GetBlocksByHeight gets blocks by height. +func GetBlocksByHeight(r *common.Request, backend access.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetBlockRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + if req.FinalHeight || req.SealedHeight { + block, err := getBlock(forFinalized(req.Heights[0]), r, backend, link) + if err != nil { + return nil, err + } + + return []*commonmodels.Block{block}, nil + } + + // if the query is /blocks/height=1000,1008,1049... 
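+	// i.e. an explicit comma-separated list of heights rather than a
+	// start/end range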
+ if req.HasHeights() { + blocks := make([]*commonmodels.Block, len(req.Heights)) + for i, h := range req.Heights { + block, err := getBlock(forHeight(h), r, backend, link) + if err != nil { + return nil, err + } + blocks[i] = block + } + + return blocks, nil + } + + // support providing end height as "sealed" or "final" + if req.EndHeight == request.FinalHeight || req.EndHeight == request.SealedHeight { + latest, _, err := backend.GetLatestBlock(r.Context(), req.EndHeight == request.SealedHeight) + if err != nil { + return nil, err + } + + req.EndHeight = latest.Height // overwrite special value height with fetched + + if req.StartHeight > req.EndHeight { + return nil, common.NewBadRequestError(fmt.Errorf("start height must be less than or equal to end height")) + } + } + + blocks := make([]*commonmodels.Block, 0) + // start and end height inclusive + for i := req.StartHeight; i <= req.EndHeight; i++ { + block, err := getBlock(forHeight(i), r, backend, link) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +// GetBlockPayloadByID gets block payload by ID +func GetBlockPayloadByID(r *common.Request, backend access.API, _ commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetBlockPayloadRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + blkProvider := NewBlockProvider(backend, forID(&req.ID)) + blk, _, statusErr := blkProvider.getBlock(r.Context()) + if statusErr != nil { + return nil, statusErr + } + + var payload commonmodels.BlockPayload + err = payload.Build(&blk.Payload) + if err != nil { + return nil, err + } + + return payload, nil +} + +func getBlock(option blockProviderOption, req *common.Request, backend access.API, link commonmodels.LinkGenerator) (*commonmodels.Block, error) { + // lookup block + blkProvider := NewBlockProvider(backend, option) + blk, blockStatus, err := blkProvider.getBlock(req.Context()) + if err != nil { + return nil, err + } + + // lookup execution result + // (even if not specified as expandable, since we need the execution result ID to generate its expandable link) + var block commonmodels.Block + executionResult, err := backend.GetExecutionResultForBlockID(req.Context(), blk.ID()) + if err != nil { + // handle case where execution result is not yet available + if se, ok := status.FromError(err); ok { + if se.Code() == codes.NotFound { + err := block.Build(blk, nil, link, blockStatus, req.ExpandFields) + if err != nil { + return nil, err + } + return &block, nil + } + } + return nil, err + } + + err = block.Build(blk, executionResult, link, blockStatus, req.ExpandFields) + if err != nil { + return nil, err + } + return &block, nil +} + +// blockProvider is a layer of abstraction on top of the backend access.API and provides a uniform way to +// look up a block or a block header either by ID or by height +type blockProvider struct { + id *flow.Identifier + height uint64 + latest bool + sealed bool + backend access.API +} + +type blockProviderOption func(blkProvider *blockProvider) + +func forID(id *flow.Identifier) blockProviderOption { + return func(blkProvider *blockProvider) { + blkProvider.id = id + } +} +func forHeight(height uint64) blockProviderOption { + return func(blkProvider *blockProvider) { + blkProvider.height = height + } +} + +func forFinalized(queryParam uint64) blockProviderOption { + return func(blkProvider *blockProvider) { + switch queryParam { + case request.SealedHeight: + blkProvider.sealed = true + fallthrough + case 
request.FinalHeight: + blkProvider.latest = true + } + } +} + +func NewBlockProvider(backend access.API, options ...blockProviderOption) *blockProvider { + blkProvider := &blockProvider{ + backend: backend, + } + + for _, o := range options { + o(blkProvider) + } + return blkProvider +} + +func (blkProvider *blockProvider) getBlock(ctx context.Context) (*flow.Block, flow.BlockStatus, error) { + if blkProvider.id != nil { + blk, status, err := blkProvider.backend.GetBlockByID(ctx, *blkProvider.id) + if err != nil { // unfortunately backend returns internal error status if not found + return nil, flow.BlockStatusUnknown, common.NewNotFoundError( + fmt.Sprintf("error looking up block with ID %s", blkProvider.id.String()), err, + ) + } + return blk, status, nil + } + + if blkProvider.latest { + blk, status, err := blkProvider.backend.GetLatestBlock(ctx, blkProvider.sealed) + if err != nil { + // cannot be a 'not found' error since final and sealed block should always be found + return nil, flow.BlockStatusUnknown, common.NewRestError(http.StatusInternalServerError, "block lookup failed", err) + } + return blk, status, nil + } + + blk, status, err := blkProvider.backend.GetBlockByHeight(ctx, blkProvider.height) + if err != nil { // unfortunately backend returns internal error status if not found + return nil, flow.BlockStatusUnknown, common.NewNotFoundError( + fmt.Sprintf("error looking up block at height %d", blkProvider.height), err, + ) + } + return blk, status, nil +} diff --git a/engine/access/rest/http/routes/blocks_test.go b/engine/access/rest/http/routes/blocks_test.go new file mode 100644 index 00000000000..7adb937c836 --- /dev/null +++ b/engine/access/rest/http/routes/blocks_test.go @@ -0,0 +1,367 @@ +package routes_test + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "testing" + "time" + + mocks "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/common/middleware" + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +type testVector struct { + description string + request *http.Request + expectedStatus int + expectedResponse string +} + +func prepareTestVectors(t *testing.T, + blockIDs []string, + heights []string, + blocks []*flow.Block, + executionResults []*flow.ExecutionResult, + blkCnt int) []testVector { + + singleBlockExpandedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], true, flow.BlockStatusSealed) + multipleBlockExpandedResponse := expectedBlockResponsesExpanded(blocks, executionResults, true, flow.BlockStatusSealed) + + singleBlockCondensedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], false, flow.BlockStatusSealed) + multipleBlockCondensedResponse := expectedBlockResponsesExpanded(blocks, executionResults, false, flow.BlockStatusSealed) + + multipleBlockHeaderWithHeaderSelectedResponse := expectedBlockResponsesSelected(blocks, executionResults, flow.BlockStatusSealed, []string{"header"}) + multipleBlockHeaderWithHeaderAndStatusSelectedResponse := expectedBlockResponsesSelected(blocks, executionResults, flow.BlockStatusSealed, []string{"header", "block_status"}) + multipleBlockHeaderWithUnknownSelectedResponse := 
expectedBlockResponsesSelected(blocks, executionResults, flow.BlockStatusSealed, []string{"unknown"}) + + invalidID := unittest.IdentifierFixture().String() + invalidHeight := fmt.Sprintf("%d", blkCnt+1) + + maxIDs := flow.IdentifierList(unittest.IdentifierListFixture(request.MaxBlockRequestHeightRange + 1)) + + testVectors := []testVector{ + { + description: "Get single expanded block by ID", + request: getByIDsExpandedURL(t, blockIDs[:1]), + expectedStatus: http.StatusOK, + expectedResponse: singleBlockExpandedResponse, + }, + { + description: "Get multiple expanded blocks by IDs", + request: getByIDsExpandedURL(t, blockIDs), + expectedStatus: http.StatusOK, + expectedResponse: multipleBlockExpandedResponse, + }, + { + description: "Get single condensed block by ID", + request: getByIDsCondensedURL(t, blockIDs[:1]), + expectedStatus: http.StatusOK, + expectedResponse: singleBlockCondensedResponse, + }, + { + description: "Get multiple condensed blocks by IDs", + request: getByIDsCondensedURL(t, blockIDs), + expectedStatus: http.StatusOK, + expectedResponse: multipleBlockCondensedResponse, + }, + { + description: "Get single expanded block by height", + request: getByHeightsExpandedURL(t, heights[:1]...), + expectedStatus: http.StatusOK, + expectedResponse: singleBlockExpandedResponse, + }, + { + description: "Get multiple expanded blocks by heights", + request: getByHeightsExpandedURL(t, heights...), + expectedStatus: http.StatusOK, + expectedResponse: multipleBlockExpandedResponse, + }, + { + description: "Get multiple expanded blocks by start and end height", + request: getByStartEndHeightExpandedURL(t, heights[0], heights[len(heights)-1]), + expectedStatus: http.StatusOK, + expectedResponse: multipleBlockExpandedResponse, + }, + { + description: "Get block by ID not found", + request: getByIDsExpandedURL(t, []string{invalidID}), + expectedStatus: http.StatusNotFound, + expectedResponse: fmt.Sprintf(`{"code":404, "message":"error looking up block with ID %s"}`, invalidID), + }, + { + description: "Get block by height not found", + request: getByHeightsExpandedURL(t, invalidHeight), + expectedStatus: http.StatusNotFound, + expectedResponse: fmt.Sprintf(`{"code":404, "message":"error looking up block at height %s"}`, invalidHeight), + }, + { + description: "Get block by end height less than start height", + request: getByStartEndHeightExpandedURL(t, heights[len(heights)-1], heights[0]), + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400, "message": "start height must be less than or equal to end height"}`, + }, + { + description: "Get block by both heights and start and end height", + request: requestURL(t, nil, heights[len(heights)-1], heights[0], true, []string{}, heights...), + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400, "message": "can only provide either heights or start and end height range"}`, + }, + { + description: "Get block with missing height param", + request: getByHeightsExpandedURL(t), // no height query param specified + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400, "message": "must provide either heights or start and end height range"}`, + }, + { + description: "Get block with missing height values", + request: getByHeightsExpandedURL(t, ""), // height query param specified with no value + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400, "message": "must provide either heights or start and end height range"}`, + }, + { + description: "Get block by more than maximum 
permissible number of IDs",
+			request:          getByIDsCondensedURL(t, maxIDs.Strings()), // one more ID than the maximum allowed per request
+			expectedStatus:   http.StatusBadRequest,
+			expectedResponse: fmt.Sprintf(`{"code":400, "message": "at most %d IDs can be requested at a time"}`, request.MaxBlockRequestHeightRange),
+		},
+		{
+			description:      "Get multiple blocks by IDs with header selected",
+			request:          getByIDsCondensedWithSelectURL(t, blockIDs, []string{"header"}),
+			expectedStatus:   http.StatusOK,
+			expectedResponse: multipleBlockHeaderWithHeaderSelectedResponse,
+		},
+		{
+			description:      "Get multiple blocks by IDs with header and block_status selected",
+			request:          getByIDsCondensedWithSelectURL(t, blockIDs, []string{"header", "block_status"}),
+			expectedStatus:   http.StatusOK,
+			expectedResponse: multipleBlockHeaderWithHeaderAndStatusSelectedResponse,
+		},
+		{
+			description:      "Get multiple blocks by IDs with unknown select object",
+			request:          getByIDsCondensedWithSelectURL(t, blockIDs, []string{"unknown"}),
+			expectedStatus:   http.StatusOK,
+			expectedResponse: multipleBlockHeaderWithUnknownSelectedResponse,
+		},
+	}
+	return testVectors
+}
+
+// TestAccessGetBlocks tests local get blocks by ID and get blocks by heights API
+func TestAccessGetBlocks(t *testing.T) {
+	backend := &mock.API{}
+
+	blkCnt := 10
+	blockIDs, heights, blocks, executionResults := generateMocks(backend, blkCnt)
+	testVectors := prepareTestVectors(t, blockIDs, heights, blocks, executionResults, blkCnt)
+
+	for _, tv := range testVectors {
+		rr := router.ExecuteRequest(tv.request, backend)
+		require.Equal(t, tv.expectedStatus, rr.Code, "failed test %s: incorrect response code", tv.description)
+		actualResp := rr.Body.String()
+		require.JSONEq(t, tv.expectedResponse, actualResp, "failed test %s: incorrect response body", tv.description)
+	}
+}
+
+func requestURL(t *testing.T, ids []string, start string, end string, expandResponse bool, selectedFields []string, heights ...string) *http.Request {
+	u, _ := url.Parse("/v1/blocks")
+	q := u.Query()
+
+	if len(ids) > 0 {
+		u, _ = url.Parse(u.String() + "/" + strings.Join(ids, ","))
+	}
+
+	if start != "" {
+		q.Add(router.StartHeightQueryParam, start)
+		q.Add(router.EndHeightQueryParam, end)
+	}
+
+	if len(heights) > 0 {
+		heightsStr := strings.Join(heights, ",")
+		q.Add(router.HeightQueryParam, heightsStr)
+	}
+
+	if len(selectedFields) > 0 {
+		selectedStr := strings.Join(selectedFields, ",")
+		q.Add(middleware.SelectQueryParam, selectedStr)
+	}
+
+	if expandResponse {
+		var expands []string
+		expands = append(expands, router.ExpandableFieldPayload)
+		expands = append(expands, router.ExpandableExecutionResult)
+		expandsStr := strings.Join(expands, ",")
+		q.Add(middleware.ExpandQueryParam, expandsStr)
+	}
+
+	u.RawQuery = q.Encode()
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	require.NoError(t, err)
+	return req
+}
+
+func getByIDsExpandedURL(t *testing.T, ids []string) *http.Request {
+	return requestURL(t, ids, "", "", true, []string{})
+}
+
+func getByHeightsExpandedURL(t *testing.T, heights ...string) *http.Request {
+	return requestURL(t, nil, "", "", true, []string{}, heights...)
+} + +func getByStartEndHeightExpandedURL(t *testing.T, start, end string) *http.Request { + return requestURL(t, nil, start, end, true, []string{}) +} + +func getByIDsCondensedURL(t *testing.T, ids []string) *http.Request { + return requestURL(t, ids, "", "", false, []string{}) +} + +func getByIDsCondensedWithSelectURL(t *testing.T, ids []string, selectedFields []string) *http.Request { + return requestURL(t, ids, "", "", false, selectedFields) +} + +func generateMocks(backend *mock.API, count int) ([]string, []string, []*flow.Block, []*flow.ExecutionResult) { + blockIDs := make([]string, count) + heights := make([]string, count) + blocks := make([]*flow.Block, count) + executionResults := make([]*flow.ExecutionResult, count) + + for i := 0; i < count; i++ { + block := unittest.BlockFixture( + unittest.Block.WithHeight(uint64(i + 1)), // avoiding edge case of height = 0 (genesis block) + ) + blocks[i] = block + blockIDs[i] = block.ID().String() + heights[i] = fmt.Sprintf("%d", block.Height) + + executionResult := unittest.ExecutionResultFixture() + executionResult.BlockID = block.ID() + executionResults[i] = executionResult + + backend.Mock.On("GetBlockByID", mocks.Anything, block.ID()).Return(block, flow.BlockStatusSealed, nil) + backend.Mock.On("GetBlockByHeight", mocks.Anything, block.Height).Return(block, flow.BlockStatusSealed, nil) + backend.Mock.On("GetExecutionResultForBlockID", mocks.Anything, block.ID()).Return(executionResults[i], nil) + } + + // any other call to the backend should return a not found error + backend.Mock.On("GetBlockByID", mocks.Anything, mocks.Anything).Return(nil, flow.BlockStatusUnknown, status.Error(codes.NotFound, "not found")) + backend.Mock.On("GetBlockByHeight", mocks.Anything, mocks.Anything).Return(nil, flow.BlockStatusUnknown, status.Error(codes.NotFound, "not found")) + + return blockIDs, heights, blocks, executionResults +} + +func expectedBlockResponsesExpanded( + blocks []*flow.Block, + execResult []*flow.ExecutionResult, + expanded bool, + status flow.BlockStatus, + selectedFields ...string, +) string { + blockResponses := make([]string, 0) + for i, b := range blocks { + response := expectedBlockResponse(b, execResult[i], expanded, status, selectedFields...) + if response != "" { + blockResponses = append(blockResponses, response) + } + } + return fmt.Sprintf("[%s]", strings.Join(blockResponses, ",")) +} + +func expectedBlockResponsesSelected( + blocks []*flow.Block, + execResult []*flow.ExecutionResult, + status flow.BlockStatus, + selectedFields []string, +) string { + return expectedBlockResponsesExpanded(blocks, execResult, false, status, selectedFields...) 
+}
+
+func expectedBlockResponse(
+	block *flow.Block,
+	execResult *flow.ExecutionResult,
+	expanded bool,
+	status flow.BlockStatus,
+	selectedFields ...string,
+) string {
+	id := block.ID().String()
+	execResultID := execResult.ID().String()
+	blockLink := fmt.Sprintf("/v1/blocks/%s", id)
+	payloadLink := fmt.Sprintf("/v1/blocks/%s/payload", id)
+	execLink := fmt.Sprintf("/v1/execution_results/%s", execResultID)
+	timestamp := time.UnixMilli(int64(block.Timestamp)).UTC().Format(time.RFC3339Nano)
+
+	header := fmt.Sprintf(`"header": {
+		"id": "%s",
+		"parent_id": "%s",
+		"height": "%d",
+		"timestamp": "%s",
+		"parent_voter_signature": "%s"
+	}`, id, block.ParentID.String(), block.Height, timestamp, util.ToBase64(block.ParentVoterSigData))
+
+	links := fmt.Sprintf(`"_links": {
+		"_self": "%s"
+	}`, blockLink)
+
+	expandable := fmt.Sprintf(`"_expandable": {
+		"payload": "%s",
+		"execution_result": "%s"
+	}`, payloadLink, execLink)
+
+	blockStatus := fmt.Sprintf(`"block_status": "%s"`, status.String())
+	payload := `"payload": {"collection_guarantees": [],"block_seals": []}`
+	executionResult := fmt.Sprintf(`"execution_result": %s`, executionResultExpectedStr(execResult))
+
+	partsSet := make(map[string]string)
+
+	if expanded {
+		partsSet["header"] = header
+		partsSet["payload"] = payload
+		partsSet["executionResult"] = executionResult
+		partsSet["_expandable"] = `"_expandable": {}`
+		partsSet["_links"] = links
+		partsSet["block_status"] = blockStatus
+	} else {
+		partsSet["header"] = header
+		partsSet["_expandable"] = expandable
+		partsSet["_links"] = links
+		partsSet["block_status"] = blockStatus
+	}
+
+	if len(selectedFields) > 0 {
+		// filter the response parts:
+		// elements whose keys are not found in the filter map will be removed
+		selectedFieldSet := make(map[string]struct{}, len(selectedFields))
+		for _, field := range selectedFields {
+			selectedFieldSet[field] = struct{}{}
+		}
+
+		for key := range partsSet {
+			if _, found := selectedFieldSet[key]; !found {
+				delete(partsSet, key)
+			}
+		}
+	}
+
+	// Iterate over the map and append the values to the slice
+	var values []string
+	for _, value := range partsSet {
+		values = append(values, value)
+	}
+	if len(values) == 0 {
+		return ""
+	}
+
+	return fmt.Sprintf("{%s}", strings.Join(values, ","))
+}
diff --git a/engine/access/rest/http/routes/collections.go b/engine/access/rest/http/routes/collections.go
new file mode 100644
index 00000000000..574ab96318d
--- /dev/null
+++ b/engine/access/rest/http/routes/collections.go
@@ -0,0 +1,43 @@
+package routes
+
+import (
+	"github.com/onflow/flow-go/access"
+	"github.com/onflow/flow-go/engine/access/rest/common"
+	commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models"
+	"github.com/onflow/flow-go/engine/access/rest/http/request"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// GetCollectionByID retrieves a collection by ID and builds a response
+func GetCollectionByID(r *common.Request, backend access.API, link commonmodels.LinkGenerator) (interface{}, error) {
+	req, err := request.GetCollectionRequest(r)
+	if err != nil {
+		return nil, common.NewBadRequestError(err)
+	}
+
+	collection, err := backend.GetCollectionByID(r.Context(), req.ID)
+	if err != nil {
+		return nil, err
+	}
+
+	// if transactions are expanded in the query, retrieve the data for each transaction
+	transactions := make([]*flow.TransactionBody, 0)
+	if req.ExpandsTransactions {
+		for _, tid := range collection.Transactions {
+			tx, err := backend.GetTransaction(r.Context(), tid)
+			if err != nil {
+				return nil, err
+			}
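+			// collect the fetched transaction so the response builder can
+			// embed full transaction bodies in place of the expandable links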
+ + transactions = append(transactions, tx) + } + } + + var response commonmodels.Collection + err = response.Build(collection, transactions, link, r.ExpandFields) + if err != nil { + return nil, err + } + + return response, nil +} diff --git a/engine/access/rest/http/routes/collections_test.go b/engine/access/rest/http/routes/collections_test.go new file mode 100644 index 00000000000..990cc5596ce --- /dev/null +++ b/engine/access/rest/http/routes/collections_test.go @@ -0,0 +1,150 @@ +package routes_test + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + mocks "github.com/stretchr/testify/mock" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func getCollectionReq(id string, expandTransactions bool) *http.Request { + url := fmt.Sprintf("/v1/collections/%s", id) + if expandTransactions { + url = fmt.Sprintf("%s?expand=transactions", url) + } + + req, _ := http.NewRequest("GET", url, nil) + return req +} + +func TestGetCollections(t *testing.T) { + backend := &mock.API{} + + t.Run("get by ID", func(t *testing.T) { + inputs := []*flow.LightCollection{ + unittest.CollectionFixture(1).Light(), + unittest.CollectionFixture(10).Light(), + unittest.CollectionFixture(100).Light(), + } + + for _, col := range inputs { + backend.Mock. + On("GetCollectionByID", mocks.Anything, col.ID()). + Return(col, nil). + Once() + + txs := make([]string, len(col.Transactions)) + for i, tx := range col.Transactions { + txs[i] = fmt.Sprintf("\"/v1/transactions/%s\"", tx.String()) + } + transactionsStr := fmt.Sprintf("[%s]", strings.Join(txs, ",")) + + expected := fmt.Sprintf(`{ + "id":"%s", + "_links": { + "_self": "/v1/collections/%s" + }, + "_expandable": { + "transactions": %s + } + }`, col.ID(), col.ID(), transactionsStr) + + req := getCollectionReq(col.ID().String(), false) + router.AssertOKResponse(t, req, expected, backend) + mocks.AssertExpectationsForObjects(t, backend) + } + }) + + t.Run("get by ID expand transactions", func(t *testing.T) { + col := unittest.CollectionFixture(3).Light() + + transactions := make([]flow.TransactionBody, len(col.Transactions)) + for i := range col.Transactions { + transactions[i] = unittest.TransactionBodyFixture() + col.Transactions[i] = transactions[i].ID() // overwrite tx ids + + backend.Mock. + On("GetTransaction", mocks.Anything, transactions[i].ID()). + Return(&transactions[i], nil). + Once() + } + + backend.Mock. + On("GetCollectionByID", mocks.Anything, col.ID()). + Return(col, nil). 
+			Once()
+
+		req := getCollectionReq(col.ID().String(), true)
+		rr := router.ExecuteRequest(req, backend)
+
+		assert.Equal(t, http.StatusOK, rr.Code)
+		// we can't reasonably build the whole response here since it's complex,
+		// so we just make sure the transactions are included and have defined values;
+		// transaction responses themselves are covered by the transaction tests
+		var res map[string]interface{}
+		err := json.Unmarshal(rr.Body.Bytes(), &res)
+		assert.NoError(t, err)
+		resTx := res["transactions"].([]interface{})
+		for i, r := range resTx {
+			c := r.(map[string]interface{})
+			assert.Equal(t, transactions[i].ID().String(), c["id"])
+			assert.NotNil(t, c["envelope_signatures"])
+		}
+
+		mocks.AssertExpectationsForObjects(t, backend)
+	})
+
+	t.Run("get by ID errors out", func(t *testing.T) {
+		testID := unittest.IdentifierFixture()
+		tests := []struct {
+			id        string
+			mockValue *flow.LightCollection
+			mockErr   error
+			response  string
+			status    int
+		}{{
+			testID.String(),
+			nil,
+			status.Error(codes.NotFound, "not found"),
+			`{"code":404,"message":"Flow resource not found: not found"}`,
+			http.StatusNotFound,
+		}, {
+			"invalidID",
+			nil,
+			nil,
+			`{"code":400,"message":"invalid ID format"}`,
+			http.StatusBadRequest,
+		},
+			{
+				unittest.IdentifierFixture().String(),
+				nil,
+				status.Errorf(codes.Internal, "block not found"),
+				`{"code":400,"message":"Invalid Flow request: block not found"}`,
+				http.StatusBadRequest,
+			},
+		}
+
+		for _, test := range tests {
+			id, err := flow.HexStringToIdentifier(test.id)
+			if err == nil {
+				// set up the backend mock to return the configured error if this is a valid ID
+				backend.Mock.
+					On("GetCollectionByID", mocks.Anything, id).
+					Return(test.mockValue, test.mockErr)
+			}
+			req := getCollectionReq(test.id, false)
+			router.AssertResponse(t, req, test.status, test.response, backend)
+		}
+	})
+}
diff --git a/engine/access/rest/http/routes/events.go b/engine/access/rest/http/routes/events.go
new file mode 100644
index 00000000000..93ea1367b3b
--- /dev/null
+++ b/engine/access/rest/http/routes/events.go
@@ -0,0 +1,69 @@
+package routes
+
+import (
+	"fmt"
+
+	entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities"
+
+	"github.com/onflow/flow-go/access"
+	"github.com/onflow/flow-go/engine/access/rest/common"
+	commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models"
+	"github.com/onflow/flow-go/engine/access/rest/http/request"
+)
+
+const BlockQueryParam = "block_ids"
+const EventTypeQuery = "type"
+
+// GetEvents for the provided block range or list of block IDs filtered by type.
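+//
+// Two mutually exclusive query forms are accepted; a sketch, assuming the
+// conventional start_height/end_height parameter names (the tests reference
+// them via router constants):
+//
+//	GET /v1/events?type=A.179b6b1cb6755e31.Foo.Bar&block_ids=<id1>,<id2>
+//	GET /v1/events?type=A.179b6b1cb6755e31.Foo.Bar&start_height=0&end_height=sealed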
+func GetEvents(r *common.Request, backend access.API, _ commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetEventsRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + // if the request has block IDs provided then return events for block IDs + var blocksEvents commonmodels.BlocksEvents + if len(req.BlockIDs) > 0 { + events, err := backend.GetEventsForBlockIDs( + r.Context(), + req.Type, + req.BlockIDs, + entitiesproto.EventEncodingVersion_JSON_CDC_V0, + ) + if err != nil { + return nil, err + } + + blocksEvents.Build(events) + return blocksEvents, nil + } + + // if end height is provided with special values then load the height + if req.EndHeight == request.FinalHeight || req.EndHeight == request.SealedHeight { + latest, _, err := backend.GetLatestBlockHeader(r.Context(), req.EndHeight == request.SealedHeight) + if err != nil { + return nil, err + } + + req.EndHeight = latest.Height + // special check after we resolve special height value + if req.StartHeight > req.EndHeight { + return nil, common.NewBadRequestError(fmt.Errorf("current retrieved end height value is lower than start height")) + } + } + + // if request provided block height range then return events for that range + events, err := backend.GetEventsForHeightRange( + r.Context(), + req.Type, + req.StartHeight, + req.EndHeight, + entitiesproto.EventEncodingVersion_JSON_CDC_V0, + ) + if err != nil { + return nil, err + } + + blocksEvents.Build(events) + return blocksEvents, nil +} diff --git a/engine/access/rest/http/routes/events_test.go b/engine/access/rest/http/routes/events_test.go new file mode 100644 index 00000000000..dcaa1e01268 --- /dev/null +++ b/engine/access/rest/http/routes/events_test.go @@ -0,0 +1,265 @@ +package routes_test + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + "testing" + "time" + + mocks "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/http/routes" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + "github.com/onflow/flow/protobuf/go/flow/entities" +) + +func TestGetEvents(t *testing.T) { + backend := &mock.API{} + events := generateEventsMocks(backend, 5) + + allBlockIDs := make([]string, len(events)) + for i, e := range events { + allBlockIDs[i] = e.BlockID.String() + } + startHeight := fmt.Sprint(events[0].BlockHeight) + endHeight := fmt.Sprint(events[len(events)-1].BlockHeight) + + // remove events from the last block to test that an empty BlockEvents is returned when the last + // block contains no events + truncatedEvents := append(events[:len(events)-1], flow.BlockEvents{ + BlockHeight: events[len(events)-1].BlockHeight, + BlockID: events[len(events)-1].BlockID, + BlockTimestamp: events[len(events)-1].BlockTimestamp, + }) + + testVectors := []testVector{ + // valid + { + description: "Get events for a single block by ID", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "", "", []string{events[0].BlockID.String()}), + expectedStatus: http.StatusOK, + expectedResponse: testBlockEventResponse(t, []flow.BlockEvents{events[0]}), + }, + { + description: "Get events by all block IDs", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "", "", allBlockIDs), + expectedStatus: 
http.StatusOK, + expectedResponse: testBlockEventResponse(t, events), + }, + { + description: "Get events for height range", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", startHeight, endHeight, nil), + expectedStatus: http.StatusOK, + expectedResponse: testBlockEventResponse(t, events), + }, + { + description: "Get events range ending at sealed block", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "0", "sealed", nil), + expectedStatus: http.StatusOK, + expectedResponse: testBlockEventResponse(t, events), + }, + { + description: "Get events range ending after last block", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "0", fmt.Sprint(events[len(events)-1].BlockHeight+5), nil), + expectedStatus: http.StatusOK, + expectedResponse: testBlockEventResponse(t, truncatedEvents), + }, + // invalid + { + description: "Get invalid - missing all fields", + request: getEventReq(t, "", "", "", nil), + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400,"message":"must provide either block IDs or start and end height range"}`, + }, + { + description: "Get invalid - missing query event type", + request: getEventReq(t, "", "", "", []string{events[0].BlockID.String()}), + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400,"message":"event type must be provided"}`, + }, + { + description: "Get invalid - missing end height", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "100", "", nil), + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400,"message":"must provide either block IDs or start and end height range"}`, + }, + { + description: "Get invalid - start height bigger than end height", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "100", "50", nil), + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400,"message":"start height must be less than or equal to end height"}`, + }, + { + description: "Get invalid - too big interval", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "0", "5000", nil), + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400,"message":"height range 5000 exceeds maximum allowed of 250"}`, + }, + { + description: "Get invalid - can not provide all params", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "100", "120", []string{"10e782612a014b5c9c7d17994d7e67157064f3dd42fa92cd080bfb0fe22c3f71"}), + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400,"message":"can only provide either block IDs or start and end height range"}`, + }, + { + description: "Get invalid - invalid height format", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "foo", "120", nil), + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400,"message":"invalid start height: invalid height format"}`, + }, + { + description: "Get invalid - latest block smaller than start", + request: getEventReq(t, "A.179b6b1cb6755e31.Foo.Bar", "100000", "sealed", nil), + expectedStatus: http.StatusBadRequest, + expectedResponse: `{"code":400,"message":"current retrieved end height value is lower than start height"}`, + }, + } + + for _, test := range testVectors { + t.Run(test.description, func(t *testing.T) { + router.AssertResponse(t, test.request, test.expectedStatus, test.expectedResponse, backend) + }) + } + +} + +func getEventReq(t *testing.T, eventType string, start string, end string, blockIDs []string) *http.Request { + u, _ := url.Parse("/v1/events") + q := u.Query() + + if len(blockIDs) > 0 { + 
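+		// multiple block IDs travel as one comma-separated query value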
q.Add(routes.BlockQueryParam, strings.Join(blockIDs, ",")) + } + + if start != "" && end != "" { + q.Add(router.StartHeightQueryParam, start) + q.Add(router.EndHeightQueryParam, end) + } + + q.Add(routes.EventTypeQuery, eventType) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + require.NoError(t, err) + + return req +} + +func generateEventsMocks(backend *mock.API, n int) []flow.BlockEvents { + events := make([]flow.BlockEvents, n) + ids := make([]flow.Identifier, n) + + var lastHeader *flow.Header + for i := 0; i < n; i++ { + header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(uint64(i))) + ids[i] = header.ID() + + events[i] = unittest.BlockEventsFixture(header, 2) + + backend.Mock. + On("GetEventsForBlockIDs", mocks.Anything, mocks.Anything, []flow.Identifier{header.ID()}, entities.EventEncodingVersion_JSON_CDC_V0). + Return([]flow.BlockEvents{events[i]}, nil) + + lastHeader = header + } + + backend.Mock. + On("GetEventsForBlockIDs", mocks.Anything, mocks.Anything, ids, entities.EventEncodingVersion_JSON_CDC_V0). + Return(events, nil) + + // range from first to last block + backend.Mock.On( + "GetEventsForHeightRange", + mocks.Anything, + mocks.Anything, + events[0].BlockHeight, + events[len(events)-1].BlockHeight, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(events, nil) + + // range from first to last block + 5 + backend.Mock.On( + "GetEventsForHeightRange", + mocks.Anything, + mocks.Anything, + events[0].BlockHeight, + events[len(events)-1].BlockHeight+5, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(append(events[:len(events)-1], unittest.BlockEventsFixture(lastHeader, 0)), nil) + + latestBlock := unittest.BlockHeaderFixture() + latestBlock.Height = uint64(n - 1) + + // default not found + backend.Mock. + On("GetEventsForBlockIDs", mocks.Anything, mocks.Anything, mocks.Anything, entities.EventEncodingVersion_JSON_CDC_V0). + Return(nil, status.Error(codes.NotFound, "not found")) + + backend.Mock. + On("GetEventsForHeightRange", mocks.Anything, mocks.Anything). + Return(nil, status.Error(codes.NotFound, "not found")) + + backend.Mock. + On("GetLatestBlockHeader", mocks.Anything, true). 
+ Return(latestBlock, flow.BlockStatusSealed, nil) + + return events +} + +func testBlockEventResponse(t *testing.T, events []flow.BlockEvents) string { + + type eventResponse struct { + Type flow.EventType `json:"type"` + TransactionID flow.Identifier `json:"transaction_id"` + TransactionIndex string `json:"transaction_index"` + EventIndex string `json:"event_index"` + Payload string `json:"payload"` + } + + type blockEventsResponse struct { + BlockID flow.Identifier `json:"block_id"` + BlockHeight string `json:"block_height"` + BlockTimestamp string `json:"block_timestamp"` + Events []eventResponse `json:"events,omitempty"` + } + + res := make([]blockEventsResponse, len(events)) + + for i, e := range events { + events := make([]eventResponse, len(e.Events)) + + for i, ev := range e.Events { + events[i] = eventResponse{ + Type: ev.Type, + TransactionID: ev.TransactionID, + TransactionIndex: fmt.Sprint(ev.TransactionIndex), + EventIndex: fmt.Sprint(ev.EventIndex), + Payload: util.ToBase64(ev.Payload), + } + } + + res[i] = blockEventsResponse{ + BlockID: e.BlockID, + BlockHeight: fmt.Sprint(e.BlockHeight), + BlockTimestamp: e.BlockTimestamp.Format(time.RFC3339Nano), + Events: events, + } + } + + data, err := json.Marshal(res) + require.NoError(t, err) + + return string(data) +} diff --git a/engine/access/rest/http/routes/execution_result.go b/engine/access/rest/http/routes/execution_result.go new file mode 100644 index 00000000000..74508753d77 --- /dev/null +++ b/engine/access/rest/http/routes/execution_result.go @@ -0,0 +1,62 @@ +package routes + +import ( + "fmt" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/http/request" +) + +// GetExecutionResultsByBlockIDs retrieves execution results for the requested block IDs. +func GetExecutionResultsByBlockIDs(r *common.Request, backend access.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetExecutionResultByBlockIDsRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + // for each block ID we retrieve the corresponding execution result + results := make([]commonmodels.ExecutionResult, len(req.BlockIDs)) + for i, id := range req.BlockIDs { + res, err := backend.GetExecutionResultForBlockID(r.Context(), id) + if err != nil { + return nil, err + } + + var response commonmodels.ExecutionResult + err = response.Build(res, link) + if err != nil { + return nil, err + } + results[i] = response + } + + return results, nil +} + +// GetExecutionResultByID retrieves an execution result by its ID.
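+// It returns a NotFoundError when the backend yields no execution result for the requested ID.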
+func GetExecutionResultByID(r *common.Request, backend access.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetExecutionResultRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + res, err := backend.GetExecutionResultByID(r.Context(), req.ID) + if err != nil { + return nil, err + } + + if res == nil { + err := fmt.Errorf("execution result with ID: %s not found", req.ID.String()) + return nil, common.NewNotFoundError(err.Error(), err) + } + + var response commonmodels.ExecutionResult + err = response.Build(res, link) + if err != nil { + return nil, err + } + + return response, nil +} diff --git a/engine/access/rest/execution_result_test.go b/engine/access/rest/http/routes/execution_result_test.go similarity index 87% rename from engine/access/rest/execution_result_test.go rename to engine/access/rest/http/routes/execution_result_test.go index adb3852c668..f38d5a6c78f 100644 --- a/engine/access/rest/execution_result_test.go +++ b/engine/access/rest/http/routes/execution_result_test.go @@ -1,4 +1,4 @@ -package rest +package routes_test import ( "fmt" @@ -7,12 +7,12 @@ import ( "strings" "testing" + mocks "github.com/stretchr/testify/mock" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - mocks "github.com/stretchr/testify/mock" - "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/router" "github.com/onflow/flow-go/engine/access/rest/util" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -37,7 +37,6 @@ func getResultByIDReq(id string, blockIDs []string) *http.Request { } func TestGetResultByID(t *testing.T) { - t.Run("get by ID", func(t *testing.T) { backend := &mock.API{} result := unittest.ExecutionResultFixture() @@ -49,7 +48,7 @@ func TestGetResultByID(t *testing.T) { req := getResultByIDReq(id.String(), nil) expected := executionResultExpectedStr(result) - assertOKResponse(t, req, expected, backend) + router.AssertOKResponse(t, req, expected, backend) mocks.AssertExpectationsForObjects(t, backend) }) @@ -62,12 +61,13 @@ func TestGetResultByID(t *testing.T) { Once() req := getResultByIDReq(id.String(), nil) - assertResponse(t, req, http.StatusNotFound, `{"code":404,"message":"Flow resource not found: block not found"}`, backend) + router.AssertResponse(t, req, http.StatusNotFound, `{"code":404,"message":"Flow resource not found: block not found"}`, backend) mocks.AssertExpectationsForObjects(t, backend) }) } func TestGetResultBlockID(t *testing.T) { + t.Run("get by block ID", func(t *testing.T) { backend := &mock.API{} blockID := unittest.IdentifierFixture() @@ -81,7 +81,7 @@ func TestGetResultBlockID(t *testing.T) { req := getResultByIDReq("", []string{blockID.String()}) expected := fmt.Sprintf(`[%s]`, executionResultExpectedStr(result)) - assertOKResponse(t, req, expected, backend) + router.AssertOKResponse(t, req, expected, backend) mocks.AssertExpectationsForObjects(t, backend) }) @@ -94,7 +94,7 @@ func TestGetResultBlockID(t *testing.T) { Once() req := getResultByIDReq("", []string{blockID.String()}) - assertResponse(t, req, http.StatusNotFound, `{"code":404,"message":"Flow resource not found: block not found"}`, backend) + router.AssertResponse(t, req, http.StatusNotFound, `{"code":404,"message":"Flow resource not found: block not found"}`, backend) mocks.AssertExpectationsForObjects(t, backend) }) } @@ -119,8 +119,8 @@ func executionResultExpectedStr(result *flow.ExecutionResult) string { "id": "%s", "block_id": "%s", "events": 
[], - "chunks": %s, - "previous_result_id": "%s", + "chunks": %s, + "previous_result_id": "%s", "_links": { "_self": "/v1/execution_results/%s" } diff --git a/engine/access/rest/http/routes/network.go b/engine/access/rest/http/routes/network.go new file mode 100644 index 00000000000..5f2954e4b8e --- /dev/null +++ b/engine/access/rest/http/routes/network.go @@ -0,0 +1,17 @@ +package routes + +import ( + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/http/models" +) + +// GetNetworkParameters returns network-wide parameters of the blockchain +func GetNetworkParameters(r *common.Request, backend access.API, _ commonmodels.LinkGenerator) (interface{}, error) { + params := backend.GetNetworkParameters(r.Context()) + + var response models.NetworkParameters + response.Build(¶ms) + return response, nil +} diff --git a/engine/access/rest/http/routes/network_test.go b/engine/access/rest/http/routes/network_test.go new file mode 100644 index 00000000000..add64f53009 --- /dev/null +++ b/engine/access/rest/http/routes/network_test.go @@ -0,0 +1,57 @@ +package routes_test + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + mocktestify "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/router" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" +) + +func networkURL(t *testing.T) string { + u, err := url.ParseRequestURI("/v1/network/parameters") + require.NoError(t, err) + + return u.String() +} + +func TestGetNetworkParameters(t *testing.T) { + backend := &mock.API{} + + t.Run("get network parameters on mainnet", func(t *testing.T) { + + req := getNetworkParametersRequest(t) + + params := accessmodel.NetworkParameters{ + ChainID: flow.Mainnet, + } + + backend.Mock. + On("GetNetworkParameters", mocktestify.Anything). 
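+ // GetNetworkParameters has no error return, so the mock yields only the parameters value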
+ Return(params) + + expected := networkParametersExpectedStr(flow.Mainnet) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) +} + +func networkParametersExpectedStr(chainID flow.ChainID) string { + return fmt.Sprintf(`{ + "chain_id": "%s" + }`, chainID) +} + +func getNetworkParametersRequest(t *testing.T) *http.Request { + req, err := http.NewRequest("GET", networkURL(t), nil) + require.NoError(t, err) + return req +} diff --git a/engine/access/rest/http/routes/node_version_info.go b/engine/access/rest/http/routes/node_version_info.go new file mode 100644 index 00000000000..da2da3e59af --- /dev/null +++ b/engine/access/rest/http/routes/node_version_info.go @@ -0,0 +1,20 @@ +package routes + +import ( + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/http/models" +) + +// GetNodeVersionInfo returns node version information +func GetNodeVersionInfo(r *common.Request, backend access.API, _ commonmodels.LinkGenerator) (interface{}, error) { + params, err := backend.GetNodeVersionInfo(r.Context()) + if err != nil { + return nil, err + } + + var response models.NodeVersionInfo + response.Build(params) + return response, nil +} diff --git a/engine/access/rest/http/routes/node_version_info_test.go b/engine/access/rest/http/routes/node_version_info_test.go new file mode 100644 index 00000000000..7bac9375249 --- /dev/null +++ b/engine/access/rest/http/routes/node_version_info_test.go @@ -0,0 +1,92 @@ +package routes_test + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + mocktestify "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/cmd/build" + "github.com/onflow/flow-go/engine/access/rest/router" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/utils/unittest" +) + +func nodeVersionInfoURL(t *testing.T) string { + u, err := url.ParseRequestURI("/v1/node_version_info") + require.NoError(t, err) + + return u.String() +} + +func TestGetNodeVersionInfo(t *testing.T) { + backend := mock.NewAPI(t) + + t.Run("get node version info", func(t *testing.T) { + req := getNodeVersionInfoRequest(t) + + nodeRootBlockHeight := unittest.Uint64InRange(10_000, 100_000) + + params := &accessmodel.NodeVersionInfo{ + Semver: build.Version(), + Commit: build.Commit(), + SporkId: unittest.IdentifierFixture(), + ProtocolStateVersion: unittest.Uint64InRange(10, 30), + SporkRootBlockHeight: unittest.Uint64InRange(1000, 10_000), + NodeRootBlockHeight: nodeRootBlockHeight, + CompatibleRange: &accessmodel.CompatibleRange{ + StartHeight: nodeRootBlockHeight, + EndHeight: uint64(0), + }, + } + + backend.Mock. + On("GetNodeVersionInfo", mocktestify.Anything). 
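+ // the fixture above leaves end_height at zero, presumably denoting an open-ended compatible range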
+ Return(params, nil) + + expected := nodeVersionInfoExpectedStr(params) + + router.AssertOKResponse(t, req, expected, backend) + mocktestify.AssertExpectationsForObjects(t, backend) + }) +} + +func nodeVersionInfoExpectedStr(nodeVersionInfo *accessmodel.NodeVersionInfo) string { + compatibleRange := fmt.Sprintf(`"compatible_range": { + "start_height": "%d", + "end_height": "%d" + }`, + nodeVersionInfo.CompatibleRange.StartHeight, + nodeVersionInfo.CompatibleRange.EndHeight, + ) + + return fmt.Sprintf(`{ + "semver": "%s", + "commit": "%s", + "spork_id": "%s", + "protocol_state_version": "%d", + "protocol_version": "%d", + "spork_root_block_height": "%d", + "node_root_block_height": "%d", + %s + }`, + nodeVersionInfo.Semver, + nodeVersionInfo.Commit, + nodeVersionInfo.SporkId.String(), + nodeVersionInfo.ProtocolStateVersion, + nodeVersionInfo.ProtocolVersion, + nodeVersionInfo.SporkRootBlockHeight, + nodeVersionInfo.NodeRootBlockHeight, + compatibleRange, + ) +} + +func getNodeVersionInfoRequest(t *testing.T) *http.Request { + req, err := http.NewRequest("GET", nodeVersionInfoURL(t), nil) + require.NoError(t, err) + return req +} diff --git a/engine/access/rest/http/routes/scripts.go b/engine/access/rest/http/routes/scripts.go new file mode 100644 index 00000000000..92ec825de7e --- /dev/null +++ b/engine/access/rest/http/routes/scripts.go @@ -0,0 +1,36 @@ +package routes + +import ( + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/model/flow" +) + +// ExecuteScript handler sends the script from the request to be executed. +func ExecuteScript(r *common.Request, backend access.API, _ models.LinkGenerator) (interface{}, error) { + req, err := request.GetScriptRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + if req.BlockID != flow.ZeroID { + return backend.ExecuteScriptAtBlockID(r.Context(), req.BlockID, req.Script.Source, req.Script.Args) + } + + // default to sealed height + if req.BlockHeight == request.SealedHeight || req.BlockHeight == request.EmptyHeight { + return backend.ExecuteScriptAtLatestBlock(r.Context(), req.Script.Source, req.Script.Args) + } + + if req.BlockHeight == request.FinalHeight { + finalBlock, _, err := backend.GetLatestBlockHeader(r.Context(), false) + if err != nil { + return nil, err + } + req.BlockHeight = finalBlock.Height + } + + return backend.ExecuteScriptAtBlockHeight(r.Context(), req.BlockHeight, req.Script.Source, req.Script.Args) +} diff --git a/engine/access/rest/http/routes/scripts_test.go b/engine/access/rest/http/routes/scripts_test.go new file mode 100644 index 00000000000..8e08c5d21a6 --- /dev/null +++ b/engine/access/rest/http/routes/scripts_test.go @@ -0,0 +1,132 @@ +package routes_test + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/url" + "testing" + + mocks "github.com/stretchr/testify/mock" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" +) + +func scriptReq(id string, height string, body interface{}) *http.Request { + u, _ := url.ParseRequestURI("/v1/scripts") + q := u.Query() + + if id != "" { + q.Add("block_id", id) + } + if height != "" { + q.Add("block_height", 
height) + } + + u.RawQuery = q.Encode() + + jsonBody, _ := json.Marshal(body) + req, _ := http.NewRequest("POST", u.String(), bytes.NewBuffer(jsonBody)) + + return req +} + +func TestScripts(t *testing.T) { + validCode := []byte(`access(all) fun main(foo: String): String { return foo }`) + validArgs := []byte(`{ "type": "String", "value": "hello world" }`) + validBody := map[string]interface{}{ + "script": util.ToBase64(validCode), + "arguments": []string{util.ToBase64(validArgs)}, + } + + t.Run("get by Latest height", func(t *testing.T) { + backend := &mock.API{} + backend.Mock. + On("ExecuteScriptAtLatestBlock", mocks.Anything, validCode, [][]byte{validArgs}). + Return([]byte("hello world"), nil) + + req := scriptReq("", router.SealedHeightQueryParam, validBody) + router.AssertOKResponse(t, req, fmt.Sprintf( + "\"%s\"", + base64.StdEncoding.EncodeToString([]byte(`hello world`)), + ), backend) + }) + + t.Run("get by height", func(t *testing.T) { + backend := &mock.API{} + height := uint64(1337) + + backend.Mock. + On("ExecuteScriptAtBlockHeight", mocks.Anything, height, validCode, [][]byte{validArgs}). + Return([]byte("hello world"), nil) + + req := scriptReq("", fmt.Sprintf("%d", height), validBody) + router.AssertOKResponse(t, req, fmt.Sprintf( + "\"%s\"", + base64.StdEncoding.EncodeToString([]byte(`hello world`)), + ), backend) + }) + + t.Run("get by ID", func(t *testing.T) { + backend := &mock.API{} + id, _ := flow.HexStringToIdentifier("222dc5dd51b9e4910f687e475f892f495f3352362ba318b53e318b4d78131312") + + backend.Mock. + On("ExecuteScriptAtBlockID", mocks.Anything, id, validCode, [][]byte{validArgs}). + Return([]byte("hello world"), nil) + + req := scriptReq(id.String(), "", validBody) + router.AssertOKResponse(t, req, fmt.Sprintf( + "\"%s\"", + base64.StdEncoding.EncodeToString([]byte(`hello world`)), + ), backend) + }) + + t.Run("get error", func(t *testing.T) { + backend := &mock.API{} + backend.Mock. + On("ExecuteScriptAtBlockHeight", mocks.Anything, uint64(1337), validCode, [][]byte{validArgs}). + Return(nil, status.Error(codes.Internal, "internal server error")) + + req := scriptReq("", "1337", validBody) + router.AssertResponse( + t, + req, + http.StatusBadRequest, + `{"code":400, "message":"Invalid Flow request: internal server error"}`, + backend, + ) + }) + + t.Run("get invalid", func(t *testing.T) { + backend := &mock.API{} + backend.Mock. + On("ExecuteScriptAtBlockHeight", mocks.Anything, mocks.Anything, mocks.Anything, mocks.Anything). 
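+ // catch-all stub: the invalid requests below are expected to fail validation before reaching the backend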
+ Return(nil, nil) + + tests := []struct { + id string + height string + body map[string]interface{} + out string + status int + }{ + {"invalidID", "", validBody, `{"code":400,"message":"invalid ID format"}`, http.StatusBadRequest}, + {"", "invalid", validBody, `{"code":400,"message":"invalid height format"}`, http.StatusBadRequest}, + {"", "-1", validBody, `{"code":400,"message":"invalid height format"}`, http.StatusBadRequest}, + {"", "1337", nil, `{"code":400,"message":"request body must not be empty"}`, http.StatusBadRequest}, + } + + for _, test := range tests { + req := scriptReq(test.id, test.height, test.body) + router.AssertResponse(t, req, http.StatusBadRequest, test.out, backend) + } + }) +} diff --git a/engine/access/rest/http/routes/transactions.go b/engine/access/rest/http/routes/transactions.go new file mode 100644 index 00000000000..aad6d819e60 --- /dev/null +++ b/engine/access/rest/http/routes/transactions.go @@ -0,0 +1,83 @@ +package routes + +import ( + entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/http/request" + accessmodel "github.com/onflow/flow-go/model/access" +) + +// GetTransactionByID gets a transaction by requested ID. +func GetTransactionByID(r *common.Request, backend access.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetTransactionRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + tx, err := backend.GetTransaction(r.Context(), req.ID) + if err != nil { + return nil, err + } + + var txr *accessmodel.TransactionResult + // only lookup result if transaction result is to be expanded + if req.ExpandsResult { + txr, err = backend.GetTransactionResult( + r.Context(), + req.ID, + req.BlockID, + req.CollectionID, + entitiesproto.EventEncodingVersion_JSON_CDC_V0, + ) + if err != nil { + return nil, err + } + } + + var response commonmodels.Transaction + response.Build(tx, txr, link) + return response, nil +} + +// GetTransactionResultByID retrieves transaction result by the transaction ID. +func GetTransactionResultByID(r *common.Request, backend access.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.GetTransactionResultRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + txr, err := backend.GetTransactionResult( + r.Context(), + req.ID, + req.BlockID, + req.CollectionID, + entitiesproto.EventEncodingVersion_JSON_CDC_V0, + ) + if err != nil { + return nil, err + } + + var response commonmodels.TransactionResult + response.Build(txr, req.ID, link) + return response, nil +} + +// CreateTransaction creates a new transaction from provided payload. 
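+// On success the submitted transaction is echoed back with self links and no result attached.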
+func CreateTransaction(r *common.Request, backend access.API, link commonmodels.LinkGenerator) (interface{}, error) { + req, err := request.CreateTransactionRequest(r) + if err != nil { + return nil, common.NewBadRequestError(err) + } + + err = backend.SendTransaction(r.Context(), &req.Transaction) + if err != nil { + return nil, err + } + + var response commonmodels.Transaction + response.Build(&req.Transaction, nil, link) + return response, nil +} diff --git a/engine/access/rest/http/routes/transactions_test.go b/engine/access/rest/http/routes/transactions_test.go new file mode 100644 index 00000000000..cb2f8a384df --- /dev/null +++ b/engine/access/rest/http/routes/transactions_test.go @@ -0,0 +1,451 @@ +package routes_test + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + "testing" + + mocks "github.com/stretchr/testify/mock" + "golang.org/x/text/cases" + "golang.org/x/text/language" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/engine/access/rest/util" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func getTransactionReq(id string, expandResult bool, blockIdQuery string, collectionIdQuery string) *http.Request { + u, _ := url.Parse(fmt.Sprintf("/v1/transactions/%s", id)) + q := u.Query() + + if expandResult { + // by default expand all since we test expanding with converters + q.Add("expand", "result") + } + + if blockIdQuery != "" { + q.Add("block_id", blockIdQuery) + } + + if collectionIdQuery != "" { + q.Add("collection_id", collectionIdQuery) + } + + u.RawQuery = q.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +func getTransactionResultReq(id string, blockIdQuery string, collectionIdQuery string) *http.Request { + u, _ := url.Parse(fmt.Sprintf("/v1/transaction_results/%s", id)) + q := u.Query() + if blockIdQuery != "" { + q.Add("block_id", blockIdQuery) + } + + if collectionIdQuery != "" { + q.Add("collection_id", collectionIdQuery) + } + + u.RawQuery = q.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +func createTransactionReq(body interface{}) *http.Request { + jsonBody, _ := json.Marshal(body) + req, _ := http.NewRequest("POST", "/v1/transactions", bytes.NewBuffer(jsonBody)) + return req +} + +func TestGetTransactions(t *testing.T) { + t.Run("get by ID without results", func(t *testing.T) { + backend := &mock.API{} + tx := unittest.TransactionFixture() + req := getTransactionReq(tx.ID().String(), false, "", "") + + backend.Mock. + On("GetTransaction", mocks.Anything, tx.ID()). 
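+ // the transaction is returned without a result, since this request does not expand the result field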
+ Return(&tx, nil) + + expected := fmt.Sprintf(` + { + "id":"%s", + "script":"YWNjZXNzKGFsbCkgZnVuIG1haW4oKSB7fQ==", + "arguments": [], + "reference_block_id":"%s", + "gas_limit":"10", + "payer":"8c5303eaa26202d6", + "proposal_key":{ + "address":"8c5303eaa26202d6", + "key_index":"1", + "sequence_number":"0" + }, + "authorizers":[ + "8c5303eaa26202d6" + ], + "payload_signatures": [], + "envelope_signatures":[ + { + "address":"8c5303eaa26202d6", + "key_index":"1", + "signature":"%s" + } + ], + "_links":{ + "_self":"/v1/transactions/%s" + }, + "_expandable": { + "result": "/v1/transaction_results/%s" + } + }`, + tx.ID(), tx.ReferenceBlockID, util.ToBase64(tx.EnvelopeSignatures[0].Signature), tx.ID(), tx.ID()) + + router.AssertOKResponse(t, req, expected, backend) + }) + + t.Run("Get by ID with results", func(t *testing.T) { + backend := &mock.API{} + + tx := unittest.TransactionBodyFixture() + txr := transactionResultFixture(tx) + + backend.Mock. + On("GetTransaction", mocks.Anything, tx.ID()). + Return(&tx, nil) + + backend.Mock. + On("GetTransactionResult", mocks.Anything, tx.ID(), flow.ZeroID, flow.ZeroID, entities.EventEncodingVersion_JSON_CDC_V0). + Return(txr, nil) + + req := getTransactionReq(tx.ID().String(), true, "", "") + + expected := fmt.Sprintf(` + { + "id":"%s", + "script":"YWNjZXNzKGFsbCkgZnVuIG1haW4oKSB7fQ==", + "arguments": [], + "reference_block_id":"%s", + "gas_limit":"10", + "payer":"8c5303eaa26202d6", + "proposal_key":{ + "address":"8c5303eaa26202d6", + "key_index":"1", + "sequence_number":"0" + }, + "authorizers":[ + "8c5303eaa26202d6" + ], + "payload_signatures": [], + "envelope_signatures":[ + { + "address":"8c5303eaa26202d6", + "key_index":"1", + "signature":"%s" + } + ], + "result": { + "block_id": "%s", + "collection_id": "%s", + "execution": "Success", + "status": "Sealed", + "status_code": 1, + "error_message": "", + "computation_used": "0", + "events": [ + { + "type": "flow.AccountCreated", + "transaction_id": "%s", + "transaction_index": "0", + "event_index": "0", + "payload": "" + } + ], + "_links": { + "_self": "/v1/transaction_results/%s" + } + }, + "_expandable": {}, + "_links":{ + "_self":"/v1/transactions/%s" + } + }`, + tx.ID(), tx.ReferenceBlockID, util.ToBase64(tx.EnvelopeSignatures[0].Signature), tx.ReferenceBlockID, txr.CollectionID, tx.ID(), tx.ID(), tx.ID()) + router.AssertOKResponse(t, req, expected, backend) + }) + + t.Run("get by ID Invalid", func(t *testing.T) { + backend := &mock.API{} + + req := getTransactionReq("invalid", false, "", "") + expected := `{"code":400, "message":"invalid ID format"}` + router.AssertResponse(t, req, http.StatusBadRequest, expected, backend) + }) + + t.Run("get by ID non-existing", func(t *testing.T) { + backend := &mock.API{} + + tx := unittest.TransactionFixture() + req := getTransactionReq(tx.ID().String(), false, "", "") + + backend.Mock. + On("GetTransaction", mocks.Anything, tx.ID()). 
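+ // the backend's gRPC NotFound error is expected to surface as a REST 404 below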
+ Return(nil, status.Error(codes.NotFound, "transaction not found")) + + expected := `{"code":404, "message":"Flow resource not found: transaction not found"}` + router.AssertResponse(t, req, http.StatusNotFound, expected, backend) + }) +} + +func TestGetTransactionResult(t *testing.T) { + id := unittest.IdentifierFixture() + bid := unittest.IdentifierFixture() + cid := unittest.IdentifierFixture() + txr := &accessmodel.TransactionResult{ + Status: flow.TransactionStatusSealed, + StatusCode: 10, + Events: []flow.Event{ + unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithTransactionIndex(1), + unittest.Event.WithEventIndex(0), + unittest.Event.WithTransactionID(id), + ), + }, + ErrorMessage: "", + BlockID: bid, + CollectionID: cid, + } + txr.Events[0].Payload = []byte(`test payload`) + expected := fmt.Sprintf(`{ + "block_id": "%s", + "collection_id": "%s", + "execution": "Success", + "status": "Sealed", + "status_code": 10, + "error_message": "", + "computation_used": "0", + "events": [ + { + "type": "flow.AccountCreated", + "transaction_id": "%s", + "transaction_index": "1", + "event_index": "0", + "payload": "%s" + } + ], + "_links": { + "_self": "/v1/transaction_results/%s" + } + }`, bid.String(), cid.String(), id.String(), util.ToBase64(txr.Events[0].Payload), id.String()) + + t.Run("get by transaction ID", func(t *testing.T) { + backend := &mock.API{} + req := getTransactionResultReq(id.String(), "", "") + + backend.Mock. + On("GetTransactionResult", mocks.Anything, id, flow.ZeroID, flow.ZeroID, entities.EventEncodingVersion_JSON_CDC_V0). + Return(txr, nil) + + router.AssertOKResponse(t, req, expected, backend) + }) + + t.Run("get by block ID", func(t *testing.T) { + backend := &mock.API{} + + req := getTransactionResultReq(id.String(), bid.String(), "") + + backend.Mock. + On("GetTransactionResult", mocks.Anything, id, bid, flow.ZeroID, entities.EventEncodingVersion_JSON_CDC_V0). + Return(txr, nil) + + router.AssertOKResponse(t, req, expected, backend) + }) + + t.Run("get by collection ID", func(t *testing.T) { + backend := &mock.API{} + req := getTransactionResultReq(id.String(), "", cid.String()) + + backend.Mock. + On("GetTransactionResult", mocks.Anything, id, flow.ZeroID, cid, entities.EventEncodingVersion_JSON_CDC_V0). + Return(txr, nil) + + router.AssertOKResponse(t, req, expected, backend) + }) + + t.Run("get execution statuses", func(t *testing.T) { + backend := &mock.API{} + + testVectors := map[*accessmodel.TransactionResult]string{{ + Status: flow.TransactionStatusExpired, + ErrorMessage: "", + }: string(models.FAILURE_RESULT), { + Status: flow.TransactionStatusSealed, + ErrorMessage: "cadence runtime exception", + }: string(models.FAILURE_RESULT), { + Status: flow.TransactionStatusFinalized, + ErrorMessage: "", + }: string(models.PENDING_RESULT), { + Status: flow.TransactionStatusPending, + ErrorMessage: "", + }: string(models.PENDING_RESULT), { + Status: flow.TransactionStatusExecuted, + ErrorMessage: "", + }: string(models.PENDING_RESULT), { + Status: flow.TransactionStatusSealed, + ErrorMessage: "", + }: string(models.SUCCESS_RESULT)} + + for txResult, err := range testVectors { + txResult.BlockID = bid + txResult.CollectionID = cid + req := getTransactionResultReq(id.String(), "", "") + backend.Mock. + On("GetTransactionResult", mocks.Anything, id, flow.ZeroID, flow.ZeroID, entities.EventEncodingVersion_JSON_CDC_V0). + Return(txResult, nil). 
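+ // Once() scopes each stubbed result to a single iteration over the status table above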
+ Once() + + expectedResp := fmt.Sprintf(`{ + "block_id": "%s", + "collection_id": "%s", + "execution": "%s", + "status": "%s", + "status_code": 0, + "error_message": "%s", + "computation_used": "0", + "events": [], + "_links": { + "_self": "/v1/transaction_results/%s" + } + }`, bid.String(), cid.String(), err, cases.Title(language.English).String(strings.ToLower(txResult.Status.String())), txResult.ErrorMessage, id.String()) + router.AssertOKResponse(t, req, expectedResp, backend) + } + }) + + t.Run("get by ID Invalid", func(t *testing.T) { + backend := &mock.API{} + + req := getTransactionResultReq("invalid", "", "") + + expected := `{"code":400, "message":"invalid ID format"}` + router.AssertResponse(t, req, http.StatusBadRequest, expected, backend) + }) +} + +func TestCreateTransaction(t *testing.T) { + backend := &mock.API{} + + t.Run("create", func(t *testing.T) { + tx := unittest.TransactionBodyFixture() + tx.PayloadSignatures = []flow.TransactionSignature{unittest.TransactionSignatureFixture()} + tx.Arguments = [][]uint8{} + req := createTransactionReq(unittest.CreateSendTxHttpPayload(tx)) + + backend.Mock. + On("SendTransaction", mocks.Anything, &tx). + Return(nil) + + expected := fmt.Sprintf(` + { + "id":"%s", + "script":"YWNjZXNzKGFsbCkgZnVuIG1haW4oKSB7fQ==", + "arguments": [], + "reference_block_id":"%s", + "gas_limit":"10", + "payer":"8c5303eaa26202d6", + "proposal_key":{ + "address":"8c5303eaa26202d6", + "key_index":"1", + "sequence_number":"0" + }, + "authorizers":[ + "8c5303eaa26202d6" + ], + "payload_signatures":[ + { + "address":"8c5303eaa26202d6", + "key_index":"1", + "signature":"%s" + } + ], + "envelope_signatures":[ + { + "address":"8c5303eaa26202d6", + "key_index":"1", + "signature":"%s" + } + ], + "_expandable": { + "result": "/v1/transaction_results/%s" + }, + "_links":{ + "_self":"/v1/transactions/%s" + } + }`, + tx.ID(), tx.ReferenceBlockID, util.ToBase64(tx.PayloadSignatures[0].Signature), util.ToBase64(tx.EnvelopeSignatures[0].Signature), tx.ID(), tx.ID()) + router.AssertOKResponse(t, req, expected, backend) + }) + + t.Run("post invalid transaction", func(t *testing.T) { + tests := []struct { + inputField string + inputValue string + output string + }{ + {"reference_block_id", "-1", `{"code":400, "message":"invalid reference block ID: invalid ID format"}`}, + {"reference_block_id", "", `{"code":400, "message":"reference block not provided"}`}, + {"gas_limit", "-1", `{"code":400, "message":"invalid gas limit: value must be an unsigned 64 bit integer"}`}, + {"gas_limit", "18446744073709551616", `{"code":400, "message":"invalid gas limit: value overflows uint64 range"}`}, + {"payer", "yo", `{"code":400, "message":"invalid payer: invalid address"}`}, + {"proposal_key", "yo", `{"code":400, "message":"request body contains an invalid value for the \"proposal_key\" field (at position 461)"}`}, + {"authorizers", "", `{"code":400, "message":"request body contains an invalid value for the \"authorizers\" field (at position 32)"}`}, + {"authorizers", "yo", `{"code":400, "message":"request body contains an invalid value for the \"authorizers\" field (at position 34)"}`}, + {"envelope_signatures", "", `{"code":400, "message":"request body contains an invalid value for the \"envelope_signatures\" field (at position 75)"}`}, + {"payload_signatures", "", `{"code":400, "message":"request body contains an invalid value for the \"payload_signatures\" field (at position 292)"}`}, + } + + for _, test := range tests { + tx := unittest.TransactionBodyFixture() + tx.PayloadSignatures 
= []flow.TransactionSignature{unittest.TransactionSignatureFixture()} + testTx := unittest.CreateSendTxHttpPayload(tx) + testTx[test.inputField] = test.inputValue + req := createTransactionReq(testTx) + + router.AssertResponse(t, req, http.StatusBadRequest, test.output, backend) + } + }) +} + +func transactionResultFixture(tx flow.TransactionBody) *accessmodel.TransactionResult { + cid := unittest.IdentifierFixture() + return &accessmodel.TransactionResult{ + Status: flow.TransactionStatusSealed, + StatusCode: 1, + Events: []flow.Event{ + unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithTransactionIndex(0), + unittest.Event.WithEventIndex(0), + unittest.Event.WithTransactionID(tx.ID()), + unittest.Event.WithPayload([]byte{}), + ), + }, + ErrorMessage: "", + BlockID: tx.ReferenceBlockID, + CollectionID: cid, + } +} diff --git a/engine/access/rest/middleware/logging.go b/engine/access/rest/middleware/logging.go deleted file mode 100644 index 577843e2c86..00000000000 --- a/engine/access/rest/middleware/logging.go +++ /dev/null @@ -1,50 +0,0 @@ -package middleware - -import ( - "net/http" - "time" - - "github.com/gorilla/mux" - "github.com/rs/zerolog" -) - -// LoggingMiddleware creates a middleware which adds a logger interceptor to each request to log the request method, uri, -// duration and response code -func LoggingMiddleware(logger zerolog.Logger) mux.MiddlewareFunc { - return func(inner http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - // record star time - start := time.Now() - // modify the writer - respWriter := newResponseWriter(w) - // continue to the next handler - inner.ServeHTTP(respWriter, req) - log := logger.Info() - if respWriter.statusCode != http.StatusOK { - log = logger.Error() - } - log.Str("method", req.Method). - Str("uri", req.RequestURI). - Str("client_ip", req.RemoteAddr). - Str("user_agent", req.UserAgent()). - Dur("duration", time.Since(start)). - Int("response_code", respWriter.statusCode). 
- Msg("api") - }) - } -} - -// responseWriter is a wrapper around http.ResponseWriter and helps capture the response code -type responseWriter struct { - http.ResponseWriter - statusCode int -} - -func newResponseWriter(w http.ResponseWriter) *responseWriter { - return &responseWriter{w, http.StatusOK} -} - -func (rw *responseWriter) WriteHeader(code int) { - rw.statusCode = code - rw.ResponseWriter.WriteHeader(code) -} diff --git a/engine/access/rest/middleware/metrics.go b/engine/access/rest/middleware/metrics.go deleted file mode 100644 index c0d51d36eb6..00000000000 --- a/engine/access/rest/middleware/metrics.go +++ /dev/null @@ -1,32 +0,0 @@ -package middleware - -import ( - "net/http" - - "github.com/onflow/flow-go/module/metrics" - - "github.com/slok/go-http-metrics/middleware" - "github.com/slok/go-http-metrics/middleware/std" - - metricsProm "github.com/slok/go-http-metrics/metrics/prometheus" - - "github.com/gorilla/mux" -) - -func MetricsMiddleware() mux.MiddlewareFunc { - r := metrics.NewRestCollector(metricsProm.Config{Prefix: "access_rest_api"}) - metricsMiddleware := middleware.New(middleware.Config{Recorder: r}) - - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - // This is a custom metric being called on every http request - r.AddTotalRequests(req.Context(), req.Method, req.URL.Path) - - // Modify the writer - respWriter := &responseWriter{w, http.StatusOK} - - // Record go-http-metrics/middleware metrics and continue to the next handler - std.Handler("", metricsMiddleware, next).ServeHTTP(respWriter, req) - }) - } -} diff --git a/engine/access/rest/models/account.go b/engine/access/rest/models/account.go deleted file mode 100644 index 80b63d9aecd..00000000000 --- a/engine/access/rest/models/account.go +++ /dev/null @@ -1,68 +0,0 @@ -package models - -import ( - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/model/flow" -) - -const expandableKeys = "keys" -const expandableContracts = "contracts" - -func (a *Account) Build(flowAccount *flow.Account, link LinkGenerator, expand map[string]bool) error { - a.Address = flowAccount.Address.String() - a.Balance = util.FromUint64(flowAccount.Balance) - a.Expandable = &AccountExpandable{} - - if expand[expandableKeys] { - var keys AccountPublicKeys - keys.Build(flowAccount.Keys) - a.Keys = keys - } else { - a.Expandable.Keys = expandableKeys - } - - if expand[expandableContracts] { - contracts := make(map[string]string, len(flowAccount.Contracts)) - for name, code := range flowAccount.Contracts { - contracts[name] = util.ToBase64(code) - } - a.Contracts = contracts - } else { - a.Expandable.Contracts = expandableContracts - } - - var self Links - err := self.Build(link.AccountLink(a.Address)) - if err != nil { - return err - } - a.Links = &self - - return nil -} - -func (a *AccountPublicKey) Build(k flow.AccountPublicKey) { - sigAlgo := SigningAlgorithm(k.SignAlgo.String()) - hashAlgo := HashingAlgorithm(k.HashAlgo.String()) - - a.Index = util.FromUint64(uint64(k.Index)) - a.PublicKey = k.PublicKey.String() - a.SigningAlgorithm = &sigAlgo - a.HashingAlgorithm = &hashAlgo - a.SequenceNumber = util.FromUint64(k.SeqNumber) - a.Weight = util.FromUint64(uint64(k.Weight)) - a.Revoked = k.Revoked -} - -type AccountPublicKeys []AccountPublicKey - -func (a *AccountPublicKeys) Build(accountKeys []flow.AccountPublicKey) { - keys := make([]AccountPublicKey, len(accountKeys)) - for i, k := range accountKeys { - var key AccountPublicKey - 
key.Build(k) - keys[i] = key - } - - *a = keys -} diff --git a/engine/access/rest/models/block.go b/engine/access/rest/models/block.go deleted file mode 100644 index 3d5ccc64bf9..00000000000 --- a/engine/access/rest/models/block.go +++ /dev/null @@ -1,155 +0,0 @@ -package models - -import ( - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/model/flow" -) - -func (b *Block) Build( - block *flow.Block, - execResult *flow.ExecutionResult, - link LinkGenerator, - blockStatus flow.BlockStatus, - expand map[string]bool, -) error { - self, err := SelfLink(block.ID(), link.BlockLink) - if err != nil { - return err - } - - var header BlockHeader - header.Build(block.Header) - b.Header = &header - - // add the payload to the response if it is specified as an expandable field - b.Expandable = &BlockExpandable{} - const ExpandableFieldPayload = "payload" - if expand[ExpandableFieldPayload] { - var payload BlockPayload - err := payload.Build(block.Payload) - if err != nil { - return err - } - b.Payload = &payload - } else { - // else add the payload expandable link - payloadExpandable, err := link.PayloadLink(block.ID()) - if err != nil { - return err - } - b.Expandable.Payload = payloadExpandable - } - - // execution result might not yet exist - if execResult != nil { - // add the execution result to the response if it is specified as an expandable field - const ExpandableExecutionResult = "execution_result" - if expand[ExpandableExecutionResult] { - var exeResult ExecutionResult - err := exeResult.Build(execResult, link) - if err != nil { - return err - } - b.ExecutionResult = &exeResult - } else { - // else add the execution result expandable link - executionResultExpandable, err := link.ExecutionResultLink(execResult.ID()) - if err != nil { - return err - } - b.Expandable.ExecutionResult = executionResultExpandable - } - } - - b.Links = self - b.BlockStatus = blockStatus.String() - return nil -} - -func (b *BlockPayload) Build(payload *flow.Payload) error { - var blockSeal BlockSeals - err := blockSeal.Build(payload.Seals) - if err != nil { - return err - } - b.BlockSeals = blockSeal - - var guarantees CollectionGuarantees - guarantees.Build(payload.Guarantees) - b.CollectionGuarantees = guarantees - - return nil -} - -func (b *BlockHeader) Build(header *flow.Header) { - b.Id = header.ID().String() - b.ParentId = header.ParentID.String() - b.Height = util.FromUint64(header.Height) - b.Timestamp = header.Timestamp - b.ParentVoterSignature = util.ToBase64(header.ParentVoterSigData) -} - -type BlockSeals []BlockSeal - -func (b *BlockSeals) Build(seals []*flow.Seal) error { - blkSeals := make([]BlockSeal, len(seals)) - for i, s := range seals { - var seal BlockSeal - err := seal.Build(s) - if err != nil { - return err - } - blkSeals[i] = seal - } - - *b = blkSeals - return nil -} - -func (b *BlockSeal) Build(seal *flow.Seal) error { - finalState := "" - if len(seal.FinalState) > 0 { // todo(sideninja) this is always true? 
- finalStateBytes, err := seal.FinalState.MarshalJSON() - if err != nil { - return err - } - finalState = string(finalStateBytes) - } - - var aggregatedSigs AggregatedSignatures - aggregatedSigs.Build(seal.AggregatedApprovalSigs) - - b.BlockId = seal.BlockID.String() - b.ResultId = seal.ResultID.String() - b.FinalState = finalState - b.AggregatedApprovalSignatures = aggregatedSigs - return nil -} - -type AggregatedSignatures []AggregatedSignature - -func (a *AggregatedSignatures) Build(signatures []flow.AggregatedSignature) { - response := make([]AggregatedSignature, len(signatures)) - for i, signature := range signatures { - var sig AggregatedSignature - sig.Build(signature) - response[i] = sig - } - - *a = response -} - -func (a *AggregatedSignature) Build(signature flow.AggregatedSignature) { - verifierSignatures := make([]string, len(signature.VerifierSignatures)) - for y, verifierSignature := range signature.VerifierSignatures { - verifierSignatures[y] = util.ToBase64(verifierSignature.Bytes()) - } - - signerIDs := make([]string, len(signature.SignerIDs)) - for j, signerID := range signature.SignerIDs { - signerIDs[j] = signerID.String() - } - - a.VerifierSignatures = verifierSignatures - a.SignerIds = signerIDs -} diff --git a/engine/access/rest/models/collection.go b/engine/access/rest/models/collection.go deleted file mode 100644 index c5076fdc7db..00000000000 --- a/engine/access/rest/models/collection.go +++ /dev/null @@ -1,61 +0,0 @@ -package models - -import ( - "fmt" - - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/model/flow" -) - -const ExpandsTransactions = "transactions" - -func (c *Collection) Build( - collection *flow.LightCollection, - txs []*flow.TransactionBody, - link LinkGenerator, - expand map[string]bool) error { - - self, err := SelfLink(collection.ID(), link.CollectionLink) - if err != nil { - return err - } - - var expandable CollectionExpandable - var transactions Transactions - if expand[ExpandsTransactions] { - transactions.Build(txs, link) - } else { - expandable.Transactions = make([]string, len(collection.Transactions)) - for i, tx := range collection.Transactions { - expandable.Transactions[i], err = link.TransactionLink(tx) - if err != nil { - return err - } - } - } - - c.Id = collection.ID().String() - c.Transactions = transactions - c.Links = self - c.Expandable = &expandable - - return nil -} - -func (c *CollectionGuarantee) Build(guarantee *flow.CollectionGuarantee) { - c.CollectionId = guarantee.CollectionID.String() - c.SignerIndices = fmt.Sprintf("%x", guarantee.SignerIndices) - c.Signature = util.ToBase64(guarantee.Signature.Bytes()) -} - -type CollectionGuarantees []CollectionGuarantee - -func (c *CollectionGuarantees) Build(guarantees []*flow.CollectionGuarantee) { - collGuarantees := make([]CollectionGuarantee, len(guarantees)) - for i, g := range guarantees { - var col CollectionGuarantee - col.Build(g) - collGuarantees[i] = col - } - *c = collGuarantees -} diff --git a/engine/access/rest/models/event.go b/engine/access/rest/models/event.go deleted file mode 100644 index b8af9e11d81..00000000000 --- a/engine/access/rest/models/event.go +++ /dev/null @@ -1,55 +0,0 @@ -package models - -import ( - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/model/flow" -) - -func (e *Event) Build(event flow.Event) { - e.Type_ = string(event.Type) - e.TransactionId = event.TransactionID.String() - e.TransactionIndex = util.FromUint64(uint64(event.TransactionIndex)) - e.EventIndex = 
util.FromUint64(uint64(event.EventIndex)) - e.Payload = util.ToBase64(event.Payload) -} - -type Events []Event - -func (e *Events) Build(events []flow.Event) { - evs := make([]Event, len(events)) - for i, ev := range events { - var event Event - event.Build(ev) - evs[i] = event - } - - *e = evs -} - -func (b *BlockEvents) Build(blockEvents flow.BlockEvents) { - b.BlockHeight = util.FromUint64(blockEvents.BlockHeight) - b.BlockId = blockEvents.BlockID.String() - b.BlockTimestamp = blockEvents.BlockTimestamp - - var events Events - events.Build(blockEvents.Events) - b.Events = events -} - -type BlocksEvents []BlockEvents - -func (b *BlocksEvents) Build(blocksEvents []flow.BlockEvents) { - evs := make([]BlockEvents, 0) - for _, ev := range blocksEvents { - // don't include blocks without events - if len(ev.Events) == 0 { - continue - } - - var blockEvent BlockEvents - blockEvent.Build(ev) - evs = append(evs, blockEvent) - } - - *b = evs -} diff --git a/engine/access/rest/models/execution_result.go b/engine/access/rest/models/execution_result.go deleted file mode 100644 index a8048b09883..00000000000 --- a/engine/access/rest/models/execution_result.go +++ /dev/null @@ -1,51 +0,0 @@ -package models - -import ( - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/model/flow" -) - -func (e *ExecutionResult) Build( - exeResult *flow.ExecutionResult, - link LinkGenerator, -) error { - self, err := SelfLink(exeResult.ID(), link.ExecutionResultLink) - if err != nil { - return err - } - - events := make([]Event, len(exeResult.ServiceEvents)) - for i, e := range exeResult.ServiceEvents { - events[i] = Event{ - Type_: e.Type.String(), - } - } - - e.Id = exeResult.ID().String() - e.BlockId = exeResult.BlockID.String() - e.Events = events - e.Links = self - - e.PreviousResultId = exeResult.PreviousResultID.String() - - chunks := make([]Chunk, len(exeResult.Chunks)) - - for i, flowChunk := range exeResult.Chunks { - var chunk Chunk - chunk.Build(flowChunk) - chunks[i] = chunk - } - e.Chunks = chunks - return nil -} - -func (c *Chunk) Build(chunk *flow.Chunk) { - c.BlockId = chunk.BlockID.String() - c.Index = util.FromUint64(chunk.Index) - c.CollectionIndex = util.FromUint64(uint64(chunk.CollectionIndex)) - c.StartState = util.ToBase64(chunk.StartState[:]) - c.EndState = util.ToBase64(chunk.EndState[:]) - c.NumberOfTransactions = util.FromUint64(chunk.NumberOfTransactions) - c.EventCollection = chunk.EventCollection.String() - c.TotalComputationUsed = util.FromUint64(chunk.TotalComputationUsed) -} diff --git a/engine/access/rest/models/model_collection_guarantee.go b/engine/access/rest/models/model_collection_guarantee.go deleted file mode 100644 index c41908c9951..00000000000 --- a/engine/access/rest/models/model_collection_guarantee.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Access API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 1.0.0 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ -package models - -type CollectionGuarantee struct { - CollectionId string `json:"collection_id"` - SignerIndices string `json:"signer_indices"` - Signature string `json:"signature"` -} diff --git a/engine/access/rest/models/model_inline_response_200.go b/engine/access/rest/models/model_inline_response_200.go deleted file mode 100644 index 75b1b895639..00000000000 --- a/engine/access/rest/models/model_inline_response_200.go +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Access 
API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 1.0.0 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ -package models - -type InlineResponse200 struct { - Value string `json:"value,omitempty"` -} diff --git a/engine/access/rest/models/model_node_version_info.go b/engine/access/rest/models/model_node_version_info.go deleted file mode 100644 index 0e29f8d480a..00000000000 --- a/engine/access/rest/models/model_node_version_info.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Access API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 1.0.0 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ -package models - -type NodeVersionInfo struct { - Semver string `json:"semver"` - Commit string `json:"commit"` - SporkId string `json:"spork_id"` - ProtocolVersion string `json:"protocol_version"` -} diff --git a/engine/access/rest/models/model_one_of_block_height.go b/engine/access/rest/models/model_one_of_block_height.go deleted file mode 100644 index a28c35ddfa1..00000000000 --- a/engine/access/rest/models/model_one_of_block_height.go +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Access API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 1.0.0 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ -package models - -type OneOfBlockHeight struct { -} diff --git a/engine/access/rest/models/model_scripts_body.go b/engine/access/rest/models/model_scripts_body.go deleted file mode 100644 index e2d5a8533fd..00000000000 --- a/engine/access/rest/models/model_scripts_body.go +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Access API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 1.0.0 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ -package models - -type ScriptsBody struct { - // Base64 encoded content of the Cadence script. - Script string `json:"script,omitempty"` - // An array containing arguments each encoded as Base64 passed in the [JSON-Cadence interchange format](https://docs.onflow.org/cadence/json-cadence-spec/). 
- Arguments []string `json:"arguments,omitempty"` -} diff --git a/engine/access/rest/models/model_transaction_result__expandable.go b/engine/access/rest/models/model_transaction_result__expandable.go deleted file mode 100644 index dbc01abeae6..00000000000 --- a/engine/access/rest/models/model_transaction_result__expandable.go +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Access API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 1.0.0 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ -package models - -type TransactionResultExpandable struct { - Events string `json:"events,omitempty"` -} diff --git a/engine/access/rest/models/model_transaction_signature.go b/engine/access/rest/models/model_transaction_signature.go deleted file mode 100644 index 4db8713f19a..00000000000 --- a/engine/access/rest/models/model_transaction_signature.go +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Access API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 1.0.0 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ -package models - -type TransactionSignature struct { - Address string `json:"address"` - KeyIndex string `json:"key_index"` - Signature string `json:"signature"` -} diff --git a/engine/access/rest/models/model_transactions_body.go b/engine/access/rest/models/model_transactions_body.go deleted file mode 100644 index dafb271c86e..00000000000 --- a/engine/access/rest/models/model_transactions_body.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Access API - * - * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) - * - * API version: 1.0.0 - * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) - */ -package models - -type TransactionsBody struct { - // Base64 encoded content of the Cadence script. - Script string `json:"script"` - // An array containing arguments each encoded as Base64 passed in the [JSON-Cadence interchange format](https://docs.onflow.org/cadence/json-cadence-spec/). - Arguments []string `json:"arguments"` - ReferenceBlockId string `json:"reference_block_id"` - // The limit on the amount of computation a transaction is allowed to preform. 
- GasLimit string `json:"gas_limit"` - Payer string `json:"payer"` - ProposalKey *ProposalKey `json:"proposal_key"` - Authorizers []string `json:"authorizers"` - PayloadSignatures []TransactionSignature `json:"payload_signatures"` - EnvelopeSignatures []TransactionSignature `json:"envelope_signatures"` -} diff --git a/engine/access/rest/models/network.go b/engine/access/rest/models/network.go deleted file mode 100644 index 927b5a23362..00000000000 --- a/engine/access/rest/models/network.go +++ /dev/null @@ -1,9 +0,0 @@ -package models - -import ( - "github.com/onflow/flow-go/access" -) - -func (t *NetworkParameters) Build(params *access.NetworkParameters) { - t.ChainId = params.ChainID.String() -} diff --git a/engine/access/rest/models/node_version_info.go b/engine/access/rest/models/node_version_info.go deleted file mode 100644 index 6a85e9f8d42..00000000000 --- a/engine/access/rest/models/node_version_info.go +++ /dev/null @@ -1,13 +0,0 @@ -package models - -import ( - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/util" -) - -func (t *NodeVersionInfo) Build(params *access.NodeVersionInfo) { - t.Semver = params.Semver - t.Commit = params.Commit - t.SporkId = params.SporkId.String() - t.ProtocolVersion = util.FromUint64(params.ProtocolVersion) -} diff --git a/engine/access/rest/models/transaction.go b/engine/access/rest/models/transaction.go deleted file mode 100644 index 5553ec5bec6..00000000000 --- a/engine/access/rest/models/transaction.go +++ /dev/null @@ -1,151 +0,0 @@ -package models - -import ( - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/model/flow" -) - -func (t *Transaction) Build(tx *flow.TransactionBody, txr *access.TransactionResult, link LinkGenerator) { - args := make([]string, len(tx.Arguments)) - for i, arg := range tx.Arguments { - args[i] = util.ToBase64(arg) - } - - auths := make([]string, len(tx.Authorizers)) - for i, auth := range tx.Authorizers { - auths[i] = auth.String() - } - - // if transaction result is provided then add that to the response, else add the result link to the expandable - t.Expandable = &TransactionExpandable{} - if txr != nil { - var txResult TransactionResult - txResult.Build(txr, tx.ID(), link) - t.Result = &txResult - } else { - resultLink, _ := link.TransactionResultLink(tx.ID()) - t.Expandable.Result = resultLink - } - - var payloadSigs TransactionSignatures - payloadSigs.Build(tx.PayloadSignatures) - - var envelopeSigs TransactionSignatures - envelopeSigs.Build(tx.EnvelopeSignatures) - - var proposalKey ProposalKey - proposalKey.Build(tx.ProposalKey) - - t.Id = tx.ID().String() - t.Script = util.ToBase64(tx.Script) - t.Arguments = args - t.ReferenceBlockId = tx.ReferenceBlockID.String() - t.GasLimit = util.FromUint64(tx.GasLimit) - t.Payer = tx.Payer.String() - t.ProposalKey = &proposalKey - t.Authorizers = auths - t.PayloadSignatures = payloadSigs - t.EnvelopeSignatures = envelopeSigs - - self, _ := SelfLink(tx.ID(), link.TransactionLink) - t.Links = self -} - -type Transactions []Transaction - -func (t *Transactions) Build(transactions []*flow.TransactionBody, link LinkGenerator) { - txs := make([]Transaction, len(transactions)) - for i, tr := range transactions { - var tx Transaction - tx.Build(tr, nil, link) - txs[i] = tx - } - - *t = txs -} - -type TransactionSignatures []TransactionSignature - -func (t *TransactionSignatures) Build(signatures []flow.TransactionSignature) { - sigs := make([]TransactionSignature, 
len(signatures)) - for i, s := range signatures { - var sig TransactionSignature - sig.Build(s) - sigs[i] = sig - } - - *t = sigs -} - -func (t *TransactionSignature) Build(sig flow.TransactionSignature) { - t.Address = sig.Address.String() - t.KeyIndex = util.FromUint64(sig.KeyIndex) - t.Signature = util.ToBase64(sig.Signature) -} - -func (t *TransactionResult) Build(txr *access.TransactionResult, txID flow.Identifier, link LinkGenerator) { - var status TransactionStatus - status.Build(txr.Status) - - var execution TransactionExecution - execution.Build(txr) - - var events Events - events.Build(txr.Events) - - if txr.BlockID != flow.ZeroID { // don't send back 0 ID - t.BlockId = txr.BlockID.String() - } - - if txr.CollectionID != flow.ZeroID { // don't send back 0 ID - t.CollectionId = txr.CollectionID.String() - } - - t.Status = &status - t.Execution = &execution - t.StatusCode = int32(txr.StatusCode) - t.ErrorMessage = txr.ErrorMessage - t.ComputationUsed = util.FromUint64(0) // todo: define this - t.Events = events - - self, _ := SelfLink(txID, link.TransactionResultLink) - t.Links = self -} - -func (t *TransactionStatus) Build(status flow.TransactionStatus) { - switch status { - case flow.TransactionStatusExpired: - *t = EXPIRED - case flow.TransactionStatusExecuted: - *t = EXECUTED - case flow.TransactionStatusFinalized: - *t = FINALIZED - case flow.TransactionStatusSealed: - *t = SEALED - case flow.TransactionStatusPending: - *t = PENDING - default: - *t = "" - } -} - -func (t *TransactionExecution) Build(result *access.TransactionResult) { - *t = PENDING_RESULT - - if result.Status == flow.TransactionStatusSealed && result.ErrorMessage == "" { - *t = SUCCESS_RESULT - } - if result.ErrorMessage != "" { - *t = FAILURE_RESULT - } - if result.Status == flow.TransactionStatusExpired { - *t = FAILURE_RESULT - } -} - -func (p *ProposalKey) Build(key flow.ProposalKey) { - p.Address = key.Address.String() - p.KeyIndex = util.FromUint64(key.KeyIndex) - p.SequenceNumber = util.FromUint64(key.SequenceNumber) -} diff --git a/engine/access/rest/network.go b/engine/access/rest/network.go deleted file mode 100644 index 6100bc765d5..00000000000 --- a/engine/access/rest/network.go +++ /dev/null @@ -1,16 +0,0 @@ -package rest - -import ( - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" -) - -// GetNetworkParameters returns network-wide parameters of the blockchain -func GetNetworkParameters(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - params := backend.GetNetworkParameters(r.Context()) - - var response models.NetworkParameters - response.Build(¶ms) - return response, nil -} diff --git a/engine/access/rest/network_test.go b/engine/access/rest/network_test.go deleted file mode 100644 index c4ce7492476..00000000000 --- a/engine/access/rest/network_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package rest - -import ( - "fmt" - "net/http" - "net/url" - "testing" - - mocktestify "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/access/mock" - "github.com/onflow/flow-go/model/flow" -) - -func networkURL(t *testing.T) string { - u, err := url.ParseRequestURI("/v1/network/parameters") - require.NoError(t, err) - - return u.String() -} - -func TestGetNetworkParameters(t *testing.T) { - backend := &mock.API{} - - t.Run("get network parameters on mainnet", func(t *testing.T) { - - 
req := getNetworkParametersRequest(t) - - params := access.NetworkParameters{ - ChainID: flow.Mainnet, - } - - backend.Mock. - On("GetNetworkParameters", mocktestify.Anything). - Return(params) - - expected := networkParametersExpectedStr(flow.Mainnet) - - assertOKResponse(t, req, expected, backend) - mocktestify.AssertExpectationsForObjects(t, backend) - }) -} - -func networkParametersExpectedStr(chainID flow.ChainID) string { - return fmt.Sprintf(`{ - "chain_id": "%s" - }`, chainID) -} - -func getNetworkParametersRequest(t *testing.T) *http.Request { - req, err := http.NewRequest("GET", networkURL(t), nil) - require.NoError(t, err) - return req -} diff --git a/engine/access/rest/node_version_info.go b/engine/access/rest/node_version_info.go deleted file mode 100644 index 899d159cf4f..00000000000 --- a/engine/access/rest/node_version_info.go +++ /dev/null @@ -1,19 +0,0 @@ -package rest - -import ( - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" -) - -// GetNodeVersionInfo returns node version information -func GetNodeVersionInfo(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - params, err := backend.GetNodeVersionInfo(r.Context()) - if err != nil { - return nil, err - } - - var response models.NodeVersionInfo - response.Build(params) - return response, nil -} diff --git a/engine/access/rest/node_version_info_test.go b/engine/access/rest/node_version_info_test.go deleted file mode 100644 index 4140089a280..00000000000 --- a/engine/access/rest/node_version_info_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package rest - -import ( - "fmt" - "net/http" - "net/url" - "testing" - - mocktestify "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/access/mock" - "github.com/onflow/flow-go/cmd/build" - "github.com/onflow/flow-go/utils/unittest" -) - -func nodeVersionInfoURL(t *testing.T) string { - u, err := url.ParseRequestURI("/v1/node_version_info") - require.NoError(t, err) - - return u.String() -} - -func TestGetNodeVersionInfo(t *testing.T) { - backend := mock.NewAPI(t) - - t.Run("get node version info", func(t *testing.T) { - req := getNodeVersionInfoRequest(t) - - params := &access.NodeVersionInfo{ - Semver: build.Semver(), - Commit: build.Commit(), - SporkId: unittest.IdentifierFixture(), - ProtocolVersion: unittest.Uint64InRange(10, 30), - } - - backend.Mock. - On("GetNodeVersionInfo", mocktestify.Anything). 
- Return(params, nil) - - expected := nodeVersionInfoExpectedStr(params) - - assertOKResponse(t, req, expected, backend) - mocktestify.AssertExpectationsForObjects(t, backend) - }) -} - -func nodeVersionInfoExpectedStr(nodeVersionInfo *access.NodeVersionInfo) string { - return fmt.Sprintf(`{ - "semver": "%s", - "commit": "%s", - "spork_id": "%s", - "protocol_version": "%d" - }`, nodeVersionInfo.Semver, nodeVersionInfo.Commit, nodeVersionInfo.SporkId.String(), nodeVersionInfo.ProtocolVersion) -} - -func getNodeVersionInfoRequest(t *testing.T) *http.Request { - req, err := http.NewRequest("GET", nodeVersionInfoURL(t), nil) - require.NoError(t, err) - return req -} diff --git a/engine/access/rest/request/address.go b/engine/access/rest/request/address.go deleted file mode 100644 index 4dc2eebbbd7..00000000000 --- a/engine/access/rest/request/address.go +++ /dev/null @@ -1,20 +0,0 @@ -package request - -import ( - "fmt" - "regexp" - "strings" - - "github.com/onflow/flow-go/model/flow" -) - -func ParseAddress(raw string) (flow.Address, error) { - raw = strings.ReplaceAll(raw, "0x", "") // remove 0x prefix - - valid, _ := regexp.MatchString(`^[0-9a-fA-F]{16}$`, raw) - if !valid { - return flow.EmptyAddress, fmt.Errorf("invalid address") - } - - return flow.HexToAddress(raw), nil -} diff --git a/engine/access/rest/request/address_test.go b/engine/access/rest/request/address_test.go deleted file mode 100644 index 3f0f7a45bf2..00000000000 --- a/engine/access/rest/request/address_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package request - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAddress_InvalidParse(t *testing.T) { - inputs := []string{ - "0x1", - "", - "foo", - "1", - "@", - "ead892083b3e2c61222", - } - - for _, input := range inputs { - _, err := ParseAddress(input) - assert.EqualError(t, err, "invalid address") - } -} - -func TestAddress_ValidParse(t *testing.T) { - inputs := []string{ - "f8d6e0586b0a20c7", - "f3ad66eea58c97d2", - "0xead892083b3e2c6c", - } - - for _, input := range inputs { - address, err := ParseAddress(input) - assert.NoError(t, err) - assert.Equal(t, strings.ReplaceAll(input, "0x", ""), address.String()) - } -} diff --git a/engine/access/rest/request/arguments.go b/engine/access/rest/request/arguments.go deleted file mode 100644 index 54da5fdd9af..00000000000 --- a/engine/access/rest/request/arguments.go +++ /dev/null @@ -1,37 +0,0 @@ -package request - -import ( - "fmt" - - "github.com/onflow/flow-go/engine/access/rest/util" -) - -const maxArgumentsLength = 100 - -type Arguments [][]byte - -func (a *Arguments) Parse(raw []string) error { - args := make([][]byte, 0) - for _, rawArg := range raw { - if rawArg == "" { // skip empty - continue - } - - arg, err := util.FromBase64(rawArg) - if err != nil { - return fmt.Errorf("invalid argument encoding: %w", err) - } - args = append(args, arg) - } - - if len(args) > maxArgumentsLength { - return fmt.Errorf("too many arguments. 
Maximum arguments allowed: %d", maxAllowedScriptArguments) - } - - *a = args - return nil -} - -func (a Arguments) Flow() [][]byte { - return a -} diff --git a/engine/access/rest/request/create_transaction.go b/engine/access/rest/request/create_transaction.go deleted file mode 100644 index 3fa8bdafb94..00000000000 --- a/engine/access/rest/request/create_transaction.go +++ /dev/null @@ -1,26 +0,0 @@ -package request - -import ( - "io" - - "github.com/onflow/flow-go/model/flow" -) - -type CreateTransaction struct { - Transaction flow.TransactionBody -} - -func (c *CreateTransaction) Build(r *Request) error { - return c.Parse(r.Body, r.Chain) -} - -func (c *CreateTransaction) Parse(rawTransaction io.Reader, chain flow.Chain) error { - var tx Transaction - err := tx.Parse(rawTransaction, chain) - if err != nil { - return err - } - - c.Transaction = tx.Flow() - return nil -} diff --git a/engine/access/rest/request/get_account.go b/engine/access/rest/request/get_account.go deleted file mode 100644 index fde9cf6d6e7..00000000000 --- a/engine/access/rest/request/get_account.go +++ /dev/null @@ -1,43 +0,0 @@ -package request - -import ( - "github.com/onflow/flow-go/model/flow" -) - -const addressVar = "address" -const blockHeightQuery = "block_height" - -type GetAccount struct { - Address flow.Address - Height uint64 -} - -func (g *GetAccount) Build(r *Request) error { - return g.Parse( - r.GetVar(addressVar), - r.GetQueryParam(blockHeightQuery), - ) -} - -func (g *GetAccount) Parse(rawAddress string, rawHeight string) error { - address, err := ParseAddress(rawAddress) - if err != nil { - return err - } - - var height Height - err = height.Parse(rawHeight) - if err != nil { - return err - } - - g.Address = address - g.Height = height.Flow() - - // default to last block - if g.Height == EmptyHeight { - g.Height = SealedHeight - } - - return nil -} diff --git a/engine/access/rest/request/get_account_test.go b/engine/access/rest/request/get_account_test.go deleted file mode 100644 index 30b807c20f5..00000000000 --- a/engine/access/rest/request/get_account_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package request - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_GetAccount_InvalidParse(t *testing.T) { - var getAccount GetAccount - - tests := []struct { - address string - height string - err string - }{ - {"", "", "invalid address"}, - {"f8d6e0586b0a20c7", "-1", "invalid height format"}, - } - - for i, test := range tests { - err := getAccount.Parse(test.address, test.height) - assert.EqualError(t, err, test.err, fmt.Sprintf("test #%d failed", i)) - } -} - -func Test_GetAccount_ValidParse(t *testing.T) { - var getAccount GetAccount - - addr := "f8d6e0586b0a20c7" - err := getAccount.Parse(addr, "") - assert.NoError(t, err) - assert.Equal(t, getAccount.Address.String(), addr) - assert.Equal(t, getAccount.Height, SealedHeight) - - err = getAccount.Parse(addr, "100") - assert.NoError(t, err) - assert.Equal(t, getAccount.Height, uint64(100)) -} diff --git a/engine/access/rest/request/get_block.go b/engine/access/rest/request/get_block.go deleted file mode 100644 index 4a4a1cd319d..00000000000 --- a/engine/access/rest/request/get_block.go +++ /dev/null @@ -1,116 +0,0 @@ -package request - -import ( - "fmt" - - "github.com/onflow/flow-go/model/flow" -) - -const heightQuery = "height" -const startHeightQuery = "start_height" -const endHeightQuery = "end_height" -const MaxBlockRequestHeightRange = 50 -const idParam = "id" - -type GetBlock struct { - Heights []uint64 - 
StartHeight uint64 - EndHeight uint64 - FinalHeight bool - SealedHeight bool -} - -func (g *GetBlock) Build(r *Request) error { - return g.Parse( - r.GetQueryParams(heightQuery), - r.GetQueryParam(startHeightQuery), - r.GetQueryParam(endHeightQuery), - ) -} - -func (g *GetBlock) HasHeights() bool { - return len(g.Heights) > 0 -} - -func (g *GetBlock) Parse(rawHeights []string, rawStart string, rawEnd string) error { - var height Height - err := height.Parse(rawStart) - if err != nil { - return err - } - g.StartHeight = height.Flow() - err = height.Parse(rawEnd) - if err != nil { - return err - } - g.EndHeight = height.Flow() - - var heights Heights - err = heights.Parse(rawHeights) - if err != nil { - return err - } - g.Heights = heights.Flow() - - // if both height and one or both of start and end height are provided - if len(g.Heights) > 0 && (g.StartHeight != EmptyHeight || g.EndHeight != EmptyHeight) { - return fmt.Errorf("can only provide either heights or start and end height range") - } - - // if neither height nor start and end height are provided - if len(heights) == 0 && (g.StartHeight == EmptyHeight || g.EndHeight == EmptyHeight) { - return fmt.Errorf("must provide either heights or start and end height range") - } - - if g.StartHeight > g.EndHeight { - return fmt.Errorf("start height must be less than or equal to end height") - } - // check if range exceeds maximum but only if end is not equal to special value which is not known yet - if g.EndHeight-g.StartHeight >= MaxBlockRequestHeightRange && g.EndHeight != FinalHeight && g.EndHeight != SealedHeight { - return fmt.Errorf("height range %d exceeds maximum allowed of %d", g.EndHeight-g.StartHeight, MaxBlockRequestHeightRange) - } - - if len(heights) > MaxBlockRequestHeightRange { - return fmt.Errorf("at most %d heights can be requested at a time", MaxBlockRequestHeightRange) - } - - // check that if sealed or final are used they are provided as only value as mix and matching heights with sealed is not encouraged - if len(heights) > 1 { - for _, h := range heights { - if h == Height(SealedHeight) || h == Height(FinalHeight) { - return fmt.Errorf("can not provide '%s' or '%s' values with other height values", final, sealed) - } - } - } else if len(heights) == 1 { - // if we have special values for heights set the booleans - g.FinalHeight = heights[0] == Height(FinalHeight) - g.SealedHeight = heights[0] == Height(SealedHeight) - } - - return nil -} - -type GetBlockByIDs struct { - IDs []flow.Identifier -} - -func (g *GetBlockByIDs) Build(r *Request) error { - return g.Parse( - r.GetVars(idParam), - ) -} - -func (g *GetBlockByIDs) Parse(rawIds []string) error { - var ids IDs - err := ids.Parse(rawIds) - if err != nil { - return err - } - g.IDs = ids.Flow() - - return nil -} - -type GetBlockPayload struct { - GetByIDRequest -} diff --git a/engine/access/rest/request/get_collection.go b/engine/access/rest/request/get_collection.go deleted file mode 100644 index 151f7ddc6d5..00000000000 --- a/engine/access/rest/request/get_collection.go +++ /dev/null @@ -1,15 +0,0 @@ -package request - -const ExpandsTransactions = "transactions" - -type GetCollection struct { - GetByIDRequest - ExpandsTransactions bool -} - -func (g *GetCollection) Build(r *Request) error { - err := g.GetByIDRequest.Build(r) - g.ExpandsTransactions = r.Expands(ExpandsTransactions) - - return err -} diff --git a/engine/access/rest/request/get_execution_result.go b/engine/access/rest/request/get_execution_result.go deleted file mode 100644 index 4feda42a0b6..00000000000 
--- a/engine/access/rest/request/get_execution_result.go +++ /dev/null @@ -1,38 +0,0 @@ -package request - -import ( - "fmt" - - "github.com/onflow/flow-go/model/flow" -) - -const idQuery = "id" - -type GetExecutionResultByBlockIDs struct { - BlockIDs []flow.Identifier -} - -func (g *GetExecutionResultByBlockIDs) Build(r *Request) error { - return g.Parse( - r.GetQueryParams(blockIDQuery), - ) -} - -func (g *GetExecutionResultByBlockIDs) Parse(rawIDs []string) error { - var ids IDs - err := ids.Parse(rawIDs) - if err != nil { - return err - } - g.BlockIDs = ids.Flow() - - if len(g.BlockIDs) == 0 { - return fmt.Errorf("no block IDs provided") - } - - return nil -} - -type GetExecutionResult struct { - GetByIDRequest -} diff --git a/engine/access/rest/request/get_script.go b/engine/access/rest/request/get_script.go deleted file mode 100644 index 3e4abc29be7..00000000000 --- a/engine/access/rest/request/get_script.go +++ /dev/null @@ -1,58 +0,0 @@ -package request - -import ( - "fmt" - "io" - - "github.com/onflow/flow-go/model/flow" -) - -const blockIDQuery = "block_id" - -type GetScript struct { - BlockID flow.Identifier - BlockHeight uint64 - Script Script -} - -func (g *GetScript) Build(r *Request) error { - return g.Parse( - r.GetQueryParam(blockHeightQuery), - r.GetQueryParam(blockIDQuery), - r.Body, - ) -} - -func (g *GetScript) Parse(rawHeight string, rawID string, rawScript io.Reader) error { - var height Height - err := height.Parse(rawHeight) - if err != nil { - return err - } - g.BlockHeight = height.Flow() - - var id ID - err = id.Parse(rawID) - if err != nil { - return err - } - g.BlockID = id.Flow() - - var script Script - err = script.Parse(rawScript) - if err != nil { - return err - } - g.Script = script - - // default to last sealed block - if g.BlockHeight == EmptyHeight && g.BlockID == flow.ZeroID { - g.BlockHeight = SealedHeight - } - - if g.BlockID != flow.ZeroID && g.BlockHeight != EmptyHeight { - return fmt.Errorf("can not provide both block ID and block height") - } - - return nil -} diff --git a/engine/access/rest/request/get_transaction.go b/engine/access/rest/request/get_transaction.go deleted file mode 100644 index e2748f2ef14..00000000000 --- a/engine/access/rest/request/get_transaction.go +++ /dev/null @@ -1,64 +0,0 @@ -package request - -import "github.com/onflow/flow-go/model/flow" - -const resultExpandable = "result" -const blockIDQueryParam = "block_id" -const collectionIDQueryParam = "collection_id" - -type TransactionOptionals struct { - BlockID flow.Identifier - CollectionID flow.Identifier -} - -func (t *TransactionOptionals) Parse(r *Request) error { - var blockId ID - err := blockId.Parse(r.GetQueryParam(blockIDQueryParam)) - if err != nil { - return err - } - t.BlockID = blockId.Flow() - - var collectionId ID - err = collectionId.Parse(r.GetQueryParam(collectionIDQueryParam)) - if err != nil { - return err - } - t.CollectionID = collectionId.Flow() - - return nil -} - -type GetTransaction struct { - GetByIDRequest - TransactionOptionals - ExpandsResult bool -} - -func (g *GetTransaction) Build(r *Request) error { - err := g.TransactionOptionals.Parse(r) - if err != nil { - return err - } - - err = g.GetByIDRequest.Build(r) - g.ExpandsResult = r.Expands(resultExpandable) - - return err -} - -type GetTransactionResult struct { - GetByIDRequest - TransactionOptionals -} - -func (g *GetTransactionResult) Build(r *Request) error { - err := g.TransactionOptionals.Parse(r) - if err != nil { - return err - } - - err = g.GetByIDRequest.Build(r) - - return err 
-} diff --git a/engine/access/rest/request/helpers.go b/engine/access/rest/request/helpers.go deleted file mode 100644 index 33d5246c674..00000000000 --- a/engine/access/rest/request/helpers.go +++ /dev/null @@ -1,70 +0,0 @@ -package request - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "strings" - - "github.com/onflow/flow-go/model/flow" -) - -func parseBody(raw io.Reader, dst interface{}) error { - dec := json.NewDecoder(raw) - dec.DisallowUnknownFields() - - err := dec.Decode(&dst) - if err != nil { - var syntaxError *json.SyntaxError - var unmarshalTypeError *json.UnmarshalTypeError - - switch { - case errors.As(err, &syntaxError): - return fmt.Errorf("request body contains badly-formed JSON (at position %d)", syntaxError.Offset) - case errors.Is(err, io.ErrUnexpectedEOF): - return fmt.Errorf("request body contains badly-formed JSON") - case errors.As(err, &unmarshalTypeError): - return fmt.Errorf("request body contains an invalid value for the %q field (at position %d)", unmarshalTypeError.Field, unmarshalTypeError.Offset) - case strings.HasPrefix(err.Error(), "json: unknown field "): - fieldName := strings.TrimPrefix(err.Error(), "json: unknown field ") - return fmt.Errorf("request body contains unknown field %s", fieldName) - case errors.Is(err, io.EOF): - return fmt.Errorf("request body must not be empty") - default: - return err - } - } - - if dst == nil { - return fmt.Errorf("request body must not be empty") - } - - err = dec.Decode(&struct{}{}) - if err != io.EOF { - return fmt.Errorf("request body must only contain a single JSON object") - } - - return nil -} - -type GetByIDRequest struct { - ID flow.Identifier -} - -func (g *GetByIDRequest) Build(r *Request) error { - return g.Parse( - r.GetVar(idQuery), - ) -} - -func (g *GetByIDRequest) Parse(rawID string) error { - var id ID - err := id.Parse(rawID) - if err != nil { - return err - } - g.ID = id.Flow() - - return nil -} diff --git a/engine/access/rest/request/helpers_test.go b/engine/access/rest/request/helpers_test.go deleted file mode 100644 index e35ab24c148..00000000000 --- a/engine/access/rest/request/helpers_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package request - -import ( - "fmt" - "strings" - "testing" - - "github.com/onflow/flow-go/utils/unittest" - - "github.com/stretchr/testify/assert" -) - -func Test_GetByID_Parse(t *testing.T) { - var getByID GetByIDRequest - - id := unittest.IdentifierFixture() - err := getByID.Parse(id.String()) - assert.NoError(t, err) - assert.Equal(t, getByID.ID, id) - - err = getByID.Parse("1") - assert.EqualError(t, err, "invalid ID format") -} - -func Test_ParseBody(t *testing.T) { - - invalid := []struct { - in string - err string - }{ - {"{f}", "request body contains badly-formed JSON (at position 2)"}, - {"foo", "request body contains badly-formed JSON (at position 2)"}, - {"", "request body must not be empty"}, - {`{"foo": "bar"`, "request body contains badly-formed JSON"}, - {`{"foo": "bar" "foo2":"bar2"}`, "request body contains badly-formed JSON (at position 15)"}, - {`{"foo":"bar"}, {}`, "request body must only contain a single JSON object"}, - {`[][]`, "request body must only contain a single JSON object"}, - } - - for i, test := range invalid { - readerIn := strings.NewReader(test.in) - var out interface{} - err := parseBody(readerIn, out) - assert.EqualError(t, err, test.err, fmt.Sprintf("test #%d failed", i)) - } - - type body struct { - Foo string - Bar bool - Zoo uint64 - } - var b body - err := parseBody(strings.NewReader(`{ "foo": "test", "bar": true }`), 
&b) - assert.NoError(t, err) - assert.Equal(t, b.Bar, true) - assert.Equal(t, b.Foo, "test") - assert.Equal(t, b.Zoo, uint64(0)) - - err = parseBody(strings.NewReader(`{ "foo": false }`), &b) - assert.EqualError(t, err, `request body contains an invalid value for the "Foo" field (at position 14)`) -} diff --git a/engine/access/rest/request/proposal_key.go b/engine/access/rest/request/proposal_key.go deleted file mode 100644 index 1b5d5c13dc6..00000000000 --- a/engine/access/rest/request/proposal_key.go +++ /dev/null @@ -1,39 +0,0 @@ -package request - -import ( - "fmt" - - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/model/flow" -) - -type ProposalKey flow.ProposalKey - -func (p *ProposalKey) Parse(raw models.ProposalKey) error { - address, err := ParseAddress(raw.Address) - if err != nil { - return err - } - - keyIndex, err := util.ToUint64(raw.KeyIndex) - if err != nil { - return fmt.Errorf("invalid key index: %w", err) - } - - seqNumber, err := util.ToUint64(raw.SequenceNumber) - if err != nil { - return fmt.Errorf("invalid sequence number: %w", err) - } - - *p = ProposalKey(flow.ProposalKey{ - Address: address, - KeyIndex: keyIndex, - SequenceNumber: seqNumber, - }) - return nil -} - -func (p ProposalKey) Flow() flow.ProposalKey { - return flow.ProposalKey(p) -} diff --git a/engine/access/rest/request/request.go b/engine/access/rest/request/request.go deleted file mode 100644 index b7500206fac..00000000000 --- a/engine/access/rest/request/request.go +++ /dev/null @@ -1,167 +0,0 @@ -package request - -import ( - "net/http" - "strings" - - "github.com/gorilla/mux" - - "github.com/onflow/flow-go/engine/access/rest/middleware" - "github.com/onflow/flow-go/model/flow" -) - -// Request a convenience wrapper around the http request to make it easy to read request query params -type Request struct { - *http.Request - ExpandFields map[string]bool - selectFields []string - Chain flow.Chain -} - -func (rd *Request) GetScriptRequest() (GetScript, error) { - var req GetScript - err := req.Build(rd) - return req, err -} - -func (rd *Request) GetBlockRequest() (GetBlock, error) { - var req GetBlock - err := req.Build(rd) - return req, err -} - -func (rd *Request) GetBlockByIDsRequest() (GetBlockByIDs, error) { - var req GetBlockByIDs - err := req.Build(rd) - return req, err -} - -func (rd *Request) GetBlockPayloadRequest() (GetBlockPayload, error) { - var req GetBlockPayload - err := req.Build(rd) - return req, err -} - -func (rd *Request) GetCollectionRequest() (GetCollection, error) { - var req GetCollection - err := req.Build(rd) - return req, err -} - -func (rd *Request) GetAccountRequest() (GetAccount, error) { - var req GetAccount - err := req.Build(rd) - return req, err -} - -func (rd *Request) GetExecutionResultByBlockIDsRequest() (GetExecutionResultByBlockIDs, error) { - var req GetExecutionResultByBlockIDs - err := req.Build(rd) - return req, err -} - -func (rd *Request) GetExecutionResultRequest() (GetExecutionResult, error) { - var req GetExecutionResult - err := req.Build(rd) - return req, err -} - -func (rd *Request) GetTransactionRequest() (GetTransaction, error) { - var req GetTransaction - err := req.Build(rd) - return req, err -} - -func (rd *Request) GetTransactionResultRequest() (GetTransactionResult, error) { - var req GetTransactionResult - err := req.Build(rd) - return req, err -} - -func (rd *Request) GetEventsRequest() (GetEvents, error) { - var req GetEvents - err := req.Build(rd) - 
return req, err -} - -func (rd *Request) CreateTransactionRequest() (CreateTransaction, error) { - var req CreateTransaction - err := req.Build(rd) - return req, err -} - -func (rd *Request) Expands(field string) bool { - return rd.ExpandFields[field] -} - -func (rd *Request) Selects() []string { - return rd.selectFields -} - -func (rd *Request) GetVar(name string) string { - vars := mux.Vars(rd.Request) - return vars[name] -} - -func (rd *Request) GetVars(name string) []string { - vars := mux.Vars(rd.Request) - return toStringArray(vars[name]) -} - -func (rd *Request) GetQueryParam(name string) string { - return rd.Request.URL.Query().Get(name) -} - -func (rd *Request) GetQueryParams(name string) []string { - param := rd.Request.URL.Query().Get(name) - return toStringArray(param) -} - -// Decorate takes http request and applies functions to produce our custom -// request object decorated with values we need -func Decorate(r *http.Request, chain flow.Chain) *Request { - decoratedReq := &Request{ - Request: r, - Chain: chain, - } - - if expandFields, found := middleware.GetFieldsToExpand(r); found { - decoratedReq.ExpandFields = sliceToMap(expandFields) - } - - if selectFields, found := middleware.GetFieldsToSelect(r); found { - decoratedReq.selectFields = selectFields - } - - return decoratedReq -} - -func sliceToMap(values []string) map[string]bool { - valueMap := make(map[string]bool, len(values)) - for _, v := range values { - valueMap[v] = true - } - return valueMap -} - -func toStringArray(in string) []string { - // currently, the swagger generated Go REST client is incorrectly doing a `fmt.Sprintf("%v", id)` for the id slice - // resulting in the client sending the ids in the format [id1 id2 id3...]. This is a temporary workaround to - // accommodate the client for now by doing a strings.Fields if commas are not present. 
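- // For example, "[id1,id2,id3]" and "[id1 id2 id3]" both normalize to
- // []string{"id1", "id2", "id3"}, and an empty input yields an empty slice.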
- // Issue to fix the client: https://github.com/onflow/flow/issues/698 - in = strings.TrimSuffix(in, "]") - in = strings.TrimPrefix(in, "[") - var out []string - - if len(in) == 0 { - return []string{} - } - - if strings.Contains(in, ",") { - out = strings.Split(in, ",") - } else { - out = strings.Fields(in) - } - - return out -} diff --git a/engine/access/rest/request/script.go b/engine/access/rest/request/script.go deleted file mode 100644 index db0b097491f..00000000000 --- a/engine/access/rest/request/script.go +++ /dev/null @@ -1,42 +0,0 @@ -package request - -import ( - "fmt" - "io" - - "github.com/onflow/flow-go/engine/access/rest/util" -) - -type scriptBody struct { - Script string `json:"script,omitempty"` - Arguments []string `json:"arguments,omitempty"` -} - -type Script struct { - Args Arguments - Source []byte -} - -func (s *Script) Parse(raw io.Reader) error { - var body scriptBody - err := parseBody(raw, &body) - if err != nil { - return err - } - - source, err := util.FromBase64(body.Script) - if err != nil { - return fmt.Errorf("invalid script source encoding") - } - - var args Arguments - err = args.Parse(body.Arguments) - if err != nil { - return err - } - - s.Source = source - s.Args = args - - return nil -} diff --git a/engine/access/rest/request/script_test.go b/engine/access/rest/request/script_test.go deleted file mode 100644 index ab74ae86ea5..00000000000 --- a/engine/access/rest/request/script_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package request - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/onflow/flow-go/engine/access/rest/util" -) - -const validBody = "pub fun main() { }" - -var validBodyEncoded = util.ToBase64([]byte(validBody)) - -func TestScript_InvalidParse(t *testing.T) { - test := map[string]string{ - "": "request body must not be empty", - "foo": "request body contains badly-formed JSON (at position 2)", - `{ "script": "123", "arguments": [] }`: "invalid script source encoding", - fmt.Sprintf(`{ "script": "%s", "arguments": [123] }`, validBodyEncoded): `request body contains an invalid value for the "arguments" field (at position 57)`, - } - - for in, errOut := range test { - body := strings.NewReader(in) - var script Script - err := script.Parse(body) - assert.EqualError(t, err, errOut, in) - } -} - -func TestScript_ValidParse(t *testing.T) { - arg1 := []byte(`{"type": "String", "value": "hello" }`) - body := strings.NewReader(fmt.Sprintf( - `{ "script": "%s", "arguments": ["%s"] }`, - validBodyEncoded, - util.ToBase64(arg1), - )) - - var script Script - err := script.Parse(body) - - assert.NoError(t, err) - assert.Equal(t, 1, len(script.Args)) - assert.Equal(t, arg1, script.Args[0]) - assert.Equal(t, validBody, string(script.Source)) -} diff --git a/engine/access/rest/request/signature_test.go b/engine/access/rest/request/signature_test.go deleted file mode 100644 index 4acafa4a06c..00000000000 --- a/engine/access/rest/request/signature_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package request - -import ( - "encoding/hex" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/util" -) - -func TestSignature_InvalidParse(t *testing.T) { - var signature Signature - - tests := []struct { - in string - err string - }{ - {"s", "invalid encoding"}, - {"", "missing value"}, - } - - for _, test := range tests { - err := signature.Parse(test.in) - assert.EqualError(t, err, test.err) - } - -} -
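The deleted script.go above expects a JSON body whose `script` field is base64-encoded Cadence source and whose `arguments` entries are base64-encoded JSON-Cadence values. A minimal, self-contained sketch of a client building such a body, assuming the package's util.ToBase64 helper is plain standard base64 (the stdlib encoding/base64 is used directly here):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Cadence source and a single JSON-Cadence argument, mirroring the
	// fixtures in the deleted script_test.go above.
	source := []byte("pub fun main() { }")
	arg := []byte(`{"type": "String", "value": "hello"}`)

	// Script.Parse expects both the source and every argument to arrive
	// base64 encoded inside the JSON body.
	body, err := json.Marshal(map[string]interface{}{
		"script":    base64.StdEncoding.EncodeToString(source),
		"arguments": []string{base64.StdEncoding.EncodeToString(arg)},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```

Posting a body like this to the POST /v1/scripts route would exercise Script.Parse end to end.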
-func TestSignature_ValidParse(t *testing.T) { - var signature Signature - err := signature.Parse("Mzg0MTQ5ODg4ZTg4MjRmYjMyNzM4MmM2ZWQ4ZjNjZjk1ODRlNTNlMzk4NGNhMDAxZmZjMjgwNzM4NmM0MzY3NTYxNmYwMTAwMTMzNDVkNjhmNzZkMmQ5YTBkYmI1MDA0MmEzOWRlOThlYzAzNTJjYTBkZWY3YjBlNjQ0YWJjOTQ=") - assert.NoError(t, err) - // todo test values -} - -func TestTransactionSignature_ValidParse(t *testing.T) { - var txSignature TransactionSignature - addr := "01cf0e2f2f715450" - sig := "c83665f5212fad065cd27d370ef80e5fbdd20cd57411af5c76076a15dced05ac6e6d9afa88cd7337bf9c869f6785ecc1c568ca593a99dfeec14e024c0cd78289" - sigHex, _ := hex.DecodeString(sig) - encodedSig := util.ToBase64(sigHex) - err := txSignature.Parse(addr, "0", encodedSig) - - assert.NoError(t, err) - assert.Equal(t, addr, txSignature.Address.String()) - assert.Equal(t, 0, txSignature.SignerIndex) - assert.Equal(t, uint64(0), txSignature.KeyIndex) - assert.Equal(t, sig, fmt.Sprintf("%x", txSignature.Signature)) -} - -func TestTransactionSignatures_ValidParse(t *testing.T) { - tests := []struct { - inAddresses []string - inSigs []string - }{ - {[]string{"01cf0e2f2f715450"}, []string{"c83665f5212fad065cd27d370ef80e5fbdd20cd57411af5c76076a15dced05ac6e6d9afa88cd7337bf9c869f6785ecc1c568ca593a99dfeec14e024c0cd78289"}}, - {[]string{"51cf0e2f2f715450", "21cf0e2f2f715454"}, []string{"223665f5212fad065cd27d370ef80e5fbdd20cd57411af5c76076a15dced05ac6e6d9afa88cd7337bf9c869f6785ecc1c568ca593a99dfeec14e024c0cd78289", "5553665f5212fad065cd27d370ef80e5fbdd20cd57411af5c76076a15dced05ac6e6d9afa88cd7337bf9c869f6785ecc1c568ca593a99dfeec14e024c0cd7822"}}, - } - - var txSigantures TransactionSignatures - for _, test := range tests { - sigs := make([]models.TransactionSignature, len(test.inAddresses)) - for i, a := range test.inAddresses { - sigHex, _ := hex.DecodeString(test.inSigs[i]) - encodedSig := util.ToBase64(sigHex) - sigs[i].Signature = encodedSig - sigs[i].KeyIndex = "0" - sigs[i].Address = a - } - - err := txSigantures.Parse(sigs) - assert.NoError(t, err) - - assert.Equal(t, len(txSigantures), len(sigs)) - for i, sig := range sigs { - assert.Equal(t, sig.Address, txSigantures[i].Address.String()) - assert.Equal(t, 0, txSigantures[i].SignerIndex) - assert.Equal(t, uint64(0), txSigantures[i].KeyIndex) - assert.Equal(t, test.inSigs[i], fmt.Sprintf("%x", txSigantures[i].Signature)) - } - } -} diff --git a/engine/access/rest/request/signatures.go b/engine/access/rest/request/signatures.go deleted file mode 100644 index 8afa83bf072..00000000000 --- a/engine/access/rest/request/signatures.go +++ /dev/null @@ -1,90 +0,0 @@ -package request - -import ( - "fmt" - - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/model/flow" -) - -type TransactionSignature flow.TransactionSignature - -func (s *TransactionSignature) Parse( - rawAddress string, - rawKeyIndex string, - rawSignature string, -) error { - address, err := ParseAddress(rawAddress) - if err != nil { - return err - } - - keyIndex, err := util.ToUint64(rawKeyIndex) - if err != nil { - return fmt.Errorf("invalid key index: %w", err) - } - - var signature Signature - err = signature.Parse(rawSignature) - if err != nil { - return fmt.Errorf("invalid signature: %w", err) - } - - *s = TransactionSignature(flow.TransactionSignature{ - Address: address, - KeyIndex: keyIndex, - Signature: signature, - }) - - return nil -} - -func (s TransactionSignature) Flow() flow.TransactionSignature { - return flow.TransactionSignature(s) -} - -type 
TransactionSignatures []TransactionSignature - -func (t *TransactionSignatures) Parse(rawSigs []models.TransactionSignature) error { - signatures := make([]TransactionSignature, len(rawSigs)) - for i, sig := range rawSigs { - var signature TransactionSignature - err := signature.Parse(sig.Address, sig.KeyIndex, sig.Signature) - if err != nil { - return err - } - signatures[i] = signature - } - - *t = signatures - return nil -} - -func (t TransactionSignatures) Flow() []flow.TransactionSignature { - sigs := make([]flow.TransactionSignature, len(t)) - for i, sig := range t { - sigs[i] = sig.Flow() - } - return sigs -} - -type Signature []byte - -func (s *Signature) Parse(raw string) error { - if raw == "" { - return fmt.Errorf("missing value") - } - - signatureBytes, err := util.FromBase64(raw) - if err != nil { - return fmt.Errorf("invalid encoding") - } - - *s = signatureBytes - return nil -} - -func (s Signature) Flow() []byte { - return s -} diff --git a/engine/access/rest/request/transaction.go b/engine/access/rest/request/transaction.go deleted file mode 100644 index 10564a34ea1..00000000000 --- a/engine/access/rest/request/transaction.go +++ /dev/null @@ -1,130 +0,0 @@ -package request - -import ( - "fmt" - "io" - - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/model/flow" -) - -const maxAuthorizers = 100 -const maxAllowedScriptArguments = 100 - -type Transaction flow.TransactionBody - -func (t *Transaction) Parse(raw io.Reader, chain flow.Chain) error { - var tx models.TransactionsBody - err := parseBody(raw, &tx) - if err != nil { - return err - } - - if tx.ProposalKey == nil { - return fmt.Errorf("proposal key not provided") - } - if tx.Script == "" { - return fmt.Errorf("script not provided") - } - if tx.Payer == "" { - return fmt.Errorf("payer not provided") - } - if len(tx.Authorizers) > maxAuthorizers { - return fmt.Errorf("too many authorizers. Maximum authorizers allowed: %d", maxAuthorizers) - } - if len(tx.Arguments) > maxAllowedScriptArguments { - return fmt.Errorf("too many arguments. 
Maximum arguments allowed: %d", maxAllowedScriptArguments) - } - if tx.ReferenceBlockId == "" { - return fmt.Errorf("reference block not provided") - } - if len(tx.EnvelopeSignatures) == 0 { - return fmt.Errorf("envelope signatures not provided") - } - - var args Arguments - err = args.Parse(tx.Arguments) - if err != nil { - return err - } - - payer, err := ParseAddress(tx.Payer) - if err != nil { - return fmt.Errorf("invalid payer: %w", err) - } - - auths := make([]flow.Address, len(tx.Authorizers)) - for i, auth := range tx.Authorizers { - a, err := ParseAddress(auth) - if err != nil { - return err - } - - auths[i] = a - } - - var proposal ProposalKey - err = proposal.Parse(*tx.ProposalKey) - if err != nil { - return err - } - - var payloadSigs TransactionSignatures - err = payloadSigs.Parse(tx.PayloadSignatures) - if err != nil { - return err - } - - var envelopeSigs TransactionSignatures - err = envelopeSigs.Parse(tx.EnvelopeSignatures) - if err != nil { - return err - } - - // script comes in as a base64 encoded string, decode base64 back to a string here - script, err := util.FromBase64(tx.Script) - if err != nil { - return fmt.Errorf("invalid transaction script encoding") - } - - var blockID ID - err = blockID.Parse(tx.ReferenceBlockId) - if err != nil { - return fmt.Errorf("invalid reference block ID: %w", err) - } - - gasLimit, err := util.ToUint64(tx.GasLimit) - if err != nil { - return fmt.Errorf("invalid gas limit: %w", err) - } - - flowTransaction := flow.TransactionBody{ - ReferenceBlockID: blockID.Flow(), - Script: script, - Arguments: args.Flow(), - GasLimit: gasLimit, - ProposalKey: proposal.Flow(), - Payer: payer, - Authorizers: auths, - PayloadSignatures: payloadSigs.Flow(), - EnvelopeSignatures: envelopeSigs.Flow(), - } - - // we use the gRPC method of converting the incoming transaction to a Flow transaction since - // it sets the signer_index appropriately. 
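- // (Concretely, flowTransaction is converted to its gRPC message representation and
- // back below, which is what populates each signature's signer index.)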
- entityTransaction := convert.TransactionToMessage(flowTransaction) - flowTx, err := convert.MessageToTransaction(entityTransaction, chain) - if err != nil { - return err - } - - *t = Transaction(flowTx) - - return nil -} - -func (t Transaction) Flow() flow.TransactionBody { - return flow.TransactionBody(t) -} diff --git a/engine/access/rest/request/transaction_test.go b/engine/access/rest/request/transaction_test.go deleted file mode 100644 index 30b5a4cf97c..00000000000 --- a/engine/access/rest/request/transaction_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package request - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func buildTransaction() map[string]interface{} { - tx := unittest.TransactionFixture() - tx.Arguments = [][]uint8{} - tx.PayloadSignatures = []flow.TransactionSignature{} - auth := make([]string, len(tx.Authorizers)) - for i, a := range tx.Authorizers { - auth[i] = a.String() - } - - return map[string]interface{}{ - "script": util.ToBase64(tx.Script), - "arguments": tx.Arguments, - "reference_block_id": tx.ReferenceBlockID.String(), - "gas_limit": fmt.Sprintf("%d", tx.GasLimit), - "payer": tx.Payer.String(), - "proposal_key": map[string]interface{}{ - "address": tx.ProposalKey.Address.String(), - "key_index": fmt.Sprintf("%d", tx.ProposalKey.KeyIndex), - "sequence_number": fmt.Sprintf("%d", tx.ProposalKey.SequenceNumber), - }, - "authorizers": auth, - "envelope_signatures": []map[string]interface{}{{ - "address": tx.EnvelopeSignatures[0].Address.String(), - "key_index": fmt.Sprintf("%d", tx.EnvelopeSignatures[0].KeyIndex), - "signature": util.ToBase64(tx.EnvelopeSignatures[0].Signature), - }}, - } -} - -func transactionToReader(tx map[string]interface{}) io.Reader { - res, _ := json.Marshal(tx) - return bytes.NewReader(res) -} - -func TestTransaction_InvalidParse(t *testing.T) { - tests := []struct { - inputField string - inputValue string - output string - }{ - {"script", "-1", "invalid transaction script encoding"}, - {"arguments", "-1", `request body contains an invalid value for the "arguments" field (at position 17)`}, - {"reference_block_id", "-1", "invalid reference block ID: invalid ID format"}, - {"gas_limit", "-1", "invalid gas limit: value must be an unsigned 64 bit integer"}, - {"payer", "-1", "invalid payer: invalid address"}, - {"authorizers", "-1", `request body contains an invalid value for the "authorizers" field (at position 34)`}, - {"proposal_key", "-1", `request body contains an invalid value for the "proposal_key" field (at position 288)`}, - {"envelope_signatures", "", `request body contains an invalid value for the "envelope_signatures" field (at position 75)`}, - {"envelope_signatures", "[]", `request body contains an invalid value for the "envelope_signatures" field (at position 77)`}, - } - - for _, test := range tests { - tx := buildTransaction() - tx[test.inputField] = test.inputValue - input := transactionToReader(tx) - - var transaction Transaction - err := transaction.Parse(input, flow.Testnet.Chain()) - - assert.EqualError(t, err, test.output) - } - - keyTests := []struct { - inputField string - inputValue string - output string - }{ - {"address", "-1", "invalid address"}, - {"key_index", "-1", `invalid key index: value must be an unsigned 64 bit integer`}, - {"sequence_number", "-1", "invalid sequence number: value must be an unsigned 
64 bit integer"}, - } - - for _, test := range keyTests { - tx := buildTransaction() - tx["proposal_key"].(map[string]interface{})[test.inputField] = test.inputValue - input := transactionToReader(tx) - - var transaction Transaction - err := transaction.Parse(input, flow.Testnet.Chain()) - - assert.EqualError(t, err, test.output) - } - - sigTests := []struct { - inputField string - inputValue string - output string - }{ - {"address", "-1", "invalid address"}, - {"key_index", "-1", `invalid key index: value must be an unsigned 64 bit integer`}, - {"signature", "-1", "invalid signature: invalid encoding"}, - } - - for _, test := range sigTests { - tx := buildTransaction() - tx["envelope_signatures"].([]map[string]interface{})[0][test.inputField] = test.inputValue - input := transactionToReader(tx) - - var transaction Transaction - err := transaction.Parse(input, flow.Testnet.Chain()) - - assert.EqualError(t, err, test.output) - } -} - -func TestTransaction_ValidParse(t *testing.T) { - script := `pub fun main() {}` - tx := buildTransaction() - tx["script"] = util.ToBase64([]byte(script)) - input := transactionToReader(tx) - - var transaction Transaction - err := transaction.Parse(input, flow.Testnet.Chain()) - - assert.NoError(t, err) - assert.Equal(t, tx["payer"], transaction.Flow().Payer.String()) - assert.Equal(t, script, string(transaction.Flow().Script)) - assert.Equal(t, tx["reference_block_id"], transaction.Flow().ReferenceBlockID.String()) - assert.Equal(t, tx["gas_limit"], fmt.Sprint(transaction.Flow().GasLimit)) - assert.Equal(t, len(tx["authorizers"].([]string)), len(transaction.Flow().Authorizers)) -} diff --git a/engine/access/rest/router.go b/engine/access/rest/router.go deleted file mode 100644 index 9f5ba4c2468..00000000000 --- a/engine/access/rest/router.go +++ /dev/null @@ -1,115 +0,0 @@ -package rest - -import ( - "net/http" - - "github.com/gorilla/mux" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/middleware" - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/model/flow" -) - -func newRouter(backend access.API, logger zerolog.Logger, chain flow.Chain) (*mux.Router, error) { - router := mux.NewRouter().StrictSlash(true) - v1SubRouter := router.PathPrefix("/v1").Subrouter() - - // common middleware for all request - v1SubRouter.Use(middleware.LoggingMiddleware(logger)) - v1SubRouter.Use(middleware.QueryExpandable()) - v1SubRouter.Use(middleware.QuerySelect()) - v1SubRouter.Use(middleware.MetricsMiddleware()) - - linkGenerator := models.NewLinkGeneratorImpl(v1SubRouter) - - for _, r := range Routes { - h := NewHandler(logger, backend, r.Handler, linkGenerator, chain) - v1SubRouter. - Methods(r.Method). - Path(r.Pattern). - Name(r.Name). 
- Handler(h) - } - return router, nil -} - -type route struct { - Name string - Method string - Pattern string - Handler ApiHandlerFunc -} - -var Routes = []route{{ - Method: http.MethodGet, - Pattern: "/transactions/{id}", - Name: "getTransactionByID", - Handler: GetTransactionByID, -}, { - Method: http.MethodPost, - Pattern: "/transactions", - Name: "createTransaction", - Handler: CreateTransaction, -}, { - Method: http.MethodGet, - Pattern: "/transaction_results/{id}", - Name: "getTransactionResultByID", - Handler: GetTransactionResultByID, -}, { - Method: http.MethodGet, - Pattern: "/blocks/{id}", - Name: "getBlocksByIDs", - Handler: GetBlocksByIDs, -}, { - Method: http.MethodGet, - Pattern: "/blocks", - Name: "getBlocksByHeight", - Handler: GetBlocksByHeight, -}, { - Method: http.MethodGet, - Pattern: "/blocks/{id}/payload", - Name: "getBlockPayloadByID", - Handler: GetBlockPayloadByID, -}, { - Method: http.MethodGet, - Pattern: "/execution_results/{id}", - Name: "getExecutionResultByID", - Handler: GetExecutionResultByID, -}, { - Method: http.MethodGet, - Pattern: "/execution_results", - Name: "getExecutionResultByBlockID", - Handler: GetExecutionResultsByBlockIDs, -}, { - Method: http.MethodGet, - Pattern: "/collections/{id}", - Name: "getCollectionByID", - Handler: GetCollectionByID, -}, { - Method: http.MethodPost, - Pattern: "/scripts", - Name: "executeScript", - Handler: ExecuteScript, -}, { - Method: http.MethodGet, - Pattern: "/accounts/{address}", - Name: "getAccount", - Handler: GetAccount, -}, { - Method: http.MethodGet, - Pattern: "/events", - Name: "getEvents", - Handler: GetEvents, -}, { - Method: http.MethodGet, - Pattern: "/network/parameters", - Name: "getNetworkParameters", - Handler: GetNetworkParameters, -}, { - Method: http.MethodGet, - Pattern: "/node_version_info", - Name: "getNodeVersionInfo", - Handler: GetNodeVersionInfo, -}} diff --git a/engine/access/rest/router/http_routes.go b/engine/access/rest/router/http_routes.go new file mode 100644 index 00000000000..5032f591142 --- /dev/null +++ b/engine/access/rest/router/http_routes.go @@ -0,0 +1,102 @@ +package router + +import ( + "net/http" + + resthttp "github.com/onflow/flow-go/engine/access/rest/http" + "github.com/onflow/flow-go/engine/access/rest/http/routes" +) + +type route struct { + Name string + Method string + Pattern string + Handler resthttp.ApiHandlerFunc +} + +var Routes = []route{{ + Method: http.MethodGet, + Pattern: "/transactions/{id}", + Name: "getTransactionByID", + Handler: routes.GetTransactionByID, +}, { + Method: http.MethodPost, + Pattern: "/transactions", + Name: "createTransaction", + Handler: routes.CreateTransaction, +}, { + Method: http.MethodGet, + Pattern: "/transaction_results/{id}", + Name: "getTransactionResultByID", + Handler: routes.GetTransactionResultByID, +}, { + Method: http.MethodGet, + Pattern: "/blocks/{id}", + Name: "getBlocksByIDs", + Handler: routes.GetBlocksByIDs, +}, { + Method: http.MethodGet, + Pattern: "/blocks", + Name: "getBlocksByHeight", + Handler: routes.GetBlocksByHeight, +}, { + Method: http.MethodGet, + Pattern: "/blocks/{id}/payload", + Name: "getBlockPayloadByID", + Handler: routes.GetBlockPayloadByID, +}, { + Method: http.MethodGet, + Pattern: "/execution_results/{id}", + Name: "getExecutionResultByID", + Handler: routes.GetExecutionResultByID, +}, { + Method: http.MethodGet, + Pattern: "/execution_results", + Name: "getExecutionResultByBlockID", + Handler: routes.GetExecutionResultsByBlockIDs, +}, { + Method: http.MethodGet, + Pattern: 
"/collections/{id}", + Name: "getCollectionByID", + Handler: routes.GetCollectionByID, +}, { + Method: http.MethodPost, + Pattern: "/scripts", + Name: "executeScript", + Handler: routes.ExecuteScript, +}, { + Method: http.MethodGet, + Pattern: "/accounts/{address}", + Name: "getAccount", + Handler: routes.GetAccount, +}, { + Method: http.MethodGet, + Pattern: "/accounts/{address}/balance", + Name: "getAccountBalance", + Handler: routes.GetAccountBalance, +}, { + Method: http.MethodGet, + Pattern: "/accounts/{address}/keys/{index}", + Name: "getAccountKeyByIndex", + Handler: routes.GetAccountKeyByIndex, +}, { + Method: http.MethodGet, + Pattern: "/accounts/{address}/keys", + Name: "getAccountKeys", + Handler: routes.GetAccountKeys, +}, { + Method: http.MethodGet, + Pattern: "/events", + Name: "getEvents", + Handler: routes.GetEvents, +}, { + Method: http.MethodGet, + Pattern: "/network/parameters", + Name: "getNetworkParameters", + Handler: routes.GetNetworkParameters, +}, { + Method: http.MethodGet, + Pattern: "/node_version_info", + Name: "getNodeVersionInfo", + Handler: routes.GetNodeVersionInfo, +}} diff --git a/engine/access/rest/router/router.go b/engine/access/rest/router/router.go new file mode 100644 index 00000000000..a1fff4b6326 --- /dev/null +++ b/engine/access/rest/router/router.go @@ -0,0 +1,181 @@ +package router + +import ( + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/gorilla/mux" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common/middleware" + "github.com/onflow/flow-go/engine/access/rest/common/models" + flowhttp "github.com/onflow/flow-go/engine/access/rest/http" + "github.com/onflow/flow-go/engine/access/rest/websockets" + dp "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers" + legacyws "github.com/onflow/flow-go/engine/access/rest/websockets/legacy" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" +) + +// RouterBuilder is a utility for building HTTP routers with common middleware and routes. +type RouterBuilder struct { + logger zerolog.Logger + router *mux.Router + v1SubRouter *mux.Router + + LinkGenerator models.LinkGenerator +} + +// NewRouterBuilder creates a new RouterBuilder instance with common middleware and a v1 sub-router. +func NewRouterBuilder( + logger zerolog.Logger, + restCollector module.RestMetrics) *RouterBuilder { + router := mux.NewRouter().StrictSlash(true) + v1SubRouter := router.PathPrefix("/v1").Subrouter() + + // common middleware for all request + v1SubRouter.Use(middleware.LoggingMiddleware(logger)) + v1SubRouter.Use(middleware.QueryExpandable()) + v1SubRouter.Use(middleware.QuerySelect()) + v1SubRouter.Use(middleware.MetricsMiddleware(restCollector)) + + return &RouterBuilder{ + logger: logger, + router: router, + v1SubRouter: v1SubRouter, + LinkGenerator: models.NewLinkGeneratorImpl(v1SubRouter), + } +} + +// AddRestRoutes adds rest routes to the router. +func (b *RouterBuilder) AddRestRoutes( + backend access.API, + chain flow.Chain, + maxRequestSize int64, + maxResponseSize int64, +) *RouterBuilder { + for _, r := range Routes { + h := flowhttp.NewHandler(b.logger, backend, r.Handler, b.LinkGenerator, chain, maxRequestSize, maxResponseSize) + b.v1SubRouter. + Methods(r.Method). + Path(r.Pattern). + Name(r.Name). 
+ Handler(h) + } + return b +} + +// AddLegacyWebsocketsRoutes adds WebSocket routes to the router. +// +// Deprecated: Use AddWebsocketsRoute instead, which allows managing multiple streams with +// a single endpoint. +func (b *RouterBuilder) AddLegacyWebsocketsRoutes( + stateStreamApi state_stream.API, + chain flow.Chain, + stateStreamConfig backend.Config, + maxRequestSize int64, + maxResponseSize int64, +) *RouterBuilder { + + for _, r := range WSLegacyRoutes { + h := legacyws.NewWSHandler(b.logger, stateStreamApi, r.Handler, chain, stateStreamConfig, maxRequestSize, maxResponseSize) + b.v1SubRouter. + Methods(r.Method). + Path(r.Pattern). + Name(r.Name). + Handler(h) + } + + return b +} + +func (b *RouterBuilder) AddWebsocketsRoute( + ctx irrecoverable.SignalerContext, + chain flow.Chain, + config websockets.Config, + maxRequestSize int64, + maxResponseSize int64, + dataProviderFactory dp.DataProviderFactory, +) *RouterBuilder { + handler := websockets.NewWebSocketHandler(ctx, b.logger, config, chain, maxRequestSize, maxResponseSize, dataProviderFactory) + b.v1SubRouter. + Methods(http.MethodGet). + Path("/ws"). + Name("ws"). + Handler(handler) + + return b +} + +func (b *RouterBuilder) Build() *mux.Router { + return b.router +} + +var routeUrlMap = map[string]string{} +var routeRE = regexp.MustCompile(`(?i)/v1/(\w+)(/(\w+))?(/(\w+))?(/(\w+))?`) + +func init() { + for _, r := range Routes { + routeUrlMap[r.Pattern] = r.Name + } + for _, r := range WSLegacyRoutes { + routeUrlMap[r.Pattern] = r.Name + } +} + +func URLToRoute(url string) (string, error) { + normalized, err := normalizeURL(url) + if err != nil { + return "", err + } + + name, ok := routeUrlMap[normalized] + if !ok { + return "", fmt.Errorf("invalid url") + } + return name, nil +} + +func normalizeURL(url string) (string, error) { + matches := routeRE.FindAllStringSubmatch(url, -1) + if len(matches) != 1 || len(matches[0]) != 8 { + return "", fmt.Errorf("invalid url") + } + + // given a URL like + // /v1/blocks/1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef/payload + // groups [ 1 ] [ 3 ] [ 5 ] + // normalized form like /v1/blocks/{id}/payload + + parts := []string{matches[0][1]} + + switch len(matches[0][3]) { + case 0: + // top level resource. e.g. /v1/blocks + case 64: + // id based resource. e.g. /v1/blocks/1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef + parts = append(parts, "{id}") + if matches[0][5] != "" { + parts = append(parts, matches[0][5]) + } + case 16: + // address based resource. e.g. /v1/accounts/1234567890abcdef + parts = append(parts, "{address}") + if matches[0][5] == "keys" && matches[0][7] != "" { + parts = append(parts, "keys", "{index}") + } else if matches[0][5] != "" { + parts = append(parts, matches[0][5]) + } + default: + // named resource. e.g. 
/v1/network/parameters + parts = append(parts, matches[0][3]) + } + + return "/" + strings.Join(parts, "/"), nil +} diff --git a/engine/access/rest/router/router_test.go b/engine/access/rest/router/router_test.go new file mode 100644 index 00000000000..36f0c0d003f --- /dev/null +++ b/engine/access/rest/router/router_test.go @@ -0,0 +1,225 @@ +package router + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseURL(t *testing.T) { + tests := []struct { + name string + url string + expected string + }{ + { + name: "/v1/transactions", + url: "/v1/transactions", + expected: "createTransaction", + }, + { + name: "/v1/transactions/{id}", + url: "/v1/transactions/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76", + expected: "getTransactionByID", + }, + { + name: "/v1/transaction_results/{id}", + url: "/v1/transaction_results/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76", + expected: "getTransactionResultByID", + }, + { + name: "/v1/blocks", + url: "/v1/blocks", + expected: "getBlocksByHeight", + }, + { + name: "/v1/blocks/{id}", + url: "/v1/blocks/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76", + expected: "getBlocksByIDs", + }, + { + name: "/v1/blocks/{id}/payload", + url: "/v1/blocks/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76/payload", + expected: "getBlockPayloadByID", + }, + { + name: "/v1/execution_results/{id}", + url: "/v1/execution_results/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76", + expected: "getExecutionResultByID", + }, + { + name: "/v1/execution_results", + url: "/v1/execution_results", + expected: "getExecutionResultByBlockID", + }, + { + name: "/v1/collections/{id}", + url: "/v1/collections/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76", + expected: "getCollectionByID", + }, + { + name: "/v1/scripts", + url: "/v1/scripts", + expected: "executeScript", + }, + { + name: "/v1/accounts/{address}", + url: "/v1/accounts/6a587be304c1224c", + expected: "getAccount", + }, + { + name: "/v1/accounts/{address}/balance", + url: "/v1/accounts/6a587be304c1224c/balance", + expected: "getAccountBalance", + }, + { + name: "/v1/accounts/{address}/keys/{index}", + url: "/v1/accounts/6a587be304c1224c/keys/0", + expected: "getAccountKeyByIndex", + }, + { + name: "/v1/accounts/{address}/keys", + url: "/v1/accounts/6a587be304c1224c/keys", + expected: "getAccountKeys", + }, + { + name: "/v1/events", + url: "/v1/events", + expected: "getEvents", + }, + { + name: "/v1/network/parameters", + url: "/v1/network/parameters", + expected: "getNetworkParameters", + }, + { + name: "/v1/node_version_info", + url: "/v1/node_version_info", + expected: "getNodeVersionInfo", + }, + { + name: "/v1/subscribe_events", + url: "/v1/subscribe_events", + expected: "subscribeEvents", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := URLToRoute(tt.url) + require.NoError(t, err) + assert.Equal(t, tt.expected, got) + }) + } +} + +func TestBenchmarkParseURL(t *testing.T) { + tests := []struct { + name string + url string + expected string + }{ + { + name: "/v1/transactions", + url: "/v1/transactions", + expected: "createTransaction", + }, + { + name: "/v1/transactions/{id}", + url: "/v1/transactions/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76", + expected: "getTransactionByID", + }, + { + name: "/v1/transaction_results/{id}", + url: 
"/v1/transaction_results/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76", + expected: "getTransactionResultByID", + }, + { + name: "/v1/blocks", + url: "/v1/blocks", + expected: "getBlocksByHeight", + }, + { + name: "/v1/blocks/{id}", + url: "/v1/blocks/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76", + expected: "getBlocksByIDs", + }, + { + name: "/v1/blocks/{id}/payload", + url: "/v1/blocks/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76/payload", + expected: "getBlockPayloadByID", + }, + { + name: "/v1/execution_results/{id}", + url: "/v1/execution_results/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76", + expected: "getExecutionResultByID", + }, + { + name: "/v1/execution_results", + url: "/v1/execution_results", + expected: "getExecutionResultByBlockID", + }, + { + name: "/v1/collections/{id}", + url: "/v1/collections/53730d3f3d2d2f46cb910b16db817d3a62adaaa72fdb3a92ee373c37c5b55a76", + expected: "getCollectionByID", + }, + { + name: "/v1/scripts", + url: "/v1/scripts", + expected: "executeScript", + }, + { + name: "/v1/accounts/{address}", + url: "/v1/accounts/6a587be304c1224c", + expected: "getAccount", + }, + { + name: "/v1/accounts/{address}/balance", + url: "/v1/accounts/6a587be304c1224c/balance", + expected: "getAccountBalance", + }, + { + name: "/v1/accounts/{address}/keys/{index}", + url: "/v1/accounts/6a587be304c1224c/keys/0", + expected: "getAccountKeyByIndex", + }, + { + name: "/v1/accounts/{address}/keys", + url: "/v1/accounts/6a587be304c1224c/keys", + expected: "getAccountKeys", + }, + { + name: "/v1/events", + url: "/v1/events", + expected: "getEvents", + }, + { + name: "/v1/network/parameters", + url: "/v1/network/parameters", + expected: "getNetworkParameters", + }, + { + name: "/v1/node_version_info", + url: "/v1/node_version_info", + expected: "getNodeVersionInfo", + }, + { + name: "/v1/subscribe_events", + url: "/v1/subscribe_events", + expected: "subscribeEvents", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + start := time.Now() + for i := 0; i < 100_000; i++ { + _, _ = URLToRoute(tt.url) + } + t.Logf("%s: %v", tt.name, time.Since(start)/100_000) + }) + } +} diff --git a/engine/access/rest/router/router_test_helpers.go b/engine/access/rest/router/router_test_helpers.go new file mode 100644 index 00000000000..7abd297f39b --- /dev/null +++ b/engine/access/rest/router/router_test_helpers.go @@ -0,0 +1,171 @@ +package router + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/engine/access/subscription" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/utils/unittest" +) + +const ( + ExpandableFieldPayload = "payload" + ExpandableExecutionResult = "execution_result" + SealedHeightQueryParam = "sealed" + FinalHeightQueryParam = "final" + StartHeightQueryParam = "start_height" + EndHeightQueryParam = "end_height" + HeightQueryParam = "height" + StartBlockIdQueryParam = "start_block_id" + EventTypesQueryParams = "event_types" + AddressesQueryParams = "addresses" + ContractsQueryParams = "contracts" + 
HeartbeatIntervalQueryParam = "heartbeat_interval" +) + +// fakeNetConn implements a mocked ws connection that can be injected in testing logic. +type fakeNetConn struct { + io.Writer + closed chan struct{} +} + +var _ net.Conn = (*fakeNetConn)(nil) + +// Close closes the fakeNetConn and signals its closure by closing the "Closed" channel. +func (c fakeNetConn) Close() error { + select { + case <-c.closed: + default: + close(c.closed) + } + return nil +} + +func (c fakeNetConn) LocalAddr() net.Addr { return localAddr } +func (c fakeNetConn) RemoteAddr() net.Addr { return remoteAddr } +func (c fakeNetConn) SetDeadline(t time.Time) error { return nil } +func (c fakeNetConn) SetReadDeadline(t time.Time) error { return nil } +func (c fakeNetConn) SetWriteDeadline(t time.Time) error { return nil } +func (c fakeNetConn) Read(p []byte) (n int, err error) { + <-c.closed + return 0, fmt.Errorf("closed") +} + +type fakeAddr int + +var ( + localAddr = fakeAddr(1) + remoteAddr = fakeAddr(2) +) + +func (a fakeAddr) Network() string { + return "net" +} + +func (a fakeAddr) String() string { + return "str" +} + +// TestHijackResponseRecorder is a custom ResponseRecorder that implements the http.Hijacker interface +// for testing WebSocket connections and hijacking. +type TestHijackResponseRecorder struct { + *httptest.ResponseRecorder + Closed chan struct{} + ResponseBuff *bytes.Buffer +} + +var _ http.Hijacker = (*TestHijackResponseRecorder)(nil) + +// Hijack implements the http.Hijacker interface by returning a fakeNetConn and a bufio.ReadWriter +// that simulate a hijacked connection. +func (w *TestHijackResponseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) { + br := bufio.NewReaderSize(strings.NewReader(""), subscription.DefaultSendBufferSize) + bw := bufio.NewWriterSize(&bytes.Buffer{}, subscription.DefaultSendBufferSize) + w.ResponseBuff = bytes.NewBuffer(make([]byte, 0)) + w.Closed = make(chan struct{}, 1) + + return fakeNetConn{w.ResponseBuff, w.Closed}, bufio.NewReadWriter(br, bw), nil +} + +func (w *TestHijackResponseRecorder) Close() error { + select { + case <-w.Closed: + default: + close(w.Closed) + } + return nil +} + +// NewTestHijackResponseRecorder creates a new instance of TestHijackResponseRecorder. 
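+//
+// A minimal usage sketch (illustrative only; `router` and `req` are assumed to be set up by the test):
+// the recorder stands in for an http.ResponseWriter so a handler can hijack the connection, and the
+// fields populated by Hijack can then be inspected:
+//
+//	rec := NewTestHijackResponseRecorder()
+//	router.ServeHTTP(rec, req)         // the handler may call Hijack on rec
+//	<-rec.Closed                       // wait until the hijacked connection is closed
+//	t.Log(rec.ResponseBuff.String())   // bytes written to the fake connection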
+func NewTestHijackResponseRecorder() *TestHijackResponseRecorder { + return &TestHijackResponseRecorder{ + ResponseRecorder: httptest.NewRecorder(), + } +} + +func ExecuteRequest(req *http.Request, backend access.API) *httptest.ResponseRecorder { + router := NewRouterBuilder( + unittest.Logger(), + metrics.NewNoopCollector(), + ).AddRestRoutes( + backend, + flow.Testnet.Chain(), + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + ).Build() + + rr := httptest.NewRecorder() + router.ServeHTTP(rr, req) + return rr +} + +func ExecuteLegacyWsRequest(req *http.Request, stateStreamApi state_stream.API, responseRecorder *TestHijackResponseRecorder, chain flow.Chain) { + restCollector := metrics.NewNoopCollector() + + config := backend.Config{ + EventFilterConfig: state_stream.DefaultEventFilterConfig, + MaxGlobalStreams: subscription.DefaultMaxGlobalStreams, + HeartbeatInterval: subscription.DefaultHeartbeatInterval, + } + + router := NewRouterBuilder( + unittest.Logger(), + restCollector, + ).AddLegacyWebsocketsRoutes( + stateStreamApi, + chain, config, commonrpc.DefaultAccessMaxRequestSize, commonrpc.DefaultAccessMaxResponseSize, + ).Build() + router.ServeHTTP(responseRecorder, req) +} + +func AssertOKResponse(t *testing.T, req *http.Request, expectedRespBody string, backend *mock.API) { + AssertResponse(t, req, http.StatusOK, expectedRespBody, backend) +} + +func AssertResponse(t *testing.T, req *http.Request, status int, expectedRespBody string, backend *mock.API) { + rr := ExecuteRequest(req, backend) + actualResponseBody := rr.Body.String() + require.JSONEq(t, + expectedRespBody, + actualResponseBody, + fmt.Sprintf("Failed Request: %s\nExpected JSON:\n %s \nActual JSON:\n %s\n", req.URL, expectedRespBody, actualResponseBody), + ) + require.Equal(t, status, rr.Code) +} diff --git a/engine/access/rest/router/ws_routes.go b/engine/access/rest/router/ws_routes.go new file mode 100644 index 00000000000..03c28fa27aa --- /dev/null +++ b/engine/access/rest/router/ws_routes.go @@ -0,0 +1,22 @@ +package router + +import ( + "net/http" + + "github.com/onflow/flow-go/engine/access/rest/websockets/legacy" + "github.com/onflow/flow-go/engine/access/rest/websockets/legacy/routes" +) + +type wsLegacyRoute struct { + Name string + Method string + Pattern string + Handler legacy.SubscribeHandlerFunc +} + +var WSLegacyRoutes = []wsLegacyRoute{{ + Method: http.MethodGet, + Pattern: "/subscribe_events", + Name: "subscribeEvents", + Handler: routes.SubscribeEvents, +}} diff --git a/engine/access/rest/scripts.go b/engine/access/rest/scripts.go deleted file mode 100644 index 8bd86bae54f..00000000000 --- a/engine/access/rest/scripts.go +++ /dev/null @@ -1,36 +0,0 @@ -package rest - -import ( - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" - "github.com/onflow/flow-go/model/flow" - - "github.com/onflow/flow-go/access" -) - -// ExecuteScript handler sends the script from the request to be executed. 
-func ExecuteScript(r *request.Request, backend access.API, _ models.LinkGenerator) (interface{}, error) { - req, err := r.GetScriptRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - if req.BlockID != flow.ZeroID { - return backend.ExecuteScriptAtBlockID(r.Context(), req.BlockID, req.Script.Source, req.Script.Args) - } - - // default to sealed height - if req.BlockHeight == request.SealedHeight || req.BlockHeight == request.EmptyHeight { - return backend.ExecuteScriptAtLatestBlock(r.Context(), req.Script.Source, req.Script.Args) - } - - if req.BlockHeight == request.FinalHeight { - finalBlock, _, err := backend.GetLatestBlockHeader(r.Context(), false) - if err != nil { - return nil, err - } - req.BlockHeight = finalBlock.Height - } - - return backend.ExecuteScriptAtBlockHeight(r.Context(), req.BlockHeight, req.Script.Source, req.Script.Args) -} diff --git a/engine/access/rest/scripts_test.go b/engine/access/rest/scripts_test.go deleted file mode 100644 index 7e3271c1d81..00000000000 --- a/engine/access/rest/scripts_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package rest - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "net/url" - "testing" - - "github.com/onflow/flow-go/engine/access/rest/util" - - mocks "github.com/stretchr/testify/mock" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/access/mock" - "github.com/onflow/flow-go/model/flow" -) - -func scriptReq(id string, height string, body interface{}) *http.Request { - u, _ := url.ParseRequestURI("/v1/scripts") - q := u.Query() - - if id != "" { - q.Add("block_id", id) - } - if height != "" { - q.Add("block_height", height) - } - - u.RawQuery = q.Encode() - - jsonBody, _ := json.Marshal(body) - req, _ := http.NewRequest("POST", u.String(), bytes.NewBuffer(jsonBody)) - - return req -} - -func TestScripts(t *testing.T) { - validCode := []byte(`pub fun main(foo: String): String { return foo }`) - validArgs := []byte(`{ "type": "String", "value": "hello world" }`) - validBody := map[string]interface{}{ - "script": util.ToBase64(validCode), - "arguments": []string{util.ToBase64(validArgs)}, - } - - t.Run("get by Latest height", func(t *testing.T) { - backend := &mock.API{} - backend.Mock. - On("ExecuteScriptAtLatestBlock", mocks.Anything, validCode, [][]byte{validArgs}). - Return([]byte("hello world"), nil) - - req := scriptReq("", sealedHeightQueryParam, validBody) - assertOKResponse(t, req, fmt.Sprintf( - "\"%s\"", - base64.StdEncoding.EncodeToString([]byte(`hello world`)), - ), backend) - }) - - t.Run("get by height", func(t *testing.T) { - backend := &mock.API{} - height := uint64(1337) - - backend.Mock. - On("ExecuteScriptAtBlockHeight", mocks.Anything, height, validCode, [][]byte{validArgs}). - Return([]byte("hello world"), nil) - - req := scriptReq("", fmt.Sprintf("%d", height), validBody) - assertOKResponse(t, req, fmt.Sprintf( - "\"%s\"", - base64.StdEncoding.EncodeToString([]byte(`hello world`)), - ), backend) - }) - - t.Run("get by ID", func(t *testing.T) { - backend := &mock.API{} - id, _ := flow.HexStringToIdentifier("222dc5dd51b9e4910f687e475f892f495f3352362ba318b53e318b4d78131312") - - backend.Mock. - On("ExecuteScriptAtBlockID", mocks.Anything, id, validCode, [][]byte{validArgs}). 
- Return([]byte("hello world"), nil) - - req := scriptReq(id.String(), "", validBody) - assertOKResponse(t, req, fmt.Sprintf( - "\"%s\"", - base64.StdEncoding.EncodeToString([]byte(`hello world`)), - ), backend) - }) - - t.Run("get error", func(t *testing.T) { - backend := &mock.API{} - backend.Mock. - On("ExecuteScriptAtBlockHeight", mocks.Anything, uint64(1337), validCode, [][]byte{validArgs}). - Return(nil, status.Error(codes.Internal, "internal server error")) - - req := scriptReq("", "1337", validBody) - assertResponse( - t, - req, - http.StatusBadRequest, - `{"code":400, "message":"Invalid Flow request: internal server error"}`, - backend, - ) - }) - - t.Run("get invalid", func(t *testing.T) { - backend := &mock.API{} - backend.Mock. - On("ExecuteScriptAtBlockHeight", mocks.Anything, mocks.Anything, mocks.Anything, mocks.Anything). - Return(nil, nil) - - tests := []struct { - id string - height string - body map[string]interface{} - out string - status int - }{ - {"invalidID", "", validBody, `{"code":400,"message":"invalid ID format"}`, http.StatusBadRequest}, - {"", "invalid", validBody, `{"code":400,"message":"invalid height format"}`, http.StatusBadRequest}, - {"", "-1", validBody, `{"code":400,"message":"invalid height format"}`, http.StatusBadRequest}, - {"", "1337", nil, `{"code":400,"message":"request body must not be empty"}`, http.StatusBadRequest}, - } - - for _, test := range tests { - req := scriptReq(test.id, test.height, test.body) - assertResponse(t, req, http.StatusBadRequest, test.out, backend) - } - }) -} diff --git a/engine/access/rest/server.go b/engine/access/rest/server.go index b7f45bb8645..ce31c99ebf1 100644 --- a/engine/access/rest/server.go +++ b/engine/access/rest/server.go @@ -8,15 +8,66 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/engine/access/rest/websockets" + dp "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/state_stream/backend" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" ) +const ( + // DefaultReadTimeout is the default read timeout for the HTTP server + DefaultReadTimeout = time.Second * 15 + + // DefaultWriteTimeout is the default write timeout for the HTTP server + DefaultWriteTimeout = time.Second * 30 + + // DefaultIdleTimeout is the default idle timeout for the HTTP server + DefaultIdleTimeout = time.Second * 60 +) + +type Config struct { + ListenAddress string + WriteTimeout time.Duration + ReadTimeout time.Duration + IdleTimeout time.Duration + MaxRequestSize int64 + MaxResponseSize int64 +} + // NewServer returns an HTTP server initialized with the REST API handler -func NewServer(backend access.API, listenAddress string, logger zerolog.Logger, chain flow.Chain) (*http.Server, error) { +func NewServer( + ctx irrecoverable.SignalerContext, + serverAPI access.API, + config Config, + logger zerolog.Logger, + chain flow.Chain, + restCollector module.RestMetrics, + stateStreamApi state_stream.API, + stateStreamConfig backend.Config, + enableNewWebsocketsStreamAPI bool, + wsConfig websockets.Config, +) (*http.Server, error) { + builder := router.NewRouterBuilder(logger, restCollector).AddRestRoutes(serverAPI, chain, config.MaxRequestSize, config.MaxResponseSize) + if stateStreamApi != nil { + 
builder.AddLegacyWebsocketsRoutes(stateStreamApi, chain, stateStreamConfig, config.MaxRequestSize, config.MaxResponseSize) + } + + dataProviderFactory := dp.NewDataProviderFactory( + logger, + stateStreamApi, + serverAPI, + chain, + stateStreamConfig.EventFilterConfig, + stateStreamConfig.HeartbeatInterval, + builder.LinkGenerator, + ) - router, err := newRouter(backend, logger, chain) - if err != nil { - return nil, err + if enableNewWebsocketsStreamAPI { + builder.AddWebsocketsRoute(ctx, chain, wsConfig, config.MaxRequestSize, config.MaxResponseSize, dataProviderFactory) } c := cors.New(cors.Options{ @@ -30,10 +81,10 @@ func NewServer(backend access.API, listenAddress string, logger zerolog.Logger, }) return &http.Server{ - Addr: listenAddress, - Handler: c.Handler(router), - WriteTimeout: time.Second * 15, - ReadTimeout: time.Second * 15, - IdleTimeout: time.Second * 60, + Handler: c.Handler(builder.Build()), + Addr: config.ListenAddress, + WriteTimeout: config.WriteTimeout, + ReadTimeout: config.ReadTimeout, + IdleTimeout: config.IdleTimeout, }, nil } diff --git a/engine/access/rest/test_helpers.go b/engine/access/rest/test_helpers.go deleted file mode 100644 index eb63376da4e..00000000000 --- a/engine/access/rest/test_helpers.go +++ /dev/null @@ -1,56 +0,0 @@ -package rest - -import ( - "bytes" - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/access/mock" - "github.com/onflow/flow-go/model/flow" -) - -const ( - ExpandableFieldPayload = "payload" - ExpandableExecutionResult = "execution_result" - sealedHeightQueryParam = "sealed" - finalHeightQueryParam = "final" - startHeightQueryParam = "start_height" - endHeightQueryParam = "end_height" - heightQueryParam = "height" -) - -func executeRequest(req *http.Request, backend *mock.API) (*httptest.ResponseRecorder, error) { - var b bytes.Buffer - logger := zerolog.New(&b) - router, err := newRouter(backend, logger, flow.Testnet.Chain()) - if err != nil { - return nil, err - } - - rr := httptest.NewRecorder() - router.ServeHTTP(rr, req) - return rr, nil -} - -func assertOKResponse(t *testing.T, req *http.Request, expectedRespBody string, backend *mock.API) { - assertResponse(t, req, http.StatusOK, expectedRespBody, backend) -} - -func assertResponse(t *testing.T, req *http.Request, status int, expectedRespBody string, backend *mock.API) { - rr, err := executeRequest(req, backend) - assert.NoError(t, err) - - actualResponseBody := rr.Body.String() - require.JSONEq(t, - expectedRespBody, - actualResponseBody, - fmt.Sprintf("Failed Request: %s\nExpected JSON:\n %s \nActual JSON:\n %s\n", req.URL, expectedRespBody, actualResponseBody), - ) - require.Equal(t, status, rr.Code) -} diff --git a/engine/access/rest/transactions.go b/engine/access/rest/transactions.go deleted file mode 100644 index f8dfc83dedb..00000000000 --- a/engine/access/rest/transactions.go +++ /dev/null @@ -1,67 +0,0 @@ -package rest - -import ( - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" -) - -// GetTransactionByID gets a transaction by requested ID. 
-func GetTransactionByID(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - req, err := r.GetTransactionRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - tx, err := backend.GetTransaction(r.Context(), req.ID) - if err != nil { - return nil, err - } - - var txr *access.TransactionResult - // only lookup result if transaction result is to be expanded - if req.ExpandsResult { - txr, err = backend.GetTransactionResult(r.Context(), req.ID, req.BlockID, req.CollectionID) - if err != nil { - return nil, err - } - } - - var response models.Transaction - response.Build(tx, txr, link) - return response, nil -} - -// GetTransactionResultByID retrieves transaction result by the transaction ID. -func GetTransactionResultByID(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - req, err := r.GetTransactionResultRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - txr, err := backend.GetTransactionResult(r.Context(), req.ID, req.BlockID, req.CollectionID) - if err != nil { - return nil, err - } - - var response models.TransactionResult - response.Build(txr, req.ID, link) - return response, nil -} - -// CreateTransaction creates a new transaction from provided payload. -func CreateTransaction(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { - req, err := r.CreateTransactionRequest() - if err != nil { - return nil, NewBadRequestError(err) - } - - err = backend.SendTransaction(r.Context(), &req.Transaction) - if err != nil { - return nil, err - } - - var response models.Transaction - response.Build(&req.Transaction, nil, link) - return response, nil -} diff --git a/engine/access/rest/transactions_test.go b/engine/access/rest/transactions_test.go deleted file mode 100644 index 26710c747e5..00000000000 --- a/engine/access/rest/transactions_test.go +++ /dev/null @@ -1,465 +0,0 @@ -package rest - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - "testing" - - mocks "github.com/stretchr/testify/mock" - "golang.org/x/text/cases" - "golang.org/x/text/language" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/access/mock" - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func getTransactionReq(id string, expandResult bool, blockIdQuery string, collectionIdQuery string) *http.Request { - u, _ := url.Parse(fmt.Sprintf("/v1/transactions/%s", id)) - q := u.Query() - - if expandResult { - // by default expand all since we test expanding with converters - q.Add("expand", "result") - } - - if blockIdQuery != "" { - q.Add("block_id", blockIdQuery) - } - - if collectionIdQuery != "" { - q.Add("collection_id", collectionIdQuery) - } - - u.RawQuery = q.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -func getTransactionResultReq(id string, blockIdQuery string, collectionIdQuery string) *http.Request { - u, _ := url.Parse(fmt.Sprintf("/v1/transaction_results/%s", id)) - q := u.Query() - if blockIdQuery != "" { - q.Add("block_id", blockIdQuery) - } - - if collectionIdQuery != "" { - q.Add("collection_id", collectionIdQuery) - } - - u.RawQuery = q.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -func createTransactionReq(body 
interface{}) *http.Request { - jsonBody, _ := json.Marshal(body) - req, _ := http.NewRequest("POST", "/v1/transactions", bytes.NewBuffer(jsonBody)) - return req -} - -func validCreateBody(tx flow.TransactionBody) map[string]interface{} { - tx.Arguments = [][]uint8{} // fix how fixture creates nil values - auth := make([]string, len(tx.Authorizers)) - for i, a := range tx.Authorizers { - auth[i] = a.String() - } - - return map[string]interface{}{ - "script": util.ToBase64(tx.Script), - "arguments": tx.Arguments, - "reference_block_id": tx.ReferenceBlockID.String(), - "gas_limit": fmt.Sprintf("%d", tx.GasLimit), - "payer": tx.Payer.String(), - "proposal_key": map[string]interface{}{ - "address": tx.ProposalKey.Address.String(), - "key_index": fmt.Sprintf("%d", tx.ProposalKey.KeyIndex), - "sequence_number": fmt.Sprintf("%d", tx.ProposalKey.SequenceNumber), - }, - "authorizers": auth, - "payload_signatures": []map[string]interface{}{{ - "address": tx.PayloadSignatures[0].Address.String(), - "key_index": fmt.Sprintf("%d", tx.PayloadSignatures[0].KeyIndex), - "signature": util.ToBase64(tx.PayloadSignatures[0].Signature), - }}, - "envelope_signatures": []map[string]interface{}{{ - "address": tx.EnvelopeSignatures[0].Address.String(), - "key_index": fmt.Sprintf("%d", tx.EnvelopeSignatures[0].KeyIndex), - "signature": util.ToBase64(tx.EnvelopeSignatures[0].Signature), - }}, - } -} - -func TestGetTransactions(t *testing.T) { - - t.Run("get by ID without results", func(t *testing.T) { - backend := &mock.API{} - tx := unittest.TransactionFixture() - req := getTransactionReq(tx.ID().String(), false, "", "") - - backend.Mock. - On("GetTransaction", mocks.Anything, tx.ID()). - Return(&tx.TransactionBody, nil) - - expected := fmt.Sprintf(` - { - "id":"%s", - "script":"cHViIGZ1biBtYWluKCkge30=", - "arguments": [], - "reference_block_id":"%s", - "gas_limit":"10", - "payer":"8c5303eaa26202d6", - "proposal_key":{ - "address":"8c5303eaa26202d6", - "key_index":"1", - "sequence_number":"0" - }, - "authorizers":[ - "8c5303eaa26202d6" - ], - "payload_signatures": [], - "envelope_signatures":[ - { - "address":"8c5303eaa26202d6", - "key_index":"1", - "signature":"%s" - } - ], - "_links":{ - "_self":"/v1/transactions/%s" - }, - "_expandable": { - "result": "/v1/transaction_results/%s" - } - }`, - tx.ID(), tx.ReferenceBlockID, util.ToBase64(tx.EnvelopeSignatures[0].Signature), tx.ID(), tx.ID()) - - assertOKResponse(t, req, expected, backend) - }) - - t.Run("Get by ID with results", func(t *testing.T) { - backend := &mock.API{} - tx := unittest.TransactionFixture() - txr := transactionResultFixture(tx) - - backend.Mock. - On("GetTransaction", mocks.Anything, tx.ID()). - Return(&tx.TransactionBody, nil) - - backend.Mock. - On("GetTransactionResult", mocks.Anything, tx.ID(), flow.ZeroID, flow.ZeroID). 
- Return(txr, nil) - - req := getTransactionReq(tx.ID().String(), true, "", "") - - expected := fmt.Sprintf(` - { - "id":"%s", - "script":"cHViIGZ1biBtYWluKCkge30=", - "arguments": [], - "reference_block_id":"%s", - "gas_limit":"10", - "payer":"8c5303eaa26202d6", - "proposal_key":{ - "address":"8c5303eaa26202d6", - "key_index":"1", - "sequence_number":"0" - }, - "authorizers":[ - "8c5303eaa26202d6" - ], - "payload_signatures": [], - "envelope_signatures":[ - { - "address":"8c5303eaa26202d6", - "key_index":"1", - "signature":"%s" - } - ], - "result": { - "block_id": "%s", - "collection_id": "%s", - "execution": "Success", - "status": "Sealed", - "status_code": 1, - "error_message": "", - "computation_used": "0", - "events": [ - { - "type": "flow.AccountCreated", - "transaction_id": "%s", - "transaction_index": "0", - "event_index": "0", - "payload": "" - } - ], - "_links": { - "_self": "/v1/transaction_results/%s" - } - }, - "_expandable": {}, - "_links":{ - "_self":"/v1/transactions/%s" - } - }`, - tx.ID(), tx.ReferenceBlockID, util.ToBase64(tx.EnvelopeSignatures[0].Signature), tx.ReferenceBlockID, txr.CollectionID, tx.ID(), tx.ID(), tx.ID()) - assertOKResponse(t, req, expected, backend) - }) - - t.Run("get by ID Invalid", func(t *testing.T) { - backend := &mock.API{} - - req := getTransactionReq("invalid", false, "", "") - expected := `{"code":400, "message":"invalid ID format"}` - assertResponse(t, req, http.StatusBadRequest, expected, backend) - }) - - t.Run("get by ID non-existing", func(t *testing.T) { - backend := &mock.API{} - tx := unittest.TransactionFixture() - req := getTransactionReq(tx.ID().String(), false, "", "") - - backend.Mock. - On("GetTransaction", mocks.Anything, tx.ID()). - Return(nil, status.Error(codes.NotFound, "transaction not found")) - - expected := `{"code":404, "message":"Flow resource not found: transaction not found"}` - assertResponse(t, req, http.StatusNotFound, expected, backend) - }) -} - -func TestGetTransactionResult(t *testing.T) { - id := unittest.IdentifierFixture() - bid := unittest.IdentifierFixture() - cid := unittest.IdentifierFixture() - txr := &access.TransactionResult{ - Status: flow.TransactionStatusSealed, - StatusCode: 10, - Events: []flow.Event{ - unittest.EventFixture(flow.EventAccountCreated, 1, 0, id, 200), - }, - ErrorMessage: "", - BlockID: bid, - CollectionID: cid, - } - txr.Events[0].Payload = []byte(`test payload`) - expected := fmt.Sprintf(`{ - "block_id": "%s", - "collection_id": "%s", - "execution": "Success", - "status": "Sealed", - "status_code": 10, - "error_message": "", - "computation_used": "0", - "events": [ - { - "type": "flow.AccountCreated", - "transaction_id": "%s", - "transaction_index": "1", - "event_index": "0", - "payload": "%s" - } - ], - "_links": { - "_self": "/v1/transaction_results/%s" - } - }`, bid.String(), cid.String(), id.String(), util.ToBase64(txr.Events[0].Payload), id.String()) - - t.Run("get by transaction ID", func(t *testing.T) { - backend := &mock.API{} - req := getTransactionResultReq(id.String(), "", "") - - backend.Mock. - On("GetTransactionResult", mocks.Anything, id, flow.ZeroID, flow.ZeroID). - Return(txr, nil) - - assertOKResponse(t, req, expected, backend) - }) - - t.Run("get by block ID", func(t *testing.T) { - backend := &mock.API{} - req := getTransactionResultReq(id.String(), bid.String(), "") - - backend.Mock. - On("GetTransactionResult", mocks.Anything, id, bid, flow.ZeroID). 
- Return(txr, nil) - - assertOKResponse(t, req, expected, backend) - }) - - t.Run("get by collection ID", func(t *testing.T) { - backend := &mock.API{} - req := getTransactionResultReq(id.String(), "", cid.String()) - - backend.Mock. - On("GetTransactionResult", mocks.Anything, id, flow.ZeroID, cid). - Return(txr, nil) - - assertOKResponse(t, req, expected, backend) - }) - - t.Run("get execution statuses", func(t *testing.T) { - backend := &mock.API{} - testVectors := map[*access.TransactionResult]string{{ - Status: flow.TransactionStatusExpired, - ErrorMessage: "", - }: string(models.FAILURE_RESULT), { - Status: flow.TransactionStatusSealed, - ErrorMessage: "cadence runtime exception", - }: string(models.FAILURE_RESULT), { - Status: flow.TransactionStatusFinalized, - ErrorMessage: "", - }: string(models.PENDING_RESULT), { - Status: flow.TransactionStatusPending, - ErrorMessage: "", - }: string(models.PENDING_RESULT), { - Status: flow.TransactionStatusExecuted, - ErrorMessage: "", - }: string(models.PENDING_RESULT), { - Status: flow.TransactionStatusSealed, - ErrorMessage: "", - }: string(models.SUCCESS_RESULT)} - - for txResult, err := range testVectors { - txResult.BlockID = bid - txResult.CollectionID = cid - req := getTransactionResultReq(id.String(), "", "") - backend.Mock. - On("GetTransactionResult", mocks.Anything, id, flow.ZeroID, flow.ZeroID). - Return(txResult, nil). - Once() - - expectedResp := fmt.Sprintf(`{ - "block_id": "%s", - "collection_id": "%s", - "execution": "%s", - "status": "%s", - "status_code": 0, - "error_message": "%s", - "computation_used": "0", - "events": [], - "_links": { - "_self": "/v1/transaction_results/%s" - } - }`, bid.String(), cid.String(), err, cases.Title(language.English).String(strings.ToLower(txResult.Status.String())), txResult.ErrorMessage, id.String()) - assertOKResponse(t, req, expectedResp, backend) - } - }) - - t.Run("get by ID Invalid", func(t *testing.T) { - backend := &mock.API{} - req := getTransactionResultReq("invalid", "", "") - - expected := `{"code":400, "message":"invalid ID format"}` - assertResponse(t, req, http.StatusBadRequest, expected, backend) - }) -} - -func TestCreateTransaction(t *testing.T) { - - t.Run("create", func(t *testing.T) { - backend := &mock.API{} - tx := unittest.TransactionBodyFixture() - tx.PayloadSignatures = []flow.TransactionSignature{unittest.TransactionSignatureFixture()} - tx.Arguments = [][]uint8{} - req := createTransactionReq(validCreateBody(tx)) - - backend.Mock. - On("SendTransaction", mocks.Anything, &tx). 
- Return(nil) - - expected := fmt.Sprintf(` - { - "id":"%s", - "script":"cHViIGZ1biBtYWluKCkge30=", - "arguments": [], - "reference_block_id":"%s", - "gas_limit":"10", - "payer":"8c5303eaa26202d6", - "proposal_key":{ - "address":"8c5303eaa26202d6", - "key_index":"1", - "sequence_number":"0" - }, - "authorizers":[ - "8c5303eaa26202d6" - ], - "payload_signatures":[ - { - "address":"8c5303eaa26202d6", - "key_index":"1", - "signature":"%s" - } - ], - "envelope_signatures":[ - { - "address":"8c5303eaa26202d6", - "key_index":"1", - "signature":"%s" - } - ], - "_expandable": { - "result": "/v1/transaction_results/%s" - }, - "_links":{ - "_self":"/v1/transactions/%s" - } - }`, - tx.ID(), tx.ReferenceBlockID, util.ToBase64(tx.PayloadSignatures[0].Signature), util.ToBase64(tx.EnvelopeSignatures[0].Signature), tx.ID(), tx.ID()) - assertOKResponse(t, req, expected, backend) - }) - - t.Run("post invalid transaction", func(t *testing.T) { - backend := &mock.API{} - tests := []struct { - inputField string - inputValue string - output string - }{ - {"reference_block_id", "-1", `{"code":400, "message":"invalid reference block ID: invalid ID format"}`}, - {"reference_block_id", "", `{"code":400, "message":"reference block not provided"}`}, - {"gas_limit", "-1", `{"code":400, "message":"invalid gas limit: value must be an unsigned 64 bit integer"}`}, - {"payer", "yo", `{"code":400, "message":"invalid payer: invalid address"}`}, - {"proposal_key", "yo", `{"code":400, "message":"request body contains an invalid value for the \"proposal_key\" field (at position 461)"}`}, - {"authorizers", "", `{"code":400, "message":"request body contains an invalid value for the \"authorizers\" field (at position 32)"}`}, - {"authorizers", "yo", `{"code":400, "message":"request body contains an invalid value for the \"authorizers\" field (at position 34)"}`}, - {"envelope_signatures", "", `{"code":400, "message":"request body contains an invalid value for the \"envelope_signatures\" field (at position 75)"}`}, - {"payload_signatures", "", `{"code":400, "message":"request body contains an invalid value for the \"payload_signatures\" field (at position 292)"}`}, - } - - for _, test := range tests { - tx := unittest.TransactionBodyFixture() - tx.PayloadSignatures = []flow.TransactionSignature{unittest.TransactionSignatureFixture()} - testTx := validCreateBody(tx) - testTx[test.inputField] = test.inputValue - req := createTransactionReq(testTx) - - assertResponse(t, req, http.StatusBadRequest, test.output, backend) - } - }) -} - -func transactionResultFixture(tx flow.Transaction) *access.TransactionResult { - cid := unittest.IdentifierFixture() - return &access.TransactionResult{ - Status: flow.TransactionStatusSealed, - StatusCode: 1, - Events: []flow.Event{ - unittest.EventFixture(flow.EventAccountCreated, 0, 0, tx.ID(), 255), - }, - ErrorMessage: "", - BlockID: tx.ReferenceBlockID, - CollectionID: cid, - } -} diff --git a/engine/access/rest/util/converter.go b/engine/access/rest/util/converter.go index e1acdf02dfa..a0fec230d43 100644 --- a/engine/access/rest/util/converter.go +++ b/engine/access/rest/util/converter.go @@ -2,12 +2,13 @@ package util import ( "encoding/base64" + "errors" "fmt" "strconv" ) -// FromUint64 convert uint64 to string -func FromUint64(number uint64) string { +// FromUint convert uint to string +func FromUint[U uint | uint64 | uint32](number U) string { return fmt.Sprintf("%d", number) } @@ -15,11 +16,38 @@ func FromUint64(number uint64) string { func ToUint64(uint64Str string) (uint64, error) { val, err 
:= strconv.ParseUint(uint64Str, 10, 64) if err != nil { + if errors.Is(err, strconv.ErrRange) { + return 0, fmt.Errorf("value overflows uint64 range") + } return 0, fmt.Errorf("value must be an unsigned 64 bit integer") // hide error from user } return val, nil } +// ToUint32 converts an input string to a uint32 number +func ToUint32(uint32Str string) (uint32, error) { + val, err := strconv.ParseUint(uint32Str, 10, 32) + if err != nil { + if errors.Is(err, strconv.ErrRange) { + return 0, fmt.Errorf("value overflows uint32 range") + } + return 0, fmt.Errorf("value must be an unsigned 32 bit integer") // hide error from user + } + return uint32(val), nil +} + +// ToInt converts an input string to an int. +func ToInt(intStr string) (int, error) { + val, err := strconv.ParseInt(intStr, 10, 0) // "0" automatically adapts to the native int size + if err != nil { + if errors.Is(err, strconv.ErrRange) { + return 0, fmt.Errorf("value overflows int range") + } + return 0, fmt.Errorf("value must be a valid integer") // hide detailed error from user + } + return int(val), nil +} + // ToBase64 converts byte input to string base64 encoded output func ToBase64(byteValue []byte) string { return base64.StdEncoding.EncodeToString(byteValue) diff --git a/engine/access/rest/util/select_filter.go b/engine/access/rest/util/select_filter.go index f63e5fa6814..4f7172a7ff5 100644 --- a/engine/access/rest/util/select_filter.go +++ b/engine/access/rest/util/select_filter.go @@ -25,10 +25,10 @@ func SelectFilter(object interface{}, selectKeys []string) (interface{}, error) } filter := sliceToMap(selectKeys) - switch itemAsType := (*outputMap).(type) { case []interface{}: - filterSlice(itemAsType, "", filter) + filteredSlice, _ := filterSlice(itemAsType, "", filter) + *outputMap = filteredSlice case map[string]interface{}: filterObject(itemAsType, "", filter) } @@ -40,6 +40,10 @@ func SelectFilter(object interface{}, selectKeys []string) (interface{}, error) func filterObject(jsonStruct map[string]interface{}, prefix string, filterMap map[string]bool) { for key, item := range jsonStruct { newPrefix := jsonPath(prefix, key) + // if this path is explicitly selected, keep its entire value and move on to the other keys + if filterMap[newPrefix] { + continue + } switch itemAsType := item.(type) { case []interface{}: // if the value of a key is a list, call filterSlice @@ -87,7 +91,7 @@ func filterSlice(jsonSlice []interface{}, prefix string, filterMap map[string]bo if len(itemAsType) == 0 { // since all elements of the slice are the same, if one sub-slice has been filtered out, we can safely // remove all sub-slices and return (instead of iterating all slice elements) - return nil, sliceType + return make([]interface{}, 0), sliceType } case map[string]interface{}: // if the slice has structs as elements, call filterObject @@ -96,7 +100,7 @@ func filterSlice(jsonSlice []interface{}, prefix string, filterMap map[string]bo if len(itemAsType) == 0 { // since all elements of the slice are the same, if one struct element has been filtered out, we can safely // remove all struct elements and return (instead of iterating all slice elements) - return nil, false + return make([]interface{}, 0), false } default: // if the elements are neither a slice nor a struct, then return the slice and true to indicate the slice has diff --git a/engine/access/rest/util/select_filter_test.go b/engine/access/rest/util/select_filter_test.go index e810affece8..61c194b039a 100644 --- a/engine/access/rest/util/select_filter_test.go +++ b/engine/access/rest/util/select_filter_test.go @@ -8,7 +8,7 @@ 
import ( "testing" "time" - "github.com/onflow/flow-go/engine/access/rest/models" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" "github.com/onflow/flow-go/engine/access/rest/util" "github.com/stretchr/testify/require" @@ -52,6 +52,18 @@ func TestSelectFilter(t *testing.T) { keys: []string{"b.c"}, description: "single object with arrays as values", }, + { + input: `{ "a": 1, "b": {"c":2, "d":3}}`, + output: `{ "b": {"c":2, "d":3}}`, + keys: []string{"b"}, + description: "full single object with nested fields", + }, + { + input: `{ "a": 1, "b": {"c":2, "d":3}}`, + output: `{}`, + keys: []string{"e"}, + description: "unknown object", + }, } for _, tv := range testVectors { @@ -79,7 +91,7 @@ func testFilter(t *testing.T, inputJson, exepectedJson string, description strin func TestExampleSelectFilter(t *testing.T) { - blocks := make([]models.Block, 2) + blocks := make([]commonmodels.Block, 2) for i := range blocks { block, err := generateBlock() require.NoError(t, err) @@ -110,7 +122,7 @@ func TestExampleSelectFilter(t *testing.T) { require.Equal(t, string(byteValue), string(marshalled)) } -func generateBlock() (models.Block, error) { +func generateBlock() (commonmodels.Block, error) { dummySignature := "abcdef0123456789" multipleDummySignatures := []string{dummySignature, dummySignature} @@ -118,31 +130,31 @@ func generateBlock() (models.Block, error) { dateString := "2021-11-20T11:45:26.371Z" t, err := time.Parse(time.RFC3339, dateString) if err != nil { - return models.Block{}, err + return commonmodels.Block{}, err } - return models.Block{ - Header: &models.BlockHeader{ + return commonmodels.Block{ + Header: &commonmodels.BlockHeader{ Id: dummyID, ParentId: dummyID, Height: "100", - Timestamp: t, + Timestamp: t.UTC(), ParentVoterSignature: dummySignature, }, - Payload: &models.BlockPayload{ - CollectionGuarantees: []models.CollectionGuarantee{ + Payload: &commonmodels.BlockPayload{ + CollectionGuarantees: []commonmodels.CollectionGuarantee{ { CollectionId: "abcdef0123456789", SignerIndices: fmt.Sprintf("%x", []byte{1}), Signature: dummySignature, }, }, - BlockSeals: []models.BlockSeal{ + BlockSeals: []commonmodels.BlockSeal{ { BlockId: dummyID, ResultId: dummyID, FinalState: "final", - AggregatedApprovalSignatures: []models.AggregatedSignature{ + AggregatedApprovalSignatures: []commonmodels.AggregatedSignature{ { VerifierSignatures: multipleDummySignatures, SignerIds: multipleDummySignatures, @@ -151,10 +163,10 @@ func generateBlock() (models.Block, error) { }, }, }, - ExecutionResult: &models.ExecutionResult{ + ExecutionResult: &commonmodels.ExecutionResult{ Id: dummyID, BlockId: dummyID, - Events: []models.Event{ + Events: []commonmodels.Event{ { Type_: "type", TransactionId: dummyID, @@ -170,7 +182,7 @@ func generateBlock() (models.Block, error) { Payload: "payload", }, }, - Links: &models.Links{ + Links: &commonmodels.Links{ Self: "link", }, }, diff --git a/engine/access/rest/websockets/config.go b/engine/access/rest/websockets/config.go new file mode 100644 index 00000000000..0b4c48d2d06 --- /dev/null +++ b/engine/access/rest/websockets/config.go @@ -0,0 +1,78 @@ +package websockets + +import ( + "time" +) + +const ( + // PingPeriod defines the interval at which ping messages are sent to the client. + // This value must be less than pongWait, cause it that case the server ensures it sends a ping well before the PongWait + // timeout elapses. 
Each new pong message resets the server's read deadline, keeping the connection alive as long as + // the client is responsive. + // + // Example: + // At t=9, the server sends a ping, initial read deadline is t=10 (for the first message) + // At t=10, the client responds with a pong. The server resets its read deadline to t=20. + // At t=18, the server sends another ping. If the client responds with a pong at t=19, the read deadline is extended to t=29. + // + // In case of failure: + // If the client stops responding, the server will send a ping at t=9 but won't receive a pong by t=10. The server then closes the connection. + PingPeriod = (PongWait * 9) / 10 + + // PongWait specifies the maximum time to wait for a pong response message from the peer + // after sending a ping + PongWait = 10 * time.Second + + // WriteWait specifies a timeout for the write operation. If the write + // isn't completed within this duration, it fails with a timeout error. + // SetWriteDeadline ensures the write operation does not block indefinitely + // if the client is slow or unresponsive. This prevents resource exhaustion + // and allows the server to gracefully handle timeouts for delayed writes. + WriteWait = 10 * time.Second + + // DefaultMaxSubscriptionsPerConnection defines the default maximum number + // of WebSocket subscriptions allowed per connection. + DefaultMaxSubscriptionsPerConnection = 20 + + // DefaultMaxResponsesPerSecond defines the default maximum number of responses + // that can be sent to a single client per second. + DefaultMaxResponsesPerSecond = float64(0) + + // DefaultInactivityTimeout is the default duration a WebSocket connection can remain open without any active subscriptions + // before being automatically closed + DefaultInactivityTimeout time.Duration = 1 * time.Minute +) + +type Config struct { + // MaxSubscriptionsPerConnection specifies the maximum number of active + // WebSocket subscriptions allowed per connection. If a client attempts + // to create more subscriptions than this limit, an error will be returned, + // and the additional subscriptions will be rejected. + MaxSubscriptionsPerConnection uint64 + // MaxResponsesPerSecond defines the maximum number of responses that + // can be sent to a single client per second. 
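+	// A value of 0 (the default) disables this rate limit; see NewWebSocketController, which only
+	// creates a limiter when the value is positive.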
+ MaxResponsesPerSecond float64 + // InactivityTimeout specifies the duration a WebSocket connection can remain open without any active subscriptions + // before being automatically closed + InactivityTimeout time.Duration +} + +func NewDefaultWebsocketConfig() Config { + return Config{ + MaxSubscriptionsPerConnection: DefaultMaxSubscriptionsPerConnection, + MaxResponsesPerSecond: DefaultMaxResponsesPerSecond, + InactivityTimeout: DefaultInactivityTimeout, + } +} + +type KeepaliveConfig struct { + PingPeriod time.Duration + PongWait time.Duration +} + +func DefaultKeepaliveConfig() KeepaliveConfig { + return KeepaliveConfig{ + PingPeriod: PingPeriod, + PongWait: PongWait, + } +} diff --git a/engine/access/rest/websockets/connection.go b/engine/access/rest/websockets/connection.go new file mode 100644 index 00000000000..5170e917e9f --- /dev/null +++ b/engine/access/rest/websockets/connection.go @@ -0,0 +1,57 @@ +package websockets + +import ( + "time" + + "github.com/gorilla/websocket" +) + +type WebsocketConnection interface { + ReadJSON(v interface{}) error + WriteJSON(v interface{}) error + WriteControl(messageType int, deadline time.Time) error + Close() error + SetReadDeadline(deadline time.Time) error + SetWriteDeadline(deadline time.Time) error + SetPongHandler(h func(string) error) +} + +type WebsocketConnectionImpl struct { + conn *websocket.Conn +} + +func NewWebsocketConnection(conn *websocket.Conn) *WebsocketConnectionImpl { + return &WebsocketConnectionImpl{ + conn: conn, + } +} + +var _ WebsocketConnection = (*WebsocketConnectionImpl)(nil) + +func (c *WebsocketConnectionImpl) ReadJSON(v interface{}) error { + return c.conn.ReadJSON(v) +} + +func (c *WebsocketConnectionImpl) WriteJSON(v interface{}) error { + return c.conn.WriteJSON(v) +} + +func (c *WebsocketConnectionImpl) WriteControl(messageType int, deadline time.Time) error { + return c.conn.WriteControl(messageType, nil, deadline) +} + +func (c *WebsocketConnectionImpl) Close() error { + return c.conn.Close() +} + +func (c *WebsocketConnectionImpl) SetReadDeadline(deadline time.Time) error { + return c.conn.SetReadDeadline(deadline) +} + +func (c *WebsocketConnectionImpl) SetWriteDeadline(deadline time.Time) error { + return c.conn.SetWriteDeadline(deadline) +} + +func (c *WebsocketConnectionImpl) SetPongHandler(h func(string) error) { + c.conn.SetPongHandler(h) +} diff --git a/engine/access/rest/websockets/controller.go b/engine/access/rest/websockets/controller.go new file mode 100644 index 00000000000..bf201afe95f --- /dev/null +++ b/engine/access/rest/websockets/controller.go @@ -0,0 +1,612 @@ +// Package websockets provides a number of abstractions for managing WebSocket connections. +// It supports handling client subscriptions, sending messages, and maintaining +// the lifecycle of WebSocket connections with robust keepalive mechanisms. +// +// Overview +// +// The architecture of this package consists of three main components: +// +// 1. **Connection**: Responsible for providing a channel that allows the client +// to communicate with the server. It encapsulates WebSocket-level operations +// such as sending and receiving messages. +// 2. **Data Providers**: Standalone units responsible for fetching data from +// the blockchain (protocol). These providers act as sources of data that are +// sent to clients based on their subscriptions. +// 3. **Controller**: Acts as a mediator between the connection and data providers. 
+// It governs client subscriptions, handles client requests and responses, +// validates messages, and manages error handling. The controller ensures smooth +// coordination between the client and the data-fetching units. +// +// Basically, it is an N:1:1 approach: N data providers, 1 controller, 1 websocket connection. +// This allows a client to receive messages from different subscriptions over a single connection. +// +// ### Controller Details +// +// The `Controller` is the core component that coordinates the interactions between +// the client and data providers. It achieves this through three routines that run +// in parallel (writer, reader, and keepalive routine). If any of the three routines +// fails with an error, the remaining routines will be canceled using the provided +// context to ensure proper cleanup and termination. +// +// 1. **Reader Routine**: +// - Reads messages from the client WebSocket connection. +// - Parses and validates the messages. +// - Handles the messages by triggering the appropriate actions, such as subscribing +// to a topic or unsubscribing from an existing subscription. +// - Ensures proper validation of message formats and data before passing them to +// the internal handlers. +// +// 2. **Writer Routine**: +// - Listens to the `multiplexedStream`, which is a channel filled by data providers +// with messages that clients have subscribed to. +// - Writes these messages to the client WebSocket connection. +// - Ensures the outgoing messages respect the required deadlines to maintain the +// stability of the connection. +// +// 3. **Keepalive Routine**: +// - Periodically sends a WebSocket ping control message to the client to indicate +// that the controller and all its subscriptions are working as expected. +// - Ensures the connection remains clean and avoids timeout scenarios due to +// inactivity. +// - Resets the connection's read deadline whenever a pong message is received. +// +// Example +// +// Usage typically involves creating a `Controller` instance and invoking its +// `HandleConnection` method to manage a single WebSocket connection: +// +// logger := zerolog.New(os.Stdout) +// config := websockets.Config{/* configuration options */} +// conn := /* a WebsocketConnection implementation */ +// factory := /* a DataProviderFactory implementation */ +// +// controller := websockets.NewWebSocketController(logger, config, conn, factory) +// ctx := context.Background() +// controller.HandleConnection(ctx) +// +// +// Package Constants +// +// This package relies on the constants `PongWait` and `WriteWait` (defined in this +// package's config.go) to control the read/write deadlines. + +package websockets + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "sync" + "time" + + "golang.org/x/time/rate" + + "github.com/gorilla/websocket" + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" + + dp "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/utils/concurrentmap" +) + +// ErrMaxSubscriptionsReached is returned when the maximum number of active subscriptions per connection is exceeded. 
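+// Callers should detect it with errors.Is rather than by comparing error strings.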
+var ErrMaxSubscriptionsReached = errors.New("maximum number of subscriptions reached") + +type Controller struct { + logger zerolog.Logger + config Config + conn WebsocketConnection + + // The `multiplexedStream` is a core channel used for communication between the + // `Controller` and Data Providers. Its lifecycle is as follows: + // + // 1. **Data Providers**: + // - Data providers write their data into this channel, which is consumed by + // the writer routine to send messages to the client. + // 2. **Reader Routine**: + // - Writes OK/error responses to the channel as a result of processing client messages. + // 3. **Writer Routine**: + // - Reads messages from this channel and forwards them to the client WebSocket connection. + // + // 4. **Channel Closing**: + // The intention to close the channel comes from the reader-from-this-channel routines (controller's routines), + // not the writer-to-this-channel routines (data providers). + // Therefore, we have to signal the data providers to stop writing, wait for them to finish write operations, + // and only after that we can close the channel. + // + // - The `Controller` is responsible for starting and managing the lifecycle of the channel. + // - If an unrecoverable error occurs in any of the three routines (reader, writer, or keepalive), + // the parent context is canceled. This triggers data providers to stop their work. + // - The `multiplexedStream` will not be closed until all data providers signal that + // they have stopped writing to it via the `dataProvidersGroup` wait group. + // + // 5. **Edge Case - Writer Routine Finished Before Providers**: + // - If the writer routine finishes before all data providers, a separate draining routine + // ensures that the `multiplexedStream` is fully drained to prevent deadlocks. + // All remaining messages in this case will be discarded. + // + // This design ensures that the channel is only closed when it is safe to do so, avoiding + // issues such as sending on a closed channel while maintaining proper cleanup. + multiplexedStream chan interface{} + + dataProviders *concurrentmap.Map[SubscriptionID, dp.DataProvider] + dataProviderFactory dp.DataProviderFactory + dataProvidersGroup *sync.WaitGroup + limiter *rate.Limiter + + keepaliveConfig KeepaliveConfig +} + +func NewWebSocketController( + logger zerolog.Logger, + config Config, + conn WebsocketConnection, + dataProviderFactory dp.DataProviderFactory, +) *Controller { + var limiter *rate.Limiter + if config.MaxResponsesPerSecond > 0 { + limiter = rate.NewLimiter(rate.Limit(config.MaxResponsesPerSecond), 1) + } + + return &Controller{ + logger: logger.With().Str("component", "websocket-controller").Logger(), + config: config, + conn: conn, + multiplexedStream: make(chan interface{}), + dataProviders: concurrentmap.New[SubscriptionID, dp.DataProvider](), + dataProviderFactory: dataProviderFactory, + dataProvidersGroup: &sync.WaitGroup{}, + limiter: limiter, + keepaliveConfig: DefaultKeepaliveConfig(), + } +} + +// HandleConnection manages the lifecycle of a WebSocket connection, +// including setup, message processing, and graceful shutdown. +// +// Parameters: +// - ctx: The context for controlling cancellation and timeouts. 
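+//
+// HandleConnection blocks until the reader, writer, and keepalive routines have
+// all returned; any error other than a normal close is logged, and the
+// connection is then shut down.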
+func (c *Controller) HandleConnection(ctx context.Context) { + defer c.shutdownConnection() + + err := c.configureKeepalive() + if err != nil { + c.logger.Error().Err(err).Msg("error configuring keepalive connection") + return + } + + g, gCtx := errgroup.WithContext(ctx) + + g.Go(func() error { + return c.keepalive(gCtx) + }) + g.Go(func() error { + return c.writeMessages(gCtx) + }) + g.Go(func() error { + return c.readMessages(gCtx) + }) + + if err = g.Wait(); err != nil { + if errors.Is(err, websocket.ErrCloseSent) { + return + } + + c.logger.Error().Err(err).Msg("error detected in one of the goroutines") + } +} + +// configureKeepalive sets up the WebSocket connection with a read deadline +// and a handler for receiving pong messages from the client. +// +// The function does the following: +// 1. Sets an initial read deadline to ensure the server doesn't wait indefinitely +// for a pong message from the client. If no message is received within the +// specified `PongWait` duration, the connection will be closed. +// 2. Establishes a Pong handler that resets the read deadline every time a pong +// message is received from the client, allowing the server to continue waiting +// for further pong messages within the new deadline. +// +// No errors are expected during normal operation. +func (c *Controller) configureKeepalive() error { + // Set the initial read deadline for the first pong message. + // The Pong handler itself only resets the read deadline after receiving a Pong. + // It doesn't set an initial deadline. The initial read deadline is crucial to prevent the server from waiting + // forever if the client doesn't send Pongs. + if err := c.conn.SetReadDeadline(time.Now().Add(c.keepaliveConfig.PongWait)); err != nil { + return fmt.Errorf("failed to set the initial read deadline: %w", err) + } + + // Establish a Pong handler that resets the read deadline for every pong message received from the peer. + c.conn.SetPongHandler(func(string) error { + return c.conn.SetReadDeadline(time.Now().Add(c.keepaliveConfig.PongWait)) + }) + + return nil +} + +// keepalive sends a ping message periodically to keep the WebSocket connection alive +// and avoid timeouts. +func (c *Controller) keepalive(ctx context.Context) error { + defer func() { + // gracefully handle panics from github.com/gorilla/websocket + if r := recover(); r != nil { + c.logger.Warn().Interface("recovered_context", r).Msg("keepalive routine recovered from panic") + } + }() + + pingTicker := time.NewTicker(c.keepaliveConfig.PingPeriod) + defer pingTicker.Stop() + + for { + select { + case <-ctx.Done(): + return nil + case <-pingTicker.C: + err := c.conn.WriteControl(websocket.PingMessage, time.Now().Add(WriteWait)) + if err != nil { + var closeErr *websocket.CloseError + if errors.As(err, &closeErr) { + return err + } + + return fmt.Errorf("error sending ping: %w", err) + } + } + } +} + +// writeMessages reads messages from the multiplexed stream and passes them on to the client WebSocket connection. +// The multiplexed stream channel is filled by data providers. +// The function tracks the last message sent and periodically checks for inactivity. +// If no messages are sent within InactivityTimeout and no active data providers exist, +// the connection will be closed. 
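+//
+// Expected error returns: a failed write or write-deadline update, a
+// rate-limiter wait aborted by context cancellation, or the inactivity timeout
+// elapsing while no subscriptions are active.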
+func (c *Controller) writeMessages(ctx context.Context) error {
+	defer func() {
+		// gracefully handle panics from github.com/gorilla/websocket
+		if r := recover(); r != nil {
+			c.logger.Warn().Interface("recovered_context", r).Msg("writer routine recovered from panic")
+		}
+	}()
+
+	defer func() {
+		// drain the channel, as some providers may still send data to it after this routine shuts down;
+		// to avoid a deadlock, there must be at least one reader on the channel
+		go func() {
+			for range c.multiplexedStream {
+			}
+		}()
+	}()
+
+	inactivityTicker := time.NewTicker(c.inactivityTickerPeriod())
+	defer inactivityTicker.Stop()
+
+	lastMessageSentAt := time.Now()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		case message, ok := <-c.multiplexedStream:
+			if !ok {
+				return nil
+			}
+
+			if err := c.checkRateLimit(ctx); err != nil {
+				return fmt.Errorf("rate limiter wait failed: %w", err)
+			}
+
+			// Specifies a timeout for the write operation. If the write
+			// isn't completed within this duration, it fails with a timeout error.
+			// SetWriteDeadline ensures the write operation does not block indefinitely
+			// if the client is slow or unresponsive. This prevents resource exhaustion
+			// and allows the server to gracefully handle timeouts for delayed writes.
+			if err := c.conn.SetWriteDeadline(time.Now().Add(WriteWait)); err != nil {
+				return fmt.Errorf("failed to set the write deadline: %w", err)
+			}
+
+			if err := c.conn.WriteJSON(message); err != nil {
+				return err
+			}
+
+			lastMessageSentAt = time.Now()
+
+		case <-inactivityTicker.C:
+			hasNoActiveSubscriptions := c.dataProviders.Size() == 0
+			exceedsInactivityTimeout := time.Since(lastMessageSentAt) > c.config.InactivityTimeout
+			if hasNoActiveSubscriptions && exceedsInactivityTimeout {
+				c.logger.Debug().
+					Dur("timeout", c.config.InactivityTimeout).
+					Msg("connection inactive, closing due to timeout")
+				return fmt.Errorf("no recent activity for %v", c.config.InactivityTimeout)
+			}
+		}
+	}
+}
+
+// inactivityTickerPeriod determines the interval at which the inactivity ticker is triggered.
+//
+// The inactivity ticker is used in the `writeMessages` routine to monitor periods of inactivity
+// in outgoing messages. If no messages are sent within the defined inactivity timeout
+// and there are no active data providers, the WebSocket connection will be terminated.
+func (c *Controller) inactivityTickerPeriod() time.Duration {
+	return c.config.InactivityTimeout / 10
+}
+
+// readMessages continuously reads messages from a client WebSocket connection,
+// validates each message, and processes it based on the message type.
+func (c *Controller) readMessages(ctx context.Context) error {
+	defer func() {
+		// gracefully handle panics from github.com/gorilla/websocket
+		if r := recover(); r != nil {
+			c.logger.Warn().Interface("recovered_context", r).Msg("reader routine recovered from panic")
+		}
+	}()
+
+	for {
+		select {
+		// ctx.Done() is necessary in readMessages() to gracefully handle the termination of the connection
+		// and prevent a potential panic ("repeated read on failed websocket connection"). If an error occurs in writeMessages(),
+		// it indirectly affects the keepalive mechanism.
+		// This can stop periodic ping messages from being sent to the client, which in turn stops the
+		// client's pongs and lets the read deadline expire.
+		// Without ctx.Done(), readMessages could continue blocking on a read operation, eventually encountering an i/o timeout
+		// when no data arrives.
By monitoring ctx.Done(), we ensure that readMessages exits promptly when the context is canceled + // due to errors elsewhere in the system or intentional shutdown. + case <-ctx.Done(): + return nil + default: + var message json.RawMessage + if err := c.conn.ReadJSON(&message); err != nil { + var closeErr *websocket.CloseError + if errors.As(err, &closeErr) { + return err + } + + err = fmt.Errorf("error reading message: %w", err) + c.writeErrorResponse( + ctx, + err, + wrapErrorMessage(http.StatusBadRequest, err.Error(), "", ""), + ) + continue + } + + err := c.handleMessage(ctx, message) + if err != nil { + err = fmt.Errorf("error parsing message: %w", err) + c.writeErrorResponse( + ctx, + err, + wrapErrorMessage(http.StatusBadRequest, err.Error(), "", ""), + ) + continue + } + } + } + +} + +func (c *Controller) handleMessage(ctx context.Context, message json.RawMessage) error { + var baseMsg models.BaseMessageRequest + if err := json.Unmarshal(message, &baseMsg); err != nil { + return fmt.Errorf("error unmarshalling base message: %w", err) + } + + switch baseMsg.Action { + case models.SubscribeAction: + var subscribeMsg models.SubscribeMessageRequest + if err := json.Unmarshal(message, &subscribeMsg); err != nil { + return fmt.Errorf("error unmarshalling subscribe message: %w", err) + } + c.handleSubscribe(ctx, subscribeMsg) + + case models.UnsubscribeAction: + var unsubscribeMsg models.UnsubscribeMessageRequest + if err := json.Unmarshal(message, &unsubscribeMsg); err != nil { + return fmt.Errorf("error unmarshalling unsubscribe message: %w", err) + } + c.handleUnsubscribe(ctx, unsubscribeMsg) + + case models.ListSubscriptionsAction: + var listMsg models.ListSubscriptionsMessageRequest + if err := json.Unmarshal(message, &listMsg); err != nil { + return fmt.Errorf("error unmarshalling list subscriptions message: %w", err) + } + c.handleListSubscriptions(ctx, listMsg) + + default: + c.logger.Debug().Str("action", baseMsg.Action).Msg("unknown action type") + return fmt.Errorf("unknown action type: %s", baseMsg.Action) + } + + return nil +} + +// handleSubscribe processes a subscription request. +// +// Expected error returns during normal operations: +// - ErrMaxSubscriptionsReached: if the maximum number of active subscriptions per connection is exceeded. +func (c *Controller) handleSubscribe(ctx context.Context, msg models.SubscribeMessageRequest) { + // Check if the maximum number of active subscriptions per connection has been reached. + // If the limit is exceeded, an error is returned, and the subscription request is rejected. 
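+	// (Editorial note) A rejection is delivered to the client as the JSON error
+	// envelope built by wrapErrorMessage; schematically (field names are
+	// illustrative, the actual ones come from models.BaseMessageResponse):
+	//
+	//	{"subscription_id": "...", "action": "subscribe", "error": {"code": 429, "message": "..."}}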
+	if uint64(c.dataProviders.Size()) >= c.config.MaxSubscriptionsPerConnection {
+		err := fmt.Errorf("error creating new subscription: %w", ErrMaxSubscriptionsReached)
+		c.writeErrorResponse(
+			ctx,
+			err,
+			wrapErrorMessage(http.StatusTooManyRequests, err.Error(), models.SubscribeAction, msg.SubscriptionID),
+		)
+		return
+	}
+
+	subscriptionID, err := c.parseOrCreateSubscriptionID(msg.SubscriptionID)
+	if err != nil {
+		err = fmt.Errorf("error parsing subscription id: %w", err)
+		c.writeErrorResponse(
+			ctx,
+			err,
+			wrapErrorMessage(http.StatusBadRequest, err.Error(), models.SubscribeAction, msg.SubscriptionID),
+		)
+		return
+	}
+
+	// register new provider
+	provider, err := c.dataProviderFactory.NewDataProvider(ctx, subscriptionID.String(), msg.Topic, msg.Arguments, c.multiplexedStream)
+	if err != nil {
+		err = fmt.Errorf("error creating data provider: %w", err)
+		c.writeErrorResponse(
+			ctx,
+			err,
+			wrapErrorMessage(http.StatusBadRequest, err.Error(), models.SubscribeAction, subscriptionID.String()),
+		)
+		return
+	}
+	c.dataProviders.Add(subscriptionID, provider)
+
+	// write OK response to client
+	responseOk := models.SubscribeMessageResponse{
+		BaseMessageResponse: models.BaseMessageResponse{
+			SubscriptionID: subscriptionID.String(),
+			Action:         models.SubscribeAction,
+		},
+	}
+	c.writeResponse(ctx, responseOk)
+
+	// run provider
+	c.dataProvidersGroup.Add(1)
+	go func() {
+		// use a fresh variable rather than reusing the enclosing scope's err,
+		// since this goroutine outlives handleSubscribe
+		runErr := provider.Run()
+		if runErr != nil {
+			runErr = fmt.Errorf("internal error: %w", runErr)
+			c.writeErrorResponse(
+				ctx,
+				runErr,
+				wrapErrorMessage(http.StatusInternalServerError, runErr.Error(),
+					models.SubscribeAction, subscriptionID.String()),
+			)
+		}
+
+		c.dataProvidersGroup.Done()
+		c.dataProviders.Remove(subscriptionID)
+	}()
+}
+
+func (c *Controller) handleUnsubscribe(ctx context.Context, msg models.UnsubscribeMessageRequest) {
+	subscriptionID, err := ParseClientSubscriptionID(msg.SubscriptionID)
+	if err != nil {
+		err = fmt.Errorf("error parsing subscription id: %w", err)
+		c.writeErrorResponse(
+			ctx,
+			err,
+			wrapErrorMessage(http.StatusBadRequest, err.Error(), models.UnsubscribeAction, msg.SubscriptionID),
+		)
+		return
+	}
+
+	provider, ok := c.dataProviders.Get(subscriptionID)
+	if !ok {
+		// construct a real error here; err is nil at this point
+		err = fmt.Errorf("subscription not found: %s", subscriptionID.String())
+		c.writeErrorResponse(
+			ctx,
+			err,
+			wrapErrorMessage(http.StatusNotFound, "subscription not found",
+				models.UnsubscribeAction, subscriptionID.String()),
+		)
+		return
+	}
+
+	provider.Close()
+	c.dataProviders.Remove(subscriptionID)
+
+	responseOk := models.UnsubscribeMessageResponse{
+		BaseMessageResponse: models.BaseMessageResponse{
+			SubscriptionID: subscriptionID.String(),
+			Action:         models.UnsubscribeAction,
+		},
+	}
+	c.writeResponse(ctx, responseOk)
+}
+
+func (c *Controller) handleListSubscriptions(ctx context.Context, _ models.ListSubscriptionsMessageRequest) {
+	var subs []*models.SubscriptionEntry
+	_ = c.dataProviders.ForEach(func(id SubscriptionID, provider dp.DataProvider) error {
+		subs = append(subs, &models.SubscriptionEntry{
+			SubscriptionID: id.String(),
+			Topic:          provider.Topic(),
+			Arguments:      provider.Arguments(),
+		})
+		return nil
+	})
+
+	responseOk := models.ListSubscriptionsMessageResponse{
+		Subscriptions: subs,
+		Action:        models.ListSubscriptionsAction,
+	}
+	c.writeResponse(ctx, responseOk)
+}
+
+func (c *Controller) shutdownConnection() {
+	err := c.conn.Close()
+	if err != nil {
+		c.logger.Debug().Err(err).Msg("error closing connection")
+	}
+
+	_ = c.dataProviders.ForEach(func(_ SubscriptionID, provider dp.DataProvider) error {
+		provider.Close()
+		return nil
+	})
+
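+	// (Editorial note) The ordering below matters: providers are closed above,
+	// the wait group then guarantees no provider goroutine still writes to
+	// multiplexedStream, and only afterwards is the channel closed. Closing any
+	// earlier could panic with "send on closed channel".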
c.dataProviders.Clear() + c.dataProvidersGroup.Wait() + close(c.multiplexedStream) +} + +func (c *Controller) writeErrorResponse(ctx context.Context, err error, msg models.BaseMessageResponse) { + c.logger.Debug().Err(err).Msg(msg.Error.Message) + c.writeResponse(ctx, msg) +} + +func (c *Controller) writeResponse(ctx context.Context, response interface{}) { + select { + case <-ctx.Done(): + return + case c.multiplexedStream <- response: + } +} + +func wrapErrorMessage(code int, message string, action string, subscriptionID string) models.BaseMessageResponse { + return models.BaseMessageResponse{ + SubscriptionID: subscriptionID, + Error: &models.ErrorMessage{ + Code: code, + Message: message, + }, + Action: action, + } +} + +func (c *Controller) parseOrCreateSubscriptionID(id string) (SubscriptionID, error) { + newId, err := NewSubscriptionID(id) + if err != nil { + return SubscriptionID{}, err + } + + if c.dataProviders.Has(newId) { + return SubscriptionID{}, fmt.Errorf("subscription ID is already in use: %s", newId) + } + + return newId, nil +} + +// checkRateLimit checks the controller rate limit and blocks until there is room to send a response. +// An error is returned if the context is canceled or the expected wait time exceeds the context's +// deadline. +func (c *Controller) checkRateLimit(ctx context.Context) error { + if c.limiter == nil { + return nil + } + + return c.limiter.WaitN(ctx, 1) +} diff --git a/engine/access/rest/websockets/controller_test.go b/engine/access/rest/websockets/controller_test.go new file mode 100644 index 00000000000..44df8a0746b --- /dev/null +++ b/engine/access/rest/websockets/controller_test.go @@ -0,0 +1,1069 @@ +package websockets + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "testing" + "time" + + "github.com/onflow/flow-go/engine/access/rest/common/parser" + + "github.com/google/uuid" + "github.com/gorilla/websocket" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + dp "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers" + dpmock "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/mock" + connmock "github.com/onflow/flow-go/engine/access/rest/websockets/mock" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// WsControllerSuite is a test suite for the WebSocket Controller. +type WsControllerSuite struct { + suite.Suite + + logger zerolog.Logger + wsConfig Config +} + +func TestControllerSuite(t *testing.T) { + suite.Run(t, new(WsControllerSuite)) +} + +// SetupTest initializes the test suite with required dependencies. +func (s *WsControllerSuite) SetupTest() { + s.logger = unittest.Logger() + s.wsConfig = NewDefaultWebsocketConfig() +} + +// TestSubscribeRequest tests the subscribe to topic flow. +// We emulate a request message from a client, and a response message from a controller. +func (s *WsControllerSuite) TestSubscribeRequest() { + s.T().Run("Happy path", func(t *testing.T) { + t.Parallel() + + conn, dataProviderFactory, dataProvider := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + dataProviderFactory. + On("NewDataProvider", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(dataProvider, nil). 
+ Once() + + done := make(chan struct{}) + // data provider might finish on its own or controller will close it via Close() + dataProvider.On("Close").Return(nil).Maybe() + dataProvider. + On("Run", mock.Anything). + Run(func(args mock.Arguments) { + <-done + }). + Return(nil). + Once() + + request := models.SubscribeMessageRequest{ + BaseMessageRequest: models.BaseMessageRequest{ + SubscriptionID: "dummy-id", + Action: models.SubscribeAction, + }, + Topic: dp.BlocksTopic, + Arguments: nil, + } + requestJson, err := json.Marshal(request) + require.NoError(t, err) + + // Simulate receiving the subscription request from the client + conn. + On("ReadJSON", mock.Anything). + Run(func(args mock.Arguments) { + msg, ok := args.Get(0).(*json.RawMessage) + require.True(t, ok) + *msg = requestJson + }). + Return(nil). + Once() + + conn. + On("WriteJSON", mock.Anything). + Return(func(msg interface{}) error { + defer close(done) + + response, ok := msg.(models.SubscribeMessageResponse) + require.True(t, ok) + require.Equal(t, request.SubscriptionID, response.SubscriptionID) + + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }) + + s.expectCloseConnection(conn, done) + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + dataProvider.AssertExpectations(t) + }) + + s.T().Run("Validate message error", func(t *testing.T) { + t.Parallel() + + conn, dataProviderFactory, _ := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + type Request struct { + Action string `json:"action"` + } + + subscribeRequest := Request{ + Action: "SubscribeBlocks", + } + subscribeRequestJson, err := json.Marshal(subscribeRequest) + require.NoError(t, err) + + // Simulate receiving the subscription request from the client + conn. + On("ReadJSON", mock.Anything). + Run(func(args mock.Arguments) { + msg, ok := args.Get(0).(*json.RawMessage) + require.True(t, ok) + *msg = subscribeRequestJson + }). + Return(nil). + Once() + + done := make(chan struct{}) + conn. + On("WriteJSON", mock.Anything). + Return(func(msg interface{}) error { + defer close(done) + + response, ok := msg.(models.BaseMessageResponse) + require.True(t, ok) + require.NotEmpty(t, response.Error) + require.Equal(t, http.StatusBadRequest, response.Error.Code) + require.Equal(t, "", response.Action) + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }) + + s.expectCloseConnection(conn, done) + + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + }) + + s.T().Run("Error creating data provider", func(t *testing.T) { + t.Parallel() + + conn, dataProviderFactory, _ := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + dataProviderFactory. + On("NewDataProvider", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil, fmt.Errorf("error creating data provider")). + Once() + + done := make(chan struct{}) + subscriptionID := "dummy-id" + s.expectSubscribeRequest(t, conn, subscriptionID) + + conn. + On("WriteJSON", mock.Anything). 
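+			// (Editorial note) mockery-generated mocks resolve a Return value that
+			// is a function by invoking it with the call's arguments, so this single
+			// callback both asserts on the written message and injects the close
+			// error that ends the connection.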
+ Return(func(msg interface{}) error { + defer close(done) + + response, ok := msg.(models.BaseMessageResponse) + require.True(t, ok) + require.NotEmpty(t, response.Error) + require.Equal(t, http.StatusBadRequest, response.Error.Code) + require.Equal(t, models.SubscribeAction, response.Action) + + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }) + + s.expectCloseConnection(conn, done) + + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + }) + + s.T().Run("Provider execution error", func(t *testing.T) { + t.Parallel() + + conn, dataProviderFactory, dataProvider := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + // data provider might finish on its own or controller will close it via Close() + dataProvider.On("Close").Return(nil).Maybe() + dataProvider. + On("Run", mock.Anything). + Run(func(args mock.Arguments) {}). + Return(fmt.Errorf("error running data provider")). + Once() + + dataProviderFactory. + On("NewDataProvider", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(dataProvider, nil). + Once() + + done := make(chan struct{}) + subscriptionID := "dummy-id" + s.expectSubscribeRequest(t, conn, subscriptionID) + s.expectSubscribeResponse(t, conn, subscriptionID) + + conn. + On("WriteJSON", mock.Anything). + Return(func(msg interface{}) error { + defer close(done) + + response, ok := msg.(models.BaseMessageResponse) + require.True(t, ok) + require.NotEmpty(t, response.Error) + require.Equal(t, http.StatusInternalServerError, response.Error.Code) + require.Equal(t, models.SubscribeAction, response.Action) + + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }) + + s.expectCloseConnection(conn, done) + + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + dataProvider.AssertExpectations(t) + }) +} + +func (s *WsControllerSuite) TestUnsubscribeRequest() { + s.T().Run("Happy path", func(t *testing.T) { + t.Parallel() + + conn, dataProviderFactory, dataProvider := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + dataProviderFactory. + On("NewDataProvider", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(dataProvider, nil). + Once() + + done := make(chan struct{}) + // data provider might finish on its own or controller will close it via Close() + dataProvider.On("Close").Return(nil).Maybe() + dataProvider. + On("Run", mock.Anything). + Run(func(args mock.Arguments) { + <-done + }). + Return(nil). + Once() + + subscriptionID := "dummy-id" + s.expectSubscribeRequest(t, conn, subscriptionID) + s.expectSubscribeResponse(t, conn, subscriptionID) + + request := models.UnsubscribeMessageRequest{ + BaseMessageRequest: models.BaseMessageRequest{ + SubscriptionID: subscriptionID, + Action: models.UnsubscribeAction, + }, + } + requestJson, err := json.Marshal(request) + require.NoError(t, err) + + conn. + On("ReadJSON", mock.Anything). + Run(func(args mock.Arguments) { + msg, ok := args.Get(0).(*json.RawMessage) + require.True(t, ok) + *msg = requestJson + }). + Return(nil). + Once() + + conn. + On("WriteJSON", mock.Anything). 
+ Return(func(msg interface{}) error { + defer close(done) + + response, ok := msg.(models.UnsubscribeMessageResponse) + require.True(t, ok) + require.Empty(t, response.Error) + require.Equal(t, request.SubscriptionID, response.SubscriptionID) + + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }). + Once() + + s.expectCloseConnection(conn, done) + + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + dataProvider.AssertExpectations(t) + }) + + s.T().Run("Invalid subscription uuid", func(t *testing.T) { + t.Parallel() + + conn, dataProviderFactory, dataProvider := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + dataProviderFactory. + On("NewDataProvider", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(dataProvider, nil). + Once() + + done := make(chan struct{}) + // data provider might finish on its own or controller will close it via Close() + dataProvider.On("Close").Return(nil).Maybe() + dataProvider. + On("Run", mock.Anything). + Run(func(args mock.Arguments) { + <-done + }). + Return(nil). + Once() + + subscriptionID := "dummy-id" + s.expectSubscribeRequest(t, conn, subscriptionID) + s.expectSubscribeResponse(t, conn, subscriptionID) + + request := models.UnsubscribeMessageRequest{ + BaseMessageRequest: models.BaseMessageRequest{ + SubscriptionID: uuid.New().String() + " .42", // invalid subscription ID + Action: models.UnsubscribeAction, + }, + } + requestJson, err := json.Marshal(request) + require.NoError(t, err) + + conn. + On("ReadJSON", mock.Anything). + Run(func(args mock.Arguments) { + msg, ok := args.Get(0).(*json.RawMessage) + require.True(t, ok) + *msg = requestJson + }). + Return(nil). + Once() + + conn. + On("WriteJSON", mock.Anything). + Return(func(msg interface{}) error { + defer close(done) + + response, ok := msg.(models.BaseMessageResponse) + require.True(t, ok) + require.NotEmpty(t, response.Error) + require.Equal(t, request.SubscriptionID, response.SubscriptionID) + require.Equal(t, http.StatusBadRequest, response.Error.Code) + require.Equal(t, models.UnsubscribeAction, response.Action) + + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }). + Once() + + s.expectCloseConnection(conn, done) + + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + dataProvider.AssertExpectations(t) + }) + + s.T().Run("Unsubscribe from unknown subscription", func(t *testing.T) { + t.Parallel() + + conn, dataProviderFactory, dataProvider := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + dataProviderFactory. + On("NewDataProvider", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(dataProvider, nil). + Once() + + done := make(chan struct{}) + // data provider might finish on its own or controller will close it via Close() + dataProvider.On("Close").Return(nil).Maybe() + dataProvider. + On("Run", mock.Anything). + Run(func(args mock.Arguments) { + <-done + }). + Return(nil). 
+ Once() + + subscriptionID := "dummy-id" + s.expectSubscribeRequest(t, conn, subscriptionID) + s.expectSubscribeResponse(t, conn, subscriptionID) + + request := models.UnsubscribeMessageRequest{ + BaseMessageRequest: models.BaseMessageRequest{ + SubscriptionID: "unknown-sub-id", + Action: models.UnsubscribeAction, + }, + } + requestJson, err := json.Marshal(request) + require.NoError(t, err) + + conn. + On("ReadJSON", mock.Anything). + Run(func(args mock.Arguments) { + msg, ok := args.Get(0).(*json.RawMessage) + require.True(t, ok) + *msg = requestJson + }). + Return(nil). + Once() + + conn. + On("WriteJSON", mock.Anything). + Return(func(msg interface{}) error { + defer close(done) + + response, ok := msg.(models.BaseMessageResponse) + require.True(t, ok) + require.Equal(t, request.SubscriptionID, response.SubscriptionID) + + require.NotEmpty(t, response.Error) + require.Equal(t, http.StatusNotFound, response.Error.Code) + + require.Equal(t, models.UnsubscribeAction, response.Action) + + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }). + Once() + + s.expectCloseConnection(conn, done) + + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + dataProvider.AssertExpectations(t) + }) +} + +func (s *WsControllerSuite) TestListSubscriptions() { + s.T().Run("Happy path", func(t *testing.T) { + + conn, dataProviderFactory, dataProvider := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + dataProviderFactory. + On("NewDataProvider", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(dataProvider, nil). + Once() + + done := make(chan struct{}) + + topic := dp.BlocksTopic + arguments := models.Arguments{ + "start_block_id": unittest.IdentifierFixture().String(), + "block_status": parser.Finalized, + } + dataProvider.On("Topic").Return(topic) + dataProvider.On("Arguments").Return(arguments) + // data provider might finish on its own or controller will close it via Close() + dataProvider.On("Close").Return(nil).Maybe() + dataProvider. + On("Run", mock.Anything). + Run(func(args mock.Arguments) { + <-done + }). + Return(nil). + Once() + + subscriptionID := "dummy-id" + s.expectSubscribeRequest(t, conn, subscriptionID) + s.expectSubscribeResponse(t, conn, subscriptionID) + + request := models.ListSubscriptionsMessageRequest{ + BaseMessageRequest: models.BaseMessageRequest{ + SubscriptionID: "", + Action: models.ListSubscriptionsAction, + }, + } + requestJson, err := json.Marshal(request) + require.NoError(t, err) + + conn. + On("ReadJSON", mock.Anything). + Run(func(args mock.Arguments) { + msg, ok := args.Get(0).(*json.RawMessage) + require.True(t, ok) + *msg = requestJson + }). + Return(nil). + Once() + + conn. + On("WriteJSON", mock.Anything). + Return(func(msg interface{}) error { + defer close(done) + + response, ok := msg.(models.ListSubscriptionsMessageResponse) + require.True(t, ok) + require.Equal(t, 1, len(response.Subscriptions)) + require.Equal(t, subscriptionID, response.Subscriptions[0].SubscriptionID) + require.Equal(t, topic, response.Subscriptions[0].Topic) + require.Equal(t, arguments, response.Subscriptions[0].Arguments) + require.Equal(t, models.ListSubscriptionsAction, response.Action) + + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }). 
+ Once() + + s.expectCloseConnection(conn, done) + + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + dataProvider.AssertExpectations(t) + }) +} + +// TestSubscribeBlocks tests the functionality for streaming blocks to a subscriber. +func (s *WsControllerSuite) TestSubscribeBlocks() { + s.T().Run("Stream one block", func(t *testing.T) { + t.Parallel() + + conn, dataProviderFactory, dataProvider := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + dataProviderFactory. + On("NewDataProvider", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(dataProvider, nil). + Once() + + // data provider might finish on its own or controller will close it via Close() + dataProvider.On("Close").Return(nil).Maybe() + + // Simulate data provider write a block to the controller + expectedBlock := *unittest.BlockFixture() + dataProvider. + On("Run", mock.Anything). + Run(func(args mock.Arguments) { + controller.multiplexedStream <- expectedBlock + }). + Return(nil). + Once() + + done := make(chan struct{}) + subscriptionID := "dummy-id" + s.expectSubscribeRequest(t, conn, subscriptionID) + s.expectSubscribeResponse(t, conn, subscriptionID) + + // Expect a valid block to be passed to WriteJSON. + // If we got to this point, the controller executed all its logic properly + var actualBlock flow.Block + conn. + On("WriteJSON", mock.Anything). + Return(func(msg interface{}) error { + defer close(done) + + block, ok := msg.(flow.Block) + require.True(t, ok) + actualBlock = block + require.Equal(t, expectedBlock, actualBlock) + + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }) + + s.expectCloseConnection(conn, done) + + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + dataProvider.AssertExpectations(t) + }) + + s.T().Run("Stream many blocks", func(t *testing.T) { + t.Parallel() + + conn, dataProviderFactory, dataProvider := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + dataProviderFactory. + On("NewDataProvider", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(dataProvider, nil). + Once() + + // data provider might finish on its own or controller will close it via Close() + dataProvider.On("Close").Return(nil).Maybe() + + // Simulate data provider writes some blocks to the controller + expectedBlocks := unittest.BlockFixtures(100) + dataProvider. + On("Run", mock.Anything). + Run(func(args mock.Arguments) { + for _, block := range expectedBlocks { + controller.multiplexedStream <- *block + } + }). + Return(nil). + Once() + + done := make(chan struct{}) + subscriptionID := "dummy-id" + s.expectSubscribeRequest(t, conn, subscriptionID) + s.expectSubscribeResponse(t, conn, subscriptionID) + + i := 0 + actualBlocks := make([]*flow.Block, len(expectedBlocks)) + + // Expect valid blocks to be passed to WriteJSON. + // If we got to this point, the controller executed all its logic properly + conn. + On("WriteJSON", mock.Anything). + Return(func(msg interface{}) error { + block, ok := msg.(flow.Block) + require.True(t, ok) + + actualBlocks[i] = &block + i += 1 + + if i == len(expectedBlocks) { + require.Equal(t, expectedBlocks, actualBlocks) + close(done) + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + } + + return nil + }). 
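+			// (Editorial note) Ordering is deterministic here: multiplexedStream is
+			// unbuffered and drained by a single writer routine, so blocks reach
+			// WriteJSON in exactly the order the provider sent them.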
+ Times(len(expectedBlocks)) + + s.expectCloseConnection(conn, done) + + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + dataProvider.AssertExpectations(t) + }) +} + +// TestRateLimiter tests the rate-limiting functionality of the WebSocket controller. +// +// Test Steps: +// 1. Create a mock WebSocket connection with behavior for `SetWriteDeadline` and `WriteJSON`. +// 2. Configure the WebSocket controller with a rate limit of 2 responses per second. +// 3. Simulate sending messages to the `multiplexedStream` channel. +// 4. Collect timestamps of message writes to verify rate-limiting behavior. +// 5. Assert that all messages are processed and that the delay between messages respects the configured rate limit. +// +// The test ensures that: +// - The number of messages processed matches the total messages sent. +// - The delay between consecutive messages falls within the expected range based on the rate limit, with a tolerance of 5ms. +func (s *WsControllerSuite) TestRateLimiter() { + t := s.T() + totalMessages := 5 // Number of messages to simulate. + + // Step 1: Create a mock WebSocket connection. + conn := connmock.NewWebsocketConnection(t) + conn.On("SetWriteDeadline", mock.Anything).Return(nil).Times(totalMessages) + + // Step 2: Configure the WebSocket controller with a rate limit. + config := NewDefaultWebsocketConfig() + config.MaxResponsesPerSecond = 2 + + controller := NewWebSocketController(s.logger, config, conn, nil) + + // Step 3: Simulate sending messages to the controller's `multiplexedStream`. + go func() { + for i := 0; i < totalMessages; i++ { + controller.multiplexedStream <- map[string]interface{}{ + "message": i, + } + } + close(controller.multiplexedStream) + }() + + // Step 4: Collect timestamps of message writes for verification. + var timestamps []time.Time + msgCounter := 0 + conn.On("WriteJSON", mock.Anything).Run(func(args mock.Arguments) { + timestamps = append(timestamps, time.Now()) + + // Extract the actual written message + actualMessage := args.Get(0).(map[string]interface{}) + expectedMessage := map[string]interface{}{"message": msgCounter} + msgCounter++ + + assert.Equal(t, expectedMessage, actualMessage, "Received message does not match the expected message") + }).Return(nil).Times(totalMessages) + + // Invoke the `writeMessages` method to process the stream. + _ = controller.writeMessages(context.Background()) + + // Step 5: Verify that all messages are processed. + require.Len(t, timestamps, totalMessages, "All messages should be processed") + + // Calculate the expected delay between messages based on the rate limit. + expectedDelay := time.Second / time.Duration(config.MaxResponsesPerSecond) + const tolerance = float64(5 * time.Millisecond) // Allow up to 5ms deviation. + + // Step 6: Assert that the delays respect the rate limit with tolerance. + for i := 1; i < len(timestamps); i++ { + delay := timestamps[i].Sub(timestamps[i-1]) + assert.InDelta(t, expectedDelay, delay, tolerance, "Messages should respect the rate limit") + } +} + +// TestConfigureKeepaliveConnection ensures that the WebSocket connection is configured correctly. 
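+//
+// (Editorial note) A keepalive config is only sound when PingPeriod < PongWait,
+// otherwise the read deadline can expire between pings. The common
+// gorilla/websocket convention, which this test does not depend on, is:
+//
+//	pongWait := 10 * time.Second
+//	pingPeriod := (pongWait * 9) / 10 // fires before the deadline lapses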
+func (s *WsControllerSuite) TestConfigureKeepaliveConnection() { + s.T().Run("Happy path", func(t *testing.T) { + conn := connmock.NewWebsocketConnection(t) + conn.On("SetPongHandler", mock.AnythingOfType("func(string) error")).Return(nil).Once() + conn.On("SetReadDeadline", mock.Anything).Return(nil) + + factory := dpmock.NewDataProviderFactory(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, factory) + + err := controller.configureKeepalive() + s.Require().NoError(err, "configureKeepalive should not return an error") + + conn.AssertExpectations(t) + }) +} + +func (s *WsControllerSuite) TestControllerShutdown() { + s.T().Run("Keepalive routine initiated shutdown", func(t *testing.T) { + t.Parallel() + + conn := connmock.NewWebsocketConnection(t) + conn.On("Close").Return(nil).Once() + conn.On("SetReadDeadline", mock.Anything).Return(nil).Once() + conn.On("SetPongHandler", mock.AnythingOfType("func(string) error")).Return(nil).Once() + + factory := dpmock.NewDataProviderFactory(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, factory) + + // Mock keepalive to return an error + done := make(chan struct{}, 1) + conn. + On("WriteControl", websocket.PingMessage, mock.Anything). + Return(func(int, time.Time) error { + close(done) + return assert.AnError + }). + Once() + + conn. + On("ReadJSON", mock.Anything). + Return(func(interface{}) error { + <-done + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }). + Once() + + controller.HandleConnection(context.Background()) + conn.AssertExpectations(t) + }) + + // TODO: we should test a case when the read routine fails with an arbitrary error (assert.NoError) + s.T().Run("Read routine initiated shutdown", func(t *testing.T) { + t.Parallel() + + conn := connmock.NewWebsocketConnection(t) + conn.On("Close").Return(nil).Once() + conn.On("SetReadDeadline", mock.Anything).Return(nil).Once() + conn.On("SetPongHandler", mock.AnythingOfType("func(string) error")).Return(nil).Once() + + factory := dpmock.NewDataProviderFactory(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, factory) + + conn. + On("ReadJSON", mock.Anything). + Return(func(_ interface{}) error { + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }). + Once() + + controller.HandleConnection(context.Background()) + conn.AssertExpectations(t) + }) + + s.T().Run("Write routine failed", func(t *testing.T) { + t.Parallel() + + conn, dataProviderFactory, dataProvider := newControllerMocks(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, dataProviderFactory) + + dataProviderFactory. + On("NewDataProvider", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(dataProvider, nil). + Once() + + // data provider might finish on its own or controller will close it via Close() + dataProvider.On("Close").Return(nil).Maybe() + + dataProvider. + On("Run", mock.Anything). + Run(func(args mock.Arguments) { + controller.multiplexedStream <- unittest.BlockFixture() + }). + Return(nil). + Once() + + done := make(chan struct{}) + subscriptionID := "dummy-id" + s.expectSubscribeRequest(t, conn, subscriptionID) + s.expectSubscribeResponse(t, conn, subscriptionID) + + conn. + On("WriteJSON", mock.Anything). 
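+		// (Editorial note) Returning assert.AnError from WriteJSON fails the
+		// writer routine, which cancels the errgroup context shared with the
+		// reader and keepalive routines, so HandleConnection tears the whole
+		// connection down and Close() is observed exactly once.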
+ Return(func(msg interface{}) error { + close(done) + return assert.AnError + }) + + s.expectCloseConnection(conn, done) + + controller.HandleConnection(context.Background()) + + // Ensure all expectations are met + conn.AssertExpectations(t) + dataProviderFactory.AssertExpectations(t) + dataProvider.AssertExpectations(t) + }) + + s.T().Run("Context cancelled", func(t *testing.T) { + t.Parallel() + + conn := connmock.NewWebsocketConnection(t) + conn.On("Close").Return(nil).Once() + conn.On("SetReadDeadline", mock.Anything).Return(nil).Once() + conn.On("SetPongHandler", mock.AnythingOfType("func(string) error")).Return(nil).Once() + + factory := dpmock.NewDataProviderFactory(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, factory) + + ctx, cancel := context.WithCancel(context.Background()) + + cancel() + controller.HandleConnection(ctx) + + conn.AssertExpectations(t) + }) + + s.T().Run("Inactivity tracking", func(t *testing.T) { + t.Parallel() + + conn := connmock.NewWebsocketConnection(t) + conn.On("Close").Return(nil).Once() + conn.On("SetReadDeadline", mock.Anything).Return(nil).Once() + conn.On("SetPongHandler", mock.AnythingOfType("func(string) error")).Return(nil).Once() + + factory := dpmock.NewDataProviderFactory(t) + // Mock with short inactivity timeout for testing + wsConfig := s.wsConfig + + wsConfig.InactivityTimeout = 50 * time.Millisecond + controller := NewWebSocketController(s.logger, wsConfig, conn, factory) + + conn. + On("ReadJSON", mock.Anything). + Return(func(interface{}) error { + // make sure the reader routine sleeps for more time than InactivityTimeout + inactivity ticker period. + // meanwhile, the writer routine must shut down the controller. + <-time.After(wsConfig.InactivityTimeout + controller.inactivityTickerPeriod()*2) + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }). + Once() + + controller.HandleConnection(context.Background()) + + conn.AssertExpectations(t) + }) +} + +func (s *WsControllerSuite) TestKeepaliveRoutine() { + keepaliveConfig := KeepaliveConfig{ + PingPeriod: time.Microsecond, + PongWait: 2 * time.Microsecond, + } + + s.T().Run("Successfully pings connection n times", func(t *testing.T) { + conn := connmock.NewWebsocketConnection(t) + conn.On("Close").Return(nil).Once() + conn.On("SetPongHandler", mock.AnythingOfType("func(string) error")).Return(nil).Once() + conn.On("SetReadDeadline", mock.Anything).Return(nil) + + done := make(chan struct{}) + i := 0 + expectedCalls := 2 + conn. + On("WriteControl", websocket.PingMessage, mock.Anything). + Return(func(int, time.Time) error { + if i == expectedCalls { + close(done) + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + } + + i += 1 + return nil + }). + Times(expectedCalls + 1) + + conn.On("ReadJSON", mock.Anything).Return(func(_ interface{}) error { + <-done + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }) + + factory := dpmock.NewDataProviderFactory(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, factory) + controller.keepaliveConfig = keepaliveConfig + + controller.HandleConnection(context.Background()) + }) + + s.T().Run("Error on write to closed connection", func(t *testing.T) { + conn := connmock.NewWebsocketConnection(t) + expectedError := &websocket.CloseError{Code: websocket.CloseNormalClosure} + conn. + On("WriteControl", websocket.PingMessage, mock.Anything). + Return(expectedError). 
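+			// (Editorial note) keepalive returns a *websocket.CloseError as-is
+			// (matched via errors.As), while arbitrary errors are wrapped with
+			// "error sending ping"; the next subtest covers that wrapped path.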
+ Once() + + factory := dpmock.NewDataProviderFactory(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, factory) + controller.keepaliveConfig = keepaliveConfig + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := controller.keepalive(ctx) + s.Require().Error(err) + s.Require().ErrorIs(expectedError, err) + }) + + s.T().Run("Error on write to open connection", func(t *testing.T) { + conn := connmock.NewWebsocketConnection(t) + conn. + On("WriteControl", websocket.PingMessage, mock.Anything). + Return(assert.AnError). + Once() + + factory := dpmock.NewDataProviderFactory(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, factory) + controller.keepaliveConfig = keepaliveConfig + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := controller.keepalive(ctx) + s.Require().Error(err) + s.Require().ErrorContains(err, "error sending ping") + }) + + s.T().Run("Context cancelled", func(t *testing.T) { + conn := connmock.NewWebsocketConnection(t) + factory := dpmock.NewDataProviderFactory(t) + controller := NewWebSocketController(s.logger, s.wsConfig, conn, factory) + controller.keepaliveConfig = keepaliveConfig + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Immediately cancel the context + + // Start the keepalive process with the context canceled + err := controller.keepalive(ctx) + s.Require().NoError(err) + }) +} + +// newControllerMocks initializes mock WebSocket connection, data provider, and data provider factory +func newControllerMocks(t *testing.T) (*connmock.WebsocketConnection, *dpmock.DataProviderFactory, *dpmock.DataProvider) { + conn := connmock.NewWebsocketConnection(t) + conn.On("Close").Return(nil).Once() + conn.On("SetPongHandler", mock.AnythingOfType("func(string) error")).Return(nil).Once() + conn.On("SetReadDeadline", mock.Anything).Return(nil) + conn.On("SetWriteDeadline", mock.Anything).Return(nil) + + dataProvider := dpmock.NewDataProvider(t) + factory := dpmock.NewDataProviderFactory(t) + + return conn, factory, dataProvider +} + +// expectSubscribeRequest mocks the client's subscription request. +func (s *WsControllerSuite) expectSubscribeRequest(t *testing.T, conn *connmock.WebsocketConnection, subscriptionID string) { + request := models.SubscribeMessageRequest{ + BaseMessageRequest: models.BaseMessageRequest{ + SubscriptionID: subscriptionID, + Action: models.SubscribeAction, + }, + Topic: dp.BlocksTopic, + } + requestJson, err := json.Marshal(request) + require.NoError(t, err) + + // The very first message from a client is a request to subscribe to some topic + conn. + On("ReadJSON", mock.Anything). + Run(func(args mock.Arguments) { + msg, ok := args.Get(0).(*json.RawMessage) + require.True(t, ok) + *msg = requestJson + }). + Return(nil). + Once() +} + +// expectSubscribeResponse mocks the subscription response sent to the client. +func (s *WsControllerSuite) expectSubscribeResponse(t *testing.T, conn *connmock.WebsocketConnection, subscriptionID string) { + conn. + On("WriteJSON", mock.Anything). + Run(func(args mock.Arguments) { + response, ok := args.Get(0).(models.SubscribeMessageResponse) + require.True(t, ok) + require.Equal(t, subscriptionID, response.SubscriptionID) + }). + Return(nil). + Once() +} + +func (s *WsControllerSuite) expectCloseConnection(conn *connmock.WebsocketConnection, done <-chan struct{}) { + // In the default case, no further communication is expected from the client. 
+ // We wait for the writer routine to signal completion, allowing us to close the connection gracefully + // This call is optional because it is not needed in cases where readMessages exits promptly when the context is canceled. + conn. + On("ReadJSON", mock.Anything). + Return(func(msg interface{}) error { + <-done + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + }). + Maybe() + + s.expectKeepaliveRoutineShutdown(conn, done) +} + +func (s *WsControllerSuite) expectKeepaliveRoutineShutdown(conn *connmock.WebsocketConnection, done <-chan struct{}) { + // We use Maybe() because a test may finish faster than keepalive routine trigger WriteControl + conn. + On("WriteControl", websocket.PingMessage, mock.Anything). + Return(func(int, time.Time) error { + select { + case <-done: + return &websocket.CloseError{Code: websocket.CloseNormalClosure} + default: + return nil + } + }). + Maybe() +} diff --git a/engine/access/rest/websockets/data_providers/account_statuses_provider.go b/engine/access/rest/websockets/data_providers/account_statuses_provider.go new file mode 100644 index 00000000000..9ad67d9ad52 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/account_statuses_provider.go @@ -0,0 +1,225 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/counters" +) + +// accountStatusesArguments contains the arguments required for subscribing to account statuses +type accountStatusesArguments struct { + StartBlockID flow.Identifier // ID of the block to start subscription from + StartBlockHeight uint64 // Height of the block to start subscription from + Filter state_stream.AccountStatusFilter // Filter applied to events for a given subscription + HeartbeatInterval uint64 // Maximum number of blocks message won't be sent +} + +type AccountStatusesDataProvider struct { + *baseDataProvider + + arguments accountStatusesArguments + messageIndex counters.StrictMonotonicCounter + blocksSinceLastMessage uint64 + stateStreamApi state_stream.API +} + +var _ DataProvider = (*AccountStatusesDataProvider)(nil) + +// NewAccountStatusesDataProvider creates a new instance of AccountStatusesDataProvider. 
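+//
+// A representative raw-arguments payload (editorial illustration; values are
+// examples and exact encodings are governed by parseAccountStatusesArguments):
+//
+//	{
+//	  "start_block_id":     "<64-char hex block ID>",
+//	  "event_types":        ["flow.AccountCreated"],
+//	  "account_addresses":  ["0x1d007d755706c469"],
+//	  "heartbeat_interval": "10"
+//	}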
+func NewAccountStatusesDataProvider( + ctx context.Context, + logger zerolog.Logger, + stateStreamApi state_stream.API, + subscriptionID string, + topic string, + rawArguments wsmodels.Arguments, + send chan<- interface{}, + chain flow.Chain, + eventFilterConfig state_stream.EventFilterConfig, + defaultHeartbeatInterval uint64, +) (*AccountStatusesDataProvider, error) { + if stateStreamApi == nil { + return nil, fmt.Errorf("this access node does not support streaming account statuses") + } + + args, err := parseAccountStatusesArguments(rawArguments, chain, eventFilterConfig, defaultHeartbeatInterval) + if err != nil { + return nil, fmt.Errorf("invalid arguments for account statuses data provider: %w", err) + } + + provider := newBaseDataProvider( + ctx, + logger.With().Str("component", "account-statuses-data-provider").Logger(), + nil, + subscriptionID, + topic, + rawArguments, + send, + ) + + return &AccountStatusesDataProvider{ + baseDataProvider: provider, + arguments: args, + messageIndex: counters.NewMonotonicCounter(0), + blocksSinceLastMessage: 0, + stateStreamApi: stateStreamApi, + }, nil +} + +// Run starts processing the subscription for events and handles responses. +// Must be called once. +// +// No errors expected during normal operations. +func (p *AccountStatusesDataProvider) Run() error { + return run( + p.createAndStartSubscription(p.ctx, p.arguments), + p.handleResponse, + ) +} + +// handleResponse processes the response from the subscription and sends it to the client's channel. +// As part of the processing, it converts the event payloads from CCF to JSON-CDC format. +// This function is not expected to be called concurrently. +// +// No errors expected during normal operations. +func (p *AccountStatusesDataProvider) handleResponse(response *backend.AccountStatusesResponse) error { + // convert events to JSON-CDC format + convertedResponse, err := convertAccountStatusesResponse(response) + if err != nil { + return fmt.Errorf("failed to convert account status events to JSON-CDC format: %w", err) + } + + return p.sendResponse(convertedResponse) +} + +// sendResponse processes an account statuses message and sends it to data provider's channel. +// This function is not safe to call concurrently. +// +// No errors are expected during normal operations +func (p *AccountStatusesDataProvider) sendResponse(response *backend.AccountStatusesResponse) error { + // Only send a response if there's meaningful data to send + // or the heartbeat interval limit is reached + p.blocksSinceLastMessage += 1 + accountEmittedEvents := len(response.AccountEvents) != 0 + reachedHeartbeatLimit := p.blocksSinceLastMessage >= p.arguments.HeartbeatInterval + if !accountEmittedEvents && !reachedHeartbeatLimit { + return nil + } + + accountStatusesPayload := models.NewAccountStatusesResponse(response, p.messageIndex.Value()) + resp := models.BaseDataProvidersResponse{ + SubscriptionID: p.ID(), + Topic: p.Topic(), + Payload: accountStatusesPayload, + } + p.send <- &resp + + p.blocksSinceLastMessage = 0 + p.messageIndex.Increment() + + return nil +} + +// createAndStartSubscription creates a new subscription using the specified input arguments. 
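+// Exactly one start position is used, in this order of precedence: an explicit
+// start block ID wins over a start height; if neither is set, the subscription
+// starts from the latest block.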
+func (p *AccountStatusesDataProvider) createAndStartSubscription( + ctx context.Context, + args accountStatusesArguments, +) subscription.Subscription { + if args.StartBlockID != flow.ZeroID { + return p.stateStreamApi.SubscribeAccountStatusesFromStartBlockID(ctx, args.StartBlockID, args.Filter) + } + + if args.StartBlockHeight != request.EmptyHeight { + return p.stateStreamApi.SubscribeAccountStatusesFromStartHeight(ctx, args.StartBlockHeight, args.Filter) + } + + return p.stateStreamApi.SubscribeAccountStatusesFromLatestBlock(ctx, args.Filter) +} + +// convertAccountStatusesResponse converts events in the provided AccountStatusesResponse from CCF +// to JSON-CDC format. +// +// No errors expected during normal operations. +func convertAccountStatusesResponse(resp *backend.AccountStatusesResponse) (*backend.AccountStatusesResponse, error) { + jsoncdcEvents := make(map[string]flow.EventsList, len(resp.AccountEvents)) + for eventType, events := range resp.AccountEvents { + convertedEvents, err := convertEvents(events) + if err != nil { + return nil, fmt.Errorf("failed to convert %s events to JSON-CDC: %w", eventType, err) + } + jsoncdcEvents[eventType] = convertedEvents + } + + return &backend.AccountStatusesResponse{ + BlockID: resp.BlockID, + Height: resp.Height, + AccountEvents: jsoncdcEvents, + }, nil +} + +// parseAccountStatusesArguments validates and initializes the account statuses arguments. +func parseAccountStatusesArguments( + arguments wsmodels.Arguments, + chain flow.Chain, + eventFilterConfig state_stream.EventFilterConfig, + defaultHeartbeatInterval uint64, +) (accountStatusesArguments, error) { + allowedFields := map[string]struct{}{ + "start_block_id": {}, + "start_block_height": {}, + "event_types": {}, + "account_addresses": {}, + "heartbeat_interval": {}, + } + err := ensureAllowedFields(arguments, allowedFields) + if err != nil { + return accountStatusesArguments{}, err + } + + var args accountStatusesArguments + + // Parse block arguments + startBlockID, startBlockHeight, err := parseStartBlock(arguments) + if err != nil { + return args, err + } + args.StartBlockID = startBlockID + args.StartBlockHeight = startBlockHeight + + // Parse 'heartbeat_interval' argument + heartbeatInterval, err := extractHeartbeatInterval(arguments, defaultHeartbeatInterval) + if err != nil { + return accountStatusesArguments{}, err + } + args.HeartbeatInterval = heartbeatInterval + + // Parse 'event_types' as a JSON array + eventTypes, err := extractArrayOfStrings(arguments, "event_types", false) + if err != nil { + return accountStatusesArguments{}, err + } + + // Parse 'account_addresses' as []string + accountAddresses, err := extractArrayOfStrings(arguments, "account_addresses", false) + if err != nil { + return accountStatusesArguments{}, err + } + + // Initialize the event filter with the parsed arguments + args.Filter, err = state_stream.NewAccountStatusFilter(eventFilterConfig, chain, eventTypes, accountAddresses) + if err != nil { + return accountStatusesArguments{}, fmt.Errorf("failed to create event filter: %w", err) + } + + return args, nil +} diff --git a/engine/access/rest/websockets/data_providers/account_statuses_provider_test.go b/engine/access/rest/websockets/data_providers/account_statuses_provider_test.go new file mode 100644 index 00000000000..aef1c3e14e1 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/account_statuses_provider_test.go @@ -0,0 +1,404 @@ +package data_providers + +import ( + "context" + "fmt" + "strconv" + "testing" + "time" + + 
"github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/state_stream/backend" + ssmock "github.com/onflow/flow-go/engine/access/state_stream/mock" + "github.com/onflow/flow-go/engine/access/subscription" + submock "github.com/onflow/flow-go/engine/access/subscription/mock" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// AccountStatusesProviderSuite is a test suite for testing the account statuses providers functionality. +type AccountStatusesProviderSuite struct { + suite.Suite + + log zerolog.Logger + api *ssmock.API + + chain flow.Chain + rootBlock *flow.Block + finalizedBlock *flow.Header + + factory *DataProviderFactoryImpl +} + +func TestNewAccountStatusesDataProvider(t *testing.T) { + suite.Run(t, new(AccountStatusesProviderSuite)) +} + +func (s *AccountStatusesProviderSuite) SetupTest() { + s.log = unittest.Logger() + s.api = ssmock.NewAPI(s.T()) + s.chain = flow.Testnet.Chain() + s.rootBlock = unittest.Block.Genesis(s.chain.ChainID()) + s.factory = NewDataProviderFactory( + s.log, + s.api, + nil, + s.chain, + state_stream.DefaultEventFilterConfig, + subscription.DefaultHeartbeatInterval, + nil, + ) + s.Require().NotNil(s.factory) +} + +// TestAccountStatusesDataProvider_HappyPath tests the behavior of the account statuses data provider +// when it is configured correctly and operating under normal conditions. It +// validates that events are correctly streamed to the channel and ensures +// no unexpected errors occur. 
+func (s *AccountStatusesProviderSuite) TestAccountStatusesDataProvider_HappyPath() { + eventGenerator := unittest.NewEventGenerator(unittest.EventGenerator.WithEncoding(entities.EventEncodingVersion_CCF_V0)) + events := []flow.Event{ + eventGenerator.New(), + eventGenerator.New(), + } + + // use account status events + events[0].Type = state_stream.CoreEventAccountCreated + events[1].Type = state_stream.CoreEventAccountKeyAdded + + backendResponses := s.backendAccountStatusesResponses(events) + + testHappyPath( + s.T(), + AccountStatusesTopic, + s.factory, + s.subscribeAccountStatusesDataProviderTestCases(backendResponses), + func(dataChan chan interface{}) { + for i := 0; i < len(backendResponses); i++ { + dataChan <- backendResponses[i] + } + }, + s.requireAccountStatuses, + ) +} + +func (s *AccountStatusesProviderSuite) TestAccountStatusesDataProvider_StateStreamNotConfigured() { + send := make(chan interface{}) + topic := AccountStatusesTopic + + provider, err := NewAccountStatusesDataProvider( + context.Background(), + s.log, + nil, + "dummy-id", + topic, + wsmodels.Arguments{}, + send, + s.chain, + state_stream.DefaultEventFilterConfig, + subscription.DefaultHeartbeatInterval, + ) + s.Require().Error(err) + s.Require().Nil(provider) + s.Require().Contains(err.Error(), "does not support streaming account statuses") +} + +func (s *AccountStatusesProviderSuite) subscribeAccountStatusesDataProviderTestCases( + backendResponses []*backend.AccountStatusesResponse, +) []testType { + expectedResponses := s.expectedAccountStatusesResponses(backendResponses) + + return []testType{ + { + name: "SubscribeAccountStatusesFromStartBlockID happy path", + arguments: wsmodels.Arguments{ + "start_block_id": s.rootBlock.ID().String(), + "event_types": []string{string(flow.EventAccountCreated)}, + "account_addresses": []string{unittest.AddressFixture().String()}, + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeAccountStatusesFromStartBlockID", + mock.Anything, + s.rootBlock.ID(), + mock.Anything, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + { + name: "SubscribeAccountStatusesFromStartHeight happy path", + arguments: wsmodels.Arguments{ + "start_block_height": strconv.FormatUint(s.rootBlock.Height, 10), + "event_types": []string{string(flow.EventAccountCreated)}, + "account_addresses": []string{unittest.AddressFixture().String()}, + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeAccountStatusesFromStartHeight", + mock.Anything, + s.rootBlock.Height, + mock.Anything, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + { + name: "SubscribeAccountStatusesFromLatestBlock happy path", + arguments: wsmodels.Arguments{ + "event_types": []string{string(flow.EventAccountCreated)}, + "account_addresses": []string{unittest.AddressFixture().String()}, + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeAccountStatusesFromLatestBlock", + mock.Anything, + mock.Anything, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + } +} + +// requireAccountStatuses ensures that the received account statuses information matches the expected data. 
+func (s *AccountStatusesProviderSuite) requireAccountStatuses(actual interface{}, expected interface{}) { + expectedResponse, expectedResponsePayload := extractPayload[*models.AccountStatusesResponse](s.T(), expected) + actualResponse, actualResponsePayload := extractPayload[*models.AccountStatusesResponse](s.T(), actual) + + require.Equal(s.T(), expectedResponsePayload.BlockID, actualResponsePayload.BlockID) + require.Equal(s.T(), len(expectedResponsePayload.AccountEvents), len(actualResponsePayload.AccountEvents)) + require.Equal(s.T(), expectedResponsePayload.MessageIndex, actualResponsePayload.MessageIndex) + require.Equal(s.T(), expectedResponsePayload.Height, actualResponsePayload.Height) + require.Equal(s.T(), expectedResponse.Topic, actualResponse.Topic) + + for key, expectedEvents := range expectedResponsePayload.AccountEvents { + actualEvents, ok := actualResponsePayload.AccountEvents[key] + require.True(s.T(), ok, "Missing key in actual AccountEvents: %s", key) + + s.Require().Equal(expectedEvents, actualEvents, "Mismatch for key: %s", key) + } +} + +// expectedAccountStatusesResponses creates the expected responses for the provided events and backend responses. +func (s *AccountStatusesProviderSuite) expectedAccountStatusesResponses(backendResponses []*backend.AccountStatusesResponse) []interface{} { + expectedResponses := make([]interface{}, len(backendResponses)) + + for i, resp := range backendResponses { + // avoid updating the original response + expected := &backend.AccountStatusesResponse{ + Height: resp.Height, + BlockID: resp.BlockID, + AccountEvents: make(map[string]flow.EventsList, len(resp.AccountEvents)), + } + + // events are provided in CCF format, but we expect all event payloads in JSON-CDC format + for eventType, events := range resp.AccountEvents { + convertedEvents := make([]flow.Event, len(events)) + for j, event := range events { + converted, err := convert.CcfEventToJsonEvent(event) + s.Require().NoError(err) + convertedEvents[j] = *converted + } + expected.AccountEvents[eventType] = convertedEvents + } + + expectedResponsePayload := models.NewAccountStatusesResponse(expected, uint64(i)) + expectedResponses[i] = &models.BaseDataProvidersResponse{ + Topic: AccountStatusesTopic, + Payload: expectedResponsePayload, + } + } + + return expectedResponses +} + +// TestAccountStatusesDataProvider_InvalidArguments tests the behavior of the account statuses data provider +// when invalid arguments are provided. It verifies that appropriate errors are returned +// for missing or conflicting arguments. +func (s *AccountStatusesProviderSuite) TestAccountStatusesDataProvider_InvalidArguments() { + send := make(chan interface{}) + topic := AccountStatusesTopic + + for _, test := range invalidAccountStatusesArgumentsTestCases() { + s.Run(test.name, func() { + provider, err := NewAccountStatusesDataProvider( + context.Background(), + s.log, + s.api, + "dummy-id", + topic, + test.arguments, + send, + s.chain, + state_stream.DefaultEventFilterConfig, + subscription.DefaultHeartbeatInterval, + ) + s.Require().Error(err) + s.Require().Nil(provider) + s.Require().Contains(err.Error(), test.expectedErrorMsg) + }) + } +} + +// TestMessageIndexAccountStatusesProviderResponse_HappyPath tests that MessageIndex values in response are strictly increasing. 
+func (s *AccountStatusesProviderSuite) TestMessageIndexAccountStatusesProviderResponse_HappyPath() { + send := make(chan interface{}, 10) + topic := AccountStatusesTopic + accountStatusesCount := 4 + + // Create a channel to simulate the subscription's account statuses channel + accountStatusesChan := make(chan interface{}) + + // Create a mock subscription and mock the channel + sub := submock.NewSubscription(s.T()) + sub.On("Channel").Return((<-chan interface{})(accountStatusesChan)) + sub.On("Err").Return(nil).Once() + + s.api.On("SubscribeAccountStatusesFromStartBlockID", mock.Anything, mock.Anything, mock.Anything).Return(sub) + + arguments := + map[string]interface{}{ + "start_block_id": s.rootBlock.ID().String(), + "event_types": []string{string(flow.EventAccountCreated)}, + "account_addresses": []string{unittest.AddressFixture().String()}, + } + + // Create the AccountStatusesDataProvider instance + provider, err := NewAccountStatusesDataProvider( + context.Background(), + s.log, + s.api, + "dummy-id", + topic, + arguments, + send, + s.chain, + state_stream.DefaultEventFilterConfig, + subscription.DefaultHeartbeatInterval, + ) + s.Require().NoError(err) + s.Require().NotNil(provider) + + // Ensure the provider is properly closed after the test + defer provider.Close() + + // Run the provider in a separate goroutine to simulate subscription processing + done := make(chan struct{}) + go func() { + defer close(done) + err = provider.Run() + s.Require().NoError(err) + }() + + // Simulate emitting data to the account statuses channel + go func() { + defer close(accountStatusesChan) // Close the channel when done + + for i := 0; i < accountStatusesCount; i++ { + accountStatusesChan <- &backend.AccountStatusesResponse{} + } + }() + + // Collect responses + var responses []*models.AccountStatusesResponse + for i := 0; i < accountStatusesCount; i++ { + res := <-send + + _, accStatusesResponsePayload := extractPayload[*models.AccountStatusesResponse](s.T(), res) + + responses = append(responses, accStatusesResponsePayload) + } + + // Wait for the provider goroutine to finish + unittest.RequireCloseBefore(s.T(), done, time.Second, "provider failed to stop") + + // Verifying that indices are starting from 0 + s.Require().Equal(uint64(0), responses[0].MessageIndex, "Expected MessageIndex to start with 0") + + // Verifying that indices are strictly increasing + for i := 1; i < len(responses); i++ { + prevIndex := responses[i-1].MessageIndex + currentIndex := responses[i].MessageIndex + s.Require().Equal(prevIndex+1, currentIndex, "Expected MessageIndex to increment by 1") + } +} + +// backendAccountStatusesResponses creates backend account statuses responses based on the provided events. 
+func (s *AccountStatusesProviderSuite) backendAccountStatusesResponses(events []flow.Event) []*backend.AccountStatusesResponse { + responses := make([]*backend.AccountStatusesResponse, len(events)) + + for i := range events { + responses[i] = &backend.AccountStatusesResponse{ + Height: s.rootBlock.Height, + BlockID: s.rootBlock.ID(), + AccountEvents: map[string]flow.EventsList{ + unittest.RandomAddressFixture().String(): events, + }, + } + } + + return responses +} + +func invalidAccountStatusesArgumentsTestCases() []testErrType { + return []testErrType{ + { + name: "provide both 'start_block_id' and 'start_block_height' arguments", + arguments: wsmodels.Arguments{ + "start_block_id": unittest.BlockFixture().ID().String(), + "start_block_height": fmt.Sprintf("%d", unittest.BlockFixture().Height), + "event_types": []string{state_stream.CoreEventAccountCreated}, + "account_addresses": []string{unittest.AddressFixture().String()}, + }, + expectedErrorMsg: "can only provide either 'start_block_id' or 'start_block_height'", + }, + { + name: "invalid 'start_block_id' argument", + arguments: map[string]interface{}{ + "start_block_id": "invalid_block_id", + "event_types": []string{state_stream.CoreEventAccountCreated}, + "account_addresses": []string{unittest.AddressFixture().String()}, + }, + expectedErrorMsg: "invalid ID format", + }, + { + name: "invalid 'start_block_height' argument", + arguments: map[string]interface{}{ + "start_block_height": "-1", + "event_types": []string{state_stream.CoreEventAccountCreated}, + "account_addresses": []string{unittest.AddressFixture().String()}, + }, + expectedErrorMsg: "'start_block_height' must be convertible to uint64", + }, + { + name: "invalid 'heartbeat_interval' argument", + arguments: map[string]interface{}{ + "start_block_id": unittest.BlockFixture().ID().String(), + "event_types": []string{state_stream.CoreEventAccountCreated}, + "account_addresses": []string{unittest.AddressFixture().String()}, + "heartbeat_interval": "-1", + }, + expectedErrorMsg: "'heartbeat_interval' must be convertible to uint64", + }, + { + name: "unexpected argument", + arguments: map[string]interface{}{ + "start_block_id": unittest.BlockFixture().ID().String(), + "event_types": []string{state_stream.CoreEventAccountCreated}, + "account_addresses": []string{unittest.AddressFixture().String()}, + "unexpected_argument": "dummy", + }, + expectedErrorMsg: "unexpected field: 'unexpected_argument'", + }, + } +} diff --git a/engine/access/rest/websockets/data_providers/args_validation.go b/engine/access/rest/websockets/data_providers/args_validation.go new file mode 100644 index 00000000000..831801cdff4 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/args_validation.go @@ -0,0 +1,57 @@ +package data_providers + +import ( + "fmt" + "strconv" + + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" +) + +func ensureAllowedFields(fields map[string]interface{}, allowedFields map[string]struct{}) error { + // Ensure only allowed fields are present + for key := range fields { + if _, exists := allowedFields[key]; !exists { + return fmt.Errorf("unexpected field: '%s'", key) + } + } + + return nil +} + +func extractArrayOfStrings(args models.Arguments, name string, required bool) ([]string, error) { + raw, exists := args[name] + if !exists { + if required { + return nil, fmt.Errorf("missing '%s' field", name) + } + return []string{}, nil + } + + converted, err := 
common.ConvertInterfaceToArrayOfStrings(raw) + if err != nil { + return nil, fmt.Errorf("'%s' must be an array of strings: %w", name, err) + } + + return converted, nil +} + +// extractHeartbeatInterval extracts 'heartbeat_interval' argument which is always optional +func extractHeartbeatInterval(args models.Arguments, defaultHeartbeatInterval uint64) (uint64, error) { + heartbeatIntervalRaw, exists := args["heartbeat_interval"] + if !exists { + return defaultHeartbeatInterval, nil + } + + heartbeatIntervalString, ok := heartbeatIntervalRaw.(string) + if !ok { + return 0, fmt.Errorf("'heartbeat_interval' must be a string") + } + + heartbeatInterval, err := strconv.ParseUint(heartbeatIntervalString, 10, 64) + if err != nil { + return 0, fmt.Errorf("'heartbeat_interval' must be convertible to uint64: %w", err) + } + + return heartbeatInterval, nil +} diff --git a/engine/access/rest/websockets/data_providers/base_provider.go b/engine/access/rest/websockets/data_providers/base_provider.go new file mode 100644 index 00000000000..6d6e55bb49d --- /dev/null +++ b/engine/access/rest/websockets/data_providers/base_provider.go @@ -0,0 +1,117 @@ +package data_providers + +import ( + "context" + "errors" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/subscription" +) + +// baseDataProvider holds common objects for the provider +type baseDataProvider struct { + ctx context.Context + logger zerolog.Logger + api access.API + subscriptionID string + topic string + rawArguments wsmodels.Arguments + send chan<- interface{} + cancelSubscriptionContext context.CancelFunc +} + +// newBaseDataProvider creates a new instance of baseDataProvider. +func newBaseDataProvider( + ctx context.Context, + logger zerolog.Logger, + api access.API, + subscriptionID string, + topic string, + rawArguments wsmodels.Arguments, + send chan<- interface{}, +) *baseDataProvider { + ctx, cancel := context.WithCancel(ctx) + return &baseDataProvider{ + ctx: ctx, + logger: logger, + api: api, + subscriptionID: subscriptionID, + topic: topic, + rawArguments: rawArguments, + send: send, + cancelSubscriptionContext: cancel, + } +} + +// ID returns the subscription ID associated with current data provider +func (b *baseDataProvider) ID() string { + return b.subscriptionID +} + +// Topic returns the topic associated with the data provider. +func (b *baseDataProvider) Topic() string { + return b.topic +} + +// Arguments returns the arguments associated with the data provider. +func (b *baseDataProvider) Arguments() wsmodels.Arguments { + return b.rawArguments +} + +// Close terminates the data provider. +func (b *baseDataProvider) Close() { + b.cancelSubscriptionContext() +} + +type sendResponseCallback[T any] func(T) error + +// run reads data from a subscription and sends it to clients using the provided +// sendResponse callback. It continuously listens to the subscription's data +// channel and forwards the received values until the subscription ends. +// It is used as a helper function for each data provider's Run() function. +// +// Parameters: +// - subscription: An instance of the Subscription interface, which provides a +// data stream through its Channel() method and an optional error through Err(). +// - sendResponse: A callback function that processes and forwards the received +// data to the clients (e.g. a WebSocket controller). 
If the callback +// returns an error, the function terminates with that error. +// +// Returns: +// - error: If any error occurs while reading from the subscription or sending +// responses, it returns an error wrapped with additional context. +// +// Errors +// - If the subscription or sendResponse return an error, it is returned. +// +// No other errors are expected during normal operation +func run[T any]( + subscription subscription.Subscription, + sendResponse sendResponseCallback[T], +) error { + for { + value, ok := <-subscription.Channel() + if !ok { + err := subscription.Err() + if err != nil && !errors.Is(err, context.Canceled) { + return fmt.Errorf("subscription finished with error: %w", err) + } + + return nil + } + + response, ok := value.(T) + if !ok { + return fmt.Errorf("unexpected response type: %T", value) + } + + err := sendResponse(response) + if err != nil { + return fmt.Errorf("error sending response: %w", err) + } + } +} diff --git a/engine/access/rest/websockets/data_providers/block_digests_provider.go b/engine/access/rest/websockets/data_providers/block_digests_provider.go new file mode 100644 index 00000000000..123ad6ac452 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/block_digests_provider.go @@ -0,0 +1,94 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/model/flow" +) + +// BlockDigestsDataProvider is responsible for providing block digests +type BlockDigestsDataProvider struct { + *baseDataProvider + + arguments blocksArguments +} + +var _ DataProvider = (*BlockDigestsDataProvider)(nil) + +// NewBlockDigestsDataProvider creates a new instance of BlockDigestsDataProvider. +func NewBlockDigestsDataProvider( + ctx context.Context, + logger zerolog.Logger, + api access.API, + subscriptionID string, + topic string, + rawArguments wsmodels.Arguments, + send chan<- interface{}, +) (*BlockDigestsDataProvider, error) { + args, err := parseBlocksArguments(rawArguments) + if err != nil { + return nil, fmt.Errorf("invalid arguments: %w", err) + } + + base := newBaseDataProvider( + ctx, + logger.With().Str("component", "block-digests-data-provider").Logger(), + api, + subscriptionID, + topic, + rawArguments, + send, + ) + + return &BlockDigestsDataProvider{ + baseDataProvider: base, + arguments: args, + }, nil +} + +// Run starts processing the subscription for block digests and handles responses. +// Must be called once. +// +// No errors expected during normal operations +func (p *BlockDigestsDataProvider) Run() error { + return run( + p.createAndStartSubscription(p.ctx, p.arguments), + p.sendResponse, + ) +} + +// createAndStartSubscription creates a new subscription using the specified input arguments. 
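+//
+// The returned subscription is consumed by the generic run helper in
+// base_provider.go. Note that run's type parameter is inferred from the
+// callback: sendResponse below matches sendResponseCallback[*flow.BlockDigest],
+// so every value read from the subscription channel is type-asserted to
+// *flow.BlockDigest, and a mismatch terminates Run with an
+// "unexpected response type" error.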
+func (p *BlockDigestsDataProvider) createAndStartSubscription( + ctx context.Context, + args blocksArguments, +) subscription.Subscription { + if args.StartBlockID != flow.ZeroID { + return p.api.SubscribeBlockDigestsFromStartBlockID(ctx, args.StartBlockID, args.BlockStatus) + } + + if args.StartBlockHeight != request.EmptyHeight { + return p.api.SubscribeBlockDigestsFromStartHeight(ctx, args.StartBlockHeight, args.BlockStatus) + } + + return p.api.SubscribeBlockDigestsFromLatest(ctx, args.BlockStatus) +} + +func (p *BlockDigestsDataProvider) sendResponse(b *flow.BlockDigest) error { + blockDigest := models.NewBlockDigest(b) + response := models.BaseDataProvidersResponse{ + SubscriptionID: p.ID(), + Topic: p.Topic(), + Payload: blockDigest, + } + p.send <- &response + + return nil +} diff --git a/engine/access/rest/websockets/data_providers/block_digests_provider_test.go b/engine/access/rest/websockets/data_providers/block_digests_provider_test.go new file mode 100644 index 00000000000..7f75d7244f9 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/block_digests_provider_test.go @@ -0,0 +1,139 @@ +package data_providers + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + submock "github.com/onflow/flow-go/engine/access/subscription/mock" + "github.com/onflow/flow-go/model/flow" +) + +type BlockDigestsProviderSuite struct { + BlocksProviderSuite +} + +func TestBlockDigestsProviderSuite(t *testing.T) { + suite.Run(t, new(BlockDigestsProviderSuite)) +} + +// SetupTest initializes the test suite with required dependencies. +func (s *BlockDigestsProviderSuite) SetupTest() { + s.BlocksProviderSuite.SetupTest() +} + +// TestBlockDigestsDataProvider_HappyPath tests the behavior of the block digests data provider +// when it is configured correctly and operating under normal conditions. It +// validates that block digests are correctly streamed to the channel and ensures +// no unexpected errors occur. +func (s *BlockDigestsProviderSuite) TestBlockDigestsDataProvider_HappyPath() { + testHappyPath( + s.T(), + BlockDigestsTopic, + s.factory, + s.validBlockDigestsArgumentsTestCases(), + func(dataChan chan interface{}) { + for _, block := range s.blocks { + dataChan <- flow.NewBlockDigest(block.ID(), block.Height, time.UnixMilli(int64(block.Timestamp)).UTC()) + } + }, + s.requireBlockDigest, + ) +} + +// validBlockDigestsArgumentsTestCases defines test happy cases for block digests data providers. +// Each test case specifies input arguments, and setup functions for the mock API used in the test. 
+func (s *BlockDigestsProviderSuite) validBlockDigestsArgumentsTestCases() []testType { + expectedResponses := make([]interface{}, len(s.blocks)) + for i, b := range s.blocks { + blockDigest := flow.NewBlockDigest(b.ID(), b.Height, time.UnixMilli(int64(b.Timestamp)).UTC()) + blockDigestPayload := models.NewBlockDigest(blockDigest) + expectedResponses[i] = &models.BaseDataProvidersResponse{ + Topic: BlockDigestsTopic, + Payload: blockDigestPayload, + } + } + + return []testType{ + { + name: "happy path with start_block_id argument", + arguments: wsmodels.Arguments{ + "start_block_id": s.rootBlock.ID().String(), + "block_status": parser.Finalized, + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeBlockDigestsFromStartBlockID", + mock.Anything, + s.rootBlock.ID(), + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + { + name: "happy path with start_block_height argument", + arguments: wsmodels.Arguments{ + "start_block_height": strconv.FormatUint(s.rootBlock.Height, 10), + "block_status": parser.Finalized, + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeBlockDigestsFromStartHeight", + mock.Anything, + s.rootBlock.Height, + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + { + name: "happy path without any start argument", + arguments: wsmodels.Arguments{ + "block_status": parser.Finalized, + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeBlockDigestsFromLatest", + mock.Anything, + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + } +} + +// requireBlockDigest ensures that the received block header information matches the expected data. +func (s *BlocksProviderSuite) requireBlockDigest(actual interface{}, expected interface{}) { + expectedResponse, expectedResponsePayload := extractPayload[*models.BlockDigest](s.T(), expected) + actualResponse, actualResponsePayload := extractPayload[*models.BlockDigest](s.T(), actual) + + s.Require().Equal(expectedResponse.Topic, actualResponse.Topic) + s.Require().Equal(expectedResponsePayload, actualResponsePayload) +} + +// TestBlockDigestsDataProvider_InvalidArguments tests the behavior of the block digests data provider +// when invalid arguments are provided. It verifies that appropriate errors are returned +// for missing or conflicting arguments. 
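+//
+// The cases come from invalidArgumentsTestCases, which is defined once on the
+// embedded BlocksProviderSuite so that the same invalid-argument matrix is
+// reused by the blocks, block headers, and block digests provider tests.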
+func (s *BlockDigestsProviderSuite) TestBlockDigestsDataProvider_InvalidArguments() { + send := make(chan interface{}) + + topic := BlockDigestsTopic + + for _, test := range s.invalidArgumentsTestCases() { + s.Run(test.name, func() { + provider, err := NewBlockDigestsDataProvider(context.Background(), s.log, s.api, "dummy-id", topic, test.arguments, send) + s.Require().Error(err) + s.Require().Nil(provider) + s.Require().Contains(err.Error(), test.expectedErrorMsg) + }) + } +} diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider.go b/engine/access/rest/websockets/data_providers/block_headers_provider.go new file mode 100644 index 00000000000..8e5eb4159fb --- /dev/null +++ b/engine/access/rest/websockets/data_providers/block_headers_provider.go @@ -0,0 +1,95 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/model/flow" +) + +// BlockHeadersDataProvider is responsible for providing block headers +type BlockHeadersDataProvider struct { + *baseDataProvider + + arguments blocksArguments +} + +var _ DataProvider = (*BlockHeadersDataProvider)(nil) + +// NewBlockHeadersDataProvider creates a new instance of BlockHeadersDataProvider. +func NewBlockHeadersDataProvider( + ctx context.Context, + logger zerolog.Logger, + api access.API, + subscriptionID string, + topic string, + rawArguments wsmodels.Arguments, + send chan<- interface{}, +) (*BlockHeadersDataProvider, error) { + args, err := parseBlocksArguments(rawArguments) + if err != nil { + return nil, fmt.Errorf("invalid arguments: %w", err) + } + + base := newBaseDataProvider( + ctx, + logger.With().Str("component", "block-headers-data-provider").Logger(), + api, + subscriptionID, + topic, + rawArguments, + send, + ) + + return &BlockHeadersDataProvider{ + baseDataProvider: base, + arguments: args, + }, nil +} + +// Run starts processing the subscription for block headers and handles responses. +// Must be called once. +// +// No errors expected during normal operations +func (p *BlockHeadersDataProvider) Run() error { + return run( + p.createAndStartSubscription(p.ctx, p.arguments), + p.sendResponse, + ) +} + +// createAndStartSubscription creates a new subscription using the specified input arguments. 
+func (p *BlockHeadersDataProvider) createAndStartSubscription( + ctx context.Context, + args blocksArguments, +) subscription.Subscription { + if args.StartBlockID != flow.ZeroID { + return p.api.SubscribeBlockHeadersFromStartBlockID(ctx, args.StartBlockID, args.BlockStatus) + } + + if args.StartBlockHeight != request.EmptyHeight { + return p.api.SubscribeBlockHeadersFromStartHeight(ctx, args.StartBlockHeight, args.BlockStatus) + } + + return p.api.SubscribeBlockHeadersFromLatest(ctx, args.BlockStatus) +} + +func (p *BlockHeadersDataProvider) sendResponse(header *flow.Header) error { + headerPayload := commonmodels.NewBlockHeader(header) + response := models.BaseDataProvidersResponse{ + SubscriptionID: p.ID(), + Topic: p.Topic(), + Payload: headerPayload, + } + p.send <- &response + + return nil +} diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider_test.go b/engine/access/rest/websockets/data_providers/block_headers_provider_test.go new file mode 100644 index 00000000000..b834ebaf609 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/block_headers_provider_test.go @@ -0,0 +1,139 @@ +package data_providers + +import ( + "context" + "strconv" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + submock "github.com/onflow/flow-go/engine/access/subscription/mock" + "github.com/onflow/flow-go/model/flow" +) + +type BlockHeadersProviderSuite struct { + BlocksProviderSuite +} + +func TestBlockHeadersProviderSuite(t *testing.T) { + suite.Run(t, new(BlockHeadersProviderSuite)) +} + +// SetupTest initializes the test suite with required dependencies. +func (s *BlockHeadersProviderSuite) SetupTest() { + s.BlocksProviderSuite.SetupTest() +} + +// TestBlockHeadersDataProvider_HappyPath tests the behavior of the block headers data provider +// when it is configured correctly and operating under normal conditions. It +// validates that block headers are correctly streamed to the channel and ensures +// no unexpected errors occur. +func (s *BlockHeadersProviderSuite) TestBlockHeadersDataProvider_HappyPath() { + testHappyPath( + s.T(), + BlockHeadersTopic, + s.factory, + s.validBlockHeadersArgumentsTestCases(), + func(dataChan chan interface{}) { + for _, block := range s.blocks { + dataChan <- block.ToHeader() + } + }, + s.requireBlockHeader, + ) +} + +// validBlockHeadersArgumentsTestCases defines test happy cases for block headers data providers. +// Each test case specifies input arguments, and setup functions for the mock API used in the test. 
+func (s *BlockHeadersProviderSuite) validBlockHeadersArgumentsTestCases() []testType { + expectedResponses := make([]interface{}, len(s.blocks)) + for i, b := range s.blocks { + var header commonmodels.BlockHeader + header.Build(b.ToHeader()) + + expectedResponses[i] = &models.BaseDataProvidersResponse{ + Topic: BlockHeadersTopic, + Payload: &header, + } + } + + return []testType{ + { + name: "happy path with start_block_id argument", + arguments: wsmodels.Arguments{ + "start_block_id": s.rootBlock.ID().String(), + "block_status": parser.Finalized, + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeBlockHeadersFromStartBlockID", + mock.Anything, + s.rootBlock.ID(), + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + { + name: "happy path with start_block_height argument", + arguments: wsmodels.Arguments{ + "start_block_height": strconv.FormatUint(s.rootBlock.Height, 10), + "block_status": parser.Finalized, + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeBlockHeadersFromStartHeight", + mock.Anything, + s.rootBlock.Height, + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + { + name: "happy path without any start argument", + arguments: wsmodels.Arguments{ + "block_status": parser.Finalized, + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeBlockHeadersFromLatest", + mock.Anything, + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + } +} + +// requireBlockHeaders ensures that the received block header information matches the expected data. +func (s *BlockHeadersProviderSuite) requireBlockHeader(actual interface{}, expected interface{}) { + expectedResponse, expectedResponsePayload := extractPayload[*commonmodels.BlockHeader](s.T(), expected) + actualResponse, actualResponsePayload := extractPayload[*commonmodels.BlockHeader](s.T(), actual) + + s.Require().Equal(expectedResponse.Topic, actualResponse.Topic) + s.Require().Equal(expectedResponsePayload, actualResponsePayload) +} + +// TestBlockHeadersDataProvider_InvalidArguments tests the behavior of the block headers data provider +// when invalid arguments are provided. It verifies that appropriate errors are returned +// for missing or conflicting arguments. 
+func (s *BlockHeadersProviderSuite) TestBlockHeadersDataProvider_InvalidArguments() { + send := make(chan interface{}) + topic := BlockHeadersTopic + + for _, test := range s.invalidArgumentsTestCases() { + s.Run(test.name, func() { + provider, err := NewBlockHeadersDataProvider(context.Background(), s.log, s.api, "dummy-id", topic, test.arguments, send) + s.Require().Error(err) + s.Require().Nil(provider) + s.Require().Contains(err.Error(), test.expectedErrorMsg) + }) + } +} diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go new file mode 100644 index 00000000000..9368f2e17df --- /dev/null +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -0,0 +1,203 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/model/flow" +) + +// BlocksArguments contains the arguments required for subscribing to blocks / block headers / block digests +type blocksArguments struct { + StartBlockID flow.Identifier // ID of the block to start subscription from + StartBlockHeight uint64 // Height of the block to start subscription from + BlockStatus flow.BlockStatus // Status of blocks to subscribe to +} + +// BlocksDataProvider is responsible for providing blocks +type BlocksDataProvider struct { + *baseDataProvider + + arguments blocksArguments + linkGenerator commonmodels.LinkGenerator +} + +var _ DataProvider = (*BlocksDataProvider)(nil) + +// NewBlocksDataProvider creates a new instance of BlocksDataProvider. +func NewBlocksDataProvider( + ctx context.Context, + logger zerolog.Logger, + api access.API, + subscriptionID string, + linkGenerator commonmodels.LinkGenerator, + topic string, + rawArguments wsmodels.Arguments, + send chan<- interface{}, +) (*BlocksDataProvider, error) { + args, err := parseBlocksArguments(rawArguments) + if err != nil { + return nil, fmt.Errorf("invalid arguments: %w", err) + } + + provider := newBaseDataProvider( + ctx, + logger.With().Str("component", "blocks-data-provider").Logger(), + api, + subscriptionID, + topic, + rawArguments, + send, + ) + + return &BlocksDataProvider{ + baseDataProvider: provider, + arguments: args, + linkGenerator: linkGenerator, + }, nil +} + +// Run starts processing the subscription for blocks and handles responses. +// Must be called once. +// +// No errors expected during normal operations +func (p *BlocksDataProvider) Run() error { + return run( + p.createAndStartSubscription(p.ctx, p.arguments), + p.sendResponse, + ) +} + +// createAndStartSubscription creates a new subscription using the specified input arguments. 
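+//
+// The three start modes correspond one-to-one to the argument shapes accepted
+// by parseBlocksArguments below. A sketch, assuming parser.Finalized and
+// parser.Sealed serialize to "finalized" and "sealed" (values are
+// illustrative):
+//
+//	wsmodels.Arguments{"block_status": "finalized", "start_block_id": "<hex block ID>"}
+//	wsmodels.Arguments{"block_status": "sealed", "start_block_height": "1000"}
+//	wsmodels.Arguments{"block_status": "finalized"} // no start argument: stream from latest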
+func (p *BlocksDataProvider) createAndStartSubscription( + ctx context.Context, + args blocksArguments, +) subscription.Subscription { + if args.StartBlockID != flow.ZeroID { + return p.api.SubscribeBlocksFromStartBlockID(ctx, args.StartBlockID, args.BlockStatus) + } + + if args.StartBlockHeight != request.EmptyHeight { + return p.api.SubscribeBlocksFromStartHeight(ctx, args.StartBlockHeight, args.BlockStatus) + } + + return p.api.SubscribeBlocksFromLatest(ctx, args.BlockStatus) +} + +func (p *BlocksDataProvider) sendResponse(block *flow.Block) error { + expandPayload := map[string]bool{commonmodels.ExpandableFieldPayload: true} + blockPayload, err := commonmodels.NewBlock( + block, + nil, + p.linkGenerator, + p.arguments.BlockStatus, + expandPayload, + ) + if err != nil { + return fmt.Errorf("failed to build block payload response: %w", err) + } + + response := models.BaseDataProvidersResponse{ + SubscriptionID: p.ID(), + Topic: p.Topic(), + Payload: blockPayload, + } + p.send <- &response + + return nil +} + +// parseBlocksArguments validates and initializes the blocks arguments. +func parseBlocksArguments(arguments wsmodels.Arguments) (blocksArguments, error) { + allowedFields := map[string]struct{}{ + "start_block_id": {}, + "start_block_height": {}, + "block_status": {}, + } + err := ensureAllowedFields(arguments, allowedFields) + if err != nil { + return blocksArguments{}, err + } + + var args blocksArguments + + // Parse block arguments + startBlockID, startBlockHeight, err := parseStartBlock(arguments) + if err != nil { + return blocksArguments{}, err + } + args.StartBlockID = startBlockID + args.StartBlockHeight = startBlockHeight + + // Parse 'block_status' + rawBlockStatus, exists := arguments["block_status"] + if !exists { + return blocksArguments{}, fmt.Errorf("missing 'block_status' field") + } + + blockStatusStr, isString := rawBlockStatus.(string) + if !isString { + return blocksArguments{}, fmt.Errorf("'block_status' must be string") + } + + if len(blockStatusStr) == 0 { + return blocksArguments{}, fmt.Errorf("'block_status' field must not be empty") + } + + blockStatus, err := parser.ParseBlockStatus(blockStatusStr) + if err != nil { + return blocksArguments{}, err + } + args.BlockStatus = blockStatus + + return args, nil +} + +func parseStartBlock(arguments wsmodels.Arguments) (flow.Identifier, uint64, error) { + startBlockIDIn, hasStartBlockID := arguments["start_block_id"] + startBlockHeightIn, hasStartBlockHeight := arguments["start_block_height"] + + // Check for mutual exclusivity of start_block_id and start_block_height early + if hasStartBlockID && hasStartBlockHeight { + return flow.ZeroID, 0, fmt.Errorf("can only provide either 'start_block_id' or 'start_block_height'") + } + + // Parse 'start_block_id' + if hasStartBlockID { + result, ok := startBlockIDIn.(string) + if !ok { + return flow.ZeroID, request.EmptyHeight, fmt.Errorf("'start_block_id' must be a string") + } + var startBlockID parser.ID + err := startBlockID.Parse(result) + if err != nil { + return flow.ZeroID, request.EmptyHeight, fmt.Errorf("invalid 'start_block_id': %w", err) + } + return startBlockID.Flow(), request.EmptyHeight, nil + } + + // Parse 'start_block_height' + if hasStartBlockHeight { + result, ok := startBlockHeightIn.(string) + if !ok { + return flow.ZeroID, 0, fmt.Errorf("'start_block_height' must be a string") + } + startBlockHeight, err := util.ToUint64(result) + if err != nil { + return flow.ZeroID, request.EmptyHeight, fmt.Errorf("'start_block_height' must be convertible to 
uint64: %w", err)
+		}
+		return flow.ZeroID, startBlockHeight, nil
+	}
+
+	return flow.ZeroID, request.EmptyHeight, nil
+}
diff --git a/engine/access/rest/websockets/data_providers/blocks_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_provider_test.go
new file mode 100644
index 00000000000..5b51613677c
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/blocks_provider_test.go
@@ -0,0 +1,254 @@
+package data_providers
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/suite"
+
+	accessmock "github.com/onflow/flow-go/access/mock"
+	commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models"
+	mockcommonmodels "github.com/onflow/flow-go/engine/access/rest/common/models/mock"
+	"github.com/onflow/flow-go/engine/access/rest/common/parser"
+	"github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models"
+	wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models"
+	"github.com/onflow/flow-go/engine/access/state_stream"
+	"github.com/onflow/flow-go/engine/access/subscription"
+	submock "github.com/onflow/flow-go/engine/access/subscription/mock"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+const unknownBlockStatus = "unknown_block_status"
+
+// BlocksProviderSuite is a test suite for testing the block data provider functionality.
+type BlocksProviderSuite struct {
+	suite.Suite
+
+	log zerolog.Logger
+	api *accessmock.API
+
+	blocks []*flow.Block
+
+	rootBlock      *flow.Block
+	finalizedBlock *flow.Header
+
+	factory       *DataProviderFactoryImpl
+	linkGenerator *mockcommonmodels.LinkGenerator
+}
+
+func TestBlocksProviderSuite(t *testing.T) {
+	suite.Run(t, new(BlocksProviderSuite))
+}
+
+func (s *BlocksProviderSuite) SetupTest() {
+	s.log = unittest.Logger()
+	s.api = accessmock.NewAPI(s.T())
+	s.linkGenerator = mockcommonmodels.NewLinkGenerator(s.T())
+
+	blockCount := 5
+	s.blocks = make([]*flow.Block, 0, blockCount)
+	s.rootBlock = unittest.Block.Genesis(flow.Emulator)
+	parent := s.rootBlock.ToHeader()
+
+	for i := 0; i < blockCount; i++ {
+		transaction := unittest.TransactionBodyFixture()
+		col := unittest.CollectionFromTransactions(&transaction)
+		guarantee := &flow.CollectionGuarantee{CollectionID: col.ID()}
+		block := unittest.BlockWithParentAndPayload(
+			parent,
+			unittest.PayloadFixture(unittest.WithGuarantees(guarantee)),
+		)
+		// update for next iteration
+		parent = block.ToHeader()
+		s.blocks = append(s.blocks, block)
+	}
+	s.finalizedBlock = parent
+
+	s.factory = NewDataProviderFactory(
+		s.log,
+		nil,
+		s.api,
+		flow.Testnet.Chain(),
+		state_stream.DefaultEventFilterConfig,
+		subscription.DefaultHeartbeatInterval,
+		s.linkGenerator,
+	)
+	s.Require().NotNil(s.factory)
+}
+
+// TestBlocksDataProvider_HappyPath tests the behavior of the block data provider
+// when it is configured correctly and operating under normal conditions. It
+// validates that blocks are correctly streamed to the channel and ensures
+// no unexpected errors occur.
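+//
+// Unlike the header and digest variants, this provider also exercises the
+// link generator: the BlockLink mock below resolves each known block to
+// "/v1/blocks/<id>" so that the expanded block payload can be built together
+// with its links.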
+func (s *BlocksProviderSuite) TestBlocksDataProvider_HappyPath() {
+	s.linkGenerator.On("BlockLink", mock.AnythingOfType("flow.Identifier")).Return(
+		func(id flow.Identifier) (string, error) {
+			for _, block := range s.blocks {
+				if block.ID() == id {
+					return fmt.Sprintf("/v1/blocks/%s", id), nil
+				}
+			}
+			return "", assert.AnError
+		},
+	)
+
+	testHappyPath(
+		s.T(),
+		BlocksTopic,
+		s.factory,
+		s.validBlockArgumentsTestCases(),
+		func(dataChan chan interface{}) {
+			for _, block := range s.blocks {
+				dataChan <- block
+			}
+		},
+		s.requireBlock,
+	)
+}
+
+// validBlockArgumentsTestCases defines test happy cases for block data providers.
+// Each test case specifies input arguments, and setup functions for the mock API used in the test.
+func (s *BlocksProviderSuite) validBlockArgumentsTestCases() []testType {
+	expectedResponses := s.expectedBlockResponses(s.blocks, map[string]bool{commonmodels.ExpandableFieldPayload: true}, flow.BlockStatusFinalized)
+
+	return []testType{
+		{
+			name: "happy path with start_block_id argument",
+			arguments: wsmodels.Arguments{
+				"start_block_id": s.rootBlock.ID().String(),
+				"block_status":   parser.Finalized,
+			},
+			setupBackend: func(sub *submock.Subscription) {
+				s.api.On(
+					"SubscribeBlocksFromStartBlockID",
+					mock.Anything,
+					s.rootBlock.ID(),
+					flow.BlockStatusFinalized,
+				).Return(sub).Once()
+			},
+			expectedResponses: expectedResponses,
+		},
+		{
+			name: "happy path with start_block_height argument",
+			arguments: wsmodels.Arguments{
+				"start_block_height": strconv.FormatUint(s.rootBlock.Height, 10),
+				"block_status":       parser.Finalized,
+			},
+			setupBackend: func(sub *submock.Subscription) {
+				s.api.On(
+					"SubscribeBlocksFromStartHeight",
+					mock.Anything,
+					s.rootBlock.Height,
+					flow.BlockStatusFinalized,
+				).Return(sub).Once()
+			},
+			expectedResponses: expectedResponses,
+		},
+		{
+			name: "happy path without any start argument",
+			arguments: wsmodels.Arguments{
+				"block_status": parser.Finalized,
+			},
+			setupBackend: func(sub *submock.Subscription) {
+				s.api.On(
+					"SubscribeBlocksFromLatest",
+					mock.Anything,
+					flow.BlockStatusFinalized,
+				).Return(sub).Once()
+			},
+			expectedResponses: expectedResponses,
+		},
+	}
+}
+
+// requireBlock ensures that the received block information matches the expected data.
+func (s *BlocksProviderSuite) requireBlock(actual interface{}, expected interface{}) {
+	expectedResponse, expectedResponsePayload := extractPayload[*commonmodels.Block](s.T(), expected)
+	actualResponse, actualResponsePayload := extractPayload[*commonmodels.Block](s.T(), actual)
+
+	s.Require().Equal(expectedResponse.Topic, actualResponse.Topic)
+	s.Require().Equal(expectedResponsePayload, actualResponsePayload)
+}
+
+// expectedBlockResponses generates a list of expected block responses for the given blocks.
+func (s *BlocksProviderSuite) expectedBlockResponses(
+	blocks []*flow.Block,
+	expand map[string]bool,
+	status flow.BlockStatus,
+) []interface{} {
+	responses := make([]interface{}, len(blocks))
+	for i, b := range blocks {
+		var block commonmodels.Block
+		err := block.Build(b, nil, s.linkGenerator, status, expand)
+		s.Require().NoError(err)
+
+		responses[i] = &models.BaseDataProvidersResponse{
+			Topic:   BlocksTopic,
+			Payload: &block,
+		}
+	}
+
+	return responses
+}
+
+// TestBlocksDataProvider_InvalidArguments tests the behavior of the block data provider
+// when invalid arguments are provided. It verifies that appropriate errors are returned
+// for missing or conflicting arguments.
+func (s *BlocksProviderSuite) TestBlocksDataProvider_InvalidArguments() {
+	send := make(chan interface{})
+
+	for _, test := range s.invalidArgumentsTestCases() {
+		s.Run(test.name, func() {
+			provider, err := NewBlocksDataProvider(context.Background(), s.log, s.api, "dummy-id", nil, BlocksTopic, test.arguments, send)
+			s.Require().Error(err)
+			s.Require().Nil(provider)
+			s.Require().Contains(err.Error(), test.expectedErrorMsg)
+		})
+	}
+}
+
+// invalidArgumentsTestCases returns a list of test cases with invalid argument combinations
+// for testing the behavior of the block, block headers, and block digests data providers. Each
+// test case includes a name, a set of input arguments, and the expected error message that
+// should be returned.
+func (s *BlocksProviderSuite) invalidArgumentsTestCases() []testErrType {
+	return []testErrType{
+		{
+			name: "missing 'block_status' argument",
+			arguments: wsmodels.Arguments{
+				"start_block_id": s.rootBlock.ID().String(),
+			},
+			expectedErrorMsg: "missing 'block_status' field",
+		},
+		{
+			name: "unknown 'block_status' argument",
+			arguments: wsmodels.Arguments{
+				"block_status": unknownBlockStatus,
+			},
+			expectedErrorMsg: fmt.Sprintf("invalid 'block_status', must be '%s' or '%s'", parser.Finalized, parser.Sealed),
+		},
+		{
+			name: "provide both 'start_block_id' and 'start_block_height' arguments",
+			arguments: wsmodels.Arguments{
+				"block_status":       parser.Finalized,
+				"start_block_id":     s.rootBlock.ID().String(),
+				"start_block_height": fmt.Sprintf("%d", s.rootBlock.Height),
+			},
+			expectedErrorMsg: "can only provide either 'start_block_id' or 'start_block_height'",
+		},
+		{
+			name: "unexpected argument",
+			arguments: map[string]interface{}{
+				"block_status":        parser.Finalized,
+				"start_block_id":      unittest.BlockFixture().ID().String(),
+				"unexpected_argument": "dummy",
+			},
+			expectedErrorMsg: "unexpected field: 'unexpected_argument'",
+		},
+	}
+}
diff --git a/engine/access/rest/websockets/data_providers/data_provider.go b/engine/access/rest/websockets/data_providers/data_provider.go
new file mode 100644
index 00000000000..7174a09feea
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/data_provider.go
@@ -0,0 +1,37 @@
+package data_providers
+
+import (
+	"github.com/onflow/flow-go/engine/access/rest/websockets/models"
+)
+
+// DataProvider is the interface that abstracts the actual data provider used by the WebSocketCollector.
+// It provides methods for retrieving the provider's unique SubscriptionID and topic, and methods to close and run the provider.
+type DataProvider interface {
+	// ID returns the unique identifier of the data provider.
+	ID() string
+	// Topic returns the topic associated with the data provider.
+	Topic() string
+	// Arguments returns the arguments associated with the data provider.
+ Arguments() models.Arguments + // Close terminates the data provider. + // + // No errors are expected during normal operations. + Close() + // Run starts processing the subscription and handles responses. + // + // The separation of the data provider's creation and its Run() method + // allows for better control over the subscription lifecycle. By doing so, + // a confirmation message can be sent to the client immediately upon + // successful subscription creation or failure. This ensures any required + // setup or preparation steps can be handled prior to initiating the + // subscription and data streaming process. + // + // Run() begins the actual processing of the subscription. At this point, + // the context used for provider creation is no longer needed, as all + // necessary preparation steps should have been completed. + // + // Must be called once. + // + // No errors expected during normal operations + Run() error +} diff --git a/engine/access/rest/websockets/data_providers/events_provider.go b/engine/access/rest/websockets/data_providers/events_provider.go new file mode 100644 index 00000000000..7b2f933c285 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/events_provider.go @@ -0,0 +1,247 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/counters" +) + +// eventsArguments contains the arguments a user passes to subscribe to events +type eventsArguments struct { + StartBlockID flow.Identifier // ID of the block to start subscription from + StartBlockHeight uint64 // Height of the block to start subscription from + Filter state_stream.EventFilter // Filter applied to events for a given subscription + HeartbeatInterval uint64 // Maximum number of blocks message won't be sent +} + +// EventsDataProvider is responsible for providing events +type EventsDataProvider struct { + *baseDataProvider + + stateStreamApi state_stream.API + arguments eventsArguments + messageIndex counters.StrictMonotonicCounter + blocksSinceLastMessage uint64 +} + +var _ DataProvider = (*EventsDataProvider)(nil) + +// NewEventsDataProvider creates a new instance of EventsDataProvider. 
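+//
+// Per the DataProvider contract above, construction and Run are deliberately
+// separate steps so the caller can acknowledge the subscription before
+// streaming begins. A sketch of the intended lifecycle (caller-side names are
+// illustrative, error handling elided):
+//
+//	provider, err := NewEventsDataProvider(ctx, log, stateStreamApi, subscriptionID,
+//		topic, args, send, chain, filterConfig, defaultHeartbeatInterval)
+//	// on success, confirm the subscription to the client, then:
+//	go func() { _ = provider.Run() }()
+//	// on unsubscribe or connection close:
+//	provider.Close()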
+func NewEventsDataProvider( + ctx context.Context, + logger zerolog.Logger, + stateStreamApi state_stream.API, + subscriptionID string, + topic string, + rawArguments wsmodels.Arguments, + send chan<- interface{}, + chain flow.Chain, + eventFilterConfig state_stream.EventFilterConfig, + defaultHeartbeatInterval uint64, +) (*EventsDataProvider, error) { + if stateStreamApi == nil { + return nil, fmt.Errorf("this access node does not support streaming events") + } + + args, err := parseEventsArguments(rawArguments, chain, eventFilterConfig, defaultHeartbeatInterval) + if err != nil { + return nil, fmt.Errorf("invalid arguments for events data provider: %w", err) + } + + provider := newBaseDataProvider( + ctx, + logger.With().Str("component", "events-data-provider").Logger(), + nil, + subscriptionID, + topic, + rawArguments, + send, + ) + + return &EventsDataProvider{ + baseDataProvider: provider, + stateStreamApi: stateStreamApi, + arguments: args, + messageIndex: counters.NewMonotonicCounter(0), + blocksSinceLastMessage: 0, + }, nil +} + +// Run starts processing the subscription for events and handles responses. +// Must be called once. +// +// No errors expected during normal operations +func (p *EventsDataProvider) Run() error { + return run( + p.createAndStartSubscription(p.ctx, p.arguments), + p.handleResponse, + ) +} + +// handleResponse processes the response from the subscription and sends it to the client's channel. +// As part of the processing, it converts the event payloads from CCF to JSON-CDC format. +// This function is not expected to be called concurrently. +// +// No errors expected during normal operations. +func (p *EventsDataProvider) handleResponse(response *backend.EventsResponse) error { + // convert events to JSON-CDC format + convertedResponse, err := convertEventsResponse(response) + if err != nil { + return fmt.Errorf("failed to convert events to JSON-CDC format: %w", err) + } + + return p.sendResponse(convertedResponse) +} + +// sendResponse processes an event message and sends it to client's channel. +// This function is not expected to be called concurrently. +// +// No errors are expected during normal operations. +func (p *EventsDataProvider) sendResponse(eventsResponse *backend.EventsResponse) error { + // Only send a response if there's meaningful data to send + // or the heartbeat interval limit is reached + p.blocksSinceLastMessage += 1 + contractEmittedEvents := len(eventsResponse.Events) != 0 + reachedHeartbeatLimit := p.blocksSinceLastMessage >= p.arguments.HeartbeatInterval + if !contractEmittedEvents && !reachedHeartbeatLimit { + return nil + } + + eventsPayload := models.NewEventResponse(eventsResponse, p.messageIndex.Value()) + response := models.BaseDataProvidersResponse{ + SubscriptionID: p.ID(), + Topic: p.Topic(), + Payload: eventsPayload, + } + p.send <- &response + + p.blocksSinceLastMessage = 0 + p.messageIndex.Increment() + + return nil +} + +// createAndStartSubscription creates a new subscription using the specified input arguments. 
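+//
+// Every response from the resulting subscription flows through sendResponse
+// above, which suppresses empty responses until the heartbeat limit is
+// reached. A worked example with heartbeat_interval = 3, where only block B5
+// carries matching events:
+//
+//	B1, B2: no events, counter 1..2 < 3   -> suppressed
+//	B3:     no events, counter reaches 3  -> empty heartbeat sent, counter resets
+//	B4:     no events, counter 1          -> suppressed
+//	B5:     events present                -> sent immediately, counter resets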
+func (p *EventsDataProvider) createAndStartSubscription(ctx context.Context, args eventsArguments) subscription.Subscription { + if args.StartBlockID != flow.ZeroID { + return p.stateStreamApi.SubscribeEventsFromStartBlockID(ctx, args.StartBlockID, args.Filter) + } + + if args.StartBlockHeight != request.EmptyHeight { + return p.stateStreamApi.SubscribeEventsFromStartHeight(ctx, args.StartBlockHeight, args.Filter) + } + + return p.stateStreamApi.SubscribeEventsFromLatest(ctx, args.Filter) +} + +// convertEventsResponse converts events in the provided EventsResponse from CCF to JSON-CDC format. +// +// No errors expected during normal operations. +func convertEventsResponse(resp *backend.EventsResponse) (*backend.EventsResponse, error) { + jsoncdcEvents, err := convertEvents(resp.Events) + if err != nil { + return nil, fmt.Errorf("failed to convert events to JSON-CDC: %w", err) + } + + return &backend.EventsResponse{ + BlockID: resp.BlockID, + Height: resp.Height, + BlockTimestamp: resp.BlockTimestamp, + Events: jsoncdcEvents, + }, nil +} + +// convertEvents converts a slice events with CCF encoded payloads into a slice of new events who's +// payloads are encoded in JSON-CDC format. +// +// Note: this function creates a copy of the original events before converting the payload. This +// is important to ensure the original data structure is not modified, which could impact data held +// in caches. +// +// No errors expected during normal operations. +func convertEvents(ccfEvents []flow.Event) ([]flow.Event, error) { + jsoncdcEvents := make([]flow.Event, len(ccfEvents)) + for i, ccfEvent := range ccfEvents { + converted, err := convert.CcfEventToJsonEvent(ccfEvent) + if err != nil { + return nil, fmt.Errorf("failed to convert event %d: %w", i, err) + } + jsoncdcEvents[i] = *converted + } + return jsoncdcEvents, nil +} + +// parseEventsArguments validates and initializes the events arguments. 
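+//
+// A fully-populated argument map (a sketch; the keys are exactly the
+// allow-list below, the values are illustrative):
+//
+//	wsmodels.Arguments{
+//		"start_block_id":     "<hex block ID>", // mutually exclusive with "start_block_height"
+//		"event_types":        []string{"flow.AccountCreated"},
+//		"addresses":          []string{"0000000000000001"},
+//		"contracts":          []string{"A.0000000000000001.Contract1"},
+//		"heartbeat_interval": "3",
+//	}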
+func parseEventsArguments( + arguments wsmodels.Arguments, + chain flow.Chain, + eventFilterConfig state_stream.EventFilterConfig, + defaultHeartbeatInterval uint64, +) (eventsArguments, error) { + allowedFields := map[string]struct{}{ + "start_block_id": {}, + "start_block_height": {}, + "event_types": {}, + "addresses": {}, + "contracts": {}, + "heartbeat_interval": {}, + } + err := ensureAllowedFields(arguments, allowedFields) + if err != nil { + return eventsArguments{}, err + } + + var args eventsArguments + + // Parse block arguments + startBlockID, startBlockHeight, err := parseStartBlock(arguments) + if err != nil { + return eventsArguments{}, err + } + args.StartBlockID = startBlockID + args.StartBlockHeight = startBlockHeight + + // Parse 'heartbeat_interval' argument + heartbeatInterval, err := extractHeartbeatInterval(arguments, defaultHeartbeatInterval) + if err != nil { + return eventsArguments{}, err + } + args.HeartbeatInterval = heartbeatInterval + + // Parse 'event_types' as a JSON array + eventTypes, err := extractArrayOfStrings(arguments, "event_types", false) + if err != nil { + return eventsArguments{}, err + } + + // Parse 'addresses' as []string{} + addresses, err := extractArrayOfStrings(arguments, "addresses", false) + if err != nil { + return eventsArguments{}, err + } + + // Parse 'contracts' as []string{} + contracts, err := extractArrayOfStrings(arguments, "contracts", false) + if err != nil { + return eventsArguments{}, err + } + + // Initialize the event filter with the parsed arguments + args.Filter, err = state_stream.NewEventFilter(eventFilterConfig, chain, eventTypes, addresses, contracts) + if err != nil { + return eventsArguments{}, fmt.Errorf("error creating event filter: %w", err) + } + + return args, nil +} diff --git a/engine/access/rest/websockets/data_providers/events_provider_test.go b/engine/access/rest/websockets/data_providers/events_provider_test.go new file mode 100644 index 00000000000..88ef6a67c13 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/events_provider_test.go @@ -0,0 +1,409 @@ +package data_providers + +import ( + "context" + "fmt" + "strconv" + "testing" + "time" + + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/state_stream/backend" + ssmock "github.com/onflow/flow-go/engine/access/state_stream/mock" + "github.com/onflow/flow-go/engine/access/subscription" + submock "github.com/onflow/flow-go/engine/access/subscription/mock" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// EventsProviderSuite is a test suite for testing the events providers functionality. 
+type EventsProviderSuite struct { + suite.Suite + + log zerolog.Logger + api *ssmock.API + + chain flow.Chain + rootBlock *flow.Block + finalizedBlock *flow.Header + + factory *DataProviderFactoryImpl +} + +func TestEventsProviderSuite(t *testing.T) { + suite.Run(t, new(EventsProviderSuite)) +} + +func (s *EventsProviderSuite) SetupTest() { + s.log = unittest.Logger() + s.api = ssmock.NewAPI(s.T()) + s.chain = flow.Testnet.Chain() + s.rootBlock = unittest.Block.Genesis(s.chain.ChainID()) + s.factory = NewDataProviderFactory( + s.log, + s.api, + nil, + s.chain, + state_stream.DefaultEventFilterConfig, + subscription.DefaultHeartbeatInterval, + nil, + ) + s.Require().NotNil(s.factory) +} + +// TestEventsDataProvider_HappyPath tests the behavior of the events data provider +// when it is configured correctly and operating under normal conditions. It +// validates that events are correctly streamed to the channel and ensures +// no unexpected errors occur. +func (s *EventsProviderSuite) TestEventsDataProvider_HappyPath() { + eventGenerator := unittest.NewEventGenerator(unittest.EventGenerator.WithEncoding(entities.EventEncodingVersion_CCF_V0)) + events := []flow.Event{ + eventGenerator.New(), + eventGenerator.New(), + } + + backendResponses := s.backendEventsResponses(events) + + testHappyPath( + s.T(), + EventsTopic, + s.factory, + s.subscribeEventsDataProviderTestCases(backendResponses), + func(dataChan chan interface{}) { + for i := 0; i < len(backendResponses); i++ { + dataChan <- backendResponses[i] + } + }, + s.requireEvents, + ) +} + +// subscribeEventsDataProviderTestCases generates test cases for events data providers. +func (s *EventsProviderSuite) subscribeEventsDataProviderTestCases(backendResponses []*backend.EventsResponse) []testType { + expectedResponses := s.expectedEventsResponses(backendResponses) + + return []testType{ + { + name: "SubscribeBlocksFromStartBlockID happy path", + arguments: wsmodels.Arguments{ + "start_block_id": s.rootBlock.ID().String(), + "event_types": []string{string(flow.EventAccountCreated)}, + "addresses": []string{unittest.AddressFixture().String()}, + "contracts": []string{"A.0000000000000001.Contract1", "A.0000000000000001.Contract2"}, + "heartbeat_interval": "3", + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeEventsFromStartBlockID", + mock.Anything, + s.rootBlock.ID(), + mock.Anything, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + { + name: "SubscribeEventsFromStartHeight happy path", + arguments: wsmodels.Arguments{ + "start_block_height": strconv.FormatUint(s.rootBlock.Height, 10), + "event_types": []string{string(flow.EventAccountCreated)}, + "addresses": []string{unittest.AddressFixture().String()}, + "contracts": []string{"A.0000000000000001.Contract1", "A.0000000000000001.Contract2"}, + "heartbeat_interval": "3", + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeEventsFromStartHeight", + mock.Anything, + s.rootBlock.Height, + mock.Anything, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + { + name: "SubscribeEventsFromLatest happy path", + arguments: wsmodels.Arguments{ + "event_types": []string{string(flow.EventAccountCreated)}, + "addresses": []string{unittest.AddressFixture().String()}, + "contracts": []string{"A.0000000000000001.Contract1", "A.0000000000000001.Contract2"}, + "heartbeat_interval": "3", + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeEventsFromLatest", + mock.Anything, + 
mock.Anything, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + } +} + +// requireEvents ensures that the received event information matches the expected data. +func (s *EventsProviderSuite) requireEvents(actual interface{}, expected interface{}) { + expectedResponse, expectedResponsePayload := extractPayload[*models.EventResponse](s.T(), expected) + actualResponse, actualResponsePayload := extractPayload[*models.EventResponse](s.T(), actual) + + s.Require().Equal(expectedResponse.Topic, actualResponse.Topic) + s.Require().Equal(expectedResponsePayload.MessageIndex, actualResponsePayload.MessageIndex) + s.Require().ElementsMatch(expectedResponsePayload.Events, actualResponsePayload.Events) +} + +// backendEventsResponses creates backend events responses based on the provided events. +func (s *EventsProviderSuite) backendEventsResponses(events []flow.Event) []*backend.EventsResponse { + responses := make([]*backend.EventsResponse, len(events)) + + for i := range events { + responses[i] = &backend.EventsResponse{ + Height: s.rootBlock.Height, + BlockID: s.rootBlock.ID(), + Events: events, + BlockTimestamp: time.UnixMilli(int64(s.rootBlock.Timestamp)).UTC(), + } + } + + return responses +} + +// expectedEventsResponses creates the expected responses for the provided backend responses. +func (s *EventsProviderSuite) expectedEventsResponses( + backendResponses []*backend.EventsResponse, +) []interface{} { + expectedResponses := make([]interface{}, len(backendResponses)) + + for i, resp := range backendResponses { + // avoid updating the original response + expected := &backend.EventsResponse{ + Height: resp.Height, + BlockID: resp.BlockID, + BlockTimestamp: resp.BlockTimestamp, + Events: make(flow.EventsList, len(resp.Events)), + } + + // events are provided in CCF format, but we expect all event payloads in JSON-CDC format + for i, event := range resp.Events { + converted, err := convert.CcfEventToJsonEvent(event) + s.Require().NoError(err) + + expected.Events[i] = *converted + } + + expectedResponsePayload := models.NewEventResponse(expected, uint64(i)) + expectedResponses[i] = &models.BaseDataProvidersResponse{ + Topic: EventsTopic, + Payload: expectedResponsePayload, + } + } + return expectedResponses +} + +// TestMessageIndexEventProviderResponse_HappyPath tests that MessageIndex values in response are strictly increasing. 
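+// For example, streaming four event responses should produce MessageIndex
+// values 0, 1, 2, 3, with no gaps or repeats.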
+func (s *EventsProviderSuite) TestMessageIndexEventProviderResponse_HappyPath() { + send := make(chan interface{}, 10) + topic := EventsTopic + eventsCount := 4 + + // Create a channel to simulate the subscription's event channel + eventChan := make(chan interface{}) + + // Create a mock subscription and mock the channel + sub := submock.NewSubscription(s.T()) + sub.On("Channel").Return((<-chan interface{})(eventChan)) + sub.On("Err").Return(nil).Once() + + s.api.On("SubscribeEventsFromStartBlockID", mock.Anything, mock.Anything, mock.Anything).Return(sub) + + arguments := + map[string]interface{}{ + "start_block_id": s.rootBlock.ID().String(), + "event_types": []string{state_stream.CoreEventAccountCreated}, + "addresses": []string{unittest.AddressFixture().String()}, + "contracts": []string{"A.0000000000000001.Contract1", "A.0000000000000001.Contract2"}, + } + + // Create the EventsDataProvider instance + provider, err := NewEventsDataProvider( + context.Background(), + s.log, + s.api, + "dummy-id", + topic, + arguments, + send, + s.chain, + state_stream.DefaultEventFilterConfig, + subscription.DefaultHeartbeatInterval, + ) + + s.Require().NoError(err) + s.Require().NotNil(provider) + + // Ensure the provider is properly closed after the test + defer provider.Close() + + // Run the provider in a separate goroutine to simulate subscription processing + done := make(chan struct{}) + go func() { + defer close(done) + err = provider.Run() + s.Require().NoError(err) + }() + + // Simulate emitting events to the event channel + go func() { + defer close(eventChan) // Close the channel when done + + for i := 0; i < eventsCount; i++ { + eventChan <- &backend.EventsResponse{ + Height: s.rootBlock.Height, + } + } + }() + + // Collect responses + var responses []*models.EventResponse + for i := 0; i < eventsCount; i++ { + res := <-send + + _, eventResData := extractPayload[*models.EventResponse](s.T(), res) + + responses = append(responses, eventResData) + } + + // Wait for the provider goroutine to finish + unittest.RequireCloseBefore(s.T(), done, time.Second, "provider failed to stop") + + // Verifying that indices are starting from 0 + s.Require().Equal(uint64(0), responses[0].MessageIndex, "Expected MessageIndex to start with 0") + + // Verifying that indices are strictly increasing + for i := 1; i < len(responses); i++ { + prevIndex := responses[i-1].MessageIndex + currentIndex := responses[i].MessageIndex + s.Require().Equal(prevIndex+1, currentIndex, "Expected MessageIndex to increment by 1") + } +} + +// TestEventsDataProvider_InvalidArguments tests the behavior of the event data provider +// when invalid arguments are provided. It verifies that appropriate errors are returned +// for missing or conflicting arguments. +// This test covers the test cases: +// 1. Providing both 'start_block_id' and 'start_block_height' simultaneously. +// 2. Invalid 'start_block_id' argument. +// 3. Invalid 'start_block_height' argument. 
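+// 4. Invalid 'heartbeat_interval' argument.
+// 5. An unexpected (unknown) argument.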
+func (s *EventsProviderSuite) TestEventsDataProvider_InvalidArguments() { + send := make(chan interface{}) + topic := EventsTopic + + for _, test := range invalidEventsArgumentsTestCases() { + s.Run(test.name, func() { + provider, err := NewEventsDataProvider( + context.Background(), + s.log, + s.api, + "dummy-id", + topic, + test.arguments, + send, + s.chain, + state_stream.DefaultEventFilterConfig, + subscription.DefaultHeartbeatInterval, + ) + s.Require().Error(err) + s.Require().Nil(provider) + s.Require().Contains(err.Error(), test.expectedErrorMsg) + }) + } +} + +func (s *EventsProviderSuite) TestEventsDataProvider_StateStreamNotConfigured() { + send := make(chan interface{}) + topic := EventsTopic + + provider, err := NewEventsDataProvider( + context.Background(), + s.log, + nil, + "dummy-id", + topic, + wsmodels.Arguments{}, + send, + s.chain, + state_stream.DefaultEventFilterConfig, + subscription.DefaultHeartbeatInterval, + ) + s.Require().Error(err) + s.Require().Nil(provider) + s.Require().Contains(err.Error(), "does not support streaming events") +} + +// invalidEventsArgumentsTestCases returns a list of test cases with invalid argument combinations +// for testing the behavior of events data providers. Each test case includes a name, +// a set of input arguments, and the expected error message that should be returned. +func invalidEventsArgumentsTestCases() []testErrType { + return []testErrType{ + { + name: "provide both 'start_block_id' and 'start_block_height' arguments", + arguments: wsmodels.Arguments{ + "start_block_id": unittest.BlockFixture().ID().String(), + "start_block_height": fmt.Sprintf("%d", unittest.BlockFixture().Height), + "event_types": []string{state_stream.CoreEventAccountCreated}, + "addresses": []string{unittest.AddressFixture().String()}, + "contracts": []string{"A.0000000000000001.Contract1", "A.0000000000000001.Contract2"}, + }, + expectedErrorMsg: "can only provide either 'start_block_id' or 'start_block_height'", + }, + { + name: "invalid 'start_block_id' argument", + arguments: map[string]interface{}{ + "start_block_id": "invalid_block_id", + "event_types": []string{state_stream.CoreEventAccountCreated}, + "addresses": []string{unittest.AddressFixture().String()}, + "contracts": []string{"A.0000000000000001.Contract1", "A.0000000000000001.Contract2"}, + }, + expectedErrorMsg: "invalid ID format", + }, + { + name: "invalid 'start_block_height' argument", + arguments: map[string]interface{}{ + "start_block_height": "-1", + "event_types": []string{state_stream.CoreEventAccountCreated}, + "addresses": []string{unittest.AddressFixture().String()}, + "contracts": []string{"A.0000000000000001.Contract1", "A.0000000000000001.Contract2"}, + }, + expectedErrorMsg: "'start_block_height' must be convertible to uint64", + }, + { + name: "invalid 'heartbeat_interval' argument", + arguments: map[string]interface{}{ + "start_block_id": unittest.BlockFixture().ID().String(), + "event_types": []string{state_stream.CoreEventAccountCreated}, + "addresses": []string{unittest.AddressFixture().String()}, + "contracts": []string{"A.0000000000000001.Contract1", "A.0000000000000001.Contract2"}, + "heartbeat_interval": "-1", + }, + expectedErrorMsg: "'heartbeat_interval' must be convertible to uint64", + }, + { + name: "unexpected argument", + arguments: map[string]interface{}{ + "start_block_id": unittest.BlockFixture().ID().String(), + "event_types": []string{state_stream.CoreEventAccountCreated}, + "addresses": []string{unittest.AddressFixture().String()}, + 
"contracts": []string{"A.0000000000000001.Contract1", "A.0000000000000001.Contract2"}, + "unexpected_argument": "dummy", + }, + expectedErrorMsg: "unexpected field: 'unexpected_argument'", + }, + } +} diff --git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go new file mode 100644 index 00000000000..e612549611a --- /dev/null +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -0,0 +1,113 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/model/flow" +) + +// Constants defining various topic names used to specify different types of +// data providers. +const ( + EventsTopic = "events" + AccountStatusesTopic = "account_statuses" + BlocksTopic = "blocks" + BlockHeadersTopic = "block_headers" + BlockDigestsTopic = "block_digests" + TransactionStatusesTopic = "transaction_statuses" + SendAndGetTransactionStatusesTopic = "send_and_get_transaction_statuses" +) + +// DataProviderFactory defines an interface for creating data providers +// based on specified topics. The factory abstracts the creation process +// and ensures consistent access to required APIs. +type DataProviderFactory interface { + // NewDataProvider creates a new data provider based on the specified topic + // and configuration parameters. + // + // No errors are expected during normal operations. + NewDataProvider(ctx context.Context, subID string, topic string, args wsmodels.Arguments, stream chan<- interface{}) (DataProvider, error) +} + +var _ DataProviderFactory = (*DataProviderFactoryImpl)(nil) + +// DataProviderFactoryImpl is an implementation of the DataProviderFactory interface. +// It is responsible for creating data providers based on the +// requested topic. It manages access to logging and relevant APIs needed to retrieve data. +type DataProviderFactoryImpl struct { + logger zerolog.Logger + + stateStreamApi state_stream.API + accessApi access.API + + chain flow.Chain + eventFilterConfig state_stream.EventFilterConfig + heartbeatInterval uint64 + + linkGenerator commonmodels.LinkGenerator +} + +// NewDataProviderFactory creates a new DataProviderFactory +// +// Parameters: +// - logger: Used for logging within the data providers. +// - eventFilterConfig: Configuration for filtering events from state streams. +// - stateStreamApi: API for accessing data from the Flow state stream API. +// - accessApi: API for accessing data from the Flow Access API. +func NewDataProviderFactory( + logger zerolog.Logger, + stateStreamApi state_stream.API, + accessApi access.API, + chain flow.Chain, + eventFilterConfig state_stream.EventFilterConfig, + heartbeatInterval uint64, + linkGenerator commonmodels.LinkGenerator, +) *DataProviderFactoryImpl { + return &DataProviderFactoryImpl{ + logger: logger, + stateStreamApi: stateStreamApi, + accessApi: accessApi, + chain: chain, + eventFilterConfig: eventFilterConfig, + heartbeatInterval: heartbeatInterval, + linkGenerator: linkGenerator, + } +} + +// NewDataProvider creates a new data provider based on the specified topic +// and configuration parameters. +// +// Parameters: +// - ctx: Context for managing request lifetime and cancellation. 
+// - topic: The topic for which a data provider is to be created. +// - arguments: Configuration arguments for the data provider. +// - ch: Channel to which the data provider sends data. +// +// No errors are expected during normal operations. +func (s *DataProviderFactoryImpl) NewDataProvider(ctx context.Context, subscriptionID string, topic string, arguments wsmodels.Arguments, ch chan<- interface{}) (DataProvider, error) { + switch topic { + case BlocksTopic: + return NewBlocksDataProvider(ctx, s.logger, s.accessApi, subscriptionID, s.linkGenerator, topic, arguments, ch) + case BlockHeadersTopic: + return NewBlockHeadersDataProvider(ctx, s.logger, s.accessApi, subscriptionID, topic, arguments, ch) + case BlockDigestsTopic: + return NewBlockDigestsDataProvider(ctx, s.logger, s.accessApi, subscriptionID, topic, arguments, ch) + case EventsTopic: + return NewEventsDataProvider(ctx, s.logger, s.stateStreamApi, subscriptionID, topic, arguments, ch, s.chain, s.eventFilterConfig, s.heartbeatInterval) + case AccountStatusesTopic: + return NewAccountStatusesDataProvider(ctx, s.logger, s.stateStreamApi, subscriptionID, topic, arguments, ch, s.chain, s.eventFilterConfig, s.heartbeatInterval) + case TransactionStatusesTopic: + return NewTransactionStatusesDataProvider(ctx, s.logger, s.accessApi, subscriptionID, s.linkGenerator, topic, arguments, ch) + case SendAndGetTransactionStatusesTopic: + return NewSendAndGetTransactionStatusesDataProvider(ctx, s.logger, s.accessApi, subscriptionID, s.linkGenerator, topic, arguments, ch, s.chain) + default: + return nil, fmt.Errorf("unsupported topic \"%s\"", topic) + } +} diff --git a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go new file mode 100644 index 00000000000..dd5ed485179 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/factory_test.go @@ -0,0 +1,190 @@ +package data_providers + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + accessmock "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/state_stream" + ssmock "github.com/onflow/flow-go/engine/access/state_stream/mock" + "github.com/onflow/flow-go/engine/access/subscription" + submock "github.com/onflow/flow-go/engine/access/subscription/mock" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// DataProviderFactorySuite is a test suite for testing the DataProviderFactory functionality. +type DataProviderFactorySuite struct { + suite.Suite + + ctx context.Context + ch chan interface{} + + accessApi *accessmock.API + stateStreamApi *ssmock.API + + factory *DataProviderFactoryImpl +} + +func TestDataProviderFactorySuite(t *testing.T) { + suite.Run(t, new(DataProviderFactorySuite)) +} + +// SetupTest sets up the initial context and dependencies for each test case. +// It initializes the factory with mock instances and validates that it is created successfully. 
+func (s *DataProviderFactorySuite) SetupTest() { + log := unittest.Logger() + s.stateStreamApi = ssmock.NewAPI(s.T()) + s.accessApi = accessmock.NewAPI(s.T()) + + s.ctx = context.Background() + s.ch = make(chan interface{}) + + s.factory = NewDataProviderFactory( + log, + s.stateStreamApi, + s.accessApi, + flow.Testnet.Chain(), + state_stream.DefaultEventFilterConfig, + subscription.DefaultHeartbeatInterval, + nil, + ) + s.Require().NotNil(s.factory) +} + +// setupSubscription creates a mock subscription instance for testing purposes. +// It configures the return value of the specified API call to the mock subscription. +func (s *DataProviderFactorySuite) setupSubscription(apiCall *mock.Call) { + sub := submock.NewSubscription(s.T()) + apiCall.Return(sub).Once() +} + +// TestSupportedTopics verifies that supported topics return a valid provider and no errors. +// Each test case includes a topic and arguments for which a data provider should be created. +func (s *DataProviderFactorySuite) TestSupportedTopics() { + // Define supported topics and check if each returns the correct provider without errors + tx := unittest.TransactionBodyFixture() + tx.PayloadSignatures = []flow.TransactionSignature{unittest.TransactionSignatureFixture()} + tx.Arguments = [][]uint8{} + + testCases := []struct { + name string + topic string + arguments wsmodels.Arguments + setupSubscription func() + assertExpectations func() + }{ + { + name: "block topic", + topic: BlocksTopic, + arguments: wsmodels.Arguments{"block_status": parser.Finalized}, + setupSubscription: func() {}, + assertExpectations: func() { + s.accessApi.AssertExpectations(s.T()) + }, + }, + { + name: "block headers topic", + topic: BlockHeadersTopic, + arguments: wsmodels.Arguments{"block_status": parser.Finalized}, + setupSubscription: func() {}, + assertExpectations: func() { + s.accessApi.AssertExpectations(s.T()) + }, + }, + { + name: "block digests topic", + topic: BlockDigestsTopic, + arguments: wsmodels.Arguments{"block_status": parser.Finalized}, + setupSubscription: func() {}, + assertExpectations: func() { + s.accessApi.AssertExpectations(s.T()) + }, + }, + { + name: "events topic", + topic: EventsTopic, + arguments: wsmodels.Arguments{ + "event_types": []string{state_stream.CoreEventAccountCreated}, + "addresses": []string{unittest.AddressFixture().String()}, + "contracts": []string{"A.0000000000000001.Contract1", "A.0000000000000001.Contract2"}, + }, + setupSubscription: func() {}, + assertExpectations: func() { + s.stateStreamApi.AssertExpectations(s.T()) + }, + }, + { + name: "account statuses topic", + topic: AccountStatusesTopic, + arguments: wsmodels.Arguments{ + "event_types": []string{state_stream.CoreEventAccountCreated}, + "account_addresses": []string{unittest.AddressFixture().String()}, + }, + setupSubscription: func() {}, + assertExpectations: func() { + s.stateStreamApi.AssertExpectations(s.T()) + }, + }, + { + name: "transaction statuses topic", + topic: TransactionStatusesTopic, + arguments: wsmodels.Arguments{ + "tx_id": unittest.IdentifierFixture().String(), + }, + setupSubscription: func() {}, + assertExpectations: func() { + s.stateStreamApi.AssertExpectations(s.T()) + }, + }, + { + name: "send transaction statuses topic", + topic: SendAndGetTransactionStatusesTopic, + arguments: wsmodels.Arguments(unittest.CreateSendTxHttpPayload(tx)), + setupSubscription: func() {}, + assertExpectations: func() { + s.stateStreamApi.AssertExpectations(s.T()) + }, + }, + } + + for _, test := range testCases { + s.Run(test.name, 
func() { + s.T().Parallel() + test.setupSubscription() + + provider, err := s.factory.NewDataProvider(context.Background(), "dummy-id", test.topic, test.arguments, s.ch) + s.Require().NoError(err, "Expected no error for topic %s", test.topic) + s.Require().NotNil(provider, "Expected provider for topic %s", test.topic) + s.Require().Equal(test.topic, provider.Topic()) + s.Require().Equal(test.arguments, provider.Arguments()) + + test.assertExpectations() + }) + } +} + +// TestUnsupportedTopics verifies that unsupported topics do not return a provider +// and instead return an error indicating the topic is unsupported. +func (s *DataProviderFactorySuite) TestUnsupportedTopics() { + s.T().Parallel() + + // Define unsupported topics + unsupportedTopics := []string{ + "unknown_topic", + "", + } + + for _, topic := range unsupportedTopics { + provider, err := s.factory.NewDataProvider(context.Background(), "dummy-id", topic, nil, s.ch) + s.Require().Error(err, "Expected error for unsupported topic %s", topic) + s.Require().Nil(provider, "Expected no provider for unsupported topic %s", topic) + s.Require().EqualError(err, fmt.Sprintf("unsupported topic \"%s\"", topic)) + } +} diff --git a/engine/access/rest/websockets/data_providers/mock/data_provider.go b/engine/access/rest/websockets/data_providers/mock/data_provider.go new file mode 100644 index 00000000000..5e6ef7846ae --- /dev/null +++ b/engine/access/rest/websockets/data_providers/mock/data_provider.go @@ -0,0 +1,106 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + models "github.com/onflow/flow-go/engine/access/rest/websockets/models" + mock "github.com/stretchr/testify/mock" +) + +// DataProvider is an autogenerated mock type for the DataProvider type +type DataProvider struct { + mock.Mock +} + +// Arguments provides a mock function with no fields +func (_m *DataProvider) Arguments() models.Arguments { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Arguments") + } + + var r0 models.Arguments + if rf, ok := ret.Get(0).(func() models.Arguments); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(models.Arguments) + } + } + + return r0 +} + +// Close provides a mock function with no fields +func (_m *DataProvider) Close() { + _m.Called() +} + +// ID provides a mock function with no fields +func (_m *DataProvider) ID() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Run provides a mock function with no fields +func (_m *DataProvider) Run() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Run") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Topic provides a mock function with no fields +func (_m *DataProvider) Topic() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Topic") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// NewDataProvider creates a new instance of DataProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewDataProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *DataProvider { + mock := &DataProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rest/websockets/data_providers/mock/data_provider_factory.go b/engine/access/rest/websockets/data_providers/mock/data_provider_factory.go new file mode 100644 index 00000000000..61be02fc1b0 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/mock/data_provider_factory.go @@ -0,0 +1,61 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + data_providers "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers" + mock "github.com/stretchr/testify/mock" + + models "github.com/onflow/flow-go/engine/access/rest/websockets/models" +) + +// DataProviderFactory is an autogenerated mock type for the DataProviderFactory type +type DataProviderFactory struct { + mock.Mock +} + +// NewDataProvider provides a mock function with given fields: ctx, subID, topic, args, stream +func (_m *DataProviderFactory) NewDataProvider(ctx context.Context, subID string, topic string, args models.Arguments, stream chan<- interface{}) (data_providers.DataProvider, error) { + ret := _m.Called(ctx, subID, topic, args, stream) + + if len(ret) == 0 { + panic("no return value specified for NewDataProvider") + } + + var r0 data_providers.DataProvider + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, models.Arguments, chan<- interface{}) (data_providers.DataProvider, error)); ok { + return rf(ctx, subID, topic, args, stream) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, models.Arguments, chan<- interface{}) data_providers.DataProvider); ok { + r0 = rf(ctx, subID, topic, args, stream) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(data_providers.DataProvider) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, models.Arguments, chan<- interface{}) error); ok { + r1 = rf(ctx, subID, topic, args, stream) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDataProviderFactory creates a new instance of DataProviderFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDataProviderFactory(t interface { + mock.TestingT + Cleanup(func()) +}) *DataProviderFactory { + mock := &DataProviderFactory{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rest/websockets/data_providers/models/account_events.go b/engine/access/rest/websockets/data_providers/models/account_events.go new file mode 100644 index 00000000000..d8fec6c9cce --- /dev/null +++ b/engine/access/rest/websockets/data_providers/models/account_events.go @@ -0,0 +1,22 @@ +package models + +import ( + "github.com/onflow/flow-go/engine/access/rest/common/models" + "github.com/onflow/flow-go/model/flow" +) + +// AccountEvents represents a mapping of account addresses to their associated events. +type AccountEvents map[string]models.Events + +// NewAccountEvents creates account events by converting each flow.EventsList to the corresponding models.Events. 
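+//
+// A sketch of the resulting shape, using a hypothetical address key:
+//
+//	AccountEvents{"0x1": models.Events{...}}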
+func NewAccountEvents(accountEvents map[string]flow.EventsList) AccountEvents {
+    result := make(map[string]models.Events, len(accountEvents))
+
+    for address, eventsList := range accountEvents {
+        var events models.Events
+        events.Build(eventsList)
+        result[address] = events
+    }
+
+    return result
+}
diff --git a/engine/access/rest/websockets/data_providers/models/account_statuses.go b/engine/access/rest/websockets/data_providers/models/account_statuses.go
new file mode 100644
index 00000000000..b3373e8fc5f
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/models/account_statuses.go
@@ -0,0 +1,26 @@
+package models
+
+import (
+    "strconv"
+
+    "github.com/onflow/flow-go/engine/access/state_stream/backend"
+)
+
+// AccountStatusesResponse is the response message for the 'account_statuses' topic.
+type AccountStatusesResponse struct {
+    BlockID       string        `json:"block_id"`
+    Height        string        `json:"height"`
+    AccountEvents AccountEvents `json:"account_events"`
+    MessageIndex  uint64        `json:"message_index"`
+}
+
+func NewAccountStatusesResponse(accountStatusesResponse *backend.AccountStatusesResponse, index uint64) *AccountStatusesResponse {
+    accountEvents := NewAccountEvents(accountStatusesResponse.AccountEvents)
+
+    return &AccountStatusesResponse{
+        BlockID:       accountStatusesResponse.BlockID.String(),
+        Height:        strconv.FormatUint(accountStatusesResponse.Height, 10),
+        AccountEvents: accountEvents,
+        MessageIndex:  index,
+    }
+}
diff --git a/engine/access/rest/websockets/data_providers/models/base_data_provider.go b/engine/access/rest/websockets/data_providers/models/base_data_provider.go
new file mode 100644
index 00000000000..31fd72a7380
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/models/base_data_provider.go
@@ -0,0 +1,8 @@
+package models
+
+// BaseDataProvidersResponse represents a base structure for responses from subscriptions.
+type BaseDataProvidersResponse struct {
+    SubscriptionID string      `json:"subscription_id"` // Unique subscription ID
+    Topic          string      `json:"topic"`           // Topic of the subscription
+    Payload        interface{} `json:"payload"`         // Payload that is returned within a subscription
+}
diff --git a/engine/access/rest/websockets/data_providers/models/block.go b/engine/access/rest/websockets/data_providers/models/block.go
new file mode 100644
index 00000000000..76ea0697962
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/models/block.go
@@ -0,0 +1,26 @@
+package models
+
+import (
+    "github.com/onflow/flow-go/engine/access/rest/common/models"
+)
+
+// BlockMessageResponse is the response message for 'blocks' topic.
+type BlockMessageResponse struct {
+    // The sealed or finalized blocks according to the block status
+    // in the request.
+    Block *models.Block `json:"block"`
+}
+
+// BlockHeaderMessageResponse is the response message for 'block_headers' topic.
+type BlockHeaderMessageResponse struct {
+    // The sealed or finalized block headers according to the block status
+    // in the request.
+    Header *models.BlockHeader `json:"header"`
+}
+
+// BlockDigestMessageResponse is the response message for 'block_digests' topic.
+type BlockDigestMessageResponse struct {
+    // The sealed or finalized block digest according to the block status
+    // in the request.
+    Block *BlockDigest `json:"block_digest"`
+}
diff --git a/engine/access/rest/websockets/data_providers/models/block_digest.go b/engine/access/rest/websockets/data_providers/models/block_digest.go
new file mode 100644
index 00000000000..27448857b5c
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/models/block_digest.go
@@ -0,0 +1,24 @@
+package models
+
+import (
+    "time"
+
+    "github.com/onflow/flow-go/engine/access/rest/util"
+    "github.com/onflow/flow-go/model/flow"
+)
+
+// BlockDigest is a lightweight block information model.
+type BlockDigest struct {
+    BlockId   string    `json:"block_id"`
+    Height    string    `json:"height"`
+    Timestamp time.Time `json:"timestamp"`
+}
+
+// NewBlockDigest creates a block digest instance with data from the provided flow.BlockDigest.
+func NewBlockDigest(block *flow.BlockDigest) *BlockDigest {
+    return &BlockDigest{
+        BlockId:   block.BlockID.String(),
+        Height:    util.FromUint(block.Height),
+        Timestamp: block.Timestamp,
+    }
+}
diff --git a/engine/access/rest/websockets/data_providers/models/event.go b/engine/access/rest/websockets/data_providers/models/event.go
new file mode 100644
index 00000000000..c3db39bb559
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/models/event.go
@@ -0,0 +1,31 @@
+package models
+
+import (
+    "strconv"
+
+    "github.com/onflow/flow-go/engine/access/rest/common/models"
+    commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models"
+    "github.com/onflow/flow-go/engine/access/state_stream/backend"
+)
+
+// EventResponse is the response message for 'events' topic.
+type EventResponse struct {
+    models.BlockEvents        // Embed BlockEvents struct to reuse its fields
+    MessageIndex       uint64 `json:"message_index"`
+}
+
+// NewEventResponse creates EventResponse instance.
+func NewEventResponse(eventsResponse *backend.EventsResponse, index uint64) *EventResponse {
+    var events commonmodels.Events
+    events.Build(eventsResponse.Events)
+
+    return &EventResponse{
+        BlockEvents: commonmodels.BlockEvents{
+            BlockId:        eventsResponse.BlockID.String(),
+            BlockHeight:    strconv.FormatUint(eventsResponse.Height, 10),
+            BlockTimestamp: eventsResponse.BlockTimestamp,
+            Events:         events,
+        },
+        MessageIndex: index,
+    }
+}
diff --git a/engine/access/rest/websockets/data_providers/models/transaction_statuses.go b/engine/access/rest/websockets/data_providers/models/transaction_statuses.go
new file mode 100644
index 00000000000..19d2fad8769
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/models/transaction_statuses.go
@@ -0,0 +1,28 @@
+package models
+
+import (
+    commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models"
+    accessmodel "github.com/onflow/flow-go/model/access"
+)
+
+// TransactionStatusesResponse is the response message for the 'transaction_statuses' topic.
+type TransactionStatusesResponse struct {
+    TransactionResult *commonmodels.TransactionResult `json:"transaction_result"`
+    MessageIndex      uint64                          `json:"message_index"`
+}
+
+// NewTransactionStatusesResponse creates a TransactionStatusesResponse instance.
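+//
+// A minimal usage sketch (the index value below is illustrative; providers
+// supply strictly increasing indices from their message counter):
+//
+//	resp := NewTransactionStatusesResponse(linkGenerator, txResult, 0)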
+func NewTransactionStatusesResponse(
+    linkGenerator commonmodels.LinkGenerator,
+    txResult *accessmodel.TransactionResult,
+    index uint64,
+) *TransactionStatusesResponse {
+    var transactionResult commonmodels.TransactionResult
+    txID := txResult.TransactionID
+    transactionResult.Build(txResult, txID, linkGenerator)
+
+    return &TransactionStatusesResponse{
+        TransactionResult: &transactionResult,
+        MessageIndex:      index,
+    }
+}
diff --git a/engine/access/rest/websockets/data_providers/send_and_get_transaction_statuses_provider.go b/engine/access/rest/websockets/data_providers/send_and_get_transaction_statuses_provider.go
new file mode 100644
index 00000000000..9ae1839c01c
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/send_and_get_transaction_statuses_provider.go
@@ -0,0 +1,135 @@
+package data_providers
+
+import (
+    "bytes"
+    "context"
+    "encoding/json"
+    "fmt"
+
+    "github.com/rs/zerolog"
+
+    "github.com/onflow/flow-go/access"
+    commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models"
+    commonparser "github.com/onflow/flow-go/engine/access/rest/common/parser"
+    "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models"
+    wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models"
+    "github.com/onflow/flow-go/engine/access/subscription"
+    accessmodel "github.com/onflow/flow-go/model/access"
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/module/counters"
+
+    "github.com/onflow/flow/protobuf/go/flow/entities"
+)
+
+// sendAndGetTransactionStatusesArguments contains the arguments required for sending a transaction
+// and subscribing to its status updates.
+type sendAndGetTransactionStatusesArguments struct {
+    Transaction flow.TransactionBody // The transaction body to be sent and monitored.
+}
+
+type SendAndGetTransactionStatusesDataProvider struct {
+    *baseDataProvider
+
+    arguments     sendAndGetTransactionStatusesArguments
+    messageIndex  counters.StrictMonotonicCounter
+    linkGenerator commonmodels.LinkGenerator
+}
+
+var _ DataProvider = (*SendAndGetTransactionStatusesDataProvider)(nil)
+
+func NewSendAndGetTransactionStatusesDataProvider(
+    ctx context.Context,
+    logger zerolog.Logger,
+    api access.API,
+    subscriptionID string,
+    linkGenerator commonmodels.LinkGenerator,
+    topic string,
+    rawArguments wsmodels.Arguments,
+    send chan<- interface{},
+    chain flow.Chain,
+) (*SendAndGetTransactionStatusesDataProvider, error) {
+    args, err := parseSendAndGetTransactionStatusesArguments(rawArguments, chain)
+    if err != nil {
+        return nil, fmt.Errorf("invalid arguments for send tx statuses data provider: %w", err)
+    }
+
+    provider := newBaseDataProvider(
+        ctx,
+        logger.With().Str("component", "send-transaction-statuses-data-provider").Logger(),
+        api,
+        subscriptionID,
+        topic,
+        rawArguments,
+        send,
+    )
+
+    return &SendAndGetTransactionStatusesDataProvider{
+        baseDataProvider: provider,
+        arguments:        args,
+        messageIndex:     counters.NewMonotonicCounter(0),
+        linkGenerator:    linkGenerator,
+    }, nil
+}
+
+// Run starts processing the subscription for transaction statuses and handles responses.
+// Must be called once.
+//
+// No errors are expected during normal operations
+func (p *SendAndGetTransactionStatusesDataProvider) Run() error {
+    return run(
+        p.createAndStartSubscription(p.ctx, p.arguments),
+        p.sendResponse,
+    )
+}
+
+// sendResponse processes a tx status message and sends it to the client's channel.
+// This function is not safe to call concurrently.
+//
+// No errors are expected during normal operations.
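+// Each result is wrapped in a models.BaseDataProvidersResponse carrying the
+// subscription ID and topic, and is assigned the next message index.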
+func (p *SendAndGetTransactionStatusesDataProvider) sendResponse(txResults []*accessmodel.TransactionResult) error {
+    for i := range txResults {
+        txStatusesPayload := models.NewTransactionStatusesResponse(p.linkGenerator, txResults[i], p.messageIndex.Value())
+        response := models.BaseDataProvidersResponse{
+            SubscriptionID: p.ID(),
+            Topic:          p.Topic(),
+            Payload:        txStatusesPayload,
+        }
+        p.send <- &response
+
+        p.messageIndex.Increment()
+    }
+
+    return nil
+}
+
+// createAndStartSubscription creates a new subscription using the specified input arguments.
+func (p *SendAndGetTransactionStatusesDataProvider) createAndStartSubscription(
+    ctx context.Context,
+    args sendAndGetTransactionStatusesArguments,
+) subscription.Subscription {
+    return p.api.SendAndSubscribeTransactionStatuses(ctx, &args.Transaction, entities.EventEncodingVersion_JSON_CDC_V0)
+}
+
+// parseSendAndGetTransactionStatusesArguments validates and initializes the send-and-get
+// transaction statuses arguments.
+func parseSendAndGetTransactionStatusesArguments(
+    arguments wsmodels.Arguments,
+    chain flow.Chain,
+) (sendAndGetTransactionStatusesArguments, error) {
+    var args sendAndGetTransactionStatusesArguments
+
+    // Convert the arguments map to JSON
+    rawJSON, err := json.Marshal(arguments)
+    if err != nil {
+        return sendAndGetTransactionStatusesArguments{}, fmt.Errorf("failed to marshal arguments: %w", err)
+    }
+
+    // Create an io.Reader from the JSON bytes and parse the transaction from it
+    var tx commonparser.Transaction
+    rawReader := bytes.NewReader(rawJSON)
+    err = tx.Parse(rawReader, chain)
+    if err != nil {
+        return sendAndGetTransactionStatusesArguments{}, fmt.Errorf("failed to parse transaction: %w", err)
+    }
+
+    args.Transaction = tx.Flow()
+    return args, nil
+}
diff --git a/engine/access/rest/websockets/data_providers/send_and_get_transaction_statuses_provider_test.go b/engine/access/rest/websockets/data_providers/send_and_get_transaction_statuses_provider_test.go
new file mode 100644
index 00000000000..3e17f49bed3
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/send_and_get_transaction_statuses_provider_test.go
@@ -0,0 +1,240 @@
+package data_providers
+
+import (
+    "context"
+    "testing"
+
+    "github.com/rs/zerolog"
+    "github.com/stretchr/testify/mock"
+    "github.com/stretchr/testify/require"
+    "github.com/stretchr/testify/suite"
+
+    accessmock "github.com/onflow/flow-go/access/mock"
+    mockcommonmodels "github.com/onflow/flow-go/engine/access/rest/common/models/mock"
+    "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models"
+    "github.com/onflow/flow-go/engine/access/state_stream"
+    "github.com/onflow/flow-go/engine/access/subscription"
+    submock "github.com/onflow/flow-go/engine/access/subscription/mock"
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/utils/unittest"
+
+    "github.com/onflow/flow/protobuf/go/flow/entities"
+)
+
+type SendTransactionStatusesProviderSuite struct {
+    suite.Suite
+
+    log zerolog.Logger
+    api *accessmock.API
+
+    chain          flow.Chain
+    rootBlock      *flow.Block
+    finalizedBlock *flow.Header
+
+    factory       *DataProviderFactoryImpl
+    linkGenerator *mockcommonmodels.LinkGenerator
+}
+
+func TestNewSendTransactionStatusesDataProvider(t *testing.T) {
+    suite.Run(t, new(SendTransactionStatusesProviderSuite))
+}
+
+func (s *SendTransactionStatusesProviderSuite) SetupTest() {
+    s.log = unittest.Logger()
+    s.api = accessmock.NewAPI(s.T())
+    s.linkGenerator = mockcommonmodels.NewLinkGenerator(s.T())
+
+    s.chain = flow.Testnet.Chain()
+
+    s.rootBlock = unittest.BlockFixture(
        unittest.Block.WithHeight(0),
+    )
+
+    s.factory = NewDataProviderFactory(
+        s.log,
+        nil,
+        s.api,
+        s.chain,
+        state_stream.DefaultEventFilterConfig,
+        subscription.DefaultHeartbeatInterval,
+        s.linkGenerator,
+    )
+    s.Require().NotNil(s.factory)
+}
+
+// TestSendTransactionStatusesDataProvider_HappyPath tests the behavior of the send transaction statuses data provider
+// when it is configured correctly and operating under normal conditions. It
+// validates that tx statuses are correctly streamed to the channel and ensures
+// no unexpected errors occur.
+func (s *TransactionStatusesProviderSuite) TestSendTransactionStatusesDataProvider_HappyPath() {
+    tx := unittest.TransactionBodyFixture()
+    tx.PayloadSignatures = []flow.TransactionSignature{unittest.TransactionSignatureFixture()}
+    tx.Arguments = [][]uint8{}
+
+    s.linkGenerator.On("TransactionResultLink", mock.AnythingOfType("flow.Identifier")).Return(
+        func(id flow.Identifier) (string, error) {
+            return "some_link", nil
+        },
+    )
+
+    backendResponse := backendTransactionStatusesResponse(s.rootBlock)
+    expectedResponse := s.expectedTransactionStatusesResponses(backendResponse, SendAndGetTransactionStatusesTopic)
+
+    sendTxStatusesTestCases := []testType{
+        {
+            name:      "SendAndSubscribeTransactionStatuses happy path",
+            arguments: unittest.CreateSendTxHttpPayload(tx),
+            setupBackend: func(sub *submock.Subscription) {
+                s.api.On(
+                    "SendAndSubscribeTransactionStatuses",
+                    mock.Anything,
+                    mock.Anything,
+                    entities.EventEncodingVersion_JSON_CDC_V0,
+                ).Return(sub).Once()
+            },
+            expectedResponses: expectedResponse,
+        },
+    }
+
+    testHappyPath(
+        s.T(),
+        SendAndGetTransactionStatusesTopic,
+        s.factory,
+        sendTxStatusesTestCases,
+        func(dataChan chan interface{}) {
+            dataChan <- backendResponse
+        },
+        s.requireTransactionStatuses,
+    )
+}
+
+// requireTransactionStatuses ensures that the received transaction statuses information matches the expected data.
+func (s *SendTransactionStatusesProviderSuite) requireTransactionStatuses(
+    actual interface{},
+    expected interface{},
+) {
+    expectedResponse, expectedResponsePayload := extractPayload[*models.TransactionStatusesResponse](s.T(), expected)
+    actualResponse, actualResponsePayload := extractPayload[*models.TransactionStatusesResponse](s.T(), actual)
+
+    require.Equal(s.T(), expectedResponse.Topic, actualResponse.Topic)
+    require.Equal(s.T(), expectedResponsePayload.TransactionResult.BlockId, actualResponsePayload.TransactionResult.BlockId)
+}
+
+// TestSendTransactionStatusesDataProvider_InvalidArguments tests the behavior of the send transaction statuses data provider
+// when invalid arguments are provided. It verifies that appropriate errors are returned
+// for missing or conflicting arguments.
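+// Most malformed inputs surface as transaction parsing failures, while unknown
+// fields are rejected by the JSON decoder.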
+func (s *SendTransactionStatusesProviderSuite) TestSendTransactionStatusesDataProvider_InvalidArguments() { + send := make(chan interface{}) + topic := SendAndGetTransactionStatusesTopic + + for _, test := range invalidSendTransactionStatusesArgumentsTestCases() { + s.Run(test.name, func() { + provider, err := NewSendAndGetTransactionStatusesDataProvider( + context.Background(), + s.log, + s.api, + "dummy-id", + s.linkGenerator, + topic, + test.arguments, + send, + s.chain, + ) + s.Require().Error(err) + s.Require().Contains(err.Error(), test.expectedErrorMsg) + s.Require().Nil(provider) + }) + } +} + +// invalidSendTransactionStatusesArgumentsTestCases returns a list of test cases with invalid argument combinations +// for testing the behavior of send transaction statuses data providers. Each test case includes a name, +// a set of input arguments, and the expected error message that should be returned. +func invalidSendTransactionStatusesArgumentsTestCases() []testErrType { + return []testErrType{ + { + name: "invalid 'script' argument type", + arguments: map[string]interface{}{ + "script": 0, + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "invalid 'script' argument", + arguments: map[string]interface{}{ + "script": "invalid_script", + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "invalid 'arguments' type", + arguments: map[string]interface{}{ + "arguments": 0, + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "invalid 'arguments' argument", + arguments: map[string]interface{}{ + "arguments": []string{"invalid_base64_1", "invalid_base64_2"}, + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "invalid 'reference_block_id' argument", + arguments: map[string]interface{}{ + "reference_block_id": "invalid_reference_block_id", + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "invalid 'gas_limit' argument", + arguments: map[string]interface{}{ + "gas_limit": "-1", + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "invalid 'payer' argument", + arguments: map[string]interface{}{ + "payer": "invalid_payer", + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "invalid 'proposal_key' argument", + arguments: map[string]interface{}{ + "proposal_key": "invalid ProposalKey object", + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "invalid 'authorizers' argument", + arguments: map[string]interface{}{ + "authorizers": []string{"invalid_base64_1", "invalid_base64_2"}, + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "invalid 'payload_signatures' argument", + arguments: map[string]interface{}{ + "payload_signatures": "invalid TransactionSignature array", + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "invalid 'envelope_signatures' argument", + arguments: map[string]interface{}{ + "envelope_signatures": "invalid TransactionSignature array", + }, + expectedErrorMsg: "failed to parse transaction", + }, + { + name: "unexpected argument", + arguments: map[string]interface{}{ + "unexpected_argument": "dummy", + }, + expectedErrorMsg: "request body contains unknown field", + }, + } +} diff --git a/engine/access/rest/websockets/data_providers/transaction_statuses_provider.go b/engine/access/rest/websockets/data_providers/transaction_statuses_provider.go new file mode 100644 index 00000000000..81770055fe6 --- /dev/null +++ 
b/engine/access/rest/websockets/data_providers/transaction_statuses_provider.go
@@ -0,0 +1,147 @@
+package data_providers
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/rs/zerolog"
+
+    "github.com/onflow/flow-go/access"
+    commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models"
+    "github.com/onflow/flow-go/engine/access/rest/common/parser"
+    "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models"
+    wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models"
+    "github.com/onflow/flow-go/engine/access/subscription"
+    accessmodel "github.com/onflow/flow-go/model/access"
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/module/counters"
+
+    "github.com/onflow/flow/protobuf/go/flow/entities"
+)
+
+// transactionStatusesArguments contains the arguments required for subscribing to transaction statuses
+type transactionStatusesArguments struct {
+    TxID flow.Identifier `json:"tx_id"` // ID of the transaction to monitor.
+}
+
+// TransactionStatusesDataProvider is responsible for providing tx statuses
+type TransactionStatusesDataProvider struct {
+    *baseDataProvider
+
+    arguments     transactionStatusesArguments
+    messageIndex  counters.StrictMonotonicCounter
+    linkGenerator commonmodels.LinkGenerator
+}
+
+var _ DataProvider = (*TransactionStatusesDataProvider)(nil)
+
+func NewTransactionStatusesDataProvider(
+    ctx context.Context,
+    logger zerolog.Logger,
+    api access.API,
+    subscriptionID string,
+    linkGenerator commonmodels.LinkGenerator,
+    topic string,
+    rawArguments wsmodels.Arguments,
+    send chan<- interface{},
+) (*TransactionStatusesDataProvider, error) {
+    args, err := parseTransactionStatusesArguments(rawArguments)
+    if err != nil {
+        return nil, fmt.Errorf("invalid arguments for tx statuses data provider: %w", err)
+    }
+    provider := newBaseDataProvider(
+        ctx,
+        logger.With().Str("component", "transaction-statuses-data-provider").Logger(),
+        api,
+        subscriptionID,
+        topic,
+        rawArguments,
+        send,
+    )
+
+    return &TransactionStatusesDataProvider{
+        baseDataProvider: provider,
+        arguments:        args,
+        messageIndex:     counters.NewMonotonicCounter(0),
+        linkGenerator:    linkGenerator,
+    }, nil
+}
+
+// Run starts processing the subscription for transaction statuses and handles responses.
+// Must be called once.
+//
+// No errors are expected during normal operations
+func (p *TransactionStatusesDataProvider) Run() error {
+    return run(
+        p.createAndStartSubscription(p.ctx, p.arguments),
+        p.sendResponse,
+    )
+}
+
+// sendResponse processes a tx status message and sends it to the client's channel.
+// This function is not safe to call concurrently.
+//
+// No errors are expected during normal operations.
+func (p *TransactionStatusesDataProvider) sendResponse(txResults []*accessmodel.TransactionResult) error {
+    for i := range txResults {
+        txStatusesPayload := models.NewTransactionStatusesResponse(p.linkGenerator, txResults[i], p.messageIndex.Value())
+        response := models.BaseDataProvidersResponse{
+            SubscriptionID: p.ID(),
+            Topic:          p.Topic(),
+            Payload:        txStatusesPayload,
+        }
+        p.send <- &response
+
+        p.messageIndex.Increment()
+    }
+
+    return nil
+}
+
+// createAndStartSubscription creates a new subscription using the specified input arguments.
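+// Transaction results delivered on the subscription use JSON-CDC event encoding.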
+func (p *TransactionStatusesDataProvider) createAndStartSubscription(
+    ctx context.Context,
+    args transactionStatusesArguments,
+) subscription.Subscription {
+    return p.api.SubscribeTransactionStatuses(ctx, args.TxID, entities.EventEncodingVersion_JSON_CDC_V0)
+}
+
+// parseTransactionStatusesArguments validates and initializes the transaction statuses arguments.
+func parseTransactionStatusesArguments(
+    arguments wsmodels.Arguments,
+) (transactionStatusesArguments, error) {
+    allowedFields := map[string]struct{}{
+        "tx_id": {},
+    }
+    err := ensureAllowedFields(arguments, allowedFields)
+    if err != nil {
+        return transactionStatusesArguments{}, err
+    }
+
+    var args transactionStatusesArguments
+
+    // Check if tx_id exists and is not empty
+    rawTxID, exists := arguments["tx_id"]
+    if !exists {
+        return transactionStatusesArguments{}, fmt.Errorf("missing 'tx_id' field")
+    }
+
+    // Ensure the transaction ID is a string
+    txIDString, isString := rawTxID.(string)
+    if !isString {
+        return transactionStatusesArguments{}, fmt.Errorf("'tx_id' must be a string")
+    }
+
+    if len(txIDString) == 0 {
+        return transactionStatusesArguments{}, fmt.Errorf("'tx_id' must not be empty")
+    }
+
+    var parsedTxID parser.ID
+    if err = parsedTxID.Parse(txIDString); err != nil {
+        return transactionStatusesArguments{}, fmt.Errorf("invalid 'tx_id': %w", err)
+    }
+
+    // Assign the validated transaction ID to the args
+    args.TxID = parsedTxID.Flow()
+    return args, nil
+}
diff --git a/engine/access/rest/websockets/data_providers/transaction_statuses_provider_test.go b/engine/access/rest/websockets/data_providers/transaction_statuses_provider_test.go
new file mode 100644
index 00000000000..8421a28eafa
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/transaction_statuses_provider_test.go
@@ -0,0 +1,314 @@
+package data_providers
+
+import (
+    "context"
+    "testing"
+    "time"
+
+    "github.com/rs/zerolog"
+    "github.com/stretchr/testify/mock"
+    "github.com/stretchr/testify/require"
+    "github.com/stretchr/testify/suite"
+
+    accessmock "github.com/onflow/flow-go/access/mock"
+    mockcommonmodels "github.com/onflow/flow-go/engine/access/rest/common/models/mock"
+    "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models"
+    wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models"
+    "github.com/onflow/flow-go/engine/access/state_stream"
+    "github.com/onflow/flow-go/engine/access/subscription"
+    submock "github.com/onflow/flow-go/engine/access/subscription/mock"
+    accessmodel "github.com/onflow/flow-go/model/access"
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/utils/unittest"
+
+    "github.com/onflow/flow/protobuf/go/flow/entities"
+)
+
+type TransactionStatusesProviderSuite struct {
+    suite.Suite
+
+    log zerolog.Logger
+    api *accessmock.API
+
+    chain          flow.Chain
+    rootBlock      *flow.Block
+    finalizedBlock *flow.Header
+
+    factory       *DataProviderFactoryImpl
+    linkGenerator *mockcommonmodels.LinkGenerator
+}
+
+func TestNewTransactionStatusesDataProvider(t *testing.T) {
+    suite.Run(t, new(TransactionStatusesProviderSuite))
+}
+
+func (s *TransactionStatusesProviderSuite) SetupTest() {
+    s.log = unittest.Logger()
+    s.api = accessmock.NewAPI(s.T())
+    s.linkGenerator = mockcommonmodels.NewLinkGenerator(s.T())
+    s.chain = flow.Testnet.Chain()
+    s.rootBlock = unittest.Block.Genesis(s.chain.ChainID())
+    s.factory = NewDataProviderFactory(
+        s.log,
+        nil,
+        s.api,
+        s.chain,
+        state_stream.DefaultEventFilterConfig,
+        subscription.DefaultHeartbeatInterval,
+        s.linkGenerator,
) + s.Require().NotNil(s.factory) +} + +// TestTransactionStatusesDataProvider_HappyPath tests the behavior of the transaction statuses data provider +// when it is configured correctly and operating under normal conditions. It +// validates that tx statuses are correctly streamed to the channel and ensures +// no unexpected errors occur. +func (s *TransactionStatusesProviderSuite) TestTransactionStatusesDataProvider_HappyPath() { + backendResponse := backendTransactionStatusesResponse(s.rootBlock) + + s.linkGenerator.On("TransactionResultLink", mock.AnythingOfType("flow.Identifier")).Return( + func(id flow.Identifier) (string, error) { + return "some_link", nil + }, + ) + + testHappyPath( + s.T(), + TransactionStatusesTopic, + s.factory, + s.subscribeTransactionStatusesDataProviderTestCases(backendResponse), + func(dataChan chan interface{}) { + dataChan <- backendResponse + }, + s.requireTransactionStatuses, + ) +} + +func (s *TransactionStatusesProviderSuite) subscribeTransactionStatusesDataProviderTestCases(backendResponses []*accessmodel.TransactionResult) []testType { + expectedResponses := s.expectedTransactionStatusesResponses(backendResponses, TransactionStatusesTopic) + + return []testType{ + { + name: "SubscribeTransactionStatuses happy path", + arguments: wsmodels.Arguments{ + "tx_id": unittest.IdentifierFixture().String(), + }, + setupBackend: func(sub *submock.Subscription) { + s.api.On( + "SubscribeTransactionStatuses", + mock.Anything, + mock.Anything, + entities.EventEncodingVersion_JSON_CDC_V0, + ).Return(sub).Once() + }, + expectedResponses: expectedResponses, + }, + } +} + +// requireTransactionStatuses ensures that the received transaction statuses information matches the expected data. +func (s *TransactionStatusesProviderSuite) requireTransactionStatuses( + actual interface{}, + expected interface{}, +) { + expectedResponse, expectedResponsePayload := extractPayload[*models.TransactionStatusesResponse](s.T(), expected) + actualResponse, actualResponsePayload := extractPayload[*models.TransactionStatusesResponse](s.T(), actual) + + require.Equal(s.T(), expectedResponse.Topic, actualResponse.Topic) + require.Equal(s.T(), expectedResponsePayload.TransactionResult.BlockId, actualResponsePayload.TransactionResult.BlockId) +} + +func backendTransactionStatusesResponse(block *flow.Block) []*accessmodel.TransactionResult { + cid := unittest.IdentifierFixture() + txr := accessmodel.TransactionResult{ + Status: flow.TransactionStatusSealed, + StatusCode: 10, + Events: unittest.EventsFixture(1), + ErrorMessage: "", + BlockID: block.ID(), + CollectionID: cid, + BlockHeight: block.Height, + } + + var expectedTxResultsResponses []*accessmodel.TransactionResult + + for i := 0; i < 2; i++ { + expectedTxResultsResponses = append(expectedTxResultsResponses, &txr) + } + + return expectedTxResultsResponses +} + +// expectedTransactionStatusesResponses creates the expected responses for the provided backend responses. 
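+// Responses are indexed from zero, mirroring the provider's message index counter.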
+func (s *TransactionStatusesProviderSuite) expectedTransactionStatusesResponses(
+    backendResponses []*accessmodel.TransactionResult,
+    topic string,
+) []interface{} {
+    expectedResponses := make([]interface{}, len(backendResponses))
+
+    for i, resp := range backendResponses {
+        expectedResponsePayload := models.NewTransactionStatusesResponse(s.linkGenerator, resp, uint64(i))
+        expectedResponses[i] = &models.BaseDataProvidersResponse{
+            Topic:   topic,
+            Payload: expectedResponsePayload,
+        }
+    }
+
+    return expectedResponses
+}
+
+// TestMessageIndexTransactionStatusesProviderResponse_HappyPath tests that MessageIndex values in response are strictly increasing.
+func (s *TransactionStatusesProviderSuite) TestMessageIndexTransactionStatusesProviderResponse_HappyPath() {
+    send := make(chan interface{}, 10)
+    topic := TransactionStatusesTopic
+    txStatusesCount := 4
+
+    // Create a channel to simulate the subscription's transaction statuses channel
+    txStatusesChan := make(chan interface{})
+
+    // Create a mock subscription and mock the channel
+    sub := submock.NewSubscription(s.T())
+    sub.On("Channel").Return((<-chan interface{})(txStatusesChan))
+    sub.On("Err").Return(nil).Once()
+
+    s.api.On(
+        "SubscribeTransactionStatuses",
+        mock.Anything,
+        mock.Anything,
+        entities.EventEncodingVersion_JSON_CDC_V0,
+    ).Return(sub)
+
+    s.linkGenerator.On("TransactionResultLink", mock.AnythingOfType("flow.Identifier")).Return(
+        func(id flow.Identifier) (string, error) {
+            return "some_link", nil
+        },
+    )
+
+    arguments := map[string]interface{}{
+        "tx_id": unittest.TransactionFixture().ID().String(),
+    }
+
+    // Create the TransactionStatusesDataProvider instance
+    provider, err := NewTransactionStatusesDataProvider(
+        context.Background(),
+        s.log,
+        s.api,
+        "dummy-id",
+        s.linkGenerator,
+        topic,
+        arguments,
+        send,
+    )
+    s.Require().NoError(err)
+    s.Require().NotNil(provider)
+
+    // Ensure the provider is properly closed after the test
+    defer provider.Close()
+
+    // Run the provider in a separate goroutine to simulate subscription processing
+    done := make(chan struct{})
+    go func() {
+        defer close(done)
+        err = provider.Run()
+        s.Require().NoError(err)
+    }()
+
+    // Simulate emitting data to the tx statuses channel
+    var txResults []*accessmodel.TransactionResult
+    for i := 0; i < txStatusesCount; i++ {
+        txResults = append(txResults, &accessmodel.TransactionResult{
+            BlockHeight: s.rootBlock.Height,
+        })
+    }
+
+    go func() {
+        defer close(txStatusesChan) // Close the channel when done
+
+        txStatusesChan <- txResults
+    }()
+
+    // Collect responses
+    var responses []*models.TransactionStatusesResponse
+    for i := 0; i < txStatusesCount; i++ {
+        res := <-send
+        _, txStatusesResData := extractPayload[*models.TransactionStatusesResponse](s.T(), res)
+        responses = append(responses, txStatusesResData)
+    }
+
+    // Wait for the provider goroutine to finish
+    unittest.RequireCloseBefore(s.T(), done, time.Second, "provider failed to stop")
+
+    // Verifying that indices are starting from 0
+    s.Require().Equal(uint64(0), responses[0].MessageIndex, "Expected MessageIndex to start with 0")
+
+    // Verifying that indices are strictly increasing
+    for i := 1; i < len(responses); i++ {
+        prevIndex := responses[i-1].MessageIndex
+        currentIndex := responses[i].MessageIndex
+        s.Require().Equal(prevIndex+1, currentIndex, "Expected MessageIndex to increment by 1")
+    }
+}
+
+// TestTransactionStatusesDataProvider_InvalidArguments tests the behavior of the transaction statuses data provider
+// when invalid arguments are provided. It verifies that appropriate errors are returned
provided. It verifies that appropriate errors are returned
+// for missing or conflicting arguments.
+func (s *TransactionStatusesProviderSuite) TestTransactionStatusesDataProvider_InvalidArguments() {
+	send := make(chan interface{})
+
+	topic := TransactionStatusesTopic
+
+	for _, test := range invalidTransactionStatusesArgumentsTestCases() {
+		s.Run(test.name, func() {
+			provider, err := NewTransactionStatusesDataProvider(
+				context.Background(),
+				s.log,
+				s.api,
+				"dummy-id",
+				s.linkGenerator,
+				topic,
+				test.arguments,
+				send,
+			)
+			s.Require().Error(err)
+			s.Require().Nil(provider)
+			s.Require().Contains(err.Error(), test.expectedErrorMsg)
+		})
+	}
+}
+
+// invalidTransactionStatusesArgumentsTestCases returns a list of test cases with invalid argument combinations
+// for testing the behavior of transaction statuses data providers. Each test case includes a name,
+// a set of input arguments, and the expected error message that should be returned.
+func invalidTransactionStatusesArgumentsTestCases() []testErrType {
+	return []testErrType{
+		{
+			name: "invalid 'tx_id' argument",
+			arguments: map[string]interface{}{
+				"tx_id": "invalid_tx_id",
+			},
+			expectedErrorMsg: "invalid ID format",
+		},
+		{
+			name: "empty 'tx_id' argument",
+			arguments: map[string]interface{}{
+				"tx_id": "",
+			},
+			expectedErrorMsg: "'tx_id' must not be empty",
+		},
+		{
+			name: "missing 'tx_id' argument",
+			arguments: map[string]interface{}{},
+			expectedErrorMsg: "missing 'tx_id' field",
+		},
+		{
+			name: "unexpected argument",
+			arguments: map[string]interface{}{
+				"unexpected_argument": "dummy",
+				"tx_id": unittest.TransactionFixture().ID().String(),
+			},
+			expectedErrorMsg: "unexpected field: 'unexpected_argument'",
+		},
+	}
+}
diff --git a/engine/access/rest/websockets/data_providers/unit_test.go b/engine/access/rest/websockets/data_providers/unit_test.go
new file mode 100644
index 00000000000..de9abde3cba
--- /dev/null
+++ b/engine/access/rest/websockets/data_providers/unit_test.go
@@ -0,0 +1,214 @@
+package data_providers
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models"
+	wsmodels "github.com/onflow/flow-go/engine/access/rest/websockets/models"
+	submock "github.com/onflow/flow-go/engine/access/subscription/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// testType represents a valid test scenario for subscribing
+type testType struct {
+	name string
+	arguments wsmodels.Arguments
+	setupBackend func(sub *submock.Subscription)
+	expectedResponses []interface{}
+}
+
+// testErrType represents an error case for subscribing
+type testErrType struct {
+	name string
+	arguments wsmodels.Arguments
+	expectedErrorMsg string
+}
+
+// testHappyPath runs parameterized happy-path test cases for data providers. It
+// simulates various configurations and verifies that the data provider operates
+// as expected without encountering errors.
+//
+// Arguments:
+// - t: The testing context.
+// - topic: The topic associated with the data provider.
+// - factory: A factory for creating data provider instances.
+// - tests: A slice of test cases to run, each specifying setup and validation logic.
+// - sendData: A function to simulate emitting data into the subscription's data channel.
+// - requireFn: A function to validate the output received in the send channel.
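+//
+// A typical invocation, mirroring how the transaction statuses suite in this
+// package wires it up:
+//
+//	testHappyPath(
+//		s.T(),
+//		TransactionStatusesTopic,
+//		s.factory,
+//		s.subscribeTransactionStatusesDataProviderTestCases(backendResponse),
+//		func(dataChan chan interface{}) { dataChan <- backendResponse },
+//		s.requireTransactionStatuses,
+//	)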
+func testHappyPath( + t *testing.T, + topic string, + factory *DataProviderFactoryImpl, + tests []testType, + sendData func(chan interface{}), + requireFn func(interface{}, interface{}), +) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + send := make(chan interface{}, 10) + + // Create a channel to simulate the subscription's data channel + dataChan := make(chan interface{}) + + // Create a mock subscription and mock the channel + sub := submock.NewSubscription(t) + sub.On("Channel").Return((<-chan interface{})(dataChan)) + sub.On("Err").Return(nil) + test.setupBackend(sub) + + // Create the data provider instance + provider, err := factory.NewDataProvider(context.Background(), "dummy-id", topic, test.arguments, send) + require.NoError(t, err) + require.NotNil(t, provider) + + // Ensure the provider is properly closed after the test + defer provider.Close() + + // Run the provider in a separate goroutine + done := make(chan struct{}) + go func() { + defer close(done) + err = provider.Run() + require.NoError(t, err) + }() + + // Simulate emitting data to the data channel + go func() { + defer close(dataChan) + sendData(dataChan) + }() + + // Wait for the provider goroutine to finish + unittest.RequireCloseBefore(t, done, time.Second, "provider failed to stop") + + // Collect responses + for i, expected := range test.expectedResponses { + unittest.RequireReturnsBefore(t, func() { + v, ok := <-send + require.True(t, ok, "channel closed while waiting for response %v: err: %v", expected, sub.Err()) + + requireFn(v, expected) + }, time.Second, fmt.Sprintf("timed out waiting for response %d %v", i, expected)) + } + }) + } +} + +// extractPayload extracts the BaseDataProvidersResponse and its typed Payload. +func extractPayload[T any](t *testing.T, v interface{}) (*models.BaseDataProvidersResponse, T) { + response, ok := v.(*models.BaseDataProvidersResponse) + require.True(t, ok, "Expected *models.BaseDataProvidersResponse, got %T", v) + + payload, ok := response.Payload.(T) + var empty T + require.True(t, ok, "Unexpected response payload type, got: %T, expect: %T", response.Payload, empty) + + return response, payload +} + +func TestEnsureAllowedFields(t *testing.T) { + t.Parallel() + + allowedFields := map[string]struct{}{ + "start_block_id": {}, + "start_block_height": {}, + "event_types": {}, + "account_addresses": {}, + "heartbeat_interval": {}, + } + + t.Run("Valid fields with all required", func(t *testing.T) { + fields := map[string]interface{}{ + "start_block_id": "abc", + "start_block_height": 123, + "event_types": []string{"flow.Event"}, + "account_addresses": []string{"0x1"}, + "heartbeat_interval": 10, + } + if err := ensureAllowedFields(fields, allowedFields); err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + + t.Run("Unexpected field present", func(t *testing.T) { + fields := map[string]interface{}{ + "start_block_id": "abc", + "start_block_height": 123, + "unknown_field": "unexpected", + } + if err := ensureAllowedFields(fields, allowedFields); err == nil { + t.Error("expected error for unexpected field, got nil") + } + }) +} + +func TestExtractArrayOfStrings(t *testing.T) { + tests := []struct { + name string + args wsmodels.Arguments + key string + required bool + expect []string + expectErr bool + }{ + { + name: "Valid string array", + args: wsmodels.Arguments{"tags": []string{"a", "b"}}, + key: "tags", + required: true, + expect: []string{"a", "b"}, + expectErr: false, + }, + { + name: "Missing required key", + args: 
wsmodels.Arguments{}, + key: "tags", + required: true, + expect: nil, + expectErr: true, + }, + { + name: "Missing optional key", + args: wsmodels.Arguments{}, + key: "tags", + required: false, + expect: []string{}, + expectErr: false, + }, + { + name: "Invalid type in array", + args: wsmodels.Arguments{"tags": []interface{}{"a", 123}}, + key: "tags", + required: true, + expect: nil, + expectErr: true, + }, + { + name: "Nil value", + args: wsmodels.Arguments{"tags": nil}, + key: "tags", + required: false, + expect: nil, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := extractArrayOfStrings(tt.args, tt.key, tt.required) + if (err != nil) != tt.expectErr { + t.Fatalf("unexpected error status. got: %v, want error: %v", err, tt.expectErr) + } + if !reflect.DeepEqual(result, tt.expect) { + t.Fatalf("unexpected result. got: %v, want: %v", result, tt.expect) + } + }) + } +} diff --git a/engine/access/rest/websockets/handler.go b/engine/access/rest/websockets/handler.go new file mode 100644 index 00000000000..951e56e7896 --- /dev/null +++ b/engine/access/rest/websockets/handler.go @@ -0,0 +1,74 @@ +package websockets + +import ( + "net/http" + + "github.com/gorilla/websocket" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/access/rest/common" + dp "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" +) + +type Handler struct { + *common.HttpHandler + + // ctx holds the irrecoverable context used to start the REST server + // typically we do not store contexts within a struct. it is necessary in this case + // because we need to pass an irrecoverable context into the API backend logic to + // handle exceptions, and we cannot use the request's context since the websocket + // connection lives longer than the request duration. 
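+	// (note: an http.Request's context is cancelled as soon as ServeHTTP returns,
+	// which would prematurely tear down backend subscriptions for a long-lived websocket)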
+ ctx irrecoverable.SignalerContext + logger zerolog.Logger + websocketConfig Config + dataProviderFactory dp.DataProviderFactory +} + +var _ http.Handler = (*Handler)(nil) + +func NewWebSocketHandler( + ctx irrecoverable.SignalerContext, + logger zerolog.Logger, + config Config, + chain flow.Chain, + maxRequestSize int64, + maxResponseSize int64, + dataProviderFactory dp.DataProviderFactory, +) *Handler { + return &Handler{ + ctx: ctx, + HttpHandler: common.NewHttpHandler(logger, chain, maxRequestSize, maxResponseSize), + websocketConfig: config, + logger: logger, + dataProviderFactory: dataProviderFactory, + } +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + logger := h.HttpHandler.Logger.With().Str("component", "websocket-handler").Logger() + + err := h.HttpHandler.VerifyRequest(w, r) + if err != nil { + // VerifyRequest sets the response error before returning + logger.Debug().Err(err).Msg("error validating websocket request") + return + } + + upgrader := websocket.Upgrader{ + // allow all origins by default, operators can override using a proxy + CheckOrigin: func(r *http.Request) bool { + return true + }, + } + + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + h.HttpHandler.ErrorHandler(w, common.NewRestError(http.StatusInternalServerError, "webSocket upgrade error: ", err), logger) + return + } + + controller := NewWebSocketController(logger, h.websocketConfig, NewWebsocketConnection(conn), h.dataProviderFactory) + controller.HandleConnection(h.ctx) +} diff --git a/engine/access/rest/websockets/legacy/request/subscribe_events.go b/engine/access/rest/websockets/legacy/request/subscribe_events.go new file mode 100644 index 00000000000..bd246a45209 --- /dev/null +++ b/engine/access/rest/websockets/legacy/request/subscribe_events.go @@ -0,0 +1,106 @@ +package request + +import ( + "fmt" + "strconv" + + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/model/flow" +) + +const startHeightQuery = "start_height" +const startBlockIdQuery = "start_block_id" +const eventTypesQuery = "event_types" +const addressesQuery = "addresses" +const contractsQuery = "contracts" +const heartbeatIntervalQuery = "heartbeat_interval" + +type SubscribeEvents struct { + StartBlockID flow.Identifier + StartHeight uint64 + + EventTypes []string + Addresses []string + Contracts []string + + HeartbeatInterval uint64 +} + +// SubscribeEventsRequest extracts necessary variables from the provided request, +// builds a SubscribeEvents instance, and validates it. +// +// No errors are expected during normal operation. 
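+//
+// An example query this parser accepts (hypothetical values):
+//
+//	/v1/subscribe_events?start_height=100&event_types=A.0102030405060708.SomeContract.SomeEvent&heartbeat_interval=2
+//
+// Note that start_block_id and start_height are mutually exclusive.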
+func SubscribeEventsRequest(r *common.Request) (SubscribeEvents, error) {
+	var req SubscribeEvents
+	err := req.Build(r)
+	return req, err
+}
+
+func (g *SubscribeEvents) Build(r *common.Request) error {
+	return g.Parse(
+		r.GetQueryParam(startBlockIdQuery),
+		r.GetQueryParam(startHeightQuery),
+		r.GetQueryParams(eventTypesQuery),
+		r.GetQueryParams(addressesQuery),
+		r.GetQueryParams(contractsQuery),
+		r.GetQueryParam(heartbeatIntervalQuery),
+	)
+}
+
+func (g *SubscribeEvents) Parse(
+	rawStartBlockID string,
+	rawStartHeight string,
+	rawTypes []string,
+	rawAddresses []string,
+	rawContracts []string,
+	rawHeartbeatInterval string,
+) error {
+	var startBlockID parser.ID
+	err := startBlockID.Parse(rawStartBlockID)
+	if err != nil {
+		return err
+	}
+	g.StartBlockID = startBlockID.Flow()
+
+	var height request.Height
+	err = height.Parse(rawStartHeight)
+	if err != nil {
+		return fmt.Errorf("invalid start height: %w", err)
+	}
+	g.StartHeight = height.Flow()
+
+	// if both start_block_id and start_height are provided
+	if g.StartBlockID != flow.ZeroID && g.StartHeight != request.EmptyHeight {
+		return fmt.Errorf("can only provide either block ID or start height")
+	}
+
+	// default to root block
+	if g.StartHeight == request.EmptyHeight {
+		g.StartHeight = 0
+	}
+
+	eventTypes, err := parser.NewEventTypes(rawTypes)
+	if err != nil {
+		return err
+	}
+
+	g.EventTypes = eventTypes.Flow()
+	g.Addresses = rawAddresses
+	g.Contracts = rawContracts
+
+	// parse heartbeat interval
+	if rawHeartbeatInterval == "" {
+		// set zero if the interval wasn't passed in the request, so we can check it later and apply a default value if needed
+		g.HeartbeatInterval = 0
+		return nil
+	}
+
+	g.HeartbeatInterval, err = strconv.ParseUint(rawHeartbeatInterval, 10, 64)
+	if err != nil {
+		return fmt.Errorf("invalid heartbeat interval format: %w", err)
+	}
+
+	return nil
+}
diff --git a/engine/access/rest/websockets/legacy/routes/subscribe_events.go b/engine/access/rest/websockets/legacy/routes/subscribe_events.go
new file mode 100644
index 00000000000..9d159e7bdd3
--- /dev/null
+++ b/engine/access/rest/websockets/legacy/routes/subscribe_events.go
@@ -0,0 +1,41 @@
+package routes
+
+import (
+	"context"
+
+	"github.com/onflow/flow-go/engine/access/rest/common"
+	"github.com/onflow/flow-go/engine/access/rest/websockets/legacy"
+	"github.com/onflow/flow-go/engine/access/rest/websockets/legacy/request"
+	"github.com/onflow/flow-go/engine/access/state_stream"
+	"github.com/onflow/flow-go/engine/access/subscription"
+)
+
+// SubscribeEvents creates a websocket connection and writes the requested events to it.
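+// A client-supplied heartbeat_interval (measured in blocks) overrides the controller's default.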
+func SubscribeEvents(
+	ctx context.Context,
+	r *common.Request,
+	wsController *legacy.WebsocketController,
+) (subscription.Subscription, error) {
+	req, err := request.SubscribeEventsRequest(r)
+	if err != nil {
+		return nil, common.NewBadRequestError(err)
+	}
+	// Retrieve the filter parameters from the request, if provided
+	filter, err := state_stream.NewEventFilter(
+		wsController.EventFilterConfig,
+		r.Chain,
+		req.EventTypes,
+		req.Addresses,
+		req.Contracts,
+	)
+	if err != nil {
+		return nil, common.NewBadRequestError(err)
+	}
+
+	// Check if heartbeat interval was passed via request
+	if req.HeartbeatInterval > 0 {
+		wsController.HeartbeatInterval = req.HeartbeatInterval
+	}
+
+	return wsController.Api.SubscribeEvents(ctx, req.StartBlockID, req.StartHeight, filter), nil
+}
diff --git a/engine/access/rest/websockets/legacy/routes/subscribe_events_test.go b/engine/access/rest/websockets/legacy/routes/subscribe_events_test.go
new file mode 100644
index 00000000000..aa59ffc8d99
--- /dev/null
+++ b/engine/access/rest/websockets/legacy/routes/subscribe_events_test.go
@@ -0,0 +1,441 @@
+package routes_test
+
+import (
+	"crypto/rand"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+
+	"golang.org/x/exp/slices"
+
+	jsoncdc "github.com/onflow/cadence/encoding/json"
+	mocks "github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/onflow/flow-go/engine/access/rest/http/request"
+	"github.com/onflow/flow-go/engine/access/rest/router"
+	"github.com/onflow/flow-go/engine/access/state_stream"
+	"github.com/onflow/flow-go/engine/access/state_stream/backend"
+	ssmock "github.com/onflow/flow-go/engine/access/state_stream/mock"
+	submock "github.com/onflow/flow-go/engine/access/subscription/mock"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+type testType struct {
+	name string
+	startBlockID flow.Identifier
+	startHeight uint64
+
+	eventTypes []string
+	addresses []string
+	contracts []string
+
+	heartbeatInterval uint64
+
+	headers http.Header
+}
+
+var chainID = flow.Testnet
+var testEventTypes = []flow.EventType{
+	unittest.EventTypeFixture(chainID),
+	unittest.EventTypeFixture(chainID),
+	unittest.EventTypeFixture(chainID),
+}
+
+type SubscribeEventsSuite struct {
+	suite.Suite
+
+	blocks []*flow.Block
+	blockEvents map[flow.Identifier]flow.EventsList
+}
+
+func TestSubscribeEventsSuite(t *testing.T) {
+	suite.Run(t, new(SubscribeEventsSuite))
+}
+
+func (s *SubscribeEventsSuite) SetupTest() {
+	rootBlock := unittest.BlockFixture()
+	parent := rootBlock.ToHeader()
+
+	blockCount := 5
+
+	s.blocks = make([]*flow.Block, 0, blockCount)
+	s.blockEvents = make(map[flow.Identifier]flow.EventsList, blockCount)
+
+	for i := 0; i < blockCount; i++ {
+		block := unittest.BlockWithParentFixture(parent)
+		// update for next iteration
+		parent = block.ToHeader()
+
+		result := unittest.ExecutionResultFixture()
+
+		s.blocks = append(s.blocks, block)
+
+		var events []flow.Event
+		for j := 0; j < len(testEventTypes); j++ {
+			events = append(events, unittest.EventFixture(
+				unittest.Event.WithEventType(testEventTypes[j]),
+			))
+		}
+
+		s.blockEvents[block.ID()] = events
+
+		s.T().Logf("adding exec data for block %d %d %v => %v", i, block.Height, block.ID(), result.ExecutionDataID)
+	}
+}
+
+// TestSubscribeEvents is a happy-path test of the SubscribeEvents functionality.
+// This test function covers various scenarios for subscribing to events via WebSocket. +// +// It tests scenarios: +// - Subscribing to events from the root height. +// - Subscribing to events from a specific start height. +// - Subscribing to events from a specific start block ID. +// - Subscribing to events from the root height with custom heartbeat interval. +// +// Every scenario covers the following aspects: +// - Subscribing to all events. +// - Subscribing to events of a specific type (some events). +// +// For each scenario, this test function creates WebSocket requests, simulates WebSocket responses with mock data, +// and validates that the received WebSocket response matches the expected EventsResponses. +func (s *SubscribeEventsSuite) TestSubscribeEvents() { + testVectors := []testType{ + { + name: "happy path - all events from root height", + startBlockID: flow.ZeroID, + startHeight: request.EmptyHeight, + heartbeatInterval: 1, + }, + { + name: "happy path - all events from startHeight", + startBlockID: flow.ZeroID, + startHeight: s.blocks[0].Height, + heartbeatInterval: 1, + }, + { + name: "happy path - all events from startBlockID", + startBlockID: s.blocks[0].ID(), + startHeight: request.EmptyHeight, + heartbeatInterval: 1, + }, + { + name: "happy path - events from root height with custom heartbeat", + startBlockID: flow.ZeroID, + startHeight: request.EmptyHeight, + heartbeatInterval: 2, + }, + { + name: "happy path - all origins allowed", + startBlockID: flow.ZeroID, + startHeight: request.EmptyHeight, + heartbeatInterval: 1, + headers: http.Header{ + "Origin": []string{"https://example.com"}, + }, + }, + } + + // create variations for each of the base test + tests := make([]testType, 0, len(testVectors)*2) + for _, test := range testVectors { + t1 := test + t1.name = fmt.Sprintf("%s - all events", test.name) + tests = append(tests, t1) + + t2 := test + t2.name = fmt.Sprintf("%s - some events", test.name) + t2.eventTypes = []string{string(testEventTypes[0])} + tests = append(tests, t2) + + t3 := test + t3.name = fmt.Sprintf("%s - non existing events", test.name) + t3.eventTypes = []string{fmt.Sprintf("%s_unknown", unittest.EventTypeFixture(chainID))} + tests = append(tests, t3) + } + + for _, test := range tests { + s.Run(test.name, func() { + stateStreamBackend := ssmock.NewAPI(s.T()) + subscription := submock.NewSubscription(s.T()) + + filter, err := state_stream.NewEventFilter( + state_stream.DefaultEventFilterConfig, + chainID.Chain(), + test.eventTypes, + test.addresses, + test.contracts) + require.NoError(s.T(), err) + + var expectedEventsResponses []*backend.EventsResponse + var subscriptionEventsResponses []*backend.EventsResponse + startBlockFound := test.startBlockID == flow.ZeroID + + // construct expected event responses based on the provided test configuration + for i, block := range s.blocks { + blockID := block.ID() + if startBlockFound || blockID == test.startBlockID { + startBlockFound = true + if test.startHeight == request.EmptyHeight || block.Height >= test.startHeight { + // track 2 lists, one for the expected results and one that is passed back + // from the subscription to the handler. 
These cannot be shared since the + // response struct is passed by reference from the mock to the handler, so + // a bug within the handler could go unnoticed + expectedEvents := flow.EventsList{} + subscriptionEvents := flow.EventsList{} + for _, event := range s.blockEvents[blockID] { + if slices.Contains(test.eventTypes, string(event.Type)) || + len(test.eventTypes) == 0 { // Include all events + expectedEvents = append(expectedEvents, event) + subscriptionEvents = append(subscriptionEvents, event) + } + } + if len(expectedEvents) > 0 || (i+1)%int(test.heartbeatInterval) == 0 { + expectedEventsResponses = append(expectedEventsResponses, &backend.EventsResponse{ + Height: block.Height, + BlockID: blockID, + Events: expectedEvents, + BlockTimestamp: time.UnixMilli(int64(block.Timestamp)).UTC(), + }) + } + subscriptionEventsResponses = append(subscriptionEventsResponses, &backend.EventsResponse{ + Height: block.Height, + BlockID: blockID, + Events: subscriptionEvents, + BlockTimestamp: time.UnixMilli(int64(block.Timestamp)).UTC(), + }) + } + } + } + + // Create a channel to receive mock EventsResponse objects + ch := make(chan interface{}) + var chReadOnly <-chan interface{} + // Simulate sending a mock EventsResponse + go func() { + for _, eventResponse := range subscriptionEventsResponses { + // Send the mock EventsResponse through the channel + ch <- eventResponse + } + }() + + chReadOnly = ch + subscription.Mock.On("Channel").Return(chReadOnly) + + var startHeight uint64 + if test.startHeight == request.EmptyHeight { + startHeight = uint64(0) + } else { + startHeight = test.startHeight + } + stateStreamBackend.Mock. + On("SubscribeEvents", mocks.Anything, test.startBlockID, startHeight, filter). + Return(subscription) + + req, err := getSubscribeEventsRequest(s.T(), test.startBlockID, test.startHeight, test.eventTypes, test.addresses, test.contracts, test.heartbeatInterval, test.headers) + require.NoError(s.T(), err) + respRecorder := router.NewTestHijackResponseRecorder() + // closing the connection after 1 second + go func() { + time.Sleep(1 * time.Second) + respRecorder.Close() + }() + router.ExecuteLegacyWsRequest(req, stateStreamBackend, respRecorder, chainID.Chain()) + requireResponse(s.T(), respRecorder, expectedEventsResponses) + }) + } +} + +func (s *SubscribeEventsSuite) TestSubscribeEventsHandlesErrors() { + s.Run("returns error for block id and height", func() { + stateStreamBackend := ssmock.NewAPI(s.T()) + req, err := getSubscribeEventsRequest(s.T(), s.blocks[0].ID(), s.blocks[0].Height, nil, nil, nil, 1, nil) + require.NoError(s.T(), err) + respRecorder := router.NewTestHijackResponseRecorder() + router.ExecuteLegacyWsRequest(req, stateStreamBackend, respRecorder, chainID.Chain()) + requireError(s.T(), respRecorder, "can only provide either block ID or start height") + }) + + s.Run("returns error for invalid block id", func() { + stateStreamBackend := ssmock.NewAPI(s.T()) + invalidBlock := unittest.BlockFixture() + subscription := submock.NewSubscription(s.T()) + + ch := make(chan interface{}) + var chReadOnly <-chan interface{} + go func() { + close(ch) + }() + chReadOnly = ch + + subscription.Mock.On("Channel").Return(chReadOnly) + subscription.Mock.On("Err").Return(fmt.Errorf("subscription error")) + stateStreamBackend.Mock. + On("SubscribeEvents", mocks.Anything, invalidBlock.ID(), uint64(0), mocks.Anything). 
+ Return(subscription) + + req, err := getSubscribeEventsRequest(s.T(), invalidBlock.ID(), request.EmptyHeight, nil, nil, nil, 1, nil) + require.NoError(s.T(), err) + respRecorder := router.NewTestHijackResponseRecorder() + router.ExecuteLegacyWsRequest(req, stateStreamBackend, respRecorder, chainID.Chain()) + requireError(s.T(), respRecorder, "stream encountered an error: subscription error") + }) + + s.Run("returns error for invalid event filter", func() { + stateStreamBackend := ssmock.NewAPI(s.T()) + req, err := getSubscribeEventsRequest(s.T(), s.blocks[0].ID(), request.EmptyHeight, []string{"foo"}, nil, nil, 1, nil) + require.NoError(s.T(), err) + respRecorder := router.NewTestHijackResponseRecorder() + router.ExecuteLegacyWsRequest(req, stateStreamBackend, respRecorder, chainID.Chain()) + requireError(s.T(), respRecorder, "invalid event type format") + }) + + s.Run("returns error when channel closed", func() { + stateStreamBackend := ssmock.NewAPI(s.T()) + subscription := submock.NewSubscription(s.T()) + + ch := make(chan interface{}) + var chReadOnly <-chan interface{} + + go func() { + close(ch) + }() + chReadOnly = ch + + subscription.Mock.On("Channel").Return(chReadOnly) + subscription.Mock.On("Err").Return(nil) + stateStreamBackend.Mock. + On("SubscribeEvents", mocks.Anything, s.blocks[0].ID(), uint64(0), mocks.Anything). + Return(subscription) + + req, err := getSubscribeEventsRequest(s.T(), s.blocks[0].ID(), request.EmptyHeight, nil, nil, nil, 1, nil) + require.NoError(s.T(), err) + respRecorder := router.NewTestHijackResponseRecorder() + router.ExecuteLegacyWsRequest(req, stateStreamBackend, respRecorder, chainID.Chain()) + requireError(s.T(), respRecorder, "subscription channel closed") + }) +} + +func getSubscribeEventsRequest(t *testing.T, + startBlockId flow.Identifier, + startHeight uint64, + eventTypes []string, + addresses []string, + contracts []string, + heartbeatInterval uint64, + header http.Header, +) (*http.Request, error) { + u, _ := url.Parse("/v1/subscribe_events") + q := u.Query() + + if startBlockId != flow.ZeroID { + q.Add(router.StartBlockIdQueryParam, startBlockId.String()) + } + + if startHeight != request.EmptyHeight { + q.Add(router.StartHeightQueryParam, fmt.Sprintf("%d", startHeight)) + } + + if len(eventTypes) > 0 { + q.Add(router.EventTypesQueryParams, strings.Join(eventTypes, ",")) + } + if len(addresses) > 0 { + q.Add(router.AddressesQueryParams, strings.Join(addresses, ",")) + } + if len(contracts) > 0 { + q.Add(router.ContractsQueryParams, strings.Join(contracts, ",")) + } + + q.Add(router.HeartbeatIntervalQueryParam, fmt.Sprintf("%d", heartbeatInterval)) + + u.RawQuery = q.Encode() + key, err := generateWebSocketKey() + if err != nil { + err := fmt.Errorf("error generating websocket key: %v", err) + return nil, err + } + + req, err := http.NewRequest("GET", u.String(), nil) + require.NoError(t, err) + + req.Header.Set("Connection", "upgrade") + req.Header.Set("Upgrade", "websocket") + req.Header.Set("Sec-Websocket-Version", "13") + req.Header.Set("Sec-Websocket-Key", key) + + for k, v := range header { + req.Header.Set(k, v[0]) + } + + return req, nil +} + +func generateWebSocketKey() (string, error) { + // Generate 16 random bytes. + keyBytes := make([]byte, 16) + if _, err := rand.Read(keyBytes); err != nil { + return "", err + } + + // Encode the bytes to base64 and return the key as a string. 
+ return base64.StdEncoding.EncodeToString(keyBytes), nil +} + +func requireError(t *testing.T, recorder *router.TestHijackResponseRecorder, expected string) { + <-recorder.Closed + require.Contains(t, recorder.ResponseBuff.String(), expected) +} + +// requireResponse validates that the response received from WebSocket communication matches the expected EventsResponse. +// This function compares the BlockID, Events count, and individual event properties for each expected and actual +// EventsResponse. It ensures that the response received from WebSocket matches the expected structure and content. +func requireResponse(t *testing.T, recorder *router.TestHijackResponseRecorder, expected []*backend.EventsResponse) { + <-recorder.Closed + // Convert the actual response from respRecorder to JSON bytes + actualJSON := recorder.ResponseBuff.Bytes() + // Define a regular expression pattern to match JSON objects + pattern := `\{"BlockID":".*?","Height":\d+,"Events":\[(\{.*?})*\],"BlockTimestamp":".*?"\}` + matches := regexp.MustCompile(pattern).FindAll(actualJSON, -1) + + // Unmarshal each matched JSON into []state_stream.EventsResponse + var actual []backend.EventsResponse + for _, match := range matches { + var response backend.EventsResponse + if err := json.Unmarshal(match, &response); err == nil { + actual = append(actual, response) + } + } + + // Compare the count of expected and actual responses + require.Equal(t, len(expected), len(actual)) + + // Compare the BlockID and Events count for each response + for responseIndex := range expected { + expectedEventsResponse := expected[responseIndex] + actualEventsResponse := actual[responseIndex] + + require.Equal(t, expectedEventsResponse.BlockID, actualEventsResponse.BlockID) + require.Equal(t, len(expectedEventsResponse.Events), len(actualEventsResponse.Events)) + + for eventIndex, expectedEvent := range expectedEventsResponse.Events { + actualEvent := actualEventsResponse.Events[eventIndex] + require.Equal(t, expectedEvent.Type, actualEvent.Type) + require.Equal(t, expectedEvent.TransactionID, actualEvent.TransactionID) + require.Equal(t, expectedEvent.TransactionIndex, actualEvent.TransactionIndex) + require.Equal(t, expectedEvent.EventIndex, actualEvent.EventIndex) + // payload is not expected to match, but it should decode + + // payload must decode to valid json-cdc encoded data + _, err := jsoncdc.Decode(nil, actualEvent.Payload) + require.NoError(t, err) + } + } +} diff --git a/engine/access/rest/websockets/legacy/websocket_handler.go b/engine/access/rest/websockets/legacy/websocket_handler.go new file mode 100644 index 00000000000..37e30d53d49 --- /dev/null +++ b/engine/access/rest/websockets/legacy/websocket_handler.go @@ -0,0 +1,340 @@ +package legacy + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/gorilla/websocket" + "github.com/rs/zerolog" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/websockets" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" +) + +// WebsocketController holds the necessary components and parameters for handling a WebSocket subscription. +// It manages the communication between the server and the WebSocket client for subscribing. 
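+// Each controller serves a single event subscription over one connection (contrast
+// with the replacement noted below, which multiplexes subscriptions).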
+//
+// Deprecated: Use websockets.Controller which allows managing multiple subscriptions with a single connection.
+type WebsocketController struct {
+	logger zerolog.Logger
+	conn *websocket.Conn // the WebSocket connection for communication with the client
+	Api state_stream.API // the state_stream.API instance for managing event subscriptions
+	EventFilterConfig state_stream.EventFilterConfig // the configuration for filtering events
+	maxStreams int32 // the maximum number of streams allowed
+	activeStreamCount *atomic.Int32 // the current number of active streams
+	readChannel chan error // channel that signals when the client closes the connection and carries read errors back to the handler
+	HeartbeatInterval uint64 // the interval, in blocks, at which heartbeat messages are delivered to the client
+}
+
+// SetWebsocketConf sets read and write deadlines for the WebSocket connection and establishes a Pong handler to
+// manage incoming Pong messages. The deadlines specify a time limit for reading from or writing to the WebSocket
+// connection; if the operation (reading or writing) takes longer than the specified deadline, the connection is closed.
+func (wsController *WebsocketController) SetWebsocketConf() error {
+	err := wsController.conn.SetWriteDeadline(time.Now().Add(websockets.WriteWait)) // Set the initial write deadline for the first ping message
+	if err != nil {
+		return common.NewRestError(http.StatusInternalServerError, "Set the initial write deadline error: ", err)
+	}
+	err = wsController.conn.SetReadDeadline(time.Now().Add(websockets.PongWait)) // Set the initial read deadline for the first pong message
+	if err != nil {
+		return common.NewRestError(http.StatusInternalServerError, "Set the initial read deadline error: ", err)
+	}
+	// Establish a Pong handler
+	wsController.conn.SetPongHandler(func(string) error {
+		err := wsController.conn.SetReadDeadline(time.Now().Add(websockets.PongWait))
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	return nil
+}
+
+// wsErrorHandler handles WebSocket errors by sending an appropriate close message
+// to the client WebSocket connection.
+//
+// If the error is an instance of models.StatusError, the function extracts the
+// relevant information like status code and user message to construct the WebSocket
+// close code and message. If the error is not a models.StatusError, a default
+// internal server error close code and the error's message are used.
+// The connection is then closed using WriteControl to send a CloseMessage with the
+// constructed close code and message. Any errors that occur during the closing
+// process are logged using the provided logger.
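+//
+// For example, a StatusError carrying http.StatusBadRequest is surfaced to the client
+// as close code 1003 (websocket.CloseUnsupportedData), while any other error type maps
+// to close code 1011 (websocket.CloseInternalServerErr).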
+func (wsController *WebsocketController) wsErrorHandler(err error) { + // rest status type error should be returned with status and user message provided + var statusErr common.StatusError + var wsCode int + var wsMsg string + + if errors.As(err, &statusErr) { + if statusErr.Status() == http.StatusBadRequest { + wsCode = websocket.CloseUnsupportedData + } + if statusErr.Status() == http.StatusServiceUnavailable { + wsCode = websocket.CloseTryAgainLater + } + if statusErr.Status() == http.StatusRequestTimeout { + wsCode = websocket.CloseGoingAway + } + wsMsg = statusErr.UserMessage() + + } else { + wsCode = websocket.CloseInternalServerErr + wsMsg = err.Error() + } + + // Close the connection with the CloseError message + err = wsController.conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(wsCode, wsMsg), time.Now().Add(time.Second)) + if err != nil { + wsController.logger.Error().Err(err).Msg(fmt.Sprintf("error sending WebSocket error: %v", err)) + } +} + +// writeEvents is used for writing events and pings to the WebSocket connection for a given subscription. +// It listens to the subscription's channel for events and writes them to the WebSocket connection. +// If an error occurs or the subscription channel is closed, it handles the error or termination accordingly. +// The function uses a ticker to periodically send ping messages to the client to maintain the connection. +func (wsController *WebsocketController) writeEvents(sub subscription.Subscription) { + ticker := time.NewTicker(websockets.PingPeriod) + defer ticker.Stop() + + blocksSinceLastMessage := uint64(0) + for { + select { + case err := <-wsController.readChannel: + // we use `readChannel` + // 1) as indicator of client's status, when `readChannel` closes it means that client + // connection has been terminated and we need to stop this goroutine to avoid memory leak. + // 2) as error receiver for any errors that occur during the reading process + if err != nil { + wsController.wsErrorHandler(err) + } + return + case event, ok := <-sub.Channel(): + if !ok { + if sub.Err() != nil { + err := fmt.Errorf("stream encountered an error: %v", sub.Err()) + wsController.wsErrorHandler(err) + return + } + err := fmt.Errorf("subscription channel closed, no error occurred") + wsController.wsErrorHandler(common.NewRestError(http.StatusRequestTimeout, "subscription channel closed", err)) + return + } + err := wsController.conn.SetWriteDeadline(time.Now().Add(websockets.WriteWait)) + if err != nil { + wsController.wsErrorHandler(common.NewRestError(http.StatusInternalServerError, "failed to set the initial write deadline: ", err)) + return + } + + resp, ok := event.(*backend.EventsResponse) + if !ok { + err = fmt.Errorf("unexpected response type: %s", event) + wsController.wsErrorHandler(err) + return + } + // responses with empty events increase heartbeat interval counter, when threshold is met a heartbeat + // message will be emitted. + if len(resp.Events) == 0 { + blocksSinceLastMessage++ + if blocksSinceLastMessage < wsController.HeartbeatInterval { + continue + } + } + blocksSinceLastMessage = 0 + + // EventsResponse contains CCF encoded events, and this API returns JSON-CDC events. + // convert event payload formats. 
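+			// (only the Payload field changes encoding here; Type, TransactionID,
+			// TransactionIndex, and EventIndex are carried over verbatim when the
+			// event is rebuilt below)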
+			for i, e := range resp.Events {
+				payload, err := convert.CcfPayloadToJsonPayload(e.Payload)
+				if err != nil {
+					err = fmt.Errorf("could not convert event payload from CCF to Json: %w", err)
+					wsController.wsErrorHandler(err)
+					return
+				}
+				convertedEvent, err := flow.NewEvent(
+					flow.UntrustedEvent{
+						Type: e.Type,
+						TransactionID: e.TransactionID,
+						TransactionIndex: e.TransactionIndex,
+						EventIndex: e.EventIndex,
+						Payload: payload,
+					},
+				)
+				if err != nil {
+					wsController.wsErrorHandler(common.NewRestError(http.StatusInternalServerError, "could not construct event: ", err))
+					return
+				}
+				resp.Events[i] = *convertedEvent
+			}
+
+			// Write the response to the WebSocket connection
+			err = wsController.conn.WriteJSON(event)
+			if err != nil {
+				wsController.wsErrorHandler(err)
+				return
+			}
+		case <-ticker.C:
+			err := wsController.conn.SetWriteDeadline(time.Now().Add(websockets.WriteWait))
+			if err != nil {
+				wsController.wsErrorHandler(common.NewRestError(http.StatusInternalServerError, "failed to set the initial write deadline: ", err))
+				return
+			}
+			if err := wsController.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
+				wsController.wsErrorHandler(err)
+				return
+			}
+		}
+	}
+}
+
+// read handles WebSocket messages from the client.
+// It continuously reads messages from the WebSocket connection and closes
+// the associated read channel when the connection is closed by the client or when
+// any additional message is received from the client.
+//
+// This method should be called after establishing the WebSocket connection
+// to handle incoming messages asynchronously.
+func (wsController *WebsocketController) read() {
+	// Start a goroutine to handle the WebSocket connection
+	defer close(wsController.readChannel) // notify websocket about closed connection
+
+	for {
+		// reads messages from the WebSocket connection when
+		// 1) the connection is closed by client
+		// 2) a message is received from the client
+		_, msg, err := wsController.conn.ReadMessage()
+		if err != nil {
+			if _, ok := err.(*websocket.CloseError); !ok {
+				wsController.readChannel <- err
+			}
+			return
+		}
+
+		// Check the message from the client; if there is any, just close the connection
+		if len(msg) > 0 {
+			err := fmt.Errorf("the client sent an unexpected message, connection closed")
+			wsController.logger.Debug().Msg(err.Error())
+			wsController.readChannel <- err
+			return
+		}
+	}
+}
+
+// SubscribeHandlerFunc is a function that contains endpoint handling logic for subscribes, fetches necessary resources
+type SubscribeHandlerFunc func(
+	ctx context.Context,
+	request *common.Request,
+	wsController *WebsocketController,
+) (subscription.Subscription, error)
+
+// WSHandler is a websocket handler that wraps a custom subscribe handler function and simplifies
+// error and response handling by keeping it outside of the endpoint logic.
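+// It also enforces the global stream limit: ServeHTTP rejects new connections once
+// activeStreamCount reaches maxStreams.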
+type WSHandler struct {
+	*common.HttpHandler
+	subscribeFunc SubscribeHandlerFunc
+
+	api state_stream.API
+	eventFilterConfig state_stream.EventFilterConfig
+	maxStreams int32
+	defaultHeartbeatInterval uint64
+	activeStreamCount *atomic.Int32
+}
+
+var _ http.Handler = (*WSHandler)(nil)
+
+func NewWSHandler(
+	logger zerolog.Logger,
+	api state_stream.API,
+	subscribeFunc SubscribeHandlerFunc,
+	chain flow.Chain,
+	stateStreamConfig backend.Config,
+	maxRequestSize int64,
+	maxResponseSize int64,
+) *WSHandler {
+	handler := &WSHandler{
+		subscribeFunc: subscribeFunc,
+		api: api,
+		eventFilterConfig: stateStreamConfig.EventFilterConfig,
+		maxStreams: int32(stateStreamConfig.MaxGlobalStreams),
+		defaultHeartbeatInterval: stateStreamConfig.HeartbeatInterval,
+		activeStreamCount: atomic.NewInt32(0),
+		HttpHandler: common.NewHttpHandler(logger, chain, maxRequestSize, maxResponseSize),
+	}
+
+	return handler
+}
+
+// ServeHTTP wraps each request with common handling functionality
+// such as logging, error handling, and request decoration.
+func (h *WSHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// create a logger
+	logger := h.HttpHandler.Logger.With().Str("subscribe_url", r.URL.String()).Logger()
+
+	err := h.HttpHandler.VerifyRequest(w, r)
+	if err != nil {
+		// VerifyRequest sets the response error before returning
+		return
+	}
+
+	// Upgrade the HTTP connection to a WebSocket connection
+	upgrader := websocket.Upgrader{
+		// allow all origins by default, operators can override using a proxy
+		CheckOrigin: func(r *http.Request) bool {
+			return true
+		},
+	}
+	conn, err := upgrader.Upgrade(w, r, nil)
+	if err != nil {
+		h.HttpHandler.ErrorHandler(w, common.NewRestError(http.StatusInternalServerError, "webSocket upgrade error: ", err), logger)
+		return
+	}
+	defer conn.Close()
+
+	wsController := &WebsocketController{
+		logger: logger,
+		conn: conn,
+		Api: h.api,
+		EventFilterConfig: h.eventFilterConfig,
+		maxStreams: h.maxStreams,
+		activeStreamCount: h.activeStreamCount,
+		readChannel: make(chan error),
+		HeartbeatInterval: h.defaultHeartbeatInterval, // set default heartbeat interval from state stream config
+	}
+
+	err = wsController.SetWebsocketConf()
+	if err != nil {
+		wsController.wsErrorHandler(err)
+		return
+	}
+
+	if wsController.activeStreamCount.Load() >= wsController.maxStreams {
+		err := fmt.Errorf("maximum number of streams reached")
+		wsController.wsErrorHandler(common.NewRestError(http.StatusServiceUnavailable, err.Error(), err))
+		return
+	}
+	wsController.activeStreamCount.Add(1)
+	defer wsController.activeStreamCount.Add(-1)
+
+	// cancelling the context passed into `subscribeFunc` ensures that goroutines
+	// set up by the backend are cleaned up when the client disconnects.
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	sub, err := h.subscribeFunc(ctx, common.Decorate(r, h.HttpHandler.Chain), wsController)
+	if err != nil {
+		wsController.wsErrorHandler(err)
+		return
+	}
+
+	go wsController.read()
+	wsController.writeEvents(sub)
+}
diff --git a/engine/access/rest/websockets/mock/websocket_connection.go b/engine/access/rest/websockets/mock/websocket_connection.go
new file mode 100644
index 00000000000..c235d8246ba
--- /dev/null
+++ b/engine/access/rest/websockets/mock/websocket_connection.go
@@ -0,0 +1,141 @@
+// Code generated by mockery. DO NOT EDIT.
+ +package mock + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// WebsocketConnection is an autogenerated mock type for the WebsocketConnection type +type WebsocketConnection struct { + mock.Mock +} + +// Close provides a mock function with no fields +func (_m *WebsocketConnection) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ReadJSON provides a mock function with given fields: v +func (_m *WebsocketConnection) ReadJSON(v interface{}) error { + ret := _m.Called(v) + + if len(ret) == 0 { + panic("no return value specified for ReadJSON") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(v) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetPongHandler provides a mock function with given fields: h +func (_m *WebsocketConnection) SetPongHandler(h func(string) error) { + _m.Called(h) +} + +// SetReadDeadline provides a mock function with given fields: deadline +func (_m *WebsocketConnection) SetReadDeadline(deadline time.Time) error { + ret := _m.Called(deadline) + + if len(ret) == 0 { + panic("no return value specified for SetReadDeadline") + } + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(deadline) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetWriteDeadline provides a mock function with given fields: deadline +func (_m *WebsocketConnection) SetWriteDeadline(deadline time.Time) error { + ret := _m.Called(deadline) + + if len(ret) == 0 { + panic("no return value specified for SetWriteDeadline") + } + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(deadline) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WriteControl provides a mock function with given fields: messageType, deadline +func (_m *WebsocketConnection) WriteControl(messageType int, deadline time.Time) error { + ret := _m.Called(messageType, deadline) + + if len(ret) == 0 { + panic("no return value specified for WriteControl") + } + + var r0 error + if rf, ok := ret.Get(0).(func(int, time.Time) error); ok { + r0 = rf(messageType, deadline) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WriteJSON provides a mock function with given fields: v +func (_m *WebsocketConnection) WriteJSON(v interface{}) error { + ret := _m.Called(v) + + if len(ret) == 0 { + panic("no return value specified for WriteJSON") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(v) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewWebsocketConnection creates a new instance of WebsocketConnection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewWebsocketConnection(t interface { + mock.TestingT + Cleanup(func()) +}) *WebsocketConnection { + mock := &WebsocketConnection{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rest/websockets/models/base_message.go b/engine/access/rest/websockets/models/base_message.go new file mode 100644 index 00000000000..5334cc5067d --- /dev/null +++ b/engine/access/rest/websockets/models/base_message.go @@ -0,0 +1,27 @@ +package models + +const ( + SubscribeAction = "subscribe" + UnsubscribeAction = "unsubscribe" + ListSubscriptionsAction = "list_subscriptions" +) + +// BaseMessageRequest represents a base structure for incoming messages. +type BaseMessageRequest struct { + // SubscriptionID is UUID generated by either client or server to uniquely identify subscription. + // It is empty for 'list_subscriptions' action + SubscriptionID string `json:"subscription_id,omitempty"` + Action string `json:"action"` // Action is an action to perform (e.g. 'subscribe' to some data) +} + +// BaseMessageResponse represents a base structure for outgoing messages. +type BaseMessageResponse struct { + SubscriptionID string `json:"subscription_id"` // SubscriptionID might be empty in case of error response + Error *ErrorMessage `json:"error,omitempty"` // Error might be empty in case of OK response + Action string `json:"action"` +} + +type ErrorMessage struct { + Code int `json:"code"` // Code is an error code that categorizes an error + Message string `json:"message"` +} diff --git a/engine/access/rest/websockets/models/list_subscriptions.go b/engine/access/rest/websockets/models/list_subscriptions.go new file mode 100644 index 00000000000..49c8edf5b96 --- /dev/null +++ b/engine/access/rest/websockets/models/list_subscriptions.go @@ -0,0 +1,14 @@ +package models + +// ListSubscriptionsMessageRequest represents a request to list active subscriptions. +type ListSubscriptionsMessageRequest struct { + BaseMessageRequest +} + +// ListSubscriptionsMessageResponse is the structure used to respond to list_subscriptions requests. +// It contains a list of active subscriptions for the current WebSocket connection. +type ListSubscriptionsMessageResponse struct { + // Subscription list might be empty in case of no active subscriptions + Subscriptions []*SubscriptionEntry `json:"subscriptions"` + Action string `json:"action"` +} diff --git a/engine/access/rest/websockets/models/subscribe_message.go b/engine/access/rest/websockets/models/subscribe_message.go new file mode 100644 index 00000000000..532e4c6a987 --- /dev/null +++ b/engine/access/rest/websockets/models/subscribe_message.go @@ -0,0 +1,15 @@ +package models + +type Arguments map[string]interface{} + +// SubscribeMessageRequest represents a request to subscribe to a topic. +type SubscribeMessageRequest struct { + BaseMessageRequest + Topic string `json:"topic"` // Topic to subscribe to + Arguments Arguments `json:"arguments"` // Additional arguments for subscription +} + +// SubscribeMessageResponse represents the response to a subscription request. +type SubscribeMessageResponse struct { + BaseMessageResponse +} diff --git a/engine/access/rest/websockets/models/subscription_entry.go b/engine/access/rest/websockets/models/subscription_entry.go new file mode 100644 index 00000000000..9a60ab1a0d9 --- /dev/null +++ b/engine/access/rest/websockets/models/subscription_entry.go @@ -0,0 +1,8 @@ +package models + +// SubscriptionEntry represents an active subscription entry. 
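+// Serialized, an entry looks like the following (illustrative topic and arguments):
+//
+//	{"subscription_id": "some-id", "topic": "events", "arguments": {"heartbeat_interval": 2}}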
+type SubscriptionEntry struct {
+	SubscriptionID string `json:"subscription_id"` // SubscriptionID is a client-provided or server-generated ID for the subscription
+	Topic string `json:"topic"` // Topic of the subscription
+	Arguments Arguments `json:"arguments"`
+}
diff --git a/engine/access/rest/websockets/models/unsubscribe_message.go b/engine/access/rest/websockets/models/unsubscribe_message.go
new file mode 100644
index 00000000000..f72e6cb5c7b
--- /dev/null
+++ b/engine/access/rest/websockets/models/unsubscribe_message.go
@@ -0,0 +1,12 @@
+package models
+
+// UnsubscribeMessageRequest represents a request to unsubscribe from a topic.
+type UnsubscribeMessageRequest struct {
+	// Note: subscription_id is mandatory for this request
+	BaseMessageRequest
+}
+
+// UnsubscribeMessageResponse represents the response to an unsubscription request.
+type UnsubscribeMessageResponse struct {
+	BaseMessageResponse
+}
diff --git a/engine/access/rest/websockets/subscription_id.go b/engine/access/rest/websockets/subscription_id.go
new file mode 100644
index 00000000000..39f0aba1ec6
--- /dev/null
+++ b/engine/access/rest/websockets/subscription_id.go
@@ -0,0 +1,59 @@
+package websockets
+
+import (
+	"fmt"
+
+	randutils "github.com/onflow/flow-go/utils/rand"
+)
+
+const maxLen = 20
+
+// SubscriptionID represents a subscription identifier used in websockets.
+// The ID can either be provided by the client or generated by the server.
+// - If provided by the client, it must adhere to specific restrictions.
+// - If generated by the server, it is created as a random string of maxLen characters.
+type SubscriptionID struct {
+	id string
+}
+
+// NewSubscriptionID creates a new SubscriptionID based on the provided input.
+// - If the input `id` is empty, a random ID is generated and returned.
+// - If the input `id` is non-empty, it is validated and, if valid, returned.
+func NewSubscriptionID(id string) (SubscriptionID, error) {
+	if len(id) == 0 {
+		randomString, err := randutils.GenerateRandomString(maxLen)
+		if err != nil {
+			return SubscriptionID{}, fmt.Errorf("could not generate subscription ID: %w", err)
+		}
+
+		return SubscriptionID{
+			id: randomString,
+		}, nil
+	}
+
+	newID, err := ParseClientSubscriptionID(id)
+	if err != nil {
+		return SubscriptionID{}, err
+	}
+
+	return newID, nil
+}
+
+func ParseClientSubscriptionID(id string) (SubscriptionID, error) {
+	if len(id) == 0 {
+		return SubscriptionID{}, fmt.Errorf("subscription ID provided by the client must not be empty")
+	}
+
+	if len(id) > maxLen {
+		return SubscriptionID{}, fmt.Errorf("subscription ID provided by the client must not exceed %d characters", maxLen)
+	}
+
+	return SubscriptionID{
+		id: id,
+	}, nil
+}
+
+// String returns the string representation of the SubscriptionID.
+func (id SubscriptionID) String() string { + return id.id +} diff --git a/engine/access/rest/websockets/subscription_id_test.go b/engine/access/rest/websockets/subscription_id_test.go new file mode 100644 index 00000000000..33c50e71596 --- /dev/null +++ b/engine/access/rest/websockets/subscription_id_test.go @@ -0,0 +1,59 @@ +package websockets + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewSubscriptionID(t *testing.T) { + t.Run("should generate new ID when input ID is empty", func(t *testing.T) { + subscriptionID, err := NewSubscriptionID("") + + assert.NoError(t, err) + assert.NotEmpty(t, subscriptionID.id) + assert.Len(t, subscriptionID.id, maxLen) + }) + + t.Run("should return valid SubscriptionID when input ID is valid", func(t *testing.T) { + validID := "subscription/blocks" + subscriptionID, err := NewSubscriptionID(validID) + + assert.NoError(t, err) + assert.Equal(t, validID, subscriptionID.id) + }) + + t.Run("should return an error for invalid input in ParseClientSubscriptionID", func(t *testing.T) { + longID := fmt.Sprintf("%s%s", "id-", make([]byte, maxLen+1)) + _, err := NewSubscriptionID(longID) + + assert.Error(t, err) + assert.EqualError(t, err, fmt.Sprintf("subscription ID provided by the client must not exceed %d characters", maxLen)) + }) +} + +func TestParseClientSubscriptionID(t *testing.T) { + t.Run("should return error if input ID is empty", func(t *testing.T) { + _, err := ParseClientSubscriptionID("") + + assert.Error(t, err) + assert.EqualError(t, err, "subscription ID provided by the client must not be empty") + }) + + t.Run("should return error if input ID exceeds max length", func(t *testing.T) { + longID := fmt.Sprintf("%s%s", "id-", make([]byte, maxLen+1)) + _, err := ParseClientSubscriptionID(longID) + + assert.Error(t, err) + assert.EqualError(t, err, fmt.Sprintf("subscription ID provided by the client must not exceed %d characters", maxLen)) + }) + + t.Run("should return valid SubscriptionID for valid input", func(t *testing.T) { + validID := "subscription/blocks" + subscription, err := ParseClientSubscriptionID(validID) + + assert.NoError(t, err) + assert.Equal(t, validID, subscription.id) + }) +} diff --git a/engine/access/rest_api_test.go b/engine/access/rest_api_test.go index b01983a30ab..683055564b1 100644 --- a/engine/access/rest_api_test.go +++ b/engine/access/rest_api_test.go @@ -5,7 +5,6 @@ import ( "fmt" "math/rand" "net/http" - "os" "strings" "testing" "time" @@ -17,12 +16,21 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "google.golang.org/grpc/credentials" accessmock "github.com/onflow/flow-go/engine/access/mock" "github.com/onflow/flow-go/engine/access/rest" - "github.com/onflow/flow-go/engine/access/rest/request" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/engine/access/rest/websockets" "github.com/onflow/flow-go/engine/access/rpc" + "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/irrecoverable" 
"github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" @@ -30,6 +38,7 @@ import ( protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/storage" storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/grpcutils" "github.com/onflow/flow-go/utils/unittest" ) @@ -42,7 +51,7 @@ type RestAPITestSuite struct { sealedSnaphost *protocol.Snapshot finalizedSnapshot *protocol.Snapshot log zerolog.Logger - net *network.Network + net *network.EngineRegistry request *module.Requester collClient *accessmock.AccessAPIClient execClient *accessmock.ExecutionAPIClient @@ -63,19 +72,30 @@ type RestAPITestSuite struct { ctx irrecoverable.SignalerContext cancel context.CancelFunc + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer } func (suite *RestAPITestSuite) SetupTest() { - suite.log = zerolog.New(os.Stdout) - suite.net = new(network.Network) + suite.log = unittest.Logger() + suite.net = new(network.EngineRegistry) suite.state = new(protocol.State) suite.sealedSnaphost = new(protocol.Snapshot) suite.finalizedSnapshot = new(protocol.Snapshot) suite.sealedBlock = unittest.BlockHeaderFixture(unittest.WithHeaderHeight(0)) suite.finalizedBlock = unittest.BlockHeaderWithParentFixture(suite.sealedBlock) + rootHeader := unittest.BlockHeaderFixture() + params := new(protocol.Params) + params.On("SporkID").Return(unittest.IdentifierFixture(), nil) + params.On("SporkRootBlockHeight").Return(rootHeader.Height, nil) + params.On("SealedRoot").Return(rootHeader, nil) + suite.state.On("Sealed").Return(suite.sealedSnaphost, nil) suite.state.On("Final").Return(suite.finalizedSnapshot, nil) + suite.state.On("Params").Return(params) suite.sealedSnaphost.On("Head").Return( func() *flow.Header { return suite.sealedBlock @@ -115,25 +135,102 @@ func (suite *RestAPITestSuite) SetupTest() { UnsecureGRPCListenAddr: unittest.DefaultAddress, SecureGRPCListenAddr: unittest.DefaultAddress, HTTPListenAddr: unittest.DefaultAddress, - RESTListenAddr: unittest.DefaultAddress, + RestConfig: rest.Config{ + ListenAddress: unittest.DefaultAddress, + MaxRequestSize: commonrpc.DefaultAccessMaxRequestSize, + MaxResponseSize: commonrpc.DefaultAccessMaxResponseSize, + }, + WebSocketConfig: websockets.NewDefaultWebsocketConfig(), } - rpcEngBuilder, err := rpc.NewBuilder(suite.log, suite.state, config, suite.collClient, nil, suite.blocks, suite.headers, suite.collections, suite.transactions, - nil, suite.executionResults, suite.chainID, suite.metrics, suite.metrics, 0, 0, false, - false, nil, nil, suite.me) + // generate a server certificate that will be served by the GRPC server + networkingKey := unittest.NetworkingPrivKeyFixture() + x509Certificate, err := grpcutils.X509Certificate(networkingKey) + assert.NoError(suite.T(), err) + tlsConfig := grpcutils.DefaultServerTLSConfig(x509Certificate) + // set the transport credentials for the server to use + config.TransportCredentials = credentials.NewTLS(tlsConfig) + + suite.secureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.SecureGRPCListenAddr, + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + false, + nil, + nil, + grpcserver.WithTransportCredentials(config.TransportCredentials)).Build() + + suite.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.UnsecureGRPCListenAddr, + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + false, + nil, + 
nil).Build() + + bnd, err := backend.New(backend.Params{ + State: suite.state, + CollectionRPC: suite.collClient, + Blocks: suite.blocks, + Headers: suite.headers, + Collections: suite.collections, + Transactions: suite.transactions, + ExecutionResults: suite.executionResults, + ChainID: suite.chainID, + AccessMetrics: suite.metrics, + MaxHeightRange: 0, + Log: suite.log, + SnapshotHistoryLimit: 0, + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + }) + require.NoError(suite.T(), err) + + stateStreamConfig := statestreambackend.Config{} + rpcEngBuilder, err := rpc.NewBuilder( + suite.log, + suite.state, + config, + suite.chainID, + suite.metrics, + false, + suite.me, + bnd, + bnd, + suite.secureGrpcServer, + suite.unsecureGrpcServer, + nil, + stateStreamConfig, + nil, + ) assert.NoError(suite.T(), err) suite.rpcEng, err = rpcEngBuilder.WithLegacy().Build() assert.NoError(suite.T(), err) suite.ctx, suite.cancel = irrecoverable.NewMockSignalerContextWithCancel(suite.T(), context.Background()) + suite.rpcEng.Start(suite.ctx) - // wait for the server to startup + + suite.secureGrpcServer.Start(suite.ctx) + suite.unsecureGrpcServer.Start(suite.ctx) + + // wait for the servers to startup + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Ready(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Ready(), 2*time.Second) + + // wait for the engine to startup unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Ready(), 2*time.Second) } func (suite *RestAPITestSuite) TearDownTest() { - suite.cancel() - unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Done(), 2*time.Second) + if suite.cancel != nil { + suite.cancel() + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Done(), 2*time.Second) + } } func TestRestAPI(t *testing.T) { @@ -142,16 +239,19 @@ func TestRestAPI(t *testing.T) { func (suite *RestAPITestSuite) TestGetBlock() { - testBlockIDs := make([]string, request.MaxIDsLength) - testBlocks := make([]*flow.Block, request.MaxIDsLength) + testBlockIDs := make([]string, parser.MaxIDsLength) + testBlocks := make([]*flow.Block, parser.MaxIDsLength) for i := range testBlockIDs { collections := unittest.CollectionListFixture(1) - block := unittest.BlockWithGuaranteesFixture( - unittest.CollectionGuaranteesWithCollectionIDFixture(collections), + block := unittest.BlockFixture( + unittest.Block.WithHeight(uint64(i+1)), // avoiding edge case of height = 0 (genesis block) + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees(unittest.CollectionGuaranteesWithCollectionIDFixture(collections)...)), + ), ) - block.Header.Height = uint64(i) suite.blocks.On("ByID", block.ID()).Return(block, nil) - suite.blocks.On("ByHeight", block.Header.Height).Return(block, nil) + suite.blocks.On("ByHeight", block.Height).Return(block, nil) + suite.headers.On("BlockIDByHeight", block.Height).Return(block.ID(), nil) testBlocks[i] = block testBlockIDs[i] = block.ID().String() @@ -159,8 +259,8 @@ func (suite *RestAPITestSuite) TestGetBlock() { suite.executionResults.On("ByBlockID", block.ID()).Return(execResult, nil) } - suite.sealedBlock = testBlocks[len(testBlocks)-1].Header 
- suite.finalizedBlock = testBlocks[len(testBlocks)-2].Header + suite.sealedBlock = testBlocks[len(testBlocks)-1].ToHeader() + suite.finalizedBlock = testBlocks[len(testBlocks)-2].ToHeader() client := suite.restAPIClient() @@ -193,7 +293,7 @@ func (suite *RestAPITestSuite) TestGetBlock() { actualBlocks, resp, err := client.BlocksApi.BlocksIdGet(ctx, blockIDSlice, optionsForBlockByID()) require.NoError(suite.T(), err) assert.Equal(suite.T(), http.StatusOK, resp.StatusCode) - assert.Len(suite.T(), actualBlocks, request.MaxIDsLength) + assert.Len(suite.T(), actualBlocks, parser.MaxIDsLength) for i, b := range testBlocks { assert.Equal(suite.T(), b.ID().String(), actualBlocks[i].Header.Id) } @@ -204,9 +304,9 @@ func (suite *RestAPITestSuite) TestGetBlock() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - startHeight := testBlocks[0].Header.Height + startHeight := testBlocks[0].Height blkCnt := len(testBlocks) - endHeight := testBlocks[blkCnt-1].Header.Height + endHeight := testBlocks[blkCnt-1].Height actualBlocks, resp, err := client.BlocksApi.BlocksGet(ctx, optionsForBlockByStartEndHeight(startHeight, endHeight)) require.NoError(suite.T(), err) @@ -214,7 +314,7 @@ func (suite *RestAPITestSuite) TestGetBlock() { assert.Len(suite.T(), actualBlocks, blkCnt) for i := 0; i < blkCnt; i++ { assert.Equal(suite.T(), testBlocks[i].ID().String(), actualBlocks[i].Header.Id) - assert.Equal(suite.T(), fmt.Sprintf("%d", testBlocks[i].Header.Height), actualBlocks[i].Header.Height) + assert.Equal(suite.T(), fmt.Sprintf("%d", testBlocks[i].Height), actualBlocks[i].Header.Height) } }) @@ -226,7 +326,7 @@ func (suite *RestAPITestSuite) TestGetBlock() { lastIndex := len(testBlocks) var reqHeights = make([]uint64, len(testBlocks)) for i := 0; i < lastIndex; i++ { - reqHeights[i] = testBlocks[i].Header.Height + reqHeights[i] = testBlocks[i].Height } actualBlocks, resp, err := client.BlocksApi.BlocksGet(ctx, optionsForBlockByHeights(reqHeights)) @@ -235,7 +335,7 @@ func (suite *RestAPITestSuite) TestGetBlock() { assert.Len(suite.T(), actualBlocks, lastIndex) for i := 0; i < lastIndex; i++ { assert.Equal(suite.T(), testBlocks[i].ID().String(), actualBlocks[i].Header.Id) - assert.Equal(suite.T(), fmt.Sprintf("%d", testBlocks[i].Header.Height), actualBlocks[i].Header.Height) + assert.Equal(suite.T(), fmt.Sprintf("%d", testBlocks[i].Height), actualBlocks[i].Header.Height) } }) @@ -291,13 +391,13 @@ func (suite *RestAPITestSuite) TestGetBlock() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - blockIDs := make([]string, request.MaxIDsLength+1) + blockIDs := make([]string, parser.MaxIDsLength+1) copy(blockIDs, testBlockIDs) - blockIDs[request.MaxIDsLength] = unittest.IdentifierFixture().String() + blockIDs[parser.MaxIDsLength] = unittest.IdentifierFixture().String() blockIDSlice := []string{strings.Join(blockIDs, ",")} _, resp, err := client.BlocksApi.BlocksIdGet(ctx, blockIDSlice, optionsForBlockByID()) - assertError(suite.T(), resp, err, http.StatusBadRequest, fmt.Sprintf("at most %d IDs can be requested at a time", request.MaxIDsLength)) + assertError(suite.T(), resp, err, http.StatusBadRequest, fmt.Sprintf("at most %d IDs can be requested at a time", parser.MaxIDsLength)) }) suite.Run("GetBlockByID with one non-existing block ID", func() { @@ -306,7 +406,6 @@ func (suite *RestAPITestSuite) TestGetBlock() { defer cancel() // replace one ID with a block ID for which the storage returns a not found error - rand.Seed(time.Now().Unix()) 
invalidBlockIndex := rand.Intn(len(testBlocks)) invalidID := unittest.IdentifierFixture() suite.blocks.On("ByID", invalidID).Return(nil, storage.ErrNotFound).Once() @@ -324,7 +423,7 @@ func (suite *RestAPITestSuite) TestGetBlock() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - invalidHeight := uint64(len(testBlocks)) + invalidHeight := uint64(len(testBlocks) * 2) var reqHeights = []uint64{invalidHeight} suite.blocks.On("ByHeight", invalidHeight).Return(nil, storage.ErrNotFound).Once() @@ -338,7 +437,7 @@ func (suite *RestAPITestSuite) TestRequestSizeRestriction() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() // make a request of size larger than the max permitted size - requestBytes := make([]byte, rest.MaxRequestSize+1) + requestBytes := make([]byte, commonrpc.DefaultAccessMaxRequestSize+1) script := restclient.ScriptsBody{ Script: string(requestBytes), } @@ -365,13 +464,13 @@ func assertError(t *testing.T, resp *http.Response, err error, expectedCode int, func optionsForBlockByID() *restclient.BlocksApiBlocksIdGetOpts { return &restclient.BlocksApiBlocksIdGetOpts{ - Expand: optional.NewInterface([]string{rest.ExpandableFieldPayload}), + Expand: optional.NewInterface([]string{router.ExpandableFieldPayload}), Select_: optional.NewInterface([]string{"header.id"}), } } func optionsForBlockByStartEndHeight(startHeight, endHeight uint64) *restclient.BlocksApiBlocksGetOpts { return &restclient.BlocksApiBlocksGetOpts{ - Expand: optional.NewInterface([]string{rest.ExpandableFieldPayload}), + Expand: optional.NewInterface([]string{router.ExpandableFieldPayload}), Select_: optional.NewInterface([]string{"header.id", "header.height"}), StartHeight: optional.NewInterface(startHeight), EndHeight: optional.NewInterface(endHeight), @@ -380,7 +479,7 @@ func optionsForBlockByStartEndHeight(startHeight, endHeight uint64) *restclient. 
func optionsForBlockByHeights(heights []uint64) *restclient.BlocksApiBlocksGetOpts { return &restclient.BlocksApiBlocksGetOpts{ - Expand: optional.NewInterface([]string{rest.ExpandableFieldPayload}), + Expand: optional.NewInterface([]string{router.ExpandableFieldPayload}), Select_: optional.NewInterface([]string{"header.id", "header.height"}), Height: optional.NewInterface(heights), } @@ -388,7 +487,7 @@ func optionsForBlockByHeights(heights []uint64) *restclient.BlocksApiBlocksGetOp func optionsForFinalizedBlock(finalOrSealed string) *restclient.BlocksApiBlocksGetOpts { return &restclient.BlocksApiBlocksGetOpts{ - Expand: optional.NewInterface([]string{rest.ExpandableFieldPayload}), + Expand: optional.NewInterface([]string{router.ExpandableFieldPayload}), Select_: optional.NewInterface([]string{"header.id", "header.height"}), Height: optional.NewInterface(finalOrSealed), } diff --git a/engine/access/rpc/backend/accounts/accounts.go b/engine/access/rpc/backend/accounts/accounts.go new file mode 100644 index 00000000000..76eb072fe10 --- /dev/null +++ b/engine/access/rpc/backend/accounts/accounts.go @@ -0,0 +1,241 @@ +package accounts + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rpc/backend/accounts/provider" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/rpc/connection" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +type Accounts struct { + log zerolog.Logger + state protocol.State + headers storage.Headers + provider provider.AccountProvider +} + +var _ access.AccountsAPI = (*Accounts)(nil) + +func NewAccountsBackend( + log zerolog.Logger, + state protocol.State, + headers storage.Headers, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, + scriptExecMode query_mode.IndexQueryMode, + scriptExecutor execution.ScriptExecutor, + execNodeIdentitiesProvider *commonrpc.ExecutionNodeIdentitiesProvider, +) (*Accounts, error) { + var accountProvider provider.AccountProvider + + switch scriptExecMode { + case query_mode.IndexQueryModeLocalOnly: + accountProvider = provider.NewLocalAccountProvider(log, state, scriptExecutor) + + case query_mode.IndexQueryModeExecutionNodesOnly: + accountProvider = provider.NewENAccountProvider(log, state, connFactory, nodeCommunicator, execNodeIdentitiesProvider) + + case query_mode.IndexQueryModeFailover: + local := provider.NewLocalAccountProvider(log, state, scriptExecutor) + execNode := provider.NewENAccountProvider(log, state, connFactory, nodeCommunicator, execNodeIdentitiesProvider) + accountProvider = provider.NewFailoverAccountProvider(log, state, local, execNode) + + case query_mode.IndexQueryModeCompare: + local := provider.NewLocalAccountProvider(log, state, scriptExecutor) + execNode := provider.NewENAccountProvider(log, state, connFactory, nodeCommunicator, execNodeIdentitiesProvider) + accountProvider = provider.NewComparingAccountProvider(log, state, local, execNode) + + default: + return nil, fmt.Errorf("unknown execution mode: %v", scriptExecMode) + } + + return &Accounts{ + log: 
log, + state: state, + headers: headers, + provider: accountProvider, + }, nil +} + +// GetAccount returns the account details at the latest sealed block. +// Alias for GetAccountAtLatestBlock +func (a *Accounts) GetAccount(ctx context.Context, address flow.Address) (*flow.Account, error) { + return a.GetAccountAtLatestBlock(ctx, address) +} + +// GetAccountAtLatestBlock returns the account details at the latest sealed block. +func (a *Accounts) GetAccountAtLatestBlock(ctx context.Context, address flow.Address) (*flow.Account, error) { + sealed, err := a.state.Sealed().Head() + if err != nil { + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + sealedBlockID := sealed.ID() + account, err := a.provider.GetAccountAtBlock(ctx, address, sealedBlockID, sealed.Height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account at blockID: %v", sealedBlockID) + return nil, err + } + + return account, nil +} + +// GetAccountAtBlockHeight returns the account details at the given block height. +func (a *Accounts) GetAccountAtBlockHeight( + ctx context.Context, + address flow.Address, + height uint64, +) (*flow.Account, error) { + blockID, err := a.headers.BlockIDByHeight(height) + if err != nil { + return nil, commonrpc.ConvertStorageError(common.ResolveHeightError(a.state.Params(), height, err)) + } + + account, err := a.provider.GetAccountAtBlock(ctx, address, blockID, height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account at height: %d", height) + return nil, err + } + + return account, nil +} + +// GetAccountBalanceAtLatestBlock returns the account balance at the latest sealed block. +func (a *Accounts) GetAccountBalanceAtLatestBlock(ctx context.Context, address flow.Address) (uint64, error) { + sealed, err := a.state.Sealed().Head() + if err != nil { + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return 0, err + } + + sealedBlockID := sealed.ID() + balance, err := a.provider.GetAccountBalanceAtBlock(ctx, address, sealedBlockID, sealed.Height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account balance at blockID: %v", sealedBlockID) + return 0, err + } + + return balance, nil +} + +// GetAccountBalanceAtBlockHeight returns the account balance at the given block height. +func (a *Accounts) GetAccountBalanceAtBlockHeight( + ctx context.Context, + address flow.Address, + height uint64, +) (uint64, error) { + blockID, err := a.headers.BlockIDByHeight(height) + if err != nil { + return 0, commonrpc.ConvertStorageError(common.ResolveHeightError(a.state.Params(), height, err)) + } + + balance, err := a.provider.GetAccountBalanceAtBlock(ctx, address, blockID, height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account balance at height: %v", height) + return 0, err + } + + return balance, nil +} + +// GetAccountKeyAtLatestBlock returns the account public key at the latest sealed block. 
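+// The lookup is delegated to the configured AccountProvider, so the result may come
+// from the local index or an execution node depending on the query mode. A hypothetical
+// caller-side sketch (accountsBackend, address, and keyIndex are assumed names, not
+// part of this package):
+//
+//	key, err := accountsBackend.GetAccountKeyAtLatestBlock(ctx, address, keyIndex)
+//	if err != nil {
+//		return err // a gRPC status error, e.g. codes.NotFound for a missing key
+//	}
+//	fmt.Println(key.Index, key.PublicKey)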
+func (a *Accounts) GetAccountKeyAtLatestBlock( + ctx context.Context, + address flow.Address, + keyIndex uint32, +) (*flow.AccountPublicKey, error) { + sealed, err := a.state.Sealed().Head() + if err != nil { + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + sealedBlockID := sealed.ID() + key, err := a.provider.GetAccountKeyAtBlock(ctx, address, keyIndex, sealedBlockID, sealed.Height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account key at blockID: %v", sealedBlockID) + return nil, err + } + + return key, nil +} + +// GetAccountKeyAtBlockHeight returns the account public key by key index at the given block height. +func (a *Accounts) GetAccountKeyAtBlockHeight( + ctx context.Context, + address flow.Address, + keyIndex uint32, + height uint64, +) (*flow.AccountPublicKey, error) { + blockID, err := a.headers.BlockIDByHeight(height) + if err != nil { + return nil, commonrpc.ConvertStorageError(common.ResolveHeightError(a.state.Params(), height, err)) + } + + key, err := a.provider.GetAccountKeyAtBlock(ctx, address, keyIndex, blockID, height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account key at height: %v", height) + return nil, err + } + + return key, nil +} + +// GetAccountKeysAtLatestBlock returns the account public keys at the latest sealed block. +func (a *Accounts) GetAccountKeysAtLatestBlock( + ctx context.Context, + address flow.Address, +) ([]flow.AccountPublicKey, error) { + sealed, err := a.state.Sealed().Head() + if err != nil { + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + sealedBlockID := sealed.ID() + keys, err := a.provider.GetAccountKeysAtBlock(ctx, address, sealedBlockID, sealed.Height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account keys at blockID: %v", sealedBlockID) + return nil, err + } + + return keys, nil +} + +// GetAccountKeysAtBlockHeight returns the account public keys at the given block height. 
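+// The height is first mapped to a block ID via the headers index; a failed lookup is
+// passed through ResolveHeightError so callers get a descriptive error for heights
+// outside the indexed range. A hypothetical caller (assumed names):
+//
+//	keys, err := accountsBackend.GetAccountKeysAtBlockHeight(ctx, address, height)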
+func (a *Accounts) GetAccountKeysAtBlockHeight( + ctx context.Context, + address flow.Address, + height uint64, +) ([]flow.AccountPublicKey, error) { + blockID, err := a.headers.BlockIDByHeight(height) + if err != nil { + return nil, commonrpc.ConvertStorageError(common.ResolveHeightError(a.state.Params(), height, err)) + } + + keys, err := a.provider.GetAccountKeysAtBlock(ctx, address, blockID, height) + if err != nil { + a.log.Debug().Err(err).Msgf("failed to get account keys at height: %v", height) + return nil, err + } + + return keys, nil +} diff --git a/engine/access/rpc/backend/accounts/accounts_test.go b/engine/access/rpc/backend/accounts/accounts_test.go new file mode 100644 index 00000000000..bfb597607e2 --- /dev/null +++ b/engine/access/rpc/backend/accounts/accounts_test.go @@ -0,0 +1,668 @@ +package accounts + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + access "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + execmock "github.com/onflow/flow-go/module/execution/mock" + "github.com/onflow/flow-go/module/irrecoverable" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" +) + +type AccountsSuite struct { + suite.Suite + + log zerolog.Logger + state *protocol.State + snapshot *protocol.Snapshot + params *protocol.Params + rootHeader *flow.Header + + headers *storagemock.Headers + receipts *storagemock.ExecutionReceipts + connectionFactory *connectionmock.ConnectionFactory + chainID flow.ChainID + + executionNodes flow.IdentityList + execClient *access.ExecutionAPIClient + + block *flow.Block + account *flow.Account + failingAddress flow.Address +} + +func TestBackendAccountsSuite(t *testing.T) { + suite.Run(t, new(AccountsSuite)) +} + +func (s *AccountsSuite) SetupTest() { + s.log = unittest.Logger() + s.state = protocol.NewState(s.T()) + s.snapshot = protocol.NewSnapshot(s.T()) + s.rootHeader = unittest.BlockHeaderFixture() + s.params = protocol.NewParams(s.T()) + s.headers = storagemock.NewHeaders(s.T()) + s.receipts = storagemock.NewExecutionReceipts(s.T()) + s.connectionFactory = connectionmock.NewConnectionFactory(s.T()) + s.chainID = flow.Testnet + + s.execClient = access.NewExecutionAPIClient(s.T()) + s.executionNodes = unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) + s.block = unittest.BlockFixture() + + var err error + s.account, err = unittest.AccountFixture() + s.Require().NoError(err) + + s.failingAddress = unittest.AddressFixture() +} + +// TestGetAccountFromExecutionNode_HappyPath tests successfully getting accounts from execution nodes +func (s *AccountsSuite) TestGetAccountFromExecutionNode_HappyPath() { + ctx := context.Background() + + s.setupExecutionNodes(s.block) + s.setupENSuccessResponse(s.block.ID()) + 
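+	// in execution-nodes-only mode the script executor is never consulted, so a
+	// bare mock with no expectations is sufficient here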
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeExecutionNodesOnly, scriptExecutor)
+
+	s.Run("GetAccount - happy path", func() {
+		s.testGetAccount(ctx, backend, codes.OK)
+	})
+
+	s.Run("GetAccountAtLatestBlock - happy path", func() {
+		s.testGetAccountAtLatestBlock(ctx, backend, codes.OK)
+	})
+
+	s.Run("GetAccountAtBlockHeight - happy path", func() {
+		s.testGetAccountAtBlockHeight(ctx, backend, codes.OK)
+	})
+}
+
+// TestGetAccountFromExecutionNode_Fails tests that errors received from execution nodes are returned
+func (s *AccountsSuite) TestGetAccountFromExecutionNode_Fails() {
+	ctx := context.Background()
+
+	// use a status code that's not used in the API to make sure it's passed through
+	statusCode := codes.FailedPrecondition
+	errToReturn := status.Error(statusCode, "random error")
+
+	s.setupExecutionNodes(s.block)
+	s.setupENFailingResponse(s.block.ID(), errToReturn)
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeExecutionNodesOnly, scriptExecutor)
+
+	s.Run("GetAccount - fails with backend err", func() {
+		s.testGetAccount(ctx, backend, statusCode)
+	})
+
+	s.Run("GetAccountAtLatestBlock - fails with backend err", func() {
+		s.testGetAccountAtLatestBlock(ctx, backend, statusCode)
+	})
+
+	s.Run("GetAccountAtBlockHeight - fails with backend err", func() {
+		s.testGetAccountAtBlockHeight(ctx, backend, statusCode)
+	})
+}
+
+// TestGetAccountFromStorage_HappyPath tests successfully getting accounts from local storage
+func (s *AccountsSuite) TestGetAccountFromStorage_HappyPath() {
+	ctx := context.Background()
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	scriptExecutor.On("GetAccountAtBlockHeight", mock.Anything, s.account.Address, s.block.Height).
+		Return(s.account, nil)
+
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor)
+
+	s.Run("GetAccount - happy path", func() {
+		s.testGetAccount(ctx, backend, codes.OK)
+	})
+
+	s.Run("GetAccountAtLatestBlock - happy path", func() {
+		s.testGetAccountAtLatestBlock(ctx, backend, codes.OK)
+	})
+
+	s.Run("GetAccountAtBlockHeight - happy path", func() {
+		s.testGetAccountAtBlockHeight(ctx, backend, codes.OK)
+	})
+}
+
+// TestGetAccountFromStorage_Fails tests that errors received from local storage are handled
+// and converted to the appropriate status code
+func (s *AccountsSuite) TestGetAccountFromStorage_Fails() {
+	ctx := context.Background()
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor)
+
+	testCases := []struct {
+		err        error
+		statusCode codes.Code
+	}{
+		{
+			err:        storage.ErrHeightNotIndexed,
+			statusCode: codes.OutOfRange,
+		},
+		{
+			err:        storage.ErrNotFound,
+			statusCode: codes.NotFound,
+		},
+		{
+			err:        fmt.Errorf("system error"),
+			statusCode: codes.Internal,
+		},
+	}
+
+	for _, tt := range testCases {
+		scriptExecutor.On("GetAccountAtBlockHeight", mock.Anything, s.failingAddress, s.block.Height).
+ Return(nil, tt.err).Times(3) + + s.state.On("Params").Return(s.params).Times(3) + + s.Run(fmt.Sprintf("GetAccount - fails with %v", tt.err), func() { + s.testGetAccount(ctx, backend, tt.statusCode) + }) + + s.Run(fmt.Sprintf("GetAccountAtLatestBlock - fails with %v", tt.err), func() { + s.testGetAccountAtLatestBlock(ctx, backend, tt.statusCode) + }) + + s.Run(fmt.Sprintf("GetAccountAtBlockHeight - fails with %v", tt.err), func() { + s.params.On("SporkRootBlockHeight").Return(s.block.Height-10, nil) + s.params.On("SealedRoot").Return(s.block.ToHeader(), nil) + + s.testGetAccountAtBlockHeight(ctx, backend, tt.statusCode) + }) + } +} + +// TestGetAccountFromFailover_HappyPath tests that when an error is returned getting an account +// from local storage, the backend will attempt to get the account from an execution node +func (s *AccountsSuite) TestGetAccountFromFailover_HappyPath() { + ctx := context.Background() + + s.setupExecutionNodes(s.block) + s.setupENSuccessResponse(s.block.ID()) + + scriptExecutor := execmock.NewScriptExecutor(s.T()) + backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor) + + for _, errToReturn := range []error{storage.ErrHeightNotIndexed, storage.ErrNotFound} { + scriptExecutor.On("GetAccountAtBlockHeight", mock.Anything, s.account.Address, s.block.Height). + Return(nil, errToReturn).Times(3) + + s.Run(fmt.Sprintf("GetAccount - happy path - recovers %v", errToReturn), func() { + s.testGetAccount(ctx, backend, codes.OK) + }) + + s.Run(fmt.Sprintf("GetAccountAtLatestBlock - happy path - recovers %v", errToReturn), func() { + s.testGetAccountAtLatestBlock(ctx, backend, codes.OK) + }) + + s.Run(fmt.Sprintf("GetAccountAtBlockHeight - happy path - recovers %v", errToReturn), func() { + s.params.On("SporkRootBlockHeight").Return(s.block.Height-10, nil) + s.params.On("SealedRoot").Return(s.block.ToHeader(), nil) + + s.testGetAccountAtBlockHeight(ctx, backend, codes.OK) + }) + } +} + +// TestGetAccountFromFailover_ReturnsENErrors tests that when an error is returned from the execution +// node during a failover, it is returned to the caller. +func (s *AccountsSuite) TestGetAccountFromFailover_ReturnsENErrors() { + ctx := context.Background() + + // use a status code that's not used in the API to make sure it's passed through + statusCode := codes.FailedPrecondition + errToReturn := status.Error(statusCode, "random error") + + s.setupExecutionNodes(s.block) + s.setupENFailingResponse(s.block.ID(), errToReturn) + + scriptExecutor := execmock.NewScriptExecutor(s.T()) + scriptExecutor.On("GetAccountAtBlockHeight", mock.Anything, s.failingAddress, s.block.Height). 
+		Return(nil, storage.ErrHeightNotIndexed)
+
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor)
+
+	s.Run("GetAccount - fails with backend err", func() {
+		s.testGetAccount(ctx, backend, statusCode)
+	})
+
+	s.Run("GetAccountAtLatestBlock - fails with backend err", func() {
+		s.testGetAccountAtLatestBlock(ctx, backend, statusCode)
+	})
+
+	s.Run("GetAccountAtBlockHeight - fails with backend err", func() {
+		s.testGetAccountAtBlockHeight(ctx, backend, statusCode)
+	})
+}
+
+// TestGetAccountAtLatestBlockFromStorage_InconsistentState tests that the signaler context receives an error
+// when the node's state is inconsistent
+func (s *AccountsSuite) TestGetAccountAtLatestBlockFromStorage_InconsistentState() {
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor)
+
+	s.Run(fmt.Sprintf("GetAccountAtLatestBlock - fails with %v", "inconsistent node's state"), func() {
+		s.state.On("Sealed").Return(s.snapshot, nil)
+
+		err := fmt.Errorf("inconsistent node's state")
+		s.snapshot.On("Head").Return(nil, err)
+
+		signCtxErr := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err)
+		signalerCtx := irrecoverable.WithSignalerContext(context.Background(), irrecoverable.NewMockSignalerContextExpectError(s.T(), context.Background(), signCtxErr))
+
+		actual, err := backend.GetAccountAtLatestBlock(signalerCtx, s.failingAddress)
+		s.Require().Error(err)
+		s.Require().Nil(actual)
+	})
+}
+
+// TestGetAccountBalanceFromStorage_HappyPath tests successfully getting the account balance from local storage
+func (s *AccountsSuite) TestGetAccountBalanceFromStorage_HappyPath() {
+	ctx := context.Background()
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	scriptExecutor.On("GetAccountBalance", mock.Anything, s.account.Address, s.block.Height).
+		Return(s.account.Balance, nil)
+
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor)
+
+	s.Run("GetAccountBalanceAtLatestBlock - happy path", func() {
+		s.testGetAccountBalanceAtLatestBlock(ctx, backend)
+	})
+
+	s.Run("GetAccountBalanceAtBlockHeight - happy path", func() {
+		s.headers.On("BlockIDByHeight", s.block.Height).Return(s.block.ID(), nil).Once()
+		s.testGetAccountBalanceAtBlockHeight(ctx, backend)
+	})
+}
+
+// TestGetAccountBalanceFromExecutionNode_HappyPath tests successfully getting account balances from execution nodes
+func (s *AccountsSuite) TestGetAccountBalanceFromExecutionNode_HappyPath() {
+	ctx := context.Background()
+
+	s.setupExecutionNodes(s.block)
+	s.setupENSuccessResponse(s.block.ID())
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeExecutionNodesOnly, scriptExecutor)
+
+	s.Run("GetAccountBalanceAtLatestBlock - happy path", func() {
+		s.testGetAccountBalanceAtLatestBlock(ctx, backend)
+	})
+
+	s.Run("GetAccountBalanceAtBlockHeight - happy path", func() {
+		s.headers.On("BlockIDByHeight", s.block.Height).Return(s.block.ID(), nil).Once()
+		s.testGetAccountBalanceAtBlockHeight(ctx, backend)
+	})
+}
+
+// TestGetAccountBalanceFromFailover_HappyPath tests that when an error is returned getting the account balance
+// from local storage, the backend will attempt to get the account balance from an execution node
+func (s *AccountsSuite) TestGetAccountBalanceFromFailover_HappyPath() {
+	ctx := context.Background()
+
+	s.setupExecutionNodes(s.block)
+	s.setupENSuccessResponse(s.block.ID())
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor)
+
+	for _, errToReturn := range []error{storage.ErrHeightNotIndexed, storage.ErrNotFound} {
+		scriptExecutor.On("GetAccountBalance", mock.Anything, s.account.Address, s.block.Height).
+			Return(uint64(0), errToReturn).Times(2)
+
+		s.Run(fmt.Sprintf("GetAccountBalanceAtLatestBlock - happy path - recovers %v", errToReturn), func() {
+			s.testGetAccountBalanceAtLatestBlock(ctx, backend)
+		})
+
+		s.Run(fmt.Sprintf("GetAccountBalanceAtBlockHeight - happy path - recovers %v", errToReturn), func() {
+			s.headers.On("BlockIDByHeight", s.block.Height).Return(s.block.ID(), nil).Once()
+			s.testGetAccountBalanceAtBlockHeight(ctx, backend)
+		})
+	}
+}
+
+// TestGetAccountKeysFromStorage_HappyPath tests successfully getting account keys when
+// script execution is enabled.
+func (s *AccountsSuite) TestGetAccountKeysFromStorage_HappyPath() {
+	ctx := context.Background()
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	scriptExecutor.On("GetAccountKeys", mock.Anything, s.account.Address, s.block.Height).
+		Return(s.account.Keys, nil)
+
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor)
+
+	s.Run("GetAccountKeysAtLatestBlock - happy path", func() {
+		s.testGetAccountKeysAtLatestBlock(ctx, backend)
+	})
+
+	s.Run("GetAccountKeysAtBlockHeight - happy path", func() {
+		s.headers.On("BlockIDByHeight", s.block.Height).Return(s.block.ID(), nil).Once()
+
+		s.testGetAccountKeysAtBlockHeight(ctx, backend)
+	})
+}
+
+// TestGetAccountKeyFromStorage_HappyPath tests successfully getting an account key by key index when
+// script execution is enabled.
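+// The key is served entirely by ScriptExecutor.GetAccountKey against the local
+// index; no execution node is contacted in IndexQueryModeLocalOnly.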
+func (s *AccountsSuite) TestGetAccountKeyFromStorage_HappyPath() {
+	ctx := context.Background()
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeLocalOnly, scriptExecutor)
+
+	var keyIndex uint32 = 0
+	keyByIndex := findAccountKeyByIndex(s.account.Keys, keyIndex)
+	scriptExecutor.On("GetAccountKey", mock.Anything, s.account.Address, keyIndex, s.block.Height).
+		Return(keyByIndex, nil).Twice()
+
+	s.Run("GetAccountKeyAtLatestBlock - by key index - happy path", func() {
+		s.testGetAccountKeyAtLatestBlock(ctx, backend, keyIndex)
+	})
+
+	s.Run("GetAccountKeyAtBlockHeight - by key index - happy path", func() {
+		s.headers.On("BlockIDByHeight", s.block.Height).Return(s.block.ID(), nil).Once()
+
+		s.testGetAccountKeyAtBlockHeight(ctx, backend, keyIndex)
+	})
+}
+
+// TestGetAccountKeysFromExecutionNode_HappyPath tests successfully getting account keys from execution nodes
+func (s *AccountsSuite) TestGetAccountKeysFromExecutionNode_HappyPath() {
+	ctx := context.Background()
+
+	s.setupExecutionNodes(s.block)
+	s.setupENSuccessResponse(s.block.ID())
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeExecutionNodesOnly, scriptExecutor)
+
+	s.Run("GetAccountKeysAtLatestBlock - all keys - happy path", func() {
+		s.testGetAccountKeysAtLatestBlock(ctx, backend)
+	})
+
+	s.Run("GetAccountKeysAtBlockHeight - all keys - happy path", func() {
+		s.headers.On("BlockIDByHeight", s.block.Height).Return(s.block.ID(), nil).Once()
+
+		s.testGetAccountKeysAtBlockHeight(ctx, backend)
+	})
+}
+
+// TestGetAccountKeyFromExecutionNode_HappyPath tests successfully getting an account key by key index from execution nodes
+func (s *AccountsSuite) TestGetAccountKeyFromExecutionNode_HappyPath() {
+	ctx := context.Background()
+
+	s.setupExecutionNodes(s.block)
+	s.setupENSuccessResponse(s.block.ID())
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeExecutionNodesOnly, scriptExecutor)
+	var keyIndex uint32 = 0
+
+	s.Run("GetAccountKeyAtLatestBlock - by key index - happy path", func() {
+		s.testGetAccountKeyAtLatestBlock(ctx, backend, keyIndex)
+	})
+
+	s.Run("GetAccountKeyAtBlockHeight - by key index - happy path", func() {
+		s.headers.On("BlockIDByHeight", s.block.Height).Return(s.block.ID(), nil).Once()
+
+		s.testGetAccountKeyAtBlockHeight(ctx, backend, keyIndex)
+	})
+}
+
+// TestGetAccountKeysFromFailover_HappyPath tests that when an error is returned getting account keys
+// from local storage, the backend will attempt to get the account keys from an execution node
+func (s *AccountsSuite) TestGetAccountKeysFromFailover_HappyPath() {
+	ctx := context.Background()
+
+	s.setupExecutionNodes(s.block)
+	s.setupENSuccessResponse(s.block.ID())
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor)
+
+	for _, errToReturn := range []error{storage.ErrHeightNotIndexed, storage.ErrNotFound} {
+		scriptExecutor.On("GetAccountKeys", mock.Anything, s.account.Address, s.block.Height).
+			Return(nil, errToReturn).Times(2)
+
+		s.Run(fmt.Sprintf("GetAccountKeysAtLatestBlock - all keys - happy path - recovers %v", errToReturn), func() {
+			s.testGetAccountKeysAtLatestBlock(ctx, backend)
+		})
+
+		s.Run(fmt.Sprintf("GetAccountKeysAtBlockHeight - all keys - happy path - recovers %v", errToReturn), func() {
+			s.headers.On("BlockIDByHeight", s.block.Height).Return(s.block.ID(), nil).Once()
+
+			s.testGetAccountKeysAtBlockHeight(ctx, backend)
+		})
+	}
+}
+
+// TestGetAccountKeyFromFailover_HappyPath tests that when an error is returned getting an account key by key index
+// from local storage, the backend will attempt to get the account key from an execution node
+func (s *AccountsSuite) TestGetAccountKeyFromFailover_HappyPath() {
+	ctx := context.Background()
+
+	s.setupExecutionNodes(s.block)
+	s.setupENSuccessResponse(s.block.ID())
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	backend := s.defaultAccountsBackend(query_mode.IndexQueryModeFailover, scriptExecutor)
+	var keyIndex uint32 = 0
+
+	for _, errToReturn := range []error{storage.ErrHeightNotIndexed, storage.ErrNotFound} {
+		scriptExecutor.On("GetAccountKey", mock.Anything, s.account.Address, keyIndex, s.block.Height).
+			Return(nil, errToReturn).Times(2)
+
+		s.Run(fmt.Sprintf("GetAccountKeyAtLatestBlock - by key index - happy path - recovers %v", errToReturn), func() {
+			s.testGetAccountKeyAtLatestBlock(ctx, backend, keyIndex)
+		})
+
+		s.Run(fmt.Sprintf("GetAccountKeyAtBlockHeight - by key index - happy path - recovers %v", errToReturn), func() {
+			s.headers.On("BlockIDByHeight", s.block.Height).Return(s.block.ID(), nil).Once()
+
+			s.testGetAccountKeyAtBlockHeight(ctx, backend, keyIndex)
+		})
+	}
+}
+
+func (s *AccountsSuite) testGetAccount(ctx context.Context, backend *Accounts, statusCode codes.Code) {
+	s.state.On("Sealed").Return(s.snapshot, nil).Once()
+	s.snapshot.On("Head").Return(s.block.ToHeader(), nil).Once()
+
+	if statusCode == codes.OK {
+		actual, err := backend.GetAccount(ctx, s.account.Address)
+		s.Require().NoError(err)
+		s.Require().Equal(s.account, actual)
+	} else {
+		actual, err := backend.GetAccount(ctx, s.failingAddress)
+		s.Require().Error(err)
+		s.Require().Equal(statusCode, status.Code(err))
+		s.Require().Nil(actual)
+	}
+}
+
+func (s *AccountsSuite) testGetAccountAtLatestBlock(ctx context.Context, backend *Accounts, statusCode codes.Code) {
+	s.state.On("Sealed").Return(s.snapshot, nil).Once()
+	s.snapshot.On("Head").Return(s.block.ToHeader(), nil).Once()
+
+	if statusCode == codes.OK {
+		actual, err := backend.GetAccountAtLatestBlock(ctx, s.account.Address)
+		s.Require().NoError(err)
+		s.Require().Equal(s.account, actual)
+	} else {
+		actual, err := backend.GetAccountAtLatestBlock(ctx, s.failingAddress)
+		s.Require().Error(err)
+		s.Require().Equal(statusCode, status.Code(err))
+		s.Require().Nil(actual)
+	}
+}
+
+func (s *AccountsSuite) testGetAccountAtBlockHeight(ctx context.Context, backend *Accounts, statusCode codes.Code) {
+	height := s.block.Height
+	s.headers.On("BlockIDByHeight", height).Return(s.block.ID(), nil).Once()
+
+	if statusCode == codes.OK {
+		actual, err := backend.GetAccountAtBlockHeight(ctx, s.account.Address, height)
+		s.Require().NoError(err)
+		s.Require().Equal(s.account, actual)
+	} else {
+		actual, err := backend.GetAccountAtBlockHeight(ctx, s.failingAddress, height)
+		s.Require().Error(err)
+		s.Require().Equal(statusCode, status.Code(err))
+		s.Require().Nil(actual)
+	}
+}
+
+func (s *AccountsSuite) testGetAccountBalanceAtLatestBlock(ctx context.Context,
backend *Accounts) { + s.state.On("Sealed").Return(s.snapshot, nil).Once() + s.snapshot.On("Head").Return(s.block.ToHeader(), nil).Once() + + actual, err := backend.GetAccountBalanceAtLatestBlock(ctx, s.account.Address) + s.Require().NoError(err) + s.Require().Equal(s.account.Balance, actual) +} + +func (s *AccountsSuite) testGetAccountBalanceAtBlockHeight(ctx context.Context, backend *Accounts) { + actual, err := backend.GetAccountBalanceAtBlockHeight(ctx, s.account.Address, s.block.Height) + s.Require().NoError(err) + s.Require().Equal(s.account.Balance, actual) +} + +func (s *AccountsSuite) testGetAccountKeysAtLatestBlock(ctx context.Context, backend *Accounts) { + s.state.On("Sealed").Return(s.snapshot, nil).Once() + s.snapshot.On("Head").Return(s.block.ToHeader(), nil).Once() + + actual, err := backend.GetAccountKeysAtLatestBlock(ctx, s.account.Address) + s.Require().NoError(err) + s.Require().Equal(s.account.Keys, actual) +} + +func (s *AccountsSuite) testGetAccountKeyAtLatestBlock(ctx context.Context, backend *Accounts, keyIndex uint32) { + s.state.On("Sealed").Return(s.snapshot, nil).Once() + s.snapshot.On("Head").Return(s.block.ToHeader(), nil).Once() + + actual, err := backend.GetAccountKeyAtLatestBlock(ctx, s.account.Address, keyIndex) + expectedKeyByIndex := findAccountKeyByIndex(s.account.Keys, keyIndex) + s.Require().NoError(err) + s.Require().Equal(expectedKeyByIndex, actual) +} + +func (s *AccountsSuite) testGetAccountKeysAtBlockHeight(ctx context.Context, backend *Accounts) { + actual, err := backend.GetAccountKeysAtBlockHeight(ctx, s.account.Address, s.block.Height) + s.Require().NoError(err) + s.Require().Equal(s.account.Keys, actual) +} + +func (s *AccountsSuite) testGetAccountKeyAtBlockHeight(ctx context.Context, backend *Accounts, keyIndex uint32) { + actual, err := backend.GetAccountKeyAtBlockHeight(ctx, s.account.Address, keyIndex, s.block.Height) + expectedKeyByIndex := findAccountKeyByIndex(s.account.Keys, keyIndex) + s.Require().NoError(err) + s.Require().Equal(expectedKeyByIndex, actual) +} + +func findAccountKeyByIndex(keys []flow.AccountPublicKey, keyIndex uint32) *flow.AccountPublicKey { + for _, key := range keys { + if key.Index == keyIndex { + return &key + } + } + return &flow.AccountPublicKey{} +} + +func (s *AccountsSuite) defaultAccountsBackend(mode query_mode.IndexQueryMode, executor *execmock.ScriptExecutor) *Accounts { + accounts, err := NewAccountsBackend( + s.log, + s.state, + s.headers, + s.connectionFactory, + node_communicator.NewNodeCommunicator(false), + mode, + executor, + commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.state, + s.receipts, + flow.IdentifierList{}, + flow.IdentifierList{}, + ), + ) + require.NoError(s.T(), err) + + return accounts +} + +// setupExecutionNodes sets up the mocks required to test against an EN backend +func (s *AccountsSuite) setupExecutionNodes(block *flow.Block) { + s.params.On("FinalizedRoot").Return(s.rootHeader, nil) + s.state.On("Params").Return(s.params) + s.state.On("Final").Return(s.snapshot) + s.snapshot.On("Identities", mock.Anything).Return(s.executionNodes, nil) + + // this line causes a S1021 lint error because receipts is explicitly declared. 
this is required + // to ensure the mock library handles the response type correctly + var receipts flow.ExecutionReceiptList //nolint:gosimple + receipts = unittest.ReceiptsForBlockFixture(block, s.executionNodes.NodeIDs()) + s.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) + + s.connectionFactory.On("GetExecutionAPIClient", mock.Anything). + Return(s.execClient, &mocks.MockCloser{}, nil) +} + +// setupENSuccessResponse configures the execution node client to return a successful response +func (s *AccountsSuite) setupENSuccessResponse(blockID flow.Identifier) { + expectedExecRequest := &execproto.GetAccountAtBlockIDRequest{ + BlockId: blockID[:], + Address: s.account.Address.Bytes(), + } + + convertedAccount, err := convert.AccountToMessage(s.account) + s.Require().NoError(err) + + s.execClient.On("GetAccountAtBlockID", mock.Anything, expectedExecRequest). + Return(&execproto.GetAccountAtBlockIDResponse{ + Account: convertedAccount, + }, nil) +} + +// setupENFailingResponse configures the execution node client to return an error +func (s *AccountsSuite) setupENFailingResponse(blockID flow.Identifier, err error) { + failingRequest := &execproto.GetAccountAtBlockIDRequest{ + BlockId: blockID[:], + Address: s.failingAddress.Bytes(), + } + + s.execClient.On("GetAccountAtBlockID", mock.Anything, failingRequest). + Return(nil, err) +} diff --git a/engine/access/rpc/backend/accounts/provider/comparing.go b/engine/access/rpc/backend/accounts/provider/comparing.go new file mode 100644 index 00000000000..011a1a38713 --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/comparing.go @@ -0,0 +1,175 @@ +package provider + +import ( + "bytes" + "context" + "errors" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" +) + +type ComparingAccountProvider struct { + FailoverAccountProvider +} + +var _ AccountProvider = (*ComparingAccountProvider)(nil) + +func NewComparingAccountProvider( + log zerolog.Logger, + state protocol.State, + localRequester AccountProvider, + execNodeRequester AccountProvider, +) *ComparingAccountProvider { + return &ComparingAccountProvider{ + FailoverAccountProvider: FailoverAccountProvider{ + log: log.With().Str("account_provider", "comparing").Logger(), + state: state, + localRequester: localRequester, + execNodeRequester: execNodeRequester, + }, + } +} + +func (c *ComparingAccountProvider) GetAccountAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) (*flow.Account, error) { + localAccount, localErr := c.localRequester.GetAccountAtBlock(ctx, address, blockID, height) + if localErr == nil { + return localAccount, nil + } + + execNodeAccount, execNodeErr := c.execNodeRequester.GetAccountAtBlock(ctx, address, blockID, height) + c.compareAccountResults(execNodeAccount, execNodeErr, localAccount, localErr, blockID, address) + + return execNodeAccount, execNodeErr +} + +// compareAccountResults compares the result and error returned from local and remote getAccount calls +// and logs the results if they are different +func (c *ComparingAccountProvider) compareAccountResults( + execNodeResult *flow.Account, + execErr error, + localResult *flow.Account, + localErr error, + blockID flow.Identifier, + address flow.Address, +) { + if c.log.GetLevel() > zerolog.DebugLevel { + return + } + + lgCtx := c.log.With(). + Hex("block_id", blockID[:]). 
+ Str("address", address.String()) + + // errors are different + if !errors.Is(execErr, localErr) { + lgCtx = lgCtx. + AnErr("execution_node_error", execErr). + AnErr("local_error", localErr) + + lg := lgCtx.Logger() + lg.Debug().Msg("errors from getting account on local and EN do not match") + return + } + + // both errors are nil, compare the accounts + if execErr == nil { + lgCtx, ok := compareAccountsLogger(execNodeResult, localResult, lgCtx) + if !ok { + lg := lgCtx.Logger() + lg.Debug().Msg("accounts from local and EN do not match") + } + } +} + +// compareAccountsLogger compares accounts produced by the execution node and local storage and +// return a logger configured to log the differences +func compareAccountsLogger(exec, local *flow.Account, lgCtx zerolog.Context) (zerolog.Context, bool) { + different := false + + if exec.Address != local.Address { + lgCtx = lgCtx. + Str("exec_node_address", exec.Address.String()). + Str("local_address", local.Address.String()) + different = true + } + + if exec.Balance != local.Balance { + lgCtx = lgCtx. + Uint64("exec_node_balance", exec.Balance). + Uint64("local_balance", local.Balance) + different = true + } + + contractListMatches := true + if len(exec.Contracts) != len(local.Contracts) { + lgCtx = lgCtx. + Int("exec_node_contract_count", len(exec.Contracts)). + Int("local_contract_count", len(local.Contracts)) + contractListMatches = false + different = true + } + + missingContracts := zerolog.Arr() + mismatchContracts := zerolog.Arr() + + for name, execContract := range exec.Contracts { + localContract, ok := local.Contracts[name] + + if !ok { + missingContracts.Str(name) + contractListMatches = false + different = true + } + + if !bytes.Equal(execContract, localContract) { + mismatchContracts.Str(name) + different = true + } + } + + lgCtx = lgCtx. + Array("missing_contracts", missingContracts). + Array("mismatch_contracts", mismatchContracts) + + // only check if there were any missing + if !contractListMatches { + extraContracts := zerolog.Arr() + for name := range local.Contracts { + if _, ok := exec.Contracts[name]; !ok { + extraContracts.Str(name) + different = true + } + } + lgCtx = lgCtx.Array("extra_contracts", extraContracts) + } + + if len(exec.Keys) != len(local.Keys) { + lgCtx = lgCtx. + Int("exec_node_key_count", len(exec.Keys)). 
+ Int("local_key_count", len(local.Keys)) + different = true + } + + mismatchKeys := zerolog.Arr() + + for i, execKey := range exec.Keys { + localKey := local.Keys[i] + + if !execKey.PublicKey.Equals(localKey.PublicKey) { + mismatchKeys.Uint32(execKey.Index) + different = true + } + } + + lgCtx = lgCtx.Array("mismatch_keys", mismatchKeys) + + return lgCtx, !different +} diff --git a/engine/access/rpc/backend/accounts/provider/execution_node.go b/engine/access/rpc/backend/accounts/provider/execution_node.go new file mode 100644 index 00000000000..f04be98de6d --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/execution_node.go @@ -0,0 +1,170 @@ +package provider + +import ( + "context" + "time" + + "github.com/rs/zerolog" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" +) + +type ENAccountProvider struct { + log zerolog.Logger + state protocol.State + connFactory connection.ConnectionFactory + nodeCommunicator node_communicator.Communicator + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider +} + +var _ AccountProvider = (*ENAccountProvider)(nil) + +func NewENAccountProvider( + log zerolog.Logger, + state protocol.State, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, + execNodeIdentityProvider *rpc.ExecutionNodeIdentitiesProvider, +) *ENAccountProvider { + return &ENAccountProvider{ + log: log.With().Str("account_provider", "execution_node").Logger(), + state: state, + connFactory: connFactory, + nodeCommunicator: nodeCommunicator, + execNodeIdentitiesProvider: execNodeIdentityProvider, + } +} + +func (e *ENAccountProvider) GetAccountAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + _ uint64, +) (*flow.Account, error) { + req := &execproto.GetAccountAtBlockIDRequest{ + Address: address.Bytes(), + BlockId: blockID[:], + } + + execNodes, err := e.execNodeIdentitiesProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + return nil, rpc.ConvertError(err, "failed to find execution node to query", codes.Internal) + } + + var resp *execproto.GetAccountAtBlockIDResponse + errToReturn := e.nodeCommunicator.CallAvailableNode( + execNodes, + func(node *flow.IdentitySkeleton) error { + var err error + start := time.Now() + + resp, err = e.tryGetAccount(ctx, node, req) + duration := time.Since(start) + + lg := e.log.With(). + Str("execution_node", node.String()). + Hex("block_id", req.GetBlockId()). + Hex("address", req.GetAddress()). + Int64("rtt_ms", duration.Milliseconds()). 
+ Logger() + + if err != nil { + lg.Err(err).Msg("failed to execute GetAccount") + return err + } + + // return if any execution node replied successfully + lg.Debug().Msg("Successfully got account info") + return nil + }, + nil, + ) + + if errToReturn != nil { + return nil, rpc.ConvertError(errToReturn, "failed to get account from the execution node", codes.Internal) + } + + account, err := convert.MessageToAccount(resp.GetAccount()) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to convert account message: %v", err) + } + + return account, nil +} + +func (e *ENAccountProvider) GetAccountBalanceAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) (uint64, error) { + account, err := e.GetAccountAtBlock(ctx, address, blockID, height) + if err != nil { + return 0, err + } + + return account.Balance, nil +} + +func (e *ENAccountProvider) GetAccountKeyAtBlock( + ctx context.Context, + address flow.Address, + keyIndex uint32, + blockID flow.Identifier, + height uint64, +) (*flow.AccountPublicKey, error) { + account, err := e.GetAccountAtBlock(ctx, address, blockID, height) + if err != nil { + return nil, err + } + + for _, key := range account.Keys { + if key.Index == keyIndex { + return &key, nil + } + } + + return nil, status.Errorf(codes.NotFound, "failed to get account key by index: %d", keyIndex) +} + +func (e *ENAccountProvider) GetAccountKeysAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) ([]flow.AccountPublicKey, error) { + account, err := e.GetAccountAtBlock(ctx, address, blockID, height) + if err != nil { + return nil, err + } + + return account.Keys, nil +} + +// tryGetAccount attempts to get the account from the given execution node. 
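+// A client is obtained from the connection factory for each attempt and released
+// via the returned closer once the call completes.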
+func (e *ENAccountProvider) tryGetAccount( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetAccountAtBlockIDRequest, +) (*execproto.GetAccountAtBlockIDResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + return execRPCClient.GetAccountAtBlockID(ctx, req) +} diff --git a/engine/access/rpc/backend/accounts/provider/failover.go b/engine/access/rpc/backend/accounts/provider/failover.go new file mode 100644 index 00000000000..d9a942133af --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/failover.go @@ -0,0 +1,106 @@ +package provider + +import ( + "context" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" +) + +type FailoverAccountProvider struct { + log zerolog.Logger + state protocol.State + localRequester AccountProvider + execNodeRequester AccountProvider +} + +var _ AccountProvider = (*FailoverAccountProvider)(nil) + +func NewFailoverAccountProvider( + log zerolog.Logger, + state protocol.State, + localRequester AccountProvider, + execNodeRequester AccountProvider, +) *FailoverAccountProvider { + return &FailoverAccountProvider{ + log: log.With().Str("account_provider", "failover").Logger(), + state: state, + localRequester: localRequester, + execNodeRequester: execNodeRequester, + } +} + +func (f *FailoverAccountProvider) GetAccountAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) (*flow.Account, error) { + localAccount, localErr := f.localRequester.GetAccountAtBlock(ctx, address, blockID, height) + if localErr == nil { + return localAccount, nil + } + + execNodeAccount, execNodeErr := f.execNodeRequester.GetAccountAtBlock(ctx, address, blockID, height) + return execNodeAccount, execNodeErr +} + +func (f *FailoverAccountProvider) GetAccountBalanceAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) (uint64, error) { + localBalance, localErr := f.localRequester.GetAccountBalanceAtBlock(ctx, address, blockID, height) + if localErr == nil { + return localBalance, nil + } + + execNodeBalance, execNodeErr := f.execNodeRequester.GetAccountBalanceAtBlock(ctx, address, blockID, height) + if execNodeErr != nil { + return 0, execNodeErr + } + + return execNodeBalance, nil +} + +func (f *FailoverAccountProvider) GetAccountKeyAtBlock( + ctx context.Context, + address flow.Address, + keyIndex uint32, + blockID flow.Identifier, + height uint64, +) (*flow.AccountPublicKey, error) { + localKey, localErr := f.localRequester.GetAccountKeyAtBlock(ctx, address, keyIndex, blockID, height) + if localErr == nil { + return localKey, nil + } + + execNodeKey, execNodeErr := f.execNodeRequester.GetAccountKeyAtBlock(ctx, address, keyIndex, blockID, height) + if execNodeErr != nil { + return nil, execNodeErr + } + + return execNodeKey, nil +} + +func (f *FailoverAccountProvider) GetAccountKeysAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) ([]flow.AccountPublicKey, error) { + localKeys, localErr := f.localRequester.GetAccountKeysAtBlock(ctx, address, blockID, height) + if localErr == nil { + return localKeys, nil + } + + execNodeKeys, execNodeErr := f.execNodeRequester.GetAccountKeysAtBlock(ctx, address, blockID, height) + if execNodeErr != nil { + return nil, execNodeErr + } + + return execNodeKeys, nil +} diff --git 
a/engine/access/rpc/backend/accounts/provider/local.go b/engine/access/rpc/backend/accounts/provider/local.go new file mode 100644 index 00000000000..5fe943e2261 --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/local.go @@ -0,0 +1,114 @@ +package provider + +import ( + "context" + "errors" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/common/rpc" + fvmerrors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +type LocalAccountProvider struct { + log zerolog.Logger + state protocol.State + scriptExecutor execution.ScriptExecutor +} + +var _ AccountProvider = (*LocalAccountProvider)(nil) + +func NewLocalAccountProvider( + log zerolog.Logger, + state protocol.State, + scriptExecutor execution.ScriptExecutor, +) *LocalAccountProvider { + return &LocalAccountProvider{ + log: log.With().Str("account_provider", "local").Logger(), + state: state, + scriptExecutor: scriptExecutor, + } +} + +func (l *LocalAccountProvider) GetAccountAtBlock( + ctx context.Context, + address flow.Address, + _ flow.Identifier, + height uint64, +) (*flow.Account, error) { + account, err := l.scriptExecutor.GetAccountAtBlockHeight(ctx, address, height) + if err != nil { + return nil, convertAccountError(common.ResolveHeightError(l.state.Params(), height, err), address, height) + } + return account, nil +} + +func (l *LocalAccountProvider) GetAccountBalanceAtBlock( + ctx context.Context, + address flow.Address, + blockID flow.Identifier, + height uint64, +) (uint64, error) { + accountBalance, err := l.scriptExecutor.GetAccountBalance(ctx, address, height) + if err != nil { + l.log.Debug().Err(err).Msgf("failed to get account balance at blockID: %v", blockID) + return 0, err + } + + return accountBalance, nil +} + +func (l *LocalAccountProvider) GetAccountKeyAtBlock( + ctx context.Context, + address flow.Address, + keyIndex uint32, + _ flow.Identifier, + height uint64, +) (*flow.AccountPublicKey, error) { + accountKey, err := l.scriptExecutor.GetAccountKey(ctx, address, keyIndex, height) + if err != nil { + l.log.Debug().Err(err).Msgf("failed to get account key at height: %d", height) + return nil, err + } + + return accountKey, nil +} + +func (l *LocalAccountProvider) GetAccountKeysAtBlock( + ctx context.Context, + address flow.Address, + _ flow.Identifier, + height uint64, +) ([]flow.AccountPublicKey, error) { + accountKeys, err := l.scriptExecutor.GetAccountKeys(ctx, address, height) + if err != nil { + l.log.Debug().Err(err).Msgf("failed to get account keys at height: %d", height) + return nil, err + } + + return accountKeys, nil +} + +// convertAccountError converts the script execution error to a gRPC error +func convertAccountError(err error, address flow.Address, height uint64) error { + if err == nil { + return nil + } + + if errors.Is(err, storage.ErrNotFound) { + return status.Errorf(codes.NotFound, "account with address %s not found: %v", address, err) + } + + if fvmerrors.IsAccountNotFoundError(err) { + return status.Errorf(codes.NotFound, "account not found") + } + + return rpc.ConvertIndexError(err, height, "failed to get account") +} diff --git a/engine/access/rpc/backend/accounts/provider/mock/account_provider.go b/engine/access/rpc/backend/accounts/provider/mock/account_provider.go 
new file mode 100644 index 00000000000..7d5034c1187 --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/mock/account_provider.go @@ -0,0 +1,147 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// AccountProvider is an autogenerated mock type for the AccountProvider type +type AccountProvider struct { + mock.Mock +} + +// GetAccountAtBlock provides a mock function with given fields: ctx, address, blockID, height +func (_m *AccountProvider) GetAccountAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) (*flow.Account, error) { + ret := _m.Called(ctx, address, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountAtBlock") + } + + var r0 *flow.Account + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) (*flow.Account, error)); ok { + return rf(ctx, address, blockID, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) *flow.Account); ok { + r0 = rf(ctx, address, blockID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Account) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, flow.Identifier, uint64) error); ok { + r1 = rf(ctx, address, blockID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountBalanceAtBlock provides a mock function with given fields: ctx, address, blockID, height +func (_m *AccountProvider) GetAccountBalanceAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) (uint64, error) { + ret := _m.Called(ctx, address, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtBlock") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) (uint64, error)); ok { + return rf(ctx, address, blockID, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) uint64); ok { + r0 = rf(ctx, address, blockID, height) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, flow.Identifier, uint64) error); ok { + r1 = rf(ctx, address, blockID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeyAtBlock provides a mock function with given fields: ctx, address, keyIndex, blockID, height +func (_m *AccountProvider) GetAccountKeyAtBlock(ctx context.Context, address flow.Address, keyIndex uint32, blockID flow.Identifier, height uint64) (*flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, keyIndex, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtBlock") + } + + var r0 *flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, flow.Identifier, uint64) (*flow.AccountPublicKey, error)); ok { + return rf(ctx, address, keyIndex, blockID, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, flow.Identifier, uint64) *flow.AccountPublicKey); ok { + r0 = rf(ctx, address, keyIndex, blockID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint32, flow.Identifier, uint64) error); ok { + r1 = rf(ctx, address, 
keyIndex, blockID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeysAtBlock provides a mock function with given fields: ctx, address, blockID, height +func (_m *AccountProvider) GetAccountKeysAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) ([]flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtBlock") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) ([]flow.AccountPublicKey, error)); ok { + return rf(ctx, address, blockID, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier, uint64) []flow.AccountPublicKey); ok { + r0 = rf(ctx, address, blockID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, flow.Identifier, uint64) error); ok { + r1 = rf(ctx, address, blockID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewAccountProvider creates a new instance of AccountProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAccountProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *AccountProvider { + mock := &AccountProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/accounts/provider/provider.go b/engine/access/rpc/backend/accounts/provider/provider.go new file mode 100644 index 00000000000..36818399fae --- /dev/null +++ b/engine/access/rpc/backend/accounts/provider/provider.go @@ -0,0 +1,14 @@ +package provider + +import ( + "context" + + "github.com/onflow/flow-go/model/flow" +) + +type AccountProvider interface { + GetAccountAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) (*flow.Account, error) + GetAccountBalanceAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) (uint64, error) + GetAccountKeyAtBlock(ctx context.Context, address flow.Address, keyIndex uint32, blockID flow.Identifier, height uint64) (*flow.AccountPublicKey, error) + GetAccountKeysAtBlock(ctx context.Context, address flow.Address, blockID flow.Identifier, height uint64) ([]flow.AccountPublicKey, error) +} diff --git a/engine/access/rpc/backend/backend.go b/engine/access/rpc/backend/backend.go index 721b3b063c9..2c00ae9bc28 100644 --- a/engine/access/rpc/backend/backend.go +++ b/engine/access/rpc/backend/backend.go @@ -2,53 +2,52 @@ package backend import ( "context" + "crypto/md5" //nolint:gosec "fmt" "time" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/access/validator" "github.com/onflow/flow-go/cmd/build" + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/rpc/backend/accounts" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + 
"github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/rpc/backend/scripts" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" + txstream "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/stream" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/engine/access/subscription/tracker" "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/engine/common/version" + "github.com/onflow/flow-go/fvm/blueprints" + accessmodel "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/state_synchronization" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" ) -// maxExecutionNodesCnt is the max number of execution nodes that will be contacted to complete an execution api request -const maxExecutionNodesCnt = 3 - -// minExecutionNodesCnt is the minimum number of execution nodes expected to have sent the execution receipt for a block -const minExecutionNodesCnt = 2 - -// maxAttemptsForExecutionReceipt is the maximum number of attempts to find execution receipts for a given block ID -const maxAttemptsForExecutionReceipt = 3 - -// DefaultMaxHeightRange is the default maximum size of range requests. -const DefaultMaxHeightRange = 250 - // DefaultSnapshotHistoryLimit the amount of blocks to look back in state // when recursively searching for a valid snapshot -const DefaultSnapshotHistoryLimit = 50 - -// DefaultLoggedScriptsCacheSize is the default size of the lookup cache used to dedupe logs of scripts sent to ENs -// limiting cache size to 16MB and does not affect script execution, only for keeping logs tidy -const DefaultLoggedScriptsCacheSize = 1_000_000 +const DefaultSnapshotHistoryLimit = 500 // DefaultConnectionPoolSize is the default size for the connection pool to collection and execution nodes const DefaultConnectionPoolSize = 250 -var preferredENIdentifiers flow.IdentifierList -var fixedENIdentifiers flow.IdentifierList - // Backend implements the Access API. // // It is composed of several sub-backends that implement part of the Access API. @@ -62,150 +61,142 @@ var fixedENIdentifiers flow.IdentifierList // // All remaining calls are handled by the base Backend in this file. 
type Backend struct { - backendScripts - backendTransactions - backendEvents + accounts.Accounts + events.Events + scripts.Scripts + transactions.Transactions + txstream.TransactionStream backendBlockHeaders backendBlockDetails - backendAccounts backendExecutionResults backendNetwork + backendSubscribeBlocks + + state protocol.State + collections storage.Collections + staticCollectionRPC accessproto.AccessAPIClient - state protocol.State - chainID flow.ChainID - collections storage.Collections - executionReceipts storage.ExecutionReceipts - connFactory ConnectionFactory + stateParams protocol.Params + versionControl *version.VersionControl + + BlockTracker tracker.BlockTracker } -func New( - state protocol.State, - collectionRPC accessproto.AccessAPIClient, - historicalAccessNodes []accessproto.AccessAPIClient, - blocks storage.Blocks, - headers storage.Headers, - collections storage.Collections, - transactions storage.Transactions, - executionReceipts storage.ExecutionReceipts, - executionResults storage.ExecutionResults, - chainID flow.ChainID, - transactionMetrics module.TransactionMetrics, - connFactory ConnectionFactory, - retryEnabled bool, - maxHeightRange uint, - preferredExecutionNodeIDs []string, - fixedExecutionNodeIDs []string, - log zerolog.Logger, - snapshotHistoryLimit int, - archiveAddressList []string, -) *Backend { - retry := newRetry() - if retryEnabled { - retry.Activate() - } +type Params struct { + State protocol.State + CollectionRPC accessproto.AccessAPIClient + HistoricalAccessNodes []accessproto.AccessAPIClient + Blocks storage.Blocks + Headers storage.Headers + Collections storage.Collections + Transactions storage.Transactions + ExecutionReceipts storage.ExecutionReceipts + ExecutionResults storage.ExecutionResults + TxResultErrorMessages storage.TransactionResultErrorMessages + ChainID flow.ChainID + AccessMetrics module.AccessMetrics + ConnFactory connection.ConnectionFactory + RetryEnabled bool + MaxHeightRange uint + Log zerolog.Logger + SnapshotHistoryLimit int + Communicator node_communicator.Communicator + TxResultCacheSize uint + ScriptExecutor execution.ScriptExecutor + ScriptExecutionMode query_mode.IndexQueryMode + CheckPayerBalanceMode validator.PayerBalanceMode + EventQueryMode query_mode.IndexQueryMode + BlockTracker tracker.BlockTracker + SubscriptionHandler *subscription.SubscriptionHandler + MaxScriptAndArgumentSize uint + + EventsIndex *index.EventsIndex + TxResultQueryMode query_mode.IndexQueryMode + TxResultsIndex *index.TransactionResultsIndex + LastFullBlockHeight *counters.PersistentStrictMonotonicCounter + IndexReporter state_synchronization.IndexReporter + VersionControl *version.VersionControl + ExecNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider + TxErrorMessageProvider error_messages.Provider + ScheduledCallbacksEnabled bool +} + +var _ access.API = (*Backend)(nil) - loggedScripts, err := lru.New(DefaultLoggedScriptsCacheSize) +// New creates backend instance +func New(params Params) (*Backend, error) { + loggedScripts, err := lru.New[[md5.Size]byte, time.Time](common.DefaultLoggedScriptsCacheSize) if err != nil { - log.Fatal().Err(err).Msg("failed to initialize script logging cache") + return nil, fmt.Errorf("failed to initialize script logging cache: %w", err) } - b := &Backend{ - state: state, - // create the sub-backends - backendScripts: backendScripts{ - headers: headers, - executionReceipts: executionReceipts, - connFactory: connFactory, - state: state, - log: log, - metrics: transactionMetrics, - loggedScripts: 
loggedScripts, - archiveAddressList: archiveAddressList, - }, - backendTransactions: backendTransactions{ - staticCollectionRPC: collectionRPC, - state: state, - chainID: chainID, - collections: collections, - blocks: blocks, - transactions: transactions, - executionReceipts: executionReceipts, - transactionValidator: configureTransactionValidator(state, chainID), - transactionMetrics: transactionMetrics, - retry: retry, - connFactory: connFactory, - previousAccessNodes: historicalAccessNodes, - log: log, - }, - backendEvents: backendEvents{ - state: state, - headers: headers, - executionReceipts: executionReceipts, - connFactory: connFactory, - log: log, - maxHeightRange: maxHeightRange, - }, - backendBlockHeaders: backendBlockHeaders{ - headers: headers, - state: state, - }, - backendBlockDetails: backendBlockDetails{ - blocks: blocks, - state: state, - }, - backendAccounts: backendAccounts{ - state: state, - headers: headers, - executionReceipts: executionReceipts, - connFactory: connFactory, - log: log, - }, - backendExecutionResults: backendExecutionResults{ - executionResults: executionResults, - }, - backendNetwork: backendNetwork{ - state: state, - chainID: chainID, - snapshotHistoryLimit: snapshotHistoryLimit, - }, - collections: collections, - executionReceipts: executionReceipts, - connFactory: connFactory, - chainID: chainID, + var txResCache *lru.Cache[flow.Identifier, *accessmodel.TransactionResult] + if params.TxResultCacheSize > 0 { + txResCache, err = lru.New[flow.Identifier, *accessmodel.TransactionResult](int(params.TxResultCacheSize)) + if err != nil { + return nil, fmt.Errorf("failed to init cache for transaction results: %w", err) + } } - retry.SetBackend(b) - - preferredENIdentifiers, err = identifierList(preferredExecutionNodeIDs) + // the system tx is hardcoded and never changes during runtime + systemTx, err := blueprints.SystemChunkTransaction(params.ChainID.Chain()) if err != nil { - log.Fatal().Err(err).Msg("failed to convert node id string to Flow Identifier for preferred EN map") + return nil, fmt.Errorf("failed to create system chunk transaction: %w", err) } - - fixedENIdentifiers, err = identifierList(fixedExecutionNodeIDs) + systemTxID := systemTx.ID() + + accountsBackend, err := accounts.NewAccountsBackend( + params.Log, + params.State, + params.Headers, + params.ConnFactory, + params.Communicator, + params.ScriptExecutionMode, + params.ScriptExecutor, + params.ExecNodeIdentitiesProvider, + ) if err != nil { - log.Fatal().Err(err).Msg("failed to convert node id string to Flow Identifier for fixed EN map") + return nil, fmt.Errorf("failed to create accounts: %w", err) } - return b -} + eventsBackend, err := events.NewEventsBackend( + params.Log, + params.State, + params.ChainID.Chain(), + params.MaxHeightRange, + params.Headers, + params.ConnFactory, + params.Communicator, + params.EventQueryMode, + params.EventsIndex, + params.ExecNodeIdentitiesProvider, + ) + if err != nil { + return nil, fmt.Errorf("failed to create events: %w", err) + } -func identifierList(ids []string) (flow.IdentifierList, error) { - idList := make(flow.IdentifierList, len(ids)) - for i, idStr := range ids { - id, err := flow.HexStringToIdentifier(idStr) - if err != nil { - return nil, fmt.Errorf("failed to convert node id string %s to Flow Identifier: %w", id, err) - } - idList[i] = id + scriptsBackend, err := scripts.NewScriptsBackend( + params.Log, + params.AccessMetrics, + params.Headers, + params.State, + params.ConnFactory, + params.Communicator, + params.ScriptExecutor, + 
params.ScriptExecutionMode, + params.ExecNodeIdentitiesProvider, + loggedScripts, + params.MaxScriptAndArgumentSize, + ) + if err != nil { + return nil, fmt.Errorf("failed to create scripts: %w", err) } - return idList, nil -} -func configureTransactionValidator(state protocol.State, chainID flow.ChainID) *access.TransactionValidator { - return access.NewTransactionValidator( - access.NewProtocolStateBlocks(state), - chainID.Chain(), - access.TransactionValidationOptions{ + txValidator, err := validator.NewTransactionValidator( + validator.NewProtocolStateBlocks(params.State, params.IndexReporter), + params.ChainID.Chain(), + params.AccessMetrics, + validator.TransactionValidationOptions{ Expiry: flow.DefaultTransactionExpiry, ExpiryBuffer: flow.DefaultTransactionExpiryBuffer, AllowEmptyReferenceBlockID: false, @@ -214,13 +205,146 @@ func configureTransactionValidator(state protocol.State, chainID flow.ChainID) * MaxGasLimit: flow.DefaultMaxTransactionGasLimit, MaxTransactionByteSize: flow.DefaultMaxTransactionByteSize, MaxCollectionByteSize: flow.DefaultMaxCollectionByteSize, + CheckPayerBalanceMode: params.CheckPayerBalanceMode, }, + params.ScriptExecutor, + ) + if err != nil { + return nil, fmt.Errorf("could not create transaction validator: %w", err) + } + + txStatusDeriver := status.NewTxStatusDeriver(params.State, params.LastFullBlockHeight) + + localTxProvider := provider.NewLocalTransactionProvider( + params.State, + params.Collections, + params.Blocks, + params.EventsIndex, + params.TxResultsIndex, + params.TxErrorMessageProvider, + systemTxID, + txStatusDeriver, + params.ChainID, + params.ScheduledCallbacksEnabled, + ) + execNodeTxProvider := provider.NewENTransactionProvider( + params.Log, + params.State, + params.Collections, + params.ConnFactory, + params.Communicator, + params.ExecNodeIdentitiesProvider, + txStatusDeriver, + systemTxID, + params.ChainID, + params.ScheduledCallbacksEnabled, ) + failoverTxProvider := provider.NewFailoverTransactionProvider(localTxProvider, execNodeTxProvider) + + txParams := transactions.Params{ + Log: params.Log, + Metrics: params.AccessMetrics, + State: params.State, + ChainID: params.ChainID, + SystemTxID: systemTxID, + StaticCollectionRPCClient: params.CollectionRPC, + HistoricalAccessNodeClients: params.HistoricalAccessNodes, + NodeCommunicator: params.Communicator, + ConnFactory: params.ConnFactory, + EnableRetries: params.RetryEnabled, + NodeProvider: params.ExecNodeIdentitiesProvider, + Blocks: params.Blocks, + Collections: params.Collections, + Transactions: params.Transactions, + TxErrorMessageProvider: params.TxErrorMessageProvider, + TxResultCache: txResCache, + TxValidator: txValidator, + TxStatusDeriver: txStatusDeriver, + EventsIndex: params.EventsIndex, + TxResultsIndex: params.TxResultsIndex, + ScheduledCallbacksEnabled: params.ScheduledCallbacksEnabled, + } + + switch params.TxResultQueryMode { + case query_mode.IndexQueryModeLocalOnly: + txParams.TxProvider = localTxProvider + case query_mode.IndexQueryModeExecutionNodesOnly: + txParams.TxProvider = execNodeTxProvider + case query_mode.IndexQueryModeFailover: + txParams.TxProvider = failoverTxProvider + default: + return nil, fmt.Errorf("invalid tx result query mode: %s", params.TxResultQueryMode) + } + + txBackend, err := transactions.NewTransactionsBackend(txParams) + if err != nil { + return nil, fmt.Errorf("failed to create transactions backend: %w", err) + } + + txStreamBackend := txstream.NewTransactionStreamBackend( + params.Log, + params.State, + 
params.SubscriptionHandler, + params.BlockTracker, + txBackend.SendTransaction, + params.Blocks, + params.Collections, + params.Transactions, + failoverTxProvider, + txStatusDeriver, + ) + + b := &Backend{ + Accounts: *accountsBackend, + Events: *eventsBackend, + Scripts: *scriptsBackend, + Transactions: *txBackend, + TransactionStream: *txStreamBackend, + backendBlockHeaders: backendBlockHeaders{ + backendBlockBase: backendBlockBase{ + blocks: params.Blocks, + headers: params.Headers, + state: params.State, + }, + }, + backendBlockDetails: backendBlockDetails{ + backendBlockBase: backendBlockBase{ + blocks: params.Blocks, + headers: params.Headers, + state: params.State, + }, + }, + backendExecutionResults: backendExecutionResults{ + executionResults: params.ExecutionResults, + }, + backendNetwork: backendNetwork{ + state: params.State, + chainID: params.ChainID, + headers: params.Headers, + snapshotHistoryLimit: params.SnapshotHistoryLimit, + }, + backendSubscribeBlocks: backendSubscribeBlocks{ + log: params.Log, + state: params.State, + headers: params.Headers, + blocks: params.Blocks, + subscriptionHandler: params.SubscriptionHandler, + blockTracker: params.BlockTracker, + }, + + state: params.State, + collections: params.Collections, + staticCollectionRPC: params.CollectionRPC, + stateParams: params.State.Params(), + versionControl: params.VersionControl, + BlockTracker: params.BlockTracker, + } + + return b, nil } // Ping responds to requests when the server is up. func (b *Backend) Ping(ctx context.Context) error { - // staticCollectionRPC is only set if a collection node address was provided at startup if b.staticCollectionRPC != nil { _, err := b.staticCollectionRPC.Ping(ctx, &accessproto.PingRequest{}) @@ -233,24 +357,37 @@ func (b *Backend) Ping(ctx context.Context) error { } // GetNodeVersionInfo returns node version information such as semver, commit, sporkID, protocolVersion, etc -func (b *Backend) GetNodeVersionInfo(ctx context.Context) (*access.NodeVersionInfo, error) { - stateParams := b.state.Params() - sporkId, err := stateParams.SporkID() +func (b *Backend) GetNodeVersionInfo(_ context.Context) (*accessmodel.NodeVersionInfo, error) { + sporkID := b.stateParams.SporkID() + sporkRootBlockHeight := b.stateParams.SporkRootBlockHeight() + nodeRootBlockHeader := b.stateParams.SealedRoot() + protocolSnapshot, err := b.state.Final().ProtocolState() if err != nil { - return nil, status.Errorf(codes.Internal, "failed to read spork ID: %v", err) + return nil, fmt.Errorf("could not read finalized protocol kvstore: %w", err) } - protocolVersion, err := stateParams.ProtocolVersion() - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to read protocol version: %v", err) + var compatibleRange *accessmodel.CompatibleRange + + // Version control feature could be disabled + if b.versionControl != nil { + compatibleRange = &accessmodel.CompatibleRange{ + StartHeight: b.versionControl.StartHeight(), + EndHeight: b.versionControl.EndHeight(), + } + } + + nodeInfo := &accessmodel.NodeVersionInfo{ + Semver: build.Version(), + Commit: build.Commit(), + SporkId: sporkID, + ProtocolVersion: 0, + ProtocolStateVersion: protocolSnapshot.GetProtocolStateVersion(), + SporkRootBlockHeight: sporkRootBlockHeight, + NodeRootBlockHeight: nodeRootBlockHeader.Height, + CompatibleRange: compatibleRange, } - return &access.NodeVersionInfo{ - Semver: build.Semver(), - Commit: build.Commit(), - SporkId: sporkId, - ProtocolVersion: uint64(protocolVersion), - }, nil + return nodeInfo, nil } func 
(b *Backend) GetCollectionByID(_ context.Context, colID flow.Identifier) (*flow.LightCollection, error) { @@ -268,197 +405,23 @@ func (b *Backend) GetCollectionByID(_ context.Context, colID flow.Identifier) (* return col, nil } -func (b *Backend) GetNetworkParameters(_ context.Context) access.NetworkParameters { - return access.NetworkParameters{ - ChainID: b.chainID, - } -} - -// GetLatestProtocolStateSnapshot returns the latest finalized snapshot -func (b *Backend) GetLatestProtocolStateSnapshot(_ context.Context) ([]byte, error) { - snapshot := b.state.Final() - - validSnapshot, err := b.getValidSnapshot(snapshot, 0) +func (b *Backend) GetFullCollectionByID(_ context.Context, colID flow.Identifier) (*flow.Collection, error) { + // retrieve the collection from the collection storage + col, err := b.collections.ByID(colID) if err != nil { + // Collections are retrieved asynchronously as we finalize blocks, so + // it is possible for a client to request a finalized block from us + // containing some collection, then get a not found error when requesting + // that collection. These clients should retry. + err = rpc.ConvertStorageError(fmt.Errorf("please retry for collection in finalized block: %w", err)) return nil, err } - return convert.SnapshotToBytes(validSnapshot) -} - -// executionNodesForBlockID returns upto maxExecutionNodesCnt number of randomly chosen execution node identities -// which have executed the given block ID. -// If no such execution node is found, an InsufficientExecutionReceipts error is returned. -func executionNodesForBlockID( - ctx context.Context, - blockID flow.Identifier, - executionReceipts storage.ExecutionReceipts, - state protocol.State, - log zerolog.Logger) (flow.IdentityList, error) { - - var executorIDs flow.IdentifierList - - // check if the block ID is of the root block. If it is then don't look for execution receipts since they - // will not be present for the root block. - rootBlock, err := state.Params().Root() - if err != nil { - return nil, fmt.Errorf("failed to retreive execution IDs for block ID %v: %w", blockID, err) - } - - if rootBlock.ID() == blockID { - executorIdentities, err := state.Final().Identities(filter.HasRole(flow.RoleExecution)) - if err != nil { - return nil, fmt.Errorf("failed to retreive execution IDs for block ID %v: %w", blockID, err) - } - executorIDs = executorIdentities.NodeIDs() - } else { - // try to find atleast minExecutionNodesCnt execution node ids from the execution receipts for the given blockID - for attempt := 0; attempt < maxAttemptsForExecutionReceipt; attempt++ { - executorIDs, err = findAllExecutionNodes(blockID, executionReceipts, log) - if err != nil { - return nil, err - } - - if len(executorIDs) >= minExecutionNodesCnt { - break - } - - // log the attempt - log.Debug().Int("attempt", attempt).Int("max_attempt", maxAttemptsForExecutionReceipt). - Int("execution_receipts_found", len(executorIDs)). - Str("block_id", blockID.String()). 
- Msg("insufficient execution receipts") - - // if one or less execution receipts may have been received then re-query - // in the hope that more might have been received by now - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(100 * time.Millisecond << time.Duration(attempt)): - //retry after an exponential backoff - } - } - - receiptCnt := len(executorIDs) - // if less than minExecutionNodesCnt execution receipts have been received so far, then return random ENs - if receiptCnt < minExecutionNodesCnt { - newExecutorIDs, err := state.AtBlockID(blockID).Identities(filter.HasRole(flow.RoleExecution)) - if err != nil { - return nil, fmt.Errorf("failed to retreive execution IDs for block ID %v: %w", blockID, err) - } - executorIDs = newExecutorIDs.NodeIDs() - } - } - - // choose from the preferred or fixed execution nodes - subsetENs, err := chooseExecutionNodes(state, executorIDs) - if err != nil { - return nil, fmt.Errorf("failed to retreive execution IDs for block ID %v: %w", blockID, err) - } - - // randomly choose upto maxExecutionNodesCnt identities - executionIdentitiesRandom := subsetENs.Sample(maxExecutionNodesCnt) - - if len(executionIdentitiesRandom) == 0 { - return nil, fmt.Errorf("no matching execution node found for block ID %v", blockID) - } - - return executionIdentitiesRandom, nil -} - -// findAllExecutionNodes find all the execution nodes ids from the execution receipts that have been received for the -// given blockID -func findAllExecutionNodes( - blockID flow.Identifier, - executionReceipts storage.ExecutionReceipts, - log zerolog.Logger) (flow.IdentifierList, error) { - - // lookup the receipt's storage with the block ID - allReceipts, err := executionReceipts.ByBlockID(blockID) - if err != nil { - return nil, fmt.Errorf("failed to retreive execution receipts for block ID %v: %w", blockID, err) - } - - executionResultMetaList := make(flow.ExecutionReceiptMetaList, 0, len(allReceipts)) - for _, r := range allReceipts { - executionResultMetaList = append(executionResultMetaList, r.Meta()) - } - executionResultGroupedMetaList := executionResultMetaList.GroupByResultID() - - // maximum number of matching receipts found so far for any execution result id - maxMatchedReceiptCnt := 0 - // execution result id key for the highest number of matching receipts in the identicalReceipts map - var maxMatchedReceiptResultID flow.Identifier - - // find the largest list of receipts which have the same result ID - for resultID, executionReceiptList := range executionResultGroupedMetaList { - currentMatchedReceiptCnt := executionReceiptList.Size() - if currentMatchedReceiptCnt > maxMatchedReceiptCnt { - maxMatchedReceiptCnt = currentMatchedReceiptCnt - maxMatchedReceiptResultID = resultID - } - } - - // if there are more than one execution result for the same block ID, log as error - if executionResultGroupedMetaList.NumberGroups() > 1 { - identicalReceiptsStr := fmt.Sprintf("%v", flow.GetIDs(allReceipts)) - log.Error(). - Str("block_id", blockID.String()). - Str("execution_receipts", identicalReceiptsStr). 
- Msg("execution receipt mismatch") - } - - // pick the largest list of matching receipts - matchingReceiptMetaList := executionResultGroupedMetaList.GetGroup(maxMatchedReceiptResultID) - - metaReceiptGroupedByExecutorID := matchingReceiptMetaList.GroupByExecutorID() - - // collect all unique execution node ids from the receipts - var executorIDs flow.IdentifierList - for executorID := range metaReceiptGroupedByExecutorID { - executorIDs = append(executorIDs, executorID) - } - - return executorIDs, nil + return col, nil } -// chooseExecutionNodes finds the subset of execution nodes defined in the identity table by first -// choosing the preferred execution nodes which have executed the transaction. If no such preferred -// execution nodes are found, then the fixed execution nodes defined in the identity table are returned -// If neither preferred nor fixed nodes are defined, then all execution node matching the executor IDs are returned. -// e.g. If execution nodes in identity table are {1,2,3,4}, preferred ENs are defined as {2,3,4} -// and the executor IDs is {1,2,3}, then {2, 3} is returned as the chosen subset of ENs -func chooseExecutionNodes(state protocol.State, executorIDs flow.IdentifierList) (flow.IdentityList, error) { - - allENs, err := state.Final().Identities(filter.HasRole(flow.RoleExecution)) - if err != nil { - return nil, fmt.Errorf("failed to retreive all execution IDs: %w", err) +func (b *Backend) GetNetworkParameters(_ context.Context) accessmodel.NetworkParameters { + return accessmodel.NetworkParameters{ + ChainID: b.backendNetwork.chainID, } - - // first try and choose from the preferred EN IDs - var chosenIDs flow.IdentityList - if len(preferredENIdentifiers) > 0 { - // find the preferred execution node IDs which have executed the transaction - chosenIDs = allENs.Filter(filter.And(filter.HasNodeID(preferredENIdentifiers...), - filter.HasNodeID(executorIDs...))) - if len(chosenIDs) > 0 { - return chosenIDs, nil - } - } - - // if no preferred EN ID is found, then choose from the fixed EN IDs - if len(fixedENIdentifiers) > 0 { - // choose fixed ENs which have executed the transaction - chosenIDs = allENs.Filter(filter.And(filter.HasNodeID(fixedENIdentifiers...), filter.HasNodeID(executorIDs...))) - if len(chosenIDs) > 0 { - return chosenIDs, nil - } - // if no such ENs are found then just choose all fixed ENs - chosenIDs = allENs.Filter(filter.HasNodeID(fixedENIdentifiers...)) - return chosenIDs, nil - } - - // If no preferred or fixed ENs have been specified, then return all executor IDs i.e. 
no preference at all - return allENs.Filter(filter.HasNodeID(executorIDs...)), nil } diff --git a/engine/access/rpc/backend/backend_accounts.go b/engine/access/rpc/backend/backend_accounts.go deleted file mode 100644 index a3a41053c61..00000000000 --- a/engine/access/rpc/backend/backend_accounts.go +++ /dev/null @@ -1,155 +0,0 @@ -package backend - -import ( - "context" - "time" - - "github.com/hashicorp/go-multierror" - execproto "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" -) - -type backendAccounts struct { - state protocol.State - headers storage.Headers - executionReceipts storage.ExecutionReceipts - connFactory ConnectionFactory - log zerolog.Logger -} - -func (b *backendAccounts) GetAccount(ctx context.Context, address flow.Address) (*flow.Account, error) { - return b.GetAccountAtLatestBlock(ctx, address) -} - -func (b *backendAccounts) GetAccountAtLatestBlock(ctx context.Context, address flow.Address) (*flow.Account, error) { - - // get the latest sealed header - latestHeader, err := b.state.Sealed().Head() - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get latest sealed header: %v", err) - } - - // get the block id of the latest sealed header - latestBlockID := latestHeader.ID() - - account, err := b.getAccountAtBlockID(ctx, address, latestBlockID) - if err != nil { - b.log.Error().Err(err).Msgf("failed to get account at blockID: %v", latestBlockID) - return nil, err - } - - return account, nil -} - -func (b *backendAccounts) GetAccountAtBlockHeight( - ctx context.Context, - address flow.Address, - height uint64, -) (*flow.Account, error) { - // get header at given height - header, err := b.headers.ByHeight(height) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - // get block ID of the header at the given height - blockID := header.ID() - - account, err := b.getAccountAtBlockID(ctx, address, blockID) - if err != nil { - return nil, err - } - - return account, nil -} - -func (b *backendAccounts) getAccountAtBlockID( - ctx context.Context, - address flow.Address, - blockID flow.Identifier, -) (*flow.Account, error) { - - exeReq := &execproto.GetAccountAtBlockIDRequest{ - Address: address.Bytes(), - BlockId: blockID[:], - } - - execNodes, err := executionNodesForBlockID(ctx, blockID, b.executionReceipts, b.state, b.log) - if err != nil { - return nil, rpc.ConvertError(err, "failed to get account from the execution node", codes.Internal) - } - - var exeRes *execproto.GetAccountAtBlockIDResponse - exeRes, err = b.getAccountFromAnyExeNode(ctx, execNodes, exeReq) - if err != nil { - b.log.Error().Err(err).Msg("failed to get account from execution nodes") - return nil, rpc.ConvertError(err, "failed to get account from the execution node", codes.Internal) - } - - account, err := convert.MessageToAccount(exeRes.GetAccount()) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to convert account message: %v", err) - } - - return account, nil -} - -// getAccountFromAnyExeNode retrieves the given account from any EN in `execNodes`. -// We attempt querying each EN in sequence. If any EN returns a valid response, then errors from -// other ENs are logged and swallowed. 
If all ENs fail to return a valid response, then an -// error aggregating all failures is returned. -func (b *backendAccounts) getAccountFromAnyExeNode(ctx context.Context, execNodes flow.IdentityList, req *execproto.GetAccountAtBlockIDRequest) (*execproto.GetAccountAtBlockIDResponse, error) { - var errors *multierror.Error - for _, execNode := range execNodes { - // TODO: use the GRPC Client interceptor - start := time.Now() - - resp, err := b.tryGetAccount(ctx, execNode, req) - duration := time.Since(start) - if err == nil { - // return if any execution node replied successfully - b.log.Debug(). - Str("execution_node", execNode.String()). - Hex("block_id", req.GetBlockId()). - Hex("address", req.GetAddress()). - Int64("rtt_ms", duration.Milliseconds()). - Msg("Successfully got account info") - return resp, nil - } - b.log.Error(). - Str("execution_node", execNode.String()). - Hex("block_id", req.GetBlockId()). - Hex("address", req.GetAddress()). - Int64("rtt_ms", duration.Milliseconds()). - Err(err). - Msg("failed to execute GetAccount") - errors = multierror.Append(errors, err) - } - - return nil, errors.ErrorOrNil() -} - -func (b *backendAccounts) tryGetAccount(ctx context.Context, execNode *flow.Identity, req *execproto.GetAccountAtBlockIDRequest) (*execproto.GetAccountAtBlockIDResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - - resp, err := execRPCClient.GetAccountAtBlockID(ctx, req) - if err != nil { - if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) - } - return nil, err - } - return resp, nil -} diff --git a/engine/access/rpc/backend/backend_block_base.go b/engine/access/rpc/backend/backend_block_base.go new file mode 100644 index 00000000000..9ea0a9e0ce9 --- /dev/null +++ b/engine/access/rpc/backend/backend_block_base.go @@ -0,0 +1,48 @@ +package backend + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// backendBlockBase provides shared functionality for block status determination +type backendBlockBase struct { + blocks storage.Blocks + headers storage.Headers + state protocol.State +} + +// getBlockStatus returns the block status for a given header. +// +// No errors are expected during normal operations. +func (b *backendBlockBase) getBlockStatus(header *flow.Header) (flow.BlockStatus, error) { + // check which block is finalized at the target block's height + // note: this index is only populated for finalized blocks + blockIDFinalizedAtHeight, err := b.headers.BlockIDByHeight(header.Height) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return flow.BlockStatusUnknown, nil // height not indexed yet (not finalized) + } + return flow.BlockStatusUnknown, fmt.Errorf("failed to lookup block ID by height: %w", err) + } + + if blockIDFinalizedAtHeight != header.ID() { + // The queried block has been orphaned. It will never be finalized or sealed. 
+ return flow.BlockStatusUnknown, nil + } + + sealed, err := b.state.Sealed().Head() + if err != nil { + return flow.BlockStatusUnknown, fmt.Errorf("failed to lookup sealed header: %w", err) + } + + if header.Height > sealed.Height { + return flow.BlockStatusFinalized, nil + } + + return flow.BlockStatusSealed, nil +} diff --git a/engine/access/rpc/backend/backend_block_details.go b/engine/access/rpc/backend/backend_block_details.go index 19336ded8a4..b5daa7802bd 100644 --- a/engine/access/rpc/backend/backend_block_details.go +++ b/engine/access/rpc/backend/backend_block_details.go @@ -6,41 +6,47 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/module/irrecoverable" ) type backendBlockDetails struct { - blocks storage.Blocks - state protocol.State + backendBlockBase } -func (b *backendBlockDetails) GetLatestBlock(_ context.Context, isSealed bool) (*flow.Block, flow.BlockStatus, error) { +func (b *backendBlockDetails) GetLatestBlock(ctx context.Context, isSealed bool) (*flow.Block, flow.BlockStatus, error) { var header *flow.Header + var blockStatus flow.BlockStatus var err error if isSealed { - // get the latest seal header from storage header, err = b.state.Sealed().Head() + if err != nil { + // sealed header must exist in the db, otherwise the node's state may be corrupt + err = irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err + } + blockStatus = flow.BlockStatusSealed } else { - // get the finalized header from state header, err = b.state.Final().Head() - } - - if err != nil { - // node should always have the latest block - - // In the RPC engine, if we encounter an error from the protocol state indicating state corruption, - // we should halt processing requests, but do throw an exception which might cause a crash: - // - It is unsafe to process requests if we have an internally bad state. - // TODO: https://github.com/onflow/flow-go/issues/4028 - // - We would like to avoid throwing an exception as a result of an Access API request by policy - // because this can cause DOS potential - // - Since the protocol state is widely shared, we assume that in practice another component will - // observe the protocol state error and throw an exception. - return nil, flow.BlockStatusUnknown, status.Errorf(codes.Internal, "could not get latest block: %v", err) + if err != nil { + // finalized header must exist in the db, otherwise the node's state may be corrupt + err = irrecoverable.NewExceptionf("failed to lookup final header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err + } + + // Note: there is a corner case when requesting the latest finalized block before the + // consensus follower has progressed past the spork root block. In this case, the returned + // blockStatus will be finalized, however, the block is actually sealed. 
+ if header.Height == b.state.Params().SporkRootBlockHeight() { + blockStatus = flow.BlockStatusSealed + } else { + blockStatus = flow.BlockStatusFinalized + } } // since we are querying a finalized or sealed block, we can use the height index and save an ID computation @@ -49,55 +55,38 @@ func (b *backendBlockDetails) GetLatestBlock(_ context.Context, isSealed bool) ( return nil, flow.BlockStatusUnknown, status.Errorf(codes.Internal, "could not get latest block: %v", err) } - status, err := b.getBlockStatus(block) - if err != nil { - return nil, status, err - } - return block, status, nil + return block, blockStatus, nil } -func (b *backendBlockDetails) GetBlockByID(_ context.Context, id flow.Identifier) (*flow.Block, flow.BlockStatus, error) { +func (b *backendBlockDetails) GetBlockByID(ctx context.Context, id flow.Identifier) (*flow.Block, flow.BlockStatus, error) { block, err := b.blocks.ByID(id) if err != nil { return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(err) } - status, err := b.getBlockStatus(block) + status, err := b.getBlockStatus(block.ToHeader()) if err != nil { - return nil, status, err + // Any error returned is an indication of a bug or state corruption. We must not continue processing. + err = irrecoverable.NewException(err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err } return block, status, nil } -func (b *backendBlockDetails) GetBlockByHeight(_ context.Context, height uint64) (*flow.Block, flow.BlockStatus, error) { +func (b *backendBlockDetails) GetBlockByHeight(ctx context.Context, height uint64) (*flow.Block, flow.BlockStatus, error) { block, err := b.blocks.ByHeight(height) if err != nil { - return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(err) - } - - status, err := b.getBlockStatus(block) - if err != nil { - return nil, status, err + return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(common.ResolveHeightError(b.state.Params(), height, err)) } - return block, status, nil -} -func (b *backendBlockDetails) getBlockStatus(block *flow.Block) (flow.BlockStatus, error) { - sealed, err := b.state.Sealed().Head() + status, err := b.getBlockStatus(block.ToHeader()) if err != nil { - // In the RPC engine, if we encounter an error from the protocol state indicating state corruption, - // we should halt processing requests, but do throw an exception which might cause a crash: - // - It is unsafe to process requests if we have an internally bad state. - // TODO: https://github.com/onflow/flow-go/issues/4028 - // - We would like to avoid throwing an exception as a result of an Access API request by policy - // because this can cause DOS potential - // - Since the protocol state is widely shared, we assume that in practice another component will - // observe the protocol state error and throw an exception. - return flow.BlockStatusUnknown, status.Errorf(codes.Internal, "failed to find latest sealed header: %v", err) + // Any error returned is an indication of a bug or state corruption. We must not continue processing.
+ err = irrecoverable.NewException(err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err } - if block.Header.Height > sealed.Height { - return flow.BlockStatusFinalized, nil - } - return flow.BlockStatusSealed, nil + return block, status, nil } diff --git a/engine/access/rpc/backend/backend_block_headers.go b/engine/access/rpc/backend/backend_block_headers.go index 178f9064f1f..562bbe4cfca 100644 --- a/engine/access/rpc/backend/backend_block_headers.go +++ b/engine/access/rpc/backend/backend_block_headers.go @@ -3,53 +3,47 @@ package backend import ( "context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - + "github.com/onflow/flow-go/engine/access/rpc/backend/common" "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/module/irrecoverable" ) type backendBlockHeaders struct { - headers storage.Headers - state protocol.State + backendBlockBase } -func (b *backendBlockHeaders) GetLatestBlockHeader(_ context.Context, isSealed bool) (*flow.Header, flow.BlockStatus, error) { - var header *flow.Header - var err error - +func (b *backendBlockHeaders) GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flow.Header, flow.BlockStatus, error) { if isSealed { - // get the latest seal header from storage - header, err = b.state.Sealed().Head() - } else { - // get the finalized header from state - header, err = b.state.Final().Head() + header, err := b.state.Sealed().Head() + if err != nil { + // sealed header must exist in the db, otherwise the node's state may be corrupt + err = irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err + } + return header, flow.BlockStatusSealed, nil } + header, err := b.state.Final().Head() if err != nil { - // node should always have the latest block - // In the RPC engine, if we encounter an error from the protocol state indicating state corruption, - // we should halt processing requests, but do throw an exception which might cause a crash: - // - It is unsafe to process requests if we have an internally bad state. - // TODO: https://github.com/onflow/flow-go/issues/4028 - // - We would like to avoid throwing an exception as a result of an Access API request by policy - // because this can cause DOS potential - // - Since the protocol state is widely shared, we assume that in practice another component will - // observe the protocol state error and throw an exception. - return nil, flow.BlockStatusUnknown, status.Errorf(codes.Internal, "could not get latest block header: %v", err) + // finalized header must exist in the db, otherwise the node's state may be corrupt + err = irrecoverable.NewExceptionf("failed to lookup final header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err } - status, err := b.getBlockStatus(header) - if err != nil { - return nil, status, err + // Note: there is a corner case when requesting the latest finalized block before the + // consensus follower has progressed past the spork root block. In this case, the returned + // blockStatus will be finalized, however, the block is actually sealed. 
+ if header.Height == b.state.Params().SporkRootBlockHeight() { + return header, flow.BlockStatusSealed, nil + } else { + return header, flow.BlockStatusFinalized, nil } - return header, status, nil } -func (b *backendBlockHeaders) GetBlockHeaderByID(_ context.Context, id flow.Identifier) (*flow.Header, flow.BlockStatus, error) { +func (b *backendBlockHeaders) GetBlockHeaderByID(ctx context.Context, id flow.Identifier) (*flow.Header, flow.BlockStatus, error) { header, err := b.headers.ByBlockID(id) if err != nil { return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(err) @@ -57,40 +51,26 @@ func (b *backendBlockHeaders) GetBlockHeaderByID(_ context.Context, id flow.Iden status, err := b.getBlockStatus(header) if err != nil { - return nil, status, err + // Any error returned is an indication of a bug or state corruption. We must not continue processing. + err = irrecoverable.NewException(err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err } return header, status, nil } -func (b *backendBlockHeaders) GetBlockHeaderByHeight(_ context.Context, height uint64) (*flow.Header, flow.BlockStatus, error) { +func (b *backendBlockHeaders) GetBlockHeaderByHeight(ctx context.Context, height uint64) (*flow.Header, flow.BlockStatus, error) { header, err := b.headers.ByHeight(height) if err != nil { - return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(err) + return nil, flow.BlockStatusUnknown, rpc.ConvertStorageError(common.ResolveHeightError(b.state.Params(), height, err)) } status, err := b.getBlockStatus(header) if err != nil { - return nil, status, err + // Any error returned is an indication of a bug or state corruption. + err = irrecoverable.NewException(err) + irrecoverable.Throw(ctx, err) + return nil, flow.BlockStatusUnknown, err } return header, status, nil } - -func (b *backendBlockHeaders) getBlockStatus(header *flow.Header) (flow.BlockStatus, error) { - sealed, err := b.state.Sealed().Head() - if err != nil { - // In the RPC engine, if we encounter an error from the protocol state indicating state corruption, - // we should halt processing requests, but do throw an exception which might cause a crash: - // - It is unsafe to process requests if we have an internally bad state. - // TODO: https://github.com/onflow/flow-go/issues/4028 - // - We would like to avoid throwing an exception as a result of an Access API request by policy - // because this can cause DOS potential - // - Since the protocol state is widely shared, we assume that in practice another component will - // observe the protocol state error and throw an exception.
- return flow.BlockStatusUnknown, status.Errorf(codes.Internal, "failed to find latest sealed header: %v", err) - } - - if header.Height > sealed.Height { - return flow.BlockStatusFinalized, nil - } - return flow.BlockStatusSealed, nil -} diff --git a/engine/access/rpc/backend/backend_events.go b/engine/access/rpc/backend/backend_events.go deleted file mode 100644 index e097843b933..00000000000 --- a/engine/access/rpc/backend/backend_events.go +++ /dev/null @@ -1,246 +0,0 @@ -package backend - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "time" - - "github.com/hashicorp/go-multierror" - execproto "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" -) - -type backendEvents struct { - headers storage.Headers - executionReceipts storage.ExecutionReceipts - state protocol.State - connFactory ConnectionFactory - log zerolog.Logger - maxHeightRange uint -} - -// GetEventsForHeightRange retrieves events for all sealed blocks between the start block height and -// the end block height (inclusive) that have the given type. -func (b *backendEvents) GetEventsForHeightRange( - ctx context.Context, - eventType string, - startHeight, endHeight uint64, -) ([]flow.BlockEvents, error) { - - if endHeight < startHeight { - return nil, status.Error(codes.InvalidArgument, "invalid start or end height") - } - - rangeSize := endHeight - startHeight + 1 // range is inclusive on both ends - if rangeSize > uint64(b.maxHeightRange) { - return nil, status.Errorf(codes.InvalidArgument, "requested block range (%d) exceeded maximum (%d)", rangeSize, b.maxHeightRange) - } - - // get the latest sealed block header - head, err := b.state.Sealed().Head() - if err != nil { - // sealed block must be in the store, so return an Internal code even if we got NotFound - return nil, status.Errorf(codes.Internal, "failed to get events: %v", err) - } - - // start height should not be beyond the last sealed height - if head.Height < startHeight { - return nil, status.Errorf(codes.OutOfRange, - "start height %d is greater than the last sealed block height %d", startHeight, head.Height) - } - - // limit max height to last sealed block in the chain - if head.Height < endHeight { - endHeight = head.Height - } - - // find the block headers for all the blocks between min and max height (inclusive) - blockHeaders := make([]*flow.Header, 0) - - for i := startHeight; i <= endHeight; i++ { - header, err := b.headers.ByHeight(i) - if err != nil { - return nil, rpc.ConvertStorageError(fmt.Errorf("failed to get events: %w", err)) - } - - blockHeaders = append(blockHeaders, header) - } - - return b.getBlockEventsFromExecutionNode(ctx, blockHeaders, eventType) -} - -// GetEventsForBlockIDs retrieves events for all the specified block IDs that have the given type -func (b *backendEvents) GetEventsForBlockIDs( - ctx context.Context, - eventType string, - blockIDs []flow.Identifier, -) ([]flow.BlockEvents, error) { - - if uint(len(blockIDs)) > b.maxHeightRange { - return nil, status.Errorf(codes.InvalidArgument, "requested block range (%d) exceeded maximum (%d)", len(blockIDs), b.maxHeightRange) - } - - // find the block headers for all the block IDs - blockHeaders := make([]*flow.Header, 0) - for _, blockID := range 
blockIDs { - header, err := b.headers.ByBlockID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(fmt.Errorf("failed to get events: %w", err)) - } - - blockHeaders = append(blockHeaders, header) - } - - // forward the request to the execution node - return b.getBlockEventsFromExecutionNode(ctx, blockHeaders, eventType) -} - -func (b *backendEvents) getBlockEventsFromExecutionNode( - ctx context.Context, - blockHeaders []*flow.Header, - eventType string, -) ([]flow.BlockEvents, error) { - - // create an execution API request for events at block ID - blockIDs := make([]flow.Identifier, len(blockHeaders)) - for i := range blockIDs { - blockIDs[i] = blockHeaders[i].ID() - } - - if len(blockIDs) == 0 { - return []flow.BlockEvents{}, nil - } - - req := &execproto.GetEventsForBlockIDsRequest{ - Type: eventType, - BlockIds: convert.IdentifiersToMessages(blockIDs), - } - - // choose the last block ID to find the list of execution nodes - lastBlockID := blockIDs[len(blockIDs)-1] - - execNodes, err := executionNodesForBlockID(ctx, lastBlockID, b.executionReceipts, b.state, b.log) - if err != nil { - b.log.Error().Err(err).Msg("failed to retrieve events from execution node") - return nil, rpc.ConvertError(err, "failed to retrieve events from execution node", codes.Internal) - } - - var resp *execproto.GetEventsForBlockIDsResponse - var successfulNode *flow.Identity - resp, successfulNode, err = b.getEventsFromAnyExeNode(ctx, execNodes, req) - if err != nil { - b.log.Error().Err(err).Msg("failed to retrieve events from execution nodes") - return nil, rpc.ConvertError(err, "failed to retrieve events from execution nodes", codes.Internal) - } - b.log.Trace(). - Str("execution_id", successfulNode.String()). - Str("last_block_id", lastBlockID.String()). 
- Msg("successfully got events") - - // convert execution node api result to access node api result - results, err := verifyAndConvertToAccessEvents(resp.GetResults(), blockHeaders) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to verify retrieved events from execution node: %v", err) - } - - return results, nil -} - -// verifyAndConvertToAccessEvents converts execution node api result to access node api result, and verifies that the results contains -// results from each block that was requested -func verifyAndConvertToAccessEvents(execEvents []*execproto.GetEventsForBlockIDsResponse_Result, requestedBlockHeaders []*flow.Header) ([]flow.BlockEvents, error) { - if len(execEvents) != len(requestedBlockHeaders) { - return nil, errors.New("number of results does not match number of blocks requested") - } - - requestedBlockHeaderSet := map[string]*flow.Header{} - for _, header := range requestedBlockHeaders { - requestedBlockHeaderSet[header.ID().String()] = header - } - - results := make([]flow.BlockEvents, len(execEvents)) - - for i, result := range execEvents { - header, expected := requestedBlockHeaderSet[hex.EncodeToString(result.GetBlockId())] - if !expected { - return nil, fmt.Errorf("unexpected blockID from exe node %x", result.GetBlockId()) - } - if result.GetBlockHeight() != header.Height { - return nil, fmt.Errorf("unexpected block height %d for block %x from exe node", - result.GetBlockHeight(), - result.GetBlockId()) - } - - results[i] = flow.BlockEvents{ - BlockID: header.ID(), - BlockHeight: header.Height, - BlockTimestamp: header.Timestamp, - Events: convert.MessagesToEvents(result.GetEvents()), - } - } - - return results, nil -} - -// getEventsFromAnyExeNode retrieves the given events from any EN in `execNodes`. -// We attempt querying each EN in sequence. If any EN returns a valid response, then errors from -// other ENs are logged and swallowed. If all ENs fail to return a valid response, then an -// error aggregating all failures is returned. -func (b *backendEvents) getEventsFromAnyExeNode(ctx context.Context, - execNodes flow.IdentityList, - req *execproto.GetEventsForBlockIDsRequest) (*execproto.GetEventsForBlockIDsResponse, *flow.Identity, error) { - var errors *multierror.Error - // try to get events from one of the execution nodes - for _, execNode := range execNodes { - start := time.Now() - resp, err := b.tryGetEvents(ctx, execNode, req) - duration := time.Since(start) - - logger := b.log.With(). - Str("execution_node", execNode.String()). - Str("event", req.GetType()). - Int("blocks", len(req.BlockIds)). - Int64("rtt_ms", duration.Milliseconds()). 
- Logger() - - if err == nil { - // return if any execution node replied successfully - logger.Debug().Msg("Successfully got events") - return resp, execNode, nil - } - - logger.Err(err).Msg("failed to execute GetEvents") - - errors = multierror.Append(errors, err) - } - return nil, nil, errors.ErrorOrNil() -} - -func (b *backendEvents) tryGetEvents(ctx context.Context, - execNode *flow.Identity, - req *execproto.GetEventsForBlockIDsRequest) (*execproto.GetEventsForBlockIDsResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - - resp, err := execRPCClient.GetEventsForBlockIDs(ctx, req) - if err != nil { - if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) - } - return nil, err - } - return resp, nil -} diff --git a/engine/access/rpc/backend/backend_network.go b/engine/access/rpc/backend/backend_network.go index d88c36db070..ba444326f77 100644 --- a/engine/access/rpc/backend/backend_network.go +++ b/engine/access/rpc/backend/backend_network.go @@ -2,24 +2,24 @@ package backend import ( "context" - "fmt" + "errors" - "github.com/onflow/flow-go/cmd/build" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/state" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" ) -var SnapshotHistoryLimitErr = fmt.Errorf("reached the snapshot history limit") - type backendNetwork struct { state protocol.State chainID flow.ChainID + headers storage.Headers snapshotHistoryLimit int } @@ -30,50 +30,30 @@ The observer and access nodes need to be able to handle GetNetworkParameters and GetLatestProtocolStateSnapshot RPCs so this logic was split into the backendNetwork so that we can ignore the rest of the backend logic */ -func NewNetworkAPI(state protocol.State, chainID flow.ChainID, snapshotHistoryLimit int) *backendNetwork { +func NewNetworkAPI( + state protocol.State, + chainID flow.ChainID, + headers storage.Headers, + snapshotHistoryLimit int, +) *backendNetwork { return &backendNetwork{ state: state, chainID: chainID, + headers: headers, snapshotHistoryLimit: snapshotHistoryLimit, } } -func (b *backendNetwork) GetNetworkParameters(_ context.Context) access.NetworkParameters { - return access.NetworkParameters{ +func (b *backendNetwork) GetNetworkParameters(_ context.Context) accessmodel.NetworkParameters { + return accessmodel.NetworkParameters{ ChainID: b.chainID, } } -func (b *backendNetwork) GetNodeVersionInfo(ctx context.Context) (*access.NodeVersionInfo, error) { - stateParams := b.state.Params() - sporkId, err := stateParams.SporkID() - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to read spork ID: %v", err) - } - - protocolVersion, err := stateParams.ProtocolVersion() - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to read protocol version: %v", err) - } - - return &access.NodeVersionInfo{ - Semver: build.Semver(), - Commit: build.Commit(), - SporkId: sporkId, - ProtocolVersion: uint64(protocolVersion), - }, nil -} - -// GetLatestProtocolStateSnapshot returns the latest finalized snapshot +// GetLatestProtocolStateSnapshot returns the latest finalized snapshot. 
 func (b *backendNetwork) GetLatestProtocolStateSnapshot(_ context.Context) ([]byte, error) {
 	snapshot := b.state.Final()
-
-	validSnapshot, err := b.getValidSnapshot(snapshot, 0)
-	if err != nil {
-		return nil, err
-	}
-
-	data, err := convert.SnapshotToBytes(validSnapshot)
+	data, err := convert.SnapshotToBytes(snapshot)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "failed to convert snapshot to bytes: %v", err)
 	}
@@ -81,76 +61,70 @@ func (b *backendNetwork) GetLatestProtocolStateSnapshot(_ context.Context) ([]by
 	return data, nil
 }
 
-func (b *backendNetwork) isEpochOrPhaseDifferent(counter1, counter2 uint64, phase1, phase2 flow.EpochPhase) bool {
-	return counter1 != counter2 || phase1 != phase2
-}
-
-// getValidSnapshot will return a valid snapshot that has a sealing segment which
-// 1. does not contain any blocks that span an epoch transition
-// 2. does not contain any blocks that span an epoch phase transition
-// If a snapshot does contain an invalid sealing segment query the state
-// by height of each block in the segment and return a snapshot at the point
-// where the transition happens.
-func (b *backendNetwork) getValidSnapshot(snapshot protocol.Snapshot, blocksVisited int) (protocol.Snapshot, error) {
-	segment, err := snapshot.SealingSegment()
+// GetProtocolStateSnapshotByBlockID returns a serializable Snapshot for a block, by blockID.
+// The requested block must be finalized; otherwise an error is returned.
+// Expected errors during normal operation:
+// - status.Error[codes.NotFound] - No block with the given ID was found
+// - status.Error[codes.InvalidArgument] - Block ID is for an orphaned block and will never have a valid snapshot
+// - status.Error[codes.FailedPrecondition] - A block was found, but it is not finalized and is above the finalized height.
+//   The block may or may not be finalized in the future; the client can retry later.
+func (b *backendNetwork) GetProtocolStateSnapshotByBlockID(_ context.Context, blockID flow.Identifier) ([]byte, error) {
+	snapshot := b.state.AtBlockID(blockID)
+	snapshotHeadByBlockId, err := snapshot.Head()
 	if err != nil {
-		return nil, fmt.Errorf("failed to get sealing segment: %w", err)
+		if errors.Is(err, state.ErrUnknownSnapshotReference) {
+			return nil, status.Errorf(codes.NotFound, "failed to get a valid snapshot: block not found")
+		}
+		return nil, status.Errorf(codes.Internal, "could not get header by blockID: %v", err)
 	}
 
-	counterAtHighest, phaseAtHighest, err := b.getCounterAndPhase(segment.Highest().Header.Height)
+	// Because there is no index from block ID to finalized height, we separately look up the finalized
+	// block ID by the height of the queried block, then compare the queried ID to the finalized ID.
+	// If they match, then the queried block must be finalized.
+	blockIDFinalizedAtHeight, err := b.headers.BlockIDByHeight(snapshotHeadByBlockId.Height)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get counter and phase at highest block in the segment: %w", err)
+		if errors.Is(err, storage.ErrNotFound) {
+			// The block exists, but no block has been finalized at its height. Therefore, this block
+			// may be finalized in the future, and the client can retry.
+			return nil, status.Errorf(codes.FailedPrecondition,
+				"failed to retrieve snapshot for block with height %d: block not finalized and is above finalized height",
+				snapshotHeadByBlockId.Height)
+		}
+		return nil, status.Errorf(codes.Internal, "failed to look up block id by height %d: %v", snapshotHeadByBlockId.Height, err)
 	}
 
-	counterAtLowest, phaseAtLowest, err := b.getCounterAndPhase(segment.Sealed().Header.Height)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get counter and phase at lowest block in the segment: %w", err)
+	if blockIDFinalizedAtHeight != blockID {
+		// A different block than what was queried has been finalized at this height.
+		// Therefore, the queried block will never be finalized.
+		return nil, status.Errorf(codes.InvalidArgument,
+			"failed to retrieve snapshot for block: block not finalized and is below finalized height")
 	}
 
-	// Check if the counters and phase are different this indicates that the sealing segment
-	// of the snapshot requested spans either an epoch transition or phase transition.
-	if b.isEpochOrPhaseDifferent(counterAtHighest, counterAtLowest, phaseAtHighest, phaseAtLowest) {
-		// Visit each node in strict order of decreasing height starting at head
-		// to find the block that straddles the transition boundary.
-		for i := len(segment.Blocks) - 1; i >= 0; i-- {
-			blocksVisited++
-
-			// NOTE: Check if we have reached our history limit, in edge cases
-			// where the sealing segment is abnormally long we want to short circuit
-			// the recursive calls and return an error. The API caller can retry.
-			if blocksVisited > b.snapshotHistoryLimit {
-				return nil, fmt.Errorf("%w: (%d)", SnapshotHistoryLimitErr, b.snapshotHistoryLimit)
-			}
-
-			counterAtBlock, phaseAtBlock, err := b.getCounterAndPhase(segment.Blocks[i].Header.Height)
-			if err != nil {
-				return nil, fmt.Errorf("failed to get epoch counter and phase for snapshot at block %s: %w", segment.Blocks[i].ID(), err)
-			}
-
-			// Check if this block straddles the transition boundary, if it does return the snapshot
-			// at that block height.
-			if b.isEpochOrPhaseDifferent(counterAtHighest, counterAtBlock, phaseAtHighest, phaseAtBlock) {
-				return b.getValidSnapshot(b.state.AtHeight(segment.Blocks[i].Header.Height), blocksVisited)
-			}
-		}
+	data, err := convert.SnapshotToBytes(snapshot)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to convert snapshot to bytes: %v", err)
 	}
-
-	return snapshot, nil
+	return data, nil
 }
 
-// getCounterAndPhase will return the epoch counter and phase at the specified height in state
-func (b *backendNetwork) getCounterAndPhase(height uint64) (uint64, flow.EpochPhase, error) {
-	snapshot := b.state.AtHeight(height)
-
-	counter, err := snapshot.Epochs().Current().Counter()
+// GetProtocolStateSnapshotByHeight returns a serializable Snapshot by block height.
+// The block must be finalized (otherwise the by-height query is ambiguous).
+// Expected errors during normal operation:
+// - status.Error[codes.NotFound] - No finalized block at the given height was found.
+//   A block at this height may be finalized in the future; the client can retry later.
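The finalization check in GetProtocolStateSnapshotByBlockID above relies on a single invariant: at most one block ID is recorded as finalized per height. A minimal, self-contained sketch of that decision logic, using plain strings in place of flow.Identifier and a hypothetical `finalizedIDAtHeight` lookup standing in for `headers.BlockIDByHeight`:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for storage.ErrNotFound in this sketch.
var errNotFound = errors.New("not found")

// checkFinalized mirrors the comparison above: a block is finalized iff the
// block ID recorded as finalized at its height equals the queried ID.
func checkFinalized(queriedID string, height uint64, finalizedIDAtHeight func(uint64) (string, error)) error {
	finalizedID, err := finalizedIDAtHeight(height)
	if errors.Is(err, errNotFound) {
		// nothing finalized at this height yet -> FailedPrecondition: retry later
		return fmt.Errorf("block %s not finalized yet, retry later", queriedID)
	}
	if err != nil {
		return err
	}
	if finalizedID != queriedID {
		// a different block was finalized at this height -> InvalidArgument: orphaned
		return fmt.Errorf("block %s is orphaned and will never be finalized", queriedID)
	}
	return nil // finalized: safe to serve a snapshot
}

func main() {
	lookup := func(h uint64) (string, error) {
		if h == 10 {
			return "abc", nil
		}
		return "", errNotFound
	}
	fmt.Println(checkFinalized("abc", 10, lookup)) // <nil>: finalized
	fmt.Println(checkFinalized("def", 10, lookup)) // orphaned
	fmt.Println(checkFinalized("xyz", 11, lookup)) // retry later
}
```

The three outcomes map onto the gRPC codes documented above: success, InvalidArgument for an orphaned block, and FailedPrecondition when the client should retry.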
+func (b *backendNetwork) GetProtocolStateSnapshotByHeight(_ context.Context, blockHeight uint64) ([]byte, error) { + snapshot := b.state.AtHeight(blockHeight) + _, err := snapshot.Head() if err != nil { - return 0, 0, fmt.Errorf("failed to get counter for block (height=%d): %w", height, err) + if errors.Is(err, state.ErrUnknownSnapshotReference) { + return nil, status.Errorf(codes.NotFound, "failed to find snapshot: %v", err) + } + return nil, status.Errorf(codes.Internal, "failed to get a valid snapshot: %v", err) } - phase, err := snapshot.Phase() + data, err := convert.SnapshotToBytes(snapshot) if err != nil { - return 0, 0, fmt.Errorf("failed to get phase for block (height=%d): %w", height, err) + return nil, status.Errorf(codes.Internal, "failed to convert snapshot to bytes: %v", err) } - - return counter, phase, nil + return data, nil } diff --git a/engine/access/rpc/backend/backend_scripts.go b/engine/access/rpc/backend/backend_scripts.go deleted file mode 100644 index 9f4ec5dffb2..00000000000 --- a/engine/access/rpc/backend/backend_scripts.go +++ /dev/null @@ -1,207 +0,0 @@ -package backend - -import ( - "context" - "crypto/md5" //nolint:gosec - "time" - - lru "github.com/hashicorp/golang-lru" - - "github.com/hashicorp/go-multierror" - execproto "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" -) - -// uniqueScriptLoggingTimeWindow is the duration for checking the uniqueness of scripts sent for execution -const uniqueScriptLoggingTimeWindow = 10 * time.Minute - -type backendScripts struct { - headers storage.Headers - executionReceipts storage.ExecutionReceipts - state protocol.State - connFactory ConnectionFactory - log zerolog.Logger - metrics module.BackendScriptsMetrics - loggedScripts *lru.Cache - archiveAddressList []string -} - -func (b *backendScripts) ExecuteScriptAtLatestBlock( - ctx context.Context, - script []byte, - arguments [][]byte, -) ([]byte, error) { - - // get the latest sealed header - latestHeader, err := b.state.Sealed().Head() - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get latest sealed header: %v", err) - } - - // get the block id of the latest sealed header - latestBlockID := latestHeader.ID() - - // execute script on the execution node at that block id - return b.executeScriptOnExecutionNode(ctx, latestBlockID, script, arguments) -} - -func (b *backendScripts) ExecuteScriptAtBlockID( - ctx context.Context, - blockID flow.Identifier, - script []byte, - arguments [][]byte, -) ([]byte, error) { - // execute script on the execution node at that block id - return b.executeScriptOnExecutionNode(ctx, blockID, script, arguments) -} - -func (b *backendScripts) ExecuteScriptAtBlockHeight( - ctx context.Context, - blockHeight uint64, - script []byte, - arguments [][]byte, -) ([]byte, error) { - // get header at given height - header, err := b.headers.ByHeight(blockHeight) - if err != nil { - err = rpc.ConvertStorageError(err) - return nil, err - } - - blockID := header.ID() - - // execute script on the execution node at that block id - return b.executeScriptOnExecutionNode(ctx, blockID, script, arguments) -} - -func (b *backendScripts) findScriptExecutors( - ctx context.Context, - blockID flow.Identifier, -) ([]string, error) { - // 
send script queries to archive nodes if archive addres is configured - if len(b.archiveAddressList) > 0 { - return b.archiveAddressList, nil - } - - executors, err := executionNodesForBlockID(ctx, blockID, b.executionReceipts, b.state, b.log) - if err != nil { - return nil, err - } - - executorAddrs := make([]string, 0, len(executors)) - for _, executor := range executors { - executorAddrs = append(executorAddrs, executor.Address) - } - return executorAddrs, nil -} - -// executeScriptOnExecutionNode forwards the request to the execution node using the execution node -// grpc client and converts the response back to the access node api response format -func (b *backendScripts) executeScriptOnExecutionNode( - ctx context.Context, - blockID flow.Identifier, - script []byte, - arguments [][]byte, -) ([]byte, error) { - - execReq := &execproto.ExecuteScriptAtBlockIDRequest{ - BlockId: blockID[:], - Script: script, - Arguments: arguments, - } - - // find few execution nodes which have executed the block earlier and provided an execution receipt for it - scriptExecutors, err := b.findScriptExecutors(ctx, blockID) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to find script executors at blockId %v: %v", blockID.String(), err) - } - // encode to MD5 as low compute/memory lookup key - // CAUTION: cryptographically insecure md5 is used here, but only to de-duplicate logs. - // *DO NOT* use this hash for any protocol-related or cryptographic functions. - insecureScriptHash := md5.Sum(script) //nolint:gosec - - // try each of the execution nodes found - var errors *multierror.Error - // try to execute the script on one of the execution nodes - for _, executorAddress := range scriptExecutors { - execStartTime := time.Now() // record start time - result, err := b.tryExecuteScript(ctx, executorAddress, execReq) - if err == nil { - if b.log.GetLevel() == zerolog.DebugLevel { - executionTime := time.Now() - if b.shouldLogScript(executionTime, insecureScriptHash) { - b.log.Debug(). - Str("script_executor_addr", executorAddress). - Hex("block_id", blockID[:]). - Hex("script_hash", insecureScriptHash[:]). - Str("script", string(script)). - Msg("Successfully executed script") - b.loggedScripts.Add(insecureScriptHash, executionTime) - } - } - - // log execution time - b.metrics.ScriptExecuted( - time.Since(execStartTime), - len(script), - ) - - return result, nil - } - // return if it's just a script failure as opposed to an EN failure and skip trying other ENs - if status.Code(err) == codes.InvalidArgument { - b.log.Debug().Err(err). - Str("script_executor_addr", executorAddress). - Hex("block_id", blockID[:]). - Hex("script_hash", insecureScriptHash[:]). - Str("script", string(script)). 
- Msg("script failed to execute on the execution node") - return nil, err - } - errors = multierror.Append(errors, err) - } - - errToReturn := errors.ErrorOrNil() - if errToReturn != nil { - b.log.Error().Err(errToReturn).Msg("script execution failed for execution node internal reasons") - } - - return nil, rpc.ConvertMultiError(errors, "failed to execute script on execution nodes", codes.Internal) -} - -// shouldLogScript checks if the script hash is unique in the time window -func (b *backendScripts) shouldLogScript(execTime time.Time, scriptHash [16]byte) bool { - rawTimestamp, seen := b.loggedScripts.Get(scriptHash) - if !seen || rawTimestamp == nil { - return true - } else { - // safe cast - timestamp := rawTimestamp.(time.Time) - return execTime.Sub(timestamp) >= uniqueScriptLoggingTimeWindow - } -} - -func (b *backendScripts) tryExecuteScript(ctx context.Context, executorAddress string, req *execproto.ExecuteScriptAtBlockIDRequest) ([]byte, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(executorAddress) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to create client for execution node %s: %v", executorAddress, err) - } - defer closer.Close() - - execResp, err := execRPCClient.ExecuteScriptAtBlockID(ctx, req) - if err != nil { - if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(executorAddress) - } - return nil, status.Errorf(status.Code(err), "failed to execute the script on the execution node %s: %v", executorAddress, err) - } - return execResp.GetValue(), nil -} diff --git a/engine/access/rpc/backend/backend_stream_block_digests_test.go b/engine/access/rpc/backend/backend_stream_block_digests_test.go new file mode 100644 index 00000000000..76e4c508207 --- /dev/null +++ b/engine/access/rpc/backend/backend_stream_block_digests_test.go @@ -0,0 +1,114 @@ +package backend + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +type BackendBlockDigestSuite struct { + BackendBlocksSuite +} + +func TestBackendBlockDigestSuite(t *testing.T) { + suite.Run(t, new(BackendBlockDigestSuite)) +} + +// SetupTest initializes the test suite with required dependencies. +func (s *BackendBlockDigestSuite) SetupTest() { + s.BackendBlocksSuite.SetupTest() +} + +// TestSubscribeBlockDigestsFromStartBlockID tests the SubscribeBlockDigestsFromStartBlockID method. +func (s *BackendBlockDigestSuite) TestSubscribeBlockDigestsFromStartBlockID() { + call := func(ctx context.Context, startValue interface{}, blockStatus flow.BlockStatus) subscription.Subscription { + return s.backend.SubscribeBlockDigestsFromStartBlockID(ctx, startValue.(flow.Identifier), blockStatus) + } + + s.subscribe(call, s.requireBlockDigests, s.subscribeFromStartBlockIdTestCases()) +} + +// TestSubscribeBlockDigestsFromStartHeight tests the SubscribeBlockDigestsFromStartHeight method. 
+func (s *BackendBlockDigestSuite) TestSubscribeBlockDigestsFromStartHeight() {
+	call := func(ctx context.Context, startValue interface{}, blockStatus flow.BlockStatus) subscription.Subscription {
+		return s.backend.SubscribeBlockDigestsFromStartHeight(ctx, startValue.(uint64), blockStatus)
+	}
+
+	s.subscribe(call, s.requireBlockDigests, s.subscribeFromStartHeightTestCases())
+}
+
+// TestSubscribeBlockDigestsFromLatest tests the SubscribeBlockDigestsFromLatest method.
+func (s *BackendBlockDigestSuite) TestSubscribeBlockDigestsFromLatest() {
+	call := func(ctx context.Context, startValue interface{}, blockStatus flow.BlockStatus) subscription.Subscription {
+		return s.backend.SubscribeBlockDigestsFromLatest(ctx, blockStatus)
+	}
+
+	s.subscribe(call, s.requireBlockDigests, s.subscribeFromLatestTestCases())
+}
+
+// requireBlockDigests ensures that the received block digest information matches the expected data.
+func (s *BackendBlockDigestSuite) requireBlockDigests(v interface{}, expectedBlock *flow.Block) {
+	actualBlock, ok := v.(*flow.BlockDigest)
+	require.True(s.T(), ok, "unexpected response type: %T", v)
+
+	s.Require().Equal(expectedBlock.ID(), actualBlock.BlockID)
+	s.Require().Equal(expectedBlock.Height, actualBlock.Height)
+	s.Require().Equal(expectedBlock.Timestamp, uint64(actualBlock.Timestamp.UnixMilli()))
+}
+
+// TestSubscribeBlockDigestsHandlesErrors tests error handling scenarios for the SubscribeBlockDigestsFromStartBlockID and SubscribeBlockDigestsFromStartHeight methods in the Backend.
+// It ensures that the methods correctly return errors for various invalid input cases.
+//
+// Test Cases:
+//
+// 1. Returns error for unindexed start block id:
+// - Tests that subscribing to block digests with an unindexed start block ID results in a NotFound error.
+//
+// 2. Returns error for start height before root height:
+// - Validates that attempting to subscribe to block digests with a start height before the root height results in an InvalidArgument error.
+//
+// 3. Returns error for unindexed start height:
+// - Tests that subscribing to block digests with an unindexed start height results in a NotFound error.
+//
+// Each test case checks for specific error conditions and ensures that the methods respond appropriately.
+func (s *BackendBlockDigestSuite) TestSubscribeBlockDigestsHandlesErrors() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + backend, err := New(s.backendParams(engine.NewBroadcaster())) + s.Require().NoError(err) + + s.Run("returns error if unknown start block id is provided", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := backend.SubscribeBlockDigestsFromStartBlockID(subCtx, unittest.IdentifierFixture(), flow.BlockStatusFinalized) + assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "expected %s, got %v: %v", codes.NotFound, status.Code(sub.Err()).String(), sub.Err()) + }) + + s.Run("returns error for start height before root height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := backend.SubscribeBlockDigestsFromStartHeight(subCtx, s.rootBlock.Height-1, flow.BlockStatusFinalized) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected %s, got %v: %v", codes.InvalidArgument, status.Code(sub.Err()).String(), sub.Err()) + }) + + s.Run("returns error if unknown start height is provided", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := backend.SubscribeBlockDigestsFromStartHeight(subCtx, s.blocksArray[len(s.blocksArray)-1].Height+10, flow.BlockStatusFinalized) + assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "expected %s, got %v: %v", codes.NotFound, status.Code(sub.Err()).String(), sub.Err()) + }) +} diff --git a/engine/access/rpc/backend/backend_stream_block_headers_test.go b/engine/access/rpc/backend/backend_stream_block_headers_test.go new file mode 100644 index 00000000000..5994b01cfa3 --- /dev/null +++ b/engine/access/rpc/backend/backend_stream_block_headers_test.go @@ -0,0 +1,114 @@ +package backend + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +type BackendBlockHeadersSuite struct { + BackendBlocksSuite +} + +func TestBackendBlockHeadersSuite(t *testing.T) { + suite.Run(t, new(BackendBlockHeadersSuite)) +} + +// SetupTest initializes the test suite with required dependencies. +func (s *BackendBlockHeadersSuite) SetupTest() { + s.BackendBlocksSuite.SetupTest() +} + +// TestSubscribeBlockHeadersFromStartBlockID tests the SubscribeBlockHeadersFromStartBlockID method. +func (s *BackendBlockHeadersSuite) TestSubscribeBlockHeadersFromStartBlockID() { + call := func(ctx context.Context, startValue interface{}, blockStatus flow.BlockStatus) subscription.Subscription { + return s.backend.SubscribeBlockHeadersFromStartBlockID(ctx, startValue.(flow.Identifier), blockStatus) + } + + s.subscribe(call, s.requireBlockHeaders, s.subscribeFromStartBlockIdTestCases()) +} + +// TestSubscribeBlockHeadersFromStartHeight tests the SubscribeBlockHeadersFromStartHeight method. 
+func (s *BackendBlockHeadersSuite) TestSubscribeBlockHeadersFromStartHeight() {
+	call := func(ctx context.Context, startValue interface{}, blockStatus flow.BlockStatus) subscription.Subscription {
+		return s.backend.SubscribeBlockHeadersFromStartHeight(ctx, startValue.(uint64), blockStatus)
+	}
+
+	s.subscribe(call, s.requireBlockHeaders, s.subscribeFromStartHeightTestCases())
+}
+
+// TestSubscribeBlockHeadersFromLatest tests the SubscribeBlockHeadersFromLatest method.
+func (s *BackendBlockHeadersSuite) TestSubscribeBlockHeadersFromLatest() {
+	call := func(ctx context.Context, startValue interface{}, blockStatus flow.BlockStatus) subscription.Subscription {
+		return s.backend.SubscribeBlockHeadersFromLatest(ctx, blockStatus)
+	}
+
+	s.subscribe(call, s.requireBlockHeaders, s.subscribeFromLatestTestCases())
+}
+
+// requireBlockHeaders ensures that the received block header information matches the expected data.
+func (s *BackendBlockHeadersSuite) requireBlockHeaders(v interface{}, expectedBlock *flow.Block) {
+	actualHeader, ok := v.(*flow.Header)
+	require.True(s.T(), ok, "unexpected response type: %T", v)
+
+	s.Require().Equal(expectedBlock.Height, actualHeader.Height)
+	s.Require().Equal(expectedBlock.ToHeader().ID(), actualHeader.ID())
+	s.Require().Equal(*expectedBlock.ToHeader(), *actualHeader)
+}
+
+// TestSubscribeBlockHeadersHandlesErrors tests error handling scenarios for the SubscribeBlockHeadersFromStartBlockID and SubscribeBlockHeadersFromStartHeight methods in the Backend.
+// It ensures that the methods correctly return errors for various invalid input cases.
+//
+// Test Cases:
+//
+// 1. Returns error for unindexed start block id:
+// - Tests that subscribing to block headers with an unindexed start block ID results in a NotFound error.
+//
+// 2. Returns error for start height before root height:
+// - Validates that attempting to subscribe to block headers with a start height before the root height results in an InvalidArgument error.
+//
+// 3. Returns error for unindexed start height:
+// - Tests that subscribing to block headers with an unindexed start height results in a NotFound error.
+//
+// Each test case checks for specific error conditions and ensures that the methods respond appropriately.
+func (s *BackendBlockHeadersSuite) TestSubscribeBlockHeadersHandlesErrors() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	backend, err := New(s.backendParams(engine.NewBroadcaster()))
+	s.Require().NoError(err)
+
+	s.Run("returns error if unknown start block id is provided", func() {
+		subCtx, subCancel := context.WithCancel(ctx)
+		defer subCancel()
+
+		sub := backend.SubscribeBlockHeadersFromStartBlockID(subCtx, unittest.IdentifierFixture(), flow.BlockStatusFinalized)
+		assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "expected %s, got %v: %v", codes.NotFound, status.Code(sub.Err()).String(), sub.Err())
+	})
+
+	s.Run("returns error if start height is before root height", func() {
+		subCtx, subCancel := context.WithCancel(ctx)
+		defer subCancel()
+
+		sub := backend.SubscribeBlockHeadersFromStartHeight(subCtx, s.rootBlock.Height-1, flow.BlockStatusFinalized)
+		assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected %s, got %v: %v", codes.InvalidArgument, status.Code(sub.Err()).String(), sub.Err())
+	})
+
+	s.Run("returns error if unknown start height is provided", func() {
+		subCtx, subCancel := context.WithCancel(ctx)
+		defer subCancel()
+
+		sub := backend.SubscribeBlockHeadersFromStartHeight(subCtx, s.blocksArray[len(s.blocksArray)-1].Height+10, flow.BlockStatusFinalized)
+		assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "expected %s, got %v: %v", codes.NotFound, status.Code(sub.Err()).String(), sub.Err())
+	})
+}
diff --git a/engine/access/rpc/backend/backend_stream_blocks.go b/engine/access/rpc/backend/backend_stream_blocks.go
new file mode 100644
index 00000000000..4c7b032fbc7
--- /dev/null
+++ b/engine/access/rpc/backend/backend_stream_blocks.go
@@ -0,0 +1,346 @@
+package backend
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine/access/subscription"
+	"github.com/onflow/flow-go/engine/access/subscription/tracker"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/logging"
+)
+
+// backendSubscribeBlocks is a struct representing a backend implementation for subscribing to blocks.
+type backendSubscribeBlocks struct {
+	log     zerolog.Logger
+	state   protocol.State
+	blocks  storage.Blocks
+	headers storage.Headers
+
+	subscriptionHandler *subscription.SubscriptionHandler
+	blockTracker        tracker.BlockTracker
+}
+
+// SubscribeBlocksFromStartBlockID subscribes to the finalized or sealed blocks starting at the requested
+// start block id, up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Each block is filtered by the provided block status, and only
+// those blocks that match the status are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startBlockID: The identifier of the starting block.
+// - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+//
+// If invalid parameters are supplied, SubscribeBlocksFromStartBlockID will return a failed subscription.
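For orientation before the method bodies below, here is a hedged sketch of how a client might drain one of these subscriptions. It assumes only the Channel() and Err() accessors that the tests in this change exercise; `blockSub` is a hypothetical narrowing of subscription.Subscription introduced for the sketch:

```go
package example

import (
	"context"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

// blockSub is a hypothetical narrowing of subscription.Subscription to the
// two accessors used by the tests in this change.
type blockSub interface {
	Channel() <-chan interface{}
	Err() error
}

// consumeBlocks drains a block subscription until the stream ends or ctx is cancelled.
func consumeBlocks(ctx context.Context, sub blockSub) error {
	for {
		select {
		case v, ok := <-sub.Channel():
			if !ok {
				// channel closed: Err() reports why the stream ended
				// (e.g. context.Canceled after the caller cancels ctx)
				return sub.Err()
			}
			block, ok := v.(*flow.Block)
			if !ok {
				return fmt.Errorf("unexpected response type: %T", v)
			}
			fmt.Printf("received block %d\n", block.Height)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
```

The same loop shape applies to the header and digest variants; only the type assertion on the channel payload changes.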
+func (b *backendSubscribeBlocks) SubscribeBlocksFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) subscription.Subscription {
+	return b.subscribeFromStartBlockID(ctx, startBlockID, b.getBlockResponse(blockStatus))
+}
+
+// SubscribeBlocksFromStartHeight subscribes to the finalized or sealed blocks starting at the requested
+// start block height, up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Each block is filtered by the provided block status, and only
+// those blocks that match the status are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startHeight: The height of the starting block.
+// - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+//
+// If invalid parameters are supplied, SubscribeBlocksFromStartHeight will return a failed subscription.
+func (b *backendSubscribeBlocks) SubscribeBlocksFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) subscription.Subscription {
+	return b.subscribeFromStartHeight(ctx, startHeight, b.getBlockResponse(blockStatus))
+}
+
+// SubscribeBlocksFromLatest subscribes to the finalized or sealed blocks starting at the latest sealed block,
+// up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Each block is filtered by the provided block status, and only
+// those blocks that match the status are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+//
+// If invalid parameters are supplied, SubscribeBlocksFromLatest will return a failed subscription.
+func (b *backendSubscribeBlocks) SubscribeBlocksFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription {
+	return b.subscribeFromLatest(ctx, b.getBlockResponse(blockStatus))
+}
+
+// SubscribeBlockHeadersFromStartBlockID streams finalized or sealed block headers starting at the requested
+// start block id, up until the latest available block header. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block header as it becomes available.
+//
+// Each block header is filtered by the provided block status, and only
+// those block headers that match the status are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startBlockID: The identifier of the starting block.
+// - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+//
+// If invalid parameters are supplied, SubscribeBlockHeadersFromStartBlockID will return a failed subscription.
+func (b *backendSubscribeBlocks) SubscribeBlockHeadersFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) subscription.Subscription {
+	return b.subscribeFromStartBlockID(ctx, startBlockID, b.getBlockHeaderResponse(blockStatus))
+}
+
+// SubscribeBlockHeadersFromStartHeight streams finalized or sealed block headers starting at the requested
+// start block height, up until the latest available block header. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block header as it becomes available.
+//
+// Each block header is filtered by the provided block status, and only
+// those block headers that match the status are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startHeight: The height of the starting block.
+// - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+//
+// If invalid parameters are supplied, SubscribeBlockHeadersFromStartHeight will return a failed subscription.
+func (b *backendSubscribeBlocks) SubscribeBlockHeadersFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) subscription.Subscription {
+	return b.subscribeFromStartHeight(ctx, startHeight, b.getBlockHeaderResponse(blockStatus))
+}
+
+// SubscribeBlockHeadersFromLatest streams finalized or sealed block headers starting at the latest sealed block,
+// up until the latest available block header. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block header as it becomes available.
+//
+// Each block header is filtered by the provided block status, and only
+// those block headers that match the status are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+//
+// If invalid parameters are supplied, SubscribeBlockHeadersFromLatest will return a failed subscription.
+func (b *backendSubscribeBlocks) SubscribeBlockHeadersFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription {
+	return b.subscribeFromLatest(ctx, b.getBlockHeaderResponse(blockStatus))
+}
+
+// SubscribeBlockDigestsFromStartBlockID streams finalized or sealed lightweight blocks starting at the requested
+// start block id, up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Each lightweight block is filtered by the provided block status, and only
+// those blocks that match the status are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startBlockID: The identifier of the starting block.
+// - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+//
+// If invalid parameters are supplied, SubscribeBlockDigestsFromStartBlockID will return a failed subscription.
+func (b *backendSubscribeBlocks) SubscribeBlockDigestsFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) subscription.Subscription {
+	return b.subscribeFromStartBlockID(ctx, startBlockID, b.getBlockDigestResponse(blockStatus))
+}
+
+// SubscribeBlockDigestsFromStartHeight streams finalized or sealed lightweight blocks starting at the requested
+// start block height, up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Each lightweight block is filtered by the provided block status, and only
+// those blocks that match the status are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startHeight: The height of the starting block.
+// - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+//
+// If invalid parameters are supplied, SubscribeBlockDigestsFromStartHeight will return a failed subscription.
+func (b *backendSubscribeBlocks) SubscribeBlockDigestsFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) subscription.Subscription {
+	return b.subscribeFromStartHeight(ctx, startHeight, b.getBlockDigestResponse(blockStatus))
+}
+
+// SubscribeBlockDigestsFromLatest streams finalized or sealed lightweight blocks starting at the latest sealed block,
+// up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Each lightweight block is filtered by the provided block status, and only
+// those blocks that match the status are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - blockStatus: The status of the block, which could be only BlockStatusSealed or BlockStatusFinalized.
+//
+// If invalid parameters are supplied, SubscribeBlockDigestsFromLatest will return a failed subscription.
+func (b *backendSubscribeBlocks) SubscribeBlockDigestsFromLatest(ctx context.Context, blockStatus flow.BlockStatus) subscription.Subscription {
+	return b.subscribeFromLatest(ctx, b.getBlockDigestResponse(blockStatus))
+}
+
+// subscribeFromStartBlockID is a common method that allows clients to subscribe starting at the requested start block id.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startBlockID: The identifier of the starting block.
+// - getData: The callback used by subscriptions to retrieve data information for the specified height and block status.
+//
+// If invalid parameters are supplied, subscribeFromStartBlockID will return a failed subscription.
+func (b *backendSubscribeBlocks) subscribeFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, getData subscription.GetDataByHeightFunc) subscription.Subscription {
+	nextHeight, err := b.blockTracker.GetStartHeightFromBlockID(startBlockID)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start height from block id")
+	}
+	return b.subscriptionHandler.Subscribe(ctx, nextHeight, getData)
+}
+
+// subscribeFromStartHeight is a common method that allows clients to subscribe starting at the requested start block height.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startHeight: The height of the starting block.
+// - getData: The callback used by subscriptions to retrieve data information for the specified height and block status.
+//
+// If invalid parameters are supplied, subscribeFromStartHeight will return a failed subscription.
+func (b *backendSubscribeBlocks) subscribeFromStartHeight(ctx context.Context, startHeight uint64, getData subscription.GetDataByHeightFunc) subscription.Subscription {
+	nextHeight, err := b.blockTracker.GetStartHeightFromHeight(startHeight)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start height from block height")
+	}
+	return b.subscriptionHandler.Subscribe(ctx, nextHeight, getData)
+}
+
+// subscribeFromLatest is a common method that allows clients to subscribe starting at the latest sealed block.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - getData: The callback used by subscriptions to retrieve data information for the specified height and block status.
+//
+// No errors are expected during normal operation.
+func (b *backendSubscribeBlocks) subscribeFromLatest(ctx context.Context, getData subscription.GetDataByHeightFunc) subscription.Subscription { + nextHeight, err := b.blockTracker.GetStartHeightFromLatest(ctx) + if err != nil { + return subscription.NewFailedSubscription(err, "could not get start height from latest") + } + return b.subscriptionHandler.Subscribe(ctx, nextHeight, getData) +} + +// getBlockResponse returns a GetDataByHeightFunc that retrieves block information for the specified height. +func (b *backendSubscribeBlocks) getBlockResponse(blockStatus flow.BlockStatus) subscription.GetDataByHeightFunc { + return func(_ context.Context, height uint64) (interface{}, error) { + block, err := b.getBlock(height, blockStatus) + if err != nil { + return nil, err + } + + b.log.Trace(). + Hex("block_id", logging.ID(block.ID())). + Uint64("height", height). + Msgf("sending block info") + + return block, nil + } +} + +// getBlockHeaderResponse returns a GetDataByHeightFunc that retrieves block header information for the specified height. +func (b *backendSubscribeBlocks) getBlockHeaderResponse(blockStatus flow.BlockStatus) subscription.GetDataByHeightFunc { + return func(_ context.Context, height uint64) (interface{}, error) { + header, err := b.getBlockHeader(height, blockStatus) + if err != nil { + return nil, err + } + + b.log.Trace(). + Hex("block_id", logging.ID(header.ID())). + Uint64("height", height). + Msgf("sending block header info") + + return header, nil + } +} + +// getBlockDigestResponse returns a GetDataByHeightFunc that retrieves lightweight block information for the specified height. +func (b *backendSubscribeBlocks) getBlockDigestResponse(blockStatus flow.BlockStatus) subscription.GetDataByHeightFunc { + return func(_ context.Context, height uint64) (interface{}, error) { + header, err := b.getBlockHeader(height, blockStatus) + if err != nil { + return nil, err + } + + b.log.Trace(). + Hex("block_id", logging.ID(header.ID())). + Uint64("height", height). + Msgf("sending lightweight block info") + + return flow.NewBlockDigest(header.ID(), header.Height, time.UnixMilli(int64(header.Timestamp)).UTC()), nil + } +} + +// getBlockHeader returns the block header for the given block height. +// Expected errors during normal operation: +// - subscription.ErrBlockNotReady: block for the given block height is not available. +func (b *backendSubscribeBlocks) getBlockHeader(height uint64, expectedBlockStatus flow.BlockStatus) (*flow.Header, error) { + err := b.validateHeight(height, expectedBlockStatus) + if err != nil { + return nil, err + } + + // since we are querying a finalized or sealed block header, we can use the height index and save an ID computation + header, err := b.headers.ByHeight(height) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return nil, fmt.Errorf("failed to retrieve block header for height %d: %w", height, subscription.ErrBlockNotReady) + } + return nil, err + } + + return header, nil +} + +// getBlock returns the block for the given block height. +// Expected errors during normal operation: +// - subscription.ErrBlockNotReady: block for the given block height is not available. 
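The three response builders above all return subscription.GetDataByHeightFunc values: per-height callbacks that the subscription handler invokes as heights become available. A minimal sketch of a custom callback, with a hypothetical payload type (`heightGreeting`) standing in for blocks:

```go
package example

import (
	"context"
	"fmt"

	"github.com/onflow/flow-go/engine/access/subscription"
)

// heightGreeting is a toy payload type, used only for this sketch.
type heightGreeting struct {
	Height uint64
	Msg    string
}

// getGreetingResponse mirrors the shape of getBlockResponse above: it returns
// a per-height callback that produces one payload per requested height.
func getGreetingResponse() subscription.GetDataByHeightFunc {
	return func(_ context.Context, height uint64) (interface{}, error) {
		// a production callback would consult storage here and wrap
		// storage.ErrNotFound in subscription.ErrBlockNotReady
		return &heightGreeting{Height: height, Msg: fmt.Sprintf("height %d is available", height)}, nil
	}
}
```

A production callback should signal a not-yet-available height with subscription.ErrBlockNotReady so the handler retries later, as getBlock and getBlockHeader below do.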
+func (b *backendSubscribeBlocks) getBlock(height uint64, expectedBlockStatus flow.BlockStatus) (*flow.Block, error) { + err := b.validateHeight(height, expectedBlockStatus) + if err != nil { + return nil, err + } + + // since we are querying a finalized or sealed block, we can use the height index and save an ID computation + block, err := b.blocks.ByHeight(height) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return nil, fmt.Errorf("failed to retrieve block for height %d: %w", height, subscription.ErrBlockNotReady) + } + return nil, err + } + + return block, nil +} + +// validateHeight checks if the given block height is valid and available based on the expected block status. +// Expected errors during normal operation: +// - subscription.ErrBlockNotReady when unable to retrieve the block by height. +func (b *backendSubscribeBlocks) validateHeight(height uint64, expectedBlockStatus flow.BlockStatus) error { + highestHeight, err := b.blockTracker.GetHighestHeight(expectedBlockStatus) + if err != nil { + return fmt.Errorf("could not get highest available height: %w", err) + } + + // fail early if no notification has been received for the given block height. + // note: it's possible for the data to exist in the data store before the notification is + // received. this ensures a consistent view is available to all streams. + if height > highestHeight { + return fmt.Errorf("block %d is not available yet: %w", height, subscription.ErrBlockNotReady) + } + + return nil +} diff --git a/engine/access/rpc/backend/backend_stream_blocks_test.go b/engine/access/rpc/backend/backend_stream_blocks_test.go new file mode 100644 index 00000000000..3bf259dc7fa --- /dev/null +++ b/engine/access/rpc/backend/backend_stream_blocks_test.go @@ -0,0 +1,494 @@ +package backend + +import ( + "context" + "fmt" + "testing" + "testing/synctest" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/engine/access/subscription/tracker" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +// BackendBlocksSuite is a test suite for the backendBlocks functionality related to blocks subscription. +// It utilizes the suite to organize and structure test code. 
+type BackendBlocksSuite struct {
+	suite.Suite
+
+	state    *protocol.State
+	snapshot *protocol.Snapshot
+	log      zerolog.Logger
+
+	blocks       *storagemock.Blocks
+	headers      *storagemock.Headers
+	blockTracker tracker.BlockTracker
+
+	connectionFactory *connectionmock.ConnectionFactory
+
+	chainID flow.ChainID
+
+	broadcaster *engine.Broadcaster
+	blocksArray []*flow.Block
+	blockMap    map[uint64]*flow.Block
+	rootBlock   *flow.Block
+
+	backend *Backend
+}
+
+// testType represents a test scenario for subscribing
+type testType struct {
+	name            string
+	highestBackfill int
+	startValue      interface{}
+	blockStatus     flow.BlockStatus
+	expectedBlocks  []*flow.Block
+}
+
+func TestBackendBlocksSuite(t *testing.T) {
+	suite.Run(t, new(BackendBlocksSuite))
+}
+
+// SetupTest initializes the test suite with required dependencies.
+func (s *BackendBlocksSuite) SetupTest() {
+	s.log = unittest.Logger()
+	s.state = new(protocol.State)
+	s.snapshot = new(protocol.Snapshot)
+	header := unittest.BlockHeaderFixture()
+
+	params := new(protocol.Params)
+	params.On("SporkID").Return(unittest.IdentifierFixture(), nil)
+	params.On("SporkRootBlockHeight").Return(header.Height, nil)
+	params.On("SealedRoot").Return(header, nil)
+	s.state.On("Params").Return(params)
+
+	s.blocks = new(storagemock.Blocks)
+	s.headers = new(storagemock.Headers)
+	s.chainID = flow.Testnet
+	s.connectionFactory = connectionmock.NewConnectionFactory(s.T())
+
+	blockCount := 5
+	s.blockMap = make(map[uint64]*flow.Block, blockCount)
+	s.blocksArray = make([]*flow.Block, 0, blockCount)
+
+	// generate blockCount consecutive blocks, rooted at a fixture block
+	s.rootBlock = unittest.BlockFixture()
+	parent := s.rootBlock.ToHeader()
+	s.blockMap[s.rootBlock.Height] = s.rootBlock
+
+	s.T().Logf("Generating %d blocks, root block: %d %s", blockCount, s.rootBlock.Height, s.rootBlock.ID())
+	for i := 0; i < blockCount; i++ {
+		block := unittest.BlockWithParentFixture(parent)
+		// update for next iteration
+		parent = block.ToHeader()
+
+		s.blocksArray = append(s.blocksArray, block)
+		s.blockMap[block.Height] = block
+		s.T().Logf("Adding block %d %s", block.Height, block.ID())
+	}
+
+	s.headers.On("ByBlockID", mock.AnythingOfType("flow.Identifier")).Return(
+		func(blockID flow.Identifier) (*flow.Header, error) {
+			for _, block := range s.blockMap {
+				if block.ID() == blockID {
+					return block.ToHeader(), nil
+				}
+			}
+			return nil, storage.ErrNotFound
+		},
+	).Maybe()
+
+	s.headers.On("ByHeight", mock.AnythingOfType("uint64")).Return(
+		mocks.ConvertStorageOutput(
+			mocks.StorageMapGetter(s.blockMap),
+			func(block *flow.Block) *flow.Header { return block.ToHeader() },
+		),
+	).Maybe()
+
+	s.blocks.On("ByHeight", mock.AnythingOfType("uint64")).Return(
+		mocks.StorageMapGetter(s.blockMap),
+	).Maybe()
+
+	s.state.On("Final").Return(s.snapshot, nil).Maybe()
+	s.state.On("Sealed").Return(s.snapshot, nil).Maybe()
+}
+
+// backendParams returns the Params configuration for the backend.
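SetupTest above backs the storage mocks with an in-memory height-to-block map via mocks.StorageMapGetter. The underlying pattern is just a closure over a map; a hedged sketch with a hypothetical name (`mapGetter`):

```go
package example

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// mapGetter returns a by-height lookup over an in-memory map, returning
// storage.ErrNotFound for missing heights, mirroring how the suite's
// storage mocks are backed above.
func mapGetter(blocks map[uint64]*flow.Block) func(uint64) (*flow.Block, error) {
	return func(height uint64) (*flow.Block, error) {
		if block, ok := blocks[height]; ok {
			return block, nil
		}
		return nil, storage.ErrNotFound
	}
}
```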
+func (s *BackendBlocksSuite) backendParams(broadcaster *engine.Broadcaster) Params { + var err error + // Head() is called twice by NewBlockTracker + s.snapshot.On("Head").Return(s.rootBlock.ToHeader(), nil).Twice() + s.blockTracker, err = tracker.NewBlockTracker( + s.state, + s.rootBlock.Height, + s.headers, + broadcaster, + ) + s.Require().NoError(err) + + return Params{ + State: s.state, + Blocks: s.blocks, + Headers: s.headers, + ChainID: s.chainID, + MaxHeightRange: events.DefaultMaxHeightRange, + SnapshotHistoryLimit: DefaultSnapshotHistoryLimit, + AccessMetrics: metrics.NewNoopCollector(), + Log: s.log, + SubscriptionHandler: subscription.NewSubscriptionHandler( + s.log, + broadcaster, + subscription.DefaultSendTimeout, + subscription.DefaultResponseLimit, + subscription.DefaultSendBufferSize, + ), + BlockTracker: s.blockTracker, + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + } +} + +// subscribeFromStartBlockIdTestCases generates variations of testType scenarios for subscriptions +// starting from a specified block ID. It is designed to test the subscription functionality when the subscription +// starts from a custom block ID, either sealed or finalized. +func (s *BackendBlocksSuite) subscribeFromStartBlockIdTestCases() []testType { + expectedFromRoot := []*flow.Block{s.rootBlock} + expectedFromRoot = append(expectedFromRoot, s.blocksArray...) + + baseTests := []testType{ + { + name: "happy path - all new blocks", + highestBackfill: -1, // no backfill + startValue: s.rootBlock.ID(), + expectedBlocks: expectedFromRoot, + }, + { + name: "happy path - partial backfill", + highestBackfill: 2, // backfill the first 3 blocks + startValue: s.blocksArray[0].ID(), + expectedBlocks: s.blocksArray, + }, + { + name: "happy path - complete backfill", + highestBackfill: len(s.blocksArray) - 1, // backfill all blocks + startValue: s.blocksArray[0].ID(), + expectedBlocks: s.blocksArray, + }, + { + name: "happy path - start from root block by id", + highestBackfill: len(s.blocksArray) - 1, // backfill all blocks + startValue: s.rootBlock.ID(), // start from root block + expectedBlocks: expectedFromRoot, + }, + } + + return s.setupBlockStatusesForTestCases(baseTests) +} + +// subscribeFromStartHeightTestCases generates variations of testType scenarios for subscriptions +// starting from a specified block height. It is designed to test the subscription functionality when the subscription +// starts from a custom height, either sealed or finalized. +func (s *BackendBlocksSuite) subscribeFromStartHeightTestCases() []testType { + expectedFromRoot := []*flow.Block{s.rootBlock} + expectedFromRoot = append(expectedFromRoot, s.blocksArray...) 
+
+	baseTests := []testType{
+		{
+			name:            "happy path - all new blocks",
+			highestBackfill: -1, // no backfill
+			startValue:      s.rootBlock.Height,
+			expectedBlocks:  expectedFromRoot,
+		},
+		{
+			name:            "happy path - partial backfill",
+			highestBackfill: 2, // backfill the first 3 blocks
+			startValue:      s.blocksArray[0].Height,
+			expectedBlocks:  s.blocksArray,
+		},
+		{
+			name:            "happy path - complete backfill",
+			highestBackfill: len(s.blocksArray) - 1, // backfill all blocks
+			startValue:      s.blocksArray[0].Height,
+			expectedBlocks:  s.blocksArray,
+		},
+		{
+			name:            "happy path - start from root block by height",
+			highestBackfill: len(s.blocksArray) - 1, // backfill all blocks
+			startValue:      s.rootBlock.Height, // start from root block
+			expectedBlocks:  expectedFromRoot,
+		},
+	}
+
+	return s.setupBlockStatusesForTestCases(baseTests)
+}
+
+// subscribeFromLatestTestCases generates variations of testType scenarios for subscriptions
+// starting from the latest sealed block. It is designed to test the subscription functionality when the subscription
+// starts from the latest available block, either sealed or finalized.
+func (s *BackendBlocksSuite) subscribeFromLatestTestCases() []testType {
+	expectedFromRoot := []*flow.Block{s.rootBlock}
+	expectedFromRoot = append(expectedFromRoot, s.blocksArray...)
+
+	baseTests := []testType{
+		{
+			name:            "happy path - all new blocks",
+			highestBackfill: -1, // no backfill
+			expectedBlocks:  expectedFromRoot,
+		},
+		{
+			name:            "happy path - partial backfill",
+			highestBackfill: 2, // backfill the first 3 blocks
+			expectedBlocks:  expectedFromRoot,
+		},
+		{
+			name:            "happy path - complete backfill",
+			highestBackfill: len(s.blocksArray) - 1, // backfill all blocks
+			expectedBlocks:  expectedFromRoot,
+		},
+	}
+
+	return s.setupBlockStatusesForTestCases(baseTests)
+}
+
+// setupBlockStatusesForTestCases sets up variations for each of the base test cases.
+// The function performs the following actions:
+//
+// 1. Creates variations for each of the provided base test scenarios.
+// 2. For each base test, it generates two variations: one for Sealed blocks and one for Finalized blocks.
+// 3. Returns a slice of testType containing all variations of test scenarios.
+//
+// Parameters:
+// - baseTests: A slice of testType representing base test scenarios.
+func (s *BackendBlocksSuite) setupBlockStatusesForTestCases(baseTests []testType) []testType {
+	// create variations for each of the base test
+	tests := make([]testType, 0, len(baseTests)*2)
+	for _, test := range baseTests {
+		t1 := test
+		t1.name = fmt.Sprintf("%s - finalized blocks", test.name)
+		t1.blockStatus = flow.BlockStatusFinalized
+		tests = append(tests, t1)
+
+		t2 := test
+		t2.name = fmt.Sprintf("%s - sealed blocks", test.name)
+		t2.blockStatus = flow.BlockStatusSealed
+		tests = append(tests, t2)
+	}
+
+	return tests
+}
+
+// setupBlockTrackerMock configures a mock for the block tracker based on the provided parameters.
+//
+// Parameters:
+// - blockStatus: The status of the blocks being tracked (Sealed or Finalized).
+// - highestHeader: The highest header that the block tracker should report.
+func (s *BackendBlocksSuite) setupBlockTrackerMock(blockStatus flow.BlockStatus, highestHeader *flow.Header) {
+	s.snapshot.On("Head").Unset()
+	s.snapshot.On("Head").Return(highestHeader, nil)
+	err := s.blockTracker.ProcessOnFinalizedBlock()
+	s.Require().NoError(err)
+}
+
+// TestSubscribeBlocksFromStartBlockID tests the SubscribeBlocksFromStartBlockID method.
+func (s *BackendBlocksSuite) TestSubscribeBlocksFromStartBlockID() {
+    call := func(ctx context.Context, startValue interface{}, blockStatus flow.BlockStatus) subscription.Subscription {
+        return s.backend.SubscribeBlocksFromStartBlockID(ctx, startValue.(flow.Identifier), blockStatus)
+    }
+
+    s.subscribe(call, s.requireBlocks, s.subscribeFromStartBlockIdTestCases())
+}
+
+// TestSubscribeBlocksFromStartHeight tests the SubscribeBlocksFromStartHeight method.
+func (s *BackendBlocksSuite) TestSubscribeBlocksFromStartHeight() {
+    call := func(ctx context.Context, startValue interface{}, blockStatus flow.BlockStatus) subscription.Subscription {
+        return s.backend.SubscribeBlocksFromStartHeight(ctx, startValue.(uint64), blockStatus)
+    }
+
+    s.subscribe(call, s.requireBlocks, s.subscribeFromStartHeightTestCases())
+}
+
+// TestSubscribeBlocksFromLatest tests the SubscribeBlocksFromLatest method.
+func (s *BackendBlocksSuite) TestSubscribeBlocksFromLatest() {
+    call := func(ctx context.Context, startValue interface{}, blockStatus flow.BlockStatus) subscription.Subscription {
+        return s.backend.SubscribeBlocksFromLatest(ctx, blockStatus)
+    }
+
+    s.subscribe(call, s.requireBlocks, s.subscribeFromLatestTestCases())
+}
+
+// subscribe is the common method that tests the functionality of the subscribe methods in the Backend.
+// It covers various scenarios for subscribing, handling backfill, and receiving block updates.
+// The test cases include scenarios for both finalized and sealed blocks.
+//
+// Parameters:
+//
+// - subscribeFn: A function representing the subscription method to be tested.
+// It takes a context, startValue, and blockStatus as parameters
+// and returns a subscription.Subscription.
+//
+// - requireFn: A function responsible for validating that the received information
+// matches the expected data. It takes an actual interface{} and an expected *flow.Block as parameters.
+//
+// - tests: A slice of testType representing different test scenarios for subscriptions.
+//
+// The function performs the following steps for each test case:
+//
+// 1. Initializes the test context and cancellation function.
+// 2. Iterates through the provided test cases.
+// 3. For each test case, sets up a block tracker mock if there are blocks to backfill.
+// 4. Mocks the latest sealed block if no start value is provided.
+// 5. Subscribes using the provided subscription function.
+// 6. Simulates the reception of new blocks and consumes them from the subscription channel.
+// 7. Ensures that there are no new messages waiting after all blocks have been processed.
+// 8. Cancels the subscription and ensures it shuts down gracefully.
+func (s *BackendBlocksSuite) subscribe(
+    subscribeFn func(ctx context.Context, startValue interface{}, blockStatus flow.BlockStatus) subscription.Subscription,
+    requireFn func(interface{}, *flow.Block),
+    tests []testType,
+) {
+    for _, test := range tests {
+        s.Run(test.name, func() {
+            synctest.Test(s.T(), func(t *testing.T) {
+                // the broadcaster must be set up inside of the synctest bubble, otherwise the test
+                // will panic.
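+                // (As a sketch of the pattern: synctest.Test runs this function in an
+                // isolated "bubble" with a fake clock, and synctest.Wait blocks until
+                // every goroutine started inside the bubble is durably blocked. Channels
+                // that the bubble's goroutines block on must therefore be created here
+                // rather than in SetupTest.)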
+                var err error
+                broadcaster := engine.NewBroadcaster()
+                s.backend, err = New(s.backendParams(broadcaster))
+                s.Require().NoError(err)
+
+                // add "backfill" blocks: blocks that are already in the database before the test starts.
+                // this simulates a subscription starting from a past block
+                if test.highestBackfill > 0 {
+                    s.setupBlockTrackerMock(test.blockStatus, s.blocksArray[test.highestBackfill].ToHeader())
+                }
+
+                subCtx, subCancel := context.WithCancel(context.Background())
+
+                // mock latest sealed if no start value provided
+                if test.startValue == nil {
+                    s.snapshot.On("Head").Unset()
+                    s.snapshot.On("Head").Return(s.rootBlock.ToHeader(), nil).Once()
+                }
+
+                sub := subscribeFn(subCtx, test.startValue, test.blockStatus)
+
+                // loop over all blocks
+                for i, b := range test.expectedBlocks {
+                    // simulate new block received.
+                    // all blocks with index <= highestBackfill were already received
+                    if i > test.highestBackfill {
+                        s.setupBlockTrackerMock(test.blockStatus, b.ToHeader())
+
+                        broadcaster.Publish()
+                    }
+
+                    // block until there is data waiting in the subscription channel
+                    synctest.Wait()
+
+                    // consume block from subscription
+                    v, ok := <-sub.Channel()
+                    s.Require().True(ok, "channel closed while waiting for block %d %v: err: %v", b.Height, b.ID(), sub.Err())
+
+                    requireFn(v, b)
+                }
+
+                // block until all goroutines in the bubble are idle again
+                synctest.Wait()
+
+                // make sure there are no new messages waiting. the channel should be open with nothing waiting
+                select {
+                case <-sub.Channel():
+                    s.T().Error("expected channel to be empty")
+                default:
+                }
+
+                // stop the subscription
+                subCancel()
+
+                // block until the subscription goroutine stops
+                synctest.Wait()
+
+                // ensure subscription shuts down gracefully
+                v, ok := <-sub.Channel()
+                s.Nil(v)
+                s.False(ok)
+                s.ErrorIs(sub.Err(), context.Canceled)
+            })
+        })
+    }
+}
+
+// requireBlocks ensures that the received block information matches the expected data.
+func (s *BackendBlocksSuite) requireBlocks(v interface{}, expectedBlock *flow.Block) {
+    actualBlock, ok := v.(*flow.Block)
+    require.True(s.T(), ok, "unexpected response type: %T", v)
+
+    s.Require().Equalf(expectedBlock.Height, actualBlock.Height, "expected block height %d, got %d", expectedBlock.Height, actualBlock.Height)
+    s.Require().Equal(expectedBlock.ID(), actualBlock.ID())
+    s.Require().Equal(*expectedBlock, *actualBlock)
+}
+
+// TestSubscribeBlocksHandlesErrors tests error handling scenarios for the SubscribeBlocksFromStartBlockID and SubscribeBlocksFromStartHeight methods in the Backend.
+// It ensures that the methods correctly return errors for various invalid input cases.
+//
+// Test Cases:
+//
+// 1. Returns error for unindexed start block id:
+// - Tests that subscribing to blocks with an unindexed start block ID results in a NotFound error.
+//
+// 2. Returns error for start height before root height:
+// - Validates that attempting to subscribe to blocks with a start height before the root height results in an InvalidArgument error.
+//
+// 3. Returns error for unindexed start height:
+// - Tests that subscribing to blocks with an unindexed start height results in a NotFound error.
+//
+// Each test case checks for specific error conditions and ensures that the method responds appropriately.
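+//
+// Note that these subscribe methods never return an error directly; a failed
+// subscription is reported through the subscription object itself. A caller-side
+// check could look like this (a sketch; backend, ctx and someHeight are assumed):
+//
+//	sub := backend.SubscribeBlocksFromStartHeight(ctx, someHeight, flow.BlockStatusFinalized)
+//	if code := status.Code(sub.Err()); code != codes.OK {
+//		// e.g. codes.NotFound for an unindexed start height, or
+//		// codes.InvalidArgument for a height below the root height
+//	}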
+func (s *BackendBlocksSuite) TestSubscribeBlocksHandlesErrors() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + backend, err := New(s.backendParams(engine.NewBroadcaster())) + s.Require().NoError(err) + + s.Run("returns error if unknown start block id is provided", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := backend.SubscribeBlocksFromStartBlockID(subCtx, unittest.IdentifierFixture(), flow.BlockStatusFinalized) + s.Equal(codes.NotFound, status.Code(sub.Err()), "expected %s, got %v: %v", codes.NotFound, status.Code(sub.Err()), sub.Err()) + }) + + s.Run("returns error for start height before root height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := backend.SubscribeBlocksFromStartHeight(subCtx, s.rootBlock.Height-1, flow.BlockStatusFinalized) + s.Equal(codes.InvalidArgument, status.Code(sub.Err()), "expected %s, got %v: %v", codes.InvalidArgument, status.Code(sub.Err()), sub.Err()) + }) + + s.Run("returns error if unknown start height is provided", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := backend.SubscribeBlocksFromStartHeight(subCtx, s.blocksArray[len(s.blocksArray)-1].Height+10, flow.BlockStatusFinalized) + s.Equal(codes.NotFound, status.Code(sub.Err()), "expected %s, got %v: %v", codes.NotFound, status.Code(sub.Err()), sub.Err()) + }) +} diff --git a/engine/access/rpc/backend/backend_test.go b/engine/access/rpc/backend/backend_test.go index 9d4382f0db0..acee00c278d 100644 --- a/engine/access/rpc/backend/backend_test.go +++ b/engine/access/rpc/backend/backend_test.go @@ -2,36 +2,67 @@ package backend import ( "context" + "errors" "fmt" - "math/rand" + "os" + "sort" "testing" "time" - "github.com/dgraph-io/badger/v2" + "github.com/coreos/go-semver/semver" + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" entitiesproto "github.com/onflow/flow/protobuf/go/flow/entities" execproto "github.com/onflow/flow/protobuf/go/flow/execution" "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" + "github.com/sony/gobreaker" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - access "github.com/onflow/flow-go/engine/access/mock" - backendmock "github.com/onflow/flow-go/engine/access/rpc/backend/mock" + "github.com/onflow/flow-go/cmd/build" + accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/events" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + communicatormock "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/rpc/connection" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/engine/common/version" + "github.com/onflow/flow-go/fvm/blueprints" + accessmodel "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" 
"github.com/onflow/flow-go/module/metrics" + realstate "github.com/onflow/flow-go/state" + realprotocol "github.com/onflow/flow-go/state/protocol" bprotocol "github.com/onflow/flow-go/state/protocol/badger" + "github.com/onflow/flow-go/state/protocol/invalid" protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/state/protocol/util" "github.com/onflow/flow-go/storage" storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" ) +const TEST_MAX_HEIGHT = 100 + +var eventEncodingVersions = []entitiesproto.EventEncodingVersion{ + entitiesproto.EventEncodingVersion_JSON_CDC_V0, + entitiesproto.EventEncodingVersion_CCF_V0, +} + type Suite struct { suite.Suite @@ -39,17 +70,33 @@ type Suite struct { snapshot *protocol.Snapshot log zerolog.Logger - blocks *storagemock.Blocks - headers *storagemock.Headers - collections *storagemock.Collections - transactions *storagemock.Transactions - receipts *storagemock.ExecutionReceipts - results *storagemock.ExecutionResults - colClient *access.AccessAPIClient - execClient *access.ExecutionAPIClient - historicalAccessClient *access.AccessAPIClient - connectionFactory *backendmock.ConnectionFactory - chainID flow.ChainID + blocks *storagemock.Blocks + headers *storagemock.Headers + collections *storagemock.Collections + transactions *storagemock.Transactions + receipts *storagemock.ExecutionReceipts + results *storagemock.ExecutionResults + transactionResults *storagemock.LightTransactionResults + events *storagemock.Events + txErrorMessages *storagemock.TransactionResultErrorMessages + + db storage.DB + dbDir string + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + versionControl *version.VersionControl + + colClient *accessmock.AccessAPIClient + execClient *accessmock.ExecutionAPIClient + historicalAccessClient *accessmock.AccessAPIClient + + connectionFactory *connectionmock.ConnectionFactory + communicator *communicatormock.Communicator + + chainID flow.ChainID + systemTx *flow.TransactionBody + + fixedExecutionNodeIDs flow.IdentifierList + preferredExecutionNodeIDs flow.IdentifierList } func TestHandler(t *testing.T) { @@ -57,26 +104,51 @@ func TestHandler(t *testing.T) { } func (suite *Suite) SetupTest() { - rand.Seed(time.Now().UnixNano()) - suite.log = zerolog.New(zerolog.NewConsoleWriter()) + suite.log = unittest.Logger() suite.state = new(protocol.State) suite.snapshot = new(protocol.Snapshot) header := unittest.BlockHeaderFixture() params := new(protocol.Params) - params.On("Root").Return(header, nil) + params.On("FinalizedRoot").Return(header, nil) + params.On("SporkID").Return(unittest.IdentifierFixture(), nil) params.On("SporkRootBlockHeight").Return(header.Height, nil) - suite.state.On("Params").Return(params).Maybe() + params.On("SealedRoot").Return(header, nil) + suite.state.On("Params").Return(params) + suite.blocks = new(storagemock.Blocks) suite.headers = new(storagemock.Headers) suite.transactions = new(storagemock.Transactions) suite.collections = new(storagemock.Collections) suite.receipts = new(storagemock.ExecutionReceipts) suite.results = new(storagemock.ExecutionResults) - suite.colClient = new(access.AccessAPIClient) - suite.execClient = new(access.ExecutionAPIClient) + suite.txErrorMessages = storagemock.NewTransactionResultErrorMessages(suite.T()) + suite.colClient = 
new(accessmock.AccessAPIClient) + suite.execClient = new(accessmock.ExecutionAPIClient) + suite.transactionResults = storagemock.NewLightTransactionResults(suite.T()) + suite.events = storagemock.NewEvents(suite.T()) suite.chainID = flow.Testnet - suite.historicalAccessClient = new(access.AccessAPIClient) - suite.connectionFactory = new(backendmock.ConnectionFactory) + suite.historicalAccessClient = new(accessmock.AccessAPIClient) + suite.connectionFactory = connectionmock.NewConnectionFactory(suite.T()) + + suite.communicator = new(communicatormock.Communicator) + + var err error + suite.systemTx, err = blueprints.SystemChunkTransaction(flow.Testnet.Chain()) + suite.Require().NoError(err) + + pdb, dbDir := unittest.TempPebbleDB(suite.T()) + suite.dbDir = dbDir + suite.db = pebbleimpl.ToDB(pdb) + progress, err := store.NewConsumerProgress(suite.db, module.ConsumeProgressLastFullBlockHeight).Initialize(0) + require.NoError(suite.T(), err) + suite.lastFullBlockHeight, err = counters.NewPersistentStrictMonotonicCounter(progress) + suite.Require().NoError(err) +} + +// TearDownTest cleans up the db +func (suite *Suite) TearDownTest() { + err := os.RemoveAll(suite.dbDir) + suite.Require().NoError(err) } func (suite *Suite) TestPing() { @@ -88,30 +160,12 @@ func (suite *Suite) TestPing() { On("Ping", mock.Anything, &execproto.PingRequest{}). Return(&execproto.PingResponse{}, nil) - backend := New( - suite.state, - suite.colClient, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() - err := backend.Ping(context.Background()) + backend, err := New(params) + suite.Require().NoError(err) + err = backend.Ping(context.Background()) suite.Require().NoError(err) } @@ -120,55 +174,36 @@ func (suite *Suite) TestGetLatestFinalizedBlockHeader() { block := unittest.BlockHeaderFixture() suite.state.On("Final").Return(suite.snapshot, nil).Maybe() suite.snapshot.On("Head").Return(block, nil).Once() - suite.state.On("Sealed").Return(suite.snapshot, nil) - suite.snapshot.On("Head").Return(block, nil).Once() - backend := New( - suite.state, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + + backend, err := New(params) + suite.Require().NoError(err) // query the handler for the latest finalized block - header, status, err := backend.GetLatestBlockHeader(context.Background(), false) - suite.checkResponse(header, err) + header, stat, err := backend.GetLatestBlockHeader(context.Background(), false) + suite.Require().NoError(err) + suite.Require().NotNil(header) // make sure we got the latest block suite.Require().Equal(block.ID(), header.ID()) suite.Require().Equal(block.Height, header.Height) suite.Require().Equal(block.ParentID, header.ParentID) - suite.Require().Equal(status, flow.BlockStatusSealed) + suite.Require().Equal(stat, flow.BlockStatusFinalized) suite.assertAllExpectations() - } // TestGetLatestProtocolStateSnapshot_NoTransitionSpan tests our GetLatestProtocolStateSnapshot RPC endpoint -// where the sealing segment for the state requested at latest finalized block does not contain any blocks that +// where the sealing segment for the State requested at latest finalized block does not contain any Blocks that // 
spans an epoch or epoch phase transition. func (suite *Suite) TestGetLatestProtocolStateSnapshot_NoTransitionSpan() { identities := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(identities) - util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { - epochBuilder := unittest.NewEpochBuilder(suite.T(), state) + util.RunWithFullProtocolStateAndMutator(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { + epochBuilder := unittest.NewEpochBuilder(suite.T(), mutableState, state) // build epoch 1 - // blocks in current state + // Blocks in current State // P <- A(S_P-1) <- B(S_P) <- C(S_A) <- D(S_B) |setup| <- E(S_C) <- F(S_D) |commit| epochBuilder. BuildEpoch(). @@ -178,7 +213,7 @@ func (suite *Suite) TestGetLatestProtocolStateSnapshot_NoTransitionSpan() { epoch1, ok := epochBuilder.EpochHeights(1) require.True(suite.T(), ok) - // setup AtBlockID mock returns for state + // setup AtBlockID mock returns for State for _, height := range epoch1.Range() { suite.state.On("AtHeight", height).Return(state.AtHeight(height)).Once() } @@ -188,34 +223,18 @@ func (suite *Suite) TestGetLatestProtocolStateSnapshot_NoTransitionSpan() { snap := state.AtHeight(epoch1.Range()[2]) suite.state.On("Final").Return(snap).Once() - backend := New( - suite.state, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - 100, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + params.MaxHeightRange = TEST_MAX_HEIGHT + + backend, err := New(params) + suite.Require().NoError(err) // query the handler for the latest finalized snapshot bytes, err := backend.GetLatestProtocolStateSnapshot(context.Background()) suite.Require().NoError(err) // we expect the endpoint to return the snapshot at the same height we requested - // because it has a valid sealing segment with no blocks spanning an epoch or phase transition + // because it has a valid sealing segment with no Blocks spanning an epoch or phase transition expectedSnapshotBytes, err := convert.SnapshotToBytes(snap) suite.Require().NoError(err) suite.Require().Equal(expectedSnapshotBytes, bytes) @@ -223,13 +242,13 @@ func (suite *Suite) TestGetLatestProtocolStateSnapshot_NoTransitionSpan() { } // TestGetLatestProtocolStateSnapshot_TransitionSpans tests our GetLatestProtocolStateSnapshot RPC endpoint -// where the sealing segment for the state requested for latest finalized block contains a block that -// spans an epoch transition and blocks that span epoch phase transitions. +// where the sealing segment for the State requested for latest finalized block contains a block that +// spans an epoch transition and Blocks that span epoch phase transitions. 
 func (suite *Suite) TestGetLatestProtocolStateSnapshot_TransitionSpans() {
 	identities := unittest.CompleteIdentitySet()
 	rootSnapshot := unittest.RootSnapshotFixture(identities)
-	util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) {
-		epochBuilder := unittest.NewEpochBuilder(suite.T(), state)
+	util.RunWithFullProtocolStateAndMutator(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
+		epochBuilder := unittest.NewEpochBuilder(suite.T(), mutableState, state)

 		// building 2 epochs allows us to take a snapshot at a point in time where
 		// an epoch transition happens
@@ -247,63 +266,45 @@
 		epoch2, ok := epochBuilder.EpochHeights(2)
 		require.True(suite.T(), ok)

 		// setup AtHeight mock returns for state
 		for _, height := range append(epoch1.Range(), epoch2.Range()...) {
 			suite.state.On("AtHeight", height).Return(state.AtHeight(height))
 		}

 		// Take snapshot at height of the first block of epoch2, the sealing segment of this snapshot
-		// will have contain block spanning an epoch transition as well as an epoch phase transition.
-		// This will cause our GetLatestProtocolStateSnapshot func to return a snapshot
-		// at block with height 3, the first block of the staking phase of epoch1.
+		// will contain a block spanning an epoch transition as well as an epoch phase transition.
 		snap := state.AtHeight(epoch2.Range()[0])
 		suite.state.On("Final").Return(snap).Once()

-		backend := New(
-			suite.state,
-			nil,
-			nil,
-			nil,
-			nil,
-			nil,
-			nil,
-			nil,
-			nil,
-			suite.chainID,
-			metrics.NewNoopCollector(),
-			nil,
-			false,
-			100,
-			nil,
-			nil,
-			suite.log,
-			DefaultSnapshotHistoryLimit,
-			nil,
-		)
+		params := suite.defaultBackendParams()
+		params.MaxHeightRange = TEST_MAX_HEIGHT
+
+		backend, err := New(params)
+		suite.Require().NoError(err)

 		// query the handler for the latest finalized snapshot
 		bytes, err := backend.GetLatestProtocolStateSnapshot(context.Background())
 		suite.Require().NoError(err)
 		fmt.Println()

-		// we expect the endpoint to return last valid snapshot which is the snapshot at block C (height 2)
-		expectedSnapshotBytes, err := convert.SnapshotToBytes(state.AtHeight(epoch1.Range()[2]))
+		// we expect the endpoint to return the latest snapshot, even though it spans an epoch transition
+		expectedSnapshotBytes, err := convert.SnapshotToBytes(snap)
 		suite.Require().NoError(err)
 		suite.Require().Equal(expectedSnapshotBytes, bytes)
 	})
 }

 // TestGetLatestProtocolStateSnapshot_PhaseTransitionSpan tests our GetLatestProtocolStateSnapshot RPC endpoint
-// where the sealing segment for the state requested at latest finalized block contains a blocks that
+// where the sealing segment for the state requested at latest finalized block contains a block that
 // spans an epoch phase transition.
 func (suite *Suite) TestGetLatestProtocolStateSnapshot_PhaseTransitionSpan() {
 	identities := unittest.CompleteIdentitySet()
 	rootSnapshot := unittest.RootSnapshotFixture(identities)
-	util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) {
-		epochBuilder := unittest.NewEpochBuilder(suite.T(), state)
+	util.RunWithFullProtocolStateAndMutator(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
+		epochBuilder := unittest.NewEpochBuilder(suite.T(), mutableState, state)
 		// build epoch 1
 		// blocks in current state
 		// P <- A(S_P-1) <- B(S_P) <- C(S_A) <- D(S_B) |setup| <- E(S_C) <- F(S_D) |commit|
 		epochBuilder.
 			BuildEpoch().
@@ -313,7 +314,7 @@ func (suite *Suite) TestGetLatestProtocolStateSnapshot_PhaseTransitionSpan() {
 	epoch1, ok := epochBuilder.EpochHeights(1)
 	require.True(suite.T(), ok)

 	// setup AtBlockID mock returns for state
 	for _, height := range epoch1.Range() {
 		suite.state.On("AtHeight", height).Return(state.AtHeight(height))
 	}
@@ -325,53 +326,37 @@
 	snap := state.AtHeight(epoch1.Range()[3])
 	suite.state.On("Final").Return(snap).Once()

-	backend := New(
-		suite.state,
-		nil,
-		nil,
-		nil,
-		nil,
-		nil,
-		nil,
-		nil,
-		nil,
-		suite.chainID,
-		metrics.NewNoopCollector(),
-		nil,
-		false,
-		100,
-		nil,
-		nil,
-		suite.log,
-		DefaultSnapshotHistoryLimit,
-		nil,
-	)
+	params := suite.defaultBackendParams()
+	params.MaxHeightRange = TEST_MAX_HEIGHT
+
+	backend, err := New(params)
+	suite.Require().NoError(err)

 	// query the handler for the latest finalized snapshot
 	bytes, err := backend.GetLatestProtocolStateSnapshot(context.Background())
 	suite.Require().NoError(err)

-	// we expect the endpoint to return last valid snapshot which is the snapshot at block C (height 2)
-	expectedSnapshotBytes, err := convert.SnapshotToBytes(state.AtHeight(epoch1.Range()[2]))
+	// we expect the endpoint to return the latest snapshot, even though it spans an epoch phase transition
+	expectedSnapshotBytes, err := convert.SnapshotToBytes(snap)
 	suite.Require().NoError(err)
 	suite.Require().Equal(expectedSnapshotBytes, bytes)
 	})
 }

 // TestGetLatestProtocolStateSnapshot_EpochTransitionSpan tests our GetLatestProtocolStateSnapshot RPC endpoint
-// where the sealing segment for the state requested at latest finalized block contains a blocks that
+// where the sealing segment for the state requested at latest finalized block contains a block that
 // spans an epoch transition.
func (suite *Suite) TestGetLatestProtocolStateSnapshot_EpochTransitionSpan() { identities := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(identities) - util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { - epochBuilder := unittest.NewEpochBuilder(suite.T(), state) + util.RunWithFullProtocolStateAndMutator(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { + epochBuilder := unittest.NewEpochBuilder(suite.T(), mutableState, state) // build epoch 1 - // blocks in current state + // Blocks in current State // P <- A(S_P-1) <- B(S_P) <- C(S_A) <- D(S_B) |setup| <- E(S_C) <- F(S_D) |commit| epochBuilder.BuildEpoch() - // add more blocks to our state in the commit phase, this will allow + // add more Blocks to our State in the commit phase, this will allow // us to take a snapshot at the height where the epoch1 -> epoch2 transition // and no block spans an epoch phase transition. The third block added will // have a seal for the first block in the commit phase allowing us to avoid @@ -390,7 +375,7 @@ func (suite *Suite) TestGetLatestProtocolStateSnapshot_EpochTransitionSpan() { epoch2, ok := epochBuilder.EpochHeights(2) require.True(suite.T(), ok) - // setup AtHeight mock returns for state + // setup AtHeight mock returns for State for _, height := range append(epoch1.Range(), epoch2.Range()...) { suite.state.On("AtHeight", height).Return(state.AtHeight(height)) } @@ -401,176 +386,504 @@ func (suite *Suite) TestGetLatestProtocolStateSnapshot_EpochTransitionSpan() { snap := state.AtHeight(epoch2.Range()[0]) suite.state.On("Final").Return(snap).Once() - backend := New( - suite.state, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - 100, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + params.MaxHeightRange = TEST_MAX_HEIGHT + + backend, err := New(params) + suite.Require().NoError(err) // query the handler for the latest finalized snapshot bytes, err := backend.GetLatestProtocolStateSnapshot(context.Background()) suite.Require().NoError(err) - // we expect the endpoint to return last valid snapshot which is the snapshot at the final block - // of the previous epoch - expectedSnapshotBytes, err := convert.SnapshotToBytes(state.AtHeight(epoch1.Range()[len(epoch1.Range())-1])) + // we expect endpoint to return the latest snapshot, even though it spans an epoch transition + expectedSnapshotBytes, err := convert.SnapshotToBytes(snap) suite.Require().NoError(err) suite.Require().Equal(expectedSnapshotBytes, bytes) }) } -// TestGetLatestProtocolStateSnapshot_EpochTransitionSpan tests our GetLatestProtocolStateSnapshot RPC endpoint -// where the length of the sealing segment is greater than the configured snapshotHistoryLimit -func (suite *Suite) TestGetLatestProtocolStateSnapshot_HistoryLimit() { +// TestGetProtocolStateSnapshotByBlockID tests our GetProtocolStateSnapshotByBlockID RPC endpoint. 
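+// Since the endpoint hands the snapshot back in serialized form, a client round-trip
+// is simply (a sketch; ctx, backend and blockID are assumed to be in scope):
+//
+//	snapshotBytes, err := backend.GetProtocolStateSnapshotByBlockID(ctx, blockID)
+//	if err != nil {
+//		return err // the gRPC status code distinguishes the failure cases tested below
+//	}
+//	// in these tests, the expected bytes are produced via convert.SnapshotToBytes(snap)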
+func (suite *Suite) TestGetProtocolStateSnapshotByBlockID() {
+    identities := unittest.CompleteIdentitySet()
+    rootSnapshot := unittest.RootSnapshotFixture(identities)
+    util.RunWithFullProtocolStateAndMutator(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
+        epochBuilder := unittest.NewEpochBuilder(suite.T(), mutableState, state)
+        // build epoch 1
+        // blocks in current state
+        // P <- A(S_P-1) <- B(S_P) <- C(S_A) <- D(S_B) |setup| <- E(S_C) <- F(S_D) |commit|
+        epochBuilder.
+            BuildEpoch().
+            CompleteEpoch()
+
+        // get heights of each phase in built epochs
+        epoch1, ok := epochBuilder.EpochHeights(1)
+        require.True(suite.T(), ok)
+
+        // setup AtBlockID, AtHeight and BlockIDByHeight mock returns for state
+        for _, height := range epoch1.Range() {
+            snap := state.AtHeight(height)
+            blockHead, err := snap.Head()
+            suite.Require().NoError(err)
+
+            suite.state.On("AtHeight", height).Return(snap)
+            suite.state.On("AtBlockID", blockHead.ID()).Return(snap)
+            suite.headers.On("BlockIDByHeight", height).Return(blockHead.ID(), nil)
+        }
+
+        // Take snapshot at height of block D (epoch1.heights[2]) for valid segment and valid snapshot
+        snap := state.AtHeight(epoch1.Range()[2])
+        blockHead, err := snap.Head()
+        suite.Require().NoError(err)
+
+        params := suite.defaultBackendParams()
+        params.MaxHeightRange = TEST_MAX_HEIGHT
+
+        backend, err := New(params)
+        suite.Require().NoError(err)
+
+        // query the handler for the snapshot at the requested block ID
+        bytes, err := backend.GetProtocolStateSnapshotByBlockID(context.Background(), blockHead.ID())
+        suite.Require().NoError(err)
+
+        // we expect the endpoint to return the snapshot at the same height we requested
+        expectedSnapshotBytes, err := convert.SnapshotToBytes(snap)
+        suite.Require().NoError(err)
+        suite.Require().Equal(expectedSnapshotBytes, bytes)
+    })
+}
+
+// TestGetProtocolStateSnapshotByBlockID_UnknownQueryBlock tests our GetProtocolStateSnapshotByBlockID RPC endpoint
+// where no block with the given ID was found.
+func (suite *Suite) TestGetProtocolStateSnapshotByBlockID_UnknownQueryBlock() {
+    identities := unittest.CompleteIdentitySet()
+    rootSnapshot := unittest.RootSnapshotFixture(identities)
+    util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) {
+        rootBlock, err := rootSnapshot.Head()
+        suite.Require().NoError(err)
+
+        params := suite.defaultBackendParams()
+        params.MaxHeightRange = TEST_MAX_HEIGHT
+
+        backend, err := New(params)
+        suite.Require().NoError(err)
+
+        // create a new block with root block as parent
+        newBlock := unittest.BlockWithParentFixture(rootBlock)
+        ctx := context.Background()
+
+        suite.state.On("AtBlockID", newBlock.ID()).Return(unittest.StateSnapshotForUnknownBlock())
+
+        // query the handler for the snapshot of a non-existent block
+        snapshotBytes, err := backend.GetProtocolStateSnapshotByBlockID(ctx, newBlock.ID())
+        suite.Require().Nil(snapshotBytes)
+        suite.Require().Error(err)
+        suite.Require().Equal(codes.NotFound, status.Code(err))
+        suite.Require().Equal(status.Errorf(codes.NotFound, "failed to get a valid snapshot: block not found").Error(),
+            err.Error())
+        suite.assertAllExpectations()
+    })
+}
+
+// TestGetProtocolStateSnapshotByBlockID_AtBlockIDInternalError tests our GetProtocolStateSnapshotByBlockID RPC endpoint
+// where an unexpected error from state.AtBlockID occurs, e.g. because the underlying database was closed.
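+// The failure is injected via invalid.NewSnapshot, which, as used in this suite,
+// stands in for a protocol state snapshot whose queries fail with the given error
+// (id below is a placeholder block ID):
+//
+//	suite.state.On("AtBlockID", id).Return(invalid.NewSnapshot(errors.New("runtime-error")))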
+func (suite *Suite) TestGetProtocolStateSnapshotByBlockID_AtBlockIDInternalError() {
+    identities := unittest.CompleteIdentitySet()
+    rootSnapshot := unittest.RootSnapshotFixture(identities)
+    util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) {
+        params := suite.defaultBackendParams()
+        params.MaxHeightRange = TEST_MAX_HEIGHT
+
+        backend, err := New(params)
+        suite.Require().NoError(err)
+
+        ctx := context.Background()
+        newBlock := unittest.BlockFixture()
+
+        expectedError := errors.New("runtime-error")
+        suite.state.On("AtBlockID", newBlock.ID()).Return(invalid.NewSnapshot(expectedError))
+
+        // query the handler for the snapshot
+        snapshotBytes, err := backend.GetProtocolStateSnapshotByBlockID(ctx, newBlock.ID())
+        suite.Require().Nil(snapshotBytes)
+        suite.Require().Error(err)
+        suite.Require().ErrorAs(err, &expectedError)
+        suite.Require().Equal(codes.Internal, status.Code(err))
+        suite.assertAllExpectations()
+    })
+}
+
+// TestGetProtocolStateSnapshotByBlockID_BlockNotFinalizedAtHeight tests our GetProtocolStateSnapshotByBlockID RPC endpoint
+// where the block exists, but no block has been finalized at its height.
+func (suite *Suite) TestGetProtocolStateSnapshotByBlockID_BlockNotFinalizedAtHeight() {
+    identities := unittest.CompleteIdentitySet()
+    rootSnapshot := unittest.RootSnapshotFixture(identities)
+    rootProtocolState, err := rootSnapshot.ProtocolState()
+    require.NoError(suite.T(), err)
+    rootProtocolStateID := rootProtocolState.ID()
+    util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) {
+        rootBlock, err := rootSnapshot.Head()
+        suite.Require().NoError(err)
+
+        params := suite.defaultBackendParams()
+        params.MaxHeightRange = TEST_MAX_HEIGHT
+
+        backend, err := New(params)
+        suite.Require().NoError(err)
+
+        // create a new block with root block as parent
+        newBlock := unittest.BlockWithParentAndPayload(
+            rootBlock,
+            unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+        )
+        ctx := context.Background()
+        // add new block to the chain state
+        err = state.Extend(ctx, unittest.ProposalFromBlock(newBlock))
+        suite.Require().NoError(err)
+
+        // since block was added to the block tree it must be queryable by block ID
+        suite.state.On("AtBlockID", newBlock.ID()).Return(state.AtBlockID(newBlock.ID()))
+        suite.headers.On("BlockIDByHeight", newBlock.Height).Return(flow.ZeroID, storage.ErrNotFound)
+
+        // query the handler for the snapshot of a non-finalized block
+        snapshotBytes, err := backend.GetProtocolStateSnapshotByBlockID(ctx, newBlock.ID())
+        suite.Require().Nil(snapshotBytes)
+        suite.Require().Error(err)
+        suite.Require().Equal(codes.FailedPrecondition, status.Code(err))
+        suite.assertAllExpectations()
+    })
+}
+
+// TestGetProtocolStateSnapshotByBlockID_DifferentBlockFinalizedAtHeight tests our GetProtocolStateSnapshotByBlockID RPC
+// endpoint where a different block than what was queried has been finalized at this height.
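+// Conceptually, the finalization checks exercised by this test and the previous one
+// reduce to the following (a sketch, not the backend's literal implementation):
+//
+//	finalizedID, err := headers.BlockIDByHeight(queried.Height)
+//	// errors.Is(err, storage.ErrNotFound) -> codes.FailedPrecondition (nothing finalized at this height yet)
+//	// finalizedID != queried.ID()         -> codes.InvalidArgument (the queried block was orphaned)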
+func (suite *Suite) TestGetProtocolStateSnapshotByBlockID_DifferentBlockFinalizedAtHeight() {
+    identities := unittest.CompleteIdentitySet()
+    rootSnapshot := unittest.RootSnapshotFixture(identities)
+    rootProtocolState, err := rootSnapshot.ProtocolState()
+    require.NoError(suite.T(), err)
+    rootProtocolStateID := rootProtocolState.ID()
+    util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) {
+        rootBlock, err := rootSnapshot.Head()
+        suite.Require().NoError(err)
+
+        params := suite.defaultBackendParams()
+        params.MaxHeightRange = TEST_MAX_HEIGHT
+
+        backend, err := New(params)
+        suite.Require().NoError(err)
+
+        // create two conflicting blocks with the root block as parent
+        finalizedBlock := unittest.BlockWithParentAndPayload(
+            rootBlock,
+            unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+        )
+        orphanBlock := unittest.BlockWithParentAndPayload(
+            rootBlock,
+            unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+        )
+        ctx := context.Background()
+
+        // add new block to the chain state
+        err = state.Extend(ctx, unittest.ProposalFromBlock(finalizedBlock))
+        suite.Require().NoError(err)
+
+        // add orphan block to the chain state as well
+        err = state.Extend(ctx, unittest.ProposalFromBlock(orphanBlock))
+        suite.Require().NoError(err)
+
+        suite.Equal(finalizedBlock.Height, orphanBlock.Height,
+            "expect both blocks to have the same height, so that they form a fork")
+
+        // since block was added to the block tree it must be queryable by block ID
+        suite.state.On("AtBlockID", orphanBlock.ID()).Return(state.AtBlockID(orphanBlock.ID()))
+
+        // since there are two candidate blocks with the same height, we will return the one that was finalized
+        suite.headers.On("BlockIDByHeight", finalizedBlock.Height).Return(finalizedBlock.ID(), nil)
+
+        // query the handler for the snapshot of the orphaned (non-finalized) block
+        snapshotBytes, err := backend.GetProtocolStateSnapshotByBlockID(ctx, orphanBlock.ID())
+        suite.Require().Nil(snapshotBytes)
+        suite.Require().Error(err)
+        suite.Require().Equal(codes.InvalidArgument, status.Code(err))
+        suite.assertAllExpectations()
+    })
+}
+
+// TestGetProtocolStateSnapshotByBlockID_UnexpectedErrorBlockIDByHeight tests our GetProtocolStateSnapshotByBlockID RPC
+// endpoint where an unexpected error from BlockIDByHeight occurs.
+func (suite *Suite) TestGetProtocolStateSnapshotByBlockID_UnexpectedErrorBlockIDByHeight() { + identities := unittest.CompleteIdentitySet() + rootSnapshot := unittest.RootSnapshotFixture(identities) + rootProtocolState, err := rootSnapshot.ProtocolState() + require.NoError(suite.T(), err) + rootProtocolStateID := rootProtocolState.ID() + util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) { + rootBlock, err := rootSnapshot.Head() + suite.Require().NoError(err) + + params := suite.defaultBackendParams() + params.MaxHeightRange = TEST_MAX_HEIGHT + + backend, err := New(params) + suite.Require().NoError(err) + + // create a new block with root block as parent + newBlock := unittest.BlockWithParentAndPayload( + rootBlock, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + ctx := context.Background() + // add new block to the chain state + err = state.Extend(ctx, unittest.ProposalFromBlock(newBlock)) + suite.Require().NoError(err) + + // since block was added to the block tree it must be queryable by block ID + suite.state.On("AtBlockID", newBlock.ID()).Return(state.AtBlockID(newBlock.ID())) + // expectedError := errors.New("runtime-error") + suite.headers.On("BlockIDByHeight", newBlock.Height).Return(flow.ZeroID, + status.Errorf(codes.Internal, "failed to lookup block id by height %d", newBlock.Height)) + + // query the handler for the snapshot + snapshotBytes, err := backend.GetProtocolStateSnapshotByBlockID(ctx, newBlock.ID()) + suite.Require().Nil(snapshotBytes) + suite.Require().Error(err) + suite.Require().Equal(codes.Internal, status.Code(err)) + suite.assertAllExpectations() + }) +} + +// TestGetProtocolStateSnapshotByBlockID_InvalidSegment tests our GetProtocolStateSnapshotByBlockID RPC endpoint +// for segments between phases and between epochs. We should return a valid snapshot in these edge cases. +func (suite *Suite) TestGetProtocolStateSnapshotByBlockID_InvalidSegment() { + identities := unittest.CompleteIdentitySet() + rootSnapshot := unittest.RootSnapshotFixture(identities) + util.RunWithFullProtocolStateAndMutator(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { + epochBuilder := unittest.NewEpochBuilder(suite.T(), mutableState, state) + // build epoch 1 + // Blocks in current State + // P <- A(S_P-1) <- B(S_P) <- C(S_A) <- D(S_B) |setup| <- E(S_C) <- F(S_D) |commit| + epochBuilder. + BuildEpoch(). 
+            CompleteEpoch()
+
+        // get heights of each phase in built epochs
+        epoch1, ok := epochBuilder.EpochHeights(1)
+        require.True(suite.T(), ok)
+
+        // setup AtBlockID and AtHeight mock returns for state
+        for _, height := range epoch1.Range() {
+            snap := state.AtHeight(height)
+            blockHead, err := snap.Head()
+            suite.Require().NoError(err)
+
+            suite.state.On("AtHeight", height).Return(snap)
+            suite.state.On("AtBlockID", blockHead.ID()).Return(snap)
+            suite.headers.On("BlockIDByHeight", height).Return(blockHead.ID(), nil)
+        }
+
+        backend, err := New(suite.defaultBackendParams())
+        suite.Require().NoError(err)
+
+        suite.T().Run("sealing segment between phases", func(t *testing.T) {
+            // Take snapshot at height of first block of setup phase
+            snap := state.AtHeight(epoch1.SetupRange()[0])
+            block, err := snap.Head()
+            suite.Require().NoError(err)
+
+            expectedSnapshotBytes, err := convert.SnapshotToBytes(snap)
+            suite.Require().NoError(err)
+
+            suite.T().Run("ByBlockID", func(t *testing.T) {
+                bytes, err := backend.GetProtocolStateSnapshotByBlockID(context.Background(), block.ID())
+                suite.Require().NoError(err)
+                suite.Require().Equal(expectedSnapshotBytes, bytes)
+            })
+            suite.T().Run("ByHeight", func(t *testing.T) {
+                bytes, err := backend.GetProtocolStateSnapshotByHeight(context.Background(), block.Height)
+                suite.Require().NoError(err)
+                suite.Require().Equal(expectedSnapshotBytes, bytes)
+            })
+        })
+
+        suite.T().Run("sealing segment between epochs", func(t *testing.T) {
+            // Take snapshot at height of latest finalized block
+            snap := state.Final()
+            currentEpoch, err := snap.Epochs().Current()
+            suite.Require().NoError(err)
+            suite.Require().Equal(epoch1.Counter+1, currentEpoch.Counter(), "expect to be in next epoch")
+            block, err := snap.Head()
+            suite.Require().NoError(err)
+
+            suite.state.On("AtBlockID", block.ID()).Return(snap)
+            suite.state.On("AtHeight", block.Height).Return(snap)
+            suite.headers.On("BlockIDByHeight", block.Height).Return(block.ID(), nil)
+
+            expectedSnapshotBytes, err := convert.SnapshotToBytes(snap)
+            suite.Require().NoError(err)
+
+            suite.T().Run("ByBlockID", func(t *testing.T) {
+                bytes, err := backend.GetProtocolStateSnapshotByBlockID(context.Background(), block.ID())
+                suite.Require().NoError(err)
+                suite.Require().Equal(expectedSnapshotBytes, bytes)
+            })
+            suite.T().Run("ByHeight", func(t *testing.T) {
+                bytes, err := backend.GetProtocolStateSnapshotByHeight(context.Background(), block.Height)
+                suite.Require().NoError(err)
+                suite.Require().Equal(expectedSnapshotBytes, bytes)
+            })
+        })
+    })
+}
+
+// TestGetProtocolStateSnapshotByHeight tests the happy path of our GetProtocolStateSnapshotByHeight RPC endpoint,
+// where the requested height has been finalized and indexed.
+func (suite *Suite) TestGetProtocolStateSnapshotByHeight() {
 	identities := unittest.CompleteIdentitySet()
 	rootSnapshot := unittest.RootSnapshotFixture(identities)
-	util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) {
-		epochBuilder := unittest.NewEpochBuilder(suite.T(), state).BuildEpoch().CompleteEpoch()
+	util.RunWithFullProtocolStateAndMutator(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
+		epochBuilder := unittest.NewEpochBuilder(suite.T(), mutableState, state)
+		// build epoch 1
+		// blocks in current state
+		// P <- A(S_P-1) <- B(S_P) <- C(S_A) <- D(S_B) |setup| <- E(S_C) <- F(S_D) |commit|
+		epochBuilder.
+			BuildEpoch().
+ CompleteEpoch() // get heights of each phase in built epochs epoch1, ok := epochBuilder.EpochHeights(1) require.True(suite.T(), ok) - // setup AtBlockID mock returns for state + // setup AtHeight mock returns for State for _, height := range epoch1.Range() { suite.state.On("AtHeight", height).Return(state.AtHeight(height)) } - // Take snapshot at height of block E (epoch1.heights[4]) the sealing segment for this snapshot - // is C(S_A) <- D(S_B) |setup| <- E(S_C) which spans the epoch setup phase. This will force - // our RPC endpoint to return a snapshot at block D which is the snapshot at the boundary where a phase - // transition happens. - snap := state.AtHeight(epoch1.Range()[4]) - suite.state.On("Final").Return(snap).Once() + // Take snapshot at height of block D (epoch1.heights[2]) for valid segment and valid snapshot + snap := state.AtHeight(epoch1.Range()[2]) - // very short history limit, any segment with any blocks spanning any transition should force the endpoint to return a history limit error - snapshotHistoryLimit := 1 - backend := New( - suite.state, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - snapshotHistoryLimit, - nil, + params := suite.defaultBackendParams() + params.MaxHeightRange = TEST_MAX_HEIGHT + + backend, err := New(params) + suite.Require().NoError(err) + + // query the handler for the latest finalized snapshot + bytes, err := backend.GetProtocolStateSnapshotByHeight(context.Background(), epoch1.Range()[2]) + suite.Require().NoError(err) + + // we expect the endpoint to return the snapshot at the same height we requested + expectedSnapshotBytes, err := convert.SnapshotToBytes(snap) + suite.Require().NoError(err) + suite.Require().Equal(expectedSnapshotBytes, bytes) + }) +} + +// TestGetProtocolStateSnapshotByHeight_NonFinalizedBlocks tests our GetProtocolStateSnapshotByHeight RPC endpoint +// where non finalized block is added to state +func (suite *Suite) TestGetProtocolStateSnapshotByHeight_NonFinalizedBlocks() { + identities := unittest.CompleteIdentitySet() + rootSnapshot := unittest.RootSnapshotFixture(identities) + rootProtocolState, err := rootSnapshot.ProtocolState() + require.NoError(suite.T(), err) + rootProtocolStateID := rootProtocolState.ID() + util.RunWithFullProtocolState(suite.T(), rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) { + rootBlock, err := rootSnapshot.Head() + suite.Require().NoError(err) + // create a new block with root block as parent + newBlock := unittest.BlockWithParentAndPayload( + rootBlock, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), ) + ctx := context.Background() + // add new block to the chain state + err = state.Extend(ctx, unittest.ProposalFromBlock(newBlock)) + suite.Require().NoError(err) + + // since block was not yet finalized AtHeight must return an invalid snapshot + suite.state.On("AtHeight", newBlock.Height).Return(invalid.NewSnapshot(realstate.ErrUnknownSnapshotReference)) + + params := suite.defaultBackendParams() + params.MaxHeightRange = TEST_MAX_HEIGHT + + backend, err := New(params) + suite.Require().NoError(err) + + // query the handler for the snapshot for non finalized block + bytes, err := backend.GetProtocolStateSnapshotByHeight(context.Background(), newBlock.Height) - // the handler should return a snapshot history limit error - _, err := backend.GetLatestProtocolStateSnapshot(context.Background()) - 
suite.Require().ErrorIs(err, SnapshotHistoryLimitErr) + suite.Require().Nil(bytes) + suite.Require().Error(err) + suite.Require().Equal(status.Errorf(codes.NotFound, "failed to find snapshot: %v", + realstate.ErrUnknownSnapshotReference).Error(), + err.Error()) }) } func (suite *Suite) TestGetLatestSealedBlockHeader() { // setup the mocks suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() + suite.state.On("Sealed").Return(suite.snapshot, nil) - block := unittest.BlockHeaderFixture() - suite.snapshot.On("Head").Return(block, nil).Once() + params := suite.defaultBackendParams() - suite.state.On("Sealed").Return(suite.snapshot, nil) - suite.snapshot.On("Head").Return(block, nil).Once() + backend, err := New(params) + suite.Require().NoError(err) - backend := New( - suite.state, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + suite.Run("GetLatestSealedBlockHeader - happy path", func() { + block := unittest.BlockHeaderFixture() + suite.snapshot.On("Head").Return(block, nil).Once() - // query the handler for the latest sealed block - header, status, err := backend.GetLatestBlockHeader(context.Background(), true) - suite.checkResponse(header, err) + // query the handler for the latest sealed block + header, stat, err := backend.GetLatestBlockHeader(context.Background(), true) + suite.Require().NoError(err) + suite.Require().NotNil(header) - // make sure we got the latest sealed block - suite.Require().Equal(block.ID(), header.ID()) - suite.Require().Equal(block.Height, header.Height) - suite.Require().Equal(block.ParentID, header.ParentID) - suite.Require().Equal(status, flow.BlockStatusSealed) + // make sure we got the latest sealed block + suite.Require().Equal(block.ID(), header.ID()) + suite.Require().Equal(block.Height, header.Height) + suite.Require().Equal(block.ParentID, header.ParentID) + suite.Require().Equal(stat, flow.BlockStatusSealed) - suite.assertAllExpectations() + suite.assertAllExpectations() + }) + + // tests that signaler context received error when node state is inconsistent + suite.Run("GetLatestSealedBlockHeader - fails with inconsistent node's state", func() { + err := fmt.Errorf("inconsistent node's state") + suite.snapshot.On("Head").Return(nil, err) + + // mock signaler context expect an error + signCtxErr := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + signalerCtx := irrecoverable.WithSignalerContext(context.Background(), + irrecoverable.NewMockSignalerContextExpectError(suite.T(), context.Background(), signCtxErr)) + + actualHeader, actualStatus, err := backend.GetLatestBlockHeader(signalerCtx, true) + suite.Require().Error(err) + suite.Require().Nil(actualHeader) + suite.Require().Equal(flow.BlockStatusUnknown, actualStatus) + }) } func (suite *Suite) TestGetTransaction() { suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() transaction := unittest.TransactionFixture() - expected := transaction.TransactionBody suite.transactions. On("ByID", transaction.ID()). - Return(&expected, nil). + Return(&transaction, nil). 
Once() - backend := New( - suite.state, - nil, - nil, - nil, - nil, - nil, - suite.transactions, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + + backend, err := New(params) + suite.Require().NoError(err) actual, err := backend.GetTransaction(context.Background(), transaction.ID()) - suite.checkResponse(actual, err) + suite.Require().NoError(err) + suite.Require().NotNil(actual) - suite.Require().Equal(expected, *actual) + suite.Require().Equal(transaction, *actual) suite.assertAllExpectations() } @@ -582,36 +895,20 @@ func (suite *Suite) TestGetCollection() { suite.collections. On("LightByID", expected.ID()). - Return(&expected, nil). + Return(expected, nil). Once() - backend := New( - suite.state, - nil, - nil, - nil, - nil, - suite.collections, - suite.transactions, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + + backend, err := New(params) + suite.Require().NoError(err) actual, err := backend.GetCollectionByID(context.Background(), expected.ID()) suite.transactions.AssertExpectations(suite.T()) - suite.checkResponse(actual, err) + suite.Require().NoError(err) + suite.Require().NotNil(actual) - suite.Equal(expected, *actual) + suite.Equal(expected, actual) suite.assertAllExpectations() } @@ -624,21 +921,15 @@ func (suite *Suite) TestGetTransactionResultByIndex() { blockId := block.ID() index := uint32(0) - suite.snapshot.On("Head").Return(block.Header, nil) - // block storage returns the corresponding block suite.blocks. On("ByID", blockId). - Return(&block, nil) + Return(block, nil) - _, fixedENIDs := suite.setupReceipts(&block) + _, fixedENIDs := suite.setupReceipts(block) suite.state.On("Final").Return(suite.snapshot, nil).Maybe() suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - // create a mock connection factory - connFactory := new(backendmock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) - exeEventReq := &execproto.GetTransactionByIndexRequest{ BlockId: blockId[:], Index: index, @@ -648,124 +939,141 @@ func (suite *Suite) TestGetTransactionResultByIndex() { Events: nil, } - backend := New( - suite.state, - nil, - nil, - suite.blocks, - suite.headers, - suite.collections, - suite.transactions, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - flow.IdentifierList(fixedENIDs.NodeIDs()).Strings(), - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) - suite.execClient. - On("GetTransactionResultByIndex", ctx, exeEventReq). - Return(exeEventResp, nil). - Once() + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - result, err := backend.GetTransactionResultByIndex(ctx, blockId, index) - suite.checkResponse(result, err) - suite.Assert().Equal(result.BlockHeight, block.Header.Height) + params := suite.defaultBackendParams() + // the connection factory should be used to get the execution node client + params.ConnFactory = suite.setupConnectionFactory() - suite.assertAllExpectations() + backend, err := New(params) + suite.Require().NoError(err) + + suite.execClient. 
+ On("GetTransactionResultByIndex", mock.Anything, exeEventReq). + Return(exeEventResp, nil) + + suite.Run("TestGetTransactionResultByIndex - happy path", func() { + suite.snapshot.On("Head").Return(block.ToHeader(), nil).Once() + result, err := backend.GetTransactionResultByIndex(ctx, blockId, index, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) + suite.Assert().Equal(result.BlockHeight, block.Height) + + suite.assertAllExpectations() + }) + + // tests that signaler context received error when node state is inconsistent + suite.Run("TestGetTransactionResultByIndex - fails with inconsistent node's state", func() { + err := fmt.Errorf("inconsistent node's state") + suite.snapshot.On("Head").Return(nil, err).Once() + + // mock signaler context expect an error + signCtxErr := fmt.Errorf("failed to derive transaction status: %w", irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err)) + signalerCtx := irrecoverable.WithSignalerContext(context.Background(), + irrecoverable.NewMockSignalerContextExpectError(suite.T(), context.Background(), signCtxErr)) + + actual, err := backend.GetTransactionResultByIndex(signalerCtx, blockId, index, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().Error(err) + suite.Require().Nil(actual) + }) } func (suite *Suite) TestGetTransactionResultsByBlockID() { - head := unittest.BlockHeaderFixture() suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() - suite.snapshot.On("Head").Return(head, nil).Maybe() ctx := context.Background() - block := unittest.BlockFixture() + + sporkRootBlockHeight := suite.state.Params().SporkRootBlockHeight() + block := unittest.BlockFixture( + unittest.Block.WithHeight(sporkRootBlockHeight + 1), + ) blockId := block.ID() // block storage returns the corresponding block suite.blocks. On("ByID", blockId). - Return(&block, nil) + Return(block, nil) - _, fixedENIDs := suite.setupReceipts(&block) + _, fixedENIDs := suite.setupReceipts(block) suite.state.On("Final").Return(suite.snapshot, nil).Maybe() suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - // create a mock connection factory - connFactory := new(backendmock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) - exeEventReq := &execproto.GetTransactionsByBlockIDRequest{ BlockId: blockId[:], } exeEventResp := &execproto.GetTransactionResultsResponse{ - TransactionResults: []*execproto.GetTransactionResultResponse{{}}, + TransactionResults: []*execproto.GetTransactionResultResponse{{}}, + EventEncodingVersion: entitiesproto.EventEncodingVersion_CCF_V0, } - backend := New( - suite.state, - nil, - nil, - suite.blocks, - suite.headers, - suite.collections, - suite.transactions, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - flow.IdentifierList(fixedENIDs.NodeIDs()).Strings(), - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + + params := suite.defaultBackendParams() + // the connection factory should be used to get the execution node client + params.ConnFactory = suite.setupConnectionFactory() + params.ScheduledCallbacksEnabled = true + + backend, err := New(params) + suite.Require().NoError(err) + suite.execClient. - On("GetTransactionResultsByBlockID", ctx, exeEventReq). 
- Return(exeEventResp, nil). - Once() + On("GetTransactionResultsByBlockID", mock.Anything, exeEventReq). + Return(exeEventResp, nil) - result, err := backend.GetTransactionResultsByBlockID(ctx, blockId) - suite.checkResponse(result, err) + suite.Run("GetTransactionResultsByBlockID - happy path", func() { + suite.snapshot.On("Head").Return(block.ToHeader(), nil).Once() - suite.assertAllExpectations() + result, err := backend.GetTransactionResultsByBlockID(ctx, blockId, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) + + suite.assertAllExpectations() + }) + + // tests that the signaler context receives an error when the node's state is inconsistent + suite.Run("GetTransactionResultsByBlockID - fails with inconsistent node's state", func() { + err := fmt.Errorf("inconsistent node's state") + suite.snapshot.On("Head").Return(nil, err).Once() + + // mock signaler context expects an error + signCtxErr := fmt.Errorf("failed to derive transaction status: %w", irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err)) + signalerCtx := irrecoverable.WithSignalerContext(context.Background(), + irrecoverable.NewMockSignalerContextExpectError(suite.T(), context.Background(), signCtxErr)) + + actual, err := backend.GetTransactionResultsByBlockID(signalerCtx, blockId, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().Error(err) + suite.Require().Nil(actual) + }) } // TestTransactionStatusTransition tests that the status of transaction changes from Finalized to Sealed -// when the protocol state is updated +// when the protocol state is updated func (suite *Suite) TestTransactionStatusTransition() { suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() ctx := context.Background() collection := unittest.CollectionFixture(1) transactionBody := collection.Transactions[0] - block := unittest.BlockFixture() - block.Header.Height = 2 - headBlock := unittest.BlockFixture() - headBlock.Header.Height = block.Header.Height - 1 // head is behind the current block - block.SetPayload( - unittest.PayloadFixture( + block := unittest.BlockFixture( + unittest.Block.WithHeight(2), + unittest.Block.WithPayload(unittest.PayloadFixture( unittest.WithGuarantees( - unittest.CollectionGuaranteesWithCollectionIDFixture([]*flow.Collection{&collection})...))) + unittest.CollectionGuaranteesWithCollectionIDFixture([]*flow.Collection{&collection})...), + )), + ) + headBlock := unittest.BlockFixture( + unittest.Block.WithHeight(block.Height - 1), // head is behind the current block + ) suite.snapshot. On("Head"). - Return(headBlock.Header, nil) + Return(func() *flow.Header { + return headBlock.ToHeader() + }, nil) light := collection.Light() - suite.collections.On("LightByID", light.ID()).Return(&light, nil) + suite.collections.On("LightByID", collection.ID()).Return(light, nil) // transaction storage returns the corresponding transaction suite.transactions. @@ -775,24 +1083,19 @@ func (suite *Suite) TestTransactionStatusTransition() { // collection storage returns the corresponding collection suite.collections. On("LightByTransactionID", transactionBody.ID()). - Return(&light, nil) + Return(light, nil) // block storage returns the corresponding block suite.blocks. On("ByCollectionID", collection.ID()). 
- Return(&block, nil) + Return(block, nil) txID := transactionBody.ID() blockID := block.ID() - _, fixedENIDs := suite.setupReceipts(&block) + _, fixedENIDs := suite.setupReceipts(block) suite.state.On("Final").Return(suite.snapshot, nil).Maybe() suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - // create a mock connection factory - connFactory := new(backendmock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) - connFactory.On("InvalidateExecutionAPIClient", mock.Anything) - exeEventReq := &execproto.GetTransactionResultRequest{ BlockId: blockID[:], TransactionId: txID[:], @@ -802,39 +1105,27 @@ func (suite *Suite) TestTransactionStatusTransition() { Events: nil, } - backend := New( - suite.state, - nil, - nil, - suite.blocks, - suite.headers, - suite.collections, - suite.transactions, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - flow.IdentifierList(fixedENIDs.NodeIDs()).Strings(), - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + + params := suite.defaultBackendParams() + // the connection factory should be used to get the execution node client + params.ConnFactory = suite.setupConnectionFactory() + + backend, err := New(params) + suite.Require().NoError(err) // Successfully return empty event list suite.execClient. On("GetTransactionResult", ctx, exeEventReq). Return(exeEventResp, status.Errorf(codes.NotFound, "not found")). - Once() + Times(len(fixedENIDs)) // should call each EN once // first call - when block under test is greater height than the sealed head, but execution node does not know about Tx - result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) - // status should be finalized since the sealed blocks is smaller in height + // status should be finalized since the sealed Blocks is smaller in height suite.Assert().Equal(flow.TransactionStatusFinalized, result.Status) // block ID should be included in the response @@ -846,29 +1137,32 @@ func (suite *Suite) TestTransactionStatusTransition() { Return(exeEventResp, nil) // second call - when block under test's height is greater height than the sealed head - result, err = backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + result, err = backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) // status should be executed since no `NotFound` error in the `GetTransactionResult` call suite.Assert().Equal(flow.TransactionStatusExecuted, result.Status) // now let the head block be finalized - headBlock.Header.Height = block.Header.Height + 1 + headBlock.Height = block.Height + 1 // third call - when block under test's height is less than sealed head's height - result, err = backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + result, err = backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, 
entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) - // status should be sealed since the sealed blocks is greater in height + // status should be sealed since the sealed block is greater in height suite.Assert().Equal(flow.TransactionStatusSealed, result.Status) // now go far into the future - headBlock.Header.Height = block.Header.Height + flow.DefaultTransactionExpiry + 1 + headBlock.Height = block.Height + flow.DefaultTransactionExpiry + 1 // fourth call - when the block under test's height is so much less than the head's height that it's considered expired, // but since there is an execution result, it should retain its sealed status - result, err = backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + result, err = backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) // status should remain sealed since an execution result exists suite.Assert().Equal(flow.TransactionStatusSealed, result.Status) @@ -877,7 +1171,7 @@ } // TestTransactionExpiredStatusTransition tests that the status -// of transaction changes from Pending to Expired when enough blocks pass +// of a transaction changes from Pending to Expired when enough blocks pass func (suite *Suite) TestTransactionExpiredStatusTransition() { suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() suite.state.On("Final").Return(suite.snapshot, nil).Maybe() @@ -885,26 +1179,26 @@ ctx := context.Background() collection := unittest.CollectionFixture(1) transactionBody := collection.Transactions[0] - block := unittest.BlockFixture() - block.Header.Height = 2 - transactionBody.SetReferenceBlockID(block.ID()) + block := unittest.BlockFixture( + unittest.Block.WithHeight(2), + ) + transactionBody.ReferenceBlockID = block.ID() - headBlock := unittest.BlockFixture() - headBlock.Header.Height = block.Header.Height - 1 // head is behind the current block + headBlock := unittest.BlockFixture( + unittest.Block.WithHeight(block.Height - 1), // head is behind the current block + ) // set up GetLastFullBlockHeight mock - fullHeight := headBlock.Header.Height - suite.blocks.On("GetLastFullBlockHeight").Return( - func() uint64 { return fullHeight }, - func() error { return nil }, - ) + fullHeight := headBlock.Height suite.snapshot. On("Head"). - Return(headBlock.Header, nil) + Return(func() *flow.Header { + return headBlock.ToHeader() + }, nil) snapshotAtBlock := new(protocol.Snapshot) - snapshotAtBlock.On("Head").Return(block.Header, nil) + snapshotAtBlock.On("Head").Return(block.ToHeader(), nil) suite.state. On("AtBlockID", block.ID()). 
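Note for reviewers: the four GetTransactionResult calls in the hunk above only move the sealed head relative to the transaction's block. The height rule they exercise can be condensed into the following sketch; deriveStatus and its parameters are hypothetical names for illustration, not the backend's actual API.

// deriveStatus condenses the transitions driven above: a transaction in a
// sealed block is final; an execution-node result before sealing means
// Executed; otherwise the transaction is merely Finalized.
func deriveStatus(txBlockHeight, sealedHeight uint64, executed bool) flow.TransactionStatus {
	if sealedHeight >= txBlockHeight {
		// the block containing the transaction is sealed
		return flow.TransactionStatusSealed
	}
	if executed {
		// an execution node returned a result, but the block is not yet sealed
		return flow.TransactionStatusExecuted
	}
	// included in a finalized block, awaiting execution
	return flow.TransactionStatusFinalized
}

This also explains the fourth call: once an execution result exists, the status stays Sealed even after the expiry window has passed.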
@@ -922,75 +1216,64 @@ func (suite *Suite) TestTransactionExpiredStatusTransition() { txID := transactionBody.ID() - backend := New( - suite.state, - nil, - nil, - suite.blocks, - suite.headers, - suite.collections, - suite.transactions, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + + backend, err := New(params) + suite.Require().NoError(err) // should return pending status when we have not observed an expiry block suite.Run("pending", func() { // referenced block isn't known yet, so should return pending status - result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusPending, result.Status) }) // should return pending status when we have observed an expiry block but - // have not observed all intermediary collections + // have not yet observed all intermediary collections suite.Run("expiry un-confirmed", func() { - suite.Run("ONLY finalized expiry block", func() { // we have finalized an expiry block - headBlock.Header.Height = block.Header.Height + flow.DefaultTransactionExpiry + 1 - // we have NOT observed all intermediary collections - fullHeight = block.Header.Height + flow.DefaultTransactionExpiry/2 - - result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + headBlock.Height = block.Height + flow.DefaultTransactionExpiry + 1 + // we have NOT observed all intermediary collections + fullHeight = block.Height + flow.DefaultTransactionExpiry/2 + err := suite.lastFullBlockHeight.Set(fullHeight) + suite.Require().NoError(err) + + result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusPending, result.Status) }) - suite.Run("ONLY observed intermediary collections", func() { + + // we have observed all intermediary collections + fullHeight = block.Height + flow.DefaultTransactionExpiry + 1 + err = suite.lastFullBlockHeight.Set(fullHeight) + suite.Require().NoError(err) + + suite.Run("ONLY observed intermediary collections", func() { // we have NOT finalized an expiry block - headBlock.Header.Height = block.Header.Height + flow.DefaultTransactionExpiry/2 - // we have observed all intermediary collections - fullHeight = block.Header.Height + flow.DefaultTransactionExpiry + 1 + headBlock.Height = block.Height + flow.DefaultTransactionExpiry/2 - result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusPending, result.Status) }) - }) + // should return expired status only when we have observed an expiry block + // and have observed all intermediary collections + suite.Run("expired", func() { + // we have finalized an expiry block + headBlock.Height = block.Height + flow.DefaultTransactionExpiry + 1 - // should 
return expired status only when we have observed an expiry block - // and have observed all intermediary collections - suite.Run("expired", func() { - // we have finalized an expiry block - headBlock.Header.Height = block.Header.Height + flow.DefaultTransactionExpiry + 1 - // we have observed all intermediary collections - fullHeight = block.Header.Height + flow.DefaultTransactionExpiry + 1 - - result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) - suite.Assert().Equal(flow.TransactionStatusExpired, result.Status) + result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) + suite.Assert().Equal(flow.TransactionStatusExpired, result.Status) + }) }) suite.assertAllExpectations() @@ -998,36 +1281,38 @@ func (suite *Suite) TestTransactionExpiredStatusTransition() { // TestTransactionPendingToFinalizedStatusTransition tests that the status of a transaction changes from Pending to Finalized func (suite *Suite) TestTransactionPendingToFinalizedStatusTransition() { - ctx := context.Background() collection := unittest.CollectionFixture(1) transactionBody := collection.Transactions[0] // block which will eventually contain the transaction - block := unittest.BlockFixture() - block.SetPayload( - unittest.PayloadFixture( + block := unittest.BlockFixture( + unittest.Block.WithPayload(unittest.PayloadFixture( unittest.WithGuarantees( - unittest.CollectionGuaranteesWithCollectionIDFixture([]*flow.Collection{&collection})...))) + unittest.CollectionGuaranteesWithCollectionIDFixture([]*flow.Collection{&collection})...), + )), + ) blockID := block.ID() // reference block to which the transaction points to - refBlock := unittest.BlockFixture() + refBlock := unittest.BlockFixture( + unittest.Block.WithHeight(2), + ) refBlockID := refBlock.ID() - refBlock.Header.Height = 2 - transactionBody.SetReferenceBlockID(refBlockID) + transactionBody.ReferenceBlockID = refBlockID txID := transactionBody.ID() - headBlock := unittest.BlockFixture() - headBlock.Header.Height = refBlock.Header.Height - 1 // head is behind the current refBlock + headBlock := unittest.BlockFixture( + unittest.Block.WithHeight(refBlock.Height - 1), // head is behind the current refBlock + ) suite.snapshot. On("Head"). - Return(headBlock.Header, nil) + Return(headBlock.ToHeader(), nil) snapshotAtBlock := new(protocol.Snapshot) - snapshotAtBlock.On("Head").Return(refBlock.Header, nil) + snapshotAtBlock.On("Head").Return(refBlock.ToHeader(), nil) - _, enIDs := suite.setupReceipts(&block) + _, enIDs := suite.setupReceipts(block) suite.state.On("Final").Return(suite.snapshot, nil).Maybe() suite.snapshot.On("Identities", mock.Anything).Return(enIDs, nil) @@ -1043,7 +1328,7 @@ func (suite *Suite) TestTransactionPendingToFinalizedStatusTransition() { On("ByID", txID). Return(transactionBody, nil) - currentState := flow.TransactionStatusPending // marker for the current state + currentState := flow.TransactionStatusPending // marker for the current transaction status // collection storage returns a not found error if tx is pending, else it returns the collection light reference suite.collections. On("LightByTransactionID", txID). 
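Note for reviewers: the stub above keys its answer off the mutable currentState marker. This works because mockery-generated mocks invoke function-typed return values at call time, so a single On(...) registration can change its answer between subtests. A self-contained sketch of the idiom follows; the variable names are illustrative only.

pending := true // mutable test state captured by the closures below
collections.
	On("LightByTransactionID", txID).
	Return(
		// both closures are re-evaluated on every call, not once at stubbing time
		func(flow.Identifier) *flow.LightCollection {
			if pending {
				return nil
			}
			return light
		},
		func(flow.Identifier) error {
			if pending {
				return storage.ErrNotFound // tx not yet part of any finalized collection
			}
			return nil
		},
	)

The same idiom appears throughout this patch, e.g. the snapshot's Head stub returning func() *flow.Header, so mutated fixture heights are observed by later calls.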
@@ -1052,7 +1337,7 @@ func (suite *Suite) TestTransactionPendingToFinalizedStatusTransition() { return nil } collLight := collection.Light() - return &collLight + return collLight }, func(txID flow.Identifier) error { if currentState == flow.TransactionStatusPending { @@ -1062,14 +1347,14 @@ func (suite *Suite) TestTransactionPendingToFinalizedStatusTransition() { }) light := collection.Light() - suite.collections.On("LightByID", mock.Anything).Return(&light, nil) + suite.collections.On("LightByID", mock.Anything).Return(light, nil) // refBlock storage returns the corresponding refBlock suite.blocks. On("ByCollectionID", collection.ID()). - Return(&block, nil) + Return(block, nil) - receipts, _ := suite.setupReceipts(&block) + receipts, _ := suite.setupReceipts(block) exeEventReq := &execproto.GetTransactionResultRequest{ BlockId: blockID[:], @@ -1084,40 +1369,25 @@ func (suite *Suite) TestTransactionPendingToFinalizedStatusTransition() { suite.execClient. On("GetTransactionResult", ctx, exeEventReq). Return(exeEventResp, status.Errorf(codes.NotFound, "not found")). - Once() + Times(len(enIDs)) // should call each EN once // create a mock connection factory connFactory := suite.setupConnectionFactory() - backend := New( - suite.state, - nil, - nil, - suite.blocks, - suite.headers, - suite.collections, - suite.transactions, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - 100, - nil, - flow.IdentifierList(enIDs.NodeIDs()).Strings(), - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + params.ConnFactory = connFactory + params.MaxHeightRange = TEST_MAX_HEIGHT + suite.preferredExecutionNodeIDs = flow.IdentifierList{receipts[0].ExecutorID} - preferredENIdentifiers = flow.IdentifierList{receipts[0].ExecutorID} + backend, err := New(params) + suite.Require().NoError(err) // should return pending status when we have not observed collection for the transaction suite.Run("pending", func() { currentState = flow.TransactionStatusPending - result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusPending, result.Status) // assert that no call to an execution node is made suite.execClient.AssertNotCalled(suite.T(), "GetTransactionResult", mock.Anything, mock.Anything) @@ -1127,8 +1397,9 @@ func (suite *Suite) TestTransactionPendingToFinalizedStatusTransition() { // preceding sealed refBlock) suite.Run("finalized", func() { currentState = flow.TransactionStatusFinalized - result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) suite.Assert().Equal(flow.TransactionStatusFinalized, result.Status) }) @@ -1138,7 +1409,6 @@ func (suite *Suite) TestTransactionPendingToFinalizedStatusTransition() { // TestTransactionResultUnknown tests that the status of transaction is reported as unknown when it is not found in the // local storage func (suite *Suite) TestTransactionResultUnknown() { - ctx := 
context.Background() txID := unittest.IdentifierFixture() @@ -1147,31 +1417,15 @@ func (suite *Suite) TestTransactionResultUnknown() { On("ByID", txID). Return(nil, storage.ErrNotFound) - backend := New( - suite.state, - nil, - nil, - nil, - nil, - nil, - suite.transactions, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + + backend, err := New(params) + suite.Require().NoError(err) // first call - when block under test is greater height than the sealed head, but execution node does not know about Tx - result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Require().NotNil(result) // status should be reported as unknown suite.Assert().Equal(flow.TransactionStatusUnknown, result.Status) @@ -1181,217 +1435,51 @@ func (suite *Suite) TestGetLatestFinalizedBlock() { suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - // setup the mocks - expected := unittest.BlockFixture() - header := expected.Header - - suite.snapshot. - On("Head"). - Return(header, nil).Once() - - headerClone := *header - headerClone.Height = 0 - - suite.snapshot. - On("Head"). - Return(&headerClone, nil). - Once() - - suite.blocks. - On("ByHeight", header.Height). - Return(&expected, nil) - - backend := New( - suite.state, - nil, - nil, - suite.blocks, - nil, - nil, - nil, - nil, - nil, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) - - // query the handler for the latest finalized header - actual, status, err := backend.GetLatestBlock(context.Background(), false) - suite.checkResponse(actual, err) - - // make sure we got the latest header - suite.Require().Equal(expected, *actual) - suite.Assert().Equal(status, flow.BlockStatusFinalized) - - suite.assertAllExpectations() -} - -type mockCloser struct{} - -func (mc *mockCloser) Close() error { return nil } - -func (suite *Suite) TestGetEventsForBlockIDs() { - suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - - events := getEvents(10) - validExecutorIdentities := flow.IdentityList{} - - setupStorage := func(n int) []*flow.Header { - headers := make([]*flow.Header, n) - ids := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) - - for i := 0; i < n; i++ { - b := unittest.BlockFixture() - suite.headers. - On("ByBlockID", b.ID()). - Return(b.Header, nil).Once() - - headers[i] = b.Header - - receipt1 := unittest.ReceiptForBlockFixture(&b) - receipt1.ExecutorID = ids[0].NodeID - receipt2 := unittest.ReceiptForBlockFixture(&b) - receipt2.ExecutorID = ids[1].NodeID - receipt1.ExecutionResult = receipt2.ExecutionResult - suite.receipts. - On("ByBlockID", b.ID()). - Return(flow.ExecutionReceiptList{receipt1, receipt2}, nil).Once() - validExecutorIdentities = append(validExecutorIdentities, ids...) 
- } + params := suite.defaultBackendParams() - return headers - } - blockHeaders := setupStorage(5) - - suite.snapshot.On("Identities", mock.Anything).Return(validExecutorIdentities, nil) - validENIDs := flow.IdentifierList(validExecutorIdentities.NodeIDs()) - - // create a mock connection factory - connFactory := new(backendmock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) - - // create the expected results from execution node and access node - exeResults := make([]*execproto.GetEventsForBlockIDsResponse_Result, len(blockHeaders)) - - for i := 0; i < len(blockHeaders); i++ { - exeResults[i] = &execproto.GetEventsForBlockIDsResponse_Result{ - BlockId: convert.IdentifierToMessage(blockHeaders[i].ID()), - BlockHeight: blockHeaders[i].Height, - Events: convert.EventsToMessages(events), - } - } - - expected := make([]flow.BlockEvents, len(blockHeaders)) - for i := 0; i < len(blockHeaders); i++ { - expected[i] = flow.BlockEvents{ - BlockID: blockHeaders[i].ID(), - BlockHeight: blockHeaders[i].Height, - BlockTimestamp: blockHeaders[i].Timestamp, - Events: events, - } - } - - // create the execution node response - exeResp := &execproto.GetEventsForBlockIDsResponse{ - Results: exeResults, - } - - ctx := context.Background() - - blockIDs := make([]flow.Identifier, len(blockHeaders)) - for i, header := range blockHeaders { - blockIDs[i] = header.ID() - } - exeReq := &execproto.GetEventsForBlockIDsRequest{ - BlockIds: convert.IdentifiersToMessages(blockIDs), - Type: string(flow.EventAccountCreated), - } - - // create receipt mocks that always returns empty - receipts := new(storagemock.ExecutionReceipts) - receipts. - On("ByBlockID", mock.Anything). - Return(flow.ExecutionReceiptList{}, nil) + backend, err := New(params) + suite.Require().NoError(err) - // expect two calls to the executor api client (one for each of the following 2 test cases) - suite.execClient. - On("GetEventsForBlockIDs", ctx, exeReq). - Return(exeResp, nil). - Once() + suite.Run("GetLatestFinalizedBlock - happy path", func() { + // setup the mocks + expected := unittest.BlockFixture() + header := expected.ToHeader() - suite.Run("with an execution node chosen using block ID form the list of Fixed ENs", func() { + suite.snapshot. + On("Head"). + Return(header, nil).Once() - // create the handler - backend := New( - suite.state, - nil, - nil, - nil, - suite.headers, - nil, - nil, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - validENIDs.Strings(), // set the fixed EN Identifiers to the generated execution IDs - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + suite.blocks. + On("ByHeight", header.Height). 
+ Return(expected, nil) - // execute request - actual, err := backend.GetEventsForBlockIDs(ctx, string(flow.EventAccountCreated), blockIDs) - suite.checkResponse(actual, err) + // query the handler for the latest finalized header + actual, stat, err := backend.GetLatestBlock(context.Background(), false) + suite.Require().NoError(err) + suite.Require().NotNil(actual) + // make sure we got the latest header suite.Require().Equal(expected, actual) + suite.Assert().Equal(stat, flow.BlockStatusFinalized) + + suite.assertAllExpectations() }) - suite.Run("with an empty block ID list", func() { + // tests that the signaler context receives an error when the node's state is inconsistent + suite.Run("GetLatestFinalizedBlock - fails with inconsistent node's state", func() { + err := fmt.Errorf("inconsistent node's state") + suite.snapshot.On("Head").Return(nil, err) - // create the handler - backend := New( - suite.state, - nil, - nil, - nil, - suite.headers, - nil, - nil, - receipts, - nil, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - validENIDs.Strings(), // set the fixed EN Identifiers to the generated execution IDs - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + // mock signaler context expects an error + signCtxErr := irrecoverable.NewExceptionf("failed to lookup final header: %w", err) + signalerCtx := irrecoverable.WithSignalerContext(context.Background(), + irrecoverable.NewMockSignalerContextExpectError(suite.T(), context.Background(), signCtxErr)) - // execute request with an empty block id list and expect an empty list of events and no error - resp, err := backend.GetEventsForBlockIDs(ctx, string(flow.EventAccountCreated), []flow.Identifier{}) - require.NoError(suite.T(), err) - require.Empty(suite.T(), resp) + actualBlock, actualStatus, err := backend.GetLatestBlock(signalerCtx, false) + suite.Require().Error(err) + suite.Require().Nil(actualBlock) + suite.Require().Equal(flow.BlockStatusUnknown, actualStatus) }) - - suite.assertAllExpectations() } func (suite *Suite) TestGetExecutionResultByID() { @@ -1401,7 +1489,7 @@ func (suite *Suite) TestGetExecutionResultByID() { validENIDs := flow.IdentifierList(validExecutorIdentities.NodeIDs()) // create a mock connection factory - connFactory := new(backendmock.ConnectionFactory) + connFactory := connectionmock.NewConnectionFactory(suite.T()) nonexistingID := unittest.IdentifierFixture() blockID := unittest.IdentifierFixture() @@ -1420,63 +1508,35 @@ func (suite *Suite) TestGetExecutionResultByID() { Return(executionResult, nil) suite.Run("nonexisting execution result for id", func() { + suite.fixedExecutionNodeIDs = validENIDs - // create the handler - backend := New( - suite.state, - nil, - nil, - nil, - suite.headers, - nil, - nil, - suite.receipts, - results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - validENIDs.Strings(), // set the fixed EN Identifiers to the generated execution IDs - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + params.ExecutionResults = results + params.ConnFactory = connFactory + + backend, err := New(params) + suite.Require().NoError(err) // execute request - _, err := backend.GetExecutionResultByID(ctx, nonexistingID) + _, err = backend.GetExecutionResultByID(ctx, nonexistingID) - assert.Error(suite.T(), 
err) + suite.Assert().Error(err) }) suite.Run("existing execution result id", func() { - // create the handler - backend := New( - suite.state, - nil, - nil, - nil, - suite.headers, - nil, - nil, - nil, - results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - validENIDs.Strings(), // set the fixed EN Identifiers to the generated execution IDs - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + suite.fixedExecutionNodeIDs = validENIDs + + params := suite.defaultBackendParams() + params.ExecutionResults = results + params.ConnFactory = connFactory + + backend, err := New(params) + suite.Require().NoError(err) // execute request er, err := backend.GetExecutionResultByID(ctx, executionResult.ID()) - suite.checkResponse(er, err) + suite.Require().NoError(err) + suite.Require().NotNil(er) require.Equal(suite.T(), executionResult, er) }) @@ -1492,7 +1552,7 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { validENIDs := flow.IdentifierList(validExecutorIdentities.NodeIDs()) // create a mock connection factory - connFactory := new(backendmock.ConnectionFactory) + connFactory := connectionmock.NewConnectionFactory(suite.T()) blockID := unittest.IdentifierFixture() executionResult := unittest.ExecutionResultFixture( @@ -1513,64 +1573,35 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { Return(executionResult, nil) suite.Run("nonexisting execution results", func() { + suite.fixedExecutionNodeIDs = validENIDs - // create the handler - backend := New( - suite.state, - nil, - nil, - nil, - suite.headers, - nil, - nil, - suite.receipts, - results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - validENIDs.Strings(), // set the fixed EN Identifiers to the generated execution IDs - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + params.ExecutionResults = results + params.ConnFactory = connFactory + + backend, err := New(params) + suite.Require().NoError(err) // execute request - _, err := backend.GetExecutionResultForBlockID(ctx, nonexistingBlockID) + _, err = backend.GetExecutionResultForBlockID(ctx, nonexistingBlockID) - assert.Error(suite.T(), err) + suite.Assert().Error(err) }) suite.Run("existing execution results", func() { + suite.fixedExecutionNodeIDs = validENIDs - // create the handler - backend := New( - suite.state, - nil, - nil, - nil, - suite.headers, - nil, - nil, - nil, - results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - validENIDs.Strings(), // set the fixed EN Identifiers to the generated execution IDs - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + params.ExecutionResults = results + params.ConnFactory = connFactory + + backend, err := New(params) + suite.Require().NoError(err) // execute request er, err := backend.GetExecutionResultForBlockID(ctx, blockID) - suite.checkResponse(er, err) + suite.Require().NoError(err) + suite.Require().NotNil(er) require.Equal(suite.T(), executionResult, er) }) @@ -1579,712 +1610,384 @@ func (suite *Suite) TestGetExecutionResultByBlockID() { suite.assertAllExpectations() } -func (suite *Suite) TestGetEventsForHeightRange() { - - ctx := 
context.Background() - const minHeight uint64 = 5 - const maxHeight uint64 = 10 - var headHeight uint64 - var blockHeaders []*flow.Header - var nodeIdentities flow.IdentityList - - headersDB := make(map[uint64]*flow.Header) // backend for storage.Headers - var head *flow.Header // backend for Snapshot.Head - - state := new(protocol.State) - snapshot := new(protocol.Snapshot) - state.On("Final").Return(snapshot, nil) - state.On("Sealed").Return(snapshot, nil) - - rootHeader := unittest.BlockHeaderFixture() - params := new(protocol.Params) - params.On("Root").Return(rootHeader, nil) - state.On("Params").Return(params).Maybe() - - // mock snapshot to return head backend - snapshot.On("Head").Return( - func() *flow.Header { return head }, - func() error { return nil }, - ) - snapshot.On("Identities", mock.Anything).Return( - func(_ flow.IdentityFilter) flow.IdentityList { - return nodeIdentities - }, - func(flow.IdentityFilter) error { return nil }, - ) - - // mock headers to pull from headers backend - suite.headers.On("ByHeight", mock.Anything).Return( - func(height uint64) *flow.Header { - return headersDB[height] - }, - func(height uint64) error { - _, ok := headersDB[height] - if !ok { - return storage.ErrNotFound - } - return nil - }).Maybe() - - setupHeadHeight := func(height uint64) { - header := unittest.BlockHeaderFixture() // create a mock header - header.Height = height // set the header height - head = header - } - - setupStorage := func(min uint64, max uint64) ([]*flow.Header, []*flow.ExecutionReceipt, flow.IdentityList) { - headersDB = make(map[uint64]*flow.Header) // reset backend - - var headers []*flow.Header - var ers []*flow.ExecutionReceipt - var enIDs flow.IdentityList - for i := min; i <= max; i++ { - block := unittest.BlockFixture() - header := block.Header - headersDB[i] = header - headers = append(headers, header) - newErs, ids := suite.setupReceipts(&block) - ers = append(ers, newErs...) - enIDs = append(enIDs, ids...) 
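Note for reviewers: TestGetNodeVersionInfo below stubs VersionBeacons.Highest with a linear scan over beacons sorted by descending SealHeight. Pulled out as a standalone sketch for clarity; the function name and slice parameter are illustrative, not part of the patch's API.

// highestBeacon returns the most recently sealed version beacon at or below
// the given height; beacons must be sorted by descending SealHeight.
func highestBeacon(beacons []*flow.SealedVersionBeacon, height uint64) (*flow.SealedVersionBeacon, error) {
	for _, b := range beacons {
		if b.SealHeight <= height {
			return b, nil
		}
	}
	return nil, storage.ErrNotFound
}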
+func (suite *Suite) TestGetNodeVersionInfo() { + sporkRootBlock := unittest.BlockHeaderFixture() + nodeRootBlock := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(sporkRootBlock.Height + 100)) + sporkID := unittest.IdentifierFixture() + protocolStateVersion := uint64(1234) + + stateParams := protocol.NewParams(suite.T()) + stateParams.On("SporkID").Return(sporkID, nil) + stateParams.On("SporkRootBlockHeight").Return(sporkRootBlock.Height, nil) + stateParams.On("SealedRoot").Return(nodeRootBlock, nil) + + state := protocol.NewState(suite.T()) + snap := protocol.NewSnapshot(suite.T()) + kvstore := protocol.NewKVStoreReader(suite.T()) + state.On("Params").Return(stateParams, nil).Maybe() + state.On("Final").Return(snap).Maybe() + snap.On("ProtocolState").Return(kvstore, nil).Maybe() + kvstore.On("GetProtocolStateVersion").Return(protocolStateVersion).Maybe() + + suite.Run("happy path", func() { + expected := &accessmodel.NodeVersionInfo{ + Semver: build.Version(), + Commit: build.Commit(), + SporkId: sporkID, + ProtocolStateVersion: protocolStateVersion, + SporkRootBlockHeight: sporkRootBlock.Height, + NodeRootBlockHeight: nodeRootBlock.Height, + CompatibleRange: nil, } - return headers, ers, enIDs - } - setupExecClient := func() []flow.BlockEvents { - blockIDs := make([]flow.Identifier, len(blockHeaders)) - for i, header := range blockHeaders { - blockIDs[i] = header.ID() - } - execReq := &execproto.GetEventsForBlockIDsRequest{ - BlockIds: convert.IdentifiersToMessages(blockIDs), - Type: string(flow.EventAccountCreated), - } + params := suite.defaultBackendParams() + params.State = state - results := make([]flow.BlockEvents, len(blockHeaders)) - exeResults := make([]*execproto.GetEventsForBlockIDsResponse_Result, len(blockHeaders)) + backend, err := New(params) + suite.Require().NoError(err) - for i, header := range blockHeaders { - events := getEvents(1) - height := header.Height + actual, err := backend.GetNodeVersionInfo(context.Background()) + suite.Require().NoError(err) - results[i] = flow.BlockEvents{ - BlockID: header.ID(), - BlockHeight: height, - BlockTimestamp: header.Timestamp, - Events: events, - } + suite.Require().Equal(expected, actual) + }) - exeResults[i] = &execproto.GetEventsForBlockIDsResponse_Result{ - BlockId: convert.IdentifierToMessage(header.ID()), - BlockHeight: header.Height, - Events: convert.EventsToMessages(events), - } + suite.Run("start and end version set", func() { + latestBlockHeight := nodeRootBlock.Height + 100 + versionBeacons := storagemock.NewVersionBeacons(suite.T()) + + events := []*flow.SealedVersionBeacon{ + { + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries(flow.VersionBoundary{BlockHeight: nodeRootBlock.Height + 4, Version: "0.0.1"}), + ), + SealHeight: nodeRootBlock.Height + 2, + }, + { + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries(flow.VersionBoundary{BlockHeight: nodeRootBlock.Height + 12, Version: "0.0.2"}), + ), + SealHeight: nodeRootBlock.Height + 10, + }, + { + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries(flow.VersionBoundary{BlockHeight: latestBlockHeight - 8, Version: "0.0.3"}), + ), + SealHeight: latestBlockHeight - 10, + }, } - exeResp := &execproto.GetEventsForBlockIDsResponse{ - Results: exeResults, + eventMap := make(map[uint64]*flow.SealedVersionBeacon, len(events)) + for _, event := range events { + eventMap[event.SealHeight] = event } - suite.execClient. - On("GetEventsForBlockIDs", ctx, execReq). - Return(exeResp, nil). 
- Once() - - return results - } + // make sure events are sorted descending by seal height + sort.Slice(events, func(i, j int) bool { + return events[i].SealHeight > events[j].SealHeight + }) - connFactory := suite.setupConnectionFactory() + versionBeacons. + On("Highest", mock.AnythingOfType("uint64")). + Return(func(height uint64) (*flow.SealedVersionBeacon, error) { + // iterating through events sorted descending by seal height + // return the first event that was sealed in a height less than or equal to height + for _, event := range events { + if event.SealHeight <= height { + return event, nil + } + } + return nil, storage.ErrNotFound + }) - suite.Run("invalid request max height < min height", func() { - backend := New( - suite.state, - nil, - nil, - nil, - suite.headers, - nil, - nil, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, + var err error + suite.versionControl, err = version.NewVersionControl( + unittest.Logger(), + versionBeacons, + semver.New("0.0.2"), + nodeRootBlock.Height, + latestBlockHeight, ) + require.NoError(suite.T(), err) - _, err := backend.GetEventsForHeightRange(ctx, string(flow.EventAccountCreated), maxHeight, minHeight) - suite.Require().Error(err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Start the VersionControl component. + suite.versionControl.Start(irrecoverable.NewMockSignalerContext(suite.T(), ctx)) + unittest.RequireComponentsReadyBefore(suite.T(), 2*time.Second, suite.versionControl) + + expected := &accessmodel.NodeVersionInfo{ + Semver: build.Version(), + Commit: build.Commit(), + SporkId: sporkID, + ProtocolStateVersion: protocolStateVersion, + SporkRootBlockHeight: sporkRootBlock.Height, + NodeRootBlockHeight: nodeRootBlock.Height, + CompatibleRange: &accessmodel.CompatibleRange{ + StartHeight: nodeRootBlock.Height + 12, + EndHeight: latestBlockHeight - 9, + }, + } - suite.assertAllExpectations() // assert that request was not sent to execution node - }) + params := suite.defaultBackendParams() + params.State = state - suite.Run("valid request with min_height < max_height < last_sealed_block_height", func() { - - headHeight = maxHeight + 1 - - // setup mocks - setupHeadHeight(headHeight) - blockHeaders, _, nodeIdentities = setupStorage(minHeight, maxHeight) - expectedResp := setupExecClient() - fixedENIdentifiersStr := flow.IdentifierList(nodeIdentities.NodeIDs()).Strings() - - // create handler - backend := New( - state, - nil, - nil, - suite.blocks, - suite.headers, - nil, - nil, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - fixedENIdentifiersStr, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + backend, err := New(params) + suite.Require().NoError(err) - // execute request - actualResp, err := backend.GetEventsForHeightRange(ctx, string(flow.EventAccountCreated), minHeight, maxHeight) + actual, err := backend.GetNodeVersionInfo(ctx) + suite.Require().NoError(err) - // check response - suite.checkResponse(actualResp, err) - suite.assertAllExpectations() - suite.Require().Equal(expectedResp, actualResp) + suite.Require().Equal(expected, actual) }) +} - suite.Run("valid request with max_height > 
last_sealed_block_height", func() { - headHeight = maxHeight - 1 - setupHeadHeight(headHeight) - blockHeaders, _, nodeIdentities = setupStorage(minHeight, headHeight) - expectedResp := setupExecClient() - fixedENIdentifiersStr := flow.IdentifierList(nodeIdentities.NodeIDs()).Strings() - - backend := New( - state, - nil, - nil, - suite.blocks, - suite.headers, - nil, - nil, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - fixedENIdentifiersStr, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) - - actualResp, err := backend.GetEventsForHeightRange(ctx, string(flow.EventAccountCreated), minHeight, maxHeight) - suite.checkResponse(actualResp, err) - - suite.assertAllExpectations() - suite.Require().Equal(expectedResp, actualResp) - }) +func (suite *Suite) TestGetNetworkParameters() { + suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() - // set max height range to 1 and request range of 2 - suite.Run("invalid request exceeding max height range", func() { - headHeight = maxHeight - 1 - setupHeadHeight(headHeight) - blockHeaders, _, nodeIdentities = setupStorage(minHeight, headHeight) - fixedENIdentifiersStr := flow.IdentifierList(nodeIdentities.NodeIDs()).Strings() - - // create handler - backend := New( - state, - nil, - nil, - suite.blocks, - suite.headers, - nil, - nil, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - 1, // set maximum range to 1 - nil, - fixedENIdentifiersStr, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + expectedChainID := flow.Mainnet - _, err := backend.GetEventsForHeightRange(ctx, string(flow.EventAccountCreated), minHeight, minHeight+1) - suite.Require().Error(err) - }) + params := suite.defaultBackendParams() + params.ChainID = expectedChainID - suite.Run("invalid request last_sealed_block_height < min height", func() { - - // set sealed height to one less than the request start height - headHeight = minHeight - 1 - - // setup mocks - setupHeadHeight(headHeight) - blockHeaders, _, nodeIdentities = setupStorage(minHeight, maxHeight) - fixedENIdentifiersStr := flow.IdentifierList(nodeIdentities.NodeIDs()).Strings() - - // create handler - backend := New( - state, - nil, - nil, - suite.blocks, - suite.headers, - nil, - nil, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - fixedENIdentifiersStr, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + backend, err := New(params) + suite.Require().NoError(err) - _, err := backend.GetEventsForHeightRange(ctx, string(flow.EventAccountCreated), minHeight, maxHeight) - suite.Require().Error(err) - }) + actual := backend.GetNetworkParameters(context.Background()) + suite.Require().Equal(expectedChainID, actual.ChainID) } -func (suite *Suite) TestGetAccount() { +// TestGetTransactionResultEventEncodingVersion tests the GetTransactionResult function with different event encoding versions. 
+func (suite *Suite) TestGetTransactionResultEventEncodingVersion() { suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - - address, err := suite.chainID.Chain().NewAddressGenerator().NextAddress() - suite.Require().NoError(err) - account := &entitiesproto.Account{ - Address: address.Bytes(), - } ctx := context.Background() - // setup the latest sealed block - block := unittest.BlockFixture() - header := block.Header // create a mock header - seal := unittest.Seal.Fixture() // create a mock seal - seal.BlockID = header.ID() // make the seal point to the header - - suite.snapshot. - On("Head"). - Return(header, nil). - Once() - - // create the expected execution API request - blockID := header.ID() - exeReq := &execproto.GetAccountAtBlockIDRequest{ - BlockId: blockID[:], - Address: address.Bytes(), - } - - // create the expected execution API response - exeResp := &execproto.GetAccountAtBlockIDResponse{ - Account: account, - } - - // setup the execution client mock - suite.execClient. - On("GetAccountAtBlockID", ctx, exeReq). - Return(exeResp, nil). - Once() - - receipts, ids := suite.setupReceipts(&block) + collection := unittest.CollectionFixture(1) + transactionBody := collection.Transactions[0] + // block which will eventually contain the transaction + block := unittest.BlockFixture( + unittest.Block.WithPayload(unittest.PayloadFixture( + unittest.WithGuarantees( + unittest.CollectionGuaranteesWithCollectionIDFixture([]*flow.Collection{&collection})...), + )), + ) + blockId := block.ID() - suite.snapshot.On("Identities", mock.Anything).Return(ids, nil) - // create a mock connection factory - connFactory := new(backendmock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) - - // create the handler with the mock - backend := New( - suite.state, - nil, - nil, - nil, - suite.headers, - nil, - nil, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, + // reference block to which the transaction points to + refBlock := unittest.BlockFixture( + unittest.Block.WithHeight(2), ) + transactionBody.ReferenceBlockID = refBlock.ID() + txId := transactionBody.ID() - preferredENIdentifiers = flow.IdentifierList{receipts[0].ExecutorID} + // transaction storage returns the corresponding transaction + suite.transactions. + On("ByID", txId). + Return(transactionBody, nil) - suite.Run("happy path - valid request and valid response", func() { - account, err := backend.GetAccountAtLatestBlock(ctx, address) - suite.checkResponse(account, err) + light := collection.Light() + suite.collections.On("LightByID", mock.Anything).Return(light, nil) - suite.Require().Equal(address, account.Address) + suite.snapshot.On("Head").Return(block.ToHeader(), nil) - suite.assertAllExpectations() - }) -} + // block storage returns the corresponding block + suite.blocks. + On("ByID", blockId). 
+ Return(block, nil) -func (suite *Suite) TestGetAccountAtBlockHeight() { - suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() + _, fixedENIDs := suite.setupReceipts(block) suite.state.On("Final").Return(suite.snapshot, nil).Maybe() + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) - height := uint64(5) - address := unittest.AddressFixture() - account := &entitiesproto.Account{ - Address: address.Bytes(), - } - ctx := context.Background() - - // create a mock block header - b := unittest.BlockFixture() - h := b.Header - - // setup headers storage to return the header when queried by height - suite.headers. - On("ByHeight", height). - Return(h, nil). - Once() - - receipts, ids := suite.setupReceipts(&b) - suite.snapshot.On("Identities", mock.Anything).Return(ids, nil) - - // create a mock connection factory - connFactory := new(backendmock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) - - // create the expected execution API request - blockID := h.ID() - exeReq := &execproto.GetAccountAtBlockIDRequest{ - BlockId: blockID[:], - Address: address.Bytes(), - } - - // create the expected execution API response - exeResp := &execproto.GetAccountAtBlockIDResponse{ - Account: account, - } + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - // setup the execution client mock - suite.execClient. - On("GetAccountAtBlockID", ctx, exeReq). - Return(exeResp, nil). - Once() + params := suite.defaultBackendParams() + // the connection factory should be used to get the execution node client + params.ConnFactory = suite.setupConnectionFactory() - // create the handler with the mock - backend := New( - suite.state, - nil, - nil, - nil, - suite.headers, - nil, - nil, - suite.receipts, - suite.results, - flow.Testnet, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + backend, err := New(params) + suite.Require().NoError(err) - preferredENIdentifiers = flow.IdentifierList{receipts[0].ExecutorID} + ccfEvents, jsoncdcEvents := generateEncodedEvents(suite.T(), 1) + eventMessages := convert.EventsToMessages(ccfEvents) - suite.Run("happy path - valid request and valid response", func() { - account, err := backend.GetAccountAtBlockHeight(ctx, address, height) - suite.checkResponse(account, err) + for _, version := range eventEncodingVersions { + suite.Run(fmt.Sprintf("test %s event encoding version for GetTransactionResult", version.String()), func() { + exeEventResp := &execproto.GetTransactionResultResponse{ + Events: eventMessages, + EventEncodingVersion: entitiesproto.EventEncodingVersion_CCF_V0, + } - suite.Require().Equal(address, account.Address) + suite.execClient. + On("GetTransactionResult", ctx, &execproto.GetTransactionResultRequest{ + BlockId: blockId[:], + TransactionId: txId[:], + }). + Return(exeEventResp, nil). + Once() + + result, err := backend.GetTransactionResult(ctx, txId, blockId, flow.ZeroID, version) + suite.Require().NoError(err) + suite.Require().NotNil(result) + + var expectedResult []flow.Event + switch version { + case entitiesproto.EventEncodingVersion_CCF_V0: + expectedResult = append(expectedResult, ccfEvents...) + case entitiesproto.EventEncodingVersion_JSON_CDC_V0: + expectedResult = append(expectedResult, jsoncdcEvents...) 
+ } - suite.assertAllExpectations() - }) + suite.Assert().Equal(result.Events, expectedResult) + }) + } } -func (suite *Suite) TestGetNetworkParameters() { +// TestGetTransactionResultByIndexAndBlockIdEventEncodingVersion tests the GetTransactionResultByIndex and GetTransactionResultsByBlockID functions with different event encoding versions. +func (suite *Suite) TestGetTransactionResultByIndexAndBlockIdEventEncodingVersion() { suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() - expectedChainID := flow.Mainnet + ctx := context.Background() + block := unittest.BlockFixture() + blockId := block.ID() + index := uint32(0) - backend := New( - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - flow.Mainnet, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + suite.snapshot.On("Head").Return(block.ToHeader(), nil) - params := backend.GetNetworkParameters(context.Background()) + // block storage returns the corresponding block + suite.blocks. + On("ByID", blockId). + Return(block, nil) - suite.Require().Equal(expectedChainID, params.ChainID) -} + _, fixedENIDs := suite.setupReceipts(block) + suite.state.On("Final").Return(suite.snapshot, nil).Maybe() + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) -// TestExecutionNodesForBlockID tests the common method backend.executionNodesForBlockID used for serving all API calls -// that need to talk to an execution node. -func (suite *Suite) TestExecutionNodesForBlockID() { + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() - totalReceipts := 5 + params := suite.defaultBackendParams() + // the connection factory should be used to get the execution node client + params.ConnFactory = suite.setupConnectionFactory() - block := unittest.BlockFixture() + backend, err := New(params) + suite.Require().NoError(err) - // generate one execution node identities for each receipt assuming that each ER is generated by a unique exec node - allExecutionNodes := unittest.IdentityListFixture(totalReceipts, unittest.WithRole(flow.RoleExecution)) + exeNodeEventEncodingVersion := entitiesproto.EventEncodingVersion_CCF_V0 + ccfEvents, jsoncdcEvents := generateEncodedEvents(suite.T(), 1) + eventMessages := convert.EventsToMessages(ccfEvents) - // one execution result for all receipts for this block - executionResult := unittest.ExecutionResultFixture() + for _, version := range eventEncodingVersions { + suite.Run(fmt.Sprintf("test %s event encoding version for GetTransactionResultByIndex", version.String()), func() { + exeEventResp := &execproto.GetTransactionResultResponse{ + Events: eventMessages, + EventEncodingVersion: exeNodeEventEncodingVersion, + } - // generate execution receipts - receipts := make(flow.ExecutionReceiptList, totalReceipts) - for j := 0; j < totalReceipts; j++ { - r := unittest.ReceiptForBlockFixture(&block) - r.ExecutorID = allExecutionNodes[j].NodeID - er := *executionResult - r.ExecutionResult = er - receipts[j] = r - } + suite.execClient. + On("GetTransactionResultByIndex", ctx, &execproto.GetTransactionByIndexRequest{ + BlockId: blockId[:], + Index: index, + }). + Return(exeEventResp, nil). + Once() + + result, err := backend.GetTransactionResultByIndex(ctx, blockId, index, version) + suite.Require().NoError(err) + suite.Require().NotNil(result) + + var expectedResult []flow.Event + switch version { + case entitiesproto.EventEncodingVersion_CCF_V0: + expectedResult = append(expectedResult, ccfEvents...) 
+ case entitiesproto.EventEncodingVersion_JSON_CDC_V0: + expectedResult = append(expectedResult, jsoncdcEvents...) + } - currentAttempt := 0 - attempt1Receipts, attempt2Receipts, attempt3Receipts := receipts, receipts, receipts + suite.Assert().Equal(expectedResult, result.Events) + }) - // setup receipts storage mock to return different list of receipts on each call - suite.receipts. - On("ByBlockID", block.ID()).Return( - func(id flow.Identifier) flow.ExecutionReceiptList { - switch currentAttempt { - case 0: - currentAttempt++ - return attempt1Receipts - case 1: - currentAttempt++ - return attempt2Receipts - default: - currentAttempt = 0 - return attempt3Receipts + suite.Run(fmt.Sprintf("test %s event encoding version for GetTransactionResultsByBlockID", version.String()), func() { + exeEventResp := &execproto.GetTransactionResultsResponse{ + TransactionResults: []*execproto.GetTransactionResultResponse{ + { + Events: eventMessages, + EventEncodingVersion: exeNodeEventEncodingVersion, + }}, + EventEncodingVersion: exeNodeEventEncodingVersion, } - }, - func(id flow.Identifier) error { return nil }) - - suite.snapshot.On("Identities", mock.Anything).Return( - func(filter flow.IdentityFilter) flow.IdentityList { - // apply the filter passed in to the list of all the execution nodes - return allExecutionNodes.Filter(filter) - }, - func(flow.IdentityFilter) error { return nil }) - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - testExecutionNodesForBlockID := func(preferredENs, fixedENs, expectedENs flow.IdentityList) { + suite.execClient. + On("GetTransactionResultsByBlockID", ctx, &execproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId[:], + }). + Return(exeEventResp, nil). + Once() + + results, err := backend.GetTransactionResultsByBlockID(ctx, blockId, version) + suite.Require().NoError(err) + suite.Require().NotNil(results) + + var expectedResult []flow.Event + switch version { + case entitiesproto.EventEncodingVersion_CCF_V0: + expectedResult = append(expectedResult, ccfEvents...) + case entitiesproto.EventEncodingVersion_JSON_CDC_V0: + expectedResult = append(expectedResult, jsoncdcEvents...) 
+ } - if preferredENs != nil { - preferredENIdentifiers = preferredENs.NodeIDs() - } - if fixedENs != nil { - fixedENIdentifiers = fixedENs.NodeIDs() - } - actualList, err := executionNodesForBlockID(context.Background(), block.ID(), suite.receipts, suite.state, suite.log) - require.NoError(suite.T(), err) - if expectedENs == nil { - expectedENs = flow.IdentityList{} - } - if len(expectedENs) > maxExecutionNodesCnt { - for _, actual := range actualList { - require.Contains(suite.T(), expectedENs, actual) + for _, result := range results { + suite.Assert().Equal(result.Events, expectedResult) } - } else { - require.ElementsMatch(suite.T(), actualList, expectedENs) - } + }) } - // if we don't find sufficient receipts, executionNodesForBlockID should return a list of random ENs - suite.Run("insufficient receipts return random ENs in state", func() { - // return no receipts at all attempts - attempt1Receipts = flow.ExecutionReceiptList{} - attempt2Receipts = flow.ExecutionReceiptList{} - attempt3Receipts = flow.ExecutionReceiptList{} - suite.state.On("AtBlockID", mock.Anything).Return(suite.snapshot) - actualList, err := executionNodesForBlockID(context.Background(), block.ID(), suite.receipts, suite.state, suite.log) - require.NoError(suite.T(), err) - require.Equal(suite.T(), len(actualList), maxExecutionNodesCnt) - }) - - // if no preferred or fixed ENs are specified, the ExecutionNodesForBlockID function should - // return the exe node list without a filter - suite.Run("no preferred or fixed ENs", func() { - testExecutionNodesForBlockID(nil, nil, allExecutionNodes) - }) - // if only preferred ENs are specified, the ExecutionNodesForBlockID function should - // return the preferred ENs list - suite.Run("two preferred ENs with zero fixed EN", func() { - // mark the first two ENs as preferred - preferredENs := allExecutionNodes[0:2] - expectedList := preferredENs - testExecutionNodesForBlockID(preferredENs, nil, expectedList) - }) - // if only fixed ENs are specified, the ExecutionNodesForBlockID function should - // return the fixed ENs list - suite.Run("two fixed ENs with zero preferred EN", func() { - // mark the first two ENs as fixed - fixedENs := allExecutionNodes[0:2] - expectedList := fixedENs - testExecutionNodesForBlockID(nil, fixedENs, expectedList) - }) - // if both are specified, the ExecutionNodesForBlockID function should - // return the preferred ENs list - suite.Run("four fixed ENs of which two are preferred ENs", func() { - // mark the first four ENs as fixed - fixedENs := allExecutionNodes[0:5] - // mark the first two of the fixed ENs as preferred ENs - preferredENs := fixedENs[0:2] - expectedList := preferredENs - testExecutionNodesForBlockID(preferredENs, fixedENs, expectedList) - }) - // if both are specified, but the preferred ENs don't match the ExecutorIDs in the ER, - // the ExecutionNodesForBlockID function should return the fixed ENs list - suite.Run("four fixed ENs of which two are preferred ENs but have not generated the ER", func() { - // mark the first two ENs as fixed - fixedENs := allExecutionNodes[0:2] - // specify two ENs not specified in the ERs as preferred - preferredENs := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) - expectedList := fixedENs - testExecutionNodesForBlockID(preferredENs, fixedENs, expectedList) - }) - // if execution receipts are not yet available, the ExecutionNodesForBlockID function should retry twice - suite.Run("retry execution receipt query", func() { - // on first attempt, no execution receipts are 
available - attempt1Receipts = flow.ExecutionReceiptList{} - // on second attempt ony one is available - attempt2Receipts = flow.ExecutionReceiptList{receipts[0]} - // on third attempt all receipts are available - attempt3Receipts = receipts - currentAttempt = 0 - // mark the first two ENs as preferred - preferredENs := allExecutionNodes[0:2] - expectedList := preferredENs - testExecutionNodesForBlockID(preferredENs, nil, expectedList) - }) } -// TestExecuteScriptOnExecutionNode tests the method backend.scripts.executeScriptOnExecutionNode for script execution -func (suite *Suite) TestExecuteScriptOnExecutionNode() { - - // create a mock connection factory - connFactory := new(backendmock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) - connFactory.On("InvalidateExecutionAPIClient", mock.Anything) - - // create the handler with the mock - backend := New( - suite.state, - nil, - nil, - nil, - suite.headers, - nil, - nil, - suite.receipts, - suite.results, - flow.Mainnet, - metrics.NewNoopCollector(), - connFactory, // the connection factory should be used to get the execution node client - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) +// TestNodeCommunicator tests the special case for the node communicator where only one node is available and the +// communicator gets gobreaker.ErrOpenState +func (suite *Suite) TestNodeCommunicator() { + head := unittest.BlockHeaderFixture() + suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() + suite.snapshot.On("Head").Return(head, nil).Maybe() - // mock parameters ctx := context.Background() block := unittest.BlockFixture() - blockID := block.ID() - script := []byte("dummy script") - arguments := [][]byte(nil) - executionNode := unittest.IdentityFixture(unittest.WithRole(flow.RoleExecution)) - execReq := &execproto.ExecuteScriptAtBlockIDRequest{ - BlockId: blockID[:], - Script: script, - Arguments: arguments, - } - execRes := &execproto.ExecuteScriptAtBlockIDResponse{ - Value: []byte{4, 5, 6}, + blockId := block.ID() + + // block storage returns the corresponding block + suite.blocks. + On("ByID", blockId). + Return(block, nil) + + _, fixedENIDs := suite.setupReceipts(block) + suite.state.On("Final").Return(suite.snapshot, nil).Maybe() + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) + + exeEventReq := &execproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId[:], } - suite.Run("happy path script execution success", func() { - suite.execClient.On("ExecuteScriptAtBlockID", ctx, execReq).Return(execRes, nil).Once() - res, err := backend.tryExecuteScript(ctx, executionNode.Address, execReq) - suite.execClient.AssertExpectations(suite.T()) - suite.checkResponse(res, err) - }) + // Leave only one preferred execution node + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + suite.preferredExecutionNodeIDs = flow.IdentifierList{fixedENIDs[0].NodeID} - suite.Run("script execution failure returns status OK", func() { - suite.execClient.On("ExecuteScriptAtBlockID", ctx, execReq).
- Return(nil, status.Error(codes.InvalidArgument, "execution failure!")).Once() - _, err := backend.tryExecuteScript(ctx, executionNode.Address, execReq) - suite.execClient.AssertExpectations(suite.T()) - suite.Require().Error(err) - suite.Require().Equal(status.Code(err), codes.InvalidArgument) - }) + params := suite.defaultBackendParams() + // the connection factory should be used to get the execution node client + params.ConnFactory = suite.setupConnectionFactory() - suite.Run("execution node internal failure returns status code Internal", func() { - suite.execClient.On("ExecuteScriptAtBlockID", ctx, execReq). - Return(nil, status.Error(codes.Internal, "execution node internal error!")).Once() - _, err := backend.tryExecuteScript(ctx, executionNode.Address, execReq) - suite.execClient.AssertExpectations(suite.T()) - suite.Require().Error(err) - suite.Require().Equal(status.Code(err), codes.Internal) - }) + backend, err := New(params) + suite.Require().NoError(err) + + // Simulate an open circuit breaker error + suite.execClient. + On("GetTransactionResultsByBlockID", ctx, exeEventReq). + Return(nil, gobreaker.ErrOpenState) + + result, err := backend.GetTransactionResultsByBlockID(ctx, blockId, entitiesproto.EventEncodingVersion_JSON_CDC_V0) + suite.Assert().Nil(result) + suite.Assert().Error(err) + suite.Assert().Equal(codes.Unavailable, status.Code(err)) } func (suite *Suite) assertAllExpectations() { @@ -2297,11 +2000,6 @@ func (suite *Suite) assertAllExpectations() { suite.execClient.AssertExpectations(suite.T()) } -func (suite *Suite) checkResponse(resp interface{}, err error) { - suite.Require().NoError(err) - suite.Require().NotNil(resp) -} - func (suite *Suite) setupReceipts(block *flow.Block) ([]*flow.ExecutionReceipt, flow.IdentityList) { ids := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) receipt1 := unittest.ReceiptForBlockFixture(block) @@ -2318,18 +2016,129 @@ func (suite *Suite) setupReceipts(block *flow.Block, return receipts, ids } -func (suite *Suite) setupConnectionFactory() ConnectionFactory { +func (suite *Suite) setupConnectionFactory() connection.ConnectionFactory { // create a mock connection factory - connFactory := new(backendmock.ConnectionFactory) - connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mockCloser{}, nil) - connFactory.On("InvalidateExecutionAPIClient", mock.Anything) + connFactory := connectionmock.NewConnectionFactory(suite.T()) + connFactory.On("GetExecutionAPIClient", mock.Anything).Return(suite.execClient, &mocks.MockCloser{}, nil) return connFactory } -func getEvents(n int) []flow.Event { - events := make([]flow.Event, n) - for i := range events { - events[i] = flow.Event{Type: flow.EventAccountCreated} +func generateEncodedEvents(t *testing.T, n int) ([]flow.Event, []flow.Event) { + ccfEvents := unittest.EventGenerator.GetEventsWithEncoding(n, entities.EventEncodingVersion_CCF_V0) + jsonEvents := make([]flow.Event, n) + for i, e := range ccfEvents { + jsonEvent, err := convert.CcfEventToJsonEvent(e) + require.NoError(t, err) + jsonEvents[i] = *jsonEvent + } + return ccfEvents, jsonEvents +} + +func (suite *Suite) defaultBackendParams() Params { + return Params{ + State: suite.state, + Blocks: suite.blocks, + Headers: suite.headers, + Collections: suite.collections, + Transactions: suite.transactions, + ExecutionReceipts: suite.receipts, + ExecutionResults: suite.results, + ChainID: suite.chainID, + CollectionRPC: suite.colClient, + MaxHeightRange: 
events.DefaultMaxHeightRange, + SnapshotHistoryLimit: DefaultSnapshotHistoryLimit, + Communicator: node_communicator.NewNodeCommunicator(false), + AccessMetrics: metrics.NewNoopCollector(), + Log: suite.log, + BlockTracker: nil, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + LastFullBlockHeight: suite.lastFullBlockHeight, + VersionControl: suite.versionControl, + ExecNodeIdentitiesProvider: commonrpc.NewExecutionNodeIdentitiesProvider( + suite.log, + suite.state, + suite.receipts, + suite.preferredExecutionNodeIDs, + suite.fixedExecutionNodeIDs, + ), + } +} + +// TestResolveHeightError tests the ResolveHeightError function for various scenarios where the block height +// is below the spork root height, below the node root height, above the node root height, or when a different +// error is provided. It validates that ResolveHeightError returns an appropriate error message for each case. +// +// Test cases: +// 1) If height is below the spork root height, it suggests using a historic node. +// 2) If height is below the node root height, it suggests using a different Access node. +// 3) If height is above the node root height, it returns the original error without modification. +// 4) If a non-storage-related error is provided, it returns the error as is. +func (suite *Suite) TestResolveHeightError() { + tests := []struct { + name string + height uint64 + sporkRootHeight uint64 + nodeRootHeight uint64 + genericErr error + expectedErrorMsg string + expectOriginalErr bool + }{ + { + name: "height below spork root height", + height: uint64(50), + sporkRootHeight: uint64(100), + nodeRootHeight: uint64(200), + genericErr: storage.ErrNotFound, + expectedErrorMsg: "block height %d is less than the spork root block height 100. Try to use a historic node: %v"}, + { + name: "height below node root height", + height: uint64(150), + sporkRootHeight: uint64(100), + nodeRootHeight: uint64(200), + genericErr: storage.ErrNotFound, + expectedErrorMsg: "block height %d is less than the node's root block height 200. 
Try to use a different Access node: %v", + expectOriginalErr: false, + }, + { + name: "height above node root height", + height: uint64(205), + sporkRootHeight: uint64(100), + nodeRootHeight: uint64(200), + genericErr: storage.ErrNotFound, + expectedErrorMsg: "%v", + expectOriginalErr: true, + }, + { + name: "non-storage related error", + height: uint64(150), + sporkRootHeight: uint64(100), + nodeRootHeight: uint64(200), + genericErr: fmt.Errorf("some other error"), + expectedErrorMsg: "%v", + expectOriginalErr: true, + }, + } + + for _, test := range tests { + suite.T().Run(test.name, func(t *testing.T) { + stateParams := protocol.NewParams(suite.T()) + + if errors.Is(test.genericErr, storage.ErrNotFound) { + stateParams.On("SporkRootBlockHeight").Return(test.sporkRootHeight).Once() + sealedRootHeader := unittest.BlockHeaderWithHeight(test.nodeRootHeight) + stateParams.On("SealedRoot").Return(sealedRootHeader, nil).Once() + } + + err := common.ResolveHeightError(stateParams, test.height, test.genericErr) + + if test.expectOriginalErr { + suite.Assert().True(errors.Is(err, test.genericErr)) + } else { + expectedError := fmt.Sprintf(test.expectedErrorMsg, test.height, test.genericErr) + suite.Assert().Equal(err.Error(), expectedError) + } + }) } - return events } diff --git a/engine/access/rpc/backend/backend_transactions.go b/engine/access/rpc/backend/backend_transactions.go deleted file mode 100644 index 661fc3f90f8..00000000000 --- a/engine/access/rpc/backend/backend_transactions.go +++ /dev/null @@ -1,913 +0,0 @@ -package backend - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/hashicorp/go-multierror" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" - execproto "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/fvm/blueprints" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" -) - -const collectionNodesToTry uint = 3 - -type backendTransactions struct { - staticCollectionRPC accessproto.AccessAPIClient // rpc client tied to a fixed collection node - transactions storage.Transactions - executionReceipts storage.ExecutionReceipts - collections storage.Collections - blocks storage.Blocks - state protocol.State - chainID flow.ChainID - transactionMetrics module.TransactionMetrics - transactionValidator *access.TransactionValidator - retry *Retry - connFactory ConnectionFactory - - previousAccessNodes []accessproto.AccessAPIClient - log zerolog.Logger -} - -// SendTransaction forwards the transaction to the collection node -func (b *backendTransactions) SendTransaction( - ctx context.Context, - tx *flow.TransactionBody, -) error { - now := time.Now().UTC() - - err := b.transactionValidator.Validate(tx) - if err != nil { - return status.Errorf(codes.InvalidArgument, "invalid transaction: %s", err.Error()) - } - - // send the transaction to the collection node if valid - err = b.trySendTransaction(ctx, tx) - if err != nil { - b.transactionMetrics.TransactionSubmissionFailed() - return rpc.ConvertError(err, "failed to send transaction to a collection node", codes.Internal) - } - - b.transactionMetrics.TransactionReceived(tx.ID(), now) - - 
// store the transaction locally - err = b.transactions.Store(tx) - if err != nil { - return status.Errorf(codes.Internal, "failed to store transaction: %v", err) - } - - if b.retry.IsActive() { - go b.registerTransactionForRetry(tx) - } - - return nil -} - -// trySendTransaction tries to transaction to a collection node -func (b *backendTransactions) trySendTransaction(ctx context.Context, tx *flow.TransactionBody) error { - - // if a collection node rpc client was provided at startup, just use that - if b.staticCollectionRPC != nil { - return b.grpcTxSend(ctx, b.staticCollectionRPC, tx) - } - - // otherwise choose a random set of collections nodes to try - collAddrs, err := b.chooseCollectionNodes(tx, collectionNodesToTry) - if err != nil { - return fmt.Errorf("failed to determine collection node for tx %x: %w", tx, err) - } - - var sendErrors *multierror.Error - logAnyError := func() { - err = sendErrors.ErrorOrNil() - if err != nil { - b.log.Info().Err(err).Msg("failed to send transactions to collector nodes") - } - } - defer logAnyError() - - // try sending the transaction to one of the chosen collection nodes - for _, addr := range collAddrs { - err = b.sendTransactionToCollector(ctx, tx, addr) - if err == nil { - return nil - } - sendErrors = multierror.Append(sendErrors, err) - } - - return sendErrors.ErrorOrNil() -} - -// chooseCollectionNodes finds a random subset of size sampleSize of collection node addresses from the -// collection node cluster responsible for the given tx -func (b *backendTransactions) chooseCollectionNodes(tx *flow.TransactionBody, sampleSize uint) ([]string, error) { - - // retrieve the set of collector clusters - clusters, err := b.state.Final().Epochs().Current().Clustering() - if err != nil { - return nil, fmt.Errorf("could not cluster collection nodes: %w", err) - } - - // get the cluster responsible for the transaction - txCluster, ok := clusters.ByTxID(tx.ID()) - if !ok { - return nil, fmt.Errorf("could not get local cluster by txID: %x", tx.ID()) - } - - // select a random subset of collection nodes from the cluster to be tried in order - targetNodes := txCluster.Sample(sampleSize) - - // collect the addresses of all the chosen collection nodes - var targetAddrs = make([]string, len(targetNodes)) - for i, id := range targetNodes { - targetAddrs[i] = id.Address - } - - return targetAddrs, nil -} - -// sendTransactionToCollection sends the transaction to the given collection node via grpc -func (b *backendTransactions) sendTransactionToCollector(ctx context.Context, - tx *flow.TransactionBody, - collectionNodeAddr string) error { - - collectionRPC, closer, err := b.connFactory.GetAccessAPIClient(collectionNodeAddr) - if err != nil { - return fmt.Errorf("failed to connect to collection node at %s: %w", collectionNodeAddr, err) - } - defer closer.Close() - - err = b.grpcTxSend(ctx, collectionRPC, tx) - if err != nil { - if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateAccessAPIClient(collectionNodeAddr) - } - return fmt.Errorf("failed to send transaction to collection node at %s: %w", collectionNodeAddr, err) - } - return nil -} - -func (b *backendTransactions) grpcTxSend(ctx context.Context, client accessproto.AccessAPIClient, tx *flow.TransactionBody) error { - colReq := &accessproto.SendTransactionRequest{ - Transaction: convert.TransactionToMessage(*tx), - } - - clientDeadline := time.Now().Add(time.Duration(2) * time.Second) - ctx, cancel := context.WithDeadline(ctx, clientDeadline) - defer cancel() - - _, err := 
client.SendTransaction(ctx, colReq) - return err -} - -// SendRawTransaction sends a raw transaction to the collection node -func (b *backendTransactions) SendRawTransaction( - ctx context.Context, - tx *flow.TransactionBody, -) error { - - // send the transaction to the collection node - return b.trySendTransaction(ctx, tx) -} - -func (b *backendTransactions) GetTransaction(ctx context.Context, txID flow.Identifier) (*flow.TransactionBody, error) { - // look up transaction from storage - tx, err := b.transactions.ByID(txID) - txErr := rpc.ConvertStorageError(err) - - if txErr != nil { - if status.Code(txErr) == codes.NotFound { - return b.getHistoricalTransaction(ctx, txID) - } - // Other Error trying to retrieve the transaction, return with err - return nil, txErr - } - - return tx, nil -} - -func (b *backendTransactions) GetTransactionsByBlockID( - ctx context.Context, - blockID flow.Identifier, -) ([]*flow.TransactionBody, error) { - var transactions []*flow.TransactionBody - - // TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID - block, err := b.blocks.ByID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - for _, guarantee := range block.Payload.Guarantees { - collection, err := b.collections.ByID(guarantee.CollectionID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - transactions = append(transactions, collection.Transactions...) - } - - systemTx, err := blueprints.SystemChunkTransaction(b.chainID.Chain()) - if err != nil { - return nil, status.Errorf(codes.Internal, "could not get system chunk transaction: %v", err) - } - - transactions = append(transactions, systemTx) - - return transactions, nil -} - -func (b *backendTransactions) GetTransactionResult( - ctx context.Context, - txID flow.Identifier, - blockID flow.Identifier, - collectionID flow.Identifier, -) (*access.TransactionResult, error) { - // look up transaction from storage - start := time.Now() - tx, err := b.transactions.ByID(txID) - - txErr := rpc.ConvertStorageError(err) - if txErr != nil { - if status.Code(txErr) == codes.NotFound { - // Tx not found. If we have historical Sporks setup, lets look through those as well - historicalTxResult, err := b.getHistoricalTransactionResult(ctx, txID) - if err != nil { - // if tx not found in old access nodes either, then assume that the tx was submitted to a different AN - // and return status as unknown - txStatus := flow.TransactionStatusUnknown - return &access.TransactionResult{ - Status: txStatus, - StatusCode: uint(txStatus), - }, nil - } - return historicalTxResult, nil - } - return nil, txErr - } - - block, err := b.retrieveBlock(blockID, collectionID, txID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - var transactionWasExecuted bool - var events []flow.Event - var txError string - var statusCode uint32 - var blockHeight uint64 - - // access node may not have the block if it hasn't yet been finalized, hence block can be nil at this point - if block != nil { - blockID = block.ID() - transactionWasExecuted, events, statusCode, txError, err = b.lookupTransactionResult(ctx, txID, blockID) - blockHeight = block.Header.Height - if err != nil { - return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) - } - - // an additional check to ensure the correctness of the collection ID. 
- expectedCollectionID, err := b.lookupCollectionIDInBlock(block, txID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - if collectionID == flow.ZeroID { - collectionID = expectedCollectionID - } else if collectionID != expectedCollectionID { - return nil, status.Error(codes.InvalidArgument, "transaction not found in provided collection") - } - } - - // derive status of the transaction - txStatus, err := b.deriveTransactionStatus(tx, transactionWasExecuted, block) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - b.transactionMetrics.TransactionResultFetched(time.Since(start), len(tx.Script)) - - return &access.TransactionResult{ - Status: txStatus, - StatusCode: uint(statusCode), - Events: events, - ErrorMessage: txError, - BlockID: blockID, - TransactionID: txID, - CollectionID: collectionID, - BlockHeight: blockHeight, - }, nil -} - -// lookupCollectionIDInBlock returns the collection ID based on the transaction ID. The lookup is performed in block -// collections. -func (b *backendTransactions) lookupCollectionIDInBlock( - block *flow.Block, - txID flow.Identifier, -) (flow.Identifier, error) { - for _, guarantee := range block.Payload.Guarantees { - collection, err := b.collections.LightByID(guarantee.ID()) - if err != nil { - return flow.ZeroID, err - } - - for _, collectionTxID := range collection.Transactions { - if collectionTxID == txID { - return collection.ID(), nil - } - } - } - return flow.ZeroID, status.Error(codes.NotFound, "transaction not found in block") -} - -// retrieveBlock function returns a block based on the input argument. The block ID lookup has the highest priority, -// followed by the collection ID lookup. If both are missing, the default lookup by transaction ID is performed. -func (b *backendTransactions) retrieveBlock( - blockID flow.Identifier, - collectionID flow.Identifier, - txID flow.Identifier, -) (*flow.Block, error) { - if blockID != flow.ZeroID { - return b.blocks.ByID(blockID) - } - - if collectionID != flow.ZeroID { - return b.blocks.ByCollectionID(collectionID) - } - - // find the block for the transaction - block, err := b.lookupBlock(txID) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return nil, err - } - - return block, nil -} - -func (b *backendTransactions) GetTransactionResultsByBlockID( - ctx context.Context, - blockID flow.Identifier, -) ([]*access.TransactionResult, error) { - // TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID - block, err := b.blocks.ByID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - req := &execproto.GetTransactionsByBlockIDRequest{ - BlockId: blockID[:], - } - execNodes, err := executionNodesForBlockID(ctx, blockID, b.executionReceipts, b.state, b.log) - if err != nil { - if IsInsufficientExecutionReceipts(err) { - return nil, status.Errorf(codes.NotFound, err.Error()) - } - return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) - } - - resp, err := b.getTransactionResultsByBlockIDFromAnyExeNode(ctx, execNodes, req) - if err != nil { - return nil, rpc.ConvertError(err, "failed to retrieve result from execution node", codes.Internal) - } - - results := make([]*access.TransactionResult, 0, len(resp.TransactionResults)) - i := 0 - errInsufficientResults := status.Errorf( - codes.Internal, - "number of transaction results returned by execution node is less than the number of transactions in the block", - ) - - for _, guarantee := range 
block.Payload.Guarantees { - collection, err := b.collections.LightByID(guarantee.CollectionID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - for _, txID := range collection.Transactions { - // bounds check. this means the EN returned fewer transaction results than the transactions in the block - if i >= len(resp.TransactionResults) { - return nil, errInsufficientResults - } - txResult := resp.TransactionResults[i] - - // tx body is irrelevant to status if it's in an executed block - txStatus, err := b.deriveTransactionStatus(nil, true, block) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - results = append(results, &access.TransactionResult{ - Status: txStatus, - StatusCode: uint(txResult.GetStatusCode()), - Events: convert.MessagesToEvents(txResult.GetEvents()), - ErrorMessage: txResult.GetErrorMessage(), - BlockID: blockID, - TransactionID: txID, - CollectionID: guarantee.CollectionID, - BlockHeight: block.Header.Height, - }) - - i++ - } - } - - // after iterating through all transactions in each collection, i equals the total number of - // user transactions in the block - txCount := i - - sporkRootBlockHeight, err := b.state.Params().SporkRootBlockHeight() - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to retrieve root block: %v", err) - } - - // root block has no system transaction result - if block.Header.Height > sporkRootBlockHeight { - // system chunk transaction - - // resp.TransactionResults includes the system tx result, so there should be exactly one - // more result than txCount - if txCount != len(resp.TransactionResults)-1 { - if txCount >= len(resp.TransactionResults) { - return nil, errInsufficientResults - } - // otherwise there are extra results - // TODO(bft): slashable offense - return nil, status.Errorf(codes.Internal, "number of transaction results returned by execution node is more than the number of transactions in the block") - } - - systemTx, err := blueprints.SystemChunkTransaction(b.chainID.Chain()) - if err != nil { - return nil, status.Errorf(codes.Internal, "could not get system chunk transaction: %v", err) - } - systemTxResult := resp.TransactionResults[len(resp.TransactionResults)-1] - systemTxStatus, err := b.deriveTransactionStatus(systemTx, true, block) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - results = append(results, &access.TransactionResult{ - Status: systemTxStatus, - StatusCode: uint(systemTxResult.GetStatusCode()), - Events: convert.MessagesToEvents(systemTxResult.GetEvents()), - ErrorMessage: systemTxResult.GetErrorMessage(), - BlockID: blockID, - TransactionID: systemTx.ID(), - BlockHeight: block.Header.Height, - }) - } - - return results, nil -} - -// GetTransactionResultByIndex returns TransactionsResults for an index in a block that is executed, -// pending or finalized transactions return errors -func (b *backendTransactions) GetTransactionResultByIndex( - ctx context.Context, - blockID flow.Identifier, - index uint32, -) (*access.TransactionResult, error) { - // TODO: https://github.com/onflow/flow-go/issues/2175 so caching doesn't cause a circular dependency - block, err := b.blocks.ByID(blockID) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - // create request and forward to EN - req := &execproto.GetTransactionByIndexRequest{ - BlockId: blockID[:], - Index: index, - } - execNodes, err := executionNodesForBlockID(ctx, blockID, b.executionReceipts, b.state, b.log) - if err != nil { - if IsInsufficientExecutionReceipts(err) 
{ - return nil, status.Errorf(codes.NotFound, err.Error()) - } - return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) - } - - resp, err := b.getTransactionResultByIndexFromAnyExeNode(ctx, execNodes, req) - if err != nil { - return nil, rpc.ConvertError(err, "failed to retrieve result from execution node", codes.Internal) - } - - // tx body is irrelevant to status if it's in an executed block - txStatus, err := b.deriveTransactionStatus(nil, true, block) - if err != nil { - return nil, rpc.ConvertStorageError(err) - } - - // convert to response, cache and return - return &access.TransactionResult{ - Status: txStatus, - StatusCode: uint(resp.GetStatusCode()), - Events: convert.MessagesToEvents(resp.GetEvents()), - ErrorMessage: resp.GetErrorMessage(), - BlockID: blockID, - BlockHeight: block.Header.Height, - }, nil -} - -// deriveTransactionStatus derives the transaction status based on current protocol state -func (b *backendTransactions) deriveTransactionStatus( - tx *flow.TransactionBody, - executed bool, - block *flow.Block, -) (flow.TransactionStatus, error) { - - if block == nil { - // Not in a block, let's see if it's expired - referenceBlock, err := b.state.AtBlockID(tx.ReferenceBlockID).Head() - if err != nil { - return flow.TransactionStatusUnknown, err - } - refHeight := referenceBlock.Height - // get the latest finalized block from the state - finalized, err := b.state.Final().Head() - if err != nil { - return flow.TransactionStatusUnknown, err - } - finalizedHeight := finalized.Height - - // if we haven't seen the expiry block for this transaction, it's not expired - if !b.isExpired(refHeight, finalizedHeight) { - return flow.TransactionStatusPending, nil - } - - // At this point, we have seen the expiry block for the transaction. - // This means that, if no collections prior to the expiry block contain - // the transaction, it can never be included and is expired. - // - // To ensure this, we need to have received all collections up to the - // expiry block to ensure the transaction did not appear in any. 
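(Aside, not part of the diff: the expiry rule spelled out across the deleted comments here is easy to lose track of, so the following condensed sketch shows the decision as a single function. The helper names are illustrative; flow.DefaultTransactionExpiry, flow.TransactionStatusPending, and flow.TransactionStatusExpired are the real identifiers.)

	package sketch

	import "github.com/onflow/flow-go/model/flow"

	// pendingOrExpired condenses the rule: a transaction is expired only once
	// (a) its expiry block has been finalized AND (b) every collection up to
	// that expiry block has been received without containing the transaction.
	func pendingOrExpired(refHeight, finalizedHeight, lastFullHeight uint64) flow.TransactionStatus {
		expiredAt := func(compareTo uint64) bool {
			return compareTo > refHeight && compareTo-refHeight > flow.DefaultTransactionExpiry
		}
		if !expiredAt(finalizedHeight) {
			return flow.TransactionStatusPending // the expiry block has not been finalized yet
		}
		if expiredAt(lastFullHeight) {
			return flow.TransactionStatusExpired // all relevant collections seen, tx absent
		}
		return flow.TransactionStatusPending // still waiting for missing collections
	}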
- - // the last full height is the height where we have received all - // collections for all blocks with a lower height - fullHeight, err := b.blocks.GetLastFullBlockHeight() - if err != nil { - return flow.TransactionStatusUnknown, err - } - - // if we have received collections for all blocks up to the expiry block, the transaction is expired - if b.isExpired(refHeight, fullHeight) { - return flow.TransactionStatusExpired, nil - } - - // tx found in transaction storage and collection storage but not in block storage - // However, this will not happen as of now since the ingestion engine doesn't subscribe - // for collections - return flow.TransactionStatusPending, nil - } - - if !executed { - // If we've gotten here, but the block has not yet been executed, report it as only been finalized - return flow.TransactionStatusFinalized, nil - } - - // From this point on, we know for sure this transaction has at least been executed - - // get the latest sealed block from the state - sealed, err := b.state.Sealed().Head() - if err != nil { - return flow.TransactionStatusUnknown, err - } - - if block.Header.Height > sealed.Height { - // The block is not yet sealed, so we'll report it as only executed - return flow.TransactionStatusExecuted, nil - } - - // otherwise, this block has been executed, and sealed, so report as sealed - return flow.TransactionStatusSealed, nil -} - -// isExpired checks whether a transaction is expired given the height of the -// transaction's reference block and the height to compare against. -func (b *backendTransactions) isExpired(refHeight, compareToHeight uint64) bool { - if compareToHeight <= refHeight { - return false - } - return compareToHeight-refHeight > flow.DefaultTransactionExpiry -} - -func (b *backendTransactions) lookupBlock(txID flow.Identifier) (*flow.Block, error) { - - collection, err := b.collections.LightByTransactionID(txID) - if err != nil { - return nil, err - } - - block, err := b.blocks.ByCollectionID(collection.ID()) - if err != nil { - return nil, err - } - - return block, nil -} - -func (b *backendTransactions) lookupTransactionResult( - ctx context.Context, - txID flow.Identifier, - blockID flow.Identifier, -) (bool, []flow.Event, uint32, string, error) { - - events, txStatus, message, err := b.getTransactionResultFromExecutionNode(ctx, blockID, txID[:]) - if err != nil { - // if either the execution node reported no results or the execution node could not be chosen - if status.Code(err) == codes.NotFound { - // No result yet, indicate that it has not been executed - return false, nil, 0, "", nil - } - // Other Error trying to retrieve the result, return with err - return false, nil, 0, "", err - } - - // considered executed as long as some result is returned, even if it's an error message - return true, events, txStatus, message, nil -} - -func (b *backendTransactions) getHistoricalTransaction( - ctx context.Context, - txID flow.Identifier, -) (*flow.TransactionBody, error) { - for _, historicalNode := range b.previousAccessNodes { - txResp, err := historicalNode.GetTransaction(ctx, &accessproto.GetTransactionRequest{Id: txID[:]}) - if err == nil { - tx, err := convert.MessageToTransaction(txResp.Transaction, b.chainID.Chain()) - if err != nil { - return nil, status.Errorf(codes.Internal, "could not convert transaction: %v", err) - } - - // Found on a historical node. 
Report - return &tx, nil - } - // Otherwise, if not found, just continue - if status.Code(err) == codes.NotFound { - continue - } - // TODO should we do something if the error isn't not found? - } - return nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", txID) -} - -func (b *backendTransactions) getHistoricalTransactionResult( - ctx context.Context, - txID flow.Identifier, -) (*access.TransactionResult, error) { - for _, historicalNode := range b.previousAccessNodes { - result, err := historicalNode.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{Id: txID[:]}) - if err == nil { - // Found on a historical node. Report - if result.GetStatus() == entities.TransactionStatus_UNKNOWN { - // We've moved to returning Status UNKNOWN instead of an error with the NotFound status, - // Therefore we should continue and look at the next access node for answers. - continue - } - - if result.GetStatus() == entities.TransactionStatus_PENDING { - // This is on a historical node. No transactions from it will ever be - // executed, therefore we should consider this expired - result.Status = entities.TransactionStatus_EXPIRED - } - - return access.MessageToTransactionResult(result), nil - } - // Otherwise, if not found, just continue - if status.Code(err) == codes.NotFound { - continue - } - // TODO should we do something if the error isn't not found? - } - return nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", txID) -} - -func (b *backendTransactions) registerTransactionForRetry(tx *flow.TransactionBody) { - referenceBlock, err := b.state.AtBlockID(tx.ReferenceBlockID).Head() - if err != nil { - return - } - - b.retry.RegisterTransaction(referenceBlock.Height, tx) -} - -func (b *backendTransactions) getTransactionResultFromExecutionNode( - ctx context.Context, - blockID flow.Identifier, - transactionID []byte, -) ([]flow.Event, uint32, string, error) { - - // create an execution API request for events at blockID and transactionID - req := &execproto.GetTransactionResultRequest{ - BlockId: blockID[:], - TransactionId: transactionID, - } - - execNodes, err := executionNodesForBlockID(ctx, blockID, b.executionReceipts, b.state, b.log) - if err != nil { - // if no execution receipt were found, return a NotFound GRPC error - if IsInsufficientExecutionReceipts(err) { - return nil, 0, "", status.Errorf(codes.NotFound, err.Error()) - } - return nil, 0, "", err - } - - resp, err := b.getTransactionResultFromAnyExeNode(ctx, execNodes, req) - if err != nil { - return nil, 0, "", err - } - - events := convert.MessagesToEvents(resp.GetEvents()) - - return events, resp.GetStatusCode(), resp.GetErrorMessage(), nil -} - -func (b *backendTransactions) NotifyFinalizedBlockHeight(height uint64) { - b.retry.Retry(height) -} - -func (b *backendTransactions) getTransactionResultFromAnyExeNode( - ctx context.Context, - execNodes flow.IdentityList, - req *execproto.GetTransactionResultRequest, -) (*execproto.GetTransactionResultResponse, error) { - var errs *multierror.Error - logAnyError := func() { - errToReturn := errs.ErrorOrNil() - if errToReturn != nil { - b.log.Info().Err(errToReturn).Msg("failed to get transaction result from execution nodes") - } - } - defer logAnyError() - // try to execute the script on one of the execution nodes - for _, execNode := range execNodes { - resp, err := b.tryGetTransactionResult(ctx, execNode, req) - if err == nil { - b.log.Debug(). - Str("execution_node", execNode.String()). - Hex("block_id", req.GetBlockId()). 
- Hex("transaction_id", req.GetTransactionId()). - Msg("Successfully got transaction results from any node") - return resp, nil - } - if status.Code(err) == codes.NotFound { - return nil, err - } - errs = multierror.Append(errs, err) - } - - return nil, errs.ErrorOrNil() -} - -func (b *backendTransactions) tryGetTransactionResult( - ctx context.Context, - execNode *flow.Identity, - req *execproto.GetTransactionResultRequest, -) (*execproto.GetTransactionResultResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - - resp, err := execRPCClient.GetTransactionResult(ctx, req) - if err != nil { - if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) - } - return nil, err - } - - return resp, nil -} - -func (b *backendTransactions) getTransactionResultsByBlockIDFromAnyExeNode( - ctx context.Context, - execNodes flow.IdentityList, - req *execproto.GetTransactionsByBlockIDRequest, -) (*execproto.GetTransactionResultsResponse, error) { - var errs *multierror.Error - - defer func() { - // log the errors - if err := errs.ErrorOrNil(); err != nil { - b.log.Err(errs).Msg("failed to get transaction results from execution nodes") - } - }() - - // if we were passed 0 execution nodes add a specific error - if len(execNodes) == 0 { - return nil, errors.New("zero execution nodes") - } - - for _, execNode := range execNodes { - resp, err := b.tryGetTransactionResultsByBlockID(ctx, execNode, req) - if err == nil { - b.log.Debug(). - Str("execution_node", execNode.String()). - Hex("block_id", req.GetBlockId()). - Msg("Successfully got transaction results from any node") - return resp, nil - } - if status.Code(err) == codes.NotFound { - return nil, err - } - errs = multierror.Append(errs, err) - } - - return nil, errs.ErrorOrNil() -} - -func (b *backendTransactions) tryGetTransactionResultsByBlockID( - ctx context.Context, - execNode *flow.Identity, - req *execproto.GetTransactionsByBlockIDRequest, -) (*execproto.GetTransactionResultsResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - - resp, err := execRPCClient.GetTransactionResultsByBlockID(ctx, req) - if err != nil { - if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) - } - return nil, err - } - - return resp, nil -} - -func (b *backendTransactions) getTransactionResultByIndexFromAnyExeNode( - ctx context.Context, - execNodes flow.IdentityList, - req *execproto.GetTransactionByIndexRequest, -) (*execproto.GetTransactionResultResponse, error) { - var errs *multierror.Error - logAnyError := func() { - errToReturn := errs.ErrorOrNil() - if errToReturn != nil { - b.log.Info().Err(errToReturn).Msg("failed to get transaction result from execution nodes") - } - } - defer logAnyError() - - if len(execNodes) == 0 { - return nil, errors.New("zero execution nodes provided") - } - - // try to execute the script on one of the execution nodes - for _, execNode := range execNodes { - resp, err := b.tryGetTransactionResultByIndex(ctx, execNode, req) - if err == nil { - b.log.Debug(). - Str("execution_node", execNode.String()). - Hex("block_id", req.GetBlockId()). - Uint32("index", req.GetIndex()). 
- Msg("Successfully got transaction results from any node") - return resp, nil - } - if status.Code(err) == codes.NotFound { - return nil, err - } - errs = multierror.Append(errs, err) - } - - return nil, errs.ErrorOrNil() -} - -func (b *backendTransactions) tryGetTransactionResultByIndex( - ctx context.Context, - execNode *flow.Identity, - req *execproto.GetTransactionByIndexRequest, -) (*execproto.GetTransactionResultResponse, error) { - execRPCClient, closer, err := b.connFactory.GetExecutionAPIClient(execNode.Address) - if err != nil { - return nil, err - } - defer closer.Close() - - resp, err := execRPCClient.GetTransactionResultByIndex(ctx, req) - if err != nil { - if status.Code(err) == codes.Unavailable { - b.connFactory.InvalidateExecutionAPIClient(execNode.Address) - } - return nil, err - } - - return resp, nil -} diff --git a/engine/access/rpc/backend/common/consts.go b/engine/access/rpc/backend/common/consts.go new file mode 100644 index 00000000000..f4a379a8e00 --- /dev/null +++ b/engine/access/rpc/backend/common/consts.go @@ -0,0 +1,5 @@ +package common + +// DefaultLoggedScriptsCacheSize is the default size of the lookup cache used to dedupe logs of scripts sent to ENs +// limiting cache size to 16MB and does not affect script execution, only for keeping logs tidy +const DefaultLoggedScriptsCacheSize = 1_000_000 diff --git a/engine/access/rpc/backend/common/errors.go b/engine/access/rpc/backend/common/errors.go new file mode 100644 index 00000000000..51de9fbfcba --- /dev/null +++ b/engine/access/rpc/backend/common/errors.go @@ -0,0 +1,29 @@ +package common + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/model/flow" +) + +// InsufficientExecutionReceipts indicates that no execution receipts were found for a given block ID +type InsufficientExecutionReceipts struct { + blockID flow.Identifier + receiptCount int +} + +func NewInsufficientExecutionReceipts(blockID flow.Identifier, receiptCount int) InsufficientExecutionReceipts { + return InsufficientExecutionReceipts{blockID: blockID, receiptCount: receiptCount} +} + +var _ error = (*InsufficientExecutionReceipts)(nil) + +func (e InsufficientExecutionReceipts) Error() string { + return fmt.Sprintf("insufficient execution receipts found (%d) for block ID: %s", e.receiptCount, e.blockID.String()) +} + +func IsInsufficientExecutionReceipts(err error) bool { + var errInsufficientExecutionReceipts InsufficientExecutionReceipts + return errors.As(err, &errInsufficientExecutionReceipts) +} diff --git a/engine/access/rpc/backend/common/height_error.go b/engine/access/rpc/backend/common/height_error.go new file mode 100644 index 00000000000..d9759cc7f2a --- /dev/null +++ b/engine/access/rpc/backend/common/height_error.go @@ -0,0 +1,45 @@ +package common + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// ResolveHeightError processes errors returned during height-based queries. +// If the error is due to a block not being found, this function determines whether the queried +// height falls outside the node's accessible range and provides context-sensitive error messages +// based on spork and node root block heights. +// +// Expected errors during normal operation: +// - storage.ErrNotFound - Indicates that the queried block does not exist in the local database. 
+func ResolveHeightError( + stateParams protocol.Params, + height uint64, + genericErr error, +) error { + if !errors.Is(genericErr, storage.ErrNotFound) { + return genericErr + } + + sporkRootBlockHeight := stateParams.SporkRootBlockHeight() + nodeRootBlockHeader := stateParams.SealedRoot().Height + + if height < sporkRootBlockHeight { + return fmt.Errorf("block height %d is less than the spork root block height %d. Try to use a historic node: %w", + height, + sporkRootBlockHeight, + genericErr, + ) + } else if height < nodeRootBlockHeader { + return fmt.Errorf("block height %d is less than the node's root block height %d. Try to use a different Access node: %w", + height, + nodeRootBlockHeader, + genericErr, + ) + } else { + return genericErr + } +} diff --git a/engine/access/rpc/backend/config.go b/engine/access/rpc/backend/config.go new file mode 100644 index 00000000000..5564f275704 --- /dev/null +++ b/engine/access/rpc/backend/config.go @@ -0,0 +1,20 @@ +package backend + +import ( + "github.com/onflow/flow-go/engine/access/rpc/connection" +) + +// Config defines the configurable options for creating Backend +type Config struct { + AccessConfig connection.Config // access API GRPC client config + ExecutionConfig connection.Config // execution API GRPC client config + CollectionConfig connection.Config // collection API GRPC client config + ConnectionPoolSize uint // size of the cache for storing collection and execution connections + MaxHeightRange uint // max size of height range requests + PreferredExecutionNodeIDs []string // preferred list of upstream execution node IDs + FixedExecutionNodeIDs []string // fixed list of execution node IDs to choose from if no node ID can be chosen from the PreferredExecutionNodeIDs + CircuitBreakerConfig connection.CircuitBreakerConfig // the configuration for circuit breaker + ScriptExecutionMode string // the mode in which scripts are executed + EventQueryMode string // the mode in which events are queried + TxResultQueryMode string // the mode in which tx results are queried +} diff --git a/engine/access/rpc/backend/connection_factory.go b/engine/access/rpc/backend/connection_factory.go deleted file mode 100644 index 63ead3d3e32..00000000000 --- a/engine/access/rpc/backend/connection_factory.go +++ /dev/null @@ -1,276 +0,0 @@ -package backend - -import ( - "context" - "fmt" - "io" - "net" - "sync" - "time" - - lru "github.com/hashicorp/golang-lru" - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/rs/zerolog" - "google.golang.org/grpc" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/keepalive" - - "github.com/onflow/flow-go/module" -) - -// DefaultClientTimeout is used when making a GRPC request to a collection node or an execution node -const DefaultClientTimeout = 3 * time.Second - -// ConnectionFactory is used to create an access api client -type ConnectionFactory interface { - GetAccessAPIClient(address string) (access.AccessAPIClient, io.Closer, error) - InvalidateAccessAPIClient(address string) - GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, io.Closer, error) - InvalidateExecutionAPIClient(address string) -} - -type ProxyConnectionFactory struct { - ConnectionFactory - targetAddress string -} - -type noopCloser struct{} - -func (c *noopCloser) Close() error { - return nil -} - -func (p *ProxyConnectionFactory) GetAccessAPIClient(address string) (access.AccessAPIClient, io.Closer, 
error) { - return p.ConnectionFactory.GetAccessAPIClient(p.targetAddress) -} - -func (p *ProxyConnectionFactory) GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, io.Closer, error) { - return p.ConnectionFactory.GetExecutionAPIClient(p.targetAddress) -} - -type ConnectionFactoryImpl struct { - CollectionGRPCPort uint - ExecutionGRPCPort uint - CollectionNodeGRPCTimeout time.Duration - ExecutionNodeGRPCTimeout time.Duration - ConnectionsCache *lru.Cache - CacheSize uint - MaxMsgSize uint - AccessMetrics module.AccessMetrics - Log zerolog.Logger - mutex sync.Mutex -} - -type CachedClient struct { - ClientConn *grpc.ClientConn - Address string - mutex sync.Mutex - timeout time.Duration -} - -// createConnection creates new gRPC connections to remote node -func (cf *ConnectionFactoryImpl) createConnection(address string, timeout time.Duration) (*grpc.ClientConn, error) { - - if timeout == 0 { - timeout = DefaultClientTimeout - } - - keepaliveParams := keepalive.ClientParameters{ - // how long the client will wait before sending a keepalive to the server if there is no activity - Time: 10 * time.Second, - // how long the client will wait for a response from the keepalive before closing - Timeout: timeout, - } - - // ClientConn's default KeepAlive on connections is indefinite, assuming the timeout isn't reached - // The connections should be safe to be persisted and reused - // https://pkg.go.dev/google.golang.org/grpc#WithKeepaliveParams - // https://grpc.io/blog/grpc-on-http2/#keeping-connections-alive - conn, err := grpc.Dial( - address, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(cf.MaxMsgSize))), - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithKeepaliveParams(keepaliveParams), - WithClientUnaryInterceptor(timeout)) - if err != nil { - return nil, fmt.Errorf("failed to connect to address %s: %w", address, err) - } - return conn, nil -} - -func (cf *ConnectionFactoryImpl) retrieveConnection(grpcAddress string, timeout time.Duration) (*grpc.ClientConn, error) { - var conn *grpc.ClientConn - var store *CachedClient - cacheHit := false - cf.mutex.Lock() - if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { - cacheHit = true - store = res.(*CachedClient) - conn = store.ClientConn - } else { - store = &CachedClient{ - ClientConn: nil, - Address: grpcAddress, - timeout: timeout, - } - cf.Log.Debug().Str("cached_client_added", grpcAddress).Msg("adding new cached client to pool") - cf.ConnectionsCache.Add(grpcAddress, store) - if cf.AccessMetrics != nil { - cf.AccessMetrics.ConnectionAddedToPool() - } - } - cf.mutex.Unlock() - store.mutex.Lock() - defer store.mutex.Unlock() - - if conn == nil || conn.GetState() == connectivity.Shutdown { - var err error - conn, err = cf.createConnection(grpcAddress, timeout) - if err != nil { - return nil, err - } - store.ClientConn = conn - if cf.AccessMetrics != nil { - if cacheHit { - cf.AccessMetrics.ConnectionFromPoolUpdated() - } - cf.AccessMetrics.NewConnectionEstablished() - cf.AccessMetrics.TotalConnectionsInPool(uint(cf.ConnectionsCache.Len()), cf.CacheSize) - } - } else if cf.AccessMetrics != nil { - cf.AccessMetrics.ConnectionFromPoolReused() - } - return conn, nil -} - -func (cf *ConnectionFactoryImpl) GetAccessAPIClient(address string) (access.AccessAPIClient, io.Closer, error) { - - grpcAddress, err := getGRPCAddress(address, cf.CollectionGRPCPort) - if err != nil { - return nil, nil, err - } - - var conn *grpc.ClientConn - if cf.ConnectionsCache != nil { - conn, err = 
cf.retrieveConnection(grpcAddress, cf.CollectionNodeGRPCTimeout) - if err != nil { - return nil, nil, err - } - return access.NewAccessAPIClient(conn), &noopCloser{}, err - } - - conn, err = cf.createConnection(grpcAddress, cf.CollectionNodeGRPCTimeout) - if err != nil { - return nil, nil, err - } - - accessAPIClient := access.NewAccessAPIClient(conn) - closer := io.Closer(conn) - return accessAPIClient, closer, nil -} - -func (cf *ConnectionFactoryImpl) InvalidateAccessAPIClient(address string) { - if cf.ConnectionsCache != nil { - cf.Log.Debug().Str("cached_access_client_invalidated", address).Msg("invalidating cached access client") - cf.invalidateAPIClient(address, cf.CollectionGRPCPort) - } -} - -func (cf *ConnectionFactoryImpl) GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, io.Closer, error) { - - grpcAddress, err := getGRPCAddress(address, cf.ExecutionGRPCPort) - if err != nil { - return nil, nil, err - } - - var conn *grpc.ClientConn - if cf.ConnectionsCache != nil { - conn, err = cf.retrieveConnection(grpcAddress, cf.ExecutionNodeGRPCTimeout) - if err != nil { - return nil, nil, err - } - return execution.NewExecutionAPIClient(conn), &noopCloser{}, nil - } - - conn, err = cf.createConnection(grpcAddress, cf.ExecutionNodeGRPCTimeout) - if err != nil { - return nil, nil, err - } - - executionAPIClient := execution.NewExecutionAPIClient(conn) - closer := io.Closer(conn) - return executionAPIClient, closer, nil -} - -func (cf *ConnectionFactoryImpl) InvalidateExecutionAPIClient(address string) { - if cf.ConnectionsCache != nil { - cf.Log.Debug().Str("cached_execution_client_invalidated", address).Msg("invalidating cached execution client") - cf.invalidateAPIClient(address, cf.ExecutionGRPCPort) - } -} - -func (cf *ConnectionFactoryImpl) invalidateAPIClient(address string, port uint) { - grpcAddress, _ := getGRPCAddress(address, port) - if res, ok := cf.ConnectionsCache.Get(grpcAddress); ok { - store := res.(*CachedClient) - store.Close() - if cf.AccessMetrics != nil { - cf.AccessMetrics.ConnectionFromPoolInvalidated() - } - } -} - -func (s *CachedClient) Close() { - s.mutex.Lock() - conn := s.ClientConn - s.ClientConn = nil - s.mutex.Unlock() - if conn == nil { - return - } - // allow time for any existing requests to finish before closing the connection - time.Sleep(s.timeout + 1*time.Second) - conn.Close() -} - -// getExecutionNodeAddress translates flow.Identity address to the GRPC address of the node by switching the port to the -// GRPC port from the libp2p port -func getGRPCAddress(address string, grpcPort uint) (string, error) { - // split hostname and port - hostnameOrIP, _, err := net.SplitHostPort(address) - if err != nil { - return "", err - } - // use the hostname from identity list and port number as the one passed in as argument - grpcAddress := fmt.Sprintf("%s:%d", hostnameOrIP, grpcPort) - - return grpcAddress, nil -} - -func WithClientUnaryInterceptor(timeout time.Duration) grpc.DialOption { - - clientTimeoutInterceptor := func( - ctx context.Context, - method string, - req interface{}, - reply interface{}, - cc *grpc.ClientConn, - invoker grpc.UnaryInvoker, - opts ...grpc.CallOption, - ) error { - - // create a context that expires after timeout - ctxWithTimeout, cancel := context.WithTimeout(ctx, timeout) - - defer cancel() - - // call the remote GRPC using the short context - err := invoker(ctxWithTimeout, method, req, reply, cc, opts...) 
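	// Note (added for clarity; not a comment in the original file): context.WithTimeout
	// keeps whichever deadline is sooner, so a caller-supplied deadline shorter than
	// `timeout` still governs the invoker call above; the deferred cancel then only
	// releases the timer's resources once the call returns.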
- - return err - } - - return grpc.WithUnaryInterceptor(clientTimeoutInterceptor) -} diff --git a/engine/access/rpc/backend/connection_factory_test.go b/engine/access/rpc/backend/connection_factory_test.go deleted file mode 100644 index fa4801a5897..00000000000 --- a/engine/access/rpc/backend/connection_factory_test.go +++ /dev/null @@ -1,484 +0,0 @@ -package backend - -import ( - "context" - "fmt" - "net" - "strconv" - "strings" - "sync" - "testing" - "time" - - lru "github.com/hashicorp/golang-lru" - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/execution" - "github.com/stretchr/testify/assert" - testifymock "github.com/stretchr/testify/mock" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine/access/mock" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestProxyAccessAPI(t *testing.T) { - // create a collection node - cn := new(collectionNode) - cn.start(t) - defer cn.stop(t) - - req := &access.PingRequest{} - expected := &access.PingResponse{} - cn.handler.On("Ping", testifymock.Anything, req).Return(expected, nil) - - // create the factory - connectionFactory := new(ConnectionFactoryImpl) - // set the collection grpc port - connectionFactory.CollectionGRPCPort = cn.port - // set metrics reporting - connectionFactory.AccessMetrics = metrics.NewNoopCollector() - - proxyConnectionFactory := ProxyConnectionFactory{ - ConnectionFactory: connectionFactory, - targetAddress: cn.listener.Addr().String(), - } - - // get a collection API client - client, conn, err := proxyConnectionFactory.GetAccessAPIClient("foo") - defer conn.Close() - assert.NoError(t, err) - - ctx := context.Background() - // make the call to the collection node - resp, err := client.Ping(ctx, req) - assert.NoError(t, err) - assert.Equal(t, resp, expected) -} - -func TestProxyExecutionAPI(t *testing.T) { - // create an execution node - en := new(executionNode) - en.start(t) - defer en.stop(t) - - req := &execution.PingRequest{} - expected := &execution.PingResponse{} - en.handler.On("Ping", testifymock.Anything, req).Return(expected, nil) - - // create the factory - connectionFactory := new(ConnectionFactoryImpl) - // set the execution grpc port - connectionFactory.ExecutionGRPCPort = en.port - // set metrics reporting - connectionFactory.AccessMetrics = metrics.NewNoopCollector() - - proxyConnectionFactory := ProxyConnectionFactory{ - ConnectionFactory: connectionFactory, - targetAddress: en.listener.Addr().String(), - } - - // get an execution API client - client, _, err := proxyConnectionFactory.GetExecutionAPIClient("foo") - assert.NoError(t, err) - - ctx := context.Background() - // make the call to the execution node - resp, err := client.Ping(ctx, req) - assert.NoError(t, err) - assert.Equal(t, resp, expected) -} - -func TestProxyAccessAPIConnectionReuse(t *testing.T) { - // create a collection node - cn := new(collectionNode) - cn.start(t) - defer cn.stop(t) - - req := &access.PingRequest{} - expected := &access.PingResponse{} - cn.handler.On("Ping", testifymock.Anything, req).Return(expected, nil) - - // create the factory - connectionFactory := new(ConnectionFactoryImpl) - // set the collection grpc port - connectionFactory.CollectionGRPCPort = cn.port - // set the connection pool cache size - cacheSize := 5 - cache, _ := lru.NewWithEvict(cacheSize, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).Close() - }) - 
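	// Note (added for clarity; not a comment in the original tests): the eviction hook
	// passed to lru.NewWithEvict is what makes this pool safe. When an entry is evicted,
	// Close() shuts down the cached grpc.ClientConn, so connections and file descriptors
	// are not leaked once the pool holds fewer entries than there are upstream nodes.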
connectionFactory.ConnectionsCache = cache - connectionFactory.CacheSize = uint(cacheSize) - // set metrics reporting - connectionFactory.AccessMetrics = metrics.NewNoopCollector() - - proxyConnectionFactory := ProxyConnectionFactory{ - ConnectionFactory: connectionFactory, - targetAddress: cn.listener.Addr().String(), - } - - // get a collection API client - _, closer, err := proxyConnectionFactory.GetAccessAPIClient("foo") - assert.Equal(t, connectionFactory.ConnectionsCache.Len(), 1) - assert.NoError(t, err) - assert.Nil(t, closer.Close()) - - var conn *grpc.ClientConn - res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) - assert.True(t, ok) - conn = res.(*CachedClient).ClientConn - - // check if api client can be rebuilt with retrieved connection - accessAPIClient := access.NewAccessAPIClient(conn) - ctx := context.Background() - resp, err := accessAPIClient.Ping(ctx, req) - assert.NoError(t, err) - assert.Equal(t, resp, expected) -} - -func TestProxyExecutionAPIConnectionReuse(t *testing.T) { - // create an execution node - en := new(executionNode) - en.start(t) - defer en.stop(t) - - req := &execution.PingRequest{} - expected := &execution.PingResponse{} - en.handler.On("Ping", testifymock.Anything, req).Return(expected, nil) - - // create the factory - connectionFactory := new(ConnectionFactoryImpl) - // set the execution grpc port - connectionFactory.ExecutionGRPCPort = en.port - // set the connection pool cache size - cacheSize := 5 - cache, _ := lru.NewWithEvict(cacheSize, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).Close() - }) - connectionFactory.ConnectionsCache = cache - connectionFactory.CacheSize = uint(cacheSize) - // set metrics reporting - connectionFactory.AccessMetrics = metrics.NewNoopCollector() - - proxyConnectionFactory := ProxyConnectionFactory{ - ConnectionFactory: connectionFactory, - targetAddress: en.listener.Addr().String(), - } - - // get an execution API client - _, closer, err := proxyConnectionFactory.GetExecutionAPIClient("foo") - assert.Equal(t, connectionFactory.ConnectionsCache.Len(), 1) - assert.NoError(t, err) - assert.Nil(t, closer.Close()) - - var conn *grpc.ClientConn - res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) - assert.True(t, ok) - conn = res.(*CachedClient).ClientConn - - // check if api client can be rebuilt with retrieved connection - executionAPIClient := execution.NewExecutionAPIClient(conn) - ctx := context.Background() - resp, err := executionAPIClient.Ping(ctx, req) - assert.NoError(t, err) - assert.Equal(t, resp, expected) -} - -// TestExecutionNodeClientTimeout tests that the execution API client times out after the timeout duration -func TestExecutionNodeClientTimeout(t *testing.T) { - - timeout := 10 * time.Millisecond - - // create an execution node - en := new(executionNode) - en.start(t) - defer en.stop(t) - - // setup the handler mock to not respond within the timeout - req := &execution.PingRequest{} - resp := &execution.PingResponse{} - en.handler.On("Ping", testifymock.Anything, req).After(timeout+time.Second).Return(resp, nil) - - // create the factory - connectionFactory := new(ConnectionFactoryImpl) - // set the execution grpc port - connectionFactory.ExecutionGRPCPort = en.port - // set the execution grpc client timeout - connectionFactory.ExecutionNodeGRPCTimeout = timeout - // set the connection pool cache size - cacheSize := 5 - cache, _ := lru.NewWithEvict(cacheSize, func(_, evictedValue interface{}) { - 
evictedValue.(*CachedClient).Close() - }) - connectionFactory.ConnectionsCache = cache - connectionFactory.CacheSize = uint(cacheSize) - // set metrics reporting - connectionFactory.AccessMetrics = metrics.NewNoopCollector() - - // create the execution API client - client, _, err := connectionFactory.GetExecutionAPIClient(en.listener.Addr().String()) - assert.NoError(t, err) - - ctx := context.Background() - // make the call to the execution node - _, err = client.Ping(ctx, req) - - // assert that the client timed out - assert.Equal(t, codes.DeadlineExceeded, status.Code(err)) -} - -// TestCollectionNodeClientTimeout tests that the collection API client times out after the timeout duration -func TestCollectionNodeClientTimeout(t *testing.T) { - - timeout := 10 * time.Millisecond - - // create a collection node - cn := new(collectionNode) - cn.start(t) - defer cn.stop(t) - - // setup the handler mock to not respond within the timeout - req := &access.PingRequest{} - resp := &access.PingResponse{} - cn.handler.On("Ping", testifymock.Anything, req).After(timeout+time.Second).Return(resp, nil) - - // create the factory - connectionFactory := new(ConnectionFactoryImpl) - // set the collection grpc port - connectionFactory.CollectionGRPCPort = cn.port - // set the collection grpc client timeout - connectionFactory.CollectionNodeGRPCTimeout = timeout - // set the connection pool cache size - cacheSize := 5 - cache, _ := lru.NewWithEvict(cacheSize, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).Close() - }) - connectionFactory.ConnectionsCache = cache - connectionFactory.CacheSize = uint(cacheSize) - // set metrics reporting - connectionFactory.AccessMetrics = metrics.NewNoopCollector() - - // create the collection API client - client, _, err := connectionFactory.GetAccessAPIClient(cn.listener.Addr().String()) - assert.NoError(t, err) - - ctx := context.Background() - // make the call to the execution node - _, err = client.Ping(ctx, req) - - // assert that the client timed out - assert.Equal(t, codes.DeadlineExceeded, status.Code(err)) -} - -// TestConnectionPoolFull tests that the LRU cache replaces connections when full -func TestConnectionPoolFull(t *testing.T) { - // create a collection node - cn1, cn2, cn3 := new(collectionNode), new(collectionNode), new(collectionNode) - cn1.start(t) - cn2.start(t) - cn3.start(t) - defer cn1.stop(t) - defer cn2.stop(t) - defer cn3.stop(t) - - req := &access.PingRequest{} - expected := &access.PingResponse{} - cn1.handler.On("Ping", testifymock.Anything, req).Return(expected, nil) - cn2.handler.On("Ping", testifymock.Anything, req).Return(expected, nil) - cn3.handler.On("Ping", testifymock.Anything, req).Return(expected, nil) - - // create the factory - connectionFactory := new(ConnectionFactoryImpl) - // set the collection grpc port - connectionFactory.CollectionGRPCPort = cn1.port - // set the connection pool cache size - cacheSize := 2 - cache, _ := lru.NewWithEvict(cacheSize, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).Close() - }) - connectionFactory.ConnectionsCache = cache - connectionFactory.CacheSize = uint(cacheSize) - // set metrics reporting - connectionFactory.AccessMetrics = metrics.NewNoopCollector() - - cn1Address := "foo1:123" - cn2Address := "foo2:123" - cn3Address := "foo3:123" - - // get a collection API client - _, _, err := connectionFactory.GetAccessAPIClient(cn1Address) - assert.Equal(t, connectionFactory.ConnectionsCache.Len(), 1) - assert.NoError(t, err) - - _, _, err = 
connectionFactory.GetAccessAPIClient(cn2Address) - assert.Equal(t, connectionFactory.ConnectionsCache.Len(), 2) - assert.NoError(t, err) - - _, _, err = connectionFactory.GetAccessAPIClient(cn1Address) - assert.Equal(t, connectionFactory.ConnectionsCache.Len(), 2) - assert.NoError(t, err) - - // Expecting to replace cn2 because cn1 was accessed more recently - _, _, err = connectionFactory.GetAccessAPIClient(cn3Address) - assert.Equal(t, connectionFactory.ConnectionsCache.Len(), 2) - assert.NoError(t, err) - - var hostnameOrIP string - hostnameOrIP, _, err = net.SplitHostPort(cn1Address) - assert.NoError(t, err) - grpcAddress1 := fmt.Sprintf("%s:%d", hostnameOrIP, connectionFactory.CollectionGRPCPort) - hostnameOrIP, _, err = net.SplitHostPort(cn2Address) - assert.NoError(t, err) - grpcAddress2 := fmt.Sprintf("%s:%d", hostnameOrIP, connectionFactory.CollectionGRPCPort) - hostnameOrIP, _, err = net.SplitHostPort(cn3Address) - assert.NoError(t, err) - grpcAddress3 := fmt.Sprintf("%s:%d", hostnameOrIP, connectionFactory.CollectionGRPCPort) - - contains1 := connectionFactory.ConnectionsCache.Contains(grpcAddress1) - contains2 := connectionFactory.ConnectionsCache.Contains(grpcAddress2) - contains3 := connectionFactory.ConnectionsCache.Contains(grpcAddress3) - - assert.True(t, contains1) - assert.False(t, contains2) - assert.True(t, contains3) -} - -// TestConnectionPoolStale tests that a new connection will be established if the old one cached is stale -func TestConnectionPoolStale(t *testing.T) { - // create a collection node - cn := new(collectionNode) - cn.start(t) - defer cn.stop(t) - - req := &access.PingRequest{} - expected := &access.PingResponse{} - cn.handler.On("Ping", testifymock.Anything, req).Return(expected, nil) - - // create the factory - connectionFactory := new(ConnectionFactoryImpl) - // set the collection grpc port - connectionFactory.CollectionGRPCPort = cn.port - // set the connection pool cache size - cacheSize := 5 - cache, _ := lru.NewWithEvict(cacheSize, func(_, evictedValue interface{}) { - evictedValue.(*CachedClient).Close() - }) - connectionFactory.ConnectionsCache = cache - connectionFactory.CacheSize = uint(cacheSize) - // set metrics reporting - connectionFactory.AccessMetrics = metrics.NewNoopCollector() - - proxyConnectionFactory := ProxyConnectionFactory{ - ConnectionFactory: connectionFactory, - targetAddress: cn.listener.Addr().String(), - } - - // get a collection API client - client, _, err := proxyConnectionFactory.GetAccessAPIClient("foo") - assert.Equal(t, connectionFactory.ConnectionsCache.Len(), 1) - assert.NoError(t, err) - // close connection to simulate something "going wrong" with our stored connection - res, _ := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) - - res.(*CachedClient).Close() - - ctx := context.Background() - // make the call to the collection node (should fail, connection closed) - _, err = client.Ping(ctx, req) - assert.Error(t, err) - - // re-access, should replace stale connection in cache with new one - _, _, _ = proxyConnectionFactory.GetAccessAPIClient("foo") - assert.Equal(t, connectionFactory.ConnectionsCache.Len(), 1) - - var conn *grpc.ClientConn - res, ok := connectionFactory.ConnectionsCache.Get(proxyConnectionFactory.targetAddress) - assert.True(t, ok) - conn = res.(*CachedClient).ClientConn - - // check if api client can be rebuilt with retrieved connection - accessAPIClient := access.NewAccessAPIClient(conn) - ctx = context.Background() - resp, err := accessAPIClient.Ping(ctx, req) - 
assert.NoError(t, err) - assert.Equal(t, resp, expected) -} - -// node mocks a flow node that runs a GRPC server -type node struct { - server *grpc.Server - listener net.Listener - port uint -} - -func (n *node) setupNode(t *testing.T) { - n.server = grpc.NewServer() - listener, err := net.Listen("tcp4", unittest.DefaultAddress) - assert.NoError(t, err) - n.listener = listener - assert.Eventually(t, func() bool { - return !strings.HasSuffix(listener.Addr().String(), ":0") - }, time.Second*4, 10*time.Millisecond) - - _, port, err := net.SplitHostPort(listener.Addr().String()) - assert.NoError(t, err) - portAsUint, err := strconv.ParseUint(port, 10, 32) - assert.NoError(t, err) - n.port = uint(portAsUint) -} - -func (n *node) start(t *testing.T) { - // using a wait group here to ensure the goroutine has started before returning. Otherwise, - // there's a race condition where the server is sometimes stopped before it has started - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - wg.Done() - err := n.server.Serve(n.listener) - assert.NoError(t, err) - }() - unittest.RequireReturnsBefore(t, wg.Wait, 10*time.Millisecond, "could not start goroutine on time") -} - -func (n *node) stop(t *testing.T) { - if n.server != nil { - n.server.Stop() - } -} - -type executionNode struct { - node - handler *mock.ExecutionAPIServer -} - -func (en *executionNode) start(t *testing.T) { - en.setupNode(t) - handler := new(mock.ExecutionAPIServer) - execution.RegisterExecutionAPIServer(en.server, handler) - en.handler = handler - en.node.start(t) -} - -func (en *executionNode) stop(t *testing.T) { - en.node.stop(t) -} - -type collectionNode struct { - node - handler *mock.AccessAPIServer -} - -func (cn *collectionNode) start(t *testing.T) { - cn.setupNode(t) - handler := new(mock.AccessAPIServer) - access.RegisterAccessAPIServer(cn.server, handler) - cn.handler = handler - cn.node.start(t) -} - -func (cn *collectionNode) stop(t *testing.T) { - cn.node.stop(t) -} diff --git a/engine/access/rpc/backend/errors.go b/engine/access/rpc/backend/errors.go deleted file mode 100644 index 4752c6563ce..00000000000 --- a/engine/access/rpc/backend/errors.go +++ /dev/null @@ -1,23 +0,0 @@ -package backend - -import ( - "errors" - "fmt" - - "github.com/onflow/flow-go/model/flow" -) - -// InsufficientExecutionReceipts indicates that no execution receipt were found for a given block ID -type InsufficientExecutionReceipts struct { - blockID flow.Identifier - receiptCount int -} - -func (e InsufficientExecutionReceipts) Error() string { - return fmt.Sprintf("insufficient execution receipts found (%d) for block ID: %s", e.receiptCount, e.blockID.String()) -} - -func IsInsufficientExecutionReceipts(err error) bool { - var errInsufficientExecutionReceipts InsufficientExecutionReceipts - return errors.As(err, &errInsufficientExecutionReceipts) -} diff --git a/engine/access/rpc/backend/events/events.go b/engine/access/rpc/backend/events/events.go new file mode 100644 index 00000000000..2afbced5f8b --- /dev/null +++ b/engine/access/rpc/backend/events/events.go @@ -0,0 +1,198 @@ +package events + +import ( + "context" + "fmt" + "time" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/events/provider" + 
"github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/events" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// DefaultMaxHeightRange is the default maximum size of range requests. +const DefaultMaxHeightRange = 250 + +type Events struct { + headers storage.Headers + state protocol.State + chain flow.Chain + maxHeightRange uint + provider provider.EventProvider +} + +var _ access.EventsAPI = (*Events)(nil) + +func NewEventsBackend( + log zerolog.Logger, + state protocol.State, + chain flow.Chain, + maxHeightRange uint, + headers storage.Headers, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, + queryMode query_mode.IndexQueryMode, + eventsIndex *index.EventsIndex, + execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider, +) (*Events, error) { + var eventProvider provider.EventProvider + + switch queryMode { + case query_mode.IndexQueryModeLocalOnly: + eventProvider = provider.NewLocalEventProvider(eventsIndex) + + case query_mode.IndexQueryModeExecutionNodesOnly: + eventProvider = provider.NewENEventProvider(log, execNodeIdentitiesProvider, connFactory, nodeCommunicator) + + case query_mode.IndexQueryModeFailover: + local := provider.NewLocalEventProvider(eventsIndex) + execNode := provider.NewENEventProvider(log, execNodeIdentitiesProvider, connFactory, nodeCommunicator) + eventProvider = provider.NewFailoverEventProvider(log, local, execNode) + + default: + return nil, fmt.Errorf("unknown execution mode: %v", queryMode) + } + + return &Events{ + state: state, + chain: chain, + maxHeightRange: maxHeightRange, + headers: headers, + provider: eventProvider, + }, nil +} + +// GetEventsForHeightRange retrieves events for all sealed blocks between the start block height and +// the end block height (inclusive) that have the given type. 
+func (e *Events) GetEventsForHeightRange( + ctx context.Context, + eventType string, + startHeight, endHeight uint64, + requiredEventEncodingVersion entities.EventEncodingVersion, +) ([]flow.BlockEvents, error) { + if _, err := events.ValidateEvent(flow.EventType(eventType), e.chain); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid event type: %v", err) + } + + if endHeight < startHeight { + return nil, status.Error(codes.InvalidArgument, "start height must not be larger than end height") + } + + rangeSize := endHeight - startHeight + 1 // range is inclusive on both ends + if rangeSize > uint64(e.maxHeightRange) { + return nil, status.Errorf(codes.InvalidArgument, + "requested block range (%d) exceeded maximum (%d)", rangeSize, e.maxHeightRange) + } + + // get the latest sealed block header + sealed, err := e.state.Sealed().Head() + if err != nil { + // sealed block must be in the store, so throw an exception for any error + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return nil, err + } + + // start height should not be beyond the last sealed height + if startHeight > sealed.Height { + return nil, status.Errorf(codes.OutOfRange, + "start height %d is greater than the last sealed block height %d", startHeight, sealed.Height) + } + + // limit max height to last sealed block in the chain + // + // Note: this causes unintuitive behavior for clients making requests through a proxy that + // fronts multiple nodes. With that setup, clients may receive responses for a smaller range + // than requested because the node serving the request has a slightly delayed view of the chain. + // + // An alternative option is to return an error here, but that's likely to cause more pain for + // these clients since the requests would intermittently fail. it's recommended instead to + // check the block height of the last message in the response. this will be the last block + // height searched, and can be used to determine the start height for the next range. + if endHeight > sealed.Height { + endHeight = sealed.Height + } + + // find the block headers for all the blocks between min and max height (inclusive) + blockHeaders := make([]provider.BlockMetadata, 0, endHeight-startHeight+1) + + for i := startHeight; i <= endHeight; i++ { + // this looks inefficient, but is actually what's done under the covers by `headers.ByHeight` + // and avoids calculating header.ID() for each block. 
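+		// the blockID resolved here is also reused below when building BlockMetadata, saving a header.ID() call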
+ blockID, err := e.headers.BlockIDByHeight(i) + if err != nil { + return nil, rpc.ConvertStorageError(common.ResolveHeightError(e.state.Params(), i, err)) + } + header, err := e.headers.ByBlockID(blockID) + if err != nil { + return nil, rpc.ConvertStorageError(fmt.Errorf("failed to get block header for %d: %w", i, err)) + } + + blockHeaders = append(blockHeaders, provider.BlockMetadata{ + ID: blockID, + Height: header.Height, + Timestamp: time.UnixMilli(int64(header.Timestamp)).UTC(), + }) + } + + resp, err := e.provider.Events(ctx, blockHeaders, flow.EventType(eventType), requiredEventEncodingVersion) + if err != nil { + return nil, err + } + + return resp.Events, nil +} + +// GetEventsForBlockIDs retrieves events for all the specified block IDs that have the given type +func (e *Events) GetEventsForBlockIDs( + ctx context.Context, + eventType string, + blockIDs []flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, +) ([]flow.BlockEvents, error) { + if _, err := events.ValidateEvent(flow.EventType(eventType), e.chain); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid event type: %v", err) + } + + if uint(len(blockIDs)) > e.maxHeightRange { + return nil, status.Errorf(codes.InvalidArgument, "requested block range (%d) exceeded maximum (%d)", len(blockIDs), e.maxHeightRange) + } + + // find the block headers for all the block IDs + blockHeaders := make([]provider.BlockMetadata, 0, len(blockIDs)) + for _, blockID := range blockIDs { + header, err := e.headers.ByBlockID(blockID) + if err != nil { + return nil, rpc.ConvertStorageError(fmt.Errorf("failed to get block header for %s: %w", blockID, err)) + } + + blockHeaders = append(blockHeaders, provider.BlockMetadata{ + ID: blockID, + Height: header.Height, + Timestamp: time.UnixMilli(int64(header.Timestamp)).UTC(), + }) + } + + resp, err := e.provider.Events(ctx, blockHeaders, flow.EventType(eventType), requiredEventEncodingVersion) + if err != nil { + return nil, err + } + + return resp.Events, nil +} diff --git a/engine/access/rpc/backend/events/events_test.go b/engine/access/rpc/backend/events/events_test.go new file mode 100644 index 00000000000..4884527a506 --- /dev/null +++ b/engine/access/rpc/backend/events/events_test.go @@ -0,0 +1,533 @@ +package events + +import ( + "bytes" + "context" + "fmt" + "sort" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/cadence/encoding/ccf" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/flow/protobuf/go/flow/entities" + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + + "github.com/onflow/flow-go/engine/access/index" + access "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + 
"github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +var targetEvent string + +type testCase struct { + encoding entities.EventEncodingVersion + queryMode query_mode.IndexQueryMode +} + +type EventsSuite struct { + suite.Suite + + log zerolog.Logger + state *protocol.State + snapshot *protocol.Snapshot + params *protocol.Params + rootHeader *flow.Header + + eventsIndex *index.EventsIndex + events *storagemock.Events + headers *storagemock.Headers + receipts *storagemock.ExecutionReceipts + connectionFactory *connectionmock.ConnectionFactory + chainID flow.ChainID + + executionNodes flow.IdentityList + execClient *access.ExecutionAPIClient + + sealedHead *flow.Header + blocks []*flow.Block + blockIDs []flow.Identifier + blockEvents []flow.Event + + testCases []testCase +} + +func TestBackendEventsSuite(t *testing.T) { + suite.Run(t, new(EventsSuite)) +} + +func (s *EventsSuite) SetupTest() { + s.log = unittest.Logger() + s.state = protocol.NewState(s.T()) + s.snapshot = protocol.NewSnapshot(s.T()) + s.rootHeader = unittest.BlockHeaderFixture() + s.params = protocol.NewParams(s.T()) + s.events = storagemock.NewEvents(s.T()) + s.headers = storagemock.NewHeaders(s.T()) + s.receipts = storagemock.NewExecutionReceipts(s.T()) + s.connectionFactory = connectionmock.NewConnectionFactory(s.T()) + s.chainID = flow.Testnet + + s.execClient = access.NewExecutionAPIClient(s.T()) + s.executionNodes = unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) + s.eventsIndex = index.NewEventsIndex(index.NewReporter(), s.events) + + blockCount := 5 + s.blocks = make([]*flow.Block, blockCount) + s.blockIDs = make([]flow.Identifier, blockCount) + + for i := 0; i < blockCount; i++ { + var header *flow.Header + if i == 0 { + header = unittest.BlockHeaderFixture() + } else { + header = unittest.BlockHeaderWithParentFixture(s.blocks[i-1].ToHeader()) + } + + payload := unittest.PayloadFixture() + header.PayloadHash = payload.Hash() + block, err := flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: header.HeaderBody, + Payload: payload, + }, + ) + require.NoError(s.T(), err) + + // the last block is sealed + if i == blockCount-1 { + s.sealedHead = header + } + + s.blocks[i] = block + s.blockIDs[i] = block.ID() + + s.T().Logf("block %d: %s", header.Height, block.ID()) + } + + s.blockEvents = unittest.EventGenerator.GetEventsWithEncoding(10, entities.EventEncodingVersion_CCF_V0) + targetEvent = string(s.blockEvents[0].Type) + + // events returned from the db are sorted by txID, txIndex, then eventIndex. 
+ // reproduce that here to ensure output order works as expected + returnBlockEvents := make([]flow.Event, len(s.blockEvents)) + copy(returnBlockEvents, s.blockEvents) + + sort.Slice(returnBlockEvents, func(i, j int) bool { + return bytes.Compare(returnBlockEvents[i].TransactionID[:], returnBlockEvents[j].TransactionID[:]) < 0 + }) + + s.events.On("ByBlockID", mock.Anything).Return(func(blockID flow.Identifier) ([]flow.Event, error) { + for _, headerID := range s.blockIDs { + if blockID == headerID { + return returnBlockEvents, nil + } + } + return nil, storage.ErrNotFound + }).Maybe() + + s.headers.On("BlockIDByHeight", mock.Anything).Return(func(height uint64) (flow.Identifier, error) { + for _, block := range s.blocks { + if height == block.Height { + return block.ID(), nil + } + } + return flow.ZeroID, storage.ErrNotFound + }).Maybe() + + s.headers.On("ByBlockID", mock.Anything).Return(func(blockID flow.Identifier) (*flow.Header, error) { + for _, block := range s.blocks { + if blockID == block.ID() { + return block.ToHeader(), nil + } + } + return nil, storage.ErrNotFound + }).Maybe() + + s.testCases = make([]testCase, 0) + + for _, encoding := range []entities.EventEncodingVersion{ + entities.EventEncodingVersion_CCF_V0, + entities.EventEncodingVersion_JSON_CDC_V0, + } { + for _, queryMode := range []query_mode.IndexQueryMode{ + query_mode.IndexQueryModeExecutionNodesOnly, + query_mode.IndexQueryModeLocalOnly, + query_mode.IndexQueryModeFailover, + } { + s.testCases = append(s.testCases, testCase{ + encoding: encoding, + queryMode: queryMode, + }) + } + } +} + +// TestGetEvents_HappyPaths tests the happy paths for GetEventsForBlockIDs and GetEventsForHeightRange +// across all queryModes and encodings +func (s *EventsSuite) TestGetEvents_HappyPaths() { + ctx := context.Background() + + startHeight := s.blocks[0].Height + endHeight := s.sealedHead.Height + + reporter := syncmock.NewIndexReporter(s.T()) + reporter.On("LowestIndexedHeight").Return(startHeight, nil) + reporter.On("HighestIndexedHeight").Return(endHeight+10, nil) + err := s.eventsIndex.Initialize(reporter) + s.Require().NoError(err) + + s.state.On("Sealed").Return(s.snapshot) + s.snapshot.On("Head").Return(s.sealedHead, nil) + + s.Run("GetEventsForHeightRange - end height updated", func() { + backend := s.defaultBackend(query_mode.IndexQueryModeFailover, s.eventsIndex) + endHeight := startHeight + 20 // should still return 5 responses + encoding := entities.EventEncodingVersion_CCF_V0 + + response, err := backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, encoding) + s.Require().NoError(err) + + s.assertResponse(response, encoding) + }) + + for _, tt := range s.testCases { + s.Run(fmt.Sprintf("all from storage - %s - %s", tt.encoding.String(), tt.queryMode), func() { + switch tt.queryMode { + case query_mode.IndexQueryModeExecutionNodesOnly: + // not applicable + return + case query_mode.IndexQueryModeLocalOnly, query_mode.IndexQueryModeFailover: + // only calls to local storage + } + + backend := s.defaultBackend(tt.queryMode, s.eventsIndex) + + response, err := backend.GetEventsForBlockIDs(ctx, targetEvent, s.blockIDs, tt.encoding) + s.Require().NoError(err) + s.assertResponse(response, tt.encoding) + + response, err = backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, tt.encoding) + s.Require().NoError(err) + s.assertResponse(response, tt.encoding) + }) + + s.Run(fmt.Sprintf("all from en - %s - %s", tt.encoding.String(), tt.queryMode), func() { + events := 
storagemock.NewEvents(s.T()) + eventsIndex := index.NewEventsIndex(index.NewReporter(), events) + + switch tt.queryMode { + case query_mode.IndexQueryModeLocalOnly: + // not applicable + return + case query_mode.IndexQueryModeExecutionNodesOnly: + // only calls to EN, no calls to storage + case query_mode.IndexQueryModeFailover: + // all calls to storage fail + // simulated by not initializing the eventIndex so all calls return ErrIndexNotInitialized + } + + backend := s.defaultBackend(tt.queryMode, eventsIndex) + s.setupENSuccessResponse(targetEvent, s.blocks) + + response, err := backend.GetEventsForBlockIDs(ctx, targetEvent, s.blockIDs, tt.encoding) + s.Require().NoError(err) + s.assertResponse(response, tt.encoding) + + response, err = backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, tt.encoding) + s.Require().NoError(err) + s.assertResponse(response, tt.encoding) + }) + + s.Run(fmt.Sprintf("mixed storage & en - %s - %s", tt.encoding.String(), tt.queryMode), func() { + events := storagemock.NewEvents(s.T()) + eventsIndex := index.NewEventsIndex(index.NewReporter(), events) + + switch tt.queryMode { + case query_mode.IndexQueryModeLocalOnly, query_mode.IndexQueryModeExecutionNodesOnly: + // not applicable + return + case query_mode.IndexQueryModeFailover: + // only failing blocks queried from EN + s.setupENSuccessResponse(targetEvent, []*flow.Block{s.blocks[0], s.blocks[4]}) + } + + // the first and last blocks are not available from storage, and should be fetched from the EN + reporter := syncmock.NewIndexReporter(s.T()) + reporter.On("LowestIndexedHeight").Return(s.blocks[1].Height, nil) + reporter.On("HighestIndexedHeight").Return(s.blocks[3].Height, nil) + + events.On("ByBlockID", s.blockIDs[1]).Return(s.blockEvents, nil) + events.On("ByBlockID", s.blockIDs[2]).Return(s.blockEvents, nil) + events.On("ByBlockID", s.blockIDs[3]).Return(s.blockEvents, nil) + + err := eventsIndex.Initialize(reporter) + s.Require().NoError(err) + + backend := s.defaultBackend(tt.queryMode, eventsIndex) + response, err := backend.GetEventsForBlockIDs(ctx, targetEvent, s.blockIDs, tt.encoding) + s.Require().NoError(err) + s.assertResponse(response, tt.encoding) + + response, err = backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, tt.encoding) + s.Require().NoError(err) + s.assertResponse(response, tt.encoding) + }) + } +} + +func (s *EventsSuite) TestGetEventsForHeightRange_HandlesErrors() { + ctx := context.Background() + + startHeight := s.blocks[0].Height + endHeight := s.sealedHead.Height + encoding := entities.EventEncodingVersion_CCF_V0 + + s.Run("returns error for endHeight < startHeight", func() { + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) + endHeight := startHeight - 1 + + response, err := backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, encoding) + s.Assert().Equal(codes.InvalidArgument, status.Code(err)) + s.Assert().Nil(response) + }) + + s.Run("returns error for range larger than max", func() { + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) + endHeight := startHeight + DefaultMaxHeightRange + + response, err := backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, encoding) + s.Assert().Equal(codes.InvalidArgument, status.Code(err)) + s.Assert().Nil(response) + }) + + s.Run("throws irrecoverable if sealed header not available", func() { + s.state.On("Sealed").Return(s.snapshot) + s.snapshot.On("Head").Return(nil, 
storage.ErrNotFound).Once() + + signCtxErr := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", storage.ErrNotFound) + signalerCtx := irrecoverable.WithSignalerContext(context.Background(), + irrecoverable.NewMockSignalerContextExpectError(s.T(), ctx, signCtxErr)) + + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) + response, err := backend.GetEventsForHeightRange(signalerCtx, targetEvent, startHeight, endHeight, encoding) + // these will never be returned in production + s.Assert().Equal(codes.Unknown, status.Code(err)) + s.Assert().Nil(response) + }) + + s.state.On("Sealed").Return(s.snapshot) + s.snapshot.On("Head").Return(s.sealedHead, nil) + + s.Run("returns error for startHeight > sealed height", func() { + startHeight := s.sealedHead.Height + 1 + endHeight := startHeight + 1 + + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) + response, err := backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, encoding) + s.Assert().Equal(codes.OutOfRange, status.Code(err)) + s.Assert().Nil(response) + }) + + s.state.On("Params").Return(s.params) + + s.Run("returns error for startHeight < spork root height", func() { + sporkRootHeight := s.blocks[0].Height - 10 + startHeight := sporkRootHeight - 1 + + s.params.On("SporkRootBlockHeight").Return(sporkRootHeight).Once() + s.params.On("SealedRoot").Return(s.rootHeader, nil).Once() + + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) + response, err := backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, encoding) + s.Assert().Equal(codes.NotFound, status.Code(err)) + s.Assert().ErrorContains(err, "Try to use a historic node") + s.Assert().Nil(response) + }) + + s.Run("returns error for startHeight < node root height", func() { + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) + + sporkRootHeight := s.blocks[0].Height - 10 + nodeRootHeader := unittest.BlockHeaderWithHeight(s.blocks[0].Height) + startHeight := nodeRootHeader.Height - 5 + + s.params.On("SporkRootBlockHeight").Return(sporkRootHeight).Once() + s.params.On("SealedRoot").Return(nodeRootHeader, nil).Once() + + response, err := backend.GetEventsForHeightRange(ctx, targetEvent, startHeight, endHeight, encoding) + s.Assert().Equal(codes.NotFound, status.Code(err)) + s.Assert().ErrorContains(err, "Try to use a different Access node") + s.Assert().Nil(response) + }) +} + +func (s *EventsSuite) TestGetEventsForBlockIDs_HandlesErrors() { + ctx := context.Background() + + encoding := entities.EventEncodingVersion_CCF_V0 + + s.Run("returns error when too many blockIDs requested", func() { + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) + backend.maxHeightRange = 3 + + response, err := backend.GetEventsForBlockIDs(ctx, targetEvent, s.blockIDs, encoding) + s.Assert().Equal(codes.InvalidArgument, status.Code(err)) + s.Assert().Nil(response) + }) + + s.Run("returns error for missing header", func() { + headers := storagemock.NewHeaders(s.T()) + backend := s.defaultBackend(query_mode.IndexQueryModeExecutionNodesOnly, s.eventsIndex) + backend.headers = headers + + for i, blockID := range s.blockIDs { + // return error on the last header + if i == len(s.blocks)-1 { + headers.On("ByBlockID", blockID).Return(nil, storage.ErrNotFound) + continue + } + + headers.On("ByBlockID", blockID).Return(s.blocks[i].ToHeader(), nil) + } + + response, err := 
backend.GetEventsForBlockIDs(ctx, targetEvent, s.blockIDs, encoding) + s.Assert().Equal(codes.NotFound, status.Code(err)) + s.Assert().Nil(response) + }) +} + +func (s *EventsSuite) assertResponse(response []flow.BlockEvents, encoding entities.EventEncodingVersion) { + s.Assert().Len(response, len(s.blocks)) + for i, block := range s.blocks { + s.Assert().Equal(block.Height, response[i].BlockHeight) + s.Assert().Equal(block.ID(), response[i].BlockID) + s.Assert().Len(response[i].Events, 1) + + s.assertEncoding(&response[i].Events[0], encoding) + } +} + +func (s *EventsSuite) assertEncoding(event *flow.Event, encoding entities.EventEncodingVersion) { + var err error + switch encoding { + case entities.EventEncodingVersion_CCF_V0: + _, err = ccf.Decode(nil, event.Payload) + case entities.EventEncodingVersion_JSON_CDC_V0: + _, err = jsoncdc.Decode(nil, event.Payload) + default: + s.T().Errorf("unknown encoding: %s", encoding.String()) + } + s.Require().NoError(err) +} + +func (s *EventsSuite) defaultBackend(mode query_mode.IndexQueryMode, eventsIndex *index.EventsIndex) *Events { + e, err := NewEventsBackend( + s.log, + s.state, + s.chainID.Chain(), + DefaultMaxHeightRange, + s.headers, + s.connectionFactory, + node_communicator.NewNodeCommunicator(false), + mode, + eventsIndex, + commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.state, + s.receipts, + flow.IdentifierList{}, + flow.IdentifierList{}, + )) + + require.NoError(s.T(), err) + + return e +} + +// setupExecutionNodes sets up the mocks required to test against an EN backend +func (s *EventsSuite) setupExecutionNodes(block *flow.Block) { + s.params.On("FinalizedRoot").Return(s.rootHeader, nil) + s.state.On("Params").Return(s.params) + s.state.On("Final").Return(s.snapshot) + s.snapshot.On("Identities", mock.Anything).Return(s.executionNodes, nil) + + // this line causes a S1021 lint error because receipts is explicitly declared. this is required + // to ensure the mock library handles the response type correctly + var receipts flow.ExecutionReceiptList //nolint:gosimple + receipts = unittest.ReceiptsForBlockFixture(block, s.executionNodes.NodeIDs()) + s.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) + + s.connectionFactory.On("GetExecutionAPIClient", mock.Anything). + Return(s.execClient, &mocks.MockCloser{}, nil) +} + +// setupENSuccessResponse configures the execution node client to return a successful response +func (s *EventsSuite) setupENSuccessResponse(eventType string, blocks []*flow.Block) { + s.setupExecutionNodes(blocks[len(blocks)-1]) + + ids := make([][]byte, len(blocks)) + results := make([]*execproto.GetEventsForBlockIDsResponse_Result, len(blocks)) + + events := make([]*entities.Event, 0) + for _, event := range s.blockEvents { + if string(event.Type) == eventType { + events = append(events, convert.EventToMessage(event)) + } + } + + for i, block := range blocks { + id := block.ID() + ids[i] = id[:] + results[i] = &execproto.GetEventsForBlockIDsResponse_Result{ + BlockId: id[:], + BlockHeight: block.Height, + Events: events, + } + } + expectedExecRequest := &execproto.GetEventsForBlockIDsRequest{ + Type: eventType, + BlockIds: ids, + } + expectedResponse := &execproto.GetEventsForBlockIDsResponse{ + Results: results, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, + } + + s.execClient.On("GetEventsForBlockIDs", mock.Anything, expectedExecRequest). 
+		Return(expectedResponse, nil)
+}
+
+// setupENFailingResponse configures the execution node client to return an error
+func (s *EventsSuite) setupENFailingResponse(eventType string, headers []*flow.Header, err error) {
+	ids := make([][]byte, len(headers))
+	for i, header := range headers {
+		id := header.ID()
+		ids[i] = id[:]
+	}
+	failingRequest := &execproto.GetEventsForBlockIDsRequest{
+		Type:     eventType,
+		BlockIds: ids,
+	}
+
+	s.execClient.On("GetEventsForBlockIDs", mock.Anything, failingRequest).
+		Return(nil, err)
+}
diff --git a/engine/access/rpc/backend/events/provider/execution_node.go b/engine/access/rpc/backend/events/provider/execution_node.go
new file mode 100644
index 00000000000..cb224c8a2db
--- /dev/null
+++ b/engine/access/rpc/backend/events/provider/execution_node.go
@@ -0,0 +1,207 @@
+package provider
+
+import (
+	"context"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/rs/zerolog"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow/protobuf/go/flow/entities"
+	execproto "github.com/onflow/flow/protobuf/go/flow/execution"
+
+	"github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator"
+	"github.com/onflow/flow-go/engine/access/rpc/connection"
+	"github.com/onflow/flow-go/engine/common/rpc"
+	"github.com/onflow/flow-go/engine/common/rpc/convert"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+type ENEventProvider struct {
+	log              zerolog.Logger
+	nodeProvider     *rpc.ExecutionNodeIdentitiesProvider
+	connFactory      connection.ConnectionFactory
+	nodeCommunicator node_communicator.Communicator
+}
+
+var _ EventProvider = (*ENEventProvider)(nil)
+
+func NewENEventProvider(
+	log zerolog.Logger,
+	nodeProvider *rpc.ExecutionNodeIdentitiesProvider,
+	connFactory connection.ConnectionFactory,
+	nodeCommunicator node_communicator.Communicator,
+) *ENEventProvider {
+	return &ENEventProvider{
+		log:              log.With().Str("event_provider", "execution_node").Logger(),
+		nodeProvider:     nodeProvider,
+		connFactory:      connFactory,
+		nodeCommunicator: nodeCommunicator,
+	}
+}
+
+func (e *ENEventProvider) Events(
+	ctx context.Context,
+	blocks []BlockMetadata,
+	eventType flow.EventType,
+	encoding entities.EventEncodingVersion,
+) (Response, error) {
+	if len(blocks) == 0 {
+		return Response{}, nil
+	}
+
+	// create an execution API request for events at block ID
+	blockIDs := make([]flow.Identifier, len(blocks))
+	for i := range blocks {
+		blockIDs[i] = blocks[i].ID
+	}
+
+	req := &execproto.GetEventsForBlockIDsRequest{
+		Type:     string(eventType),
+		BlockIds: convert.IdentifiersToMessages(blockIDs),
+	}
+
+	// choose the last block ID to find the list of execution nodes
+	lastBlockID := blockIDs[len(blockIDs)-1]
+
+	execNodes, err := e.nodeProvider.ExecutionNodesForBlockID(
+		ctx,
+		lastBlockID,
+	)
+	if err != nil {
+		return Response{}, rpc.ConvertError(err, "failed to get execution nodes for events query", codes.Internal)
+	}
+
+	var resp *execproto.GetEventsForBlockIDsResponse
+	var successfulNode *flow.IdentitySkeleton
+	resp, successfulNode, err = e.getEventsFromAnyExeNode(ctx, execNodes, req)
+	if err != nil {
+		return Response{}, rpc.ConvertError(err, "failed to retrieve events from execution nodes", codes.Internal)
+	}
+	e.log.Trace().
+		Str("execution_id", successfulNode.String()).
+		Str("last_block_id", lastBlockID.String()).
+ Msg("successfully got events") + + // convert execution node api result to access node api result + results, err := verifyAndConvertToAccessEvents( + resp.GetResults(), + blocks, + resp.GetEventEncodingVersion(), + encoding, + ) + if err != nil { + return Response{}, status.Errorf(codes.Internal, "failed to verify retrieved events from execution node: %v", err) + } + + return Response{ + Events: results, + }, nil +} + +// getEventsFromAnyExeNode retrieves the given events from any EN in `execNodes`. +// We attempt querying each EN in sequence. If any EN returns a valid response, then errors from +// other ENs are logged and swallowed. If all ENs fail to return a valid response, then an +// error aggregating all failures is returned. +func (e *ENEventProvider) getEventsFromAnyExeNode( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetEventsForBlockIDsRequest, +) (*execproto.GetEventsForBlockIDsResponse, *flow.IdentitySkeleton, error) { + var resp *execproto.GetEventsForBlockIDsResponse + var execNode *flow.IdentitySkeleton + errToReturn := e.nodeCommunicator.CallAvailableNode( + execNodes, + func(node *flow.IdentitySkeleton) error { + var err error + start := time.Now() + resp, err = e.tryGetEvents(ctx, node, req) + duration := time.Since(start) + + logger := e.log.With(). + Str("execution_node", node.String()). + Str("event", req.GetType()). + Int("blocks", len(req.BlockIds)). + Int64("rtt_ms", duration.Milliseconds()). + Logger() + + if err == nil { + // return if any execution node replied successfully + logger.Debug().Msg("Successfully got events") + execNode = node + return nil + } + + logger.Err(err).Msg("failed to execute Events") + return err + }, + nil, + ) + + return resp, execNode, errToReturn +} + +func (e *ENEventProvider) tryGetEvents( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetEventsForBlockIDsRequest, +) (*execproto.GetEventsForBlockIDsResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + return execRPCClient.GetEventsForBlockIDs(ctx, req) +} + +// verifyAndConvertToAccessEvents converts execution node api result to access node api result, +// and verifies that the results contains results from each block that was requested +func verifyAndConvertToAccessEvents( + execEvents []*execproto.GetEventsForBlockIDsResponse_Result, + requestedBlockInfos []BlockMetadata, + from entities.EventEncodingVersion, + to entities.EventEncodingVersion, +) ([]flow.BlockEvents, error) { + if len(execEvents) != len(requestedBlockInfos) { + return nil, errors.New("number of results does not match number of blocks requested") + } + + requestedBlockInfoSet := map[string]BlockMetadata{} + for _, header := range requestedBlockInfos { + requestedBlockInfoSet[header.ID.String()] = header + } + + results := make([]flow.BlockEvents, len(execEvents)) + + for i, result := range execEvents { + blockInfo, expected := requestedBlockInfoSet[hex.EncodeToString(result.GetBlockId())] + if !expected { + return nil, fmt.Errorf("unexpected blockID from exe node %x", result.GetBlockId()) + } + if result.GetBlockHeight() != blockInfo.Height { + return nil, fmt.Errorf("unexpected block height %d for block %x from exe node", + result.GetBlockHeight(), + result.GetBlockId()) + } + + events, err := convert.MessagesToEventsWithEncodingConversion(result.GetEvents(), from, to) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal 
events in result %d with encoding version %s: %w",
+				i, to.String(), err)
+		}
+
+		results[i] = flow.BlockEvents{
+			BlockID:        blockInfo.ID,
+			BlockHeight:    blockInfo.Height,
+			BlockTimestamp: blockInfo.Timestamp,
+			Events:         events,
+		}
+	}
+
+	return results, nil
+}
diff --git a/engine/access/rpc/backend/events/provider/failover.go b/engine/access/rpc/backend/events/provider/failover.go
new file mode 100644
index 00000000000..82792a19395
--- /dev/null
+++ b/engine/access/rpc/backend/events/provider/failover.go
@@ -0,0 +1,75 @@
+package provider
+
+import (
+	"context"
+	"sort"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow/protobuf/go/flow/entities"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+type FailoverEventProvider struct {
+	log              zerolog.Logger
+	localProvider    EventProvider
+	execNodeProvider EventProvider
+}
+
+var _ EventProvider = (*FailoverEventProvider)(nil)
+
+func NewFailoverEventProvider(
+	log zerolog.Logger,
+	localProvider EventProvider,
+	execNodeProvider EventProvider,
+) *FailoverEventProvider {
+	return &FailoverEventProvider{
+		log:              log.With().Str("event_provider", "failover").Logger(),
+		localProvider:    localProvider,
+		execNodeProvider: execNodeProvider,
+	}
+}
+
+func (f *FailoverEventProvider) Events(
+	ctx context.Context,
+	blocks []BlockMetadata,
+	eventType flow.EventType,
+	encoding entities.EventEncodingVersion,
+) (Response, error) {
+	localEvents, localErr := f.localProvider.Events(ctx, blocks, eventType, encoding)
+	if localErr != nil {
+		f.log.Debug().Err(localErr).
+			Msg("failed to get events from local storage. will try to get them from execution node")
+
+		localEvents.MissingBlocks = blocks
+	}
+
+	if len(localEvents.MissingBlocks) == 0 {
+		return localEvents, nil
+	}
+
+	f.log.Debug().
+		Int("missing_blocks", len(localEvents.MissingBlocks)).
+		Msg("querying execution nodes for events from missing blocks")
+
+	execNodeEvents, execNodeErr := f.execNodeProvider.Events(ctx, localEvents.MissingBlocks, eventType, encoding)
+	if execNodeErr != nil {
+		return Response{}, execNodeErr
+	}
+
+	// sort ascending by block height
+	// this is needed because some blocks may be retrieved from storage and others from execution nodes.
+	// most likely, the earlier blocks will all be found in local storage, but that's not guaranteed,
+	// especially for nodes started after a spork, or once pruning is enabled.
+	// Note: this may not match the order of the original request for clients using GetEventsForBlockIDs
+	// that provide out of order block IDs
+	combinedEvents := append(localEvents.Events, execNodeEvents.Events...)
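+	// note: append may reuse the backing array of localEvents.Events; that is safe here because
+	// localEvents is not read again after this point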
+ sort.Slice(combinedEvents, func(i, j int) bool { + return combinedEvents[i].BlockHeight < combinedEvents[j].BlockHeight + }) + + return Response{ + Events: combinedEvents, + }, nil +} diff --git a/engine/access/rpc/backend/events/provider/local.go b/engine/access/rpc/backend/events/provider/local.go new file mode 100644 index 00000000000..50b8483e90a --- /dev/null +++ b/engine/access/rpc/backend/events/provider/local.go @@ -0,0 +1,101 @@ +package provider + +import ( + "context" + "errors" + "fmt" + + "google.golang.org/grpc/codes" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/storage" +) + +type LocalEventProvider struct { + index *index.EventsIndex +} + +var _ EventProvider = (*LocalEventProvider)(nil) + +func NewLocalEventProvider(index *index.EventsIndex) *LocalEventProvider { + return &LocalEventProvider{ + index: index, + } +} + +func (l *LocalEventProvider) Events( + ctx context.Context, + blocks []BlockMetadata, + eventType flow.EventType, + encoding entities.EventEncodingVersion, +) (Response, error) { + missing := make([]BlockMetadata, 0) + resp := make([]flow.BlockEvents, 0) + + for _, blockInfo := range blocks { + if ctx.Err() != nil { + return Response{}, rpc.ConvertError(ctx.Err(), "failed to get events from storage", codes.Canceled) + } + + events, err := l.index.ByBlockID(blockInfo.ID, blockInfo.Height) + if err != nil { + if errors.Is(err, storage.ErrNotFound) || + errors.Is(err, storage.ErrHeightNotIndexed) || + errors.Is(err, indexer.ErrIndexNotInitialized) { + missing = append(missing, blockInfo) + continue + } + err = fmt.Errorf("failed to get events for block %s: %w", blockInfo.ID, err) + return Response{}, rpc.ConvertError(err, "failed to get events from storage", codes.Internal) + } + + filteredEvents := make([]flow.Event, 0) + for _, event := range events { + if event.Type != eventType { + continue + } + + // events are encoded in CCF format in storage. 
convert to JSON-CDC if requested + if encoding == entities.EventEncodingVersion_JSON_CDC_V0 { + payload, err := convert.CcfPayloadToJsonPayload(event.Payload) + if err != nil { + err = fmt.Errorf("failed to convert event payload for block %s: %w", blockInfo.ID, err) + return Response{}, rpc.ConvertError(err, "failed to convert event payload", codes.Internal) + } + filteredEvent, err := flow.NewEvent( + flow.UntrustedEvent{ + Type: event.Type, + TransactionID: event.TransactionID, + TransactionIndex: event.TransactionIndex, + EventIndex: event.EventIndex, + Payload: payload, + }, + ) + if err != nil { + return Response{}, rpc.ConvertError(err, "could not construct event", codes.Internal) + } + event = *filteredEvent + } + + filteredEvents = append(filteredEvents, event) + } + + resp = append(resp, flow.BlockEvents{ + BlockID: blockInfo.ID, + BlockHeight: blockInfo.Height, + BlockTimestamp: blockInfo.Timestamp, + Events: filteredEvents, + }) + } + + return Response{ + Events: resp, + MissingBlocks: missing, + }, nil +} diff --git a/engine/access/rpc/backend/events/provider/mock/event_provider.go b/engine/access/rpc/backend/events/provider/mock/event_provider.go new file mode 100644 index 00000000000..1b1f8523d74 --- /dev/null +++ b/engine/access/rpc/backend/events/provider/mock/event_provider.go @@ -0,0 +1,61 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + entities "github.com/onflow/flow/protobuf/go/flow/entities" + + mock "github.com/stretchr/testify/mock" + + provider "github.com/onflow/flow-go/engine/access/rpc/backend/events/provider" +) + +// EventProvider is an autogenerated mock type for the EventProvider type +type EventProvider struct { + mock.Mock +} + +// Events provides a mock function with given fields: ctx, blocks, eventType, requiredEventEncodingVersion +func (_m *EventProvider) Events(ctx context.Context, blocks []provider.BlockMetadata, eventType flow.EventType, requiredEventEncodingVersion entities.EventEncodingVersion) (provider.Response, error) { + ret := _m.Called(ctx, blocks, eventType, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for Events") + } + + var r0 provider.Response + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []provider.BlockMetadata, flow.EventType, entities.EventEncodingVersion) (provider.Response, error)); ok { + return rf(ctx, blocks, eventType, requiredEventEncodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, []provider.BlockMetadata, flow.EventType, entities.EventEncodingVersion) provider.Response); ok { + r0 = rf(ctx, blocks, eventType, requiredEventEncodingVersion) + } else { + r0 = ret.Get(0).(provider.Response) + } + + if rf, ok := ret.Get(1).(func(context.Context, []provider.BlockMetadata, flow.EventType, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, blocks, eventType, requiredEventEncodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEventProvider creates a new instance of EventProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEventProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *EventProvider { + mock := &EventProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/events/provider/provider.go b/engine/access/rpc/backend/events/provider/provider.go new file mode 100644 index 00000000000..e7b83d4b849 --- /dev/null +++ b/engine/access/rpc/backend/events/provider/provider.go @@ -0,0 +1,32 @@ +package provider + +import ( + "context" + "time" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/model/flow" +) + +type EventProvider interface { + Events( + ctx context.Context, + blocks []BlockMetadata, + eventType flow.EventType, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) (Response, error) +} + +// BlockMetadata is used to capture information about requested blocks to avoid repeated blockID +// calculations and passing around full block headers. +type BlockMetadata struct { + ID flow.Identifier + Height uint64 + Timestamp time.Time +} + +type Response struct { + Events []flow.BlockEvents + MissingBlocks []BlockMetadata +} diff --git a/engine/access/rpc/backend/historical_access_test.go b/engine/access/rpc/backend/historical_access_test.go index b66904f6604..4c6a076bde0 100644 --- a/engine/access/rpc/backend/historical_access_test.go +++ b/engine/access/rpc/backend/historical_access_test.go @@ -3,21 +3,18 @@ package backend import ( "context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" ) // TestHistoricalTransactionResult tests to see if the historical transaction status can be retrieved func (suite *Suite) TestHistoricalTransactionResult() { - ctx := context.Background() collection := unittest.CollectionFixture(1) transactionBody := collection.Transactions[0] @@ -37,26 +34,11 @@ func (suite *Suite) TestHistoricalTransactionResult() { Events: nil, } - backend := New(suite.state, - nil, - []accessproto.AccessAPIClient{suite.historicalAccessClient}, - suite.blocks, - suite.headers, - suite.collections, - suite.transactions, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + params.HistoricalAccessNodes = []accessproto.AccessAPIClient{suite.historicalAccessClient} + + backend, err := New(params) + suite.Require().NoError(err) // Successfully return the transaction from the historical node suite.historicalAccessClient. 
@@ -65,8 +47,15 @@ func (suite *Suite) TestHistoricalTransactionResult() { Once() // Make the call for the transaction result - result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) + result, err := backend.GetTransactionResult( + ctx, + txID, + flow.ZeroID, + flow.ZeroID, + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + suite.Require().NotNil(result) // status should be sealed suite.Assert().Equal(flow.TransactionStatusSealed, result.Status) @@ -95,26 +84,11 @@ func (suite *Suite) TestHistoricalTransaction() { Transaction: convert.TransactionToMessage(*transactionBody), } - backend := New(suite.state, - nil, - []accessproto.AccessAPIClient{suite.historicalAccessClient}, - suite.blocks, - suite.headers, - suite.collections, - suite.transactions, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) + params := suite.defaultBackendParams() + params.HistoricalAccessNodes = []accessproto.AccessAPIClient{suite.historicalAccessClient} + + backend, err := New(params) + suite.Require().NoError(err) // Successfully return the transaction from the historical node suite.historicalAccessClient. @@ -124,7 +98,8 @@ func (suite *Suite) TestHistoricalTransaction() { // Make the call for the transaction result tx, err := backend.GetTransaction(ctx, txID) - suite.checkResponse(tx, err) + suite.Require().NoError(err) + suite.Require().NotNil(tx) suite.assertAllExpectations() } diff --git a/engine/access/rpc/backend/mock/connection_factory.go b/engine/access/rpc/backend/mock/connection_factory.go deleted file mode 100644 index 5dfd657ec7e..00000000000 --- a/engine/access/rpc/backend/mock/connection_factory.go +++ /dev/null @@ -1,113 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mock - -import ( - access "github.com/onflow/flow/protobuf/go/flow/access" - - execution "github.com/onflow/flow/protobuf/go/flow/execution" - - io "io" - - mock "github.com/stretchr/testify/mock" -) - -// ConnectionFactory is an autogenerated mock type for the ConnectionFactory type -type ConnectionFactory struct { - mock.Mock -} - -// GetAccessAPIClient provides a mock function with given fields: address -func (_m *ConnectionFactory) GetAccessAPIClient(address string) (access.AccessAPIClient, io.Closer, error) { - ret := _m.Called(address) - - var r0 access.AccessAPIClient - var r1 io.Closer - var r2 error - if rf, ok := ret.Get(0).(func(string) (access.AccessAPIClient, io.Closer, error)); ok { - return rf(address) - } - if rf, ok := ret.Get(0).(func(string) access.AccessAPIClient); ok { - r0 = rf(address) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(access.AccessAPIClient) - } - } - - if rf, ok := ret.Get(1).(func(string) io.Closer); ok { - r1 = rf(address) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(io.Closer) - } - } - - if rf, ok := ret.Get(2).(func(string) error); ok { - r2 = rf(address) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// GetExecutionAPIClient provides a mock function with given fields: address -func (_m *ConnectionFactory) GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, io.Closer, error) { - ret := _m.Called(address) - - var r0 execution.ExecutionAPIClient - var r1 io.Closer - var r2 error - if rf, ok := ret.Get(0).(func(string) (execution.ExecutionAPIClient, io.Closer, error)); ok { - return rf(address) - } - if rf, ok := ret.Get(0).(func(string) execution.ExecutionAPIClient); ok { - r0 = rf(address) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(execution.ExecutionAPIClient) - } - } - - if rf, ok := ret.Get(1).(func(string) io.Closer); ok { - r1 = rf(address) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(io.Closer) - } - } - - if rf, ok := ret.Get(2).(func(string) error); ok { - r2 = rf(address) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// InvalidateAccessAPIClient provides a mock function with given fields: address -func (_m *ConnectionFactory) InvalidateAccessAPIClient(address string) { - _m.Called(address) -} - -// InvalidateExecutionAPIClient provides a mock function with given fields: address -func (_m *ConnectionFactory) InvalidateExecutionAPIClient(address string) { - _m.Called(address) -} - -type mockConstructorTestingTNewConnectionFactory interface { - mock.TestingT - Cleanup(func()) -} - -// NewConnectionFactory creates a new instance of ConnectionFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewConnectionFactory(t mockConstructorTestingTNewConnectionFactory) *ConnectionFactory {
-	mock := &ConnectionFactory{}
-	mock.Mock.Test(t)
-
-	t.Cleanup(func() { mock.AssertExpectations(t) })
-
-	return mock
-}
diff --git a/engine/access/rpc/backend/node_communicator/communicator.go b/engine/access/rpc/backend/node_communicator/communicator.go
new file mode 100644
index 00000000000..626841c3d38
--- /dev/null
+++ b/engine/access/rpc/backend/node_communicator/communicator.go
@@ -0,0 +1,87 @@
+package node_communicator
+
+import (
+	"github.com/hashicorp/go-multierror"
+	"github.com/sony/gobreaker"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// maxFailedRequestCount represents the maximum number of failed requests before returning errors.
+const maxFailedRequestCount = 3
+
+type Communicator interface {
+	CallAvailableNode(
+		// List of node identifiers to execute the callback on
+		nodes flow.IdentitySkeletonList,
+		// Callback function that represents an action to be performed on a node.
+		// It takes a node as input and returns an error indicating the result of the action.
+		call func(node *flow.IdentitySkeleton) error,
+		// Callback function that determines whether an error should terminate further execution.
+		// It takes an error as input and returns a boolean value indicating whether the error should be considered terminal.
+		shouldTerminateOnError func(node *flow.IdentitySkeleton, err error) bool,
+	) error
+}
+
+var _ Communicator = (*NodeCommunicator)(nil)
+
+// NodeCommunicator is responsible for calling available nodes in the backend.
+type NodeCommunicator struct {
+	nodeSelectorFactory NodeSelectorFactory
+}
+
+// NewNodeCommunicator creates a new instance of NodeCommunicator.
+func NewNodeCommunicator(circuitBreakerEnabled bool) *NodeCommunicator {
+	return &NodeCommunicator{
+		nodeSelectorFactory: NodeSelectorFactory{circuitBreakerEnabled: circuitBreakerEnabled},
+	}
+}
+
+// CallAvailableNode calls the provided function on the available nodes.
+// It iterates through the nodes and executes the function.
+// If an error occurs, it applies the custom error terminator (if provided) and keeps track of the errors.
+// If the error comes from an open circuit breaker, it continues to the next node.
+// If the maximum failed request count is reached, it returns the accumulated errors.
+func (b *NodeCommunicator) CallAvailableNode(
+	// List of node identifiers to execute the callback on
+	nodes flow.IdentitySkeletonList,
+	// Callback function that represents an action to be performed on a node.
+	// It takes a node as input and returns an error indicating the result of the action.
+	call func(id *flow.IdentitySkeleton) error,
+	// Callback function that determines whether an error should terminate further execution.
+	// It takes an error as input and returns a boolean value indicating whether the error should be considered terminal.
+ shouldTerminateOnError func(node *flow.IdentitySkeleton, err error) bool, +) error { + var errs *multierror.Error + nodeSelector, err := b.nodeSelectorFactory.SelectNodes(nodes) + if err != nil { + return err + } + + for node := nodeSelector.Next(); node != nil; node = nodeSelector.Next() { + err := call(node) + if err == nil { + return nil + } + + if shouldTerminateOnError != nil && shouldTerminateOnError(node, err) { + return err + } + + if err == gobreaker.ErrOpenState { + if !nodeSelector.HasNext() && errs == nil { + errs = multierror.Append(errs, status.Error(codes.Unavailable, "there are no available nodes")) + } + continue + } + + errs = multierror.Append(errs, err) + if len(errs.Errors) >= maxFailedRequestCount { + return errs.ErrorOrNil() + } + } + + return errs.ErrorOrNil() +} diff --git a/engine/access/rpc/backend/node_communicator/mock/communicator.go b/engine/access/rpc/backend/node_communicator/mock/communicator.go new file mode 100644 index 00000000000..21be9e88f90 --- /dev/null +++ b/engine/access/rpc/backend/node_communicator/mock/communicator.go @@ -0,0 +1,45 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// Communicator is an autogenerated mock type for the Communicator type +type Communicator struct { + mock.Mock +} + +// CallAvailableNode provides a mock function with given fields: nodes, call, shouldTerminateOnError +func (_m *Communicator) CallAvailableNode(nodes flow.IdentitySkeletonList, call func(*flow.IdentitySkeleton) error, shouldTerminateOnError func(*flow.IdentitySkeleton, error) bool) error { + ret := _m.Called(nodes, call, shouldTerminateOnError) + + if len(ret) == 0 { + panic("no return value specified for CallAvailableNode") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.IdentitySkeletonList, func(*flow.IdentitySkeleton) error, func(*flow.IdentitySkeleton, error) bool) error); ok { + r0 = rf(nodes, call, shouldTerminateOnError) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewCommunicator creates a new instance of Communicator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCommunicator(t interface { + mock.TestingT + Cleanup(func()) +}) *Communicator { + mock := &Communicator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/node_communicator/mock/node_selector.go b/engine/access/rpc/backend/node_communicator/mock/node_selector.go new file mode 100644 index 00000000000..40d3fb78a36 --- /dev/null +++ b/engine/access/rpc/backend/node_communicator/mock/node_selector.go @@ -0,0 +1,65 @@ +// Code generated by mockery. DO NOT EDIT. 
+
+package mock
+
+import (
+	flow "github.com/onflow/flow-go/model/flow"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// NodeSelector is an autogenerated mock type for the NodeSelector type
+type NodeSelector struct {
+	mock.Mock
+}
+
+// HasNext provides a mock function with no fields
+func (_m *NodeSelector) HasNext() bool {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for HasNext")
+	}
+
+	var r0 bool
+	if rf, ok := ret.Get(0).(func() bool); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(bool)
+	}
+
+	return r0
+}
+
+// Next provides a mock function with no fields
+func (_m *NodeSelector) Next() *flow.IdentitySkeleton {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for Next")
+	}
+
+	var r0 *flow.IdentitySkeleton
+	if rf, ok := ret.Get(0).(func() *flow.IdentitySkeleton); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*flow.IdentitySkeleton)
+		}
+	}
+
+	return r0
+}
+
+// NewNodeSelector creates a new instance of NodeSelector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewNodeSelector(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *NodeSelector {
+	mock := &NodeSelector{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/engine/access/rpc/backend/node_communicator/selector.go b/engine/access/rpc/backend/node_communicator/selector.go
new file mode 100644
index 00000000000..e5bd9f6e4b3
--- /dev/null
+++ b/engine/access/rpc/backend/node_communicator/selector.go
@@ -0,0 +1,81 @@
+package node_communicator
+
+import (
+	"fmt"
+
+	commonrpc "github.com/onflow/flow-go/engine/common/rpc"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// NodeSelector is an interface that represents the ability to select node identities that the access node is trying to reach.
+// It encapsulates the internal logic of node selection and provides a way to change implementations for different types
+// of nodes. Implementations of this interface should define the Next method, which returns the next node identity to be
+// selected, and HasNext, which reports whether another node is available.
+type NodeSelector interface {
+	Next() *flow.IdentitySkeleton
+	HasNext() bool
+}
+
+// NodeSelectorFactory is a factory for creating node selectors based on factory configuration and node type.
+// Supported configurations:
+// circuitBreakerEnabled = true - nodes will be picked from the proposed list in-order without any changes.
+// circuitBreakerEnabled = false - a limited number of nodes will be pseudo-randomly sampled from the proposed list and picked in-order.
+type NodeSelectorFactory struct {
+	circuitBreakerEnabled bool
+}
+
+// NewNodeSelectorFactory creates a new instance of NodeSelectorFactory with the provided circuit breaker configuration.
+//
+// When `circuitBreakerEnabled` is set to true, nodes are iterated in the order they are proposed, without any changes,
+// since the circuit breaker takes care of unavailable nodes. When set to false, the legacy behavior applies: a limited
+// number of nodes is pseudo-randomly sampled from the proposed list before iteration.
+//
+// Parameters:
+// - circuitBreakerEnabled: A boolean that controls whether the circuit breaker is enabled for node selection.
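+//
+// Illustrative usage (the surrounding variables are assumptions for this sketch, not part of the package):
+//
+//	factory := NewNodeSelectorFactory(false) // legacy mode: a sampled subset is iterated in-order
+//	selector, err := factory.SelectNodes(executionNodes)
+//	if err != nil {
+//		return err
+//	}
+//	for node := selector.Next(); node != nil; node = selector.Next() {
+//		// attempt the request against node
+//	}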
+func NewNodeSelectorFactory(circuitBreakerEnabled bool) *NodeSelectorFactory {
+	return &NodeSelectorFactory{
+		circuitBreakerEnabled: circuitBreakerEnabled,
+	}
+}
+
+// SelectNodes selects the configured number of node identities from the provided list of nodes
+// and returns the node selector to iterate through them.
+func (n *NodeSelectorFactory) SelectNodes(nodes flow.IdentitySkeletonList) (NodeSelector, error) {
+	var err error
+	// If the circuit breaker is disabled, the legacy logic should be used, which selects only a specified number of nodes.
+	if !n.circuitBreakerEnabled {
+		nodes, err = nodes.Sample(commonrpc.MaxNodesCnt)
+		if err != nil {
+			return nil, fmt.Errorf("sampling failed: %w", err)
+		}
+	}
+
+	return NewMainNodeSelector(nodes), nil
+}
+
+// MainNodeSelector is a specific implementation of the node selector, which performs
+// in-order node selection over a fixed list of pre-defined nodes.
+type MainNodeSelector struct {
+	nodes flow.IdentitySkeletonList
+	index int
+}
+
+var _ NodeSelector = (*MainNodeSelector)(nil)
+
+func NewMainNodeSelector(nodes flow.IdentitySkeletonList) *MainNodeSelector {
+	return &MainNodeSelector{nodes: nodes, index: 0}
+}
+
+// HasNext returns true if a next node is available.
+func (e *MainNodeSelector) HasNext() bool {
+	return e.index < len(e.nodes)
+}
+
+// Next returns the next node in the selector.
+func (e *MainNodeSelector) Next() *flow.IdentitySkeleton {
+	if e.index < len(e.nodes) {
+		next := e.nodes[e.index]
+		e.index++
+		return next
+	}
+	return nil
+}
diff --git a/engine/access/rpc/backend/query_mode/mode.go b/engine/access/rpc/backend/query_mode/mode.go
new file mode 100644
index 00000000000..b9678674ed2
--- /dev/null
+++ b/engine/access/rpc/backend/query_mode/mode.go
@@ -0,0 +1,55 @@
+package query_mode
+
+import (
+	"errors"
+)
+
+type IndexQueryMode int
+
+const (
+	// IndexQueryModeLocalOnly executes scripts and gets accounts using only local storage
+	IndexQueryModeLocalOnly IndexQueryMode = iota + 1
+
+	// IndexQueryModeExecutionNodesOnly executes scripts and gets accounts using only
+	// execution nodes
+	IndexQueryModeExecutionNodesOnly
+
+	// IndexQueryModeFailover executes scripts and gets accounts using local storage first,
+	// then falls back to execution nodes if data is not available for the height or if the
+	// request failed due to a non-user error.
+	IndexQueryModeFailover
+
+	// IndexQueryModeCompare executes scripts and gets accounts using both local storage and
+	// execution nodes and compares the results. The execution node result is always returned.
+ IndexQueryModeCompare +) + +func ParseIndexQueryMode(s string) (IndexQueryMode, error) { + switch s { + case IndexQueryModeLocalOnly.String(): + return IndexQueryModeLocalOnly, nil + case IndexQueryModeExecutionNodesOnly.String(): + return IndexQueryModeExecutionNodesOnly, nil + case IndexQueryModeFailover.String(): + return IndexQueryModeFailover, nil + case IndexQueryModeCompare.String(): + return IndexQueryModeCompare, nil + default: + return 0, errors.New("invalid script execution mode") + } +} + +func (m IndexQueryMode) String() string { + switch m { + case IndexQueryModeLocalOnly: + return "local-only" + case IndexQueryModeExecutionNodesOnly: + return "execution-nodes-only" + case IndexQueryModeFailover: + return "failover" + case IndexQueryModeCompare: + return "compare" + default: + return "" + } +} diff --git a/engine/access/rpc/backend/retry.go b/engine/access/rpc/backend/retry.go deleted file mode 100644 index c5765bfbbe5..00000000000 --- a/engine/access/rpc/backend/retry.go +++ /dev/null @@ -1,124 +0,0 @@ -package backend - -import ( - "context" - "errors" - "sync" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" -) - -// retryFrequency has to be less than TransactionExpiry or else this module does nothing -const retryFrequency uint64 = 120 // blocks - -// Retry implements a simple retry mechanism for transaction submission. -type Retry struct { - mu sync.RWMutex - // pending transactions - transactionByReferencBlockHeight map[uint64]map[flow.Identifier]*flow.TransactionBody - backend *Backend - active bool -} - -func newRetry() *Retry { - return &Retry{ - transactionByReferencBlockHeight: map[uint64]map[flow.Identifier]*flow.TransactionBody{}, - } -} - -func (r *Retry) Activate() *Retry { - r.active = true - return r -} - -func (r *Retry) IsActive() bool { - return r.active -} - -func (r *Retry) SetBackend(b *Backend) *Retry { - r.backend = b - return r -} - -func (r *Retry) Retry(height uint64) { - // No need to retry if height is lower than DefaultTransactionExpiry - if height < flow.DefaultTransactionExpiry { - return - } - - // naive cleanup for now, prune every 120 blocks - if height%retryFrequency == 0 { - r.prune(height) - } - - heightToRetry := height - flow.DefaultTransactionExpiry + retryFrequency - - for heightToRetry < height { - r.retryTxsAtHeight(heightToRetry) - - heightToRetry = heightToRetry + retryFrequency - } - -} - -func (b *Retry) Notify(signal interface{}) bool { - height, ok := signal.(uint64) - if !ok { - return false - } - b.Retry(height) - return true -} - -// RegisterTransaction adds a transaction that could possibly be retried -func (r *Retry) RegisterTransaction(height uint64, tx *flow.TransactionBody) { - r.mu.Lock() - defer r.mu.Unlock() - if r.transactionByReferencBlockHeight[height] == nil { - r.transactionByReferencBlockHeight[height] = make(map[flow.Identifier]*flow.TransactionBody) - } - r.transactionByReferencBlockHeight[height][tx.ID()] = tx -} - -func (r *Retry) prune(height uint64) { - r.mu.Lock() - defer r.mu.Unlock() - // If height is less than the default, there will be no expired transactions - if height < flow.DefaultTransactionExpiry { - return - } - for h := range r.transactionByReferencBlockHeight { - if h < height-flow.DefaultTransactionExpiry { - delete(r.transactionByReferencBlockHeight, h) - } - } -} - -func (r *Retry) retryTxsAtHeight(heightToRetry uint64) { - r.mu.Lock() - defer r.mu.Unlock() - txsAtHeight := r.transactionByReferencBlockHeight[heightToRetry] - for txID, tx := range 
txsAtHeight { - // find the block for the transaction - block, err := r.backend.lookupBlock(txID) - if err != nil { - if !errors.Is(err, storage.ErrNotFound) { - continue - } - block = nil - } - - // find the transaction status - status, err := r.backend.deriveTransactionStatus(tx, false, block) - if err != nil { - continue - } - if status == flow.TransactionStatusPending { - _ = r.backend.SendRawTransaction(context.Background(), tx) - } else if status != flow.TransactionStatusUnknown { - // not pending or unknown, don't need to retry anymore - delete(txsAtHeight, txID) - } - } -} diff --git a/engine/access/rpc/backend/retry_test.go b/engine/access/rpc/backend/retry_test.go deleted file mode 100644 index c10b66bbbc0..00000000000 --- a/engine/access/rpc/backend/retry_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package backend - -import ( - "context" - - "github.com/stretchr/testify/mock" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/execution" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - protocol "github.com/onflow/flow-go/state/protocol/mock" - realstorage "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestTransactionRetry tests that the retry mechanism will send retries at specific times -func (suite *Suite) TestTransactionRetry() { - - // ctx := context.Background() - collection := unittest.CollectionFixture(1) - transactionBody := collection.Transactions[0] - block := unittest.BlockFixture() - // Height needs to be at least DefaultTransactionExpiry before we start doing retries - block.Header.Height = flow.DefaultTransactionExpiry + 1 - transactionBody.SetReferenceBlockID(block.ID()) - headBlock := unittest.BlockFixture() - headBlock.Header.Height = block.Header.Height - 1 // head is behind the current block - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - - suite.snapshot.On("Head").Return(headBlock.Header, nil) - snapshotAtBlock := new(protocol.Snapshot) - snapshotAtBlock.On("Head").Return(block.Header, nil) - suite.state.On("AtBlockID", block.ID()).Return(snapshotAtBlock, nil) - - // collection storage returns a not found error - suite.collections.On("LightByTransactionID", transactionBody.ID()).Return(nil, realstorage.ErrNotFound) - - // txID := transactionBody.ID() - // blockID := block.ID() - // Setup Handler + Retry - backend := New(suite.state, - suite.colClient, - nil, - suite.blocks, - suite.headers, - suite.collections, - suite.transactions, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - nil, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) - retry := newRetry().SetBackend(backend).Activate() - backend.retry = retry - - retry.RegisterTransaction(block.Header.Height, transactionBody) - - suite.colClient.On("SendTransaction", mock.Anything, mock.Anything).Return(&access.SendTransactionResponse{}, nil) - - // Don't retry on every height - retry.Retry(block.Header.Height + 1) - - suite.colClient.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything) - - // Retry every `retryFrequency` - retry.Retry(block.Header.Height + retryFrequency) - - suite.colClient.AssertNumberOfCalls(suite.T(), "SendTransaction", 1) - - // do not retry if expired - retry.Retry(block.Header.Height + retryFrequency + flow.DefaultTransactionExpiry) - - // Should've still only been 
called once - suite.colClient.AssertNumberOfCalls(suite.T(), "SendTransaction", 1) - - suite.assertAllExpectations() -} - -// TestSuccessfulTransactionsDontRetry tests that the retry mechanism will send retries at specific times -func (suite *Suite) TestSuccessfulTransactionsDontRetry() { - - ctx := context.Background() - collection := unittest.CollectionFixture(1) - transactionBody := collection.Transactions[0] - block := unittest.BlockFixture() - // Height needs to be at least DefaultTransactionExpiry before we start doing retries - block.Header.Height = flow.DefaultTransactionExpiry + 1 - refBlock := unittest.BlockFixture() - refBlock.Header.Height = 2 - transactionBody.SetReferenceBlockID(refBlock.ID()) - - block.SetPayload( - unittest.PayloadFixture( - unittest.WithGuarantees( - unittest.CollectionGuaranteesWithCollectionIDFixture([]*flow.Collection{&collection})...))) - - light := collection.Light() - suite.state.On("Final").Return(suite.snapshot, nil).Maybe() - // transaction storage returns the corresponding transaction - suite.transactions.On("ByID", transactionBody.ID()).Return(transactionBody, nil) - // collection storage returns the corresponding collection - suite.collections.On("LightByTransactionID", transactionBody.ID()).Return(&light, nil) - suite.collections.On("LightByID", light.ID()).Return(&light, nil) - // block storage returns the corresponding block - suite.blocks.On("ByCollectionID", collection.ID()).Return(&block, nil) - - txID := transactionBody.ID() - blockID := block.ID() - exeEventReq := execution.GetTransactionResultRequest{ - BlockId: blockID[:], - TransactionId: txID[:], - } - exeEventResp := execution.GetTransactionResultResponse{ - Events: nil, - } - - _, enIDs := suite.setupReceipts(&block) - suite.snapshot.On("Identities", mock.Anything).Return(enIDs, nil) - connFactory := suite.setupConnectionFactory() - - // Setup Handler + Retry - backend := New(suite.state, - suite.colClient, - nil, - suite.blocks, - suite.headers, - suite.collections, - suite.transactions, - suite.receipts, - suite.results, - suite.chainID, - metrics.NewNoopCollector(), - connFactory, - false, - DefaultMaxHeightRange, - nil, - nil, - suite.log, - DefaultSnapshotHistoryLimit, - nil, - ) - retry := newRetry().SetBackend(backend).Activate() - backend.retry = retry - - retry.RegisterTransaction(block.Header.Height, transactionBody) - - suite.colClient.On("SendTransaction", mock.Anything, mock.Anything).Return(&access.SendTransactionResponse{}, nil) - - // return not found to return finalized status - suite.execClient.On("GetTransactionResult", ctx, &exeEventReq).Return(&exeEventResp, status.Errorf(codes.NotFound, "not found")).Once() - // first call - when block under test is greater height than the sealed head, but execution node does not know about Tx - result, err := backend.GetTransactionResult(ctx, txID, flow.ZeroID, flow.ZeroID) - suite.checkResponse(result, err) - - // status should be finalized since the sealed blocks is smaller in height - suite.Assert().Equal(flow.TransactionStatusFinalized, result.Status) - - // Don't retry now now that block is finalized - retry.Retry(block.Header.Height + 1) - - suite.colClient.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything) - - // Don't retry now now that block is finalized - retry.Retry(block.Header.Height + retryFrequency) - - suite.colClient.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything) - - // Don't retry now now that block is finalized - retry.Retry(block.Header.Height + 
retryFrequency + flow.DefaultTransactionExpiry) - - // Should've still should not be called - suite.colClient.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything) - - suite.assertAllExpectations() -} diff --git a/engine/access/rpc/backend/script_executor.go b/engine/access/rpc/backend/script_executor.go new file mode 100644 index 00000000000..fc4ea418e51 --- /dev/null +++ b/engine/access/rpc/backend/script_executor.go @@ -0,0 +1,233 @@ +package backend + +import ( + "context" + "errors" + "fmt" + + "github.com/rs/zerolog" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/engine/common/version" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/state_synchronization" + "github.com/onflow/flow-go/storage" +) + +// ErrIncompatibleNodeVersion indicates that node version is incompatible with the block version +var ErrIncompatibleNodeVersion = errors.New("node version is incompatible with data for block") + +type ScriptExecutor struct { + log zerolog.Logger + + // scriptExecutor is used to interact with execution state + scriptExecutor *execution.Scripts + + // indexReporter provides information about the current state of the execution state indexer. + indexReporter state_synchronization.IndexReporter + + // versionControl provides information about the current version beacon for each block + versionControl *version.VersionControl + + // initialized is used to signal that the index and executor are ready + initialized *atomic.Bool + + // minCompatibleHeight and maxCompatibleHeight are used to limit the block range that can be queried using local execution + // to ensure only blocks that are compatible with the node's current software version are allowed. + // Note: this is a temporary solution for cadence/fvm upgrades while version beacon support is added + minCompatibleHeight *atomic.Uint64 + maxCompatibleHeight *atomic.Uint64 +} + +func NewScriptExecutor(log zerolog.Logger, minHeight, maxHeight uint64) *ScriptExecutor { + logger := log.With().Str("component", "script-executor").Logger() + logger.Info(). + Uint64("min_height", minHeight). + Uint64("max_height", maxHeight). + Msg("script executor created") + + return &ScriptExecutor{ + log: logger, + initialized: atomic.NewBool(false), + minCompatibleHeight: atomic.NewUint64(minHeight), + maxCompatibleHeight: atomic.NewUint64(maxHeight), + } +} + +// SetMinCompatibleHeight sets the lowest block height (inclusive) that can be queried using local execution +// Use this to limit the executable block range supported by the node's current software version. +func (s *ScriptExecutor) SetMinCompatibleHeight(height uint64) { + s.minCompatibleHeight.Store(height) + s.log.Info().Uint64("height", height).Msg("minimum compatible height set") +} + +// SetMaxCompatibleHeight sets the highest block height (inclusive) that can be queried using local execution +// Use this to limit the executable block range supported by the node's current software version. +func (s *ScriptExecutor) SetMaxCompatibleHeight(height uint64) { + s.maxCompatibleHeight.Store(height) + s.log.Info().Uint64("height", height).Msg("maximum compatible height set") +} + +// Initialize initializes the indexReporter and script executor +// This method can be called at any time after the ScriptExecutor object is created. Any requests +// made to the other methods will return storage.ErrHeightNotIndexed until this method is called. 
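+//
+// A minimal wiring sketch (identifiers below are illustrative, not part of this API):
+//
+//	scriptExec := NewScriptExecutor(log, 0, math.MaxUint64)
+//	// ... once the indexer, script engine, and (optional) version control are ready:
+//	if err := scriptExec.Initialize(indexReporter, scripts, versionControl); err != nil {
+//		return err // already initialized
+//	}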
+func (s *ScriptExecutor) Initialize(
+	indexReporter state_synchronization.IndexReporter,
+	scriptExecutor *execution.Scripts,
+	versionControl *version.VersionControl,
+) error {
+	if s.initialized.CompareAndSwap(false, true) {
+		s.log.Info().Msg("script executor initialized")
+		s.indexReporter = indexReporter
+		s.scriptExecutor = scriptExecutor
+		s.versionControl = versionControl
+		return nil
+	}
+	return fmt.Errorf("script executor already initialized")
+}
+
+// ExecuteAtBlockHeight executes the provided script at the provided block height against a local execution state.
+//
+// Expected errors:
+// - storage.ErrNotFound if the register or block height is not found
+// - storage.ErrHeightNotIndexed if the ScriptExecutor is not initialized, or if the height is not indexed yet,
+//   or if the height is before the lowest indexed height.
+// - ErrIncompatibleNodeVersion if the block height is not compatible with the node version.
+func (s *ScriptExecutor) ExecuteAtBlockHeight(ctx context.Context, script []byte, arguments [][]byte, height uint64) ([]byte, error) {
+	if err := s.checkHeight(height); err != nil {
+		return nil, err
+	}
+
+	return s.scriptExecutor.ExecuteAtBlockHeight(ctx, script, arguments, height)
+}
+
+// GetAccountAtBlockHeight returns the account at the provided block height from a local execution state.
+//
+// Expected errors:
+// - storage.ErrNotFound if the account or block height is not found
+// - storage.ErrHeightNotIndexed if the ScriptExecutor is not initialized, or if the height is not indexed yet,
+//   or if the height is before the lowest indexed height.
+// - ErrIncompatibleNodeVersion if the block height is not compatible with the node version.
+func (s *ScriptExecutor) GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) {
+	if err := s.checkHeight(height); err != nil {
+		return nil, err
+	}
+
+	return s.scriptExecutor.GetAccountAtBlockHeight(ctx, address, height)
+}
+
+// GetAccountBalance returns the balance of the Flow account with the provided address at the given block height.
+// Expected errors:
+// - Script execution related errors
+// - storage.ErrHeightNotIndexed if the ScriptExecutor is not initialized, or if the height is not indexed yet,
+//   or if the height is before the lowest indexed height.
+// - ErrIncompatibleNodeVersion if the block height is not compatible with the node version.
+func (s *ScriptExecutor) GetAccountBalance(ctx context.Context, address flow.Address, height uint64) (uint64, error) {
+	if err := s.checkHeight(height); err != nil {
+		return 0, err
+	}
+
+	return s.scriptExecutor.GetAccountBalance(ctx, address, height)
+}
+
+// GetAccountAvailableBalance returns the available balance of the Flow account with the provided address at the given block height.
+// Expected errors:
+// - Script execution related errors
+// - storage.ErrHeightNotIndexed if the ScriptExecutor is not initialized, or if the height is not indexed yet,
+//   or if the height is before the lowest indexed height.
+// - ErrIncompatibleNodeVersion if the block height is not compatible with the node version.
+func (s *ScriptExecutor) GetAccountAvailableBalance(ctx context.Context, address flow.Address, height uint64) (uint64, error) {
+	if err := s.checkHeight(height); err != nil {
+		return 0, err
+	}
+
+	return s.scriptExecutor.GetAccountAvailableBalance(ctx, address, height)
+}
+
+// GetAccountKeys returns all public keys of the Flow account with the provided address at the given block height.
+// Expected errors:
+// - Script execution related errors
+// - storage.ErrHeightNotIndexed if the ScriptExecutor is not initialized, or if the height is not indexed yet,
+//   or if the height is before the lowest indexed height.
+// - ErrIncompatibleNodeVersion if the block height is not compatible with the node version.
+func (s *ScriptExecutor) GetAccountKeys(ctx context.Context, address flow.Address, height uint64) ([]flow.AccountPublicKey, error) {
+	if err := s.checkHeight(height); err != nil {
+		return nil, err
+	}
+
+	return s.scriptExecutor.GetAccountKeys(ctx, address, height)
+}
+
+// GetAccountKey returns the public key with the given index for the Flow account with the provided address at the given block height.
+// Expected errors:
+// - Script execution related errors
+// - storage.ErrHeightNotIndexed if the ScriptExecutor is not initialized, or if the height is not indexed yet,
+//   or if the height is before the lowest indexed height.
+// - ErrIncompatibleNodeVersion if the block height is not compatible with the node version.
+func (s *ScriptExecutor) GetAccountKey(ctx context.Context, address flow.Address, keyIndex uint32, height uint64) (*flow.AccountPublicKey, error) {
+	if err := s.checkHeight(height); err != nil {
+		return nil, err
+	}
+
+	return s.scriptExecutor.GetAccountKey(ctx, address, keyIndex, height)
+}
+
+// checkHeight checks if the provided block height is within the range of indexed heights
+// and compatible with the node's version.
+//
+// It performs several checks:
+// 1. Ensures the ScriptExecutor is initialized.
+// 2. Compares the provided height with the highest and lowest indexed heights.
+// 3. Ensures the height is within the compatible version range if version control is enabled.
+//
+// Parameters:
+// - height: the block height to check.
+//
+// Returns:
+// - error: if the block height is not within the indexed range or not compatible with the node's version.
+//
+// Expected errors:
+// - storage.ErrHeightNotIndexed if the ScriptExecutor is not initialized, or if the height is not indexed yet,
+//   or if the height is before the lowest indexed height.
+// - ErrIncompatibleNodeVersion if the block height is not compatible with the node version.
+func (s *ScriptExecutor) checkHeight(height uint64) error {
+	if !s.initialized.Load() {
+		return fmt.Errorf("%w: script executor not initialized", storage.ErrHeightNotIndexed)
+	}
+
+	highestHeight, err := s.indexReporter.HighestIndexedHeight()
+	if err != nil {
+		return fmt.Errorf("could not get highest indexed height: %w", err)
+	}
+	if height > highestHeight {
+		return fmt.Errorf("%w: block not indexed yet", storage.ErrHeightNotIndexed)
+	}
+
+	lowestHeight, err := s.indexReporter.LowestIndexedHeight()
+	if err != nil {
+		return fmt.Errorf("could not get lowest indexed height: %w", err)
+	}
+
+	if height < lowestHeight {
+		return fmt.Errorf("%w: block is before lowest indexed height", storage.ErrHeightNotIndexed)
+	}
+
+	if height > s.maxCompatibleHeight.Load() || height < s.minCompatibleHeight.Load() {
+		return ErrIncompatibleNodeVersion
+	}
+
+	// The version control feature may be disabled. In that case, skip the version compatibility check.
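+	// Note: when version control is present, CompatibleAtBlock is expected to consult the recorded
+	// version beacons to decide whether this node's software version can execute the requested height.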
+ if s.versionControl != nil { + compatible, err := s.versionControl.CompatibleAtBlock(height) + if err != nil { + return fmt.Errorf("failed to check compatibility with block height %d: %w", height, err) + } + + if !compatible { + return ErrIncompatibleNodeVersion + } + } + + return nil +} diff --git a/engine/access/rpc/backend/script_executor_test.go b/engine/access/rpc/backend/script_executor_test.go new file mode 100644 index 00000000000..1ec86b2d4da --- /dev/null +++ b/engine/access/rpc/backend/script_executor_test.go @@ -0,0 +1,324 @@ +package backend + +import ( + "context" + "math" + "testing" + "time" + + "github.com/coreos/go-semver/semver" + "github.com/rs/zerolog" + testifyMock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/common/version" + "github.com/onflow/flow-go/engine/execution/computation/query" + "github.com/onflow/flow-go/engine/execution/testutil" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" + synctest "github.com/onflow/flow-go/module/state_synchronization/requester/unittest" + "github.com/onflow/flow-go/storage" + storageMock "github.com/onflow/flow-go/storage/mock" + pebbleStorage "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +// ScriptExecutorSuite is a test suite for testing the ScriptExecutor. +// It sets up the necessary components and dependencies for executing scripts. +type ScriptExecutorSuite struct { + suite.Suite + + log zerolog.Logger + registerIndex storage.RegisterIndex + versionControl *version.VersionControl + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + scripts *execution.Scripts + chain flow.Chain + dbDir string + height uint64 + headers storage.Headers + vm *fvm.VirtualMachine + vmCtx fvm.Context + snapshot snapshot.SnapshotTree +} + +// TestScriptExecutorSuite runs the ScriptExecutorSuite test suite. +func TestScriptExecutorSuite(t *testing.T) { + suite.Run(t, new(ScriptExecutorSuite)) +} + +// newBlockHeadersStorage creates a mock block header storage for the given blocks. +func newBlockHeadersStorage(blocks []*flow.Block) storage.Headers { + blocksByHeight := make(map[uint64]*flow.Block) + for _, b := range blocks { + blocksByHeight[b.Height] = b + } + + return synctest.MockBlockHeaderStorage(synctest.WithByHeight(blocksByHeight)) +} + +// bootstrap initializes the virtual machine and updates the register index and snapshot. +// This method sets up the initial state for the virtual machine. 
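+// Each call advances the suite's height by one and stores the updated registers at that height,
+// so subsequent script executions in the tests run against the bootstrapped state.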
+func (s *ScriptExecutorSuite) bootstrap() { + bootstrapOpts := []fvm.BootstrapProcedureOption{ + fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply), + } + + executionSnapshot, out, err := s.vm.Run( + s.vmCtx, + fvm.Bootstrap(unittest.ServiceAccountPublicKey, bootstrapOpts...), + s.snapshot) + + // Ensure no errors occurred during the bootstrap process + s.Require().NoError(err) + s.Require().NoError(out.Err) + + // Update the block height and store the updated registers + s.height++ + err = s.registerIndex.Store(executionSnapshot.UpdatedRegisters(), s.height) + s.Require().NoError(err) + + // Append the execution snapshot to the snapshot tree + s.snapshot = s.snapshot.Append(executionSnapshot) +} + +// SetupTest sets up the test environment for each test in the suite. +// This includes initializing various components and mock objects needed for the tests. +func (s *ScriptExecutorSuite) SetupTest() { + lockManager := storage.NewTestingLockManager() + s.log = unittest.Logger() + s.chain = flow.Emulator.Chain() + + s.reporter = syncmock.NewIndexReporter(s.T()) + s.indexReporter = index.NewReporter() + err := s.indexReporter.Initialize(s.reporter) + require.NoError(s.T(), err) + + blockchain := unittest.BlockchainFixture(10) + s.headers = newBlockHeadersStorage(blockchain) + s.height = blockchain[0].Height + + protocolState := testutil.ProtocolStateWithSourceFixture(nil) + + s.snapshot = snapshot.NewSnapshotTree(nil) + s.vm = fvm.NewVirtualMachine() + s.vmCtx = fvm.NewContext( + fvm.WithChain(s.chain), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + ) + + s.dbDir = unittest.TempDir(s.T()) + db := pebbleStorage.NewBootstrappedRegistersWithPathForTest(s.T(), s.dbDir, s.height, s.height) + pebbleRegisters, err := pebbleStorage.NewRegisters(db, pebbleStorage.PruningDisabled) + s.Require().NoError(err) + s.registerIndex = pebbleRegisters + + derivedChainData, err := derived.NewDerivedChainData(derived.DefaultDerivedDataCacheSize) + s.Require().NoError(err) + + indexerCore, err := indexer.New( + s.log, + module.ExecutionStateIndexerMetrics(metrics.NewNoopCollector()), + nil, + s.registerIndex, + s.headers, + nil, + nil, + nil, + nil, + s.chain, + derivedChainData, + module.CollectionExecutedMetric(metrics.NewNoopCollector()), + lockManager, + ) + s.Require().NoError(err) + + s.scripts = execution.NewScripts( + s.log, + metrics.NewNoopCollector(), + s.chain.ChainID(), + protocolState, + s.headers, + indexerCore.RegisterValue, + query.NewDefaultConfig(), + derivedChainData, + true, + ) + s.bootstrap() +} + +// TearDownTest runs after each test finishes and ensures components are done before continuing. +func (s *ScriptExecutorSuite) TearDownTest() { + unittest.RequireComponentsDoneBefore(s.T(), 100*time.Millisecond, s.versionControl) +} + +// TestExecuteAtBlockHeight tests script execution at a specific block height. +// It verifies the behavior of script execution with and without version control. +func (s *ScriptExecutorSuite) TestExecuteAtBlockHeight() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + script := []byte("access(all) fun main() { }") + var scriptArgs [][]byte + var expectedResult = []byte("{\"type\":\"Void\"}\n") + + s.reporter.On("LowestIndexedHeight").Return(s.height, nil) + + // This test simulates the behavior when the version beacon is not set in the script executor, + // but it should still work by omitting the version control checks. 
+ s.Run("test script execution without version control", func() { + scriptExec := NewScriptExecutor(s.log, uint64(0), math.MaxUint64) + s.reporter.On("HighestIndexedHeight").Return(s.height+1, nil).Once() + + // Initialize the script executor without version control + err := scriptExec.Initialize(s.indexReporter, s.scripts, nil) + s.Require().NoError(err) + + // Execute the script at the specified block height + res, err := scriptExec.ExecuteAtBlockHeight(ctx, script, scriptArgs, s.height) + s.Assert().NoError(err) + s.Assert().NotNil(res) + s.Assert().Equal(expectedResult, res) + }) + + // This test simulates the behavior when the version beacon is set in the script executor, + // and the script is running on a block with a compatible version that matches the current node version. + s.Run("test script execution with version control with compatible version", func() { + // Set up a mock version beacons events storage + versionBeacons := storageMock.NewVersionBeacons(s.T()) + versionEvents := map[uint64]*flow.SealedVersionBeacon{ + s.height: versionBeaconEventFixture( + s.T(), + s.height, + []uint64{s.height}, + []string{"0.0.1"}, + ), + } + // Mock the Highest method to return a version beacon with a specific version + versionBeacons. + On("Highest", testifyMock.AnythingOfType("uint64")). + Return(mocks.StorageMapGetter(versionEvents)) + + var err error + // Initialize version control with the mock version beacons + s.versionControl, err = version.NewVersionControl( + s.log, + versionBeacons, + semver.New("0.0.1"), + s.height-1, + s.height, + ) + require.NoError(s.T(), err) + + // Create a mock signaler context for testing + ictx := irrecoverable.NewMockSignalerContext(s.T(), ctx) + + // Start the VersionControl component + s.versionControl.Start(ictx) + + // Ensure the component is ready before proceeding + unittest.RequireComponentsReadyBefore(s.T(), 2*time.Second, s.versionControl) + + // Initialize the script executor with version control + scriptExec := NewScriptExecutor(s.log, uint64(0), math.MaxUint64) + s.reporter.On("HighestIndexedHeight").Return(s.height+1, nil) + + err = scriptExec.Initialize(s.indexReporter, s.scripts, s.versionControl) + s.Require().NoError(err) + + // Execute the script at the specified block height + res, err := scriptExec.ExecuteAtBlockHeight(ctx, script, scriptArgs, s.height) + s.Assert().NoError(err) + s.Assert().NotNil(res) + s.Assert().Equal(expectedResult, res) + }) + + // This test simulates the behavior when the version beacon is set in the script executor, + // and the script is running on a block with incompatible version that mismatch the current node version. + s.Run("test script execution with version control with incompatible version", func() { + // Set up a mock version beacons events storage + versionBeacons := storageMock.NewVersionBeacons(s.T()) + versionEvents := map[uint64]*flow.SealedVersionBeacon{ + s.height: versionBeaconEventFixture( + s.T(), + s.height, + []uint64{s.height}, + []string{"0.0.2"}, + ), + } + // Mock the Highest method to return a version beacon with a specific version + versionBeacons. + On("Highest", testifyMock.AnythingOfType("uint64")). 
+ Return(mocks.StorageMapGetter(versionEvents)) + + var err error + // Initialize version control with the mock version beacons + s.versionControl, err = version.NewVersionControl( + s.log, + versionBeacons, + semver.New("0.0.1"), + s.height-1, + s.height, + ) + require.NoError(s.T(), err) + + // Create a mock signaler context for testing + ictx := irrecoverable.NewMockSignalerContext(s.T(), ctx) + + // Start the VersionControl component + s.versionControl.Start(ictx) + + // Ensure the component is ready before proceeding + unittest.RequireComponentsReadyBefore(s.T(), 2*time.Second, s.versionControl) + + // Initialize the script executor with version control + scriptExec := NewScriptExecutor(s.log, uint64(0), math.MaxUint64) + s.reporter.On("HighestIndexedHeight").Return(s.height+1, nil) + + err = scriptExec.Initialize(s.indexReporter, s.scripts, s.versionControl) + s.Require().NoError(err) + + // Execute the script at the specified block height + res, err := scriptExec.ExecuteAtBlockHeight(ctx, script, scriptArgs, s.height) + s.Assert().ErrorIs(ErrIncompatibleNodeVersion, err) + s.Assert().Nil(res) + }) +} + +// versionBeaconEventFixture creates a SealedVersionBeacon for the given heights and versions. +// This is used to simulate version events in the tests. +func versionBeaconEventFixture( + t *testing.T, + sealHeight uint64, + heights []uint64, + versions []string, +) *flow.SealedVersionBeacon { + require.Equal(t, len(heights), len(versions), "the heights array should be the same length as the versions array") + var vb []flow.VersionBoundary + for i := 0; i < len(heights); i++ { + vb = append(vb, flow.VersionBoundary{ + BlockHeight: heights[i], + Version: versions[i], + }) + } + + return &flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries(vb...), + ), + SealHeight: sealHeight, + } +} diff --git a/engine/access/rpc/backend/scripts/executor/compare.go b/engine/access/rpc/backend/scripts/executor/compare.go new file mode 100644 index 00000000000..40f26f956b2 --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/compare.go @@ -0,0 +1,62 @@ +package executor + +import ( + "context" + "time" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/module" +) + +type ComparingScriptExecutor struct { + log zerolog.Logger + metrics module.BackendScriptsMetrics + + localExecutor ScriptExecutor + executionNodeExecutor ScriptExecutor + + scriptCache *LoggedScriptCache +} + +var _ ScriptExecutor = (*ComparingScriptExecutor)(nil) + +func NewComparingScriptExecutor( + log zerolog.Logger, + metrics module.BackendScriptsMetrics, + scriptCache *LoggedScriptCache, + localExecutor ScriptExecutor, + execNodeExecutor ScriptExecutor, +) *ComparingScriptExecutor { + return &ComparingScriptExecutor{ + log: log.With().Str("script_executor", "comparing").Logger(), + metrics: metrics, + scriptCache: scriptCache, + localExecutor: localExecutor, + executionNodeExecutor: execNodeExecutor, + } +} + +func (c *ComparingScriptExecutor) Execute(ctx context.Context, request *Request) ([]byte, time.Duration, error) { + execResult, execDuration, execErr := c.executionNodeExecutor.Execute(ctx, request) + + // we can only compare the results if there were either no errors or a cadence error + // since we cannot distinguish the EN error as caused by the block being pruned or some other reason, + // which may produce a valid RN output but an error for the EN + isInvalidArgument := status.Code(execErr) 
== codes.InvalidArgument + if execErr != nil && !isInvalidArgument { + return nil, 0, execErr + } + + localResult, localDuration, localErr := c.localExecutor.Execute(ctx, request) + + resultComparer := newScriptResultComparison(c.log, c.metrics, c.scriptCache.shouldLogScript, request) + _ = resultComparer.compare( + newScriptResult(execResult, execDuration, execErr), + newScriptResult(localResult, localDuration, localErr), + ) + + return execResult, execDuration, execErr +} diff --git a/engine/access/rpc/backend/scripts/executor/comparer.go b/engine/access/rpc/backend/scripts/executor/comparer.go new file mode 100644 index 00000000000..46364ffcea0 --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/comparer.go @@ -0,0 +1,180 @@ +package executor + +import ( + "bytes" + "crypto/md5" //nolint:gosec + "encoding/base64" + "errors" + "strings" + "time" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/module" +) + +const ( + executeErrorPrefix = "failed to execute script at block" + logDiffAsError = false +) + +type scriptResult struct { + result []byte + duration time.Duration + err error +} + +func newScriptResult(result []byte, duration time.Duration, err error) *scriptResult { + return &scriptResult{ + result: result, + duration: duration, + err: err, + } +} + +type scriptResultComparison struct { + log zerolog.Logger + metrics module.BackendScriptsMetrics + request *Request + shouldLogScript func(time.Time, [md5.Size]byte) bool +} + +func newScriptResultComparison( + log zerolog.Logger, + metrics module.BackendScriptsMetrics, + shouldLogScript func(time.Time, [md5.Size]byte) bool, + request *Request, +) *scriptResultComparison { + return &scriptResultComparison{ + log: log, + metrics: metrics, + request: request, + shouldLogScript: shouldLogScript, + } +} + +func (c *scriptResultComparison) compare(execResult, localResult *scriptResult) bool { + // record errors caused by missing local data + if isOutOfRangeError(localResult.err) { + c.metrics.ScriptExecutionNotIndexed() + c.logComparison(execResult, localResult, + "script execution results do not match EN because data is not indexed yet", false) + return false + } + + // check errors first + if execResult.err != nil { + if compareErrors(execResult.err, localResult.err) { + c.metrics.ScriptExecutionErrorMatch() + return true + } + + c.metrics.ScriptExecutionErrorMismatch() + c.logComparison(execResult, localResult, + "cadence errors from local execution do not match EN", logDiffAsError) + return false + } + + if bytes.Equal(execResult.result, localResult.result) { + c.metrics.ScriptExecutionResultMatch() + return true + } + + c.metrics.ScriptExecutionResultMismatch() + c.logComparison(execResult, localResult, + "script execution results from local execution do not match EN", logDiffAsError) + return false +} + +// logScriptExecutionComparison logs the script execution comparison between local execution and execution node +func (c *scriptResultComparison) logComparison(execResult, localResult *scriptResult, msg string, useError bool) { + args := make([]string, len(c.request.arguments)) + for i, arg := range c.request.arguments { + args[i] = string(arg) + } + + lgCtx := c.log.With(). + Hex("block_id", c.request.blockID[:]). + Hex("script_hash", c.request.insecureScriptHash[:]). 
+ Strs("args", args) + + if c.shouldLogScript(time.Now(), c.request.insecureScriptHash) { + lgCtx = lgCtx.Str("script", string(c.request.script)) + } + + if execResult.err != nil { + lgCtx = lgCtx.AnErr("execution_node_error", execResult.err) + } else { + lgCtx = lgCtx.Str("execution_node_result", base64.StdEncoding.EncodeToString(execResult.result)) + } + lgCtx = lgCtx.Dur("execution_node_duration_ms", execResult.duration) + + if localResult.err != nil { + lgCtx = lgCtx.AnErr("local_error", localResult.err) + } else { + lgCtx = lgCtx.Str("local_result", base64.StdEncoding.EncodeToString(localResult.result)) + } + lgCtx = lgCtx.Dur("local_duration_ms", localResult.duration) + + lg := lgCtx.Logger() + if useError { + lg.Error().Msg(msg) + } else { + lg.Debug().Msg(msg) + } +} + +func isOutOfRangeError(err error) bool { + return status.Code(err) == codes.OutOfRange +} + +func compareErrors(execErr, localErr error) bool { + if errors.Is(execErr, localErr) { + return true + } + + // if the status code is different, then they definitely don't match + if status.Code(execErr) != status.Code(localErr) { + return false + } + + // absolute error strings generally won't match since the code paths are slightly different + // check if the original error is the same by removing unneeded error wrapping. + return containsError(execErr, localErr) +} + +func containsError(execErr, localErr error) bool { + // both script execution implementations use the same engine, which adds + // "failed to execute script at block" to the message before returning. Any characters + // before this can be ignored. The string that comes after is the original error and + // should match. + execErrStr := trimErrorPrefix(execErr) + localErrStr := trimErrorPrefix(localErr) + + if execErrStr == localErrStr { + return true + } + + // by default ENs are configured with longer script error size limits, which means that the AN's + // error may be truncated. check if the non-truncated parts match. + subParts := strings.Split(localErrStr, " ... 
") + + return len(subParts) == 2 && + strings.HasPrefix(execErrStr, subParts[0]) && + strings.HasSuffix(execErrStr, subParts[1]) +} + +func trimErrorPrefix(err error) string { + if err == nil { + return "" + } + + parts := strings.Split(err.Error(), executeErrorPrefix) + if len(parts) != 2 { + return err.Error() + } + + return parts[1] +} diff --git a/engine/access/rpc/backend/scripts/executor/comparer_test.go b/engine/access/rpc/backend/scripts/executor/comparer_test.go new file mode 100644 index 00000000000..56c756d795e --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/comparer_test.go @@ -0,0 +1,169 @@ +package executor + +import ( + "fmt" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestCompare(t *testing.T) { + m := metrics.NewNoopCollector() + logger := zerolog.Nop() + + result1 := []byte("result1") + result2 := []byte("result2") + + error1 := status.Error(codes.InvalidArgument, "error1") + error2 := status.Error(codes.InvalidArgument, "error2") + + outOfRange := status.Error(codes.OutOfRange, "out of range") + + testcases := []struct { + name string + execResult *scriptResult + localResult *scriptResult + expected bool + }{ + { + name: "results match", + execResult: newScriptResult(result1, 0, nil), + localResult: newScriptResult(result1, 0, nil), + expected: true, + }, + { + name: "results do not match", + execResult: newScriptResult(result1, 0, nil), + localResult: newScriptResult(result2, 0, nil), + expected: false, + }, + { + name: "en returns result, local returns error", + execResult: newScriptResult(result1, 0, nil), + localResult: newScriptResult(nil, 0, error1), + expected: false, + }, + { + name: "en returns error, local returns result", + execResult: newScriptResult(nil, 0, error1), + localResult: newScriptResult(result1, 0, nil), + expected: false, + }, + { + // demonstrate this works by passing the same result since the OOR check happens first + // if the check failed, this should return true + name: "local returns out of range", + execResult: newScriptResult(result1, 0, nil), + localResult: newScriptResult(result1, 0, outOfRange), + expected: false, + }, + { + name: "both return same error", + execResult: newScriptResult(nil, 0, error1), + localResult: newScriptResult(nil, 0, error1), + expected: true, + }, + { + name: "both return different errors", + execResult: newScriptResult(nil, 0, error1), + localResult: newScriptResult(nil, 0, error2), + expected: false, + }, + } + + request := NewScriptExecutionRequest(unittest.IdentifierFixture(), 1, []byte("script"), [][]byte{}) + shouldLogScript := func(time.Time, [16]byte) bool { return true } + comparer := newScriptResultComparison(logger, m, shouldLogScript, request) + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + actual := comparer.compare(tc.execResult, tc.localResult) + assert.Equalf(t, tc.expected, actual, "expected %v, got %v", tc.expected, actual) + }) + } +} + +func TestCompareErrors(t *testing.T) { + testcases := []struct { + name string + execErr error + localErr error + expected bool + }{ + { + name: "both nil", + execErr: nil, + localErr: nil, + expected: true, + }, + { + name: "same error", + execErr: storage.ErrNotFound, + localErr: storage.ErrNotFound, + expected: true, + }, + { + name: "same error message", + execErr: 
fmt.Errorf("same error message"), + localErr: fmt.Errorf("same error message"), + expected: true, + }, + { + name: "same error code", + execErr: status.Error(codes.InvalidArgument, "some message"), + localErr: status.Error(codes.InvalidArgument, "some message"), + expected: true, + }, + { + name: "different error code", + execErr: status.Error(codes.Canceled, "some message"), + localErr: status.Error(codes.DeadlineExceeded, "some message"), + expected: false, + }, + { + name: "same error code, different message", + execErr: status.Error(codes.InvalidArgument, "some message"), + localErr: status.Error(codes.InvalidArgument, "different message"), + expected: false, + }, + { + name: "same error, different prefix", + execErr: status.Errorf(codes.InvalidArgument, "original: %s: some message", executeErrorPrefix), + localErr: status.Errorf(codes.InvalidArgument, "anything: %s: some message", executeErrorPrefix), + expected: true, + }, + { + name: "different error, different prefix", + execErr: status.Errorf(codes.InvalidArgument, "original: %s: some message", executeErrorPrefix), + localErr: status.Errorf(codes.InvalidArgument, "anything: %s: another message", executeErrorPrefix), + expected: false, + }, + { + name: "truncated error, match", + execErr: status.Errorf(codes.InvalidArgument, "original: %s: this is the original message", executeErrorPrefix), + localErr: status.Errorf(codes.InvalidArgument, "anything: %s: this is ... message", executeErrorPrefix), + expected: true, + }, + { + name: "truncated error, do not match", + execErr: status.Errorf(codes.InvalidArgument, "original: %s: this is the original message", executeErrorPrefix), + localErr: status.Errorf(codes.InvalidArgument, "anything: %s: this is ... a different message", executeErrorPrefix), + expected: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + actual := compareErrors(tc.execErr, tc.localErr) + assert.Equalf(t, tc.expected, actual, "expected %v, got %v", tc.expected, actual) + }) + } +} diff --git a/engine/access/rpc/backend/scripts/executor/execution_node.go b/engine/access/rpc/backend/scripts/executor/execution_node.go new file mode 100644 index 00000000000..d6c58e35b43 --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/execution_node.go @@ -0,0 +1,128 @@ +package executor + +import ( + "context" + "time" + + execproto "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" +) + +type ENScriptExecutor struct { + log zerolog.Logger + metrics module.BackendScriptsMetrics //TODO: move this metrics to scriptCache struct? 
+
+	nodeProvider     *commonrpc.ExecutionNodeIdentitiesProvider
+	nodeCommunicator node_communicator.Communicator
+	connFactory      connection.ConnectionFactory
+
+	scriptCache *LoggedScriptCache
+}
+
+var _ ScriptExecutor = (*ENScriptExecutor)(nil)
+
+func NewENScriptExecutor(
+	log zerolog.Logger,
+	metrics module.BackendScriptsMetrics,
+	nodeProvider *commonrpc.ExecutionNodeIdentitiesProvider,
+	nodeCommunicator node_communicator.Communicator,
+	connFactory connection.ConnectionFactory,
+	scriptCache *LoggedScriptCache,
+) *ENScriptExecutor {
+	return &ENScriptExecutor{
+		log:              log.With().Str("script_executor", "execution_node").Logger(),
+		metrics:          metrics,
+		nodeProvider:     nodeProvider,
+		nodeCommunicator: nodeCommunicator,
+		connFactory:      connFactory,
+		scriptCache:      scriptCache,
+	}
+}
+
+func (e *ENScriptExecutor) Execute(ctx context.Context, request *Request) ([]byte, time.Duration, error) {
+	// find a few execution nodes that have executed the block earlier and provided an execution receipt for it
+	executors, err := e.nodeProvider.ExecutionNodesForBlockID(ctx, request.blockID)
+	if err != nil {
+		return nil, 0, status.Errorf(
+			codes.Internal, "failed to find script executors at blockId %v: %v",
+			request.blockID.String(),
+			err,
+		)
+	}
+
+	var result []byte
+	var executionTime time.Time
+	var execDuration time.Duration
+	errToReturn := e.nodeCommunicator.CallAvailableNode(
+		executors,
+		func(node *flow.IdentitySkeleton) error {
+			execStartTime := time.Now()
+
+			result, err = e.tryExecuteScriptOnExecutionNode(ctx, node.Address, request)
+
+			executionTime = time.Now()
+			execDuration = executionTime.Sub(execStartTime)
+
+			if err != nil {
+				return err
+			}
+
+			e.scriptCache.LogExecutedScript(request.blockID, request.insecureScriptHash, executionTime, node.Address, request.script, execDuration)
+			e.metrics.ScriptExecuted(time.Since(execStartTime), len(request.script))
+
+			return nil
+		},
+		func(node *flow.IdentitySkeleton, err error) bool {
+			if status.Code(err) == codes.InvalidArgument {
+				e.scriptCache.LogFailedScript(request.blockID, request.insecureScriptHash, executionTime, node.Address, request.script)
+				return true
+			}
+			return false
+		},
+	)
+
+	if errToReturn != nil {
+		if status.Code(errToReturn) != codes.InvalidArgument {
+			e.metrics.ScriptExecutionErrorOnExecutionNode()
+			e.log.Error().Err(errToReturn).Msg("script execution failed for execution node internal reasons")
+		}
+		return nil, execDuration, rpc.ConvertError(errToReturn, "failed to execute script on execution nodes", codes.Internal)
+	}
+
+	return result, execDuration, nil
+}
+
+// tryExecuteScriptOnExecutionNode attempts to execute the script on the given execution node.
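+// It dials the node through the connection factory, issues a single ExecuteScriptAtBlockID
+// GRPC request, and returns the raw response value; retrying across multiple executors is the
+// caller's job (see the node communicator usage in Execute above). Errors are annotated with
+// the executor's address so a failure can be attributed to a specific node.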
+func (e *ENScriptExecutor) tryExecuteScriptOnExecutionNode(
+	ctx context.Context,
+	executorAddress string,
+	r *Request,
+) ([]byte, error) {
+	execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(executorAddress)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to create client for execution node %s: %v",
+			executorAddress, err)
+	}
+	defer closer.Close()
+
+	execResp, err := execRPCClient.ExecuteScriptAtBlockID(ctx, &execproto.ExecuteScriptAtBlockIDRequest{
+		BlockId:   r.blockID[:],
+		Script:    r.script,
+		Arguments: r.arguments,
+	})
+	if err != nil {
+		return nil, status.Errorf(status.Code(err), "failed to execute the script on the execution node %s: %v", executorAddress, err)
+	}
+
+	return execResp.GetValue(), nil
+}
diff --git a/engine/access/rpc/backend/scripts/executor/executor.go b/engine/access/rpc/backend/scripts/executor/executor.go
new file mode 100644
index 00000000000..807db700416
--- /dev/null
+++ b/engine/access/rpc/backend/scripts/executor/executor.go
@@ -0,0 +1,42 @@
+package executor
+
+import (
+	"context"
+	"crypto/md5" //nolint:gosec
+	"time"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+type ScriptExecutor interface {
+	Execute(ctx context.Context, scriptRequest *Request) ([]byte, time.Duration, error)
+}
+
+// Request encapsulates the data needed to execute a script, to make it easier
+// to pass around between the various methods involved in script execution.
+type Request struct {
+	blockID            flow.Identifier
+	height             uint64
+	script             []byte
+	arguments          [][]byte
+	insecureScriptHash [md5.Size]byte
+}
+
+func NewScriptExecutionRequest(
+	blockID flow.Identifier,
+	height uint64,
+	script []byte,
+	arguments [][]byte,
+) *Request {
+	return &Request{
+		blockID:   blockID,
+		height:    height,
+		script:    script,
+		arguments: arguments,
+
+		// encode to MD5 as low compute/memory lookup key
+		// CAUTION: cryptographically insecure md5 is used here, but only to de-duplicate logs.
+		// *DO NOT* use this hash for any protocol-related or cryptographic functions.
+		insecureScriptHash: md5.Sum(script), //nolint:gosec
+	}
+}
diff --git a/engine/access/rpc/backend/scripts/executor/failover.go b/engine/access/rpc/backend/scripts/executor/failover.go
new file mode 100644
index 00000000000..3d69ca5a217
--- /dev/null
+++ b/engine/access/rpc/backend/scripts/executor/failover.go
@@ -0,0 +1,38 @@
+package executor
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type FailoverScriptExecutor struct {
+	localExecutor         ScriptExecutor
+	executionNodeExecutor ScriptExecutor
+}
+
+var _ ScriptExecutor = (*FailoverScriptExecutor)(nil)
+
+func NewFailoverScriptExecutor(localExecutor ScriptExecutor, execNodeExecutor ScriptExecutor) *FailoverScriptExecutor {
+	return &FailoverScriptExecutor{
+		localExecutor:         localExecutor,
+		executionNodeExecutor: execNodeExecutor,
+	}
+}
+
+func (f *FailoverScriptExecutor) Execute(ctx context.Context, request *Request) ([]byte, time.Duration, error) {
+	localResult, localDuration, localErr := f.localExecutor.Execute(ctx, request)
+
+	isInvalidArgument := status.Code(localErr) == codes.InvalidArgument
+	isCanceled := status.Code(localErr) == codes.Canceled
+	if localErr == nil || isInvalidArgument || isCanceled {
+		return localResult, localDuration, localErr
+	}
+
+	// Note: scripts that time out are retried on the execution nodes, since ANs may have performance
+	// issues for some scripts.
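+	//
+	// Concretely (per the error mapping in the local executor and the failover tests): local
+	// errors such as storage.ErrHeightNotIndexed (codes.OutOfRange) or computation/memory limit
+	// errors (codes.ResourceExhausted) fall through to the EN executor below, while Cadence
+	// runtime errors (codes.InvalidArgument) and canceled contexts (codes.Canceled) are
+	// returned directly above.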
+	execResult, execDuration, execErr := f.executionNodeExecutor.Execute(ctx, request)
+	return execResult, execDuration, execErr
+}
diff --git a/engine/access/rpc/backend/scripts/executor/local.go b/engine/access/rpc/backend/scripts/executor/local.go
new file mode 100644
index 00000000000..0c0973b944c
--- /dev/null
+++ b/engine/access/rpc/backend/scripts/executor/local.go
@@ -0,0 +1,113 @@
+package executor
+
+import (
+	"context"
+	"time"
+
+	"github.com/rs/zerolog"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine/common/rpc"
+	fvmerrors "github.com/onflow/flow-go/fvm/errors"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/execution"
+	"github.com/onflow/flow-go/utils/logging"
+)
+
+type LocalScriptExecutor struct {
+	log     zerolog.Logger
+	metrics module.BackendScriptsMetrics
+
+	scriptExecutor execution.ScriptExecutor
+	scriptCache    *LoggedScriptCache
+}
+
+var _ ScriptExecutor = (*LocalScriptExecutor)(nil)
+
+func NewLocalScriptExecutor(
+	log zerolog.Logger,
+	metrics module.BackendScriptsMetrics,
+	executor execution.ScriptExecutor,
+	scriptCache *LoggedScriptCache,
+) *LocalScriptExecutor {
+	return &LocalScriptExecutor{
+		log:            log.With().Str("script_executor", "local").Logger(),
+		metrics:        metrics,
+		scriptCache:    scriptCache,
+		scriptExecutor: executor,
+	}
+}
+
+func (l *LocalScriptExecutor) Execute(ctx context.Context, r *Request) ([]byte, time.Duration, error) {
+	execStartTime := time.Now()
+
+	result, err := l.scriptExecutor.ExecuteAtBlockHeight(ctx, r.script, r.arguments, r.height)
+
+	execEndTime := time.Now()
+	execDuration := execEndTime.Sub(execStartTime)
+
+	log := l.log.With().
+		Str("script_executor_addr", "localhost").
+		Hex("block_id", logging.ID(r.blockID)).
+		Uint64("height", r.height).
+		Hex("script_hash", r.insecureScriptHash[:]).
+		Dur("execution_dur_ms", execDuration).
+		Logger()
+
+	if err != nil {
+		convertedErr := convertScriptExecutionError(err, r.height)
+
+		switch status.Code(convertedErr) {
+		case codes.InvalidArgument, codes.Canceled, codes.DeadlineExceeded:
+			l.scriptCache.LogFailedScript(r.blockID, r.insecureScriptHash, execEndTime, "localhost", r.script)
+
+		default:
+			log.Debug().Err(err).Msg("script execution failed")
+			l.metrics.ScriptExecutionErrorLocal() // TODO: should this be called in the above cases as well?
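+			// note: all other mapped codes (codes.Internal, codes.OutOfRange, codes.NotFound,
+			// codes.ResourceExhausted; see convertScriptExecutionError and rpc.ConvertIndexError
+			// below) land in this branch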
+ } + + return nil, execDuration, convertedErr + } + + l.scriptCache.LogExecutedScript(r.blockID, r.insecureScriptHash, execEndTime, "localhost", r.script, execDuration) + l.metrics.ScriptExecuted(execDuration, len(r.script)) + + return result, execDuration, nil +} + +// convertScriptExecutionError converts the script execution error to a gRPC error +func convertScriptExecutionError(err error, height uint64) error { + if err == nil { + return nil + } + + var failure fvmerrors.CodedFailure + if fvmerrors.As(err, &failure) { + return rpc.ConvertError(err, "failed to execute script", codes.Internal) + } + + // general FVM/ledger errors + var coded fvmerrors.CodedError + if fvmerrors.As(err, &coded) { + switch coded.Code() { + case fvmerrors.ErrCodeScriptExecutionCancelledError: + return status.Errorf(codes.Canceled, "script execution canceled: %v", err) + + case fvmerrors.ErrCodeScriptExecutionTimedOutError: + return status.Errorf(codes.DeadlineExceeded, "script execution timed out: %v", err) + + case fvmerrors.ErrCodeComputationLimitExceededError: + return status.Errorf(codes.ResourceExhausted, "script execution computation limit exceeded: %v", err) + + case fvmerrors.ErrCodeMemoryLimitExceededError: + return status.Errorf(codes.ResourceExhausted, "script execution memory limit exceeded: %v", err) + + default: + // runtime errors + return status.Errorf(codes.InvalidArgument, "failed to execute script: %v", err) + } + } + + return rpc.ConvertIndexError(err, height, "failed to execute script") +} diff --git a/engine/access/rpc/backend/scripts/executor/logged_script_cache.go b/engine/access/rpc/backend/scripts/executor/logged_script_cache.go new file mode 100644 index 00000000000..965efcd8e85 --- /dev/null +++ b/engine/access/rpc/backend/scripts/executor/logged_script_cache.go @@ -0,0 +1,76 @@ +package executor + +import ( + "crypto/md5" //nolint:gosec + "time" + + lru "github.com/hashicorp/golang-lru/v2" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" +) + +// uniqueScriptLoggingTimeWindow is the duration for checking the uniqueness of scripts sent for execution +const uniqueScriptLoggingTimeWindow = 10 * time.Minute + +type LoggedScriptCache struct { + log zerolog.Logger + loggedScripts *lru.Cache[[md5.Size]byte, time.Time] +} + +func NewLoggedScriptCache(log zerolog.Logger, loggedScripts *lru.Cache[[md5.Size]byte, time.Time]) *LoggedScriptCache { + return &LoggedScriptCache{ + log: log, + loggedScripts: loggedScripts, + } +} + +func (s *LoggedScriptCache) LogExecutedScript( + blockID flow.Identifier, + scriptHash [md5.Size]byte, + executionTime time.Time, + address string, + script []byte, + dur time.Duration, +) { + if s.shouldLogScript(executionTime, scriptHash) { + s.log.Debug(). + Str("block_id", blockID.String()). + Str("script_executor_addr", address). + Str("script", string(script)). + Dur("execution_dur_ms", dur). + Msg("Successfully executed script") + + s.loggedScripts.Add(scriptHash, executionTime) + } +} + +func (s *LoggedScriptCache) LogFailedScript( + blockID flow.Identifier, + scriptHash [md5.Size]byte, + executionTime time.Time, + address string, + script []byte, +) { + logEvent := s.log.Debug(). + Str("block_id", blockID.String()). 
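+		// the executor address distinguishes local execution ("localhost") from EN-backed runs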
+ Str("script_executor_addr", address) + + if s.shouldLogScript(executionTime, scriptHash) { + logEvent.Str("script", string(script)) + } + + logEvent.Msg("failed to execute script") + s.loggedScripts.Add(scriptHash, executionTime) +} + +func (s *LoggedScriptCache) shouldLogScript(execTime time.Time, scriptHash [md5.Size]byte) bool { + if s.log.GetLevel() > zerolog.DebugLevel { + return false + } + timestamp, seen := s.loggedScripts.Get(scriptHash) + if seen { + return execTime.Sub(timestamp) >= uniqueScriptLoggingTimeWindow + } + return true +} diff --git a/engine/access/rpc/backend/scripts/scripts.go b/engine/access/rpc/backend/scripts/scripts.go new file mode 100644 index 00000000000..9d3fdf07b57 --- /dev/null +++ b/engine/access/rpc/backend/scripts/scripts.go @@ -0,0 +1,143 @@ +package scripts + +import ( + "context" + "crypto/md5" //nolint:gosec + "fmt" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rpc/backend/common" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/access/rpc/backend/scripts/executor" + "github.com/onflow/flow-go/engine/access/rpc/connection" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +type Scripts struct { + headers storage.Headers + state protocol.State + executor executor.ScriptExecutor + maxScriptAndArgumentSize uint +} + +var _ access.ScriptsAPI = (*Scripts)(nil) + +func NewScriptsBackend( + log zerolog.Logger, + metrics module.BackendScriptsMetrics, + headers storage.Headers, + state protocol.State, + connFactory connection.ConnectionFactory, + nodeCommunicator node_communicator.Communicator, + scriptExecutor execution.ScriptExecutor, + scriptExecMode query_mode.IndexQueryMode, + nodeProvider *commonrpc.ExecutionNodeIdentitiesProvider, + loggedScripts *lru.Cache[[md5.Size]byte, time.Time], + maxScriptAndArgumentSize uint, +) (*Scripts, error) { + var exec executor.ScriptExecutor + cache := executor.NewLoggedScriptCache(log, loggedScripts) + + switch scriptExecMode { + case query_mode.IndexQueryModeLocalOnly: + exec = executor.NewLocalScriptExecutor(log, metrics, scriptExecutor, cache) + + case query_mode.IndexQueryModeExecutionNodesOnly: + exec = executor.NewENScriptExecutor(log, metrics, nodeProvider, nodeCommunicator, connFactory, cache) + + case query_mode.IndexQueryModeFailover: + local := executor.NewLocalScriptExecutor(log, metrics, scriptExecutor, cache) + execNode := executor.NewENScriptExecutor(log, metrics, nodeProvider, nodeCommunicator, connFactory, cache) + exec = executor.NewFailoverScriptExecutor(local, execNode) + + case query_mode.IndexQueryModeCompare: + local := executor.NewLocalScriptExecutor(log, metrics, scriptExecutor, cache) + execNode := executor.NewENScriptExecutor(log, metrics, nodeProvider, nodeCommunicator, connFactory, cache) + exec = executor.NewComparingScriptExecutor(log, metrics, cache, local, execNode) + + default: + return nil, fmt.Errorf("invalid index mode: %s", scriptExecMode.String()) + } + + return &Scripts{ + headers: headers, + state: 
state,
+		executor:                 exec,
+		maxScriptAndArgumentSize: maxScriptAndArgumentSize,
+	}, nil
+}
+
+// ExecuteScriptAtLatestBlock executes the provided script at the latest sealed block.
+func (b *Scripts) ExecuteScriptAtLatestBlock(
+	ctx context.Context,
+	script []byte,
+	arguments [][]byte,
+) ([]byte, error) {
+	if !commonrpc.CheckScriptSize(script, arguments, b.maxScriptAndArgumentSize) {
+		return nil, status.Error(codes.InvalidArgument, commonrpc.ErrScriptTooLarge.Error())
+	}
+
+	latestHeader, err := b.state.Sealed().Head()
+	if err != nil {
+		// the latest sealed header MUST be available
+		err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err)
+		irrecoverable.Throw(ctx, err)
+		return nil, err
+	}
+
+	res, _, err := b.executor.Execute(ctx, executor.NewScriptExecutionRequest(latestHeader.ID(), latestHeader.Height, script, arguments))
+	return res, err
+}
+
+// ExecuteScriptAtBlockID executes the provided script at the block with the provided ID.
+func (b *Scripts) ExecuteScriptAtBlockID(
+	ctx context.Context,
+	blockID flow.Identifier,
+	script []byte,
+	arguments [][]byte,
+) ([]byte, error) {
+	if !commonrpc.CheckScriptSize(script, arguments, b.maxScriptAndArgumentSize) {
+		return nil, status.Error(codes.InvalidArgument, commonrpc.ErrScriptTooLarge.Error())
+	}
+
+	header, err := b.headers.ByBlockID(blockID)
+	if err != nil {
+		return nil, commonrpc.ConvertStorageError(err)
+	}
+
+	res, _, err := b.executor.Execute(ctx, executor.NewScriptExecutionRequest(blockID, header.Height, script, arguments))
+	return res, err
+}
+
+// ExecuteScriptAtBlockHeight executes the provided script at the provided block height.
+func (b *Scripts) ExecuteScriptAtBlockHeight(
+	ctx context.Context,
+	blockHeight uint64,
+	script []byte,
+	arguments [][]byte,
+) ([]byte, error) {
+	if !commonrpc.CheckScriptSize(script, arguments, b.maxScriptAndArgumentSize) {
+		return nil, status.Error(codes.InvalidArgument, commonrpc.ErrScriptTooLarge.Error())
+	}
+
+	header, err := b.headers.ByHeight(blockHeight)
+	if err != nil {
+		return nil, commonrpc.ConvertStorageError(common.ResolveHeightError(b.state.Params(), blockHeight, err))
+	}
+
+	res, _, err := b.executor.Execute(ctx, executor.NewScriptExecutionRequest(header.ID(), blockHeight, script, arguments))
+	return res, err
+}
diff --git a/engine/access/rpc/backend/scripts/scripts_test.go b/engine/access/rpc/backend/scripts/scripts_test.go
new file mode 100644
index 00000000000..bc997576060
--- /dev/null
+++ b/engine/access/rpc/backend/scripts/scripts_test.go
@@ -0,0 +1,511 @@
+package scripts
+
+import (
+	"context"
+	"crypto/md5" //nolint:gosec
+	"fmt"
+	"testing"
+	"time"
+
+	lru "github.com/hashicorp/golang-lru/v2"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	execproto "github.com/onflow/flow/protobuf/go/flow/execution"
+
+	access "github.com/onflow/flow-go/engine/access/mock"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/common"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/query_mode"
+	connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock"
+	commonrpc "github.com/onflow/flow-go/engine/common/rpc"
+	fvmerrors "github.com/onflow/flow-go/fvm/errors"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/execution"
+	execmock 
"github.com/onflow/flow-go/module/execution/mock" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +var ( + expectedResponse = []byte("response_data") + + cadenceErr = fvmerrors.NewCodedError(fvmerrors.ErrCodeCadenceRunTimeError, "cadence error") + fvmFailureErr = fvmerrors.NewCodedFailure(fvmerrors.FailureCodeBlockFinderFailure, "fvm error") + ctxCancelErr = fvmerrors.NewCodedError(fvmerrors.ErrCodeScriptExecutionCancelledError, "context canceled error") + timeoutErr = fvmerrors.NewCodedError(fvmerrors.ErrCodeScriptExecutionTimedOutError, "timeout error") + compLimitErr = fvmerrors.NewCodedError(fvmerrors.ErrCodeComputationLimitExceededError, "computation limit exceeded error") + memLimitErr = fvmerrors.NewCodedError(fvmerrors.ErrCodeMemoryLimitExceededError, "memory limit exceeded error") +) + +// Create a suite similar to GetAccount that covers each of the modes +type BackendScriptsSuite struct { + suite.Suite + + log zerolog.Logger + state *protocol.State + snapshot *protocol.Snapshot + params *protocol.Params + rootHeader *flow.Header + + headers *storagemock.Headers + receipts *storagemock.ExecutionReceipts + connectionFactory *connectionmock.ConnectionFactory + chainID flow.ChainID + + executionNodes flow.IdentityList + execClient *access.ExecutionAPIClient + + block *flow.Block + + script []byte + arguments [][]byte + failingScript []byte +} + +func TestBackendScriptsSuite(t *testing.T) { + suite.Run(t, new(BackendScriptsSuite)) +} + +func (s *BackendScriptsSuite) SetupTest() { + s.log = unittest.Logger() + s.state = protocol.NewState(s.T()) + s.snapshot = protocol.NewSnapshot(s.T()) + s.rootHeader = unittest.BlockHeaderFixture() + s.params = protocol.NewParams(s.T()) + s.headers = storagemock.NewHeaders(s.T()) + s.receipts = storagemock.NewExecutionReceipts(s.T()) + s.connectionFactory = connectionmock.NewConnectionFactory(s.T()) + s.chainID = flow.Testnet + + s.execClient = access.NewExecutionAPIClient(s.T()) + s.executionNodes = unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) + s.block = unittest.BlockFixture() + + s.script = []byte("access(all) fun main() { return 1 }") + s.arguments = [][]byte{[]byte("arg1"), []byte("arg2")} + s.failingScript = []byte("access(all) fun main() { panic(\"!!\") }") +} + +func (s *BackendScriptsSuite) defaultBackend(executor execution.ScriptExecutor, mode query_mode.IndexQueryMode) *Scripts { + loggedScripts, err := lru.New[[md5.Size]byte, time.Time](common.DefaultLoggedScriptsCacheSize) + s.Require().NoError(err) + + scripts, err := NewScriptsBackend( + s.log, + metrics.NewNoopCollector(), + s.headers, + s.state, + s.connectionFactory, + node_communicator.NewNodeCommunicator(false), + executor, + mode, + commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.state, + s.receipts, + flow.IdentifierList{}, + flow.IdentifierList{}, + ), + loggedScripts, + commonrpc.DefaultAccessMaxRequestSize, + ) + require.NoError(s.T(), err) + + return scripts +} + +// setupExecutionNodes sets up the mocks required to test against an EN backend +func (s *BackendScriptsSuite) setupExecutionNodes(block *flow.Block) { + s.params.On("FinalizedRoot").Return(s.rootHeader, nil) + s.state.On("Params").Return(s.params) + 
s.state.On("Final").Return(s.snapshot) + s.snapshot.On("Identities", mock.Anything).Return(s.executionNodes, nil) + + // this line causes a S1021 lint error because receipts is explicitly declared. this is required + // to ensure the mock library handles the response type correctly + var receipts flow.ExecutionReceiptList //nolint:gosimple + receipts = unittest.ReceiptsForBlockFixture(block, s.executionNodes.NodeIDs()) + s.receipts.On("ByBlockID", block.ID()).Return(receipts, nil) + + s.connectionFactory.On("GetExecutionAPIClient", mock.Anything). + Return(s.execClient, &mocks.MockCloser{}, nil) +} + +// setupENSuccessResponse configures the execution client mock to return a successful response +func (s *BackendScriptsSuite) setupENSuccessResponse(blockID flow.Identifier) { + expectedExecRequest := &execproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockID[:], + Script: s.script, + Arguments: s.arguments, + } + + s.execClient.On("ExecuteScriptAtBlockID", mock.Anything, expectedExecRequest). + Return(&execproto.ExecuteScriptAtBlockIDResponse{ + Value: expectedResponse, + }, nil) +} + +// setupENFailingResponse configures the execution client mock to return a failing response +func (s *BackendScriptsSuite) setupENFailingResponse(blockID flow.Identifier, err error) { + expectedExecRequest := &execproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockID[:], + Script: s.failingScript, + Arguments: s.arguments, + } + + s.execClient.On("ExecuteScriptAtBlockID", mock.Anything, expectedExecRequest). + Return(nil, err) +} + +// TestExecuteScriptOnExecutionNode_HappyPath tests that the backend successfully executes scripts +// on execution nodes +func (s *BackendScriptsSuite) TestExecuteScriptOnExecutionNode_HappyPath() { + ctx := context.Background() + + s.setupExecutionNodes(s.block) + s.setupENSuccessResponse(s.block.ID()) + + scripts := s.defaultBackend(execmock.NewScriptExecutor(s.T()), query_mode.IndexQueryModeExecutionNodesOnly) + + s.Run("GetAccount", func() { + s.testExecuteScriptAtLatestBlock(ctx, scripts, codes.OK) + }) + + s.Run("ExecuteScriptAtBlockID", func() { + s.testExecuteScriptAtBlockID(ctx, scripts, codes.OK) + }) + + s.Run("ExecuteScriptAtBlockHeight", func() { + s.testExecuteScriptAtBlockHeight(ctx, scripts, codes.OK) + }) +} + +// TestExecuteScriptOnExecutionNode_Fails tests that the backend returns an error when the execution +// node returns an error +func (s *BackendScriptsSuite) TestExecuteScriptOnExecutionNode_Fails() { + ctx := context.Background() + + // use a status code that's not used in the API to make sure it's passed through + statusCode := codes.FailedPrecondition + errToReturn := status.Error(statusCode, "random error") + + s.setupExecutionNodes(s.block) + s.setupENFailingResponse(s.block.ID(), errToReturn) + + scripts := s.defaultBackend(execmock.NewScriptExecutor(s.T()), query_mode.IndexQueryModeExecutionNodesOnly) + + s.Run("GetAccount", func() { + s.testExecuteScriptAtLatestBlock(ctx, scripts, statusCode) + }) + + s.Run("ExecuteScriptAtBlockID", func() { + s.testExecuteScriptAtBlockID(ctx, scripts, statusCode) + }) + + s.Run("ExecuteScriptAtBlockHeight", func() { + s.testExecuteScriptAtBlockHeight(ctx, scripts, statusCode) + }) +} + +// TestExecuteScriptFromStorage_HappyPath tests that the backend successfully executes scripts using +// the local storage +func (s *BackendScriptsSuite) TestExecuteScriptFromStorage_HappyPath() { + ctx := context.Background() + + scriptExecutor := execmock.NewScriptExecutor(s.T()) + scriptExecutor.On("ExecuteAtBlockHeight", 
mock.Anything, s.script, s.arguments, s.block.Height).
+		Return(expectedResponse, nil)
+
+	scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeLocalOnly)
+
+	s.Run("ExecuteScriptAtLatestBlock - happy path", func() {
+		s.testExecuteScriptAtLatestBlock(ctx, scripts, codes.OK)
+	})
+
+	s.Run("ExecuteScriptAtBlockID - happy path", func() {
+		s.testExecuteScriptAtBlockID(ctx, scripts, codes.OK)
+	})
+
+	s.Run("ExecuteScriptAtBlockHeight - happy path", func() {
+		s.testExecuteScriptAtBlockHeight(ctx, scripts, codes.OK)
+	})
+}
+
+// TestExecuteScriptFromStorage_Fails tests that errors received from local storage are handled
+// and converted to the appropriate status code
+func (s *BackendScriptsSuite) TestExecuteScriptFromStorage_Fails() {
+	ctx := context.Background()
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeLocalOnly)
+
+	testCases := []struct {
+		err        error
+		statusCode codes.Code
+	}{
+		{
+			err:        storage.ErrHeightNotIndexed,
+			statusCode: codes.OutOfRange,
+		},
+		{
+			err:        storage.ErrNotFound,
+			statusCode: codes.NotFound,
+		},
+		{
+			err:        fmt.Errorf("system error"),
+			statusCode: codes.Internal,
+		},
+		{
+			err:        cadenceErr,
+			statusCode: codes.InvalidArgument,
+		},
+		{
+			err:        fvmFailureErr,
+			statusCode: codes.Internal,
+		},
+	}
+
+	for _, tt := range testCases {
+		scriptExecutor.On("ExecuteAtBlockHeight", mock.Anything, s.failingScript, s.arguments, s.block.Height).
+			Return(nil, tt.err).Times(3)
+
+		s.Run(fmt.Sprintf("ExecuteScriptAtLatestBlock - fails with %v", tt.err), func() {
+			s.testExecuteScriptAtLatestBlock(ctx, scripts, tt.statusCode)
+		})
+
+		s.Run(fmt.Sprintf("ExecuteScriptAtBlockID - fails with %v", tt.err), func() {
+			s.testExecuteScriptAtBlockID(ctx, scripts, tt.statusCode)
+		})
+
+		s.Run(fmt.Sprintf("ExecuteScriptAtBlockHeight - fails with %v", tt.err), func() {
+			s.testExecuteScriptAtBlockHeight(ctx, scripts, tt.statusCode)
+		})
+	}
+}
+
+// TestExecuteScriptWithFailover_HappyPath tests that when an error is returned executing a script
+// from local storage, the backend will attempt to run it on an execution node
+func (s *BackendScriptsSuite) TestExecuteScriptWithFailover_HappyPath() {
+	ctx := context.Background()
+
+	errors := []error{
+		storage.ErrHeightNotIndexed,
+		storage.ErrNotFound,
+		fmt.Errorf("system error"),
+		fvmFailureErr,
+		compLimitErr,
+		memLimitErr,
+	}
+
+	s.setupExecutionNodes(s.block)
+	s.setupENSuccessResponse(s.block.ID())
+
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeFailover)
+
+	for _, errToReturn := range errors {
+		// configure local script executor to fail
+		scriptExecutor.On("ExecuteAtBlockHeight", mock.Anything, s.script, s.arguments, s.block.Height). 
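+			// prime the local executor to fail three times: once per API variant exercised below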
+ Return(nil, errToReturn).Times(3) + + s.Run(fmt.Sprintf("ExecuteScriptAtLatestBlock - recovers %v", errToReturn), func() { + s.testExecuteScriptAtLatestBlock(ctx, scripts, codes.OK) + }) + + s.Run(fmt.Sprintf("ExecuteScriptAtBlockID - recovers %v", errToReturn), func() { + s.testExecuteScriptAtBlockID(ctx, scripts, codes.OK) + }) + + s.Run(fmt.Sprintf("ExecuteScriptAtBlockHeight - recovers %v", errToReturn), func() { + s.testExecuteScriptAtBlockHeight(ctx, scripts, codes.OK) + }) + } +} + +// TestExecuteScriptWithFailover_SkippedForCorrectCodes tests that failover is skipped for +// FVM errors that result in InvalidArgument or Canceled errors +func (s *BackendScriptsSuite) TestExecuteScriptWithFailover_SkippedForCorrectCodes() { + ctx := context.Background() + + // configure local script executor to fail + scriptExecutor := execmock.NewScriptExecutor(s.T()) + scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeFailover) + + testCases := []struct { + err error + statusCode codes.Code + }{ + { + err: cadenceErr, + statusCode: codes.InvalidArgument, + }, + { + err: ctxCancelErr, + statusCode: codes.Canceled, + }, + } + + for _, tt := range testCases { + scriptExecutor.On("ExecuteAtBlockHeight", mock.Anything, s.failingScript, s.arguments, s.block.Height). + Return(nil, tt.err). + Times(3) + + s.Run(fmt.Sprintf("ExecuteScriptAtLatestBlock - %s", tt.statusCode), func() { + s.testExecuteScriptAtLatestBlock(ctx, scripts, tt.statusCode) + }) + + s.Run(fmt.Sprintf("ExecuteScriptAtBlockID - %s", tt.statusCode), func() { + s.testExecuteScriptAtBlockID(ctx, scripts, tt.statusCode) + }) + + s.Run(fmt.Sprintf("ExecuteScriptAtBlockHeight - %s", tt.statusCode), func() { + s.testExecuteScriptAtBlockHeight(ctx, scripts, tt.statusCode) + }) + } +} + +// TestExecuteScriptWithFailover_ReturnsENErrors tests that when an error is returned from the execution +// node during a failover, it is returned to the caller. +func (s *BackendScriptsSuite) TestExecuteScriptWithFailover_ReturnsENErrors() { + ctx := context.Background() + + // use a status code that's not used in the API to make sure it's passed through + statusCode := codes.FailedPrecondition + errToReturn := status.Error(statusCode, "random error") + + // setup the execution client mocks + s.setupExecutionNodes(s.block) + s.setupENFailingResponse(s.block.ID(), errToReturn) + + // configure local script executor to fail + scriptExecutor := execmock.NewScriptExecutor(s.T()) + scriptExecutor.On("ExecuteAtBlockHeight", mock.Anything, mock.Anything, mock.Anything, s.block.Height). 
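+		// ErrHeightNotIndexed maps to codes.OutOfRange locally, which triggers failover to the EN;
+		// the EN's FailedPrecondition error should then reach the caller unchanged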
+		Return(nil, storage.ErrHeightNotIndexed)
+
+	scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeFailover)
+
+	s.Run("ExecuteScriptAtLatestBlock", func() {
+		s.testExecuteScriptAtLatestBlock(ctx, scripts, statusCode)
+	})
+
+	s.Run("ExecuteScriptAtBlockID", func() {
+		s.testExecuteScriptAtBlockID(ctx, scripts, statusCode)
+	})
+
+	s.Run("ExecuteScriptAtBlockHeight", func() {
+		s.testExecuteScriptAtBlockHeight(ctx, scripts, statusCode)
+	})
+}
+
+// TestExecuteScriptAtLatestBlockFromStorage_InconsistentState tests that the signaler context
+// receives an error when the node's state is inconsistent
+func (s *BackendScriptsSuite) TestExecuteScriptAtLatestBlockFromStorage_InconsistentState() {
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+	scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeLocalOnly)
+
+	s.Run(fmt.Sprintf("ExecuteScriptAtLatestBlock - fails with %v", "inconsistent node's state"), func() {
+		s.state.On("Sealed").Return(s.snapshot, nil)
+
+		err := fmt.Errorf("inconsistent node's state")
+		s.snapshot.On("Head").Return(nil, err)
+
+		signCtxErr := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err)
+		signalerCtx := irrecoverable.WithSignalerContext(context.Background(),
+			irrecoverable.NewMockSignalerContextExpectError(s.T(), context.Background(), signCtxErr))
+
+		actual, err := scripts.ExecuteScriptAtLatestBlock(signalerCtx, s.script, s.arguments)
+		s.Require().Error(err)
+		s.Require().Nil(actual)
+	})
+}
+
+// TestExecuteScript_ExceedsMaxSize tests that when a script exceeds the max size, it returns an error
+func (s *BackendScriptsSuite) TestExecuteScript_ExceedsMaxSize() {
+	ctx := context.Background()
+
+	script := unittest.RandomBytes(commonrpc.DefaultAccessMaxRequestSize + 1)
+
+	// configure local script executor which will never be called
+	scriptExecutor := execmock.NewScriptExecutor(s.T())
+
+	scripts := s.defaultBackend(scriptExecutor, query_mode.IndexQueryModeLocalOnly)
+
+	s.Run("ExecuteScriptAtLatestBlock", func() {
+		actual, err := scripts.ExecuteScriptAtLatestBlock(ctx, script, s.arguments)
+		s.Require().Error(err)
+		s.Require().Equal(codes.InvalidArgument, status.Code(err), "error code mismatch: expected %d, got %d: %s", codes.InvalidArgument, status.Code(err), err)
+		s.Require().Nil(actual)
+	})
+
+	s.Run("ExecuteScriptAtBlockID", func() {
+		actual, err := scripts.ExecuteScriptAtBlockID(ctx, s.block.ID(), script, s.arguments)
+		s.Require().Error(err)
+		s.Require().Equal(codes.InvalidArgument, status.Code(err), "error code mismatch: expected %d, got %d: %s", codes.InvalidArgument, status.Code(err), err)
+		s.Require().Nil(actual)
+	})
+
+	s.Run("ExecuteScriptAtBlockHeight", func() {
+		actual, err := scripts.ExecuteScriptAtBlockHeight(ctx, s.block.Height, script, s.arguments)
+		s.Require().Error(err)
+		s.Require().Equal(codes.InvalidArgument, status.Code(err), "error code mismatch: expected %d, got %d: %s", codes.InvalidArgument, status.Code(err), err)
+		s.Require().Nil(actual)
+	})
+}
+
+func (s *BackendScriptsSuite) testExecuteScriptAtLatestBlock(ctx context.Context, scripts *Scripts, statusCode codes.Code) {
+	s.state.On("Sealed").Return(s.snapshot, nil).Once()
+	s.snapshot.On("Head").Return(s.block.ToHeader(), nil).Once()
+
+	if statusCode == codes.OK {
+		actual, err := scripts.ExecuteScriptAtLatestBlock(ctx, s.script, s.arguments)
+		s.Require().NoError(err)
+		s.Require().Equal(expectedResponse, actual)
+	} else {
+		actual, err := scripts.ExecuteScriptAtLatestBlock(ctx, s.failingScript, s.arguments)
+		
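// the failing script should surface the status code configured by the test case
+		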
s.Require().Error(err) + s.Require().Equal(statusCode, status.Code(err), "error code mismatch: expected %d, got %d: %s", statusCode, status.Code(err), err) + s.Require().Nil(actual) + } +} + +func (s *BackendScriptsSuite) testExecuteScriptAtBlockID(ctx context.Context, scripts *Scripts, statusCode codes.Code) { + blockID := s.block.ID() + s.headers.On("ByBlockID", blockID).Return(s.block.ToHeader(), nil).Once() + + if statusCode == codes.OK { + actual, err := scripts.ExecuteScriptAtBlockID(ctx, blockID, s.script, s.arguments) + s.Require().NoError(err) + s.Require().Equal(expectedResponse, actual) + } else { + actual, err := scripts.ExecuteScriptAtBlockID(ctx, blockID, s.failingScript, s.arguments) + s.Require().Error(err) + s.Require().Equal(statusCode, status.Code(err), "error code mismatch: expected %d, got %d: %s", statusCode, status.Code(err), err) + s.Require().Nil(actual) + } +} + +func (s *BackendScriptsSuite) testExecuteScriptAtBlockHeight(ctx context.Context, scripts *Scripts, statusCode codes.Code) { + height := s.block.Height + s.headers.On("ByHeight", height).Return(s.block.ToHeader(), nil).Once() + + if statusCode == codes.OK { + actual, err := scripts.ExecuteScriptAtBlockHeight(ctx, height, s.script, s.arguments) + s.Require().NoError(err) + s.Require().Equal(expectedResponse, actual) + } else { + actual, err := scripts.ExecuteScriptAtBlockHeight(ctx, height, s.failingScript, s.arguments) + s.Require().Error(err) + s.Require().Equalf(statusCode, status.Code(err), "error code mismatch: expected %d, got %d: %s", statusCode, status.Code(err), err) + s.Require().Nil(actual) + } +} diff --git a/engine/access/rpc/backend/transactions/error_messages/mock/provider.go b/engine/access/rpc/backend/transactions/error_messages/mock/provider.go new file mode 100644 index 00000000000..f829c16512b --- /dev/null +++ b/engine/access/rpc/backend/transactions/error_messages/mock/provider.go @@ -0,0 +1,217 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + context "context" + + execution "github.com/onflow/flow/protobuf/go/flow/execution" + + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// Provider is an autogenerated mock type for the Provider type +type Provider struct { + mock.Mock +} + +// ErrorMessageByBlockIDFromAnyEN provides a mock function with given fields: ctx, execNodes, req +func (_m *Provider) ErrorMessageByBlockIDFromAnyEN(ctx context.Context, execNodes flow.IdentitySkeletonList, req *execution.GetTransactionErrorMessagesByBlockIDRequest) ([]*execution.GetTransactionErrorMessagesResponse_Result, *flow.IdentitySkeleton, error) { + ret := _m.Called(ctx, execNodes, req) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessageByBlockIDFromAnyEN") + } + + var r0 []*execution.GetTransactionErrorMessagesResponse_Result + var r1 *flow.IdentitySkeleton + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.IdentitySkeletonList, *execution.GetTransactionErrorMessagesByBlockIDRequest) ([]*execution.GetTransactionErrorMessagesResponse_Result, *flow.IdentitySkeleton, error)); ok { + return rf(ctx, execNodes, req) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.IdentitySkeletonList, *execution.GetTransactionErrorMessagesByBlockIDRequest) []*execution.GetTransactionErrorMessagesResponse_Result); ok { + r0 = rf(ctx, execNodes, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*execution.GetTransactionErrorMessagesResponse_Result) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.IdentitySkeletonList, *execution.GetTransactionErrorMessagesByBlockIDRequest) *flow.IdentitySkeleton); ok { + r1 = rf(ctx, execNodes, req) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*flow.IdentitySkeleton) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.IdentitySkeletonList, *execution.GetTransactionErrorMessagesByBlockIDRequest) error); ok { + r2 = rf(ctx, execNodes, req) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ErrorMessageByIndex provides a mock function with given fields: ctx, blockID, height, index +func (_m *Provider) ErrorMessageByIndex(ctx context.Context, blockID flow.Identifier, height uint64, index uint32) (string, error) { + ret := _m.Called(ctx, blockID, height, index) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessageByIndex") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64, uint32) (string, error)); ok { + return rf(ctx, blockID, height, index) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64, uint32) string); ok { + r0 = rf(ctx, blockID, height, index) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint64, uint32) error); ok { + r1 = rf(ctx, blockID, height, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ErrorMessageByIndexFromAnyEN provides a mock function with given fields: ctx, execNodes, req +func (_m *Provider) ErrorMessageByIndexFromAnyEN(ctx context.Context, execNodes flow.IdentitySkeletonList, req *execution.GetTransactionErrorMessageByIndexRequest) (*execution.GetTransactionErrorMessageResponse, error) { + ret := _m.Called(ctx, execNodes, req) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessageByIndexFromAnyEN") + } + + var r0 *execution.GetTransactionErrorMessageResponse + var r1 error + if rf, ok := 
ret.Get(0).(func(context.Context, flow.IdentitySkeletonList, *execution.GetTransactionErrorMessageByIndexRequest) (*execution.GetTransactionErrorMessageResponse, error)); ok { + return rf(ctx, execNodes, req) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.IdentitySkeletonList, *execution.GetTransactionErrorMessageByIndexRequest) *execution.GetTransactionErrorMessageResponse); ok { + r0 = rf(ctx, execNodes, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionErrorMessageResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.IdentitySkeletonList, *execution.GetTransactionErrorMessageByIndexRequest) error); ok { + r1 = rf(ctx, execNodes, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ErrorMessageByTransactionID provides a mock function with given fields: ctx, blockID, height, transactionID +func (_m *Provider) ErrorMessageByTransactionID(ctx context.Context, blockID flow.Identifier, height uint64, transactionID flow.Identifier) (string, error) { + ret := _m.Called(ctx, blockID, height, transactionID) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessageByTransactionID") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64, flow.Identifier) (string, error)); ok { + return rf(ctx, blockID, height, transactionID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64, flow.Identifier) string); ok { + r0 = rf(ctx, blockID, height, transactionID) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint64, flow.Identifier) error); ok { + r1 = rf(ctx, blockID, height, transactionID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ErrorMessageFromAnyEN provides a mock function with given fields: ctx, execNodes, req +func (_m *Provider) ErrorMessageFromAnyEN(ctx context.Context, execNodes flow.IdentitySkeletonList, req *execution.GetTransactionErrorMessageRequest) (*execution.GetTransactionErrorMessageResponse, error) { + ret := _m.Called(ctx, execNodes, req) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessageFromAnyEN") + } + + var r0 *execution.GetTransactionErrorMessageResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.IdentitySkeletonList, *execution.GetTransactionErrorMessageRequest) (*execution.GetTransactionErrorMessageResponse, error)); ok { + return rf(ctx, execNodes, req) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.IdentitySkeletonList, *execution.GetTransactionErrorMessageRequest) *execution.GetTransactionErrorMessageResponse); ok { + r0 = rf(ctx, execNodes, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetTransactionErrorMessageResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.IdentitySkeletonList, *execution.GetTransactionErrorMessageRequest) error); ok { + r1 = rf(ctx, execNodes, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ErrorMessagesByBlockID provides a mock function with given fields: ctx, blockID, height +func (_m *Provider) ErrorMessagesByBlockID(ctx context.Context, blockID flow.Identifier, height uint64) (map[flow.Identifier]string, error) { + ret := _m.Called(ctx, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for ErrorMessagesByBlockID") + } + + var r0 map[flow.Identifier]string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64) 
(map[flow.Identifier]string, error)); ok {
+		return rf(ctx, blockID, height)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64) map[flow.Identifier]string); ok {
+		r0 = rf(ctx, blockID, height)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(map[flow.Identifier]string)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint64) error); ok {
+		r1 = rf(ctx, blockID, height)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// NewProvider creates a new instance of Provider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewProvider(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *Provider {
+	mock := &Provider{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/engine/access/rpc/backend/transactions/error_messages/provider.go b/engine/access/rpc/backend/transactions/error_messages/provider.go
new file mode 100644
index 00000000000..20c96d7f5fa
--- /dev/null
+++ b/engine/access/rpc/backend/transactions/error_messages/provider.go
@@ -0,0 +1,472 @@
+package error_messages
+
+import (
+	"context"
+	"errors"
+
+	execproto "github.com/onflow/flow/protobuf/go/flow/execution"
+	"github.com/rs/zerolog"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine/access/index"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/common"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator"
+	"github.com/onflow/flow-go/engine/access/rpc/connection"
+	"github.com/onflow/flow-go/engine/common/rpc/convert"
+
+	"github.com/onflow/flow-go/engine/common/rpc"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+const DefaultFailedErrorMessage = "failed"
+
+// Provider declares methods for looking up transaction error messages by different input parameters.
+type Provider interface {
+	// ErrorMessageByTransactionID returns the transaction error message for the given block ID and transaction ID.
+	// Expected errors during normal operation:
+	//   - InsufficientExecutionReceipts - found insufficient receipts for the given block ID.
+	//   - status.Error - remote GRPC call to EN has failed.
+	ErrorMessageByTransactionID(ctx context.Context, blockID flow.Identifier, height uint64, transactionID flow.Identifier) (string, error)
+
+	// ErrorMessageByIndex returns the transaction error message for the transaction at the given index.
+	// Expected errors during normal operation:
+	//   - InsufficientExecutionReceipts - found insufficient receipts for the given block ID.
+	//   - status.Error - remote GRPC call to EN has failed.
+	ErrorMessageByIndex(ctx context.Context, blockID flow.Identifier, height uint64, index uint32) (string, error)
+
+	// ErrorMessagesByBlockID returns the error messages for all failed transactions in the given block.
+	// Expected errors during normal operation:
+	//   - InsufficientExecutionReceipts - found insufficient receipts for the given block ID.
+	//   - status.Error - remote GRPC call to EN has failed.
+	ErrorMessagesByBlockID(ctx context.Context, blockID flow.Identifier, height uint64) (map[flow.Identifier]string, error)
+
+	// ErrorMessageFromAnyEN performs an RPC call against the available execution nodes passed as an argument.
+	// The list of nodes must be non-empty, otherwise an error is returned.
+	// Expected errors during normal operation:
+	//   - status.Error - the GRPC call failed; some of the possible codes are:
+	//   - codes.NotFound - request cannot be served by EN because of absence of data.
+	//   - codes.Unavailable - remote node is unavailable.
+	ErrorMessageFromAnyEN(
+		ctx context.Context,
+		execNodes flow.IdentitySkeletonList,
+		req *execproto.GetTransactionErrorMessageRequest,
+	) (*execproto.GetTransactionErrorMessageResponse, error)
+
+	// ErrorMessageByIndexFromAnyEN performs an RPC call against the available execution nodes passed as an argument.
+	// The list of nodes must be non-empty, otherwise an error is returned.
+	// Expected errors during normal operation:
+	//   - status.Error - the GRPC call failed; some of the possible codes are:
+	//   - codes.NotFound - request cannot be served by EN because of absence of data.
+	//   - codes.Unavailable - remote node is unavailable.
+	ErrorMessageByIndexFromAnyEN(
+		ctx context.Context,
+		execNodes flow.IdentitySkeletonList,
+		req *execproto.GetTransactionErrorMessageByIndexRequest,
+	) (*execproto.GetTransactionErrorMessageResponse, error)
+
+	// ErrorMessageByBlockIDFromAnyEN performs an RPC call against the available execution nodes passed as an argument.
+	// The list of nodes must be non-empty, otherwise an error is returned.
+	// Expected errors during normal operation:
+	//   - status.Error - the GRPC call failed; some of the possible codes are:
+	//   - codes.NotFound - request cannot be served by EN because of absence of data.
+	//   - codes.Unavailable - remote node is unavailable.
+	ErrorMessageByBlockIDFromAnyEN(
+		ctx context.Context,
+		execNodes flow.IdentitySkeletonList,
+		req *execproto.GetTransactionErrorMessagesByBlockIDRequest,
+	) ([]*execproto.GetTransactionErrorMessagesResponse_Result, *flow.IdentitySkeleton, error)
+}
+
+type ProviderImpl struct {
+	log zerolog.Logger
+
+	txResultErrorMessages storage.TransactionResultErrorMessages
+	txResultsIndex        *index.TransactionResultsIndex
+
+	connFactory                connection.ConnectionFactory
+	nodeCommunicator           node_communicator.Communicator
+	execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider
+}
+
+var _ Provider = (*ProviderImpl)(nil)
+
+func NewTxErrorMessageProvider(
+	log zerolog.Logger,
+	txResultErrorMessages storage.TransactionResultErrorMessages,
+	txResultsIndex *index.TransactionResultsIndex,
+	connFactory connection.ConnectionFactory,
+	nodeCommunicator node_communicator.Communicator,
+	execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider,
+) *ProviderImpl {
+	return &ProviderImpl{
+		log:                        log,
+		txResultErrorMessages:      txResultErrorMessages,
+		txResultsIndex:             txResultsIndex,
+		connFactory:                connFactory,
+		nodeCommunicator:           nodeCommunicator,
+		execNodeIdentitiesProvider: execNodeIdentitiesProvider,
+	}
+}
+
+// ErrorMessageByTransactionID returns the transaction error message for the specified transaction.
+// If transaction error messages are stored locally, they will be checked first in local storage.
+// If error messages are not stored locally, an RPC call will be made to the EN to fetch the message.
+//
+// Expected errors during normal operation:
+//   - InsufficientExecutionReceipts - found insufficient receipts for the given block ID.
+//   - status.Error - remote GRPC call to EN has failed.
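+//
+// The lookup order is: local storage (when transaction error messages are indexed), then any
+// available execution node, and finally the local transaction results index, where a failed
+// transaction yields the static message "failed". A call-site sketch (hypothetical variable
+// names, not part of this change):
+//
+//	msg, err := provider.ErrorMessageByTransactionID(ctx, blockID, height, txID)
+//	if err != nil {
+//		return err // e.g. codes.NotFound when execution receipts are insufficient
+//	}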
+func (e *ProviderImpl) ErrorMessageByTransactionID(
+	ctx context.Context,
+	blockID flow.Identifier,
+	height uint64,
+	transactionID flow.Identifier,
+) (string, error) {
+	if e.txResultErrorMessages != nil {
+		res, err := e.txResultErrorMessages.ByBlockIDTransactionID(blockID, transactionID)
+		if err == nil {
+			return res.ErrorMessage, nil
+		}
+	}
+
+	execNodes, err := e.execNodeIdentitiesProvider.ExecutionNodesForBlockID(
+		ctx,
+		blockID,
+	)
+	if err != nil {
+		if common.IsInsufficientExecutionReceipts(err) {
+			return "", status.Error(codes.NotFound, err.Error())
+		}
+		return "", rpc.ConvertError(err, "failed to select execution nodes", codes.Internal)
+	}
+	req := &execproto.GetTransactionErrorMessageRequest{
+		BlockId:       convert.IdentifierToMessage(blockID),
+		TransactionId: convert.IdentifierToMessage(transactionID),
+	}
+
+	resp, err := e.ErrorMessageFromAnyEN(ctx, execNodes, req)
+	if err != nil {
+		// If no execution node returned a valid response, fall back to the local
+		// transaction results index.
+		txResult, err := e.txResultsIndex.ByBlockIDTransactionID(blockID, height, transactionID)
+		if err != nil {
+			return "", rpc.ConvertStorageError(err)
+		}
+
+		if txResult.Failed {
+			return DefaultFailedErrorMessage, nil
+		}
+
+		// the transaction did not fail, so there is no error message
+		return "", nil
+	}
+
+	return resp.ErrorMessage, nil
+}
+
+// ErrorMessageByIndex returns the transaction error message for a specified transaction using its index.
+// If transaction error messages are stored locally, they will be checked first in local storage.
+// If error messages are not stored locally, an RPC call will be made to the EN to fetch the message.
+//
+// Expected errors during normal operation:
+//   - InsufficientExecutionReceipts - found insufficient receipts for the given block ID.
+//   - status.Error - remote GRPC call to EN has failed.
+func (e *ProviderImpl) ErrorMessageByIndex(
+	ctx context.Context,
+	blockID flow.Identifier,
+	height uint64,
+	index uint32,
+) (string, error) {
+	if e.txResultErrorMessages != nil {
+		res, err := e.txResultErrorMessages.ByBlockIDTransactionIndex(blockID, index)
+		if err == nil {
+			return res.ErrorMessage, nil
+		}
+	}
+
+	execNodes, err := e.execNodeIdentitiesProvider.ExecutionNodesForBlockID(
+		ctx,
+		blockID,
+	)
+	if err != nil {
+		if common.IsInsufficientExecutionReceipts(err) {
+			return "", status.Error(codes.NotFound, err.Error())
+		}
+		return "", rpc.ConvertError(err, "failed to select execution nodes", codes.Internal)
+	}
+	req := &execproto.GetTransactionErrorMessageByIndexRequest{
+		BlockId: convert.IdentifierToMessage(blockID),
+		Index:   index,
+	}
+
+	resp, err := e.ErrorMessageByIndexFromAnyEN(ctx, execNodes, req)
+	if err != nil {
+		// If no execution node returned a valid response, fall back to the local
+		// transaction results index.
+		txResult, err := e.txResultsIndex.ByBlockIDTransactionIndex(blockID, height, index)
+		if err != nil {
+			return "", rpc.ConvertStorageError(err)
+		}
+
+		if txResult.Failed {
+			return DefaultFailedErrorMessage, nil
+		}
+
+		// the transaction did not fail, so there is no error message
+		return "", nil
+	}
+
+	return resp.ErrorMessage, nil
+}
+
+// ErrorMessagesByBlockID returns all error messages for failed transactions by blockID.
+// If transaction error messages are stored locally, they will be checked first in local storage.
+// If error messages are not stored locally, an RPC call will be made to the EN to fetch the messages.
+//
+// Expected errors during normal operation:
+//   - InsufficientExecutionReceipts - found insufficient receipts for the given block ID.
+//   - status.Error - remote gRPC call to EN has failed.
+func (e *ProviderImpl) ErrorMessagesByBlockID(
+	ctx context.Context,
+	blockID flow.Identifier,
+	height uint64,
+) (map[flow.Identifier]string, error) {
+	result := make(map[flow.Identifier]string)
+
+	if e.txResultErrorMessages != nil {
+		res, err := e.txResultErrorMessages.ByBlockID(blockID)
+		if err == nil {
+			for _, value := range res {
+				result[value.TransactionID] = value.ErrorMessage
+			}
+
+			return result, nil
+		}
+	}
+
+	execNodes, err := e.execNodeIdentitiesProvider.ExecutionNodesForBlockID(
+		ctx,
+		blockID,
+	)
+	if err != nil {
+		if common.IsInsufficientExecutionReceipts(err) {
+			return nil, status.Error(codes.NotFound, err.Error())
+		}
+		return nil, rpc.ConvertError(err, "failed to select execution nodes", codes.Internal)
+	}
+	req := &execproto.GetTransactionErrorMessagesByBlockIDRequest{
+		BlockId: convert.IdentifierToMessage(blockID),
+	}
+
+	resp, _, err := e.ErrorMessageByBlockIDFromAnyEN(ctx, execNodes, req)
+	if err != nil {
+		// If no execution node returned a valid response, fall back to the local
+		// result index and return the static message "failed" for every failed transaction.
+		txResults, err := e.txResultsIndex.ByBlockID(blockID, height)
+		if err != nil {
+			return nil, rpc.ConvertStorageError(err)
+		}
+
+		for _, txResult := range txResults {
+			if txResult.Failed {
+				result[txResult.TransactionID] = DefaultFailedErrorMessage
+			}
+		}
+
+		return result, nil
+	}
+
+	for _, value := range resp {
+		result[convert.MessageToIdentifier(value.TransactionId)] = value.ErrorMessage
+	}
+
+	return result, nil
+}
+
+// ErrorMessageFromAnyEN performs an RPC call using the available nodes passed as argument.
+// The list of nodes must be non-empty, otherwise an error is returned.
+// Expected errors during normal operation:
+//   - status.Error - gRPC call failed, some of the possible codes are:
+//   - codes.NotFound - request cannot be served by the EN because the data is absent.
+//   - codes.Unavailable - remote node is unavailable.
+func (e *ProviderImpl) ErrorMessageFromAnyEN(
+	ctx context.Context,
+	execNodes flow.IdentitySkeletonList,
+	req *execproto.GetTransactionErrorMessageRequest,
+) (*execproto.GetTransactionErrorMessageResponse, error) {
+	// if we were passed an empty list of execution nodes, return a specific error
+	if len(execNodes) == 0 {
+		return nil, errors.New("zero execution nodes")
+	}
+
+	var resp *execproto.GetTransactionErrorMessageResponse
+	errToReturn := e.nodeCommunicator.CallAvailableNode(
+		execNodes,
+		func(node *flow.IdentitySkeleton) error {
+			var err error
+			resp, err = e.tryGetTransactionErrorMessageFromEN(ctx, node, req)
+			if err == nil {
+				e.log.Debug().
+					Str("execution_node", node.String()).
+					Hex("block_id", req.GetBlockId()).
+					Hex("transaction_id", req.GetTransactionId()).
+					Msg("Successfully got transaction error message from any node")
+				return nil
+			}
+			return err
+		},
+		nil,
+	)
+
+	// log the errors
+	if errToReturn != nil {
+		e.log.Err(errToReturn).Msg("failed to get transaction error message from execution nodes")
+		return nil, errToReturn
+	}
+
+	return resp, nil
+}
+
+// ErrorMessageByIndexFromAnyEN performs an RPC call using the available nodes passed as argument.
+// The list of nodes must be non-empty, otherwise an error is returned.
+// Expected errors during normal operation:
+//   - status.Error - gRPC call failed, some of the possible codes are:
+//   - codes.NotFound - request cannot be served by the EN because the data is absent.
+//   - codes.Unavailable - remote node is unavailable.
+func (e *ProviderImpl) ErrorMessageByIndexFromAnyEN(
+	ctx context.Context,
+	execNodes flow.IdentitySkeletonList,
+	req *execproto.GetTransactionErrorMessageByIndexRequest,
+) (*execproto.GetTransactionErrorMessageResponse, error) {
+	// if we were passed an empty list of execution nodes, return a specific error
+	if len(execNodes) == 0 {
+		return nil, errors.New("zero execution nodes")
+	}
+
+	var resp *execproto.GetTransactionErrorMessageResponse
+	errToReturn := e.nodeCommunicator.CallAvailableNode(
+		execNodes,
+		func(node *flow.IdentitySkeleton) error {
+			var err error
+			resp, err = e.tryGetTransactionErrorMessageByIndexFromEN(ctx, node, req)
+			if err == nil {
+				e.log.Debug().
+					Str("execution_node", node.String()).
+					Hex("block_id", req.GetBlockId()).
+					Uint32("index", req.GetIndex()).
+					Msg("Successfully got transaction error message by index from any node")
+				return nil
+			}
+			return err
+		},
+		nil,
+	)
+	if errToReturn != nil {
+		e.log.Err(errToReturn).Msg("failed to get transaction error message by index from execution nodes")
+		return nil, errToReturn
+	}
+
+	return resp, nil
+}
+
+// ErrorMessageByBlockIDFromAnyEN performs an RPC call using the available nodes passed as argument.
+// The list of nodes must be non-empty, otherwise an error is returned.
+// Expected errors during normal operation:
+//   - status.Error - gRPC call failed, some of the possible codes are:
+//   - codes.NotFound - request cannot be served by the EN because the data is absent.
+//   - codes.Unavailable - remote node is unavailable.
+func (e *ProviderImpl) ErrorMessageByBlockIDFromAnyEN(
+	ctx context.Context,
+	execNodes flow.IdentitySkeletonList,
+	req *execproto.GetTransactionErrorMessagesByBlockIDRequest,
+) ([]*execproto.GetTransactionErrorMessagesResponse_Result, *flow.IdentitySkeleton, error) {
+	// if we were passed an empty list of execution nodes, return a specific error
+	if len(execNodes) == 0 {
+		return nil, nil, errors.New("zero execution nodes")
+	}
+
+	var resp *execproto.GetTransactionErrorMessagesResponse
+	var execNode *flow.IdentitySkeleton
+
+	errToReturn := e.nodeCommunicator.CallAvailableNode(
+		execNodes,
+		func(node *flow.IdentitySkeleton) error {
+			var err error
+			execNode = node
+			resp, err = e.tryGetTransactionErrorMessagesByBlockIDFromEN(ctx, node, req)
+			if err == nil {
+				e.log.Debug().
+					Str("execution_node", node.String()).
+					Hex("block_id", req.GetBlockId()).
+					Msg("Successfully got transaction error messages from any node")
+				return nil
+			}
+			return err
+		},
+		nil,
+	)
+
+	// log the errors
+	if errToReturn != nil {
+		e.log.Err(errToReturn).Msg("failed to get transaction error messages from execution nodes")
+		return nil, nil, errToReturn
+	}
+
+	return resp.GetResults(), execNode, nil
+}
+
+// tryGetTransactionErrorMessageFromEN performs a gRPC call to the specified execution node and returns the response.
+//
+// Expected errors during normal operation:
+//   - status.Error - gRPC call failed, some of the possible codes are:
+//   - codes.NotFound - request cannot be served by the EN because the data is absent.
+//   - codes.Unavailable - remote node is unavailable.
+func (e *ProviderImpl) tryGetTransactionErrorMessageFromEN(
+	ctx context.Context,
+	execNode *flow.IdentitySkeleton,
+	req *execproto.GetTransactionErrorMessageRequest,
+) (*execproto.GetTransactionErrorMessageResponse, error) {
+	execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address)
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+	return execRPCClient.GetTransactionErrorMessage(ctx, req)
+}
+
+// tryGetTransactionErrorMessageByIndexFromEN performs a gRPC call to the specified execution node and returns the response.
+// Expected errors during normal operation:
+//   - status.Error - gRPC call failed, some of the possible codes are:
+//   - codes.NotFound - request cannot be served by the EN because the data is absent.
+//   - codes.Unavailable - remote node is unavailable.
+func (e *ProviderImpl) tryGetTransactionErrorMessageByIndexFromEN(
+	ctx context.Context,
+	execNode *flow.IdentitySkeleton,
+	req *execproto.GetTransactionErrorMessageByIndexRequest,
+) (*execproto.GetTransactionErrorMessageResponse, error) {
+	execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address)
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+	return execRPCClient.GetTransactionErrorMessageByIndex(ctx, req)
+}
+
+// tryGetTransactionErrorMessagesByBlockIDFromEN performs a gRPC call to the specified execution node and returns the response.
+// Expected errors during normal operation:
+//   - status.Error - gRPC call failed, some of the possible codes are:
+//   - codes.NotFound - request cannot be served by the EN because the data is absent.
+//   - codes.Unavailable - remote node is unavailable.
+func (e *ProviderImpl) tryGetTransactionErrorMessagesByBlockIDFromEN(
+	ctx context.Context,
+	execNode *flow.IdentitySkeleton,
+	req *execproto.GetTransactionErrorMessagesByBlockIDRequest,
+) (*execproto.GetTransactionErrorMessagesResponse, error) {
+	execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address)
+	if err != nil {
+		return nil, err
+	}
+	defer closer.Close()
+	return execRPCClient.GetTransactionErrorMessagesByBlockID(ctx, req)
+}
diff --git a/engine/access/rpc/backend/transactions/error_messages/provider_test.go b/engine/access/rpc/backend/transactions/error_messages/provider_test.go
new file mode 100644
index 00000000000..2c087fb8a93
--- /dev/null
+++ b/engine/access/rpc/backend/transactions/error_messages/provider_test.go
@@ -0,0 +1,964 @@
+package error_messages
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"testing"
+
+	execproto "github.com/onflow/flow/protobuf/go/flow/execution"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/suite"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine/access/index"
+	accessmock "github.com/onflow/flow-go/engine/access/mock"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator"
+	connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock"
+	commonrpc "github.com/onflow/flow-go/engine/common/rpc"
+	"github.com/onflow/flow-go/engine/common/rpc/convert"
+	"github.com/onflow/flow-go/model/flow"
+	syncmock "github.com/onflow/flow-go/module/state_synchronization/mock"
+	protocolmock "github.com/onflow/flow-go/state/protocol/mock"
+	"github.com/onflow/flow-go/storage"
+	storagemock "github.com/onflow/flow-go/storage/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+	"github.com/onflow/flow-go/utils/unittest/mocks"
+)
+
+const expectedErrorMsg = 
"expected test error" + +type Suite struct { + suite.Suite + + log zerolog.Logger + state *protocolmock.State + snapshot *protocolmock.Snapshot + + block *flow.Block + blockID flow.Identifier + fixedExecutionNodes flow.IdentityList + fixedExecutionNodeIDs flow.IdentifierList + preferredExecutionNodeIDs flow.IdentifierList + + receipts *storagemock.ExecutionReceipts + lightTxResults *storagemock.LightTransactionResults + txResultErrorMessages *storagemock.TransactionResultErrorMessages + + executionAPIClient *accessmock.ExecutionAPIClient + + nodeCommunicator *node_communicator.NodeCommunicator + nodeProvider *commonrpc.ExecutionNodeIdentitiesProvider + connectionFactory *connectionmock.ConnectionFactory + + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + txResultsIndex *index.TransactionResultsIndex +} + +func TestSuite(t *testing.T) { + suite.Run(t, new(Suite)) +} + +func (suite *Suite) SetupTest() { + suite.log = unittest.Logger() + suite.snapshot = protocolmock.NewSnapshot(suite.T()) + + header := unittest.BlockHeaderFixture() + params := protocolmock.NewParams(suite.T()) + params.On("FinalizedRoot").Return(header, nil).Maybe() + params.On("SporkID").Return(unittest.IdentifierFixture(), nil).Maybe() + params.On("SporkRootBlockHeight").Return(header.Height, nil).Maybe() + params.On("SealedRoot").Return(header, nil).Maybe() + + suite.state = protocolmock.NewState(suite.T()) + suite.state.On("Params").Return(params).Maybe() + + suite.receipts = storagemock.NewExecutionReceipts(suite.T()) + suite.lightTxResults = storagemock.NewLightTransactionResults(suite.T()) + suite.txResultErrorMessages = storagemock.NewTransactionResultErrorMessages(suite.T()) + suite.executionAPIClient = accessmock.NewExecutionAPIClient(suite.T()) + suite.connectionFactory = connectionmock.NewConnectionFactory(suite.T()) + suite.nodeCommunicator = node_communicator.NewNodeCommunicator(false) + + suite.block = unittest.BlockFixture() + suite.blockID = suite.block.ID() + _, suite.fixedExecutionNodes = suite.setupReceipts(suite.block) + suite.fixedExecutionNodeIDs = suite.fixedExecutionNodes.NodeIDs() + suite.preferredExecutionNodeIDs = nil + + suite.nodeProvider = commonrpc.NewExecutionNodeIdentitiesProvider( + suite.log, + suite.state, + suite.receipts, + suite.preferredExecutionNodeIDs, + suite.fixedExecutionNodeIDs, + ) + + suite.reporter = syncmock.NewIndexReporter(suite.T()) + suite.indexReporter = index.NewReporter() + err := suite.indexReporter.Initialize(suite.reporter) + suite.Require().NoError(err) + suite.txResultsIndex = index.NewTransactionResultsIndex(suite.indexReporter, suite.lightTxResults) +} + +func (suite *Suite) TestLookupByTxID_FromExecutionNode_HappyPath() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + + // Setup mock receipts and execution node identities. + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + + // the connection factory should be used to get the execution node client + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + // Mock the cache lookup for the transaction error message, returning "not found". + suite.txResultErrorMessages. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(nil, storage.ErrNotFound). 
+		Once()
+
+	errMessageProvider := NewTxErrorMessageProvider(
+		suite.log,
+		suite.txResultErrorMessages,
+		suite.txResultsIndex,
+		suite.connectionFactory,
+		suite.nodeCommunicator,
+		suite.nodeProvider,
+	)
+
+	// Mock the execution node API call to fetch the error message.
+	exeEventReq := &execproto.GetTransactionErrorMessageRequest{
+		BlockId:       suite.blockID[:],
+		TransactionId: failedTxId[:],
+	}
+	exeEventResp := &execproto.GetTransactionErrorMessageResponse{
+		TransactionId: failedTxId[:],
+		ErrorMessage:  expectedErrorMsg,
+	}
+
+	suite.executionAPIClient.
+		On("GetTransactionErrorMessage", mock.Anything, exeEventReq).
+		Return(exeEventResp, nil).
+		Once()
+
+	// Perform the lookup and assert that the error message is retrieved correctly.
+	errMsg, err := errMessageProvider.ErrorMessageByTransactionID(
+		context.Background(),
+		suite.blockID,
+		suite.block.Height,
+		failedTxId,
+	)
+	suite.Require().NoError(err)
+	suite.Require().Equal(expectedErrorMsg, errMsg)
+}
+
+func (suite *Suite) TestLookupByTxID_FromStorage_HappyPath() {
+	failedTx := unittest.TransactionFixture()
+	failedTxId := failedTx.ID()
+	failedTxIndex := rand.Uint32()
+
+	errMessageProvider := NewTxErrorMessageProvider(
+		suite.log,
+		suite.txResultErrorMessages,
+		suite.txResultsIndex,
+		suite.connectionFactory,
+		suite.nodeCommunicator,
+		suite.nodeProvider,
+	)
+
+	// Mock the storage lookup for the transaction error message, returning a stored result.
+	suite.txResultErrorMessages.
+		On("ByBlockIDTransactionID", suite.blockID, failedTxId).
+		Return(&flow.TransactionResultErrorMessage{
+			TransactionID: failedTxId,
+			ErrorMessage:  expectedErrorMsg,
+			Index:         failedTxIndex,
+			ExecutorID:    unittest.IdentifierFixture(),
+		}, nil).
+		Once()
+
+	errMsg, err := errMessageProvider.ErrorMessageByTransactionID(
+		context.Background(),
+		suite.blockID,
+		suite.block.Height,
+		failedTxId,
+	)
+	suite.Require().NoError(err)
+	suite.Require().Equal(expectedErrorMsg, errMsg)
+}
+
+func (suite *Suite) TestLookupByTxID_ExecNodeError_UnknownTx() {
+	failedTx := unittest.TransactionFixture()
+	failedTxId := failedTx.ID()
+
+	suite.state.On("Final").Return(suite.snapshot, nil).Once()
+	suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once()
+	suite.reporter.On("LowestIndexedHeight").Return(suite.block.Height, nil).Once()
+	suite.reporter.On("HighestIndexedHeight").Return(suite.block.Height+10, nil).Once()
+
+	// lookup should try each of the 2 ENs in fixedENIDs
+	suite.executionAPIClient.
+		On("GetTransactionErrorMessage", mock.Anything, mock.Anything).
+		Return(nil, status.Error(codes.Unavailable, "")).
+		Twice()
+
+	suite.connectionFactory.
+		On("GetExecutionAPIClient", mock.Anything).
+		Return(suite.executionAPIClient, &mocks.MockCloser{}, nil).
+		Twice()
+
+	// Set up mocks so that neither the transaction result nor the tx error message is found in storage.
+	suite.txResultErrorMessages.
+		On("ByBlockIDTransactionID", suite.blockID, failedTxId).
+		Return(nil, storage.ErrNotFound).
+		Once()
+	suite.lightTxResults.
+		On("ByBlockIDTransactionID", suite.blockID, failedTxId).
+		Return(nil, storage.ErrNotFound).
+ Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByTransactionID( + context.Background(), + suite.blockID, + suite.block.Height, + failedTxId, + ) + suite.Require().Error(err) + suite.Require().Equal(codes.NotFound, status.Code(err)) + suite.Require().Empty(errMsg) +} + +func (suite *Suite) TestLookupByTxID_ExecNodeError_TxResultNotFailed() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Height+10, nil).Once() + + // Lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessage", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + // Setup mock that the transaction error message is not found in storage. + suite.txResultErrorMessages. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(nil, storage.ErrNotFound). + Once() + + // Setup mock that the transaction result exists and is not failed. + suite.lightTxResults. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(&flow.LightTransactionResult{ + TransactionID: failedTxId, + Failed: false, + ComputationUsed: 0, + }, nil). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByTransactionID( + context.Background(), + suite.blockID, + suite.block.Height, + failedTxId, + ) + suite.Require().NoError(err) + suite.Require().Empty(errMsg) +} + +func (suite *Suite) TestLookupByTxID_ExecNodeError_TxResultFailed() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Height+10, nil).Once() + + // lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessage", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + // Setup mock that the transaction error message is not found in storage. + suite.txResultErrorMessages. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(nil, storage.ErrNotFound). + Once() + + // Setup mock that the transaction result exists and is failed. + suite.lightTxResults. + On("ByBlockIDTransactionID", suite.blockID, failedTxId). + Return(&flow.LightTransactionResult{ + TransactionID: failedTxId, + Failed: true, + ComputationUsed: 0, + }, nil). 
+ Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByTransactionID( + context.Background(), + suite.blockID, + suite.block.Height, + failedTxId, + ) + suite.Require().NoError(err) + suite.Require().Equal(errMsg, DefaultFailedErrorMessage) +} + +func (suite *Suite) TestLookupByIndex_FromExecutionNode_HappyPath() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + failedTxIndex := rand.Uint32() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot. + On("Identities", mock.Anything). + Return(suite.fixedExecutionNodes, nil). + Once() + + exeEventReq := &execproto.GetTransactionErrorMessageByIndexRequest{ + BlockId: suite.blockID[:], + Index: failedTxIndex, + } + exeEventResp := &execproto.GetTransactionErrorMessageResponse{ + TransactionId: failedTxId[:], + ErrorMessage: expectedErrorMsg, + } + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, exeEventReq). + Return(exeEventResp, nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + suite.txResultErrorMessages. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(nil, storage.ErrNotFound). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByIndex( + context.Background(), + suite.blockID, + suite.block.Height, + failedTxIndex, + ) + suite.Require().NoError(err) + suite.Require().Equal(expectedErrorMsg, errMsg) +} + +// TestLookupTransactionErrorMessageByIndex_HappyPath verifies the lookup of a transaction error message +// by block ID and transaction index. +// It tests two cases: +// 1. Happy path where the error message is fetched from the EN if it is not found in the cache. +// 2. Happy path where the error message is served from the storage database if it exists. +func (suite *Suite) TestLookupTransactionErrorMessageByIndex_HappyPath() { + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + failedTxIndex := rand.Uint32() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + + suite.Run("happy path from EN", func() { + exeEventReq := &execproto.GetTransactionErrorMessageByIndexRequest{ + BlockId: suite.blockID[:], + Index: failedTxIndex, + } + exeEventResp := &execproto.GetTransactionErrorMessageResponse{ + TransactionId: failedTxId[:], + ErrorMessage: expectedErrorMsg, + } + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, exeEventReq). + Return(exeEventResp, nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + suite.txResultErrorMessages. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(nil, storage.ErrNotFound). 
+			Once()
+
+		errMessageProvider := NewTxErrorMessageProvider(
+			suite.log,
+			suite.txResultErrorMessages,
+			suite.txResultsIndex,
+			suite.connectionFactory,
+			suite.nodeCommunicator,
+			suite.nodeProvider,
+		)
+
+		errMsg, err := errMessageProvider.ErrorMessageByIndex(context.Background(), suite.blockID, suite.block.Height, failedTxIndex)
+		suite.Require().NoError(err)
+		suite.Require().Equal(expectedErrorMsg, errMsg)
+	})
+
+	suite.Run("happy path from storage db", func() {
+		errMessageProvider := NewTxErrorMessageProvider(
+			suite.log,
+			suite.txResultErrorMessages,
+			suite.txResultsIndex,
+			suite.connectionFactory,
+			suite.nodeCommunicator,
+			suite.nodeProvider,
+		)
+
+		suite.txResultErrorMessages.
+			On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex).
+			Return(&flow.TransactionResultErrorMessage{
+				TransactionID: failedTxId,
+				ErrorMessage:  expectedErrorMsg,
+				Index:         failedTxIndex,
+				ExecutorID:    unittest.IdentifierFixture(),
+			}, nil).
+			Once()
+
+		errMsg, err := errMessageProvider.ErrorMessageByIndex(
+			context.Background(),
+			suite.blockID,
+			suite.block.Height,
+			failedTxIndex,
+		)
+		suite.Require().NoError(err)
+		suite.Require().Equal(expectedErrorMsg, errMsg)
+	})
+}
+
+func (suite *Suite) TestLookupByIndex_ExecutionNodeError_UnknownTx() {
+	failedTxIndex := rand.Uint32()
+
+	suite.state.On("Final").Return(suite.snapshot, nil).Once()
+	suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once()
+	suite.reporter.On("LowestIndexedHeight").Return(suite.block.Height, nil).Once()
+	suite.reporter.On("HighestIndexedHeight").Return(suite.block.Height+10, nil).Once()
+
+	suite.connectionFactory.
+		On("GetExecutionAPIClient", mock.Anything).
+		Return(suite.executionAPIClient, &mocks.MockCloser{}, nil).
+		Twice()
+
+	// lookup should try each of the 2 ENs in fixedENIDs
+	suite.executionAPIClient.
+		On("GetTransactionErrorMessageByIndex", mock.Anything, mock.Anything).
+		Return(nil, status.Error(codes.Unavailable, "")).
+		Twice()
+
+	// Set up mocks so that neither the transaction result nor the tx error message is found in storage.
+	suite.txResultErrorMessages.
+		On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex).
+		Return(nil, storage.ErrNotFound).
+		Once()
+	suite.lightTxResults.
+		On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex).
+		Return(nil, storage.ErrNotFound).
+		Once()
+
+	errMessageProvider := NewTxErrorMessageProvider(
+		suite.log,
+		suite.txResultErrorMessages,
+		suite.txResultsIndex,
+		suite.connectionFactory,
+		suite.nodeCommunicator,
+		suite.nodeProvider,
+	)
+
+	errMsg, err := errMessageProvider.ErrorMessageByIndex(
+		context.Background(),
+		suite.blockID,
+		suite.block.Height,
+		failedTxIndex,
+	)
+	suite.Require().Error(err)
+	suite.Require().Equal(codes.NotFound, status.Code(err))
+	suite.Require().Empty(errMsg)
+}
+
+func (suite *Suite) TestLookupByIndex_ExecutionNodeError_TxResultNotFailed() {
+	failedTxIndex := rand.Uint32()
+	failedTx := unittest.TransactionFixture()
+	failedTxId := failedTx.ID()
+
+	suite.state.On("Final").Return(suite.snapshot, nil).Once()
+	suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once()
+	suite.reporter.On("LowestIndexedHeight").Return(suite.block.Height, nil).Once()
+	suite.reporter.On("HighestIndexedHeight").Return(suite.block.Height+10, nil).Once()
+
+	suite.connectionFactory.
+		On("GetExecutionAPIClient", mock.Anything).
+		Return(suite.executionAPIClient, &mocks.MockCloser{}, nil).
+ Twice() + + // lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + // Setup mock that the transaction error message is not found in storage. + suite.txResultErrorMessages. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(nil, storage.ErrNotFound). + Once() + + // Setup mock that the transaction result exists and is not failed. + suite.lightTxResults. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(&flow.LightTransactionResult{ + TransactionID: failedTxId, + Failed: false, + ComputationUsed: 0, + }, nil). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByIndex( + context.Background(), + suite.blockID, + suite.block.Height, + failedTxIndex, + ) + suite.Require().NoError(err) + suite.Require().Empty(errMsg) +} + +func (suite *Suite) TestLookupByIndex_ExecutionNodeError_TxResultFailed() { + failedTxIndex := rand.Uint32() + failedTx := unittest.TransactionFixture() + failedTxId := failedTx.ID() + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + suite.reporter.On("LowestIndexedHeight").Return(suite.block.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(suite.block.Height+10, nil).Once() + + // lookup should try each of the 2 ENs in fixedENIDs + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.Unavailable, "")). + Twice() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Twice() + + // Setup mock that the transaction error message is not found in storage. + suite.txResultErrorMessages. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(nil, storage.ErrNotFound). + Once() + + // Setup mock that the transaction result exists and is failed. + suite.lightTxResults. + On("ByBlockIDTransactionIndex", suite.blockID, failedTxIndex). + Return(&flow.LightTransactionResult{ + TransactionID: failedTxId, + Failed: true, + ComputationUsed: 0, + }, nil). 
+ Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMsg, err := errMessageProvider.ErrorMessageByIndex( + context.Background(), + suite.blockID, + suite.block.Height, + failedTxIndex, + ) + suite.Require().NoError(err) + suite.Require().Equal(errMsg, DefaultFailedErrorMessage) +} + +func (suite *Suite) TestLookupByBlockID_FromExecutionNode_HappyPath() { + resultsByBlockID := make([]flow.LightTransactionResult, 0) + for i := 0; i < 5; i++ { + resultsByBlockID = append(resultsByBlockID, flow.LightTransactionResult{ + TransactionID: unittest.IdentifierFixture(), + Failed: i%2 == 0, // create a mix of failed and non-failed transactions + ComputationUsed: 0, + }) + } + + suite.state.On("Final").Return(suite.snapshot, nil).Once() + suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once() + + // Mock the execution node API call to fetch the error messages. + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: suite.blockID[:], + } + exeErrMessagesResp := &execproto.GetTransactionErrorMessagesResponse{} + for _, result := range resultsByBlockID { + r := result + if r.Failed { + errMsg := fmt.Sprintf("%s.%s", expectedErrorMsg, r.TransactionID) + exeErrMessagesResp.Results = append(exeErrMessagesResp.Results, &execproto.GetTransactionErrorMessagesResponse_Result{ + TransactionId: r.TransactionID[:], + ErrorMessage: errMsg, + }) + } + } + suite.executionAPIClient. + On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(exeErrMessagesResp, nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + suite.txResultErrorMessages. + On("ByBlockID", suite.blockID). + Return(nil, storage.ErrNotFound). + Once() + + errMessageProvider := NewTxErrorMessageProvider( + suite.log, + suite.txResultErrorMessages, + suite.txResultsIndex, + suite.connectionFactory, + suite.nodeCommunicator, + suite.nodeProvider, + ) + + errMessages, err := errMessageProvider.ErrorMessagesByBlockID( + context.Background(), + suite.blockID, + suite.block.Height, + ) + suite.Require().NoError(err) + suite.Require().Len(errMessages, len(exeErrMessagesResp.Results)) + for _, expectedResult := range exeErrMessagesResp.Results { + errMsg, ok := errMessages[convert.MessageToIdentifier(expectedResult.TransactionId)] + suite.Require().True(ok) + suite.Assert().Equal(expectedResult.ErrorMessage, errMsg) + } +} + +func (suite *Suite) TestLookupByBlockID_FromStorage_HappyPath() { + resultsByBlockID := make([]flow.LightTransactionResult, 0) + for i := 0; i < 5; i++ { + resultsByBlockID = append(resultsByBlockID, flow.LightTransactionResult{ + TransactionID: unittest.IdentifierFixture(), + Failed: i%2 == 0, // create a mix of failed and non-failed transactions + ComputationUsed: 0, + }) + } + + var txErrorMessages []flow.TransactionResultErrorMessage + for i, result := range resultsByBlockID { + if result.Failed { + errMsg := fmt.Sprintf("%s.%s", expectedErrorMsg, result.TransactionID) + + txErrorMessages = append(txErrorMessages, + flow.TransactionResultErrorMessage{ + TransactionID: result.TransactionID, + ErrorMessage: errMsg, + Index: uint32(i), + ExecutorID: unittest.IdentifierFixture(), + }) + } + } + suite.txResultErrorMessages. + On("ByBlockID", suite.blockID). 
+		Return(txErrorMessages, nil).
+		Once()
+
+	errMessageProvider := NewTxErrorMessageProvider(
+		suite.log,
+		suite.txResultErrorMessages,
+		suite.txResultsIndex,
+		suite.connectionFactory,
+		suite.nodeCommunicator,
+		suite.nodeProvider,
+	)
+
+	errMessages, err := errMessageProvider.ErrorMessagesByBlockID(
+		context.Background(),
+		suite.blockID,
+		suite.block.Height,
+	)
+	suite.Require().NoError(err)
+	suite.Require().Len(errMessages, len(txErrorMessages))
+
+	for _, expected := range txErrorMessages {
+		errMsg, ok := errMessages[expected.TransactionID]
+		suite.Require().True(ok)
+		suite.Assert().Equal(expected.ErrorMessage, errMsg)
+	}
+}
+
+func (suite *Suite) TestLookupByBlockID_ExecutionNodeError_UnknownBlock() {
+	suite.state.On("Final").Return(suite.snapshot, nil).Once()
+	suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once()
+	suite.reporter.On("LowestIndexedHeight").Return(suite.block.Height, nil).Once()
+	suite.reporter.On("HighestIndexedHeight").Return(suite.block.Height+10, nil).Once()
+
+	suite.connectionFactory.
+		On("GetExecutionAPIClient", mock.Anything).
+		Return(suite.executionAPIClient, &mocks.MockCloser{}, nil).
+		Twice()
+
+	suite.executionAPIClient.
+		On("GetTransactionErrorMessagesByBlockID", mock.Anything, mock.Anything).
+		Return(nil, status.Error(codes.Unavailable, "")).
+		Twice()
+
+	// Set up mocks so that neither the transaction results nor the tx error messages are found in storage.
+	suite.txResultErrorMessages.
+		On("ByBlockID", suite.blockID).
+		Return(nil, storage.ErrNotFound).
+		Once()
+	suite.lightTxResults.
+		On("ByBlockID", suite.blockID).
+		Return(nil, storage.ErrNotFound).
+		Once()
+
+	errMessageProvider := NewTxErrorMessageProvider(
+		suite.log,
+		suite.txResultErrorMessages,
+		suite.txResultsIndex,
+		suite.connectionFactory,
+		suite.nodeCommunicator,
+		suite.nodeProvider,
+	)
+
+	// Perform the lookup and expect a "NotFound" error with an empty error message.
+	errMsg, err := errMessageProvider.ErrorMessagesByBlockID(
+		context.Background(),
+		suite.blockID,
+		suite.block.Height,
+	)
+	suite.Require().Error(err)
+	suite.Require().Equal(codes.NotFound, status.Code(err))
+	suite.Require().Empty(errMsg)
+}
+
+func (suite *Suite) TestLookupByBlockID_ExecutionNodeError_TxResultNotFailed() {
+	suite.state.On("Final").Return(suite.snapshot, nil).Once()
+	suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once()
+	suite.reporter.On("LowestIndexedHeight").Return(suite.block.Height, nil).Once()
+	suite.reporter.On("HighestIndexedHeight").Return(suite.block.Height+10, nil).Once()
+
+	suite.connectionFactory.
+		On("GetExecutionAPIClient", mock.Anything).
+		Return(suite.executionAPIClient, &mocks.MockCloser{}, nil).
+		Twice()
+
+	// lookup should try each of the 2 ENs in fixedENIDs
+	suite.executionAPIClient.
+		On("GetTransactionErrorMessagesByBlockID", mock.Anything, mock.Anything).
+		Return(nil, status.Error(codes.Unavailable, "")).
+		Twice()
+
+	// Set up a mock so that the transaction error messages are not found in storage.
+	suite.txResultErrorMessages.
+		On("ByBlockID", suite.blockID).
+		Return(nil, storage.ErrNotFound).
+		Once()
+
+	// Set up a mock so that the transaction results exist and are not failed.
+	suite.lightTxResults.
+		On("ByBlockID", suite.blockID).
+		Return([]flow.LightTransactionResult{
+			{
+				TransactionID:   unittest.IdentifierFixture(),
+				Failed:          false,
+				ComputationUsed: 0,
+			},
+			{
+				TransactionID:   unittest.IdentifierFixture(),
+				Failed:          false,
+				ComputationUsed: 0,
+			},
+		}, nil).
+		Once()
+
+	errMessageProvider := NewTxErrorMessageProvider(
+		suite.log,
+		suite.txResultErrorMessages,
+		suite.txResultsIndex,
+		suite.connectionFactory,
+		suite.nodeCommunicator,
+		suite.nodeProvider,
+	)
+
+	errMsg, err := errMessageProvider.ErrorMessagesByBlockID(
+		context.Background(),
+		suite.blockID,
+		suite.block.Height,
+	)
+	suite.Require().NoError(err)
+	suite.Require().Empty(errMsg)
+}
+
+func (suite *Suite) TestLookupTransactionErrorMessagesByBlockID_FailedToFetch() {
+	suite.state.On("Final").Return(suite.snapshot, nil).Once()
+	suite.snapshot.On("Identities", mock.Anything).Return(suite.fixedExecutionNodes, nil).Once()
+	suite.reporter.On("LowestIndexedHeight").Return(suite.block.Height, nil).Once()
+	suite.reporter.On("HighestIndexedHeight").Return(suite.block.Height+10, nil).Once()
+
+	suite.connectionFactory.
+		On("GetExecutionAPIClient", mock.Anything).
+		Return(suite.executionAPIClient, &mocks.MockCloser{}, nil).
+		Twice()
+
+	// lookup should try each of the 2 ENs in fixedENIDs
+	suite.executionAPIClient.
+		On("GetTransactionErrorMessagesByBlockID", mock.Anything, mock.Anything).
+		Return(nil, status.Error(codes.Unavailable, "")).
+		Twice()
+
+	// Set up a mock so that the transaction error messages are not found in storage.
+	suite.txResultErrorMessages.
+		On("ByBlockID", suite.blockID).
+		Return(nil, storage.ErrNotFound).
+		Once()
+
+	failedResultsByBlockID := []flow.LightTransactionResult{
+		{
+			TransactionID:   unittest.IdentifierFixture(),
+			Failed:          true,
+			ComputationUsed: 0,
+		},
+		{
+			TransactionID:   unittest.IdentifierFixture(),
+			Failed:          true,
+			ComputationUsed: 0,
+		},
+	}
+
+	suite.lightTxResults.
+		On("ByBlockID", suite.blockID).
+		Return(failedResultsByBlockID, nil).
+		Once()
+
+	expectedTxErrorMessages := make(map[flow.Identifier]string)
+	for _, result := range failedResultsByBlockID {
+		if result.Failed {
+			expectedTxErrorMessages[result.TransactionID] = DefaultFailedErrorMessage
+		}
+	}
+
+	errMessageProvider := NewTxErrorMessageProvider(
+		suite.log,
+		suite.txResultErrorMessages,
+		suite.txResultsIndex,
+		suite.connectionFactory,
+		suite.nodeCommunicator,
+		suite.nodeProvider,
+	)
+
+	// Perform the lookup and expect the failed error messages to be returned.
+	errMsg, err := errMessageProvider.ErrorMessagesByBlockID(
+		context.Background(),
+		suite.blockID,
+		suite.block.Height,
+	)
+	suite.Require().NoError(err)
+	suite.Require().Len(errMsg, len(expectedTxErrorMessages))
+
+	for txID, expectedMessage := range expectedTxErrorMessages {
+		actualMessage, ok := errMsg[txID]
+		suite.Require().True(ok)
+		suite.Assert().Equal(expectedMessage, actualMessage)
+	}
+}
+
+func (suite *Suite) setupReceipts(block *flow.Block) ([]*flow.ExecutionReceipt, flow.IdentityList) {
+	ids := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution))
+	receipt1 := unittest.ReceiptForBlockFixture(block)
+	receipt1.ExecutorID = ids[0].NodeID
+	receipt2 := unittest.ReceiptForBlockFixture(block)
+	receipt2.ExecutorID = ids[1].NodeID
+	receipt1.ExecutionResult = receipt2.ExecutionResult
+
+	receipts := flow.ExecutionReceiptList{receipt1, receipt2}
+	suite.receipts.
+		On("ByBlockID", block.ID()).
+		Return(receipts, nil).
+		Maybe()
+
+	return receipts, ids
+}
diff --git a/engine/access/rpc/backend/transactions/provider/execution_node.go b/engine/access/rpc/backend/transactions/provider/execution_node.go
new file mode 100644
index 00000000000..1031e66de60
--- /dev/null
+++ b/engine/access/rpc/backend/transactions/provider/execution_node.go
@@ -0,0 +1,752 @@
+package provider
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/onflow/flow/protobuf/go/flow/entities"
+	execproto "github.com/onflow/flow/protobuf/go/flow/execution"
+	"github.com/rs/zerolog"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine/access/rpc/backend/common"
+	"github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator"
+	txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status"
+	"github.com/onflow/flow-go/engine/access/rpc/connection"
+	"github.com/onflow/flow-go/engine/common/rpc"
+	"github.com/onflow/flow-go/engine/common/rpc/convert"
+	"github.com/onflow/flow-go/fvm/blueprints"
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+	accessmodel "github.com/onflow/flow-go/model/access"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+)
+
+type ENTransactionProvider struct {
+	log     zerolog.Logger
+	state   protocol.State
+	chainID flow.ChainID
+
+	collections storage.Collections
+
+	connFactory      connection.ConnectionFactory
+	nodeCommunicator node_communicator.Communicator
+	nodeProvider     *rpc.ExecutionNodeIdentitiesProvider
+
+	txStatusDeriver *txstatus.TxStatusDeriver
+
+	systemTxID                        flow.Identifier
+	scheduledCallbacksEnabled         bool
+	processScheduledCallbackEventType flow.EventType
+}
+
+var _ TransactionProvider = (*ENTransactionProvider)(nil)
+
+func NewENTransactionProvider(
+	log zerolog.Logger,
+	state protocol.State,
+	collections storage.Collections,
+	connFactory connection.ConnectionFactory,
+	nodeCommunicator node_communicator.Communicator,
+	execNodeIdentitiesProvider *rpc.ExecutionNodeIdentitiesProvider,
+	txStatusDeriver *txstatus.TxStatusDeriver,
+	systemTxID flow.Identifier,
+	chainID flow.ChainID,
+	scheduledCallbacksEnabled bool,
+) *ENTransactionProvider {
+	env := systemcontracts.SystemContractsForChain(chainID).AsTemplateEnv()
+	return &ENTransactionProvider{
+		log:                               log.With().Str("transaction_provider", "execution_node").Logger(),
+		state:                             state,
+		collections:                       collections,
+		connFactory:                       connFactory,
+		nodeCommunicator:                  nodeCommunicator,
+		nodeProvider:                      execNodeIdentitiesProvider,
+		txStatusDeriver:                   txStatusDeriver,
+		systemTxID:                        systemTxID,
+		chainID:                           chainID,
+		scheduledCallbacksEnabled:         scheduledCallbacksEnabled,
+		processScheduledCallbackEventType: blueprints.PendingExecutionEventType(env),
+	}
+}
+
+func (e *ENTransactionProvider) TransactionResult(
+	ctx context.Context,
+	block *flow.Header,
+	transactionID flow.Identifier,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) (*accessmodel.TransactionResult, error) {
+	blockID := block.ID()
+	// create an execution API request for events at blockID and transactionID
+	req := &execproto.GetTransactionResultRequest{
+		BlockId:       blockID[:],
+		TransactionId: transactionID[:],
+	}
+
+	execNodes, err := e.nodeProvider.ExecutionNodesForBlockID(
+		ctx,
+		blockID,
+	)
+	if err != nil {
+		// if no execution receipts were found, return a NotFound gRPC error
+		if common.IsInsufficientExecutionReceipts(err) {
+			return nil, 
status.Error(codes.NotFound, err.Error()) + } + return nil, err + } + + resp, err := e.getTransactionResultFromAnyExeNode(ctx, execNodes, req) + if err != nil { + return nil, err + } + + // tx body is irrelevant to status if it's in an executed block + txStatus, err := e.txStatusDeriver.DeriveTransactionStatus(block.Height, true) + if err != nil { + // this is an executed transaction. If we can't derive transaction status something is very wrong. + irrecoverable.Throw(ctx, fmt.Errorf("failed to derive transaction status: %w", err)) + return nil, err + } + + events, err := convert.MessagesToEventsWithEncodingConversion(resp.GetEvents(), resp.GetEventEncodingVersion(), requiredEventEncodingVersion) + if err != nil { + return nil, rpc.ConvertError(err, "failed to convert events to message", codes.Internal) + } + + return &accessmodel.TransactionResult{ + TransactionID: transactionID, + Status: txStatus, + StatusCode: uint(resp.GetStatusCode()), + Events: events, + ErrorMessage: resp.GetErrorMessage(), + BlockID: blockID, + BlockHeight: block.Height, + }, nil +} + +func (e *ENTransactionProvider) TransactionsByBlockID( + ctx context.Context, + block *flow.Block, +) ([]*flow.TransactionBody, error) { + var transactions []*flow.TransactionBody + blockID := block.ID() + + // user transactions + for _, guarantee := range block.Payload.Guarantees { + collection, err := e.collections.ByID(guarantee.CollectionID) + if err != nil { + return nil, rpc.ConvertStorageError(err) + } + + transactions = append(transactions, collection.Transactions...) + } + + // system transactions + // TODO: implement system that allows this endpoint to dynamically determine if scheduled + // transactions were enabled for this block. See https://github.com/onflow/flow-go/issues/7873 + if !e.scheduledCallbacksEnabled { + systemTx, err := blueprints.SystemChunkTransaction(e.chainID.Chain()) + if err != nil { + return nil, fmt.Errorf("failed to construct system chunk transaction: %w", err) + } + + return append(transactions, systemTx), nil + } + + events, err := e.getBlockEvents(ctx, blockID, e.processScheduledCallbackEventType) + if err != nil { + return nil, rpc.ConvertError(err, "failed to retrieve events from any execution node", codes.Internal) + } + + sysCollection, err := blueprints.SystemCollection(e.chainID.Chain(), events) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not construct system collection: %v", err) + } + + return append(transactions, sysCollection.Transactions...), nil +} + +func (e *ENTransactionProvider) TransactionResultByIndex( + ctx context.Context, + block *flow.Block, + index uint32, + encodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + blockID := block.ID() + // create request and forward to EN + req := &execproto.GetTransactionByIndexRequest{ + BlockId: blockID[:], + Index: index, + } + + execNodes, err := e.nodeProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + if common.IsInsufficientExecutionReceipts(err) { + return nil, status.Error(codes.NotFound, err.Error()) + } + return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) + } + + resp, err := e.getTransactionResultByIndexFromAnyExeNode(ctx, execNodes, req) + if err != nil { + return nil, rpc.ConvertError(err, "failed to retrieve result from execution node", codes.Internal) + } + + // tx body is irrelevant to status if it's in an executed block + txStatus, err := 
e.txStatusDeriver.DeriveTransactionStatus(block.Height, true) + if err != nil { + irrecoverable.Throw(ctx, fmt.Errorf("failed to derive transaction status: %w", err)) + return nil, err + } + + events, err := convert.MessagesToEventsWithEncodingConversion(resp.GetEvents(), resp.GetEventEncodingVersion(), encodingVersion) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to convert events in blockID %x: %v", blockID, err) + } + + // convert to response, cache and return + return &accessmodel.TransactionResult{ + Status: txStatus, + StatusCode: uint(resp.GetStatusCode()), + Events: events, + ErrorMessage: resp.GetErrorMessage(), + BlockID: blockID, + BlockHeight: block.Height, + }, nil +} + +func (e *ENTransactionProvider) TransactionResultsByBlockID( + ctx context.Context, + block *flow.Block, + requiredEventEncodingVersion entities.EventEncodingVersion, +) ([]*accessmodel.TransactionResult, error) { + blockID := block.ID() + req := &execproto.GetTransactionsByBlockIDRequest{ + BlockId: blockID[:], + } + + execNodes, err := e.nodeProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + if common.IsInsufficientExecutionReceipts(err) { + return nil, status.Error(codes.NotFound, err.Error()) + } + return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) + } + + executionResponse, err := e.getTransactionResultsByBlockIDFromAnyExeNode(ctx, execNodes, req) + if err != nil { + return nil, rpc.ConvertError(err, "failed to retrieve result from execution node", codes.Internal) + } + + txStatus, err := e.txStatusDeriver.DeriveTransactionStatus(block.Height, true) + if err != nil { + irrecoverable.Throw(ctx, fmt.Errorf("failed to derive transaction status: %w", err)) + return nil, err + } + + userTxResults, err := e.userTransactionResults( + ctx, + executionResponse, + block, + blockID, + txStatus, + requiredEventEncodingVersion, + ) + if err != nil { + return nil, rpc.ConvertError(err, "failed to construct user transaction results", codes.Internal) + } + + // root block has no system transaction result + if block.Height == e.state.Params().SporkRootBlockHeight() { + return userTxResults, nil + } + + // there must be at least one system transaction result + if len(userTxResults) >= len(executionResponse.TransactionResults) { + return nil, status.Errorf(codes.Internal, "no system transaction results") + } + + remainingTxResults := executionResponse.TransactionResults[len(userTxResults):] + + systemTxResults, err := e.systemTransactionResults( + remainingTxResults, + block, + blockID, + txStatus, + executionResponse, + requiredEventEncodingVersion, + ) + if err != nil { + return nil, rpc.ConvertError(err, "failed to construct system transaction results", codes.Internal) + } + + return append(userTxResults, systemTxResults...), nil +} + +func (e *ENTransactionProvider) SystemTransaction( + ctx context.Context, + block *flow.Block, + txID flow.Identifier, +) (*flow.TransactionBody, error) { + blockID := block.ID() + + if txID == e.systemTxID || !e.scheduledCallbacksEnabled { + systemTx, err := blueprints.SystemChunkTransaction(e.chainID.Chain()) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to construct system chunk transaction: %v", err) + } + + if txID == systemTx.ID() { + return systemTx, nil + } + return nil, fmt.Errorf("transaction %s not found in block %s", txID, blockID) + } + + events, err := e.getBlockEvents(ctx, blockID, e.processScheduledCallbackEventType) + if err != nil { + return nil, 
rpc.ConvertError(err, "failed to retrieve events from any execution node", codes.Internal)
+	}
+
+	sysCollection, err := blueprints.SystemCollection(e.chainID.Chain(), events)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "could not construct system collection: %v", err)
+	}
+
+	for _, tx := range sysCollection.Transactions {
+		if tx.ID() == txID {
+			return tx, nil
+		}
+	}
+
+	return nil, status.Errorf(codes.NotFound, "system transaction not found")
+}
+
+func (e *ENTransactionProvider) SystemTransactionResult(
+	ctx context.Context,
+	block *flow.Block,
+	txID flow.Identifier,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) (*accessmodel.TransactionResult, error) {
+	// make sure the request is for a system transaction
+	if txID != e.systemTxID {
+		if _, err := e.SystemTransaction(ctx, block, txID); err != nil {
+			return nil, status.Errorf(codes.NotFound, "system transaction not found")
+		}
+	}
+	return e.TransactionResult(ctx, block.ToHeader(), txID, requiredEventEncodingVersion)
+}
+
+// userTransactionResults constructs the user transaction results from the execution node response.
+//
+// It does so by iterating through all user collections (excluding the system collection) in the block
+// and constructing the transaction results.
+func (e *ENTransactionProvider) userTransactionResults(
+	ctx context.Context,
+	resp *execproto.GetTransactionResultsResponse,
+	block *flow.Block,
+	blockID flow.Identifier,
+	txStatus flow.TransactionStatus,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) ([]*accessmodel.TransactionResult, error) {
+
+	results := make([]*accessmodel.TransactionResult, 0, len(resp.TransactionResults))
+	errInsufficientResults := status.Errorf(
+		codes.Internal,
+		"number of transaction results returned by execution node is less than the number of transactions in the block",
+	)
+
+	i := 0
+	for _, guarantee := range block.Payload.Guarantees {
+		collection, err := e.collections.LightByID(guarantee.CollectionID)
+		if err != nil {
+			return nil, rpc.ConvertStorageError(err)
+		}
+
+		for _, txID := range collection.Transactions {
+			// bounds check: this means the EN returned fewer transaction results than there are transactions in the block
+			if i >= len(resp.TransactionResults) {
+				return nil, errInsufficientResults
+			}
+			txResult := resp.TransactionResults[i]
+
+			events, err := convert.MessagesToEventsWithEncodingConversion(txResult.GetEvents(), resp.GetEventEncodingVersion(), requiredEventEncodingVersion)
+			if err != nil {
+				return nil, status.Errorf(codes.Internal,
+					"failed to convert events to message in txID %x: %v", txID, err)
+			}
+
+			results = append(results, &accessmodel.TransactionResult{
+				Status:        txStatus,
+				StatusCode:    uint(txResult.GetStatusCode()),
+				Events:        events,
+				ErrorMessage:  txResult.GetErrorMessage(),
+				BlockID:       blockID,
+				TransactionID: txID,
+				CollectionID:  guarantee.CollectionID,
+				BlockHeight:   block.Height,
+			})
+
+			i++
+		}
+	}
+
+	return results, nil
+}
+
+// systemTransactionResults constructs the system transaction results from the execution node response.
+//
+// It does so by iterating through all system transactions in the block and constructing the transaction results.
+// System transactions are the transactions that follow the user transactions in the execution node response.
+// We always return a transaction result for the system chunk transaction; if scheduled callbacks are enabled,
+// we also return results for the process and execute callback transactions.
+func (e *ENTransactionProvider) systemTransactionResults(
+	systemTxResults []*execproto.GetTransactionResultResponse,
+	block *flow.Block,
+	blockID flow.Identifier,
+	txStatus flow.TransactionStatus,
+	resp *execproto.GetTransactionResultsResponse,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) ([]*accessmodel.TransactionResult, error) {
+	systemTxIDs, err := e.systemTransactionIDs(systemTxResults, resp.GetEventEncodingVersion())
+	if err != nil {
+		return nil, rpc.ConvertError(err, "failed to determine system transaction IDs", codes.Internal)
+	}
+
+	// systemTransactionIDs automatically detects whether scheduled callbacks were enabled for the block
+	// based on the number of system transactions in the response. The resulting list should always
+	// have the same length as the number of system transactions in the response.
+	if len(systemTxIDs) != len(systemTxResults) {
+		return nil, status.Errorf(codes.Internal, "system transaction count mismatch: expected %d, got %d", len(systemTxResults), len(systemTxIDs))
+	}
+
+	results := make([]*accessmodel.TransactionResult, 0, len(systemTxResults))
+	for i, systemTxResult := range systemTxResults {
+		events, err := convert.MessagesToEventsWithEncodingConversion(systemTxResult.GetEvents(), resp.GetEventEncodingVersion(), requiredEventEncodingVersion)
+		if err != nil {
+			return nil, rpc.ConvertError(err, "failed to convert events from system tx result", codes.Internal)
+		}
+
+		results = append(results, &accessmodel.TransactionResult{
+			Status:        txStatus,
+			StatusCode:    uint(systemTxResult.GetStatusCode()),
+			Events:        events,
+			ErrorMessage:  systemTxResult.GetErrorMessage(),
+			BlockID:       blockID,
+			TransactionID: systemTxIDs[i],
+			CollectionID:  flow.ZeroID,
+			BlockHeight:   block.Height,
+		})
+	}
+
+	return results, nil
+}
+
+// systemTransactionIDs determines the system transaction IDs upfront.
+func (e *ENTransactionProvider) systemTransactionIDs(
+	systemTxResults []*execproto.GetTransactionResultResponse,
+	actualEventEncodingVersion entities.EventEncodingVersion,
+) ([]flow.Identifier, error) {
+	// TODO: implement system that allows this endpoint to dynamically determine if scheduled
+	// transactions were enabled for this block.
See https://github.com/onflow/flow-go/issues/7873 + if len(systemTxResults) == 1 { + return []flow.Identifier{e.systemTxID}, nil + } + + // if scheduled callbacks are enabled, the first transaction will always be the "process" transaction + // get its events to reconstruct the system collection + processResult := systemTxResults[0] + + // blueprints.SystemCollection requires events are CCF encoded + events, err := convert.MessagesToEventsWithEncodingConversion( + processResult.GetEvents(), + actualEventEncodingVersion, + entities.EventEncodingVersion_CCF_V0, + ) + if err != nil { + return nil, rpc.ConvertError(err, "failed to convert events", codes.Internal) + } + + sysCollection, err := blueprints.SystemCollection(e.chainID.Chain(), events) + if err != nil { + return nil, rpc.ConvertError(err, "failed to construct system collection", codes.Internal) + } + + var systemTxIDs []flow.Identifier + for _, tx := range sysCollection.Transactions { + systemTxIDs = append(systemTxIDs, tx.ID()) + } + + return systemTxIDs, nil +} + +func (e *ENTransactionProvider) getBlockEvents( + ctx context.Context, + blockID flow.Identifier, + eventType flow.EventType, +) (flow.EventsList, error) { + execNodes, err := e.nodeProvider.ExecutionNodesForBlockID( + ctx, + blockID, + ) + if err != nil { + if common.IsInsufficientExecutionReceipts(err) { + return nil, status.Error(codes.NotFound, err.Error()) + } + return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) + } + + request := &execproto.GetEventsForBlockIDsRequest{ + BlockIds: [][]byte{blockID[:]}, + Type: string(eventType), + } + + resp, err := e.getBlockEventsByBlockIDsFromAnyExeNode(ctx, execNodes, request) + if err != nil { + return nil, rpc.ConvertError(err, "failed to retrieve result from any execution node", codes.Internal) + } + + var events flow.EventsList + for _, result := range resp.GetResults() { + resultEvents, err := convert.MessagesToEventsWithEncodingConversion( + result.GetEvents(), + resp.GetEventEncodingVersion(), + entities.EventEncodingVersion_CCF_V0, + ) + if err != nil { + return nil, rpc.ConvertError(err, "failed to convert events", codes.Internal) + } + events = append(events, resultEvents...) + } + + return events, nil +} + +func (e *ENTransactionProvider) getTransactionResultFromAnyExeNode( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetTransactionResultRequest, +) (*execproto.GetTransactionResultResponse, error) { + var errToReturn error + + defer func() { + if errToReturn != nil { + e.log.Info().Err(errToReturn).Msg("failed to get transaction result from execution nodes") + } + }() + + var resp *execproto.GetTransactionResultResponse + errToReturn = e.nodeCommunicator.CallAvailableNode( + execNodes, + func(node *flow.IdentitySkeleton) error { + var err error + resp, err = e.tryGetTransactionResult(ctx, node, req) + if err == nil { + e.log.Debug(). + Str("execution_node", node.String()). + Hex("block_id", req.GetBlockId()). + Hex("transaction_id", req.GetTransactionId()). 
+                    Msg("Successfully got transaction results from any node")
+                return nil
+            }
+            return err
+        },
+        nil,
+    )
+
+    return resp, errToReturn
+}
+
+func (e *ENTransactionProvider) getTransactionResultsByBlockIDFromAnyExeNode(
+    ctx context.Context,
+    execNodes flow.IdentitySkeletonList,
+    req *execproto.GetTransactionsByBlockIDRequest,
+) (*execproto.GetTransactionResultsResponse, error) {
+    var errToReturn error
+
+    defer func() {
+        // log the errors
+        if errToReturn != nil {
+            e.log.Err(errToReturn).Msg("failed to get transaction results from execution nodes")
+        }
+    }()
+
+    // if we were passed zero execution nodes, return a specific error
+    if len(execNodes) == 0 {
+        return nil, errors.New("zero execution nodes")
+    }
+
+    var resp *execproto.GetTransactionResultsResponse
+    errToReturn = e.nodeCommunicator.CallAvailableNode(
+        execNodes,
+        func(node *flow.IdentitySkeleton) error {
+            var err error
+            resp, err = e.tryGetTransactionResultsByBlockID(ctx, node, req)
+            if err == nil {
+                e.log.Debug().
+                    Str("execution_node", node.String()).
+                    Hex("block_id", req.GetBlockId()).
+                    Msg("Successfully got transaction results from any node")
+                return nil
+            }
+            return err
+        },
+        nil,
+    )
+
+    return resp, errToReturn
+}
+
+func (e *ENTransactionProvider) getTransactionResultByIndexFromAnyExeNode(
+    ctx context.Context,
+    execNodes flow.IdentitySkeletonList,
+    req *execproto.GetTransactionByIndexRequest,
+) (*execproto.GetTransactionResultResponse, error) {
+    var errToReturn error
+    defer func() {
+        if errToReturn != nil {
+            e.log.Info().Err(errToReturn).Msg("failed to get transaction result from execution nodes")
+        }
+    }()
+
+    if len(execNodes) == 0 {
+        return nil, errors.New("zero execution nodes provided")
+    }
+
+    var resp *execproto.GetTransactionResultResponse
+    errToReturn = e.nodeCommunicator.CallAvailableNode(
+        execNodes,
+        func(node *flow.IdentitySkeleton) error {
+            var err error
+            resp, err = e.tryGetTransactionResultByIndex(ctx, node, req)
+            if err == nil {
+                e.log.Debug().
+                    Str("execution_node", node.String()).
+                    Hex("block_id", req.GetBlockId()).
+                    Uint32("index", req.GetIndex()).
+ Msg("Successfully got transaction results from any node") + return nil + } + return err + }, + nil, + ) + + return resp, errToReturn +} + +func (e *ENTransactionProvider) getBlockEventsByBlockIDsFromAnyExeNode( + ctx context.Context, + execNodes flow.IdentitySkeletonList, + req *execproto.GetEventsForBlockIDsRequest, +) (*execproto.GetEventsForBlockIDsResponse, error) { + var errToReturn error + defer func() { + if errToReturn != nil { + e.log.Info().Err(errToReturn).Msg("failed to get block events from execution nodes") + } + }() + + if len(execNodes) == 0 { + return nil, errors.New("zero execution nodes provided") + } + + var resp *execproto.GetEventsForBlockIDsResponse + errToReturn = e.nodeCommunicator.CallAvailableNode( + execNodes, + func(node *flow.IdentitySkeleton) error { + var err error + resp, err = e.tryGetBlockEventsByBlockIDs(ctx, node, req) + return err + }, + nil, + ) + + return resp, errToReturn +} + +func (e *ENTransactionProvider) tryGetTransactionResult( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetTransactionResultRequest, +) (*execproto.GetTransactionResultResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + resp, err := execRPCClient.GetTransactionResult(ctx, req) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ENTransactionProvider) tryGetTransactionResultsByBlockID( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetTransactionsByBlockIDRequest, +) (*execproto.GetTransactionResultsResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + resp, err := execRPCClient.GetTransactionResultsByBlockID(ctx, req) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ENTransactionProvider) tryGetTransactionResultByIndex( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetTransactionByIndexRequest, +) (*execproto.GetTransactionResultResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + resp, err := execRPCClient.GetTransactionResultByIndex(ctx, req) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *ENTransactionProvider) tryGetBlockEventsByBlockIDs( + ctx context.Context, + execNode *flow.IdentitySkeleton, + req *execproto.GetEventsForBlockIDsRequest, +) (*execproto.GetEventsForBlockIDsResponse, error) { + execRPCClient, closer, err := e.connFactory.GetExecutionAPIClient(execNode.Address) + if err != nil { + return nil, err + } + defer closer.Close() + + resp, err := execRPCClient.GetEventsForBlockIDs(ctx, req) + if err != nil { + return nil, err + } + + return resp, nil +} diff --git a/engine/access/rpc/backend/transactions/provider/failover.go b/engine/access/rpc/backend/transactions/provider/failover.go new file mode 100644 index 00000000000..d20b1a2c37d --- /dev/null +++ b/engine/access/rpc/backend/transactions/provider/failover.go @@ -0,0 +1,110 @@ +package provider + +import ( + "context" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" +) + +type FailoverTransactionProvider struct { + localProvider TransactionProvider + execNodeProvider TransactionProvider +} + 
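+// A FailoverTransactionProvider consults the local provider first and falls back to
+// the execution-node provider only when the local lookup returns an error. A minimal
+// wiring sketch (hypothetical; constructor arguments for the two providers are elided):
+//
+//	var local, execNode TransactionProvider // e.g., a LocalTransactionProvider and an ENTransactionProvider
+//	failover := NewFailoverTransactionProvider(local, execNode)
+//	result, err := failover.TransactionResult(ctx, header, txID, encodingVersion)
+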
+var _ TransactionProvider = (*FailoverTransactionProvider)(nil) + +func NewFailoverTransactionProvider(local TransactionProvider, execNode TransactionProvider) *FailoverTransactionProvider { + return &FailoverTransactionProvider{ + localProvider: local, + execNodeProvider: execNode, + } +} + +func (f *FailoverTransactionProvider) TransactionResult( + ctx context.Context, + header *flow.Header, + txID flow.Identifier, + encodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + localResult, localErr := f.localProvider.TransactionResult(ctx, header, txID, encodingVersion) + if localErr == nil { + return localResult, nil + } + + execNodeResult, execNodeErr := f.execNodeProvider.TransactionResult(ctx, header, txID, encodingVersion) + return execNodeResult, execNodeErr +} + +func (f *FailoverTransactionProvider) TransactionResultByIndex( + ctx context.Context, + block *flow.Block, + index uint32, + encodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + localResult, localErr := f.localProvider.TransactionResultByIndex(ctx, block, index, encodingVersion) + if localErr == nil { + return localResult, nil + } + + execNodeResult, execNodeErr := f.execNodeProvider.TransactionResultByIndex(ctx, block, index, encodingVersion) + return execNodeResult, execNodeErr +} + +func (f *FailoverTransactionProvider) TransactionResultsByBlockID( + ctx context.Context, + block *flow.Block, + encodingVersion entities.EventEncodingVersion, +) ([]*accessmodel.TransactionResult, error) { + localResults, localErr := f.localProvider.TransactionResultsByBlockID(ctx, block, encodingVersion) + if localErr == nil { + return localResults, nil + } + + execNodeResults, execNodeErr := f.execNodeProvider.TransactionResultsByBlockID(ctx, block, encodingVersion) + return execNodeResults, execNodeErr +} + +func (f *FailoverTransactionProvider) TransactionsByBlockID( + ctx context.Context, + block *flow.Block, +) ([]*flow.TransactionBody, error) { + localResults, localErr := f.localProvider.TransactionsByBlockID(ctx, block) + if localErr == nil { + return localResults, nil + } + + execNodeResults, execNodeErr := f.execNodeProvider.TransactionsByBlockID(ctx, block) + return execNodeResults, execNodeErr +} + +func (f *FailoverTransactionProvider) SystemTransaction( + ctx context.Context, + block *flow.Block, + txID flow.Identifier, +) (*flow.TransactionBody, error) { + localResult, localErr := f.localProvider.SystemTransaction(ctx, block, txID) + if localErr == nil { + return localResult, nil + } + + execNodeResult, execNodeErr := f.execNodeProvider.SystemTransaction(ctx, block, txID) + return execNodeResult, execNodeErr +} + +func (f *FailoverTransactionProvider) SystemTransactionResult( + ctx context.Context, + block *flow.Block, + txID flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + localResult, localErr := f.localProvider.SystemTransactionResult(ctx, block, txID, requiredEventEncodingVersion) + if localErr == nil { + return localResult, nil + } + + execNodeResult, execNodeErr := f.execNodeProvider.SystemTransactionResult(ctx, block, txID, requiredEventEncodingVersion) + return execNodeResult, execNodeErr +} diff --git a/engine/access/rpc/backend/transactions/provider/local.go b/engine/access/rpc/backend/transactions/provider/local.go new file mode 100644 index 00000000000..9fa1ebec785 --- /dev/null +++ b/engine/access/rpc/backend/transactions/provider/local.go @@ -0,0 +1,452 @@ 
+package provider + +import ( + "context" + "errors" + "fmt" + + "google.golang.org/grpc/status" + + "github.com/onflow/flow/protobuf/go/flow/entities" + "google.golang.org/grpc/codes" + + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/fvm/blueprints" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// ErrTransactionNotInBlock represents an error indicating that the transaction is not found in the block. +var ErrTransactionNotInBlock = errors.New("transaction not in block") + +// LocalTransactionProvider provides functionality for retrieving transaction results and error messages from local storages +type LocalTransactionProvider struct { + state protocol.State + collections storage.Collections + blocks storage.Blocks + eventsIndex *index.EventsIndex + txResultsIndex *index.TransactionResultsIndex + txErrorMessages error_messages.Provider + systemTxID flow.Identifier + txStatusDeriver *txstatus.TxStatusDeriver + scheduledCallbacksEnabled bool + chainID flow.ChainID +} + +var _ TransactionProvider = (*LocalTransactionProvider)(nil) + +func NewLocalTransactionProvider( + state protocol.State, + collections storage.Collections, + blocks storage.Blocks, + eventsIndex *index.EventsIndex, + txResultsIndex *index.TransactionResultsIndex, + txErrorMessages error_messages.Provider, + systemTxID flow.Identifier, + txStatusDeriver *txstatus.TxStatusDeriver, + chainID flow.ChainID, + scheduledCallbacksEnabled bool, +) *LocalTransactionProvider { + return &LocalTransactionProvider{ + state: state, + collections: collections, + blocks: blocks, + eventsIndex: eventsIndex, + txResultsIndex: txResultsIndex, + txErrorMessages: txErrorMessages, + systemTxID: systemTxID, + txStatusDeriver: txStatusDeriver, + scheduledCallbacksEnabled: scheduledCallbacksEnabled, + chainID: chainID, + } +} + +// TransactionResult retrieves a transaction result from storage by block ID and transaction ID. +// Expected errors during normal operation: +// - codes.NotFound when result cannot be provided by storage due to the absence of data. +// - codes.Internal if event payload conversion failed. +// - indexer.ErrIndexNotInitialized when txResultsIndex not initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// +// All other errors are considered as state corruption (fatal) or internal errors in the transaction error message +// getter or when deriving transaction status. 
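+//
+// The returned StatusCode follows the execution node convention: 0 indicates a
+// successful transaction and 1 indicates a failed one.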
+func (t *LocalTransactionProvider) TransactionResult( + ctx context.Context, + block *flow.Header, + transactionID flow.Identifier, + encodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + blockID := block.ID() + txResult, err := t.txResultsIndex.ByBlockIDTransactionID(blockID, block.Height, transactionID) + if err != nil { + return nil, rpc.ConvertIndexError(err, block.Height, "failed to get transaction result") + } + + var txErrorMessage string + var txStatusCode uint = 0 + if txResult.Failed { + txErrorMessage, err = t.txErrorMessages.ErrorMessageByTransactionID(ctx, blockID, block.Height, transactionID) + if err != nil { + return nil, err + } + + if len(txErrorMessage) == 0 { + return nil, status.Errorf( + codes.Internal, + "transaction failed but error message is empty for tx ID: %s block ID: %s", + txResult.TransactionID, + blockID, + ) + } + + txStatusCode = 1 // statusCode of 1 indicates an error and 0 indicates no error, the same as on EN + } + + txStatus, err := t.txStatusDeriver.DeriveTransactionStatus(block.Height, true) + if err != nil { + if !errors.Is(err, state.ErrUnknownSnapshotReference) { + irrecoverable.Throw(ctx, err) + } + return nil, rpc.ConvertStorageError(err) + } + + events, err := t.eventsIndex.ByBlockIDTransactionID(blockID, block.Height, transactionID) + if err != nil { + return nil, rpc.ConvertIndexError(err, block.Height, "failed to get events") + } + + // events are encoded in CCF format in storage. convert to JSON-CDC if requested + if encodingVersion == entities.EventEncodingVersion_JSON_CDC_V0 { + events, err = convert.CcfEventsToJsonEvents(events) + if err != nil { + return nil, rpc.ConvertError(err, "failed to convert event payload", codes.Internal) + } + } + + return &accessmodel.TransactionResult{ + TransactionID: txResult.TransactionID, + Status: txStatus, + StatusCode: txStatusCode, + Events: events, + ErrorMessage: txErrorMessage, + BlockID: blockID, + BlockHeight: block.Height, + }, nil +} + +// TransactionResultByIndex retrieves a transaction result by index from storage. +// Expected errors during normal operation: +// - codes.NotFound if result cannot be provided by storage due to the absence of data. +// - codes.Internal when event payload conversion failed. +// - indexer.ErrIndexNotInitialized when txResultsIndex not initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// +// All other errors are considered as state corruption (fatal) or internal errors in the transaction error message +// getter or when deriving transaction status. 
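+//
+// In addition to the fields returned by TransactionResult, this method also resolves
+// the transaction's collection ID from the block's guarantees (see lookupCollectionIDInBlock).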
+func (t *LocalTransactionProvider) TransactionResultByIndex( + ctx context.Context, + block *flow.Block, + index uint32, + eventEncoding entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + blockID := block.ID() + txResult, err := t.txResultsIndex.ByBlockIDTransactionIndex(blockID, block.Height, index) + if err != nil { + return nil, rpc.ConvertIndexError(err, block.Height, "failed to get transaction result") + } + + var txErrorMessage string + var txStatusCode uint = 0 + if txResult.Failed { + txErrorMessage, err = t.txErrorMessages.ErrorMessageByIndex(ctx, blockID, block.Height, index) + if err != nil { + return nil, err + } + + if len(txErrorMessage) == 0 { + return nil, status.Errorf(codes.Internal, "transaction failed but error message is empty for tx ID: %s block ID: %s", txResult.TransactionID, blockID) + } + + txStatusCode = 1 // statusCode of 1 indicates an error and 0 indicates no error, the same as on EN + } + + txStatus, err := t.txStatusDeriver.DeriveTransactionStatus(block.Height, true) + if err != nil { + if !errors.Is(err, state.ErrUnknownSnapshotReference) { + irrecoverable.Throw(ctx, err) + } + return nil, rpc.ConvertStorageError(err) + } + + events, err := t.eventsIndex.ByBlockIDTransactionIndex(blockID, block.Height, index) + if err != nil { + return nil, rpc.ConvertIndexError(err, block.Height, "failed to get events") + } + + // events are encoded in CCF format in storage. convert to JSON-CDC if requested + if eventEncoding == entities.EventEncodingVersion_JSON_CDC_V0 { + events, err = convert.CcfEventsToJsonEvents(events) + if err != nil { + return nil, rpc.ConvertError(err, "failed to convert event payload", codes.Internal) + } + } + + collectionID, err := t.lookupCollectionIDInBlock(block, txResult.TransactionID) + if err != nil { + return nil, err + } + + return &accessmodel.TransactionResult{ + TransactionID: txResult.TransactionID, + Status: txStatus, + StatusCode: txStatusCode, + Events: events, + ErrorMessage: txErrorMessage, + BlockID: blockID, + BlockHeight: block.Height, + CollectionID: collectionID, + }, nil +} + +// TransactionsByBlockID retrieves transactions by block ID from storage +// Expected errors during normal operation: +// - codes.NotFound if result cannot be provided by storage due to the absence of data. +// - codes.Internal when event payload conversion failed. +// - indexer.ErrIndexNotInitialized when txResultsIndex not initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// +// All other errors are considered as state corruption (fatal) or internal errors in the transaction error message +// getter or when deriving transaction status. +func (t *LocalTransactionProvider) TransactionsByBlockID( + ctx context.Context, + block *flow.Block, +) ([]*flow.TransactionBody, error) { + var transactions []*flow.TransactionBody + blockID := block.ID() + + for _, guarantee := range block.Payload.Guarantees { + collection, err := t.collections.ByID(guarantee.CollectionID) + if err != nil { + return nil, rpc.ConvertStorageError(err) + } + + transactions = append(transactions, collection.Transactions...) 
+ } + + if !t.scheduledCallbacksEnabled { + systemTx, err := blueprints.SystemChunkTransaction(t.chainID.Chain()) + if err != nil { + return nil, fmt.Errorf("failed to construct system chunk transaction: %w", err) + } + + return append(transactions, systemTx), nil + } + + events, err := t.eventsIndex.ByBlockID(blockID, block.Height) + if err != nil { + return nil, rpc.ConvertIndexError(err, block.Height, "failed to get events") + } + + sysCollection, err := blueprints.SystemCollection(t.chainID.Chain(), events) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not construct system collection: %v", err) + } + + return append(transactions, sysCollection.Transactions...), nil +} + +// TransactionResultsByBlockID retrieves transaction results by block ID from storage +// Expected errors during normal operation: +// - codes.NotFound if result cannot be provided by storage due to the absence of data. +// - codes.Internal when event payload conversion failed. +// - indexer.ErrIndexNotInitialized when txResultsIndex not initialized +// - storage.ErrHeightNotIndexed when data is unavailable +// +// All other errors are considered as state corruption (fatal) or internal errors in the transaction error message +// getter or when deriving transaction status. +func (t *LocalTransactionProvider) TransactionResultsByBlockID( + ctx context.Context, + block *flow.Block, + requiredEventEncodingVersion entities.EventEncodingVersion, +) ([]*accessmodel.TransactionResult, error) { + blockID := block.ID() + txResults, err := t.txResultsIndex.ByBlockID(blockID, block.Height) + if err != nil { + return nil, rpc.ConvertIndexError(err, block.Height, "failed to get transaction result") + } + + txErrors, err := t.txErrorMessages.ErrorMessagesByBlockID(ctx, blockID, block.Height) + if err != nil { + return nil, err + } + + numberOfTxResults := len(txResults) + results := make([]*accessmodel.TransactionResult, 0, numberOfTxResults) + + // cache the tx to collectionID mapping to avoid repeated lookups + txToCollectionID, err := t.buildTxIDToCollectionIDMapping(block) + if err != nil { + // this indicates that one or more of the collections for the block are not indexed. Since + // lookups are gated on the indexer signaling it has finished processing all data for the + // block, all data must be available in storage, otherwise there is an inconsistency in the + // state. + irrecoverable.Throw(ctx, fmt.Errorf("inconsistent index state: %w", err)) + return nil, status.Errorf(codes.Internal, "failed to map tx to collection ID: %v", err) + } + + txStatus, err := t.txStatusDeriver.DeriveTransactionStatus(block.Height, true) + if err != nil { + if !errors.Is(err, state.ErrUnknownSnapshotReference) { + irrecoverable.Throw(ctx, err) + } + return nil, rpc.ConvertStorageError(err) + } + + for _, txResult := range txResults { + txID := txResult.TransactionID + + var txErrorMessage string + var txStatusCode uint = 0 + if txResult.Failed { + txErrorMessage = txErrors[txResult.TransactionID] + if len(txErrorMessage) == 0 { + return nil, status.Errorf(codes.Internal, "transaction failed but error message is empty for tx ID: %s block ID: %s", txID, blockID) + } + txStatusCode = 1 + } + + events, err := t.eventsIndex.ByBlockIDTransactionID(blockID, block.Height, txResult.TransactionID) + if err != nil { + return nil, rpc.ConvertIndexError(err, block.Height, "failed to get events") + } + + // events are encoded in CCF format in storage. 
convert to JSON-CDC if requested + if requiredEventEncodingVersion == entities.EventEncodingVersion_JSON_CDC_V0 { + events, err = convert.CcfEventsToJsonEvents(events) + if err != nil { + return nil, rpc.ConvertError(err, "failed to convert event payload", codes.Internal) + } + } + + collectionID, ok := txToCollectionID[txID] + if !ok { + // for all the transactions that are not in the block's user collections we assign the + // ZeroID indicating system collection. + collectionID = flow.ZeroID + } + + results = append(results, &accessmodel.TransactionResult{ + Status: txStatus, + StatusCode: txStatusCode, + Events: events, + ErrorMessage: txErrorMessage, + BlockID: blockID, + TransactionID: txID, + CollectionID: collectionID, + BlockHeight: block.Height, + }) + } + + return results, nil +} + +// SystemTransaction rebuilds the system transaction from storage +func (t *LocalTransactionProvider) SystemTransaction( + ctx context.Context, + block *flow.Block, + txID flow.Identifier, +) (*flow.TransactionBody, error) { + blockID := block.ID() + + if txID == t.systemTxID || !t.scheduledCallbacksEnabled { + systemTx, err := blueprints.SystemChunkTransaction(t.chainID.Chain()) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to construct system chunk transaction: %v", err) + } + + if txID == systemTx.ID() { + return systemTx, nil + } + return nil, fmt.Errorf("transaction %s not found in block %s", txID, blockID) + } + + events, err := t.eventsIndex.ByBlockID(blockID, block.Height) + if err != nil { + return nil, rpc.ConvertIndexError(err, block.Height, "failed to get events") + } + + sysCollection, err := blueprints.SystemCollection(t.chainID.Chain(), events) + if err != nil { + return nil, status.Errorf(codes.Internal, "could not construct system collection: %v", err) + } + + for _, tx := range sysCollection.Transactions { + if tx.ID() == txID { + return tx, nil + } + } + + return nil, status.Errorf(codes.NotFound, "system transaction not found") +} + +func (t *LocalTransactionProvider) SystemTransactionResult( + ctx context.Context, + block *flow.Block, + txID flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, +) (*accessmodel.TransactionResult, error) { + // make sure the request is for a system transaction + if txID != t.systemTxID { + if _, err := t.SystemTransaction(ctx, block, txID); err != nil { + return nil, status.Errorf(codes.NotFound, "system transaction not found") + } + } + return t.TransactionResult(ctx, block.ToHeader(), txID, requiredEventEncodingVersion) +} + +// lookupCollectionIDInBlock returns the collection ID based on the transaction ID. +// The lookup is performed in block collections. +func (t *LocalTransactionProvider) lookupCollectionIDInBlock( + block *flow.Block, + txID flow.Identifier, +) (flow.Identifier, error) { + for _, guarantee := range block.Payload.Guarantees { + collection, err := t.collections.LightByID(guarantee.CollectionID) + if err != nil { + return flow.ZeroID, fmt.Errorf("failed to get collection %s in indexed block: %w", guarantee.CollectionID, err) + } + + for _, collectionTxID := range collection.Transactions { + if collectionTxID == txID { + return guarantee.CollectionID, nil + } + } + } + return flow.ZeroID, ErrTransactionNotInBlock +} + +// buildTxIDToCollectionIDMapping returns a map of transaction ID to collection ID based on the provided block. +// No errors expected during normal operations. 
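+//
+// Note: the mapping only covers transactions from the block's user collections; callers
+// treat any transaction missing from the map as part of the system collection and
+// assign it flow.ZeroID.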
+func (t *LocalTransactionProvider) buildTxIDToCollectionIDMapping(block *flow.Block) (map[flow.Identifier]flow.Identifier, error) { + txToCollectionID := make(map[flow.Identifier]flow.Identifier) + for _, guarantee := range block.Payload.Guarantees { + collection, err := t.collections.LightByID(guarantee.CollectionID) + if err != nil { + // if the tx result is in storage, the collection must be too. + return nil, fmt.Errorf("failed to get collection %s in indexed block: %w", guarantee.CollectionID, err) + } + for _, txID := range collection.Transactions { + txToCollectionID[txID] = guarantee.CollectionID + } + } + + return txToCollectionID, nil +} diff --git a/engine/access/rpc/backend/transactions/provider/mock/transaction_provider.go b/engine/access/rpc/backend/transactions/provider/mock/transaction_provider.go new file mode 100644 index 00000000000..31d2714821c --- /dev/null +++ b/engine/access/rpc/backend/transactions/provider/mock/transaction_provider.go @@ -0,0 +1,214 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + access "github.com/onflow/flow-go/model/access" + + entities "github.com/onflow/flow/protobuf/go/flow/entities" + + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// TransactionProvider is an autogenerated mock type for the TransactionProvider type +type TransactionProvider struct { + mock.Mock +} + +// SystemTransaction provides a mock function with given fields: ctx, block, txID +func (_m *TransactionProvider) SystemTransaction(ctx context.Context, block *flow.Block, txID flow.Identifier) (*flow.TransactionBody, error) { + ret := _m.Called(ctx, block, txID) + + if len(ret) == 0 { + panic("no return value specified for SystemTransaction") + } + + var r0 *flow.TransactionBody + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, flow.Identifier) (*flow.TransactionBody, error)); ok { + return rf(ctx, block, txID) + } + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, flow.Identifier) *flow.TransactionBody); ok { + r0 = rf(ctx, block, txID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionBody) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *flow.Block, flow.Identifier) error); ok { + r1 = rf(ctx, block, txID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SystemTransactionResult provides a mock function with given fields: ctx, block, txID, requiredEventEncodingVersion +func (_m *TransactionProvider) SystemTransactionResult(ctx context.Context, block *flow.Block, txID flow.Identifier, requiredEventEncodingVersion entities.EventEncodingVersion) (*access.TransactionResult, error) { + ret := _m.Called(ctx, block, txID, requiredEventEncodingVersion) + + if len(ret) == 0 { + panic("no return value specified for SystemTransactionResult") + } + + var r0 *access.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, flow.Identifier, entities.EventEncodingVersion) (*access.TransactionResult, error)); ok { + return rf(ctx, block, txID, requiredEventEncodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, flow.Identifier, entities.EventEncodingVersion) *access.TransactionResult); ok { + r0 = rf(ctx, block, txID, requiredEventEncodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *flow.Block, flow.Identifier, entities.EventEncodingVersion) 
error); ok { + r1 = rf(ctx, block, txID, requiredEventEncodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionResult provides a mock function with given fields: ctx, header, txID, encodingVersion +func (_m *TransactionProvider) TransactionResult(ctx context.Context, header *flow.Header, txID flow.Identifier, encodingVersion entities.EventEncodingVersion) (*access.TransactionResult, error) { + ret := _m.Called(ctx, header, txID, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for TransactionResult") + } + + var r0 *access.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.Header, flow.Identifier, entities.EventEncodingVersion) (*access.TransactionResult, error)); ok { + return rf(ctx, header, txID, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, *flow.Header, flow.Identifier, entities.EventEncodingVersion) *access.TransactionResult); ok { + r0 = rf(ctx, header, txID, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *flow.Header, flow.Identifier, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, header, txID, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionResultByIndex provides a mock function with given fields: ctx, block, index, encodingVersion +func (_m *TransactionProvider) TransactionResultByIndex(ctx context.Context, block *flow.Block, index uint32, encodingVersion entities.EventEncodingVersion) (*access.TransactionResult, error) { + ret := _m.Called(ctx, block, index, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for TransactionResultByIndex") + } + + var r0 *access.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, uint32, entities.EventEncodingVersion) (*access.TransactionResult, error)); ok { + return rf(ctx, block, index, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, uint32, entities.EventEncodingVersion) *access.TransactionResult); ok { + r0 = rf(ctx, block, index, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *flow.Block, uint32, entities.EventEncodingVersion) error); ok { + r1 = rf(ctx, block, index, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionResultsByBlockID provides a mock function with given fields: ctx, block, encodingVersion +func (_m *TransactionProvider) TransactionResultsByBlockID(ctx context.Context, block *flow.Block, encodingVersion entities.EventEncodingVersion) ([]*access.TransactionResult, error) { + ret := _m.Called(ctx, block, encodingVersion) + + if len(ret) == 0 { + panic("no return value specified for TransactionResultsByBlockID") + } + + var r0 []*access.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, entities.EventEncodingVersion) ([]*access.TransactionResult, error)); ok { + return rf(ctx, block, encodingVersion) + } + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, entities.EventEncodingVersion) []*access.TransactionResult); ok { + r0 = rf(ctx, block, encodingVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*access.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *flow.Block, entities.EventEncodingVersion) 
error); ok { + r1 = rf(ctx, block, encodingVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransactionsByBlockID provides a mock function with given fields: ctx, block +func (_m *TransactionProvider) TransactionsByBlockID(ctx context.Context, block *flow.Block) ([]*flow.TransactionBody, error) { + ret := _m.Called(ctx, block) + + if len(ret) == 0 { + panic("no return value specified for TransactionsByBlockID") + } + + var r0 []*flow.TransactionBody + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block) ([]*flow.TransactionBody, error)); ok { + return rf(ctx, block) + } + if rf, ok := ret.Get(0).(func(context.Context, *flow.Block) []*flow.TransactionBody); ok { + r0 = rf(ctx, block) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*flow.TransactionBody) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *flow.Block) error); ok { + r1 = rf(ctx, block) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTransactionProvider creates a new instance of TransactionProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionProvider { + mock := &TransactionProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/transactions/provider/provider.go b/engine/access/rpc/backend/transactions/provider/provider.go new file mode 100644 index 00000000000..6daa7d6c4bf --- /dev/null +++ b/engine/access/rpc/backend/transactions/provider/provider.go @@ -0,0 +1,52 @@ +package provider + +import ( + "context" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" +) + +// TransactionProvider defines an interface for retrieving transaction results +// from various data sources, such as local storage and execution nodes. +type TransactionProvider interface { + TransactionResult( + ctx context.Context, + header *flow.Header, + txID flow.Identifier, + encodingVersion entities.EventEncodingVersion, + ) (*accessmodel.TransactionResult, error) + + TransactionResultByIndex( + ctx context.Context, + block *flow.Block, + index uint32, + encodingVersion entities.EventEncodingVersion, + ) (*accessmodel.TransactionResult, error) + + TransactionResultsByBlockID( + ctx context.Context, + block *flow.Block, + encodingVersion entities.EventEncodingVersion, + ) ([]*accessmodel.TransactionResult, error) + + TransactionsByBlockID( + ctx context.Context, + block *flow.Block, + ) ([]*flow.TransactionBody, error) + + SystemTransaction( + ctx context.Context, + block *flow.Block, + txID flow.Identifier, + ) (*flow.TransactionBody, error) + + SystemTransactionResult( + ctx context.Context, + block *flow.Block, + txID flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, + ) (*accessmodel.TransactionResult, error) +} diff --git a/engine/access/rpc/backend/transactions/retrier/mock/retrier.go b/engine/access/rpc/backend/transactions/retrier/mock/retrier.go new file mode 100644 index 00000000000..9dc257e6640 --- /dev/null +++ b/engine/access/rpc/backend/transactions/retrier/mock/retrier.go @@ -0,0 +1,50 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// Retrier is an autogenerated mock type for the Retrier type +type Retrier struct { + mock.Mock +} + +// RegisterTransaction provides a mock function with given fields: height, tx +func (_m *Retrier) RegisterTransaction(height uint64, tx *flow.TransactionBody) { + _m.Called(height, tx) +} + +// Retry provides a mock function with given fields: height +func (_m *Retrier) Retry(height uint64) error { + ret := _m.Called(height) + + if len(ret) == 0 { + panic("no return value specified for Retry") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64) error); ok { + r0 = rf(height) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewRetrier creates a new instance of Retrier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRetrier(t interface { + mock.TestingT + Cleanup(func()) +}) *Retrier { + mock := &Retrier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/transactions/retrier/mock/transaction_sender.go b/engine/access/rpc/backend/transactions/retrier/mock/transaction_sender.go new file mode 100644 index 00000000000..1322ab974c2 --- /dev/null +++ b/engine/access/rpc/backend/transactions/retrier/mock/transaction_sender.go @@ -0,0 +1,47 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// TransactionSender is an autogenerated mock type for the TransactionSender type +type TransactionSender struct { + mock.Mock +} + +// SendRawTransaction provides a mock function with given fields: ctx, tx +func (_m *TransactionSender) SendRawTransaction(ctx context.Context, tx *flow.TransactionBody) error { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendRawTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *flow.TransactionBody) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTransactionSender creates a new instance of TransactionSender. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewTransactionSender(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionSender { + mock := &TransactionSender{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/backend/transactions/retrier/noop.go b/engine/access/rpc/backend/transactions/retrier/noop.go new file mode 100644 index 00000000000..5f228e078c7 --- /dev/null +++ b/engine/access/rpc/backend/transactions/retrier/noop.go @@ -0,0 +1,19 @@ +package retrier + +import ( + "github.com/onflow/flow-go/model/flow" +) + +type NoopRetrier struct{} + +var _ Retrier = (*NoopRetrier)(nil) + +func NewNoopRetrier() *NoopRetrier { + return &NoopRetrier{} +} + +func (n *NoopRetrier) Retry(_ uint64) error { + return nil +} + +func (n *NoopRetrier) RegisterTransaction(_ uint64, _ *flow.TransactionBody) {} diff --git a/engine/access/rpc/backend/transactions/retrier/retrier.go b/engine/access/rpc/backend/transactions/retrier/retrier.go new file mode 100644 index 00000000000..8f829827a14 --- /dev/null +++ b/engine/access/rpc/backend/transactions/retrier/retrier.go @@ -0,0 +1,181 @@ +package retrier + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/storage" +) + +// RetryFrequency has to be less than TransactionExpiry or else this module does nothing +const RetryFrequency uint64 = 120 // Blocks + +type Transactions map[flow.Identifier]*flow.TransactionBody +type BlockHeightToTransactions map[uint64]Transactions + +type TransactionSender interface { + SendRawTransaction(ctx context.Context, tx *flow.TransactionBody) error +} + +type Retrier interface { + Retry(height uint64) error + RegisterTransaction(height uint64, tx *flow.TransactionBody) +} + +// RetrierImpl implements a simple retry mechanism for transaction submission. +type RetrierImpl struct { + log zerolog.Logger + + mu sync.RWMutex + pendingTransactions BlockHeightToTransactions + + blocks storage.Blocks + collections storage.Collections + + txSender TransactionSender + txStatusDeriver *status.TxStatusDeriver +} + +func NewRetrier( + log zerolog.Logger, + blocks storage.Blocks, + collections storage.Collections, + txSender TransactionSender, + txStatusDeriver *status.TxStatusDeriver, +) *RetrierImpl { + return &RetrierImpl{ + log: log, + pendingTransactions: BlockHeightToTransactions{}, + blocks: blocks, + collections: collections, + txSender: txSender, + txStatusDeriver: txStatusDeriver, + } +} + +// Retry attempts to resend transactions for a specified block height. +// It performs cleanup operations, including pruning old transactions, and retries sending +// transactions that are still pending. +// The method takes a block height as input. If the provided height is lower than +// flow.DefaultTransactionExpiry, no retries are performed, and the method returns nil. +// No errors expected during normal operations. 
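+//
+// As a worked example (assuming flow.DefaultTransactionExpiry is 600 blocks and
+// RetryFrequency is 120 blocks): Retry(1200) prunes pending transactions registered
+// below height 600 and retries those registered at heights 720, 840, 960, and 1080.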
+func (r *RetrierImpl) Retry(height uint64) error {
+    // No need to retry if height is lower than DefaultTransactionExpiry
+    if height < flow.DefaultTransactionExpiry {
+        return nil
+    }
+
+    // naive cleanup for now, prune every 120 blocks
+    if height%RetryFrequency == 0 {
+        r.prune(height)
+    }
+
+    heightToRetry := height - flow.DefaultTransactionExpiry + RetryFrequency
+
+    for heightToRetry < height {
+        err := r.retryTxsAtHeight(heightToRetry)
+        if err != nil {
+            return err
+        }
+        heightToRetry = heightToRetry + RetryFrequency
+    }
+    return nil
+}
+
+// RegisterTransaction adds a transaction that could possibly be retried
+func (r *RetrierImpl) RegisterTransaction(height uint64, tx *flow.TransactionBody) {
+    r.mu.Lock()
+    defer r.mu.Unlock()
+    if r.pendingTransactions[height] == nil {
+        r.pendingTransactions[height] = make(map[flow.Identifier]*flow.TransactionBody)
+    }
+    r.pendingTransactions[height][tx.ID()] = tx
+}
+
+func (r *RetrierImpl) prune(height uint64) {
+    r.mu.Lock()
+    defer r.mu.Unlock()
+    // If height is less than the default expiry, there will be no expired transactions
+    if height < flow.DefaultTransactionExpiry {
+        return
+    }
+    for h := range r.pendingTransactions {
+        if h < height-flow.DefaultTransactionExpiry {
+            delete(r.pendingTransactions, h)
+        }
+    }
+}
+
+// retryTxsAtHeight retries transactions at a specific block height.
+// It looks up transactions at the specified height and retries sending
+// raw transactions for those that are still pending. It also cleans up
+// transactions that are no longer pending or have an unknown status.
+// Error returns:
+// - errors are unexpected and potentially symptoms of internal implementation bugs or state corruption (fatal).
+func (r *RetrierImpl) retryTxsAtHeight(heightToRetry uint64) error {
+    r.mu.Lock()
+    defer r.mu.Unlock()
+    txsAtHeight := r.pendingTransactions[heightToRetry]
+    for txID, tx := range txsAtHeight {
+        // find the block for the transaction
+        block, err := r.lookupBlock(txID)
+        if err != nil {
+            if !errors.Is(err, storage.ErrNotFound) {
+                return err
+            }
+            block = nil
+        }
+
+        // find the transaction status
+        var status flow.TransactionStatus
+        if block == nil {
+            status, err = r.txStatusDeriver.DeriveUnknownTransactionStatus(tx.ReferenceBlockID)
+        } else {
+            status, err = r.txStatusDeriver.DeriveTransactionStatus(block.Height, false)
+        }
+
+        if err != nil {
+            if !errors.Is(err, state.ErrUnknownSnapshotReference) {
+                return err
+            }
+            continue
+        }
+        if status == flow.TransactionStatusPending {
+            err = r.txSender.SendRawTransaction(context.Background(), tx)
+            if err != nil {
+                r.log.Info().
+                    Str("retry", fmt.Sprintf("retryTxsAtHeight: %v", heightToRetry)).
+                    Err(err).
+                    Msg("failed to send raw transactions")
+            }
+        } else if status != flow.TransactionStatusUnknown {
+            // not pending or unknown, don't need to retry anymore
+            delete(txsAtHeight, txID)
+        }
+    }
+    return nil
+}
+
+// lookupBlock returns the block that contains the given transaction, resolved via the transaction's collection.
+// Error returns:
+// - `storage.ErrNotFound` - either the collection referenced by the transaction or the block referenced by the collection has not been found.
+// - all other errors are unexpected and potentially symptoms of internal implementation bugs or state corruption (fatal).
+func (r *RetrierImpl) lookupBlock(txID flow.Identifier) (*flow.Block, error) {
+    collection, err := r.collections.LightByTransactionID(txID)
+    if err != nil {
+        return nil, err
+    }
+
+    block, err := r.blocks.ByCollectionID(collection.ID())
+    if err != nil {
+        return nil, err
+    }
+
+    return block, nil
+}
diff --git a/engine/access/rpc/backend/transactions/status/deriver.go b/engine/access/rpc/backend/transactions/status/deriver.go
new file mode 100644
index 00000000000..d32b9d26357
--- /dev/null
+++ b/engine/access/rpc/backend/transactions/status/deriver.go
@@ -0,0 +1,96 @@
+package status
+
+import (
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/module/counters"
+    "github.com/onflow/flow-go/module/irrecoverable"
+    "github.com/onflow/flow-go/state/protocol"
+)
+
+type TxStatusDeriver struct {
+    state               protocol.State
+    lastFullBlockHeight *counters.PersistentStrictMonotonicCounter
+}
+
+func NewTxStatusDeriver(state protocol.State, lastFullBlockHeight *counters.PersistentStrictMonotonicCounter) *TxStatusDeriver {
+    return &TxStatusDeriver{
+        state:               state,
+        lastFullBlockHeight: lastFullBlockHeight,
+    }
+}
+
+// DeriveUnknownTransactionStatus is used to determine the status of transactions
+// that are not yet in a block, based on the provided reference block ID.
+func (t *TxStatusDeriver) DeriveUnknownTransactionStatus(refBlockID flow.Identifier) (flow.TransactionStatus, error) {
+    referenceBlock, err := t.state.AtBlockID(refBlockID).Head()
+    if err != nil {
+        return flow.TransactionStatusUnknown, err
+    }
+    refHeight := referenceBlock.Height
+    // get the latest finalized block from the state
+    finalized, err := t.state.Final().Head()
+    if err != nil {
+        return flow.TransactionStatusUnknown, irrecoverable.NewExceptionf("failed to lookup final header: %w", err)
+    }
+    finalizedHeight := finalized.Height
+
+    // if we haven't seen the expiry block for this transaction, it's not expired
+    if !isExpired(refHeight, finalizedHeight) {
+        return flow.TransactionStatusPending, nil
+    }
+
+    // At this point, we have seen the expiry block for the transaction.
+    // This means that, if no collections prior to the expiry block contain
+    // the transaction, it can never be included and is expired.
+    //
+    // To ensure this, we need to have received all collections up to the
+    // expiry block to ensure the transaction did not appear in any.
+
+    // the last full height is the height where we have received all
+    // collections for all blocks with a lower height
+    fullHeight := t.lastFullBlockHeight.Value()
+
+    // if we have received collections for all blocks up to the expiry block, the transaction is expired
+    if isExpired(refHeight, fullHeight) {
+        return flow.TransactionStatusExpired, nil
+    }
+
+    // tx found in transaction storage and collection storage but not in block storage.
+    // However, this case does not currently occur, since the ingestion engine does not
+    // subscribe to collections.
+    return flow.TransactionStatusPending, nil
+}
+
+// DeriveTransactionStatus is used to determine the status of a transaction based on the provided block height and execution status.
+// No errors expected during normal operations.
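+//
+// In short: a block that has not been executed yields TransactionStatusFinalized; an
+// executed block above the latest sealed height yields TransactionStatusExecuted; and
+// an executed block at or below the sealed height yields TransactionStatusSealed.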
+func (t *TxStatusDeriver) DeriveTransactionStatus(blockHeight uint64, executed bool) (flow.TransactionStatus, error) {
+    if !executed {
+        // If we've gotten here, but the block has not yet been executed, report it as only finalized
+        return flow.TransactionStatusFinalized, nil
+    }
+
+    // From this point on, we know for sure this transaction has at least been executed
+
+    // get the latest sealed block from the state
+    sealed, err := t.state.Sealed().Head()
+    if err != nil {
+        return flow.TransactionStatusUnknown, irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err)
+    }
+
+    if blockHeight > sealed.Height {
+        // The block is not yet sealed, so we'll report it as only executed
+        return flow.TransactionStatusExecuted, nil
+    }
+
+    // otherwise, this block has been executed and sealed, so report it as sealed
+    return flow.TransactionStatusSealed, nil
+}
+
+// isExpired checks whether a transaction is expired given the height of the
+// transaction's reference block and the height to compare against.
+func isExpired(refHeight, compareToHeight uint64) bool {
+    if compareToHeight <= refHeight {
+        return false
+    }
+    return compareToHeight-refHeight > flow.DefaultTransactionExpiry
+}
diff --git a/engine/access/rpc/backend/transactions/stream/stream_backend.go b/engine/access/rpc/backend/transactions/stream/stream_backend.go
new file mode 100644
index 00000000000..65f029a8d4d
--- /dev/null
+++ b/engine/access/rpc/backend/transactions/stream/stream_backend.go
@@ -0,0 +1,304 @@
+package stream
+
+import (
+    "context"
+    "errors"
+    "fmt"
+
+    "google.golang.org/grpc/codes"
+    "google.golang.org/grpc/status"
+
+    "github.com/rs/zerolog"
+
+    "github.com/onflow/flow/protobuf/go/flow/entities"
+
+    "github.com/onflow/flow-go/access"
+    txprovider "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider"
+    txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status"
+    "github.com/onflow/flow-go/engine/access/subscription"
+    "github.com/onflow/flow-go/engine/access/subscription/tracker"
+    accessmodel "github.com/onflow/flow-go/model/access"
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/module/irrecoverable"
+    "github.com/onflow/flow-go/state/protocol"
+    "github.com/onflow/flow-go/storage"
+)
+
+// TransactionExpiryForUnknownStatus defines the number of blocks after which
+// a transaction with an unknown status is considered expired.
+const TransactionExpiryForUnknownStatus = flow.DefaultTransactionExpiry
+
+// sendTransaction defines a function type for sending a transaction.
+type sendTransaction func(ctx context.Context, tx *flow.TransactionBody) error
+
+// TransactionStream manages transaction subscriptions for monitoring transaction statuses.
+// It provides functionality to send transactions, subscribe to transaction status updates,
+// and handle subscription lifecycles.
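+//
+// Transaction lookups are served through a FailoverTransactionProvider, so status
+// information is read from local storage first, with execution nodes as a fallback.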
+type TransactionStream struct { + log zerolog.Logger + state protocol.State + subscriptionHandler *subscription.SubscriptionHandler + blockTracker tracker.BlockTracker + sendTransaction sendTransaction + + blocks storage.Blocks + collections storage.Collections + transactions storage.Transactions + + txProvider *txprovider.FailoverTransactionProvider + txStatusDeriver *txstatus.TxStatusDeriver +} + +var _ access.TransactionStreamAPI = (*TransactionStream)(nil) + +func NewTransactionStreamBackend( + log zerolog.Logger, + state protocol.State, + subscriptionHandler *subscription.SubscriptionHandler, + blockTracker tracker.BlockTracker, + sendTransaction sendTransaction, + blocks storage.Blocks, + collections storage.Collections, + transactions storage.Transactions, + txProvider *txprovider.FailoverTransactionProvider, + txStatusDeriver *txstatus.TxStatusDeriver, +) *TransactionStream { + return &TransactionStream{ + log: log, + state: state, + subscriptionHandler: subscriptionHandler, + blockTracker: blockTracker, + sendTransaction: sendTransaction, + blocks: blocks, + collections: collections, + transactions: transactions, + txProvider: txProvider, + txStatusDeriver: txStatusDeriver, + } +} + +// SendAndSubscribeTransactionStatuses sends a transaction and subscribes to its status updates. +// +// The subscription begins monitoring from the reference block specified in the transaction itself and +// streams updates until the transaction reaches a final state ([flow.TransactionStatusSealed] or [flow.TransactionStatusExpired]). +// Upon reaching a final state, the subscription automatically terminates. +// +// Parameters: +// - ctx: The context to manage the transaction sending and subscription lifecycle, including cancellation. +// - tx: The transaction body to be sent and monitored. +// - requiredEventEncodingVersion: The version of event encoding required for the subscription. +// +// If the transaction cannot be sent, the subscription will fail and return a failed subscription. +func (t *TransactionStream) SendAndSubscribeTransactionStatuses( + ctx context.Context, + tx *flow.TransactionBody, + requiredEventEncodingVersion entities.EventEncodingVersion, +) subscription.Subscription { + if err := t.sendTransaction(ctx, tx); err != nil { + t.log.Debug().Err(err).Str("tx_id", tx.ID().String()).Msg("failed to send transaction") + return subscription.NewFailedSubscription(err, "failed to send transaction") + } + + return t.createSubscription(ctx, tx.ID(), tx.ReferenceBlockID, tx.ReferenceBlockID, requiredEventEncodingVersion) +} + +// SubscribeTransactionStatuses subscribes to status updates for a given transaction ID. +// +// The subscription starts monitoring from the last sealed block. Updates are streamed +// until the transaction reaches a final state ([flow.TransactionStatusSealed] or [flow.TransactionStatusExpired]). +// The subscription terminates automatically once the final state is reached. +// +// Parameters: +// - ctx: The context to manage the subscription's lifecycle, including cancellation. +// - txID: The unique identifier of the transaction to monitor. +// - requiredEventEncodingVersion: The version of event encoding required for the subscription. 
+func (t *TransactionStream) SubscribeTransactionStatuses( + ctx context.Context, + txID flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, +) subscription.Subscription { + header, err := t.state.Sealed().Head() + if err != nil { + // throw the exception as the node must have the current sealed block in storage + irrecoverable.Throw(ctx, fmt.Errorf("failed to lookup sealed block: %w", err)) + return subscription.NewFailedSubscription(err, "failed to lookup sealed block") + } + + return t.createSubscription(ctx, txID, header.ID(), flow.ZeroID, requiredEventEncodingVersion) +} + +// createSubscription initializes a transaction subscription for monitoring status updates. +// +// The subscription monitors the transaction's progress starting from the specified block ID. +// It streams updates until the transaction reaches a final state or an error occurs. +// +// Parameters: +// - ctx: Context to manage the subscription lifecycle. +// - txID: The unique identifier of the transaction to monitor. +// - startBlockID: The ID of the block to start monitoring from. +// - referenceBlockID: The ID of the transaction's reference block. +// - requiredEventEncodingVersion: The required version of event encoding. +// +// Returns: +// - subscription.Subscription: A subscription for monitoring transaction status updates. +// +// If the start height cannot be determined or current transaction state cannot be determined, a failed subscription is returned. +func (t *TransactionStream) createSubscription( + ctx context.Context, + txID flow.Identifier, + startBlockID flow.Identifier, + referenceBlockID flow.Identifier, + requiredEventEncodingVersion entities.EventEncodingVersion, +) subscription.Subscription { + // Determine the height of the block to start the subscription from. + startHeight, err := t.blockTracker.GetStartHeightFromBlockID(startBlockID) + if err != nil { + t.log.Debug().Err(err).Str("block_id", startBlockID.String()).Msg("failed to get start height") + return subscription.NewFailedSubscription(err, "failed to get start height") + } + + txInfo := NewTransactionMetadata( + t.blocks, + t.collections, + t.transactions, + txID, + referenceBlockID, + requiredEventEncodingVersion, + t.txProvider, + t.txStatusDeriver, + ) + + return t.subscriptionHandler.Subscribe(ctx, startHeight, t.getTransactionStatusResponse(txInfo, startHeight)) +} + +// getTransactionStatusResponse returns a callback function that produces transaction status +// subscription responses based on new blocks. 
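+// The subscription handler invokes the returned callback once for each block height,
+// starting from startHeight.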
+// The returned callback is not concurrency-safe.
+func (t *TransactionStream) getTransactionStatusResponse(
+    txInfo *TransactionMetadata,
+    startHeight uint64,
+) func(context.Context, uint64) (interface{}, error) {
+    return func(ctx context.Context, height uint64) (interface{}, error) {
+        err := t.checkBlockReady(height)
+        if err != nil {
+            return nil, err
+        }
+
+        if txInfo.txResult.IsFinal() {
+            return nil, fmt.Errorf("transaction final status %s already reported: %w", txInfo.txResult.Status.String(), subscription.ErrEndOfData)
+        }
+
+        // time out waiting for unknown txs that are never indexed
+        if hasReachedUnknownStatusLimit(height, startHeight, txInfo.txResult.Status) {
+            txInfo.txResult.Status = flow.TransactionStatusExpired
+            return generateResultsStatuses(txInfo.txResult, flow.TransactionStatusUnknown)
+        }
+
+        // Get the old status here, as it could be replaced by the status from the found tx result
+        prevTxStatus := txInfo.txResult.Status
+
+        if err = txInfo.Refresh(ctx); err != nil {
+            if errors.Is(err, subscription.ErrBlockNotReady) {
+                return nil, err
+            }
+            if statusErr, ok := status.FromError(err); ok {
+                return nil, status.Errorf(codes.Internal, "failed to refresh transaction information: %v", statusErr)
+            }
+            return nil, fmt.Errorf("unexpected error refreshing transaction information: %w", err)
+        }
+
+        return generateResultsStatuses(txInfo.txResult, prevTxStatus)
+    }
+}
+
+// hasReachedUnknownStatusLimit checks if a transaction's status is still unknown
+// after the expiry limit has been reached.
+func hasReachedUnknownStatusLimit(height, startHeight uint64, status flow.TransactionStatus) bool {
+    if status != flow.TransactionStatusUnknown {
+        return false
+    }
+
+    return height-startHeight >= TransactionExpiryForUnknownStatus
+}
+
+// checkBlockReady checks if the given block height is valid and available based on the expected block status.
+// Expected errors during normal operation:
+// - [subscription.ErrBlockNotReady]: block for the given block height is not available.
+func (t *TransactionStream) checkBlockReady(height uint64) error {
+    // Get the highest available finalized block height
+    highestHeight, err := t.blockTracker.GetHighestHeight(flow.BlockStatusFinalized)
+    if err != nil {
+        return fmt.Errorf("could not get highest height for block %d: %w", height, err)
+    }
+
+    // Fail early if no block finalized notification has been received for the given height.
+    // Note: It's possible that the block is locally finalized before the notification is
+    // received. This ensures a consistent view is available to all streams.
+    if height > highestHeight {
+        return fmt.Errorf("block %d is not available yet: %w", height, subscription.ErrBlockNotReady)
+    }
+
+    return nil
+}
+
+// generateResultsStatuses checks if the current result differs from the previous result by more than one step.
+// If so, it generates results for the missing transaction statuses. This is done because the subscription should send
+// responses for each status in the transaction lifecycle, and the messages should be sent in the order of transaction statuses.
+// Possible orders of transaction statuses:
+// 1. pending(1) -> finalized(2) -> executed(3) -> sealed(4)
+// 2. pending(1) -> expired(5)
+// No errors expected during normal operations.
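+//
+// For example, if the previously reported status was pending(1) and the new result is
+// sealed(4), the returned slice contains results with statuses finalized(2), executed(3),
+// and sealed(4), in that order.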
+func generateResultsStatuses( + txResult *accessmodel.TransactionResult, + prevTxStatus flow.TransactionStatus, +) ([]*accessmodel.TransactionResult, error) { + // If the old and new transaction statuses are still the same, the status change should not be reported, so + // return here with no response. + if prevTxStatus == txResult.Status { + return nil, nil + } + + // return immediately if the new status is expired, since it's the last status + // If the previous status is anything other than pending or unknown, return an error since this transition is unexpected. + if txResult.Status == flow.TransactionStatusExpired { + if prevTxStatus == flow.TransactionStatusPending || prevTxStatus == flow.TransactionStatusUnknown { + return []*accessmodel.TransactionResult{ + txResult, + }, nil + } else { + return nil, fmt.Errorf("unexpected transition from %s to %s transaction status", prevTxStatus.String(), txResult.Status.String()) + } + } + + var results []*accessmodel.TransactionResult + + // If the difference between statuses' values is more than one step, fill in the missing results. + if (txResult.Status - prevTxStatus) > 1 { + for missingStatus := prevTxStatus + 1; missingStatus < txResult.Status; missingStatus++ { + switch missingStatus { + case flow.TransactionStatusPending: + results = append(results, &accessmodel.TransactionResult{ + Status: missingStatus, + TransactionID: txResult.TransactionID, + }) + case flow.TransactionStatusFinalized: + results = append(results, &accessmodel.TransactionResult{ + Status: missingStatus, + TransactionID: txResult.TransactionID, + BlockID: txResult.BlockID, + BlockHeight: txResult.BlockHeight, + CollectionID: txResult.CollectionID, + }) + case flow.TransactionStatusExecuted: + missingTxResult := *txResult + missingTxResult.Status = missingStatus + results = append(results, &missingTxResult) + default: + return nil, fmt.Errorf("unexpected missing transaction status") + } + } + } + + results = append(results, txResult) + return results, nil +} diff --git a/engine/access/rpc/backend/transactions/stream/stream_backend_test.go b/engine/access/rpc/backend/transactions/stream/stream_backend_test.go new file mode 100644 index 00000000000..630a2671480 --- /dev/null +++ b/engine/access/rpc/backend/transactions/stream/stream_backend_test.go @@ -0,0 +1,796 @@ +package stream + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/access/validator" + validatormock "github.com/onflow/flow-go/access/validator/mock" + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/index" + access "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" + txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" + connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" + 
"github.com/onflow/flow-go/engine/access/subscription" + trackermock "github.com/onflow/flow-go/engine/access/subscription/tracker/mock" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/fvm/blueprints" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" + execmock "github.com/onflow/flow-go/module/execution/mock" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + syncmock "github.com/onflow/flow-go/module/state_synchronization/mock" + protocolint "github.com/onflow/flow-go/state/protocol" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/concurrentmap" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +// TransactionStreamSuite represents a suite for testing transaction status-related functionality in the Flow blockchain. +type TransactionStreamSuite struct { + suite.Suite + + state *protocol.State + sealedSnapshot *protocol.Snapshot + finalSnapshot *protocol.Snapshot + tempSnapshot *protocol.Snapshot + log zerolog.Logger + + blocks *storagemock.Blocks + headers *storagemock.Headers + collections *storagemock.Collections + transactions *storagemock.Transactions + receipts *storagemock.ExecutionReceipts + results *storagemock.ExecutionResults + transactionResults *storagemock.LightTransactionResults + events *storagemock.Events + seals *storagemock.Seals + + colClient *access.AccessAPIClient + execClient *access.ExecutionAPIClient + historicalAccessClient *access.AccessAPIClient + archiveClient *access.AccessAPIClient + + connectionFactory *connectionmock.ConnectionFactory + + blockTracker *trackermock.BlockTracker + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + eventIndex *index.EventsIndex + txResultIndex *index.TransactionResultsIndex + + chainID flow.ChainID + + broadcaster *engine.Broadcaster + rootBlock *flow.Block + sealedBlock *flow.Block + finalizedBlock *flow.Block + + blockMap *concurrentmap.Map[uint64, *flow.Block] + + txStreamBackend *TransactionStream + + db storage.DB + dbDir string + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + + systemTx *flow.TransactionBody + + fixedExecutionNodeIDs flow.IdentifierList + preferredExecutionNodeIDs flow.IdentifierList +} + +func TestTransactionStatusSuite(t *testing.T) { + suite.Run(t, new(TransactionStreamSuite)) +} + +// SetupTest initializes the test dependencies, configurations, and mock objects for TransactionStreamSuite tests. 
+func (s *TransactionStreamSuite) SetupTest() { + s.log = unittest.Logger() + s.state = protocol.NewState(s.T()) + s.sealedSnapshot = protocol.NewSnapshot(s.T()) + s.finalSnapshot = protocol.NewSnapshot(s.T()) + s.tempSnapshot = &protocol.Snapshot{} + pdb, dbDir := unittest.TempPebbleDB(s.T()) + s.db = pebbleimpl.ToDB(pdb) + s.dbDir = dbDir + + s.blocks = storagemock.NewBlocks(s.T()) + s.headers = storagemock.NewHeaders(s.T()) + s.transactions = storagemock.NewTransactions(s.T()) + s.collections = storagemock.NewCollections(s.T()) + s.receipts = storagemock.NewExecutionReceipts(s.T()) + s.results = storagemock.NewExecutionResults(s.T()) + s.seals = storagemock.NewSeals(s.T()) + s.colClient = access.NewAccessAPIClient(s.T()) + s.archiveClient = access.NewAccessAPIClient(s.T()) + s.execClient = access.NewExecutionAPIClient(s.T()) + s.transactionResults = storagemock.NewLightTransactionResults(s.T()) + s.events = storagemock.NewEvents(s.T()) + s.chainID = flow.Testnet + s.historicalAccessClient = access.NewAccessAPIClient(s.T()) + s.connectionFactory = connectionmock.NewConnectionFactory(s.T()) + s.broadcaster = engine.NewBroadcaster() + s.blockTracker = trackermock.NewBlockTracker(s.T()) + s.reporter = syncmock.NewIndexReporter(s.T()) + s.indexReporter = index.NewReporter() + err := s.indexReporter.Initialize(s.reporter) + require.NoError(s.T(), err) + s.eventIndex = index.NewEventsIndex(s.indexReporter, s.events) + s.txResultIndex = index.NewTransactionResultsIndex(s.indexReporter, s.transactionResults) + + s.systemTx, err = blueprints.SystemChunkTransaction(s.chainID.Chain()) + s.Require().NoError(err) + + s.fixedExecutionNodeIDs = nil + s.preferredExecutionNodeIDs = nil + + s.initializeBackend() +} + +// TearDownTest cleans up the db +func (s *TransactionStreamSuite) TearDownTest() { + err := os.RemoveAll(s.dbDir) + s.Require().NoError(err) +} + +// initializeBackend sets up and initializes the txStreamBackend with required dependencies, mocks, and configurations for testing. +func (s *TransactionStreamSuite) initializeBackend() { + s.transactions. + On("Store", mock.Anything). + Return(nil). + Maybe() + + s.execClient. + On("GetTransactionResult", mock.Anything, mock.Anything). + Return(nil, status.Error(codes.NotFound, "not found")). + Maybe() + + s.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(s.execClient, &mocks.MockCloser{}, nil). + Maybe() + + s.colClient. + On("SendTransaction", mock.Anything, mock.Anything). + Return(&accessproto.SendTransactionResponse{}, nil). + Maybe() + + // generate blockCount consecutive blocks with associated seal, result and execution data + s.rootBlock = unittest.BlockFixture() + + params := protocol.NewParams(s.T()) + params.On("FinalizedRoot").Return(s.rootBlock.ToHeader()).Maybe() + s.state.On("Params").Return(params).Maybe() + + // this line causes a S1021 lint error because receipts is explicitly declared. 
this is required + // to ensure the mock library handles the response type correctly + var receipts flow.ExecutionReceiptList //nolint:gosimple + executionNodes := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)) + receipts = unittest.ReceiptsForBlockFixture(s.rootBlock, executionNodes.NodeIDs()) + s.receipts.On("ByBlockID", mock.AnythingOfType("flow.Identifier")).Return(receipts, nil).Maybe() + s.finalSnapshot.On("Identities", mock.Anything).Return(executionNodes, nil).Maybe() + + progress, err := store.NewConsumerProgress(s.db, module.ConsumeProgressLastFullBlockHeight).Initialize(s.rootBlock.Height) + require.NoError(s.T(), err) + s.lastFullBlockHeight, err = counters.NewPersistentStrictMonotonicCounter(progress) + require.NoError(s.T(), err) + + s.sealedBlock = s.rootBlock + s.finalizedBlock = unittest.BlockWithParentFixture(s.sealedBlock.ToHeader()) + s.blockMap = concurrentmap.New[uint64, *flow.Block]() + s.blockMap.Add(s.sealedBlock.Height, s.sealedBlock) + s.blockMap.Add(s.finalizedBlock.Height, s.finalizedBlock) + + txStatusDeriver := txstatus.NewTxStatusDeriver( + s.state, + s.lastFullBlockHeight, + ) + + nodeCommunicator := node_communicator.NewNodeCommunicator(false) + + execNodeProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + s.log, + s.state, + s.receipts, + s.preferredExecutionNodeIDs, + s.fixedExecutionNodeIDs, + ) + + errorMessageProvider := error_messages.NewTxErrorMessageProvider( + s.log, + nil, + s.txResultIndex, + s.connectionFactory, + nodeCommunicator, + execNodeProvider, + ) + + localTxProvider := provider.NewLocalTransactionProvider( + s.state, + s.collections, + s.blocks, + s.eventIndex, + s.txResultIndex, + errorMessageProvider, + s.systemTx.ID(), + txStatusDeriver, + s.chainID, + true, // scheduledCallbacksEnabled + ) + + execNodeTxProvider := provider.NewENTransactionProvider( + s.log, + s.state, + s.collections, + s.connectionFactory, + nodeCommunicator, + execNodeProvider, + txStatusDeriver, + s.systemTx.ID(), + s.chainID, + true, // scheduledCallbacksEnabled + ) + + txProvider := provider.NewFailoverTransactionProvider(localTxProvider, execNodeTxProvider) + + subscriptionHandler := subscription.NewSubscriptionHandler( + s.log, + s.broadcaster, + subscription.DefaultSendTimeout, + subscription.DefaultResponseLimit, + subscription.DefaultSendBufferSize, + ) + + validatorBlocks := validatormock.NewBlocks(s.T()) + validatorBlocks. + On("HeaderByID", mock.Anything). + Return(s.finalizedBlock.ToHeader(), nil). + Maybe() // used for some tests + + validatorBlocks. + On("FinalizedHeader", mock.Anything). + Return(s.finalizedBlock.ToHeader(), nil). + Maybe() // used for some tests + + txValidator, err := validator.NewTransactionValidator( + validatorBlocks, + s.chainID.Chain(), + metrics.NewNoopCollector(), + validator.TransactionValidationOptions{ + MaxTransactionByteSize: flow.DefaultMaxTransactionByteSize, + MaxCollectionByteSize: flow.DefaultMaxCollectionByteSize, + }, + execmock.NewScriptExecutor(s.T()), + ) + s.Require().NoError(err) + + txResCache, err := lru.New[flow.Identifier, *accessmodel.TransactionResult](10) + s.Require().NoError(err) + + client := access.NewAccessAPIClient(s.T()) + client. + On("SendTransaction", mock.Anything, mock.Anything). + Return(&accessproto.SendTransactionResponse{}, nil). 
+ Maybe() // used for some tests + + txParams := transactions.Params{ + Log: s.log, + Metrics: metrics.NewNoopCollector(), + State: s.state, + ChainID: s.chainID, + SystemTxID: s.systemTx.ID(), + StaticCollectionRPCClient: client, + HistoricalAccessNodeClients: nil, + NodeCommunicator: nodeCommunicator, + ConnFactory: s.connectionFactory, + EnableRetries: false, + NodeProvider: execNodeProvider, + Blocks: s.blocks, + Collections: s.collections, + Transactions: s.transactions, + TxErrorMessageProvider: errorMessageProvider, + TxResultCache: txResCache, + TxProvider: txProvider, + TxValidator: txValidator, + TxStatusDeriver: txStatusDeriver, + EventsIndex: s.eventIndex, + TxResultsIndex: s.txResultIndex, + } + txBackend, err := transactions.NewTransactionsBackend(txParams) + s.Require().NoError(err) + + s.txStreamBackend = NewTransactionStreamBackend( + s.log, + s.state, + subscriptionHandler, + s.blockTracker, + txBackend.SendTransaction, + s.blocks, + s.collections, + s.transactions, + txProvider, + txStatusDeriver, + ) +} + +func blockByID(blockMap *concurrentmap.Map[uint64, *flow.Block]) func(flow.Identifier) (*flow.Block, error) { + return func(blockID flow.Identifier) (*flow.Block, error) { + var block *flow.Block + _ = blockMap.ForEach(func(height uint64, b *flow.Block) error { + if b.ID() == blockID { + block = b + } + return nil + }) + if block == nil { + return nil, storage.ErrNotFound + } + return block, nil + } +} + +func blockByHeight(blockMap *concurrentmap.Map[uint64, *flow.Block]) func(uint64) (*flow.Block, error) { + return func(height uint64) (*flow.Block, error) { + if block, ok := blockMap.Get(height); ok { + return block, nil + } + return nil, storage.ErrNotFound + } +} + +// initializeMainMockInstructions sets up the main mock behaviors for components used in TransactionStreamSuite tests. +func (s *TransactionStreamSuite) initializeMainMockInstructions() { + s.transactions.On("Store", mock.Anything).Return(nil).Maybe() + + s.blocks.On("ByHeight", mock.AnythingOfType("uint64")).Return(blockByHeight(s.blockMap)).Maybe() + s.blocks.On("ByID", mock.Anything).Return(blockByID(s.blockMap)).Maybe() + + s.state.On("Final").Return(s.finalSnapshot, nil).Maybe() + s.state.On("AtBlockID", mock.AnythingOfType("flow.Identifier")).Return( + func(blockID flow.Identifier) protocolint.Snapshot { + s.tempSnapshot.On("Head").Unset() + s.tempSnapshot.On("Head").Return( + func() (*flow.Header, error) { + block, err := blockByID(s.blockMap)(blockID) + if err != nil { + return nil, err + } + return block.ToHeader(), nil + }, nil) + + return s.tempSnapshot + }, nil).Maybe() + + s.finalSnapshot.On("Head").Return(func() *flow.Header { + return s.finalizedBlock.ToHeader() + }, nil).Maybe() + + s.blockTracker.On("GetStartHeightFromBlockID", mock.Anything).Return(func(_ flow.Identifier) (uint64, error) { + return s.finalizedBlock.Height, nil + }, nil).Maybe() + + s.blockTracker.On("GetHighestHeight", flow.BlockStatusFinalized).Return(func(_ flow.BlockStatus) (uint64, error) { + return s.finalizedBlock.Height, nil + }, nil).Maybe() +} + +// initializeHappyCaseMockInstructions sets up mock behaviors for a happy-case scenario in transaction status testing. 
+func (s *TransactionStreamSuite) initializeHappyCaseMockInstructions() {
+	s.initializeMainMockInstructions()
+
+	s.reporter.On("LowestIndexedHeight").Return(s.rootBlock.Height, nil).Maybe()
+	s.reporter.On("HighestIndexedHeight").Return(func() (uint64, error) {
+		return s.finalizedBlock.Height, nil
+	}, nil).Maybe()
+
+	s.sealedSnapshot.On("Head").Return(func() *flow.Header {
+		return s.sealedBlock.ToHeader()
+	}, nil).Maybe()
+	s.state.On("Sealed").Return(s.sealedSnapshot, nil).Maybe()
+
+	eventsCount := 1
+	eventsForTx := unittest.EventsFixture(eventsCount)
+	eventMessages := make([]*entities.Event, eventsCount)
+	for j, event := range eventsForTx {
+		eventMessages[j] = convert.EventToMessage(event)
+	}
+
+	s.events.On(
+		"ByBlockIDTransactionID",
+		mock.AnythingOfType("flow.Identifier"),
+		mock.AnythingOfType("flow.Identifier"),
+	).Return(eventsForTx, nil).Maybe()
+}
+
+// createSendTransaction generates a transaction whose reference block is the current finalized block
+func (s *TransactionStreamSuite) createSendTransaction() flow.TransactionBody {
+	transaction := unittest.TransactionBodyFixture(unittest.WithReferenceBlock(s.finalizedBlock.ID()))
+	s.transactions.On("ByID", mock.AnythingOfType("flow.Identifier")).Return(&transaction, nil).Maybe()
+	return transaction
+}
+
+// addNewFinalizedBlock sets up a new finalized block using the provided parent header and options, and optionally notifies via broadcasting.
+func (s *TransactionStreamSuite) addNewFinalizedBlock(parent *flow.Header, notify bool, options ...func(*flow.Block)) {
+	s.finalizedBlock = unittest.BlockWithParentFixture(parent)
+	for _, option := range options {
+		option(s.finalizedBlock)
+	}
+
+	s.blockMap.Add(s.finalizedBlock.Height, s.finalizedBlock)
+
+	if notify {
+		s.broadcaster.Publish()
+	}
+}
+
+func (s *TransactionStreamSuite) mockTransactionResult(transactionID *flow.Identifier, hasTransactionResultInStorage *bool) {
+	s.transactionResults.
+		On("ByBlockIDTransactionID", mock.Anything, mock.Anything).
+		Return(
+			func(blockID, txID flow.Identifier) (*flow.LightTransactionResult, error) {
+				if *hasTransactionResultInStorage {
+					return &flow.LightTransactionResult{
+						TransactionID:   *transactionID,
+						Failed:          false,
+						ComputationUsed: 0,
+					}, nil
+				}
+				return nil, storage.ErrNotFound
+			},
+		)
+}
+
+func (s *TransactionStreamSuite) addBlockWithTransaction(transaction *flow.TransactionBody) {
+	col := unittest.CollectionFromTransactions(transaction)
+	colID := col.ID()
+	guarantee := flow.CollectionGuarantee{CollectionID: colID}
+	light := col.Light()
+	s.sealedBlock = s.finalizedBlock
+	s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true, func(block *flow.Block) {
+		var err error
+		block, err = flow.NewBlock(
+			flow.UntrustedBlock{
+				HeaderBody: block.HeaderBody,
+				Payload:    unittest.PayloadFixture(unittest.WithGuarantees(&guarantee)),
+			},
+		)
+		require.NoError(s.T(), err)
+		s.collections.On("LightByID", colID).Return(light, nil).Maybe()
+		s.collections.On("LightByTransactionID", transaction.ID()).Return(light, nil)
+		s.blocks.On("ByCollectionID", colID).Return(block, nil)
+	})
+}
+
+// checkNewSubscriptionMessage reads the next message from the subscription channel, converts it
+// to a list of transaction results, and checks the results for correctness.
+func (s *TransactionStreamSuite) checkNewSubscriptionMessage(sub subscription.Subscription, txId flow.Identifier, expectedTxStatuses []flow.TransactionStatus) {
+	unittest.RequireReturnsBefore(s.T(), func() {
+		v, ok := <-sub.Channel()
+		require.True(s.T(), ok,
+			"channel closed while waiting for transaction info:\n\t- txID %x\n\t- blockID: %x \n\t- err: %v",
+			txId, s.finalizedBlock.ID(), sub.Err())
+
+		txResults, ok := v.([]*accessmodel.TransactionResult)
+		require.True(s.T(), ok, "unexpected response type: %T", v)
+		require.Len(s.T(), txResults, len(expectedTxStatuses))
+
+		for i, expectedTxStatus := range expectedTxStatuses {
+			result := txResults[i]
+			assert.Equal(s.T(), txId, result.TransactionID)
+			assert.Equal(s.T(), expectedTxStatus, result.Status)
+		}
+
+	}, 180*time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID()))
+}
+
+// checkGracefulShutdown ensures the provided subscription shuts down gracefully within a specified timeout duration.
+func (s *TransactionStreamSuite) checkGracefulShutdown(sub subscription.Subscription) {
+	// Ensure subscription shuts down gracefully
+	unittest.RequireReturnsBefore(s.T(), func() {
+		<-sub.Channel()
+		assert.NoError(s.T(), sub.Err())
+	}, 100*time.Millisecond, "timed out waiting for subscription to shutdown")
+}
+
+// TestSendAndSubscribeTransactionStatusHappyCase tests the functionality of the SendAndSubscribeTransactionStatuses method in the Backend.
+// It covers the emulation of transaction stages from pending to sealed, and receiving status updates.
+func (s *TransactionStreamSuite) TestSendAndSubscribeTransactionStatusHappyCase() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	s.initializeHappyCaseMockInstructions()
+
+	// Generate a transaction with a reference block of the current finalized block
+	transaction := s.createSendTransaction()
+	txId := transaction.ID()
+
+	s.collections.On("LightByTransactionID", txId).Return(nil, storage.ErrNotFound).Once()
+
+	hasTransactionResultInStorage := false
+	s.mockTransactionResult(&txId, &hasTransactionResultInStorage)
+
+	// 1.
Subscribe to transaction status and receive the first message with pending status
+	sub := s.txStreamBackend.SendAndSubscribeTransactionStatuses(ctx, &transaction, entities.EventEncodingVersion_CCF_V0)
+	s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusPending})
+
+	// 2. Make transaction reference block sealed, and add a new finalized block that includes the transaction
+	s.addBlockWithTransaction(&transaction)
+	s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusFinalized})
+
+	// 3. Add one more finalized block on top of the transaction block and add execution results to storage
+	// init transaction result for storage
+	hasTransactionResultInStorage = true
+	s.addNewFinalizedBlock(s.finalizedBlock.ToHeader(), true)
+	s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusExecuted})
+
+	// 4. Make the transaction block sealed, and add a new finalized block
+	s.sealedBlock = s.finalizedBlock
+	s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true)
+	s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusSealed})
+
+	// 5. Stop subscription
+	s.sealedBlock = s.finalizedBlock
+	s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true)
+
+	s.checkGracefulShutdown(sub)
+}
+
+// TestSendAndSubscribeTransactionStatusExpired tests the functionality of the SendAndSubscribeTransactionStatuses method in the Backend
+// when the transaction becomes expired
+func (s *TransactionStreamSuite) TestSendAndSubscribeTransactionStatusExpired() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	s.initializeMainMockInstructions()
+
+	s.reporter.On("LowestIndexedHeight").Return(s.rootBlock.Height, nil).Maybe()
+	s.reporter.On("HighestIndexedHeight").Return(func() (uint64, error) {
+		return s.finalizedBlock.Height, nil
+	}, nil).Maybe()
+	s.transactionResults.On(
+		"ByBlockIDTransactionID",
+		mock.AnythingOfType("flow.Identifier"),
+		mock.AnythingOfType("flow.Identifier"),
+	).Return(nil, storage.ErrNotFound).Maybe()
+
+	// Generate a transaction with a reference block of the current finalized block
+	transaction := s.createSendTransaction()
+	txId := transaction.ID()
+	s.collections.On("LightByTransactionID", txId).Return(nil, storage.ErrNotFound)
+
+	// Subscribe to transaction status and receive the first message with pending status
+	sub := s.txStreamBackend.SendAndSubscribeTransactionStatuses(ctx, &transaction, entities.EventEncodingVersion_CCF_V0)
+	s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusPending})
+
+	// Generate 600 blocks without the transaction included and check that the transaction is still pending
+	startHeight := s.finalizedBlock.Height + 1
+	lastHeight := startHeight + flow.DefaultTransactionExpiry
+
+	for i := startHeight; i <= lastHeight; i++ {
+		s.sealedBlock = s.finalizedBlock
+		s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), false)
+	}
+
+	// Generate the final block and check that the transaction is expired
+	s.sealedBlock = s.finalizedBlock
+	err := s.lastFullBlockHeight.Set(s.sealedBlock.Height)
+	s.Require().NoError(err)
+	s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true)
+
+	s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusExpired})
+
+	s.checkGracefulShutdown(sub)
+}
+
+// TestSubscribeTransactionStatusWithCurrentPending verifies the subscription behavior for a transaction starting as pending.
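+// The subscription is opened before the transaction is included in a block, so the first
+// message reports only the pending status; later statuses arrive one at a time as new
+// finalized blocks are added.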
+func (s *TransactionStreamSuite) TestSubscribeTransactionStatusWithCurrentPending() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s.initializeHappyCaseMockInstructions() + + transaction := s.createSendTransaction() + txId := transaction.ID() + s.collections.On("LightByTransactionID", txId).Return(nil, storage.ErrNotFound).Once() + + hasTransactionResultInStorage := false + s.mockTransactionResult(&txId, &hasTransactionResultInStorage) + + sub := s.txStreamBackend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) + s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusPending}) + + s.addBlockWithTransaction(&transaction) + s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusFinalized}) + + hasTransactionResultInStorage = true + s.addNewFinalizedBlock(s.finalizedBlock.ToHeader(), true) + s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusExecuted}) + + s.sealedBlock = s.finalizedBlock + s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true) + s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusSealed}) + + s.sealedBlock = s.finalizedBlock + s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true) + + s.checkGracefulShutdown(sub) +} + +// TestSubscribeTransactionStatusWithCurrentFinalized verifies the subscription behavior for a transaction starting as finalized. +func (s *TransactionStreamSuite) TestSubscribeTransactionStatusWithCurrentFinalized() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s.initializeHappyCaseMockInstructions() + + transaction := s.createSendTransaction() + txId := transaction.ID() + + hasTransactionResultInStorage := false + s.mockTransactionResult(&txId, &hasTransactionResultInStorage) + + s.addBlockWithTransaction(&transaction) + + sub := s.txStreamBackend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0) + s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusPending, flow.TransactionStatusFinalized}) + + hasTransactionResultInStorage = true + s.addNewFinalizedBlock(s.finalizedBlock.ToHeader(), true) + s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusExecuted}) + + s.sealedBlock = s.finalizedBlock + s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true) + s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusSealed}) + + s.sealedBlock = s.finalizedBlock + s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true) + + s.checkGracefulShutdown(sub) +} + +// TestSubscribeTransactionStatusWithCurrentExecuted verifies the subscription behavior for a transaction starting as executed. +func (s *TransactionStreamSuite) TestSubscribeTransactionStatusWithCurrentExecuted() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s.initializeHappyCaseMockInstructions() + + transaction := s.createSendTransaction() + txId := transaction.ID() + + hasTransactionResultInStorage := false + s.mockTransactionResult(&txId, &hasTransactionResultInStorage) + + s.addBlockWithTransaction(&transaction) + + // 3. 
Add one more finalized block on top of the transaction block and add execution results to storage
+	// init transaction result for storage
+	hasTransactionResultInStorage = true
+	s.addNewFinalizedBlock(s.finalizedBlock.ToHeader(), true)
+	sub := s.txStreamBackend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0)
+	s.checkNewSubscriptionMessage(
+		sub,
+		txId,
+		[]flow.TransactionStatus{
+			flow.TransactionStatusPending,
+			flow.TransactionStatusFinalized,
+			flow.TransactionStatusExecuted,
+		})
+
+	// 4. Make the transaction block sealed, and add a new finalized block
+	s.sealedBlock = s.finalizedBlock
+	s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true)
+	s.checkNewSubscriptionMessage(sub, txId, []flow.TransactionStatus{flow.TransactionStatusSealed})
+
+	// 5. Stop subscription
+	s.sealedBlock = s.finalizedBlock
+	s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true)
+
+	s.checkGracefulShutdown(sub)
+}
+
+// TestSubscribeTransactionStatusWithCurrentSealed verifies the subscription behavior for a transaction starting as sealed.
+func (s *TransactionStreamSuite) TestSubscribeTransactionStatusWithCurrentSealed() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	s.initializeHappyCaseMockInstructions()
+
+	transaction := s.createSendTransaction()
+	txId := transaction.ID()
+
+	hasTransactionResultInStorage := false
+	s.mockTransactionResult(&txId, &hasTransactionResultInStorage)
+
+	s.addBlockWithTransaction(&transaction)
+
+	// init transaction result for storage
+	hasTransactionResultInStorage = true
+	s.addNewFinalizedBlock(s.finalizedBlock.ToHeader(), true)
+
+	s.sealedBlock = s.finalizedBlock
+	s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true)
+
+	sub := s.txStreamBackend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0)
+
+	s.checkNewSubscriptionMessage(
+		sub,
+		txId,
+		[]flow.TransactionStatus{
+			flow.TransactionStatusPending,
+			flow.TransactionStatusFinalized,
+			flow.TransactionStatusExecuted,
+			flow.TransactionStatusSealed,
+		},
+	)
+
+	// 5. Stop subscription
+	s.sealedBlock = s.finalizedBlock
+	s.addNewFinalizedBlock(s.sealedBlock.ToHeader(), true)
+
+	s.checkGracefulShutdown(sub)
+}
+
+// TestSubscribeTransactionStatusFailedSubscription verifies the behavior of the subscription when opening it fails.
+// Ensures failure scenarios are handled correctly, such as a missing sealed header or start height.
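+// The irrecoverable case is asserted with a mock signaler context that expects the wrapped
+// "failed to lookup sealed block" error to be thrown.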
+func (s *TransactionStreamSuite) TestSubscribeTransactionStatusFailedSubscription() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	txId := unittest.IdentifierFixture() // ID of transaction with ref block of the current finalized block
+
+	s.Run("throws irrecoverable if sealed header not available", func() {
+		expectedError := storage.ErrNotFound
+		s.state.On("Sealed").Return(s.sealedSnapshot, nil).Once()
+		s.sealedSnapshot.On("Head").Return(nil, expectedError).Once()
+
+		signalerCtx := irrecoverable.WithSignalerContext(ctx,
+			irrecoverable.NewMockSignalerContextExpectError(s.T(), ctx, fmt.Errorf("failed to lookup sealed block: %w", expectedError)))
+
+		sub := s.txStreamBackend.SubscribeTransactionStatuses(signalerCtx, txId, entities.EventEncodingVersion_CCF_V0)
+		s.Assert().ErrorContains(sub.Err(), fmt.Errorf("failed to lookup sealed block: %w", expectedError).Error())
+	})
+
+	s.Run("if could not get start height", func() {
+		s.sealedSnapshot.On("Head").Return(func() *flow.Header {
+			return s.sealedBlock.ToHeader()
+		}, nil).Once()
+		s.state.On("Sealed").Return(s.sealedSnapshot, nil).Once()
+		expectedError := storage.ErrNotFound
+		s.blockTracker.On("GetStartHeightFromBlockID", s.sealedBlock.ID()).Return(uint64(0), expectedError).Once()
+
+		sub := s.txStreamBackend.SubscribeTransactionStatuses(ctx, txId, entities.EventEncodingVersion_CCF_V0)
+		s.Assert().ErrorContains(sub.Err(), expectedError.Error())
+		s.Require().ErrorIs(sub.Err(), expectedError)
+	})
+}
diff --git a/engine/access/rpc/backend/transactions/stream/transaction_metadata.go b/engine/access/rpc/backend/transactions/stream/transaction_metadata.go
new file mode 100644
index 00000000000..33f7df25686
--- /dev/null
+++ b/engine/access/rpc/backend/transactions/stream/transaction_metadata.go
@@ -0,0 +1,257 @@
+package stream
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	txprovider "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider"
+	txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status"
+	"github.com/onflow/flow-go/engine/access/subscription"
+	"github.com/onflow/flow-go/engine/common/rpc"
+	accessmodel "github.com/onflow/flow-go/model/access"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/state"
+	"github.com/onflow/flow-go/storage"
+
+	"github.com/onflow/flow/protobuf/go/flow/entities"
+)
+
+// TransactionMetadata manages the state of a transaction subscription.
+//
+// This struct contains metadata for tracking a transaction's progress, including
+// references to relevant blocks, collections, and transaction results.
+type TransactionMetadata struct {
+	blocks       storage.Blocks
+	collections  storage.Collections
+	transactions storage.Transactions
+
+	txResult           *accessmodel.TransactionResult
+	txReferenceBlockID flow.Identifier
+	blockWithTx        *flow.Header // header of the block containing the transaction; nil until the transaction is observed in a block
+
+	eventEncodingVersion entities.EventEncodingVersion
+
+	txProvider      *txprovider.FailoverTransactionProvider
+	txStatusDeriver *txstatus.TxStatusDeriver
+}
+
+// NewTransactionMetadata initializes a new metadata object for a transaction subscription.
+//
+// This function constructs a transaction metadata object used for tracking the transaction's progress
+// and maintaining its state throughout execution.
+//
+// Parameters:
+// - blocks: Storage for looking up the block containing the transaction's collection.
+// - collections: Storage for looking up the transaction's collection.
+// - transactions: Storage for looking up the transaction body.
+// - txID: The unique identifier of the transaction.
+// - txReferenceBlockID: The ID of the transaction's reference block.
+// - eventEncodingVersion: The required version of event encoding.
+// - txProvider: The provider used to fetch transaction results.
+// - txStatusDeriver: The deriver used to compute transaction statuses.
+//
+// Returns:
+// - *TransactionMetadata: The initialized transaction metadata object.
+func NewTransactionMetadata(
+	blocks storage.Blocks,
+	collections storage.Collections,
+	transactions storage.Transactions,
+	txID flow.Identifier,
+	txReferenceBlockID flow.Identifier,
+	eventEncodingVersion entities.EventEncodingVersion,
+	txProvider *txprovider.FailoverTransactionProvider,
+	txStatusDeriver *txstatus.TxStatusDeriver,
+) *TransactionMetadata {
+	return &TransactionMetadata{
+		txResult:             &accessmodel.TransactionResult{TransactionID: txID},
+		eventEncodingVersion: eventEncodingVersion,
+		blocks:               blocks,
+		collections:          collections,
+		transactions:         transactions,
+		txReferenceBlockID:   txReferenceBlockID,
+		txProvider:           txProvider,
+		txStatusDeriver:      txStatusDeriver,
+	}
+}
+
+// Refresh updates the transaction subscription metadata to reflect the latest state.
+//
+// Parameters:
+// - ctx: Context for managing the operation lifecycle.
+//
+// Expected errors during normal operation:
+// - [subscription.ErrBlockNotReady] if the block at the given height is not found.
+// - codes.Internal if the transaction result cannot be retrieved because event payload conversion failed.
+//
+// All other errors are considered state corruption (fatal) or internal errors encountered while
+// refreshing the transaction result or status.
+func (t *TransactionMetadata) Refresh(ctx context.Context) error {
+	if err := t.refreshCollection(); err != nil {
+		return err
+	}
+	if err := t.refreshBlock(); err != nil {
+		return err
+	}
+	if err := t.refreshTransactionResult(ctx); err != nil {
+		return err
+	}
+	if err := t.refreshStatus(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+// refreshTransactionReferenceBlockID sets the reference block ID for the transaction.
+//
+// If the reference block ID is unset, it attempts to retrieve it from storage.
+//
+// Expected errors during normal operation:
+// - [storage.ErrNotFound] if the transaction has not been received by this node yet.
+func (t *TransactionMetadata) refreshTransactionReferenceBlockID() error {
+	// Get referenceBlockID if it is not set
+	if t.txReferenceBlockID != flow.ZeroID {
+		return nil
+	}
+
+	tx, err := t.transactions.ByID(t.txResult.TransactionID)
+	if err != nil {
+		return fmt.Errorf("failed to lookup transaction by transaction ID: %w", err)
+	}
+	t.txReferenceBlockID = tx.ReferenceBlockID
+	return nil
+}
+
+// refreshStatus updates the transaction's status based on its execution result.
+//
+// Parameters:
+// - ctx: Context for managing the operation lifecycle.
+//
+// No errors expected during normal operations.
+func (t *TransactionMetadata) refreshStatus(ctx context.Context) error {
+	var err error
+
+	if t.blockWithTx == nil {
+		if err = t.refreshTransactionReferenceBlockID(); err != nil {
+			// the transaction was not sent from this node and has not been indexed yet.
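+			// In that case, report the status as unknown and keep the subscription alive:
+			// a later refresh may find the transaction once it has been received or indexed.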
+			if errors.Is(err, storage.ErrNotFound) {
+				t.txResult.Status = flow.TransactionStatusUnknown
+				return nil
+			}
+			return err
+		}
+
+		t.txResult.Status, err = t.txStatusDeriver.DeriveUnknownTransactionStatus(t.txReferenceBlockID)
+		if err != nil {
+			if !errors.Is(err, state.ErrUnknownSnapshotReference) {
+				irrecoverable.Throw(ctx, err)
+			}
+			return rpc.ConvertStorageError(err)
+		}
+		return nil
+	}
+
+	// When the transaction is included in an executed block, the `txResult` may be updated during `Refresh`.
+	// Recheck the status to ensure it's accurate.
+	t.txResult.Status, err = t.txStatusDeriver.DeriveTransactionStatus(t.blockWithTx.Height, t.txResult.IsExecuted())
+	if err != nil {
+		if !errors.Is(err, state.ErrUnknownSnapshotReference) {
+			irrecoverable.Throw(ctx, err)
+		}
+		return rpc.ConvertStorageError(err)
+	}
+	return nil
+}
+
+// refreshBlock updates the block metadata if the transaction has been included in a block.
+//
+// Expected errors during normal operation:
+// - [subscription.ErrBlockNotReady] if the block for the collection ID is not found.
+//
+// All other errors should be treated as exceptions.
+func (t *TransactionMetadata) refreshBlock() error {
+	if t.txResult.CollectionID == flow.ZeroID || t.blockWithTx != nil {
+		return nil
+	}
+
+	block, err := t.blocks.ByCollectionID(t.txResult.CollectionID)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return subscription.ErrBlockNotReady
+		}
+
+		return fmt.Errorf("failed to lookup block containing collection: %w", err)
+	}
+
+	t.blockWithTx = block.ToHeader()
+	t.txResult.BlockID = block.ID()
+	t.txResult.BlockHeight = block.Height
+	return nil
+}
+
+// refreshCollection updates the collection metadata if the transaction has been included in a collection.
+//
+// If the collection is not yet indexed ([storage.ErrNotFound]), the collection ID is left unset
+// and no error is returned.
+//
+// All other errors should be treated as exceptions.
+func (t *TransactionMetadata) refreshCollection() error {
+	if t.txResult.CollectionID != flow.ZeroID {
+		return nil
+	}
+
+	collection, err := t.collections.LightByTransactionID(t.txResult.TransactionID)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return nil
+		}
+		return fmt.Errorf("failed to lookup collection containing tx: %w", err)
+	}
+	t.txResult.CollectionID = collection.ID()
+	return nil
+}
+
+// refreshTransactionResult attempts to retrieve the transaction result from storage or an execution node.
+//
+// Parameters:
+// - ctx: Context for managing the operation lifecycle.
+//
+// If the transaction result is not yet available ([codes.NotFound]), the result is left unchanged
+// and no error is returned.
+//
+// All other errors should be treated as exceptions.
+func (t *TransactionMetadata) refreshTransactionResult(ctx context.Context) error {
+	// skip the check if we already have the result, or if we don't know which block it is in yet
+	if t.blockWithTx == nil || t.txResult.IsExecuted() {
+		return nil
+	}
+
+	txResult, err := t.txProvider.TransactionResult(
+		ctx,
+		t.blockWithTx,
+		t.txResult.TransactionID,
+		t.eventEncodingVersion,
+	)
+	if err != nil {
+		// TODO: I don't like the fact we propagate this error from txProvider.
+		// Fix it during error handling polishing project
+		if status.Code(err) == codes.NotFound {
+			// No result yet, indicate that it has not been executed
+			return nil
+		}
+
+		return fmt.Errorf("unexpected error while getting transaction result: %w", err)
+	}
+
+	// If transaction result was found, fully replace it in metadata.
New transaction status already included in result. + if txResult != nil { + // Preserve the CollectionID to ensure it is not lost during the transaction result assignment. + txResult.CollectionID = t.txResult.CollectionID + t.txResult = txResult + } + + return nil +} diff --git a/engine/access/rpc/backend/transactions/transactions.go b/engine/access/rpc/backend/transactions/transactions.go new file mode 100644 index 00000000000..e04831f7e03 --- /dev/null +++ b/engine/access/rpc/backend/transactions/transactions.go @@ -0,0 +1,647 @@ +package transactions + +import ( + "context" + "errors" + "fmt" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/access/validator" + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider" + "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/retrier" + txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status" + "github.com/onflow/flow-go/engine/access/rpc/connection" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// ErrTransactionNotInBlock represents an error indicating that the transaction is not found in the block. 
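+// It is a sentinel error; callers can detect it with errors.Is(err, ErrTransactionNotInBlock).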
+var ErrTransactionNotInBlock = errors.New("transaction not in block") + +type Transactions struct { + log zerolog.Logger + metrics module.TransactionMetrics + + state protocol.State + chainID flow.ChainID + + systemTxID flow.Identifier + + // RPC Clients & Network + collectionRPCClient accessproto.AccessAPIClient // RPC client tied to a fixed collection node + historicalAccessNodeClients []accessproto.AccessAPIClient + nodeCommunicator node_communicator.Communicator + connectionFactory connection.ConnectionFactory + retrier retrier.Retrier + + // Storages + blocks storage.Blocks + collections storage.Collections + transactions storage.Transactions + events storage.Events + + txResultCache *lru.Cache[flow.Identifier, *accessmodel.TransactionResult] + + txValidator *validator.TransactionValidator + txProvider provider.TransactionProvider + txStatusDeriver *txstatus.TxStatusDeriver + + scheduledCallbacksEnabled bool +} + +var _ access.TransactionsAPI = (*Transactions)(nil) + +type Params struct { + Log zerolog.Logger + Metrics module.TransactionMetrics + State protocol.State + ChainID flow.ChainID + SystemTxID flow.Identifier + StaticCollectionRPCClient accessproto.AccessAPIClient + HistoricalAccessNodeClients []accessproto.AccessAPIClient + NodeCommunicator node_communicator.Communicator + ConnFactory connection.ConnectionFactory + EnableRetries bool + NodeProvider *rpc.ExecutionNodeIdentitiesProvider + Blocks storage.Blocks + Collections storage.Collections + Transactions storage.Transactions + Events storage.Events + TxErrorMessageProvider error_messages.Provider + TxResultCache *lru.Cache[flow.Identifier, *accessmodel.TransactionResult] + TxProvider provider.TransactionProvider + TxValidator *validator.TransactionValidator + TxStatusDeriver *txstatus.TxStatusDeriver + EventsIndex *index.EventsIndex + TxResultsIndex *index.TransactionResultsIndex + ScheduledCallbacksEnabled bool +} + +func NewTransactionsBackend(params Params) (*Transactions, error) { + txs := &Transactions{ + log: params.Log, + metrics: params.Metrics, + state: params.State, + chainID: params.ChainID, + systemTxID: params.SystemTxID, + collectionRPCClient: params.StaticCollectionRPCClient, + historicalAccessNodeClients: params.HistoricalAccessNodeClients, + nodeCommunicator: params.NodeCommunicator, + connectionFactory: params.ConnFactory, + blocks: params.Blocks, + collections: params.Collections, + transactions: params.Transactions, + events: params.Events, + txResultCache: params.TxResultCache, + txValidator: params.TxValidator, + txProvider: params.TxProvider, + txStatusDeriver: params.TxStatusDeriver, + scheduledCallbacksEnabled: params.ScheduledCallbacksEnabled, + } + + if params.EnableRetries { + txs.retrier = retrier.NewRetrier( + params.Log, + params.Blocks, + params.Collections, + txs, + params.TxStatusDeriver, + ) + } else { + txs.retrier = retrier.NewNoopRetrier() + } + + return txs, nil +} + +// SendTransaction forwards the transaction to the collection node +func (t *Transactions) SendTransaction(ctx context.Context, tx *flow.TransactionBody) error { + now := time.Now().UTC() + + err := t.txValidator.Validate(ctx, tx) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid transaction: %s", err.Error()) + } + + // send the transaction to the collection node if valid + err = t.trySendTransaction(ctx, tx) + if err != nil { + t.metrics.TransactionSubmissionFailed() + return rpc.ConvertError(err, "failed to send transaction to a collection node", codes.Internal) + } + + 
t.metrics.TransactionReceived(tx.ID(), now)
+
+	// store the transaction locally
+	err = t.transactions.Store(tx)
+	if err != nil {
+		return status.Errorf(codes.Internal, "failed to store transaction: %v", err)
+	}
+
+	go t.registerTransactionForRetry(tx)
+
+	return nil
+}
+
+// trySendTransaction tries to send the transaction to a collection node
+func (t *Transactions) trySendTransaction(ctx context.Context, tx *flow.TransactionBody) error {
+	// if a collection node rpc client was provided at startup, just use that
+	if t.collectionRPCClient != nil {
+		return t.grpcTxSend(ctx, t.collectionRPCClient, tx)
+	}
+
+	// otherwise choose all collection nodes to try
+	collNodes, err := t.chooseCollectionNodes(tx.ID())
+	if err != nil {
+		return fmt.Errorf("failed to determine collection node for tx %x: %w", tx.ID(), err)
+	}
+
+	var sendError error
+	logAnyError := func() {
+		if sendError != nil {
+			t.log.Info().Err(sendError).Msg("failed to send transactions to collector nodes")
+		}
+	}
+	defer logAnyError()
+
+	// try sending the transaction to one of the chosen collection nodes
+	sendError = t.nodeCommunicator.CallAvailableNode(
+		collNodes,
+		func(node *flow.IdentitySkeleton) error {
+			err = t.sendTransactionToCollector(ctx, tx, node.Address)
+			if err != nil {
+				return err
+			}
+			return nil
+		},
+		nil,
+	)
+
+	return sendError
+}
+
+// chooseCollectionNodes returns the identities of the collection node cluster responsible for the given tx
+func (t *Transactions) chooseCollectionNodes(txID flow.Identifier) (flow.IdentitySkeletonList, error) {
+	// retrieve the set of collector clusters
+	currentEpoch, err := t.state.Final().Epochs().Current()
+	if err != nil {
+		return nil, fmt.Errorf("could not get current epoch: %w", err)
+	}
+	clusters, err := currentEpoch.Clustering()
+	if err != nil {
+		return nil, fmt.Errorf("could not cluster collection nodes: %w", err)
+	}
+
+	// get the cluster responsible for the transaction
+	targetNodes, ok := clusters.ByTxID(txID)
+	if !ok {
+		return nil, fmt.Errorf("could not get local cluster by txID: %x", txID)
+	}
+
+	return targetNodes, nil
+}
+
+// sendTransactionToCollector sends the transaction to the given collection node via grpc
+func (t *Transactions) sendTransactionToCollector(
+	ctx context.Context,
+	tx *flow.TransactionBody,
+	collectionNodeAddr string,
+) error {
+	collectionRPC, closer, err := t.connectionFactory.GetCollectionAPIClient(collectionNodeAddr, nil)
+	if err != nil {
+		return fmt.Errorf("failed to connect to collection node at %s: %w", collectionNodeAddr, err)
+	}
+	defer closer.Close()
+
+	err = t.grpcTxSend(ctx, collectionRPC, tx)
+	if err != nil {
+		return fmt.Errorf("failed to send transaction to collection node at %s: %w", collectionNodeAddr, err)
+	}
+	return nil
+}
+
+func (t *Transactions) grpcTxSend(
+	ctx context.Context,
+	client accessproto.AccessAPIClient,
+	tx *flow.TransactionBody,
+) error {
+	colReq := &accessproto.SendTransactionRequest{
+		Transaction: convert.TransactionToMessage(*tx),
+	}
+
+	clientDeadline := time.Now().Add(time.Duration(2) * time.Second)
+	ctx, cancel := context.WithDeadline(ctx, clientDeadline)
+	defer cancel()
+
+	_, err := client.SendTransaction(ctx, colReq)
+	return err
+}
+
+// SendRawTransaction sends a raw transaction to the collection node
+func (t *Transactions) SendRawTransaction(
+	ctx context.Context,
+	tx *flow.TransactionBody,
+) error {
+	// send the transaction to the collection node
+	return t.trySendTransaction(ctx, tx)
+}
+
+func (t
*Transactions) GetTransaction(ctx context.Context, txID flow.Identifier) (*flow.TransactionBody, error) {
+	// look up transaction from storage
+	tx, err := t.transactions.ByID(txID)
+	txErr := rpc.ConvertStorageError(err)
+
+	if txErr != nil {
+		if status.Code(txErr) == codes.NotFound {
+			return t.getHistoricalTransaction(ctx, txID)
+		}
+		// Other Error trying to retrieve the transaction, return with err
+		return nil, txErr
+	}
+
+	return tx, nil
+}
+
+func (t *Transactions) GetTransactionsByBlockID(
+	ctx context.Context,
+	blockID flow.Identifier,
+) ([]*flow.TransactionBody, error) {
+	// TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID
+	block, err := t.blocks.ByID(blockID)
+	if err != nil {
+		return nil, rpc.ConvertStorageError(err)
+	}
+
+	return t.txProvider.TransactionsByBlockID(ctx, block)
+}
+
+func (t *Transactions) GetTransactionResult(
+	ctx context.Context,
+	txID flow.Identifier,
+	blockID flow.Identifier,
+	collectionID flow.Identifier,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) (*accessmodel.TransactionResult, error) {
+	// look up transaction from storage
+	start := time.Now()
+
+	tx, err := t.transactions.ByID(txID)
+	if err != nil {
+		txErr := rpc.ConvertStorageError(err)
+		if status.Code(txErr) != codes.NotFound {
+			return nil, txErr
+		}
+
+		// Tx not found. If we have historical Sporks setup, let's look through those as well
+		if t.txResultCache != nil {
+			val, ok := t.txResultCache.Get(txID)
+			if ok {
+				return val, nil
+			}
+		}
+		historicalTxResult, err := t.getHistoricalTransactionResult(ctx, txID)
+		if err != nil {
+			// if tx not found in old access nodes either, then assume that the tx was submitted to a different AN
+			// and return status as unknown
+			txStatus := flow.TransactionStatusUnknown
+			result := &accessmodel.TransactionResult{
+				Status:     txStatus,
+				StatusCode: uint(txStatus),
+			}
+			if t.txResultCache != nil {
+				t.txResultCache.Add(txID, result)
+			}
+			return result, nil
+		}
+
+		if t.txResultCache != nil {
+			t.txResultCache.Add(txID, historicalTxResult)
+		}
+		return historicalTxResult, nil
+	}
+
+	block, err := t.retrieveBlock(blockID, collectionID, txID)
+	// an error occurred looking up the block, or the requested block or collection was not found.
+	// If looking up the block based solely on the txID returns not found, then no error is
+	// returned since the block may not be finalized yet.
+	if err != nil {
+		return nil, rpc.ConvertStorageError(err)
+	}
+
+	var blockHeight uint64
+	var txResult *accessmodel.TransactionResult
+	// access node may not have the block if it hasn't yet been finalized, hence block can be nil at this point
+	if block != nil {
+		txResult, err = t.lookupTransactionResult(ctx, txID, block.ToHeader(), requiredEventEncodingVersion)
+		if err != nil {
+			return nil, rpc.ConvertError(err, "failed to retrieve result", codes.Internal)
+		}
+
+		// an additional check to ensure the correctness of the collection ID.
+		expectedCollectionID, err := t.lookupCollectionIDInBlock(block, txID)
+		if err != nil {
+			// if the collection has not been indexed yet, the lookup will return a not found error.
+			// if the request included a blockID or collectionID in its search criteria, not found
+			// should result in an error because it's not possible to guarantee that the result found
+			// is the correct one.
+ if blockID != flow.ZeroID || collectionID != flow.ZeroID { + return nil, rpc.ConvertStorageError(err) + } + } + + if collectionID == flow.ZeroID { + collectionID = expectedCollectionID + } else if collectionID != expectedCollectionID { + return nil, status.Error(codes.InvalidArgument, "transaction not found in provided collection") + } + + blockID = block.ID() + blockHeight = block.Height + } + + // If there is still no transaction result, provide one based on available information. + if txResult == nil { + var txStatus flow.TransactionStatus + // Derive the status of the transaction. + if block == nil { + txStatus, err = t.txStatusDeriver.DeriveUnknownTransactionStatus(tx.ReferenceBlockID) + } else { + txStatus, err = t.txStatusDeriver.DeriveTransactionStatus(blockHeight, false) + } + + if err != nil { + if !errors.Is(err, state.ErrUnknownSnapshotReference) { + irrecoverable.Throw(ctx, err) + } + return nil, rpc.ConvertStorageError(err) + } + + txResult = &accessmodel.TransactionResult{ + BlockID: blockID, + BlockHeight: blockHeight, + TransactionID: txID, + Status: txStatus, + CollectionID: collectionID, + } + } else { + txResult.CollectionID = collectionID + } + + t.metrics.TransactionResultFetched(time.Since(start), len(tx.Script)) + + return txResult, nil +} + +// lookupCollectionIDInBlock returns the collection ID based on the transaction ID. The lookup is performed in block +// collections. +func (t *Transactions) lookupCollectionIDInBlock( + block *flow.Block, + txID flow.Identifier, +) (flow.Identifier, error) { + for _, guarantee := range block.Payload.Guarantees { + collectionID := guarantee.CollectionID + collection, err := t.collections.LightByID(collectionID) + if err != nil { + return flow.ZeroID, fmt.Errorf("failed to get collection %s in indexed block: %w", collectionID, err) + } + for _, collectionTxID := range collection.Transactions { + if collectionTxID == txID { + return collectionID, nil + } + } + } + return flow.ZeroID, ErrTransactionNotInBlock +} + +// retrieveBlock function returns a block based on the input arguments. +// The block ID lookup has the highest priority, followed by the collection ID lookup. +// If both are missing, the default lookup by transaction ID is performed. +// +// If looking up the block based solely on the txID returns not found, then no error is returned. +// +// Expected errors: +// - storage.ErrNotFound if the requested block or collection was not found. 
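+//
+// Illustrative calls (hypothetical, not actual call sites in this file):
+//
+//	block, err := t.retrieveBlock(blockID, flow.ZeroID, txID)     // lookup by block ID
+//	block, err := t.retrieveBlock(flow.ZeroID, flow.ZeroID, txID) // fallback lookup by tx ID; may return (nil, nil)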
+func (t *Transactions) retrieveBlock(
+	blockID flow.Identifier,
+	collectionID flow.Identifier,
+	txID flow.Identifier,
+) (*flow.Block, error) {
+	if blockID != flow.ZeroID {
+		return t.blocks.ByID(blockID)
+	}
+
+	if collectionID != flow.ZeroID {
+		return t.blocks.ByCollectionID(collectionID)
+	}
+
+	// find the block for the transaction
+	block, err := t.lookupBlock(txID)
+
+	if err != nil && !errors.Is(err, storage.ErrNotFound) {
+		return nil, err
+	}
+
+	return block, nil
+}
+
+func (t *Transactions) GetTransactionResultsByBlockID(
+	ctx context.Context,
+	blockID flow.Identifier,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) ([]*accessmodel.TransactionResult, error) {
+	// TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID
+	block, err := t.blocks.ByID(blockID)
+	if err != nil {
+		return nil, rpc.ConvertStorageError(err)
+	}
+
+	return t.txProvider.TransactionResultsByBlockID(ctx, block, requiredEventEncodingVersion)
+}
+
+// GetTransactionResultByIndex returns the transaction result for the given index in a block that has been
+// executed; requests for pending or finalized transactions return errors
+func (t *Transactions) GetTransactionResultByIndex(
+	ctx context.Context,
+	blockID flow.Identifier,
+	index uint32,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) (*accessmodel.TransactionResult, error) {
+	block, err := t.blocks.ByID(blockID)
+	if err != nil {
+		return nil, rpc.ConvertStorageError(err)
+	}
+
+	return t.txProvider.TransactionResultByIndex(ctx, block, index, requiredEventEncodingVersion)
+}
+
+// GetSystemTransaction returns the system transaction
+func (t *Transactions) GetSystemTransaction(
+	ctx context.Context,
+	txID flow.Identifier,
+	blockID flow.Identifier,
+) (*flow.TransactionBody, error) {
+	block, err := t.blocks.ByID(blockID)
+	if err != nil {
+		return nil, rpc.ConvertStorageError(err)
+	}
+
+	if txID == flow.ZeroID {
+		txID = t.systemTxID
+	}
+
+	return t.txProvider.SystemTransaction(ctx, block, txID)
+}
+
+// GetSystemTransactionResult returns the system transaction result
+func (t *Transactions) GetSystemTransactionResult(
+	ctx context.Context,
+	txID flow.Identifier,
+	blockID flow.Identifier,
+	requiredEventEncodingVersion entities.EventEncodingVersion,
+) (*accessmodel.TransactionResult, error) {
+	block, err := t.blocks.ByID(blockID)
+	if err != nil {
+		return nil, rpc.ConvertStorageError(err)
+	}
+
+	if txID == flow.ZeroID {
+		txID = t.systemTxID
+	}
+
+	return t.txProvider.SystemTransactionResult(ctx, block, txID, requiredEventEncodingVersion)
+}
+
+// Error returns:
+// - `storage.ErrNotFound` - the collection referenced by the transaction, or the block containing that collection, has not been found.
+// - all other errors are unexpected and potentially symptoms of internal implementation bugs or state corruption (fatal).
+func (t *Transactions) lookupBlock(txID flow.Identifier) (*flow.Block, error) {
+    collection, err := t.collections.LightByTransactionID(txID)
+    if err != nil {
+        return nil, err
+    }
+
+    block, err := t.blocks.ByCollectionID(collection.ID())
+    if err != nil {
+        return nil, err
+    }
+
+    return block, nil
+}
+
+func (t *Transactions) lookupTransactionResult(
+    ctx context.Context,
+    txID flow.Identifier,
+    header *flow.Header,
+    requiredEventEncodingVersion entities.EventEncodingVersion,
+) (*accessmodel.TransactionResult, error) {
+    txResult, err := t.txProvider.TransactionResult(ctx, header, txID, requiredEventEncodingVersion)
+    if err != nil {
+        // if either the storage or execution node reported no results, or there were not enough execution results
+        if status.Code(err) == codes.NotFound {
+            // No result yet, indicate that it has not been executed
+            return nil, nil
+        }
+        // Another error occurred while trying to retrieve the result; return it
+        return nil, err
+    }
+
+    // considered executed as long as some result is returned, even if it's an error message
+    return txResult, nil
+}
+
+func (t *Transactions) getHistoricalTransaction(
+    ctx context.Context,
+    txID flow.Identifier,
+) (*flow.TransactionBody, error) {
+    for _, historicalNode := range t.historicalAccessNodeClients {
+        txResp, err := historicalNode.GetTransaction(ctx, &accessproto.GetTransactionRequest{Id: txID[:]})
+        if err == nil {
+            tx, err := convert.MessageToTransaction(txResp.Transaction, t.chainID.Chain())
+            if err != nil {
+                return nil, status.Errorf(codes.Internal, "could not convert transaction: %v", err)
+            }
+
+            // Found on a historical node. Report it.
+            return &tx, nil
+        }
+        // Otherwise, if not found, just continue
+        if status.Code(err) == codes.NotFound {
+            continue
+        }
+        // TODO should we do something if the error isn't not found?
+    }
+    return nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", txID)
+}
+
+func (t *Transactions) getHistoricalTransactionResult(
+    ctx context.Context,
+    txID flow.Identifier,
+) (*accessmodel.TransactionResult, error) {
+    for _, historicalNode := range t.historicalAccessNodeClients {
+        result, err := historicalNode.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{Id: txID[:]})
+        if err == nil {
+            // Found on a historical node. Report it.
+            if result.GetStatus() == entities.TransactionStatus_UNKNOWN {
+                // We've moved to returning Status UNKNOWN instead of an error with the NotFound status,
+                // so we should continue and look at the next access node for answers.
+                continue
+            }
+
+            if result.GetStatus() == entities.TransactionStatus_PENDING {
+                // This is on a historical node. No transactions from it will ever be
+                // executed, so we should consider this expired.
+                result.Status = entities.TransactionStatus_EXPIRED
+            }
+
+            txResult, err := convert.MessageToTransactionResult(result)
+            if err != nil {
+                return nil, status.Errorf(codes.Internal, "could not convert transaction result: %v", err)
+            }
+
+            return txResult, nil
+        }
+        // Otherwise, if not found, just continue
+        if status.Code(err) == codes.NotFound {
+            continue
+        }
+        // TODO should we do something if the error isn't not found?
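+        // Note: errors other than NotFound are likewise skipped here, so every configured
+        // historical node is tried before the NotFound error below is returned to the caller.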
+    }
+    return nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", txID)
+}
+
+func (t *Transactions) registerTransactionForRetry(tx *flow.TransactionBody) {
+    referenceBlock, err := t.state.AtBlockID(tx.ReferenceBlockID).Head()
+    if err != nil {
+        return
+    }
+
+    t.retrier.RegisterTransaction(referenceBlock.Height, tx)
+}
+
+// ATTENTION: might be a source of problems in the future. We run this code on the finalization goroutine,
+// potentially lagging finalization events if operations take a long time.
+// We might need to move this logic onto a dedicated goroutine and provide a way to skip finalization events
+// if they are delivered too often for this engine. An example of a similar approach:
+// https://github.com/onflow/flow-go/blob/10b0fcbf7e2031674c00f3cdd280f27bd1b16c47/engine/common/follower/compliance_engine.go#L201.
+// No errors expected during normal operations.
+func (t *Transactions) ProcessFinalizedBlockHeight(height uint64) error {
+    return t.retrier.Retry(height)
+}
diff --git a/engine/access/rpc/backend/transactions/transactions_test.go b/engine/access/rpc/backend/transactions/transactions_test.go
new file mode 100644
index 00000000000..7531cf6d29d
--- /dev/null
+++ b/engine/access/rpc/backend/transactions/transactions_test.go
@@ -0,0 +1,1908 @@
+package transactions
+
+import (
+    "bytes"
+    "context"
+    "fmt"
+    "math/rand"
+    "os"
+    "testing"
+
+    "github.com/cockroachdb/pebble/v2"
+    lru "github.com/hashicorp/golang-lru/v2"
+    "github.com/onflow/cadence"
+    cadenceCommon "github.com/onflow/cadence/common"
+    "github.com/onflow/cadence/encoding/ccf"
+    jsoncdc "github.com/onflow/cadence/encoding/json"
+    "github.com/onflow/flow/protobuf/go/flow/access"
+    "github.com/onflow/flow/protobuf/go/flow/entities"
+    execproto "github.com/onflow/flow/protobuf/go/flow/execution"
+    "github.com/rs/zerolog"
+    "github.com/stretchr/testify/mock"
+    "github.com/stretchr/testify/require"
+    "github.com/stretchr/testify/suite"
+    "google.golang.org/grpc/codes"
+    "google.golang.org/grpc/status"
+
+    "github.com/onflow/flow-go/access/validator"
+    validatormock "github.com/onflow/flow-go/access/validator/mock"
+    "github.com/onflow/flow-go/engine/access/index"
+    accessmock "github.com/onflow/flow-go/engine/access/mock"
+    "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator"
+    "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/error_messages"
+    "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/provider"
+    "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/retrier"
+    txstatus "github.com/onflow/flow-go/engine/access/rpc/backend/transactions/status"
+    connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock"
+    commonrpc "github.com/onflow/flow-go/engine/common/rpc"
+    "github.com/onflow/flow-go/engine/common/rpc/convert"
+    "github.com/onflow/flow-go/fvm/blueprints"
+    "github.com/onflow/flow-go/fvm/systemcontracts"
+    accessmodel "github.com/onflow/flow-go/model/access"
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/model/flow/filter"
+    "github.com/onflow/flow-go/module"
+    "github.com/onflow/flow-go/module/counters"
+    execmock "github.com/onflow/flow-go/module/execution/mock"
+    "github.com/onflow/flow-go/module/metrics"
+    syncmock "github.com/onflow/flow-go/module/state_synchronization/mock"
+    "github.com/onflow/flow-go/state/protocol"
+    bprotocol "github.com/onflow/flow-go/state/protocol/badger"
+    protocolmock "github.com/onflow/flow-go/state/protocol/mock"
+    "github.com/onflow/flow-go/state/protocol/util"
+
"github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +const expectedErrorMsg = "expected test error" + +type Suite struct { + suite.Suite + + log zerolog.Logger + state *protocolmock.State + snapshot *protocolmock.Snapshot + params *protocolmock.Params + + blocks *storagemock.Blocks + headers *storagemock.Headers + collections *storagemock.Collections + transactions *storagemock.Transactions + receipts *storagemock.ExecutionReceipts + results *storagemock.ExecutionResults + lightTxResults *storagemock.LightTransactionResults + events *storagemock.Events + txResultErrorMessages *storagemock.TransactionResultErrorMessages + txResultCache *lru.Cache[flow.Identifier, *accessmodel.TransactionResult] + + db *pebble.DB + dbDir string + lastFullBlockHeight *counters.PersistentStrictMonotonicCounter + + executionAPIClient *accessmock.ExecutionAPIClient + historicalAccessAPIClient *accessmock.AccessAPIClient + + connectionFactory *connectionmock.ConnectionFactory + + reporter *syncmock.IndexReporter + indexReporter *index.Reporter + eventsIndex *index.EventsIndex + txResultsIndex *index.TransactionResultsIndex + + errorMessageProvider error_messages.Provider + + chainID flow.ChainID + systemTx *flow.TransactionBody + systemCollection *flow.Collection + pendingExecutionEvents []flow.Event + processScheduledCallbackEventType flow.EventType + scheduledCallbacksEnabled bool + + fixedExecutionNodeIDs flow.IdentifierList + preferredExecutionNodeIDs flow.IdentifierList +} + +func TestTransactionsBackend(t *testing.T) { + suite.Run(t, new(Suite)) +} + +func (suite *Suite) SetupTest() { + suite.log = unittest.Logger() + suite.snapshot = protocolmock.NewSnapshot(suite.T()) + + header := unittest.BlockHeaderFixture() + suite.params = protocolmock.NewParams(suite.T()) + suite.params.On("FinalizedRoot").Return(header, nil).Maybe() + suite.params.On("SporkID").Return(unittest.IdentifierFixture(), nil).Maybe() + suite.params.On("SporkRootBlockHeight").Return(header.Height, nil).Maybe() + suite.params.On("SealedRoot").Return(header, nil).Maybe() + + suite.state = protocolmock.NewState(suite.T()) + suite.state.On("Params").Return(suite.params).Maybe() + + suite.blocks = storagemock.NewBlocks(suite.T()) + suite.headers = storagemock.NewHeaders(suite.T()) + suite.transactions = storagemock.NewTransactions(suite.T()) + suite.collections = storagemock.NewCollections(suite.T()) + suite.receipts = storagemock.NewExecutionReceipts(suite.T()) + suite.results = storagemock.NewExecutionResults(suite.T()) + suite.txResultErrorMessages = storagemock.NewTransactionResultErrorMessages(suite.T()) + suite.executionAPIClient = accessmock.NewExecutionAPIClient(suite.T()) + suite.lightTxResults = storagemock.NewLightTransactionResults(suite.T()) + suite.events = storagemock.NewEvents(suite.T()) + suite.chainID = flow.Testnet + suite.historicalAccessAPIClient = accessmock.NewAccessAPIClient(suite.T()) + suite.connectionFactory = connectionmock.NewConnectionFactory(suite.T()) + + txResCache, err := lru.New[flow.Identifier, *accessmodel.TransactionResult](10) + suite.Require().NoError(err) + suite.txResultCache = txResCache + + suite.reporter = syncmock.NewIndexReporter(suite.T()) + suite.indexReporter = index.NewReporter() + err = suite.indexReporter.Initialize(suite.reporter) + 
suite.Require().NoError(err) + suite.eventsIndex = index.NewEventsIndex(suite.indexReporter, suite.events) + suite.txResultsIndex = index.NewTransactionResultsIndex(suite.indexReporter, suite.lightTxResults) + + suite.systemTx, err = blueprints.SystemChunkTransaction(flow.Testnet.Chain()) + suite.Require().NoError(err) + suite.scheduledCallbacksEnabled = true + + suite.pendingExecutionEvents = suite.createPendingExecutionEvents(2) // 2 callbacks + suite.systemCollection, err = blueprints.SystemCollection(suite.chainID.Chain(), suite.pendingExecutionEvents) + suite.Require().NoError(err) + suite.processScheduledCallbackEventType = suite.pendingExecutionEvents[0].Type + + suite.db, suite.dbDir = unittest.TempPebbleDB(suite.T()) + progress, err := store.NewConsumerProgress(pebbleimpl.ToDB(suite.db), module.ConsumeProgressLastFullBlockHeight).Initialize(0) + require.NoError(suite.T(), err) + suite.lastFullBlockHeight, err = counters.NewPersistentStrictMonotonicCounter(progress) + suite.Require().NoError(err) + + suite.fixedExecutionNodeIDs = nil + suite.preferredExecutionNodeIDs = nil + suite.errorMessageProvider = nil +} + +func (suite *Suite) TearDownTest() { + err := os.RemoveAll(suite.dbDir) + suite.Require().NoError(err) +} + +func (suite *Suite) defaultTransactionsParams() Params { + nodeProvider := commonrpc.NewExecutionNodeIdentitiesProvider( + suite.log, + suite.state, + suite.receipts, + suite.preferredExecutionNodeIDs, + suite.fixedExecutionNodeIDs, + ) + + txStatusDeriver := txstatus.NewTxStatusDeriver( + suite.state, + suite.lastFullBlockHeight, + ) + + txValidator, err := validator.NewTransactionValidator( + validatormock.NewBlocks(suite.T()), + suite.chainID.Chain(), + metrics.NewNoopCollector(), + validator.TransactionValidationOptions{}, + execmock.NewScriptExecutor(suite.T()), + ) + suite.Require().NoError(err) + + nodeCommunicator := node_communicator.NewNodeCommunicator(false) + + txProvider := provider.NewENTransactionProvider( + suite.log, + suite.state, + suite.collections, + suite.connectionFactory, + nodeCommunicator, + nodeProvider, + txStatusDeriver, + suite.systemTx.ID(), + suite.chainID, + suite.scheduledCallbacksEnabled, + ) + + return Params{ + Log: suite.log, + Metrics: metrics.NewNoopCollector(), + State: suite.state, + ChainID: flow.Testnet, + SystemTxID: suite.systemTx.ID(), + StaticCollectionRPCClient: suite.historicalAccessAPIClient, + HistoricalAccessNodeClients: nil, + NodeCommunicator: nodeCommunicator, + ConnFactory: suite.connectionFactory, + EnableRetries: true, + NodeProvider: nodeProvider, + Blocks: suite.blocks, + Collections: suite.collections, + Transactions: suite.transactions, + Events: suite.events, + TxErrorMessageProvider: suite.errorMessageProvider, + TxResultCache: suite.txResultCache, + TxProvider: txProvider, + TxValidator: txValidator, + TxStatusDeriver: txStatusDeriver, + EventsIndex: suite.eventsIndex, + TxResultsIndex: suite.txResultsIndex, + ScheduledCallbacksEnabled: suite.scheduledCallbacksEnabled, + } +} + +// TestGetTransactionResult_UnknownTx returns unknown result when tx not found +func (suite *Suite) TestGetTransactionResult_UnknownTx() { + block := unittest.BlockFixture() + tx := unittest.TransactionBodyFixture() + coll := unittest.CollectionFromTransactions(&tx) + + suite.transactions. + On("ByID", tx.ID()). 
+        Return(nil, storage.ErrNotFound)
+
+    params := suite.defaultTransactionsParams()
+    txBackend, err := NewTransactionsBackend(params)
+    require.NoError(suite.T(), err)
+    res, err := txBackend.GetTransactionResult(
+        context.Background(),
+        tx.ID(),
+        block.ID(),
+        coll.ID(),
+        entities.EventEncodingVersion_JSON_CDC_V0,
+    )
+    suite.Require().NoError(err)
+    suite.Require().Equal(res.Status, flow.TransactionStatusUnknown)
+    suite.Require().Empty(res.BlockID)
+    suite.Require().Empty(res.BlockHeight)
+    suite.Require().Empty(res.TransactionID)
+    suite.Require().Empty(res.CollectionID)
+    suite.Require().Empty(res.ErrorMessage)
+}
+
+// TestGetTransactionResult_TxLookupFailure tests that an error from transaction storage is returned
+func (suite *Suite) TestGetTransactionResult_TxLookupFailure() {
+    block := unittest.BlockFixture()
+    tx := unittest.TransactionBodyFixture()
+    coll := unittest.CollectionFromTransactions(&tx)
+
+    expectedErr := fmt.Errorf("some other error")
+    suite.transactions.
+        On("ByID", tx.ID()).
+        Return(nil, expectedErr)
+
+    params := suite.defaultTransactionsParams()
+    txBackend, err := NewTransactionsBackend(params)
+    require.NoError(suite.T(), err)
+
+    _, err = txBackend.GetTransactionResult(
+        context.Background(),
+        tx.ID(),
+        block.ID(),
+        coll.ID(),
+        entities.EventEncodingVersion_JSON_CDC_V0,
+    )
+    suite.Require().Equal(err, status.Errorf(codes.Internal, "failed to find: %v", expectedErr))
+}
+
+// TestGetTransactionResult_HistoricNodes_Success tests lookup in historic nodes
+func (suite *Suite) TestGetTransactionResult_HistoricNodes_Success() {
+    block := unittest.BlockFixture()
+    tx := unittest.TransactionBodyFixture()
+    coll := unittest.CollectionFromTransactions(&tx)
+
+    suite.transactions.
+        On("ByID", tx.ID()).
+        Return(nil, storage.ErrNotFound)
+
+    transactionResultResponse := access.TransactionResultResponse{
+        Status:     entities.TransactionStatus_EXECUTED,
+        StatusCode: uint32(entities.TransactionStatus_EXECUTED),
+    }
+
+    suite.historicalAccessAPIClient.
+        On("GetTransactionResult", mock.Anything, mock.MatchedBy(func(req *access.GetTransactionRequest) bool {
+            txID := tx.ID()
+            return bytes.Equal(txID[:], req.Id)
+        })).
+        Return(&transactionResultResponse, nil).
+        Once()
+
+    params := suite.defaultTransactionsParams()
+    params.HistoricalAccessNodeClients = []access.AccessAPIClient{suite.historicalAccessAPIClient}
+    txBackend, err := NewTransactionsBackend(params)
+    require.NoError(suite.T(), err)
+
+    resp, err := txBackend.GetTransactionResult(
+        context.Background(),
+        tx.ID(),
+        block.ID(),
+        coll.ID(),
+        entities.EventEncodingVersion_JSON_CDC_V0,
+    )
+    suite.Require().NoError(err)
+    suite.Require().Equal(flow.TransactionStatusExecuted, resp.Status)
+    suite.Require().Equal(uint(flow.TransactionStatusExecuted), resp.StatusCode)
+}
+
+// TestGetTransactionResult_HistoricNodes_FromCache gets the historic transaction result from the cache
+func (suite *Suite) TestGetTransactionResult_HistoricNodes_FromCache() {
+    block := unittest.BlockFixture()
+    tx := unittest.TransactionBodyFixture()
+
+    suite.transactions.
+        On("ByID", tx.ID()).
+        Return(nil, storage.ErrNotFound)
+
+    transactionResultResponse := access.TransactionResultResponse{
+        Status:     entities.TransactionStatus_EXECUTED,
+        StatusCode: uint32(entities.TransactionStatus_EXECUTED),
+    }
+
+    suite.historicalAccessAPIClient.
+        On("GetTransactionResult", mock.Anything, mock.MatchedBy(func(req *access.GetTransactionRequest) bool {
+            txID := tx.ID()
+            return bytes.Equal(txID[:], req.Id)
+        })).
+        Return(&transactionResultResponse, nil).
+        Once()
+
+    params := suite.defaultTransactionsParams()
+    params.HistoricalAccessNodeClients = []access.AccessAPIClient{suite.historicalAccessAPIClient}
+    txBackend, err := NewTransactionsBackend(params)
+    require.NoError(suite.T(), err)
+
+    coll := unittest.CollectionFromTransactions(&tx)
+    resp, err := txBackend.GetTransactionResult(
+        context.Background(),
+        tx.ID(),
+        block.ID(),
+        coll.ID(),
+        entities.EventEncodingVersion_JSON_CDC_V0,
+    )
+    suite.Require().NoError(err)
+    suite.Require().Equal(flow.TransactionStatusExecuted, resp.Status)
+    suite.Require().Equal(uint(flow.TransactionStatusExecuted), resp.StatusCode)
+
+    resp2, err := txBackend.GetTransactionResult(
+        context.Background(),
+        tx.ID(),
+        block.ID(),
+        coll.ID(),
+        entities.EventEncodingVersion_JSON_CDC_V0,
+    )
+    suite.Require().NoError(err)
+    suite.Require().Equal(flow.TransactionStatusExecuted, resp2.Status)
+    suite.Require().Equal(uint(flow.TransactionStatusExecuted), resp2.StatusCode)
+}
+
+// TestGetTransactionResultUnknownFromCache retrieves an unknown result from the cache.
+func (suite *Suite) TestGetTransactionResultUnknownFromCache() {
+    block := unittest.BlockFixture()
+    tx := unittest.TransactionBodyFixture()
+
+    suite.transactions.
+        On("ByID", tx.ID()).
+        Return(nil, storage.ErrNotFound)
+
+    suite.historicalAccessAPIClient.
+        On("GetTransactionResult", mock.Anything, mock.MatchedBy(func(req *access.GetTransactionRequest) bool {
+            txID := tx.ID()
+            return bytes.Equal(txID[:], req.Id)
+        })).
+        Return(nil, status.Errorf(codes.NotFound, "no known transaction with ID %s", tx.ID())).
+        Once()
+
+    params := suite.defaultTransactionsParams()
+    params.HistoricalAccessNodeClients = []access.AccessAPIClient{suite.historicalAccessAPIClient}
+    txBackend, err := NewTransactionsBackend(params)
+    require.NoError(suite.T(), err)
+
+    coll := unittest.CollectionFromTransactions(&tx)
+    resp, err := txBackend.GetTransactionResult(
+        context.Background(),
+        tx.ID(),
+        block.ID(),
+        coll.ID(),
+        entities.EventEncodingVersion_JSON_CDC_V0,
+    )
+    suite.Require().NoError(err)
+    suite.Require().Equal(flow.TransactionStatusUnknown, resp.Status)
+    suite.Require().Equal(uint(flow.TransactionStatusUnknown), resp.StatusCode)
+
+    // ensure the unknown transaction is cached when not found anywhere
+    txStatus := flow.TransactionStatusUnknown
+    res, ok := txBackend.txResultCache.Get(tx.ID())
+    suite.Require().True(ok)
+    suite.Require().Equal(res, &accessmodel.TransactionResult{
+        Status:     txStatus,
+        StatusCode: uint(txStatus),
+    })
+
+    // ensure underlying GetTransactionResult() won't be called the second time
+    resp2, err := txBackend.GetTransactionResult(
+        context.Background(),
+        tx.ID(),
+        block.ID(),
+        coll.ID(),
+        entities.EventEncodingVersion_JSON_CDC_V0,
+    )
+    suite.Require().NoError(err)
+    suite.Require().Equal(flow.TransactionStatusUnknown, resp2.Status)
+    suite.Require().Equal(uint(flow.TransactionStatusUnknown), resp2.StatusCode)
+}
+
+// TestGetSystemTransaction_ExecutionNode_HappyPath tests that GetSystemTransaction returns the system chunk transaction.
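+// It covers EN-backed providers with scheduled callbacks both enabled and disabled, querying by the
+// zero ID, the system transaction ID, a system-collection transaction ID, and a non-system ID.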
+func (suite *Suite) TestGetSystemTransaction_ExecutionNode_HappyPath() { + block := unittest.BlockFixture() + blockID := block.ID() + + params := suite.defaultTransactionsParams() + enabledProvider := provider.NewENTransactionProvider( + suite.log, + suite.state, + suite.collections, + suite.connectionFactory, + params.NodeCommunicator, + params.NodeProvider, + params.TxStatusDeriver, + suite.systemTx.ID(), + suite.chainID, + true, + ) + disabledProvider := provider.NewENTransactionProvider( + suite.log, + suite.state, + suite.collections, + suite.connectionFactory, + params.NodeCommunicator, + params.NodeProvider, + params.TxStatusDeriver, + suite.systemTx.ID(), + suite.chainID, + false, + ) + + suite.params.On("FinalizedRoot").Unset() + + suite.Run("scheduled callbacks DISABLED - ZeroID", func() { + suite.blocks.On("ByID", blockID).Return(block, nil).Once() + + params.TxProvider = disabledProvider + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := txBackend.GetSystemTransaction(context.Background(), flow.ZeroID, blockID) + suite.Require().NoError(err) + + suite.Require().Equal(suite.systemTx, res) + }) + + suite.Run("scheduled callbacks DISABLED - system txID", func() { + suite.blocks.On("ByID", blockID).Return(block, nil).Once() + + params.TxProvider = disabledProvider + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := txBackend.GetSystemTransaction(context.Background(), suite.systemTx.ID(), blockID) + suite.Require().NoError(err) + + suite.Require().Equal(suite.systemTx, res) + }) + + suite.Run("scheduled callbacks DISABLED - non-system txID fails", func() { + suite.blocks.On("ByID", blockID).Return(block, nil).Once() + + params.TxProvider = disabledProvider + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := txBackend.GetSystemTransaction(context.Background(), unittest.IdentifierFixture(), blockID) + suite.Require().Error(err) + suite.Require().Nil(res) + }) + + suite.Run("scheduled callbacks ENABLED - ZeroID", func() { + suite.blocks.On("ByID", blockID).Return(block, nil).Once() + + params.TxProvider = enabledProvider + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := txBackend.GetSystemTransaction(context.Background(), flow.ZeroID, blockID) + suite.Require().NoError(err) + + suite.Require().Equal(suite.systemTx, res) + }) + + suite.Run("scheduled callbacks ENABLED - system txID", func() { + suite.blocks.On("ByID", blockID).Return(block, nil).Once() + + params.TxProvider = enabledProvider + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := txBackend.GetSystemTransaction(context.Background(), suite.systemTx.ID(), blockID) + suite.Require().NoError(err) + + suite.Require().Equal(suite.systemTx, res) + }) + + suite.Run("scheduled callbacks ENABLED - system collection TX", func() { + suite.blocks.On("ByID", blockID).Return(block, nil).Once() + + // get execution node identities + suite.params.On("FinalizedRoot").Return(block.ToHeader(), nil) + suite.state.On("Final").Return(suite.snapshot, nil).Twice() + suite.snapshot.On("Identities", mock.Anything).Return(unittest.IdentityListFixture(1), nil).Twice() + + suite.setupExecutionGetEventsRequest(blockID, block.Height, suite.pendingExecutionEvents) + + params.TxProvider = enabledProvider + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + systemTx := 
suite.systemCollection.Transactions[2]
+        res, err := txBackend.GetSystemTransaction(context.Background(), systemTx.ID(), blockID)
+        suite.Require().NoError(err)
+
+        suite.Require().Equal(systemTx, res)
+    })
+
+    suite.Run("scheduled callbacks ENABLED - non-system txID fails", func() {
+        suite.blocks.On("ByID", blockID).Return(block, nil).Once()
+
+        params.TxProvider = enabledProvider
+
+        suite.params.On("FinalizedRoot").Return(block.ToHeader(), nil)
+        suite.state.On("Final").Return(suite.snapshot, nil).Twice()
+        suite.snapshot.On("Identities", mock.Anything).Return(unittest.IdentityListFixture(1), nil).Twice()
+
+        suite.setupExecutionGetEventsRequest(blockID, block.Height, suite.pendingExecutionEvents)
+
+        txBackend, err := NewTransactionsBackend(params)
+        suite.Require().NoError(err)
+
+        res, err := txBackend.GetSystemTransaction(context.Background(), unittest.IdentifierFixture(), blockID)
+        suite.Require().Error(err)
+        suite.Require().Nil(res)
+    })
+}
+
+// TestGetSystemTransaction_Local_HappyPath tests that GetSystemTransaction returns the system chunk
+// transaction when served by the local transaction provider.
+func (suite *Suite) TestGetSystemTransaction_Local_HappyPath() {
+    block := unittest.BlockFixture()
+    blockID := block.ID()
+
+    params := suite.defaultTransactionsParams()
+    enabledProvider := provider.NewLocalTransactionProvider(
+        suite.state,
+        suite.collections,
+        suite.blocks,
+        params.EventsIndex,
+        params.TxResultsIndex,
+        params.TxErrorMessageProvider,
+        suite.systemTx.ID(),
+        params.TxStatusDeriver,
+        suite.chainID,
+        true,
+    )
+    disabledProvider := provider.NewLocalTransactionProvider(
+        suite.state,
+        suite.collections,
+        suite.blocks,
+        params.EventsIndex,
+        params.TxResultsIndex,
+        params.TxErrorMessageProvider,
+        suite.systemTx.ID(),
+        params.TxStatusDeriver,
+        suite.chainID,
+        false,
+    )
+
+    suite.params.On("FinalizedRoot").Unset()
+
+    suite.Run("scheduled callbacks DISABLED - ZeroID", func() {
+        suite.blocks.On("ByID", blockID).Return(block, nil).Once()
+
+        params.TxProvider = disabledProvider
+
+        txBackend, err := NewTransactionsBackend(params)
+        suite.Require().NoError(err)
+
+        res, err := txBackend.GetSystemTransaction(context.Background(), flow.ZeroID, blockID)
+        suite.Require().NoError(err)
+
+        suite.Require().Equal(suite.systemTx, res)
+    })
+
+    suite.Run("scheduled callbacks DISABLED - system txID", func() {
+        suite.blocks.On("ByID", blockID).Return(block, nil).Once()
+
+        params.TxProvider = disabledProvider
+
+        txBackend, err := NewTransactionsBackend(params)
+        suite.Require().NoError(err)
+
+        res, err := txBackend.GetSystemTransaction(context.Background(), suite.systemTx.ID(), blockID)
+        suite.Require().NoError(err)
+
+        suite.Require().Equal(suite.systemTx, res)
+    })
+
+    suite.Run("scheduled callbacks DISABLED - non-system txID fails", func() {
+        suite.blocks.On("ByID", blockID).Return(block, nil).Once()
+
+        params.TxProvider = disabledProvider
+
+        txBackend, err := NewTransactionsBackend(params)
+        suite.Require().NoError(err)
+
+        res, err := txBackend.GetSystemTransaction(context.Background(), unittest.IdentifierFixture(), blockID)
+        suite.Require().Error(err)
+        suite.Require().Nil(res)
+    })
+
+    suite.Run("scheduled callbacks ENABLED - ZeroID", func() {
+        suite.blocks.On("ByID", blockID).Return(block, nil).Once()
+
+        params.TxProvider = enabledProvider
+
+        txBackend, err := NewTransactionsBackend(params)
+        suite.Require().NoError(err)
+
+        res, err := txBackend.GetSystemTransaction(context.Background(), flow.ZeroID, blockID)
+        suite.Require().NoError(err)
+
+        suite.Require().Equal(suite.systemTx, res)
+    })
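+
+    // The remaining subtests exercise the enabled provider; the system-collection cases
+    // reconstruct the collection from the indexed PendingExecution events.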
+ + suite.Run("scheduled callbacks ENABLED - system txID", func() { + suite.blocks.On("ByID", blockID).Return(block, nil).Once() + + params.TxProvider = enabledProvider + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := txBackend.GetSystemTransaction(context.Background(), suite.systemTx.ID(), blockID) + suite.Require().NoError(err) + + suite.Require().Equal(suite.systemTx, res) + }) + + suite.Run("scheduled callbacks ENABLED - system collection TX", func() { + suite.blocks.On("ByID", blockID).Return(block, nil).Once() + + suite.reporter.On("LowestIndexedHeight").Return(block.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(block.Height+10, nil).Once() + suite.events.On("ByBlockID", blockID).Return(suite.pendingExecutionEvents, nil).Once() + + params.TxProvider = enabledProvider + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + systemTx := suite.systemCollection.Transactions[2] + res, err := txBackend.GetSystemTransaction(context.Background(), systemTx.ID(), blockID) + suite.Require().NoError(err) + + suite.Require().Equal(systemTx, res) + }) + + suite.Run("scheduled callbacks ENABLED - non-system txID fails", func() { + suite.blocks.On("ByID", blockID).Return(block, nil).Once() + + params.TxProvider = enabledProvider + + suite.reporter.On("LowestIndexedHeight").Return(block.Height, nil).Once() + suite.reporter.On("HighestIndexedHeight").Return(block.Height+10, nil).Once() + suite.events.On("ByBlockID", blockID).Return(suite.pendingExecutionEvents, nil).Once() + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := txBackend.GetSystemTransaction(context.Background(), unittest.IdentifierFixture(), blockID) + suite.Require().Error(err) + suite.Require().Nil(res) + }) +} + +func (suite *Suite) TestGetSystemTransactionResult_ExecutionNode_HappyPath() { + test := func(snapshot protocol.Snapshot) { + suite.state. + On("Sealed"). + Return(snapshot, nil). + Once() + + lastBlock, err := snapshot.Head() + suite.Require().NoError(err) + + identities, err := snapshot.Identities(filter.Any) + suite.Require().NoError(err) + + block := unittest.BlockWithParentFixture(lastBlock) + blockID := block.ID() + suite.state. + On("AtBlockID", blockID). + Return(unittest.StateSnapshotForKnownBlock(block.ToHeader(), identities.Lookup()), nil). + Once() + + // block storage returns the corresponding block + suite.blocks. + On("ByID", blockID). + Return(block, nil). + Once() + + receipt1 := unittest.ReceiptForBlockFixture(block) + suite.receipts. + On("ByBlockID", block.ID()). + Return(flow.ExecutionReceiptList{receipt1}, nil) + + // Generating events with event generator + exeNodeEventEncodingVersion := entities.EventEncodingVersion_CCF_V0 + events := unittest.EventGenerator.GetEventsWithEncoding(1, exeNodeEventEncodingVersion) + eventMessages := convert.EventsToMessages(events) + + systemTxID := suite.systemTx.ID() + expectedRequest := &execproto.GetTransactionResultRequest{ + BlockId: blockID[:], + TransactionId: systemTxID[:], + } + exeEventResp := &execproto.GetTransactionResultResponse{ + Events: eventMessages, + EventEncodingVersion: exeNodeEventEncodingVersion, + } + + suite.executionAPIClient. + On("GetTransactionResult", mock.Anything, expectedRequest). + Return(exeEventResp, nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). 
+ Once() + + // the connection factory should be used to get the execution node client + params := suite.defaultTransactionsParams() + backend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := backend.GetSystemTransactionResult( + context.Background(), + flow.ZeroID, + block.ID(), + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + + // Expected system chunk transaction + suite.Require().Equal(flow.TransactionStatusExecuted, res.Status) + suite.Require().Equal(suite.systemTx.ID(), res.TransactionID) + + // Check for successful decoding of event + _, err = jsoncdc.Decode(nil, res.Events[0].Payload) + suite.Require().NoError(err) + + events, err = convert.MessagesToEventsWithEncodingConversion( + eventMessages, + exeNodeEventEncodingVersion, + entities.EventEncodingVersion_JSON_CDC_V0, + ) + suite.Require().NoError(err) + suite.Require().Equal(events, res.Events) + } + + identities := unittest.CompleteIdentitySet() + rootSnapshot := unittest.RootSnapshotFixture(identities) + util.RunWithFullProtocolStateAndMutator( + suite.T(), + rootSnapshot, + func(db storage.DB, state *bprotocol.ParticipantState, mutableState protocol.MutableProtocolState) { + epochBuilder := unittest.NewEpochBuilder(suite.T(), mutableState, state) + + epochBuilder. + BuildEpoch(). + CompleteEpoch() + + // get heights of each phase in built epochs + epoch1, ok := epochBuilder.EpochHeights(1) + require.True(suite.T(), ok) + + snapshot := state.AtHeight(epoch1.FinalHeight()) + suite.state.On("Final").Return(snapshot) + test(snapshot) + }, + ) +} + +func (suite *Suite) TestGetSystemTransactionResult_Local_HappyPath() { + block := unittest.BlockFixture() + sysTx, err := blueprints.SystemChunkTransaction(suite.chainID.Chain()) + suite.Require().NoError(err) + suite.Require().NotNil(sysTx) + txId := suite.systemTx.ID() + blockId := block.ID() + + suite.blocks. + On("ByID", blockId). + Return(block, nil). + Once() + + lightTxShouldFail := false + suite.lightTxResults. + On("ByBlockIDTransactionID", blockId, txId). + Return(&flow.LightTransactionResult{ + TransactionID: txId, + Failed: lightTxShouldFail, + ComputationUsed: 0, + }, nil). 
+ Once() + + // Set up the events storage mock + var eventsForTx []flow.Event + // expect a call to lookup events by block ID and transaction ID + suite.events.On("ByBlockIDTransactionID", blockId, txId).Return(eventsForTx, nil) + + // Set up the state and snapshot mocks + suite.state.On("Sealed").Return(suite.snapshot, nil) + suite.snapshot.On("Head").Return(block.ToHeader(), nil) + + // create a mock index reporter + reporter := syncmock.NewIndexReporter(suite.T()) + reporter.On("LowestIndexedHeight").Return(block.Height, nil) + reporter.On("HighestIndexedHeight").Return(block.Height+10, nil) + + indexReporter := index.NewReporter() + err = indexReporter.Initialize(reporter) + suite.Require().NoError(err) + + // Set up the backend parameters and the backend instance + params := suite.defaultTransactionsParams() + params.EventsIndex = index.NewEventsIndex(indexReporter, suite.events) + params.TxResultsIndex = index.NewTransactionResultsIndex(indexReporter, suite.lightTxResults) + params.TxProvider = provider.NewLocalTransactionProvider( + params.State, + params.Collections, + params.Blocks, + params.EventsIndex, + params.TxResultsIndex, + params.TxErrorMessageProvider, + params.SystemTxID, + params.TxStatusDeriver, + params.ChainID, + params.ScheduledCallbacksEnabled, + ) + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + response, err := txBackend.GetSystemTransactionResult(context.Background(), flow.ZeroID, blockId, entities.EventEncodingVersion_JSON_CDC_V0) + suite.assertTransactionResultResponse(err, response, *block, txId, lightTxShouldFail, eventsForTx) +} + +// TestGetSystemTransactionResult_BlockNotFound tests GetSystemTransactionResult function when block was not found. +func (suite *Suite) TestGetSystemTransactionResult_BlockNotFound() { + block := unittest.BlockFixture() + suite.blocks. + On("ByID", block.ID()). + Return(nil, storage.ErrNotFound). + Once() + + params := suite.defaultTransactionsParams() + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + res, err := txBackend.GetSystemTransactionResult( + context.Background(), + flow.ZeroID, + block.ID(), + entities.EventEncodingVersion_JSON_CDC_V0, + ) + + suite.Require().Nil(res) + suite.Require().Error(err) + suite.Require().Equal(err, status.Errorf(codes.NotFound, "not found: %v", fmt.Errorf("key not found"))) +} + +// TestGetSystemTransactionResult_FailedEncodingConversion tests the GetSystemTransactionResult function with different +// event encoding versions. +func (suite *Suite) TestGetSystemTransactionResult_FailedEncodingConversion() { + block := unittest.BlockFixture() + blockID := block.ID() + + _, fixedENIDs := suite.setupReceipts(block) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + + suite.snapshot.On("Head").Return(block.ToHeader(), nil) + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) + suite.state.On("Sealed").Return(suite.snapshot, nil) + suite.state.On("Final").Return(suite.snapshot, nil) + + // block storage returns the corresponding block + suite.blocks. + On("ByID", blockID). + Return(block, nil). 
+ Once() + + // create empty events + eventsPerBlock := 10 + eventMessages := make([]*entities.Event, eventsPerBlock) + + systemTxID := suite.systemTx.ID() + expectedRequest := &execproto.GetTransactionResultRequest{ + BlockId: blockID[:], + TransactionId: systemTxID[:], + } + exeEventResp := &execproto.GetTransactionResultResponse{ + Events: eventMessages, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + } + + suite.executionAPIClient. + On("GetTransactionResult", mock.Anything, expectedRequest). + Return(exeEventResp, nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + params := suite.defaultTransactionsParams() + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + res, err := txBackend.GetSystemTransactionResult( + context.Background(), + flow.ZeroID, + block.ID(), + entities.EventEncodingVersion_CCF_V0, + ) + + suite.Require().Nil(res) + suite.Require().Error(err) + suite.Require().Equal(err, status.Errorf(codes.Internal, "failed to convert events to message: %v", + fmt.Errorf("conversion from format JSON_CDC_V0 to CCF_V0 is not supported"))) +} + +// TestGetTransactionResult_FromStorage tests the retrieval of a transaction result (flow.TransactionResult) from storage +// instead of requesting it from the Execution Node. +func (suite *Suite) TestGetTransactionResult_FromStorage() { + // Create fixtures for block, transaction, and collection + transaction := unittest.TransactionBodyFixture() + col := unittest.CollectionFromTransactions(&transaction) + guarantee := &flow.CollectionGuarantee{CollectionID: col.ID()} + block := unittest.BlockFixture( + unittest.Block.WithPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantee))), + ) + txId := transaction.ID() + blockId := block.ID() + + suite.blocks. + On("ByID", blockId). + Return(block, nil) + + suite.lightTxResults.On("ByBlockIDTransactionID", blockId, txId). + Return(&flow.LightTransactionResult{ + TransactionID: txId, + Failed: true, + ComputationUsed: 0, + }, nil) + + suite.transactions. + On("ByID", txId). + Return(&transaction, nil) + + // Set up the light collection and mock the behavior of the collections object + lightCol := col.Light() + suite.collections.On("LightByID", col.ID()).Return(lightCol, nil) + + // Set up the events storage mock + totalEvents := 5 + eventsForTx := unittest.EventsFixture(totalEvents) + eventMessages := make([]*entities.Event, totalEvents) + for j, event := range eventsForTx { + eventMessages[j] = convert.EventToMessage(event) + } + // expect a call to lookup events by block ID and transaction ID + suite.events.On("ByBlockIDTransactionID", blockId, txId).Return(eventsForTx, nil) + + // Set up the state and snapshot mocks + _, fixedENIDs := suite.setupReceipts(block) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + + suite.state.On("Final").Return(suite.snapshot, nil) + suite.state.On("Sealed").Return(suite.snapshot, nil) + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) + suite.snapshot.On("Head").Return(block.ToHeader(), nil) + + suite.reporter.On("LowestIndexedHeight").Return(block.Height, nil) + suite.reporter.On("HighestIndexedHeight").Return(block.Height+10, nil) + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). 
+        Once()
+
+    // Set up the expected error message for the execution node response
+    exeEventReq := &execproto.GetTransactionErrorMessageRequest{
+        BlockId:       blockId[:],
+        TransactionId: txId[:],
+    }
+    exeEventResp := &execproto.GetTransactionErrorMessageResponse{
+        TransactionId: txId[:],
+        ErrorMessage:  expectedErrorMsg,
+    }
+    suite.executionAPIClient.
+        On("GetTransactionErrorMessage", mock.Anything, exeEventReq).
+        Return(exeEventResp, nil).
+        Once()
+
+    params := suite.defaultTransactionsParams()
+    params.TxErrorMessageProvider = error_messages.NewTxErrorMessageProvider(
+        params.Log,
+        nil,
+        params.TxResultsIndex,
+        params.ConnFactory,
+        params.NodeCommunicator,
+        params.NodeProvider,
+    )
+    params.TxProvider = provider.NewLocalTransactionProvider(
+        params.State,
+        params.Collections,
+        params.Blocks,
+        params.EventsIndex,
+        params.TxResultsIndex,
+        params.TxErrorMessageProvider,
+        params.SystemTxID,
+        params.TxStatusDeriver,
+        params.ChainID,
+        params.ScheduledCallbacksEnabled,
+    )
+
+    txBackend, err := NewTransactionsBackend(params)
+    suite.Require().NoError(err)
+
+    response, err := txBackend.GetTransactionResult(context.Background(), txId, blockId, flow.ZeroID, entities.EventEncodingVersion_JSON_CDC_V0)
+    suite.assertTransactionResultResponse(err, response, *block, txId, true, eventsForTx)
+
+    suite.reporter.AssertExpectations(suite.T())
+    suite.connectionFactory.AssertExpectations(suite.T())
+    suite.executionAPIClient.AssertExpectations(suite.T())
+    suite.blocks.AssertExpectations(suite.T())
+    suite.events.AssertExpectations(suite.T())
+    suite.state.AssertExpectations(suite.T())
+}
+
+// TestTransactionByIndexFromStorage tests the retrieval of a transaction result (flow.TransactionResult)
+// by index, served from storage instead of being requested from the Execution Node.
+func (suite *Suite) TestTransactionByIndexFromStorage() {
+    // Create fixtures for block, transaction, and collection
+    transaction := unittest.TransactionBodyFixture()
+    col := unittest.CollectionFromTransactions(&transaction)
+    guarantee := &flow.CollectionGuarantee{CollectionID: col.ID()}
+    block := unittest.BlockFixture(
+        unittest.Block.WithPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantee))),
+    )
+    blockId := block.ID()
+    txId := transaction.ID()
+    txIndex := rand.Uint32()
+
+    // Set up the light collection and mock the behavior of the collections object
+    lightCol := col.Light()
+    suite.collections.On("LightByID", col.ID()).Return(lightCol, nil)
+
+    // Mock the behavior of the blocks and lightTxResults objects
+    suite.blocks.
+        On("ByID", blockId).
+        Return(block, nil)
+
+    suite.lightTxResults.On("ByBlockIDTransactionIndex", blockId, txIndex).
+ Return(&flow.LightTransactionResult{ + TransactionID: txId, + Failed: true, + ComputationUsed: 0, + }, nil) + + // Set up the events storage mock + totalEvents := 5 + eventsForTx := unittest.EventsFixture(totalEvents) + eventMessages := make([]*entities.Event, totalEvents) + for j, event := range eventsForTx { + eventMessages[j] = convert.EventToMessage(event) + } + + // expect a call to lookup events by block ID and transaction ID + suite.events.On("ByBlockIDTransactionIndex", blockId, txIndex).Return(eventsForTx, nil) + + // Set up the state and snapshot mocks + _, fixedENIDs := suite.setupReceipts(block) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + suite.state.On("Final").Return(suite.snapshot, nil) + suite.state.On("Sealed").Return(suite.snapshot, nil) + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) + suite.snapshot.On("Head").Return(block.ToHeader(), nil) + + suite.reporter.On("LowestIndexedHeight").Return(block.Height, nil) + suite.reporter.On("HighestIndexedHeight").Return(block.Height+10, nil) + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + params := suite.defaultTransactionsParams() + params.TxErrorMessageProvider = error_messages.NewTxErrorMessageProvider( + params.Log, + nil, + params.TxResultsIndex, + params.ConnFactory, + params.NodeCommunicator, + params.NodeProvider, + ) + params.TxProvider = provider.NewLocalTransactionProvider( + params.State, + params.Collections, + params.Blocks, + params.EventsIndex, + params.TxResultsIndex, + params.TxErrorMessageProvider, + params.SystemTxID, + params.TxStatusDeriver, + params.ChainID, + params.ScheduledCallbacksEnabled, + ) + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + // Set up the expected error message for the execution node response + exeEventReq := &execproto.GetTransactionErrorMessageByIndexRequest{ + BlockId: blockId[:], + Index: txIndex, + } + + exeEventResp := &execproto.GetTransactionErrorMessageResponse{ + TransactionId: txId[:], + ErrorMessage: expectedErrorMsg, + } + + suite.executionAPIClient. + On("GetTransactionErrorMessageByIndex", mock.Anything, exeEventReq). + Return(exeEventResp, nil). + Once() + + response, err := txBackend.GetTransactionResultByIndex(context.Background(), blockId, txIndex, entities.EventEncodingVersion_JSON_CDC_V0) + suite.assertTransactionResultResponse(err, response, *block, txId, true, eventsForTx) +} + +// TestTransactionResultsByBlockIDFromStorage tests the retrieval of transaction results ([]flow.TransactionResult) +// by block ID from storage instead of requesting from the Execution Node. +func (suite *Suite) TestTransactionResultsByBlockIDFromStorage() { + // Create fixtures for the block and collection + col := unittest.CollectionFixture(2) + guarantee := &flow.CollectionGuarantee{CollectionID: col.ID()} + block := unittest.BlockFixture( + unittest.Block.WithPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantee))), + ) + blockId := block.ID() + + // Mock the behavior of the blocks, collections and light transaction results objects + suite.blocks. + On("ByID", blockId). + Return(block, nil) + lightCol := col.Light() + suite.collections. + On("LightByID", mock.Anything). + Return(lightCol, nil). 
+ Once() + + lightTxResults := make([]flow.LightTransactionResult, len(lightCol.Transactions)) + for i, txID := range lightCol.Transactions { + lightTxResults[i] = flow.LightTransactionResult{ + TransactionID: txID, + Failed: false, + ComputationUsed: 0, + } + } + // simulate the system tx + lightTxResults = append(lightTxResults, flow.LightTransactionResult{ + TransactionID: suite.systemTx.ID(), + Failed: false, + ComputationUsed: 10, + }) + + // Mark the first transaction as failed + lightTxResults[0].Failed = true + suite.lightTxResults. + On("ByBlockID", blockId). + Return(lightTxResults, nil). + Once() + + // Set up the events storage mock + totalEvents := 5 + eventsForTx := unittest.EventsFixture(totalEvents) + eventMessages := make([]*entities.Event, totalEvents) + for j, event := range eventsForTx { + eventMessages[j] = convert.EventToMessage(event) + } + + // expect a call to lookup events by block ID and transaction ID + suite.events. + On("ByBlockIDTransactionID", blockId, mock.Anything). + Return(eventsForTx, nil) + + // Set up the state and snapshot mocks + _, fixedENIDs := suite.setupReceipts(block) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + suite.state.On("Final").Return(suite.snapshot, nil) + suite.state.On("Sealed").Return(suite.snapshot, nil) + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) + suite.snapshot.On("Head").Return(block.ToHeader(), nil) + + suite.reporter.On("LowestIndexedHeight").Return(block.Height, nil) + suite.reporter.On("HighestIndexedHeight").Return(block.Height+10, nil) + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + params := suite.defaultTransactionsParams() + params.TxErrorMessageProvider = error_messages.NewTxErrorMessageProvider( + params.Log, + nil, + params.TxResultsIndex, + params.ConnFactory, + params.NodeCommunicator, + params.NodeProvider, + ) + params.TxProvider = provider.NewLocalTransactionProvider( + params.State, + params.Collections, + params.Blocks, + params.EventsIndex, + params.TxResultsIndex, + params.TxErrorMessageProvider, + params.SystemTxID, + params.TxStatusDeriver, + params.ChainID, + params.ScheduledCallbacksEnabled, + ) + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + // Set up the expected error message for the execution node response + exeEventReq := &execproto.GetTransactionErrorMessagesByBlockIDRequest{ + BlockId: blockId[:], + } + res := &execproto.GetTransactionErrorMessagesResponse_Result{ + TransactionId: lightTxResults[0].TransactionID[:], + ErrorMessage: expectedErrorMsg, + Index: 1, + } + exeEventResp := &execproto.GetTransactionErrorMessagesResponse{ + Results: []*execproto.GetTransactionErrorMessagesResponse_Result{ + res, + }, + } + suite.executionAPIClient. + On("GetTransactionErrorMessagesByBlockID", mock.Anything, exeEventReq). + Return(exeEventResp, nil). 
+ Once() + + response, err := txBackend.GetTransactionResultsByBlockID(context.Background(), blockId, entities.EventEncodingVersion_JSON_CDC_V0) + suite.Require().NoError(err) + suite.Assert().Equal(len(lightTxResults), len(response)) + + // Assertions for each transaction result in the response + for i, responseResult := range response { + lightTx := lightTxResults[i] + suite.assertTransactionResultResponse(err, responseResult, *block, lightTx.TransactionID, lightTx.Failed, eventsForTx) + } +} + +func (suite *Suite) TestGetTransactionsByBlockID() { + // Create fixtures + col := unittest.CollectionFixture(3) + guarantee := &flow.CollectionGuarantee{CollectionID: col.ID()} + block := unittest.BlockFixture( + unittest.Block.WithPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantee))), + ) + blockID := block.ID() + + // Create PendingExecution events for scheduled callbacks + pendingExecutionEvents := suite.createPendingExecutionEvents(2) // 2 callbacks + + // Reconstruct expected system collection to get the actual transaction IDs + expectedSystemCollection, err := blueprints.SystemCollection(suite.chainID.Chain(), pendingExecutionEvents) + suite.Require().NoError(err) + + // Expected transaction counts + expectedUserTxCount := len(col.Transactions) + expectedSystemTxCount := len(expectedSystemCollection.Transactions) + expectedTotalCount := expectedUserTxCount + expectedSystemTxCount + + // Test with Local Provider + suite.Run("LocalProvider", func() { + // Mock the blocks storage + suite.blocks. + On("ByID", blockID). + Return(block, nil). + Once() + + // Mock the collections storage + suite.collections. + On("ByID", col.ID()). + Return(&col, nil). + Once() + + // Mock the events storage to return PendingExecution events + suite.events. + On("ByBlockID", blockID). + Return(pendingExecutionEvents, nil). 
+ Once() + + suite.reporter.On("LowestIndexedHeight").Return(block.Height, nil) + suite.reporter.On("HighestIndexedHeight").Return(block.Height+10, nil) + + // Set up the backend parameters with local transaction provider + params := suite.defaultTransactionsParams() + + params.TxProvider = provider.NewLocalTransactionProvider( + params.State, + params.Collections, + params.Blocks, + params.EventsIndex, + params.TxResultsIndex, + params.TxErrorMessageProvider, + params.SystemTxID, + params.TxStatusDeriver, + params.ChainID, + params.ScheduledCallbacksEnabled, + ) + + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + // Call GetTransactionsByBlockID + transactions, err := txBackend.GetTransactionsByBlockID(context.Background(), blockID) + suite.Require().NoError(err) + + // Verify transaction count + suite.Require().Equal(expectedTotalCount, len(transactions), "expected %d transactions but got %d", expectedTotalCount, len(transactions)) + + // Verify user transactions + for i, tx := range col.Transactions { + suite.Assert().Equal(tx.ID(), transactions[i].ID(), "user transaction %d mismatch", i) + } + + // Verify system transactions + for i, expectedTx := range expectedSystemCollection.Transactions { + actualTx := transactions[expectedUserTxCount+i] + suite.Assert().Equal(expectedTx.ID(), actualTx.ID(), "system transaction %d mismatch", i) + } + }) + + // Test with Execution Node Provider + suite.Run("ExecutionNodeProvider", func() { + // Set up the state and snapshot mocks first + _, fixedENIDs := suite.setupReceipts(block) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + suite.state.On("Final").Return(suite.snapshot, nil).Maybe() + suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil).Maybe() + suite.snapshot.On("Head").Return(block.ToHeader(), nil).Maybe() + + // Mock the blocks storage + suite.blocks. + On("ByID", blockID). + Return(block, nil). + Once() + + // Mock the collections storage + suite.collections. + On("ByID", col.ID()). + Return(&col, nil). 
+            Once()
+
+        suite.setupExecutionGetEventsRequest(blockID, block.Height, pendingExecutionEvents)
+
+        // Set up the backend parameters with EN transaction provider
+        params := suite.defaultTransactionsParams()
+        params.TxProvider = provider.NewENTransactionProvider(
+            params.Log,
+            params.State,
+            params.Collections,
+            params.ConnFactory,
+            params.NodeCommunicator,
+            params.NodeProvider,
+            params.TxStatusDeriver,
+            params.SystemTxID,
+            params.ChainID,
+            params.ScheduledCallbacksEnabled,
+        )
+
+        txBackend, err := NewTransactionsBackend(params)
+        suite.Require().NoError(err)
+
+        // Call GetTransactionsByBlockID
+        transactions, err := txBackend.GetTransactionsByBlockID(context.Background(), blockID)
+        suite.Require().NoError(err)
+
+        // Reconstruct the expected system collection from the same pending execution events:
+        // user transactions + process tx + execute callback txs + system chunk tx
+        expectedSystemCollectionEmpty, err := blueprints.SystemCollection(suite.chainID.Chain(), pendingExecutionEvents)
+        suite.Require().NoError(err)
+        expectedTotalCountEmpty := len(col.Transactions) + len(expectedSystemCollectionEmpty.Transactions)
+
+        // Verify transaction count
+        suite.Assert().Equal(expectedTotalCountEmpty, len(transactions))
+
+        // Verify user transactions
+        for i, tx := range col.Transactions {
+            suite.Assert().Equal(tx.ID(), transactions[i].ID())
+        }
+
+        // Verify system transactions (process + execute callbacks + system chunk)
+        for i, expectedTx := range expectedSystemCollectionEmpty.Transactions {
+            actualTx := transactions[len(col.Transactions)+i]
+            suite.Assert().Equal(expectedTx.ID(), actualTx.ID())
+        }
+    })
+}
+
+func (suite *Suite) TestTransactionResultsByBlockIDFromExecutionNode() {
+    // Create fixtures for the block and collection
+    col := unittest.CollectionFixture(2)
+    guarantee := &flow.CollectionGuarantee{CollectionID: col.ID()}
+    block := unittest.BlockFixture(
+        unittest.Block.WithPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantee))),
+    )
+    blockId := block.ID()
+
+    // Mock the behavior of the blocks, collections and light transaction results objects
+    suite.blocks.
+        On("ByID", blockId).
+        Return(block, nil)
+    lightCol := col.Light()
+    suite.collections.
+        On("LightByID", mock.Anything).
+        Return(lightCol, nil).
+ Once() + + // Execute callback transactions will be reconstructed from PendingExecution events + // We don't create them manually - they'll be generated by blueprints.SystemCollection + // System collection will be reconstructed from events, so we don't need to pre-populate lightTxResults + + lightTxResults := make([]flow.LightTransactionResult, len(lightCol.Transactions)) + for i, txID := range lightCol.Transactions { + lightTxResults[i] = flow.LightTransactionResult{ + TransactionID: txID, + Failed: false, + ComputationUsed: 0, + } + } + + // Create PendingExecution events that would be emitted by the process callback transaction + pendingExecutionEvents := suite.createPendingExecutionEvents(2) // 2 callbacks + + // Convert PendingExecution events to protobuf messages for execution node response + pendingEventMessages := make([]*entities.Event, len(pendingExecutionEvents)) + for i, event := range pendingExecutionEvents { + pendingEventMessages[i] = convert.EventToMessage(event) + } + + // Reconstruct the expected system collection to get the actual transaction IDs + expectedSystemCollection, err := blueprints.SystemCollection(suite.chainID.Chain(), pendingExecutionEvents) + suite.Require().NoError(err) + + // Extract the expected transaction IDs in order: process, execute callbacks, system chunk + expectedSystemTxIDs := make([]flow.Identifier, len(expectedSystemCollection.Transactions)) + for i, tx := range expectedSystemCollection.Transactions { + expectedSystemTxIDs[i] = tx.ID() + } + + suite.Require().Equal(4, len(expectedSystemTxIDs), "should have 4 system transactions: process + 2 execute callbacks + system chunk") + + // Build the execution response with all transaction results including proper events + userTxResults := make([]*execproto.GetTransactionResultResponse, len(lightCol.Transactions)) + for i := 0; i < len(lightCol.Transactions); i++ { + userTxResults[i] = &execproto.GetTransactionResultResponse{ + Events: []*entities.Event{}, + } + } + + // System transaction results: process (with events), execute callback txs, system chunk + systemTxResults := []*execproto.GetTransactionResultResponse{ + // Process callback transaction with PendingExecution events + {Events: pendingEventMessages}, + // Execute callback transaction 1 + {Events: []*entities.Event{}}, + // Execute callback transaction 2 + {Events: []*entities.Event{}}, + // System chunk transaction + {Events: []*entities.Event{}}, + } + + allTxResults := append(userTxResults, systemTxResults...) + + // Set up execution node response with system transactions + // The execution node response should include: user txs + process tx (with PendingExecution events) + execute txs + system chunk tx + exeGetTxReq := &execproto.GetTransactionsByBlockIDRequest{ + BlockId: blockId[:], + } + exeGetTxResp := &execproto.GetTransactionResultsResponse{ + TransactionResults: allTxResults, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, + } + suite.executionAPIClient. + On("GetTransactionResultsByBlockID", mock.Anything, exeGetTxReq). + Return(exeGetTxResp, nil). + Once() + + // Set up the state and snapshot mocks + _, fixedENIDs := suite.setupReceipts(block) + suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs() + suite.state.On("Final").Return(suite.snapshot, nil) + suite.state.On("Sealed").Return(suite.snapshot, nil) + suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil) + suite.snapshot.On("Head").Return(block.ToHeader(), nil) + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). 
+ Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() + + params := suite.defaultTransactionsParams() + params.TxErrorMessageProvider = error_messages.NewTxErrorMessageProvider( + params.Log, + nil, + params.TxResultsIndex, + params.ConnFactory, + params.NodeCommunicator, + params.NodeProvider, + ) + + params.TxProvider = provider.NewENTransactionProvider( + params.Log, + params.State, + params.Collections, + params.ConnFactory, + params.NodeCommunicator, + params.NodeProvider, + params.TxStatusDeriver, + params.SystemTxID, + params.ChainID, + params.ScheduledCallbacksEnabled, + ) + txBackend, err := NewTransactionsBackend(params) + suite.Require().NoError(err) + + response, err := txBackend.GetTransactionResultsByBlockID(context.Background(), blockId, entities.EventEncodingVersion_CCF_V0) + suite.Require().NoError(err) + + // Expected total: user transactions + system transactions (process + 2 execute callbacks + system chunk) + expectedTotal := len(lightTxResults) + len(expectedSystemTxIDs) + suite.Assert().Equal(expectedTotal, len(response), "should have user txs + system txs") + + // Verify user transactions + userTxCount := len(lightCol.Transactions) + for i := 0; i < userTxCount; i++ { + suite.Assert().Equal(lightTxResults[i].TransactionID, response[i].TransactionID) + suite.Assert().Equal(block.Payload.Guarantees[0].CollectionID, response[i].CollectionID) + suite.Assert().Equal(block.ID(), response[i].BlockID) + suite.Assert().Equal(block.Height, response[i].BlockHeight) + suite.Assert().Equal(flow.TransactionStatusSealed, response[i].Status) + } + + // Verify system collection transactions (all should have ZeroID as collectionID) + systemTxCount := len(response) - userTxCount + suite.Assert().Equal(len(expectedSystemTxIDs), systemTxCount, "should have 4 system transactions: process + 2 execute callbacks + system chunk") + + for i := 0; i < systemTxCount; i++ { + systemTxIndex := userTxCount + i + suite.Assert().Equal(flow.ZeroID, response[systemTxIndex].CollectionID) + suite.Assert().Equal(block.ID(), response[systemTxIndex].BlockID) + suite.Assert().Equal(block.Height, response[systemTxIndex].BlockHeight) + suite.Assert().Equal(flow.TransactionStatusSealed, response[systemTxIndex].Status) + suite.Assert().Equal(expectedSystemTxIDs[i], response[userTxCount+i].TransactionID, "system transaction %d should match reconstructed ID", i) + } +} + +// TestTransactionRetry tests that the retry mechanism will send retries at specific times +func (suite *Suite) TestTransactionRetry() { + block := unittest.BlockFixture( + // Height needs to be at least DefaultTransactionExpiry before we start doing retries + unittest.Block.WithHeight(flow.DefaultTransactionExpiry + 1), + ) + transactionBody := unittest.TransactionBodyFixture(unittest.WithReferenceBlock(block.ID())) + headBlock := unittest.BlockFixture() + headBlock.Height = block.Height - 1 // head is behind the current block + suite.state.On("Final").Return(suite.snapshot, nil) + + suite.snapshot.On("Head").Return(headBlock.ToHeader(), nil) + snapshotAtBlock := protocolmock.NewSnapshot(suite.T()) + snapshotAtBlock.On("Head").Return(block.ToHeader(), nil) + suite.state.On("AtBlockID", block.ID()).Return(snapshotAtBlock, nil) + + // collection storage returns a not found error + suite.collections. + On("LightByTransactionID", transactionBody.ID()). 
+		Return(nil, storage.ErrNotFound)
+
+	client := accessmock.NewAccessAPIClient(suite.T())
+	params := suite.defaultTransactionsParams()
+	params.StaticCollectionRPCClient = client
+	txBackend, err := NewTransactionsBackend(params)
+	suite.Require().NoError(err)
+
+	retry := retrier.NewRetrier(
+		suite.log,
+		suite.blocks,
+		suite.collections,
+		txBackend,
+		txBackend.txStatusDeriver,
+	)
+	retry.RegisterTransaction(block.Height, &transactionBody)
+
+	client.On("SendTransaction", mock.Anything, mock.Anything).Return(&access.SendTransactionResponse{}, nil)
+
+	// Don't retry on every height
+	err = retry.Retry(block.Height + 1)
+	suite.Require().NoError(err)
+
+	client.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything)
+
+	// Retry every `retryFrequency`
+	err = retry.Retry(block.Height + retrier.RetryFrequency)
+	suite.Require().NoError(err)
+
+	client.AssertNumberOfCalls(suite.T(), "SendTransaction", 1)
+
+	// Do not retry if expired
+	err = retry.Retry(block.Height + retrier.RetryFrequency + flow.DefaultTransactionExpiry)
+	suite.Require().NoError(err)
+
+	// Should still have been called only once
+	client.AssertNumberOfCalls(suite.T(), "SendTransaction", 1)
+}
+
+// TestSuccessfulTransactionsDontRetry tests that the retry mechanism does not re-send
+// transactions that have already been included in a block
+func (suite *Suite) TestSuccessfulTransactionsDontRetry() {
+	collection := unittest.CollectionFixture(1)
+	light := collection.Light()
+	transactionBody := collection.Transactions[0]
+	txID := transactionBody.ID()
+
+	block := unittest.BlockFixture()
+	blockID := block.ID()
+
+	// setup chain state
+	_, fixedENIDs := suite.setupReceipts(block)
+	suite.fixedExecutionNodeIDs = fixedENIDs.NodeIDs()
+
+	suite.state.On("Final").Return(suite.snapshot, nil)
+	suite.transactions.On("ByID", transactionBody.ID()).Return(transactionBody, nil)
+	suite.collections.On("LightByTransactionID", transactionBody.ID()).Return(light, nil)
+	suite.blocks.On("ByCollectionID", collection.ID()).Return(block, nil)
+	suite.snapshot.On("Identities", mock.Anything).Return(fixedENIDs, nil)
+
+	exeEventReq := execproto.GetTransactionResultRequest{
+		BlockId:       blockID[:],
+		TransactionId: txID[:],
+	}
+	exeEventResp := execproto.GetTransactionResultResponse{
+		Events: nil,
+	}
+	suite.executionAPIClient.
+		On("GetTransactionResult", context.Background(), &exeEventReq).
+		Return(&exeEventResp, status.Errorf(codes.NotFound, "not found")).
+		Times(len(fixedENIDs)) // should call each EN once
+
+	suite.connectionFactory.
+		On("GetExecutionAPIClient", mock.Anything).
+		Return(suite.executionAPIClient, &mocks.MockCloser{}, nil).
+		Times(len(fixedENIDs))
+
+	params := suite.defaultTransactionsParams()
+	client := accessmock.NewAccessAPIClient(suite.T())
+	params.StaticCollectionRPCClient = client
+	txBackend, err := NewTransactionsBackend(params)
+	suite.Require().NoError(err)
+
+	retry := retrier.NewRetrier(
+		suite.log,
+		suite.blocks,
+		suite.collections,
+		txBackend,
+		txBackend.txStatusDeriver,
+	)
+	retry.RegisterTransaction(block.Height, transactionBody)
+
+	// first call - the block under test is above the sealed head, but the execution node does not know about the tx
+	result, err := txBackend.GetTransactionResult(
+		context.Background(),
+		txID,
+		flow.ZeroID,
+		flow.ZeroID,
+		entities.EventEncodingVersion_JSON_CDC_V0,
+	)
+	suite.Require().NoError(err)
+	suite.Require().NotNil(result)
+
+	// status should be finalized since the sealed block height is lower
+	suite.Assert().Equal(flow.TransactionStatusFinalized, result.Status)
+
+	// Don't retry when block is finalized
+	err = retry.Retry(block.Height + 1)
+	suite.Require().NoError(err)
+
+	client.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything)
+
+	// Don't retry when block is finalized
+	err = retry.Retry(block.Height + retrier.RetryFrequency)
+	suite.Require().NoError(err)
+
+	client.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything)
+
+	// Don't retry when block is finalized
+	err = retry.Retry(block.Height + retrier.RetryFrequency + flow.DefaultTransactionExpiry)
+	suite.Require().NoError(err)
+
+	// Should still never have been called
+	client.AssertNotCalled(suite.T(), "SendTransaction", mock.Anything, mock.Anything)
+}
+
+func (suite *Suite) setupReceipts(block *flow.Block) ([]*flow.ExecutionReceipt, flow.IdentityList) {
+	ids := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution))
+	receipt1 := unittest.ReceiptForBlockFixture(block)
+	receipt1.ExecutorID = ids[0].NodeID
+	receipt2 := unittest.ReceiptForBlockFixture(block)
+	receipt2.ExecutorID = ids[1].NodeID
+	receipt1.ExecutionResult = receipt2.ExecutionResult
+
+	receipts := flow.ExecutionReceiptList{receipt1, receipt2}
+	suite.receipts.
+		On("ByBlockID", block.ID()).
+		Return(receipts, nil)
+
+	return receipts, ids
+}
+
+func (suite *Suite) assertTransactionResultResponse(
+	err error,
+	response *accessmodel.TransactionResult,
+	block flow.Block,
+	txId flow.Identifier,
+	txFailed bool,
+	eventsForTx []flow.Event,
+) {
+	suite.Require().NoError(err)
+	suite.Assert().Equal(block.ID(), response.BlockID)
+	suite.Assert().Equal(block.Height, response.BlockHeight)
+	suite.Assert().Equal(txId, response.TransactionID)
+	if txId == suite.systemTx.ID() {
+		suite.Assert().Equal(flow.ZeroID, response.CollectionID)
+	} else {
+		suite.Assert().Equal(block.Payload.Guarantees[0].CollectionID, response.CollectionID)
+	}
+	suite.Assert().Equal(len(eventsForTx), len(response.Events))
+	// When an error occurred during transaction execution, the status code should be 1
+	if txFailed {
+		suite.Assert().Equal(uint(1), response.StatusCode)
+		suite.Assert().Equal(expectedErrorMsg, response.ErrorMessage)
+	} else {
+		suite.Assert().Equal(uint(0), response.StatusCode)
+		suite.Assert().Equal("", response.ErrorMessage)
+	}
+	suite.Assert().Equal(flow.TransactionStatusSealed, response.Status)
+}
+
+// createPendingExecutionEvents creates properly formatted PendingExecution events
+// that blueprints.SystemCollection expects for reconstructing the system collection.
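+// Each event carries the type string
+// "A.<FlowTransactionSchedulerAddress>.FlowTransactionScheduler.PendingExecution" and a
+// CCF-encoded payload with the fields id, priority, executionEffort, fees, and
+// callbackOwner; the concrete values used here are arbitrary test fixtures.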
+func (suite *Suite) createPendingExecutionEvents(numCallbacks int) []flow.Event { + events := make([]flow.Event, numCallbacks) + + // Get system contracts for the test chain + env := systemcontracts.SystemContractsForChain(suite.chainID).AsTemplateEnv() + + for i := 0; i < numCallbacks; i++ { + // Create the PendingExecution event as it would be emitted by the process callback transaction + const processedEventTypeTemplate = "A.%v.FlowTransactionScheduler.PendingExecution" + eventTypeString := fmt.Sprintf(processedEventTypeTemplate, env.FlowTransactionSchedulerAddress) + + // Create Cadence event type + loc, err := cadenceCommon.HexToAddress(env.FlowTransactionSchedulerAddress) + suite.Require().NoError(err) + location := cadenceCommon.NewAddressLocation(nil, loc, "PendingExecution") + + eventType := cadence.NewEventType( + location, + "PendingExecution", + []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + {Identifier: "fees", Type: cadence.UFix64Type}, + {Identifier: "callbackOwner", Type: cadence.AddressType}, + }, + nil, + ) + + fees, err := cadence.NewUFix64("0.0") + suite.Require().NoError(err) + + // Create the Cadence event with proper values + event := cadence.NewEvent( + []cadence.Value{ + cadence.NewUInt64(uint64(i + 1)), // id: unique callback ID + cadence.NewUInt8(1), // priority + cadence.NewUInt64(uint64((i+1)*100 + 100)), // executionEffort (200, 300, etc.) + fees, // fees: 0.0 + cadence.NewAddress([8]byte{}), // callbackOwner + }, + ).WithType(eventType) + + // Encode the event using CCF + payload, err := ccf.Encode(event) + suite.Require().NoError(err) + + // Create the Flow event + events[i] = flow.Event{ + Type: flow.EventType(eventTypeString), + TransactionID: unittest.IdentifierFixture(), // Process callback transaction ID + TransactionIndex: 0, + EventIndex: uint32(i), + Payload: payload, + } + } + + return events +} + +func (suite *Suite) setupExecutionGetEventsRequest(blockID flow.Identifier, blockHeight uint64, events []flow.Event) { + eventMessages := make([]*entities.Event, len(events)) + for i, event := range events { + eventMessages[i] = convert.EventToMessage(event) + } + + request := &execproto.GetEventsForBlockIDsRequest{ + Type: string(suite.processScheduledCallbackEventType), + BlockIds: [][]byte{blockID[:]}, + } + expectedResponse := &execproto.GetEventsForBlockIDsResponse{ + Results: []*execproto.GetEventsForBlockIDsResponse_Result{ + { + BlockId: blockID[:], + BlockHeight: blockHeight, + Events: eventMessages, + }, + }, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, + } + + suite.executionAPIClient. + On("GetEventsForBlockIDs", mock.Anything, request). + Return(expectedResponse, nil). + Once() + + suite.connectionFactory. + On("GetExecutionAPIClient", mock.Anything). + Return(suite.executionAPIClient, &mocks.MockCloser{}, nil). + Once() +} diff --git a/engine/access/rpc/connection/cache.go b/engine/access/rpc/connection/cache.go new file mode 100644 index 00000000000..3453134c611 --- /dev/null +++ b/engine/access/rpc/connection/cache.go @@ -0,0 +1,188 @@ +package connection + +import ( + "fmt" + "sync" + + lru "github.com/hashicorp/golang-lru/v2" + "github.com/onflow/crypto" + "github.com/rs/zerolog" + "go.uber.org/atomic" + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + + "github.com/onflow/flow-go/module" +) + +// CachedClient represents a gRPC client connection that is cached for reuse. 
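+// A CachedClient is shared by all callers hitting the same address. A typical use, with
+// a hypothetical connectFn of the shape GetConnected expects, looks roughly like:
+//
+//	client, err := cache.GetConnected(address, cfg, pubKey, connectFn)
+//	if err != nil { /* handle error */ }
+//	done := client.AddRequest() // register an in-flight request
+//	defer done()                // release it once the call returns
+//	conn := client.ClientConn() // issue gRPC calls on the shared connection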
+type CachedClient struct { + conn *grpc.ClientConn + address string + cfg Config + + cache *Cache + closeRequested *atomic.Bool + wg sync.WaitGroup + mu sync.RWMutex +} + +// ClientConn returns the underlying gRPC client connection. +func (cc *CachedClient) ClientConn() *grpc.ClientConn { + cc.mu.RLock() + defer cc.mu.RUnlock() + return cc.conn +} + +// Address returns the address of the remote server. +func (cc *CachedClient) Address() string { + return cc.address +} + +// CloseRequested returns true if the CachedClient has been marked for closure. +func (cc *CachedClient) CloseRequested() bool { + return cc.closeRequested.Load() +} + +// AddRequest increments the in-flight request counter for the CachedClient. +// It returns a function that should be called when the request completes to decrement the counter +func (cc *CachedClient) AddRequest() func() { + cc.wg.Add(1) + return cc.wg.Done +} + +// Invalidate removes the CachedClient from the cache and closes the connection. +func (cc *CachedClient) Invalidate() { + cc.cache.invalidate(cc.address) + + // Close the connection asynchronously to avoid blocking requests + go cc.Close() +} + +// Close closes the CachedClient connection. It marks the connection for closure and waits asynchronously for ongoing +// requests to complete before closing the connection. +func (cc *CachedClient) Close() { + // Mark the connection for closure + if !cc.closeRequested.CompareAndSwap(false, true) { + return + } + + // Obtain the lock to ensure that any connection attempts have completed + cc.mu.RLock() + conn := cc.conn + cc.mu.RUnlock() + + // If the initial connection attempt failed, conn will be nil + if conn == nil { + return + } + + // If there are ongoing requests, wait for them to complete asynchronously + // this avoids tearing down the connection while requests are in-flight resulting in errors + cc.wg.Wait() + + // Close the connection + conn.Close() +} + +// Cache represents a cache of CachedClient instances with a given maximum size. +type Cache struct { + cache *lru.Cache[string, *CachedClient] + maxSize int + + logger zerolog.Logger + metrics module.GRPCConnectionPoolMetrics +} + +// NewCache creates a new Cache with the specified maximum size and the underlying LRU cache. +func NewCache( + log zerolog.Logger, + metrics module.GRPCConnectionPoolMetrics, + maxSize int, +) (*Cache, error) { + cache, err := lru.NewWithEvict(maxSize, func(_ string, client *CachedClient) { + go client.Close() // close is blocking, so run in a goroutine + + log.Debug().Str("grpc_conn_evicted", client.address).Msg("closing grpc connection evicted from pool") + metrics.ConnectionFromPoolEvicted() + }) + + if err != nil { + return nil, fmt.Errorf("could not initialize connection pool cache: %w", err) + } + + return &Cache{ + cache: cache, + maxSize: maxSize, + logger: log, + metrics: metrics, + }, nil +} + +// GetConnected returns a CachedClient for the given address that has an active connection. +// If the address is not in the cache, it creates a new entry and connects. +func (c *Cache) GetConnected( + address string, + cfg Config, + networkPubKey crypto.PublicKey, + connectFn func(string, Config, crypto.PublicKey, *CachedClient) (*grpc.ClientConn, error), +) (*CachedClient, error) { + client := &CachedClient{ + address: address, + cfg: cfg, + closeRequested: atomic.NewBool(false), + cache: c, + } + + // Note: PeekOrAdd does not "visit" the existing entry, so we need to call Get explicitly + // to mark the entry as "visited" and update the LRU order. 
Unfortunately, the lru library + // doesn't have a GetOrAdd method, so this is the simplest way to achieve atomic get-or-add + val, existed, _ := c.cache.PeekOrAdd(address, client) + if existed { + client = val + _, _ = c.cache.Get(address) + c.metrics.ConnectionFromPoolReused() + } else { + c.metrics.ConnectionAddedToPool() + } + + client.mu.Lock() + defer client.mu.Unlock() + + // after getting the lock, check if the connection is still active + if client.conn != nil && client.conn.GetState() != connectivity.Shutdown { + return client, nil + } + + // if the connection is not setup yet or closed, create a new connection and cache it + conn, err := connectFn(client.address, client.cfg, networkPubKey, client) + if err != nil { + return nil, err + } + + c.metrics.NewConnectionEstablished() + c.metrics.TotalConnectionsInPool(uint(c.Len()), uint(c.MaxSize())) + + client.conn = conn + return client, nil +} + +// invalidate removes the CachedClient entry from the cache with the given address, and shuts +// down the connection. +func (c *Cache) invalidate(address string) { + if !c.cache.Remove(address) { + return + } + + c.logger.Debug().Str("cached_client_invalidated", address).Msg("invalidating cached client") + c.metrics.ConnectionFromPoolInvalidated() +} + +// Len returns the number of CachedClient entries in the cache. +func (c *Cache) Len() int { + return c.cache.Len() +} + +// MaxSize returns the maximum size of the cache. +func (c *Cache) MaxSize() int { + return c.maxSize +} diff --git a/engine/access/rpc/connection/cache_test.go b/engine/access/rpc/connection/cache_test.go new file mode 100644 index 00000000000..a54187307b8 --- /dev/null +++ b/engine/access/rpc/connection/cache_test.go @@ -0,0 +1,214 @@ +package connection + +import ( + "net" + "sync" + "testing" + "time" + + "github.com/onflow/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestCachedClientShutdown(t *testing.T) { + // Test that a completely uninitialized client can be closed without panics + t.Run("uninitialized client", func(t *testing.T) { + client := &CachedClient{ + closeRequested: atomic.NewBool(false), + } + client.Close() + assert.True(t, client.closeRequested.Load()) + }) + + // Test closing a client with no outstanding requests + // Close() should return quickly + t.Run("with no outstanding requests", func(t *testing.T) { + client := &CachedClient{ + closeRequested: atomic.NewBool(false), + conn: setupGRPCServer(t), + } + + unittest.RequireReturnsBefore(t, func() { + client.Close() + }, 100*time.Millisecond, "client timed out closing connection") + + assert.True(t, client.closeRequested.Load()) + }) + + // Test closing a client with outstanding requests waits for requests to complete + // Close() should block until the request completes + t.Run("with some outstanding requests", func(t *testing.T) { + client := &CachedClient{ + closeRequested: atomic.NewBool(false), + conn: setupGRPCServer(t), + } + done := client.AddRequest() + + doneCalled := atomic.NewBool(false) + go func() { + defer done() + time.Sleep(50 * time.Millisecond) + doneCalled.Store(true) + }() + + unittest.RequireReturnsBefore(t, func() { + client.Close() + }, 100*time.Millisecond, "client timed out closing connection") + + assert.True(t, client.closeRequested.Load()) + assert.True(t, doneCalled.Load()) + }) 
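+
+	// Note: Close blocks on the WaitGroup that AddRequest increments, so the subtest
+	// above only returns once the in-flight request has called done().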
+ + // Test closing a client that is already closing does not block + // Close() should return immediately + t.Run("already closing", func(t *testing.T) { + client := &CachedClient{ + closeRequested: atomic.NewBool(true), // close already requested + conn: setupGRPCServer(t), + } + done := client.AddRequest() + + doneCalled := atomic.NewBool(false) + go func() { + defer done() + + // use a long delay and require Close() to complete faster + time.Sleep(5 * time.Second) + doneCalled.Store(true) + }() + + // should return immediately + unittest.RequireReturnsBefore(t, func() { + client.Close() + }, 10*time.Millisecond, "client timed out closing connection") + + assert.True(t, client.closeRequested.Load()) + assert.False(t, doneCalled.Load()) + }) + + // Test closing a client that is locked during connection setup + // Close() should wait for the lock before shutting down + t.Run("connection setting up", func(t *testing.T) { + client := &CachedClient{ + closeRequested: atomic.NewBool(false), + } + + // simulate an in-progress connection setup + client.mu.Lock() + + go func() { + // unlock after setting up the connection + defer client.mu.Unlock() + + // pause before setting the connection to cause client.Close() to block + time.Sleep(100 * time.Millisecond) + client.conn = setupGRPCServer(t) + }() + + // should wait at least 100 milliseconds before returning + unittest.RequireReturnsBefore(t, func() { + client.Close() + }, 500*time.Millisecond, "client timed out closing connection") + + assert.True(t, client.closeRequested.Load()) + assert.NotNil(t, client.conn) + }) +} + +// Test that rapid connections and disconnects do not cause a panic. +func TestConcurrentConnectionsAndDisconnects(t *testing.T) { + logger := unittest.Logger() + metrics := metrics.NewNoopCollector() + + cache, err := NewCache(logger, metrics, 1) + require.NoError(t, err) + + connectionCount := 100_000 + conn := setupGRPCServer(t) + + cfg := DefaultCollectionConfig() + + t.Run("test concurrent connections", func(t *testing.T) { + wg := sync.WaitGroup{} + wg.Add(connectionCount) + callCount := atomic.NewInt32(0) + for i := 0; i < connectionCount; i++ { + go func() { + defer wg.Done() + cachedConn, err := cache.GetConnected("foo", cfg, nil, func(string, Config, crypto.PublicKey, *CachedClient) (*grpc.ClientConn, error) { + callCount.Inc() + return conn, nil + }) + require.NoError(t, err) + + done := cachedConn.AddRequest() + time.Sleep(1 * time.Millisecond) + done() + }() + } + unittest.RequireReturnsBefore(t, wg.Wait, time.Second, "timed out waiting for connections to finish") + + // the client should be cached, so only a single connection is created + assert.Equal(t, int32(1), callCount.Load()) + }) + + t.Run("test rapid connections and invalidations", func(t *testing.T) { + wg := sync.WaitGroup{} + wg.Add(connectionCount) + callCount := atomic.NewInt32(0) + for i := 0; i < connectionCount; i++ { + go func() { + defer wg.Done() + cachedConn, err := cache.GetConnected("foo", cfg, nil, func(string, Config, crypto.PublicKey, *CachedClient) (*grpc.ClientConn, error) { + callCount.Inc() + return conn, nil + }) + require.NoError(t, err) + + done := cachedConn.AddRequest() + time.Sleep(1 * time.Millisecond) + cachedConn.Invalidate() + done() + }() + } + wg.Wait() + + // since all connections are invalidated, the cache should be empty at the end + require.Eventually(t, func() bool { + return cache.Len() == 0 + }, time.Second, 20*time.Millisecond, "cache should be empty") + + // Many connections should be created, but some will be 
shared + assert.Greater(t, callCount.Load(), int32(1)) + assert.LessOrEqual(t, callCount.Load(), int32(connectionCount)) + }) +} + +// setupGRPCServer starts a dummy grpc server for connection tests +func setupGRPCServer(t *testing.T) *grpc.ClientConn { + l, err := net.Listen("tcp", net.JoinHostPort("localhost", "0")) + require.NoError(t, err) + + server := grpc.NewServer() + + t.Cleanup(func() { + server.Stop() + }) + + go func() { + err = server.Serve(l) + require.NoError(t, err) + }() + + conn, err := grpc.Dial(l.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + require.NoError(t, err) + + return conn +} diff --git a/engine/access/rpc/connection/connection.go b/engine/access/rpc/connection/connection.go new file mode 100644 index 00000000000..a97c2710386 --- /dev/null +++ b/engine/access/rpc/connection/connection.go @@ -0,0 +1,159 @@ +package connection + +import ( + "fmt" + "io" + "net" + "time" + + "github.com/onflow/crypto" + "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/rs/zerolog" + + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/module" +) + +// ConnectionFactory is an interface for creating access and execution API clients. +type ConnectionFactory interface { + // GetCollectionAPIClient gets an access API client for the specified address using the default CollectionGRPCPort, networkPubKey is optional, + // and it is used for secure gRPC connection. Can be nil for an unsecured connection. + // The returned io.Closer should close the connection after the call if no error occurred during client creation. + GetCollectionAPIClient(address string, networkPubKey crypto.PublicKey) (access.AccessAPIClient, io.Closer, error) + // GetAccessAPIClientWithPort gets an access API client for the specified address with port, networkPubKey is optional, + // and it is used for secure gRPC connection. Can be nil for an unsecured connection. + // The returned io.Closer should close the connection after the call if no error occurred during client creation. + GetAccessAPIClientWithPort(address string, networkPubKey crypto.PublicKey) (access.AccessAPIClient, io.Closer, error) + // GetExecutionAPIClient gets an execution API client for the specified address using the default ExecutionGRPCPort. + // The returned io.Closer should close the connection after the call if no error occurred during client creation. + GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, io.Closer, error) +} + +// ProxyConnectionFactory wraps an existing ConnectionFactory and allows getting API clients for a target address. +type ProxyConnectionFactory struct { + ConnectionFactory + targetAddress string +} + +// GetCollectionAPIClient gets an access API client for a target address using the default CollectionGRPCPort. +// The networkPubKey is the public key used for a secure gRPC connection. It can be nil for an unsecured connection. +// The returned io.Closer should close the connection after the call if no error occurred during client creation. +func (p *ProxyConnectionFactory) GetCollectionAPIClient(_ string, networkPubKey crypto.PublicKey) (access.AccessAPIClient, io.Closer, error) { + return p.ConnectionFactory.GetCollectionAPIClient(p.targetAddress, networkPubKey) +} + +// GetExecutionAPIClient gets an execution API client for a target address using the default ExecutionGRPCPort. 
+// The returned io.Closer should close the connection after the call if no error occurred during client creation.
+func (p *ProxyConnectionFactory) GetExecutionAPIClient(_ string) (execution.ExecutionAPIClient, io.Closer, error) {
+	return p.ConnectionFactory.GetExecutionAPIClient(p.targetAddress)
+}
+
+var _ ConnectionFactory = (*ConnectionFactoryImpl)(nil)
+
+type ConnectionFactoryImpl struct {
+	AccessMetrics module.AccessMetrics
+	Log           zerolog.Logger
+	Manager       Manager
+
+	AccessConfig     Config
+	ExecutionConfig  Config
+	CollectionConfig Config
+}
+
+type Config struct {
+	GRPCPort           uint
+	Timeout            time.Duration
+	MaxRequestMsgSize  uint
+	MaxResponseMsgSize uint
+}
+
+// DefaultAccessConfig returns the default access client config.
+func DefaultAccessConfig() Config {
+	return Config{
+		GRPCPort:           9000,
+		Timeout:            3 * time.Second,
+		MaxRequestMsgSize:  commonrpc.DefaultAccessMaxRequestSize,
+		MaxResponseMsgSize: commonrpc.DefaultAccessMaxResponseSize,
+	}
+}
+
+// DefaultCollectionConfig returns the default collection client config.
+func DefaultCollectionConfig() Config {
+	return Config{
+		GRPCPort:           9000,
+		Timeout:            3 * time.Second,
+		MaxRequestMsgSize:  commonrpc.DefaultCollectionMaxRequestSize,
+		MaxResponseMsgSize: commonrpc.DefaultCollectionMaxResponseSize,
+	}
+}
+
+// DefaultExecutionConfig returns the default execution client config.
+func DefaultExecutionConfig() Config {
+	return Config{
+		GRPCPort:           9000,
+		Timeout:            3 * time.Second,
+		MaxRequestMsgSize:  commonrpc.DefaultExecutionMaxRequestSize,
+		MaxResponseMsgSize: commonrpc.DefaultExecutionMaxResponseSize,
+	}
+}
+
+// GetCollectionAPIClient gets an access API client for the specified collection node address using
+// the default CollectionConfig.
+// The networkPubKey is the public key used for secure gRPC connection. Can be nil for an unsecured connection.
+// The returned io.Closer should close the connection after the call if no error occurred during client creation.
+func (cf *ConnectionFactoryImpl) GetCollectionAPIClient(address string, networkPubKey crypto.PublicKey) (access.AccessAPIClient, io.Closer, error) {
+	address, err := getGRPCAddress(address, cf.CollectionConfig.GRPCPort)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	conn, closer, err := cf.Manager.GetConnection(address, cf.CollectionConfig, networkPubKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return access.NewAccessAPIClient(conn), closer, nil
+}
+
+// GetAccessAPIClientWithPort gets an access API client for the specified address with port.
+// The networkPubKey is the public key used for secure gRPC connection. Can be nil for an unsecured connection.
+// The returned io.Closer should close the connection after the call if no error occurred during client creation.
+func (cf *ConnectionFactoryImpl) GetAccessAPIClientWithPort(address string, networkPubKey crypto.PublicKey) (access.AccessAPIClient, io.Closer, error) {
+	conn, closer, err := cf.Manager.GetConnection(address, cf.AccessConfig, networkPubKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return access.NewAccessAPIClient(conn), closer, nil
+}
+
+// GetExecutionAPIClient gets an execution API client for the specified address using the ExecutionConfig.
+// The returned io.Closer should close the connection after the call if no error occurred during client creation.
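+// The address is first translated to the node's gRPC address via getGRPCAddress before a
+// connection is acquired from the Manager.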
+func (cf *ConnectionFactoryImpl) GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, io.Closer, error) { + grpcAddress, err := getGRPCAddress(address, cf.ExecutionConfig.GRPCPort) + if err != nil { + return nil, nil, err + } + + conn, closer, err := cf.Manager.GetConnection(grpcAddress, cf.ExecutionConfig, nil) + if err != nil { + return nil, nil, err + } + + return execution.NewExecutionAPIClient(conn), closer, nil +} + +// getGRPCAddress translates the flow.Identity address to the GRPC address of the node by switching the port to the +// GRPC port from the libp2p port. +func getGRPCAddress(address string, grpcPort uint) (string, error) { + // Split hostname and port + hostnameOrIP, _, err := net.SplitHostPort(address) + if err != nil { + return "", err + } + // Use the hostname from the identity list and the GRPC port number as the one passed in as an argument. + grpcAddress := fmt.Sprintf("%s:%d", hostnameOrIP, grpcPort) + + return grpcAddress, nil +} diff --git a/engine/access/rpc/connection/connection_test.go b/engine/access/rpc/connection/connection_test.go new file mode 100644 index 00000000000..1e4488c109d --- /dev/null +++ b/engine/access/rpc/connection/connection_test.go @@ -0,0 +1,1083 @@ +package connection + +import ( + "context" + "crypto/rand" + "fmt" + "math/big" + "net" + "sync" + "testing" + "time" + + lru "github.com/hashicorp/golang-lru/v2" + "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/sony/gobreaker" + "github.com/stretchr/testify/assert" + testifymock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/status" + "pgregory.net/rapid" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/utils/grpcutils" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestProxyAccessAPI(t *testing.T) { + logger := unittest.Logger() + metrics := metrics.NewNoopCollector() + + // create a collection node + cn := newCollectionNode(t) + cn.start(t) + defer cn.stop(t) + + req := &access.PingRequest{} + expected := &access.PingResponse{} + cn.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*access.PingRequest")). 
+ Return(expected, nil) + + // create the factory + connectionFactory := new(ConnectionFactoryImpl) + // set the collection grpc port + connectionFactory.CollectionConfig = DefaultCollectionConfig() + connectionFactory.CollectionConfig.GRPCPort = cn.port + // set metrics reporting + connectionFactory.AccessMetrics = metrics + connectionFactory.Manager = NewManager( + logger, + connectionFactory.AccessMetrics, + nil, + CircuitBreakerConfig{}, + grpcutils.NoCompressor, + ) + + proxyConnectionFactory := ProxyConnectionFactory{ + ConnectionFactory: connectionFactory, + targetAddress: cn.listener.Addr().String(), + } + + // get a collection API client + client, conn, err := proxyConnectionFactory.GetCollectionAPIClient("foo", nil) + defer conn.Close() + assert.NoError(t, err) + + ctx := context.Background() + // make the call to the collection node + resp, err := client.Ping(ctx, req) + assert.NoError(t, err) + assert.IsType(t, expected, resp) +} + +func TestProxyExecutionAPI(t *testing.T) { + logger := unittest.Logger() + metrics := metrics.NewNoopCollector() + + // create an execution node + en := newExecutionNode(t) + en.start(t) + defer en.stop(t) + + req := &execution.PingRequest{} + expected := &execution.PingResponse{} + en.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*execution.PingRequest")). + Return(expected, nil) + + // create the factory + connectionFactory := new(ConnectionFactoryImpl) + // set the execution grpc port + connectionFactory.ExecutionConfig = DefaultExecutionConfig() + connectionFactory.ExecutionConfig.GRPCPort = en.port + + // set metrics reporting + connectionFactory.AccessMetrics = metrics + connectionFactory.Manager = NewManager( + logger, + connectionFactory.AccessMetrics, + nil, + CircuitBreakerConfig{}, + grpcutils.NoCompressor, + ) + + proxyConnectionFactory := ProxyConnectionFactory{ + ConnectionFactory: connectionFactory, + targetAddress: en.listener.Addr().String(), + } + + // get an execution API client + client, _, err := proxyConnectionFactory.GetExecutionAPIClient("foo") + assert.NoError(t, err) + + ctx := context.Background() + // make the call to the execution node + resp, err := client.Ping(ctx, req) + assert.NoError(t, err) + assert.IsType(t, expected, resp) +} + +func TestProxyAccessAPIConnectionReuse(t *testing.T) { + logger := unittest.Logger() + metrics := metrics.NewNoopCollector() + + // create a collection node + cn := newCollectionNode(t) + cn.start(t) + defer cn.stop(t) + + req := &access.PingRequest{} + expected := &access.PingResponse{} + cn.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*access.PingRequest")). 
+ Return(expected, nil) + + // create the factory + connectionFactory := new(ConnectionFactoryImpl) + // set the collection grpc port + connectionFactory.CollectionConfig = DefaultCollectionConfig() + connectionFactory.CollectionConfig.GRPCPort = cn.port + + // set the connection pool cache size + cacheSize := 1 + connectionCache, err := NewCache(logger, metrics, cacheSize) + require.NoError(t, err) + + // set metrics reporting + connectionFactory.AccessMetrics = metrics + connectionFactory.Manager = NewManager( + logger, + connectionFactory.AccessMetrics, + connectionCache, + CircuitBreakerConfig{}, + grpcutils.NoCompressor, + ) + + proxyConnectionFactory := ProxyConnectionFactory{ + ConnectionFactory: connectionFactory, + targetAddress: cn.listener.Addr().String(), + } + + // get a collection API client + _, closer, err := proxyConnectionFactory.GetCollectionAPIClient("foo", nil) + assert.Equal(t, connectionCache.Len(), 1) + assert.NoError(t, err) + assert.Nil(t, closer.Close()) + + var conn *grpc.ClientConn + res, ok := connectionCache.cache.Get(proxyConnectionFactory.targetAddress) + assert.True(t, ok) + conn = res.ClientConn() + + // check if api client can be rebuilt with retrieved connection + accessAPIClient := access.NewAccessAPIClient(conn) + ctx := context.Background() + resp, err := accessAPIClient.Ping(ctx, req) + assert.NoError(t, err) + assert.IsType(t, expected, resp) +} + +func TestProxyExecutionAPIConnectionReuse(t *testing.T) { + logger := unittest.Logger() + metrics := metrics.NewNoopCollector() + + // create an execution node + en := newExecutionNode(t) + en.start(t) + defer en.stop(t) + + req := &execution.PingRequest{} + expected := &execution.PingResponse{} + en.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*execution.PingRequest")). 
+ Return(expected, nil) + + // create the factory + connectionFactory := new(ConnectionFactoryImpl) + // set the execution grpc port + connectionFactory.ExecutionConfig = DefaultExecutionConfig() + connectionFactory.ExecutionConfig.GRPCPort = en.port + + // set the connection pool cache size + cacheSize := 5 + connectionCache, err := NewCache(logger, metrics, cacheSize) + require.NoError(t, err) + + // set metrics reporting + connectionFactory.AccessMetrics = metrics + connectionFactory.Manager = NewManager( + logger, + connectionFactory.AccessMetrics, + connectionCache, + CircuitBreakerConfig{}, + grpcutils.NoCompressor, + ) + + proxyConnectionFactory := ProxyConnectionFactory{ + ConnectionFactory: connectionFactory, + targetAddress: en.listener.Addr().String(), + } + + // get an execution API client + _, closer, err := proxyConnectionFactory.GetExecutionAPIClient("foo") + assert.Equal(t, connectionCache.Len(), 1) + assert.NoError(t, err) + assert.Nil(t, closer.Close()) + + var conn *grpc.ClientConn + res, ok := connectionCache.cache.Get(proxyConnectionFactory.targetAddress) + assert.True(t, ok) + conn = res.ClientConn() + + // check if api client can be rebuilt with retrieved connection + executionAPIClient := execution.NewExecutionAPIClient(conn) + ctx := context.Background() + resp, err := executionAPIClient.Ping(ctx, req) + assert.NoError(t, err) + assert.IsType(t, expected, resp) +} + +// TestExecutionNodeClientTimeout tests that the execution API client times out after the timeout duration +func TestExecutionNodeClientTimeout(t *testing.T) { + logger := unittest.Logger() + metrics := metrics.NewNoopCollector() + + timeout := 10 * time.Millisecond + + // create an execution node + en := newExecutionNode(t) + en.start(t) + defer en.stop(t) + + // setup the handler mock to not respond within the timeout + req := &execution.PingRequest{} + resp := &execution.PingResponse{} + en.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*execution.PingRequest")). + After(timeout+time.Second). 
+		Return(resp, nil)
+
+	// create the factory
+	connectionFactory := new(ConnectionFactoryImpl)
+	// set the execution config
+	connectionFactory.ExecutionConfig = DefaultExecutionConfig()
+	connectionFactory.ExecutionConfig.GRPCPort = en.port
+	connectionFactory.ExecutionConfig.Timeout = timeout
+
+	// set the connection pool cache size
+	cacheSize := 5
+	connectionCache, err := NewCache(logger, metrics, cacheSize)
+	require.NoError(t, err)
+
+	// set metrics reporting
+	connectionFactory.AccessMetrics = metrics
+	connectionFactory.Manager = NewManager(
+		logger,
+		connectionFactory.AccessMetrics,
+		connectionCache,
+		CircuitBreakerConfig{},
+		grpcutils.NoCompressor,
+	)
+
+	// create the execution API client
+	client, _, err := connectionFactory.GetExecutionAPIClient(en.listener.Addr().String())
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	// make the call to the execution node
+	_, err = client.Ping(ctx, req)
+
+	// assert that the client timed out
+	assert.Equal(t, codes.DeadlineExceeded, status.Code(err))
+}
+
+// TestCollectionNodeClientTimeout tests that the collection API client times out after the timeout duration
+func TestCollectionNodeClientTimeout(t *testing.T) {
+	logger := unittest.Logger()
+	metrics := metrics.NewNoopCollector()
+
+	timeout := 10 * time.Millisecond
+
+	// create a collection node
+	cn := newCollectionNode(t)
+	cn.start(t)
+	defer cn.stop(t)
+
+	// setup the handler mock to not respond within the timeout
+	req := &access.PingRequest{}
+	resp := &access.PingResponse{}
+	cn.handler.
+		On("Ping",
+			testifymock.Anything,
+			testifymock.AnythingOfType("*access.PingRequest")).
+		After(timeout+time.Second).
+		Return(resp, nil)
+
+	// create the factory
+	connectionFactory := new(ConnectionFactoryImpl)
+	// set the collection grpc config
+	connectionFactory.CollectionConfig = DefaultCollectionConfig()
+	connectionFactory.CollectionConfig.GRPCPort = cn.port
+	connectionFactory.CollectionConfig.Timeout = timeout
+
+	// set the connection pool cache size
+	cacheSize := 5
+	connectionCache, err := NewCache(logger, metrics, cacheSize)
+	require.NoError(t, err)
+
+	// set metrics reporting
+	connectionFactory.AccessMetrics = metrics
+	connectionFactory.Manager = NewManager(
+		logger,
+		connectionFactory.AccessMetrics,
+		connectionCache,
+		CircuitBreakerConfig{},
+		grpcutils.NoCompressor,
+	)
+
+	// create the collection API client
+	client, _, err := connectionFactory.GetCollectionAPIClient(cn.listener.Addr().String(), nil)
+	assert.NoError(t, err)
+
+	ctx := context.Background()
+	// make the call to the collection node
+	_, err = client.Ping(ctx, req)
+
+	// assert that the client timed out
+	assert.Equal(t, codes.DeadlineExceeded, status.Code(err))
+}
+
+// TestConnectionPoolFull tests that the LRU cache replaces connections when full
+func TestConnectionPoolFull(t *testing.T) {
+	logger := unittest.Logger()
+	metrics := metrics.NewNoopCollector()
+
+	// create three collection nodes
+	cn1, cn2, cn3 := newCollectionNode(t), newCollectionNode(t), newCollectionNode(t)
+	cn1.start(t)
+	cn2.start(t)
+	cn3.start(t)
+	defer cn1.stop(t)
+	defer cn2.stop(t)
+	defer cn3.stop(t)
+
+	// create the factory
+	connectionFactory := new(ConnectionFactoryImpl)
+	// set the collection grpc port
+	connectionFactory.CollectionConfig = DefaultCollectionConfig()
+	connectionFactory.CollectionConfig.GRPCPort = cn1.port
+
+	// set the connection pool cache size
+	cacheSize := 2
+	connectionCache, err := NewCache(logger, metrics, cacheSize)
+	require.NoError(t, err)
+
+	// set metrics reporting
+	connectionFactory.AccessMetrics = metrics
+	connectionFactory.Manager = NewManager(
+		logger,
+		connectionFactory.AccessMetrics,
+		connectionCache,
+		CircuitBreakerConfig{},
+		grpcutils.NoCompressor,
+	)
+
+	cn1Address := "foo1:123"
+	cn2Address := "foo2:123"
+	cn3Address := "foo3:123"
+
+	// Create and add first client to cache
+	_, _, err = connectionFactory.GetCollectionAPIClient(cn1Address, nil)
+	assert.Equal(t, connectionCache.Len(), 1)
+	assert.NoError(t, err)
+
+	// Create and add second client to cache
+	_, _, err = connectionFactory.GetCollectionAPIClient(cn2Address, nil)
+	assert.Equal(t, connectionCache.Len(), 2)
+	assert.NoError(t, err)
+
+	// Get the first client from cache.
+	_, _, err = connectionFactory.GetCollectionAPIClient(cn1Address, nil)
+	assert.Equal(t, connectionCache.Len(), 2)
+	assert.NoError(t, err)
+
+	// Create and add third client to cache, second client will be removed from cache
+	_, _, err = connectionFactory.GetCollectionAPIClient(cn3Address, nil)
+	assert.Equal(t, connectionCache.Len(), 2)
+	assert.NoError(t, err)
+
+	var hostnameOrIP string
+
+	hostnameOrIP, _, err = net.SplitHostPort(cn1Address)
+	require.NoError(t, err)
+	grpcAddress1 := fmt.Sprintf("%s:%d", hostnameOrIP, connectionFactory.CollectionConfig.GRPCPort)
+
+	hostnameOrIP, _, err = net.SplitHostPort(cn2Address)
+	require.NoError(t, err)
+	grpcAddress2 := fmt.Sprintf("%s:%d", hostnameOrIP, connectionFactory.CollectionConfig.GRPCPort)
+
+	hostnameOrIP, _, err = net.SplitHostPort(cn3Address)
+	require.NoError(t, err)
+	grpcAddress3 := fmt.Sprintf("%s:%d", hostnameOrIP, connectionFactory.CollectionConfig.GRPCPort)
+
+	assert.True(t, connectionCache.cache.Contains(grpcAddress1))
+	assert.False(t, connectionCache.cache.Contains(grpcAddress2))
+	assert.True(t, connectionCache.cache.Contains(grpcAddress3))
+}
+
+// TestConnectionPoolStale tests that a new connection will be established if the cached one is stale
+func TestConnectionPoolStale(t *testing.T) {
+	logger := unittest.Logger()
+	metrics := metrics.NewNoopCollector()
+
+	// create a collection node
+	cn := newCollectionNode(t)
+	cn.start(t)
+	defer cn.stop(t)
+
+	req := &access.PingRequest{}
+	expected := &access.PingResponse{}
+	cn.handler.
+		On("Ping",
+			testifymock.Anything,
+			testifymock.AnythingOfType("*access.PingRequest")).
+ Return(expected, nil) + + // create the factory + connectionFactory := new(ConnectionFactoryImpl) + // set the collection grpc port + connectionFactory.CollectionConfig = DefaultCollectionConfig() + connectionFactory.CollectionConfig.GRPCPort = cn.port + + // set the connection pool cache size + cacheSize := 5 + connectionCache, err := NewCache(logger, metrics, cacheSize) + require.NoError(t, err) + + // set metrics reporting + connectionFactory.AccessMetrics = metrics + connectionFactory.Manager = NewManager( + logger, + connectionFactory.AccessMetrics, + connectionCache, + CircuitBreakerConfig{}, + grpcutils.NoCompressor, + ) + + proxyConnectionFactory := ProxyConnectionFactory{ + ConnectionFactory: connectionFactory, + targetAddress: cn.listener.Addr().String(), + } + + // get a collection API client + client, _, err := proxyConnectionFactory.GetCollectionAPIClient("foo", nil) + assert.Equal(t, connectionCache.Len(), 1) + assert.NoError(t, err) + // close connection to simulate something "going wrong" with our stored connection + cachedClient, _ := connectionCache.cache.Get(proxyConnectionFactory.targetAddress) + + cachedClient.Invalidate() + cachedClient.Close() + + ctx := context.Background() + // make the call to the collection node (should fail, connection closed) + _, err = client.Ping(ctx, req) + assert.Error(t, err) + + // re-access, should replace stale connection in cache with new one + _, _, _ = proxyConnectionFactory.GetCollectionAPIClient("foo", nil) + assert.Equal(t, connectionCache.Len(), 1) + + var conn *grpc.ClientConn + res, ok := connectionCache.cache.Get(proxyConnectionFactory.targetAddress) + assert.True(t, ok) + conn = res.ClientConn() + + // check if api client can be rebuilt with retrieved connection + accessAPIClient := access.NewAccessAPIClient(conn) + ctx = context.Background() + resp, err := accessAPIClient.Ping(ctx, req) + assert.NoError(t, err) + assert.IsType(t, expected, resp) +} + +// TestExecutionNodeClientClosedGracefully tests the scenario where the execution node client is closed gracefully. +// +// Test Steps: +// - Generate a random number of requests and start goroutines to handle each request. +// - Invalidate the execution API client. +// - Wait for all goroutines to finish. +// - Verify that the number of completed requests matches the number of sent responses. +func TestExecutionNodeClientClosedGracefully(t *testing.T) { + logger := unittest.Logger() + metrics := metrics.NewNoopCollector() + + // Add createExecNode function to recreate it each time for rapid test + createExecNode := func() (*executionNode, func()) { + en := newExecutionNode(t) + en.start(t) + return en, func() { + en.stop(t) + } + } + + // Add rapid test, to check graceful close on different number of requests + rapid.Check(t, func(tt *rapid.T) { + en, closer := createExecNode() + defer closer() + + // setup the handler mock + req := &execution.PingRequest{} + resp := &execution.PingResponse{} + respSent := atomic.NewUint64(0) + en.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*execution.PingRequest")). + Run(func(_ testifymock.Arguments) { + respSent.Inc() + }). 
+ Return(resp, nil) + + // create the factory + connectionFactory := new(ConnectionFactoryImpl) + // set the execution grpc config + connectionFactory.ExecutionConfig = DefaultExecutionConfig() + connectionFactory.ExecutionConfig.GRPCPort = en.port + connectionFactory.ExecutionConfig.Timeout = time.Second + + // set the connection pool cache size + cacheSize := 1 + connectionCache, err := NewCache(logger, metrics, cacheSize) + require.NoError(t, err) + + // set metrics reporting + connectionFactory.AccessMetrics = metrics + connectionFactory.Manager = NewManager( + logger, + connectionFactory.AccessMetrics, + connectionCache, + CircuitBreakerConfig{}, + grpcutils.NoCompressor, + ) + + clientAddress := en.listener.Addr().String() + // create the execution API client + client, _, err := connectionFactory.GetExecutionAPIClient(clientAddress) + assert.NoError(t, err) + + ctx := context.Background() + + // Generate random number of requests + nofRequests := rapid.IntRange(10, 100).Draw(tt, "nofRequests") + reqCompleted := atomic.NewUint64(0) + + var waitGroup sync.WaitGroup + + for i := 0; i < nofRequests; i++ { + waitGroup.Add(1) + + // call Ping request from different goroutines + go func() { + defer waitGroup.Done() + _, err := client.Ping(ctx, req) + + if err == nil { + reqCompleted.Inc() + } else { + require.Equalf(t, codes.Unavailable, status.Code(err), "unexpected error: %v", err) + } + }() + } + + // Close connection + // connectionFactory.Manager.Remove(clientAddress) + + waitGroup.Wait() + + assert.Equal(t, reqCompleted.Load(), respSent.Load()) + }) +} + +// TestEvictingCacheClients tests the eviction of cached clients. +// It verifies that when a client is evicted from the cache, subsequent requests are handled correctly. +// +// Test Steps: +// - Call the gRPC method Ping +// - While the request is still in progress, remove the connection +// - Call the gRPC method GetNetworkParameters on the client immediately after eviction and assert the expected +// error response. +// - Wait for the client state to change from "Ready" to "Shutdown", indicating that the client connection was closed. +func TestEvictingCacheClients(t *testing.T) { + logger := unittest.Logger() + metrics := metrics.NewNoopCollector() + + // Create a new collection node for testing + cn := newCollectionNode(t) + cn.start(t) + defer cn.stop(t) + + // Channels used to synchronize test with grpc calls + startPing := make(chan struct{}) // notify Ping in progress + returnFromPing := make(chan struct{}) // notify OK to return from Ping + + // Set up mock handlers for Ping and GetNetworkParameters + pingReq := &access.PingRequest{} + pingResp := &access.PingResponse{} + cn.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*access.PingRequest")). 
+ Return( + func(context.Context, *access.PingRequest) *access.PingResponse { + close(startPing) + <-returnFromPing // keeps request open until returnFromPing is closed + return pingResp + }, + func(context.Context, *access.PingRequest) error { return nil }, + ) + + // Create the connection factory + connectionFactory := new(ConnectionFactoryImpl) + // Set the gRPC config + connectionFactory.CollectionConfig = DefaultCollectionConfig() + connectionFactory.CollectionConfig.GRPCPort = cn.port + connectionFactory.CollectionConfig.Timeout = 5 * time.Second + // Set the connection pool cache size + cacheSize := 1 + + connectionCache, err := NewCache(logger, metrics, cacheSize) + require.NoError(t, err) + + // create a non-blocking cache + connectionCache.cache, err = lru.NewWithEvict[string, *CachedClient](cacheSize, func(_ string, client *CachedClient) { + go client.Close() + }) + require.NoError(t, err) + + // set metrics reporting + connectionFactory.AccessMetrics = metrics + connectionFactory.Manager = NewManager( + logger, + connectionFactory.AccessMetrics, + connectionCache, + CircuitBreakerConfig{}, + grpcutils.NoCompressor, + ) + + clientAddress := cn.listener.Addr().String() + // Create the collection API client + client, _, err := connectionFactory.GetCollectionAPIClient(clientAddress, nil) + require.NoError(t, err) + + ctx := context.Background() + + // Retrieve the cached client from the cache + cachedClient, ok := connectionCache.cache.Get(clientAddress) + require.True(t, ok) + + // wait until the client connection is ready + require.Eventually(t, func() bool { + return cachedClient.ClientConn().GetState() == connectivity.Ready + }, 100*time.Millisecond, 10*time.Millisecond, "client timed out before ready") + + // Schedule the invalidation of the access API client while the Ping call is in progress + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + + <-startPing // wait until Ping is called + + // Invalidate the access API client + cachedClient.Invalidate() + + // Invalidate marks the connection for closure asynchronously, so give it some time to run + require.Eventually(t, func() bool { + return cachedClient.closeRequested.Load() + }, 100*time.Millisecond, 10*time.Millisecond, "client timed out closing connection") + + // Call a gRPC method on the client, requests should be blocked since the connection is invalidated + resp, err := client.GetNetworkParameters(ctx, &access.GetNetworkParametersRequest{}) + assert.Equal(t, status.Errorf(codes.Unavailable, "the connection to %s was closed", clientAddress), err) + assert.Nil(t, resp) + + close(returnFromPing) // signal it's ok to return from Ping + }() + + // Call a gRPC method on the client + _, err = client.Ping(ctx, pingReq) + require.NoError(t, err) + + // Wait for the client connection to change state from "Ready" to "Shutdown" as connection was closed. 
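+	// (WaitForStateChange returns true once the state leaves "Ready", and false if the
+	// context expires first.)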
+	require.Eventually(t, func() bool {
+		return cachedClient.ClientConn().WaitForStateChange(ctx, connectivity.Ready)
+	}, 100*time.Millisecond, 10*time.Millisecond, "client timed out transitioning state")
+
+	assert.Equal(t, connectivity.Shutdown, cachedClient.ClientConn().GetState())
+	assert.Equal(t, 0, connectionCache.Len())
+
+	wg.Wait() // wait until the invalidation goroutine is done
+}
+
+func TestConcurrentConnections(t *testing.T) {
+	logger := unittest.Logger()
+	metrics := metrics.NewNoopCollector()
+
+	// Add createExecNode function to recreate it each time for rapid test
+	createExecNode := func() (*executionNode, func()) {
+		en := newExecutionNode(t)
+		en.start(t)
+		return en, func() {
+			en.stop(t)
+		}
+	}
+
+	// setup the handler mock
+	req := &execution.PingRequest{}
+	resp := &execution.PingResponse{}
+
+	// Note: rapid will randomly fail with an error: "group did not use any data from bitstream"
+	// See https://github.com/flyingmutant/rapid/issues/65
+	rapid.Check(t, func(tt *rapid.T) {
+		en, closer := createExecNode()
+		defer closer()
+
+		// Note: rapid does not support concurrent calls to Draw for a given T, so they must be serialized
+		mu := sync.Mutex{}
+		getSleep := func() time.Duration {
+			mu.Lock()
+			defer mu.Unlock()
+			return time.Duration(rapid.Int64Range(100, 10_000).Draw(tt, "s"))
+		}
+
+		requestCount := rapid.IntRange(50, 1000).Draw(tt, "r")
+		responsesSent := atomic.NewInt32(0)
+		en.handler.
+			On("Ping",
+				testifymock.Anything,
+				testifymock.AnythingOfType("*execution.PingRequest")).
+			Return(func(_ context.Context, _ *execution.PingRequest) (*execution.PingResponse, error) {
+				time.Sleep(getSleep() * time.Microsecond)
+
+				// randomly fail ~25% of the time to test that client connection and reuse logic
+				// handles concurrent connect/disconnects
+				fail, err := rand.Int(rand.Reader, big.NewInt(4))
+				require.NoError(tt, err)
+
+				if fail.Uint64()%4 == 0 {
+					err = status.Errorf(codes.Unavailable, "random error")
+				}
+
+				responsesSent.Inc()
+				return resp, err
+			})
+
+		connectionCache, err := NewCache(logger, metrics, 1)
+		require.NoError(tt, err)
+
+		enConfig := DefaultExecutionConfig()
+		enConfig.GRPCPort = en.port
+		enConfig.Timeout = time.Second
+
+		connectionFactory := &ConnectionFactoryImpl{
+			ExecutionConfig: enConfig,
+			AccessMetrics:   metrics,
+			Manager: NewManager(
+				logger,
+				metrics,
+				connectionCache,
+				CircuitBreakerConfig{},
+				grpcutils.NoCompressor,
+			),
+		}
+
+		clientAddress := en.listener.Addr().String()
+
+		ctx := context.Background()
+
+		// Generate random number of requests
+		var wg sync.WaitGroup
+		wg.Add(requestCount)
+
+		for i := 0; i < requestCount; i++ {
+			go func() {
+				defer wg.Done()
+
+				client, _, err := connectionFactory.GetExecutionAPIClient(clientAddress)
+				require.NoError(tt, err)
+
+				_, err = client.Ping(ctx, req)
+
+				if err != nil {
+					// Note: for some reason, when Unavailable is returned, the error message is
+					// changed to "the connection to 127.0.0.1:57753 was closed". Other error codes
+					// preserve the message.
+					require.Equalf(tt, codes.Unavailable, status.Code(err), "unexpected error: %v", err)
+				}
+			}()
+		}
+		wg.Wait()
+
+		// the grpc client seems to throttle requests to servers that return Unavailable, so not
+		// all of the requests make it through to the backend on every run. We only require that
+		// at least one request was handled; in most runs all of them are.
+ assert.LessOrEqual(tt, responsesSent.Load(), int32(requestCount)) + assert.Greater(tt, responsesSent.Load(), int32(0)) + }) +} + +var successCodes = []codes.Code{ + codes.Canceled, + codes.InvalidArgument, + codes.NotFound, + codes.Unimplemented, + codes.OutOfRange, +} + +// TestCircuitBreakerExecutionNode tests the circuit breaker for execution nodes. +func TestCircuitBreakerExecutionNode(t *testing.T) { + logger := unittest.Logger() + metrics := metrics.NewNoopCollector() + + requestTimeout := 500 * time.Millisecond + circuitBreakerRestoreTimeout := 1500 * time.Millisecond + + // Create an execution node for testing. + en := newExecutionNode(t) + en.start(t) + defer en.stop(t) + + // Create the connection factory. + connectionFactory := new(ConnectionFactoryImpl) + + // Set the execution gRPC config + connectionFactory.ExecutionConfig = DefaultExecutionConfig() + connectionFactory.ExecutionConfig.GRPCPort = en.port + connectionFactory.ExecutionConfig.Timeout = requestTimeout + + // Set the connection pool cache size. + cacheSize := 1 + connectionCache, err := NewCache(logger, metrics, cacheSize) + require.NoError(t, err) + + connectionFactory.Manager = NewManager( + logger, + connectionFactory.AccessMetrics, + connectionCache, + CircuitBreakerConfig{ + Enabled: true, + MaxFailures: 1, + MaxRequests: 1, + RestoreTimeout: circuitBreakerRestoreTimeout, + }, + grpcutils.NoCompressor, + ) + + // Set metrics reporting. + connectionFactory.AccessMetrics = metrics + + // Create the execution API client. + client, _, err := connectionFactory.GetExecutionAPIClient(en.listener.Addr().String()) + require.NoError(t, err) + + req := &execution.PingRequest{} + resp := &execution.PingResponse{} + + // Helper function to make the Ping call to the execution node and measure the duration. + callAndMeasurePingDuration := func(ctx context.Context) (time.Duration, error) { + start := time.Now() + + // Make the call to the execution node. + _, err = client.Ping(ctx, req) + return time.Since(start), err + } + + t.Run("test different states of the circuit breaker", func(t *testing.T) { + ctx := context.Background() + + // Set up the handler mock to not respond within the requestTimeout. + en.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*execution.PingRequest")). + After(2*requestTimeout). + Return(resp, nil). + Once() + + // Call and measure the duration for the first invocation. + duration, err := callAndMeasurePingDuration(ctx) + assert.Equal(t, codes.DeadlineExceeded, status.Code(err)) + assert.LessOrEqual(t, requestTimeout, duration) + + // Call and measure the duration for the second invocation (circuit breaker state is now "Open"). + duration, err = callAndMeasurePingDuration(ctx) + assert.ErrorIs(t, err, gobreaker.ErrOpenState) + assert.Greater(t, requestTimeout, duration) + + en.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*execution.PingRequest")). + Return(resp, nil). + Once() + + // Wait until the circuit breaker transitions to the "HalfOpen" state. + time.Sleep(circuitBreakerRestoreTimeout + (500 * time.Millisecond)) + + // Call and measure the duration for the third invocation (circuit breaker state is now "HalfOpen"). 
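+		// In the "HalfOpen" state the breaker admits a limited number of probe requests
+		// (MaxRequests); once they succeed it transitions back to "Closed", so this call
+		// should complete quickly and without error.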
+ duration, err = callAndMeasurePingDuration(ctx) + assert.Greater(t, requestTimeout, duration) + assert.NoError(t, err) + }) + + for _, code := range successCodes { + t.Run(fmt.Sprintf("test error %s treated as a success for circuit breaker ", code.String()), func(t *testing.T) { + ctx := context.Background() + + en.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*execution.PingRequest")). + Return(nil, status.Error(code, code.String())). + Once() + + duration, err := callAndMeasurePingDuration(ctx) + require.Error(t, err) + require.Equal(t, code, status.Code(err)) + require.Greater(t, requestTimeout, duration) + }) + } +} + +// TestCircuitBreakerCollectionNode tests the circuit breaker for collection nodes. +func TestCircuitBreakerCollectionNode(t *testing.T) { + logger := unittest.Logger() + metrics := metrics.NewNoopCollector() + + requestTimeout := 500 * time.Millisecond + circuitBreakerRestoreTimeout := 1500 * time.Millisecond + + // Create a collection node for testing. + cn := newCollectionNode(t) + cn.start(t) + defer cn.stop(t) + + // Create the connection factory. + connectionFactory := new(ConnectionFactoryImpl) + + // Set the collection gRPC config + connectionFactory.CollectionConfig = DefaultCollectionConfig() + connectionFactory.CollectionConfig.GRPCPort = cn.port + connectionFactory.CollectionConfig.Timeout = requestTimeout + + // Set the connection pool cache size. + cacheSize := 1 + connectionCache, err := NewCache(logger, metrics, cacheSize) + require.NoError(t, err) + + connectionFactory.Manager = NewManager( + logger, + connectionFactory.AccessMetrics, + connectionCache, + CircuitBreakerConfig{ + Enabled: true, + MaxFailures: 1, + MaxRequests: 1, + RestoreTimeout: circuitBreakerRestoreTimeout, + }, + grpcutils.NoCompressor, + ) + + // Set metrics reporting. + connectionFactory.AccessMetrics = metrics + + // Create the collection API client. + client, _, err := connectionFactory.GetCollectionAPIClient(cn.listener.Addr().String(), nil) + assert.NoError(t, err) + + req := &access.PingRequest{} + resp := &access.PingResponse{} + + // Helper function to make the Ping call to the collection node and measure the duration. + callAndMeasurePingDuration := func(ctx context.Context) (time.Duration, error) { + start := time.Now() + + // Make the call to the collection node. + _, err = client.Ping(ctx, req) + return time.Since(start), err + } + + t.Run("test different states of the circuit breaker", func(t *testing.T) { + ctx := context.Background() + + // Set up the handler mock to not respond within the requestTimeout. + cn.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*access.PingRequest")). + After(2*requestTimeout). + Return(resp, nil). + Once() + + // Call and measure the duration for the first invocation. + duration, err := callAndMeasurePingDuration(ctx) + assert.Equal(t, codes.DeadlineExceeded, status.Code(err)) + assert.LessOrEqual(t, requestTimeout, duration) + + // Call and measure the duration for the second invocation (circuit breaker state is now "Open"). + duration, err = callAndMeasurePingDuration(ctx) + assert.Equal(t, gobreaker.ErrOpenState, err) + assert.Greater(t, requestTimeout, duration) + + cn.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*access.PingRequest")). + Return(resp, nil). + Once() + + // Wait until the circuit breaker transitions to the "HalfOpen" state. 
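+		// Sleeping slightly longer than the restore timeout gives the breaker time to
+		// transition and avoids flakiness right at the boundary.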
+ time.Sleep(circuitBreakerRestoreTimeout + (500 * time.Millisecond)) + + // Call and measure the duration for the third invocation (circuit breaker state is now "HalfOpen"). + duration, err = callAndMeasurePingDuration(ctx) + assert.Greater(t, requestTimeout, duration) + assert.Equal(t, nil, err) + }) + + for _, code := range successCodes { + t.Run(fmt.Sprintf("test error %s treated as a success for circuit breaker ", code.String()), func(t *testing.T) { + ctx := context.Background() + + cn.handler. + On("Ping", + testifymock.Anything, + testifymock.AnythingOfType("*access.PingRequest")). + Return(nil, status.Error(code, code.String())). + Once() + + duration, err := callAndMeasurePingDuration(ctx) + require.Error(t, err) + require.Equal(t, code, status.Code(err)) + require.Greater(t, requestTimeout, duration) + }) + } +} diff --git a/engine/access/rpc/connection/grpc_compression_benchmark_test.go b/engine/access/rpc/connection/grpc_compression_benchmark_test.go new file mode 100644 index 00000000000..4fed6759681 --- /dev/null +++ b/engine/access/rpc/connection/grpc_compression_benchmark_test.go @@ -0,0 +1,121 @@ +package connection + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + testifymock "github.com/stretchr/testify/mock" + + "google.golang.org/grpc/encoding/gzip" + + "github.com/onflow/flow-go/engine/common/grpc/compressor/deflate" + "github.com/onflow/flow-go/engine/common/grpc/compressor/snappy" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/utils/unittest" + + "github.com/onflow/flow/protobuf/go/flow/execution" +) + +// BenchmarkWithGzipCompression benchmarks the gRPC request to execution nodes using gzip compressor. +func BenchmarkWithGzipCompression(b *testing.B) { + runBenchmark(b, gzip.Name) +} + +// BenchmarkWithSnappyCompression benchmarks the gRPC request to execution nodes using snappy compressor. +func BenchmarkWithSnappyCompression(b *testing.B) { + runBenchmark(b, snappy.Name) +} + +// BenchmarkWithDeflateCompression benchmarks the gRPC request to execution nodes using deflate compressor. +func BenchmarkWithDeflateCompression(b *testing.B) { + runBenchmark(b, deflate.Name) +} + +// runBenchmark is a helper function that performs the benchmarking for different compressors. +func runBenchmark(b *testing.B, compressorName string) { + // create an execution node + en := newExecutionNode(b) + en.start(b) + defer en.stop(b) + + blockHeaders := getHeaders(5) + exeResults := make([]*execution.GetEventsForBlockIDsResponse_Result, len(blockHeaders)) + for i := 0; i < len(blockHeaders); i++ { + exeResults[i] = &execution.GetEventsForBlockIDsResponse_Result{ + BlockId: convert.IdentifierToMessage(blockHeaders[i].ID()), + BlockHeight: blockHeaders[i].Height, + Events: convert.EventsToMessages(getEvents(10)), + } + } + expectedEventsResponse := &execution.GetEventsForBlockIDsResponse{ + Results: exeResults, + } + + blockIDs := make([]flow.Identifier, len(blockHeaders)) + for i, header := range blockHeaders { + blockIDs[i] = header.ID() + } + eventsReq := &execution.GetEventsForBlockIDsRequest{ + BlockIds: convert.IdentifiersToMessages(blockIDs), + Type: string(flow.EventAccountCreated), + } + + en.handler.On("GetEventsForBlockIDs", testifymock.Anything, testifymock.Anything). 
+ Return(expectedEventsResponse, nil) + + // create the factory + connectionFactory := new(ConnectionFactoryImpl) + // set the execution grpc config + connectionFactory.ExecutionConfig = DefaultExecutionConfig() + connectionFactory.ExecutionConfig.GRPCPort = en.port + + // set metrics reporting + connectionFactory.AccessMetrics = metrics.NewNoopCollector() + connectionFactory.Manager = NewManager( + unittest.Logger(), + connectionFactory.AccessMetrics, + nil, + CircuitBreakerConfig{}, + compressorName, + ) + + proxyConnectionFactory := ProxyConnectionFactory{ + ConnectionFactory: connectionFactory, + targetAddress: en.listener.Addr().String(), + } + + // get an execution API client + client, _, err := proxyConnectionFactory.GetExecutionAPIClient("foo") + assert.NoError(b, err) + + ctx := context.Background() + b.ResetTimer() + // make the call to the execution node + for i := 0; i < b.N; i++ { + _, err := client.GetEventsForBlockIDs(ctx, eventsReq) + assert.NoError(b, err) + } +} + +// getEvents generates a slice of flow events with a specified length. +func getEvents(n int) []flow.Event { + events := make([]flow.Event, n) + for i := range events { + events[i] = flow.Event{Type: flow.EventAccountCreated} + } + return events +} + +// getHeaders generates a slice of flow headers with a specified length. +func getHeaders(n int) []*flow.Header { + headers := make([]*flow.Header, n) + for i := range headers { + b := unittest.BlockFixture() + headers[i] = b.ToHeader() + + } + return headers +} diff --git a/engine/access/rpc/connection/manager.go b/engine/access/rpc/connection/manager.go new file mode 100644 index 00000000000..00a92ccdc6c --- /dev/null +++ b/engine/access/rpc/connection/manager.go @@ -0,0 +1,340 @@ +package connection + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/onflow/crypto" + "github.com/rs/zerolog" + "github.com/sony/gobreaker" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + _ "google.golang.org/grpc/encoding/gzip" //required for gRPC compression + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/status" + + _ "github.com/onflow/flow-go/engine/common/grpc/compressor/deflate" //required for gRPC compression + _ "github.com/onflow/flow-go/engine/common/grpc/compressor/snappy" //required for gRPC compression + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/utils/grpcutils" +) + +// DefaultClientTimeout is used when making a GRPC request to a collection or execution node. +const DefaultClientTimeout = 3 * time.Second + +type noopCloser struct{} + +func (c *noopCloser) Close() error { + return nil +} + +// Manager provides methods for getting and managing gRPC client connections. +type Manager struct { + logger zerolog.Logger + metrics module.AccessMetrics + cache *Cache + circuitBreakerConfig CircuitBreakerConfig + compressorName string +} + +// CircuitBreakerConfig is a configuration struct for the circuit breaker. +type CircuitBreakerConfig struct { + // Enabled specifies whether the circuit breaker is enabled for collection and execution API clients. + Enabled bool + // RestoreTimeout specifies the duration after which the circuit breaker will restore the connection to the client + // after closing it due to failures. + RestoreTimeout time.Duration + // MaxFailures specifies the maximum number of failed calls to the client that will cause the circuit breaker + // to close the connection. 
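+	// Reaching this threshold trips the breaker into the "Open" state, after which calls fail
+	// fast with gobreaker.ErrOpenState until RestoreTimeout elapses.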
+	MaxFailures uint32
+	// MaxRequests specifies the maximum number of requests allowed to pass through while the circuit breaker
+	// is checking whether the connection has been restored after the timeout.
+	MaxRequests uint32
+}
+
+// NewManager creates a new Manager with the specified parameters.
+func NewManager(
+	logger zerolog.Logger,
+	metrics module.AccessMetrics,
+	cache *Cache,
+	circuitBreakerConfig CircuitBreakerConfig,
+	compressorName string,
+) Manager {
+	return Manager{
+		cache:                cache,
+		logger:               logger,
+		metrics:              metrics,
+		circuitBreakerConfig: circuitBreakerConfig,
+		compressorName:       compressorName,
+	}
+}
+
+// GetConnection returns a gRPC client connection for the given grpcAddress and timeout.
+// If a cache is configured, it returns a cached connection when available; otherwise it creates a new connection.
+// It returns the client connection and an io.Closer to close the connection when done.
+// The networkPubKey is the public key used to create a secure gRPC connection; it can be nil for an unsecured connection.
+func (m *Manager) GetConnection(
+	grpcAddress string,
+	cfg Config,
+	networkPubKey crypto.PublicKey,
+) (*grpc.ClientConn, io.Closer, error) {
+	if m.cache != nil {
+		client, err := m.cache.GetConnected(grpcAddress, cfg, networkPubKey, m.createConnection)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		return client.ClientConn(), &noopCloser{}, nil
+	}
+
+	conn, err := m.createConnection(grpcAddress, cfg, networkPubKey, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return conn, io.Closer(conn), nil
+}
+
+// createConnection creates a new gRPC connection to the remote node at the given address with the specified timeout.
+// If the cachedClient is not nil, it means a new entry in the cache is being created, so it's locked to give priority
+// to the caller working with the new client, allowing it to create the underlying connection.
+// The networkPubKey is optional and configures connection-level security for the gRPC connection. If it is not nil,
+// it is used to create a secure gRPC connection; if it is nil, an unsecured gRPC connection is created.
+func (m *Manager) createConnection(
+	address string,
+	cfg Config,
+	networkPubKey crypto.PublicKey,
+	cachedClient *CachedClient,
+) (*grpc.ClientConn, error) {
+	timeout := cfg.Timeout
+	if timeout == 0 {
+		timeout = DefaultClientTimeout
+	}
+
+	keepaliveParams := keepalive.ClientParameters{
+		Time:    10 * time.Second, // How long the client will wait before sending a keepalive to the server if there is no activity.
+		Timeout: timeout,          // How long the client will wait for a response from the keepalive before closing.
+	}
+
+	// The order in which interceptors are added to the `connInterceptors` slice is important since they will be called
+	// in the opposite order during gRPC requests. See documentation for more info:
+	// https://grpc.io/blog/grpc-web-interceptor/#binding-interceptors
+	var connInterceptors []grpc.UnaryClientInterceptor
+
+	if !m.circuitBreakerConfig.Enabled && cachedClient != nil {
+		connInterceptors = append(connInterceptors, m.createClientInvalidationInterceptor(cachedClient))
+	}
+
+	connInterceptors = append(connInterceptors, createClientTimeoutInterceptor(timeout))
+
+	// This interceptor monitors ongoing requests before passing control to subsequent interceptors.
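+	// It tracks the number of in-flight requests so that closing the cached client can wait for
+	// them to drain, and it rejects new requests once closure of the connection has been requested.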
+ if cachedClient != nil { + connInterceptors = append(connInterceptors, createRequestWatcherInterceptor(cachedClient)) + } + + if m.circuitBreakerConfig.Enabled { + // If the circuit breaker interceptor is enabled, it should always be called first before passing control to + // subsequent interceptors. + connInterceptors = append(connInterceptors, m.createCircuitBreakerInterceptor()) + } + + // ClientConn's default KeepAlive on connections is indefinite, assuming the timeout isn't reached + // The connections should be safe to be persisted and reused. + // https://pkg.go.dev/google.golang.org/grpc#WithKeepaliveParams + // https://grpc.io/blog/grpc-on-http2/#keeping-connections-alive + var opts []grpc.DialOption + opts = append(opts, grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(int(cfg.MaxRequestMsgSize)), + grpc.MaxCallRecvMsgSize(int(cfg.MaxResponseMsgSize)), + )) + opts = append(opts, grpc.WithKeepaliveParams(keepaliveParams)) + opts = append(opts, grpc.WithChainUnaryInterceptor(connInterceptors...)) + + if m.compressorName != grpcutils.NoCompressor { + opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor(m.compressorName))) + } + + if networkPubKey != nil { + tlsConfig, err := grpcutils.DefaultClientTLSConfig(networkPubKey) + if err != nil { + return nil, fmt.Errorf("failed to get default TLS client config using public flow networking key %s %w", networkPubKey.String(), err) + } + opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + } else { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + + conn, err := grpc.Dial( + address, + opts..., + ) + if err != nil { + return nil, fmt.Errorf("failed to connect to address %s: %w", address, err) + } + return conn, nil +} + +// createRequestWatcherInterceptor creates a request watcher interceptor to wait for unfinished requests before closing. +func createRequestWatcherInterceptor(cachedClient *CachedClient) grpc.UnaryClientInterceptor { + requestWatcherInterceptor := func( + ctx context.Context, + method string, + req interface{}, + reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + // Prevent new requests from being sent if the connection is marked for closure. + if cachedClient.CloseRequested() { + return status.Errorf(codes.Unavailable, "the connection to %s was closed", cachedClient.Address()) + } + + // Increment the request counter to track ongoing requests, then decrement the request counter before returning. + done := cachedClient.AddRequest() + defer done() + + // Invoke the actual RPC method. + return invoker(ctx, method, req, reply, cc, opts...) + } + + return requestWatcherInterceptor +} + +// WithClientTimeoutOption is a helper function to create a GRPC dial option +// with the specified client timeout interceptor. +func WithClientTimeoutOption(timeout time.Duration) grpc.DialOption { + return grpc.WithUnaryInterceptor(createClientTimeoutInterceptor(timeout)) +} + +// createClientTimeoutInterceptor creates a client interceptor with a context that expires after the timeout. +func createClientTimeoutInterceptor(timeout time.Duration) grpc.UnaryClientInterceptor { + clientTimeoutInterceptor := func( + ctx context.Context, + method string, + req interface{}, + reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + // Create a context that expires after the specified timeout. 
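+		// Once the deadline passes, gRPC cancels the in-flight call and the invoker returns an
+		// error carrying codes.DeadlineExceeded.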
+		ctxWithTimeout, cancel := context.WithTimeout(ctx, timeout)
+		defer cancel()
+
+		// Call the remote GRPC using the short context.
+		err := invoker(ctxWithTimeout, method, req, reply, cc, opts...)
+
+		return err
+	}
+
+	return clientTimeoutInterceptor
+}
+
+// createClientInvalidationInterceptor creates a client interceptor for client invalidation. It should only be created
+// if the circuit breaker is disabled. If the response from the server indicates an unavailable status, it invalidates
+// the corresponding client.
+func (m *Manager) createClientInvalidationInterceptor(cachedClient *CachedClient) grpc.UnaryClientInterceptor {
+	return func(
+		ctx context.Context,
+		method string,
+		req interface{},
+		reply interface{},
+		cc *grpc.ClientConn,
+		invoker grpc.UnaryInvoker,
+		opts ...grpc.CallOption,
+	) error {
+		err := invoker(ctx, method, req, reply, cc, opts...)
+		if status.Code(err) == codes.Unavailable {
+			cachedClient.Invalidate()
+		}
+
+		return err
+	}
+}
+
+// A simplified representation of the circuit breaker pattern used to handle node connectivity:
+//
+//	Circuit Open --> Circuit Half-Open --> Circuit Closed
+//	      ^                                      |
+//	      |                                      |
+//	      +--------------------------------------+
+//
+// The "Circuit Open" state represents the circuit being open, indicating that the node is not available.
+// This state is entered when the number of consecutive failures exceeds the maximum allowed failures.
+//
+// The "Circuit Half-Open" state represents the circuit transitioning from the open state to the half-open
+// state after a configured restore timeout. In this state, the circuit allows a limited number of requests
+// to test if the node has recovered.
+//
+// The "Circuit Closed" state represents the circuit being closed, indicating that the node is available.
+// This is the initial state; it is also entered when the test requests in the half-open state succeed.
+
+// createCircuitBreakerInterceptor creates a client interceptor for circuit breaker functionality. It should only be
+// created if the circuit breaker is enabled. All invocations will go through the circuit breaker to be tracked for
+// success or failure of the call.
+func (m *Manager) createCircuitBreakerInterceptor() grpc.UnaryClientInterceptor {
+	if m.circuitBreakerConfig.Enabled {
+		circuitBreaker := gobreaker.NewCircuitBreaker(gobreaker.Settings{
+			// Timeout defines how long the circuit breaker will remain open before transitioning to the HalfOpen state.
+			Timeout: m.circuitBreakerConfig.RestoreTimeout,
+			// ReadyToTrip returns true when the circuit breaker should trip and transition to the Open state.
+			ReadyToTrip: func(counts gobreaker.Counts) bool {
+				// The number of maximum failures is checked before the circuit breaker goes to the Open state.
+				return counts.ConsecutiveFailures >= m.circuitBreakerConfig.MaxFailures
+			},
+			// MaxRequests defines the max number of concurrent requests while the circuit breaker is in the HalfOpen
+			// state.
+			MaxRequests: m.circuitBreakerConfig.MaxRequests,
+			// IsSuccessful defines gRPC status codes that should be treated as a successful result for the circuit breaker.
+			IsSuccessful: func(err error) bool {
+				if se, ok := status.FromError(err); ok {
+					if se == nil {
+						return true
+					}
+
+					// There are several error cases that may occur during normal operation and should be considered
+					// as "successful" from the perspective of the circuit breaker.
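+					// These codes indicate that the node is reachable and responding; the failure is
+					// specific to the request (bad input, missing data, caller cancellation) rather
+					// than a sign of node unavailability, so they must not trip the breaker.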
+ switch se.Code() { + case codes.OK, codes.Canceled, codes.InvalidArgument, codes.NotFound, codes.Unimplemented, codes.OutOfRange: + return true + default: + return false + } + } + + return false + }, + }) + + circuitBreakerInterceptor := func( + ctx context.Context, + method string, + req interface{}, + reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + // The circuit breaker integration occurs here, where all invoked calls to the node pass through the + // CircuitBreaker.Execute method. This method counts successful and failed invocations, and switches to the + // "StateOpen" when the maximum failure threshold is reached. When the circuit breaker is in the "StateOpen" + // it immediately rejects connections and returns without waiting for the call timeout. After the + // "RestoreTimeout" period elapses, the circuit breaker transitions to the "StateHalfOpen" and attempts the + // invocation again. If the invocation fails, it returns to the "StateOpen"; otherwise, it transitions to + // the "StateClosed" and handles invocations as usual. + _, err := circuitBreaker.Execute(func() (interface{}, error) { + err := invoker(ctx, method, req, reply, cc, opts...) + return nil, err + }) + return err + } + + return circuitBreakerInterceptor + } + + return nil +} diff --git a/engine/access/rpc/connection/mock/connection_factory.go b/engine/access/rpc/connection/mock/connection_factory.go new file mode 100644 index 00000000000..c7120b1f5de --- /dev/null +++ b/engine/access/rpc/connection/mock/connection_factory.go @@ -0,0 +1,151 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + access "github.com/onflow/flow/protobuf/go/flow/access" + + crypto "github.com/onflow/crypto" + + execution "github.com/onflow/flow/protobuf/go/flow/execution" + + io "io" + + mock "github.com/stretchr/testify/mock" +) + +// ConnectionFactory is an autogenerated mock type for the ConnectionFactory type +type ConnectionFactory struct { + mock.Mock +} + +// GetAccessAPIClientWithPort provides a mock function with given fields: address, networkPubKey +func (_m *ConnectionFactory) GetAccessAPIClientWithPort(address string, networkPubKey crypto.PublicKey) (access.AccessAPIClient, io.Closer, error) { + ret := _m.Called(address, networkPubKey) + + if len(ret) == 0 { + panic("no return value specified for GetAccessAPIClientWithPort") + } + + var r0 access.AccessAPIClient + var r1 io.Closer + var r2 error + if rf, ok := ret.Get(0).(func(string, crypto.PublicKey) (access.AccessAPIClient, io.Closer, error)); ok { + return rf(address, networkPubKey) + } + if rf, ok := ret.Get(0).(func(string, crypto.PublicKey) access.AccessAPIClient); ok { + r0 = rf(address, networkPubKey) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPIClient) + } + } + + if rf, ok := ret.Get(1).(func(string, crypto.PublicKey) io.Closer); ok { + r1 = rf(address, networkPubKey) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(io.Closer) + } + } + + if rf, ok := ret.Get(2).(func(string, crypto.PublicKey) error); ok { + r2 = rf(address, networkPubKey) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// GetCollectionAPIClient provides a mock function with given fields: address, networkPubKey +func (_m *ConnectionFactory) GetCollectionAPIClient(address string, networkPubKey crypto.PublicKey) (access.AccessAPIClient, io.Closer, error) { + ret := _m.Called(address, networkPubKey) + + if len(ret) == 0 { + panic("no return value specified for 
GetCollectionAPIClient") + } + + var r0 access.AccessAPIClient + var r1 io.Closer + var r2 error + if rf, ok := ret.Get(0).(func(string, crypto.PublicKey) (access.AccessAPIClient, io.Closer, error)); ok { + return rf(address, networkPubKey) + } + if rf, ok := ret.Get(0).(func(string, crypto.PublicKey) access.AccessAPIClient); ok { + r0 = rf(address, networkPubKey) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(access.AccessAPIClient) + } + } + + if rf, ok := ret.Get(1).(func(string, crypto.PublicKey) io.Closer); ok { + r1 = rf(address, networkPubKey) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(io.Closer) + } + } + + if rf, ok := ret.Get(2).(func(string, crypto.PublicKey) error); ok { + r2 = rf(address, networkPubKey) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// GetExecutionAPIClient provides a mock function with given fields: address +func (_m *ConnectionFactory) GetExecutionAPIClient(address string) (execution.ExecutionAPIClient, io.Closer, error) { + ret := _m.Called(address) + + if len(ret) == 0 { + panic("no return value specified for GetExecutionAPIClient") + } + + var r0 execution.ExecutionAPIClient + var r1 io.Closer + var r2 error + if rf, ok := ret.Get(0).(func(string) (execution.ExecutionAPIClient, io.Closer, error)); ok { + return rf(address) + } + if rf, ok := ret.Get(0).(func(string) execution.ExecutionAPIClient); ok { + r0 = rf(address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(execution.ExecutionAPIClient) + } + } + + if rf, ok := ret.Get(1).(func(string) io.Closer); ok { + r1 = rf(address) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(io.Closer) + } + } + + if rf, ok := ret.Get(2).(func(string) error); ok { + r2 = rf(address) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewConnectionFactory creates a new instance of ConnectionFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewConnectionFactory(t interface { + mock.TestingT + Cleanup(func()) +}) *ConnectionFactory { + mock := &ConnectionFactory{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/rpc/connection/node_mock.go b/engine/access/rpc/connection/node_mock.go new file mode 100644 index 00000000000..af613f4ffc8 --- /dev/null +++ b/engine/access/rpc/connection/node_mock.go @@ -0,0 +1,110 @@ +package connection + +import ( + "net" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/execution" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc" + + "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// node mocks a flow node that runs a GRPC server +type node struct { + server *grpc.Server + listener net.Listener + port uint +} + +func (n *node) setupNode(tb testing.TB) { + n.server = grpc.NewServer() + listener, err := net.Listen("tcp4", unittest.DefaultAddress) + assert.NoError(tb, err) + n.listener = listener + assert.Eventually(tb, func() bool { + return !strings.HasSuffix(listener.Addr().String(), ":0") + }, time.Second*4, 10*time.Millisecond) + + _, port, err := net.SplitHostPort(listener.Addr().String()) + assert.NoError(tb, err) + portAsUint, err := strconv.ParseUint(port, 10, 32) + assert.NoError(tb, err) + n.port = uint(portAsUint) +} + +func (n *node) start(tb testing.TB) { + // using a wait group here to ensure the goroutine has started before returning. Otherwise, + // there's a race condition where the server is sometimes stopped before it has started + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + wg.Done() + err := n.server.Serve(n.listener) + assert.NoError(tb, err) + }() + unittest.RequireReturnsBefore(tb, wg.Wait, 10*time.Millisecond, "could not start goroutine on time") +} + +func (n *node) stop(tb testing.TB) { + if n.server != nil { + n.server.Stop() + } +} + +type executionNode struct { + node + handler *mock.ExecutionAPIServer +} + +func newExecutionNode(tb testing.TB) *executionNode { + return &executionNode{ + handler: mock.NewExecutionAPIServer(tb), + } +} + +func (en *executionNode) start(tb testing.TB) { + if en.handler == nil { + tb.Fatalf("executionNode must be initialized using newExecutionNode") + } + + en.setupNode(tb) + execution.RegisterExecutionAPIServer(en.server, en.handler) + en.node.start(tb) +} + +func (en *executionNode) stop(tb testing.TB) { + en.node.stop(tb) +} + +type collectionNode struct { + node + handler *mock.AccessAPIServer +} + +func newCollectionNode(tb testing.TB) *collectionNode { + return &collectionNode{ + handler: mock.NewAccessAPIServer(tb), + } +} + +func (cn *collectionNode) start(tb testing.TB) { + if cn.handler == nil { + tb.Fatalf("collectionNode must be initialized using newCollectionNode") + } + + cn.setupNode(tb) + access.RegisterAccessAPIServer(cn.server, cn.handler) + cn.node.start(tb) +} + +func (cn *collectionNode) stop(tb testing.TB) { + cn.node.stop(tb) +} diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index 76df14a2127..4e0e9ff4394 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -7,47 +7,47 @@ import ( "net" "net/http" "sync" - "time" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - lru "github.com/hashicorp/golang-lru" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" - 
"google.golang.org/grpc" "google.golang.org/grpc/credentials" + "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine/access/rest" + "github.com/onflow/flow-go/engine/access/rest/websockets" "github.com/onflow/flow-go/engine/access/rpc/backend" - "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/access/state_stream" + statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/events" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/state_synchronization" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" ) // Config defines the configurable options for the access node server // A secure GRPC server here implies a server that presents a self-signed TLS certificate and a client that authenticates // the server via a pre-shared public key type Config struct { - UnsecureGRPCListenAddr string // the non-secure GRPC server address as ip:port - SecureGRPCListenAddr string // the secure GRPC server address as ip:port - TransportCredentials credentials.TransportCredentials // the secure GRPC credentials - HTTPListenAddr string // the HTTP web proxy address as ip:port - RESTListenAddr string // the REST server address as ip:port (if empty the REST server will not be started) - CollectionAddr string // the address of the upstream collection node - HistoricalAccessAddrs string // the list of all access nodes from previous spork - MaxMsgSize uint // GRPC max message size - ExecutionClientTimeout time.Duration // execution API GRPC client timeout - CollectionClientTimeout time.Duration // collection API GRPC client timeout - ConnectionPoolSize uint // size of the cache for storing collection and execution connections - MaxHeightRange uint // max size of height range requests - PreferredExecutionNodeIDs []string // preferred list of upstream execution node IDs - FixedExecutionNodeIDs []string // fixed list of execution node IDs to choose from if no node node ID can be chosen from the PreferredExecutionNodeIDs - ArchiveAddressList []string // the archive node address list to send script executions. when configured, script executions will be all sent to the archive node + UnsecureGRPCListenAddr string // the non-secure GRPC server address as ip:port + SecureGRPCListenAddr string // the secure GRPC server address as ip:port + TransportCredentials credentials.TransportCredentials // the secure GRPC credentials + HTTPListenAddr string // the HTTP web proxy address as ip:port + CollectionAddr string // the address of the upstream collection node + HistoricalAccessAddrs string // the list of all access nodes from previous spork + + BackendConfig backend.Config // configurable options for creating Backend + RestConfig rest.Config // the REST server configuration + CompressorName string // GRPC compressor name + WebSocketConfig websockets.Config + EnableWebSocketsStreamAPI bool + + // holds value of deprecated MaxMsgSize flag for use during bootstrapping. + // will be removed in a future release. + DeprecatedMaxMsgSize uint // in bytes } // Engine exposes the server with a simplified version of the Access API. 
@@ -61,139 +61,47 @@ type Engine struct { finalizedHeaderCache *events.FinalizedHeaderCache log zerolog.Logger - backend *backend.Backend // the gRPC service implementation - unsecureGrpcServer *grpc.Server // the unsecure gRPC server - secureGrpcServer *grpc.Server // the secure gRPC server + restCollector module.RestMetrics + backend *backend.Backend // the gRPC service implementation + unsecureGrpcServer *grpcserver.GrpcServer // the unsecure gRPC server + secureGrpcServer *grpcserver.GrpcServer // the secure gRPC server httpServer *http.Server restServer *http.Server config Config chain flow.Chain - addrLock sync.RWMutex - unsecureGrpcAddress net.Addr - secureGrpcAddress net.Addr - restAPIAddress net.Addr + restHandler access.API + + addrLock sync.RWMutex + restAPIAddress net.Addr + + stateStreamBackend state_stream.API + stateStreamConfig statestreambackend.Config } +type Option func(*RPCEngineBuilder) // NewBuilder returns a new RPC engine builder. -func NewBuilder(log zerolog.Logger, +func NewBuilder( + log zerolog.Logger, state protocol.State, config Config, - collectionRPC accessproto.AccessAPIClient, - historicalAccessNodes []accessproto.AccessAPIClient, - blocks storage.Blocks, - headers storage.Headers, - collections storage.Collections, - transactions storage.Transactions, - executionReceipts storage.ExecutionReceipts, - executionResults storage.ExecutionResults, chainID flow.ChainID, - transactionMetrics module.TransactionMetrics, accessMetrics module.AccessMetrics, - collectionGRPCPort uint, - executionGRPCPort uint, - retryEnabled bool, rpcMetricsEnabled bool, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 - apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. Ping->50, GetTransaction->10 me module.Local, + backend *backend.Backend, + restHandler access.API, + secureGrpcServer *grpcserver.GrpcServer, + unsecureGrpcServer *grpcserver.GrpcServer, + stateStreamBackend state_stream.API, + stateStreamConfig statestreambackend.Config, + indexReporter state_synchronization.IndexReporter, ) (*RPCEngineBuilder, error) { - log = log.With().Str("engine", "rpc").Logger() - // create a GRPC server to serve GRPC clients - grpcOpts := []grpc.ServerOption{ - grpc.MaxRecvMsgSize(int(config.MaxMsgSize)), - grpc.MaxSendMsgSize(int(config.MaxMsgSize)), - } - - var interceptors []grpc.UnaryServerInterceptor // ordered list of interceptors - // if rpc metrics is enabled, first create the grpc metrics interceptor - if rpcMetricsEnabled { - interceptors = append(interceptors, grpc_prometheus.UnaryServerInterceptor) - } - - if len(apiRatelimits) > 0 { - // create a rate limit interceptor - rateLimitInterceptor := rpc.NewRateLimiterInterceptor(log, apiRatelimits, apiBurstLimits).UnaryServerInterceptor - // append the rate limit interceptor to the list of interceptors - interceptors = append(interceptors, rateLimitInterceptor) - } - - // add the logging interceptor, ensure it is innermost wrapper - interceptors = append(interceptors, rpc.LoggingInterceptor(log)...) - - // create a chained unary interceptor - chainedInterceptors := grpc.ChainUnaryInterceptor(interceptors...) - grpcOpts = append(grpcOpts, chainedInterceptors) - - // create an unsecured grpc server - unsecureGrpcServer := grpc.NewServer(grpcOpts...) 
- - // create a secure server by using the secure grpc credentials that are passed in as part of config - grpcOpts = append(grpcOpts, grpc.Creds(config.TransportCredentials)) - secureGrpcServer := grpc.NewServer(grpcOpts...) - // wrap the unsecured server with an HTTP proxy server to serve HTTP clients httpServer := newHTTPProxyServer(unsecureGrpcServer) - var cache *lru.Cache - cacheSize := config.ConnectionPoolSize - if cacheSize > 0 { - // TODO: remove this fallback after fixing issues with evictions - // It was observed that evictions cause connection errors for in flight requests. This works around - // the issue by forcing hte pool size to be greater than the number of ENs + LNs - if cacheSize < backend.DefaultConnectionPoolSize { - log.Warn().Msg("connection pool size below threshold, setting pool size to default value ") - cacheSize = backend.DefaultConnectionPoolSize - } - var err error - cache, err = lru.NewWithEvict(int(cacheSize), func(_, evictedValue interface{}) { - store := evictedValue.(*backend.CachedClient) - store.Close() - log.Debug().Str("grpc_conn_evicted", store.Address).Msg("closing grpc connection evicted from pool") - if accessMetrics != nil { - accessMetrics.ConnectionFromPoolEvicted() - } - }) - if err != nil { - return nil, fmt.Errorf("could not initialize connection pool cache: %w", err) - } - } - - connectionFactory := &backend.ConnectionFactoryImpl{ - CollectionGRPCPort: collectionGRPCPort, - ExecutionGRPCPort: executionGRPCPort, - CollectionNodeGRPCTimeout: config.CollectionClientTimeout, - ExecutionNodeGRPCTimeout: config.ExecutionClientTimeout, - ConnectionsCache: cache, - CacheSize: cacheSize, - MaxMsgSize: config.MaxMsgSize, - AccessMetrics: accessMetrics, - Log: log, - } - - backend := backend.New(state, - collectionRPC, - historicalAccessNodes, - blocks, - headers, - collections, - transactions, - executionReceipts, - executionResults, - chainID, - transactionMetrics, - connectionFactory, - retryEnabled, - config.MaxHeightRange, - config.PreferredExecutionNodeIDs, - config.FixedExecutionNodeIDs, - log, - backend.DefaultSnapshotHistoryLimit, - config.ArchiveAddressList, - ) - finalizedCache, finalizedCacheWorker, err := events.NewFinalizedHeaderCache(state) if err != nil { return nil, fmt.Errorf("could not create header cache: %w", err) @@ -209,13 +117,23 @@ func NewBuilder(log zerolog.Logger, httpServer: httpServer, config: config, chain: chainID.Chain(), + restCollector: accessMetrics, + restHandler: restHandler, + stateStreamBackend: stateStreamBackend, + stateStreamConfig: stateStreamConfig, } - backendNotifierActor, backendNotifierWorker := events.NewFinalizationActor(eng.notifyBackendOnBlockFinalized) + backendNotifierActor, backendNotifierWorker := events.NewFinalizationActor(eng.processOnFinalizedBlock) eng.backendNotifierActor = backendNotifierActor eng.Component = component.NewComponentManagerBuilder(). - AddWorker(eng.serveUnsecureGRPCWorker). - AddWorker(eng.serveSecureGRPCWorker). + AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + <-secureGrpcServer.Done() + }). + AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + <-unsecureGrpcServer.Done() + }). AddWorker(eng.serveGRPCWebProxyWorker). AddWorker(eng.serveREST). AddWorker(finalizedCacheWorker). @@ -223,7 +141,7 @@ func NewBuilder(log zerolog.Logger, AddWorker(eng.shutdownWorker). 
Build() - builder := NewRPCEngineBuilder(eng, me, finalizedCache) + builder := NewRPCEngineBuilder(eng, me, finalizedCache, indexReporter) if rpcMetricsEnabled { builder.WithMetrics() } @@ -244,8 +162,6 @@ func (e *Engine) shutdown() { // use unbounded context, rely on shutdown logic to have timeout ctx := context.Background() - e.unsecureGrpcServer.GracefulStop() - e.secureGrpcServer.GracefulStop() err := e.httpServer.Shutdown(ctx) if err != nil { e.log.Error().Err(err).Msg("error stopping http server") @@ -264,28 +180,28 @@ func (e *Engine) OnFinalizedBlock(block *model.Block) { e.backendNotifierActor.OnFinalizedBlock(block) } -// notifyBackendOnBlockFinalized is invoked by the FinalizationActor when a new block is finalized. -// It notifies the backend of the newly finalized block. -func (e *Engine) notifyBackendOnBlockFinalized(_ *model.Block) error { +// processOnFinalizedBlock is invoked by the FinalizationActor when a new block is finalized. +// It informs the backend of the newly finalized block. +// The input to this callback is treated as trusted. +// No errors expected during normal operations. +func (e *Engine) processOnFinalizedBlock(_ *model.Block) error { finalizedHeader := e.finalizedHeaderCache.Get() - e.backend.NotifyFinalizedBlockHeight(finalizedHeader.Height) - return nil -} -// UnsecureGRPCAddress returns the listen address of the unsecure GRPC server. -// Guaranteed to be non-nil after Engine.Ready is closed. -func (e *Engine) UnsecureGRPCAddress() net.Addr { - e.addrLock.RLock() - defer e.addrLock.RUnlock() - return e.unsecureGrpcAddress -} + var err error + // NOTE: The BlockTracker is currently only used by the access node and not by the observer node. + if e.backend.BlockTracker != nil { + err = e.backend.BlockTracker.ProcessOnFinalizedBlock() + if err != nil { + return err + } + } -// SecureGRPCAddress returns the listen address of the secure GRPC server. -// Guaranteed to be non-nil after Engine.Ready is closed. -func (e *Engine) SecureGRPCAddress() net.Addr { - e.addrLock.RLock() - defer e.addrLock.RUnlock() - return e.secureGrpcAddress + err = e.backend.ProcessFinalizedBlockHeight(finalizedHeader.Height) + if err != nil { + return fmt.Errorf("could not process finalized block height %d: %w", finalizedHeader.Height, err) + } + + return nil } // RestApiAddress returns the listen address of the REST API server. @@ -296,59 +212,6 @@ func (e *Engine) RestApiAddress() net.Addr { return e.restAPIAddress } -// serveUnsecureGRPCWorker is a worker routine which starts the unsecure gRPC server. -// The ready callback is called after the server address is bound and set. 
-func (e *Engine) serveUnsecureGRPCWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - e.log.Info().Str("grpc_address", e.config.UnsecureGRPCListenAddr).Msg("starting grpc server on address") - - l, err := net.Listen("tcp", e.config.UnsecureGRPCListenAddr) - if err != nil { - e.log.Err(err).Msg("failed to start the grpc server") - ctx.Throw(err) - return - } - - // save the actual address on which we are listening (may be different from e.config.UnsecureGRPCListenAddr if not port - // was specified) - e.addrLock.Lock() - e.unsecureGrpcAddress = l.Addr() - e.addrLock.Unlock() - e.log.Debug().Str("unsecure_grpc_address", e.unsecureGrpcAddress.String()).Msg("listening on port") - ready() - - err = e.unsecureGrpcServer.Serve(l) // blocking call - if err != nil { - e.log.Err(err).Msg("fatal error in unsecure grpc server") - ctx.Throw(err) - } -} - -// serveSecureGRPCWorker is a worker routine which starts the secure gRPC server. -// The ready callback is called after the server address is bound and set. -func (e *Engine) serveSecureGRPCWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - e.log.Info().Str("secure_grpc_address", e.config.SecureGRPCListenAddr).Msg("starting grpc server on address") - - l, err := net.Listen("tcp", e.config.SecureGRPCListenAddr) - if err != nil { - e.log.Err(err).Msg("failed to start the grpc server") - ctx.Throw(err) - return - } - - e.addrLock.Lock() - e.secureGrpcAddress = l.Addr() - e.addrLock.Unlock() - - e.log.Debug().Str("secure_grpc_address", e.secureGrpcAddress.String()).Msg("listening on port") - ready() - - err = e.secureGrpcServer.Serve(l) // blocking call - if err != nil { - e.log.Err(err).Msg("fatal error in secure grpc server") - ctx.Throw(err) - } -} - // serveGRPCWebProxyWorker is a worker routine which starts the gRPC web proxy server. func (e *Engine) serveGRPCWebProxyWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { log := e.log.With().Str("http_proxy_address", e.config.HTTPListenAddr).Logger() @@ -374,16 +237,26 @@ func (e *Engine) serveGRPCWebProxyWorker(ctx irrecoverable.SignalerContext, read // serveREST is a worker routine which starts the HTTP REST server. // The ready callback is called after the server address is bound and set. +// Note: The original REST BaseContext is discarded, and the irrecoverable.SignalerContext is used for error handling. func (e *Engine) serveREST(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - if e.config.RESTListenAddr == "" { + if e.config.RestConfig.ListenAddress == "" { e.log.Debug().Msg("no REST API address specified - not starting the server") ready() return } - e.log.Info().Str("rest_api_address", e.config.RESTListenAddr).Msg("starting REST server on address") - - r, err := rest.NewServer(e.backend, e.config.RESTListenAddr, e.log, e.chain) + r, err := rest.NewServer( + ctx, + e.restHandler, + e.config.RestConfig, + e.log, + e.chain, + e.restCollector, + e.stateStreamBackend, + e.stateStreamConfig, + e.config.EnableWebSocketsStreamAPI, + e.config.WebSocketConfig, + ) if err != nil { e.log.Err(err).Msg("failed to initialize the REST server") ctx.Throw(err) @@ -391,7 +264,14 @@ func (e *Engine) serveREST(ctx irrecoverable.SignalerContext, ready component.Re } e.restServer = r - l, err := net.Listen("tcp", e.config.RESTListenAddr) + // Set up the irrecoverable.SignalerContext for error handling in the REST server. 
+ e.restServer.BaseContext = func(_ net.Listener) context.Context { + return irrecoverable.WithSignalerContext(ctx, ctx) + } + + e.log.Info().Str("rest_api_address", e.config.RestConfig.ListenAddress).Msg("starting REST server on address") + + l, err := net.Listen("tcp", e.config.RestConfig.ListenAddress) if err != nil { e.log.Err(err).Msg("failed to start the REST server") ctx.Throw(err) diff --git a/engine/access/rpc/engine_builder.go b/engine/access/rpc/engine_builder.go index a4694547b03..88dec3aac96 100644 --- a/engine/access/rpc/engine_builder.go +++ b/engine/access/rpc/engine_builder.go @@ -4,38 +4,40 @@ import ( "fmt" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" legacyaccessproto "github.com/onflow/flow/protobuf/go/flow/legacy/access" + "google.golang.org/grpc" - "github.com/onflow/flow-go/access" legacyaccess "github.com/onflow/flow-go/access/legacy" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/state_synchronization" ) type RPCEngineBuilder struct { *Engine me module.Local finalizedHeaderCache module.FinalizedHeaderCache + indexReporter state_synchronization.IndexReporter // optional parameters, only one can be set during build phase signerIndicesDecoder hotstuff.BlockSignerDecoder - handler accessproto.AccessAPIServer // Use the parent interface instead of implementation, so that we can assign it to proxy. + rpcHandler accessproto.AccessAPIServer // Use the parent interface instead of implementation, so that we can assign it to proxy. } // NewRPCEngineBuilder helps to build a new RPC engine. -func NewRPCEngineBuilder(engine *Engine, me module.Local, finalizedHeaderCache module.FinalizedHeaderCache) *RPCEngineBuilder { +func NewRPCEngineBuilder(engine *Engine, me module.Local, finalizedHeaderCache module.FinalizedHeaderCache, indexReporter state_synchronization.IndexReporter) *RPCEngineBuilder { // the default handler will use the engine.backend implementation return &RPCEngineBuilder{ Engine: engine, me: me, finalizedHeaderCache: finalizedHeaderCache, + indexReporter: indexReporter, } } -func (builder *RPCEngineBuilder) Handler() accessproto.AccessAPIServer { - return builder.handler +func (builder *RPCEngineBuilder) RpcHandler() accessproto.AccessAPIServer { + return builder.rpcHandler } // WithBlockSignerDecoder specifies that signer indices in block headers should be translated @@ -51,15 +53,15 @@ func (builder *RPCEngineBuilder) WithBlockSignerDecoder(signerIndicesDecoder hot return builder } -// WithNewHandler specifies that the given `AccessAPIServer` should be used for serving API queries. +// WithRpcHandler specifies that the given `AccessAPIServer` should be used for serving API queries. // Caution: // you can inject either a `BlockSignerDecoder` (via method `WithBlockSignerDecoder`) -// or an `AccessAPIServer` (via method `WithNewHandler`); but not both. If both are +// or an `AccessAPIServer` (via method `WithRpcHandler`); but not both. If both are // specified, the builder will error during the build step. // // Returns self-reference for chaining. 
-func (builder *RPCEngineBuilder) WithNewHandler(handler accessproto.AccessAPIServer) *RPCEngineBuilder { - builder.handler = handler +func (builder *RPCEngineBuilder) WithRpcHandler(handler accessproto.AccessAPIServer) *RPCEngineBuilder { + builder.rpcHandler = handler return builder } @@ -67,40 +69,51 @@ func (builder *RPCEngineBuilder) WithNewHandler(handler accessproto.AccessAPISer // Returns self-reference for chaining. func (builder *RPCEngineBuilder) WithLegacy() *RPCEngineBuilder { // Register legacy gRPC handlers for backwards compatibility, to be removed at a later date - legacyaccessproto.RegisterAccessAPIServer( - builder.unsecureGrpcServer, - legacyaccess.NewHandler(builder.backend, builder.chain), - ) - legacyaccessproto.RegisterAccessAPIServer( - builder.secureGrpcServer, - legacyaccess.NewHandler(builder.backend, builder.chain), - ) + builder.unsecureGrpcServer.RegisterService(func(s *grpc.Server) { + legacyaccessproto.RegisterAccessAPIServer(s, legacyaccess.NewHandler(builder.backend, builder.chain)) + }) + builder.secureGrpcServer.RegisterService(func(s *grpc.Server) { + legacyaccessproto.RegisterAccessAPIServer(s, legacyaccess.NewHandler(builder.backend, builder.chain)) + }) + return builder } +func (builder *RPCEngineBuilder) DefaultHandler(signerIndicesDecoder hotstuff.BlockSignerDecoder) *Handler { + if signerIndicesDecoder == nil { + return NewHandler(builder.Engine.backend, builder.Engine.chain, builder.finalizedHeaderCache, builder.me, builder.stateStreamConfig.MaxGlobalStreams, WithIndexReporter(builder.indexReporter)) + } else { + return NewHandler(builder.Engine.backend, builder.Engine.chain, builder.finalizedHeaderCache, builder.me, builder.stateStreamConfig.MaxGlobalStreams, WithBlockSignerDecoder(signerIndicesDecoder), WithIndexReporter(builder.indexReporter)) + } +} + // WithMetrics specifies the metrics should be collected. // Returns self-reference for chaining. 
func (builder *RPCEngineBuilder) WithMetrics() *RPCEngineBuilder { // Not interested in legacy metrics, so initialize here grpc_prometheus.EnableHandlingTimeHistogram() - grpc_prometheus.Register(builder.unsecureGrpcServer) - grpc_prometheus.Register(builder.secureGrpcServer) + builder.unsecureGrpcServer.RegisterService(func(s *grpc.Server) { + grpc_prometheus.Register(s) + }) + builder.secureGrpcServer.RegisterService(func(s *grpc.Server) { + grpc_prometheus.Register(s) + }) return builder } func (builder *RPCEngineBuilder) Build() (*Engine, error) { - if builder.signerIndicesDecoder != nil && builder.handler != nil { + if builder.signerIndicesDecoder != nil && builder.rpcHandler != nil { return nil, fmt.Errorf("only BlockSignerDecoder (via method `WithBlockSignerDecoder`) or AccessAPIServer (via method `WithNewHandler`) can be specified but not both") } - handler := builder.handler - if handler == nil { - if builder.signerIndicesDecoder == nil { - handler = access.NewHandler(builder.Engine.backend, builder.Engine.chain, builder.finalizedHeaderCache, builder.me) - } else { - handler = access.NewHandler(builder.Engine.backend, builder.Engine.chain, builder.finalizedHeaderCache, builder.me, access.WithBlockSignerDecoder(builder.signerIndicesDecoder)) - } + rpcHandler := builder.rpcHandler + if rpcHandler == nil { + rpcHandler = builder.DefaultHandler(builder.signerIndicesDecoder) } - accessproto.RegisterAccessAPIServer(builder.unsecureGrpcServer, handler) - accessproto.RegisterAccessAPIServer(builder.secureGrpcServer, handler) + builder.unsecureGrpcServer.RegisterService(func(s *grpc.Server) { + accessproto.RegisterAccessAPIServer(s, rpcHandler) + }) + builder.secureGrpcServer.RegisterService(func(s *grpc.Server) { + accessproto.RegisterAccessAPIServer(s, rpcHandler) + }) return builder.Engine, nil } diff --git a/engine/access/rpc/handler.go b/engine/access/rpc/handler.go new file mode 100644 index 00000000000..e6a79af259c --- /dev/null +++ b/engine/access/rpc/handler.go @@ -0,0 +1,1608 @@ +package rpc + +import ( + "context" + "errors" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/consensus/hotstuff/signature" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/state_synchronization" + "github.com/onflow/flow-go/module/state_synchronization/indexer" +) + +type Handler struct { + subscription.StreamingData + api access.API + chain flow.Chain + signerIndicesDecoder hotstuff.BlockSignerDecoder + finalizedHeaderCache module.FinalizedHeaderCache + me module.Local + indexReporter state_synchronization.IndexReporter +} + +// HandlerOption is used to hand over optional constructor parameters +type HandlerOption func(*Handler) + +var _ accessproto.AccessAPIServer = (*Handler)(nil) + +// sendSubscribeBlocksResponseFunc is a callback function used to send +// SubscribeBlocksResponse to the client stream. 
+type sendSubscribeBlocksResponseFunc func(*accessproto.SubscribeBlocksResponse) error + +// sendSubscribeBlockHeadersResponseFunc is a callback function used to send +// SubscribeBlockHeadersResponse to the client stream. +type sendSubscribeBlockHeadersResponseFunc func(*accessproto.SubscribeBlockHeadersResponse) error + +// sendSubscribeBlockDigestsResponseFunc is a callback function used to send +// SubscribeBlockDigestsResponse to the client stream. +type sendSubscribeBlockDigestsResponseFunc func(*accessproto.SubscribeBlockDigestsResponse) error + +func NewHandler( + api access.API, + chain flow.Chain, + finalizedHeader module.FinalizedHeaderCache, + me module.Local, + maxStreams uint32, + options ...HandlerOption, +) *Handler { + h := &Handler{ + StreamingData: subscription.NewStreamingData(maxStreams), + api: api, + chain: chain, + finalizedHeaderCache: finalizedHeader, + me: me, + signerIndicesDecoder: &signature.NoopBlockSignerDecoder{}, + } + for _, opt := range options { + opt(h) + } + return h +} + +// Ping the Access API server for a response. +func (h *Handler) Ping(ctx context.Context, _ *accessproto.PingRequest) (*accessproto.PingResponse, error) { + err := h.api.Ping(ctx) + if err != nil { + return nil, err + } + + return &accessproto.PingResponse{}, nil +} + +// GetNodeVersionInfo gets node version information such as semver, commit, sporkID, protocolVersion, etc +func (h *Handler) GetNodeVersionInfo( + ctx context.Context, + _ *accessproto.GetNodeVersionInfoRequest, +) (*accessproto.GetNodeVersionInfoResponse, error) { + nodeVersionInfo, err := h.api.GetNodeVersionInfo(ctx) + if err != nil { + return nil, err + } + + return &accessproto.GetNodeVersionInfoResponse{ + Info: &entities.NodeVersionInfo{ + Semver: nodeVersionInfo.Semver, + Commit: nodeVersionInfo.Commit, + SporkId: nodeVersionInfo.SporkId[:], + ProtocolVersion: nodeVersionInfo.ProtocolVersion, + ProtocolStateVersion: nodeVersionInfo.ProtocolStateVersion, + SporkRootBlockHeight: nodeVersionInfo.SporkRootBlockHeight, + NodeRootBlockHeight: nodeVersionInfo.NodeRootBlockHeight, + CompatibleRange: convert.CompatibleRangeToMessage(nodeVersionInfo.CompatibleRange), + }, + }, nil +} + +func (h *Handler) GetNetworkParameters( + ctx context.Context, + _ *accessproto.GetNetworkParametersRequest, +) (*accessproto.GetNetworkParametersResponse, error) { + params := h.api.GetNetworkParameters(ctx) + + return &accessproto.GetNetworkParametersResponse{ + ChainId: string(params.ChainID), + }, nil +} + +// GetLatestBlockHeader gets the latest sealed block header. +func (h *Handler) GetLatestBlockHeader( + ctx context.Context, + req *accessproto.GetLatestBlockHeaderRequest, +) (*accessproto.BlockHeaderResponse, error) { + header, status, err := h.api.GetLatestBlockHeader(ctx, req.GetIsSealed()) + if err != nil { + return nil, err + } + return h.blockHeaderResponse(header, status) +} + +// GetBlockHeaderByHeight gets a block header by height. +func (h *Handler) GetBlockHeaderByHeight( + ctx context.Context, + req *accessproto.GetBlockHeaderByHeightRequest, +) (*accessproto.BlockHeaderResponse, error) { + header, status, err := h.api.GetBlockHeaderByHeight(ctx, req.GetHeight()) + if err != nil { + return nil, err + } + return h.blockHeaderResponse(header, status) +} + +// GetBlockHeaderByID gets a block header by ID. 
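NewHandler above uses Go's functional-options pattern: required collaborators are plain constructor arguments, while optional ones such as the signer decoder and index reporter arrive as HandlerOption values applied over safe defaults (e.g. the NoopBlockSignerDecoder). A minimal sketch of the pattern follows; the handler and option types here are illustrative stand-ins, not the ones from this diff.

package main

import "fmt"

// handler stands in for the real Handler: required fields are constructor
// arguments, optional collaborators get defaults that options may override.
type handler struct {
	maxStreams uint32
	decoder    string
}

// option mirrors HandlerOption: a function that mutates the handler being built.
type option func(*handler)

// withDecoder mirrors WithBlockSignerDecoder, swapping out the default decoder.
func withDecoder(name string) option {
	return func(h *handler) { h.decoder = name }
}

func newHandler(maxStreams uint32, opts ...option) *handler {
	h := &handler{
		maxStreams: maxStreams,
		decoder:    "noop", // default, like signature.NoopBlockSignerDecoder above
	}
	for _, opt := range opts {
		opt(h)
	}
	return h
}

func main() {
	fmt.Printf("%+v\n", newHandler(100))
	fmt.Printf("%+v\n", newHandler(100, withDecoder("consensus-follower")))
}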
+func (h *Handler) GetBlockHeaderByID( + ctx context.Context, + req *accessproto.GetBlockHeaderByIDRequest, +) (*accessproto.BlockHeaderResponse, error) { + id, err := convert.BlockID(req.GetId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid block id: %v", err) + } + header, status, err := h.api.GetBlockHeaderByID(ctx, id) + if err != nil { + return nil, err + } + return h.blockHeaderResponse(header, status) +} + +// GetLatestBlock gets the latest sealed block. +func (h *Handler) GetLatestBlock( + ctx context.Context, + req *accessproto.GetLatestBlockRequest, +) (*accessproto.BlockResponse, error) { + block, status, err := h.api.GetLatestBlock(ctx, req.GetIsSealed()) + if err != nil { + return nil, err + } + return h.blockResponse(block, req.GetFullBlockResponse(), status) +} + +// GetBlockByHeight gets a block by height. +func (h *Handler) GetBlockByHeight( + ctx context.Context, + req *accessproto.GetBlockByHeightRequest, +) (*accessproto.BlockResponse, error) { + block, status, err := h.api.GetBlockByHeight(ctx, req.GetHeight()) + if err != nil { + return nil, err + } + return h.blockResponse(block, req.GetFullBlockResponse(), status) +} + +// GetBlockByID gets a block by ID. +func (h *Handler) GetBlockByID( + ctx context.Context, + req *accessproto.GetBlockByIDRequest, +) (*accessproto.BlockResponse, error) { + id, err := convert.BlockID(req.GetId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid block id: %v", err) + } + block, status, err := h.api.GetBlockByID(ctx, id) + if err != nil { + return nil, err + } + return h.blockResponse(block, req.GetFullBlockResponse(), status) +} + +// GetCollectionByID gets a collection by ID. +func (h *Handler) GetCollectionByID( + ctx context.Context, + req *accessproto.GetCollectionByIDRequest, +) (*accessproto.CollectionResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + id, err := convert.CollectionID(req.GetId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid collection id: %v", err) + } + + col, err := h.api.GetCollectionByID(ctx, id) + if err != nil { + return nil, err + } + + colMsg, err := convert.LightCollectionToMessage(col) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &accessproto.CollectionResponse{ + Collection: colMsg, + Metadata: metadata, + }, nil +} + +func (h *Handler) GetFullCollectionByID( + ctx context.Context, + req *accessproto.GetFullCollectionByIDRequest, +) (*accessproto.FullCollectionResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + id, err := convert.CollectionID(req.GetId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid collection id: %v", err) + } + + col, err := h.api.GetFullCollectionByID(ctx, id) + if err != nil { + return nil, err + } + + transactions, err := convert.FullCollectionToMessage(col) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &accessproto.FullCollectionResponse{ + Transactions: transactions, + Metadata: metadata, + }, nil +} + +// SendTransaction submits a transaction to the network. 
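Each unary handler in this file has the same shape: validate and convert the raw request bytes, delegate to the backend access.API, and wrap input failures as codes.InvalidArgument while passing backend errors through untouched. A hedged sketch of that pipeline; blockIDFromBytes is a hypothetical stand-in for convert.BlockID, not the real converter.

package main

import (
	"encoding/hex"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// blockIDFromBytes stands in for convert.BlockID: reject anything that is
// not a 32-byte identifier before touching the backend.
func blockIDFromBytes(raw []byte) (string, error) {
	if len(raw) != 32 {
		return "", fmt.Errorf("expected 32 bytes, got %d", len(raw))
	}
	return hex.EncodeToString(raw), nil
}

// getBlock shows the handler shape: validate input, call the backend,
// and wrap input failures as codes.InvalidArgument.
func getBlock(rawID []byte) (string, error) {
	id, err := blockIDFromBytes(rawID)
	if err != nil {
		return "", status.Errorf(codes.InvalidArgument, "invalid block id: %v", err)
	}
	return "block " + id, nil // backend lookup would happen here
}

func main() {
	if _, err := getBlock([]byte{0x01}); err != nil {
		fmt.Println(err) // rpc error: code = InvalidArgument desc = invalid block id: ...
	}
}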
+func (h *Handler) SendTransaction( + ctx context.Context, + req *accessproto.SendTransactionRequest, +) (*accessproto.SendTransactionResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + txMsg := req.GetTransaction() + + tx, err := convert.MessageToTransaction(txMsg, h.chain) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + err = h.api.SendTransaction(ctx, &tx) + if err != nil { + return nil, err + } + + txID := tx.ID() + + return &accessproto.SendTransactionResponse{ + Id: txID[:], + Metadata: metadata, + }, nil +} + +// GetTransaction gets a transaction by ID. +func (h *Handler) GetTransaction( + ctx context.Context, + req *accessproto.GetTransactionRequest, +) (*accessproto.TransactionResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + id, err := convert.TransactionID(req.GetId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid transaction id: %v", err) + } + + tx, err := h.api.GetTransaction(ctx, id) + if err != nil { + return nil, err + } + + return &accessproto.TransactionResponse{ + Transaction: convert.TransactionToMessage(*tx), + Metadata: metadata, + }, nil +} + +// GetTransactionResult gets a transaction result by ID. +func (h *Handler) GetTransactionResult( + ctx context.Context, + req *accessproto.GetTransactionRequest, +) (*accessproto.TransactionResultResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + transactionID, err := convert.TransactionID(req.GetId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid transaction id: %v", err) + } + + blockId := flow.ZeroID + requestBlockId := req.GetBlockId() + if requestBlockId != nil { + blockId, err = convert.BlockID(requestBlockId) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid block id: %v", err) + } + } + + collectionId := flow.ZeroID + requestCollectionId := req.GetCollectionId() + if requestCollectionId != nil { + collectionId, err = convert.CollectionID(requestCollectionId) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid collection id: %v", err) + } + } + + eventEncodingVersion := req.GetEventEncodingVersion() + result, err := h.api.GetTransactionResult(ctx, transactionID, blockId, collectionId, eventEncodingVersion) + if err != nil { + return nil, err + } + + message := convert.TransactionResultToMessage(result) + message.Metadata = metadata + + return message, nil +} + +func (h *Handler) GetTransactionResultsByBlockID( + ctx context.Context, + req *accessproto.GetTransactionsByBlockIDRequest, +) (*accessproto.TransactionResultsResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + id, err := convert.BlockID(req.GetBlockId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid block id: %v", err) + } + + eventEncodingVersion := req.GetEventEncodingVersion() + + results, err := h.api.GetTransactionResultsByBlockID(ctx, id, eventEncodingVersion) + if err != nil { + return nil, err + } + + message := convert.TransactionResultsToMessage(results) + message.Metadata = metadata + + return message, nil +} + +func (h *Handler) GetSystemTransaction( + ctx context.Context, + req *accessproto.GetSystemTransactionRequest, +) (*accessproto.TransactionResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err
+ } + + blockID, err := convert.BlockID(req.GetBlockId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid block id: %v", err) + } + + var txID flow.Identifier + if id := req.GetId(); id == nil { + txID = flow.ZeroID + } else { + txID, err = convert.TransactionID(id) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid transaction id: %v", err) + } + } + + tx, err := h.api.GetSystemTransaction(ctx, txID, blockID) + if err != nil { + return nil, err + } + + return &accessproto.TransactionResponse{ + Transaction: convert.TransactionToMessage(*tx), + Metadata: metadata, + }, nil +} + +func (h *Handler) GetSystemTransactionResult( + ctx context.Context, + req *accessproto.GetSystemTransactionResultRequest, +) (*accessproto.TransactionResultResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + blockID, err := convert.BlockID(req.GetBlockId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid block id: %v", err) + } + + var txID flow.Identifier + if id := req.GetId(); id == nil { + txID = flow.ZeroID + } else { + txID, err = convert.TransactionID(id) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid transaction id: %v", err) + } + } + + result, err := h.api.GetSystemTransactionResult(ctx, txID, blockID, req.GetEventEncodingVersion()) + if err != nil { + return nil, err + } + + message := convert.TransactionResultToMessage(result) + message.Metadata = metadata + + return message, nil +} + +func (h *Handler) GetTransactionsByBlockID( + ctx context.Context, + req *accessproto.GetTransactionsByBlockIDRequest, +) (*accessproto.TransactionsResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + id, err := convert.BlockID(req.GetBlockId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid block id: %v", err) + } + + transactions, err := h.api.GetTransactionsByBlockID(ctx, id) + if err != nil { + return nil, err + } + + return &accessproto.TransactionsResponse{ + Transactions: convert.TransactionsToMessages(transactions), + Metadata: metadata, + }, nil +} + +// GetTransactionResultByIndex gets a transaction result at a specific index in an executed block; +// pending or finalized (not yet executed) transactions return errors. +func (h *Handler) GetTransactionResultByIndex( + ctx context.Context, + req *accessproto.GetTransactionByIndexRequest, +) (*accessproto.TransactionResultResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + blockID, err := convert.BlockID(req.GetBlockId()) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid block id: %v", err) + } + + eventEncodingVersion := req.GetEventEncodingVersion() + + result, err := h.api.GetTransactionResultByIndex(ctx, blockID, req.GetIndex(), eventEncodingVersion) + if err != nil { + return nil, err + } + + message := convert.TransactionResultToMessage(result) + message.Metadata = metadata + + return message, nil +} + +// GetAccount returns an account by address at the latest sealed block.
+func (h *Handler) GetAccount( + ctx context.Context, + req *accessproto.GetAccountRequest, +) (*accessproto.GetAccountResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + address, err := convert.Address(req.GetAddress(), h.chain) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid address: %v", err) + } + + account, err := h.api.GetAccount(ctx, address) + if err != nil { + return nil, err + } + + accountMsg, err := convert.AccountToMessage(account) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &accessproto.GetAccountResponse{ + Account: accountMsg, + Metadata: metadata, + }, nil +} + +// GetAccountAtLatestBlock returns an account by address at the latest sealed block. +func (h *Handler) GetAccountAtLatestBlock( + ctx context.Context, + req *accessproto.GetAccountAtLatestBlockRequest, +) (*accessproto.AccountResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + address, err := convert.Address(req.GetAddress(), h.chain) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid address: %v", err) + } + + account, err := h.api.GetAccountAtLatestBlock(ctx, address) + if err != nil { + return nil, err + } + + accountMsg, err := convert.AccountToMessage(account) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &accessproto.AccountResponse{ + Account: accountMsg, + Metadata: metadata, + }, nil +} + +// GetAccountAtBlockHeight returns an account by address at the given block height. +func (h *Handler) GetAccountAtBlockHeight( + ctx context.Context, + req *accessproto.GetAccountAtBlockHeightRequest, +) (*accessproto.AccountResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + address, err := convert.Address(req.GetAddress(), h.chain) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid address: %v", err) + } + + account, err := h.api.GetAccountAtBlockHeight(ctx, address, req.GetBlockHeight()) + if err != nil { + return nil, err + } + + accountMsg, err := convert.AccountToMessage(account) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &accessproto.AccountResponse{ + Account: accountMsg, + Metadata: metadata, + }, nil +} + +// GetAccountBalanceAtLatestBlock returns an account balance by address at the latest sealed block. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if invalid account address provided. +// - codes.Internal - if failed to get account from the execution node or failed to convert account message. +func (h *Handler) GetAccountBalanceAtLatestBlock( + ctx context.Context, + req *accessproto.GetAccountBalanceAtLatestBlockRequest, +) (*accessproto.AccountBalanceResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + address, err := convert.Address(req.GetAddress(), h.chain) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid address: %v", err) + } + + accountBalance, err := h.api.GetAccountBalanceAtLatestBlock(ctx, address) + if err != nil { + return nil, err + } + + return &accessproto.AccountBalanceResponse{ + Balance: accountBalance, + Metadata: metadata, + }, nil +} + +// GetAccountBalanceAtBlockHeight returns an account balance by address at the given block height. 
+// +// Expected errors during normal operation: +// - codes.InvalidArgument - if invalid account address provided. +// - codes.Internal - if failed to get account from the execution node or failed to convert account message. +func (h *Handler) GetAccountBalanceAtBlockHeight( + ctx context.Context, + req *accessproto.GetAccountBalanceAtBlockHeightRequest, +) (*accessproto.AccountBalanceResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + address, err := convert.Address(req.GetAddress(), h.chain) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid address: %v", err) + } + + accountBalance, err := h.api.GetAccountBalanceAtBlockHeight(ctx, address, req.GetBlockHeight()) + if err != nil { + return nil, err + } + + return &accessproto.AccountBalanceResponse{ + Balance: accountBalance, + Metadata: metadata, + }, nil +} + +// GetAccountKeyAtLatestBlock returns an account public key by address and key index at the latest sealed block. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if invalid account address provided. +// - codes.Internal - if failed to get account from the execution node, failed to convert account message or failed to encode account key. +func (h *Handler) GetAccountKeyAtLatestBlock( + ctx context.Context, + req *accessproto.GetAccountKeyAtLatestBlockRequest, +) (*accessproto.AccountKeyResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + address, err := convert.Address(req.GetAddress(), h.chain) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid address: %v", err) + } + + keyByIndex, err := h.api.GetAccountKeyAtLatestBlock(ctx, address, req.GetIndex()) + if err != nil { + return nil, err + } + + accountKey, err := convert.AccountKeyToMessage(*keyByIndex) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to encode account key: %v", err) + } + + return &accessproto.AccountKeyResponse{ + AccountKey: accountKey, + Metadata: metadata, + }, nil +} + +// GetAccountKeysAtLatestBlock returns the account public keys by address at the latest sealed block. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if invalid account address provided. +// - codes.Internal - if failed to get account from the execution node, failed to convert account message or failed to encode account key.
+func (h *Handler) GetAccountKeysAtLatestBlock( + ctx context.Context, + req *accessproto.GetAccountKeysAtLatestBlockRequest, +) (*accessproto.AccountKeysResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + address, err := convert.Address(req.GetAddress(), h.chain) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid address: %v", err) + } + + accountKeys, err := h.api.GetAccountKeysAtLatestBlock(ctx, address) + if err != nil { + return nil, err + } + + var publicKeys []*entities.AccountKey + + for i, key := range accountKeys { + accountKey, err := convert.AccountKeyToMessage(key) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to encode account key %d: %v", i, err) + } + + publicKeys = append(publicKeys, accountKey) + } + + return &accessproto.AccountKeysResponse{ + AccountKeys: publicKeys, + Metadata: metadata, + }, nil +} + +// GetAccountKeyAtBlockHeight returns an account public key by address and key index at the given block height. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if invalid account address provided. +// - codes.Internal - if failed to get account from the execution node, failed to convert account message or failed to encode account key. +func (h *Handler) GetAccountKeyAtBlockHeight( + ctx context.Context, + req *accessproto.GetAccountKeyAtBlockHeightRequest, +) (*accessproto.AccountKeyResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + address, err := convert.Address(req.GetAddress(), h.chain) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid address: %v", err) + } + + keyByIndex, err := h.api.GetAccountKeyAtBlockHeight(ctx, address, req.GetIndex(), req.GetBlockHeight()) + if err != nil { + return nil, err + } + + accountKey, err := convert.AccountKeyToMessage(*keyByIndex) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to encode account key: %v", err) + } + + return &accessproto.AccountKeyResponse{ + AccountKey: accountKey, + Metadata: metadata, + }, nil +} + +// GetAccountKeysAtBlockHeight returns the account public keys by address at the given block height. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if invalid account address provided. +// - codes.Internal - if failed to get account from the execution node, failed to convert account message or failed to encode account key.
+func (h *Handler) GetAccountKeysAtBlockHeight( + ctx context.Context, + req *accessproto.GetAccountKeysAtBlockHeightRequest, +) (*accessproto.AccountKeysResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + address, err := convert.Address(req.GetAddress(), h.chain) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid address: %v", err) + } + + accountKeys, err := h.api.GetAccountKeysAtBlockHeight(ctx, address, req.GetBlockHeight()) + if err != nil { + return nil, err + } + + var publicKeys []*entities.AccountKey + + for i, key := range accountKeys { + accountKey, err := convert.AccountKeyToMessage(key) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to encode account key %d: %v", i, err) + } + + publicKeys = append(publicKeys, accountKey) + } + + return &accessproto.AccountKeysResponse{ + AccountKeys: publicKeys, + Metadata: metadata, + }, nil +} + +// ExecuteScriptAtLatestBlock executes a script at the latest block. +func (h *Handler) ExecuteScriptAtLatestBlock( + ctx context.Context, + req *accessproto.ExecuteScriptAtLatestBlockRequest, +) (*accessproto.ExecuteScriptResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + script := req.GetScript() + arguments := req.GetArguments() + + value, err := h.api.ExecuteScriptAtLatestBlock(ctx, script, arguments) + if err != nil { + return nil, err + } + + return &accessproto.ExecuteScriptResponse{ + Value: value, + Metadata: metadata, + }, nil +} + +// ExecuteScriptAtBlockHeight executes a script at a specific block height. +func (h *Handler) ExecuteScriptAtBlockHeight( + ctx context.Context, + req *accessproto.ExecuteScriptAtBlockHeightRequest, +) (*accessproto.ExecuteScriptResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + script := req.GetScript() + arguments := req.GetArguments() + blockHeight := req.GetBlockHeight() + + value, err := h.api.ExecuteScriptAtBlockHeight(ctx, blockHeight, script, arguments) + if err != nil { + return nil, err + } + + return &accessproto.ExecuteScriptResponse{ + Value: value, + Metadata: metadata, + }, nil +} + +// ExecuteScriptAtBlockID executes a script at a specific block ID. +func (h *Handler) ExecuteScriptAtBlockID( + ctx context.Context, + req *accessproto.ExecuteScriptAtBlockIDRequest, +) (*accessproto.ExecuteScriptResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + script := req.GetScript() + arguments := req.GetArguments() + blockID := convert.MessageToIdentifier(req.GetBlockId()) + + value, err := h.api.ExecuteScriptAtBlockID(ctx, blockID, script, arguments) + if err != nil { + return nil, err + } + + return &accessproto.ExecuteScriptResponse{ + Value: value, + Metadata: metadata, + }, nil +} + +// GetEventsForHeightRange returns events matching a query.
+func (h *Handler) GetEventsForHeightRange( + ctx context.Context, + req *accessproto.GetEventsForHeightRangeRequest, +) (*accessproto.EventsResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + eventType, err := convert.EventType(req.GetType()) + if err != nil { + return nil, err + } + + startHeight := req.GetStartHeight() + endHeight := req.GetEndHeight() + + eventEncodingVersion := req.GetEventEncodingVersion() + + results, err := h.api.GetEventsForHeightRange(ctx, eventType, startHeight, endHeight, eventEncodingVersion) + if err != nil { + return nil, err + } + + resultEvents, err := convert.BlockEventsToMessages(results) + if err != nil { + return nil, err + } + return &accessproto.EventsResponse{ + Results: resultEvents, + Metadata: metadata, + }, nil +} + +// GetEventsForBlockIDs returns events matching a set of block IDs. +func (h *Handler) GetEventsForBlockIDs( + ctx context.Context, + req *accessproto.GetEventsForBlockIDsRequest, +) (*accessproto.EventsResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + eventType, err := convert.EventType(req.GetType()) + if err != nil { + return nil, err + } + + blockIDs, err := convert.BlockIDs(req.GetBlockIds()) + if err != nil { + return nil, err + } + + eventEncodingVersion := req.GetEventEncodingVersion() + + results, err := h.api.GetEventsForBlockIDs(ctx, eventType, blockIDs, eventEncodingVersion) + if err != nil { + return nil, err + } + + resultEvents, err := convert.BlockEventsToMessages(results) + if err != nil { + return nil, err + } + + return &accessproto.EventsResponse{ + Results: resultEvents, + Metadata: metadata, + }, nil +} + +// GetLatestProtocolStateSnapshot returns the latest serializable Snapshot +func (h *Handler) GetLatestProtocolStateSnapshot(ctx context.Context, req *accessproto.GetLatestProtocolStateSnapshotRequest) (*accessproto.ProtocolStateSnapshotResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + snapshot, err := h.api.GetLatestProtocolStateSnapshot(ctx) + if err != nil { + return nil, err + } + + return &accessproto.ProtocolStateSnapshotResponse{ + SerializedSnapshot: snapshot, + Metadata: metadata, + }, nil +} + +// GetProtocolStateSnapshotByBlockID returns serializable Snapshot by blockID +func (h *Handler) GetProtocolStateSnapshotByBlockID(ctx context.Context, req *accessproto.GetProtocolStateSnapshotByBlockIDRequest) (*accessproto.ProtocolStateSnapshotResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + blockID := convert.MessageToIdentifier(req.GetBlockId()) + + snapshot, err := h.api.GetProtocolStateSnapshotByBlockID(ctx, blockID) + if err != nil { + return nil, err + } + + return &accessproto.ProtocolStateSnapshotResponse{ + SerializedSnapshot: snapshot, + Metadata: metadata, + }, nil +} + +// GetProtocolStateSnapshotByHeight returns serializable Snapshot by block height +func (h *Handler) GetProtocolStateSnapshotByHeight(ctx context.Context, req *accessproto.GetProtocolStateSnapshotByHeightRequest) (*accessproto.ProtocolStateSnapshotResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + snapshot, err := h.api.GetProtocolStateSnapshotByHeight(ctx, req.GetBlockHeight()) + if err != nil { + return nil, err + } + + return &accessproto.ProtocolStateSnapshotResponse{ + SerializedSnapshot: snapshot, + Metadata: metadata, + }, nil +} + +// 
GetExecutionResultForBlockID returns the latest received execution result for the given block ID. +// AN might receive multiple receipts with conflicting results for unsealed blocks. +// In that case, the AN cannot determine which result is correct until the block is sealed, so it has to pick one result to respond with. For now, we return the result from the latest received receipt. +func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *accessproto.GetExecutionResultForBlockIDRequest) (*accessproto.ExecutionResultForBlockIDResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + blockID := convert.MessageToIdentifier(req.GetBlockId()) + + result, err := h.api.GetExecutionResultForBlockID(ctx, blockID) + if err != nil { + return nil, err + } + + return executionResultToMessages(result, metadata) +} + +// GetExecutionResultByID returns the execution result for the given ID. +func (h *Handler) GetExecutionResultByID(ctx context.Context, req *accessproto.GetExecutionResultByIDRequest) (*accessproto.ExecutionResultByIDResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + resultID := convert.MessageToIdentifier(req.GetId()) + + result, err := h.api.GetExecutionResultByID(ctx, resultID) + if err != nil { + return nil, err + } + + execResult, err := convert.ExecutionResultToMessage(result) + if err != nil { + return nil, err + } + return &accessproto.ExecutionResultByIDResponse{ + ExecutionResult: execResult, + Metadata: metadata, + }, nil +} + +// SubscribeBlocksFromStartBlockID handles subscription requests for blocks started from block id. +// It takes a SubscribeBlocksFromStartBlockIDRequest and an AccessAPI_SubscribeBlocksFromStartBlockIDServer stream as input. +// The handler manages the subscription to block updates and sends the subscribed block information +// to the client via the provided stream. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if invalid startBlockID provided or unknown block status provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream encountered an error, if stream got unexpected response or could not convert block to message or could not send response. +func (h *Handler) SubscribeBlocksFromStartBlockID(request *accessproto.SubscribeBlocksFromStartBlockIDRequest, stream accessproto.AccessAPI_SubscribeBlocksFromStartBlockIDServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + startBlockID, blockStatus, err := h.getSubscriptionDataFromStartBlockID(request.GetStartBlockId(), request.GetBlockStatus()) + if err != nil { + return err + } + + sub := h.api.SubscribeBlocksFromStartBlockID(stream.Context(), startBlockID, blockStatus) + return HandleRPCSubscription(sub, h.handleBlocksResponse(stream.Send, request.GetFullBlockResponse(), blockStatus)) +} + +// SubscribeBlocksFromStartHeight handles subscription requests for blocks started from block height. +// It takes a SubscribeBlocksFromStartHeightRequest and an AccessAPI_SubscribeBlocksFromStartHeightServer stream as input. +// The handler manages the subscription to block updates and sends the subscribed block information +// to the client via the provided stream.
+// +// Expected errors during normal operation: +// - codes.InvalidArgument - if unknown block status provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream encountered an error, if stream got unexpected response or could not convert block to message or could not send response. +func (h *Handler) SubscribeBlocksFromStartHeight(request *accessproto.SubscribeBlocksFromStartHeightRequest, stream accessproto.AccessAPI_SubscribeBlocksFromStartHeightServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + blockStatus := convert.MessageToBlockStatus(request.GetBlockStatus()) + err := checkBlockStatus(blockStatus) + if err != nil { + return err + } + + sub := h.api.SubscribeBlocksFromStartHeight(stream.Context(), request.GetStartBlockHeight(), blockStatus) + return HandleRPCSubscription(sub, h.handleBlocksResponse(stream.Send, request.GetFullBlockResponse(), blockStatus)) +} + +// SubscribeBlocksFromLatest handles subscription requests for blocks started from latest sealed block. +// It takes a SubscribeBlocksFromLatestRequest and an AccessAPI_SubscribeBlocksFromLatestServer stream as input. +// The handler manages the subscription to block updates and sends the subscribed block information +// to the client via the provided stream. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if unknown block status provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream encountered an error, if stream got unexpected response or could not convert block to message or could not send response. +func (h *Handler) SubscribeBlocksFromLatest(request *accessproto.SubscribeBlocksFromLatestRequest, stream accessproto.AccessAPI_SubscribeBlocksFromLatestServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + blockStatus := convert.MessageToBlockStatus(request.GetBlockStatus()) + err := checkBlockStatus(blockStatus) + if err != nil { + return err + } + + sub := h.api.SubscribeBlocksFromLatest(stream.Context(), blockStatus) + return HandleRPCSubscription(sub, h.handleBlocksResponse(stream.Send, request.GetFullBlockResponse(), blockStatus)) +} + +// handleBlocksResponse handles the subscription to block updates and sends +// the subscribed block information to the client via the provided stream. +// +// Parameters: +// - send: The function responsible for sending the block response to the client. +// - fullBlockResponse: A boolean indicating whether to include full block responses. +// - blockStatus: The current block status. +// +// Returns a function that can be used as a callback for block updates. +// +// This function is designed to be used as a callback for block updates in a subscription. +// It takes a block, processes it, and sends the corresponding response to the client using the provided send function. +// +// Expected errors during normal operation: +// - codes.Internal: If cannot convert a block to a message or the stream could not send a response. 
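Every streaming endpoint in this file opens with the same guard: refuse a new subscription once StreamCount reaches MaxStreams, then hold a slot for the stream's lifetime. A compact sketch of that bookkeeping, using sync/atomic directly rather than the embedded subscription.StreamingData from the diff.

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errMaxStreams = errors.New("maximum number of streams reached")

// streamGuard mirrors the StreamCount/MaxStreams check used by the
// subscription handlers: check, increment, and decrement on exit.
type streamGuard struct {
	count      atomic.Int64
	maxStreams int64
}

// run executes stream while holding one of the limited stream slots.
func (g *streamGuard) run(stream func() error) error {
	if g.count.Load() >= g.maxStreams {
		return errMaxStreams
	}
	g.count.Add(1)
	defer g.count.Add(-1)
	return stream()
}

func main() {
	g := &streamGuard{maxStreams: 1}
	fmt.Println(g.run(func() error { return nil })) // <nil>
}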
+func (h *Handler) handleBlocksResponse(send sendSubscribeBlocksResponseFunc, fullBlockResponse bool, blockStatus flow.BlockStatus) func(*flow.Block) error { + return func(block *flow.Block) error { + msgBlockResponse, err := h.blockResponse(block, fullBlockResponse, blockStatus) + if err != nil { + return rpc.ConvertError(err, "could not convert block to message", codes.Internal) + } + + err = send(&accessproto.SubscribeBlocksResponse{ + Block: msgBlockResponse.Block, + }) + if err != nil { + return rpc.ConvertError(err, "could not send response", codes.Internal) + } + + return nil + } +} + +// SubscribeBlockHeadersFromStartBlockID handles subscription requests for block headers started from block id. +// It takes a SubscribeBlockHeadersFromStartBlockIDRequest and an AccessAPI_SubscribeBlockHeadersFromStartBlockIDServer stream as input. +// The handler manages the subscription to block updates and sends the subscribed block header information +// to the client via the provided stream. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if invalid startBlockID provided or unknown block status provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream encountered an error, if stream got unexpected response or could not convert block header to message or could not send response. +func (h *Handler) SubscribeBlockHeadersFromStartBlockID(request *accessproto.SubscribeBlockHeadersFromStartBlockIDRequest, stream accessproto.AccessAPI_SubscribeBlockHeadersFromStartBlockIDServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + startBlockID, blockStatus, err := h.getSubscriptionDataFromStartBlockID(request.GetStartBlockId(), request.GetBlockStatus()) + if err != nil { + return err + } + + sub := h.api.SubscribeBlockHeadersFromStartBlockID(stream.Context(), startBlockID, blockStatus) + return HandleRPCSubscription(sub, h.handleBlockHeadersResponse(stream.Send)) +} + +// SubscribeBlockHeadersFromStartHeight handles subscription requests for block headers started from block height. +// It takes a SubscribeBlockHeadersFromStartHeightRequest and an AccessAPI_SubscribeBlockHeadersFromStartHeightServer stream as input. +// The handler manages the subscription to block updates and sends the subscribed block header information +// to the client via the provided stream. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if unknown block status provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream encountered an error, if stream got unexpected response or could not convert block header to message or could not send response. 
+func (h *Handler) SubscribeBlockHeadersFromStartHeight(request *accessproto.SubscribeBlockHeadersFromStartHeightRequest, stream accessproto.AccessAPI_SubscribeBlockHeadersFromStartHeightServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + blockStatus := convert.MessageToBlockStatus(request.GetBlockStatus()) + err := checkBlockStatus(blockStatus) + if err != nil { + return err + } + + sub := h.api.SubscribeBlockHeadersFromStartHeight(stream.Context(), request.GetStartBlockHeight(), blockStatus) + return HandleRPCSubscription(sub, h.handleBlockHeadersResponse(stream.Send)) +} + +// SubscribeBlockHeadersFromLatest handles subscription requests for block headers started from latest sealed block. +// It takes a SubscribeBlockHeadersFromLatestRequest and an AccessAPI_SubscribeBlockHeadersFromLatestServer stream as input. +// The handler manages the subscription to block updates and sends the subscribed block header information +// to the client via the provided stream. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if unknown block status provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream encountered an error, if stream got unexpected response or could not convert block header to message or could not send response. +func (h *Handler) SubscribeBlockHeadersFromLatest(request *accessproto.SubscribeBlockHeadersFromLatestRequest, stream accessproto.AccessAPI_SubscribeBlockHeadersFromLatestServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + blockStatus := convert.MessageToBlockStatus(request.GetBlockStatus()) + err := checkBlockStatus(blockStatus) + if err != nil { + return err + } + + sub := h.api.SubscribeBlockHeadersFromLatest(stream.Context(), blockStatus) + return HandleRPCSubscription(sub, h.handleBlockHeadersResponse(stream.Send)) +} + +// handleBlockHeadersResponse handles the subscription to block updates and sends +// the subscribed block header information to the client via the provided stream. +// +// Parameters: +// - send: The function responsible for sending the block header response to the client. +// +// Returns a function that can be used as a callback for block header updates. +// +// This function is designed to be used as a callback for block header updates in a subscription. +// It takes a block header, processes it, and sends the corresponding response to the client using the provided send function. +// +// Expected errors during normal operation: +// - codes.Internal: If could not decode the signer indices from the given block header, could not convert a block header to a message or the stream could not send a response. 
+func (h *Handler) handleBlockHeadersResponse(send sendSubscribeBlockHeadersResponseFunc) func(*flow.Header) error { + return func(header *flow.Header) error { + signerIDs, err := h.signerIndicesDecoder.DecodeSignerIDs(header) + if err != nil { + return rpc.ConvertError(err, "could not decode the signer indices from the given block header", codes.Internal) // the block was retrieved from local storage - so no errors are expected + } + + msgHeader, err := convert.BlockHeaderToMessage(header, signerIDs) + if err != nil { + return rpc.ConvertError(err, "could not convert block header to message", codes.Internal) + } + + err = send(&accessproto.SubscribeBlockHeadersResponse{ + Header: msgHeader, + }) + if err != nil { + return rpc.ConvertError(err, "could not send response", codes.Internal) + } + + return nil + } +} + +// SubscribeBlockDigestsFromStartBlockID streams finalized or sealed lightweight blocks starting at the requested block id. +// It takes a SubscribeBlockDigestsFromStartBlockIDRequest and an AccessAPI_SubscribeBlockDigestsFromStartBlockIDServer stream as input. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if invalid startBlockID provided or unknown block status provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream encountered an error, if stream got unexpected response or could not convert block to message or could not send response. +func (h *Handler) SubscribeBlockDigestsFromStartBlockID(request *accessproto.SubscribeBlockDigestsFromStartBlockIDRequest, stream accessproto.AccessAPI_SubscribeBlockDigestsFromStartBlockIDServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + startBlockID, blockStatus, err := h.getSubscriptionDataFromStartBlockID(request.GetStartBlockId(), request.GetBlockStatus()) + if err != nil { + return err + } + + sub := h.api.SubscribeBlockDigestsFromStartBlockID(stream.Context(), startBlockID, blockStatus) + return HandleRPCSubscription(sub, h.handleBlockDigestsResponse(stream.Send)) +} + +// SubscribeBlockDigestsFromStartHeight handles subscription requests for lightweight blocks started from block height. +// It takes a SubscribeBlockDigestsFromStartHeightRequest and an AccessAPI_SubscribeBlockDigestsFromStartHeightServer stream as input. +// The handler manages the subscription to block updates and sends the subscribed block information +// to the client via the provided stream. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if unknown block status provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream encountered an error, if stream got unexpected response or could not convert block to message or could not send response.
+func (h *Handler) SubscribeBlockDigestsFromStartHeight(request *accessproto.SubscribeBlockDigestsFromStartHeightRequest, stream accessproto.AccessAPI_SubscribeBlockDigestsFromStartHeightServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + blockStatus := convert.MessageToBlockStatus(request.GetBlockStatus()) + err := checkBlockStatus(blockStatus) + if err != nil { + return err + } + + sub := h.api.SubscribeBlockDigestsFromStartHeight(stream.Context(), request.GetStartBlockHeight(), blockStatus) + return HandleRPCSubscription(sub, h.handleBlockDigestsResponse(stream.Send)) +} + +// SubscribeBlockDigestsFromLatest handles subscription requests for lightweight blocks started from latest sealed block. +// It takes a SubscribeBlockDigestsFromLatestRequest and an AccessAPI_SubscribeBlockDigestsFromLatestServer stream as input. +// The handler manages the subscription to block updates and sends the subscribed block digest information +// to the client via the provided stream. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if unknown block status provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream encountered an error, if stream got unexpected response or could not convert block to message or could not send response. +func (h *Handler) SubscribeBlockDigestsFromLatest(request *accessproto.SubscribeBlockDigestsFromLatestRequest, stream accessproto.AccessAPI_SubscribeBlockDigestsFromLatestServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + blockStatus := convert.MessageToBlockStatus(request.GetBlockStatus()) + err := checkBlockStatus(blockStatus) + if err != nil { + return err + } + + sub := h.api.SubscribeBlockDigestsFromLatest(stream.Context(), blockStatus) + return HandleRPCSubscription(sub, h.handleBlockDigestsResponse(stream.Send)) +} + +// handleBlockDigestsResponse handles the subscription to block updates and sends +// the subscribed block digest information to the client via the provided stream. +// +// Parameters: +// - send: The function responsible for sending the block digest response to the client. +// +// Returns a function that can be used as a callback for block digest updates. +// +// This function is designed to be used as a callback for block digest updates in a subscription. +// It takes a block digest, processes it, and sends the corresponding response to the client using the provided send function. +// +// Expected errors during normal operation: +// - codes.Internal: if the stream cannot send a response.
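handleBlockDigestsResponse below, like handleBlocksResponse and handleBlockHeadersResponse above, returns a typed closure over the stream's Send function, so the generic subscription machinery stays oblivious to the concrete message type. A minimal sketch of that shape; the digest and response types are illustrative, not the flow.BlockDigest and protobuf messages from the diff.

package main

import "fmt"

// digest and response stand in for flow.BlockDigest and the proto response.
type digest struct {
	height uint64
}

type response struct {
	blockHeight uint64
}

// makeDigestCallback returns the per-item callback handed to the subscription:
// convert the domain value, then forward it through the injected send function.
func makeDigestCallback(send func(*response) error) func(*digest) error {
	return func(d *digest) error {
		return send(&response{blockHeight: d.height})
	}
}

func main() {
	cb := makeDigestCallback(func(r *response) error {
		fmt.Println("sent height", r.blockHeight)
		return nil
	})
	_ = cb(&digest{height: 42})
}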
+func (h *Handler) handleBlockDigestsResponse(send sendSubscribeBlockDigestsResponseFunc) func(*flow.BlockDigest) error { + return func(blockDigest *flow.BlockDigest) error { + err := send(&accessproto.SubscribeBlockDigestsResponse{ + BlockId: convert.IdentifierToMessage(blockDigest.BlockID), + BlockHeight: blockDigest.Height, + BlockTimestamp: timestamppb.New(blockDigest.Timestamp), + }) + if err != nil { + return rpc.ConvertError(err, "could not send response", codes.Internal) + } + + return nil + } +} + +// getSubscriptionDataFromStartBlockID processes subscription start data from start block id. +// It takes a union representing the start block id and a BlockStatus from the entities package. +// Performs validation of input data and returns it in expected format for further processing. +// +// Returns: +// - flow.Identifier: The start block id for searching. +// - flow.BlockStatus: Block status. +// - error: An error indicating the result of the operation, if any. +// +// Expected errors during normal operation: +// - codes.InvalidArgument: If blockStatus is flow.BlockStatusUnknown, or startBlockID could not convert to flow.Identifier. +func (h *Handler) getSubscriptionDataFromStartBlockID(msgBlockId []byte, msgBlockStatus entities.BlockStatus) (flow.Identifier, flow.BlockStatus, error) { + startBlockID, err := convert.BlockID(msgBlockId) + if err != nil { + return flow.ZeroID, flow.BlockStatusUnknown, err + } + + blockStatus := convert.MessageToBlockStatus(msgBlockStatus) + err = checkBlockStatus(blockStatus) + if err != nil { + return flow.ZeroID, flow.BlockStatusUnknown, err + } + + return startBlockID, blockStatus, nil +} + +// SendAndSubscribeTransactionStatuses streams transaction statuses starting from the reference block saved in the +// transaction itself until the block containing the transaction becomes sealed or expired. When the transaction +// status becomes TransactionStatusSealed or TransactionStatusExpired, the subscription will automatically shut down. 
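SendAndSubscribeTransactionStatuses below stamps each streamed result with a strictly increasing MessageIndex via counters.NewMonotonicCounter. The sketch that follows shows the contract the handler code assumes, namely that Set succeeds only while the value grows; the counter type is illustrative, not the module/counters implementation.

package main

import "fmt"

// monotonicCounter sketches the assumed contract: Set only accepts values
// larger than the current one, so a failed Set signals a duplicate increment.
type monotonicCounter struct{ value uint64 }

func (c *monotonicCounter) Value() uint64 { return c.value }

// Set stores v and reports whether it was strictly greater than the old value.
func (c *monotonicCounter) Set(v uint64) bool {
	if v <= c.value {
		return false
	}
	c.value = v
	return true
}

func main() {
	c := &monotonicCounter{}
	for i := 0; i < 3; i++ {
		index := c.Value()
		if !c.Set(index + 1) {
			panic("message index already incremented")
		}
		fmt.Println("message index:", index) // 0, 1, 2
	}
}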
+func (h *Handler) SendAndSubscribeTransactionStatuses( + request *accessproto.SendAndSubscribeTransactionStatusesRequest, + stream accessproto.AccessAPI_SendAndSubscribeTransactionStatusesServer, +) error { + ctx := stream.Context() + + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + tx, err := convert.MessageToTransaction(request.GetTransaction(), h.chain) + if err != nil { + return status.Error(codes.InvalidArgument, err.Error()) + } + + sub := h.api.SendAndSubscribeTransactionStatuses(ctx, &tx, request.GetEventEncodingVersion()) + + messageIndex := counters.NewMonotonicCounter(0) + return HandleRPCSubscription(sub, func(txResults []*accessmodel.TransactionResult) error { + for i := range txResults { + index := messageIndex.Value() + if ok := messageIndex.Set(index + 1); !ok { + return status.Errorf(codes.Internal, "message index already incremented to %d", messageIndex.Value()) + } + + err = stream.Send(&accessproto.SendAndSubscribeTransactionStatusesResponse{ + TransactionResults: convert.TransactionResultToMessage(txResults[i]), + MessageIndex: index, + }) + if err != nil { + return rpc.ConvertError(err, "could not send response", codes.Internal) + } + + } + + return nil + }) +} + +func (h *Handler) blockResponse(block *flow.Block, fullResponse bool, status flow.BlockStatus) (*accessproto.BlockResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + signerIDs, err := h.signerIndicesDecoder.DecodeSignerIDs(block.ToHeader()) + if err != nil { + return nil, err // the block was retrieved from local storage - so no errors are expected + } + + var msg *entities.Block + if fullResponse { + msg, err = convert.BlockToMessage(block, signerIDs) + if err != nil { + return nil, rpc.ConvertError(err, "could not convert block to message", codes.Internal) + } + } else { + msg = convert.BlockToMessageLight(block) + } + + return &accessproto.BlockResponse{ + Block: msg, + BlockStatus: entities.BlockStatus(status), + Metadata: metadata, + }, nil +} + +func (h *Handler) blockHeaderResponse(header *flow.Header, status flow.BlockStatus) (*accessproto.BlockHeaderResponse, error) { + metadata, err := h.buildMetadataResponse() + if err != nil { + return nil, err + } + + signerIDs, err := h.signerIndicesDecoder.DecodeSignerIDs(header) + if err != nil { + return nil, err // the block was retrieved from local storage - so no errors are expected + } + + msg, err := convert.BlockHeaderToMessage(header, signerIDs) + if err != nil { + return nil, rpc.ConvertError(err, "could not convert block header to message", codes.Internal) + } + + return &accessproto.BlockHeaderResponse{ + Block: msg, + BlockStatus: entities.BlockStatus(status), + Metadata: metadata, + }, nil +} + +// buildMetadataResponse builds and returns the metadata response object. +// Expected errors during normal operation: +// - codes.NotFound if result cannot be provided by storage due to the absence of data. 
+// - storage.ErrHeightNotIndexed when data is unavailable +func (h *Handler) buildMetadataResponse() (*entities.Metadata, error) { + lastFinalizedHeader := h.finalizedHeaderCache.Get() + blockId := lastFinalizedHeader.ID() + nodeId := h.me.NodeID() + + metadata := &entities.Metadata{ + LatestFinalizedBlockId: blockId[:], + LatestFinalizedHeight: lastFinalizedHeader.Height, + NodeId: nodeId[:], + } + + if h.indexReporter != nil { + highestIndexedHeight, err := h.indexReporter.HighestIndexedHeight() + if err != nil { + if !errors.Is(err, indexer.ErrIndexNotInitialized) { + return nil, rpc.ConvertIndexError(err, lastFinalizedHeader.Height, "could not get highest indexed height") + } + highestIndexedHeight = 0 + } + + metadata.HighestIndexedHeight = highestIndexedHeight + } + + return metadata, nil +} + +func executionResultToMessages(er *flow.ExecutionResult, metadata *entities.Metadata) (*accessproto.ExecutionResultForBlockIDResponse, error) { + execResult, err := convert.ExecutionResultToMessage(er) + if err != nil { + return nil, err + } + return &accessproto.ExecutionResultForBlockIDResponse{ + ExecutionResult: execResult, + Metadata: metadata, + }, nil +} + +// WithBlockSignerDecoder configures the Handler to decode signer indices +// via the provided hotstuff.BlockSignerDecoder +func WithBlockSignerDecoder(signerIndicesDecoder hotstuff.BlockSignerDecoder) func(*Handler) { + return func(handler *Handler) { + handler.signerIndicesDecoder = signerIndicesDecoder + } +} + +// WithIndexReporter configures the Handler to work with index reporter +func WithIndexReporter(indexReporter state_synchronization.IndexReporter) func(*Handler) { + return func(handler *Handler) { + handler.indexReporter = indexReporter + } +} + +// checkBlockStatus checks the validity of the provided block status. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if blockStatus is flow.BlockStatusUnknown +func checkBlockStatus(blockStatus flow.BlockStatus) error { + if blockStatus != flow.BlockStatusFinalized && blockStatus != flow.BlockStatusSealed { + return status.Errorf(codes.InvalidArgument, "block status is unknown. Possible variants: BLOCK_FINALIZED, BLOCK_SEALED") + } + return nil +} + +// HandleRPCSubscription is a generic handler for subscriptions to a specific type for rpc calls. +// +// Parameters: +// - sub: The subscription. +// - handleResponse: The function responsible for handling the response of the subscribed type. +// +// Expected errors during normal operation: +// - codes.Internal: If the subscription encounters an error or gets an unexpected response. +func HandleRPCSubscription[T any](sub subscription.Subscription, handleResponse func(resp T) error) error { + err := subscription.HandleSubscription(sub, handleResponse) + if err != nil { + return rpc.ConvertError(err, "handle subscription error", codes.Internal) + } + + return nil +} diff --git a/engine/access/rpc/http_server.go b/engine/access/rpc/http_server.go index 036361a9ad4..7f1fa927a10 100644 --- a/engine/access/rpc/http_server.go +++ b/engine/access/rpc/http_server.go @@ -5,6 +5,8 @@ import ( "github.com/improbable-eng/grpc-web/go/grpcweb" "google.golang.org/grpc" + + "github.com/onflow/flow-go/module/grpcserver" ) type HTTPHeader struct { @@ -28,11 +30,11 @@ var defaultHTTPHeaders = []HTTPHeader{ } // newHTTPProxyServer creates a new HTTP GRPC proxy server. 
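HandleRPCSubscription, defined at the end of handler.go above, is generic over the element type, so one function serves block, header, digest and transaction-result streams alike. A self-contained sketch of the drain loop it delegates to subscription.HandleSubscription, with a buffered channel standing in for the real subscription type.

package main

import "fmt"

// handleSubscription drains a typed stream and applies the callback to each
// element, stopping at the first error; a channel stands in for the real
// subscription.Subscription here.
func handleSubscription[T any](ch <-chan T, handle func(T) error) error {
	for v := range ch {
		if err := handle(v); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ch := make(chan int, 3)
	for i := 1; i <= 3; i++ {
		ch <- i
	}
	close(ch)
	_ = handleSubscription(ch, func(v int) error {
		fmt.Println("received", v)
		return nil
	})
}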
-func newHTTPProxyServer(grpcServer *grpc.Server) *http.Server { - wrappedServer := grpcweb.WrapServer( - grpcServer, - grpcweb.WithOriginFunc(func(origin string) bool { return true }), - ) +func newHTTPProxyServer(grpcServer *grpcserver.GrpcServer) *http.Server { + var wrappedServer *grpcweb.WrappedGrpcServer + grpcServer.RegisterService(func(s *grpc.Server) { + wrappedServer = grpcweb.WrapServer(s, grpcweb.WithOriginFunc(func(origin string) bool { return true })) + }) // register gRPC HTTP proxy mux := http.NewServeMux() diff --git a/engine/access/rpc/rate_limit_test.go b/engine/access/rpc/rate_limit_test.go index 8a43b8271a9..e189986e259 100644 --- a/engine/access/rpc/rate_limit_test.go +++ b/engine/access/rpc/rate_limit_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "os" "testing" "time" @@ -16,11 +15,19 @@ import ( "github.com/stretchr/testify/suite" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rest/websockets" + "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" @@ -37,7 +44,7 @@ type RateLimitTestSuite struct { snapshot *protocol.Snapshot epochQuery *protocol.EpochQuery log zerolog.Logger - net *network.Network + net *network.EngineRegistry request *module.Requester collClient *accessmock.AccessAPIClient execClient *accessmock.ExecutionAPIClient @@ -61,17 +68,28 @@ type RateLimitTestSuite struct { ctx irrecoverable.SignalerContext cancel context.CancelFunc + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer } func (suite *RateLimitTestSuite) SetupTest() { - suite.log = zerolog.New(os.Stdout) - suite.net = new(network.Network) + suite.log = unittest.Logger() + suite.net = new(network.EngineRegistry) suite.state = new(protocol.State) suite.snapshot = new(protocol.Snapshot) + rootHeader := unittest.BlockHeaderFixture() + params := new(protocol.Params) + params.On("SporkID").Return(unittest.IdentifierFixture(), nil) + params.On("SporkRootBlockHeight").Return(rootHeader.Height, nil) + params.On("SealedRoot").Return(rootHeader, nil) + suite.epochQuery = new(protocol.EpochQuery) suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() suite.state.On("Final").Return(suite.snapshot, nil).Maybe() + suite.state.On("Params").Return(params, nil).Maybe() suite.snapshot.On("Epochs").Return(suite.epochQuery).Maybe() suite.blocks = new(storagemock.Blocks) suite.headers = new(storagemock.Headers) @@ -99,8 +117,17 @@ func (suite *RateLimitTestSuite) SetupTest() { UnsecureGRPCListenAddr: unittest.DefaultAddress, SecureGRPCListenAddr: unittest.DefaultAddress, HTTPListenAddr: unittest.DefaultAddress, + WebSocketConfig: websockets.NewDefaultWebsocketConfig(), } + // generate a server certificate that will be served by the GRPC server + networkingKey := unittest.NetworkingPrivKeyFixture() + x509Certificate, err := 
grpcutils.X509Certificate(networkingKey) + assert.NoError(suite.T(), err) + tlsConfig := grpcutils.DefaultServerTLSConfig(x509Certificate) + // set the transport credentials for the server to use + config.TransportCredentials = credentials.NewTLS(tlsConfig) + // set the rate limit to test with suite.rateLimit = 2 // set the burst limit to test with @@ -114,21 +141,81 @@ func (suite *RateLimitTestSuite) SetupTest() { "Ping": suite.rateLimit, } + suite.secureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.SecureGRPCListenAddr, + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + false, + apiRateLimt, + apiBurstLimt, + grpcserver.WithTransportCredentials(config.TransportCredentials)).Build() + + suite.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.UnsecureGRPCListenAddr, + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + false, + apiRateLimt, + apiBurstLimt).Build() + block := unittest.BlockHeaderFixture() suite.snapshot.On("Head").Return(block, nil) - rpcEngBuilder, err := NewBuilder(suite.log, suite.state, config, suite.collClient, nil, suite.blocks, suite.headers, suite.collections, suite.transactions, nil, - nil, suite.chainID, suite.metrics, suite.metrics, 0, 0, false, false, apiRateLimt, apiBurstLimt, suite.me) + bnd, err := backend.New(backend.Params{ + State: suite.state, + CollectionRPC: suite.collClient, + Blocks: suite.blocks, + Headers: suite.headers, + Collections: suite.collections, + Transactions: suite.transactions, + ChainID: suite.chainID, + AccessMetrics: suite.metrics, + MaxHeightRange: 0, + Log: suite.log, + SnapshotHistoryLimit: 0, + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + }) + suite.Require().NoError(err) + + stateStreamConfig := statestreambackend.Config{} + rpcEngBuilder, err := NewBuilder( + suite.log, + suite.state, + config, + suite.chainID, + suite.metrics, + false, + suite.me, + bnd, + bnd, + suite.secureGrpcServer, + suite.unsecureGrpcServer, + nil, + stateStreamConfig, + nil, + ) require.NoError(suite.T(), err) suite.rpcEng, err = rpcEngBuilder.WithLegacy().Build() require.NoError(suite.T(), err) suite.ctx, suite.cancel = irrecoverable.NewMockSignalerContextWithCancel(suite.T(), context.Background()) + suite.rpcEng.Start(suite.ctx) - // wait for the server to startup + + suite.secureGrpcServer.Start(suite.ctx) + suite.unsecureGrpcServer.Start(suite.ctx) + + // wait for the servers to startup + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Ready(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Ready(), 2*time.Second) + + // wait for the engine to startup unittest.RequireCloseBefore(suite.T(), suite.rpcEng.Ready(), 2*time.Second, "engine not ready at startup") // create the access api client - suite.client, suite.closer, err = accessAPIClient(suite.rpcEng.UnsecureGRPCAddress().String()) + suite.client, suite.closer, err = accessAPIClient(suite.unsecureGrpcServer.GRPCAddress().String()) require.NoError(suite.T(), err) } @@ -140,8 +227,9 @@ func (suite *RateLimitTestSuite) TearDownTest() { if suite.closer != nil { suite.closer.Close() } - // close the server - unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Done(), 2*time.Second) + // close servers + unittest.AssertClosesBefore(suite.T(), 
suite.secureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Done(), 2*time.Second) } func TestRateLimit(t *testing.T) { @@ -208,7 +296,7 @@ func (suite *RateLimitTestSuite) assertRateLimitError(err error) { func accessAPIClient(address string) (accessproto.AccessAPIClient, io.Closer, error) { conn, err := grpc.Dial( address, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(commonrpc.DefaultAccessMaxResponseSize)), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, nil, fmt.Errorf("failed to connect to address %s: %w", address, err) diff --git a/engine/access/secure_grpcr_test.go b/engine/access/secure_grpcr_test.go index 5bf94eb2059..006f574d9ef 100644 --- a/engine/access/secure_grpcr_test.go +++ b/engine/access/secure_grpcr_test.go @@ -3,7 +3,6 @@ package access import ( "context" "io" - "os" "testing" "time" @@ -14,11 +13,20 @@ import ( "github.com/stretchr/testify/suite" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + + "github.com/onflow/crypto" - "github.com/onflow/flow-go/crypto" accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rest/websockets" "github.com/onflow/flow-go/engine/access/rpc" + "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + statestreambackend "github.com/onflow/flow-go/engine/access/state_stream/backend" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" @@ -36,7 +44,7 @@ type SecureGRPCTestSuite struct { snapshot *protocol.Snapshot epochQuery *protocol.EpochQuery log zerolog.Logger - net *network.Network + net *network.EngineRegistry request *module.Requester collClient *accessmock.AccessAPIClient execClient *accessmock.ExecutionAPIClient @@ -55,17 +63,28 @@ type SecureGRPCTestSuite struct { ctx irrecoverable.SignalerContext cancel context.CancelFunc + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer } func (suite *SecureGRPCTestSuite) SetupTest() { - suite.log = zerolog.New(os.Stdout) - suite.net = new(network.Network) + suite.log = unittest.Logger() + suite.net = new(network.EngineRegistry) suite.state = new(protocol.State) suite.snapshot = new(protocol.Snapshot) + rootHeader := unittest.BlockHeaderFixture() + params := new(protocol.Params) + params.On("SporkID").Return(unittest.IdentifierFixture(), nil) + params.On("SporkRootBlockHeight").Return(rootHeader.Height, nil) + params.On("SealedRoot").Return(rootHeader, nil) + suite.epochQuery = new(protocol.EpochQuery) suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() suite.state.On("Final").Return(suite.snapshot, nil).Maybe() + suite.state.On("Params").Return(params, nil).Maybe() suite.snapshot.On("Epochs").Return(suite.epochQuery).Maybe() suite.blocks = new(storagemock.Blocks) suite.headers = new(storagemock.Headers) @@ -93,6 +112,7 @@ func (suite *SecureGRPCTestSuite) SetupTest() { UnsecureGRPCListenAddr: unittest.DefaultAddress, SecureGRPCListenAddr: unittest.DefaultAddress, HTTPListenAddr: 
unittest.DefaultAddress, + WebSocketConfig: websockets.NewDefaultWebsocketConfig(), } // generate a server certificate that will be served by the GRPC server @@ -105,23 +125,87 @@ func (suite *SecureGRPCTestSuite) SetupTest() { // save the public key to use later in tests later suite.publicKey = networkingKey.PublicKey() + suite.secureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.SecureGRPCListenAddr, + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + false, + nil, + nil, + grpcserver.WithTransportCredentials(config.TransportCredentials)).Build() + + suite.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.UnsecureGRPCListenAddr, + commonrpc.DefaultAccessMaxRequestSize, + commonrpc.DefaultAccessMaxResponseSize, + false, + nil, + nil).Build() + block := unittest.BlockHeaderFixture() suite.snapshot.On("Head").Return(block, nil) - rpcEngBuilder, err := rpc.NewBuilder(suite.log, suite.state, config, suite.collClient, nil, suite.blocks, suite.headers, suite.collections, suite.transactions, nil, - nil, suite.chainID, suite.metrics, suite.metrics, 0, 0, false, false, nil, nil, suite.me) + bnd, err := backend.New(backend.Params{ + State: suite.state, + CollectionRPC: suite.collClient, + Blocks: suite.blocks, + Headers: suite.headers, + Collections: suite.collections, + Transactions: suite.transactions, + ChainID: suite.chainID, + AccessMetrics: suite.metrics, + MaxHeightRange: 0, + Log: suite.log, + SnapshotHistoryLimit: 0, + Communicator: node_communicator.NewNodeCommunicator(false), + EventQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + ScriptExecutionMode: query_mode.IndexQueryModeExecutionNodesOnly, + TxResultQueryMode: query_mode.IndexQueryModeExecutionNodesOnly, + }) + suite.Require().NoError(err) + + stateStreamConfig := statestreambackend.Config{} + rpcEngBuilder, err := rpc.NewBuilder( + suite.log, + suite.state, + config, + suite.chainID, + suite.metrics, + false, + suite.me, + bnd, + bnd, + suite.secureGrpcServer, + suite.unsecureGrpcServer, + nil, + stateStreamConfig, + nil, + ) assert.NoError(suite.T(), err) suite.rpcEng, err = rpcEngBuilder.WithLegacy().Build() assert.NoError(suite.T(), err) suite.ctx, suite.cancel = irrecoverable.NewMockSignalerContextWithCancel(suite.T(), context.Background()) + suite.rpcEng.Start(suite.ctx) - // wait for the server to startup + + suite.secureGrpcServer.Start(suite.ctx) + suite.unsecureGrpcServer.Start(suite.ctx) + + // wait for the servers to startup + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Ready(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Ready(), 2*time.Second) + + // wait for the engine to startup unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Ready(), 2*time.Second) } func (suite *SecureGRPCTestSuite) TearDownTest() { - suite.cancel() - unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Done(), 2*time.Second) + if suite.cancel != nil { + suite.cancel() + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Done(), 2*time.Second) + } } func TestSecureGRPC(t *testing.T) { @@ -152,6 +236,19 @@ func (suite *SecureGRPCTestSuite) TestAPICallUsingSecureGRPC() { _, err := client.Ping(ctx, req) assert.Error(suite.T(), err) }) + + suite.Run("happy path - connection fails, unsecure client can not get info from secure server 
connection", func() { + conn, err := grpc.Dial( + suite.secureGrpcServer.GRPCAddress().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + assert.NoError(suite.T(), err) + + client := accessproto.NewAccessAPIClient(conn) + closer := io.Closer(conn) + defer closer.Close() + + _, err = client.Ping(ctx, req) + assert.Error(suite.T(), err) + }) } // secureGRPCClient creates a secure GRPC client using the given public key @@ -160,7 +257,7 @@ func (suite *SecureGRPCTestSuite) secureGRPCClient(publicKey crypto.PublicKey) ( assert.NoError(suite.T(), err) conn, err := grpc.Dial( - suite.rpcEng.SecureGRPCAddress().String(), + suite.secureGrpcServer.GRPCAddress().String(), grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) assert.NoError(suite.T(), err) diff --git a/engine/access/state_stream/account_status_filter.go b/engine/access/state_stream/account_status_filter.go new file mode 100644 index 00000000000..dc452d0433a --- /dev/null +++ b/engine/access/state_stream/account_status_filter.go @@ -0,0 +1,226 @@ +package state_stream + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" +) + +// Core event types based on documentation https://cadence-lang.org/docs/language/core-events +const ( + // CoreEventAccountCreated is emitted when a new account gets created + CoreEventAccountCreated = "flow.AccountCreated" + + // CoreEventAccountKeyAdded is emitted when a key gets added to an account + CoreEventAccountKeyAdded = "flow.AccountKeyAdded" + + // CoreEventAccountKeyRemoved is emitted when a key gets removed from an account + CoreEventAccountKeyRemoved = "flow.AccountKeyRemoved" + + // CoreEventAccountContractAdded is emitted when a contract gets deployed to an account + CoreEventAccountContractAdded = "flow.AccountContractAdded" + + // CoreEventAccountContractUpdated is emitted when a contract gets updated on an account + CoreEventAccountContractUpdated = "flow.AccountContractUpdated" + + // CoreEventAccountContractRemoved is emitted when a contract gets removed from an account + CoreEventAccountContractRemoved = "flow.AccountContractRemoved" + + // CoreEventInboxValuePublished is emitted when a Capability is published from an account + CoreEventInboxValuePublished = "flow.InboxValuePublished" + + // CoreEventInboxValueUnpublished is emitted when a Capability is unpublished from an account + CoreEventInboxValueUnpublished = "flow.InboxValueUnpublished" + + // CoreEventInboxValueClaimed is emitted when a Capability is claimed by an account + CoreEventInboxValueClaimed = "flow.InboxValueClaimed" +) + +var defaultCoreEventsMap map[string]map[string]struct{} + +func init() { + defaultCoreEventsMap = make(map[string]map[string]struct{}, len(DefaultCoreEvents)) + + addFilter := func(eventType, field string) { + if _, ok := defaultCoreEventsMap[eventType]; !ok { + defaultCoreEventsMap[eventType] = make(map[string]struct{}) + } + defaultCoreEventsMap[eventType][field] = struct{}{} + } + + for _, eventType := range DefaultCoreEvents { + switch eventType { + case CoreEventAccountCreated, + CoreEventAccountKeyAdded, + CoreEventAccountKeyRemoved, + CoreEventAccountContractAdded, + CoreEventAccountContractUpdated, + CoreEventAccountContractRemoved: + addFilter(eventType, "address") + case CoreEventInboxValuePublished, + CoreEventInboxValueClaimed: + addFilter(eventType, "provider") + addFilter(eventType, "recipient") + case CoreEventInboxValueUnpublished: + addFilter(eventType, "provider") + default: + panic(fmt.Errorf("unsupported event type: %s", 
eventType)) + } + } +} + +// DefaultCoreEvents is an array containing all default core event types. +var DefaultCoreEvents = []string{ + CoreEventAccountCreated, + CoreEventAccountKeyAdded, + CoreEventAccountKeyRemoved, + CoreEventAccountContractAdded, + CoreEventAccountContractUpdated, + CoreEventAccountContractRemoved, + CoreEventInboxValuePublished, + CoreEventInboxValueUnpublished, + CoreEventInboxValueClaimed, +} + +// AccountStatusFilter defines a specific filter for account statuses. +// It embeds the EventFilter type to inherit its functionality. +type AccountStatusFilter struct { + *EventFilter +} + +// NewAccountStatusFilter creates a new AccountStatusFilter based on the provided configuration. +// Expected errors: +// - error: An error, if any, encountered during core event type validating, check for max account addresses +// or validating account addresses. +func NewAccountStatusFilter( + config EventFilterConfig, + chain flow.Chain, + eventTypes []string, + accountAddresses []string, +) (AccountStatusFilter, error) { + if len(accountAddresses) == 0 { + // If `accountAddresses` is empty, the validation on `addCoreEventFieldFilter` would not happen. + // Therefore, event types are validated with `validateCoreEventTypes` to fail at the beginning of filter creation. + err := validateCoreEventTypes(eventTypes) + if err != nil { + return AccountStatusFilter{}, err + } + } else if len(accountAddresses) > DefaultMaxAccountAddresses { + // If `accountAddresses` exceeds the `DefaultAccountAddressesLimit`, it returns an error. + return AccountStatusFilter{}, fmt.Errorf("account limit exceeds, the limit is %d", DefaultMaxAccountAddresses) + } + + // If `eventTypes` is empty, the filter returns all core events for any accounts. + if len(eventTypes) == 0 { + eventTypes = DefaultCoreEvents + } + + // It's important to only set eventTypes if there are no addresses passed. + var filterEventTypes []string + if len(accountAddresses) == 0 { + filterEventTypes = eventTypes + } + + // Creates an `EventFilter` with the provided `eventTypes`. + filter, err := NewEventFilter(config, chain, filterEventTypes, []string{}, []string{}) + if err != nil { + return AccountStatusFilter{}, err + } + + accountStatusFilter := AccountStatusFilter{ + EventFilter: &filter, + } + + for _, address := range accountAddresses { + // Validate account address + addr := flow.HexToAddress(address) + if err := validateAddress(addr, chain); err != nil { + return AccountStatusFilter{}, err + } + + // If there are non-core event types at this stage, it returns an error from `addCoreEventFieldFilter`. + for _, eventType := range eventTypes { + // use the hex with prefix address to make sure it will match the cadence address + err = accountStatusFilter.addCoreEventFieldFilter(flow.EventType(eventType), addr.HexWithPrefix()) + if err != nil { + return AccountStatusFilter{}, err + } + } + } + + // We need to set hasFilters here if filterEventTypes was empty + accountStatusFilter.hasFilters = len(accountStatusFilter.EventFieldFilters) > 0 || len(eventTypes) > 0 + + return accountStatusFilter, nil +} + +// GroupCoreEventsByAccountAddress extracts account-related core events from the provided list of events. +// It filters events based on the account field specified by the event type and organizes them by account address. +// Parameters: +// - events: The list of events to extract account-related core events from. +// - log: The logger to log errors encountered during event decoding and processing. 
+// Returns:
+// - A map[string]flow.EventsList: A map where the key is the account address and the value is a list of
+// account-related core events associated with that address.
+func (f *AccountStatusFilter) GroupCoreEventsByAccountAddress(events flow.EventsList, log zerolog.Logger) map[string]flow.EventsList {
+	allAccountProtocolEvents := make(map[string]flow.EventsList)
+
+	for _, event := range events {
+		fields, err := getEventFields(&event)
+		if err != nil {
+			log.Info().Err(err).Msg("could not get event fields")
+			continue
+		}
+
+		accountField := defaultCoreEventsMap[string(event.Type)]
+		for name, value := range fields {
+			_, ok := accountField[name]
+			if ok {
+				address := value.String()
+				allAccountProtocolEvents[address] = append(allAccountProtocolEvents[address], event)
+			}
+		}
+	}
+
+	return allAccountProtocolEvents
+}
+
+// addCoreEventFieldFilter adds a field filter for each core event type
+func (f *AccountStatusFilter) addCoreEventFieldFilter(eventType flow.EventType, address string) error {
+	// Get the fields associated with the event type from the defaultCoreEventsMap
+	fields, ok := defaultCoreEventsMap[string(eventType)]
+	if !ok {
+		return fmt.Errorf("unsupported event type: %s", eventType)
+	}
+
+	// Add the field filter for each field associated with the event type
+	for field := range fields {
+		if _, ok := f.EventFieldFilters[eventType]; !ok {
+			f.EventFieldFilters[eventType] = make(FieldFilter)
+		}
+		if _, ok := f.EventFieldFilters[eventType][field]; !ok {
+			f.EventFieldFilters[eventType][field] = make(map[string]struct{})
+		}
+		f.EventFieldFilters[eventType][field][address] = struct{}{}
+	}
+
+	return nil
+}
+
+// validateCoreEventTypes validates the provided event types against the default core event types.
+// It returns an error if any of the provided event types are not in the default core event types list. Note that an
+// empty event types array is also valid.
+func validateCoreEventTypes(eventTypes []string) error {
+	for _, eventType := range eventTypes {
+		_, ok := defaultCoreEventsMap[eventType]
+		// If the provided event type does not match any of the default core event types, return an error
+		if !ok {
+			return fmt.Errorf("invalid event type for filter: %s", eventType)
+		}
+	}
+
+	return nil // All provided event types are valid core event types, or the event types array is empty
+}
diff --git a/engine/access/state_stream/account_status_filter_test.go b/engine/access/state_stream/account_status_filter_test.go
new file mode 100644
index 00000000000..20b69b12953
--- /dev/null
+++ b/engine/access/state_stream/account_status_filter_test.go
@@ -0,0 +1,153 @@
+package state_stream_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/engine/access/state_stream"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestAccountStatusFilterConstructor tests the constructor of the AccountStatusFilter with different scenarios.
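For context before the constructor tests, here is a minimal, hypothetical sketch of how the expansion rules above play out (not part of the change set; it assumes only the exported API introduced in this file: `NewAccountStatusFilter`, `DefaultEventFilterConfig`, the `CoreEvent*` constants, and the `EventFieldFilters` field):

```go
package main

import (
	"fmt"

	"github.com/onflow/flow-go/engine/access/state_stream"
	"github.com/onflow/flow-go/model/flow"
)

func main() {
	chain := flow.MonotonicEmulator.Chain()

	// Ask for account-creation events scoped to a single account.
	filter, err := state_stream.NewAccountStatusFilter(
		state_stream.DefaultEventFilterConfig,
		chain,
		[]string{state_stream.CoreEventAccountCreated},
		[]string{"0x0000000000000001"},
	)
	if err != nil {
		panic(err)
	}

	// The constructor expands the request into per-field address filters:
	// flow.AccountCreated -> "address" -> {"0x0000000000000001"}.
	for field, addresses := range filter.EventFieldFilters[state_stream.CoreEventAccountCreated] {
		for address := range addresses {
			fmt.Printf("%s: field %q must match %s\n", state_stream.CoreEventAccountCreated, field, address)
		}
	}
}
```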
+func TestAccountStatusFilterConstructor(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + eventTypes []string + accountAddresses []string + err bool + }{ + { + name: "no filters, no addresses", + }, + { + name: "valid filters, no addresses", + eventTypes: []string{state_stream.CoreEventAccountCreated, state_stream.CoreEventAccountContractAdded, state_stream.CoreEventInboxValueClaimed}, + }, + { + name: "invalid filters, no addresses", + eventTypes: []string{state_stream.CoreEventAccountCreated, "A.0000000000000001.Contract1.EventA"}, + err: true, + }, + { + name: "no filters, valid addresses", + accountAddresses: []string{"0x0000000000000001", "0x0000000000000002", "0x0000000000000003"}, + }, + { + name: "valid filters, valid addresses", + eventTypes: []string{state_stream.CoreEventAccountCreated, state_stream.CoreEventAccountContractAdded, state_stream.CoreEventInboxValueClaimed}, + accountAddresses: []string{"0x0000000000000001", "0x0000000000000002", "0x0000000000000003"}, + }, + { + name: "invalid filters, valid addresses", + eventTypes: []string{state_stream.CoreEventAccountCreated, "A.0000000000000001.Contract1.EventA"}, + accountAddresses: []string{"0x0000000000000001", "0x0000000000000002", "0x0000000000000003"}, + err: true, + }, + { + name: "valid filters, invalid addresses", + eventTypes: []string{state_stream.CoreEventAccountCreated, state_stream.CoreEventAccountContractAdded, state_stream.CoreEventInboxValueClaimed}, + accountAddresses: []string{"invalid"}, + err: true, + }, + } + + chain := flow.MonotonicEmulator.Chain() + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + filter, err := state_stream.NewAccountStatusFilter(state_stream.DefaultEventFilterConfig, chain, test.eventTypes, test.accountAddresses) + + if test.err { + assert.Error(t, err) + assert.Equal(t, filter, state_stream.AccountStatusFilter{}) + } else { + assert.NoError(t, err) + + if len(test.eventTypes) == 0 { + if len(test.accountAddresses) > 0 { + assert.Equal(t, 0, len(filter.EventTypes)) + } else { + assert.Equal(t, len(state_stream.DefaultCoreEvents), len(filter.EventTypes)) + } + } + + for key := range filter.EventTypes { + switch key { + case state_stream.CoreEventAccountCreated, + state_stream.CoreEventAccountContractAdded: + actualAccountValues := filter.EventFieldFilters[key]["address"] + assert.Equal(t, len(test.accountAddresses), len(actualAccountValues)) + for _, address := range test.accountAddresses { + _, ok := actualAccountValues[address] + assert.True(t, ok) + } + case state_stream.CoreEventInboxValueClaimed: + actualAccountValues := filter.EventFieldFilters[key]["provider"] + assert.Equal(t, len(test.accountAddresses), len(actualAccountValues)) + for _, address := range test.accountAddresses { + _, ok := actualAccountValues[address] + assert.True(t, ok) + } + } + } + } + }) + } +} + +// TestAccountStatusFilterFiltering tests the filtering mechanism of the AccountStatusFilter. +// It verifies that the filter correctly filters the events based on the provided event types and account addresses. 
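One subtlety worth calling out before the filtering test: when addresses are supplied without event types, the constructor leaves `EventTypes` empty and matches purely through field filters for all nine default core events. A hedged sketch of that case, mirroring the "no filters, valid addresses" scenario above:

```go
package main

import (
	"fmt"

	"github.com/onflow/flow-go/engine/access/state_stream"
	"github.com/onflow/flow-go/model/flow"
)

func main() {
	// Address-only filter: no event types supplied, so the filter falls back
	// to DefaultCoreEvents and tracks the account via field filters.
	filter, err := state_stream.NewAccountStatusFilter(
		state_stream.DefaultEventFilterConfig,
		flow.MonotonicEmulator.Chain(),
		nil, // no event types
		[]string{"0x0000000000000001"},
	)
	if err != nil {
		panic(err)
	}

	fmt.Println(len(filter.EventTypes))        // 0: EventTypes is only set when no addresses are given
	fmt.Println(len(filter.EventFieldFilters)) // 9: one entry per default core event type
}
```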
+func TestAccountStatusFilterFiltering(t *testing.T) { + chain := flow.MonotonicEmulator.Chain() + + filterEventTypes := []string{state_stream.CoreEventAccountCreated, state_stream.CoreEventAccountContractAdded} + + addressGenerator := chain.NewAddressGenerator() + addressAccountCreate, err := addressGenerator.NextAddress() + require.NoError(t, err) + + accountContractAddedAddress, err := addressGenerator.NextAddress() + require.NoError(t, err) + + filter, err := state_stream.NewAccountStatusFilter( + state_stream.DefaultEventFilterConfig, + chain, + filterEventTypes, + []string{addressAccountCreate.HexWithPrefix(), accountContractAddedAddress.HexWithPrefix()}, + ) + require.NoError(t, err) + + accountCreateEvent := unittest.EventGenerator.GenerateAccountCreateEvent(t, addressAccountCreate) + accountContractAdded := unittest.EventGenerator.GenerateAccountContractEvent(t, "AccountContractAdded", accountContractAddedAddress) + + events := flow.EventsList{ + unittest.EventFixture( + unittest.Event.WithEventType("A.0000000000000001.Contract1.EventA"), + ), + accountCreateEvent, + unittest.EventFixture( + unittest.Event.WithEventType("A.0000000000000001.Contract2.EventA"), + ), + accountContractAdded, + } + + matched := filter.Filter(events) + matchedByAddress := filter.GroupCoreEventsByAccountAddress(matched, unittest.Logger()) + + assert.Len(t, matched, 2) + + assert.Equal(t, events[1], matched[0]) + matchAccCreated, ok := matchedByAddress[addressAccountCreate.HexWithPrefix()] + require.True(t, ok) + assert.Equal(t, flow.EventsList{accountCreateEvent}, matchAccCreated) + + assert.Equal(t, events[3], matched[1]) + matchContractAdded, ok := matchedByAddress[accountContractAddedAddress.HexWithPrefix()] + require.True(t, ok) + assert.Equal(t, flow.EventsList{accountContractAdded}, matchContractAdded) +} diff --git a/engine/access/state_stream/backend.go b/engine/access/state_stream/backend.go deleted file mode 100644 index ce5d761f5ea..00000000000 --- a/engine/access/state_stream/backend.go +++ /dev/null @@ -1,173 +0,0 @@ -package state_stream - -import ( - "context" - "fmt" - "time" - - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" - "github.com/onflow/flow-go/module/mempool/herocache" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/logging" -) - -const ( - // DefaultMaxGlobalStreams defines the default max number of streams that can be open at the same time. - DefaultMaxGlobalStreams = 1000 - - // DefaultCacheSize defines the default max number of objects for the execution data cache. - DefaultCacheSize = 100 - - // DefaultSendTimeout is the default timeout for sending a message to the client. After the timeout - // expires, the connection is closed. 
- DefaultSendTimeout = 30 * time.Second -) - -type GetExecutionDataFunc func(context.Context, flow.Identifier) (*execution_data.BlockExecutionDataEntity, error) -type GetStartHeightFunc func(flow.Identifier, uint64) (uint64, error) - -type API interface { - GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionData, error) - SubscribeExecutionData(ctx context.Context, startBlockID flow.Identifier, startBlockHeight uint64) Subscription - SubscribeEvents(ctx context.Context, startBlockID flow.Identifier, startHeight uint64, filter EventFilter) Subscription -} - -type StateStreamBackend struct { - ExecutionDataBackend - EventsBackend - - log zerolog.Logger - state protocol.State - headers storage.Headers - seals storage.Seals - results storage.ExecutionResults - execDataStore execution_data.ExecutionDataStore - execDataCache *herocache.BlockExecutionData - broadcaster *engine.Broadcaster -} - -func New( - log zerolog.Logger, - config Config, - state protocol.State, - headers storage.Headers, - seals storage.Seals, - results storage.ExecutionResults, - execDataStore execution_data.ExecutionDataStore, - execDataCache *herocache.BlockExecutionData, - broadcaster *engine.Broadcaster, -) (*StateStreamBackend, error) { - logger := log.With().Str("module", "state_stream_api").Logger() - - b := &StateStreamBackend{ - log: logger, - state: state, - headers: headers, - seals: seals, - results: results, - execDataStore: execDataStore, - execDataCache: execDataCache, - broadcaster: broadcaster, - } - - b.ExecutionDataBackend = ExecutionDataBackend{ - log: logger, - headers: headers, - broadcaster: broadcaster, - sendTimeout: config.ClientSendTimeout, - sendBufferSize: int(config.ClientSendBufferSize), - getExecutionData: b.getExecutionData, - getStartHeight: b.getStartHeight, - } - - b.EventsBackend = EventsBackend{ - log: logger, - headers: headers, - broadcaster: broadcaster, - sendTimeout: config.ClientSendTimeout, - sendBufferSize: int(config.ClientSendBufferSize), - getExecutionData: b.getExecutionData, - getStartHeight: b.getStartHeight, - } - - return b, nil -} - -func (b *StateStreamBackend) getExecutionData(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionDataEntity, error) { - if cached, ok := b.execDataCache.ByID(blockID); ok { - b.log.Trace(). - Hex("block_id", logging.ID(blockID)). - Msg("execution data cache hit") - return cached, nil - } - b.log.Trace(). - Hex("block_id", logging.ID(blockID)). - Msg("execution data cache miss") - - seal, err := b.seals.FinalizedSealForBlock(blockID) - if err != nil { - return nil, fmt.Errorf("could not get finalized seal for block: %w", err) - } - - result, err := b.results.ByID(seal.ResultID) - if err != nil { - return nil, fmt.Errorf("could not get execution result (id: %s): %w", seal.ResultID, err) - } - - execData, err := b.execDataStore.GetExecutionData(ctx, result.ExecutionDataID) - if err != nil { - return nil, fmt.Errorf("could not get execution data (id: %s): %w", result.ExecutionDataID, err) - } - - blockExecData := execution_data.NewBlockExecutionDataEntity(result.ExecutionDataID, execData) - - b.execDataCache.Add(blockExecData) - - return blockExecData, nil -} - -// getStartHeight returns the start height to use when searching. -// Only one of startBlockID and startHeight may be set. Otherwise, an InvalidArgument error is returned. -// If a block is provided and does not exist, a NotFound error is returned. 
-// If neither startBlockID nor startHeight is provided, the latest sealed block is used. -func (b *StateStreamBackend) getStartHeight(startBlockID flow.Identifier, startHeight uint64) (uint64, error) { - // make sure only one of start block ID and start height is provided - if startBlockID != flow.ZeroID && startHeight > 0 { - return 0, status.Errorf(codes.InvalidArgument, "only one of start block ID and start height may be provided") - } - - // first, if a start block ID is provided, use that - // invalid or missing block IDs will result in an error - if startBlockID != flow.ZeroID { - header, err := b.headers.ByBlockID(startBlockID) - if err != nil { - return 0, rpc.ConvertStorageError(fmt.Errorf("could not get header for block %v: %w", startBlockID, err)) - } - return header.Height, nil - } - - // next, if the start height is provided, use that - // heights that are in the future or before the root block will result in an error - if startHeight > 0 { - header, err := b.headers.ByHeight(startHeight) - if err != nil { - return 0, rpc.ConvertStorageError(fmt.Errorf("could not get header for height %d: %w", startHeight, err)) - } - return header.Height, nil - } - - // if no start block was provided, use the latest sealed block - header, err := b.state.Sealed().Head() - if err != nil { - return 0, status.Errorf(codes.Internal, "could not get latest sealed block: %v", err) - } - return header.Height, nil -} diff --git a/engine/access/state_stream/backend/backend.go b/engine/access/state_stream/backend/backend.go new file mode 100644 index 00000000000..4e99013ebd2 --- /dev/null +++ b/engine/access/state_stream/backend/backend.go @@ -0,0 +1,200 @@ +package backend + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/rs/zerolog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/engine/access/subscription/tracker" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// Config defines the configurable options for the ingress server. +type Config struct { + state_stream.EventFilterConfig + + // ListenAddr is the address the GRPC server will listen on as host:port + ListenAddr string + + // MaxExecutionDataMsgSize is the max message size for block execution data API + MaxExecutionDataMsgSize uint + + // RpcMetricsEnabled specifies whether to enable the GRPC metrics + RpcMetricsEnabled bool + + // MaxGlobalStreams defines the global max number of streams that can be open at the same time. + MaxGlobalStreams uint32 + + // RegisterIDsRequestLimit defines the max number of register IDs that can be received in a single request. + RegisterIDsRequestLimit uint32 + + // ExecutionDataCacheSize is the max number of objects for the execution data cache. + ExecutionDataCacheSize uint32 + + // ClientSendTimeout is the timeout for sending a message to the client. After the timeout, + // the stream is closed with an error. + ClientSendTimeout time.Duration + + // ClientSendBufferSize is the size of the response buffer for sending messages to the client. 
+ ClientSendBufferSize uint + + // ResponseLimit is the max responses per second allowed on a stream. After exceeding the limit, + // the stream is paused until more capacity is available. Searches of past data can be CPU + // intensive, so this helps manage the impact. + ResponseLimit float64 + + // HeartbeatInterval specifies the block interval at which heartbeat messages should be sent. + HeartbeatInterval uint64 +} + +type GetExecutionDataFunc func(context.Context, uint64) (*execution_data.BlockExecutionDataEntity, error) + +type StateStreamBackend struct { + tracker.ExecutionDataTracker + + ExecutionDataBackend + EventsBackend + AccountStatusesBackend + + log zerolog.Logger + state protocol.State + headers storage.Headers + seals storage.Seals + results storage.ExecutionResults + execDataStore execution_data.ExecutionDataStore + execDataCache *cache.ExecutionDataCache + registers *execution.RegistersAsyncStore + registerRequestLimit int + sporkRootBlockHeight uint64 +} + +func New( + log zerolog.Logger, + state protocol.State, + headers storage.Headers, + seals storage.Seals, + results storage.ExecutionResults, + execDataStore execution_data.ExecutionDataStore, + execDataCache *cache.ExecutionDataCache, + registers *execution.RegistersAsyncStore, + eventsIndex *index.EventsIndex, + useEventsIndex bool, + registerIDsRequestLimit int, + subscriptionHandler *subscription.SubscriptionHandler, + executionDataTracker tracker.ExecutionDataTracker, +) (*StateStreamBackend, error) { + logger := log.With().Str("module", "state_stream_api").Logger() + + b := &StateStreamBackend{ + ExecutionDataTracker: executionDataTracker, + log: logger, + state: state, + headers: headers, + seals: seals, + results: results, + execDataStore: execDataStore, + execDataCache: execDataCache, + registers: registers, + registerRequestLimit: registerIDsRequestLimit, + sporkRootBlockHeight: state.Params().SporkRootBlockHeight(), + } + + b.ExecutionDataBackend = ExecutionDataBackend{ + log: logger, + headers: headers, + subscriptionHandler: subscriptionHandler, + getExecutionData: b.getExecutionData, + executionDataTracker: executionDataTracker, + } + + eventsProvider := EventsProvider{ + log: logger, + headers: headers, + getExecutionData: b.getExecutionData, + useEventsIndex: useEventsIndex, + eventsIndex: eventsIndex, + } + + b.EventsBackend = EventsBackend{ + log: logger, + subscriptionHandler: subscriptionHandler, + executionDataTracker: executionDataTracker, + eventsProvider: eventsProvider, + } + + b.AccountStatusesBackend = AccountStatusesBackend{ + log: logger, + subscriptionHandler: subscriptionHandler, + executionDataTracker: b.ExecutionDataTracker, + eventsProvider: eventsProvider, + } + + return b, nil +} + +// getExecutionData returns the execution data for the given block height. +// Expected errors during normal operation: +// - subscription.ErrBlockNotReady: execution data for the given block height is not available. +func (b *StateStreamBackend) getExecutionData(ctx context.Context, height uint64) (*execution_data.BlockExecutionDataEntity, error) { + highestHeight := b.ExecutionDataTracker.GetHighestHeight() + // fail early if no notification has been received for the given block height. + // note: it's possible for the data to exist in the data store before the notification is + // received. this ensures a consistent view is available to all streams. 
+	if height > highestHeight {
+		return nil, fmt.Errorf("execution data for block %d is not available yet: %w", height, subscription.ErrBlockNotReady)
+	}
+
+	// the spork root block will never have execution data available. If requested, return an empty result.
+	if height == b.sporkRootBlockHeight {
+		return &execution_data.BlockExecutionDataEntity{
+			BlockExecutionData: &execution_data.BlockExecutionData{
+				BlockID: b.state.Params().SporkRootBlock().ID(),
+			},
+		}, nil
+	}
+
+	execData, err := b.execDataCache.ByHeight(ctx, height)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) ||
+			execution_data.IsBlobNotFoundError(err) {
+			err = errors.Join(err, subscription.ErrBlockNotReady)
+			return nil, fmt.Errorf("could not get execution data for block %d: %w", height, err)
+		}
+		return nil, fmt.Errorf("could not get execution data for block %d: %w", height, err)
+	}
+
+	return execData, nil
+}
+
+// GetRegisterValues returns the register values for the given register IDs at the given block height.
+func (b *StateStreamBackend) GetRegisterValues(ids flow.RegisterIDs, height uint64) ([]flow.RegisterValue, error) {
+	if len(ids) > b.registerRequestLimit {
+		return nil, status.Errorf(codes.InvalidArgument, "number of register IDs exceeds limit of %d", b.registerRequestLimit)
+	}
+
+	values, err := b.registers.RegisterValues(ids, height)
+	if err != nil {
+		if errors.Is(err, storage.ErrHeightNotIndexed) {
+			return nil, status.Errorf(codes.OutOfRange, "register values for block %d are not available", height)
+		}
+		if errors.Is(err, storage.ErrNotFound) {
+			return nil, status.Errorf(codes.NotFound, "register values for block %d not found", height)
+		}
+		return nil, err
+	}
+
+	return values, nil
+}
diff --git a/engine/access/state_stream/backend/backend_account_statuses.go b/engine/access/state_stream/backend/backend_account_statuses.go
new file mode 100644
index 00000000000..d168e7ddd96
--- /dev/null
+++ b/engine/access/state_stream/backend/backend_account_statuses.go
@@ -0,0 +1,117 @@
+package backend
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine/access/state_stream"
+	"github.com/onflow/flow-go/engine/access/subscription"
+	"github.com/onflow/flow-go/engine/access/subscription/tracker"
+
+	"github.com/onflow/flow-go/fvm/errors"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+type AccountStatusesResponse struct {
+	BlockID       flow.Identifier
+	Height        uint64
+	AccountEvents map[string]flow.EventsList
+}
+
+// AccountStatusesBackend is a backend implementation for subscribing to account status changes.
+type AccountStatusesBackend struct {
+	log                 zerolog.Logger
+	subscriptionHandler *subscription.SubscriptionHandler
+
+	executionDataTracker tracker.ExecutionDataTracker
+	eventsProvider       EventsProvider
+}
+
+// subscribe creates and returns a subscription to receive account status updates starting from the specified height.
+func (b *AccountStatusesBackend) subscribe(
+	ctx context.Context,
+	nextHeight uint64,
+	filter state_stream.AccountStatusFilter,
+) subscription.Subscription {
+	return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getAccountStatusResponseFactory(filter))
+}
+
+// SubscribeAccountStatusesFromStartBlockID subscribes to the streaming of account status changes starting from
+// a specific block ID, with an optional status filter.
+// Errors:
+// - codes.NotFound if the block with the given start block ID could not be found.
+// - codes.Internal if there is an internal error.
+func (b *AccountStatusesBackend) SubscribeAccountStatusesFromStartBlockID(
+	ctx context.Context,
+	startBlockID flow.Identifier,
+	filter state_stream.AccountStatusFilter,
+) subscription.Subscription {
+	nextHeight, err := b.executionDataTracker.GetStartHeightFromBlockID(startBlockID)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start height from block id")
+	}
+	return b.subscribe(ctx, nextHeight, filter)
+}
+
+// SubscribeAccountStatusesFromStartHeight subscribes to the streaming of account status changes starting from
+// a specific block height, with an optional status filter.
+// Errors:
+// - codes.NotFound if the block at the given start height could not be found.
+// - codes.Internal if there is an internal error.
+func (b *AccountStatusesBackend) SubscribeAccountStatusesFromStartHeight(
+	ctx context.Context,
+	startHeight uint64,
+	filter state_stream.AccountStatusFilter,
+) subscription.Subscription {
+	nextHeight, err := b.executionDataTracker.GetStartHeightFromHeight(startHeight)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start height from block height")
+	}
+	return b.subscribe(ctx, nextHeight, filter)
+}
+
+// SubscribeAccountStatusesFromLatestBlock subscribes to the streaming of account status changes starting from
+// the latest sealed block, with an optional status filter.
+//
+// No errors are expected during normal operation.
+func (b *AccountStatusesBackend) SubscribeAccountStatusesFromLatestBlock(
+	ctx context.Context,
+	filter state_stream.AccountStatusFilter,
+) subscription.Subscription {
+	nextHeight, err := b.executionDataTracker.GetStartHeightFromLatest(ctx)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start height from latest")
+	}
+	return b.subscribe(ctx, nextHeight, filter)
+}
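As a consumer-side illustration of the subscription contract (hypothetical, not part of the diff): draining one of these subscriptions relies only on the `Channel()`/`Err()` behavior exercised by the tests below. The function name `streamAccountStatuses` and the fully wired backend `b` are assumptions of the sketch.

```go
package example

import (
	"context"
	"fmt"

	"github.com/onflow/flow-go/engine/access/state_stream"
	"github.com/onflow/flow-go/engine/access/state_stream/backend"
)

// streamAccountStatuses drains a subscription until it closes, printing the
// per-account events carried by each response.
func streamAccountStatuses(
	ctx context.Context,
	b *backend.StateStreamBackend,
	startHeight uint64,
	filter state_stream.AccountStatusFilter,
) error {
	sub := b.SubscribeAccountStatusesFromStartHeight(ctx, startHeight, filter)

	for v := range sub.Channel() {
		resp, ok := v.(*backend.AccountStatusesResponse)
		if !ok {
			return fmt.Errorf("unexpected response type: %T", v)
		}
		for address, events := range resp.AccountEvents {
			fmt.Printf("block %d (%s): %d event(s) for account %s\n",
				resp.Height, resp.BlockID, len(events), address)
		}
	}

	// the channel closes on error or context cancellation; Err reports the cause
	return sub.Err()
}
```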
+
+// getAccountStatusResponseFactory returns a function that produces the account statuses response for a given height.
+//
+// Errors:
+// - subscription.ErrBlockNotReady: if the block at the specified height is not available yet.
+// - error: any other error encountered while getting events from storage or execution data.
+func (b *AccountStatusesBackend) getAccountStatusResponseFactory(
+	filter state_stream.AccountStatusFilter,
+) subscription.GetDataByHeightFunc {
+	return func(ctx context.Context, height uint64) (interface{}, error) {
+		eventsResponse, err := b.eventsProvider.GetAllEventsResponse(ctx, height)
+		if err != nil {
+			if errors.Is(err, storage.ErrNotFound) ||
+				errors.Is(err, storage.ErrHeightNotIndexed) {
+				return nil, fmt.Errorf("block %d is not available yet: %w", height, subscription.ErrBlockNotReady)
+			}
+			return nil, err
+		}
+		filteredProtocolEvents := filter.Filter(eventsResponse.Events)
+		allAccountProtocolEvents := filter.GroupCoreEventsByAccountAddress(filteredProtocolEvents, b.log)
+
+		return &AccountStatusesResponse{
+			BlockID:       eventsResponse.BlockID,
+			Height:        eventsResponse.Height,
+			AccountEvents: allAccountProtocolEvents,
+		}, nil
+	}
+}
diff --git a/engine/access/state_stream/backend/backend_account_statuses_test.go b/engine/access/state_stream/backend/backend_account_statuses_test.go
new file mode 100644
index 00000000000..65d3e350aa4
--- /dev/null
+++ b/engine/access/state_stream/backend/backend_account_statuses_test.go
@@ -0,0 +1,554 @@
+package backend
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine/access/state_stream"
+	"github.com/onflow/flow-go/engine/access/subscription"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+var testProtocolEventTypes = []flow.EventType{
+	state_stream.CoreEventAccountCreated,
+	state_stream.CoreEventAccountContractAdded,
+	state_stream.CoreEventAccountContractUpdated,
+}
+
+// testType defines the test case struct.
+// It is used for testing different scenarios of each endpoint from AccountStatusesBackend.
+type testType struct {
+	name            string // Test case name
+	highestBackfill int    // Highest backfill index
+	startValue      interface{}
+	filters         state_stream.AccountStatusFilter // Event filters
+}
+
+// BackendAccountStatusesSuite is a test suite for the AccountStatusesBackend functionality.
+// It is used to test the endpoints that enable users to subscribe to the streaming of account status changes.
+// It verifies that each endpoint works properly and returns the expected data. The suite also tests
+// handling of expected errors in SubscribeAccountStatuses.
+type BackendAccountStatusesSuite struct {
+	BackendExecutionDataSuite
+	accountCreatedAddress  flow.Address
+	accountContractAdded   flow.Address
+	accountContractUpdated flow.Address
+}
+
+func TestBackendAccountStatusesSuite(t *testing.T) {
+	suite.Run(t, new(BackendAccountStatusesSuite))
+}
+
+// generateProtocolMockEvents generates a set of mock events.
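The response factory above is one instance of the general `subscription.GetDataByHeightFunc` contract: wrap "not available yet" conditions in `subscription.ErrBlockNotReady` so the subscription pauses and retries instead of terminating. A minimal, hypothetical provider following the same convention (`makeHeaderProvider` is illustrative only, not part of the change):

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/onflow/flow-go/engine/access/subscription"
	"github.com/onflow/flow-go/storage"
)

// makeHeaderProvider returns block headers by height, signaling "not ready"
// for heights that are not in storage yet.
func makeHeaderProvider(headers storage.Headers) subscription.GetDataByHeightFunc {
	return func(ctx context.Context, height uint64) (interface{}, error) {
		header, err := headers.ByHeight(height)
		if err != nil {
			if errors.Is(err, storage.ErrNotFound) {
				// the handler will wait for the height instead of failing the stream
				return nil, fmt.Errorf("block %d is not available yet: %w", height, subscription.ErrBlockNotReady)
			}
			return nil, err // any other error terminates the subscription
		}
		return header, nil
	}
}
```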
+func (s *BackendAccountStatusesSuite) generateProtocolMockEvents() flow.EventsList { + events := make([]flow.Event, 4) + events = append(events, unittest.EventFixture( + unittest.Event.WithEventType(testEventTypes[0]), + )) + + accountCreateEvent := unittest.EventGenerator.GenerateAccountCreateEvent(s.T(), s.accountCreatedAddress) + accountCreateEvent.TransactionIndex = 1 + events = append(events, accountCreateEvent) + + accountContractAdded := unittest.EventGenerator.GenerateAccountContractEvent(s.T(), "AccountContractAdded", s.accountContractAdded) + accountContractAdded.TransactionIndex = 2 + events = append(events, accountContractAdded) + + accountContractUpdated := unittest.EventGenerator.GenerateAccountContractEvent(s.T(), "AccountContractUpdated", s.accountContractUpdated) + accountContractUpdated.TransactionIndex = 3 + events = append(events, accountContractUpdated) + + return events +} + +// SetupTest initializes the test suite. +func (s *BackendAccountStatusesSuite) SetupTest() { + blockCount := 5 + var err error + s.SetupTestSuite(blockCount) + + addressGenerator := chainID.Chain().NewAddressGenerator() + s.accountCreatedAddress, err = addressGenerator.NextAddress() + require.NoError(s.T(), err) + s.accountContractAdded, err = addressGenerator.NextAddress() + require.NoError(s.T(), err) + s.accountContractUpdated, err = addressGenerator.NextAddress() + require.NoError(s.T(), err) + + parent := s.rootBlock.ToHeader() + events := s.generateProtocolMockEvents() + + for i := 0; i < blockCount; i++ { + block := unittest.BlockWithParentFixture(parent) + // update for next iteration + parent = block.ToHeader() + + seal := unittest.BlockSealsFixture(1)[0] + result := unittest.ExecutionResultFixture() + + chunkDatas := []*execution_data.ChunkExecutionData{ + unittest.ChunkExecutionDataFixture(s.T(), execution_data.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events)), + } + + execData := unittest.BlockExecutionDataFixture( + unittest.WithBlockExecutionDataBlockID(block.ID()), + unittest.WithChunkExecutionDatas(chunkDatas...), + ) + + result.ExecutionDataID, err = s.eds.Add(context.TODO(), execData) + assert.NoError(s.T(), err) + + s.blocks = append(s.blocks, block) + s.execDataMap[block.ID()] = execution_data.NewBlockExecutionDataEntity(result.ExecutionDataID, execData) + s.blockEvents[block.ID()] = events + s.blockMap[block.Height] = block + s.sealMap[block.ID()] = seal + s.resultMap[seal.ResultID] = result + + s.T().Logf("adding exec data for block %d %d %v => %v", i, block.Height, block.ID(), result.ExecutionDataID) + } + + s.SetupTestMocks() +} + +// subscribeFromStartBlockIdTestCases generates test cases for subscribing from a start block ID. +func (s *BackendAccountStatusesSuite) subscribeFromStartBlockIdTestCases() []testType { + baseTests := []testType{ + { + name: "happy path - all new blocks", + highestBackfill: -1, // no backfill + startValue: s.blocks[0].ID(), + }, + { + name: "happy path - partial backfill", + highestBackfill: 2, // backfill the first 3 blocks + startValue: s.blocks[0].ID(), + }, + { + name: "happy path - complete backfill", + highestBackfill: len(s.blocks) - 1, // backfill all blocks + startValue: s.blocks[0].ID(), + }, + } + + return s.generateFiltersForTestCases(baseTests) +} + +// subscribeFromStartHeightTestCases generates test cases for subscribing from a start height. 
+func (s *BackendAccountStatusesSuite) subscribeFromStartHeightTestCases() []testType { + baseTests := []testType{ + { + name: "happy path - all new blocks", + highestBackfill: -1, // no backfill + startValue: s.blocks[0].Height, + }, + { + name: "happy path - partial backfill", + highestBackfill: 2, // backfill the first 3 blocks + startValue: s.blocks[0].Height, + }, + { + name: "happy path - complete backfill", + highestBackfill: len(s.blocks) - 1, // backfill all blocks + startValue: s.blocks[0].Height, + }, + } + + return s.generateFiltersForTestCases(baseTests) +} + +// subscribeFromLatestTestCases generates test cases for subscribing from the latest block. +func (s *BackendAccountStatusesSuite) subscribeFromLatestTestCases() []testType { + baseTests := []testType{ + { + name: "happy path - all new blocks", + highestBackfill: -1, // no backfill + }, + { + name: "happy path - partial backfill", + highestBackfill: 2, // backfill the first 3 blocks + }, + { + name: "happy path - complete backfill", + highestBackfill: len(s.blocks) - 1, // backfill all blocks + }, + } + + return s.generateFiltersForTestCases(baseTests) +} + +// generateFiltersForTestCases generates variations of test cases with different event filters. +// +// This function takes an array of base testType structs and creates variations for each of them. +// For each base test case, it generates three variations: +// - All events: Includes all protocol event types filtered by the provided account address. +// - Some events: Includes only the first protocol event type filtered by the provided account address. +// - No events: Includes a custom event type "flow.AccountKeyAdded" filtered by the provided account address. +func (s *BackendAccountStatusesSuite) generateFiltersForTestCases(baseTests []testType) []testType { + // Create variations for each of the base tests + tests := make([]testType, 0, len(baseTests)*3) + var err error + for _, test := range baseTests { + t1 := test + t1.name = fmt.Sprintf("%s - all events", test.name) + t1.filters, err = state_stream.NewAccountStatusFilter( + state_stream.DefaultEventFilterConfig, + chainID.Chain(), + []string{string(testProtocolEventTypes[0]), string(testProtocolEventTypes[1]), string(testProtocolEventTypes[2])}, + []string{s.accountCreatedAddress.HexWithPrefix(), s.accountContractAdded.HexWithPrefix(), s.accountContractUpdated.HexWithPrefix()}, + ) + require.NoError(s.T(), err) + tests = append(tests, t1) + + t2 := test + t2.name = fmt.Sprintf("%s - some events", test.name) + t2.filters, err = state_stream.NewAccountStatusFilter( + state_stream.DefaultEventFilterConfig, + chainID.Chain(), + []string{string(testProtocolEventTypes[0])}, + []string{s.accountCreatedAddress.HexWithPrefix(), s.accountContractAdded.HexWithPrefix(), s.accountContractUpdated.HexWithPrefix()}, + ) + require.NoError(s.T(), err) + tests = append(tests, t2) + + t3 := test + t3.name = fmt.Sprintf("%s - no events", test.name) + t3.filters, err = state_stream.NewAccountStatusFilter( + state_stream.DefaultEventFilterConfig, + chainID.Chain(), + []string{"flow.AccountKeyAdded"}, + []string{s.accountCreatedAddress.HexWithPrefix(), s.accountContractAdded.HexWithPrefix(), s.accountContractUpdated.HexWithPrefix()}, + ) + require.NoError(s.T(), err) + tests = append(tests, t3) + + t4 := test + t4.name = fmt.Sprintf("%s - no events, no addresses", test.name) + t4.filters, err = state_stream.NewAccountStatusFilter( + state_stream.DefaultEventFilterConfig, + chainID.Chain(), + []string{}, + []string{}, + ) + 
require.NoError(s.T(), err) + tests = append(tests, t4) + + t5 := test + t5.name = fmt.Sprintf("%s - some events, no addresses", test.name) + t5.filters, err = state_stream.NewAccountStatusFilter( + state_stream.DefaultEventFilterConfig, + chainID.Chain(), + []string{"flow.AccountKeyAdded"}, + []string{}, + ) + require.NoError(s.T(), err) + tests = append(tests, t5) + } + + return tests +} + +// subscribeToAccountStatuses runs subscription tests for account statuses. +// +// This function takes a subscribeFn function, which is a subscription function for account statuses, +// and an array of testType structs representing the test cases. +// It iterates over each test case and sets up the necessary context and cancellation for the subscription. +// For each test case, it simulates backfill blocks and verifies the expected account events for each block. +// It also ensures that the subscription shuts down gracefully after completing the test cases. +func (s *BackendAccountStatusesSuite) subscribeToAccountStatuses( + subscribeFn func(ctx context.Context, startValue interface{}, filter state_stream.AccountStatusFilter) subscription.Subscription, + tests []testType, +) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Iterate over each test case + for _, test := range tests { + s.Run(test.name, func() { + s.T().Logf("len(s.execDataMap) %d", len(s.execDataMap)) + + // Add "backfill" block - blocks that are already in the database before the test starts + // This simulates a subscription on a past block + if test.highestBackfill > 0 { + s.highestBlockHeader = s.blocks[test.highestBackfill].ToHeader() + } + + // Set up subscription context and cancellation + subCtx, subCancel := context.WithCancel(ctx) + + sub := subscribeFn(subCtx, test.startValue, test.filters) + + // Loop over all the blocks + for i, b := range s.blocks { + s.T().Logf("checking block %d %v", i, b.ID()) + + // Simulate new exec data received. + // Exec data for all blocks with index <= highestBackfill were already received + if i > test.highestBackfill { + s.highestBlockHeader = b.ToHeader() + + s.broadcaster.Publish() + } + + expectedEvents := s.expectedAccountStatuses(b.ID(), test.filters) + + // Consume execution data from subscription + unittest.RequireReturnsBefore(s.T(), func() { + v, ok := <-sub.Channel() + require.True(s.T(), ok, "channel closed while waiting for exec data for block %d %v: err: %v", b.Height, b.ID(), sub.Err()) + + expected := &AccountStatusesResponse{ + BlockID: b.ID(), + Height: b.Height, + AccountEvents: expectedEvents, + } + s.requireEventsResponse(v, expected) + + }, 60*time.Second, fmt.Sprintf("timed out waiting for exec data for block %d %v", b.Height, b.ID())) + } + + // Make sure there are no new messages waiting. The channel should be opened with nothing waiting + unittest.RequireNeverReturnBefore(s.T(), func() { + <-sub.Channel() + }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") + + // Stop the subscription + subCancel() + + // Ensure subscription shuts down gracefully + unittest.RequireReturnsBefore(s.T(), func() { + v, ok := <-sub.Channel() + assert.Nil(s.T(), v) + assert.False(s.T(), ok) + assert.ErrorIs(s.T(), sub.Err(), context.Canceled) + }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") + }) + } +} + +// TestSubscribeAccountStatusesFromStartBlockID tests the SubscribeAccountStatusesFromStartBlockID method. 
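The consumption loop above leans on `unittest.RequireReturnsBefore` and `RequireNeverReturnBefore` to bound every channel read. The helper is presumably implemented along these lines; `requireReturnsBefore` here is a sketch of the semantics, not the actual utility:

```go
package example

import (
	"testing"
	"time"
)

// requireReturnsBefore runs f in a goroutine and fails the test if f has not
// returned within the given timeout.
func requireReturnsBefore(t *testing.T, f func(), timeout time.Duration, msg string) {
	t.Helper()

	done := make(chan struct{})
	go func() {
		defer close(done)
		f()
	}()

	select {
	case <-done:
	case <-time.After(timeout):
		t.Fatalf("timed out: %s", msg)
	}
}
```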
+func (s *BackendAccountStatusesSuite) TestSubscribeAccountStatusesFromStartBlockID() { + s.executionDataTracker.On( + "GetStartHeightFromBlockID", + mock.AnythingOfType("flow.Identifier"), + ).Return(func(startBlockID flow.Identifier) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromBlockID(startBlockID) + }, nil) + + call := func(ctx context.Context, startValue interface{}, filter state_stream.AccountStatusFilter) subscription.Subscription { + return s.backend.SubscribeAccountStatusesFromStartBlockID(ctx, startValue.(flow.Identifier), filter) + } + + s.subscribeToAccountStatuses(call, s.subscribeFromStartBlockIdTestCases()) +} + +// TestSubscribeAccountStatusesFromStartHeight tests the SubscribeAccountStatusesFromStartHeight method. +func (s *BackendAccountStatusesSuite) TestSubscribeAccountStatusesFromStartHeight() { + s.executionDataTracker.On( + "GetStartHeightFromHeight", + mock.AnythingOfType("uint64"), + ).Return(func(startHeight uint64) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromHeight(startHeight) + }, nil) + + call := func(ctx context.Context, startValue interface{}, filter state_stream.AccountStatusFilter) subscription.Subscription { + return s.backend.SubscribeAccountStatusesFromStartHeight(ctx, startValue.(uint64), filter) + } + + s.subscribeToAccountStatuses(call, s.subscribeFromStartHeightTestCases()) +} + +// TestSubscribeAccountStatusesFromLatestBlock tests the SubscribeAccountStatusesFromLatestBlock method. +func (s *BackendAccountStatusesSuite) TestSubscribeAccountStatusesFromLatestBlock() { + s.executionDataTracker.On( + "GetStartHeightFromLatest", + mock.Anything, + ).Return(func(ctx context.Context) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromLatest(ctx) + }, nil) + + call := func(ctx context.Context, startValue interface{}, filter state_stream.AccountStatusFilter) subscription.Subscription { + return s.backend.SubscribeAccountStatusesFromLatestBlock(ctx, filter) + } + + s.subscribeToAccountStatuses(call, s.subscribeFromLatestTestCases()) +} + +// requireEventsResponse ensures that the received event information matches the expected data. +func (s *BackendAccountStatusesSuite) requireEventsResponse(v interface{}, expected *AccountStatusesResponse) { + actual, ok := v.(*AccountStatusesResponse) + require.True(s.T(), ok, "unexpected response type: %T", v) + + assert.Equal(s.T(), expected.BlockID, actual.BlockID) + assert.Equal(s.T(), expected.Height, actual.Height) + assert.Equal(s.T(), expected.AccountEvents, actual.AccountEvents) +} + +// TestSubscribeAccountStatusesFromSporkRootBlock tests that events subscriptions starting from the spork +// root block return an empty result for the root block. 
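The `.Return(func(...) ..., nil)` calls above work because mockery-generated mocks invoke a function passed to `Return` rather than treating it as a static value, which is what lets these tests delegate to the real tracker while still recording expectations. A hand-rolled illustration of that generated pattern (`TrackerMock` is hypothetical):

```go
package example

import (
	"github.com/stretchr/testify/mock"
)

// TrackerMock mimics the shape mockery generates: if Return was given a
// function, invoke it; otherwise fall back to the static return values.
type TrackerMock struct {
	mock.Mock
}

func (m *TrackerMock) GetStartHeightFromHeight(startHeight uint64) (uint64, error) {
	ret := m.Called(startHeight)
	if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok {
		return rf(startHeight)
	}
	return ret.Get(0).(uint64), ret.Error(1)
}
```

With this shape, `m.On("GetStartHeightFromHeight", mock.AnythingOfType("uint64")).Return(func(h uint64) (uint64, error) { return real.GetStartHeightFromHeight(h) }, nil)` forwards every call to the real implementation, exactly as the suite does above.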
+func (s *BackendAccountStatusesSuite) TestSubscribeAccountStatusesFromSporkRootBlock() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // setup the backend to have 1 available block + s.highestBlockHeader = s.blocks[0].ToHeader() + + rootEventResponse := &AccountStatusesResponse{ + BlockID: s.rootBlock.ID(), + Height: s.rootBlock.Height, + AccountEvents: map[string]flow.EventsList{}, + } + + filter, err := state_stream.NewAccountStatusFilter(state_stream.DefaultEventFilterConfig, chainID.Chain(), []string{}, []string{}) + require.NoError(s.T(), err) + + expectedEvents := s.expectedAccountStatuses(s.blocks[0].ID(), filter) + firstEventResponse := &AccountStatusesResponse{ + BlockID: s.blocks[0].ID(), + Height: s.blocks[0].Height, + AccountEvents: expectedEvents, + } + + assertSubscriptionResponses := func(sub subscription.Subscription, cancel context.CancelFunc) { + // the first response should have details from the root block and no events + resp := <-sub.Channel() + s.requireEventsResponse(resp, rootEventResponse) + + // the second response should have details from the first block and its events + resp = <-sub.Channel() + s.requireEventsResponse(resp, firstEventResponse) + + cancel() + resp, ok := <-sub.Channel() + assert.False(s.T(), ok) + assert.Nil(s.T(), resp) + assert.ErrorIs(s.T(), sub.Err(), context.Canceled) + } + + s.Run("by height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromHeight", s.rootBlock.Height). + Return(func(startHeight uint64) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromHeight(startHeight) + }) + + sub := s.backend.SubscribeAccountStatusesFromStartHeight(subCtx, s.rootBlock.Height, filter) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by ID", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromBlockID", s.rootBlock.ID()). + Return(func(startBlockID flow.Identifier) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromBlockID(startBlockID) + }) + + sub := s.backend.SubscribeAccountStatusesFromStartBlockID(subCtx, s.rootBlock.ID(), filter) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by latest", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + // simulate the case where the latest block is also the root block + s.snapshot.On("Head").Unset() + s.snapshot.On("Head").Return(s.rootBlock.ToHeader(), nil).Once() + + s.executionDataTracker.On("GetStartHeightFromLatest", mock.Anything). + Return(func(ctx context.Context) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromLatest(ctx) + }) + + sub := s.backend.SubscribeAccountStatusesFromLatestBlock(subCtx, filter) + assertSubscriptionResponses(sub, subCancel) + }) + +} + +// TestSubscribeAccountStatusesHandlesErrors tests handling of expected errors in the SubscribeAccountStatuses. 
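The error-handling test below asserts on gRPC status codes recovered from the failed subscription via status.Code(sub.Err()). For orientation, this is roughly how a caller would branch on those codes; backend, startID, and filter are placeholders:

```go
sub := backend.SubscribeAccountStatusesFromStartBlockID(ctx, startID, filter)
if err := sub.Err(); err != nil {
	switch status.Code(err) {
	case codes.NotFound:
		// the start block is not indexed (yet): retry later or pick another start
	case codes.InvalidArgument:
		// caller error, e.g. a start height below the spork root height
	default:
		// unexpected terminal failure
	}
}
```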
+func (s *BackendExecutionDataSuite) TestSubscribeAccountStatusesHandlesErrors() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // mock block tracker for SubscribeBlocksFromStartBlockID + s.executionDataTracker.On( + "GetStartHeightFromBlockID", + mock.AnythingOfType("flow.Identifier"), + ).Return(func(startBlockID flow.Identifier) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromBlockID(startBlockID) + }, nil) + + s.Run("returns error for unindexed start blockID", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeAccountStatusesFromStartBlockID(subCtx, unittest.IdentifierFixture(), state_stream.AccountStatusFilter{}) + assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "expected NotFound, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + s.executionDataTracker.On( + "GetStartHeightFromHeight", + mock.AnythingOfType("uint64"), + ).Return(func(startHeight uint64) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromHeight(startHeight) + }, nil) + + s.Run("returns error for start height before root height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeAccountStatusesFromStartHeight(subCtx, s.rootBlock.Height-1, state_stream.AccountStatusFilter{}) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected InvalidArgument, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + // make sure we're starting with a fresh cache + s.execDataHeroCache.Clear() + + s.Run("returns error for unindexed start height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeAccountStatusesFromStartHeight(subCtx, s.blocks[len(s.blocks)-1].Height+10, state_stream.AccountStatusFilter{}) + assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "expected NotFound, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) +} + +// expectedAccountStatuses returns the account status events from the mock block events that match +// the provided filter. 
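The filter this helper matches against is built elsewhere in the suite via state_stream.NewAccountStatusFilter. As a reminder of its shape, with arguments as used in the test cases above (the tests suggest an empty slice places no restriction on that dimension):

```go
filter, err := state_stream.NewAccountStatusFilter(
	state_stream.DefaultEventFilterConfig, // validation limits for the filter
	chainID.Chain(),                       // chain used to resolve core event types
	[]string{"flow.AccountKeyAdded"},      // event types; empty = all core account events
	[]string{},                            // account addresses; empty = all accounts
)
if err != nil {
	// the constructor rejects malformed event types or addresses
}
```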
+func (s *BackendAccountStatusesSuite) expectedAccountStatuses(
+	blockID flow.Identifier,
+	filter state_stream.AccountStatusFilter,
+) map[string]flow.EventsList {
+	expectedEvents := map[string]flow.EventsList{}
+	for _, event := range s.blockEvents[blockID] {
+		if filter.Match(event) {
+			var address string
+			switch event.Type {
+			case state_stream.CoreEventAccountCreated:
+				address = s.accountCreatedAddress.HexWithPrefix()
+			case state_stream.CoreEventAccountContractAdded:
+				address = s.accountContractAdded.HexWithPrefix()
+			case state_stream.CoreEventAccountContractUpdated:
+				address = s.accountContractUpdated.HexWithPrefix()
+			}
+			expectedEvents[address] = append(expectedEvents[address], event)
+		}
+	}
+	return expectedEvents
+}
diff --git a/engine/access/state_stream/backend/backend_events.go b/engine/access/state_stream/backend/backend_events.go
new file mode 100644
index 00000000000..e4c94ffc5dd
--- /dev/null
+++ b/engine/access/state_stream/backend/backend_events.go
@@ -0,0 +1,149 @@
+package backend
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine/access/state_stream"
+	"github.com/onflow/flow-go/engine/access/subscription"
+	"github.com/onflow/flow-go/engine/access/subscription/tracker"
+	"github.com/onflow/flow-go/fvm/errors"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+type EventsBackend struct {
+	log zerolog.Logger
+
+	subscriptionHandler  *subscription.SubscriptionHandler
+	executionDataTracker tracker.ExecutionDataTracker
+	eventsProvider       EventsProvider
+}
+
+// SubscribeEvents is deprecated and will be removed in a future version.
+// Use SubscribeEventsFromStartBlockID, SubscribeEventsFromStartHeight or SubscribeEventsFromLatest.
+//
+// SubscribeEvents streams events for all blocks starting at the specified block ID or block height
+// up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Only one of startBlockID and startHeight may be set. If neither startBlockID nor startHeight is provided,
+// the latest sealed block is used.
+//
+// Events within each block are filtered by the provided EventFilter, and only
+// those events that match the filter are returned. If no filter is provided,
+// all events are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startBlockID: The identifier of the starting block. If provided, startHeight should be 0.
+// - startHeight: The height of the starting block. If provided, startBlockID should be flow.ZeroID.
+// - filter: The event filter used to filter events.
+//
+// If invalid parameters are supplied, SubscribeEvents returns a failed subscription.
+func (b *EventsBackend) SubscribeEvents(ctx context.Context, startBlockID flow.Identifier, startHeight uint64, filter state_stream.EventFilter) subscription.Subscription {
+	nextHeight, err := b.executionDataTracker.GetStartHeight(ctx, startBlockID, startHeight)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start height")
+	}
+
+	return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getResponseFactory(filter))
+}
+
+// SubscribeEventsFromStartBlockID streams events starting at the specified block ID,
+// up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Events within each block are filtered by the provided EventFilter, and only
+// those events that match the filter are returned. If no filter is provided,
+// all events are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startBlockID: The identifier of the starting block.
+// - filter: The event filter used to filter events.
+//
+// If invalid parameters are supplied, SubscribeEventsFromStartBlockID returns a failed subscription.
+func (b *EventsBackend) SubscribeEventsFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, filter state_stream.EventFilter) subscription.Subscription {
+	nextHeight, err := b.executionDataTracker.GetStartHeightFromBlockID(startBlockID)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start height from block id")
+	}
+
+	return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getResponseFactory(filter))
+}
+
+// SubscribeEventsFromStartHeight streams events starting at the specified block height,
+// up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Events within each block are filtered by the provided EventFilter, and only
+// those events that match the filter are returned. If no filter is provided,
+// all events are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startHeight: The height of the starting block.
+// - filter: The event filter used to filter events.
+//
+// If invalid parameters are supplied, SubscribeEventsFromStartHeight returns a failed subscription.
+func (b *EventsBackend) SubscribeEventsFromStartHeight(ctx context.Context, startHeight uint64, filter state_stream.EventFilter) subscription.Subscription {
+	nextHeight, err := b.executionDataTracker.GetStartHeightFromHeight(startHeight)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start height from block height")
+	}
+
+	return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getResponseFactory(filter))
+}
+
+// SubscribeEventsFromLatest subscribes to events starting at the latest sealed block,
+// up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Events within each block are filtered by the provided EventFilter, and only
+// those events that match the filter are returned. If no filter is provided,
+// all events are returned.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - filter: The event filter used to filter events.
+//
+// If invalid parameters are supplied, SubscribeEventsFromLatest returns a failed subscription.
+func (b *EventsBackend) SubscribeEventsFromLatest(ctx context.Context, filter state_stream.EventFilter) subscription.Subscription {
+	nextHeight, err := b.executionDataTracker.GetStartHeightFromLatest(ctx)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start height from block height")
+	}
+
+	return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getResponseFactory(filter))
+}
+
+// getResponseFactory returns a function that retrieves the event response for a given height.
+//
+// Parameters:
+// - filter: The event filter used to filter events.
+//
+// Expected errors during normal operation:
+// - subscription.ErrBlockNotReady: execution data for the given block height is not available.
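The ErrBlockNotReady contract above is what lets a subscription survive the gap between the latest block and the indexed height. A hedged sketch of the retry loop that contract implies; getData, waitForNewData, and send are hypothetical stand-ins for the subscription handler's internals:

```go
for {
	resp, err := getData(ctx, height) // a subscription.GetDataByHeightFunc
	if errors.Is(err, subscription.ErrBlockNotReady) {
		waitForNewData() // hypothetical: block until the broadcaster signals progress
		continue         // retry the same height; this error is not terminal
	}
	if err != nil {
		return err // any other error terminates the stream
	}
	send(resp) // hypothetical sink, e.g. the subscription channel
	height++
}
```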
+func (b *EventsBackend) getResponseFactory(filter state_stream.EventFilter) subscription.GetDataByHeightFunc {
+	return func(ctx context.Context, height uint64) (response interface{}, err error) {
+		eventsResponse, err := b.eventsProvider.GetAllEventsResponse(ctx, height)
+		if err != nil {
+			if errors.Is(err, storage.ErrNotFound) ||
+				errors.Is(err, storage.ErrHeightNotIndexed) {
+				return nil, subscription.ErrBlockNotReady
+			}
+			return nil, fmt.Errorf("block %d is not available yet: %w", height, subscription.ErrBlockNotReady)
+		}
+
+		eventsResponse.Events = filter.Filter(eventsResponse.Events)
+
+		return eventsResponse, nil
+	}
+}
diff --git a/engine/access/state_stream/backend/backend_events_test.go b/engine/access/state_stream/backend/backend_events_test.go
new file mode 100644
index 00000000000..36e62e96807
--- /dev/null
+++ b/engine/access/state_stream/backend/backend_events_test.go
@@ -0,0 +1,826 @@
+package backend
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine/access/state_stream"
+	"github.com/onflow/flow-go/engine/access/subscription"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/state_synchronization/indexer"
+	syncmock "github.com/onflow/flow-go/module/state_synchronization/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+	"github.com/onflow/flow-go/utils/unittest/mocks"
+)
+
+// eventsTestType represents a test scenario for the subscribe events endpoints.
+// It is also used to exercise the deprecated SubscribeEvents endpoint; once that
+// endpoint is removed, the shared testType struct can be used as the test case instead.
+type eventsTestType struct {
+	name            string
+	highestBackfill int
+	startBlockID    flow.Identifier
+	startHeight     uint64
+	filter          state_stream.EventFilter
+}
+
+// BackendEventsSuite is a test suite for the EventsBackend functionality.
+// It is used to test the endpoints which enable users to subscribe to block events.
+// It verifies that each endpoint works properly with the expected data being returned and tests
+// handling of expected errors.
+//
+// Test cases cover various subscription methods:
+// - Subscribing from a start block ID or start height (SubscribeEvents)
+// - Subscribing from a start block ID (SubscribeEventsFromStartBlockID)
+// - Subscribing from a start height (SubscribeEventsFromStartHeight)
+// - Subscribing from the latest data (SubscribeEventsFromLatest)
+//
+// Each test case covers various scenarios and edge cases, thoroughly assessing the
+// EventsBackend's subscription functionality and its ability to handle different
+// starting points, event sources, and filtering criteria.
+//
+// The suite covers events extracted both from local storage and from ExecutionData,
+// ensuring proper testing of event retrieval from both sources.
+type BackendEventsSuite struct {
+	BackendExecutionDataSuite
+}
+
+func TestBackendEventsSuite(t *testing.T) {
+	suite.Run(t, new(BackendEventsSuite))
+}
+
+// SetupTest initializes the test suite.
+func (s *BackendEventsSuite) SetupTest() {
+	s.BackendExecutionDataSuite.SetupTest()
+}
+
+// setupFilterForTestCases sets up variations of test scenarios with different event filters.
+//
+// This function takes an array of base eventsTestType structs and creates variations for each of them.
+// For each base test case, it generates three variations: +// - All events: Includes all event types. +// - Some events: Includes only event types that match the provided filter. +// - No events: Includes a custom event type "A.0x1.NonExistent.Event". +func (s *BackendEventsSuite) setupFilterForTestCases(baseTests []eventsTestType) []eventsTestType { + // create variations for each of the base test + tests := make([]eventsTestType, 0, len(baseTests)*3) + var err error + + for _, test := range baseTests { + t1 := test + t1.name = fmt.Sprintf("%s - all events", test.name) + t1.filter = state_stream.EventFilter{} + tests = append(tests, t1) + + t2 := test + t2.name = fmt.Sprintf("%s - some events", test.name) + t2.filter, err = state_stream.NewEventFilter(state_stream.DefaultEventFilterConfig, chainID.Chain(), []string{string(testEventTypes[0])}, nil, nil) + require.NoError(s.T(), err) + tests = append(tests, t2) + + t3 := test + t3.name = fmt.Sprintf("%s - no events", test.name) + t3.filter, err = state_stream.NewEventFilter(state_stream.DefaultEventFilterConfig, chainID.Chain(), []string{"A.0x1.NonExistent.Event"}, nil, nil) + require.NoError(s.T(), err) + tests = append(tests, t3) + } + + return tests +} + +// setupLocalStorage prepares local storage for testing +func (s *BackendEventsSuite) setupLocalStorage() { + s.SetupBackend(true) + + // events returned from the db are sorted by txID, txIndex, then eventIndex. + // reproduce that here to ensure output order works as expected + blockEvents := make(map[flow.Identifier][]flow.Event) + for _, b := range s.blocks { + events := make([]flow.Event, len(s.blockEvents[b.ID()])) + for i, event := range s.blockEvents[b.ID()] { + events[i] = event + } + sort.Slice(events, func(i, j int) bool { + cmp := bytes.Compare(events[i].TransactionID[:], events[j].TransactionID[:]) + if cmp == 0 { + if events[i].TransactionIndex == events[j].TransactionIndex { + return events[i].EventIndex < events[j].EventIndex + } + return events[i].TransactionIndex < events[j].TransactionIndex + } + return cmp < 0 + }) + blockEvents[b.ID()] = events + } + + s.events.On("ByBlockID", mock.AnythingOfType("flow.Identifier")).Return( + mocks.StorageMapGetter(blockEvents), + ) + + reporter := syncmock.NewIndexReporter(s.T()) + reporter.On("LowestIndexedHeight").Return(s.blocks[0].Height, nil) + reporter.On("HighestIndexedHeight").Return(s.blocks[len(s.blocks)-1].Height, nil) + err := s.eventsIndex.Initialize(reporter) + s.Require().NoError(err) +} + +// TestSubscribeEventsFromExecutionData tests the SubscribeEvents method happy path for events +// extracted from ExecutionData +func (s *BackendEventsSuite) TestSubscribeEventsFromExecutionData() { + s.runTestSubscribeEvents() +} + +// TestSubscribeEventsFromLocalStorage tests the SubscribeEvents method happy path for events +// extracted from local storage +func (s *BackendEventsSuite) TestSubscribeEventsFromLocalStorage() { + s.setupLocalStorage() + s.runTestSubscribeEvents() +} + +// TestSubscribeEventsFromStartBlockIDFromExecutionData tests the SubscribeEventsFromStartBlockID method happy path for events +// extracted from ExecutionData +func (s *BackendEventsSuite) TestSubscribeEventsFromStartBlockIDFromExecutionData() { + s.runTestSubscribeEventsFromStartBlockID() +} + +// TestSubscribeEventsFromStartBlockIDFromLocalStorage tests the SubscribeEventsFromStartBlockID method happy path for events +// extracted from local storage +func (s *BackendEventsSuite) TestSubscribeEventsFromStartBlockIDFromLocalStorage() { + 
s.setupLocalStorage() + s.runTestSubscribeEventsFromStartBlockID() +} + +// TestSubscribeEventsFromStartHeightFromExecutionData tests the SubscribeEventsFromStartHeight method happy path for events +// extracted from ExecutionData +func (s *BackendEventsSuite) TestSubscribeEventsFromStartHeightFromExecutionData() { + s.runTestSubscribeEventsFromStartHeight() +} + +// TestSubscribeEventsFromStartHeightFromLocalStorage tests the SubscribeEventsFromStartHeight method happy path for events +// extracted from local storage +func (s *BackendEventsSuite) TestSubscribeEventsFromStartHeightFromLocalStorage() { + s.setupLocalStorage() + s.runTestSubscribeEventsFromStartHeight() +} + +// TestSubscribeEventsFromLatestFromExecutionData tests the SubscribeEventsFromLatest method happy path for events +// extracted from ExecutionData +func (s *BackendEventsSuite) TestSubscribeEventsFromLatestFromExecutionData() { + s.runTestSubscribeEventsFromLatest() +} + +// TestSubscribeEventsFromLatestFromLocalStorage tests the SubscribeEventsFromLatest method happy path for events +// extracted from local storage +func (s *BackendEventsSuite) TestSubscribeEventsFromLatestFromLocalStorage() { + s.setupLocalStorage() + s.runTestSubscribeEventsFromLatest() +} + +// runTestSubscribeEvents runs the test suite for SubscribeEvents subscription +func (s *BackendEventsSuite) runTestSubscribeEvents() { + tests := []eventsTestType{ + { + name: "happy path - all new blocks - latest", + highestBackfill: -1, // no backfill + startBlockID: flow.ZeroID, + startHeight: 0, + }, + { + name: "happy path - partial backfill - by height", + highestBackfill: 2, // backfill the first 3 blocks + startBlockID: flow.ZeroID, + startHeight: s.blocks[0].Height, + }, + { + name: "happy path - complete backfill - by id", + highestBackfill: len(s.blocks) - 1, // backfill all blocks + startBlockID: s.blocks[0].ID(), + startHeight: 0, + }, + } + + call := func(ctx context.Context, startBlockID flow.Identifier, startHeight uint64, filter state_stream.EventFilter) subscription.Subscription { + return s.backend.SubscribeEvents(ctx, startBlockID, startHeight, filter) + } + + s.subscribe(call, s.requireEventsResponse, s.setupFilterForTestCases(tests)) +} + +// runTestSubscribeEventsFromStartBlockID runs the test suite for SubscribeEventsFromStartBlockID subscription +func (s *BackendEventsSuite) runTestSubscribeEventsFromStartBlockID() { + tests := []eventsTestType{ + { + name: "happy path - all new blocks", + highestBackfill: -1, // no backfill + startBlockID: s.blocks[0].ID(), + }, + { + name: "happy path - partial backfill", + highestBackfill: 2, // backfill the first 3 blocks + startBlockID: s.blocks[0].ID(), + }, + { + name: "happy path - complete backfill", + highestBackfill: len(s.blocks) - 1, // backfill all blocks + startBlockID: s.blocks[0].ID(), + }, + } + + s.executionDataTracker.On( + "GetStartHeightFromBlockID", + mock.AnythingOfType("flow.Identifier"), + ).Return(func(startBlockID flow.Identifier) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromBlockID(startBlockID) + }, nil) + + call := func(ctx context.Context, startBlockID flow.Identifier, _ uint64, filter state_stream.EventFilter) subscription.Subscription { + return s.backend.SubscribeEventsFromStartBlockID(ctx, startBlockID, filter) + } + + s.subscribe(call, s.requireEventsResponse, s.setupFilterForTestCases(tests)) +} + +// runTestSubscribeEventsFromStartHeight runs the test suite for SubscribeEventsFromStartHeight subscription +func (s *BackendEventsSuite) 
runTestSubscribeEventsFromStartHeight() {
+	tests := []eventsTestType{
+		{
+			name:            "happy path - all new blocks",
+			highestBackfill: -1, // no backfill
+			startHeight:     s.blocks[0].Height,
+		},
+		{
+			name:            "happy path - partial backfill",
+			highestBackfill: 2, // backfill the first 3 blocks
+			startHeight:     s.blocks[0].Height,
+		},
+		{
+			name:            "happy path - complete backfill",
+			highestBackfill: len(s.blocks) - 1, // backfill all blocks
+			startHeight:     s.blocks[0].Height,
+		},
+	}
+
+	s.executionDataTracker.On(
+		"GetStartHeightFromHeight",
+		mock.AnythingOfType("uint64"),
+	).Return(func(startHeight uint64) (uint64, error) {
+		return s.executionDataTrackerReal.GetStartHeightFromHeight(startHeight)
+	}, nil)
+
+	call := func(ctx context.Context, _ flow.Identifier, startHeight uint64, filter state_stream.EventFilter) subscription.Subscription {
+		return s.backend.SubscribeEventsFromStartHeight(ctx, startHeight, filter)
+	}
+
+	s.subscribe(call, s.requireEventsResponse, s.setupFilterForTestCases(tests))
+}
+
+// runTestSubscribeEventsFromLatest runs the test suite for SubscribeEventsFromLatest subscription
+func (s *BackendEventsSuite) runTestSubscribeEventsFromLatest() {
+	tests := []eventsTestType{
+		{
+			name:            "happy path - all new blocks",
+			highestBackfill: -1, // no backfill
+		},
+		{
+			name:            "happy path - partial backfill",
+			highestBackfill: 2, // backfill the first 3 blocks
+		},
+		{
+			name:            "happy path - complete backfill",
+			highestBackfill: len(s.blocks) - 1, // backfill all blocks
+		},
+	}
+
+	s.executionDataTracker.On(
+		"GetStartHeightFromLatest",
+		mock.Anything,
+	).Return(func(ctx context.Context) (uint64, error) {
+		return s.executionDataTrackerReal.GetStartHeightFromLatest(ctx)
+	}, nil)
+
+	call := func(ctx context.Context, _ flow.Identifier, _ uint64, filter state_stream.EventFilter) subscription.Subscription {
+		return s.backend.SubscribeEventsFromLatest(ctx, filter)
+	}
+
+	s.subscribe(call, s.requireEventsResponse, s.setupFilterForTestCases(tests))
+}
+
+// subscribe is a helper function to run test scenarios for event subscription in the BackendEventsSuite.
+// It covers various scenarios for subscribing, handling backfill, and receiving block updates.
+// The test cases include scenarios for different event filters.
+//
+// Parameters:
+//
+// - subscribeFn: A function representing the subscription method to be tested.
+//   It takes a context, startBlockID, startHeight, and filter as parameters
+//   and returns a subscription.Subscription.
+//
+// - requireFn: A function responsible for validating that the received information
+//   matches the expected data. It takes an actual interface{} and an expected *EventsResponse as parameters.
+//
+// - tests: A slice of eventsTestType representing different test scenarios for subscriptions.
+//
+// The function performs the following steps for each test case:
+//
+// 1. Initializes the test context and cancellation function.
+// 2. Iterates through the provided test cases.
+// 3. For each test case, sets up an executionDataTracker mock if there are blocks to backfill.
+// 4. Mocks the latest sealed block if no startBlockID or startHeight is provided.
+// 5. Subscribes using the provided subscription function.
+// 6. Simulates the reception of new blocks and consumes them from the subscription channel.
+// 7. Ensures that there are no new messages waiting after all blocks have been processed.
+// 8. Cancels the subscription and ensures it shuts down gracefully.
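Steps 6 through 8 lean on two unittest helpers. Their assumed semantics, sketched from how they are used here (see utils/unittest for the real implementations, whose signatures may differ):

```go
// Rough stand-in for unittest.RequireReturnsBefore:
// the test fails unless f completes within d.
func requireReturnsBefore(t *testing.T, f func(), d time.Duration, msg string) {
	done := make(chan struct{})
	go func() { defer close(done); f() }()
	select {
	case <-done:
	case <-time.After(d):
		t.Fatalf("timed out: %s", msg)
	}
}
```

RequireNeverReturnBefore is the dual: it fails if f returns before the window elapses, which is how the loop below asserts the channel has nothing waiting.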
+func (s *BackendEventsSuite) subscribe( + subscribeFn func(ctx context.Context, startBlockID flow.Identifier, startHeight uint64, filter state_stream.EventFilter) subscription.Subscription, + requireFn func(interface{}, *EventsResponse), + tests []eventsTestType, +) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + for _, test := range tests { + s.Run(test.name, func() { + // add "backfill" block - blocks that are already in the database before the test starts + // this simulates a subscription on a past block + if test.highestBackfill > 0 { + s.highestBlockHeader = s.blocks[test.highestBackfill].ToHeader() + } + + subCtx, subCancel := context.WithCancel(ctx) + + // mock latest sealed if test case has no start value provided + if test.startBlockID == flow.ZeroID && test.startHeight == 0 { + s.snapshot.On("Head").Unset() + s.snapshot.On("Head").Return(s.blocks[0].ToHeader(), nil).Once() + } + + sub := subscribeFn(subCtx, test.startBlockID, test.startHeight, test.filter) + + // loop over all blocks + for i, b := range s.blocks { + s.T().Logf("checking block %d %v %d", i, b.ID(), b.Height) + + // simulate new block received. + // all blocks with index <= highestBackfill were already received + if i > test.highestBackfill { + s.highestBlockHeader = b.ToHeader() + + s.broadcaster.Publish() + } + + var expectedEvents flow.EventsList + for _, event := range s.blockEvents[b.ID()] { + if test.filter.Match(event) { + expectedEvents = append(expectedEvents, event) + } + } + + // consume events response from subscription + unittest.RequireReturnsBefore(s.T(), func() { + v, ok := <-sub.Channel() + require.True(s.T(), ok, "channel closed while waiting for exec data for block %x %v: err: %v", b.Height, b.ID(), sub.Err()) + + expected := &EventsResponse{ + BlockID: b.ID(), + Height: b.Height, + Events: expectedEvents, + BlockTimestamp: time.UnixMilli(int64(b.Timestamp)).UTC(), + } + requireFn(v, expected) + + }, time.Second, fmt.Sprintf("timed out waiting for block %d %v", b.Height, b.ID())) + } + + // make sure there are no new messages waiting. the channel should be opened with nothing waiting + unittest.RequireNeverReturnBefore(s.T(), func() { + <-sub.Channel() + }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") + + // stop the subscription + subCancel() + + // ensure subscription shuts down gracefully + unittest.RequireReturnsBefore(s.T(), func() { + v, ok := <-sub.Channel() + assert.Nil(s.T(), v) + assert.False(s.T(), ok) + assert.ErrorIs(s.T(), sub.Err(), context.Canceled) + }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") + }) + } +} + +// requireEventsResponse ensures that the received event information matches the expected data. +func (s *BackendEventsSuite) requireEventsResponse(v interface{}, expected *EventsResponse) { + actual, ok := v.(*EventsResponse) + require.True(s.T(), ok, "unexpected response type: %T", v) + + assert.Equal(s.T(), expected.BlockID, actual.BlockID) + assert.Equal(s.T(), expected.Height, actual.Height) + assert.Equal(s.T(), expected.Events, actual.Events) + assert.Equal(s.T(), expected.BlockTimestamp, actual.BlockTimestamp) +} + +// TestSubscribeEventsFromSporkRootBlock tests that events subscriptions starting from the spork +// root block return an empty result for the root block. 
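One detail of the expected responses assembled above: block timestamps are stored on the header as an integer and converted to a UTC time.Time for comparison. Assuming the Timestamp field holds Unix milliseconds, as the conversion implies:

```go
var headerTimestamp uint64 = 1700000000000 // placeholder value, Unix milliseconds

// the conversion used when building the expected EventsResponse
blockTime := time.UnixMilli(int64(headerTimestamp)).UTC()
fmt.Println(blockTime) // 2023-11-14 22:13:20 +0000 UTC
```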
+func (s *BackendEventsSuite) TestSubscribeEventsFromSporkRootBlock() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // setup the backend to have 1 available block + s.highestBlockHeader = s.blocks[0].ToHeader() + + rootEventResponse := &EventsResponse{ + BlockID: s.rootBlock.ID(), + Height: s.rootBlock.Height, + BlockTimestamp: time.UnixMilli(int64(s.rootBlock.Timestamp)).UTC(), + } + + firstEventResponse := &EventsResponse{ + BlockID: s.blocks[0].ID(), + Height: s.blocks[0].Height, + BlockTimestamp: time.UnixMilli(int64(s.blocks[0].Timestamp)).UTC(), + Events: flow.EventsList(s.blockEvents[s.blocks[0].ID()]), + } + + assertSubscriptionResponses := func(sub subscription.Subscription, cancel context.CancelFunc) { + // the first response should have details from the root block and no events + resp := <-sub.Channel() + s.requireEventsResponse(resp, rootEventResponse) + + // the second response should have details from the first block and its events + resp = <-sub.Channel() + s.requireEventsResponse(resp, firstEventResponse) + + cancel() + resp, ok := <-sub.Channel() + assert.False(s.T(), ok) + assert.Nil(s.T(), resp) + assert.ErrorIs(s.T(), sub.Err(), context.Canceled) + } + + s.Run("by height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromHeight", s.rootBlock.Height). + Return(func(startHeight uint64) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromHeight(startHeight) + }) + + sub := s.backend.SubscribeEventsFromStartHeight(subCtx, s.rootBlock.Height, state_stream.EventFilter{}) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by height - legacy", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromHeight", s.rootBlock.Height). + Return(func(startHeight uint64) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromHeight(startHeight) + }) + + sub := s.backend.SubscribeEvents(subCtx, flow.ZeroID, s.rootBlock.Height, state_stream.EventFilter{}) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by ID", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromBlockID", s.rootBlock.ID()). + Return(func(startBlockID flow.Identifier) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromBlockID(startBlockID) + }) + + sub := s.backend.SubscribeEventsFromStartBlockID(subCtx, s.rootBlock.ID(), state_stream.EventFilter{}) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by ID - legacy", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromBlockID", s.rootBlock.ID()). + Return(func(startBlockID flow.Identifier) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromBlockID(startBlockID) + }) + + sub := s.backend.SubscribeEvents(subCtx, s.rootBlock.ID(), 0, state_stream.EventFilter{}) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by latest", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + // simulate the case where the latest block is also the root block + s.snapshot.On("Head").Unset() + s.snapshot.On("Head").Return(s.rootBlock.ToHeader(), nil).Once() + + s.executionDataTracker.On("GetStartHeightFromLatest", mock.Anything). 
+ Return(func(ctx context.Context) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromLatest(ctx) + }) + + sub := s.backend.SubscribeEventsFromLatest(subCtx, state_stream.EventFilter{}) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by latest - legacy", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + // simulate the case where the latest block is also the root block + s.snapshot.On("Head").Unset() + s.snapshot.On("Head").Return(s.rootBlock.ToHeader(), nil).Once() + + s.executionDataTracker.On("GetStartHeightFromLatest", mock.Anything). + Return(func(ctx context.Context) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromLatest(ctx) + }) + + sub := s.backend.SubscribeEvents(subCtx, flow.ZeroID, 0, state_stream.EventFilter{}) + assertSubscriptionResponses(sub, subCancel) + }) +} + +// TestSubscribeEventsHandlesErrors tests error handling for SubscribeEvents subscription +// +// Test Cases: +// +// 1. Returns error if both start blockID and start height are provided: +// - Ensures that providing both start blockID and start height results in an InvalidArgument error. +// +// 2. Returns error for start height before root height: +// - Validates that attempting to subscribe with a start height before the root height results in an InvalidArgument error. +// +// 3. Returns error for unindexed start blockID: +// - Tests that subscribing with an unindexed start blockID results in a NotFound error. +// +// 4. Returns error for unindexed start height: +// - Tests that subscribing with an unindexed start height results in a NotFound error. +// +// 5. Returns error for uninitialized index: +// - Ensures that subscribing with an uninitialized index results in a FailedPrecondition error. +// +// 6. Returns error for start below lowest indexed: +// - Validates that subscribing with a start height below the lowest indexed height results in an InvalidArgument error. +// +// 7. Returns error for start above highest indexed: +// - Validates that subscribing with a start height above the highest indexed height results in an InvalidArgument error. 
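The error tests below rely on replacing the suite's default stub with a case-specific one. The testify/mockery idiom, mirrored from the test bodies (s.executionDataTracker is the suite's mock):

```go
// drop the default delegation stub installed in SetupBackend
s.executionDataTracker.On("GetStartHeight", mock.Anything, mock.Anything).Unset()

// install a one-shot expectation that forces the failure path under test
s.executionDataTracker.On("GetStartHeight", subCtx, flow.ZeroID, uint64(0)).
	Return(uint64(0), status.Errorf(codes.FailedPrecondition,
		"failed to get lowest indexed height: %v", indexer.ErrIndexNotInitialized)).
	Once()
```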
+func (s *BackendExecutionDataSuite) TestSubscribeEventsHandlesErrors() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s.Run("returns error if both start blockID and start height are provided", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeEvents(subCtx, unittest.IdentifierFixture(), 1, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err())) + }) + + s.Run("returns error for start height before root height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeEvents(subCtx, flow.ZeroID, s.rootBlock.Height-1, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected InvalidArgument, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + s.Run("returns error for unindexed start blockID", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeEvents(subCtx, unittest.IdentifierFixture(), 0, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "expected NotFound, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + // make sure we're starting with a fresh cache + s.execDataHeroCache.Clear() + + s.Run("returns error for unindexed start height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeEvents(subCtx, flow.ZeroID, s.blocks[len(s.blocks)-1].Height+10, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "expected NotFound, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + // Unset GetStartHeight to mock new behavior instead of default one + s.executionDataTracker.On("GetStartHeight", mock.Anything, mock.Anything).Unset() + + s.Run("returns error for uninitialized index", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeight", subCtx, flow.ZeroID, uint64(0)). + Return(uint64(0), status.Errorf(codes.FailedPrecondition, "failed to get lowest indexed height: %v", indexer.ErrIndexNotInitialized)). + Once() + + // Note: eventIndex.Initialize() is not called in this test + sub := s.backend.SubscribeEvents(subCtx, flow.ZeroID, 0, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.FailedPrecondition, status.Code(sub.Err()), "expected FailedPrecondition, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + s.Run("returns error for start below lowest indexed", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeight", subCtx, flow.ZeroID, s.blocks[0].Height). + Return(uint64(0), status.Errorf(codes.InvalidArgument, "start height %d is lower than lowest indexed height %d", s.blocks[0].Height, 0)). + Once() + + sub := s.backend.SubscribeEvents(subCtx, flow.ZeroID, s.blocks[0].Height, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected InvalidArgument, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + s.Run("returns error for start above highest indexed", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeight", subCtx, flow.ZeroID, s.blocks[len(s.blocks)-1].Height). 
+ Return(uint64(0), status.Errorf(codes.InvalidArgument, "start height %d is higher than highest indexed height %d", s.blocks[len(s.blocks)-1].Height, s.blocks[0].Height)). + Once() + + sub := s.backend.SubscribeEvents(subCtx, flow.ZeroID, s.blocks[len(s.blocks)-1].Height, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected InvalidArgument, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) +} + +// TestSubscribeEventsFromStartBlockIDHandlesErrors tests error handling for SubscribeEventsFromStartBlockID subscription +// +// Test Cases: +// +// 1. Returns error for unindexed start blockID: +// - Ensures that subscribing with an unindexed start blockID results in a NotFound error. +// +// 2. Returns error for uninitialized index: +// - Ensures that subscribing with an uninitialized index results in a FailedPrecondition error. +// +// 3. Returns error for start below lowest indexed: +// - Validates that subscribing with a start blockID below the lowest indexed height results in an InvalidArgument error. +// +// 4. Returns error for start above highest indexed: +// - Validates that subscribing with a start blockID above the highest indexed height results in an InvalidArgument error. +func (s *BackendExecutionDataSuite) TestSubscribeEventsFromStartBlockIDHandlesErrors() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s.executionDataTracker.On( + "GetStartHeightFromBlockID", + mock.Anything, + ).Return(func(startBlockID flow.Identifier) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromBlockID(startBlockID) + }, nil) + + s.Run("returns error for unindexed start blockID", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeEventsFromStartBlockID(subCtx, unittest.IdentifierFixture(), state_stream.EventFilter{}) + assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "expected NotFound, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + // Unset GetStartHeightFromBlockID to mock new behavior instead of default one + s.executionDataTracker.On("GetStartHeightFromBlockID", mock.Anything).Unset() + + s.Run("returns error for uninitialized index", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromBlockID", flow.ZeroID). + Return(uint64(0), status.Errorf(codes.FailedPrecondition, "failed to get lowest indexed height: %v", indexer.ErrIndexNotInitialized)). + Once() + + // Note: eventIndex.Initialize() is not called in this test + sub := s.backend.SubscribeEventsFromStartBlockID(subCtx, flow.ZeroID, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.FailedPrecondition, status.Code(sub.Err()), "expected FailedPrecondition, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + s.Run("returns error for start below lowest indexed", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromBlockID", s.blocks[0].ID()). + Return(uint64(0), status.Errorf(codes.InvalidArgument, "start height %d is lower than lowest indexed height %d", s.blocks[0].Height, 0)). 
+ Once() + + sub := s.backend.SubscribeEventsFromStartBlockID(subCtx, s.blocks[0].ID(), state_stream.EventFilter{}) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected InvalidArgument, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + s.Run("returns error for start above highest indexed", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromBlockID", s.blocks[len(s.blocks)-1].ID()). + Return(uint64(0), status.Errorf(codes.InvalidArgument, "start height %d is higher than highest indexed height %d", s.blocks[len(s.blocks)-1].Height, s.blocks[0].Height)). + Once() + + sub := s.backend.SubscribeEventsFromStartBlockID(subCtx, s.blocks[len(s.blocks)-1].ID(), state_stream.EventFilter{}) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected InvalidArgument, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) +} + +// TestSubscribeEventsFromStartHeightHandlesErrors tests error handling for SubscribeEventsFromStartHeight subscription. +// +// Test Cases: +// +// 1. Returns error for start height before root height: +// - Validates that attempting to subscribe with a start height before the root height results in an InvalidArgument error. +// +// 2. Returns error for unindexed start height: +// - Tests that subscribing with an unindexed start height results in a NotFound error. +// +// 3. Returns error for uninitialized index: +// - Ensures that subscribing with an uninitialized index results in a FailedPrecondition error. +// +// 4. Returns error for start below lowest indexed: +// - Validates that subscribing with a start height below the lowest indexed height results in an InvalidArgument error. +// +// 5. Returns error for start above highest indexed: +// - Validates that subscribing with a start height above the highest indexed height results in an InvalidArgument error. 
+func (s *BackendExecutionDataSuite) TestSubscribeEventsFromStartHeightHandlesErrors() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s.executionDataTracker.On( + "GetStartHeightFromHeight", + mock.Anything, + ).Return(func(startHeight uint64) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromHeight(startHeight) + }, nil) + + s.Run("returns error for start height before root height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeEventsFromStartHeight(subCtx, s.rootBlock.Height-1, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected InvalidArgument, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + // make sure we're starting with a fresh cache + s.execDataHeroCache.Clear() + + s.Run("returns error for unindexed start height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeEventsFromStartHeight(subCtx, s.blocks[len(s.blocks)-1].Height+10, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "expected NotFound, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + // Unset GetStartHeightFromHeight to mock new behavior instead of default one + s.executionDataTracker.On("GetStartHeightFromHeight", mock.Anything).Unset() + + s.Run("returns error for uninitialized index", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromHeight", s.blocks[0].Height). + Return(uint64(0), status.Errorf(codes.FailedPrecondition, "failed to get lowest indexed height: %v", indexer.ErrIndexNotInitialized)). + Once() + + // Note: eventIndex.Initialize() is not called in this test + sub := s.backend.SubscribeEventsFromStartHeight(subCtx, s.blocks[0].Height, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.FailedPrecondition, status.Code(sub.Err()), "expected FailedPrecondition, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + s.Run("returns error for start below lowest indexed", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromHeight", s.blocks[0].Height). + Return(uint64(0), status.Errorf(codes.InvalidArgument, "start height %d is lower than lowest indexed height %d", s.blocks[0].Height, 0)). + Once() + + sub := s.backend.SubscribeEventsFromStartHeight(subCtx, s.blocks[0].Height, state_stream.EventFilter{}) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected InvalidArgument, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) + }) + + s.Run("returns error for start above highest indexed", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromHeight", s.blocks[len(s.blocks)-1].Height). + Return(uint64(0), status.Errorf(codes.InvalidArgument, "start height %d is higher than highest indexed height %d", s.blocks[len(s.blocks)-1].Height, s.blocks[0].Height)). 
+			Once()
+
+		sub := s.backend.SubscribeEventsFromStartHeight(subCtx, s.blocks[len(s.blocks)-1].Height, state_stream.EventFilter{})
+		assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err()), "expected InvalidArgument, got %v: %v", status.Code(sub.Err()).String(), sub.Err())
+	})
+}
diff --git a/engine/access/state_stream/backend/backend_executiondata.go b/engine/access/state_stream/backend/backend_executiondata.go
new file mode 100644
index 00000000000..954640f3cb9
--- /dev/null
+++ b/engine/access/state_stream/backend/backend_executiondata.go
@@ -0,0 +1,145 @@
+package backend
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/rs/zerolog"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine/access/subscription"
+	"github.com/onflow/flow-go/engine/access/subscription/tracker"
+	"github.com/onflow/flow-go/engine/common/rpc"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
+	"github.com/onflow/flow-go/storage"
+)
+
+type ExecutionDataResponse struct {
+	Height         uint64
+	ExecutionData  *execution_data.BlockExecutionData
+	BlockTimestamp time.Time
+}
+
+type ExecutionDataBackend struct {
+	log     zerolog.Logger
+	headers storage.Headers
+
+	getExecutionData GetExecutionDataFunc
+
+	subscriptionHandler  *subscription.SubscriptionHandler
+	executionDataTracker tracker.ExecutionDataTracker
+}
+
+func (b *ExecutionDataBackend) GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionData, error) {
+	header, err := b.headers.ByBlockID(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not get block header for %s: %w", blockID, err)
+	}
+
+	executionData, err := b.getExecutionData(ctx, header.Height)
+
+	if err != nil {
+		// need custom not found handler due to blob not found error
+		if errors.Is(err, storage.ErrNotFound) || execution_data.IsBlobNotFoundError(err) || errors.Is(err, subscription.ErrBlockNotReady) {
+			return nil, status.Errorf(codes.NotFound, "could not find execution data: %v", err)
+		}
+
+		return nil, rpc.ConvertError(err, "could not get execution data", codes.Internal)
+	}
+
+	return executionData.BlockExecutionData, nil
+}
+
+// SubscribeExecutionData is deprecated and will be removed in future versions.
+// Use SubscribeExecutionDataFromStartBlockID, SubscribeExecutionDataFromStartBlockHeight or SubscribeExecutionDataFromLatest.
+//
+// SubscribeExecutionData streams execution data for all blocks starting at the specified block ID or block height
+// up until the latest available block. Once the latest is reached, the stream will remain open and responses
+// are sent for each new block as it becomes available.
+//
+// Only one of startBlockID and startHeight may be set. If neither startBlockID nor startHeight is provided,
+// the latest sealed block is used.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startBlockID: The identifier of the starting block. If provided, startHeight should be 0.
+// - startHeight: The height of the starting block. If provided, startBlockID should be flow.ZeroID.
+//
+// If invalid parameters are provided, a failed subscription will be returned.
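Before the implementation below, a hedged migration sketch: how a caller of the deprecated combined endpoint could map its (startBlockID, startHeight) arguments onto the three replacement endpoints, assuming (per the convention documented above) that flow.ZeroID and height 0 mean "not provided":

```go
func subscribeExecData(ctx context.Context, backend *ExecutionDataBackend, startBlockID flow.Identifier, startHeight uint64) subscription.Subscription {
	switch {
	case startBlockID != flow.ZeroID:
		return backend.SubscribeExecutionDataFromStartBlockID(ctx, startBlockID)
	case startHeight > 0:
		return backend.SubscribeExecutionDataFromStartBlockHeight(ctx, startHeight)
	default:
		// neither start value provided: begin at the latest block
		return backend.SubscribeExecutionDataFromLatest(ctx)
	}
}
```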
+func (b *ExecutionDataBackend) SubscribeExecutionData(ctx context.Context, startBlockID flow.Identifier, startHeight uint64) subscription.Subscription {
+	nextHeight, err := b.executionDataTracker.GetStartHeight(ctx, startBlockID, startHeight)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start block height")
+	}
+
+	return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getResponse)
+}
+
+// SubscribeExecutionDataFromStartBlockID streams execution data for all blocks starting at the specified block ID
+// up until the latest available block. Once the latest is reached, the stream will remain open and responses
+// are sent for each new block as it becomes available.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startBlockID: The identifier of the starting block.
+//
+// If invalid parameters are provided, a failed subscription will be returned.
+func (b *ExecutionDataBackend) SubscribeExecutionDataFromStartBlockID(ctx context.Context, startBlockID flow.Identifier) subscription.Subscription {
+	nextHeight, err := b.executionDataTracker.GetStartHeightFromBlockID(startBlockID)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start block height")
+	}
+
+	return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getResponse)
+}
+
+// SubscribeExecutionDataFromStartBlockHeight streams execution data for all blocks starting at the specified block height
+// up until the latest available block. Once the latest is reached, the stream will remain open and responses
+// are sent for each new block as it becomes available.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startBlockHeight: The height of the starting block.
+//
+// If invalid parameters are provided, a failed subscription will be returned.
+func (b *ExecutionDataBackend) SubscribeExecutionDataFromStartBlockHeight(ctx context.Context, startBlockHeight uint64) subscription.Subscription {
+	nextHeight, err := b.executionDataTracker.GetStartHeightFromHeight(startBlockHeight)
+	if err != nil {
+		return subscription.NewFailedSubscription(err, "could not get start block height")
+	}
+
+	return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getResponse)
+}
+
+// SubscribeExecutionDataFromLatest streams execution data starting at the latest block.
+// Once the latest is reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+//
+// Parameters:
+// - ctx: Context for the operation.
+//
+// If invalid parameters are provided, a failed subscription will be returned.
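Sketch of draining the stream the method below returns. Responses arrive as interface{} and, per getResponse further down, carry *ExecutionDataResponse values; backend is a placeholder, and the ChunkExecutionDatas field name is assumed from execution_data.BlockExecutionData:

```go
sub := backend.SubscribeExecutionDataFromLatest(ctx)
for v := range sub.Channel() {
	resp, ok := v.(*ExecutionDataResponse)
	if !ok {
		break // unexpected payload type
	}
	fmt.Printf("height %d: %d chunks\n", resp.Height, len(resp.ExecutionData.ChunkExecutionDatas))
}
if err := sub.Err(); err != nil && !errors.Is(err, context.Canceled) {
	// handle terminal stream error
}
```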
+func (b *ExecutionDataBackend) SubscribeExecutionDataFromLatest(ctx context.Context) subscription.Subscription { + nextHeight, err := b.executionDataTracker.GetStartHeightFromLatest(ctx) + if err != nil { + return subscription.NewFailedSubscription(err, "could not get start block height") + } + + return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getResponse) +} + +func (b *ExecutionDataBackend) getResponse(ctx context.Context, height uint64) (interface{}, error) { + executionData, err := b.getExecutionData(ctx, height) + if err != nil { + return nil, fmt.Errorf("could not get execution data for block %d: %w", height, err) + } + + return &ExecutionDataResponse{ + Height: height, + ExecutionData: executionData.BlockExecutionData, + }, nil +} diff --git a/engine/access/state_stream/backend/backend_executiondata_test.go b/engine/access/state_stream/backend/backend_executiondata_test.go new file mode 100644 index 00000000000..f7587affa6d --- /dev/null +++ b/engine/access/state_stream/backend/backend_executiondata_test.go @@ -0,0 +1,745 @@ +package backend + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/index" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/engine/access/subscription/tracker" + trackermock "github.com/onflow/flow-go/engine/access/subscription/tracker/mock" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/execution" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/mempool/herocache" + "github.com/onflow/flow-go/module/metrics" + protocolmock "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +var ( + chainID = flow.MonotonicEmulator + testEventTypes = []flow.EventType{ + unittest.EventTypeFixture(chainID), + unittest.EventTypeFixture(chainID), + unittest.EventTypeFixture(chainID), + } +) + +type BackendExecutionDataSuite struct { + suite.Suite + logger zerolog.Logger + state *protocolmock.State + params *protocolmock.Params + snapshot *protocolmock.Snapshot + headers *storagemock.Headers + events *storagemock.Events + seals *storagemock.Seals + results *storagemock.ExecutionResults + registers *storagemock.RegisterIndex + registersAsync *execution.RegistersAsyncStore + eventsIndex *index.EventsIndex + + bs blobs.Blobstore + eds execution_data.ExecutionDataStore + broadcaster *engine.Broadcaster + execDataCache *cache.ExecutionDataCache + execDataHeroCache *herocache.BlockExecutionData + executionDataTracker *trackermock.ExecutionDataTracker + backend *StateStreamBackend + executionDataTrackerReal tracker.ExecutionDataTracker + + blocks []*flow.Block + blockEvents map[flow.Identifier][]flow.Event + execDataMap map[flow.Identifier]*execution_data.BlockExecutionDataEntity + blockMap 
map[uint64]*flow.Block + sealMap map[flow.Identifier]*flow.Seal + resultMap map[flow.Identifier]*flow.ExecutionResult + registerID flow.RegisterID + + rootBlock *flow.Block + highestBlockHeader *flow.Header +} + +type executionDataTestType struct { + name string + highestBackfill int + startBlockID flow.Identifier + startHeight uint64 +} + +func TestBackendExecutionDataSuite(t *testing.T) { + suite.Run(t, new(BackendExecutionDataSuite)) +} + +func (s *BackendExecutionDataSuite) SetupTest() { + blockCount := 5 + s.SetupTestSuite(blockCount) + + var err error + parent := s.rootBlock.ToHeader() + + for i := 0; i < blockCount; i++ { + block := unittest.BlockWithParentFixture(parent) + // update for next iteration + parent = block.ToHeader() + + seal := unittest.BlockSealsFixture(1)[0] + result := unittest.ExecutionResultFixture() + blockEvents := generateMockEvents(block.ToHeader(), (i%len(testEventTypes))*3+1) + + numChunks := 5 + chunkDatas := make([]*execution_data.ChunkExecutionData, 0, numChunks) + for i := 0; i < numChunks; i++ { + var events flow.EventsList + switch { + case i >= len(blockEvents.Events): + events = flow.EventsList{} + case i == numChunks-1: + events = blockEvents.Events[i:] + default: + events = flow.EventsList{blockEvents.Events[i]} + } + chunkDatas = append(chunkDatas, unittest.ChunkExecutionDataFixture(s.T(), execution_data.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events))) + } + execData := unittest.BlockExecutionDataFixture( + unittest.WithBlockExecutionDataBlockID(block.ID()), + unittest.WithChunkExecutionDatas(chunkDatas...), + ) + + result.ExecutionDataID, err = s.eds.Add(context.TODO(), execData) + assert.NoError(s.T(), err) + + s.blocks = append(s.blocks, block) + s.execDataMap[block.ID()] = execution_data.NewBlockExecutionDataEntity(result.ExecutionDataID, execData) + s.blockEvents[block.ID()] = blockEvents.Events + s.blockMap[block.Height] = block + s.sealMap[block.ID()] = seal + s.resultMap[seal.ResultID] = result + + s.T().Logf("adding exec data for block %d %d %v => %v", i, block.Height, block.ID(), result.ExecutionDataID) + } + + s.SetupTestMocks() +} + +func (s *BackendExecutionDataSuite) SetupTestSuite(blockCount int) { + s.logger = unittest.Logger() + + s.state = protocolmock.NewState(s.T()) + s.snapshot = protocolmock.NewSnapshot(s.T()) + s.params = protocolmock.NewParams(s.T()) + s.headers = storagemock.NewHeaders(s.T()) + s.events = storagemock.NewEvents(s.T()) + s.seals = storagemock.NewSeals(s.T()) + s.results = storagemock.NewExecutionResults(s.T()) + + s.bs = blobs.NewBlobstore(dssync.MutexWrap(datastore.NewMapDatastore())) + s.eds = execution_data.NewExecutionDataStore(s.bs, execution_data.DefaultSerializer) + + s.broadcaster = engine.NewBroadcaster() + + s.execDataHeroCache = herocache.NewBlockExecutionData(subscription.DefaultCacheSize, s.logger, metrics.NewNoopCollector()) + s.execDataCache = cache.NewExecutionDataCache(s.eds, s.headers, s.seals, s.results, s.execDataHeroCache) + s.executionDataTracker = trackermock.NewExecutionDataTracker(s.T()) + + s.execDataMap = make(map[flow.Identifier]*execution_data.BlockExecutionDataEntity, blockCount) + s.blockEvents = make(map[flow.Identifier][]flow.Event, blockCount) + s.blockMap = make(map[uint64]*flow.Block, blockCount) + s.sealMap = make(map[flow.Identifier]*flow.Seal, blockCount) + s.resultMap = make(map[flow.Identifier]*flow.ExecutionResult, blockCount) + s.blocks = make([]*flow.Block, 0, blockCount) + + // generate blockCount consecutive blocks with associated seal, result and 
execution data + s.rootBlock = unittest.BlockFixture() + s.blockMap[s.rootBlock.Height] = s.rootBlock + s.highestBlockHeader = s.rootBlock.ToHeader() + + s.T().Logf("Generating %d blocks, root block: %d %s", blockCount, s.rootBlock.Height, s.rootBlock.ID()) +} + +func (s *BackendExecutionDataSuite) SetupTestMocks() { + s.registerID = unittest.RegisterIDFixture() + + s.eventsIndex = index.NewEventsIndex(index.NewReporter(), s.events) + s.registersAsync = execution.NewRegistersAsyncStore() + s.registers = storagemock.NewRegisterIndex(s.T()) + err := s.registersAsync.Initialize(s.registers) + require.NoError(s.T(), err) + s.registers.On("LatestHeight").Return(s.rootBlock.Height).Maybe() + s.registers.On("FirstHeight").Return(s.rootBlock.Height).Maybe() + s.registers.On("Get", mock.AnythingOfType("RegisterID"), mock.AnythingOfType("uint64")).Return( + func(id flow.RegisterID, height uint64) (flow.RegisterValue, error) { + if id == s.registerID { + return flow.RegisterValue{}, nil + } + return nil, storage.ErrNotFound + }).Maybe() + + s.state.On("Sealed").Return(s.snapshot, nil).Maybe() + s.snapshot.On("Head").Return(s.blocks[0].ToHeader(), nil).Maybe() + + s.state.On("Params").Return(s.params).Maybe() + s.params.On("SporkRootBlockHeight").Return(s.rootBlock.Height, nil).Maybe() + s.params.On("SporkRootBlock").Return(s.rootBlock, nil).Maybe() + s.headers.On("BlockIDByHeight", s.rootBlock.Height).Return(s.rootBlock.ID(), nil).Maybe() + + s.seals.On("FinalizedSealForBlock", mock.AnythingOfType("flow.Identifier")).Return( + mocks.StorageMapGetter(s.sealMap), + ).Maybe() + + s.results.On("ByID", mock.AnythingOfType("flow.Identifier")).Return( + mocks.StorageMapGetter(s.resultMap), + ).Maybe() + + s.headers.On("ByBlockID", mock.AnythingOfType("flow.Identifier")).Return( + func(blockID flow.Identifier) (*flow.Header, error) { + for _, block := range s.blockMap { + if block.ID() == blockID { + return block.ToHeader(), nil + } + } + return nil, storage.ErrNotFound + }, + ).Maybe() + + s.headers.On("ByHeight", mock.AnythingOfType("uint64")).Return( + mocks.ConvertStorageOutput( + mocks.StorageMapGetter(s.blockMap), + func(block *flow.Block) *flow.Header { return block.ToHeader() }, + ), + ).Maybe() + + s.headers.On("BlockIDByHeight", mock.AnythingOfType("uint64")).Return( + mocks.ConvertStorageOutput( + mocks.StorageMapGetter(s.blockMap), + func(block *flow.Block) flow.Identifier { return block.ID() }, + ), + ).Maybe() + + s.SetupBackend(false) +} + +func (s *BackendExecutionDataSuite) SetupBackend(useEventsIndex bool) { + var err error + s.backend, err = New( + s.logger, + s.state, + s.headers, + s.seals, + s.results, + s.eds, + s.execDataCache, + s.registersAsync, + s.eventsIndex, + useEventsIndex, + state_stream.DefaultRegisterIDsRequestLimit, + subscription.NewSubscriptionHandler( + s.logger, + s.broadcaster, + subscription.DefaultSendTimeout, + subscription.DefaultResponseLimit, + subscription.DefaultSendBufferSize, + ), + s.executionDataTracker, + ) + require.NoError(s.T(), err) + + // create real execution data tracker to use GetStartHeight from it, instead of mocking + s.executionDataTrackerReal = tracker.NewExecutionDataTracker( + s.logger, + s.state, + s.rootBlock.Height, + s.headers, + s.broadcaster, + s.rootBlock.Height, + s.eventsIndex, + useEventsIndex, + ) + + s.executionDataTracker.On( + "GetStartHeight", + mock.Anything, + mock.Anything, + mock.Anything, + ).Return(func(ctx context.Context, startBlockID flow.Identifier, startHeight uint64) (uint64, error) { + return 
s.executionDataTrackerReal.GetStartHeight(ctx, startBlockID, startHeight) + }, nil).Maybe() + + s.executionDataTracker.On("GetHighestHeight").Return(func() uint64 { + return s.highestBlockHeader.Height + }).Maybe() +} + +// generateMockEvents generates a set of mock events for a block split into multiple tx with +// appropriate indexes set +func generateMockEvents(header *flow.Header, eventCount int) flow.BlockEvents { + txCount := eventCount / 3 + + txID := unittest.IdentifierFixture() + txIndex := uint32(0) + eventIndex := uint32(0) + + events := make([]flow.Event, eventCount) + for i := 0; i < eventCount; i++ { + if i > 0 && i%txCount == 0 { + txIndex++ + txID = unittest.IdentifierFixture() + eventIndex = 0 + } + + events[i] = unittest.EventFixture( + unittest.Event.WithEventType(testEventTypes[i%len(testEventTypes)]), + unittest.Event.WithTransactionIndex(txIndex), + unittest.Event.WithEventIndex(eventIndex), + unittest.Event.WithTransactionID(txID), + ) + } + + return flow.BlockEvents{ + BlockID: header.ID(), + BlockHeight: header.Height, + BlockTimestamp: time.UnixMilli(int64(header.Timestamp)).UTC(), + Events: events, + } +} + +func (s *BackendExecutionDataSuite) TestGetExecutionDataByBlockID() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + block := s.blocks[0] + seal := s.sealMap[block.ID()] + result := s.resultMap[seal.ResultID] + execData := s.execDataMap[block.ID()] + + // notify backend block is available + s.highestBlockHeader = block.ToHeader() + + var err error + s.Run("happy path TestGetExecutionDataByBlockID success", func() { + result.ExecutionDataID, err = s.eds.Add(ctx, execData.BlockExecutionData) + require.NoError(s.T(), err) + + res, err := s.backend.GetExecutionDataByBlockID(ctx, block.ID()) + assert.Equal(s.T(), execData.BlockExecutionData, res) + assert.NoError(s.T(), err) + }) + + s.execDataHeroCache.Clear() + + s.Run("missing exec data for TestGetExecutionDataByBlockID failure", func() { + result.ExecutionDataID = unittest.IdentifierFixture() + + execDataRes, err := s.backend.GetExecutionDataByBlockID(ctx, block.ID()) + assert.Nil(s.T(), execDataRes) + assert.Equal(s.T(), codes.NotFound, status.Code(err)) + }) +} + +func (s *BackendExecutionDataSuite) TestSubscribeExecutionData() { + tests := []executionDataTestType{ + { + name: "happy path - all new blocks", + highestBackfill: -1, // no backfill + startBlockID: flow.ZeroID, + startHeight: 0, + }, + { + name: "happy path - partial backfill", + highestBackfill: 2, // backfill the first 3 blocks + startBlockID: flow.ZeroID, + startHeight: s.blocks[0].Height, + }, + { + name: "happy path - complete backfill", + highestBackfill: len(s.blocks) - 1, // backfill all blocks + startBlockID: s.blocks[0].ID(), + startHeight: 0, + }, + } + + subFunc := func(ctx context.Context, blockID flow.Identifier, startHeight uint64) subscription.Subscription { + return s.backend.SubscribeExecutionData(ctx, blockID, startHeight) + } + + s.subscribe(subFunc, tests) +} + +func (s *BackendExecutionDataSuite) TestSubscribeExecutionDataFromStartBlockID() { + tests := []executionDataTestType{ + { + name: "happy path - all new blocks", + highestBackfill: -1, // no backfill + startBlockID: s.blocks[0].ID(), + }, + { + name: "happy path - partial backfill", + highestBackfill: 2, // backfill the first 3 blocks + startBlockID: s.blocks[0].ID(), + }, + { + name: "happy path - complete backfill", + highestBackfill: len(s.blocks) - 1, // backfill all blocks + startBlockID: s.blocks[0].ID(), + }, + } + + 
s.executionDataTracker.On(
+		"GetStartHeightFromBlockID",
+		mock.AnythingOfType("flow.Identifier"),
+	).Return(func(startBlockID flow.Identifier) (uint64, error) {
+		return s.executionDataTrackerReal.GetStartHeightFromBlockID(startBlockID)
+	}, nil)
+
+	subFunc := func(ctx context.Context, blockID flow.Identifier, startHeight uint64) subscription.Subscription {
+		return s.backend.SubscribeExecutionDataFromStartBlockID(ctx, blockID)
+	}
+
+	s.subscribe(subFunc, tests)
+}
+
+func (s *BackendExecutionDataSuite) TestSubscribeExecutionDataFromStartBlockHeight() {
+	tests := []executionDataTestType{
+		{
+			name:            "happy path - all new blocks",
+			highestBackfill: -1, // no backfill
+			startHeight:     s.blocks[0].Height,
+		},
+		{
+			name:            "happy path - partial backfill",
+			highestBackfill: 2, // backfill the first 3 blocks
+			startHeight:     s.blocks[0].Height,
+		},
+		{
+			name:            "happy path - complete backfill",
+			highestBackfill: len(s.blocks) - 1, // backfill all blocks
+			startHeight:     s.blocks[0].Height,
+		},
+	}
+
+	s.executionDataTracker.On(
+		"GetStartHeightFromHeight",
+		mock.AnythingOfType("uint64"),
+	).Return(func(startHeight uint64) (uint64, error) {
+		return s.executionDataTrackerReal.GetStartHeightFromHeight(startHeight)
+	}, nil)
+
+	subFunc := func(ctx context.Context, blockID flow.Identifier, startHeight uint64) subscription.Subscription {
+		return s.backend.SubscribeExecutionDataFromStartBlockHeight(ctx, startHeight)
+	}
+
+	s.subscribe(subFunc, tests)
+}
+
+func (s *BackendExecutionDataSuite) TestSubscribeExecutionDataFromLatest() {
+	tests := []executionDataTestType{
+		{
+			name:            "happy path - all new blocks",
+			highestBackfill: -1, // no backfill
+		},
+		{
+			name:            "happy path - partial backfill",
+			highestBackfill: 2, // backfill the first 3 blocks
+		},
+		{
+			name:            "happy path - complete backfill",
+			highestBackfill: len(s.blocks) - 1, // backfill all blocks
+		},
+	}
+
+	s.executionDataTracker.On(
+		"GetStartHeightFromLatest",
+		mock.Anything,
+	).Return(func(ctx context.Context) (uint64, error) {
+		return s.executionDataTrackerReal.GetStartHeightFromLatest(ctx)
+	}, nil)
+
+	subFunc := func(ctx context.Context, blockID flow.Identifier, startHeight uint64) subscription.Subscription {
+		return s.backend.SubscribeExecutionDataFromLatest(ctx)
+	}
+
+	s.subscribe(subFunc, tests)
+}
+
+func (s *BackendExecutionDataSuite) subscribe(subscribeFunc func(ctx context.Context, startBlockID flow.Identifier, startHeight uint64) subscription.Subscription, tests []executionDataTestType) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	for _, test := range tests {
+		s.Run(test.name, func() {
+			// make sure we're starting with a fresh cache
+			s.execDataHeroCache.Clear()
+
+			s.T().Logf("len(s.execDataMap) %d", len(s.execDataMap))
+
+			// add "backfill" blocks - blocks that are already in the database before the test starts.
+			// this simulates a subscription starting from a past block
+			for i := 0; i <= test.highestBackfill; i++ {
+				s.T().Logf("backfilling block %d", i)
+				s.highestBlockHeader = s.blocks[i].ToHeader()
+			}
+
+			subCtx, subCancel := context.WithCancel(ctx)
+			sub := subscribeFunc(subCtx, test.startBlockID, test.startHeight)
+
+			// loop over all of the blocks
+			for i, b := range s.blocks {
+				execData := s.execDataMap[b.ID()]
+				s.T().Logf("checking block %d %v %v", i, b.Height, b.ID())
+
+				// simulate new exec data received.
+				// exec data for all blocks with index <= highestBackfill were already received
+				if i > test.highestBackfill {
+					s.highestBlockHeader = b.ToHeader()
+					s.broadcaster.Publish()
+				}
+
+				// consume execution data from subscription
+				unittest.RequireReturnsBefore(s.T(), func() {
+					v, ok := <-sub.Channel()
+					require.True(s.T(), ok, "channel closed while waiting for exec data for block %d %v: err: %v", b.Height, b.ID(), sub.Err())
+
+					resp, ok := v.(*ExecutionDataResponse)
+					require.True(s.T(), ok, "unexpected response type: %T", v)
+
+					assert.Equal(s.T(), b.Height, resp.Height)
+					assert.Equal(s.T(), execData.BlockExecutionData, resp.ExecutionData)
+				}, time.Second, fmt.Sprintf("timed out waiting for exec data for block %d %v", b.Height, b.ID()))
+			}
+
+			// make sure there are no new messages waiting. the channel should remain open with nothing waiting
+			unittest.RequireNeverReturnBefore(s.T(), func() {
+				<-sub.Channel()
+			}, 100*time.Millisecond, "received unexpected message from subscription")
+
+			// stop the subscription
+			subCancel()
+
+			// ensure subscription shuts down gracefully
+			unittest.RequireReturnsBefore(s.T(), func() {
+				v, ok := <-sub.Channel()
+				assert.Nil(s.T(), v)
+				assert.False(s.T(), ok)
+				assert.ErrorIs(s.T(), sub.Err(), context.Canceled)
+			}, 100*time.Millisecond, "timed out waiting for subscription to shut down")
+		})
+	}
+}
+
+// TestSubscribeExecutionFromSporkRootBlock tests that execution data subscriptions starting from
+// the spork root block return an empty result for the root block.
+func (s *BackendExecutionDataSuite) TestSubscribeExecutionFromSporkRootBlock() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// setup the backend to have 1 available block
+	s.highestBlockHeader = s.blocks[0].ToHeader()
+
+	rootEventResponse := &ExecutionDataResponse{
+		Height: s.rootBlock.Height,
+		ExecutionData: &execution_data.BlockExecutionData{
+			BlockID: s.rootBlock.ID(),
+		},
+	}
+
+	firstEventResponse := &ExecutionDataResponse{
+		Height:        s.blocks[0].Height,
+		ExecutionData: s.execDataMap[s.blocks[0].ID()].BlockExecutionData,
+	}
+
+	assertExecutionDataResponse := func(v interface{}, expected *ExecutionDataResponse) {
+		resp, ok := v.(*ExecutionDataResponse)
+		require.True(s.T(), ok, "unexpected response type: %T", v)
+
+		assert.Equal(s.T(), expected, resp)
+	}
+
+	assertSubscriptionResponses := func(sub subscription.Subscription, cancel context.CancelFunc) {
+		// the first response should have details from the root block and no events
+		resp := <-sub.Channel()
+		assertExecutionDataResponse(resp, rootEventResponse)
+
+		// the second response should have details from the first block and its events
+		resp = <-sub.Channel()
+		assertExecutionDataResponse(resp, firstEventResponse)
+
+		cancel()
+		resp, ok := <-sub.Channel()
+		assert.False(s.T(), ok)
+		assert.Nil(s.T(), resp)
+		assert.ErrorIs(s.T(), sub.Err(), context.Canceled)
+	}
+
+	s.Run("by height", func() {
+		subCtx, subCancel := context.WithCancel(ctx)
+		defer subCancel()
+
+		s.executionDataTracker.On("GetStartHeightFromHeight", s.rootBlock.Height).
+			Return(func(startHeight uint64) (uint64, error) {
+				return s.executionDataTrackerReal.GetStartHeightFromHeight(startHeight)
+			})
+
+		sub := s.backend.SubscribeExecutionDataFromStartBlockHeight(subCtx, s.rootBlock.Height)
+		assertSubscriptionResponses(sub, subCancel)
+	})
+
+	s.Run("by height - legacy", func() {
+		subCtx, subCancel := context.WithCancel(ctx)
+		defer subCancel()
+
+		s.executionDataTracker.On("GetStartHeightFromHeight", s.rootBlock.Height).
+ Return(func(startHeight uint64) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromHeight(startHeight) + }) + + sub := s.backend.SubscribeExecutionData(subCtx, flow.ZeroID, s.rootBlock.Height) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by ID", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromBlockID", s.rootBlock.ID()). + Return(func(startBlockID flow.Identifier) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromBlockID(startBlockID) + }) + + sub := s.backend.SubscribeExecutionDataFromStartBlockID(subCtx, s.rootBlock.ID()) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by ID - legacy", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + s.executionDataTracker.On("GetStartHeightFromBlockID", s.rootBlock.ID()). + Return(func(startBlockID flow.Identifier) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromBlockID(startBlockID) + }) + + sub := s.backend.SubscribeExecutionData(subCtx, s.rootBlock.ID(), 0) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by latest", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + // simulate the case where the latest block is also the root block + s.snapshot.On("Head").Unset() + s.snapshot.On("Head").Return(s.rootBlock.ToHeader(), nil).Once() + + s.executionDataTracker.On("GetStartHeightFromLatest", mock.Anything). + Return(func(ctx context.Context) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromLatest(ctx) + }) + + sub := s.backend.SubscribeExecutionDataFromLatest(subCtx) + assertSubscriptionResponses(sub, subCancel) + }) + + s.Run("by latest - legacy", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + // simulate the case where the latest block is also the root block + s.snapshot.On("Head").Unset() + s.snapshot.On("Head").Return(s.rootBlock.ToHeader(), nil).Once() + + s.executionDataTracker.On("GetStartHeightFromLatest", mock.Anything). 
+ Return(func(ctx context.Context) (uint64, error) { + return s.executionDataTrackerReal.GetStartHeightFromLatest(ctx) + }) + + sub := s.backend.SubscribeExecutionData(subCtx, flow.ZeroID, 0) + assertSubscriptionResponses(sub, subCancel) + }) +} + +func (s *BackendExecutionDataSuite) TestSubscribeExecutionDataHandlesErrors() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s.Run("returns error if both start blockID and start height are provided", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeExecutionData(subCtx, unittest.IdentifierFixture(), 1) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err())) + }) + + s.Run("returns error for start height before root height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeExecutionData(subCtx, flow.ZeroID, s.rootBlock.Height-1) + assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err())) + }) + + s.Run("returns error for unindexed start blockID", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeExecutionData(subCtx, unittest.IdentifierFixture(), 0) + assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err())) + }) + + // make sure we're starting with a fresh cache + s.execDataHeroCache.Clear() + + s.Run("returns error for unindexed start height", func() { + subCtx, subCancel := context.WithCancel(ctx) + defer subCancel() + + sub := s.backend.SubscribeExecutionData(subCtx, flow.ZeroID, s.blocks[len(s.blocks)-1].Height+10) + assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err())) + }) +} + +func (s *BackendExecutionDataSuite) TestGetRegisterValues() { + s.Run("normal case", func() { + res, err := s.backend.GetRegisterValues(flow.RegisterIDs{s.registerID}, s.rootBlock.Height) + require.NoError(s.T(), err) + require.NotEmpty(s.T(), res) + }) + + s.Run("returns error if block height is out of range", func() { + res, err := s.backend.GetRegisterValues(flow.RegisterIDs{s.registerID}, s.rootBlock.Height+1) + require.Nil(s.T(), res) + require.Equal(s.T(), codes.OutOfRange, status.Code(err)) + }) + + s.Run("returns error if register path is not indexed", func() { + falseID := flow.RegisterIDs{flow.RegisterID{Owner: "ha", Key: "ha"}} + res, err := s.backend.GetRegisterValues(falseID, s.rootBlock.Height) + require.Nil(s.T(), res) + require.Equal(s.T(), codes.NotFound, status.Code(err)) + }) + + s.Run("returns error if too many registers are requested", func() { + res, err := s.backend.GetRegisterValues(make(flow.RegisterIDs, s.backend.registerRequestLimit+1), s.rootBlock.Height) + require.Nil(s.T(), res) + require.Equal(s.T(), codes.InvalidArgument, status.Code(err)) + }) +} diff --git a/engine/access/state_stream/backend/engine.go b/engine/access/state_stream/backend/engine.go new file mode 100644 index 00000000000..97ce090dd04 --- /dev/null +++ b/engine/access/state_stream/backend/engine.go @@ -0,0 +1,66 @@ +package backend + +import ( + "github.com/rs/zerolog" + "google.golang.org/grpc" + + "github.com/onflow/flow/protobuf/go/flow/executiondata" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/grpcserver" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/storage" +) + +// Engine exposes the server with the state stream API. 
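+// The engine registers the ExecutionDataAPI service on the shared gRPC server; its single worker
+// signals ready and then simply waits for that server to shut down.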
+// By default, this engine is not enabled.
+// To run this engine, the run config must specify a port for the gRPC server to listen on.
+type Engine struct {
+	*component.ComponentManager
+	log     zerolog.Logger
+	backend *StateStreamBackend
+	config  Config
+	chain   flow.Chain
+	handler *Handler
+
+	execDataCache *cache.ExecutionDataCache
+	headers       storage.Headers
+}
+
+// NewEng returns a new state stream engine.
+func NewEng(
+	log zerolog.Logger,
+	config Config,
+	execDataCache *cache.ExecutionDataCache,
+	headers storage.Headers,
+	chainID flow.ChainID,
+	server *grpcserver.GrpcServer,
+	backend *StateStreamBackend,
+) (*Engine, error) {
+	logger := log.With().Str("engine", "state_stream_rpc").Logger()
+
+	e := &Engine{
+		log:           logger,
+		backend:       backend,
+		headers:       headers,
+		chain:         chainID.Chain(),
+		config:        config,
+		handler:       NewHandler(backend, chainID.Chain(), config),
+		execDataCache: execDataCache,
+	}
+
+	e.ComponentManager = component.NewComponentManagerBuilder().
+		AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+			ready()
+			<-server.Done()
+		}).
+		Build()
+
+	server.RegisterService(func(s *grpc.Server) {
+		executiondata.RegisterExecutionDataAPIServer(s, e.handler)
+	})
+
+	return e, nil
+}
diff --git a/engine/access/state_stream/backend/event_retriever.go b/engine/access/state_stream/backend/event_retriever.go
new file mode 100644
index 00000000000..70aa5db032f
--- /dev/null
+++ b/engine/access/state_stream/backend/event_retriever.go
@@ -0,0 +1,107 @@
+package backend
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine/access/index"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/logging"
+)
+
+// EventsResponse represents the response containing events for a specific block.
+type EventsResponse struct {
+	BlockID        flow.Identifier
+	Height         uint64
+	Events         flow.EventsList
+	BlockTimestamp time.Time
+}
+
+// EventsProvider retrieves events by block height. It can be configured to retrieve events from
+// the events indexer (if available) or via a dedicated callback that queries them from other sources.
+type EventsProvider struct {
+	log              zerolog.Logger
+	headers          storage.Headers
+	getExecutionData GetExecutionDataFunc
+	eventsIndex      *index.EventsIndex
+	useEventsIndex   bool
+}
+
+// GetAllEventsResponse retrieves the event response for a given block height.
+// Expected errors:
+// - codes.NotFound: If the block header for the specified block height is not found.
+// - error: An error, if any, encountered while getting events from storage or execution data.
+func (b *EventsProvider) GetAllEventsResponse(ctx context.Context, height uint64) (*EventsResponse, error) {
+	var response *EventsResponse
+	var err error
+	if b.useEventsIndex {
+		response, err = b.getEventsFromStorage(height)
+	} else {
+		response, err = b.getEventsFromExecutionData(ctx, height)
+	}
+
+	if err == nil {
+		header, err := b.headers.ByHeight(height)
+		if err != nil {
+			return nil, fmt.Errorf("could not get header for height %d: %w", height, err)
+		}
+		response.BlockTimestamp = time.UnixMilli(int64(header.Timestamp)).UTC()
+
+		if b.log.GetLevel() == zerolog.TraceLevel {
+			b.log.Trace().
+				Hex("block_id", logging.ID(response.BlockID)).
+				Uint64("height", height).
+				Int("events", len(response.Events)).
+ Msg("sending events") + } + } + + return response, err +} + +// getEventsFromExecutionData returns the events for a given height extract from the execution data. +// Expected errors: +// - error: An error indicating issues with getting execution data for block +func (b *EventsProvider) getEventsFromExecutionData(ctx context.Context, height uint64) (*EventsResponse, error) { + executionData, err := b.getExecutionData(ctx, height) + if err != nil { + return nil, fmt.Errorf("could not get execution data for block %d: %w", height, err) + } + + var events flow.EventsList + for _, chunkExecutionData := range executionData.ChunkExecutionDatas { + events = append(events, chunkExecutionData.Events...) + } + + return &EventsResponse{ + BlockID: executionData.BlockID, + Height: height, + Events: events, + }, nil +} + +// getEventsFromStorage returns the events for a given height from the index storage. +// Expected errors: +// - error: An error indicating any issues with the provided block height or +// an error indicating issue with getting events for a block. +func (b *EventsProvider) getEventsFromStorage(height uint64) (*EventsResponse, error) { + blockID, err := b.headers.BlockIDByHeight(height) + if err != nil { + return nil, fmt.Errorf("could not get header for height %d: %w", height, err) + } + + events, err := b.eventsIndex.ByBlockID(blockID, height) + if err != nil { + return nil, fmt.Errorf("could not get events for block %d: %w", height, err) + } + + return &EventsResponse{ + BlockID: blockID, + Height: height, + Events: events, + }, nil +} diff --git a/engine/access/state_stream/backend/handler.go b/engine/access/state_stream/backend/handler.go new file mode 100644 index 00000000000..ea9cded1bed --- /dev/null +++ b/engine/access/state_stream/backend/handler.go @@ -0,0 +1,621 @@ +package backend + +import ( + "context" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/onflow/flow/protobuf/go/flow/executiondata" + + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/counters" +) + +type Handler struct { + subscription.StreamingData + + api state_stream.API + chain flow.Chain + + eventFilterConfig state_stream.EventFilterConfig + defaultHeartbeatInterval uint64 +} + +// sendSubscribeEventsResponseFunc is a callback function used to send +// SubscribeEventsResponse to the client stream. +type sendSubscribeEventsResponseFunc func(*executiondata.SubscribeEventsResponse) error + +// sendSubscribeExecutionDataResponseFunc is a callback function used to send +// SubscribeExecutionDataResponse to the client stream. 
+type sendSubscribeExecutionDataResponseFunc func(*executiondata.SubscribeExecutionDataResponse) error
+
+var _ executiondata.ExecutionDataAPIServer = (*Handler)(nil)
+
+func NewHandler(api state_stream.API, chain flow.Chain, config Config) *Handler {
+	h := &Handler{
+		StreamingData:            subscription.NewStreamingData(config.MaxGlobalStreams),
+		api:                      api,
+		chain:                    chain,
+		eventFilterConfig:        config.EventFilterConfig,
+		defaultHeartbeatInterval: config.HeartbeatInterval,
+	}
+	return h
+}
+
+func (h *Handler) GetExecutionDataByBlockID(ctx context.Context, request *executiondata.GetExecutionDataByBlockIDRequest) (*executiondata.GetExecutionDataByBlockIDResponse, error) {
+	blockID, err := convert.BlockID(request.GetBlockId())
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "could not convert block ID: %v", err)
+	}
+
+	execData, err := h.api.GetExecutionDataByBlockID(ctx, blockID)
+	if err != nil {
+		return nil, rpc.ConvertError(err, "could not get execution data", codes.Internal)
+	}
+
+	message, err := convert.BlockExecutionDataToMessage(execData)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "could not convert execution data to entity: %v", err)
+	}
+
+	err = convert.BlockExecutionDataEventPayloadsToVersion(message, request.GetEventEncodingVersion())
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "could not convert execution data event payloads to JSON: %v", err)
+	}
+
+	return &executiondata.GetExecutionDataByBlockIDResponse{BlockExecutionData: message}, nil
+}
+
+// SubscribeExecutionData is deprecated and will be removed in a future version.
+// Use SubscribeExecutionDataFromStartBlockID, SubscribeExecutionDataFromStartBlockHeight or SubscribeExecutionDataFromLatest.
+//
+// SubscribeExecutionData handles subscription requests for execution data starting at the specified block ID or block height.
+// The handler manages the subscription and sends the subscribed information to the client via the provided stream.
+//
+// Expected errors during normal operation:
+// - codes.InvalidArgument - if request contains invalid startBlockID.
+// - codes.ResourceExhausted - if the maximum number of streams is reached.
+// - codes.Internal - if stream got unexpected response or could not send response.
+func (h *Handler) SubscribeExecutionData(request *executiondata.SubscribeExecutionDataRequest, stream executiondata.ExecutionDataAPI_SubscribeExecutionDataServer) error {
+	// check if the maximum number of streams is reached
+	if h.StreamCount.Load() >= h.MaxStreams {
+		return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached")
+	}
+	h.StreamCount.Add(1)
+	defer h.StreamCount.Add(-1)
+
+	startBlockID := flow.ZeroID
+	if request.GetStartBlockId() != nil {
+		blockID, err := convert.BlockID(request.GetStartBlockId())
+		if err != nil {
+			return status.Errorf(codes.InvalidArgument, "could not convert start block ID: %v", err)
+		}
+		startBlockID = blockID
+	}
+
+	sub := h.api.SubscribeExecutionData(stream.Context(), startBlockID, request.GetStartBlockHeight())
+
+	return HandleRPCSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion()))
+}
+
+// SubscribeExecutionDataFromStartBlockID handles subscription requests for
+// execution data starting at the specified block ID. The handler manages the
+// subscription and sends the subscribed information to the client via the
+// provided stream.
+// +// Expected errors during normal operation: +// - codes.InvalidArgument - if request contains invalid startBlockID. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream got unexpected response or could not send response. +func (h *Handler) SubscribeExecutionDataFromStartBlockID(request *executiondata.SubscribeExecutionDataFromStartBlockIDRequest, stream executiondata.ExecutionDataAPI_SubscribeExecutionDataFromStartBlockIDServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + startBlockID, err := convert.BlockID(request.GetStartBlockId()) + if err != nil { + return status.Errorf(codes.InvalidArgument, "could not convert start block ID: %v", err) + } + + sub := h.api.SubscribeExecutionDataFromStartBlockID(stream.Context(), startBlockID) + + return HandleRPCSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) +} + +// SubscribeExecutionDataFromStartBlockHeight handles subscription requests for +// execution data starting at the specified block height. The handler manages the +// subscription and sends the subscribed information to the client via the +// provided stream. +// +// Expected errors during normal operation: +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream got unexpected response or could not send response. +func (h *Handler) SubscribeExecutionDataFromStartBlockHeight(request *executiondata.SubscribeExecutionDataFromStartBlockHeightRequest, stream executiondata.ExecutionDataAPI_SubscribeExecutionDataFromStartBlockHeightServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + sub := h.api.SubscribeExecutionDataFromStartBlockHeight(stream.Context(), request.GetStartBlockHeight()) + + return HandleRPCSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) +} + +// SubscribeExecutionDataFromLatest handles subscription requests for +// execution data starting at the latest block. The handler manages the +// subscription and sends the subscribed information to the client via the +// provided stream. +// +// Expected errors during normal operation: +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - if stream got unexpected response or could not send response. +func (h *Handler) SubscribeExecutionDataFromLatest(request *executiondata.SubscribeExecutionDataFromLatestRequest, stream executiondata.ExecutionDataAPI_SubscribeExecutionDataFromLatestServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + sub := h.api.SubscribeExecutionDataFromLatest(stream.Context()) + + return HandleRPCSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) +} + +// SubscribeEvents is deprecated and will be removed in a future version. 
+// Use SubscribeEventsFromStartBlockID, SubscribeEventsFromStartHeight or SubscribeEventsFromLatest. +// +// SubscribeEvents handles subscription requests for events starting at the specified block ID or block height. +// The handler manages the subscription and sends the subscribed information to the client via the provided stream. +// +// Responses are returned for each block containing at least one event that matches the filter. Additionally, +// heartbeat responses (SubscribeEventsResponse with no events) are returned periodically to allow +// clients to track which blocks were searched. Clients can use this +// information to determine which block to start from when reconnecting. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if provided both startBlockID and startHeight, if invalid startBlockID is provided, if invalid event filter is provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - could not convert events to entity, if stream encountered an error, if stream got unexpected response or could not send response. +func (h *Handler) SubscribeEvents(request *executiondata.SubscribeEventsRequest, stream executiondata.ExecutionDataAPI_SubscribeEventsServer) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + startBlockID := flow.ZeroID + if request.GetStartBlockId() != nil { + blockID, err := convert.BlockID(request.GetStartBlockId()) + if err != nil { + return status.Errorf(codes.InvalidArgument, "could not convert start block ID: %v", err) + } + startBlockID = blockID + } + + filter, err := h.getEventFilter(request.GetFilter()) + if err != nil { + return err + } + + sub := h.api.SubscribeEvents(stream.Context(), startBlockID, request.GetStartBlockHeight(), filter) + + return HandleRPCSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion())) +} + +// SubscribeEventsFromStartBlockID handles subscription requests for events starting at the specified block ID. +// The handler manages the subscription and sends the subscribed information to the client via the provided stream. +// +// Responses are returned for each block containing at least one event that matches the filter. Additionally, +// heartbeat responses (SubscribeEventsResponse with no events) are returned periodically to allow +// clients to track which blocks were searched. Clients can use this +// information to determine which block to start from when reconnecting. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if invalid startBlockID is provided, if invalid event filter is provided. +// - codes.ResourceExhausted - if the maximum number of streams is reached. +// - codes.Internal - could not convert events to entity, if stream encountered an error, if stream got unexpected response or could not send response. 
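+//
+// A minimal client-side sketch (illustrative only; assumes a connected
+// executiondata.ExecutionDataAPIClient named client and a known start block ID startID):
+//
+//	stream, err := client.SubscribeEventsFromStartBlockID(ctx, &executiondata.SubscribeEventsFromStartBlockIDRequest{
+//		StartBlockId: startID[:],
+//		Filter:       &executiondata.EventFilter{EventType: []string{"flow.AccountCreated"}},
+//	})
+//	// each stream.Recv() then yields the matching events (or a heartbeat) for one block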
+func (h *Handler) SubscribeEventsFromStartBlockID(request *executiondata.SubscribeEventsFromStartBlockIDRequest, stream executiondata.ExecutionDataAPI_SubscribeEventsFromStartBlockIDServer) error {
+	// check if the maximum number of streams is reached
+	if h.StreamCount.Load() >= h.MaxStreams {
+		return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached")
+	}
+	h.StreamCount.Add(1)
+	defer h.StreamCount.Add(-1)
+
+	startBlockID, err := convert.BlockID(request.GetStartBlockId())
+	if err != nil {
+		return status.Errorf(codes.InvalidArgument, "could not convert start block ID: %v", err)
+	}
+
+	filter, err := h.getEventFilter(request.GetFilter())
+	if err != nil {
+		return err
+	}
+
+	sub := h.api.SubscribeEventsFromStartBlockID(stream.Context(), startBlockID, filter)
+
+	return HandleRPCSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion()))
+}
+
+// SubscribeEventsFromStartHeight handles subscription requests for events starting at the specified block height.
+// The handler manages the subscription and sends the subscribed information to the client via the provided stream.
+//
+// Responses are returned for each block containing at least one event that matches the filter. Additionally,
+// heartbeat responses (SubscribeEventsResponse with no events) are returned periodically to allow
+// clients to track which blocks were searched. Clients can use this
+// information to determine which block to start from when reconnecting.
+//
+// Expected errors during normal operation:
+// - codes.InvalidArgument - if invalid event filter is provided.
+// - codes.ResourceExhausted - if the maximum number of streams is reached.
+// - codes.Internal - could not convert events to entity, if stream encountered an error, if stream got unexpected response or could not send response.
+func (h *Handler) SubscribeEventsFromStartHeight(request *executiondata.SubscribeEventsFromStartHeightRequest, stream executiondata.ExecutionDataAPI_SubscribeEventsFromStartHeightServer) error {
+	// check if the maximum number of streams is reached
+	if h.StreamCount.Load() >= h.MaxStreams {
+		return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached")
+	}
+	h.StreamCount.Add(1)
+	defer h.StreamCount.Add(-1)
+
+	filter, err := h.getEventFilter(request.GetFilter())
+	if err != nil {
+		return err
+	}
+
+	sub := h.api.SubscribeEventsFromStartHeight(stream.Context(), request.GetStartBlockHeight(), filter)
+
+	return HandleRPCSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion()))
+}
+
+// SubscribeEventsFromLatest handles subscription requests for events starting from the latest sealed block.
+// The handler manages the subscription and sends the subscribed information to the client via the provided stream.
+//
+// Responses are returned for each block containing at least one event that matches the filter. Additionally,
+// heartbeat responses (SubscribeEventsResponse with no events) are returned periodically to allow
+// clients to track which blocks were searched. Clients can use this
+// information to determine which block to start from when reconnecting.
+//
+// Expected errors during normal operation:
+// - codes.InvalidArgument - if invalid event filter is provided.
+// - codes.ResourceExhausted - if the maximum number of streams is reached.
+// - codes.Internal - could not convert events to entity, if stream encountered an error, if stream got unexpected response or could not send response.
+func (h *Handler) SubscribeEventsFromLatest(request *executiondata.SubscribeEventsFromLatestRequest, stream executiondata.ExecutionDataAPI_SubscribeEventsFromLatestServer) error {
+	// check if the maximum number of streams is reached
+	if h.StreamCount.Load() >= h.MaxStreams {
+		return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached")
+	}
+	h.StreamCount.Add(1)
+	defer h.StreamCount.Add(-1)
+
+	filter, err := h.getEventFilter(request.GetFilter())
+	if err != nil {
+		return err
+	}
+
+	sub := h.api.SubscribeEventsFromLatest(stream.Context(), filter)
+
+	return HandleRPCSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion()))
+}
+
+// handleSubscribeExecutionData handles the subscription to execution data and sends it to the client via the provided stream.
+// This function is designed to be used as a callback for execution data updates in a subscription.
+//
+// Parameters:
+// - send: The function responsible for sending the execution data response to the client.
+//
+// Returns a function that can be used as a callback for execution data updates.
+//
+// Expected errors during normal operation:
+// - codes.Internal - could not convert execution data to entity or could not convert execution data event payloads to JSON.
+func handleSubscribeExecutionData(send sendSubscribeExecutionDataResponseFunc, eventEncodingVersion entities.EventEncodingVersion) func(response *ExecutionDataResponse) error {
+	return func(resp *ExecutionDataResponse) error {
+		execData, err := convert.BlockExecutionDataToMessage(resp.ExecutionData)
+		if err != nil {
+			return status.Errorf(codes.Internal, "could not convert execution data to entity: %v", err)
+		}
+
+		err = convert.BlockExecutionDataEventPayloadsToVersion(execData, eventEncodingVersion)
+		if err != nil {
+			return status.Errorf(codes.Internal, "could not convert execution data event payloads to JSON: %v", err)
+		}
+
+		err = send(&executiondata.SubscribeExecutionDataResponse{
+			BlockHeight:        resp.Height,
+			BlockExecutionData: execData,
+			BlockTimestamp:     timestamppb.New(resp.BlockTimestamp),
+		})
+
+		return err
+	}
+}
+
+// handleEventsResponse handles the event subscription and sends subscribed events to the client via the provided stream.
+// This function is designed to be used as a callback for events updates in a subscription.
+// It takes an EventsResponse, processes it, and sends the corresponding response to the client using the provided send function.
+//
+// Parameters:
+// - send: The function responsible for sending the events response to the client.
+//
+// Returns a function that can be used as a callback for events updates.
+//
+// Expected errors during normal operation:
+// - codes.Internal - could not convert events to entity or the stream could not send a response.
+func (h *Handler) handleEventsResponse(send sendSubscribeEventsResponseFunc, heartbeatInterval uint64, eventEncodingVersion entities.EventEncodingVersion) func(*EventsResponse) error {
+	if heartbeatInterval == 0 {
+		heartbeatInterval = h.defaultHeartbeatInterval
+	}
+
+	blocksSinceLastMessage := uint64(0)
+	messageIndex := counters.NewMonotonicCounter(0)
+
+	return func(resp *EventsResponse) error {
+		// check if there are any events in the response. if not, do not send a message unless the last
+		// response was more than HeartbeatInterval blocks ago
+		if len(resp.Events) == 0 {
+			blocksSinceLastMessage++
+			if blocksSinceLastMessage < heartbeatInterval {
+				return nil
+			}
+		}
+		blocksSinceLastMessage = 0
+
+		// BlockExecutionData contains CCF encoded events; convert the event payloads to the
+		// encoding version requested by the client
+		events, err := convert.EventsToMessagesWithEncodingConversion(resp.Events, entities.EventEncodingVersion_CCF_V0, eventEncodingVersion)
+		if err != nil {
+			return status.Errorf(codes.Internal, "could not convert events to entity: %v", err)
+		}
+
+		index := messageIndex.Value()
+		if ok := messageIndex.Set(index + 1); !ok {
+			return status.Errorf(codes.Internal, "message index already incremented to %d", messageIndex.Value())
+		}
+
+		err = send(&executiondata.SubscribeEventsResponse{
+			BlockHeight:    resp.Height,
+			BlockId:        convert.IdentifierToMessage(resp.BlockID),
+			Events:         events,
+			BlockTimestamp: timestamppb.New(resp.BlockTimestamp),
+			MessageIndex:   index,
+		})
+		if err != nil {
+			return rpc.ConvertError(err, "could not send response", codes.Internal)
+		}
+
+		return nil
+	}
+}
+
+// getEventFilter returns an event filter based on the provided event filter configuration.
+// If the event filter is nil, it returns an empty filter.
+// Otherwise, it initializes a new event filter using the provided filter parameters,
+// including the event type, address, and contract. It then validates the filter configuration
+// and returns the constructed event filter or an error if the filter configuration is invalid.
+// The event filter is used for subscription to events.
+//
+// Parameters:
+// - eventFilter: executiondata.EventFilter object containing filter parameters.
+//
+// Expected errors during normal operation:
+// - codes.InvalidArgument - if the provided event filter is invalid.
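+//
+// For example (illustrative only, with a hypothetical event type), the filter
+//
+//	&executiondata.EventFilter{EventType: []string{"A.0x1.Foo.Bar"}}
+//
+// restricts the subscription to events of type A.0x1.Foo.Bar, while a nil filter matches all events.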
+func (h *Handler) getEventFilter(eventFilter *executiondata.EventFilter) (state_stream.EventFilter, error) {
+	if eventFilter == nil {
+		return state_stream.EventFilter{}, nil
+	}
+	filter, err := state_stream.NewEventFilter(
+		h.eventFilterConfig,
+		h.chain,
+		eventFilter.GetEventType(),
+		eventFilter.GetAddress(),
+		eventFilter.GetContract(),
+	)
+	if err != nil {
+		return filter, status.Errorf(codes.InvalidArgument, "invalid event filter: %v", err)
+	}
+	return filter, nil
+}
+
+func (h *Handler) GetRegisterValues(_ context.Context, request *executiondata.GetRegisterValuesRequest) (*executiondata.GetRegisterValuesResponse, error) {
+	// Convert data
+	registerIDs, err := convert.MessagesToRegisterIDs(request.GetRegisterIds(), h.chain)
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "could not convert register IDs: %v", err)
+	}
+
+	// get payload from store
+	values, err := h.api.GetRegisterValues(registerIDs, request.GetBlockHeight())
+	if err != nil {
+		return nil, rpc.ConvertError(err, "could not get register values", codes.Internal)
+	}
+
+	return &executiondata.GetRegisterValuesResponse{Values: values}, nil
+}
+
+// convertAccountsStatusesResultsToMessage converts account status responses to the protobuf message representation
+func convertAccountsStatusesResultsToMessage(
+	eventVersion entities.EventEncodingVersion,
+	resp *AccountStatusesResponse,
+) ([]*executiondata.SubscribeAccountStatusesResponse_Result, error) {
+	var results []*executiondata.SubscribeAccountStatusesResponse_Result
+	for address, events := range resp.AccountEvents {
+		convertedEvent, err := convert.EventsToMessagesWithEncodingConversion(events, entities.EventEncodingVersion_CCF_V0, eventVersion)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "could not convert events to entity: %v", err)
+		}
+
+		results = append(results, &executiondata.SubscribeAccountStatusesResponse_Result{
+			Address: flow.HexToAddress(address).Bytes(),
+			Events:  convertedEvent,
+		})
+	}
+	return results, nil
+}
+
+// sendSubscribeAccountStatusesResponseFunc defines the function signature for sending account status responses
+type sendSubscribeAccountStatusesResponseFunc func(*executiondata.SubscribeAccountStatusesResponse) error
+
+// handleAccountStatusesResponse handles account status responses by converting them to the protobuf message and sending them to the subscriber.
+func (h *Handler) handleAccountStatusesResponse(
+	heartbeatInterval uint64,
+	eventVersion entities.EventEncodingVersion,
+	send sendSubscribeAccountStatusesResponseFunc,
+) func(resp *AccountStatusesResponse) error {
+	if heartbeatInterval == 0 {
+		heartbeatInterval = h.defaultHeartbeatInterval
+	}
+
+	blocksSinceLastMessage := uint64(0)
+	messageIndex := counters.NewMonotonicCounter(0)
+
+	return func(resp *AccountStatusesResponse) error {
+		// check if there are any events in the response. if not, do not send a message unless the last
+		// response was more than HeartbeatInterval blocks ago
+		if len(resp.AccountEvents) == 0 {
+			blocksSinceLastMessage++
+			if blocksSinceLastMessage < heartbeatInterval {
+				return nil
+			}
+		}
+		blocksSinceLastMessage = 0
+
+		results, err := convertAccountsStatusesResultsToMessage(eventVersion, resp)
+		if err != nil {
+			return err
+		}
+
+		index := messageIndex.Value()
+		if ok := messageIndex.Set(index + 1); !ok {
+			return status.Errorf(codes.Internal, "message index already incremented to %d", messageIndex.Value())
+		}
+
+		err = send(&executiondata.SubscribeAccountStatusesResponse{
+			BlockId:      convert.IdentifierToMessage(resp.BlockID),
+			BlockHeight:  resp.Height,
+			Results:      results,
+			MessageIndex: index,
+		})
+		if err != nil {
+			return rpc.ConvertError(err, "could not send response", codes.Internal)
+		}
+
+		return nil
+	}
+}
+
+// SubscribeAccountStatusesFromStartBlockID streams account statuses for all blocks starting at the requested
+// start block ID, up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
+func (h *Handler) SubscribeAccountStatusesFromStartBlockID(
+	request *executiondata.SubscribeAccountStatusesFromStartBlockIDRequest,
+	stream executiondata.ExecutionDataAPI_SubscribeAccountStatusesFromStartBlockIDServer,
+) error {
+	// check if the maximum number of streams is reached
+	if h.StreamCount.Load() >= h.MaxStreams {
+		return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached")
+	}
+
+	h.StreamCount.Add(1)
+	defer h.StreamCount.Add(-1)
+
+	startBlockID, err := convert.BlockID(request.GetStartBlockId())
+	if err != nil {
+		return status.Errorf(codes.InvalidArgument, "could not convert start block ID: %v", err)
+	}
+
+	statusFilter := request.GetFilter()
+	filter, err := state_stream.NewAccountStatusFilter(h.eventFilterConfig, h.chain, statusFilter.GetEventType(), statusFilter.GetAddress())
+	if err != nil {
+		return status.Errorf(codes.InvalidArgument, "could not create account status filter: %v", err)
+	}
+
+	sub := h.api.SubscribeAccountStatusesFromStartBlockID(stream.Context(), startBlockID, filter)
+
+	return HandleRPCSubscription(sub, h.handleAccountStatusesResponse(request.HeartbeatInterval, request.GetEventEncodingVersion(), stream.Send))
+}
+
+// SubscribeAccountStatusesFromStartHeight streams account statuses for all blocks starting at the requested
+// start block height, up until the latest available block. Once the latest is
+// reached, the stream will remain open and responses are sent for each new
+// block as it becomes available.
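+//
+// A minimal client-side sketch (illustrative only; assumes a connected
+// executiondata.ExecutionDataAPIClient named client and a start height already indexed by the node):
+//
+//	stream, err := client.SubscribeAccountStatusesFromStartHeight(ctx, &executiondata.SubscribeAccountStatusesFromStartHeightRequest{
+//		StartBlockHeight: startHeight,
+//	})
+//	// each response groups the matching account events by account address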
+func (h *Handler) SubscribeAccountStatusesFromStartHeight( + request *executiondata.SubscribeAccountStatusesFromStartHeightRequest, + stream executiondata.ExecutionDataAPI_SubscribeAccountStatusesFromStartHeightServer, +) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + statusFilter := request.GetFilter() + filter, err := state_stream.NewAccountStatusFilter(h.eventFilterConfig, h.chain, statusFilter.GetEventType(), statusFilter.GetAddress()) + if err != nil { + return status.Errorf(codes.InvalidArgument, "could not create account status filter: %v", err) + } + + sub := h.api.SubscribeAccountStatusesFromStartHeight(stream.Context(), request.GetStartBlockHeight(), filter) + + return HandleRPCSubscription(sub, h.handleAccountStatusesResponse(request.HeartbeatInterval, request.GetEventEncodingVersion(), stream.Send)) +} + +// SubscribeAccountStatusesFromLatestBlock streams account statuses for all blocks starting +// at the last sealed block, up until the latest available block. Once the latest is +// reached, the stream will remain open and responses are sent for each new +// block as it becomes available. +func (h *Handler) SubscribeAccountStatusesFromLatestBlock( + request *executiondata.SubscribeAccountStatusesFromLatestBlockRequest, + stream executiondata.ExecutionDataAPI_SubscribeAccountStatusesFromLatestBlockServer, +) error { + // check if the maximum number of streams is reached + if h.StreamCount.Load() >= h.MaxStreams { + return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") + } + + h.StreamCount.Add(1) + defer h.StreamCount.Add(-1) + + statusFilter := request.GetFilter() + filter, err := state_stream.NewAccountStatusFilter(h.eventFilterConfig, h.chain, statusFilter.GetEventType(), statusFilter.GetAddress()) + if err != nil { + return status.Errorf(codes.InvalidArgument, "could not create account status filter: %v", err) + } + + sub := h.api.SubscribeAccountStatusesFromLatestBlock(stream.Context(), filter) + + return HandleRPCSubscription(sub, h.handleAccountStatusesResponse(request.HeartbeatInterval, request.GetEventEncodingVersion(), stream.Send)) +} + +// HandleRPCSubscription is a generic handler for subscriptions to a specific type for rpc calls. +// +// Parameters: +// - sub: The subscription. +// - handleResponse: The function responsible for handling the response of the subscribed type. +// +// Expected errors during normal operation: +// - codes.Internal: If the subscription encounters an error or gets an unexpected response. 
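+//
+// Typical usage, as in the handlers above:
+//
+//	sub := h.api.SubscribeEventsFromLatest(stream.Context(), filter)
+//	return HandleRPCSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion()))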
+func HandleRPCSubscription[T any](sub subscription.Subscription, handleResponse func(resp T) error) error {
+	err := subscription.HandleSubscription(sub, handleResponse)
+	if err != nil {
+		return rpc.ConvertError(err, "handle subscription error", codes.Internal)
+	}
+
+	return nil
+}
diff --git a/engine/access/state_stream/backend/handler_test.go b/engine/access/state_stream/backend/handler_test.go
new file mode 100644
index 00000000000..affe2167dfe
--- /dev/null
+++ b/engine/access/state_stream/backend/handler_test.go
@@ -0,0 +1,668 @@
+package backend
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	pb "google.golang.org/genproto/googleapis/bytestream"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/cadence/encoding/ccf"
+	jsoncdc "github.com/onflow/cadence/encoding/json"
+	"github.com/onflow/flow/protobuf/go/flow/entities"
+	"github.com/onflow/flow/protobuf/go/flow/executiondata"
+
+	"github.com/onflow/flow-go/engine/access/state_stream"
+	ssmock "github.com/onflow/flow-go/engine/access/state_stream/mock"
+	"github.com/onflow/flow-go/engine/access/subscription"
+	"github.com/onflow/flow-go/engine/common/rpc/convert"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestHeartbeatResponseSuite(t *testing.T) {
+	suite.Run(t, new(HandlerTestSuite))
+}
+
+type HandlerTestSuite struct {
+	BackendExecutionDataSuite
+	handler *Handler
+}
+
+// fakeReadServerImpl is a utility structure for receiving responses from a grpc handler without
+// building a complete pipeline with client and server. It buffers the events streamed by the
+// server in a channel that can later be used to assert the correctness of responses.
+type fakeReadServerImpl struct {
+	pb.ByteStream_ReadServer
+	ctx      context.Context
+	received chan *executiondata.SubscribeEventsResponse
+}
+
+var _ executiondata.ExecutionDataAPI_SubscribeEventsServer = (*fakeReadServerImpl)(nil)
+
+func (fake *fakeReadServerImpl) Context() context.Context {
+	return fake.ctx
+}
+
+func (fake *fakeReadServerImpl) Send(response *executiondata.SubscribeEventsResponse) error {
+	fake.received <- response
+	return nil
+}
+
+func (s *HandlerTestSuite) SetupTest() {
+	s.BackendExecutionDataSuite.SetupTest()
+	chain := flow.MonotonicEmulator.Chain()
+	s.handler = NewHandler(s.backend, chain, makeConfig(5))
+}
+
+// TestHeartbeatResponse tests the periodic heartbeat response.
+//
+// Test Steps:
+// - Generate different events in blocks.
+// - Create different filters for generated events.
+// - Wait for either responses with filtered events or heartbeat responses.
+// - Verify that the responses are being sent with the proper heartbeat interval.
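+// The fakeReadServerImpl above stands in for a real client stream, so responses can be read
+// directly from its buffered channel.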
+func (s *HandlerTestSuite) TestHeartbeatResponse() { + reader := &fakeReadServerImpl{ + ctx: context.Background(), + received: make(chan *executiondata.SubscribeEventsResponse, 100), + } + + // notify backend block is available + s.highestBlockHeader = s.blocks[len(s.blocks)-1].ToHeader() + + s.Run("All events filter", func() { + // create empty event filter + filter := &executiondata.EventFilter{} + // create subscribe events request, set the created filter and heartbeatInterval + req := &executiondata.SubscribeEventsRequest{ + StartBlockHeight: 0, + Filter: filter, + HeartbeatInterval: 1, + } + + // subscribe for events + go func() { + err := s.handler.SubscribeEvents(req, reader) + require.NoError(s.T(), err) + }() + + for _, b := range s.blocks { + // consume execution data from subscription + unittest.RequireReturnsBefore(s.T(), func() { + resp, ok := <-reader.received + require.True(s.T(), ok, "channel closed while waiting for exec data for block %d %v", b.Height, b.ID()) + + blockID, err := convert.BlockID(resp.BlockId) + require.NoError(s.T(), err) + require.Equal(s.T(), b.ID(), blockID) + require.Equal(s.T(), b.Height, resp.BlockHeight) + }, time.Second, fmt.Sprintf("timed out waiting for exec data for block %d %v", b.Height, b.ID())) + } + }) + + s.Run("Event A.0x1.Foo.Bar filter with heartbeat interval 1", func() { + // create A.0x1.Foo.Bar event filter + pbFilter := &executiondata.EventFilter{ + EventType: []string{string(testEventTypes[0])}, + Contract: nil, + Address: nil, + } + // create subscribe events request, set the created filter and heartbeatInterval + req := &executiondata.SubscribeEventsRequest{ + StartBlockHeight: 0, + Filter: pbFilter, + HeartbeatInterval: 1, + } + + // subscribe for events + go func() { + err := s.handler.SubscribeEvents(req, reader) + require.NoError(s.T(), err) + }() + + for _, b := range s.blocks { + + // consume execution data from subscription + unittest.RequireReturnsBefore(s.T(), func() { + resp, ok := <-reader.received + require.True(s.T(), ok, "channel closed while waiting for exec data for block %d %v", b.Height, b.ID()) + + blockID, err := convert.BlockID(resp.BlockId) + require.NoError(s.T(), err) + require.Equal(s.T(), b.ID(), blockID) + require.Equal(s.T(), b.Height, resp.BlockHeight) + }, time.Second, fmt.Sprintf("timed out waiting for exec data for block %d %v", b.Height, b.ID())) + } + }) + + s.Run("Non existent filter with heartbeat interval 2", func() { + // create non existent filter + pbFilter := &executiondata.EventFilter{ + EventType: []string{"A.0x1.NonExistent.Event"}, + Contract: nil, + Address: nil, + } + + // create subscribe events request, set the created filter and heartbeatInterval + req := &executiondata.SubscribeEventsRequest{ + StartBlockHeight: 0, + Filter: pbFilter, + HeartbeatInterval: 2, + } + + // subscribe for events + go func() { + err := s.handler.SubscribeEvents(req, reader) + require.NoError(s.T(), err) + }() + + // expect a response for every other block + expectedBlocks := make([]*flow.Block, 0) + for i, block := range s.blocks { + if (i+1)%int(req.HeartbeatInterval) == 0 { + expectedBlocks = append(expectedBlocks, block) + } + } + + require.Len(s.T(), expectedBlocks, len(s.blocks)/int(req.HeartbeatInterval)) + + for _, b := range expectedBlocks { + // consume execution data from subscription + unittest.RequireReturnsBefore(s.T(), func() { + resp, ok := <-reader.received + require.True(s.T(), ok, "channel closed while waiting for exec data for block %d %v", b.Height, b.ID()) + + blockID, err := 
convert.BlockID(resp.BlockId) + require.NoError(s.T(), err) + require.Equal(s.T(), b.Height, resp.BlockHeight) + require.Equal(s.T(), b.ID(), blockID) + require.Empty(s.T(), resp.Events) + }, time.Second, fmt.Sprintf("timed out waiting for exec data for block %d %v", b.Height, b.ID())) + } + }) +} + +// TestGetExecutionDataByBlockID tests the execution data by block id with different event encoding versions. +func TestGetExecutionDataByBlockID(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ccfEvents, jsonEvents := generateEvents(t, 3) + + tests := []struct { + eventVersion entities.EventEncodingVersion + expected []flow.Event + }{ + { + entities.EventEncodingVersion_JSON_CDC_V0, + jsonEvents, + }, + { + entities.EventEncodingVersion_CCF_V0, + ccfEvents, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("test %s event encoding version", test.eventVersion.String()), func(t *testing.T) { + result := unittest.BlockExecutionDataFixture( + unittest.WithChunkExecutionDatas( + unittest.ChunkExecutionDataFixture(t, 1024, unittest.WithChunkEvents(ccfEvents)), + unittest.ChunkExecutionDataFixture(t, 1024, unittest.WithChunkEvents(ccfEvents)), + ), + ) + blockID := result.BlockID + + api := ssmock.NewAPI(t) + api.On("GetExecutionDataByBlockID", mock.Anything, blockID).Return(result, nil) + + h := NewHandler(api, flow.Localnet.Chain(), makeConfig(1)) + + response, err := h.GetExecutionDataByBlockID(ctx, &executiondata.GetExecutionDataByBlockIDRequest{ + BlockId: blockID[:], + EventEncodingVersion: test.eventVersion, + }) + require.NoError(t, err) + require.NotNil(t, response) + + blockExecutionData := response.GetBlockExecutionData() + require.Equal(t, blockID[:], blockExecutionData.GetBlockId()) + + convertedExecData, err := convert.MessageToBlockExecutionData(blockExecutionData, flow.Testnet.Chain()) + require.NoError(t, err) + + // Verify that the payload is valid + for _, chunk := range convertedExecData.ChunkExecutionDatas { + for i, e := range chunk.Events { + assert.Equal(t, test.expected[i], e) + + var err error + if test.eventVersion == entities.EventEncodingVersion_JSON_CDC_V0 { + _, err = jsoncdc.Decode(nil, e.Payload) + } else { + _, err = ccf.Decode(nil, e.Payload) + } + require.NoError(t, err) + } + } + }) + } +} + +// TestExecutionDataStream tests the execution data stream with different event encoding versions. +func TestExecutionDataStream(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Send a single response. + blockHeight := uint64(1) + + // Helper function to perform a stream request and handle responses. + makeStreamRequest := func( + stream *StreamMock[executiondata.SubscribeExecutionDataRequest, executiondata.SubscribeExecutionDataResponse], + api *ssmock.API, + request *executiondata.SubscribeExecutionDataRequest, + response *ExecutionDataResponse, + ) { + sub := subscription.NewSubscription(1) + + api.On("SubscribeExecutionData", mock.Anything, flow.ZeroID, uint64(0), mock.Anything).Return(sub) + + h := NewHandler(api, flow.Localnet.Chain(), makeConfig(1)) + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + wg.Done() + err := h.SubscribeExecutionData(request, stream) + require.NoError(t, err) + t.Log("subscription closed") + }() + wg.Wait() + + err := sub.Send(ctx, response, 100*time.Millisecond) + require.NoError(t, err) + + // Notify end of data. 
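+		// Closing the subscription closes its channel; the handler's send loop
+		// observes this and returns from SubscribeExecutionData without error,
+		// which is what the goroutine's require.NoError above asserts.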
+ sub.Close() + } + + // handleExecutionDataStreamResponses handles responses from the execution data stream. + handleExecutionDataStreamResponses := func( + stream *StreamMock[executiondata.SubscribeExecutionDataRequest, executiondata.SubscribeExecutionDataResponse], + version entities.EventEncodingVersion, + expectedEvents []flow.Event, + ) { + var responses []*executiondata.SubscribeExecutionDataResponse + for { + t.Log(len(responses)) + resp, err := stream.RecvToClient() + if err == io.EOF { + break + } + require.NoError(t, err) + responses = append(responses, resp) + close(stream.sentFromServer) + } + + for _, resp := range responses { + convertedExecData, err := convert.MessageToBlockExecutionData(resp.GetBlockExecutionData(), flow.Testnet.Chain()) + require.NoError(t, err) + + assert.Equal(t, blockHeight, resp.GetBlockHeight()) + + // only expect a single response + assert.Equal(t, 1, len(responses)) + + // Verify that the payload is valid + for _, chunk := range convertedExecData.ChunkExecutionDatas { + for i, e := range chunk.Events { + assert.Equal(t, expectedEvents[i], e) + + var err error + if version == entities.EventEncodingVersion_JSON_CDC_V0 { + _, err = jsoncdc.Decode(nil, e.Payload) + } else { + _, err = ccf.Decode(nil, e.Payload) + } + require.NoError(t, err) + } + } + } + } + + ccfEvents, jsonEvents := generateEvents(t, 3) + + tests := []struct { + eventVersion entities.EventEncodingVersion + expected []flow.Event + }{ + { + entities.EventEncodingVersion_JSON_CDC_V0, + jsonEvents, + }, + { + entities.EventEncodingVersion_CCF_V0, + ccfEvents, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("test %s event encoding version", test.eventVersion.String()), func(t *testing.T) { + api := ssmock.NewAPI(t) + stream := makeStreamMock[executiondata.SubscribeExecutionDataRequest, executiondata.SubscribeExecutionDataResponse](ctx) + + makeStreamRequest( + stream, + api, + &executiondata.SubscribeExecutionDataRequest{ + EventEncodingVersion: test.eventVersion, + }, + &ExecutionDataResponse{ + Height: blockHeight, + ExecutionData: unittest.BlockExecutionDataFixture( + unittest.WithChunkExecutionDatas( + unittest.ChunkExecutionDataFixture(t, 1024, unittest.WithChunkEvents(ccfEvents)), + unittest.ChunkExecutionDataFixture(t, 1024, unittest.WithChunkEvents(ccfEvents)), + ), + ), + }, + ) + handleExecutionDataStreamResponses(stream, test.eventVersion, test.expected) + }) + } +} + +// TestEventStream tests the event stream with different event encoding versions. +func TestEventStream(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blockHeight := uint64(1) + blockID := unittest.IdentifierFixture() + + // Helper function to perform a stream request and handle responses. 
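+	// It wires the mocked API to return a fresh subscription, runs the handler's
+	// SubscribeEvents in a goroutine, pushes a single response into the
+	// subscription, and then closes it to signal the end of the stream.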
+ makeStreamRequest := func( + stream *StreamMock[executiondata.SubscribeEventsRequest, executiondata.SubscribeEventsResponse], + api *ssmock.API, + request *executiondata.SubscribeEventsRequest, + response *EventsResponse, + ) { + sub := subscription.NewSubscription(1) + + api.On("SubscribeEvents", mock.Anything, flow.ZeroID, uint64(0), mock.Anything).Return(sub) + + h := NewHandler(api, flow.Localnet.Chain(), makeConfig(1)) + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + wg.Done() + err := h.SubscribeEvents(request, stream) + require.NoError(t, err) + t.Log("subscription closed") + }() + wg.Wait() + + // send a single response + err := sub.Send(ctx, response, 100*time.Millisecond) + require.NoError(t, err) + + // notify end of data + sub.Close() + } + + // handleExecutionDataStreamResponses handles responses from the execution data stream. + handleExecutionDataStreamResponses := func( + stream *StreamMock[executiondata.SubscribeEventsRequest, executiondata.SubscribeEventsResponse], + version entities.EventEncodingVersion, + expectedEvents []flow.Event, + ) { + var responses []*executiondata.SubscribeEventsResponse + for { + t.Log(len(responses)) + resp, err := stream.RecvToClient() + if err == io.EOF { + break + } + // make sure the payload is valid + require.NoError(t, err) + responses = append(responses, resp) + + // shutdown the stream after one response + close(stream.sentFromServer) + } + + for _, resp := range responses { + convertedEvents, err := convert.MessagesToEvents(resp.GetEvents()) + require.NoError(t, err) + + assert.Equal(t, blockHeight, resp.GetBlockHeight()) + assert.Equal(t, blockID, convert.MessageToIdentifier(resp.GetBlockId())) + assert.Equal(t, expectedEvents, convertedEvents) + // only expect a single response + assert.Equal(t, 1, len(responses)) + + for _, e := range convertedEvents { + var err error + if version == entities.EventEncodingVersion_JSON_CDC_V0 { + _, err = jsoncdc.Decode(nil, e.Payload) + } else { + _, err = ccf.Decode(nil, e.Payload) + } + require.NoError(t, err) + } + } + } + + // generate events with a payload to include + ccfEvents, jsonEvents := generateEvents(t, 3) + + tests := []struct { + eventVersion entities.EventEncodingVersion + expected []flow.Event + }{ + { + entities.EventEncodingVersion_JSON_CDC_V0, + jsonEvents, + }, + { + entities.EventEncodingVersion_CCF_V0, + ccfEvents, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("test %s event encoding version", test.eventVersion.String()), func(t *testing.T) { + stream := makeStreamMock[executiondata.SubscribeEventsRequest, executiondata.SubscribeEventsResponse](ctx) + + makeStreamRequest( + stream, + ssmock.NewAPI(t), + &executiondata.SubscribeEventsRequest{ + EventEncodingVersion: test.eventVersion, + }, + &EventsResponse{ + BlockID: blockID, + Height: blockHeight, + Events: ccfEvents, + }, + ) + handleExecutionDataStreamResponses(stream, test.eventVersion, test.expected) + }) + } +} + +// TestGetRegisterValues tests the register values. 
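+// It covers the happy path plus the gRPC error mapping: a request without
+// register IDs is rejected with InvalidArgument, unknown registers surface as
+// NotFound, and unindexed heights surface as OutOfRange.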
+func TestGetRegisterValues(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testHeight := uint64(1) + + // test register IDs + values + testIds := flow.RegisterIDs{ + flow.UUIDRegisterID(0), + flow.AccountStatusRegisterID(unittest.AddressFixture()), + unittest.RegisterIDFixture(), + } + testValues := []flow.RegisterValue{ + []byte("uno"), + []byte("dos"), + []byte("tres"), + } + invalidIDs := append(testIds, flow.RegisterID{}) // valid + invalid IDs + + t.Run("invalid message", func(t *testing.T) { + api := ssmock.NewAPI(t) + h := NewHandler(api, flow.Testnet.Chain(), makeConfig(1)) + + invalidMessage := &executiondata.GetRegisterValuesRequest{ + RegisterIds: nil, + } + _, err := h.GetRegisterValues(ctx, invalidMessage) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + }) + + t.Run("valid registers", func(t *testing.T) { + api := ssmock.NewAPI(t) + api.On("GetRegisterValues", testIds, testHeight).Return(testValues, nil) + h := NewHandler(api, flow.Testnet.Chain(), makeConfig(1)) + + validRegisters := make([]*entities.RegisterID, len(testIds)) + for i, id := range testIds { + validRegisters[i] = convert.RegisterIDToMessage(id) + } + + req := &executiondata.GetRegisterValuesRequest{ + RegisterIds: validRegisters, + BlockHeight: testHeight, + } + + resp, err := h.GetRegisterValues(ctx, req) + require.NoError(t, err) + require.Equal(t, testValues, resp.GetValues()) + }) + + t.Run("unavailable registers", func(t *testing.T) { + api := ssmock.NewAPI(t) + expectedErr := status.Errorf(codes.NotFound, "could not get register values: %v", storage.ErrNotFound) + api.On("GetRegisterValues", invalidIDs, testHeight).Return(nil, expectedErr) + h := NewHandler(api, flow.Testnet.Chain(), makeConfig(1)) + + unavailableRegisters := make([]*entities.RegisterID, len(invalidIDs)) + for i, id := range invalidIDs { + unavailableRegisters[i] = convert.RegisterIDToMessage(id) + } + + req := &executiondata.GetRegisterValuesRequest{ + RegisterIds: unavailableRegisters, + BlockHeight: testHeight, + } + + _, err := h.GetRegisterValues(ctx, req) + require.Equal(t, codes.NotFound, status.Code(err)) + }) + + t.Run("wrong height", func(t *testing.T) { + api := ssmock.NewAPI(t) + expectedErr := status.Errorf(codes.OutOfRange, "could not get register values: %v", storage.ErrHeightNotIndexed) + api.On("GetRegisterValues", testIds, testHeight+1).Return(nil, expectedErr) + h := NewHandler(api, flow.Testnet.Chain(), makeConfig(1)) + + validRegisters := make([]*entities.RegisterID, len(testIds)) + for i, id := range testIds { + validRegisters[i] = convert.RegisterIDToMessage(id) + } + + req := &executiondata.GetRegisterValuesRequest{ + RegisterIds: validRegisters, + BlockHeight: testHeight + 1, + } + + _, err := h.GetRegisterValues(ctx, req) + require.Equal(t, codes.OutOfRange, status.Code(err)) + }) +} + +func generateEvents(t *testing.T, n int) ([]flow.Event, []flow.Event) { + ccfEvents := unittest.EventGenerator.GetEventsWithEncoding(n, entities.EventEncodingVersion_CCF_V0) + jsonEvents := make([]flow.Event, len(ccfEvents)) + for i, e := range ccfEvents { + jsonEvent, err := convert.CcfEventToJsonEvent(e) + require.NoError(t, err) + jsonEvents[i] = *jsonEvent + } + return ccfEvents, jsonEvents +} + +func makeConfig(maxGlobalStreams uint32) Config { + return Config{ + EventFilterConfig: state_stream.DefaultEventFilterConfig, + ClientSendTimeout: subscription.DefaultSendTimeout, + ClientSendBufferSize: subscription.DefaultSendBufferSize, + 
MaxGlobalStreams: maxGlobalStreams, + HeartbeatInterval: subscription.DefaultHeartbeatInterval, + } +} + +func makeStreamMock[R, T any](ctx context.Context) *StreamMock[R, T] { + return &StreamMock[R, T]{ + ctx: ctx, + recvToServer: make(chan *R, 10), + sentFromServer: make(chan *T, 10), + } +} + +type StreamMock[R, T any] struct { + grpc.ServerStream + ctx context.Context + recvToServer chan *R + sentFromServer chan *T +} + +func (m *StreamMock[R, T]) Context() context.Context { + return m.ctx +} +func (m *StreamMock[R, T]) Send(resp *T) error { + m.sentFromServer <- resp + return nil +} + +func (m *StreamMock[R, T]) Recv() (*R, error) { + req, more := <-m.recvToServer + if !more { + return nil, io.EOF + } + return req, nil +} + +func (m *StreamMock[R, T]) SendFromClient(req *R) error { + m.recvToServer <- req + return nil +} + +func (m *StreamMock[R, T]) RecvToClient() (*T, error) { + response, more := <-m.sentFromServer + if !more { + return nil, io.EOF + } + return response, nil +} diff --git a/engine/access/state_stream/backend_events.go b/engine/access/state_stream/backend_events.go deleted file mode 100644 index 0f6472f59f8..00000000000 --- a/engine/access/state_stream/backend_events.go +++ /dev/null @@ -1,82 +0,0 @@ -package state_stream - -import ( - "context" - "fmt" - "time" - - "github.com/rs/zerolog" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/logging" -) - -type EventsResponse struct { - BlockID flow.Identifier - Height uint64 - Events flow.EventsList -} - -type EventsBackend struct { - log zerolog.Logger - headers storage.Headers - broadcaster *engine.Broadcaster - sendTimeout time.Duration - sendBufferSize int - - getExecutionData GetExecutionDataFunc - getStartHeight GetStartHeightFunc -} - -func (b EventsBackend) SubscribeEvents(ctx context.Context, startBlockID flow.Identifier, startHeight uint64, filter EventFilter) Subscription { - nextHeight, err := b.getStartHeight(startBlockID, startHeight) - if err != nil { - sub := NewSubscription(b.sendBufferSize) - if st, ok := status.FromError(err); ok { - sub.Fail(status.Errorf(st.Code(), "could not get start height: %s", st.Message())) - return sub - } - - sub.Fail(fmt.Errorf("could not get start height: %w", err)) - return sub - } - - sub := NewHeightBasedSubscription(b.sendBufferSize, nextHeight, b.getResponseFactory(filter)) - - go NewStreamer(b.log, b.broadcaster, b.sendTimeout, sub).Stream(ctx) - - return sub -} - -func (b EventsBackend) getResponseFactory(filter EventFilter) GetDataByHeightFunc { - return func(ctx context.Context, height uint64) (interface{}, error) { - header, err := b.headers.ByHeight(height) - if err != nil { - return nil, fmt.Errorf("could not get block header for height %d: %w", height, err) - } - - executionData, err := b.getExecutionData(ctx, header.ID()) - if err != nil { - return nil, fmt.Errorf("could not get execution data for block %s: %w", header.ID(), err) - } - - events := []flow.Event{} - for _, chunkExecutionData := range executionData.ChunkExecutionDatas { - events = append(events, filter.Filter(chunkExecutionData.Events)...) - } - - b.log.Trace(). - Hex("block_id", logging.ID(header.ID())). - Uint64("height", header.Height). 
- Msgf("sending %d events", len(events)) - - return &EventsResponse{ - BlockID: header.ID(), - Height: header.Height, - Events: events, - }, nil - } -} diff --git a/engine/access/state_stream/backend_events_test.go b/engine/access/state_stream/backend_events_test.go deleted file mode 100644 index 1b3067399c9..00000000000 --- a/engine/access/state_stream/backend_events_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package state_stream - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -type BackendEventsSuite struct { - BackendExecutionDataSuite -} - -func TestBackendEventsSuite(t *testing.T) { - suite.Run(t, new(BackendEventsSuite)) -} - -func (s *BackendEventsSuite) SetupTest() { - s.BackendExecutionDataSuite.SetupTest() -} - -// TestSubscribeEvents tests the SubscribeEvents method happy path -func (s *BackendEventsSuite) TestSubscribeEvents() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var err error - - type testType struct { - name string - highestBackfill int - startBlockID flow.Identifier - startHeight uint64 - filters EventFilter - } - - baseTests := []testType{ - { - name: "happy path - all new blocks", - highestBackfill: -1, // no backfill - startBlockID: flow.ZeroID, - startHeight: 0, - }, - { - name: "happy path - partial backfill", - highestBackfill: 2, // backfill the first 3 blocks - startBlockID: flow.ZeroID, - startHeight: s.blocks[0].Header.Height, - }, - { - name: "happy path - complete backfill", - highestBackfill: len(s.blocks) - 1, // backfill all blocks - startBlockID: s.blocks[0].ID(), - startHeight: 0, - }, - } - - // supports simple address comparisions for testing - chain := flow.MonotonicEmulator.Chain() - - // create variations for each of the base test - tests := make([]testType, 0, len(baseTests)*3) - for _, test := range baseTests { - t1 := test - t1.name = fmt.Sprintf("%s - all events", test.name) - t1.filters = EventFilter{} - tests = append(tests, t1) - - t2 := test - t2.name = fmt.Sprintf("%s - some events", test.name) - t2.filters, err = NewEventFilter(DefaultEventFilterConfig, chain, []string{string(testEventTypes[0])}, nil, nil) - require.NoError(s.T(), err) - tests = append(tests, t2) - - t3 := test - t3.name = fmt.Sprintf("%s - no events", test.name) - t3.filters, err = NewEventFilter(DefaultEventFilterConfig, chain, []string{"A.0x1.NonExistent.Event"}, nil, nil) - require.NoError(s.T(), err) - tests = append(tests, t3) - } - - for _, test := range tests { - s.Run(test.name, func() { - s.T().Logf("len(s.execDataMap) %d", len(s.execDataMap)) - - // add "backfill" block - blocks that are already in the database before the test starts - // this simulates a subscription on a past block - for i := 0; i <= test.highestBackfill; i++ { - s.T().Logf("backfilling block %d", i) - execData := s.execDataMap[s.blocks[i].ID()] - s.execDataDistributor.OnExecutionDataReceived(execData) - } - - subCtx, subCancel := context.WithCancel(ctx) - sub := s.backend.SubscribeEvents(subCtx, test.startBlockID, test.startHeight, test.filters) - - // loop over all of the blocks - for i, b := range s.blocks { - execData := s.execDataMap[b.ID()] - s.T().Logf("checking block %d %v", i, b.ID()) - - // simulate new exec data received. 
- // exec data for all blocks with index <= highestBackfill were already received - if i > test.highestBackfill { - s.execDataDistributor.OnExecutionDataReceived(execData) - s.broadcaster.Publish() - } - - expectedEvents := flow.EventsList{} - for _, event := range s.blockEvents[b.ID()] { - if test.filters.Match(event) { - expectedEvents = append(expectedEvents, event) - } - } - - // consume execution data from subscription - unittest.RequireReturnsBefore(s.T(), func() { - v, ok := <-sub.Channel() - require.True(s.T(), ok, "channel closed while waiting for exec data for block %d %v: err: %v", b.Header.Height, b.ID(), sub.Err()) - - resp, ok := v.(*EventsResponse) - require.True(s.T(), ok, "unexpected response type: %T", v) - - assert.Equal(s.T(), b.Header.ID(), resp.BlockID) - assert.Equal(s.T(), b.Header.Height, resp.Height) - assert.Equal(s.T(), expectedEvents, resp.Events) - }, time.Second, fmt.Sprintf("timed out waiting for exec data for block %d %v", b.Header.Height, b.ID())) - } - - // make sure there are no new messages waiting. the channel should be opened with nothing waiting - unittest.RequireNeverReturnBefore(s.T(), func() { - <-sub.Channel() - }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") - - // stop the subscription - subCancel() - - // ensure subscription shuts down gracefully - unittest.RequireReturnsBefore(s.T(), func() { - v, ok := <-sub.Channel() - assert.Nil(s.T(), v) - assert.False(s.T(), ok) - assert.ErrorIs(s.T(), sub.Err(), context.Canceled) - }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") - }) - } -} - -func (s *BackendExecutionDataSuite) TestSubscribeEventsHandlesErrors() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - s.Run("returns error if both start blockID and start height are provided", func() { - subCtx, subCancel := context.WithCancel(ctx) - defer subCancel() - - sub := s.backend.SubscribeEvents(subCtx, unittest.IdentifierFixture(), 1, EventFilter{}) - assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err())) - }) - - s.Run("returns error for unindexed start blockID", func() { - subCtx, subCancel := context.WithCancel(ctx) - defer subCancel() - - sub := s.backend.SubscribeEvents(subCtx, unittest.IdentifierFixture(), 0, EventFilter{}) - assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "exepected NotFound, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) - }) - - // make sure we're starting with a fresh cache - s.execDataCache.Clear() - - s.Run("returns error for unindexed start height", func() { - subCtx, subCancel := context.WithCancel(ctx) - defer subCancel() - - sub := s.backend.SubscribeEvents(subCtx, flow.ZeroID, s.blocks[len(s.blocks)-1].Header.Height+10, EventFilter{}) - assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err()), "exepected NotFound, got %v: %v", status.Code(sub.Err()).String(), sub.Err()) - }) -} diff --git a/engine/access/state_stream/backend_executiondata.go b/engine/access/state_stream/backend_executiondata.go deleted file mode 100644 index b39df9da610..00000000000 --- a/engine/access/state_stream/backend_executiondata.go +++ /dev/null @@ -1,86 +0,0 @@ -package state_stream - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/rs/zerolog" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" - 
"github.com/onflow/flow-go/storage" -) - -type ExecutionDataResponse struct { - Height uint64 - ExecutionData *execution_data.BlockExecutionData -} - -type ExecutionDataBackend struct { - log zerolog.Logger - headers storage.Headers - broadcaster *engine.Broadcaster - sendTimeout time.Duration - sendBufferSize int - - getExecutionData GetExecutionDataFunc - getStartHeight GetStartHeightFunc -} - -func (b *ExecutionDataBackend) GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionData, error) { - executionData, err := b.getExecutionData(ctx, blockID) - - if err != nil { - // need custom not found handler due to blob not found error - if errors.Is(err, storage.ErrNotFound) || execution_data.IsBlobNotFoundError(err) { - return nil, status.Errorf(codes.NotFound, "could not find execution data: %v", err) - } - - return nil, rpc.ConvertError(err, "could not get execution data", codes.Internal) - } - - return executionData.BlockExecutionData, nil -} - -func (b *ExecutionDataBackend) SubscribeExecutionData(ctx context.Context, startBlockID flow.Identifier, startHeight uint64) Subscription { - nextHeight, err := b.getStartHeight(startBlockID, startHeight) - if err != nil { - sub := NewSubscription(b.sendBufferSize) - if st, ok := status.FromError(err); ok { - sub.Fail(status.Errorf(st.Code(), "could not get start height: %s", st.Message())) - return sub - } - - sub.Fail(fmt.Errorf("could not get start height: %w", err)) - return sub - } - - sub := NewHeightBasedSubscription(b.sendBufferSize, nextHeight, b.getResponse) - - go NewStreamer(b.log, b.broadcaster, b.sendTimeout, sub).Stream(ctx) - - return sub -} - -func (b *ExecutionDataBackend) getResponse(ctx context.Context, height uint64) (interface{}, error) { - header, err := b.headers.ByHeight(height) - if err != nil { - return nil, fmt.Errorf("could not get block header for height %d: %w", height, err) - } - - executionData, err := b.getExecutionData(ctx, header.ID()) - if err != nil { - return nil, fmt.Errorf("could not get execution data for block %s: %w", header.ID(), err) - } - - return &ExecutionDataResponse{ - Height: header.Height, - ExecutionData: executionData.BlockExecutionData, - }, nil -} diff --git a/engine/access/state_stream/backend_executiondata_test.go b/engine/access/state_stream/backend_executiondata_test.go deleted file mode 100644 index 0120d47a335..00000000000 --- a/engine/access/state_stream/backend_executiondata_test.go +++ /dev/null @@ -1,381 +0,0 @@ -package state_stream - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/blobs" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" - "github.com/onflow/flow-go/module/mempool/herocache" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/state_synchronization/requester" - protocolmock "github.com/onflow/flow-go/state/protocol/mock" - "github.com/onflow/flow-go/storage" - storagemock "github.com/onflow/flow-go/storage/mock" - "github.com/onflow/flow-go/utils/unittest" -) - -var testEventTypes = []flow.EventType{ - "A.0x1.Foo.Bar", - "A.0x2.Zoo.Moo", - 
"A.0x3.Goo.Hoo", -} - -type BackendExecutionDataSuite struct { - suite.Suite - - state *protocolmock.State - snapshot *protocolmock.Snapshot - headers *storagemock.Headers - seals *storagemock.Seals - results *storagemock.ExecutionResults - - bs blobs.Blobstore - eds execution_data.ExecutionDataStore - broadcaster *engine.Broadcaster - execDataDistributor *requester.ExecutionDataDistributor - execDataCache *herocache.BlockExecutionData - backend *StateStreamBackend - - blocks []*flow.Block - blockEvents map[flow.Identifier]flow.EventsList - execDataMap map[flow.Identifier]*execution_data.BlockExecutionDataEntity - blockMap map[uint64]*flow.Block - sealMap map[flow.Identifier]*flow.Seal - resultMap map[flow.Identifier]*flow.ExecutionResult -} - -func TestBackendExecutionDataSuite(t *testing.T) { - suite.Run(t, new(BackendExecutionDataSuite)) -} - -func (s *BackendExecutionDataSuite) SetupTest() { - rand.Seed(time.Now().UnixNano()) - - logger := unittest.Logger() - - s.state = protocolmock.NewState(s.T()) - s.snapshot = protocolmock.NewSnapshot(s.T()) - s.headers = storagemock.NewHeaders(s.T()) - s.seals = storagemock.NewSeals(s.T()) - s.results = storagemock.NewExecutionResults(s.T()) - - s.bs = blobs.NewBlobstore(dssync.MutexWrap(datastore.NewMapDatastore())) - s.eds = execution_data.NewExecutionDataStore(s.bs, execution_data.DefaultSerializer) - - s.broadcaster = engine.NewBroadcaster() - s.execDataDistributor = requester.NewExecutionDataDistributor() - - s.execDataCache = herocache.NewBlockExecutionData(DefaultCacheSize, logger, metrics.NewNoopCollector()) - - conf := Config{ - ClientSendTimeout: DefaultSendTimeout, - ClientSendBufferSize: DefaultSendBufferSize, - } - - var err error - s.backend, err = New( - logger, - conf, - s.state, - s.headers, - s.seals, - s.results, - s.eds, - s.execDataCache, - s.broadcaster, - ) - require.NoError(s.T(), err) - - blockCount := 5 - s.execDataMap = make(map[flow.Identifier]*execution_data.BlockExecutionDataEntity, blockCount) - s.blockEvents = make(map[flow.Identifier]flow.EventsList, blockCount) - s.blockMap = make(map[uint64]*flow.Block, blockCount) - s.sealMap = make(map[flow.Identifier]*flow.Seal, blockCount) - s.resultMap = make(map[flow.Identifier]*flow.ExecutionResult, blockCount) - s.blocks = make([]*flow.Block, 0, blockCount) - - // generate blockCount consecutive blocks with associated seal, result and execution data - firstBlock := unittest.BlockFixture() - parent := firstBlock.Header - for i := 0; i < blockCount; i++ { - var block *flow.Block - if i == 0 { - block = &firstBlock - } else { - block = unittest.BlockWithParentFixture(parent) - } - // update for next iteration - parent = block.Header - - seal := unittest.BlockSealsFixture(1)[0] - result := unittest.ExecutionResultFixture() - blockEvents := unittest.BlockEventsFixture(block.Header, (i%len(testEventTypes))*3+1, testEventTypes...) 
- - numChunks := 5 - chunkDatas := make([]*execution_data.ChunkExecutionData, 0, numChunks) - for i := 0; i < numChunks; i++ { - var events flow.EventsList - switch { - case i >= len(blockEvents.Events): - events = flow.EventsList{} - case i == numChunks-1: - events = blockEvents.Events[i:] - default: - events = flow.EventsList{blockEvents.Events[i]} - } - chunkDatas = append(chunkDatas, unittest.ChunkExecutionDataFixture(s.T(), 5*execution_data.DefaultMaxBlobSize, unittest.WithChunkEvents(events))) - } - execData := unittest.BlockExecutionDataFixture( - unittest.WithBlockExecutionDataBlockID(block.ID()), - unittest.WithChunkExecutionDatas(chunkDatas...), - ) - - result.ExecutionDataID, err = s.eds.AddExecutionData(context.TODO(), execData) - assert.NoError(s.T(), err) - - s.blocks = append(s.blocks, block) - s.execDataMap[block.ID()] = execution_data.NewBlockExecutionDataEntity(result.ExecutionDataID, execData) - s.blockEvents[block.ID()] = blockEvents.Events - s.blockMap[block.Header.Height] = block - s.sealMap[block.ID()] = seal - s.resultMap[seal.ResultID] = result - - s.T().Logf("adding exec data for block %d %d %v => %v", i, block.Header.Height, block.ID(), result.ExecutionDataID) - } - - s.state.On("Sealed").Return(s.snapshot, nil).Maybe() - s.snapshot.On("Head").Return(firstBlock.Header, nil).Maybe() - - s.seals.On("FinalizedSealForBlock", mock.AnythingOfType("flow.Identifier")).Return( - func(blockID flow.Identifier) *flow.Seal { - if seal, ok := s.sealMap[blockID]; ok { - return seal - } - return nil - }, - func(blockID flow.Identifier) error { - if _, ok := s.sealMap[blockID]; ok { - return nil - } - return storage.ErrNotFound - }, - ).Maybe() - - s.results.On("ByID", mock.AnythingOfType("flow.Identifier")).Return( - func(resultID flow.Identifier) *flow.ExecutionResult { - if result, ok := s.resultMap[resultID]; ok { - return result - } - return nil - }, - func(resultID flow.Identifier) error { - if _, ok := s.resultMap[resultID]; ok { - return nil - } - return storage.ErrNotFound - }, - ).Maybe() - - s.headers.On("ByBlockID", mock.AnythingOfType("flow.Identifier")).Return( - func(blockID flow.Identifier) *flow.Header { - for _, block := range s.blockMap { - if block.ID() == blockID { - return block.Header - } - } - return nil - }, - func(blockID flow.Identifier) error { - for _, block := range s.blockMap { - if block.ID() == blockID { - return nil - } - } - return storage.ErrNotFound - }, - ).Maybe() - - s.headers.On("ByHeight", mock.AnythingOfType("uint64")).Return( - func(height uint64) *flow.Header { - if block, ok := s.blockMap[height]; ok { - return block.Header - } - return nil - }, - func(height uint64) error { - if _, ok := s.blockMap[height]; ok { - return nil - } - return storage.ErrNotFound - }, - ).Maybe() -} - -func (s *BackendExecutionDataSuite) TestGetExecutionDataByBlockID() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - block := s.blocks[0] - seal := s.sealMap[block.ID()] - result := s.resultMap[seal.ResultID] - execData := s.execDataMap[block.ID()] - - var err error - s.Run("happy path TestGetExecutionDataByBlockID success", func() { - result.ExecutionDataID, err = s.eds.AddExecutionData(ctx, execData.BlockExecutionData) - require.NoError(s.T(), err) - - res, err := s.backend.GetExecutionDataByBlockID(ctx, block.ID()) - assert.Equal(s.T(), execData.BlockExecutionData, res) - assert.NoError(s.T(), err) - }) - - s.execDataCache.Clear() - - s.Run("missing exec data for TestGetExecutionDataByBlockID failure", func() { - 
result.ExecutionDataID = unittest.IdentifierFixture() - - execDataRes, err := s.backend.GetExecutionDataByBlockID(ctx, block.ID()) - assert.Nil(s.T(), execDataRes) - assert.Equal(s.T(), codes.NotFound, status.Code(err)) - }) -} - -func (s *BackendExecutionDataSuite) TestSubscribeExecutionData() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - tests := []struct { - name string - highestBackfill int - startBlockID flow.Identifier - startHeight uint64 - }{ - { - name: "happy path - all new blocks", - highestBackfill: -1, // no backfill - startBlockID: flow.ZeroID, - startHeight: 0, - }, - { - name: "happy path - partial backfill", - highestBackfill: 2, // backfill the first 3 blocks - startBlockID: flow.ZeroID, - startHeight: s.blocks[0].Header.Height, - }, - { - name: "happy path - complete backfill", - highestBackfill: len(s.blocks) - 1, // backfill all blocks - startBlockID: s.blocks[0].ID(), - startHeight: 0, - }, - } - - for _, test := range tests { - s.Run(test.name, func() { - // make sure we're starting with a fresh cache - s.execDataCache.Clear() - - s.T().Logf("len(s.execDataMap) %d", len(s.execDataMap)) - - // add "backfill" block - blocks that are already in the database before the test starts - // this simulates a subscription on a past block - for i := 0; i <= test.highestBackfill; i++ { - s.T().Logf("backfilling block %d", i) - execData := s.execDataMap[s.blocks[i].ID()] - s.execDataDistributor.OnExecutionDataReceived(execData) - } - - subCtx, subCancel := context.WithCancel(ctx) - sub := s.backend.SubscribeExecutionData(subCtx, test.startBlockID, test.startHeight) - - // loop over all of the blocks - for i, b := range s.blocks { - execData := s.execDataMap[b.ID()] - s.T().Logf("checking block %d %v", i, b.ID()) - - // simulate new exec data received. - // exec data for all blocks with index <= highestBackfill were already received - if i > test.highestBackfill { - s.execDataDistributor.OnExecutionDataReceived(execData) - s.broadcaster.Publish() - } - - // consume execution data from subscription - unittest.RequireReturnsBefore(s.T(), func() { - v, ok := <-sub.Channel() - require.True(s.T(), ok, "channel closed while waiting for exec data for block %d %v: err: %v", b.Header.Height, b.ID(), sub.Err()) - - resp, ok := v.(*ExecutionDataResponse) - require.True(s.T(), ok, "unexpected response type: %T", v) - - assert.Equal(s.T(), b.Header.Height, resp.Height) - assert.Equal(s.T(), execData.BlockExecutionData, resp.ExecutionData) - }, time.Second, fmt.Sprintf("timed out waiting for exec data for block %d %v", b.Header.Height, b.ID())) - } - - // make sure there are no new messages waiting. 
the channel should be opened with nothing waiting - unittest.RequireNeverReturnBefore(s.T(), func() { - <-sub.Channel() - }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") - - // stop the subscription - subCancel() - - // ensure subscription shuts down gracefully - unittest.RequireReturnsBefore(s.T(), func() { - v, ok := <-sub.Channel() - assert.Nil(s.T(), v) - assert.False(s.T(), ok) - assert.ErrorIs(s.T(), sub.Err(), context.Canceled) - }, 100*time.Millisecond, "timed out waiting for subscription to shutdown") - }) - } -} - -func (s *BackendExecutionDataSuite) TestSubscribeExecutionDataHandlesErrors() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - s.Run("returns error if both start blockID and start height are provided", func() { - subCtx, subCancel := context.WithCancel(ctx) - defer subCancel() - - sub := s.backend.SubscribeExecutionData(subCtx, unittest.IdentifierFixture(), 1) - assert.Equal(s.T(), codes.InvalidArgument, status.Code(sub.Err())) - }) - - s.Run("returns error for unindexed start blockID", func() { - subCtx, subCancel := context.WithCancel(ctx) - defer subCancel() - - sub := s.backend.SubscribeExecutionData(subCtx, unittest.IdentifierFixture(), 0) - assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err())) - }) - - // make sure we're starting with a fresh cache - s.execDataCache.Clear() - - s.Run("returns error for unindexed start height", func() { - subCtx, subCancel := context.WithCancel(ctx) - defer subCancel() - - sub := s.backend.SubscribeExecutionData(subCtx, flow.ZeroID, s.blocks[len(s.blocks)-1].Header.Height+10) - assert.Equal(s.T(), codes.NotFound, status.Code(sub.Err())) - }) -} diff --git a/engine/access/state_stream/engine.go b/engine/access/state_stream/engine.go deleted file mode 100644 index ee61ed56ec7..00000000000 --- a/engine/access/state_stream/engine.go +++ /dev/null @@ -1,179 +0,0 @@ -package state_stream - -import ( - "fmt" - "net" - "time" - - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - access "github.com/onflow/flow/protobuf/go/flow/executiondata" - "github.com/rs/zerolog" - "google.golang.org/grpc" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/common/rpc" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/module/mempool/herocache" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/logging" -) - -// Config defines the configurable options for the ingress server. -type Config struct { - EventFilterConfig - - // ListenAddr is the address the GRPC server will listen on as host:port - ListenAddr string - - // MaxExecutionDataMsgSize is the max message size for block execution data API - MaxExecutionDataMsgSize uint - - // RpcMetricsEnabled specifies whether to enable the GRPC metrics - RpcMetricsEnabled bool - - // MaxGlobalStreams defines the global max number of streams that can be open at the same time. - MaxGlobalStreams uint32 - - // ExecutionDataCacheSize is the max number of objects for the execution data cache. - ExecutionDataCacheSize uint32 - - // ClientSendTimeout is the timeout for sending a message to the client. After the timeout, - // the stream is closed with an error. 
- ClientSendTimeout time.Duration - - // ClientSendBufferSize is the size of the response buffer for sending messages to the client. - ClientSendBufferSize uint -} - -// Engine exposes the server with the state stream API. -// By default, this engine is not enabled. -// In order to run this engine a port for the GRPC server to be served on should be specified in the run config. -type Engine struct { - *component.ComponentManager - log zerolog.Logger - backend *StateStreamBackend - server *grpc.Server - config Config - chain flow.Chain - handler *Handler - - execDataBroadcaster *engine.Broadcaster - execDataCache *herocache.BlockExecutionData - - stateStreamGrpcAddress net.Addr -} - -// NewEng returns a new ingress server. -func NewEng( - log zerolog.Logger, - config Config, - execDataStore execution_data.ExecutionDataStore, - state protocol.State, - headers storage.Headers, - seals storage.Seals, - results storage.ExecutionResults, - chainID flow.ChainID, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, GetExecutionDataByBlockID->300 - apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the gRPC API e.g. Ping->50, GetExecutionDataByBlockID->10 - heroCacheMetrics module.HeroCacheMetrics, -) (*Engine, error) { - logger := log.With().Str("engine", "state_stream_rpc").Logger() - - // create a GRPC server to serve GRPC clients - grpcOpts := []grpc.ServerOption{ - grpc.MaxRecvMsgSize(int(config.MaxExecutionDataMsgSize)), - grpc.MaxSendMsgSize(int(config.MaxExecutionDataMsgSize)), - } - - var interceptors []grpc.UnaryServerInterceptor // ordered list of interceptors - // if rpc metrics is enabled, add the grpc metrics interceptor as a server option - if config.RpcMetricsEnabled { - interceptors = append(interceptors, grpc_prometheus.UnaryServerInterceptor) - } - - if len(apiRatelimits) > 0 { - // create a rate limit interceptor - rateLimitInterceptor := rpc.NewRateLimiterInterceptor(log, apiRatelimits, apiBurstLimits).UnaryServerInterceptor - // append the rate limit interceptor to the list of interceptors - interceptors = append(interceptors, rateLimitInterceptor) - } - - // add the logging interceptor, ensure it is innermost wrapper - interceptors = append(interceptors, rpc.LoggingInterceptor(log)...) - - // create a chained unary interceptor - chainedInterceptors := grpc.ChainUnaryInterceptor(interceptors...) - grpcOpts = append(grpcOpts, chainedInterceptors) - - server := grpc.NewServer(grpcOpts...) - - execDataCache := herocache.NewBlockExecutionData(config.ExecutionDataCacheSize, logger, heroCacheMetrics) - - broadcaster := engine.NewBroadcaster() - - backend, err := New(logger, config, state, headers, seals, results, execDataStore, execDataCache, broadcaster) - if err != nil { - return nil, fmt.Errorf("could not create state stream backend: %w", err) - } - - e := &Engine{ - log: logger, - backend: backend, - server: server, - chain: chainID.Chain(), - config: config, - handler: NewHandler(backend, chainID.Chain(), config.EventFilterConfig, config.MaxGlobalStreams), - execDataBroadcaster: broadcaster, - execDataCache: execDataCache, - } - - e.ComponentManager = component.NewComponentManagerBuilder(). - AddWorker(e.serve). - Build() - - access.RegisterExecutionDataAPIServer(e.server, e.handler) - - return e, nil -} - -// OnExecutionData is called to notify the engine when a new execution data is received. 
-func (e *Engine) OnExecutionData(executionData *execution_data.BlockExecutionDataEntity) { - lg := e.log.With().Hex("block_id", logging.ID(executionData.BlockID)).Logger() - - lg.Trace().Msg("received execution data") - - if ok := e.execDataCache.Add(executionData); !ok { - lg.Warn().Msg("failed to add execution data to cache") - } - - e.execDataBroadcaster.Publish() -} - -// serve starts the gRPC server. -// When this function returns, the server is considered ready. -func (e *Engine) serve(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - e.log.Info().Str("state_stream_address", e.config.ListenAddr).Msg("starting grpc server on address") - l, err := net.Listen("tcp", e.config.ListenAddr) - if err != nil { - ctx.Throw(fmt.Errorf("error starting grpc server: %w", err)) - } - - e.stateStreamGrpcAddress = l.Addr() - e.log.Debug().Str("state_stream_address", e.stateStreamGrpcAddress.String()).Msg("listening on port") - - go func() { - ready() - err = e.server.Serve(l) - if err != nil { - ctx.Throw(fmt.Errorf("error trying to serve grpc server: %w", err)) - } - }() - - <-ctx.Done() - e.server.GracefulStop() -} diff --git a/engine/access/state_stream/event.go b/engine/access/state_stream/event.go deleted file mode 100644 index c88c78c9a66..00000000000 --- a/engine/access/state_stream/event.go +++ /dev/null @@ -1,59 +0,0 @@ -package state_stream - -import ( - "fmt" - "strings" - - "github.com/onflow/flow-go/model/flow" -) - -type ParsedEventType int - -const ( - ProtocolEventType ParsedEventType = iota + 1 - AccountEventType -) - -type ParsedEvent struct { - Type ParsedEventType - EventType flow.EventType - Address string - Contract string - ContractName string - Name string -} - -// ParseEvent parses an event type into its parts. There are 2 valid EventType formats: -// - flow.[EventName] -// - A.[Address].[Contract].[EventName] -// Any other format results in an error. 
-func ParseEvent(eventType flow.EventType) (*ParsedEvent, error) { - parts := strings.Split(string(eventType), ".") - - switch parts[0] { - case "flow": - if len(parts) == 2 { - return &ParsedEvent{ - Type: ProtocolEventType, - EventType: eventType, - Contract: parts[0], - ContractName: parts[0], - Name: parts[1], - }, nil - } - - case "A": - if len(parts) == 4 { - return &ParsedEvent{ - Type: AccountEventType, - EventType: eventType, - Address: parts[1], - Contract: fmt.Sprintf("A.%s.%s", parts[1], parts[2]), - ContractName: parts[2], - Name: parts[3], - }, nil - } - } - - return nil, fmt.Errorf("invalid event type: %s", eventType) -} diff --git a/engine/access/state_stream/event_test.go b/engine/access/state_stream/event_test.go deleted file mode 100644 index 3dbccd34406..00000000000 --- a/engine/access/state_stream/event_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package state_stream_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/engine/access/state_stream" - "github.com/onflow/flow-go/model/flow" -) - -func TestParseEvent(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - eventType flow.EventType - expected state_stream.ParsedEvent - }{ - { - name: "flow event", - eventType: "flow.AccountCreated", - expected: state_stream.ParsedEvent{ - Type: state_stream.ProtocolEventType, - EventType: "flow.AccountCreated", - Contract: "flow", - ContractName: "flow", - Name: "AccountCreated", - }, - }, - { - name: "account event", - eventType: "A.0000000000000001.Contract1.EventA", - expected: state_stream.ParsedEvent{ - Type: state_stream.AccountEventType, - EventType: "A.0000000000000001.Contract1.EventA", - Address: "0000000000000001", - Contract: "A.0000000000000001.Contract1", - ContractName: "Contract1", - Name: "EventA", - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - event, err := state_stream.ParseEvent(test.eventType) - require.NoError(t, err) - - assert.Equal(t, test.expected.Type, event.Type) - assert.Equal(t, test.expected.EventType, event.EventType) - assert.Equal(t, test.expected.Address, event.Address) - assert.Equal(t, test.expected.Contract, event.Contract) - assert.Equal(t, test.expected.Name, event.Name) - }) - } -} - -func TestParseEvent_Invalid(t *testing.T) { - t.Parallel() - - eventTypes := []flow.EventType{ - "", // not enough parts - "invalid", // not enough parts - "invalid.event", // invalid first part - "B.0000000000000001.invalid.event", // invalid first part - "flow", // incorrect number of parts for protocol event - "flow.invalid.event", // incorrect number of parts for protocol event - "A.0000000000000001.invalid", // incorrect number of parts for account event - "A.0000000000000001.invalid.a.b", // incorrect number of parts for account event - - } - - for _, eventType := range eventTypes { - _, err := state_stream.ParseEvent(eventType) - assert.Error(t, err, "expected error for event type: %s", eventType) - } -} diff --git a/engine/access/state_stream/filter.go b/engine/access/state_stream/filter.go index ab90b98240c..f26a401f9fc 100644 --- a/engine/access/state_stream/filter.go +++ b/engine/access/state_stream/filter.go @@ -4,6 +4,10 @@ import ( "fmt" "strings" + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" + + "github.com/onflow/flow-go/model/events" "github.com/onflow/flow-go/model/flow" ) @@ -16,28 +20,36 @@ const ( // DefaultMaxContracts is the default maximum number of contracts that 
can be specified in a filter
 	DefaultMaxContracts = 1000
+
+	// DefaultMaxAccountAddresses is the default maximum number of account addresses that can be specified in a filter
+	DefaultMaxAccountAddresses = 100
 )
 
 // EventFilterConfig is used to configure the limits for EventFilters
 type EventFilterConfig struct {
-	MaxEventTypes int
-	MaxAddresses  int
-	MaxContracts  int
+	MaxEventTypes     int
+	MaxAddresses      int
+	MaxContracts      int
+	MaxAccountAddress int
 }
 
 // DefaultEventFilterConfig is the default configuration for EventFilters
 var DefaultEventFilterConfig = EventFilterConfig{
-	MaxEventTypes: DefaultMaxEventTypes,
-	MaxAddresses:  DefaultMaxAddresses,
-	MaxContracts:  DefaultMaxContracts,
+	MaxEventTypes:     DefaultMaxEventTypes,
+	MaxAddresses:      DefaultMaxAddresses,
+	MaxContracts:      DefaultMaxContracts,
+	MaxAccountAddress: DefaultMaxAccountAddresses,
 }
 
+type FieldFilter map[string]map[string]struct{}
+
 // EventFilter represents a filter applied to events for a given subscription
 type EventFilter struct {
-	hasFilters bool
-	EventTypes map[flow.EventType]struct{}
-	Addresses  map[string]struct{}
-	Contracts  map[string]struct{}
+	hasFilters        bool
+	EventTypes        map[flow.EventType]struct{}
+	Addresses         map[string]struct{}
+	Contracts         map[string]struct{}
+	EventFieldFilters map[flow.EventType]FieldFilter
 }
 
 func NewEventFilter(
@@ -62,16 +74,17 @@ func NewEventFilter(
 	}
 
 	f := EventFilter{
-		EventTypes: make(map[flow.EventType]struct{}, len(eventTypes)),
-		Addresses:  make(map[string]struct{}, len(addresses)),
-		Contracts:  make(map[string]struct{}, len(contracts)),
+		EventTypes:        make(map[flow.EventType]struct{}, len(eventTypes)),
+		Addresses:         make(map[string]struct{}, len(addresses)),
+		Contracts:         make(map[string]struct{}, len(contracts)),
+		EventFieldFilters: make(map[flow.EventType]FieldFilter),
 	}
 
 	// Check all of the filters to ensure they are correctly formatted. This helps avoid searching
 	// with criteria that will never match.
 	for _, event := range eventTypes {
 		eventType := flow.EventType(event)
-		if err := validateEventType(eventType); err != nil {
+		if err := validateEventType(eventType, chain); err != nil {
 			return EventFilter{}, err
 		}
 		f.EventTypes[eventType] = struct{}{}
@@ -97,8 +110,7 @@ func NewEventFilter(
 	return f, nil
 }
 
-// Filter applies the all filters on the provided list of events, and returns a list of events that
-// match
+// Filter applies all of the filters to the provided list of events, and returns the events that match
 func (f *EventFilter) Filter(events flow.EventsList) flow.EventsList {
 	var filteredEvents flow.EventsList
 	for _, event := range events {
@@ -116,11 +128,15 @@ func (f *EventFilter) Match(event flow.Event) bool {
 		return true
 	}
 
+	if fieldFilter, ok := f.EventFieldFilters[event.Type]; ok {
+		return f.matchFieldFilter(&event, fieldFilter)
+	}
+
 	if _, ok := f.EventTypes[event.Type]; ok {
 		return true
 	}
 
-	parsed, err := ParseEvent(event.Type)
+	parsed, err := events.ParseEvent(event.Type)
 	if err != nil {
 		// TODO: log this error
 		return false
@@ -130,7 +146,7 @@ func (f *EventFilter) Match(event flow.Event) bool {
 		return true
 	}
 
-	if parsed.Type == AccountEventType {
+	if parsed.Type == events.AccountEventType {
 		_, ok := f.Addresses[parsed.Address]
 		return ok
 	}
@@ -138,9 +154,61 @@ func (f *EventFilter) Match(event flow.Event) bool {
 	return false
 }
 
+// matchFieldFilter checks whether the given event matches the specified field filters.
+// It returns true if any of the event's decoded fields matches one of the provided filter values, otherwise false; an empty filter matches every event.
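+// Illustrative example (field name and value are hypothetical): a FieldFilter
+// of {"owner": {"0x1": {}}} matches an event whose decoded "owner" field
+// renders as "0x1" via cadence.Value.String().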
+func (f *EventFilter) matchFieldFilter(event *flow.Event, fieldFilters FieldFilter) bool {
+	if len(fieldFilters) == 0 {
+		return true // empty list always matches
+	}
+
+	fields, err := getEventFields(event)
+	if err != nil {
+		return false
+	}
+
+	for name, value := range fields {
+		filters, ok := fieldFilters[name]
+		if !ok {
+			continue // no filter for this field
+		}
+
+		fieldValue := value.String()
+		if _, ok := filters[fieldValue]; ok {
+			return true
+		}
+	}
+
+	return false
+}
+
+// getEventFields extracts the field names and values from the payload of a flow event.
+// It decodes the event payload into a Cadence event and returns its fields mapped by name.
+// Parameters:
+// - event: The Flow event to extract field names and values from.
+// Returns:
+// - map[string]cadence.Value: A map containing the name and value of each field extracted from the event payload.
+// - error: An error encountered during decoding, if the payload is not a Cadence event, or if the fields are empty.
+func getEventFields(event *flow.Event) (map[string]cadence.Value, error) {
+	data, err := ccf.Decode(nil, event.Payload)
+	if err != nil {
+		return nil, err
+	}
+
+	cdcEvent, ok := data.(cadence.Event)
+	if !ok {
+		return nil, fmt.Errorf("decoded event payload is not a cadence event")
+	}
+
+	fields := cadence.FieldsMappedByName(cdcEvent)
+	if fields == nil {
+		return nil, fmt.Errorf("fields are empty")
+	}
+	return fields, nil
+}
+
 // validateEventType ensures that the event type matches the expected format
-func validateEventType(eventType flow.EventType) error {
-	_, err := ParseEvent(flow.EventType(eventType))
+func validateEventType(eventType flow.EventType, chain flow.Chain) error {
+	_, err := events.ValidateEvent(eventType, chain)
 	if err != nil {
 		return fmt.Errorf("invalid event type %s: %w", eventType, err)
 	}
diff --git a/engine/access/state_stream/filter_test.go b/engine/access/state_stream/filter_test.go
index d25c272a06f..6071d221ef4 100644
--- a/engine/access/state_stream/filter_test.go
+++ b/engine/access/state_stream/filter_test.go
@@ -87,9 +87,15 @@ func TestFilter(t *testing.T) {
 	assert.NoError(t, err)
 
 	events := flow.EventsList{
-		unittest.EventFixture("A.0000000000000001.Contract1.EventA", 0, 0, unittest.IdentifierFixture(), 0),
-		unittest.EventFixture("A.0000000000000001.Contract2.EventA", 0, 0, unittest.IdentifierFixture(), 0),
-		unittest.EventFixture("flow.AccountCreated", 0, 0, unittest.IdentifierFixture(), 0),
+		unittest.EventFixture(
+			unittest.Event.WithEventType("A.0000000000000001.Contract1.EventA"),
+		),
+		unittest.EventFixture(
+			unittest.Event.WithEventType("A.0000000000000001.Contract2.EventA"),
+		),
+		unittest.EventFixture(
+			unittest.Event.WithEventType("flow.AccountCreated"),
+		),
 	}
 
 	matched := filter.Filter(events)
diff --git a/engine/access/state_stream/handler.go b/engine/access/state_stream/handler.go
deleted file mode 100644
index df7c4dd9f6b..00000000000
--- a/engine/access/state_stream/handler.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package state_stream
-
-import (
-	"context"
-	"sync/atomic"
-
-	access "github.com/onflow/flow/protobuf/go/flow/executiondata"
-	executiondata "github.com/onflow/flow/protobuf/go/flow/executiondata"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-
-	"github.com/onflow/flow-go/engine/common/rpc"
-	"github.com/onflow/flow-go/engine/common/rpc/convert"
-	"github.com/onflow/flow-go/model/flow"
-)
-
-type Handler struct {
-	api   API
-	chain flow.Chain
-
-	eventFilterConfig EventFilterConfig
-
-	maxStreams  int32
-	streamCount atomic.Int32
-}
-
-func NewHandler(api API, chain 
flow.Chain, conf EventFilterConfig, maxGlobalStreams uint32) *Handler { - h := &Handler{ - api: api, - chain: chain, - eventFilterConfig: conf, - maxStreams: int32(maxGlobalStreams), - streamCount: atomic.Int32{}, - } - return h -} - -func (h *Handler) GetExecutionDataByBlockID(ctx context.Context, request *access.GetExecutionDataByBlockIDRequest) (*access.GetExecutionDataByBlockIDResponse, error) { - blockID, err := convert.BlockID(request.GetBlockId()) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "could not convert block ID: %v", err) - } - - execData, err := h.api.GetExecutionDataByBlockID(ctx, blockID) - if err != nil { - return nil, rpc.ConvertError(err, "could no get execution data", codes.Internal) - } - - message, err := convert.BlockExecutionDataToMessage(execData) - if err != nil { - return nil, status.Errorf(codes.Internal, "could not convert execution data to entity: %v", err) - } - - return &access.GetExecutionDataByBlockIDResponse{BlockExecutionData: message}, nil -} - -func (h *Handler) SubscribeExecutionData(request *access.SubscribeExecutionDataRequest, stream access.ExecutionDataAPI_SubscribeExecutionDataServer) error { - // check if the maximum number of streams is reached - if h.streamCount.Load() >= h.maxStreams { - return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") - } - h.streamCount.Add(1) - defer h.streamCount.Add(-1) - - startBlockID := flow.ZeroID - if request.GetStartBlockId() != nil { - blockID, err := convert.BlockID(request.GetStartBlockId()) - if err != nil { - return status.Errorf(codes.InvalidArgument, "could not convert start block ID: %v", err) - } - startBlockID = blockID - } - - sub := h.api.SubscribeExecutionData(stream.Context(), startBlockID, request.GetStartBlockHeight()) - - for { - v, ok := <-sub.Channel() - if !ok { - if sub.Err() != nil { - return rpc.ConvertError(sub.Err(), "stream encountered an error", codes.Internal) - } - return nil - } - - resp, ok := v.(*ExecutionDataResponse) - if !ok { - return status.Errorf(codes.Internal, "unexpected response type: %T", v) - } - - execData, err := convert.BlockExecutionDataToMessage(resp.ExecutionData) - if err != nil { - return status.Errorf(codes.Internal, "could not convert execution data to entity: %v", err) - } - - err = stream.Send(&executiondata.SubscribeExecutionDataResponse{ - BlockHeight: resp.Height, - BlockExecutionData: execData, - }) - if err != nil { - return rpc.ConvertError(err, "could not send response", codes.Internal) - } - } -} - -func (h *Handler) SubscribeEvents(request *access.SubscribeEventsRequest, stream access.ExecutionDataAPI_SubscribeEventsServer) error { - // check if the maximum number of streams is reached - if h.streamCount.Load() >= h.maxStreams { - return status.Errorf(codes.ResourceExhausted, "maximum number of streams reached") - } - h.streamCount.Add(1) - defer h.streamCount.Add(-1) - - startBlockID := flow.ZeroID - if request.GetStartBlockId() != nil { - blockID, err := convert.BlockID(request.GetStartBlockId()) - if err != nil { - return status.Errorf(codes.InvalidArgument, "could not convert start block ID: %v", err) - } - startBlockID = blockID - } - - filter := EventFilter{} - if request.GetFilter() != nil { - var err error - reqFilter := request.GetFilter() - filter, err = NewEventFilter( - h.eventFilterConfig, - h.chain, - reqFilter.GetEventType(), - reqFilter.GetAddress(), - reqFilter.GetContract(), - ) - if err != nil { - return status.Errorf(codes.InvalidArgument, "invalid event filter: %v", err) 
- } - } - - sub := h.api.SubscribeEvents(stream.Context(), startBlockID, request.GetStartBlockHeight(), filter) - - for { - v, ok := <-sub.Channel() - if !ok { - if sub.Err() != nil { - return rpc.ConvertError(sub.Err(), "stream encountered an error", codes.Internal) - } - return nil - } - - resp, ok := v.(*EventsResponse) - if !ok { - return status.Errorf(codes.Internal, "unexpected response type: %T", v) - } - - err := stream.Send(&executiondata.SubscribeEventsResponse{ - BlockHeight: resp.Height, - BlockId: convert.IdentifierToMessage(resp.BlockID), - Events: convert.EventsToMessages(resp.Events), - }) - if err != nil { - return rpc.ConvertError(err, "could not send response", codes.Internal) - } - } -} diff --git a/engine/access/state_stream/mock/api.go b/engine/access/state_stream/mock/api.go index 5b57efc917f..ba8bd9e7544 100644 --- a/engine/access/state_stream/mock/api.go +++ b/engine/access/state_stream/mock/api.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -11,6 +11,8 @@ import ( mock "github.com/stretchr/testify/mock" state_stream "github.com/onflow/flow-go/engine/access/state_stream" + + subscription "github.com/onflow/flow-go/engine/access/subscription" ) // API is an autogenerated mock type for the API type @@ -22,6 +24,10 @@ type API struct { func (_m *API) GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionData, error) { ret := _m.Called(ctx, blockID) + if len(ret) == 0 { + panic("no return value specified for GetExecutionDataByBlockID") + } + var r0 *execution_data.BlockExecutionData var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*execution_data.BlockExecutionData, error)); ok { @@ -44,16 +50,170 @@ func (_m *API) GetExecutionDataByBlockID(ctx context.Context, blockID flow.Ident return r0, r1 } +// GetRegisterValues provides a mock function with given fields: registerIDs, height +func (_m *API) GetRegisterValues(registerIDs flow.RegisterIDs, height uint64) ([]flow.RegisterValue, error) { + ret := _m.Called(registerIDs, height) + + if len(ret) == 0 { + panic("no return value specified for GetRegisterValues") + } + + var r0 []flow.RegisterValue + var r1 error + if rf, ok := ret.Get(0).(func(flow.RegisterIDs, uint64) ([]flow.RegisterValue, error)); ok { + return rf(registerIDs, height) + } + if rf, ok := ret.Get(0).(func(flow.RegisterIDs, uint64) []flow.RegisterValue); ok { + r0 = rf(registerIDs, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.RegisterValue) + } + } + + if rf, ok := ret.Get(1).(func(flow.RegisterIDs, uint64) error); ok { + r1 = rf(registerIDs, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeAccountStatusesFromLatestBlock provides a mock function with given fields: ctx, filter +func (_m *API) SubscribeAccountStatusesFromLatestBlock(ctx context.Context, filter state_stream.AccountStatusFilter) subscription.Subscription { + ret := _m.Called(ctx, filter) + + if len(ret) == 0 { + panic("no return value specified for SubscribeAccountStatusesFromLatestBlock") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, state_stream.AccountStatusFilter) subscription.Subscription); ok { + r0 = rf(ctx, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeAccountStatusesFromStartBlockID provides a mock function with given fields: ctx, startBlockID, 
filter +func (_m *API) SubscribeAccountStatusesFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, filter state_stream.AccountStatusFilter) subscription.Subscription { + ret := _m.Called(ctx, startBlockID, filter) + + if len(ret) == 0 { + panic("no return value specified for SubscribeAccountStatusesFromStartBlockID") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, state_stream.AccountStatusFilter) subscription.Subscription); ok { + r0 = rf(ctx, startBlockID, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeAccountStatusesFromStartHeight provides a mock function with given fields: ctx, startHeight, filter +func (_m *API) SubscribeAccountStatusesFromStartHeight(ctx context.Context, startHeight uint64, filter state_stream.AccountStatusFilter) subscription.Subscription { + ret := _m.Called(ctx, startHeight, filter) + + if len(ret) == 0 { + panic("no return value specified for SubscribeAccountStatusesFromStartHeight") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, uint64, state_stream.AccountStatusFilter) subscription.Subscription); ok { + r0 = rf(ctx, startHeight, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + // SubscribeEvents provides a mock function with given fields: ctx, startBlockID, startHeight, filter -func (_m *API) SubscribeEvents(ctx context.Context, startBlockID flow.Identifier, startHeight uint64, filter state_stream.EventFilter) state_stream.Subscription { +func (_m *API) SubscribeEvents(ctx context.Context, startBlockID flow.Identifier, startHeight uint64, filter state_stream.EventFilter) subscription.Subscription { ret := _m.Called(ctx, startBlockID, startHeight, filter) - var r0 state_stream.Subscription - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64, state_stream.EventFilter) state_stream.Subscription); ok { + if len(ret) == 0 { + panic("no return value specified for SubscribeEvents") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64, state_stream.EventFilter) subscription.Subscription); ok { r0 = rf(ctx, startBlockID, startHeight, filter) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(state_stream.Subscription) + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeEventsFromLatest provides a mock function with given fields: ctx, filter +func (_m *API) SubscribeEventsFromLatest(ctx context.Context, filter state_stream.EventFilter) subscription.Subscription { + ret := _m.Called(ctx, filter) + + if len(ret) == 0 { + panic("no return value specified for SubscribeEventsFromLatest") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, state_stream.EventFilter) subscription.Subscription); ok { + r0 = rf(ctx, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeEventsFromStartBlockID provides a mock function with given fields: ctx, startBlockID, filter +func (_m *API) SubscribeEventsFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, filter state_stream.EventFilter) subscription.Subscription { + ret := _m.Called(ctx, startBlockID, filter) + + if len(ret) == 0 { + panic("no return value specified for SubscribeEventsFromStartBlockID") + } + + var r0 
subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, state_stream.EventFilter) subscription.Subscription); ok { + r0 = rf(ctx, startBlockID, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeEventsFromStartHeight provides a mock function with given fields: ctx, startHeight, filter +func (_m *API) SubscribeEventsFromStartHeight(ctx context.Context, startHeight uint64, filter state_stream.EventFilter) subscription.Subscription { + ret := _m.Called(ctx, startHeight, filter) + + if len(ret) == 0 { + panic("no return value specified for SubscribeEventsFromStartHeight") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, uint64, state_stream.EventFilter) subscription.Subscription); ok { + r0 = rf(ctx, startHeight, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) } } @@ -61,28 +221,91 @@ func (_m *API) SubscribeEvents(ctx context.Context, startBlockID flow.Identifier } // SubscribeExecutionData provides a mock function with given fields: ctx, startBlockID, startBlockHeight -func (_m *API) SubscribeExecutionData(ctx context.Context, startBlockID flow.Identifier, startBlockHeight uint64) state_stream.Subscription { +func (_m *API) SubscribeExecutionData(ctx context.Context, startBlockID flow.Identifier, startBlockHeight uint64) subscription.Subscription { ret := _m.Called(ctx, startBlockID, startBlockHeight) - var r0 state_stream.Subscription - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64) state_stream.Subscription); ok { + if len(ret) == 0 { + panic("no return value specified for SubscribeExecutionData") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64) subscription.Subscription); ok { r0 = rf(ctx, startBlockID, startBlockHeight) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(state_stream.Subscription) + r0 = ret.Get(0).(subscription.Subscription) } } return r0 } -type mockConstructorTestingTNewAPI interface { - mock.TestingT - Cleanup(func()) +// SubscribeExecutionDataFromLatest provides a mock function with given fields: ctx +func (_m *API) SubscribeExecutionDataFromLatest(ctx context.Context) subscription.Subscription { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SubscribeExecutionDataFromLatest") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context) subscription.Subscription); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeExecutionDataFromStartBlockHeight provides a mock function with given fields: ctx, startBlockHeight +func (_m *API) SubscribeExecutionDataFromStartBlockHeight(ctx context.Context, startBlockHeight uint64) subscription.Subscription { + ret := _m.Called(ctx, startBlockHeight) + + if len(ret) == 0 { + panic("no return value specified for SubscribeExecutionDataFromStartBlockHeight") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, uint64) subscription.Subscription); ok { + r0 = rf(ctx, startBlockHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 +} + +// SubscribeExecutionDataFromStartBlockID provides a mock function with given fields: ctx, startBlockID +func (_m *API) SubscribeExecutionDataFromStartBlockID(ctx 
context.Context, startBlockID flow.Identifier) subscription.Subscription { + ret := _m.Called(ctx, startBlockID) + + if len(ret) == 0 { + panic("no return value specified for SubscribeExecutionDataFromStartBlockID") + } + + var r0 subscription.Subscription + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) subscription.Subscription); ok { + r0 = rf(ctx, startBlockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(subscription.Subscription) + } + } + + return r0 } // NewAPI creates a new instance of API. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAPI(t mockConstructorTestingTNewAPI) *API { +// The first argument is typically a *testing.T value. +func NewAPI(t interface { + mock.TestingT + Cleanup(func()) +}) *API { mock := &API{} mock.Mock.Test(t) diff --git a/engine/access/state_stream/state_stream.go b/engine/access/state_stream/state_stream.go new file mode 100644 index 00000000000..862c0c0e3b1 --- /dev/null +++ b/engine/access/state_stream/state_stream.go @@ -0,0 +1,112 @@ +package state_stream + +import ( + "context" + + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" +) + +const ( + // DefaultRegisterIDsRequestLimit defines the default limit of register IDs for a single request to the get register endpoint + DefaultRegisterIDsRequestLimit = 100 +) + +// API represents an interface that defines methods for interacting with a blockchain's execution data and events. +type API interface { + // GetExecutionDataByBlockID retrieves execution data for a specific block by its block ID. + GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionData, error) + // SubscribeExecutionData is deprecated and will be removed in future versions. + // Use SubscribeExecutionDataFromStartBlockID, SubscribeExecutionDataFromStartBlockHeight or SubscribeExecutionDataFromLatest. + // + // SubscribeExecutionData subscribes to execution data starting from a specific block ID and block height. + SubscribeExecutionData(ctx context.Context, startBlockID flow.Identifier, startBlockHeight uint64) subscription.Subscription + // SubscribeExecutionDataFromStartBlockID subscribes to execution data starting from a specific block id. + SubscribeExecutionDataFromStartBlockID(ctx context.Context, startBlockID flow.Identifier) subscription.Subscription + // SubscribeExecutionDataFromStartBlockHeight subscribes to execution data starting from a specific block height. + SubscribeExecutionDataFromStartBlockHeight(ctx context.Context, startBlockHeight uint64) subscription.Subscription + // SubscribeExecutionDataFromLatest subscribes to execution data starting from latest block. + SubscribeExecutionDataFromLatest(ctx context.Context) subscription.Subscription + // SubscribeEvents is deprecated and will be removed in a future version. + // Use SubscribeEventsFromStartBlockID, SubscribeEventsFromStartHeight or SubscribeEventsFromLatest. + // + // SubscribeEvents streams events for all blocks starting at the specified block ID or block height + // up until the latest available block. Once the latest is + // reached, the stream will remain open and responses are sent for each new + // block as it becomes available. + // + // Only one of startBlockID and startHeight may be set. If neither startBlockID nor startHeight is provided, + // the latest sealed block is used. 
+	//
+	// Events within each block are filtered by the provided EventFilter, and only
+	// those events that match the filter are returned. If no filter is provided,
+	// all events are returned.
+	//
+	// Parameters:
+	// - ctx: Context for the operation.
+	// - startBlockID: The identifier of the starting block. If provided, startHeight should be 0.
+	// - startHeight: The height of the starting block. If provided, startBlockID should be flow.ZeroID.
+	// - filter: The event filter used to filter events.
+	//
+	// If invalid parameters are supplied, SubscribeEvents returns a failed subscription.
+	SubscribeEvents(ctx context.Context, startBlockID flow.Identifier, startHeight uint64, filter EventFilter) subscription.Subscription
+	// SubscribeEventsFromStartBlockID streams events starting at the specified block ID,
+	// up until the latest available block. Once the latest is
+	// reached, the stream will remain open and responses are sent for each new
+	// block as it becomes available.
+	//
+	// Events within each block are filtered by the provided EventFilter, and only
+	// those events that match the filter are returned. If no filter is provided,
+	// all events are returned.
+	//
+	// Parameters:
+	// - ctx: Context for the operation.
+	// - startBlockID: The identifier of the starting block.
+	// - filter: The event filter used to filter events.
+	//
+	// If invalid parameters are supplied, SubscribeEventsFromStartBlockID returns a failed subscription.
+	SubscribeEventsFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, filter EventFilter) subscription.Subscription
+	// SubscribeEventsFromStartHeight streams events starting at the specified block height,
+	// up until the latest available block. Once the latest is
+	// reached, the stream will remain open and responses are sent for each new
+	// block as it becomes available.
+	//
+	// Events within each block are filtered by the provided EventFilter, and only
+	// those events that match the filter are returned. If no filter is provided,
+	// all events are returned.
+	//
+	// Parameters:
+	// - ctx: Context for the operation.
+	// - startHeight: The height of the starting block.
+	// - filter: The event filter used to filter events.
+	//
+	// If invalid parameters are supplied, SubscribeEventsFromStartHeight returns a failed subscription.
+	SubscribeEventsFromStartHeight(ctx context.Context, startHeight uint64, filter EventFilter) subscription.Subscription
+	// SubscribeEventsFromLatest subscribes to events starting at the latest sealed block,
+	// up until the latest available block. Once the latest is
+	// reached, the stream will remain open and responses are sent for each new
+	// block as it becomes available.
+	//
+	// Events within each block are filtered by the provided EventFilter, and only
+	// those events that match the filter are returned. If no filter is provided,
+	// all events are returned.
+	//
+	// Parameters:
+	// - ctx: Context for the operation.
+	// - filter: The event filter used to filter events.
+	//
+	// If invalid parameters are supplied, SubscribeEventsFromLatest returns a failed subscription.
+	SubscribeEventsFromLatest(ctx context.Context, filter EventFilter) subscription.Subscription
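As a usage sketch for the event-subscription variants above (hypothetical caller code, not part of this patch; the concrete channel element type is backend-defined):

```go
package example

import (
	"context"
	"fmt"

	"github.com/onflow/flow-go/engine/access/state_stream"
)

// consumeEvents drains an events subscription until its channel closes, then
// checks Err to distinguish a graceful end from a failure.
func consumeEvents(ctx context.Context, api state_stream.API, filter state_stream.EventFilter) error {
	sub := api.SubscribeEventsFromLatest(ctx, filter)
	for v := range sub.Channel() {
		fmt.Printf("received %T\n", v) // e.g. *EventsResponse from the gRPC backend
	}
	// nil means the subscription ended gracefully
	return sub.Err()
}
```

+	// GetRegisterValues returns register values for a set of register IDs at the provided block height.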
+ GetRegisterValues(registerIDs flow.RegisterIDs, height uint64) ([]flow.RegisterValue, error) + // SubscribeAccountStatusesFromStartBlockID subscribes to the streaming of account status changes starting from + // a specific block ID with an optional status filter. + SubscribeAccountStatusesFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, filter AccountStatusFilter) subscription.Subscription + // SubscribeAccountStatusesFromStartHeight subscribes to the streaming of account status changes starting from + // a specific block height, with an optional status filter. + SubscribeAccountStatusesFromStartHeight(ctx context.Context, startHeight uint64, filter AccountStatusFilter) subscription.Subscription + // SubscribeAccountStatusesFromLatestBlock subscribes to the streaming of account status changes starting from a + // latest sealed block, with an optional status filter. + SubscribeAccountStatusesFromLatestBlock(ctx context.Context, filter AccountStatusFilter) subscription.Subscription +} diff --git a/engine/access/state_stream/streamer.go b/engine/access/state_stream/streamer.go deleted file mode 100644 index d2313f7d693..00000000000 --- a/engine/access/state_stream/streamer.go +++ /dev/null @@ -1,104 +0,0 @@ -package state_stream - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" - "github.com/onflow/flow-go/storage" -) - -// Streamable represents a subscription that can be streamed. -type Streamable interface { - ID() string - Close() - Fail(error) - Send(context.Context, interface{}, time.Duration) error - Next(context.Context) (interface{}, error) -} - -// Streamer -type Streamer struct { - log zerolog.Logger - broadcaster *engine.Broadcaster - sendTimeout time.Duration - sub Streamable -} - -func NewStreamer( - log zerolog.Logger, - broadcaster *engine.Broadcaster, - sendTimeout time.Duration, - sub Streamable, -) *Streamer { - return &Streamer{ - log: log.With().Str("sub_id", sub.ID()).Logger(), - broadcaster: broadcaster, - sendTimeout: sendTimeout, - sub: sub, - } -} - -// Stream is a blocking method that streams data to the subscription until either the context is -// cancelled or it encounters an error. -func (s *Streamer) Stream(ctx context.Context) { - s.log.Debug().Msg("starting streaming") - defer s.log.Debug().Msg("finished streaming") - - notifier := engine.NewNotifier() - s.broadcaster.Subscribe(notifier) - - // always check the first time. This ensures that streaming continues to work even if the - // execution sync is not functioning (e.g. on a past spork network, or during an temporary outage) - notifier.Notify() - - for { - select { - case <-ctx.Done(): - s.sub.Fail(fmt.Errorf("client disconnected: %w", ctx.Err())) - return - case <-notifier.Channel(): - s.log.Debug().Msg("received broadcast notification") - } - - err := s.sendAllAvailable(ctx) - - if err != nil { - s.log.Err(err).Msg("error sending response") - s.sub.Fail(err) - return - } - } -} - -// sendAllAvailable reads data from the streamable and sends it to the client until no more data is available. 
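Returning briefly to the new `GetRegisterValues` method above: a hypothetical read of a single register at a fixed height. The `flow.RegisterID` fields follow the flow model (`Owner`, `Key`); the key value here is illustrative, and `DefaultRegisterIDsRequestLimit` (100) documents the default per-request cap on IDs:

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/engine/access/state_stream"
	"github.com/onflow/flow-go/model/flow"
)

// readRegister fetches one register value at the given height.
func readRegister(api state_stream.API, owner flow.Address, height uint64) (flow.RegisterValue, error) {
	ids := flow.RegisterIDs{
		{Owner: string(owner.Bytes()), Key: "storage"}, // illustrative key
	}
	values, err := api.GetRegisterValues(ids, height)
	if err != nil {
		return nil, fmt.Errorf("could not get register values: %w", err)
	}
	return values[0], nil
}
```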
-func (s *Streamer) sendAllAvailable(ctx context.Context) error { - for { - response, err := s.sub.Next(ctx) - - if err != nil { - if errors.Is(err, storage.ErrNotFound) || execution_data.IsBlobNotFoundError(err) { - // no more available - return nil - } - - return fmt.Errorf("could not get response: %w", err) - } - - if ssub, ok := s.sub.(*HeightBasedSubscription); ok { - s.log.Trace(). - Uint64("next_height", ssub.nextHeight). - Msg("sending response") - } - - err = s.sub.Send(ctx, response, s.sendTimeout) - if err != nil { - return err - } - } -} diff --git a/engine/access/state_stream/subscription.go b/engine/access/state_stream/subscription.go deleted file mode 100644 index 83f9775a005..00000000000 --- a/engine/access/state_stream/subscription.go +++ /dev/null @@ -1,136 +0,0 @@ -package state_stream - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/google/uuid" -) - -// DefaultSendBufferSize is the default buffer size for the subscription's send channel. -// The size is chosen to balance memory overhead from each subscription with performance when -// streaming existing data. -const DefaultSendBufferSize = 10 - -// GetDataByHeightFunc is a callback used by subscriptions to retrieve data for a given height. -// Expected errors: -// - storage.ErrNotFound -// - execution_data.BlobNotFoundError -// All other errors are considered exceptions -type GetDataByHeightFunc func(ctx context.Context, height uint64) (interface{}, error) - -// Subscription represents a streaming request, and handles the communication between the grpc handler -// and the backend implementation. -type Subscription interface { - // ID returns the unique identifier for this subscription used for logging - ID() string - - // Channel returns the channel from which subscriptino data can be read - Channel() <-chan interface{} - - // Err returns the error that caused the subscription to fail - Err() error -} - -type SubscriptionImpl struct { - id string - - // ch is the channel used to pass data to the receiver - ch chan interface{} - - // err is the error that caused the subscription to fail - err error - - // once is used to ensure that the channel is only closed once - once sync.Once - - // closed tracks whether or not the subscription has been closed - closed bool -} - -func NewSubscription(bufferSize int) *SubscriptionImpl { - return &SubscriptionImpl{ - id: uuid.New().String(), - ch: make(chan interface{}, bufferSize), - } -} - -// ID returns the subscription ID -// Note: this is not a cryptographic hash -func (sub *SubscriptionImpl) ID() string { - return sub.id -} - -// Channel returns the channel from which subscriptino data can be read -func (sub *SubscriptionImpl) Channel() <-chan interface{} { - return sub.ch -} - -// Err returns the error that caused the subscription to fail -func (sub *SubscriptionImpl) Err() error { - return sub.err -} - -// Fail registers an error and closes the subscription channel -func (sub *SubscriptionImpl) Fail(err error) { - sub.err = err - sub.Close() -} - -// Close is called when a subscription ends gracefully, and closes the subscription channel -func (sub *SubscriptionImpl) Close() { - sub.once.Do(func() { - close(sub.ch) - sub.closed = true - }) -} - -// Send sends a value to the subscription channel or returns an error -// Expected errors: -// - context.DeadlineExceeded if send timed out -// - context.Canceled if the client disconnected -func (sub *SubscriptionImpl) Send(ctx context.Context, v interface{}, timeout time.Duration) error { - if sub.closed { - return 
fmt.Errorf("subscription closed") - } - - waitCtx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - select { - case <-waitCtx.Done(): - return waitCtx.Err() - case sub.ch <- v: - return nil - } -} - -var _ Subscription = (*HeightBasedSubscription)(nil) -var _ Streamable = (*HeightBasedSubscription)(nil) - -// HeightBasedSubscription is a subscription that retrieves data sequentially by block height -type HeightBasedSubscription struct { - *SubscriptionImpl - nextHeight uint64 - getData GetDataByHeightFunc -} - -func NewHeightBasedSubscription(bufferSize int, firstHeight uint64, getData GetDataByHeightFunc) *HeightBasedSubscription { - return &HeightBasedSubscription{ - SubscriptionImpl: NewSubscription(bufferSize), - nextHeight: firstHeight, - getData: getData, - } -} - -// Next returns the value for the next height from the subscription -func (s *HeightBasedSubscription) Next(ctx context.Context) (interface{}, error) { - v, err := s.getData(ctx, s.nextHeight) - if err != nil { - return nil, fmt.Errorf("could not get data for height %d: %w", s.nextHeight, err) - } - s.nextHeight++ - return v, nil -} diff --git a/engine/access/subscription/mock/streamable.go b/engine/access/subscription/mock/streamable.go new file mode 100644 index 00000000000..c4cd36a00e2 --- /dev/null +++ b/engine/access/subscription/mock/streamable.go @@ -0,0 +1,106 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Streamable is an autogenerated mock type for the Streamable type +type Streamable struct { + mock.Mock +} + +// Close provides a mock function with no fields +func (_m *Streamable) Close() { + _m.Called() +} + +// Fail provides a mock function with given fields: _a0 +func (_m *Streamable) Fail(_a0 error) { + _m.Called(_a0) +} + +// ID provides a mock function with no fields +func (_m *Streamable) ID() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Next provides a mock function with given fields: _a0 +func (_m *Streamable) Next(_a0 context.Context) (interface{}, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Next") + } + + var r0 interface{} + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (interface{}, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) interface{}); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Send provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Streamable) Send(_a0 context.Context, _a1 interface{}, _a2 time.Duration) error { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for Send") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, interface{}, time.Duration) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewStreamable creates a new instance of Streamable. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewStreamable(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *Streamable {
+	mock := &Streamable{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/engine/access/subscription/mock/subscription.go b/engine/access/subscription/mock/subscription.go
new file mode 100644
index 00000000000..467cd80f7cc
--- /dev/null
+++ b/engine/access/subscription/mock/subscription.go
@@ -0,0 +1,80 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock
+
+import mock "github.com/stretchr/testify/mock"
+
+// Subscription is an autogenerated mock type for the Subscription type
+type Subscription struct {
+	mock.Mock
+}
+
+// Channel provides a mock function with no fields
+func (_m *Subscription) Channel() <-chan interface{} {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for Channel")
+	}
+
+	var r0 <-chan interface{}
+	if rf, ok := ret.Get(0).(func() <-chan interface{}); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(<-chan interface{})
+		}
+	}
+
+	return r0
+}
+
+// Err provides a mock function with no fields
+func (_m *Subscription) Err() error {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for Err")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func() error); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// ID provides a mock function with no fields
+func (_m *Subscription) ID() string {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for ID")
+	}
+
+	var r0 string
+	if rf, ok := ret.Get(0).(func() string); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(string)
+	}
+
+	return r0
+}
+
+// NewSubscription creates a new instance of Subscription. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewSubscription(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *Subscription {
+	mock := &Subscription{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/engine/access/subscription/streamer.go b/engine/access/subscription/streamer.go
new file mode 100644
index 00000000000..437028edc6c
--- /dev/null
+++ b/engine/access/subscription/streamer.go
@@ -0,0 +1,142 @@
+package subscription
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/rs/zerolog"
+	"golang.org/x/time/rate"
+
+	"github.com/onflow/flow-go/engine"
+)
+
+// ErrBlockNotReady represents an error indicating that a block is not yet available or ready.
+var ErrBlockNotReady = errors.New("block not ready")
+
+// ErrEndOfData represents an error indicating that no more data is available for streaming.
+var ErrEndOfData = errors.New("end of data")
+
+// Streamer represents a streaming subscription that delivers data to clients.
+type Streamer struct {
+	log         zerolog.Logger
+	sub         Streamable
+	broadcaster *engine.Broadcaster
+	sendTimeout time.Duration
+	limiter     *rate.Limiter
+}
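The two sentinel errors above define the contract for data providers: a sketch of a `GetDataByHeightFunc` (the callback type defined in subscription.go below) that returns `ErrBlockNotReady` while a height is not yet indexed, letting the Streamer pause until the next broadcast. The `highestIndexed` counter and `lookup` callback are assumptions for illustration:

```go
package example

import (
	"context"
	"sync/atomic"

	"github.com/onflow/flow-go/engine/access/subscription"
)

// newGetData returns a GetDataByHeightFunc that cooperates with the
// Streamer's sentinel errors: ErrBlockNotReady pauses the stream until the
// next broadcast instead of failing the subscription.
func newGetData(
	highestIndexed *atomic.Uint64,
	lookup func(context.Context, uint64) (interface{}, error),
) subscription.GetDataByHeightFunc {
	return func(ctx context.Context, height uint64) (interface{}, error) {
		if height > highestIndexed.Load() {
			// not indexed yet: the Streamer treats this as "no more data for now"
			return nil, subscription.ErrBlockNotReady
		}
		return lookup(ctx, height)
	}
}
```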
+
+// NewStreamer creates a new Streamer instance.
+func NewStreamer(
+	log zerolog.Logger,
+	broadcaster *engine.Broadcaster,
+	sendTimeout time.Duration,
+	limit float64,
+	sub Streamable,
+) *Streamer {
+	var limiter *rate.Limiter
+	if limit > 0 {
+		// allows for 1 response per call, averaging `limit` responses per second over longer time frames
+		limiter = rate.NewLimiter(rate.Limit(limit), 1)
+	}
+
+	return &Streamer{
+		log:         log.With().Str("sub_id", sub.ID()).Logger(),
+		broadcaster: broadcaster,
+		sendTimeout: sendTimeout,
+		limiter:     limiter,
+		sub:         sub,
+	}
+}
+
+// Stream is a blocking method that streams data to the subscription until either the context is
+// cancelled or it encounters an error.
+// This function follows a somewhat unintuitive contract: if the context is canceled,
+// it is treated as an error and written to the subscription. However, you can rely on
+// this behavior in the subscription to handle it as a graceful shutdown.
+func (s *Streamer) Stream(ctx context.Context) {
+	s.log.Debug().Msg("starting streaming")
+	defer s.log.Debug().Msg("finished streaming")
+
+	notifier := engine.NewNotifier()
+	s.broadcaster.Subscribe(notifier)
+
+	// always check the first time. This ensures that streaming continues to work even if the
+	// execution sync is not functioning (e.g. on a past spork network, or during a temporary outage)
+	notifier.Notify()
+
+	for {
+		select {
+		case <-ctx.Done():
+			s.sub.Fail(fmt.Errorf("client disconnected: %w", ctx.Err()))
+			return
+		case <-notifier.Channel():
+			s.log.Debug().Msg("received broadcast notification")
+		}
+
+		err := s.sendAllAvailable(ctx)
+
+		if err != nil {
+			// TODO: The functionality to gracefully shut down on demand should be improved with https://github.com/onflow/flow-go/issues/5561
+			if errors.Is(err, ErrEndOfData) {
+				s.sub.Close()
+				return
+			}
+			if errors.Is(err, context.Canceled) {
+				s.sub.Fail(fmt.Errorf("client disconnected: %w", ctx.Err()))
+				return
+			}
+			s.log.Err(err).Msg("error sending response")
+			s.sub.Fail(err)
+			return
+		}
+	}
+}
+
+// sendAllAvailable reads data from the streamable and sends it to the client until no more data is available.
+func (s *Streamer) sendAllAvailable(ctx context.Context) error {
+	for {
+		// blocking wait for the streamer's rate limit to have available capacity
+		if err := s.checkRateLimit(ctx); err != nil {
+			return fmt.Errorf("error waiting for response capacity: %w", err)
+		}
+
+		response, err := s.sub.Next(ctx)
+
+		if response == nil && err == nil {
+			continue
+		}
+
+		if err != nil {
+			if errors.Is(err, ErrBlockNotReady) {
+				// no more available
+				return nil
+			}
+
+			return fmt.Errorf("could not get response: %w", err)
+		}
+
+		if ssub, ok := s.sub.(*HeightBasedSubscription); ok {
+			s.log.Trace().
+				Uint64("next_height", ssub.nextHeight).
+				Msg("sending response")
+		}
+
+		err = s.sub.Send(ctx, response, s.sendTimeout)
+		if err != nil {
+			return err
+		}
+	}
+}
+
+// checkRateLimit checks the stream's rate limit and blocks until there is room to send a response.
+// An error is returned if the context is canceled or the expected wait time exceeds the context's
+// deadline.
+func (s *Streamer) checkRateLimit(ctx context.Context) error { + if s.limiter == nil { + return nil + } + + return s.limiter.WaitN(ctx, 1) +} diff --git a/engine/access/subscription/streamer_test.go b/engine/access/subscription/streamer_test.go new file mode 100644 index 00000000000..bc6bc7df72f --- /dev/null +++ b/engine/access/subscription/streamer_test.go @@ -0,0 +1,153 @@ +package subscription_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/subscription" + submock "github.com/onflow/flow-go/engine/access/subscription/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +type testData struct { + data string + err error +} + +var testErr = fmt.Errorf("test error") + +func TestStream(t *testing.T) { + t.Parallel() + + ctx := context.Background() + timeout := subscription.DefaultSendTimeout + + sub := submock.NewStreamable(t) + sub.On("ID").Return(uuid.NewString()) + + tests := []testData{} + for i := 0; i < 4; i++ { + tests = append(tests, testData{fmt.Sprintf("test%d", i), nil}) + } + tests = append(tests, testData{"", testErr}) + + broadcaster := engine.NewBroadcaster() + streamer := subscription.NewStreamer(unittest.Logger(), broadcaster, timeout, subscription.DefaultResponseLimit, sub) + + for _, d := range tests { + sub.On("Next", mock.Anything).Return(d.data, d.err).Once() + if d.err == nil { + sub.On("Send", mock.Anything, d.data, timeout).Return(nil).Once() + } else { + mocked := sub.On("Fail", mock.Anything).Return().Once() + mocked.RunFn = func(args mock.Arguments) { + assert.ErrorIs(t, args.Get(0).(error), d.err) + } + } + } + + broadcaster.Publish() + + unittest.RequireReturnsBefore(t, func() { + streamer.Stream(ctx) + }, 100*time.Millisecond, "streamer.Stream() should return quickly") +} + +func TestStreamRatelimited(t *testing.T) { + t.Parallel() + + ctx := context.Background() + timeout := subscription.DefaultSendTimeout + duration := 100 * time.Millisecond + + for _, limit := range []float64{0.2, 3, 20, 500} { + t.Run(fmt.Sprintf("responses are limited - %.1f rps", limit), func(t *testing.T) { + sub := submock.NewStreamable(t) + sub.On("ID").Return(uuid.NewString()) + + broadcaster := engine.NewBroadcaster() + streamer := subscription.NewStreamer(unittest.Logger(), broadcaster, timeout, limit, sub) + + var nextCalls, sendCalls int + sub.On("Next", mock.Anything).Return("data", nil).Run(func(args mock.Arguments) { + nextCalls++ + }) + sub.On("Send", mock.Anything, "data", timeout).Return(nil).Run(func(args mock.Arguments) { + sendCalls++ + }) + + broadcaster.Publish() + + unittest.RequireNeverReturnBefore(t, func() { + streamer.Stream(ctx) + }, duration, "streamer.Stream() should never stop") + + // check the number of calls and make sure they are sane. + // ratelimit uses a token bucket algorithm which adds 1 token every 1/r seconds. This + // comes to roughly 10% of r within 100ms. + // + // Add a large buffer since the algorithm only guarantees the rate over longer time + // ranges. Since this test covers various orders of magnitude, we can still validate it + // is working as expected. 
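+			// For example, at limit=20 this gives target = int(20 * 0.1) = 2,
+			// so the assertions below tolerate at most 6 Next/Send calls.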
+			target := int(limit * float64(duration) / float64(time.Second))
+			if target == 0 {
+				target = 1
+			}
+
+			assert.LessOrEqual(t, nextCalls, target*3)
+			assert.LessOrEqual(t, sendCalls, target*3)
+		})
+	}
+}
+
+// TestLongStreamRatelimited tests that the streamer uses the correct rate limit over a longer
+// period of time
+func TestLongStreamRatelimited(t *testing.T) {
+	t.Parallel()
+
+	unittest.SkipUnless(t, unittest.TEST_LONG_RUNNING, "skipping long stream rate limit test")
+
+	ctx := context.Background()
+	timeout := subscription.DefaultSendTimeout
+
+	limit := 5.0
+	duration := 30 * time.Second
+
+	sub := submock.NewStreamable(t)
+	sub.On("ID").Return(uuid.NewString())
+
+	broadcaster := engine.NewBroadcaster()
+	streamer := subscription.NewStreamer(unittest.Logger(), broadcaster, timeout, limit, sub)
+
+	var nextCalls, sendCalls int
+	sub.On("Next", mock.Anything).Return("data", nil).Run(func(args mock.Arguments) {
+		nextCalls++
+	})
+	sub.On("Send", mock.Anything, "data", timeout).Return(nil).Run(func(args mock.Arguments) {
+		sendCalls++
+	})
+
+	broadcaster.Publish()
+
+	unittest.RequireNeverReturnBefore(t, func() {
+		streamer.Stream(ctx)
+	}, duration, "streamer.Stream() should never stop")
+
+	// check the number of calls and make sure they are sane.
+	// over a longer time, the rate limit should be more accurate
+	target := int(limit) * int(duration/time.Second)
+	diff := 5 // 5 ~= 3% of 150 expected
+
+	assert.LessOrEqual(t, nextCalls, target+diff)
+	assert.GreaterOrEqual(t, nextCalls, target-diff)
+
+	assert.LessOrEqual(t, sendCalls, target+diff)
+	assert.GreaterOrEqual(t, sendCalls, target-diff)
+}
diff --git a/engine/access/subscription/streaming_data.go b/engine/access/subscription/streaming_data.go
new file mode 100644
index 00000000000..90fc9d0f788
--- /dev/null
+++ b/engine/access/subscription/streaming_data.go
@@ -0,0 +1,18 @@
+package subscription
+
+import (
+	"sync/atomic"
+)
+
+// StreamingData represents common streaming data configuration for access and state_stream handlers.
+type StreamingData struct {
+	MaxStreams  int32
+	StreamCount atomic.Int32
+}
+
+func NewStreamingData(maxStreams uint32) StreamingData {
+	return StreamingData{
+		MaxStreams:  int32(maxStreams),
+		StreamCount: atomic.Int32{},
+	}
+}
diff --git a/engine/access/subscription/subscribe_handler.go b/engine/access/subscription/subscribe_handler.go
new file mode 100644
index 00000000000..7b72dffad8d
--- /dev/null
+++ b/engine/access/subscription/subscribe_handler.go
@@ -0,0 +1,64 @@
+package subscription
+
+import (
+	"context"
+	"time"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine"
+)
+
+// SubscriptionHandler represents common streaming data configuration for creating streaming subscriptions.
+type SubscriptionHandler struct {
+	log zerolog.Logger
+
+	broadcaster *engine.Broadcaster
+
+	sendTimeout    time.Duration
+	responseLimit  float64
+	sendBufferSize int
+}
+
+// NewSubscriptionHandler creates a new SubscriptionHandler instance.
+//
+// Parameters:
+// - log: The logger to use for logging.
+// - broadcaster: The engine broadcaster for publishing notifications.
+// - sendTimeout: The duration after which a send operation will timeout.
+// - responseLimit: The maximum number of responses per second allowed on a single stream (0 means no limit).
+// - sendBufferSize: The size of the response buffer for sending messages to the client.
+//
+// Returns a new SubscriptionHandler instance.
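Before the constructor that follows, a usage sketch putting the pieces together: how a backend might wire the handler with the package defaults. The logger, broadcaster, and `getData` values are assumed inputs, not part of this patch:

```go
package example

import (
	"context"

	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/engine"
	"github.com/onflow/flow-go/engine/access/subscription"
)

// subscribeFromHeight starts a height-based subscription using the package
// defaults; Subscribe launches one Streamer goroutine per subscription.
func subscribeFromHeight(
	ctx context.Context,
	log zerolog.Logger,
	broadcaster *engine.Broadcaster,
	startHeight uint64,
	getData subscription.GetDataByHeightFunc,
) subscription.Subscription {
	handler := subscription.NewSubscriptionHandler(
		log,
		broadcaster,
		subscription.DefaultSendTimeout,
		subscription.DefaultResponseLimit,
		subscription.DefaultSendBufferSize,
	)
	return handler.Subscribe(ctx, startHeight, getData)
}
```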
+func NewSubscriptionHandler( + log zerolog.Logger, + broadcaster *engine.Broadcaster, + sendTimeout time.Duration, + responseLimit float64, + sendBufferSize uint, +) *SubscriptionHandler { + return &SubscriptionHandler{ + log: log, + broadcaster: broadcaster, + sendTimeout: sendTimeout, + responseLimit: responseLimit, + sendBufferSize: int(sendBufferSize), + } +} + +// Subscribe creates and starts a new subscription. +// +// Parameters: +// - ctx: The context for the operation. +// - startHeight: The height to start subscription from. +// - getData: The function to retrieve data by height. +func (h *SubscriptionHandler) Subscribe( + ctx context.Context, + startHeight uint64, + getData GetDataByHeightFunc, +) Subscription { + sub := NewHeightBasedSubscription(h.sendBufferSize, startHeight, getData) + go NewStreamer(h.log, h.broadcaster, h.sendTimeout, h.responseLimit, sub).Stream(ctx) + + return sub +} diff --git a/engine/access/subscription/subscription.go b/engine/access/subscription/subscription.go new file mode 100644 index 00000000000..3c5a12cee31 --- /dev/null +++ b/engine/access/subscription/subscription.go @@ -0,0 +1,192 @@ +package subscription + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/uuid" + "google.golang.org/grpc/status" +) + +const ( + // DefaultSendBufferSize is the default buffer size for the subscription's send channel. + // The size is chosen to balance memory overhead from each subscription with performance when + // streaming existing data. + DefaultSendBufferSize = 10 + + // DefaultMaxGlobalStreams defines the default max number of streams that can be open at the same time. + DefaultMaxGlobalStreams = 1000 + + // DefaultCacheSize defines the default max number of objects for the execution data cache. + DefaultCacheSize = 100 + + // DefaultSendTimeout is the default timeout for sending a message to the client. After the timeout + // expires, the connection is closed. + DefaultSendTimeout = 30 * time.Second + + // DefaultResponseLimit is default max responses per second allowed on a stream. After exceeding + // the limit, the stream is paused until more capacity is available. + DefaultResponseLimit = float64(0) + + // DefaultHeartbeatInterval specifies the block interval at which heartbeat messages should be sent. + DefaultHeartbeatInterval = 1 +) + +// GetDataByHeightFunc is a callback used by subscriptions to retrieve data for a given height. +// Expected errors: +// - storage.ErrNotFound +// - execution_data.BlobNotFoundError +// All other errors are considered exceptions +type GetDataByHeightFunc func(ctx context.Context, height uint64) (interface{}, error) + +// Subscription represents a streaming request, and handles the communication between the grpc handler +// and the backend implementation. +type Subscription interface { + // ID returns the unique identifier for this subscription used for logging + ID() string + + // Channel returns the channel from which subscription data can be read + Channel() <-chan interface{} + + // Err returns the error that caused the subscription to fail + Err() error +} + +// Streamable represents a subscription that can be streamed. 
+type Streamable interface { + // ID returns the subscription ID + // Note: this is not a cryptographic hash + ID() string + // Close is called when a subscription ends gracefully, and closes the subscription channel + Close() + // Fail registers an error and closes the subscription channel + Fail(error) + // Send sends a value to the subscription channel or returns an error + // Expected errors: + // - context.DeadlineExceeded if send timed out + // - context.Canceled if the client disconnected + Send(context.Context, interface{}, time.Duration) error + // Next returns the value for the next height from the subscription + Next(context.Context) (interface{}, error) +} + +var _ Subscription = (*SubscriptionImpl)(nil) + +type SubscriptionImpl struct { + id string + + // ch is the channel used to pass data to the receiver + ch chan interface{} + + // err is the error that caused the subscription to fail + err error + + // once is used to ensure that the channel is only closed once + once sync.Once + + // closed tracks whether or not the subscription has been closed + closed bool +} + +func NewSubscription(bufferSize int) *SubscriptionImpl { + return &SubscriptionImpl{ + id: uuid.New().String(), + ch: make(chan interface{}, bufferSize), + } +} + +// ID returns the subscription ID +// Note: this is not a cryptographic hash +func (sub *SubscriptionImpl) ID() string { + return sub.id +} + +// Channel returns the channel from which subscription data can be read +func (sub *SubscriptionImpl) Channel() <-chan interface{} { + return sub.ch +} + +// Err returns the error that caused the subscription to fail +func (sub *SubscriptionImpl) Err() error { + return sub.err +} + +// Fail registers an error and closes the subscription channel +func (sub *SubscriptionImpl) Fail(err error) { + sub.err = err + sub.Close() +} + +// Close is called when a subscription ends gracefully, and closes the subscription channel +func (sub *SubscriptionImpl) Close() { + sub.once.Do(func() { + close(sub.ch) + sub.closed = true + }) +} + +// Send sends a value to the subscription channel or returns an error +// Expected errors: +// - context.DeadlineExceeded if send timed out +// - context.Canceled if the client disconnected +func (sub *SubscriptionImpl) Send(ctx context.Context, v interface{}, timeout time.Duration) error { + if sub.closed { + return fmt.Errorf("subscription closed") + } + + waitCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + select { + case <-waitCtx.Done(): + return waitCtx.Err() + case sub.ch <- v: + return nil + } +} + +// NewFailedSubscription returns a new subscription that has already failed with the given error and +// message. This is useful to return an error that occurred during subscription setup. 
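A typical use of the failed-subscription constructor below: returning a ready-made failed stream when request validation fails. The `validate` callback is hypothetical caller code:

```go
package main

import (
	"fmt"

	"github.com/onflow/flow-go/engine/access/subscription"
)

// failIfInvalid returns an already-failed subscription when validation
// fails, so setup errors reach the client through the normal Err() path.
func failIfInvalid(validate func() error) subscription.Subscription {
	if err := validate(); err != nil {
		// gRPC status codes in err are preserved; other errors are wrapped with %w
		return subscription.NewFailedSubscription(err, "invalid request")
	}
	return subscription.NewSubscription(subscription.DefaultSendBufferSize)
}

func main() {
	sub := failIfInvalid(func() error { return fmt.Errorf("bad filter") })
	fmt.Println(sub.Err()) // invalid request: bad filter
}
```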
+func NewFailedSubscription(err error, msg string) *SubscriptionImpl {
+	sub := NewSubscription(0)
+
+	// if error is a grpc error, wrap it to preserve the error code
+	if st, ok := status.FromError(err); ok {
+		sub.Fail(status.Errorf(st.Code(), "%s: %s", msg, st.Message()))
+		return sub
+	}
+
+	// otherwise, wrap the error normally
+	sub.Fail(fmt.Errorf("%s: %w", msg, err))
+	return sub
+}
+
+var _ Subscription = (*HeightBasedSubscription)(nil)
+var _ Streamable = (*HeightBasedSubscription)(nil)
+
+// HeightBasedSubscription is a subscription that retrieves data sequentially by block height
+type HeightBasedSubscription struct {
+	*SubscriptionImpl
+	nextHeight uint64
+	getData    GetDataByHeightFunc
+}
+
+func NewHeightBasedSubscription(bufferSize int, firstHeight uint64, getData GetDataByHeightFunc) *HeightBasedSubscription {
+	return &HeightBasedSubscription{
+		SubscriptionImpl: NewSubscription(bufferSize),
+		nextHeight:       firstHeight,
+		getData:          getData,
+	}
+}
+
+// Next returns the value for the next height from the subscription
+func (s *HeightBasedSubscription) Next(ctx context.Context) (interface{}, error) {
+	v, err := s.getData(ctx, s.nextHeight)
+	if err != nil {
+		return nil, fmt.Errorf("could not get data for height %d: %w", s.nextHeight, err)
+	}
+	s.nextHeight++
+	return v, nil
+}
diff --git a/engine/access/state_stream/subscription_test.go b/engine/access/subscription/subscription_test.go
similarity index 90%
rename from engine/access/state_stream/subscription_test.go
rename to engine/access/subscription/subscription_test.go
index d5ef7296cf3..a86422c17fd 100644
--- a/engine/access/state_stream/subscription_test.go
+++ b/engine/access/subscription/subscription_test.go
@@ -1,4 +1,4 @@
-package state_stream_test
+package subscription_test
 
 import (
 	"context"
@@ -10,7 +10,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/onflow/flow-go/engine/access/state_stream"
+	"github.com/onflow/flow-go/engine/access/subscription"
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
@@ -20,7 +20,7 @@ func TestSubscription_SendReceive(t *testing.T) {
 
 	ctx := context.Background()
 
-	sub := state_stream.NewSubscription(1)
+	sub := subscription.NewSubscription(1)
 
 	assert.NotEmpty(t, sub.ID())
 
@@ -66,7 +66,7 @@ func TestSubscription_Failures(t *testing.T) {
 
 	// make sure closing a subscription twice does not cause a panic
 	t.Run("close only called once", func(t *testing.T) {
-		sub := state_stream.NewSubscription(1)
+		sub := subscription.NewSubscription(1)
 		sub.Close()
 		sub.Close()
 
@@ -75,7 +75,7 @@
 	// make sure failing and closing the same subscription does not cause a panic
 	t.Run("close only called once with fail", func(t *testing.T) {
-		sub := state_stream.NewSubscription(1)
+		sub := subscription.NewSubscription(1)
 		sub.Fail(testErr)
 		sub.Close()
 
@@ -84,7 +84,7 @@
 	// make sure an error is returned when sending on a closed subscription
 	t.Run("send after closed returns an error", func(t *testing.T) {
-		sub := state_stream.NewSubscription(1)
+		sub := subscription.NewSubscription(1)
 		sub.Fail(testErr)
 
 		err := sub.Send(context.Background(), "test", 10*time.Millisecond)
@@ -117,7 +117,7 @@ func TestHeightBasedSubscription(t *testing.T) {
 	}
 
 	// search from [start, last], checking the correct data is returned
-	sub := state_stream.NewHeightBasedSubscription(1, start, getData)
+	sub := subscription.NewHeightBasedSubscription(1, start, getData)
 	for i := start; i <= last;
i++ { data, err := sub.Next(ctx) if err != nil { diff --git a/engine/access/subscription/tracker/base_tracker.go b/engine/access/subscription/tracker/base_tracker.go new file mode 100644 index 00000000000..643a753050d --- /dev/null +++ b/engine/access/subscription/tracker/base_tracker.go @@ -0,0 +1,153 @@ +package tracker + +import ( + "context" + "fmt" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/common/rpc" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// BaseTracker is an interface for a tracker that provides base GetStartHeight method related to both blocks and execution data tracking. +type BaseTracker interface { + // GetStartHeightFromBlockID returns the start height based on the provided starting block ID. + // + // Parameters: + // - startBlockID: The identifier of the starting block. + // + // Returns: + // - uint64: The start height associated with the provided block ID. + // - error: An error indicating any issues with retrieving the start height. + // + // Expected errors during normal operation: + // - codes.NotFound - if the block was not found in storage + // - codes.Internal - for any other error + GetStartHeightFromBlockID(flow.Identifier) (uint64, error) + + // GetStartHeightFromHeight returns the start height based on the provided starting block height. + // + // Parameters: + // - startHeight: The height of the starting block. + // + // Returns: + // - uint64: The start height associated with the provided block height. + // - error: An error indicating any issues with retrieving the start height. + // + // Expected errors during normal operation: + // - codes.InvalidArgument - if the start height is less than the root block height. + // - codes.NotFound - if the header was not found in storage. + GetStartHeightFromHeight(uint64) (uint64, error) + + // GetStartHeightFromLatest returns the start height based on the latest sealed block. + // + // Parameters: + // - ctx: Context for the operation. + // + // No errors are expected during normal operation. + GetStartHeightFromLatest(context.Context) (uint64, error) +} + +var _ BaseTracker = (*BaseTrackerImpl)(nil) + +// BaseTrackerImpl is an implementation of the BaseTracker interface. +type BaseTrackerImpl struct { + rootBlockHeight uint64 + state protocol.State + headers storage.Headers +} + +// NewBaseTrackerImpl creates a new instance of BaseTrackerImpl. +// +// Parameters: +// - rootBlockHeight: The root block height, which serves as the baseline for calculating the start height. +// - state: The protocol state used for retrieving block information. +// - headers: The storage headers for accessing block headers. +// +// Returns: +// - *BaseTrackerImpl: A new instance of BaseTrackerImpl. +func NewBaseTrackerImpl( + rootBlockHeight uint64, + state protocol.State, + headers storage.Headers, +) *BaseTrackerImpl { + return &BaseTrackerImpl{ + rootBlockHeight: rootBlockHeight, + state: state, + headers: headers, + } +} + +// GetStartHeightFromBlockID returns the start height based on the provided starting block ID. +// +// Parameters: +// - startBlockID: The identifier of the starting block. +// +// Returns: +// - uint64: The start height associated with the provided block ID. +// - error: An error indicating any issues with retrieving the start height. 
+// +// Expected errors during normal operation: +// - codes.NotFound - if the block was not found in storage +// - codes.Internal - for any other error +func (b *BaseTrackerImpl) GetStartHeightFromBlockID(startBlockID flow.Identifier) (uint64, error) { + header, err := b.headers.ByBlockID(startBlockID) + if err != nil { + return 0, rpc.ConvertStorageError(fmt.Errorf("could not get header for block %v: %w", startBlockID, err)) + } + + // ensure that the resolved start height is available + return header.Height, nil +} + +// GetStartHeightFromHeight returns the start height based on the provided starting block height. +// +// Parameters: +// - startHeight: The height of the starting block. +// +// Returns: +// - uint64: The start height associated with the provided block height. +// - error: An error indicating any issues with retrieving the start height. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if the start height is less than the root block height. +// - codes.NotFound - if the header was not found in storage. +func (b *BaseTrackerImpl) GetStartHeightFromHeight(startHeight uint64) (uint64, error) { + if startHeight < b.rootBlockHeight { + return 0, status.Errorf(codes.InvalidArgument, "start height must be greater than or equal to the root height %d", b.rootBlockHeight) + } + + header, err := b.headers.ByHeight(startHeight) + if err != nil { + return 0, rpc.ConvertStorageError(fmt.Errorf("could not get header for height %d: %w", startHeight, err)) + } + + // ensure that the resolved start height is available + return header.Height, nil +} + +// GetStartHeightFromLatest returns the start height based on the latest sealed block. +// +// Parameters: +// - ctx: Context for the operation. +// +// No errors are expected during normal operation. +func (b *BaseTrackerImpl) GetStartHeightFromLatest(ctx context.Context) (uint64, error) { + // if no start block was provided, use the latest sealed block + header, err := b.state.Sealed().Head() + if err != nil { + // In the RPC engine, if we encounter an error from the protocol state indicating state corruption, + // we should halt processing requests + err := irrecoverable.NewExceptionf("failed to lookup sealed header: %w", err) + irrecoverable.Throw(ctx, err) + return 0, err + } + + return header.Height, nil +} diff --git a/engine/access/subscription/tracker/block_tracker.go b/engine/access/subscription/tracker/block_tracker.go new file mode 100644 index 00000000000..ecdfa8744ca --- /dev/null +++ b/engine/access/subscription/tracker/block_tracker.go @@ -0,0 +1,119 @@ +package tracker + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// BlockTracker is an interface for tracking blocks and handling block-related operations. +type BlockTracker interface { + BaseTracker + + // GetHighestHeight returns the highest height based on the specified block status which could be only BlockStatusSealed + // or BlockStatusFinalized. + // No errors are expected during normal operation. + GetHighestHeight(flow.BlockStatus) (uint64, error) + + // ProcessOnFinalizedBlock drives the subscription logic when a block is finalized. + // The input to this callback is treated as trusted. 
+	// `OnFinalizedBlock` notifications from the node-internal consensus instance.
+	// No errors are expected during normal operation.
+	ProcessOnFinalizedBlock() error
+}
+
+var _ BlockTracker = (*BlockTrackerImpl)(nil)
+
+// BlockTrackerImpl is an implementation of the BlockTracker interface.
+type BlockTrackerImpl struct {
+	BaseTracker
+	state       protocol.State
+	broadcaster *engine.Broadcaster
+
+	// finalizedHighestHeight contains the highest consecutive finalized block height for which we have received a new notification.
+	finalizedHighestHeight counters.StrictMonotonicCounter
+	// sealedHighestHeight contains the highest consecutive sealed block height for which we have received a new notification.
+	sealedHighestHeight counters.StrictMonotonicCounter
+}
+
+// NewBlockTracker creates a new BlockTrackerImpl instance.
+//
+// No errors are expected during normal operation.
+func NewBlockTracker(
+	state protocol.State,
+	sealedRootHeight uint64,
+	headers storage.Headers,
+	broadcaster *engine.Broadcaster,
+) (*BlockTrackerImpl, error) {
+	lastFinalized, err := state.Final().Head()
+	if err != nil {
+		// this header MUST exist in the db, otherwise the node likely has inconsistent state.
+		return nil, irrecoverable.NewExceptionf("could not retrieve last finalized block: %w", err)
+	}
+
+	lastSealed, err := state.Sealed().Head()
+	if err != nil {
+		// this header MUST exist in the db, otherwise the node likely has inconsistent state.
+		return nil, irrecoverable.NewExceptionf("could not retrieve last sealed block: %w", err)
+	}
+
+	return &BlockTrackerImpl{
+		BaseTracker:            NewBaseTrackerImpl(sealedRootHeight, state, headers),
+		state:                  state,
+		finalizedHighestHeight: counters.NewMonotonicCounter(lastFinalized.Height),
+		sealedHighestHeight:    counters.NewMonotonicCounter(lastSealed.Height),
+		broadcaster:            broadcaster,
+	}, nil
+}
+
+// GetHighestHeight returns the highest height based on the specified block status.
+//
+// Parameters:
+// - blockStatus: The status of the block; callers must pass either flow.BlockStatusSealed or flow.BlockStatusFinalized.
+//
+// Expected errors during normal operation:
+// - codes.InvalidArgument - if block status is flow.BlockStatusUnknown.
+func (b *BlockTrackerImpl) GetHighestHeight(blockStatus flow.BlockStatus) (uint64, error) {
+	switch blockStatus {
+	case flow.BlockStatusFinalized:
+		return b.finalizedHighestHeight.Value(), nil
+	case flow.BlockStatusSealed:
+		return b.sealedHighestHeight.Value(), nil
+	}
+	return 0, status.Errorf(codes.InvalidArgument, "invalid block status: %s", blockStatus)
+}
+
+// ProcessOnFinalizedBlock drives the subscription logic when a block is finalized.
+// The input to this callback is treated as trusted. This method should be executed on
+// `OnFinalizedBlock` notifications from the node-internal consensus instance.
+// No errors are expected during normal operation. Any errors encountered should be
+// treated as an exception.
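+//
+// For illustration only, a hypothetical subscriber to node-internal finalization
+// events might forward notifications to the tracker like this (the onBlockFinalized
+// helper and its signature are assumptions, not part of this change):
+//
+//	func onBlockFinalized(ctx irrecoverable.SignalerContext, tracker BlockTracker) {
+//		if err := tracker.ProcessOnFinalizedBlock(); err != nil {
+//			// no errors are expected during normal operation, so escalate any error
+//			ctx.Throw(fmt.Errorf("failed to process finalized block: %w", err))
+//		}
+//	}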
+func (b *BlockTrackerImpl) ProcessOnFinalizedBlock() error {
+	// get the finalized header from state
+	finalizedHeader, err := b.state.Final().Head()
+	if err != nil {
+		return irrecoverable.NewExceptionf("unable to get latest finalized header: %w", err)
+	}
+
+	if !b.finalizedHighestHeight.Set(finalizedHeader.Height) {
+		return nil
+	}
+
+	// get the latest sealed header from state
+	sealedHeader, err := b.state.Sealed().Head()
+	if err != nil {
+		return irrecoverable.NewExceptionf("unable to get latest sealed header: %w", err)
+	}
+
+	_ = b.sealedHighestHeight.Set(sealedHeader.Height)
+	// always publish since there is also a new finalized block.
+	b.broadcaster.Publish()
+
+	return nil
+}
diff --git a/engine/access/subscription/tracker/execution_data_tracker.go b/engine/access/subscription/tracker/execution_data_tracker.go
new file mode 100644
index 00000000000..f797e775652
--- /dev/null
+++ b/engine/access/subscription/tracker/execution_data_tracker.go
@@ -0,0 +1,316 @@
+package tracker
+
+import (
+	"context"
+
+	"github.com/rs/zerolog"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine"
+	"github.com/onflow/flow-go/engine/common/rpc"
+	"github.com/onflow/flow-go/fvm/errors"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/counters"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
+	"github.com/onflow/flow-go/module/state_synchronization"
+	"github.com/onflow/flow-go/module/state_synchronization/indexer"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/logging"
+)
+
+const (
+	// maxIndexBlockDiff is the maximum difference between the highest indexed block height and the
+	// provided start height to allow when starting a new stream.
+	// This accounts for small delays in indexing, or for requests made to different ANs behind a
+	// load balancer. The diff will result in the stream waiting a few blocks before starting.
+	maxIndexBlockDiff = 30
+)
+
+// ExecutionDataTracker is an interface for tracking the highest consecutive block height for which we have received a
+// new Execution Data notification.
+type ExecutionDataTracker interface {
+	BaseTracker
+
+	// GetStartHeight returns the start height to use when searching.
+	// Only one of startBlockID and startHeight may be set. Otherwise, an InvalidArgument error is returned.
+	// If a block is provided and does not exist, a NotFound error is returned.
+	// If neither startBlockID nor startHeight is provided, the latest sealed block is used.
+	//
+	// Parameters:
+	// - ctx: Context for the operation.
+	// - startBlockID: The identifier of the starting block. If provided, startHeight should be 0.
+	// - startHeight: The height of the starting block. If provided, startBlockID should be flow.ZeroID.
+	//
+	// Returns:
+	// - uint64: The start height for searching.
+	// - error: An error indicating the result of the operation, if any.
+	//
+	// Expected errors during normal operation:
+	// - codes.InvalidArgument - if both startBlockID and startHeight are provided, if the start height is less than the root block height,
+	// if the start height is out of bounds based on indexed heights (when index is used).
+	// - codes.NotFound - if a block is provided and does not exist.
+	// - codes.Internal - if there is an internal error.
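+	//
+	// A sketch of the three accepted call shapes (tracker, ctx, blockID, and height
+	// are hypothetical variables, shown for illustration only):
+	//
+	//	h1, err := tracker.GetStartHeight(ctx, blockID, 0)          // by block ID
+	//	h2, err := tracker.GetStartHeight(ctx, flow.ZeroID, height) // by height
+	//	h3, err := tracker.GetStartHeight(ctx, flow.ZeroID, 0)      // latest sealed block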
+	GetStartHeight(context.Context, flow.Identifier, uint64) (uint64, error)
+
+	// GetHighestHeight returns the highest height that we have consecutive execution data for.
+	GetHighestHeight() uint64
+
+	// OnExecutionData is used to notify the tracker when new execution data is received.
+	OnExecutionData(*execution_data.BlockExecutionDataEntity)
+}
+
+var _ ExecutionDataTracker = (*ExecutionDataTrackerImpl)(nil)
+
+// ExecutionDataTrackerImpl is an implementation of the ExecutionDataTracker interface.
+type ExecutionDataTrackerImpl struct {
+	BaseTracker
+	log           zerolog.Logger
+	headers       storage.Headers
+	broadcaster   *engine.Broadcaster
+	indexReporter state_synchronization.IndexReporter
+	useIndex      bool
+
+	// highestHeight contains the highest block height for which we have consecutive execution data
+	highestHeight counters.StrictMonotonicCounter
+}
+
+// NewExecutionDataTracker creates a new ExecutionDataTrackerImpl instance.
+//
+// Parameters:
+// - log: The logger to use for logging.
+// - state: The protocol state used for retrieving block information.
+// - rootHeight: The root block height, serving as the baseline for calculating the start height.
+// - headers: The storage headers for accessing block headers.
+// - broadcaster: The engine broadcaster for publishing notifications.
+// - highestAvailableFinalizedHeight: The highest available finalized block height.
+// - indexReporter: The index reporter for checking indexed block heights.
+// - useIndex: A flag indicating whether to use indexed block heights for validation.
+//
+// Returns:
+// - *ExecutionDataTrackerImpl: A new instance of ExecutionDataTrackerImpl.
+func NewExecutionDataTracker(
+	log zerolog.Logger,
+	state protocol.State,
+	rootHeight uint64,
+	headers storage.Headers,
+	broadcaster *engine.Broadcaster,
+	highestAvailableFinalizedHeight uint64,
+	indexReporter state_synchronization.IndexReporter,
+	useIndex bool,
+) *ExecutionDataTrackerImpl {
+	return &ExecutionDataTrackerImpl{
+		BaseTracker:   NewBaseTrackerImpl(rootHeight, state, headers),
+		log:           log,
+		headers:       headers,
+		broadcaster:   broadcaster,
+		highestHeight: counters.NewMonotonicCounter(highestAvailableFinalizedHeight),
+		indexReporter: indexReporter,
+		useIndex:      useIndex,
+	}
+}
+
+// GetStartHeight returns the start height to use when searching.
+// Only one of startBlockID and startHeight may be set. Otherwise, an InvalidArgument error is returned.
+// If a block is provided and does not exist, a NotFound error is returned.
+// If neither startBlockID nor startHeight is provided, the latest sealed block is used.
+//
+// Parameters:
+// - ctx: Context for the operation.
+// - startBlockID: The identifier of the starting block. If provided, startHeight should be 0.
+// - startHeight: The height of the starting block. If provided, startBlockID should be flow.ZeroID.
+//
+// Returns:
+// - uint64: The start height for searching.
+// - error: An error indicating the result of the operation, if any.
+//
+// Expected errors during normal operation:
+// - codes.InvalidArgument - if both startBlockID and startHeight are provided, if the start height is less than the root block height,
+// if the start height is out of bounds based on indexed heights (when index is used).
+// - codes.NotFound - if a block is provided and does not exist.
+// - codes.Internal - if there is an internal error.
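+//
+// The implementation below dispatches on the arguments in order: a non-zero
+// startBlockID takes precedence, then a non-zero startHeight, and otherwise the
+// height of the latest sealed block is used.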
+func (e *ExecutionDataTrackerImpl) GetStartHeight(ctx context.Context, startBlockID flow.Identifier, startHeight uint64) (uint64, error) { + if startBlockID != flow.ZeroID && startHeight > 0 { + return 0, status.Errorf(codes.InvalidArgument, "only one of start block ID and start height may be provided") + } + + // get the start height based on the provided starting block ID + if startBlockID != flow.ZeroID { + return e.GetStartHeightFromBlockID(startBlockID) + } + + // get start height based on the provided starting block height + if startHeight > 0 { + return e.GetStartHeightFromHeight(startHeight) + } + + return e.GetStartHeightFromLatest(ctx) +} + +// GetStartHeightFromBlockID returns the start height based on the provided starting block ID. +// +// Parameters: +// - startBlockID: The identifier of the starting block. +// +// Returns: +// - uint64: The start height associated with the provided block ID. +// - error: An error indicating any issues with retrieving the start height. +// +// Expected errors during normal operation: +// - codes.NotFound - if the block was not found in storage +// - codes.InvalidArgument - if the start height is out of bounds based on indexed heights. +// - codes.FailedPrecondition - if the index reporter is not ready yet. +// - codes.Internal - for any other error during validation. +func (e *ExecutionDataTrackerImpl) GetStartHeightFromBlockID(startBlockID flow.Identifier) (uint64, error) { + // get start height based on the provided starting block id + height, err := e.BaseTracker.GetStartHeightFromBlockID(startBlockID) + if err != nil { + return 0, err + } + + // ensure that the resolved start height is available + return e.checkStartHeight(height) +} + +// GetStartHeightFromHeight returns the start height based on the provided starting block height. +// +// Parameters: +// - startHeight: The height of the starting block. +// +// Returns: +// - uint64: The start height associated with the provided block height. +// - error: An error indicating any issues with retrieving the start height. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if the start height is less than the root block height, if the start height is out of bounds based on indexed heights +// - codes.NotFound - if the header was not found in storage. +// - codes.FailedPrecondition - if the index reporter is not ready yet. +// - codes.Internal - for any other error during validation. +func (e *ExecutionDataTrackerImpl) GetStartHeightFromHeight(startHeight uint64) (uint64, error) { + // get start height based on the provided starting block height + height, err := e.BaseTracker.GetStartHeightFromHeight(startHeight) + if err != nil { + return 0, err + } + + // ensure that the resolved start height is available + return e.checkStartHeight(height) +} + +// GetStartHeightFromLatest returns the start height based on the latest sealed block. +// +// Parameters: +// - ctx: Context for the operation. +// +// Expected errors during normal operation: +// - codes.InvalidArgument - if the start height is out of bounds based on indexed heights. +// - codes.FailedPrecondition - if the index reporter is not ready yet. +// - codes.Internal - for any other error during validation. 
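+//
+// Like GetStartHeightFromBlockID and GetStartHeightFromHeight above, this method follows
+// a wrap-then-validate pattern: delegate resolution to the embedded BaseTracker, then run
+// the result through checkStartHeight. A condensed sketch of that shared shape
+// (illustrative only):
+//
+//	height, err := e.BaseTracker.GetStartHeightFromLatest(ctx) // or ...FromBlockID / ...FromHeight
+//	if err != nil {
+//		return 0, err
+//	}
+//	return e.checkStartHeight(height)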
+func (e *ExecutionDataTrackerImpl) GetStartHeightFromLatest(ctx context.Context) (uint64, error) {
+	// get start height based on the latest sealed block
+	height, err := e.BaseTracker.GetStartHeightFromLatest(ctx)
+	if err != nil {
+		return 0, err
+	}
+
+	// ensure that the resolved start height is available
+	return e.checkStartHeight(height)
+}
+
+// GetHighestHeight returns the highest height that we have consecutive execution data for.
+func (e *ExecutionDataTrackerImpl) GetHighestHeight() uint64 {
+	return e.highestHeight.Value()
+}
+
+// OnExecutionData is used to notify the tracker when new execution data is received.
+func (e *ExecutionDataTrackerImpl) OnExecutionData(executionData *execution_data.BlockExecutionDataEntity) {
+	log := e.log.With().Hex("block_id", logging.ID(executionData.BlockID)).Logger()
+
+	log.Trace().Msg("received execution data")
+
+	header, err := e.headers.ByBlockID(executionData.BlockID)
+	if err != nil {
+		// if the execution data is available, the block must be locally finalized
+		log.Fatal().Err(err).Msg("failed to notify of new execution data")
+		return
+	}
+
+	// set the highest height for which execution data is available.
+	_ = e.highestHeight.Set(header.Height)
+
+	e.broadcaster.Publish()
+}
+
+// checkStartHeight validates the provided start height and adjusts it if necessary based on the tracker's configuration.
+//
+// Parameters:
+// - height: The start height to be checked.
+//
+// Returns:
+// - uint64: The adjusted start height, if validation passes.
+// - error: An error indicating any issues with the provided start height.
+//
+// Validation Steps:
+// 1. If index usage is disabled, return the original height without further checks.
+// 2. Retrieve the lowest and highest indexed block heights.
+// 3. Check if the provided height is within the bounds of indexed heights.
+//    - If below the lowest indexed height, return codes.InvalidArgument error.
+//    - If above the highest indexed height plus maxIndexBlockDiff, return codes.InvalidArgument error.
+//
+// 4. If validation passes, return the adjusted start height.
+//
+// Expected errors during normal operation:
+// - codes.InvalidArgument - if the start height is out of bounds based on indexed heights.
+// - codes.FailedPrecondition - if the index reporter is not ready yet.
+// - codes.Internal - for any other error during validation.
+func (e *ExecutionDataTrackerImpl) checkStartHeight(height uint64) (uint64, error) {
+	if !e.useIndex {
+		return height, nil
+	}
+
+	lowestHeight, highestHeight, err := e.getIndexedHeightBound()
+	if err != nil {
+		return 0, err
+	}
+
+	if height < lowestHeight {
+		return 0, status.Errorf(codes.InvalidArgument, "start height %d is lower than lowest indexed height %d", height, lowestHeight)
+	}
+
+	// allow for a small difference between the highest indexed height and the provided height to
+	// account for small delays in indexing or requests made to different ANs behind a load balancer.
+	// this will just result in the stream waiting a few blocks before starting.
+	if height > highestHeight+maxIndexBlockDiff {
+		return 0, status.Errorf(codes.InvalidArgument, "start height %d is higher than highest indexed height %d (maxIndexBlockDiff: %d)", height, highestHeight, maxIndexBlockDiff)
+	}
+
+	return height, nil
+}
+
+// getIndexedHeightBound returns the lowest and highest indexed block heights.
+// Expected errors during normal operation:
+// - codes.FailedPrecondition - if the index reporter is not ready yet.
+// - codes.Internal - if there was any other error getting the heights.
+func (e *ExecutionDataTrackerImpl) getIndexedHeightBound() (uint64, uint64, error) { + lowestHeight, err := e.indexReporter.LowestIndexedHeight() + if err != nil { + if errors.Is(err, storage.ErrHeightNotIndexed) || errors.Is(err, indexer.ErrIndexNotInitialized) { + // the index is not ready yet, but likely will be eventually + return 0, 0, status.Errorf(codes.FailedPrecondition, "failed to get lowest indexed height: %v", err) + } + return 0, 0, rpc.ConvertError(err, "failed to get lowest indexed height", codes.Internal) + } + + highestHeight, err := e.indexReporter.HighestIndexedHeight() + if err != nil { + if errors.Is(err, storage.ErrHeightNotIndexed) || errors.Is(err, indexer.ErrIndexNotInitialized) { + // the index is not ready yet, but likely will be eventually + return 0, 0, status.Errorf(codes.FailedPrecondition, "failed to get highest indexed height: %v", err) + } + return 0, 0, rpc.ConvertError(err, "failed to get highest indexed height", codes.Internal) + } + + return lowestHeight, highestHeight, nil +} diff --git a/engine/access/subscription/tracker/mock/base_tracker.go b/engine/access/subscription/tracker/mock/base_tracker.go new file mode 100644 index 00000000000..1b3c125b5fb --- /dev/null +++ b/engine/access/subscription/tracker/mock/base_tracker.go @@ -0,0 +1,113 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// BaseTracker is an autogenerated mock type for the BaseTracker type +type BaseTracker struct { + mock.Mock +} + +// GetStartHeightFromBlockID provides a mock function with given fields: _a0 +func (_m *BaseTracker) GetStartHeightFromBlockID(_a0 flow.Identifier) (uint64, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetStartHeightFromBlockID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (uint64, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) uint64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStartHeightFromHeight provides a mock function with given fields: _a0 +func (_m *BaseTracker) GetStartHeightFromHeight(_a0 uint64) (uint64, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetStartHeightFromHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStartHeightFromLatest provides a mock function with given fields: _a0 +func (_m *BaseTracker) GetStartHeightFromLatest(_a0 context.Context) (uint64, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetStartHeightFromLatest") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + 
return r0, r1 +} + +// NewBaseTracker creates a new instance of BaseTracker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBaseTracker(t interface { + mock.TestingT + Cleanup(func()) +}) *BaseTracker { + mock := &BaseTracker{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/subscription/tracker/mock/block_tracker.go b/engine/access/subscription/tracker/mock/block_tracker.go new file mode 100644 index 00000000000..b1481656bd9 --- /dev/null +++ b/engine/access/subscription/tracker/mock/block_tracker.go @@ -0,0 +1,159 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// BlockTracker is an autogenerated mock type for the BlockTracker type +type BlockTracker struct { + mock.Mock +} + +// GetHighestHeight provides a mock function with given fields: _a0 +func (_m *BlockTracker) GetHighestHeight(_a0 flow.BlockStatus) (uint64, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetHighestHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(flow.BlockStatus) (uint64, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(flow.BlockStatus) uint64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(flow.BlockStatus) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStartHeightFromBlockID provides a mock function with given fields: _a0 +func (_m *BlockTracker) GetStartHeightFromBlockID(_a0 flow.Identifier) (uint64, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetStartHeightFromBlockID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (uint64, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) uint64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStartHeightFromHeight provides a mock function with given fields: _a0 +func (_m *BlockTracker) GetStartHeightFromHeight(_a0 uint64) (uint64, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetStartHeightFromHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStartHeightFromLatest provides a mock function with given fields: _a0 +func (_m *BlockTracker) GetStartHeightFromLatest(_a0 context.Context) (uint64, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetStartHeightFromLatest") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) 
error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProcessOnFinalizedBlock provides a mock function with no fields +func (_m *BlockTracker) ProcessOnFinalizedBlock() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ProcessOnFinalizedBlock") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewBlockTracker creates a new instance of BlockTracker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockTracker(t interface { + mock.TestingT + Cleanup(func()) +}) *BlockTracker { + mock := &BlockTracker{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/access/subscription/tracker/mock/execution_data_tracker.go b/engine/access/subscription/tracker/mock/execution_data_tracker.go new file mode 100644 index 00000000000..ccfad6bc8b4 --- /dev/null +++ b/engine/access/subscription/tracker/mock/execution_data_tracker.go @@ -0,0 +1,166 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + + mock "github.com/stretchr/testify/mock" +) + +// ExecutionDataTracker is an autogenerated mock type for the ExecutionDataTracker type +type ExecutionDataTracker struct { + mock.Mock +} + +// GetHighestHeight provides a mock function with no fields +func (_m *ExecutionDataTracker) GetHighestHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetHighestHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetStartHeight provides a mock function with given fields: _a0, _a1, _a2 +func (_m *ExecutionDataTracker) GetStartHeight(_a0 context.Context, _a1 flow.Identifier, _a2 uint64) (uint64, error) { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for GetStartHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64) (uint64, error)); ok { + return rf(_a0, _a1, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint64) uint64); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint64) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStartHeightFromBlockID provides a mock function with given fields: _a0 +func (_m *ExecutionDataTracker) GetStartHeightFromBlockID(_a0 flow.Identifier) (uint64, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for GetStartHeightFromBlockID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (uint64, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) uint64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStartHeightFromHeight provides a mock function with given fields: _a0 +func (_m 
*ExecutionDataTracker) GetStartHeightFromHeight(_a0 uint64) (uint64, error) {
+	ret := _m.Called(_a0)
+
+	if len(ret) == 0 {
+		panic("no return value specified for GetStartHeightFromHeight")
+	}
+
+	var r0 uint64
+	var r1 error
+	if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok {
+		return rf(_a0)
+	}
+	if rf, ok := ret.Get(0).(func(uint64) uint64); ok {
+		r0 = rf(_a0)
+	} else {
+		r0 = ret.Get(0).(uint64)
+	}
+
+	if rf, ok := ret.Get(1).(func(uint64) error); ok {
+		r1 = rf(_a0)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// GetStartHeightFromLatest provides a mock function with given fields: _a0
+func (_m *ExecutionDataTracker) GetStartHeightFromLatest(_a0 context.Context) (uint64, error) {
+	ret := _m.Called(_a0)
+
+	if len(ret) == 0 {
+		panic("no return value specified for GetStartHeightFromLatest")
+	}
+
+	var r0 uint64
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok {
+		return rf(_a0)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context) uint64); ok {
+		r0 = rf(_a0)
+	} else {
+		r0 = ret.Get(0).(uint64)
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+		r1 = rf(_a0)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// OnExecutionData provides a mock function with given fields: _a0
+func (_m *ExecutionDataTracker) OnExecutionData(_a0 *execution_data.BlockExecutionDataEntity) {
+	_m.Called(_a0)
+}
+
+// NewExecutionDataTracker creates a new instance of ExecutionDataTracker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewExecutionDataTracker(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *ExecutionDataTracker {
+	mock := &ExecutionDataTracker{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/engine/access/subscription/util.go b/engine/access/subscription/util.go
new file mode 100644
index 00000000000..6ecfeeb8b16
--- /dev/null
+++ b/engine/access/subscription/util.go
@@ -0,0 +1,57 @@
+package subscription
+
+import (
+	"fmt"
+)
+
+// HandleSubscription is a generic handler for subscriptions to a specific type. It continuously listens to the subscription channel,
+// handles the received responses, and sends the processed information to the client via the provided stream using handleResponse.
+//
+// Parameters:
+// - sub: The subscription.
+// - handleResponse: The function responsible for handling the response of the subscribed type.
+//
+// No errors are expected during normal operations.
+func HandleSubscription[T any](sub Subscription, handleResponse func(resp T) error) error {
+	for {
+		v, ok := <-sub.Channel()
+		if !ok {
+			if sub.Err() != nil {
+				return fmt.Errorf("stream encountered an error: %w", sub.Err())
+			}
+			return nil
+		}
+
+		resp, ok := v.(T)
+		if !ok {
+			return fmt.Errorf("unexpected response type: %T", v)
+		}
+
+		err := handleResponse(resp)
+		if err != nil {
+			return err
+		}
+	}
+}
+
+// HandleResponse processes a generic response of type T and sends it to the provided channel.
+//
+// Parameters:
+// - send: The channel to which the processed response is sent.
+// - transform: A function to transform the response into the expected interface{} type.
+//
+// No errors are expected during normal operations.
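+//
+// A sketch of how this composes with HandleSubscription (sub, send, and the
+// toMessage transform are hypothetical names, for illustration only):
+//
+//	err := HandleSubscription(sub, HandleResponse(send, func(b *flow.Block) (interface{}, error) {
+//		return toMessage(b), nil
+//	}))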
+func HandleResponse[T any](send chan<- interface{}, transform func(resp T) (interface{}, error)) func(resp T) error { + return func(response T) error { + // Transform the response + resp, err := transform(response) + if err != nil { + return fmt.Errorf("failed to transform response: %w", err) + } + + // send to the channel + send <- resp + + return nil + } +} diff --git a/engine/collection/compliance.go b/engine/collection/compliance.go index 934e852bb02..37d15bf3bdf 100644 --- a/engine/collection/compliance.go +++ b/engine/collection/compliance.go @@ -1,8 +1,8 @@ package collection import ( + "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/component" ) @@ -17,11 +17,13 @@ type Compliance interface { component.Component // OnClusterBlockProposal feeds a new block proposal into the processing pipeline. + // Inputs are structurally valid, but untrusted and must be validated by internal business logic. // Incoming proposals will be queued and eventually dispatched by worker. // This method is non-blocking. - OnClusterBlockProposal(proposal flow.Slashable[*messages.ClusterBlockProposal]) - // OnSyncedClusterBlock feeds a block obtained from sync proposal into the processing pipeline. + OnClusterBlockProposal(proposal flow.Slashable[*cluster.Proposal]) + // OnSyncedClusterBlock feeds a new block proposal into the processing pipeline. + // Inputs are structurally valid, but untrusted and must be validated by internal business logic. // Incoming proposals will be queued and eventually dispatched by worker. // This method is non-blocking. - OnSyncedClusterBlock(block flow.Slashable[*messages.ClusterBlockProposal]) + OnSyncedClusterBlock(block flow.Slashable[*cluster.Proposal]) } diff --git a/engine/collection/compliance/core.go b/engine/collection/compliance/core.go index 1bc3cbc410e..a969c94b7f3 100644 --- a/engine/collection/compliance/core.go +++ b/engine/collection/compliance/core.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package compliance import ( @@ -12,12 +10,12 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/consensus/sealing/counters" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/compliance" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/state" @@ -44,8 +42,8 @@ type Core struct { headers storage.Headers state clusterkv.MutableState // track latest finalized view/height - used to efficiently drop outdated or too-far-ahead blocks - finalizedView counters.StrictMonotonousCounter - finalizedHeight counters.StrictMonotonousCounter + finalizedView counters.StrictMonotonicCounter + finalizedHeight counters.StrictMonotonicCounter pending module.PendingClusterBlockBuffer // pending block cache sync module.BlockRequester hotstuff module.HotStuff @@ -70,14 +68,8 @@ func NewCore( hotstuff module.HotStuff, voteAggregator hotstuff.VoteAggregator, timeoutAggregator hotstuff.TimeoutAggregator, - opts ...compliance.Opt, + config compliance.Config, ) (*Core, error) { - - config := compliance.DefaultConfig() - for _, apply := 
range opts { - apply(&config) - } - c := &Core{ log: log.With().Str("cluster_compliance", "core").Logger(), config: config, @@ -111,51 +103,56 @@ func NewCore( // OnBlockProposal handles incoming block proposals. // No errors are expected during normal operation. -func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.ClusterBlockProposal) error { +func (c *Core) OnBlockProposal(proposal flow.Slashable[*cluster.Proposal]) error { startTime := time.Now() defer func() { c.hotstuffMetrics.BlockProcessingDuration(time.Since(startTime)) }() - block := proposal.Block.ToInternal() - header := block.Header - blockID := header.ID() + block := proposal.Message.Block + payload := proposal.Message.Block.Payload + blockID := proposal.Message.Block.ID() finalHeight := c.finalizedHeight.Value() finalView := c.finalizedView.Value() log := c.log.With(). - Hex("origin_id", originID[:]). - Str("chain_id", header.ChainID.String()). - Uint64("block_height", header.Height). - Uint64("block_view", header.View). + Hex("origin_id", proposal.OriginID[:]). + Uint64("block_height", block.Height). + Uint64("block_view", block.View). Hex("block_id", blockID[:]). - Hex("parent_id", header.ParentID[:]). - Hex("ref_block_id", block.Payload.ReferenceBlockID[:]). - Hex("collection_id", logging.Entity(block.Payload.Collection)). - Int("tx_count", block.Payload.Collection.Len()). - Time("timestamp", header.Timestamp). - Hex("proposer", header.ProposerID[:]). - Hex("parent_signer_indices", header.ParentVoterIndices). - Uint64("finalized_height", finalHeight). - Uint64("finalized_view", finalView). + Hex("ref_block_id", payload.ReferenceBlockID[:]). + Hex("collection_id", logging.Entity(payload.Collection)). + Int("tx_count", payload.Collection.Len()). + Hex("parent_id", block.ParentID[:]). + Hex("proposer", block.ProposerID[:]). + Time("timestamp", time.UnixMilli(int64(block.Timestamp)).UTC()). Logger() if log.Debug().Enabled() { - log = log.With().Strs("tx_ids", flow.IdentifierList(block.Payload.Collection.Light().Transactions).Strings()).Logger() + payloadHash := payload.Hash() + log = log.With(). + Uint64("finalized_height", finalHeight). + Uint64("finalized_view", finalView). + Str("chain_id", block.ChainID.String()). + Hex("payload_hash", payloadHash[:]). + Hex("parent_signer_indices", block.ParentVoterIndices). + Strs("tx_ids", flow.IdentifierList(payload.Collection.Light().Transactions).Strings()). + Logger() } log.Info().Msg("block proposal received") // drop proposals below the finalized threshold - if header.Height <= finalHeight || header.View <= finalView { + if block.Height <= finalHeight || block.View <= finalView { log.Debug().Msg("dropping block below finalized boundary") return nil } + skipNewProposalsThreshold := c.config.GetSkipNewProposalsThreshold() // ignore proposals which are too far ahead of our local finalized state // instead, rely on sync engine to catch up finalization more effectively, and avoid // large subtree of blocks to be cached. - if header.View > finalView+c.config.SkipNewProposalsThreshold { + if block.View > finalView+skipNewProposalsThreshold { log.Debug(). - Uint64("skip_new_proposals_threshold", c.config.SkipNewProposalsThreshold). + Uint64("skip_new_proposals_threshold", skipNewProposalsThreshold). Msg("dropping block too far ahead of locally finalized view") return nil } @@ -193,10 +190,10 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Clus // pending block, its parent block must have been requested. 
// if there was problem requesting its parent or ancestors, the sync engine's forward // syncing with range requests for finalized blocks will request for the blocks. - _, found := c.pending.ByID(header.ParentID) + _, found := c.pending.ByID(block.ParentID) if found { // add the block to the cache - _ = c.pending.Add(originID, block) + _ = c.pending.Add(proposal) c.mempoolMetrics.MempoolEntries(metrics.ResourceClusterProposal, c.pending.Size()) return nil @@ -205,15 +202,15 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Clus // if the proposal is connected to a block that is neither in the cache, nor // in persistent storage, its direct parent is missing; cache the proposal // and request the parent - exists, err := c.headers.Exists(header.ParentID) + exists, err := c.headers.Exists(block.ParentID) if err != nil { return fmt.Errorf("could not check parent exists: %w", err) } if !exists { - _ = c.pending.Add(originID, block) + _ = c.pending.Add(proposal) c.mempoolMetrics.MempoolEntries(metrics.ResourceClusterProposal, c.pending.Size()) - c.sync.RequestBlock(header.ParentID, header.Height-1) + c.sync.RequestBlock(block.ParentID, block.Height-1) log.Debug().Msg("requesting missing parent for proposal") return nil } @@ -223,7 +220,7 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Clus // execution of the entire recursion, which might include processing the // proposal's pending children. There is another span within // processBlockProposal that measures the time spent for a single proposal. - err = c.processBlockAndDescendants(block) + err = c.processBlockAndDescendants(proposal) c.mempoolMetrics.MempoolEntries(metrics.ResourceClusterProposal, c.pending.Size()) if err != nil { return fmt.Errorf("could not process block proposal: %w", err) @@ -236,24 +233,33 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Clus // its pending descendants. By induction, any child block of a // valid proposal is itself connected to the finalized state and can be // processed as well. -func (c *Core) processBlockAndDescendants(proposal *cluster.Block) error { - blockID := proposal.ID() +func (c *Core) processBlockAndDescendants(proposal flow.Slashable[*cluster.Proposal]) error { + block := proposal.Message.Block + blockID := proposal.Message.Block.ID() log := c.log.With(). Str("block_id", blockID.String()). - Uint64("block_height", proposal.Header.Height). - Uint64("block_view", proposal.Header.View). - Uint64("parent_view", proposal.Header.ParentView). + Uint64("block_height", block.Height). + Uint64("block_view", block.View). + Uint64("parent_view", block.ParentView). 
Logger() // process block itself - err := c.processBlockProposal(proposal) + err := c.processBlockProposal(proposal.Message) if err != nil { if checkForAndLogOutdatedInputError(err, log) || checkForAndLogUnverifiableInputError(err, log) { return nil } - if checkForAndLogInvalidInputError(err, log) { + if invalidBlockErr, ok := model.AsInvalidProposalError(err); ok { + log.Err(err).Msg("received invalid block from other node (potential slashing evidence?)") + + // notify consumers about invalid block + c.proposalViolationNotifier.OnInvalidBlockDetected(flow.Slashable[model.InvalidProposalError]{ + OriginID: proposal.OriginID, + Message: *invalidBlockErr, + }) + // notify VoteAggregator about the invalid block - err = c.voteAggregator.InvalidBlock(model.ProposalFromFlow(proposal.Header)) + err = c.voteAggregator.InvalidBlock(model.SignedProposalFromClusterBlock(proposal.Message)) if err != nil { if mempool.IsBelowPrunedThresholdError(err) { log.Warn().Msg("received invalid block, but is below pruned threshold") @@ -275,7 +281,7 @@ func (c *Core) processBlockAndDescendants(proposal *cluster.Block) error { return nil } for _, child := range children { - cpr := c.processBlockAndDescendants(child.Message) + cpr := c.processBlockAndDescendants(child) if cpr != nil { // unexpected error: potentially corrupted internal state => abort processing and escalate error return cpr @@ -292,35 +298,35 @@ func (c *Core) processBlockAndDescendants(proposal *cluster.Block) error { // the finalized state. // Expected errors during normal operations: // - engine.OutdatedInputError if the block proposal is outdated (e.g. orphaned) -// - engine.InvalidInputError if the block proposal is invalid +// - model.InvalidProposalError if the block proposal is invalid // - engine.UnverifiableInputError if the proposal cannot be validated -func (c *Core) processBlockProposal(proposal *cluster.Block) error { - header := proposal.Header - blockID := header.ID() +func (c *Core) processBlockProposal(proposal *cluster.Proposal) error { + block := proposal.Block + blockID := proposal.Block.ID() + payloadHash := proposal.Block.Payload.Hash() log := c.log.With(). - Str("chain_id", header.ChainID.String()). - Uint64("block_height", header.Height). - Uint64("block_view", header.View). + Str("chain_id", block.ChainID.String()). + Uint64("block_height", block.Height). + Uint64("block_view", block.View). Hex("block_id", blockID[:]). - Hex("parent_id", header.ParentID[:]). - Hex("payload_hash", header.PayloadHash[:]). - Time("timestamp", header.Timestamp). - Hex("proposer", header.ProposerID[:]). - Hex("parent_signer_indices", header.ParentVoterIndices). + Hex("parent_id", block.ParentID[:]). + Hex("payload_hash", payloadHash[:]). + Time("timestamp", time.UnixMilli(int64(block.Timestamp)).UTC()). + Hex("proposer", block.ProposerID[:]). + Hex("parent_signer_indices", block.ParentVoterIndices). 
Logger() - log.Info().Msg("processing block proposal") + log.Debug().Msg("processing block proposal") - hotstuffProposal := model.ProposalFromFlow(header) + hotstuffProposal := model.SignedProposalFromClusterBlock(proposal) err := c.validator.ValidateProposal(hotstuffProposal) if err != nil { - if invalidBlockErr, ok := model.AsInvalidProposalError(err); ok { - c.proposalViolationNotifier.OnInvalidBlockDetected(*invalidBlockErr) - return engine.NewInvalidInputErrorf("invalid block proposal: %w", err) + if model.IsInvalidProposalError(err) { + return err } if errors.Is(err, model.ErrViewForUnknownEpoch) { // The cluster committee never returns ErrViewForUnknownEpoch, therefore this case // is an unexpected error in cluster consensus. - return fmt.Errorf("unexpected error: cluster committee reported unknown epoch : %w", err) + return fmt.Errorf("unexpected error: cluster committee reported unknown epoch : %w", irrecoverable.NewException(err)) } return fmt.Errorf("unexpected error validating proposal: %w", err) } @@ -330,17 +336,16 @@ func (c *Core) processBlockProposal(proposal *cluster.Block) error { if err != nil { if state.IsInvalidExtensionError(err) { // if the block proposes an invalid extension of the cluster state, then the block is invalid - // TODO: we should slash the block proposer - return engine.NewInvalidInputErrorf("invalid extension of cluster state (block: %x, height: %d): %w", blockID, header.Height, err) + return model.NewInvalidProposalErrorf(hotstuffProposal, "invalid extension of cluster state (block: %x, height: %d): %w", blockID, block.Height, err) } else if state.IsOutdatedExtensionError(err) { // cluster state aborted processing of block as it is on an abandoned fork: block is outdated return engine.NewOutdatedInputErrorf("outdated extension of cluster state: %w", err) } else if state.IsUnverifiableExtensionError(err) { return engine.NewUnverifiableInputError("unverifiable extension of cluster state (block_id: %x, height: %d): %w", - header.ID(), header.Height, err) + blockID, block.Height, err) } else { // unexpected error: potentially corrupted internal state => abort processing and escalate error - return fmt.Errorf("unexpected exception while extending cluster state with block %x at height %d: %w", blockID, header.Height, err) + return fmt.Errorf("unexpected exception while extending cluster state with block %x at height %d: %w", blockID, block.Height, err) } } @@ -350,7 +355,7 @@ func (c *Core) processBlockProposal(proposal *cluster.Block) error { // submit the model to hotstuff for processing // TODO replace with pubsub https://github.com/dapperlabs/flow-go/issues/6395 - log.Info().Msg("forwarding block proposal to hotstuff") + log.Debug().Msg("forwarding block proposal to hotstuff") c.hotstuff.SubmitProposal(hotstuffProposal) return nil @@ -382,19 +387,6 @@ func checkForAndLogOutdatedInputError(err error, log zerolog.Logger) bool { return false } -// checkForAndLogInvalidInputError checks whether error is an `engine.InvalidInputError`. -// If this is the case, we emit a log message and return true. -// For any error other than `engine.InvalidInputError`, this function is a no-op -// and returns false. 
-func checkForAndLogInvalidInputError(err error, log zerolog.Logger) bool { - if engine.IsInvalidInputError(err) { - // the block is invalid; log as error as we desire honest participation - log.Err(err).Msg("received invalid block from other node (potential slashing evidence?)") - return true - } - return false -} - // checkForAndLogUnverifiableInputError checks whether error is an `engine.UnverifiableInputError`. // If this is the case, we emit a log message and return true. // For any error other than `engine.UnverifiableInputError`, this function is a no-op @@ -402,7 +394,8 @@ func checkForAndLogInvalidInputError(err error, log zerolog.Logger) bool { func checkForAndLogUnverifiableInputError(err error, log zerolog.Logger) bool { if engine.IsUnverifiableInputError(err) { // the block cannot be validated - log.Err(err).Msg("received unverifiable block proposal; this is an indicator of a proposal that cannot be verified under current state") + log.Warn().Err(err).Msg("received collection proposal with unknown reference block; " + + "this might be an indicator that the node is slightly behind or the proposer published an invalid collection") return true } return false diff --git a/engine/collection/compliance/core_test.go b/engine/collection/compliance/core_test.go index 81f19c2f3b1..aaf44149dc1 100644 --- a/engine/collection/compliance/core_test.go +++ b/engine/collection/compliance/core_test.go @@ -2,9 +2,7 @@ package compliance import ( "errors" - "math/rand" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -15,7 +13,6 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" realbuffer "github.com/onflow/flow-go/module/buffer" "github.com/onflow/flow-go/module/compliance" "github.com/onflow/flow-go/module/metrics" @@ -41,12 +38,12 @@ type CoreSuite struct { type CommonSuite struct { suite.Suite - head *cluster.Block + head *cluster.Proposal // storage data - headerDB map[flow.Identifier]*cluster.Block + headerDB map[flow.Identifier]*flow.Header - pendingDB map[flow.Identifier]flow.Slashable[*cluster.Block] - childrenDB map[flow.Identifier][]flow.Slashable[*cluster.Block] + pendingDB map[flow.Identifier]flow.Slashable[*cluster.Proposal] + childrenDB map[flow.Identifier][]flow.Slashable[*cluster.Proposal] // mocked dependencies state *clusterstate.MutableState @@ -66,26 +63,23 @@ type CommonSuite struct { } func (cs *CommonSuite) SetupTest() { - // seed the RNG - rand.Seed(time.Now().UnixNano()) - block := unittest.ClusterBlockFixture() - cs.head = &block + cs.head = unittest.ClusterProposalFromBlock(block) // initialize the storage data - cs.headerDB = make(map[flow.Identifier]*cluster.Block) - cs.pendingDB = make(map[flow.Identifier]flow.Slashable[*cluster.Block]) - cs.childrenDB = make(map[flow.Identifier][]flow.Slashable[*cluster.Block]) + cs.headerDB = make(map[flow.Identifier]*flow.Header) + cs.pendingDB = make(map[flow.Identifier]flow.Slashable[*cluster.Proposal]) + cs.childrenDB = make(map[flow.Identifier][]flow.Slashable[*cluster.Proposal]) // store the head header and payload - cs.headerDB[block.ID()] = cs.head + cs.headerDB[block.ID()] = cs.head.Block.ToHeader() // set up header storage mock cs.headers = &storage.Headers{} cs.headers.On("ByBlockID", mock.Anything).Return( func(blockID flow.Identifier) *flow.Header { if header := cs.headerDB[blockID]; header != nil { - return cs.headerDB[blockID].Header 
+ return cs.headerDB[blockID] } return nil }, @@ -123,7 +117,7 @@ func (cs *CommonSuite) SetupTest() { cs.snapshot = &clusterstate.Snapshot{} cs.snapshot.On("Head").Return( func() *flow.Header { - return cs.head.Header + return cs.head.Block.ToHeader() }, nil, ) @@ -132,7 +126,7 @@ func (cs *CommonSuite) SetupTest() { cs.pending = &module.PendingClusterBlockBuffer{} cs.pending.On("Add", mock.Anything, mock.Anything).Return(true) cs.pending.On("ByID", mock.Anything).Return( - func(blockID flow.Identifier) flow.Slashable[*cluster.Block] { + func(blockID flow.Identifier) flow.Slashable[*cluster.Proposal] { return cs.pendingDB[blockID] }, func(blockID flow.Identifier) bool { @@ -141,7 +135,7 @@ func (cs *CommonSuite) SetupTest() { }, ) cs.pending.On("ByParentID", mock.Anything).Return( - func(blockID flow.Identifier) []flow.Slashable[*cluster.Block] { + func(blockID flow.Identifier) []flow.Slashable[*cluster.Proposal] { return cs.childrenDB[blockID] }, func(blockID flow.Identifier) bool { @@ -193,6 +187,7 @@ func (cs *CommonSuite) SetupTest() { cs.hotstuff, cs.voteAggregator, cs.timeoutAggregator, + compliance.DefaultConfig(), ) require.NoError(cs.T(), err, "engine initialization should pass") @@ -200,23 +195,23 @@ func (cs *CommonSuite) SetupTest() { } func (cs *CoreSuite) TestOnBlockProposalValidParent() { - // create a proposal that directly descends from the latest finalized header originID := unittest.IdentifierFixture() - block := unittest.ClusterBlockWithParent(cs.head) - - proposal := messages.NewClusterBlockProposal(&block) - - // store the data for retrieval - cs.headerDB[block.Header.ParentID] = cs.head + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(&cs.head.Block), + ) + proposal := unittest.ClusterProposalFromBlock(block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromClusterBlock(proposal) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal) // it should be processed without error - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "valid block proposal should pass") } @@ -224,37 +219,50 @@ func (cs *CoreSuite) TestOnBlockProposalValidAncestor() { // create a proposal that has two ancestors in the cache originID := unittest.IdentifierFixture() - ancestor := unittest.ClusterBlockWithParent(cs.head) - parent := unittest.ClusterBlockWithParent(&ancestor) - block := unittest.ClusterBlockWithParent(&parent) - proposal := messages.NewClusterBlockProposal(&block) + ancestor := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(&cs.head.Block), + ) + parent := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(ancestor), + ) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + ) + proposal := unittest.ClusterProposalFromBlock(block) // store the data for retrieval - cs.headerDB[parent.ID()] = &parent - cs.headerDB[ancestor.ID()] = &ancestor + cs.headerDB[parent.ID()] = parent.ToHeader() + cs.headerDB[ancestor.ID()] = ancestor.ToHeader() - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromClusterBlock(proposal) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", 
hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal).Once() // it should be processed without error - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "valid block proposal should pass") // we should extend the state with the header - cs.state.AssertCalled(cs.T(), "Extend", &block) + cs.state.AssertCalled(cs.T(), "Extend", proposal) } func (cs *CoreSuite) TestOnBlockProposalSkipProposalThreshold() { // create a proposal which is far enough ahead to be dropped originID := unittest.IdentifierFixture() - block := unittest.ClusterBlockFixture() - block.Header.Height = cs.head.Header.Height + compliance.DefaultConfig().SkipNewProposalsThreshold + 1 - proposal := unittest.ClusterProposalFromBlock(&block) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithHeight(cs.head.Block.Height + compliance.DefaultConfig().SkipNewProposalsThreshold + 1), + ) + proposal := unittest.ClusterProposalFromBlock(block) - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err) // block should be dropped - not added to state or cache @@ -268,30 +276,41 @@ func (cs *CoreSuite) TestOnBlockProposalSkipProposalThreshold() { // - we should not attempt to process its children // - we should notify VoteAggregator, for known errors func (cs *CoreSuite) TestOnBlockProposal_FailsHotStuffValidation() { - // create a proposal that has two ancestors in the cache originID := unittest.IdentifierFixture() - ancestor := unittest.ClusterBlockWithParent(cs.head) - parent := unittest.ClusterBlockWithParent(&ancestor) - block := unittest.ClusterBlockWithParent(&parent) - proposal := messages.NewClusterBlockProposal(&block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + ancestor := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(&cs.head.Block), + ) + parent := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(ancestor), + ) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + ) + proposal := unittest.ClusterProposalFromBlock(block) + hotstuffProposal := model.SignedProposalFromClusterBlock(proposal) // store the data for retrieval - cs.headerDB[parent.ID()] = &parent - cs.headerDB[ancestor.ID()] = &ancestor + cs.headerDB[parent.ID()] = parent.ToHeader() + cs.headerDB[ancestor.ID()] = ancestor.ToHeader() cs.Run("invalid block error", func() { // the block fails HotStuff validation *cs.validator = *hotstuff.NewValidator(cs.T()) sentinelError := model.NewInvalidProposalErrorf(hotstuffProposal, "") cs.validator.On("ValidateProposal", hotstuffProposal).Return(sentinelError) - cs.proposalViolationNotifier.On("OnInvalidBlockDetected", sentinelError).Return().Once() + cs.proposalViolationNotifier.On("OnInvalidBlockDetected", flow.Slashable[model.InvalidProposalError]{ + OriginID: originID, + Message: sentinelError.(model.InvalidProposalError), + }).Return().Once() // we should notify VoteAggregator about the invalid block cs.voteAggregator.On("InvalidBlock", hotstuffProposal).Return(nil) // the expected error should be handled within the Core - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, 
"proposal with invalid extension should fail") // we should not extend the state with the header @@ -306,9 +325,12 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsHotStuffValidation() { cs.validator.On("ValidateProposal", hotstuffProposal).Return(model.ErrViewForUnknownEpoch) // this error is not expected should raise an exception - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.Error(cs.T(), err, "proposal with invalid extension should fail") - require.ErrorIs(cs.T(), err, model.ErrViewForUnknownEpoch) + require.NotErrorIs(cs.T(), err, model.ErrViewForUnknownEpoch) // we should not extend the state with the header cs.state.AssertNotCalled(cs.T(), "Extend", mock.Anything) @@ -323,7 +345,10 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsHotStuffValidation() { cs.validator.On("ValidateProposal", hotstuffProposal).Return(unexpectedErr) // the error should be propagated - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.ErrorIs(cs.T(), err, unexpectedErr) // we should not extend the state with the header @@ -342,15 +367,21 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { // create a proposal that has two ancestors in the cache originID := unittest.IdentifierFixture() - ancestor := unittest.ClusterBlockWithParent(cs.head) - parent := unittest.ClusterBlockWithParent(&ancestor) - block := unittest.ClusterBlockWithParent(&parent) - proposal := messages.NewClusterBlockProposal(&block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + ancestor := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(&cs.head.Block), + ) + parent := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(ancestor), + ) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + ) + proposal := unittest.ClusterProposalFromBlock(block) + hotstuffProposal := model.SignedProposalFromClusterBlock(proposal) // store the data for retrieval - cs.headerDB[parent.ID()] = &parent - cs.headerDB[ancestor.ID()] = &ancestor + cs.headerDB[parent.ID()] = parent.ToHeader() + cs.headerDB[ancestor.ID()] = ancestor.ToHeader() // the block passes HotStuff validation cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) @@ -359,16 +390,26 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { // make sure we fail to extend the state *cs.state = clusterstate.MutableState{} cs.state.On("Final").Return(func() clusterint.Snapshot { return cs.snapshot }) - cs.state.On("Extend", mock.Anything).Return(state.NewInvalidExtensionError("")) + sentinelErr := state.NewInvalidExtensionErrorf("") + cs.state.On("Extend", mock.Anything).Return(sentinelErr) + cs.proposalViolationNotifier.On("OnInvalidBlockDetected", mock.Anything).Run(func(args mock.Arguments) { + err := args.Get(0).(flow.Slashable[model.InvalidProposalError]) + require.ErrorIs(cs.T(), err.Message, sentinelErr) + require.Equal(cs.T(), err.Message.InvalidProposal, hotstuffProposal) + require.Equal(cs.T(), err.OriginID, originID) + }).Return().Once() // we should notify VoteAggregator about the invalid block cs.voteAggregator.On("InvalidBlock", hotstuffProposal).Return(nil) // the expected error should be handled within the Core - err := cs.core.OnBlockProposal(originID, proposal) + err := 
cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "proposal with invalid extension should fail") // we should extend the state with the header - cs.state.AssertCalled(cs.T(), "Extend", &block) + cs.state.AssertCalled(cs.T(), "Extend", proposal) // we should not pass the block to hotstuff cs.hotstuff.AssertNotCalled(cs.T(), "SubmitProposal", mock.Anything) // we should not attempt to process the children @@ -379,14 +420,17 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { // make sure we fail to extend the state *cs.state = clusterstate.MutableState{} cs.state.On("Final").Return(func() clusterint.Snapshot { return cs.snapshot }) - cs.state.On("Extend", mock.Anything).Return(state.NewOutdatedExtensionError("")) + cs.state.On("Extend", mock.Anything).Return(state.NewOutdatedExtensionErrorf("")) // the expected error should be handled within the Core - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "proposal with invalid extension should fail") // we should extend the state with the header - cs.state.AssertCalled(cs.T(), "Extend", &block) + cs.state.AssertCalled(cs.T(), "Extend", proposal) // we should not pass the block to hotstuff cs.hotstuff.AssertNotCalled(cs.T(), "SubmitProposal", mock.Anything) // we should not attempt to process the children @@ -401,11 +445,14 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { cs.state.On("Extend", mock.Anything).Return(unexpectedErr) // it should be processed without error - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.ErrorIs(cs.T(), err, unexpectedErr) // we should extend the state with the header - cs.state.AssertCalled(cs.T(), "Extend", &block) + cs.state.AssertCalled(cs.T(), "Extend", proposal) // we should not pass the block to hotstuff cs.hotstuff.AssertNotCalled(cs.T(), "SubmitProposal", mock.Anything, mock.Anything) // we should not attempt to process the children @@ -416,86 +463,107 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { func (cs *CoreSuite) TestProcessBlockAndDescendants() { // create three children blocks - parent := unittest.ClusterBlockWithParent(cs.head) - block1 := unittest.ClusterBlockWithParent(&parent) - block2 := unittest.ClusterBlockWithParent(&parent) - block3 := unittest.ClusterBlockWithParent(&parent) - - pendingFromBlock := func(block *cluster.Block) flow.Slashable[*cluster.Block] { - return flow.Slashable[*cluster.Block]{ - OriginID: block.Header.ProposerID, + parent := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(&cs.head.Block), + ) + block1 := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + ) + block2 := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + ) + block3 := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + ) + + proposal0 := unittest.ClusterProposalFromBlock(parent) + proposal1 := unittest.ClusterProposalFromBlock(block1) + proposal2 := unittest.ClusterProposalFromBlock(block2) + proposal3 := unittest.ClusterProposalFromBlock(block3) + + pendingFromProposal := func(block *cluster.Proposal) flow.Slashable[*cluster.Proposal] { + return flow.Slashable[*cluster.Proposal]{ 
+ OriginID: block.Block.ProposerID, Message: block, } } // create the pending blocks - pending1 := pendingFromBlock(&block1) - pending2 := pendingFromBlock(&block2) - pending3 := pendingFromBlock(&block3) + pending1 := pendingFromProposal(proposal1) + pending2 := pendingFromProposal(proposal2) + pending3 := pendingFromProposal(proposal3) // store the parent on disk parentID := parent.ID() - cs.headerDB[parentID] = &parent + cs.headerDB[parentID] = proposal0.Block.ToHeader() // store the pending children in the cache cs.childrenDB[parentID] = append(cs.childrenDB[parentID], pending1) cs.childrenDB[parentID] = append(cs.childrenDB[parentID], pending2) cs.childrenDB[parentID] = append(cs.childrenDB[parentID], pending3) - for _, block := range []cluster.Block{parent, block1, block2, block3} { - hotstuffProposal := model.ProposalFromFlow(block.Header) + for _, prop := range []*cluster.Proposal{proposal0, proposal1, proposal2, proposal3} { + hotstuffProposal := model.SignedProposalFromClusterBlock(prop) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal).Once() } // execute the connected children handling - err := cs.core.processBlockAndDescendants(&parent) + err := cs.core.processBlockAndDescendants(flow.Slashable[*cluster.Proposal]{ + OriginID: unittest.IdentifierFixture(), + Message: proposal0, + }) require.NoError(cs.T(), err, "should pass handling children") // check that we submitted each child to hotstuff cs.hotstuff.AssertExpectations(cs.T()) // make sure we drop the cache after trying to process - cs.pending.AssertCalled(cs.T(), "DropForParent", parent.Header.ID()) + cs.pending.AssertCalled(cs.T(), "DropForParent", parent.ID()) } func (cs *CoreSuite) TestProposalBufferingOrder() { - // create a proposal that we will not submit until the end originID := unittest.IdentifierFixture() - block := unittest.ClusterBlockWithParent(cs.head) - missing := &block + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(&cs.head.Block), + ) + missing := block // create a chain of descendants - var proposals []*cluster.Block - proposalsLookup := make(map[flow.Identifier]*cluster.Block) + var proposals []*cluster.Proposal + proposalsLookup := make(map[flow.Identifier]*cluster.Proposal) parent := missing for i := 0; i < 3; i++ { - proposal := unittest.ClusterBlockWithParent(parent) - proposals = append(proposals, &proposal) - proposalsLookup[proposal.ID()] = &proposal - parent = &proposal + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + ) + proposal := unittest.ClusterProposalFromBlock(block) + proposals = append(proposals, proposal) + proposalsLookup[block.ID()] = proposal + parent = block } // replace the engine buffer with the real one cs.core.pending = realbuffer.NewPendingClusterBlocks() // process all of the descendants - for _, block := range proposals { + for _, proposal := range proposals { // check that we request the ancestor block each time cs.sync.On("RequestBlock", mock.Anything, mock.AnythingOfType("uint64")).Once().Run( func(args mock.Arguments) { ancestorID := args.Get(0).(flow.Identifier) - assert.Equal(cs.T(), missing.Header.ID(), ancestorID, "should always request root block") + assert.Equal(cs.T(), missing.ID(), ancestorID, "should always request root block") }, ) - proposal := messages.NewClusterBlockProposal(block) - // process and make sure no error occurs (as they are unverifiable) - err := 
cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "proposal buffering should pass") // make sure no block is forwarded to hotstuff @@ -506,28 +574,31 @@ func (cs *CoreSuite) TestProposalBufferingOrder() { *cs.hotstuff = module.HotStuff{} index := 0 order := []flow.Identifier{ - missing.Header.ID(), - proposals[0].Header.ID(), - proposals[1].Header.ID(), - proposals[2].Header.ID(), + missing.ID(), + proposals[0].Block.ToHeader().ID(), + proposals[1].Block.ToHeader().ID(), + proposals[2].Block.ToHeader().ID(), } cs.hotstuff.On("SubmitProposal", mock.Anything).Times(4).Run( func(args mock.Arguments) { - header := args.Get(0).(*model.Proposal).Block + header := args.Get(0).(*model.SignedProposal).Block assert.Equal(cs.T(), order[index], header.BlockID, "should submit correct header to hotstuff") index++ - cs.headerDB[header.BlockID] = proposalsLookup[header.BlockID] + cs.headerDB[header.BlockID] = proposalsLookup[header.BlockID].Block.ToHeader() }, ) cs.voteAggregator.On("AddBlock", mock.Anything).Times(4) cs.validator.On("ValidateProposal", mock.Anything).Times(4).Return(nil) - missingProposal := messages.NewClusterBlockProposal(missing) + missingProposal := unittest.ClusterProposalFromBlock(missing) - proposalsLookup[missing.ID()] = missing + proposalsLookup[missing.ID()] = missingProposal // process the root proposal - err := cs.core.OnBlockProposal(originID, missingProposal) + err := cs.core.OnBlockProposal(flow.Slashable[*cluster.Proposal]{ + OriginID: originID, + Message: missingProposal, + }) require.NoError(cs.T(), err, "root proposal should pass") // make sure we submitted all four proposals diff --git a/engine/collection/compliance/engine.go b/engine/collection/compliance/engine.go index 4a43219d021..b2aea597977 100644 --- a/engine/collection/compliance/engine.go +++ b/engine/collection/compliance/engine.go @@ -10,8 +10,8 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/collection" "github.com/onflow/flow-go/engine/common/fifoqueue" + "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/events" @@ -21,7 +21,7 @@ import ( "github.com/onflow/flow-go/storage" ) -// defaultBlockQueueCapacity maximum capacity of inbound queue for `messages.ClusterBlockProposal`s +// defaultBlockQueueCapacity maximum capacity of inbound queue for `cluster.UntrustedProposal`s const defaultBlockQueueCapacity = 10_000 // Engine is a wrapper struct for `Core` which implements cluster consensus algorithm. @@ -119,8 +119,8 @@ func (e *Engine) processQueuedBlocks(doneSignal <-chan struct{}) error { msg, ok := e.pendingBlocks.Pop() if ok { - inBlock := msg.(flow.Slashable[*messages.ClusterBlockProposal]) - err := e.core.OnBlockProposal(inBlock.OriginID, inBlock.Message) + inBlock := msg.(flow.Slashable[*cluster.Proposal]) + err := e.core.OnBlockProposal(inBlock) e.core.engineMetrics.MessageHandled(metrics.EngineClusterCompliance, metrics.MessageBlockProposal) if err != nil { return fmt.Errorf("could not handle block proposal: %w", err) @@ -134,9 +134,9 @@ func (e *Engine) processQueuedBlocks(doneSignal <-chan struct{}) error { } } -// OnClusterBlockProposal feeds a new block proposal into the processing pipeline. 
+// OnClusterBlockProposal feeds a new structurally validated block proposal into the processing pipeline.
 // Incoming proposals are queued and eventually dispatched by worker.
-func (e *Engine) OnClusterBlockProposal(proposal flow.Slashable[*messages.ClusterBlockProposal]) {
+func (e *Engine) OnClusterBlockProposal(proposal flow.Slashable[*cluster.Proposal]) {
 	e.core.engineMetrics.MessageReceived(metrics.EngineClusterCompliance, metrics.MessageBlockProposal)
 	if e.pendingBlocks.Push(proposal) {
 		e.pendingBlocksNotifier.Notify()
@@ -145,9 +145,9 @@ func (e *Engine) OnClusterBlockProposal(proposal flow.Slashable[*messages.Cluste
 	}
 }

-// OnSyncedClusterBlock feeds a block obtained from sync proposal into the processing pipeline.
+// OnSyncedClusterBlock feeds a structurally validated block proposal obtained from sync into the processing pipeline.
 // Incoming proposals are queued and eventually dispatched by worker.
-func (e *Engine) OnSyncedClusterBlock(syncedBlock flow.Slashable[*messages.ClusterBlockProposal]) {
+func (e *Engine) OnSyncedClusterBlock(syncedBlock flow.Slashable[*cluster.Proposal]) {
 	e.core.engineMetrics.MessageReceived(metrics.EngineClusterCompliance, metrics.MessageSyncedClusterBlock)
 	if e.pendingBlocks.Push(syncedBlock) {
 		e.pendingBlocksNotifier.Notify()
diff --git a/engine/collection/compliance/engine_test.go b/engine/collection/compliance/engine_test.go
index 3c760ed05c3..ef050881b46 100644
--- a/engine/collection/compliance/engine_test.go
+++ b/engine/collection/compliance/engine_test.go
@@ -14,12 +14,11 @@ import (
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
 	"github.com/onflow/flow-go/model/cluster"
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/model/messages"
 	"github.com/onflow/flow-go/module/irrecoverable"
 	module "github.com/onflow/flow-go/module/mock"
 	netint "github.com/onflow/flow-go/network"
 	"github.com/onflow/flow-go/network/channels"
-	"github.com/onflow/flow-go/network/mocknetwork"
+	mocknetwork "github.com/onflow/flow-go/network/mock"
 	protocol "github.com/onflow/flow-go/state/protocol/mock"
 	storerr "github.com/onflow/flow-go/storage"
 	storage "github.com/onflow/flow-go/storage/mock"
@@ -37,7 +36,7 @@ type EngineSuite struct {
 	myID       flow.Identifier
 	cluster    flow.IdentityList
 	me         *module.Local
-	net        *mocknetwork.Network
+	net        *mocknetwork.EngineRegistry
 	payloads   *storage.ClusterPayloads
 	protoState *protocol.State
 	con        *mocknetwork.Conduit
@@ -57,21 +56,21 @@ func (cs *EngineSuite) SetupTest() {
 	// initialize the parameters
 	cs.cluster = unittest.IdentityListFixture(3,
 		unittest.WithRole(flow.RoleCollection),
-		unittest.WithWeight(1000),
+		unittest.WithInitialWeight(1000),
 	)
 	cs.myID = cs.cluster[0].NodeID

-	protoEpoch := &protocol.Epoch{}
-	clusters := flow.ClusterList{cs.cluster}
+	protoEpoch := &protocol.CommittedEpoch{}
+	clusters := flow.ClusterList{cs.cluster.ToSkeleton()}
 	protoEpoch.On("Clustering").Return(clusters, nil)

 	protoQuery := &protocol.EpochQuery{}
-	protoQuery.On("Current").Return(protoEpoch)
+	protoQuery.On("Current").Return(protoEpoch, nil)
 	protoSnapshot := &protocol.Snapshot{}
 	protoSnapshot.On("Epochs").Return(protoQuery)
 	protoSnapshot.On("Identities", mock.Anything).Return(
-		func(selector flow.IdentityFilter) flow.IdentityList {
+		func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList {
 			return cs.cluster.Filter(selector)
 		},
 		nil,
@@ -125,7 +124,7 @@ func (cs *EngineSuite) SetupTest() {
 	cs.con.On("Unicast", mock.Anything, mock.Anything).Return(nil)

 	// set up network module mock
-	cs.net =
&mocknetwork.Network{} + cs.net = &mocknetwork.EngineRegistry{} cs.net.On("Register", mock.Anything, mock.Anything).Return( func(channel channels.Channel, engine netint.MessageProcessor) netint.Conduit { return cs.con @@ -163,14 +162,16 @@ func (cs *EngineSuite) TestSubmittingMultipleEntries() { wg.Add(1) go func() { for i := 0; i < blockCount; i++ { - block := unittest.ClusterBlockWithParent(cs.head) - proposal := messages.NewClusterBlockProposal(&block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(&cs.head.Block), + ) + proposal := unittest.ClusterProposalFromBlock(block) + hotstuffProposal := model.SignedProposalFromClusterBlock(proposal) cs.hotstuff.On("SubmitProposal", hotstuffProposal).Return().Once() cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil).Once() // execute the block submission - cs.engine.OnClusterBlockProposal(flow.Slashable[*messages.ClusterBlockProposal]{ + cs.engine.OnClusterBlockProposal(flow.Slashable[*cluster.Proposal]{ OriginID: unittest.IdentifierFixture(), Message: proposal, }) @@ -180,14 +181,16 @@ func (cs *EngineSuite) TestSubmittingMultipleEntries() { wg.Add(1) go func() { // create a proposal that directly descends from the latest finalized header - block := unittest.ClusterBlockWithParent(cs.head) - proposal := messages.NewClusterBlockProposal(&block) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(&cs.head.Block), + ) + proposal := unittest.ClusterProposalFromBlock(block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromClusterBlock(proposal) cs.hotstuff.On("SubmitProposal", hotstuffProposal).Once() cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil).Once() - cs.engine.OnClusterBlockProposal(flow.Slashable[*messages.ClusterBlockProposal]{ + cs.engine.OnClusterBlockProposal(flow.Slashable[*cluster.Proposal]{ OriginID: unittest.IdentifierFixture(), Message: proposal, }) @@ -206,21 +209,22 @@ func (cs *EngineSuite) TestSubmittingMultipleEntries() { // Tests the whole processing pipeline. func (cs *EngineSuite) TestOnFinalizedBlock() { finalizedBlock := unittest.ClusterBlockFixture() - cs.head = &finalizedBlock - cs.headerDB[finalizedBlock.ID()] = &finalizedBlock + proposal := unittest.ClusterProposalFromBlock(finalizedBlock) + cs.head = proposal + cs.headerDB[finalizedBlock.ID()] = proposal.Block.ToHeader() *cs.pending = module.PendingClusterBlockBuffer{} // wait for both expected calls before ending the test wg := new(sync.WaitGroup) wg.Add(2) - cs.pending.On("PruneByView", finalizedBlock.Header.View). + cs.pending.On("PruneByView", finalizedBlock.View). Run(func(_ mock.Arguments) { wg.Done() }). Return(nil).Once() cs.pending.On("Size"). Run(func(_ mock.Arguments) { wg.Done() }). 
Return(uint(0)).Once() - err := cs.engine.processOnFinalizedBlock(model.BlockFromFlow(finalizedBlock.Header)) + err := cs.engine.processOnFinalizedBlock(model.BlockFromFlow(finalizedBlock.ToHeader())) require.NoError(cs.T(), err) unittest.AssertReturnsBefore(cs.T(), wg.Wait, time.Second, "an expected call to block buffer wasn't made") } diff --git a/engine/collection/epochmgr/engine.go b/engine/collection/epochmgr/engine.go index eee3891dc1a..c0bc2d87a81 100644 --- a/engine/collection/epochmgr/engine.go +++ b/engine/collection/epochmgr/engine.go @@ -1,13 +1,16 @@ package epochmgr import ( + "context" "errors" "fmt" "sync" "time" "github.com/rs/zerolog" + "go.uber.org/atomic" + "github.com/onflow/flow-go/engine/collection" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" @@ -55,12 +58,14 @@ type Engine struct { mu sync.RWMutex // protects epochs map epochs map[uint64]*RunningEpochComponents // epoch-scoped components per epoch - // internal event notifications - epochTransitionEvents chan *flow.Header // sends first block of new epoch - epochSetupPhaseStartedEvents chan *flow.Header // sends first block of EpochSetup phase - epochStopEvents chan uint64 // sends counter of epoch to stop + inProgressQCVote *atomic.Pointer[context.CancelFunc] // tracks the cancel callback of the in progress QC vote - cm *component.ComponentManager + // internal event notifications + epochTransitionEvents chan *flow.Header // sends first block of new epoch + epochSetupPhaseStartedEvents chan *flow.Header // sends first block of EpochSetup phase + epochStopEvents chan uint64 // sends counter of epoch to stop + clusterIDUpdateDistributor collection.ClusterEvents // sends cluster ID updates to consumers + cm *component.ComponentManager component.Component } @@ -75,6 +80,7 @@ func New( voter module.ClusterRootQCVoter, factory EpochComponentsFactory, heightEvents events.Heights, + clusterIDUpdateDistributor collection.ClusterEvents, ) (*Engine, error) { e := &Engine{ log: log.With().Str("engine", "epochmgr").Logger(), @@ -89,6 +95,8 @@ func New( epochTransitionEvents: make(chan *flow.Header, 1), epochSetupPhaseStartedEvents: make(chan *flow.Header, 1), epochStopEvents: make(chan uint64, 1), + clusterIDUpdateDistributor: clusterIDUpdateDistributor, + inProgressQCVote: atomic.NewPointer[context.CancelFunc](nil), } e.cm = component.NewComponentManagerBuilder(). @@ -134,10 +142,9 @@ func (e *Engine) Start(ctx irrecoverable.SignalerContext) { // authorized participant in the current epoch. // No errors are expected during normal operation. 
func (e *Engine) checkShouldStartCurrentEpochComponentsOnStartup(ctx irrecoverable.SignalerContext, finalSnapshot protocol.Snapshot) error { - currentEpoch := finalSnapshot.Epochs().Current() - currentEpochCounter, err := currentEpoch.Counter() + currentEpoch, err := finalSnapshot.Epochs().Current() if err != nil { - return fmt.Errorf("could not get epoch counter: %w", err) + return fmt.Errorf("could not get current epoch: %w", err) } components, err := e.createEpochComponents(currentEpoch) @@ -149,7 +156,7 @@ func (e *Engine) checkShouldStartCurrentEpochComponentsOnStartup(ctx irrecoverab } return fmt.Errorf("could not create epoch components: %w", err) } - err = e.startEpochComponents(ctx, currentEpochCounter, components) + err = e.startEpochComponents(ctx, currentEpoch.Counter(), components) if err != nil { // all failures to start epoch components are critical return fmt.Errorf("could not start epoch components: %w", err) @@ -171,16 +178,24 @@ func (e *Engine) checkShouldStartPreviousEpochComponentsOnStartup(engineCtx irre } finalizedHeight := finalHeader.Height - prevEpoch := finalSnapshot.Epochs().Previous() - prevEpochCounter, err := prevEpoch.Counter() + prevEpoch, err := finalSnapshot.Epochs().Previous() if err != nil { if errors.Is(err, protocol.ErrNoPreviousEpoch) { return nil } - return fmt.Errorf("[unexpected] could not get previous epoch counter: %w", err) + return fmt.Errorf("[unexpected] could not get previous epoch: %w", err) } + prevEpochCounter := prevEpoch.Counter() prevEpochFinalHeight, err := prevEpoch.FinalHeight() if err != nil { + // If we don't know the end boundary of the previous epoch, then our root snapshot + // is relatively recent and excludes the most recent epoch boundary. + // In this case, because sealing segments contain flow.DefaultTransactionExpiry + // many blocks, this is also an indication that we do not need to start up the + // previous epoch's consensus components. + if errors.Is(err, protocol.ErrUnknownEpochBoundary) { + return nil + } // no expected errors because we are querying finalized snapshot return fmt.Errorf("[unexpected] could not get previous epoch final height: %w", err) } @@ -224,7 +239,7 @@ func (e *Engine) checkShouldStartPreviousEpochComponentsOnStartup(engineCtx irre func (e *Engine) checkShouldVoteOnStartup(finalSnapshot protocol.Snapshot) error { // check the current phase on startup, in case we are in setup phase // and haven't yet voted for the next root QC - phase, err := finalSnapshot.Phase() + phase, err := finalSnapshot.EpochPhase() if err != nil { return fmt.Errorf("could not get epoch phase for finalized snapshot: %w", err) } @@ -272,14 +287,10 @@ func (e *Engine) Done() <-chan struct{} { // the given epoch, using the configured factory. // Error returns: // - ErrNotAuthorizedForEpoch if this node is not authorized in the epoch. 
-func (e *Engine) createEpochComponents(epoch protocol.Epoch) (*EpochComponents, error) { - counter, err := epoch.Counter() - if err != nil { - return nil, fmt.Errorf("could not get epoch counter: %w", err) - } +func (e *Engine) createEpochComponents(epoch protocol.CommittedEpoch) (*EpochComponents, error) { state, prop, sync, hot, voteAggregator, timeoutAggregator, messageHub, err := e.factory.Create(epoch) if err != nil { - return nil, fmt.Errorf("could not setup requirements for epoch (%d): %w", counter, err) + return nil, fmt.Errorf("could not setup requirements for epoch (%d): %w", epoch.Counter(), err) } components := NewEpochComponents(state, prop, sync, hot, voteAggregator, timeoutAggregator, messageHub) @@ -301,6 +312,12 @@ func (e *Engine) EpochSetupPhaseStarted(_ uint64, first *flow.Header) { e.epochSetupPhaseStartedEvents <- first } +// EpochEmergencyFallbackTriggered handles the epoch emergency fallback triggered protocol event. +// If epoch emergency fallback is triggered, root QC voting must be stopped. +func (e *Engine) EpochEmergencyFallbackTriggered() { + e.stopInProgressQcVote() +} + // handleEpochEvents handles events relating to the epoch lifecycle: // - EpochTransition protocol event - we start epoch components for the starting epoch, // and schedule shutdown for the ending epoch @@ -321,7 +338,12 @@ func (e *Engine) handleEpochEvents(ctx irrecoverable.SignalerContext, ready comp ctx.Throw(err) } case firstBlock := <-e.epochSetupPhaseStartedEvents: - nextEpoch := e.state.AtBlockID(firstBlock.ID()).Epochs().Next() + // This is one of the few places where we have to use the configuration for a future epoch that + // has not yet been committed. CAUTION: the epoch transition might not happen as described here! + nextEpoch, err := e.state.AtBlockID(firstBlock.ID()).Epochs().NextUnsafe() + if err != nil { // since the Epoch Setup Phase just started, this call should never error + ctx.Throw(err) + } e.onEpochSetupPhaseStarted(ctx, nextEpoch) case epochCounter := <-e.epochStopEvents: err := e.stopEpochComponents(epochCounter) @@ -353,11 +375,11 @@ func (e *Engine) handleEpochErrors(ctx irrecoverable.SignalerContext, errCh <-ch // // No errors are expected during normal operation. func (e *Engine) onEpochTransition(ctx irrecoverable.SignalerContext, first *flow.Header) error { - epoch := e.state.AtBlockID(first.ID()).Epochs().Current() - counter, err := epoch.Counter() + epoch, err := e.state.AtBlockID(first.ID()).Epochs().Current() if err != nil { - return fmt.Errorf("could not get epoch counter: %w", err) + return fmt.Errorf("could not get current epoch: %w", err) } + counter := epoch.Counter() // greatest block height in the previous epoch is one less than the first // block in current epoch @@ -432,8 +454,16 @@ func (e *Engine) prepareToStopEpochComponents(epochCounter, epochMaxHeight uint6 // setup phase, or when the node is restarted during the epoch setup phase. It // kicks off setup tasks for the phase, in particular submitting a vote for the // next epoch's root cluster QC. -func (e *Engine) onEpochSetupPhaseStarted(ctx irrecoverable.SignalerContext, nextEpoch protocol.Epoch) { - err := e.voter.Vote(ctx, nextEpoch) +// This is one of the few places where we have to use the configuration for a +// future epoch that has not yet been committed. +// CAUTION: the epoch transition might not happen as described by `nextEpoch`! 
+func (e *Engine) onEpochSetupPhaseStarted(ctx irrecoverable.SignalerContext, nextEpoch protocol.TentativeEpoch) {
+	ctxWithCancel, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	e.inProgressQCVote.Store(&cancel)
+
+	err := e.voter.Vote(ctxWithCancel, nextEpoch)
 	if err != nil {
 		if epochs.IsClusterQCNoVoteError(err) {
 			e.log.Warn().Err(err).Msg("unable to submit QC vote for next epoch")
@@ -448,14 +478,18 @@
 // No errors are expected during normal operation.
 func (e *Engine) startEpochComponents(engineCtx irrecoverable.SignalerContext, counter uint64, components *EpochComponents) error {
 	epochCtx, cancel, errCh := irrecoverable.WithSignallerAndCancel(engineCtx)
-	// start component using its own context
 	components.Start(epochCtx)
-	go e.handleEpochErrors(engineCtx, errCh)
+	go e.handleEpochErrors(epochCtx, errCh)

 	select {
 	case <-components.Ready():
 		e.storeEpochComponents(counter, NewRunningEpochComponents(components, cancel))
+		activeClusterIDS, err := e.activeClusterIDs()
+		if err != nil {
+			return fmt.Errorf("failed to get active cluster IDs: %w", err)
+		}
+		e.clusterIDUpdateDistributor.ActiveClustersChanged(activeClusterIDS)
 		return nil
 	case <-time.After(e.startupTimeout):
 		cancel() // cancel current context if we didn't start in time
@@ -481,6 +515,11 @@
 	case <-components.Done():
 		e.removeEpoch(counter)
 		e.pools.ForEpoch(counter).Clear()
+		activeClusterIDS, err := e.activeClusterIDs()
+		if err != nil {
+			return fmt.Errorf("failed to get active cluster IDs: %w", err)
+		}
+		e.clusterIDUpdateDistributor.ActiveClustersChanged(activeClusterIDS)
 		return nil
 	case <-time.After(e.startupTimeout):
 		return fmt.Errorf("could not stop epoch %d components after %s", counter, e.startupTimeout)
@@ -512,3 +551,27 @@
 	delete(e.epochs, counter)
 	e.mu.Unlock()
 }
+
+// activeClusterIDs returns the active canonical cluster IDs for the assigned collection clusters.
+// No errors are expected during normal operation.
+func (e *Engine) activeClusterIDs() (flow.ChainIDList, error) {
+	e.mu.RLock()
+	clusterIDs := make(flow.ChainIDList, len(e.epochs))
+	i := 0
+	for _, epoch := range e.epochs {
+		chainID := epoch.state.Params().ChainID() // cached, does not hit database
+		clusterIDs[i] = chainID
+		i++
+	}
+	e.mu.RUnlock()
+	return clusterIDs, nil
+}
+
+// stopInProgressQcVote cancels the context for any in-progress root QC voting.
+func (e *Engine) stopInProgressQcVote() {
+	cancel := e.inProgressQCVote.Load()
+	if cancel != nil {
+		e.log.Warn().Msg("voting for cluster root block cancelled")
+		(*cancel)() // abort the in-progress vote
+	}
+}
diff --git a/engine/collection/epochmgr/engine_test.go b/engine/collection/epochmgr/engine_test.go
index e477c9a9256..e5309df3f82 100644
--- a/engine/collection/epochmgr/engine_test.go
+++ b/engine/collection/epochmgr/engine_test.go
@@ -14,6 +14,7 @@ import (
 	"github.com/onflow/flow-go/consensus/hotstuff"
 	mockhotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks"
 	epochmgr "github.com/onflow/flow-go/engine/collection/epochmgr/mock"
+	mockcollection "github.com/onflow/flow-go/engine/collection/mock"
 	"github.com/onflow/flow-go/model/flow"
 	realmodule "github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/module/component"
@@ -45,7 +46,6 @@ type mockComponents struct {
 }

 func newMockComponents(t *testing.T) *mockComponents {
-
 	components := &mockComponents{
 		state:             cluster.NewState(t),
 		prop:              mockcomponent.NewComponent(t),
@@ -67,7 +67,9 @@ func newMockComponents(t *testing.T) *mockComponents {
 	components.voteAggregator.On("Start", mock.Anything)
 	components.timeoutAggregator.On("Start", mock.Anything)
 	components.messageHub.On("Start", mock.Anything)
-
+	params := cluster.NewParams(t)
+	params.On("ChainID").Return(flow.ChainID("chain-id"), nil).Maybe()
+	components.state.On("Params").Return(params).Maybe()
 	return components
 }

@@ -89,42 +91,52 @@ type Suite struct {
 	heights    *events.Heights
 	epochQuery *mocks.EpochQuery

-	counter    uint64                     // reflects the counter of the current epoch
-	phase      flow.EpochPhase            // phase at mocked snapshot
-	header     *flow.Header               // header at mocked snapshot
-	epochs     map[uint64]*protocol.Epoch // track all epochs
-	components map[uint64]*mockComponents // track all epoch components
+	counter    uint64                              // reflects the counter of the current epoch
+	phase      flow.EpochPhase                     // phase at mocked snapshot
+	header     *flow.Header                        // header at mocked snapshot
+	epochs     map[uint64]*protocol.CommittedEpoch // track all epochs
+	components map[uint64]*mockComponents          // track all epoch components

 	ctx    irrecoverable.SignalerContext
 	cancel context.CancelFunc
 	errs   <-chan error

 	engine *Engine
+
+	engineEventsDistributor *mockcollection.EngineEvents
 }

 // MockFactoryCreate mocks the epoch factory to create epoch components for the given epoch.
 func (suite *Suite) MockFactoryCreate(arg any) {
 	suite.factory.On("Create", arg).
 		Run(func(args mock.Arguments) {
-			epoch, ok := args.Get(0).(realprotocol.Epoch)
+			epoch, ok := args.Get(0).(realprotocol.CommittedEpoch)
 			suite.Require().Truef(ok, "invalid type %T", args.Get(0))
-			counter, err := epoch.Counter()
-			suite.Require().Nil(err)
-			suite.components[counter] = newMockComponents(suite.T())
+			suite.components[epoch.Counter()] = newMockComponents(suite.T())
 		}).
Return( - func(epoch realprotocol.Epoch) realcluster.State { return suite.ComponentsForEpoch(epoch).state }, - func(epoch realprotocol.Epoch) component.Component { return suite.ComponentsForEpoch(epoch).prop }, - func(epoch realprotocol.Epoch) realmodule.ReadyDoneAware { return suite.ComponentsForEpoch(epoch).sync }, - func(epoch realprotocol.Epoch) realmodule.HotStuff { return suite.ComponentsForEpoch(epoch).hotstuff }, - func(epoch realprotocol.Epoch) hotstuff.VoteAggregator { + func(epoch realprotocol.CommittedEpoch) realcluster.State { + return suite.ComponentsForEpoch(epoch).state + }, + func(epoch realprotocol.CommittedEpoch) component.Component { + return suite.ComponentsForEpoch(epoch).prop + }, + func(epoch realprotocol.CommittedEpoch) realmodule.ReadyDoneAware { + return suite.ComponentsForEpoch(epoch).sync + }, + func(epoch realprotocol.CommittedEpoch) realmodule.HotStuff { + return suite.ComponentsForEpoch(epoch).hotstuff + }, + func(epoch realprotocol.CommittedEpoch) hotstuff.VoteAggregator { return suite.ComponentsForEpoch(epoch).voteAggregator }, - func(epoch realprotocol.Epoch) hotstuff.TimeoutAggregator { + func(epoch realprotocol.CommittedEpoch) hotstuff.TimeoutAggregator { return suite.ComponentsForEpoch(epoch).timeoutAggregator }, - func(epoch realprotocol.Epoch) component.Component { return suite.ComponentsForEpoch(epoch).messageHub }, - func(epoch realprotocol.Epoch) error { return nil }, + func(epoch realprotocol.CommittedEpoch) component.Component { + return suite.ComponentsForEpoch(epoch).messageHub + }, + func(epoch realprotocol.CommittedEpoch) error { return nil }, ).Maybe() } @@ -134,7 +146,7 @@ func (suite *Suite) SetupTest() { suite.state = protocol.NewState(suite.T()) suite.snap = protocol.NewSnapshot(suite.T()) - suite.epochs = make(map[uint64]*protocol.Epoch) + suite.epochs = make(map[uint64]*protocol.CommittedEpoch) suite.components = make(map[uint64]*mockComponents) suite.signer = mockhotstuff.NewSigner(suite.T()) @@ -149,27 +161,32 @@ func (suite *Suite) SetupTest() { suite.phase = flow.EpochPhaseSetup suite.header = unittest.BlockHeaderFixture() suite.epochQuery = mocks.NewEpochQuery(suite.T(), suite.counter) + suite.state.On("Final").Return(suite.snap) suite.state.On("AtBlockID", suite.header.ID()).Return(suite.snap).Maybe() suite.snap.On("Epochs").Return(suite.epochQuery) suite.snap.On("Head").Return( func() *flow.Header { return suite.header }, func() error { return nil }) - suite.snap.On("Phase").Return( + suite.snap.On("EpochPhase").Return( func() flow.EpochPhase { return suite.phase }, func() error { return nil }) - // add current and next epochs - suite.AddEpoch(suite.counter) - suite.AddEpoch(suite.counter + 1) + // add current epoch + suite.AddCommittedEpoch(suite.counter) + // next epoch (with counter+1) is added later, as either setup/tentative (if we need to start QC) + // or committed (if we need to transition to it) depending on the test suite.pools = epochs.NewTransactionPools(func(_ uint64) mempool.Transactions { return herocache.NewTransactions(1000, suite.log, metrics.NewNoopCollector()) }) + suite.engineEventsDistributor = mockcollection.NewEngineEvents(suite.T()) + var err error - suite.engine, err = New(suite.log, suite.me, suite.state, suite.pools, suite.voter, suite.factory, suite.heights) + suite.engine, err = New(suite.log, suite.me, suite.state, suite.pools, suite.voter, suite.factory, suite.heights, suite.engineEventsDistributor) suite.Require().Nil(err) + } // StartEngine starts the engine under test, and spawns a routine to 
check for irrecoverable errors. @@ -194,22 +211,29 @@ func (suite *Suite) TearDownTest() { } } -func TestEpochManager(t *testing.T) { - suite.Run(t, new(Suite)) -} - // TransitionEpoch triggers an epoch transition in the suite's mocks. func (suite *Suite) TransitionEpoch() { suite.counter++ + require.Contains(suite.T(), suite.epochs, suite.counter) suite.epochQuery.Transition() } -// AddEpoch adds an epoch with the given counter. -func (suite *Suite) AddEpoch(counter uint64) *protocol.Epoch { - epoch := new(protocol.Epoch) +// AddCommittedEpoch adds a Committed Epoch with the given counter to the test suite, +// so the epoch information can be retrieved by the business logic. +func (suite *Suite) AddCommittedEpoch(counter uint64) *protocol.CommittedEpoch { + epoch := new(protocol.CommittedEpoch) epoch.On("Counter").Return(counter, nil) suite.epochs[counter] = epoch - suite.epochQuery.Add(epoch) + suite.epochQuery.AddCommitted(epoch) + return epoch +} + +// AddTentativeEpoch adds a Tentative Epoch with the given counter to the test suite, +// so the epoch information can be retrieved by the business logic. +func (suite *Suite) AddTentativeEpoch(counter uint64) *protocol.TentativeEpoch { + epoch := new(protocol.TentativeEpoch) + epoch.On("Counter").Return(counter, nil) + suite.epochQuery.AddTentative(epoch) return epoch } @@ -231,9 +255,8 @@ func (suite *Suite) AssertEpochStopped(counter uint64) { components.sync.AssertCalled(suite.T(), "Done") } -func (suite *Suite) ComponentsForEpoch(epoch realprotocol.Epoch) *mockComponents { - counter, err := epoch.Counter() - suite.Require().Nil(err, "cannot get counter") +func (suite *Suite) ComponentsForEpoch(epoch realprotocol.CommittedEpoch) *mockComponents { + counter := epoch.Counter() components, ok := suite.components[counter] suite.Require().True(ok, "missing component for counter", counter) return components @@ -244,12 +267,10 @@ func (suite *Suite) ComponentsForEpoch(epoch realprotocol.Epoch) *mockComponents func (suite *Suite) MockAsUnauthorizedNode(forEpoch uint64) { // mock as unauthorized for given epoch only - unauthorizedMatcher := func(epoch realprotocol.Epoch) bool { - counter, err := epoch.Counter() - require.NoError(suite.T(), err) - return counter == forEpoch + unauthorizedMatcher := func(epoch realprotocol.CommittedEpoch) bool { + return epoch.Counter() == forEpoch } - authorizedMatcher := func(epoch realprotocol.Epoch) bool { return !unauthorizedMatcher(epoch) } + authorizedMatcher := func(epoch realprotocol.CommittedEpoch) bool { return !unauthorizedMatcher(epoch) } suite.factory = epochmgr.NewEpochComponentsFactory(suite.T()) suite.factory. 
@@ -258,18 +279,28 @@ func (suite *Suite) MockAsUnauthorizedNode(forEpoch uint64) { suite.MockFactoryCreate(mock.MatchedBy(authorizedMatcher)) var err error - suite.engine, err = New(suite.log, suite.me, suite.state, suite.pools, suite.voter, suite.factory, suite.heights) + suite.engine, err = New(suite.log, suite.me, suite.state, suite.pools, suite.voter, suite.factory, suite.heights, suite.engineEventsDistributor) suite.Require().Nil(err) } +func TestEpochManager(t *testing.T) { + suite.Run(t, new(Suite)) +} + // TestRestartInSetupPhase tests that, if we start up during the setup phase, // we should kick off the root QC voter func (suite *Suite) TestRestartInSetupPhase() { + // we expect 1 ActiveClustersChanged events when the engine first starts and the first set of epoch components are started + suite.engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Once() + defer suite.engineEventsDistributor.AssertExpectations(suite.T()) // we are in setup phase + suite.AddTentativeEpoch(suite.counter + 1) suite.phase = flow.EpochPhaseSetup // should call voter with next epoch var called = make(chan struct{}) - suite.voter.On("Vote", mock.Anything, suite.epochQuery.Next()). + nextEpochTentative, err := suite.epochQuery.NextUnsafe() + require.NoError(suite.T(), err, "cannot get next tentative epoch") + suite.voter.On("Vote", mock.Anything, nextEpochTentative). Return(nil). Run(func(args mock.Arguments) { close(called) @@ -285,8 +316,12 @@ func (suite *Suite) TestRestartInSetupPhase() { // When the finalized height is within the first tx_expiry blocks of the new epoch // the engine should restart the previous epoch cluster consensus. func (suite *Suite) TestStartAfterEpochBoundary_WithinTxExpiry() { + // we expect 2 ActiveClustersChanged events once when the engine first starts and the first set of epoch components are started and on restart + suite.engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Twice() + defer suite.engineEventsDistributor.AssertExpectations(suite.T()) suite.phase = flow.EpochPhaseStaking // transition epochs, so that a Previous epoch is queryable + suite.AddCommittedEpoch(suite.counter + 1) suite.TransitionEpoch() prevEpoch := suite.epochs[suite.counter-1] // the finalized height is within [1,tx_expiry] heights of previous epoch final height @@ -305,8 +340,12 @@ func (suite *Suite) TestStartAfterEpochBoundary_WithinTxExpiry() { // When the finalized height is beyond the first tx_expiry blocks of the new epoch // the engine should NOT restart the previous epoch cluster consensus. func (suite *Suite) TestStartAfterEpochBoundary_BeyondTxExpiry() { + // we expect 1 ActiveClustersChanged events when the engine first starts and the first set of epoch components are started + suite.engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Once() + defer suite.engineEventsDistributor.AssertExpectations(suite.T()) suite.phase = flow.EpochPhaseStaking // transition epochs, so that a Previous epoch is queryable + suite.AddCommittedEpoch(suite.counter + 1) suite.TransitionEpoch() prevEpoch := suite.epochs[suite.counter-1] // the finalized height is more than tx_expiry above previous epoch final height @@ -325,8 +364,12 @@ func (suite *Suite) TestStartAfterEpochBoundary_BeyondTxExpiry() { // boundary that we could start the previous epoch cluster consensus - however, // since we are not approved for the epoch, we should only start current epoch components. 
func (suite *Suite) TestStartAfterEpochBoundary_NotApprovedForPreviousEpoch() { + // we expect 1 ActiveClustersChanged events when the current epoch components are started + suite.engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Once() + defer suite.engineEventsDistributor.AssertExpectations(suite.T()) suite.phase = flow.EpochPhaseStaking // transition epochs, so that a Previous epoch is queryable + suite.AddCommittedEpoch(suite.counter + 1) suite.TransitionEpoch() prevEpoch := suite.epochs[suite.counter-1] // the finalized height is within [1,tx_expiry] heights of previous epoch final height @@ -344,10 +387,14 @@ func (suite *Suite) TestStartAfterEpochBoundary_NotApprovedForPreviousEpoch() { // TestStartAfterEpochBoundary_NotApprovedForCurrentEpoch tests starting the engine // shortly after an epoch transition. The finalized boundary is near enough the epoch // boundary that we should start the previous epoch cluster consensus. However, we are -// not approved for the current epoch -> we should only start *current* epoch components. +// not approved for the current epoch -> we should only start *previous* epoch components. func (suite *Suite) TestStartAfterEpochBoundary_NotApprovedForCurrentEpoch() { + // we expect 1 ActiveClustersChanged events when the current epoch components are started + suite.engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Once() + defer suite.engineEventsDistributor.AssertExpectations(suite.T()) suite.phase = flow.EpochPhaseStaking // transition epochs, so that a Previous epoch is queryable + suite.AddCommittedEpoch(suite.counter + 1) suite.TransitionEpoch() prevEpoch := suite.epochs[suite.counter-1] // the finalized height is within [1,tx_expiry] heights of previous epoch final height @@ -363,6 +410,27 @@ func (suite *Suite) TestStartAfterEpochBoundary_NotApprovedForCurrentEpoch() { suite.Assert().Len(suite.components, 1) } +// TestStartAfterEpochBoundary_PreviousEpochTransitionBeforeRoot tests starting the engine +// with a root snapshot whose sealing segment excludes the last epoch boundary. +// In this case we should only start up current-epoch components. +func (suite *Suite) TestStartAfterEpochBoundary_PreviousEpochTransitionBeforeRoot() { + // we expect 1 ActiveClustersChanged events when the current epoch components are started + suite.engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Once() + defer suite.engineEventsDistributor.AssertExpectations(suite.T()) + suite.phase = flow.EpochPhaseStaking + // transition epochs, so that a Previous epoch is queryable + suite.AddCommittedEpoch(suite.counter + 1) + suite.TransitionEpoch() + prevEpoch := suite.epochs[suite.counter-1] + // Previous epoch end boundary is unknown because it is before our root snapshot + prevEpoch.On("FinalHeight").Return(uint64(0), realprotocol.ErrUnknownEpochBoundary) + + suite.StartEngine() + // only current epoch components should have been started + suite.AssertEpochStarted(suite.counter) + suite.Assert().Len(suite.components, 1) +} + // TestStartAsUnauthorizedNode test that when a collection node joins the network // at an epoch boundary, they must start running during the EpochSetup phase in the // epoch before they become an authorized member so they submit their cluster QC vote. 
@@ -372,10 +440,13 @@
 func (suite *Suite) TestStartAsUnauthorizedNode() {
 	suite.MockAsUnauthorizedNode(suite.counter)
 	// we are in setup phase
+	suite.AddTentativeEpoch(suite.counter + 1)
 	suite.phase = flow.EpochPhaseSetup

 	// should call voter with next epoch
 	var called = make(chan struct{})
-	suite.voter.On("Vote", mock.Anything, suite.epochQuery.Next()).
+	nextEpochTentative, err := suite.epochQuery.NextUnsafe()
+	require.NoError(suite.T(), err, "cannot get next tentative epoch")
+	suite.voter.On("Vote", mock.Anything, nextEpochTentative).
 		Return(nil).
 		Run(func(args mock.Arguments) {
 			close(called)
@@ -393,11 +464,18 @@
 // TestRespondToPhaseChange should kick off root QC voter when we receive an event
 // indicating the EpochSetup phase has started.
 func (suite *Suite) TestRespondToPhaseChange() {
+	// we expect one ActiveClustersChanged event when the engine first starts and the first set of epoch components is started
+	suite.engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Once()
+	defer suite.engineEventsDistributor.AssertExpectations(suite.T())
+
 	// start in staking phase
 	suite.phase = flow.EpochPhaseStaking
+	suite.AddTentativeEpoch(suite.counter + 1)

 	// should call voter with next epoch
 	var called = make(chan struct{})
-	suite.voter.On("Vote", mock.Anything, suite.epochQuery.Next()).
+	nextEpochTentative, err := suite.epochQuery.NextUnsafe()
+	require.NoError(suite.T(), err, "cannot get next tentative epoch")
+	suite.voter.On("Vote", mock.Anything, nextEpochTentative).
 		Return(nil).
 		Run(func(args mock.Arguments) {
 			close(called)
@@ -418,8 +496,16 @@
 // - register callback to stop the previous epoch's cluster consensus
 // - stop the previous epoch's cluster consensus when the callback is invoked
 func (suite *Suite) TestRespondToEpochTransition() {
+	// we expect 3 ActiveClustersChanged events
+	// - once when the engine first starts and the first set of epoch components is started
+	// - once when the epoch transitions and the new set of epoch components is started
+	// - once when the epoch transitions and the old set of epoch components is stopped
+	expectedNumOfEvents := 3
+	suite.engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Times(expectedNumOfEvents)
+	defer suite.engineEventsDistributor.AssertExpectations(suite.T())
+
 	// we are in committed phase
+	suite.AddCommittedEpoch(suite.counter + 1)
 	suite.phase = flow.EpochPhaseCommitted

 	suite.StartEngine()
@@ -478,3 +564,38 @@
 	// the expired epoch should have been stopped
 	suite.AssertEpochStopped(suite.counter - 1)
 }
+
+// TestStopQcVoting tests that, if we encounter an EpochEmergencyFallbackTriggered event,
+// the engine stops any in-progress QC voting. The engine tracks the current in-progress
+// QC vote by keeping a pointer to the cancel func for the context of that process.
+// When the EFM event is encountered while voting is in progress, the cancel func is invoked
+// and the voting process is stopped.
+func (suite *Suite) TestStopQcVoting() { + // we expect 1 ActiveClustersChanged events when the engine first starts and the first set of epoch components are started + suite.engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Once() + + // we are in setup phase, forces engine to start voting on startup + suite.AddTentativeEpoch(suite.counter + 1) + suite.phase = flow.EpochPhaseSetup + + receivedCancelSignal := make(chan struct{}) + nextEpochTentative, err := suite.epochQuery.NextUnsafe() + require.NoError(suite.T(), err, "cannot get next tentative epoch") + suite.voter.On("Vote", mock.Anything, nextEpochTentative). + Return(nil). + Run(func(args mock.Arguments) { + ctx := args.Get(0).(context.Context) + <-ctx.Done() + close(receivedCancelSignal) + }).Once() + + // start up the engine + suite.StartEngine() + + require.NotNil(suite.T(), suite.engine.inProgressQCVote.Load(), "expected qc vote to be in progress") + + // simulate processing efm triggered event, this should cancel all in progress voting + suite.engine.EpochEmergencyFallbackTriggered() + + unittest.AssertClosesBefore(suite.T(), receivedCancelSignal, time.Second) +} diff --git a/engine/collection/epochmgr/factories/builder.go b/engine/collection/epochmgr/factories/builder.go index a00a73ac97e..6ce70ce724a 100644 --- a/engine/collection/epochmgr/factories/builder.go +++ b/engine/collection/epochmgr/factories/builder.go @@ -3,49 +3,55 @@ package factories import ( "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" + "github.com/onflow/flow-go/engine/collection" "github.com/onflow/flow-go/module" builder "github.com/onflow/flow-go/module/builder/collection" finalizer "github.com/onflow/flow-go/module/finalizer/collection" "github.com/onflow/flow-go/module/mempool" - "github.com/onflow/flow-go/network" clusterstate "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" ) type BuilderFactory struct { - db *badger.DB + db storage.DB protoState protocol.State + lockManager lockctx.Manager mainChainHeaders storage.Headers trace module.Tracer opts []builder.Opt metrics module.CollectionMetrics - pusher network.Engine // engine for pushing finalized collection to consensus committee + pusher collection.GuaranteedCollectionPublisher // engine for pushing finalized collection to consensus committee + configGetter module.ReadonlySealingLagRateLimiterConfig log zerolog.Logger } func NewBuilderFactory( - db *badger.DB, + db storage.DB, protoState protocol.State, + lockManager lockctx.Manager, mainChainHeaders storage.Headers, trace module.Tracer, metrics module.CollectionMetrics, - pusher network.Engine, + pusher collection.GuaranteedCollectionPublisher, log zerolog.Logger, + configGetter module.ReadonlySealingLagRateLimiterConfig, opts ...builder.Opt, ) (*BuilderFactory, error) { factory := &BuilderFactory{ db: db, protoState: protoState, + lockManager: lockManager, mainChainHeaders: mainChainHeaders, trace: trace, metrics: metrics, pusher: pusher, log: log, + configGetter: configGetter, opts: opts, } return factory, nil @@ -62,6 +68,8 @@ func (f *BuilderFactory) Create( build, err := builder.NewBuilder( f.db, f.trace, + f.lockManager, + f.metrics, f.protoState, clusterState, f.mainChainHeaders, @@ -70,6 +78,7 @@ func (f *BuilderFactory) Create( pool, f.log, epoch, + f.configGetter, f.opts..., ) if err != nil { @@ -78,6 +87,7 @@ func (f *BuilderFactory) Create( final := 
finalizer.NewFinalizer( f.db, + f.lockManager, pool, f.pusher, f.metrics, diff --git a/engine/collection/epochmgr/factories/cluster_state.go b/engine/collection/epochmgr/factories/cluster_state.go index 7f786f4ff36..9548f033943 100644 --- a/engine/collection/epochmgr/factories/cluster_state.go +++ b/engine/collection/epochmgr/factories/cluster_state.go @@ -3,43 +3,47 @@ package factories import ( "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/module" clusterkv "github.com/onflow/flow-go/state/cluster/badger" - bstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" ) type ClusterStateFactory struct { - db *badger.DB - metrics module.CacheMetrics - tracer module.Tracer + db storage.DB + lockManager lockctx.Manager + metrics module.CacheMetrics + tracer module.Tracer } func NewClusterStateFactory( - db *badger.DB, + db storage.DB, + lockManager lockctx.Manager, metrics module.CacheMetrics, tracer module.Tracer, ) (*ClusterStateFactory, error) { factory := &ClusterStateFactory{ - db: db, - metrics: metrics, - tracer: tracer, + db: db, + lockManager: lockManager, + metrics: metrics, + tracer: tracer, } return factory, nil } func (f *ClusterStateFactory) Create(stateRoot *clusterkv.StateRoot) ( *clusterkv.MutableState, - *bstorage.Headers, - *bstorage.ClusterPayloads, - *bstorage.ClusterBlocks, + *store.Headers, + storage.ClusterPayloads, + storage.ClusterBlocks, error, ) { - headers := bstorage.NewHeaders(f.metrics, f.db) - payloads := bstorage.NewClusterPayloads(f.metrics, f.db) - blocks := bstorage.NewClusterBlocks(f.db, stateRoot.ClusterID(), headers, payloads) + headers := store.NewHeaders(f.metrics, f.db) + payloads := store.NewClusterPayloads(f.metrics, f.db) + blocks := store.NewClusterBlocks(f.db, stateRoot.ClusterID(), headers, payloads) isBootStrapped, err := clusterkv.IsBootstrapped(f.db, stateRoot.ClusterID()) if err != nil { @@ -52,13 +56,13 @@ func (f *ClusterStateFactory) Create(stateRoot *clusterkv.StateRoot) ( return nil, nil, nil, nil, fmt.Errorf("could not open cluster state: %w", err) } } else { - clusterState, err = clusterkv.Bootstrap(f.db, stateRoot) + clusterState, err = clusterkv.Bootstrap(f.db, f.lockManager, stateRoot) if err != nil { return nil, nil, nil, nil, fmt.Errorf("could not bootstrap cluster state: %w", err) } } - mutableState, err := clusterkv.NewMutableState(clusterState, f.tracer, headers, payloads) + mutableState, err := clusterkv.NewMutableState(clusterState, f.lockManager, f.tracer, headers, payloads) if err != nil { return nil, nil, nil, nil, fmt.Errorf("could create mutable cluster state: %w", err) } diff --git a/engine/collection/epochmgr/factories/compliance.go b/engine/collection/epochmgr/factories/compliance.go index 5db39834045..26a2d57e224 100644 --- a/engine/collection/epochmgr/factories/compliance.go +++ b/engine/collection/epochmgr/factories/compliance.go @@ -20,26 +20,24 @@ import ( type ComplianceEngineFactory struct { log zerolog.Logger me module.Local - net network.Network + net network.EngineRegistry colMetrics module.CollectionMetrics engMetrics module.EngineMetrics mempoolMetrics module.MempoolMetrics protoState protocol.State - transactions storage.Transactions - complianceOpts []modulecompliance.Opt + config modulecompliance.Config } // NewComplianceEngineFactory returns a new collection compliance engine factory. 
func NewComplianceEngineFactory( log zerolog.Logger, - net network.Network, + net network.EngineRegistry, me module.Local, colMetrics module.CollectionMetrics, engMetrics module.EngineMetrics, mempoolMetrics module.MempoolMetrics, protoState protocol.State, - transactions storage.Transactions, - complianceOpts ...modulecompliance.Opt, + config modulecompliance.Config, ) (*ComplianceEngineFactory, error) { factory := &ComplianceEngineFactory{ @@ -50,8 +48,7 @@ func NewComplianceEngineFactory( engMetrics: engMetrics, mempoolMetrics: mempoolMetrics, protoState: protoState, - transactions: transactions, - complianceOpts: complianceOpts, + config: config, } return factory, nil } @@ -85,7 +82,7 @@ func (f *ComplianceEngineFactory) Create( hot, voteAggregator, timeoutAggregator, - f.complianceOpts..., + f.config, ) if err != nil { return nil, fmt.Errorf("could create cluster compliance core: %w", err) diff --git a/engine/collection/epochmgr/factories/epoch.go b/engine/collection/epochmgr/factories/epoch.go index 25f6c42ab89..c55224a62bc 100644 --- a/engine/collection/epochmgr/factories/epoch.go +++ b/engine/collection/epochmgr/factories/epoch.go @@ -55,7 +55,7 @@ func NewEpochComponentsFactory( } func (factory *EpochComponentsFactory) Create( - epoch protocol.Epoch, + epoch protocol.CommittedEpoch, ) ( state cluster.State, compliance component.Component, @@ -67,18 +67,10 @@ func (factory *EpochComponentsFactory) Create( err error, ) { - epochCounter, err := epoch.Counter() - if err != nil { - err = fmt.Errorf("could not get epoch counter: %w", err) - return - } + epochCounter := epoch.Counter() // if we are not an authorized participant in this epoch, return a sentinel - identities, err := epoch.InitialIdentities() - if err != nil { - err = fmt.Errorf("could not get initial identities for epoch: %w", err) - return - } + identities := epoch.InitialIdentities() _, exists := identities.ByNodeID(factory.me.NodeID()) if !exists { err = fmt.Errorf("%w (node_id=%x, epoch=%d)", epochmgr.ErrNotAuthorizedForEpoch, factory.me.NodeID(), epochCounter) diff --git a/engine/collection/epochmgr/factories/hotstuff.go b/engine/collection/epochmgr/factories/hotstuff.go index c6d521bc851..07a99c7d398 100644 --- a/engine/collection/epochmgr/factories/hotstuff.go +++ b/engine/collection/epochmgr/factories/hotstuff.go @@ -3,7 +3,6 @@ package factories import ( "fmt" - "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog" "github.com/onflow/flow-go/consensus" @@ -32,7 +31,8 @@ type HotStuffMetricsFunc func(chainID flow.ChainID) module.HotstuffMetrics type HotStuffFactory struct { baseLogger zerolog.Logger me module.Local - db *badger.DB + db storage.DB + lockManager storage.LockManager protoState protocol.State engineMetrics module.EngineMetrics mempoolMetrics module.MempoolMetrics @@ -43,7 +43,8 @@ type HotStuffFactory struct { func NewHotStuffFactory( log zerolog.Logger, me module.Local, - db *badger.DB, + db storage.DB, + lockManager storage.LockManager, protoState protocol.State, engineMetrics module.EngineMetrics, mempoolMetrics module.MempoolMetrics, @@ -55,6 +56,7 @@ func NewHotStuffFactory( baseLogger: log, me: me, db: db, + lockManager: lockManager, protoState: protoState, engineMetrics: engineMetrics, mempoolMetrics: mempoolMetrics, @@ -65,7 +67,7 @@ func NewHotStuffFactory( } func (f *HotStuffFactory) CreateModules( - epoch protocol.Epoch, + epoch protocol.CommittedEpoch, cluster protocol.Cluster, clusterState cluster.State, headers storage.Headers, @@ -107,7 +109,7 @@ func (f *HotStuffFactory) 
CreateModules( headers, updater, notifier, - cluster.RootBlock().Header, + cluster.RootBlock().ToHeader(), cluster.RootQC(), ) if err != nil { @@ -156,13 +158,18 @@ func (f *HotStuffFactory) CreateModules( return nil, nil, err } + persist, err := persister.New(f.db, cluster.ChainID(), f.lockManager) + if err != nil { + return nil, nil, err + } + return &consensus.HotstuffModules{ Forks: forks, Validator: validator, Notifier: notifier, Committee: committee, Signer: signer, - Persist: persister.New(f.db, cluster.ChainID()), + Persist: persist, VoteAggregator: voteAggregator, TimeoutAggregator: timeoutAggregator, VoteCollectorDistributor: voteAggregationDistributor.VoteCollectorDistributor, @@ -191,6 +198,7 @@ func (f *HotStuffFactory) Create( participant, err := consensus.NewParticipant( log, metrics, + f.mempoolMetrics, builder, finalizedBlock, pendingBlocks, diff --git a/engine/collection/epochmgr/factories/hub.go b/engine/collection/epochmgr/factories/hub.go index 434a699aef7..eae1f79bf82 100644 --- a/engine/collection/epochmgr/factories/hub.go +++ b/engine/collection/epochmgr/factories/hub.go @@ -16,13 +16,13 @@ import ( type MessageHubFactory struct { log zerolog.Logger me module.Local - net network.Network + net network.EngineRegistry protoState protocol.State engineMetrics module.EngineMetrics } func NewMessageHubFactory(log zerolog.Logger, - net network.Network, + net network.EngineRegistry, me module.Local, engineMetrics module.EngineMetrics, protoState protocol.State) *MessageHubFactory { diff --git a/engine/collection/epochmgr/factories/sync.go b/engine/collection/epochmgr/factories/sync.go index 98bf10c4142..e3dbe15dca6 100644 --- a/engine/collection/epochmgr/factories/sync.go +++ b/engine/collection/epochmgr/factories/sync.go @@ -15,7 +15,7 @@ import ( type SyncEngineFactory struct { log zerolog.Logger - net network.Network + net network.EngineRegistry me module.Local metrics module.EngineMetrics } @@ -23,7 +23,7 @@ type SyncEngineFactory struct { func NewSyncEngineFactory( log zerolog.Logger, metrics module.EngineMetrics, - net network.Network, + net network.EngineRegistry, me module.Local, ) (*SyncEngineFactory, error) { @@ -37,7 +37,7 @@ func NewSyncEngineFactory( } func (f *SyncEngineFactory) Create( - participants flow.IdentityList, + participants flow.IdentitySkeletonList, state cluster.State, blocks storage.ClusterBlocks, core *chainsync.Core, diff --git a/engine/collection/epochmgr/factory.go b/engine/collection/epochmgr/factory.go index 801f314ff87..c6370674e51 100644 --- a/engine/collection/epochmgr/factory.go +++ b/engine/collection/epochmgr/factory.go @@ -18,7 +18,7 @@ type EpochComponentsFactory interface { // a given epoch counter. // // Must return ErrNotAuthorizedForEpoch if this node is not authorized in the epoch. - Create(epoch protocol.Epoch) ( + Create(epoch protocol.CommittedEpoch) ( state cluster.State, proposal component.Component, sync module.ReadyDoneAware, diff --git a/engine/collection/epochmgr/mock/epoch_components_factory.go b/engine/collection/epochmgr/mock/epoch_components_factory.go index a4b7f9b9356..4e58c01c41a 100644 --- a/engine/collection/epochmgr/mock/epoch_components_factory.go +++ b/engine/collection/epochmgr/mock/epoch_components_factory.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -21,9 +21,13 @@ type EpochComponentsFactory struct { } // Create provides a mock function with given fields: epoch -func (_m *EpochComponentsFactory) Create(epoch protocol.Epoch) (cluster.State, component.Component, module.ReadyDoneAware, module.HotStuff, hotstuff.VoteAggregator, hotstuff.TimeoutAggregator, component.Component, error) { +func (_m *EpochComponentsFactory) Create(epoch protocol.CommittedEpoch) (cluster.State, component.Component, module.ReadyDoneAware, module.HotStuff, hotstuff.VoteAggregator, hotstuff.TimeoutAggregator, component.Component, error) { ret := _m.Called(epoch) + if len(ret) == 0 { + panic("no return value specified for Create") + } + var r0 cluster.State var r1 component.Component var r2 module.ReadyDoneAware @@ -32,10 +36,10 @@ func (_m *EpochComponentsFactory) Create(epoch protocol.Epoch) (cluster.State, c var r5 hotstuff.TimeoutAggregator var r6 component.Component var r7 error - if rf, ok := ret.Get(0).(func(protocol.Epoch) (cluster.State, component.Component, module.ReadyDoneAware, module.HotStuff, hotstuff.VoteAggregator, hotstuff.TimeoutAggregator, component.Component, error)); ok { + if rf, ok := ret.Get(0).(func(protocol.CommittedEpoch) (cluster.State, component.Component, module.ReadyDoneAware, module.HotStuff, hotstuff.VoteAggregator, hotstuff.TimeoutAggregator, component.Component, error)); ok { return rf(epoch) } - if rf, ok := ret.Get(0).(func(protocol.Epoch) cluster.State); ok { + if rf, ok := ret.Get(0).(func(protocol.CommittedEpoch) cluster.State); ok { r0 = rf(epoch) } else { if ret.Get(0) != nil { @@ -43,7 +47,7 @@ func (_m *EpochComponentsFactory) Create(epoch protocol.Epoch) (cluster.State, c } } - if rf, ok := ret.Get(1).(func(protocol.Epoch) component.Component); ok { + if rf, ok := ret.Get(1).(func(protocol.CommittedEpoch) component.Component); ok { r1 = rf(epoch) } else { if ret.Get(1) != nil { @@ -51,7 +55,7 @@ func (_m *EpochComponentsFactory) Create(epoch protocol.Epoch) (cluster.State, c } } - if rf, ok := ret.Get(2).(func(protocol.Epoch) module.ReadyDoneAware); ok { + if rf, ok := ret.Get(2).(func(protocol.CommittedEpoch) module.ReadyDoneAware); ok { r2 = rf(epoch) } else { if ret.Get(2) != nil { @@ -59,7 +63,7 @@ func (_m *EpochComponentsFactory) Create(epoch protocol.Epoch) (cluster.State, c } } - if rf, ok := ret.Get(3).(func(protocol.Epoch) module.HotStuff); ok { + if rf, ok := ret.Get(3).(func(protocol.CommittedEpoch) module.HotStuff); ok { r3 = rf(epoch) } else { if ret.Get(3) != nil { @@ -67,7 +71,7 @@ func (_m *EpochComponentsFactory) Create(epoch protocol.Epoch) (cluster.State, c } } - if rf, ok := ret.Get(4).(func(protocol.Epoch) hotstuff.VoteAggregator); ok { + if rf, ok := ret.Get(4).(func(protocol.CommittedEpoch) hotstuff.VoteAggregator); ok { r4 = rf(epoch) } else { if ret.Get(4) != nil { @@ -75,7 +79,7 @@ func (_m *EpochComponentsFactory) Create(epoch protocol.Epoch) (cluster.State, c } } - if rf, ok := ret.Get(5).(func(protocol.Epoch) hotstuff.TimeoutAggregator); ok { + if rf, ok := ret.Get(5).(func(protocol.CommittedEpoch) hotstuff.TimeoutAggregator); ok { r5 = rf(epoch) } else { if ret.Get(5) != nil { @@ -83,7 +87,7 @@ func (_m *EpochComponentsFactory) Create(epoch protocol.Epoch) (cluster.State, c } } - if rf, ok := ret.Get(6).(func(protocol.Epoch) component.Component); ok { + if rf, ok := ret.Get(6).(func(protocol.CommittedEpoch) component.Component); ok { r6 = rf(epoch) } else { if ret.Get(6) != nil { @@ -91,7 +95,7 @@ func (_m *EpochComponentsFactory) Create(epoch protocol.Epoch) 
(cluster.State, c
 		}
 	}
 
-	if rf, ok := ret.Get(7).(func(protocol.Epoch) error); ok {
+	if rf, ok := ret.Get(7).(func(protocol.CommittedEpoch) error); ok {
 		r7 = rf(epoch)
 	} else {
 		r7 = ret.Error(7)
@@ -100,13 +104,12 @@ func (_m *EpochComponentsFactory) Create(epoch protocol.Epoch) (cluster.State, c
 	return r0, r1, r2, r3, r4, r5, r6, r7
 }
 
-type mockConstructorTestingTNewEpochComponentsFactory interface {
+// NewEpochComponentsFactory creates a new instance of EpochComponentsFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewEpochComponentsFactory(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewEpochComponentsFactory creates a new instance of EpochComponentsFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewEpochComponentsFactory(t mockConstructorTestingTNewEpochComponentsFactory) *EpochComponentsFactory {
+}) *EpochComponentsFactory {
 	mock := &EpochComponentsFactory{}
 	mock.Mock.Test(t)
diff --git a/engine/collection/events.go b/engine/collection/events.go
new file mode 100644
index 00000000000..1c5809806b4
--- /dev/null
+++ b/engine/collection/events.go
@@ -0,0 +1,19 @@
+package collection
+
+import "github.com/onflow/flow-go/model/flow"
+
+// EngineEvents is the set of methods used to distribute and consume events related to collection node engine components.
+type EngineEvents interface {
+	ClusterEvents
+}
+
+// ClusterEvents defines methods used to disseminate cluster ID update events.
+// Cluster IDs are updated when a new set of epoch components starts and the old set of epoch components stops.
+// A new list of cluster IDs is assigned when the new set of epoch components is started, and the old set of cluster
+// IDs is removed when the current set of epoch components is stopped. The implementation must be concurrency safe.
+type ClusterEvents interface {
+	// ActiveClustersChanged is called when a new cluster ID update event is distributed.
+	// Any error encountered while consuming an event must be handled internally by the implementation.
+	// The implementation must be concurrency safe, but can be blocking.
+	ActiveClustersChanged(flow.ChainIDList)
+}
diff --git a/engine/collection/events/cluster_events_distributor.go b/engine/collection/events/cluster_events_distributor.go
new file mode 100644
index 00000000000..caff0ebd26a
--- /dev/null
+++ b/engine/collection/events/cluster_events_distributor.go
@@ -0,0 +1,36 @@
+package events
+
+import (
+	"sync"
+
+	"github.com/onflow/flow-go/engine/collection"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// ClusterEventsDistributor distributes cluster events to a list of subscribers.
+type ClusterEventsDistributor struct {
+	subscribers []collection.ClusterEvents
+	mu          sync.RWMutex
+}
+
+var _ collection.ClusterEvents = (*ClusterEventsDistributor)(nil)
+
+// NewClusterEventsDistributor returns a new *ClusterEventsDistributor.
+func NewClusterEventsDistributor() *ClusterEventsDistributor {
+	return &ClusterEventsDistributor{}
+}
+
+func (d *ClusterEventsDistributor) AddConsumer(consumer collection.ClusterEvents) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	d.subscribers = append(d.subscribers, consumer)
+}
+
+// ActiveClustersChanged distributes events to all subscribers.
+func (d *ClusterEventsDistributor) ActiveClustersChanged(list flow.ChainIDList) { + d.mu.RLock() + defer d.mu.RUnlock() + for _, sub := range d.subscribers { + sub.ActiveClustersChanged(list) + } +} diff --git a/engine/collection/events/distributor.go b/engine/collection/events/distributor.go new file mode 100644 index 00000000000..39e723f30db --- /dev/null +++ b/engine/collection/events/distributor.go @@ -0,0 +1,23 @@ +package events + +import ( + "github.com/onflow/flow-go/engine/collection" +) + +// CollectionEngineEventsDistributor set of structs that implement all collection engine event interfaces. +type CollectionEngineEventsDistributor struct { + *ClusterEventsDistributor +} + +var _ collection.EngineEvents = (*CollectionEngineEventsDistributor)(nil) + +// NewDistributor returns a new *CollectionEngineEventsDistributor. +func NewDistributor() *CollectionEngineEventsDistributor { + return &CollectionEngineEventsDistributor{ + ClusterEventsDistributor: NewClusterEventsDistributor(), + } +} + +func (d *CollectionEngineEventsDistributor) AddConsumer(consumer collection.EngineEvents) { + d.ClusterEventsDistributor.AddConsumer(consumer) +} diff --git a/engine/collection/guaranteed_collection_publisher.go b/engine/collection/guaranteed_collection_publisher.go new file mode 100644 index 00000000000..42c0646282e --- /dev/null +++ b/engine/collection/guaranteed_collection_publisher.go @@ -0,0 +1,15 @@ +package collection + +import ( + "github.com/onflow/flow-go/model/messages" +) + +// GuaranteedCollectionPublisher defines the interface to send collection guarantees +// from a collection node to consensus nodes. Collection guarantees are broadcast on a best-effort basis, +// and it is acceptable to discard some guarantees (especially those that are out of date). +// Implementation is non-blocking and concurrency safe. +type GuaranteedCollectionPublisher interface { + // SubmitCollectionGuarantee adds a guarantee to an internal queue + // to be published to consensus nodes. + SubmitCollectionGuarantee(guarantee *messages.CollectionGuarantee) +} diff --git a/engine/collection/ingest/engine.go b/engine/collection/ingest/engine.go index 31aadf451e2..dde2f4753a0 100644 --- a/engine/collection/ingest/engine.go +++ b/engine/collection/ingest/engine.go @@ -9,10 +9,11 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/access/validator" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" @@ -38,7 +39,7 @@ type Engine struct { pendingTransactions engine.MessageStore messageHandler *engine.MessageHandler pools *epochs.TransactionPools - transactionValidator *access.TransactionValidator + transactionValidator *validator.TransactionValidator config Config } @@ -46,7 +47,7 @@ type Engine struct { // New creates a new collection ingest engine. 
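The new `collection.ClusterEvents`/distributor pairing introduced above is easiest to see end to end with a concrete subscriber. The following is a minimal, illustrative sketch and not part of this patch: `loggingConsumer` is a hypothetical type, and only `events.NewDistributor`, `AddConsumer`, and `ActiveClustersChanged` come from the files added above.

	package main

	import (
		"fmt"

		"github.com/onflow/flow-go/engine/collection/events"
		"github.com/onflow/flow-go/model/flow"
	)

	// loggingConsumer is a hypothetical subscriber; any type with an
	// ActiveClustersChanged(flow.ChainIDList) method satisfies
	// collection.ClusterEvents (and hence collection.EngineEvents).
	type loggingConsumer struct{}

	func (c *loggingConsumer) ActiveClustersChanged(list flow.ChainIDList) {
		// per the interface contract: handle errors internally, stay concurrency safe
		fmt.Printf("active clusters changed: %v\n", list)
	}

	func main() {
		distributor := events.NewDistributor()
		distributor.AddConsumer(&loggingConsumer{})

		// the epoch manager would invoke this when a new set of epoch
		// components starts; the distributor fans it out under a read lock
		distributor.ActiveClustersChanged(flow.ChainIDList{"cluster-a", "cluster-b"})
	}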
func New( log zerolog.Logger, - net network.Network, + net network.EngineRegistry, state protocol.State, engMetrics module.EngineMetrics, mempoolMetrics module.MempoolMetrics, @@ -55,21 +56,25 @@ func New( chain flow.Chain, pools *epochs.TransactionPools, config Config, + limiter *AddressRateLimiter, ) (*Engine, error) { logger := log.With().Str("engine", "ingest").Logger() - transactionValidator := access.NewTransactionValidator( - access.NewProtocolStateBlocks(state), + transactionValidator := validator.NewTransactionValidatorWithLimiter( + validator.NewProtocolStateBlocks(state, nil), chain, - access.TransactionValidationOptions{ + validator.TransactionValidationOptions{ Expiry: flow.DefaultTransactionExpiry, ExpiryBuffer: config.ExpiryBuffer, MaxGasLimit: config.MaxGasLimit, CheckScriptsParse: config.CheckScriptsParse, MaxTransactionByteSize: config.MaxTransactionByteSize, MaxCollectionByteSize: config.MaxCollectionByteSize, + CheckPayerBalanceMode: validator.Disabled, }, + colMetrics, + limiter, ) // FIFO queue for transactions @@ -246,8 +251,10 @@ func (e *Engine) onTransaction(originID flow.Identifier, tx *flow.TransactionBod // using the transaction's reference block, determine which cluster we're in. // if we don't know the reference block, we will fail when attempting to query the epoch. - refEpoch := refSnapshot.Epochs().Current() - + refEpoch, err := refSnapshot.Epochs().Current() + if err != nil { + return fmt.Errorf("could not get current epoch for reference block: %w", err) + } localCluster, err := e.getLocalCluster(refEpoch) if err != nil { return fmt.Errorf("could not get local cluster: %w", err) @@ -295,11 +302,8 @@ func (e *Engine) onTransaction(originID flow.Identifier, tx *flow.TransactionBod // a member of the reference epoch. This is an expected condition and the transaction // should be discarded. // - other error for any other, unexpected error condition. -func (e *Engine) getLocalCluster(refEpoch protocol.Epoch) (flow.IdentityList, error) { - epochCounter, err := refEpoch.Counter() - if err != nil { - return nil, fmt.Errorf("could not get counter for reference epoch: %w", err) - } +func (e *Engine) getLocalCluster(refEpoch protocol.CommittedEpoch) (flow.IdentitySkeletonList, error) { + epochCounter := refEpoch.Counter() clusters, err := refEpoch.Clustering() if err != nil { return nil, fmt.Errorf("could not get clusters for reference epoch: %w", err) @@ -309,10 +313,7 @@ func (e *Engine) getLocalCluster(refEpoch protocol.Epoch) (flow.IdentityList, er if !ok { // if we aren't assigned to a cluster, check that we are a member of // the reference epoch - refIdentities, err := refEpoch.InitialIdentities() - if err != nil { - return nil, fmt.Errorf("could not get initial identities for reference epoch: %w", err) - } + refIdentities := refEpoch.InitialIdentities() if _, ok := refIdentities.ByNodeID(e.me.NodeID()); ok { // CAUTION: we are a member of the epoch, but have no assigned cluster! @@ -333,19 +334,14 @@ func (e *Engine) getLocalCluster(refEpoch protocol.Epoch) (flow.IdentityList, er // * other error for any other unexpected error condition. 
func (e *Engine) ingestTransaction( log zerolog.Logger, - refEpoch protocol.Epoch, + refEpoch protocol.CommittedEpoch, tx *flow.TransactionBody, txID flow.Identifier, localClusterFingerprint flow.Identifier, txClusterFingerprint flow.Identifier, ) error { - epochCounter, err := refEpoch.Counter() - if err != nil { - return fmt.Errorf("could not get counter for reference epoch: %w", err) - } - // use the transaction pool for the epoch the reference block is part of - pool := e.pools.ForEpoch(epochCounter) + pool := e.pools.ForEpoch(refEpoch.Counter()) // short-circuit if we have already stored the transaction if pool.Has(txID) { @@ -353,15 +349,15 @@ func (e *Engine) ingestTransaction( return nil } - // check if the transaction is valid - err = e.transactionValidator.Validate(tx) + // we don't pass actual ctx as we don't execute any scripts inside for now + err := e.transactionValidator.Validate(context.Background(), tx) if err != nil { return engine.NewInvalidInputErrorf("invalid transaction (%x): %w", txID, err) } // if our cluster is responsible for the transaction, add it to our local mempool if localClusterFingerprint == txClusterFingerprint { - _ = pool.Add(tx) + _ = pool.Add(tx.ID(), tx) e.colMetrics.TransactionIngested(txID) } @@ -370,10 +366,16 @@ func (e *Engine) ingestTransaction( // propagateTransaction propagates the transaction to a number of the responsible // cluster's members. Any unexpected networking errors are logged. -func (e *Engine) propagateTransaction(log zerolog.Logger, tx *flow.TransactionBody, txCluster flow.IdentityList) { +func (e *Engine) propagateTransaction(log zerolog.Logger, tx *flow.TransactionBody, txCluster flow.IdentitySkeletonList) { log.Debug().Msg("propagating transaction to cluster") - err := e.conduit.Multicast(tx, e.config.PropagationRedundancy+1, txCluster.NodeIDs()...) + msg, err := messages.InternalToMessage(tx) + if err != nil { + e.log.Error().Err(err).Msg("failed to convert event to message") + return + } + + err = e.conduit.Multicast(msg, e.config.PropagationRedundancy+1, txCluster.NodeIDs()...) 
if err != nil && !errors.Is(err, network.EmptyTargetList) { // if multicast to a target cluster with at least one node failed, log an error and exit e.log.Error().Err(err).Msg("could not route transaction to cluster") diff --git a/engine/collection/ingest/engine_test.go b/engine/collection/ingest/engine_test.go index cdaa33eb7db..9f9a96902f2 100644 --- a/engine/collection/ingest/engine_test.go +++ b/engine/collection/ingest/engine_test.go @@ -10,12 +10,14 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" + "golang.org/x/time/rate" - "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/access/validator" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool" @@ -24,7 +26,7 @@ import ( "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" realprotocol "github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/storage" @@ -72,7 +74,7 @@ func (suite *Suite) SetupTest() { log := zerolog.New(io.Discard) metrics := metrics.NewNoopCollector() - net := new(mocknetwork.Network) + net := new(mocknetwork.EngineRegistry) suite.conduit = new(mocknetwork.Conduit) net.On("Register", mock.Anything, mock.Anything).Return(suite.conduit, nil).Once() @@ -88,11 +90,11 @@ func (suite *Suite) SetupTest() { return herocache.NewTransactions(1000, log, metrics) }) - assignments := unittest.ClusterAssignment(suite.N_CLUSTERS, collectors) - suite.clusters, err = factory.NewClusterList(assignments, collectors) + assignments := unittest.ClusterAssignment(suite.N_CLUSTERS, collectors.ToSkeleton()) + suite.clusters, err = factory.NewClusterList(assignments, collectors.ToSkeleton()) suite.Require().NoError(err) - suite.root = unittest.GenesisFixture() + suite.root = unittest.Block.Genesis(flow.Emulator) suite.final = suite.root suite.blocks = make(map[flow.Identifier]*flow.Block) suite.blocks[suite.root.ID()] = suite.root @@ -101,7 +103,7 @@ func (suite *Suite) SetupTest() { suite.snapshot = new(protocol.Snapshot) suite.state.On("Final").Return(suite.snapshot) suite.snapshot.On("Head").Return( - func() *flow.Header { return suite.final.Header }, + func() *flow.Header { return suite.final.ToHeader() }, func() error { return nil }, ) suite.state.On("AtBlockID", mock.Anything).Return( @@ -109,7 +111,7 @@ func (suite *Suite) SetupTest() { snap := new(protocol.Snapshot) block, ok := suite.blocks[blockID] if ok { - snap.On("Head").Return(block.Header, nil) + snap.On("Head").Return(block.ToHeader(), nil) } else { snap.On("Head").Return(nil, storage.ErrNotFound) } @@ -118,14 +120,14 @@ func (suite *Suite) SetupTest() { }) // set up the current epoch by default, with counter=1 - epoch := new(protocol.Epoch) + epoch := new(protocol.CommittedEpoch) epoch.On("Counter").Return(uint64(1), nil) epoch.On("Clustering").Return(suite.clusters, nil) suite.epochQuery = mocks.NewEpochQuery(suite.T(), 1, epoch) suite.conf = DefaultConfig() chain := flow.Testnet.Chain() - suite.engine, err = New(log, net, suite.state, metrics, metrics, metrics, 
suite.me, chain, suite.pools, suite.conf) + suite.engine, err = New(log, net, suite.state, metrics, metrics, metrics, suite.me, chain, suite.pools, suite.conf, NewAddressRateLimiter(rate.Limit(1), 1)) suite.Require().NoError(err) } @@ -138,7 +140,7 @@ func (suite *Suite) TestInvalidTransaction() { err := suite.engine.ProcessTransaction(&tx) suite.Assert().Error(err) - suite.Assert().True(errors.As(err, &access.IncompleteTransactionError{})) + suite.Assert().True(errors.As(err, &validator.IncompleteTransactionError{})) }) suite.Run("gas limit exceeds the maximum allowed", func() { @@ -149,7 +151,7 @@ func (suite *Suite) TestInvalidTransaction() { err := suite.engine.ProcessTransaction(&tx) suite.Assert().Error(err) - suite.Assert().True(errors.As(err, &access.InvalidGasLimitError{})) + suite.Assert().True(errors.As(err, &validator.InvalidGasLimitError{})) }) suite.Run("invalid reference block ID", func() { @@ -168,12 +170,31 @@ func (suite *Suite) TestInvalidTransaction() { err := suite.engine.ProcessTransaction(&tx) suite.Assert().Error(err) - suite.Assert().True(errors.As(err, &access.InvalidScriptError{})) + suite.Assert().True(errors.As(err, &validator.InvalidScriptError{})) + }) + + // In some cases the Cadence parser will panic rather than return an error. + // If this happens, we should recover from the panic and return an InvalidScriptError. + // See: https://github.com/onflow/cadence/issues/3428, https://github.com/dapperlabs/flow-go/issues/6964 + suite.Run("transaction script exceeds parse token limit (Cadence parser panic should be caught)", func() { + const tokenLimit = 1 << 19 + script := "{};" + for len(script) < tokenLimit { + script += script + } + + tx := unittest.TransactionBodyFixture() + tx.ReferenceBlockID = suite.root.ID() + tx.Script = []byte("transaction { execute {" + script + "}}") + + err := suite.engine.ProcessTransaction(&tx) + suite.Assert().Error(err) + suite.Assert().True(errors.As(err, &validator.InvalidScriptError{})) }) suite.Run("invalid signature format", func() { signer := flow.Testnet.Chain().ServiceAddress() - keyIndex := uint64(0) + keyIndex := uint32(0) sig1 := unittest.TransactionSignatureFixture() sig1.KeyIndex = keyIndex @@ -193,7 +214,7 @@ func (suite *Suite) TestInvalidTransaction() { err := suite.engine.ProcessTransaction(&tx) suite.Assert().Error(err) - suite.Assert().True(errors.As(err, &access.InvalidSignatureError{})) + suite.Assert().True(errors.As(err, &validator.InvalidRawSignatureError{})) }) suite.Run("invalid format of a payload signature", func() { @@ -204,7 +225,7 @@ func (suite *Suite) TestInvalidTransaction() { err := suite.engine.ProcessTransaction(&tx) suite.Assert().Error(err) - suite.Assert().True(errors.As(err, &access.InvalidSignatureError{})) + suite.Assert().True(errors.As(err, &validator.InvalidRawSignatureError{})) }) suite.Run("duplicated signature (envelope only)", func() { @@ -213,7 +234,7 @@ func (suite *Suite) TestInvalidTransaction() { tx.EnvelopeSignatures = []flow.TransactionSignature{sig1, sig2} err := suite.engine.ProcessTransaction(&tx) suite.Assert().Error(err) - suite.Assert().True(errors.As(err, &access.DuplicatedSignatureError{})) + suite.Assert().True(errors.As(err, &validator.DuplicatedSignatureError{})) }) suite.Run("duplicated signature (payload only)", func() { @@ -223,7 +244,7 @@ func (suite *Suite) TestInvalidTransaction() { err := suite.engine.ProcessTransaction(&tx) suite.Assert().Error(err) - suite.Assert().True(errors.As(err, &access.DuplicatedSignatureError{})) + 
suite.Assert().True(errors.As(err, &validator.DuplicatedSignatureError{})) }) suite.Run("duplicated signature (cross case)", func() { @@ -234,7 +255,7 @@ func (suite *Suite) TestInvalidTransaction() { err := suite.engine.ProcessTransaction(&tx) suite.Assert().Error(err) - suite.Assert().True(errors.As(err, &access.DuplicatedSignatureError{})) + suite.Assert().True(errors.As(err, &validator.DuplicatedSignatureError{})) }) }) @@ -251,21 +272,21 @@ func (suite *Suite) TestInvalidTransaction() { err := suite.engine.ProcessTransaction(&tx) suite.Assert().Error(err) - suite.Assert().True(errors.As(err, &access.InvalidAddressError{})) + suite.Assert().True(errors.As(err, &validator.InvalidAddressError{})) }) suite.Run("expired reference block ID", func() { // "finalize" a sufficiently high block that root block is expired - final := unittest.BlockFixture() - final.Header.Height = suite.root.Header.Height + flow.DefaultTransactionExpiry + 1 - suite.final = &final + suite.final = unittest.BlockFixture( + unittest.Block.WithHeight(suite.root.Height + flow.DefaultTransactionExpiry + 1), + ) tx := unittest.TransactionBodyFixture() tx.ReferenceBlockID = suite.root.ID() err := suite.engine.ProcessTransaction(&tx) suite.Assert().Error(err) - suite.Assert().True(errors.As(err, &access.ExpiredTransactionError{})) + suite.Assert().True(errors.As(err, &validator.ExpiredTransactionError{})) }) } @@ -277,7 +298,7 @@ func (suite *Suite) TestComponentShutdown() { // start then shut down the engine parentCtx, cancel := context.WithCancel(context.Background()) - ctx, _ := irrecoverable.WithSignaler(parentCtx) + ctx := irrecoverable.NewMockSignalerContext(suite.T(), parentCtx) suite.engine.Start(ctx) unittest.AssertClosesBefore(suite.T(), suite.engine.Ready(), 10*time.Millisecond) cancel() @@ -300,16 +321,16 @@ func (suite *Suite) TestRoutingLocalCluster() { // should route to local cluster suite.conduit. - On("Multicast", &tx, suite.conf.PropagationRedundancy+1, local.NodeIDs()[0], local.NodeIDs()[1]). + On("Multicast", (*messages.TransactionBody)(&tx), suite.conf.PropagationRedundancy+1, local.NodeIDs()[0], local.NodeIDs()[1]). Return(nil) err := suite.engine.ProcessTransaction(&tx) suite.Assert().NoError(err) // should be added to local mempool for the current epoch - counter, err := suite.epochQuery.Current().Counter() + currentEpoch, err := suite.epochQuery.Current() suite.Assert().NoError(err) - suite.Assert().True(suite.pools.ForEpoch(counter).Has(tx.ID())) + suite.Assert().True(suite.pools.ForEpoch(currentEpoch.Counter()).Has(tx.ID())) suite.conduit.AssertExpectations(suite.T()) } @@ -330,16 +351,16 @@ func (suite *Suite) TestRoutingRemoteCluster() { // should route to remote cluster suite.conduit. - On("Multicast", &tx, suite.conf.PropagationRedundancy+1, remote[0].NodeID, remote[1].NodeID). + On("Multicast", (*messages.TransactionBody)(&tx), suite.conf.PropagationRedundancy+1, remote[0].NodeID, remote[1].NodeID). 
Return(nil) err := suite.engine.ProcessTransaction(&tx) suite.Assert().NoError(err) // should not be added to local mempool - counter, err := suite.epochQuery.Current().Counter() + currentEpoch, err := suite.epochQuery.Current() suite.Assert().NoError(err) - suite.Assert().False(suite.pools.ForEpoch(counter).Has(tx.ID())) + suite.Assert().False(suite.pools.ForEpoch(currentEpoch.Counter()).Has(tx.ID())) suite.conduit.AssertExpectations(suite.T()) } @@ -352,7 +373,7 @@ func (suite *Suite) TestRoutingToRemoteClusterWithNoNodes() { suite.Require().True(ok) // set the next cluster to be empty - emptyIdentityList := flow.IdentityList{} + emptyIdentityList := flow.IdentitySkeletonList{} nextClusterIndex := (index + 1) % suite.N_CLUSTERS suite.clusters[nextClusterIndex] = emptyIdentityList @@ -363,16 +384,16 @@ func (suite *Suite) TestRoutingToRemoteClusterWithNoNodes() { // should attempt route to remote cluster without providing any node ids suite.conduit. - On("Multicast", &tx, suite.conf.PropagationRedundancy+1). + On("Multicast", (*messages.TransactionBody)(&tx), suite.conf.PropagationRedundancy+1). Return(network.EmptyTargetList) err := suite.engine.ProcessTransaction(&tx) suite.Assert().NoError(err) // should not be added to local mempool - counter, err := suite.epochQuery.Current().Counter() + currentEpoch, err := suite.epochQuery.Current() suite.Assert().NoError(err) - suite.Assert().False(suite.pools.ForEpoch(counter).Has(tx.ID())) + suite.Assert().False(suite.pools.ForEpoch(currentEpoch.Counter()).Has(tx.ID())) suite.conduit.AssertExpectations(suite.T()) } @@ -384,7 +405,7 @@ func (suite *Suite) TestRoutingLocalClusterFromOtherNode() { suite.Require().True(ok) // another node will send us the transaction - sender := local.Filter(filter.Not(filter.HasNodeID(suite.me.NodeID())))[0] + sender := local.Filter(filter.Not(filter.HasNodeID[flow.IdentitySkeleton](suite.me.NodeID())))[0] // get a transaction that will be routed to local cluster tx := unittest.TransactionBodyFixture() @@ -398,9 +419,9 @@ func (suite *Suite) TestRoutingLocalClusterFromOtherNode() { suite.Assert().NoError(err) // should be added to local mempool for current epoch - counter, err := suite.epochQuery.Current().Counter() + currentEpoch, err := suite.epochQuery.Current() suite.Assert().NoError(err) - suite.Assert().True(suite.pools.ForEpoch(counter).Has(tx.ID())) + suite.Assert().True(suite.pools.ForEpoch(currentEpoch.Counter()).Has(tx.ID())) suite.conduit.AssertExpectations(suite.T()) } @@ -426,9 +447,9 @@ func (suite *Suite) TestRoutingInvalidTransaction() { _ = suite.engine.ProcessTransaction(&tx) // should not be added to local mempool - counter, err := suite.epochQuery.Current().Counter() + currentEpoch, err := suite.epochQuery.Current() suite.Assert().NoError(err) - suite.Assert().False(suite.pools.ForEpoch(counter).Has(tx.ID())) + suite.Assert().False(suite.pools.ForEpoch(currentEpoch.Counter()).Has(tx.ID())) suite.conduit.AssertExpectations(suite.T()) } @@ -442,11 +463,11 @@ func (suite *Suite) TestRouting_ClusterAssignmentChanged() { suite.clusters[1], suite.clusters[0], } - epoch2 := new(protocol.Epoch) + epoch2 := new(protocol.CommittedEpoch) epoch2.On("Counter").Return(uint64(2), nil) epoch2.On("Clustering").Return(epoch2Clusters, nil) // update the mocks to behave as though we have transitioned to epoch 2 - suite.epochQuery.Add(epoch2) + suite.epochQuery.AddCommitted(epoch2) suite.epochQuery.Transition() // get the local cluster in epoch 2 @@ -459,7 +480,7 @@ func (suite *Suite) 
TestRouting_ClusterAssignmentChanged() { tx = unittest.AlterTransactionForCluster(tx, epoch2Clusters, epoch2Local, func(transaction *flow.TransactionBody) {}) // should route to local cluster - suite.conduit.On("Multicast", &tx, suite.conf.PropagationRedundancy+1, epoch2Local.NodeIDs()[0], epoch2Local.NodeIDs()[1]).Return(nil).Once() + suite.conduit.On("Multicast", (*messages.TransactionBody)(&tx), suite.conf.PropagationRedundancy+1, epoch2Local.NodeIDs()[0], epoch2Local.NodeIDs()[1]).Return(nil).Once() err := suite.engine.ProcessTransaction(&tx) suite.Assert().NoError(err) @@ -475,18 +496,18 @@ func (suite *Suite) TestRouting_ClusterAssignmentRemoved() { // remove ourselves from the cluster assignment for epoch 2 withoutMe := suite.identities. - Filter(filter.Not(filter.HasNodeID(suite.me.NodeID()))). - Filter(filter.HasRole(flow.RoleCollection)) + Filter(filter.Not(filter.HasNodeID[flow.Identity](suite.me.NodeID()))). + Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() epoch2Assignment := unittest.ClusterAssignment(suite.N_CLUSTERS, withoutMe) epoch2Clusters, err := factory.NewClusterList(epoch2Assignment, withoutMe) suite.Require().NoError(err) - epoch2 := new(protocol.Epoch) + epoch2 := new(protocol.CommittedEpoch) epoch2.On("Counter").Return(uint64(2), nil) epoch2.On("InitialIdentities").Return(withoutMe, nil) epoch2.On("Clustering").Return(epoch2Clusters, nil) // update the mocks to behave as though we have transitioned to epoch 2 - suite.epochQuery.Add(epoch2) + suite.epochQuery.AddCommitted(epoch2) suite.epochQuery.Transition() // any transaction is OK here, since we're not in any cluster @@ -514,18 +535,18 @@ func (suite *Suite) TestRouting_ClusterAssignmentAdded() { // remove ourselves from the cluster assignment for epoch 2 withoutMe := suite.identities. - Filter(filter.Not(filter.HasNodeID(suite.me.NodeID()))). - Filter(filter.HasRole(flow.RoleCollection)) + Filter(filter.Not(filter.HasNodeID[flow.Identity](suite.me.NodeID()))). 
+		Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton()
 	epoch2Assignment := unittest.ClusterAssignment(suite.N_CLUSTERS, withoutMe)
 	epoch2Clusters, err := factory.NewClusterList(epoch2Assignment, withoutMe)
 	suite.Require().NoError(err)
-	epoch2 := new(protocol.Epoch)
+	epoch2 := new(protocol.CommittedEpoch)
 	epoch2.On("Counter").Return(uint64(2), nil)
 	epoch2.On("InitialIdentities").Return(withoutMe, nil)
 	epoch2.On("Clustering").Return(epoch2Clusters, nil)
 	// update the mocks to behave as though we have transitioned to epoch 2
-	suite.epochQuery.Add(epoch2)
+	suite.epochQuery.AddCommitted(epoch2)
 	suite.epochQuery.Transition()
 
 	// any transaction is OK here, since we're not in any cluster
@@ -544,16 +565,16 @@ func (suite *Suite) TestRouting_ClusterAssignmentAdded() {
 	// EPOCH 3:
 	// include ourselves in cluster assignment
-	withMe := suite.identities.Filter(filter.HasRole(flow.RoleCollection))
+	withMe := suite.identities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton()
 	epoch3Assignment := unittest.ClusterAssignment(suite.N_CLUSTERS, withMe)
 	epoch3Clusters, err := factory.NewClusterList(epoch3Assignment, withMe)
 	suite.Require().NoError(err)
-	epoch3 := new(protocol.Epoch)
+	epoch3 := new(protocol.CommittedEpoch)
 	epoch3.On("Counter").Return(uint64(3), nil)
 	epoch3.On("Clustering").Return(epoch3Clusters, nil)
 	// transition to epoch 3
-	suite.epochQuery.Add(epoch3)
+	suite.epochQuery.AddCommitted(epoch3)
 	suite.epochQuery.Transition()
 
 	// get the local cluster in epoch 2
@@ -566,7 +587,7 @@ func (suite *Suite) TestRouting_ClusterAssignmentAdded() {
 	tx = unittest.AlterTransactionForCluster(tx, epoch3Clusters, epoch3Local, func(transaction *flow.TransactionBody) {})
 
 	// should route to local cluster
-	suite.conduit.On("Multicast", &tx, suite.conf.PropagationRedundancy+1, epoch3Local.NodeIDs()[0], epoch3Local.NodeIDs()[1]).Return(nil).Once()
+	suite.conduit.On("Multicast", (*messages.TransactionBody)(&tx), suite.conf.PropagationRedundancy+1, epoch3Local.NodeIDs()[0], epoch3Local.NodeIDs()[1]).Return(nil).Once()
 
 	err = suite.engine.ProcessTransaction(&tx)
 	suite.Assert().NoError(err)
diff --git a/engine/collection/ingest/rate_limiter.go b/engine/collection/ingest/rate_limiter.go
new file mode 100644
index 00000000000..66733ae03cc
--- /dev/null
+++ b/engine/collection/ingest/rate_limiter.go
@@ -0,0 +1,147 @@
+package ingest
+
+import (
+	"strings"
+	"sync"
+
+	"golang.org/x/time/rate"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// AddressRateLimiter limits the rate of ingested transactions with a given payer address.
+type AddressRateLimiter struct {
+	mu       sync.RWMutex
+	limiters map[flow.Address]*rate.Limiter
+	limit    rate.Limit // X messages allowed per second
+	burst    int        // X messages allowed at one time
+}
+
+// NewAddressRateLimiter returns a new AddressRateLimiter, which limits the rate of ingested transactions with a given payer address.
+// It allows "limit" messages per second, with up to "burst" messages sent at once.
+//
+// For example,
+// to configure 1 message per 100 milliseconds, convert to a per-second basis first, which is 10 messages per second,
+// so the limit is 10 ( rate.Limit(10) ), and the burst is 1.
+// Note: rate.Limit(0.1) with burst = 1 means 1 message per 10 seconds, not 1 message per 100 milliseconds.
+//
+// To configure 3 messages per minute, the per-second basis is 0.05 (3/60), so the limit should be rate.Limit(0.05),
+// and the burst is 3.
+//
+// Note: The rate limit configured for each node may differ from the effective network-wide rate limit
+// for a given payer. In particular, the number of clusters and the message propagation factor will
+// influence how the individual rate limit translates to a network-wide rate limit.
+// For example, suppose we have 5 collection clusters and configure each Collection Node with a rate
+// limit of 1 message per second. Then, the effective network-wide rate limit for a payer address would
+// be *at least* 5 messages per second.
+func NewAddressRateLimiter(limit rate.Limit, burst int) *AddressRateLimiter {
+	return &AddressRateLimiter{
+		limiters: make(map[flow.Address]*rate.Limiter),
+		limit:    limit,
+		burst:    burst,
+	}
+}
+
+// Allow returns whether the given address should be allowed (not rate limited)
+func (r *AddressRateLimiter) Allow(address flow.Address) bool {
+	return !r.IsRateLimited(address)
+}
+
+// IsRateLimited returns whether the given address should be rate limited
+func (r *AddressRateLimiter) IsRateLimited(address flow.Address) bool {
+	r.mu.RLock()
+	limiter, ok := r.limiters[address]
+	r.mu.RUnlock()
+
+	if !ok {
+		return false
+	}
+
+	rateLimited := !limiter.Allow()
+	return rateLimited
+}
+
+// AddAddress adds an address to be rate limited
+func (r *AddressRateLimiter) AddAddress(address flow.Address) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	_, ok := r.limiters[address]
+	if ok {
+		return
+	}
+
+	r.limiters[address] = rate.NewLimiter(r.limit, r.burst)
+}
+
+// RemoveAddress removes an address from being rate limited
+func (r *AddressRateLimiter) RemoveAddress(address flow.Address) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	delete(r.limiters, address)
+}
+
+// GetAddresses returns the list of rate-limited addresses
+func (r *AddressRateLimiter) GetAddresses() []flow.Address {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	addresses := make([]flow.Address, 0, len(r.limiters))
+	for address := range r.limiters {
+		addresses = append(addresses, address)
+	}
+
+	return addresses
+}
+
+// GetLimitConfig returns the limit config
+func (r *AddressRateLimiter) GetLimitConfig() (rate.Limit, int) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return r.limit, r.burst
+}
+
+// SetLimitConfig updates the limit config.
+// Note: all existing limiters will be updated and reset.
+func (r *AddressRateLimiter) SetLimitConfig(limit rate.Limit, burst int) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	for address := range r.limiters {
+		r.limiters[address] = rate.NewLimiter(limit, burst)
+	}
+
+	r.limit = limit
+	r.burst = burst
+}
+
+// Util functions
+func AddAddresses(r *AddressRateLimiter, addresses []flow.Address) {
+	for _, address := range addresses {
+		r.AddAddress(address)
+	}
+}
+
+func RemoveAddresses(r *AddressRateLimiter, addresses []flow.Address) {
+	for _, address := range addresses {
+		r.RemoveAddress(address)
+	}
+}
+
+// ParseAddresses parses a comma-separated addresses string into a list of flow addresses
+func ParseAddresses(addresses string) ([]flow.Address, error) {
+	addressList := make([]flow.Address, 0)
+	for _, addr := range strings.Split(addresses, ",") {
+		addr = strings.TrimSpace(addr)
+		if addr == "" {
+			continue
+		}
+		flowAddr, err := flow.StringToAddress(addr)
+		if err != nil {
+			return nil, err
+		}
+		addressList = append(addressList, flowAddr)
+	}
+	return addressList, nil
}
diff --git a/engine/collection/ingest/rate_limiter_test.go b/engine/collection/ingest/rate_limiter_test.go
new file mode 100644
index 00000000000..38d7d66d9dc
--- /dev/null
+++ b/engine/collection/ingest/rate_limiter_test.go
@@ -0,0
+1,185 @@
+package ingest_test
+
+import (
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
+	"golang.org/x/time/rate"
+
+	"github.com/onflow/flow-go/access/ratelimit"
+	"github.com/onflow/flow-go/engine/collection/ingest"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+var _ ratelimit.RateLimiter = (*ingest.AddressRateLimiter)(nil)
+
+func TestLimiterAddRemoveAddress(t *testing.T) {
+	t.Parallel()
+
+	good1 := unittest.RandomAddressFixture()
+	limited1 := unittest.RandomAddressFixture()
+	limited2 := unittest.RandomAddressFixture()
+
+	numPerSec := rate.Limit(1)
+	burst := 1
+	l := ingest.NewAddressRateLimiter(numPerSec, burst)
+
+	require.False(t, l.IsRateLimited(good1))
+	require.False(t, l.IsRateLimited(good1)) // addresses that were never added are not rate limited
+
+	l.AddAddress(limited1)
+	require.Equal(t, []flow.Address{limited1}, l.GetAddresses())
+
+	require.False(t, l.IsRateLimited(limited1)) // address 1 is not limited on the first call
+	require.True(t, l.IsRateLimited(limited1))  // limited on the second call immediately
+	require.True(t, l.IsRateLimited(limited1))  // still limited on the third call
+
+	require.False(t, l.IsRateLimited(good1))
+	require.False(t, l.IsRateLimited(good1)) // addresses that were never added are not rate limited
+
+	l.AddAddress(limited2)
+	list := l.GetAddresses()
+	require.Len(t, list, 2)
+	require.ElementsMatch(t, []flow.Address{limited1, limited2}, list)
+
+	require.False(t, l.IsRateLimited(limited2)) // address 2 is not limited on the first call
+	require.True(t, l.IsRateLimited(limited2))  // limited on the second call immediately
+	require.True(t, l.IsRateLimited(limited2))  // still limited on the third call
+
+	l.RemoveAddress(limited1) // after removing the address, it is no longer rate limited
+	require.False(t, l.IsRateLimited(limited1))
+	require.False(t, l.IsRateLimited(limited1))
+
+	// but limited2 is still rate limited
+	require.True(t, l.IsRateLimited(limited2))
+}
+
+func TestLimiterBurst(t *testing.T) {
+	t.Parallel()
+
+	limited1 := unittest.RandomAddressFixture()
+
+	numPerSec := rate.Limit(1)
+	burst := 3
+	l := ingest.NewAddressRateLimiter(numPerSec, burst)
+
+	l.AddAddress(limited1)
+	for i := 0; i < burst; i++ {
+		require.False(t, l.IsRateLimited(limited1), fmt.Sprintf("%v-th call", i))
+	}
+
+	require.True(t, l.IsRateLimited(limited1)) // limited
+	require.True(t, l.IsRateLimited(limited1)) // limited
+}
+
+// verify that an address is allowed again after waiting long enough once rate limited
+func TestLimiterWaitLongEnough(t *testing.T) {
+	t.Parallel()
+
+	addr1 := unittest.RandomAddressFixture()
+
+	// with limit set to 10, it means we allow 10 messages per second,
+	// and with burst set to 1, it means we only allow 1 message at a time,
+	// so the limit is 1 message per 100 milliseconds.
+	// Note: rate.Limit(0.1) does not mean 1 message per 100 milliseconds, but
+	// 1 message per 10 seconds.
+	numPerSec := rate.Limit(10)
+	burst := 1
+	l := ingest.NewAddressRateLimiter(numPerSec, burst)
+
+	l.AddAddress(addr1)
+	require.False(t, l.IsRateLimited(addr1))
+	require.True(t, l.IsRateLimited(addr1))
+
+	// check every 10 milliseconds; after 100 milliseconds it should be allowed again
+	require.Eventually(t, func() bool {
+		return l.Allow(addr1)
+	}, 110*time.Millisecond, 10*time.Millisecond)
+
+	// rate limited again until another 100 ms has passed
+	require.True(t, l.IsRateLimited(addr1))
+
+	// allowed again after another 100 ms
+	require.Eventually(t, func() bool {
+		return l.Allow(addr1)
+	}, 110*time.Millisecond, 10*time.Millisecond)
+}
+
+func TestLimiterConcurrentSafe(t *testing.T) {
+	t.Parallel()
+	good1 := unittest.RandomAddressFixture()
+	limited1 := unittest.RandomAddressFixture()
+
+	numPerSec := rate.Limit(1)
+	burst := 1
+	l := ingest.NewAddressRateLimiter(numPerSec, burst)
+
+	l.AddAddress(limited1)
+
+	wg := sync.WaitGroup{}
+	wg.Add(2)
+
+	succeed := atomic.NewUint64(0)
+	go func(wg *sync.WaitGroup) {
+		defer wg.Done()
+		ok := l.IsRateLimited(limited1)
+		if ok {
+			succeed.Add(1)
+		}
+		require.False(t, l.IsRateLimited(good1)) // never limited
+	}(&wg)
+
+	go func(wg *sync.WaitGroup) {
+		defer wg.Done()
+		ok := l.IsRateLimited(limited1)
+		if ok {
+			succeed.Add(1)
+		}
+		require.False(t, l.IsRateLimited(good1)) // never limited
+	}(&wg)
+
+	wg.Wait()
+	require.Equal(t, uint64(1), succeed.Load()) // with burst 1, exactly one of the two concurrent calls is rate limited
+}
+
+func TestLimiterGetSetConfig(t *testing.T) {
+	t.Parallel()
+
+	addr1 := unittest.RandomAddressFixture()
+
+	// with limit set to 10, it means we allow 10 messages per second,
+	// and with burst set to 1, it means we only allow 1 message at a time,
+	// so the limit is 1 message per 100 milliseconds.
+	// Note: rate.Limit(0.1) does not mean 1 message per 100 milliseconds, but
+	// 1 message per 10 seconds.
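The rate/burst arithmetic spelled out in these test comments and in the rate_limiter.go doc comment is easy to misread, so here is the same mapping as a small self-contained sketch (illustrative only, not part of this patch; it uses only golang.org/x/time/rate):

	package main

	import (
		"fmt"

		"golang.org/x/time/rate"
	)

	func main() {
		// 1 message per 100 ms == 10 messages per second with a bucket of 1.
		perHundredMillis := rate.NewLimiter(rate.Limit(10), 1)

		// 3 messages per minute == 3/60 = 0.05 messages per second with a bucket of 3.
		perMinute := rate.NewLimiter(rate.Limit(0.05), 3)

		// Caution (as the doc comment warns): rate.Limit(0.1) with burst 1 is
		// 1 message per 10 seconds, NOT 1 message per 100 milliseconds.
		perTenSeconds := rate.NewLimiter(rate.Limit(0.1), 1)

		fmt.Println(perHundredMillis.Allow(), perMinute.Allow(), perTenSeconds.Allow())

		// Network-wide effect (from the rate_limiter.go doc comment): with 5
		// collection clusters each enforcing 1 msg/s independently, a payer can
		// reach at least 5 msg/s across the whole network.
		const clusters, perNodeLimit = 5, 1.0
		fmt.Println("effective network-wide floor:", clusters*perNodeLimit, "msg/s")
	}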
+ numPerSec := rate.Limit(10) + burst := 1 + l := ingest.NewAddressRateLimiter(numPerSec, burst) + + l.AddAddress(addr1) + require.False(t, l.IsRateLimited(addr1)) + require.True(t, l.IsRateLimited(addr1)) + + limitConfig, burstConfig := l.GetLimitConfig() + require.Equal(t, numPerSec, limitConfig) + require.Equal(t, burst, burstConfig) + + // change from 1 message per 100 ms to 4 messages per 200 ms + l.SetLimitConfig(rate.Limit(20), 4) + + // verify the quota is reset, and the new limit is applied + for i := 0; i < 4; i++ { + require.False(t, l.IsRateLimited(addr1), fmt.Sprintf("fail at %v-th call", i)) + } + require.True(t, l.IsRateLimited(addr1)) + + // check every 10 Millisecond then after 100 Millisecond it should be allowed + require.Eventually(t, func() bool { + return l.Allow(addr1) + }, 210*time.Millisecond, 10*time.Millisecond) +} diff --git a/engine/collection/message_hub/message_hub.go b/engine/collection/message_hub/message_hub.go index 6c73ec2ab22..ca620719763 100644 --- a/engine/collection/message_hub/message_hub.go +++ b/engine/collection/message_hub/message_hub.go @@ -86,7 +86,7 @@ type MessageHub struct { ownOutboundVotes *fifoqueue.FifoQueue // queue for handling outgoing vote transmissions ownOutboundProposals *fifoqueue.FifoQueue // queue for handling outgoing proposal transmissions ownOutboundTimeouts *fifoqueue.FifoQueue // queue for handling outgoing timeout transmissions - clusterIdentityFilter flow.IdentityFilter + clusterIdentityFilter flow.IdentityFilter[flow.Identity] // injected dependencies compliance collection.Compliance // handler of incoming block proposals @@ -102,7 +102,7 @@ var _ hotstuff.CommunicatorConsumer = (*MessageHub)(nil) // No errors are expected during normal operations. func NewMessageHub(log zerolog.Logger, engineMetrics module.EngineMetrics, - net network.Network, + net network.EngineRegistry, me module.Local, compliance collection.Compliance, hotstuff module.HotStuff, @@ -113,8 +113,11 @@ func NewMessageHub(log zerolog.Logger, payloads storage.ClusterPayloads, ) (*MessageHub, error) { // find my cluster for the current epoch - // TODO this should flow from cluster state as source of truth - clusters, err := state.Final().Epochs().Current().Clustering() + epoch, err := state.Final().Epochs().Current() + if err != nil { + return nil, fmt.Errorf("could not get current epoch: %w", err) + } + clusters, err := epoch.Clustering() if err != nil { return nil, fmt.Errorf("could not get clusters: %w", err) } @@ -150,16 +153,13 @@ func NewMessageHub(log zerolog.Logger, ownOutboundProposals: ownOutboundProposals, ownOutboundTimeouts: ownOutboundTimeouts, clusterIdentityFilter: filter.And( - filter.In(currentCluster), - filter.Not(filter.HasNodeID(me.NodeID())), + filter.Adapt(filter.In(currentCluster)), + filter.Not(filter.HasNodeID[flow.Identity](me.NodeID())), ), } // register network conduit - chainID, err := clusterState.Params().ChainID() - if err != nil { - return nil, fmt.Errorf("could not get chain ID: %w", err) - } + chainID := clusterState.Params().ChainID() conduit, err := net.Register(channels.ConsensusCluster(chainID), hub) if err != nil { return nil, fmt.Errorf("could not register engine: %w", err) @@ -210,10 +210,10 @@ func (h *MessageHub) sendOwnMessages(ctx context.Context) error { msg, ok := h.ownOutboundProposals.Pop() if ok { - block := msg.(*flow.Header) - err := h.sendOwnProposal(block) + proposal := msg.(*flow.ProposalHeader) + err := h.sendOwnProposal(proposal) if err != nil { - return fmt.Errorf("could not process queued 
block %v: %w", block.ID(), err) + return fmt.Errorf("could not process queued proposal %v: %w", proposal.Header.ID(), err) } continue } @@ -247,7 +247,7 @@ func (h *MessageHub) sendOwnMessages(ctx context.Context) error { // No errors are expected during normal operations. func (h *MessageHub) sendOwnTimeout(timeout *model.TimeoutObject) error { log := timeout.LogContext(h.log).Logger() - log.Info().Msg("processing timeout broadcast request from hotstuff") + log.Debug().Msg("processing timeout broadcast request from hotstuff") // Retrieve all collection nodes in our cluster (excluding myself). recipients, err := h.state.Final().Identities(h.clusterIdentityFilter) @@ -255,13 +255,7 @@ func (h *MessageHub) sendOwnTimeout(timeout *model.TimeoutObject) error { return fmt.Errorf("could not get cluster members for broadcasting timeout: %w", err) } // create the timeout message - msg := &messages.ClusterTimeoutObject{ - View: timeout.View, - NewestQC: timeout.NewestQC, - LastViewTC: timeout.LastViewTC, - SigData: timeout.SigData, - TimeoutTick: timeout.TimeoutTick, - } + msg := (*messages.ClusterTimeoutObject)(timeout) err = h.con.Publish(msg, recipients.NodeIDs()...) if err != nil { @@ -270,7 +264,7 @@ func (h *MessageHub) sendOwnTimeout(timeout *model.TimeoutObject) error { } return nil } - log.Info().Msg("cluster timeout was broadcast") + log.Debug().Msg("cluster timeout was broadcast") h.engineMetrics.MessageSent(metrics.EngineCollectionMessageHub, metrics.MessageTimeoutObject) return nil @@ -284,7 +278,7 @@ func (h *MessageHub) sendOwnVote(packed *packedVote) error { Uint64("collection_view", packed.vote.View). Hex("recipient_id", packed.recipientID[:]). Logger() - log.Info().Msg("processing vote transmission request from hotstuff") + log.Debug().Msg("processing vote transmission request from hotstuff") // send the vote the desired recipient err := h.con.Unicast(packed.vote, packed.recipientID) @@ -292,7 +286,7 @@ func (h *MessageHub) sendOwnVote(packed *packedVote) error { log.Err(err).Msg("could not send vote") return nil } - log.Info().Msg("collection vote transmitted") + log.Debug().Msg("collection vote transmitted") h.engineMetrics.MessageSent(metrics.EngineCollectionMessageHub, metrics.MessageBlockVote) return nil @@ -300,7 +294,8 @@ func (h *MessageHub) sendOwnVote(packed *packedVote) error { // sendOwnProposal propagates the block proposal to the consensus committee by broadcasting to all other cluster participants (excluding myself) // No errors are expected during normal operations. 
-func (h *MessageHub) sendOwnProposal(header *flow.Header) error { +func (h *MessageHub) sendOwnProposal(proposal *flow.ProposalHeader) error { + header := proposal.Header // first, check that we are the proposer of the block if header.ProposerID != h.me.NodeID() { return fmt.Errorf("cannot broadcast proposal with non-local proposer (%x)", header.ProposerID) @@ -331,14 +326,28 @@ func (h *MessageHub) sendOwnProposal(header *flow.Header) error { return fmt.Errorf("could not get cluster members for broadcasting collection proposal") } + block, err := cluster.NewBlock( + cluster.UntrustedBlock{ + HeaderBody: header.HeaderBody, + Payload: *payload, + }, + ) + if err != nil { + return fmt.Errorf("could not build cluster block: %w", err) + } + // create the proposal message for the collection - proposal := messages.NewClusterBlockProposal(&cluster.Block{ - Header: header, - Payload: payload, - }) + blockProposal := &cluster.UntrustedProposal{ + Block: *block, + ProposerSigData: proposal.ProposerSigData, + } + if _, err = cluster.NewProposal(*blockProposal); err != nil { + return fmt.Errorf("could not build cluster proposal: %w", err) + } + message := (*messages.ClusterProposal)(blockProposal) // broadcast the proposal to consensus nodes - err = h.con.Publish(proposal, recipients.NodeIDs()...) + err = h.con.Publish(message, recipients.NodeIDs()...) if err != nil { if !errors.Is(err, network.EmptyTargetList) { log.Err(err).Msg("could not send proposal message") @@ -354,23 +363,21 @@ func (h *MessageHub) sendOwnProposal(header *flow.Header) error { // OnOwnVote propagates the vote to relevant recipient(s): // - [common case] vote is queued and is sent via unicast to another node that is the next leader by worker // - [special case] this node is the next leader: vote is directly forwarded to the node's internal `VoteAggregator` -func (h *MessageHub) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { - vote := &messages.ClusterBlockVote{ - BlockID: blockID, - View: view, - SigData: sigData, - } - +func (h *MessageHub) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { // special case: I am the next leader if recipientID == h.me.NodeID() { - h.forwardToOwnVoteAggregator(vote, h.me.NodeID()) // forward vote to my own `voteAggregator` + h.forwardToOwnVoteAggregator(vote) // forward vote to my own `voteAggregator` return } // common case: someone else is leader packed := &packedVote{ recipientID: recipientID, - vote: vote, + vote: &messages.ClusterBlockVote{ + BlockID: vote.BlockID, + View: vote.View, + SigData: vote.SigData, + }, } if ok := h.ownOutboundVotes.Push(packed); ok { h.ownOutboundMessageNotifier.Notify() @@ -393,7 +400,7 @@ func (h *MessageHub) OnOwnTimeout(timeout *model.TimeoutObject) { // OnOwnProposal directly forwards proposal to HotStuff core logic(skipping compliance engine as we assume our // own proposals to be correct) and queues proposal for subsequent propagation to all consensus participants (including this node). // The proposal will only be placed in the queue, after the specified delay (or dropped on shutdown signal). 
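Several hunks in this region (sendOwnTimeout and sendOwnProposal here, and the ingest engine's propagateTransaction earlier) replace field-by-field message construction with direct type conversions such as (*messages.ClusterTimeoutObject)(timeout). This relies on Go's rule that values, and pointers to them, may be converted between named struct types whose underlying fields are identical. A tiny self-contained illustration, not part of this patch:

	package main

	import "fmt"

	// Two distinct named types over the same underlying struct ...
	type TimeoutObject struct {
		View    uint64
		SigData []byte
	}

	type ClusterTimeoutObject struct {
		View    uint64
		SigData []byte
	}

	func main() {
		t := &TimeoutObject{View: 7, SigData: []byte{0x01}}
		// ... can be converted directly, even through a pointer: no field
		// copying, no reflection, just a compile-time checked reinterpretation.
		msg := (*ClusterTimeoutObject)(t)
		fmt.Println(msg.View) // 7
	}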
@@ -393,7 +400,7 @@ func (h *MessageHub) OnOwnTimeout(timeout *model.TimeoutObject) {

// OnOwnProposal directly forwards the proposal to the HotStuff core logic (skipping the compliance engine, as we assume our
// own proposals to be correct) and queues the proposal for subsequent propagation to all consensus participants (including this node).
// The proposal will only be placed in the queue after the specified delay (or dropped on shutdown signal).
-func (h *MessageHub) OnOwnProposal(proposal *flow.Header, targetPublicationTime time.Time) {
+func (h *MessageHub) OnOwnProposal(proposal *flow.ProposalHeader, targetPublicationTime time.Time) {
 go func() {
 select {
 case <-time.After(time.Until(targetPublicationTime)):
@@ -401,7 +408,7 @@ func (h *MessageHub) OnOwnProposal(proposal *flow.Header, targetPublicationTime
 return
 }

- hotstuffProposal := model.ProposalFromFlow(proposal)
+ hotstuffProposal := model.SignedProposalFromFlow(proposal)
 // notify vote aggregator that new block proposal is available, in case we are next leader
 h.voteAggregator.AddBlock(hotstuffProposal) // non-blocking
@@ -420,25 +427,41 @@ func (h *MessageHub) OnOwnProposal(proposal *flow.Header, targetPublicationTime

// Process handles incoming messages from the consensus channel. After matching a message by type, it sends
// the message to the correct component for handling.
// No errors are expected during normal operations.
+//
+// TODO(BFT, #7620): This function should not return an error. The networking layer's responsibility is fulfilled
+// once it delivers a message to an engine. It does not possess the context required to handle
+// errors that may arise during an engine's processing of the message, as error handling for
+// message processing falls outside the domain of the networking layer.
+//
+// Some of the current error returns signal Byzantine behavior, such as forged or malformed
+// messages. These cases must be logged and routed to a dedicated violation reporting consumer.
func (h *MessageHub) Process(channel channels.Channel, originID flow.Identifier, message interface{}) error {
 switch msg := message.(type) {
- case *messages.ClusterBlockProposal:
- h.compliance.OnClusterBlockProposal(flow.Slashable[*messages.ClusterBlockProposal]{
+ case *cluster.Proposal:
+ h.compliance.OnClusterBlockProposal(flow.Slashable[*cluster.Proposal]{
 OriginID: originID,
 Message: msg,
 })
- case *messages.ClusterBlockVote:
- h.forwardToOwnVoteAggregator(msg, originID)
- case *messages.ClusterTimeoutObject:
- t := &model.TimeoutObject{
- View: msg.View,
- NewestQC: msg.NewestQC,
- LastViewTC: msg.LastViewTC,
- SignerID: originID,
- SigData: msg.SigData,
- TimeoutTick: msg.TimeoutTick,
+ case *flow.BlockVote:
+ vote, err := model.NewVote(model.UntrustedVote{
+ View: msg.View,
+ BlockID: msg.BlockID,
+ SignerID: originID,
+ SigData: msg.SigData,
+ })
+ if err != nil {
+ // TODO(BFT, #7620): Replace this log statement with a call to the protocol violation consumer.
+ h.log.Warn().
+ Hex("origin_id", originID[:]).
+ Hex("block_id", msg.BlockID[:]).
+ Uint64("view", msg.View).
+ Err(err).Msg("received invalid cluster vote message")
+ return nil
 }
- h.forwardToOwnTimeoutAggregator(t)
+
+ h.forwardToOwnVoteAggregator(vote)
+ case *model.TimeoutObject:
+ h.forwardToOwnTimeoutAggregator(msg)
 default:
 h.log.Warn().
 Bool(logging.KeySuspicious, true).
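
The `case *flow.BlockVote` branch above illustrates the constructor-validation pattern this change applies at the network boundary: raw wire fields are collected into an `Untrusted*` value and promoted to a trusted type only through a validating constructor. A minimal sketch of the pattern, with illustrative checks (the authoritative ones live in `model.NewVote`; the zero-ID check matches the invalid-vote test further below):

    // Vote is the validated, internal representation of a block vote.
    type Vote struct {
        View     uint64
        BlockID  flow.Identifier
        SignerID flow.Identifier
        SigData  []byte
    }

    // UntrustedVote shares Vote's underlying type, but marks the data as not yet validated.
    type UntrustedVote Vote

    // NewVote promotes an UntrustedVote to a Vote, rejecting structurally invalid input.
    func NewVote(untrusted UntrustedVote) (*Vote, error) {
        if untrusted.BlockID == flow.ZeroID {
            return nil, fmt.Errorf("BlockID must not be zero")
        }
        if untrusted.SignerID == flow.ZeroID {
            return nil, fmt.Errorf("SignerID must not be zero")
        }
        if len(untrusted.SigData) == 0 {
            return nil, fmt.Errorf("SigData must not be empty")
        }
        vote := Vote(untrusted) // conversion between identical underlying types, no per-field copying
        return &vote, nil
    }

The same identical-underlying-type rule is what makes casts such as `(*messages.ClusterTimeoutObject)(timeout)` and `(*messages.ClusterProposal)(blockProposal)` in the hunks above safe and copy-free.
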
@@ -452,31 +475,25 @@ func (h *MessageHub) Process(channel channels.Channel, originID flow.Identifier,

// forwardToOwnVoteAggregator logs the vote and forwards it to this node's `voteAggregator`.
// Per API convention, `voteAggregator` is non-blocking, hence, this call returns quickly.
-func (h *MessageHub) forwardToOwnVoteAggregator(vote *messages.ClusterBlockVote, originID flow.Identifier) {
+func (h *MessageHub) forwardToOwnVoteAggregator(vote *model.Vote) {
 h.engineMetrics.MessageReceived(metrics.EngineCollectionMessageHub, metrics.MessageBlockVote)
- v := &model.Vote{
- View: vote.View,
- BlockID: vote.BlockID,
- SignerID: originID,
- SigData: vote.SigData,
- }
- h.log.Info().
- Uint64("block_view", v.View).
- Hex("block_id", v.BlockID[:]).
- Hex("voter", v.SignerID[:]).
- Str("vote_id", v.ID().String()).
+ h.log.Debug().
+ Uint64("block_view", vote.View).
+ Hex("block_id", vote.BlockID[:]).
+ Hex("voter", vote.SignerID[:]).
+ Str("vote_id", vote.ID().String()).
 Msg("block vote received, forwarding block vote to hotstuff vote aggregator")
- h.voteAggregator.AddVote(v)
+ h.voteAggregator.AddVote(vote)
}

// forwardToOwnTimeoutAggregator logs the timeout and forwards it to this node's `timeoutAggregator`.
// Per API convention, `timeoutAggregator` is non-blocking, hence, this call returns quickly.
func (h *MessageHub) forwardToOwnTimeoutAggregator(t *model.TimeoutObject) {
 h.engineMetrics.MessageReceived(metrics.EngineCollectionMessageHub, metrics.MessageTimeoutObject)
- h.log.Info().
- Hex("origin_id", t.SignerID[:]).
+ h.log.Debug().
+ Hex("signer_id", t.SignerID[:]).
 Uint64("view", t.View).
- Str("timeout_id", t.ID().String()).
+ Uint64("newest_qc_view", t.NewestQC.View).
 Msg("timeout received, forwarding timeout to hotstuff timeout aggregator")
 h.timeoutAggregator.AddTimeout(t)
}
diff --git a/engine/collection/message_hub/message_hub_test.go b/engine/collection/message_hub/message_hub_test.go
index 9d574082475..cb67a12d918 100644
--- a/engine/collection/message_hub/message_hub_test.go
+++ b/engine/collection/message_hub/message_hub_test.go
@@ -2,7 +2,6 @@ package message_hub
 import (
 "context"
- "math/rand"
 "sync"
 "testing"
 "time"
@@ -25,7 +24,7 @@ import (
 "github.com/onflow/flow-go/module/util"
 netint "github.com/onflow/flow-go/network"
 "github.com/onflow/flow-go/network/channels"
- "github.com/onflow/flow-go/network/mocknetwork"
+ mocknetwork "github.com/onflow/flow-go/network/mock"
 clusterint "github.com/onflow/flow-go/state/cluster"
 clusterstate "github.com/onflow/flow-go/state/cluster/mock"
 protocol "github.com/onflow/flow-go/state/protocol/mock"
@@ -53,7 +52,7 @@ type MessageHubSuite struct {
 me *module.Local
 state *clusterstate.MutableState
 protoState *protocol.State
- net *mocknetwork.Network
+ net *mocknetwork.EngineRegistry
 con *mocknetwork.Conduit
 hotstuff *module.HotStuff
 voteAggregator *hotstuff.VoteAggregator
@@ -68,23 +67,19 @@ type MessageHubSuite struct {
 }

 func (s *MessageHubSuite) SetupTest() {
- // seed the RNG
- rand.Seed(time.Now().UnixNano())
-
 // initialize the parameters
 s.cluster = unittest.IdentityListFixture(3,
 unittest.WithRole(flow.RoleCollection),
- unittest.WithWeight(1000),
+ unittest.WithInitialWeight(1000),
 )
 s.myID = s.cluster[0].NodeID
 s.clusterID = "cluster-id"
- block := unittest.ClusterBlockFixture()
- s.head = &block
+ s.head = unittest.ClusterBlockFixture()

 s.payloads = storage.NewClusterPayloads(s.T())
 s.me = module.NewLocal(s.T())
 s.protoState = protocol.NewState(s.T())
- s.net = mocknetwork.NewNetwork(s.T())
+ s.net = mocknetwork.NewEngineRegistry(s.T())
 s.con = mocknetwork.NewConduit(s.T())
 s.hotstuff = module.NewHotStuff(s.T())
 s.voteAggregator = hotstuff.NewVoteAggregator(s.T())
@@ -92,17 +87,17 @@ func (s *MessageHubSuite) SetupTest() {
 s.compliance = mockcollection.NewCompliance(s.T())

 // set up proto state mock
- protoEpoch := &protocol.Epoch{}
- clusters := flow.ClusterList{s.cluster}
+ protoEpoch := &protocol.CommittedEpoch{}
+ clusters := flow.ClusterList{s.cluster.ToSkeleton()}
 protoEpoch.On("Clustering").Return(clusters, nil)

 protoQuery := &protocol.EpochQuery{}
- protoQuery.On("Current").Return(protoEpoch)
+ protoQuery.On("Current").Return(protoEpoch, nil)
 protoSnapshot := &protocol.Snapshot{}
 protoSnapshot.On("Epochs").Return(protoQuery)
 protoSnapshot.On("Identities", mock.Anything).Return(
- func(selector flow.IdentityFilter) flow.IdentityList {
+ func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList {
 return s.cluster.Filter(selector)
 },
 nil,
 )
@@ -145,7 +140,7 @@ func (s *MessageHubSuite) SetupTest() {
 s.snapshot = &clusterstate.Snapshot{}
 s.snapshot.On("Head").Return(
 func() *flow.Header {
- return s.head.Header
+ return s.head.ToHeader()
 },
 nil,
 )
@@ -184,26 +179,24 @@ func (s *MessageHubSuite) TearDownTest() {
 }
 }

-// TestProcessIncomingMessages tests processing of incoming messages, MessageHub matches messages by type
+// TestProcessValidIncomingMessages tests processing of structurally valid incoming messages; MessageHub matches messages by type
 // and sends them to other modules which execute business logic.
-func (s *MessageHubSuite) TestProcessIncomingMessages() {
+func (s *MessageHubSuite) TestProcessValidIncomingMessages() {
 var channel channels.Channel
 originID := unittest.IdentifierFixture()
 s.Run("to-compliance-engine", func() {
- block := unittest.ClusterBlockFixture()
-
- blockProposalMsg := messages.NewClusterBlockProposal(&block)
- expectedComplianceMsg := flow.Slashable[*messages.ClusterBlockProposal]{
+ proposal := unittest.ClusterProposalFixture()
+ expectedComplianceMsg := flow.Slashable[*cluster.Proposal]{
 OriginID: originID,
- Message: blockProposalMsg,
+ Message: proposal,
 }
 s.compliance.On("OnClusterBlockProposal", expectedComplianceMsg).Return(nil).Once()
- err := s.hub.Process(channel, originID, blockProposalMsg)
+ err := s.hub.Process(channel, originID, proposal)
 require.NoError(s.T(), err)
 })
 s.Run("to-vote-aggregator", func() {
 expectedVote := unittest.VoteFixture(unittest.WithVoteSignerID(originID))
- msg := &messages.ClusterBlockVote{
+ msg := &flow.BlockVote{
 View: expectedVote.View,
 BlockID: expectedVote.BlockID,
 SigData: expectedVote.SigData,
@@ -214,14 +207,8 @@ func (s *MessageHubSuite) TestProcessIncomingMessages() {
 })
 s.Run("to-timeout-aggregator", func() {
 expectedTimeout := helper.TimeoutObjectFixture(helper.WithTimeoutObjectSignerID(originID))
- msg := &messages.ClusterTimeoutObject{
- View: expectedTimeout.View,
- NewestQC: expectedTimeout.NewestQC,
- LastViewTC: expectedTimeout.LastViewTC,
- SigData: expectedTimeout.SigData,
- }
 s.timeoutAggregator.On("AddTimeout", expectedTimeout)
- err := s.hub.Process(channel, originID, msg)
+ err := s.hub.Process(channel, originID, expectedTimeout)
 require.NoError(s.T(), err)
 })
 s.Run("unsupported-msg-type", func() {
@@ -230,54 +217,81 @@ func (s *MessageHubSuite) TestProcessIncomingMessages() {
 })
 }

+// TestProcessInvalidIncomingMessages verifies that structurally invalid incoming messages are
+// discarded by the MessageHub and never forwarded to the modules that execute business logic.
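+// Currently only the vote path is exercised: a vote carrying a zero block ID must be dropped
+// without ever reaching the vote aggregator.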
+func (s *MessageHubSuite) TestProcessInvalidIncomingMessages() { + var channel channels.Channel + originID := unittest.IdentifierFixture() + s.Run("to-vote-aggregator", func() { + expectedVote := unittest.VoteFixture(unittest.WithVoteSignerID(originID)) + msg := &messages.ClusterBlockVote{ + View: expectedVote.View, + BlockID: flow.ZeroID, // invalid value + SigData: expectedVote.SigData, + } + + err := s.hub.Process(channel, originID, msg) + require.NoError(s.T(), err) + + // AddVote should NOT be called for invalid Vote + s.voteAggregator.AssertNotCalled(s.T(), "AddVote", mock.Anything) + }) +} + // TestOnOwnProposal tests broadcasting proposals with different inputs func (s *MessageHubSuite) TestOnOwnProposal() { // add execution node to cluster to make sure we exclude them from broadcast s.cluster = append(s.cluster, unittest.IdentityFixture(unittest.WithRole(flow.RoleExecution))) // generate a parent with height and chain ID set - parent := unittest.ClusterBlockFixture() - parent.Header.ChainID = "test" - parent.Header.Height = 10 + parent := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithHeight(10), + unittest.ClusterBlock.WithChainID("test"), + ) // create a block with the parent and store the payload with correct ID - block := unittest.ClusterBlockWithParent(&parent) - block.Header.ProposerID = s.myID - - s.payloads.On("ByBlockID", block.Header.ID()).Return(block.Payload, nil) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + unittest.ClusterBlock.WithProposerID(s.myID), + ) + s.payloads.On("ByBlockID", block.ID()).Return(&block.Payload, nil) s.payloads.On("ByBlockID", mock.Anything).Return(nil, storerr.ErrNotFound) s.Run("should fail with wrong proposer", func() { - header := *block.Header + header := block.ToHeader() header.ProposerID = unittest.IdentifierFixture() - err := s.hub.sendOwnProposal(&header) + err := s.hub.sendOwnProposal(unittest.ProposalHeaderFromHeader(header)) require.Error(s.T(), err, "should fail with wrong proposer") header.ProposerID = s.myID }) // should fail since we can't query payload s.Run("should fail with changed/missing parent", func() { - header := *block.Header + header := *block.ToHeader() header.ParentID[0]++ - err := s.hub.sendOwnProposal(&header) + err := s.hub.sendOwnProposal(unittest.ProposalHeaderFromHeader(&header)) require.Error(s.T(), err, "should fail with missing parent") header.ParentID[0]-- }) // should fail with wrong block ID (payload unavailable) s.Run("should fail with wrong block ID", func() { - header := *block.Header + header := *block.ToHeader() header.View++ - err := s.hub.sendOwnProposal(&header) + err := s.hub.sendOwnProposal(unittest.ProposalHeaderFromHeader(&header)) require.Error(s.T(), err, "should fail with missing payload") header.View-- }) s.Run("should broadcast proposal and pass to HotStuff for valid proposals", func() { - expectedBroadcastMsg := messages.NewClusterBlockProposal(&block) + expectedBroadcastMsg := &messages.ClusterProposal{ + Block: *block, + ProposerSigData: unittest.SignatureFixture(), + } submitted := make(chan struct{}) // closed when proposal is submitted to hotstuff - hotstuffProposal := model.ProposalFromFlow(block.Header) + headerProposal := &flow.ProposalHeader{Header: block.ToHeader(), ProposerSigData: expectedBroadcastMsg.ProposerSigData} + hotstuffProposal := model.SignedProposalFromFlow(headerProposal) s.voteAggregator.On("AddBlock", hotstuffProposal).Once() s.hotstuff.On("SubmitProposal", hotstuffProposal). 
Run(func(args mock.Arguments) { close(submitted) }). @@ -290,7 +304,7 @@ func (s *MessageHubSuite) TestOnOwnProposal() { Once() // submit to broadcast proposal - s.hub.OnOwnProposal(block.Header, time.Now()) + s.hub.OnOwnProposal(headerProposal, time.Now()) unittest.AssertClosesBefore(s.T(), util.AllClosed(broadcast, submitted), time.Second) }) @@ -311,18 +325,13 @@ func (s *MessageHubSuite) TestProcessMultipleMessagesHappyPath() { }).Return(nil) // submit vote - s.hub.OnOwnVote(vote.BlockID, vote.View, vote.SigData, recipientID) + s.hub.OnOwnVote(vote, recipientID) }) s.Run("timeout", func() { wg.Add(1) // prepare timeout fixture timeout := helper.TimeoutObjectFixture() - expectedBroadcastMsg := &messages.ClusterTimeoutObject{ - View: timeout.View, - NewestQC: timeout.NewestQC, - LastViewTC: timeout.LastViewTC, - SigData: timeout.SigData, - } + expectedBroadcastMsg := (*messages.ClusterTimeoutObject)(timeout) s.con.On("Publish", expectedBroadcastMsg, s.cluster[1].NodeID, s.cluster[2].NodeID). Run(func(_ mock.Arguments) { wg.Done() }). Return(nil) @@ -333,21 +342,27 @@ func (s *MessageHubSuite) TestProcessMultipleMessagesHappyPath() { s.Run("proposal", func() { wg.Add(1) // prepare proposal fixture - proposal := unittest.ClusterBlockWithParent(s.head) - proposal.Header.ProposerID = s.myID - s.payloads.On("ByBlockID", proposal.Header.ID()).Return(proposal.Payload, nil) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(s.head), + unittest.ClusterBlock.WithProposerID(s.myID), + ) + s.payloads.On("ByBlockID", block.ID()).Return(&block.Payload, nil) + proposal := unittest.ProposalHeaderFromHeader(block.ToHeader()) // unset chain and height to make sure they are correctly reconstructed - hotstuffProposal := model.ProposalFromFlow(proposal.Header) + hotstuffProposal := model.SignedProposalFromFlow(proposal) s.voteAggregator.On("AddBlock", hotstuffProposal) s.hotstuff.On("SubmitProposal", hotstuffProposal) - expectedBroadcastMsg := messages.NewClusterBlockProposal(&proposal) + expectedBroadcastMsg := &messages.ClusterProposal{ + Block: *block, + ProposerSigData: proposal.ProposerSigData, + } s.con.On("Publish", expectedBroadcastMsg, s.cluster[1].NodeID, s.cluster[2].NodeID). Run(func(_ mock.Arguments) { wg.Done() }). Return(nil) // submit proposal - s.hub.OnOwnProposal(proposal.Header, time.Now()) + s.hub.OnOwnProposal(proposal, time.Now()) }) unittest.RequireReturnsBefore(s.T(), func() { diff --git a/engine/collection/mock/cluster_events.go b/engine/collection/mock/cluster_events.go new file mode 100644 index 00000000000..07c02b298ca --- /dev/null +++ b/engine/collection/mock/cluster_events.go @@ -0,0 +1,32 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// ClusterEvents is an autogenerated mock type for the ClusterEvents type +type ClusterEvents struct { + mock.Mock +} + +// ActiveClustersChanged provides a mock function with given fields: _a0 +func (_m *ClusterEvents) ActiveClustersChanged(_a0 flow.ChainIDList) { + _m.Called(_a0) +} + +// NewClusterEvents creates a new instance of ClusterEvents. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewClusterEvents(t interface { + mock.TestingT + Cleanup(func()) +}) *ClusterEvents { + mock := &ClusterEvents{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/collection/mock/compliance.go b/engine/collection/mock/compliance.go index f505d128518..95bf491c08a 100644 --- a/engine/collection/mock/compliance.go +++ b/engine/collection/mock/compliance.go @@ -1,12 +1,13 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( + cluster "github.com/onflow/flow-go/model/cluster" + flow "github.com/onflow/flow-go/model/flow" - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - messages "github.com/onflow/flow-go/model/messages" + irrecoverable "github.com/onflow/flow-go/module/irrecoverable" mock "github.com/stretchr/testify/mock" ) @@ -16,10 +17,14 @@ type Compliance struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *Compliance) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -33,19 +38,23 @@ func (_m *Compliance) Done() <-chan struct{} { } // OnClusterBlockProposal provides a mock function with given fields: proposal -func (_m *Compliance) OnClusterBlockProposal(proposal flow.Slashable[*messages.ClusterBlockProposal]) { +func (_m *Compliance) OnClusterBlockProposal(proposal flow.Slashable[*cluster.Proposal]) { _m.Called(proposal) } // OnSyncedClusterBlock provides a mock function with given fields: block -func (_m *Compliance) OnSyncedClusterBlock(block flow.Slashable[*messages.ClusterBlockProposal]) { +func (_m *Compliance) OnSyncedClusterBlock(block flow.Slashable[*cluster.Proposal]) { _m.Called(block) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *Compliance) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -63,13 +72,12 @@ func (_m *Compliance) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewCompliance interface { +// NewCompliance creates a new instance of Compliance. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCompliance(t interface { mock.TestingT Cleanup(func()) -} - -// NewCompliance creates a new instance of Compliance. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCompliance(t mockConstructorTestingTNewCompliance) *Compliance { +}) *Compliance { mock := &Compliance{} mock.Mock.Test(t) diff --git a/engine/collection/mock/engine_events.go b/engine/collection/mock/engine_events.go new file mode 100644 index 00000000000..f34b9d5f085 --- /dev/null +++ b/engine/collection/mock/engine_events.go @@ -0,0 +1,32 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// EngineEvents is an autogenerated mock type for the EngineEvents type +type EngineEvents struct { + mock.Mock +} + +// ActiveClustersChanged provides a mock function with given fields: _a0 +func (_m *EngineEvents) ActiveClustersChanged(_a0 flow.ChainIDList) { + _m.Called(_a0) +} + +// NewEngineEvents creates a new instance of EngineEvents. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEngineEvents(t interface { + mock.TestingT + Cleanup(func()) +}) *EngineEvents { + mock := &EngineEvents{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/collection/mock/guaranteed_collection_publisher.go b/engine/collection/mock/guaranteed_collection_publisher.go new file mode 100644 index 00000000000..b2754e5f4ef --- /dev/null +++ b/engine/collection/mock/guaranteed_collection_publisher.go @@ -0,0 +1,32 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + messages "github.com/onflow/flow-go/model/messages" + mock "github.com/stretchr/testify/mock" +) + +// GuaranteedCollectionPublisher is an autogenerated mock type for the GuaranteedCollectionPublisher type +type GuaranteedCollectionPublisher struct { + mock.Mock +} + +// SubmitCollectionGuarantee provides a mock function with given fields: guarantee +func (_m *GuaranteedCollectionPublisher) SubmitCollectionGuarantee(guarantee *messages.CollectionGuarantee) { + _m.Called(guarantee) +} + +// NewGuaranteedCollectionPublisher creates a new instance of GuaranteedCollectionPublisher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGuaranteedCollectionPublisher(t interface { + mock.TestingT + Cleanup(func()) +}) *GuaranteedCollectionPublisher { + mock := &GuaranteedCollectionPublisher{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/collection/pusher/engine.go b/engine/collection/pusher/engine.go index 226b866bf5e..a186a2200e4 100644 --- a/engine/collection/pusher/engine.go +++ b/engine/collection/pusher/engine.go @@ -4,47 +4,73 @@ package pusher import ( + "context" "fmt" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/logging" ) -// Engine is the collection pusher engine, which provides access to resources -// held by the collection node. +// Engine is part of the Collection Node. It broadcasts finalized collections +// ("collection guarantees") that the cluster generates to Consensus Nodes +// for inclusion in blocks. 
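+//
+// Internally, the engine runs a single worker routine: SubmitCollectionGuarantee enqueues
+// guarantees into a bounded FIFO queue and signals a notifier, and the worker drains the
+// queue and publishes each guarantee to all Consensus Nodes. When the queue is full, new
+// guarantees are dropped and counted via the OutboundMessageDropped metric.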
type Engine struct { - unit *engine.Unit - log zerolog.Logger - engMetrics module.EngineMetrics - colMetrics module.CollectionMetrics - conduit network.Conduit - me module.Local - state protocol.State - collections storage.Collections - transactions storage.Transactions + log zerolog.Logger + engMetrics module.EngineMetrics + conduit network.Conduit + me module.Local + state protocol.State + + notifier engine.Notifier + queue *fifoqueue.FifoQueue + + component.Component + cm *component.ComponentManager } -func New(log zerolog.Logger, net network.Network, state protocol.State, engMetrics module.EngineMetrics, colMetrics module.CollectionMetrics, me module.Local, collections storage.Collections, transactions storage.Transactions) (*Engine, error) { +var _ network.MessageProcessor = (*Engine)(nil) +var _ component.Component = (*Engine)(nil) + +// New creates a new pusher engine. +func New( + log zerolog.Logger, + net network.EngineRegistry, + state protocol.State, + engMetrics module.EngineMetrics, + mempoolMetrics module.MempoolMetrics, + me module.Local, +) (*Engine, error) { + queue, err := fifoqueue.NewFifoQueue( + 200, // roughly 1 minute of collections, at 3BPS + fifoqueue.WithLengthObserver(func(len int) { + mempoolMetrics.MempoolEntries(metrics.ResourceSubmitCollectionGuaranteesQueue, uint(len)) + }), + ) + if err != nil { + return nil, fmt.Errorf("could not create fifoqueue: %w", err) + } + e := &Engine{ - unit: engine.NewUnit(), - log: log.With().Str("engine", "pusher").Logger(), - engMetrics: engMetrics, - colMetrics: colMetrics, - me: me, - state: state, - collections: collections, - transactions: transactions, + log: log.With().Str("engine", "pusher").Logger(), + engMetrics: engMetrics, + me: me, + state: state, + + notifier: engine.NewNotifier(), + queue: queue, } conduit, err := net.Register(channels.PushGuarantees, e) @@ -53,88 +79,105 @@ func New(log zerolog.Logger, net network.Network, state protocol.State, engMetri } e.conduit = conduit + e.cm = component.NewComponentManagerBuilder(). + AddWorker(e.outboundQueueWorker). + Build() + e.Component = e.cm + return e, nil } -// Ready returns a ready channel that is closed once the engine has fully -// started. -func (e *Engine) Ready() <-chan struct{} { - return e.unit.Ready() +// outboundQueueWorker implements a component worker which broadcasts collection guarantees, +// enqueued by the Finalizer upon finalization, to Consensus Nodes. +func (e *Engine) outboundQueueWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + + done := ctx.Done() + wake := e.notifier.Channel() + for { + select { + case <-done: + return + case <-wake: + err := e.processOutboundMessages(ctx) + if err != nil { + ctx.Throw(err) + } + } + } } -// Done returns a done channel that is closed once the engine has fully stopped. -func (e *Engine) Done() <-chan struct{} { - return e.unit.Done() -} +// processOutboundMessages processes any available messages from the queue. +// Only returns when the queue is empty (or the engine is terminated). +// No errors expected during normal operations. +func (e *Engine) processOutboundMessages(ctx context.Context) error { + for { + item, ok := e.queue.Pop() + if !ok { + return nil + } -// SubmitLocal submits an event originating on the local node. 
-func (e *Engine) SubmitLocal(event interface{}) {
- e.unit.Launch(func() {
- err := e.process(e.me.NodeID(), event)
- if err != nil {
- engine.LogError(e.log, err)
+ guarantee, ok := item.(*messages.CollectionGuarantee)
+ if !ok {
+ return fmt.Errorf("invalid type in pusher engine queue")
 }
- })
-}

-// Submit submits the given event from the node with the given origin ID
-// for processing in a non-blocking manner. It returns instantly and logs
-// a potential processing error internally when done.
-func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) {
- e.unit.Launch(func() {
- err := e.process(originID, event)
+ err := e.publishCollectionGuarantee(guarantee)
 if err != nil {
- engine.LogError(e.log, err)
+ return err
 }
- })
-}

-// ProcessLocal processes an event originating on the local node.
-func (e *Engine) ProcessLocal(event interface{}) error {
- return e.unit.Do(func() error {
- return e.process(e.me.NodeID(), event)
- })
-}

-// Process processes the given event from the node with the given origin ID in
-// a blocking manner. It returns the potential processing error when done.
-func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error {
- return e.unit.Do(func() error {
- return e.process(originID, event)
- })
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ }
+ }
 }

-// process processes events for the pusher engine on the collection node.
-func (e *Engine) process(originID flow.Identifier, event interface{}) error {
- switch ev := event.(type) {
- case *messages.SubmitCollectionGuarantee:
- e.engMetrics.MessageReceived(metrics.EngineCollectionProvider, metrics.MessageSubmitGuarantee)
- defer e.engMetrics.MessageHandled(metrics.EngineCollectionProvider, metrics.MessageSubmitGuarantee)
- return e.onSubmitCollectionGuarantee(originID, ev)
- default:
- return fmt.Errorf("invalid event type (%T)", event)
- }
+// Process is called by the networking layer, when peers broadcast messages with this node
+// as one of the recipients. The protocol specifies that Collector nodes broadcast Collection
+// Guarantees to Consensus Nodes and _only_ those. When the pusher engine (running only on
+// Collectors) receives a message, this message is evidence of byzantine behavior.
+// Byzantine inputs are internally handled by the pusher.Engine and do *not* result in
+// error returns. No errors expected during normal operation (including byzantine inputs).
+func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, message any) error {
+ // Targeting a collector node's pusher.Engine with messages could be considered a slashable offense.
+ // Though, for generating cryptographic evidence, we need Message Forensics - see reference [1].
+ // Much further into the future, when we are implementing slashing challenges, we'll probably implement a
+ // dedicated consumer to post-process evidence of protocol violations into slashing challenges. For now,
+ // we just log this with the `KeySuspicious` key to alert the node operator.
+ // [1] Message Forensics FLIP: https://github.com/onflow/flips/pull/195
+ err := fmt.Errorf("collector node's pusher.Engine was targeted by message %T on channel %v", message, channel)
+ e.log.Warn().
+ Err(err).
+ Bool(logging.KeySuspicious, true).
+ Str("peer_id", originID.String()).
+ Msg("potentially byzantine networking traffic detected")
+ return nil
 }

-// onSubmitCollectionGuarantee handles submitting the given collection guarantee
-// to consensus nodes.
-func (e *Engine) onSubmitCollectionGuarantee(originID flow.Identifier, req *messages.SubmitCollectionGuarantee) error { - if originID != e.me.NodeID() { - return fmt.Errorf("invalid remote request to submit collection guarantee (from=%x)", originID) +// SubmitCollectionGuarantee adds a collection guarantee to the engine's queue +// to later be published to consensus nodes. +func (e *Engine) SubmitCollectionGuarantee(guarantee *messages.CollectionGuarantee) { + if e.queue.Push(guarantee) { + e.notifier.Notify() + } else { + e.engMetrics.OutboundMessageDropped(metrics.EngineCollectionProvider, metrics.MessageCollectionGuarantee) } - - return e.SubmitCollectionGuarantee(&req.Guarantee) } -// SubmitCollectionGuarantee submits the collection guarantee to all consensus nodes. -func (e *Engine) SubmitCollectionGuarantee(guarantee *flow.CollectionGuarantee) error { - consensusNodes, err := e.state.Final().Identities(filter.HasRole(flow.RoleConsensus)) +// publishCollectionGuarantee publishes the collection guarantee to all consensus nodes. +// No errors expected during normal operation. +func (e *Engine) publishCollectionGuarantee(guarantee *messages.CollectionGuarantee) error { + consensusNodes, err := e.state.Final().Identities(filter.HasRole[flow.Identity](flow.RoleConsensus)) if err != nil { - return fmt.Errorf("could not get consensus nodes: %w", err) + return fmt.Errorf("could not get consensus nodes' identities: %w", err) } - // NOTE: Consensus nodes do not broadcast guarantees among themselves, so it needs that - // at least one collection node make a publish to all of them. + // NOTE: Consensus nodes do not broadcast guarantees among themselves. So for the collection to be included, + // at least one collector has to successfully broadcast the collection to consensus nodes. Otherwise, the + // collection is lost, which is acceptable as long as we only lose a small fraction of collections. err = e.conduit.Publish(guarantee, consensusNodes.NodeIDs()...) if err != nil { return fmt.Errorf("could not submit collection guarantee: %w", err) @@ -143,7 +186,7 @@ func (e *Engine) SubmitCollectionGuarantee(guarantee *flow.CollectionGuarantee) e.engMetrics.MessageSent(metrics.EngineCollectionProvider, metrics.MessageCollectionGuarantee) e.log.Debug(). - Hex("guarantee_id", logging.ID(guarantee.ID())). + Hex("collection_id", logging.ID(guarantee.CollectionID)). Hex("ref_block_id", logging.ID(guarantee.ReferenceBlockID)). 
Msg("submitting collection guarantee") diff --git a/engine/collection/pusher/engine_test.go b/engine/collection/pusher/engine_test.go index fec34346ad9..0670847b010 100644 --- a/engine/collection/pusher/engine_test.go +++ b/engine/collection/pusher/engine_test.go @@ -1,8 +1,10 @@ package pusher_test import ( + "context" "io" "testing" + "time" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" @@ -12,10 +14,11 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" @@ -40,20 +43,20 @@ func (suite *Suite) SetupTest() { // add some dummy identities so we have one of each role suite.identities = unittest.IdentityListFixture(5, unittest.WithAllRoles()) - me := suite.identities.Filter(filter.HasRole(flow.RoleCollection))[0] + me := suite.identities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))[0] suite.state = new(protocol.State) suite.snapshot = new(protocol.Snapshot) - suite.snapshot.On("Identities", mock.Anything).Return(func(filter flow.IdentityFilter) flow.IdentityList { + suite.snapshot.On("Identities", mock.Anything).Return(func(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { return suite.identities.Filter(filter) - }, func(filter flow.IdentityFilter) error { + }, func(filter flow.IdentityFilter[flow.Identity]) error { return nil }) suite.state.On("Final").Return(suite.snapshot) metrics := metrics.NewNoopCollector() - net := new(mocknetwork.Network) + net := new(mocknetwork.EngineRegistry) suite.conduit = new(mocknetwork.Conduit) net.On("Register", mock.Anything, mock.Anything).Return(suite.conduit, nil) @@ -70,8 +73,6 @@ func (suite *Suite) SetupTest() { metrics, metrics, suite.me, - suite.collections, - suite.transactions, ) suite.Require().Nil(err) } @@ -82,18 +83,21 @@ func TestPusherEngine(t *testing.T) { // should be able to submit collection guarantees to consensus nodes func (suite *Suite) TestSubmitCollectionGuarantee() { + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(suite.T(), context.Background()) + suite.engine.Start(ctx) + defer cancel() + done := make(chan struct{}) - guarantee := unittest.CollectionGuaranteeFixture() + guarantee := (*messages.CollectionGuarantee)(unittest.CollectionGuaranteeFixture()) // should submit the collection to consensus nodes - consensus := suite.identities.Filter(filter.HasRole(flow.RoleConsensus)) - suite.conduit.On("Publish", guarantee, consensus[0].NodeID).Return(nil) + consensus := suite.identities.Filter(filter.HasRole[flow.Identity](flow.RoleConsensus)) + suite.conduit.On("Publish", guarantee, consensus[0].NodeID). 
+ Run(func(_ mock.Arguments) { close(done) }).Return(nil).Once()

- msg := &messages.SubmitCollectionGuarantee{
- Guarantee: *guarantee,
- }
- err := suite.engine.ProcessLocal(msg)
- suite.Require().Nil(err)
+ suite.engine.SubmitCollectionGuarantee(guarantee)
+
+ unittest.RequireCloseBefore(suite.T(), done, time.Second, "message not sent")

 suite.conduit.AssertExpectations(suite.T())
 }
@@ -101,16 +105,15 @@ func (suite *Suite) TestSubmitCollectionGuarantee() {

// pusher engine must discard guarantee messages submitted by other nodes
// and must not publish anything in response
func (suite *Suite) TestSubmitCollectionGuaranteeNonLocal() {
- guarantee := unittest.CollectionGuaranteeFixture()
-
- // send from a non-allowed role
- sender := suite.identities.Filter(filter.HasRole(flow.RoleVerification))[0]
+ guarantee := (*messages.CollectionGuarantee)(unittest.CollectionGuaranteeFixture())

- msg := &messages.SubmitCollectionGuarantee{
- Guarantee: *guarantee,
- }
- err := suite.engine.Process(channels.PushGuarantees, sender.NodeID, msg)
- suite.Require().Error(err)
+ // verify that pusher.Engine handles any (potentially byzantine) input:
+ // a byzantine peer could target the collector node's pusher engine with messages.
+ // The pusher must discard those, and explicitly not get tricked into broadcasting
+ // collection guarantees which a byzantine peer might try to inject into the system.
+ sender := suite.identities.Filter(filter.HasRole[flow.Identity](flow.RoleVerification))[0]
+ err := suite.engine.Process(channels.PushGuarantees, sender.NodeID, guarantee)
+ suite.Require().NoError(err)

 suite.conduit.AssertNumberOfCalls(suite.T(), "Multicast", 0)
 }
diff --git a/engine/collection/rpc/engine.go b/engine/collection/rpc/engine.go
index b6bb72673a5..c1b1f76e9d5 100644
--- a/engine/collection/rpc/engine.go
+++ b/engine/collection/rpc/engine.go
@@ -12,12 +12,16 @@ import (
 "github.com/rs/zerolog"
 "google.golang.org/grpc"
 "google.golang.org/grpc/codes"
+ _ "google.golang.org/grpc/encoding/gzip" // required for gRPC compression
 "google.golang.org/grpc/status"

+ _ "github.com/onflow/flow-go/engine/common/grpc/compressor/deflate" // required for gRPC compression
+ _ "github.com/onflow/flow-go/engine/common/grpc/compressor/snappy" // required for gRPC compression
+
 "github.com/onflow/flow-go/engine"
- "github.com/onflow/flow-go/engine/common/rpc"
 "github.com/onflow/flow-go/engine/common/rpc/convert"
 "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/module/grpcserver"
 )

// Backend defines the core functionality required by the RPC API.
@@ -29,9 +33,14 @@ type Backend interface {

// Config defines the configurable options for the ingress server.
type Config struct {
- ListenAddr string
- MaxMsgSize uint // in bytes
- RpcMetricsEnabled bool // enable GRPC metrics
+ ListenAddr string
+ MaxRequestMsgSize uint // in bytes
+ MaxResponseMsgSize uint // in bytes
+ RpcMetricsEnabled bool // enable GRPC metrics
+
+ // holds the value of the deprecated MaxMsgSize flag for use during bootstrapping;
+ // will be removed in a future release.
+ DeprecatedMaxMsgSize uint // in bytes } // Engine implements a gRPC server with a simplified version of the Observation @@ -55,8 +64,8 @@ func New( ) *Engine { // create a GRPC server to serve GRPC clients grpcOpts := []grpc.ServerOption{ - grpc.MaxRecvMsgSize(int(config.MaxMsgSize)), - grpc.MaxSendMsgSize(int(config.MaxMsgSize)), + grpc.MaxRecvMsgSize(int(config.MaxRequestMsgSize)), + grpc.MaxSendMsgSize(int(config.MaxResponseMsgSize)), } var interceptors []grpc.UnaryServerInterceptor // ordered list of interceptors @@ -67,7 +76,7 @@ func New( if len(apiRatelimits) > 0 { // create a rate limit interceptor - rateLimitInterceptor := rpc.NewRateLimiterInterceptor(log, apiRatelimits, apiBurstLimits).UnaryServerInterceptor + rateLimitInterceptor := grpcserver.NewRateLimiterInterceptor(log, apiRatelimits, apiBurstLimits).UnaryServerInterceptor // append the rate limit interceptor to the list of interceptors interceptors = append(interceptors, rateLimitInterceptor) } diff --git a/engine/collection/rpc/mock/backend.go b/engine/collection/rpc/mock/backend.go index b7f0289db2c..ca92f33b0d1 100644 --- a/engine/collection/rpc/mock/backend.go +++ b/engine/collection/rpc/mock/backend.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type Backend struct { func (_m *Backend) ProcessTransaction(_a0 *flow.TransactionBody) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ProcessTransaction") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.TransactionBody) error); ok { r0 = rf(_a0) @@ -26,13 +30,12 @@ func (_m *Backend) ProcessTransaction(_a0 *flow.TransactionBody) error { return r0 } -type mockConstructorTestingTNewBackend interface { +// NewBackend creates a new instance of Backend. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBackend(t interface { mock.TestingT Cleanup(func()) -} - -// NewBackend creates a new instance of Backend. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewBackend(t mockConstructorTestingTNewBackend) *Backend { +}) *Backend { mock := &Backend{} mock.Mock.Test(t) diff --git a/engine/collection/synchronization/engine.go b/engine/collection/synchronization/engine.go index 77ebdbd7792..d87910e2fa7 100644 --- a/engine/collection/synchronization/engine.go +++ b/engine/collection/synchronization/engine.go @@ -1,11 +1,8 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package synchronization import ( "errors" "fmt" - "math/rand" "time" "github.com/hashicorp/go-multierror" @@ -16,6 +13,7 @@ import ( "github.com/onflow/flow-go/engine/common/fifoqueue" commonsync "github.com/onflow/flow-go/engine/common/synchronization" "github.com/onflow/flow-go/model/chainsync" + clustermodel "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/messages" @@ -27,6 +25,7 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/rand" ) // defaultSyncResponseQueueCapacity maximum capacity of sync responses queue @@ -42,7 +41,7 @@ type Engine struct { log zerolog.Logger metrics module.EngineMetrics me module.Local - participants flow.IdentityList + participants flow.IdentitySkeletonList con network.Conduit comp collection.Compliance // compliance layer engine @@ -62,9 +61,9 @@ type Engine struct { func New( log zerolog.Logger, metrics module.EngineMetrics, - net network.Network, + net network.EngineRegistry, me module.Local, - participants flow.IdentityList, + participants flow.IdentitySkeletonList, state cluster.State, blocks storage.ClusterBlocks, comp collection.Compliance, @@ -88,7 +87,7 @@ func New( log: log.With().Str("engine", "cluster_synchronization").Logger(), metrics: metrics, me: me, - participants: participants.Filter(filter.Not(filter.HasNodeID(me.NodeID()))), + participants: participants.Filter(filter.Not(filter.HasNodeID[flow.IdentitySkeleton](me.NodeID()))), comp: comp, core: core, pollInterval: opt.PollInterval, @@ -100,11 +99,7 @@ func New( if err != nil { return nil, fmt.Errorf("could not setup message handler") } - - chainID, err := state.Params().ChainID() - if err != nil { - return nil, fmt.Errorf("could not get chain ID: %w", err) - } + chainID := state.Params().ChainID() // register the engine with the network layer and store the conduit con, err := net.Register(channels.SyncCluster(chainID), e) @@ -144,7 +139,7 @@ func (e *Engine) setupResponseMessageHandler() error { engine.NewNotifier(), engine.Pattern{ Match: func(msg *engine.Message) bool { - _, ok := msg.Payload.(*messages.SyncResponse) + _, ok := msg.Payload.(*flow.SyncResponse) if ok { e.metrics.MessageReceived(metrics.EngineClusterSynchronization, metrics.MessageSyncResponse) } @@ -154,7 +149,7 @@ func (e *Engine) setupResponseMessageHandler() error { }, engine.Pattern{ Match: func(msg *engine.Message) bool { - _, ok := msg.Payload.(*messages.ClusterBlockResponse) + _, ok := msg.Payload.(*clustermodel.BlockResponse) if ok { e.metrics.MessageReceived(metrics.EngineClusterSynchronization, metrics.MessageBlockResponse) } @@ -234,9 +229,9 @@ func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, eve // - All other errors are potential symptoms of internal state corruption or bugs (fatal). 
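// Requests (sync, range, batch) are dispatched to the request handler, while responses
// (sync responses and block responses) are queued by the response message handler and
// consumed asynchronously.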
func (e *Engine) process(originID flow.Identifier, event interface{}) error {
 switch event.(type) {
- case *messages.RangeRequest, *messages.BatchRequest, *messages.SyncRequest:
+ case *flow.RangeRequest, *flow.BatchRequest, *flow.SyncRequest:
 return e.requestHandler.process(originID, event)
- case *messages.SyncResponse, *messages.ClusterBlockResponse:
+ case *flow.SyncResponse, *clustermodel.BlockResponse:
 return e.responseMessageHandler.Process(originID, event)
 default:
 return fmt.Errorf("received input with type %T from %x: %w", event, originID[:], engine.IncompatibleInputTypeError)
 }
@@ -267,14 +262,14 @@ func (e *Engine) processAvailableResponses() {
 msg, ok := e.pendingSyncResponses.Get()
 if ok {
- e.onSyncResponse(msg.OriginID, msg.Payload.(*messages.SyncResponse))
+ e.onSyncResponse(msg.OriginID, msg.Payload.(*flow.SyncResponse))
 e.metrics.MessageHandled(metrics.EngineClusterSynchronization, metrics.MessageSyncResponse)
 continue
 }

 msg, ok = e.pendingBlockResponses.Get()
 if ok {
- e.onBlockResponse(msg.OriginID, msg.Payload.(*messages.ClusterBlockResponse))
+ e.onBlockResponse(msg.OriginID, msg.Payload.(*clustermodel.BlockResponse))
 e.metrics.MessageHandled(metrics.EngineClusterSynchronization, metrics.MessageBlockResponse)
 continue
 }
@@ -286,7 +281,7 @@
 }

// onSyncResponse processes a synchronization response.
-func (e *Engine) onSyncResponse(originID flow.Identifier, res *messages.SyncResponse) {
+func (e *Engine) onSyncResponse(_ flow.Identifier, res *flow.SyncResponse) {
 final, err := e.state.Final().Head()
 if err != nil {
 e.log.Error().Err(err).Msg("could not get last finalized header")
@@ -295,19 +290,17 @@
 e.core.HandleHeight(final, res.Height)
 }

-// onBlockResponse processes a response containing a specifically requested block.
-func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.ClusterBlockResponse) {
+// onBlockResponse processes a slice of requested block proposals.
+// Input proposals are structurally validated.
+func (e *Engine) onBlockResponse(originID flow.Identifier, response *clustermodel.BlockResponse) {
 // process the blocks one by one
- for _, block := range res.Blocks {
- header := block.Header
- if !e.core.HandleBlock(&header) {
+ for _, proposal := range response.Blocks {
+ if !e.core.HandleBlock(proposal.Block.ToHeader()) {
 continue
 }
- synced := flow.Slashable[*messages.ClusterBlockProposal]{
+ synced := flow.Slashable[*clustermodel.Proposal]{
 OriginID: originID,
- Message: &messages.ClusterBlockProposal{
- Block: block,
- },
+ Message: &proposal,
 }
 // forward the block to the compliance engine for validation and processing
 e.comp.OnSyncedClusterBlock(synced)
@@ -361,9 +354,19 @@ func (e *Engine) pollHeight() {
 return
 }

+ nonce, err := rand.Uint64()
+ if err != nil {
+ // TODO: this error should be returned by pollHeight()
+ // it is logged for now, since the only possible error relates to a failure of the
+ // system's entropy generation. Such an error would also surface in other components,
+ // where it is handled properly and leads to crashing the module.
+ e.log.Error().Err(err).Msg("nonce generation failed during pollHeight")
+ return
+ }
+
 // send the request for synchronization
 req := &messages.SyncRequest{
- Nonce: rand.Uint64(),
+ Nonce: nonce,
 Height: head.Height,
 }
 err = e.con.Multicast(req, synccore.DefaultPollNodes, e.participants.NodeIDs()...)
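
The TODO above already sketches the intended end state: entropy failures from `rand.Uint64()` should be returned to the caller rather than logged and swallowed. Under that assumption (and keeping the `rand`, `messages.SyncRequest`, and conduit APIs from the hunk), `pollHeight` would become roughly:

    // pollHeight multicasts a synchronization request to a sample of cluster participants.
    // It errors if the nonce cannot be generated or the request cannot be sent.
    func (e *Engine) pollHeight() error {
        head, err := e.state.Final().Head()
        if err != nil {
            return fmt.Errorf("could not get last finalized header: %w", err)
        }
        nonce, err := rand.Uint64()
        if err != nil {
            return fmt.Errorf("nonce generation failed during pollHeight: %w", err)
        }
        // send the request for synchronization
        req := &messages.SyncRequest{
            Nonce:  nonce,
            Height: head.Height,
        }
        err = e.con.Multicast(req, synccore.DefaultPollNodes, e.participants.NodeIDs()...)
        if err != nil {
            return fmt.Errorf("could not send sync request: %w", err)
        }
        return nil
    }
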
@@ -379,12 +382,21 @@ func (e *Engine) sendRequests(ranges []chainsync.Range, batches []chainsync.Batc
 var errs *multierror.Error

 for _, ran := range ranges {
+ nonce, err := rand.Uint64()
+ if err != nil {
+ // TODO: this error should be returned by sendRequests
+ // it is logged for now, since the only possible error relates to a failure of the
+ // system's entropy generation. Such an error would also surface in other components,
+ // where it is handled properly and leads to crashing the module.
+ e.log.Error().Err(err).Msg("nonce generation failed during range request")
+ return
+ }
 req := &messages.RangeRequest{
- Nonce: rand.Uint64(),
+ Nonce: nonce,
 FromHeight: ran.From,
 ToHeight: ran.To,
 }
- err := e.con.Multicast(req, synccore.DefaultBlockRequestNodes, e.participants.NodeIDs()...)
+ err = e.con.Multicast(req, synccore.DefaultBlockRequestNodes, e.participants.NodeIDs()...)
 if err != nil {
 errs = multierror.Append(errs, fmt.Errorf("could not submit range request: %w", err))
 continue
@@ -399,11 +411,20 @@ func (e *Engine) sendRequests(ranges []chainsync.Range, batches []chainsync.Batc
 }

 for _, batch := range batches {
+ nonce, err := rand.Uint64()
+ if err != nil {
+ // TODO: this error should be returned by sendRequests
+ // it is logged for now, since the only possible error relates to a failure of the
+ // system's entropy generation. Such an error would also surface in other components,
+ // where it is handled properly and leads to crashing the module.
+ e.log.Error().Err(err).Msg("nonce generation failed during batch request")
+ return
+ }
 req := &messages.BatchRequest{
- Nonce: rand.Uint64(),
+ Nonce: nonce,
 BlockIDs: batch.BlockIDs,
 }
- err := e.con.Multicast(req, synccore.DefaultBlockRequestNodes, e.participants.NodeIDs()...)
+ err = e.con.Multicast(req, synccore.DefaultBlockRequestNodes, e.participants.NodeIDs()...)
if err != nil { errs = multierror.Append(errs, fmt.Errorf("could not submit batch request: %w", err)) continue diff --git a/engine/collection/synchronization/engine_test.go b/engine/collection/synchronization/engine_test.go index cd79ffe1931..9b9fbf50015 100644 --- a/engine/collection/synchronization/engine_test.go +++ b/engine/collection/synchronization/engine_test.go @@ -25,7 +25,7 @@ import ( module "github.com/onflow/flow-go/module/mock" netint "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" clusterint "github.com/onflow/flow-go/state/cluster" cluster "github.com/onflow/flow-go/state/cluster/mock" storerr "github.com/onflow/flow-go/storage" @@ -42,9 +42,9 @@ type SyncSuite struct { myID flow.Identifier participants flow.IdentityList head *flow.Header - heights map[uint64]*clustermodel.Block - blockIDs map[flow.Identifier]*clustermodel.Block - net *mocknetwork.Network + heights map[uint64]*clustermodel.Proposal + blockIDs map[flow.Identifier]*clustermodel.Proposal + net *mocknetwork.EngineRegistry con *mocknetwork.Conduit me *module.Local state *cluster.State @@ -53,13 +53,11 @@ type SyncSuite struct { blocks *storage.ClusterBlocks comp *mockcollection.Compliance core *module.SyncCore + metrics *module.EngineMetrics e *Engine } func (ss *SyncSuite) SetupTest() { - // seed the RNG - rand.Seed(time.Now().UnixNano()) - // generate own ID ss.participants = unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleCollection)) ss.myID = ss.participants[0].NodeID @@ -69,12 +67,12 @@ func (ss *SyncSuite) SetupTest() { ss.head = header // create maps to enable block returns - ss.heights = make(map[uint64]*clustermodel.Block) - ss.blockIDs = make(map[flow.Identifier]*clustermodel.Block) + ss.heights = make(map[uint64]*clustermodel.Proposal) + ss.blockIDs = make(map[flow.Identifier]*clustermodel.Proposal) clusterID := header.ChainID // set up the network module mock - ss.net = &mocknetwork.Network{} + ss.net = &mocknetwork.EngineRegistry{} ss.net.On("Register", channels.SyncCluster(clusterID), mock.Anything).Return( func(network channels.Channel, engine netint.MessageProcessor) netint.Conduit { return ss.con @@ -119,7 +117,7 @@ func (ss *SyncSuite) SetupTest() { nil, ) ss.snapshot.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return ss.participants.Filter(selector) }, nil, @@ -127,30 +125,22 @@ func (ss *SyncSuite) SetupTest() { // set up blocks storage mock ss.blocks = &storage.ClusterBlocks{} - ss.blocks.On("ByHeight", mock.Anything).Return( - func(height uint64) *clustermodel.Block { - return ss.heights[height] - }, - func(height uint64) error { - _, enabled := ss.heights[height] + ss.blocks.On("ProposalByHeight", mock.Anything).Return( + func(height uint64) (*clustermodel.Proposal, error) { + block, enabled := ss.heights[height] if !enabled { - return storerr.ErrNotFound + return nil, storerr.ErrNotFound } - return nil - }, - ) - ss.blocks.On("ByID", mock.Anything).Return( - func(blockID flow.Identifier) *clustermodel.Block { - return ss.blockIDs[blockID] - }, - func(blockID flow.Identifier) error { - _, enabled := ss.blockIDs[blockID] + return block, nil + }) + ss.blocks.On("ProposalByID", mock.Anything).Return( + func(blockID flow.Identifier) (*clustermodel.Proposal, error) { + block, enabled := ss.blockIDs[blockID] if !enabled { - 
return storerr.ErrNotFound + return nil, storerr.ErrNotFound } - return nil - }, - ) + return block, nil + }) // set up compliance engine mock ss.comp = mockcollection.NewCompliance(ss.T()) @@ -160,19 +150,18 @@ func (ss *SyncSuite) SetupTest() { // initialize the engine log := zerolog.New(io.Discard) - metrics := metrics.NewNoopCollector() + ss.metrics = new(module.EngineMetrics) - e, err := New(log, metrics, ss.net, ss.me, ss.participants, ss.state, ss.blocks, ss.comp, ss.core) + e, err := New(log, ss.metrics, ss.net, ss.me, ss.participants.ToSkeleton(), ss.state, ss.blocks, ss.comp, ss.core) require.NoError(ss.T(), err, "should pass engine initialization") ss.e = e } func (ss *SyncSuite) TestOnSyncRequest() { - // generate origin and request message originID := unittest.IdentifierFixture() - req := &messages.SyncRequest{ + req := &flow.SyncRequest{ Nonce: rand.Uint64(), Height: 0, } @@ -205,17 +194,18 @@ func (ss *SyncSuite) TestOnSyncRequest() { assert.Equal(ss.T(), originID, recipientID, "should send response to original sender") }, ) + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageSyncResponse).Once() err = ss.e.requestHandler.onSyncRequest(originID, req) require.NoError(ss.T(), err, "smaller height sync request should pass") ss.core.AssertExpectations(ss.T()) + ss.metrics.AssertExpectations(ss.T()) } func (ss *SyncSuite) TestOnSyncResponse() { - // generate origin ID and response message originID := unittest.IdentifierFixture() - res := &messages.SyncResponse{ + res := &flow.SyncResponse{ Nonce: rand.Uint64(), Height: rand.Uint64(), } @@ -227,10 +217,9 @@ func (ss *SyncSuite) TestOnSyncResponse() { } func (ss *SyncSuite) TestOnRangeRequest() { - // generate originID and range request originID := unittest.IdentifierFixture() - req := &messages.RangeRequest{ + req := &flow.RangeRequest{ Nonce: rand.Uint64(), FromHeight: 0, ToHeight: 0, @@ -240,8 +229,9 @@ func (ss *SyncSuite) TestOnRangeRequest() { ref := ss.head.Height for height := ref; height >= ref-4; height-- { block := unittest.ClusterBlockFixture() - block.Header.Height = height - ss.heights[height] = &block + block.Height = height + ss.heights[height] = unittest.ClusterProposalFromBlock(block) + ss.blockIDs[block.ID()] = ss.heights[height] } // empty range should be a no-op @@ -270,13 +260,16 @@ func (ss *SyncSuite) TestOnRangeRequest() { func(args mock.Arguments) { res := args.Get(0).(*messages.ClusterBlockResponse) expected := ss.heights[ref-1] - actual := res.Blocks[0].ToInternal() - assert.Equal(ss.T(), expected.ID(), actual.ID(), "response should contain right block") + actual, err := clustermodel.NewProposal(res.Blocks[0]) + require.NoError(t, err) + assert.Equal(ss.T(), expected.Block.ID(), actual.Block.ID(), "response should contain right block") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "should send response to original requester") }, ) + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageBlockResponse).Once() + err := ss.e.requestHandler.onRangeRequest(originID, req) require.NoError(ss.T(), err, "range request with higher to height should pass") }) @@ -288,13 +281,19 @@ func (ss *SyncSuite) TestOnRangeRequest() { ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Once().Run( func(args mock.Arguments) { res := args.Get(0).(*messages.ClusterBlockResponse) - expected := []*clustermodel.Block{ss.heights[ref-2], 
ss.heights[ref-1], ss.heights[ref]} - assert.ElementsMatch(ss.T(), expected, res.BlocksInternal(), "response should contain right blocks") + expected := []clustermodel.Proposal{*ss.heights[ref-2], *ss.heights[ref-1], *ss.heights[ref]} + internal, err := res.ToInternal() + require.NoError(t, err) + actual, ok := internal.(*clustermodel.BlockResponse) + require.True(t, ok) + assert.ElementsMatch(ss.T(), expected, actual.Blocks, "response should contain right blocks") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "should send response to original requester") }, ) + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageBlockResponse).Once() + err := ss.e.requestHandler.onRangeRequest(originID, req) require.NoError(ss.T(), err, "valid range with missing blocks should fail") }) @@ -306,13 +305,19 @@ func (ss *SyncSuite) TestOnRangeRequest() { ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Once().Run( func(args mock.Arguments) { res := args.Get(0).(*messages.ClusterBlockResponse) - expected := []*clustermodel.Block{ss.heights[ref-2], ss.heights[ref-1], ss.heights[ref]} - assert.ElementsMatch(ss.T(), expected, res.BlocksInternal(), "response should contain right blocks") + expected := []clustermodel.Proposal{*ss.heights[ref-2], *ss.heights[ref-1], *ss.heights[ref]} + internal, err := res.ToInternal() + require.NoError(t, err) + actual, ok := internal.(*clustermodel.BlockResponse) + require.True(t, ok) + assert.ElementsMatch(ss.T(), expected, actual.Blocks, "response should contain right blocks") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "should send response to original requester") }, ) + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageBlockResponse).Once() + err := ss.e.requestHandler.onRangeRequest(originID, req) require.NoError(ss.T(), err, "valid range request should pass") }) @@ -326,8 +331,12 @@ func (ss *SyncSuite) TestOnRangeRequest() { ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Once().Run( func(args mock.Arguments) { res := args.Get(0).(*messages.ClusterBlockResponse) - expected := []*clustermodel.Block{ss.heights[ref-4], ss.heights[ref-3], ss.heights[ref-2]} - assert.ElementsMatch(ss.T(), expected, res.BlocksInternal(), "response should contain right blocks") + expected := []clustermodel.Proposal{*ss.heights[ref-4], *ss.heights[ref-3], *ss.heights[ref-2]} + internal, err := res.ToInternal() + require.NoError(t, err) + actual, ok := internal.(*clustermodel.BlockResponse) + require.True(t, ok) + assert.ElementsMatch(ss.T(), expected, actual.Blocks, "response should contain right blocks") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "should send response to original requester") @@ -340,17 +349,19 @@ func (ss *SyncSuite) TestOnRangeRequest() { config.MaxSize = 2 ss.e.requestHandler.core, err = chainsync.New(ss.e.log, config, metrics.NewNoopCollector(), flow.Localnet) require.NoError(ss.T(), err) + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageBlockResponse).Once() err = ss.e.requestHandler.onRangeRequest(originID, req) require.NoError(ss.T(), err, "valid range request should pass") 
}) + + ss.metrics.AssertExpectations(ss.T()) } func (ss *SyncSuite) TestOnBatchRequest() { - // generate origin ID and batch request originID := unittest.IdentifierFixture() - req := &messages.BatchRequest{ + req := &flow.BatchRequest{ Nonce: rand.Uint64(), BlockIDs: nil, } @@ -374,18 +385,22 @@ func (ss *SyncSuite) TestOnBatchRequest() { // a non-empty request for existing block IDs should send right response ss.T().Run("request for existing blocks", func(t *testing.T) { block := unittest.ClusterBlockFixture() - block.Header.Height = ss.head.Height - 1 + block.Height = ss.head.Height - 1 req.BlockIDs = []flow.Identifier{block.ID()} - ss.blockIDs[block.ID()] = &block + proposal := unittest.ClusterProposalFromBlock(block) + ss.blockIDs[block.ID()] = proposal ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Once().Run( func(args mock.Arguments) { res := args.Get(0).(*messages.ClusterBlockResponse) - assert.Equal(ss.T(), &block, res.Blocks[0].ToInternal(), "response should contain right block") + actual, err := clustermodel.NewProposal(res.Blocks[0]) + require.NoError(t, err) + assert.Equal(ss.T(), proposal, actual, "response should contain right block") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "response should be send to original requester") }, ) + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageBlockResponse).Once() err := ss.e.requestHandler.onBatchRequest(originID, req) require.NoError(ss.T(), err, "should pass request with valid block") }) @@ -393,19 +408,23 @@ func (ss *SyncSuite) TestOnBatchRequest() { // a request for an oversized batch should return MaxSize blocks ss.T().Run("oversized range", func(t *testing.T) { // setup request for 5 blocks. 
response should contain the first 2 (MaxSize) - ss.blockIDs = make(map[flow.Identifier]*clustermodel.Block) + ss.blockIDs = make(map[flow.Identifier]*clustermodel.Proposal) req.BlockIDs = make([]flow.Identifier, 5) for i := 0; i < len(req.BlockIDs); i++ { b := unittest.ClusterBlockFixture() - b.Header.Height = ss.head.Height - uint64(i) + b.Height = ss.head.Height - uint64(i) req.BlockIDs[i] = b.ID() - ss.blockIDs[b.ID()] = &b + ss.blockIDs[b.ID()] = unittest.ClusterProposalFromBlock(b) } ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Once().Run( func(args mock.Arguments) { res := args.Get(0).(*messages.ClusterBlockResponse) - assert.ElementsMatch(ss.T(), []*clustermodel.Block{ss.blockIDs[req.BlockIDs[0]], ss.blockIDs[req.BlockIDs[1]]}, res.BlocksInternal(), "response should contain right block") + internal, err := res.ToInternal() + require.NoError(t, err) + proposals, ok := internal.(*clustermodel.BlockResponse) + require.True(t, ok) + assert.ElementsMatch(ss.T(), []clustermodel.Proposal{*ss.blockIDs[req.BlockIDs[0]], *ss.blockIDs[req.BlockIDs[1]]}, proposals.Blocks, "response should contain right block") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "response should be send to original requester") @@ -418,56 +437,81 @@ func (ss *SyncSuite) TestOnBatchRequest() { config.MaxSize = 2 ss.e.requestHandler.core, err = chainsync.New(ss.e.log, config, metrics.NewNoopCollector(), flow.Localnet) require.NoError(ss.T(), err) + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageBlockResponse).Once() err = ss.e.requestHandler.onBatchRequest(originID, req) require.NoError(ss.T(), err, "should pass request with valid block") }) + + ss.metrics.AssertExpectations(ss.T()) } func (ss *SyncSuite) TestOnBlockResponse() { - // generate origin and block response originID := unittest.IdentifierFixture() - res := &messages.ClusterBlockResponse{ - Nonce: rand.Uint64(), - Blocks: []messages.UntrustedClusterBlock{}, - } + response := unittest.ClusterBlockResponseFixture(2) // add one block that should be processed - processable := unittest.ClusterBlockFixture() - ss.core.On("HandleBlock", processable.Header).Return(true) - res.Blocks = append(res.Blocks, messages.UntrustedClusterBlockFromInternal(&processable)) + processable := response.Blocks[0] + ss.core.On("HandleBlock", processable.Block.ToHeader()).Return(true) // add one block that should not be processed - unprocessable := unittest.ClusterBlockFixture() - ss.core.On("HandleBlock", unprocessable.Header).Return(false) - res.Blocks = append(res.Blocks, messages.UntrustedClusterBlockFromInternal(&unprocessable)) + unprocessable := response.Blocks[1] + ss.core.On("HandleBlock", unprocessable.Block.ToHeader()).Return(false) ss.comp.On("OnSyncedClusterBlock", mock.Anything).Run(func(args mock.Arguments) { - res := args.Get(0).(flow.Slashable[*messages.ClusterBlockProposal]) - converted := res.Message.Block.ToInternal() - ss.Assert().Equal(processable.Header, converted.Header) - ss.Assert().Equal(processable.Payload, converted.Payload) + res := args.Get(0).(flow.Slashable[*clustermodel.Proposal]) + ss.Assert().Equal(processable.Block.HeaderBody, res.Message.Block.HeaderBody) + ss.Assert().Equal(processable.Block.Payload, res.Message.Block.Payload) ss.Assert().Equal(originID, res.OriginID) }).Return(nil) - ss.e.onBlockResponse(originID, res) + ss.e.onBlockResponse(originID, response) 
ss.comp.AssertExpectations(ss.T()) ss.core.AssertExpectations(ss.T()) } +// TestOnInvalidBlockResponse verifies that the engine correctly handles a BlockResponse +// containing an invalid block proposal that cannot be converted to a trusted proposal. +func (ss *SyncSuite) TestOnInvalidBlockResponse() { + // generate origin and block response + originID := unittest.IdentifierFixture() + + proposal := unittest.ClusterProposalFixture() + proposal.ProposerSigData = nil // invalid value + + req := &clustermodel.BlockResponse{ + Nonce: 0, + Blocks: []clustermodel.Proposal{*proposal}, + } + + // Expect metrics to track message receipt and message drop for invalid block proposal + ss.metrics.On("MessageReceived", metrics.EngineClusterSynchronization, metrics.MessageBlockResponse).Once() + ss.metrics.On("InboundMessageDropped", metrics.EngineClusterSynchronization, metrics.MessageBlockProposal).Once() + + // Process the block response message through the engine + require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, req)) + + // HandleBlock should NOT be called for an invalid Proposal + ss.core.AssertNotCalled(ss.T(), "HandleBlock", mock.Anything) + // OnSyncedClusterBlock should NOT be called for an invalid Proposal + ss.comp.AssertNotCalled(ss.T(), "OnSyncedClusterBlock", mock.Anything) +} + func (ss *SyncSuite) TestPollHeight() { // check that we send to three nodes from our total list - others := ss.participants.Filter(filter.HasNodeID(ss.participants[1:].NodeIDs()...)) + others := ss.participants.Filter(filter.HasNodeID[flow.Identity](ss.participants[1:].NodeIDs()...)) ss.con.On("Multicast", mock.Anything, synccore.DefaultPollNodes, others[0].NodeID, others[1].NodeID).Return(nil).Run( func(args mock.Arguments) { req := args.Get(0).(*messages.SyncRequest) require.Equal(ss.T(), ss.head.Height, req.Height, "request should contain finalized height") }, ) + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageSyncRequest).Once() ss.e.pollHeight() ss.con.AssertExpectations(ss.T()) + ss.metrics.AssertExpectations(ss.T()) } func (ss *SyncSuite) TestSendRequests() { @@ -493,10 +537,13 @@ func (ss *SyncSuite) TestSendRequests() { }, ) ss.core.On("BatchRequested", batches[0]) + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageRangeRequest).Once() + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageBatchRequest).Once() // exclude my node ID ss.e.sendRequests(ranges, batches) ss.con.AssertExpectations(ss.T()) + ss.metrics.AssertExpectations(ss.T()) } // test a synchronization engine can be started and stopped @@ -513,17 +560,20 @@ func (ss *SyncSuite) TestProcessingMultipleItems() { originID := unittest.IdentifierFixture() for i := 0; i < 5; i++ { - msg := &messages.SyncResponse{ + msg := &flow.SyncResponse{ Nonce: uint64(i), Height: uint64(1000 + i), } ss.core.On("HandleHeight", mock.Anything, msg.Height).Once() + ss.metrics.On("MessageSent", metrics.EngineClusterSynchronization, metrics.MessageSyncResponse).Once() + ss.metrics.On("MessageReceived", metrics.EngineClusterSynchronization, metrics.MessageSyncResponse).Once() + ss.metrics.On("MessageHandled", metrics.EngineClusterSynchronization, metrics.MessageSyncResponse).Once() require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, msg)) } finalHeight := ss.head.Height for i := 0; i < 5; i++ { - msg := &messages.SyncRequest{ + msg := &flow.SyncRequest{ Nonce: uint64(i), Height: finalHeight - 100, } @@ -532,6 +582,7 @@ func (ss *SyncSuite)
TestProcessingMultipleItems() { ss.core.On("WithinTolerance", mock.Anything, mock.Anything).Return(false).Once() ss.core.On("HandleHeight", mock.Anything, msg.Height).Once() ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil) + ss.metrics.On("MessageReceived", metrics.EngineClusterSynchronization, metrics.MessageSyncRequest).Once() require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, msg)) } @@ -540,6 +591,7 @@ func (ss *SyncSuite) TestProcessingMultipleItems() { time.Sleep(time.Millisecond * 100) ss.core.AssertExpectations(ss.T()) + ss.metrics.AssertExpectations(ss.T()) } // TestProcessUnsupportedMessageType tests that Process and ProcessLocal correctly handle a case where invalid message type diff --git a/engine/collection/synchronization/request_handler.go b/engine/collection/synchronization/request_handler.go index 346efd65bfd..428af23aaee 100644 --- a/engine/collection/synchronization/request_handler.go +++ b/engine/collection/synchronization/request_handler.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/engine" commonsync "github.com/onflow/flow-go/engine/common/synchronization" + clustermodel "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" @@ -135,7 +136,7 @@ func (r *RequestHandlerEngine) setupRequestMessageHandler() { engine.NewNotifier(), engine.Pattern{ Match: func(msg *engine.Message) bool { - _, ok := msg.Payload.(*messages.SyncRequest) + _, ok := msg.Payload.(*flow.SyncRequest) if ok { r.metrics.MessageReceived(metrics.EngineClusterSynchronization, metrics.MessageSyncRequest) } @@ -145,7 +146,7 @@ func (r *RequestHandlerEngine) setupRequestMessageHandler() { }, engine.Pattern{ Match: func(msg *engine.Message) bool { - _, ok := msg.Payload.(*messages.RangeRequest) + _, ok := msg.Payload.(*flow.RangeRequest) if ok { r.metrics.MessageReceived(metrics.EngineClusterSynchronization, metrics.MessageRangeRequest) } @@ -155,7 +156,7 @@ func (r *RequestHandlerEngine) setupRequestMessageHandler() { }, engine.Pattern{ Match: func(msg *engine.Message) bool { - _, ok := msg.Payload.(*messages.BatchRequest) + _, ok := msg.Payload.(*flow.BatchRequest) if ok { r.metrics.MessageReceived(metrics.EngineClusterSynchronization, metrics.MessageBatchRequest) } @@ -169,7 +170,7 @@ func (r *RequestHandlerEngine) setupRequestMessageHandler() { // onSyncRequest processes an outgoing handshake; if we have a higher height, we // inform the other node of it, so they can organize their block downloads. If // we have a lower height, we add the difference to our own download queue. -func (r *RequestHandlerEngine) onSyncRequest(originID flow.Identifier, req *messages.SyncRequest) error { +func (r *RequestHandlerEngine) onSyncRequest(originID flow.Identifier, req *flow.SyncRequest) error { final, err := r.state.Final().Head() if err != nil { return fmt.Errorf("could not get last finalized header: %w", err) @@ -200,7 +201,7 @@ func (r *RequestHandlerEngine) onSyncRequest(originID flow.Identifier, req *mess } // onRangeRequest processes a request for a range of blocks by height. 
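The clamping that onRangeRequest performs before touching storage (cut the range off at the local finalized height, refuse ranges entirely above it, and truncate to the configured MaxSize) can be stated compactly. A minimal self-contained sketch with plain integers, not the flow-go types; the name clampRange is illustrative only:

package main

import "fmt"

// clampRange sketches the bounds checks done before serving a range request:
// the range is cut off at our finalized height, rejected when it lies
// entirely above it, and truncated to at most maxSize blocks.
// Toy sketch; not the flow-go implementation.
func clampRange(from, to, finalized, maxSize uint64) (uint64, uint64, bool) {
	if from > finalized {
		return 0, 0, false // nothing we can serve
	}
	if to > finalized {
		to = finalized
	}
	if to-from+1 > maxSize {
		to = from + maxSize - 1
	}
	return from, to, true
}

func main() {
	from, to, ok := clampRange(95, 200, 100, 4)
	fmt.Println(from, to, ok) // 95 98 true
}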
-func (r *RequestHandlerEngine) onRangeRequest(originID flow.Identifier, req *messages.RangeRequest) error { +func (r *RequestHandlerEngine) onRangeRequest(originID flow.Identifier, req *flow.RangeRequest) error { r.log.Debug().Str("origin_id", originID.String()).Msg("received new range request") // get the latest final state to know if we can fulfill the request head, err := r.state.Final().Head() @@ -234,9 +235,9 @@ func (r *RequestHandlerEngine) onRangeRequest(originID flow.Identifier, req *mes } // get all of the blocks, one by one - blocks := make([]messages.UntrustedClusterBlock, 0, req.ToHeight-req.FromHeight+1) + proposals := make([]clustermodel.UntrustedProposal, 0, req.ToHeight-req.FromHeight+1) for height := req.FromHeight; height <= req.ToHeight; height++ { - block, err := r.blocks.ByHeight(height) + proposal, err := r.blocks.ProposalByHeight(height) if errors.Is(err, storage.ErrNotFound) { r.log.Error().Uint64("height", height).Msg("skipping unknown heights") break @@ -244,11 +245,11 @@ func (r *RequestHandlerEngine) onRangeRequest(originID flow.Identifier, req *mes if err != nil { return fmt.Errorf("could not get block for height (%d): %w", height, err) } - blocks = append(blocks, messages.UntrustedClusterBlockFromInternal(block)) + proposals = append(proposals, clustermodel.UntrustedProposal(*proposal)) } // if there are no blocks to send, skip network message - if len(blocks) == 0 { + if len(proposals) == 0 { r.log.Debug().Msg("skipping empty range response") return nil } @@ -256,7 +257,7 @@ func (r *RequestHandlerEngine) onRangeRequest(originID flow.Identifier, req *mes // send the response res := &messages.ClusterBlockResponse{ Nonce: req.Nonce, - Blocks: blocks, + Blocks: proposals, } err = r.con.Unicast(res, originID) if err != nil { @@ -269,7 +270,7 @@ func (r *RequestHandlerEngine) onRangeRequest(originID flow.Identifier, req *mes } // onBatchRequest processes a request for a specific block by block ID. 
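onBatchRequest follows a tolerant pattern: deduplicate the requested IDs, cap the batch at MaxSize, treat unknown IDs as skippable rather than as errors, and send no response at all when nothing was found. A self-contained sketch under those assumptions, with toy types in place of flow.Identifier and cluster proposals:

package main

import "fmt"

type blockID [4]byte

// serveBatch sketches the handler's shape: dedupe requested IDs, cap the
// batch at maxSize, skip IDs we do not have, and return an empty result
// (meaning: send no network response) when nothing was found.
// Toy sketch; not the flow-go implementation.
func serveBatch(store map[blockID]string, req []blockID, maxSize int) []string {
	unique := make(map[blockID]struct{})
	for _, id := range req {
		if len(unique) == maxSize {
			break
		}
		unique[id] = struct{}{}
	}
	var found []string
	for id := range unique {
		if b, ok := store[id]; ok {
			found = append(found, b)
		}
	}
	return found
}

func main() {
	store := map[blockID]string{{1}: "block-1"}
	fmt.Println(serveBatch(store, []blockID{{1}, {1}, {2}}, 2)) // [block-1]
}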
-func (r *RequestHandlerEngine) onBatchRequest(originID flow.Identifier, req *messages.BatchRequest) error { +func (r *RequestHandlerEngine) onBatchRequest(originID flow.Identifier, req *flow.BatchRequest) error { r.log.Debug().Str("origin_id", originID.String()).Msg("received new batch request") // we should bail and send nothing on empty request if len(req.BlockIDs) == 0 { @@ -303,9 +304,9 @@ func (r *RequestHandlerEngine) onBatchRequest(originID flow.Identifier, req *mes } // try to get all the blocks by ID - blocks := make([]messages.UntrustedClusterBlock, 0, len(blockIDs)) + proposals := make([]clustermodel.UntrustedProposal, 0, len(blockIDs)) for blockID := range blockIDs { - block, err := r.blocks.ByID(blockID) + proposal, err := r.blocks.ProposalByID(blockID) if errors.Is(err, storage.ErrNotFound) { r.log.Debug().Hex("block_id", blockID[:]).Msg("skipping unknown block") continue @@ -313,11 +314,11 @@ func (r *RequestHandlerEngine) onBatchRequest(originID flow.Identifier, req *mes if err != nil { return fmt.Errorf("could not get block by ID (%s): %w", blockID, err) } - blocks = append(blocks, messages.UntrustedClusterBlockFromInternal(block)) + proposals = append(proposals, clustermodel.UntrustedProposal(*proposal)) } // if there are no blocks to send, skip network message - if len(blocks) == 0 { + if len(proposals) == 0 { r.log.Debug().Msg("skipping empty batch response") return nil } @@ -325,7 +326,7 @@ func (r *RequestHandlerEngine) onBatchRequest(originID flow.Identifier, req *mes // send the response res := &messages.ClusterBlockResponse{ Nonce: req.Nonce, - Blocks: blocks, + Blocks: proposals, } err := r.con.Unicast(res, originID) if err != nil { @@ -348,7 +349,7 @@ func (r *RequestHandlerEngine) processAvailableRequests() error { msg, ok := r.pendingSyncRequests.Get() if ok { - err := r.onSyncRequest(msg.OriginID, msg.Payload.(*messages.SyncRequest)) + err := r.onSyncRequest(msg.OriginID, msg.Payload.(*flow.SyncRequest)) if err != nil { return fmt.Errorf("processing sync request failed: %w", err) } @@ -357,7 +358,7 @@ func (r *RequestHandlerEngine) processAvailableRequests() error { msg, ok = r.pendingRangeRequests.Get() if ok { - err := r.onRangeRequest(msg.OriginID, msg.Payload.(*messages.RangeRequest)) + err := r.onRangeRequest(msg.OriginID, msg.Payload.(*flow.RangeRequest)) if err != nil { return fmt.Errorf("processing range request failed: %w", err) } @@ -366,7 +367,7 @@ func (r *RequestHandlerEngine) processAvailableRequests() error { msg, ok = r.pendingBatchRequests.Get() if ok { - err := r.onBatchRequest(msg.OriginID, msg.Payload.(*messages.BatchRequest)) + err := r.onBatchRequest(msg.OriginID, msg.Payload.(*flow.BatchRequest)) if err != nil { return fmt.Errorf("processing batch request failed: %w", err) } diff --git a/engine/collection/test/cluster_switchover_test.go b/engine/collection/test/cluster_switchover_test.go index a8f04173099..a0ae19371a2 100644 --- a/engine/collection/test/cluster_switchover_test.go +++ b/engine/collection/test/cluster_switchover_test.go @@ -18,12 +18,12 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/cluster" bcluster "github.com/onflow/flow-go/state/cluster/badger" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/inmem" + protocol_state 
"github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/utils/unittest" ) @@ -36,12 +36,12 @@ type ClusterSwitchoverTestCase struct { t *testing.T conf ClusterSwitchoverTestConf - identities flow.IdentityList // identity table - hub *stub.Hub // mock network hub - root protocol.Snapshot // shared root snapshot - nodes []testmock.CollectionNode // collection nodes - sn *mocknetwork.Engine // fake consensus node engine for receiving guarantees - builder *unittest.EpochBuilder // utility for building epochs + nodeInfos []model.NodeInfo // identity table + hub *stub.Hub // mock network hub + root protocol.Snapshot // shared root snapshot + nodes []testmock.CollectionNode // collection nodes + sn *mocknetwork.Engine // fake consensus node engine for receiving guarantees + builder *unittest.EpochBuilder // utility for building epochs // epoch counter -> cluster index -> transaction IDs sentTransactions map[uint64]map[uint]flow.IdentifierList // track submitted transactions @@ -50,15 +50,16 @@ type ClusterSwitchoverTestCase struct { // NewClusterSwitchoverTestCase constructs a new cluster switchover test case // given the configuration, creating all dependencies and mock nodes. func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) *ClusterSwitchoverTestCase { - tc := &ClusterSwitchoverTestCase{ t: t, conf: conf, } - - nodeInfos := unittest.PrivateNodeInfosFixture(int(conf.collectors), unittest.WithRole(flow.RoleCollection)) - collectors := model.ToIdentityList(nodeInfos) - tc.identities = unittest.CompleteIdentitySet(collectors...) + tc.nodeInfos = unittest.PrivateNodeInfosFromIdentityList( + unittest.CompleteIdentitySet( + unittest.IdentityListFixture(int(conf.collectors), unittest.WithRole(flow.RoleCollection))...), + ) + identities := model.ToIdentityList(tc.nodeInfos) + collectors := identities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() assignment := unittest.ClusterAssignment(tc.conf.clusters, collectors) clusters, err := factory.NewClusterList(assignment, collectors) require.NoError(t, err) @@ -66,19 +67,18 @@ func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) rootClusterQCs := make([]flow.ClusterQCVoteData, len(rootClusterBlocks)) for i, cluster := range clusters { signers := make([]model.NodeInfo, 0) - signerIDs := make([]flow.Identifier, 0) - for _, identity := range nodeInfos { + for _, identity := range tc.nodeInfos { if _, inCluster := cluster.ByNodeID(identity.NodeID); inCluster { signers = append(signers, identity) - signerIDs = append(signerIDs, identity.NodeID) } } - qc, err := run.GenerateClusterRootQC(signers, model.ToIdentityList(signers), rootClusterBlocks[i]) + signerIdentities := model.ToIdentityList(signers).Sort(flow.Canonical[flow.Identity]).ToSkeleton() + qc, err := run.GenerateClusterRootQC(signers, signerIdentities, rootClusterBlocks[i]) require.NoError(t, err) rootClusterQCs[i] = flow.ClusterQCVoteDataFromQC(&flow.QuorumCertificateWithSignerIDs{ View: qc.View, BlockID: qc.BlockID, - SignerIDs: signerIDs, + SignerIDs: signerIdentities.NodeIDs(), SigData: qc.SigData, }) } @@ -86,22 +86,41 @@ func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) tc.sentTransactions = make(map[uint64]map[uint]flow.IdentifierList) tc.hub = stub.NewNetworkHub() - // create a root snapshot with the given number of initial clusters - root, result, seal := unittest.BootstrapFixture(tc.identities) - qc := 
unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(root.ID())) - setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) - commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) + rootHeaderBody := unittest.Block.Genesis(flow.Emulator).HeaderBody + counter := uint64(1) - setup.Assignments = unittest.ClusterAssignment(tc.conf.clusters, tc.identities) - commit.ClusterQCs = rootClusterQCs + setup := unittest.EpochSetupFixture( + unittest.WithParticipants(identities.ToSkeleton()), + unittest.SetupWithCounter(counter), + unittest.WithFirstView(rootHeaderBody.View), + unittest.WithFinalView(rootHeaderBody.View+100_000), + unittest.WithAssignments(unittest.ClusterAssignment(tc.conf.clusters, identities.ToSkeleton())), + ) + commit := unittest.EpochCommitFixture( + unittest.CommitWithCounter(counter), + unittest.WithClusterQCsFromAssignments(setup.Assignments), + unittest.WithDKGFromParticipants(identities.ToSkeleton()), + unittest.WithClusterQCs(rootClusterQCs), + ) + // create a root snapshot with the given number of initial clusters + root, result, seal := unittest.BootstrapFixtureWithSetupAndCommit(rootHeaderBody, setup, commit) seal.ResultID = result.ID() - tc.root, err = inmem.SnapshotFromBootstrapState(root, result, seal, qc) + qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(root.ID())) + + tc.root, err = unittest.SnapshotFromBootstrapState(root, result, seal, qc) require.NoError(t, err) + // build a lookup table for node infos + nodeInfoLookup := make(map[flow.Identifier]model.NodeInfo) + for _, nodeInfo := range tc.nodeInfos { + nodeInfoLookup[nodeInfo.NodeID] = nodeInfo + } + // create a mock node for each collector identity - for _, collector := range nodeInfos { - node := testutil.CollectionNode(tc.T(), tc.hub, collector, tc.root) + for _, collector := range collectors { + nodeInfo := nodeInfoLookup[collector.NodeID] + node := testutil.CollectionNode(tc.T(), tc.hub, nodeInfo, tc.root) tc.nodes = append(tc.nodes, node) } @@ -109,7 +128,7 @@ func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) consensus := testutil.GenericNode( tc.T(), tc.hub, - tc.identities.Filter(filter.HasRole(flow.RoleConsensus))[0], + nodeInfoLookup[identities.Filter(filter.HasRole[flow.Identity](flow.RoleConsensus))[0].NodeID], tc.root, ) tc.sn = new(mocknetwork.Engine) @@ -117,16 +136,30 @@ func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) require.NoError(tc.T(), err) // create an epoch builder hooked to each collector's protocol state - states := make([]protocol.FollowerState, 0, len(collectors)) + states := make([]protocol.FollowerState, 0) for _, node := range tc.nodes { states = append(states, node.State) } + + // take first collection node and use its storage as data source for stateMutator + refNode := tc.nodes[0] + stateMutator := protocol_state.NewMutableProtocolState( + refNode.Log, + refNode.EpochProtocolState, + refNode.ProtocolKVStore, + refNode.State.Params(), + refNode.Headers, + refNode.Results, + refNode.Setups, + refNode.EpochCommits, + ) + // when building new epoch we would like to replace fixture cluster QCs with real ones, for that we need // to generate them using node infos - tc.builder = unittest.NewEpochBuilder(tc.T(), states...).UsingCommitOpts(func(commit *flow.EpochCommit) { + tc.builder = unittest.NewEpochBuilder(tc.T(), stateMutator, states...).UsingCommitOpts(func(commit *flow.EpochCommit) { // build a lookup table for node infos nodeInfoLookup := make(map[flow.Identifier]model.NodeInfo) - 
for _, nodeInfo := range nodeInfos { + for _, nodeInfo := range tc.nodeInfos { nodeInfoLookup[nodeInfo.NodeID] = nodeInfo } @@ -140,9 +173,10 @@ func NewClusterSwitchoverTestCase(t *testing.T, conf ClusterSwitchoverTestConf) } // generate root cluster block - rootClusterBlock := cluster.CanonicalRootBlock(commit.Counter, model.ToIdentityList(signers)) + rootClusterBlock, err := cluster.CanonicalRootBlock(commit.Counter, model.ToIdentityList(signers).ToSkeleton()) + require.NoError(tc.T(), err) // generate cluster root qc - qc, err := run.GenerateClusterRootQC(signers, model.ToIdentityList(signers), rootClusterBlock) + qc, err := run.GenerateClusterRootQC(signers, model.ToIdentityList(signers).ToSkeleton(), rootClusterBlock) require.NoError(t, err) signerIDs := toSignerIDs(signers) qcWithSignerIDs := &flow.QuorumCertificateWithSignerIDs{ @@ -212,7 +246,7 @@ func (tc *ClusterSwitchoverTestCase) StartNodes() { nodes = append(nodes, node) } - unittest.RequireCloseBefore(tc.T(), util.AllReady(nodes...), time.Second, "could not start nodes") + unittest.RequireCloseBefore(tc.T(), util.AllReady(nodes...), 3*time.Second, "could not start nodes") // start continuous delivery for all nodes for _, node := range tc.nodes { @@ -241,11 +275,13 @@ func (tc *ClusterSwitchoverTestCase) ServiceAddress() flow.Address { // Transaction returns a transaction which is valid for ingestion by a // collection node in this test suite. func (tc *ClusterSwitchoverTestCase) Transaction(opts ...func(*flow.TransactionBody)) *flow.TransactionBody { - tx := flow.NewTransactionBody(). + tx, err := flow.NewTransactionBodyBuilder(). AddAuthorizer(tc.ServiceAddress()). SetPayer(tc.ServiceAddress()). SetScript(unittest.NoopTxScript()). - SetReferenceBlockID(tc.RootBlock().ID()) + SetReferenceBlockID(tc.RootBlock().ID()). + Build() + require.NoError(tc.T(), err) for _, apply := range opts { apply(tx) @@ -290,7 +326,7 @@ func (tc *ClusterSwitchoverTestCase) Collector(id flow.Identifier) testmock.Coll } // Clusters returns the clusters for the current epoch. 
-func (tc *ClusterSwitchoverTestCase) Clusters(epoch protocol.Epoch) []protocol.Cluster { +func (tc *ClusterSwitchoverTestCase) Clusters(epoch protocol.CommittedEpoch) []protocol.Cluster { clustering, err := epoch.Clustering() require.NoError(tc.T(), err) @@ -312,9 +348,10 @@ func (tc *ClusterSwitchoverTestCase) BlockInEpoch(epochCounter uint64) *flow.Hea for height := root.Height; ; height++ { curr := tc.State().AtHeight(height) next := tc.State().AtHeight(height + 1) - curCounter, err := curr.Epochs().Current().Counter() + currentEpoch, err := curr.Epochs().Current() require.NoError(tc.T(), err) - nextCounter, err := next.Epochs().Current().Counter() + curCounter := currentEpoch.Counter() + nextEpoch, err := next.Epochs().Current() // if we reach a point where the next block doesn't exist, but the // current block has the correct counter, return the current block if err != nil && curCounter == epochCounter { @@ -322,6 +359,7 @@ func (tc *ClusterSwitchoverTestCase) BlockInEpoch(epochCounter uint64) *flow.Hea require.NoError(tc.T(), err) return head } + nextCounter := nextEpoch.Counter() // otherwise, wait until we reach the block where the next block is in // the next epoch - this is the highest block in the requested epoch @@ -346,7 +384,7 @@ func (tc *ClusterSwitchoverTestCase) SubmitTransactionToCluster( // get any block within the target epoch as the transaction's reference block refBlock := tc.BlockInEpoch(epochCounter) tx := tc.Transaction(func(tx *flow.TransactionBody) { - tx.SetReferenceBlockID(refBlock.ID()) + tx.ReferenceBlockID = refBlock.ID() }) clusterTx := unittest.AlterTransactionForCluster(*tx, clustering, clusterMembers, nil) tc.ExpectTransaction(epochCounter, clusterIndex, clusterTx.ID()) @@ -360,7 +398,7 @@ func (tc *ClusterSwitchoverTestCase) SubmitTransactionToCluster( // cluster) and asserts that only transaction specified by ExpectTransaction are // included. func (tc *ClusterSwitchoverTestCase) CheckClusterState( - identity *flow.Identity, + identity *flow.IdentitySkeleton, clusterInfo protocol.Cluster, ) { node := tc.Collector(identity.NodeID) @@ -410,10 +448,13 @@ func RunTestCase(tc *ClusterSwitchoverTestCase) { // build the epoch, ending on the first block on the next epoch tc.builder.BuildEpoch().CompleteEpoch() // build halfway through the grace period for the epoch 1 cluster - tc.builder.BuildBlocks(flow.DefaultTransactionExpiry / 2) + tc.builder.AddBlocksWithSeals(flow.DefaultTransactionExpiry/2, 1) - epoch1 := tc.State().Final().Epochs().Previous() - epoch2 := tc.State().Final().Epochs().Current() + finalSnap := tc.State().Final() + epoch1, err := finalSnap.Epochs().Previous() + require.NoError(tc.T(), err) + epoch2, err := finalSnap.Epochs().Current() + require.NoError(tc.T(), err) epoch1Clusters := tc.Clusters(epoch1) epoch2Clusters := tc.Clusters(epoch2) @@ -441,7 +482,7 @@ func RunTestCase(tc *ClusterSwitchoverTestCase) { // NOTE: this is here solely to improve test reliability, as it means that // while we are waiting for a guarantee there is only one cluster consensus // instance running (per node) rather than two. 
- tc.builder.BuildBlocks(flow.DefaultTransactionExpiry/2 + 1) + tc.builder.AddBlocksWithSeals(flow.DefaultTransactionExpiry/2+1, 1) // wait for epoch 2 transactions to be guaranteed unittest.RequireReturnsBefore(tc.T(), waitForGuarantees.Wait, tc.Timeout(), "did not receive guarantees at consensus node") diff --git a/engine/common/follower/cache/cache.go b/engine/common/follower/cache/cache.go index cb246cdc41f..16a848e96c6 100644 --- a/engine/common/follower/cache/cache.go +++ b/engine/common/follower/cache/cache.go @@ -2,13 +2,16 @@ package cache import ( "errors" + "fmt" "sync" "github.com/rs/zerolog" - "github.com/onflow/flow-go/engine/consensus/sealing/counters" + "github.com/onflow/flow-go/consensus/hotstuff" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" ) @@ -17,19 +20,17 @@ var ( ErrDisconnectedBatch = errors.New("batch must be a sequence of connected blocks") ) -// OnEquivocation is a callback to report observing two different blocks with the same view. -type OnEquivocation func(first *flow.Block, other *flow.Block) -type BlocksByID map[flow.Identifier]*flow.Block +type BlocksByID map[flow.Identifier]*flow.Proposal // batchContext contains contextual data for batch of blocks. Per convention, a batch is // a continuous sequence of blocks, i.e. `batch[k]` is the parent block of `batch[k+1]`. type batchContext struct { - batchParent *flow.Block // immediate parent of the first block in batch, i.e. `batch[0]` - batchChild *flow.Block // immediate child of the last block in batch, i.e. `batch[len(batch)-1]` + batchParent *flow.Proposal // immediate parent of the first block in batch, i.e. `batch[0]` + batchChild *flow.Proposal // immediate child of the last block in batch, i.e. `batch[len(batch)-1]` // equivocatingBlocks holds the list of equivocations that the batch contained, when comparing to the // cached blocks. An equivocation are two blocks for the same view that have different block IDs. - equivocatingBlocks [][2]*flow.Block + equivocatingBlocks [][2]*flow.Proposal // redundant marks if ALL blocks in batch are already stored in cache, meaning that // such input is identical to what was previously processed. @@ -42,69 +43,68 @@ type batchContext struct { // Resolves certified blocks when processing incoming batches. // Concurrency safe. type Cache struct { - backend *herocache.Cache // cache with random ejection + backend *herocache.Cache[*flow.Proposal] // cache with random ejection lock sync.RWMutex // secondary indices byView map[uint64]BlocksByID // lookup of blocks by their respective view; used to detect equivocation byParent map[flow.Identifier]BlocksByID // lookup of blocks by their parentID, for finding a block's known children - onEquivocation OnEquivocation // when message equivocation has been detected report it using this callback - lowestView counters.StrictMonotonousCounter // lowest view that the cache accepts blocks for + notifier hotstuff.ProposalViolationConsumer // equivocations will be reported using this notifier + lowestView counters.StrictMonotonicCounter // lowest view that the cache accepts blocks for } // Peek performs lookup of cached block by blockID. 
// Concurrency safe -func (c *Cache) Peek(blockID flow.Identifier) *flow.Block { +func (c *Cache) Peek(blockID flow.Identifier) *flow.Proposal { c.lock.RLock() defer c.lock.RUnlock() - if block, found := c.backend.ByID(blockID); found { - return block.(*flow.Block) + if block, found := c.backend.Get(blockID); found { + return block } else { return nil } } // NewCache creates new instance of Cache -func NewCache(log zerolog.Logger, limit uint32, collector module.HeroCacheMetrics, onEquivocation OnEquivocation) *Cache { +func NewCache(log zerolog.Logger, limit uint32, collector module.HeroCacheMetrics, notifier hotstuff.ProposalViolationConsumer) *Cache { // We consume ejection event from HeroCache to here to drop ejected blocks from our secondary indices. - distributor := NewDistributor() + distributor := NewDistributor[*flow.Proposal]() cache := &Cache{ - backend: herocache.NewCache( + backend: herocache.NewCache[*flow.Proposal]( limit, herocache.DefaultOversizeFactor, heropool.RandomEjection, log.With().Str("component", "follower.cache").Logger(), collector, - herocache.WithTracer(distributor), + herocache.WithTracer[*flow.Proposal](distributor), ), - byView: make(map[uint64]BlocksByID), - byParent: make(map[flow.Identifier]BlocksByID), - onEquivocation: onEquivocation, + byView: make(map[uint64]BlocksByID), + byParent: make(map[flow.Identifier]BlocksByID), + notifier: notifier, } - distributor.AddConsumer(cache.handleEjectedEntity) + distributor.AddConsumer(cache.handleEjectedBlock) return cache } -// handleEjectedEntity performs cleanup of secondary indexes to prevent memory leaks. +// handleEjectedBlock performs cleanup of secondary indexes to prevent memory leaks. // WARNING: Concurrency safety of this function is guaranteed by `c.lock`. This method is only called // by `herocache.Cache.Add` and we perform this call while `c.lock` is in locked state. -func (c *Cache) handleEjectedEntity(entity flow.Entity) { - block := entity.(*flow.Block) - blockID := block.ID() +func (c *Cache) handleEjectedBlock(proposal *flow.Proposal) { + blockID := proposal.Block.ID() // remove block from the set of blocks for this view - blocksForView := c.byView[block.Header.View] + blocksForView := c.byView[proposal.Block.View] delete(blocksForView, blockID) if len(blocksForView) == 0 { - delete(c.byView, block.Header.View) + delete(c.byView, proposal.Block.View) } // remove block from the parent's set of its children - siblings := c.byParent[block.Header.ParentID] + siblings := c.byParent[proposal.Block.ParentID] delete(siblings, blockID) if len(siblings) == 0 { - delete(c.byParent, block.Header.ParentID) + delete(c.byParent, proposal.Block.ParentID) } } @@ -118,14 +118,14 @@ func (c *Cache) handleEjectedEntity(entity flow.Entity) { // - parent for first block available in cache allowing to certify it, we can certify one extra block(parent). // // - for last block: -// - no child available for last block, need to wait for child to certify it. -// - child for last block available in cache allowing to certify it, we can certify one extra block(child). +// - no child available for last block, need to wait for child to certify it (certify one fewer block). +// - child for last block available in cache allowing to certify it, we can certify the last block. // // All blocks from the batch are stored in the cache to provide deduplication. // The function returns any new certified chain of blocks created by addition of the batch. 
-// Returns `certifiedBatch, certifyingQC` if the input batch has more than one block, and/or if either a child +// Returns `certifiedBatch` if the input batch has more than one block, and/or if either a child // or parent of the batch is in the cache. The implementation correctly handles cases with `len(batch) == 1` -// or `len(batch) == 0`, where it returns `nil, nil` in the following cases: +// or `len(batch) == 0`, where it returns `nil` in the following cases: // - the input batch has exactly one block and neither its parent nor child is in the cache. // - the input batch is empty // @@ -134,18 +134,17 @@ func (c *Cache) handleEjectedEntity(entity flow.Entity) { // // Expected errors during normal operations: // - ErrDisconnectedBatch -func (c *Cache) AddBlocks(batch []*flow.Block) (certifiedBatch []*flow.Block, certifyingQC *flow.QuorumCertificate, err error) { +func (c *Cache) AddBlocks(batch []*flow.Proposal) (certifiedBatch []flow.CertifiedBlock, err error) { batch = c.trimLeadingBlocksBelowPruningThreshold(batch) - batchSize := len(batch) - if batchSize < 1 { // empty batch is no-op - return nil, nil, nil + if len(batch) < 1 { // empty batch is no-op + return nil, nil } // precompute block IDs (outside of lock) and sanity-check batch itself that blocks are connected blockIDs, err := enforceSequentialBlocks(batch) if err != nil { - return nil, nil, err + return nil, err } // Single atomic operation (main logic), with result returned as `batchContext` @@ -159,38 +158,45 @@ func (c *Cache) AddBlocks(batch []*flow.Block) (certifiedBatch []*flow.Block, ce // are already known: then skip further processing bc := c.unsafeAtomicAdd(blockIDs, batch) if bc.redundant { - return nil, nil, nil + return nil, nil } - // If there exists a child of the last block in the batch, then the entire batch is certified. - // Otherwise, all blocks in the batch _except_ for the last one are certified + // If there exists a parent for the batch's first block, then this parent is certified + // by the batch. Hence, we prepend certifiedBatch by the parent. + if bc.batchParent != nil { + batch = append([]*flow.Proposal{bc.batchParent}, batch...) + } + // If a child of the last block in the batch already exists in the cache, then the entire batch is certified, + // and we append the child to the batch. Hence, after this operation the following holds: for all blocks in the + // batch _except_ the last one, the certifying QC is in the subsequent batch element. if bc.batchChild != nil { - certifiedBatch = batch - certifyingQC = bc.batchChild.Header.QuorumCertificate() - } else { - certifiedBatch = batch[:batchSize-1] - certifyingQC = batch[batchSize-1].Header.QuorumCertificate() + batch = append(batch, bc.batchChild) } - // caution: in the case `len(batch) == 1`, the `certifiedBatch` might be empty now (else-case) - // If there exists a parent for the batch's first block, then this is parent is certified - // by the batch. Hence, we prepend certifiedBatch by the parent. - if bc.batchParent != nil { - s := make([]*flow.Block, 0, 1+len(certifiedBatch)) - s = append(s, bc.batchParent) - certifiedBatch = append(s, certifiedBatch...)
+ certifiedBatch = make([]flow.CertifiedBlock, 0, len(batch)-1) + for i, proposal := range batch[:len(batch)-1] { + child := batch[i+1].Block + if !child.ContainsParentQC() { + return nil, fmt.Errorf("could not retrieve ParentQC from block (id=%x)", child.ID()) + } + certifiedBlock, err := flow.NewCertifiedBlock(proposal, child.ParentQC()) + if err != nil { + return nil, fmt.Errorf("could not construct certified block: %w", err) + } + certifiedBatch = append(certifiedBatch, certifiedBlock) } + // caution: in the case `len(batch) == 1`, the `certifiedBatch` might be empty now (if there was no batchParent or batchChild) // report equivocations for _, pair := range bc.equivocatingBlocks { - c.onEquivocation(pair[0], pair[1]) + c.notifier.OnDoubleProposeDetected(model.BlockFromFlow(pair[0].Block.ToHeader()), model.BlockFromFlow(pair[1].Block.ToHeader())) } if len(certifiedBatch) < 1 { - return nil, nil, nil + return nil, nil } - return certifiedBatch, certifyingQC, nil + return certifiedBatch, nil } // PruneUpToView sets the lowest view that we are accepting blocks for. Any blocks @@ -235,10 +241,10 @@ func (c *Cache) removeByView(view uint64, blocks BlocksByID) { for blockID, block := range blocks { c.backend.Remove(blockID) - siblings := c.byParent[block.Header.ParentID] + siblings := c.byParent[block.Block.ParentID] delete(siblings, blockID) if len(siblings) == 0 { - delete(c.byParent, block.Header.ParentID) + delete(c.byParent, block.Block.ParentID) } } @@ -260,13 +266,13 @@ func (c *Cache) removeByView(view uint64, blocks BlocksByID) { // - requires pre-computed blockIDs in the same order as fullBlocks // // Any errors are symptoms of internal state corruption. -func (c *Cache) unsafeAtomicAdd(blockIDs []flow.Identifier, fullBlocks []*flow.Block) (bc batchContext) { +func (c *Cache) unsafeAtomicAdd(blockIDs []flow.Identifier, fullBlocks []*flow.Proposal) (bc batchContext) { c.lock.Lock() defer c.lock.Unlock() // check whether we have the parent of first block already in our cache: - if parent, ok := c.backend.ByID(fullBlocks[0].Header.ParentID); ok { - bc.batchParent = parent.(*flow.Block) + if parent, ok := c.backend.Get(fullBlocks[0].Block.ParentID); ok { + bc.batchParent = parent } // check whether we have a child of last block already in our cache: @@ -286,7 +292,7 @@ func (c *Cache) unsafeAtomicAdd(blockIDs []flow.Identifier, fullBlocks []*flow.B for i, block := range fullBlocks { equivocation, cached := c.cache(blockIDs[i], block) if equivocation != nil { - bc.equivocatingBlocks = append(bc.equivocatingBlocks, [2]*flow.Block{equivocation, block}) + bc.equivocatingBlocks = append(bc.equivocatingBlocks, [2]*flow.Proposal{equivocation, block}) } if cached { storedBlocks++ @@ -301,8 +307,8 @@ func (c *Cache) unsafeAtomicAdd(blockIDs []flow.Identifier, fullBlocks []*flow.B // equivocation. The first return value contains the already-cached equivocating block or `nil` otherwise. // Repeated calls with the same block are no-ops. // CAUTION: not concurrency safe: execute within Cache's lock. -func (c *Cache) cache(blockID flow.Identifier, block *flow.Block) (equivocation *flow.Block, stored bool) { - cachedBlocksAtView, haveCachedBlocksAtView := c.byView[block.Header.View] +func (c *Cache) cache(blockID flow.Identifier, block *flow.Proposal) (equivocation *flow.Proposal, stored bool) { + cachedBlocksAtView, haveCachedBlocksAtView := c.byView[block.Block.View] // Check whether there is a block with the same view already in the cache. 
// During happy-path operations `cachedBlocksAtView` contains usually zero blocks or exactly one block, which // is our input `block` (duplicate). Larger sets of blocks can only be caused by slashable byzantine actions. @@ -327,15 +333,15 @@ func (c *Cache) cache(blockID flow.Identifier, block *flow.Block) (equivocation // populate `byView` index if !haveCachedBlocksAtView { cachedBlocksAtView = make(BlocksByID) - c.byView[block.Header.View] = cachedBlocksAtView + c.byView[block.Block.View] = cachedBlocksAtView } cachedBlocksAtView[blockID] = block // populate `byParent` index - siblings, ok := c.byParent[block.Header.ParentID] + siblings, ok := c.byParent[block.Block.ParentID] if !ok { siblings = make(BlocksByID) - c.byParent[block.Header.ParentID] = siblings + c.byParent[block.Block.ParentID] = siblings } siblings[blockID] = block @@ -346,15 +352,15 @@ func (c *Cache) cache(blockID flow.Identifier, block *flow.Block) (equivocation // is the parent block of `batch[k+1]`. Returns a slice with IDs of the blocks in the same order // as batch. Returns `ErrDisconnectedBatch` if blocks are not a continuous sequence. // Pure function, hence concurrency safe. -func enforceSequentialBlocks(batch []*flow.Block) ([]flow.Identifier, error) { +func enforceSequentialBlocks(batch []*flow.Proposal) ([]flow.Identifier, error) { blockIDs := make([]flow.Identifier, 0, len(batch)) - parentID := batch[0].ID() + parentID := batch[0].Block.ID() blockIDs = append(blockIDs, parentID) for _, b := range batch[1:] { - if b.Header.ParentID != parentID { + if b.Block.ParentID != parentID { return nil, ErrDisconnectedBatch } - parentID = b.ID() + parentID = b.Block.ID() blockIDs = append(blockIDs, parentID) } return blockIDs, nil @@ -370,10 +376,10 @@ func enforceSequentialBlocks(batch []*flow.Block) ([]flow.Identifier, error) { // - For this method, we do _not_ assume any specific ordering of the blocks. // - We drop all blocks at the _beginning_ that we anyway would not want to cache. // - The returned slice of blocks could still contain blocks with views below the cutoff. 
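The trimming rule in the bullets above is deliberately conservative: only the contiguous prefix of below-threshold blocks is dropped, because removing a later block would disconnect the batch. A small self-contained illustration with bare view numbers, not flow-go types:

package main

import "fmt"

// trimLeadingBelowThreshold drops only the leading blocks whose view is below
// the threshold; once one block passes, the rest of the batch is kept intact
// so the remaining sequence stays connected. Toy sketch; not the flow-go code.
func trimLeadingBelowThreshold(views []uint64, threshold uint64) []uint64 {
	for i, v := range views {
		if v >= threshold {
			return views[i:]
		}
	}
	return nil
}

func main() {
	fmt.Println(trimLeadingBelowThreshold([]uint64{3, 4, 7, 4, 9}, 5))
	// Output: [7 4 9]; the 4 after 7 survives, matching the caveat above.
}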
-func (c *Cache) trimLeadingBlocksBelowPruningThreshold(batch []*flow.Block) []*flow.Block { +func (c *Cache) trimLeadingBlocksBelowPruningThreshold(batch []*flow.Proposal) []*flow.Proposal { lowestView := c.lowestView.Value() for i, block := range batch { - if block.Header.View >= lowestView { + if block.Block.View >= lowestView { return batch[i:] } } diff --git a/engine/common/follower/cache/cache_test.go b/engine/common/follower/cache/cache_test.go index a8babf61bef..fa96d4ec9e4 100644 --- a/engine/common/follower/cache/cache_test.go +++ b/engine/common/follower/cache/cache_test.go @@ -11,7 +11,8 @@ import ( "go.uber.org/atomic" "golang.org/x/exp/slices" - "github.com/onflow/flow-go/engine/common/follower/cache/mock" + "github.com/onflow/flow-go/consensus/hotstuff/mocks" + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" @@ -27,49 +28,55 @@ const defaultHeroCacheLimit = 1000 type CacheSuite struct { suite.Suite - onEquivocation *mock.OnEquivocation - cache *Cache + consumer *mocks.ProposalViolationConsumer + cache *Cache } func (s *CacheSuite) SetupTest() { collector := metrics.NewNoopCollector() - s.onEquivocation = mock.NewOnEquivocation(s.T()) - s.cache = NewCache(unittest.Logger(), defaultHeroCacheLimit, collector, s.onEquivocation.Execute) + s.consumer = mocks.NewProposalViolationConsumer(s.T()) + s.cache = NewCache(unittest.Logger(), defaultHeroCacheLimit, collector, s.consumer) } // TestPeek tests if previously added blocks can be queried by block ID. func (s *CacheSuite) TestPeek() { - blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture()) - _, _, err := s.cache.AddBlocks(blocks) + blocks := unittest.ProposalChainFixtureFrom(10, unittest.BlockHeaderFixture()) + _, err := s.cache.AddBlocks(blocks) require.NoError(s.T(), err) - for _, block := range blocks { - actual := s.cache.Peek(block.ID()) + for _, proposal := range blocks { + actual := s.cache.Peek(proposal.Block.ID()) require.NotNil(s.T(), actual) - require.Equal(s.T(), actual.ID(), block.ID()) + require.Equal(s.T(), actual.Block.ID(), proposal.Block.ID()) } } // TestBlocksEquivocation tests that cache tracks blocks equivocation when adding blocks that have the same view // but different block ID. Equivocation is a symptom of byzantine actions and needs to be detected and addressed. 
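Equivocation, as used in these tests, means two distinct proposals for the same view. The cache detects it through its byView index; below is a compact sketch of that rule with simplified one-block-per-view bookkeeping (the real index keeps a set of blocks per view and reports via hotstuff's ProposalViolationConsumer):

package main

import "fmt"

type proposal struct {
	ID   string
	View uint64
}

// detectEquivocation indexes proposals by view; a second, different proposal
// at an already-occupied view is an equivocation pair. Re-adding the same
// proposal is a harmless duplicate. Toy sketch; not the flow-go code.
func detectEquivocation(byView map[uint64]proposal, p proposal) (pair [2]proposal, equivocation bool) {
	existing, occupied := byView[p.View]
	if occupied && existing.ID != p.ID {
		return [2]proposal{existing, p}, true
	}
	byView[p.View] = p
	return pair, false
}

func main() {
	byView := make(map[uint64]proposal)
	detectEquivocation(byView, proposal{ID: "a", View: 8})
	_, dup := detectEquivocation(byView, proposal{ID: "a", View: 8})
	pair, equiv := detectEquivocation(byView, proposal{ID: "b", View: 8})
	fmt.Println(dup, equiv, pair) // false true [{a 8} {b 8}]
}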
func (s *CacheSuite) TestBlocksEquivocation() { - blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture()) - _, _, err := s.cache.AddBlocks(blocks) + blocks := unittest.ProposalChainFixtureFrom(10, unittest.BlockHeaderFixture()) + _, err := s.cache.AddBlocks(blocks) require.NoError(s.T(), err) // adding same blocks again shouldn't result in any equivocation events - _, _, err = s.cache.AddBlocks(blocks) + _, err = s.cache.AddBlocks(blocks) require.NoError(s.T(), err) equivocatedBlocks, _, _ := unittest.ChainFixture(len(blocks) - 1) + equivocatedProposals := make([]*flow.Proposal, 0, len(equivocatedBlocks)-1) + // we will skip genesis block as it will be the same for i := 1; i < len(equivocatedBlocks); i++ { block := equivocatedBlocks[i] // update view to be the same as already submitted batch to trigger equivocation - block.Header.View = blocks[i].Header.View - // update parentID so blocks are still connected - block.Header.ParentID = equivocatedBlocks[i-1].ID() - s.onEquivocation.On("Execute", blocks[i], block).Once() + block.View = blocks[i].Block.View + // update parentID and parentView so blocks are still connected + block.ParentID = equivocatedBlocks[i-1].ID() + block.ParentView = equivocatedBlocks[i-1].View + s.consumer.On("OnDoubleProposeDetected", + model.BlockFromFlow(blocks[i].Block.ToHeader()), model.BlockFromFlow(block.ToHeader())).Return().Once() + + equivocatedProposals = append(equivocatedProposals, unittest.ProposalFromBlock(block)) } - _, _, err = s.cache.AddBlocks(equivocatedBlocks) + _, err = s.cache.AddBlocks(equivocatedProposals) require.NoError(s.T(), err) } @@ -77,23 +84,23 @@ func (s *CacheSuite) TestBlocksEquivocation() { // results in error. func (s *CacheSuite) TestBlocksAreNotConnected() { s.Run("blocks-not-sequential", func() { - blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture()) + blocks := unittest.ProposalChainFixtureFrom(10, unittest.BlockHeaderFixture()) // shuffling blocks will break the order between them rendering batch as not sequential rand.Shuffle(len(blocks), func(i, j int) { blocks[i], blocks[j] = blocks[j], blocks[i] }) - _, _, err := s.cache.AddBlocks(blocks) + _, err := s.cache.AddBlocks(blocks) require.ErrorIs(s.T(), err, ErrDisconnectedBatch) }) s.Run("blocks-with-gaps", func() { - blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture()) + blocks := unittest.ProposalChainFixtureFrom(10, unittest.BlockHeaderFixture()) - // altering payload hash will break ParentID in next block rendering batch as not sequential - blocks[len(blocks)/2].Header.PayloadHash = unittest.IdentifierFixture() + // altering Height will break ParentID in next block, rendering batch as not sequential + blocks[len(blocks)/2].Block.Height += 1 - _, _, err := s.cache.AddBlocks(blocks) + _, err := s.cache.AddBlocks(blocks) require.ErrorIs(s.T(), err, ErrDisconnectedBatch) }) } @@ -103,101 +110,109 @@ func (s *CacheSuite) TestBlocksAreNotConnected() { // We expect that A will get certified after adding B. 
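The scenario notation A <- B[QC_A] means B carries the quorum certificate for A, so receiving B is what certifies A. A toy model of that pairing, using illustrative structs rather than flow.CertifiedBlock:

package main

import "fmt"

type block struct {
	ID       string
	ParentQC string // QC certifying the parent block, carried by the child
}

type certified struct {
	Block        block
	CertifyingQC string
}

// certifyConnected pairs each block with its child's parent QC. The last
// block stays uncertified until a child arrives, mirroring the behavior the
// cache tests exercise. Toy sketch; not the flow-go implementation.
func certifyConnected(batch []block) []certified {
	var out []certified
	for i := 0; i+1 < len(batch); i++ {
		out = append(out, certified{Block: batch[i], CertifyingQC: batch[i+1].ParentQC})
	}
	return out
}

func main() {
	batch := []block{
		{ID: "A", ParentQC: "QC_root"},
		{ID: "B", ParentQC: "QC_A"},
		{ID: "C", ParentQC: "QC_B"},
	}
	for _, c := range certifyConnected(batch) {
		fmt.Printf("%s certified by %s\n", c.Block.ID, c.CertifyingQC)
	}
	// A certified by QC_A
	// B certified by QC_B
}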
func (s *CacheSuite) TestChildCertifiesParent() { block := unittest.BlockFixture() - certifiedBatch, certifyingQC, err := s.cache.AddBlocks([]*flow.Block{&block}) + proposal := unittest.ProposalFromBlock(block) + certifiedBatch, err := s.cache.AddBlocks([]*flow.Proposal{proposal}) require.NoError(s.T(), err) require.Empty(s.T(), certifiedBatch) - require.Nil(s.T(), certifyingQC) - child := unittest.BlockWithParentFixture(block.Header) - certifiedBatch, certifyingQC, err = s.cache.AddBlocks([]*flow.Block{child}) + child := unittest.BlockWithParentFixture(block.ToHeader()) + certifiedBatch, err = s.cache.AddBlocks([]*flow.Proposal{unittest.ProposalFromBlock(child)}) require.NoError(s.T(), err) require.Len(s.T(), certifiedBatch, 1) - require.NotNil(s.T(), certifyingQC) - require.Equal(s.T(), block.ID(), certifyingQC.BlockID) - require.Equal(s.T(), certifiedBatch[0], &block) + require.Equal(s.T(), block.ID(), certifiedBatch[0].CertifyingQC.BlockID) + require.Equal(s.T(), certifiedBatch[0].Proposal, proposal) } // TestChildBeforeParent tests a scenario: A <- B[QC_A]. // First we add B and then A, in two different batches. // We expect that A will get certified after adding A. func (s *CacheSuite) TestChildBeforeParent() { - blocks := unittest.ChainFixtureFrom(2, unittest.BlockHeaderFixture()) - _, _, err := s.cache.AddBlocks([]*flow.Block{blocks[1]}) + blocks := unittest.ProposalChainFixtureFrom(2, unittest.BlockHeaderFixture()) + _, err := s.cache.AddBlocks(blocks[1:2]) require.NoError(s.T(), err) - certifiedBatch, certifyingQC, err := s.cache.AddBlocks([]*flow.Block{blocks[0]}) + certifiedBatch, err := s.cache.AddBlocks(blocks[0:1]) require.NoError(s.T(), err) require.Len(s.T(), certifiedBatch, 1) - require.NotNil(s.T(), certifyingQC) - require.Equal(s.T(), blocks[0].ID(), certifyingQC.BlockID) - require.Equal(s.T(), certifiedBatch[0], blocks[0]) + require.Equal(s.T(), blocks[0].Block.ID(), certifiedBatch[0].CertifyingQC.BlockID) + require.Equal(s.T(), blocks[0], certifiedBatch[0].Proposal) } // TestBlockInTheMiddle tests a scenario: A <- B[QC_A] <- C[QC_B]. // We add blocks one by one: C, A, B, we expect that after adding B, we will be able to // certify [A, B] with QC_B as certifying QC. func (s *CacheSuite) TestBlockInTheMiddle() { - blocks := unittest.ChainFixtureFrom(3, unittest.BlockHeaderFixture()) + blocks := unittest.ProposalChainFixtureFrom(3, unittest.BlockHeaderFixture()) // add C - certifiedBlocks, certifiedQC, err := s.cache.AddBlocks(blocks[2:]) + certifiedBlocks, err := s.cache.AddBlocks(blocks[2:]) require.NoError(s.T(), err) require.Empty(s.T(), certifiedBlocks) - require.Nil(s.T(), certifiedQC) // add A - certifiedBlocks, certifiedQC, err = s.cache.AddBlocks(blocks[:1]) + certifiedBlocks, err = s.cache.AddBlocks(blocks[:1]) require.NoError(s.T(), err) require.Empty(s.T(), certifiedBlocks) - require.Nil(s.T(), certifiedQC) // add B - certifiedBlocks, certifiedQC, err = s.cache.AddBlocks(blocks[1:2]) + certifiedBlocks, err = s.cache.AddBlocks(blocks[1:2]) require.NoError(s.T(), err) - require.Equal(s.T(), blocks[:2], certifiedBlocks) - require.Equal(s.T(), blocks[2].Header.QuorumCertificate(), certifiedQC) + require.Len(s.T(), certifiedBlocks, 2) + require.Equal(s.T(), blocks[0], certifiedBlocks[0].Proposal) + require.Equal(s.T(), blocks[len(blocks)-2], certifiedBlocks[len(certifiedBlocks)-1].Proposal) + require.Equal(s.T(), blocks[2].Block.ParentQC(), certifiedBlocks[1].CertifyingQC) } // TestAddBatch tests a scenario: B1 <- ... <- BN added in one batch. 
// We expect that all blocks except the last one will be certified. // Certifying QC will be taken from last block. func (s *CacheSuite) TestAddBatch() { - blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture()) - certifiedBatch, certifyingQC, err := s.cache.AddBlocks(blocks) + blocks := unittest.ProposalChainFixtureFrom(10, unittest.BlockHeaderFixture()) + certifiedBatch, err := s.cache.AddBlocks(blocks) require.NoError(s.T(), err) - require.Equal(s.T(), blocks[:len(blocks)-1], certifiedBatch) - require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC) + require.Len(s.T(), certifiedBatch, 9, "there should be %d - 1 certified blocks", len(blocks)) + for i := 0; i < len(certifiedBatch)-1; i++ { + certifiedBlock := certifiedBatch[i] + require.Equal(s.T(), blocks[i], certifiedBlock.Proposal) + require.Equal(s.T(), blocks[i+1].Block.ParentQC(), certifiedBlock.CertifyingQC) + require.Equal(s.T(), certifiedBlock.Proposal.Block.ID(), certifiedBlock.CertifyingQC.BlockID) + require.Equal(s.T(), certifiedBlock.Proposal.Block.View, certifiedBlock.CertifyingQC.View) + } } // TestDuplicatedBatch checks that processing redundant inputs rejects batches where all blocks // already reside in the cache. Batches that have at least one new block should be accepted. func (s *CacheSuite) TestDuplicatedBatch() { - blocks := unittest.ChainFixtureFrom(10, unittest.BlockHeaderFixture()) + blocks := unittest.ProposalChainFixtureFrom(10, unittest.BlockHeaderFixture()) - certifiedBatch, certifyingQC, err := s.cache.AddBlocks(blocks[1:]) + certifiedBatch, err := s.cache.AddBlocks(blocks[1:]) require.NoError(s.T(), err) - require.Equal(s.T(), blocks[1:len(blocks)-1], certifiedBatch) - require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC) + require.Len(s.T(), certifiedBatch, len(blocks)-2) + require.Equal(s.T(), blocks[1], certifiedBatch[0].Proposal) + require.Equal(s.T(), blocks[len(blocks)-2], certifiedBatch[len(certifiedBatch)-1].Proposal) + require.Equal(s.T(), blocks[len(blocks)-1].Block.ParentQC(), certifiedBatch[len(certifiedBatch)-1].CertifyingQC) // add same batch again, this has to be rejected as redundant input - certifiedBatch, certifyingQC, err = s.cache.AddBlocks(blocks[1:]) + certifiedBatch, err = s.cache.AddBlocks(blocks[1:]) require.NoError(s.T(), err) require.Empty(s.T(), certifiedBatch) - require.Nil(s.T(), certifyingQC) - // add batch with one extra leading block, this has to accepted even though 9 out of 10 blocks + // add batch with one extra leading block, this should be accepted even though 9 out of 10 blocks // were already processed - certifiedBatch, certifyingQC, err = s.cache.AddBlocks(blocks) + certifiedBatch, err = s.cache.AddBlocks(blocks) require.NoError(s.T(), err) - require.Equal(s.T(), blocks[:len(blocks)-1], certifiedBatch) - require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC) + require.Len(s.T(), certifiedBatch, len(blocks)-1) + require.Equal(s.T(), blocks[0], certifiedBatch[0].Proposal) + require.Equal(s.T(), blocks[len(blocks)-2], certifiedBatch[len(certifiedBatch)-1].Proposal) + require.Equal(s.T(), blocks[len(blocks)-1].Block.ParentQC(), certifiedBatch[len(certifiedBatch)-1].CertifyingQC) } // TestPruneUpToView tests that blocks lower than pruned height will be properly filtered out from incoming batch. 
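// Aside: the certification rule behind the assertions in TestAddBatch and
// TestDuplicatedBatch above, in isolation. Within a connected batch
// b[0] <- ... <- b[n-1], every block except the last is certified by the
// parent QC embedded in its child. A hedged sketch with simplified,
// hypothetical types:
type sketchQC struct {
	BlockID [32]byte
	View    uint64
}

type sketchProposal struct {
	ID       [32]byte
	View     uint64
	ParentQC sketchQC // QC for the parent, carried by every non-genesis block
}

type sketchCertified struct {
	Proposal     *sketchProposal
	CertifyingQC sketchQC
}

// certifyAllButLast pairs each block with the QC carried by its child; the
// last block stays uncertified until a child for it arrives.
func certifyAllButLast(batch []*sketchProposal) []sketchCertified {
	if len(batch) == 0 {
		return nil
	}
	out := make([]sketchCertified, 0, len(batch)-1)
	for i := 0; i+1 < len(batch); i++ {
		out = append(out, sketchCertified{Proposal: batch[i], CertifyingQC: batch[i+1].ParentQC})
	}
	return out
}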
func (s *CacheSuite) TestPruneUpToView() { - blocks := unittest.ChainFixtureFrom(3, unittest.BlockHeaderFixture()) - s.cache.PruneUpToView(blocks[1].Header.View) - certifiedBatch, certifyingQC, err := s.cache.AddBlocks(blocks) + blocks := unittest.ProposalChainFixtureFrom(3, unittest.BlockHeaderFixture()) + s.cache.PruneUpToView(blocks[1].Block.View) + certifiedBatch, err := s.cache.AddBlocks(blocks) require.NoError(s.T(), err) - require.Equal(s.T(), blocks[1:len(blocks)-1], certifiedBatch) - require.Equal(s.T(), blocks[len(blocks)-1].Header.QuorumCertificate(), certifyingQC) + require.Equal(s.T(), blocks[1], certifiedBatch[0].Proposal) + require.Equal(s.T(), blocks[len(blocks)-2], certifiedBatch[len(certifiedBatch)-1].Proposal) + require.Equal(s.T(), blocks[len(blocks)-1].Block.ParentQC(), certifiedBatch[len(certifiedBatch)-1].CertifyingQC) } // TestConcurrentAdd simulates multiple workers adding batches of blocks out of order. @@ -214,18 +229,18 @@ func (s *CacheSuite) TestConcurrentAdd() { blocksPerBatch := 10 blocksPerWorker := blocksPerBatch * batchesPerWorker // ChainFixture generates N+1 blocks since it adds a root block - blocks := unittest.ChainFixtureFrom(workers*blocksPerWorker, unittest.BlockHeaderFixture()) + blocks := unittest.ProposalChainFixtureFrom(workers*blocksPerWorker, unittest.BlockHeaderFixture()) var wg sync.WaitGroup wg.Add(workers) var certifiedBlocksLock sync.Mutex - var allCertifiedBlocks []*flow.Block + var allCertifiedBlocks []flow.CertifiedBlock for i := 0; i < workers; i++ { - go func(blocks []*flow.Block) { + go func(blocks []*flow.Proposal) { defer wg.Done() for batch := 0; batch < batchesPerWorker; batch++ { - certifiedBlocks, _, err := s.cache.AddBlocks(blocks[batch*blocksPerBatch : (batch+1)*blocksPerBatch]) + certifiedBlocks, err := s.cache.AddBlocks(blocks[batch*blocksPerBatch : (batch+1)*blocksPerBatch]) require.NoError(s.T(), err) certifiedBlocksLock.Lock() allCertifiedBlocks = append(allCertifiedBlocks, certifiedBlocks...) @@ -237,17 +252,19 @@ func (s *CacheSuite) TestConcurrentAdd() { unittest.RequireReturnsBefore(s.T(), wg.Wait, time.Millisecond*500, "should submit blocks before timeout") require.Len(s.T(), allCertifiedBlocks, len(blocks)-1) - slices.SortFunc(allCertifiedBlocks, func(lhs *flow.Block, rhs *flow.Block) bool { - return lhs.Header.Height < rhs.Header.Height + slices.SortFunc(allCertifiedBlocks, func(lhs flow.CertifiedBlock, rhs flow.CertifiedBlock) int { + return int(lhs.Proposal.Block.Height) - int(rhs.Proposal.Block.Height) }) - require.Equal(s.T(), blocks[:len(blocks)-1], allCertifiedBlocks) + for i, block := range blocks[:len(blocks)-1] { + require.Equal(s.T(), block, allCertifiedBlocks[i].Proposal) + } } // TestSecondaryIndexCleanup tests if ejected entities are correctly cleaned up from secondary index func (s *CacheSuite) TestSecondaryIndexCleanup() { // create blocks more than limit - blocks := unittest.ChainFixtureFrom(2*defaultHeroCacheLimit, unittest.BlockHeaderFixture()) - _, _, err := s.cache.AddBlocks(blocks) + blocks := unittest.ProposalChainFixtureFrom(2*defaultHeroCacheLimit, unittest.BlockHeaderFixture()) + _, err := s.cache.AddBlocks(blocks) require.NoError(s.T(), err) require.Len(s.T(), s.cache.byView, defaultHeroCacheLimit) require.Len(s.T(), s.cache.byParent, defaultHeroCacheLimit) @@ -263,19 +280,22 @@ func (s *CacheSuite) TestSecondaryIndexCleanup() { // We should be able to certify A since B and C are in cache, any QC will work. 
func (s *CacheSuite) TestMultipleChildrenForSameParent() { A := unittest.BlockFixture() - B := unittest.BlockWithParentFixture(A.Header) - C := unittest.BlockWithParentFixture(A.Header) - C.Header.View = B.Header.View + 1 // make sure views are different - - _, _, err := s.cache.AddBlocks([]*flow.Block{B}) + B := unittest.BlockWithParentFixture(A.ToHeader()) + C := unittest.BlockWithParentFixture(A.ToHeader()) + C.View = B.View + 1 // make sure views are different + Ap := unittest.ProposalFromBlock(A) + Bp := unittest.ProposalFromBlock(B) + Cp := unittest.ProposalFromBlock(C) + + _, err := s.cache.AddBlocks([]*flow.Proposal{Bp}) require.NoError(s.T(), err) - _, _, err = s.cache.AddBlocks([]*flow.Block{C}) + _, err = s.cache.AddBlocks([]*flow.Proposal{Cp}) require.NoError(s.T(), err) - certifiedBlocks, certifyingQC, err := s.cache.AddBlocks([]*flow.Block{&A}) + certifiedBlocks, err := s.cache.AddBlocks([]*flow.Proposal{Ap}) require.NoError(s.T(), err) require.Len(s.T(), certifiedBlocks, 1) - require.Equal(s.T(), &A, certifiedBlocks[0]) - require.Equal(s.T(), A.ID(), certifyingQC.BlockID) + require.Equal(s.T(), Ap, certifiedBlocks[0].Proposal) + require.Equal(s.T(), A.ID(), certifiedBlocks[0].CertifyingQC.BlockID) } // TestChildEjectedBeforeAddingParent tests a scenario where we have: @@ -288,23 +308,26 @@ func (s *CacheSuite) TestMultipleChildrenForSameParent() { // Between 2. and 3. B gets ejected, we should be able to certify A since C is still in cache. func (s *CacheSuite) TestChildEjectedBeforeAddingParent() { A := unittest.BlockFixture() - B := unittest.BlockWithParentFixture(A.Header) - C := unittest.BlockWithParentFixture(A.Header) - C.Header.View = B.Header.View + 1 // make sure views are different - - _, _, err := s.cache.AddBlocks([]*flow.Block{B}) + B := unittest.BlockWithParentFixture(A.ToHeader()) + C := unittest.BlockWithParentFixture(A.ToHeader()) + C.View = B.View + 1 // make sure views are different + + Ap := unittest.ProposalFromBlock(A) + Bp := unittest.ProposalFromBlock(B) + Cp := unittest.ProposalFromBlock(C) + _, err := s.cache.AddBlocks([]*flow.Proposal{Bp}) require.NoError(s.T(), err) - _, _, err = s.cache.AddBlocks([]*flow.Block{C}) + _, err = s.cache.AddBlocks([]*flow.Proposal{Cp}) require.NoError(s.T(), err) // eject B s.cache.backend.Remove(B.ID()) - s.cache.handleEjectedEntity(B) + s.cache.handleEjectedBlock(Bp) - certifiedBlocks, certifyingQC, err := s.cache.AddBlocks([]*flow.Block{&A}) + certifiedBlocks, err := s.cache.AddBlocks([]*flow.Proposal{Ap}) require.NoError(s.T(), err) require.Len(s.T(), certifiedBlocks, 1) - require.Equal(s.T(), &A, certifiedBlocks[0]) - require.Equal(s.T(), A.ID(), certifyingQC.BlockID) + require.Equal(s.T(), Ap, certifiedBlocks[0].Proposal) + require.Equal(s.T(), A.ID(), certifiedBlocks[0].CertifyingQC.BlockID) } // TestAddOverCacheLimit tests a scenario where caller feeds blocks to the cache in concurrent way @@ -315,9 +338,9 @@ func (s *CacheSuite) TestAddOverCacheLimit() { // create blocks more than limit workers := 10 blocksPerWorker := 10 - s.cache = NewCache(unittest.Logger(), uint32(blocksPerWorker), metrics.NewNoopCollector(), s.onEquivocation.Execute) + s.cache = NewCache(unittest.Logger(), uint32(blocksPerWorker), metrics.NewNoopCollector(), s.consumer) - blocks := unittest.ChainFixtureFrom(blocksPerWorker*workers, unittest.BlockHeaderFixture()) + blocks := unittest.ProposalChainFixtureFrom(blocksPerWorker*workers, unittest.BlockHeaderFixture()) var uniqueBlocksLock sync.Mutex // AddBlocks can certify same blocks, 
especially when we push same blocks over and over @@ -332,19 +355,19 @@ func (s *CacheSuite) TestAddOverCacheLimit() { var wg sync.WaitGroup wg.Add(workers) for i := 0; i < workers; i++ { - go func(blocks []*flow.Block) { + go func(blocks []*flow.Proposal) { defer wg.Done() for !done.Load() { // worker submits blocks while condition is not satisfied for _, block := range blocks { // push blocks one by one, pairing with randomness of scheduler // blocks will be delivered chaotically - certifiedBlocks, _, err := s.cache.AddBlocks([]*flow.Block{block}) + certifiedBlocks, err := s.cache.AddBlocks([]*flow.Proposal{block}) require.NoError(s.T(), err) if len(certifiedBlocks) > 0 { uniqueBlocksLock.Lock() for _, block := range certifiedBlocks { - uniqueBlocks[block.ID()] = struct{}{} + uniqueBlocks[block.BlockID()] = struct{}{} } if len(uniqueBlocks) == certifiedGoal { done.Store(true) diff --git a/engine/common/follower/cache/distributor.go b/engine/common/follower/cache/distributor.go index 779e966b9f7..fe8b5966b6a 100644 --- a/engine/common/follower/cache/distributor.go +++ b/engine/common/follower/cache/distributor.go @@ -1,38 +1,31 @@ package cache -import ( - "github.com/onflow/flow-go/model/flow" - herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" -) - -type OnEntityEjected func(ejectedEntity flow.Entity) +type OnEntityEjected[V any] func(ejectedEntity V) // HeroCacheDistributor implements herocache.Tracer and allows subscribers to receive events // for ejected entries from cache using herocache.Tracer API. // This structure is NOT concurrency safe. -type HeroCacheDistributor struct { - consumers []OnEntityEjected +type HeroCacheDistributor[V any] struct { + consumers []OnEntityEjected[V] } -var _ herocache.Tracer = (*HeroCacheDistributor)(nil) - -func NewDistributor() *HeroCacheDistributor { - return &HeroCacheDistributor{} +func NewDistributor[V any]() *HeroCacheDistributor[V] { + return &HeroCacheDistributor[V]{} } // AddConsumer adds subscriber for entity ejected events. // Is NOT concurrency safe. -func (d *HeroCacheDistributor) AddConsumer(consumer OnEntityEjected) { +func (d *HeroCacheDistributor[V]) AddConsumer(consumer OnEntityEjected[V]) { d.consumers = append(d.consumers, consumer) } -func (d *HeroCacheDistributor) EntityEjectionDueToEmergency(ejectedEntity flow.Entity) { +func (d *HeroCacheDistributor[V]) EntityEjectionDueToEmergency(ejectedEntity V) { for _, consumer := range d.consumers { consumer(ejectedEntity) } } -func (d *HeroCacheDistributor) EntityEjectionDueToFullCapacity(ejectedEntity flow.Entity) { +func (d *HeroCacheDistributor[V]) EntityEjectionDueToFullCapacity(ejectedEntity V) { for _, consumer := range d.consumers { consumer(ejectedEntity) } diff --git a/engine/common/follower/cache/mock/on_entity_ejected.go b/engine/common/follower/cache/mock/on_entity_ejected.go deleted file mode 100644 index b525bf43bea..00000000000 --- a/engine/common/follower/cache/mock/on_entity_ejected.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mock - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" -) - -// OnEntityEjected is an autogenerated mock type for the OnEntityEjected type -type OnEntityEjected struct { - mock.Mock -} - -// Execute provides a mock function with given fields: ejectedEntity -func (_m *OnEntityEjected) Execute(ejectedEntity flow.Entity) { - _m.Called(ejectedEntity) -} - -type mockConstructorTestingTNewOnEntityEjected interface { - mock.TestingT - Cleanup(func()) -} - -// NewOnEntityEjected creates a new instance of OnEntityEjected. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewOnEntityEjected(t mockConstructorTestingTNewOnEntityEjected) *OnEntityEjected { - mock := &OnEntityEjected{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/engine/common/follower/cache/mock/on_equivocation.go b/engine/common/follower/cache/mock/on_equivocation.go deleted file mode 100644 index 7f0119be8f5..00000000000 --- a/engine/common/follower/cache/mock/on_equivocation.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mock - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" -) - -// OnEquivocation is an autogenerated mock type for the OnEquivocation type -type OnEquivocation struct { - mock.Mock -} - -// Execute provides a mock function with given fields: first, other -func (_m *OnEquivocation) Execute(first *flow.Block, other *flow.Block) { - _m.Called(first, other) -} - -type mockConstructorTestingTNewOnEquivocation interface { - mock.TestingT - Cleanup(func()) -} - -// NewOnEquivocation creates a new instance of OnEquivocation. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewOnEquivocation(t mockConstructorTestingTNewOnEquivocation) *OnEquivocation { - mock := &OnEquivocation{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/engine/common/follower/compliance.go b/engine/common/follower/compliance.go index bb417aa6e5b..6d01be49af3 100644 --- a/engine/common/follower/compliance.go +++ b/engine/common/follower/compliance.go @@ -73,7 +73,7 @@ type complianceCore interface { module.Startable module.ReadyDoneAware - // OnBlockRange consumes an *untrusted* range of connected blocks( part of a fork). The originID parameter + // OnBlockRange consumes an *untrusted* range of connected blocks (part of a fork). The originID parameter // identifies the node that sent the batch of blocks. The input `connectedRange` must be sequentially ordered // blocks that form a chain, i.e. connectedRange[i] is the parent of connectedRange[i+1]. Submitting a // disconnected batch results in an `ErrDisconnectedBatch` error and the batch is dropped (no-op). @@ -81,7 +81,7 @@ type complianceCore interface { // Caution: this method is allowed to block. // Expected errors during normal operations: // - cache.ErrDisconnectedBatch - OnBlockRange(originID flow.Identifier, connectedRange []*flow.Block) error + OnBlockRange(originID flow.Identifier, connectedRange []*flow.Proposal) error // OnFinalizedBlock prunes all blocks below the finalized view from the compliance layer's Cache // and PendingTree. 
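// Aside: a hedged sketch of the connectivity contract OnBlockRange documents
// above — connectedRange[i] must be the parent of connectedRange[i+1], and a
// disconnected batch is rejected with ErrDisconnectedBatch. The error value
// and the minimal proposal shape below are illustrative stand-ins (assumes
// the standard errors and fmt packages are imported).
var errDisconnectedSketch = errors.New("blocks in batch do not form a connected chain")

type linkedProposal struct {
	ID       [32]byte
	ParentID [32]byte
}

// checkConnected verifies that every block references its predecessor as parent.
func checkConnected(batch []*linkedProposal) error {
	for i := 0; i+1 < len(batch); i++ {
		if batch[i+1].ParentID != batch[i].ID {
			return fmt.Errorf("%w: gap after index %d", errDisconnectedSketch, i)
		}
	}
	return nil
}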
diff --git a/engine/common/follower/compliance_core.go b/engine/common/follower/compliance_core.go index efcc0c82d67..269e5e722c0 100644 --- a/engine/common/follower/compliance_core.go +++ b/engine/common/follower/compliance_core.go @@ -66,10 +66,6 @@ func NewComplianceCore(log zerolog.Logger, sync module.BlockRequester, tracer module.Tracer, ) (*ComplianceCore, error) { - onEquivocation := func(block, otherBlock *flow.Block) { - followerConsumer.OnDoubleProposeDetected(model.BlockFromFlow(block.Header), model.BlockFromFlow(otherBlock.Header)) - } - finalizedBlock, err := state.Final().Head() if err != nil { return nil, fmt.Errorf("could not query finalized block: %w", err) @@ -80,7 +76,7 @@ func NewComplianceCore(log zerolog.Logger, mempoolMetrics: mempoolMetrics, state: state, proposalViolationNotifier: followerConsumer, - pendingCache: cache.NewCache(log, defaultPendingBlocksCacheCapacity, heroCacheCollector, onEquivocation), + pendingCache: cache.NewCache(log, defaultPendingBlocksCacheCapacity, heroCacheCollector, followerConsumer), pendingTree: pending_tree.NewPendingTree(finalizedBlock), follower: follower, validator: validator, @@ -108,26 +104,27 @@ func NewComplianceCore(log zerolog.Logger, // Caution: method might block if internally too many certified blocks are queued in the channel `certifiedRangesChan`. // Expected errors during normal operations: // - cache.ErrDisconnectedBatch -func (c *ComplianceCore) OnBlockRange(originID flow.Identifier, batch []*flow.Block) error { +func (c *ComplianceCore) OnBlockRange(originID flow.Identifier, batch []*flow.Proposal) error { if len(batch) < 1 { return nil } - firstBlock := batch[0].Header - lastBlock := batch[len(batch)-1].Header - hotstuffProposal := model.ProposalFromFlow(lastBlock) + firstBlock := batch[0].Block + lastBlock := batch[len(batch)-1] + lastHeader := lastBlock.Block + hotstuffProposal := model.SignedProposalFromBlock(lastBlock) log := c.log.With(). Hex("origin_id", originID[:]). - Str("chain_id", lastBlock.ChainID.String()). + Str("chain_id", lastHeader.ChainID.String()). Uint64("first_block_height", firstBlock.Height). Uint64("first_block_view", firstBlock.View). - Uint64("last_block_height", lastBlock.Height). - Uint64("last_block_view", lastBlock.View). + Uint64("last_block_height", lastHeader.Height). + Uint64("last_block_view", lastHeader.View). Hex("last_block_id", hotstuffProposal.Block.BlockID[:]). Int("range_length", len(batch)). Logger() - log.Info().Msg("processing block range") + log.Debug().Msg("processing block range") if c.pendingCache.Peek(hotstuffProposal.Block.BlockID) == nil { log.Debug().Msg("block not found in cache, performing validation") @@ -144,7 +141,10 @@ func (c *ComplianceCore) OnBlockRange(originID flow.Identifier, batch []*flow.Bl err := c.validator.ValidateProposal(hotstuffProposal) if err != nil { if invalidBlockError, ok := model.AsInvalidProposalError(err); ok { - c.proposalViolationNotifier.OnInvalidBlockDetected(*invalidBlockError) + c.proposalViolationNotifier.OnInvalidBlockDetected(flow.Slashable[model.InvalidProposalError]{ + OriginID: originID, + Message: *invalidBlockError, + }) return nil } if errors.Is(err, model.ErrViewForUnknownEpoch) { @@ -156,20 +156,24 @@ func (c *ComplianceCore) OnBlockRange(originID flow.Identifier, batch []*flow.Bl // service event. 
// -> in this case we can disregard the block // Note: we could eliminate this edge case by dropping future blocks, iff their _view_ - // is strictly larger than `V + EpochCommitSafetyThreshold`, where `V` denotes + // is strictly larger than `V + FinalizationSafetyThreshold`, where `V` denotes // the latest finalized block known to this node. - // 3. No blocks have been finalized for the last `EpochCommitSafetyThreshold` views. This breaks - // a critical liveness assumption - see EpochCommitSafetyThreshold in protocol.Params for details. + // 3. No blocks have been finalized for the last `FinalizationSafetyThreshold` views. This breaks + // a critical liveness assumption - see FinalizationSafetyThreshold in protocol.Params for details. // -> In this case, it is ok for the protocol to halt. Consequently, we can just disregard // the block, which will probably lead to this node eventually halting. - log.Err(err).Msg("unable to validate proposal with view from unknown epoch") + log.Err(err).Msg( + "Unable to validate proposal with view from unknown epoch. While there is nothing wrong with the node, " + + "this could be a symptom of (i) the node being severely behind, (ii) there is a byzantine proposer in " + + "the network, or (iii) there was no finalization progress for hundreds of views. This should be " + + "investigated to confirm the cause is the benign scenario (i).") return nil } return fmt.Errorf("unexpected error validating proposal: %w", err) } } - certifiedBatch, certifyingQC, err := c.pendingCache.AddBlocks(batch) + certifiedBatch, err := c.pendingCache.AddBlocks(batch) if err != nil { return fmt.Errorf("could not add a range of pending blocks: %w", err) // ErrDisconnectedBatch or exception } @@ -178,15 +182,11 @@ func (c *ComplianceCore) OnBlockRange(originID flow.Identifier, batch []*flow.Bl if len(certifiedBatch) < 1 { return nil } - certifiedRange, err := rangeToCertifiedBlocks(certifiedBatch, certifyingQC) - if err != nil { - return fmt.Errorf("converting the certified batch to list of certified blocks failed: %w", err) - } // in case we have already stopped our worker, we use a select statement to avoid // blocking since there is no active consumer for this channel select { - case c.certifiedRangesChan <- certifiedRange: + case c.certifiedRangesChan <- certifiedBatch: case <-c.ComponentManager.ShutdownSignal(): } return nil @@ -201,6 +201,14 @@ func (c *ComplianceCore) processCoreSeqEvents(ctx irrecoverable.SignalerContext, doneSignal := ctx.Done() for { + // Check if shutdown was requested before attempting to process queued events. + // This helps to prioritize timely reaction in case of shutdown. 
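// Aside: the shutdown-prioritization pattern the comment above introduces
// (and which the two select statements below implement), reduced to a
// standalone sketch. Go's select picks uniformly at random among ready cases,
// so without the initial non-blocking poll a busy event queue could keep
// winning over the shutdown signal. doneSignal and jobs are illustrative names.
func drainWithShutdownPriority(doneSignal <-chan struct{}, jobs <-chan func()) {
	for {
		// non-blocking check first: exit promptly if shutdown was requested,
		// even when work is ready at the same time
		select {
		case <-doneSignal:
			return
		default:
		}
		select {
		case <-doneSignal:
			return
		case job := <-jobs:
			job()
		}
	}
}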
+ select { + case <-doneSignal: + return + default: + } + select { case <-doneSignal: return @@ -260,16 +268,16 @@ func (c *ComplianceCore) processCertifiedBlocks(ctx context.Context, blocks Cert // Step 2 & 3: extend protocol state with connected certified blocks and forward them to consensus follower for _, certifiedBlock := range connectedBlocks { - s, _ := c.tracer.StartBlockSpan(ctx, certifiedBlock.ID(), trace.FollowerExtendProtocolState) - err = c.state.ExtendCertified(ctx, certifiedBlock.Block, certifiedBlock.CertifyingQC) + s, _ := c.tracer.StartBlockSpan(ctx, certifiedBlock.BlockID(), trace.FollowerExtendProtocolState) + err = c.state.ExtendCertified(ctx, &certifiedBlock) s.End() if err != nil { return fmt.Errorf("could not extend protocol state with certified block: %w", err) } - b, err := model.NewCertifiedBlock(model.BlockFromFlow(certifiedBlock.Block.Header), certifiedBlock.CertifyingQC) + b, err := model.NewCertifiedBlock(model.BlockFromFlow(certifiedBlock.Proposal.Block.ToHeader()), certifiedBlock.CertifyingQC) if err != nil { - return fmt.Errorf("failed to convert certified block %v to HotStuff type: %w", certifiedBlock.Block.ID(), err) + return fmt.Errorf("failed to convert certified block %v to HotStuff type: %w", certifiedBlock.Proposal.Block.ID(), err) } c.follower.AddCertifiedBlock(&b) // submit the model to follower for async processing } @@ -299,27 +307,3 @@ func (c *ComplianceCore) processFinalizedBlock(ctx context.Context, finalized *f } return nil } - -// rangeToCertifiedBlocks transform batch of connected blocks and a QC that certifies last block to a range of -// certified and connected blocks. -// Pure function (side-effect free). No errors expected during normal operations. -func rangeToCertifiedBlocks(certifiedRange []*flow.Block, certifyingQC *flow.QuorumCertificate) (CertifiedBlocks, error) { - certifiedBlocks := make(CertifiedBlocks, 0, len(certifiedRange)) - lastIndex := len(certifiedRange) - 1 - for i, block := range certifiedRange { - var qc *flow.QuorumCertificate - if i < lastIndex { - qc = certifiedRange[i+1].Header.QuorumCertificate() - } else { - qc = certifyingQC - } - - // bundle block and its certifying QC to `CertifiedBlock`: - certBlock, err := flow.NewCertifiedBlock(block, qc) - if err != nil { - return nil, fmt.Errorf("constructing certified root block failed: %w", err) - } - certifiedBlocks = append(certifiedBlocks, certBlock) - } - return certifiedBlocks, nil -} diff --git a/engine/common/follower/compliance_core_test.go b/engine/common/follower/compliance_core_test.go index fc9bdc5170e..a0aad9e24ea 100644 --- a/engine/common/follower/compliance_core_test.go +++ b/engine/common/follower/compliance_core_test.go @@ -95,28 +95,32 @@ func (s *CoreSuite) TearDownTest() { // If block is already in cache it should be no-op. 
func (s *CoreSuite) TestProcessingSingleBlock() { block := unittest.BlockWithParentFixture(s.finalizedBlock) + proposal := unittest.ProposalFromBlock(block) // incoming block has to be validated - s.validator.On("ValidateProposal", model.ProposalFromFlow(block.Header)).Return(nil).Once() + s.validator.On("ValidateProposal", model.SignedProposalFromBlock(proposal)).Return(nil).Once() - err := s.core.OnBlockRange(s.originID, []*flow.Block{block}) + err := s.core.OnBlockRange(s.originID, []*flow.Proposal{proposal}) require.NoError(s.T(), err) require.NotNil(s.T(), s.core.pendingCache.Peek(block.ID())) - err = s.core.OnBlockRange(s.originID, []*flow.Block{block}) + err = s.core.OnBlockRange(s.originID, []*flow.Proposal{proposal}) require.NoError(s.T(), err) } // TestAddFinalizedBlock tests that adding block below finalized height results in processing it, but since cache was pruned // to finalized view, it must be rejected by it. func (s *CoreSuite) TestAddFinalizedBlock() { - block := unittest.BlockFixture() - block.Header.View = s.finalizedBlock.View - 1 // block is below finalized view + block := unittest.BlockFixture( + unittest.Block.WithView(s.finalizedBlock.View-1), // block is below finalized view + unittest.Block.WithParentView(s.finalizedBlock.View-2), // parent view must be below view + ) + proposal := unittest.ProposalFromBlock(block) // incoming block has to be validated - s.validator.On("ValidateProposal", model.ProposalFromFlow(block.Header)).Return(nil).Once() + s.validator.On("ValidateProposal", model.SignedProposalFromBlock(proposal)).Return(nil).Once() - err := s.core.OnBlockRange(s.originID, []*flow.Block{&block}) + err := s.core.OnBlockRange(s.originID, []*flow.Proposal{proposal}) require.NoError(s.T(), err) require.Nil(s.T(), s.core.pendingCache.Peek(block.ID())) } @@ -130,19 +134,23 @@ func (s *CoreSuite) TestAddFinalizedBlock() { // // Finally, the certified blocks should be forwarded to the HotStuff follower. 
func (s *CoreSuite) TestProcessingRangeHappyPath() { - blocks := unittest.ChainFixtureFrom(10, s.finalizedBlock) + proposals := unittest.ProposalChainFixtureFrom(10, s.finalizedBlock) var wg sync.WaitGroup - wg.Add(len(blocks) - 1) - for i := 1; i < len(blocks); i++ { - s.state.On("ExtendCertified", mock.Anything, blocks[i-1], blocks[i].Header.QuorumCertificate()).Return(nil).Once() - s.follower.On("AddCertifiedBlock", blockWithID(blocks[i-1].ID())).Run(func(args mock.Arguments) { + wg.Add(len(proposals) - 1) + for i := 1; i < len(proposals); i++ { + expectCertified := &flow.CertifiedBlock{ + Proposal: proposals[i-1], + CertifyingQC: proposals[i].Block.ParentQC(), + } + s.state.On("ExtendCertified", mock.Anything, expectCertified).Return(nil).Once() + s.follower.On("AddCertifiedBlock", blockWithID(proposals[i-1].Block.ID())).Run(func(args mock.Arguments) { wg.Done() }).Return().Once() } - s.validator.On("ValidateProposal", model.ProposalFromFlow(blocks[len(blocks)-1].Header)).Return(nil).Once() + s.validator.On("ValidateProposal", model.SignedProposalFromBlock(proposals[len(proposals)-1])).Return(nil).Once() - err := s.core.OnBlockRange(s.originID, blocks) + err := s.core.OnBlockRange(s.originID, proposals) require.NoError(s.T(), err) unittest.RequireReturnsBefore(s.T(), wg.Wait, 500*time.Millisecond, "expect all blocks to be processed before timeout") @@ -151,29 +159,32 @@ // TestProcessingNotOrderedBatch tests that submitting a batch which is not properly ordered (meaning the batch is not connected) // has to result in an error. func (s *CoreSuite) TestProcessingNotOrderedBatch() { - blocks := unittest.ChainFixtureFrom(10, s.finalizedBlock) - blocks[2], blocks[3] = blocks[3], blocks[2] + proposals := unittest.ProposalChainFixtureFrom(10, s.finalizedBlock) + proposals[2], proposals[3] = proposals[3], proposals[2] - s.validator.On("ValidateProposal", model.ProposalFromFlow(blocks[len(blocks)-1].Header)).Return(nil).Once() + s.validator.On("ValidateProposal", model.SignedProposalFromBlock(proposals[len(proposals)-1])).Return(nil).Once() - err := s.core.OnBlockRange(s.originID, blocks) + err := s.core.OnBlockRange(s.originID, proposals) require.ErrorIs(s.T(), err, cache.ErrDisconnectedBatch) } // TestProcessingInvalidBlock tests that processing a batch which ends with an invalid block discards the whole batch func (s *CoreSuite) TestProcessingInvalidBlock() { - blocks := unittest.ChainFixtureFrom(10, s.finalizedBlock) + proposals := unittest.ProposalChainFixtureFrom(10, s.finalizedBlock) - invalidProposal := model.ProposalFromFlow(blocks[len(blocks)-1].Header) + invalidProposal := model.SignedProposalFromBlock(proposals[len(proposals)-1]) sentinelError := model.NewInvalidProposalErrorf(invalidProposal, "") s.validator.On("ValidateProposal", invalidProposal).Return(sentinelError).Once() - s.followerConsumer.On("OnInvalidBlockDetected", sentinelError).Return().Once() - err := s.core.OnBlockRange(s.originID, blocks) + s.followerConsumer.On("OnInvalidBlockDetected", flow.Slashable[model.InvalidProposalError]{ + OriginID: s.originID, + Message: sentinelError.(model.InvalidProposalError), + }).Return().Once() + err := s.core.OnBlockRange(s.originID, proposals) require.NoError(s.T(), err, "sentinel error has to be handled internally") exception := errors.New("validate-proposal-exception") s.validator.On("ValidateProposal", invalidProposal).Return(exception).Once() - err = s.core.OnBlockRange(s.originID, blocks) + err = s.core.OnBlockRange(s.originID,
proposals) require.ErrorIs(s.T(), err, exception, "exception has to be propagated") } @@ -185,8 +196,8 @@ func (s *CoreSuite) TestProcessingBlocksAfterShutdown() { // at this point workers are stopped and processing valid range of connected blocks won't be delivered // to the protocol state - blocks := unittest.ChainFixtureFrom(10, s.finalizedBlock) - s.validator.On("ValidateProposal", model.ProposalFromFlow(blocks[len(blocks)-1].Header)).Return(nil).Once() + blocks := unittest.ProposalChainFixtureFrom(10, s.finalizedBlock) + s.validator.On("ValidateProposal", model.SignedProposalFromBlock(blocks[len(blocks)-1])).Return(nil).Once() err := s.core.OnBlockRange(s.originID, blocks) require.NoError(s.T(), err) @@ -195,7 +206,7 @@ // TestProcessingConnectedRangesOutOfOrder tests that processing a range of connected blocks [B1 <- ... <- BN+1] out of order // results in extending [B1 <- ... <- BN] in the correct order. func (s *CoreSuite) TestProcessingConnectedRangesOutOfOrder() { - blocks := unittest.ChainFixtureFrom(10, s.finalizedBlock) + blocks := unittest.ProposalChainFixtureFrom(10, s.finalizedBlock) midpoint := len(blocks) / 2 firstHalf, secondHalf := blocks[:midpoint], blocks[midpoint:] @@ -206,21 +217,21 @@ var wg sync.WaitGroup wg.Add(len(blocks) - 1) for _, block := range blocks[:len(blocks)-1] { - s.follower.On("AddCertifiedBlock", blockWithID(block.ID())).Return().Run(func(args mock.Arguments) { + s.follower.On("AddCertifiedBlock", blockWithID(block.Block.ID())).Return().Run(func(args mock.Arguments) { wg.Done() }).Once() } lastSubmittedBlockID := flow.ZeroID - s.state.On("ExtendCertified", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - block := args.Get(1).(*flow.Block) + s.state.On("ExtendCertified", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + certified := args.Get(1).(*flow.CertifiedBlock) if lastSubmittedBlockID != flow.ZeroID { - if block.Header.ParentID != lastSubmittedBlockID { + if certified.Proposal.Block.ParentID != lastSubmittedBlockID { s.Failf("blocks not sequential", - "blocks submitted to protocol state are not sequential at height %d", block.Header.Height) + "blocks submitted to protocol state are not sequential at height %d", certified.Proposal.Block.Height) } } - lastSubmittedBlockID = block.ID() + lastSubmittedBlockID = certified.Proposal.Block.ID() }).Return(nil).Times(len(blocks) - 1) s.validator.On("ValidateProposal", mock.Anything).Return(nil).Once() @@ -233,15 +244,17 @@ func (s *CoreSuite) TestDetectingProposalEquivocation() { block := unittest.BlockWithParentFixture(s.finalizedBlock) otherBlock := unittest.BlockWithParentFixture(s.finalizedBlock) - otherBlock.Header.View = block.Header.View + otherBlock.View = block.View + proposal := unittest.ProposalFromBlock(block) + otherProposal := unittest.ProposalFromBlock(otherBlock) s.validator.On("ValidateProposal", mock.Anything).Return(nil).Times(2) s.followerConsumer.On("OnDoubleProposeDetected", mock.Anything, mock.Anything).Return().Once() - err := s.core.OnBlockRange(s.originID, []*flow.Block{block}) + err := s.core.OnBlockRange(s.originID, []*flow.Proposal{proposal}) require.NoError(s.T(), err) - err = s.core.OnBlockRange(s.originID, []*flow.Block{otherBlock}) + err = s.core.OnBlockRange(s.originID, []*flow.Proposal{otherProposal}) require.NoError(s.T(), err) } @@ -261,8 +274,8 @@
func (s *CoreSuite) TestConcurrentAdd() { batchesPerWorker := 10 blocksPerBatch := 10 blocksPerWorker := blocksPerBatch * batchesPerWorker - blocks := unittest.ChainFixtureFrom(workers*blocksPerWorker, s.finalizedBlock) - targetSubmittedBlockID := blocks[len(blocks)-2].ID() + blocks := unittest.ProposalChainFixtureFrom(workers*blocksPerWorker, s.finalizedBlock) + targetSubmittedBlockID := blocks[len(blocks)-2].Block.ID() require.Lessf(s.T(), len(blocks), defaultPendingBlocksCacheCapacity, "this test works under assumption that we operate under cache upper limit") s.validator.On("ValidateProposal", mock.Anything).Return(nil) // any proposal is valid @@ -271,27 +284,27 @@ func (s *CoreSuite) TestConcurrentAdd() { s.follower.On("AddCertifiedBlock", mock.Anything).Return(nil).Run(func(args mock.Arguments) { // ensure that proposals are submitted in-order block := args.Get(0).(*model.CertifiedBlock) - if block.ID() == targetSubmittedBlockID { + if block.BlockID() == targetSubmittedBlockID { close(done) } }).Return().Times(len(blocks) - 1) // all proposals have to be submitted lastSubmittedBlockID := flow.ZeroID - s.state.On("ExtendCertified", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - block := args.Get(1).(*flow.Block) + s.state.On("ExtendCertified", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + certified := args.Get(1).(*flow.CertifiedBlock) if lastSubmittedBlockID != flow.ZeroID { - if block.Header.ParentID != lastSubmittedBlockID { + if certified.Proposal.Block.ParentID != lastSubmittedBlockID { s.Failf("blocks not sequential", - "blocks submitted to protocol state are not sequential at height %d", block.Header.Height) + "blocks submitted to protocol state are not sequential at height %d", certified.Proposal.Block.Height) } } - lastSubmittedBlockID = block.ID() + lastSubmittedBlockID = certified.Proposal.Block.ID() }).Return(nil).Times(len(blocks) - 1) var wg sync.WaitGroup wg.Add(workers) for i := 0; i < workers; i++ { - go func(blocks []*flow.Block) { + go func(blocks []*flow.Proposal) { defer wg.Done() for batch := 0; batch < batchesPerWorker; batch++ { err := s.core.OnBlockRange(s.originID, blocks[batch*blocksPerBatch:(batch+1)*blocksPerBatch]) @@ -306,5 +319,5 @@ func (s *CoreSuite) TestConcurrentAdd() { // blockWithID returns a testify `argumentMatcher` that only accepts blocks with the given ID func blockWithID(expectedBlockID flow.Identifier) interface{} { - return mock.MatchedBy(func(block *model.CertifiedBlock) bool { return expectedBlockID == block.ID() }) + return mock.MatchedBy(func(block *model.CertifiedBlock) bool { return expectedBlockID == block.BlockID() }) } diff --git a/engine/common/follower/compliance_engine.go b/engine/common/follower/compliance_engine.go index a0b28e34d17..2eddae19c0c 100644 --- a/engine/common/follower/compliance_engine.go +++ b/engine/common/follower/compliance_engine.go @@ -11,7 +11,6 @@ import ( "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/engine/consensus" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/compliance" "github.com/onflow/flow-go/module/component" @@ -81,7 +80,7 @@ type ComplianceEngine struct { blocksAvailableNotifier engine.Notifier // notifies that new blocks are ready to be processed finalizedBlockTracker *tracker.NewestBlockTracker // tracks the latest finalization block finalizedBlockNotifier engine.Notifier // notifies when the latest 
finalized block changes - pendingConnectedBlocksChan chan flow.Slashable[[]*flow.Block] + pendingConnectedBlocksChan chan flow.Slashable[[]*flow.Proposal] core complianceCore // performs actual processing of incoming messages. } @@ -92,12 +91,13 @@ var _ consensus.Compliance = (*ComplianceEngine)(nil) // interface `complianceCore` (this package) for detailed documentation of the algorithm. func NewComplianceLayer( log zerolog.Logger, - net network.Network, + net network.EngineRegistry, me module.Local, engMetrics module.EngineMetrics, headers storage.Headers, finalized *flow.Header, core complianceCore, + config compliance.Config, opts ...EngineOption, ) (*ComplianceEngine, error) { // FIFO queue for inbound block proposals @@ -115,18 +115,26 @@ func NewComplianceLayer( log: log.With().Str("engine", "follower").Logger(), me: me, engMetrics: engMetrics, - config: compliance.DefaultConfig(), + config: config, channel: channels.ReceiveBlocks, pendingProposals: pendingBlocks, syncedBlocks: syncedBlocks, blocksAvailableNotifier: engine.NewNotifier(), - pendingConnectedBlocksChan: make(chan flow.Slashable[[]*flow.Block], defaultPendingConnectedBlocksChanCapacity), + pendingConnectedBlocksChan: make(chan flow.Slashable[[]*flow.Proposal], defaultPendingConnectedBlocksChanCapacity), finalizedBlockTracker: tracker.NewNewestBlockTracker(), finalizedBlockNotifier: engine.NewNotifier(), headers: headers, core: core, } - e.finalizedBlockTracker.Track(model.BlockFromFlow(finalized)) + + var block *model.Block + if finalized.ContainsParentQC() { + block = model.BlockFromFlow(finalized) + } else { + block = model.GenesisBlockFromFlow(finalized) + } + + e.finalizedBlockTracker.Track(block) for _, apply := range opts { apply(e) @@ -165,10 +173,10 @@ func NewComplianceLayer( return e, nil } -// OnBlockProposal queues *untrusted* proposals for further processing and notifies the Engine's +// OnBlockProposal queues structurally validated proposals for further processing and notifies the Engine's // internal workers. This method is intended for fresh proposals received directly from leaders. // It can ingest synced blocks as well, but is less performant compared to method `OnSyncedBlocks`. -func (e *ComplianceEngine) OnBlockProposal(proposal flow.Slashable[*messages.BlockProposal]) { +func (e *ComplianceEngine) OnBlockProposal(proposal flow.Slashable[*flow.Proposal]) { e.engMetrics.MessageReceived(metrics.EngineFollower, metrics.MessageBlockProposal) // queue proposal if e.pendingProposals.Push(proposal) { @@ -176,11 +184,11 @@ func (e *ComplianceEngine) OnBlockProposal(proposal flow.Slashable[*messages.Blo } } -// OnSyncedBlocks is an optimized consumer for *untrusted* synced blocks. It is specifically +// OnSyncedBlocks is an optimized consumer for structurally validated synced blocks. It is specifically // efficient for batches of continuously connected blocks (honest nodes supply finalized blocks // in suitable sequences where possible). Nevertheless, the method tolerates blocks in arbitrary // order (less efficient), making it robust against byzantine nodes. -func (e *ComplianceEngine) OnSyncedBlocks(blocks flow.Slashable[[]*messages.BlockProposal]) { +func (e *ComplianceEngine) OnSyncedBlocks(blocks flow.Slashable[[]*flow.Proposal]) { e.engMetrics.MessageReceived(metrics.EngineFollower, metrics.MessageSyncedBlocks) // The synchronization engine feeds the follower with batches of blocks. The field `Slashable.OriginID` // states which node forwarded the batch to us. 
Each block contains its proposer and signature. @@ -192,7 +200,7 @@ func (e *ComplianceEngine) OnSyncedBlocks(blocks flow.Slashable[[]*messages.Bloc // OnFinalizedBlock informs the compliance layer about finalization of a new block. It does not block // and asynchronously executes the internal pruning logic. We accept inputs out of order, and only act -// on inputs with strictly monotonously increasing views. +// on inputs with strictly monotonically increasing views. // // Implements the `OnFinalizedBlock` callback from the `hotstuff.FinalizationConsumer` // CAUTION: the input to this callback is treated as trusted; precautions should be taken that messages @@ -207,10 +215,18 @@ func (e *ComplianceEngine) OnFinalizedBlock(block *model.Block) { // a blocking manner. It returns the potential processing error when done. // This method is intended to be used as a callback by the networking layer, // notifying us about fresh proposals directly from the consensus leaders. +// +// TODO(BFT, #7620): This function should not return an error. The networking layer's responsibility is fulfilled +// once it delivers a message to an engine. It does not possess the context required to handle +// errors that may arise during an engine's processing of the message, as error handling for +// message processing falls outside the domain of the networking layer. +// +// Some of the current error returns signal Byzantine behavior, such as forged or malformed +// messages. These cases must be logged and routed to a dedicated violation reporting consumer. func (e *ComplianceEngine) Process(channel channels.Channel, originID flow.Identifier, message interface{}) error { switch msg := message.(type) { - case *messages.BlockProposal: - e.OnBlockProposal(flow.Slashable[*messages.BlockProposal]{ + case *flow.Proposal: + e.OnBlockProposal(flow.Slashable[*flow.Proposal]{ OriginID: originID, Message: msg, }) @@ -266,16 +282,16 @@ func (e *ComplianceEngine) processQueuedBlocks(doneSignal <-chan struct{}) error // Priority 1: ingest fresh proposals msg, ok := e.pendingProposals.Pop() if ok { - blockMsg := msg.(flow.Slashable[*messages.BlockProposal]) - block := blockMsg.Message.Block.ToInternal() + proposal := msg.(flow.Slashable[*flow.Proposal]) + proposalMsg := proposal.Message log := e.log.With(). - Hex("origin_id", blockMsg.OriginID[:]). - Str("chain_id", block.Header.ChainID.String()). - Uint64("view", block.Header.View). - Uint64("height", block.Header.Height). + Hex("origin_id", proposal.OriginID[:]). + Str("chain_id", proposalMsg.Block.ChainID.String()). + Uint64("view", proposalMsg.Block.View). + Uint64("height", proposalMsg.Block.Height). Logger() latestFinalizedView := e.finalizedBlockTracker.NewestBlock().View - e.submitConnectedBatch(log, latestFinalizedView, blockMsg.OriginID, []*flow.Block{block}) + e.submitConnectedBatch(log, latestFinalizedView, proposal.OriginID, []*flow.Proposal{proposalMsg}) e.engMetrics.MessageHandled(metrics.EngineFollower, metrics.MessageBlockProposal) continue } @@ -288,17 +304,15 @@ func (e *ComplianceEngine) processQueuedBlocks(doneSignal <-chan struct{}) error return nil } - batch := msg.(flow.Slashable[[]*messages.BlockProposal]) + batch := msg.(flow.Slashable[[]*flow.Proposal]) if len(batch.Message) < 1 { continue } - blocks := make([]*flow.Block, 0, len(batch.Message)) - for _, block := range batch.Message { - blocks = append(blocks, block.Block.ToInternal()) - } + proposals := make([]*flow.Proposal, 0, len(batch.Message)) + proposals = append(proposals, batch.Message...)
- firstBlock := blocks[0].Header - lastBlock := blocks[len(blocks)-1].Header + firstBlock := proposals[0].Block + lastBlock := proposals[len(proposals)-1].Block log := e.log.With(). Hex("origin_id", batch.OriginID[:]). Str("chain_id", lastBlock.ChainID.String()). @@ -306,48 +320,50 @@ func (e *ComplianceEngine) processQueuedBlocks(doneSignal <-chan struct{}) error Uint64("first_block_view", firstBlock.View). Uint64("last_block_height", lastBlock.Height). Uint64("last_block_view", lastBlock.View). - Int("range_length", len(blocks)). + Int("range_length", len(proposals)). Logger() // extract sequences of connected blocks and schedule them for further processing // we assume the sender has already ordered blocks into connected ranges if possible latestFinalizedView := e.finalizedBlockTracker.NewestBlock().View - parentID := blocks[0].ID() + parentID := proposals[0].Block.ID() indexOfLastConnected := 0 - for i, block := range blocks { - if block.Header.ParentID != parentID { - e.submitConnectedBatch(log, latestFinalizedView, batch.OriginID, blocks[indexOfLastConnected:i]) + for i, block := range proposals { + if block.Block.ParentID != parentID { + e.submitConnectedBatch(log, latestFinalizedView, batch.OriginID, proposals[indexOfLastConnected:i]) indexOfLastConnected = i } - parentID = block.Header.ID() + parentID = block.Block.ID() } - e.submitConnectedBatch(log, latestFinalizedView, batch.OriginID, blocks[indexOfLastConnected:]) + e.submitConnectedBatch(log, latestFinalizedView, batch.OriginID, proposals[indexOfLastConnected:]) e.engMetrics.MessageHandled(metrics.EngineFollower, metrics.MessageSyncedBlocks) } } // submitConnectedBatch checks if batch is still pending and submits it via channel for further processing by worker goroutines. -func (e *ComplianceEngine) submitConnectedBatch(log zerolog.Logger, latestFinalizedView uint64, originID flow.Identifier, blocks []*flow.Block) { +func (e *ComplianceEngine) submitConnectedBatch(log zerolog.Logger, latestFinalizedView uint64, originID flow.Identifier, blocks []*flow.Proposal) { if len(blocks) < 1 { return } // if latest block of batch is already finalized we can drop such input. - lastBlock := blocks[len(blocks)-1].Header + firstBlock := blocks[0].Block + lastBlock := blocks[len(blocks)-1].Block if lastBlock.View < latestFinalizedView { - log.Debug().Msgf("dropping range [%d, %d] below finalized view %d", blocks[0].Header.View, lastBlock.View, latestFinalizedView) + log.Debug().Msgf("dropping range [%d, %d] below finalized view %d", firstBlock.View, lastBlock.View, latestFinalizedView) return } - if lastBlock.View > latestFinalizedView+e.config.SkipNewProposalsThreshold { + skipNewProposalsThreshold := e.config.GetSkipNewProposalsThreshold() + if lastBlock.View > latestFinalizedView+skipNewProposalsThreshold { log.Debug(). - Uint64("skip_new_proposals_threshold", e.config.SkipNewProposalsThreshold). + Uint64("skip_new_proposals_threshold", skipNewProposalsThreshold). 
Msgf("dropping range [%d, %d] too far ahead of locally finalized view %d", - blocks[0].Header.View, lastBlock.View, latestFinalizedView) + firstBlock.View, lastBlock.View, latestFinalizedView) return } - log.Debug().Msgf("submitting sub-range with views [%d, %d] for further processing", blocks[0].Header.View, lastBlock.View) + log.Debug().Msgf("submitting sub-range with views [%d, %d] for further processing", firstBlock.View, lastBlock.View) select { - case e.pendingConnectedBlocksChan <- flow.Slashable[[]*flow.Block]{ + case e.pendingConnectedBlocksChan <- flow.Slashable[[]*flow.Proposal]{ OriginID: originID, Message: blocks, }: diff --git a/engine/common/follower/compliance_engine_test.go b/engine/common/follower/compliance_engine_test.go index 4abceba662a..28c2490b810 100644 --- a/engine/common/follower/compliance_engine_test.go +++ b/engine/common/follower/compliance_engine_test.go @@ -15,11 +15,12 @@ import ( followermock "github.com/onflow/flow-go/engine/common/follower/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/module/compliance" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" storage "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -33,7 +34,7 @@ type EngineSuite struct { suite.Suite finalized *flow.Header - net *mocknetwork.Network + net *mocknetwork.EngineRegistry con *mocknetwork.Conduit me *module.Local headers *storage.Headers @@ -47,7 +48,7 @@ type EngineSuite struct { func (s *EngineSuite) SetupTest() { - s.net = mocknetwork.NewNetwork(s.T()) + s.net = mocknetwork.NewEngineRegistry(s.T()) s.con = mocknetwork.NewConduit(s.T()) s.me = module.NewLocal(s.T()) s.headers = storage.NewHeaders(s.T()) @@ -70,8 +71,9 @@ func (s *EngineSuite) SetupTest() { metrics, s.headers, s.finalized, - s.core) - require.Nil(s.T(), err) + s.core, + compliance.DefaultConfig()) + require.NoError(s.T(), err) s.engine = eng @@ -94,49 +96,67 @@ func (s *EngineSuite) TearDownTest() { // TestProcessSyncedBlock checks that processing single synced block results in call to FollowerCore. func (s *EngineSuite) TestProcessSyncedBlock() { block := unittest.BlockWithParentFixture(s.finalized) + proposal := unittest.ProposalFromBlock(block) originID := unittest.IdentifierFixture() done := make(chan struct{}) - s.core.On("OnBlockRange", originID, []*flow.Block{block}).Return(nil).Run(func(_ mock.Arguments) { + s.core.On("OnBlockRange", originID, []*flow.Proposal{proposal}).Return(nil).Run(func(_ mock.Arguments) { close(done) }).Once() - s.engine.OnSyncedBlocks(flow.Slashable[[]*messages.BlockProposal]{ + s.engine.OnSyncedBlocks(flow.Slashable[[]*flow.Proposal]{ OriginID: originID, - Message: flowBlocksToBlockProposals(block), + Message: []*flow.Proposal{proposal}, }) unittest.AssertClosesBefore(s.T(), done, time.Second) } -// TestProcessGossipedBlock check that processing single gossiped block results in call to FollowerCore. -func (s *EngineSuite) TestProcessGossipedBlock() { +// TestProcessGossipedValidBlock check that processing single structurally valid gossiped block results in call to FollowerCore. 
+// TestProcessGossipedValidBlock checks that processing a single structurally valid gossiped block results in a call to FollowerCore. +func (s *EngineSuite) TestProcessGossipedValidBlock() { block := unittest.BlockWithParentFixture(s.finalized) + proposal := unittest.ProposalFromBlock(block) originID := unittest.IdentifierFixture() done := make(chan struct{}) - s.core.On("OnBlockRange", originID, []*flow.Block{block}).Return(nil).Run(func(_ mock.Arguments) { + s.core.On("OnBlockRange", originID, []*flow.Proposal{proposal}).Return(nil).Run(func(_ mock.Arguments) { close(done) }).Once() - err := s.engine.Process(channels.ReceiveBlocks, originID, messages.NewBlockProposal(block)) + err := s.engine.Process(channels.ReceiveBlocks, originID, proposal) require.NoError(s.T(), err) unittest.AssertClosesBefore(s.T(), done, time.Second) } +// TestProcessGossipedInvalidBlock checks that processing a single structurally invalid gossiped block does not result in a call to FollowerCore. +func (s *EngineSuite) TestProcessGossipedInvalidBlock() { + block := unittest.BlockWithParentFixture(s.finalized) + proposal := unittest.ProposalFromBlock(block) + proposal.ProposerSigData = nil + + originID := unittest.IdentifierFixture() + + err := s.engine.Process(channels.ReceiveBlocks, originID, (*messages.Proposal)(proposal)) + require.NoError(s.T(), err) + + // OnBlockRange should NOT be called for invalid proposal + s.core.AssertNotCalled(s.T(), "OnBlockRange", mock.Anything, mock.Anything) +} + // TestProcessBlockFromComplianceInterface check that processing single gossiped block using compliance interface results in call to FollowerCore. func (s *EngineSuite) TestProcessBlockFromComplianceInterface() { block := unittest.BlockWithParentFixture(s.finalized) + proposal := unittest.ProposalFromBlock(block) originID := unittest.IdentifierFixture() done := make(chan struct{}) - s.core.On("OnBlockRange", originID, []*flow.Block{block}).Return(nil).Run(func(_ mock.Arguments) { + s.core.On("OnBlockRange", originID, []*flow.Proposal{proposal}).Return(nil).Run(func(_ mock.Arguments) { close(done) }).Once() - s.engine.OnBlockProposal(flow.Slashable[*messages.BlockProposal]{ + s.engine.OnBlockProposal(flow.Slashable[*flow.Proposal]{ OriginID: originID, - Message: messages.NewBlockProposal(block), + Message: proposal, }) unittest.AssertClosesBefore(s.T(), done, time.Second) @@ -146,7 +166,7 @@ func (s *EngineSuite) TestProcessBlockFromComplianceInterface() { // results in submitting all of them. func (s *EngineSuite) TestProcessBatchOfDisconnectedBlocks() { originID := unittest.IdentifierFixture() - blocks := unittest.ChainFixtureFrom(10, s.finalized) + blocks := unittest.ProposalChainFixtureFrom(10, s.finalized) // drop second block blocks = append(blocks[0:1], blocks[2:]...)
// drop second from end block @@ -164,9 +184,9 @@ func (s *EngineSuite) TestProcessBatchOfDisconnectedBlocks() { wg.Done() }).Return(nil).Once() - s.engine.OnSyncedBlocks(flow.Slashable[[]*messages.BlockProposal]{ + s.engine.OnSyncedBlocks(flow.Slashable[[]*flow.Proposal]{ OriginID: originID, - Message: flowBlocksToBlockProposals(blocks...), + Message: blocks, }) unittest.RequireReturnsBefore(s.T(), wg.Wait, time.Millisecond*500, "expect to return before timeout") } @@ -189,7 +209,9 @@ func (s *EngineSuite) TestProcessFinalizedBlock() { // check if batch gets filtered out since it's lower than finalized view done = make(chan struct{}) block := unittest.BlockWithParentFixture(s.finalized) - block.Header.View = newFinalizedBlock.View - 1 // use block view lower than new latest finalized view + block.View = newFinalizedBlock.View - 1 // use block view lower than new latest finalized view + + proposal := unittest.ProposalFromBlock(block) // use metrics mock to track that we have indeed processed the message, and the batch was filtered out since it was // lower than finalized height @@ -200,9 +222,9 @@ func (s *EngineSuite) TestProcessFinalizedBlock() { }).Return().Once() s.engine.engMetrics = metricsMock - s.engine.OnSyncedBlocks(flow.Slashable[[]*messages.BlockProposal]{ + s.engine.OnSyncedBlocks(flow.Slashable[[]*flow.Proposal]{ OriginID: unittest.IdentifierFixture(), - Message: flowBlocksToBlockProposals(block), + Message: []*flow.Proposal{proposal}, }) unittest.RequireCloseBefore(s.T(), done, time.Millisecond*500, "expect to close before timeout") // check if message wasn't buffered in internal channel @@ -213,12 +235,3 @@ func (s *EngineSuite) TestProcessFinalizedBlock() { } } - -// flowBlocksToBlockProposals is a helper function to transform types. 
-func flowBlocksToBlockProposals(blocks ...*flow.Block) []*messages.BlockProposal { - result := make([]*messages.BlockProposal, 0, len(blocks)) - for _, block := range blocks { - result = append(result, messages.NewBlockProposal(block)) - } - return result -} diff --git a/engine/common/follower/integration_test.go b/engine/common/follower/integration_test.go index e88e2fffd20..2852110d26b 100644 --- a/engine/common/follower/integration_test.go +++ b/engine/common/follower/integration_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.uber.org/atomic" @@ -16,19 +16,20 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/module/compliance" moduleconsensus "github.com/onflow/flow-go/module/finalizer/consensus" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" moduleutil "github.com/onflow/flow-go/module/util" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" pbadger "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/state/protocol/util" - "github.com/onflow/flow-go/storage/badger/operation" - storageutil "github.com/onflow/flow-go/storage/util" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -44,25 +45,28 @@ import ( func TestFollowerHappyPath(t *testing.T) { allIdentities := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(allIdentities) - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + lockManager := storage.NewTestingLockManager() + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := unittest.Logger() consumer := events.NewNoop() - all := storageutil.StorageLayer(t, db) + all := store.InitAll(metrics, pebbleimpl.ToDB(pdb)) // bootstrap root snapshot state, err := pbadger.Bootstrap( metrics, - db, + pebbleimpl.ToDB(pdb), + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) @@ -80,20 +84,14 @@ func TestFollowerHappyPath(t *testing.T) { mockTimer, ) require.NoError(t, err) - finalizer := moduleconsensus.NewFinalizer(db, all.Headers, followerState, tracer) + finalizer := moduleconsensus.NewFinalizer(pebbleimpl.ToDB(pdb).Reader(), all.Headers, followerState, tracer) rootHeader, err := rootSnapshot.Head() require.NoError(t, err) rootQC, err := rootSnapshot.QuorumCertificate() require.NoError(t, err) - - // Hack EECC. - // Since root snapshot is created with 1000 views for first epoch, we will forcefully enter EECC to avoid errors - // related to epoch transitions. 
- db.NewTransaction(true) - err = db.Update(func(txn *badger.Txn) error { - return operation.SetEpochEmergencyFallbackTriggered(rootHeader.ID())(txn) - }) + rootProtocolState, err := rootSnapshot.ProtocolState() require.NoError(t, err) + rootProtocolStateID := rootProtocolState.ID() consensusConsumer := pubsub.NewFollowerDistributor() // use real consensus modules @@ -105,7 +103,7 @@ func TestFollowerHappyPath(t *testing.T) { validator.On("ValidateProposal", mock.Anything).Return(nil) // initialize the follower loop - followerLoop, err := hotstuff.NewFollowerLoop(unittest.Logger(), forks) + followerLoop, err := hotstuff.NewFollowerLoop(unittest.Logger(), metrics, forks) require.NoError(t, err) syncCore := module.NewBlockRequester(t) @@ -126,20 +124,31 @@ func TestFollowerHappyPath(t *testing.T) { nodeID := unittest.IdentifierFixture() me.On("NodeID").Return(nodeID).Maybe() - net := mocknetwork.NewNetwork(t) + net := mocknetwork.NewEngineRegistry(t) con := mocknetwork.NewConduit(t) net.On("Register", mock.Anything, mock.Anything).Return(con, nil) // use real engine - engine, err := NewComplianceLayer(unittest.Logger(), net, me, metrics, all.Headers, rootHeader, followerCore) + engine, err := NewComplianceLayer( + unittest.Logger(), + net, + me, + metrics, + all.Headers, + rootHeader, + followerCore, + compliance.DefaultConfig(), + ) require.NoError(t, err) // don't forget to subscribe for finalization notifications consensusConsumer.AddOnBlockFinalizedConsumer(engine.OnFinalizedBlock) + // Create an [irrecoverable.SignalerContext] to consume any irrecoverable errors that might be thrown by + // hotstuff or follower engine. This mock will fail the test when `SignalerContext.Throw` is called. + mockCtx, cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background()) // start hotstuff logic and follower engine - ctx, cancel, errs := irrecoverable.WithSignallerAndCancel(context.Background()) - followerLoop.Start(ctx) - engine.Start(ctx) + followerLoop.Start(mockCtx) + engine.Start(mockCtx) unittest.RequireCloseBefore(t, moduleutil.AllReady(engine, followerLoop), time.Second, "engine failed to start") // prepare chain of blocks, we will use a continuous chain assuming it was generated on happy path. 
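For context on the test scaffolding above: `irrecoverable.NewMockSignalerContextWithCancel` yields a SignalerContext whose `Throw` fails the test, so any irrecoverable error surfaced by the engines is reported rather than swallowed. A minimal sketch of the pattern, assuming a component `comp` that exposes `Start(irrecoverable.SignalerContext)` and is ready/done-aware (the name `comp` is illustrative, not part of this change):

    // any comp-internal call to mockCtx.Throw(err) fails the test immediately
    mockCtx, cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background())
    comp.Start(mockCtx)
    defer cancel()
    unittest.RequireCloseBefore(t, moduleutil.AllReady(comp), time.Second, "component failed to start")
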
@@ -147,18 +156,28 @@ func TestFollowerHappyPath(t *testing.T) {
 		batchesPerWorker := 10
 		blocksPerBatch := 100
 		blocksPerWorker := blocksPerBatch * batchesPerWorker
-		flowBlocks := unittest.ChainFixtureFrom(workers*blocksPerWorker, rootHeader)
-		require.Greaterf(t, len(flowBlocks), defaultPendingBlocksCacheCapacity, "this test assumes that we operate with more blocks than cache's upper limit")
+		pendingBlocks := unittest.ProposalChainFixtureFrom(workers*blocksPerWorker, rootHeader)
+		require.Greaterf(t, len(pendingBlocks), defaultPendingBlocksCacheCapacity, "this test assumes that we operate with more blocks than cache's upper limit")
 		// ensure sequential block views - that way we can easily know which block will be finalized after the test
-		for i, block := range flowBlocks {
-			block.Header.View = block.Header.Height
+		for i, proposal := range pendingBlocks {
+			proposal.Block.View = proposal.Block.Height
+			proposal.Block.ParentView = proposal.Block.View - 1
+			block, err := flow.NewBlock(
+				flow.UntrustedBlock{
+					HeaderBody: proposal.Block.HeaderBody,
+					Payload:    unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+				},
+			)
+			require.NoError(t, err)
+
+			proposal.Block = *block
+
 			if i > 0 {
-				block.Header.ParentView = flowBlocks[i-1].Header.View
-				block.Header.ParentID = flowBlocks[i-1].Header.ID()
+				proposal.Block.ParentView = pendingBlocks[i-1].Block.View
+				proposal.Block.ParentID = pendingBlocks[i-1].Block.ID()
 			}
 		}
-		pendingBlocks := flowBlocksToBlockProposals(flowBlocks...)

 		// Regarding the block that we expect to be finalized based on 2-chain finalization rule, we consider the last few blocks in `pendingBlocks`
 		//   ... <-- X <-- Y <-- Z
@@ -168,7 +187,7 @@
 		// Note: the HotStuff Follower does not see block Z (as there is no QC for X proving its validity). Instead, it sees the certified block
 		//   [◄(X) Y] ◄(Y)
 		// where ◄(B) denotes a QC for block B
-		targetBlockHeight := pendingBlocks[len(pendingBlocks)-3].Block.Header.Height
+		targetBlockHeight := pendingBlocks[len(pendingBlocks)-3].Block.Height

 		// emulate syncing logic, where we push same blocks over and over.
 		originID := unittest.IdentifierFixture()
@@ -176,11 +195,11 @@
 		var wg sync.WaitGroup
 		wg.Add(workers)
 		for i := 0; i < workers; i++ {
-			go func(blocks []*messages.BlockProposal) {
+			go func(blocks []*flow.Proposal) {
 				defer wg.Done()
 				for submittingBlocks.Load() {
 					for batch := 0; batch < batchesPerWorker; batch++ {
-						engine.OnSyncedBlocks(flow.Slashable[[]*messages.BlockProposal]{
+						engine.OnSyncedBlocks(flow.Slashable[[]*flow.Proposal]{
 							OriginID: originID,
 							Message:  blocks[batch*blocksPerBatch : (batch+1)*blocksPerBatch],
 						})
@@ -189,22 +208,33 @@
 			}(pendingBlocks[i*blocksPerWorker : (i+1)*blocksPerWorker])
 		}

+		// Ensure graceful shutdown even if the test fails early (e.g., Eventually times out).
+		// Otherwise, the test may panic with "pebble: closed" when threads are still attempting to write to the database while
+		// the test is unwinding and closing the database. If such a panic happens, we don't know which assertion failed and
+		// just see the panic. Hence, we call `cancel()` and attempt to wait for the engine to stop in all cases.
+		defer func() {
+			// stop producers and wait for them to exit
+			submittingBlocks.Store(false)
+			unittest.RequireReturnsBefore(t, wg.Wait, time.Second, "expect workers to stop producing")
+
+			// stop engines and wait for graceful shutdown
+			cancel()
+			unittest.RequireCloseBefore(t, moduleutil.AllDone(engine, followerLoop), time.Second, "engine failed to stop")
+			// Note: in case any error occurs, the `mockCtx` will fail the test due to the unexpected call of `Throw` on the mock.
+		}()
+
 		// wait for target block to become finalized, this might take a while.
 		require.Eventually(t, func() bool {
 			final, err := followerState.Final().Head()
 			require.NoError(t, err)
-			return final.Height == targetBlockHeight
-		}, time.Minute, time.Second, "expect to process all blocks before timeout")
-
-		// shutdown and cleanup test
-		submittingBlocks.Store(false)
-		unittest.RequireReturnsBefore(t, wg.Wait, time.Second, "expect workers to stop producing")
-		cancel()
-		unittest.RequireCloseBefore(t, moduleutil.AllDone(engine, followerLoop), time.Second, "engine failed to stop")
-		select {
-		case err := <-errs:
-			require.NoError(t, err)
-		default:
-		}
+			success := final.Height == targetBlockHeight
+			if !success {
+				t.Logf("finalized height %d, waiting for %d", final.Height, targetBlockHeight)
+			} else {
+				t.Logf("successfully finalized target height %d\n", targetBlockHeight)
+			}
+			return success
+		}, 90*time.Second, time.Second, "expect to process all blocks before timeout")
+
 	})
 }
diff --git a/engine/common/follower/mock/compliance_core.go b/engine/common/follower/mock/compliance_core.go
index 05dfdfc19fc..b0ecb00adae 100644
--- a/engine/common/follower/mock/compliance_core.go
+++ b/engine/common/follower/mock/compliance_core.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
package mock @@ -15,10 +15,14 @@ type ComplianceCore struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *ComplianceCore) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -32,11 +36,15 @@ func (_m *ComplianceCore) Done() <-chan struct{} { } // OnBlockRange provides a mock function with given fields: originID, connectedRange -func (_m *ComplianceCore) OnBlockRange(originID flow.Identifier, connectedRange []*flow.Block) error { +func (_m *ComplianceCore) OnBlockRange(originID flow.Identifier, connectedRange []*flow.Proposal) error { ret := _m.Called(originID, connectedRange) + if len(ret) == 0 { + panic("no return value specified for OnBlockRange") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []*flow.Block) error); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier, []*flow.Proposal) error); ok { r0 = rf(originID, connectedRange) } else { r0 = ret.Error(0) @@ -50,10 +58,14 @@ func (_m *ComplianceCore) OnFinalizedBlock(finalized *flow.Header) { _m.Called(finalized) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *ComplianceCore) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -71,13 +83,12 @@ func (_m *ComplianceCore) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewComplianceCore interface { +// NewComplianceCore creates a new instance of ComplianceCore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewComplianceCore(t interface { mock.TestingT Cleanup(func()) -} - -// NewComplianceCore creates a new instance of ComplianceCore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewComplianceCore(t mockConstructorTestingTNewComplianceCore) *ComplianceCore { +}) *ComplianceCore { mock := &ComplianceCore{} mock.Mock.Test(t) diff --git a/engine/common/follower/pending_tree/pending_tree.go b/engine/common/follower/pending_tree/pending_tree.go index 5c4b0081d36..5783a604354 100644 --- a/engine/common/follower/pending_tree/pending_tree.go +++ b/engine/common/follower/pending_tree/pending_tree.go @@ -29,7 +29,7 @@ func NewVertex(certifiedBlock flow.CertifiedBlock, connectedToFinalized bool) (* func (v *PendingBlockVertex) VertexID() flow.Identifier { return v.CertifyingQC.BlockID } func (v *PendingBlockVertex) Level() uint64 { return v.CertifyingQC.View } func (v *PendingBlockVertex) Parent() (flow.Identifier, uint64) { - return v.Block.Header.ParentID, v.Block.Header.ParentView + return v.Proposal.Block.ParentID, v.Proposal.Block.ParentView } // PendingTree is a mempool holding certified blocks that eventually might be connected to the finalized state. 
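As the comment above notes, the PendingTree holds certified blocks: a block paired with a quorum certificate from one of its children, which proves the block's validity. A short sketch of producing certified blocks from a chain of proposals and feeding them to the tree, mirroring the test fixtures later in this diff (`proposals` and `tree` are assumed to exist; `proposals` would come from something like `unittest.ProposalChainFixtureFrom`):

    certified := make([]flow.CertifiedBlock, 0, len(proposals)-1)
    for i := 0; i < len(proposals)-1; i++ {
        // the QC embedded in the child's header certifies its parent
        cb, err := flow.NewCertifiedBlock(proposals[i], proposals[i+1].Block.ParentQC())
        if err != nil {
            panic(err) // only possible if the QC does not actually certify the block
        }
        certified = append(certified, cb)
    }
    connected, err := tree.AddBlocks(certified)
    if err != nil {
        panic(err) // e.g. model.ByzantineThresholdExceededError on conflicting QCs
    }
    _ = connected // blocks that became connected to the finalized state
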
@@ -102,13 +102,13 @@ func (t *PendingTree) AddBlocks(certifiedBlocks []flow.CertifiedBlock) ([]flow.C if iter.HasNext() { v := iter.NextVertex().(*PendingBlockVertex) - if v.VertexID() == block.ID() { + if v.VertexID() == block.BlockID() { // this vertex is already in tree, skip it continue } else { return nil, model.ByzantineThresholdExceededError{Evidence: fmt.Sprintf( "conflicting QCs at view %d: %v and %v", - block.View(), v.ID(), block.ID(), + block.View(), v.BlockID(), block.BlockID(), )} } } @@ -133,10 +133,10 @@ func (t *PendingTree) AddBlocks(certifiedBlocks []flow.CertifiedBlock) ([]flow.C // connectsToFinalizedBlock checks if candidate block connects to the finalized state. func (t *PendingTree) connectsToFinalizedBlock(block flow.CertifiedBlock) bool { - if block.Block.Header.ParentID == t.lastFinalizedID { + if block.Proposal.Block.ParentID == t.lastFinalizedID { return true } - if parentVertex, found := t.forest.GetVertex(block.Block.Header.ParentID); found { + if parentVertex, found := t.forest.GetVertex(block.Proposal.Block.ParentID); found { return parentVertex.(*PendingBlockVertex).connectedToFinalized } return false diff --git a/engine/common/follower/pending_tree/pending_tree_test.go b/engine/common/follower/pending_tree/pending_tree_test.go index 14f45d23ca5..06f410cffbc 100644 --- a/engine/common/follower/pending_tree/pending_tree_test.go +++ b/engine/common/follower/pending_tree/pending_tree_test.go @@ -4,7 +4,6 @@ import ( "fmt" "math/rand" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +27,6 @@ type PendingTreeSuite struct { } func (s *PendingTreeSuite) SetupTest() { - rand.Seed(time.Now().UnixNano()) s.finalized = unittest.BlockHeaderFixture() s.pendingTree = NewPendingTree(s.finalized) } @@ -38,7 +36,7 @@ func (s *PendingTreeSuite) SetupTest() { // Having: F ← B1 ← B2 ← B3 // Add [B1, B2, B3], expect to get [B1;QC_B1, B2;QC_B2; B3;QC_B3] func (s *PendingTreeSuite) TestBlocksConnectToFinalized() { - blocks := certifiedBlocksFixture(3, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 3, s.finalized) connectedBlocks, err := s.pendingTree.AddBlocks(blocks) require.NoError(s.T(), err) require.Equal(s.T(), blocks, connectedBlocks) @@ -49,7 +47,7 @@ func (s *PendingTreeSuite) TestBlocksConnectToFinalized() { // Having: F ← B1 ← B2 ← B3 // Add [B2, B3], expect to get [] func (s *PendingTreeSuite) TestBlocksAreNotConnectedToFinalized() { - blocks := certifiedBlocksFixture(3, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 3, s.finalized) connectedBlocks, err := s.pendingTree.AddBlocks(blocks[1:]) require.NoError(s.T(), err) require.Empty(s.T(), connectedBlocks) @@ -61,7 +59,7 @@ func (s *PendingTreeSuite) TestBlocksAreNotConnectedToFinalized() { // Add [B3, B4, B5], expect to get [] // Add [B1, B2], expect to get [B1, B2, B3, B4, B5] func (s *PendingTreeSuite) TestInsertingMissingBlockToFinalized() { - blocks := certifiedBlocksFixture(5, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 5, s.finalized) connectedBlocks, err := s.pendingTree.AddBlocks(blocks[len(blocks)-3:]) require.NoError(s.T(), err) require.Empty(s.T(), connectedBlocks) @@ -83,14 +81,14 @@ func (s *PendingTreeSuite) TestInsertingMissingBlockToFinalized() { // Add [B4, B5, B6, B7], expect to get [] // Add [B1], expect to get [B1, B2, B3, B4, B5, B6, B7] func (s *PendingTreeSuite) TestAllConnectedForksAreCollected() { - longestFork := certifiedBlocksFixture(5, s.finalized) - B2 := 
unittest.BlockWithParentFixture(longestFork[0].Block.Header) + longestFork := certifiedBlocksFixture(s.T(), 5, s.finalized) + B2 := unittest.BlockWithParentFixture(longestFork[0].Proposal.Block.ToHeader()) // make sure short fork doesn't have conflicting views, so we don't trigger exception - B2.Header.View = longestFork[len(longestFork)-1].Block.Header.View + 1 - B3 := unittest.BlockWithParentFixture(B2.Header) + B2.View = longestFork[len(longestFork)-1].Proposal.Block.View + 1 + B3 := unittest.BlockWithParentFixture(B2.ToHeader()) shortFork := []flow.CertifiedBlock{{ - Block: B2, - CertifyingQC: B3.Header.QuorumCertificate(), + Proposal: unittest.ProposalFromBlock(B2), + CertifyingQC: B3.ParentQC(), }, certifiedBlockFixture(B3)} connectedBlocks, err := s.pendingTree.AddBlocks(shortFork) @@ -108,7 +106,7 @@ func (s *PendingTreeSuite) TestAllConnectedForksAreCollected() { // TestAddingConnectedBlocks tests that adding blocks that were already reported as connected is no-op. func (s *PendingTreeSuite) TestAddingConnectedBlocks() { - blocks := certifiedBlocksFixture(3, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 3, s.finalized) connectedBlocks, err := s.pendingTree.AddBlocks(blocks) require.NoError(s.T(), err) require.Equal(s.T(), blocks, connectedBlocks) @@ -125,7 +123,7 @@ func (s *PendingTreeSuite) TestByzantineThresholdExceeded() { conflictingBlock := unittest.BlockWithParentFixture(s.finalized) // use same view for conflicted blocks, this is not possible unless there is more than // 1/3 byzantine participants - conflictingBlock.Header.View = block.Header.View + conflictingBlock.View = block.View _, err := s.pendingTree.AddBlocks([]flow.CertifiedBlock{certifiedBlockFixture(block)}) require.NoError(s.T(), err) // adding same block should result in no-op @@ -143,7 +141,7 @@ func (s *PendingTreeSuite) TestByzantineThresholdExceeded() { // Randomly shuffle [B, C, D, E] and add it as single batch, expect [] connected blocks. // Insert [A], expect [A, B, C, D, E] connected blocks. 
func (s *PendingTreeSuite) TestBatchWithSkipsAndInRandomOrder() { - blocks := certifiedBlocksFixture(5, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 5, s.finalized) rand.Shuffle(len(blocks)-1, func(i, j int) { blocks[i+1], blocks[j+1] = blocks[j+1], blocks[i+1] @@ -156,8 +154,8 @@ func (s *PendingTreeSuite) TestBatchWithSkipsAndInRandomOrder() { require.NoError(s.T(), err) // restore view based order since that's what we will get from PendingTree - slices.SortFunc(blocks, func(lhs flow.CertifiedBlock, rhs flow.CertifiedBlock) bool { - return lhs.View() < rhs.View() + slices.SortFunc(blocks, func(lhs flow.CertifiedBlock, rhs flow.CertifiedBlock) int { + return int(lhs.View()) - int(rhs.View()) }) assert.Equal(s.T(), blocks, connectedBlocks) @@ -174,14 +172,14 @@ func (s *PendingTreeSuite) TestBatchWithSkipsAndInRandomOrder() { // Add [B5, B6, B7], expect to get [] // Finalize B4, expect to get [B5, B6, B7] func (s *PendingTreeSuite) TestResolveBlocksAfterFinalization() { - longestFork := certifiedBlocksFixture(5, s.finalized) - B2 := unittest.BlockWithParentFixture(longestFork[0].Block.Header) + longestFork := certifiedBlocksFixture(s.T(), 5, s.finalized) + B2 := unittest.BlockWithParentFixture(longestFork[0].Proposal.Block.ToHeader()) // make sure short fork doesn't have conflicting views, so we don't trigger exception - B2.Header.View = longestFork[len(longestFork)-1].Block.Header.View + 1 - B3 := unittest.BlockWithParentFixture(B2.Header) + B2.View = longestFork[len(longestFork)-1].Proposal.Block.View + 1 + B3 := unittest.BlockWithParentFixture(B2.ToHeader()) shortFork := []flow.CertifiedBlock{{ - Block: B2, - CertifyingQC: B3.Header.QuorumCertificate(), + Proposal: unittest.ProposalFromBlock(B2), + CertifyingQC: B3.ParentQC(), }, certifiedBlockFixture(B3)} connectedBlocks, err := s.pendingTree.AddBlocks(shortFork) @@ -192,7 +190,7 @@ func (s *PendingTreeSuite) TestResolveBlocksAfterFinalization() { require.NoError(s.T(), err) require.Empty(s.T(), connectedBlocks) - connectedBlocks, err = s.pendingTree.FinalizeFork(longestFork[1].Block.Header) + connectedBlocks, err = s.pendingTree.FinalizeFork(longestFork[1].Proposal.Block.ToHeader()) require.NoError(s.T(), err) require.ElementsMatch(s.T(), longestFork[2:], connectedBlocks) } @@ -200,8 +198,8 @@ func (s *PendingTreeSuite) TestResolveBlocksAfterFinalization() { // TestBlocksLowerThanFinalizedView tests that implementation drops blocks lower than finalized view. func (s *PendingTreeSuite) TestBlocksLowerThanFinalizedView() { block := unittest.BlockWithParentFixture(s.finalized) - newFinalized := unittest.BlockWithParentFixture(block.Header) - _, err := s.pendingTree.FinalizeFork(newFinalized.Header) + newFinalized := unittest.BlockWithParentFixture(block.ToHeader()) + _, err := s.pendingTree.FinalizeFork(newFinalized.ToHeader()) require.NoError(s.T(), err) _, err = s.pendingTree.AddBlocks([]flow.CertifiedBlock{certifiedBlockFixture(block)}) require.NoError(s.T(), err) @@ -215,13 +213,13 @@ func (s *PendingTreeSuite) TestBlocksLowerThanFinalizedView() { // Finalize A. // Adding [A, B, C, D] returns [D] since A is already finalized, [B, C] are already stored and connected to the finalized state. 
func (s *PendingTreeSuite) TestAddingBlockAfterFinalization() { - blocks := certifiedBlocksFixture(4, s.finalized) + blocks := certifiedBlocksFixture(s.T(), 4, s.finalized) connectedBlocks, err := s.pendingTree.AddBlocks(blocks[:3]) require.NoError(s.T(), err) assert.Equal(s.T(), blocks[:3], connectedBlocks) - _, err = s.pendingTree.FinalizeFork(blocks[0].Block.Header) + _, err = s.pendingTree.FinalizeFork(blocks[0].Proposal.Block.ToHeader()) require.NoError(s.T(), err) connectedBlocks, err = s.pendingTree.AddBlocks(blocks) @@ -238,13 +236,13 @@ func (s *PendingTreeSuite) TestAddingBlockAfterFinalization() { func (s *PendingTreeSuite) TestAddingBlocksWithSameHeight() { A := unittest.BlockWithParentFixture(s.finalized) B := unittest.BlockWithParentFixture(s.finalized) - B.Header.View = A.Header.View + 1 - C := unittest.BlockWithParentFixture(A.Header) - C.Header.View = B.Header.View + 1 - D := unittest.BlockWithParentFixture(B.Header) - D.Header.View = C.Header.View + 1 - E := unittest.BlockWithParentFixture(D.Header) - E.Header.View = D.Header.View + 1 + B.View = A.View + 1 + C := unittest.BlockWithParentFixture(A.ToHeader()) + C.View = B.View + 1 + D := unittest.BlockWithParentFixture(B.ToHeader()) + D.View = C.View + 1 + E := unittest.BlockWithParentFixture(D.ToHeader()) + E.View = D.View + 1 firstBatch := []flow.CertifiedBlock{certifiedBlockFixture(A), certifiedBlockFixture(B), certifiedBlockFixture(D)} secondBatch := []flow.CertifiedBlock{certifiedBlockFixture(C), certifiedBlockFixture(E)} @@ -259,27 +257,22 @@ func (s *PendingTreeSuite) TestAddingBlocksWithSameHeight() { } // certifiedBlocksFixture builds a chain of certified blocks starting at some block. -func certifiedBlocksFixture(count int, parent *flow.Header) []flow.CertifiedBlock { +func certifiedBlocksFixture(t *testing.T, count int, parent *flow.Header) []flow.CertifiedBlock { result := make([]flow.CertifiedBlock, 0, count) - blocks := unittest.ChainFixtureFrom(count, parent) + blocks := unittest.ProposalChainFixtureFrom(count, parent) for i := 0; i < count-1; i++ { - certBlock, err := flow.NewCertifiedBlock(blocks[i], blocks[i+1].Header.QuorumCertificate()) + certBlock, err := flow.NewCertifiedBlock(blocks[i], blocks[i+1].Block.ParentQC()) if err != nil { // this should never happen, as we are specifically constructing a certifying QC for the input block panic(fmt.Sprintf("unexpected error constructing certified block: %s", err.Error())) } result = append(result, certBlock) } - result = append(result, certifiedBlockFixture(blocks[len(blocks)-1])) + result = append(result, certifiedBlockFixture(&blocks[len(blocks)-1].Block)) return result } // certifiedBlockFixture builds a certified block using a QC with fixture signatures. func certifiedBlockFixture(block *flow.Block) flow.CertifiedBlock { - certBlock, err := flow.NewCertifiedBlock(block, unittest.CertifyBlock(block.Header)) - if err != nil { - // this should never happen, as we are specifically constructing a certifying QC for the input block - panic(fmt.Sprintf("unexpected error constructing certified block: %s", err.Error())) - } - return certBlock + return *unittest.NewCertifiedBlock(block) } diff --git a/engine/common/grpc/compressor/deflate/deflate.go b/engine/common/grpc/compressor/deflate/deflate.go new file mode 100644 index 00000000000..7bbba76f506 --- /dev/null +++ b/engine/common/grpc/compressor/deflate/deflate.go @@ -0,0 +1,74 @@ +// Package deflate implements and registers the DEFLATE compressor +// during initialization. 
+package deflate
+
+import (
+	"compress/flate"
+	"io"
+	"sync"
+
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the DEFLATE compressor.
+const Name = "deflate"
+
+func init() {
+	c := &compressor{}
+	c.poolCompressor.New = func() interface{} {
+		w, _ := flate.NewWriter(nil, flate.DefaultCompression)
+		return &writer{Writer: w, pool: &c.poolCompressor}
+	}
+	encoding.RegisterCompressor(c)
+}
+
+type compressor struct {
+	poolCompressor   sync.Pool
+	poolDecompressor sync.Pool
+}
+
+func (c *compressor) Name() string {
+	return Name
+}
+
+func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) {
+	dw := c.poolCompressor.Get().(*writer)
+	dw.Reset(w)
+	return dw, nil
+}
+
+func (c *compressor) Decompress(r io.Reader) (io.Reader, error) {
+	dr, inPool := c.poolDecompressor.Get().(*reader)
+	if !inPool {
+		newR := flate.NewReader(r)
+		return &reader{ReadCloser: newR, pool: &c.poolDecompressor}, nil
+	}
+	if err := dr.ReadCloser.(flate.Resetter).Reset(r, nil); err != nil {
+		c.poolDecompressor.Put(dr)
+		return nil, err
+	}
+	return dr, nil
+}
+
+type writer struct {
+	*flate.Writer
+	pool *sync.Pool
+}
+
+func (w *writer) Close() error {
+	defer w.pool.Put(w)
+	return w.Writer.Close()
+}
+
+type reader struct {
+	io.ReadCloser
+	pool *sync.Pool
+}
+
+func (r *reader) Read(p []byte) (n int, err error) {
+	n, err = r.ReadCloser.Read(p)
+	if err == io.EOF {
+		r.pool.Put(r)
+	}
+	return n, err
+}
diff --git a/engine/common/grpc/compressor/snappy/snappy.go b/engine/common/grpc/compressor/snappy/snappy.go
new file mode 100644
index 00000000000..cb7dec75853
--- /dev/null
+++ b/engine/common/grpc/compressor/snappy/snappy.go
@@ -0,0 +1,67 @@
+package snappy
+
+import (
+	"io"
+	"sync"
+
+	"github.com/golang/snappy"
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the Snappy compressor.
+const Name = "snappy"
+
+func init() {
+	c := &compressor{}
+	c.poolCompressor.New = func() interface{} {
+		return &writer{Writer: snappy.NewBufferedWriter(nil), pool: &c.poolCompressor}
+	}
+	encoding.RegisterCompressor(c)
+}
+
+type compressor struct {
+	poolCompressor   sync.Pool
+	poolDecompressor sync.Pool
+}
+
+func (c *compressor) Name() string {
+	return Name
+}
+
+func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) {
+	sw := c.poolCompressor.Get().(*writer)
+	sw.Reset(w)
+	return sw, nil
+}
+
+func (c *compressor) Decompress(r io.Reader) (io.Reader, error) {
+	sr, inPool := c.poolDecompressor.Get().(*reader)
+	if !inPool {
+		return snappy.NewReader(r), nil
+	}
+	sr.Reset(r)
+	return sr, nil
+}
+
+type writer struct {
+	*snappy.Writer
+	pool *sync.Pool
+}
+
+func (w *writer) Close() error {
+	defer w.pool.Put(w)
+	return w.Writer.Close()
+}
+
+type reader struct {
+	*snappy.Reader
+	pool *sync.Pool
+}
+
+func (r *reader) Read(p []byte) (n int, err error) {
+	n, err = r.Reader.Read(p)
+	if err == io.EOF {
+		r.pool.Put(r)
+	}
+	return n, err
+}
diff --git a/engine/common/grpc/forwarder/forwarder.go b/engine/common/grpc/forwarder/forwarder.go
new file mode 100644
index 00000000000..b685fefe780
--- /dev/null
+++ b/engine/common/grpc/forwarder/forwarder.go
@@ -0,0 +1,105 @@
+package forwarder
+
+import (
+	"fmt"
+	"io"
+	"sync"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/flow-go/engine/access/rpc/connection"
+	"github.com/onflow/flow-go/model/flow"
+
+	"github.com/onflow/flow/protobuf/go/flow/access"
+)
+
+// Upstream is a container for an individual upstream containing the id, client and closer for it
+type Upstream struct {
+	id     *flow.IdentitySkeleton // the public identity of one network participant (node)
+	client access.AccessAPIClient // client with gRPC connection
+	closer io.Closer              // closer for client connection, should use to close the connection when done
+}
+
+// Forwarder forwards all requests to a set of upstream access nodes or observers
+type Forwarder struct {
+	lock        sync.Mutex
+	roundRobin  int
+	upstream    []Upstream
+	connFactory connection.ConnectionFactory
+}
+
+func NewForwarder(identities flow.IdentitySkeletonList, connectionFactory connection.ConnectionFactory) (*Forwarder, error) {
+	forwarder := &Forwarder{connFactory: connectionFactory}
+	err := forwarder.setFlowAccessAPI(identities)
+	return forwarder, err
+}
+
+// setFlowAccessAPI sets a backend access API that forwards some requests to an upstream node.
+// It is used by Observer services, Blockchain Data Service, etc.
+// Make sure that this is used only for observation, not by a staked participant in the flow network.
+// This means that observers see a copy of the data but there is no interaction to ensure integrity from the root block.
+func (f *Forwarder) setFlowAccessAPI(accessNodeAddressAndPort flow.IdentitySkeletonList) error {
+	f.upstream = make([]Upstream, accessNodeAddressAndPort.Count())
+	for i, identity := range accessNodeAddressAndPort {
+		// Store the faultTolerantClient setup parameters, such as address, public key, and timeout, so that
+		// we can refresh the API on connection loss
+		f.upstream[i].id = identity
+
+		// We fail on any single error on startup, so that
+		// we identify bootstrapping errors early
+		err := f.reconnectingClient(i)
+		if err != nil {
+			return err
+		}
+	}
+
+	f.roundRobin = 0
+	return nil
+}
+
+// reconnectingClient returns an active client, or creates a new connection.
+func (f *Forwarder) reconnectingClient(i int) error {
+	identity := f.upstream[i].id
+
+	accessApiClient, closer, err := f.connFactory.GetAccessAPIClientWithPort(identity.Address, identity.NetworkPubKey)
+	if err != nil {
+		return fmt.Errorf("failed to connect to access node at %s: %w", identity.Address, err)
+	}
+	// closer is not nil iff err is nil, should use to close the connection when done
+	f.upstream[i].closer = closer
+	f.upstream[i].client = accessApiClient
+	return nil
+}
+
+// FaultTolerantClient implements an upstream connection that reconnects on errors,
+// retrying up to a fixed number of times.
+func (f *Forwarder) FaultTolerantClient() (access.AccessAPIClient, io.Closer, error) {
+	if len(f.upstream) == 0 {
+		return nil, nil, status.Errorf(codes.Unimplemented, "method not implemented")
+	}
+
+	// Reasoning: A retry count of three gives an acceptable 5% failure ratio from a 37% failure ratio.
+	// A bigger number is problematic due to the DNS resolve and connection times,
+	// plus the need to log and debug each individual connection failure.
+	//
+	// This reasoning eliminates the need to make this parameter configurable.
+	// The logic also works when rolling over a single connection, which keeps the code clean.
+	const retryMax = 3
+
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	var err error
+	for i := 0; i < retryMax; i++ {
+		f.roundRobin++
+		f.roundRobin = f.roundRobin % len(f.upstream)
+		err = f.reconnectingClient(f.roundRobin)
+		if err != nil {
+			continue
+		}
+		return f.upstream[f.roundRobin].client, f.upstream[f.roundRobin].closer, nil
+	}
+
+	return nil, nil, status.Error(codes.Unavailable, err.Error())
+}
diff --git a/engine/common/provider/engine.go b/engine/common/provider/engine.go
index 69195a36145..5cb77c6a3e2 100644
--- a/engine/common/provider/engine.go
+++ b/engine/common/provider/engine.go
@@ -5,7 +5,7 @@ import (
 	"fmt"

 	"github.com/rs/zerolog"
-	"github.com/vmihailenco/msgpack"
+	"github.com/vmihailenco/msgpack/v4"

 	"github.com/onflow/flow-go/engine"
 	"github.com/onflow/flow-go/engine/common/provider/internal"
@@ -52,7 +52,7 @@ type Engine struct {
 	channel        channels.Channel
 	requestHandler *engine.MessageHandler
 	requestQueue   engine.MessageStore
-	selector       flow.IdentityFilter
+	selector       flow.IdentityFilter[flow.Identity]
 	retrieve       RetrieveFunc
 	// buffered channel for EntityRequest workers to pick and process.
 	requestChannel chan *internal.EntityRequest
@@ -66,19 +66,19 @@ var _ network.MessageProcessor = (*Engine)(nil)
 func New(
 	log zerolog.Logger,
 	metrics module.EngineMetrics,
-	net network.Network,
+	net network.EngineRegistry,
 	me module.Local,
 	state protocol.State,
 	requestQueue engine.MessageStore,
 	requestWorkers uint,
 	channel channels.Channel,
-	selector flow.IdentityFilter,
+	selector flow.IdentityFilter[flow.Identity],
 	retrieve RetrieveFunc) (*Engine, error) {

 	// make sure we don't respond to request sent by self or unauthorized nodes
 	selector = filter.And(
 		selector,
-		filter.Not(filter.HasNodeID(me.NodeID())),
+		filter.Not(filter.HasNodeID[flow.Identity](me.NodeID())),
 	)

 	handler := engine.NewMessageHandler(
@@ -89,13 +89,13 @@ func New(
 			// Provider engine only expects EntityRequest.
 			// Other message types are discarded by Match.
 			Match: func(message *engine.Message) bool {
-				_, ok := message.Payload.(*messages.EntityRequest)
+				_, ok := message.Payload.(*flow.EntityRequest)
 				return ok
 			},
 			// Map is called on messages that are Match(ed) successfully, i.e.,
 			// EntityRequest.
			Map: func(message *engine.Message) (*engine.Message, bool) {
-				request, ok := message.Payload.(*messages.EntityRequest)
+				request, ok := message.Payload.(*flow.EntityRequest)
 				if !ok {
 					// should never happen, unless there is a bug.
 					log.Warn().
@@ -198,7 +198,7 @@ func (e *Engine) onEntityRequest(request *internal.EntityRequest) error {
 	// for the handler to make sure the requester is authorized for this resource
 	requesters, err := e.state.Final().Identities(filter.And(
 		e.selector,
-		filter.HasNodeID(request.OriginId)),
+		filter.HasNodeID[flow.Identity](request.OriginId)),
 	)
 	if err != nil {
 		return fmt.Errorf("could not get requesters: %w", err)
@@ -266,7 +266,7 @@ func (e *Engine) onEntityRequest(request *internal.EntityRequest) error {
 	e.log.Info().
 		Str("origin_id", request.OriginId.String()).
 		Strs("entity_ids", flow.IdentifierList(entityIDs).Strings()).
-		Uint64("nonce", request.Nonce). // to match with the the entity request received log
+		Uint64("nonce", request.Nonce). // to match with the entity request received log
 		Msg("entity response sent")

 	return nil
@@ -305,7 +305,7 @@ func (e *Engine) processAvailableMessages(ctx irrecoverable.SignalerContext) {
 			return
 		}

-		requestEvent, ok := msg.Payload.(messages.EntityRequest)
+		requestEvent, ok := msg.Payload.(flow.EntityRequest)
 		if !ok {
 			// should never happen, as we only put EntityRequest in the queue,
 			// if it does happen, it means there is a bug in the queue implementation.
@@ -343,8 +343,14 @@ func (e *Engine) processEntityRequestWorker(ctx irrecoverable.SignalerContext, r
 		lg.Trace().Msg("worker picked up entity request for processing")
 		err := e.onEntityRequest(request)
 		if err != nil {
-			if engine.IsInvalidInputError(err) || engine.IsNetworkTransmissionError(err) {
-				lg.Error().Err(err).Msg("worker could not process entity request")
+			if engine.IsInvalidInputError(err) {
+				// log at debug level since nodes that recently unstaked are allowed to communicate over
+				// the network, but not allowed to request entities. Even an honest node may have fallen
+				// behind processing blocks and inadvertently continued requesting entities after it
+				// has left the network.
+				lg.Debug().Err(err).Msg("could not process entity request: invalid request")
+			} else if engine.IsNetworkTransmissionError(err) {
+				lg.Error().Err(err).Msg("could not process entity request: transmit error")
 			} else {
 				// this is an unexpected error, we crash the node.
ctx.Throw(err) diff --git a/engine/common/provider/engine_test.go b/engine/common/provider/engine_test.go index 8af1c41a18f..ad86e5a6d65 100644 --- a/engine/common/provider/engine_test.go +++ b/engine/common/provider/engine_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/vmihailenco/msgpack" + "github.com/vmihailenco/msgpack/v4" "github.com/onflow/flow-go/engine/common/provider" "github.com/onflow/flow-go/model/flow" @@ -20,7 +20,7 @@ import ( "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/unittest" @@ -34,7 +34,7 @@ func TestOnEntityRequestFull(t *testing.T) { entities := make(map[flow.Identifier]flow.Entity) identities := unittest.IdentityListFixture(8) - selector := filter.HasNodeID(identities.NodeIDs()...) + selector := filter.HasNodeID[flow.Identity](identities.NodeIDs()...) originID := identities[0].NodeID coll1 := unittest.CollectionFixture(1) @@ -59,7 +59,7 @@ func TestOnEntityRequestFull(t *testing.T) { final := protocol.NewSnapshot(t) final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return identities.Filter(selector) }, nil, @@ -68,7 +68,7 @@ func TestOnEntityRequestFull(t *testing.T) { state := protocol.NewState(t) state.On("Final").Return(final, nil) - net := mocknetwork.NewNetwork(t) + net := mocknetwork.NewEngineRegistry(t) con := mocknetwork.NewConduit(t) net.On("Register", mock.Anything, mock.Anything).Return(con, nil) con.On("Unicast", mock.Anything, mock.Anything).Run( @@ -105,7 +105,7 @@ func TestOnEntityRequestFull(t *testing.T) { retrieve) require.NoError(t, err) - request := &messages.EntityRequest{ + request := &flow.EntityRequest{ Nonce: rand.Uint64(), EntityIDs: []flow.Identifier{coll1.ID(), coll2.ID(), coll3.ID(), coll4.ID(), coll5.ID()}, } @@ -128,7 +128,7 @@ func TestOnEntityRequestPartial(t *testing.T) { entities := make(map[flow.Identifier]flow.Entity) identities := unittest.IdentityListFixture(8) - selector := filter.HasNodeID(identities.NodeIDs()...) + selector := filter.HasNodeID[flow.Identity](identities.NodeIDs()...) 
originID := identities[0].NodeID coll1 := unittest.CollectionFixture(1) @@ -153,7 +153,7 @@ func TestOnEntityRequestPartial(t *testing.T) { final := protocol.NewSnapshot(t) final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return identities.Filter(selector) }, nil, @@ -162,7 +162,7 @@ func TestOnEntityRequestPartial(t *testing.T) { state := protocol.NewState(t) state.On("Final").Return(final, nil) - net := mocknetwork.NewNetwork(t) + net := mocknetwork.NewEngineRegistry(t) con := mocknetwork.NewConduit(t) net.On("Register", mock.Anything, mock.Anything).Return(con, nil) con.On("Unicast", mock.Anything, mock.Anything).Run( @@ -199,7 +199,7 @@ func TestOnEntityRequestPartial(t *testing.T) { retrieve) require.NoError(t, err) - request := &messages.EntityRequest{ + request := &flow.EntityRequest{ Nonce: rand.Uint64(), EntityIDs: []flow.Identifier{coll1.ID(), coll2.ID(), coll3.ID(), coll4.ID(), coll5.ID()}, } @@ -220,7 +220,7 @@ func TestOnEntityRequestDuplicates(t *testing.T) { entities := make(map[flow.Identifier]flow.Entity) identities := unittest.IdentityListFixture(8) - selector := filter.HasNodeID(identities.NodeIDs()...) + selector := filter.HasNodeID[flow.Identity](identities.NodeIDs()...) originID := identities[0].NodeID coll1 := unittest.CollectionFixture(1) @@ -241,7 +241,7 @@ func TestOnEntityRequestDuplicates(t *testing.T) { final := protocol.NewSnapshot(t) final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return identities.Filter(selector) }, nil, @@ -250,7 +250,7 @@ func TestOnEntityRequestDuplicates(t *testing.T) { state := protocol.NewState(t) state.On("Final").Return(final, nil) - net := mocknetwork.NewNetwork(t) + net := mocknetwork.NewEngineRegistry(t) con := mocknetwork.NewConduit(t) net.On("Register", mock.Anything, mock.Anything).Return(con, nil) con.On("Unicast", mock.Anything, mock.Anything).Run( @@ -288,7 +288,7 @@ func TestOnEntityRequestDuplicates(t *testing.T) { require.NoError(t, err) // create entity requests with some duplicate entity IDs - request := &messages.EntityRequest{ + request := &flow.EntityRequest{ Nonce: rand.Uint64(), EntityIDs: []flow.Identifier{coll1.ID(), coll2.ID(), coll3.ID(), coll3.ID(), coll2.ID(), coll1.ID()}, } @@ -307,7 +307,7 @@ func TestOnEntityRequestEmpty(t *testing.T) { entities := make(map[flow.Identifier]flow.Entity) identities := unittest.IdentityListFixture(8) - selector := filter.HasNodeID(identities.NodeIDs()...) + selector := filter.HasNodeID[flow.Identity](identities.NodeIDs()...) 
originID := identities[0].NodeID coll1 := unittest.CollectionFixture(1) @@ -326,7 +326,7 @@ func TestOnEntityRequestEmpty(t *testing.T) { final := protocol.NewSnapshot(t) final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return identities.Filter(selector) }, nil, @@ -335,7 +335,7 @@ func TestOnEntityRequestEmpty(t *testing.T) { state := protocol.NewState(t) state.On("Final").Return(final, nil) - net := mocknetwork.NewNetwork(t) + net := mocknetwork.NewEngineRegistry(t) con := mocknetwork.NewConduit(t) net.On("Register", mock.Anything, mock.Anything).Return(con, nil) con.On("Unicast", mock.Anything, mock.Anything).Run( @@ -366,7 +366,7 @@ func TestOnEntityRequestEmpty(t *testing.T) { retrieve) require.NoError(t, err) - request := &messages.EntityRequest{ + request := &flow.EntityRequest{ Nonce: rand.Uint64(), EntityIDs: []flow.Identifier{coll1.ID(), coll2.ID(), coll3.ID(), coll4.ID(), coll5.ID()}, } @@ -385,7 +385,7 @@ func TestOnEntityRequestInvalidOrigin(t *testing.T) { entities := make(map[flow.Identifier]flow.Entity) identities := unittest.IdentityListFixture(8) - selector := filter.HasNodeID(identities.NodeIDs()...) + selector := filter.HasNodeID[flow.Identity](identities.NodeIDs()...) originID := unittest.IdentifierFixture() coll1 := unittest.CollectionFixture(1) @@ -410,7 +410,7 @@ func TestOnEntityRequestInvalidOrigin(t *testing.T) { final := protocol.NewSnapshot(t) final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { defer cancel() return identities.Filter(selector) }, @@ -420,7 +420,7 @@ func TestOnEntityRequestInvalidOrigin(t *testing.T) { state := protocol.NewState(t) state.On("Final").Return(final, nil) - net := mocknetwork.NewNetwork(t) + net := mocknetwork.NewEngineRegistry(t) con := mocknetwork.NewConduit(t) net.On("Register", mock.Anything, mock.Anything).Return(con, nil) me := mockmodule.NewLocal(t) @@ -440,7 +440,7 @@ func TestOnEntityRequestInvalidOrigin(t *testing.T) { retrieve) require.NoError(t, err) - request := &messages.EntityRequest{ + request := &flow.EntityRequest{ Nonce: rand.Uint64(), EntityIDs: []flow.Identifier{coll1.ID(), coll2.ID(), coll3.ID(), coll4.ID(), coll5.ID()}, } diff --git a/engine/common/requester/engine.go b/engine/common/requester/engine.go index f83a2d03780..1354425b1ed 100644 --- a/engine/common/requester/engine.go +++ b/engine/common/requester/engine.go @@ -3,11 +3,10 @@ package requester import ( "fmt" "math" - "math/rand" "time" "github.com/rs/zerolog" - "github.com/vmihailenco/msgpack" + "github.com/vmihailenco/msgpack/v4" "go.uber.org/atomic" "github.com/onflow/flow-go/engine" @@ -20,6 +19,7 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/rand" ) // HandleFunc is a function provided to the requester engine to handle an entity @@ -43,7 +43,7 @@ type Engine struct { state protocol.State con network.Conduit channel channels.Channel - selector flow.IdentityFilter + selector flow.IdentityFilter[flow.Identity] create CreateFunc handle HandleFunc @@ -51,14 +51,13 @@ type Engine struct { items map[flow.Identifier]*Item requests map[uint64]*messages.EntityRequest forcedDispatchOngoing *atomic.Bool // to ensure only trigger dispatching logic once at any time - rng *rand.Rand } 
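For reference, the `New` constructor below composes the requester's provider selector from the generic identity filters introduced in this change. A minimal sketch of building and applying such a selector, using only filters that appear in this diff (`targetIDs`, `me`, and `identities` are placeholders):

    selector := filter.And(
        filter.HasNodeID[flow.Identity](targetIDs...),                      // restrict to known providers
        filter.Not(filter.HasNodeID[flow.Identity](me.NodeID())),           // never request from self
        filter.HasInitialWeight[flow.Identity](true),                       // only nodes with positive weight
        filter.HasParticipationStatus(flow.EpochParticipationStatusActive), // only active epoch participants
    )
    providers := identities.Filter(selector)
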
// New creates a new requester engine, operating on the provided network channel, and requesting entities from a node // within the set obtained by applying the provided selector filter. The options allow customization of the parameters // related to the batch and retry logic. -func New(log zerolog.Logger, metrics module.EngineMetrics, net network.Network, me module.Local, state protocol.State, - channel channels.Channel, selector flow.IdentityFilter, create CreateFunc, options ...OptionFunc) (*Engine, error) { +func New(log zerolog.Logger, metrics module.EngineMetrics, net network.EngineRegistry, me module.Local, state protocol.State, + channel channels.Channel, selector flow.IdentityFilter[flow.Identity], create CreateFunc, options ...OptionFunc) (*Engine, error) { // initialize the default config cfg := Config{ @@ -90,15 +89,16 @@ func New(log zerolog.Logger, metrics module.EngineMetrics, net network.Network, // make sure we don't send requests from self selector = filter.And( selector, - filter.Not(filter.HasNodeID(me.NodeID())), - filter.Not(filter.Ejected), + filter.Not(filter.HasNodeID[flow.Identity](me.NodeID())), + filter.Not(filter.HasParticipationStatus(flow.EpochParticipationStatusEjected)), ) - // make sure we don't send requests to unauthorized nodes + // make sure we only send requests to nodes that are active in the current epoch and have positive weight if cfg.ValidateStaking { selector = filter.And( selector, - filter.HasWeight(true), + filter.HasInitialWeight[flow.Identity](true), + filter.HasParticipationStatus(flow.EpochParticipationStatusActive), ) } @@ -117,7 +117,6 @@ func New(log zerolog.Logger, metrics module.EngineMetrics, net network.Network, items: make(map[flow.Identifier]*Item), // holds all pending items requests: make(map[uint64]*messages.EntityRequest), // holds all sent requests forcedDispatchOngoing: atomic.NewBool(false), - rng: rand.New(rand.NewSource(time.Now().UnixNano())), } // register the engine with the network layer and store the conduit @@ -135,9 +134,8 @@ func New(log zerolog.Logger, metrics module.EngineMetrics, net network.Network, // function. It is done in a separate call so that the requester can be injected // into engines upon construction, and then provide a handle function to the // requester from that engine itself. -func (e *Engine) WithHandle(handle HandleFunc) *Engine { +func (e *Engine) WithHandle(handle HandleFunc) { e.handle = handle - return e } // Ready returns a ready channel that is closed once the engine has fully @@ -203,7 +201,7 @@ func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, mes // control over which subset of providers to request a given entity from, such as // selection of a collection cluster. Use `filter.Any` if no additional selection // is required. Checks integrity of response to make sure that we got entity that we were requesting. -func (e *Engine) EntityByID(entityID flow.Identifier, selector flow.IdentityFilter) { +func (e *Engine) EntityByID(entityID flow.Identifier, selector flow.IdentityFilter[flow.Identity]) { e.addEntityRequest(entityID, selector, true) } @@ -212,11 +210,11 @@ func (e *Engine) EntityByID(entityID flow.Identifier, selector flow.IdentityFilt // of valid providers for the data and allows finer-grained control // over which providers to request data from. Doesn't perform integrity check // can be used to get entities without knowing their ID. 
-func (e *Engine) Query(key flow.Identifier, selector flow.IdentityFilter) {
+func (e *Engine) Query(key flow.Identifier, selector flow.IdentityFilter[flow.Identity]) {
 	e.addEntityRequest(key, selector, false)
 }

-func (e *Engine) addEntityRequest(entityID flow.Identifier, selector flow.IdentityFilter, checkIntegrity bool) {
+func (e *Engine) addEntityRequest(entityID flow.Identifier, selector flow.IdentityFilter[flow.Identity], checkIntegrity bool) {
 	e.unit.Lock()
 	defer e.unit.Unlock()
@@ -314,19 +312,11 @@ func (e *Engine) dispatchRequest() (bool, error) {
 		return false, fmt.Errorf("could not get providers: %w", err)
 	}

-	// randomize order of items, so that they can be requested in different order each time
-	rndItems := make([]flow.Identifier, 0, len(e.items))
-	for k := range e.items {
-		rndItems = append(rndItems, e.items[k].EntityID)
-	}
-	e.rng.Shuffle(len(rndItems), func(i, j int) { rndItems[i], rndItems[j] = rndItems[j], rndItems[i] })
-
 	// go through each item and decide if it should be requested again
 	now := time.Now().UTC()
 	var providerID flow.Identifier
 	var entityIDs []flow.Identifier
-	for _, entityID := range rndItems {
-		item := e.items[entityID]
+	for entityID, item := range e.items {

 		// if the item should not be requested yet, ignore
 		cutoff := item.LastRequested.Add(item.RetryAfter)
@@ -346,7 +336,7 @@ func (e *Engine) dispatchRequest() (bool, error) {
 		// for now, so it will be part of the next batch request
 		if providerID != flow.ZeroID {
 			overlap := providers.Filter(filter.And(
-				filter.HasNodeID(providerID),
+				filter.HasNodeID[flow.Identity](providerID),
 				item.ExtraSelector,
 			))
 			if len(overlap) == 0 {
@@ -360,11 +350,18 @@ func (e *Engine) dispatchRequest() (bool, error) {
 		// order is random and will skip the item most of the times
 		// when other items are available
 		if providerID == flow.ZeroID {
-			providers = providers.Filter(item.ExtraSelector)
-			if len(providers) == 0 {
-				return false, fmt.Errorf("no valid providers available")
+			filteredProviders := providers.Filter(item.ExtraSelector)
+			if len(filteredProviders) == 0 {
+				return false, fmt.Errorf("no valid providers available for item %s, total providers: %v", entityID.String(), len(providers))
+			}
+			// randomly select a provider from the filtered set
+			// to send as many item requests as possible.
+			id, err := filteredProviders.Sample(1)
+			if err != nil {
+				return false, fmt.Errorf("sampling failed: %w", err)
 			}
-			providerID = providers.Sample(1)[0].NodeID
+			providerID = id[0].NodeID
+			providers = filteredProviders
 		}

 		// add item to list and set retry parameters
@@ -396,9 +393,14 @@ func (e *Engine) dispatchRequest() (bool, error) {
 		return false, nil
 	}

+	nonce, err := rand.Uint64()
+	if err != nil {
+		return false, fmt.Errorf("nonce generation failed: %w", err)
+	}
+
 	// create a batch request, send it and store it for reference
 	req := &messages.EntityRequest{
-		Nonce:     e.rng.Uint64(),
+		Nonce:     nonce,
 		EntityIDs: entityIDs,
 	}
@@ -419,15 +421,6 @@ func (e *Engine) dispatchRequest() (bool, error) {
 	}
 	e.requests[req.Nonce] = req

-	if e.log.Debug().Enabled() {
-		e.log.Debug().
-			Hex("provider", logging.ID(providerID)).
-			Uint64("nonce", req.Nonce).
-			Strs("entities", logging.IDs(entityIDs)).
-			TimeDiff("duration", time.Now(), requestStart).
-			Msg("entity request sent")
-	}
-
 	// NOTE: we forget about requests after the expiry of the shortest retry time
 	// from the entities in the list; this means that we purge requests aggressively.
// However, most requests should be responded to on the first attempt and clearing @@ -441,11 +434,15 @@ func (e *Engine) dispatchRequest() (bool, error) { delete(e.requests, req.Nonce) }() + if e.log.Debug().Enabled() { + e.log.Debug(). + Hex("provider", logging.ID(providerID)). + Uint64("nonce", req.Nonce). + Strs("entities", logging.IDs(entityIDs)). + TimeDiff("duration", time.Now(), requestStart). + Msg("entity request sent") + } e.metrics.MessageSent(e.channel.String(), metrics.MessageEntityRequest) - e.log.Debug(). - Uint64("nonce", req.Nonce). - Strs("entity_ids", flow.IdentifierList(req.EntityIDs).Strings()). - Msg("entity request sent") return true, nil } @@ -457,14 +454,14 @@ func (e *Engine) process(originID flow.Identifier, message interface{}) error { defer e.metrics.MessageHandled(e.channel.String(), metrics.MessageEntityResponse) switch msg := message.(type) { - case *messages.EntityResponse: + case *flow.EntityResponse: return e.onEntityResponse(originID, msg) default: return engine.NewInvalidInputErrorf("invalid message type (%T)", message) } } -func (e *Engine) onEntityResponse(originID flow.Identifier, res *messages.EntityResponse) error { +func (e *Engine) onEntityResponse(originID flow.Identifier, res *flow.EntityResponse) error { lg := e.log.With().Str("origin_id", originID.String()).Uint64("nonce", res.Nonce).Logger() lg.Debug().Strs("entity_ids", flow.IdentifierList(res.EntityIDs).Strings()).Msg("entity response received") @@ -474,7 +471,7 @@ func (e *Engine) onEntityResponse(originID flow.Identifier, res *messages.Entity // check that the response comes from a valid provider providers, err := e.state.Final().Identities(filter.And( e.selector, - filter.HasNodeID(originID), + filter.HasNodeID[flow.Identity](originID), )) if err != nil { return fmt.Errorf("could not get providers: %w", err) diff --git a/engine/common/requester/engine_test.go b/engine/common/requester/engine_test.go index a2a259d44dc..e10555e19ba 100644 --- a/engine/common/requester/engine_test.go +++ b/engine/common/requester/engine_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/vmihailenco/msgpack" + "github.com/vmihailenco/msgpack/v4" "go.uber.org/atomic" module "github.com/onflow/flow-go/module/mock" @@ -19,7 +19,7 @@ import ( "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -29,7 +29,6 @@ func TestEntityByID(t *testing.T) { request := Engine{ unit: engine.NewUnit(), items: make(map[flow.Identifier]*Item), - rng: rand.New(rand.NewSource(0)), } now := time.Now().UTC() @@ -55,7 +54,7 @@ func TestDispatchRequestVarious(t *testing.T) { final := &protocol.Snapshot{} final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return identities.Filter(selector) }, nil, @@ -135,8 +134,7 @@ func TestDispatchRequestVarious(t *testing.T) { con: con, items: items, requests: make(map[uint64]*messages.EntityRequest), - selector: filter.HasNodeID(targetID), - rng: rand.New(rand.NewSource(0)), + selector: filter.HasNodeID[flow.Identity](targetID), } dispatched, err := request.dispatchRequest() 
require.NoError(t, err) @@ -165,7 +163,7 @@ func TestDispatchRequestBatchSize(t *testing.T) { final := &protocol.Snapshot{} final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return identities.Filter(selector) }, nil, @@ -213,7 +211,6 @@ func TestDispatchRequestBatchSize(t *testing.T) { items: items, requests: make(map[uint64]*messages.EntityRequest), selector: filter.Any, - rng: rand.New(rand.NewSource(0)), } dispatched, err := request.dispatchRequest() require.NoError(t, err) @@ -229,7 +226,7 @@ func TestOnEntityResponseValid(t *testing.T) { final := &protocol.Snapshot{} final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return identities.Filter(selector) }, nil, @@ -267,7 +264,7 @@ func TestOnEntityResponseValid(t *testing.T) { bwanted2, _ := msgpack.Marshal(wanted2) bunwanted, _ := msgpack.Marshal(unwanted) - res := &messages.EntityResponse{ + res := &flow.EntityResponse{ Nonce: nonce, EntityIDs: []flow.Identifier{wanted1.ID(), wanted2.ID(), unwanted.ID()}, Blobs: [][]byte{bwanted1, bwanted2, bunwanted}, @@ -286,14 +283,13 @@ func TestOnEntityResponseValid(t *testing.T) { state: state, items: make(map[flow.Identifier]*Item), requests: make(map[uint64]*messages.EntityRequest), - selector: filter.HasNodeID(targetID), + selector: filter.HasNodeID[flow.Identity](targetID), create: func() flow.Entity { return &flow.Collection{} }, handle: func(flow.Identifier, flow.Entity) { if called.Inc() >= 2 { close(done) } }, - rng: rand.New(rand.NewSource(0)), } request.items[iwanted1.EntityID] = iwanted1 @@ -328,7 +324,7 @@ func TestOnEntityIntegrityCheck(t *testing.T) { final := &protocol.Snapshot{} final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return identities.Filter(selector) }, nil, @@ -356,7 +352,7 @@ func TestOnEntityIntegrityCheck(t *testing.T) { // prepare payload from different entity bwanted, _ := msgpack.Marshal(wanted2) - res := &messages.EntityResponse{ + res := &flow.EntityResponse{ Nonce: nonce, EntityIDs: []flow.Identifier{wanted.ID()}, Blobs: [][]byte{bwanted}, @@ -374,10 +370,9 @@ func TestOnEntityIntegrityCheck(t *testing.T) { state: state, items: make(map[flow.Identifier]*Item), requests: make(map[uint64]*messages.EntityRequest), - selector: filter.HasNodeID(targetID), + selector: filter.HasNodeID[flow.Identity](targetID), create: func() flow.Entity { return &flow.Collection{} }, handle: func(flow.Identifier, flow.Entity) { close(called) }, - rng: rand.New(rand.NewSource(0)), } request.items[iwanted.EntityID] = iwanted @@ -413,7 +408,7 @@ func TestOriginValidation(t *testing.T) { final := &protocol.Snapshot{} final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return identities.Filter(selector) }, nil, @@ -435,14 +430,14 @@ func TestOriginValidation(t *testing.T) { iwanted := &Item{ EntityID: wanted.ID(), LastRequested: now, - ExtraSelector: filter.HasNodeID(targetID), + ExtraSelector: filter.HasNodeID[flow.Identity](targetID), checkIntegrity: true, } // prepare payload bwanted, _ := msgpack.Marshal(wanted) - res := &messages.EntityResponse{ + res := &flow.EntityResponse{ Nonce: nonce, 
EntityIDs: []flow.Identifier{wanted.ID()}, Blobs: [][]byte{bwanted}, @@ -453,7 +448,7 @@ func TestOriginValidation(t *testing.T) { EntityIDs: []flow.Identifier{wanted.ID()}, } - network := &mocknetwork.Network{} + network := &mocknetwork.EngineRegistry{} network.On("Register", mock.Anything, mock.Anything).Return(nil, nil) e, err := New( @@ -463,7 +458,7 @@ func TestOriginValidation(t *testing.T) { me, state, "", - filter.HasNodeID(targetID), + filter.HasNodeID[flow.Identity](targetID), func() flow.Entity { return &flow.Collection{} }, ) assert.NoError(t, err) diff --git a/engine/common/requester/item.go b/engine/common/requester/item.go index 456a33e881f..06cdf2acb01 100644 --- a/engine/common/requester/item.go +++ b/engine/common/requester/item.go @@ -7,10 +7,10 @@ import ( ) type Item struct { - EntityID flow.Identifier // ID for the entity to be requested - NumAttempts uint // number of times the entity was requested - LastRequested time.Time // approximate timestamp of last request - RetryAfter time.Duration // interval until request should be retried - ExtraSelector flow.IdentityFilter // additional filters for providers of this entity - checkIntegrity bool // check response integrity using `EntityID` + EntityID flow.Identifier // ID for the entity to be requested + NumAttempts uint // number of times the entity was requested + LastRequested time.Time // approximate timestamp of last request + RetryAfter time.Duration // interval until request should be retried + ExtraSelector flow.IdentityFilter[flow.Identity] // additional filters for providers of this entity + checkIntegrity bool // check response integrity using `EntityID` } diff --git a/engine/common/rpc/consts.go b/engine/common/rpc/consts.go new file mode 100644 index 00000000000..3c48bdf8696 --- /dev/null +++ b/engine/common/rpc/consts.go @@ -0,0 +1,34 @@ +package rpc + +const ( + // DefaultMaxMsgSize is the default maximum message size for GRPC servers and clients. + // This is the default used by the grpc library if no max is specified. + DefaultMaxMsgSize = 4 << (10 * 2) // 4 MiB + + // DefaultMaxResponseMsgSize is the default maximum response message size for GRPC servers and clients. + // This uses 1 GiB, which allows for reasonably large messages returned for execution data. + DefaultMaxResponseMsgSize = 1 << (10 * 3) // 1 GiB + + // DefaultAccessMaxRequestSize is the default maximum request message size for the access API. + DefaultAccessMaxRequestSize = DefaultMaxMsgSize + + // DefaultAccessMaxResponseSize is the default maximum response message size for the access API. + // This must be large enough to accommodate large execution data responses. + DefaultAccessMaxResponseSize = DefaultMaxResponseMsgSize + + // DefaultExecutionMaxRequestSize is the default maximum request message size for the execution API. + DefaultExecutionMaxRequestSize = DefaultMaxMsgSize + + // DefaultExecutionMaxResponseSize is the default maximum response message size for the execution API. + // This must be large enough to accommodate large execution data responses. + DefaultExecutionMaxResponseSize = DefaultMaxResponseMsgSize + + // DefaultCollectionMaxRequestSize is the default maximum request message size for the collection node. + // This is set to 4 MiB, which is larger than the default max service account transaction size (3 MiB). + // The service account size is controlled by MaxCollectionByteSize used by the transaction validator.
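Editorial note on the shift arithmetic in these size constants: each left shift by 10 bits multiplies by 1024, so 4 << (10 * 2) is 4 * 2^20 bytes (4 MiB) and 1 << (10 * 3) is 2^30 bytes (1 GiB). A quick self-contained check:

package main

import "fmt"

func main() {
	// One left shift by 10 bits multiplies by 1024 (2^10):
	// <<10 = KiB, <<20 = MiB, <<30 = GiB.
	fmt.Println(4 << (10 * 2)) // 4194304    = 4 * 2^20 = 4 MiB
	fmt.Println(1 << (10 * 3)) // 1073741824 = 2^30     = 1 GiB
}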
+ DefaultCollectionMaxRequestSize = 4 << (10 * 2) // 4 MiB + + // DefaultCollectionMaxResponseSize is the default maximum response message size for the collection node. + // This can be set to a smaller value since responses should be very small. + DefaultCollectionMaxResponseSize = DefaultMaxMsgSize +) diff --git a/engine/common/rpc/convert/accounts.go b/engine/common/rpc/convert/accounts.go new file mode 100644 index 00000000000..3cd38b7e0ed --- /dev/null +++ b/engine/common/rpc/convert/accounts.go @@ -0,0 +1,92 @@ +package convert + +import ( + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/model/flow" +) + +// AccountToMessage converts a flow.Account to a protobuf message +func AccountToMessage(a *flow.Account) (*entities.Account, error) { + keys := make([]*entities.AccountKey, len(a.Keys)) + for i, k := range a.Keys { + messageKey, err := AccountKeyToMessage(k) + if err != nil { + return nil, err + } + keys[i] = messageKey + } + + return &entities.Account{ + Address: a.Address.Bytes(), + Balance: a.Balance, + Code: nil, + Keys: keys, + Contracts: a.Contracts, + }, nil +} + +// MessageToAccount converts a protobuf message to a flow.Account +func MessageToAccount(m *entities.Account) (*flow.Account, error) { + if m == nil { + return nil, ErrEmptyMessage + } + + accountKeys := make([]flow.AccountPublicKey, len(m.GetKeys())) + for i, key := range m.GetKeys() { + accountKey, err := MessageToAccountKey(key) + if err != nil { + return nil, err + } + + accountKeys[i] = *accountKey + } + + return &flow.Account{ + Address: flow.BytesToAddress(m.GetAddress()), + Balance: m.GetBalance(), + Keys: accountKeys, + Contracts: m.Contracts, + }, nil +} + +// AccountKeyToMessage converts a flow.AccountPublicKey to a protobuf message +func AccountKeyToMessage(a flow.AccountPublicKey) (*entities.AccountKey, error) { + publicKey := a.PublicKey.Encode() + return &entities.AccountKey{ + Index: a.Index, + PublicKey: publicKey, + SignAlgo: uint32(a.SignAlgo), + HashAlgo: uint32(a.HashAlgo), + Weight: uint32(a.Weight), + SequenceNumber: uint32(a.SeqNumber), + Revoked: a.Revoked, + }, nil +} + +// MessageToAccountKey converts a protobuf message to a flow.AccountPublicKey +func MessageToAccountKey(m *entities.AccountKey) (*flow.AccountPublicKey, error) { + if m == nil { + return nil, ErrEmptyMessage + } + + sigAlgo := crypto.SigningAlgorithm(m.GetSignAlgo()) + hashAlgo := hash.HashingAlgorithm(m.GetHashAlgo()) + + publicKey, err := crypto.DecodePublicKey(sigAlgo, m.GetPublicKey()) + if err != nil { + return nil, err + } + + return &flow.AccountPublicKey{ + Index: m.GetIndex(), + PublicKey: publicKey, + SignAlgo: sigAlgo, + HashAlgo: hashAlgo, + Weight: int(m.GetWeight()), + SeqNumber: uint64(m.GetSequenceNumber()), + Revoked: m.GetRevoked(), + }, nil +} diff --git a/engine/common/rpc/convert/accounts_test.go b/engine/common/rpc/convert/accounts_test.go new file mode 100644 index 00000000000..bd49faa958e --- /dev/null +++ b/engine/common/rpc/convert/accounts_test.go @@ -0,0 +1,59 @@ +package convert_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestConvertAccount tests that converting an account to and from a protobuf message results in +// the same 
account +func TestConvertAccount(t *testing.T) { + t.Parallel() + + a, err := unittest.AccountFixture() + require.NoError(t, err) + + key2, err := unittest.AccountKeyFixture(128, crypto.ECDSAP256, hash.SHA3_256) + require.NoError(t, err) + + a.Keys = append(a.Keys, key2.PublicKey(500)) + + msg, err := convert.AccountToMessage(a) + require.NoError(t, err) + + converted, err := convert.MessageToAccount(msg) + require.NoError(t, err) + + assert.Equal(t, a, converted) +} + +// TestConvertAccountKey tests that converting an account key to and from a protobuf message results +// in the same account key +func TestConvertAccountKey(t *testing.T) { + t.Parallel() + + privateKey, _ := unittest.AccountKeyDefaultFixture() + accountKey := privateKey.PublicKey(fvm.AccountKeyWeightThreshold) + + // Explicitly test if Revoked is properly converted + accountKey.Revoked = true + + msg, err := convert.AccountKeyToMessage(accountKey) + assert.NoError(t, err) + + converted, err := convert.MessageToAccountKey(msg) + assert.NoError(t, err) + + assert.Equal(t, accountKey, *converted) + assert.Equal(t, accountKey.PublicKey, converted.PublicKey) + assert.Equal(t, accountKey.Revoked, converted.Revoked) +} diff --git a/engine/common/rpc/convert/blocks.go b/engine/common/rpc/convert/blocks.go new file mode 100644 index 00000000000..0c0d6654b22 --- /dev/null +++ b/engine/common/rpc/convert/blocks.go @@ -0,0 +1,212 @@ +package convert + +import ( + "fmt" + "time" + + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/onflow/flow-go/model/flow" + + "github.com/onflow/flow/protobuf/go/flow/entities" +) + +// BlockTimestamp2ProtobufTime is just a shorthand function to ensure consistent conversion +// of block timestamps (measured in unix milliseconds) to protobuf's Timestamp format. +func BlockTimestamp2ProtobufTime(blockTimestamp uint64) *timestamppb.Timestamp { + return timestamppb.New(time.UnixMilli(int64(blockTimestamp))) +} + +// BlockToMessage converts a flow.Block to a protobuf Block message. +// signerIDs is a precomputed list of signer IDs for the block based on the block's signer indices. +func BlockToMessage(h *flow.Block, signerIDs flow.IdentifierList) ( + *entities.Block, + error, +) { + id := h.ID() + cg := CollectionGuaranteesToMessages(h.Payload.Guarantees) + seals := BlockSealsToMessages(h.Payload.Seals) + + execResults, err := ExecutionResultsToMessages(h.Payload.Results) + if err != nil { + return nil, err + } + + blockHeader, err := BlockHeaderToMessage(h.ToHeader(), signerIDs) + if err != nil { + return nil, err + } + + bh := entities.Block{ + Id: IdentifierToMessage(id), + Height: h.Height, + ParentId: IdentifierToMessage(h.ParentID), + Timestamp: BlockTimestamp2ProtobufTime(h.Timestamp), + CollectionGuarantees: cg, + BlockSeals: seals, + Signatures: [][]byte{h.ParentVoterSigData}, + ExecutionReceiptMetaList: ExecutionResultMetaListToMessages(h.Payload.Receipts), + ExecutionResultList: execResults, + ProtocolStateId: IdentifierToMessage(h.Payload.ProtocolStateID), + BlockHeader: blockHeader, + } + return &bh, nil +} + +// BlockToMessageLight converts a flow.Block to the light form of a protobuf Block message. 
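Editorial note: BlockTimestamp2ProtobufTime above converts block timestamps, stored as unix milliseconds, into protobuf Timestamps. A small sketch of the round trip, assuming the google.golang.org/protobuf timestamppb package; the literal value is arbitrary:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Block timestamps are stored as unix milliseconds (uint64).
	var blockTimestamp uint64 = 1700000000123

	// Same conversion as BlockTimestamp2ProtobufTime.
	ts := timestamppb.New(time.UnixMilli(int64(blockTimestamp)))

	// Converting back yields the identical millisecond value.
	fmt.Println(uint64(ts.AsTime().UnixMilli()) == blockTimestamp) // true
}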
+func BlockToMessageLight(h *flow.Block) *entities.Block { + id := h.ID() + cg := CollectionGuaranteesToMessages(h.Payload.Guarantees) + + return &entities.Block{ + Id: id[:], + Height: h.Height, + ParentId: h.ParentID[:], + Timestamp: BlockTimestamp2ProtobufTime(h.Timestamp), + CollectionGuarantees: cg, + Signatures: [][]byte{h.ParentVoterSigData}, + } +} + +// MessageToBlock converts a protobuf Block message to a flow.Block. +func MessageToBlock(m *entities.Block) (*flow.Block, error) { + payload, err := PayloadFromMessage(m) + if err != nil { + return nil, fmt.Errorf("failed to extract payload data from message: %w", err) + } + header, err := MessageToBlockHeader(m.BlockHeader) + if err != nil { + return nil, fmt.Errorf("failed to convert block header: %w", err) + } + + if IsRootBlockHeader(m.BlockHeader) { + block, err := flow.NewRootBlock( + flow.UntrustedBlock{ + HeaderBody: header.HeaderBody, + Payload: *payload, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create root block: %w", err) + } + return block, nil + } + + block, err := flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: header.HeaderBody, + Payload: *payload, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not build block: %w", err) + } + + return block, nil +} + +// BlockSealToMessage converts a flow.Seal to a protobuf BlockSeal message. +func BlockSealToMessage(s *flow.Seal) *entities.BlockSeal { + id := s.BlockID + result := s.ResultID + return &entities.BlockSeal{ + BlockId: id[:], + ExecutionReceiptId: result[:], + ExecutionReceiptSignatures: [][]byte{}, // seal signatures are not included; filled with an empty placeholder + FinalState: StateCommitmentToMessage(s.FinalState), + AggregatedApprovalSigs: AggregatedSignaturesToMessages(s.AggregatedApprovalSigs), + ResultId: IdentifierToMessage(s.ResultID), + } +} + +// MessageToBlockSeal converts a protobuf BlockSeal message to a flow.Seal. +// +// All errors indicate the input cannot be converted to a valid seal. +func MessageToBlockSeal(m *entities.BlockSeal) (*flow.Seal, error) { + finalState, err := MessageToStateCommitment(m.FinalState) + if err != nil { + return nil, fmt.Errorf("failed to convert message to block seal: %w", err) + } + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: MessageToIdentifier(m.BlockId), + ResultID: MessageToIdentifier(m.ResultId), + FinalState: finalState, + AggregatedApprovalSigs: MessagesToAggregatedSignatures(m.AggregatedApprovalSigs), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct seal: %w", err) + } + + return seal, nil +} + +// BlockSealsToMessages converts a slice of flow.Seal to a slice of protobuf BlockSeal messages. +func BlockSealsToMessages(b []*flow.Seal) []*entities.BlockSeal { + seals := make([]*entities.BlockSeal, len(b)) + for i, s := range b { + seals[i] = BlockSealToMessage(s) + } + return seals +} + +// MessagesToBlockSeals converts a slice of protobuf BlockSeal messages to a slice of flow.Seal. +func MessagesToBlockSeals(m []*entities.BlockSeal) ([]*flow.Seal, error) { + seals := make([]*flow.Seal, len(m)) + for i, s := range m { + msg, err := MessageToBlockSeal(s) + if err != nil { + return nil, err + } + seals[i] = msg + } + return seals, nil +} + +// PayloadFromMessage converts a protobuf Block message to a flow.Payload.
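Editorial note: MessageToBlock and MessageToBlockSeal above both follow the same untrusted-input pattern: decoded wire fields are wrapped in an Untrusted* struct, and only a validating constructor can produce the trusted type. A toy sketch of that pattern with stand-in types; these are illustrative only, not the flow-go API:

package main

import (
	"errors"
	"fmt"
)

// UntrustedSeal holds raw decoded fields before validation.
type UntrustedSeal struct {
	BlockID  string
	ResultID string
}

// Seal has unexported fields, so it can only be built via NewSeal.
type Seal struct{ blockID, resultID string }

// NewSeal validates the untrusted input and returns the trusted type or an error.
func NewSeal(u UntrustedSeal) (*Seal, error) {
	if u.BlockID == "" || u.ResultID == "" {
		return nil, errors.New("seal is missing a required identifier")
	}
	return &Seal{blockID: u.BlockID, resultID: u.ResultID}, nil
}

func main() {
	// Invalid input never becomes a Seal.
	if _, err := NewSeal(UntrustedSeal{}); err != nil {
		fmt.Println("rejected:", err)
	}
}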
+func PayloadFromMessage(m *entities.Block) (*flow.Payload, error) { + cgs, err := MessagesToCollectionGuarantees(m.CollectionGuarantees) + if err != nil { + return nil, err + } + seals, err := MessagesToBlockSeals(m.BlockSeals) + if err != nil { + return nil, err + } + receipts, err := MessagesToExecutionResultMetaList(m.ExecutionReceiptMetaList) + if err != nil { + return nil, err + } + results, err := MessagesToExecutionResults(m.ExecutionResultList) + if err != nil { + return nil, err + } + payload, err := flow.NewPayload( + flow.UntrustedPayload{ + Guarantees: cgs, + Seals: seals, + Receipts: receipts, + Results: results, + ProtocolStateID: MessageToIdentifier(m.ProtocolStateId), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not build the payload: %w", err) + } + + return payload, nil +} + +// MessageToBlockStatus converts a protobuf BlockStatus message to a flow.BlockStatus. +func MessageToBlockStatus(status entities.BlockStatus) flow.BlockStatus { + switch status { + case entities.BlockStatus_BLOCK_UNKNOWN: + return flow.BlockStatusUnknown + case entities.BlockStatus_BLOCK_FINALIZED: + return flow.BlockStatusFinalized + case entities.BlockStatus_BLOCK_SEALED: + return flow.BlockStatusSealed + } + return flow.BlockStatusUnknown +} diff --git a/engine/common/rpc/convert/blocks_test.go b/engine/common/rpc/convert/blocks_test.go new file mode 100644 index 00000000000..7a8580051d4 --- /dev/null +++ b/engine/common/rpc/convert/blocks_test.go @@ -0,0 +1,78 @@ +package convert_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestConvertBlock tests that converting a block to and from a protobuf message results in the same +// block +func TestConvertBlock(t *testing.T) { + t.Parallel() + + block := unittest.FullBlockFixture() + signerIDs := unittest.IdentifierListFixture(5) + + msg, err := convert.BlockToMessage(block, signerIDs) + require.NoError(t, err) + + converted, err := convert.MessageToBlock(msg) + require.NoError(t, err) + + assert.Equal(t, block, converted) +} + +// TestConvertBlockLight tests that converting a block to its light form results in only the correct +// fields being set +func TestConvertBlockLight(t *testing.T) { + t.Parallel() + + block := unittest.FullBlockFixture() + msg := convert.BlockToMessageLight(block) + + // required fields are set + blockID := block.ID() + assert.Equal(t, 0, bytes.Compare(blockID[:], msg.Id)) + assert.Equal(t, block.Height, msg.Height) + assert.Equal(t, 0, bytes.Compare(block.ParentID[:], msg.ParentId)) + assert.Equal(t, block.Timestamp, uint64(msg.Timestamp.AsTime().UnixMilli())) + assert.Equal(t, 0, bytes.Compare(block.ParentVoterSigData, msg.Signatures[0])) + + guarantees := []*flow.CollectionGuarantee{} + for _, g := range msg.CollectionGuarantees { + guarantee, err := convert.MessageToCollectionGuarantee(g) + require.NoError(t, err) + guarantees = append(guarantees, guarantee) + } + + assert.Equal(t, block.Payload.Guarantees, guarantees) + + // all other fields are not + assert.Nil(t, msg.BlockHeader) + assert.Len(t, msg.BlockSeals, 0) + assert.Len(t, msg.ExecutionReceiptMetaList, 0) + assert.Len(t, msg.ExecutionResultList, 0) +} + +// TestConvertRootBlock tests that converting a root block to and from a protobuf message results in +// the same block +func TestConvertRootBlock(t *testing.T) { + 
t.Parallel() + + block := unittest.Block.Genesis(flow.Emulator) + + msg, err := convert.BlockToMessage(block, flow.IdentifierList{}) + require.NoError(t, err) + + converted, err := convert.MessageToBlock(msg) + require.NoError(t, err) + + assert.Equal(t, block.ID(), converted.ID()) +} diff --git a/engine/common/rpc/convert/collections.go b/engine/common/rpc/convert/collections.go new file mode 100644 index 00000000000..22adffb3bfa --- /dev/null +++ b/engine/common/rpc/convert/collections.go @@ -0,0 +1,128 @@ +package convert + +import ( + "fmt" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/model/flow" +) + +// CollectionToMessage converts a collection to a protobuf message +func CollectionToMessage(c *flow.Collection) (*entities.Collection, error) { + if c == nil || c.Transactions == nil { + return nil, fmt.Errorf("invalid collection") + } + + transactionsIDs := make([][]byte, len(c.Transactions)) + for i, t := range c.Transactions { + id := t.ID() + transactionsIDs[i] = id[:] + } + + collectionID := c.ID() + + ce := &entities.Collection{ + Id: collectionID[:], + TransactionIds: transactionsIDs, + } + + return ce, nil +} + +// LightCollectionToMessage converts a light collection to a protobuf message +func LightCollectionToMessage(c *flow.LightCollection) (*entities.Collection, error) { + if c == nil || c.Transactions == nil { + return nil, fmt.Errorf("invalid collection") + } + + collectionID := c.ID() + + return &entities.Collection{ + Id: collectionID[:], + TransactionIds: IdentifiersToMessages(c.Transactions), + }, nil +} + +// MessageToLightCollection converts a protobuf message to a light collection +func MessageToLightCollection(m *entities.Collection) (*flow.LightCollection, error) { + transactions := make([]flow.Identifier, 0, len(m.TransactionIds)) + for _, txId := range m.TransactionIds { + transactions = append(transactions, MessageToIdentifier(txId)) + } + + return flow.NewLightCollection(flow.UntrustedLightCollection{ + Transactions: transactions, + }), nil +} + +func FullCollectionToMessage(c *flow.Collection) ([]*entities.Transaction, error) { + if c == nil { + return nil, fmt.Errorf("invalid collection") + } + + transactions := make([]*entities.Transaction, len(c.Transactions)) + for i, tx := range c.Transactions { + transactions[i] = TransactionToMessage(*tx) + } + + return transactions, nil +} + +func MessageToFullCollection(m []*entities.Transaction, chain flow.Chain) (*flow.Collection, error) { + transactions := make([]*flow.TransactionBody, len(m)) + for i, tx := range m { + t, err := MessageToTransaction(tx, chain) + if err != nil { + return nil, err + } + transactions[i] = &t + } + + return flow.NewCollection(flow.UntrustedCollection{Transactions: transactions}) +} + +// CollectionGuaranteeToMessage converts a collection guarantee to a protobuf message +func CollectionGuaranteeToMessage(g *flow.CollectionGuarantee) *entities.CollectionGuarantee { + return &entities.CollectionGuarantee{ + CollectionId: IdentifierToMessage(g.CollectionID), + Signatures: [][]byte{g.Signature}, + ReferenceBlockId: IdentifierToMessage(g.ReferenceBlockID), + Signature: g.Signature, + SignerIndices: g.SignerIndices, + ClusterChainId: []byte(g.ClusterChainID), + } +} + +// MessageToCollectionGuarantee converts a protobuf message to a collection guarantee +func MessageToCollectionGuarantee(m *entities.CollectionGuarantee) (*flow.CollectionGuarantee, error) { + return flow.NewCollectionGuarantee(flow.UntrustedCollectionGuarantee{ + CollectionID: 
MessageToIdentifier(m.CollectionId), + ReferenceBlockID: MessageToIdentifier(m.ReferenceBlockId), + ClusterChainID: flow.ChainID(m.ClusterChainId), + SignerIndices: m.SignerIndices, + Signature: MessageToSignature(m.Signature), + }) +} + +// CollectionGuaranteesToMessages converts a slice of collection guarantees to a slice of protobuf messages +func CollectionGuaranteesToMessages(c []*flow.CollectionGuarantee) []*entities.CollectionGuarantee { + cg := make([]*entities.CollectionGuarantee, len(c)) + for i, g := range c { + cg[i] = CollectionGuaranteeToMessage(g) + } + return cg +} + +// MessagesToCollectionGuarantees converts a slice of protobuf messages to a slice of collection guarantees +func MessagesToCollectionGuarantees(m []*entities.CollectionGuarantee) ([]*flow.CollectionGuarantee, error) { + cg := make([]*flow.CollectionGuarantee, len(m)) + for i, g := range m { + guarantee, err := MessageToCollectionGuarantee(g) + if err != nil { + return nil, fmt.Errorf("could not convert message to collection guarantee: %w", err) + } + cg[i] = guarantee + } + return cg, nil +} diff --git a/engine/common/rpc/convert/collections_test.go b/engine/common/rpc/convert/collections_test.go new file mode 100644 index 00000000000..41454bc1e15 --- /dev/null +++ b/engine/common/rpc/convert/collections_test.go @@ -0,0 +1,96 @@ +package convert_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + "github.com/onflow/flow/protobuf/go/flow/entities" +) + +// TestConvertCollection tests that converting a collection to a protobuf message results in the correct +// set of transaction IDs +func TestConvertCollection(t *testing.T) { + t.Parallel() + + collection := unittest.CollectionFixture(5) + txIDs := make([]flow.Identifier, 0, len(collection.Transactions)) + for _, tx := range collection.Transactions { + txIDs = append(txIDs, tx.ID()) + } + + t.Run("convert collection to message", func(t *testing.T) { + msg, err := convert.CollectionToMessage(&collection) + require.NoError(t, err) + + assert.Len(t, msg.TransactionIds, len(txIDs)) + for i, txID := range txIDs { + assert.Equal(t, txID[:], msg.TransactionIds[i]) + } + }) + + var msg *entities.Collection + lightCollection := flow.LightCollection{Transactions: txIDs} + + t.Run("convert light collection to message", func(t *testing.T) { + var err error + msg, err = convert.LightCollectionToMessage(&lightCollection) + require.NoError(t, err) + + assert.Len(t, msg.TransactionIds, len(txIDs)) + for i, txID := range txIDs { + assert.Equal(t, txID[:], msg.TransactionIds[i]) + } + }) + + t.Run("convert message to light collection", func(t *testing.T) { + lightColl, err := convert.MessageToLightCollection(msg) + require.NoError(t, err) + + assert.Equal(t, len(txIDs), len(lightColl.Transactions)) + for i, txID := range lightColl.Transactions { + assert.Equal(t, txIDs[i], txID) + } + }) + + t.Run("convert full collection to message and then back to collection", func(t *testing.T) { + msg, err := convert.FullCollectionToMessage(&collection) + require.NoError(t, err) + + converted, err := convert.MessageToFullCollection(msg, flow.Testnet.Chain()) + require.NoError(t, err) + + assert.Equal(t, &collection, converted) + }) +} + +// TestConvertCollectionGuarantee tests that converting a collection guarantee to and from a protobuf +// message results in the same collection
guarantee +func TestConvertCollectionGuarantee(t *testing.T) { + t.Parallel() + + guarantee := unittest.CollectionGuaranteeFixture(unittest.WithCollRef(unittest.IdentifierFixture())) + + msg := convert.CollectionGuaranteeToMessage(guarantee) + converted, err := convert.MessageToCollectionGuarantee(msg) + require.NoError(t, err) + assert.Equal(t, guarantee, converted) +} + +// TestConvertCollectionGuarantees tests that converting a list of collection guarantees to and from protobuf +// messages results in the same collection guarantees +func TestConvertCollectionGuarantees(t *testing.T) { + t.Parallel() + + guarantees := unittest.CollectionGuaranteesFixture(5, unittest.WithCollRef(unittest.IdentifierFixture())) + + msg := convert.CollectionGuaranteesToMessages(guarantees) + converted, err := convert.MessagesToCollectionGuarantees(msg) + require.NoError(t, err) + assert.Equal(t, guarantees, converted) +} diff --git a/engine/common/rpc/convert/compatible_range.go b/engine/common/rpc/convert/compatible_range.go new file mode 100644 index 00000000000..4cd5c5a0962 --- /dev/null +++ b/engine/common/rpc/convert/compatible_range.go @@ -0,0 +1,33 @@ +package convert + +import ( + "github.com/onflow/flow/protobuf/go/flow/entities" + + accessmodel "github.com/onflow/flow-go/model/access" +) + +// CompatibleRangeToMessage converts an accessmodel.CompatibleRange to a protobuf message +func CompatibleRangeToMessage(c *accessmodel.CompatibleRange) *entities.CompatibleRange { + if c == nil { + // compatible range is optional, so nil is a valid value + return nil + } + + return &entities.CompatibleRange{ + StartHeight: c.StartHeight, + EndHeight: c.EndHeight, + } +} + +// MessageToCompatibleRange converts a protobuf message to an accessmodel.CompatibleRange +func MessageToCompatibleRange(c *entities.CompatibleRange) *accessmodel.CompatibleRange { + if c == nil { + // compatible range is optional, so nil is a valid value + return nil + } + + return &accessmodel.CompatibleRange{ + StartHeight: c.StartHeight, + EndHeight: c.EndHeight, + } +} diff --git a/engine/common/rpc/convert/compatible_range_test.go b/engine/common/rpc/convert/compatible_range_test.go new file mode 100644 index 00000000000..eb27dfa858b --- /dev/null +++ b/engine/common/rpc/convert/compatible_range_test.go @@ -0,0 +1,41 @@ +package convert_test + +import ( + "math/rand" + "testing" + + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + accessmodel "github.com/onflow/flow-go/model/access" +) + +// TestConvertCompatibleRange tests converting a compatible range to and from a protobuf message +func TestConvertCompatibleRange(t *testing.T) { + t.Parallel() + + t.Run("nil range is nil", func(t *testing.T) { + t.Parallel() + + assert.Nil(t, convert.CompatibleRangeToMessage(nil)) + assert.Nil(t, convert.MessageToCompatibleRange(nil)) + }) + + t.Run("convert range to message", func(t *testing.T) { + startHeight := uint64(rand.Uint32()) + endHeight := uint64(rand.Uint32()) + + compatibleRange := &accessmodel.CompatibleRange{ + StartHeight: startHeight, + EndHeight: endHeight, + } + expected := &entities.CompatibleRange{ + StartHeight: startHeight, + EndHeight: endHeight, + } + + msg := convert.CompatibleRangeToMessage(compatibleRange) + assert.Equal(t, expected, msg) + }) +} diff --git a/engine/common/rpc/convert/convert.go b/engine/common/rpc/convert/convert.go index 150e760d8de..db594a07761 100644 --- a/engine/common/rpc/convert/convert.go +++
b/engine/common/rpc/convert/convert.go @@ -1,22 +1,13 @@ package convert import ( - "encoding/json" "errors" "fmt" + "github.com/onflow/crypto" "github.com/onflow/flow/protobuf/go/flow/entities" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/timestamppb" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" - "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/inmem" ) var ErrEmptyMessage = errors.New("protobuf message is empty") @@ -24,6 +15,7 @@ var ValidChainIds = map[string]bool{ flow.Mainnet.String(): true, flow.Testnet.String(): true, flow.Sandboxnet.String(): true, + flow.Previewnet.String(): true, flow.Benchnet.String(): true, flow.Localnet.String(): true, flow.Emulator.String(): true, @@ -31,204 +23,8 @@ var ValidChainIds = map[string]bool{ flow.MonotonicEmulator.String(): true, } -func MessageToTransaction( - m *entities.Transaction, - chain flow.Chain, -) (flow.TransactionBody, error) { - if m == nil { - return flow.TransactionBody{}, ErrEmptyMessage - } - - t := flow.NewTransactionBody() - - proposalKey := m.GetProposalKey() - if proposalKey != nil { - proposalAddress, err := Address(proposalKey.GetAddress(), chain) - if err != nil { - return *t, err - } - t.SetProposalKey(proposalAddress, uint64(proposalKey.GetKeyId()), proposalKey.GetSequenceNumber()) - } - - payer := m.GetPayer() - if payer != nil { - payerAddress, err := Address(payer, chain) - if err != nil { - return *t, err - } - t.SetPayer(payerAddress) - } - - for _, authorizer := range m.GetAuthorizers() { - authorizerAddress, err := Address(authorizer, chain) - if err != nil { - return *t, err - } - t.AddAuthorizer(authorizerAddress) - } - - for _, sig := range m.GetPayloadSignatures() { - addr, err := Address(sig.GetAddress(), chain) - if err != nil { - return *t, err - } - t.AddPayloadSignature(addr, uint64(sig.GetKeyId()), sig.GetSignature()) - } - - for _, sig := range m.GetEnvelopeSignatures() { - addr, err := Address(sig.GetAddress(), chain) - if err != nil { - return *t, err - } - t.AddEnvelopeSignature(addr, uint64(sig.GetKeyId()), sig.GetSignature()) - } - - t.SetScript(m.GetScript()) - t.SetArguments(m.GetArguments()) - t.SetReferenceBlockID(flow.HashToID(m.GetReferenceBlockId())) - t.SetGasLimit(m.GetGasLimit()) - - return *t, nil -} - -func TransactionsToMessages(transactions []*flow.TransactionBody) []*entities.Transaction { - transactionMessages := make([]*entities.Transaction, len(transactions)) - for i, t := range transactions { - transactionMessages[i] = TransactionToMessage(*t) - } - return transactionMessages -} - -func TransactionToMessage(tb flow.TransactionBody) *entities.Transaction { - proposalKeyMessage := &entities.Transaction_ProposalKey{ - Address: tb.ProposalKey.Address.Bytes(), - KeyId: uint32(tb.ProposalKey.KeyIndex), - SequenceNumber: tb.ProposalKey.SequenceNumber, - } - - authMessages := make([][]byte, len(tb.Authorizers)) - for i, auth := range tb.Authorizers { - authMessages[i] = auth.Bytes() - } - - payloadSigMessages := make([]*entities.Transaction_Signature, len(tb.PayloadSignatures)) - - for i, sig := range tb.PayloadSignatures { - payloadSigMessages[i] = &entities.Transaction_Signature{ - Address: sig.Address.Bytes(), - KeyId: uint32(sig.KeyIndex), - Signature: sig.Signature, - } - } - - envelopeSigMessages := 
make([]*entities.Transaction_Signature, len(tb.EnvelopeSignatures)) - - for i, sig := range tb.EnvelopeSignatures { - envelopeSigMessages[i] = &entities.Transaction_Signature{ - Address: sig.Address.Bytes(), - KeyId: uint32(sig.KeyIndex), - Signature: sig.Signature, - } - } - - return &entities.Transaction{ - Script: tb.Script, - Arguments: tb.Arguments, - ReferenceBlockId: tb.ReferenceBlockID[:], - GasLimit: tb.GasLimit, - ProposalKey: proposalKeyMessage, - Payer: tb.Payer.Bytes(), - Authorizers: authMessages, - PayloadSignatures: payloadSigMessages, - EnvelopeSignatures: envelopeSigMessages, - } -} - -func BlockHeaderToMessage( - h *flow.Header, - signerIDs flow.IdentifierList, -) (*entities.BlockHeader, error) { - id := h.ID() - - t := timestamppb.New(h.Timestamp) - var lastViewTC *entities.TimeoutCertificate - if h.LastViewTC != nil { - newestQC := h.LastViewTC.NewestQC - lastViewTC = &entities.TimeoutCertificate{ - View: h.LastViewTC.View, - HighQcViews: h.LastViewTC.NewestQCViews, - SignerIndices: h.LastViewTC.SignerIndices, - SigData: h.LastViewTC.SigData, - HighestQc: &entities.QuorumCertificate{ - View: newestQC.View, - BlockId: newestQC.BlockID[:], - SignerIndices: newestQC.SignerIndices, - SigData: newestQC.SigData, - }, - } - } - parentVoterIds := IdentifiersToMessages(signerIDs) - - return &entities.BlockHeader{ - Id: id[:], - ParentId: h.ParentID[:], - Height: h.Height, - PayloadHash: h.PayloadHash[:], - Timestamp: t, - View: h.View, - ParentView: h.ParentView, - ParentVoterIndices: h.ParentVoterIndices, - ParentVoterIds: parentVoterIds, - ParentVoterSigData: h.ParentVoterSigData, - ProposerId: h.ProposerID[:], - ProposerSigData: h.ProposerSigData, - ChainId: h.ChainID.String(), - LastViewTc: lastViewTC, - }, nil -} - -func MessageToBlockHeader(m *entities.BlockHeader) (*flow.Header, error) { - chainId, err := MessageToChainId(m.ChainId) - if err != nil { - return nil, fmt.Errorf("failed to convert ChainId: %w", err) - } - var lastViewTC *flow.TimeoutCertificate - if m.LastViewTc != nil { - newestQC := m.LastViewTc.HighestQc - if newestQC == nil { - return nil, fmt.Errorf("invalid structure newest QC should be present") - } - lastViewTC = &flow.TimeoutCertificate{ - View: m.LastViewTc.View, - NewestQCViews: m.LastViewTc.HighQcViews, - SignerIndices: m.LastViewTc.SignerIndices, - SigData: m.LastViewTc.SigData, - NewestQC: &flow.QuorumCertificate{ - View: newestQC.View, - BlockID: MessageToIdentifier(newestQC.BlockId), - SignerIndices: newestQC.SignerIndices, - SigData: newestQC.SigData, - }, - } - } - - return &flow.Header{ - ParentID: MessageToIdentifier(m.ParentId), - Height: m.Height, - PayloadHash: MessageToIdentifier(m.PayloadHash), - Timestamp: m.Timestamp.AsTime(), - View: m.View, - ParentView: m.ParentView, - ParentVoterIndices: m.ParentVoterIndices, - ParentVoterSigData: m.ParentVoterSigData, - ProposerID: MessageToIdentifier(m.ProposerId), - ProposerSigData: m.ProposerSigData, - ChainID: *chainId, - LastViewTC: lastViewTC, - }, nil -} - -// MessageToChainId checks chainId from enumeration to prevent a panic on Chain() being called +// MessageToChainId converts the chainID from a protobuf message to a flow.ChainID +// It returns an error if the value is not a valid chainId func MessageToChainId(m string) (*flow.ChainID, error) { if !ValidChainIds[m] { return nil, fmt.Errorf("invalid chainId %s: ", m) @@ -237,209 +33,21 @@ func MessageToChainId(m string) (*flow.ChainID, error) { return &chainId, nil } -func CollectionGuaranteesToMessages(c []*flow.CollectionGuarantee) 
[]*entities.CollectionGuarantee { - cg := make([]*entities.CollectionGuarantee, len(c)) - for i, g := range c { - cg[i] = CollectionGuaranteeToMessage(g) - } - return cg -} - -func MessagesToCollectionGuarantees(m []*entities.CollectionGuarantee) []*flow.CollectionGuarantee { - cg := make([]*flow.CollectionGuarantee, len(m)) - for i, g := range m { - cg[i] = MessageToCollectionGuarantee(g) - } - return cg -} - -func BlockSealsToMessages(b []*flow.Seal) []*entities.BlockSeal { - seals := make([]*entities.BlockSeal, len(b)) - for i, s := range b { - seals[i] = BlockSealToMessage(s) - } - return seals -} - -func MessagesToBlockSeals(m []*entities.BlockSeal) ([]*flow.Seal, error) { - seals := make([]*flow.Seal, len(m)) - for i, s := range m { - msg, err := MessageToBlockSeal(s) - if err != nil { - return nil, err - } - seals[i] = msg - } - return seals, nil -} - -func ExecutionResultsToMessages(e []*flow.ExecutionResult) ( - []*entities.ExecutionResult, - error, -) { - execResults := make([]*entities.ExecutionResult, len(e)) - for i, execRes := range e { - parsedExecResult, err := ExecutionResultToMessage(execRes) - if err != nil { - return nil, err - } - execResults[i] = parsedExecResult - } - return execResults, nil -} - -func MessagesToExecutionResults(m []*entities.ExecutionResult) ( - []*flow.ExecutionResult, - error, -) { - execResults := make([]*flow.ExecutionResult, len(m)) - for i, e := range m { - parsedExecResult, err := MessageToExecutionResult(e) - if err != nil { - return nil, fmt.Errorf("failed to convert message at index %d to execution result: %w", i, err) - } - execResults[i] = parsedExecResult - } - return execResults, nil -} - -func BlockToMessage(h *flow.Block, signerIDs flow.IdentifierList) ( - *entities.Block, - error, -) { - - id := h.ID() - - parentID := h.Header.ParentID - t := timestamppb.New(h.Header.Timestamp) - cg := CollectionGuaranteesToMessages(h.Payload.Guarantees) - - seals := BlockSealsToMessages(h.Payload.Seals) - - execResults, err := ExecutionResultsToMessages(h.Payload.Results) - if err != nil { - return nil, err - } - - blockHeader, err := BlockHeaderToMessage(h.Header, signerIDs) - if err != nil { - return nil, err - } - - bh := entities.Block{ - Id: id[:], - Height: h.Header.Height, - ParentId: parentID[:], - Timestamp: t, - CollectionGuarantees: cg, - BlockSeals: seals, - Signatures: [][]byte{h.Header.ParentVoterSigData}, - ExecutionReceiptMetaList: ExecutionResultMetaListToMessages(h.Payload.Receipts), - ExecutionResultList: execResults, - BlockHeader: blockHeader, - } - - return &bh, nil -} - -func BlockToMessageLight(h *flow.Block) *entities.Block { - id := h.ID() - - parentID := h.Header.ParentID - t := timestamppb.New(h.Header.Timestamp) - cg := CollectionGuaranteesToMessages(h.Payload.Guarantees) - - return &entities.Block{ - Id: id[:], - Height: h.Header.Height, - ParentId: parentID[:], - Timestamp: t, - CollectionGuarantees: cg, - Signatures: [][]byte{h.Header.ParentVoterSigData}, - } -} - -func MessageToBlock(m *entities.Block) (*flow.Block, error) { - payload, err := PayloadFromMessage(m) - if err != nil { - return nil, fmt.Errorf("failed to extract payload data from message: %w", err) - } - header, err := MessageToBlockHeader(m.BlockHeader) - if err != nil { - return nil, fmt.Errorf("failed to convert block header: %w", err) - } - return &flow.Block{ - Header: header, - Payload: payload, - }, nil -} - -func MessagesToExecutionResultMetaList(m []*entities.ExecutionReceiptMeta) flow.ExecutionReceiptMetaList { - execMetaList := 
make([]*flow.ExecutionReceiptMeta, len(m)) - for i, message := range m { - execMetaList[i] = &flow.ExecutionReceiptMeta{ - ExecutorID: MessageToIdentifier(message.ExecutorId), - ResultID: MessageToIdentifier(message.ResultId), - Spocks: MessagesToSignatures(message.Spocks), - ExecutorSignature: MessageToSignature(message.ExecutorSignature), - } - } - return execMetaList[:] -} - -func ExecutionResultMetaListToMessages(e flow.ExecutionReceiptMetaList) []*entities.ExecutionReceiptMeta { - messageList := make([]*entities.ExecutionReceiptMeta, len(e)) - for i, execMeta := range e { - messageList[i] = &entities.ExecutionReceiptMeta{ - ExecutorId: IdentifierToMessage(execMeta.ExecutorID), - ResultId: IdentifierToMessage(execMeta.ResultID), - Spocks: SignaturesToMessages(execMeta.Spocks), - ExecutorSignature: MessageToSignature(execMeta.ExecutorSignature), +// AggregatedSignaturesToMessages converts a slice of AggregatedSignature structs to a corresponding +// slice of protobuf messages +func AggregatedSignaturesToMessages(a []flow.AggregatedSignature) []*entities.AggregatedSignature { + parsedMessages := make([]*entities.AggregatedSignature, len(a)) + for i, sig := range a { + parsedMessages[i] = &entities.AggregatedSignature{ + SignerIds: IdentifiersToMessages(sig.SignerIDs), + VerifierSignatures: SignaturesToMessages(sig.VerifierSignatures), } } - return messageList -} - -func PayloadFromMessage(m *entities.Block) (*flow.Payload, error) { - cgs := MessagesToCollectionGuarantees(m.CollectionGuarantees) - seals, err := MessagesToBlockSeals(m.BlockSeals) - if err != nil { - return nil, err - } - receipts := MessagesToExecutionResultMetaList(m.ExecutionReceiptMetaList) - results, err := MessagesToExecutionResults(m.ExecutionResultList) - if err != nil { - return nil, err - } - return &flow.Payload{ - Guarantees: cgs, - Seals: seals, - Receipts: receipts, - Results: results, - }, nil -} - -func CollectionGuaranteeToMessage(g *flow.CollectionGuarantee) *entities.CollectionGuarantee { - id := g.ID() - - return &entities.CollectionGuarantee{ - CollectionId: id[:], - Signatures: [][]byte{g.Signature}, - ReferenceBlockId: IdentifierToMessage(g.ReferenceBlockID), - Signature: g.Signature, - SignerIndices: g.SignerIndices, - } -} - -func MessageToCollectionGuarantee(m *entities.CollectionGuarantee) *flow.CollectionGuarantee { - return &flow.CollectionGuarantee{ - CollectionID: MessageToIdentifier(m.CollectionId), - ReferenceBlockID: MessageToIdentifier(m.ReferenceBlockId), - SignerIndices: m.SignerIndices, - Signature: MessageToSignature(m.Signature), - } + return parsedMessages } +// MessagesToAggregatedSignatures converts a slice of protobuf messages to their corresponding +// AggregatedSignature structs func MessagesToAggregatedSignatures(m []*entities.AggregatedSignature) []flow.AggregatedSignature { parsedSignatures := make([]flow.AggregatedSignature, len(m)) for i, message := range m { @@ -451,29 +59,17 @@ func MessagesToAggregatedSignatures(m []*entities.AggregatedSignature) []flow.Ag return parsedSignatures } -func AggregatedSignaturesToMessages(a []flow.AggregatedSignature) []*entities.AggregatedSignature { - parsedMessages := make([]*entities.AggregatedSignature, len(a)) - for i, sig := range a { - parsedMessages[i] = &entities.AggregatedSignature{ - SignerIds: IdentifiersToMessages(sig.SignerIDs), - VerifierSignatures: SignaturesToMessages(sig.VerifierSignatures), - } - } - return parsedMessages -} - -func MessagesToSignatures(m [][]byte) []crypto.Signature { - signatures := 
make([]crypto.Signature, len(m)) - for i, message := range m { - signatures[i] = MessageToSignature(message) - } - return signatures +// SignatureToMessage converts a crypto.Signature to a byte slice for inclusion in a protobuf message +func SignatureToMessage(s crypto.Signature) []byte { + return s[:] } +// MessageToSignature converts a byte slice from a protobuf message to a crypto.Signature func MessageToSignature(m []byte) crypto.Signature { return m[:] } +// SignaturesToMessages converts a slice of crypto.Signatures to a slice of byte slices for inclusion in a protobuf message func SignaturesToMessages(s []crypto.Signature) [][]byte { messages := make([][]byte, len(s)) for i, sig := range s { @@ -482,208 +78,26 @@ func SignaturesToMessages(s []crypto.Signature) [][]byte { return messages } -func SignatureToMessage(s crypto.Signature) []byte { - return s[:] -} - -func BlockSealToMessage(s *flow.Seal) *entities.BlockSeal { - id := s.BlockID - result := s.ResultID - return &entities.BlockSeal{ - BlockId: id[:], - ExecutionReceiptId: result[:], - ExecutionReceiptSignatures: [][]byte{}, // filling seals signature with zero - FinalState: StateCommitmentToMessage(s.FinalState), - AggregatedApprovalSigs: AggregatedSignaturesToMessages(s.AggregatedApprovalSigs), - ResultId: IdentifierToMessage(s.ResultID), - } -} - -func MessageToBlockSeal(m *entities.BlockSeal) (*flow.Seal, error) { - finalState, err := MessageToStateCommitment(m.FinalState) - if err != nil { - return nil, fmt.Errorf("failed to convert message to block seal: %w", err) - } - return &flow.Seal{ - BlockID: MessageToIdentifier(m.BlockId), - ResultID: MessageToIdentifier(m.ResultId), - FinalState: finalState, - AggregatedApprovalSigs: MessagesToAggregatedSignatures(m.AggregatedApprovalSigs), - }, nil -} - -func CollectionToMessage(c *flow.Collection) (*entities.Collection, error) { - if c == nil || c.Transactions == nil { - return nil, fmt.Errorf("invalid collection") - } - - transactionsIDs := make([][]byte, len(c.Transactions)) - for i, t := range c.Transactions { - id := t.ID() - transactionsIDs[i] = id[:] - } - - collectionID := c.ID() - - ce := &entities.Collection{ - Id: collectionID[:], - TransactionIds: transactionsIDs, - } - - return ce, nil -} - -func LightCollectionToMessage(c *flow.LightCollection) (*entities.Collection, error) { - if c == nil || c.Transactions == nil { - return nil, fmt.Errorf("invalid collection") - } - - collectionID := c.ID() - - return &entities.Collection{ - Id: collectionID[:], - TransactionIds: IdentifiersToMessages(c.Transactions), - }, nil -} - -func EventToMessage(e flow.Event) *entities.Event { - return &entities.Event{ - Type: string(e.Type), - TransactionId: e.TransactionID[:], - TransactionIndex: e.TransactionIndex, - EventIndex: e.EventIndex, - Payload: e.Payload, - } -} - -func MessageToAccount(m *entities.Account) (*flow.Account, error) { - if m == nil { - return nil, ErrEmptyMessage - } - - accountKeys := make([]flow.AccountPublicKey, len(m.GetKeys())) - for i, key := range m.GetKeys() { - accountKey, err := MessageToAccountKey(key) - if err != nil { - return nil, err - } - - accountKeys[i] = *accountKey - } - - return &flow.Account{ - Address: flow.BytesToAddress(m.GetAddress()), - Balance: m.GetBalance(), - Keys: accountKeys, - Contracts: m.Contracts, - }, nil -} - -func AccountToMessage(a *flow.Account) (*entities.Account, error) { - keys := make([]*entities.AccountKey, len(a.Keys)) - for i, k := range a.Keys { - messageKey, err := AccountKeyToMessage(k) - if err != nil { - return 
nil, err - } - keys[i] = messageKey - } - - return &entities.Account{ - Address: a.Address.Bytes(), - Balance: a.Balance, - Code: nil, - Keys: keys, - Contracts: a.Contracts, - }, nil -} - -func MessageToAccountKey(m *entities.AccountKey) (*flow.AccountPublicKey, error) { - if m == nil { - return nil, ErrEmptyMessage - } - - sigAlgo := crypto.SigningAlgorithm(m.GetSignAlgo()) - hashAlgo := hash.HashingAlgorithm(m.GetHashAlgo()) - - publicKey, err := crypto.DecodePublicKey(sigAlgo, m.GetPublicKey()) - if err != nil { - return nil, err - } - - return &flow.AccountPublicKey{ - Index: int(m.GetIndex()), - PublicKey: publicKey, - SignAlgo: sigAlgo, - HashAlgo: hashAlgo, - Weight: int(m.GetWeight()), - SeqNumber: uint64(m.GetSequenceNumber()), - Revoked: m.GetRevoked(), - }, nil -} - -func AccountKeyToMessage(a flow.AccountPublicKey) (*entities.AccountKey, error) { - publicKey := a.PublicKey.Encode() - return &entities.AccountKey{ - Index: uint32(a.Index), - PublicKey: publicKey, - SignAlgo: uint32(a.SignAlgo), - HashAlgo: uint32(a.HashAlgo), - Weight: uint32(a.Weight), - SequenceNumber: uint32(a.SeqNumber), - Revoked: a.Revoked, - }, nil -} - -func MessagesToEvents(l []*entities.Event) []flow.Event { - events := make([]flow.Event, len(l)) - - for i, m := range l { - events[i] = MessageToEvent(m) - } - - return events -} - -func MessageToEvent(m *entities.Event) flow.Event { - return flow.Event{ - Type: flow.EventType(m.GetType()), - TransactionID: flow.HashToID(m.GetTransactionId()), - TransactionIndex: m.GetTransactionIndex(), - EventIndex: m.GetEventIndex(), - Payload: m.GetPayload(), - } -} - -func EventsToMessages(flowEvents []flow.Event) []*entities.Event { - events := make([]*entities.Event, len(flowEvents)) - for i, e := range flowEvents { - event := EventToMessage(e) - events[i] = event +// MessagesToSignatures converts a slice of byte slices from a protobuf message to a slice of crypto.Signatures +func MessagesToSignatures(m [][]byte) []crypto.Signature { + signatures := make([]crypto.Signature, len(m)) + for i, message := range m { + signatures[i] = MessageToSignature(message) } - return events + return signatures } +// IdentifierToMessage converts a flow.Identifier to a byte slice for inclusion in a protobuf message func IdentifierToMessage(i flow.Identifier) []byte { return i[:] } +// MessageToIdentifier converts a byte slice from a protobuf message to a flow.Identifier func MessageToIdentifier(b []byte) flow.Identifier { return flow.HashToID(b) } -func StateCommitmentToMessage(s flow.StateCommitment) []byte { - return s[:] -} - -func MessageToStateCommitment(bytes []byte) (sc flow.StateCommitment, err error) { - if len(bytes) != len(sc) { - return sc, fmt.Errorf("invalid state commitment length. 
got %d expected %d", len(bytes), len(sc)) - } - copy(sc[:], bytes) - return -} - +// IdentifiersToMessages converts a slice of flow.Identifiers to a slice of byte slices for inclusion in a protobuf message func IdentifiersToMessages(l []flow.Identifier) [][]byte { results := make([][]byte, len(l)) for i, item := range l { @@ -692,6 +106,7 @@ func IdentifiersToMessages(l []flow.Identifier) [][]byte { return results } +// MessagesToIdentifiers converts a slice of byte slices from a protobuf message to a slice of flow.Identifiers func MessagesToIdentifiers(l [][]byte) []flow.Identifier { results := make([]flow.Identifier, len(l)) for i, item := range l { @@ -700,416 +115,16 @@ func MessagesToIdentifiers(l [][]byte) []flow.Identifier { return results } -// SnapshotToBytes converts a `protocol.Snapshot` to bytes, encoded as JSON -func SnapshotToBytes(snapshot protocol.Snapshot) ([]byte, error) { - serializable, err := inmem.FromSnapshot(snapshot) - if err != nil { - return nil, err - } - - data, err := json.Marshal(serializable.Encodable()) - if err != nil { - return nil, err - } - - return data, nil -} - -// BytesToInmemSnapshot converts an array of bytes to `inmem.Snapshot` -func BytesToInmemSnapshot(bytes []byte) (*inmem.Snapshot, error) { - var encodable inmem.EncodableSnapshot - err := json.Unmarshal(bytes, &encodable) - if err != nil { - return nil, fmt.Errorf("could not unmarshal decoded snapshot: %w", err) - } - - return inmem.SnapshotFromEncodable(encodable), nil -} - -func MessagesToChunkList(m []*entities.Chunk) (flow.ChunkList, error) { - parsedChunks := make(flow.ChunkList, len(m)) - for i, chunk := range m { - parsedChunk, err := MessageToChunk(chunk) - if err != nil { - return nil, fmt.Errorf("failed to parse message at index %d to chunk: %w", i, err) - } - parsedChunks[i] = parsedChunk - } - return parsedChunks, nil -} - -func MessagesToServiceEventList(m []*entities.ServiceEvent) ( - flow.ServiceEventList, - error, -) { - parsedServiceEvents := make(flow.ServiceEventList, len(m)) - for i, serviceEvent := range m { - parsedServiceEvent, err := MessageToServiceEvent(serviceEvent) - if err != nil { - return nil, fmt.Errorf("failed to parse service event at index %d from message: %w", i, err) - } - parsedServiceEvents[i] = *parsedServiceEvent - } - return parsedServiceEvents, nil -} - -func MessageToExecutionResult(m *entities.ExecutionResult) ( - *flow.ExecutionResult, - error, -) { - // convert Chunks - parsedChunks, err := MessagesToChunkList(m.Chunks) - if err != nil { - return nil, fmt.Errorf("failed to parse messages to ChunkList: %w", err) - } - // convert ServiceEvents - parsedServiceEvents, err := MessagesToServiceEventList(m.ServiceEvents) - if err != nil { - return nil, err - } - return &flow.ExecutionResult{ - PreviousResultID: MessageToIdentifier(m.PreviousResultId), - BlockID: MessageToIdentifier(m.BlockId), - Chunks: parsedChunks, - ServiceEvents: parsedServiceEvents, - ExecutionDataID: MessageToIdentifier(m.ExecutionDataId), - }, nil -} - -func ExecutionResultToMessage(er *flow.ExecutionResult) ( - *entities.ExecutionResult, - error, -) { - - chunks := make([]*entities.Chunk, len(er.Chunks)) - - for i, chunk := range er.Chunks { - chunks[i] = ChunkToMessage(chunk) - } - - serviceEvents := make([]*entities.ServiceEvent, len(er.ServiceEvents)) - var err error - for i, serviceEvent := range er.ServiceEvents { - serviceEvents[i], err = ServiceEventToMessage(serviceEvent) - if err != nil { - return nil, fmt.Errorf("error while convering service event %d: %w", i, err) - } 
- } - - return &entities.ExecutionResult{ - PreviousResultId: IdentifierToMessage(er.PreviousResultID), - BlockId: IdentifierToMessage(er.BlockID), - Chunks: chunks, - ServiceEvents: serviceEvents, - ExecutionDataId: IdentifierToMessage(er.ExecutionDataID), - }, nil -} - -func ServiceEventToMessage(event flow.ServiceEvent) (*entities.ServiceEvent, error) { - - bytes, err := json.Marshal(event.Event) - if err != nil { - return nil, fmt.Errorf("cannot marshal service event: %w", err) - } - - return &entities.ServiceEvent{ - Type: event.Type.String(), - Payload: bytes, - }, nil -} - -func MessageToServiceEvent(m *entities.ServiceEvent) (*flow.ServiceEvent, error) { - rawEvent := m.Payload - eventType := flow.ServiceEventType(m.Type) - se, err := flow.ServiceEventJSONMarshaller.UnmarshalWithType(rawEvent, eventType) - - return &se, err -} - -func ChunkToMessage(chunk *flow.Chunk) *entities.Chunk { - return &entities.Chunk{ - CollectionIndex: uint32(chunk.CollectionIndex), - StartState: StateCommitmentToMessage(chunk.StartState), - EventCollection: IdentifierToMessage(chunk.EventCollection), - BlockId: IdentifierToMessage(chunk.BlockID), - TotalComputationUsed: chunk.TotalComputationUsed, - NumberOfTransactions: uint32(chunk.NumberOfTransactions), - Index: chunk.Index, - EndState: StateCommitmentToMessage(chunk.EndState), - } -} - -func MessageToChunk(m *entities.Chunk) (*flow.Chunk, error) { - startState, err := flow.ToStateCommitment(m.StartState) - if err != nil { - return nil, fmt.Errorf("failed to parse Message start state to Chunk: %w", err) - } - endState, err := flow.ToStateCommitment(m.EndState) - if err != nil { - return nil, fmt.Errorf("failed to parse Message end state to Chunk: %w", err) - } - chunkBody := flow.ChunkBody{ - CollectionIndex: uint(m.CollectionIndex), - StartState: startState, - EventCollection: MessageToIdentifier(m.EventCollection), - BlockID: MessageToIdentifier(m.BlockId), - TotalComputationUsed: m.TotalComputationUsed, - NumberOfTransactions: uint64(m.NumberOfTransactions), - } - return &flow.Chunk{ - ChunkBody: chunkBody, - Index: m.Index, - EndState: endState, - }, nil -} - -func BlockExecutionDataToMessage(data *execution_data.BlockExecutionData) ( - *entities.BlockExecutionData, - error, -) { - chunkExecutionDatas := make([]*entities.ChunkExecutionData, len(data.ChunkExecutionDatas)) - for i, chunk := range data.ChunkExecutionDatas { - chunkMessage, err := ChunkExecutionDataToMessage(chunk) - if err != nil { - return nil, err - } - chunkExecutionDatas[i] = chunkMessage - } - return &entities.BlockExecutionData{ - BlockId: IdentifierToMessage(data.BlockID), - ChunkExecutionData: chunkExecutionDatas, - }, nil -} - -func ChunkExecutionDataToMessage(data *execution_data.ChunkExecutionData) ( - *entities.ChunkExecutionData, - error, -) { - collection := &entities.ExecutionDataCollection{} - if data.Collection != nil { - collection = &entities.ExecutionDataCollection{ - Transactions: TransactionsToMessages(data.Collection.Transactions), - } - } - - events := EventsToMessages(data.Events) - if len(events) == 0 { - events = nil - } - - var trieUpdate *entities.TrieUpdate - if data.TrieUpdate != nil { - paths := make([][]byte, len(data.TrieUpdate.Paths)) - for i, path := range data.TrieUpdate.Paths { - paths[i] = path[:] - } - - payloads := make([]*entities.Payload, len(data.TrieUpdate.Payloads)) - for i, payload := range data.TrieUpdate.Payloads { - key, err := payload.Key() - if err != nil { - return nil, err - } - keyParts := make([]*entities.KeyPart, 
len(key.KeyParts)) - for j, keyPart := range key.KeyParts { - keyParts[j] = &entities.KeyPart{ - Type: uint32(keyPart.Type), - Value: keyPart.Value, - } - } - payloads[i] = &entities.Payload{ - KeyPart: keyParts, - Value: payload.Value(), - } - } - - trieUpdate = &entities.TrieUpdate{ - RootHash: data.TrieUpdate.RootHash[:], - Paths: paths, - Payloads: payloads, - } - } - - return &entities.ChunkExecutionData{ - Collection: collection, - Events: events, - TrieUpdate: trieUpdate, - }, nil -} - -func MessageToBlockExecutionData( - m *entities.BlockExecutionData, - chain flow.Chain, -) (*execution_data.BlockExecutionData, error) { - if m == nil { - return nil, ErrEmptyMessage - } - chunks := make([]*execution_data.ChunkExecutionData, len(m.ChunkExecutionData)) - for i, chunk := range m.GetChunkExecutionData() { - convertedChunk, err := MessageToChunkExecutionData(chunk, chain) - if err != nil { - return nil, err - } - chunks[i] = convertedChunk - } - - return &execution_data.BlockExecutionData{ - BlockID: MessageToIdentifier(m.GetBlockId()), - ChunkExecutionDatas: chunks, - }, nil -} - -func MessageToChunkExecutionData( - m *entities.ChunkExecutionData, - chain flow.Chain, -) (*execution_data.ChunkExecutionData, error) { - collection, err := messageToTrustedCollection(m.GetCollection(), chain) - if err != nil { - return nil, err - } - - var trieUpdate *ledger.TrieUpdate - if m.GetTrieUpdate() != nil { - trieUpdate, err = MessageToTrieUpdate(m.GetTrieUpdate()) - if err != nil { - return nil, err - } - } - - events := MessagesToEvents(m.GetEvents()) - if len(events) == 0 { - events = nil - } - - return &execution_data.ChunkExecutionData{ - Collection: collection, - Events: events, - TrieUpdate: trieUpdate, - }, nil -} - -func messageToTrustedCollection( - m *entities.ExecutionDataCollection, - chain flow.Chain, -) (*flow.Collection, error) { - messages := m.GetTransactions() - transactions := make([]*flow.TransactionBody, len(messages)) - for i, message := range messages { - transaction, err := messageToTrustedTransaction(message, chain) - if err != nil { - return nil, fmt.Errorf("could not convert transaction %d: %w", i, err) - } - transactions[i] = &transaction - } - - if len(transactions) == 0 { - return nil, nil - } - - return &flow.Collection{Transactions: transactions}, nil -} - -// messageToTrustedTransaction converts a transaction message to a transaction body. -// This is useful when converting transactions from trusted state like BlockExecutionData which -// contain service transactions that do not conform to external transaction format. 
-func messageToTrustedTransaction( - m *entities.Transaction, - chain flow.Chain, -) (flow.TransactionBody, error) { - if m == nil { - return flow.TransactionBody{}, ErrEmptyMessage - } - - t := flow.NewTransactionBody() - - proposalKey := m.GetProposalKey() - if proposalKey != nil { - proposalAddress, err := insecureAddress(proposalKey.GetAddress(), chain) - if err != nil { - return *t, fmt.Errorf("could not convert proposer address: %w", err) - } - t.SetProposalKey(proposalAddress, uint64(proposalKey.GetKeyId()), proposalKey.GetSequenceNumber()) - } - - payer := m.GetPayer() - if payer != nil { - payerAddress, err := insecureAddress(payer, chain) - if err != nil { - return *t, fmt.Errorf("could not convert payer address: %w", err) - } - t.SetPayer(payerAddress) - } - - for _, authorizer := range m.GetAuthorizers() { - authorizerAddress, err := Address(authorizer, chain) - if err != nil { - return *t, fmt.Errorf("could not convert authorizer address: %w", err) - } - t.AddAuthorizer(authorizerAddress) - } - - for _, sig := range m.GetPayloadSignatures() { - addr, err := Address(sig.GetAddress(), chain) - if err != nil { - return *t, fmt.Errorf("could not convert payload signature address: %w", err) - } - t.AddPayloadSignature(addr, uint64(sig.GetKeyId()), sig.GetSignature()) - } - - for _, sig := range m.GetEnvelopeSignatures() { - addr, err := Address(sig.GetAddress(), chain) - if err != nil { - return *t, fmt.Errorf("could not convert envelope signature address: %w", err) - } - t.AddEnvelopeSignature(addr, uint64(sig.GetKeyId()), sig.GetSignature()) - } - - t.SetScript(m.GetScript()) - t.SetArguments(m.GetArguments()) - t.SetReferenceBlockID(flow.HashToID(m.GetReferenceBlockId())) - t.SetGasLimit(m.GetGasLimit()) - - return *t, nil -} - -func MessageToTrieUpdate(m *entities.TrieUpdate) (*ledger.TrieUpdate, error) { - rootHash, err := ledger.ToRootHash(m.GetRootHash()) - if err != nil { - return nil, fmt.Errorf("could not convert root hash: %w", err) - } - - paths := make([]ledger.Path, len(m.GetPaths())) - for i, path := range m.GetPaths() { - convertedPath, err := ledger.ToPath(path) - if err != nil { - return nil, fmt.Errorf("could not convert path %d: %w", i, err) - } - paths[i] = convertedPath - } - - payloads := make([]*ledger.Payload, len(m.Payloads)) - for i, payload := range m.GetPayloads() { - keyParts := make([]ledger.KeyPart, len(payload.GetKeyPart())) - for j, keypart := range payload.GetKeyPart() { - keyParts[j] = ledger.NewKeyPart(uint16(keypart.GetType()), keypart.GetValue()) - } - payloads[i] = ledger.NewPayload(ledger.NewKey(keyParts), payload.GetValue()) - } - - return &ledger.TrieUpdate{ - RootHash: rootHash, - Paths: paths, - Payloads: payloads, - }, nil +// StateCommitmentToMessage converts a flow.StateCommitment to a byte slice for inclusion in a protobuf message +func StateCommitmentToMessage(s flow.StateCommitment) []byte { + return s[:] } -// insecureAddress converts a raw address to a flow.Address, skipping validation -// This is useful when converting transactions from trusted state like BlockExecutionData. 
-// This should only be used for trusted inputs -func insecureAddress(rawAddress []byte, chain flow.Chain) (flow.Address, error) { - if len(rawAddress) == 0 { - return flow.EmptyAddress, status.Error(codes.InvalidArgument, "address cannot be empty") +// MessageToStateCommitment converts a byte slice from a protobuf message to a flow.StateCommitment +func MessageToStateCommitment(bytes []byte) (sc flow.StateCommitment, err error) { + if len(bytes) != len(sc) { + return sc, fmt.Errorf("invalid state commitment length. got %d expected %d", len(bytes), len(sc)) } - - return flow.BytesToAddress(rawAddress), nil + copy(sc[:], bytes) + return } diff --git a/engine/common/rpc/convert/convert_test.go b/engine/common/rpc/convert/convert_test.go deleted file mode 100644 index a98f828d0f6..00000000000 --- a/engine/common/rpc/convert/convert_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package convert_test - -import ( - "bytes" - "math/rand" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/fvm" - "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/testutils" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestConvertTransaction(t *testing.T) { - tx := unittest.TransactionBodyFixture() - - msg := convert.TransactionToMessage(tx) - converted, err := convert.MessageToTransaction(msg, flow.Testnet.Chain()) - assert.Nil(t, err) - - assert.Equal(t, tx, converted) - assert.Equal(t, tx.ID(), converted.ID()) -} - -func TestConvertAccountKey(t *testing.T) { - privateKey, _ := unittest.AccountKeyDefaultFixture() - accountKey := privateKey.PublicKey(fvm.AccountKeyWeightThreshold) - - // Explicitly test if Revoked is properly converted - accountKey.Revoked = true - - msg, err := convert.AccountKeyToMessage(accountKey) - assert.Nil(t, err) - - converted, err := convert.MessageToAccountKey(msg) - assert.Nil(t, err) - - assert.Equal(t, accountKey, *converted) - assert.Equal(t, accountKey.PublicKey, converted.PublicKey) - assert.Equal(t, accountKey.Revoked, converted.Revoked) -} - -func TestConvertEvents(t *testing.T) { - t.Run("empty", func(t *testing.T) { - messages := convert.EventsToMessages(nil) - assert.Len(t, messages, 0) - }) - - t.Run("simple", func(t *testing.T) { - - txID := unittest.IdentifierFixture() - event := unittest.EventFixture(flow.EventAccountCreated, 2, 3, txID, 0) - - messages := convert.EventsToMessages([]flow.Event{event}) - - require.Len(t, messages, 1) - - message := messages[0] - - require.Equal(t, event.EventIndex, message.EventIndex) - require.Equal(t, event.TransactionIndex, message.TransactionIndex) - require.Equal(t, event.Payload, message.Payload) - require.Equal(t, event.TransactionID[:], message.TransactionId) - require.Equal(t, string(event.Type), message.Type) - }) -} - -// TestConvertBlockExecutionData checks if conversions between BlockExecutionData and it's fields are consistent. 
-func TestConvertBlockExecutionData(t *testing.T) { - // Initialize the BlockExecutionData object - numChunks := 5 - ced := make([]*execution_data.ChunkExecutionData, numChunks) - bed := &execution_data.BlockExecutionData{ - BlockID: unittest.IdentifierFixture(), - ChunkExecutionDatas: ced, - } - - // Fill the chunk execution datas with trie updates, collections, and events - minSerializedSize := uint64(10 * execution_data.DefaultMaxBlobSize) - for i := 0; i < numChunks; i++ { - // the service chunk sometimes does not have any trie updates - if i == numChunks-1 { - tx1 := unittest.TransactionBodyFixture() - // proposal key and payer are empty addresses for service tx - tx1.ProposalKey.Address = flow.EmptyAddress - tx1.Payer = flow.EmptyAddress - bed.ChunkExecutionDatas[i] = &execution_data.ChunkExecutionData{ - Collection: &flow.Collection{Transactions: []*flow.TransactionBody{&tx1}}, - } - continue - } - - // Initialize collection - tx1 := unittest.TransactionBodyFixture() - tx2 := unittest.TransactionBodyFixture() - col := &flow.Collection{Transactions: []*flow.TransactionBody{&tx1, &tx2}} - - // Initialize events - header := unittest.BlockHeaderFixture() - events := unittest.BlockEventsFixture(header, 5).Events - - chunk := &execution_data.ChunkExecutionData{ - Collection: col, - Events: events, - TrieUpdate: testutils.TrieUpdateFixture(1, 1, 8), - } - size := 1 - - // Fill the TrieUpdate with data - inner: - for { - buf := &bytes.Buffer{} - require.NoError(t, execution_data.DefaultSerializer.Serialize(buf, chunk)) - - if buf.Len() >= int(minSerializedSize) { - break inner - } - - v := make([]byte, size) - _, _ = rand.Read(v) - - k, err := chunk.TrieUpdate.Payloads[0].Key() - require.NoError(t, err) - - chunk.TrieUpdate.Payloads[0] = ledger.NewPayload(k, v) - size *= 2 - } - bed.ChunkExecutionDatas[i] = chunk - } - - t.Run("chunk execution data conversions", func(t *testing.T) { - chunkMsg, err := convert.ChunkExecutionDataToMessage(bed.ChunkExecutionDatas[0]) - assert.Nil(t, err) - - chunkReConverted, err := convert.MessageToChunkExecutionData(chunkMsg, flow.Testnet.Chain()) - assert.Nil(t, err) - assert.Equal(t, bed.ChunkExecutionDatas[0], chunkReConverted) - }) - - t.Run("block execution data conversions", func(t *testing.T) { - blockMsg, err := convert.BlockExecutionDataToMessage(bed) - assert.Nil(t, err) - - bedReConverted, err := convert.MessageToBlockExecutionData(blockMsg, flow.Testnet.Chain()) - assert.Nil(t, err) - assert.Equal(t, bed, bedReConverted) - }) -} diff --git a/engine/common/rpc/convert/events.go b/engine/common/rpc/convert/events.go new file mode 100644 index 00000000000..2d78f3b62f2 --- /dev/null +++ b/engine/common/rpc/convert/events.go @@ -0,0 +1,358 @@ +package convert + +import ( + "encoding/json" + "fmt" + + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/onflow/cadence/encoding/ccf" + jsoncdc "github.com/onflow/cadence/encoding/json" + + "github.com/onflow/flow-go/model/flow" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" +) + +// EventToMessage converts a flow.Event to a protobuf message +// Note: this function does not convert the payload encoding +func EventToMessage(e flow.Event) *entities.Event { + return &entities.Event{ + Type: string(e.Type), + TransactionId: e.TransactionID[:], + TransactionIndex: e.TransactionIndex, + EventIndex: e.EventIndex, + Payload: e.Payload, + } +} + +// MessageToEvent converts a protobuf message to a flow.Event +// Note: this 
function does not convert the payload encoding +// All errors indicate the input cannot be converted to a valid event. +func MessageToEvent(m *entities.Event) (*flow.Event, error) { + event, err := flow.NewEvent( + flow.UntrustedEvent{ + Type: flow.EventType(m.GetType()), + TransactionID: flow.HashToID(m.GetTransactionId()), + TransactionIndex: m.GetTransactionIndex(), + EventIndex: m.GetEventIndex(), + Payload: m.GetPayload(), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct event: %w", err) + } + + return event, nil +} + +// EventsToMessages converts a slice of flow.Events to a slice of protobuf messages +// Note: this function does not convert the payload encoding +func EventsToMessages(flowEvents []flow.Event) []*entities.Event { + events := make([]*entities.Event, len(flowEvents)) + for i, e := range flowEvents { + events[i] = EventToMessage(e) + } + return events +} + +// MessagesToEvents converts a slice of protobuf messages to a slice of flow.Events +// Note: this function does not convert the payload encoding +func MessagesToEvents(l []*entities.Event) ([]flow.Event, error) { + events := make([]flow.Event, len(l)) + for i, m := range l { + event, err := MessageToEvent(m) + if err != nil { + return nil, fmt.Errorf("could not convert message at index %d to event: %w", i, err) + } + events[i] = *event + } + + return events, nil +} + +// EventToMessageFromVersion converts a flow.Event to a protobuf message, converting the payload +// encoding from CCF to JSON if the input version is CCF +func EventToMessageFromVersion(e flow.Event, version entities.EventEncodingVersion) (*entities.Event, error) { + message := EventToMessage(e) + + if len(e.Payload) > 0 { + switch version { + case entities.EventEncodingVersion_CCF_V0: + convertedPayload, err := CcfPayloadToJsonPayload(e.Payload) + if err != nil { + return nil, fmt.Errorf("could not convert event payload from CCF to Json: %w", err) + } + message.Payload = convertedPayload + case entities.EventEncodingVersion_JSON_CDC_V0: + default: + return nil, fmt.Errorf("invalid encoding format %d", version) + } + } + + return message, nil +} + +// MessageToEventFromVersion converts a protobuf message to a flow.Event, and converts the payload +// encoding from CCF to JSON if the input version is CCF +func MessageToEventFromVersion(m *entities.Event, inputVersion entities.EventEncodingVersion) (*flow.Event, error) { + event, err := MessageToEvent(m) + if err != nil { + return nil, fmt.Errorf("could not convert the event: %w", err) + } + switch inputVersion { + case entities.EventEncodingVersion_CCF_V0: + convertedPayload, err := CcfPayloadToJsonPayload(event.Payload) + if err != nil { + return nil, fmt.Errorf("could not convert event payload from CCF to Json: %w", err) + } + e, err := flow.NewEvent( + flow.UntrustedEvent{ + Type: event.Type, + TransactionID: event.TransactionID, + TransactionIndex: event.TransactionIndex, + EventIndex: event.EventIndex, + Payload: convertedPayload, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct the event: %w", err) + } + + return e, nil + case entities.EventEncodingVersion_JSON_CDC_V0: + return event, nil + default: + return nil, fmt.Errorf("invalid encoding format %d", inputVersion) + } +} + +// EventsToMessagesWithEncodingConversion converts a slice of flow.Events to a slice of protobuf messages, converting +// the payload encoding from CCF to JSON if the input version is CCF +func EventsToMessagesWithEncodingConversion( + flowEvents []flow.Event, + from 
entities.EventEncodingVersion,
+ to entities.EventEncodingVersion,
+) ([]*entities.Event, error) {
+ if from == entities.EventEncodingVersion_JSON_CDC_V0 && to == entities.EventEncodingVersion_CCF_V0 {
+ return nil, fmt.Errorf("conversion from format %s to %s is not supported", from.String(), to.String())
+ }
+
+ if from == to {
+ return EventsToMessages(flowEvents), nil
+ }
+
+ events := make([]*entities.Event, len(flowEvents))
+ for i, e := range flowEvents {
+ event, err := EventToMessageFromVersion(e, from)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert event at index %d from format %d: %w",
+ e.EventIndex, from, err)
+ }
+ events[i] = event
+ }
+ return events, nil
+}
+
+// MessagesToEventsWithEncodingConversion converts a slice of protobuf messages to a slice of flow.Events, converting
+// the payload encoding from CCF to JSON if the input version is CCF
+func MessagesToEventsWithEncodingConversion(
+ messageEvents []*entities.Event,
+ from entities.EventEncodingVersion,
+ to entities.EventEncodingVersion,
+) ([]flow.Event, error) {
+ if from == entities.EventEncodingVersion_JSON_CDC_V0 && to == entities.EventEncodingVersion_CCF_V0 {
+ return nil, fmt.Errorf("conversion from format %s to %s is not supported", from.String(), to.String())
+ }
+
+ if from == to {
+ return MessagesToEvents(messageEvents)
+ }
+
+ events := make([]flow.Event, len(messageEvents))
+ for i, m := range messageEvents {
+ event, err := MessageToEventFromVersion(m, from)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert event at index %d from format %d: %w",
+ m.EventIndex, from, err)
+ }
+ events[i] = *event
+ }
+ return events, nil
+}
+
+// ServiceEventToMessage converts a flow.ServiceEvent to a protobuf message
+func ServiceEventToMessage(event flow.ServiceEvent) (*entities.ServiceEvent, error) {
+ bytes, err := json.Marshal(event.Event)
+ if err != nil {
+ return nil, fmt.Errorf("cannot marshal service event: %w", err)
+ }
+
+ return &entities.ServiceEvent{
+ Type: event.Type.String(),
+ Payload: bytes,
+ }, nil
+}
+
+// MessageToServiceEvent converts a protobuf message to a flow.ServiceEvent
+func MessageToServiceEvent(m *entities.ServiceEvent) (*flow.ServiceEvent, error) {
+ rawEvent := m.Payload
+ eventType := flow.ServiceEventType(m.Type)
+ se, err := flow.ServiceEventJSONMarshaller.UnmarshalWithType(rawEvent, eventType)
+
+ return &se, err
+}
+
+// ServiceEventListToMessages converts a slice of flow.ServiceEvents to a slice of protobuf messages
+func ServiceEventListToMessages(list flow.ServiceEventList) (
+ []*entities.ServiceEvent,
+ error,
+) {
+ entities := make([]*entities.ServiceEvent, len(list))
+ for i, serviceEvent := range list {
+ m, err := ServiceEventToMessage(serviceEvent)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert service event at index %d to message: %w", i, err)
+ }
+ entities[i] = m
+ }
+ return entities, nil
+}
+
+// MessagesToServiceEventList converts a slice of protobuf messages to a flow.ServiceEventList
+func MessagesToServiceEventList(m []*entities.ServiceEvent) (
+ flow.ServiceEventList,
+ error,
+) {
+ parsedServiceEvents := make(flow.ServiceEventList, len(m))
+ for i, serviceEvent := range m {
+ parsedServiceEvent, err := MessageToServiceEvent(serviceEvent)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse service event at index %d from message: %w", i, err)
+ }
+ parsedServiceEvents[i] = *parsedServiceEvent
+ }
+ return parsedServiceEvents, nil
+}
+
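For orientation, a minimal usage sketch of the service-event converters defined above; it is a sketch only, assuming the unittest.ServiceEventsFixture helper that the tests later in this diff also use.

package main

import (
	"log"

	"github.com/onflow/flow-go/engine/common/rpc/convert"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	// Five service events of mixed types, as in TestConvertServiceEventList.
	serviceEvents := unittest.ServiceEventsFixture(5)

	// flow.ServiceEventList -> []*entities.ServiceEvent (JSON-marshalled payloads).
	msgs, err := convert.ServiceEventListToMessages(serviceEvents)
	if err != nil {
		log.Fatal(err)
	}

	// ...and back; the type tag on each message selects the unmarshaller.
	restored, err := convert.MessagesToServiceEventList(msgs)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("round-tripped %d service event(s)", len(restored))
}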
+// CcfPayloadToJsonPayload converts a CCF-encoded payload to a JSON-encoded payload
+func CcfPayloadToJsonPayload(p []byte) ([]byte, error) {
+ if len(p) == 0 {
+ return p, nil
+ }
+
+ val, err := ccf.Decode(nil, p)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode from ccf format: %w", err)
+ }
+ res, err := jsoncdc.Encode(val)
+ if err != nil {
+ return nil, fmt.Errorf("unable to encode to json-cdc format: %w", err)
+ }
+ return res, nil
+}
+
+// CcfEventToJsonEvent returns a new event with the payload converted from CCF to JSON
+func CcfEventToJsonEvent(e flow.Event) (*flow.Event, error) {
+ convertedPayload, err := CcfPayloadToJsonPayload(e.Payload)
+ if err != nil {
+ return nil, err
+ }
+
+ event, err := flow.NewEvent(
+ flow.UntrustedEvent{
+ Type: e.Type,
+ TransactionID: e.TransactionID,
+ TransactionIndex: e.TransactionIndex,
+ EventIndex: e.EventIndex,
+ Payload: convertedPayload,
+ },
+ )
+ if err != nil {
+ return nil, fmt.Errorf("could not construct event: %w", err)
+ }
+
+ return event, nil
+}
+
+// CcfEventsToJsonEvents returns new events with the payloads converted from CCF to JSON
+func CcfEventsToJsonEvents(events []flow.Event) ([]flow.Event, error) {
+ convertedEvents := make([]flow.Event, len(events))
+ for i, e := range events {
+ payload, err := CcfPayloadToJsonPayload(e.Payload)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert event payload for event %d: %w", i, err)
+ }
+ convertedEvent, err := flow.NewEvent(
+ flow.UntrustedEvent{
+ Type: e.Type,
+ TransactionID: e.TransactionID,
+ TransactionIndex: e.TransactionIndex,
+ EventIndex: e.EventIndex,
+ Payload: payload,
+ },
+ )
+ if err != nil {
+ return nil, fmt.Errorf("could not construct event: %w", err)
+ }
+ convertedEvents[i] = *convertedEvent
+ }
+ return convertedEvents, nil
+}
+
+// MessagesToBlockEvents converts a slice of protobuf EventsResponse_Result messages to a slice of flow.BlockEvents.
+func MessagesToBlockEvents(blocksEvents []*accessproto.EventsResponse_Result) ([]flow.BlockEvents, error) {
+ evs := make([]flow.BlockEvents, len(blocksEvents))
+ for i, ev := range blocksEvents {
+ event, err := MessageToBlockEvents(ev)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert message at index %d to block events: %w", i, err)
+ }
+ evs[i] = *event
+ }
+
+ return evs, nil
+}
+
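The block-events converters defined next map a flow.BlockEvents value onto an access API EventsResponse_Result and back. A hedged usage sketch, reusing fixture helpers from the tests in this diff:

package main

import (
	"log"

	"github.com/onflow/flow-go/engine/common/rpc/convert"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	// A block header plus two events, as in TestConvertMessagesToBlockEvents.
	header := unittest.BlockHeaderFixture()
	original := unittest.BlockEventsFixture(header, 2)

	// flow.BlockEvents -> accessproto.EventsResponse_Result.
	msg, err := convert.BlockEventsToMessage(original)
	if err != nil {
		log.Fatal(err)
	}

	// ...and back; the round trip is expected to be lossless.
	restored, err := convert.MessageToBlockEvents(msg)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("height=%d events=%d", restored.BlockHeight, len(restored.Events))
}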
+// MessageToBlockEvents converts a protobuf EventsResponse_Result message to a flow.BlockEvents.
+func MessageToBlockEvents(blockEvents *accessproto.EventsResponse_Result) (*flow.BlockEvents, error) {
+ events, err := MessagesToEvents(blockEvents.Events)
+ if err != nil {
+ return nil, fmt.Errorf("could not convert message to events: %w", err)
+ }
+ return &flow.BlockEvents{
+ BlockHeight: blockEvents.BlockHeight,
+ BlockID: MessageToIdentifier(blockEvents.BlockId),
+ BlockTimestamp: blockEvents.BlockTimestamp.AsTime(),
+ Events: events,
+ }, nil
+}
+
+func BlockEventsToMessages(blocks []flow.BlockEvents) ([]*accessproto.EventsResponse_Result, error) {
+ results := make([]*accessproto.EventsResponse_Result, len(blocks))
+
+ for i, block := range blocks {
+ event, err := BlockEventsToMessage(block)
+ if err != nil {
+ return nil, err
+ }
+ results[i] = event
+ }
+
+ return results, nil
+}
+
+func BlockEventsToMessage(block flow.BlockEvents) (*accessproto.EventsResponse_Result, error) {
+ eventMessages := make([]*entities.Event, len(block.Events))
+ for i, event := range block.Events {
+ eventMessages[i] = EventToMessage(event)
+ }
+ timestamp := timestamppb.New(block.BlockTimestamp)
+ return &accessproto.EventsResponse_Result{
+ BlockId: block.BlockID[:],
+ BlockHeight: block.BlockHeight,
+ BlockTimestamp: timestamp,
+ Events: eventMessages,
+ }, nil
+}
diff --git a/engine/common/rpc/convert/events_test.go b/engine/common/rpc/convert/events_test.go
new file mode 100644
index 00000000000..7db322f80d4
--- /dev/null
+++ b/engine/common/rpc/convert/events_test.go
@@ -0,0 +1,235 @@
+package convert_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/cadence"
+ "github.com/onflow/cadence/encoding/ccf"
+ jsoncdc "github.com/onflow/cadence/encoding/json"
+ "github.com/onflow/flow/protobuf/go/flow/entities"
+
+ "github.com/onflow/flow-go/engine/common/rpc/convert"
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestConvertEventWithoutPayloadConversion tests converting events to and from protobuf messages
+// with no payload modification
+func TestConvertEventWithoutPayloadConversion(t *testing.T) {
+ t.Parallel()
+
+ cadenceValue := cadence.NewInt(2)
+
+ t.Run("convert ccf encoded event", func(t *testing.T) {
+ ccfPayload, err := ccf.Encode(cadenceValue)
+ require.NoError(t, err)
+
+ event := unittest.EventFixture(
+ unittest.Event.WithEventType(flow.EventAccountCreated),
+ unittest.Event.WithPayload(ccfPayload),
+ )
+
+ msg := convert.EventToMessage(event)
+ converted, err := convert.MessageToEvent(msg)
+ require.NoError(t, err)
+
+ require.Equal(t, event, *converted)
+ })
+
+ t.Run("convert json cdc encoded event", func(t *testing.T) {
+ jsonPayload, err := jsoncdc.Encode(cadenceValue)
+ require.NoError(t, err)
+
+ event := unittest.EventFixture(
+ unittest.Event.WithEventType(flow.EventAccountCreated),
+ unittest.Event.WithPayload(jsonPayload),
+ )
+
+ msg := convert.EventToMessage(event)
+ converted, err := convert.MessageToEvent(msg)
+ require.NoError(t, err)
+
+ require.Equal(t, event.Type, converted.Type)
+ })
+}
+
+// TestConvertEventWithPayloadConversion tests converting events to and from protobuf messages
+// with payload modification
+func TestConvertEventWithPayloadConversion(t *testing.T) {
+ t.Parallel()
+
+ cadenceValue := cadence.NewInt(2)
+
+ payload, err := ccf.Encode(cadenceValue)
+ require.NoError(t, err)
+
+ ccfEvent := unittest.EventFixture(
+ unittest.Event.WithEventType(flow.EventAccountCreated),
+ unittest.Event.WithPayload(payload),
+ )
+
+ payload, err =
jsoncdc.Encode(cadenceValue) + require.NoError(t, err) + + jsonEvent := unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithPayload(payload), + ) + + t.Run("convert payload from ccf to jsoncdc", func(t *testing.T) { + message := convert.EventToMessage(ccfEvent) + convertedEvent, err := convert.MessageToEventFromVersion(message, entities.EventEncodingVersion_CCF_V0) + require.NoError(t, err) + + require.Equal(t, jsonEvent, *convertedEvent) + }) + + t.Run("convert payload from jsoncdc to jsoncdc", func(t *testing.T) { + message := convert.EventToMessage(jsonEvent) + convertedEvent, err := convert.MessageToEventFromVersion(message, entities.EventEncodingVersion_JSON_CDC_V0) + require.NoError(t, err) + + require.Equal(t, jsonEvent, *convertedEvent) + }) +} + +func TestConvertEvents(t *testing.T) { + t.Parallel() + + eventCount := 3 + + events := make([]flow.Event, eventCount) + ccfEvents := make([]flow.Event, eventCount) + jsonEvents := make([]flow.Event, eventCount) + for i := 0; i < eventCount; i++ { + cadenceValue := cadence.NewInt(i) + + ccfPayload, err := ccf.Encode(cadenceValue) + require.NoError(t, err) + + jsonPayload, err := jsoncdc.Encode(cadenceValue) + require.NoError(t, err) + + event := unittest.EventFixture() + ccfEvent := unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithPayload(ccfPayload), + ) + jsonEvent := unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithPayload(jsonPayload), + ) + + events[i] = event + ccfEvents[i] = ccfEvent + jsonEvents[i] = jsonEvent + } + + t.Run("empty", func(t *testing.T) { + messages := convert.EventsToMessages(nil) + require.Len(t, messages, 0) + }) + + t.Run("convert with passthrough payload conversion", func(t *testing.T) { + messages := convert.EventsToMessages(events) + require.Len(t, messages, len(events)) + + for i, message := range messages { + event := events[i] + require.Equal(t, event.EventIndex, message.EventIndex) + require.Equal(t, event.TransactionIndex, message.TransactionIndex) + require.Equal(t, event.Payload, message.Payload) + require.Equal(t, event.TransactionID[:], message.TransactionId) + require.Equal(t, string(event.Type), message.Type) + } + + converted, err := convert.MessagesToEvents(messages) + require.NoError(t, err) + + require.Equal(t, events, converted) + }) + + t.Run("convert event from ccf to jsoncdc", func(t *testing.T) { + messages := convert.EventsToMessages(ccfEvents) + converted, err := convert.MessagesToEventsWithEncodingConversion(messages, entities.EventEncodingVersion_CCF_V0, entities.EventEncodingVersion_JSON_CDC_V0) + require.NoError(t, err) + + require.Equal(t, jsonEvents, converted) + }) + + t.Run("convert event from jsoncdc", func(t *testing.T) { + messages := convert.EventsToMessages(jsonEvents) + converted, err := convert.MessagesToEventsWithEncodingConversion(messages, entities.EventEncodingVersion_JSON_CDC_V0, entities.EventEncodingVersion_JSON_CDC_V0) + require.NoError(t, err) + + require.Equal(t, jsonEvents, converted) + }) + + t.Run("convert event from ccf", func(t *testing.T) { + messages := convert.EventsToMessages(jsonEvents) + converted, err := convert.MessagesToEventsWithEncodingConversion(messages, entities.EventEncodingVersion_CCF_V0, entities.EventEncodingVersion_CCF_V0) + require.NoError(t, err) + + require.Equal(t, jsonEvents, converted) + }) + + t.Run("convert event from jsoncdc to ccf", func(t *testing.T) { + messages := 
convert.EventsToMessages(jsonEvents)
+ converted, err := convert.MessagesToEventsWithEncodingConversion(messages, entities.EventEncodingVersion_JSON_CDC_V0, entities.EventEncodingVersion_CCF_V0)
+ require.Error(t, err)
+ require.Nil(t, converted)
+ })
+}
+
+func TestConvertServiceEvent(t *testing.T) {
+ t.Parallel()
+
+ serviceEvents := unittest.ServiceEventsFixture(1)
+ require.Len(t, serviceEvents, 1)
+
+ msg, err := convert.ServiceEventToMessage(serviceEvents[0])
+ require.NoError(t, err)
+
+ converted, err := convert.MessageToServiceEvent(msg)
+ require.NoError(t, err)
+
+ require.Equal(t, serviceEvents[0], *converted)
+}
+
+func TestConvertServiceEventList(t *testing.T) {
+ t.Parallel()
+
+ serviceEvents := unittest.ServiceEventsFixture(5)
+ require.Len(t, serviceEvents, 5)
+
+ msg, err := convert.ServiceEventListToMessages(serviceEvents)
+ require.NoError(t, err)
+
+ converted, err := convert.MessagesToServiceEventList(msg)
+ require.NoError(t, err)
+
+ require.Equal(t, serviceEvents, converted)
+}
+
+// TestConvertMessagesToBlockEvents tests that converting protobuf EventsResponse_Result messages to block events
+// and back yields the same block events
+func TestConvertMessagesToBlockEvents(t *testing.T) {
+ t.Parallel()
+
+ count := 2
+ blockEvents := make([]flow.BlockEvents, count)
+ for i := 0; i < count; i++ {
+ header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(uint64(i)))
+ blockEvents[i] = unittest.BlockEventsFixture(header, 2)
+ }
+
+ msg, err := convert.BlockEventsToMessages(blockEvents)
+ require.NoError(t, err)
+
+ converted, err := convert.MessagesToBlockEvents(msg)
+ require.NoError(t, err)
+
+ require.Equal(t, blockEvents, converted)
+}
diff --git a/engine/common/rpc/convert/execution_data.go b/engine/common/rpc/convert/execution_data.go
new file mode 100644
index 00000000000..aacf9be73f1
--- /dev/null
+++ b/engine/common/rpc/convert/execution_data.go
@@ -0,0 +1,383 @@
+package convert
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "github.com/onflow/flow/protobuf/go/flow/entities"
+
+ "github.com/onflow/flow-go/ledger"
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/module/executiondatasync/execution_data"
+)
+
+// BlockExecutionDataEventPayloadsToVersion converts all event payloads in the message to the given encoding version
+func BlockExecutionDataEventPayloadsToVersion(
+ m *entities.BlockExecutionData,
+ to entities.EventEncodingVersion,
+) error {
+ if to == entities.EventEncodingVersion_CCF_V0 {
+ return nil
+ }
+
+ for i, chunk := range m.ChunkExecutionData {
+ for j, e := range chunk.Events {
+ converted, err := CcfPayloadToJsonPayload(e.Payload)
+ if err != nil {
+ return fmt.Errorf("failed to convert payload for event %d to json: %w", j, err)
+ }
+ m.ChunkExecutionData[i].Events[j].Payload = converted
+ }
+ }
+ return nil
+}
+
+// BlockExecutionDataToMessage converts a BlockExecutionData to a protobuf message
+func BlockExecutionDataToMessage(data *execution_data.BlockExecutionData) (
+ *entities.BlockExecutionData,
+ error,
+) {
+ chunkExecutionDatas := make([]*entities.ChunkExecutionData, len(data.ChunkExecutionDatas))
+ for i, chunk := range data.ChunkExecutionDatas {
+ chunkMessage, err := ChunkExecutionDataToMessage(chunk)
+ if err != nil {
+ return nil, err
+ }
+ chunkExecutionDatas[i] = chunkMessage
+ }
+ return &entities.BlockExecutionData{
+ BlockId: IdentifierToMessage(data.BlockID),
+ ChunkExecutionData: chunkExecutionDatas,
+ }, nil
+}
+
+// MessageToBlockExecutionData converts a protobuf message to a
BlockExecutionData +func MessageToBlockExecutionData( + m *entities.BlockExecutionData, + chain flow.Chain, +) (*execution_data.BlockExecutionData, error) { + if m == nil { + return nil, ErrEmptyMessage + } + chunks := make([]*execution_data.ChunkExecutionData, len(m.ChunkExecutionData)) + for i, chunk := range m.GetChunkExecutionData() { + convertedChunk, err := MessageToChunkExecutionData(chunk, chain) + if err != nil { + return nil, err + } + chunks[i] = convertedChunk + } + + return &execution_data.BlockExecutionData{ + BlockID: MessageToIdentifier(m.GetBlockId()), + ChunkExecutionDatas: chunks, + }, nil +} + +// ChunkExecutionDataToMessage converts a ChunkExecutionData to a protobuf message +func ChunkExecutionDataToMessage(data *execution_data.ChunkExecutionData) ( + *entities.ChunkExecutionData, + error, +) { + collection := &entities.ExecutionDataCollection{} + if data.Collection != nil { + collection = &entities.ExecutionDataCollection{ + Transactions: TransactionsToMessages(data.Collection.Transactions), + } + } + + events := EventsToMessages(data.Events) + if len(events) == 0 { + events = nil + } + + trieUpdate, err := TrieUpdateToMessage(data.TrieUpdate) + if err != nil { + return nil, err + } + + var results []*entities.ExecutionDataTransactionResult + if len(data.TransactionResults) > 0 { + results = make([]*entities.ExecutionDataTransactionResult, len(data.TransactionResults)) + for i, result := range data.TransactionResults { + results[i] = &entities.ExecutionDataTransactionResult{ + TransactionId: IdentifierToMessage(result.TransactionID), + Failed: result.Failed, + ComputationUsed: result.ComputationUsed, + } + } + } + + return &entities.ChunkExecutionData{ + Collection: collection, + Events: events, + TrieUpdate: trieUpdate, + TransactionResults: results, + }, nil +} + +// MessageToChunkExecutionData converts a protobuf message to a ChunkExecutionData +func MessageToChunkExecutionData( + m *entities.ChunkExecutionData, + chain flow.Chain, +) (*execution_data.ChunkExecutionData, error) { + collection, err := messageToTrustedCollection(m.GetCollection(), chain) + if err != nil { + return nil, err + } + + var trieUpdate *ledger.TrieUpdate + if m.GetTrieUpdate() != nil { + trieUpdate, err = MessageToTrieUpdate(m.GetTrieUpdate()) + if err != nil { + return nil, err + } + } + + events, err := MessagesToEvents(m.GetEvents()) + if err != nil { + return nil, err + } + if len(events) == 0 { + events = nil + } + + var results []flow.LightTransactionResult + if len(m.GetTransactionResults()) > 0 { + results = make([]flow.LightTransactionResult, len(m.GetTransactionResults())) + for i, result := range m.GetTransactionResults() { + results[i] = flow.LightTransactionResult{ + TransactionID: MessageToIdentifier(result.GetTransactionId()), + Failed: result.GetFailed(), + ComputationUsed: result.GetComputationUsed(), + } + } + } + + return &execution_data.ChunkExecutionData{ + Collection: collection, + Events: events, + TrieUpdate: trieUpdate, + TransactionResults: results, + }, nil +} + +// MessageToTrieUpdate converts a protobuf message to a TrieUpdate +func MessageToTrieUpdate(m *entities.TrieUpdate) (*ledger.TrieUpdate, error) { + rootHash, err := ledger.ToRootHash(m.GetRootHash()) + if err != nil { + return nil, fmt.Errorf("could not convert root hash: %w", err) + } + + paths := make([]ledger.Path, len(m.GetPaths())) + for i, path := range m.GetPaths() { + convertedPath, err := ledger.ToPath(path) + if err != nil { + return nil, fmt.Errorf("could not convert path %d: %w", i, err) + } 
+ paths[i] = convertedPath + } + + payloads := make([]*ledger.Payload, len(m.Payloads)) + for i, payload := range m.GetPayloads() { + keyParts := make([]ledger.KeyPart, len(payload.GetKeyPart())) + for j, keypart := range payload.GetKeyPart() { + keyParts[j] = ledger.NewKeyPart(uint16(keypart.GetType()), keypart.GetValue()) + } + payloads[i] = ledger.NewPayload(ledger.NewKey(keyParts), payload.GetValue()) + } + + return &ledger.TrieUpdate{ + RootHash: rootHash, + Paths: paths, + Payloads: payloads, + }, nil +} + +// TrieUpdateToMessage converts a TrieUpdate to a protobuf message +func TrieUpdateToMessage(t *ledger.TrieUpdate) (*entities.TrieUpdate, error) { + if t == nil { + return nil, nil + } + + paths := make([][]byte, len(t.Paths)) + for i := range t.Paths { + paths[i] = t.Paths[i][:] + } + + payloads := make([]*entities.Payload, len(t.Payloads)) + for i, payload := range t.Payloads { + key, err := payload.Key() + if err != nil { + return nil, fmt.Errorf("could not convert payload %d: %w", i, err) + } + keyParts := make([]*entities.KeyPart, len(key.KeyParts)) + for j, keyPart := range key.KeyParts { + keyParts[j] = &entities.KeyPart{ + Type: uint32(keyPart.Type), + Value: keyPart.Value, + } + } + payloads[i] = &entities.Payload{ + KeyPart: keyParts, + Value: payload.Value(), + } + } + + return &entities.TrieUpdate{ + RootHash: t.RootHash[:], + Paths: paths, + Payloads: payloads, + }, nil +} + +// messageToTrustedCollection converts a protobuf message to a collection using the +// messageToTrustedTransaction converter to support service transactions. +func messageToTrustedCollection( + m *entities.ExecutionDataCollection, + chain flow.Chain, +) (*flow.Collection, error) { + messages := m.GetTransactions() + if len(messages) == 0 { + return flow.NewEmptyCollection(), nil + } + + transactions := make([]*flow.TransactionBody, len(messages)) + for i, message := range messages { + transaction, err := messageToTrustedTransaction(message, chain) + if err != nil { + return nil, fmt.Errorf("could not convert transaction %d: %w", i, err) + } + transactions[i] = &transaction + } + + collection, err := flow.NewCollection(flow.UntrustedCollection{Transactions: transactions}) + if err != nil { + return nil, fmt.Errorf("could not construct collection: %w", err) + } + + return collection, nil +} + +// messageToTrustedTransaction converts a transaction message to a transaction body. +// This is useful when converting transactions from trusted state like BlockExecutionData which +// contain service transactions that do not conform to external transaction format. 
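To make the "trusted" distinction concrete before the definition below: service transactions use the empty address as payer and proposer, which chain-aware address validation rejects, while the trusted path accepts it. A minimal sketch, assuming the fixtures that execution_data_test.go in this diff also uses:

package main

import (
	"log"

	"github.com/onflow/flow-go/engine/common/rpc/convert"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	// A one-transaction collection rewritten into service-transaction shape:
	// empty payer and proposer addresses.
	collection := unittest.CollectionFixture(1)
	collection.Transactions[0].ProposalKey.Address = flow.EmptyAddress
	collection.Transactions[0].Payer = flow.EmptyAddress

	ced := &execution_data.ChunkExecutionData{Collection: &collection}

	msg, err := convert.ChunkExecutionDataToMessage(ced)
	if err != nil {
		log.Fatal(err)
	}

	// The trusted conversion skips chain validation for payer/proposer,
	// so the empty addresses above survive the round trip.
	restored, err := convert.MessageToChunkExecutionData(msg, flow.Testnet.Chain())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("restored %d transaction(s)", len(restored.Collection.Transactions))
}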
+func messageToTrustedTransaction( + m *entities.Transaction, + chain flow.Chain, +) (flow.TransactionBody, error) { + var t flow.TransactionBody + if m == nil { + return t, ErrEmptyMessage + } + + tb := flow.NewTransactionBodyBuilder() + + proposalKey := m.GetProposalKey() + if proposalKey != nil { + proposalAddress, err := insecureAddress(proposalKey.GetAddress()) + if err != nil { + return t, fmt.Errorf("could not convert proposer address: %w", err) + } + tb.SetProposalKey(proposalAddress, proposalKey.GetKeyId(), proposalKey.GetSequenceNumber()) + } + + payer := m.GetPayer() + if payer != nil { + payerAddress, err := insecureAddress(payer) + if err != nil { + return t, fmt.Errorf("could not convert payer address: %w", err) + } + tb.SetPayer(payerAddress) + } + + for _, authorizer := range m.GetAuthorizers() { + authorizerAddress, err := Address(authorizer, chain) + if err != nil { + return t, fmt.Errorf("could not convert authorizer address: %w", err) + } + tb.AddAuthorizer(authorizerAddress) + } + + for _, sig := range m.GetPayloadSignatures() { + addr, err := Address(sig.GetAddress(), chain) + if err != nil { + return t, fmt.Errorf("could not convert payload signature address: %w", err) + } + tb.AddPayloadSignatureWithExtensionData(addr, sig.GetKeyId(), sig.GetSignature(), sig.GetExtensionData()) + } + + for _, sig := range m.GetEnvelopeSignatures() { + addr, err := Address(sig.GetAddress(), chain) + if err != nil { + return t, fmt.Errorf("could not convert envelope signature address: %w", err) + } + tb.AddEnvelopeSignatureWithExtensionData(addr, sig.GetKeyId(), sig.GetSignature(), sig.GetExtensionData()) + } + + transactionBody, err := tb.SetScript(m.GetScript()). + SetArguments(m.GetArguments()). + SetReferenceBlockID(flow.HashToID(m.GetReferenceBlockId())). + SetComputeLimit(m.GetGasLimit()). + Build() + if err != nil { + return t, fmt.Errorf("could not build transaction body: %w", err) + } + + return *transactionBody, nil +} + +func MessageToRegisterID(m *entities.RegisterID, chain flow.Chain) (flow.RegisterID, error) { + if m == nil { + return flow.RegisterID{}, ErrEmptyMessage + } + + owner := flow.EmptyAddress + if len(m.GetOwner()) > 0 { + var err error + owner, err = Address(m.GetOwner(), chain) + if err != nil { + return flow.RegisterID{}, fmt.Errorf("could not convert owner address: %w", err) + } + } + + key := string(m.GetKey()) + + return flow.NewRegisterID(owner, key), nil +} + +// MessagesToRegisterIDs converts a protobuf message to RegisterIDs +func MessagesToRegisterIDs(m []*entities.RegisterID, chain flow.Chain) (flow.RegisterIDs, error) { + if m == nil { + return nil, ErrEmptyMessage + } + result := make(flow.RegisterIDs, len(m)) + for i, entry := range m { + regID, err := MessageToRegisterID(entry, chain) + if err != nil { + return nil, fmt.Errorf("failed to convert register id %d: %w", i, err) + } + result[i] = regID + } + return result, nil +} + +func RegisterIDToMessage(id flow.RegisterID) *entities.RegisterID { + return &entities.RegisterID{ + Owner: []byte(id.Owner), + Key: []byte(id.Key), + } +} + +// insecureAddress converts a raw address to a flow.Address, skipping validation +// This is useful when converting transactions from trusted state like BlockExecutionData. 
+// This should only be used for trusted inputs +func insecureAddress(rawAddress []byte) (flow.Address, error) { + if len(rawAddress) == 0 { + return flow.EmptyAddress, status.Error(codes.InvalidArgument, "address cannot be empty") + } + + return flow.BytesToAddress(rawAddress), nil +} diff --git a/engine/common/rpc/convert/execution_data_test.go b/engine/common/rpc/convert/execution_data_test.go new file mode 100644 index 00000000000..a046b3579d2 --- /dev/null +++ b/engine/common/rpc/convert/execution_data_test.go @@ -0,0 +1,258 @@ +package convert_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence/encoding/ccf" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/ledger/common/testutils" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestConvertBlockExecutionDataEventPayloads(t *testing.T) { + // generators will produce identical event payloads (before encoding) + ccfEvents := unittest.EventGenerator.GetEventsWithEncoding(3, entities.EventEncodingVersion_CCF_V0) + jsonEvents := make([]flow.Event, len(ccfEvents)) + for i, e := range ccfEvents { + jsonEvent, err := convert.CcfEventToJsonEvent(e) + require.NoError(t, err) + jsonEvents[i] = *jsonEvent + } + + // generate BlockExecutionData with CCF encoded events + executionData := unittest.BlockExecutionDataFixture( + unittest.WithChunkExecutionDatas( + unittest.ChunkExecutionDataFixture(t, 1024, unittest.WithChunkEvents(ccfEvents)), + unittest.ChunkExecutionDataFixture(t, 1024, unittest.WithChunkEvents(ccfEvents)), + ), + ) + + execDataMessage, err := convert.BlockExecutionDataToMessage(executionData) + require.NoError(t, err) + + t.Run("regular convert does not modify payload encoding", func(t *testing.T) { + for _, chunk := range execDataMessage.GetChunkExecutionData() { + events, err := convert.MessagesToEvents(chunk.Events) + require.NoError(t, err) + + for i, e := range events { + require.Equal(t, ccfEvents[i], e) + + _, err := ccf.Decode(nil, e.Payload) + require.NoError(t, err) + } + } + }) + + t.Run("converted event payloads are encoded in jsoncdc", func(t *testing.T) { + err = convert.BlockExecutionDataEventPayloadsToVersion(execDataMessage, entities.EventEncodingVersion_JSON_CDC_V0) + require.NoError(t, err) + + for _, chunk := range execDataMessage.GetChunkExecutionData() { + events, err := convert.MessagesToEvents(chunk.Events) + require.NoError(t, err) + + for i, e := range events { + require.Equal(t, jsonEvents[i], e) + + _, err := jsoncdc.Decode(nil, e.Payload) + require.NoError(t, err) + } + } + }) +} + +func TestConvertBlockExecutionData(t *testing.T) { + t.Parallel() + + chain := flow.Testnet.Chain() // this is used by the AddressFixture + events := unittest.EventsFixture(5) + + chunks := 5 + chunkData := make([]*execution_data.ChunkExecutionData, 0, chunks) + for i := 0; i < chunks-1; i++ { + ced := unittest.ChunkExecutionDataFixture(t, + 0, // updates set explicitly to target 160-320KB per chunk + unittest.WithChunkEvents(events), + unittest.WithTrieUpdate(testutils.TrieUpdateFixture(5, 32*1024, 64*1024)), + ) + + chunkData = append(chunkData, ced) + } + makeServiceTx := func(ced *execution_data.ChunkExecutionData) { + // proposal key and payer are empty addresses for service tx + collection := 
unittest.CollectionFixture(1) + collection.Transactions[0].ProposalKey.Address = flow.EmptyAddress + collection.Transactions[0].Payer = flow.EmptyAddress + ced.Collection = &collection + + // the service chunk sometimes does not have any trie updates + ced.TrieUpdate = nil + } + chunk := unittest.ChunkExecutionDataFixture(t, execution_data.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events), makeServiceTx) + chunkData = append(chunkData, chunk) + + blockData := unittest.BlockExecutionDataFixture(unittest.WithChunkExecutionDatas(chunkData...)) + + msg, err := convert.BlockExecutionDataToMessage(blockData) + require.NoError(t, err) + + converted, err := convert.MessageToBlockExecutionData(msg, chain) + require.NoError(t, err) + + require.Equal(t, blockData, converted) + for i, chunk := range blockData.ChunkExecutionDatas { + if chunk.TrieUpdate == nil { + require.Nil(t, converted.ChunkExecutionDatas[i].TrieUpdate) + } else { + require.True(t, chunk.TrieUpdate.Equals(converted.ChunkExecutionDatas[i].TrieUpdate)) + } + } +} + +func TestConvertChunkExecutionData(t *testing.T) { + tests := []struct { + name string + fn func(*testing.T) *execution_data.ChunkExecutionData + }{ + { + name: "chunk execution data conversions", + fn: func(t *testing.T) *execution_data.ChunkExecutionData { + return unittest.ChunkExecutionDataFixture(t, + 0, // updates set explicitly to target 160-320KB per chunk + unittest.WithChunkEvents(unittest.EventsFixture(5)), + unittest.WithTrieUpdate(testutils.TrieUpdateFixture(5, 32*1024, 64*1024)), + ) + }, + }, + { + name: "chunk execution data conversions - no events", + fn: func(t *testing.T) *execution_data.ChunkExecutionData { + ced := unittest.ChunkExecutionDataFixture(t, 0) + ced.Events = nil + return ced + }, + }, + { + name: "chunk execution data conversions - no trie update", + fn: func(t *testing.T) *execution_data.ChunkExecutionData { + ced := unittest.ChunkExecutionDataFixture(t, 0) + ced.TrieUpdate = nil + return ced + }, + }, + { + name: "chunk execution data conversions - empty collection", + fn: func(t *testing.T) *execution_data.ChunkExecutionData { + ced := unittest.ChunkExecutionDataFixture(t, 0) + ced.Collection = flow.NewEmptyCollection() + ced.TransactionResults = nil + return ced + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ced := test.fn(t) + + chunkMsg, err := convert.ChunkExecutionDataToMessage(ced) + require.NoError(t, err) + + chunkReConverted, err := convert.MessageToChunkExecutionData(chunkMsg, flow.Testnet.Chain()) + require.NoError(t, err) + + require.Equal(t, ced, chunkReConverted) + if ced.TrieUpdate == nil { + require.Nil(t, chunkReConverted.TrieUpdate) + } else { + require.True(t, ced.TrieUpdate.Equals(chunkReConverted.TrieUpdate)) + } + }) + } +} + +func TestMessageToRegisterID(t *testing.T) { + chain := flow.Testnet.Chain() + tests := []struct { + name string + regID flow.RegisterID + }{ + { + name: "service level register id", + regID: flow.UUIDRegisterID(0), + }, + { + name: "account level register id", + regID: flow.AccountStatusRegisterID(unittest.AddressFixture()), + }, + { + name: "regular register id", + regID: unittest.RegisterIDFixture(), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + msg := convert.RegisterIDToMessage(test.regID) + converted, err := convert.MessageToRegisterID(msg, chain) + require.NoError(t, err) + require.Equal(t, test.regID, converted) + }) + } + + t.Run("nil owner converts to empty string", func(t *testing.T) { + msg := 
&entities.RegisterID{
+ Owner: nil,
+ Key: []byte("key"),
+ }
+ converted, err := convert.MessageToRegisterID(msg, chain)
+ require.NoError(t, err)
+ require.Equal(t, "", converted.Owner)
+ require.Equal(t, "key", converted.Key)
+ })
+
+ t.Run("nil message returns error", func(t *testing.T) {
+ _, err := convert.MessageToRegisterID(nil, chain)
+ require.ErrorIs(t, err, convert.ErrEmptyMessage)
+ })
+
+ t.Run("invalid address returns error", func(t *testing.T) {
+ // addresses for other chains are invalid
+ registerID := flow.NewRegisterID(
+ unittest.RandomAddressFixtureForChain(flow.Mainnet),
+ "key",
+ )
+
+ msg := convert.RegisterIDToMessage(registerID)
+ _, err := convert.MessageToRegisterID(msg, chain)
+ require.Error(t, err)
+ })
+
+ t.Run("multiple registerIDs", func(t *testing.T) {
+ expected := flow.RegisterIDs{
+ flow.UUIDRegisterID(0),
+ flow.AccountStatusRegisterID(unittest.AddressFixture()),
+ unittest.RegisterIDFixture(),
+ }
+
+ messages := make([]*entities.RegisterID, len(expected))
+ for i, regID := range expected {
+ regID := regID
+ messages[i] = convert.RegisterIDToMessage(regID)
+ require.Equal(t, regID.Owner, string(messages[i].Owner))
+ require.Equal(t, regID.Key, string(messages[i].Key))
+ }
+
+ actual, err := convert.MessagesToRegisterIDs(messages, chain)
+ require.NoError(t, err)
+ require.Equal(t, expected, actual)
+ })
+}
diff --git a/engine/common/rpc/convert/execution_results.go b/engine/common/rpc/convert/execution_results.go
new file mode 100644
index 00000000000..9dc896ea7ca
--- /dev/null
+++ b/engine/common/rpc/convert/execution_results.go
@@ -0,0 +1,203 @@
+package convert
+
+import (
+ "fmt"
+
+ "github.com/onflow/flow/protobuf/go/flow/entities"
+
+ "github.com/onflow/flow-go/model/flow"
+)
+
+// ExecutionResultToMessage converts an execution result to a protobuf message
+func ExecutionResultToMessage(er *flow.ExecutionResult) (
+ *entities.ExecutionResult,
+ error,
+) {
+ chunks := make([]*entities.Chunk, len(er.Chunks))
+
+ for i, chunk := range er.Chunks {
+ chunks[i] = ChunkToMessage(chunk)
+ }
+
+ serviceEvents := make([]*entities.ServiceEvent, len(er.ServiceEvents))
+ var err error
+ for i, serviceEvent := range er.ServiceEvents {
+ serviceEvents[i], err = ServiceEventToMessage(serviceEvent)
+ if err != nil {
+ return nil, fmt.Errorf("error while converting service event %d: %w", i, err)
+ }
+ }
+
+ return &entities.ExecutionResult{
+ PreviousResultId: IdentifierToMessage(er.PreviousResultID),
+ BlockId: IdentifierToMessage(er.BlockID),
+ Chunks: chunks,
+ ServiceEvents: serviceEvents,
+ ExecutionDataId: IdentifierToMessage(er.ExecutionDataID),
+ }, nil
+}
+
+// MessageToExecutionResult converts a protobuf message to an execution result
+func MessageToExecutionResult(m *entities.ExecutionResult) (
+ *flow.ExecutionResult,
+ error,
+) {
+ // convert Chunks
+ parsedChunks, err := MessagesToChunkList(m.Chunks)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse messages to ChunkList: %w", err)
+ }
+ // convert ServiceEvents
+ parsedServiceEvents, err := MessagesToServiceEventList(m.ServiceEvents)
+ if err != nil {
+ return nil, err
+ }
+
+ executionResult, err := flow.NewExecutionResult(flow.UntrustedExecutionResult{
+ PreviousResultID: MessageToIdentifier(m.PreviousResultId),
+ BlockID: MessageToIdentifier(m.BlockId),
+ Chunks: parsedChunks,
+ ServiceEvents: parsedServiceEvents,
+ ExecutionDataID: MessageToIdentifier(m.ExecutionDataId),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not build execution result: %w", err)
+ }
+
+ return
executionResult, nil +} + +// ExecutionResultsToMessages converts a slice of execution results to a slice of protobuf messages +func ExecutionResultsToMessages(e []*flow.ExecutionResult) ( + []*entities.ExecutionResult, + error, +) { + execResults := make([]*entities.ExecutionResult, len(e)) + for i, execRes := range e { + parsedExecResult, err := ExecutionResultToMessage(execRes) + if err != nil { + return nil, err + } + execResults[i] = parsedExecResult + } + return execResults, nil +} + +// MessagesToExecutionResults converts a slice of protobuf messages to a slice of execution results +func MessagesToExecutionResults(m []*entities.ExecutionResult) ( + []*flow.ExecutionResult, + error, +) { + execResults := make([]*flow.ExecutionResult, len(m)) + for i, e := range m { + parsedExecResult, err := MessageToExecutionResult(e) + if err != nil { + return nil, fmt.Errorf("failed to convert message at index %d to execution result: %w", i, err) + } + execResults[i] = parsedExecResult + } + return execResults, nil +} + +// ExecutionResultMetaListToMessages converts an execution result meta list to a slice of protobuf messages +func ExecutionResultMetaListToMessages(e flow.ExecutionReceiptStubList) []*entities.ExecutionReceiptMeta { + messageList := make([]*entities.ExecutionReceiptMeta, len(e)) + for i, execMeta := range e { + messageList[i] = &entities.ExecutionReceiptMeta{ + ExecutorId: IdentifierToMessage(execMeta.ExecutorID), + ResultId: IdentifierToMessage(execMeta.ResultID), + Spocks: SignaturesToMessages(execMeta.Spocks), + ExecutorSignature: MessageToSignature(execMeta.ExecutorSignature), + } + } + return messageList +} + +// MessagesToExecutionResultMetaList converts a slice of protobuf messages to an execution result meta list. +// All errors indicate the input cannot be converted to a valid [flow.ExecutionReceiptStubList]. 
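A brief usage sketch pairing ExecutionResultMetaListToMessages (above) with the inverse defined just below; it mirrors TestConvertExecutionResultMetaList later in this diff and assumes the FullBlockFixture helper used there.

package main

import (
	"log"

	"github.com/onflow/flow-go/engine/common/rpc/convert"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	// A full block fixture carries execution receipt stubs in its payload.
	block := unittest.FullBlockFixture()
	metaList := block.Payload.Receipts

	// flow.ExecutionReceiptStubList -> []*entities.ExecutionReceiptMeta.
	msgs := convert.ExecutionResultMetaListToMessages(metaList)

	// ...and back; any error indicates a malformed input message.
	restored, err := convert.MessagesToExecutionResultMetaList(msgs)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("round-tripped %d receipt stub(s)", len(restored))
}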
+func MessagesToExecutionResultMetaList(m []*entities.ExecutionReceiptMeta) (flow.ExecutionReceiptStubList, error) { + execMetaList := make([]*flow.ExecutionReceiptStub, len(m)) + for i, message := range m { + unsignedExecutionReceiptStub, err := flow.NewUnsignedExecutionReceiptStub( + flow.UntrustedUnsignedExecutionReceiptStub{ + ExecutorID: MessageToIdentifier(message.ExecutorId), + ResultID: MessageToIdentifier(message.ResultId), + Spocks: MessagesToSignatures(message.Spocks), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct unsigned execution receipt stub at index: %d: %w", i, err) + } + + execMetaList[i], err = flow.NewExecutionReceiptStub( + flow.UntrustedExecutionReceiptStub{ + UnsignedExecutionReceiptStub: *unsignedExecutionReceiptStub, + ExecutorSignature: MessageToSignature(message.ExecutorSignature), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct execution receipt stub at index: %d: %w", i, err) + } + } + + return execMetaList[:], nil +} + +// ChunkToMessage converts a chunk to a protobuf message +func ChunkToMessage(chunk *flow.Chunk) *entities.Chunk { + return &entities.Chunk{ + CollectionIndex: uint32(chunk.CollectionIndex), + StartState: StateCommitmentToMessage(chunk.StartState), + EventCollection: IdentifierToMessage(chunk.EventCollection), + BlockId: IdentifierToMessage(chunk.BlockID), + TotalComputationUsed: chunk.TotalComputationUsed, + NumberOfTransactions: uint32(chunk.NumberOfTransactions), + Index: chunk.Index, + EndState: StateCommitmentToMessage(chunk.EndState), + ServiceEventCount: uint32(chunk.ServiceEventCount), + } +} + +// MessageToChunk converts a protobuf message to a chunk +func MessageToChunk(m *entities.Chunk) (*flow.Chunk, error) { + startState, err := flow.ToStateCommitment(m.StartState) + if err != nil { + return nil, fmt.Errorf("failed to parse Message start state to Chunk: %w", err) + } + endState, err := flow.ToStateCommitment(m.EndState) + if err != nil { + return nil, fmt.Errorf("failed to parse Message end state to Chunk: %w", err) + } + + chunk, err := flow.NewChunk(flow.UntrustedChunk{ + ChunkBody: flow.ChunkBody{ + CollectionIndex: uint(m.CollectionIndex), + StartState: startState, + EventCollection: MessageToIdentifier(m.EventCollection), + ServiceEventCount: uint16(m.ServiceEventCount), + BlockID: MessageToIdentifier(m.BlockId), + TotalComputationUsed: m.TotalComputationUsed, + NumberOfTransactions: uint64(m.NumberOfTransactions), + }, + Index: m.Index, + EndState: endState, + }) + if err != nil { + return nil, fmt.Errorf("could not build chunk: %w", err) + } + + return chunk, nil +} + +// MessagesToChunkList converts a slice of protobuf messages to a chunk list +func MessagesToChunkList(m []*entities.Chunk) (flow.ChunkList, error) { + parsedChunks := make(flow.ChunkList, len(m)) + for i, chunk := range m { + parsedChunk, err := MessageToChunk(chunk) + if err != nil { + return nil, fmt.Errorf("failed to parse message at index %d to chunk: %w", i, err) + } + parsedChunks[i] = parsedChunk + } + return parsedChunks, nil +} diff --git a/engine/common/rpc/convert/execution_results_test.go b/engine/common/rpc/convert/execution_results_test.go new file mode 100644 index 00000000000..6a98f61d222 --- /dev/null +++ b/engine/common/rpc/convert/execution_results_test.go @@ -0,0 +1,57 @@ +package convert_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + 
"github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestConvertExecutionResult(t *testing.T) { + t.Parallel() + + er := unittest.ExecutionResultFixture(unittest.WithServiceEvents(3)) + + msg, err := convert.ExecutionResultToMessage(er) + require.NoError(t, err) + + converted, err := convert.MessageToExecutionResult(msg) + require.NoError(t, err) + + assert.Equal(t, er, converted) +} + +func TestConvertExecutionResults(t *testing.T) { + t.Parallel() + + results := []*flow.ExecutionResult{ + unittest.ExecutionResultFixture(unittest.WithServiceEvents(3)), + unittest.ExecutionResultFixture(unittest.WithServiceEvents(3)), + unittest.ExecutionResultFixture(unittest.WithServiceEvents(3)), + } + + msg, err := convert.ExecutionResultsToMessages(results) + require.NoError(t, err) + + converted, err := convert.MessagesToExecutionResults(msg) + require.NoError(t, err) + + assert.Equal(t, results, converted) +} + +func TestConvertExecutionResultMetaList(t *testing.T) { + t.Parallel() + + block := unittest.FullBlockFixture() + metaList := block.Payload.Receipts + + msg := convert.ExecutionResultMetaListToMessages(metaList) + converted, err := convert.MessagesToExecutionResultMetaList(msg) + require.NoError(t, err) + + assert.Equal(t, metaList, converted) +} diff --git a/engine/common/rpc/convert/headers.go b/engine/common/rpc/convert/headers.go new file mode 100644 index 00000000000..bae4bc56107 --- /dev/null +++ b/engine/common/rpc/convert/headers.go @@ -0,0 +1,151 @@ +package convert + +import ( + "fmt" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/model/flow" +) + +// BlockHeaderToMessage converts a flow.Header to a protobuf message +func BlockHeaderToMessage( + h *flow.Header, + signerIDs flow.IdentifierList, +) (*entities.BlockHeader, error) { + id := h.ID() + + var lastViewTC *entities.TimeoutCertificate + if h.LastViewTC != nil { + newestQC := h.LastViewTC.NewestQC + lastViewTC = &entities.TimeoutCertificate{ + View: h.LastViewTC.View, + HighQcViews: h.LastViewTC.NewestQCViews, + SignerIndices: h.LastViewTC.SignerIndices, + SigData: h.LastViewTC.SigData, + HighestQc: &entities.QuorumCertificate{ + View: newestQC.View, + BlockId: newestQC.BlockID[:], + SignerIndices: newestQC.SignerIndices, + SigData: newestQC.SigData, + }, + } + } + parentVoterIds := IdentifiersToMessages(signerIDs) + + return &entities.BlockHeader{ + Id: id[:], + ParentId: h.ParentID[:], + Height: h.Height, + PayloadHash: h.PayloadHash[:], + Timestamp: BlockTimestamp2ProtobufTime(h.Timestamp), + View: h.View, + ParentView: h.ParentView, + ParentVoterIndices: h.ParentVoterIndices, + ParentVoterIds: parentVoterIds, + ParentVoterSigData: h.ParentVoterSigData, + ProposerId: h.ProposerID[:], + ChainId: h.ChainID.String(), + LastViewTc: lastViewTC, + }, nil +} + +// MessageToBlockHeader converts a protobuf message to a flow.Header +func MessageToBlockHeader(m *entities.BlockHeader) (*flow.Header, error) { + chainId, err := MessageToChainId(m.ChainId) + if err != nil { + return nil, fmt.Errorf("failed to convert ChainId: %w", err) + } + var lastViewTC *flow.TimeoutCertificate + if m.LastViewTc != nil { + newestQC := m.LastViewTc.HighestQc + if newestQC == nil { + return nil, fmt.Errorf("invalid structure newest QC should be present") + } + + qc, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ + View: newestQC.View, + BlockID: MessageToIdentifier(newestQC.BlockId), + SignerIndices: newestQC.SignerIndices, + SigData: newestQC.SigData, + 
}) + if err != nil { + return nil, fmt.Errorf("could not build quorum certificate: %w", err) + } + + tc, err := flow.NewTimeoutCertificate( + flow.UntrustedTimeoutCertificate{ + View: m.LastViewTc.View, + NewestQCViews: m.LastViewTc.HighQcViews, + NewestQC: qc, + SignerIndices: m.LastViewTc.SignerIndices, + SigData: m.LastViewTc.SigData, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct timeout certificate: %w", err) + } + lastViewTC = tc + } + + if IsRootBlockHeader(m) { + rootHeaderBody, err := flow.NewRootHeaderBody(flow.UntrustedHeaderBody{ + ParentID: MessageToIdentifier(m.ParentId), + Height: m.Height, + Timestamp: uint64(m.Timestamp.AsTime().UnixMilli()), + View: m.View, + ParentView: m.ParentView, + ParentVoterIndices: m.ParentVoterIndices, + ParentVoterSigData: m.ParentVoterSigData, + ProposerID: MessageToIdentifier(m.ProposerId), + ChainID: *chainId, + LastViewTC: lastViewTC, + }) + if err != nil { + return nil, fmt.Errorf("failed to create root header body: %w", err) + } + + rootHeader, err := flow.NewRootHeader(flow.UntrustedHeader{ + HeaderBody: *rootHeaderBody, + PayloadHash: MessageToIdentifier(m.PayloadHash), + }) + if err != nil { + return nil, fmt.Errorf("failed to create root header: %w", err) + } + + return rootHeader, nil + } + + headerBody, err := flow.NewHeaderBody(flow.UntrustedHeaderBody{ + ParentID: MessageToIdentifier(m.ParentId), + Height: m.Height, + Timestamp: uint64(m.Timestamp.AsTime().UnixMilli()), + View: m.View, + ParentView: m.ParentView, + ParentVoterIndices: m.ParentVoterIndices, + ParentVoterSigData: m.ParentVoterSigData, + ProposerID: MessageToIdentifier(m.ProposerId), + ChainID: *chainId, + LastViewTC: lastViewTC, + }) + if err != nil { + return nil, fmt.Errorf("could not build header body: %w", err) + } + header, err := flow.NewHeader(flow.UntrustedHeader{ + HeaderBody: *headerBody, + PayloadHash: MessageToIdentifier(m.PayloadHash), + }) + if err != nil { + return nil, fmt.Errorf("could not build header: %w", err) + } + + return header, nil +} + +// IsRootBlockHeader reports whether this is a root block header. +// It returns true only if all of the fields required to build a root Header are zero/nil. 
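+//
+// Illustrative sketch (hypothetical message value, not part of this change): a root
+// block header message carries no parent votes and a zero proposer, so
+//
+//	m := &entities.BlockHeader{ProposerId: flow.ZeroID[:]}
+//	_ = IsRootBlockHeader(m) // true: ParentVoterIndices and ParentVoterSigData are nil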
+func IsRootBlockHeader(m *entities.BlockHeader) bool {
+	return m.ParentVoterIndices == nil &&
+		m.ParentVoterSigData == nil &&
+		MessageToIdentifier(m.ProposerId) == flow.ZeroID
+}
diff --git a/engine/common/rpc/convert/headers_test.go b/engine/common/rpc/convert/headers_test.go
new file mode 100644
index 00000000000..5f56c52fb06
--- /dev/null
+++ b/engine/common/rpc/convert/headers_test.go
@@ -0,0 +1,46 @@
+package convert_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/engine/common/rpc/convert"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestConvertBlockHeader tests that converting a header to and from a protobuf message results in the same
+// header
+func TestConvertBlockHeader(t *testing.T) {
+	t.Parallel()
+
+	header := unittest.BlockHeaderFixture()
+
+	signerIDs := unittest.IdentifierListFixture(5)
+
+	msg, err := convert.BlockHeaderToMessage(header, signerIDs)
+	require.NoError(t, err)
+
+	converted, err := convert.MessageToBlockHeader(msg)
+	require.NoError(t, err)
+
+	assert.Equal(t, header, converted)
+}
+
+// TestConvertRootBlockHeader tests that converting a root block header to and from a protobuf message
+// results in a header with the same ID
+func TestConvertRootBlockHeader(t *testing.T) {
+	t.Parallel()
+
+	header := unittest.Block.Genesis(flow.Emulator).ToHeader()
+
+	msg, err := convert.BlockHeaderToMessage(header, flow.IdentifierList{})
+	require.NoError(t, err)
+
+	converted, err := convert.MessageToBlockHeader(msg)
+	require.NoError(t, err)
+
+	assert.Equal(t, header.ID(), converted.ID())
+}
diff --git a/engine/common/rpc/convert/snapshots.go b/engine/common/rpc/convert/snapshots.go
new file mode 100644
index 00000000000..963f95dbd09
--- /dev/null
+++ b/engine/common/rpc/convert/snapshots.go
@@ -0,0 +1,35 @@
+package convert
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/inmem"
+)
+
+// SnapshotToBytes converts a `protocol.Snapshot` to bytes, encoded as JSON
+func SnapshotToBytes(snapshot protocol.Snapshot) ([]byte, error) {
+	serializable, err := inmem.FromSnapshot(snapshot)
+	if err != nil {
+		return nil, err
+	}
+
+	data, err := json.Marshal(serializable.Encodable())
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// BytesToInmemSnapshot converts a byte slice to an `inmem.Snapshot`
+func BytesToInmemSnapshot(bytes []byte) (*inmem.Snapshot, error) {
+	var encodable inmem.EncodableSnapshot
+	err := json.Unmarshal(bytes, &encodable)
+	if err != nil {
+		return nil, fmt.Errorf("could not unmarshal decoded snapshot: %w", err)
+	}
+
+	return inmem.SnapshotFromEncodable(encodable), nil
+}
diff --git a/engine/common/rpc/convert/snapshots_test.go b/engine/common/rpc/convert/snapshots_test.go
new file mode 100644
index 00000000000..2e1d4ce91e1
--- /dev/null
+++ b/engine/common/rpc/convert/snapshots_test.go
@@ -0,0 +1,27 @@
+package convert_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/engine/common/rpc/convert"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestConvertSnapshot(t *testing.T) {
+	t.Parallel()
+
+	identities := unittest.CompleteIdentitySet()
+	snapshot := unittest.RootSnapshotFixtureWithChainID(identities, flow.Testnet.Chain().ChainID())
+
+	msg, err := convert.SnapshotToBytes(snapshot)
+	require.NoError(t, err)
+
+	converted, err := convert.BytesToInmemSnapshot(msg)
+	require.NoError(t, err)
+
+	assert.Equal(t, snapshot, converted)
+}
diff --git a/engine/common/rpc/convert/transaction_result.go b/engine/common/rpc/convert/transaction_result.go
new file mode 100644
index 00000000000..f5e21ce1c2d
--- /dev/null
+++ b/engine/common/rpc/convert/transaction_result.go
@@ -0,0 +1,71 @@
+package convert
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow/protobuf/go/flow/access"
+	"github.com/onflow/flow/protobuf/go/flow/entities"
+
+	accessmodel "github.com/onflow/flow-go/model/access"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// TransactionResultToMessage converts a TransactionResult to a protobuf message
+func TransactionResultToMessage(result *accessmodel.TransactionResult) *access.TransactionResultResponse {
+	return &access.TransactionResultResponse{
+		Status: entities.TransactionStatus(result.Status),
+		StatusCode: uint32(result.StatusCode),
+		ErrorMessage: result.ErrorMessage,
+		Events: EventsToMessages(result.Events),
+		BlockId: result.BlockID[:],
+		TransactionId: result.TransactionID[:],
+		CollectionId: result.CollectionID[:],
+		BlockHeight: result.BlockHeight,
+	}
+}
+
+// MessageToTransactionResult converts a protobuf message to a TransactionResult.
+// All errors indicate the input contains events that cannot be converted.
+func MessageToTransactionResult(message *access.TransactionResultResponse) (*accessmodel.TransactionResult, error) {
+	events, err := MessagesToEvents(message.Events)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert message to events: %w", err)
+	}
+
+	return &accessmodel.TransactionResult{
+		Status: flow.TransactionStatus(message.Status),
+		StatusCode: uint(message.StatusCode),
+		ErrorMessage: message.ErrorMessage,
+		Events: events,
+		BlockID: flow.HashToID(message.BlockId),
+		TransactionID: flow.HashToID(message.TransactionId),
+		CollectionID: flow.HashToID(message.CollectionId),
+		BlockHeight: message.BlockHeight,
+	}, nil
+}
+
+// TransactionResultsToMessage converts a slice of TransactionResults to a protobuf message
+func TransactionResultsToMessage(results []*accessmodel.TransactionResult) *access.TransactionResultsResponse {
+	messages := make([]*access.TransactionResultResponse, len(results))
+	for i, result := range results {
+		messages[i] = TransactionResultToMessage(result)
+	}
+
+	return &access.TransactionResultsResponse{
+		TransactionResults: messages,
+	}
+}
+
+// MessageToTransactionResults converts a protobuf message to a slice of TransactionResults.
+// All errors indicate the input contains events that cannot be converted.
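+//
+// Illustrative usage sketch (hypothetical inputs, not part of this change):
+//
+//	resp := &access.TransactionResultsResponse{TransactionResults: msgs}
+//	results, err := MessageToTransactionResults(resp)
+//	if err != nil {
+//		// at least one result carried events that could not be converted
+//	}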
+func MessageToTransactionResults(message *access.TransactionResultsResponse) ([]*accessmodel.TransactionResult, error) { + results := make([]*accessmodel.TransactionResult, len(message.TransactionResults)) + var err error + for i, result := range message.TransactionResults { + results[i], err = MessageToTransactionResult(result) + if err != nil { + return nil, fmt.Errorf("failed to convert message at index %d to transaction result: %w", i, err) + } + } + return results, nil +} diff --git a/engine/common/rpc/convert/transaction_result_test.go b/engine/common/rpc/convert/transaction_result_test.go new file mode 100644 index 00000000000..2d4a62b4436 --- /dev/null +++ b/engine/common/rpc/convert/transaction_result_test.go @@ -0,0 +1,52 @@ +package convert_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + accessmodel "github.com/onflow/flow-go/model/access" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestConvertTransactionResult(t *testing.T) { + t.Parallel() + + expected := txResultFixture() + + msg := convert.TransactionResultToMessage(expected) + converted, err := convert.MessageToTransactionResult(msg) + require.NoError(t, err) + + require.Equal(t, expected, converted) +} + +func TestConvertTransactionResults(t *testing.T) { + t.Parallel() + + expected := []*accessmodel.TransactionResult{ + txResultFixture(), + txResultFixture(), + } + + msg := convert.TransactionResultsToMessage(expected) + converted, err := convert.MessageToTransactionResults(msg) + require.NoError(t, err) + + require.Equal(t, expected, converted) +} + +func txResultFixture() *accessmodel.TransactionResult { + return &accessmodel.TransactionResult{ + Status: flow.TransactionStatusExecuted, + StatusCode: 0, + Events: unittest.EventsFixture(3), + ErrorMessage: "", + BlockID: unittest.IdentifierFixture(), + TransactionID: unittest.IdentifierFixture(), + CollectionID: unittest.IdentifierFixture(), + BlockHeight: 100, + } +} diff --git a/engine/common/rpc/convert/transactions.go b/engine/common/rpc/convert/transactions.go new file mode 100644 index 00000000000..69a25e83839 --- /dev/null +++ b/engine/common/rpc/convert/transactions.go @@ -0,0 +1,131 @@ +package convert + +import ( + "fmt" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/model/flow" +) + +// TransactionToMessage converts a flow.TransactionBody to a protobuf message +func TransactionToMessage(tb flow.TransactionBody) *entities.Transaction { + proposalKeyMessage := &entities.Transaction_ProposalKey{ + Address: tb.ProposalKey.Address.Bytes(), + KeyId: uint32(tb.ProposalKey.KeyIndex), + SequenceNumber: tb.ProposalKey.SequenceNumber, + } + + authMessages := make([][]byte, len(tb.Authorizers)) + for i, auth := range tb.Authorizers { + authMessages[i] = auth.Bytes() + } + + payloadSigMessages := make([]*entities.Transaction_Signature, len(tb.PayloadSignatures)) + + for i, sig := range tb.PayloadSignatures { + payloadSigMessages[i] = &entities.Transaction_Signature{ + Address: sig.Address.Bytes(), + KeyId: uint32(sig.KeyIndex), + Signature: sig.Signature, + ExtensionData: sig.ExtensionData, + } + } + + envelopeSigMessages := make([]*entities.Transaction_Signature, len(tb.EnvelopeSignatures)) + + for i, sig := range tb.EnvelopeSignatures { + envelopeSigMessages[i] = &entities.Transaction_Signature{ + Address: sig.Address.Bytes(), + KeyId: uint32(sig.KeyIndex), + Signature: sig.Signature, + 
ExtensionData: sig.ExtensionData, + } + } + + return &entities.Transaction{ + Script: tb.Script, + Arguments: tb.Arguments, + ReferenceBlockId: tb.ReferenceBlockID[:], + GasLimit: tb.GasLimit, + ProposalKey: proposalKeyMessage, + Payer: tb.Payer.Bytes(), + Authorizers: authMessages, + PayloadSignatures: payloadSigMessages, + EnvelopeSignatures: envelopeSigMessages, + } +} + +// MessageToTransaction converts a protobuf message to a flow.TransactionBody +func MessageToTransaction( + m *entities.Transaction, + chain flow.Chain, +) (flow.TransactionBody, error) { + var t flow.TransactionBody + if m == nil { + return t, ErrEmptyMessage + } + tb := flow.NewTransactionBodyBuilder() + + proposalKey := m.GetProposalKey() + if proposalKey != nil { + proposalAddress, err := Address(proposalKey.GetAddress(), chain) + if err != nil { + return t, err + } + tb.SetProposalKey(proposalAddress, proposalKey.GetKeyId(), proposalKey.GetSequenceNumber()) + } + + payer := m.GetPayer() + if payer != nil { + payerAddress, err := Address(payer, chain) + if err != nil { + return t, err + } + tb.SetPayer(payerAddress) + } + + for _, authorizer := range m.GetAuthorizers() { + authorizerAddress, err := Address(authorizer, chain) + if err != nil { + return t, err + } + tb.AddAuthorizer(authorizerAddress) + } + + for _, sig := range m.GetPayloadSignatures() { + addr, err := Address(sig.GetAddress(), chain) + if err != nil { + return t, err + } + tb.AddPayloadSignatureWithExtensionData(addr, sig.GetKeyId(), sig.GetSignature(), sig.GetExtensionData()) + } + + for _, sig := range m.GetEnvelopeSignatures() { + addr, err := Address(sig.GetAddress(), chain) + if err != nil { + return t, err + } + tb.AddEnvelopeSignatureWithExtensionData(addr, sig.GetKeyId(), sig.GetSignature(), sig.GetExtensionData()) + } + + transactionBody, err := tb.SetScript(m.GetScript()). + SetArguments(m.GetArguments()). + SetReferenceBlockID(flow.HashToID(m.GetReferenceBlockId())). + SetComputeLimit(m.GetGasLimit()). 
+ Build() + if err != nil { + return t, fmt.Errorf("could not build transaction body: %w", err) + } + + return *transactionBody, nil +} + +// TransactionsToMessages converts a slice of flow.TransactionBody to a slice of protobuf messages +func TransactionsToMessages(transactions []*flow.TransactionBody) []*entities.Transaction { + transactionMessages := make([]*entities.Transaction, len(transactions)) + for i, t := range transactions { + transactionMessages[i] = TransactionToMessage(*t) + } + return transactionMessages +} diff --git a/engine/common/rpc/convert/transactions_test.go b/engine/common/rpc/convert/transactions_test.go new file mode 100644 index 00000000000..c9c5141f9a8 --- /dev/null +++ b/engine/common/rpc/convert/transactions_test.go @@ -0,0 +1,34 @@ +package convert_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + jsoncdc "github.com/onflow/cadence/encoding/json" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestConvertTransaction(t *testing.T) { + t.Parallel() + + tx := unittest.TransactionBodyFixture() + arg, err := jsoncdc.Encode(cadence.NewAddress(unittest.AddressFixture())) + require.NoError(t, err) + + // add fields not included in the fixture + tx.Arguments = append(tx.Arguments, arg) + tx.EnvelopeSignatures = append(tx.EnvelopeSignatures, unittest.TransactionSignatureFixture()) + + msg := convert.TransactionToMessage(tx) + converted, err := convert.MessageToTransaction(msg, flow.Testnet.Chain()) + require.NoError(t, err) + + assert.Equal(t, tx, converted) + assert.Equal(t, tx.ID(), converted.ID()) +} diff --git a/engine/common/rpc/convert/validate.go b/engine/common/rpc/convert/validate.go index 92a94438192..35b93851198 100644 --- a/engine/common/rpc/convert/validate.go +++ b/engine/common/rpc/convert/validate.go @@ -26,6 +26,20 @@ func Address(rawAddress []byte, chain flow.Chain) (flow.Address, error) { return address, nil } +func HexToAddress(hexAddress string, chain flow.Chain) (flow.Address, error) { + if len(hexAddress) == 0 { + return flow.EmptyAddress, status.Error(codes.InvalidArgument, "address cannot be empty") + } + + address := flow.HexToAddress(hexAddress) + + if !chain.IsValid(address) { + return flow.EmptyAddress, status.Errorf(codes.InvalidArgument, "address %s is invalid for chain %s", address, chain) + } + + return address, nil +} + func BlockID(blockID []byte) (flow.Identifier, error) { if len(blockID) != flow.IdentifierLen { return flow.ZeroID, status.Error(codes.InvalidArgument, "invalid block id") diff --git a/engine/common/rpc/errors.go b/engine/common/rpc/errors.go index 5bd0b88471c..9266245dc4a 100644 --- a/engine/common/rpc/errors.go +++ b/engine/common/rpc/errors.go @@ -8,9 +8,13 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/onflow/flow-go/module/state_synchronization/indexer" "github.com/onflow/flow-go/storage" ) +// ErrScriptTooLarge is returned when a script and/or arguments exceed the max size allowed by the server +var ErrScriptTooLarge = errors.New("script and/or arguments are too large") + // ConvertError converts a generic error into a grpc status error. The input may either // be a status.Error already, or standard error type. 
Any error that matches one of the
 // common status code mappings will be converted, all unmatched errors will be converted
@@ -20,16 +24,16 @@ func ConvertError(err error, msg string, defaultCode codes.Code) error {
 		return nil
 	}
 
-	// Already converted
-	if status.Code(err) != codes.Unknown {
-		return err
-	}
-
 	// Handle multierrors separately
 	if multiErr, ok := err.(*multierror.Error); ok {
 		return ConvertMultiError(multiErr, msg, defaultCode)
 	}
 
+	// Already converted
+	if status.Code(err) != codes.Unknown {
+		return err
+	}
+
 	if msg != "" {
 		msg += ": "
 	}
@@ -66,6 +70,29 @@ func ConvertStorageError(err error) error {
 	return status.Errorf(codes.Internal, "failed to find: %v", err)
 }
 
+// ConvertIndexError converts errors related to index and storage to appropriate gRPC status errors.
+// If the error is nil, it returns nil. If the error is not recognized, it falls back to ConvertError
+// with the provided default message and Internal gRPC code.
+func ConvertIndexError(err error, height uint64, defaultMsg string) error {
+	if err == nil {
+		return nil
+	}
+
+	if errors.Is(err, indexer.ErrIndexNotInitialized) {
+		return status.Errorf(codes.FailedPrecondition, "data for block is not available: %v", err)
+	}
+
+	if errors.Is(err, storage.ErrHeightNotIndexed) {
+		return status.Errorf(codes.OutOfRange, "data for block height %d is not available", height)
+	}
+
+	if errors.Is(err, storage.ErrNotFound) {
+		return status.Errorf(codes.NotFound, "data not found: %v", err)
+	}
+
+	return ConvertError(err, defaultMsg, codes.Internal)
+}
+
 // ConvertMultiError converts a multierror to a grpc status error.
 // If the errors have related status codes, the common code is returned, otherwise defaultCode is used.
 func ConvertMultiError(err *multierror.Error, msg string, defaultCode codes.Code) error {
diff --git a/engine/common/rpc/execution_node_identities_provider.go b/engine/common/rpc/execution_node_identities_provider.go
new file mode 100644
index 00000000000..7374fcc102c
--- /dev/null
+++ b/engine/common/rpc/execution_node_identities_provider.go
@@ -0,0 +1,350 @@
+package rpc
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/filter"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+)
+
+// minExecutionNodesCnt is the minimum number of execution nodes expected to have sent the execution receipt for a block
+const minExecutionNodesCnt = 2
+
+// maxAttemptsForExecutionReceipt is the maximum number of attempts to find execution receipts for a given block ID
+const maxAttemptsForExecutionReceipt = 3
+
+// MaxNodesCnt is the maximum number of nodes that will be contacted to complete an API request.
+const MaxNodesCnt = 3
+
+// ErrNoENsFoundForExecutionResult is returned when no execution nodes were found that produced
+// the requested execution result and match all of the operator's criteria.
+var ErrNoENsFoundForExecutionResult = fmt.Errorf("no execution nodes found for execution result")
+
+// ExecutionNodeIdentitiesProvider is a container for elements required to retrieve
+// execution node identities for a given block ID.
+type ExecutionNodeIdentitiesProvider struct {
+	log zerolog.Logger
+
+	executionReceipts storage.ExecutionReceipts
+	state protocol.State
+
+	preferredENIdentifiers flow.IdentifierList
+	fixedENIdentifiers flow.IdentifierList
+}
+
+// NewExecutionNodeIdentitiesProvider creates and returns a new instance of
+// ExecutionNodeIdentitiesProvider.
+//
+// Parameters:
+//   - log: The logger to use for logging.
+//   - state: The protocol state used for retrieving block information.
+//   - executionReceipts: A storage.ExecutionReceipts object that contains the execution receipts
+//     for blocks.
+//   - preferredENIdentifiers: A flow.IdentifierList of preferred execution node identifiers that
+//     are prioritized during selection.
+//   - fixedENIdentifiers: A flow.IdentifierList of fixed execution node identifiers that are
+//     always considered if available.
+func NewExecutionNodeIdentitiesProvider(
+	log zerolog.Logger,
+	state protocol.State,
+	executionReceipts storage.ExecutionReceipts,
+	preferredENIdentifiers flow.IdentifierList,
+	fixedENIdentifiers flow.IdentifierList,
+) *ExecutionNodeIdentitiesProvider {
+	return &ExecutionNodeIdentitiesProvider{
+		log: log,
+		executionReceipts: executionReceipts,
+		state: state,
+		preferredENIdentifiers: preferredENIdentifiers,
+		fixedENIdentifiers: fixedENIdentifiers,
+	}
+}
+
+// ExecutionNodesForBlockID returns up to MaxNodesCnt randomly chosen execution node identities
+// which have executed the given block ID.
+//
+// Expected errors during normal operations:
+//   - InsufficientExecutionReceipts - if no such execution node is found.
+//   - ErrNoENsFoundForExecutionResult - if no execution nodes were found that produced
+//     the provided execution result and matched the operator's criteria
+func (e *ExecutionNodeIdentitiesProvider) ExecutionNodesForBlockID(
+	ctx context.Context,
+	blockID flow.Identifier,
+) (flow.IdentitySkeletonList, error) {
+	var (
+		executorIDs flow.IdentifierList
+		err error
+	)
+
+	// check if the block ID is of the root block. If it is then don't look for execution receipts since they
+	// will not be present for the root block.
+	rootBlock := e.state.Params().FinalizedRoot()
+
+	if rootBlock.ID() == blockID {
+		executorIdentities, err := e.state.Final().Identities(filter.HasRole[flow.Identity](flow.RoleExecution))
+		if err != nil {
+			return nil, fmt.Errorf("failed to retrieve execution IDs for block ID %v: %w", blockID, err)
+		}
+		executorIDs = executorIdentities.NodeIDs()
+	} else {
+		// try to find at least minExecutionNodesCnt execution node ids from the execution receipts for the given blockID
+		for attempt := 0; attempt < maxAttemptsForExecutionReceipt; attempt++ {
+			executorIDs, err = e.findAllExecutionNodes(blockID)
+			if err != nil {
+				return nil, err
+			}
+
+			if len(executorIDs) >= minExecutionNodesCnt {
+				break
+			}
+
+			// log the attempt
+			e.log.Debug().Int("attempt", attempt).Int("max_attempt", maxAttemptsForExecutionReceipt).
+				Int("execution_receipts_found", len(executorIDs)).
+				Str("block_id", blockID.String()).
+ Msg("insufficient execution receipts") + + // if one or less execution receipts may have been received then re-query + // in the hope that more might have been received by now + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(100 * time.Millisecond << time.Duration(attempt)): + // retry after an exponential backoff + } + } + + receiptCnt := len(executorIDs) + // if less than minExecutionNodesCnt execution receipts have been received so far, then return random ENs + if receiptCnt < minExecutionNodesCnt { + newExecutorIDs, err := e.state.AtBlockID(blockID).Identities(filter.HasRole[flow.Identity](flow.RoleExecution)) + if err != nil { + return nil, fmt.Errorf("failed to retreive execution IDs for block ID %v: %w", blockID, err) + } + executorIDs = newExecutorIDs.NodeIDs() + } + } + + // choose from the preferred or fixed execution nodes + subsetENs, err := e.chooseExecutionNodes(executorIDs) + if err != nil { + return nil, fmt.Errorf("failed to retreive execution IDs for block ID %v: %w", blockID, err) + } + + if len(subsetENs) == 0 { + return nil, ErrNoENsFoundForExecutionResult + } + + return subsetENs, nil +} + +// ExecutionNodesForResultID returns execution node identities that produced receipts +// for the specific execution result ID within the given block. +// +// Expected errors during normal operation: +// - ErrNoENsFoundForExecutionResult - if no execution nodes were found that produced +// the provided execution result and matched the operators criteria +func (e *ExecutionNodeIdentitiesProvider) ExecutionNodesForResultID( + blockID flow.Identifier, + resultID flow.Identifier, +) (flow.IdentitySkeletonList, error) { + var executorIDs flow.IdentifierList + rootBlock := e.state.Params().FinalizedRoot() + + // if block is a root block, don't look for execution receipts as there are none for root block. + if rootBlock.ID() == blockID { + executorIdentities, err := e.state.Final().Identities(filter.HasRole[flow.Identity](flow.RoleExecution)) + if err != nil { + return nil, fmt.Errorf("failed to retreive execution IDs for block ID %v: %w", blockID, err) + } + + executorIDs = append(executorIDs, executorIdentities.NodeIDs()...) 
+	} else {
+		allReceipts, err := e.executionReceipts.ByBlockID(blockID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to retrieve execution receipts for block ID %v: %w", blockID, err)
+		}
+
+		executionReceiptStubList := make(flow.ExecutionReceiptStubList, 0, len(allReceipts))
+		for _, r := range allReceipts {
+			executionReceiptStubList = append(executionReceiptStubList, r.Stub())
+		}
+
+		receiptsByResultID := executionReceiptStubList.GroupByResultID()
+		targetReceipts := receiptsByResultID.GetGroup(resultID)
+
+		if len(targetReceipts) == 0 {
+			return nil, fmt.Errorf("no execution receipts found for result ID %v in block %v", resultID, blockID)
+		}
+
+		for _, receipt := range targetReceipts {
+			executorIDs = append(executorIDs, receipt.ExecutorID)
+		}
+	}
+
+	subsetENs, err := e.chooseExecutionNodes(executorIDs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve execution IDs for result ID %v: %w", resultID, err)
+	}
+
+	if len(subsetENs) == 0 {
+		return nil, ErrNoENsFoundForExecutionResult
+	}
+
+	return subsetENs, nil
+}
+
+// findAllExecutionNodes finds all the execution node IDs from the execution receipts that have been received for the
+// given blockID
+func (e *ExecutionNodeIdentitiesProvider) findAllExecutionNodes(
+	blockID flow.Identifier,
+) (flow.IdentifierList, error) {
+	// look up the receipts storage with the block ID
+	allReceipts, err := e.executionReceipts.ByBlockID(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve execution receipts for block ID %v: %w", blockID, err)
+	}
+
+	executionResultMetaList := make(flow.ExecutionReceiptStubList, 0, len(allReceipts))
+	for _, r := range allReceipts {
+		executionResultMetaList = append(executionResultMetaList, r.Stub())
+	}
+	executionResultGroupedMetaList := executionResultMetaList.GroupByResultID()
+
+	// maximum number of matching receipts found so far for any execution result id
+	maxMatchedReceiptCnt := 0
+	// execution result id key for the highest number of matching receipts in the identicalReceipts map
+	var maxMatchedReceiptResultID flow.Identifier
+
+	// find the largest list of receipts which have the same result ID
+	for resultID, executionReceiptList := range executionResultGroupedMetaList {
+		currentMatchedReceiptCnt := executionReceiptList.Size()
+		if currentMatchedReceiptCnt > maxMatchedReceiptCnt {
+			maxMatchedReceiptCnt = currentMatchedReceiptCnt
+			maxMatchedReceiptResultID = resultID
+		}
+	}
+
+	// if there is more than one execution result for the same block ID, log an error
+	if executionResultGroupedMetaList.NumberGroups() > 1 {
+		identicalReceiptsStr := fmt.Sprintf("%v", flow.GetIDs(allReceipts))
+		e.log.Error().
+			Str("block_id", blockID.String()).
+			Str("execution_receipts", identicalReceiptsStr).
+			Msg("execution receipt mismatch")
+	}
+
+	// pick the largest list of matching receipts
+	matchingReceiptMetaList := executionResultGroupedMetaList.GetGroup(maxMatchedReceiptResultID)
+
+	metaReceiptGroupedByExecutorID := matchingReceiptMetaList.GroupByExecutorID()
+
+	// collect all unique execution node ids from the receipts
+	var executorIDs flow.IdentifierList
+	for executorID := range metaReceiptGroupedByExecutorID {
+		executorIDs = append(executorIDs, executorID)
+	}
+
+	return executorIDs, nil
+}
+
+// chooseExecutionNodes finds the subset of execution nodes defined in the identity table by first
+// choosing the preferred execution nodes which have executed the transaction. If no such preferred
+// execution nodes are found, then the fixed execution nodes defined in the identity table are returned.
+// If neither preferred nor fixed nodes are defined, then all execution nodes matching the executor IDs are returned.
+// e.g. If execution nodes in the identity table are {1,2,3,4}, preferred ENs are defined as {2,3,4}
+// and the executor IDs are {1,2,3}, then {2, 3} is returned as the chosen subset of ENs
+func (e *ExecutionNodeIdentitiesProvider) chooseExecutionNodes(
+	executorIDs flow.IdentifierList,
+) (flow.IdentitySkeletonList, error) {
+	allENs, err := e.state.Final().Identities(filter.HasRole[flow.Identity](flow.RoleExecution))
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve all execution IDs: %w", err)
+	}
+
+	// choose from preferred EN IDs
+	if len(e.preferredENIdentifiers) > 0 {
+		chosenIDs := e.ChooseFromPreferredENIDs(allENs, executorIDs)
+		return chosenIDs.ToSkeleton(), nil
+	}
+
+	// if no preferred EN ID is found, then choose from the fixed EN IDs
+	if len(e.fixedENIdentifiers) > 0 {
+		// choose fixed ENs which have executed the transaction
+		chosenIDs := allENs.Filter(filter.And(
+			filter.HasNodeID[flow.Identity](e.fixedENIdentifiers...),
+			filter.HasNodeID[flow.Identity](executorIDs...),
+		))
+		if len(chosenIDs) > 0 {
+			return chosenIDs.ToSkeleton(), nil
+		}
+		// if no such ENs are found, then just choose all fixed ENs
+		chosenIDs = allENs.Filter(filter.HasNodeID[flow.Identity](e.fixedENIdentifiers...))
+		return chosenIDs.ToSkeleton(), nil
+	}
+
+	// if no preferred or fixed ENs have been specified, then return all executor IDs, i.e., no preference at all
+	return allENs.Filter(filter.HasNodeID[flow.Identity](executorIDs...)).ToSkeleton(), nil
+}
+
+// ChooseFromPreferredENIDs finds the subset of execution nodes if preferred execution nodes are defined.
+// If preferredENIdentifiers is set and there are fewer than MaxNodesCnt nodes selected, then the list is padded up to
+// MaxNodesCnt nodes using the following order:
+// 1. Use any EN with a receipt.
+// 2. Use any preferred node not already selected.
+// 3. Use any EN not already selected.
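+//
+// Illustrative walk-through (hypothetical identities, mirroring the unit test for this
+// method): with ENs {A, B, C, D}, preferred = {A} and executor IDs = {D}, the list is
+// built as
+//
+//	[D]        // step 1: D holds a receipt
+//	[D, A]     // step 2: A is preferred
+//	[D, A, B]  // step 3: padded with any remaining EN, up to MaxNodesCnt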
+func (e *ExecutionNodeIdentitiesProvider) ChooseFromPreferredENIDs( + allENs flow.IdentityList, + executorIDs flow.IdentifierList, +) flow.IdentityList { + var chosenIDs flow.IdentityList + + // filter for both preferred and executor IDs + chosenIDs = allENs.Filter(filter.And( + filter.HasNodeID[flow.Identity](e.preferredENIdentifiers...), + filter.HasNodeID[flow.Identity](executorIDs...), + )) + + if len(chosenIDs) >= MaxNodesCnt { + return chosenIDs + } + + // function to add nodes to chosenIDs if they are not already included + addIfNotExists := func(candidates flow.IdentityList) { + for _, en := range candidates { + _, exists := chosenIDs.ByNodeID(en.NodeID) + if !exists { + chosenIDs = append(chosenIDs, en) + if len(chosenIDs) >= MaxNodesCnt { + return + } + } + } + } + + // add any EN with a receipt + receiptENs := allENs.Filter(filter.HasNodeID[flow.Identity](executorIDs...)) + addIfNotExists(receiptENs) + if len(chosenIDs) >= MaxNodesCnt { + return chosenIDs + } + + // add any preferred node not already selected + preferredENs := allENs.Filter(filter.HasNodeID[flow.Identity](e.preferredENIdentifiers...)) + addIfNotExists(preferredENs) + if len(chosenIDs) >= MaxNodesCnt { + return chosenIDs + } + + // add any EN not already selected + addIfNotExists(allENs) + + return chosenIDs +} diff --git a/engine/common/rpc/execution_node_identities_provider_test.go b/engine/common/rpc/execution_node_identities_provider_test.go new file mode 100644 index 00000000000..4b86b1cb1f6 --- /dev/null +++ b/engine/common/rpc/execution_node_identities_provider_test.go @@ -0,0 +1,265 @@ +package rpc_test + +import ( + "context" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/access/rpc/backend/node_communicator" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" + "github.com/onflow/flow-go/model/flow" + protocol "github.com/onflow/flow-go/state/protocol/mock" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// ENIdentitiesProviderSuite is a test suite for testing the ExecutionNodeIdentitiesProvider. +type ENIdentitiesProviderSuite struct { + suite.Suite + + state *protocol.State + snapshot *protocol.Snapshot + log zerolog.Logger + + receipts *storagemock.ExecutionReceipts +} + +func TestHandler(t *testing.T) { + suite.Run(t, new(ENIdentitiesProviderSuite)) +} + +// SetupTest initializes the test suite with mock state and receipts storage. +func (suite *ENIdentitiesProviderSuite) SetupTest() { + suite.log = zerolog.New(zerolog.NewConsoleWriter()) + suite.state = new(protocol.State) + suite.snapshot = new(protocol.Snapshot) + suite.receipts = new(storagemock.ExecutionReceipts) + + header := unittest.BlockHeaderFixture() + params := new(protocol.Params) + params.On("FinalizedRoot").Return(header, nil) + suite.state.On("Params").Return(params) +} + +// TestExecutionNodesForBlockID tests the ExecutionNodesForBlockID function. +// This function is responsible for retrieving execution nodes used to serve +// all API calls that interact with execution nodes. 
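+//
+// Sketch of the shared flow each subtest exercises (wiring as in SetupTest and the
+// helper defined inside the test body):
+//
+//	provider := commonrpc.NewExecutionNodeIdentitiesProvider(
+//		suite.log, suite.state, suite.receipts, preferredIDs, fixedIDs,
+//	)
+//	ens, err := provider.ExecutionNodesForBlockID(context.Background(), block.ID())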
+func (suite *ENIdentitiesProviderSuite) TestExecutionNodesForBlockID() {
+	totalReceipts := 5
+
+	block := unittest.BlockFixture()
+
+	// generate one execution node identity per receipt, assuming each ER is generated by a unique exec node
+	allExecutionNodes := unittest.IdentityListFixture(totalReceipts, unittest.WithRole(flow.RoleExecution))
+
+	// one execution result for all receipts for this block
+	executionResult := *unittest.ExecutionResultFixture()
+
+	// generate execution receipts
+	receipts := make(flow.ExecutionReceiptList, totalReceipts)
+	for j := 0; j < totalReceipts; j++ {
+		r := unittest.ReceiptForBlockFixture(block)
+		r.ExecutorID = allExecutionNodes[j].NodeID
+		r.ExecutionResult = executionResult
+		receipts[j] = r
+	}
+
+	currentAttempt := 0
+	attempt1Receipts, attempt2Receipts, attempt3Receipts := receipts, receipts, receipts
+
+	// set up the receipts storage mock to return a different list of receipts on each call
+	suite.receipts.
+		On("ByBlockID", block.ID()).Return(
+		func(id flow.Identifier) flow.ExecutionReceiptList {
+			switch currentAttempt {
+			case 0:
+				currentAttempt++
+				return attempt1Receipts
+			case 1:
+				currentAttempt++
+				return attempt2Receipts
+			default:
+				currentAttempt = 0
+				return attempt3Receipts
+			}
+		},
+		func(id flow.Identifier) error { return nil })
+
+	suite.snapshot.On("Identities", mock.Anything).Return(
+		func(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList {
+			// apply the filter passed in to the list of all the execution nodes
+			return allExecutionNodes.Filter(filter)
+		},
+		func(flow.IdentityFilter[flow.Identity]) error { return nil })
+	suite.state.On("Final").Return(suite.snapshot, nil).Maybe()
+
+	var preferredENIdentifiers flow.IdentifierList
+	var fixedENIdentifiers flow.IdentifierList
+
+	testExecutionNodesForBlockID := func(preferredENs, fixedENs, expectedENs flow.IdentityList) {
+
+		if preferredENs != nil {
+			preferredENIdentifiers = preferredENs.NodeIDs()
+		}
+		if fixedENs != nil {
+			fixedENIdentifiers = fixedENs.NodeIDs()
+		}
+
+		if expectedENs == nil {
+			expectedENs = flow.IdentityList{}
+		}
+
+		execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider(
+			suite.log,
+			suite.state,
+			suite.receipts,
+			preferredENIdentifiers,
+			fixedENIdentifiers,
+		)
+
+		allExecNodes, err := execNodeIdentitiesProvider.ExecutionNodesForBlockID(context.Background(), block.ID())
+		require.NoError(suite.T(), err)
+
+		execNodeSelectorFactory := node_communicator.NewNodeSelectorFactory(false)
+		execSelector, err := execNodeSelectorFactory.SelectNodes(allExecNodes)
+		require.NoError(suite.T(), err)
+
+		actualList := flow.IdentitySkeletonList{}
+		for actual := execSelector.Next(); actual != nil; actual = execSelector.Next() {
+			actualList = append(actualList, actual)
+		}
+
+		{
+			expectedENs := expectedENs.ToSkeleton()
+			if len(expectedENs) > commonrpc.MaxNodesCnt {
+				for _, actual := range actualList {
+					require.Contains(suite.T(), expectedENs, actual)
+				}
+			} else {
+				require.ElementsMatch(suite.T(), actualList, expectedENs)
+			}
+		}
+	}
+	// if we don't find sufficient receipts, ExecutionNodesForBlockID should return a list of random ENs
+	suite.Run("insufficient receipts return random ENs in State", func() {
+		// return no receipts at all attempts
+		attempt1Receipts = flow.ExecutionReceiptList{}
+		attempt2Receipts = flow.ExecutionReceiptList{}
+		attempt3Receipts = flow.ExecutionReceiptList{}
+		suite.state.On("AtBlockID", mock.Anything).Return(suite.snapshot)
+
+		execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider(
+			suite.log,
+			suite.state,
+			suite.receipts,
+			flow.IdentifierList{},
+			flow.IdentifierList{},
+		)
+
+		allExecNodes, err := execNodeIdentitiesProvider.ExecutionNodesForBlockID(context.Background(), block.ID())
+		require.NoError(suite.T(), err)
+
+		execNodeSelectorFactory := node_communicator.NewNodeSelectorFactory(false)
+		execSelector, err := execNodeSelectorFactory.SelectNodes(allExecNodes)
+		require.NoError(suite.T(), err)
+
+		actualList := flow.IdentitySkeletonList{}
+		for actual := execSelector.Next(); actual != nil; actual = execSelector.Next() {
+			actualList = append(actualList, actual)
+		}
+
+		require.Equal(suite.T(), len(actualList), commonrpc.MaxNodesCnt)
+	})
+
+	// if no preferred or fixed ENs are specified, the ExecutionNodesForBlockID function should
+	// return the execution node list without a filter
+	suite.Run("no preferred or fixed ENs", func() {
+		testExecutionNodesForBlockID(nil, nil, allExecutionNodes)
+	})
+	// if only fixed ENs are specified, the ExecutionNodesForBlockID function should
+	// return the fixed ENs list
+	suite.Run("two fixed ENs with zero preferred EN", func() {
+		// mark the first two ENs as fixed
+		fixedENs := allExecutionNodes[0:2]
+		expectedList := fixedENs
+		testExecutionNodesForBlockID(nil, fixedENs, expectedList)
+	})
+	// if only preferred ENs are specified, the ExecutionNodesForBlockID function should
+	// return the preferred ENs list
+	suite.Run("two preferred ENs with zero fixed EN", func() {
+		// mark the first two ENs as preferred
+		preferredENs := allExecutionNodes[0:2]
+		expectedList := allExecutionNodes[0:commonrpc.MaxNodesCnt]
+		testExecutionNodesForBlockID(preferredENs, nil, expectedList)
+	})
+	// if both are specified, the ExecutionNodesForBlockID function should
+	// return the preferred ENs list
+	suite.Run("four fixed ENs of which two are preferred ENs", func() {
+		// mark the first five ENs as fixed
+		fixedENs := allExecutionNodes[0:5]
+		// mark the first two of the fixed ENs as preferred ENs
+		preferredENs := fixedENs[0:2]
+		expectedList := fixedENs[0:commonrpc.MaxNodesCnt]
+		testExecutionNodesForBlockID(preferredENs, fixedENs, expectedList)
+	})
+	// if both are specified, but the preferred ENs don't match the ExecutorIDs in the ER,
+	// the ExecutionNodesForBlockID function should return the fixed ENs list
+	suite.Run("four fixed ENs of which two are preferred ENs but have not generated the ER", func() {
+		// mark the first two ENs as fixed
+		fixedENs := allExecutionNodes[0:2]
+		// specify two ENs not specified in the ERs as preferred
+		preferredENs := unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution))
+		// add one more node ID beyond the fixed ENs, because the expected length of the list should be MaxNodesCnt
+		expectedList := append(fixedENs, allExecutionNodes[2])
+		testExecutionNodesForBlockID(preferredENs, fixedENs, expectedList)
+	})
+	// if execution receipts are not yet available, the ExecutionNodesForBlockID function should retry twice
+	suite.Run("retry execution receipt query", func() {
+		// on first attempt, no execution receipts are available
+		attempt1Receipts = flow.ExecutionReceiptList{}
+		// on second attempt only one is available
+		attempt2Receipts = flow.ExecutionReceiptList{receipts[0]}
+		// on third attempt all receipts are available
+		attempt3Receipts = receipts
+		currentAttempt = 0
+		// mark the first two ENs as preferred
+		preferredENs := allExecutionNodes[0:2]
+		expectedList := allExecutionNodes[0:commonrpc.MaxNodesCnt]
+		testExecutionNodesForBlockID(preferredENs, nil, expectedList)
+	})
+	// if preferredENIdentifiers was set and there are fewer than MaxNodesCnt nodes selected, then check the order
+	// in which EN ids are added
+	suite.Run("add nodes in the correct order", func() {
+		// mark the first EN as preferred
+		preferredENIdentifiers = allExecutionNodes[0:1].NodeIDs()
+		// mark the fourth EN as having a receipt
+		executorIDs := allExecutionNodes[3:4].NodeIDs()
+
+		receiptNodes := allExecutionNodes[3:4]   // any EN with a receipt
+		preferredNodes := allExecutionNodes[0:1] // preferred EN not already selected
+		additionalNode := allExecutionNodes[1:2] // any EN not already selected
+
+		expectedOrder := flow.IdentityList{
+			receiptNodes[0],
+			preferredNodes[0],
+			additionalNode[0],
+		}
+
+		execNodeIdentitiesProvider := commonrpc.NewExecutionNodeIdentitiesProvider(
+			suite.log,
+			suite.state,
+			suite.receipts,
+			preferredENIdentifiers,
+			flow.IdentifierList{},
+		)
+
+		chosenIDs := execNodeIdentitiesProvider.ChooseFromPreferredENIDs(allExecutionNodes, executorIDs)
+
+		require.ElementsMatch(suite.T(), chosenIDs, expectedOrder)
+		require.Equal(suite.T(), len(chosenIDs), commonrpc.MaxNodesCnt)
+	})
+}
diff --git a/engine/common/rpc/helpers.go b/engine/common/rpc/helpers.go
new file mode 100644
index 00000000000..7c266df0f57
--- /dev/null
+++ b/engine/common/rpc/helpers.go
@@ -0,0 +1,19 @@
+package rpc
+
+// CheckScriptSize returns true if the combined size (in bytes) of the script and arguments is less
+// than or equal to the max size.
+func CheckScriptSize(script []byte, arguments [][]byte, maxSize uint) bool {
+	currentSize := len(script)
+	if currentSize > int(maxSize) {
+		return false
+	}
+
+	for _, arg := range arguments {
+		currentSize += len(arg)
+		if currentSize > int(maxSize) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/engine/common/rpc/helpers_test.go b/engine/common/rpc/helpers_test.go
new file mode 100644
index 00000000000..9631fd86fe6
--- /dev/null
+++ b/engine/common/rpc/helpers_test.go
@@ -0,0 +1,137 @@
+package rpc
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCheckScriptSize(t *testing.T) {
+	tests := []struct {
+		name string
+		script []byte
+		arguments [][]byte
+		maxSize uint
+		expected bool
+	}{
+		{
+			name: "empty script and arguments within limit",
+			script: []byte{},
+			arguments: [][]byte{},
+			maxSize: 100,
+			expected: true,
+		},
+		{
+			name: "script within limit, no arguments",
+			script: []byte("test script"),
+			arguments: [][]byte{},
+			maxSize: 100,
+			expected: true,
+		},
+		{
+			name: "script and arguments within limit",
+			script: []byte("test script"),
+			arguments: [][]byte{[]byte("arg1"), []byte("arg2")},
+			maxSize: 100,
+			expected: true,
+		},
+		{
+			name: "script exactly at limit, no arguments",
+			script: make([]byte, 50),
+			arguments: [][]byte{},
+			maxSize: 50,
+			expected: true,
+		},
+		{
+			name: "script and arguments exactly at limit",
+			script: make([]byte, 30),
+			arguments: [][]byte{make([]byte, 20)},
+			maxSize: 50,
+			expected: true,
+		},
+		{
+			name: "script exceeds limit",
+			script: make([]byte, 60),
+			arguments: [][]byte{},
+			maxSize: 50,
+			expected: false,
+		},
+		{
+			name: "script within limit but arguments exceed limit",
+			script: make([]byte, 30),
+			arguments: [][]byte{make([]byte, 25)},
+			maxSize: 50,
+			expected: false,
+		},
+		{
+			name: "script and arguments combined exceed limit",
+			script: make([]byte, 30),
+			arguments: [][]byte{make([]byte, 15), make([]byte, 10)},
+			maxSize: 50,
+			expected: false,
+		},
+		{
+			name: "multiple arguments
exceed limit", + script: make([]byte, 10), + arguments: [][]byte{make([]byte, 15), make([]byte, 20), make([]byte, 10)}, + maxSize: 50, + expected: false, + }, + { + name: "zero max size with empty inputs", + script: []byte{}, + arguments: [][]byte{}, + maxSize: 0, + expected: true, + }, + { + name: "zero max size with non-empty inputs", + script: []byte("test"), + arguments: [][]byte{}, + maxSize: 0, + expected: false, + }, + { + name: "large script with large arguments", + script: make([]byte, 1000), + arguments: [][]byte{make([]byte, 500), make([]byte, 300)}, + maxSize: 2000, + expected: true, + }, + { + name: "large script with large arguments exceeding limit", + script: make([]byte, 1000), + arguments: [][]byte{make([]byte, 500), make([]byte, 600)}, + maxSize: 2000, + expected: false, + }, + { + name: "nil script and arguments", + script: nil, + arguments: [][]byte{nil, nil}, + maxSize: 100, + expected: true, + }, + { + name: "nil arguments", + script: []byte("test"), + arguments: nil, + maxSize: 100, + expected: true, + }, + { + name: "mixed nil and non-nil arguments", + script: []byte("test"), + arguments: [][]byte{nil, []byte("arg"), nil}, + maxSize: 100, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := CheckScriptSize(tt.script, tt.arguments, tt.maxSize) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/engine/common/rpc/logging_interceptor.go b/engine/common/rpc/logging_interceptor.go deleted file mode 100644 index 7e4a7c897df..00000000000 --- a/engine/common/rpc/logging_interceptor.go +++ /dev/null @@ -1,25 +0,0 @@ -package rpc - -import ( - grpczerolog "github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2" - "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" - "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/tags" - "github.com/rs/zerolog" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -func customClientCodeToLevel(c codes.Code) logging.Level { - if c == codes.OK { - // log successful returns as Debug to avoid excessive logging in info mode - return logging.DEBUG - } - return logging.DefaultServerCodeToLevel(c) -} - -// LoggingInterceptor creates the logging interceptors to log incoming GRPC request and response (minus the payload body) -func LoggingInterceptor(log zerolog.Logger) []grpc.UnaryServerInterceptor { - tagsInterceptor := tags.UnaryServerInterceptor(tags.WithFieldExtractor(tags.CodeGenRequestFieldExtractor)) - loggingInterceptor := logging.UnaryServerInterceptor(grpczerolog.InterceptorLogger(log), logging.WithLevels(customClientCodeToLevel)) - return []grpc.UnaryServerInterceptor{tagsInterceptor, loggingInterceptor} -} diff --git a/engine/common/rpc/rate_limit_interceptor.go b/engine/common/rpc/rate_limit_interceptor.go deleted file mode 100644 index 7a26a7d3b11..00000000000 --- a/engine/common/rpc/rate_limit_interceptor.go +++ /dev/null @@ -1,95 +0,0 @@ -package rpc - -import ( - "context" - "path/filepath" - - "github.com/rs/zerolog" - "golang.org/x/time/rate" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const defaultRateLimit = 1000 // aggregate default rate limit for all unspecified API calls -const defaultBurst = 100 // default burst limit (calls made at the same time) for an API - -// rateLimiterInterceptor rate limits the -type rateLimiterInterceptor struct { - log zerolog.Logger - - // a shared default rate limiter for APIs whose rate limit is not explicitly defined - 
defaultLimiter *rate.Limiter - - // a map of api and its limiter - methodLimiterMap map[string]*rate.Limiter -} - -// NewRateLimiterInterceptor creates a new rate limiter interceptor with the defined per second rate limits and the -// optional burst limit for each API. -func NewRateLimiterInterceptor(log zerolog.Logger, apiRateLimits map[string]int, apiBurstLimits map[string]int) *rateLimiterInterceptor { - - defaultLimiter := rate.NewLimiter(rate.Limit(defaultRateLimit), defaultBurst) - methodLimiterMap := make(map[string]*rate.Limiter, len(apiRateLimits)) - - // read rate limit values for each API and create a limiter for each - for api, limit := range apiRateLimits { - // if a burst limit is defined for this api, use that else use the default - burst := defaultBurst - if b, ok := apiBurstLimits[api]; ok { - burst = b - } - methodLimiterMap[api] = rate.NewLimiter(rate.Limit(limit), burst) - } - - if len(methodLimiterMap) == 0 { - log.Info().Int("default_rate_limit", defaultRateLimit).Msg("no rate limits specified, using the default limit") - } - - return &rateLimiterInterceptor{ - defaultLimiter: defaultLimiter, - methodLimiterMap: methodLimiterMap, - log: log, - } -} - -// UnaryServerInterceptor rate limits the given request based on the limits defined when creating the rateLimiterInterceptor -func (interceptor *rateLimiterInterceptor) UnaryServerInterceptor(ctx context.Context, - req interface{}, - info *grpc.UnaryServerInfo, - handler grpc.UnaryHandler) (resp interface{}, err error) { - - // remove the package name (e.g. "/flow.access.AccessAPI/Ping" to "Ping") - methodName := filepath.Base(info.FullMethod) - - // look up the limiter - limiter := interceptor.methodLimiterMap[methodName] - - // if not found, use the default limiter - if limiter == nil { - - interceptor.log.Trace().Str("method", methodName).Msg("rate limit not defined, using default limit") - - limiter = interceptor.defaultLimiter - } - - // check if request within limit - if !limiter.Allow() { - - // log the limit violation - interceptor.log.Trace(). - Str("method", methodName). - Interface("request", req). - Float64("limit", float64(limiter.Limit())). - Msg("rate limit exceeded") - - // reject the request - return nil, status.Errorf(codes.ResourceExhausted, "%s rate limit reached, please retry later.", - info.FullMethod) - } - - // call the handler - h, err := handler(ctx, req) - - return h, err -} diff --git a/engine/common/splitter/engine.go b/engine/common/splitter/engine.go deleted file mode 100644 index bfb4169e2a4..00000000000 --- a/engine/common/splitter/engine.go +++ /dev/null @@ -1,112 +0,0 @@ -package splitter - -import ( - "fmt" - "sync" - - "github.com/hashicorp/go-multierror" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" -) - -// Engine is the splitter engine, which maintains a list of registered engines -// and passes every event it receives to each of these engines in parallel. -type Engine struct { - enginesMu sync.RWMutex - unit *engine.Unit // used to manage concurrency & shutdown - log zerolog.Logger // used to log relevant actions with context - engines map[network.MessageProcessor]struct{} // stores registered engines - channel channels.Channel // the channel that this splitter listens on -} - -// New creates a new splitter engine. 
-func New( - log zerolog.Logger, - channel channels.Channel, -) *Engine { - return &Engine{ - unit: engine.NewUnit(), - log: log.With().Str("engine", "splitter").Logger(), - engines: make(map[network.MessageProcessor]struct{}), - channel: channel, - } -} - -// RegisterEngine registers a new engine with the splitter. Events -// that are received by the splitter after the engine has registered -// will be passed down to it. -func (e *Engine) RegisterEngine(engine network.MessageProcessor) { - e.enginesMu.Lock() - defer e.enginesMu.Unlock() - - e.engines[engine] = struct{}{} -} - -// UnregisterEngine unregisters an engine with the splitter. After -// the engine has been unregistered, the splitter will stop passing -// events to it. If the given engine was never registered, this is -// a noop. -func (e *Engine) UnregisterEngine(engine network.MessageProcessor) { - e.enginesMu.Lock() - defer e.enginesMu.Unlock() - - delete(e.engines, engine) -} - -// Ready returns a ready channel that is closed once the engine has fully -// started. -func (e *Engine) Ready() <-chan struct{} { - return e.unit.Ready() -} - -// Done returns a done channel that is closed once the engine has fully stopped. -func (e *Engine) Done() <-chan struct{} { - return e.unit.Done() -} - -// Process processes the given event from the node with the given origin ID -// in a blocking manner. It returns the potential processing error when -// done. -func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { - return e.unit.Do(func() error { - if channel != e.channel { - return fmt.Errorf("received event on unknown channel %s", channel) - } - - return e.process(func(downstream network.MessageProcessor) error { - return downstream.Process(channel, originID, event) - }) - }) -} - -// process calls the given function in parallel for all the engines that have -// registered with this splitter. 
-func (e *Engine) process(processFunc func(network.MessageProcessor) error) error { - count := 0 - errors := make(chan error) - - e.enginesMu.RLock() - for eng := range e.engines { - e.enginesMu.RUnlock() - - count += 1 - go func(downstream network.MessageProcessor) { - errors <- processFunc(downstream) - }(eng) - - e.enginesMu.RLock() - } - e.enginesMu.RUnlock() - - var multiErr *multierror.Error - - for i := 0; i < count; i++ { - multiErr = multierror.Append(multiErr, <-errors) - } - - return multiErr.ErrorOrNil() -} diff --git a/engine/common/splitter/engine_test.go b/engine/common/splitter/engine_test.go deleted file mode 100644 index afc8c7f955e..00000000000 --- a/engine/common/splitter/engine_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package splitter_test - -import ( - "errors" - "sync" - "testing" - - "github.com/hashicorp/go-multierror" - "github.com/rs/zerolog" - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/engine/common/splitter" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" - "github.com/onflow/flow-go/utils/unittest" -) - -func getEvent() interface{} { - return struct { - foo string - }{ - foo: "bar", - } -} - -type Suite struct { - suite.Suite - - channel channels.Channel - engine *splitter.Engine -} - -func (suite *Suite) SetupTest() { - suite.channel = channels.TestNetworkChannel - suite.engine = splitter.New(zerolog.Logger{}, suite.channel) -} - -func TestSplitter(t *testing.T) { - suite.Run(t, new(Suite)) -} - -// TestDownstreamEngineFailure tests the case where one of the engines registered with -// the splitter encounters an error while processing a message. -func (suite *Suite) TestDownstreamEngineFailure() { - id := unittest.IdentifierFixture() - event := getEvent() - - engine1 := new(mocknetwork.Engine) - engine2 := new(mocknetwork.Engine) - - suite.engine.RegisterEngine(engine1) - suite.engine.RegisterEngine(engine2) - - processError := errors.New("Process Error!") - - // engine1 processing error should not impact engine2 - - engine1.On("Process", suite.channel, id, event).Return(processError).Once() - engine2.On("Process", suite.channel, id, event).Return(nil).Once() - - err := suite.engine.Process(suite.channel, id, event) - merr, ok := err.(*multierror.Error) - suite.Assert().True(ok) - suite.Assert().Len(merr.Errors, 1) - suite.Assert().ErrorIs(merr.Errors[0], processError) - - engine1.AssertNumberOfCalls(suite.T(), "Process", 1) - engine2.AssertNumberOfCalls(suite.T(), "Process", 1) - - engine1.AssertExpectations(suite.T()) - engine2.AssertExpectations(suite.T()) - - // engine2 processing error should not impact engine1 - - engine1.On("Process", suite.channel, id, event).Return(nil).Once() - engine2.On("Process", suite.channel, id, event).Return(processError).Once() - - err = suite.engine.Process(suite.channel, id, event) - merr, ok = err.(*multierror.Error) - suite.Assert().True(ok) - suite.Assert().Len(merr.Errors, 1) - suite.Assert().ErrorIs(merr.Errors[0], processError) - - engine1.AssertNumberOfCalls(suite.T(), "Process", 2) - engine2.AssertNumberOfCalls(suite.T(), "Process", 2) - - engine1.AssertExpectations(suite.T()) - engine2.AssertExpectations(suite.T()) -} - -// TestProcessUnregisteredChannel tests that receiving a message on an unknown channel -// returns an error. 
-func (suite *Suite) TestProcessUnknownChannel() { - id := unittest.IdentifierFixture() - event := getEvent() - - unknownChannel := channels.Channel("unknown-chan") - - engine := new(mocknetwork.Engine) - - suite.engine.RegisterEngine(engine) - - err := suite.engine.Process(unknownChannel, id, event) - suite.Assert().Error(err) - - engine.AssertNumberOfCalls(suite.T(), "Process", 0) -} - -// TestConcurrentEvents tests that sending multiple messages concurrently, results in each engine -// receiving every message. -func (suite *Suite) TestConcurrentEvents() { - id := unittest.IdentifierFixture() - const numEvents = 10 - const numEngines = 5 - - var engines [numEngines]*mocknetwork.Engine - - for i := 0; i < numEngines; i++ { - engine := new(mocknetwork.Engine) - suite.engine.RegisterEngine(engine) - engines[i] = engine - } - - for i := 0; i < numEvents; i++ { - for _, engine := range engines { - engine.On("Process", suite.channel, id, i).Return(nil).Once() - } - } - - var wg sync.WaitGroup - - for i := 0; i < numEvents; i++ { - wg.Add(1) - - go func(value int) { - defer wg.Done() - err := suite.engine.Process(suite.channel, id, value) - suite.Assert().Nil(err) - }(i) - } - - wg.Wait() - - for _, engine := range engines { - engine.AssertNumberOfCalls(suite.T(), "Process", numEvents) - engine.AssertExpectations(suite.T()) - } -} diff --git a/engine/common/splitter/network/example_test.go b/engine/common/splitter/network/example_test.go deleted file mode 100644 index b94f9e8a70e..00000000000 --- a/engine/common/splitter/network/example_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package network_test - -import ( - "fmt" - "math/rand" - - "github.com/rs/zerolog" - - splitterNetwork "github.com/onflow/flow-go/engine/common/splitter/network" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network/channels" - testnet "github.com/onflow/flow-go/utils/unittest/network" -) - -func Example() { - // create a mock network - net := testnet.NewNetwork() - - // create a splitter network - logger := zerolog.Nop() - splitterNet := splitterNetwork.NewNetwork(net, logger) - - // generate a random origin ID - var id flow.Identifier - rand.Seed(0) - rand.Read(id[:]) - - // create engines - engineProcessFunc := func(engineID int) testnet.EngineProcessFunc { - return func(channel channels.Channel, originID flow.Identifier, event interface{}) error { - fmt.Printf("Engine %d received message: channel=%v, originID=%v, event=%v\n", engineID, channel, originID, event) - return nil - } - } - engine1 := testnet.NewEngine().OnProcess(engineProcessFunc(1)) - engine2 := testnet.NewEngine().OnProcess(engineProcessFunc(2)) - engine3 := testnet.NewEngine().OnProcess(engineProcessFunc(3)) - - // register engines with splitter network - channel := channels.Channel("foo-channel") - _, err := splitterNet.Register(channel, engine1) - if err != nil { - fmt.Println(err) - } - _, err = splitterNet.Register(channel, engine2) - if err != nil { - fmt.Println(err) - } - _, err = splitterNet.Register(channel, engine3) - if err != nil { - fmt.Println(err) - } - - // send message to network - err = net.Send(channel, id, "foo") - if err != nil { - fmt.Println(err) - } - - // Unordered output: - // Engine 1 received message: channel=foo-channel, originID=0194fdc2fa2ffcc041d3ff12045b73c86e4ff95ff662a5eee82abdf44a2d0b75, event=foo - // Engine 2 received message: channel=foo-channel, originID=0194fdc2fa2ffcc041d3ff12045b73c86e4ff95ff662a5eee82abdf44a2d0b75, event=foo - // Engine 3 received message: channel=foo-channel, 
originID=0194fdc2fa2ffcc041d3ff12045b73c86e4ff95ff662a5eee82abdf44a2d0b75, event=foo -} diff --git a/engine/common/splitter/network/network.go b/engine/common/splitter/network/network.go deleted file mode 100644 index 13f1dfb8cc3..00000000000 --- a/engine/common/splitter/network/network.go +++ /dev/null @@ -1,118 +0,0 @@ -package network - -import ( - "errors" - "fmt" - "sync" - - "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p/core/protocol" - "github.com/rs/zerolog" - - splitterEngine "github.com/onflow/flow-go/engine/common/splitter" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/module/util" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" -) - -// Network is the splitter network. It is a wrapper around the default network implementation -// and should be passed in to engine constructors that require a network to register with. -// When an engine is registered with the splitter network, a splitter engine is created for -// the given channel (if one doesn't already exist) and the engine is registered with that -// splitter engine. As a result, multiple engines can register with the splitter network on -// the same channel and will each receive all events on that channel. -type Network struct { - net network.Network - mu sync.RWMutex - log zerolog.Logger - splitters map[channels.Channel]*splitterEngine.Engine // stores splitters for each channel - conduits map[channels.Channel]network.Conduit // stores conduits for all registered channels - *component.ComponentManager -} - -var _ network.Network = (*Network)(nil) - -// NewNetwork returns a new splitter network. -func NewNetwork( - net network.Network, - log zerolog.Logger, -) *Network { - n := &Network{ - net: net, - splitters: make(map[channels.Channel]*splitterEngine.Engine), - conduits: make(map[channels.Channel]network.Conduit), - log: log, - } - - n.ComponentManager = component.NewComponentManagerBuilder(). - AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - err := util.WaitClosed(ctx, n.net.Ready()) - - if err != nil { - return - } - - ready() - - <-ctx.Done() - }).Build() - - return n -} - -func (n *Network) RegisterBlobService(channel channels.Channel, store datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { - return n.net.RegisterBlobService(channel, store, opts...) -} - -func (n *Network) RegisterPingService(pid protocol.ID, provider network.PingInfoProvider) (network.PingService, error) { - return n.net.RegisterPingService(pid, provider) -} - -// Register will subscribe the given engine with the spitter on the given channel, and all registered -// engines will be notified with incoming messages on the channel. 
-// The returned Conduit can be used to send messages to engines on other nodes subscribed to the same channel -func (n *Network) Register(channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) { - n.mu.Lock() - defer n.mu.Unlock() - - splitter, splitterExists := n.splitters[channel] - conduit, conduitExists := n.conduits[channel] - - if splitterExists != conduitExists { - return nil, errors.New("inconsistent state detected") - } - - channelRegistered := splitterExists && conduitExists - - if !channelRegistered { - // create new splitter for the channel - splitter = splitterEngine.New( - n.log, - channel, - ) - - n.splitters[channel] = splitter - } - - // register engine with splitter - splitter.RegisterEngine(engine) - - if !channelRegistered { - var err error - conduit, err = n.net.Register(channel, splitter) - - if err != nil { - // undo previous steps - splitter.UnregisterEngine(engine) - delete(n.splitters, channel) - - return nil, fmt.Errorf("failed to register splitter engine on channel %s: %w", channel, err) - } - - n.conduits[channel] = conduit - } - - return conduit, nil -} diff --git a/engine/common/splitter/network/network_test.go b/engine/common/splitter/network/network_test.go deleted file mode 100644 index acc6564a1b1..00000000000 --- a/engine/common/splitter/network/network_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package network_test - -import ( - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/suite" - - splitternetwork "github.com/onflow/flow-go/engine/common/splitter/network" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" - "github.com/onflow/flow-go/utils/unittest" -) - -func getEvent() interface{} { - return struct { - foo string - }{ - foo: "bar", - } -} - -type Suite struct { - suite.Suite - - con *mocknetwork.Conduit - net *splitternetwork.Network - engines map[channels.Channel]network.MessageProcessor -} - -func (suite *Suite) SetupTest() { - net := new(mocknetwork.Network) - suite.con = new(mocknetwork.Conduit) - suite.engines = make(map[channels.Channel]network.MessageProcessor) - - net.On("Register", mock.AnythingOfType("channels.Channel"), mock.Anything).Run(func(args mock.Arguments) { - channel, _ := args.Get(0).(channels.Channel) - engine, ok := args.Get(1).(network.MessageProcessor) - suite.Assert().True(ok) - suite.engines[channel] = engine - }).Return(suite.con, nil) - - splitterNet := splitternetwork.NewNetwork(net, zerolog.Logger{}) - - suite.net = splitterNet -} - -func TestSplitterNetwork(t *testing.T) { - suite.Run(t, new(Suite)) -} - -// TestHappyPath tests a basic scenario with three channels and three engines -func (suite *Suite) TestHappyPath() { - id := unittest.IdentifierFixture() - event := getEvent() - - chan1 := channels.Channel("test-chan-1") - chan2 := channels.Channel("test-chan-2") - chan3 := channels.Channel("test-chan-3") - - engine1 := new(mocknetwork.Engine) - engine2 := new(mocknetwork.Engine) - engine3 := new(mocknetwork.Engine) - - con, err := suite.net.Register(chan1, engine1) - suite.Assert().Nil(err) - suite.Assert().Equal(suite.con, con) - con, err = suite.net.Register(chan1, engine2) - suite.Assert().Nil(err) - suite.Assert().Equal(suite.con, con) - - con, err = suite.net.Register(chan2, engine2) - suite.Assert().Nil(err) - suite.Assert().Equal(suite.con, con) - con, err = suite.net.Register(chan2, engine3) - suite.Assert().Nil(err) - 
suite.Assert().Equal(suite.con, con) - - con, err = suite.net.Register(chan3, engine1) - suite.Assert().Nil(err) - suite.Assert().Equal(suite.con, con) - con, err = suite.net.Register(chan3, engine2) - suite.Assert().Nil(err) - suite.Assert().Equal(suite.con, con) - con, err = suite.net.Register(chan3, engine3) - suite.Assert().Nil(err) - suite.Assert().Equal(suite.con, con) - - // Message sent on chan1 should be delivered to engine1 and engine2 - - engine1.On("Process", chan1, id, event).Return(nil).Once() - engine2.On("Process", chan1, id, event).Return(nil).Once() - - splitter, ok := suite.engines[chan1] - suite.Assert().True(ok) - - err = splitter.Process(chan1, id, event) - suite.Assert().Nil(err) - - engine1.AssertNumberOfCalls(suite.T(), "Process", 1) - engine2.AssertNumberOfCalls(suite.T(), "Process", 1) - engine3.AssertNumberOfCalls(suite.T(), "Process", 0) - - engine1.AssertExpectations(suite.T()) - engine2.AssertExpectations(suite.T()) - engine3.AssertExpectations(suite.T()) - - // Message sent on chan2 should be delivered to engine2 and engine3 - - engine2.On("Process", chan2, id, event).Return(nil).Once() - engine3.On("Process", chan2, id, event).Return(nil).Once() - - splitter, ok = suite.engines[chan2] - suite.Assert().True(ok) - - err = splitter.Process(chan2, id, event) - suite.Assert().Nil(err) - - engine1.AssertNumberOfCalls(suite.T(), "Process", 1) - engine2.AssertNumberOfCalls(suite.T(), "Process", 2) - engine3.AssertNumberOfCalls(suite.T(), "Process", 1) - - engine1.AssertExpectations(suite.T()) - engine2.AssertExpectations(suite.T()) - engine3.AssertExpectations(suite.T()) - - // Message sent on chan3 should be delivered to all engines - - engine1.On("Process", chan3, id, event).Return(nil).Once() - engine2.On("Process", chan3, id, event).Return(nil).Once() - engine3.On("Process", chan3, id, event).Return(nil).Once() - - splitter, ok = suite.engines[chan3] - suite.Assert().True(ok) - - err = splitter.Process(chan3, id, event) - suite.Assert().Nil(err) - - engine1.AssertNumberOfCalls(suite.T(), "Process", 2) - engine2.AssertNumberOfCalls(suite.T(), "Process", 3) - engine3.AssertNumberOfCalls(suite.T(), "Process", 2) - - engine1.AssertExpectations(suite.T()) - engine2.AssertExpectations(suite.T()) - engine3.AssertExpectations(suite.T()) -} diff --git a/engine/common/stop/stop_control.go b/engine/common/stop/stop_control.go new file mode 100644 index 00000000000..2655a22928f --- /dev/null +++ b/engine/common/stop/stop_control.go @@ -0,0 +1,157 @@ +package stop + +import ( + "fmt" + + "github.com/coreos/go-semver/semver" + "github.com/rs/zerolog" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/irrecoverable" +) + +type VersionMetadata struct { + // incompatibleBlockHeight is the height of the block that is incompatible with the current node version. + incompatibleBlockHeight uint64 + // updatedVersion is the expected node version to continue working with new blocks. + updatedVersion string +} + +// StopControl is responsible for managing the stopping behavior of the node +// when an incompatible block height is encountered. 
+type StopControl struct {
+ component.Component
+ cm *component.ComponentManager
+
+ log zerolog.Logger
+
+ versionData *atomic.Pointer[VersionMetadata]
+
+ // Notifier for new processed block height
+ processedHeightChannel chan uint64
+ // Signal channel to notify when processing is done
+ doneProcessingEvents chan struct{}
+
+ // Stores latest processed block height
+ lastProcessedHeight counters.StrictMonotonicCounter
+}
+
+// NewStopControl creates a new StopControl instance.
+//
+// Parameters:
+// - log: The logger used for logging.
+//
+// Returns:
+// - A pointer to the newly created StopControl instance.
+func NewStopControl(
+ log zerolog.Logger,
+) *StopControl {
+ sc := &StopControl{
+ log: log.With().
+ Str("component", "stop_control").
+ Logger(),
+ lastProcessedHeight: counters.NewMonotonicCounter(0),
+ versionData: atomic.NewPointer[VersionMetadata](nil),
+ processedHeightChannel: make(chan uint64),
+ doneProcessingEvents: make(chan struct{}),
+ }
+
+ sc.cm = component.NewComponentManagerBuilder().
+ AddWorker(sc.processEvents).
+ Build()
+ sc.Component = sc.cm
+
+ return sc
+}
+
+// OnVersionUpdate is called when a version update occurs.
+//
+// It updates the incompatible block height and the expected node version
+// based on the provided height and semver.
+//
+// Parameters:
+// - height: The block height that is incompatible with the current node version.
+// - version: The new semantic version object that is expected for compatibility.
+func (sc *StopControl) OnVersionUpdate(height uint64, version *semver.Version) {
+ // If the version was updated, store new version information
+ if version != nil {
+ sc.log.Info().
+ Uint64("height", height).
+ Str("semver", version.String()).
+ Msg("Received version update")
+
+ sc.versionData.Store(&VersionMetadata{
+ incompatibleBlockHeight: height,
+ updatedVersion: version.String(),
+ })
+ return
+ }
+
+ // If the version is nil but a notification was received, the version update was deleted.
+ sc.versionData.Store(nil)
+}
+
+// onProcessedBlock is called when a new block is processed.
+// When the last compatible block is processed, the StopControl causes the node to crash.
+//
+// Parameters:
+// - ctx: The context used to signal an irrecoverable error.
+func (sc *StopControl) onProcessedBlock(ctx irrecoverable.SignalerContext) {
+ versionData := sc.versionData.Load()
+ if versionData == nil {
+ return
+ }
+
+ newHeight := sc.lastProcessedHeight.Value()
+ if newHeight >= versionData.incompatibleBlockHeight-1 {
+ ctx.Throw(fmt.Errorf("processed block at height %d is incompatible with the current node version, please upgrade to version %s starting from block height %d",
+ newHeight, versionData.updatedVersion, versionData.incompatibleBlockHeight))
+ }
+}
+
+// updateProcessedHeight updates the last processed height and triggers notifications.
+//
+// Parameters:
+// - height: The height of the latest processed block.
+func (sc *StopControl) updateProcessedHeight(height uint64) {
+ select {
+ case sc.processedHeightChannel <- height: // Successfully sent the height to the channel
+ case <-sc.doneProcessingEvents: // Process events are done, do not block
+ }
+}
+
+// RegisterHeightRecorder registers an execution data height recorder with the StopControl.
+//
+// Parameters:
+// - recorder: The execution data height recorder to register.
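
For context, a minimal sketch of how a consumer of this component might wire it up; log and recorder are placeholders for dependencies constructed elsewhere, and the heights are illustrative only:

    // log is a zerolog.Logger and recorder a ProcessedHeightRecorder
    // obtained elsewhere; both are placeholders for this sketch.
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    signalerCtx, _ := irrecoverable.WithSignaler(ctx)

    sc := stop.NewStopControl(log)
    sc.Start(signalerCtx) // runs processEvents via the ComponentManager

    // Blocks from height 100 on require the new version, so height 99 is
    // the last compatible block: processing it makes onProcessedBlock
    // throw (note the `>= incompatibleBlockHeight-1` check above).
    sc.OnVersionUpdate(100, semver.New("2.0.0"))
    sc.RegisterHeightRecorder(recorder)
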
+func (sc *StopControl) RegisterHeightRecorder(recorder execution_data.ProcessedHeightRecorder) { + recorder.SetHeightUpdatesConsumer(sc.updateProcessedHeight) +} + +// processEvents processes incoming events related to block heights and version updates. +// +// Parameters: +// - ctx: The context used to handle irrecoverable errors. +// - ready: A function to signal that the component is ready to start processing events. +func (sc *StopControl) processEvents(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + + defer close(sc.doneProcessingEvents) // Ensure the signal channel is closed when done + + for { + select { + case <-ctx.Done(): + return + case height, ok := <-sc.processedHeightChannel: + if !ok { + return + } + if sc.lastProcessedHeight.Set(height) { + sc.onProcessedBlock(ctx) + } + } + } +} diff --git a/engine/common/stop/stop_control_test.go b/engine/common/stop/stop_control_test.go new file mode 100644 index 00000000000..75d5c6e8026 --- /dev/null +++ b/engine/common/stop/stop_control_test.go @@ -0,0 +1,138 @@ +package stop + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/coreos/go-semver/semver" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/utils/unittest" +) + +// RunWithStopControl is a helper function that creates a StopControl instance and runs the provided test function with it. +// +// Parameters: +// - t: The testing context. +// - f: A function that takes a MockSignalerContext and a StopControl, used to run the test logic. +func RunWithStopControl(t *testing.T, f func(ctx *irrecoverable.MockSignalerContext, sc *StopControl)) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + signalerContext := irrecoverable.NewMockSignalerContext(t, ctx) + + f(signalerContext, createStopControl(t, signalerContext)) +} + +// createStopControl creates and starts a new StopControl instance. +// +// Parameters: +// - t: The testing context. +// - signalerContext: The mock context used to simulate signaler behavior. +// +// Returns: +// - A pointer to the newly created and started StopControl instance. +func createStopControl(t *testing.T, signalerContext *irrecoverable.MockSignalerContext) *StopControl { + sc := NewStopControl(zerolog.Nop()) + assert.NotNil(t, sc) + + // Start the StopControl component. + sc.Start(signalerContext) + + return sc +} + +// TestNewStopControl verifies that a new StopControl instance is created correctly and its components are ready. +// +// This test ensures that the StopControl can be initialized and started properly, and that all components are ready +// within a specified time frame. +func TestNewStopControl(t *testing.T) { + RunWithStopControl(t, func(_ *irrecoverable.MockSignalerContext, sc *StopControl) { + unittest.RequireComponentsReadyBefore(t, 2*time.Second, sc) + }) +} + +// TestStopControl_OnVersionUpdate tests the OnVersionUpdate method of the StopControl. +// +// This test covers two scenarios: +// 1. When a valid version update is received, it checks that the version data is stored correctly. +// 2. When a nil version is provided, it checks that the version data is cleared. 
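
As background for the tests that follow: the height pipeline is recorder callback, then updateProcessedHeight, then processedHeightChannel, then processEvents, with the strict monotonic counter dropping stale heights. A toy version of that gating, standard library only (not the engine's code):

    package main

    import "fmt"

    // monotonic mimics counters.StrictMonotonicCounter.Set: it returns true
    // only when the new height strictly increases the stored value, so a
    // late-arriving lower height never re-triggers the compatibility check.
    type monotonic struct{ v uint64 }

    func (m *monotonic) set(h uint64) bool {
    	if h <= m.v {
    		return false
    	}
    	m.v = h
    	return true
    }

    func main() {
    	var c monotonic
    	for _, h := range []uint64{10, 9, 11} { // 9 arrives out of order
    		if c.set(h) {
    			fmt.Println("run compatibility check at height", h) // 10, then 11
    		}
    	}
    }
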
+func TestStopControl_OnVersionUpdate(t *testing.T) { + RunWithStopControl(t, func(_ *irrecoverable.MockSignalerContext, sc *StopControl) { + + // Case 1: Version is updated + height := uint64(100) + version := semver.New("1.0.0") + + sc.OnVersionUpdate(height, version) + + // Verify that the version data is correctly stored. + versionData := sc.versionData.Load() + assert.NotNil(t, versionData) + assert.Equal(t, height, versionData.incompatibleBlockHeight) + assert.Equal(t, "1.0.0", versionData.updatedVersion) + + // Case 2: Version update is deleted (nil version) + sc.OnVersionUpdate(0, nil) + + // Verify that the version data is cleared. + versionData = sc.versionData.Load() + assert.Nil(t, versionData) + }) +} + +// TestStopControl_OnProcessedBlock tests the onProcessedBlock method of the StopControl. +// +// This test covers multiple scenarios related to processing block heights: +// 1. Verifying that the processed height is updated correctly. +// 2. Ensuring that a lower processed height cannot overwrite a higher one. +// 3. Testing that the StopControl correctly triggers an irrecoverable error (via Throw) when the incompatible block height is reached. +func TestStopControl_OnProcessedBlock(t *testing.T) { + RunWithStopControl(t, func(ctx *irrecoverable.MockSignalerContext, sc *StopControl) { + // Initial block height + height := uint64(10) + + // Update processed height and verify it's stored correctly. + sc.updateProcessedHeight(height) + assert.Equal(t, height, sc.lastProcessedHeight.Value()) + + // Attempt to set a lower processed height, which should not be allowed. + sc.updateProcessedHeight(height - 1) + assert.Equal(t, height, sc.lastProcessedHeight.Value()) + + // Set version metadata with an incompatible height and verify the processed height behavior. + incompatibleHeight := uint64(13) + version := semver.New("1.0.0") + + sc.OnVersionUpdate(incompatibleHeight, version) + height = incompatibleHeight - 2 + sc.updateProcessedHeight(height) + assert.Equal(t, height, sc.lastProcessedHeight.Value()) + + // Prepare to trigger the Throw method when the incompatible block height is processed. + height = incompatibleHeight - 1 + + var wg sync.WaitGroup + wg.Add(1) + + // Expected error message when the incompatible block height is processed. + expectedError := fmt.Errorf("processed block at height %d is incompatible with the current node version, please upgrade to version %s starting from block height %d", height, version.String(), incompatibleHeight) + + // Set expectation that the Throw method will be called with the expected error. + ctx.On("Throw", expectedError).Run(func(args mock.Arguments) { wg.Done() }).Return().Once() + + // Update the processed height to the incompatible height and wait for Throw to be called. + sc.updateProcessedHeight(height) + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "expect for ctx.Throw before timeout") + + // Verify that the processed height and the Throw method call are correct. 
+ assert.Equal(t, height, sc.lastProcessedHeight.Value()) + ctx.AssertCalled(t, "Throw", expectedError) + }) +} diff --git a/engine/common/synchronization/config.go b/engine/common/synchronization/config.go index 59d8c4dc1ea..48ecf8b94a5 100644 --- a/engine/common/synchronization/config.go +++ b/engine/common/synchronization/config.go @@ -1,8 +1,10 @@ package synchronization import ( + "fmt" "time" + "github.com/onflow/flow-go/config" core "github.com/onflow/flow-go/module/chainsync" ) @@ -36,3 +38,47 @@ func WithScanInterval(interval time.Duration) OptionFunc { cfg.ScanInterval = interval } } + +// spamProbabilityMultiplier is used to convert probability factor to an integer as well as a maximum value for the +// random number that can be generated by the random number generator. +const spamProbabilityMultiplier = 1000 + +// SpamDetectionConfig contains configuration parameters for spam detection for different message types. +// The probability of creating a misbehavior report for a message of a given type is calculated differently for different +// message types. +// MisbehaviourReports are generated for two reasons: +// 1. A malformed message will always produce a MisbehaviourReport, to notify ALSP of *unambiguous* spam. +// 2. A correctly formed message may produce a MisbehaviourReport probabilistically, to notify ALSP of *ambiguous* spam. +// This effectively tracks the load associated with a particular sender, for this engine, and, on average, +// reports message load proportionally as misbehaviour to ALSP. +type SpamDetectionConfig struct { + + // batchRequestBaseProb is the base probability in [0,1] that's used in creating the final probability of creating a + // misbehavior report for a BatchRequest message. This is why the word "base" is used in the name of this field, + // since it's not the final probability and there are other factors that determine the final probability. + // The reason for this is that we want to increase the probability of creating a misbehavior report for a large batch. + batchRequestBaseProb float32 + + // syncRequestProb is the probability in [0,1] of creating a misbehavior report for a SyncRequest message. + syncRequestProb float32 + + // rangeRequestBaseProb is the base probability in [0,1] that's used in creating the final probability of creating a + // misbehavior report for a RangeRequest message. This is why the word "base" is used in the name of this field, + // since it's not the final probability and there are other factors that determine the final probability. + // The reason for this is that we want to increase the probability of creating a misbehavior report for a large range. 
+ rangeRequestBaseProb float32 +} + +func NewSpamDetectionConfig() (*SpamDetectionConfig, error) { + flowConfig, err := config.DefaultConfig() + if err != nil { + return nil, fmt.Errorf("failed to read default config: %w", err) + } + + return &SpamDetectionConfig{ + // see config/default-config.yml for more information on the following fields + batchRequestBaseProb: flowConfig.NetworkConfig.SyncEngine.BatchRequestBaseProb, + syncRequestProb: flowConfig.NetworkConfig.SyncEngine.SyncRequestProb, + rangeRequestBaseProb: flowConfig.NetworkConfig.SyncEngine.RangeRequestBaseProb, + }, nil +} diff --git a/engine/common/synchronization/engine.go b/engine/common/synchronization/engine.go index 5dddac3644f..593951784e7 100644 --- a/engine/common/synchronization/engine.go +++ b/engine/common/synchronization/engine.go @@ -1,15 +1,14 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package synchronization import ( + "context" "fmt" - "math/rand" "time" "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/engine/consensus" @@ -18,12 +17,17 @@ import ( "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" synccore "github.com/onflow/flow-go/module/chainsync" - "github.com/onflow/flow-go/module/lifecycle" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/events" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/alsp" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/rand" ) // defaultSyncResponseQueueCapacity maximum capacity of sync responses queue @@ -34,40 +38,45 @@ const defaultBlockResponseQueueCapacity = 500 // Engine is the synchronization engine, responsible for synchronizing chain state. type Engine struct { - // TODO replace engine.Unit and lifecycle.LifecycleManager with component.ComponentManager - unit *engine.Unit - lm *lifecycle.LifecycleManager - log zerolog.Logger - metrics module.EngineMetrics - me module.Local - con network.Conduit - blocks storage.Blocks - comp consensus.Compliance + component.Component + hotstuff.FinalizationConsumer + + log zerolog.Logger + metrics module.EngineMetrics + me module.Local + finalizedHeaderCache module.FinalizedHeaderCache + con network.Conduit + blocks storage.Blocks + comp consensus.Compliance pollInterval time.Duration scanInterval time.Duration core module.SyncCore - state protocol.State participantsProvider module.IdentifierProvider - requestHandler *RequestHandler // component responsible for handling requests + requestHandler *RequestHandler // component responsible for handling requests + spamDetectionConfig *SpamDetectionConfig pendingSyncResponses engine.MessageStore // message store for *message.SyncResponse pendingBlockResponses engine.MessageStore // message store for *message.BlockResponse responseMessageHandler *engine.MessageHandler // message handler responsible for response processing } +var _ network.MessageProcessor = (*Engine)(nil) +var _ component.Component = (*Engine)(nil) + // New creates a new main chain synchronization engine. 
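
Before the engine itself, it may help to see how the probabilities configured above get applied: a uniformly random integer is compared against prob * spamProbabilityMultiplier (see the validate*ForALSP helpers later in this file). A self-contained sketch of that gate, substituting math/rand for flow-go's crypto-seeded utils/rand:

    package main

    import (
    	"fmt"
    	"math/rand"
    )

    const spamProbabilityMultiplier = 1000

    // shouldReport is true with probability roughly prob, at a resolution
    // of 1/1000: n is uniform over [0, 1000), and float32(n) < prob*1000
    // holds for prob*1000 of those values.
    func shouldReport(prob float32) bool {
    	n := rand.Intn(spamProbabilityMultiplier)
    	return float32(n) < prob*spamProbabilityMultiplier
    }

    func main() {
    	reports := 0
    	for i := 0; i < 100000; i++ {
    		if shouldReport(0.01) {
    			reports++
    		}
    	}
    	fmt.Printf("%d reports out of 100000 (~1%%)\n", reports)
    }
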
 func New(
 log zerolog.Logger,
 metrics module.EngineMetrics,
- net network.Network,
+ net network.EngineRegistry,
 me module.Local,
 state protocol.State,
 blocks storage.Blocks,
 comp consensus.Compliance,
 core module.SyncCore,
 participantsProvider module.IdentifierProvider,
+ spamDetectionConfig *SpamDetectionConfig,
 opts ...OptionFunc,
 ) (*Engine, error) {
@@ -80,25 +89,25 @@ func New(
 panic("must initialize synchronization engine with comp engine")
 }
 
+ finalizedHeaderCache, finalizedCacheWorker, err := events.NewFinalizedHeaderCache(state)
+ if err != nil {
+ return nil, fmt.Errorf("could not create finalized header cache: %w", err)
+ }
+
 // initialize the propagation engine with its dependencies
 e := &Engine{
- unit: engine.NewUnit(),
- lm: lifecycle.NewLifecycleManager(),
+ FinalizationConsumer: finalizedHeaderCache,
 log: log.With().Str("engine", "synchronization").Logger(),
 metrics: metrics,
 me: me,
- state: state,
+ finalizedHeaderCache: finalizedHeaderCache,
 blocks: blocks,
 comp: comp,
 core: core,
 pollInterval: opt.PollInterval,
 scanInterval: opt.ScanInterval,
 participantsProvider: participantsProvider,
- }
-
- err := e.setupResponseMessageHandler()
- if err != nil {
- return nil, fmt.Errorf("could not setup message handler")
+ spamDetectionConfig: spamDetectionConfig,
 }
 
 // register the engine with the network layer and store the conduit
@@ -107,8 +116,22 @@ func New(
 return nil, fmt.Errorf("could not register engine: %w", err)
 }
 e.con = con
+ e.requestHandler = NewRequestHandler(log, metrics, NewResponseSender(con), me, finalizedHeaderCache, blocks, core, true)
+
+ // set up worker routines
+ builder := component.NewComponentManagerBuilder().
+ AddWorker(finalizedCacheWorker).
+ AddWorker(e.checkLoop).
+ AddWorker(e.responseProcessingLoop)
+ for i := 0; i < defaultEngineRequestsWorkers; i++ {
+ builder.AddWorker(e.requestHandler.requestProcessingWorker)
+ }
+ e.Component = builder.Build()
 
- e.requestHandler = NewRequestHandler(log, metrics, NewResponseSender(con), me, state, blocks, core, true)
+ err = e.setupResponseMessageHandler()
+ if err != nil {
+ return nil, fmt.Errorf("could not set up message handler: %w", err)
+ }
 
 return e, nil
 }
@@ -139,7 +162,7 @@ func (e *Engine) setupResponseMessageHandler() error {
 engine.NewNotifier(),
 engine.Pattern{
 Match: func(msg *engine.Message) bool {
- _, ok := msg.Payload.(*messages.SyncResponse)
+ _, ok := msg.Payload.(*flow.SyncResponse)
 if ok {
 e.metrics.MessageReceived(metrics.EngineSynchronization, metrics.MessageSyncResponse)
 }
@@ -149,7 +172,7 @@ func (e *Engine) setupResponseMessageHandler() error {
 },
 engine.Pattern{
 Match: func(msg *engine.Message) bool {
- _, ok := msg.Payload.(*messages.BlockResponse)
+ _, ok := msg.Payload.(*flow.BlockResponse)
 if ok {
 e.metrics.MessageReceived(metrics.EngineSynchronization, metrics.MessageBlockResponse)
 }
@@ -162,58 +185,10 @@ func (e *Engine) setupResponseMessageHandler() error {
 return nil
 }
 
-// Ready returns a ready channel that is closed once the engine has fully started.
-func (e *Engine) Ready() <-chan struct{} {
- e.lm.OnStart(func() {
- e.unit.Launch(e.checkLoop)
- e.unit.Launch(e.responseProcessingLoop)
- // wait for request handler to startup
- <-e.requestHandler.Ready()
- })
- return e.lm.Started()
-}
-
-// Done returns a done channel that is closed once the engine has fully stopped.
-func (e *Engine) Done() <-chan struct{} { - e.lm.OnStop(func() { - // signal the request handler to shutdown - requestHandlerDone := e.requestHandler.Done() - // wait for request sending and response processing routines to exit - <-e.unit.Done() - // wait for request handler shutdown to complete - <-requestHandlerDone - }) - return e.lm.Stopped() -} - -// SubmitLocal submits an event originating on the local node. -func (e *Engine) SubmitLocal(event interface{}) { - err := e.process(e.me.NodeID(), event) - if err != nil { - // receiving an input of incompatible type from a trusted internal component is fatal - e.log.Fatal().Err(err).Msg("internal error processing event") - } -} - -// Submit submits the given event from the node with the given origin ID -// for processing in a non-blocking manner. It returns instantly and logs -// a potential processing error internally when done. -func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { - err := e.Process(channel, originID, event) - if err != nil { - e.log.Fatal().Err(err).Msg("internal error processing event") - } -} - -// ProcessLocal processes an event originating on the local node. -func (e *Engine) ProcessLocal(event interface{}) error { - return e.process(e.me.NodeID(), event) -} - // Process processes the given event from the node with the given origin ID in // a blocking manner. It returns the potential processing error when done. func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { - err := e.process(originID, event) + err := e.process(channel, originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { e.log.Warn().Msgf("%v delivered unsupported message %T through %v", originID, event, channel) @@ -228,11 +203,40 @@ func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, eve // Error returns: // - IncompatibleInputTypeError if input has unexpected type // - All other errors are potential symptoms of internal state corruption or bugs (fatal). 
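
The Ready/Done/Submit plumbing removed above is subsumed by component.ComponentManager, which the constructor now builds. Reduced to a sketch (assuming only the component and irrecoverable packages this file already imports), the replacement lifecycle pattern looks like this:

    type miniEngine struct {
    	component.Component // provides Start, Ready, Done
    }

    func newMiniEngine() *miniEngine {
    	e := &miniEngine{}
    	e.Component = component.NewComponentManagerBuilder().
    		AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
    			ready()      // signal this worker has started
    			<-ctx.Done() // run until shutdown is requested
    		}).
    		Build()
    	return e
    }
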
-func (e *Engine) process(originID flow.Identifier, event interface{}) error { - switch event.(type) { - case *messages.RangeRequest, *messages.BatchRequest, *messages.SyncRequest: - return e.requestHandler.process(originID, event) - case *messages.SyncResponse, *messages.BlockResponse: +func (e *Engine) process(channel channels.Channel, originID flow.Identifier, event interface{}) error { + switch message := event.(type) { + case *flow.BatchRequest: + err := e.validateBatchRequestForALSP(originID, message) + if err != nil { + irrecoverable.Throw(context.TODO(), fmt.Errorf("failed to validate batch request from %x: %w", originID[:], err)) + } + return e.requestHandler.Process(channel, originID, event) + case *flow.RangeRequest: + err := e.validateRangeRequestForALSP(originID, message) + if err != nil { + irrecoverable.Throw(context.TODO(), fmt.Errorf("failed to validate range request from %x: %w", originID[:], err)) + } + return e.requestHandler.Process(channel, originID, event) + + case *flow.SyncRequest: + err := e.validateSyncRequestForALSP(originID) + if err != nil { + irrecoverable.Throw(context.TODO(), fmt.Errorf("failed to validate sync request from %x: %w", originID[:], err)) + } + return e.requestHandler.Process(channel, originID, event) + + case *flow.BlockResponse: + err := e.validateBlockResponseForALSP(channel, originID, message) + if err != nil { + irrecoverable.Throw(context.TODO(), fmt.Errorf("failed to validate block response from %x: %w", originID[:], err)) + } + return e.responseMessageHandler.Process(originID, event) + + case *flow.SyncResponse: + err := e.validateSyncResponseForALSP(channel, originID, message) + if err != nil { + irrecoverable.Throw(context.TODO(), fmt.Errorf("failed to validate sync response from %x: %w", originID[:], err)) + } return e.responseMessageHandler.Process(originID, event) default: return fmt.Errorf("received input with type %T from %x: %w", event, originID[:], engine.IncompatibleInputTypeError) @@ -240,37 +244,40 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { } // responseProcessingLoop is a separate goroutine that performs processing of queued responses -func (e *Engine) responseProcessingLoop() { +func (e *Engine) responseProcessingLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + notifier := e.responseMessageHandler.GetNotifier() + done := ctx.Done() for { select { - case <-e.unit.Quit(): + case <-done: return case <-notifier: - e.processAvailableResponses() + e.processAvailableResponses(ctx) } } } // processAvailableResponses is processor of pending events which drives events from networking layer to business logic. 
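
responseProcessingLoop above follows the store-and-notify shape used across flow-go engines: Process enqueues into a message store and pokes a size-1 notifier channel, and a single worker drains until the queues are empty. The shape in isolation, as a standard-library sketch rather than the engine's code:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var (
    		mu    sync.Mutex
    		queue []string
    	)
    	notifier := make(chan struct{}, 1) // capacity 1 coalesces bursts into one wake-up

    	enqueue := func(msg string) {
    		mu.Lock()
    		queue = append(queue, msg)
    		mu.Unlock()
    		select {
    		case notifier <- struct{}{}: // wake the worker
    		default: // a wake-up is already pending; drop the duplicate
    		}
    	}

    	enqueue("sync response")
    	enqueue("block response")

    	done := make(chan struct{})
    	go func() {
    		defer close(done)
    		<-notifier // one notification suffices for both messages
    		for {      // drain until empty, like processAvailableResponses below
    			mu.Lock()
    			if len(queue) == 0 {
    				mu.Unlock()
    				return
    			}
    			msg := queue[0]
    			queue = queue[1:]
    			mu.Unlock()
    			fmt.Println("handled", msg)
    		}
    	}()
    	<-done
    }
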
-func (e *Engine) processAvailableResponses() {
+func (e *Engine) processAvailableResponses(ctx context.Context) {
 for {
 select {
- case <-e.unit.Quit():
+ case <-ctx.Done():
 return
 default:
 }
 
 msg, ok := e.pendingSyncResponses.Get()
 if ok {
- e.onSyncResponse(msg.OriginID, msg.Payload.(*messages.SyncResponse))
+ e.onSyncResponse(msg.OriginID, msg.Payload.(*flow.SyncResponse))
 e.metrics.MessageHandled(metrics.EngineSynchronization, metrics.MessageSyncResponse)
 continue
 }
 
 msg, ok = e.pendingBlockResponses.Get()
 if ok {
- e.onBlockResponse(msg.OriginID, msg.Payload.(*messages.BlockResponse))
+ e.onBlockResponse(msg.OriginID, msg.Payload.(*flow.BlockResponse))
 e.metrics.MessageHandled(metrics.EngineSynchronization, metrics.MessageBlockResponse)
 continue
 }
@@ -282,46 +289,46 @@ }
 
 // onSyncResponse processes a synchronization response.
-func (e *Engine) onSyncResponse(originID flow.Identifier, res *messages.SyncResponse) {
+func (e *Engine) onSyncResponse(originID flow.Identifier, res *flow.SyncResponse) {
 e.log.Debug().Str("origin_id", originID.String()).Msg("received sync response")
- final, err := e.state.Final().Head()
- if err != nil {
- e.log.Fatal().Err(err).Msg("unexpected fatal error retrieving latest finalized block")
- }
+ final := e.finalizedHeaderCache.Get()
 e.core.HandleHeight(final, res.Height)
 }
 
-// onBlockResponse processes a response containing a specifically requested block.
-func (e *Engine) onBlockResponse(originID flow.Identifier, res *messages.BlockResponse) {
- // process the blocks one by one
+// onBlockResponse processes a response containing structurally validated proposals for specifically requested blocks.
+func (e *Engine) onBlockResponse(originID flow.Identifier, res *flow.BlockResponse) {
+ // process the proposals one by one
 if len(res.Blocks) == 0 {
- e.log.Debug().Msg("received empty block response")
+ e.log.Debug().Msg("received empty block response")
 return
 }
 
- first := res.Blocks[0].Header.Height
- last := res.Blocks[len(res.Blocks)-1].Header.Height
- e.log.Debug().Uint64("first", first).Uint64("last", last).Msg("received block response")
+ proposals := res.Blocks
+ first := proposals[0].Block.Height
+ last := proposals[len(proposals)-1].Block.Height
+ e.log.Debug().Uint64("first", first).Uint64("last", last).Msg("received block response")
 
- filteredBlocks := make([]*messages.BlockProposal, 0, len(res.Blocks))
- for _, block := range res.Blocks {
- header := block.Header
- if !e.core.HandleBlock(&header) {
+ filteredProposals := make([]*flow.Proposal, 0, len(proposals))
+ for _, proposal := range proposals {
+ header := proposal.Block.ToHeader()
+ if !e.core.HandleBlock(header) {
 e.log.Debug().Uint64("height", header.Height).Msg("block handler rejected")
 continue
 }
- filteredBlocks = append(filteredBlocks, &messages.BlockProposal{Block: block})
+ filteredProposals = append(filteredProposals, &proposal)
 }
 
 // forward the block to the compliance engine for validation and processing
- e.comp.OnSyncedBlocks(flow.Slashable[[]*messages.BlockProposal]{
+ e.comp.OnSyncedBlocks(flow.Slashable[[]*flow.Proposal]{
 OriginID: originID,
- Message: filteredBlocks,
+ Message: filteredProposals,
 })
 }
 
 // checkLoop will regularly scan for items that need requesting.
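
checkLoop below keeps the pre-existing two-stage select that gives shutdown priority over timer ticks: when both the done channel and a tick are ready, the loop exits instead of scanning once more. The same pattern in isolation, with a plain context standing in for the SignalerContext:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 35*time.Millisecond)
    	defer cancel()

    	scan := time.NewTicker(10 * time.Millisecond)
    	defer scan.Stop()

    	for {
    		// Shutdown gets priority: if ctx is already done, exit without
    		// taking another tick, even when both channels are ready.
    		select {
    		case <-ctx.Done():
    			return
    		default:
    		}
    		select {
    		case <-ctx.Done():
    			return
    		case <-scan.C:
    			fmt.Println("scan pending ranges/batches")
    		}
    	}
    }
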
-func (e *Engine) checkLoop() { +func (e *Engine) checkLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + pollChan := make(<-chan time.Time) if e.pollInterval > 0 { poll := time.NewTicker(e.pollInterval) @@ -329,47 +336,49 @@ func (e *Engine) checkLoop() { defer poll.Stop() } scan := time.NewTicker(e.scanInterval) + defer scan.Stop() -CheckLoop: + done := ctx.Done() for { // give the quit channel a priority to be selected select { - case <-e.unit.Quit(): - break CheckLoop + case <-done: + return default: } select { - case <-e.unit.Quit(): - break CheckLoop + case <-done: + return case <-pollChan: e.pollHeight() case <-scan.C: - final, err := e.state.Final().Head() - if err != nil { - e.log.Fatal().Err(err).Msg("unexpected fatal error retrieving latest finalized block") - } + final := e.finalizedHeaderCache.Get() participants := e.participantsProvider.Identifiers() ranges, batches := e.core.ScanPending(final) e.sendRequests(participants, ranges, batches) } } - - // some minor cleanup - scan.Stop() } // pollHeight will send a synchronization request to three random nodes. func (e *Engine) pollHeight() { - final, err := e.state.Final().Head() + final := e.finalizedHeaderCache.Get() + participants := e.participantsProvider.Identifiers() + + nonce, err := rand.Uint64() if err != nil { - e.log.Fatal().Err(err).Msg("unexpected fatal error retrieving latest finalized block") + // TODO: this error should be returned by pollHeight() + // it is logged for now since the only error possible is related to a failure + // of the system entropy generation. Such error is going to cause failures in other + // components where it's handled properly and will lead to crashing the module. + e.log.Warn().Err(err).Msg("nonce generation failed during pollHeight") + return } - participants := e.participantsProvider.Identifiers() // send the request for synchronization req := &messages.SyncRequest{ - Nonce: rand.Uint64(), + Nonce: nonce, Height: final.Height, } e.log.Debug(). @@ -389,12 +398,21 @@ func (e *Engine) sendRequests(participants flow.IdentifierList, ranges []chainsy var errs *multierror.Error for _, ran := range ranges { + nonce, err := rand.Uint64() + if err != nil { + // TODO: this error should be returned by sendRequests + // it is logged for now since the only error possible is related to a failure + // of the system entropy generation. Such error is going to cause failures in other + // components where it's handled properly and will lead to crashing the module. + e.log.Error().Err(err).Msg("nonce generation failed during range request") + return + } req := &messages.RangeRequest{ - Nonce: rand.Uint64(), + Nonce: nonce, FromHeight: ran.From, ToHeight: ran.To, } - err := e.con.Multicast(req, synccore.DefaultBlockRequestNodes, participants...) + err = e.con.Multicast(req, synccore.DefaultBlockRequestNodes, participants...) if err != nil { errs = multierror.Append(errs, fmt.Errorf("could not submit range request: %w", err)) continue @@ -409,11 +427,20 @@ func (e *Engine) sendRequests(participants flow.IdentifierList, ranges []chainsy } for _, batch := range batches { + nonce, err := rand.Uint64() + if err != nil { + // TODO: this error should be returned by sendRequests + // it is logged for now since the only error possible is related to a failure + // of the system entropy generation. Such error is going to cause failures in other + // components where it's handled properly and will lead to crashing the module. 
+ e.log.Error().Err(err).Msg("nonce generation failed during batch request") + return + } req := &messages.BatchRequest{ - Nonce: rand.Uint64(), + Nonce: nonce, BlockIDs: batch.BlockIDs, } - err := e.con.Multicast(req, synccore.DefaultBlockRequestNodes, participants...) + err = e.con.Multicast(req, synccore.DefaultBlockRequestNodes, participants...) if err != nil { errs = multierror.Append(errs, fmt.Errorf("could not submit batch request: %w", err)) continue @@ -430,3 +457,197 @@ func (e *Engine) sendRequests(participants flow.IdentifierList, ranges []chainsy e.log.Warn().Err(err).Msg("sending range and batch requests failed") } } + +// validateBatchRequestForALSP checks if a batch request should be reported as a misbehavior and sends misbehavior report to ALSP. +// The misbehavior is due to either: +// 1. unambiguous malicious or incorrect behavior (0 block IDs) OR +// 2. large number of block IDs in batch request. This is more ambiguous to detect as malicious behavior because there is no way to know for sure +// if the sender is sending a large batch request maliciously or not, so we use a probabilistic approach to report the misbehavior. +// +// Args: +// - originID: the sender of the batch request +// - batchRequest: the batch request to validate +// Returns: +// - error: If an error is encountered while validating the batch request. Error is assumed to be irrecoverable because of internal processes that didn't allow validation to complete. +func (e *Engine) validateBatchRequestForALSP(originID flow.Identifier, batchRequest *flow.BatchRequest) error { + // Generate a random integer between 0 and spamProbabilityMultiplier (exclusive) + n, err := rand.Uint32n(spamProbabilityMultiplier) + if err != nil { + return fmt.Errorf("failed to generate random number from %x: %w", originID[:], err) + } + + // validity check: if no block IDs, always report as misbehavior + if len(batchRequest.BlockIDs) == 0 { + e.log.Warn(). + Hex("origin_id", logging.ID(originID)). + Str(logging.KeySuspicious, "true"). + Str("reason", alsp.InvalidMessage.String()). + Msg("received invalid batch request with 0 block IDs, creating ALSP report") + report, err := alsp.NewMisbehaviorReport(originID, alsp.InvalidMessage) + if err != nil { + // failing to create the misbehavior report is unlikely. If an error is encountered while + // creating the misbehavior report it indicates a bug and processing can not proceed. + return fmt.Errorf("failed to create misbehavior report (invalid batch request, no block IDs) from %x: %w", originID[:], err) + } + // failed unambiguous validation check and should be reported as misbehavior + e.con.ReportMisbehavior(report) + return nil + } + + // to avoid creating a misbehavior report for every batch request received, use a probabilistic approach. + // The larger the batch request and base probability, the higher the probability of creating a misbehavior report. 
+ + // batchRequestProb is calculated as follows: + // batchRequestBaseProb * (len(batchRequest.BlockIDs) + 1) / synccore.DefaultConfig().MaxSize + // Example 1 (small batch of block IDs) if the batch request is for 10 blocks IDs and batchRequestBaseProb is 0.01, then the probability of + // creating a misbehavior report is: + // batchRequestBaseProb * (10+1) / synccore.DefaultConfig().MaxSize + // = 0.01 * 11 / 64 = 0.00171875 = 0.171875% + // Example 2 (large batch of block IDs) if the batch request is for 1000 block IDs and batchRequestBaseProb is 0.01, then the probability of + // creating a misbehavior report is: + // batchRequestBaseProb * (1000+1) / synccore.DefaultConfig().MaxSize + // = 0.01 * 1001 / 64 = 0.15640625 = 15.640625% + batchRequestProb := e.spamDetectionConfig.batchRequestBaseProb * (float32(len(batchRequest.BlockIDs)) + 1) / float32(synccore.DefaultConfig().MaxSize) + if float32(n) < batchRequestProb*spamProbabilityMultiplier { + // create a misbehavior report + e.log.Debug(). + Hex("origin_id", logging.ID(originID)). + Str(logging.KeyLoad, "true"). + Str("reason", alsp.ResourceIntensiveRequest.String()). + Msgf("for %d block IDs, creating probabilistic ALSP report", len(batchRequest.BlockIDs)) + report, err := alsp.NewMisbehaviorReport(originID, alsp.ResourceIntensiveRequest) + if err != nil { + // failing to create the misbehavior report is unlikely. If an error is encountered while + // creating the misbehavior report it indicates a bug and processing can not proceed. + return fmt.Errorf("failed to create misbehavior report from %x: %w", originID[:], err) + } + // failed probabilistic (load) validation check and should be reported as misbehavior + e.con.ReportMisbehavior(report) + return nil + } + return nil +} + +// TODO: implement spam reporting similar to validateSyncRequestForALSP +func (e *Engine) validateBlockResponseForALSP(channel channels.Channel, id flow.Identifier, blockResponse *flow.BlockResponse) error { + return nil +} + +// validateRangeRequestForALSP checks if a range request should be reported as a misbehavior and sends misbehavior report to ALSP. +// The misbehavior is due to either: +// 1. unambiguous malicious or incorrect behavior (toHeight < fromHeight) OR +// 2. large height in range request. This is more ambiguous to detect as malicious behavior because there is no way to know for sure +// if the sender is sending a large range request height maliciously or not, so we use a probabilistic approach to report the misbehavior. +// +// Args: +// - originID: the sender of the range request +// - rangeRequest: the range request to validate +// Returns: +// - error: If an error is encountered while validating the range request. Error is assumed to be irrecoverable because of internal processes that didn't allow validation to complete. +func (e *Engine) validateRangeRequestForALSP(originID flow.Identifier, rangeRequest *flow.RangeRequest) error { + // Generate a random integer between 0 and spamProbabilityMultiplier (exclusive) + n, err := rand.Uint32n(spamProbabilityMultiplier) + if err != nil { + return fmt.Errorf("failed to generate random number from %x: %w", originID[:], err) + } + + // check if range request is valid + if rangeRequest.ToHeight < rangeRequest.FromHeight { + e.log.Warn(). + Hex("origin_id", logging.ID(originID)). + Str(logging.KeySuspicious, "true"). + Str("reason", alsp.InvalidMessage.String()). 
+ Msgf("received invalid range request from height %d is not less than the to height %d, creating ALSP report", rangeRequest.FromHeight, rangeRequest.ToHeight) + report, err := alsp.NewMisbehaviorReport(originID, alsp.InvalidMessage) + if err != nil { + // failing to create the misbehavior report is unlikely. If an error is encountered while + // creating the misbehavior report it indicates a bug and processing can not proceed. + return fmt.Errorf("failed to create misbehavior report (invalid range request) from %x: %w", originID[:], err) + } + // failed unambiguous validation check and should be reported as misbehavior + e.con.ReportMisbehavior(report) + return nil + } + + // to avoid creating a misbehavior report for every range request received, use a probabilistic approach. + // The higher the range request and base probability, the higher the probability of creating a misbehavior report. + + // rangeRequestProb is calculated as follows: + // rangeRequestBaseProb * ((rangeRequest.ToHeight-rangeRequest.FromHeight) + 1) / synccore.DefaultConfig().MaxSize + // Example 1 (small range) if the range request is for 10 blocks and rangeRequestBaseProb is 0.01, then the probability of + // creating a misbehavior report is: + // rangeRequestBaseProb * (10+1) / synccore.DefaultConfig().MaxSize + // = 0.01 * 11 / 64 = 0.00171875 = 0.171875% + // Example 2 (large range) if the range request is for 1000 blocks and rangeRequestBaseProb is 0.01, then the probability of + // creating a misbehavior report is: + // rangeRequestBaseProb * (1000+1) / synccore.DefaultConfig().MaxSize + // = 0.01 * 1001 / 64 = 0.15640625 = 15.640625% + rangeRequestProb := e.spamDetectionConfig.rangeRequestBaseProb * (float32(rangeRequest.ToHeight-rangeRequest.FromHeight) + 1) / float32(synccore.DefaultConfig().MaxSize) + if float32(n) < rangeRequestProb*spamProbabilityMultiplier { + // create a misbehavior report + e.log.Debug(). + Hex("origin_id", logging.ID(originID)). + Str(logging.KeyLoad, "true"). + Str("reason", alsp.ResourceIntensiveRequest.String()). + Msgf("from height %d to height %d, creating probabilistic ALSP report", rangeRequest.FromHeight, rangeRequest.ToHeight) + report, err := alsp.NewMisbehaviorReport(originID, alsp.ResourceIntensiveRequest) + if err != nil { + // failing to create the misbehavior report is unlikely. If an error is encountered while + // creating the misbehavior report it indicates a bug and processing can not proceed. + return fmt.Errorf("failed to create misbehavior report from %x: %w", originID[:], err) + } + // failed validation check and should be reported as misbehavior + + // failed probabilistic (load) validation check and should be reported as misbehavior + e.con.ReportMisbehavior(report) + return nil + } + + // passed all validation checks with no misbehavior detected + return nil +} + +// validateSyncRequestForALSP checks if a sync request should be reported as a misbehavior and sends misbehavior report to ALSP. +// The misbehavior is ambiguous to detect as malicious behavior because there is no way to know for sure if the sender is sending +// a sync request maliciously or not, so we use a probabilistic approach to report the misbehavior. +// +// Args: +// - originID: the sender of the sync request +// Returns: +// - error: If an error is encountered while validating the sync request. Error is assumed to be irrecoverable because of internal processes that didn't allow validation to complete. 
+func (e *Engine) validateSyncRequestForALSP(originID flow.Identifier) error { + // Generate a random integer between 0 and spamProbabilityMultiplier (exclusive) + n, err := rand.Uint32n(spamProbabilityMultiplier) + if err != nil { + return fmt.Errorf("failed to generate random number from %x: %w", originID[:], err) + } + + // to avoid creating a misbehavior report for every sync request received, use a probabilistic approach. + // Create a report with a probability of spamDetectionConfig.syncRequestProb + if float32(n) < e.spamDetectionConfig.syncRequestProb*spamProbabilityMultiplier { + + // create misbehavior report + e.log.Debug(). + Hex("origin_id", logging.ID(originID)). + Str(logging.KeyLoad, "true"). + Str("reason", alsp.ResourceIntensiveRequest.String()). + Msg("creating probabilistic ALSP report") + + report, err := alsp.NewMisbehaviorReport(originID, alsp.ResourceIntensiveRequest) + if err != nil { + // failing to create the misbehavior report is unlikely. If an error is encountered while + // creating the misbehavior report it indicates a bug and processing can not proceed. + return fmt.Errorf("failed to create misbehavior report from %x: %w", originID[:], err) + } + e.con.ReportMisbehavior(report) + return nil + } + + // passed all validation checks with no misbehavior detected + return nil +} + +// TODO: implement spam reporting similar to validateSyncRequestForALSP +func (e *Engine) validateSyncResponseForALSP(channel channels.Channel, id flow.Identifier, syncResponse *flow.SyncResponse) error { + return nil +} diff --git a/engine/common/synchronization/engine_spam_test.go b/engine/common/synchronization/engine_spam_test.go new file mode 100644 index 00000000000..91037a3c3ed --- /dev/null +++ b/engine/common/synchronization/engine_spam_test.go @@ -0,0 +1,395 @@ +package synchronization + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/chainsync" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/utils/rand" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestLoad_Process_SyncRequest_HigherThanReceiver_OutsideTolerance_AlwaysReportSpam is a load test that ensures that +// a misbehavior report is generated every time when the probability factor is set to 1.0. +// It checks that a sync request that's higher than the receiver's height doesn't trigger a response, even if outside tolerance. 
+func (ss *SyncSuite) TestLoad_Process_SyncRequest_HigherThanReceiver_OutsideTolerance_AlwaysReportSpam() { + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(ss.T(), context.Background()) + ss.e.Start(ctx) + unittest.AssertClosesBefore(ss.T(), ss.e.Ready(), time.Second) + defer cancel() + + load := 1000 + + // reset misbehavior report counter for each subtest + misbehaviorsCounter := 0 + + for i := 0; i < load; i++ { + // generate origin and request message + originID := unittest.IdentifierFixture() + + nonce, err := rand.Uint64() + require.NoError(ss.T(), err, "should generate nonce") + + req := &flow.SyncRequest{ + Nonce: nonce, + Height: 0, + } + + // if request height is higher than local finalized, we should not respond + req.Height = ss.head.Height + 1 + + ss.core.On("HandleHeight", ss.head, req.Height) + ss.core.On("WithinTolerance", ss.head, req.Height).Return(false) + ss.con.AssertNotCalled(ss.T(), "Unicast", mock.Anything, mock.Anything) + + // maybe function calls that might or might not occur over the course of the load test + ss.core.On("ScanPending", ss.head).Return([]chainsync.Range{}, []chainsync.Batch{}).Maybe() + ss.con.On("Multicast", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() + + // count misbehavior reports over the course of a load test + ss.con.On("ReportMisbehavior", mock.Anything).Return(mock.Anything).Run( + func(args mock.Arguments) { + misbehaviorsCounter++ + }, + ) + + // force creating misbehavior report by setting syncRequestProb to 1.0 (i.e. report misbehavior 100% of the time) + ss.e.spamDetectionConfig.syncRequestProb = 1.0 + + ss.metrics.On("MessageReceived", metrics.EngineSynchronization, metrics.MessageSyncRequest).Once() + require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, req)) + } + + ss.core.AssertExpectations(ss.T()) + ss.con.AssertExpectations(ss.T()) + ss.metrics.AssertExpectations(ss.T()) + assert.Equal(ss.T(), misbehaviorsCounter, load) // should generate misbehavior report every time +} + +// TestLoad_Process_SyncRequest_HigherThanReceiver_OutsideTolerance_SometimesReportSpam is a load test that ensures that a +// misbehavior report is generated an appropriate range of times when the probability factor is set to different values. +// It checks that a sync request that's higher than the receiver's height doesn't trigger a response, even if +// outside tolerance. +func (ss *SyncSuite) TestLoad_Process_SyncRequest_HigherThanReceiver_OutsideTolerance_SometimesReportSpam() { + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(ss.T(), context.Background()) + ss.e.Start(ctx) + unittest.AssertClosesBefore(ss.T(), ss.e.Ready(), time.Second) + defer cancel() + + load := 1000 + + // each load test is a load group that contains a set of factors with unique values to test how many misbehavior reports are generated + // Due to the probabilistic nature of how misbehavior reports are generated, we use an expected lower and + // upper range of expected misbehaviors to determine if the load test passed or failed. As long as the number of misbehavior reports + // falls within the expected range, the load test passes. 
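
The lower and upper bounds used in these load groups are consistent with treating the report count as Binomial(1000, p), i.e. mean 1000p and standard deviation sqrt(1000*p*(1-p)). A sketch that derives three-sigma windows for the probabilities used below; the three-sigma width is an illustrative assumption, and the PR's bounds themselves appear hand-tuned:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	const n = 1000.0
    	for _, p := range []float64{0.001, 0.01, 0.1, 0.5, 0.9} {
    		mean := n * p
    		sigma := math.Sqrt(n * p * (1 - p))
    		// e.g. p=0.5: mean 500, sigma ~15.8, window ~[453, 547],
    		// close to the [450, 550] bounds used in the test below.
    		fmt.Printf("p=%.3f mean=%.0f 3-sigma window=[%.0f, %.0f]\n",
    			p, mean, math.Max(0, mean-3*sigma), mean+3*sigma)
    	}
    }
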
+	type loadGroup struct {
+		syncRequestProbabilityFactor float32 // probability factor that will be used to generate misbehavior reports
+		expectedMisbehaviorsLower    int     // lower bound of expected misbehavior reports
+		expectedMisbehaviorsUpper    int     // upper bound of expected misbehavior reports
+	}
+
+	loadGroups := []loadGroup{}
+
+	// expect to never get misbehavior report
+	loadGroups = append(loadGroups, loadGroup{0.0, 0, 0})
+
+	// expect to get misbehavior report about 0.1% of the time (1 in 1000 requests)
+	loadGroups = append(loadGroups, loadGroup{0.001, 0, 7})
+
+	// expect to get misbehavior report about 1% of the time
+	loadGroups = append(loadGroups, loadGroup{0.01, 5, 15})
+
+	// expect to get misbehavior report about 10% of the time
+	loadGroups = append(loadGroups, loadGroup{0.1, 75, 140})
+
+	// expect to get misbehavior report about 50% of the time
+	loadGroups = append(loadGroups, loadGroup{0.5, 450, 550})
+
+	// expect to get misbehavior report about 90% of the time
+	loadGroups = append(loadGroups, loadGroup{0.9, 850, 950})
+
+	// reset misbehavior report counter for each subtest
+	misbehaviorsCounter := 0
+
+	for _, loadGroup := range loadGroups {
+		ss.T().Run(fmt.Sprintf("load test; pfactor=%f lower=%d upper=%d", loadGroup.syncRequestProbabilityFactor, loadGroup.expectedMisbehaviorsLower, loadGroup.expectedMisbehaviorsUpper), func(t *testing.T) {
+			for i := 0; i < load; i++ {
+				ss.T().Log("load iteration", i)
+				nonce, err := rand.Uint64()
+				require.NoError(ss.T(), err, "should generate nonce")
+
+				// generate origin and request message
+				originID := unittest.IdentifierFixture()
+				req := &flow.SyncRequest{
+					Nonce:  nonce,
+					Height: 0,
+				}
+
+				// if request height is higher than local finalized, we should not respond
+				req.Height = ss.head.Height + 1
+
+				ss.core.On("HandleHeight", ss.head, req.Height)
+				ss.core.On("WithinTolerance", ss.head, req.Height).Return(false)
+				ss.con.AssertNotCalled(ss.T(), "Unicast", mock.Anything, mock.Anything)
+
+				// mock calls registered with Maybe() may or may not occur over the course of the load test
+				ss.core.On("ScanPending", ss.head).Return([]chainsync.Range{}, []chainsync.Batch{}).Maybe()
+				ss.con.On("Multicast", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
+
+				// count misbehavior reports over the course of a load test
+				ss.con.On("ReportMisbehavior", mock.Anything).Return(mock.Anything).Maybe().Run(
+					func(args mock.Arguments) {
+						misbehaviorsCounter++
+					},
+				)
+				ss.e.spamDetectionConfig.syncRequestProb = loadGroup.syncRequestProbabilityFactor
+				ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageSyncRequest).Maybe()
+				ss.metrics.On("MessageReceived", metrics.EngineSynchronization, metrics.MessageSyncRequest).Once()
+				require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, req))
+			}
+
+			// check function call expectations at the end of the load test; otherwise, the load test would take much longer
+			ss.core.AssertExpectations(ss.T())
+			ss.con.AssertExpectations(ss.T())
+			ss.metrics.AssertExpectations(ss.T())
+
+			// check that the correct range of misbehavior reports was generated:
+			// since we're using a probabilistic approach to generate misbehavior reports, we can't guarantee the exact number,
+			// so we check that it's within the expected range
+			ss.T().Logf("misbehaviors counter after load test: %d (expected lower bound: %d expected upper bound: %d)", misbehaviorsCounter, loadGroup.expectedMisbehaviorsLower, loadGroup.expectedMisbehaviorsUpper)
+			assert.GreaterOrEqual(ss.T(), misbehaviorsCounter, loadGroup.expectedMisbehaviorsLower)
+			assert.LessOrEqual(ss.T(), misbehaviorsCounter, loadGroup.expectedMisbehaviorsUpper)
+
+			misbehaviorsCounter = 0 // reset counter for next subtest
+		})
+	}
+}
+
+// TestLoad_Process_RangeRequest_SometimesReportSpam is a load test that ensures that a misbehavior report is generated
+// a number of times within an expected range when the base probability factor and range are set to different values.
+func (ss *SyncSuite) TestLoad_Process_RangeRequest_SometimesReportSpam() {
+	ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(ss.T(), context.Background())
+	ss.e.Start(ctx)
+	unittest.AssertClosesBefore(ss.T(), ss.e.Ready(), time.Second)
+	defer cancel()
+
+	load := 1000
+
+	// each load group contains a set of factor values to test how many misbehavior reports are generated.
+	// Due to the probabilistic nature of how misbehavior reports are generated, we use expected lower and
+	// upper bounds on the number of misbehaviors to determine whether the load test passed or failed. As long
+	// as the number of misbehavior reports falls within the expected range, the load test passes.
+	type loadGroup struct {
+		rangeRequestBaseProb      float32 // base probability factor that will be used to calculate the final probability factor
+		expectedMisbehaviorsLower int     // lower bound of expected misbehavior reports
+		expectedMisbehaviorsUpper int     // upper bound of expected misbehavior reports
+		fromHeight                uint64  // from height of the range request
+		toHeight                  uint64  // to height of the range request
+	}
+
+	loadGroups := []loadGroup{}
+
+	// using a very small range (1) with a 10% base probability factor, expect to almost never get a misbehavior report, about 0.3% of the time (3 in 1000 requests)
+	// expected probability factor: 0.1 * ((10-9) + 1)/64 = 0.003125
+	loadGroups = append(loadGroups, loadGroup{0.1, 0, 15, 9, 10})
+
+	// using a small range (10) with a 10% base probability factor, expect to get a misbehavior report about 1.7% of the time (17 in 1000 requests)
+	// expected probability factor: 0.1 * ((11-1) + 1)/64 = 0.0171875
+	loadGroups = append(loadGroups, loadGroup{0.1, 5, 31, 1, 11})
+
+	// using a large range (99) with a 10% base probability factor, expect to get a misbehavior report about 15% of the time (150 in 1000 requests)
+	// expected probability factor: 0.1 * ((100-1) + 1)/64 = 0.15625
+	loadGroups = append(loadGroups, loadGroup{0.1, 110, 200, 1, 100})
+
+	// using a flat range (0) (from height == to height) with a 1% base probability factor, expect to almost never get a misbehavior report, about 0.016% of the time (roughly 0.2 in 1000 requests)
+	// expected probability factor: 0.01 * ((1-1) + 1)/64 = 0.00015625
+	// Note: the expected upper misbehavior count is 5 even though the expected probability is close to 0;
+	// this covers outlier cases during the load test and avoids flakiness in CI. Due to the probabilistic
+	// nature of the load tests, edge cases occur where, out of 1000 messages, up to 5 are reported as spam:
+	// 5/1000 = 0.005, roughly 32x the calculated probability of 0.00015625.
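+	// (The expected-probability comments above and below follow the formula
+	// finalProb = rangeRequestBaseProb * ((toHeight - fromHeight) + 1) / 64, where 64 is assumed to be
+	// the default sync MaxSize; any finalProb >= 1 means a report is created for every request.)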
+	loadGroups = append(loadGroups, loadGroup{0.01, 0, 5, 1, 1})
+
+	// using a small range (10) with a 1% base probability factor, expect to almost never get a misbehavior report, about 0.17% of the time (2 in 1000 requests)
+	// expected probability factor: 0.01 * ((11-1) + 1)/64 = 0.00171875
+	loadGroups = append(loadGroups, loadGroup{0.01, 0, 7, 1, 11})
+
+	// using a very large range (999) with a 1% base probability factor, expect to get a misbehavior report about 15% of the time (150 in 1000 requests)
+	// expected probability factor: 0.01 * ((1000-1) + 1)/64 = 0.15625
+	loadGroups = append(loadGroups, loadGroup{0.01, 110, 200, 1, 1000})
+
+	// ALWAYS REPORT SPAM FOR INVALID RANGE REQUESTS OR RANGE REQUESTS THAT ARE FAR OUTSIDE OF THE TOLERANCE
+
+	// using an inverted range (from height > to height) always results in a misbehavior report, no matter how small the range is or how small the base probability factor is
+	loadGroups = append(loadGroups, loadGroup{0.001, 1000, 1000, 2, 1})
+
+	// using a very large range (999) with a 10% base probability factor, expect to get a misbehavior report 100% of the time (1000 in 1000 requests)
+	// expected probability factor: 0.1 * ((1000-1) + 1)/64 = 1.5625
+	loadGroups = append(loadGroups, loadGroup{0.1, 1000, 1000, 1, 1000})
+
+	// reset misbehavior report counter for each subtest
+	misbehaviorsCounter := 0
+
+	for _, loadGroup := range loadGroups {
+		for i := 0; i < load; i++ {
+			ss.T().Log("load iteration", i)
+
+			nonce, err := rand.Uint64()
+			require.NoError(ss.T(), err, "should generate nonce")
+
+			// generate origin and request message
+			originID := unittest.IdentifierFixture()
+			req := &flow.RangeRequest{
+				Nonce:      nonce,
+				FromHeight: loadGroup.fromHeight,
+				ToHeight:   loadGroup.toHeight,
+			}
+
+			// mock calls registered with Maybe() may or may not occur over the course of the load test
+			ss.core.On("ScanPending", ss.head).Return([]chainsync.Range{}, []chainsync.Batch{}).Maybe()
+			ss.con.On("Multicast", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
+
+			// count misbehavior reports over the course of a load test
+			ss.con.On("ReportMisbehavior", mock.Anything).Return(mock.Anything).Maybe().Run(
+				func(args mock.Arguments) {
+					misbehaviorsCounter++
+				},
+			)
+			ss.e.spamDetectionConfig.rangeRequestBaseProb = loadGroup.rangeRequestBaseProb
+			ss.metrics.On("MessageReceived", metrics.EngineSynchronization, metrics.MessageRangeRequest).Once()
+			ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageSyncRequest).Maybe()
+			require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, req))
+		}
+		// check function call expectations at the end of the load test; otherwise, the load test would take much longer
+		ss.core.AssertExpectations(ss.T())
+		ss.con.AssertExpectations(ss.T())
+		ss.metrics.AssertExpectations(ss.T())
+
+		// check that the correct range of misbehavior reports was generated:
+		// since we're using a probabilistic approach to generate misbehavior reports, we can't guarantee the exact number,
+		// so we check that it's within the expected range
+		ss.T().Logf("misbehaviors counter after load test: %d (expected lower bound: %d expected upper bound: %d)", misbehaviorsCounter, loadGroup.expectedMisbehaviorsLower, loadGroup.expectedMisbehaviorsUpper)
+		assert.GreaterOrEqual(ss.T(), misbehaviorsCounter, loadGroup.expectedMisbehaviorsLower)
+		assert.LessOrEqual(ss.T(), misbehaviorsCounter, loadGroup.expectedMisbehaviorsUpper)
+
+		misbehaviorsCounter = 0 // reset counter for next subtest
+	}
+}
+
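+// expectedMeanReports is a purely illustrative helper (hypothetical; nothing in this suite calls it):
+// it computes the mean number of misbehavior reports the load groups above anticipate for a given
+// final probability factor over `load` requests, i.e. the binomial mean load * min(finalProb, 1).
+func expectedMeanReports(finalProb float32, load int) float64 {
+	p := float64(finalProb)
+	if p > 1 {
+		p = 1 // final probability factors >= 1 behave as "always report"
+	}
+	return p * float64(load)
+}
+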
+// TestLoad_Process_BatchRequest_SometimesReportSpam is a load test that ensures that a misbehavior report is generated
+// a number of times within an expected range when the base probability factor and number of block IDs are set to different values.
+func (ss *SyncSuite) TestLoad_Process_BatchRequest_SometimesReportSpam() {
+	ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(ss.T(), context.Background())
+	ss.e.Start(ctx)
+	unittest.AssertClosesBefore(ss.T(), ss.e.Ready(), time.Second)
+	defer cancel()
+
+	load := 1000
+
+	// each load group contains a set of factor values to test how many misbehavior reports are generated.
+	// Due to the probabilistic nature of how misbehavior reports are generated, we use expected lower and
+	// upper bounds on the number of misbehaviors to determine whether the load test passed or failed. As long
+	// as the number of misbehavior reports falls within the expected range, the load test passes.
+	type loadGroup struct {
+		batchRequestBaseProb      float32
+		expectedMisbehaviorsLower int
+		expectedMisbehaviorsUpper int
+		blockIDs                  []flow.Identifier
+	}
+
+	loadGroups := []loadGroup{}
+
+	// using a very small batch request (1 block ID) with a 10% base probability factor, expect to almost never get a misbehavior report, about 0.3% of the time (3 in 1000 requests)
+	// expected probability factor: 0.1 * (1 + 1)/64 = 0.003125
+	loadGroups = append(loadGroups, loadGroup{0.1, 0, 15, repeatedBlockIDs(1)})
+
+	// using a small batch request (10 block IDs) with a 10% base probability factor, expect to get a misbehavior report about 1.7% of the time (17 in 1000 requests)
+	// expected probability factor: 0.1 * (10 + 1)/64 = 0.0171875
+	loadGroups = append(loadGroups, loadGroup{0.1, 5, 31, repeatedBlockIDs(10)})
+
+	// using a large batch request (99 block IDs) with a 10% base probability factor, expect to get a misbehavior report about 15% of the time (150 in 1000 requests)
+	// expected probability factor: 0.1 * (99 + 1)/64 = 0.15625
+	loadGroups = append(loadGroups, loadGroup{0.1, 110, 200, repeatedBlockIDs(99)})
+
+	// using a small batch request (10 block IDs) with a 1% base probability factor, expect to almost never get a misbehavior report, about 0.17% of the time (2 in 1000 requests)
+	// expected probability factor: 0.01 * (10 + 1)/64 = 0.00171875
+	loadGroups = append(loadGroups, loadGroup{0.01, 0, 7, repeatedBlockIDs(10)})
+
+	// using a very large batch request (999 block IDs) with a 1% base probability factor, expect to get a misbehavior report about 15% of the time (150 in 1000 requests)
+	// expected probability factor: 0.01 * (999 + 1)/64 = 0.15625
+	loadGroups = append(loadGroups, loadGroup{0.01, 110, 200, repeatedBlockIDs(999)})
+
+	// ALWAYS REPORT SPAM FOR INVALID BATCH REQUESTS OR BATCH REQUESTS THAT ARE FAR OUTSIDE OF THE TOLERANCE
+
+	// using an empty batch request (0 block IDs) always results in a misbehavior report, no matter how small the base probability factor is
+	loadGroups = append(loadGroups, loadGroup{0.001, 1000, 1000, []flow.Identifier{}})
+
+	// using a very large batch request (999 block IDs) with a 10% base probability factor, expect to get a misbehavior report 100% of the time (1000 in 1000 requests)
+	// expected probability factor: 0.1 * (999 + 1)/64 = 1.5625
+	loadGroups = append(loadGroups, loadGroup{0.1, 1000, 1000, repeatedBlockIDs(999)})
+
+	// reset misbehavior report counter for each subtest
+	misbehaviorsCounter := 0
+	for _, loadGroup := range loadGroups {
+		for i := 0; i < load; i++ {
+			ss.T().Log("load iteration", i)
+
+			nonce, err := rand.Uint64()
+			require.NoError(ss.T(), err, "should generate nonce")
+
+			// generate origin and request message
+			originID := unittest.IdentifierFixture()
+			req := &flow.BatchRequest{
+				Nonce:    nonce,
+				BlockIDs: loadGroup.blockIDs,
+			}
+
+			// mock calls registered with Maybe() may or may not occur over the course of the load test
+			ss.core.On("ScanPending", ss.head).Return([]chainsync.Range{}, []chainsync.Batch{}).Maybe()
+			ss.con.On("Multicast", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe()
+
+			// count misbehavior reports over the course of a load test
+			ss.con.On("ReportMisbehavior", mock.Anything).Return(mock.Anything).Maybe().Run(
+				func(args mock.Arguments) {
+					misbehaviorsCounter++
+				},
+			)
+			ss.e.spamDetectionConfig.batchRequestBaseProb = loadGroup.batchRequestBaseProb
+			ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageSyncRequest).Maybe()
+			ss.metrics.On("MessageReceived", metrics.EngineSynchronization, metrics.MessageBatchRequest).Once()
+
+			require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, req))
+		}
+		// check function call expectations at the end of the load test; otherwise, the load test would take much longer
+		ss.core.AssertExpectations(ss.T())
+		ss.con.AssertExpectations(ss.T())
+		ss.metrics.AssertExpectations(ss.T())
+
+		// check that the correct range of misbehavior reports was generated:
+		// since we're using a probabilistic approach to generate misbehavior reports, we can't guarantee the exact number,
+		// so we check that it's within the expected range
+		ss.T().Logf("misbehaviors counter after load test: %d (expected lower bound: %d expected upper bound: %d)", misbehaviorsCounter, loadGroup.expectedMisbehaviorsLower, loadGroup.expectedMisbehaviorsUpper)
+		assert.GreaterOrEqual(ss.T(), misbehaviorsCounter, loadGroup.expectedMisbehaviorsLower)
+		assert.LessOrEqual(ss.T(), misbehaviorsCounter, loadGroup.expectedMisbehaviorsUpper)
+
+		misbehaviorsCounter = 0 // reset counter for next subtest
+	}
+}
+
+// repeatedBlockIDs returns a slice of n copies of the same block ID.
+func repeatedBlockIDs(n int) []flow.Identifier {
+	blockID := unittest.BlockFixture().ID()
+
+	arr := make([]flow.Identifier, n)
+	for i := 0; i < n; i++ {
+		arr[i] = blockID
+	}
+	return arr
+}
diff --git a/engine/common/synchronization/engine_suite_test.go b/engine/common/synchronization/engine_suite_test.go
new file mode 100644
index 00000000000..c0e496fcd73
--- /dev/null
+++ b/engine/common/synchronization/engine_suite_test.go
@@ -0,0 +1,167 @@
+package synchronization
+
+import (
+	"io"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	mockconsensus "github.com/onflow/flow-go/engine/consensus/mock"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/filter"
+	"github.com/onflow/flow-go/module/id"
+	module "github.com/onflow/flow-go/module/mock"
+	netint "github.com/onflow/flow-go/network"
+	"github.com/onflow/flow-go/network/channels"
+	mocknetwork "github.com/onflow/flow-go/network/mock"
+	"github.com/onflow/flow-go/network/p2p/cache"
+	protocolint "github.com/onflow/flow-go/state/protocol"
+	protocolEvents "github.com/onflow/flow-go/state/protocol/events"
+	protocol "github.com/onflow/flow-go/state/protocol/mock"
+	storerr "github.com/onflow/flow-go/storage"
+	storage "github.com/onflow/flow-go/storage/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestSyncEngine(t *testing.T) {
+	suite.Run(t,
new(SyncSuite)) +} + +type SyncSuite struct { + suite.Suite + myID flow.Identifier + participants flow.IdentityList + head *flow.Header + heights map[uint64]*flow.Proposal + blockIDs map[flow.Identifier]*flow.Proposal + net *mocknetwork.EngineRegistry + con *mocknetwork.Conduit + me *module.Local + state *protocol.State + snapshot *protocol.Snapshot + blocks *storage.Blocks + comp *mockconsensus.Compliance + core *module.SyncCore + metrics *module.EngineMetrics + e *Engine +} + +func (ss *SyncSuite) SetupTest() { + // generate own ID + ss.participants = unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)) + keys := unittest.NetworkingKeys(len(ss.participants)) + + for i, p := range ss.participants { + p.NetworkPubKey = keys[i].PublicKey() + } + ss.myID = ss.participants[0].NodeID + + // generate a header for the final state + header := unittest.BlockHeaderFixture() + ss.head = header + + // create maps to enable block returns + ss.heights = make(map[uint64]*flow.Proposal) + ss.blockIDs = make(map[flow.Identifier]*flow.Proposal) + + // set up the network module mock + ss.net = &mocknetwork.EngineRegistry{} + ss.net.On("Register", mock.Anything, mock.Anything).Return( + func(channel channels.Channel, engine netint.MessageProcessor) netint.Conduit { + return ss.con + }, + nil, + ) + + // set up the network conduit mock + ss.con = &mocknetwork.Conduit{} + + // set up the local module mock + ss.me = &module.Local{} + ss.me.On("NodeID").Return( + func() flow.Identifier { + return ss.myID + }, + ) + + // set up the protocol state mock + ss.state = &protocol.State{} + ss.state.On("Final").Return( + func() protocolint.Snapshot { + return ss.snapshot + }, + ) + ss.state.On("AtBlockID", mock.Anything).Return( + func(blockID flow.Identifier) protocolint.Snapshot { + if ss.head.ID() == blockID { + return ss.snapshot + } else { + return unittest.StateSnapshotForUnknownBlock() + } + }, + ).Maybe() + + // set up the snapshot mock + ss.snapshot = &protocol.Snapshot{} + ss.snapshot.On("Head").Return( + func() *flow.Header { + return ss.head + }, + nil, + ) + ss.snapshot.On("Identities", mock.Anything).Return( + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { + return ss.participants.Filter(selector) + }, + nil, + ) + + // set up blocks storage mock + ss.blocks = &storage.Blocks{} + ss.blocks.On("ProposalByHeight", mock.Anything).Return( + func(height uint64) (*flow.Proposal, error) { + block, enabled := ss.heights[height] + if !enabled { + return nil, storerr.ErrNotFound + } + return block, nil + }) + ss.blocks.On("ProposalByID", mock.Anything).Return( + func(blockID flow.Identifier) (*flow.Proposal, error) { + block, enabled := ss.blockIDs[blockID] + if !enabled { + return nil, storerr.ErrNotFound + } + return block, nil + }) + + // set up compliance engine mock + ss.comp = mockconsensus.NewCompliance(ss.T()) + ss.comp.On("Process", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() + + // set up sync core + ss.core = &module.SyncCore{} + + // initialize the engine + log := zerolog.New(io.Discard) + ss.metrics = new(module.EngineMetrics) + + idCache, err := cache.NewProtocolStateIDCache(log, ss.state, protocolEvents.NewDistributor()) + require.NoError(ss.T(), err, "could not create protocol state identity cache") + spamConfig, err := NewSpamDetectionConfig() + require.NoError(ss.T(), err, "could not create spam detection config") + e, err := New(log, ss.metrics, ss.net, ss.me, ss.state, ss.blocks, ss.comp, ss.core, + 
id.NewIdentityFilterIdentifierProvider( + filter.And( + filter.HasRole[flow.Identity](flow.RoleConsensus), + filter.Not(filter.HasNodeID[flow.Identity](ss.me.NodeID())), + ), + idCache, + ), + spamConfig) + require.NoError(ss.T(), err, "should pass engine initialization") + ss.e = e +} diff --git a/engine/common/synchronization/engine_test.go b/engine/common/synchronization/engine_test.go index e2eebd2aac4..2f5c399c6f8 100644 --- a/engine/common/synchronization/engine_test.go +++ b/engine/common/synchronization/engine_test.go @@ -1,212 +1,83 @@ package synchronization import ( - "io" + "context" "math" - "math/rand" "testing" "time" - "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/engine" - mockconsensus "github.com/onflow/flow-go/engine/consensus/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/messages" synccore "github.com/onflow/flow-go/module/chainsync" - "github.com/onflow/flow-go/module/id" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" - module "github.com/onflow/flow-go/module/mock" netint "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" - "github.com/onflow/flow-go/network/p2p/cache" - protocolint "github.com/onflow/flow-go/state/protocol" - protocolEvents "github.com/onflow/flow-go/state/protocol/events" - protocol "github.com/onflow/flow-go/state/protocol/mock" - storerr "github.com/onflow/flow-go/storage" - storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/rand" "github.com/onflow/flow-go/utils/unittest" ) -func TestSyncEngine(t *testing.T) { - suite.Run(t, new(SyncSuite)) -} - -type SyncSuite struct { - suite.Suite - myID flow.Identifier - participants flow.IdentityList - head *flow.Header - heights map[uint64]*flow.Block - blockIDs map[flow.Identifier]*flow.Block - net *mocknetwork.Network - con *mocknetwork.Conduit - me *module.Local - state *protocol.State - snapshot *protocol.Snapshot - blocks *storage.Blocks - comp *mockconsensus.Compliance - core *module.SyncCore - e *Engine -} - -func (ss *SyncSuite) SetupTest() { - // seed the RNG - rand.Seed(time.Now().UnixNano()) - - // generate own ID - ss.participants = unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus)) - keys := unittest.NetworkingKeys(len(ss.participants)) - - for i, p := range ss.participants { - p.NetworkPubKey = keys[i].PublicKey() - } - ss.myID = ss.participants[0].NodeID - - // generate a header for the final state - header := unittest.BlockHeaderFixture() - ss.head = header - - // create maps to enable block returns - ss.heights = make(map[uint64]*flow.Block) - ss.blockIDs = make(map[flow.Identifier]*flow.Block) - - // set up the network module mock - ss.net = &mocknetwork.Network{} - ss.net.On("Register", mock.Anything, mock.Anything).Return( - func(channel channels.Channel, engine netint.MessageProcessor) netint.Conduit { - return ss.con - }, - nil, - ) - - // set up the network conduit mock - ss.con = &mocknetwork.Conduit{} - - // set up the local module mock - ss.me = &module.Local{} - ss.me.On("NodeID").Return( - func() flow.Identifier { - return ss.myID - }, - ) - - // set up the protocol state mock - ss.state = &protocol.State{} - ss.state.On("Final").Return( - func() protocolint.Snapshot { - 
return ss.snapshot - }, - ) - ss.state.On("AtBlockID", mock.Anything).Return( - func(blockID flow.Identifier) protocolint.Snapshot { - if ss.head.ID() == blockID { - return ss.snapshot - } else { - return unittest.StateSnapshotForUnknownBlock() - } - }, - ).Maybe() - - // set up the snapshot mock - ss.snapshot = &protocol.Snapshot{} - ss.snapshot.On("Head").Return( - func() *flow.Header { - return ss.head - }, - nil, - ) - ss.snapshot.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { - return ss.participants.Filter(selector) - }, - nil, - ) - - // set up blocks storage mock - ss.blocks = &storage.Blocks{} - ss.blocks.On("ByHeight", mock.Anything).Return( - func(height uint64) *flow.Block { - return ss.heights[height] - }, - func(height uint64) error { - _, enabled := ss.heights[height] - if !enabled { - return storerr.ErrNotFound - } - return nil - }, - ) - ss.blocks.On("ByID", mock.Anything).Return( - func(blockID flow.Identifier) *flow.Block { - return ss.blockIDs[blockID] - }, - func(blockID flow.Identifier) error { - _, enabled := ss.blockIDs[blockID] - if !enabled { - return storerr.ErrNotFound - } - return nil - }, - ) - - // set up compliance engine mock - ss.comp = mockconsensus.NewCompliance(ss.T()) - ss.comp.On("Process", mock.Anything, mock.Anything, mock.Anything).Return(nil).Maybe() - - // set up sync core - ss.core = &module.SyncCore{} - - // initialize the engine - log := zerolog.New(io.Discard) - metrics := metrics.NewNoopCollector() - - idCache, err := cache.NewProtocolStateIDCache(log, ss.state, protocolEvents.NewDistributor()) - require.NoError(ss.T(), err, "could not create protocol state identity cache") - e, err := New(log, metrics, ss.net, ss.me, ss.state, ss.blocks, ss.comp, ss.core, - id.NewIdentityFilterIdentifierProvider( - filter.And( - filter.HasRole(flow.RoleConsensus), - filter.Not(filter.HasNodeID(ss.me.NodeID())), - ), - idCache, - )) - require.NoError(ss.T(), err, "should pass engine initialization") - - ss.e = e -} - -func (ss *SyncSuite) TestOnSyncRequest() { - +// TestOnSyncRequest_LowerThanReceiver_WithinTolerance tests that a sync request that's within tolerance of the receiver doesn't trigger +// a response, even if request height is lower than receiver. +func (ss *SyncSuite) TestOnSyncRequest_LowerThanReceiver_WithinTolerance() { + nonce, err := rand.Uint64() + require.NoError(ss.T(), err, "should generate nonce") // generate origin and request message originID := unittest.IdentifierFixture() - req := &messages.SyncRequest{ - Nonce: rand.Uint64(), + req := &flow.SyncRequest{ + Nonce: nonce, Height: 0, } // regardless of request height, if within tolerance, we should not respond ss.core.On("HandleHeight", ss.head, req.Height) ss.core.On("WithinTolerance", ss.head, req.Height).Return(true) - err := ss.e.requestHandler.onSyncRequest(originID, req) - ss.Assert().NoError(err, "same height sync request should pass") + ss.Assert().NoError(ss.e.requestHandler.onSyncRequest(originID, req)) ss.con.AssertNotCalled(ss.T(), "Unicast", mock.Anything, mock.Anything) + ss.core.AssertExpectations(ss.T()) +} + +// TestOnSyncRequest_HigherThanReceiver_OutsideTolerance tests that a sync request that's higher +// than the receiver's height doesn't trigger a response, even if outside tolerance. 
+func (ss *SyncSuite) TestOnSyncRequest_HigherThanReceiver_OutsideTolerance() { + nonce, err := rand.Uint64() + require.NoError(ss.T(), err, "should generate nonce") + // generate origin and request message + originID := unittest.IdentifierFixture() + req := &flow.SyncRequest{ + Nonce: nonce, + Height: 0, + } // if request height is higher than local finalized, we should not respond req.Height = ss.head.Height + 1 + ss.core.On("HandleHeight", ss.head, req.Height) ss.core.On("WithinTolerance", ss.head, req.Height).Return(false) - err = ss.e.requestHandler.onSyncRequest(originID, req) - ss.Assert().NoError(err, "same height sync request should pass") + ss.Assert().NoError(ss.e.requestHandler.onSyncRequest(originID, req)) ss.con.AssertNotCalled(ss.T(), "Unicast", mock.Anything, mock.Anything) + ss.core.AssertExpectations(ss.T()) +} + +// TestOnSyncRequest_LowerThanReceiver_OutsideTolerance tests that a sync request that's outside tolerance and +// lower than the receiver's height triggers a response. +func (ss *SyncSuite) TestOnSyncRequest_LowerThanReceiver_OutsideTolerance() { + nonce, err := rand.Uint64() + require.NoError(ss.T(), err, "should generate nonce") + + // generate origin and request message + originID := unittest.IdentifierFixture() + req := &flow.SyncRequest{ + Nonce: nonce, + Height: 0, + } - // if the request height is lower than head and outside tolerance, we should submit correct response + // if the request height is lower than head and outside tolerance, we should expect correct response req.Height = ss.head.Height - 1 ss.core.On("HandleHeight", ss.head, req.Height) ss.core.On("WithinTolerance", ss.head, req.Height).Return(false) @@ -219,19 +90,26 @@ func (ss *SyncSuite) TestOnSyncRequest() { assert.Equal(ss.T(), originID, recipientID, "should send response to original sender") }, ) + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageSyncResponse).Once() err = ss.e.requestHandler.onSyncRequest(originID, req) require.NoError(ss.T(), err, "smaller height sync request should pass") ss.core.AssertExpectations(ss.T()) + ss.metrics.AssertExpectations(ss.T()) } func (ss *SyncSuite) TestOnSyncResponse() { + nonce, err := rand.Uint64() + require.NoError(ss.T(), err, "should generate nonce") + + height, err := rand.Uint64() + require.NoError(ss.T(), err, "should generate height") // generate origin ID and response message originID := unittest.IdentifierFixture() - res := &messages.SyncResponse{ - Nonce: rand.Uint64(), - Height: rand.Uint64(), + res := &flow.SyncResponse{ + Nonce: nonce, + Height: height, } // the height should be handled @@ -241,11 +119,13 @@ func (ss *SyncSuite) TestOnSyncResponse() { } func (ss *SyncSuite) TestOnRangeRequest() { + nonce, err := rand.Uint64() + require.NoError(ss.T(), err, "should generate nonce") // generate originID and range request originID := unittest.IdentifierFixture() - req := &messages.RangeRequest{ - Nonce: rand.Uint64(), + req := &flow.RangeRequest{ + Nonce: nonce, FromHeight: 0, ToHeight: 0, } @@ -253,9 +133,11 @@ func (ss *SyncSuite) TestOnRangeRequest() { // fill in blocks at heights -1 to -4 from head ref := ss.head.Height for height := ref; height >= ref-4; height-- { - block := unittest.BlockFixture() - block.Header.Height = height - ss.heights[height] = &block + block := unittest.BlockFixture( + unittest.Block.WithHeight(height), + ) + ss.heights[height] = unittest.ProposalFromBlock(block) + ss.blockIDs[block.ID()] = ss.heights[height] } // empty range should be a no-op @@ -264,7 +146,7 @@ func (ss 
*SyncSuite) TestOnRangeRequest() { req.ToHeight = ref - 1 err := ss.e.requestHandler.onRangeRequest(originID, req) require.NoError(ss.T(), err, "empty range request should pass") - ss.con.AssertNumberOfCalls(ss.T(), "Unicast", 0) + ss.con.AssertNotCalled(ss.T(), "Unicast", mock.Anything, mock.Anything) }) // range with only unknown block should be a no-op @@ -273,7 +155,7 @@ func (ss *SyncSuite) TestOnRangeRequest() { req.ToHeight = ref + 3 err := ss.e.requestHandler.onRangeRequest(originID, req) require.NoError(ss.T(), err, "unknown range request should pass") - ss.con.AssertNumberOfCalls(ss.T(), "Unicast", 0) + ss.con.AssertNotCalled(ss.T(), "Unicast", mock.Anything, mock.Anything) }) // a request for same from and to should send single block @@ -283,16 +165,25 @@ func (ss *SyncSuite) TestOnRangeRequest() { ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Once().Run( func(args mock.Arguments) { res := args.Get(0).(*messages.BlockResponse) - expected := ss.heights[ref-1] - actual := res.Blocks[0].ToInternal() - assert.Equal(ss.T(), expected, actual, "response should contain right block") + expected := *ss.heights[ref-1] + internal, err := res.ToInternal() + require.NoError(t, err) + actual, ok := internal.(*flow.BlockResponse) + require.True(t, ok) + assert.Equal(ss.T(), expected, actual.Blocks[0], "response should contain right block") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "should send response to original requester") }, ) + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageBlockResponse).Once() err := ss.e.requestHandler.onRangeRequest(originID, req) require.NoError(ss.T(), err, "range request with higher to height should pass") + ss.con.AssertNumberOfCalls(ss.T(), "Unicast", 1) + ss.metrics.AssertExpectations(ss.T()) + + // clear any expectations for next test - otherwise, next subtest will fail due to increment of expected calls to Unicast + ss.con.Mock = mock.Mock{} }) // a request for a range that we partially have should send partial response @@ -302,15 +193,25 @@ func (ss *SyncSuite) TestOnRangeRequest() { ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Once().Run( func(args mock.Arguments) { res := args.Get(0).(*messages.BlockResponse) - expected := []*flow.Block{ss.heights[ref-2], ss.heights[ref-1], ss.heights[ref]} - assert.ElementsMatch(ss.T(), expected, res.BlocksInternal(), "response should contain right blocks") + expected := []flow.Proposal{*ss.heights[ref-2], *ss.heights[ref-1], *ss.heights[ref]} + internal, err := res.ToInternal() + require.NoError(t, err) + actual, ok := internal.(*flow.BlockResponse) + require.True(t, ok) + assert.ElementsMatch(ss.T(), expected, actual.Blocks, "response should contain right blocks") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "should send response to original requester") }, ) + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageBlockResponse).Once() err := ss.e.requestHandler.onRangeRequest(originID, req) require.NoError(ss.T(), err, "valid range with missing blocks should fail") + ss.con.AssertNumberOfCalls(ss.T(), "Unicast", 1) + ss.metrics.AssertExpectations(ss.T()) + + // clear any expectations for next test - otherwise, next subtest will fail due to increment of expected calls to Unicast + 
ss.con.Mock = mock.Mock{} }) // a request for a range we entirely have should send all blocks @@ -320,15 +221,25 @@ func (ss *SyncSuite) TestOnRangeRequest() { ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Once().Run( func(args mock.Arguments) { res := args.Get(0).(*messages.BlockResponse) - expected := []*flow.Block{ss.heights[ref-2], ss.heights[ref-1], ss.heights[ref]} - assert.ElementsMatch(ss.T(), expected, res.BlocksInternal(), "response should contain right blocks") + expected := []flow.Proposal{*ss.heights[ref-2], *ss.heights[ref-1], *ss.heights[ref]} + internal, err := res.ToInternal() + require.NoError(t, err) + actual, ok := internal.(*flow.BlockResponse) + require.True(t, ok) + assert.ElementsMatch(ss.T(), expected, actual.Blocks, "response should contain right blocks") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "should send response to original requester") }, ) + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageBlockResponse).Once() err := ss.e.requestHandler.onRangeRequest(originID, req) require.NoError(ss.T(), err, "valid range request should pass") + ss.con.AssertNumberOfCalls(ss.T(), "Unicast", 1) + ss.metrics.AssertExpectations(ss.T()) + + // clear any expectations for next test - otherwise, next subtest will fail due to increment of expected calls to Unicast + ss.con.Mock = mock.Mock{} }) // a request for a range larger than MaxSize should be clamped @@ -338,8 +249,12 @@ func (ss *SyncSuite) TestOnRangeRequest() { ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Once().Run( func(args mock.Arguments) { res := args.Get(0).(*messages.BlockResponse) - expected := []*flow.Block{ss.heights[ref-4], ss.heights[ref-3], ss.heights[ref-2]} - assert.ElementsMatch(ss.T(), expected, res.BlocksInternal(), "response should contain right blocks") + expected := []flow.Proposal{*ss.heights[ref-4], *ss.heights[ref-3], *ss.heights[ref-2]} + internal, err := res.ToInternal() + require.NoError(t, err) + actual, ok := internal.(*flow.BlockResponse) + require.True(t, ok) + assert.ElementsMatch(ss.T(), expected, actual.Blocks, "response should contain right blocks") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "should send response to original requester") @@ -353,17 +268,25 @@ func (ss *SyncSuite) TestOnRangeRequest() { ss.e.requestHandler.core, err = synccore.New(ss.e.log, config, metrics.NewNoopCollector(), flow.Localnet) require.NoError(ss.T(), err) + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageBlockResponse).Once() err = ss.e.requestHandler.onRangeRequest(originID, req) require.NoError(ss.T(), err, "valid range request exceeding max size should still pass") + ss.con.AssertNumberOfCalls(ss.T(), "Unicast", 1) + ss.metrics.AssertExpectations(ss.T()) + + // clear any expectations for next test - otherwise, next subtest will fail due to increment of expected calls to Unicast + ss.con.Mock = mock.Mock{} }) } func (ss *SyncSuite) TestOnBatchRequest() { + nonce, err := rand.Uint64() + require.NoError(ss.T(), err, "should generate nonce") // generate origin ID and batch request originID := unittest.IdentifierFixture() - req := &messages.BatchRequest{ - Nonce: rand.Uint64(), + req := &flow.BatchRequest{ + Nonce: nonce, BlockIDs: nil, } @@ -385,38 +308,53 @@ func 
(ss *SyncSuite) TestOnBatchRequest() { // a non-empty request for existing block IDs should send right response ss.T().Run("request for existing blocks", func(t *testing.T) { - block := unittest.BlockFixture() - block.Header.Height = ss.head.Height - 1 + block := unittest.BlockFixture( + unittest.Block.WithHeight(ss.head.Height - 1), + ) + proposal := unittest.ProposalFromBlock(block) req.BlockIDs = []flow.Identifier{block.ID()} - ss.blockIDs[block.ID()] = &block + ss.blockIDs[block.ID()] = proposal ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Run( func(args mock.Arguments) { res := args.Get(0).(*messages.BlockResponse) - assert.Equal(ss.T(), &block, res.Blocks[0].ToInternal(), "response should contain right block") + internal, err := res.ToInternal() + require.NoError(t, err) + actual, ok := internal.(*flow.BlockResponse) + require.True(t, ok) + assert.Equal(ss.T(), proposal, &actual.Blocks[0], "response should contain right block") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "response should be send to original requester") }, ).Once() + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageBlockResponse).Once() err := ss.e.requestHandler.onBatchRequest(originID, req) require.NoError(ss.T(), err, "should pass request with valid block") + + ss.metrics.AssertExpectations(ss.T()) }) // a request for too many blocks should be clamped ss.T().Run("oversized range", func(t *testing.T) { // setup request for 5 blocks. response should contain the first 2 (MaxSize) - ss.blockIDs = make(map[flow.Identifier]*flow.Block) + ss.blockIDs = make(map[flow.Identifier]*flow.Proposal) req.BlockIDs = make([]flow.Identifier, 5) for i := 0; i < len(req.BlockIDs); i++ { - b := unittest.BlockFixture() - b.Header.Height = ss.head.Height - uint64(i) + b := unittest.BlockFixture( + unittest.Block.WithHeight(ss.head.Height - uint64(i)), + ) req.BlockIDs[i] = b.ID() - ss.blockIDs[b.ID()] = &b + ss.blockIDs[b.ID()] = unittest.ProposalFromBlock(b) } ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil).Run( func(args mock.Arguments) { res := args.Get(0).(*messages.BlockResponse) - assert.ElementsMatch(ss.T(), []*flow.Block{ss.blockIDs[req.BlockIDs[0]], ss.blockIDs[req.BlockIDs[1]]}, res.BlocksInternal(), "response should contain right block") + expected := []flow.Proposal{*ss.blockIDs[req.BlockIDs[0]], *ss.blockIDs[req.BlockIDs[1]]} + internal, err := res.ToInternal() + require.NoError(t, err) + actual, ok := internal.(*flow.BlockResponse) + require.True(t, ok) + assert.ElementsMatch(ss.T(), expected, actual.Blocks, "response should contain right block") assert.Equal(ss.T(), req.Nonce, res.Nonce, "response should contain request nonce") recipientID := args.Get(1).(flow.Identifier) assert.Equal(ss.T(), originID, recipientID, "response should be send to original requester") @@ -429,59 +367,57 @@ func (ss *SyncSuite) TestOnBatchRequest() { config.MaxSize = 2 ss.e.requestHandler.core, err = synccore.New(ss.e.log, config, metrics.NewNoopCollector(), flow.Localnet) require.NoError(ss.T(), err) + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageBlockResponse).Once() err = ss.e.requestHandler.onBatchRequest(originID, req) require.NoError(ss.T(), err, "valid batch request exceeding max size should still pass") + + ss.metrics.AssertExpectations(ss.T()) }) } -func (ss *SyncSuite) TestOnBlockResponse() { - +func (ss *SyncSuite) 
TestOnValidBlockResponse() { // generate origin and block response originID := unittest.IdentifierFixture() - res := &messages.BlockResponse{ - Nonce: rand.Uint64(), - Blocks: []messages.UntrustedBlock{}, - } // add one block that should be processed - processable := unittest.BlockFixture() - ss.core.On("HandleBlock", processable.Header).Return(true) - res.Blocks = append(res.Blocks, messages.UntrustedBlockFromInternal(&processable)) + response := unittest.BlockResponseFixture(2) + processable := response.Blocks[0] + ss.core.On("HandleBlock", processable.Block.ToHeader()).Return(true) // add one block that should not be processed - unprocessable := unittest.BlockFixture() - ss.core.On("HandleBlock", unprocessable.Header).Return(false) - res.Blocks = append(res.Blocks, messages.UntrustedBlockFromInternal(&unprocessable)) + unprocessable := response.Blocks[1] + ss.core.On("HandleBlock", unprocessable.Block.ToHeader()).Return(false) ss.comp.On("OnSyncedBlocks", mock.Anything).Run(func(args mock.Arguments) { - res := args.Get(0).(flow.Slashable[[]*messages.BlockProposal]) - converted := res.Message[0].Block.ToInternal() - ss.Assert().Equal(processable.Header, converted.Header) - ss.Assert().Equal(processable.Payload, converted.Payload) + res := args.Get(0).(flow.Slashable[[]*flow.Proposal]) + actual := res.Message[0] + ss.Assert().Equal(processable.Block.HeaderBody, actual.Block.HeaderBody) + ss.Assert().Equal(processable.Block.Payload, actual.Block.Payload) ss.Assert().Equal(originID, res.OriginID) }) - ss.e.onBlockResponse(originID, res) + ss.e.onBlockResponse(originID, response) ss.core.AssertExpectations(ss.T()) } func (ss *SyncSuite) TestPollHeight() { // check that we send to three nodes from our total list - others := ss.participants.Filter(filter.HasNodeID(ss.participants[1:].NodeIDs()...)) + others := ss.participants.Filter(filter.HasNodeID[flow.Identity](ss.participants[1:].NodeIDs()...)) ss.con.On("Multicast", mock.Anything, synccore.DefaultPollNodes, others[0].NodeID, others[1].NodeID).Return(nil).Run( func(args mock.Arguments) { req := args.Get(0).(*messages.SyncRequest) require.Equal(ss.T(), ss.head.Height, req.Height, "request should contain finalized height") }, ) + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageSyncRequest).Once() ss.e.pollHeight() ss.con.AssertExpectations(ss.T()) + ss.metrics.AssertExpectations(ss.T()) } func (ss *SyncSuite) TestSendRequests() { - ranges := unittest.RangeListFixture(1) batches := unittest.BatchListFixture(1) @@ -503,37 +439,48 @@ func (ss *SyncSuite) TestSendRequests() { }, ) ss.core.On("BatchRequested", batches[0]) + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageBatchRequest).Once() + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageRangeRequest).Once() // exclude my node ID ss.e.sendRequests(ss.participants[1:].NodeIDs(), ranges, batches) ss.con.AssertExpectations(ss.T()) + ss.metrics.AssertExpectations(ss.T()) } // test a synchronization engine can be started and stopped func (ss *SyncSuite) TestStartStop() { - unittest.AssertReturnsBefore(ss.T(), func() { - <-ss.e.Ready() - <-ss.e.Done() - }, time.Second) + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(ss.T(), context.Background()) + ss.e.Start(ctx) + unittest.AssertClosesBefore(ss.T(), ss.e.Ready(), time.Second) + cancel() + unittest.AssertClosesBefore(ss.T(), ss.e.Done(), time.Second) } // TestProcessingMultipleItems tests that items are processed in async way func (ss *SyncSuite) 
TestProcessingMultipleItems() { - <-ss.e.Ready() + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(ss.T(), context.Background()) + ss.e.Start(ctx) + unittest.AssertClosesBefore(ss.T(), ss.e.Ready(), time.Second) + defer cancel() originID := unittest.IdentifierFixture() for i := 0; i < 5; i++ { - msg := &messages.SyncResponse{ + msg := &flow.SyncResponse{ Nonce: uint64(i), Height: uint64(1000 + i), } ss.core.On("HandleHeight", mock.Anything, msg.Height).Once() + ss.metrics.On("MessageSent", metrics.EngineSynchronization, metrics.MessageSyncResponse).Once() + ss.metrics.On("MessageHandled", metrics.EngineSynchronization, metrics.MessageSyncResponse).Once() + ss.metrics.On("MessageReceived", metrics.EngineSynchronization, metrics.MessageSyncResponse).Once() + require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, msg)) } finalHeight := ss.head.Height for i := 0; i < 5; i++ { - msg := &messages.SyncRequest{ + msg := &flow.SyncRequest{ Nonce: uint64(i), Height: finalHeight - 100, } @@ -542,6 +489,10 @@ func (ss *SyncSuite) TestProcessingMultipleItems() { ss.core.On("WithinTolerance", mock.Anything, mock.Anything).Return(false) ss.core.On("HandleHeight", mock.Anything, msg.Height).Once() ss.con.On("Unicast", mock.Anything, mock.Anything).Return(nil) + ss.metrics.On("MessageReceived", metrics.EngineSynchronization, metrics.MessageSyncRequest).Once() + + // misbehavior might or might not be reported + ss.con.On("ReportMisbehavior", mock.Anything).Return(mock.Anything).Maybe() require.NoError(ss.T(), ss.e.Process(channels.SyncCommittee, originID, msg)) } @@ -550,6 +501,7 @@ func (ss *SyncSuite) TestProcessingMultipleItems() { time.Sleep(time.Millisecond * 100) ss.core.AssertExpectations(ss.T()) + ss.metrics.AssertExpectations(ss.T()) } // TestProcessUnsupportedMessageType tests that Process and ProcessLocal correctly handle a case where invalid message type @@ -562,9 +514,4 @@ func (ss *SyncSuite) TestProcessUnsupportedMessageType() { // shouldn't result in error since byzantine inputs are expected require.NoError(ss.T(), err) } - - // in case of local processing error cannot be consumed since all inputs are trusted - err := ss.e.ProcessLocal(invalidEvent) - require.Error(ss.T(), err) - require.True(ss.T(), engine.IsIncompatibleInputTypeError(err)) } diff --git a/engine/common/synchronization/request_handler.go b/engine/common/synchronization/request_handler.go index 4aa5beba465..75367e0d616 100644 --- a/engine/common/synchronization/request_handler.go +++ b/engine/common/synchronization/request_handler.go @@ -1,6 +1,7 @@ package synchronization import ( + "context" "errors" "fmt" @@ -11,10 +12,11 @@ import ( "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/chainsync" - "github.com/onflow/flow-go/module/lifecycle" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/events" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/logging" ) @@ -31,18 +33,26 @@ const defaultBatchRequestQueueCapacity = 500 // defaultEngineRequestsWorkers number of workers to dispatch events for requests const defaultEngineRequestsWorkers = 8 +// RequestHandler encapsulates message queues and processing logic for the sync engine. 
+// It logically separates request processing from active participation (sending requests), +// primarily to simplify nodes which bridge the public and private networks. +// +// The RequestHandlerEngine embeds RequestHandler to create an engine which only responds +// to requests on the public network (does not send requests over this network). +// The Engine embeds RequestHandler and additionally includes logic for sending sync requests. +// +// Although the RequestHandler defines a notifier, message queue, and processing worker logic, +// it is not itself a component.Component and does not manage any worker threads. The containing +// engine is responsible for starting the worker threads for processing requests. type RequestHandler struct { - lm *lifecycle.LifecycleManager - unit *engine.Unit - me module.Local log zerolog.Logger metrics module.EngineMetrics - blocks storage.Blocks - state protocol.State - core module.SyncCore - responseSender ResponseSender + blocks storage.Blocks + finalizedHeaderCache module.FinalizedHeaderCache + core module.SyncCore + responseSender ResponseSender pendingSyncRequests engine.MessageStore // message store for *message.SyncRequest pendingBatchRequests engine.MessageStore // message store for *message.BatchRequest @@ -57,22 +67,20 @@ func NewRequestHandler( metrics module.EngineMetrics, responseSender ResponseSender, me module.Local, - state protocol.State, + finalizedHeaderCache *events.FinalizedHeaderCache, blocks storage.Blocks, core module.SyncCore, queueMissingHeights bool, ) *RequestHandler { r := &RequestHandler{ - unit: engine.NewUnit(), - lm: lifecycle.NewLifecycleManager(), - me: me, - log: log.With().Str("engine", "synchronization").Logger(), - metrics: metrics, - state: state, - blocks: blocks, - core: core, - responseSender: responseSender, - queueMissingHeights: queueMissingHeights, + me: me, + log: log.With().Str("engine", "synchronization").Logger(), + metrics: metrics, + finalizedHeaderCache: finalizedHeaderCache, + blocks: blocks, + core: core, + responseSender: responseSender, + queueMissingHeights: queueMissingHeights, } r.setupRequestMessageHandler() @@ -80,10 +88,10 @@ func NewRequestHandler( return r } -// Process processes the given event from the node with the given origin ID in -// a blocking manner. It returns the potential processing error when done. +// Process processes the given event from the node with the given origin ID in a blocking manner. +// No errors are expected during normal operation. func (r *RequestHandler) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { - err := r.process(originID, event) + err := r.requestMessageHandler.Process(originID, event) if err != nil { if engine.IsIncompatibleInputTypeError(err) { r.log.Warn().Msgf("%v delivered unsupported message %T through %v", originID, event, channel) @@ -94,14 +102,6 @@ func (r *RequestHandler) Process(channel channels.Channel, originID flow.Identif return nil } -// process processes events for the synchronization request handler engine. -// Error returns: -// - IncompatibleInputTypeError if input has unexpected type -// - All other errors are potential symptoms of internal state corruption or bugs (fatal). -func (r *RequestHandler) process(originID flow.Identifier, event interface{}) error { - return r.requestMessageHandler.Process(originID, event) -} - // setupRequestMessageHandler initializes the inbound queues and the MessageHandler for UNTRUSTED requests. 
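+// Each engine.Pattern below matches exactly one request type (SyncRequest, RangeRequest, or
+// BatchRequest), records the corresponding received-message metric on a match, and stores the
+// message in that type's dedicated queue for asynchronous processing by the request workers.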
func (r *RequestHandler) setupRequestMessageHandler() { // RequestHeap deduplicates requests by keeping only one sync request for each requester. @@ -115,7 +115,7 @@ func (r *RequestHandler) setupRequestMessageHandler() { engine.NewNotifier(), engine.Pattern{ Match: func(msg *engine.Message) bool { - _, ok := msg.Payload.(*messages.SyncRequest) + _, ok := msg.Payload.(*flow.SyncRequest) if ok { r.metrics.MessageReceived(metrics.EngineSynchronization, metrics.MessageSyncRequest) } @@ -125,7 +125,7 @@ func (r *RequestHandler) setupRequestMessageHandler() { }, engine.Pattern{ Match: func(msg *engine.Message) bool { - _, ok := msg.Payload.(*messages.RangeRequest) + _, ok := msg.Payload.(*flow.RangeRequest) if ok { r.metrics.MessageReceived(metrics.EngineSynchronization, metrics.MessageRangeRequest) } @@ -135,7 +135,7 @@ func (r *RequestHandler) setupRequestMessageHandler() { }, engine.Pattern{ Match: func(msg *engine.Message) bool { - _, ok := msg.Payload.(*messages.BatchRequest) + _, ok := msg.Payload.(*flow.BatchRequest) if ok { r.metrics.MessageReceived(metrics.EngineSynchronization, metrics.MessageBatchRequest) } @@ -150,35 +150,32 @@ func (r *RequestHandler) setupRequestMessageHandler() { // inform the other node of it, so they can organize their block downloads. If // we have a lower height, we add the difference to our own download queue. // No errors are expected during normal operation. -func (r *RequestHandler) onSyncRequest(originID flow.Identifier, req *messages.SyncRequest) error { - final, err := r.state.Final().Head() - if err != nil { - return fmt.Errorf("could not get finalized header: %w", err) - } +func (r *RequestHandler) onSyncRequest(originID flow.Identifier, req *flow.SyncRequest) error { + finalizedHeader := r.finalizedHeaderCache.Get() logger := r.log.With().Str("origin_id", originID.String()).Logger() logger.Debug(). Uint64("origin_height", req.Height). - Uint64("local_height", final.Height). + Uint64("local_height", finalizedHeader.Height). Msg("received new sync request") if r.queueMissingHeights { // queue any missing heights as needed - r.core.HandleHeight(final, req.Height) + r.core.HandleHeight(finalizedHeader, req.Height) } // don't bother sending a response if we're within tolerance or if we're // behind the requester - if r.core.WithinTolerance(final, req.Height) || req.Height > final.Height { + if r.core.WithinTolerance(finalizedHeader, req.Height) || req.Height > finalizedHeader.Height { return nil } // if we're sufficiently ahead of the requester, send a response res := &messages.SyncResponse{ - Height: final.Height, + Height: finalizedHeader.Height, Nonce: req.Nonce, } - err = r.responseSender.SendResponse(res, originID) + err := r.responseSender.SendResponse(res, originID) if err != nil { logger.Warn().Err(err).Msg("sending sync response failed") return nil @@ -190,18 +187,15 @@ func (r *RequestHandler) onSyncRequest(originID flow.Identifier, req *messages.S // onRangeRequest processes a request for a range of blocks by height. // No errors are expected during normal operation. 
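+// The response contains at most ToHeight-FromHeight+1 proposals: ranges larger than the configured
+// sync MaxSize are clamped, and an unknown height truncates the range at the first gap.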
-func (r *RequestHandler) onRangeRequest(originID flow.Identifier, req *messages.RangeRequest) error { +func (r *RequestHandler) onRangeRequest(originID flow.Identifier, req *flow.RangeRequest) error { logger := r.log.With().Str("origin_id", originID.String()).Logger() logger.Debug().Msg("received new range request") // get the latest final state to know if we can fulfill the request - head, err := r.state.Final().Head() - if err != nil { - return fmt.Errorf("could not get finalized header: %w", err) - } + finalizedHeader := r.finalizedHeaderCache.Get() // if we don't have anything to send, we can bail right away - if head.Height < req.FromHeight || req.FromHeight > req.ToHeight { + if finalizedHeader.Height < req.FromHeight || req.FromHeight > req.ToHeight { return nil } @@ -227,9 +221,13 @@ func (r *RequestHandler) onRangeRequest(originID flow.Identifier, req *messages. } // get all the blocks, one by one - blocks := make([]messages.UntrustedBlock, 0, req.ToHeight-req.FromHeight+1) + // We currently require all blocks in the block response to be sent with a valid proposer signature. + // Consensus Followers theoretically only need the last block to have a valid proposer signature, + // as the other blocks can be verified via included QCs. Though, for now we keep it simple and just + // uniformly use proposals, so all nodes (consensus participants and followers) maintain the same data. + blocks := make([]flow.UntrustedProposal, 0, req.ToHeight-req.FromHeight+1) for height := req.FromHeight; height <= req.ToHeight; height++ { - block, err := r.blocks.ByHeight(height) + proposal, err := r.blocks.ProposalByHeight(height) if errors.Is(err, storage.ErrNotFound) { logger.Error().Uint64("height", height).Msg("skipping unknown heights") break @@ -237,7 +235,7 @@ func (r *RequestHandler) onRangeRequest(originID flow.Identifier, req *messages. if err != nil { return fmt.Errorf("could not get block for height (%d): %w", height, err) } - blocks = append(blocks, messages.UntrustedBlockFromInternal(block)) + blocks = append(blocks, flow.UntrustedProposal(*proposal)) } // if there are no blocks to send, skip network message @@ -251,7 +249,7 @@ func (r *RequestHandler) onRangeRequest(originID flow.Identifier, req *messages. Nonce: req.Nonce, Blocks: blocks, } - err = r.responseSender.SendResponse(res, originID) + err := r.responseSender.SendResponse(res, originID) if err != nil { logger.Warn().Err(err).Msg("sending range response failed") return nil @@ -262,7 +260,7 @@ func (r *RequestHandler) onRangeRequest(originID flow.Identifier, req *messages. } // onBatchRequest processes a request for a specific block by block ID. -func (r *RequestHandler) onBatchRequest(originID flow.Identifier, req *messages.BatchRequest) error { +func (r *RequestHandler) onBatchRequest(originID flow.Identifier, req *flow.BatchRequest) error { logger := r.log.With().Str("origin_id", originID.String()).Logger() logger.Debug().Msg("received new batch request") @@ -299,9 +297,9 @@ func (r *RequestHandler) onBatchRequest(originID flow.Identifier, req *messages. 
} // try to get all the blocks by ID - blocks := make([]messages.UntrustedBlock, 0, len(blockIDs)) + blocks := make([]flow.UntrustedProposal, 0, len(blockIDs)) for blockID := range blockIDs { - block, err := r.blocks.ByID(blockID) + proposal, err := r.blocks.ProposalByID(blockID) if errors.Is(err, storage.ErrNotFound) { logger.Debug().Hex("block_id", blockID[:]).Msg("skipping unknown block") continue @@ -309,7 +307,7 @@ func (r *RequestHandler) onBatchRequest(originID flow.Identifier, req *messages. if err != nil { return fmt.Errorf("could not get block by ID (%s): %w", blockID, err) } - blocks = append(blocks, messages.UntrustedBlockFromInternal(block)) + blocks = append(blocks, flow.UntrustedProposal(*proposal)) } // if there are no blocks to send, skip network message @@ -334,17 +332,17 @@ func (r *RequestHandler) onBatchRequest(originID flow.Identifier, req *messages. } // processAvailableRequests is processor of pending events which drives events from networking layer to business logic. -func (r *RequestHandler) processAvailableRequests() error { +func (r *RequestHandler) processAvailableRequests(ctx context.Context) error { for { select { - case <-r.unit.Quit(): + case <-ctx.Done(): return nil default: } msg, ok := r.pendingSyncRequests.Get() if ok { - err := r.onSyncRequest(msg.OriginID, msg.Payload.(*messages.SyncRequest)) + err := r.onSyncRequest(msg.OriginID, msg.Payload.(*flow.SyncRequest)) if err != nil { return fmt.Errorf("processing sync request failed: %w", err) } @@ -353,7 +351,7 @@ func (r *RequestHandler) processAvailableRequests() error { msg, ok = r.pendingRangeRequests.Get() if ok { - err := r.onRangeRequest(msg.OriginID, msg.Payload.(*messages.RangeRequest)) + err := r.onRangeRequest(msg.OriginID, msg.Payload.(*flow.RangeRequest)) if err != nil { return fmt.Errorf("processing range request failed: %w", err) } @@ -362,7 +360,7 @@ func (r *RequestHandler) processAvailableRequests() error { msg, ok = r.pendingBatchRequests.Get() if ok { - err := r.onBatchRequest(msg.OriginID, msg.Payload.(*messages.BatchRequest)) + err := r.onBatchRequest(msg.OriginID, msg.Payload.(*flow.BatchRequest)) if err != nil { return fmt.Errorf("processing batch request failed: %w", err) } @@ -375,37 +373,24 @@ func (r *RequestHandler) processAvailableRequests() error { } } -// requestProcessingLoop is a separate goroutine that performs processing of queued requests -func (r *RequestHandler) requestProcessingLoop() { +// requestProcessingWorker is a separate goroutine that performs processing of queued requests. +// Multiple instances may be invoked. It is invoked and managed by the Engine or RequestHandlerEngine +// which embeds this RequestHandler. +func (r *RequestHandler) requestProcessingWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + notifier := r.requestMessageHandler.GetNotifier() + done := ctx.Done() for { select { - case <-r.unit.Quit(): + case <-done: return case <-notifier: - err := r.processAvailableRequests() + err := r.processAvailableRequests(ctx) if err != nil { - r.log.Fatal().Err(err).Msg("internal error processing queued requests") + r.log.Err(err).Msg("internal error processing queued requests") + ctx.Throw(err) } } } } - -// Ready returns a ready channel that is closed once the engine has fully started. 
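The `Ready`/`Done` lifecycle removed just below is superseded by the `component.ComponentManager` pattern used throughout this PR: workers receive an `irrecoverable.SignalerContext` plus a `ReadyFunc`, and fatal conditions are escalated with `ctx.Throw` instead of `log.Fatal`. A minimal sketch of wiring several notifier-driven workers this way (illustrative shape only, not the PR's exact code):

```go
package sketch

import (
	"github.com/onflow/flow-go/module/component"
	"github.com/onflow/flow-go/module/irrecoverable"
)

// newWorkerPool mirrors the shape of requestProcessingWorker: each worker signals
// readiness, then loops until shutdown, escalating unexpected errors to the
// supervising context rather than crashing the process directly.
func newWorkerPool(n int, notifier <-chan struct{}, process func(irrecoverable.SignalerContext) error) component.Component {
	builder := component.NewComponentManagerBuilder()
	for i := 0; i < n; i++ {
		builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
			ready()
			for {
				select {
				case <-ctx.Done():
					return
				case <-notifier:
					if err := process(ctx); err != nil {
						ctx.Throw(err) // irrecoverable: the supervisor decides how to fail
					}
				}
			}
		})
	}
	return builder.Build()
}
```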
-func (r *RequestHandler) Ready() <-chan struct{} { - r.lm.OnStart(func() { - for i := 0; i < defaultEngineRequestsWorkers; i++ { - r.unit.Launch(r.requestProcessingLoop) - } - }) - return r.lm.Started() -} - -// Done returns a done channel that is closed once the engine has fully stopped. -func (r *RequestHandler) Done() <-chan struct{} { - r.lm.OnStop(func() { - // wait for all request processing workers to exit - <-r.unit.Done() - }) - return r.lm.Stopped() -} diff --git a/engine/common/synchronization/request_handler_engine.go b/engine/common/synchronization/request_handler_engine.go index 4a0026a640f..20dfb37a808 100644 --- a/engine/common/synchronization/request_handler_engine.go +++ b/engine/common/synchronization/request_handler_engine.go @@ -5,9 +5,12 @@ import ( "github.com/rs/zerolog" + "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/events" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" @@ -47,16 +50,25 @@ func NewResponseSender(con network.Conduit) *ResponseSenderImpl { } } +// RequestHandlerEngine is an engine which operates only the request-handling portion of the block sync protocol. +// It is used by Access/Observer nodes attached to the public network, enabling them +// to provide block synchronization data to nodes on the public network, but not +// requesting any data from these nodes. (Requests are sent only on the private network.) type RequestHandlerEngine struct { + component.Component + hotstuff.FinalizationConsumer + requestHandler *RequestHandler } var _ network.MessageProcessor = (*RequestHandlerEngine)(nil) +var _ component.Component = (*RequestHandlerEngine)(nil) +var _ hotstuff.FinalizationConsumer = (*RequestHandlerEngine)(nil) func NewRequestHandlerEngine( logger zerolog.Logger, metrics module.EngineMetrics, - net network.Network, + net network.EngineRegistry, me module.Local, state protocol.State, blocks storage.Blocks, @@ -69,16 +81,26 @@ func NewRequestHandlerEngine( return nil, fmt.Errorf("could not register engine: %w", err) } + finalizedHeaderCache, finalizedCacheWorker, err := events.NewFinalizedHeaderCache(state) + if err != nil { + return nil, fmt.Errorf("could not initialize finalized header cache: %w", err) + } + e.FinalizationConsumer = finalizedHeaderCache e.requestHandler = NewRequestHandler( logger, metrics, NewResponseSender(con), me, - state, + finalizedHeaderCache, blocks, core, false, ) + builder := component.NewComponentManagerBuilder().AddWorker(finalizedCacheWorker) + for i := 0; i < defaultEngineRequestsWorkers; i++ { + builder.AddWorker(e.requestHandler.requestProcessingWorker) + } + e.Component = builder.Build() return e, nil } @@ -86,11 +108,3 @@ func NewRequestHandlerEngine( func (r *RequestHandlerEngine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { return r.requestHandler.Process(channel, originID, event) } - -func (r *RequestHandlerEngine) Ready() <-chan struct{} { - return r.requestHandler.Ready() -} - -func (r *RequestHandlerEngine) Done() <-chan struct{} { - return r.requestHandler.Done() -} diff --git a/engine/common/version/version_control.go b/engine/common/version/version_control.go new file mode 100644 index 00000000000..41a1d40c4be --- /dev/null +++ b/engine/common/version/version_control.go @@ -0,0 +1,434 
@@ +package version + +import ( + "errors" + "fmt" + "sync" + + "github.com/coreos/go-semver/semver" + "github.com/rs/zerolog" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + psEvents "github.com/onflow/flow-go/state/protocol/events" + "github.com/onflow/flow-go/storage" +) + +// ErrOutOfRange indicates that the queried height is outside the range handled by version control: below the sealed root height or above the last processed height +var ErrOutOfRange = errors.New("height is out of range") + +// VersionControlConsumer defines a function type that consumes version control updates. +// It is called with the block height and the corresponding semantic version. +// There are two possible notification options: +// - A new or updated version will have a height and a semantic version at that height. +// - A deleted version will have the previous height and nil semantic version, indicating that the update was deleted. +type VersionControlConsumer func(height uint64, version *semver.Version) + +// NoHeight is a sentinel value (zero) indicating that no height boundary is set. +var NoHeight = uint64(0) + +// defaultCompatibilityOverrides stores the list of version compatibility overrides. +// version beacon events whose Major.Minor.Patch version matches an entry in this map will be ignored. +// +// IMPORTANT: only add versions to this list if you are certain that the cadence and fvm changes +// deployed during the HCU are backwards compatible for scripts. +var defaultCompatibilityOverrides = map[string]struct{}{ + "0.37.17": {}, // mainnet, testnet + "0.37.18": {}, // testnet only + "0.37.20": {}, // mainnet, testnet + "0.37.22": {}, // mainnet, testnet + "0.37.26": {}, // mainnet, testnet + "0.38.1": {}, // testnet only + "0.38.2": {}, // mainnet, testnet + "0.38.3": {}, // mainnet, testnet + "0.40.0": {}, // mainnet, testnet + "0.41.0": {}, // mainnet, testnet + "0.41.4": {}, // mainnet, testnet + "0.42.0": {}, // mainnet, testnet + "0.42.1": {}, // mainnet, testnet +} + +// VersionControl manages the version control system for the node. +// It consumes BlockFinalized events and updates the node's version control based on the latest version beacon. +type VersionControl struct { + // Noop implements the protocol.Consumer interface with no operations. + psEvents.Noop + sync.Mutex + component.Component + + log zerolog.Logger + // Storage + versionBeacons storage.VersionBeacons + + // nodeVersion stores the node's current version. + // It could be nil if the node version is not available. + nodeVersion *semver.Version + + // consumers stores the list of consumers for version updates. + consumers []VersionControlConsumer + + // Notifier for new finalized block height + finalizedHeightNotifier engine.Notifier + + finalizedHeight counters.StrictMonotonicCounter + + // lastProcessedHeight is the last handled block height + lastProcessedHeight *atomic.Uint64 + + // sealedRootBlockHeight is the sealed block height when the node bootstrapped + sealedRootBlockHeight *atomic.Uint64 + + // startHeight and endHeight define the height boundaries for version compatibility. + startHeight *atomic.Uint64 + endHeight *atomic.Uint64 + + // compatibilityOverrides stores the list of version compatibility overrides. + // version beacon events whose Major.Minor.Patch version matches an entry in this map will be ignored.
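// Editorial illustration (not part of the diff): matching against this map uses
// only the Major.Minor.Patch core of a version, so a beacon for "0.37.17-patch.1"
// would also hit the "0.37.17" entry above; see isOverridden further down, which
// normalizes via semver.Version{Major, Minor, Patch}.String() before the lookup.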
+ compatibilityOverrides map[string]struct{} + + // overridesLogSuppression stores the list of version compatibility overrides that have been logged. + // this is used to avoid emitting logs during every check when a version is overridden. + overridesLogSuppression map[string]struct{} +} + +var _ protocol.Consumer = (*VersionControl)(nil) +var _ component.Component = (*VersionControl)(nil) + +// NewVersionControl creates a new VersionControl instance. +// +// We currently have no strong guarantee that the node version is a valid semver. +// See build.SemverV2 for more details. That is why nil is a valid input for the node version; +// the constructor rejects it with an error rather than panicking. +func NewVersionControl( + log zerolog.Logger, + versionBeacons storage.VersionBeacons, + nodeVersion *semver.Version, + sealedRootBlockHeight uint64, + latestFinalizedBlockHeight uint64, +) (*VersionControl, error) { + + vc := &VersionControl{ + log: log.With(). + Str("component", "version_control"). + Logger(), + + nodeVersion: nodeVersion, + versionBeacons: versionBeacons, + sealedRootBlockHeight: atomic.NewUint64(sealedRootBlockHeight), + lastProcessedHeight: atomic.NewUint64(latestFinalizedBlockHeight), + finalizedHeight: counters.NewMonotonicCounter(latestFinalizedBlockHeight), + finalizedHeightNotifier: engine.NewNotifier(), + startHeight: atomic.NewUint64(NoHeight), + endHeight: atomic.NewUint64(NoHeight), + compatibilityOverrides: defaultCompatibilityOverrides, + overridesLogSuppression: make(map[string]struct{}), + } + + if vc.nodeVersion == nil { + return nil, fmt.Errorf("version control node version is empty") + } + + vc.log.Info(). + Stringer("node_version", vc.nodeVersion). + Msg("system initialized") + + // Setup component manager for handling worker functions. + cm := component.NewComponentManagerBuilder() + cm.AddWorker(vc.processEvents) + cm.AddWorker(vc.checkInitialVersionBeacon) + + vc.Component = cm.Build() + + return vc, nil +} + +// checkInitialVersionBeacon checks the initial version beacon at the latest finalized block. +// It ensures the component is not ready until the initial version beacon is checked. +func (v *VersionControl) checkInitialVersionBeacon( + ctx irrecoverable.SignalerContext, + ready component.ReadyFunc, +) { + err := v.initBoundaries(ctx) + if err == nil { + ready() + } +} + +// initBoundaries initializes the version boundaries for version control. +// +// It searches through version beacons to find the start and end block heights +// for the current node version. The search continues until the start height +// is found or until the sealed root block height is reached. +// +// Returns an error if the highest version beacon event could not be retrieved +func (v *VersionControl) initBoundaries( + ctx irrecoverable.SignalerContext, +) error { + sealedRootBlockHeight := v.sealedRootBlockHeight.Load() + latestHeight := v.lastProcessedHeight.Load() + processedHeight := latestHeight + + for { + vb, err := v.versionBeacons.Highest(processedHeight) + if err != nil && !errors.Is(err, storage.ErrNotFound) { + ctx.Throw(fmt.Errorf("failed to get highest version beacon for version control: %w", err)) + return err + } + + if vb == nil { + // no version beacon found + // this is only expected when a node starts up on a network that has never had a version beacon event. + v.log.Info(). + Uint64("height", processedHeight).
+ Msg("No initial version beacon found") + + return nil + } + + // version boundaries are sorted by blockHeight in ascending order + // the first version greater than the node's is the version transition height + for i := len(vb.VersionBoundaries) - 1; i >= 0; i-- { + boundary := vb.VersionBoundaries[i] + + ver, err := boundary.Semver() + // this should never happen as we already validated the version beacon + // when indexing it + if err != nil || ver == nil { + if err == nil { + err = fmt.Errorf("boundary semantic version is nil") + } + ctx.Throw(fmt.Errorf("failed to parse semver during version control setup: %w", err)) + return err + } + processedHeight = vb.SealHeight - 1 + + if v.isOverridden(ver) { + continue + } + + if ver.Compare(*v.nodeVersion) <= 0 { + v.startHeight.Store(boundary.BlockHeight) + v.log.Info(). + Uint64("startHeight", boundary.BlockHeight). + Msg("Found start block height") + // This is the lowest compatible height for this node version, stop search immediately + return nil + } else { + v.endHeight.Store(boundary.BlockHeight - 1) + v.log.Info(). + Uint64("endHeight", boundary.BlockHeight-1). + Msg("Found end block height") + } + } + + // The search should continue until we find the start height or reach the sealed root block height + if v.startHeight.Load() == NoHeight && processedHeight <= sealedRootBlockHeight { + v.log.Info(). + Uint64("processedHeight", processedHeight). + Uint64("sealedRootBlockHeight", sealedRootBlockHeight). + Msg("No start version beacon event found") + return nil + } + } +} + +// BlockFinalized is called when a block is finalized. +// It implements the protocol.Consumer interface. +func (v *VersionControl) BlockFinalized(h *flow.Header) { + if v.finalizedHeight.Set(h.Height) { + v.finalizedHeightNotifier.Notify() + } +} + +// CompatibleAtBlock checks if the node's version is compatible at a given block height. +// It returns true if the node's version is compatible within the specified height range. +// Returns expected errors: +// - ErrOutOfRange if incoming block height is higher that last handled block height +func (v *VersionControl) CompatibleAtBlock(height uint64) (bool, error) { + // Check, if the height smaller than sealed root block height. If so, return an error indicating that the height is unhandled. + sealedRootHeight := v.sealedRootBlockHeight.Load() + if height < sealedRootHeight { + return false, fmt.Errorf("could not check compatibility for height %d: the provided height is smaller than sealed root height %d: %w", height, sealedRootHeight, ErrOutOfRange) + } + + // Check if the height is greater than the last handled block height. If so, return an error indicating that the height is unhandled. + lastProcessedHeight := v.lastProcessedHeight.Load() + if height > lastProcessedHeight { + return false, fmt.Errorf("could not check compatibility for height %d: last handled height is %d: %w", height, lastProcessedHeight, ErrOutOfRange) + } + + startHeight := v.startHeight.Load() + // Check if the start height is set and the height is less than the start height. If so, return false indicating that the height is not compatible. + if startHeight != NoHeight && height < startHeight { + return false, nil + } + + endHeight := v.endHeight.Load() + // Check if the end height is set and the height is greater than the end height. If so, return false indicating that the height is not compatible. 
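// Worked example (editorial illustration, derived from the checks in this
// function): with sealedRoot=100, lastProcessed=500, startHeight=150, endHeight=400:
//   CompatibleAtBlock(90)  -> error wrapping ErrOutOfRange (below sealed root)
//   CompatibleAtBlock(120) -> (false, nil), before the start boundary
//   CompatibleAtBlock(300) -> (true, nil)
//   CompatibleAtBlock(450) -> (false, nil), after the end boundary
//   CompatibleAtBlock(600) -> error wrapping ErrOutOfRange (not yet processed)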
+ if endHeight != NoHeight && height > endHeight { + return false, nil + } + + // If none of the above conditions are met, the height is compatible. + return true, nil +} + +// AddVersionUpdatesConsumer adds a consumer for version update events. +func (v *VersionControl) AddVersionUpdatesConsumer(consumer VersionControlConsumer) { + v.Lock() + defer v.Unlock() + + v.consumers = append(v.consumers, consumer) +} + +// processEvents is a worker that processes block finalized events. +func (v *VersionControl) processEvents( + ctx irrecoverable.SignalerContext, + ready component.ReadyFunc, +) { + ready() + + for { + select { + case <-ctx.Done(): + return + case <-v.finalizedHeightNotifier.Channel(): + v.blockFinalized(ctx, v.finalizedHeight.Value()) + } + } +} + +// blockFinalized processes a block finalized event and updates the version control state. +func (v *VersionControl) blockFinalized( + ctx irrecoverable.SignalerContext, + newFinalizedHeight uint64, +) { + lastProcessedHeight := v.lastProcessedHeight.Load() + if lastProcessedHeight >= newFinalizedHeight { + // already processed this or a higher finalized height + return + } + + for height := lastProcessedHeight + 1; height <= newFinalizedHeight; height++ { + vb, err := v.versionBeacons.Highest(height) + if err != nil { + v.log.Err(err). + Uint64("height", height). + Msg("Failed to get highest version beacon") + + ctx.Throw(fmt.Errorf("failed to get highest version beacon for version control: %w", err)) + return + } + + v.lastProcessedHeight.Store(height) + + if vb == nil { + // no version beacon found + // this is only expected when a node starts up on a network that has never had a version beacon event. + v.log.Debug(). + Uint64("height", height). + Msg("No version beacon found at height") + continue + } + + previousEndHeight := v.endHeight.Load() + + if previousEndHeight != NoHeight && height > previousEndHeight { + // Stop here since it's outside our compatible range + return + } + + newEndHeight := NoHeight + // version boundaries are sorted by blockHeight in ascending order + for _, boundary := range vb.VersionBoundaries { + ver, err := boundary.Semver() + if err != nil || ver == nil { + if err == nil { + err = fmt.Errorf("boundary semantic version is nil") + } + // this should never happen as we already validated the version beacon + // when indexing it + ctx.Throw(fmt.Errorf("failed to parse semver: %w", err)) + return + } + + if v.isOverridden(ver) { + continue + } + + if ver.Compare(*v.nodeVersion) > 0 { + newEndHeight = boundary.BlockHeight - 1 + + for _, consumer := range v.consumers { + consumer(boundary.BlockHeight, ver) + } + + break + } + } + + v.endHeight.Store(newEndHeight) + + // Check if previous version was deleted. If yes, notify consumers about deletion + if previousEndHeight != NoHeight && newEndHeight == NoHeight { + for _, consumer := range v.consumers { + // Note: notifying for the boundary height, which is end height + 1 + consumer(previousEndHeight+1, nil) + } + } + } +} + +// StartHeight returns the first block that the version supports. +// Start height is the sealed root block if there is no start boundary in the current spork. +func (v *VersionControl) StartHeight() uint64 { + startHeight := v.startHeight.Load() + + // in case no start boundary in the current spork + if startHeight == NoHeight { + startHeight = v.sealedRootBlockHeight.Load() + } + + return startHeight +} + +// EndHeight returns the last block that the version supports.
+// End height is the last processed height if there is no end boundary in the current spork. +func (v *VersionControl) EndHeight() uint64 { + endHeight := v.endHeight.Load() + + // in case no end boundary in the current spork + if endHeight == NoHeight { + endHeight = v.lastProcessedHeight.Load() + } + + return endHeight +} + +// isOverridden checks if the version is overridden by the compatibility overrides and can be ignored. +func (v *VersionControl) isOverridden(ver *semver.Version) bool { + normalizedVersion := semver.Version{ + Major: ver.Major, + Minor: ver.Minor, + Patch: ver.Patch, + }.String() + + if _, ok := v.compatibilityOverrides[normalizedVersion]; !ok { + return false + } + + // only log the suppression once per version + if _, ok := v.overridesLogSuppression[normalizedVersion]; !ok { + v.overridesLogSuppression[normalizedVersion] = struct{}{} + v.log.Info(). + Str("event_version", ver.String()). + Str("override_version", normalizedVersion). + Msg("ignoring version beacon event matching compatibility override") + } + + return true +} diff --git a/engine/common/version/version_control_test.go b/engine/common/version/version_control_test.go new file mode 100644 index 00000000000..4dce52bd8f3 --- /dev/null +++ b/engine/common/version/version_control_test.go @@ -0,0 +1,759 @@ +package version + +import ( + "context" + "fmt" + "math" + "sort" + "testing" + "time" + + "github.com/onflow/flow-go/utils/unittest/mocks" + + "github.com/coreos/go-semver/semver" + "github.com/stretchr/testify/assert" + testifyMock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/storage" + storageMock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// testCaseConfig contains custom tweaks for test cases +type testCaseConfig struct { + name string + nodeVersion string + + versionEvents []*flow.SealedVersionBeacon + overrides map[string]struct{} + expectedStart uint64 + expectedEnd uint64 +} + +// TestVersionControlInitialization tests the initialization process of the VersionControl component +func TestVersionControlInitialization(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sealedRootBlockHeight := uint64(1000) + latestBlockHeight := sealedRootBlockHeight + 100 + + testCases := []testCaseConfig{ + { + name: "no version beacon found", + nodeVersion: "0.0.1", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight-100, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight - 50, Version: "0.0.1"}), + }, + expectedStart: sealedRootBlockHeight, + expectedEnd: latestBlockHeight, + }, + { + name: "start version set", + nodeVersion: "0.0.1", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight+10, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 12, Version: "0.0.1"}), + }, + expectedStart: sealedRootBlockHeight + 12, + expectedEnd: latestBlockHeight, + }, + { + name: "correct start version found", + nodeVersion: "0.0.3", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight+2, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 4, Version: "0.0.1"}), + VersionBeaconEvent(sealedRootBlockHeight+5, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 7, Version: "0.0.2"}), + }, + expectedStart: sealedRootBlockHeight + 7, + expectedEnd: 
latestBlockHeight, + }, + { + name: "end version set", + nodeVersion: "0.0.1", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight-100, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight - 50, Version: "0.0.1"}), + VersionBeaconEvent(latestBlockHeight-10, + flow.VersionBoundary{BlockHeight: latestBlockHeight - 8, Version: "0.0.3"}), + }, + expectedStart: sealedRootBlockHeight, + expectedEnd: latestBlockHeight - 9, + }, + { + name: "correct end version found", + nodeVersion: "0.0.1", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight-100, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight - 50, Version: "0.0.1"}), + VersionBeaconEvent(latestBlockHeight-10, + flow.VersionBoundary{BlockHeight: latestBlockHeight - 8, Version: "0.0.3"}), + VersionBeaconEvent(latestBlockHeight-3, + flow.VersionBoundary{BlockHeight: latestBlockHeight - 1, Version: "0.0.4"}), + }, + expectedStart: sealedRootBlockHeight, + expectedEnd: latestBlockHeight - 9, + }, + { + name: "start and end version set", + nodeVersion: "0.0.2", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight+10, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 12, Version: "0.0.1"}), + VersionBeaconEvent(latestBlockHeight-10, + flow.VersionBoundary{BlockHeight: latestBlockHeight - 8, Version: "0.0.3"}), + }, + expectedStart: sealedRootBlockHeight + 12, + expectedEnd: latestBlockHeight - 9, + }, + { + name: "correct start and end version found", + nodeVersion: "0.0.2", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight+2, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 4, Version: "0.0.1"}), + VersionBeaconEvent(sealedRootBlockHeight+10, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 12, Version: "0.0.2"}), + VersionBeaconEvent(latestBlockHeight-10, + flow.VersionBoundary{BlockHeight: latestBlockHeight - 8, Version: "0.0.3"}), + VersionBeaconEvent(latestBlockHeight-3, + flow.VersionBoundary{BlockHeight: latestBlockHeight - 1, Version: "0.0.4"}), + }, + expectedStart: sealedRootBlockHeight + 12, + expectedEnd: latestBlockHeight - 9, + }, + { + name: "node's version is too old for current latest", + nodeVersion: "0.0.1", + versionEvents: []*flow.SealedVersionBeacon{ + // the node's version is too old for the earliest version boundary for the network + VersionBeaconEvent(sealedRootBlockHeight-100, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight - 50, Version: "0.0.2"}), + }, + expectedStart: math.MaxUint64, + expectedEnd: math.MaxUint64, + }, + { + name: "node's version is too new for current latest", + nodeVersion: "0.0.3", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight-100, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight - 50, Version: "0.0.2"}), + + // the version boundary that transitions to the node's version applies after the + // latest finalized block, so the node's version is not compatible with any block + VersionBeaconEvent(latestBlockHeight-3, + flow.VersionBoundary{BlockHeight: latestBlockHeight + 1, Version: "0.0.3"}), + VersionBeaconEvent(latestBlockHeight-2, + flow.VersionBoundary{BlockHeight: latestBlockHeight + 2, Version: "0.0.4"}), + }, + expectedStart: math.MaxUint64, + expectedEnd: math.MaxUint64, + }, + { + name: "pre-release versions handled as expected", + nodeVersion: "0.0.1-pre-release.1", + versionEvents: []*flow.SealedVersionBeacon{ + // 0.0.1-pre-release.1 > 
0.0.1-pre-release.0 + VersionBeaconEvent(sealedRootBlockHeight+10, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 12, Version: "0.0.1-pre-release.0"}), + // 0.0.1-pre-release.1 < 0.0.1 + VersionBeaconEvent(sealedRootBlockHeight+12, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 14, Version: "0.0.1"}), + }, + expectedStart: sealedRootBlockHeight + 12, + expectedEnd: sealedRootBlockHeight + 13, + }, + { + name: "0.0.0 handled as expected", + nodeVersion: "0.0.0-20230101000000-c0c9f774e40c", + versionEvents: []*flow.SealedVersionBeacon{ + // 0.0.0-20230101000000-c0c9f774e40c > 0.0.0-20220101000000-7b4eea64cf58 + VersionBeaconEvent(sealedRootBlockHeight+10, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 12, Version: "0.0.0-20220101000000-7b4eea64cf58"}), + // 0.0.0-20230101000000-c0c9f774e40c < 0.0.0-20240101000000-6ceb2ff114de + VersionBeaconEvent(sealedRootBlockHeight+12, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 14, Version: "0.0.0-20240101000000-6ceb2ff114de"}), + }, + expectedStart: sealedRootBlockHeight + 12, + expectedEnd: sealedRootBlockHeight + 13, + }, + { + name: "start and end version set, start ignored due to override", + nodeVersion: "0.0.2", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight+10, flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 12, Version: "0.0.1"}), + VersionBeaconEvent(latestBlockHeight-10, flow.VersionBoundary{BlockHeight: latestBlockHeight - 8, Version: "0.0.3"}), + }, + overrides: map[string]struct{}{"0.0.1": {}}, + expectedStart: sealedRootBlockHeight, + expectedEnd: latestBlockHeight - 9, + }, + { + name: "start and end version set, end ignored due to override", + nodeVersion: "0.0.2", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight+10, flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 12, Version: "0.0.1"}), + VersionBeaconEvent(latestBlockHeight-10, flow.VersionBoundary{BlockHeight: latestBlockHeight - 8, Version: "0.0.3"}), + }, + overrides: map[string]struct{}{"0.0.3": {}}, + expectedStart: sealedRootBlockHeight + 12, + expectedEnd: latestBlockHeight, + }, + { + name: "start and end version set, middle event ignored due to override", + nodeVersion: "0.0.2", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight+10, flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 12, Version: "0.0.1"}), + VersionBeaconEvent(latestBlockHeight-3, flow.VersionBoundary{BlockHeight: latestBlockHeight - 1, Version: "0.0.3"}), + VersionBeaconEvent(latestBlockHeight-10, flow.VersionBoundary{BlockHeight: latestBlockHeight - 8, Version: "0.0.4"}), + }, + overrides: map[string]struct{}{"0.0.3": {}}, + expectedStart: sealedRootBlockHeight + 12, + expectedEnd: latestBlockHeight - 9, + }, + { + name: "pre-release version matches overrides", + nodeVersion: "0.0.2", + versionEvents: []*flow.SealedVersionBeacon{ + VersionBeaconEvent(sealedRootBlockHeight+10, flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 12, Version: "0.0.1-pre-release.0"}), + VersionBeaconEvent(latestBlockHeight-10, flow.VersionBoundary{BlockHeight: latestBlockHeight - 8, Version: "0.0.3"}), + }, + overrides: map[string]struct{}{"0.0.1": {}}, + expectedStart: sealedRootBlockHeight, + expectedEnd: latestBlockHeight - 9, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + eventMap := make(map[uint64]*flow.SealedVersionBeacon, len(testCase.versionEvents)) + for _, event :=
range testCase.versionEvents { + eventMap[event.SealHeight] = event + } + + // make sure events are sorted descending by seal height + sort.Slice(testCase.versionEvents, func(i, j int) bool { + return testCase.versionEvents[i].SealHeight > testCase.versionEvents[j].SealHeight + }) + + versionBeacons := storageMock.NewVersionBeacons(t) + versionBeacons. + On("Highest", testifyMock.AnythingOfType("uint64")). + Return(func(height uint64) (*flow.SealedVersionBeacon, error) { + // iterating through events sorted descending by seal height + // return the first event that was sealed in a height less than or equal to height + for _, event := range testCase.versionEvents { + if event.SealHeight <= height { + return event, nil + } + } + return nil, storage.ErrNotFound + }) + + vc := createVersionControlComponent(t, versionComponentTestConfigs{ + nodeVersion: testCase.nodeVersion, + versionBeacons: versionBeacons, + sealedRootBlockHeight: sealedRootBlockHeight, + latestFinalizedBlockHeight: latestBlockHeight, + overrides: testCase.overrides, + signalerContext: irrecoverable.NewMockSignalerContext(t, ctx), + }) + + checks := generateChecks(testCase, sealedRootBlockHeight, latestBlockHeight) + + for height, expected := range checks { + compatible, err := vc.CompatibleAtBlock(height) + + require.NoError(t, err) + assert.Equal(t, expected, compatible, "unexpected compatibility at height %d. want: %t, got %t", height, expected, compatible) + } + }) + } +} + +// TestVersionControlInitializationWithErrors tests the initialization process of the VersionControl component with error cases +func TestVersionControlInitializationWithErrors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sealedRootBlockHeight := uint64(1000) + latestBlockHeight := sealedRootBlockHeight + 100 + eventMap := map[uint64]*flow.SealedVersionBeacon{ + sealedRootBlockHeight + 10: VersionBeaconEvent(sealedRootBlockHeight+10, + flow.VersionBoundary{BlockHeight: sealedRootBlockHeight + 12, Version: "0.0.1"}), + } + + versionBeacons := storageMock.NewVersionBeacons(t) + + checkForError := func(height uint64) { + versionBeacons. + On("Highest", testifyMock.AnythingOfType("uint64")). + Return(mocks.StorageMapGetter(eventMap)).Once() + + vc := createVersionControlComponent(t, versionComponentTestConfigs{ + nodeVersion: "0.0.1", + versionBeacons: versionBeacons, + sealedRootBlockHeight: sealedRootBlockHeight, + latestFinalizedBlockHeight: latestBlockHeight, + signalerContext: irrecoverable.NewMockSignalerContext(t, ctx), + }) + + compatible, err := vc.CompatibleAtBlock(height) + + assert.ErrorIs(t, err, ErrOutOfRange) + assert.False(t, compatible) + } + + t.Run("height is bigger than latest block height", func(t *testing.T) { + checkForError(latestBlockHeight + 1) + }) + + t.Run("height is smaller than sealed root block height", func(t *testing.T) { + checkForError(sealedRootBlockHeight - 1) + }) + + t.Run("failed to complete initialization because version beacon \"Highest\" returned an error", func(t *testing.T) { + decodeErr := fmt.Errorf("test decode error") + + versionBeacons. + On("Highest", testifyMock.AnythingOfType("uint64")).
+ Return(nil, decodeErr).Once() + + vc, err := NewVersionControl( + unittest.Logger(), + versionBeacons, + semver.New("0.0.1"), + sealedRootBlockHeight, + latestBlockHeight, + ) + require.NoError(t, err) + + vc.Start(irrecoverable.NewMockSignalerContextExpectError(t, ctx, fmt.Errorf( + "failed to get highest version beacon for version control: %w", + decodeErr))) + + unittest.AssertNotClosesBefore(t, vc.Ready(), 2*time.Second) + }) +} + +func generateChecks(testCase testCaseConfig, finalizedRootBlockHeight, latestBlockHeight uint64) map[uint64]bool { + checks := map[uint64]bool{} + if testCase.expectedStart == math.MaxUint64 && testCase.expectedEnd == math.MaxUint64 { + for height := finalizedRootBlockHeight; height <= latestBlockHeight; height++ { + checks[height] = false + } + return checks + } + + checks[testCase.expectedStart] = true + checks[testCase.expectedEnd] = true + + if testCase.expectedStart > finalizedRootBlockHeight { + checks[finalizedRootBlockHeight] = false + checks[testCase.expectedStart-1] = false + } + + if testCase.expectedEnd < latestBlockHeight { + checks[latestBlockHeight] = false + checks[testCase.expectedEnd+1] = false + } + + return checks +} + +// TestVersionBoundaryReceived tests the behavior of the VersionControl component when a new +// version beacon event is received. +func TestVersionBoundaryReceived(t *testing.T) { + signalCtx := irrecoverable.NewMockSignalerContext(t, context.Background()) + + contract := &versionBeaconContract{} + + // Create version event for initial height + latestHeight := uint64(10) + boundaryHeight := uint64(13) + + vc := createVersionControlComponent(t, versionComponentTestConfigs{ + nodeVersion: "0.0.1", + versionBeacons: contract, + sealedRootBlockHeight: 0, + latestFinalizedBlockHeight: latestHeight, + overrides: map[string]struct{}{"0.0.2": {}}, // skip event at 0.0.2 + signalerContext: signalCtx, + }) + + var assertUpdate func(height uint64, version *semver.Version) + var assertCallbackCalled, assertCallbackNotCalled func() + + // Add a consumer to verify version updates + vc.AddVersionUpdatesConsumer(func(height uint64, version *semver.Version) { + assertUpdate(height, version) + }) + assert.Len(t, vc.consumers, 1) + + // At this point, both start and end heights are unset + + // Add a new boundary, and finalize the block + latestHeight++ // 11 + contract.AddBoundary(latestHeight, flow.VersionBoundary{BlockHeight: boundaryHeight, Version: "0.0.2"}) + + // This event should be skipped due to the override + assertUpdate, assertCallbackNotCalled = generateConsumerIgnoredAssertions(t) + vc.blockFinalized(signalCtx, latestHeight) + assertCallbackNotCalled() + + // Next, add another new boundary and finalize the block + latestHeight++ // 12 + contract.AddBoundary(latestHeight, flow.VersionBoundary{BlockHeight: boundaryHeight, Version: "0.0.3"}) + + assertUpdate, assertCallbackCalled = generateConsumerAssertions(t, boundaryHeight, semver.New("0.0.3")) + vc.blockFinalized(signalCtx, latestHeight) + assertCallbackCalled() + + // Finally, finalize one more block to get past the boundary + latestHeight++ // 13 + vc.blockFinalized(signalCtx, latestHeight) + + // Check compatibility at key heights + compatible, err := vc.CompatibleAtBlock(10) + require.NoError(t, err) + assert.True(t, compatible) + + compatible, err = vc.CompatibleAtBlock(12) + require.NoError(t, err) + assert.True(t, compatible) + + compatible, err = vc.CompatibleAtBlock(13) + require.NoError(t, err) + assert.False(t, compatible) +} + +// TestVersionBoundaryUpdated 
tests the behavior of the VersionControl component when the version is updated. +func TestVersionBoundaryUpdated(t *testing.T) { + signalCtx := irrecoverable.NewMockSignalerContext(t, context.Background()) + + contract := &versionBeaconContract{} + + // Create version event for initial height + latestHeight := uint64(10) + boundaryHeight := uint64(13) + + vc := createVersionControlComponent(t, versionComponentTestConfigs{ + nodeVersion: "0.0.1", + versionBeacons: contract, + sealedRootBlockHeight: 0, + latestFinalizedBlockHeight: latestHeight, + signalerContext: signalCtx, + }) + + var assertUpdate func(height uint64, version *semver.Version) + var assertCallbackCalled func() + + // Add a consumer to verify version updates + vc.AddVersionUpdatesConsumer(func(height uint64, version *semver.Version) { + assertUpdate(height, version) + }) + assert.Len(t, vc.consumers, 1) + + // At this point, both start and end heights are unset + + // Add a new boundary, and finalize the block + latestHeight++ // 11 + contract.AddBoundary(latestHeight, flow.VersionBoundary{BlockHeight: boundaryHeight, Version: "0.0.2"}) + + assertUpdate, assertCallbackCalled = generateConsumerAssertions(t, boundaryHeight, semver.New("0.0.2")) + vc.blockFinalized(signalCtx, latestHeight) + assertCallbackCalled() + + // Next, update the boundary and finalize the block + latestHeight++ // 12 + contract.UpdateBoundary(latestHeight, boundaryHeight, "0.0.3") + + assertUpdate, assertCallbackCalled = generateConsumerAssertions(t, boundaryHeight, semver.New("0.0.3")) + vc.blockFinalized(signalCtx, latestHeight) + assertCallbackCalled() + + // Finally, finalize one more block to get past the boundary + latestHeight++ // 13 + vc.blockFinalized(signalCtx, latestHeight) + + // Check compatibility at various heights + compatible, err := vc.CompatibleAtBlock(10) + require.NoError(t, err) + assert.True(t, compatible) + + compatible, err = vc.CompatibleAtBlock(12) + require.NoError(t, err) + assert.True(t, compatible) + + compatible, err = vc.CompatibleAtBlock(13) + require.NoError(t, err) + assert.False(t, compatible) +} + +// TestVersionBoundaryDeleted tests the behavior of the VersionControl component when the version is deleted. 
+func TestVersionBoundaryDeleted(t *testing.T) { + signalCtx := irrecoverable.NewMockSignalerContext(t, context.Background()) + + contract := &versionBeaconContract{} + + // Create version event for initial height + latestHeight := uint64(10) + boundaryHeight := uint64(13) + + vc := createVersionControlComponent(t, versionComponentTestConfigs{ + nodeVersion: "0.0.1", + versionBeacons: contract, + sealedRootBlockHeight: 0, + latestFinalizedBlockHeight: latestHeight, + signalerContext: signalCtx, + }) + + var assertUpdate func(height uint64, version *semver.Version) + var assertCallbackCalled func() + + // Add a consumer to verify version updates + vc.AddVersionUpdatesConsumer(func(height uint64, version *semver.Version) { + assertUpdate(height, version) + }) + assert.Len(t, vc.consumers, 1) + + // Add a new boundary, and finalize the block + latestHeight++ // 11 + contract.AddBoundary(latestHeight, flow.VersionBoundary{BlockHeight: boundaryHeight, Version: "0.0.2"}) + + assertUpdate, assertCallbackCalled = generateConsumerAssertions(t, boundaryHeight, semver.New("0.0.2")) + vc.blockFinalized(signalCtx, latestHeight) + assertCallbackCalled() + + // Next, delete the boundary and finalize the block + latestHeight++ // 12 + contract.DeleteBoundary(latestHeight, boundaryHeight) + + assertUpdate, assertCallbackCalled = generateConsumerAssertions(t, boundaryHeight, nil) // called with a nil version, signalling deletion + vc.blockFinalized(signalCtx, latestHeight) + assertCallbackCalled() + + // Finally, finalize one more block to get past the boundary + latestHeight++ // 13 + vc.blockFinalized(signalCtx, latestHeight) + + // Check compatibility at various heights + compatible, err := vc.CompatibleAtBlock(10) + require.NoError(t, err) + assert.True(t, compatible) + + compatible, err = vc.CompatibleAtBlock(12) + require.NoError(t, err) + assert.True(t, compatible) + + compatible, err = vc.CompatibleAtBlock(13) + require.NoError(t, err) + assert.True(t, compatible) +} + +// TestNotificationSkippedForCompatibleVersions tests that the VersionControl component does not +// send notifications to consumers for VersionBeacon events with compatible versions.
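The deletion test above and the skip test below pin down the consumer contract: a boundary addition or update notifies with the boundary height and the new version, while a deletion notifies with the old boundary height and a nil version. A sketch of a consumer honoring that contract, as it might look inside this package (`exampleConsumer` is illustrative, not part of the PR):

```go
// exampleConsumer reacts to version-boundary notifications; a nil version means a
// previously announced incompatible boundary was removed.
func exampleConsumer(log zerolog.Logger) VersionControlConsumer {
	return func(height uint64, version *semver.Version) {
		if version == nil {
			log.Info().Uint64("height", height).
				Msg("version boundary deleted; node stays compatible past this height")
			return
		}
		log.Warn().Uint64("height", height).Str("version", version.String()).
			Msg("incompatible version boundary ahead; upgrade before this height")
	}
}
```

Registered via `vc.AddVersionUpdatesConsumer(exampleConsumer(log))`, it fires once per boundary change, matching what the assertions in these tests verify.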
+func TestNotificationSkippedForCompatibleVersions(t *testing.T) { + signalCtx := irrecoverable.NewMockSignalerContext(t, context.Background()) + + contract := &versionBeaconContract{} + + // Create version event for initial height + latestHeight := uint64(10) + boundaryHeight := uint64(13) + + vc := createVersionControlComponent(t, versionComponentTestConfigs{ + nodeVersion: "0.0.1", + versionBeacons: contract, + sealedRootBlockHeight: 0, + latestFinalizedBlockHeight: latestHeight, + signalerContext: signalCtx, + }) + + // Add a consumer to verify notification is never sent + vc.AddVersionUpdatesConsumer(func(height uint64, version *semver.Version) { + t.Errorf("unexpected callback called at height %d with version %s", height, version) + }) + assert.Len(t, vc.consumers, 1) + + // Add a new boundary, and finalize the block + latestHeight++ // 11 + contract.AddBoundary(latestHeight, flow.VersionBoundary{BlockHeight: boundaryHeight, Version: "0.0.1-pre-release"}) + + vc.blockFinalized(signalCtx, latestHeight) + + // Check compatibility at various heights + compatible, err := vc.CompatibleAtBlock(10) + require.NoError(t, err) + assert.True(t, compatible) + + compatible, err = vc.CompatibleAtBlock(11) + require.NoError(t, err) + assert.True(t, compatible) +} + +// TestIsOverridden tests that the isOverridden method of the VersionControl component correctly matches +// versions +func TestIsOverridden(t *testing.T) { + vc := &VersionControl{ + compatibilityOverrides: map[string]struct{}{"0.0.1": {}}, + overridesLogSuppression: make(map[string]struct{}), + } + + assert.True(t, vc.isOverridden(semver.New("0.0.1"))) + assert.True(t, vc.isOverridden(semver.New("0.0.1-pre-release"))) + + assert.False(t, vc.isOverridden(semver.New("0.0.2"))) + assert.False(t, vc.isOverridden(semver.New("0.0.2-pre-release"))) + + assert.False(t, vc.isOverridden(semver.New("1.0.1"))) + assert.False(t, vc.isOverridden(semver.New("0.1.1"))) +} + +func generateConsumerAssertions( + t *testing.T, + boundaryHeight uint64, + version *semver.Version, +) (func(height uint64, semver *semver.Version), func()) { + called := false + + assertUpdate := func(height uint64, semver *semver.Version) { + assert.Equal(t, boundaryHeight, height) + assert.Equal(t, version, semver) + called = true + } + + assertCalled := func() { + assert.True(t, called) + } + + return assertUpdate, assertCalled +} + +func generateConsumerIgnoredAssertions( + t *testing.T, +) (func(uint64, *semver.Version), func()) { + called := false + + assertUpdate := func(uint64, *semver.Version) { + called = true + } + + assertNotCalled := func() { + assert.False(t, called) + } + + return assertUpdate, assertNotCalled +} + +// versionComponentTestConfigs contains custom tweaks for version control creation +type versionComponentTestConfigs struct { + nodeVersion string + versionBeacons storage.VersionBeacons + sealedRootBlockHeight uint64 + latestFinalizedBlockHeight uint64 + overrides map[string]struct{} + signalerContext *irrecoverable.MockSignalerContext +} + +func createVersionControlComponent( + t *testing.T, + config versionComponentTestConfigs, +) *VersionControl { + // Create a new VersionControl instance with initial parameters. + vc, err := NewVersionControl( + unittest.Logger(), + config.versionBeacons, + semver.New(config.nodeVersion), + config.sealedRootBlockHeight, + config.latestFinalizedBlockHeight, + ) + require.NoError(t, err) + + if config.overrides != nil { + vc.compatibilityOverrides = config.overrides + } + + // Start the VersionControl component.
+ vc.Start(config.signalerContext) + unittest.RequireComponentsReadyBefore(t, 2*time.Second, vc) + + return vc +} + +// VersionBeaconEvent creates a SealedVersionBeacon for the given heights and versions. +func VersionBeaconEvent(sealHeight uint64, vb ...flow.VersionBoundary) *flow.SealedVersionBeacon { + return &flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries(vb...), + ), + SealHeight: sealHeight, + } +} + +type versionBeaconContract struct { + boundaries []flow.VersionBoundary + events []*flow.SealedVersionBeacon +} + +func (c *versionBeaconContract) Highest(belowOrEqualTo uint64) (*flow.SealedVersionBeacon, error) { + for _, event := range c.events { + if event.SealHeight <= belowOrEqualTo { + return event, nil + } + } + return nil, storage.ErrNotFound +} + +func (c *versionBeaconContract) AddBoundary(sealedHeight uint64, boundary flow.VersionBoundary) { + c.boundaries = append(c.boundaries, boundary) + c.emitEvent(sealedHeight) +} + +func (c *versionBeaconContract) DeleteBoundary(sealedHeight, boundaryHeight uint64) { + for i, boundary := range c.boundaries { + if boundary.BlockHeight == boundaryHeight { + c.boundaries = append(c.boundaries[:i], c.boundaries[i+1:]...) + break + } + } + c.emitEvent(sealedHeight) +} + +func (c *versionBeaconContract) UpdateBoundary(sealedHeight, boundaryHeight uint64, version string) { + for i, boundary := range c.boundaries { + if boundary.BlockHeight == boundaryHeight { + c.boundaries[i].Version = version + break + } + } + c.emitEvent(sealedHeight) +} + +func (c *versionBeaconContract) emitEvent(sealedHeight uint64) { + // sort boundaries ascending by height + sort.Slice(c.boundaries, func(i, j int) bool { + return c.boundaries[i].BlockHeight < c.boundaries[j].BlockHeight + }) + + // include only future boundaries + boundaries := make([]flow.VersionBoundary, 0) + for _, boundary := range c.boundaries { + if boundary.BlockHeight >= sealedHeight { + boundaries = append(boundaries, boundary) + } + } + c.events = append(c.events, VersionBeaconEvent(sealedHeight, boundaries...)) + + // sort boundaries descending by height + sort.Slice(c.events, func(i, j int) bool { + return c.events[i].SealHeight > c.events[j].SealHeight + }) +} diff --git a/engine/common/worker/worker_builder.go b/engine/common/worker/worker_builder.go index cc1c3e7b438..ede7804b665 100644 --- a/engine/common/worker/worker_builder.go +++ b/engine/common/worker/worker_builder.go @@ -10,6 +10,11 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" ) +const ( + QueuedItemProcessingLog = "processing queued work item" + QueuedItemProcessedLog = "finished processing queued work item" +) + // Pool is a worker pool that can be used by a higher-level component to manage a set of workers. // The workers are managed by the higher-level component, but the worker pool provides the logic for // submitting work to the workers and for processing the work. 
The worker pool is responsible for @@ -126,9 +131,9 @@ func (b *PoolBuilder[T]) workerLogic() component.ComponentWorker { b.logger.Trace().Msg("store is empty, waiting for next notification") break // store is empty; go back to outer for loop } - b.logger.Trace().Msg("processing queued work item") + b.logger.Trace().Msg(QueuedItemProcessingLog) err := processingFunc(msg.Payload.(T)) - b.logger.Trace().Msg("finished processing queued work item") + b.logger.Trace().Msg(QueuedItemProcessedLog) if err != nil { ctx.Throw(fmt.Errorf("unexpected error processing queued work item: %w", err)) return diff --git a/engine/common/worker/worker_builder_test.go b/engine/common/worker/worker_builder_test.go index c08da0769c3..e1a0d95b436 100644 --- a/engine/common/worker/worker_builder_test.go +++ b/engine/common/worker/worker_builder_test.go @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/utils/concurrentmap" "github.com/onflow/flow-go/utils/unittest" ) @@ -37,7 +38,7 @@ func TestWorkerPool_SingleEvent_SingleWorker(t *testing.T) { cancelCtx, cancel := context.WithCancel(context.Background()) defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) + ctx := irrecoverable.NewMockSignalerContext(t, cancelCtx) cm := component.NewComponentManagerBuilder(). AddWorker(pool.WorkerLogic()). Build() @@ -74,7 +75,7 @@ func TestWorkerBuilder_UnhappyPaths(t *testing.T) { cancelCtx, cancel := context.WithCancel(context.Background()) defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) + ctx := irrecoverable.NewMockSignalerContext(t, cancelCtx) cm := component.NewComponentManagerBuilder(). AddWorker(pool.WorkerLogic()). Build() @@ -115,7 +116,7 @@ func TestWorkerPool_TwoWorkers_ConcurrentEvents(t *testing.T) { } q := queue.NewHeroStore(uint32(size), unittest.Logger(), metrics.NewNoopCollector()) - distributedEvents := unittest.NewProtectedMap[string, struct{}]() + distributedEvents := concurrentmap.New[string, struct{}]() allEventsDistributed := sync.WaitGroup{} allEventsDistributed.Add(size) @@ -137,7 +138,7 @@ func TestWorkerPool_TwoWorkers_ConcurrentEvents(t *testing.T) { cancelCtx, cancel := context.WithCancel(context.Background()) defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) + ctx := irrecoverable.NewMockSignalerContext(t, cancelCtx) cm := component.NewComponentManagerBuilder(). AddWorker(pool.WorkerLogic()). AddWorker(pool.WorkerLogic()). diff --git a/engine/consensus/Sealing_Readme.md b/engine/consensus/Sealing_Readme.md index 27ea0fa7e6e..35b6c1fee42 100644 --- a/engine/consensus/Sealing_Readme.md +++ b/engine/consensus/Sealing_Readme.md @@ -8,7 +8,7 @@ - The sealing logic works with **height** as opposed to view (height is denoted at the bottom of each block) - **Whether or not a block can incorporate an `ExecutionReceipt` or `Seal`** only depends on the fork and **is independent of finality** - ![Forks](/docs/Chain_and_ExecutionResult_trees_A.png) + ![Forks](/docs/images/Chain_and_ExecutionResult_trees_A.png) 2. 
An `ExecutionResult` is a claim that @@ -25,7 +25,7 @@ Chunks ChunkList } ``` - ![Forks with execution results](/docs/Chain_and_ExecutionResult_trees_B.png) + ![Forks with execution results](/docs/images/Chain_and_ExecutionResult_trees_B.png) Notation: `r[B]` is an execution result for block `B` @@ -36,7 +36,7 @@ * Example: result `r[C]_1` and `r[C]_2` * 💡 insight: **The `ExecutionResults` form a tree.** - ![For a single fork of blocks, the execution results can form a tree](/docs/Chain_and_ExecutionResult_trees_C.png) + ![For a single fork of blocks, the execution results can form a tree](/docs/images/Chain_and_ExecutionResult_trees_C.png) Notation: `r[C]` denotes an execution result for block `C`. If there are multiple results, we denote them as `r[C]_1`, `r[C]_2`, ... @@ -53,14 +53,14 @@ which _both have the same parent_ (referenced by `PreviousResultID`). For example, `Er[r[C]_1]` and `Er[r[C]_2]` could be published by the same Execution Node - ![Blocks with execution results and execution receipts](/docs/Chain_and_ExecutionResult_trees_D.png) + ![Blocks with execution results and execution receipts](/docs/images/Chain_and_ExecutionResult_trees_D.png) Notation: `Er[r]` is an execution receipt vouching for result `r`. For example `Er[r[C]_2]` is the receipt for result `r[C]_2` 5. `ResultApprovals` approve results (*not* receipts). - ![Blocks with execution results and execution receipts and result approvals](/docs/Chain_and_ExecutionResult_trees_E.png) + ![Blocks with execution results and execution receipts and result approvals](/docs/images/Chain_and_ExecutionResult_trees_E.png) # Embedding of Execution results and Receipts into _descending_ blocks @@ -70,7 +70,7 @@ Execution receipts and results are embedded into downstream blocks, to record what results the execution nodes [ENs] committed to and to generate verifier assignment. Let's take a look at the following example: -![Verifier Assignments](/docs/VerifierAssignment.png) +![Verifier Assignments](/docs/images/VerifierAssignment.png) * Execution nodes 'Alice' and 'Bob' have both generated the Execution Result `r[A]_1` for block `A`. The Execution Result contains no information about the node generating it. As long as Execution Nodes generate exactly the same result for a particular block (a.g. block `A`), diff --git a/engine/consensus/approvals/Readme.md b/engine/consensus/approvals/Readme.md index 42425cfc6c2..83044d7b8de 100644 --- a/engine/consensus/approvals/Readme.md +++ b/engine/consensus/approvals/Readme.md @@ -54,7 +54,7 @@ Formally, we define: `c[r[B]] --> c[r_parent]` in the Assignment Collector Tree. * Lastly, for an `AssignmentCollector`, we define a `level` as the height of the executed block. -![Assignment Collector Tree](/docs/AssignmentCollectorTree_1.png) +![Assignment Collector Tree](/docs/images/AssignmentCollectorTree_1.png) ### Orphaning forks in the Assignment Collector Tree @@ -86,7 +86,7 @@ that the parent is not yet present (see following figure for illustration). This it is likely that the result is already sealed by the time it caught up. We permit a simple implementation that is to not keep track of the order in which it received the approvals. 
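A minimal sketch of the "simple implementation" described above: approvals arriving for a not-yet-constructible collector are cached unordered, keyed by approval ID (editorial illustration assuming the usual `sync` and `flow` imports; the repository's actual caching collector differs in detail):

```go
// cachingCollector caches approvals without tracking arrival order, matching the
// simplification described in the paragraph above.
type cachingCollector struct {
	mu        sync.Mutex
	approvals map[flow.Identifier]*flow.ResultApproval // keyed by approval ID
}

func (c *cachingCollector) Cache(approval *flow.ResultApproval) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.approvals[approval.ID()] = approval
}
```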
-![Assignment Collector Tree](/docs/AssignmentCollectorTree_2.png) +![Assignment Collector Tree](/docs/images/AssignmentCollectorTree_2.png) In the future, we will also add further states for 'Extensive Checking Mode': * `ExtensiveCheckingMode`: the `AssignmentCollector` has not received sufficient approvals and sealing is lagging behind @@ -144,7 +144,7 @@ The decision whether the ER can be emergency sealed is governed by two protocol `DefaultEmergencySealingThresholdForFinalization` and `DefaultEmergencySealingThresholdForVerification`. For an ER to be emergency sealed, all of the following conditions have to be satisfied: -![Emergency sealing](/docs/Emergency_Sealing.png) +![Emergency sealing](/docs/images/Emergency_Sealing.png) 1. Let Δh<sub>1</sub> be the height difference between the latest finalized block and block `A`. We require that Δh<sub>1</sub> > `DefaultEmergencySealingThresholdForFinalization`. diff --git a/engine/consensus/approvals/approval_collector.go b/engine/consensus/approvals/approval_collector.go index 199bad8743a..03447ec91ba 100644 --- a/engine/consensus/approvals/approval_collector.go +++ b/engine/consensus/approvals/approval_collector.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool" ) @@ -36,8 +37,11 @@ func NewApprovalCollector( ) (*ApprovalCollector, error) { chunkCollectors := make([]*ChunkApprovalCollector, 0, result.Result.Chunks.Len()) for _, chunk := range result.Result.Chunks { - chunkAssignment := assignment.Verifiers(chunk).Lookup() - collector := NewChunkApprovalCollector(chunkAssignment, requiredApprovalsForSealConstruction) + assignedVerifiers, err := assignment.Verifiers(chunk.Index) + if err != nil { + return nil, fmt.Errorf("getting verifiers for chunk %d failed: %w", chunk.Index, err) + } + collector := NewChunkApprovalCollector(assignedVerifiers, requiredApprovalsForSealConstruction) chunkCollectors = append(chunkCollectors, collector) } @@ -97,6 +101,8 @@ func (c *ApprovalCollector) IncorporatedResult() *flow.IncorporatedResult { return c.incorporatedResult } +// SealResult generates and stores the seal into the mempool. +// No errors are expected during normal operation. func (c *ApprovalCollector) SealResult() error { // get final state of execution result finalState, err := c.incorporatedResult.Result.FinalStateCommitment() @@ -108,11 +114,16 @@ func (c *ApprovalCollector) SealResult() error { // TODO: Check SPoCK proofs // generate & store seal - seal := &flow.Seal{ - BlockID: c.incorporatedResult.Result.BlockID, - ResultID: c.incorporatedResult.Result.ID(), - FinalState: finalState, - AggregatedApprovalSigs: c.aggregatedSignatures.Collect(), + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: c.incorporatedResult.Result.BlockID, + ResultID: c.incorporatedResult.Result.ID(), + FinalState: finalState, + AggregatedApprovalSigs: c.aggregatedSignatures.Collect(), + }, + ) + if err != nil { + return irrecoverable.NewExceptionf("could not construct seal : %w", err) } // Adding a seal that already exists in the mempool is a NoOp. 
But to reduce log diff --git a/engine/consensus/approvals/approval_collector_test.go b/engine/consensus/approvals/approval_collector_test.go index e3d035b8a92..4b283559eb8 100644 --- a/engine/consensus/approvals/approval_collector_test.go +++ b/engine/consensus/approvals/approval_collector_test.go @@ -92,7 +92,14 @@ func (s *ApprovalCollectorTestSuite) TestCollectMissingVerifiers() { assignedVerifiers := make(map[uint64]flow.IdentifierList) for _, chunk := range s.Chunks { - assignedVerifiers[chunk.Index] = s.ChunksAssignment.Verifiers(chunk) + verifiers, err := s.ChunksAssignment.Verifiers(chunk.Index) + require.NoError(s.T(), err) + // we need a consistent iteration order later, so convert to slice + v := make([]flow.Identifier, 0, len(verifiers)) + for id := range verifiers { + v = append(v, id) + } + assignedVerifiers[chunk.Index] = v } // no approvals processed diff --git a/engine/consensus/approvals/approvals_lru_cache.go b/engine/consensus/approvals/approvals_lru_cache.go index f4b84d008a1..26fccc715ff 100644 --- a/engine/consensus/approvals/approvals_lru_cache.go +++ b/engine/consensus/approvals/approvals_lru_cache.go @@ -3,7 +3,7 @@ package approvals import ( "sync" - "github.com/hashicorp/golang-lru/simplelru" + "github.com/hashicorp/golang-lru/v2/simplelru" "github.com/onflow/flow-go/model/flow" ) @@ -11,7 +11,7 @@ import ( // LruCache is a wrapper over `simplelru.LRUCache` that provides needed api for processing result approvals // Extends functionality of `simplelru.LRUCache` by introducing additional index for quicker access. type LruCache struct { - lru simplelru.LRUCache + lru simplelru.LRUCache[flow.Identifier, *flow.ResultApproval] lock sync.RWMutex // secondary index by result id, since multiple approvals could // reference same result @@ -21,8 +21,7 @@ type LruCache struct { func NewApprovalsLRUCache(limit uint) *LruCache { byResultID := make(map[flow.Identifier]map[flow.Identifier]struct{}) // callback has to be called while we are holding lock - lru, _ := simplelru.NewLRU(int(limit), func(key interface{}, value interface{}) { - approval := value.(*flow.ResultApproval) + lru, _ := simplelru.NewLRU(int(limit), func(key flow.Identifier, approval *flow.ResultApproval) { delete(byResultID[approval.Body.ExecutionResultID], approval.Body.PartialID()) if len(byResultID[approval.Body.ExecutionResultID]) == 0 { delete(byResultID, approval.Body.ExecutionResultID) @@ -40,7 +39,7 @@ func (c *LruCache) Peek(approvalID flow.Identifier) *flow.ResultApproval { // check if we have it in the cache resource, cached := c.lru.Peek(approvalID) if cached { - return resource.(*flow.ResultApproval) + return resource } return nil @@ -52,7 +51,7 @@ func (c *LruCache) Get(approvalID flow.Identifier) *flow.ResultApproval { // check if we have it in the cache resource, cached := c.lru.Get(approvalID) if cached { - return resource.(*flow.ResultApproval) + return resource } return nil @@ -74,7 +73,7 @@ func (c *LruCache) TakeByResultID(resultID flow.Identifier) []*flow.ResultApprov // no need to cleanup secondary index since it will be // cleaned up in evict callback _ = c.lru.Remove(approvalID) - approvals = append(approvals, resource.(*flow.ResultApproval)) + approvals = append(approvals, resource) } } diff --git a/engine/consensus/approvals/assignment_collector_base.go b/engine/consensus/approvals/assignment_collector_base.go index 58307b6c9f0..55839b5aafc 100644 --- a/engine/consensus/approvals/assignment_collector_base.go +++ b/engine/consensus/approvals/assignment_collector_base.go @@ -4,7 +4,8 
@@ import ( "github.com/gammazero/workerpool" "github.com/rs/zerolog" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool" diff --git a/engine/consensus/approvals/assignment_collector_statemachine_test.go b/engine/consensus/approvals/assignment_collector_statemachine_test.go index 65ed3210fea..aeb3bd6a3b4 100644 --- a/engine/consensus/approvals/assignment_collector_statemachine_test.go +++ b/engine/consensus/approvals/assignment_collector_statemachine_test.go @@ -1,7 +1,6 @@ package approvals import ( - "errors" "sync" "testing" "time" @@ -129,6 +128,5 @@ func (s *AssignmentCollectorStateMachineTestSuite) TestChangeProcessingStatus_In require.Equal(s.T(), Orphaned, s.collector.ProcessingStatus()) // then try to perform transition from caching to verifying err = s.collector.ChangeProcessingStatus(CachingApprovals, VerifyingApprovals) - require.Error(s.T(), err) - require.True(s.T(), errors.Is(err, ErrDifferentCollectorState)) + require.ErrorIs(s.T(), err, ErrDifferentCollectorState) } diff --git a/engine/consensus/approvals/assignment_collector_tree.go b/engine/consensus/approvals/assignment_collector_tree.go index e161a75faa4..c31282b3e8a 100644 --- a/engine/consensus/approvals/assignment_collector_tree.go +++ b/engine/consensus/approvals/assignment_collector_tree.go @@ -160,11 +160,10 @@ func (t *AssignmentCollectorTree) selectCollectorsForFinalizedFork(startHeight, var fork []*assignmentCollectorVertex for height := startHeight; height <= finalizedHeight; height++ { iter := t.forest.GetVerticesAtLevel(height) - finalizedBlock, err := t.headers.ByHeight(height) + finalizedBlockID, err := t.headers.BlockIDByHeight(height) if err != nil { return nil, fmt.Errorf("could not retrieve finalized block at height %d: %w", height, err) } - finalizedBlockID := finalizedBlock.ID() for iter.HasNext() { vertex := iter.NextVertex().(*assignmentCollectorVertex) if finalizedBlockID == vertex.collector.BlockID() { diff --git a/engine/consensus/approvals/assignment_collector_tree_test.go b/engine/consensus/approvals/assignment_collector_tree_test.go index 7f8f00f82ac..b05a2212682 100644 --- a/engine/consensus/approvals/assignment_collector_tree_test.go +++ b/engine/consensus/approvals/assignment_collector_tree_test.go @@ -100,7 +100,7 @@ func (s *AssignmentCollectorTreeSuite) TestGetSize_ConcurrentAccess() { result0 := unittest.ExecutionResultFixture() receipts := unittest.ReceiptChainFor(chain, result0) for _, block := range chain { - s.Blocks[block.ID()] = block.Header + s.Blocks[block.ID()] = block.ToHeader() } for _, receipt := range receipts { s.prepareMockedCollector(&receipt.ExecutionResult) @@ -147,7 +147,7 @@ func (s *AssignmentCollectorTreeSuite) TestGetCollectorsByInterval() { chain := unittest.ChainFixtureFrom(10, s.ParentBlock) receipts := unittest.ReceiptChainFor(chain, s.IncorporatedResult.Result) for _, block := range chain { - s.Blocks[block.ID()] = block.Header + s.Blocks[block.ID()] = block.ToHeader() } // Process all receipts except first one. This generates a chain of collectors but all of them will be @@ -226,12 +226,12 @@ func (s *AssignmentCollectorTreeSuite) TestGetOrCreateCollector_CollectorParentI // Leveled forest doesn't accept vertexes lower than the lowest height. 
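The `approvals_lru_cache.go` hunks above migrate to `github.com/hashicorp/golang-lru/v2`, whose generic API removes the `interface{}` type assertions on every read and gives the eviction callback typed arguments. A minimal standalone sketch of that pattern, using placeholder string/int entries rather than the approvals types:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

func main() {
	// The cache is typed at construction; the eviction callback receives
	// typed key/value arguments, so no assertions are needed anywhere.
	lru, err := simplelru.NewLRU[string, int](2, func(key string, value int) {
		fmt.Printf("evicted %s=%d\n", key, value)
	})
	if err != nil {
		panic(err)
	}
	lru.Add("a", 1)
	lru.Add("b", 2)
	lru.Add("c", 3) // capacity is 2, so "a" is evicted and the callback fires
	if v, ok := lru.Get("b"); ok {
		fmt.Println("b =", v) // v is already an int, no type assertion
	}
}
```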
func (s *AssignmentCollectorTreeSuite) TestGetOrCreateCollector_AddingSealedCollector() { block := unittest.BlockWithParentFixture(s.ParentBlock) - s.Blocks[block.ID()] = block.Header + s.Blocks[block.ID()] = block.ToHeader() result := unittest.ExecutionResultFixture(unittest.WithBlock(block)) s.prepareMockedCollector(result) // generate a few sealed blocks - prevSealedBlock := block.Header + prevSealedBlock := block.ToHeader() for i := 0; i < 5; i++ { sealedBlock := unittest.BlockHeaderWithParentFixture(prevSealedBlock) s.MarkFinalized(sealedBlock) @@ -278,7 +278,7 @@ func (s *AssignmentCollectorTreeSuite) TestFinalizeForkAtLevel_ProcessableAfterS blockID := block.ID() // update caches - s.Blocks[blockID] = block.Header + s.Blocks[blockID] = block.ToHeader() s.IdentitiesCache[blockID] = s.AuthorizedVerifiers IR := unittest.IncorporatedResult.Fixture( @@ -302,7 +302,7 @@ func (s *AssignmentCollectorTreeSuite) TestFinalizeForkAtLevel_ProcessableAfterS } } - finalized := forks[0][0].Header + finalized := forks[0][0].ToHeader() s.MarkFinalized(s.IncorporatedBlock) s.MarkFinalized(finalized) diff --git a/engine/consensus/approvals/chunk_collector_test.go b/engine/consensus/approvals/chunk_collector_test.go index 07177b51829..bb14345b01a 100644 --- a/engine/consensus/approvals/chunk_collector_test.go +++ b/engine/consensus/approvals/chunk_collector_test.go @@ -29,10 +29,9 @@ type ChunkApprovalCollectorTestSuite struct { func (s *ChunkApprovalCollectorTestSuite) SetupTest() { s.BaseApprovalsTestSuite.SetupTest() s.chunk = s.Chunks[0] - s.chunkAssignment = make(map[flow.Identifier]struct{}) - for _, verifier := range s.ChunksAssignment.Verifiers(s.chunk) { - s.chunkAssignment[verifier] = struct{}{} - } + verifiers, err := s.ChunksAssignment.Verifiers(s.chunk.Index) + require.NoError(s.T(), err) + s.chunkAssignment = verifiers s.collector = NewChunkApprovalCollector(s.chunkAssignment, uint(len(s.chunkAssignment))) } diff --git a/engine/consensus/approvals/mock/assignment_collector.go b/engine/consensus/approvals/mock/assignment_collector.go index 40eac99267c..d34c93bd206 100644 --- a/engine/consensus/approvals/mock/assignment_collector.go +++ b/engine/consensus/approvals/mock/assignment_collector.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -16,10 +16,14 @@ type AssignmentCollector struct { mock.Mock } -// Block provides a mock function with given fields: +// Block provides a mock function with no fields func (_m *AssignmentCollector) Block() *flow.Header { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Block") + } + var r0 *flow.Header if rf, ok := ret.Get(0).(func() *flow.Header); ok { r0 = rf() @@ -32,10 +36,14 @@ func (_m *AssignmentCollector) Block() *flow.Header { return r0 } -// BlockID provides a mock function with given fields: +// BlockID provides a mock function with no fields func (_m *AssignmentCollector) BlockID() flow.Identifier { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for BlockID") + } + var r0 flow.Identifier if rf, ok := ret.Get(0).(func() flow.Identifier); ok { r0 = rf() @@ -52,6 +60,10 @@ func (_m *AssignmentCollector) BlockID() flow.Identifier { func (_m *AssignmentCollector) ChangeProcessingStatus(expectedValue approvals.ProcessingStatus, newValue approvals.ProcessingStatus) error { ret := _m.Called(expectedValue, newValue) + if len(ret) == 0 { + panic("no return value specified for ChangeProcessingStatus") + } + var r0 error if rf, ok := ret.Get(0).(func(approvals.ProcessingStatus, approvals.ProcessingStatus) error); ok { r0 = rf(expectedValue, newValue) @@ -66,6 +78,10 @@ func (_m *AssignmentCollector) ChangeProcessingStatus(expectedValue approvals.Pr func (_m *AssignmentCollector) CheckEmergencySealing(observer consensus.SealingObservation, finalizedBlockHeight uint64) error { ret := _m.Called(observer, finalizedBlockHeight) + if len(ret) == 0 { + panic("no return value specified for CheckEmergencySealing") + } + var r0 error if rf, ok := ret.Get(0).(func(consensus.SealingObservation, uint64) error); ok { r0 = rf(observer, finalizedBlockHeight) @@ -80,6 +96,10 @@ func (_m *AssignmentCollector) CheckEmergencySealing(observer consensus.SealingO func (_m *AssignmentCollector) ProcessApproval(approval *flow.ResultApproval) error { ret := _m.Called(approval) + if len(ret) == 0 { + panic("no return value specified for ProcessApproval") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.ResultApproval) error); ok { r0 = rf(approval) @@ -94,6 +114,10 @@ func (_m *AssignmentCollector) ProcessApproval(approval *flow.ResultApproval) er func (_m *AssignmentCollector) ProcessIncorporatedResult(incorporatedResult *flow.IncorporatedResult) error { ret := _m.Called(incorporatedResult) + if len(ret) == 0 { + panic("no return value specified for ProcessIncorporatedResult") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.IncorporatedResult) error); ok { r0 = rf(incorporatedResult) @@ -104,10 +128,14 @@ func (_m *AssignmentCollector) ProcessIncorporatedResult(incorporatedResult *flo return r0 } -// ProcessingStatus provides a mock function with given fields: +// ProcessingStatus provides a mock function with no fields func (_m *AssignmentCollector) ProcessingStatus() approvals.ProcessingStatus { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ProcessingStatus") + } + var r0 approvals.ProcessingStatus if rf, ok := ret.Get(0).(func() approvals.ProcessingStatus); ok { r0 = rf() @@ -122,6 +150,10 @@ func (_m *AssignmentCollector) ProcessingStatus() approvals.ProcessingStatus { func (_m *AssignmentCollector) RequestMissingApprovals(observer consensus.SealingObservation, maxHeightForRequesting uint64) (uint, error) { ret := _m.Called(observer, maxHeightForRequesting) + if len(ret) == 0 { + panic("no 
return value specified for RequestMissingApprovals") + } + var r0 uint var r1 error if rf, ok := ret.Get(0).(func(consensus.SealingObservation, uint64) (uint, error)); ok { @@ -142,10 +174,14 @@ func (_m *AssignmentCollector) RequestMissingApprovals(observer consensus.Sealin return r0, r1 } -// Result provides a mock function with given fields: +// Result provides a mock function with no fields func (_m *AssignmentCollector) Result() *flow.ExecutionResult { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Result") + } + var r0 *flow.ExecutionResult if rf, ok := ret.Get(0).(func() *flow.ExecutionResult); ok { r0 = rf() @@ -158,10 +194,14 @@ func (_m *AssignmentCollector) Result() *flow.ExecutionResult { return r0 } -// ResultID provides a mock function with given fields: +// ResultID provides a mock function with no fields func (_m *AssignmentCollector) ResultID() flow.Identifier { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ResultID") + } + var r0 flow.Identifier if rf, ok := ret.Get(0).(func() flow.Identifier); ok { r0 = rf() @@ -174,13 +214,12 @@ func (_m *AssignmentCollector) ResultID() flow.Identifier { return r0 } -type mockConstructorTestingTNewAssignmentCollector interface { +// NewAssignmentCollector creates a new instance of AssignmentCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAssignmentCollector(t interface { mock.TestingT Cleanup(func()) -} - -// NewAssignmentCollector creates a new instance of AssignmentCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAssignmentCollector(t mockConstructorTestingTNewAssignmentCollector) *AssignmentCollector { +}) *AssignmentCollector { mock := &AssignmentCollector{} mock.Mock.Test(t) diff --git a/engine/consensus/approvals/mock/assignment_collector_state.go b/engine/consensus/approvals/mock/assignment_collector_state.go index a01b83d1263..5df81f76726 100644 --- a/engine/consensus/approvals/mock/assignment_collector_state.go +++ b/engine/consensus/approvals/mock/assignment_collector_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -16,10 +16,14 @@ type AssignmentCollectorState struct { mock.Mock } -// Block provides a mock function with given fields: +// Block provides a mock function with no fields func (_m *AssignmentCollectorState) Block() *flow.Header { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Block") + } + var r0 *flow.Header if rf, ok := ret.Get(0).(func() *flow.Header); ok { r0 = rf() @@ -32,10 +36,14 @@ func (_m *AssignmentCollectorState) Block() *flow.Header { return r0 } -// BlockID provides a mock function with given fields: +// BlockID provides a mock function with no fields func (_m *AssignmentCollectorState) BlockID() flow.Identifier { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for BlockID") + } + var r0 flow.Identifier if rf, ok := ret.Get(0).(func() flow.Identifier); ok { r0 = rf() @@ -52,6 +60,10 @@ func (_m *AssignmentCollectorState) BlockID() flow.Identifier { func (_m *AssignmentCollectorState) CheckEmergencySealing(observer consensus.SealingObservation, finalizedBlockHeight uint64) error { ret := _m.Called(observer, finalizedBlockHeight) + if len(ret) == 0 { + panic("no return value specified for CheckEmergencySealing") + } + var r0 error if rf, ok := ret.Get(0).(func(consensus.SealingObservation, uint64) error); ok { r0 = rf(observer, finalizedBlockHeight) @@ -66,6 +78,10 @@ func (_m *AssignmentCollectorState) CheckEmergencySealing(observer consensus.Sea func (_m *AssignmentCollectorState) ProcessApproval(approval *flow.ResultApproval) error { ret := _m.Called(approval) + if len(ret) == 0 { + panic("no return value specified for ProcessApproval") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.ResultApproval) error); ok { r0 = rf(approval) @@ -80,6 +96,10 @@ func (_m *AssignmentCollectorState) ProcessApproval(approval *flow.ResultApprova func (_m *AssignmentCollectorState) ProcessIncorporatedResult(incorporatedResult *flow.IncorporatedResult) error { ret := _m.Called(incorporatedResult) + if len(ret) == 0 { + panic("no return value specified for ProcessIncorporatedResult") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.IncorporatedResult) error); ok { r0 = rf(incorporatedResult) @@ -90,10 +110,14 @@ func (_m *AssignmentCollectorState) ProcessIncorporatedResult(incorporatedResult return r0 } -// ProcessingStatus provides a mock function with given fields: +// ProcessingStatus provides a mock function with no fields func (_m *AssignmentCollectorState) ProcessingStatus() approvals.ProcessingStatus { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ProcessingStatus") + } + var r0 approvals.ProcessingStatus if rf, ok := ret.Get(0).(func() approvals.ProcessingStatus); ok { r0 = rf() @@ -108,6 +132,10 @@ func (_m *AssignmentCollectorState) ProcessingStatus() approvals.ProcessingStatu func (_m *AssignmentCollectorState) RequestMissingApprovals(observer consensus.SealingObservation, maxHeightForRequesting uint64) (uint, error) { ret := _m.Called(observer, maxHeightForRequesting) + if len(ret) == 0 { + panic("no return value specified for RequestMissingApprovals") + } + var r0 uint var r1 error if rf, ok := ret.Get(0).(func(consensus.SealingObservation, uint64) (uint, error)); ok { @@ -128,10 +156,14 @@ func (_m *AssignmentCollectorState) RequestMissingApprovals(observer consensus.S return r0, r1 } -// Result provides a mock function with given fields: +// Result provides a mock function with no fields func (_m *AssignmentCollectorState) Result() *flow.ExecutionResult { ret 
:= _m.Called() + if len(ret) == 0 { + panic("no return value specified for Result") + } + var r0 *flow.ExecutionResult if rf, ok := ret.Get(0).(func() *flow.ExecutionResult); ok { r0 = rf() @@ -144,10 +176,14 @@ func (_m *AssignmentCollectorState) Result() *flow.ExecutionResult { return r0 } -// ResultID provides a mock function with given fields: +// ResultID provides a mock function with no fields func (_m *AssignmentCollectorState) ResultID() flow.Identifier { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ResultID") + } + var r0 flow.Identifier if rf, ok := ret.Get(0).(func() flow.Identifier); ok { r0 = rf() @@ -160,13 +196,12 @@ func (_m *AssignmentCollectorState) ResultID() flow.Identifier { return r0 } -type mockConstructorTestingTNewAssignmentCollectorState interface { +// NewAssignmentCollectorState creates a new instance of AssignmentCollectorState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAssignmentCollectorState(t interface { mock.TestingT Cleanup(func()) -} - -// NewAssignmentCollectorState creates a new instance of AssignmentCollectorState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAssignmentCollectorState(t mockConstructorTestingTNewAssignmentCollectorState) *AssignmentCollectorState { +}) *AssignmentCollectorState { mock := &AssignmentCollectorState{} mock.Mock.Test(t) diff --git a/engine/consensus/approvals/request_tracker.go b/engine/consensus/approvals/request_tracker.go index 02520d10ee7..7669199c0c0 100644 --- a/engine/consensus/approvals/request_tracker.go +++ b/engine/consensus/approvals/request_tracker.go @@ -2,13 +2,13 @@ package approvals import ( "fmt" - "math/rand" "sync" "time" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/rand" ) /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -28,30 +28,45 @@ type RequestTrackerItem struct { // NewRequestTrackerItem instantiates a new RequestTrackerItem where the // NextTimeout is evaluated to the current time plus a random blackout period // contained between min and max. -func NewRequestTrackerItem(blackoutPeriodMin, blackoutPeriodMax int) RequestTrackerItem { +func NewRequestTrackerItem(blackoutPeriodMin, blackoutPeriodMax int) (RequestTrackerItem, error) { item := RequestTrackerItem{ blackoutPeriodMin: blackoutPeriodMin, blackoutPeriodMax: blackoutPeriodMax, } - item.NextTimeout = randBlackout(blackoutPeriodMin, blackoutPeriodMax) - return item + var err error + item.NextTimeout, err = randBlackout(blackoutPeriodMin, blackoutPeriodMax) + if err != nil { + return RequestTrackerItem{}, err + } + + return item, err } // Update creates a _new_ RequestTrackerItem with incremented request number and updated NextTimeout. -func (i RequestTrackerItem) Update() RequestTrackerItem { +// No errors are expected during normal operation. 
+func (i RequestTrackerItem) Update() (RequestTrackerItem, error) { i.Requests++ - i.NextTimeout = randBlackout(i.blackoutPeriodMin, i.blackoutPeriodMax) - return i + var err error + i.NextTimeout, err = randBlackout(i.blackoutPeriodMin, i.blackoutPeriodMax) + if err != nil { + return RequestTrackerItem{}, fmt.Errorf("could not get next timeout: %w", err) + } + return i, nil } func (i RequestTrackerItem) IsBlackout() bool { return time.Now().Before(i.NextTimeout) } -func randBlackout(min int, max int) time.Time { - blackoutSeconds := rand.Intn(max-min+1) + min +// No errors are expected during normal operation. +func randBlackout(min int, max int) (time.Time, error) { + random, err := rand.Uint64n(uint64(max - min + 1)) + if err != nil { + return time.Now(), fmt.Errorf("failed to generate blackout: %w", err) + } + blackoutSeconds := random + uint64(min) blackout := time.Now().Add(time.Duration(blackoutSeconds) * time.Second) - return blackout + return blackout, nil } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -93,10 +108,14 @@ func (rt *RequestTracker) TryUpdate(result *flow.ExecutionResult, incorporatedBl rt.lock.Lock() defer rt.lock.Unlock() item, ok := rt.index[resultID][incorporatedBlockID][chunkIndex] + var err error if !ok { - item = NewRequestTrackerItem(rt.blackoutPeriodMin, rt.blackoutPeriodMax) - err := rt.set(resultID, result.BlockID, incorporatedBlockID, chunkIndex, item) + item, err = NewRequestTrackerItem(rt.blackoutPeriodMin, rt.blackoutPeriodMax) + if err != nil { + return item, false, fmt.Errorf("could not create tracker item: %w", err) + } + err = rt.set(resultID, result.BlockID, incorporatedBlockID, chunkIndex, item) if err != nil { return item, false, fmt.Errorf("could not set created tracker item: %w", err) } @@ -104,7 +123,10 @@ func (rt *RequestTracker) TryUpdate(result *flow.ExecutionResult, incorporatedBl canUpdate := !item.IsBlackout() if canUpdate { - item = item.Update() + item, err = item.Update() + if err != nil { + return item, false, fmt.Errorf("could not update tracker item: %w", err) + } rt.index[resultID][incorporatedBlockID][chunkIndex] = item } diff --git a/engine/consensus/approvals/request_tracker_test.go b/engine/consensus/approvals/request_tracker_test.go index 33fe47e708c..4661f6e8c0c 100644 --- a/engine/consensus/approvals/request_tracker_test.go +++ b/engine/consensus/approvals/request_tracker_test.go @@ -39,8 +39,8 @@ func (s *RequestTrackerTestSuite) SetupTest() { // updated when blackout period has passed. 
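The reworked `randBlackout` above propagates sampling errors instead of depending on the globally seeded `math/rand`. The following standalone sketch mirrors that blackout computation, with `crypto/rand` from the standard library standing in for flow-go's `utils/rand` package:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
	"time"
)

// randBlackout picks a uniformly random number of seconds in [min, max] and
// returns the wall-clock time until which further requests are suppressed.
func randBlackout(min, max int) (time.Time, error) {
	n, err := rand.Int(rand.Reader, big.NewInt(int64(max-min+1)))
	if err != nil {
		return time.Time{}, fmt.Errorf("failed to generate blackout: %w", err)
	}
	blackoutSeconds := int(n.Int64()) + min
	return time.Now().Add(time.Duration(blackoutSeconds) * time.Second), nil
}

func main() {
	next, err := randBlackout(10, 30)
	if err != nil {
		panic(err)
	}
	fmt.Println("blackout until:", next)
}
```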
func (s *RequestTrackerTestSuite) TestTryUpdate_CreateAndUpdate() { executedBlock := unittest.BlockFixture() - s.headers.On("ByBlockID", executedBlock.ID()).Return(executedBlock.Header, nil) - result := unittest.ExecutionResultFixture(unittest.WithBlock(&executedBlock)) + s.headers.On("ByBlockID", executedBlock.ID()).Return(executedBlock.ToHeader(), nil) + result := unittest.ExecutionResultFixture(unittest.WithBlock(executedBlock)) chunks := 5 for i := 0; i < chunks; i++ { _, updated, err := s.tracker.TryUpdate(result, executedBlock.ID(), uint64(i)) @@ -65,8 +65,8 @@ func (s *RequestTrackerTestSuite) TestTryUpdate_ConcurrentTracking() { s.tracker.blackoutPeriodMin = 0 executedBlock := unittest.BlockFixture() - s.headers.On("ByBlockID", executedBlock.ID()).Return(executedBlock.Header, nil) - result := unittest.ExecutionResultFixture(unittest.WithBlock(&executedBlock)) + s.headers.On("ByBlockID", executedBlock.ID()).Return(executedBlock.ToHeader(), nil) + result := unittest.ExecutionResultFixture(unittest.WithBlock(executedBlock)) chunks := 5 var wg sync.WaitGroup for times := 0; times < 10; times++ { @@ -95,7 +95,7 @@ func (s *RequestTrackerTestSuite) TestTryUpdate_ConcurrentTracking() { func (s *RequestTrackerTestSuite) TestTryUpdate_UpdateForInvalidResult() { executedBlock := unittest.BlockFixture() s.headers.On("ByBlockID", executedBlock.ID()).Return(nil, storage.ErrNotFound) - result := unittest.ExecutionResultFixture(unittest.WithBlock(&executedBlock)) + result := unittest.ExecutionResultFixture(unittest.WithBlock(executedBlock)) _, updated, err := s.tracker.TryUpdate(result, executedBlock.ID(), uint64(0)) require.Error(s.T(), err) require.False(s.T(), updated) @@ -105,10 +105,10 @@ func (s *RequestTrackerTestSuite) TestTryUpdate_UpdateForInvalidResult() { // that are lower than our lowest height. 
func (s *RequestTrackerTestSuite) TestTryUpdate_UpdateForPrunedHeight() { executedBlock := unittest.BlockFixture() - s.headers.On("ByBlockID", executedBlock.ID()).Return(executedBlock.Header, nil) - err := s.tracker.PruneUpToHeight(executedBlock.Header.Height + 1) + s.headers.On("ByBlockID", executedBlock.ID()).Return(executedBlock.ToHeader(), nil) + err := s.tracker.PruneUpToHeight(executedBlock.Height + 1) require.NoError(s.T(), err) - result := unittest.ExecutionResultFixture(unittest.WithBlock(&executedBlock)) + result := unittest.ExecutionResultFixture(unittest.WithBlock(executedBlock)) _, updated, err := s.tracker.TryUpdate(result, executedBlock.ID(), uint64(0)) require.Error(s.T(), err) require.True(s.T(), mempool.IsBelowPrunedThresholdError(err)) @@ -118,11 +118,11 @@ // TestPruneUpToHeight_Pruning tests that pruning up to some height correctly removes the expected items func (s *RequestTrackerTestSuite) TestPruneUpToHeight_Pruning() { executedBlock := unittest.BlockFixture() - nextExecutedBlock := unittest.BlockWithParentFixture(executedBlock.Header) - s.headers.On("ByBlockID", executedBlock.ID()).Return(executedBlock.Header, nil) - s.headers.On("ByBlockID", nextExecutedBlock.ID()).Return(nextExecutedBlock.Header, nil) + nextExecutedBlock := unittest.BlockWithParentFixture(executedBlock.ToHeader()) + s.headers.On("ByBlockID", executedBlock.ID()).Return(executedBlock.ToHeader(), nil) + s.headers.On("ByBlockID", nextExecutedBlock.ID()).Return(nextExecutedBlock.ToHeader(), nil) - result := unittest.ExecutionResultFixture(unittest.WithBlock(&executedBlock)) + result := unittest.ExecutionResultFixture(unittest.WithBlock(executedBlock)) nextResult := unittest.ExecutionResultFixture(unittest.WithBlock(nextExecutedBlock)) for _, r := range []*flow.ExecutionResult{result, nextResult} { @@ -131,7 +131,7 @@ require.False(s.T(), updated) } - err := s.tracker.PruneUpToHeight(nextExecutedBlock.Header.Height) + err := s.tracker.PruneUpToHeight(nextExecutedBlock.Height) require.NoError(s.T(), err) _, ok := s.tracker.index[result.ID()] diff --git a/engine/consensus/approvals/signature_collector.go b/engine/consensus/approvals/signature_collector.go index 6af55f0e475..266052a4da5 100644 --- a/engine/consensus/approvals/signature_collector.go +++ b/engine/consensus/approvals/signature_collector.go @@ -1,11 +1,12 @@ package approvals import ( - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/flow" ) -// SignatureCollector contains a set of of signatures from verifiers attesting +// SignatureCollector contains a set of signatures from verifiers attesting // to the validity of an execution result chunk. // NOT concurrency safe.
// TODO: this will be replaced with stateful BLS aggregation diff --git a/engine/consensus/approvals/testutil.go b/engine/consensus/approvals/testutil.go index df5e98fa36b..d958553a377 100644 --- a/engine/consensus/approvals/testutil.go +++ b/engine/consensus/approvals/testutil.go @@ -2,16 +2,17 @@ package approvals import ( "github.com/gammazero/workerpool" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" mempool "github.com/onflow/flow-go/module/mempool/mock" module "github.com/onflow/flow-go/module/mock" msig "github.com/onflow/flow-go/module/signature" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" realproto "github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" realstorage "github.com/onflow/flow-go/storage" @@ -42,8 +43,8 @@ func (s *BaseApprovalsTestSuite) SetupTest() { s.Block = unittest.BlockHeaderWithParentFixture(s.ParentBlock) verifiers := make(flow.IdentifierList, 0) s.AuthorizedVerifiers = make(map[flow.Identifier]*flow.Identity) - s.ChunksAssignment = chunks.NewAssignment() - s.Chunks = unittest.ChunkListFixture(50, s.Block.ID()) + assignmentBuilder := chunks.NewAssignmentBuilder() + s.Chunks = unittest.ChunkListFixture(50, s.Block.ID(), unittest.StateCommitmentFixture()) // mock public key to mock signature verifications s.PublicKey = &module.PublicKey{} @@ -59,20 +60,24 @@ func (s *BaseApprovalsTestSuite) SetupTest() { // create assignment for _, chunk := range s.Chunks { - s.ChunksAssignment.Add(chunk, verifiers) + require.NoError(s.T(), assignmentBuilder.Add(chunk.Index, verifiers)) } + s.ChunksAssignment = assignmentBuilder.Build() s.VerID = verifiers[0] result := unittest.ExecutionResultFixture() - result.BlockID = s.Block.ID() - result.Chunks = s.Chunks + result.BlockID = s.Block.ID() //nolint:structwrite + result.Chunks = s.Chunks //nolint:structwrite s.IncorporatedBlock = unittest.BlockHeaderWithParentFixture(s.Block) // compose incorporated result - s.IncorporatedResult = unittest.IncorporatedResult.Fixture( - unittest.IncorporatedResult.WithResult(result), - unittest.IncorporatedResult.WithIncorporatedBlockID(s.IncorporatedBlock.ID())) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: s.IncorporatedBlock.ID(), + Result: result, + }) + require.NoError(s.T(), err) + s.IncorporatedResult = incorporatedResult } // BaseAssignmentCollectorTestSuite is a base suite for testing assignment collectors, contains mocks for all @@ -134,20 +139,22 @@ func (s *BaseAssignmentCollectorTestSuite) SetupTest() { return realstorage.ErrNotFound } }) - s.Headers.On("ByHeight", mock.Anything).Return( - func(height uint64) *flow.Header { + s.Headers.On("BlockIDByHeight", mock.Anything).Return( + func(height uint64) (flow.Identifier, error) { if block, found := s.FinalizedAtHeight[height]; found { - return block + return block.ID(), nil } else { - return nil + return flow.ZeroID, realstorage.ErrNotFound } }, - func(height uint64) error { - _, found := s.FinalizedAtHeight[height] - if !found { - return realstorage.ErrNotFound + ) + s.Headers.On("ByHeight", mock.Anything).Return( + func(height uint64) (*flow.Header, error) { + if block, found := s.FinalizedAtHeight[height]; found { + return block, nil + } else { + 
return nil, realstorage.ErrNotFound } - return nil }, ) diff --git a/engine/consensus/approvals/tracker/record.go b/engine/consensus/approvals/tracker/record.go index 1efb695f090..0b249947f3b 100644 --- a/engine/consensus/approvals/tracker/record.go +++ b/engine/consensus/approvals/tracker/record.go @@ -92,7 +92,7 @@ func (r *SealingRecord) Generate() (Rec, error) { rec["result_initial_state"] = hex.EncodeToString(initialState[:]) rec["number_chunks"] = len(result.Chunks) rec["number_receipts"] = numberReceipts - _, rec["candidate_seal_in_mempool"] = r.sealsPl.ByID(irID) + _, rec["candidate_seal_in_mempool"] = r.sealsPl.Get(irID) if finalizationStatus != nil { rec["incorporating_block"] = *finalizationStatus diff --git a/engine/consensus/approvals/verifying_assignment_collector.go b/engine/consensus/approvals/verifying_assignment_collector.go index 118627db3bc..d32324c8bb8 100644 --- a/engine/consensus/approvals/verifying_assignment_collector.go +++ b/engine/consensus/approvals/verifying_assignment_collector.go @@ -2,7 +2,6 @@ package approvals import ( "fmt" - "math/rand" "sync" "github.com/rs/zerolog" @@ -15,6 +14,7 @@ import ( "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/utils/rand" ) // **Emergency-sealing parameters** @@ -360,9 +360,14 @@ func (ac *VerifyingAssignmentCollector) RequestMissingApprovals(observation cons ) } + nonce, err := rand.Uint64() + if err != nil { + return 0, fmt.Errorf("nonce generation failed during request missing approvals: %w", err) + } + // prepare the request req := &messages.ApprovalRequest{ - Nonce: rand.Uint64(), + Nonce: nonce, ResultID: ac.ResultID(), ChunkIndex: chunkIndex, } @@ -391,9 +396,9 @@ func (ac *VerifyingAssignmentCollector) RequestMissingApprovals(observation cons func authorizedVerifiersAtBlock(state protocol.State, blockID flow.Identifier) (map[flow.Identifier]*flow.Identity, error) { authorizedVerifierList, err := state.AtBlockID(blockID).Identities( filter.And( - filter.HasRole(flow.RoleVerification), - filter.HasWeight(true), - filter.Not(filter.Ejected), + filter.HasRole[flow.Identity](flow.RoleVerification), + filter.HasInitialWeight[flow.Identity](true), + filter.IsValidCurrentEpochParticipant, )) if err != nil { return nil, fmt.Errorf("failed to retrieve Identities for block %v: %w", blockID, err) diff --git a/engine/consensus/approvals/verifying_assignment_collector_test.go b/engine/consensus/approvals/verifying_assignment_collector_test.go index ee101e03d45..fe87c612ff1 100644 --- a/engine/consensus/approvals/verifying_assignment_collector_test.go +++ b/engine/consensus/approvals/verifying_assignment_collector_test.go @@ -12,7 +12,8 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/consensus/approvals/tracker" "github.com/onflow/flow-go/model/chunks" @@ -228,12 +229,10 @@ func (s *AssignmentCollectorTestSuite) TestProcessIncorporatedResult() { // TestProcessIncorporatedResult_InvalidIdentity tests a few scenarios where verifier identity is not correct // by one or another reason func (s *AssignmentCollectorTestSuite) TestProcessIncorporatedResult_InvalidIdentity() { - - s.Run("verifier zero-weight", func() { - identity := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - identity.Weight = 0 // zero weight - - state := 
&protocol.State{} + // mocks state to return invalid identity and creates assignment collector that will use it + // creating assignment collector with invalid identity should result in error + assertInvalidIdentity := func(identity *flow.Identity) { + state := protocol.NewState(s.T()) state.On("AtBlockID", mock.Anything).Return( func(blockID flow.Identifier) realproto.Snapshot { return unittest.StateSnapshotForKnownBlock( @@ -247,45 +246,41 @@ func (s *AssignmentCollectorTestSuite) TestProcessIncorporatedResult_InvalidIden s.SigHasher, s.Conduit, s.RequestTracker, 1) require.Error(s.T(), err) require.Nil(s.T(), collector) - }) + } + s.Run("verifier-zero-weight", func() { + identity := unittest.IdentityFixture( + unittest.WithRole(flow.RoleVerification), + unittest.WithParticipationStatus(flow.EpochParticipationStatusActive), + unittest.WithInitialWeight(0), + ) + assertInvalidIdentity(identity) + }) + s.Run("verifier-leaving", func() { + identity := unittest.IdentityFixture( + unittest.WithRole(flow.RoleVerification), + unittest.WithParticipationStatus(flow.EpochParticipationStatusLeaving), + ) + assertInvalidIdentity(identity) + }) + s.Run("verifier-joining", func() { + identity := unittest.IdentityFixture( + unittest.WithRole(flow.RoleVerification), + unittest.WithParticipationStatus(flow.EpochParticipationStatusJoining), + ) + assertInvalidIdentity(identity) + }) s.Run("verifier-ejected", func() { - identity := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - identity.Ejected = true // node ejected - - state := &protocol.State{} - state.On("AtBlockID", mock.Anything).Return( - func(blockID flow.Identifier) realproto.Snapshot { - return unittest.StateSnapshotForKnownBlock( - s.Block, - map[flow.Identifier]*flow.Identity{identity.NodeID: identity}, - ) - }, + identity := unittest.IdentityFixture( + unittest.WithRole(flow.RoleVerification), + unittest.WithParticipationStatus(flow.EpochParticipationStatusEjected), ) - - collector, err := newVerifyingAssignmentCollector(unittest.Logger(), s.WorkerPool, s.IncorporatedResult.Result, state, s.Headers, s.Assigner, s.SealsPL, - s.SigHasher, s.Conduit, s.RequestTracker, 1) - require.Nil(s.T(), collector) - require.Error(s.T(), err) + assertInvalidIdentity(identity) }) s.Run("verifier-invalid-role", func() { // invalid role identity := unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) - - state := &protocol.State{} - state.On("AtBlockID", mock.Anything).Return( - func(blockID flow.Identifier) realproto.Snapshot { - return unittest.StateSnapshotForKnownBlock( - s.Block, - map[flow.Identifier]*flow.Identity{identity.NodeID: identity}, - ) - }, - ) - - collector, err := newVerifyingAssignmentCollector(unittest.Logger(), s.WorkerPool, s.IncorporatedResult.Result, state, s.Headers, s.Assigner, s.SealsPL, - s.SigHasher, s.Conduit, s.RequestTracker, 1) - require.Nil(s.T(), collector) - require.Error(s.T(), err) + assertInvalidIdentity(identity) }) } @@ -307,13 +302,18 @@ func (s *AssignmentCollectorTestSuite) TestProcessApproval_BeforeIncorporatedRes // rate limiting is respected. 
func (s *AssignmentCollectorTestSuite) TestRequestMissingApprovals() { // build new assignment with 2 verifiers - assignment := chunks.NewAssignment() + assignmentBuilder := chunks.NewAssignmentBuilder() for _, chunk := range s.Chunks { - verifiers := s.ChunksAssignment.Verifiers(chunk) - assignment.Add(chunk, verifiers[:2]) + verifiers, err := s.ChunksAssignment.Verifiers(chunk.Index) + require.NoError(s.T(), err) + v := make([]flow.Identifier, 0, len(verifiers)) + for id := range verifiers { + v = append(v, id) + } + require.NoError(s.T(), assignmentBuilder.Add(chunk.Index, v[:2])) } // replace old one - s.ChunksAssignment = assignment + s.ChunksAssignment = assignmentBuilder.Build() incorporatedBlocks := make([]*flow.Header, 0) diff --git a/engine/consensus/compliance.go b/engine/consensus/compliance.go index 046ed54f543..3e3533c051e 100644 --- a/engine/consensus/compliance.go +++ b/engine/consensus/compliance.go @@ -2,7 +2,6 @@ package consensus import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/component" ) @@ -19,14 +18,14 @@ import ( type Compliance interface { component.Component - // OnBlockProposal feeds a new block proposal into the processing pipeline. + // OnBlockProposal feeds a new structurally validated block proposal into the processing pipeline. // Incoming proposals will be queued and eventually dispatched by worker. // This method is non-blocking. - OnBlockProposal(proposal flow.Slashable[*messages.BlockProposal]) + OnBlockProposal(proposal flow.Slashable[*flow.Proposal]) - // OnSyncedBlocks feeds a batch of blocks obtained from sync into the processing pipeline. + // OnSyncedBlocks feeds a batch of structurally validated blocks obtained from sync into the processing pipeline. // Implementors shouldn't assume that blocks are arranged in any particular order. // Incoming proposals will be queued and eventually dispatched by worker. // This method is non-blocking. - OnSyncedBlocks(blocks flow.Slashable[[]*messages.BlockProposal]) + OnSyncedBlocks(blocks flow.Slashable[[]*flow.Proposal]) } diff --git a/engine/consensus/compliance/core.go b/engine/consensus/compliance/core.go index d5e737714f3..f52ba2494ea 100644 --- a/engine/consensus/compliance/core.go +++ b/engine/consensus/compliance/core.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package compliance import ( @@ -14,18 +12,16 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/consensus/sealing/counters" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/compliance" + "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/logging" ) // Core contains the central business logic for the main consensus' compliance engine. 
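The `Compliance` interface above now accepts `flow.Slashable[*flow.Proposal]`: every queued message stays paired with the ID of the node it came from, so misbehaviour can be attributed later (e.g. when `OnInvalidBlockDetected` fires). A simplified, self-contained stand-in for that wrapper, since the real generic type lives in `model/flow`:

```go
package main

import "fmt"

// Identifier and Slashable are simplified stand-ins for the flow-go types.
type Identifier [32]byte

// Slashable pairs a message with the origin node it was received from, so
// the origin travels through the processing pipeline alongside the payload.
type Slashable[T any] struct {
	OriginID Identifier
	Message  T
}

// Proposal is a placeholder for *flow.Proposal.
type Proposal struct{ View uint64 }

func main() {
	origin := Identifier{0xab, 0xcd}
	wrapped := Slashable[*Proposal]{OriginID: origin, Message: &Proposal{View: 42}}
	// Consumers process the message but keep the origin for later reporting:
	fmt.Printf("proposal for view %d received from %x...\n", wrapped.Message.View, wrapped.OriginID[:2])
}
```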
@@ -48,8 +44,8 @@ type Core struct { payloads storage.Payloads state protocol.ParticipantState // track latest finalized view/height - used to efficiently drop outdated or too-far-ahead blocks - finalizedView counters.StrictMonotonousCounter - finalizedHeight counters.StrictMonotonousCounter + finalizedView counters.StrictMonotonicCounter + finalizedHeight counters.StrictMonotonicCounter pending module.PendingBlockBuffer // pending block cache sync module.BlockRequester hotstuff module.HotStuff @@ -76,14 +72,9 @@ func NewCore( hotstuff module.HotStuff, voteAggregator hotstuff.VoteAggregator, timeoutAggregator hotstuff.TimeoutAggregator, - opts ...compliance.Opt, + config compliance.Config, ) (*Core, error) { - config := compliance.DefaultConfig() - for _, apply := range opts { - apply(&config) - } - c := &Core{ log: log.With().Str("compliance", "core").Logger(), config: config, @@ -116,40 +107,44 @@ return c, nil } -// OnBlockProposal handles incoming block proposals. +// OnBlockProposal handles incoming block proposals that have passed basic structural validation. // No errors are expected during normal operation. All returned exceptions // are potential symptoms of internal state corruption and should be fatal. -func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.BlockProposal) error { - block := proposal.Block.ToInternal() - header := block.Header - blockID := header.ID() +func (c *Core) OnBlockProposal(proposal flow.Slashable[*flow.Proposal]) error { + block := proposal.Message.Block + header := block.ToHeader() + blockID := block.ID() finalHeight := c.finalizedHeight.Value() finalView := c.finalizedView.Value() span, _ := c.tracer.StartBlockSpan(context.Background(), header.ID(), trace.CONCompOnBlockProposal) span.SetAttributes( attribute.Int64("view", int64(header.View)), - attribute.String("origin_id", originID.String()), + attribute.String("origin_id", proposal.OriginID.String()), attribute.String("proposer", header.ProposerID.String()), ) traceID := span.SpanContext().TraceID().String() defer span.End() log := c.log.With(). - Hex("origin_id", originID[:]). - Str("chain_id", header.ChainID.String()). + Hex("origin_id", proposal.OriginID[:]). Uint64("block_height", header.Height). Uint64("block_view", header.View). - Hex("block_id", logging.Entity(header)). + Hex("block_id", blockID[:]). Hex("parent_id", header.ParentID[:]). - Hex("payload_hash", header.PayloadHash[:]). - Time("timestamp", header.Timestamp). Hex("proposer", header.ProposerID[:]). - Hex("parent_signer_indices", header.ParentVoterIndices). - Str("traceID", traceID). // traceID is used to connect logs to traces - Uint64("finalized_height", finalHeight). - Uint64("finalized_view", finalView). + Time("timestamp", time.UnixMilli(int64(header.Timestamp)).UTC()). Logger() + if log.Debug().Enabled() { + log = log.With(). + Uint64("finalized_height", finalHeight). + Uint64("finalized_view", finalView). + Str("chain_id", header.ChainID.String()). + Hex("payload_hash", header.PayloadHash[:]). + Hex("parent_signer_indices", header.ParentVoterIndices). + Str("traceID", traceID). 
// traceID is used to connect logs to traces + Logger() + } log.Info().Msg("block proposal received") // drop proposals below the finalized threshold @@ -158,12 +153,13 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Bloc return nil } + skipNewProposalsThreshold := c.config.GetSkipNewProposalsThreshold() // ignore proposals which are too far ahead of our local finalized state // instead, rely on sync engine to catch up finalization more effectively, and avoid // large subtree of blocks to be cached. - if header.View > finalView+c.config.SkipNewProposalsThreshold { + if header.View > finalView+skipNewProposalsThreshold { log.Debug(). - Uint64("skip_new_proposals_threshold", c.config.SkipNewProposalsThreshold). + Uint64("skip_new_proposals_threshold", skipNewProposalsThreshold). Msg("dropping block too far ahead of locally finalized view") return nil } @@ -204,7 +200,7 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Bloc _, found := c.pending.ByID(header.ParentID) if found { // add the block to the cache - _ = c.pending.Add(originID, block) + _ = c.pending.Add(proposal) c.mempoolMetrics.MempoolEntries(metrics.ResourceProposal, c.pending.Size()) return nil @@ -218,7 +214,7 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Bloc return fmt.Errorf("could not check parent exists: %w", err) } if !exists { - _ = c.pending.Add(originID, block) + _ = c.pending.Add(proposal) c.mempoolMetrics.MempoolEntries(metrics.ResourceProposal, c.pending.Size()) c.sync.RequestBlock(header.ParentID, header.Height-1) @@ -232,7 +228,7 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Bloc // execution of the entire recursion, which might include processing the // proposal's pending children. There is another span within // processBlockProposal that measures the time spent for a single proposal. - err = c.processBlockAndDescendants(block) + err = c.processBlockAndDescendants(proposal) c.mempoolMetrics.MempoolEntries(metrics.ResourceProposal, c.pending.Size()) if err != nil { return fmt.Errorf("could not process block proposal: %w", err) @@ -247,25 +243,35 @@ func (c *Core) OnBlockProposal(originID flow.Identifier, proposal *messages.Bloc // processed as well. // No errors are expected during normal operation. All returned exceptions // are potential symptoms of internal state corruption and should be fatal. -func (c *Core) processBlockAndDescendants(proposal *flow.Block) error { - blockID := proposal.Header.ID() +func (c *Core) processBlockAndDescendants(proposal flow.Slashable[*flow.Proposal]) error { + block := proposal.Message.Block + header := block.ToHeader() + blockID := block.ID() log := c.log.With(). Str("block_id", blockID.String()). - Uint64("block_height", proposal.Header.Height). - Uint64("block_view", proposal.Header.View). - Uint64("parent_view", proposal.Header.ParentView). + Uint64("block_height", header.Height). + Uint64("block_view", header.View). + Uint64("parent_view", header.ParentView). 
Logger() // process block itself - err := c.processBlockProposal(proposal) + err := c.processBlockProposal(proposal.Message) if err != nil { - if checkForAndLogOutdatedInputError(err, log) { + if checkForAndLogOutdatedInputError(err, log) || checkForAndLogUnverifiableInputError(err, log) { return nil } - if checkForAndLogInvalidInputError(err, log) { + if invalidBlockErr, ok := model.AsInvalidProposalError(err); ok { + log.Err(err).Msg("received invalid block from other node (potential slashing evidence?)") + + // notify consumers about invalid block + c.proposalViolationNotifier.OnInvalidBlockDetected(flow.Slashable[model.InvalidProposalError]{ + OriginID: proposal.OriginID, + Message: *invalidBlockErr, + }) + // notify VoteAggregator about the invalid block - err = c.voteAggregator.InvalidBlock(model.ProposalFromFlow(proposal.Header)) + err = c.voteAggregator.InvalidBlock(model.SignedProposalFromBlock(proposal.Message)) if err != nil { if mempool.IsBelowPrunedThresholdError(err) { log.Warn().Msg("received invalid block, but is below pruned threshold") @@ -287,7 +293,7 @@ func (c *Core) processBlockAndDescendants(proposal *flow.Block) error { return nil } for _, child := range children { - cpr := c.processBlockAndDescendants(child.Message) + cpr := c.processBlockAndDescendants(child) if cpr != nil { // unexpected error: potentially corrupted internal state => abort processing and escalate error return cpr @@ -304,15 +310,17 @@ func (c *Core) processBlockAndDescendants(proposal *flow.Block) error { // the finalized state. // Expected errors during normal operations: // - engine.OutdatedInputError if the block proposal is outdated (e.g. orphaned) -// - engine.InvalidInputError if the block proposal is invalid -func (c *Core) processBlockProposal(proposal *flow.Block) error { +// - model.InvalidProposalError if the block proposal is invalid +// - engine.UnverifiableInputError if the block proposal cannot be verified +func (c *Core) processBlockProposal(proposal *flow.Proposal) error { startTime := time.Now() defer func() { c.hotstuffMetrics.BlockProcessingDuration(time.Since(startTime)) }() - header := proposal.Header - blockID := header.ID() + block := proposal.Block + header := block.ToHeader() + blockID := block.ID() span, ctx := c.tracer.StartBlockSpan(context.Background(), blockID, trace.ConCompProcessBlockProposal) span.SetAttributes( @@ -320,25 +328,24 @@ func (c *Core) processBlockProposal(proposal *flow.Block) error { ) defer span.End() - hotstuffProposal := model.ProposalFromFlow(header) + hotstuffProposal := model.SignedProposalFromBlock(proposal) err := c.validator.ValidateProposal(hotstuffProposal) if err != nil { - if invalidBlockErr, ok := model.AsInvalidProposalError(err); ok { - c.proposalViolationNotifier.OnInvalidBlockDetected(*invalidBlockErr) - return engine.NewInvalidInputErrorf("invalid block proposal: %w", err) + if model.IsInvalidProposalError(err) { + return err } if errors.Is(err, model.ErrViewForUnknownEpoch) { // We have received a proposal, but we don't know the epoch its view is within. // We know: // - the parent of this block is valid and was appended to the state (ie. we knew the epoch for it) // - if we then see this for the child, one of two things must have happened: - // 1. the proposer malicious created the block for a view very far in the future (it's invalid) + // 1. the proposer maliciously created the block for a view very far in the future (it's invalid) // -> in this case we can disregard the block - // 2. 
no blocks have been finalized within the epoch commitment deadline, and the epoch ended - // (breaking a critical assumption - see EpochCommitSafetyThreshold in protocol.Params for details) + // 2. no blocks have been finalized within the epoch commitment deadline, and the epoch ended + // (breaking a critical assumption - see FinalizationSafetyThreshold in protocol.Params for details) // -> in this case, the network has encountered a critical failure // - we assume in general that Case 2 will not happen, therefore this must be Case 1 - an invalid block - return engine.NewInvalidInputErrorf("invalid proposal with view from unknown epoch: %w", err) + return engine.NewUnverifiableInputError("unverifiable proposal with view from unknown epoch: %w", err) } return fmt.Errorf("unexpected error validating proposal: %w", err) } @@ -350,22 +357,18 @@ func (c *Core) processBlockProposal(proposal *flow.Block) error { Hex("block_id", blockID[:]). Hex("parent_id", header.ParentID[:]). Hex("payload_hash", header.PayloadHash[:]). - Time("timestamp", header.Timestamp). + Time("timestamp", time.UnixMilli(int64(header.Timestamp)).UTC()). Hex("proposer", header.ProposerID[:]). Hex("parent_signer_indices", header.ParentVoterIndices). Logger() - log.Info().Msg("processing block proposal") + log.Debug().Msg("processing block proposal") // see if the block is a valid extension of the protocol state - block := &flow.Block{ - Header: proposal.Header, - Payload: proposal.Payload, - } - err = c.state.Extend(ctx, block) + err = c.state.Extend(ctx, proposal) if err != nil { if state.IsInvalidExtensionError(err) { // if the block proposes an invalid extension of the protocol state, then the block is invalid - return engine.NewInvalidInputErrorf("invalid extension of protocol state (block: %x, height: %d): %w", blockID, header.Height, err) + return model.NewInvalidProposalErrorf(hotstuffProposal, "invalid extension of protocol state (block: %x, height: %d): %w", blockID, header.Height, err) } if state.IsOutdatedExtensionError(err) { // protocol state aborted processing of block as it is on an abandoned fork: block is outdated @@ -381,7 +384,7 @@ func (c *Core) processBlockProposal(proposal *flow.Block) error { // submit the model to hotstuff for processing // TODO replace with pubsub https://github.com/dapperlabs/flow-go/issues/6395 - log.Info().Msg("forwarding block proposal to hotstuff") + log.Debug().Msg("forwarding block proposal to hotstuff") c.hotstuff.SubmitProposal(hotstuffProposal) return nil @@ -413,14 +416,15 @@ func checkForAndLogOutdatedInputError(err error, log zerolog.Logger) bool { return false } -// checkForAndLogInvalidInputError checks whether error is an `engine.InvalidInputError`. +// checkForAndLogUnverifiableInputError checks whether error is an `engine.UnverifiableInputError`. // If this is the case, we emit a log message and return true. -// For any error other than `engine.InvalidInputError`, this function is a no-op +// For any error other than `engine.UnverifiableInputError`, this function is a no-op // and returns false. 
-func checkForAndLogInvalidInputError(err error, log zerolog.Logger) bool { - if engine.IsInvalidInputError(err) { - // the block is invalid; log as error as we desire honest participation - log.Err(err).Msg("received invalid block from other node (potential slashing evidence?)") +func checkForAndLogUnverifiableInputError(err error, log zerolog.Logger) bool { + if engine.IsUnverifiableInputError(err) { + // the block cannot be validated + log.Warn().Err(err).Msg("received unverifiable block proposal; " + + "this might be an indicator that a malicious proposer is generating detached blocks very far ahead") return true } return false diff --git a/engine/consensus/compliance/core_test.go b/engine/consensus/compliance/core_test.go index 270a417411b..23b98835ae2 100644 --- a/engine/consensus/compliance/core_test.go +++ b/engine/consensus/compliance/core_test.go @@ -2,20 +2,17 @@ package compliance import ( "errors" - "math/rand" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/consensus/hotstuff/helper" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/mocks" "github.com/onflow/flow-go/consensus/hotstuff/model" - consensus "github.com/onflow/flow-go/engine/consensus/mock" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" realModule "github.com/onflow/flow-go/module" real "github.com/onflow/flow-go/module/buffer" "github.com/onflow/flow-go/module/compliance" @@ -24,7 +21,7 @@ import ( "github.com/onflow/flow-go/module/trace" netint "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/state" protint "github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" @@ -54,8 +51,8 @@ type CommonSuite struct { // storage data headerDB map[flow.Identifier]*flow.Header payloadDB map[flow.Identifier]*flow.Payload - pendingDB map[flow.Identifier]flow.Slashable[*flow.Block] - childrenDB map[flow.Identifier][]flow.Slashable[*flow.Block] + pendingDB map[flow.Identifier]flow.Slashable[*flow.Proposal] + childrenDB map[flow.Identifier][]flow.Slashable[*flow.Proposal] // mocked dependencies me *module.Local @@ -66,8 +63,7 @@ type CommonSuite struct { state *protocol.ParticipantState snapshot *protocol.Snapshot con *mocknetwork.Conduit - net *mocknetwork.Network - prov *consensus.ProposalProvider + net *mocknetwork.EngineRegistry pending *module.PendingBlockBuffer hotstuff *module.HotStuff sync *module.BlockRequester @@ -81,27 +77,24 @@ } func (cs *CommonSuite) SetupTest() { - // seed the RNG - rand.Seed(time.Now().UnixNano()) - // initialize the parameters cs.participants = unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus), - unittest.WithWeight(1000), + unittest.WithInitialWeight(1000), ) cs.myID = cs.participants[0].NodeID block := unittest.BlockFixture() - cs.head = block.Header + cs.head = block.ToHeader() // initialize the storage data cs.headerDB = make(map[flow.Identifier]*flow.Header) cs.payloadDB = make(map[flow.Identifier]*flow.Payload) - cs.pendingDB = make(map[flow.Identifier]flow.Slashable[*flow.Block]) - cs.childrenDB = make(map[flow.Identifier][]flow.Slashable[*flow.Block]) + cs.pendingDB = make(map[flow.Identifier]flow.Slashable[*flow.Proposal]) + cs.childrenDB = 
make(map[flow.Identifier][]flow.Slashable[*flow.Proposal]) // store the head header and payload - cs.headerDB[block.ID()] = block.Header - cs.payloadDB[block.ID()] = block.Payload + cs.headerDB[block.ID()] = block.ToHeader() + cs.payloadDB[block.ID()] = &block.Payload // set up local module mock cs.me = &module.Local{} @@ -177,7 +170,7 @@ func (cs *CommonSuite) SetupTest() { // set up protocol snapshot mock cs.snapshot = &protocol.Snapshot{} cs.snapshot.On("Identities", mock.Anything).Return( - func(filter flow.IdentityFilter) flow.IdentityList { + func(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { return cs.participants.Filter(filter) }, nil, @@ -197,7 +190,7 @@ func (cs *CommonSuite) SetupTest() { cs.con.On("Unicast", mock.Anything, mock.Anything).Return(nil) // set up network module mock - cs.net = &mocknetwork.Network{} + cs.net = &mocknetwork.EngineRegistry{} cs.net.On("Register", mock.Anything, mock.Anything).Return( func(channel channels.Channel, engine netint.MessageProcessor) netint.Conduit { return cs.con @@ -205,15 +198,11 @@ func (cs *CommonSuite) SetupTest() { nil, ) - // set up the provider engine - cs.prov = &consensus.ProposalProvider{} - cs.prov.On("ProvideProposal", mock.Anything).Return() - // set up pending module mock cs.pending = &module.PendingBlockBuffer{} cs.pending.On("Add", mock.Anything, mock.Anything).Return(true) cs.pending.On("ByID", mock.Anything).Return( - func(blockID flow.Identifier) flow.Slashable[*flow.Block] { + func(blockID flow.Identifier) flow.Slashable[*flow.Proposal] { return cs.pendingDB[blockID] }, func(blockID flow.Identifier) bool { @@ -222,7 +211,7 @@ func (cs *CommonSuite) SetupTest() { }, ) cs.pending.On("ByParentID", mock.Anything).Return( - func(blockID flow.Identifier) []flow.Slashable[*flow.Block] { + func(blockID flow.Identifier) []flow.Slashable[*flow.Proposal] { return cs.childrenDB[blockID] }, func(blockID flow.Identifier) bool { @@ -273,6 +262,7 @@ func (cs *CommonSuite) SetupTest() { cs.hotstuff, cs.voteAggregator, cs.timeoutAggregator, + compliance.DefaultConfig(), ) require.NoError(cs.T(), err, "engine initialization should pass") @@ -287,19 +277,22 @@ func (cs *CoreSuite) TestOnBlockProposalValidParent() { proposal := unittest.ProposalFromBlock(block) // store the data for retrieval - cs.headerDB[block.Header.ParentID] = cs.head + cs.headerDB[block.ParentID] = cs.head - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromBlock(proposal) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal) // it should be processed without error - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "valid block proposal should pass") // we should extend the state with the header - cs.state.AssertCalled(cs.T(), "Extend", mock.Anything, block) + cs.state.AssertCalled(cs.T(), "Extend", mock.Anything, proposal) } func (cs *CoreSuite) TestOnBlockProposalValidAncestor() { @@ -307,40 +300,49 @@ func (cs *CoreSuite) TestOnBlockProposalValidAncestor() { // create a proposal that has two ancestors in the cache originID := cs.participants[1].NodeID ancestor := unittest.BlockWithParentFixture(cs.head) - parent := unittest.BlockWithParentFixture(ancestor.Header) - block := unittest.BlockWithParentFixture(parent.Header) + parent := 
unittest.BlockWithParentFixture(ancestor.ToHeader()) + block := unittest.BlockWithParentFixture(parent.ToHeader()) proposal := unittest.ProposalFromBlock(block) // store the data for retrieval - cs.headerDB[parent.ID()] = parent.Header - cs.headerDB[ancestor.ID()] = ancestor.Header + cs.headerDB[parent.ID()] = parent.ToHeader() + cs.headerDB[ancestor.ID()] = ancestor.ToHeader() - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromBlock(proposal) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal) // it should be processed without error - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "valid block proposal should pass") // we should extend the state with the header - cs.state.AssertCalled(cs.T(), "Extend", mock.Anything, block) + cs.state.AssertCalled(cs.T(), "Extend", mock.Anything, proposal) } func (cs *CoreSuite) TestOnBlockProposalSkipProposalThreshold() { // create a proposal which is far enough ahead to be dropped originID := cs.participants[1].NodeID - block := unittest.BlockFixture() - block.Header.View = cs.head.View + compliance.DefaultConfig().SkipNewProposalsThreshold + 1 - proposal := unittest.ProposalFromBlock(&block) + view := cs.head.View + compliance.DefaultConfig().SkipNewProposalsThreshold + 1 + block := unittest.BlockFixture( + unittest.Block.WithView(view), + unittest.Block.WithParentView(view-1), + ) + proposal := unittest.ProposalFromBlock(block) - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err) // block should be dropped - not added to state or cache - cs.state.AssertNotCalled(cs.T(), "Extend", mock.Anything) + cs.state.AssertNotCalled(cs.T(), "Extend", mock.Anything, mock.Anything) cs.validator.AssertNotCalled(cs.T(), "ValidateProposal", mock.Anything) cs.pending.AssertNotCalled(cs.T(), "Add", originID, mock.Anything) } @@ -355,30 +357,36 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsHotStuffValidation() { // create a proposal that has two ancestors in the cache originID := cs.participants[1].NodeID ancestor := unittest.BlockWithParentFixture(cs.head) - parent := unittest.BlockWithParentFixture(ancestor.Header) - block := unittest.BlockWithParentFixture(parent.Header) + parent := unittest.BlockWithParentFixture(ancestor.ToHeader()) + block := unittest.BlockWithParentFixture(parent.ToHeader()) proposal := unittest.ProposalFromBlock(block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromBlock(proposal) // store the data for retrieval - cs.headerDB[parent.ID()] = parent.Header - cs.headerDB[ancestor.ID()] = ancestor.Header + cs.headerDB[parent.ID()] = parent.ToHeader() + cs.headerDB[ancestor.ID()] = ancestor.ToHeader() cs.Run("invalid block error", func() { // the block fails HotStuff validation *cs.validator = *hotstuff.NewValidator(cs.T()) sentinelError := model.NewInvalidProposalErrorf(hotstuffProposal, "") cs.validator.On("ValidateProposal", hotstuffProposal).Return(sentinelError) - cs.proposalViolationNotifier.On("OnInvalidBlockDetected", sentinelError).Return().Once() + cs.proposalViolationNotifier.On("OnInvalidBlockDetected", 
flow.Slashable[model.InvalidProposalError]{ + OriginID: originID, + Message: sentinelError.(model.InvalidProposalError), + }).Return().Once() // we should notify VoteAggregator about the invalid block cs.voteAggregator.On("InvalidBlock", hotstuffProposal).Return(nil) // the expected error should be handled within the Core - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "proposal with invalid extension should fail") // we should not extend the state with the header - cs.state.AssertNotCalled(cs.T(), "Extend", mock.Anything, block) + cs.state.AssertNotCalled(cs.T(), "Extend", mock.Anything, proposal) // we should not attempt to process the children cs.pending.AssertNotCalled(cs.T(), "ByParentID", mock.Anything) }) @@ -389,11 +397,14 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsHotStuffValidation() { cs.validator.On("ValidateProposal", hotstuffProposal).Return(model.ErrViewForUnknownEpoch) // the expected error should be handled within the Core - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "proposal with invalid extension should fail") // we should not extend the state with the header - cs.state.AssertNotCalled(cs.T(), "Extend", mock.Anything, block) + cs.state.AssertNotCalled(cs.T(), "Extend", mock.Anything, proposal) // we should not attempt to process the children cs.pending.AssertNotCalled(cs.T(), "ByParentID", mock.Anything) }) @@ -405,11 +416,14 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsHotStuffValidation() { cs.validator.On("ValidateProposal", hotstuffProposal).Return(unexpectedErr) // the error should be propagated - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.ErrorIs(cs.T(), err, unexpectedErr) // we should not extend the state with the header - cs.state.AssertNotCalled(cs.T(), "Extend", mock.Anything, block) + cs.state.AssertNotCalled(cs.T(), "Extend", mock.Anything, proposal) // we should not attempt to process the children cs.pending.AssertNotCalled(cs.T(), "ByParentID", mock.Anything) }) @@ -425,14 +439,14 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { // create a proposal that has two ancestors in the cache originID := cs.participants[1].NodeID ancestor := unittest.BlockWithParentFixture(cs.head) - parent := unittest.BlockWithParentFixture(ancestor.Header) - block := unittest.BlockWithParentFixture(parent.Header) + parent := unittest.BlockWithParentFixture(ancestor.ToHeader()) + block := unittest.BlockWithParentFixture(parent.ToHeader()) proposal := unittest.ProposalFromBlock(block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromBlock(proposal) // store the data for retrieval - cs.headerDB[parent.ID()] = parent.Header - cs.headerDB[ancestor.ID()] = ancestor.Header + cs.headerDB[parent.ID()] = parent.ToHeader() + cs.headerDB[ancestor.ID()] = ancestor.ToHeader() // the block passes HotStuff validation cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) @@ -441,16 +455,26 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { // make sure we fail to extend the state *cs.state = protocol.ParticipantState{} 
cs.state.On("Final").Return(func() protint.Snapshot { return cs.snapshot }) - cs.state.On("Extend", mock.Anything, mock.Anything).Return(state.NewInvalidExtensionError("")) + sentinelErr := state.NewInvalidExtensionErrorf("") + cs.state.On("Extend", mock.Anything, mock.Anything).Return(sentinelErr) + cs.proposalViolationNotifier.On("OnInvalidBlockDetected", mock.Anything).Run(func(args mock.Arguments) { + err := args.Get(0).(flow.Slashable[model.InvalidProposalError]) + require.ErrorIs(cs.T(), err.Message, sentinelErr) + require.Equal(cs.T(), err.Message.InvalidProposal, hotstuffProposal) + require.Equal(cs.T(), err.OriginID, originID) + }).Return().Once() // we should notify VoteAggregator about the invalid block cs.voteAggregator.On("InvalidBlock", hotstuffProposal).Return(nil) // the expected error should be handled within the Core - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "proposal with invalid extension should fail") // we should extend the state with the header - cs.state.AssertCalled(cs.T(), "Extend", mock.Anything, block) + cs.state.AssertCalled(cs.T(), "Extend", mock.Anything, proposal) // we should not pass the block to hotstuff cs.hotstuff.AssertNotCalled(cs.T(), "SubmitProposal", mock.Anything) // we should not attempt to process the children @@ -461,14 +485,17 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { // make sure we fail to extend the state *cs.state = protocol.ParticipantState{} cs.state.On("Final").Return(func() protint.Snapshot { return cs.snapshot }) - cs.state.On("Extend", mock.Anything, mock.Anything).Return(state.NewOutdatedExtensionError("")) + cs.state.On("Extend", mock.Anything, mock.Anything).Return(state.NewOutdatedExtensionErrorf("")) // the expected error should be handled within the Core - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "proposal with invalid extension should fail") // we should extend the state with the header - cs.state.AssertCalled(cs.T(), "Extend", mock.Anything, block) + cs.state.AssertCalled(cs.T(), "Extend", mock.Anything, proposal) // we should not pass the block to hotstuff cs.hotstuff.AssertNotCalled(cs.T(), "SubmitProposal", mock.Anything) // we should not attempt to process the children @@ -483,11 +510,14 @@ func (cs *CoreSuite) TestOnBlockProposal_FailsProtocolStateValidation() { cs.state.On("Extend", mock.Anything, mock.Anything).Return(unexpectedErr) // it should be processed without error - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.ErrorIs(cs.T(), err, unexpectedErr) // we should extend the state with the header - cs.state.AssertCalled(cs.T(), "Extend", mock.Anything, block) + cs.state.AssertCalled(cs.T(), "Extend", mock.Anything, proposal) // we should not pass the block to hotstuff cs.hotstuff.AssertNotCalled(cs.T(), "SubmitProposal", mock.Anything) // we should not attempt to process the children @@ -499,37 +529,45 @@ func (cs *CoreSuite) TestProcessBlockAndDescendants() { // create three children blocks parent := unittest.BlockWithParentFixture(cs.head) - block1 := unittest.BlockWithParentFixture(parent.Header) - block2 := unittest.BlockWithParentFixture(parent.Header) - 
block3 := unittest.BlockWithParentFixture(parent.Header) + block1 := unittest.BlockWithParentFixture(parent.ToHeader()) + block2 := unittest.BlockWithParentFixture(parent.ToHeader()) + block3 := unittest.BlockWithParentFixture(parent.ToHeader()) - // create the pending blocks - pending1 := unittest.AsSlashable(block1) - pending2 := unittest.AsSlashable(block2) - pending3 := unittest.AsSlashable(block3) + proposal0 := unittest.ProposalFromBlock(parent) + proposal1 := unittest.ProposalFromBlock(block1) + proposal2 := unittest.ProposalFromBlock(block2) + proposal3 := unittest.ProposalFromBlock(block3) + + // create the pending proposals + pending1 := unittest.AsSlashable(proposal1) + pending2 := unittest.AsSlashable(proposal2) + pending3 := unittest.AsSlashable(proposal3) // store the parent on disk parentID := parent.ID() - cs.headerDB[parentID] = parent.Header + cs.headerDB[parentID] = parent.ToHeader() // store the pending children in the cache cs.childrenDB[parentID] = append(cs.childrenDB[parentID], pending1) cs.childrenDB[parentID] = append(cs.childrenDB[parentID], pending2) cs.childrenDB[parentID] = append(cs.childrenDB[parentID], pending3) - for _, block := range []*flow.Block{parent, block1, block2, block3} { - hotstuffProposal := model.ProposalFromFlow(block.Header) + for _, prop := range []*flow.Proposal{proposal0, proposal1, proposal2, proposal3} { + hotstuffProposal := model.SignedProposalFromBlock(prop) cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil) cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.hotstuff.On("SubmitProposal", hotstuffProposal).Once() } // execute the connected children handling - err := cs.core.processBlockAndDescendants(parent) + err := cs.core.processBlockAndDescendants(flow.Slashable[*flow.Proposal]{ + OriginID: unittest.IdentifierFixture(), + Message: proposal0, + }) require.NoError(cs.T(), err, "should pass handling children") // make sure we drop the cache after trying to process - cs.pending.AssertCalled(cs.T(), "DropForParent", parent.Header.ID()) + cs.pending.AssertCalled(cs.T(), "DropForParent", parent.ID()) } func (cs *CoreSuite) TestProposalBufferingOrder() { @@ -540,10 +578,10 @@ func (cs *CoreSuite) TestProposalBufferingOrder() { missingProposal := unittest.ProposalFromBlock(missingBlock) // create a chain of descendants - var proposals []*messages.BlockProposal + var proposals []*flow.Proposal parent := missingProposal for i := 0; i < 3; i++ { - descendant := unittest.BlockWithParentFixture(&parent.Block.Header) + descendant := unittest.BlockWithParentFixture(parent.Block.ToHeader()) proposal := unittest.ProposalFromBlock(descendant) proposals = append(proposals, proposal) parent = proposal @@ -553,16 +591,19 @@ func (cs *CoreSuite) TestProposalBufferingOrder() { cs.core.pending = real.NewPendingBlocks() // check that we request the ancestor block each time - cs.sync.On("RequestBlock", missingBlock.Header.ID(), missingBlock.Header.Height).Times(len(proposals)) + cs.sync.On("RequestBlock", missingBlock.ID(), missingBlock.Height).Times(len(proposals)) // process all the descendants for _, proposal := range proposals { // process and make sure no error occurs (as they are unverifiable) - err := cs.core.OnBlockProposal(originID, proposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: proposal, + }) require.NoError(cs.T(), err, "proposal buffering should pass") // make sure no block is forwarded to hotstuff - cs.hotstuff.AssertNotCalled(cs.T(), "SubmitProposal", 
model.ProposalFromFlow(&proposal.Block.Header)) + cs.hotstuff.AssertNotCalled(cs.T(), "SubmitProposal", model.SignedProposalFromBlock(proposal)) } // check that we submit each proposal in a valid order @@ -572,29 +613,32 @@ func (cs *CoreSuite) TestProposalBufferingOrder() { calls := 0 // track # of calls to SubmitProposal unprocessed := map[flow.Identifier]struct{}{ // track un-processed proposals - missingProposal.Block.Header.ID(): {}, - proposals[0].Block.Header.ID(): {}, - proposals[1].Block.Header.ID(): {}, - proposals[2].Block.Header.ID(): {}, + missingProposal.Block.ID(): {}, + proposals[0].Block.ID(): {}, + proposals[1].Block.ID(): {}, + proposals[2].Block.ID(): {}, } cs.hotstuff.On("SubmitProposal", mock.Anything).Times(4).Run( func(args mock.Arguments) { - proposal := args.Get(0).(*model.Proposal) + proposal := args.Get(0).(*model.SignedProposal) header := proposal.Block if calls == 0 { // first header processed must be the common parent - assert.Equal(cs.T(), missingProposal.Block.Header.ID(), header.BlockID) + assert.Equal(cs.T(), missingProposal.Block.ID(), header.BlockID) } // mark the proposal as processed delete(unprocessed, header.BlockID) - cs.headerDB[header.BlockID] = model.ProposalToFlow(proposal) + cs.headerDB[header.BlockID] = helper.SignedProposalToFlow(proposal).Header calls++ }, ) cs.voteAggregator.On("AddBlock", mock.Anything).Times(4) // process the root proposal - err := cs.core.OnBlockProposal(originID, missingProposal) + err := cs.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: originID, + Message: missingProposal, + }) require.NoError(cs.T(), err, "root proposal should pass") // all proposals should be processed diff --git a/engine/consensus/compliance/engine.go b/engine/consensus/compliance/engine.go index 7f24a26c007..1625b7b50a9 100644 --- a/engine/consensus/compliance/engine.go +++ b/engine/consensus/compliance/engine.go @@ -11,7 +11,6 @@ import ( "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/engine/consensus" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/events" @@ -21,7 +20,7 @@ import ( "github.com/onflow/flow-go/storage" ) -// defaultBlockQueueCapacity maximum capacity of inbound queue for `messages.BlockProposal`s +// defaultBlockQueueCapacity maximum capacity of inbound queue for `flow.UntrustedProposal`s const defaultBlockQueueCapacity = 10_000 // Engine is a wrapper around `compliance.Core`. 
The Engine queues inbound messages, relevant @@ -54,7 +53,7 @@ func NewEngine( core *Core, ) (*Engine, error) { - // Inbound FIFO queue for `messages.BlockProposal`s + // Inbound FIFO queue for `flow.UntrustedProposal`s blocksQueue, err := fifoqueue.NewFifoQueue( defaultBlockQueueCapacity, fifoqueue.WithLengthObserver(func(len int) { core.mempoolMetrics.MempoolEntries(metrics.ResourceBlockProposalQueue, uint(len)) }), @@ -120,9 +119,12 @@ func (e *Engine) processQueuedBlocks(doneSignal <-chan struct{}) error { msg, ok := e.pendingBlocks.Pop() if ok { - batch := msg.(flow.Slashable[[]*messages.BlockProposal]) + batch := msg.(flow.Slashable[[]*flow.Proposal]) for _, block := range batch.Message { - err := e.core.OnBlockProposal(batch.OriginID, block) + err := e.core.OnBlockProposal(flow.Slashable[*flow.Proposal]{ + OriginID: batch.OriginID, + Message: block, + }) e.core.engineMetrics.MessageHandled(metrics.EngineCompliance, metrics.MessageBlockProposal) if err != nil { return fmt.Errorf("could not handle block proposal: %w", err) @@ -137,13 +139,13 @@ func (e *Engine) processQueuedBlocks(doneSignal <-chan struct{}) error { } } -// OnBlockProposal feeds a new block proposal into the processing pipeline. +// OnBlockProposal feeds a new structurally validated block proposal into the processing pipeline. // Incoming proposals are queued and eventually dispatched by worker. -func (e *Engine) OnBlockProposal(proposal flow.Slashable[*messages.BlockProposal]) { +func (e *Engine) OnBlockProposal(proposal flow.Slashable[*flow.Proposal]) { e.core.engineMetrics.MessageReceived(metrics.EngineCompliance, metrics.MessageBlockProposal) - proposalAsList := flow.Slashable[[]*messages.BlockProposal]{ + proposalAsList := flow.Slashable[[]*flow.Proposal]{ OriginID: proposal.OriginID, - Message: []*messages.BlockProposal{proposal.Message}, + Message: []*flow.Proposal{proposal.Message}, } if e.pendingBlocks.Push(proposalAsList) { e.pendingBlocksNotifier.Notify() @@ -152,10 +154,10 @@ func (e *Engine) OnBlockProposal(proposal flow.Slashable[*messages.BlockProposal } } -// OnSyncedBlocks feeds a batch of blocks obtained via sync into the processing pipeline. +// OnSyncedBlocks feeds a batch of structurally validated blocks obtained via sync into the processing pipeline. // Blocks in batch aren't required to be in any particular order. // Incoming proposals are queued and eventually dispatched by worker.
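Both entry points follow the same push-then-notify pattern: enqueue into a FIFO, ping a notifier, and let a single worker drain the queue. A minimal sketch of that pattern, assuming a mutex-guarded slice in place of fifoqueue and a 1-buffered channel in place of the engine's notifier (illustrative types, not the engine's actual ones):

package main

import (
	"fmt"
	"sync"
)

// notifier coalesces any number of Notify calls into a single pending wake-up.
type notifier struct{ ch chan struct{} }

func newNotifier() notifier { return notifier{ch: make(chan struct{}, 1)} }

func (n notifier) Notify() {
	select {
	case n.ch <- struct{}{}: // set the "work pending" flag
	default: // a wake-up is already pending; drop the duplicate
	}
}

// fifo is a stand-in for the engine's bounded FIFO queue.
type fifo struct {
	mu    sync.Mutex
	items []string
}

func (q *fifo) Push(v string) { q.mu.Lock(); defer q.mu.Unlock(); q.items = append(q.items, v) }

func (q *fifo) Pop() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return "", false
	}
	v := q.items[0]
	q.items = q.items[1:]
	return v, true
}

func main() {
	q := &fifo{}
	n := newNotifier()
	done := make(chan struct{})

	go func() { // worker: on every wake-up, fully drain the queue
		<-n.ch
		for {
			msg, ok := q.Pop()
			if !ok {
				break
			}
			fmt.Println("processing", msg)
		}
		close(done)
	}()

	q.Push("proposal-1") // producer: enqueue first, then notify
	q.Push("proposal-2")
	n.Notify()
	<-done
}

The 1-buffered channel means many notifications collapse into one pending wake-up, which is why the worker must drain the queue completely each time it wakes.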
-func (e *Engine) OnSyncedBlocks(blocks flow.Slashable[[]*messages.BlockProposal]) { +func (e *Engine) OnSyncedBlocks(blocks flow.Slashable[[]*flow.Proposal]) { e.core.engineMetrics.MessageReceived(metrics.EngineCompliance, metrics.MessageSyncedBlocks) if e.pendingBlocks.Push(blocks) { e.pendingBlocksNotifier.Notify() diff --git a/engine/consensus/compliance/engine_test.go b/engine/consensus/compliance/engine_test.go index a82ccc558c7..16aadc89573 100644 --- a/engine/consensus/compliance/engine_test.go +++ b/engine/consensus/compliance/engine_test.go @@ -13,7 +13,6 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/irrecoverable" modulemock "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/utils/unittest" @@ -69,13 +68,13 @@ func (cs *EngineSuite) TestSubmittingMultipleEntries() { go func() { for i := 0; i < blockCount; i++ { block := unittest.BlockWithParentFixture(cs.head) - proposal := messages.NewBlockProposal(block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + proposal := unittest.ProposalFromBlock(block) + hotstuffProposal := model.SignedProposalFromBlock(proposal) cs.hotstuff.On("SubmitProposal", hotstuffProposal).Return().Once() cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil).Once() // execute the block submission - cs.engine.OnBlockProposal(flow.Slashable[*messages.BlockProposal]{ + cs.engine.OnBlockProposal(flow.Slashable[*flow.Proposal]{ OriginID: unittest.IdentifierFixture(), Message: proposal, }) @@ -88,11 +87,11 @@ func (cs *EngineSuite) TestSubmittingMultipleEntries() { block := unittest.BlockWithParentFixture(cs.head) proposal := unittest.ProposalFromBlock(block) - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromBlock(proposal) cs.hotstuff.On("SubmitProposal", hotstuffProposal).Return().Once() cs.voteAggregator.On("AddBlock", hotstuffProposal).Once() cs.validator.On("ValidateProposal", hotstuffProposal).Return(nil).Once() - cs.engine.OnBlockProposal(flow.Slashable[*messages.BlockProposal]{ + cs.engine.OnBlockProposal(flow.Slashable[*flow.Proposal]{ OriginID: unittest.IdentifierFixture(), Message: proposal, }) diff --git a/engine/consensus/dkg/doc.go b/engine/consensus/dkg/doc.go index 2c72beabc5a..15ee9e1e06a 100644 --- a/engine/consensus/dkg/doc.go +++ b/engine/consensus/dkg/doc.go @@ -1,54 +1,45 @@ -/* - -Package dkg implements engines for the DKG protocol. - -ReactorEngine - -ReactorEngine implements triggers to control the lifecycle of DKG runs. A new -DKG protocol is started when an EpochSetup event is sealed and finalized. The -subsequent phase transitions are triggered when specified views are encountered -(specifically when the first block of a given view is finalized). In between -phase transitions the engine regularly queries the DKG smart-contract to read -broadcast messages. - -MessagingEngine - -MessagingEngine is a network engine that enables consensus nodes to securely -exchange private DKG messages. Note that broadcast messages are not exchanged -through this engine, but rather via the DKG smart-contract. - -Architecture - -For every new epoch, the ReactorEngine instantiates a new DKGController with a -new Broker using the provided ControllerFactory. 
The ControllerFactory ties new -DKGControllers to the MessagingEngine via a BrokerTunnel which exposes channels -to relay incoming and outgoing messages (cf. module/dkg). - - EpochSetup/OnView - | - v - +---------------+ - | ReactorEngine | - +---------------+ - | - v -*~~~~~~~~~~~~~~~~~~~~~* (one/epoch) -| +---------------+ | -| | Controller | | -| +---------------+ | -| | | -| v | -| +---------------+ | -| | Broker | | -| +---------------+ | -*~~~~~~~~|~~~~~~~~~\~~* - tunnel smart-contract client - | \ - +--------------+ +------------------+ - | Messaging | | DKGSmartContract | - | Engine | | | - +--------------+ +------------------+ - -*/ - +// Package dkg implements engines for the DKG protocol. +// +// # Reactor Engine +// +// The [ReactorEngine] implements triggers to control the lifecycle of DKG instances. +// A new DKG instance is started when an EpochSetup service event is sealed. +// The subsequent phase transitions are triggered when specified views are encountered. +// Specifically, phase transitions for a view V are triggered when the first block with view ≥V is finalized. +// Between phase transitions, we periodically query the DKG smart-contract ("whiteboard") to read broadcast messages. +// Before transitioning the state machine to the next phase, we query the whiteboard w.r.t. the final view +// of the phase - this ensures all participants eventually observe the same set of messages for each phase. +// +// # Messaging Engine +// +// The [MessagingEngine] is a network engine that enables consensus nodes to securely exchange +// private (not broadcast) DKG messages. Broadcast messages are sent via the DKG smart contract. +// +// # Architecture +// +// In the happy path, one DKG instance runs every epoch. For each DKG instance, the [ReactorEngine] +// instantiates a new, epoch-scoped module.DKGController and module.DKGBroker using the provided dkg.ControllerFactory. +// The dkg.ControllerFactory ties new module.DKGController's to the [MessagingEngine] via a dkg.BrokerTunnel, +// which exposes channels to relay incoming and outgoing messages (see package module/dkg for details). +// +// EpochSetup/EpochCommit/OnView events +// ↓ +// ┏━━━━━━━━━━━━━━━━━┓ +// ┃ ReactorEngine ┃ +// ┗━━━━━━━━━━━━━━━━━┛ +// ↓ +// ┏━━━━━━━━━━━━━━━━━┓ ╮ +// ┃ Controller ┃ │ +// ┗━━━━━━━━━━━━━━━━━┛ │ +// ↓ ┝ Epoch-scoped components +// ┏━━━━━━━━━━━━━━━━━┓ │ +// ┃ Broker ┃ │ +// ┗━━━━━━━━━━━━━━━━━┛ ╯ +// │ │ +// BrokerTunnel DKGContractClient +// ↓ ↓ +// ┏━━━━━━━━━━━━━━┓ ┏━━━━━━━━━━━━━━━━━━┓ +// ┃ Messaging ┃ ┃ FlowDKG smart ┃ +// ┃ Engine ┃ ┃ contract ┃ +// ┗━━━━━━━━━━━━━━┛ ┗━━━━━━━━━━━━━━━━━━┛ package dkg diff --git a/engine/consensus/dkg/messaging_engine.go b/engine/consensus/dkg/messaging_engine.go index 18862110083..576c63bb0e7 100644 --- a/engine/consensus/dkg/messaging_engine.go +++ b/engine/consensus/dkg/messaging_engine.go @@ -70,7 +70,7 @@ var _ component.Component = (*MessagingEngine)(nil) // NewMessagingEngine returns a new MessagingEngine. 
func NewMessagingEngine( log zerolog.Logger, - net network.Network, + net network.EngineRegistry, me module.Local, tunnel *dkg.BrokerTunnel, collector module.MempoolMetrics, @@ -87,7 +87,7 @@ func NewMessagingEngine( notifier := engine.NewNotifier() messageHandler := engine.NewMessageHandler(log, notifier, engine.Pattern{ - Match: engine.MatchType[*msg.DKGMessage], + Match: engine.MatchType[*flow.DKGMessage], Store: &engine.FifoMessageStore{FifoQueue: inbound}, }) @@ -156,11 +156,11 @@ func (e *MessagingEngine) popNextInboundMessage() (msg.PrivDKGMessageIn, bool) { return msg.PrivDKGMessageIn{}, false } asEngineWrapper := nextMessage.(*engine.Message) - asDKGMsg := asEngineWrapper.Payload.(*msg.DKGMessage) + asDKGMsg := asEngineWrapper.Payload.(*flow.DKGMessage) originID := asEngineWrapper.OriginID message := msg.PrivDKGMessageIn{ - DKGMessage: *asDKGMsg, + DKGMessage: (msg.DKGMessage)(*asDKGMsg), OriginID: originID, } return message, true diff --git a/engine/consensus/dkg/messaging_engine_test.go b/engine/consensus/dkg/messaging_engine_test.go index b3ca1e42ff3..d666f4725ba 100644 --- a/engine/consensus/dkg/messaging_engine_test.go +++ b/engine/consensus/dkg/messaging_engine_test.go @@ -9,13 +9,14 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/model/flow" msg "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/dkg" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -24,7 +25,7 @@ type MessagingEngineSuite struct { suite.Suite conduit *mocknetwork.Conduit - network *mocknetwork.Network + network *mocknetwork.EngineRegistry me *mockmodule.Local engine *MessagingEngine @@ -37,7 +38,7 @@ func TestMessagingEngine(t *testing.T) { func (ms *MessagingEngineSuite) SetupTest() { // setup mock conduit ms.conduit = mocknetwork.NewConduit(ms.T()) - ms.network = mocknetwork.NewNetwork(ms.T()) + ms.network = mocknetwork.NewEngineRegistry(ms.T()) ms.network.On("Register", mock.Anything, mock.Anything). Return(ms.conduit, nil). Once() @@ -68,10 +69,10 @@ func (ms *MessagingEngineSuite) TestForwardOutgoingMessages() { // expected DKGMessage destinationID := unittest.IdentifierFixture() - expectedMsg := msg.NewDKGMessage( - []byte("hello"), - "dkg-123", - ) + expectedMsg := msg.DKGMessage{ + Data: []byte("hello"), + DKGInstanceID: "dkg-123", + } done := make(chan struct{}) ms.conduit.On("Unicast", &expectedMsg, destinationID). 
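The BrokerTunnel mentioned in the package documentation above is, at its core, a pair of channels bridging the long-lived messaging engine and the epoch-scoped controller. A loose, self-contained sketch of that idea (illustrative types only, not the module/dkg implementation):

package main

import "fmt"

// DKGMessage is a simplified stand-in for the private DKG message type.
type DKGMessage struct {
	Data          []byte
	DKGInstanceID string
}

// tunnel relays messages between the long-lived messaging engine and the
// epoch-scoped controller; both sides only ever see the two channels.
type tunnel struct {
	msgChIn  chan DKGMessage // network -> controller
	msgChOut chan DKGMessage // controller -> network
}

func newTunnel() *tunnel {
	return &tunnel{
		msgChIn:  make(chan DKGMessage, 8),
		msgChOut: make(chan DKGMessage, 8),
	}
}

// SendIn forwards a message received from the network to the controller.
func (t *tunnel) SendIn(m DKGMessage) { t.msgChIn <- m }

// SendOut forwards a message from the controller to the network layer.
func (t *tunnel) SendOut(m DKGMessage) { t.msgChOut <- m }

func main() {
	t := newTunnel()
	t.SendIn(DKGMessage{Data: []byte("hello"), DKGInstanceID: "dkg-123"})
	m := <-t.msgChIn // controller side drains the inbound channel
	fmt.Printf("controller received %q for instance %s\n", m.Data, m.DKGInstanceID)
}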
@@ -95,8 +96,9 @@ func (ms *MessagingEngineSuite) TestForwardIncomingMessages() { defer cancel() originID := unittest.IdentifierFixture() + dkgMessage := flow.DKGMessage{Data: []byte("hello"), DKGInstanceID: "dkg-123"} expectedMsg := msg.PrivDKGMessageIn{ - DKGMessage: msg.NewDKGMessage([]byte("hello"), "dkg-123"), + DKGMessage: (msg.DKGMessage)(dkgMessage), OriginID: originID, } @@ -108,7 +110,7 @@ func (ms *MessagingEngineSuite) TestForwardIncomingMessages() { close(done) }() - err := ms.engine.Process(channels.DKGCommittee, originID, &expectedMsg.DKGMessage) + err := ms.engine.Process(channels.DKGCommittee, originID, &dkgMessage) require.NoError(ms.T(), err) unittest.RequireCloseBefore(ms.T(), done, time.Second, "message not received") diff --git a/engine/consensus/dkg/reactor_engine.go b/engine/consensus/dkg/reactor_engine.go index 1704483ef48..0d915a57ab1 100644 --- a/engine/consensus/dkg/reactor_engine.go +++ b/engine/consensus/dkg/reactor_engine.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" + "github.com/onflow/crypto" "github.com/rs/zerolog" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" @@ -24,7 +24,7 @@ const DefaultPollStep = 10 // dkgInfo consolidates information about the current DKG protocol instance. type dkgInfo struct { - identities flow.IdentityList + identities flow.IdentitySkeletonList phase1FinalView uint64 phase2FinalView uint64 phase3FinalView uint64 @@ -84,20 +84,21 @@ func (e *ReactorEngine) Ready() <-chan struct{} { // and fail this epoch's DKG. snap := e.State.Final() - phase, err := snap.Phase() + phase, err := snap.EpochPhase() if err != nil { // unexpected storage-level error // TODO use irrecoverable context e.log.Fatal().Err(err).Msg("failed to check epoch phase when starting DKG reactor engine") return } - currentCounter, err := snap.Epochs().Current().Counter() + epoch, err := snap.Epochs().Current() if err != nil { // unexpected storage-level error // TODO use irrecoverable context - e.log.Fatal().Err(err).Msg("failed to retrieve current epoch counter when starting DKG reactor engine") + e.log.Fatal().Err(err).Msg("failed to retrieve current epoch when starting DKG reactor engine") return } + currentCounter := epoch.Counter() first, err := snap.Head() if err != nil { // unexpected storage-level error @@ -110,7 +111,7 @@ func (e *ReactorEngine) Ready() <-chan struct{} { if phase == flow.EpochPhaseSetup { e.startDKGForEpoch(currentCounter, first) } else if phase == flow.EpochPhaseCommitted { - // If we start up in EpochCommitted phase, ensure the DKG end state is set correctly. + // If we start up in EpochCommitted phase, ensure the DKG current state is set correctly. 
e.handleEpochCommittedPhaseStarted(currentCounter, first) } }) @@ -155,7 +156,7 @@ func (e *ReactorEngine) startDKGForEpoch(currentEpochCounter uint64, first *flow Logger() // if we have started the dkg for this epoch already, exit - started, err := e.dkgState.GetDKGStarted(nextEpochCounter) + started, err := e.dkgState.IsDKGStarted(nextEpochCounter) if err != nil { // unexpected storage-level error // TODO use irrecoverable context @@ -167,11 +168,11 @@ func (e *ReactorEngine) startDKGForEpoch(currentEpochCounter uint64, first *flow } // flag that we are starting the dkg for this epoch - err = e.dkgState.SetDKGStarted(nextEpochCounter) + err = e.dkgState.SetDKGState(nextEpochCounter, flow.DKGStateStarted) if err != nil { // unexpected storage-level error // TODO use irrecoverable context - log.Fatal().Err(err).Msg("could not set dkg started") + log.Fatal().Err(err).Msg("could not transition DKG state machine into state DKGStateStarted") } curDKGInfo, err := e.getDKGInfo(firstID) @@ -181,7 +182,7 @@ func (e *ReactorEngine) startDKGForEpoch(currentEpochCounter uint64, first *flow log.Fatal().Err(err).Msg("could not retrieve epoch info") } - committee := curDKGInfo.identities.Filter(filter.IsVotingConsensusCommitteeMember) + committee := curDKGInfo.identities.Filter(filter.IsConsensusCommitteeMember) log.Info(). Uint64("phase1", curDKGInfo.phase1FinalView). @@ -246,14 +247,17 @@ func (e *ReactorEngine) startDKGForEpoch(currentEpochCounter uint64, first *flow // handleEpochCommittedPhaseStarted is invoked upon the transition to the EpochCommitted // phase, when the canonical beacon key vector is incorporated into the protocol state. +// Alternatively we invoke this function preemptively on startup if we are in the +// EpochCommitted Phase, in case the `EpochCommittedPhaseStarted` event was missed +// due to a crash. // // This function checks that the local DKG completed and that our locally computed // key share is consistent with the canonical key vector. When this function returns, -// an end state for the just-completed DKG is guaranteed to be stored (if not, the +// the current state for the just-completed DKG is guaranteed to be stored (if not, the // program will crash). Since this function is invoked synchronously before the end // of the current epoch, this guarantees that when we reach the end of the current epoch -// we will either have a usable beacon key (successful DKG) or a DKG failure end state -// stored, so we can safely fall back to using our staking key. +// we will either have a usable beacon key committed (state [flow.RandomBeaconKeyCommitted]) +// or we persist [flow.DKGStateFailure], so we can safely fall back to using our staking key. // // CAUTION: This function is not safe for concurrent use. This is not enforced within // the ReactorEngine - instead we rely on the protocol event emission being single-threaded @@ -267,35 +271,58 @@ func (e *ReactorEngine) handleEpochCommittedPhaseStarted(currentEpochCounter uin Uint64("next_epoch", nextEpochCounter). // the epoch the just-finished DKG was preparing for Logger() - // Check whether we have already set the end state for this DKG. + // Check whether we have already set the current state for this DKG. // This can happen if the DKG failed locally, if we failed to generate // a local private beacon key, or if we crashed while performing this // check previously. 
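The states referenced throughout this hunk (DKGStateStarted, DKGStateCompleted, DKGStateFailure, RandomBeaconKeyCommitted) form a small state machine whose terminal state rejects all further transitions. A toy model of such a transition check follows; the table is illustrative, not the protocol's authoritative rule set:

package main

import "fmt"

// DKGState enumerates toy counterparts of the states used in the diff.
type DKGState int

const (
	StateUninitialized DKGState = iota
	StateStarted
	StateCompleted
	StateFailure
	StateKeyCommitted // terminal state
)

// allowed is an illustrative transition table: every non-terminal state may
// fail (Failure -> Failure included), while the terminal state permits nothing.
var allowed = map[DKGState][]DKGState{
	StateUninitialized: {StateStarted, StateFailure},
	StateStarted:       {StateCompleted, StateFailure},
	StateCompleted:     {StateKeyCommitted, StateFailure},
	StateFailure:       {StateFailure, StateKeyCommitted}, // recovery may still commit a key
	StateKeyCommitted:  {},
}

func transition(from, to DKGState) error {
	for _, next := range allowed[from] {
		if next == to {
			return nil
		}
	}
	return fmt.Errorf("invalid DKG state transition %d -> %d", from, to)
}

func main() {
	fmt.Println(transition(StateCompleted, StateKeyCommitted)) // <nil>
	fmt.Println(transition(StateKeyCommitted, StateFailure))   // error: terminal state
}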
- endState, err := e.dkgState.GetDKGEndState(nextEpochCounter) - if err == nil { - log.Warn().Msgf("checking beacon key consistency: exiting because dkg end state was already set: %s", endState.String()) + currentState, err := e.dkgState.GetDKGState(nextEpochCounter) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + log.Warn().Msg("failed to get dkg state, assuming this node has skipped epoch setup phase") + } else { + log.Fatal().Err(err).Msg("failed to get dkg state") + } + + return + } + // (i) if I have a key (currentState == flow.DKGStateCompleted) which is consistent with the EpochCommit service event, + // then commit the key, or (ii) if the key is already committed (currentState == flow.RandomBeaconKeyCommitted), then we + // expect it to be consistent with the EpochCommit service event. While (ii) is a sanity check, we have a severe problem + // if it is violated, because a node signing with an invalid random beacon key will be slashed - so we better check! + // Our logic for committing a key is idempotent: it is a no-op when stating that a key `k` should be committed which + // has already been committed previously, while it errors if `k` is different from the previously-committed key. In other words, the + // sanity check (ii) is already included in the happy-path logic for (i). So we just repeat the happy-path logic also for + // currentState == flow.RandomBeaconKeyCommitted, because repeated calls only occur due to node crashes, which are rare. + if currentState != flow.DKGStateCompleted && currentState != flow.RandomBeaconKeyCommitted { + log.Warn().Msgf("checking beacon key consistency after EpochCommit: exiting because dkg didn't reach completed state: %s", currentState.String()) return } // Since epoch phase transitions are emitted when the first block of the new // phase is finalized, the block's snapshot is guaranteed to already be - // accessible in the protocol state at this point (even though the Badger - // transaction finalizing the block has not been committed yet).
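The idempotent-commit rule described in the comment above is easy to model: committing the same key twice is a no-op, while committing a different key over an existing one must error. A self-contained sketch (hypothetical keyStore, not the dkgState API):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// keyStore is a hypothetical stand-in for the beacon-key storage backend.
type keyStore struct{ committed []byte }

// commitKey is idempotent: re-committing the same key is a no-op, while
// committing a different key over an existing one is an error.
func (s *keyStore) commitKey(k []byte) error {
	if s.committed == nil {
		s.committed = append([]byte(nil), k...)
		return nil
	}
	if bytes.Equal(s.committed, k) {
		return nil // repeated commit of the same key: no-op
	}
	return errors.New("refusing to overwrite previously-committed beacon key")
}

func main() {
	s := &keyStore{}
	fmt.Println(s.commitKey([]byte{1, 2, 3})) // <nil>
	fmt.Println(s.commitKey([]byte{1, 2, 3})) // <nil>: idempotent repeat
	fmt.Println(s.commitKey([]byte{9}))       // error: conflicting key
}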
- nextDKG, err := e.State.AtBlockID(firstBlock.ID()).Epochs().Next().DKG() + // accessible in the protocol state at this point + snapshot := e.State.AtBlockID(firstBlock.ID()) + nextEpoch, err := snapshot.Epochs().NextCommitted() + if err != nil { + // CAUTION: this should never happen, indicates a storage failure or state corruption + // TODO use irrecoverable context + log.Fatal().Err(err).Msg("checking beacon key consistency: could not get next committed epoch") + } + nextDKG, err := nextEpoch.DKG() if err != nil { - // CAUTION: this should never happen, indicates a storage failure or corruption + // CAUTION: this should never happen, indicates a storage failure or state corruption // TODO use irrecoverable context log.Fatal().Err(err).Msg("checking beacon key consistency: could not retrieve next DKG info") return } - myBeaconPrivKey, err := e.dkgState.RetrieveMyBeaconPrivateKey(nextEpochCounter) + myBeaconPrivKey, err := e.dkgState.UnsafeRetrieveMyBeaconPrivateKey(nextEpochCounter) if errors.Is(err, storage.ErrNotFound) { log.Warn().Msg("checking beacon key consistency: no key found") - err := e.dkgState.SetDKGEndState(nextEpochCounter, flow.DKGEndStateNoKey) + err := e.dkgState.SetDKGState(nextEpochCounter, flow.DKGStateFailure) if err != nil { // TODO use irrecoverable context - log.Fatal().Err(err).Msg("failed to set dkg end state") + log.Fatal().Err(err).Msg("failed to set dkg state") } return } else if err != nil { @@ -312,53 +339,62 @@ func (e *ReactorEngine) handleEpochCommittedPhaseStarted(currentEpochCounter uin } localPubKey := myBeaconPrivKey.PublicKey() - // we computed a local beacon key but it is inconsistent with our canonical + // we computed a local beacon key, but it is inconsistent with our canonical // public key - therefore it is unsafe for use if !nextDKGPubKey.Equals(localPubKey) { log.Warn(). Str("computed_beacon_pub_key", localPubKey.String()). Str("canonical_beacon_pub_key", nextDKGPubKey.String()). Msg("checking beacon key consistency: locally computed beacon public key does not match beacon public key for next epoch") - err := e.dkgState.SetDKGEndState(nextEpochCounter, flow.DKGEndStateInconsistentKey) + err := e.dkgState.SetDKGState(nextEpochCounter, flow.DKGStateFailure) if err != nil { // TODO use irrecoverable context - log.Fatal().Err(err).Msg("failed to set dkg end state") + log.Fatal().Err(err).Msg("failed to set dkg current state") } return } - err = e.dkgState.SetDKGEndState(nextEpochCounter, flow.DKGEndStateSuccess) + epochProtocolState, err := snapshot.EpochProtocolState() if err != nil { // TODO use irrecoverable context - e.log.Fatal().Err(err).Msg("failed to set dkg end state") + log.Fatal().Err(err).Msg("failed to retrieve epoch protocol state") + return + } + err = e.dkgState.CommitMyBeaconPrivateKey(nextEpochCounter, epochProtocolState.Entry().NextEpochCommit) + if err != nil { + // TODO use irrecoverable context + e.log.Fatal().Err(err).Msg("failed to set dkg current state") } log.Info().Msgf("successfully ended DKG, my beacon pub key for epoch %d is %s", nextEpochCounter, localPubKey) } -// TODO document error returns +// getDKGInfo returns the information required to initiate the DKG for the current epoch. +// firstBlockID must be the first block of the EpochSetup phase. This is one of the few places +// where we have to use the configuration for a future epoch that has not yet been committed. +// CAUTION: the epoch transition might not happen as described here! +// No errors are expected during normal operation. 
func (e *ReactorEngine) getDKGInfo(firstBlockID flow.Identifier) (*dkgInfo, error) { - currEpoch := e.State.AtBlockID(firstBlockID).Epochs().Current() - nextEpoch := e.State.AtBlockID(firstBlockID).Epochs().Next() - - identities, err := nextEpoch.InitialIdentities() + epochsAtBlock := e.State.AtBlockID(firstBlockID).Epochs() + currEpoch, err := epochsAtBlock.Current() if err != nil { - return nil, fmt.Errorf("could not retrieve epoch identities: %w", err) + return nil, fmt.Errorf("could not retrieve current epoch: %w", err) } - phase1Final, phase2Final, phase3Final, err := protocol.DKGPhaseViews(currEpoch) + nextEpoch, err := epochsAtBlock.NextUnsafe() if err != nil { - return nil, fmt.Errorf("could not retrieve epoch dkg final views: %w", err) + return nil, fmt.Errorf("could not retrieve next epoch: %w", err) } - seed := make([]byte, crypto.SeedMinLenDKG) + + seed := make([]byte, crypto.KeyGenSeedMinLen) _, err = rand.Read(seed) if err != nil { return nil, fmt.Errorf("could not generate random seed: %w", err) } info := &dkgInfo{ - identities: identities, - phase1FinalView: phase1Final, - phase2FinalView: phase2Final, - phase3FinalView: phase3Final, + identities: nextEpoch.InitialIdentities(), + phase1FinalView: currEpoch.DKGPhase1FinalView(), + phase2FinalView: currEpoch.DKGPhase2FinalView(), + phase3FinalView: currEpoch.DKGPhase3FinalView(), seed: seed, } return info, nil @@ -421,20 +457,37 @@ func (e *ReactorEngine) end(nextEpochCounter uint64) func() error { err := e.controller.End() if crypto.IsDKGFailureError(err) { + // Failing to complete the DKG protocol is a rare but expected scenario, which we must handle. + // By convention, if we are leaving the happy path, we want to persist the _first_ failure symptom + // in the `dkgState`. If the write yields a [storage.InvalidDKGStateTransitionError], it means that the state machine + // is in the terminal state ([flow.RandomBeaconKeyCommitted]), as all other transitions (even [flow.DKGStateFailure] -> [flow.DKGStateFailure]) + // are allowed. If the protocol is in the terminal state, and we have a failure symptom, then it means that recovery has happened + // before ending the DKG. In this case, we want to ignore the error and return without error. e.log.Warn().Err(err).Msgf("node %s with index %d failed DKG locally", e.me.NodeID(), e.controller.GetIndex()) - err := e.dkgState.SetDKGEndState(nextEpochCounter, flow.DKGEndStateDKGFailure) + err := e.dkgState.SetDKGState(nextEpochCounter, flow.DKGStateFailure) if err != nil { - return fmt.Errorf("failed to set dkg end state following dkg end error: %w", err) + if storage.IsInvalidDKGStateTransitionError(err) { + return nil + } + return fmt.Errorf("failed to set dkg current state following dkg end error: %w", err) + } + return nil // local DKG protocol has failed (the expected scenario) } else if err != nil { return fmt.Errorf("unknown error ending the dkg: %w", err) } + // The following only implements the happy path, which is an atomic step-by-step progression + // along a single path in the `dkgState` machine. If the write yields a `storage.ErrAlreadyExists`, + // we know the overall protocol has already abandoned the happy path, because on the happy path + // ReactorEngine is the only writer. Then this function just stops and returns without error.
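The error handling sketched in these comments boils down to: persist the first failure symptom, and treat "state machine already terminal" and "key already stored" as benign outcomes of a recovery that has already happened. Roughly, with hypothetical sentinels standing in for the storage package's real error types:

package main

import (
	"errors"
	"fmt"
)

// Sentinels standing in for storage.InvalidDKGStateTransitionError and
// storage.ErrAlreadyExists; the real types live in flow-go's storage package.
var (
	errInvalidTransition = errors.New("invalid dkg state transition")
	errAlreadyExists     = errors.New("key already exists")
)

// persistFailure records the first failure symptom, but treats "state machine
// already in its terminal state" as benign: recovery has already committed a key.
func persistFailure(write func() error) error {
	if err := write(); err != nil {
		if errors.Is(err, errInvalidTransition) {
			return nil
		}
		return fmt.Errorf("failed to persist DKG failure: %w", err)
	}
	return nil
}

// storeKey inserts the freshly computed key share; a pre-existing key is
// expected after an epoch recovery and is therefore not an error.
func storeKey(insert func() error) error {
	if err := insert(); err != nil {
		if errors.Is(err, errAlreadyExists) {
			return nil
		}
		return fmt.Errorf("could not save beacon private key: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(persistFailure(func() error { return errInvalidTransition })) // <nil>: terminal state reached
	fmt.Println(storeKey(func() error { return errAlreadyExists }))           // <nil>: expected after recovery
	fmt.Println(storeKey(func() error { return errors.New("disk failure") })) // propagated
}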
privateShare, _, _ := e.controller.GetArtifacts() if privateShare != nil { // we only store our key if one was computed err = e.dkgState.InsertMyBeaconPrivateKey(nextEpochCounter, privateShare) if err != nil { + if errors.Is(err, storage.ErrAlreadyExists) { + return nil // the beacon key already existing is expected in case of epoch recovery + } return fmt.Errorf("could not save beacon private key in db: %w", err) } } diff --git a/engine/consensus/dkg/reactor_engine_test.go b/engine/consensus/dkg/reactor_engine_test.go index 48e2707188d..e28c291f20f 100644 --- a/engine/consensus/dkg/reactor_engine_test.go +++ b/engine/consensus/dkg/reactor_engine_test.go @@ -12,7 +12,8 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/engine/consensus/dkg" "github.com/onflow/flow-go/model/flow" dkgmodule "github.com/onflow/flow-go/module/dkg" @@ -51,8 +52,8 @@ type ReactorEngineSuite_SetupPhase struct { logger zerolog.Logger local *module.Local - currentEpoch *protocol.Epoch - nextEpoch *protocol.Epoch + currentEpoch *protocol.CommittedEpoch + nextEpoch *protocol.TentativeEpoch epochQuery *mocks.EpochQuery snapshot *protocol.Snapshot state *protocol.State @@ -113,21 +114,21 @@ func (suite *ReactorEngineSuite_SetupPhase) SetupTest() { // expectedPrivKey is the expected private share produced by the dkg run. We // will mock the controller to return this value, and we will check it // against the value that gets inserted in the DB at the end. - suite.expectedPrivateKey = unittest.PrivateKeyFixture(crypto.BLSBLS12381, 48) + suite.expectedPrivateKey = unittest.PrivateKeyFixture(crypto.BLSBLS12381) // mock protocol state - suite.currentEpoch = new(protocol.Epoch) - suite.currentEpoch.On("Counter").Return(suite.epochCounter, nil) - suite.currentEpoch.On("DKGPhase1FinalView").Return(suite.dkgPhase1FinalView, nil) - suite.currentEpoch.On("DKGPhase2FinalView").Return(suite.dkgPhase2FinalView, nil) - suite.currentEpoch.On("DKGPhase3FinalView").Return(suite.dkgPhase3FinalView, nil) - suite.nextEpoch = new(protocol.Epoch) - suite.nextEpoch.On("Counter").Return(suite.NextEpochCounter(), nil) - suite.nextEpoch.On("InitialIdentities").Return(suite.committee, nil) + suite.currentEpoch = new(protocol.CommittedEpoch) + suite.currentEpoch.On("Counter").Return(suite.epochCounter) + suite.currentEpoch.On("DKGPhase1FinalView").Return(suite.dkgPhase1FinalView) + suite.currentEpoch.On("DKGPhase2FinalView").Return(suite.dkgPhase2FinalView) + suite.currentEpoch.On("DKGPhase3FinalView").Return(suite.dkgPhase3FinalView) + suite.nextEpoch = new(protocol.TentativeEpoch) + suite.nextEpoch.On("Counter").Return(suite.NextEpochCounter()) + suite.nextEpoch.On("InitialIdentities").Return(suite.committee.ToSkeleton()) suite.epochQuery = mocks.NewEpochQuery(suite.T(), suite.epochCounter) - suite.epochQuery.Add(suite.currentEpoch) - suite.epochQuery.Add(suite.nextEpoch) + suite.epochQuery.AddCommitted(suite.currentEpoch) + suite.epochQuery.AddTentative(suite.nextEpoch) suite.snapshot = new(protocol.Snapshot) suite.snapshot.On("Epochs").Return(suite.epochQuery) suite.snapshot.On("Head").Return(suite.firstBlock, nil) @@ -138,7 +139,7 @@ func (suite *ReactorEngineSuite_SetupPhase) SetupTest() { // ensure that an attempt is made to insert the expected dkg private share // for the next epoch. 
suite.dkgState = new(storage.DKGState) - suite.dkgState.On("SetDKGStarted", suite.NextEpochCounter()).Return(nil).Once() + suite.dkgState.On("SetDKGState", suite.NextEpochCounter(), flow.DKGStateStarted).Return(nil).Once() suite.dkgState.On("InsertMyBeaconPrivateKey", mock.Anything, mock.Anything).Run( func(args mock.Arguments) { epochCounter := args.Get(0).(uint64) @@ -162,7 +163,7 @@ func (suite *ReactorEngineSuite_SetupPhase) SetupTest() { suite.factory = new(module.DKGControllerFactory) suite.factory.On("Create", dkgmodule.CanonicalInstanceID(suite.firstBlock.ChainID, suite.NextEpochCounter()), - suite.committee, + suite.committee.ToSkeleton(), mock.Anything, ).Return(suite.controller, nil) @@ -185,7 +186,7 @@ func (suite *ReactorEngineSuite_SetupPhase) SetupTest() { func (suite *ReactorEngineSuite_SetupPhase) TestRunDKG_PhaseTransition() { // the dkg for this epoch has not been started - suite.dkgState.On("GetDKGStarted", suite.NextEpochCounter()).Return(false, nil).Once() + suite.dkgState.On("IsDKGStarted", suite.NextEpochCounter()).Return(false, nil).Once() // protocol event indicating the setup phase is starting suite.engine.EpochSetupPhaseStarted(suite.epochCounter, suite.firstBlock) @@ -208,9 +209,9 @@ func (suite *ReactorEngineSuite_SetupPhase) TestRunDKG_PhaseTransition() { func (suite *ReactorEngineSuite_SetupPhase) TestRunDKG_StartupInSetupPhase() { // we are in the EpochSetup phase - suite.snapshot.On("Phase").Return(flow.EpochPhaseSetup, nil).Once() + suite.snapshot.On("EpochPhase").Return(flow.EpochPhaseSetup, nil).Once() // the dkg for this epoch has not been started - suite.dkgState.On("GetDKGStarted", suite.NextEpochCounter()).Return(false, nil).Once() + suite.dkgState.On("IsDKGStarted", suite.NextEpochCounter()).Return(false, nil).Once() // start up the engine unittest.AssertClosesBefore(suite.T(), suite.engine.Ready(), time.Second) @@ -237,9 +238,9 @@ func (suite *ReactorEngineSuite_SetupPhase) TestRunDKG_StartupInSetupPhase() { func (suite *ReactorEngineSuite_SetupPhase) TestRunDKG_StartupInSetupPhase_DKGAlreadyStarted() { // we are in the EpochSetup phase - suite.snapshot.On("Phase").Return(flow.EpochPhaseSetup, nil).Once() + suite.snapshot.On("EpochPhase").Return(flow.EpochPhaseSetup, nil).Once() // the dkg for this epoch has been started - suite.dkgState.On("GetDKGStarted", suite.NextEpochCounter()).Return(true, nil).Once() + suite.dkgState.On("IsDKGStarted", suite.NextEpochCounter()).Return(true, nil).Once() // start up the engine unittest.AssertClosesBefore(suite.T(), suite.engine.Ready(), time.Second) @@ -265,7 +266,7 @@ type ReactorEngineSuite_CommittedPhase struct { epochCounter uint64 // current epoch counter myLocalBeaconKey crypto.PrivateKey // my locally computed beacon key myGlobalBeaconPubKey crypto.PublicKey // my public key, as dictated by global DKG - dkgEndState flow.DKGEndState // backend for DGKState. + DKGState flow.DKGState // backend for DKGState.
firstBlock *flow.Header // first block of EpochCommitted phase warnsLogged int // count # of warn-level logs @@ -289,7 +290,7 @@ func (suite *ReactorEngineSuite_CommittedPhase) NextEpochCounter() uint64 { func (suite *ReactorEngineSuite_CommittedPhase) SetupTest() { suite.epochCounter = rand.Uint64() - suite.dkgEndState = flow.DKGEndStateUnknown + suite.DKGState = flow.DKGStateCompleted // we start with the completed state since we are going to test the transition to committed suite.me = new(module.Local) id := unittest.IdentifierFixture() @@ -300,7 +301,7 @@ func (suite *ReactorEngineSuite_CommittedPhase) SetupTest() { suite.myGlobalBeaconPubKey = suite.myLocalBeaconKey.PublicKey() suite.dkgState = new(storage.DKGState) - suite.dkgState.On("RetrieveMyBeaconPrivateKey", suite.NextEpochCounter()).Return( + suite.dkgState.On("UnsafeRetrieveMyBeaconPrivateKey", suite.NextEpochCounter()).Return( func(_ uint64) crypto.PrivateKey { return suite.myLocalBeaconKey }, func(_ uint64) error { if suite.myLocalBeaconKey == nil { @@ -309,24 +310,24 @@ func (suite *ReactorEngineSuite_CommittedPhase) SetupTest() { return nil }, ) - suite.dkgState.On("SetDKGEndState", suite.NextEpochCounter(), mock.Anything). + suite.dkgState.On("SetDKGState", suite.NextEpochCounter(), mock.Anything). Run(func(args mock.Arguments) { - assert.Equal(suite.T(), flow.DKGEndStateUnknown, suite.dkgEndState) // must be unset - endState := args[1].(flow.DKGEndState) - suite.dkgEndState = endState + assert.Equal(suite.T(), flow.DKGStateCompleted, suite.DKGState) // must be equal to the initial state of the test + endState := args[1].(flow.DKGState) + suite.DKGState = endState }). Return(nil) - suite.dkgState.On("GetDKGEndState", suite.NextEpochCounter()).Return( - func(_ uint64) flow.DKGEndState { return suite.dkgEndState }, + suite.dkgState.On("GetDKGState", suite.NextEpochCounter()).Return( + func(_ uint64) flow.DKGState { return suite.DKGState }, func(_ uint64) error { - if suite.dkgEndState == flow.DKGEndStateUnknown { + if suite.DKGState == flow.DKGStateUninitialized { return storerr.ErrNotFound } return nil }, ) - currentEpoch := new(protocol.Epoch) + currentEpoch := new(protocol.CommittedEpoch) currentEpoch.On("Counter").Return(suite.epochCounter, nil) nextDKG := new(protocol.DKG) @@ -335,13 +336,13 @@ func (suite *ReactorEngineSuite_CommittedPhase) SetupTest() { func(_ flow.Identifier) error { return nil }, ) - nextEpoch := new(protocol.Epoch) + nextEpoch := new(protocol.CommittedEpoch) nextEpoch.On("Counter").Return(suite.NextEpochCounter(), nil) nextEpoch.On("DKG").Return(nextDKG, nil) epochQuery := mocks.NewEpochQuery(suite.T(), suite.epochCounter) - epochQuery.Add(currentEpoch) - epochQuery.Add(nextEpoch) + epochQuery.AddCommitted(currentEpoch) + epochQuery.AddCommitted(nextEpoch) firstBlock := unittest.BlockHeaderFixture(unittest.HeaderWithView(100)) suite.firstBlock = firstBlock @@ -377,11 +378,18 @@ func (suite *ReactorEngineSuite_CommittedPhase) SetupTest() { // * set the DKG end state to Success func (suite *ReactorEngineSuite_CommittedPhase) TestDKGSuccess() { - // no change to suite - this is the happy path - + entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpochCommit.Counter = suite.NextEpochCounter() + entry.NextEpoch.CommitID = entry.NextEpochCommit.ID() + }) + epochProtocolState := protocol.NewEpochProtocolState(suite.T()) + epochProtocolState.On("Entry").Return(entry) + 
suite.snap.On("EpochProtocolState").Return(epochProtocolState, nil) + suite.dkgState.On("CommitMyBeaconPrivateKey", suite.NextEpochCounter(), entry.NextEpochCommit).Return(nil).Once() suite.engine.EpochCommittedPhaseStarted(suite.epochCounter, suite.firstBlock) suite.Require().Equal(0, suite.warnsLogged) - suite.Assert().Equal(flow.DKGEndStateSuccess, suite.dkgEndState) + // ensure we commit my beacon private key + suite.dkgState.AssertCalled(suite.T(), "CommitMyBeaconPrivateKey", suite.NextEpochCounter(), entry.NextEpochCommit) } // TestInconsistentKey tests the path where we are checking the global DKG @@ -396,7 +404,7 @@ func (suite *ReactorEngineSuite_CommittedPhase) TestInconsistentKey() { suite.engine.EpochCommittedPhaseStarted(suite.epochCounter, suite.firstBlock) suite.Require().Equal(1, suite.warnsLogged) - suite.Assert().Equal(flow.DKGEndStateInconsistentKey, suite.dkgEndState) + suite.Assert().Equal(flow.DKGStateFailure, suite.DKGState) } // TestMissingKey tests the path where we are checking the global DKG results @@ -411,7 +419,7 @@ func (suite *ReactorEngineSuite_CommittedPhase) TestMissingKey() { suite.engine.EpochCommittedPhaseStarted(suite.epochCounter, suite.firstBlock) suite.Require().Equal(1, suite.warnsLogged) - suite.Assert().Equal(flow.DKGEndStateNoKey, suite.dkgEndState) + suite.Assert().Equal(flow.DKGStateFailure, suite.DKGState) } // TestLocalDKGFailure tests the path where we are checking the global DKG @@ -422,11 +430,11 @@ func (suite *ReactorEngineSuite_CommittedPhase) TestMissingKey() { func (suite *ReactorEngineSuite_CommittedPhase) TestLocalDKGFailure() { // set dkg end state as failure - suite.dkgEndState = flow.DKGEndStateDKGFailure + suite.DKGState = flow.DKGStateFailure suite.engine.EpochCommittedPhaseStarted(suite.epochCounter, suite.firstBlock) suite.Require().Equal(1, suite.warnsLogged) - suite.Assert().Equal(flow.DKGEndStateDKGFailure, suite.dkgEndState) + suite.Assert().Equal(flow.DKGStateFailure, suite.DKGState) } // TestStartupInCommittedPhase_DKGSuccess tests that the dkg end state is correctly @@ -434,10 +442,19 @@ func (suite *ReactorEngineSuite_CommittedPhase) TestLocalDKGFailure() { func (suite *ReactorEngineSuite_CommittedPhase) TestStartupInCommittedPhase_DKGSuccess() { // we are in the EpochSetup phase - suite.snap.On("Phase").Return(flow.EpochPhaseCommitted, nil).Once() + suite.snap.On("EpochPhase").Return(flow.EpochPhaseCommitted, nil).Once() // the dkg for this epoch has been started but not ended - suite.dkgState.On("GetDKGStarted", suite.NextEpochCounter()).Return(true, nil).Once() - suite.dkgState.On("GetDKGEndState", suite.NextEpochCounter()).Return(flow.DKGEndStateUnknown, storerr.ErrNotFound).Once() + suite.dkgState.On("IsDKGStarted", suite.NextEpochCounter()).Return(true, nil).Once() + suite.DKGState = flow.DKGStateCompleted + + entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpochCommit.Counter = suite.NextEpochCounter() + entry.NextEpoch.CommitID = entry.NextEpochCommit.ID() + }) + epochProtocolState := protocol.NewEpochProtocolState(suite.T()) + epochProtocolState.On("Entry").Return(entry) + suite.snap.On("EpochProtocolState").Return(epochProtocolState, nil) + suite.dkgState.On("CommitMyBeaconPrivateKey", suite.NextEpochCounter(), entry.NextEpochCommit).Return(nil).Once() // start up the engine unittest.AssertClosesBefore(suite.T(), suite.engine.Ready(), time.Second) @@ -448,19 +465,19 @@ func (suite *ReactorEngineSuite_CommittedPhase) 
TestStartupInCommittedPhase_DKGS mock.Anything, mock.Anything, ) - // should set DKG end state - suite.Assert().Equal(flow.DKGEndStateSuccess, suite.dkgEndState) + // ensure we commit my beacon private key + suite.dkgState.AssertCalled(suite.T(), "CommitMyBeaconPrivateKey", suite.NextEpochCounter(), entry.NextEpochCommit) } // TestStartupInCommittedPhase_DKGSuccess tests that the dkg end state is correctly // set when starting in EpochCommitted phase and the DKG end state is already set. -func (suite *ReactorEngineSuite_CommittedPhase) TestStartupInCommittedPhase_DKGEndStateAlreadySet() { +func (suite *ReactorEngineSuite_CommittedPhase) TestStartupInCommittedPhase_DKGStateAlreadySet() { - // we are in the EpochSetup phase - suite.snap.On("Phase").Return(flow.EpochPhaseCommitted, nil).Once() + // we are in the Epoch Commit phase + suite.snap.On("EpochPhase").Return(flow.EpochPhaseCommitted, nil).Once() // the dkg for this epoch has been started and ended - suite.dkgState.On("GetDKGStarted", suite.NextEpochCounter()).Return(true, nil).Once() - suite.dkgState.On("GetDKGEndState", suite.NextEpochCounter()).Return(flow.DKGEndStateNoKey, nil).Once() + suite.dkgState.On("IsDKGStarted", suite.NextEpochCounter()).Return(true, nil).Once() + suite.DKGState = flow.DKGStateFailure // start up the engine unittest.AssertClosesBefore(suite.T(), suite.engine.Ready(), time.Second) @@ -478,10 +495,10 @@ func (suite *ReactorEngineSuite_CommittedPhase) TestStartupInCommittedPhase_DKGE func (suite *ReactorEngineSuite_CommittedPhase) TestStartupInCommittedPhase_InconsistentKey() { // we are in the EpochSetup phase - suite.snap.On("Phase").Return(flow.EpochPhaseCommitted, nil).Once() + suite.snap.On("EpochPhase").Return(flow.EpochPhaseCommitted, nil).Once() // the dkg for this epoch has been started but not ended - suite.dkgState.On("GetDKGStarted", suite.NextEpochCounter()).Return(true, nil).Once() - suite.dkgState.On("GetDKGEndState", suite.NextEpochCounter()).Return(flow.DKGEndStateUnknown, storerr.ErrNotFound).Once() + suite.dkgState.On("IsDKGStarted", suite.NextEpochCounter()).Return(true, nil).Once() + suite.dkgState.On("GetDKGState", suite.NextEpochCounter()).Return(flow.DKGStateUninitialized, storerr.ErrNotFound).Once() // set our global pub key to a random value suite.myGlobalBeaconPubKey = unittest.RandomBeaconPriv().PublicKey() @@ -496,7 +513,7 @@ func (suite *ReactorEngineSuite_CommittedPhase) TestStartupInCommittedPhase_Inco mock.Anything, ) // should set DKG end state - suite.Assert().Equal(flow.DKGEndStateInconsistentKey, suite.dkgEndState) + suite.Assert().Equal(flow.DKGStateFailure, suite.DKGState) } // TestStartupInCommittedPhase_MissingKey tests that the dkg end state is correctly @@ -504,10 +521,10 @@ func (suite *ReactorEngineSuite_CommittedPhase) TestStartupInCommittedPhase_Inco func (suite *ReactorEngineSuite_CommittedPhase) TestStartupInCommittedPhase_MissingKey() { // we are in the EpochSetup phase - suite.snap.On("Phase").Return(flow.EpochPhaseCommitted, nil).Once() + suite.snap.On("EpochPhase").Return(flow.EpochPhaseCommitted, nil).Once() // the dkg for this epoch has been started but not ended - suite.dkgState.On("GetDKGStarted", suite.NextEpochCounter()).Return(true, nil).Once() - suite.dkgState.On("GetDKGEndState", suite.NextEpochCounter()).Return(flow.DKGEndStateUnknown, storerr.ErrNotFound).Once() + suite.dkgState.On("IsDKGStarted", suite.NextEpochCounter()).Return(true, nil).Once() + suite.dkgState.On("GetDKGState", suite.NextEpochCounter()).Return(flow.DKGStateUninitialized, 
storerr.ErrNotFound).Once() // remove our key suite.myLocalBeaconKey = nil @@ -522,7 +539,7 @@ func (suite *ReactorEngineSuite_CommittedPhase) TestStartupInCommittedPhase_Miss mock.Anything, ) // should set DKG end state - suite.Assert().Equal(flow.DKGEndStateNoKey, suite.dkgEndState) + suite.Assert().Equal(flow.DKGStateFailure, suite.DKGState) } // utility function to track the number of warn-level calls to a logger diff --git a/engine/consensus/ingestion/core.go b/engine/consensus/ingestion/core.go index abe7e1ca420..b844f963341 100644 --- a/engine/consensus/ingestion/core.go +++ b/engine/consensus/ingestion/core.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package ingestion import ( @@ -68,15 +66,15 @@ func (e *Core) OnGuarantee(originID flow.Identifier, guarantee *flow.CollectionG ) defer span.End() - guaranteeID := guarantee.ID() - log := e.log.With(). Hex("origin_id", originID[:]). - Hex("collection_id", guaranteeID[:]). + Hex("collection_id", guarantee.CollectionID[:]). Hex("signers", guarantee.SignerIndices). Logger() log.Info().Msg("collection guarantee received") + guaranteeID := guarantee.ID() + // skip collection guarantees that are already in our memory pool exists := e.pool.Has(guaranteeID) if exists { @@ -99,7 +97,7 @@ func (e *Core) OnGuarantee(originID flow.Identifier, guarantee *flow.CollectionG } // at this point, we can add the guarantee to the memory pool - added := e.pool.Add(guarantee) + added := e.pool.Add(guaranteeID, guarantee) if !added { log.Debug().Msg("discarding guarantee already in pool") return nil @@ -157,23 +155,27 @@ func (e *Core) validateExpiry(guarantee *flow.CollectionGuarantee) error { func (e *Core) validateGuarantors(guarantee *flow.CollectionGuarantee) error { // get the clusters to assign the guarantee and check if the guarantor is part of it snapshot := e.state.AtBlockID(guarantee.ReferenceBlockID) - cluster, err := snapshot.Epochs().Current().ClusterByChainID(guarantee.ChainID) + epoch, err := snapshot.Epochs().Current() + if err != nil { + return fmt.Errorf("could not get current epoch: %w", err) + } + cluster, err := epoch.ClusterByChainID(guarantee.ClusterChainID) // reference block not found if errors.Is(err, state.ErrUnknownSnapshotReference) { return engine.NewUnverifiableInputError( - "could not get clusters with chainID %v for unknown reference block (id=%x): %w", guarantee.ChainID, guarantee.ReferenceBlockID, err) + "could not get clusters with chainID %v for unknown reference block (id=%x): %w", guarantee.ClusterChainID, guarantee.ReferenceBlockID, err) } // cluster not found by the chain ID if errors.Is(err, protocol.ErrClusterNotFound) { - return engine.NewInvalidInputErrorf("cluster not found by chain ID %v: %w", guarantee.ChainID, err) + return engine.NewInvalidInputErrorf("cluster not found by chain ID %v: %w", guarantee.ClusterChainID, err) } if err != nil { - return fmt.Errorf("internal error retrieving collector clusters for guarantee (ReferenceBlockID: %v, ChainID: %v): %w", - guarantee.ReferenceBlockID, guarantee.ChainID, err) + return fmt.Errorf("internal error retrieving collector clusters for guarantee (ReferenceBlockID: %v, ClusterChainID: %v): %w", + guarantee.ReferenceBlockID, guarantee.ClusterChainID, err) } // ensure the guarantors are from the same cluster - clusterMembers := cluster.Members() + clusterMembers := cluster.Members().ToSkeleton() // find guarantors by signer indices guarantors, err := signature.DecodeSignerIndicesToIdentities(clusterMembers, guarantee.SignerIndices) @@ -187,7 
+189,7 @@ func (e *Core) validateGuarantors(guarantee *flow.CollectionGuarantee) error { // determine whether signers reach minimally required stake threshold threshold := committees.WeightThresholdToBuildQC(clusterMembers.TotalWeight()) // compute required stake threshold - totalStake := flow.IdentityList(guarantors).TotalWeight() + totalStake := guarantors.TotalWeight() if totalStake < threshold { return engine.NewInvalidInputErrorf("collection guarantee qc signers have insufficient stake of %d (required=%d)", totalStake, threshold) } diff --git a/engine/consensus/ingestion/core_test.go b/engine/consensus/ingestion/core_test.go index 6167f6d55ee..57a5ef88804 100644 --- a/engine/consensus/ingestion/core_test.go +++ b/engine/consensus/ingestion/core_test.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package ingestion import ( @@ -46,9 +44,9 @@ type IngestionCoreSuite struct { ref *mockprotocol.Snapshot // state snapshot w.r.t. reference block query *mockprotocol.EpochQuery - epoch *mockprotocol.Epoch + epoch *mockprotocol.CommittedEpoch headers *mockstorage.Headers - pool *mockmempool.Guarantees + pool *mockmempool.Mempool[flow.Identifier, *flow.CollectionGuarantee] core *Core } @@ -84,9 +82,9 @@ func (suite *IngestionCoreSuite) SetupTest() { final := &mockprotocol.Snapshot{} ref := &mockprotocol.Snapshot{} suite.query = &mockprotocol.EpochQuery{} - suite.epoch = &mockprotocol.Epoch{} + suite.epoch = &mockprotocol.CommittedEpoch{} headers := &mockstorage.Headers{} - pool := &mockmempool.Guarantees{} + pool := &mockmempool.Mempool[flow.Identifier, *flow.CollectionGuarantee]{} cluster := &mockprotocol.Cluster{} // this state basically works like a normal protocol state @@ -108,14 +106,14 @@ func (suite *IngestionCoreSuite) SetupTest() { }, ) final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { return suite.finalIdentities.Filter(selector) }, nil, ) ref.On("Epochs").Return(suite.query) - suite.query.On("Current").Return(suite.epoch) - cluster.On("Members").Return(suite.clusterMembers) + suite.query.On("Current").Return(suite.epoch, nil) + cluster.On("Members").Return(suite.clusterMembers.ToSkeleton()) suite.epoch.On("ClusterByChainID", mock.Anything).Return( func(chainID flow.ChainID) protocol.Cluster { if chainID == suite.clusterID { @@ -167,14 +165,14 @@ func (suite *IngestionCoreSuite) TestOnGuaranteeNewFromCollection() { // the guarantee is not part of the memory pool yet suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(true) + suite.pool.On("Add", guarantee.ID(), guarantee).Return(true) // submit the guarantee as if it was sent by a collection node err := suite.core.OnGuarantee(suite.collID, guarantee) suite.Assert().NoError(err, "should not error on new guarantee from collection node") // check that the guarantee has been added to the mempool - suite.pool.AssertCalled(suite.T(), "Add", guarantee) + suite.pool.AssertCalled(suite.T(), "Add", guarantee.ID(), guarantee) } @@ -201,14 +199,14 @@ func (suite *IngestionCoreSuite) TestOnGuaranteeNotAdded() { // the guarantee is not already part of the memory pool suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(false) + suite.pool.On("Add", guarantee.ID(), guarantee).Return(false) // submit the guarantee as if it was sent by a collection node err := suite.core.OnGuarantee(suite.collID, guarantee) 
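For orientation on the stake check above: a guarantee is only accepted when its signers hold a supermajority of their cluster's weight. Below is a minimal, self-contained sketch of that arithmetic. The helper here is a hypothetical stand-in for committees.WeightThresholdToBuildQC, assuming the conventional "strictly more than 2/3 of total weight" rule; the real helper's exact rounding may differ.

package main

import "fmt"

// weightThresholdToBuildQC is an illustrative stand-in: the smallest weight that
// strictly exceeds 2/3 of totalWeight (integer division rounds down, hence the +1).
func weightThresholdToBuildQC(totalWeight uint64) uint64 {
	return totalWeight*2/3 + 1
}

func main() {
	clusterTotalWeight := uint64(3000) // e.g. three collectors with weight 1000 each
	guarantorWeight := uint64(2000)    // combined weight of the guarantee's signers

	threshold := weightThresholdToBuildQC(clusterTotalWeight) // 2001
	if guarantorWeight < threshold {
		fmt.Printf("insufficient stake: %d < %d\n", guarantorWeight, threshold)
		return
	}
	fmt.Println("guarantee carries a valid supermajority of cluster weight")
}

Note that holding exactly two thirds of the weight (2000 of 3000) still fails: the threshold demands strictly more.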
suite.Assert().NoError(err, "should not error when guarantee was already added") // check that the guarantee has been added to the mempool - suite.pool.AssertCalled(suite.T(), "Add", guarantee) + suite.pool.AssertCalled(suite.T(), "Add", guarantee.ID(), guarantee) } @@ -254,13 +252,13 @@ func (suite *IngestionCoreSuite) TestOnGuaranteeExpired() { suite.Assert().True(engine.IsOutdatedInputError(err)) } -// TestOnGuaranteeReferenceBlockFromWrongEpoch validates that guarantees which contain a ChainID -// that is inconsistent with the reference block (ie. the ChainID either refers to a non-existent +// TestOnGuaranteeReferenceBlockFromWrongEpoch validates that guarantees which contain a ClusterChainID +// that is inconsistent with the reference block (ie. the ClusterChainID either refers to a non-existent // cluster, or a cluster for a different epoch) should be considered invalid inputs. func (suite *IngestionCoreSuite) TestOnGuaranteeReferenceBlockFromWrongEpoch() { // create a guarantee from a cluster in a different epoch guarantee := suite.validGuarantee() - guarantee.ChainID = cluster.CanonicalClusterID(suite.epochCounter+1, suite.clusterMembers.NodeIDs()) + guarantee.ClusterChainID = cluster.CanonicalClusterID(suite.epochCounter+1, suite.clusterMembers.NodeIDs()) // the guarantee is not part of the memory pool suite.pool.On("Has", guarantee.ID()).Return(false) @@ -298,18 +296,20 @@ func (suite *IngestionCoreSuite) TestOnGuaranteeInvalidGuarantor() { // at this epoch boundary). func (suite *IngestionCoreSuite) TestOnGuaranteeEpochEnd() { - // in the finalized state the collectors has 0 weight but is not ejected - // this is what happens when we finalize the final block of the epoch during + // The finalized state contains the identity of a collector that: + // * was active in the previous epoch but is leaving as of the current epoch + // * wasn't ejected and has positive initial weight + // This happens when we finalize the final block of the epoch during // which this node requested to unstake colID, ok := suite.finalIdentities.ByNodeID(suite.collID) suite.Require().True(ok) - colID.Weight = 0 + colID.EpochParticipationStatus = flow.EpochParticipationStatusLeaving guarantee := suite.validGuarantee() // the guarantee is not part of the memory pool suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(true).Once() + suite.pool.On("Add", guarantee.ID(), guarantee).Return(true).Once() // submit the guarantee as if it was sent by the collection node which // is leaving at the current epoch boundary @@ -326,21 +326,21 @@ func (suite *IngestionCoreSuite) TestOnGuaranteeUnknownOrigin() { // the guarantee is not part of the memory pool suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(true) + suite.pool.On("Add", guarantee.ID(), guarantee).Return(true) // submit the guarantee with an unknown origin err := suite.core.OnGuarantee(unittest.IdentifierFixture(), guarantee) suite.Assert().Error(err) suite.Assert().True(engine.IsInvalidInputError(err)) - suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) + suite.pool.AssertNotCalled(suite.T(), "Add", guarantee.ID(), guarantee) } // validGuarantee returns a valid collection guarantee based on the suite state. 
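The pool these tests mock is now the generic mempool.Mempool[flow.Identifier, *flow.CollectionGuarantee], whose Add takes an explicit key. As a rough illustration of the shape such a keyed pool can have (an assumption for illustration, not flow-go's actual module/mempool interface), a map-backed version might look like:

package mempool

import "sync"

// Mempool is an illustrative keyed in-memory pool. Values are stored under a
// caller-supplied key rather than a key derived inside the pool.
type Mempool[K comparable, V any] struct {
	mu    sync.RWMutex
	items map[K]V
}

func NewMempool[K comparable, V any]() *Mempool[K, V] {
	return &Mempool[K, V]{items: make(map[K]V)}
}

// Has reports whether the key is already stored.
func (m *Mempool[K, V]) Has(key K) bool {
	m.mu.RLock()
	defer m.mu.RUnlock()
	_, ok := m.items[key]
	return ok
}

// Add stores the value under the given key and reports whether it was newly added.
func (m *Mempool[K, V]) Add(key K, value V) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	if _, ok := m.items[key]; ok {
		return false
	}
	m.items[key] = value
	return true
}

One upside of the keyed signature is that guarantee.ID(), a hash computation, is evaluated once at the call site instead of being recomputed inside the pool.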
func (suite *IngestionCoreSuite) validGuarantee() *flow.CollectionGuarantee { guarantee := unittest.CollectionGuaranteeFixture() - guarantee.ChainID = suite.clusterID + guarantee.ClusterChainID = suite.clusterID signerIndices, err := signature.EncodeSignersToIndices( []flow.Identifier{suite.collID}, []flow.Identifier{suite.collID}) diff --git a/engine/consensus/ingestion/engine.go b/engine/consensus/ingestion/engine.go index cb98d34632f..e556fc2f766 100644 --- a/engine/consensus/ingestion/engine.go +++ b/engine/consensus/ingestion/engine.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package ingestion import ( @@ -43,7 +41,7 @@ type Engine struct { func New( log zerolog.Logger, engineMetrics module.EngineMetrics, - net network.Network, + net network.EngineRegistry, me module.Local, core *Core, ) (*Engine, error) { diff --git a/engine/consensus/ingestion/engine_test.go b/engine/consensus/ingestion/engine_test.go index a146816bfa9..29d59b3adf7 100644 --- a/engine/consensus/ingestion/engine_test.go +++ b/engine/consensus/ingestion/engine_test.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package ingestion import ( @@ -19,7 +17,7 @@ import ( mockmodule "github.com/onflow/flow-go/module/mock" netint "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -31,7 +29,7 @@ type IngestionSuite struct { IngestionCoreSuite con *mocknetwork.Conduit - net *mocknetwork.Network + net *mocknetwork.EngineRegistry cancel context.CancelFunc ingest *Engine @@ -43,7 +41,7 @@ func (s *IngestionSuite) SetupTest() { s.con = &mocknetwork.Conduit{} // set up network module mock - s.net = &mocknetwork.Network{} + s.net = &mocknetwork.EngineRegistry{} s.net.On("Register", channels.ReceiveGuarantees, mock.Anything).Return( func(channel channels.Channel, engine netint.MessageProcessor) netint.Conduit { return s.con @@ -57,7 +55,7 @@ func (s *IngestionSuite) SetupTest() { ctx, cancel := context.WithCancel(context.Background()) s.cancel = cancel - signalerCtx, _ := irrecoverable.WithSignaler(ctx) + signalerCtx := irrecoverable.NewMockSignalerContext(s.T(), ctx) metrics := metrics.NewNoopCollector() ingest, err := New(unittest.Logger(), metrics, s.net, me, s.core) @@ -86,7 +84,7 @@ func (s *IngestionSuite) TestSubmittingMultipleEntries() { for i := 0; i < int(count); i++ { guarantee := s.validGuarantee() s.pool.On("Has", guarantee.ID()).Return(false) - s.pool.On("Add", guarantee).Run(func(args mock.Arguments) { + s.pool.On("Add", guarantee.ID(), guarantee).Run(func(args mock.Arguments) { processed.Add(1) }).Return(true) diff --git a/engine/consensus/matching/core.go b/engine/consensus/matching/core.go index ceadfa71254..f83512861ce 100644 --- a/engine/consensus/matching/core.go +++ b/engine/consensus/matching/core.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" @@ -120,7 +121,7 @@ func (c *Core) ProcessReceipt(receipt *flow.ExecutionReceipt) error { } childReceipts := c.pendingReceipts.ByPreviousResultID(resultID) - c.pendingReceipts.Remove(receipt.ID()) + c.pendingReceipts.Remove(receiptID) for _, childReceipt := range 
childReceipts { // recursively processing the child receipts @@ -180,19 +181,25 @@ func (c *Core) processReceipt(receipt *flow.ExecutionReceipt) (bool, error) { Hex("initial_state", initialState[:]). Hex("final_state", finalState[:]).Logger() - // if the receipt is for an unknown block, skip it. It will be re-requested - // later by `requestPending` function. + // If the receipt is for an unknown block, skip it. + // Reasoning: If this is an honest receipt, this replica is behind. Chances are high that other leaders will + // already have included the receipt by the time this replica has caught up. If we still need the receipt by + // the time this replica has caught up, it will be re-requested later by `requestPending` function. If it is + // a malicious receipt, discarding it is advantageous for mitigating spamming and resource exhaustion attacks. executedBlock, err := c.headersDB.ByBlockID(receipt.ExecutionResult.BlockID) if err != nil { - log.Debug().Msg("discarding receipt for unknown block") - return false, nil + if errors.Is(err, storage.ErrNotFound) { + log.Debug().Msg("dropping execution receipt for unknown block") + return false, nil + } + return false, irrecoverable.NewExceptionf("encountered unexpected storage error attempting to retrieve block %v: %w", receipt.ExecutionResult.BlockID, err) } log = log.With(). Uint64("block_view", executedBlock.View). Uint64("block_height", executedBlock.Height). Logger() - log.Info().Msg("execution receipt received") + log.Debug().Msg("execution receipt received") // if Execution Receipt is for block whose height is lower or equal to already sealed height // => drop Receipt @@ -208,31 +215,28 @@ func (c *Core) processReceipt(receipt *flow.ExecutionReceipt) (bool, error) { childSpan := c.tracer.StartSpanFromParent(receiptSpan, trace.CONMatchProcessReceiptVal) err = c.receiptValidator.Validate(receipt) childSpan.End() - - if engine.IsUnverifiableInputError(err) { - // If previous result is missing, we can't validate this receipt. - // Although we will request its previous receipt(s), - // we don't want to drop it now, because when the missing previous arrive - // in a wrong order, they will still be dropped, and causing the catch up - // to be inefficient. - // Instead, we cache the receipt in case it arrives earlier than its - // previous receipt. - // For instance, given blocks A <- B <- C <- D <- E, if we receive their receipts - // in the order of [E,C,D,B,A], then: - // if we drop the missing previous receipts, then only A will be processed; - // if we cache the missing previous receipts, then all of them will be processed, because - // once A is processed, we will check if there is a child receipt pending, - // if yes, then process it. - c.pendingReceipts.Add(receipt) - log.Info().Msg("receipt is cached because its previous result is missing") - return false, nil - } - if err != nil { + if module.IsUnknownResultError(err) { + // Previous result is missing. Hence, we can't validate this receipt. + // We want to efficiently handle receipts arriving out of order. Therefore, we cache the + // receipt in `c.pendingReceipts`. On finalization of new blocks, we request receipts + // for all unsealed but finalized blocks. For instance, given blocks + // A <- B <- C <- D <- E, if we receive their receipts in the order of [E,C,D,B,A], then: + // - If we drop the missing previous receipts, then only A will be processed. 
+ // - If we cache the missing previous receipts, then all of them will be processed, because once + // A is processed, we will check if there is a child receipt pending, if yes, then process it. + c.pendingReceipts.Add(receipt) + log.Debug().Msg("receipt is cached because its previous result is missing") + return false, nil + } if engine.IsInvalidInputError(err) { - log.Err(err).Msg("invalid execution receipt") + log.Err(err).Bool(logging.KeyProtocolViolation, true).Msg("invalid execution receipt") return false, nil } + if module.IsUnknownBlockError(err) { // This should never happen + // Above, we successfully retrieved the `executedBlock`. Hence, `UnknownBlockError` here means our state is corrupted! + return false, irrecoverable.NewExceptionf("internal state corruption detected when validating receipt %v for block %v: %w", receipt.ID(), receipt.BlockID, err) + } return false, fmt.Errorf("failed to validate execution receipt: %w", err) } @@ -241,7 +245,7 @@ func (c *Core) processReceipt(receipt *flow.ExecutionReceipt) (bool, error) { return false, fmt.Errorf("failed to store receipt: %w", err) } if added { - log.Info().Msg("execution result processed and stored") + log.Debug().Msg("execution result processed and stored") } return added, nil @@ -378,13 +382,19 @@ func (c *Core) OnBlockFinalization() error { lastSealed.ID(), lastSealed.Height, err) } - c.log.Info(). + log := c.log.With(). Uint64("first_height_missing_result", firstMissingHeight). Uint("seals_size", c.seals.Size()). Uint("receipts_size", c.receipts.Size()). Int("pending_receipt_requests", pendingReceiptRequests). Int64("duration_ms", time.Since(startTime).Milliseconds()). - Msg("finalized block processed successfully") + Logger() + // this runs frequently, so we only log at info-level if there are pending requests + if pendingReceiptRequests > 0 { + log.Info().Msg("finalized block processed successfully") + } else { + log.Debug().Msg("finalized block processed successfully") + } return nil } diff --git a/engine/consensus/matching/core_test.go b/engine/consensus/matching/core_test.go index 6097c4422ac..2cd76d588d8 100644 --- a/engine/consensus/matching/core_test.go +++ b/engine/consensus/matching/core_test.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" @@ -100,7 +101,7 @@ func (ms *MatchingSuite) TestOnReceiptPendingResult() { ms.receiptValidator.On("Validate", receipt).Return(nil) // Expect the receipt to be added to mempool and persistent storage - ms.ReceiptsPL.On("AddReceipt", receipt, ms.UnfinalizedBlock.Header).Return(true, nil).Once() + ms.ReceiptsPL.On("AddReceipt", receipt, ms.UnfinalizedBlock.ToHeader()).Return(true, nil).Once() ms.ReceiptsDB.On("Store", receipt).Return(nil).Once() _, err := ms.core.processReceipt(receipt) @@ -123,7 +124,7 @@ func (ms *MatchingSuite) TestOnReceipt_ReceiptInPersistentStorage() { // Persistent storage layer for Receipts has the receipt already stored ms.ReceiptsDB.On("Store", receipt).Return(storage.ErrAlreadyExists).Once() // The receipt should be added to the receipts mempool - ms.ReceiptsPL.On("AddReceipt", receipt, ms.UnfinalizedBlock.Header).Return(true, nil).Once() + ms.ReceiptsPL.On("AddReceipt", receipt, ms.UnfinalizedBlock.ToHeader()).Return(true, nil).Once() _, err := ms.core.processReceipt(receipt) ms.Require().NoError(err, "should process receipts, 
even if it is already in storage") @@ -142,7 +143,7 @@ func (ms *MatchingSuite) TestOnReceiptValid() { ms.receiptValidator.On("Validate", receipt).Return(nil).Once() // Expect the receipt to be added to mempool and persistent storage - ms.ReceiptsPL.On("AddReceipt", receipt, ms.UnfinalizedBlock.Header).Return(true, nil).Once() + ms.ReceiptsPL.On("AddReceipt", receipt, ms.UnfinalizedBlock.ToHeader()).Return(true, nil).Once() ms.ReceiptsDB.On("Store", receipt).Return(nil).Once() // onReceipt should run to completion without throwing an error @@ -154,7 +155,7 @@ func (ms *MatchingSuite) TestOnReceiptValid() { ms.ReceiptsDB.AssertExpectations(ms.T()) } -// TestOnReceiptInvalid tests that we reject receipts that don't pass the ReceiptValidator +// TestOnReceiptInvalid tests handling of receipts that the ReceiptValidator detects as violating the protocol func (ms *MatchingSuite) TestOnReceiptInvalid() { // we use the same Receipt as in TestOnReceiptValid to ensure that the sealing Core is not // rejecting the receipt for any other reason @@ -165,14 +166,39 @@ func (ms *MatchingSuite) TestOnReceiptInvalid() { ) // check that _expected_ failure case of invalid receipt is handled without error - ms.receiptValidator.On("Validate", receipt).Return(engine.NewInvalidInputError("")).Once() - _, err := ms.core.processReceipt(receipt) + ms.receiptValidator.On("Validate", receipt).Return(engine.NewInvalidInputErrorf("")).Once() + wasAdded, err := ms.core.processReceipt(receipt) ms.Require().NoError(err, "invalid receipt should be dropped but not error") + ms.Require().False(wasAdded, "invalid receipt should not be added") + ms.receiptValidator.AssertExpectations(ms.T()) + ms.ReceiptsDB.AssertNumberOfCalls(ms.T(), "Store", 0) +} + +// TestOnReceiptValidatorExceptions tests matching.Core escalates unexpected errors and exceptions. +// We expect that such errors are *not* interpreted as the receipt being invalid. +func (ms *MatchingSuite) TestOnReceiptValidatorExceptions() { + // we use the same Receipt as in TestOnReceiptValid to ensure that the sealing Core is not rejecting the receipt for any other reason + originID := ms.ExeID + receipt := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(originID), + unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&ms.UnfinalizedBlock))), + ) - // check that _unexpected_ failure case causes the error to be escalated + // Check that _unexpected_ failure causes the error to be escalated and is *not* interpreted as an invalid receipt. ms.receiptValidator.On("Validate", receipt).Return(fmt.Errorf("")).Once() + _, err := ms.core.processReceipt(receipt) + ms.Require().Error(err, "unexpected errors should be escalated") + ms.Require().False(engine.IsInvalidInputError(err), "exceptions should not be misinterpreted as an invalid receipt") + + // Check that an `UnknownBlockError` causes the error to be escalated and is *not* interpreted as an invalid receipt. + // Reasoning: For attack resilience, we should discard outdated receipts based on the height of the executed block, _before_ we + // run the expensive receipt validation. Therefore, matching.Core should retrieve the executed block before calling into the + // ReceiptValidator. Hence, if matching.Core finds the executed block, but `ReceiptValidator.Validate(..)` errors saying that + // the executed block is unknown, our state is corrupted or we have a severe internal bug.
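The expectations in the comment above mirror a strict triage order in matching.Core.processReceipt. The following condensed, self-contained sketch shows that ordering; the sentinel errors are hypothetical stand-ins for flow-go's typed errors (module.UnknownResultError, engine.InvalidInputError, module.UnknownBlockError), which the real code matches with dedicated predicates rather than errors.Is.

package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinels standing in for flow-go's typed validation errors.
var (
	errUnknownResult = errors.New("unknown parent result")
	errInvalidInput  = errors.New("invalid input")
	errUnknownBlock  = errors.New("unknown block")
)

// triageReceiptError mirrors the branch ordering applied after receiptValidator.Validate.
// cache=true means the receipt should be held back until its parent result arrives;
// a non-nil error is escalated to the caller.
func triageReceiptError(validationErr error) (cache bool, err error) {
	if validationErr == nil {
		return false, nil // receipt is valid: continue processing
	}
	switch {
	case errors.Is(validationErr, errUnknownResult):
		return true, nil // parent result missing: cache for out-of-order processing
	case errors.Is(validationErr, errInvalidInput):
		return false, nil // protocol violation by the sender: log and drop, do not crash
	case errors.Is(validationErr, errUnknownBlock):
		// the executed block was already loaded earlier, so this signals state corruption
		return false, fmt.Errorf("state corruption detected: %w", validationErr)
	default:
		return false, fmt.Errorf("failed to validate receipt: %w", validationErr)
	}
}

func main() {
	cache, err := triageReceiptError(errUnknownResult)
	fmt.Println(cache, err) // true <nil>
}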
+ ms.receiptValidator.On("Validate", receipt).Return(module.NewUnknownBlockError("")).Once() _, err = ms.core.processReceipt(receipt) ms.Require().Error(err, "unexpected errors should be escalated") + ms.Require().False(engine.IsInvalidInputError(err), "exceptions should not be misinterpreted as an invalid receipt") ms.receiptValidator.AssertExpectations(ms.T()) ms.ReceiptsDB.AssertNumberOfCalls(ms.T(), "Store", 0) @@ -192,7 +218,7 @@ func (ms *MatchingSuite) TestOnUnverifiableReceipt() { ms.PendingReceipts.On("Add", receipt).Return(false).Once() // check that _expected_ failure case of invalid receipt is handled without error - ms.receiptValidator.On("Validate", receipt).Return(engine.NewUnverifiableInputError("missing parent result")).Once() + ms.receiptValidator.On("Validate", receipt).Return(module.NewUnknownResultError("missing parent result")).Once() wasAdded, err := ms.core.processReceipt(receipt) ms.Require().NoError(err, "unverifiable receipt should be cached but not error") ms.Require().False(wasAdded, "unverifiable receipt should be cached but not added to the node's validated information") @@ -210,7 +236,7 @@ func (ms *MatchingSuite) TestRequestPendingReceipts() { orderedBlocks := make([]flow.Block, 0, n) parentBlock := ms.UnfinalizedBlock for i := 0; i < n; i++ { - block := unittest.BlockWithParentFixture(parentBlock.Header) + block := unittest.BlockWithParentFixture(parentBlock.ToHeader()) ms.Extend(block) orderedBlocks = append(orderedBlocks, *block) parentBlock = *block @@ -246,7 +272,6 @@ func (ms *MatchingSuite) TestRequestPendingReceipts() { // // TODO: this test is temporarily requires as long as sealing.Core requires _two_ receipts from different ENs to seal func (ms *MatchingSuite) TestRequestSecondPendingReceipt() { - ms.core.config.SealingThreshold = 0 // request receipts for all unsealed finalized blocks result := unittest.ExecutionResultFixture(unittest.WithBlock(ms.LatestFinalizedBlock)) @@ -256,8 +281,8 @@ func (ms *MatchingSuite) TestRequestSecondPendingReceipt() { receipt2 := unittest.ExecutionReceiptFixture(unittest.WithResult(result)) // receipts from storage are potentially added to receipts mempool and incorporated results mempool - ms.ReceiptsPL.On("AddReceipt", receipt1, ms.LatestFinalizedBlock.Header).Return(false, nil).Maybe() - ms.ReceiptsPL.On("AddReceipt", receipt2, ms.LatestFinalizedBlock.Header).Return(false, nil).Maybe() + ms.ReceiptsPL.On("AddReceipt", receipt1, ms.LatestFinalizedBlock.ToHeader()).Return(false, nil).Maybe() + ms.ReceiptsPL.On("AddReceipt", receipt2, ms.LatestFinalizedBlock.ToHeader()).Return(false, nil).Maybe() // Situation A: we have _once_ receipt for an unsealed finalized block in storage ms.ReceiptsDB.On("ByBlockID", ms.LatestFinalizedBlock.ID()).Return(flow.ExecutionReceiptList{receipt1}, nil).Once() diff --git a/engine/consensus/matching/engine.go b/engine/consensus/matching/engine.go index 2fc6e679d9a..a8fd0d8c21d 100644 --- a/engine/consensus/matching/engine.go +++ b/engine/consensus/matching/engine.go @@ -11,6 +11,8 @@ import ( sealing "github.com/onflow/flow-go/engine/consensus" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" @@ -27,7 +29,7 @@ const defaultIncorporatedBlockQueueCapacity = 10 // Engine is a wrapper struct for `Core` which implements consensus algorithm. 
// Engine is responsible for handling incoming messages, queueing for processing, broadcasting proposals. type Engine struct { - unit *engine.Unit + component.Component log zerolog.Logger me module.Local core sealing.MatchingCore @@ -44,7 +46,7 @@ type Engine struct { func NewEngine( log zerolog.Logger, - net network.Network, + net network.EngineRegistry, me module.Local, engineMetrics module.EngineMetrics, mempool module.MempoolMetrics, @@ -69,7 +71,6 @@ func NewEngine( e := &Engine{ log: log.With().Str("engine", "matching.Engine").Logger(), - unit: engine.NewUnit(), me: me, core: core, state: state, @@ -83,6 +84,12 @@ func NewEngine( pendingIncorporatedBlocks: pendingIncorporatedBlocks, } + e.Component = component.NewComponentManagerBuilder(). + AddWorker(e.inboundEventsProcessingLoop). + AddWorker(e.finalizationProcessingLoop). + AddWorker(e.blockIncorporatedEventsProcessingLoop). + Build() + // register engine with the receipt provider _, err = net.Register(channels.ReceiveReceipts, e) if err != nil { @@ -92,79 +99,34 @@ func NewEngine( return e, nil } -// Ready returns a ready channel that is closed once the engine has fully -// started. For consensus engine, this is true once the underlying consensus -// algorithm has started. -func (e *Engine) Ready() <-chan struct{} { - e.unit.Launch(e.inboundEventsProcessingLoop) - e.unit.Launch(e.finalizationProcessingLoop) - e.unit.Launch(e.blockIncorporatedEventsProcessingLoop) - return e.unit.Ready() -} - -// Done returns a done channel that is closed once the engine has fully stopped. -// For the consensus engine, we wait for hotstuff to finish. -func (e *Engine) Done() <-chan struct{} { - return e.unit.Done() -} - -// SubmitLocal submits an event originating on the local node. -func (e *Engine) SubmitLocal(event interface{}) { - err := e.ProcessLocal(event) - if err != nil { - e.log.Fatal().Err(err).Msg("internal error processing event") - } -} - -// Submit submits the given event from the node with the given origin ID -// for processing in a non-blocking manner. It returns instantly and logs -// a potential processing error internally when done. -func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { - err := e.Process(channel, originID, event) - if err != nil { - e.log.Fatal().Err(err).Msg("internal error processing event") - } -} - -// ProcessLocal processes an event originating on the local node. -func (e *Engine) ProcessLocal(event interface{}) error { - return e.process(e.me.NodeID(), event) -} - -// Process processes the given event from the node with the given origin ID in -// a blocking manner. It returns the potential processing error when done. +// Process receives events from the network and checks their type, +// before enqueuing them to be processed by a worker in a non-blocking manner. +// No errors expected during normal operation (errors are logged instead). 
func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { - err := e.process(originID, event) - if err != nil { - if engine.IsIncompatibleInputTypeError(err) { - e.log.Warn().Msgf("%v delivered unsupported message %T through %v", originID, event, channel) - return nil - } - return fmt.Errorf("unexpected error while processing engine message: %w", err) + receipt, ok := event.(*flow.ExecutionReceipt) + if !ok { + e.log.Warn().Msgf("%v delivered unsupported message %T through %v", originID, event, channel) + return nil } + e.addReceiptToQueue(receipt) return nil } -// process events for the matching engine on the consensus node. -func (e *Engine) process(originID flow.Identifier, event interface{}) error { - receipt, ok := event.(*flow.ExecutionReceipt) - if !ok { - return fmt.Errorf("no matching processor for message of type %T from origin %x: %w", event, originID[:], - engine.IncompatibleInputTypeError) - } +// addReceiptToQueue adds an execution receipt to the queue of the matching engine, to be processed by a worker +func (e *Engine) addReceiptToQueue(receipt *flow.ExecutionReceipt) { e.metrics.MessageReceived(metrics.EngineSealing, metrics.MessageExecutionReceipt) e.pendingReceipts.Push(receipt) e.inboundEventsNotifier.Notify() - return nil } -// HandleReceipt ingests receipts from the Requester module. +// HandleReceipt ingests receipts from the Requester module, adding them to the queue. func (e *Engine) HandleReceipt(originID flow.Identifier, receipt flow.Entity) { e.log.Debug().Msg("received receipt from requester engine") - err := e.process(originID, receipt) - if err != nil { - e.log.Fatal().Err(err).Msg("internal error processing event from requester module") + r, ok := receipt.(*flow.ExecutionReceipt) + if !ok { + e.log.Fatal().Err(engine.IncompatibleInputTypeError).Msg("internal error processing event from requester module") } + e.addReceiptToQueue(r) } // OnFinalizedBlock implements the `OnFinalizedBlock` callback from the `hotstuff.FinalizationConsumer` @@ -183,10 +145,10 @@ func (e *Engine) OnBlockIncorporated(incorporatedBlock *model.Block) { } // processIncorporatedBlock selects receipts that were included into incorporated block and submits them -// for further processing by matching core. +// to the matching core for further processing. // Without the logic below, the sealing engine would produce IncorporatedResults // only from receipts received directly from ENs. sealing Core would not know about -// Receipts that are incorporated by other nodes in their blocks blocks (but never +// Receipts that are incorporated by other nodes in their blocks (but never // received directly from the EN). // No errors expected during normal operations. func (e *Engine) processIncorporatedBlock(blockID flow.Identifier) error { @@ -205,50 +167,56 @@ func (e *Engine) processIncorporatedBlock(blockID flow.Identifier) error { return nil } -// finalizationProcessingLoop is a separate goroutine that performs processing of finalization events -func (e *Engine) finalizationProcessingLoop() { +// finalizationProcessingLoop contains the logic for processing of finalization events. +// This method is intended to be executed by a dedicated worker / goroutine. 
+func (e *Engine) finalizationProcessingLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { finalizationNotifier := e.finalizationEventsNotifier.Channel() + ready() for { select { - case <-e.unit.Quit(): + case <-ctx.Done(): return case <-finalizationNotifier: err := e.core.OnBlockFinalization() if err != nil { - e.log.Fatal().Err(err).Msg("could not process last finalized event") + ctx.Throw(fmt.Errorf("could not process last finalized event: %w", err)) } } } } -// blockIncorporatedEventsProcessingLoop is a separate goroutine for processing block incorporated events. -func (e *Engine) blockIncorporatedEventsProcessingLoop() { +// blockIncorporatedEventsProcessingLoop contains the logic for processing block incorporated events. +// This method is intended to be executed by a dedicated worker / goroutine. +func (e *Engine) blockIncorporatedEventsProcessingLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { c := e.blockIncorporatedNotifier.Channel() - + ready() for { select { - case <-e.unit.Quit(): + case <-ctx.Done(): return case <-c: - err := e.processBlockIncorporatedEvents() + err := e.processBlockIncorporatedEvents(ctx) if err != nil { - e.log.Fatal().Err(err).Msg("internal error processing block incorporated queued message") + ctx.Throw(fmt.Errorf("internal error processing block incorporated queued message: %w", err)) } } } } -func (e *Engine) inboundEventsProcessingLoop() { +// inboundEventsProcessingLoop contains the logic for processing execution receipts, received +// from the network via Process, from the Requester module via HandleReceipt, or from incorporated blocks. +// This method is intended to be executed by a dedicated worker / goroutine. +func (e *Engine) inboundEventsProcessingLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { c := e.inboundEventsNotifier.Channel() - + ready() for { select { - case <-e.unit.Quit(): + case <-ctx.Done(): return case <-c: - err := e.processAvailableEvents() + err := e.processExecutionReceipts(ctx) if err != nil { - e.log.Fatal().Err(err).Msg("internal error processing queued message") + ctx.Throw(fmt.Errorf("internal error processing queued execution receipt: %w", err)) } } } @@ -256,10 +224,10 @@ func (e *Engine) inboundEventsProcessingLoop() { // processBlockIncorporatedEvents performs processing of block incorporated hot stuff events. // No errors expected during normal operations. -func (e *Engine) processBlockIncorporatedEvents() error { +func (e *Engine) processBlockIncorporatedEvents(ctx irrecoverable.SignalerContext) error { for { select { - case <-e.unit.Quit(): + case <-ctx.Done(): return nil default: } @@ -279,27 +247,18 @@ func (e *Engine) processBlockIncorporatedEvents() error { } } -// processAvailableEvents processes _all_ available events (untrusted messages +// processExecutionReceipts processes execution receipts // from other nodes as well as internally trusted. // No errors expected during normal operations. 
-func (e *Engine) processAvailableEvents() error { +func (e *Engine) processExecutionReceipts(ctx irrecoverable.SignalerContext) error { for { select { - case <-e.unit.Quit(): + case <-ctx.Done(): return nil default: } - msg, ok := e.pendingIncorporatedBlocks.Pop() - if ok { - err := e.processIncorporatedBlock(msg.(flow.Identifier)) - if err != nil { - return fmt.Errorf("could not process incorporated block: %w", err) - } - continue - } - - msg, ok = e.pendingReceipts.Pop() + msg, ok := e.pendingReceipts.Pop() if ok { err := e.core.ProcessReceipt(msg.(*flow.ExecutionReceipt)) if err != nil { diff --git a/engine/consensus/matching/engine_test.go b/engine/consensus/matching/engine_test.go index 170e633da86..884873b732e 100644 --- a/engine/consensus/matching/engine_test.go +++ b/engine/consensus/matching/engine_test.go @@ -1,6 +1,7 @@ package matching import ( + "context" "sync" "testing" "time" @@ -10,13 +11,13 @@ import ( "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/engine" mockconsensus "github.com/onflow/flow-go/engine/consensus/mock" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" mockstorage "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" @@ -36,12 +37,13 @@ type MatchingEngineSuite struct { // Matching Engine engine *Engine + cancel context.CancelFunc } func (s *MatchingEngineSuite) SetupTest() { metrics := metrics.NewNoopCollector() me := &mockmodule.Local{} - net := &mocknetwork.Network{} + net := &mocknetwork.EngineRegistry{} s.core = &mockconsensus.MatchingCore{} s.index = &mockstorage.Index{} s.receipts = &mockstorage.ExecutionReceipts{} @@ -57,7 +59,17 @@ func (s *MatchingEngineSuite) SetupTest() { s.engine, err = NewEngine(unittest.Logger(), net, me, metrics, metrics, s.state, s.receipts, s.index, s.core) require.NoError(s.T(), err) - <-s.engine.Ready() + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(s.T(), context.Background()) + s.cancel = cancel + s.engine.Start(ctx) + unittest.AssertClosesBefore(s.T(), s.engine.Ready(), 10*time.Millisecond) +} + +func (s *MatchingEngineSuite) TearDownTest() { + if s.cancel != nil { + s.cancel() + unittest.AssertClosesBefore(s.T(), s.engine.Done(), 10*time.Millisecond) + } } // TestOnFinalizedBlock tests if finalized block gets processed when send through `Engine`. 
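The Start/Ready/Done lifecycle exercised by this test setup comes from the ComponentManager refactor earlier in the diff: each goroutine formerly launched by engine.Unit becomes a worker that signals readiness and escalates failures through ctx.Throw instead of log.Fatal. A schematic sketch of one such worker, with stand-in types reduced to what the pattern needs:

package worker

import (
	"context"
	"fmt"
)

// SignalerContext and ReadyFunc are schematic stand-ins for flow-go's
// irrecoverable.SignalerContext and component.ReadyFunc.
type SignalerContext interface {
	context.Context
	Throw(err error) // escalates an irrecoverable error instead of log.Fatal
}
type ReadyFunc func()

// finalizationLoop mirrors the worker shape this diff introduces: signal readiness,
// then block on either shutdown or a notification, throwing on processing errors.
func finalizationLoop(ctx SignalerContext, ready ReadyFunc, notifications <-chan struct{}, process func() error) {
	ready() // worker is set up
	for {
		select {
		case <-ctx.Done():
			return
		case <-notifications:
			if err := process(); err != nil {
				ctx.Throw(fmt.Errorf("could not process finalization event: %w", err))
			}
		}
	}
}

Workers are registered via ComponentManagerBuilder.AddWorker, and Ready() closes once all workers have signalled readiness, which is what AssertClosesBefore in SetupTest waits on.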
@@ -87,7 +99,8 @@ func (s *MatchingEngineSuite) TestOnBlockIncorporated() { resultsByID := payload.Results.Lookup() for _, receipt := range payload.Receipts { index.ReceiptIDs = append(index.ReceiptIDs, receipt.ID()) - fullReceipt := flow.ExecutionReceiptFromMeta(*receipt, *resultsByID[receipt.ResultID]) + fullReceipt, err := flow.ExecutionReceiptFromStub(*receipt, *resultsByID[receipt.ResultID]) + s.Require().NoError(err) s.receipts.On("ByID", receipt.ID()).Return(fullReceipt, nil).Once() s.core.On("ProcessReceipt", fullReceipt).Return(nil).Once() } @@ -111,7 +124,7 @@ func (s *MatchingEngineSuite) TestMultipleProcessingItems() { for i := range receipts { receipt := unittest.ExecutionReceiptFixture( unittest.WithExecutorID(originID), - unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&block))), + unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(block))), ) receipts[i] = receipt s.core.On("ProcessReceipt", receipt).Return(nil).Once() @@ -135,15 +148,12 @@ func (s *MatchingEngineSuite) TestMultipleProcessingItems() { s.core.AssertExpectations(s.T()) } -// TestProcessUnsupportedMessageType tests that Process and ProcessLocal correctly handle a case where invalid message type -// was submitted from network layer. +// TestProcessUnsupportedMessageType tests that Process correctly handles a case where invalid message type +// (byzantine message) was submitted from network layer. func (s *MatchingEngineSuite) TestProcessUnsupportedMessageType() { invalidEvent := uint64(42) err := s.engine.Process("ch", unittest.IdentifierFixture(), invalidEvent) // shouldn't result in error since byzantine inputs are expected require.NoError(s.T(), err) - // in case of local processing error cannot be consumed since all inputs are trusted - err = s.engine.ProcessLocal(invalidEvent) - require.Error(s.T(), err) - require.True(s.T(), engine.IsIncompatibleInputTypeError(err)) + // Local processing happens only via HandleReceipt, which will log.Fatal on invalid input } diff --git a/engine/consensus/message_hub/message_hub.go b/engine/consensus/message_hub/message_hub.go index 6c674c219ff..a84ab22900c 100644 --- a/engine/consensus/message_hub/message_hub.go +++ b/engine/consensus/message_hub/message_hub.go @@ -83,7 +83,7 @@ type MessageHub struct { pushBlocksCon network.Conduit ownOutboundMessageNotifier engine.Notifier ownOutboundVotes *fifoqueue.FifoQueue // queue for handling outgoing vote transmissions - ownOutboundProposals *fifoqueue.FifoQueue // queue for handling outgoing proposal transmissions + ownOutboundProposals *fifoqueue.FifoQueue // queue for handling outgoing proposal transmissions (flow.ProposalHeader) ownOutboundTimeouts *fifoqueue.FifoQueue // queue for handling outgoing timeout transmissions // injected dependencies @@ -100,7 +100,7 @@ var _ hotstuff.CommunicatorConsumer = (*MessageHub)(nil) // No errors are expected during normal operations. 
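A recurring rename in this diff is network.Network to network.EngineRegistry, the dependency the constructor below receives. Schematically, registering on a channel couples inbound delivery (the engine's Process method) with an outbound Conduit for the same channel. The interface shapes below are reduced from the calls visible in this diff and are not the full flow-go definitions:

package network

type Channel string
type Identifier [32]byte

// MessageProcessor is what an engine exposes to the networking layer.
type MessageProcessor interface {
	Process(channel Channel, originID Identifier, event interface{}) error
}

// Conduit is the outbound half the engine receives back at registration.
type Conduit interface {
	Publish(event interface{}, targetIDs ...Identifier) error
	Unicast(event interface{}, targetID Identifier) error
}

// EngineRegistry hands out a channel-scoped conduit in exchange for a processor.
type EngineRegistry interface {
	Register(channel Channel, engine MessageProcessor) (Conduit, error)
}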
func NewMessageHub(log zerolog.Logger, engineMetrics module.EngineMetrics, - net network.Network, + net network.EngineRegistry, me module.Local, compliance consensus.Compliance, hotstuff module.HotStuff, @@ -193,10 +193,10 @@ func (h *MessageHub) sendOwnMessages(ctx context.Context) error { msg, ok := h.ownOutboundProposals.Pop() if ok { - block := msg.(*flow.Header) - err := h.sendOwnProposal(block) + proposal := msg.(*flow.ProposalHeader) + err := h.sendOwnProposal(proposal) if err != nil { - return fmt.Errorf("could not process queued block %v: %w", block.ID(), err) + return fmt.Errorf("could not process queued block %v: %w", proposal.Header.ID(), err) } continue } @@ -230,28 +230,23 @@ // No errors are expected during normal operations. func (h *MessageHub) sendOwnTimeout(timeout *model.TimeoutObject) error { log := timeout.LogContext(h.log).Logger() - log.Info().Msg("processing timeout broadcast request from hotstuff") + log.Debug().Msg("processing timeout broadcast request from hotstuff") // Retrieve all consensus nodes (excluding myself). - // CAUTION: We must include also nodes with weight zero, because otherwise + // CAUTION: We must include consensus nodes that are joining, because otherwise // TCs might not be constructed at epoch switchover. recipients, err := h.state.Final().Identities(filter.And( - filter.Not(filter.Ejected), - filter.HasRole(flow.RoleConsensus), - filter.Not(filter.HasNodeID(h.me.NodeID())), + filter.IsValidCurrentEpochParticipantOrJoining, + filter.HasRole[flow.Identity](flow.RoleConsensus), + filter.Not(filter.HasNodeID[flow.Identity](h.me.NodeID())), )) if err != nil { return fmt.Errorf("could not get consensus recipients for broadcasting timeout: %w", err) } // create the timeout message - msg := &messages.TimeoutObject{ - View: timeout.View, - NewestQC: timeout.NewestQC, - LastViewTC: timeout.LastViewTC, - SigData: timeout.SigData, - TimeoutTick: timeout.TimeoutTick, - } + msg := (*messages.TimeoutObject)(timeout) err = h.con.Publish(msg, recipients.NodeIDs()...) if err != nil { if !errors.Is(err, network.EmptyTargetList) { @@ -259,7 +254,7 @@ func (h *MessageHub) sendOwnTimeout(timeout *model.TimeoutObject) error { } return nil } - log.Info().Msg("consensus timeout was broadcast") + log.Debug().Msg("consensus timeout was broadcast") h.engineMetrics.MessageSent(metrics.EngineConsensusMessageHub, metrics.MessageTimeoutObject) return nil @@ -273,7 +268,7 @@ func (h *MessageHub) sendOwnVote(packed *packedVote) error { Uint64("block_view", packed.vote.View). Hex("recipient_id", packed.recipientID[:]). Logger() - log.Info().Msg("processing vote transmission request from hotstuff") + log.Debug().Msg("processing vote transmission request from hotstuff") // send the vote the desired recipient err := h.con.Unicast(packed.vote, packed.recipientID) @@ -282,7 +277,7 @@ return nil } h.engineMetrics.MessageSent(metrics.EngineConsensusMessageHub, metrics.MessageBlockVote) - log.Info().Msg("block vote transmitted") + log.Debug().Msg("block vote transmitted") return nil } @@ -292,8 +287,9 @@ func (h *MessageHub) sendOwnVote(packed *packedVote) error { // - broadcast to all non-consensus participants // // No errors are expected during normal operations.
-func (h *MessageHub) sendOwnProposal(header *flow.Header) error { +func (h *MessageHub) sendOwnProposal(proposal *flow.ProposalHeader) error { // first, check that we are the proposer of the block + header := proposal.Header if header.ProposerID != h.me.NodeID() { return fmt.Errorf("cannot broadcast proposal with non-local proposer (%x)", header.ProposerID) } @@ -314,7 +310,7 @@ func (h *MessageHub) sendOwnProposal(header *flow.Header) error { Int("guarantees_count", len(payload.Guarantees)). Int("seals_count", len(payload.Seals)). Int("receipts_count", len(payload.Receipts)). - Time("timestamp", header.Timestamp). + Time("timestamp", time.UnixMilli(int64(header.Timestamp)).UTC()). Hex("signers", header.ParentVoterIndices). //Dur("delay", delay). Logger() @@ -322,31 +318,46 @@ func (h *MessageHub) sendOwnProposal(header *flow.Header) error { log.Debug().Msg("processing proposal broadcast request from hotstuff") // Retrieve all consensus nodes (excluding myself). - // CAUTION: We must include also nodes with weight zero, because otherwise - // new consensus nodes for the next epoch are left out. + // CAUTION: We must also include nodes that are joining, because otherwise new consensus + // nodes for the next epoch are left out. As most nodes might be interested in + // new proposals, we simply broadcast to all non-ejected nodes (excluding myself). // Note: retrieving the final state requires a time-intensive database read. // Therefore, we execute this in a separate routine, because // `OnOwnTimeout` is directly called by the consensus core logic. allIdentities, err := h.state.AtBlockID(header.ParentID).Identities(filter.And( - filter.Not(filter.Ejected), - filter.Not(filter.HasNodeID(h.me.NodeID())), + filter.Not(filter.HasParticipationStatus(flow.EpochParticipationStatusEjected)), + filter.Not(filter.HasNodeID[flow.Identity](h.me.NodeID())), )) if err != nil { return fmt.Errorf("could not get identities for broadcasting proposal: %w", err) } - consRecipients := allIdentities.Filter(filter.HasRole(flow.RoleConsensus)) + consRecipients := allIdentities.Filter(filter.HasRole[flow.Identity](flow.RoleConsensus)) // NOTE: some fields are not needed for the message // - proposer ID is conveyed over the network message // - the payload hash is deduced from the payload - proposal := messages.NewBlockProposal(&flow.Block{ - Header: header, - Payload: payload, - }) + block, err := flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: header.HeaderBody, + Payload: *payload, + }, + ) + if err != nil { + return fmt.Errorf("could not build block: %w", err) + } + blockProposal := &flow.UntrustedProposal{ + Block: *block, + ProposerSigData: proposal.ProposerSigData, + } + if _, err = flow.NewProposal(*blockProposal); err != nil { + return fmt.Errorf("could not build proposal: %w", err) + } + + message := (*messages.Proposal)(blockProposal) // broadcast the proposal to consensus nodes - err = h.con.Publish(proposal, consRecipients.NodeIDs()...) + err = h.con.Publish(message, consRecipients.NodeIDs()...) 
if err != nil { if !errors.Is(err, network.EmptyTargetList) { log.Err(err).Msg("could not send proposal message") @@ -356,7 +367,7 @@ func (h *MessageHub) sendOwnProposal(header *flow.Header) error { log.Info().Msg("block proposal was broadcast") // submit proposal to non-consensus nodes - h.provideProposal(proposal, allIdentities.Filter(filter.Not(filter.HasRole(flow.RoleConsensus)))) + h.provideProposal(message, allIdentities.Filter(filter.Not(filter.HasRole[flow.Identity](flow.RoleConsensus)))) h.engineMetrics.MessageSent(metrics.EngineConsensusMessageHub, metrics.MessageBlockProposal) return nil @@ -364,15 +375,15 @@ func (h *MessageHub) sendOwnProposal(header *flow.Header) error { // provideProposal is used when we want to broadcast a local block to the rest of the // network (non-consensus nodes). -func (h *MessageHub) provideProposal(proposal *messages.BlockProposal, recipients flow.IdentityList) { - header := proposal.Block.Header +func (h *MessageHub) provideProposal(proposal *messages.Proposal, recipients flow.IdentityList) { + header := proposal.Block.ToHeader() blockID := header.ID() log := h.log.With(). Uint64("block_view", header.View). Hex("block_id", blockID[:]). Hex("parent_id", header.ParentID[:]). Logger() - log.Info().Msg("block proposal submitted for propagation") + log.Debug().Msg("block proposal submitted for propagation") // submit the block to the targets err := h.pushBlocksCon.Publish(proposal, recipients.NodeIDs()...) @@ -387,23 +398,21 @@ func (h *MessageHub) provideProposal(proposal *messages.BlockProposal, recipient // OnOwnVote propagates the vote to relevant recipient(s): // - [common case] vote is queued and is sent via unicast to another node that is the next leader by worker // - [special case] this node is the next leader: vote is directly forwarded to the node's internal `VoteAggregator` -func (h *MessageHub) OnOwnVote(blockID flow.Identifier, view uint64, sigData []byte, recipientID flow.Identifier) { - vote := &messages.BlockVote{ - BlockID: blockID, - View: view, - SigData: sigData, - } - +func (h *MessageHub) OnOwnVote(vote *model.Vote, recipientID flow.Identifier) { // special case: I am the next leader if recipientID == h.me.NodeID() { - h.forwardToOwnVoteAggregator(vote, h.me.NodeID()) // forward vote to my own `voteAggregator` + h.forwardToOwnVoteAggregator(vote) // forward vote to my own `voteAggregator` return } // common case: someone else is leader packed := &packedVote{ recipientID: recipientID, - vote: vote, + vote: &messages.BlockVote{ + BlockID: vote.BlockID, + View: vote.View, + SigData: vote.SigData, + }, } if ok := h.ownOutboundVotes.Push(packed); ok { h.ownOutboundMessageNotifier.Notify() @@ -426,7 +435,7 @@ func (h *MessageHub) OnOwnTimeout(timeout *model.TimeoutObject) { // OnOwnProposal directly forwards proposal to HotStuff core logic (skipping compliance engine as we assume our // own proposals to be correct) and queues proposal for subsequent propagation to all consensus participants (including this node). // The proposal will only be placed in the queue, after the specified delay (or dropped on shutdown signal). 
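The delay-then-publish behavior described in the comment above reduces to a select between a publication timer and the shutdown signal. The following is a schematic extraction of the goroutine inside OnOwnProposal below, simplified to a plain context.Context:

package hub

import (
	"context"
	"time"
)

// scheduleAtOrDrop waits until targetPublicationTime and then runs publish,
// unless the engine shuts down first. time.Until handles targets in the past:
// the timer fires immediately, so late proposals are not delayed further.
func scheduleAtOrDrop(ctx context.Context, targetPublicationTime time.Time, publish func()) {
	go func() {
		select {
		case <-time.After(time.Until(targetPublicationTime)):
			publish()
		case <-ctx.Done():
			return // shutting down: drop the proposal instead of publishing late
		}
	}()
}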
-func (h *MessageHub) OnOwnProposal(proposal *flow.Header, targetPublicationTime time.Time) { +func (h *MessageHub) OnOwnProposal(proposal *flow.ProposalHeader, targetPublicationTime time.Time) { go func() { select { case <-time.After(time.Until(targetPublicationTime)): @@ -434,7 +443,7 @@ func (h *MessageHub) OnOwnProposal(proposal *flow.Header, targetPublicationTime return } - hotstuffProposal := model.ProposalFromFlow(proposal) + hotstuffProposal := model.SignedProposalFromFlow(proposal) // notify vote aggregator that new block proposal is available, in case we are next leader h.voteAggregator.AddBlock(hotstuffProposal) // non-blocking @@ -453,25 +462,41 @@ func (h *MessageHub) OnOwnProposal(proposal *flow.Header, targetPublicationTime // Process handles incoming messages from consensus channel. After matching message by type, sends it to the correct // component for handling. // No errors are expected during normal operations. +// +// TODO(BFT, #7620): This function should not return an error. The networking layer's responsibility is fulfilled +// once it delivers a message to an engine. It does not possess the context required to handle +// errors that may arise during an engine's processing of the message, as error handling for +// message processing falls outside the domain of the networking layer. +// +// Some of the current error returns signal Byzantine behavior, such as forged or malformed +// messages. These cases must be logged and routed to a dedicated violation reporting consumer. func (h *MessageHub) Process(channel channels.Channel, originID flow.Identifier, message interface{}) error { switch msg := message.(type) { - case *messages.BlockProposal: - h.compliance.OnBlockProposal(flow.Slashable[*messages.BlockProposal]{ + case *flow.Proposal: + h.compliance.OnBlockProposal(flow.Slashable[*flow.Proposal]{ OriginID: originID, Message: msg, }) - case *messages.BlockVote: - h.forwardToOwnVoteAggregator(msg, originID) - case *messages.TimeoutObject: - t := &model.TimeoutObject{ - View: msg.View, - NewestQC: msg.NewestQC, - LastViewTC: msg.LastViewTC, - SignerID: originID, - SigData: msg.SigData, - TimeoutTick: msg.TimeoutTick, + case *flow.BlockVote: + vote, err := model.NewVote(model.UntrustedVote{ + View: msg.View, + BlockID: msg.BlockID, + SignerID: originID, + SigData: msg.SigData, + }) + if err != nil { + // TODO(BFT, #7620): Replace this log statement with a call to the protocol violation consumer. + h.log.Warn(). + Hex("origin_id", originID[:]). + Hex("block_id", msg.BlockID[:]). + Uint64("view", msg.View). + Err(err).Msgf("received invalid vote message") + return nil } - h.forwardToOwnTimeoutAggregator(t) + + h.forwardToOwnVoteAggregator(vote) + case *model.TimeoutObject: + h.forwardToOwnTimeoutAggregator(msg) default: h.log.Warn(). Bool(logging.KeySuspicious, true). @@ -485,31 +510,25 @@ func (h *MessageHub) Process(channel channels.Channel, originID flow.Identifier, // forwardToOwnVoteAggregator converts vote to generic `model.Vote`, logs vote and forwards it to own `voteAggregator`. // Per API convention, timeoutAggregator` is non-blocking, hence, this call returns quickly. -func (h *MessageHub) forwardToOwnVoteAggregator(vote *messages.BlockVote, originID flow.Identifier) { +func (h *MessageHub) forwardToOwnVoteAggregator(vote *model.Vote) { h.engineMetrics.MessageReceived(metrics.EngineConsensusMessageHub, metrics.MessageBlockVote) - v := &model.Vote{ - View: vote.View, - BlockID: vote.BlockID, - SignerID: originID, - SigData: vote.SigData, - } - h.log.Info(). 
- Uint64("block_view", v.View). - Hex("block_id", v.BlockID[:]). - Hex("voter", v.SignerID[:]). - Str("vote_id", v.ID().String()). + h.log.Debug(). + Uint64("block_view", vote.View). + Hex("block_id", vote.BlockID[:]). + Hex("voter", vote.SignerID[:]). + Str("vote_id", vote.ID().String()). Msg("block vote received, forwarding block vote to hotstuff vote aggregator") - h.voteAggregator.AddVote(v) + h.voteAggregator.AddVote(vote) } // forwardToOwnTimeoutAggregator logs timeout and forwards it to own `timeoutAggregator`. // Per API convention, timeoutAggregator` is non-blocking, hence, this call returns quickly. func (h *MessageHub) forwardToOwnTimeoutAggregator(t *model.TimeoutObject) { h.engineMetrics.MessageReceived(metrics.EngineConsensusMessageHub, metrics.MessageTimeoutObject) - h.log.Info(). - Hex("origin_id", t.SignerID[:]). + h.log.Debug(). + Hex("signer_id", t.SignerID[:]). Uint64("view", t.View). - Str("timeout_id", t.ID().String()). + Uint64("newest_qc_view", t.NewestQC.View). Msg("timeout received, forwarding timeout to hotstuff timeout aggregator") h.timeoutAggregator.AddTimeout(t) } diff --git a/engine/consensus/message_hub/message_hub_test.go b/engine/consensus/message_hub/message_hub_test.go index 16896be4de8..1e30b792ce5 100644 --- a/engine/consensus/message_hub/message_hub_test.go +++ b/engine/consensus/message_hub/message_hub_test.go @@ -2,7 +2,6 @@ package message_hub import ( "context" - "math/rand" "sync" "testing" "time" @@ -24,7 +23,7 @@ import ( "github.com/onflow/flow-go/module/util" netint "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" protint "github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" storerr "github.com/onflow/flow-go/storage" @@ -49,7 +48,7 @@ type MessageHubSuite struct { payloads *storage.Payloads me *module.Local state *protocol.State - net *mocknetwork.Network + net *mocknetwork.EngineRegistry con *mocknetwork.Conduit pushBlocksCon *mocknetwork.Conduit hotstuff *module.HotStuff @@ -65,22 +64,19 @@ type MessageHubSuite struct { } func (s *MessageHubSuite) SetupTest() { - // seed the RNG - rand.Seed(time.Now().UnixNano()) - // initialize the paramaters s.participants = unittest.IdentityListFixture(3, unittest.WithRole(flow.RoleConsensus), - unittest.WithWeight(1000), + unittest.WithInitialWeight(1000), ) s.myID = s.participants[0].NodeID block := unittest.BlockFixture() - s.head = block.Header + s.head = block.ToHeader() s.payloads = storage.NewPayloads(s.T()) s.me = module.NewLocal(s.T()) s.state = protocol.NewState(s.T()) - s.net = mocknetwork.NewNetwork(s.T()) + s.net = mocknetwork.NewEngineRegistry(s.T()) s.con = mocknetwork.NewConduit(s.T()) s.pushBlocksCon = mocknetwork.NewConduit(s.T()) s.hotstuff = module.NewHotStuff(s.T()) @@ -125,7 +121,7 @@ func (s *MessageHubSuite) SetupTest() { // set up protocol snapshot mock s.snapshot = &protocol.Snapshot{} s.snapshot.On("Identities", mock.Anything).Return( - func(filter flow.IdentityFilter) flow.IdentityList { + func(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { return s.participants.Filter(filter) }, nil, @@ -170,26 +166,24 @@ func (s *MessageHubSuite) TearDownTest() { } } -// TestProcessIncomingMessages tests processing of incoming messages, MessageHub matches messages by type +// TestProcessValidIncomingMessages tests processing of structurally valid incoming messages, MessageHub matches messages 
by type // and sends them to other modules which execute business logic. -func (s *MessageHubSuite) TestProcessIncomingMessages() { +func (s *MessageHubSuite) TestProcessValidIncomingMessages() { var channel channels.Channel originID := unittest.IdentifierFixture() s.Run("to-compliance-engine", func() { - block := unittest.BlockFixture() - - blockProposalMsg := messages.NewBlockProposal(&block) - expectedComplianceMsg := flow.Slashable[*messages.BlockProposal]{ + proposal := unittest.ProposalFixture() + expectedComplianceMsg := flow.Slashable[*flow.Proposal]{ OriginID: originID, - Message: blockProposalMsg, + Message: proposal, } s.compliance.On("OnBlockProposal", expectedComplianceMsg).Return(nil).Once() - err := s.hub.Process(channel, originID, blockProposalMsg) + err := s.hub.Process(channel, originID, proposal) require.NoError(s.T(), err) }) s.Run("to-vote-aggregator", func() { expectedVote := unittest.VoteFixture(unittest.WithVoteSignerID(originID)) - msg := &messages.BlockVote{ + msg := &flow.BlockVote{ View: expectedVote.View, BlockID: expectedVote.BlockID, SigData: expectedVote.SigData, @@ -199,16 +193,35 @@ func (s *MessageHubSuite) TestProcessIncomingMessages() { require.NoError(s.T(), err) }) s.Run("to-timeout-aggregator", func() { - expectedTimeout := helper.TimeoutObjectFixture(helper.WithTimeoutObjectSignerID(originID)) - msg := &messages.TimeoutObject{ - View: expectedTimeout.View, - NewestQC: expectedTimeout.NewestQC, - LastViewTC: expectedTimeout.LastViewTC, - SigData: expectedTimeout.SigData, + timeout := helper.TimeoutObjectFixture(helper.WithTimeoutObjectSignerID(originID)) + s.timeoutAggregator.On("AddTimeout", timeout) + err := s.hub.Process(channel, originID, timeout) + require.NoError(s.T(), err) + }) + s.Run("unsupported-msg-type", func() { + err := s.hub.Process(channel, originID, struct{}{}) + require.NoError(s.T(), err) + }) +} + +// TestProcessInvalidIncomingMessages tests processing of structurally invalid incoming messages. MessageHub matches messages by type +// and must reject invalid ones instead of forwarding them to other modules.
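Before the test body below, a note on what "structurally invalid" means here: `Process` now funnels wire-level votes through `model.NewVote(model.UntrustedVote{…})`, so a vote only becomes an internal object once its structural checks pass, and the aggregator is never exposed to malformed input. A hedged sketch of that constructor pattern follows; the types, the specific checks, and the zero-ID sentinel are illustrative stand-ins for the flow-go originals.

```go
package main

import (
	"errors"
	"fmt"
)

type Identifier [32]byte

// ZeroID is the all-zero identifier; a vote may not reference it.
var ZeroID Identifier

// UntrustedVote carries raw, unvalidated input from the network.
type UntrustedVote struct {
	View     uint64
	BlockID  Identifier
	SignerID Identifier
	SigData  []byte
}

// Vote is only ever obtained through NewVote, so holders can rely on its invariants.
type Vote UntrustedVote

func NewVote(u UntrustedVote) (*Vote, error) {
	if u.BlockID == ZeroID {
		return nil, errors.New("BlockID must not be zero")
	}
	if u.SignerID == ZeroID {
		return nil, errors.New("SignerID must not be zero")
	}
	if len(u.SigData) == 0 {
		return nil, errors.New("SigData must not be empty")
	}
	v := Vote(u)
	return &v, nil
}

func main() {
	_, err := NewVote(UntrustedVote{View: 5, SignerID: Identifier{1}, SigData: []byte{0xaa}})
	fmt.Println(err) // BlockID must not be zero: the message is dropped, the aggregator never sees it
}
```

With construction centralized like this, the invalid-message test only has to assert that `AddVote` is never reached.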
+func (s *MessageHubSuite) TestProcessInvalidIncomingMessages() { + var channel channels.Channel + originID := unittest.IdentifierFixture() + s.Run("to-vote-aggregator", func() { + expectedVote := unittest.VoteFixture(unittest.WithVoteSignerID(originID)) + msg := &messages.BlockVote{ + View: expectedVote.View, + BlockID: flow.ZeroID, // invalid value + SigData: expectedVote.SigData, } - s.timeoutAggregator.On("AddTimeout", expectedTimeout) + err := s.hub.Process(channel, originID, msg) require.NoError(s.T(), err) + + // AddVote should NOT be called for invalid Vote + s.voteAggregator.AssertNotCalled(s.T(), "AddVote", mock.Anything) }) s.Run("unsupported-msg-type", func() { err := s.hub.Process(channel, originID, struct{}{}) @@ -228,33 +241,36 @@ func (s *MessageHubSuite) TestOnOwnProposal() { // create a block with the parent and store the payload with correct ID block := unittest.BlockWithParentFixture(parent) - block.Header.ProposerID = s.myID + block.ProposerID = s.myID - s.payloads.On("ByBlockID", block.Header.ID()).Return(block.Payload, nil) + s.payloads.On("ByBlockID", block.ID()).Return(&block.Payload, nil) s.payloads.On("ByBlockID", mock.Anything).Return(nil, storerr.ErrNotFound) s.Run("should fail with wrong proposer", func() { - header := *block.Header + header := block.ToHeader() header.ProposerID = unittest.IdentifierFixture() - err := s.hub.sendOwnProposal(&header) + proposal := unittest.ProposalHeaderFromHeader(header) + err := s.hub.sendOwnProposal(proposal) require.Error(s.T(), err, "should fail with wrong proposer") header.ProposerID = s.myID }) // should fail with wrong block ID (payload unavailable) s.Run("should fail with wrong block ID", func() { - header := *block.Header + header := block.ToHeader() header.View++ - err := s.hub.sendOwnProposal(&header) + proposal := unittest.ProposalHeaderFromHeader(header) + err := s.hub.sendOwnProposal(proposal) require.Error(s.T(), err, "should fail with missing payload") header.View-- }) s.Run("should broadcast proposal and pass to HotStuff for valid proposals", func() { - expectedBroadcastMsg := messages.NewBlockProposal(block) + proposal := unittest.ProposalFromBlock(block) + expectedBroadcastMsg := (*messages.Proposal)(proposal) submitted := make(chan struct{}) // closed when proposal is submitted to hotstuff - hotstuffProposal := model.ProposalFromFlow(block.Header) + hotstuffProposal := model.SignedProposalFromBlock(proposal) s.voteAggregator.On("AddBlock", hotstuffProposal).Once() s.hotstuff.On("SubmitProposal", hotstuffProposal). Run(func(args mock.Arguments) { close(submitted) }). 
@@ -269,7 +285,7 @@ func (s *MessageHubSuite) TestOnOwnProposal() { s.pushBlocksCon.On("Publish", expectedBroadcastMsg, s.participants[3].NodeID).Return(nil) // submit to broadcast proposal - s.hub.OnOwnProposal(block.Header, time.Now()) + s.hub.OnOwnProposal(proposal.ProposalHeader(), time.Now()) unittest.AssertClosesBefore(s.T(), util.AllClosed(broadcast, submitted), time.Second) }) @@ -293,18 +309,13 @@ func (s *MessageHubSuite) TestProcessMultipleMessagesHappyPath() { }).Return(nil) // submit vote - s.hub.OnOwnVote(vote.BlockID, vote.View, vote.SigData, recipientID) + s.hub.OnOwnVote(vote, recipientID) }) s.Run("timeout", func() { wg.Add(1) // prepare timeout fixture timeout := helper.TimeoutObjectFixture() - expectedBroadcastMsg := &messages.TimeoutObject{ - View: timeout.View, - NewestQC: timeout.NewestQC, - LastViewTC: timeout.LastViewTC, - SigData: timeout.SigData, - } + expectedBroadcastMsg := (*messages.TimeoutObject)(timeout) s.con.On("Publish", expectedBroadcastMsg, s.participants[1].NodeID, s.participants[2].NodeID). Run(func(_ mock.Arguments) { wg.Done() }). Return(nil) @@ -315,21 +326,22 @@ func (s *MessageHubSuite) TestProcessMultipleMessagesHappyPath() { s.Run("proposal", func() { wg.Add(1) // prepare proposal fixture - proposal := unittest.BlockWithParentAndProposerFixture(s.T(), s.head, s.myID) - s.payloads.On("ByBlockID", proposal.Header.ID()).Return(proposal.Payload, nil) + block := unittest.BlockWithParentAndProposerFixture(s.T(), s.head, s.myID) + proposal := unittest.ProposalFromBlock(block) + s.payloads.On("ByBlockID", block.ID()).Return(&block.Payload, nil) // unset chain and height to make sure they are correctly reconstructed - hotstuffProposal := model.ProposalFromFlow(proposal.Header) + hotstuffProposal := model.SignedProposalFromBlock(proposal) s.voteAggregator.On("AddBlock", hotstuffProposal).Once() s.hotstuff.On("SubmitProposal", hotstuffProposal) - expectedBroadcastMsg := messages.NewBlockProposal(&proposal) + expectedBroadcastMsg := (*messages.Proposal)(proposal) s.con.On("Publish", expectedBroadcastMsg, s.participants[1].NodeID, s.participants[2].NodeID). Run(func(_ mock.Arguments) { wg.Done() }). Return(nil) s.pushBlocksCon.On("Publish", expectedBroadcastMsg, s.participants[3].NodeID).Return(nil) // submit proposal - s.hub.OnOwnProposal(proposal.Header, time.Now()) + s.hub.OnOwnProposal(proposal.ProposalHeader(), time.Now()) }) unittest.RequireReturnsBefore(s.T(), func() { diff --git a/engine/consensus/mock/compliance.go b/engine/consensus/mock/compliance.go index 69c318dbd7e..3a5229dfb4d 100644 --- a/engine/consensus/mock/compliance.go +++ b/engine/consensus/mock/compliance.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -6,8 +6,6 @@ import ( flow "github.com/onflow/flow-go/model/flow" irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - messages "github.com/onflow/flow-go/model/messages" - mock "github.com/stretchr/testify/mock" ) @@ -16,10 +14,14 @@ type Compliance struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *Compliance) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -33,19 +35,23 @@ func (_m *Compliance) Done() <-chan struct{} { } // OnBlockProposal provides a mock function with given fields: proposal -func (_m *Compliance) OnBlockProposal(proposal flow.Slashable[*messages.BlockProposal]) { +func (_m *Compliance) OnBlockProposal(proposal flow.Slashable[*flow.Proposal]) { _m.Called(proposal) } // OnSyncedBlocks provides a mock function with given fields: blocks -func (_m *Compliance) OnSyncedBlocks(blocks flow.Slashable[[]*messages.BlockProposal]) { +func (_m *Compliance) OnSyncedBlocks(blocks flow.Slashable[[]*flow.Proposal]) { _m.Called(blocks) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *Compliance) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -63,13 +69,12 @@ func (_m *Compliance) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewCompliance interface { +// NewCompliance creates a new instance of Compliance. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCompliance(t interface { mock.TestingT Cleanup(func()) -} - -// NewCompliance creates a new instance of Compliance. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCompliance(t mockConstructorTestingTNewCompliance) *Compliance { +}) *Compliance { mock := &Compliance{} mock.Mock.Test(t) diff --git a/engine/consensus/mock/matching_core.go b/engine/consensus/mock/matching_core.go index 331d467cf90..0b6e647387b 100644 --- a/engine/consensus/mock/matching_core.go +++ b/engine/consensus/mock/matching_core.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -12,10 +12,14 @@ type MatchingCore struct { mock.Mock } -// OnBlockFinalization provides a mock function with given fields: +// OnBlockFinalization provides a mock function with no fields func (_m *MatchingCore) OnBlockFinalization() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for OnBlockFinalization") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -30,6 +34,10 @@ func (_m *MatchingCore) OnBlockFinalization() error { func (_m *MatchingCore) ProcessReceipt(receipt *flow.ExecutionReceipt) error { ret := _m.Called(receipt) + if len(ret) == 0 { + panic("no return value specified for ProcessReceipt") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt) error); ok { r0 = rf(receipt) @@ -40,13 +48,12 @@ func (_m *MatchingCore) ProcessReceipt(receipt *flow.ExecutionReceipt) error { return r0 } -type mockConstructorTestingTNewMatchingCore interface { +// NewMatchingCore creates a new instance of MatchingCore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMatchingCore(t interface { mock.TestingT Cleanup(func()) -} - -// NewMatchingCore creates a new instance of MatchingCore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMatchingCore(t mockConstructorTestingTNewMatchingCore) *MatchingCore { +}) *MatchingCore { mock := &MatchingCore{} mock.Mock.Test(t) diff --git a/engine/consensus/mock/proposal_provider.go b/engine/consensus/mock/proposal_provider.go deleted file mode 100644 index b53cef236e1..00000000000 --- a/engine/consensus/mock/proposal_provider.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.13.1. DO NOT EDIT. - -package mock - -import ( - messages "github.com/onflow/flow-go/model/messages" - mock "github.com/stretchr/testify/mock" -) - -// ProposalProvider is an autogenerated mock type for the ProposalProvider type -type ProposalProvider struct { - mock.Mock -} - -// ProvideProposal provides a mock function with given fields: proposal -func (_m *ProposalProvider) ProvideProposal(proposal *messages.BlockProposal) { - _m.Called(proposal) -} - -type mockConstructorTestingTNewProposalProvider interface { - mock.TestingT - Cleanup(func()) -} - -// NewProposalProvider creates a new instance of ProposalProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewProposalProvider(t mockConstructorTestingTNewProposalProvider) *ProposalProvider { - mock := &ProposalProvider{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/engine/consensus/mock/sealing_core.go b/engine/consensus/mock/sealing_core.go index ee3e9bbb63a..243a6b0d7ff 100644 --- a/engine/consensus/mock/sealing_core.go +++ b/engine/consensus/mock/sealing_core.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -16,6 +16,10 @@ type SealingCore struct { func (_m *SealingCore) ProcessApproval(approval *flow.ResultApproval) error { ret := _m.Called(approval) + if len(ret) == 0 { + panic("no return value specified for ProcessApproval") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.ResultApproval) error); ok { r0 = rf(approval) @@ -30,6 +34,10 @@ func (_m *SealingCore) ProcessApproval(approval *flow.ResultApproval) error { func (_m *SealingCore) ProcessFinalizedBlock(finalizedBlockID flow.Identifier) error { ret := _m.Called(finalizedBlockID) + if len(ret) == 0 { + panic("no return value specified for ProcessFinalizedBlock") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Identifier) error); ok { r0 = rf(finalizedBlockID) @@ -44,6 +52,10 @@ func (_m *SealingCore) ProcessFinalizedBlock(finalizedBlockID flow.Identifier) e func (_m *SealingCore) ProcessIncorporatedResult(result *flow.IncorporatedResult) error { ret := _m.Called(result) + if len(ret) == 0 { + panic("no return value specified for ProcessIncorporatedResult") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.IncorporatedResult) error); ok { r0 = rf(result) @@ -54,13 +66,12 @@ func (_m *SealingCore) ProcessIncorporatedResult(result *flow.IncorporatedResult return r0 } -type mockConstructorTestingTNewSealingCore interface { +// NewSealingCore creates a new instance of SealingCore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSealingCore(t interface { mock.TestingT Cleanup(func()) -} - -// NewSealingCore creates a new instance of SealingCore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSealingCore(t mockConstructorTestingTNewSealingCore) *SealingCore { +}) *SealingCore { mock := &SealingCore{} mock.Mock.Test(t) diff --git a/engine/consensus/mock/sealing_observation.go b/engine/consensus/mock/sealing_observation.go index 040f3a27217..12222208e81 100644 --- a/engine/consensus/mock/sealing_observation.go +++ b/engine/consensus/mock/sealing_observation.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -22,7 +22,7 @@ func (_m *SealingObservation) ApprovalsRequested(ir *flow.IncorporatedResult, re _m.Called(ir, requestCount) } -// Complete provides a mock function with given fields: +// Complete provides a mock function with no fields func (_m *SealingObservation) Complete() { _m.Called() } @@ -32,13 +32,12 @@ func (_m *SealingObservation) QualifiesForEmergencySealing(ir *flow.Incorporated _m.Called(ir, emergencySealable) } -type mockConstructorTestingTNewSealingObservation interface { +// NewSealingObservation creates a new instance of SealingObservation. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSealingObservation(t interface { mock.TestingT Cleanup(func()) -} - -// NewSealingObservation creates a new instance of SealingObservation. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewSealingObservation(t mockConstructorTestingTNewSealingObservation) *SealingObservation { +}) *SealingObservation { mock := &SealingObservation{} mock.Mock.Test(t) diff --git a/engine/consensus/mock/sealing_tracker.go b/engine/consensus/mock/sealing_tracker.go index 47e98cb3d4d..294b7bf5e48 100644 --- a/engine/consensus/mock/sealing_tracker.go +++ b/engine/consensus/mock/sealing_tracker.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -18,6 +18,10 @@ type SealingTracker struct { func (_m *SealingTracker) NewSealingObservation(finalizedBlock *flow.Header, seal *flow.Seal, sealedBlock *flow.Header) consensus.SealingObservation { ret := _m.Called(finalizedBlock, seal, sealedBlock) + if len(ret) == 0 { + panic("no return value specified for NewSealingObservation") + } + var r0 consensus.SealingObservation if rf, ok := ret.Get(0).(func(*flow.Header, *flow.Seal, *flow.Header) consensus.SealingObservation); ok { r0 = rf(finalizedBlock, seal, sealedBlock) @@ -30,13 +34,12 @@ func (_m *SealingTracker) NewSealingObservation(finalizedBlock *flow.Header, sea return r0 } -type mockConstructorTestingTNewSealingTracker interface { +// NewSealingTracker creates a new instance of SealingTracker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSealingTracker(t interface { mock.TestingT Cleanup(func()) -} - -// NewSealingTracker creates a new instance of SealingTracker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSealingTracker(t mockConstructorTestingTNewSealingTracker) *SealingTracker { +}) *SealingTracker { mock := &SealingTracker{} mock.Mock.Test(t) diff --git a/engine/consensus/sealing/core.go b/engine/consensus/sealing/core.go index 942d489e971..8db0bc27f5a 100644 --- a/engine/consensus/sealing/core.go +++ b/engine/consensus/sealing/core.go @@ -1,5 +1,3 @@ -// (c) 2021 Dapper Labs - ALL RIGHTS RESERVED - package sealing import ( @@ -9,17 +7,18 @@ import ( "time" "github.com/gammazero/workerpool" + "github.com/onflow/crypto/hash" "github.com/rs/zerolog" "go.opentelemetry.io/otel/attribute" otelTrace "go.opentelemetry.io/otel/trace" + "go.uber.org/atomic" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/consensus" "github.com/onflow/flow-go/engine/consensus/approvals" - "github.com/onflow/flow-go/engine/consensus/sealing/counters" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network" @@ -36,13 +35,12 @@ import ( // - pre-validating approvals (if they are outdated or non-verifiable) // - pruning already processed collectorTree type Core struct { - unit *engine.Unit workerPool *workerpool.WorkerPool // worker pool used by collectors log zerolog.Logger // used to log relevant actions with context collectorTree *approvals.AssignmentCollectorTree // levelled forest for assignment collectors approvalsCache *approvals.LruCache // in-memory cache of approvals that weren't verified - counterLastSealedHeight counters.StrictMonotonousCounter // monotonous counter for last sealed block height - counterLastFinalizedHeight counters.StrictMonotonousCounter // monotonous counter for last 
finalized block height + counterLastSealedHeight counters.StrictMonotonicCounter // monotonic counter for last sealed block height + counterLastFinalizedHeight counters.StrictMonotonicCounter // monotonic counter for last finalized block height headers storage.Headers // used to access block headers in storage state protocol.State // used to access protocol state seals storage.Seals // used to get last sealed block @@ -52,6 +50,7 @@ type Core struct { sealingTracker consensus.SealingTracker // logic-aware component for tracking sealing progress. tracer module.Tracer // used to trace execution sealingConfigsGetter module.SealingConfigsGetter // used to access configs for sealing conditions + reporter *gatedSealingObservationReporter // used to avoid excess resource usage by sealing observation completions } func NewCore( @@ -60,7 +59,6 @@ func NewCore( tracer module.Tracer, conMetrics module.ConsensusMetrics, sealingTracker consensus.SealingTracker, - unit *engine.Unit, headers storage.Headers, state protocol.State, sealsDB storage.Seals, @@ -81,16 +79,16 @@ func NewCore( tracer: tracer, metrics: conMetrics, sealingTracker: sealingTracker, - unit: unit, approvalsCache: approvals.NewApprovalsLRUCache(1000), - counterLastSealedHeight: counters.NewMonotonousCounter(lastSealed.Height), - counterLastFinalizedHeight: counters.NewMonotonousCounter(lastSealed.Height), + counterLastSealedHeight: counters.NewMonotonicCounter(lastSealed.Height), + counterLastFinalizedHeight: counters.NewMonotonicCounter(lastSealed.Height), headers: headers, state: state, seals: sealsDB, sealsMempool: sealsMempool, requestTracker: approvals.NewRequestTracker(headers, 10, 30), sealingConfigsGetter: sealingConfigsGetter, + reporter: newGatedSealingObservationReporter(), } factoryMethod := func(result *flow.ExecutionResult) (approvals.AssignmentCollector, error) { @@ -137,10 +135,7 @@ func (c *Core) RepopulateAssignmentCollectorTree(payloads storage.Payloads) erro // Get the root block of our local state - we allow references to unknown // blocks below the root height - rootHeader, err := c.state.Params().Root() - if err != nil { - return fmt.Errorf("could not retrieve root header: %w", err) - } + rootHeader := c.state.Params().FinalizedRoot() // Determine the list of unknown blocks referenced within the sealing segment // if we are initializing with a latest sealed block below the root height @@ -171,7 +166,13 @@ func (c *Core) RepopulateAssignmentCollectorTree(payloads storage.Payloads) erro Msg("skipping outdated block referenced in root sealing segment") continue } - incorporatedResult := flow.NewIncorporatedResult(blockID, result) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: blockID, + Result: result, + }) + if err != nil { + return fmt.Errorf("could not create incorporated result for block (%x): %w", blockID, err) + } err = c.ProcessIncorporatedResult(incorporatedResult) if err != nil { return fmt.Errorf("could not process incorporated result from block %s: %w", blockID, err) @@ -257,11 +258,11 @@ func (c *Core) processIncorporatedResult(incRes *flow.IncorporatedResult) error // For incorporating blocks at heights that are already finalized, we check that the incorporating block // is on the finalized fork. Otherwise, the incorporating block is orphaned, and we can drop the result. 
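The hunk below also swaps `headers.ByHeight` for `headers.BlockIDByHeight`: only the ID is needed for the fork-membership comparison, so loading a full header is unnecessary work. A small sketch of the check under assumed interfaces; `mapHeaders` is a toy stand-in for the height-indexed storage lookup.

```go
package main

import "fmt"

type Identifier [32]byte

// mapHeaders is a toy height -> finalized block ID index.
type mapHeaders map[uint64]Identifier

func (m mapHeaders) BlockIDByHeight(h uint64) (Identifier, error) {
	id, ok := m[h]
	if !ok {
		return Identifier{}, fmt.Errorf("no finalized block at height %d", h)
	}
	return id, nil
}

// onFinalizedFork reports whether the incorporating block at an already-finalized
// height is itself the finalized block at that height. A mismatch means the
// incorporating block is orphaned and the incorporated result can be dropped.
func onFinalizedFork(headers mapHeaders, height uint64, incorporatedBlockID Identifier) (bool, error) {
	finalizedID, err := headers.BlockIDByHeight(height)
	if err != nil {
		return false, fmt.Errorf("could not retrieve finalized block at height %d: %w", height, err)
	}
	return finalizedID == incorporatedBlockID, nil
}

func main() {
	canonical := Identifier{1}
	headers := mapHeaders{42: canonical}
	onFork, _ := onFinalizedFork(headers, 42, canonical)
	orphan, _ := onFinalizedFork(headers, 42, Identifier{2})
	fmt.Println(onFork, orphan) // true false
}
```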
if incorporatedAtHeight <= c.counterLastFinalizedHeight.Value() { - finalized, err := c.headers.ByHeight(incorporatedAtHeight) + finalizedID, err := c.headers.BlockIDByHeight(incorporatedAtHeight) if err != nil { return fmt.Errorf("could not retrieve finalized block at height %d: %w", incorporatedAtHeight, err) } - if finalized.ID() != incRes.IncorporatedBlockID { + if finalizedID != incRes.IncorporatedBlockID { // it means that we got incorporated incRes for a block which doesn't extend our chain // and should be discarded from future processing return engine.NewOutdatedInputErrorf("won't process incorporated incRes from orphan block %s", incRes.IncorporatedBlockID) @@ -302,7 +303,6 @@ func (c *Core) processIncorporatedResult(incRes *flow.IncorporatedResult) error // * exception in case of unexpected error // * nil - successfully processed incorporated result func (c *Core) ProcessIncorporatedResult(result *flow.IncorporatedResult) error { - span, _ := c.tracer.StartBlockSpan(context.Background(), result.Result.BlockID, trace.CONSealingProcessIncorporatedResult) defer span.End() @@ -566,7 +566,9 @@ func (c *Core) ProcessFinalizedBlock(finalizedBlockID flow.Identifier) error { // observes the latest state of `sealingObservation`. // * The `sealingObservation` lives in the scope of this function. Hence, when this goroutine exits // this function, `sealingObservation` lives solely in the scope of the newly-created goroutine. - c.unit.Launch(sealingObservation.Complete) + // We do this call asynchronously because we are in the hot path, and it is not required to progress, + // and the call may involve database transactions that would unnecessarily delay sealing. + c.reporter.reportAsync(sealingObservation) return nil } @@ -652,17 +654,16 @@ func (c *Core) requestPendingApprovals(observation consensus.SealingObservation, // [ sealing segment ] // Z <- A <- B(RZ) <- C <- D <- E func (c *Core) getOutdatedBlockIDsFromRootSealingSegment(rootHeader *flow.Header) (map[flow.Identifier]struct{}, error) { - rootSealingSegment, err := c.state.AtBlockID(rootHeader.ID()).SealingSegment() if err != nil { return nil, fmt.Errorf("could not get root sealing segment: %w", err) } knownBlockIDs := make(map[flow.Identifier]struct{}) // track block IDs in the sealing segment - var outdatedBlockIDs flow.IdentifierList - for _, block := range rootSealingSegment.Blocks { - knownBlockIDs[block.ID()] = struct{}{} - for _, result := range block.Payload.Results { + outdatedBlockIDs := make(flow.IdentifierList, 0) + for _, proposal := range rootSealingSegment.Blocks { // We iterate over the blocks in the sealing segment with increasing height. + knownBlockIDs[proposal.Block.ID()] = struct{}{} // Hence, we are guaranteed to encounter a block B *first* before its results if and only if B is in the sealing segment. + for _, result := range proposal.Block.Payload.Results { _, known := knownBlockIDs[result.BlockID] if !known { outdatedBlockIDs = append(outdatedBlockIDs, result.BlockID) @@ -671,3 +672,25 @@ func (c *Core) getOutdatedBlockIDsFromRootSealingSegment(rootHeader *flow.Header } return outdatedBlockIDs.Lookup(), nil } + +// gatedSealingObservationReporter is a utility for gating asynchronous completion of sealing observations. 
+type gatedSealingObservationReporter struct { + reporting *atomic.Bool // true when a sealing observation is actively being asynchronously completed +} + +func newGatedSealingObservationReporter() *gatedSealingObservationReporter { + return &gatedSealingObservationReporter{ + reporting: atomic.NewBool(false), + } +} + +// reportAsync only allows one in-flight observation completion at a time. +// Any extra observations are dropped. +func (reporter *gatedSealingObservationReporter) reportAsync(observation consensus.SealingObservation) { + if reporter.reporting.CompareAndSwap(false, true) { + go func() { + observation.Complete() + reporter.reporting.Store(false) + }() + } +} diff --git a/engine/consensus/sealing/core_test.go b/engine/consensus/sealing/core_test.go index 4dfbc31d50c..9b58b90e557 100644 --- a/engine/consensus/sealing/core_test.go +++ b/engine/consensus/sealing/core_test.go @@ -1,6 +1,7 @@ package sealing import ( + "errors" "fmt" "testing" "time" @@ -41,10 +42,10 @@ const RequiredApprovalsForSealConstructionTestingValue = 1 type ApprovalProcessingCoreTestSuite struct { approvals.BaseAssignmentCollectorTestSuite - sealsDB *storage.Seals - rootHeader *flow.Header - core *Core - setter realmodule.SealingConfigsSetter + sealsDB *storage.Seals + finalizedRootHeader *flow.Header + core *Core + setter realmodule.SealingConfigsSetter } func (s *ApprovalProcessingCoreTestSuite) TearDownTest() { @@ -56,12 +57,12 @@ func (s *ApprovalProcessingCoreTestSuite) SetupTest() { s.sealsDB = &storage.Seals{} - s.rootHeader = unittest.GenesisFixture().Header + s.finalizedRootHeader = unittest.Block.Genesis(flow.Emulator).ToHeader() params := new(mockstate.Params) s.State.On("Sealed").Return(unittest.StateSnapshotForKnownBlock(s.ParentBlock, nil)).Maybe() s.State.On("Params").Return(params) - params.On("Root").Return( - func() *flow.Header { return s.rootHeader }, + params.On("FinalizedRoot").Return( + func() *flow.Header { return s.finalizedRootHeader }, func() error { return nil }, ) @@ -70,7 +71,7 @@ func (s *ApprovalProcessingCoreTestSuite) SetupTest() { setter := unittest.NewSealingConfigs(flow.DefaultChunkAssignmentAlpha) var err error - s.core, err = NewCore(unittest.Logger(), s.WorkerPool, tracer, metrics, &tracker.NoopSealingTracker{}, engine.NewUnit(), s.Headers, s.State, s.sealsDB, s.Assigner, s.SigHasher, s.SealsPL, s.Conduit, setter) + s.core, err = NewCore(unittest.Logger(), s.WorkerPool, tracer, metrics, &tracker.NoopSealingTracker{}, s.Headers, s.State, s.sealsDB, s.Assigner, s.SigHasher, s.SealsPL, s.Conduit, setter) require.NoError(s.T(), err) s.setter = setter } @@ -327,11 +328,11 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_EmergencySealing( true, // enable emergency sealing ) require.NoError(s.T(), err) - s.core, err = NewCore(unittest.Logger(), s.WorkerPool, tracer, metrics, &tracker.NoopSealingTracker{}, engine.NewUnit(), s.Headers, s.State, s.sealsDB, s.Assigner, s.SigHasher, s.SealsPL, s.Conduit, setter) + s.core, err = NewCore(unittest.Logger(), s.WorkerPool, tracer, metrics, &tracker.NoopSealingTracker{}, s.Headers, s.State, s.sealsDB, s.Assigner, s.SigHasher, s.SealsPL, s.Conduit, setter) require.NoError(s.T(), err) s.setter = setter - s.SealsPL.On("ByID", mock.Anything).Return(nil, false).Maybe() + s.SealsPL.On("Get", mock.Anything).Return(nil, false).Maybe() s.SealsPL.On("Add", mock.Anything).Run( func(args mock.Arguments) { seal := args.Get(0).(*flow.IncorporatedResultSeal) @@ -378,14 +379,14 @@ func (s *ApprovalProcessingCoreTestSuite) 
TestOnBlockFinalized_ProcessingOrphanA previousResult := s.IncorporatedResult.Result for blockIndex, block := range fork { - s.Blocks[block.ID()] = block.Header + s.Blocks[block.ID()] = block.ToHeader() s.IdentitiesCache[block.ID()] = s.AuthorizedVerifiers // create and incorporate result for every block in fork except first one if blockIndex > 0 { // create a result result := unittest.ExecutionResultFixture(unittest.WithPreviousResult(*previousResult)) - result.BlockID = block.Header.ParentID + result.BlockID = block.ParentID result.Chunks = s.Chunks forkResults[forkIndex] = append(forkResults[forkIndex], result) previousResult = result @@ -406,7 +407,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_ProcessingOrphanA s.sealsDB.On("HighestInFork", mock.Anything).Return(seal, nil).Once() // block B_1 becomes finalized - finalized := forks[0][0].Header + finalized := forks[0][0].ToHeader() s.MarkFinalized(finalized) err := s.core.ProcessFinalizedBlock(finalized.ID()) require.NoError(s.T(), err) @@ -445,12 +446,12 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_ExtendingUnproces forks[forkIndex] = unittest.ChainFixtureFrom(forkIndex+3, s.Block) fork := forks[forkIndex] for _, block := range fork { - s.Blocks[block.ID()] = block.Header + s.Blocks[block.ID()] = block.ToHeader() s.IdentitiesCache[block.ID()] = s.AuthorizedVerifiers } } - finalized := forks[1][0].Header + finalized := forks[1][0].ToHeader() s.MarkFinalized(finalized) seal := unittest.Seal.Fixture(unittest.Seal.WithBlock(s.ParentBlock)) @@ -465,7 +466,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_ExtendingUnproces previousResult := s.IncorporatedResult.Result for blockIndex, block := range fork { result := unittest.ExecutionResultFixture(unittest.WithPreviousResult(*previousResult)) - result.BlockID = block.Header.ParentID + result.BlockID = block.ParentID result.Chunks = s.Chunks previousResult = result @@ -523,7 +524,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestOnBlockFinalized_ExtendingSealedRe // rate limiting is respected. func (s *ApprovalProcessingCoreTestSuite) TestRequestPendingApprovals() { s.core.requestTracker = approvals.NewRequestTracker(s.core.headers, 1, 3) - s.SealsPL.On("ByID", mock.Anything).Return(nil, false) + s.SealsPL.On("Get", mock.Anything).Return(nil, false) // n is the total number of blocks and incorporated-results we add to the // chain and mempool @@ -534,10 +535,10 @@ func (s *ApprovalProcessingCoreTestSuite) TestRequestPendingApprovals() { parentBlock := s.ParentBlock for i := 0; i < n; i++ { block := unittest.BlockWithParentFixture(parentBlock) - s.Blocks[block.ID()] = block.Header + s.Blocks[block.ID()] = block.ToHeader() s.IdentitiesCache[block.ID()] = s.AuthorizedVerifiers unsealedFinalizedBlocks = append(unsealedFinalizedBlocks, *block) - parentBlock = block.Header + parentBlock = block.ToHeader() } // progress latest sealed and latest finalized: @@ -546,7 +547,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestRequestPendingApprovals() { // add an unfinalized block; it shouldn't require an approval request unfinalizedBlock := unittest.BlockWithParentFixture(parentBlock) - s.Blocks[unfinalizedBlock.ID()] = unfinalizedBlock.Header + s.Blocks[unfinalizedBlock.ID()] = unfinalizedBlock.ToHeader() // we will assume that all chunks are assigned to the same two verifiers. 
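Aside on the fixture change coming up in the next hunk: the tests move from mutating a shared `chunks.Assignment` to a build-then-freeze `chunks.NewAssignmentBuilder()`, whose `Add(chunkIndex, verifiers)` returns an error (hence the `require.NoError` wrapper) and whose `Build()` produces the finished assignment. The sketch below shows the general shape of such a builder; the types and the duplicate-index rule are assumptions for illustration, not the flow-go implementation.

```go
package main

import "fmt"

type VerifierID string

// AssignmentBuilder accumulates per-chunk verifier assignments; it is discarded after Build.
type AssignmentBuilder struct {
	byChunk map[uint64][]VerifierID
}

func NewAssignmentBuilder() *AssignmentBuilder {
	return &AssignmentBuilder{byChunk: make(map[uint64][]VerifierID)}
}

// Add registers the verifiers for one chunk index. Duplicate indices are rejected,
// so a finished assignment can never be silently half-overwritten.
func (b *AssignmentBuilder) Add(chunkIndex uint64, verifiers []VerifierID) error {
	if _, exists := b.byChunk[chunkIndex]; exists {
		return fmt.Errorf("chunk %d already assigned", chunkIndex)
	}
	b.byChunk[chunkIndex] = append([]VerifierID(nil), verifiers...) // defensive copy
	return nil
}

// Assignment is the immutable result; callers only read from it.
type Assignment struct {
	byChunk map[uint64][]VerifierID
}

// Build publishes the assignment and disarms the builder.
func (b *AssignmentBuilder) Build() *Assignment {
	a := &Assignment{byChunk: b.byChunk}
	b.byChunk = nil // the builder must not mutate the published assignment afterwards
	return a
}

func (a *Assignment) Verifiers(chunkIndex uint64) []VerifierID { return a.byChunk[chunkIndex] }

func main() {
	b := NewAssignmentBuilder()
	fmt.Println(b.Add(0, []VerifierID{"v1", "v2"})) // <nil>
	fmt.Println(b.Add(0, []VerifierID{"v3"}))       // chunk 0 already assigned
	a := b.Build()
	fmt.Println(a.Verifiers(0)) // [v1 v2]
}
```

Splitting construction from use removes the temptation to mutate an assignment that other sub-tests already share.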
verifiers := make([]flow.Identifier, 0) @@ -594,12 +595,13 @@ func (s *ApprovalProcessingCoreTestSuite) TestRequestPendingApprovals() { prevResult = ir.Result - s.ChunksAssignment = chunks.NewAssignment() + assignmentBuilder := chunks.NewAssignmentBuilder() for _, chunk := range ir.Result.Chunks { // assign the verifier to this chunk - s.ChunksAssignment.Add(chunk, verifiers) + require.NoError(s.T(), assignmentBuilder.Add(chunk.Index, verifiers)) } + s.ChunksAssignment = assignmentBuilder.Build() err := s.core.processIncorporatedResult(ir) require.NoError(s.T(), err) @@ -616,7 +618,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestRequestPendingApprovals() { // start delivering finalization events lastProcessedIndex := 0 for ; lastProcessedIndex < int(s.core.sealingConfigsGetter.ApprovalRequestsThresholdConst()); lastProcessedIndex++ { - finalized := unsealedFinalizedBlocks[lastProcessedIndex].Header + finalized := unsealedFinalizedBlocks[lastProcessedIndex].ToHeader() s.MarkFinalized(finalized) err := s.core.ProcessFinalizedBlock(finalized.ID()) require.NoError(s.T(), err) @@ -627,7 +629,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestRequestPendingApprovals() { // process two more blocks, this will trigger requesting approvals for lastSealed + 1 height // but they will be in blackout period for i := 0; i < 2; i++ { - finalized := unsealedFinalizedBlocks[lastProcessedIndex].Header + finalized := unsealedFinalizedBlocks[lastProcessedIndex].ToHeader() s.MarkFinalized(finalized) err := s.core.ProcessFinalizedBlock(finalized.ID()) require.NoError(s.T(), err) @@ -644,7 +646,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestRequestPendingApprovals() { Return(nil).Times(chunkCount) // process next block - finalized := unsealedFinalizedBlocks[lastProcessedIndex].Header + finalized := unsealedFinalizedBlocks[lastProcessedIndex].ToHeader() s.MarkFinalized(finalized) err = s.core.ProcessFinalizedBlock(finalized.ID()) require.NoError(s.T(), err) @@ -658,9 +660,11 @@ func (s *ApprovalProcessingCoreTestSuite) TestRequestPendingApprovals() { // TestRepopulateAssignmentCollectorTree tests that the // collectors tree will contain execution results and assignment collectors will be created. 
// -// P <- A[ER{P}] <- B[ER{A}] <- C[ER{B}] <- D[ER{C}] <- E[ER{D}] -// | <- F[ER{A}] <- G[ER{B}] <- H[ER{G}] -// finalized +// ↙ B[ER{A}] ← C[ER{B}] ← D[ER{C}] ← E[ER{D}] +// P ←── A[ER{P}] +// ^ ↖ F[ER{A}] ← G[ER{B}] ← H[ER{G}] +// | +// finalized // // collectors tree has to be repopulated with incorporated results from blocks [A, B, C, D, F, G] // E, H shouldn't be considered since @@ -674,15 +678,24 @@ func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree( expectedResults := []*flow.IncorporatedResult{s.IncorporatedResult} blockChildren := make([]flow.Identifier, 0) - rootSnapshot := unittest.StateSnapshotForKnownBlock(s.rootHeader, nil) - s.Snapshots[s.rootHeader.ID()] = rootSnapshot + rootSnapshot := unittest.StateSnapshotForKnownBlock(s.finalizedRootHeader, nil) + s.Snapshots[s.finalizedRootHeader.ID()] = rootSnapshot + block, err := flow.NewRootBlock( + flow.UntrustedBlock{ + HeaderBody: s.finalizedRootHeader.HeaderBody, + Payload: unittest.PayloadFixture(), + }, + ) + require.NoError(s.T(), err) + rootSnapshot.On("SealingSegment").Return( - &flow.SealingSegment{ - Blocks: []*flow.Block{{ - Header: s.rootHeader, - Payload: &flow.Payload{}, - }}, - }, nil) + &flow.SealingSegment{Blocks: []*flow.Proposal{ + { + Block: *block, + // By convention, root block has no proposer signature - implementation has to handle this edge case + ProposerSigData: nil, + }, + }}, nil) s.sealsDB.On("HighestInFork", s.IncorporatedBlock.ID()).Return( unittest.Seal.Fixture( @@ -695,8 +708,8 @@ func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree( unittest.WithResult(s.IncorporatedResult.Result)))) payloads.On("ByBlockID", s.IncorporatedBlock.ID()).Return(&incorporatedBlockPayload, nil) - emptyPayload := flow.EmptyPayload() - payloads.On("ByBlockID", s.Block.ID()).Return(&emptyPayload, nil) + emptyPayload := flow.NewEmptyPayload() + payloads.On("ByBlockID", s.Block.ID()).Return(emptyPayload, nil) s.IdentitiesCache[s.IncorporatedBlock.ID()] = s.AuthorizedVerifiers @@ -715,10 +728,10 @@ func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree( result := unittest.ExecutionResultFixture( unittest.WithPreviousResult(*prevResult), ) - result.BlockID = block.Header.ParentID + result.BlockID = block.ParentID // update caches - s.Blocks[blockID] = block.Header + s.Blocks[blockID] = block.ToHeader() s.IdentitiesCache[blockID] = s.AuthorizedVerifiers blockChildren = append(blockChildren, blockID) @@ -726,7 +739,6 @@ func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree( unittest.IncorporatedResult.WithResult(result), unittest.IncorporatedResult.WithIncorporatedBlockID(blockID)) - // TODO: change this test for phase 3, assigner should expect incorporated block ID, not executed if blockIndex < len(fork)-1 { assigner.On("Assign", result, blockID).Return(s.ChunksAssignment, nil) expectedResults = append(expectedResults, IR) @@ -747,7 +759,7 @@ func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree( finalSnapShot.On("Descendants").Return(blockChildren, nil) s.State.On("Final").Return(finalSnapShot) - core, err := NewCore(unittest.Logger(), s.WorkerPool, tracer, metrics, &tracker.NoopSealingTracker{}, engine.NewUnit(), + core, err := NewCore(unittest.Logger(), s.WorkerPool, tracer, metrics, &tracker.NoopSealingTracker{}, s.Headers, s.State, s.sealsDB, assigner, s.SigHasher, s.SealsPL, s.Conduit, s.setter) require.NoError(s.T(), err) @@ -764,81 +776,172 @@ func (s *ApprovalProcessingCoreTestSuite) 
TestRepopulateAssignmentCollectorTree( } } -// TestRepopulateAssignmentCollectorTree_RootSealingSegment tests that the sealing -// engine will be initialized correctly when bootstrapping with a root sealing -// segment with multiple blocks, as is the case when joining the network at an epoch -// boundary. +// TestRepopulateAssignmentCollectorTree_RootSealingSegment tests the instantiation logic for sealing.Core +// when bootstrapping with a root sealing segment containing multiple blocks. +// +// The test verifies two key aspects: +// 1. Proper handling of seals/results referencing blocks *before* the lowest block in `SealingSegment.Blocks` +// 2. Correct initialization of the assignment collector tree. +// +// We refer to the lowest block in `SealingSegment.Blocks` as the `SealedRoot` and to the highest as the `FinalizedRoot`. +// Chain structure: +// +// Pre-Root Blocks Root Sealing Segment (queryable blocks) +// (unknown) ╭─────────┸────────╮ +// +// … <┄ X <┄┄┄┄┄┄┄┄┄┄ S ←── B ←── C ←── D +// ^ ^ ^ ^ +// │ │ │ └ FinalizedRoot +// SealedRoot │ │ contains seal for S +// │ │ +// contains results for X and S ┙ └ contains seal for X +// +// TODO: add result for block B +// +// Key aspects of this setup: +// 1. Block S is the lowest block in `SealingSegment.Blocks` (ignoring `SealingSegment.ExtraBlocks`). +// This is the highest sealed block as of block D. +// After bootstrapping, the storage API will only permit retrieving blocks whose height is larger than or equal to the root block's height. +// 2. Block X is an ancestor of S but not included in the sealing segment (because it is before the root block). +// 3. Block B contains execution results for: +// - Block X (a pre-root block) +// - Block S (the root block) +// 4. Block C contains a seal for block X. +// 5. Block D contains a seal for block S. +// +// Important Implementation Notes: +// (i) Per `sealing_segment.md`, block X would normally need to be included in the sealing +// segment's ExtraBlocks. However, blocks before the root block are not queryable via +// the common API. Therefore, we explicitly exclude them to ensure the repopulation +// logic does not attempt to retrieve such blocks. +// (ii) When bootstrapping with a genesis block, the sealing segment's root block would +// not contain any proposer signatures - an edge case the implementation must handle. // -// In particular, the assignment collector tree population step should ignore -// unknown block references below the root height. +// Note that scenarios (i) and (ii) are conceptually mutually exclusive: +// - In a genesis bootstrap (ii), the genesis block is the lowest block in +// SealingSegment.Blocks with ProposerSigData=nil. There cannot be any ancestor blocks +// excluded from the sealing segment. +// - With excluded ancestor blocks (i), ProposerSigData cannot be nil for any block in +// SealingSegment.Blocks as they are not genesis blocks. +// +// However, in this test we combine both scenarios to reduce the number of test cases.
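Before the test body below, it may help to see the traversal invariant that the earlier `getOutdatedBlockIDsFromRootSealingSegment` hunk relies on in isolation: blocks are visited in increasing height, so a result's executed block has been seen before if and only if that block lies inside the segment. A compact sketch over simplified types (these are not the flow-go structures):

```go
package main

import "fmt"

type Identifier string

// Result references the block that was executed.
type Result struct{ BlockID Identifier }

// Block carries the results included in its payload.
type Block struct {
	ID      Identifier
	Results []Result
}

// outdatedBlockIDs returns the IDs of executed blocks that results in the segment
// reference but that are not themselves part of the segment (i.e. pre-root blocks).
// segment must be ordered by increasing height, as sealing segments are.
func outdatedBlockIDs(segment []Block) map[Identifier]struct{} {
	known := make(map[Identifier]struct{}, len(segment))
	outdated := make(map[Identifier]struct{})
	for _, b := range segment {
		known[b.ID] = struct{}{} // a block is always visited before any result for it
		for _, r := range b.Results {
			if _, ok := known[r.BlockID]; !ok {
				outdated[r.BlockID] = struct{}{} // references a block below the root
			}
		}
	}
	return outdated
}

func main() {
	// Mirrors the diagram above: S is the segment's lowest block, X precedes the root.
	segment := []Block{
		{ID: "S"},
		{ID: "B", Results: []Result{{BlockID: "X"}, {BlockID: "S"}}},
		{ID: "C"},
		{ID: "D", Results: []Result{{BlockID: "B"}}},
	}
	fmt.Println(outdatedBlockIDs(segment)) // map[X:{}]
}
```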
func (s *ApprovalProcessingCoreTestSuite) TestRepopulateAssignmentCollectorTree_RootSealingSegment() { metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() assigner := &module.ChunkAssigner{} payloads := &storage.Payloads{} - // setup mocks - s.rootHeader = s.IncorporatedBlock - expectedResults := []*flow.IncorporatedResult{s.IncorporatedResult} - - s.sealsDB.On("HighestInFork", s.IncorporatedBlock.ID()).Return( - unittest.Seal.Fixture( - unittest.Seal.WithBlock(s.ParentBlock)), nil) - - // the incorporated block contains the result for the sealing candidate block - incorporatedBlockPayload := unittest.PayloadFixture( - unittest.WithReceipts( - unittest.ExecutionReceiptFixture( - unittest.WithResult(s.IncorporatedResult.Result)))) - payloads.On("ByBlockID", s.IncorporatedBlock.ID()).Return(&incorporatedBlockPayload, nil) - - // the sealing candidate block (S) is the lowest block in the segment under consideration here - // initially, this block would represent the lowest block in a node's root sealing segment, - // meaning that all earlier blocks are not known. In this case we should ignore results and seals - // referencing unknown blocks (tested here by adding such a result+seal to the candidate payload). - candidatePayload := unittest.PayloadFixture( - unittest.WithReceipts(unittest.ExecutionReceiptFixture()), // receipt referencing pre-root block - unittest.WithSeals(unittest.Seal.Fixture()), // seal referencing pre-root block + // block X: + // Create result and receipt - we don't need the block itself, since this will not be used in this test. + // The block ID referenced by the result will uniquely identify this block + receiptX := unittest.ExecutionReceiptFixture() + s.Headers.On("ByBlockID", receiptX.BlockID).Return(nil, errors.New("fooooo")) + + // Create block S (root block) first + blockS := unittest.BlockFixture() + s.Blocks[blockS.ID()] = blockS.ToHeader() + s.IdentitiesCache[blockS.ID()] = s.AuthorizedVerifiers + // Note: as Block S is already sealed, `payloads` should not be queried for it + + // Create block B with results for both X and S + receiptS := unittest.ExecutionReceiptFixture(unittest.WithResult(unittest.ExecutionResultFixture( + unittest.WithPreviousResult(receiptX.ExecutionResult), + unittest.WithBlock(blockS), + ))) + blockB := unittest.BlockWithParentAndPayload( + blockS.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(receiptX, receiptS)), ) - payloads.On("ByBlockID", s.Block.ID()).Return(&candidatePayload, nil) - - s.IdentitiesCache[s.IncorporatedBlock.ID()] = s.AuthorizedVerifiers - - assigner.On("Assign", s.IncorporatedResult.Result, mock.Anything).Return(s.ChunksAssignment, nil) - - finalSnapShot := unittest.StateSnapshotForKnownBlock(s.rootHeader, nil) - s.Snapshots[s.rootHeader.ID()] = finalSnapShot - // root snapshot has no pending children - finalSnapShot.On("Descendants").Return(nil, nil) - // set up sealing segment - finalSnapShot.On("SealingSegment").Return( + s.Blocks[blockB.ID()] = blockB.ToHeader() + s.IdentitiesCache[blockB.ID()] = s.AuthorizedVerifiers + payloads.On("ByBlockID", blockB.ID()).Return(&blockB.Payload, nil) + assigner.On("Assign", receiptS.ExecutionResult, blockB.ID()).Return(s.ChunksAssignment, nil).Maybe() // we allow an assignment for block S to be created, even though it is sealed. 
+ // Note: We expect the assigner to NOT be called for resultForX as it references a pre-root block + + // Create block C with seal for X + blockC := unittest.BlockWithParentAndPayload( + blockB.ToHeader(), + unittest.PayloadFixture(unittest.WithSeals( + unittest.Seal.Fixture(unittest.Seal.WithResult(&receiptX.ExecutionResult)), + ))) + s.Blocks[blockC.ID()] = blockC.ToHeader() + s.IdentitiesCache[blockC.ID()] = s.AuthorizedVerifiers + payloads.On("ByBlockID", blockC.ID()).Return(&blockC.Payload, nil) + + // Create block D with seal for S and receipt for B: + sealS := unittest.Seal.Fixture(unittest.Seal.WithResult(&receiptS.ExecutionResult)) + receiptB := unittest.ExecutionReceiptFixture(unittest.WithResult(unittest.ExecutionResultFixture( + unittest.WithPreviousResult(receiptS.ExecutionResult), + unittest.WithBlock(blockB), + ))) + blockD := unittest.BlockWithParentAndPayload( + blockC.ToHeader(), + unittest.PayloadFixture( + unittest.WithSeals(sealS), + unittest.WithReceipts(receiptB), + )) + s.Blocks[blockD.ID()] = blockD.ToHeader() + s.IdentitiesCache[blockD.ID()] = s.AuthorizedVerifiers + payloads.On("ByBlockID", blockD.ID()).Return(&blockD.Payload, nil) + assigner.On("Assign", &receiptB.ExecutionResult, blockD.ID()).Return(s.ChunksAssignment, nil) // this is the only result that a verifier assignment should be created for + + // Setup Protocol State: + // * latest sealed block is the root block S + s.State.On("Sealed").Unset() + s.State.On("Sealed").Return(unittest.StateSnapshotForKnownBlock(blockS.ToHeader(), nil)) + // * snapshot for latest finalized block: + // Block D is the latest finalized block, right after bootstrapping. While the freshly bootstrapped node does not + // know any children of D, the Sealing Segment definition guarantees that only finalized blocks are included. 
+ finalSnapshot := unittest.StateSnapshotForKnownBlock(blockD.ToHeader(), nil) + s.State.On("Final").Return(finalSnapshot) // call `s.State.AtBlockID(…)` looks up the snapshots in map `s.Snapshots` + s.Snapshots[blockD.ID()] = finalSnapshot + s.finalizedRootHeader = blockD.ToHeader() // call `s.State.Params().FinalizedRoot()` returns `s.finalizedRootHeader` + finalSnapshot.On("SealingSegment").Return( &flow.SealingSegment{ - Blocks: []*flow.Block{{ - Header: s.Block, - Payload: &candidatePayload, - }, { - Header: s.ParentBlock, - Payload: &flow.Payload{}, - }, { - Header: s.IncorporatedBlock, - Payload: &incorporatedBlockPayload, - }}, + Blocks: []*flow.Proposal{ + { + Block: *blockS, + ProposerSigData: nil, // combination of (i) and (ii): spork root block without proposer signature; but with ancestor blocks + }, + { + Block: *blockB, + ProposerSigData: unittest.SignatureFixture(), + }, + { + Block: *blockC, + ProposerSigData: unittest.SignatureFixture(), + }, + { + Block: *blockD, + ProposerSigData: unittest.SignatureFixture(), + }, + }, }, nil) - s.State.On("Final").Return(finalSnapShot) + finalSnapshot.On("Descendants").Return([]flow.Identifier{}, nil) // block D has no descendants - core, err := NewCore(unittest.Logger(), s.WorkerPool, tracer, metrics, &tracker.NoopSealingTracker{}, engine.NewUnit(), + // Mock highest sealed block lookup + s.sealsDB.On("HighestInFork", blockD.ID()).Return(sealS, nil) + + // Instantiate sealing.Core and repopulate the assignment collector tree + core, err := NewCore(unittest.Logger(), s.WorkerPool, tracer, metrics, &tracker.NoopSealingTracker{}, s.Headers, s.State, s.sealsDB, assigner, s.SigHasher, s.SealsPL, s.Conduit, s.setter) require.NoError(s.T(), err) - err = core.RepopulateAssignmentCollectorTree(payloads) require.NoError(s.T(), err) - // check collector tree, after repopulating we should have all collectors for execution results that we have - // traversed and they have to be processable. - for _, incorporatedResult := range expectedResults { - collector, err := core.collectorTree.GetOrCreateCollector(incorporatedResult.Result) - require.NoError(s.T(), err) - require.False(s.T(), collector.Created) - require.Equal(s.T(), approvals.VerifyingApprovals, collector.Collector.ProcessingStatus()) - } + // Verify that no collector was created for block X's result: + // the following call will try to instantiate a collector; an error means that during repopulation no such collection was created + _, err = core.collectorTree.GetOrCreateCollector(&receiptX.ExecutionResult) + require.Error(s.T(), err) + + // Verify that the only result in the assignment collector tree is the one for block B: + collector, err := core.collectorTree.GetOrCreateCollector(&receiptB.ExecutionResult) + require.NoError(s.T(), err) + require.False(s.T(), collector.Created) + require.Equal(s.T(), approvals.VerifyingApprovals, collector.Collector.ProcessingStatus()) + + // Verify mock expectations + assigner.AssertExpectations(s.T()) + payloads.AssertExpectations(s.T()) + s.sealsDB.AssertExpectations(s.T()) } diff --git a/engine/consensus/sealing/counters/monotonous_counter.go b/engine/consensus/sealing/counters/monotonous_counter.go deleted file mode 100644 index d561d5f1552..00000000000 --- a/engine/consensus/sealing/counters/monotonous_counter.go +++ /dev/null @@ -1,37 +0,0 @@ -package counters - -import "sync/atomic" - -// StrictMonotonousCounter is a helper struct which implements a strict monotonous counter. 
-// StrictMonotonousCounter is implemented using atomic operations and doesn't allow to set a value -// which is lower or equal to the already stored one. The counter is implemented -// solely with non-blocking atomic operations for concurrency safety. -type StrictMonotonousCounter struct { - atomicCounter uint64 -} - -// NewMonotonousCounter creates new counter with initial value -func NewMonotonousCounter(initialValue uint64) StrictMonotonousCounter { - return StrictMonotonousCounter{ - atomicCounter: initialValue, - } -} - -// Set updates value of counter if and only if it's strictly larger than value which is already stored. -// Returns true if update was successful or false if stored value is larger. -func (c *StrictMonotonousCounter) Set(newValue uint64) bool { - for { - oldValue := c.Value() - if newValue <= oldValue { - return false - } - if atomic.CompareAndSwapUint64(&c.atomicCounter, oldValue, newValue) { - return true - } - } -} - -// Value returns value which is stored in atomic variable -func (c *StrictMonotonousCounter) Value() uint64 { - return atomic.LoadUint64(&c.atomicCounter) -} diff --git a/engine/consensus/sealing/counters/monotonous_counter_test.go b/engine/consensus/sealing/counters/monotonous_counter_test.go deleted file mode 100644 index 3e8eb74c1bf..00000000000 --- a/engine/consensus/sealing/counters/monotonous_counter_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package counters - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/utils/unittest" -) - -func TestSet(t *testing.T) { - counter := NewMonotonousCounter(3) - require.True(t, counter.Set(4)) - require.Equal(t, uint64(4), counter.Value()) - require.False(t, counter.Set(2)) - require.Equal(t, uint64(4), counter.Value()) -} - -func TestFuzzy(t *testing.T) { - counter := NewMonotonousCounter(3) - require.True(t, counter.Set(4)) - require.False(t, counter.Set(2)) - require.True(t, counter.Set(7)) - require.True(t, counter.Set(9)) - require.True(t, counter.Set(12)) - require.False(t, counter.Set(10)) - require.True(t, counter.Set(18)) - - for i := 20; i < 100; i++ { - require.True(t, counter.Set(uint64(i))) - } - - for i := 20; i < 100; i++ { - require.False(t, counter.Set(uint64(i))) - } -} - -func TestConcurrent(t *testing.T) { - counter := NewMonotonousCounter(3) - - unittest.Concurrently(100, func(i int) { - counter.Set(uint64(i)) - }) - - require.Equal(t, uint64(99), counter.Value()) -} diff --git a/engine/consensus/sealing/engine.go b/engine/consensus/sealing/engine.go index ae432725bd6..6f957cb9dd6 100644 --- a/engine/consensus/sealing/engine.go +++ b/engine/consensus/sealing/engine.go @@ -11,8 +11,9 @@ import ( "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/engine/consensus" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/metrics" msig "github.com/onflow/flow-go/module/signature" @@ -20,6 +21,7 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/logging" ) type Event struct { @@ -60,10 +62,10 @@ type ( // Engine is a wrapper for approval processing `Core` which implements logic for // queuing and filtering network messages which later will be processed by sealing engine. 
-// Purpose of this struct is to provide an efficient way how to consume messages from network layer and pass -// them to `Core`. Engine runs 2 separate gorourtines that perform pre-processing and consuming messages by Core. +// Purpose of this struct is to provide an efficient way to consume messages from the network layer and pass +// them to `Core`. Engine runs multiple workers for pre-processing messages and executing `sealing.Core` business logic. type Engine struct { - unit *engine.Unit + component.Component workerPool *workerpool.WorkerPool core consensus.SealingCore log zerolog.Logger @@ -85,14 +87,14 @@ type Engine struct { rootHeader *flow.Header } -// NewEngine constructs new `Engine` which runs on it's own unit. +// NewEngine constructs a new Sealing Engine which runs on its own component. func NewEngine(log zerolog.Logger, tracer module.Tracer, conMetrics module.ConsensusMetrics, engineMetrics module.EngineMetrics, mempool module.MempoolMetrics, sealingTracker consensus.SealingTracker, - net network.Network, + net network.EngineRegistry, me module.Local, headers storage.Headers, payloads storage.Payloads, @@ -104,14 +106,9 @@ func NewEngine(log zerolog.Logger, sealsMempool mempool.IncorporatedResultSeals, requiredApprovalsForSealConstructionGetter module.SealingConfigsGetter, ) (*Engine, error) { - rootHeader, err := state.Params().Root() - if err != nil { - return nil, fmt.Errorf("could not retrieve root block: %w", err) - } + rootHeader := state.Params().FinalizedRoot() - unit := engine.NewUnit() e := &Engine{ - unit: unit, workerPool: workerpool.New(defaultAssignmentCollectorsWorkerPoolCapacity), log: log.With().Str("engine", "sealing.Engine").Logger(), me: me, @@ -124,7 +121,7 @@ func NewEngine(log zerolog.Logger, rootHeader: rootHeader, } - err = e.setupTrustedInboundQueues() + err := e.setupTrustedInboundQueues() if err != nil { return nil, fmt.Errorf("initialization of inbound queues for trusted inputs failed: %w", err) } @@ -134,6 +131,8 @@ func NewEngine(log zerolog.Logger, return nil, fmt.Errorf("could not initialize message handler for untrusted inputs: %w", err) } + e.Component = e.buildComponentManager() + // register engine with the approval provider _, err = net.Register(channels.ReceiveApprovals, e) if err != nil { @@ -147,7 +146,7 @@ func NewEngine(log zerolog.Logger, } signatureHasher := msig.NewBLSHasher(msig.ResultApprovalTag) - core, err := NewCore(log, e.workerPool, tracer, conMetrics, sealingTracker, unit, headers, state, sealsDB, assigner, signatureHasher, sealsMempool, approvalConduit, requiredApprovalsForSealConstructionGetter) + core, err := NewCore(log, e.workerPool, tracer, conMetrics, sealingTracker, headers, state, sealsDB, assigner, signatureHasher, sealsMempool, approvalConduit, requiredApprovalsForSealConstructionGetter) if err != nil { return nil, fmt.Errorf("failed to init sealing engine: %w", err) } @@ -161,6 +160,30 @@ func NewEngine(log zerolog.Logger, return e, nil } +// buildComponentManager creates the component manager with the necessary workers. +// It must only be called during initialization of the sealing engine, and the only +// reason it is factored out from NewEngine is so that it can be used in tests. 
+func (e *Engine) buildComponentManager() *component.ComponentManager { + builder := component.NewComponentManagerBuilder() + for i := 0; i < defaultSealingEngineWorkers; i++ { + builder.AddWorker(e.loop) + } + builder.AddWorker(e.finalizationProcessingLoop) + builder.AddWorker(e.blockIncorporatedEventsProcessingLoop) + builder.AddWorker(e.waitUntilWorkersFinish) + return builder.Build() +} + +// waitUntilWorkersFinish ensures that the Sealing Engine only finishes shutting down +// once the workerPool used by the Sealing Core has been shut down (after waiting +// for any pending tasks to complete). +func (e *Engine) waitUntilWorkersFinish(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + <-ctx.Done() + // After receiving shutdown signal, wait for the workerPool + e.workerPool.StopWait() +} + // setupTrustedInboundQueues initializes inbound queues for TRUSTED INPUTS (from other components within the // consensus node). We deliberately separate the queues for trusted inputs from the MessageHandler, which // handles external, untrusted inputs. This reduces the attack surface, as it makes it impossible for an external @@ -232,7 +255,7 @@ func (e *Engine) setupMessageHandler(getSealingConfigs module.SealingConfigsGett }, engine.Pattern{ Match: func(msg *engine.Message) bool { - _, ok := msg.Payload.(*messages.ApprovalResponse) + _, ok := msg.Payload.(*flow.ApprovalResponse) if ok { e.engineMetrics.MessageReceived(metrics.EngineSealing, metrics.MessageResultApproval) } @@ -244,7 +267,7 @@ func (e *Engine) setupMessageHandler(getSealingConfigs module.SealingConfigsGett return nil, false } - approval := msg.Payload.(*messages.ApprovalResponse).Approval + approval := msg.Payload.(*flow.ApprovalResponse).Approval return &engine.Message{ OriginID: msg.OriginID, Payload: &approval, @@ -265,17 +288,22 @@ func (e *Engine) Process(channel channels.Channel, originID flow.Identifier, eve e.log.Warn().Msgf("%v delivered unsupported message %T through %v", originID, event, channel) return nil } - return fmt.Errorf("unexpected error while processing engine message: %w", err) + // An unexpected exception should never happen here, because the `messageHandler` only puts the events into the + // respective queues depending on their type or returns an `IncompatibleInputTypeError` for events with unknown type. + // We cannot return the error here, because the networking layer calling `Process` will just log that error and + // continue on a best-effort basis, which is not safe in case of an unexpected exception. + e.log.Fatal().Err(err).Msg("unexpected error while processing engine message") } return nil } // processAvailableMessages is processor of pending events which drives events from networking layer to business logic in `Core`. // Effectively consumes messages from networking layer and dispatches them into corresponding sinks which are connected with `Core`. -func (e *Engine) processAvailableMessages() error { +// No errors expected during normal operations. 
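The `waitUntilWorkersFinish` worker above is the piece that ties engine shutdown to the worker pool: the ComponentManager only reports Done after every worker has returned, so blocking one worker on `StopWait` guarantees pending pool tasks drain before the engine is considered stopped. A standalone sketch of that pattern, with a plain `context.Context` standing in for the `irrecoverable.SignalerContext`:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/gammazero/workerpool"
)

// waitForPool mirrors waitUntilWorkersFinish: signal readiness immediately,
// block until shutdown is requested, then stop the pool and wait for all
// queued tasks to complete before returning.
func waitForPool(ctx context.Context, wp *workerpool.WorkerPool, ready func()) {
	ready()
	<-ctx.Done()
	wp.StopWait() // blocks until every submitted task has finished
}

func main() {
	wp := workerpool.New(2)
	for i := 0; i < 4; i++ {
		i := i
		wp.Submit(func() {
			time.Sleep(50 * time.Millisecond)
			fmt.Println("task", i, "done")
		})
	}
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the shutdown signal
	waitForPool(ctx, wp, func() {}) // returns only after all 4 tasks ran
}
```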
+func (e *Engine) processAvailableMessages(ctx irrecoverable.SignalerContext) error { for { select { - case <-e.unit.Quit(): + case <-ctx.Done(): return nil default: } @@ -300,7 +328,11 @@ func (e *Engine) processAvailableMessages() error { if ok { e.log.Debug().Msg("got new result approval") - err := e.onApproval(msg.OriginID, msg.Payload.(*flow.ResultApproval)) + ra, ok := msg.Payload.(*flow.ResultApproval) + if !ok { + return irrecoverable.NewExceptionf("unexpected approval payload type %T; expected *flow.ResultApproval", msg.Payload) + } + err := e.onApproval(msg.OriginID, ra) if err != nil { return fmt.Errorf("could not process result approval: %w", err) } @@ -313,53 +345,59 @@ func (e *Engine) processAvailableMessages() error { } } -// finalizationProcessingLoop is a separate goroutine that performs processing of finalization events -func (e *Engine) finalizationProcessingLoop() { +// finalizationProcessingLoop contains the logic for processing of block finalization events. +// This method is intended to be executed by a single worker goroutine. +func (e *Engine) finalizationProcessingLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { finalizationNotifier := e.finalizationEventsNotifier.Channel() + ready() for { select { - case <-e.unit.Quit(): + case <-ctx.Done(): return case <-finalizationNotifier: finalized, err := e.state.Final().Head() if err != nil { - e.log.Fatal().Err(err).Msg("could not retrieve last finalized block") + ctx.Throw(fmt.Errorf("could not retrieve last finalized block: %w", err)) } err = e.core.ProcessFinalizedBlock(finalized.ID()) if err != nil { - e.log.Fatal().Err(err).Msgf("could not process finalized block %v", finalized.ID()) + ctx.Throw(fmt.Errorf("could not process finalized block %v: %w", finalized.ID(), err)) } } } } -// blockIncorporatedEventsProcessingLoop is a separate goroutine for processing block incorporated events -func (e *Engine) blockIncorporatedEventsProcessingLoop() { +// blockIncorporatedEventsProcessingLoop contains the logic for processing block incorporated events. +// This method is intended to be executed by a single worker goroutine. +func (e *Engine) blockIncorporatedEventsProcessingLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { c := e.blockIncorporatedNotifier.Channel() - + ready() for { select { - case <-e.unit.Quit(): + case <-ctx.Done(): return case <-c: - err := e.processBlockIncorporatedEvents() + err := e.processBlockIncorporatedEvents(ctx) if err != nil { - e.log.Fatal().Err(err).Msg("internal error processing block incorporated queued message") + ctx.Throw(fmt.Errorf("internal error processing block incorporated queued message: %w", err)) } } } } -func (e *Engine) loop() { +// loop contains the logic for processing incorporated results and result approvals via sealing.Core's +// business logic. This method is intended to be executed by multiple loop worker goroutines concurrently. 
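`processAvailableMessages` and the `loop` workers follow a recurring flow-go idiom: a notifier channel signals "work may be available", and each wake-up drains the inbound queues until empty, re-checking the shutdown signal between items. A generic, self-contained sketch of that idiom (the queue here is a toy stand-in for the engine's fifoqueues):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type queue struct {
	mu    sync.Mutex
	items []string
}

func (q *queue) pop() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return "", false
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, true
}

// drainLoop wakes on each notification and processes queued items until the
// queue is empty, checking ctx between items so shutdown is never starved.
func drainLoop(ctx context.Context, notify <-chan struct{}, q *queue) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-notify:
			for {
				select {
				case <-ctx.Done():
					return
				default:
				}
				item, ok := q.pop()
				if !ok {
					break // drained; wait for the next notification
				}
				fmt.Println("processing", item)
			}
		}
	}
}

func main() {
	q := &queue{items: []string{"approval-1", "approval-2"}}
	notify := make(chan struct{}, 1)
	notify <- struct{}{} // signal that work is available

	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() {
		drainLoop(ctx, notify, q)
		close(done)
	}()
	time.Sleep(100 * time.Millisecond) // let the loop drain the queue
	cancel()
	<-done
}
```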
+func (e *Engine) loop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { notifier := e.inboundEventsNotifier.Channel() + ready() for { select { - case <-e.unit.Quit(): + case <-ctx.Done(): return case <-notifier: - err := e.processAvailableMessages() + err := e.processAvailableMessages(ctx) if err != nil { - e.log.Fatal().Err(err).Msg("internal error processing queued message") + ctx.Throw(fmt.Errorf("internal error processing queued message: %w", err)) } } } @@ -368,69 +406,31 @@ func (e *Engine) loop() { // processIncorporatedResult is a function that creates incorporated result and submits it for processing // to sealing core. In phase 2, incorporated result is incorporated at same block that is being executed. // This will be changed in phase 3. +// No errors expected during normal operations. func (e *Engine) processIncorporatedResult(incorporatedResult *flow.IncorporatedResult) error { err := e.core.ProcessIncorporatedResult(incorporatedResult) e.engineMetrics.MessageHandled(metrics.EngineSealing, metrics.MessageExecutionReceipt) return err } +// onApproval checks that the result approval is valid before forwarding it to the Core for processing in a blocking way. +// No errors expected during normal operations. func (e *Engine) onApproval(originID flow.Identifier, approval *flow.ResultApproval) error { - // don't process approval if originID is mismatched + // don't process (silently ignore) approval if originID is mismatched if originID != approval.Body.ApproverID { + e.log.Warn().Bool(logging.KeyProtocolViolation, true). + Msgf("result approval generated by node %v received from different originID %v", approval.Body.ApproverID, originID) return nil } err := e.core.ProcessApproval(approval) e.engineMetrics.MessageHandled(metrics.EngineSealing, metrics.MessageResultApproval) if err != nil { - return fmt.Errorf("fatal internal error in sealing core logic") + return irrecoverable.NewExceptionf("fatal internal error in sealing core logic: %w", err) } return nil } -// SubmitLocal submits an event originating on the local node. -func (e *Engine) SubmitLocal(event interface{}) { - err := e.ProcessLocal(event) - if err != nil { - // receiving an input of incompatible type from a trusted internal component is fatal - e.log.Fatal().Err(err).Msg("internal error processing event") - } -} - -// Submit submits the given event from the node with the given origin ID -// for processing in a non-blocking manner. It returns instantly and logs -// a potential processing error internally when done. -func (e *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { - err := e.Process(channel, originID, event) - if err != nil { - e.log.Fatal().Err(err).Msg("internal error processing event") - } -} - -// ProcessLocal processes an event originating on the local node. -func (e *Engine) ProcessLocal(event interface{}) error { - return e.messageHandler.Process(e.me.NodeID(), event) -} - -// Ready returns a ready channel that is closed once the engine has fully -// started. For the propagation engine, we consider the engine up and running -// upon initialization. 
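The Ready/Done implementations removed below were the last `engine.Unit` remnants; under the component model, readiness and shutdown follow the Start/Ready/Done contract driven by a signaler context. A compilable, library-style sketch of that contract as the tests later in this diff exercise it (a plain `context.Context` stands in for `irrecoverable.SignalerContext`):

```go
package lifecycle

import (
	"context"
	"fmt"
	"time"
)

// startable captures the component lifecycle used throughout this diff:
// Start launches the workers, Ready closes once all workers signalled ready,
// Done closes once all workers returned after cancellation.
type startable interface {
	Start(ctx context.Context)
	Ready() <-chan struct{}
	Done() <-chan struct{}
}

// runBriefly drives a component through its full lifecycle: start, wait for
// readiness, do some work, cancel, and wait for a clean shutdown.
func runBriefly(c startable, work func()) error {
	ctx, cancel := context.WithCancel(context.Background())
	c.Start(ctx)
	select {
	case <-c.Ready():
	case <-time.After(time.Second):
		cancel()
		return fmt.Errorf("component not ready in time")
	}
	work()
	cancel()
	select {
	case <-c.Done():
		return nil
	case <-time.After(time.Second):
		return fmt.Errorf("component did not shut down in time")
	}
}
```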
-func (e *Engine) Ready() <-chan struct{} { - // launch as many workers as we need - for i := 0; i < defaultSealingEngineWorkers; i++ { - e.unit.Launch(e.loop) - } - e.unit.Launch(e.finalizationProcessingLoop) - e.unit.Launch(e.blockIncorporatedEventsProcessingLoop) - return e.unit.Ready() -} - -func (e *Engine) Done() <-chan struct{} { - return e.unit.Done(func() { - e.workerPool.StopWait() - }) -} - // OnFinalizedBlock implements the `OnFinalizedBlock` callback from the `hotstuff.FinalizationConsumer` // It informs sealing.Core about finalization of respective block. // @@ -488,7 +488,13 @@ func (e *Engine) processIncorporatedBlock(incorporatedBlockID flow.Identifier) e return fmt.Errorf("could not retrieve receipt incorporated in block %v: %w", incorporatedBlock.ParentID, err) } - incorporatedResult := flow.NewIncorporatedResult(incorporatedBlock.ParentID, result) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: incorporatedBlock.ParentID, + Result: result, + }) + if err != nil { + return fmt.Errorf("could not create incorporated result for block (%x): %w", incorporatedBlock.ParentID, err) + } added := e.pendingIncorporatedResults.Push(incorporatedResult) if !added { // Not being able to queue an incorporated result is a fatal edge case. It might happen, if the @@ -503,10 +509,10 @@ func (e *Engine) processIncorporatedBlock(incorporatedBlockID flow.Identifier) e // processBlockIncorporatedEvents performs processing of block incorporated hot stuff events // No errors expected during normal operations. -func (e *Engine) processBlockIncorporatedEvents() error { +func (e *Engine) processBlockIncorporatedEvents(ctx irrecoverable.SignalerContext) error { for { select { - case <-e.unit.Quit(): + case <-ctx.Done(): return nil default: } diff --git a/engine/consensus/sealing/engine_test.go b/engine/consensus/sealing/engine_test.go index e5adb345460..5b4c9df2ed2 100644 --- a/engine/consensus/sealing/engine_test.go +++ b/engine/consensus/sealing/engine_test.go @@ -1,20 +1,19 @@ -// (c) 2021 Dapper Labs - ALL RIGHTS RESERVED - package sealing import ( + "context" "sync" "testing" "time" + "github.com/gammazero/workerpool" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/engine" mockconsensus "github.com/onflow/flow-go/engine/consensus/mock" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network/channels" @@ -38,6 +37,7 @@ type SealingEngineSuite struct { // Sealing Engine engine *Engine + cancel context.CancelFunc } func (s *SealingEngineSuite) SetupTest() { @@ -60,7 +60,7 @@ func (s *SealingEngineSuite) SetupTest() { s.engine = &Engine{ log: unittest.Logger(), - unit: engine.NewUnit(), + workerPool: workerpool.New(defaultAssignmentCollectorsWorkerPoolCapacity), core: s.core, me: me, engineMetrics: metrics, @@ -77,10 +77,22 @@ func (s *SealingEngineSuite) SetupTest() { err = s.engine.setupMessageHandler(unittest.NewSealingConfigs(RequiredApprovalsForSealConstructionTestingValue)) require.NoError(s.T(), err) - <-s.engine.Ready() + // setup ComponentManager and start the engine + s.engine.Component = s.engine.buildComponentManager() + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(s.T(), context.Background()) + 
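+	// Note: the mock signaler context fails the test if the engine throws an
+	// irrecoverable error; cancelling it (see TearDownTest below) triggers shutdown.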
s.cancel = cancel + s.engine.Start(ctx) + unittest.AssertClosesBefore(s.T(), s.engine.Ready(), 10*time.Millisecond) } -// TestOnFinalizedBlock tests if finalized block gets processed when send through `Engine`. +func (s *SealingEngineSuite) TearDownTest() { + if s.cancel != nil { + s.cancel() + unittest.AssertClosesBefore(s.T(), s.engine.Done(), 10*time.Millisecond) + } +} + +// TestOnFinalizedBlock tests if finalized block gets processed when sent through [Engine]. // Tests the whole processing pipeline. func (s *SealingEngineSuite) TestOnFinalizedBlock() { @@ -97,7 +109,7 @@ func (s *SealingEngineSuite) TestOnFinalizedBlock() { s.core.AssertExpectations(s.T()) } -// TestOnBlockIncorporated tests if incorporated block gets processed when send through `Engine`. +// TestOnBlockIncorporated tests if incorporated block gets processed when sent through [Engine]. // Tests the whole processing pipeline. func (s *SealingEngineSuite) TestOnBlockIncorporated() { parentBlock := unittest.BlockHeaderFixture() @@ -108,10 +120,14 @@ func (s *SealingEngineSuite) TestOnBlockIncorporated() { index := &flow.Index{} for _, result := range payload.Results { - index.ResultIDs = append(index.ReceiptIDs, result.ID()) + index.ResultIDs = append(index.ResultIDs, result.ID()) s.results.On("ByID", result.ID()).Return(result, nil).Once() - IR := flow.NewIncorporatedResult(parentBlock.ID(), result) + IR, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: parentBlock.ID(), + Result: result, + }) + require.NoError(s.T(), err) s.core.On("ProcessIncorporatedResult", IR).Return(nil).Once() } s.index.On("ByBlockID", parentBlock.ID()).Return(index, nil) @@ -139,24 +155,30 @@ func (s *SealingEngineSuite) TestMultipleProcessingItems() { for i := range receipts { receipt := unittest.ExecutionReceiptFixture( unittest.WithExecutorID(originID), - unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&block))), + unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(block))), ) receipts[i] = receipt } numApprovalsPerReceipt := 1 approvals := make([]*flow.ResultApproval, 0, len(receipts)*numApprovalsPerReceipt) - responseApprovals := make([]*messages.ApprovalResponse, 0) + responseApprovals := make([]*flow.ApprovalResponse, 0) approverID := unittest.IdentifierFixture() for _, receipt := range receipts { for j := 0; j < numApprovalsPerReceipt; j++ { - approval := unittest.ResultApprovalFixture(unittest.WithExecutionResultID(receipt.ID()), - unittest.WithApproverID(approverID)) - responseApproval := &messages.ApprovalResponse{ + approval := unittest.ResultApprovalFixture( + unittest.WithExecutionResultID(receipt.ExecutionResult.ID()), + unittest.WithApproverID(approverID), + ) + + responseApproval := &flow.ApprovalResponse{ + Nonce: 0, Approval: *approval, } + responseApprovals = append(responseApprovals, responseApproval) approvals = append(approvals, approval) + s.core.On("ProcessApproval", approval).Return(nil).Twice() } } @@ -167,15 +189,16 @@ func (s *SealingEngineSuite) TestMultipleProcessingItems() { defer wg.Done() for _, approval := range approvals { err := s.engine.Process(channels.ReceiveApprovals, approverID, approval) - s.Require().NoError(err, "should process approval") + s.Require().NoError(err, "should process approval (trusted)") } }() wg.Add(1) go func() { defer wg.Done() - for _, approval := range responseApprovals { - err := s.engine.Process(channels.ReceiveApprovals, approverID, approval) - s.Require().NoError(err, "should process approval") 
+		for _, resp := range responseApprovals {
+			err := s.engine.Process(channels.ReceiveApprovals, approverID, resp)
+			s.Require().NoError(err, "should process approval (converted from wire)")
 		}
 	}()
@@ -203,15 +226,11 @@ func (s *SealingEngineSuite) TestApprovalInvalidOrigin() {
 	s.core.AssertNumberOfCalls(s.T(), "ProcessApproval", 0)
 }
 
-// TestProcessUnsupportedMessageType tests that Process and ProcessLocal correctly handle a case where invalid message type
+// TestProcessUnsupportedMessageType tests that Process correctly handles a case where an invalid message type
 // was submitted from network layer.
 func (s *SealingEngineSuite) TestProcessUnsupportedMessageType() {
 	invalidEvent := uint64(42)
 	err := s.engine.Process("ch", unittest.IdentifierFixture(), invalidEvent)
 
 	// shouldn't result in error since byzantine inputs are expected
 	require.NoError(s.T(), err)
-
-	// in case of local processing error cannot be consumed since all inputs are trusted
-	err = s.engine.ProcessLocal(invalidEvent)
-	require.Error(s.T(), err)
-	require.True(s.T(), engine.IsIncompatibleInputTypeError(err))
 }
diff --git a/engine/enqueue.go b/engine/enqueue.go
index 2999cf5cd9a..c3221da3044 100644
--- a/engine/enqueue.go
+++ b/engine/enqueue.go
@@ -48,6 +48,12 @@ func MatchType[T any](m *Message) bool {
 	return ok
 }
 
+// MessageHandler routes incoming network messages based on a static set of patterns.
+// It extends the network layer and shares its semantics: delivery is not guaranteed.
+//
+// Messages are placed into bounded stores. When a store is full, new messages are dropped
+// to avoid unbounded memory growth. Engines using [MessageHandler] can configure store sizes,
+// but must tolerate message loss, just as with the underlying network.
 type MessageHandler struct {
 	log      zerolog.Logger
 	notifier Notifier
@@ -85,6 +91,9 @@ func (e *MessageHandler) Process(originID flow.Identifier, payload interface{})
 
 		ok := pattern.Store.Put(msg)
 		if !ok {
+			// Failure to store the message means that the message store is full.
+			// To prevent memory-exhaustion attacks, we drop the message.
+			// Any component using the [MessageHandler] must tolerate message loss.
 			e.log.Warn().
 				Str("msg_type", logging.Type(payload)).
 				Hex("origin_id", originID[:]).
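The new `MessageHandler` documentation above pins down drop-on-full semantics for the bounded message stores. A minimal sketch of a store with exactly that behavior (illustrative types, not flow-go's `fifoqueue`):

```go
package main

import "fmt"

// boundedStore accepts messages until its capacity is reached, then drops new
// ones, trading message loss for a hard memory bound, the same contract the
// MessageHandler documents for its pattern stores.
type boundedStore struct {
	ch chan string
}

func newBoundedStore(capacity int) *boundedStore {
	return &boundedStore{ch: make(chan string, capacity)}
}

// Put returns false (and drops msg) when the store is full.
func (s *boundedStore) Put(msg string) bool {
	select {
	case s.ch <- msg:
		return true
	default:
		return false
	}
}

func main() {
	s := newBoundedStore(1)
	fmt.Println(s.Put("approval A")) // true: stored
	fmt.Println(s.Put("approval B")) // false: full, dropped; callers must tolerate this
}
```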
diff --git a/engine/enqueue_test.go b/engine/enqueue_test.go
index dae6aa4c47d..cd743c32a07 100644
--- a/engine/enqueue_test.go
+++ b/engine/enqueue_test.go
@@ -1,7 +1,6 @@
 package engine_test
 
 import (
-	"errors"
 	"fmt"
 	"sync"
 	"testing"
@@ -363,7 +362,6 @@ func TestUnknownMessageType(t *testing.T) {
 		unknownType := struct{ n int }{n: 10}
 
 		err := eng.Process(id, unknownType)
-		require.Error(t, err)
-		require.True(t, errors.Is(err, engine.IncompatibleInputTypeError))
+		require.ErrorIs(t, err, engine.IncompatibleInputTypeError)
 	})
 }
diff --git a/engine/errors.go b/engine/errors.go
index 06f24990052..df31acd58a8 100644
--- a/engine/errors.go
+++ b/engine/errors.go
@@ -22,10 +22,6 @@ type InvalidInputError struct {
 	err error
 }
 
-func NewInvalidInputError(msg string) error {
-	return NewInvalidInputErrorf(msg)
-}
-
 func NewInvalidInputErrorf(msg string, args ...interface{}) error {
 	return InvalidInputError{
 		err: fmt.Errorf(msg, args...),
@@ -64,12 +60,6 @@ func NewNetworkTransmissionErrorf(msg string, args ...interface{}) error {
 	}
 }
 
-func NewNetworkTransmissionError(msg string) error {
-	return NetworkTransmissionError{
-		err: fmt.Errorf(msg),
-	}
-}
-
 func (e NetworkTransmissionError) Unwrap() error {
 	return e.err
 }
diff --git a/engine/errors_test.go b/engine/errors_test.go
index 9a25ca838f8..2ba3650964c 100644
--- a/engine/errors_test.go
+++ b/engine/errors_test.go
@@ -70,7 +70,7 @@ func (e FieldsError) Error() string {
 func TestTypeCheck(t *testing.T) {
 	var err error
 	err = NoFieldError
-	require.True(t, errors.Is(err, NoFieldError))
+	require.ErrorIs(t, err, NoFieldError)
 
 	err = FieldsError{
 		Field1: "field1 missing",
diff --git a/engine/execution/block_result.go b/engine/execution/block_result.go
index d2e57641d16..232875f7e4b 100644
--- a/engine/execution/block_result.go
+++ b/engine/execution/block_result.go
@@ -1,6 +1,9 @@
 package execution
 
 import (
+	"fmt"
+	"math"
+
 	"github.com/onflow/flow-go/fvm/storage/snapshot"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
@@ -12,6 +15,7 @@ type BlockExecutionResult struct {
 	*entity.ExecutableBlock
 
 	collectionExecutionResults []CollectionExecutionResult
+	ExecutionDataRoot          *flow.BlockExecutionDataRoot // full root data structure produced from block
 }
 
 // NewPopulatedBlockExecutionResult constructs a new BlockExecutionResult,
@@ -30,7 +34,7 @@ func (er *BlockExecutionResult) Size() int {
 }
 
 func (er *BlockExecutionResult) CollectionExecutionResultAt(colIndex int) *CollectionExecutionResult {
-	if colIndex < 0 && colIndex > len(er.collectionExecutionResults) {
+	if colIndex < 0 || colIndex >= len(er.collectionExecutionResults) {
 		return nil
 	}
 	return &er.collectionExecutionResults[colIndex]
@@ -46,6 +50,18 @@ func (er *BlockExecutionResult) AllEvents() flow.EventsList {
 	return res
 }
 
+// ServiceEventCountForChunk returns the number of service events emitted in the given chunk.
+func (er *BlockExecutionResult) ServiceEventCountForChunk(chunkIndex int) uint16 {
+	serviceEventCount := len(er.collectionExecutionResults[chunkIndex].serviceEvents)
+	if serviceEventCount > math.MaxUint16 {
+		// The current protocol demands that the ServiceEventCount does not exceed 65535.
+		// For defensive programming, we explicitly enforce this limit, since a larger count could only be produced by a bug.
+		// Execution nodes would be the first to realize that this bound is violated, and crash (fail early).
+		panic(fmt.Sprintf("service event count (%d) exceeds maximum value of 65535", serviceEventCount))
+	}
+	return uint16(serviceEventCount)
+}
+
 func (er *BlockExecutionResult) AllServiceEvents() flow.EventsList {
 	res := make(flow.EventsList, 0)
 	for _, ce := range er.collectionExecutionResults {
@@ -93,6 +109,25 @@ func (er *BlockExecutionResult) AllConvertedServiceEvents() flow.ServiceEventLis
 	return res
 }
 
+// AllUpdatedRegisters returns all unique updated register entries.
+// Note: order is not deterministic.
+func (er *BlockExecutionResult) AllUpdatedRegisters() []flow.RegisterEntry {
+	updates := make(map[flow.RegisterID]flow.RegisterValue)
+	for _, ce := range er.collectionExecutionResults {
+		for regID, regVal := range ce.executionSnapshot.WriteSet {
+			updates[regID] = regVal
+		}
+	}
+	res := make([]flow.RegisterEntry, 0, len(updates))
+	for regID, regVal := range updates {
+		res = append(res, flow.RegisterEntry{
+			Key:   regID,
+			Value: regVal,
+		})
+	}
+	return res
+}
+
 // BlockAttestationResult holds collection attestation results
 type BlockAttestationResult struct {
 	*BlockExecutionResult
@@ -114,7 +149,7 @@ func NewEmptyBlockAttestationResult(
 		BlockExecutionResult:         blockExecutionResult,
 		collectionAttestationResults: make([]CollectionAttestationResult, 0, colSize),
 		BlockExecutionData: &execution_data.BlockExecutionData{
-			BlockID: blockExecutionResult.ID(),
+			BlockID: blockExecutionResult.BlockID(),
 			ChunkExecutionDatas: make(
 				[]*execution_data.ChunkExecutionData,
 				0,
@@ -149,43 +184,74 @@ func (ar *BlockAttestationResult) AppendCollectionAttestationResult(
 	ar.ChunkExecutionDatas = append(ar.ChunkExecutionDatas, chunkExecutionDatas)
 }
 
-func (ar *BlockAttestationResult) AllChunks() []*flow.Chunk {
+func (ar *BlockAttestationResult) AllChunks() ([]*flow.Chunk, error) {
 	chunks := make([]*flow.Chunk, len(ar.collectionAttestationResults))
 	for i := 0; i < len(ar.collectionAttestationResults); i++ {
-		chunks[i] = ar.ChunkAt(i) // TODO(ramtin): cache and optimize this
+		chunk, err := ar.ChunkAt(i)
+		if err != nil {
+			return nil, fmt.Errorf("could not find chunk: %w", err)
+		}
+		chunks[i] = chunk // TODO(ramtin): cache and optimize this
 	}
-	return chunks
+	return chunks, nil
}
 
-func (ar *BlockAttestationResult) ChunkAt(index int) *flow.Chunk {
+// ChunkAt returns the Chunk for the collection at the given index.
+// Receiver BlockAttestationResult is expected to be well-formed; callers must use an index that exists.
+// No errors are expected during normal operation.
+func (ar *BlockAttestationResult) ChunkAt(index int) (*flow.Chunk, error) {
 	if index < 0 || index >= len(ar.collectionAttestationResults) {
-		return nil
+		return nil, fmt.Errorf("chunk collection index is not valid: %v", index)
 	}
 
 	execRes := ar.collectionExecutionResults[index]
 	attestRes := ar.collectionAttestationResults[index]
 
-	return flow.NewChunk(
-		ar.Block.ID(),
-		index,
-		attestRes.startStateCommit,
-		len(execRes.TransactionResults()),
-		attestRes.eventCommit,
-		attestRes.endStateCommit,
-	)
+	if execRes.executionSnapshot == nil {
+		// This should never happen.
+		// In case it does, attach additional information to the panic message.
+		panic(fmt.Sprintf("execution snapshot is nil.
Block ID: %s, EndState: %s", ar.Block.ID(), attestRes.endStateCommit)) + } + + chunk, err := flow.NewChunk(flow.UntrustedChunk{ + ChunkBody: flow.ChunkBody{ + BlockID: ar.Block.ID(), + CollectionIndex: uint(index), + StartState: attestRes.startStateCommit, + EventCollection: attestRes.eventCommit, + ServiceEventCount: ar.ServiceEventCountForChunk(index), + TotalComputationUsed: execRes.executionSnapshot.TotalComputationUsed(), + NumberOfTransactions: uint64(len(execRes.TransactionResults())), + }, + Index: uint64(index), + EndState: attestRes.endStateCommit, + }) + if err != nil { + return nil, fmt.Errorf("could not build chunk: %w", err) + } + + return chunk, nil + } -func (ar *BlockAttestationResult) AllChunkDataPacks() []*flow.ChunkDataPack { +func (ar *BlockAttestationResult) AllChunkDataPacks() ([]*flow.ChunkDataPack, error) { chunkDataPacks := make([]*flow.ChunkDataPack, len(ar.collectionAttestationResults)) for i := 0; i < len(ar.collectionAttestationResults); i++ { - chunkDataPacks[i] = ar.ChunkDataPackAt(i) // TODO(ramtin): cache and optimize this + chunkDataPack, err := ar.ChunkDataPackAt(i) + if err != nil { + return nil, fmt.Errorf("could not find chunk data pack: %w", err) + } + chunkDataPacks[i] = chunkDataPack // TODO(ramtin): cache and optimize this } - return chunkDataPacks + return chunkDataPacks, nil } -func (ar *BlockAttestationResult) ChunkDataPackAt(index int) *flow.ChunkDataPack { +// ChunkDataPackAt returns the ChunkDataPack for the collection at the given index. +// Receiver BlockAttestationResult is expected to be well-formed; callers must use an index that exists. +// No errors are expected during normal operation. +func (ar *BlockAttestationResult) ChunkDataPackAt(index int) (*flow.ChunkDataPack, error) { if index < 0 || index >= len(ar.collectionAttestationResults) { - return nil + return nil, fmt.Errorf("chunk collection index is not valid: %v", index) } // Note: There's some inconsistency in how chunk execution data and @@ -196,12 +262,23 @@ func (ar *BlockAttestationResult) ChunkDataPackAt(index int) *flow.ChunkDataPack attestRes := ar.collectionAttestationResults[index] - return flow.NewChunkDataPack( - ar.ChunkAt(index).ID(), // TODO(ramtin): optimize this - attestRes.startStateCommit, - attestRes.stateProof, - collection, - ) + chunk, err := ar.ChunkAt(index) + if err != nil { + return nil, fmt.Errorf("could not build chunk: %w", err) + } + + chunkDataPack, err := flow.NewChunkDataPack(flow.UntrustedChunkDataPack{ + ChunkID: chunk.ID(), // TODO(ramtin): optimize this + StartState: attestRes.startStateCommit, + Proof: attestRes.stateProof, + Collection: collection, + ExecutionDataRoot: *ar.ExecutionDataRoot, + }) + if err != nil { + return nil, fmt.Errorf("could not build chunk data pack: %w", err) + } + + return chunkDataPack, nil } func (ar *BlockAttestationResult) AllEventCommitments() []flow.Identifier { diff --git a/engine/execution/block_result_test.go b/engine/execution/block_result_test.go new file mode 100644 index 00000000000..a96e3576728 --- /dev/null +++ b/engine/execution/block_result_test.go @@ -0,0 +1,81 @@ +package execution + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/utils/slices" + "github.com/onflow/flow-go/utils/unittest" +) + +// makeBlockExecutionResultFixture makes a BlockExecutionResult fixture +// with the specified allocation of service events to chunks. 
+func makeBlockExecutionResultFixture(serviceEventsPerChunk []int) *BlockExecutionResult {
+	fixture := new(BlockExecutionResult)
+	for _, nServiceEvents := range serviceEventsPerChunk {
+		fixture.collectionExecutionResults = append(fixture.collectionExecutionResults,
+			CollectionExecutionResult{
+				serviceEvents:          unittest.EventsFixture(nServiceEvents),
+				convertedServiceEvents: unittest.ServiceEventsFixture(nServiceEvents),
+			})
+	}
+	return fixture
+}
+
+// Tests that the ServiceEventCountForChunk method works as expected under various circumstances.
+func TestBlockExecutionResult_ServiceEventCountForChunk(t *testing.T) {
+	t.Run("no service events", func(t *testing.T) {
+		nChunks := rand.Intn(10) + 1 // always contains at least system chunk
+		blockResult := makeBlockExecutionResultFixture(make([]int, nChunks))
+		// all chunks should have 0 service event count
+		for chunkIndex := 0; chunkIndex < nChunks; chunkIndex++ {
+			count := blockResult.ServiceEventCountForChunk(chunkIndex)
+			assert.Equal(t, uint16(0), count)
+		}
+	})
+	t.Run("service events only in system chunk", func(t *testing.T) {
+		nChunks := rand.Intn(10) + 2 // at least 2 chunks
+		// add between 1 and 10 service events, all in the system chunk
+		serviceEventAllocation := make([]int, nChunks)
+		nServiceEvents := rand.Intn(10) + 1
+		serviceEventAllocation[nChunks-1] = nServiceEvents
+
+		blockResult := makeBlockExecutionResultFixture(serviceEventAllocation)
+		// all non-system chunks should have zero service event count
+		for chunkIndex := 0; chunkIndex < nChunks-1; chunkIndex++ {
+			count := blockResult.ServiceEventCountForChunk(chunkIndex)
+			assert.Equal(t, uint16(0), count)
+		}
+		// the system chunk should contain all service events
+		assert.Equal(t, uint16(nServiceEvents), blockResult.ServiceEventCountForChunk(nChunks-1))
+	})
+	t.Run("service events only outside system chunk", func(t *testing.T) {
+		nChunks := rand.Intn(10) + 2 // at least 2 chunks
+		// add 1 service event to all non-system chunks
+		serviceEventAllocation := slices.Fill(1, nChunks)
+		serviceEventAllocation[nChunks-1] = 0

+		blockResult := makeBlockExecutionResultFixture(serviceEventAllocation)
+		// all non-system chunks should have 1 service event
+		for chunkIndex := 0; chunkIndex < nChunks-1; chunkIndex++ {
+			count := blockResult.ServiceEventCountForChunk(chunkIndex)
+			assert.Equal(t, uint16(1), count)
+		}
+		// the system chunk itself should have no service events in this case
+		assert.Equal(t, uint16(0), blockResult.ServiceEventCountForChunk(nChunks-1))
+	})
+	t.Run("service events in both system chunk and other chunks", func(t *testing.T) {
+		nChunks := rand.Intn(10) + 2 // at least 2 chunks
+		// add 1 service event to all chunks (including system chunk)
+		serviceEventAllocation := slices.Fill(1, nChunks)
+
+		blockResult := makeBlockExecutionResultFixture(serviceEventAllocation)
+		// all chunks should have service event count of 1
+		for chunkIndex := 0; chunkIndex < nChunks; chunkIndex++ {
+			count := blockResult.ServiceEventCountForChunk(chunkIndex)
+			assert.Equal(t, uint16(1), count)
+		}
+	})
+}
diff --git a/engine/execution/checker/core.go b/engine/execution/checker/core.go
new file mode 100644
index 00000000000..3643c174ce3
--- /dev/null
+++ b/engine/execution/checker/core.go
@@ -0,0 +1,156 @@
+package checker
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine/execution/state"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
"github.com/onflow/flow-go/storage" +) + +// Core is the core logic of the checker engine that checks if the execution result matches the sealed result. +type Core struct { + log zerolog.Logger + state protocol.State + execState state.ExecutionState +} + +func NewCore( + logger zerolog.Logger, + state protocol.State, + execState state.ExecutionState, +) *Core { + e := &Core{ + log: logger.With().Str("engine", "checker").Logger(), + state: state, + execState: execState, + } + + return e +} + +// checkMyCommitWithSealedCommit is the main check of the checker engine +func checkMyCommitWithSealedCommit( + logger zerolog.Logger, + executedBlock *flow.Header, + myCommit flow.StateCommitment, + sealedCommit flow.StateCommitment, +) error { + if myCommit != sealedCommit { + // mismatch + return fmt.Errorf("execution result is different from the sealed result, height: %v, block_id: %v, sealed_commit: %v, my_commit: %v", + executedBlock.Height, + executedBlock.ID(), + sealedCommit, + myCommit, + ) + } + + logger.Info(). + Uint64("height", executedBlock.Height). + Str("block_id", executedBlock.ID().String()). + Msg("execution result matches the sealed result") + + // match + return nil +} + +// RunCheck skips when the last sealed has not been executed, and last executed has not been finalized. +func (c *Core) RunCheck() error { + // find last sealed block + lastSealedBlock, lastFinal, seal, err := c.findLastSealedBlock() + if err != nil { + return err + } + + mycommitAtLastSealed, err := c.execState.StateCommitmentByBlockID(lastSealedBlock.ID()) + if err == nil { + // if last sealed block has been executed, then check if they match + return checkMyCommitWithSealedCommit(c.log, lastSealedBlock, mycommitAtLastSealed, seal.FinalState) + } + + // if last sealed block has not been executed, then check if recent executed block has + // been sealed already, if yes, check if they match. 
+	lastExecutedHeight, err := c.findLastExecutedBlockHeight()
+	if err != nil {
+		return err
+	}
+
+	if lastExecutedHeight > lastFinal.Height {
+		// last executed block has not been finalized yet,
+		// can't check since an unfinalized block is also unsealed, skip
+		return nil
+	}
+
+	// TODO: better to query seals from protocol state,
+	// switch to state.Final().LastSealed() when available
+	sealedExecuted, seal, err := c.findLatestSealedAtHeight(lastExecutedHeight)
+	if err != nil {
+		return fmt.Errorf("could not get the last sealed block at height: %v, err: %w", lastExecutedHeight, err)
+	}
+
+	sealedCommit := seal.FinalState
+
+	mycommit, err := c.execState.StateCommitmentByBlockID(seal.BlockID)
+	if errors.Is(err, storage.ErrNotFound) {
+		// we have not executed the sealed block yet;
+		// in other words, this check cannot detect an execution fork if execution
+		// lags behind sealing
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("could not get my state commitment for block %v: %w", seal.BlockID, err)
+	}
+
+	return checkMyCommitWithSealedCommit(c.log, sealedExecuted, mycommit, sealedCommit)
+}
+
+// findLastSealedBlock returns the last sealed block header, the last finalized header, and the latest seal.
+func (c *Core) findLastSealedBlock() (*flow.Header, *flow.Header, *flow.Seal, error) {
+	finalized := c.state.Final()
+	lastFinal, err := finalized.Head()
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	_, lastSeal, err := finalized.SealedResult()
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("could not get the last sealed for the finalized block: %w", err)
+	}
+
+	lastSealed, err := c.state.AtBlockID(lastSeal.BlockID).Head()
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("could not get the last sealed block: %w", err)
+	}
+
+	return lastSealed, lastFinal, lastSeal, nil
+}
+
+// findLastExecutedBlockHeight finds the last executed block height
+func (c *Core) findLastExecutedBlockHeight() (uint64, error) {
+	height, _, err := c.execState.GetLastExecutedBlockID(context.Background())
+	if err != nil {
+		return 0, fmt.Errorf("could not get the last executed block: %w", err)
+	}
+	return height, nil
+}
+
+// findLatestSealedAtHeight finds the latest sealed block at the given height
+func (c *Core) findLatestSealedAtHeight(finalizedHeight uint64) (*flow.Header, *flow.Seal, error) {
+	_, seal, err := c.state.AtHeight(finalizedHeight).SealedResult()
+	if err != nil {
+		return nil, nil, fmt.Errorf("could not get the last sealed for the finalized block: %w", err)
+	}
+
+	sealed, err := c.state.AtBlockID(seal.BlockID).Head()
+	if err != nil {
+		return nil, nil, fmt.Errorf("could not get the last sealed block: %w", err)
+	}
+	return sealed, seal, nil
+}
diff --git a/engine/execution/checker/core_test.go b/engine/execution/checker/core_test.go
new file mode 100644
index 00000000000..7b32e7ba345
--- /dev/null
+++ b/engine/execution/checker/core_test.go
@@ -0,0 +1,156 @@
+package checker_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/engine/execution/checker"
+	stateMock "github.com/onflow/flow-go/engine/execution/state/mock"
+	"github.com/onflow/flow-go/model/flow"
+	protocol "github.com/onflow/flow-go/state/protocol/mock"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func makeCore(t *testing.T) (*checker.Core, *protocol.State, *stateMock.ExecutionState) {
+	logger := unittest.Logger()
+	state := protocol.NewState(t)
+	execState := stateMock.NewExecutionState(t)
+	core := checker.NewCore(logger, 
state, execState) + return core, state, execState +} + +func mockFinalizedBlock(t *testing.T, state *protocol.State, finalized *flow.Header) *protocol.Snapshot { + finalizedSnapshot := protocol.NewSnapshot(t) + finalizedSnapshot.On("Head").Return(finalized, nil) + state.On("Final").Return(finalizedSnapshot) + return finalizedSnapshot +} + +func mockAtBlockID(t *testing.T, state *protocol.State, header *flow.Header) *protocol.Snapshot { + snapshot := protocol.NewSnapshot(t) + snapshot.On("Head").Return(header, nil) + state.On("AtBlockID", header.ID()).Return(snapshot) + return snapshot +} + +func mockSealedBlock(t *testing.T, state *protocol.State, finalized *protocol.Snapshot, sealed *flow.Header) (*flow.ExecutionResult, *flow.Seal) { + lastSealResult := unittest.ExecutionResultFixture(func(r *flow.ExecutionResult) { + r.BlockID = sealed.ID() + }) + lastSeal := unittest.Seal.Fixture(unittest.Seal.WithResult(lastSealResult)) + finalized.On("SealedResult").Return(lastSealResult, lastSeal, nil) + return lastSealResult, lastSeal +} + +func mockFinalizedSealedBlock(t *testing.T, state *protocol.State, finalized *flow.Header, sealed *flow.Header) (*flow.ExecutionResult, *flow.Seal) { + finalizedSnapshot := mockFinalizedBlock(t, state, finalized) + return mockSealedBlock(t, state, finalizedSnapshot, sealed) +} + +func mockSealedBlockAtHeight(t *testing.T, state *protocol.State, height uint64, lastSealed *flow.Header) (*flow.ExecutionResult, *flow.Seal) { + snapshotAtHeight := protocol.NewSnapshot(t) + lastSealedResultAtHeight := unittest.ExecutionResultFixture(func(r *flow.ExecutionResult) { + r.BlockID = lastSealed.ID() + }) + lastSealAtHeight := unittest.Seal.Fixture(unittest.Seal.WithResult(lastSealedResultAtHeight)) + snapshotAtHeight.On("SealedResult").Return(lastSealedResultAtHeight, lastSealAtHeight, nil) + state.On("AtHeight", height).Return(snapshotAtHeight, nil) + return lastSealedResultAtHeight, lastSealAtHeight +} + +func mockExecutedBlock(t *testing.T, es *stateMock.ExecutionState, executed *flow.Header, result *flow.ExecutionResult) { + commit, err := result.FinalStateCommitment() + require.NoError(t, err) + es.On("StateCommitmentByBlockID", executed.ID()).Return(commit, nil) +} + +func mockUnexecutedBlock(t *testing.T, es *stateMock.ExecutionState, unexecuted *flow.Header) { + es.On("StateCommitmentByBlockID", unexecuted.ID()).Return(nil, storage.ErrNotFound) +} + +func TestCheckPassIfLastSealedIsExecutedAndMatch(t *testing.T) { + // ..<- LastSealed(executed) <- .. <- LastFinalized <- .. <- LastExecuted <- ... + chain, _, _ := unittest.ChainFixture(10) + lastFinal := chain[7].ToHeader() + lastSealed := chain[5].ToHeader() + + core, state, es := makeCore(t) + lastSealedResult, _ := mockFinalizedSealedBlock(t, state, lastFinal, lastSealed) + mockAtBlockID(t, state, lastSealed) + mockExecutedBlock(t, es, lastSealed, lastSealedResult) + + require.NoError(t, core.RunCheck()) +} + +func TestCheckFailIfLastSealedIsExecutedButMismatch(t *testing.T) { + // ..<- LastSealed(executed) <- .. <- LastFinalized <- .. <- LastExecuted <- ... 
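+	// (older blocks are on the left; `X <- Y` reads as "Y is a child of X")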
+	chain, _, _ := unittest.ChainFixture(10)
+	lastFinal := chain[7].ToHeader()
+	lastSealed := chain[5].ToHeader()
+
+	core, state, es := makeCore(t)
+	_, _ = mockFinalizedSealedBlock(t, state, lastFinal, lastSealed)
+	mockAtBlockID(t, state, lastSealed)
+
+	mismatchingResult := unittest.ExecutionResultFixture()
+
+	mockExecutedBlock(t, es, lastSealed, mismatchingResult)
+
+	err := core.RunCheck()
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "execution result is different from the sealed result")
+}
+
+func TestCheckPassIfLastSealedIsNotExecutedAndLastExecutedMatch(t *testing.T) {
+	// LastSealedExecuted (sealed) <..<- LastExecuted(finalized) <..<- LastSealed(not executed) <..<- LastFinalized
+	chain, _, _ := unittest.ChainFixture(10)
+	lastFinal := chain[7].ToHeader()
+	lastSealed := chain[5].ToHeader()
+	lastExecuted := chain[3].ToHeader()
+	lastSealedExecuted := chain[1].ToHeader()
+
+	core, state, es := makeCore(t)
+	// mock that last sealed is not executed
+	mockFinalizedSealedBlock(t, state, lastFinal, lastSealed)
+	mockAtBlockID(t, state, lastSealed)
+	mockUnexecutedBlock(t, es, lastSealed)
+
+	// mock the last executed block and the latest seal at its height
+	es.On("GetLastExecutedBlockID", mock.Anything).Return(lastExecuted.Height, lastExecuted.ID(), nil)
+	lastSealedResultAtExecutedHeight, _ := mockSealedBlockAtHeight(t, state, lastExecuted.Height, lastSealedExecuted)
+	mockAtBlockID(t, state, lastSealedExecuted)
+
+	// mock with matching result
+	mockExecutedBlock(t, es, lastSealedExecuted, lastSealedResultAtExecutedHeight)
+
+	require.NoError(t, core.RunCheck())
+}
+
+func TestCheckFailIfLastSealedIsNotExecutedAndLastExecutedMismatch(t *testing.T) {
+	// LastSealedExecuted (sealed) <..<- LastExecuted(finalized) <..<- LastSealed(not executed) <..<- LastFinalized
+	chain, _, _ := unittest.ChainFixture(10)
+	lastFinal := chain[7].ToHeader()
+	lastSealed := chain[5].ToHeader()
+	lastExecuted := chain[3].ToHeader()
+	lastSealedExecuted := chain[1].ToHeader()
+
+	core, state, es := makeCore(t)
+	// mock that last sealed is not executed
+	mockFinalizedSealedBlock(t, state, lastFinal, lastSealed)
+	mockAtBlockID(t, state, lastSealed)
+	mockUnexecutedBlock(t, es, lastSealed)
+
+	// mock the last executed block and the latest seal at its height
+	es.On("GetLastExecutedBlockID", mock.Anything).Return(lastExecuted.Height, lastExecuted.ID(), nil)
+	mockSealedBlockAtHeight(t, state, lastExecuted.Height, lastSealedExecuted)
+	mockAtBlockID(t, state, lastSealedExecuted)
+
+	// mock with mismatching result
+	mismatchingResult := unittest.ExecutionResultFixture()
+	mockExecutedBlock(t, es, lastSealedExecuted, mismatchingResult)
+
+	err := core.RunCheck()
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "execution result is different from the sealed result")
+}
diff --git a/engine/execution/checker/engine.go b/engine/execution/checker/engine.go
index dcf330bd2c7..5c9a5bc1404 100644
--- a/engine/execution/checker/engine.go
+++ b/engine/execution/checker/engine.go
@@ -1,108 +1,57 @@
 package checker
 
 import (
-	"errors"
-	"fmt"
+	"context"
+	"time"
 
-	"github.com/rs/zerolog"
-
-	"github.com/onflow/flow-go/consensus/hotstuff/model"
-	"github.com/onflow/flow-go/consensus/hotstuff/notifications"
-	"github.com/onflow/flow-go/engine"
-	"github.com/onflow/flow-go/engine/execution/state"
-	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/state/protocol"
-	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/module/component"
+	"github.com/onflow/flow-go/module/irrecoverable"
 )
 
 type Engine struct {
-	notifications.NoopConsumer // satisfy the FinalizationConsumer interface
-
-	unit      *engine.Unit
-	log       zerolog.Logger
-	state     protocol.State
-	execState state.ExecutionState
-	sealsDB   storage.Seals
-}
-
-func New(
-	logger zerolog.Logger,
-	state protocol.State,
-	execState state.ExecutionState,
-	sealsDB storage.Seals,
-) *Engine {
-	return &Engine{
-		unit:      engine.NewUnit(),
-		log:       logger.With().Str("engine", "checker").Logger(),
-		state:     state,
-		execState: execState,
-		sealsDB:   sealsDB,
-	}
+	*component.ComponentManager
+	core *Core
 }
 
-func (e *Engine) Ready() <-chan struct{} {
-	// make sure we will run into a crashloop if result gets inconsistent
-	// with sealed result.
+// DefaultTimeInterval triggers the check once every minute.
+const DefaultTimeInterval = time.Minute * 1
 
-	finalized, err := e.state.Final().Head()
-
-	if err != nil {
-		e.log.Fatal().Err(err).Msg("could not get finalized block on startup")
+func NewEngine(core *Core) *Engine {
+	e := &Engine{
+		core: core,
 	}
 
-	err = e.checkLastSealed(finalized.ID())
-	if err != nil {
-		e.log.Fatal().Err(err).Msg("execution consistency check failed on startup")
-	}
-	return e.unit.Ready()
-}
-
-func (e *Engine) Done() <-chan struct{} {
-	return e.unit.Done()
-}
+	e.ComponentManager = component.NewComponentManagerBuilder().
+		AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+			ready()
+			err := e.runLoop(ctx, DefaultTimeInterval)
+			if err != nil {
+				ctx.Throw(err)
+			}
+		}).
+		Build()
 
-// when a block is finalized check if the last sealed has been executed,
-// if it has been executed, check whether if the sealed result is consistent
-// with the executed result
-func (e *Engine) OnFinalizedBlock(block *model.Block) {
-	err := e.checkLastSealed(block.BlockID)
-	if err != nil {
-		e.log.Fatal().Err(err).Msg("execution consistency check failed")
-	}
+	return e
 }
 
-func (e *Engine) checkLastSealed(finalizedID flow.Identifier) error {
-	// TODO: better to query seals from protocol state,
-	// switch to state.Final().LastSealed() when available
-	seal, err := e.sealsDB.HighestInFork(finalizedID)
-	if err != nil {
-		return fmt.Errorf("could not get the last sealed for the finalized block: %w", err)
-	}
-
-	blockID := seal.BlockID
-	sealedCommit := seal.FinalState
-
-	mycommit, err := e.execState.StateCommitmentByBlockID(e.unit.Ctx(), blockID)
-	if errors.Is(err, storage.ErrNotFound) {
-		// have not executed the sealed block yet
-		// in other words, this can't detect execution fork, if the execution is behind
-		// the sealing
-		return nil
-	}
-
-	if err != nil {
-		return fmt.Errorf("could not get my state commitment OnFinalizedBlock, blockID: %v", blockID)
-	}
-
-	if mycommit != sealedCommit {
-		sealed, err := e.state.AtBlockID(blockID).Head()
-		if err != nil {
-			return fmt.Errorf("could not get sealed block when checkLastSealed: %v, err: %w", blockID, err)
+// runLoop executes the check once per tick.
+// Why use a timer instead of listening for finalization and execution events?
+// Because it is simpler: no subscription to those events is needed.
+// It also runs fewer checks. Note that the checker does not need to find the
+// first mismatched block; finding any mismatch is good enough.
+// A timer bounds the number of checks to one per interval.
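Usage-wise, the rewritten checker now splits into a passive `Core` (the comparison logic shown earlier) and this `Engine` (the ticker-driven component implemented in `runLoop` below). A sketch of how a node might wire them together, assuming only the constructors appearing in this diff; the surrounding start-up plumbing is schematic:

```go
package example

import (
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/engine/execution/checker"
	"github.com/onflow/flow-go/engine/execution/state"
	"github.com/onflow/flow-go/module/irrecoverable"
	"github.com/onflow/flow-go/state/protocol"
)

// startChecker wires Core into Engine and starts the component; the caller
// owns the signaler context and can wait on Ready/Done as usual.
func startChecker(
	ctx irrecoverable.SignalerContext,
	log zerolog.Logger,
	protocolState protocol.State,
	execState state.ExecutionState,
) *checker.Engine {
	core := checker.NewCore(log, protocolState, execState)
	eng := checker.NewEngine(core)
	eng.Start(ctx) // the single worker begins ticking once started
	return eng
}
```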
+func (e *Engine) runLoop(ctx context.Context, tickInterval time.Duration) error { + ticker := time.NewTicker(tickInterval) + defer ticker.Stop() // critical for ticker to be garbage collected + for { + select { + case <-ticker.C: + err := e.core.RunCheck() + if err != nil { + return err + } + case <-ctx.Done(): + return nil } - - return fmt.Errorf("execution result is different from the sealed result, height: %v, block_id: %v, sealed_commit: %x, my_commit: %x", - sealed.Height, blockID, sealedCommit, mycommit) } - - return nil } diff --git a/engine/execution/computation/committer/committer.go b/engine/execution/computation/committer/committer.go index 878ee0fde11..86d72db1ead 100644 --- a/engine/execution/computation/committer/committer.go +++ b/engine/execution/computation/committer/committer.go @@ -6,9 +6,11 @@ import ( "github.com/hashicorp/go-multierror" + "github.com/onflow/flow-go/engine/execution" execState "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" ) @@ -30,25 +32,26 @@ func NewLedgerViewCommitter( func (committer *LedgerViewCommitter) CommitView( snapshot *snapshot.ExecutionSnapshot, - baseState flow.StateCommitment, + baseStorageSnapshot execution.ExtendableStorageSnapshot, ) ( newCommit flow.StateCommitment, proof []byte, trieUpdate *ledger.TrieUpdate, + newStorageSnapshot execution.ExtendableStorageSnapshot, err error, ) { var err1, err2 error var wg sync.WaitGroup wg.Add(1) go func() { - proof, err2 = committer.collectProofs(snapshot, baseState) + proof, err2 = committer.collectProofs(snapshot, baseStorageSnapshot) wg.Done() }() - newCommit, trieUpdate, err1 = execState.CommitDelta( + newCommit, trieUpdate, newStorageSnapshot, err1 = execState.CommitDelta( committer.ledger, snapshot, - baseState) + baseStorageSnapshot) wg.Wait() if err1 != nil { @@ -62,16 +65,24 @@ func (committer *LedgerViewCommitter) CommitView( func (committer *LedgerViewCommitter) collectProofs( snapshot *snapshot.ExecutionSnapshot, - baseState flow.StateCommitment, + baseStorageSnapshot execution.ExtendableStorageSnapshot, ) ( proof []byte, err error, ) { - // get all deduplicated register IDs + baseState := baseStorageSnapshot.Commitment() + // Reason for including AllRegisterIDs (read and written registers) instead of ReadRegisterIDs (only read registers): + // AllRegisterIDs returns deduplicated register IDs that were touched by both + // reads and writes during the block execution. + // Verification nodes only need the registers in the storage proof that were touched by reads + // in order to execute transactions in a chunk. However, without the registers touched + // by writes, especially the interim trie nodes for them, verification nodes won't be + // able to reconstruct the trie root hash of the execution state post execution. 
That's why
+	// the storage proof needs both read and written registers, which is exactly what AllRegisterIDs returns.
 	allIds := snapshot.AllRegisterIDs()
 	keys := make([]ledger.Key, 0, len(allIds))
 	for _, id := range allIds {
-		keys = append(keys, execState.RegisterIDToKey(id))
+		keys = append(keys, convert.RegisterIDToLedgerKey(id))
 	}
 
 	query, err := ledger.NewQuery(ledger.State(baseState), keys)
diff --git a/engine/execution/computation/committer/committer_test.go b/engine/execution/computation/committer/committer_test.go
index 18657a67f13..b0f927c2807 100644
--- a/engine/execution/computation/committer/committer_test.go
+++ b/engine/execution/computation/committer/committer_test.go
@@ -1,48 +1,105 @@
 package committer_test
 
 import (
+	"fmt"
 	"testing"
 
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 
 	"github.com/onflow/flow-go/engine/execution/computation/committer"
+	"github.com/onflow/flow-go/engine/execution/storehouse"
 	"github.com/onflow/flow-go/fvm/storage/snapshot"
-	led "github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/convert"
+	"github.com/onflow/flow-go/ledger/common/pathfinder"
+	"github.com/onflow/flow-go/ledger/complete"
 	ledgermock "github.com/onflow/flow-go/ledger/mock"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/trace"
-	utils "github.com/onflow/flow-go/utils/unittest"
+	"github.com/onflow/flow-go/utils/unittest"
 )
 
 func TestLedgerViewCommitter(t *testing.T) {
 
-	t.Run("calls to set and prove", func(t *testing.T) {
+	// Verify that after committing a snapshot, a proof is generated
+	// and the changes are captured in the new storage snapshot.
+	t.Run("CommitView should return proof and state commitment", func(t *testing.T) {
 
-		ledger := new(ledgermock.Ledger)
-		com := committer.NewLedgerViewCommitter(ledger, trace.NewNoopTracer())
+		l := ledgermock.NewLedger(t)
+		committer := committer.NewLedgerViewCommitter(l, trace.NewNoopTracer())
 
-		var expectedStateCommitment led.State
-		copy(expectedStateCommitment[:], []byte{1, 2, 3})
-		ledger.On("Set", mock.Anything).
-			Return(expectedStateCommitment, nil, nil).
+		// CommitDelta will call ledger.Set and ledger.Prove
+
+		reg := unittest.MakeOwnerReg("key1", "val1")
+		startState := unittest.StateCommitmentFixture()
+
+		update, err := ledger.NewUpdate(ledger.State(startState), []ledger.Key{convert.RegisterIDToLedgerKey(reg.Key)}, []ledger.Value{reg.Value})
+		require.NoError(t, err)
+
+		expectedTrieUpdate, err := pathfinder.UpdateToTrieUpdate(update, complete.DefaultPathFinderVersion)
+		require.NoError(t, err)
+
+		endState := unittest.StateCommitmentFixture()
+		require.NotEqual(t, startState, endState)
+
+		// mock ledger.Set
+		l.On("Set", mock.Anything).
+			Return(func(update *ledger.Update) (newState ledger.State, trieUpdate *ledger.TrieUpdate, err error) {
+				if update.State().Equals(ledger.State(startState)) {
+					return ledger.State(endState), expectedTrieUpdate, nil
+				}
+				return ledger.DummyState, nil, fmt.Errorf("wrong update")
+			}).
 			Once()
 
-		expectedProof := led.Proof([]byte{2, 3, 4})
-		ledger.On("Prove", mock.Anything).
-			Return(expectedProof, nil).
+		// mock ledger.Prove
+		expectedProof := ledger.Proof([]byte{2, 3, 4})
+		l.On("Prove", mock.Anything).
+ Return(func(query *ledger.Query) (proof ledger.Proof, err error) { + if query.Size() != 1 { + return nil, fmt.Errorf("wrong query size: %v", query.Size()) + } + + k := convert.RegisterIDToLedgerKey(reg.Key) + if !query.Keys()[0].Equals(&k) { + return nil, fmt.Errorf("incorrect query key for prove: %v", query.Keys()[0]) + } + + return expectedProof, nil + }). Once() - newState, proof, _, err := com.CommitView( - &snapshot.ExecutionSnapshot{ - WriteSet: map[flow.RegisterID]flow.RegisterValue{ - flow.NewRegisterID("owner", "key"): []byte{1}, - }, + // previous block's storage snapshot + oldReg := unittest.MakeOwnerReg("key1", "oldvalue") + previousBlockSnapshot := storehouse.NewExecutingBlockSnapshot( + snapshot.MapStorageSnapshot{ + oldReg.Key: oldReg.Value, + }, + flow.StateCommitment(update.State()), + ) + + // this block's register updates + blockUpdates := &snapshot.ExecutionSnapshot{ + WriteSet: map[flow.RegisterID]flow.RegisterValue{ + reg.Key: oldReg.Value, + }, - utils.StateCommitmentFixture()) + } + + newCommit, proof, trieUpdate, newStorageSnapshot, err := committer.CommitView( + blockUpdates, + previousBlockSnapshot, + ) + require.NoError(t, err) - require.Equal(t, flow.StateCommitment(expectedStateCommitment), newState) + + // verify CommitView returns the expected proof and state commitment + require.Equal(t, previousBlockSnapshot.Commitment(), flow.StateCommitment(trieUpdate.RootHash)) + require.Equal(t, newCommit, newStorageSnapshot.Commitment()) + require.Equal(t, endState, newCommit) require.Equal(t, []uint8(expectedProof), proof) + require.True(t, expectedTrieUpdate.Equals(trieUpdate)) + }) } diff --git a/engine/execution/computation/committer/noop.go b/engine/execution/computation/committer/noop.go index dcdefbac634..b4549a78c15 100644 --- a/engine/execution/computation/committer/noop.go +++ b/engine/execution/computation/committer/noop.go @@ -1,6 +1,7 @@ package committer import ( + "github.com/onflow/flow-go/engine/execution" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" @@ -15,12 +16,17 @@ func NewNoopViewCommitter() *NoopViewCommitter { func (NoopViewCommitter) CommitView( _ *snapshot.ExecutionSnapshot, - s flow.StateCommitment, + baseStorageSnapshot execution.ExtendableStorageSnapshot, ) ( flow.StateCommitment, []byte, *ledger.TrieUpdate, + execution.ExtendableStorageSnapshot, error, ) { - return s, nil, nil, nil + + trieUpdate := &ledger.TrieUpdate{ + RootHash: ledger.RootHash(baseStorageSnapshot.Commitment()), + } + return baseStorageSnapshot.Commitment(), []byte{}, trieUpdate, baseStorageSnapshot, nil } diff --git a/engine/execution/computation/computer/computer.go b/engine/execution/computation/computer/computer.go index 779ab69f198..0da18cdb39f 100644 --- a/engine/execution/computation/computer/computer.go +++ b/engine/execution/computation/computer/computer.go @@ -3,26 +3,28 @@ package computer import ( "context" "fmt" - "time" + "sync" + "github.com/onflow/crypto/hash" "github.com/rs/zerolog" "go.opentelemetry.io/otel/attribute" otelTrace "go.opentelemetry.io/otel/trace" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine/execution" "github.com/onflow/flow-go/engine/execution/computation/result" "github.com/onflow/flow-go/engine/execution/utils" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/fvm/storage/errors" + "github.com/onflow/flow-go/fvm/storage/logical"
"github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/executiondatasync/provider" "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/state/protocol" ) const ( @@ -30,16 +32,23 @@ const ( ) type collectionInfo struct { - blockId flow.Identifier - blockIdStr string + blockId flow.Identifier + blockIdStr string + blockHeight uint64 collectionIndex int *entity.CompleteCollection - - isSystemTransaction bool } -type transactionRequest struct { +type ComputerTransactionType uint + +const ( + ComputerTransactionTypeUser ComputerTransactionType = iota + ComputerTransactionTypeSystem + ComputerTransactionTypeScheduled +) + +type TransactionRequest struct { collectionInfo txnId flow.Identifier @@ -47,6 +56,7 @@ type transactionRequest struct { txnIndex uint32 + transactionType ComputerTransactionType lastTransactionInCollection bool ctx fvm.Context @@ -59,12 +69,13 @@ func newTransactionRequest( collectionLogger zerolog.Logger, txnIndex uint32, txnBody *flow.TransactionBody, + transactionType ComputerTransactionType, lastTransactionInCollection bool, -) transactionRequest { +) TransactionRequest { txnId := txnBody.ID() txnIdStr := txnId.String() - return transactionRequest{ + return TransactionRequest{ collectionInfo: collection, txnId: txnId, txnIdStr: txnIdStr, @@ -80,6 +91,7 @@ func newTransactionRequest( txnId, txnIndex, txnBody), + transactionType: transactionType, lastTransactionInCollection: lastTransactionInCollection, } } @@ -101,29 +113,49 @@ type BlockComputer interface { type blockComputer struct { vm fvm.VM vmCtx fvm.Context + systemChunkCtx fvm.Context + callbackCtx fvm.Context metrics module.ExecutionMetrics tracer module.Tracer log zerolog.Logger - systemChunkCtx fvm.Context + systemTxn *flow.TransactionBody + processCallbackTxn *flow.TransactionBody committer ViewCommitter - executionDataProvider *provider.Provider + executionDataProvider provider.Provider signer module.Local spockHasher hash.Hasher receiptHasher hash.Hasher colResCons []result.ExecutedCollectionConsumer + protocolState protocol.SnapshotExecutionSubsetProvider + maxConcurrency int } -func SystemChunkContext(vmCtx fvm.Context, logger zerolog.Logger) fvm.Context { +// SystemChunkContext is the context for the system chunk transaction. +func SystemChunkContext(vmCtx fvm.Context, metrics module.ExecutionMetrics) fvm.Context { return fvm.NewContextFromParent( vmCtx, - fvm.WithContractDeploymentRestricted(false), - fvm.WithContractRemovalRestricted(false), fvm.WithAuthorizationChecksEnabled(false), fvm.WithSequenceNumberCheckAndIncrementEnabled(false), fvm.WithTransactionFeesEnabled(false), - fvm.WithServiceEventCollectionEnabled(), + fvm.WithMetricsReporter(metrics), + fvm.WithContractDeploymentRestricted(false), + fvm.WithContractRemovalRestricted(false), fvm.WithEventCollectionSizeLimit(SystemChunkEventCollectionMaxSize), fvm.WithMemoryAndInteractionLimitsDisabled(), + // only the system transaction is allowed to call the block entropy provider + fvm.WithRandomSourceHistoryCallAllowed(true), + fvm.WithAccountStorageLimit(false), + ) +} + +// CallbackContext is the context for the scheduled callback transactions. 
+func CallbackContext(vmCtx fvm.Context, metrics module.ExecutionMetrics) fvm.Context { + return fvm.NewContextFromParent( + vmCtx, + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithTransactionFeesEnabled(false), + fvm.WithMetricsReporter(metrics), ) } @@ -136,27 +168,54 @@ func NewBlockComputer( logger zerolog.Logger, committer ViewCommitter, signer module.Local, - executionDataProvider *provider.Provider, + executionDataProvider provider.Provider, colResCons []result.ExecutedCollectionConsumer, + state protocol.SnapshotExecutionSubsetProvider, + maxConcurrency int, ) (BlockComputer, error) { - systemChunkCtx := SystemChunkContext(vmCtx, logger) + if maxConcurrency < 1 { + return nil, fmt.Errorf("invalid maxConcurrency: %d", maxConcurrency) + } + + // this is a safeguard to prevent scripts from writing to the program cache on Execution nodes. + // writes are only allowed by transactions. + if vmCtx.AllowProgramCacheWritesInScripts { + return nil, fmt.Errorf("program cache writes are not allowed in scripts on Execution nodes") + } + vmCtx = fvm.NewContextFromParent( vmCtx, fvm.WithMetricsReporter(metrics), fvm.WithTracer(tracer)) + + systemTxn, err := blueprints.SystemChunkTransaction(vmCtx.Chain) + if err != nil { + return nil, fmt.Errorf("could not build system chunk transaction: %w", err) + } + + processCallbackTxn, err := blueprints.ProcessCallbacksTransaction(vmCtx.Chain) + if err != nil { + return nil, fmt.Errorf("failed to generate callbacks script: %w", err) + } + return &blockComputer{ vm: vm, vmCtx: vmCtx, + callbackCtx: CallbackContext(vmCtx, metrics), + systemChunkCtx: SystemChunkContext(vmCtx, metrics), metrics: metrics, tracer: tracer, log: logger, - systemChunkCtx: systemChunkCtx, + systemTxn: systemTxn, + processCallbackTxn: processCallbackTxn, committer: committer, executionDataProvider: executionDataProvider, signer: signer, spockHasher: utils.NewSPOCKHasher(), receiptHasher: utils.NewExecutionReceiptHasher(), colResCons: colResCons, + protocolState: state, + maxConcurrency: maxConcurrency, }, nil } @@ -184,22 +243,43 @@ func (e *blockComputer) ExecuteBlock( return results, nil } -func (e *blockComputer) queueTransactionRequests( +func (e *blockComputer) userTransactionsCount(collections []*entity.CompleteCollection) int { + count := 0 + for _, collection := range collections { + count += len(collection.Collection.Transactions) + } + + return count +} + +// queueUserTransactions enqueues transaction processing requests for all user +// transactions in the given block and returns them as a closed, buffered channel, +// ready to be drained by the execution workers. +// +// System transactions are queued separately by queueSystemTransactions, once the +// final composition of the system collection is known.
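queueUserTransactions, defined next, follows a common Go shape: fill a buffered channel to capacity, close it, and return it, so that any number of workers can range over it until it drains. A runnable sketch of the same shape, with a toy request type and hypothetical names rather than the flow-go ones:

package main

import (
	"fmt"
	"sync"
)

type request struct{ id int }

// queueRequests buffers every request up front and closes the channel,
// so sends never block and consumers terminate once the queue drains.
func queueRequests(n int) chan request {
	queue := make(chan request, n)
	defer close(queue)
	for i := 0; i < n; i++ {
		queue <- request{id: i}
	}
	return queue
}

func main() {
	queue := queueRequests(5)

	var wg sync.WaitGroup
	const workers = 2
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func(w int) {
			defer wg.Done()
			for req := range queue { // exits when the closed queue is empty
				fmt.Printf("worker %d got request %d\n", w, req.id)
			}
		}(w)
	}
	wg.Wait()
}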
+func (e *blockComputer) queueUserTransactions( blockId flow.Identifier, - blockIdStr string, blockHeader *flow.Header, - derivedBlockData *derived.DerivedBlockData, rawCollections []*entity.CompleteCollection, - systemTxnBody *flow.TransactionBody, - requestQueue chan transactionRequest, -) { + userTxCount int, +) chan TransactionRequest { + txQueue := make(chan TransactionRequest, userTxCount) + defer close(txQueue) + txnIndex := uint32(0) + blockIdStr := blockId.String() - // TODO(patrick): remove derivedBlockData from context collectionCtx := fvm.NewContextFromParent( e.vmCtx, fvm.WithBlockHeader(blockHeader), - fvm.WithDerivedBlockData(derivedBlockData)) + fvm.WithProtocolStateSnapshot(e.protocolState.AtBlockID(blockId)), + ) for idx, collection := range rawCollections { collectionLogger := collectionCtx.Logger.With(). @@ -210,63 +290,78 @@ func (e *blockComputer) queueTransactionRequests( Logger() collectionInfo := collectionInfo{ - blockId: blockId, - blockIdStr: blockIdStr, - collectionIndex: idx, - CompleteCollection: collection, - isSystemTransaction: false, + blockId: blockId, + blockIdStr: blockIdStr, + blockHeight: blockHeader.Height, + collectionIndex: idx, + CompleteCollection: collection, } - for i, txnBody := range collection.Transactions { - requestQueue <- newTransactionRequest( + for i, txnBody := range collection.Collection.Transactions { + txQueue <- newTransactionRequest( collectionInfo, collectionCtx, collectionLogger, txnIndex, txnBody, - i == len(collection.Transactions)-1) + ComputerTransactionTypeUser, + i == len(collection.Collection.Transactions)-1, + ) txnIndex += 1 } - } - // TODO(patrick): remove derivedBlockData from context - systemCtx := fvm.NewContextFromParent( - e.systemChunkCtx, - fvm.WithBlockHeader(blockHeader), - fvm.WithDerivedBlockData(derivedBlockData)) - systemCollectionLogger := systemCtx.Logger.With(). - Str("block_id", blockIdStr). - Uint64("height", blockHeader.Height). - Bool("system_chunk", true). + return txQueue +} + +func (e *blockComputer) queueSystemTransactions( + callbackCtx fvm.Context, + systemChunkCtx fvm.Context, + systemCollection collectionInfo, + systemTxn *flow.TransactionBody, + executeCallbackTxs []*flow.TransactionBody, + txnIndex uint32, + systemLogger zerolog.Logger, +) chan TransactionRequest { + + systemTxLogger := systemLogger.With(). + Uint32("num_txs", uint32(len(systemCollection.CompleteCollection.Collection.Transactions))). Bool("system_transaction", true). Logger() - systemCollectionInfo := collectionInfo{ - blockId: blockId, - blockIdStr: blockIdStr, - collectionIndex: len(rawCollections), - CompleteCollection: &entity.CompleteCollection{ - Transactions: []*flow.TransactionBody{systemTxnBody}, - }, - isSystemTransaction: true, - } - requestQueue <- newTransactionRequest( - systemCollectionInfo, - systemCtx, - systemCollectionLogger, - txnIndex, - systemTxnBody, - true) -} + scheduledTxLogger := systemLogger.With(). + Uint32("num_txs", uint32(len(systemCollection.CompleteCollection.Collection.Transactions))). + Bool("scheduled_transaction", true). 
+ Logger() -func numberOfTransactionsInBlock(collections []*entity.CompleteCollection) int { - numTxns := 1 // there's one system transaction per block - for _, collection := range collections { - numTxns += len(collection.Transactions) + txQueue := make(chan TransactionRequest, len(executeCallbackTxs)+1) + defer close(txQueue) + + for _, txBody := range executeCallbackTxs { + txQueue <- newTransactionRequest( + systemCollection, + callbackCtx, + scheduledTxLogger, + txnIndex, + txBody, + ComputerTransactionTypeScheduled, + false, + ) + + txnIndex++ } - return numTxns + txQueue <- newTransactionRequest( + systemCollection, + systemChunkCtx, + systemTxLogger, + txnIndex, + systemTxn, + ComputerTransactionTypeSystem, + true, + ) + + return txQueue } func (e *blockComputer) executeBlock( @@ -284,28 +379,17 @@ func (e *blockComputer) executeBlock( return nil, fmt.Errorf("executable block start state is not set") } - blockId := block.ID() - blockIdStr := blockId.String() - rawCollections := block.Collections() + userTxCount := e.userTransactionsCount(rawCollections) blockSpan := e.tracer.StartSpanFromParent( - e.tracer.BlockRootSpan(blockId), + e.tracer.BlockRootSpan(block.BlockID()), trace.EXEComputeBlock) blockSpan.SetAttributes( - attribute.String("block_id", blockIdStr), + attribute.String("block_id", block.BlockID().String()), attribute.Int("collection_counts", len(rawCollections))) defer blockSpan.End() - systemTxn, err := blueprints.SystemChunkTransaction(e.vmCtx.Chain) - if err != nil { - return nil, fmt.Errorf( - "could not get system chunk transaction: %w", - err) - } - - numTxns := numberOfTransactionsInBlock(rawCollections) - collector := newResultCollector( e.tracer, blockSpan, @@ -317,42 +401,41 @@ func (e *blockComputer) executeBlock( e.receiptHasher, parentBlockExecutionResultID, block, - numTxns, - e.colResCons) + // Add buffer just in case result collection becomes slower than the execution + e.maxConcurrency*2, + e.colResCons, + baseSnapshot, + ) defer collector.Stop() - requestQueue := make(chan transactionRequest, numTxns) - e.queueTransactionRequests( - blockId, - blockIdStr, - block.Block.Header, + database := newTransactionCoordinator( + e.vm, + baseSnapshot, derivedBlockData, - rawCollections, - systemTxn, - requestQueue) - close(requestQueue) + collector) - snapshotTree := snapshot.NewSnapshotTree(baseSnapshot) - for request := range requestQueue { - txnExecutionSnapshot, output, err := e.executeTransaction( - blockSpan, - request, - snapshotTree) - if err != nil { - prefix := "" - if request.isSystemTransaction { - prefix = "system " - } + e.executeUserTransactions( + block, + blockSpan, + database, + rawCollections, + userTxCount, + ) - return nil, fmt.Errorf( - "failed to execute %stransaction at txnIndex %v: %w", - prefix, - request.txnIndex, - err) - } + err := e.executeSystemTransactions( + block, + blockSpan, + database, + rawCollections, + userTxCount, + ) + if err != nil { + return nil, err + } - collector.AddTransactionResult(request, txnExecutionSnapshot, output) - snapshotTree = snapshotTree.Append(txnExecutionSnapshot) + err = database.Error() + if err != nil { + return nil, err } res, err := collector.Finalize(ctx) @@ -361,7 +444,7 @@ func (e *blockComputer) executeBlock( } e.log.Debug(). - Hex("block_id", logging.Entity(block)). + Str("block_id", block.BlockID().String()). 
Msg("all views committed") e.metrics.ExecutionBlockCachedPrograms(derivedBlockData.CachedPrograms()) @@ -369,19 +452,328 @@ func (e *blockComputer) executeBlock( return res, nil } +// executeUserTransactions executes the user transactions in the block. +// It queues the user transactions into a request queue and then executes them in parallel. +func (e *blockComputer) executeUserTransactions( + block *entity.ExecutableBlock, + blockSpan otelTrace.Span, + database *transactionCoordinator, + rawCollections []*entity.CompleteCollection, + userTxCount int, +) { + txQueue := e.queueUserTransactions( + block.BlockID(), + block.Block.ToHeader(), + rawCollections, + userTxCount, + ) + + e.executeQueue(blockSpan, database, txQueue) +} + +// executeSystemTransactions executes all system transactions in the block as part of the system collection. +// +// When scheduled callbacks are enabled, system transactions are executed in the following order: +// 1. Process callback transaction - queries the scheduler contract to identify ready callbacks +// and emits events containing callback IDs and execution effort requirements +// 2. Callback execution transactions - one transaction per callback ID from step 1 events, +// each executing a single scheduled callback with its specified effort limit +// 3. System chunk transaction - performs standard system operations +// +// When scheduled callbacks are disabled, only the system chunk transaction is executed. +// +// All errors are indicators of bugs or corrupted internal state (continuation impossible) +func (e *blockComputer) executeSystemTransactions( + block *entity.ExecutableBlock, + blockSpan otelTrace.Span, + database *transactionCoordinator, + rawCollections []*entity.CompleteCollection, + userTxCount int, +) error { + userCollectionCount := len(rawCollections) + txIndex := uint32(userTxCount) + + callbackCtx := fvm.NewContextFromParent( + e.callbackCtx, + fvm.WithBlockHeader(block.Block.ToHeader()), + fvm.WithProtocolStateSnapshot(e.protocolState.AtBlockID(block.BlockID())), + ) + + systemChunkCtx := fvm.NewContextFromParent( + e.systemChunkCtx, + fvm.WithBlockHeader(block.Block.ToHeader()), + fvm.WithProtocolStateSnapshot(e.protocolState.AtBlockID(block.BlockID())), + ) + + systemCollectionInfo := collectionInfo{ + blockId: block.BlockID(), + blockIdStr: block.BlockID().String(), + blockHeight: block.Block.Height, + collectionIndex: userCollectionCount, + CompleteCollection: nil, // We do not yet know all the scheduled callbacks, so postpone construction of the collection. + } + + systemChunkLogger := callbackCtx.Logger.With(). + Str("block_id", block.BlockID().String()). + Uint64("height", block.Block.Height). + Bool("system_chunk", true). + Int("num_collections", userCollectionCount). + Logger() + + var callbackTxs []*flow.TransactionBody + + if e.vmCtx.ScheduleCallbacksEnabled { + // We pass in the `systemCollectionInfo` here. However, note that at this point, the composition of the system chunk + // is not yet known. Specifically, the `entity.CompleteCollection` represents the *final* output of a process and is + // immutable by protocol mandate. If we had a bug in our software that accidentally illegally mutated such structs, + // likely the node encountering that bug would misbehave and get slashed, or in the worst case the flow protocol might + // be compromised. Therefore, we have the rigorous convention in our code base that the `CompleteCollection` is only + // constructed once the final composition of the system chunk has been determined. 
+ // To that end, the CompleteCollection is nil here, such that any attempt to access the Collection will panic. + callbacks, updatedTxnIndex, err := e.executeProcessCallback( + callbackCtx, + systemCollectionInfo, + database, + blockSpan, + txIndex, + systemChunkLogger, + ) + if err != nil { + return err + } + + callbackTxs = callbacks + txIndex = updatedTxnIndex + + finalCollection, err := flow.NewCollection(flow.UntrustedCollection{ + Transactions: append(append([]*flow.TransactionBody{e.processCallbackTxn}, callbackTxs...), e.systemTxn), + }) + if err != nil { + return err + } + systemCollectionInfo.CompleteCollection = &entity.CompleteCollection{ + Collection: finalCollection, + } + } else { + finalCollection, err := flow.NewCollection(flow.UntrustedCollection{ + Transactions: []*flow.TransactionBody{e.systemTxn}, + }) + if err != nil { + return err + } + systemCollectionInfo.CompleteCollection = &entity.CompleteCollection{ + Collection: finalCollection, + } + } + + txQueue := e.queueSystemTransactions( + callbackCtx, + systemChunkCtx, + systemCollectionInfo, + e.systemTxn, + callbackTxs, + txIndex, + systemChunkLogger, + ) + + e.executeQueue(blockSpan, database, txQueue) + + return nil +} + +// executeQueue executes the transactions in the request queue in parallel, using maxConcurrency workers. +func (e *blockComputer) executeQueue( + blockSpan otelTrace.Span, + database *transactionCoordinator, + txQueue chan TransactionRequest, +) { + wg := &sync.WaitGroup{} + wg.Add(e.maxConcurrency) + + for range e.maxConcurrency { + go e.executeTransactions( + blockSpan, + database, + txQueue, + wg) + } + + wg.Wait() +} + +// executeProcessCallback submits a transaction that invokes the `process` method +// on the callback scheduler contract. +// +// The `process` method scans for scheduled callbacks and emits an event for each that should +// be executed. These emitted events are used to construct callback execution transactions, +// which are then added to the system transaction collection. +// +// If executing the `process` transaction fails with an irrecoverable error, a fatal error is returned. +// If the transaction executes but its output contains an error, that error is logged and the block +// proceeds without any scheduled callback transactions. +// +// Note: this transaction is executed serially and not concurrently with the system transaction. +// This is because it's unclear whether the callback executions triggered by `process` +// will result in additional system transactions. +// In theory, if no additional transactions are emitted, concurrent execution could be optimized. +// However, due to the added complexity, this optimization was deferred. +func (e *blockComputer) executeProcessCallback( + systemCtx fvm.Context, + systemCollectionInfo collectionInfo, + database *transactionCoordinator, + blockSpan otelTrace.Span, + txnIndex uint32, + systemLogger zerolog.Logger, +) ([]*flow.TransactionBody, uint32, error) { + callbackLogger := systemLogger.With(). + Bool("system_transaction", true).
+ Logger() + + request := newTransactionRequest( + systemCollectionInfo, + systemCtx, + callbackLogger, + txnIndex, + e.processCallbackTxn, + ComputerTransactionTypeSystem, + false) + + txnIndex++ + + txn, err := e.executeTransactionInternal(blockSpan, database, request, 0) + if err != nil { + snapshotTime := logical.Time(0) + if txn != nil { + snapshotTime = txn.SnapshotTime() + } + + return nil, 0, fmt.Errorf( + "failed to execute system process transaction %v (%d@%d) for block %s at height %v: %w", + request.txnIdStr, + request.txnIndex, + snapshotTime, + request.blockIdStr, + request.ctx.BlockHeader.Height, + err) + } + + if txn.Output().Err != nil { + // if the process transaction fails we log the critical error but don't return an error + // so that block execution continues and only the scheduled transactions halt + callbackLogger.Error(). + Err(txn.Output().Err). + Bool("critical_error", true). + Uint64("height", request.ctx.BlockHeader.Height). + Msg("system process transaction output error") + + return nil, txnIndex, nil + } + + callbackTxs, err := blueprints.ExecuteCallbacksTransactions(e.vmCtx.Chain, txn.Output().Events) + if err != nil { + return nil, 0, err + } + + if len(callbackTxs) > 0 { + // calculate total computation limits for execute callback transactions + var totalExecuteComputationLimits uint64 + for _, tx := range callbackTxs { + totalExecuteComputationLimits += tx.GasLimit + } + + // report metrics for callbacks executed + e.metrics.ExecutionCallbacksExecuted( + len(callbackTxs), + txn.Output().ComputationUsed, + totalExecuteComputationLimits, + ) + } + + return callbackTxs, txnIndex, nil +} + +func (e *blockComputer) executeTransactions( + blockSpan otelTrace.Span, + database *transactionCoordinator, + requestQueue chan TransactionRequest, + wg *sync.WaitGroup, +) { + defer wg.Done() + + for request := range requestQueue { + attempt := 0 + for { + request.ctx.Logger.Info(). + Int("attempt", attempt). + Msg("executing transaction") + + attempt += 1 + err := e.executeTransaction(blockSpan, database, request, attempt) + + if errors.IsRetryableConflictError(err) { + request.ctx.Logger.Info(). + Int("attempt", attempt). + Str("conflict_error", err.Error()). + Msg("conflict detected. 
retrying transaction") + continue + } + + if err != nil { + database.AbortAllOutstandingTransactions(err) + return + } + + break // process next transaction + } + } +} + func (e *blockComputer) executeTransaction( - parentSpan otelTrace.Span, - request transactionRequest, - storageSnapshot snapshot.StorageSnapshot, + blockSpan otelTrace.Span, + database *transactionCoordinator, + request TransactionRequest, + attempt int, +) error { + txn, err := e.executeTransactionInternal( + blockSpan, + database, + request, + attempt) + if err != nil { + prefix := "" + if request.transactionType == ComputerTransactionTypeSystem { + prefix = "system " + } + + snapshotTime := logical.Time(0) + if txn != nil { + snapshotTime = txn.SnapshotTime() + } + + return fmt.Errorf( + "failed to execute %stransaction %v (%d@%d) for block %s "+ + "at height %v: %w", + prefix, + request.txnIdStr, + request.txnIndex, + snapshotTime, + request.blockIdStr, + request.ctx.BlockHeader.Height, + err) + } + + return nil +} + +func (e *blockComputer) executeTransactionInternal( + blockSpan otelTrace.Span, + database *transactionCoordinator, + request TransactionRequest, + attempt int, ) ( - *snapshot.ExecutionSnapshot, - fvm.ProcedureOutput, + *transaction, error, ) { - startedAt := time.Now() - txSpan := e.tracer.StartSampledSpanFromParent( - parentSpan, + blockSpan, request.txnId, trace.EXEComputeTransaction) txSpan.SetAttributes( @@ -391,66 +783,52 @@ func (e *blockComputer) executeTransaction( ) defer txSpan.End() - logger := e.log.With(). - Str("tx_id", request.txnIdStr). - Uint32("tx_index", request.txnIndex). - Str("block_id", request.blockIdStr). - Uint64("height", request.ctx.BlockHeader.Height). - Bool("system_chunk", request.isSystemTransaction). - Bool("system_transaction", request.isSystemTransaction). - Logger() - logger.Info().Msg("executing transaction in fvm") - request.ctx = fvm.NewContextFromParent(request.ctx, fvm.WithSpan(txSpan)) - executionSnapshot, output, err := e.vm.Run( - request.ctx, - request.TransactionProcedure, - storageSnapshot) + txn, err := database.NewTransaction(request, attempt) if err != nil { - return nil, fvm.ProcedureOutput{}, fmt.Errorf( - "failed to execute transaction %v for block %s at height %v: %w", - request.txnIdStr, - request.blockIdStr, - request.ctx.BlockHeader.Height, - err) + return nil, err } + defer txn.Cleanup() - logger = logger.With(). - Uint64("computation_used", output.ComputationUsed). - Uint64("memory_used", output.MemoryEstimate). - Int64("time_spent_in_ms", time.Since(startedAt).Milliseconds()). - Logger() + err = txn.Preprocess() + if err != nil { + return txn, err + } - if output.Err != nil { - logger = logger.With(). - Str("error_message", output.Err.Error()). - Uint16("error_code", uint16(output.Err.Code())). - Logger() - logger.Info().Msg("transaction execution failed") - - if request.isSystemTransaction { - // This log is used as the data source for an alert on grafana. - // The system_chunk_error field must not be changed without adding - // the corresponding changes in grafana. - // https://github.com/dapperlabs/flow-internal/issues/1546 - logger.Error(). - Bool("system_chunk_error", true). - Bool("system_transaction_error", true). - Bool("critical_error", true). - Msg("error executing system chunk transaction") + // Validating here gives us an opportunity to early abort/retry the + // transaction in case the conflict is detectable after preprocessing. 
+ // This is strictly an optimization and hence we don't need to wait for + // updates (removing this validate call won't impact correctness). + err = txn.Validate() + if err != nil { + return txn, err + } + + err = txn.Execute() + if err != nil { + return txn, err + } + + err = txn.Finalize() + if err != nil { + return txn, err + } + + // Snapshot time smaller than execution time indicates there are outstanding + // transaction(s) that must be committed before this transaction can be + // committed. + for txn.SnapshotTime() < request.ExecutionTime() { + err = txn.WaitForUpdates() + if err != nil { + return txn, err + } + + err = txn.Validate() + if err != nil { + return txn, err } - } else { - logger.Info().Msg("transaction executed successfully") } - e.metrics.ExecutionTransactionExecuted( - time.Since(startedAt), - output.ComputationUsed, - output.MemoryEstimate, - len(output.Events), - flow.EventsList(output.Events).ByteSize(), - output.Err != nil, - ) - return executionSnapshot, output, nil + return txn, txn.Commit() } diff --git a/engine/execution/computation/computer/computer_test.go b/engine/execution/computation/computer/computer_test.go index b8af570e0e6..1ae0fe8c667 100644 --- a/engine/execution/computation/computer/computer_test.go +++ b/engine/execution/computation/computer/computer_test.go @@ -1,22 +1,27 @@ package computer_test import ( + "bytes" "context" + "encoding/json" "fmt" "math/rand" + "strings" + "sync" + "sync/atomic" "testing" - "github.com/onflow/cadence" - "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" - "github.com/onflow/cadence/runtime/sema" - "github.com/onflow/cadence/runtime/stdlib" - + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/ccf" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/sema" + "github.com/onflow/cadence/stdlib" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -26,18 +31,25 @@ import ( "github.com/onflow/flow-go/engine/execution/computation/committer" "github.com/onflow/flow-go/engine/execution/computation/computer" computermock "github.com/onflow/flow-go/engine/execution/computation/computer/mock" + "github.com/onflow/flow-go/engine/execution/storehouse" "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/environment" fvmErrors "github.com/onflow/flow-go/fvm/errors" fvmmock "github.com/onflow/flow-go/fvm/mock" reusableRuntime "github.com/onflow/flow-go/fvm/runtime" + "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/fvm/storage/logical" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/state" "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/ledger/common/pathfinder" + "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/epochs" 
"github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/provider" @@ -50,6 +62,10 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) +const ( + testMaxConcurrency = 2 +) + func incStateCommitment(startState flow.StateCommitment) flow.StateCommitment { endState := flow.StateCommitment(startState) endState[0] += 1 @@ -62,42 +78,63 @@ type fakeCommitter struct { func (committer *fakeCommitter) CommitView( view *snapshot.ExecutionSnapshot, - startState flow.StateCommitment, + baseStorageSnapshot execution.ExtendableStorageSnapshot, ) ( flow.StateCommitment, []byte, *ledger.TrieUpdate, + execution.ExtendableStorageSnapshot, error, ) { committer.callCount++ + startState := baseStorageSnapshot.Commitment() endState := incStateCommitment(startState) - trieUpdate := &ledger.TrieUpdate{} - trieUpdate.RootHash[0] = byte(committer.callCount) - return endState, + reg := unittest.MakeOwnerReg("key", fmt.Sprintf("%v", committer.callCount)) + regKey := convert.RegisterIDToLedgerKey(reg.Key) + path, err := pathfinder.KeyToPath( + regKey, + complete.DefaultPathFinderVersion, + ) + if err != nil { + return flow.DummyStateCommitment, nil, nil, nil, err + } + trieUpdate := &ledger.TrieUpdate{ + RootHash: ledger.RootHash(startState), + Paths: []ledger.Path{ + path, + }, + Payloads: []*ledger.Payload{ + ledger.NewPayload(regKey, reg.Value), + }, + } + + newStorageSnapshot := baseStorageSnapshot.Extend(endState, map[flow.RegisterID]flow.RegisterValue{ + reg.Key: reg.Value, + }) + + return newStorageSnapshot.Commitment(), []byte{byte(committer.callCount)}, trieUpdate, + newStorageSnapshot, nil } func TestBlockExecutor_ExecuteBlock(t *testing.T) { - rag := &RandomAddressGenerator{} executorID := unittest.IdentifierFixture() me := new(modulemock.Local) me.On("NodeID").Return(executorID) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) t.Run("single collection", func(t *testing.T) { - execCtx := fvm.NewContext( - fvm.WithDerivedBlockData(derived.NewEmptyDerivedBlockData(0)), - ) + execCtx := fvm.NewContext() vm := &testVM{ t: t, @@ -110,24 +147,23 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { exemetrics := new(modulemock.ExecutionMetrics) exemetrics.On("ExecutionBlockExecuted", - mock.Anything, // duration - mock.Anything). // stats + mock.Anything, + mock.Anything). Return(nil). Times(1) exemetrics.On("ExecutionCollectionExecuted", - mock.Anything, // duration - mock.Anything). // stats + mock.Anything, + mock.Anything). Return(nil). Times(2) // 1 collection + system collection exemetrics.On("ExecutionTransactionExecuted", - mock.Anything, // duration - mock.Anything, // computation used - mock.Anything, // memory used - mock.Anything, // number of events - mock.Anything, // size of events - false). // no failure + mock.Anything, + mock.MatchedBy(func(arg module.TransactionExecutionResultStats) bool { + return !arg.Failed // only successful transactions + }), + mock.Anything). Return(nil). 
Times(2 + 1) // 2 txs in collection + system chunk tx @@ -165,7 +201,9 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { committer, me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) // create a block with 1 collection with 2 transactions @@ -183,7 +221,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { require.Equal(t, 2, committer.callCount) - assert.Equal(t, block.ID(), result.BlockExecutionData.BlockID) + assert.Equal(t, block.BlockID(), result.BlockExecutionData.BlockID) expectedChunk1EndState := incStateCommitment(*block.StartState) expectedChunk2EndState := incStateCommitment(expectedChunk1EndState) @@ -200,7 +238,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { t, parentBlockExecutionResultID, receipt.PreviousResultID) - assert.Equal(t, block.ID(), receipt.BlockID) + assert.Equal(t, block.BlockID(), receipt.BlockID) assert.NotEqual(t, flow.ZeroID, receipt.ExecutionDataID) assert.Len(t, receipt.Chunks, 1+1) // +1 system chunk @@ -208,7 +246,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { chunk1 := receipt.Chunks[0] eventCommits := result.AllEventCommitments() - assert.Equal(t, block.ID(), chunk1.BlockID) + assert.Equal(t, block.BlockID(), chunk1.BlockID) assert.Equal(t, uint(0), chunk1.CollectionIndex) assert.Equal(t, uint64(2), chunk1.NumberOfTransactions) assert.Equal(t, eventCommits[0], chunk1.EventCollection) @@ -220,7 +258,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { assert.Equal(t, expectedChunk1EndState, chunk1.EndState) chunk2 := receipt.Chunks[1] - assert.Equal(t, block.ID(), chunk2.BlockID) + assert.Equal(t, block.BlockID(), chunk2.BlockID) assert.Equal(t, uint(1), chunk2.CollectionIndex) assert.Equal(t, uint64(1), chunk2.NumberOfTransactions) assert.Equal(t, eventCommits[1], chunk2.EventCollection) @@ -234,7 +272,8 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { // Verify ChunkDataPacks - chunkDataPacks := result.AllChunkDataPacks() + chunkDataPacks, err := result.AllChunkDataPacks() + require.NoError(t, err) assert.Len(t, chunkDataPacks, 1+1) // +1 system chunk chunkDataPack1 := chunkDataPacks[0] @@ -261,14 +300,17 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { chunkDataPack1.Collection, chunkExecutionData1.Collection) assert.NotNil(t, chunkExecutionData1.TrieUpdate) - assert.Equal(t, byte(1), chunkExecutionData1.TrieUpdate.RootHash[0]) + assert.Equal(t, ledger.RootHash(chunk1.StartState), chunkExecutionData1.TrieUpdate.RootHash) chunkExecutionData2 := result.ChunkExecutionDatas[1] assert.NotNil(t, chunkExecutionData2.Collection) assert.NotNil(t, chunkExecutionData2.TrieUpdate) - assert.Equal(t, byte(2), chunkExecutionData2.TrieUpdate.RootHash[0]) + assert.Equal(t, ledger.RootHash(chunk2.StartState), chunkExecutionData2.TrieUpdate.RootHash) - assert.Equal(t, 3, vm.callCount) + assert.GreaterOrEqual(t, vm.CallCount(), 3) + // if every transaction is retried once, then the call count should be + // (1+totalTransactionCount) /2 * totalTransactionCount + assert.LessOrEqual(t, vm.CallCount(), (1+3)/2*3) }) t.Run("empty block still computes system chunk", func(t *testing.T) { @@ -298,22 +340,26 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { committer, me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) // create an empty block block := generateBlock(0, 0, rag) derivedBlockData := derived.NewEmptyDerivedBlockData(0) - vm.On("Run", mock.Anything, mock.Anything, mock.Anything). 
- Return( - &snapshot.ExecutionSnapshot{}, - fvm.ProcedureOutput{}, - nil). + vm.On("NewExecutor", mock.Anything, mock.Anything, mock.Anything). + Return(noOpExecutor{}). Once() // just system chunk + snapshot := storehouse.NewExecutingBlockSnapshot( + snapshot.MapStorageSnapshot{}, + unittest.StateCommitmentFixture(), + ) + committer.On("CommitView", mock.Anything, mock.Anything). - Return(nil, nil, nil, nil). + Return(nil, nil, nil, snapshot, nil). Once() // just system chunk result, err := exe.ExecuteBlock( @@ -333,9 +379,9 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { }) t.Run("system chunk transaction should not fail", func(t *testing.T) { - // include all fees. System chunk should ignore them contextOptions := []fvm.Option{ + fvm.WithEVMEnabled(true), fvm.WithTransactionFeesEnabled(true), fvm.WithAccountStorageLimit(true), fvm.WithBlocks(&environment.NoopBlockFinder{}), @@ -397,14 +443,21 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { comm, me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) // create an empty block block := generateBlock(0, 0, rag) + snapshot := storehouse.NewExecutingBlockSnapshot( + snapshot.MapStorageSnapshot{}, + unittest.StateCommitmentFixture(), + ) + comm.On("CommitView", mock.Anything, mock.Anything). - Return(nil, nil, nil, nil). + Return(nil, nil, nil, snapshot, nil). Once() // just system chunk result, err := exe.ExecuteBlock( @@ -455,7 +508,9 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { committer, me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) collectionCount := 2 @@ -468,8 +523,13 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { block := generateBlock(collectionCount, transactionsPerCollection, rag) derivedBlockData := derived.NewEmptyDerivedBlockData(0) + snapshot := storehouse.NewExecutingBlockSnapshot( + snapshot.MapStorageSnapshot{}, + unittest.StateCommitmentFixture(), + ) + committer.On("CommitView", mock.Anything, mock.Anything). - Return(nil, nil, nil, nil). + Return(nil, nil, nil, snapshot, nil). 
Times(collectionCount + 1) result, err := exe.ExecuteBlock( @@ -507,7 +567,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { expectedResults := make([]flow.TransactionResult, 0) for _, c := range block.CompleteCollections { - for _, t := range c.Transactions { + for _, t := range c.Collection.Transactions { txResult := flow.TransactionResult{ TransactionID: t.ID(), ErrorMessage: fvmErrors.NewInvalidAddressErrorf( @@ -522,13 +582,16 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { assertEventHashesMatch(t, collectionCount+1, result) - assert.Equal(t, totalTransactionCount, vm.callCount) + assert.GreaterOrEqual(t, vm.CallCount(), totalTransactionCount) + // if every transaction is retried once, then the call count should be + // (1+totalTransactionCount) /2 * totalTransactionCount + assert.LessOrEqual(t, vm.CallCount(), (1+totalTransactionCount)/2*totalTransactionCount) }) + // TODO: this test is flaky with a low probability of failing t.Run( "service events are emitted", func(t *testing.T) { execCtx := fvm.NewContext( - fvm.WithServiceEventCollectionEnabled(), fvm.WithAuthorizationChecksEnabled(false), fvm.WithSequenceNumberCheckAndIncrementEnabled(false), ) @@ -536,22 +599,14 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { collectionCount := 2 transactionsPerCollection := 2 - totalTransactionCount := (collectionCount * transactionsPerCollection) + 1 // +1 for system chunk - // create a block with 2 collections with 2 transactions each block := generateBlock(collectionCount, transactionsPerCollection, rag) - ordinaryEvent := cadence.Event{ - EventType: &cadence.EventType{ - Location: stdlib.FlowLocation{}, - QualifiedIdentifier: "what.ever", - }, - } - - serviceEvents, err := systemcontracts.ServiceEventsForChain(execCtx.Chain.ChainID()) - require.NoError(t, err) + chainID := execCtx.Chain.ChainID() + serviceEvents := systemcontracts.ServiceEventsForChain(chainID) - payload, err := json.Decode(nil, []byte(unittest.EpochSetupFixtureJSON)) + randomSource := unittest.EpochSetupRandomSourceFixture() + payload, err := ccf.Decode(nil, unittest.EpochSetupFixtureCCF(randomSource)) require.NoError(t, err) serviceEventA, ok := payload.(cadence.Event) @@ -562,7 +617,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { } serviceEventA.EventType.QualifiedIdentifier = serviceEvents.EpochSetup.QualifiedIdentifier() - payload, err = json.Decode(nil, []byte(unittest.EpochCommitFixtureJSON)) + payload, err = ccf.Decode(nil, unittest.EpochCommitFixtureCCF) require.NoError(t, err) serviceEventB, ok := payload.(cadence.Event) @@ -573,7 +628,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { } serviceEventB.EventType.QualifiedIdentifier = serviceEvents.EpochCommit.QualifiedIdentifier() - payload, err = json.Decode(nil, []byte(unittest.VersionBeaconFixtureJSON)) + payload, err = ccf.Decode(nil, unittest.VersionBeaconFixtureCCF) require.NoError(t, err) serviceEventC, ok := payload.(cadence.Event) @@ -584,26 +639,55 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { } serviceEventC.EventType.QualifiedIdentifier = serviceEvents.VersionBeacon.QualifiedIdentifier() + transactions := []*flow.TransactionBody{} + for _, col := range block.Collections() { + transactions = append(transactions, col.Collection.Transactions...) 
+ } + // events to emit for each iteration/transaction - events := make([][]cadence.Event, totalTransactionCount) - events[0] = nil - events[1] = []cadence.Event{serviceEventA, ordinaryEvent} - events[2] = []cadence.Event{ordinaryEvent} - events[3] = nil - events[4] = []cadence.Event{serviceEventB, serviceEventC} + events := map[common.Location][]cadence.Event{ + common.TransactionLocation(transactions[0].ID()): nil, + common.TransactionLocation(transactions[1].ID()): { + serviceEventA, + { + EventType: &cadence.EventType{ + Location: stdlib.FlowLocation{}, + QualifiedIdentifier: "what.ever", + }, + }, + }, + common.TransactionLocation(transactions[2].ID()): { + { + EventType: &cadence.EventType{ + Location: stdlib.FlowLocation{}, + QualifiedIdentifier: "what.ever", + }, + }, + }, + common.TransactionLocation(transactions[3].ID()): nil, + } + + systemTransactionEvents := []cadence.Event{ + serviceEventB, + serviceEventC, + } emittingRuntime := &testRuntime{ executeTransaction: func( script runtime.Script, context runtime.Context, ) error { - for _, e := range events[0] { + scriptEvents, ok := events[context.Location] + if !ok { + scriptEvents = systemTransactionEvents + } + + for _, e := range scriptEvents { err := context.Interface.EmitEvent(e) if err != nil { return err } } - events = events[1:] return nil }, readStored: func( @@ -651,7 +735,8 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { me, prov, nil, - ) + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) result, err := exe.ExecuteBlock( @@ -666,7 +751,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { // make sure event index sequence are valid for i := 0; i < result.BlockExecutionResult.Size(); i++ { collectionResult := result.CollectionExecutionResultAt(i) - unittest.EnsureEventsIndexSeq(t, collectionResult.Events(), execCtx.Chain.ChainID()) + unittest.EnsureEventsIndexSeq(t, collectionResult.Events(), chainID) } sEvents := result.AllServiceEvents() // all events should have been collected @@ -704,14 +789,14 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { Name: "Test", } - contractProgram := &interpreter.Program{} + contractProgram := &runtime.Program{} rt := &testRuntime{ executeTransaction: func(script runtime.Script, r runtime.Context) error { _, err := r.Interface.GetOrLoadProgram( contractLocation, - func() (*interpreter.Program, error) { + func() (*runtime.Program, error) { return contractProgram, nil }, ) @@ -736,7 +821,8 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { runtime.Config{}, func(_ runtime.Config) runtime.Runtime { return rt - }))) + })), + ) vm := fvm.NewVirtualMachine() @@ -760,7 +846,9 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { committer.NewNoopViewCommitter(), me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) const collectionCount = 2 @@ -794,34 +882,41 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { Name: "Test", } - contractProgram := &interpreter.Program{} + contractProgram := &runtime.Program{} const collectionCount = 2 const transactionCount = 2 + block := generateBlock(collectionCount, transactionCount, rag) - var executionCalls int + normalTransactions := map[common.Location]struct{}{} + for _, col := range block.Collections() { + for _, txn := range col.Collection.Transactions { + loc := common.TransactionLocation(txn.ID()) + normalTransactions[loc] = struct{}{} + } + } rt := &testRuntime{ executeTransaction: func(script runtime.Script, r 
runtime.Context) error { - executionCalls++ - - // NOTE: set a program and revert all transactions but the system chunk transaction + // NOTE: set a program and revert all transactions but the + // system chunk transaction _, err := r.Interface.GetOrLoadProgram( contractLocation, - func() (*interpreter.Program, error) { + func() (*runtime.Program, error) { return contractProgram, nil }, ) require.NoError(t, err) - if executionCalls > collectionCount*transactionCount { - return nil + _, ok := normalTransactions[r.Location] + if ok { + return runtime.Error{ + Err: fmt.Errorf("TX reverted"), + } } - return runtime.Error{ - Err: fmt.Errorf("TX reverted"), - } + return nil }, readStored: func( address common.Address, @@ -840,7 +935,8 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { runtime.Config{}, func(_ runtime.Config) runtime.Runtime { return rt - }))) + })), + ) vm := fvm.NewVirtualMachine() @@ -864,11 +960,11 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { committer.NewNoopViewCommitter(), me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) - block := generateBlock(collectionCount, transactionCount, rag) - key := flow.AccountStatusRegisterID( flow.BytesToAddress(address.Bytes())) value := environment.NewAccountStatus().ToBytes() @@ -882,6 +978,60 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { require.NoError(t, err) assert.Len(t, result.AllExecutionSnapshots(), collectionCount+1) // +1 system chunk }) + + t.Run("internal error", func(t *testing.T) { + execCtx := fvm.NewContext() + + committer := new(computermock.ViewCommitter) + + bservice := requesterunit.MockBlobService( + blockstore.NewBlockstore( + dssync.MutexWrap(datastore.NewMapDatastore()))) + trackerStorage := mocktracker.NewMockStorage() + + prov := provider.NewProvider( + zerolog.Nop(), + metrics.NewNoopCollector(), + execution_data.DefaultSerializer, + bservice, + trackerStorage) + + exe, err := computer.NewBlockComputer( + errorVM{errorAt: 5}, + execCtx, + metrics.NewNoopCollector(), + trace.NewNoopTracer(), + zerolog.Nop(), + committer, + me, + prov, + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) + require.NoError(t, err) + + collectionCount := 5 + transactionsPerCollection := 3 + block := generateBlock(collectionCount, transactionsPerCollection, rag) + + snapshot := storehouse.NewExecutingBlockSnapshot( + snapshot.MapStorageSnapshot{}, + unittest.StateCommitmentFixture(), + ) + + committer.On("CommitView", mock.Anything, mock.Anything). + Return(nil, nil, nil, snapshot, nil). 
+ Times(collectionCount + 1) + + _, err = exe.ExecuteBlock( + context.Background(), + unittest.IdentifierFixture(), + block, + nil, + derived.NewEmptyDerivedBlockData(0)) + assert.ErrorContains(t, err, "boom - internal error") + }) + } func assertEventHashesMatch( @@ -1028,14 +1178,6 @@ func (e *testRuntime) ReadStored( return e.readStored(a, p, c) } -func (*testRuntime) ReadLinked( - _ common.Address, - _ cadence.Path, - _ runtime.Context, -) (cadence.Value, error) { - panic("ReadLinked not expected") -} - func (*testRuntime) SetDebugger(_ *interpreter.Debugger) { panic("SetDebugger not expected") } @@ -1089,6 +1231,7 @@ func (f *FixedAddressGenerator) AddressCount() uint64 { func Test_ExecutingSystemCollection(t *testing.T) { execCtx := fvm.NewContext( + fvm.WithEVMEnabled(true), fvm.WithChain(flow.Localnet.Chain()), fvm.WithBlocks(&environment.NoopBlockFinder{}), ) @@ -1100,37 +1243,41 @@ func Test_ExecutingSystemCollection(t *testing.T) { ledger := testutil.RootBootstrappedLedger(vm, execCtx) committer := new(computermock.ViewCommitter) + snapshot := storehouse.NewExecutingBlockSnapshot( + snapshot.MapStorageSnapshot{}, + unittest.StateCommitmentFixture(), + ) + committer.On("CommitView", mock.Anything, mock.Anything). - Return(nil, nil, nil, nil). + Return(nil, nil, nil, snapshot, nil). Times(1) // only system chunk noopCollector := metrics.NewNoopCollector() - expectedNumberOfEvents := 3 - expectedEventSize := 1721 - // bootstrapping does not cache programs - expectedCachedPrograms := 0 + expectedNumberOfEvents := 4 + expectedMinEventSize := 1000 metrics := new(modulemock.ExecutionMetrics) metrics.On("ExecutionBlockExecuted", - mock.Anything, // duration - mock.Anything). // stats + mock.Anything, + mock.Anything). Return(nil). Times(1) metrics.On("ExecutionCollectionExecuted", - mock.Anything, // duration - mock.Anything). // stats + mock.Anything, + mock.Anything). Return(nil). Times(1) // system collection metrics.On("ExecutionTransactionExecuted", mock.Anything, // duration - mock.Anything, // computation used - mock.Anything, // memory used - expectedNumberOfEvents, - expectedEventSize, - false). + mock.MatchedBy(func(arg module.TransactionExecutionResultStats) bool { + return arg.EventCounts == expectedNumberOfEvents && + arg.EventSize >= expectedMinEventSize && + !arg.Failed + }), + mock.Anything). Return(nil). Times(1) // system chunk tx @@ -1143,7 +1290,12 @@ func Test_ExecutingSystemCollection(t *testing.T) { metrics.On( "ExecutionBlockCachedPrograms", - expectedCachedPrograms). + mock.Anything). + Run(func(args mock.Arguments) { + actual := args[0].(int) + // bootstrapping already caches some programs + require.Greater(t, actual, 0) + }). Return(nil). Times(1) // block @@ -1153,6 +1305,18 @@ func Test_ExecutingSystemCollection(t *testing.T) { mock.Anything). 
Return(nil) + metrics.On("RuntimeTransactionParsed", mock.Anything) + metrics.On("RuntimeTransactionProgramsCacheMiss") + metrics.On("RuntimeTransactionProgramsCacheHit") + metrics.On("RuntimeTransactionChecked", mock.Anything) + metrics.On("RuntimeTransactionInterpreted", mock.Anything) + + metrics.On("EVMBlockExecuted", + mock.Anything, + mock.Anything, + mock.Anything, + ) + bservice := requesterunit.MockBlobService(blockstore.NewBlockstore(dssync.MutexWrap(datastore.NewMapDatastore()))) trackerStorage := mocktracker.NewMockStorage() @@ -1166,10 +1330,12 @@ func Test_ExecutingSystemCollection(t *testing.T) { me := new(modulemock.Local) me.On("NodeID").Return(unittest.IdentifierFixture()) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) + constRandomSource := make([]byte, 32) + exe, err := computer.NewBlockComputer( vm, execCtx, @@ -1179,7 +1345,9 @@ func Test_ExecutingSystemCollection(t *testing.T) { committer, me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(constRandomSource), + testMaxConcurrency) require.NoError(t, err) // create empty block, it will have system collection attached while executing @@ -1200,6 +1368,462 @@ func Test_ExecutingSystemCollection(t *testing.T) { committer.AssertExpectations(t) } +func Test_ScheduledCallback(t *testing.T) { + chain := flow.Testnet.Chain() + + t.Run("process with no scheduled callback", func(t *testing.T) { + testScheduledCallback(t, chain, []cadence.Event{}, 2) // process callback + system chunk + }) + + t.Run("process with 2 scheduled callbacks", func(t *testing.T) { + // create callback events that process callback will return + env := systemcontracts.SystemContractsForChain(chain.ChainID()) + location := common.NewAddressLocation(nil, common.Address(env.FlowCallbackScheduler.Address), "FlowTransactionScheduler") + + eventType := cadence.NewEventType( + location, + "PendingExecution", + []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + {Identifier: "fees", Type: cadence.UFix64Type}, + {Identifier: "callbackOwner", Type: cadence.AddressType}, + }, + nil, + ) + + callbackID1 := uint64(1) + callbackID2 := uint64(2) + + fees, err := cadence.NewUFix64("0.0") + require.NoError(t, err) + + callbackEvent1 := cadence.NewEvent( + []cadence.Value{ + cadence.NewUInt64(callbackID1), + cadence.NewUInt8(1), + cadence.NewUInt64(1000), // execution effort + fees, + cadence.NewAddress(env.FlowServiceAccount.Address), + }, + ).WithType(eventType) + + callbackEvent2 := cadence.NewEvent( + []cadence.Value{ + cadence.NewUInt64(callbackID2), + cadence.NewUInt8(1), + cadence.NewUInt64(2000), // execution effort + fees, + cadence.NewAddress(env.FlowServiceAccount.Address), + }, + ).WithType(eventType) + + testScheduledCallback(t, chain, []cadence.Event{callbackEvent1, callbackEvent2}, 4) // process callback + 2 callbacks + system chunk + }) + + t.Run("process callback transaction execution error", func(t *testing.T) { + processCallbackError := fvmErrors.NewInvalidAddressErrorf(flow.EmptyAddress, "process callback execution failed") + testScheduledCallbackWithError(t, chain, []cadence.Event{}, 0, processCallbackError, nil) + }) + + t.Run("process callback transaction output error", func(t *testing.T) { + processCallbackError := 
fvmErrors.NewInvalidAddressErrorf(flow.EmptyAddress, "process callback output error") + testScheduledCallbackWithError(t, chain, []cadence.Event{}, 2, nil, processCallbackError) + }) +} + +func testScheduledCallback(t *testing.T, chain flow.Chain, callbackEvents []cadence.Event, expectedTransactionCount int) { + testScheduledCallbackWithError(t, chain, callbackEvents, expectedTransactionCount, nil, nil) +} + +func testScheduledCallbackWithError( + t *testing.T, + chain flow.Chain, + callbackEvents []cadence.Event, + expectedTransactionCount int, + processExecuteError fvmErrors.CodedError, + processOutputError fvmErrors.CodedError, +) { + rag := &RandomAddressGenerator{} + executorID := unittest.IdentifierFixture() + + testLogger := NewTestLogger() + + execCtx := fvm.NewContext( + fvm.WithScheduleCallbacksEnabled(true), // Enable callbacks + fvm.WithChain(chain), + fvm.WithLogger(testLogger.Logger), + ) + + // track which transactions were executed and their details + executedTransactions := make(map[string]string) + var executedTransactionsMutex sync.Mutex + + // encode events to create flow event payloads + eventPayloads := make([][]byte, len(callbackEvents)) + callbackIDs := make([]uint64, len(callbackEvents)) + for i, event := range callbackEvents { + payload, err := ccf.Encode(event) + require.NoError(t, err) + eventPayloads[i] = payload + + // extract callback ID from event for later comparison + if len(callbackEvents) > 0 { + decodedEvent, err := ccf.Decode(nil, payload) + require.NoError(t, err) + if cadenceEvent, ok := decodedEvent.(cadence.Event); ok { + // search for the ID field in the event + idField := cadence.SearchFieldByName(cadenceEvent, "id") + if idValue, ok := idField.(cadence.UInt64); ok { + callbackIDs[i] = uint64(idValue) + } + } + } + } + + // create a VM that will track execution and return appropriate events + vm := &callbackTestVM{ + testVM: testVM{ + t: t, + eventsPerTransaction: 0, // we'll handle events manually + err: processExecuteError, // inject error if provided + }, + processOutputErr: processOutputError, + executedTransactions: executedTransactions, + executedMutex: &executedTransactionsMutex, + eventPayloads: eventPayloads, + callbackIDs: callbackIDs, + } + + committer := &fakeCommitter{ + callCount: 0, + } + + me := new(modulemock.Local) + me.On("NodeID").Return(executorID) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) + me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). + Return(unittest.SignatureFixture(), nil) + + exemetrics := new(modulemock.ExecutionMetrics) + exemetrics.On("ExecutionBlockExecuted", + mock.Anything, + mock.Anything). + Return(nil). + Times(1) + + // expect 1 system collection execution + exemetrics.On("ExecutionCollectionExecuted", + mock.Anything, + mock.Anything). + Return(nil). + Times(1) + + if processOutputError != nil { + // expect 1 failed transaction (process callback) + 1 successful transaction (system chunk) + exemetrics.On("ExecutionTransactionExecuted", + mock.Anything, + mock.MatchedBy(func(arg module.TransactionExecutionResultStats) bool { + return arg.Failed && (arg.SystemTransaction || arg.ScheduledTransaction) + }), + mock.Anything). + Return(nil). + Times(1) + exemetrics.On("ExecutionTransactionExecuted", + mock.Anything, + mock.MatchedBy(func(arg module.TransactionExecutionResultStats) bool { + return !arg.Failed && (arg.SystemTransaction || arg.ScheduledTransaction) + }), + mock.Anything). + Return(nil). 
+ Times(expectedTransactionCount - 1) + } else { + exemetrics.On("ExecutionTransactionExecuted", + mock.Anything, + mock.MatchedBy(func(arg module.TransactionExecutionResultStats) bool { + return !arg.Failed && (arg.SystemTransaction || arg.ScheduledTransaction) + }), + mock.Anything). + Return(nil). + Times(expectedTransactionCount) + } + + exemetrics.On( + "ExecutionChunkDataPackGenerated", + mock.Anything, + mock.Anything). + Return(nil). + Times(1) // system collection + + exemetrics.On( + "ExecutionBlockCachedPrograms", + mock.Anything). + Return(nil). + Times(1) + + // expect callback execution metrics if there are callbacks + if len(callbackEvents) > 0 { + exemetrics.On("ExecutionCallbacksExecuted", + mock.Anything, + mock.Anything, + mock.Anything). + Return(nil). + Times(1) + } + + bservice := requesterunit.MockBlobService(blockstore.NewBlockstore(dssync.MutexWrap(datastore.NewMapDatastore()))) + trackerStorage := mocktracker.NewMockStorage() + + prov := provider.NewProvider( + zerolog.Nop(), + metrics.NewNoopCollector(), + execution_data.DefaultSerializer, + bservice, + trackerStorage, + ) + + exe, err := computer.NewBlockComputer( + vm, + execCtx, + exemetrics, + trace.NewNoopTracer(), + testLogger.Logger, + committer, + me, + prov, + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) + require.NoError(t, err) + + // create empty block (no user collections) + block := generateBlock(0, 0, rag) + + parentBlockExecutionResultID := unittest.IdentifierFixture() + result, err := exe.ExecuteBlock( + context.Background(), + parentBlockExecutionResultID, + block, + nil, + derived.NewEmptyDerivedBlockData(0)) + + // If we expect an error, verify it and return early + if processExecuteError != nil { + require.Error(t, err) + require.Contains(t, err.Error(), "system process transaction") + return + } + + if processOutputError != nil { + require.NoError(t, err) + require.Truef( + t, + testLogger.HasLogWithField("system process transaction output error", "critical_error", true), + "expected critical error log not found", + ) + + // verify the process callback transaction failed as expected + require.Len(t, result.AllTransactionResults(), expectedTransactionCount) + processCallbackResult := result.AllTransactionResults()[0] + require.NotEmpty(t, processCallbackResult.ErrorMessage, "process callback transaction should have failed") + require.Contains(t, processCallbackResult.ErrorMessage, "process callback output error") + + // verify system chunk transaction succeeded + systemChunkResult := result.AllTransactionResults()[1] + require.Empty(t, systemChunkResult.ErrorMessage, "system chunk transaction should not have failed") + return + } + + require.NoError(t, err) + + // verify execution results + assert.Len(t, result.AllExecutionSnapshots(), 1) // Only system chunk + assert.Len(t, result.AllTransactionResults(), expectedTransactionCount) + + // verify correct number of commits (1 for system collection) + assert.Equal(t, 1, committer.callCount) + assert.Equal(t, expectedTransactionCount, len(executedTransactions)) + + // verify we executed each type of transaction + hasProcessCallback := false + hasSystemChunk := false + callbackNames := make(map[string]bool) + + for _, txType := range executedTransactions { + switch txType { + case "process_callback": + hasProcessCallback = true + case "system_chunk": + hasSystemChunk = true + default: + if strings.HasPrefix(txType, "callback") { + // add unique callbacks to the map + callbackNames[txType] = true + } + } + } + + 
assert.True(t, hasProcessCallback, "process callback transaction should have been executed") + assert.True(t, hasSystemChunk, "system chunk transaction should have been executed") + assert.Equal(t, len(callbackEvents), len(callbackNames), "should have executed the expected number of callback transactions") + + // verify no transaction errors + for _, txResult := range result.AllTransactionResults() { + assert.Empty(t, txResult.ErrorMessage, "transaction should not have failed") + } + + // verify receipt structure + receipt := result.ExecutionReceipt + assert.Equal(t, executorID, receipt.ExecutorID) + assert.Equal(t, parentBlockExecutionResultID, receipt.PreviousResultID) + assert.Equal(t, block.BlockID(), receipt.BlockID) + assert.Len(t, receipt.Chunks, 1) // Only system chunk + + // verify system chunk details + systemChunk := receipt.Chunks[0] + assert.Equal(t, block.BlockID(), systemChunk.BlockID) + assert.Equal(t, uint(0), systemChunk.CollectionIndex) // System collection is at index 0 for empty block + assert.Equal(t, uint64(expectedTransactionCount), systemChunk.NumberOfTransactions) + + // verify all mocks were called as expected + exemetrics.AssertExpectations(t) +} + +// callbackTestVM is a custom VM for testing callback execution +type callbackTestVM struct { + testVM + processOutputErr fvmErrors.CodedError + executedTransactions map[string]string + executedMutex *sync.Mutex + eventPayloads [][]byte + callbackIDs []uint64 +} + +func (vm *callbackTestVM) NewExecutor( + ctx fvm.Context, + proc fvm.Procedure, + txnState storage.TransactionPreparer, +) fvm.ProcedureExecutor { + // Create a custom executor that tracks execution and returns proper events + return &callbackTestExecutor{ + testExecutor: testExecutor{ + testVM: &vm.testVM, + ctx: ctx, + proc: proc, + txnState: txnState, + }, + vm: vm, + } +} + +// callbackTestExecutor is a custom executor for testing callback execution +type callbackTestExecutor struct { + testExecutor + vm *callbackTestVM +} + +func (c *callbackTestExecutor) Execute() error { + // Return error if one was injected for process callback transaction + if c.vm.err != nil { + txProc, ok := c.proc.(*fvm.TransactionProcedure) + if ok { + script := string(txProc.Transaction.Script) + if strings.Contains(script, "scheduler.process") { + return c.vm.err + } + } + } + + return c.testExecutor.Execute() +} + +// we need to reimplement this Output since the events are consumed in the block computer +// from the output of the procedure executor +func (c *callbackTestExecutor) Output() fvm.ProcedureOutput { + // Return error if one was injected for process callback transaction + if c.vm.processOutputErr != nil { + txProc, ok := c.proc.(*fvm.TransactionProcedure) + if ok { + script := string(txProc.Transaction.Script) + if strings.Contains(script, "scheduler.process") { + return fvm.ProcedureOutput{ + Err: c.vm.processOutputErr, + } + } + } + } + + c.vm.executedMutex.Lock() + defer c.vm.executedMutex.Unlock() + + txProc, ok := c.proc.(*fvm.TransactionProcedure) + if !ok { + return fvm.ProcedureOutput{} + } + + const callbackSchedulerImport = `import "FlowTransactionScheduler"` + txBody := txProc.Transaction + txID := fmt.Sprintf("tx_%d", txProc.TxIndex) + + switch { + // scheduled callbacks process transaction + case strings.Contains(string(txBody.Script), "scheduler.process"): + c.vm.executedTransactions[txID] = "process_callback" + env := systemcontracts.SystemContractsForChain(c.ctx.Chain.ChainID()).AsTemplateEnv() + eventTypeString := 
fmt.Sprintf("A.%v.FlowTransactionScheduler.PendingExecution", env.FlowTransactionSchedulerAddress) + + // return events for each scheduled callback + events := make([]flow.Event, len(c.vm.eventPayloads)) + for i, payload := range c.vm.eventPayloads { + events[i] = flow.Event{ + Type: flow.EventType(eventTypeString), + TransactionID: txProc.ID, + TransactionIndex: txProc.TxIndex, + EventIndex: uint32(i), + Payload: payload, + } + } + + return fvm.ProcedureOutput{ + Events: events, + } + // scheduled callbacks execute transaction + case strings.Contains(string(txBody.Script), "scheduler.executeTransaction"): + // extract the callback ID from the arguments + if len(txBody.Arguments) == 0 { + return fvm.ProcedureOutput{} + } + + // decode the argument to check which callback it is + argValue, err := jsoncdc.Decode(nil, txBody.Arguments[0]) + if err == nil { + if idValue, ok := argValue.(cadence.UInt64); ok { + // find which callback this is + callbackIndex := -1 + for i, callbackID := range c.vm.callbackIDs { + if uint64(idValue) == callbackID { + callbackIndex = i + break + } + } + + if callbackIndex >= 0 { + c.vm.executedTransactions[txID] = fmt.Sprintf("callback%d", callbackIndex+1) + } else { + c.vm.executedTransactions[txID] = "unknown_callback" + } + } + } + + return fvm.ProcedureOutput{} + // system chunk transaction + default: + c.vm.executedTransactions[txID] = "system_chunk" + return fvm.ProcedureOutput{} + } +} + func generateBlock( collectionCount, transactionCount int, addressGenerator flow.AddressGenerator, @@ -1220,22 +1844,20 @@ func generateBlockWithVisitor( collection := generateCollection(transactionCount, addressGenerator, visitor) collections[i] = collection guarantees[i] = collection.Guarantee - completeCollections[collection.Guarantee.ID()] = collection + completeCollections[collection.Guarantee.CollectionID] = collection } - block := flow.Block{ - Header: &flow.Header{ - Timestamp: flow.GenesisTime, - Height: 42, - View: 42, - }, - Payload: &flow.Payload{ - Guarantees: guarantees, - }, - } + block := unittest.BlockFixture( + unittest.Block.WithHeight(42), + unittest.Block.WithView(42), + unittest.Block.WithParentView(41), + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees(guarantees...)), + ), + ) return &entity.ExecutableBlock{ - Block: &block, + Block: block, CompleteCollections: completeCollections, StartState: unittest.StateCommitmentPointerFixture(), } @@ -1268,19 +1890,84 @@ func generateCollection( guarantee := &flow.CollectionGuarantee{CollectionID: collection.ID()} return &entity.CompleteCollection{ - Guarantee: guarantee, - Transactions: transactions, + Guarantee: guarantee, + Collection: &flow.Collection{Transactions: transactions}, } } +type noOpExecutor struct{} + +func (noOpExecutor) Cleanup() {} + +func (noOpExecutor) Preprocess() error { + return nil +} + +func (noOpExecutor) Execute() error { + return nil +} + +func (noOpExecutor) Output() fvm.ProcedureOutput { + return fvm.ProcedureOutput{} +} + type testVM struct { t *testing.T eventsPerTransaction int - callCount int + callCount int32 // atomic variable err fvmErrors.CodedError } +type testExecutor struct { + *testVM + + ctx fvm.Context + proc fvm.Procedure + txnState storage.TransactionPreparer +} + +func (testExecutor) Cleanup() { +} + +func (testExecutor) Preprocess() error { + return nil +} + +func (executor *testExecutor) Execute() error { + atomic.AddInt32(&executor.callCount, 1) + + getSetAProgram(executor.t, executor.txnState) + + return nil +} + +func (executor 
*testExecutor) Output() fvm.ProcedureOutput { + txn := executor.proc.(*fvm.TransactionProcedure) + + return fvm.ProcedureOutput{ + Events: generateEvents(executor.eventsPerTransaction, txn.TxIndex), + Err: executor.err, + } +} + +func (vm *testVM) NewExecutor( + ctx fvm.Context, + proc fvm.Procedure, + txnState storage.TransactionPreparer, +) fvm.ProcedureExecutor { + return &testExecutor{ + testVM: vm, + proc: proc, + ctx: ctx, + txnState: txnState, + } +} + +func (vm *testVM) CallCount() int { + return int(atomic.LoadInt32(&vm.callCount)) +} + func (vm *testVM) Run( ctx fvm.Context, proc fvm.Procedure, @@ -1290,24 +1977,27 @@ func (vm *testVM) Run( fvm.ProcedureOutput, error, ) { - vm.callCount += 1 + database := storage.NewBlockDatabase( + storageSnapshot, + proc.ExecutionTime(), + ctx.DerivedBlockData) - txn := proc.(*fvm.TransactionProcedure) + txn, err := database.NewTransaction( + proc.ExecutionTime(), + state.DefaultParameters()) + require.NoError(vm.t, err) - derivedTxnData, err := ctx.DerivedBlockData.NewDerivedTransactionData( - txn.ExecutionTime(), - txn.ExecutionTime()) + executor := vm.NewExecutor(ctx, proc, txn) + err = fvm.Run(executor) require.NoError(vm.t, err) - getSetAProgram(vm.t, storageSnapshot, derivedTxnData) + err = txn.Finalize() + require.NoError(vm.t, err) - snapshot := &snapshot.ExecutionSnapshot{} - output := fvm.ProcedureOutput{ - Events: generateEvents(vm.eventsPerTransaction, txn.TxIndex), - Err: vm.err, - } + executionSnapshot, err := txn.Commit() + require.NoError(vm.t, err) - return snapshot, output, nil + return executionSnapshot, executor.Output(), nil } func (testVM) GetAccount( @@ -1335,21 +2025,77 @@ func generateEvents(eventCount int, txIndex uint32) []flow.Event { return events } -func getSetAProgram( - t *testing.T, +type errorVM struct { + errorAt logical.Time +} + +type errorExecutor struct { + err error +} + +func (errorExecutor) Cleanup() {} + +func (errorExecutor) Preprocess() error { + return nil +} + +func (e errorExecutor) Execute() error { + return e.err +} + +func (errorExecutor) Output() fvm.ProcedureOutput { + return fvm.ProcedureOutput{} +} + +func (vm errorVM) NewExecutor( + ctx fvm.Context, + proc fvm.Procedure, + txn storage.TransactionPreparer, +) fvm.ProcedureExecutor { + var err error + if proc.ExecutionTime() == vm.errorAt { + err = fmt.Errorf("boom - internal error") + } + + return errorExecutor{err: err} +} + +func (vm errorVM) Run( + ctx fvm.Context, + proc fvm.Procedure, storageSnapshot snapshot.StorageSnapshot, - derivedTxnData *derived.DerivedTransactionData, +) ( + *snapshot.ExecutionSnapshot, + fvm.ProcedureOutput, + error, ) { + var err error + if proc.ExecutionTime() == vm.errorAt { + err = fmt.Errorf("boom - internal error") + } + return &snapshot.ExecutionSnapshot{}, fvm.ProcedureOutput{}, err +} - txnState := state.NewTransactionState( - storageSnapshot, - state.DefaultParameters()) +func (errorVM) GetAccount( + ctx fvm.Context, + addr flow.Address, + storageSnapshot snapshot.StorageSnapshot, +) ( + *flow.Account, + error, +) { + panic("not implemented") +} +func getSetAProgram( + t *testing.T, + txnState storage.TransactionPreparer, +) { loc := common.AddressLocation{ Name: "SomeContract", Address: common.MustBytesToAddress([]byte{0x1}), } - _, err := derivedTxnData.GetOrComputeProgram( + _, err := txnState.GetOrComputeProgram( txnState, loc, &programLoader{ @@ -1359,9 +2105,6 @@ func getSetAProgram( }, ) require.NoError(t, err) - - err = derivedTxnData.Commit() - require.NoError(t, err) } type programLoader 
struct { @@ -1377,3 +2120,67 @@ func (p *programLoader) Compute( ) { return p.load() } + +// TestLogger captures log output for testing and provides methods to verify logged messages. +type TestLogger struct { + buffer bytes.Buffer + Logger zerolog.Logger +} + +func NewTestLogger() *TestLogger { + tl := &TestLogger{} + tl.Logger = zerolog.New(&tl.buffer).Level(zerolog.DebugLevel) + return tl +} + +type LogEntry struct { + Level string + Message string + Fields map[string]interface{} +} + +func (tl *TestLogger) Logs() []LogEntry { + var entries []LogEntry + lines := strings.Split(tl.buffer.String(), "\n") + for _, line := range lines { + if line == "" { + continue + } + var rawEntry map[string]interface{} + if err := json.Unmarshal([]byte(line), &rawEntry); err != nil { + continue + } + entry := LogEntry{ + Fields: make(map[string]interface{}), + } + for k, v := range rawEntry { + switch k { + case "level": + entry.Level = fmt.Sprintf("%v", v) + case "message": + entry.Message = fmt.Sprintf("%v", v) + default: + entry.Fields[k] = v + } + } + entries = append(entries, entry) + } + return entries +} + +func (tl *TestLogger) HasLog(message string) bool { + return strings.Contains(tl.buffer.String(), message) +} + +func (tl *TestLogger) HasLogWithField(message string, fieldName string, fieldValue interface{}) bool { + for _, entry := range tl.Logs() { + if strings.Contains(entry.Message, message) { + if val, ok := entry.Fields[fieldName]; ok { + if val == fieldValue { + return true + } + } + } + } + return false +} diff --git a/engine/execution/computation/computer/mock/block_computer.go b/engine/execution/computation/computer/mock/block_computer.go index 7464c38e9b2..2e309602195 100644 --- a/engine/execution/computation/computer/mock/block_computer.go +++ b/engine/execution/computation/computer/mock/block_computer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -26,6 +26,10 @@ type BlockComputer struct { func (_m *BlockComputer) ExecuteBlock(ctx context.Context, parentBlockExecutionResultID flow.Identifier, block *entity.ExecutableBlock, _a3 snapshot.StorageSnapshot, derivedBlockData *derived.DerivedBlockData) (*execution.ComputationResult, error) { ret := _m.Called(ctx, parentBlockExecutionResultID, block, _a3, derivedBlockData) + if len(ret) == 0 { + panic("no return value specified for ExecuteBlock") + } + var r0 *execution.ComputationResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, *entity.ExecutableBlock, snapshot.StorageSnapshot, *derived.DerivedBlockData) (*execution.ComputationResult, error)); ok { @@ -48,13 +52,12 @@ func (_m *BlockComputer) ExecuteBlock(ctx context.Context, parentBlockExecutionR return r0, r1 } -type mockConstructorTestingTNewBlockComputer interface { +// NewBlockComputer creates a new instance of BlockComputer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockComputer(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlockComputer creates a new instance of BlockComputer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewBlockComputer(t mockConstructorTestingTNewBlockComputer) *BlockComputer { +}) *BlockComputer { mock := &BlockComputer{} mock.Mock.Test(t) diff --git a/engine/execution/computation/computer/mock/transaction_write_behind_logger.go b/engine/execution/computation/computer/mock/transaction_write_behind_logger.go new file mode 100644 index 00000000000..7122b9ecee4 --- /dev/null +++ b/engine/execution/computation/computer/mock/transaction_write_behind_logger.go @@ -0,0 +1,38 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + computer "github.com/onflow/flow-go/engine/execution/computation/computer" + fvm "github.com/onflow/flow-go/fvm" + + mock "github.com/stretchr/testify/mock" + + snapshot "github.com/onflow/flow-go/fvm/storage/snapshot" + + time "time" +) + +// TransactionWriteBehindLogger is an autogenerated mock type for the TransactionWriteBehindLogger type +type TransactionWriteBehindLogger struct { + mock.Mock +} + +// AddTransactionResult provides a mock function with given fields: txn, _a1, output, timeSpent, numTxnConflictRetries +func (_m *TransactionWriteBehindLogger) AddTransactionResult(txn computer.TransactionRequest, _a1 *snapshot.ExecutionSnapshot, output fvm.ProcedureOutput, timeSpent time.Duration, numTxnConflictRetries int) { + _m.Called(txn, _a1, output, timeSpent, numTxnConflictRetries) +} + +// NewTransactionWriteBehindLogger creates a new instance of TransactionWriteBehindLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionWriteBehindLogger(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionWriteBehindLogger { + mock := &TransactionWriteBehindLogger{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/computation/computer/mock/view_committer.go b/engine/execution/computation/computer/mock/view_committer.go index dfcacb97c83..8ce508c4fc9 100644 --- a/engine/execution/computation/computer/mock/view_committer.go +++ b/engine/execution/computation/computer/mock/view_committer.go @@ -1,11 +1,13 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( - ledger "github.com/onflow/flow-go/ledger" + execution "github.com/onflow/flow-go/engine/execution" flow "github.com/onflow/flow-go/model/flow" + ledger "github.com/onflow/flow-go/ledger" + mock "github.com/stretchr/testify/mock" snapshot "github.com/onflow/flow-go/fvm/storage/snapshot" @@ -17,17 +19,22 @@ type ViewCommitter struct { } // CommitView provides a mock function with given fields: _a0, _a1 -func (_m *ViewCommitter) CommitView(_a0 *snapshot.ExecutionSnapshot, _a1 flow.StateCommitment) (flow.StateCommitment, []byte, *ledger.TrieUpdate, error) { +func (_m *ViewCommitter) CommitView(_a0 *snapshot.ExecutionSnapshot, _a1 execution.ExtendableStorageSnapshot) (flow.StateCommitment, []byte, *ledger.TrieUpdate, execution.ExtendableStorageSnapshot, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CommitView") + } + var r0 flow.StateCommitment var r1 []byte var r2 *ledger.TrieUpdate - var r3 error - if rf, ok := ret.Get(0).(func(*snapshot.ExecutionSnapshot, flow.StateCommitment) (flow.StateCommitment, []byte, *ledger.TrieUpdate, error)); ok { + var r3 execution.ExtendableStorageSnapshot + var r4 error + if rf, ok := ret.Get(0).(func(*snapshot.ExecutionSnapshot, execution.ExtendableStorageSnapshot) (flow.StateCommitment, []byte, *ledger.TrieUpdate, execution.ExtendableStorageSnapshot, error)); ok { return rf(_a0, _a1) } - if rf, ok := ret.Get(0).(func(*snapshot.ExecutionSnapshot, flow.StateCommitment) flow.StateCommitment); ok { + if rf, ok := ret.Get(0).(func(*snapshot.ExecutionSnapshot, execution.ExtendableStorageSnapshot) flow.StateCommitment); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -35,7 +42,7 @@ func (_m *ViewCommitter) CommitView(_a0 *snapshot.ExecutionSnapshot, _a1 flow.St } } - if rf, ok := ret.Get(1).(func(*snapshot.ExecutionSnapshot, flow.StateCommitment) []byte); ok { + if rf, ok := ret.Get(1).(func(*snapshot.ExecutionSnapshot, execution.ExtendableStorageSnapshot) []byte); ok { r1 = rf(_a0, _a1) } else { if ret.Get(1) != nil { @@ -43,7 +50,7 @@ func (_m *ViewCommitter) CommitView(_a0 *snapshot.ExecutionSnapshot, _a1 flow.St } } - if rf, ok := ret.Get(2).(func(*snapshot.ExecutionSnapshot, flow.StateCommitment) *ledger.TrieUpdate); ok { + if rf, ok := ret.Get(2).(func(*snapshot.ExecutionSnapshot, execution.ExtendableStorageSnapshot) *ledger.TrieUpdate); ok { r2 = rf(_a0, _a1) } else { if ret.Get(2) != nil { @@ -51,22 +58,29 @@ func (_m *ViewCommitter) CommitView(_a0 *snapshot.ExecutionSnapshot, _a1 flow.St } } - if rf, ok := ret.Get(3).(func(*snapshot.ExecutionSnapshot, flow.StateCommitment) error); ok { + if rf, ok := ret.Get(3).(func(*snapshot.ExecutionSnapshot, execution.ExtendableStorageSnapshot) execution.ExtendableStorageSnapshot); ok { r3 = rf(_a0, _a1) } else { - r3 = ret.Error(3) + if ret.Get(3) != nil { + r3 = ret.Get(3).(execution.ExtendableStorageSnapshot) + } } - return r0, r1, r2, r3 -} + if rf, ok := ret.Get(4).(func(*snapshot.ExecutionSnapshot, execution.ExtendableStorageSnapshot) error); ok { + r4 = rf(_a0, _a1) + } else { + r4 = ret.Error(4) + } -type mockConstructorTestingTNewViewCommitter interface { - mock.TestingT - Cleanup(func()) + return r0, r1, r2, r3, r4 } // NewViewCommitter creates a new instance of ViewCommitter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewViewCommitter(t mockConstructorTestingTNewViewCommitter) *ViewCommitter { +// The first argument is typically a *testing.T value. 
+func NewViewCommitter(t interface { + mock.TestingT + Cleanup(func()) +}) *ViewCommitter { mock := &ViewCommitter{} mock.Mock.Test(t) diff --git a/engine/execution/computation/computer/result_collector.go b/engine/execution/computation/computer/result_collector.go index 4640485b33b..3e249147394 100644 --- a/engine/execution/computation/computer/result_collector.go +++ b/engine/execution/computation/computer/result_collector.go @@ -6,12 +6,13 @@ import ( "sync" "time" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" otelTrace "go.opentelemetry.io/otel/trace" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine/execution" "github.com/onflow/flow-go/engine/execution/computation/result" + "github.com/onflow/flow-go/engine/execution/storehouse" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/meter" "github.com/onflow/flow-go/fvm/storage/snapshot" @@ -31,19 +32,22 @@ type ViewCommitter interface { // CommitView commits an execution snapshot and collects proofs CommitView( *snapshot.ExecutionSnapshot, - flow.StateCommitment, + execution.ExtendableStorageSnapshot, ) ( - flow.StateCommitment, + flow.StateCommitment, // TODO(leo): deprecate. see storehouse.ExtendableStorageSnapshot.Commitment() []byte, *ledger.TrieUpdate, + execution.ExtendableStorageSnapshot, error, ) } type transactionResult struct { - transactionRequest + TransactionRequest *snapshot.ExecutionSnapshot fvm.ProcedureOutput + timeSpent time.Duration + numConflictRetries int } // TODO(ramtin): move committer and other folks to consumers layer @@ -64,7 +68,7 @@ type resultCollector struct { spockHasher hash.Hasher receiptHasher hash.Hasher - executionDataProvider *provider.Provider + executionDataProvider provider.Provider parentBlockExecutionResultID flow.Identifier @@ -74,12 +78,13 @@ type resultCollector struct { spockSignatures []crypto.Signature blockStartTime time.Time - blockStats module.ExecutionResultStats + blockStats module.BlockExecutionResultStats blockMeter *meter.Meter - currentCollectionStartTime time.Time - currentCollectionState *state.ExecutionState - currentCollectionStats module.ExecutionResultStats + currentCollectionStartTime time.Time + currentCollectionState *state.ExecutionState + currentCollectionStats module.CollectionExecutionResultStats + currentCollectionStorageSnapshot execution.ExtendableStorageSnapshot } func newResultCollector( @@ -88,13 +93,14 @@ func newResultCollector( metrics module.ExecutionMetrics, committer ViewCommitter, signer module.Local, - executionDataProvider *provider.Provider, + executionDataProvider provider.Provider, spockHasher hash.Hasher, receiptHasher hash.Hasher, parentBlockExecutionResultID flow.Identifier, block *entity.ExecutableBlock, - numTransactions int, + inputChannelSize int, consumers []result.ExecutedCollectionConsumer, + previousBlockSnapshot snapshot.StorageSnapshot, ) *resultCollector { numCollections := len(block.Collections()) + 1 now := time.Now() @@ -102,7 +108,7 @@ func newResultCollector( tracer: tracer, blockSpan: blockSpan, metrics: metrics, - processorInputChan: make(chan transactionResult, numTransactions), + processorInputChan: make(chan transactionResult, inputChannelSize), processorDoneChan: make(chan struct{}), committer: committer, signer: signer, @@ -117,9 +123,11 @@ func newResultCollector( blockMeter: meter.NewMeter(meter.DefaultParameters()), currentCollectionStartTime: now, currentCollectionState: state.NewExecutionState(nil, state.DefaultParameters()), - 
currentCollectionStats: module.ExecutionResultStats{ - NumberOfCollections: 1, - }, + currentCollectionStats: module.CollectionExecutionResultStats{}, + currentCollectionStorageSnapshot: storehouse.NewExecutingBlockSnapshot( + previousBlockSnapshot, + *block.StartState, + ), } go collector.runResultProcessor() @@ -136,14 +144,19 @@ func (collector *resultCollector) commitCollection( collector.blockSpan, trace.EXECommitDelta).End() - startState := collector.result.CurrentEndState() - endState, proof, trieUpdate, err := collector.committer.CommitView( + startState := collector.currentCollectionStorageSnapshot.Commitment() + + _, proof, trieUpdate, newSnapshot, err := collector.committer.CommitView( collectionExecutionSnapshot, - startState) + collector.currentCollectionStorageSnapshot, + ) if err != nil { return fmt.Errorf("commit view failed: %w", err) } + endState := newSnapshot.Commitment() + collector.currentCollectionStorageSnapshot = newSnapshot + execColRes := collector.result.CollectionExecutionResultAt(collection.collectionIndex) execColRes.UpdateExecutionSnapshot(collectionExecutionSnapshot) @@ -153,11 +166,14 @@ func (collector *resultCollector) commitCollection( return fmt.Errorf("hash events failed: %w", err) } - col := collection.Collection() + txResults := execColRes.TransactionResults() + convertedTxResults := execution_data.ConvertTransactionResults(txResults) + chunkExecData := &execution_data.ChunkExecutionData{ - Collection: &col, - Events: events, - TrieUpdate: trieUpdate, + Collection: collection.Collection, + Events: events, + TrieUpdate: trieUpdate, + TransactionResults: convertedTxResults, } collector.result.AppendCollectionAttestationResult( @@ -170,39 +186,28 @@ func (collector *resultCollector) commitCollection( collector.metrics.ExecutionChunkDataPackGenerated( len(proof), - len(collection.Transactions)) + len(collection.Collection.Transactions)) spock, err := collector.signer.SignFunc( collectionExecutionSnapshot.SpockSecret, collector.spockHasher, - SPOCKProve) + crypto.SPOCKProve) if err != nil { return fmt.Errorf("signing spock hash failed: %w", err) } collector.spockSignatures = append(collector.spockSignatures, spock) - collector.currentCollectionStats.EventCounts = len(events) - collector.currentCollectionStats.EventSize = events.ByteSize() - collector.currentCollectionStats.NumberOfRegistersTouched = len( - collectionExecutionSnapshot.AllRegisterIDs()) - for _, entry := range collectionExecutionSnapshot.UpdatedRegisters() { - collector.currentCollectionStats.NumberOfBytesWrittenToRegisters += len( - entry.Value) - } - collector.metrics.ExecutionCollectionExecuted( time.Since(startTime), collector.currentCollectionStats) - collector.blockStats.Merge(collector.currentCollectionStats) + collector.blockStats.Add(collector.currentCollectionStats) collector.blockMeter.MergeMeter(collectionExecutionSnapshot.Meter) collector.currentCollectionStartTime = time.Now() collector.currentCollectionState = state.NewExecutionState(nil, state.DefaultParameters()) - collector.currentCollectionStats = module.ExecutionResultStats{ - NumberOfCollections: 1, - } + collector.currentCollectionStats = module.CollectionExecutionResultStats{} for _, consumer := range collector.consumers { err = consumer.OnExecutedCollection(collector.result.CollectionExecutionResultAt(collection.collectionIndex)) @@ -215,10 +220,45 @@ func (collector *resultCollector) commitCollection( } func (collector *resultCollector) processTransactionResult( - txn transactionRequest, + txn TransactionRequest, 
txnExecutionSnapshot *snapshot.ExecutionSnapshot, output fvm.ProcedureOutput, + timeSpent time.Duration, + numConflictRetries int, ) error { + logger := txn.ctx.Logger.With(). + Uint64("computation_used", output.ComputationUsed). + Uint64("memory_used", output.MemoryEstimate). + Int64("time_spent_in_ms", timeSpent.Milliseconds()). + Float64("normalized_time_per_computation", flow.NormalizedExecutionTimePerComputationUnit(timeSpent, output.ComputationUsed)). + Logger() + + if output.Err != nil { + logger = logger.With(). + Str("error_message", output.Err.Error()). + Uint16("error_code", uint16(output.Err.Code())). + Logger() + logger.Info().Msg("transaction execution failed") + + if txn.transactionType == ComputerTransactionTypeSystem { + // This log is used as the data source for an alert on grafana. + // The critical_error field must not be changed without adding + // the corresponding changes in grafana. + logger.Error(). + Bool("critical_error", true). + Msg("error executing system chunk transaction") + } + } else { + logger.Info().Msg("transaction executed successfully") + } + + collector.handleTransactionExecutionMetrics( + timeSpent, + output, + txnExecutionSnapshot, + txn, + numConflictRetries, + ) txnResult := flow.TransactionResult{ TransactionID: txn.ID, @@ -243,10 +283,6 @@ func (collector *resultCollector) processTransactionResult( return fmt.Errorf("failed to merge into collection view: %w", err) } - collector.currentCollectionStats.ComputationUsed += output.ComputationUsed - collector.currentCollectionStats.MemoryUsed += output.MemoryEstimate - collector.currentCollectionStats.NumberOfTransactions += 1 - if !txn.lastTransactionInCollection { return nil } @@ -257,15 +293,57 @@ func (collector *resultCollector) processTransactionResult( collector.currentCollectionState.Finalize()) } +func (collector *resultCollector) handleTransactionExecutionMetrics( + timeSpent time.Duration, + output fvm.ProcedureOutput, + txnExecutionSnapshot *snapshot.ExecutionSnapshot, + txn TransactionRequest, + numConflictRetries int, +) { + transactionExecutionStats := module.TransactionExecutionResultStats{ + ExecutionResultStats: module.ExecutionResultStats{ + ComputationUsed: output.ComputationUsed, + MemoryUsed: output.MemoryEstimate, + EventCounts: len(output.Events), + EventSize: output.Events.ByteSize(), + NumberOfRegistersTouched: len(txnExecutionSnapshot.AllRegisterIDs()), + }, + ComputationIntensities: output.ComputationIntensities, + NumberOfTxnConflictRetries: numConflictRetries, + Failed: output.Err != nil, + ScheduledTransaction: txn.transactionType == ComputerTransactionTypeScheduled, + SystemTransaction: txn.transactionType == ComputerTransactionTypeSystem, + } + for _, entry := range txnExecutionSnapshot.UpdatedRegisters() { + transactionExecutionStats.NumberOfBytesWrittenToRegisters += len(entry.Value) + } + + collector.metrics.ExecutionTransactionExecuted( + timeSpent, + transactionExecutionStats, + module.TransactionExecutionResultInfo{ + TransactionID: txn.ID, + BlockID: txn.blockId, + BlockHeight: txn.blockHeight, + }, + ) + + collector.currentCollectionStats.Add(transactionExecutionStats) +} + func (collector *resultCollector) AddTransactionResult( - txn transactionRequest, + request TransactionRequest, snapshot *snapshot.ExecutionSnapshot, output fvm.ProcedureOutput, + timeSpent time.Duration, + numConflictRetries int, ) { result := transactionResult{ - transactionRequest: txn, + TransactionRequest: request, ExecutionSnapshot: snapshot, ProcedureOutput: output, + timeSpent: 
timeSpent, + numConflictRetries: numConflictRetries, } select { @@ -281,9 +359,11 @@ func (collector *resultCollector) runResultProcessor() { for result := range collector.processorInputChan { err := collector.processTransactionResult( - result.transactionRequest, + result.TransactionRequest, result.ExecutionSnapshot, - result.ProcedureOutput) + result.ProcedureOutput, + result.timeSpent, + result.numConflictRetries) if err != nil { collector.processorError = err return @@ -311,7 +391,7 @@ func (collector *resultCollector) Finalize( return nil, collector.processorError } - executionDataID, err := collector.executionDataProvider.Provide( + executionDataID, executionDataRoot, err := collector.executionDataProvider.Provide( ctx, collector.result.Height(), collector.result.BlockExecutionData) @@ -319,12 +399,21 @@ func (collector *resultCollector) Finalize( return nil, fmt.Errorf("failed to provide execution data: %w", err) } - executionResult := flow.NewExecutionResult( - collector.parentBlockExecutionResultID, - collector.result.ExecutableBlock.ID(), - collector.result.AllChunks(), - collector.result.AllConvertedServiceEvents(), - executionDataID) + chunks, err := collector.result.AllChunks() + if err != nil { + return nil, fmt.Errorf("failed to retrieve chunks data: %w", err) + } + + executionResult, err := flow.NewExecutionResult(flow.UntrustedExecutionResult{ + PreviousResultID: collector.parentBlockExecutionResultID, + BlockID: collector.result.ExecutableBlock.BlockID(), + Chunks: chunks, + ServiceEvents: collector.result.AllConvertedServiceEvents(), + ExecutionDataID: executionDataID, + }) + if err != nil { + return nil, fmt.Errorf("could not build execution result: %w", err) + } executionReceipt, err := GenerateExecutionReceipt( collector.signer, @@ -336,6 +425,7 @@ func (collector *resultCollector) Finalize( } collector.result.ExecutionReceipt = executionReceipt + collector.result.ExecutionDataRoot = executionDataRoot collector.metrics.ExecutionBlockExecuted( time.Since(collector.blockStartTime), @@ -359,21 +449,33 @@ func GenerateExecutionReceipt( *flow.ExecutionReceipt, error, ) { - receipt := &flow.ExecutionReceipt{ - ExecutionResult: *result, - Spocks: spockSignatures, - ExecutorSignature: crypto.Signature{}, - ExecutorID: signer.NodeID(), + unsignedExecutionReceipt, err := flow.NewUnsignedExecutionReceipt( + flow.UntrustedUnsignedExecutionReceipt{ + ExecutionResult: *result, + Spocks: spockSignatures, + ExecutorID: signer.NodeID(), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct unsigned execution receipt: %w", err) } - // generates a signature over the execution result - id := receipt.ID() - sig, err := signer.Sign(id[:], receiptHasher) + // generates a signature over the execution receipt's body + unsignedReceiptID := unsignedExecutionReceipt.ID() + sig, err := signer.Sign(unsignedReceiptID[:], receiptHasher) if err != nil { return nil, fmt.Errorf("could not sign execution result: %w", err) } - receipt.ExecutorSignature = sig + executionReceipt, err := flow.NewExecutionReceipt( + flow.UntrustedExecutionReceipt{ + UnsignedExecutionReceipt: *unsignedExecutionReceipt, + ExecutorSignature: sig, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct execution receipt: %w", err) + } - return receipt, nil + return executionReceipt, nil } diff --git a/engine/execution/computation/computer/spock_norelic.go b/engine/execution/computation/computer/spock_norelic.go deleted file mode 100644 index 81678d94f33..00000000000 --- 
a/engine/execution/computation/computer/spock_norelic.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !relic -// +build !relic - -package computer - -import ( - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" -) - -// This is a temporary wrapper that simulates a call to SPoCK prove, -// required for the emulator build. The function is never called by the -// emulator although it is required for a successful build. -// -// TODO(tarak): remove once the crypto module properly implements a non-relic -// version of SPOCKProve. -func SPOCKProve( - sk crypto.PrivateKey, - data []byte, - kmac hash.Hasher, -) ( - crypto.Signature, - error, -) { - panic("SPoCK prove not supported when flow-go is built without relic") -} diff --git a/engine/execution/computation/computer/spock_relic.go b/engine/execution/computation/computer/spock_relic.go deleted file mode 100644 index 89a8182ba8f..00000000000 --- a/engine/execution/computation/computer/spock_relic.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build relic -// +build relic - -package computer - -import ( - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" -) - -// This is a temporary wrapper that around the crypto library. -// -// TODO(tarak): remove once the crypto module properly implements a non-relic -// version of SPOCKProve. -func SPOCKProve( - sk crypto.PrivateKey, - data []byte, - kmac hash.Hasher, -) ( - crypto.Signature, - error, -) { - return crypto.SPOCKProve(sk, data, kmac) -} diff --git a/engine/execution/computation/computer/transaction_coordinator.go b/engine/execution/computation/computer/transaction_coordinator.go new file mode 100644 index 00000000000..6ce2cb3757c --- /dev/null +++ b/engine/execution/computation/computer/transaction_coordinator.go @@ -0,0 +1,193 @@ +package computer + +import ( + "sync" + "time" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage" + "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/fvm/storage/logical" + "github.com/onflow/flow-go/fvm/storage/snapshot" +) + +type TransactionWriteBehindLogger interface { + AddTransactionResult( + txn TransactionRequest, + snapshot *snapshot.ExecutionSnapshot, + output fvm.ProcedureOutput, + timeSpent time.Duration, + numTxnConflictRetries int, + ) +} + +// transactionCoordinator provides synchronization functionality for driving +// transaction execution. +type transactionCoordinator struct { + vm fvm.VM + + mutex *sync.Mutex + cond *sync.Cond + + snapshotTime logical.Time // guarded by mutex, cond broadcast on updates. + abortErr error // guarded by mutex, cond broadcast on updates. + + // Note: database commit and result logging must occur within the same + // critical section (guarded by mutex).
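+ // Otherwise, a concurrently committing transaction could interleave between + // the commit and the log call, and the write-behind log would record results + // out of commit order (snapshotTime advances once per commit).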
+ database *storage.BlockDatabase + writeBehindLog TransactionWriteBehindLogger +} + +type transaction struct { + request TransactionRequest + numConflictRetries int + + coordinator *transactionCoordinator + + startedAt time.Time + storage.Transaction + fvm.ProcedureExecutor +} + +func newTransactionCoordinator( + vm fvm.VM, + storageSnapshot snapshot.StorageSnapshot, + cachedDerivedBlockData *derived.DerivedBlockData, + writeBehindLog TransactionWriteBehindLogger, +) *transactionCoordinator { + mutex := &sync.Mutex{} + cond := sync.NewCond(mutex) + + database := storage.NewBlockDatabase( + storageSnapshot, + 0, + cachedDerivedBlockData) + + return &transactionCoordinator{ + vm: vm, + mutex: mutex, + cond: cond, + snapshotTime: 0, + abortErr: nil, + database: database, + writeBehindLog: writeBehindLog, + } +} + +func (coordinator *transactionCoordinator) SnapshotTime() logical.Time { + coordinator.mutex.Lock() + defer coordinator.mutex.Unlock() + + return coordinator.snapshotTime +} + +func (coordinator *transactionCoordinator) Error() error { + coordinator.mutex.Lock() + defer coordinator.mutex.Unlock() + + return coordinator.abortErr +} + +func (coordinator *transactionCoordinator) AbortAllOutstandingTransactions( + err error, +) { + coordinator.mutex.Lock() + defer coordinator.mutex.Unlock() + + if coordinator.abortErr != nil { // Transactions are already aborting. + return + } + + coordinator.abortErr = err + coordinator.cond.Broadcast() +} + +func (coordinator *transactionCoordinator) NewTransaction( + request TransactionRequest, + attempt int, +) ( + *transaction, + error, +) { + err := coordinator.Error() + if err != nil { + return nil, err + } + + txn, err := coordinator.database.NewTransaction( + request.ExecutionTime(), + fvm.ProcedureStateParameters(request.ctx, request)) + if err != nil { + return nil, err + } + + return &transaction{ + request: request, + coordinator: coordinator, + numConflictRetries: attempt, + startedAt: time.Now(), + Transaction: txn, + ProcedureExecutor: coordinator.vm.NewExecutor( + request.ctx, + request.TransactionProcedure, + txn), + }, nil +} + +func (coordinator *transactionCoordinator) commit(txn *transaction) error { + coordinator.mutex.Lock() + defer coordinator.mutex.Unlock() + + if coordinator.abortErr != nil { + return coordinator.abortErr + } + + executionSnapshot, err := txn.Transaction.Commit() + if err != nil { + return err + } + + coordinator.writeBehindLog.AddTransactionResult( + txn.request, + executionSnapshot, + txn.Output(), + time.Since(txn.startedAt), + txn.numConflictRetries) + + // Commit advances the database's snapshot. + coordinator.snapshotTime += 1 + coordinator.cond.Broadcast() + + return nil +} + +func (txn *transaction) Commit() error { + return txn.coordinator.commit(txn) +} + +func (coordinator *transactionCoordinator) waitForUpdatesNewerThan( + snapshotTime logical.Time, +) ( + logical.Time, + error, + logical.Time, + error, +) { + coordinator.mutex.Lock() + defer coordinator.mutex.Unlock() + + startTime := coordinator.snapshotTime + startErr := coordinator.abortErr + for coordinator.snapshotTime <= snapshotTime && coordinator.abortErr == nil { + coordinator.cond.Wait() + } + + return startTime, startErr, coordinator.snapshotTime, coordinator.abortErr +} + +func (txn *transaction) WaitForUpdates() error { + // Note: the first three returned values are only used by tests to ensure + // the function correctly waited.
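+ // The call returns once the coordinator's snapshot time is newer than this + // transaction's snapshot (i.e., a later transaction has committed), or once + // block execution has been aborted, in which case the abort error is returned.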
+ _, _, _, err := txn.coordinator.waitForUpdatesNewerThan(txn.SnapshotTime()) + return err +} diff --git a/engine/execution/computation/computer/transaction_coordinator_test.go b/engine/execution/computation/computer/transaction_coordinator_test.go new file mode 100644 index 00000000000..0cee4598883 --- /dev/null +++ b/engine/execution/computation/computer/transaction_coordinator_test.go @@ -0,0 +1,358 @@ +package computer + +import ( + "fmt" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage" + "github.com/onflow/flow-go/fvm/storage/logical" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/model/flow" +) + +type testCoordinatorVM struct{} + +func (testCoordinatorVM) NewExecutor( + ctx fvm.Context, + proc fvm.Procedure, + txnState storage.TransactionPreparer, +) fvm.ProcedureExecutor { + return testCoordinatorExecutor{ + executionTime: proc.ExecutionTime(), + } +} + +func (testCoordinatorVM) Run( + ctx fvm.Context, + proc fvm.Procedure, + storageSnapshot snapshot.StorageSnapshot, +) ( + *snapshot.ExecutionSnapshot, + fvm.ProcedureOutput, + error, +) { + panic("not implemented") +} + +func (testCoordinatorVM) GetAccount( + _ fvm.Context, + _ flow.Address, + _ snapshot.StorageSnapshot, +) ( + *flow.Account, + error, +) { + panic("not implemented") +} + +type testCoordinatorExecutor struct { + executionTime logical.Time +} + +func (testCoordinatorExecutor) Cleanup() {} + +func (testCoordinatorExecutor) Preprocess() error { + return nil +} + +func (testCoordinatorExecutor) Execute() error { + return nil +} + +func (executor testCoordinatorExecutor) Output() fvm.ProcedureOutput { + return fvm.ProcedureOutput{ + ComputationUsed: uint64(executor.executionTime), + } +} + +type testCoordinator struct { + *transactionCoordinator + committed []uint64 +} + +func newTestCoordinator(t *testing.T) *testCoordinator { + db := &testCoordinator{} + db.transactionCoordinator = newTransactionCoordinator( + testCoordinatorVM{}, + nil, + nil, + db) + + require.Equal(t, db.SnapshotTime(), logical.Time(0)) + + // commit a transaction to increment the snapshot time + setupTxn, err := db.newTransaction(0) + require.NoError(t, err) + + err = setupTxn.Finalize() + require.NoError(t, err) + + err = setupTxn.Commit() + require.NoError(t, err) + + require.Equal(t, db.SnapshotTime(), logical.Time(1)) + + return db + +} + +func (db *testCoordinator) AddTransactionResult( + txn TransactionRequest, + snapshot *snapshot.ExecutionSnapshot, + output fvm.ProcedureOutput, + timeSpent time.Duration, + numConflictRetries int, +) { + db.committed = append(db.committed, output.ComputationUsed) +} + +func (db *testCoordinator) newTransaction(txnIndex uint32) ( + *transaction, + error, +) { + return db.NewTransaction( + newTransactionRequest( + collectionInfo{}, + fvm.NewContext(), + zerolog.Nop(), + txnIndex, + &flow.TransactionBody{}, + ComputerTransactionTypeUser, + false), + 0) +} + +type testWaitValues struct { + startTime logical.Time + startErr error + snapshotTime logical.Time + abortErr error +} + +func (db *testCoordinator) setupWait(txn *transaction) chan testWaitValues { + ret := make(chan testWaitValues, 1) + go func() { + startTime, startErr, snapshotTime, abortErr := db.waitForUpdatesNewerThan( + txn.SnapshotTime()) + ret <- testWaitValues{ + startTime: startTime, + startErr: startErr, + snapshotTime: snapshotTime, + abortErr: abortErr, + } + }() + + // Sleep a bit to ensure 
goroutine is running before returning the channel. + time.Sleep(10 * time.Millisecond) + return ret +} + +func TestTransactionCoordinatorBasicCommit(t *testing.T) { + db := newTestCoordinator(t) + + txns := []*transaction{} + for i := uint32(1); i < 6; i++ { + txn, err := db.newTransaction(i) + require.NoError(t, err) + + txns = append(txns, txn) + } + + for i, txn := range txns { + executionTime := logical.Time(1 + i) + + require.Equal(t, txn.SnapshotTime(), logical.Time(1)) + + err := txn.Finalize() + require.NoError(t, err) + + err = txn.Validate() + require.NoError(t, err) + + require.Equal(t, txn.SnapshotTime(), executionTime) + + err = txn.Commit() + require.NoError(t, err) + + require.Equal(t, db.SnapshotTime(), executionTime+1) + } + + require.Equal(t, db.committed, []uint64{0, 1, 2, 3, 4, 5}) +} + +func TestTransactionCoordinatorBlockingWaitForCommit(t *testing.T) { + db := newTestCoordinator(t) + + testTxn, err := db.newTransaction(6) + require.NoError(t, err) + + require.Equal(t, db.SnapshotTime(), logical.Time(1)) + require.Equal(t, testTxn.SnapshotTime(), logical.Time(1)) + + ret := db.setupWait(testTxn) + + setupTxn, err := db.newTransaction(1) + require.NoError(t, err) + + err = setupTxn.Finalize() + require.NoError(t, err) + + err = setupTxn.Commit() + require.NoError(t, err) + + require.Equal(t, db.SnapshotTime(), logical.Time(2)) + + select { + case val := <-ret: + require.Equal( + t, + val, + testWaitValues{ + startTime: 1, + startErr: nil, + snapshotTime: 2, + abortErr: nil, + }) + case <-time.After(time.Second): + require.Fail(t, "Failed to return result") + } + + require.Equal(t, testTxn.SnapshotTime(), logical.Time(1)) + + err = testTxn.Validate() + require.NoError(t, err) + + require.Equal(t, testTxn.SnapshotTime(), logical.Time(2)) + +} + +func TestTransactionCoordinatorNonblockingWaitForCommit(t *testing.T) { + db := newTestCoordinator(t) + + testTxn, err := db.newTransaction(6) + require.NoError(t, err) + + setupTxn, err := db.newTransaction(1) + require.NoError(t, err) + + err = setupTxn.Finalize() + require.NoError(t, err) + + err = setupTxn.Commit() + require.NoError(t, err) + + require.Equal(t, db.SnapshotTime(), logical.Time(2)) + require.Equal(t, testTxn.SnapshotTime(), logical.Time(1)) + + ret := db.setupWait(testTxn) + + select { + case val := <-ret: + require.Equal( + t, + val, + testWaitValues{ + startTime: 2, + startErr: nil, + snapshotTime: 2, + abortErr: nil, + }) + case <-time.After(time.Second): + require.Fail(t, "Failed to return result") + } +} + +func TestTransactionCoordinatorBasicAbort(t *testing.T) { + db := newTestCoordinator(t) + + txn, err := db.newTransaction(1) + require.NoError(t, err) + + abortErr := fmt.Errorf("abort") + db.AbortAllOutstandingTransactions(abortErr) + + err = txn.Finalize() + require.NoError(t, err) + + err = txn.Commit() + require.Equal(t, err, abortErr) + + txn, err = db.newTransaction(2) + require.Equal(t, err, abortErr) + require.Nil(t, txn) +} + +func TestTransactionCoordinatorBlockingWaitForAbort(t *testing.T) { + db := newTestCoordinator(t) + + testTxn, err := db.newTransaction(6) + require.NoError(t, err) + + // start waiting before aborting. 
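+ // the abort broadcast, not a commit, is expected to release the waiter, + // so the snapshot time should still be logical.Time(1) afterwards.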
+ require.Equal(t, testTxn.SnapshotTime(), logical.Time(1)) + ret := db.setupWait(testTxn) + + abortErr := fmt.Errorf("abort") + db.AbortAllOutstandingTransactions(abortErr) + + select { + case val := <-ret: + require.Equal( + t, + val, + testWaitValues{ + startTime: 1, + startErr: nil, + snapshotTime: 1, + abortErr: abortErr, + }) + case <-time.After(time.Second): + require.Fail(t, "Failed to return result") + } + + err = testTxn.Finalize() + require.NoError(t, err) + + err = testTxn.Commit() + require.Equal(t, err, abortErr) +} + +func TestTransactionCoordinatorNonblockingWaitForAbort(t *testing.T) { + db := newTestCoordinator(t) + + testTxn, err := db.newTransaction(6) + require.NoError(t, err) + + // start aborting before waiting. + abortErr := fmt.Errorf("abort") + db.AbortAllOutstandingTransactions(abortErr) + + require.Equal(t, testTxn.SnapshotTime(), logical.Time(1)) + ret := db.setupWait(testTxn) + + select { + case val := <-ret: + require.Equal( + t, + val, + testWaitValues{ + startTime: 1, + startErr: abortErr, + snapshotTime: 1, + abortErr: abortErr, + }) + case <-time.After(time.Second): + require.Fail(t, "Failed to return result") + } + + err = testTxn.Finalize() + require.NoError(t, err) + + err = testTxn.Commit() + require.Equal(t, err, abortErr) +} diff --git a/engine/execution/computation/execution_verification_test.go b/engine/execution/computation/execution_verification_test.go index fd4e4c8c0a0..21500d43a4d 100644 --- a/engine/execution/computation/execution_verification_test.go +++ b/engine/execution/computation/execution_verification_test.go @@ -6,16 +6,16 @@ import ( "fmt" "testing" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" "github.com/onflow/flow-go/engine/execution" "github.com/onflow/flow-go/engine/execution/computation/committer" @@ -25,16 +25,17 @@ import ( "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/engine/execution/utils" "github.com/onflow/flow-go/engine/testutil/mocklocal" - "github.com/onflow/flow-go/engine/verification/fetcher" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/fvm/systemcontracts" completeLedger "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/wal/fixtures" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/model/verification/convert" "github.com/onflow/flow-go/module/chunks" "github.com/onflow/flow-go/module/executiondatasync/execution_data" exedataprovider "github.com/onflow/flow-go/module/executiondatasync/provider" @@ -45,6 +46,10 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) +const ( + testVerifyMaxConcurrency = 2 +) + var chain = flow.Emulator.Chain() // In the following tests the system transaction is expected to fail, as the epoch related things are not set up properly. 
@@ -60,30 +65,34 @@ func Test_ExecutionMatchesVerification(t *testing.T) { t.Run("single transaction event", func(t *testing.T) { - deployTx := blueprints.DeployContractTransaction(chain.ServiceAddress(), []byte(""+ - `pub contract Foo { - pub event FooEvent(x: Int, y: Int) + deployTxBuilder := blueprints.DeployContractTransaction(chain.ServiceAddress(), []byte(""+ + `access(all) contract Foo { + access(all) event FooEvent(x: Int, y: Int) - pub fun event() { + access(all) fun emitEvent() { emit FooEvent(x: 2, y: 1) } }`), "Foo") - emitTx := &flow.TransactionBody{ - Script: []byte(fmt.Sprintf(` + emitTxBuilder := flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf(` import Foo from 0x%s transaction { prepare() {} execute { - Foo.event() + Foo.emitEvent() } - }`, chain.ServiceAddress())), - } + }`, chain.ServiceAddress()))) - err := testutil.SignTransactionAsServiceAccount(deployTx, 0, chain) + err := testutil.SignTransactionAsServiceAccount(deployTxBuilder, 0, chain) require.NoError(t, err) - err = testutil.SignTransactionAsServiceAccount(emitTx, 1, chain) + err = testutil.SignTransactionAsServiceAccount(emitTxBuilder, 1, chain) + require.NoError(t, err) + + deployTx, err := deployTxBuilder.Build() + require.NoError(t, err) + emitTx, err := emitTxBuilder.Build() require.NoError(t, err) cr := executeBlockAndVerify(t, [][]*flow.TransactionBody{ @@ -103,50 +112,58 @@ func Test_ExecutionMatchesVerification(t *testing.T) { }) t.Run("multiple collections events", func(t *testing.T) { + deployTxBuilder := blueprints.DeployContractTransaction(chain.ServiceAddress(), []byte(""+ + `access(all) contract Foo { + access(all) event FooEvent(x: Int, y: Int) - deployTx := blueprints.DeployContractTransaction(chain.ServiceAddress(), []byte(""+ - `pub contract Foo { - pub event FooEvent(x: Int, y: Int) - - pub fun event() { + access(all) fun emitEvent() { emit FooEvent(x: 2, y: 1) } }`), "Foo") - emitTx1 := flow.TransactionBody{ - Script: []byte(fmt.Sprintf(` + emitTx1Builder := flow.NewTransactionBodyBuilder(). 
+ SetScript([]byte(fmt.Sprintf(` import Foo from 0x%s transaction { prepare() {} execute { - Foo.event() + Foo.emitEvent() } - }`, chain.ServiceAddress())), - } + }`, chain.ServiceAddress()))) // copy txs - emitTx2 := emitTx1 - emitTx3 := emitTx1 + emitTx2Builder := *emitTx1Builder + emitTx3Builder := *emitTx1Builder - err := testutil.SignTransactionAsServiceAccount(deployTx, 0, chain) + err := testutil.SignTransactionAsServiceAccount(deployTxBuilder, 0, chain) require.NoError(t, err) - err = testutil.SignTransactionAsServiceAccount(&emitTx1, 1, chain) + err = testutil.SignTransactionAsServiceAccount(emitTx1Builder, 1, chain) require.NoError(t, err) - err = testutil.SignTransactionAsServiceAccount(&emitTx2, 2, chain) + + err = testutil.SignTransactionAsServiceAccount(&emitTx2Builder, 2, chain) require.NoError(t, err) - err = testutil.SignTransactionAsServiceAccount(&emitTx3, 3, chain) + err = testutil.SignTransactionAsServiceAccount(&emitTx3Builder, 3, chain) + require.NoError(t, err) + + deployTx, err := deployTxBuilder.Build() + require.NoError(t, err) + emitTx1, err := emitTx1Builder.Build() + require.NoError(t, err) + emitTx2, err := emitTx2Builder.Build() + require.NoError(t, err) + emitTx3, err := emitTx3Builder.Build() require.NoError(t, err) cr := executeBlockAndVerify(t, [][]*flow.TransactionBody{ { - deployTx, &emitTx1, + deployTx, emitTx1, }, { - &emitTx2, + emitTx2, }, { - &emitTx3, + emitTx3, }, }, fvm.BootstrapProcedureFeeParameters{}, fvm.DefaultMinimumStorageReservation) @@ -186,20 +203,26 @@ func Test_ExecutionMatchesVerification(t *testing.T) { t.Run("with failed storage limit", func(t *testing.T) { - accountPrivKey, createAccountTx := testutil.CreateAccountCreationTransaction(t, chain) + accountPrivKey, createAccountTxBuilder := testutil.CreateAccountCreationTransaction(t, chain) // this should return the address of newly created account - accountAddress, err := chain.AddressAtIndex(5) + accountAddress, err := chain.AddressAtIndex(systemcontracts.LastSystemAccountIndex + 1) require.NoError(t, err) - err = testutil.SignTransactionAsServiceAccount(createAccountTx, 0, chain) + err = testutil.SignTransactionAsServiceAccount(createAccountTxBuilder, 0, chain) require.NoError(t, err) - addKeyTx := testutil.CreateAddAnAccountKeyMultipleTimesTransaction(t, &accountPrivKey, 100).AddAuthorizer(accountAddress) - err = testutil.SignTransaction(addKeyTx, accountAddress, accountPrivKey, 0) + addKeyTxBuilder := testutil.CreateAddAnAccountKeyMultipleTimesTransaction(t, &accountPrivKey, 100). 
+ AddAuthorizer(accountAddress) + err = testutil.SignTransaction(addKeyTxBuilder, accountAddress, accountPrivKey, 0) require.NoError(t, err) - minimumStorage, err := cadence.NewUFix64("0.00010807") + minimumStorage, err := cadence.NewUFix64("0.00011661") + require.NoError(t, err) + + createAccountTx, err := createAccountTxBuilder.Build() + require.NoError(t, err) + addKeyTx, err := addKeyTxBuilder.Build() require.NoError(t, err) cr := executeBlockAndVerify(t, [][]*flow.TransactionBody{ @@ -215,9 +238,9 @@ func Test_ExecutionMatchesVerification(t *testing.T) { txResults := colResult.TransactionResults() // storage limit error assert.Len(t, txResults, 1) - assert.Equal(t, txResults[0].ErrorMessage, "") + assert.Equal(t, "", txResults[0].ErrorMessage) // ensure events from the first transaction is emitted - require.Len(t, colResult.Events(), 10) + require.Len(t, colResult.Events(), 18) colResult = cr.CollectionExecutionResultAt(1) txResults = colResult.TransactionResults() @@ -225,20 +248,21 @@ func Test_ExecutionMatchesVerification(t *testing.T) { // storage limit error assert.Contains(t, txResults[0].ErrorMessage, errors.ErrCodeStorageCapacityExceeded.String()) // ensure fee deduction events are emitted even though tx fails - require.Len(t, colResult.Events(), 3) + require.Len(t, colResult.Events(), 5) }) t.Run("with failed transaction fee deduction", func(t *testing.T) { - accountPrivKey, createAccountTx := testutil.CreateAccountCreationTransaction(t, chain) + accountPrivKey, createAccountTxBuilder := testutil.CreateAccountCreationTransaction(t, chain) + // this should return the address of newly created account - accountAddress, err := chain.AddressAtIndex(5) + accountAddress, err := chain.AddressAtIndex(systemcontracts.LastSystemAccountIndex + 1) require.NoError(t, err) - err = testutil.SignTransactionAsServiceAccount(createAccountTx, 0, chain) + err = testutil.SignTransactionAsServiceAccount(createAccountTxBuilder, 0, chain) require.NoError(t, err) - spamTx := &flow.TransactionBody{ - Script: []byte(` + spamTxBuilder := flow.NewTransactionBodyBuilder(). 
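+ // Deliberately long-running spam script; the chain ends with SetComputeLimit(800000), raising the + // limit well above the default, presumably so the loop below can run to completion.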
+ SetScript([]byte(` transaction { prepare() {} execute { @@ -255,13 +279,14 @@ func Test_ExecutionMatchesVerification(t *testing.T) { } log(i) } - }`), - } + }`)).SetComputeLimit(800000) - spamTx.SetGasLimit(800000) - err = testutil.SignTransaction(spamTx, accountAddress, accountPrivKey, 0) + err = testutil.SignTransaction(spamTxBuilder, accountAddress, accountPrivKey, 0) require.NoError(t, err) + createAccountTx, err := createAccountTxBuilder.Build() + require.NoError(t, err) + spamTx, err := spamTxBuilder.Build() require.NoError(t, err) cr := executeBlockAndVerifyWithParameters(t, [][]*flow.TransactionBody{ @@ -297,7 +322,7 @@ func Test_ExecutionMatchesVerification(t *testing.T) { transactionEvents += 1 } } - require.Equal(t, 10, transactionEvents) + require.Equal(t, 18, transactionEvents) assert.Contains(t, txResults[1].ErrorMessage, errors.ErrCodeStorageCapacityExceeded.String()) @@ -308,7 +333,7 @@ func Test_ExecutionMatchesVerification(t *testing.T) { transactionEvents += 1 } } - require.Equal(t, 3, transactionEvents) + require.Equal(t, 5, transactionEvents) }) } @@ -326,6 +351,11 @@ func TestTransactionFeeDeduction(t *testing.T) { fundingAmount := uint64(100_000_000) transferAmount := uint64(123_456) + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + depositedEvent := fmt.Sprintf("A.%s.FlowToken.TokensDeposited", sc.FlowToken.Address) + withdrawnEvent := fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", sc.FlowToken.Address) + testCases := []testCase{ { name: "Transaction fee deduction emits events", @@ -344,15 +374,15 @@ func TestTransactionFeeDeduction(t *testing.T) { // events of the first collection events := cr.CollectionExecutionResultAt(2).Events() for _, e := range events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } - require.Len(t, deposits, 2) + require.Len(t, deposits, 1) require.Len(t, withdraws, 2) }, }, @@ -373,15 +403,15 @@ func TestTransactionFeeDeduction(t *testing.T) { // events of the last collection events := cr.CollectionExecutionResultAt(2).Events() for _, e := range events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } - require.Len(t, deposits, 2) + require.Len(t, deposits, 1) require.Len(t, withdraws, 2) }, }, @@ -404,15 +434,15 @@ func TestTransactionFeeDeduction(t *testing.T) { // events of the last collection events := cr.CollectionExecutionResultAt(2).Events() for _, e := range events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } - require.Len(t, deposits, 2) + require.Len(t, deposits, 1) require.Len(t, withdraws, 2) }, }, @@ -433,10 +463,10 @@ func TestTransactionFeeDeduction(t *testing.T) { // events of the last collection events := 
cr.CollectionExecutionResultAt(2).Events() for _, e := range events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } @@ -465,15 +495,15 @@ func TestTransactionFeeDeduction(t *testing.T) { // events of the last collection events := cr.CollectionExecutionResultAt(2).Events() for _, e := range events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } - require.Len(t, deposits, 2) + require.Len(t, deposits, 1) require.Len(t, withdraws, 2) }, }, @@ -494,15 +524,15 @@ func TestTransactionFeeDeduction(t *testing.T) { // events of the last collection events := cr.CollectionExecutionResultAt(2).Events() for _, e := range events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } - require.Len(t, deposits, 2) + require.Len(t, deposits, 1) require.Len(t, withdraws, 2) }, }, @@ -523,10 +553,10 @@ func TestTransactionFeeDeduction(t *testing.T) { // events of the last collection events := cr.CollectionExecutionResultAt(2).Events() for _, e := range events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } @@ -552,10 +582,10 @@ func TestTransactionFeeDeduction(t *testing.T) { // events of the last collection events := cr.CollectionExecutionResultAt(2).Events() for _, e := range events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } @@ -566,8 +596,8 @@ func TestTransactionFeeDeduction(t *testing.T) { }, } - transferTokensTx := func(chain flow.Chain) *flow.TransactionBody { - return flow.NewTransactionBody(). + transferTokensTx := func(chain flow.Chain) *flow.TransactionBodyBuilder { + return flow.NewTransactionBodyBuilder(). 
SetScript([]byte(fmt.Sprintf(` // This transaction is a template for a transaction that // could be used by anyone to send tokens to another account @@ -575,39 +605,38 @@ func TestTransactionFeeDeduction(t *testing.T) { // // The withdraw amount and the account from getAccount // would be the parameters to the transaction - + import FungibleToken from 0x%s import FlowToken from 0x%s - + transaction(amount: UFix64, to: Address) { - + // The Vault resource that holds the tokens that are being transferred - let sentVault: @FungibleToken.Vault - - prepare(signer: AuthAccount) { - + let sentVault: @{FungibleToken.Vault} + + prepare(signer: auth(BorrowValue) &Account) { + // Get a reference to the signer's stored vault - let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner's Vault!") - + // Withdraw tokens from the signer's stored vault self.sentVault <- vaultRef.withdraw(amount: amount) } - + execute { - + // Get the recipient's public account object let recipient = getAccount(to) - + // Get a reference to the recipient's Receiver - let receiverRef = recipient.getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() + let receiverRef = recipient.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? panic("Could not borrow receiver reference to the recipient's Vault") - + // Deposit the withdrawn tokens in the recipient's receiver receiverRef.deposit(from: <-self.sentVault) } - }`, fvm.FungibleTokenAddress(chain), fvm.FlowTokenAddress(chain))), + }`, sc.FungibleToken.Address, sc.FlowToken.Address)), ) } @@ -616,26 +645,25 @@ func TestTransactionFeeDeduction(t *testing.T) { bootstrapOpts []fvm.BootstrapProcedureOption) func(t *testing.T) { return func(t *testing.T) { // ==== Create an account ==== - privateKey, createAccountTx := testutil.CreateAccountCreationTransaction(t, chain) + privateKey, createAccountTxBuilder := testutil.CreateAccountCreationTransaction(t, chain) // this should return the address of newly created account - address, err := chain.AddressAtIndex(5) + address, err := chain.AddressAtIndex(systemcontracts.LastSystemAccountIndex + 1) require.NoError(t, err) - err = testutil.SignTransactionAsServiceAccount(createAccountTx, 0, chain) + err = testutil.SignTransactionAsServiceAccount(createAccountTxBuilder, 0, chain) require.NoError(t, err) // ==== Transfer tokens to new account ==== - transferTx := transferTokensTx(chain). + transferTxBuilder := transferTokensTx(chain). AddAuthorizer(chain.ServiceAddress()). AddArgument(jsoncdc.MustEncode(cadence.UFix64(tc.fundWith))). - AddArgument(jsoncdc.MustEncode(cadence.NewAddress(address))) - - transferTx.SetProposalKey(chain.ServiceAddress(), 0, 1) - transferTx.SetPayer(chain.ServiceAddress()) + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(address))). + SetProposalKey(chain.ServiceAddress(), 0, 1). + SetPayer(chain.ServiceAddress()) err = testutil.SignEnvelope( - transferTx, + transferTxBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey, ) @@ -643,21 +671,29 @@ func TestTransactionFeeDeduction(t *testing.T) { // ==== Transfer tokens from new account ==== - transferTx2 := transferTokensTx(chain). + transferTx2Builder := transferTokensTx(chain). AddAuthorizer(address). AddArgument(jsoncdc.MustEncode(cadence.UFix64(tc.tryToTransfer))). 
- AddArgument(jsoncdc.MustEncode(cadence.NewAddress(chain.ServiceAddress()))) - - transferTx2.SetProposalKey(address, 0, 0) - transferTx2.SetPayer(address) + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(chain.ServiceAddress()))). + SetProposalKey(address, 0, 0). + SetPayer(address) err = testutil.SignEnvelope( - transferTx2, + transferTx2Builder, address, privateKey, ) require.NoError(t, err) + createAccountTx, err := createAccountTxBuilder.Build() + require.NoError(t, err) + + transferTx, err := transferTxBuilder.Build() + require.NoError(t, err) + + transferTx2, err := transferTx2Builder.Build() + require.NoError(t, err) + cr := executeBlockAndVerifyWithParameters(t, [][]*flow.TransactionBody{ { createAccountTx, @@ -763,7 +799,10 @@ func executeBlockAndVerifyWithParameters(t *testing.T, sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed) require.NoError(t, err) myIdentity.StakingPubKey = sk.PublicKey() - me := mocklocal.NewMockLocal(sk, myIdentity.ID(), t) + me := mocklocal.NewMockLocal(sk, myIdentity.NodeID, t) + + // used by computer to generate the prng used in the service tx + stateForRandomSource := testutil.ProtocolStateWithSourceFixture(nil) blockComputer, err := computer.NewBlockComputer( vm, @@ -774,7 +813,9 @@ func executeBlockAndVerifyWithParameters(t *testing.T, ledgerCommiter, me, prov, - nil) + nil, + stateForRandomSource, + testVerifyMaxConcurrency) require.NoError(t, err) executableBlock := unittest.ExecutableBlockFromTransactions(chain.ChainID(), txs) @@ -798,7 +839,7 @@ func executeBlockAndVerifyWithParameters(t *testing.T, snapshot := res.ExecutionSnapshot() valid, err := crypto.SPOCKVerifyAgainstData( myIdentity.StakingPubKey, - computationResult.Spocks[i], + computationResult.ExecutionReceipt.Spocks[i], snapshot.SpockSecret, spockHasher) require.NoError(t, err) @@ -806,19 +847,20 @@ func executeBlockAndVerifyWithParameters(t *testing.T, } receipt := computationResult.ExecutionReceipt - receiptID := receipt.ID() + unsignedReceiptID := receipt.UnsignedExecutionReceipt.ID() valid, err := myIdentity.StakingPubKey.Verify( receipt.ExecutorSignature, - receiptID[:], + unsignedReceiptID[:], utils.NewExecutionReceiptHasher()) require.NoError(t, err) require.True(t, valid) - chdps := computationResult.AllChunkDataPacks() + chdps, err := computationResult.AllChunkDataPacks() + require.NoError(t, err) require.Equal(t, len(chdps), len(receipt.Spocks)) - er := &computationResult.ExecutionResult + er := &computationResult.ExecutionReceipt.ExecutionResult verifier := chunks.NewChunkVerifier(vm, fvmContext, logger) @@ -826,26 +868,28 @@ func executeBlockAndVerifyWithParameters(t *testing.T, for i, chunk := range er.Chunks { isSystemChunk := i == er.Chunks.Len()-1 - offsetForChunk, err := fetcher.TransactionOffsetForChunk(er.Chunks, chunk.Index) + offsetForChunk, err := convert.TransactionOffsetForChunk(er.Chunks, chunk.Index) require.NoError(t, err) vcds[i] = &verification.VerifiableChunkData{ IsSystemChunk: isSystemChunk, Chunk: chunk, - Header: executableBlock.Block.Header, + Header: executableBlock.Block.ToHeader(), Result: er, ChunkDataPack: chdps[i], EndState: chunk.EndState, TransactionOffset: offsetForChunk, + // returns the same RandomSource used by the computer + Snapshot: stateForRandomSource.AtBlockID(chunk.BlockID), } } require.Len(t, vcds, len(txs)+1) // +1 for system chunk for _, vcd := range vcds { - _, fault, err := verifier.Verify(vcd) + spockSecret, err := verifier.Verify(vcd) assert.NoError(t, err) - assert.Nil(t, fault) + assert.NotNil(t, 
spockSecret) } return computationResult diff --git a/engine/execution/computation/manager.go b/engine/execution/computation/manager.go index ae45c80fd89..9ff4be34890 100644 --- a/engine/execution/computation/manager.go +++ b/engine/execution/computation/manager.go @@ -35,6 +35,7 @@ type ComputationManager interface { snapshot snapshot.StorageSnapshot, ) ( []byte, + uint64, error, ) @@ -61,9 +62,9 @@ type ComputationManager interface { type ComputationConfig struct { query.QueryConfig - CadenceTracing bool ExtensiveTracing bool DerivedDataCacheSize uint + MaxConcurrency int // When NewCustomVirtualMachine is nil, the manager will create a standard // fvm virtual machine via fvm.NewVirtualMachine. Otherwise, the manager @@ -89,10 +90,10 @@ func New( metrics module.ExecutionMetrics, tracer module.Tracer, me module.Local, - protoState protocol.State, + protoState protocol.SnapshotExecutionSubsetProvider, vmCtx fvm.Context, committer computer.ViewCommitter, - executionDataProvider *provider.Provider, + executionDataProvider provider.Provider, params ComputationConfig, ) (*Manager, error) { log := logger.With().Str("engine", "computation").Logger() @@ -105,24 +106,7 @@ func New( } chainID := vmCtx.Chain.ChainID() - - options := []fvm.Option{ - fvm.WithReusableCadenceRuntimePool( - reusableRuntime.NewReusableCadenceRuntimePool( - ReusableCadenceRuntimePoolSize, - runtime.Config{ - TracingEnabled: params.CadenceTracing, - AccountLinkingEnabled: true, - // Attachments are enabled everywhere except for Mainnet - AttachmentsEnabled: chainID != flow.Mainnet, - }, - ), - ), - } - if params.ExtensiveTracing { - options = append(options, fvm.WithExtensiveTracing()) - } - + options := DefaultFVMOptions(chainID, params.ExtensiveTracing, vmCtx.ScheduleCallbacksEnabled) vmCtx = fvm.NewContextFromParent(vmCtx, options...) blockComputer, err := computer.NewBlockComputer( @@ -135,6 +119,8 @@ func New( me, executionDataProvider, nil, // TODO(ramtin): update me with proper consumers + protoState, + params.MaxConcurrency, ) if err != nil { @@ -153,6 +139,7 @@ func New( vm, vmCtx, derivedChainData, + protoState, ) e := Manager{ @@ -182,7 +169,7 @@ func (e *Manager) ComputeBlock( Msg("received complete block") derivedBlockData := e.derivedChainData.GetOrCreateDerivedBlockData( - block.ID(), + block.BlockID(), block.ParentID()) result, err := e.blockComputer.ExecuteBlock( @@ -192,10 +179,6 @@ func (e *Manager) ComputeBlock( snapshot, derivedBlockData) if err != nil { - e.log.Error(). - Hex("block_id", logging.Entity(block.Block)). 
- Msg("failed to compute block result") - return nil, fmt.Errorf("failed to execute block: %w", err) } @@ -212,7 +195,7 @@ func (e *Manager) ExecuteScript( arguments [][]byte, blockHeader *flow.Header, snapshot snapshot.StorageSnapshot, -) ([]byte, error) { +) ([]byte, uint64, error) { return e.queryExecutor.ExecuteScript(ctx, code, arguments, @@ -235,3 +218,27 @@ func (e *Manager) GetAccount( blockHeader, snapshot) } + +func (e *Manager) QueryExecutor() query.Executor { + return e.queryExecutor +} + +func DefaultFVMOptions(chainID flow.ChainID, extensiveTracing bool, scheduleCallbacksEnabled bool) []fvm.Option { + options := []fvm.Option{ + fvm.WithChain(chainID.Chain()), + fvm.WithReusableCadenceRuntimePool( + reusableRuntime.NewReusableCadenceRuntimePool( + ReusableCadenceRuntimePoolSize, + runtime.Config{}, + ), + ), + fvm.WithEVMEnabled(true), + fvm.WithScheduleCallbacksEnabled(scheduleCallbacksEnabled), + } + + if extensiveTracing { + options = append(options, fvm.WithExtensiveTracing()) + } + + return options +} diff --git a/engine/execution/computation/manager_benchmark_test.go b/engine/execution/computation/manager_benchmark_test.go index 1b553ec80ee..fa3d91e0fed 100644 --- a/engine/execution/computation/manager_benchmark_test.go +++ b/engine/execution/computation/manager_benchmark_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" "github.com/onflow/cadence/runtime" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" @@ -84,11 +84,14 @@ func mustFundAccounts( ) snapshot.SnapshotTree { var err error for _, acc := range accs.accounts { - transferTx := testutil.CreateTokenTransferTransaction(chain, 1_000_000, acc.address, chain.ServiceAddress()) - err = testutil.SignTransactionAsServiceAccount(transferTx, accs.seq, chain) + transferTxBuilder := testutil.CreateTokenTransferTransaction(chain, 1_000_000, acc.address, chain.ServiceAddress()) + err = testutil.SignTransactionAsServiceAccount(transferTxBuilder, accs.seq, chain) require.NoError(b, err) accs.seq++ + transferTx, err := transferTxBuilder.Build() + require.NoError(b, err) + executionSnapshot, output, err := vm.Run( execCtx, fvm.Transaction(transferTx, 0), @@ -103,7 +106,48 @@ func mustFundAccounts( func BenchmarkComputeBlock(b *testing.B) { b.StopTimer() + b.SetParallelism(1) + + type benchmarkCase struct { + numCollections int + numTransactionsPerCollection int + maxConcurrency int + } + for _, benchCase := range []benchmarkCase{ + { + numCollections: 16, + numTransactionsPerCollection: 128, + maxConcurrency: 1, + }, + { + numCollections: 16, + numTransactionsPerCollection: 128, + maxConcurrency: 2, + }, + } { + b.Run( + fmt.Sprintf( + "%d/cols/%d/txes/%d/max-concurrency", + benchCase.numCollections, + benchCase.numTransactionsPerCollection, + benchCase.maxConcurrency), + func(b *testing.B) { + benchmarkComputeBlock( + b, + benchCase.numCollections, + benchCase.numTransactionsPerCollection, + benchCase.maxConcurrency) + }) + } +} + +func benchmarkComputeBlock( + b *testing.B, + numCollections int, + numTransactionsPerCollection int, + maxConcurrency int, +) { tracer, err := trace.NewTracer(zerolog.Nop(), "", "", 4) require.NoError(b, err) @@ -133,8 +177,8 @@ func BenchmarkComputeBlock(b *testing.B) { snapshotTree = mustFundAccounts(b, vm, snapshotTree, execCtx, accs) me := new(module.Local) - me.On("NodeID").Return(flow.ZeroID) - me.On("Sign", mock.Anything, 
mock.Anything).Return(nil, nil) + me.On("NodeID").Return(unittest.IdentifierFixture()) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) @@ -159,7 +203,9 @@ func BenchmarkComputeBlock(b *testing.B) { committer.NewNoopViewCommitter(), me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + maxConcurrency) require.NoError(b, err) derivedChainData, err := derived.NewDerivedChainData( @@ -171,53 +217,46 @@ func BenchmarkComputeBlock(b *testing.B) { derivedChainData: derivedChainData, } - b.SetParallelism(1) - - parentBlock := &flow.Block{ - Header: &flow.Header{}, - Payload: &flow.Payload{}, - } - - const ( - cols = 16 - txes = 128 - ) + parentBlock := unittest.BlockFixture() - b.Run(fmt.Sprintf("%d/cols/%d/txes", cols, txes), func(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + var elapsed time.Duration + for i := 0; i < b.N; i++ { + executableBlock := createBlock( + b, + parentBlock, + accs, + numCollections, + numTransactionsPerCollection) + parentBlock = executableBlock.Block + + b.StartTimer() + start := time.Now() + res, err := engine.ComputeBlock( + context.Background(), + unittest.IdentifierFixture(), + executableBlock, + snapshotTree) + elapsed += time.Since(start) b.StopTimer() - b.ResetTimer() - - var elapsed time.Duration - for i := 0; i < b.N; i++ { - executableBlock := createBlock(b, parentBlock, accs, cols, txes) - parentBlock = executableBlock.Block - - b.StartTimer() - start := time.Now() - res, err := engine.ComputeBlock( - context.Background(), - unittest.IdentifierFixture(), - executableBlock, - snapshotTree) - elapsed += time.Since(start) - b.StopTimer() - - for _, snapshot := range res.AllExecutionSnapshots() { - snapshotTree = snapshotTree.Append(snapshot) - } - require.NoError(b, err) - for j, r := range res.AllTransactionResults() { - // skip system transactions - if j >= cols*txes { - break - } - require.Emptyf(b, r.ErrorMessage, "Transaction %d failed", j) + require.NoError(b, err) + for _, snapshot := range res.AllExecutionSnapshots() { + snapshotTree = snapshotTree.Append(snapshot) + } + + for j, r := range res.AllTransactionResults() { + // skip system transactions + if j >= numCollections*numTransactionsPerCollection { + break } + require.Emptyf(b, r.ErrorMessage, "Transaction %d failed", j) } - totalTxes := int64(cols) * int64(txes) * int64(b.N) - b.ReportMetric(float64(elapsed.Nanoseconds()/totalTxes/int64(time.Microsecond)), "us/tx") - }) + } + totalTxes := int64(numCollections) * int64(numTransactionsPerCollection) * int64(b.N) + b.ReportMetric(float64(elapsed.Nanoseconds()/totalTxes/int64(time.Microsecond)), "us/tx") } func createBlock(b *testing.B, parentBlock *flow.Block, accs *testAccounts, colNum int, txNum int) *entity.ExecutableBlock { @@ -236,24 +275,21 @@ func createBlock(b *testing.B, parentBlock *flow.Block, accs *testAccounts, colN collections[c] = collection guarantees[c] = guarantee - completeCollections[guarantee.ID()] = &entity.CompleteCollection{ - Guarantee: guarantee, - Transactions: transactions, + completeCollections[guarantee.CollectionID] = &entity.CompleteCollection{ + Guarantee: guarantee, + Collection: collection, } } - block := flow.Block{ - Header: &flow.Header{ - ParentID: parentBlock.ID(), - View: parentBlock.Header.Height + 1, - }, - Payload: &flow.Payload{ - Guarantees: guarantees, - }, - } + block := unittest.BlockFixture( + unittest.Block.WithParent(parentBlock.ID(), parentBlock.View, 
parentBlock.Height), + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees(guarantees...)), + ), + ) return &entity.ExecutableBlock{ - Block: &block, + Block: block, CompleteCollections: completeCollections, StartState: unittest.StateCommitmentPointerFixture(), } @@ -266,17 +302,20 @@ func createTokenTransferTransaction(b *testing.B, accs *testAccounts) *flow.Tran src := accs.accounts[rnd] dst := accs.accounts[(rnd+1)%len(accs.accounts)] - tx := testutil.CreateTokenTransferTransaction(chain, 1, dst.address, src.address) - tx.SetProposalKey(chain.ServiceAddress(), 0, accs.seq). - SetGasLimit(1000). + txBuilder := testutil.CreateTokenTransferTransaction(chain, 1, dst.address, src.address). + SetProposalKey(chain.ServiceAddress(), 0, accs.seq). + SetComputeLimit(1000). SetPayer(chain.ServiceAddress()) accs.seq++ - err = testutil.SignPayload(tx, src.address, src.privateKey) + err = testutil.SignPayload(txBuilder, src.address, src.privateKey) + require.NoError(b, err) + + err = testutil.SignEnvelope(txBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(b, err) - err = testutil.SignEnvelope(tx, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) + txBody, err := txBuilder.Build() require.NoError(b, err) - return tx + return txBody } diff --git a/engine/execution/computation/manager_test.go b/engine/execution/computation/manager_test.go index 574a8cc3df7..b265f94268d 100644 --- a/engine/execution/computation/manager_test.go +++ b/engine/execution/computation/manager_test.go @@ -9,12 +9,12 @@ import ( "testing" "time" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" "github.com/onflow/cadence" + "github.com/onflow/cadence/common" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime/common" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -30,8 +30,10 @@ import ( "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/environment" fvmErrors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/storage/derived" "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/wal/fixtures" "github.com/onflow/flow-go/model/flow" @@ -64,52 +66,56 @@ func TestComputeBlockWithStorage(t *testing.T) { chain) require.NoError(t, err) - tx1 := testutil.DeployCounterContractTransaction(accounts[0], chain) - tx1.SetProposalKey(chain.ServiceAddress(), 0, 0). - SetGasLimit(1000). + tx1Builder := testutil.DeployCounterContractTransaction(accounts[0], chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). + SetComputeLimit(1000). SetPayer(chain.ServiceAddress()) - err = testutil.SignPayload(tx1, accounts[0], privateKeys[0]) + err = testutil.SignPayload(tx1Builder, accounts[0], privateKeys[0]) require.NoError(t, err) - err = testutil.SignEnvelope(tx1, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) + err = testutil.SignEnvelope(tx1Builder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) - tx2 := testutil.CreateCounterTransaction(accounts[0], accounts[1]) - tx2.SetProposalKey(chain.ServiceAddress(), 0, 0). - SetGasLimit(1000). + tx2Builder := testutil.CreateCounterTransaction(accounts[0], accounts[1]). 
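+ // Note the rename: what was SetGasLimit on the mutable TransactionBody is SetComputeLimit on the builder.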
+ SetProposalKey(chain.ServiceAddress(), 0, 0). + SetComputeLimit(1000). SetPayer(chain.ServiceAddress()) - err = testutil.SignPayload(tx2, accounts[1], privateKeys[1]) + err = testutil.SignPayload(tx2Builder, accounts[1], privateKeys[1]) require.NoError(t, err) - err = testutil.SignEnvelope(tx2, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) + err = testutil.SignEnvelope(tx2Builder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) - transactions := []*flow.TransactionBody{tx1, tx2} + tx1Body, err := tx1Builder.Build() + require.NoError(t, err) + tx2Body, err := tx2Builder.Build() + require.NoError(t, err) + + transactions := []*flow.TransactionBody{tx1Body, tx2Body} col := flow.Collection{Transactions: transactions} - guarantee := flow.CollectionGuarantee{ + guarantee := &flow.CollectionGuarantee{ CollectionID: col.ID(), Signature: nil, } - block := flow.Block{ - Header: &flow.Header{ - View: 42, - }, - Payload: &flow.Payload{ - Guarantees: []*flow.CollectionGuarantee{&guarantee}, - }, - } + block := unittest.BlockFixture( + unittest.Block.WithView(42), + unittest.Block.WithParentView(41), + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees(guarantee)), + ), + ) executableBlock := &entity.ExecutableBlock{ - Block: &block, + Block: block, CompleteCollections: map[flow.Identifier]*entity.CompleteCollection{ - guarantee.ID(): { - Guarantee: &guarantee, - Transactions: transactions, + guarantee.CollectionID: { + Guarantee: guarantee, + Collection: &col, }, }, StartState: unittest.StateCommitmentPointerFixture(), @@ -117,7 +123,7 @@ func TestComputeBlockWithStorage(t *testing.T) { me := new(module.Local) me.On("NodeID").Return(unittest.IdentifierFixture()) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) @@ -141,7 +147,9 @@ func TestComputeBlockWithStorage(t *testing.T) { committer.NewNoopViewCommitter(), me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) derivedChainData, err := derived.NewDerivedChainData(10) @@ -187,11 +195,12 @@ func TestComputeBlock_Uploader(t *testing.T) { me := new(module.Local) me.On("NodeID").Return(unittest.IdentifierFixture()) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) computationResult := unittest2.ComputationResultFixture( + t, unittest.IdentifierFixture(), [][]flow.Identifier{ {unittest.IdentifierFixture()}, @@ -228,7 +237,7 @@ func TestExecuteScript(t *testing.T) { me := new(module.Local) me.On("NodeID").Return(unittest.IdentifierFixture()) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). 
Return(nil, nil) @@ -236,13 +245,15 @@ func TestExecuteScript(t *testing.T) { ledger := testutil.RootBootstrappedLedger(vm, execCtx, fvm.WithExecutionMemoryLimit(math.MaxUint64)) + sc := systemcontracts.SystemContractsForChain(execCtx.Chain.ChainID()) + script := []byte(fmt.Sprintf( ` import FungibleToken from %s - pub fun main() {} + access(all) fun main() {} `, - fvm.FungibleTokenAddress(execCtx.Chain).HexWithPrefix(), + sc.FungibleToken.Address.HexWithPrefix(), )) bservice := requesterunit.MockBlobService(blockstore.NewBlockstore(dssync.MutexWrap(datastore.NewMapDatastore()))) @@ -260,19 +271,20 @@ func TestExecuteScript(t *testing.T) { metrics.NewNoopCollector(), trace.NewNoopTracer(), me, - nil, + testutil.ProtocolStateWithSourceFixture(nil), execCtx, committer.NewNoopViewCommitter(), prov, ComputationConfig{ QueryConfig: query.NewDefaultConfig(), DerivedDataCacheSize: derived.DefaultDerivedDataCacheSize, + MaxConcurrency: 1, }, ) require.NoError(t, err) header := unittest.BlockHeaderFixture() - _, err = engine.ExecuteScript( + _, _, err = engine.ExecuteScript( context.Background(), script, nil, @@ -291,7 +303,7 @@ func TestExecuteScript_BalanceScriptFailsIfViewIsEmpty(t *testing.T) { me := new(module.Local) me.On("NodeID").Return(unittest.IdentifierFixture()) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) @@ -300,13 +312,15 @@ func TestExecuteScript_BalanceScriptFailsIfViewIsEmpty(t *testing.T) { return nil, fmt.Errorf("error getting register") }) + sc := systemcontracts.SystemContractsForChain(execCtx.Chain.ChainID()) + script := []byte(fmt.Sprintf( ` - pub fun main(): UFix64 { + access(all) fun main(): UFix64 { return getAccount(%s).balance } `, - fvm.FungibleTokenAddress(execCtx.Chain).HexWithPrefix(), + sc.FungibleToken.Address.HexWithPrefix(), )) bservice := requesterunit.MockBlobService(blockstore.NewBlockstore(dssync.MutexWrap(datastore.NewMapDatastore()))) @@ -324,19 +338,20 @@ func TestExecuteScript_BalanceScriptFailsIfViewIsEmpty(t *testing.T) { metrics.NewNoopCollector(), trace.NewNoopTracer(), me, - nil, + testutil.ProtocolStateWithSourceFixture(nil), execCtx, committer.NewNoopViewCommitter(), prov, ComputationConfig{ QueryConfig: query.NewDefaultConfig(), DerivedDataCacheSize: derived.DefaultDerivedDataCacheSize, + MaxConcurrency: 1, }, ) require.NoError(t, err) header := unittest.BlockHeaderFixture() - _, err = engine.ExecuteScript( + _, _, err = engine.ExecuteScript( context.Background(), script, nil, @@ -369,13 +384,14 @@ func TestExecuteScripPanicsAreHandled(t *testing.T) { metrics.NewNoopCollector(), trace.NewNoopTracer(), nil, - nil, + testutil.ProtocolStateWithSourceFixture(nil), ctx, committer.NewNoopViewCommitter(), prov, ComputationConfig{ QueryConfig: query.NewDefaultConfig(), DerivedDataCacheSize: derived.DefaultDerivedDataCacheSize, + MaxConcurrency: 1, NewCustomVirtualMachine: func() fvm.VM { return &PanickingVM{} }, @@ -383,7 +399,7 @@ func TestExecuteScripPanicsAreHandled(t *testing.T) { ) require.NoError(t, err) - _, err = manager.ExecuteScript( + _, _, err = manager.ExecuteScript( context.Background(), []byte("whatever"), nil, @@ -391,7 +407,6 @@ func TestExecuteScripPanicsAreHandled(t *testing.T) { nil) require.Error(t, err) - require.Contains(t, buffer.String(), "Verunsicherung") } @@ -419,7 +434,7 @@ func TestExecuteScript_LongScriptsAreLogged(t *testing.T) { 
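+ // As elsewhere in this file, testutil.ProtocolStateWithSourceFixture(nil) replaces the former nil + // protocol-state argument; the block computer reads its randomness source from this snapshot.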
metrics.NewNoopCollector(), trace.NewNoopTracer(), nil, - nil, + testutil.ProtocolStateWithSourceFixture(nil), ctx, committer.NewNoopViewCommitter(), prov, @@ -429,6 +444,7 @@ func TestExecuteScript_LongScriptsAreLogged(t *testing.T) { ExecutionTimeLimit: query.DefaultExecutionTimeLimit, }, DerivedDataCacheSize: 10, + MaxConcurrency: 1, NewCustomVirtualMachine: func() fvm.VM { return &LongRunningVM{duration: 2 * time.Millisecond} }, @@ -436,7 +452,7 @@ func TestExecuteScript_LongScriptsAreLogged(t *testing.T) { ) require.NoError(t, err) - _, err = manager.ExecuteScript( + _, _, err = manager.ExecuteScript( context.Background(), []byte("whatever"), nil, @@ -444,7 +460,6 @@ func TestExecuteScript_LongScriptsAreLogged(t *testing.T) { nil) require.NoError(t, err) - require.Contains(t, buffer.String(), "exceeded threshold") } @@ -472,7 +487,7 @@ func TestExecuteScript_ShortScriptsAreNotLogged(t *testing.T) { metrics.NewNoopCollector(), trace.NewNoopTracer(), nil, - nil, + testutil.ProtocolStateWithSourceFixture(nil), ctx, committer.NewNoopViewCommitter(), prov, @@ -482,6 +497,7 @@ func TestExecuteScript_ShortScriptsAreNotLogged(t *testing.T) { ExecutionTimeLimit: query.DefaultExecutionTimeLimit, }, DerivedDataCacheSize: derived.DefaultDerivedDataCacheSize, + MaxConcurrency: 1, NewCustomVirtualMachine: func() fvm.VM { return &LongRunningVM{duration: 0} }, @@ -489,7 +505,7 @@ func TestExecuteScript_ShortScriptsAreNotLogged(t *testing.T) { ) require.NoError(t, err) - _, err = manager.ExecuteScript( + _, _, err = manager.ExecuteScript( context.Background(), []byte("whatever"), nil, @@ -497,12 +513,35 @@ func TestExecuteScript_ShortScriptsAreNotLogged(t *testing.T) { nil) require.NoError(t, err) - require.NotContains(t, buffer.String(), "exceeded threshold") } +type PanickingExecutor struct{} + +func (PanickingExecutor) Cleanup() {} + +func (PanickingExecutor) Preprocess() error { + return nil +} + +func (PanickingExecutor) Execute() error { + panic("panic, but expected with sentinel for test: Verunsicherung ") +} + +func (PanickingExecutor) Output() fvm.ProcedureOutput { + return fvm.ProcedureOutput{} +} + type PanickingVM struct{} +func (p *PanickingVM) NewExecutor( + f fvm.Context, + procedure fvm.Procedure, + txn storage.TransactionPreparer, +) fvm.ProcedureExecutor { + return PanickingExecutor{} +} + func (p *PanickingVM) Run( f fvm.Context, procedure fvm.Procedure, @@ -526,10 +565,41 @@ func (p *PanickingVM) GetAccount( panic("not expected") } +type LongRunningExecutor struct { + duration time.Duration +} + +func (LongRunningExecutor) Cleanup() {} + +func (LongRunningExecutor) Preprocess() error { + return nil +} + +func (l LongRunningExecutor) Execute() error { + time.Sleep(l.duration) + return nil +} + +func (LongRunningExecutor) Output() fvm.ProcedureOutput { + return fvm.ProcedureOutput{ + Value: cadence.NewVoid(), + } +} + type LongRunningVM struct { duration time.Duration } +func (l *LongRunningVM) NewExecutor( + f fvm.Context, + procedure fvm.Procedure, + txn storage.TransactionPreparer, +) fvm.ProcedureExecutor { + return LongRunningExecutor{ + duration: l.duration, + } +} + func (l *LongRunningVM) Run( f fvm.Context, procedure fvm.Procedure, @@ -584,7 +654,7 @@ func TestExecuteScriptTimeout(t *testing.T) { metrics.NewNoopCollector(), trace.NewNoopTracer(), nil, - nil, + testutil.ProtocolStateWithSourceFixture(nil), fvm.NewContext(), committer.NewNoopViewCommitter(), nil, @@ -594,13 +664,14 @@ func TestExecuteScriptTimeout(t *testing.T) { ExecutionTimeLimit: timeout, }, 
DerivedDataCacheSize: derived.DefaultDerivedDataCacheSize, + MaxConcurrency: 1, }, ) require.NoError(t, err) script := []byte(` - pub fun main(): Int { + access(all) fun main(): Int { var i = 0 while i < 10000 { i = i + 1 @@ -610,7 +681,7 @@ func TestExecuteScriptTimeout(t *testing.T) { `) header := unittest.BlockHeaderFixture() - value, err := manager.ExecuteScript( + value, _, err := manager.ExecuteScript( context.Background(), script, nil, @@ -630,7 +701,7 @@ func TestExecuteScriptCancelled(t *testing.T) { metrics.NewNoopCollector(), trace.NewNoopTracer(), nil, - nil, + testutil.ProtocolStateWithSourceFixture(nil), fvm.NewContext(), committer.NewNoopViewCommitter(), nil, @@ -640,13 +711,14 @@ func TestExecuteScriptCancelled(t *testing.T) { ExecutionTimeLimit: timeout, }, DerivedDataCacheSize: derived.DefaultDerivedDataCacheSize, + MaxConcurrency: 1, }, ) require.NoError(t, err) script := []byte(` - pub fun main(): Int { + access(all) fun main(): Int { var i = 0 var j = 0 while i < 10000000 { @@ -663,7 +735,7 @@ func TestExecuteScriptCancelled(t *testing.T) { wg.Add(1) go func() { header := unittest.BlockHeaderFixture() - value, err = manager.ExecuteScript( + value, _, err = manager.ExecuteScript( reqCtx, script, nil, @@ -704,41 +776,46 @@ func Test_EventEncodingFailsOnlyTxAndCarriesOn(t *testing.T) { account := accounts[0] privKey := privateKeys[0] // tx1 deploys contract version 1 - tx1 := testutil.DeployEventContractTransaction(account, chain, 1) - prepareTx(t, tx1, account, privKey, 0, chain) + tx1Builder := testutil.DeployEventContractTransaction(account, chain, 1) + prepareTx(t, tx1Builder, account, privKey, 0, chain) + tx1, err := tx1Builder.Build() + require.NoError(t, err) // tx2 emits event which will fail encoding - tx2 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, tx2, account, privKey, 1, chain) + tx2Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, tx2Builder, account, privKey, 1, chain) + tx2, err := tx2Builder.Build() + require.NoError(t, err) // tx3 emits event that will work fine - tx3 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, tx3, account, privKey, 2, chain) + tx3Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, tx3Builder, account, privKey, 2, chain) + tx3, err := tx3Builder.Build() + require.NoError(t, err) transactions := []*flow.TransactionBody{tx1, tx2, tx3} col := flow.Collection{Transactions: transactions} - guarantee := flow.CollectionGuarantee{ + guarantee := &flow.CollectionGuarantee{ CollectionID: col.ID(), Signature: nil, } - block := flow.Block{ - Header: &flow.Header{ - View: 26, - }, - Payload: &flow.Payload{ - Guarantees: []*flow.CollectionGuarantee{&guarantee}, - }, - } + block := unittest.BlockFixture( + unittest.Block.WithView(26), + unittest.Block.WithParentView(25), + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees(guarantee)), + ), + ) executableBlock := &entity.ExecutableBlock{ - Block: &block, + Block: block, CompleteCollections: map[flow.Identifier]*entity.CompleteCollection{ - guarantee.ID(): { - Guarantee: &guarantee, - Transactions: transactions, + guarantee.CollectionID: { + Guarantee: guarantee, + Collection: &col, }, }, StartState: unittest.StateCommitmentPointerFixture(), @@ -746,7 +823,7 @@ func Test_EventEncodingFailsOnlyTxAndCarriesOn(t *testing.T) { me := new(module.Local) me.On("NodeID").Return(unittest.IdentifierFixture()) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + 
me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) @@ -771,7 +848,8 @@ func Test_EventEncodingFailsOnlyTxAndCarriesOn(t *testing.T) { me, prov, nil, - ) + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) derivedChainData, err := derived.NewDerivedChainData(10) @@ -839,7 +917,7 @@ func TestScriptStorageMutationsDiscarded(t *testing.T) { metrics.NewExecutionCollector(ctx.Tracer), trace.NewNoopTracer(), nil, - nil, + testutil.ProtocolStateWithSourceFixture(nil), ctx, committer.NewNoopViewCommitter(), nil, @@ -849,6 +927,7 @@ func TestScriptStorageMutationsDiscarded(t *testing.T) { ExecutionTimeLimit: timeout, }, DerivedDataCacheSize: derived.DefaultDerivedDataCacheSize, + MaxConcurrency: 1, }, ) vm := manager.vm @@ -870,14 +949,14 @@ func TestScriptStorageMutationsDiscarded(t *testing.T) { commonAddress, _ := common.HexToAddress(address.Hex()) script := []byte(` - pub fun main(account: Address) { - let acc = getAuthAccount(account) - acc.save(3, to: /storage/x) + access(all) fun main(account: Address) { + let acc = getAuthAccount<auth(SaveValue) &Account>(account) + acc.storage.save(3, to: /storage/x) } `) header := unittest.BlockHeaderFixture() - _, err = manager.ExecuteScript( + _, compUsed, err := manager.ExecuteScript( context.Background(), script, [][]byte{jsoncdc.MustEncode(address)}, @@ -885,6 +964,7 @@ func TestScriptStorageMutationsDiscarded(t *testing.T) { snapshotTree) require.NoError(t, err) + require.Greater(t, compUsed, uint64(0)) env := environment.NewScriptEnvironmentFromStorageSnapshot( ctx.EnvironmentParams, @@ -893,11 +973,10 @@ func TestScriptStorageMutationsDiscarded(t *testing.T) { rt := env.BorrowCadenceRuntime() defer env.ReturnCadenceRuntime(rt) - v, err := rt.ReadStored( - commonAddress, - cadence.NewPath("storage", "x"), - ) + path, err := cadence.NewPath(common.PathDomainStorage, "x") + require.NoError(t, err) + v, err := rt.ReadStored(commonAddress, path) // the save should not update account storage by writing the updates // back to the snapshotTree require.NoError(t, err) diff --git a/engine/execution/computation/metrics/collector.go b/engine/execution/computation/metrics/collector.go new file mode 100644 index 00000000000..8f3438d4658 --- /dev/null +++ b/engine/execution/computation/metrics/collector.go @@ -0,0 +1,130 @@ +package metrics + +import ( + "sync" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" +) + +type collector struct { + log zerolog.Logger + + collection chan metrics + + mu sync.Mutex + + lowestAvailableHeight uint64 + blocksAtHeight map[uint64]map[flow.Identifier]struct{} + metrics map[flow.Identifier][]TransactionExecutionMetrics +} + +func newCollector( + log zerolog.Logger, + lowestAvailableHeight uint64, +) *collector { + return &collector{ + log: log, + lowestAvailableHeight: lowestAvailableHeight, + + collection: make(chan metrics, 1000), + blocksAtHeight: make(map[uint64]map[flow.Identifier]struct{}), + metrics: make(map[flow.Identifier][]TransactionExecutionMetrics), + } +} + +// Collect should never block because it's called from the execution +func (c *collector) Collect( + blockId flow.Identifier, + blockHeight uint64, + t TransactionExecutionMetrics, +) { + select { + case c.collection <- metrics{ + TransactionExecutionMetrics: t, + blockHeight: blockHeight, 
+ blockId: blockId, + }: + default: + c.log.Warn(). + Uint64("height", blockHeight). + Msg("dropping metrics because the collection channel is full") + } +} + +func (c *collector) metricsCollectorWorker( + ctx irrecoverable.SignalerContext, + ready component.ReadyFunc, +) { + ready() + + for { + select { + case <-ctx.Done(): + return + case m := <-c.collection: + c.collect(m.blockId, m.blockHeight, m.TransactionExecutionMetrics) + } + } +} + +func (c *collector) collect( + blockId flow.Identifier, + blockHeight uint64, + t TransactionExecutionMetrics, +) { + c.mu.Lock() + defer c.mu.Unlock() + + if blockHeight <= c.lowestAvailableHeight { + c.log.Warn(). + Uint64("height", blockHeight). + Uint64("lowestAvailableHeight", c.lowestAvailableHeight). + Msg("received metrics for a block that is older than or equal to the most recent block") + return + } + + if _, ok := c.blocksAtHeight[blockHeight]; !ok { + c.blocksAtHeight[blockHeight] = make(map[flow.Identifier]struct{}) + } + c.blocksAtHeight[blockHeight][blockId] = struct{}{} + c.metrics[blockId] = append(c.metrics[blockId], t) +} + +// Pop returns the metrics for the given finalized block at the given height +// and clears all data up to the given height. +func (c *collector) Pop(height uint64, finalizedBlockId flow.Identifier) []TransactionExecutionMetrics { + c.mu.Lock() + defer c.mu.Unlock() + + if height <= c.lowestAvailableHeight { + c.log.Warn(). + Uint64("height", height). + Stringer("finalizedBlockId", finalizedBlockId). + Msg("requested metrics for a finalizedBlockId that is older than or equal to the most recent finalizedBlockId") + return nil + } + + // only return metrics for the finalized block + metrics := c.metrics[finalizedBlockId] + + c.advanceTo(height) + + return metrics +} + +// advanceTo moves the latest height to the given height; +// all data at lower heights will be deleted. +func (c *collector) advanceTo(height uint64) { + for c.lowestAvailableHeight < height { + blocks := c.blocksAtHeight[c.lowestAvailableHeight] + for block := range blocks { + delete(c.metrics, block) + } + delete(c.blocksAtHeight, c.lowestAvailableHeight) + c.lowestAvailableHeight++ + } +} diff --git a/engine/execution/computation/metrics/collector_test.go b/engine/execution/computation/metrics/collector_test.go new file mode 100644 index 00000000000..14882c5f1c0 --- /dev/null +++ b/engine/execution/computation/metrics/collector_test.go @@ -0,0 +1,98 @@ +package metrics + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" +) + +func Test_CollectorPopOnEmpty(t *testing.T) { + t.Parallel() + + log := zerolog.New(zerolog.NewTestWriter(t)) + latestHeight := uint64(100) + + collector := newCollector(log, latestHeight) + + data := collector.Pop(latestHeight, flow.ZeroID) + require.Nil(t, data) +} + +func Test_CollectorCollection(t *testing.T) { + log := zerolog.New(zerolog.NewTestWriter(t)) + startHeight := uint64(100) + + collector := newCollector(log, startHeight) + + ctx := context.Background() + go func() { + ictx := irrecoverable.NewMockSignalerContext(t, ctx) + collector.metricsCollectorWorker(ictx, func() {}) + }() + + wg := sync.WaitGroup{} + + wg.Add(16 * 16 * 16) + for height := 0; height < 16; height++ { + // for each height we add multiple blocks.
Only one block will be popped per height + for block := 0; block < 16; block++ { + // for each block we add multiple transactions + for transaction := 0; transaction < 16; transaction++ { + go func(h, b, t int) { + defer wg.Done() + + block := flow.Identifier{} + block[0] = byte(h) + block[1] = byte(b) + + collector.Collect( + block, + startHeight+1+uint64(h), + TransactionExecutionMetrics{ + ExecutionTime: time.Duration(b + t), + }, + ) + }(height, block, transaction) + } + // wait a bit for the collector to process the data + <-time.After(1 * time.Millisecond) + } + } + + wg.Wait() + // wait a bit for the collector to process the data + <-time.After(10 * time.Millisecond) + + // there should be no data at the start height + data := collector.Pop(startHeight, flow.ZeroID) + require.Nil(t, data) + + for height := 0; height < 16; height++ { + block := flow.Identifier{} + block[0] = byte(height) + // always pop the first block each height + block[1] = byte(0) + + data := collector.Pop(startHeight+1+uint64(height), block) + + require.Len(t, data, 16) + } + + block := flow.Identifier{} + block[0] = byte(15) + block[1] = byte(1) + // height 16 was already popped so there should be no more data for any blocks + data = collector.Pop(startHeight+16, block) + require.Nil(t, data) + + // there should be no data past the last collected height + data = collector.Pop(startHeight+17, flow.ZeroID) + require.Nil(t, data) +} diff --git a/engine/execution/computation/metrics/provider.go b/engine/execution/computation/metrics/provider.go new file mode 100644 index 00000000000..c23a426141d --- /dev/null +++ b/engine/execution/computation/metrics/provider.go @@ -0,0 +1,124 @@ +package metrics + +import ( + "sync" + + "github.com/rs/zerolog" +) + +// provider is responsible for providing the metrics for the RPC endpoint. +// It has a circular buffer of metrics for the last N finalized and executed blocks. +type provider struct { + log zerolog.Logger + + mu sync.RWMutex + + bufferSize uint + bufferIndex uint + blockHeightAtBufferIndex uint64 + + buffer [][]TransactionExecutionMetrics +} + +func newProvider( + log zerolog.Logger, + bufferSize uint, + blockHeightAtBufferIndex uint64, +) *provider { + if bufferSize == 0 { + panic("buffer size must be greater than zero") + } + + return &provider{ + log: log, + bufferSize: bufferSize, + blockHeightAtBufferIndex: blockHeightAtBufferIndex, + bufferIndex: 0, + buffer: make([][]TransactionExecutionMetrics, bufferSize), + } +} + +// Push buffers the metrics for the given height. +// Callers must push heights in strictly increasing order; otherwise +// metrics for the skipped heights will not be buffered. +func (p *provider) Push( + height uint64, + data []TransactionExecutionMetrics, +) { + p.mu.Lock() + defer p.mu.Unlock() + + if height <= p.blockHeightAtBufferIndex { + p.log.Warn(). + Uint64("height", height). + Uint64("blockHeightAtBufferIndex", p.blockHeightAtBufferIndex). + Msg("received metrics for a block that is older than or equal to the most recent block") + return + } + if height > p.blockHeightAtBufferIndex+1 { + p.log.Warn(). + Uint64("height", height). + Uint64("blockHeightAtBufferIndex", p.blockHeightAtBufferIndex).
+ Msg("received metrics for a block that is not the next block") + + // Fill in the gap with nil + for i := p.blockHeightAtBufferIndex; i < height-1; i++ { + p.pushData(nil) + } + } + + p.pushData(data) +} + +func (p *provider) pushData(data []TransactionExecutionMetrics) { + p.bufferIndex = (p.bufferIndex + 1) % p.bufferSize + p.blockHeightAtBufferIndex++ + p.buffer[p.bufferIndex] = data +} + +func (p *provider) GetTransactionExecutionMetricsAfter(height uint64) (GetTransactionExecutionMetricsAfterResponse, error) { + p.mu.RLock() + defer p.mu.RUnlock() + + data := make(map[uint64][]TransactionExecutionMetrics) + + if height+1 > p.blockHeightAtBufferIndex { + return data, nil + } + + // start index is the lowest block height that is in the buffer + // missing heights are handled below + startHeight := uint64(0) + // assign startHeight with the lowest buffered height + if p.blockHeightAtBufferIndex > uint64(p.bufferSize) { + startHeight = p.blockHeightAtBufferIndex - uint64(p.bufferSize) + } + + // if the starting index is lower than the height we only need to return the data for + // the blocks that are later than the given height + if height+1 > startHeight { + startHeight = height + 1 + } + + for h := startHeight; h <= p.blockHeightAtBufferIndex; h++ { + // 0 <= diff; because of the bufferSize check above + diff := uint(p.blockHeightAtBufferIndex - h) + + // 0 <= diff < bufferSize; because of the bufferSize check above + // we are about to do a modulo operation with p.bufferSize on p.bufferIndex - diff, but diff could + // be larger than p.bufferIndex, which would result in a negative intermediate value. + // To avoid this, we add p.bufferSize to diff, which will guarantee that (p.bufferSize + p.bufferIndex - diff) + // is always positive, but the modulo operation will still return the same index. 
+ intermediateIndex := p.bufferIndex + p.bufferSize - diff + index := intermediateIndex % p.bufferSize + + d := p.buffer[index] + if len(d) == 0 { + continue + } + + data[h] = p.buffer[index] + } + + return data, nil +} diff --git a/engine/execution/computation/metrics/provider_test.go b/engine/execution/computation/metrics/provider_test.go new file mode 100644 index 00000000000..08bf3ae23c2 --- /dev/null +++ b/engine/execution/computation/metrics/provider_test.go @@ -0,0 +1,109 @@ +package metrics + +import ( + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" +) + +func Test_ProviderGetOnEmpty(t *testing.T) { + t.Parallel() + + height := uint64(100) + bufferSize := uint(10) + log := zerolog.New(zerolog.NewTestWriter(t)) + + provider := newProvider(log, bufferSize, height) + + for i := 0; uint(i) < bufferSize; i++ { + data, err := provider.GetTransactionExecutionMetricsAfter(height - uint64(i)) + require.NoError(t, err) + require.Len(t, data, 0) + } +} + +func Test_ProviderGetOutOfBounds(t *testing.T) { + t.Parallel() + + height := uint64(100) + bufferSize := uint(10) + log := zerolog.New(zerolog.NewTestWriter(t)) + + provider := newProvider(log, bufferSize, height) + + res, err := provider.GetTransactionExecutionMetricsAfter(height + 1) + require.NoError(t, err) + require.Len(t, res, 0) +} + +func Test_ProviderPushSequential(t *testing.T) { + t.Parallel() + + height := uint64(100) + bufferSize := uint(10) + log := zerolog.New(zerolog.NewTestWriter(t)) + + provider := newProvider(log, bufferSize, height) + + for i := 0; uint(i) < bufferSize; i++ { + data := []TransactionExecutionMetrics{ + { + // Execution time is our label + ExecutionTime: time.Duration(i), + }, + } + + provider.Push(height+uint64(i)+1, data) + } + + data, err := provider.GetTransactionExecutionMetricsAfter(height) + require.NoError(t, err) + for i := 0; uint(i) < bufferSize; i++ { + require.Equal(t, time.Duration(uint(i)), data[height+uint64(i)+1][0].ExecutionTime) + } +} + +func Test_ProviderPushOutOfSequence(t *testing.T) { + t.Parallel() + + height := uint64(100) + bufferSize := uint(10) + log := zerolog.New(zerolog.NewTestWriter(t)) + + provider := newProvider(log, bufferSize, height) + + for i := 0; uint(i) < bufferSize; i++ { + data := []TransactionExecutionMetrics{ + { + ExecutionTime: time.Duration(i), + }, + } + + provider.Push(height+uint64(i)+1, data) + } + + newHeight := height + uint64(bufferSize) + + // Push out of sequence + data := []TransactionExecutionMetrics{ + { + ExecutionTime: time.Duration(newHeight + 2), + }, + } + + // no-op + provider.Push(newHeight, data) + + // skip 1 + provider.Push(newHeight+2, data) + + res, err := provider.GetTransactionExecutionMetricsAfter(height) + require.NoError(t, err) + + require.Len(t, res, int(bufferSize)) + + require.Nil(t, res[newHeight+1]) + require.Equal(t, time.Duration(newHeight+2), res[newHeight+2][0].ExecutionTime) +} diff --git a/engine/execution/computation/metrics/transaction_execution_metrics.go b/engine/execution/computation/metrics/transaction_execution_metrics.go new file mode 100644 index 00000000000..24458ba2898 --- /dev/null +++ b/engine/execution/computation/metrics/transaction_execution_metrics.go @@ -0,0 +1,168 @@ +package metrics + +import ( + "time" + + "github.com/onflow/flow-go/engine" + + cadenceCommon "github.com/onflow/cadence/common" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/model/flow" + 
"github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + psEvents "github.com/onflow/flow-go/state/protocol/events" + "github.com/onflow/flow-go/storage" +) + +type TransactionExecutionMetricsProvider interface { + component.Component + protocol.Consumer + + // GetTransactionExecutionMetricsAfter returns the transaction metrics for all blocks higher than the given height + // It returns a map of block height to a list of transaction execution metrics + // Blocks that are out of scope (only a limited number blocks are kept in memory) are not returned + GetTransactionExecutionMetricsAfter(height uint64) (GetTransactionExecutionMetricsAfterResponse, error) + + // Collect the transaction metrics for the given block + // Collect does not block, it returns immediately + Collect( + blockId flow.Identifier, + blockHeight uint64, + t TransactionExecutionMetrics, + ) +} + +// GetTransactionExecutionMetricsAfterResponse is the response type for GetTransactionExecutionMetricsAfter +// It is a map of block height to a list of transaction execution metrics +type GetTransactionExecutionMetricsAfterResponse = map[uint64][]TransactionExecutionMetrics + +type TransactionExecutionMetrics struct { + TransactionID flow.Identifier + ExecutionTime time.Duration + ExecutionEffortWeights map[cadenceCommon.ComputationKind]uint64 +} + +type metrics struct { + TransactionExecutionMetrics + blockHeight uint64 + blockId flow.Identifier +} + +// transactionExecutionMetricsProvider is responsible for providing the metrics for the rpc endpoint. +// It has a circular buffer of metrics for the last N finalized and executed blocks. +// The metrics are not guaranteed to be available for all blocks. If the node is just starting up or catching up +// to the latest finalized block, some blocks may not have metrics available. +// The metrics are intended to be used for monitoring and analytics purposes. +type transactionExecutionMetricsProvider struct { + // collector is responsible for collecting the metrics + // the collector collects the metrics from the execution during block execution + // on a finalized and executed block, the metrics are moved to the provider, + // all non-finalized metrics for that height are discarded + *collector + + // provider is responsible for providing the metrics for the rpc endpoint + // it has a circular buffer of metrics for the last N finalized and executed blocks. + *provider + + component.Component + // transactionExecutionMetricsProvider needs to consume BlockFinalized events. 
+ psEvents.Noop + + log zerolog.Logger + + executionState state.FinalizedExecutionState + headers storage.Headers + blockFinalizedNotifier engine.Notifier + + latestFinalizedAndExecutedHeight uint64 +} + +var _ TransactionExecutionMetricsProvider = (*transactionExecutionMetricsProvider)(nil) + +func NewTransactionExecutionMetricsProvider( + log zerolog.Logger, + executionState state.FinalizedExecutionState, + headers storage.Headers, + latestFinalizedAndExecutedHeight uint64, + bufferSize uint, +) TransactionExecutionMetricsProvider { + log = log.With().Str("component", "transaction_execution_metrics_provider").Logger() + + collector := newCollector(log, latestFinalizedAndExecutedHeight) + provider := newProvider(log, bufferSize, latestFinalizedAndExecutedHeight) + + p := &transactionExecutionMetricsProvider{ + collector: collector, + provider: provider, + log: log, + executionState: executionState, + headers: headers, + blockFinalizedNotifier: engine.NewNotifier(), + latestFinalizedAndExecutedHeight: latestFinalizedAndExecutedHeight, + } + + cm := component.NewComponentManagerBuilder() + cm.AddWorker(collector.metricsCollectorWorker) + cm.AddWorker(p.blockFinalizedWorker) + + p.Component = cm.Build() + + return p +} + +func (p *transactionExecutionMetricsProvider) BlockFinalized(*flow.Header) { + p.blockFinalizedNotifier.Notify() +} + +// move data from the collector to the provider +func (p *transactionExecutionMetricsProvider) onBlockExecutedAndFinalized(block flow.Identifier, height uint64) { + data := p.collector.Pop(height, block) + p.provider.Push(height, data) +} + +func (p *transactionExecutionMetricsProvider) blockFinalizedWorker( + ctx irrecoverable.SignalerContext, + ready component.ReadyFunc, +) { + ready() + + for { + select { + case <-ctx.Done(): + return + case <-p.blockFinalizedNotifier.Channel(): + p.onExecutedAndFinalized() + } + } +} + +func (p *transactionExecutionMetricsProvider) onExecutedAndFinalized() { + latestFinalizedAndExecutedHeight, err := p.executionState.GetHighestFinalizedExecuted() + + if err != nil { + p.log.Warn().Err(err).Msg("could not get highest finalized executed") + return + } + + // the latest finalized and executed block could be more than one block further than the last one handled + // step through all blocks between the last one handled and the latest finalized and executed + for height := p.latestFinalizedAndExecutedHeight + 1; height <= latestFinalizedAndExecutedHeight; height++ { + blockID, err := p.headers.BlockIDByHeight(height) + if err != nil { + p.log.Warn(). + Err(err). + Uint64("height", height). + Msg("could not get header by height") + return + } + + p.onBlockExecutedAndFinalized(blockID, height) + + if height == latestFinalizedAndExecutedHeight { + p.latestFinalizedAndExecutedHeight = height + } + } +} diff --git a/engine/execution/computation/mock/computation_manager.go b/engine/execution/computation/mock/computation_manager.go index f019caf61bd..060e1671b4d 100644 --- a/engine/execution/computation/mock/computation_manager.go +++ b/engine/execution/computation/mock/computation_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -24,6 +24,10 @@ type ComputationManager struct { func (_m *ComputationManager) ComputeBlock(ctx context.Context, parentBlockExecutionResultID flow.Identifier, block *entity.ExecutableBlock, _a3 snapshot.StorageSnapshot) (*execution.ComputationResult, error) { ret := _m.Called(ctx, parentBlockExecutionResultID, block, _a3) + if len(ret) == 0 { + panic("no return value specified for ComputeBlock") + } + var r0 *execution.ComputationResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, *entity.ExecutableBlock, snapshot.StorageSnapshot) (*execution.ComputationResult, error)); ok { @@ -47,12 +51,17 @@ func (_m *ComputationManager) ComputeBlock(ctx context.Context, parentBlockExecu } // ExecuteScript provides a mock function with given fields: ctx, script, arguments, blockHeader, _a4 -func (_m *ComputationManager) ExecuteScript(ctx context.Context, script []byte, arguments [][]byte, blockHeader *flow.Header, _a4 snapshot.StorageSnapshot) ([]byte, error) { +func (_m *ComputationManager) ExecuteScript(ctx context.Context, script []byte, arguments [][]byte, blockHeader *flow.Header, _a4 snapshot.StorageSnapshot) ([]byte, uint64, error) { ret := _m.Called(ctx, script, arguments, blockHeader, _a4) + if len(ret) == 0 { + panic("no return value specified for ExecuteScript") + } + var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, *flow.Header, snapshot.StorageSnapshot) ([]byte, error)); ok { + var r1 uint64 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, *flow.Header, snapshot.StorageSnapshot) ([]byte, uint64, error)); ok { return rf(ctx, script, arguments, blockHeader, _a4) } if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, *flow.Header, snapshot.StorageSnapshot) []byte); ok { @@ -63,19 +72,29 @@ func (_m *ComputationManager) ExecuteScript(ctx context.Context, script []byte, } } - if rf, ok := ret.Get(1).(func(context.Context, []byte, [][]byte, *flow.Header, snapshot.StorageSnapshot) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, []byte, [][]byte, *flow.Header, snapshot.StorageSnapshot) uint64); ok { r1 = rf(ctx, script, arguments, blockHeader, _a4) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(uint64) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context, []byte, [][]byte, *flow.Header, snapshot.StorageSnapshot) error); ok { + r2 = rf(ctx, script, arguments, blockHeader, _a4) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // GetAccount provides a mock function with given fields: ctx, addr, header, _a3 func (_m *ComputationManager) GetAccount(ctx context.Context, addr flow.Address, header *flow.Header, _a3 snapshot.StorageSnapshot) (*flow.Account, error) { ret := _m.Called(ctx, addr, header, _a3) + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) (*flow.Account, error)); ok { @@ -98,13 +117,12 @@ func (_m *ComputationManager) GetAccount(ctx context.Context, addr flow.Address, return r0, r1 } -type mockConstructorTestingTNewComputationManager interface { +// NewComputationManager creates a new instance of ComputationManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
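+//
+// Typical usage in a test (illustrative sketch; the expectation below is a placeholder):
+//
+//	m := NewComputationManager(t)
+//	m.On("ComputeBlock", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+//		Return(&execution.ComputationResult{}, nil)
+//
+// Expectations registered this way are asserted automatically through the t.Cleanup hook.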
+func NewComputationManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewComputationManager creates a new instance of ComputationManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewComputationManager(t mockConstructorTestingTNewComputationManager) *ComputationManager { +}) *ComputationManager { mock := &ComputationManager{} mock.Mock.Test(t) diff --git a/engine/execution/computation/programs_test.go b/engine/execution/computation/programs_test.go index 2f3a273e176..b8dc91a6c2c 100644 --- a/engine/execution/computation/programs_test.go +++ b/engine/execution/computation/programs_test.go @@ -6,11 +6,11 @@ import ( "testing" "time" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" "github.com/onflow/cadence" - jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/cadence/encoding/ccf" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -35,6 +35,10 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) +const ( + testMaxConcurrency = 2 +) + func TestPrograms_TestContractUpdates(t *testing.T) { chain := flow.Mainnet.Chain() vm := fvm.NewVirtualMachine() @@ -53,55 +57,67 @@ func TestPrograms_TestContractUpdates(t *testing.T) { account := accounts[0] privKey := privateKeys[0] // tx1 deploys contract version 1 - tx1 := testutil.DeployEventContractTransaction(account, chain, 1) - prepareTx(t, tx1, account, privKey, 0, chain) + tx1Builder := testutil.DeployEventContractTransaction(account, chain, 1) + prepareTx(t, tx1Builder, account, privKey, 0, chain) + tx1, err := tx1Builder.Build() + require.NoError(t, err) // tx2 calls the method of the contract (version 1) - tx2 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, tx2, account, privKey, 1, chain) + tx2Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, tx2Builder, account, privKey, 1, chain) + tx2, err := tx2Builder.Build() + require.NoError(t, err) // tx3 updates the contract to version 2 - tx3 := testutil.UpdateEventContractTransaction(account, chain, 2) - prepareTx(t, tx3, account, privKey, 2, chain) + tx3Builder := testutil.UpdateEventContractTransaction(account, chain, 2) + prepareTx(t, tx3Builder, account, privKey, 2, chain) + tx3, err := tx3Builder.Build() + require.NoError(t, err) // tx4 calls the method of the contract (version 2) - tx4 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, tx4, account, privKey, 3, chain) + tx4Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, tx4Builder, account, privKey, 3, chain) + tx4, err := tx4Builder.Build() + require.NoError(t, err) // tx5 updates the contract to version 3 but fails (no env signature of service account) - tx5 := testutil.UnauthorizedDeployEventContractTransaction(account, chain, 3) - tx5.SetProposalKey(account, 0, 4).SetPayer(account) - err = testutil.SignEnvelope(tx5, account, privKey) + tx5Builder := testutil.UnauthorizedDeployEventContractTransaction(account, chain, 3). + SetProposalKey(account, 0, 4). 
+ SetPayer(account) + err = testutil.SignEnvelope(tx5Builder, account, privKey) + require.NoError(t, err) + tx5, err := tx5Builder.Build() require.NoError(t, err) // tx6 calls the method of the contract (version 2 expected) - tx6 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, tx6, account, privKey, 5, chain) + tx6Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, tx6Builder, account, privKey, 5, chain) + tx6, err := tx6Builder.Build() + require.NoError(t, err) transactions := []*flow.TransactionBody{tx1, tx2, tx3, tx4, tx5, tx6} col := flow.Collection{Transactions: transactions} - guarantee := flow.CollectionGuarantee{ + guarantee := &flow.CollectionGuarantee{ CollectionID: col.ID(), Signature: nil, } - block := flow.Block{ - Header: &flow.Header{ - View: 26, - }, - Payload: &flow.Payload{ - Guarantees: []*flow.CollectionGuarantee{&guarantee}, - }, - } + block := unittest.BlockFixture( + unittest.Block.WithView(26), + unittest.Block.WithParentView(25), + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees(guarantee)), + ), + ) executableBlock := &entity.ExecutableBlock{ - Block: &block, + Block: block, CompleteCollections: map[flow.Identifier]*entity.CompleteCollection{ - guarantee.ID(): { - Guarantee: &guarantee, - Transactions: transactions, + guarantee.CollectionID: { + Guarantee: guarantee, + Collection: &col, }, }, StartState: unittest.StateCommitmentPointerFixture(), @@ -109,7 +125,7 @@ func TestPrograms_TestContractUpdates(t *testing.T) { me := new(module.Local) me.On("NodeID").Return(unittest.IdentifierFixture()) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) @@ -133,7 +149,9 @@ func TestPrograms_TestContractUpdates(t *testing.T) { committer.NewNoopViewCommitter(), me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) derivedChainData, err := derived.NewDerivedChainData(10) @@ -176,7 +194,7 @@ type blockProvider struct { func (b blockProvider) ByHeightFrom(height uint64, _ *flow.Header) (*flow.Header, error) { block, has := b.blocks[height] if has { - return block.Header, nil + return block.ToHeader(), nil } return nil, fmt.Errorf("block for height (%d) is not available", height) } @@ -201,10 +219,10 @@ func TestPrograms_TestBlockForks(t *testing.T) { chain := flow.Emulator.Chain() vm := fvm.NewVirtualMachine() execCtx := fvm.NewContext( - fvm.WithBlockHeader(block.Header), - fvm.WithBlocks(blockProvider{map[uint64]*flow.Block{0: &block}}), + fvm.WithEVMEnabled(true), + fvm.WithBlockHeader(block.ToHeader()), + fvm.WithBlocks(blockProvider{map[uint64]*flow.Block{0: block}}), fvm.WithChain(chain)) - privateKeys, err := testutil.GenerateAccountPrivateKeys(1) require.NoError(t, err) snapshotTree, accounts, err := testutil.CreateAccounts( @@ -219,7 +237,7 @@ func TestPrograms_TestBlockForks(t *testing.T) { me := new(module.Local) me.On("NodeID").Return(unittest.IdentifierFixture()) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). 
Return(nil, nil) @@ -243,7 +261,9 @@ func TestPrograms_TestBlockForks(t *testing.T) { committer.NewNoopViewCommitter(), me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + testMaxConcurrency) require.NoError(t, err) derivedChainData, err := derived.NewDerivedChainData(10) @@ -266,10 +286,12 @@ func TestPrograms_TestBlockForks(t *testing.T) { t.Run("executing block1 (no collection)", func(t *testing.T) { block1 = &flow.Block{ - Header: &flow.Header{ - View: 1, + HeaderBody: flow.HeaderBody{ + View: 1, + ChainID: flow.Emulator, + Timestamp: uint64(time.Now().UnixMilli()), }, - Payload: &flow.Payload{ + Payload: flow.Payload{ Guarantees: []*flow.CollectionGuarantee{}, }, } @@ -287,8 +309,10 @@ func TestPrograms_TestBlockForks(t *testing.T) { }) t.Run("executing block11 (deploys contract version 1)", func(t *testing.T) { - block11tx1 := testutil.DeployEventContractTransaction(account, chain, 1) - prepareTx(t, block11tx1, account, privKey, 0, chain) + block11tx1Builder := testutil.DeployEventContractTransaction(account, chain, 1) + prepareTx(t, block11tx1Builder, account, privKey, 0, chain) + block11tx1, err := block11tx1Builder.Build() + require.NoError(t, err) txs11 := []*flow.TransactionBody{block11tx1} col11 := flow.Collection{Transactions: txs11} @@ -308,12 +332,16 @@ func TestPrograms_TestBlockForks(t *testing.T) { t.Run("executing block111 (emit event (expected v1), update contract to v3)", func(t *testing.T) { block111ExpectedValue := 1 // emit event - block111tx1 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, block111tx1, account, privKey, 1, chain) + block111tx1Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, block111tx1Builder, account, privKey, 1, chain) + block111tx1, err := block111tx1Builder.Build() + require.NoError(t, err) // update contract version 3 - block111tx2 := testutil.UpdateEventContractTransaction(account, chain, 3) - prepareTx(t, block111tx2, account, privKey, 2, chain) + block111tx2Builder := testutil.UpdateEventContractTransaction(account, chain, 3) + prepareTx(t, block111tx2Builder, account, privKey, 2, chain) + block111tx2, err := block111tx2Builder.Build() + require.NoError(t, err) col111 := flow.Collection{Transactions: []*flow.TransactionBody{block111tx1, block111tx2}} block111, res, block111Snapshot = createTestBlockAndRun( @@ -336,8 +364,10 @@ func TestPrograms_TestBlockForks(t *testing.T) { t.Run("executing block1111 (emit event (expected v3))", func(t *testing.T) { block1111ExpectedValue := 3 - block1111tx1 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, block1111tx1, account, privKey, 3, chain) + block1111tx1Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, block1111tx1Builder, account, privKey, 3, chain) + block1111tx1, err := block1111tx1Builder.Build() + require.NoError(t, err) col1111 := flow.Collection{Transactions: []*flow.TransactionBody{block1111tx1}} block1111, res, _ = createTestBlockAndRun( @@ -358,12 +388,16 @@ func TestPrograms_TestBlockForks(t *testing.T) { t.Run("executing block112 (emit event (expected v1))", func(t *testing.T) { block112ExpectedValue := 1 - block112tx1 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, block112tx1, account, privKey, 1, chain) + block112tx1Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, block112tx1Builder, account, privKey, 1, chain) + block112tx1, err := block112tx1Builder.Build() + require.NoError(t, err) // update 
contract version 4 - block112tx2 := testutil.UpdateEventContractTransaction(account, chain, 4) - prepareTx(t, block112tx2, account, privKey, 2, chain) + block112tx2Builder := testutil.UpdateEventContractTransaction(account, chain, 4) + prepareTx(t, block112tx2Builder, account, privKey, 2, chain) + block112tx2, err := block112tx2Builder.Build() + require.NoError(t, err) col112 := flow.Collection{Transactions: []*flow.TransactionBody{block112tx1, block112tx2}} block112, res, block112Snapshot = createTestBlockAndRun( @@ -386,8 +420,10 @@ func TestPrograms_TestBlockForks(t *testing.T) { }) t.Run("executing block1121 (emit event (expected v4))", func(t *testing.T) { block1121ExpectedValue := 4 - block1121tx1 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, block1121tx1, account, privKey, 3, chain) + block1121tx1Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, block1121tx1Builder, account, privKey, 3, chain) + block1121tx1, err := block1121tx1Builder.Build() + require.NoError(t, err) col1121 := flow.Collection{Transactions: []*flow.TransactionBody{block1121tx1}} block1121, res, _ = createTestBlockAndRun( @@ -407,9 +443,10 @@ func TestPrograms_TestBlockForks(t *testing.T) { }) t.Run("executing block12 (deploys contract V2)", func(t *testing.T) { - - block12tx1 := testutil.DeployEventContractTransaction(account, chain, 2) - prepareTx(t, block12tx1, account, privKey, 0, chain) + block12tx1Builder := testutil.DeployEventContractTransaction(account, chain, 2) + prepareTx(t, block12tx1Builder, account, privKey, 0, chain) + block12tx1, err := block12tx1Builder.Build() + require.NoError(t, err) col12 := flow.Collection{Transactions: []*flow.TransactionBody{block12tx1}} block12, res, block12Snapshot = createTestBlockAndRun( @@ -428,8 +465,10 @@ func TestPrograms_TestBlockForks(t *testing.T) { }) t.Run("executing block121 (emit event (expected V2)", func(t *testing.T) { block121ExpectedValue := 2 - block121tx1 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, block121tx1, account, privKey, 1, chain) + block121tx1Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, block121tx1Builder, account, privKey, 1, chain) + block121tx1, err := block121tx1Builder.Build() + require.NoError(t, err) col121 := flow.Collection{Transactions: []*flow.TransactionBody{block121tx1}} block121, res, block121Snapshot = createTestBlockAndRun( @@ -449,8 +488,10 @@ func TestPrograms_TestBlockForks(t *testing.T) { }) t.Run("executing Block1211 (emit event (expected V2)", func(t *testing.T) { block1211ExpectedValue := 2 - block1211tx1 := testutil.CreateEmitEventTransaction(account, account) - prepareTx(t, block1211tx1, account, privKey, 2, chain) + block1211tx1Builder := testutil.CreateEmitEventTransaction(account, account) + prepareTx(t, block1211tx1Builder, account, privKey, 2, chain) + block1211tx1, err := block1211tx1Builder.Build() + require.NoError(t, err) col1211 := flow.Collection{Transactions: []*flow.TransactionBody{block1211tx1}} block1211, res, _ = createTestBlockAndRun( @@ -484,28 +525,24 @@ func createTestBlockAndRun( *execution.ComputationResult, snapshot.SnapshotTree, ) { - guarantee := flow.CollectionGuarantee{ + guarantee := &flow.CollectionGuarantee{ CollectionID: col.ID(), Signature: nil, } - block := &flow.Block{ - Header: &flow.Header{ - ParentID: parentBlock.ID(), - View: parentBlock.Header.Height + 1, - Timestamp: time.Now(), - }, - Payload: &flow.Payload{ - Guarantees: []*flow.CollectionGuarantee{&guarantee}, 
- }, - } + block := unittest.BlockFixture( + unittest.Block.WithParent(parentBlock.ID(), parentBlock.View, parentBlock.Height), + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees(guarantee)), + ), + ) executableBlock := &entity.ExecutableBlock{ Block: block, CompleteCollections: map[flow.Identifier]*entity.CompleteCollection{ - guarantee.ID(): { - Guarantee: &guarantee, - Transactions: col.Transactions, + guarantee.CollectionID: { + Guarantee: guarantee, + Collection: &col, }, }, StartState: unittest.StateCommitmentPointerFixture(), @@ -528,22 +565,28 @@ func createTestBlockAndRun( return block, returnedComputationResult, snapshotTree } -func prepareTx(t *testing.T, - tx *flow.TransactionBody, +func prepareTx( + t *testing.T, + txBuilder *flow.TransactionBodyBuilder, account flow.Address, privKey flow.AccountPrivateKey, seqNumber uint64, chain flow.Chain) { - tx.SetProposalKey(account, 0, seqNumber). + + txBuilder.SetProposalKey(account, 0, seqNumber). SetPayer(chain.ServiceAddress()) - err := testutil.SignPayload(tx, account, privKey) + + err := testutil.SignPayload(txBuilder, account, privKey) require.NoError(t, err) - err = testutil.SignEnvelope(tx, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) + err = testutil.SignEnvelope(txBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) } func hasValidEventValue(t *testing.T, event flow.Event, value int) { - data, err := jsoncdc.Decode(nil, event.Payload) + data, err := ccf.Decode(nil, event.Payload) require.NoError(t, err) - assert.Equal(t, int16(value), data.(cadence.Event).Fields[0].ToGoValue()) + assert.Equal(t, + cadence.Int16(value), + cadence.SearchFieldByName(data.(cadence.Event), "value"), + ) } diff --git a/engine/execution/computation/query/executor.go b/engine/execution/computation/query/executor.go index 44f7ec69ab6..1027cebcbfa 100644 --- a/engine/execution/computation/query/executor.go +++ b/engine/execution/computation/query/executor.go @@ -4,11 +4,12 @@ import ( "context" "encoding/hex" "fmt" - "math/rand" "strings" "sync" "time" + "github.com/onflow/flow-go/fvm/errors" + jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/rs/zerolog" @@ -17,7 +18,9 @@ import ( "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/debug" + "github.com/onflow/flow-go/utils/rand" ) const ( @@ -35,6 +38,7 @@ type Executor interface { snapshot snapshot.StorageSnapshot, ) ( []byte, + uint64, error, ) @@ -47,11 +51,53 @@ type Executor interface { *flow.Account, error, ) + + GetAccountBalance( + ctx context.Context, + addr flow.Address, + header *flow.Header, + snapshot snapshot.StorageSnapshot, + ) ( + uint64, + error, + ) + + GetAccountAvailableBalance( + ctx context.Context, + addr flow.Address, + header *flow.Header, + snapshot snapshot.StorageSnapshot, + ) ( + uint64, + error, + ) + + GetAccountKeys( + ctx context.Context, + addr flow.Address, + header *flow.Header, + snapshot snapshot.StorageSnapshot, + ) ( + []flow.AccountPublicKey, + error, + ) + + GetAccountKey( + ctx context.Context, + addr flow.Address, + keyIndex uint32, + header *flow.Header, + snapshot snapshot.StorageSnapshot, + ) ( + *flow.AccountPublicKey, + error, + ) } type QueryConfig struct { LogTimeThreshold time.Duration ExecutionTimeLimit time.Duration + ComputationLimit uint64 MaxErrorMessageSize int } @@ -59,19 +105,20 @@ func 
NewDefaultConfig() QueryConfig {
 	return QueryConfig{
 		LogTimeThreshold:    DefaultLogTimeThreshold,
 		ExecutionTimeLimit:  DefaultExecutionTimeLimit,
+		ComputationLimit:    fvm.DefaultComputationLimit,
 		MaxErrorMessageSize: DefaultMaxErrorMessageSize,
 	}
 }
 
 type QueryExecutor struct {
-	config           QueryConfig
-	logger           zerolog.Logger
-	metrics          module.ExecutionMetrics
-	vm               fvm.VM
-	vmCtx            fvm.Context
-	derivedChainData *derived.DerivedChainData
-	rngLock          *sync.Mutex
-	rng              *rand.Rand
+	config                QueryConfig
+	logger                zerolog.Logger
+	metrics               module.ExecutionMetrics
+	vm                    fvm.VM
+	vmCtx                 fvm.Context
+	derivedChainData      *derived.DerivedChainData
+	rngLock               *sync.Mutex
+	protocolStateSnapshot protocol.SnapshotExecutionSubsetProvider
 }
 
 var _ Executor = &QueryExecutor{}
 
@@ -83,16 +130,20 @@ func NewQueryExecutor(
 	vm fvm.VM,
 	vmCtx fvm.Context,
 	derivedChainData *derived.DerivedChainData,
+	protocolStateSnapshot protocol.SnapshotExecutionSubsetProvider,
 ) *QueryExecutor {
+	if config.ComputationLimit > 0 {
+		vmCtx = fvm.NewContextFromParent(vmCtx, fvm.WithComputationLimit(config.ComputationLimit))
+	}
 	return &QueryExecutor{
-		config:           config,
-		logger:           logger,
-		metrics:          metrics,
-		vm:               vm,
-		vmCtx:            vmCtx,
-		derivedChainData: derivedChainData,
-		rngLock:          &sync.Mutex{},
-		rng:              rand.New(rand.NewSource(time.Now().UnixNano())),
+		config:                config,
+		logger:                logger,
+		metrics:               metrics,
+		vm:                    vm,
+		vmCtx:                 vmCtx,
+		derivedChainData:      derivedChainData,
+		rngLock:               &sync.Mutex{},
+		protocolStateSnapshot: protocolStateSnapshot,
 	}
 }
 
@@ -104,6 +155,7 @@ func (e *QueryExecutor) ExecuteScript(
 	snapshot snapshot.StorageSnapshot,
 ) (
 	encodedValue []byte,
+	computationUsed uint64,
 	err error,
 ) {
 
@@ -115,8 +167,11 @@
 	// TODO: this is a temporary measure, we could remove this in the future
 	if e.logger.Debug().Enabled() {
 		e.rngLock.Lock()
-		trackerID := e.rng.Uint32()
+		trackerID, err := rand.Uint32()
 		e.rngLock.Unlock()
+		if err != nil {
+			return nil, 0, fmt.Errorf("failed to generate trackerID: %w", err)
+		}
 
 		trackedLogger := e.logger.With().Hex("script_hex", script).Uint32("trackerID", trackerID).Logger()
 		trackedLogger.Debug().Msg("script is sent for execution")
@@ -161,24 +216,26 @@ func (e *QueryExecutor) ExecuteScript(
 		fvm.NewContextFromParent(
 			e.vmCtx,
 			fvm.WithBlockHeader(blockHeader),
+			fvm.WithProtocolStateSnapshot(e.protocolStateSnapshot.AtBlockID(blockHeader.ID())),
 			fvm.WithDerivedBlockData(
 				e.derivedChainData.NewDerivedBlockDataForScript(blockHeader.ID()))),
 		fvm.NewScriptWithContextAndArgs(script, requestCtx, arguments...),
 		snapshot)
 	if err != nil {
-		return nil, fmt.Errorf("failed to execute script (internal error): %w", err)
+		return nil, 0, fmt.Errorf("failed to execute script (internal error): %w", err)
 	}
 
 	if output.Err != nil {
-		return nil, fmt.Errorf("failed to execute script at block (%s): %s",
-			blockHeader.ID(),
-			summarizeLog(output.Err.Error(),
-				e.config.MaxErrorMessageSize))
+		return nil, 0, errors.NewCodedError(
+			output.Err.Code(),
+			"failed to execute script at block (%s): %s", blockHeader.ID(),
+			summarizeLog(output.Err.Error(), e.config.MaxErrorMessageSize),
+		)
 	}
 
 	encodedValue, err = jsoncdc.Encode(output.Value)
 	if err != nil {
-		return nil, fmt.Errorf("failed to encode runtime value: %w", err)
+		return nil, 0, fmt.Errorf("failed to encode runtime value: %w", err)
 	}
 
 	memAllocAfter := debug.GetHeapAllocsBytes()
@@ -188,7 +245,7 @@ func (e *QueryExecutor) ExecuteScript(
 		memAllocAfter-memAllocBefore,
 		output.MemoryEstimate)
 
-	return encodedValue, nil
+	return 
encodedValue, output.ComputationUsed, nil } func summarizeLog(log string, limit int) string { @@ -204,7 +261,7 @@ func summarizeLog(log string, limit int) string { } func (e *QueryExecutor) GetAccount( - ctx context.Context, + _ context.Context, address flow.Address, blockHeader *flow.Header, snapshot snapshot.StorageSnapshot, @@ -219,7 +276,7 @@ func (e *QueryExecutor) GetAccount( fvm.WithDerivedBlockData( e.derivedChainData.NewDerivedBlockDataForScript(blockHeader.ID()))) - account, err := e.vm.GetAccount( + account, err := fvm.GetAccount( blockCtx, address, snapshot) @@ -233,3 +290,119 @@ func (e *QueryExecutor) GetAccount( return account, nil } + +func (e *QueryExecutor) GetAccountBalance( + _ context.Context, + address flow.Address, + blockHeader *flow.Header, + snapshot snapshot.StorageSnapshot, +) (uint64, error) { + + // TODO(ramtin): utilize ctx + blockCtx := fvm.NewContextFromParent( + e.vmCtx, + fvm.WithBlockHeader(blockHeader), + fvm.WithDerivedBlockData( + e.derivedChainData.NewDerivedBlockDataForScript(blockHeader.ID()))) + + accountBalance, err := fvm.GetAccountBalance( + blockCtx, + address, + snapshot) + + if err != nil { + return 0, fmt.Errorf( + "failed to get account balance (%s) at block (%s): %w", + address.String(), + blockHeader.ID(), + err) + } + + return accountBalance, nil +} + +func (e *QueryExecutor) GetAccountAvailableBalance( + _ context.Context, + address flow.Address, + blockHeader *flow.Header, + snapshot snapshot.StorageSnapshot, +) (uint64, error) { + + // TODO(ramtin): utilize ctx + blockCtx := fvm.NewContextFromParent( + e.vmCtx, + fvm.WithBlockHeader(blockHeader), + fvm.WithDerivedBlockData( + e.derivedChainData.NewDerivedBlockDataForScript(blockHeader.ID()))) + + accountAvailableBalance, err := fvm.GetAccountAvailableBalance( + blockCtx, + address, + snapshot) + + if err != nil { + return 0, fmt.Errorf( + "failed to get account available balance (%s) at block (%s): %w", + address.String(), + blockHeader.ID(), + err) + } + + return accountAvailableBalance, nil +} + +func (e *QueryExecutor) GetAccountKeys( + _ context.Context, + address flow.Address, + blockHeader *flow.Header, + snapshot snapshot.StorageSnapshot, +) ([]flow.AccountPublicKey, error) { + // TODO(ramtin): utilize ctx + blockCtx := fvm.NewContextFromParent( + e.vmCtx, + fvm.WithBlockHeader(blockHeader), + fvm.WithDerivedBlockData( + e.derivedChainData.NewDerivedBlockDataForScript(blockHeader.ID()))) + + accountKeys, err := fvm.GetAccountKeys(blockCtx, + address, + snapshot) + if err != nil { + return nil, fmt.Errorf( + "failed to get account keys (%s) at block (%s): %w", + address.String(), + blockHeader.ID(), + err) + } + + return accountKeys, nil +} + +func (e *QueryExecutor) GetAccountKey( + _ context.Context, + address flow.Address, + keyIndex uint32, + blockHeader *flow.Header, + snapshot snapshot.StorageSnapshot, +) (*flow.AccountPublicKey, error) { + // TODO(ramtin): utilize ctx + blockCtx := fvm.NewContextFromParent( + e.vmCtx, + fvm.WithBlockHeader(blockHeader), + fvm.WithDerivedBlockData( + e.derivedChainData.NewDerivedBlockDataForScript(blockHeader.ID()))) + + accountKey, err := fvm.GetAccountKey(blockCtx, + address, + keyIndex, + snapshot) + if err != nil { + return nil, fmt.Errorf( + "failed to get account key (%s) at block (%s): %w", + address.String(), + blockHeader.ID(), + err) + } + + return accountKey, nil +} diff --git a/engine/execution/computation/query/mock/executor.go b/engine/execution/computation/query/mock/executor.go new file mode 100644 index 
00000000000..ec40569c661 --- /dev/null +++ b/engine/execution/computation/query/mock/executor.go @@ -0,0 +1,214 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + snapshot "github.com/onflow/flow-go/fvm/storage/snapshot" +) + +// Executor is an autogenerated mock type for the Executor type +type Executor struct { + mock.Mock +} + +// ExecuteScript provides a mock function with given fields: ctx, script, arguments, blockHeader, _a4 +func (_m *Executor) ExecuteScript(ctx context.Context, script []byte, arguments [][]byte, blockHeader *flow.Header, _a4 snapshot.StorageSnapshot) ([]byte, uint64, error) { + ret := _m.Called(ctx, script, arguments, blockHeader, _a4) + + if len(ret) == 0 { + panic("no return value specified for ExecuteScript") + } + + var r0 []byte + var r1 uint64 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, *flow.Header, snapshot.StorageSnapshot) ([]byte, uint64, error)); ok { + return rf(ctx, script, arguments, blockHeader, _a4) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, *flow.Header, snapshot.StorageSnapshot) []byte); ok { + r0 = rf(ctx, script, arguments, blockHeader, _a4) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, [][]byte, *flow.Header, snapshot.StorageSnapshot) uint64); ok { + r1 = rf(ctx, script, arguments, blockHeader, _a4) + } else { + r1 = ret.Get(1).(uint64) + } + + if rf, ok := ret.Get(2).(func(context.Context, []byte, [][]byte, *flow.Header, snapshot.StorageSnapshot) error); ok { + r2 = rf(ctx, script, arguments, blockHeader, _a4) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// GetAccount provides a mock function with given fields: ctx, addr, header, _a3 +func (_m *Executor) GetAccount(ctx context.Context, addr flow.Address, header *flow.Header, _a3 snapshot.StorageSnapshot) (*flow.Account, error) { + ret := _m.Called(ctx, addr, header, _a3) + + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + + var r0 *flow.Account + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) (*flow.Account, error)); ok { + return rf(ctx, addr, header, _a3) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) *flow.Account); ok { + r0 = rf(ctx, addr, header, _a3) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Account) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) error); ok { + r1 = rf(ctx, addr, header, _a3) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountAvailableBalance provides a mock function with given fields: ctx, addr, header, _a3 +func (_m *Executor) GetAccountAvailableBalance(ctx context.Context, addr flow.Address, header *flow.Header, _a3 snapshot.StorageSnapshot) (uint64, error) { + ret := _m.Called(ctx, addr, header, _a3) + + if len(ret) == 0 { + panic("no return value specified for GetAccountAvailableBalance") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) (uint64, error)); ok { + return rf(ctx, addr, header, _a3) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) uint64); ok { + 
r0 = rf(ctx, addr, header, _a3) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) error); ok { + r1 = rf(ctx, addr, header, _a3) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountBalance provides a mock function with given fields: ctx, addr, header, _a3 +func (_m *Executor) GetAccountBalance(ctx context.Context, addr flow.Address, header *flow.Header, _a3 snapshot.StorageSnapshot) (uint64, error) { + ret := _m.Called(ctx, addr, header, _a3) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalance") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) (uint64, error)); ok { + return rf(ctx, addr, header, _a3) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) uint64); ok { + r0 = rf(ctx, addr, header, _a3) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) error); ok { + r1 = rf(ctx, addr, header, _a3) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKey provides a mock function with given fields: ctx, addr, keyIndex, header, _a4 +func (_m *Executor) GetAccountKey(ctx context.Context, addr flow.Address, keyIndex uint32, header *flow.Header, _a4 snapshot.StorageSnapshot) (*flow.AccountPublicKey, error) { + ret := _m.Called(ctx, addr, keyIndex, header, _a4) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKey") + } + + var r0 *flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, *flow.Header, snapshot.StorageSnapshot) (*flow.AccountPublicKey, error)); ok { + return rf(ctx, addr, keyIndex, header, _a4) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, *flow.Header, snapshot.StorageSnapshot) *flow.AccountPublicKey); ok { + r0 = rf(ctx, addr, keyIndex, header, _a4) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint32, *flow.Header, snapshot.StorageSnapshot) error); ok { + r1 = rf(ctx, addr, keyIndex, header, _a4) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeys provides a mock function with given fields: ctx, addr, header, _a3 +func (_m *Executor) GetAccountKeys(ctx context.Context, addr flow.Address, header *flow.Header, _a3 snapshot.StorageSnapshot) ([]flow.AccountPublicKey, error) { + ret := _m.Called(ctx, addr, header, _a3) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeys") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) ([]flow.AccountPublicKey, error)); ok { + return rf(ctx, addr, header, _a3) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) []flow.AccountPublicKey); ok { + r0 = rf(ctx, addr, header, _a3) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, *flow.Header, snapshot.StorageSnapshot) error); ok { + r1 = rf(ctx, addr, header, _a3) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewExecutor creates a new instance of Executor. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewExecutor(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *Executor {
+	mock := &Executor{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/engine/execution/computation/snapshot_provider.go b/engine/execution/computation/snapshot_provider.go
new file mode 100644
index 00000000000..4819ca4b16d
--- /dev/null
+++ b/engine/execution/computation/snapshot_provider.go
@@ -0,0 +1,29 @@
+package computation
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+)
+
+// protocol.Snapshot satisfies SnapshotExecutionSubset, the subset of the protocol state snapshot that is needed by the FVM
+var _ protocol.SnapshotExecutionSubset = (protocol.Snapshot)(nil)
+
+// protocolStateWrapper just wraps the protocol.State and returns a SnapshotExecutionSubset
+// from the AtBlockID method instead of the protocol.Snapshot interface.
+type protocolStateWrapper struct {
+	protocol.State
+}
+
+// protocolStateWrapper implements `protocol.SnapshotExecutionSubsetProvider`
+var _ protocol.SnapshotExecutionSubsetProvider = (*protocolStateWrapper)(nil)
+
+func (p protocolStateWrapper) AtBlockID(blockID flow.Identifier) protocol.SnapshotExecutionSubset {
+	return p.State.AtBlockID(blockID)
+}
+
+// NewProtocolStateWrapper wraps the protocol.State input so that the AtBlockID method returns a
+// SnapshotExecutionSubset instead of the protocol.Snapshot interface.
+// This is used in the FVM for execution.
+func NewProtocolStateWrapper(s protocol.State) protocol.SnapshotExecutionSubsetProvider {
+	return protocolStateWrapper{s}
+}
diff --git a/engine/execution/engines.go b/engine/execution/engines.go
new file mode 100644
index 00000000000..9e7fa57a9f9
--- /dev/null
+++ b/engine/execution/engines.go
@@ -0,0 +1,21 @@
+package execution
+
+import (
+	"context"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// ScriptExecutor represents the RPC calls that the execution script engine exposes to support the Access Node API calls
+type ScriptExecutor interface {
+
+	// ExecuteScriptAtBlockID executes a script at the given block ID.
+	// It returns the value, the computation used, and the error (if any).
+	ExecuteScriptAtBlockID(ctx context.Context, script []byte, arguments [][]byte, blockID flow.Identifier) ([]byte, uint64, error)
+
+	// GetAccount returns the account details at the given block ID.
+	GetAccount(ctx context.Context, address flow.Address, blockID flow.Identifier) (*flow.Account, error)
+
+	// GetRegisterAtBlockID returns the value of a register at the given block ID (if available).
+	GetRegisterAtBlockID(ctx context.Context, owner, key []byte, blockID flow.Identifier) ([]byte, error)
+}
diff --git a/engine/execution/execution_test.go b/engine/execution/execution_test.go
index c823505ebaa..10b3e4d97ca 100644
--- a/engine/execution/execution_test.go
+++ b/engine/execution/execution_test.go
@@ -10,24 +10,23 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
-	"github.com/vmihailenco/msgpack"
+	"github.com/vmihailenco/msgpack/v4"
 	"go.uber.org/atomic"
 
 	execTestutil "github.com/onflow/flow-go/engine/execution/testutil"
 	"github.com/onflow/flow-go/engine/testutil"
 	testmock "github.com/onflow/flow-go/engine/testutil/mock"
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/model/flow/order"
"github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/utils/unittest" ) -func sendBlock(exeNode *testmock.ExecutionNode, from flow.Identifier, proposal *messages.BlockProposal) error { +func sendBlock(exeNode *testmock.ExecutionNode, from flow.Identifier, proposal *flow.Proposal) error { return exeNode.FollowerEngine.Process(channels.ReceiveBlocks, from, proposal) } @@ -43,35 +42,36 @@ func TestExecutionFlow(t *testing.T) { chainID := flow.Testnet - colID := unittest.IdentityFixture( + colID := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleCollection), unittest.WithKeys, ) - conID := unittest.IdentityFixture( + conID := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleConsensus), unittest.WithKeys, ) - exeID := unittest.IdentityFixture( + exeID := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleExecution), unittest.WithKeys, ) - verID := unittest.IdentityFixture( + verID := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleVerification), unittest.WithKeys, ) - identities := unittest.CompleteIdentitySet(colID, conID, exeID, verID).Sort(order.Canonical) + identities := unittest.CompleteIdentitySet(colID.Identity(), conID.Identity(), exeID.Identity(), verID.Identity()). + Sort(flow.Canonical[flow.Identity]) // create execution node exeNode := testutil.ExecutionNode(t, hub, exeID, identities, 21, chainID) ctx, cancel := context.WithCancel(context.Background()) unittest.RequireReturnsBefore(t, func() { - exeNode.Ready(ctx) + exeNode.Ready(t, ctx) }, 1*time.Second, "could not start execution node on time") defer exeNode.Done(cancel) - genesis, err := exeNode.State.AtHeight(0).Head() + genesis, err := exeNode.Blocks.ByHeight(0) require.NoError(t, err) tx1 := flow.TransactionBody{ @@ -98,40 +98,47 @@ func TestExecutionFlow(t *testing.T) { col2.ID(): &col2, } - clusterChainID := cluster.CanonicalClusterID(1, flow.IdentityList{colID}.NodeIDs()) + clusterChainID := cluster.CanonicalClusterID(1, flow.IdentityList{colID.Identity()}.NodeIDs()) // signed by the only collector - block := unittest.BlockWithParentAndProposerFixture(t, genesis, conID.NodeID) - voterIndices, err := signature.EncodeSignersToIndices( - []flow.Identifier{conID.NodeID}, []flow.Identifier{conID.NodeID}) - require.NoError(t, err) - block.Header.ParentVoterIndices = voterIndices + block := unittest.BlockWithParentAndProposerFixture(t, genesis.ToHeader(), conID.NodeID) // sets field `ParentVoterIndices` such that `conID.NodeID` is the sole signer signerIndices, err := signature.EncodeSignersToIndices( []flow.Identifier{colID.NodeID}, []flow.Identifier{colID.NodeID}) require.NoError(t, err) - block.SetPayload(flow.Payload{ - Guarantees: []*flow.CollectionGuarantee{ - { - CollectionID: col1.ID(), - SignerIndices: signerIndices, - ChainID: clusterChainID, - ReferenceBlockID: genesis.ID(), - }, - { - CollectionID: col2.ID(), - SignerIndices: signerIndices, - ChainID: clusterChainID, - ReferenceBlockID: genesis.ID(), + block, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: block.HeaderBody, + Payload: flow.Payload{ + Guarantees: []*flow.CollectionGuarantee{ + { + CollectionID: col1.ID(), + SignerIndices: signerIndices, + ClusterChainID: clusterChainID, + ReferenceBlockID: genesis.ID(), + }, + { 
+ CollectionID: col2.ID(), + SignerIndices: signerIndices, + ClusterChainID: clusterChainID, + ReferenceBlockID: genesis.ID(), + }, + }, + ProtocolStateID: genesis.Payload.ProtocolStateID, }, }, - }) + ) + require.NoError(t, err) - child := unittest.BlockWithParentAndProposerFixture(t, block.Header, conID.NodeID) - // the default signer indices is 2 bytes, but in this test cases - // we need 1 byte - child.Header.ParentVoterIndices = voterIndices + child := unittest.BlockWithParentAndProposerFixture(t, block.ToHeader(), conID.NodeID) // sets field `ParentVoterIndices` such that `conID.NodeID` is the sole signer + child, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: child.HeaderBody, + Payload: unittest.PayloadFixture(unittest.WithProtocolStateID(block.Payload.ProtocolStateID)), + }, + ) + require.NoError(t, err) - log.Info().Msgf("child block ID: %v, indices: %x", child.Header.ID(), child.Header.ParentVoterIndices) + log.Info().Msgf("child block ID: %v, indices: %x", child.ID(), child.ParentVoterIndices) collectionNode := testutil.GenericNodeFromParticipants(t, hub, colID, identities, chainID) defer collectionNode.Done() @@ -147,7 +154,7 @@ func TestExecutionFlow(t *testing.T) { providerEngine.On("Process", mock.AnythingOfType("channels.Channel"), exeID.NodeID, mock.Anything). Run(func(args mock.Arguments) { originID := args.Get(1).(flow.Identifier) - req := args.Get(2).(*messages.EntityRequest) + req := args.Get(2).(*flow.EntityRequest) var entities []flow.Entity for _, entityID := range req.EntityIDs { @@ -214,12 +221,12 @@ func TestExecutionFlow(t *testing.T) { Once() // submit block from consensus node - err = sendBlock(&exeNode, conID.NodeID, unittest.ProposalFromBlock(&block)) + err = sendBlock(&exeNode, conID.NodeID, unittest.ProposalFromBlock(block)) require.NoError(t, err) // submit the child block from consensus node, which trigger the parent block // to be passed to BlockProcessable - err = sendBlock(&exeNode, conID.NodeID, unittest.ProposalFromBlock(&child)) + err = sendBlock(&exeNode, conID.NodeID, unittest.ProposalFromBlock(child)) require.NoError(t, err) require.Eventually(t, func() bool { @@ -233,7 +240,13 @@ func TestExecutionFlow(t *testing.T) { }, time.Second*10, time.Millisecond*500) // check that the block has been executed. 
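+	// Note (assumption behind the branch below): with the storehouse enabled, the
+	// highest *executed* height only advances once a block is also finalized, so it
+	// is still expected to be at genesis here even though the block itself has
+	// already been executed.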
- exeNode.AssertHighestExecutedBlock(t, block.Header) + exeNode.AssertBlockIsExecuted(t, block.ToHeader()) + + if exeNode.StorehouseEnabled { + exeNode.AssertHighestExecutedBlock(t, genesis.ToHeader()) + } else { + exeNode.AssertHighestExecutedBlock(t, block.ToHeader()) + } myReceipt, err := exeNode.MyExecutionReceipts.MyReceipt(block.ID()) require.NoError(t, err) @@ -245,15 +258,25 @@ func TestExecutionFlow(t *testing.T) { consensusEngine.AssertExpectations(t) } -func deployContractBlock(t *testing.T, conID *flow.Identity, colID *flow.Identity, chain flow.Chain, seq uint64, parent *flow.Header, ref *flow.Header) ( - *flow.TransactionBody, *flow.Collection, flow.Block, *messages.BlockProposal, uint64) { +func deployContractBlock( + t *testing.T, + conID *flow.Identity, + colID *flow.Identity, + chain flow.Chain, + seq uint64, + parent *flow.Block, + ref *flow.Header, +) ( + *flow.TransactionBody, *flow.Collection, *flow.Block, *flow.Proposal, uint64) { // make tx - tx := execTestutil.DeployCounterContractTransaction(chain.ServiceAddress(), chain) - err := execTestutil.SignTransactionAsServiceAccount(tx, seq, chain) + txBodyBuilder := execTestutil.DeployCounterContractTransaction(chain.ServiceAddress(), chain) + err := execTestutil.SignTransactionAsServiceAccount(txBodyBuilder, seq, chain) require.NoError(t, err) // make collection - col := &flow.Collection{Transactions: []*flow.TransactionBody{tx}} + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + col := &flow.Collection{Transactions: []*flow.TransactionBody{txBody}} signerIndices, err := signature.EncodeSignersToIndices( []flow.Identifier{colID.NodeID}, []flow.Identifier{colID.NodeID}) @@ -262,65 +285,72 @@ func deployContractBlock(t *testing.T, conID *flow.Identity, colID *flow.Identit clusterChainID := cluster.CanonicalClusterID(1, flow.IdentityList{colID}.NodeIDs()) // make block - block := unittest.BlockWithParentAndProposerFixture(t, parent, conID.NodeID) - voterIndices, err := signature.EncodeSignersToIndices( - []flow.Identifier{conID.NodeID}, []flow.Identifier{conID.NodeID}) - require.NoError(t, err) - block.Header.ParentVoterIndices = voterIndices - block.SetPayload(flow.Payload{ - Guarantees: []*flow.CollectionGuarantee{ - { - CollectionID: col.ID(), - SignerIndices: signerIndices, - ChainID: clusterChainID, - ReferenceBlockID: ref.ID(), + block := unittest.BlockWithParentAndProposerFixture(t, parent.ToHeader(), conID.NodeID) // sets field `ParentVoterIndices` such that `conID.NodeID` is the sole signer + block, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: block.HeaderBody, + Payload: flow.Payload{ + Guarantees: []*flow.CollectionGuarantee{ + { + CollectionID: col.ID(), + SignerIndices: signerIndices, + ClusterChainID: clusterChainID, + ReferenceBlockID: ref.ID(), + }, + }, + ProtocolStateID: parent.Payload.ProtocolStateID, }, }, - }) + ) + require.NoError(t, err) // make proposal - proposal := unittest.ProposalFromBlock(&block) - - return tx, col, block, proposal, seq + 1 + proposal := unittest.ProposalFromBlock(block) + return txBody, col, block, proposal, seq + 1 } -func makePanicBlock(t *testing.T, conID *flow.Identity, colID *flow.Identity, chain flow.Chain, seq uint64, parent *flow.Header, ref *flow.Header) ( - *flow.TransactionBody, *flow.Collection, flow.Block, *messages.BlockProposal, uint64) { +func makePanicBlock(t *testing.T, conID *flow.Identity, colID *flow.Identity, chain flow.Chain, seq uint64, parent *flow.Block, ref *flow.Header) ( + *flow.TransactionBody, *flow.Collection, 
*flow.Block, *flow.Proposal, uint64) { // make tx - tx := execTestutil.CreateCounterPanicTransaction(chain.ServiceAddress(), chain.ServiceAddress()) - err := execTestutil.SignTransactionAsServiceAccount(tx, seq, chain) + txBodyBuilder := execTestutil.CreateCounterPanicTransaction(chain.ServiceAddress(), chain.ServiceAddress()) + err := execTestutil.SignTransactionAsServiceAccount(txBodyBuilder, seq, chain) require.NoError(t, err) // make collection - col := &flow.Collection{Transactions: []*flow.TransactionBody{tx}} + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + col := &flow.Collection{Transactions: []*flow.TransactionBody{txBody}} clusterChainID := cluster.CanonicalClusterID(1, flow.IdentityList{colID}.NodeIDs()) // make block - block := unittest.BlockWithParentAndProposerFixture(t, parent, conID.NodeID) - voterIndices, err := signature.EncodeSignersToIndices( - []flow.Identifier{conID.NodeID}, []flow.Identifier{conID.NodeID}) - require.NoError(t, err) - block.Header.ParentVoterIndices = voterIndices + block := unittest.BlockWithParentAndProposerFixture(t, parent.ToHeader(), conID.NodeID) // sets field `ParentVoterIndices` such that `conID.NodeID` is the sole signer signerIndices, err := signature.EncodeSignersToIndices( []flow.Identifier{colID.NodeID}, []flow.Identifier{colID.NodeID}) require.NoError(t, err) - block.SetPayload(flow.Payload{ - Guarantees: []*flow.CollectionGuarantee{ - {CollectionID: col.ID(), SignerIndices: signerIndices, ChainID: clusterChainID, ReferenceBlockID: ref.ID()}, + block, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: block.HeaderBody, + Payload: flow.Payload{ + Guarantees: []*flow.CollectionGuarantee{ + {CollectionID: col.ID(), SignerIndices: signerIndices, ClusterChainID: clusterChainID, ReferenceBlockID: ref.ID()}, + }, + ProtocolStateID: parent.Payload.ProtocolStateID, + }, }, - }) + ) + require.NoError(t, err) - proposal := unittest.ProposalFromBlock(&block) + proposal := unittest.ProposalFromBlock(block) - return tx, col, block, proposal, seq + 1 + return txBody, col, block, proposal, seq + 1 } -func makeSuccessBlock(t *testing.T, conID *flow.Identity, colID *flow.Identity, chain flow.Chain, seq uint64, parent *flow.Header, ref *flow.Header) ( - *flow.TransactionBody, *flow.Collection, flow.Block, *messages.BlockProposal, uint64) { - tx := execTestutil.AddToCounterTransaction(chain.ServiceAddress(), chain.ServiceAddress()) - err := execTestutil.SignTransactionAsServiceAccount(tx, seq, chain) +func makeSuccessBlock(t *testing.T, conID *flow.Identity, colID *flow.Identity, chain flow.Chain, seq uint64, parent *flow.Block, ref *flow.Header) ( + *flow.TransactionBody, *flow.Collection, *flow.Block, *flow.Proposal, uint64) { + txBodyBuilder := execTestutil.AddToCounterTransaction(chain.ServiceAddress(), chain.ServiceAddress()) + err := execTestutil.SignTransactionAsServiceAccount(txBodyBuilder, seq, chain) require.NoError(t, err) signerIndices, err := signature.EncodeSignersToIndices( @@ -328,21 +358,26 @@ func makeSuccessBlock(t *testing.T, conID *flow.Identity, colID *flow.Identity, require.NoError(t, err) clusterChainID := cluster.CanonicalClusterID(1, flow.IdentityList{colID}.NodeIDs()) - col := &flow.Collection{Transactions: []*flow.TransactionBody{tx}} - block := unittest.BlockWithParentAndProposerFixture(t, parent, conID.NodeID) - voterIndices, err := signature.EncodeSignersToIndices( - []flow.Identifier{conID.NodeID}, []flow.Identifier{conID.NodeID}) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) - 
block.Header.ParentVoterIndices = voterIndices - block.SetPayload(flow.Payload{ - Guarantees: []*flow.CollectionGuarantee{ - {CollectionID: col.ID(), SignerIndices: signerIndices, ChainID: clusterChainID, ReferenceBlockID: ref.ID()}, + col := &flow.Collection{Transactions: []*flow.TransactionBody{txBody}} + block := unittest.BlockWithParentAndProposerFixture(t, parent.ToHeader(), conID.NodeID) // sets field `ParentVoterIndices` such that `conID.NodeID` is the sole signer + block, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: block.HeaderBody, + Payload: flow.Payload{ + Guarantees: []*flow.CollectionGuarantee{ + {CollectionID: col.ID(), SignerIndices: signerIndices, ClusterChainID: clusterChainID, ReferenceBlockID: ref.ID()}, + }, + ProtocolStateID: parent.Payload.ProtocolStateID, + }, }, - }) + ) + require.NoError(t, err) - proposal := unittest.ProposalFromBlock(&block) + proposal := unittest.ProposalFromBlock(block) - return tx, col, block, proposal, seq + 1 + return txBody, col, block, proposal, seq + 1 } // Test a successful tx should change the statecommitment, @@ -352,36 +387,40 @@ func TestFailedTxWillNotChangeStateCommitment(t *testing.T) { chainID := flow.Emulator - colID := unittest.IdentityFixture( + colNodeInfo := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleCollection), unittest.WithKeys, ) - conID := unittest.IdentityFixture( + conNodeInfo := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleConsensus), unittest.WithKeys, ) - exe1ID := unittest.IdentityFixture( + exe1NodeInfo := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleExecution), unittest.WithKeys, ) + colID := colNodeInfo.Identity() + conID := conNodeInfo.Identity() + exe1ID := exe1NodeInfo.Identity() + identities := unittest.CompleteIdentitySet(colID, conID, exe1ID) key := unittest.NetworkingPrivKeyFixture() identities[3].NetworkPubKey = key.PublicKey() - collectionNode := testutil.GenericNodeFromParticipants(t, hub, colID, identities, chainID) + collectionNode := testutil.GenericNodeFromParticipants(t, hub, colNodeInfo, identities, chainID) defer collectionNode.Done() - consensusNode := testutil.GenericNodeFromParticipants(t, hub, conID, identities, chainID) + consensusNode := testutil.GenericNodeFromParticipants(t, hub, conNodeInfo, identities, chainID) defer consensusNode.Done() - exe1Node := testutil.ExecutionNode(t, hub, exe1ID, identities, 27, chainID) + exe1Node := testutil.ExecutionNode(t, hub, exe1NodeInfo, identities, 27, chainID) ctx, cancel := context.WithCancel(context.Background()) unittest.RequireReturnsBefore(t, func() { - exe1Node.Ready(ctx) + exe1Node.Ready(t, ctx) }, 1*time.Second, "could not start execution node on time") defer exe1Node.Done(cancel) - genesis, err := exe1Node.State.AtHeight(0).Head() + genesis, err := exe1Node.Blocks.ByHeight(0) require.NoError(t, err) seq := uint64(0) @@ -390,14 +429,14 @@ func TestFailedTxWillNotChangeStateCommitment(t *testing.T) { // transaction that will change state and succeed, used to test that state commitment changes // genesis <- block1 [tx1] <- block2 [tx2] <- block3 [tx3] <- child - _, col1, block1, proposal1, seq := deployContractBlock(t, conID, colID, chain, seq, genesis, genesis) + _, col1, block1, proposal1, seq := deployContractBlock(t, conID, colID, chain, seq, genesis, genesis.ToHeader()) // we don't set the proper sequence number of this one - _, col2, block2, proposal2, _ := makePanicBlock(t, conID, colID, chain, uint64(0), block1.Header, genesis) + _, col2, block2, proposal2, _ := 
makePanicBlock(t, conID, colID, chain, uint64(0), block1, genesis.ToHeader()) - _, col3, block3, proposal3, seq := makeSuccessBlock(t, conID, colID, chain, seq, block2.Header, genesis) + _, col3, block3, proposal3, seq := makeSuccessBlock(t, conID, colID, chain, seq, block2, genesis.ToHeader()) - _, _, _, proposal4, _ := makeSuccessBlock(t, conID, colID, chain, seq, block3.Header, genesis) + _, _, _, proposal4, _ := makeSuccessBlock(t, conID, colID, chain, seq, block3, genesis.ToHeader()) // seq++ // setup mocks and assertions @@ -435,12 +474,20 @@ func TestFailedTxWillNotChangeStateCommitment(t *testing.T) { hub.DeliverAllEventually(t, func() bool { return receiptsReceived.Load() == 1 }) - exe1Node.AssertHighestExecutedBlock(t, block1.Header) - scExe1Genesis, err := exe1Node.ExecutionState.StateCommitmentByBlockID(context.Background(), genesis.ID()) + if exe1Node.StorehouseEnabled { + exe1Node.AssertHighestExecutedBlock(t, genesis.ToHeader()) + } else { + exe1Node.AssertHighestExecutedBlock(t, block1.ToHeader()) + } + + exe1Node.AssertBlockIsExecuted(t, block1.ToHeader()) + exe1Node.AssertBlockNotExecuted(t, block2.ToHeader()) + + scExe1Genesis, err := exe1Node.ExecutionState.StateCommitmentByBlockID(genesis.ID()) assert.NoError(t, err) - scExe1Block1, err := exe1Node.ExecutionState.StateCommitmentByBlockID(context.Background(), block1.ID()) + scExe1Block1, err := exe1Node.ExecutionState.StateCommitmentByBlockID(block1.ID()) assert.NoError(t, err) assert.NotEqual(t, scExe1Genesis, scExe1Block1) @@ -457,14 +504,14 @@ func TestFailedTxWillNotChangeStateCommitment(t *testing.T) { }) // ensure state has been synced across both nodes - exe1Node.AssertHighestExecutedBlock(t, block3.Header) - // exe2Node.AssertHighestExecutedBlock(t, block3.Header) + exe1Node.AssertBlockIsExecuted(t, block2.ToHeader()) + exe1Node.AssertBlockIsExecuted(t, block3.ToHeader()) // verify state commitment of block 2 is the same as block 1, since tx failed on seq number verification - scExe1Block2, err := exe1Node.ExecutionState.StateCommitmentByBlockID(context.Background(), block2.ID()) + scExe1Block2, err := exe1Node.ExecutionState.StateCommitmentByBlockID(block2.ID()) assert.NoError(t, err) // TODO this is no longer valid because the system chunk can change the state - //assert.Equal(t, scExe1Block1, scExe1Block2) + // assert.Equal(t, scExe1Block1, scExe1Block2) _ = scExe1Block2 collectionEngine.AssertExpectations(t) @@ -484,7 +531,7 @@ func mockCollectionEngineToReturnCollections(t *testing.T, collectionNode *testm collectionEngine.On("Process", mock.AnythingOfType("channels.Channel"), mock.Anything, mock.Anything). 
Run(func(args mock.Arguments) { originID := args[1].(flow.Identifier) - req := args[2].(*messages.EntityRequest) + req := args[2].(*flow.EntityRequest) blob, ok := colMap[req.EntityIDs[0]] if !ok { assert.FailNow(t, "requesting unexpected collection", req.EntityIDs[0]) @@ -504,34 +551,39 @@ func TestBroadcastToMultipleVerificationNodes(t *testing.T) { chainID := flow.Emulator - colID := unittest.IdentityFixture( + colID := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleCollection), unittest.WithKeys, ) - conID := unittest.IdentityFixture( + conID := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleConsensus), unittest.WithKeys, ) - exeID := unittest.IdentityFixture( + exeID := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleExecution), unittest.WithKeys, ) - ver1ID := unittest.IdentityFixture( + ver1ID := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleVerification), unittest.WithKeys, ) - ver2ID := unittest.IdentityFixture( + ver2ID := unittest.PrivateNodeInfoFixture( unittest.WithRole(flow.RoleVerification), unittest.WithKeys, ) - identities := unittest.CompleteIdentitySet(colID, conID, exeID, ver1ID, ver2ID) + identities := unittest.CompleteIdentitySet(colID.Identity(), + conID.Identity(), + exeID.Identity(), + ver1ID.Identity(), + ver2ID.Identity(), + ) exeNode := testutil.ExecutionNode(t, hub, exeID, identities, 21, chainID) ctx, cancel := context.WithCancel(context.Background()) unittest.RequireReturnsBefore(t, func() { - exeNode.Ready(ctx) + exeNode.Ready(t, ctx) }, 1*time.Second, "could not start execution node on time") defer exeNode.Done(cancel) @@ -540,18 +592,23 @@ func TestBroadcastToMultipleVerificationNodes(t *testing.T) { verification2Node := testutil.GenericNodeFromParticipants(t, hub, ver2ID, identities, chainID) defer verification2Node.Done() - genesis, err := exeNode.State.AtHeight(0).Head() + genesis, err := exeNode.Blocks.ByHeight(0) require.NoError(t, err) - block := unittest.BlockWithParentAndProposerFixture(t, genesis, conID.NodeID) + block := unittest.BlockWithParentAndProposerFixture(t, genesis.ToHeader(), conID.NodeID) voterIndices, err := signature.EncodeSignersToIndices([]flow.Identifier{conID.NodeID}, []flow.Identifier{conID.NodeID}) require.NoError(t, err) - block.Header.ParentVoterIndices = voterIndices - block.SetPayload(flow.Payload{}) - proposal := unittest.ProposalFromBlock(&block) + block.ParentVoterIndices = voterIndices + block, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: block.HeaderBody, + Payload: unittest.PayloadFixture(unittest.WithProtocolStateID(genesis.Payload.ProtocolStateID)), + }, + ) + require.NoError(t, err) - child := unittest.BlockWithParentAndProposerFixture(t, block.Header, conID.NodeID) - child.Header.ParentVoterIndices = voterIndices + child := unittest.BlockWithParentAndProposerFixture(t, block.ToHeader(), conID.NodeID) + child.ParentVoterIndices = voterIndices actualCalls := atomic.Uint64{} @@ -566,13 +623,17 @@ func TestBroadcastToMultipleVerificationNodes(t *testing.T) { receipt, _ = args[2].(*flow.ExecutionReceipt) assert.Equal(t, block.ID(), receipt.ExecutionResult.BlockID) + for i, chunk := range receipt.ExecutionResult.Chunks { + assert.EqualValues(t, i, chunk.CollectionIndex) + assert.Greater(t, chunk.TotalComputationUsed, uint64(0)) + } }). 
		Return(nil)

-	err = sendBlock(&exeNode, exeID.NodeID, proposal)
+	err = sendBlock(&exeNode, exeID.NodeID, unittest.ProposalFromBlock(block))
	require.NoError(t, err)

-	err = sendBlock(&exeNode, conID.NodeID, unittest.ProposalFromBlock(&child))
+	err = sendBlock(&exeNode, conID.NodeID, unittest.ProposalFromBlock(child))
	require.NoError(t, err)

	hub.DeliverAllEventually(t, func() bool {
diff --git a/engine/execution/ingestion/block_queue/queue.go b/engine/execution/ingestion/block_queue/queue.go
new file mode 100644
index 00000000000..b466defa29c
--- /dev/null
+++ b/engine/execution/ingestion/block_queue/queue.go
@@ -0,0 +1,501 @@
+package block_queue
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/mempool/entity"
+)
+
+var ErrMissingParent = fmt.Errorf("missing parent block")
+
+// BlockQueue keeps track of the state of blocks and determines which blocks are executable.
+// A block becomes executable when all the following conditions are met:
+// 1. the block has been validated by the consensus algorithm
+// 2. the block's parent has been executed
+// 3. all the collections included in the block have been received
+type BlockQueue struct {
+	sync.Mutex
+	log zerolog.Logger
+
+	// if a block still exists in this map, it means the block has not been executed.
+	// it could be any one of the following cases:
+	// 1) the block is not executed because some of its collections are missing
+	// 2) the block is not executed because its parent block has not been executed
+	// 3) the block is ready to execute, but its execution has not finished yet.
+	// some consistency checks:
+	// 1) since an executed block must have been removed from this map, if a block's
+	// parent block has been executed, then its parent block must have been removed
+	// from this map
+	// 2) if a block's parent block has not been executed, then its parent block must still
+	// exist in this map
+	blocks map[flow.Identifier]*entity.ExecutableBlock
+
+	// a collection could be included in multiple blocks,
+	// so when a missing collection is received, it might make multiple blocks executable,
+	// which can be looked up via this map.
+	// when a block is executed, its collections should be removed from this map unless a collection
+	// is still referenced by other blocks, in which case it will eventually be removed when those
+	// blocks are executed.
+	collections map[flow.Identifier]*collectionInfo
+
+	// blockIDsByHeight is used to find the next executable block.
+	// when a block is executed, the next executable block must be a block with height = current block height + 1;
+	// this map allows us to find the next executable blocks by height and their parent block ID
+	blockIDsByHeight map[uint64]map[flow.Identifier]*entity.ExecutableBlock
+}
+
+// MissingCollection stores a collection guarantee for which an Execution Node has not
+// yet received the full collection. It is used for book-keeping while requesting collections.
+//
+//structwrite:immutable - mutations allowed only within the constructor
+type MissingCollection struct {
+	BlockID   flow.Identifier
+	Height    uint64
+	Guarantee *flow.CollectionGuarantee
+}
+
+// UntrustedMissingCollection is an untrusted input-only representation of a MissingCollection,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
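+//
+// A minimal construction sketch (illustrative only; `blockID`, `height` and
+// `guarantee` are placeholder values, not identifiers defined in this package):
+//
+//	mc, err := NewMissingCollection(UntrustedMissingCollection{
+//		BlockID:   blockID,   // ID of the block referencing the collection
+//		Height:    height,    // height of that block (must be non-zero)
+//		Guarantee: guarantee, // guarantee for the not-yet-received collection
+//	})
+//	if err != nil {
+//		// a valid MissingCollection cannot be constructed from the input
+//	}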
+//
+// An instance of UntrustedMissingCollection should be validated and converted into
+// a trusted MissingCollection using the NewMissingCollection constructor.
+type UntrustedMissingCollection MissingCollection
+
+// NewMissingCollection creates a new instance of MissingCollection.
+// Construction of a MissingCollection is allowed only via this constructor.
+//
+// All errors indicate a valid MissingCollection cannot be constructed from the input.
+func NewMissingCollection(untrusted UntrustedMissingCollection) (*MissingCollection, error) {
+	if untrusted.BlockID == flow.ZeroID {
+		return nil, fmt.Errorf("BlockID must not be empty")
+	}
+
+	if untrusted.Height == 0 {
+		return nil, fmt.Errorf("Height must not be zero")
+	}
+
+	if untrusted.Guarantee == nil {
+		return nil, fmt.Errorf("CollectionGuarantee must not be empty")
+	}
+
+	return &MissingCollection{
+		BlockID:   untrusted.BlockID,
+		Height:    untrusted.Height,
+		Guarantee: untrusted.Guarantee,
+	}, nil
+}
+
+// collectionInfo is an internal struct used to keep track of the state of a collection,
+// and of the blocks that include the collection
+type collectionInfo struct {
+	Collection *entity.CompleteCollection
+	IncludedIn map[flow.Identifier]*entity.ExecutableBlock
+}
+
+func NewBlockQueue(logger zerolog.Logger) *BlockQueue {
+	log := logger.With().Str("module", "block_queue").Logger()
+
+	return &BlockQueue{
+		log:              log,
+		blocks:           make(map[flow.Identifier]*entity.ExecutableBlock),
+		collections:      make(map[flow.Identifier]*collectionInfo),
+		blockIDsByHeight: make(map[uint64]map[flow.Identifier]*entity.ExecutableBlock),
+	}
+}
+
+// HandleBlock is called when a new block is received; the parentFinalState indicates
+// whether its parent block has been executed.
+// Caller must ensure:
+// 1. blocks are passed in order, i.e. a parent block is passed in before its child block
+// 2. if a block's parent is not executed, then the parent block must be passed in first
+// 3. if a block's parent is executed, then the parent's finalState must be passed in
+// It returns (nil, nil, nil) if this block is a duplicate
+func (q *BlockQueue) HandleBlock(block *flow.Block, parentFinalState *flow.StateCommitment) (
+	[]*MissingCollection, // missing collections
+	[]*entity.ExecutableBlock, // blocks ready to execute
+	error, // exceptions
+) {
+	q.Lock()
+	defer q.Unlock()
+
+	// check if the block already exists
+	blockID := block.ID()
+	executable, ok := q.blocks[blockID]
+
+	q.log.Debug().
+		Str("blockID", blockID.String()).
+		Uint64("height", block.Height).
+		Bool("parent executed", parentFinalState != nil).
+		Msg("handle block")
+
+	if ok {
+		// handle the case where the block has been seen before
+		return q.handleKnownBlock(executable, parentFinalState)
+	}
+
+	// handling a new block
+
+	_, parentExists := q.blocks[block.ParentID]
+	// if parentFinalState is not provided, then its parent block must exist in the queue,
+	// otherwise it's an exception
+	if parentFinalState == nil {
+		if !parentExists {
+			return nil, nil,
+				fmt.Errorf("block %s has no parent commitment, but its parent block %s does not exist in the queue: %w",
+					blockID, block.ParentID, ErrMissingParent)
+		}
+	} else {
+		if parentExists {
+			// this is an edge case where A <- B, and B is received with A's final state, however,
+			// A is not executed yet.
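+			// illustrative interleaving (a sketch of the race described below):
+			//
+			//	1. A is executed and A's result (with its final state) is saved to the database
+			//	2. HandleBlock(B, A's final state) is called — A is still in q.blocks
+			//	3. blockQueue.OnBlockExecuted(A) is only called afterwards, removing A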
+			// the reason this could happen is that there is a race condition between `OnBlockExecuted` and
+			// `HandleBlock`: when A's execution result (which contains the final state) has been
+			// saved to the database, and B is received before `blockQueue.OnBlockExecuted(A)` is called,
+			// then `blockQueue.HandleBlock(B, A's final state)` will be called, which runs into this case.
+			// In this case, if we considered A as executed and returned B as executable, then later,
+			// when `OnBlockExecuted(A)` is called, it would return B as executable again, which
+			// would cause B to be executed twice.
+			// In order to prevent B from being executed twice, we simply ignore A's final state, as if
+			// A had not been executed yet, in which case B is not executable. When `OnBlockExecuted(A)`
+			// is eventually called, it will return B as executable, so that both A and B are executed
+			// exactly once.
+			// See test case: TestHandleBlockChildCalledBeforeOnBlockExecutedParent
+			q.log.Warn().
+				Str("blockID", blockID.String()).
+				Uint64("height", block.Height).
+				Msgf("edge case: receiving block with parent commitment, but its parent block %s still exists",
+					block.ParentID)
+
+			parentFinalState = nil
+		}
+	}
+
+	executable = &entity.ExecutableBlock{
+		Block:      block,
+		StartState: parentFinalState,
+	}
+
+	// add block to blocks
+	q.blocks[blockID] = executable
+
+	// update collection
+	colls := make(map[flow.Identifier]*entity.CompleteCollection, len(block.Payload.Guarantees))
+	executable.CompleteCollections = colls
+
+	// find missing collections and update collection index
+	missingCollections := make([]*MissingCollection, 0, len(block.Payload.Guarantees))
+
+	for _, guarantee := range block.Payload.Guarantees {
+		colID := guarantee.CollectionID
+		colInfo, ok := q.collections[colID]
+		if ok {
+			// some other block also includes this collection
+			colInfo.IncludedIn[blockID] = executable
+			colls[colID] = colInfo.Collection
+		} else {
+			col := &entity.CompleteCollection{
+				Guarantee: guarantee,
+			}
+			colls[colID] = col
+
+			// add new collection to collections
+			q.collections[colID] = &collectionInfo{
+				Collection: col,
+				IncludedIn: map[flow.Identifier]*entity.ExecutableBlock{
+					blockID: executable,
+				},
+			}
+
+			missingCollection, err := NewMissingCollection(UntrustedMissingCollection{
+				BlockID:   executable.BlockID(),
+				Height:    executable.Block.Height,
+				Guarantee: col.Guarantee,
+			})
+			if err != nil {
+				return nil, nil, fmt.Errorf("could not construct missingCollection: %w", err)
+			}
+
+			missingCollections = append(
+				missingCollections,
+				missingCollection,
+			)
+		}
+	}
+
+	// index height
+	blocksAtSameHeight, ok := q.blockIDsByHeight[block.Height]
+	if !ok {
+		blocksAtSameHeight = make(map[flow.Identifier]*entity.ExecutableBlock)
+		q.blockIDsByHeight[block.Height] = blocksAtSameHeight
+	}
+	blocksAtSameHeight[blockID] = executable
+
+	// check if the block is executable
+	var executables []*entity.ExecutableBlock
+	if executable.IsComplete() {
+		// executables produced by onBlockExecuted might contain siblings of this block, but
+		// would not contain the block itself; since the block is already complete here,
+		// we return it explicitly
+		executables = []*entity.ExecutableBlock{executable}
+	}
+
+	return missingCollections, executables, nil
+}
+
+// HandleCollection is called when a new collection is received.
+// It returns a list of executable blocks that contain the collection.
+func (q *BlockQueue) HandleCollection(collection *flow.Collection) ([]*entity.ExecutableBlock, error) {
+	q.Lock()
+	defer q.Unlock()
+	// when a collection is received,
we find the blocks the collection is included in,
+	// and check whether those blocks become executable.
+	// Note a collection could be included in multiple blocks, so receiving a collection
+	// might make multiple blocks executable.
+
+	// check if the collection is for any block in the queue
+	colID := collection.ID()
+	colInfo, ok := q.collections[colID]
+	if !ok {
+		// no block in the queue includes this collection
+		return nil, nil
+	}
+
+	if colInfo.Collection.IsCompleted() {
+		// the collection has already been received; no action is needed because an action must
+		// have been returned when the collection was first received.
+		return nil, nil
+	}
+
+	// update collection
+	colInfo.Collection.Collection = collection
+
+	// check if any block, which includes this collection, became executable
+	executables := make([]*entity.ExecutableBlock, 0, len(colInfo.IncludedIn))
+	for _, block := range colInfo.IncludedIn {
+		if !block.IsComplete() {
+			continue
+		}
+		executables = append(executables, block)
+	}
+
+	if len(executables) == 0 {
+		return nil, nil
+	}
+
+	return executables, nil
+}
+
+// OnBlockExecuted is called when a block is executed.
+// It returns a list of executable blocks (usually its child blocks).
+// The caller has to ensure OnBlockExecuted is not called in the wrong order, such as
+// OnBlockExecuted(childBlock) being called before OnBlockExecuted(parentBlock).
+func (q *BlockQueue) OnBlockExecuted(
+	blockID flow.Identifier,
+	commit flow.StateCommitment,
+) ([]*entity.ExecutableBlock, error) {
+	q.Lock()
+	defer q.Unlock()
+
+	q.log.Debug().
+		Str("blockID", blockID.String()).
+		Hex("commit", commit[:]).
+		Msg("block executed")
+
+	return q.onBlockExecuted(blockID, commit)
+}
+
+func (q *BlockQueue) handleKnownBlock(executable *entity.ExecutableBlock, parentFinalState *flow.StateCommitment) (
+	[]*MissingCollection, // missing collections
+	[]*entity.ExecutableBlock, // blocks ready to execute
+	error, // exceptions
+) {
+	// we have already received this block, and its parent still has not been executed yet
+	if executable.StartState == nil && parentFinalState == nil {
+		return nil, nil, nil
+	}
+
+	// this is an edge case where parentFinalState is provided, and its parent block exists
+	// in the queue but has not been marked as executed yet (OnBlockExecuted(parent) has not been called);
+	// in this case, we will internally call onBlockExecuted(parentBlockID, parentFinalState).
+	// there is no need to create the executable block again, since it's already created.
+	if executable.StartState == nil && parentFinalState != nil {
+		q.log.Warn().
+			Str("blockID", executable.BlockID().String()).
+			Uint64("height", executable.Block.Height).
+			Hex("parentID", executable.Block.ParentID[:]).
+			Msg("edge case: receiving block with parent commitment, but its parent block has not been marked as executed yet")
+
+		executables, err := q.onBlockExecuted(executable.Block.ParentID, *parentFinalState)
+		if err != nil {
+			return nil, nil, fmt.Errorf("receiving block %v with parent commitment %v, but parent block %v already exists with no commitment, failed to mark parent as executed: %w",
+				executable.BlockID(), *parentFinalState, executable.Block.ParentID, err)
+		}
+
+		// we already have this block and its collections must have been fetched, so we only return the
+		// executables from marking its parent as executed.
+		return nil, executables, nil
+	}
+
+	// this means the caller thinks its parent has not been executed, but the queue's internal state
+	// shows the parent has been executed; it's probably a race condition where the call informing us
+	// that the parent block has been executed arrived earlier than this call. This is an edge case
+	// and we can simply ignore this call.
+	if executable.StartState != nil && parentFinalState == nil {
+		q.log.Warn().
+			Str("blockID", executable.BlockID().String()).
+			Uint64("height", executable.Block.Height).
+			Hex("parentID", executable.Block.ParentID[:]).
+			Msg("edge case: receiving block with no parent commitment, but its parent block actually has been executed")
+		return nil, nil, nil
+	}
+
+	// this is an exception that should not happen
+	if *executable.StartState != *parentFinalState {
+		return nil, nil,
+			fmt.Errorf("block %s has already been executed with a different parent final state, %v != %v",
+				executable.BlockID(), *executable.StartState, parentFinalState)
+	}
+
+	q.log.Warn().
+		Str("blockID", executable.BlockID().String()).
+		Uint64("height", executable.Block.Height).
+		Msg("edge case: HandleBlock is called with the same arguments again")
+	return nil, nil, nil
+}
+
+func (q *BlockQueue) onBlockExecuted(
+	blockID flow.Identifier,
+	commit flow.StateCommitment,
+) ([]*entity.ExecutableBlock, error) {
+	// when a block is executed, its child blocks might become executable;
+	// we also remove the block from all the indexes
+
+	// remove block
+	block, ok := q.blocks[blockID]
+	if !ok {
+		return nil, nil
+	}
+
+	// sanity check:
+	// if a block exists in the queue and is executed, then its parent block
+	// must not exist in the queue, otherwise the state is inconsistent
+	_, parentExists := q.blocks[block.Block.ParentID]
+	if parentExists {
+		return nil, fmt.Errorf("parent block %s of block %s is in the queue",
+			block.Block.ParentID, blockID)
+	}
+
+	delete(q.blocks, blockID)
+
+	// remove height index
+	height := block.Block.Height
+	delete(q.blockIDsByHeight[height], blockID)
+	if len(q.blockIDsByHeight[height]) == 0 {
+		delete(q.blockIDsByHeight, height)
+	}
+
+	// remove collections if no other block includes them
+	for colID := range block.CompleteCollections {
+		colInfo, ok := q.collections[colID]
+		if !ok {
+			return nil, fmt.Errorf("collection %s not found", colID)
+		}
+
+		delete(colInfo.IncludedIn, blockID)
+		if len(colInfo.IncludedIn) == 0 {
+			// no other block includes this collection,
+			// so it can be removed from the index
+			delete(q.collections, colID)
+		}
+	}
+
+	return q.checkIfChildBlockBecomeExecutable(block, commit)
+}
+
+func (q *BlockQueue) checkIfChildBlockBecomeExecutable(
+	block *entity.ExecutableBlock,
+	commit flow.StateCommitment,
+) ([]*entity.ExecutableBlock, error) {
+	childHeight := block.Block.Height + 1
+	blocksAtNextHeight, ok := q.blockIDsByHeight[childHeight]
+	if !ok {
+		// no block at next height
+		return nil, nil
+	}
+
+	// find children and update their start state
+	children := make([]*entity.ExecutableBlock, 0, len(blocksAtNextHeight))
+	for _, childBlock := range blocksAtNextHeight {
+		// a child block at the next height must have the same parent ID
+		// as the current block
+		isChild := childBlock.Block.ParentID == block.BlockID()
+		if !isChild {
+			continue
+		}
+
+		// update child block's start state with current block's end state
+		childBlock.StartState = &commit
+		children = append(children, childBlock)
+	}
+
+	if len(children) == 0 {
+		return nil, nil
+	}
+
+	// check if children are executable
+	executables := make([]*entity.ExecutableBlock, 0, len(children))
+	for _, child := range children {
+		if child.IsComplete() {
+			executables = append(executables, child)
+		}
+	}
+
+	return executables, nil
+}
+
+// GetMissingCollections returns the missing collections and the start state for the given block.
+// Useful for debugging what is missing for the next unexecuted block to become executable.
+// It returns an error if the block is not found, or if a missing collection could not be constructed.
+func (q *BlockQueue) GetMissingCollections(blockID flow.Identifier) (
+	[]*MissingCollection,
+	*flow.StateCommitment,
+	error,
+) {
+	q.Lock()
+	defer q.Unlock()
+	block, ok := q.blocks[blockID]
+	if !ok {
+		return nil, nil, fmt.Errorf("block %s not found", blockID)
+	}
+
+	missingCollections := make([]*MissingCollection, 0, len(block.Block.Payload.Guarantees))
+	for _, col := range block.CompleteCollections {
+		// skip collections that have already been received
+		if col.IsCompleted() {
+			continue
+		}
+
+		missingCollection, err := NewMissingCollection(UntrustedMissingCollection{
+			BlockID:   block.BlockID(),
+			Height:    block.Block.Height,
+			Guarantee: col.Guarantee,
+		})
+		if err != nil {
+			return nil, nil, fmt.Errorf("could not construct missingCollection: %w", err)
+		}
+
+		missingCollections = append(
+			missingCollections,
+			missingCollection,
+		)
+	}
+
+	return missingCollections, block.StartState, nil
+}
diff --git a/engine/execution/ingestion/block_queue/queue_test.go b/engine/execution/ingestion/block_queue/queue_test.go
new file mode 100644
index 00000000000..24e5f8636e5
--- /dev/null
+++ b/engine/execution/ingestion/block_queue/queue_test.go
@@ -0,0 +1,656 @@
+package block_queue
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/mempool/entity"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestSingleBlockBecomeReady(t *testing.T) {
+	t.Parallel()
+	// Given a chain
+	// R <- A(C1) <- B(C2,C3) <- C() <- D()
+	// -    ^------- E(C4,C5) <- F(C6)
+	// -    ^-----------G()
+	block, coll, commitFor := makeChainABCDEFG()
+	blockA := block("A")
+	c1, c2 := coll(1), coll(2)
+
+	q := NewBlockQueue(unittest.Logger())
+
+	// verify receiving a collection (C1) before its block (A) will be ignored
+	executables, err := q.HandleCollection(c1)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	// verify receiving a block (A) will return its missing collection (C1)
+	missing, executables, err := q.HandleBlock(blockA, commitFor("R"))
+	require.NoError(t, err)
+	require.Empty(t, executables)
+	requireCollectionHas(t, missing, c1)
+
+	// verify receiving a collection (C2) that is not for the block (A) will be ignored
+	executables, err = q.HandleCollection(c2)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	// verify after receiving all collections (C1), block (A) becomes executable
+	executables, err = q.HandleCollection(c1)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockA)
+
+	// verify after the block (A) is executed, no more blocks are executable and
+	// nothing is left in the queue
+	executables, err = q.OnBlockExecuted(blockA.ID(), *commitFor("A"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+	requireQueueIsEmpty(t, q)
+}
+
+func TestHandleBlockChildCalledBeforeOnBlockExecutedParent(t *testing.T) {
+	t.Parallel()
+	// Given a chain
+	// R <- A(C1) <- B(C2,C3) <- C() <- D()
+	// -    ^------- E(C4,C5) <- F(C6)
+	// -    ^-----------G()
+	block, _, commitFor := makeChainABCDEFG()
+	// take blocks C and D
+	blockC, blockD := block("C"), block("D")
+
+	q := NewBlockQueue(unittest.Logger())
+
+	// Given block B has been executed, and block C is received,
+	// block C becomes executable
+	missing, executables, err := q.HandleBlock(blockC, commitFor("B"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockC)
+	require.Empty(t, missing)
+
+	// Now we received blockD with block C's commit; however,
+	// the block queue state shows block D's parent (C) has not been executed yet,
+	// because OnBlockExecuted(C) has not been called.
+	// In this case, we will ignore block C's commit, as if block C had not
+	// been executed yet.
+	missing, executables, err = q.HandleBlock(blockD, commitFor("C"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+	require.Empty(t, missing)
+
+	// later block C is executed, which will make block D executable
+	executables, err = q.OnBlockExecuted(blockC.ID(), *commitFor("C"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockD)
+
+	// once block D is executed, the queue should be empty
+	executables, err = q.OnBlockExecuted(blockD.ID(), *commitFor("D"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+	requireQueueIsEmpty(t, q)
+}
+
+func TestMultipleBlockBecomesReady(t *testing.T) {
+	t.Parallel()
+	// Given a chain
+	// R <- A(C1) <- B(C2,C3) <- C() <- D()
+	// -    ^------- E(C4,C5) <- F(C6)
+	// -    ^-----------G()
+	block, coll, commitFor := makeChainABCDEFG()
+	blockA, blockB, blockC, blockD, blockE, blockF, blockG :=
+		block("A"), block("B"), block("C"), block("D"), block("E"), block("F"), block("G")
+	c1, c2, c3, c4, c5, c6 := coll(1), coll(2), coll(3), coll(4), coll(5), coll(6)
+
+	q := NewBlockQueue(unittest.Logger())
+
+	// verify receiving blocks without their collections will return missing collections and no executables
+	missing, executables, err := q.HandleBlock(blockA, commitFor("R"))
+	require.NoError(t, err)
+	require.Empty(t, executables)
+	requireCollectionHas(t, missing, c1)
+
+	missing, executables, err = q.HandleBlock(blockB, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables) // because A is not executed
+	requireCollectionHas(t, missing, c2, c3)
+
+	// creating forks
+	missing, executables, err = q.HandleBlock(blockE, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables) // because A is not executed
+	requireCollectionHas(t, missing, c4, c5)
+
+	// creating a fork with an empty block
+	missing, executables, err = q.HandleBlock(blockG, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables) // because E is not executed
+	requireCollectionHas(t, missing)
+
+	missing, executables, err = q.HandleBlock(blockF, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables) // because E is not executed
+	requireCollectionHas(t, missing, c6)
+
+	missing, executables, err = q.HandleBlock(blockC, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables) // because B is not executed
+	require.Empty(t, missing)
+
+	// verify receiving all collections makes a block executable
+	executables, err = q.HandleCollection(c1)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockA)
+
+	// verify receiving partial collections won't make a block executable
+	executables, err = q.HandleCollection(c2)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables) // because A is not executed and C3 is not received for B to be executable
+
+	// verify that when parent block (A) is executed, the child block (B) will not become executable if
+	// some collection (c3) is still missing
+	executables, err = q.OnBlockExecuted(blockA.ID(), *commitFor("A"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables) // because C3 is not received for B to be executable
+
+	// verify that when parent block (A) has been executed and the child block (B) has all of its
+	// collections, it becomes executable
+	executables, err = q.HandleCollection(c3)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockB) // c2, c3 are received, blockB is executable
+
+	executables, err = q.HandleCollection(c5)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables) // c4 is still missing, so blockE is not executable
+
+	executables, err = q.HandleCollection(c6)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables) // blockF's parent (E) is not executed, so blockF is not executable
+
+	executables, err = q.HandleCollection(c4)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockE) // c4, c5 are received, blockE is executable
+
+	// verify that when parent block (E) is executed, all its children (F,G) become executable if all
+	// their collections (C6) have already been received
+	executables, err = q.OnBlockExecuted(blockE.ID(), *commitFor("E"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockF, blockG)
+
+	executables, err = q.OnBlockExecuted(blockB.ID(), *commitFor("B"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockC)
+
+	executables, err = q.OnBlockExecuted(blockC.ID(), *commitFor("C"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	// verify receiving a block whose parent was executed before
+	missing, executables, err = q.HandleBlock(blockD, commitFor("C"))
+	require.NoError(t, err)
+	require.Empty(t, missing)
+	requireExecutableHas(t, executables, blockD)
+
+	executables, err = q.OnBlockExecuted(blockD.ID(), *commitFor("D"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	executables, err = q.OnBlockExecuted(blockF.ID(), *commitFor("F"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	executables, err = q.OnBlockExecuted(blockG.ID(), *commitFor("G"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	// verify after all blocks are executed, the queue is empty
+	requireQueueIsEmpty(t, q)
+}
+
+func TestOneReadyAndMultiplePending(t *testing.T) {
+	t.Parallel()
+	// Given a chain
+	// R() <- A() <- B(C1, C2) <- C(C3)
+	// -      ^----- D(C1, C2) <- E(C3)
+	// -      ^----- F(C1, C2, C3)
+	block, coll, commitFor := makeChainABCDEF()
+	blockA, blockB, blockC := block("A"), block("B"), block("C")
+	c1, c2, c3 := coll(1), coll(2), coll(3)
+
+	q := NewBlockQueue(unittest.Logger())
+	_, _, err := q.HandleBlock(blockA, commitFor("R"))
+	require.NoError(t, err)
+
+	// received B when A is not executed
+	missing, executables, err := q.HandleBlock(blockB, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables)
+	requireCollectionHas(t, missing, c1, c2)
+
+	_, err = q.HandleCollection(c1)
+	require.NoError(t, err)
+
+	_, err = q.HandleCollection(c2)
+	require.NoError(t, err)
+
+	// received C when B is not executed
+	_, _, err = q.HandleBlock(blockC, nil)
+	require.NoError(t, err)
+
+	_, err = q.HandleCollection(c3)
+	require.NoError(t, err)
+
+	// A is executed
+	executables, err = q.OnBlockExecuted(blockA.ID(), *commitFor("A"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockB) // B is executable
+
+	// B is executed
+	executables, err = q.OnBlockExecuted(blockB.ID(), *commitFor("B"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockC) // C is executable
+}
+
+func TestOnForksWithSameCollections(t *testing.T) {
+	t.Parallel()
+	// Given a chain
+	// R() <- A() <- B(C1, C2) <- C(C3)
+	// -      ^----- D(C1, C2) <- E(C3)
+	// -      ^----- F(C1, C2, C3)
+	block, coll, commitFor := makeChainABCDEF()
+	blockA, blockB, blockC, blockD, blockE, blockF :=
+		block("A"), block("B"), block("C"), block("D"), block("E"), block("F")
+	c1, c2, c3 := coll(1), coll(2), coll(3)
+
+	q := NewBlockQueue(unittest.Logger())
+
+	missing, executables, err := q.HandleBlock(blockA, commitFor("R"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockA)
+	requireCollectionHas(t, missing)
+
+	// receiving block B and D, which have the same collections (C1, C2)
+	missing, executables, err = q.HandleBlock(blockB, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables)
+	requireCollectionHas(t, missing, c1, c2)
+
+	// receiving block F (C1, C2, C3)
+	missing, executables, err = q.HandleBlock(blockF, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables)
+	requireCollectionHas(t, missing, c3) // c1 and c2 were requested before, only c3 is missing
+
+	// verify receiving D will not return any missing collections, because its
+	// missing collections were already returned when receiving B
+	missing, executables, err = q.HandleBlock(blockD, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables)
+	requireCollectionHas(t, missing)
+
+	// verify receiving all collections makes all blocks executable
+	executables, err = q.HandleCollection(c1)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	// A is executed
+	executables, err = q.OnBlockExecuted(blockA.ID(), *commitFor("A"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables) // because C2 is not received
+
+	executables, err = q.HandleCollection(c2)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockB, blockD)
+
+	// verify that if 2 blocks (C, E) include the same collection (C3), and all collections have been
+	// received, but only one block's (C's) parent (B) is executed, then only that block (C) becomes
+	// executable; the other block (E) is not executable
+
+	missing, executables, err = q.HandleBlock(blockC, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables)
+	requireCollectionHas(t, missing) // because C3 was requested when F was received
+
+	missing, executables, err = q.HandleBlock(blockE, nil)
+	require.NoError(t, err)
+	require.Empty(t, executables)
+	requireCollectionHas(t, missing)
+
+	executables, err = q.OnBlockExecuted(blockB.ID(), *commitFor("B"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	// verify C and F are executable, because their parents have been executed;
+	// E is not executable, because E's parent (D) is not executed yet.
+	executables, err = q.HandleCollection(c3)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockC, blockF)
+
+	// verify when D is executed, E becomes executable
+	executables, err = q.OnBlockExecuted(blockD.ID(), *commitFor("D"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockE)
+
+	// verify that after the remaining blocks (C,E,F) are executed, the queue is empty
+	executables, err = q.OnBlockExecuted(blockE.ID(), *commitFor("E"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	executables, err = q.OnBlockExecuted(blockF.ID(), *commitFor("F"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	executables, err = q.OnBlockExecuted(blockC.ID(), *commitFor("C"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+
+	requireQueueIsEmpty(t, q)
+}
+
+func TestOnBlockWithMissingParentCommit(t *testing.T) {
+	t.Parallel()
+	// Given a chain
+	// R <- A(C1) <- B(C2,C3) <- C() <- D()
+	// -    ^------- E(C4,C5) <- F(C6)
+	// -    ^-----------G()
+
+	block, coll, commitFor := makeChainABCDEFG()
+	blockA, blockB := block("A"), block("B")
+	c1, c2, c3 := coll(1), coll(2), coll(3)
+
+	q := NewBlockQueue(unittest.Logger())
+
+	missing, executables, err := q.HandleBlock(blockA, commitFor("R"))
+	require.NoError(t, err)
+	require.Empty(t, executables)
+	requireCollectionHas(t, missing, c1)
+
+	// block A has all its collections and becomes executable
+	executables, err = q.HandleCollection(c1)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockA)
+
+	// the following two calls create an edge case where A is executed
+	// and B is received, however, due to a race condition, the parent commit
+	// has not been saved in the database yet
+	executables, err = q.OnBlockExecuted(blockA.ID(), *commitFor("A"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+	requireQueueIsEmpty(t, q)
+
+	// verify that when the race condition happens, ErrMissingParent is returned
+	_, _, err = q.HandleBlock(blockB, nil)
+	require.True(t, errors.Is(err, ErrMissingParent), err)
+
+	// verify that if called again with the parent commit, the call succeeds
+	missing, executables, err = q.HandleBlock(blockB, commitFor("A"))
+	require.NoError(t, err)
+	require.Empty(t, executables)
+	requireCollectionHas(t, missing, c2, c3)
+
+	// verify after receiving all collections, B becomes executable
+	executables, err = q.HandleCollection(c2)
+	require.NoError(t, err)
+	require.Empty(t, executables)
+
+	executables, err = q.HandleCollection(c3)
+	require.NoError(t, err)
+	requireExecutableHas(t, executables, blockB)
+
+	// verify after B is executed, the queue is empty
+	executables, err = q.OnBlockExecuted(blockB.ID(), *commitFor("B"))
+	require.NoError(t, err)
+	requireExecutableHas(t, executables)
+	requireQueueIsEmpty(t, q)
+}
+
+// TestNewMissingCollection verifies that NewMissingCollection constructs a valid MissingCollection
+// when given complete, non-zero fields, and returns an error if any required field is missing.
+// It covers: +// - valid missing collection creation +// - missing BlockID +// - zero Height +// - nil Guarantee +func TestNewMissingCollection(t *testing.T) { + height := uint64(10) + + t.Run("valid missing collection", func(t *testing.T) { + id := unittest.IdentifierFixture() + guarantee := unittest.CollectionGuaranteeFixture() + + uc := UntrustedMissingCollection{ + BlockID: id, + Height: height, + Guarantee: guarantee, + } + + mc, err := NewMissingCollection(uc) + assert.NoError(t, err) + assert.NotNil(t, mc) + assert.Equal(t, id, mc.BlockID) + assert.Equal(t, height, mc.Height) + assert.Equal(t, guarantee, mc.Guarantee) + }) + + t.Run("missing BlockID", func(t *testing.T) { + guarantee := unittest.CollectionGuaranteeFixture() + + uc := UntrustedMissingCollection{ + BlockID: flow.ZeroID, + Height: height, + Guarantee: guarantee, + } + + mc, err := NewMissingCollection(uc) + assert.Error(t, err) + assert.Nil(t, mc) + assert.Contains(t, err.Error(), "BlockID") + }) + + t.Run("zero Height", func(t *testing.T) { + id := unittest.IdentifierFixture() + guarantee := unittest.CollectionGuaranteeFixture() + + uc := UntrustedMissingCollection{ + BlockID: id, + Height: 0, + Guarantee: guarantee, + } + + mc, err := NewMissingCollection(uc) + assert.Error(t, err) + assert.Nil(t, mc) + assert.Contains(t, err.Error(), "Height") + }) + + t.Run("nil Guarantee", func(t *testing.T) { + id := unittest.IdentifierFixture() + + uc := UntrustedMissingCollection{ + BlockID: id, + Height: height, + Guarantee: nil, + } + + mc, err := NewMissingCollection(uc) + assert.Error(t, err) + assert.Nil(t, mc) + assert.Contains(t, err.Error(), "CollectionGuarantee") + }) +} + +/* ==== Test utils ==== */ + +// GetBlock("A") => A +type GetBlock func(name string) *flow.Block + +// GetCollection(1) => C1 +type GetCollection func(name int) *flow.Collection + +// GetCommit("A") => A_FinalState +type GetCommit func(name string) *flow.StateCommitment + +// R <- A(C1) <- B(C2,C3) <- C() <- D() +// - ^------- E(C4,C5) <- F(C6) +// - ^-----------G() +func makeChainABCDEFG() (GetBlock, GetCollection, GetCommit) { + cs := unittest.CollectionListFixture(6) + c1, c2, c3, c4, c5, c6 := + cs[0], cs[1], cs[2], cs[3], cs[4], cs[5] + getCol := func(name int) *flow.Collection { + if name < 1 || name > len(cs) { + return nil + } + return cs[name-1] + } + + blockR := unittest.BlockFixture() + bs := unittest.ChainBlockFixtureWithRoot(blockR.ToHeader(), 4) + blockA, blockB, blockC, blockD := bs[0], bs[1], bs[2], bs[3] + unittest.AddCollectionsToBlock(blockA, []*flow.Collection{c1}) + unittest.AddCollectionsToBlock(blockB, []*flow.Collection{c2, c3}) + unittest.RechainBlocks(bs) + + bs = unittest.ChainBlockFixtureWithRoot(blockA.ToHeader(), 2) + blockE, blockF := bs[0], bs[1] + unittest.AddCollectionsToBlock(blockE, []*flow.Collection{c4, c5}) + unittest.AddCollectionsToBlock(blockF, []*flow.Collection{c6}) + unittest.RechainBlocks(bs) + + bs = unittest.ChainBlockFixtureWithRoot(blockE.ToHeader(), 1) + blockG := bs[0] + + blockLookup := map[string]*flow.Block{ + "R": blockR, + "A": blockA, + "B": blockB, + "C": blockC, + "D": blockD, + "E": blockE, + "F": blockF, + "G": blockG, + } + + getBlock := func(name string) *flow.Block { + return blockLookup[name] + } + + commitLookup := make(map[string]*flow.StateCommitment, len(blockLookup)) + for name := range blockLookup { + commit := unittest.StateCommitmentFixture() + commitLookup[name] = &commit + } + + getCommit := func(name string) *flow.StateCommitment { + commit, ok := commitLookup[name] + if 
!ok { + panic("commit not found") + } + return commit + } + + return getBlock, getCol, getCommit +} + +// R() <- A() <- B(C1, C2) <- C(C3) +// - ^----- D(C1, C2) <- E(C3) +// - ^----- F(C1, C2, C3) +func makeChainABCDEF() (GetBlock, GetCollection, GetCommit) { + cs := unittest.CollectionListFixture(3) + c1, c2, c3 := cs[0], cs[1], cs[2] + getCol := func(name int) *flow.Collection { + if name < 1 || name > len(cs) { + return nil + } + return cs[name-1] + } + + blockR := unittest.BlockFixture() + bs := unittest.ChainBlockFixtureWithRoot(blockR.ToHeader(), 3) + blockA, blockB, blockC := bs[0], bs[1], bs[2] + unittest.AddCollectionsToBlock(blockB, []*flow.Collection{c1, c2}) + unittest.AddCollectionsToBlock(blockC, []*flow.Collection{c3}) + unittest.RechainBlocks(bs) + + bs = unittest.ChainBlockFixtureWithRoot(blockA.ToHeader(), 2) + blockD, blockE := bs[0], bs[1] + unittest.AddCollectionsToBlock(blockD, []*flow.Collection{c1, c2}) + unittest.AddCollectionsToBlock(blockE, []*flow.Collection{c3}) + unittest.RechainBlocks(bs) + + bs = unittest.ChainBlockFixtureWithRoot(blockA.ToHeader(), 1) + blockF := bs[0] + unittest.AddCollectionsToBlock(blockF, []*flow.Collection{c1, c2, c3}) + unittest.RechainBlocks(bs) + + blockLookup := map[string]*flow.Block{ + "R": blockR, + "A": blockA, + "B": blockB, + "C": blockC, + "D": blockD, + "E": blockE, + "F": blockF, + } + + getBlock := func(name string) *flow.Block { + return blockLookup[name] + } + + commitLookup := make(map[string]*flow.StateCommitment, len(blockLookup)) + for name := range blockLookup { + commit := unittest.StateCommitmentFixture() + commitLookup[name] = &commit + } + + getCommit := func(name string) *flow.StateCommitment { + commit, ok := commitLookup[name] + if !ok { + panic("commit not found for " + name) + } + return commit + } + + return getBlock, getCol, getCommit +} + +func requireExecutableHas(t *testing.T, executables []*entity.ExecutableBlock, bs ...*flow.Block) { + blocks := make(map[flow.Identifier]*flow.Block, len(bs)) + for _, b := range bs { + blocks[b.ID()] = b + } + + for _, e := range executables { + _, ok := blocks[e.Block.ID()] + require.True(t, ok) + delete(blocks, e.Block.ID()) + } + + require.Equal(t, len(bs), len(executables)) + require.Equal(t, 0, len(blocks)) +} + +func requireCollectionHas(t *testing.T, missing []*MissingCollection, cs ...*flow.Collection) { + collections := make(map[flow.Identifier]*flow.Collection, len(cs)) + for _, c := range cs { + collections[c.ID()] = c + } + + for _, m := range missing { + _, ok := collections[m.Guarantee.CollectionID] + require.True(t, ok) + delete(collections, m.Guarantee.CollectionID) + } + + require.Equal(t, len(cs), len(missing)) + require.Equal(t, 0, len(collections)) +} + +func requireQueueIsEmpty(t *testing.T, q *BlockQueue) { + require.Equal(t, 0, len(q.blocks)) + require.Equal(t, 0, len(q.collections)) + require.Equal(t, 0, len(q.blockIDsByHeight)) +} diff --git a/engine/execution/ingestion/core.go b/engine/execution/ingestion/core.go new file mode 100644 index 00000000000..fa42ccd3e54 --- /dev/null +++ b/engine/execution/ingestion/core.go @@ -0,0 +1,535 @@ +package ingestion + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/execution" + "github.com/onflow/flow-go/engine/execution/ingestion/block_queue" + "github.com/onflow/flow-go/engine/execution/ingestion/stop" + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/model/flow" + 
"github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/entity" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/logging" +) + +// MaxProcessableBlocks is the maximum number of blocks that is queued to be processed +const MaxProcessableBlocks = 10000 + +// MaxConcurrentBlockExecutor is the maximum number of concurrent block executors +const MaxConcurrentBlockExecutor = 5 + +// Core connects the execution components +// when it receives blocks and collections, it forwards them to the block queue. +// when the block queue decides to execute blocks, it forwards to the executor for execution +// when the block queue decides to fetch missing collections, it forwards to the collection fetcher +// when a block is executed, it notifies the block queue and forwards to execution state to save them. +type Core struct { + *component.ComponentManager + + log zerolog.Logger + + // when a block is received, it is first pushed to the processables channel, and then the worker will + // fetch the collections and forward it to the block queue. + // once the data is fetched, and its parent block is executed, then the block is ready to be executed, it + // will be pushed to the blockExecutors channel, and the worker will execute the block. + // during startup, the throttle will limit the number of blocks to be added to the processables channel. + // once caught up, the throttle will allow all the remaining blocks to be added to the processables channel. + processables chan BlockIDHeight // block IDs that are received and waiting to be processed + throttle Throttle // to throttle the blocks to be added to processables during startup and catchup + blockQueue *block_queue.BlockQueue // blocks are waiting for the data to be fetched + blockExecutors chan *entity.ExecutableBlock // blocks that are ready to be executed + stopControl *stop.StopControl // decide whether to execute a block or not and when to stop the execution + + // data storage + execState state.ExecutionState + blocks storage.Blocks + collections storage.Collections + + // computation, data fetching, events + executor BlockExecutor + collectionFetcher CollectionFetcher + eventConsumer EventConsumer + metrics module.ExecutionMetrics +} + +type BlockExecutor interface { + ExecuteBlock(ctx context.Context, block *entity.ExecutableBlock) (*execution.ComputationResult, error) +} + +type EventConsumer interface { + BeforeComputationResultSaved(ctx context.Context, result *execution.ComputationResult) + OnComputationResultSaved(ctx context.Context, result *execution.ComputationResult) string +} + +func NewCore( + logger zerolog.Logger, + throttle Throttle, + execState state.ExecutionState, + stopControl *stop.StopControl, + blocks storage.Blocks, + collections storage.Collections, + executor BlockExecutor, + collectionFetcher CollectionFetcher, + eventConsumer EventConsumer, + metrics module.ExecutionMetrics, +) (*Core, error) { + e := &Core{ + log: logger.With().Str("engine", "ingestion_core").Logger(), + processables: make(chan BlockIDHeight, MaxProcessableBlocks), + blockExecutors: make(chan *entity.ExecutableBlock), + throttle: throttle, + execState: execState, + blockQueue: block_queue.NewBlockQueue(logger), + stopControl: stopControl, + blocks: blocks, + collections: collections, + executor: executor, + collectionFetcher: collectionFetcher, + eventConsumer: eventConsumer, + metrics: metrics, + } + + err := 
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize throttle engine: %w", err)
+	}
+
+	e.log.Info().Msgf("throttle engine initialized")
+
+	builder := component.NewComponentManagerBuilder().AddWorker(e.launchWorkerToHandleBlocks)
+
+	for w := 0; w < MaxConcurrentBlockExecutor; w++ {
+		builder.AddWorker(e.launchWorkerToExecuteBlocks)
+	}
+
+	e.ComponentManager = builder.Build()
+
+	return e, nil
+}
+
+func (e *Core) launchWorkerToHandleBlocks(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+	executionStopped := e.stopControl.IsExecutionStopped()
+
+	e.log.Info().Bool("execution_stopped", executionStopped).Msgf("launching worker")
+
+	ready()
+
+	if executionStopped {
+		return
+	}
+
+	e.launchWorkerToConsumeThrottledBlocks(ctx)
+}
+
+func (e *Core) launchWorkerToExecuteBlocks(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+	ready()
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case executable := <-e.blockExecutors:
+			err := e.execute(ctx, executable)
+			if err != nil {
+				ctx.Throw(fmt.Errorf("execution ingestion engine failed to execute block %v (%v): %w",
+					executable.Block.Height,
+					executable.Block.ID(), err))
+			}
+		}
+	}
+}
+
+func (e *Core) OnCollection(col *flow.Collection) {
+	err := e.onCollection(col)
+	if err != nil {
+		e.log.Fatal().Err(err).Msgf("error processing collection: %v", col.ID())
+	}
+}
+
+func (e *Core) launchWorkerToConsumeThrottledBlocks(ctx irrecoverable.SignalerContext) {
+	// run a worker in the background to consume
+	// throttled processable blocks,
+	// and forward them to the block queue for processing
+	e.log.Info().Msgf("starting worker to consume throttled blocks")
+	defer func() {
+		e.log.Info().Msgf("worker to consume throttled blocks stopped")
+	}()
+	for {
+		select {
+		case <-ctx.Done():
+			// if the engine has shut down, then mark the throttle as Done, which
+			// will stop new blocks from being sent to e.processables
+			err := e.throttle.Done()
+			if err != nil {
+				ctx.Throw(fmt.Errorf("execution ingestion engine failed to stop throttle: %w", err))
+			}
+
+			// drain the processables
+			e.log.Info().Msgf("draining processables")
+			close(e.processables)
+			for range e.processables {
+			}
+			e.log.Info().Msgf("finished draining processables")
+			return
+
+		case blockIDHeight := <-e.processables:
+			e.log.Debug().Hex("block_id", blockIDHeight.ID[:]).Uint64("height", blockIDHeight.Height).Msg("ingestion core processing block")
+			err := e.onProcessableBlock(blockIDHeight.ID, blockIDHeight.Height)
+			if err != nil {
+				ctx.Throw(fmt.Errorf("execution ingestion engine failed to process block %v (height: %v): %w",
+					blockIDHeight.ID, blockIDHeight.Height, err))
+				return
+			}
+		}
+	}
+
+}
+
+func (e *Core) onProcessableBlock(blockID flow.Identifier, height uint64) error {
+	// skip if stopControl tells us to skip
+	if !e.stopControl.ShouldExecuteBlock(blockID, height) {
+		return nil
+	}
+
+	executed, err := e.execState.IsBlockExecuted(height, blockID)
+	if err != nil {
+		return fmt.Errorf("could not check whether block %v is executed: %w", blockID, err)
+	}
+
+	if executed {
+		e.log.Debug().Hex("block_id", blockID[:]).Uint64("height", height).Msg("block has been executed already")
+		return nil
+	}
+
+	block, err := e.blocks.ByID(blockID)
+	if err != nil {
+		return fmt.Errorf("failed to get block %s: %w", blockID, err)
+	}
+
+	missingColls, executables, err := e.enqueuBlock(block, blockID)
+	if err != nil {
+		return fmt.Errorf("failed to enqueue block %v: %w", blockID, err)
+	}
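+
+	// at this point the queue tracks the block: any executables it returned can be
+	// dispatched right away, while any missing collections still need to be requested
+	// via the collection fetcher; both steps happen below.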
+ + lg := e.log.With(). + Hex("block_id", blockID[:]).Uint64("height", height). + Logger() + lg.Debug(). + Int("executables", len(executables)).Msg("dispatching executable blocks for concurrent execution") + e.executeConcurrently(executables) + + missingCount, err := e.fetch(missingColls) + if err != nil { + return fmt.Errorf("failed to fetch missing collections: %w", err) + } + + lg.Debug().Int("missing_collections", missingCount).Msg("fetching missing collections") + + return nil +} + +func (e *Core) enqueueBlock(block *flow.Block, blockID flow.Identifier) ( + []*block_queue.MissingCollection, + []*entity.ExecutableBlock, + error, +) { + lg := e.log.With(). + Hex("block_id", blockID[:]). + Uint64("height", block.Height). + Logger() + + lg.Info().Msg("handling new block") + + parentCommitment, err := e.execState.StateCommitmentByBlockID(block.ParentID) + + if err == nil { + // the parent block is an executed block. + missingColls, executables, err := e.blockQueue.HandleBlock(block, &parentCommitment) + if err != nil { + return nil, nil, fmt.Errorf("unexpected error while adding block to block queue: %w", err) + } + + lg.Info().Bool("parent_is_executed", true). + Int("missing_col", len(missingColls)). + Int("executables", len(executables)). + Msg("block is enqueued") + + return missingColls, executables, nil + } + + // any error other than ErrNotFound is an exception + if !errors.Is(err, storage.ErrNotFound) { + return nil, nil, fmt.Errorf("failed to get state commitment for parent block %v of block %v (height: %v): %w", + block.ParentID, blockID, block.Height, err) + } + + // the parent block is an unexecuted block. + // we can enqueue the block without providing the state commitment + missingColls, executables, err := e.blockQueue.HandleBlock(block, nil) + if err != nil { + if !errors.Is(err, block_queue.ErrMissingParent) { + return nil, nil, fmt.Errorf("unexpected error while adding block to block queue: %w", err) + } + + // if the parent is missing, there are two possibilities: + // 1) the parent was never enqueued to the block queue + // 2) the parent was enqueued, but it has since been executed and removed from the block queue + // In fact, only 2) is possible here: forwardProcessableToHandler guarantees we always + // enqueue a block before its child, so by the time HandleBlock is called for a block, + // HandleBlock must already have been called for its parent, which rules out 1). + // Case 2) is possible because our observation that the parent is missing may be outdated: + // OnBlockExecuted might run concurrently in a different thread after we fetched the parent + // commitment and before HandleBlock was called. Therefore, we should re-fetch the parent + // commitment, re-enqueue the block with it, and check again whether the parent block + // has been executed after the call.
+ lg.Warn().Msgf( + "block is missing parent block, re-enqueueing %v (parent: %v)", + blockID, block.ParentID, + ) + + parentCommitment, err := e.execState.StateCommitmentByBlockID(block.ParentID) + if err != nil { + return nil, nil, fmt.Errorf("failed to get parent state commitment when re-enqueueing block %v (parent: %v): %w", + blockID, block.ParentID, err) + } + + // now re-enqueue the block with the parent commit + missingColls, executables, err = e.blockQueue.HandleBlock(block, &parentCommitment) + if err != nil { + return nil, nil, fmt.Errorf("unexpected error while re-enqueueing block to block queue: %w", err) + } + } + + lg.Info().Bool("parent_is_executed", false). + Int("missing_col", len(missingColls)). + Int("executables", len(executables)). + Msg("block is enqueued") + + return missingColls, executables, nil +} + +func (e *Core) onBlockExecuted( + ctx context.Context, + block *entity.ExecutableBlock, + computationResult *execution.ComputationResult, + startedAt time.Time, +) error { + commit := computationResult.CurrentEndState() + + e.metrics.ExecutionLastExecutedBlockHeight(block.Block.Height) + + wg := sync.WaitGroup{} + wg.Add(1) + defer wg.Wait() + + go func() { + defer wg.Done() + e.eventConsumer.BeforeComputationResultSaved(ctx, computationResult) + }() + + err := e.execState.SaveExecutionResults(ctx, computationResult) + if err != nil { + return fmt.Errorf("cannot persist execution state: %w", err) + } + + blockID := block.BlockID() + lg := e.log.With(). + Hex("block_id", blockID[:]). + Uint64("height", block.Block.Height). + Logger() + + lg.Debug().Msg("execution state saved") + + // must call OnBlockExecuted AFTER saving the execution result to storage + // because when enqueuing a block, we rely on execState.StateCommitmentByBlockID + // to determine whether a block has been executed or not. + executables, err := e.blockQueue.OnBlockExecuted(blockID, commit) + if err != nil { + return fmt.Errorf("unexpected error while marking block as executed: %w", err) + } + + e.stopControl.OnBlockExecuted(block.Block.ToHeader()) + + // notify the event consumer so that it can do tasks + // such as broadcasting or uploading the result + logs := e.eventConsumer.OnComputationResultSaved(ctx, computationResult) + + receipt := computationResult.ExecutionReceipt + lg.Info(). + Int("collections", len(block.CompleteCollections)). + Hex("parent_block", block.Block.ParentID[:]). + Int("guarantees", len(block.Block.Payload.Guarantees)). + Hex("start_state", block.StartState[:]). + Hex("final_state", commit[:]). + Hex("receipt_id", logging.Entity(receipt)). + Hex("result_id", logging.Entity(receipt.ExecutionResult)). + Hex("execution_data_id", receipt.ExecutionResult.ExecutionDataID[:]). + Bool("state_changed", commit != *block.StartState). + Uint64("num_txs", nonSystemTransactionCount(receipt.ExecutionResult)). + Int64("timeSpentInMS", time.Since(startedAt).Milliseconds()). + Str("logs", logs). // e.g. whether the result was broadcasted + Int("executables", len(executables)). + Msg("block executed") + + // we ensure that child blocks are only executed after the execution result of + // the parent block has been successfully saved to storage. + // this guarantees OnBlockExecuted is never called out of order, e.g. + // OnBlockExecuted(childBlock) being called before OnBlockExecuted(parentBlock).
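+ // dispatch any child blocks that just became executable, and notify the + // throttle of execution progress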
+ + e.executeConcurrently(executables) + err = e.throttle.OnBlockExecuted(blockID, block.Block.Height) + if err != nil { + return fmt.Errorf("failed to notify throttle that block %v has been executed: %w", blockID, err) + } + + return nil +} + +func nonSystemTransactionCount(result flow.ExecutionResult) uint64 { + count := uint64(0) + for _, chunk := range result.Chunks { + count += chunk.NumberOfTransactions + } + return count +} + +func (e *Core) onCollection(col *flow.Collection) error { + colID := col.ID() + e.log.Info(). + Hex("collection_id", colID[:]). + Msg("handling collection") + // the EN might request a collection from multiple collection nodes, + // and therefore might receive multiple copies of the same collection. + // we only need to store it once. + err := storeCollectionIfMissing(e.collections, col) + if err != nil { + return fmt.Errorf("failed to store collection %v: %w", col.ID(), err) + } + + return e.handleCollection(colID, col) +} + +func (e *Core) handleCollection(colID flow.Identifier, col *flow.Collection) error { + // even if the collection is a duplicate, it's still good to add it to the block queue, + // because chances are the collection was stored before a restart, and + // is not in the queue after the restart. + // adding it to the queue ensures we don't miss any collection. + // since the queue's state is in memory, processing a duplicate collection should be + // a fast no-op, and won't return any executable blocks. + executables, err := e.blockQueue.HandleCollection(col) + if err != nil { + return fmt.Errorf("unexpected error while adding collection to block queue: %w", err) + } + + e.log.Debug(). + Hex("collection_id", colID[:]). + Int("executables", len(executables)).Msg("collection is handled, ready to execute blocks") + + e.executeConcurrently(executables) + + return nil +} + +func storeCollectionIfMissing(collections storage.Collections, col *flow.Collection) error { + _, err := collections.ByID(col.ID()) + if err != nil { + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to get collection %v: %w", col.ID(), err) + } + + _, err = collections.Store(col) + if err != nil { + return fmt.Errorf("failed to store collection %v: %w", col.ID(), err) + } + } + + return nil +} + +// executeConcurrently dispatches the given executable blocks to the block executor workers +func (e *Core) executeConcurrently(executables []*entity.ExecutableBlock) { + for _, executable := range executables { + select { + case <-e.ShutdownSignal(): + // if the engine has shut down, then stop executing the block + return + case e.blockExecutors <- executable: + } + } +} + +func (e *Core) execute(ctx context.Context, executable *entity.ExecutableBlock) error { + if !e.stopControl.ShouldExecuteBlock(executable.Block.ID(), executable.Block.Height) { + return nil + } + + blockID := executable.BlockID() + e.log.Info(). + Hex("block_id", blockID[:]). + Uint64("height", executable.Block.Height). + Int("collections", len(executable.CompleteCollections)).
+ Msgf("executing block") + + startedAt := time.Now() + + result, err := e.executor.ExecuteBlock(ctx, executable) + if err != nil { + return fmt.Errorf("failed to execute block %v: %w", executable.Block.ID(), err) + } + + err = e.onBlockExecuted(ctx, executable, result, startedAt) + if err != nil { + return fmt.Errorf("failed to handle execution result of block %v: %w", executable.Block.ID(), err) + } + + return nil +} + +func (e *Core) fetch(missingColls []*block_queue.MissingCollection) (int, error) { + missingCount := 0 + for _, col := range missingColls { + + // if we've requested this collection, we will store it in the storage, + // so check the storage to see whether we've seen it. + collection, err := e.collections.ByID(col.Guarantee.CollectionID) + + if err == nil { + // we found the collection from storage, forward this collection to handler + err = e.handleCollection(col.Guarantee.CollectionID, collection) + if err != nil { + return 0, fmt.Errorf("could not handle collection: %w", err) + } + + continue + } + + // check if there was exception + if !errors.Is(err, storage.ErrNotFound) { + return 0, fmt.Errorf("error while querying for collection: %w", err) + } + + err = e.collectionFetcher.FetchCollection(col.BlockID, col.Height, col.Guarantee) + if err != nil { + return 0, fmt.Errorf("failed to fetch collection %v for block %v (height: %v): %w", + col.Guarantee.CollectionID, col.BlockID, col.Height, err) + } + missingCount++ + } + + if missingCount > 0 { + e.collectionFetcher.Force() + e.metrics.ExecutionCollectionRequestSent() + } + + return missingCount, nil +} diff --git a/engine/execution/ingestion/core_test.go b/engine/execution/ingestion/core_test.go new file mode 100644 index 00000000000..406554cf905 --- /dev/null +++ b/engine/execution/ingestion/core_test.go @@ -0,0 +1,271 @@ +package ingestion + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + enginePkg "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/execution" + "github.com/onflow/flow-go/engine/execution/ingestion/mocks" + "github.com/onflow/flow-go/engine/execution/ingestion/stop" + stateMock "github.com/onflow/flow-go/engine/execution/state/mock" + "github.com/onflow/flow-go/engine/execution/testutil" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/entity" + "github.com/onflow/flow-go/module/metrics" + storageerr "github.com/onflow/flow-go/storage" + storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" + unittestMocks "github.com/onflow/flow-go/utils/unittest/mocks" +) + +func TestInogestionCoreExecuteBlock(t *testing.T) { + // Given R <- 1 <- 2 (Col0) <- 3 <- 4 (Col1) + blocks, cols := makeBlocksAndCollections(t) + // create core + core, throttle, state, collectionDB, blocksDB, headers, fetcher, consumer := + createCore(t, blocks) + + // start the core + ctx, cancel := context.WithCancel(context.Background()) + irrecoverableCtx := irrecoverable.NewMockSignalerContext(t, ctx) + core.Start(irrecoverableCtx) + <-core.Ready() + defer func() { + cancel() + <-core.Done() + log.Info().Msgf("done") + }() + + waitTime := 10 * time.Millisecond + // Receive Block1 + // verify Block1 is executed + wg := &sync.WaitGroup{} + receiveBlock(t, throttle, state, headers, blocksDB, consumer, blocks[1], wg) + verifyBlockExecuted(t, 
consumer, wg, blocks[1]) + + // Receive Blocks 2 and 3; no block is executed yet + receiveBlock(t, throttle, state, headers, blocksDB, consumer, blocks[2], wg) + time.Sleep(waitTime) + verifyBlockNotExecuted(t, consumer, blocks[2]) + + receiveBlock(t, throttle, state, headers, blocksDB, consumer, blocks[3], wg) + time.Sleep(waitTime) + verifyBlockNotExecuted(t, consumer, blocks[3]) + + // Receive Col0 + // and verify blocks 2 and 3 are executed + receiveCollection(t, fetcher, core, cols[0]) + time.Sleep(waitTime) + verifyBlockExecuted(t, consumer, wg, blocks[2], blocks[3]) + + // Store Col1 + // Receive block 4 + // Verify block 4 is executed because Col1 can be found locally + storeCollection(t, collectionDB, cols[1]) + receiveBlock(t, throttle, state, headers, blocksDB, consumer, blocks[4], wg) + verifyBlockExecuted(t, consumer, wg, blocks[4]) +} + +func createCore(t *testing.T, blocks []*flow.Block) ( + *Core, Throttle, *unittestMocks.ProtocolState, *mocks.MockCollectionStore, + *storage.Blocks, *headerStore, *mockFetcher, *mockConsumer) { + headers := newHeadersWithBlocks(toHeaders(blocks)) + blocksDB := storage.NewBlocks(t) + collections := mocks.NewMockCollectionStore() + state := unittestMocks.NewProtocolState() + require.NoError(t, state.Bootstrap(blocks[0], nil, nil)) + execState := stateMock.NewExecutionState(t) + execState.On("GetHighestFinalizedExecuted").Return(blocks[0].Height, nil) + + // the root block is considered already executed + consumer := newMockConsumer(blocks[0].ID()) + + execState.On("StateCommitmentByBlockID", mock.Anything).Return( + func(blockID flow.Identifier) (flow.StateCommitment, error) { + executed := consumer.MockIsBlockExecuted(blockID) + if executed { + return unittest.StateCommitmentFixture(), nil + } + return flow.DummyStateCommitment, storageerr.ErrNotFound + }) + + execState.On("IsBlockExecuted", mock.Anything, mock.Anything).Return(func(height uint64, blockID flow.Identifier) (bool, error) { + return consumer.MockIsBlockExecuted(blockID), nil + }) + execState.On("SaveExecutionResults", mock.Anything, mock.Anything).Return(nil) + + throttle, err := NewBlockThrottle(unittest.Logger(), state, execState, headers) + require.NoError(t, err) + + unit := enginePkg.NewUnit() + stopControl := stop.NewStopControl( + unit, + time.Second, + zerolog.Nop(), + execState, + headers, + nil, + nil, + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + collectionFetcher := newMockFetcher() + executor := &mockExecutor{t: t, consumer: consumer} + metrics := metrics.NewNoopCollector() + core, err := NewCore(unittest.Logger(), throttle, execState, stopControl, blocksDB, + collections, executor, collectionFetcher, consumer, metrics) + require.NoError(t, err) + return core, throttle, state, collections, blocksDB, headers, collectionFetcher, consumer +} + +func makeBlocksAndCollections(t *testing.T) ([]*flow.Block, []*flow.Collection) { + cs := unittest.CollectionListFixture(2) + col0, col1 := cs[0], cs[1] + + genesis := unittest.Block.Genesis(flow.Emulator) + blocks := unittest.ChainFixtureFrom(4, genesis.ToHeader()) + + bs := append([]*flow.Block{genesis}, blocks...)
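+ // attach Col0 to block 2 and Col1 to block 4, matching the fixture + // R <- 1 <- 2 (Col0) <- 3 <- 4 (Col1)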
+ unittest.AddCollectionsToBlock(bs[2], []*flow.Collection{col0}) + unittest.AddCollectionsToBlock(bs[4], []*flow.Collection{col1}) + unittest.RechainBlocks(bs) + + return bs, cs +} + +func receiveBlock(t *testing.T, throttle Throttle, state *unittestMocks.ProtocolState, headers *headerStore, blocksDB *storage.Blocks, consumer *mockConsumer, block *flow.Block, wg *sync.WaitGroup) { + require.NoError(t, state.Extend(block)) + blocksDB.On("ByID", block.ID()).Return(block, nil) + require.NoError(t, throttle.OnBlock(block.ID(), block.Height)) + consumer.WaitForExecuted(block.ID(), wg) +} + +func verifyBlockExecuted(t *testing.T, consumer *mockConsumer, wg *sync.WaitGroup, blocks ...*flow.Block) { + // Wait until blocks are executed + unittest.AssertReturnsBefore(t, func() { wg.Wait() }, time.Millisecond*20) + for _, block := range blocks { + require.True(t, consumer.MockIsBlockExecuted(block.ID())) + } +} + +func verifyBlockNotExecuted(t *testing.T, consumer *mockConsumer, blocks ...*flow.Block) { + for _, block := range blocks { + require.False(t, consumer.MockIsBlockExecuted(block.ID())) + } +} + +func storeCollection(t *testing.T, collectionDB *mocks.MockCollectionStore, collection *flow.Collection) { + log.Info().Msgf("collectionDB: store collection %v", collection.ID()) + _, err := collectionDB.Store(collection) + require.NoError(t, err) +} + +func receiveCollection(t *testing.T, fetcher *mockFetcher, core *Core, collection *flow.Collection) { + require.True(t, fetcher.IsFetched(collection.ID())) + core.OnCollection(collection) +} + +type mockExecutor struct { + t *testing.T + consumer *mockConsumer +} + +func (m *mockExecutor) ExecuteBlock(_ context.Context, block *entity.ExecutableBlock) (*execution.ComputationResult, error) { + result := testutil.ComputationResultFixture(m.t) + result.ExecutableBlock = block + result.ExecutionReceipt.ExecutionResult.BlockID = block.BlockID() + log.Info().Msgf("mockExecutor: block %v executed", block.Block.Height) + return result, nil +} + +type mockConsumer struct { + sync.Mutex + executed map[flow.Identifier]struct{} + wgs map[flow.Identifier]*sync.WaitGroup +} + +func newMockConsumer(executed flow.Identifier) *mockConsumer { + return &mockConsumer{ + executed: map[flow.Identifier]struct{}{ + executed: {}, + }, + wgs: make(map[flow.Identifier]*sync.WaitGroup), + } +} + +func (m *mockConsumer) BeforeComputationResultSaved(ctx context.Context, result *execution.ComputationResult) { +} + +func (m *mockConsumer) OnComputationResultSaved(_ context.Context, result *execution.ComputationResult) string { + m.Lock() + defer m.Unlock() + blockID := result.BlockExecutionResult.ExecutableBlock.BlockID() + if _, ok := m.executed[blockID]; ok { + return fmt.Sprintf("block %v is already executed", blockID) + } + m.executed[blockID] = struct{}{} + log.Info().Uint64("height", result.BlockExecutionResult.ExecutableBlock.Block.Height).Msg("mockConsumer: block result saved") + m.wgs[blockID].Done() + return "" +} + +func (m *mockConsumer) WaitForExecuted(blockID flow.Identifier, wg *sync.WaitGroup) { + m.Lock() + defer m.Unlock() + wg.Add(1) + m.wgs[blockID] = wg +} + +func (m *mockConsumer) MockIsBlockExecuted(id flow.Identifier) bool { + m.Lock() + defer m.Unlock() + _, ok := m.executed[id] + return ok +} + +type mockFetcher struct { + sync.Mutex + fetching map[flow.Identifier]struct{} +} + +func newMockFetcher() *mockFetcher { + return &mockFetcher{ + fetching: make(map[flow.Identifier]struct{}), + } +} + +func (f *mockFetcher) FetchCollection(blockID flow.Identifier, 
height uint64, guarantee *flow.CollectionGuarantee) error { + f.Lock() + defer f.Unlock() + + if _, ok := f.fetching[guarantee.CollectionID]; ok { + return fmt.Errorf("collection %v is already being fetched", guarantee.CollectionID) + } + + f.fetching[guarantee.CollectionID] = struct{}{} + log.Info().Msgf("mockFetcher: fetching collection %v for block at height %v", guarantee.CollectionID, height) + return nil +} + +func (f *mockFetcher) Force() { + f.Lock() + defer f.Unlock() +} + +func (f *mockFetcher) IsFetched(colID flow.Identifier) bool { + f.Lock() + defer f.Unlock() + _, ok := f.fetching[colID] + return ok +} diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go deleted file mode 100644 index c46ebed62d9..00000000000 --- a/engine/execution/ingestion/engine.go +++ /dev/null @@ -1,1303 +0,0 @@ -package ingestion - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "strings" - "sync" - "time" - - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/execution" - "github.com/onflow/flow-go/engine/execution/computation" - "github.com/onflow/flow-go/engine/execution/ingestion/uploader" - "github.com/onflow/flow-go/engine/execution/provider" - "github.com/onflow/flow-go/engine/execution/state" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/executiondatasync/pruner" - "github.com/onflow/flow-go/module/mempool/entity" - "github.com/onflow/flow-go/module/mempool/queue" - "github.com/onflow/flow-go/module/mempool/stdmap" - "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/state/protocol" - psEvents "github.com/onflow/flow-go/state/protocol/events" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/logging" -) - -// An Engine receives and saves incoming blocks.
-type Engine struct { - psEvents.Noop // satisfy protocol events consumer interface - - unit *engine.Unit - log zerolog.Logger - me module.Local - request module.Requester // used to request collections - state protocol.State - blocks storage.Blocks - collections storage.Collections - events storage.Events - serviceEvents storage.ServiceEvents - transactionResults storage.TransactionResults - computationManager computation.ComputationManager - providerEngine provider.ProviderEngine - mempool *Mempool - execState state.ExecutionState - metrics module.ExecutionMetrics - maxCollectionHeight uint64 - tracer module.Tracer - extensiveLogging bool - checkAuthorizedAtBlock func(blockID flow.Identifier) (bool, error) - executionDataPruner *pruner.Pruner - uploader *uploader.Manager - stopControl *StopControl -} - -func New( - logger zerolog.Logger, - net network.Network, - me module.Local, - request module.Requester, - state protocol.State, - blocks storage.Blocks, - collections storage.Collections, - events storage.Events, - serviceEvents storage.ServiceEvents, - transactionResults storage.TransactionResults, - executionEngine computation.ComputationManager, - providerEngine provider.ProviderEngine, - execState state.ExecutionState, - metrics module.ExecutionMetrics, - tracer module.Tracer, - extLog bool, - checkAuthorizedAtBlock func(blockID flow.Identifier) (bool, error), - pruner *pruner.Pruner, - uploader *uploader.Manager, - stopControl *StopControl, -) (*Engine, error) { - log := logger.With().Str("engine", "ingestion").Logger() - - mempool := newMempool() - - eng := Engine{ - unit: engine.NewUnit(), - log: log, - me: me, - request: request, - state: state, - blocks: blocks, - collections: collections, - events: events, - serviceEvents: serviceEvents, - transactionResults: transactionResults, - computationManager: executionEngine, - providerEngine: providerEngine, - mempool: mempool, - execState: execState, - metrics: metrics, - maxCollectionHeight: 0, - tracer: tracer, - extensiveLogging: extLog, - checkAuthorizedAtBlock: checkAuthorizedAtBlock, - executionDataPruner: pruner, - uploader: uploader, - stopControl: stopControl, - } - - return &eng, nil -} - -// Ready returns a channel that will close when the engine has -// successfully started. -func (e *Engine) Ready() <-chan struct{} { - if !e.stopControl.IsPaused() { - if err := e.uploader.RetryUploads(); err != nil { - e.log.Warn().Msg("failed to re-upload all ComputationResults") - } - - err := e.reloadUnexecutedBlocks() - if err != nil { - e.log.Fatal().Err(err).Msg("failed to load all unexecuted blocks") - } - } - - return e.unit.Ready() -} - -// Done returns a channel that will close when the engine has -// successfully stopped. -func (e *Engine) Done() <-chan struct{} { - return e.unit.Done() -} - -// SubmitLocal submits an event originating on the local node. -func (e *Engine) SubmitLocal(event interface{}) { - e.unit.Launch(func() { - err := e.process(e.me.NodeID(), event) - if err != nil { - engine.LogError(e.log, err) - } - }) -} - -// Submit submits the given event from the node with the given origin ID -// for processing in a non-blocking manner. It returns instantly and logs -// a potential processing error internally when done. -func (e *Engine) Submit( - channel channels.Channel, - originID flow.Identifier, - event interface{}, -) { - e.unit.Launch(func() { - err := e.process(originID, event) - if err != nil { - engine.LogError(e.log, err) - } - }) -} - -// ProcessLocal processes an event originating on the local node. 
-func (e *Engine) ProcessLocal(event interface{}) error { - return fmt.Errorf("ingestion error does not process local events") -} - -func (e *Engine) Process( - channel channels.Channel, - originID flow.Identifier, - event interface{}, -) error { - return e.unit.Do(func() error { - return e.process(originID, event) - }) -} - -func (e *Engine) process(originID flow.Identifier, event interface{}) error { - return nil -} - -func (e *Engine) finalizedUnexecutedBlocks(finalized protocol.Snapshot) ( - []flow.Identifier, - error, -) { - // get finalized height - final, err := finalized.Head() - if err != nil { - return nil, fmt.Errorf("could not get finalized block: %w", err) - } - - // find the first unexecuted and finalized block - // We iterate from the last finalized, check if it has been executed, - // if not, keep going to the lower height, until we find an executed - // block, and then the next height is the first unexecuted. - // If there is only one finalized, and it's executed (i.e. root block), - // then the firstUnexecuted is a unfinalized block, which is ok, - // because the next loop will ensure it only iterates through finalized - // blocks. - lastExecuted := final.Height - - rootBlock, err := e.state.Params().Root() - if err != nil { - return nil, fmt.Errorf("failed to retrieve root block: %w", err) - } - - for ; lastExecuted > rootBlock.Height; lastExecuted-- { - header, err := e.state.AtHeight(lastExecuted).Head() - if err != nil { - return nil, fmt.Errorf("could not get header at height: %v, %w", lastExecuted, err) - } - - executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, header.ID()) - if err != nil { - return nil, fmt.Errorf("could not check whether block is executed: %w", err) - } - - if executed { - break - } - } - - firstUnexecuted := lastExecuted + 1 - - e.log.Info().Msgf("last finalized and executed height: %v", lastExecuted) - - unexecuted := make([]flow.Identifier, 0) - - // starting from the first unexecuted block, go through each unexecuted and finalized block - // reload its block to execution queues - for height := firstUnexecuted; height <= final.Height; height++ { - header, err := e.state.AtHeight(height).Head() - if err != nil { - return nil, fmt.Errorf("could not get header at height: %v, %w", height, err) - } - - unexecuted = append(unexecuted, header.ID()) - } - - return unexecuted, nil -} - -func (e *Engine) pendingUnexecutedBlocks(finalized protocol.Snapshot) ( - []flow.Identifier, - error, -) { - pendings, err := finalized.Descendants() - if err != nil { - return nil, fmt.Errorf("could not get pending blocks: %w", err) - } - - unexecuted := make([]flow.Identifier, 0) - - for _, pending := range pendings { - executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, pending) - if err != nil { - return nil, fmt.Errorf("could not check block executed or not: %w", err) - } - - if !executed { - unexecuted = append(unexecuted, pending) - } - } - - return unexecuted, nil -} - -func (e *Engine) unexecutedBlocks() ( - finalized []flow.Identifier, - pending []flow.Identifier, - err error, -) { - // pin the snapshot so that finalizedUnexecutedBlocks and pendingUnexecutedBlocks are based - // on the same snapshot. 
- snapshot := e.state.Final() - - finalized, err = e.finalizedUnexecutedBlocks(snapshot) - if err != nil { - return nil, nil, fmt.Errorf("could not read finalized unexecuted blocks") - } - - pending, err = e.pendingUnexecutedBlocks(snapshot) - if err != nil { - return nil, nil, fmt.Errorf("could not read pending unexecuted blocks") - } - - return finalized, pending, nil -} - -// on nodes startup, we need to load all the unexecuted blocks to the execution queues. -// blocks have to be loaded in the way that the parent has been loaded before loading its children -func (e *Engine) reloadUnexecutedBlocks() error { - // it's possible the BlockProcessable is called during the reloading, as the follower engine - // will receive blocks before ingestion engine is ready. - // The problem with that is, since the reloading hasn't finished yet, enqueuing the new block from - // the BlockProcessable callback will fail, because its parent block might have not been reloaded - // to the queues yet. - // So one solution here is to lock the execution queues during reloading, so that if BlockProcessable - // is called before reloading is finished, it will be blocked, which will avoid that edge case. - return e.mempool.Run(func( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - ) error { - - // saving an executed block is currently not transactional, so it's possible - // the block is marked as executed but the receipt might not be saved during a crash. - // in order to mitigate this problem, we always re-execute the last executed and finalized - // block. - // there is an exception, if the last executed block is a root block, then don't execute it, - // because the root has already been executed during bootstrapping phase. And re-executing - // a root block will fail, because the root block doesn't have a parent block, and could not - // get the result of it. - // TODO: remove this, when saving a executed block is transactional - lastExecutedHeight, lastExecutedID, err := e.execState.GetHighestExecutedBlockID(e.unit.Ctx()) - if err != nil { - return fmt.Errorf("could not get last executed: %w", err) - } - - last, err := e.state.AtBlockID(lastExecutedID).Head() - if err != nil { - return fmt.Errorf("could not get last executed final by ID: %w", err) - } - - // don't reload root block - rootBlock, err := e.state.Params().Root() - if err != nil { - return fmt.Errorf("failed to retrieve root block: %w", err) - } - - isRoot := rootBlock.ID() == last.ID() - if !isRoot { - executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, lastExecutedID) - if err != nil { - return fmt.Errorf("cannot check is last exeucted final block has been executed %v: %w", lastExecutedID, err) - } - if !executed { - // this should not happen, but if it does, execution should still work - e.log.Warn(). - Hex("block_id", lastExecutedID[:]). - Msg("block marked as highest executed one, but not executable - internal inconsistency") - - err = e.reloadBlock(blockByCollection, executionQueues, lastExecutedID) - if err != nil { - return fmt.Errorf("could not reload the last executed final block: %v, %w", lastExecutedID, err) - } - } - } - - finalized, pending, err := e.unexecutedBlocks() - if err != nil { - return fmt.Errorf("could not reload unexecuted blocks: %w", err) - } - - unexecuted := append(finalized, pending...) - - log := e.log.With(). - Int("total", len(unexecuted)). - Int("finalized", len(finalized)). - Int("pending", len(pending)). 
- Uint64("last_executed", lastExecutedHeight). - Hex("last_executed_id", lastExecutedID[:]). - Logger() - - log.Info().Msg("reloading unexecuted blocks") - - for _, blockID := range unexecuted { - err := e.reloadBlock(blockByCollection, executionQueues, blockID) - if err != nil { - return fmt.Errorf("could not reload block: %v, %w", blockID, err) - } - - e.log.Debug().Hex("block_id", blockID[:]).Msg("reloaded block") - } - - log.Info().Msg("all unexecuted have been successfully reloaded") - - return nil - }) -} - -func (e *Engine) reloadBlock( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - blockID flow.Identifier, -) error { - block, err := e.blocks.ByID(blockID) - if err != nil { - return fmt.Errorf("could not get block by ID: %v %w", blockID, err) - } - - // enqueue the block and check if there is any missing collections - missingCollections, err := e.enqueueBlockAndCheckExecutable(blockByCollection, executionQueues, block, false) - - if err != nil { - return fmt.Errorf("could not enqueue block %x on reloading: %w", blockID, err) - } - - // forward the missing collections to requester engine for requesting them from collection nodes, - // adding the missing collections to mempool in order to trigger the block execution as soon as - // all missing collections are received. - err = e.fetchAndHandleCollection(blockID, block.Header.Height, missingCollections, func(collection *flow.Collection) error { - err := e.addCollectionToMempool(collection, blockByCollection) - - if err != nil { - return fmt.Errorf("could not add collection to mempool: %w", err) - } - return nil - }) - - if err != nil { - return fmt.Errorf("could not fetch or handle collection %w", err) - } - return nil -} - -// BlockProcessable handles the new verified blocks (blocks that -// have passed consensus validation) received from the consensus nodes -// NOTE: BlockProcessable might be called multiple times for the same block. -// NOTE: Ready calls reloadUnexecutedBlocks during initialization, which handles dropped protocol events. -func (e *Engine) BlockProcessable(b *flow.Header, _ *flow.QuorumCertificate) { - - // skip if stopControl tells to skip - if !e.stopControl.blockProcessable(b) { - return - } - - blockID := b.ID() - newBlock, err := e.blocks.ByID(blockID) - if err != nil { - e.log.Fatal().Err(err).Msgf("could not get incorporated block(%v): %v", blockID, err) - } - - e.log.Info().Hex("block_id", blockID[:]). - Uint64("height", b.Height). - Msg("handling new block") - - err = e.handleBlock(e.unit.Ctx(), newBlock) - if err != nil { - e.log.Error().Err(err).Hex("block_id", blockID[:]).Msg("failed to handle block") - } -} - -// BlockFinalized implements part of state.protocol.Consumer interface. -// Method gets called for every finalized block -func (e *Engine) BlockFinalized(h *flow.Header) { - e.stopControl.blockFinalized(e.unit.Ctx(), e.execState, h) -} - -// Main handling - -// handle block will process the incoming block. -// the block has passed the consensus validation. 
-func (e *Engine) handleBlock(ctx context.Context, block *flow.Block) error { - - blockID := block.ID() - log := e.log.With().Hex("block_id", blockID[:]).Logger() - - span, _ := e.tracer.StartBlockSpan(ctx, blockID, trace.EXEHandleBlock) - defer span.End() - - executed, err := state.IsBlockExecuted(e.unit.Ctx(), e.execState, blockID) - if err != nil { - return fmt.Errorf("could not check whether block is executed: %w", err) - } - - if executed { - log.Debug().Msg("block has been executed already") - return nil - } - - var missingCollections []*flow.CollectionGuarantee - // unexecuted block - // acquiring the lock so that there is only one process modifying the queue - err = e.mempool.Run(func( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - ) error { - missing, err := e.enqueueBlockAndCheckExecutable(blockByCollection, executionQueues, block, false) - if err != nil { - return err - } - missingCollections = missing - return nil - }) - - if err != nil { - return fmt.Errorf("could not enqueue block %v: %w", blockID, err) - } - - return e.addOrFetch(blockID, block.Header.Height, missingCollections) -} - -func (e *Engine) enqueueBlockAndCheckExecutable( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - block *flow.Block, - checkStateSync bool, -) ([]*flow.CollectionGuarantee, error) { - executableBlock := &entity.ExecutableBlock{ - Block: block, - CompleteCollections: make(map[flow.Identifier]*entity.CompleteCollection), - } - - blockID := executableBlock.ID() - - lg := e.log.With(). - Hex("block_id", blockID[:]). - Uint64("block_height", executableBlock.Block.Header.Height). - Logger() - - // adding the block to the queue, - queue, added, head := enqueue(executableBlock, executionQueues) - - // if it's not added, it means the block is not a new block, it already - // exists in the queue, then bail - if !added { - log.Debug().Hex("block_id", logging.Entity(executableBlock)). - Int("block_height", int(executableBlock.Height())). - Msg("block already exists in the execution queue") - return nil, nil - } - - firstUnexecutedHeight := queue.Head.Item.Height() - - // check if a block is executable. - // a block is executable if the following conditions are all true - // 1) the parent state commitment is ready - // 2) the collections for the block payload are ready - // 3) the child block is ready for querying the randomness - - // check if the block's parent has been executed. (we can't execute the block if the parent has - // not been executed yet) - // check if there is a statecommitment for the parent block - parentCommitment, err := e.execState.StateCommitmentByBlockID(e.unit.Ctx(), block.Header.ParentID) - - // if we found the statecommitment for the parent block, then add it to the executable block. - if err == nil { - executableBlock.StartState = &parentCommitment - } else if errors.Is(err, storage.ErrNotFound) { - // the parent block is an unexecuted block. - // if the queue only has one block, and its parent doesn't - // exist in the queue, then we need to load the block from the storage. - _, ok := queue.Nodes[blockID] - if !ok { - lg.Error().Msgf("an unexecuted parent block is missing in the queue") - } - } else { - // if there is exception, then crash - lg.Fatal().Err(err).Msg("unexpected error while accessing storage, shutting down") - } - - // check if we have all the collections for the block, and request them if there is missing. 
- missingCollections, err := e.matchAndFindMissingCollections(executableBlock, blockByCollection) - if err != nil { - return nil, fmt.Errorf("cannot send collection requests: %w", err) - } - - complete := false - - // if newly enqueued block is inside any existing queue, we should skip now and wait - // for parent to finish execution - if head { - // execute the block if the block is ready to be executed - complete = e.executeBlockIfComplete(executableBlock) - } - - lg.Info(). - // if the execution is halt, but the queue keeps growing, we could check which block - // hasn't been executed. - Uint64("first_unexecuted_in_queue", firstUnexecutedHeight). - Bool("complete", complete). - Bool("head_of_queue", head). - Msg("block is enqueued") - - return missingCollections, nil -} - -// executeBlock will execute the block. -// When finish executing, it will check if the children becomes executable and execute them if yes. -func (e *Engine) executeBlock( - ctx context.Context, - executableBlock *entity.ExecutableBlock, -) { - lg := e.log.With(). - Hex("block_id", logging.Entity(executableBlock)). - Uint64("height", executableBlock.Block.Header.Height). - Logger() - - lg.Info().Msg("executing block") - - startedAt := time.Now() - - e.stopControl.executingBlockHeight(executableBlock.Block.Header.Height) - - span, ctx := e.tracer.StartSpanFromContext(ctx, trace.EXEExecuteBlock) - defer span.End() - - parentID := executableBlock.Block.Header.ParentID - parentErID, err := e.execState.GetExecutionResultID(ctx, parentID) - if err != nil { - lg.Err(err). - Str("parentID", parentID.String()). - Msg("could not get execution result ID for parent block") - return - } - - snapshot := e.execState.NewStorageSnapshot(*executableBlock.StartState) - - computationResult, err := e.computationManager.ComputeBlock( - ctx, - parentErID, - executableBlock, - snapshot) - if err != nil { - lg.Err(err).Msg("error while computing block") - return - } - - wg := sync.WaitGroup{} - wg.Add(1) - defer wg.Wait() - - go func() { - defer wg.Done() - err := e.uploader.Upload(ctx, computationResult) - if err != nil { - lg.Err(err).Msg("error while uploading block") - // continue processing. uploads should not block execution - } - }() - - err = e.saveExecutionResults(ctx, computationResult) - if errors.Is(err, storage.ErrDataMismatch) { - lg.Fatal().Err(err).Msg("fatal: trying to store different results for the same block") - } - - if err != nil { - lg.Err(err).Msg("error while handing computation results") - return - } - - // if the receipt is for a sealed block, then no need to broadcast it. - lastSealed, err := e.state.Sealed().Head() - if err != nil { - lg.Fatal().Err(err).Msg("could not get sealed block before broadcasting") - } - - receipt := computationResult.ExecutionReceipt - isExecutedBlockSealed := executableBlock.Block.Header.Height <= lastSealed.Height - broadcasted := false - - if !isExecutedBlockSealed { - authorizedAtBlock, err := e.checkAuthorizedAtBlock(executableBlock.ID()) - if err != nil { - lg.Fatal().Err(err).Msg("could not check staking status") - } - if authorizedAtBlock { - err = e.providerEngine.BroadcastExecutionReceipt(ctx, receipt) - if err != nil { - lg.Err(err).Msg("critical: failed to broadcast the receipt") - } else { - broadcasted = true - } - } - } - - finalEndState := computationResult.CurrentEndState() - lg.Info(). - Hex("parent_block", executableBlock.Block.Header.ParentID[:]). - Int("collections", len(executableBlock.Block.Payload.Guarantees)). - Hex("start_state", executableBlock.StartState[:]). 
- Hex("final_state", finalEndState[:]). - Hex("receipt_id", logging.Entity(receipt)). - Hex("result_id", logging.Entity(receipt.ExecutionResult)). - Hex("execution_data_id", receipt.ExecutionResult.ExecutionDataID[:]). - Bool("sealed", isExecutedBlockSealed). - Bool("broadcasted", broadcasted). - Int64("timeSpentInMS", time.Since(startedAt).Milliseconds()). - Msg("block executed") - - err = e.onBlockExecuted(executableBlock, finalEndState) - if err != nil { - lg.Err(err).Msg("failed in process block's children") - } - - if e.executionDataPruner != nil { - e.executionDataPruner.NotifyFulfilledHeight(executableBlock.Height()) - } - - e.unit.Ctx() - - e.stopControl.blockExecuted(executableBlock.Block.Header) -} - -// we've executed the block, now we need to check: -// 1. whether the state syncing can be turned off -// 2. whether its children can be executed -// the executionQueues stores blocks as a tree: -// -// 10 <- 11 <- 12 -// ^-- 13 -// 14 <- 15 <- 16 -// -// if block 10 is the one just executed, then we will remove it from the queue, and add -// its children back, meaning the tree will become: -// -// 11 <- 12 -// 13 -// 14 <- 15 <- 16 - -func (e *Engine) onBlockExecuted( - executed *entity.ExecutableBlock, - finalState flow.StateCommitment, -) error { - - e.metrics.ExecutionStorageStateCommitment(int64(len(finalState))) - e.metrics.ExecutionLastExecutedBlockHeight(executed.Block.Header.Height) - - // e.checkStateSyncStop(executed.Block.Header.Height) - - missingCollections := make(map[*entity.ExecutableBlock][]*flow.CollectionGuarantee) - err := e.mempool.Run( - func( - blockByCollection *stdmap.BlockByCollectionBackdata, - executionQueues *stdmap.QueuesBackdata, - ) error { - // find the block that was just executed - executionQueue, exists := executionQueues.ByID(executed.ID()) - if !exists { - // when the block no longer exists in the queue, it means there was a race condition that - // two onBlockExecuted was called for the same block, and one process has already removed the - // block from the queue, so we will print an error here - return fmt.Errorf("block has been executed already, no longer exists in the queue") - } - - // dismount the executed block and all its children - _, newQueues := executionQueue.Dismount() - - // go through each children, add them back to the queue, and check - // if the children is executable - for _, queue := range newQueues { - queueID := queue.ID() - added := executionQueues.Add(queueID, queue) - if !added { - // blocks should be unique in execution queues, if we dismount all the children blocks, then - // add it back to the queues, then it should always be able to add. - // If not, then there is a bug that the queues have duplicated blocks - return fmt.Errorf("fatal error - child block already in execution queue") - } - - // the parent block has been executed, update the StartState of - // each child block. - child := queue.Head.Item.(*entity.ExecutableBlock) - child.StartState = &finalState - - missing, err := e.matchAndFindMissingCollections(child, blockByCollection) - if err != nil { - return fmt.Errorf("cannot send collection requests: %w", err) - } - if len(missing) > 0 { - missingCollections[child] = append(missingCollections[child], missing...) - } - - completed := e.executeBlockIfComplete(child) - if !completed { - e.log.Debug(). - Hex("executed_block", logging.Entity(executed)). - Hex("child_block", logging.Entity(child)). - Msg("child block is not ready to be executed yet") - } else { - e.log.Debug(). 
- Hex("executed_block", logging.Entity(executed)). - Hex("child_block", logging.Entity(child)). - Msg("child block is ready to be executed") - } - } - - // remove the executed block - executionQueues.Remove(executed.ID()) - - return nil - }) - - if err != nil { - e.log.Err(err). - Hex("block", logging.Entity(executed)). - Msg("error while requeueing blocks after execution") - } - - for child, missing := range missingCollections { - err := e.addOrFetch(child.ID(), child.Block.Header.Height, missing) - if err != nil { - return fmt.Errorf("fail to add missing collections: %w", err) - } - } - - return nil -} - -// executeBlockIfComplete checks whether the block is ready to be executed. -// if yes, execute the block -// return a bool indicates whether the block was completed -func (e *Engine) executeBlockIfComplete(eb *entity.ExecutableBlock) bool { - - if eb.Executing { - return false - } - - // if don't have the delta, then check if everything is ready for executing - // the block - if eb.IsComplete() { - - if e.extensiveLogging { - e.logExecutableBlock(eb) - } - - // no external synchronisation is used because this method must be run in a thread-safe context - eb.Executing = true - - e.unit.Launch(func() { - e.executeBlock(e.unit.Ctx(), eb) - }) - return true - } - return false -} - -// OnCollection is a callback for handling the collections requested by the -// collection requester. -func (e *Engine) OnCollection(originID flow.Identifier, entity flow.Entity) { - // convert entity to strongly typed collection - collection, ok := entity.(*flow.Collection) - if !ok { - e.log.Error().Msgf("invalid entity type (%T)", entity) - return - } - - // no need to validate the origin ID, since the collection requester has - // checked the origin must be a collection node. - - err := e.handleCollection(originID, collection) - if err != nil { - e.log.Error().Err(err).Msg("could not handle collection") - } -} - -// a block can't be executed if its collection is missing. -// since a collection can belong to multiple blocks, we need to -// find all the blocks that are needing this collection, and then -// check if any of these block becomes executable and execute it if -// is. -func (e *Engine) handleCollection( - originID flow.Identifier, - collection *flow.Collection, -) error { - collID := collection.ID() - - span, _ := e.tracer.StartCollectionSpan(context.Background(), collID, trace.EXEHandleCollection) - defer span.End() - - lg := e.log.With().Hex("collection_id", collID[:]).Logger() - - lg.Info().Hex("sender", originID[:]).Msg("handle collection") - defer func(startTime time.Time) { - lg.Info().TimeDiff("duration", time.Now(), startTime).Msg("collection handled") - }(time.Now()) - - // TODO: bail if have seen this collection before. - err := e.collections.Store(collection) - if err != nil { - return fmt.Errorf("cannot store collection: %w", err) - } - - return e.mempool.BlockByCollection.Run( - func(backdata *stdmap.BlockByCollectionBackdata) error { - return e.addCollectionToMempool(collection, backdata) - }, - ) -} - -func (e *Engine) addCollectionToMempool( - collection *flow.Collection, - backdata *stdmap.BlockByCollectionBackdata, -) error { - collID := collection.ID() - blockByCollectionID, exists := backdata.ByID(collID) - - // if we don't find any block for this collection, then - // means we don't need this collection any more. - // or it was ejected from the mempool when it was full. 
- // either way, we will return - if !exists { - return nil - } - - for _, executableBlock := range blockByCollectionID.ExecutableBlocks { - blockID := executableBlock.ID() - - completeCollection, ok := executableBlock.CompleteCollections[collID] - if !ok { - return fmt.Errorf("cannot handle collection: internal inconsistency - collection pointing to block %v which does not contain said collection", - blockID) - } - - // record collection max height metrics - blockHeight := executableBlock.Block.Header.Height - if blockHeight > e.maxCollectionHeight { - e.metrics.UpdateCollectionMaxHeight(blockHeight) - e.maxCollectionHeight = blockHeight - } - - if completeCollection.IsCompleted() { - // already received transactions for this collection - continue - } - - // update the transactions of the collection - // Note: it's guaranteed the transactions are for this collection, because - // the collection id matches with the CollectionID from the collection guarantee - completeCollection.Transactions = collection.Transactions - - // check if the block becomes executable - _ = e.executeBlockIfComplete(executableBlock) - } - - // since we've received this collection, remove it from the index - // this also prevents from executing the same block twice, because the second - // time when the collection arrives, it will not be found in the blockByCollectionID - // index. - backdata.Remove(collID) - - return nil -} - -func newQueue(blockify queue.Blockify, queues *stdmap.QueuesBackdata) ( - *queue.Queue, - bool, -) { - q := queue.NewQueue(blockify) - qID := q.ID() - return q, queues.Add(qID, q) -} - -// enqueue adds a block to the queues, return the queue that includes the block and booleans -// * is block new one (it's not already enqueued, not a duplicate) -// * is head of the queue (new queue has been created) -// -// Queues are chained blocks. Since a block can't be executable until its parent has been -// executed, the chained structure allows us to only check the head of each queue to see if -// any block becomes executable. -// for instance we have one queue whose head is A: -// -// A <- B <- C -// ^- D <- E -// -// If we receive E <- F, then we will add it to the queue: -// -// A <- B <- C -// ^- D <- E <- F -// -// Even through there are 6 blocks, we only need to check if block A becomes executable. -// when the parent block isn't in the queue, we add it as a new queue. for instance, if -// we receive H <- G, then the queues will become: -// -// A <- B <- C -// ^- D <- E -// G -func enqueue(blockify queue.Blockify, queues *stdmap.QueuesBackdata) ( - *queue.Queue, - bool, - bool, -) { - for _, queue := range queues.All() { - if stored, isNew := queue.TryAdd(blockify); stored { - return queue, isNew, false - } - } - queue, isNew := newQueue(blockify, queues) - return queue, isNew, true -} - -// check if the block's collections have been received, -// if yes, add the collection to the executable block -// if no, fetch the collection. -// if a block has 3 collection, it would be 3 reqs to fetch them. -// mark the collection belongs to the block, -// mark the block contains this collection. 
-// It returns the missing collections to be fetched -// TODO: to rename -func (e *Engine) matchAndFindMissingCollections( - executableBlock *entity.ExecutableBlock, - collectionsBackdata *stdmap.BlockByCollectionBackdata, -) ([]*flow.CollectionGuarantee, error) { - missingCollections := make([]*flow.CollectionGuarantee, 0, len(executableBlock.Block.Payload.Guarantees)) - - for _, guarantee := range executableBlock.Block.Payload.Guarantees { - coll := &entity.CompleteCollection{ - Guarantee: guarantee, - } - executableBlock.CompleteCollections[guarantee.ID()] = coll - - // check if we have requested this collection before. - // blocksNeedingCollection stores all the blocks that contain this collection - - if blocksNeedingCollection, exists := collectionsBackdata.ByID(guarantee.ID()); exists { - // if we've requested this collection, it means other block might also contain this collection. - // in this case, add this block to the map so that when the collection is received, - // we could update the executable block - blocksNeedingCollection.ExecutableBlocks[executableBlock.ID()] = executableBlock - - // since the collection is still being requested, we don't have the transactions - // yet, so exit - continue - } - - // the storage doesn't have this collection, meaning this is our first time seeing this - // collection guarantee, create an entry to store in collectionsBackdata in order to - // update the executable blocks when the collection is received. - blocksNeedingCollection := &entity.BlocksByCollection{ - CollectionID: guarantee.ID(), - ExecutableBlocks: map[flow.Identifier]*entity.ExecutableBlock{executableBlock.ID(): executableBlock}, - } - - added := collectionsBackdata.Add(blocksNeedingCollection.ID(), blocksNeedingCollection) - if !added { - // sanity check, should not happen, unless mempool implementation has a bug - return nil, fmt.Errorf("collection already mapped to block") - } - - missingCollections = append(missingCollections, guarantee) - } - - return missingCollections, nil -} - -func (e *Engine) ExecuteScriptAtBlockID( - ctx context.Context, - script []byte, - arguments [][]byte, - blockID flow.Identifier, -) ([]byte, error) { - - stateCommit, err := e.execState.StateCommitmentByBlockID(ctx, blockID) - if err != nil { - return nil, fmt.Errorf("failed to get state commitment for block (%s): %w", blockID, err) - } - - // return early if state with the given state commitment is not in memory - // and already purged. This reduces allocations for scripts targeting old blocks. - if !e.execState.HasState(stateCommit) { - return nil, fmt.Errorf("failed to execute script at block (%s): state commitment not found (%s). this error usually happens if the reference block for this script is not set to a recent block", blockID.String(), hex.EncodeToString(stateCommit[:])) - } - - block, err := e.state.AtBlockID(blockID).Head() - if err != nil { - return nil, fmt.Errorf("failed to get block (%s): %w", blockID, err) - } - - blockSnapshot := e.execState.NewStorageSnapshot(stateCommit) - - if e.extensiveLogging { - args := make([]string, 0) - for _, a := range arguments { - args = append(args, hex.EncodeToString(a)) - } - e.log.Debug(). - Hex("block_id", logging.ID(blockID)). - Uint64("block_height", block.Height). - Hex("state_commitment", stateCommit[:]). - Hex("script_hex", script). - Str("args", strings.Join(args[:], ",")). 
- Msg("extensive log: executed script content") - } - return e.computationManager.ExecuteScript( - ctx, - script, - arguments, - block, - blockSnapshot) -} - -func (e *Engine) GetRegisterAtBlockID( - ctx context.Context, - owner, key []byte, - blockID flow.Identifier, -) ([]byte, error) { - - stateCommit, err := e.execState.StateCommitmentByBlockID(ctx, blockID) - if err != nil { - return nil, fmt.Errorf("failed to get state commitment for block (%s): %w", blockID, err) - } - - blockSnapshot := e.execState.NewStorageSnapshot(stateCommit) - - id := flow.NewRegisterID(string(owner), string(key)) - data, err := blockSnapshot.Get(id) - if err != nil { - return nil, fmt.Errorf("failed to get the register (%s): %w", id, err) - } - - return data, nil -} - -func (e *Engine) GetAccount( - ctx context.Context, - addr flow.Address, - blockID flow.Identifier, -) (*flow.Account, error) { - stateCommit, err := e.execState.StateCommitmentByBlockID(ctx, blockID) - if err != nil { - return nil, fmt.Errorf("failed to get state commitment for block (%s): %w", blockID, err) - } - - // return early if state with the given state commitment is not in memory - // and already purged. This reduces allocations for get accounts targeting old blocks. - if !e.execState.HasState(stateCommit) { - return nil, fmt.Errorf( - "failed to get account at block (%s): state commitment not "+ - "found (%s). this error usually happens if the reference "+ - "block for this script is not set to a recent block.", - blockID.String(), - hex.EncodeToString(stateCommit[:])) - } - - block, err := e.state.AtBlockID(blockID).Head() - if err != nil { - return nil, fmt.Errorf("failed to get block (%s): %w", blockID, err) - } - - blockSnapshot := e.execState.NewStorageSnapshot(stateCommit) - - return e.computationManager.GetAccount(ctx, addr, block, blockSnapshot) -} - -// save the execution result of a block -func (e *Engine) saveExecutionResults( - ctx context.Context, - result *execution.ComputationResult, -) error { - span, childCtx := e.tracer.StartSpanFromContext(ctx, trace.EXESaveExecutionResults) - defer span.End() - - e.log.Debug(). - Hex("block_id", logging.Entity(result.ExecutableBlock)). - Msg("received computation result") - - for _, event := range result.ExecutionResult.ServiceEvents { - e.log.Info(). - Uint64("block_height", result.ExecutableBlock.Height()). - Hex("block_id", logging.Entity(result.ExecutableBlock)). - Str("event_type", event.Type.String()). - Msg("service event emitted") - } - - err := e.execState.SaveExecutionResults(childCtx, result) - if err != nil { - return fmt.Errorf("cannot persist execution state: %w", err) - } - - finalEndState := result.CurrentEndState() - e.log.Debug(). - Hex("block_id", logging.Entity(result.ExecutableBlock)). - Hex("start_state", result.ExecutableBlock.StartState[:]). - Hex("final_state", finalEndState[:]). - Msg("saved computation results") - - return nil -} - -// logExecutableBlock logs all data about an executable block -// over time we should skip this -func (e *Engine) logExecutableBlock(eb *entity.ExecutableBlock) { - // log block - e.log.Debug(). - Hex("block_id", logging.Entity(eb)). - Hex("prev_block_id", logging.ID(eb.Block.Header.ParentID)). - Uint64("block_height", eb.Block.Header.Height). - Int("number_of_collections", len(eb.Collections())). - RawJSON("block_header", logging.AsJSON(eb.Block.Header)). - Msg("extensive log: block header") - - // logs transactions - for i, col := range eb.Collections() { - for j, tx := range col.Transactions { - e.log.Debug(). 
- Hex("block_id", logging.Entity(eb)). - Int("block_height", int(eb.Block.Header.Height)). - Hex("prev_block_id", logging.ID(eb.Block.Header.ParentID)). - Int("collection_index", i). - Int("tx_index", j). - Hex("collection_id", logging.ID(col.Guarantee.CollectionID)). - Hex("tx_hash", logging.Entity(tx)). - Hex("start_state_commitment", eb.StartState[:]). - RawJSON("transaction", logging.AsJSON(tx)). - Msg("extensive log: executed tx content") - } - } -} - -// addOrFetch checks if there are stored collections for the given guarantees, if there is, -// forward them to mempool to process the collection, otherwise fetch the collections. -// any error returned are exception -func (e *Engine) addOrFetch( - blockID flow.Identifier, - height uint64, - guarantees []*flow.CollectionGuarantee, -) error { - return e.fetchAndHandleCollection(blockID, height, guarantees, func(collection *flow.Collection) error { - err := e.mempool.BlockByCollection.Run( - func(backdata *stdmap.BlockByCollectionBackdata) error { - return e.addCollectionToMempool(collection, backdata) - }) - - if err != nil { - return fmt.Errorf("could not add collection to mempool: %w", err) - } - return nil - }) -} - -// addOrFetch checks if there are stored collections for the given guarantees, if there is, -// forward them to the handler to process the collection, otherwise fetch the collections. -// any error returned are exception -func (e *Engine) fetchAndHandleCollection( - blockID flow.Identifier, - height uint64, - guarantees []*flow.CollectionGuarantee, - handleCollection func(*flow.Collection) error, -) error { - fetched := false - for _, guarantee := range guarantees { - // if we've requested this collection, we will store it in the storage, - // so check the storage to see whether we've seen it. - collection, err := e.collections.ByID(guarantee.CollectionID) - - if err == nil { - // we found the collection from storage, forward this collection to handler - err = handleCollection(collection) - if err != nil { - return fmt.Errorf("could not handle collection: %w", err) - } - - continue - } - - // check if there was exception - if !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("error while querying for collection: %w", err) - } - - err = e.fetchCollection(blockID, height, guarantee) - if err != nil { - return fmt.Errorf("could not fetch collection: %w", err) - } - fetched = true - } - - // make sure that the requests are dispatched immediately by the requester - if fetched { - e.request.Force() - e.metrics.ExecutionCollectionRequestSent() - } - - return nil -} - -// fetchCollection takes a guarantee and forwards to requester engine for fetching the collection -// any error returned are fatal error -func (e *Engine) fetchCollection( - blockID flow.Identifier, - height uint64, - guarantee *flow.CollectionGuarantee, -) error { - e.log.Debug(). - Hex("block", blockID[:]). - Hex("collection_id", logging.ID(guarantee.ID())). - Msg("requesting collection") - - guarantors, err := protocol.FindGuarantors(e.state, guarantee) - if err != nil { - // execution node executes certified blocks, which means there is a quorum of consensus nodes who - // have validated the block payload. And that validation includes checking the guarantors are correct. 
- // Based on that assumption, failing to find guarantors for guarantees contained in an incorporated block - // should be treated as fatal error - e.log.Fatal().Err(err).Msgf("failed to find guarantors for guarantee %v at block %v, height %v", - guarantee.ID(), - blockID, - height, - ) - return fmt.Errorf("could not find guarantors: %w", err) - } - // queue the collection to be requested from one of the guarantors - e.request.EntityByID(guarantee.ID(), filter.And( - filter.HasNodeID(guarantors...), - )) - - return nil -} diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go deleted file mode 100644 index c93d52cb68b..00000000000 --- a/engine/execution/ingestion/engine_test.go +++ /dev/null @@ -1,1927 +0,0 @@ -package ingestion - -import ( - "context" - "crypto/rand" - "fmt" - mathRand "math/rand" - "strings" - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/crypto" - - "github.com/onflow/flow-go/engine/execution" - computation "github.com/onflow/flow-go/engine/execution/computation/mock" - "github.com/onflow/flow-go/engine/execution/ingestion/uploader" - uploadermock "github.com/onflow/flow-go/engine/execution/ingestion/uploader/mock" - provider "github.com/onflow/flow-go/engine/execution/provider/mock" - "github.com/onflow/flow-go/engine/execution/state" - stateMock "github.com/onflow/flow-go/engine/execution/state/mock" - executionUnittest "github.com/onflow/flow-go/engine/execution/state/unittest" - "github.com/onflow/flow-go/engine/testutil/mocklocal" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/flow/order" - "github.com/onflow/flow-go/module/mempool/entity" - "github.com/onflow/flow-go/module/metrics" - module "github.com/onflow/flow-go/module/mocks" - "github.com/onflow/flow-go/module/signature" - "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network/mocknetwork" - stateProtocol "github.com/onflow/flow-go/state/protocol" - protocol "github.com/onflow/flow-go/state/protocol/mock" - storageerr "github.com/onflow/flow-go/storage" - storage "github.com/onflow/flow-go/storage/mocks" - "github.com/onflow/flow-go/utils/unittest" - "github.com/onflow/flow-go/utils/unittest/mocks" -) - -var ( - collection1Identity = unittest.IdentityFixture() - collection2Identity = unittest.IdentityFixture() - collection3Identity = unittest.IdentityFixture() - myIdentity = unittest.IdentityFixture() -) - -func init() { - collection1Identity.Role = flow.RoleCollection - collection2Identity.Role = flow.RoleCollection - collection3Identity.Role = flow.RoleCollection - myIdentity.Role = flow.RoleExecution -} - -// ExecutionState is a mocked version of execution state that -// simulates some of its behavior for testing purpose -type mockExecutionState struct { - sync.Mutex - stateMock.ExecutionState - commits map[flow.Identifier]flow.StateCommitment -} - -func newMockExecutionState(seal *flow.Seal) *mockExecutionState { - commits := make(map[flow.Identifier]flow.StateCommitment) - commits[seal.BlockID] = seal.FinalState - return &mockExecutionState{ - commits: commits, - } -} - -func (es *mockExecutionState) StateCommitmentByBlockID( - ctx context.Context, - blockID flow.Identifier, -) ( - flow.StateCommitment, - error, -) { - es.Lock() - defer 
es.Unlock() - commit, ok := es.commits[blockID] - if !ok { - return flow.DummyStateCommitment, storageerr.ErrNotFound - } - - return commit, nil -} - -func (es *mockExecutionState) ExecuteBlock(t *testing.T, block *flow.Block) { - parentExecuted, err := state.IsBlockExecuted( - context.Background(), - es, - block.Header.ParentID) - require.NoError(t, err) - require.True(t, parentExecuted, "parent block not executed") - - es.Lock() - defer es.Unlock() - es.commits[block.ID()] = unittest.StateCommitmentFixture() -} - -type testingContext struct { - t *testing.T - engine *Engine - blocks *storage.MockBlocks - collections *storage.MockCollections - state *protocol.State - conduit *mocknetwork.Conduit - collectionConduit *mocknetwork.Conduit - computationManager *computation.ComputationManager - providerEngine *provider.ProviderEngine - executionState *stateMock.ExecutionState - snapshot *protocol.Snapshot - identity *flow.Identity - broadcastedReceipts map[flow.Identifier]*flow.ExecutionReceipt - collectionRequester *module.MockRequester - identities flow.IdentityList - stopControl *StopControl - uploadMgr *uploader.Manager - - mu *sync.Mutex -} - -func runWithEngine(t *testing.T, f func(testingContext)) { - - ctrl := gomock.NewController(t) - - net := mocknetwork.NewMockNetwork(ctrl) - request := module.NewMockRequester(ctrl) - - // initialize the mocks and engine - conduit := &mocknetwork.Conduit{} - collectionConduit := &mocknetwork.Conduit{} - - // generates signing identity including staking key for signing - seed := make([]byte, crypto.KeyGenSeedMinLen) - n, err := rand.Read(seed) - require.Equal(t, n, crypto.KeyGenSeedMinLen) - require.NoError(t, err) - sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed) - require.NoError(t, err) - myIdentity.StakingPubKey = sk.PublicKey() - me := mocklocal.NewMockLocal(sk, myIdentity.ID(), t) - - blocks := storage.NewMockBlocks(ctrl) - payloads := storage.NewMockPayloads(ctrl) - collections := storage.NewMockCollections(ctrl) - events := storage.NewMockEvents(ctrl) - serviceEvents := storage.NewMockServiceEvents(ctrl) - txResults := storage.NewMockTransactionResults(ctrl) - - computationManager := new(computation.ComputationManager) - providerEngine := new(provider.ProviderEngine) - protocolState := new(protocol.State) - executionState := new(stateMock.ExecutionState) - snapshot := new(protocol.Snapshot) - - var engine *Engine - - defer func() { - <-engine.Done() - ctrl.Finish() - computationManager.AssertExpectations(t) - protocolState.AssertExpectations(t) - executionState.AssertExpectations(t) - providerEngine.AssertExpectations(t) - }() - - identityListUnsorted := flow.IdentityList{myIdentity, collection1Identity, collection2Identity, collection3Identity} - identityList := identityListUnsorted.Sort(order.Canonical) - - snapshot.On("Identities", mock.Anything).Return(func(selector flow.IdentityFilter) flow.IdentityList { - return identityList.Filter(selector) - }, nil) - - snapshot.On("Identity", mock.Anything).Return(func(nodeID flow.Identifier) *flow.Identity { - identity, ok := identityList.ByNodeID(nodeID) - require.Truef(t, ok, "Could not find nodeID %v in identityList", nodeID) - return identity - }, nil) - - txResults.EXPECT().BatchStore(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - payloads.EXPECT().Store(gomock.Any(), gomock.Any()).AnyTimes() - - log := unittest.Logger() - metrics := metrics.NewNoopCollector() - - tracer, err := trace.NewTracer(log, "test", "test", trace.SensitivityCaptureAll) - require.NoError(t, err) - 
- request.EXPECT().Force().Return().AnyTimes() - - checkAuthorizedAtBlock := func(blockID flow.Identifier) (bool, error) { - return stateProtocol.IsNodeAuthorizedAt(protocolState.AtBlockID(blockID), myIdentity.NodeID) - } - - stopControl := NewStopControl(zerolog.Nop(), false, 0) - - uploadMgr := uploader.NewManager(trace.NewNoopTracer()) - - engine, err = New( - log, - net, - me, - request, - protocolState, - blocks, - collections, - events, - serviceEvents, - txResults, - computationManager, - providerEngine, - executionState, - metrics, - tracer, - false, - checkAuthorizedAtBlock, - nil, - uploadMgr, - stopControl, - ) - require.NoError(t, err) - - f(testingContext{ - t: t, - engine: engine, - blocks: blocks, - collections: collections, - state: protocolState, - collectionRequester: request, - conduit: conduit, - collectionConduit: collectionConduit, - computationManager: computationManager, - providerEngine: providerEngine, - executionState: executionState, - snapshot: snapshot, - identity: myIdentity, - broadcastedReceipts: make(map[flow.Identifier]*flow.ExecutionReceipt), - identities: identityList, - uploadMgr: uploadMgr, - stopControl: stopControl, - - mu: &sync.Mutex{}, - }) - - <-engine.Done() -} - -func (ctx *testingContext) assertSuccessfulBlockComputation( - commits map[flow.Identifier]flow.StateCommitment, - onPersisted func(blockID flow.Identifier, commit flow.StateCommitment), - executableBlock *entity.ExecutableBlock, - previousExecutionResultID flow.Identifier, - expectBroadcast bool, - newStateCommitment flow.StateCommitment, - computationResult *execution.ComputationResult, -) *protocol.Snapshot { - if computationResult == nil { - computationResult = executionUnittest.ComputationResultForBlockFixture( - previousExecutionResultID, - executableBlock) - } - - if len(computationResult.Chunks) > 0 { - computationResult.Chunks[len(computationResult.Chunks)-1].EndState = newStateCommitment - } - - // copy executable block to set `Executing` state for arguments matching - // without changing original object - eb := *executableBlock - eb.Executing = true - eb.StartState = &newStateCommitment - - ctx.computationManager. - On("ComputeBlock", mock.Anything, previousExecutionResultID, &eb, mock.Anything). - Return(computationResult, nil).Once() - - ctx.executionState.On("NewStorageSnapshot", newStateCommitment).Return(nil) - - ctx.executionState. - On("GetExecutionResultID", mock.Anything, executableBlock.Block.Header.ParentID). - Return(previousExecutionResultID, nil) - - mocked := ctx.executionState. - On("SaveExecutionResults", mock.Anything, computationResult). - Return(nil) - - mocked.RunFn = - func(args mock.Arguments) { - result := args[1].(*execution.ComputationResult) - blockID := result.ExecutableBlock.Block.Header.ID() - commit := result.CurrentEndState() - - ctx.mu.Lock() - commits[blockID] = commit - ctx.mu.Unlock() - onPersisted(blockID, commit) - } - - mocked.ReturnArguments = mock.Arguments{nil} - - broadcastMock := ctx.providerEngine. - On( - "BroadcastExecutionReceipt", - mock.Anything, - mock.Anything, - ). - Run(func(args mock.Arguments) { - receipt := args[1].(*flow.ExecutionReceipt) - - assert.Equal(ctx.t, - len(computationResult.ServiceEvents), - len(receipt.ExecutionResult.ServiceEvents), - ) - - ctx.mu.Lock() - ctx.broadcastedReceipts[receipt.ExecutionResult.BlockID] = receipt - ctx.mu.Unlock() - }). 
- Return(nil) - - protocolSnapshot := ctx.mockHasWeightAtBlockID(executableBlock.ID(), expectBroadcast) - - if !expectBroadcast { - broadcastMock.Maybe() - } - - return protocolSnapshot -} - -func (ctx testingContext) mockHasWeightAtBlockID(blockID flow.Identifier, hasWeight bool) *protocol.Snapshot { - identity := *ctx.identity - identity.Weight = 0 - if hasWeight { - identity.Weight = 100 - } - snap := new(protocol.Snapshot) - snap.On("Identity", identity.NodeID).Return(&identity, nil) - - return snap -} - -func (ctx testingContext) mockSnapshot(header *flow.Header, identities flow.IdentityList) { - ctx.mockSnapshotWithBlockID(header.ID(), identities) -} - -func (ctx testingContext) mockSnapshotWithBlockID(blockID flow.Identifier, identities flow.IdentityList) { - cluster := new(protocol.Cluster) - // filter only collections as cluster members - cluster.On("Members").Return(identities.Filter(filter.HasRole(flow.RoleCollection))) - - epoch := new(protocol.Epoch) - epoch.On("ClusterByChainID", mock.Anything).Return(cluster, nil) - - epochQuery := new(protocol.EpochQuery) - epochQuery.On("Current").Return(epoch) - - snap := new(protocol.Snapshot) - snap.On("Epochs").Return(epochQuery) - snap.On("Identity", mock.Anything).Return(identities[0], nil) - ctx.state.On("AtBlockID", blockID).Return(snap) -} - -func (ctx *testingContext) stateCommitmentExist(blockID flow.Identifier, commit flow.StateCommitment) { - ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blockID).Return(commit, nil) -} - -func (ctx *testingContext) mockStateCommitsWithMap(commits map[flow.Identifier]flow.StateCommitment) { - mocked := ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, mock.Anything) - // https://github.com/stretchr/testify/issues/350#issuecomment-570478958 - mocked.RunFn = func(args mock.Arguments) { - - blockID := args[1].(flow.Identifier) - ctx.mu.Lock() - commit, ok := commits[blockID] - ctx.mu.Unlock() - if ok { - mocked.ReturnArguments = mock.Arguments{commit, nil} - return - } - - mocked.ReturnArguments = mock.Arguments{flow.StateCommitment{}, storageerr.ErrNotFound} - } -} - -func TestChunkIndexIsSet(t *testing.T) { - - i := mathRand.Int() - chunk := flow.NewChunk( - unittest.IdentifierFixture(), - i, - unittest.StateCommitmentFixture(), - 21, - unittest.IdentifierFixture(), - unittest.StateCommitmentFixture()) - - assert.Equal(t, i, int(chunk.Index)) - assert.Equal(t, i, int(chunk.CollectionIndex)) -} - -func TestChunkNumberOfTxsIsSet(t *testing.T) { - - i := int(mathRand.Uint32()) - chunk := flow.NewChunk( - unittest.IdentifierFixture(), - 3, - unittest.StateCommitmentFixture(), - i, - unittest.IdentifierFixture(), - unittest.StateCommitmentFixture()) - - assert.Equal(t, i, int(chunk.NumberOfTransactions)) -} - -func TestExecuteOneBlock(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - - // A <- B - blockA := unittest.BlockHeaderFixture() - blockB := unittest.ExecutableBlockFixtureWithParent(nil, blockA, unittest.StateCommitmentPointerFixture()) - - ctx.mockHasWeightAtBlockID(blockA.ID(), true) - ctx.mockHasWeightAtBlockID(blockB.ID(), true) - ctx.mockSnapshot(blockB.Block.Header, unittest.IdentityListFixture(1)) - - // blockA's start state is its parent's state commitment, - // and blockA's parent has been executed. 
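Throughout these tests, the commits map stands in for the execution state: a block counts as executed exactly when a state commitment has been recorded for it, so seeding the map with blockB's parent ID marks blockA (the parent) as already executed. A minimal sketch of that convention, using a hypothetical helper that is not part of the test file:

// isExecuted mirrors how the mocked execution state decides whether a block
// has been executed: a recorded state commitment means "executed".
func isExecuted(commits map[flow.Identifier]flow.StateCommitment, blockID flow.Identifier) bool {
	_, ok := commits[blockID]
	return ok
}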
- commits := make(map[flow.Identifier]flow.StateCommitment)
- commits[blockB.Block.Header.ParentID] = *blockB.StartState
- wg := sync.WaitGroup{}
- ctx.mockStateCommitsWithMap(commits)
-
- ctx.state.On("Sealed").Return(ctx.snapshot)
- ctx.snapshot.On("Head").Return(blockA, nil)
-
- ctx.assertSuccessfulBlockComputation(
- commits,
- func(blockID flow.Identifier, commit flow.StateCommitment) {
- wg.Done()
- },
- blockB,
- unittest.IdentifierFixture(),
- true,
- *blockB.StartState,
- nil)
-
- wg.Add(1) // wait for block B to be executed
- err := ctx.engine.handleBlock(context.Background(), blockB.Block)
- require.NoError(t, err)
-
- unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second)
-
- _, more := <-ctx.engine.Done() // wait for all the blocks to be processed
- require.False(t, more)
-
- _, ok := commits[blockB.ID()]
- require.True(t, ok)
-
- })
-}
-
-func Test_OnlyHeadOfTheQueueIsExecuted(t *testing.T) {
- unittest.SkipUnless(t, unittest.TEST_FLAKY, "To be fixed later")
- // Only the head of the queue should be executing.
- // Restarting the node, or errors in the consensus module, could cause a block (or its parent)
- // which has already been executed to be enqueued again; since we already have a state
- // commitment for it, it will be executed right away.
- // If it then finishes execution before its parent does, a situation can occur where we try to
- // dequeue it but fail, since only queue heads are checked.
- //
- // Similarly, queues can be rebuilt when a block connecting two heads is added - for example,
- // blocks 1 and 3 are handled and both start executing; in the meantime block 2 is added, which
- // shouldn't cause block 3 to be requeued as a child of 2 (which is a child of 1), because 3 is already being executed.
- //
- // Should any of this happen, execution will halt.
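The head-only dequeue rule described above can be illustrated with a small, self-contained sketch; the queue type and method below are hypothetical and only mimic the invariant, not the engine's actual queue implementation:

package main

import "fmt"

// execQueue is a toy chain-ordered queue: blocks[0] is the head.
type execQueue struct {
	blocks []string
}

// tryDequeue removes blockID only if it is currently the head;
// a child that finishes before its parent is rejected.
func (q *execQueue) tryDequeue(blockID string) bool {
	if len(q.blocks) == 0 || q.blocks[0] != blockID {
		return false
	}
	q.blocks = q.blocks[1:]
	return true
}

func main() {
	q := &execQueue{blocks: []string{"B", "C"}}
	fmt.Println(q.tryDequeue("C")) // false: C finished before its parent B
	fmt.Println(q.tryDequeue("B")) // true: only the head may be dequeued
}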
- - runWithEngine(t, func(ctx testingContext) { - - // A <- B <- C <- D - - // root block - blockA := unittest.BlockHeaderFixture(func(header *flow.Header) { - header.Height = 920 - }) - - // last executed block - it will be re-queued regardless of state commit - blockB := unittest.ExecutableBlockFixtureWithParent(nil, blockA, unittest.StateCommitmentPointerFixture()) - - // finalized block - it can be executed in parallel, as blockB has been executed - // and this should be fixed - blockC := unittest.ExecutableBlockFixtureWithParent(nil, blockB.Block.Header, blockB.StartState) - - // expected to be executed afterwards - blockD := unittest.ExecutableBlockFixtureWithParent(nil, blockC.Block.Header, blockC.StartState) - - logBlocks(map[string]*entity.ExecutableBlock{ - "B": blockB, - "C": blockC, - "D": blockD, - }) - - commits := make(map[flow.Identifier]flow.StateCommitment) - commits[blockB.Block.Header.ParentID] = *blockB.StartState - commits[blockC.Block.Header.ParentID] = *blockC.StartState - - wg := sync.WaitGroup{} - - // this intentionally faulty behaviour (block cannot have no state commitment and later have one without being executed) - // is to hack the first check for block execution and intentionally cause situation where - // next check (executing only queue head) can be tested - bFirstTime := true - bStateCommitment := ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blockB.ID()) - bStateCommitment.RunFn = func(args mock.Arguments) { - if bFirstTime { - bStateCommitment.ReturnArguments = mock.Arguments{flow.StateCommitment{}, storageerr.ErrNotFound} - bFirstTime = false - return - } - bStateCommitment.ReturnArguments = mock.Arguments{*blockB.StartState, nil} - } - - ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blockA.ID()).Return(*blockB.StartState, nil) - ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, mock.Anything).Return(nil, storageerr.ErrNotFound) - - ctx.state.On("Sealed").Return(ctx.snapshot) - ctx.snapshot.On("Head").Return(blockA, nil) - - wgB := sync.WaitGroup{} - wgB.Add(1) - - bDone := false - cDone := false - - // expect B and C to be loaded by loading unexecuted blocks in engine Ready - wg.Add(2) - - blockBSnapshot := ctx.assertSuccessfulBlockComputation( - commits, - func(blockID flow.Identifier, commit flow.StateCommitment) { - require.False(t, bDone) - require.False(t, cDone) - wg.Done() - - // make sure block B execution takes enough time so C can start executing to showcase an error - time.Sleep(10 * time.Millisecond) - - bDone = true - }, - blockB, - unittest.IdentifierFixture(), - true, - *blockB.StartState, - nil) - - blockCSnapshot := ctx.assertSuccessfulBlockComputation( - commits, - func(blockID flow.Identifier, commit flow.StateCommitment) { - require.True(t, bDone) - require.False(t, cDone) - - wg.Done() - cDone = true - - }, - blockC, - unittest.IdentifierFixture(), - true, - *blockC.StartState, - nil) - - ctx.assertSuccessfulBlockComputation( - commits, - func(blockID flow.Identifier, commit flow.StateCommitment) { - require.True(t, bDone) - require.True(t, cDone) - - wg.Done() - }, - blockD, - unittest.IdentifierFixture(), - true, - *blockC.StartState, - nil) - - // mock loading unexecuted blocks at startup - ctx.executionState.On("GetHighestExecutedBlockID", mock.Anything).Return(blockB.Height(), blockB.ID(), nil) - blockASnapshot := new(protocol.Snapshot) - - ctx.state.On("AtHeight", blockB.Height()).Return(blockBSnapshot) - blockBSnapshot.On("Head").Return(blockB.Block.Header, nil) - - 
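For reference, the first-call-then-later-calls behavior that bFirstTime hand-rolls above can also be expressed with testify's call-count limits; this sketch assumes the surrounding test's executionState mock, blockB, and the storageerr alias:

// first call: pretend block B has not been executed yet
ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blockB.ID()).
	Return(flow.StateCommitment{}, storageerr.ErrNotFound).Once()
// all later calls: the commitment is available
ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blockB.ID()).
	Return(*blockB.StartState, nil)

The RunFn approach is used in the test instead because it lets a single expectation change its return values dynamically (see the testify issue linked earlier in this file).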
params := new(protocol.Params) - ctx.state.On("Final").Return(blockCSnapshot) - - // for reloading - ctx.blocks.EXPECT().ByID(blockB.ID()).Return(blockB.Block, nil) - ctx.blocks.EXPECT().ByID(blockC.ID()).Return(blockC.Block, nil) - - blockASnapshot.On("Head").Return(&blockA, nil) - blockCSnapshot.On("Head").Return(blockC.Block.Header, nil) - blockCSnapshot.On("Descendants").Return(nil, nil) - - ctx.state.On("AtHeight", blockC.Height()).Return(blockCSnapshot) - - ctx.state.On("Params").Return(params) - params.On("Root").Return(&blockA, nil) - - <-ctx.engine.Ready() - - wg.Add(1) // for block E to be executed - it should wait for D to finish - err := ctx.engine.handleBlock(context.Background(), blockD.Block) - require.NoError(t, err) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - _, more := <-ctx.engine.Done() // wait for all the blocks to be processed - require.False(t, more) - - _, ok := commits[blockB.ID()] - require.True(t, ok) - - _, ok = commits[blockC.ID()] - require.True(t, ok) - - _, ok = commits[blockD.ID()] - require.True(t, ok) - }) -} - -func TestBlocksArentExecutedMultipleTimes_multipleBlockEnqueue(t *testing.T) { - unittest.SkipUnless(t, unittest.TEST_TODO, "broken test") - - runWithEngine(t, func(ctx testingContext) { - - colSigner := unittest.IdentifierFixture() - - // A <- B <- C - blockA := unittest.BlockHeaderFixture() - blockB := unittest.ExecutableBlockFixtureWithParent(nil, blockA, unittest.StateCommitmentPointerFixture()) - - // blocks are empty, so no state change is expected - blockC := unittest.ExecutableBlockFixtureWithParent([][]flow.Identifier{{colSigner}}, blockB.Block.Header, blockB.StartState) - - logBlocks(map[string]*entity.ExecutableBlock{ - "B": blockB, - "C": blockC, - }) - - collection := blockC.Collections()[0].Collection() - - commits := make(map[flow.Identifier]flow.StateCommitment) - commits[blockB.Block.Header.ParentID] = *blockB.StartState - - wg := sync.WaitGroup{} - ctx.mockStateCommitsWithMap(commits) - - ctx.state.On("Sealed").Return(ctx.snapshot) - ctx.snapshot.On("Head").Return(blockA, nil) - - // wait finishing execution until all the blocks are sent to execution - wgPut := sync.WaitGroup{} - wgPut.Add(1) - - // add extra flag to make sure B was indeed executed before C - wasBExecuted := false - - ctx.assertSuccessfulBlockComputation( - commits, - func(blockID flow.Identifier, commit flow.StateCommitment) { - wgPut.Wait() - wg.Done() - - wasBExecuted = true - }, - blockB, - unittest.IdentifierFixture(), - true, - *blockB.StartState, - nil) - - ctx.assertSuccessfulBlockComputation( - commits, - func(blockID flow.Identifier, commit flow.StateCommitment) { - wg.Done() - require.True(t, wasBExecuted) - }, - blockC, - unittest.IdentifierFixture(), - true, - *blockB.StartState, - nil) - - // make sure collection requests are sent - // first, the collection should not be found, so the request will be sent. 
Next, it will be queried again, and this time
- // it should return the collection
- gomock.InOrder(
- ctx.collections.EXPECT().ByID(blockC.Collections()[0].Guarantee.CollectionID).DoAndReturn(func(_ flow.Identifier) (*flow.Collection, error) {
- // make sure requests for collections from block C are sent before block B finishes execution
- require.False(t, wasBExecuted)
- return nil, storageerr.ErrNotFound
- }),
- ctx.collections.EXPECT().ByID(blockC.Collections()[0].Guarantee.CollectionID).DoAndReturn(func(_ flow.Identifier) (*flow.Collection, error) {
- return &collection, nil
- }),
- )
-
- ctx.collectionRequester.EXPECT().EntityByID(gomock.Any(), gomock.Any()).DoAndReturn(func(_ flow.Identifier, _ flow.IdentityFilter) {
- // parallel run to avoid deadlock, ingestion engine is thread-safe
- go func() {
- err := ctx.engine.handleCollection(unittest.IdentifierFixture(), &collection)
- require.NoError(t, err)
- }()
- })
- ctx.collections.EXPECT().Store(&collection)
-
- times := 4
-
- wg.Add(1) // wait for block B to be executed
- for i := 0; i < times; i++ {
- err := ctx.engine.handleBlock(context.Background(), blockB.Block)
- require.NoError(t, err)
- }
- wg.Add(1) // wait for block C to be executed
- // add an extra block to ensure execution can continue after the duplicated blocks
- err := ctx.engine.handleBlock(context.Background(), blockC.Block)
- require.NoError(t, err)
- wgPut.Done()
-
- unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second)
-
- _, more := <-ctx.engine.Done() // wait for all the blocks to be processed
- require.False(t, more)
-
- _, ok := commits[blockB.ID()]
- require.True(t, ok)
-
- _, ok = commits[blockC.ID()]
- require.True(t, ok)
- })
-}
-
-func TestBlocksArentExecutedMultipleTimes_collectionArrival(t *testing.T) {
- runWithEngine(t, func(ctx testingContext) {
-
- // blocks in the queue are removed only after their execution has finished,
- // which leaves a brief window for multiple executions: while the parent block
- // is executing, a collection can arrive that completes a child block, and the
- // child gets executed. When the parent then finishes, it checks its children,
- // finds a complete block, and executes it again.
- // This should not normally occur, because StartState won't be set before the
- // parent has finished, but we should handle the edge case where it is set as well.
-
- // A (0 collections) <- B (0 collections) <- C (1 collection) <- D (0 collections)
- blockA := unittest.BlockHeaderFixture()
- blockB := unittest.ExecutableBlockFixtureWithParent(nil, blockA, unittest.StateCommitmentPointerFixture())
-
- collectionIdentities := ctx.identities.Filter(filter.HasRole(flow.RoleCollection))
- colSigner := collectionIdentities[0].ID()
- // blockC carries a single collection signed by colSigner; the other blocks are empty
- blockC := unittest.ExecutableBlockFixtureWithParent([][]flow.Identifier{{colSigner}}, blockB.Block.Header, blockB.StartState)
- // the default fixture uses a 10-collector committee, but in this test case there are only 4,
- // so we need to update the signer indices.
- // set the first identity as signer - log.Info().Msgf("canonical collection list %v", collectionIdentities.NodeIDs()) - log.Info().Msgf("full list %v", ctx.identities) - indices, err := - signature.EncodeSignersToIndices(collectionIdentities.NodeIDs(), []flow.Identifier{colSigner}) - require.NoError(t, err) - blockC.Block.Payload.Guarantees[0].SignerIndices = indices - - // block D to make sure execution resumes after block C multiple execution has been prevented - blockD := unittest.ExecutableBlockFixtureWithParent(nil, blockC.Block.Header, blockC.StartState) - - logBlocks(map[string]*entity.ExecutableBlock{ - "B": blockB, - "C": blockC, - "D": blockD, - }) - - collection := blockC.Collections()[0].Collection() - - commits := make(map[flow.Identifier]flow.StateCommitment) - commits[blockB.Block.Header.ParentID] = *blockB.StartState - - wg := sync.WaitGroup{} - ctx.mockStateCommitsWithMap(commits) - - // mock the cluster canonical list at the collection guarantee's reference block - // use the same canonical list as used for building signer indices - ctx.mockSnapshotWithBlockID(unittest.FixedReferenceBlockID(), ctx.identities) - ctx.mockSnapshot(blockB.Block.Header, ctx.identities) - ctx.mockSnapshot(blockC.Block.Header, ctx.identities) - ctx.mockSnapshot(blockD.Block.Header, ctx.identities) - - ctx.state.On("Sealed").Return(ctx.snapshot) - ctx.snapshot.On("Head").Return(blockA, nil) - - // wait to control parent (block B) execution until we are ready - wgB := sync.WaitGroup{} - wgB.Add(1) - - wgC := sync.WaitGroup{} - wgC.Add(1) - - ctx.assertSuccessfulBlockComputation( - commits, - func(blockID flow.Identifier, commit flow.StateCommitment) { - wgB.Wait() - wg.Done() - }, - blockB, - unittest.IdentifierFixture(), - true, - *blockB.StartState, - nil) - - ctx.assertSuccessfulBlockComputation( - commits, - func(blockID flow.Identifier, commit flow.StateCommitment) { - wgC.Wait() - wg.Done() - }, - blockC, - unittest.IdentifierFixture(), - true, - *blockC.StartState, - nil) - - ctx.assertSuccessfulBlockComputation( - commits, - func(blockID flow.Identifier, commit flow.StateCommitment) { - wg.Done() - }, - blockD, - unittest.IdentifierFixture(), - true, - *blockD.StartState, - nil) - - // make sure collection requests are sent - // first, the collection should not be found, so the request will be sent. 
Next, it will be queried again, and this time - // it should return fine - gomock.InOrder( - ctx.collections.EXPECT().ByID(blockC.Collections()[0].Guarantee.CollectionID).DoAndReturn(func(_ flow.Identifier) (*flow.Collection, error) { - return nil, storageerr.ErrNotFound - - }), - ctx.collections.EXPECT().Store(&collection), - ctx.collections.EXPECT().ByID(blockC.Collections()[0].Guarantee.CollectionID).DoAndReturn(func(_ flow.Identifier) (*flow.Collection, error) { - return &collection, nil - }), - ) - - ctx.collectionRequester.EXPECT().EntityByID(gomock.Any(), gomock.Any()).DoAndReturn(func(_ flow.Identifier, _ flow.IdentityFilter) { - // parallel run to avoid deadlock, ingestion engine is thread-safe - go func() { - // OnCollection is official callback for collection requester engine - ctx.engine.OnCollection(unittest.IdentifierFixture(), &collection) - - // if block C execution started, it will be unblocked, and next execution will cause WaitGroup/mock failure - // if not, it will be run only once and all will be good - wgC.Done() - wgB.Done() - }() - }).Times(1) - - wg.Add(1) // wait for block B to be executed - err = ctx.engine.handleBlock(context.Background(), blockB.Block) - require.NoError(t, err) - - wg.Add(1) // wait for block C to be executed - err = ctx.engine.handleBlock(context.Background(), blockC.Block) - require.NoError(t, err) - - wg.Add(1) // wait for block D to be executed - err = ctx.engine.handleBlock(context.Background(), blockD.Block) - require.NoError(t, err) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - _, more := <-ctx.engine.Done() // wait for all the blocks to be processed - require.False(t, more) - - _, ok := commits[blockB.ID()] - require.True(t, ok) - - _, ok = commits[blockC.ID()] - require.True(t, ok) - - _, ok = commits[blockD.ID()] - require.True(t, ok) - - }) -} - -func logBlocks(blocks map[string]*entity.ExecutableBlock) { - log := unittest.Logger() - for name, b := range blocks { - log.Debug().Msgf("creating blocks for testing, block %v's ID:%v", name, b.ID()) - } -} - -func TestExecuteBlockInOrder(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - // create blocks with the following relations - // A <- B - // A <- C <- D - - blockSealed := unittest.BlockHeaderFixture() - - blocks := make(map[string]*entity.ExecutableBlock) - blocks["A"] = unittest.ExecutableBlockFixtureWithParent(nil, blockSealed, unittest.StateCommitmentPointerFixture()) - - // none of the blocks has any collection, so state is essentially the same - blocks["B"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["A"].Block.Header, blocks["A"].StartState) - blocks["C"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["A"].Block.Header, blocks["A"].StartState) - blocks["D"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["C"].Block.Header, blocks["C"].StartState) - - // log the blocks, so that we can link the block ID in the log with the blocks in tests - logBlocks(blocks) - - commits := make(map[flow.Identifier]flow.StateCommitment) - commits[blocks["A"].Block.Header.ParentID] = *blocks["A"].StartState - - wg := sync.WaitGroup{} - ctx.mockStateCommitsWithMap(commits) - - // make sure the seal height won't trigger state syncing, so that all blocks - // will be executed. 
- ctx.mockHasWeightAtBlockID(blocks["A"].ID(), true) - ctx.state.On("Sealed").Return(ctx.snapshot) - // a receipt for sealed block won't be broadcasted - ctx.snapshot.On("Head").Return(blockSealed, nil) - ctx.mockSnapshot(blocks["A"].Block.Header, unittest.IdentityListFixture(1)) - ctx.mockSnapshot(blocks["B"].Block.Header, unittest.IdentityListFixture(1)) - ctx.mockSnapshot(blocks["C"].Block.Header, unittest.IdentityListFixture(1)) - ctx.mockSnapshot(blocks["D"].Block.Header, unittest.IdentityListFixture(1)) - - // once block A is computed, it should trigger B and C being sent to compute, - // which in turn should trigger D - blockAExecutionResultID := unittest.IdentifierFixture() - onPersisted := func(blockID flow.Identifier, commit flow.StateCommitment) { - wg.Done() - } - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["A"], - unittest.IdentifierFixture(), - true, - *blocks["A"].StartState, - nil) - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["B"], - blockAExecutionResultID, - true, - *blocks["B"].StartState, - nil) - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["C"], - blockAExecutionResultID, - true, - *blocks["C"].StartState, - nil) - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["D"], - unittest.IdentifierFixture(), - true, - *blocks["D"].StartState, - nil) - - wg.Add(1) - err := ctx.engine.handleBlock(context.Background(), blocks["A"].Block) - require.NoError(t, err) - - wg.Add(1) - err = ctx.engine.handleBlock(context.Background(), blocks["B"].Block) - require.NoError(t, err) - - wg.Add(1) - err = ctx.engine.handleBlock(context.Background(), blocks["C"].Block) - require.NoError(t, err) - - wg.Add(1) - err = ctx.engine.handleBlock(context.Background(), blocks["D"].Block) - require.NoError(t, err) - - // wait until all 4 blocks have been executed - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - _, more := <-ctx.engine.Done() // wait for all the blocks to be processed - assert.False(t, more) - - var ok bool - _, ok = commits[blocks["A"].ID()] - require.True(t, ok) - _, ok = commits[blocks["B"].ID()] - require.True(t, ok) - _, ok = commits[blocks["C"].ID()] - require.True(t, ok) - _, ok = commits[blocks["D"].ID()] - require.True(t, ok) - - // make sure no stopping has been engaged, as it was not set - stopState := ctx.stopControl.GetState() - require.Equal(t, stopState, StopControlOff) - }) -} - -func TestStopAtHeight(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - - blockSealed := unittest.BlockHeaderFixture() - - blocks := make(map[string]*entity.ExecutableBlock) - blocks["A"] = unittest.ExecutableBlockFixtureWithParent(nil, blockSealed, unittest.StateCommitmentPointerFixture()) - - // none of the blocks has any collection, so state is essentially the same - blocks["B"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["A"].Block.Header, blocks["A"].StartState) - blocks["C"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["B"].Block.Header, blocks["A"].StartState) - blocks["D"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["C"].Block.Header, blocks["A"].StartState) - - // stop at block C - _, _, err := ctx.stopControl.SetStopHeight(blockSealed.Height+3, false) - require.NoError(t, err) - - // log the blocks, so that we can link the block ID in the log with the blocks in tests - logBlocks(blocks) - - commits := make(map[flow.Identifier]flow.StateCommitment) - commits[blocks["A"].Block.Header.ParentID] = 
*blocks["A"].StartState - - ctx.mockStateCommitsWithMap(commits) - - // make sure the seal height won't trigger state syncing, so that all blocks - // will be executed. - ctx.mockHasWeightAtBlockID(blocks["A"].ID(), true) - ctx.state.On("Sealed").Return(ctx.snapshot) - ctx.snapshot.On("Head").Return(blockSealed, nil) - ctx.mockSnapshot(blocks["A"].Block.Header, unittest.IdentityListFixture(1)) - ctx.mockSnapshot(blocks["B"].Block.Header, unittest.IdentityListFixture(1)) - - wg := sync.WaitGroup{} - onPersisted := func(blockID flow.Identifier, commit flow.StateCommitment) { - wg.Done() - } - - ctx.blocks.EXPECT().ByID(blocks["A"].ID()).Return(blocks["A"].Block, nil) - ctx.blocks.EXPECT().ByID(blocks["B"].ID()).Return(blocks["B"].Block, nil) - ctx.blocks.EXPECT().ByID(blocks["C"].ID()).Times(0) - ctx.blocks.EXPECT().ByID(blocks["D"].ID()).Times(0) - - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["A"], - unittest.IdentifierFixture(), - true, - *blocks["A"].StartState, - nil) - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["B"], - unittest.IdentifierFixture(), - true, - *blocks["B"].StartState, - nil) - - assert.False(t, ctx.stopControl.IsPaused()) - - wg.Add(1) - ctx.engine.BlockProcessable(blocks["A"].Block.Header, nil) - wg.Add(1) - ctx.engine.BlockProcessable(blocks["B"].Block.Header, nil) - - ctx.engine.BlockProcessable(blocks["C"].Block.Header, nil) - ctx.engine.BlockProcessable(blocks["D"].Block.Header, nil) - - // wait until all 4 blocks have been executed - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - // we don't pause until a block has been finalized - assert.False(t, ctx.stopControl.IsPaused()) - - ctx.engine.BlockFinalized(blocks["A"].Block.Header) - ctx.engine.BlockFinalized(blocks["B"].Block.Header) - - assert.False(t, ctx.stopControl.IsPaused()) - ctx.engine.BlockFinalized(blocks["C"].Block.Header) - assert.True(t, ctx.stopControl.IsPaused()) - - ctx.engine.BlockFinalized(blocks["D"].Block.Header) - - _, more := <-ctx.engine.Done() // wait for all the blocks to be processed - assert.False(t, more) - - var ok bool - for c := range commits { - fmt.Printf("%s => ok\n", c.String()) - } - _, ok = commits[blocks["A"].ID()] - require.True(t, ok) - _, ok = commits[blocks["B"].ID()] - require.True(t, ok) - _, ok = commits[blocks["C"].ID()] - require.False(t, ok) - _, ok = commits[blocks["D"].ID()] - require.False(t, ok) - - // make sure C and D were not executed - ctx.computationManager.AssertNotCalled( - t, - "ComputeBlock", - mock.Anything, - mock.Anything, - mock.MatchedBy(func(eb *entity.ExecutableBlock) bool { - return eb.ID() == blocks["C"].ID() - }), - mock.Anything) - - ctx.computationManager.AssertNotCalled( - t, - "ComputeBlock", - mock.Anything, - mock.Anything, - mock.MatchedBy(func(eb *entity.ExecutableBlock) bool { - return eb.ID() == blocks["D"].ID() - }), - mock.Anything) - }) -} - -// TestStopAtHeightRaceFinalization test a possible race condition which happens -// when block at stop height N is finalized while N-1 is being executed. 
-// If execution finishes exactly between finalization checking execution state and -// setting block ID to crash, it's possible to miss and never actually stop the EN -func TestStopAtHeightRaceFinalization(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - - blockSealed := unittest.BlockHeaderFixture() - - blocks := make(map[string]*entity.ExecutableBlock) - blocks["A"] = unittest.ExecutableBlockFixtureWithParent(nil, blockSealed, unittest.StateCommitmentPointerFixture()) - blocks["B"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["A"].Block.Header, nil) - blocks["C"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["B"].Block.Header, nil) - - // stop at block B, so B-1 (A) will be last executed - _, _, err := ctx.stopControl.SetStopHeight(blocks["B"].Height(), false) - require.NoError(t, err) - - // log the blocks, so that we can link the block ID in the log with the blocks in tests - logBlocks(blocks) - - commits := make(map[flow.Identifier]flow.StateCommitment) - - ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blocks["A"].Block.Header.ParentID).Return( - *blocks["A"].StartState, nil, - ) - - // make sure the seal height won't trigger state syncing, so that all blocks - // will be executed. - ctx.mockHasWeightAtBlockID(blocks["A"].ID(), true) - ctx.state.On("Sealed").Return(ctx.snapshot) - ctx.snapshot.On("Head").Return(blockSealed, nil) - ctx.mockSnapshot(blocks["A"].Block.Header, unittest.IdentityListFixture(1)) - - executionWg := sync.WaitGroup{} - onPersisted := func(blockID flow.Identifier, commit flow.StateCommitment) { - executionWg.Done() - } - - ctx.blocks.EXPECT().ByID(blocks["A"].ID()).Return(blocks["A"].Block, nil) - - ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blocks["A"].ID()).Return(nil, storageerr.ErrNotFound).Once() - - // second call should come from finalization handler, which should wait for execution to finish before returning. - // This way we simulate possible race condition when block execution finishes exactly in the middle of finalization handler - // setting stopping blockID - finalizationWg := sync.WaitGroup{} - ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blocks["A"].ID()).Run(func(args mock.Arguments) { - executionWg.Wait() - finalizationWg.Done() - }).Return(nil, storageerr.ErrNotFound).Once() - - // second call from finalization handler, third overall - ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blocks["A"].ID()). 
- Return(flow.StateCommitment{}, nil).Maybe() - - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["A"], - unittest.IdentifierFixture(), - true, - *blocks["A"].StartState, - nil) - - assert.False(t, ctx.stopControl.IsPaused()) - - executionWg.Add(1) - ctx.engine.BlockProcessable(blocks["A"].Block.Header, nil) - ctx.engine.BlockProcessable(blocks["B"].Block.Header, nil) - - assert.False(t, ctx.stopControl.IsPaused()) - - finalizationWg.Add(1) - ctx.engine.BlockFinalized(blocks["B"].Block.Header) - - finalizationWg.Wait() - executionWg.Wait() - - _, more := <-ctx.engine.Done() // wait for all the blocks to be processed - assert.False(t, more) - - assert.True(t, ctx.stopControl.IsPaused()) - - var ok bool - - // make sure B and C were not executed - _, ok = commits[blocks["A"].ID()] - require.True(t, ok) - _, ok = commits[blocks["B"].ID()] - require.False(t, ok) - _, ok = commits[blocks["C"].ID()] - require.False(t, ok) - - ctx.computationManager.AssertNotCalled( - t, - "ComputeBlock", - mock.Anything, - mock.Anything, - mock.MatchedBy(func(eb *entity.ExecutableBlock) bool { - return eb.ID() == blocks["B"].ID() - }), - mock.Anything) - - ctx.computationManager.AssertNotCalled( - t, - "ComputeBlock", - mock.Anything, - mock.Anything, - mock.MatchedBy(func(eb *entity.ExecutableBlock) bool { - return eb.ID() == blocks["C"].ID() - }), - mock.Anything) - }) -} - -func TestExecutionGenerationResultsAreChained(t *testing.T) { - - execState := new(stateMock.ExecutionState) - - ctrl := gomock.NewController(t) - me := module.NewMockLocal(ctrl) - - startState := unittest.StateCommitmentFixture() - executableBlock := unittest.ExecutableBlockFixture( - [][]flow.Identifier{{collection1Identity.NodeID}, - {collection1Identity.NodeID}}, - &startState, - ) - previousExecutionResultID := unittest.IdentifierFixture() - - cr := executionUnittest.ComputationResultFixture( - previousExecutionResultID, - nil) - cr.ExecutableBlock = executableBlock - - execState. - On("SaveExecutionResults", mock.Anything, cr). - Return(nil) - - e := Engine{ - execState: execState, - tracer: trace.NewNoopTracer(), - metrics: metrics.NewNoopCollector(), - me: me, - } - - err := e.saveExecutionResults(context.Background(), cr) - assert.NoError(t, err) - - execState.AssertExpectations(t) -} - -func TestExecuteScriptAtBlockID(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - // Meaningless script - script := []byte{1, 1, 2, 3, 5, 8, 11} - scriptResult := []byte{1} - - // Ensure block we're about to query against is executable - blockA := unittest.ExecutableBlockFixture(nil, unittest.StateCommitmentPointerFixture()) - - snapshot := new(protocol.Snapshot) - snapshot.On("Head").Return(blockA.Block.Header, nil) - - commits := make(map[flow.Identifier]flow.StateCommitment) - commits[blockA.ID()] = *blockA.StartState - - ctx.stateCommitmentExist(blockA.ID(), *blockA.StartState) - - ctx.state.On("AtBlockID", blockA.Block.ID()).Return(snapshot) - ctx.executionState.On("NewStorageSnapshot", *blockA.StartState).Return(nil) - - ctx.executionState.On("HasState", *blockA.StartState).Return(true) - - // Successful call to computation manager - ctx.computationManager. - On("ExecuteScript", mock.Anything, script, [][]byte(nil), blockA.Block.Header, nil). 
- Return(scriptResult, nil) - - // Execute our script and expect no error - res, err := ctx.engine.ExecuteScriptAtBlockID(context.Background(), script, nil, blockA.Block.ID()) - assert.NoError(t, err) - assert.Equal(t, scriptResult, res) - - // Assert other components were called as expected - ctx.computationManager.AssertExpectations(t) - ctx.executionState.AssertExpectations(t) - ctx.state.AssertExpectations(t) - }) - }) - - t.Run("return early when state commitment not exist", func(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - // Meaningless script - script := []byte{1, 1, 2, 3, 5, 8, 11} - - // Ensure block we're about to query against is executable - blockA := unittest.ExecutableBlockFixture(nil, unittest.StateCommitmentPointerFixture()) - - // make sure blockID to state commitment mapping exist - ctx.executionState.On("StateCommitmentByBlockID", mock.Anything, blockA.ID()).Return(*blockA.StartState, nil) - - // but the state commitment does not exist (e.g. purged) - ctx.executionState.On("HasState", *blockA.StartState).Return(false) - - // Execute our script and expect no error - _, err := ctx.engine.ExecuteScriptAtBlockID(context.Background(), script, nil, blockA.Block.ID()) - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), "state commitment not found")) - - // Assert other components were called as expected - ctx.executionState.AssertExpectations(t) - ctx.state.AssertExpectations(t) - }) - }) - -} - -func TestUnauthorizedNodeDoesNotBroadcastReceipts(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - - // create blocks with the following relations - // A <- B <- C <- D - blockSealed := unittest.BlockHeaderFixture() - - blocks := make(map[string]*entity.ExecutableBlock) - blocks["A"] = unittest.ExecutableBlockFixtureWithParent(nil, blockSealed, unittest.StateCommitmentPointerFixture()) - - // none of the blocks has any collection, so state is essentially the same - blocks["B"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["A"].Block.Header, blocks["A"].StartState) - blocks["C"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["B"].Block.Header, blocks["B"].StartState) - blocks["D"] = unittest.ExecutableBlockFixtureWithParent(nil, blocks["C"].Block.Header, blocks["C"].StartState) - - // log the blocks, so that we can link the block ID in the log with the blocks in tests - logBlocks(blocks) - - commits := make(map[flow.Identifier]flow.StateCommitment) - commits[blocks["A"].Block.Header.ParentID] = *blocks["A"].StartState - - wg := sync.WaitGroup{} - ctx.mockStateCommitsWithMap(commits) - - onPersisted := func(blockID flow.Identifier, commit flow.StateCommitment) { - wg.Done() - } - - // make sure the seal height won't trigger state syncing, so that all blocks - // will be executed. 
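The broadcast decisions in this test hinge on the node's weight at the executed block. Below is a simplified sketch of the kind of check checkAuthorizedAtBlock performs; the real stateProtocol.IsNodeAuthorizedAt helper may check more than weight, and the protocolState and myNodeID names here are assumptions:

checkAuthorizedAtBlock := func(blockID flow.Identifier) (bool, error) {
	// look up our own identity in the protocol state snapshot at this block
	identity, err := protocolState.AtBlockID(blockID).Identity(myNodeID)
	if err != nil {
		return false, err
	}
	// zero weight means the node is not authorized to broadcast receipts
	return identity.Weight > 0, nil
}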
- ctx.state.On("Sealed").Return(ctx.snapshot) - // a receipt for sealed block won't be broadcasted - ctx.snapshot.On("Head").Return(blockSealed, nil) - - ctx.mockHasWeightAtBlockID(blocks["A"].ID(), true) - identity := *ctx.identity - identity.Weight = 0 - - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["A"], - unittest.IdentifierFixture(), - true, - *blocks["A"].StartState, - nil) - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["B"], - unittest.IdentifierFixture(), - false, - *blocks["B"].StartState, - nil) - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["C"], - unittest.IdentifierFixture(), - true, - *blocks["C"].StartState, - nil) - ctx.assertSuccessfulBlockComputation( - commits, - onPersisted, - blocks["D"], - unittest.IdentifierFixture(), - false, - *blocks["D"].StartState, - nil) - - wg.Add(1) - ctx.mockHasWeightAtBlockID(blocks["A"].ID(), true) - ctx.mockSnapshot(blocks["A"].Block.Header, flow.IdentityList{ctx.identity}) - - err := ctx.engine.handleBlock(context.Background(), blocks["A"].Block) - require.NoError(t, err) - - wg.Add(1) - ctx.mockHasWeightAtBlockID(blocks["B"].ID(), false) - ctx.mockSnapshot(blocks["B"].Block.Header, flow.IdentityList{&identity}) // unauthorized - - err = ctx.engine.handleBlock(context.Background(), blocks["B"].Block) - require.NoError(t, err) - - wg.Add(1) - ctx.mockHasWeightAtBlockID(blocks["C"].ID(), true) - ctx.mockSnapshot(blocks["C"].Block.Header, flow.IdentityList{ctx.identity}) - - err = ctx.engine.handleBlock(context.Background(), blocks["C"].Block) - require.NoError(t, err) - - wg.Add(1) - ctx.mockHasWeightAtBlockID(blocks["D"].ID(), false) - ctx.mockSnapshot(blocks["D"].Block.Header, flow.IdentityList{&identity}) // unauthorized - - err = ctx.engine.handleBlock(context.Background(), blocks["D"].Block) - require.NoError(t, err) - - // // wait until all 4 blocks have been executed - unittest.AssertReturnsBefore(t, wg.Wait, 15*time.Second) - _, more := <-ctx.engine.Done() // wait for all the blocks to be processed - assert.False(t, more) - - require.Len(t, ctx.broadcastedReceipts, 2) - - var ok bool - - // make sure only selected receipts were broadcasted - _, ok = ctx.broadcastedReceipts[blocks["A"].ID()] - require.True(t, ok) - _, ok = ctx.broadcastedReceipts[blocks["B"].ID()] - require.False(t, ok) - _, ok = ctx.broadcastedReceipts[blocks["C"].ID()] - require.True(t, ok) - _, ok = ctx.broadcastedReceipts[blocks["D"].ID()] - require.False(t, ok) - - _, ok = commits[blocks["A"].ID()] - require.True(t, ok) - _, ok = commits[blocks["B"].ID()] - require.True(t, ok) - _, ok = commits[blocks["C"].ID()] - require.True(t, ok) - _, ok = commits[blocks["D"].ID()] - require.True(t, ok) - }) -} - -// func TestShouldTriggerStateSync(t *testing.T) { -// require.True(t, shouldTriggerStateSync(1, 2, 2)) -// require.False(t, shouldTriggerStateSync(1, 1, 2)) -// require.True(t, shouldTriggerStateSync(1, 3, 2)) -// require.True(t, shouldTriggerStateSync(1, 4, 2)) -// -// // there are only 9 sealed and unexecuted blocks between height 20 and 28, -// // haven't reach the threshold 10 yet, so should not trigger -// require.False(t, shouldTriggerStateSync(20, 28, 10)) -// -// // there are 10 sealed and unexecuted blocks between height 20 and 29, -// // reached the threshold 10, so should trigger -// require.True(t, shouldTriggerStateSync(20, 29, 10)) -// } - -func newIngestionEngine(t *testing.T, ps *mocks.ProtocolState, es *mockExecutionState) *Engine { - log := unittest.Logger() - 
metrics := metrics.NewNoopCollector() - tracer, err := trace.NewTracer(log, "test", "test", trace.SensitivityCaptureAll) - require.NoError(t, err) - ctrl := gomock.NewController(t) - net := mocknetwork.NewMockNetwork(ctrl) - request := module.NewMockRequester(ctrl) - var engine *Engine - - // generates signing identity including staking key for signing - seed := make([]byte, crypto.KeyGenSeedMinLen) - n, err := rand.Read(seed) - require.Equal(t, n, crypto.KeyGenSeedMinLen) - require.NoError(t, err) - sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed) - require.NoError(t, err) - myIdentity.StakingPubKey = sk.PublicKey() - me := mocklocal.NewMockLocal(sk, myIdentity.ID(), t) - - blocks := storage.NewMockBlocks(ctrl) - collections := storage.NewMockCollections(ctrl) - events := storage.NewMockEvents(ctrl) - serviceEvents := storage.NewMockServiceEvents(ctrl) - txResults := storage.NewMockTransactionResults(ctrl) - - computationManager := new(computation.ComputationManager) - providerEngine := new(provider.ProviderEngine) - - checkAuthorizedAtBlock := func(blockID flow.Identifier) (bool, error) { - return stateProtocol.IsNodeAuthorizedAt(ps.AtBlockID(blockID), myIdentity.NodeID) - } - - engine, err = New( - log, - net, - me, - request, - ps, - blocks, - collections, - events, - serviceEvents, - txResults, - computationManager, - providerEngine, - es, - metrics, - tracer, - false, - checkAuthorizedAtBlock, - nil, - nil, - NewStopControl(zerolog.Nop(), false, 0), - ) - - require.NoError(t, err) - return engine -} - -func logChain(chain []*flow.Block) { - log := unittest.Logger() - for i, block := range chain { - log.Info().Msgf("block %v, height: %v, ID: %v", i, block.Header.Height, block.ID()) - } -} - -func TestLoadingUnexecutedBlocks(t *testing.T) { - t.Run("only genesis", func(t *testing.T) { - ps := mocks.NewProtocolState() - - chain, result, seal := unittest.ChainFixture(0) - genesis := chain[0] - - logChain(chain) - - require.NoError(t, ps.Bootstrap(genesis, result, seal)) - - es := newMockExecutionState(seal) - engine := newIngestionEngine(t, ps, es) - - finalized, pending, err := engine.unexecutedBlocks() - require.NoError(t, err) - - unittest.IDsEqual(t, []flow.Identifier{}, finalized) - unittest.IDsEqual(t, []flow.Identifier{}, pending) - }) - - t.Run("no finalized, nor pending unexected", func(t *testing.T) { - ps := mocks.NewProtocolState() - - chain, result, seal := unittest.ChainFixture(4) - genesis, blockA, blockB, blockC, blockD := - chain[0], chain[1], chain[2], chain[3], chain[4] - - logChain(chain) - - require.NoError(t, ps.Bootstrap(genesis, result, seal)) - require.NoError(t, ps.Extend(blockA)) - require.NoError(t, ps.Extend(blockB)) - require.NoError(t, ps.Extend(blockC)) - require.NoError(t, ps.Extend(blockD)) - - es := newMockExecutionState(seal) - engine := newIngestionEngine(t, ps, es) - - finalized, pending, err := engine.unexecutedBlocks() - require.NoError(t, err) - - unittest.IDsEqual(t, []flow.Identifier{}, finalized) - unittest.IDsEqual(t, []flow.Identifier{blockA.ID(), blockB.ID(), blockC.ID(), blockD.ID()}, pending) - }) - - t.Run("no finalized, some pending executed", func(t *testing.T) { - ps := mocks.NewProtocolState() - - chain, result, seal := unittest.ChainFixture(4) - genesis, blockA, blockB, blockC, blockD := - chain[0], chain[1], chain[2], chain[3], chain[4] - - logChain(chain) - - require.NoError(t, ps.Bootstrap(genesis, result, seal)) - require.NoError(t, ps.Extend(blockA)) - require.NoError(t, ps.Extend(blockB)) - require.NoError(t, 
ps.Extend(blockC)) - require.NoError(t, ps.Extend(blockD)) - - es := newMockExecutionState(seal) - engine := newIngestionEngine(t, ps, es) - - es.ExecuteBlock(t, blockA) - es.ExecuteBlock(t, blockB) - - finalized, pending, err := engine.unexecutedBlocks() - require.NoError(t, err) - - unittest.IDsEqual(t, []flow.Identifier{}, finalized) - unittest.IDsEqual(t, []flow.Identifier{blockC.ID(), blockD.ID()}, pending) - }) - - t.Run("all finalized have been executed, and no pending executed", func(t *testing.T) { - ps := mocks.NewProtocolState() - - chain, result, seal := unittest.ChainFixture(4) - genesis, blockA, blockB, blockC, blockD := - chain[0], chain[1], chain[2], chain[3], chain[4] - - logChain(chain) - - require.NoError(t, ps.Bootstrap(genesis, result, seal)) - require.NoError(t, ps.Extend(blockA)) - require.NoError(t, ps.Extend(blockB)) - require.NoError(t, ps.Extend(blockC)) - require.NoError(t, ps.Extend(blockD)) - - require.NoError(t, ps.Finalize(blockC.ID())) - - es := newMockExecutionState(seal) - engine := newIngestionEngine(t, ps, es) - - es.ExecuteBlock(t, blockA) - es.ExecuteBlock(t, blockB) - es.ExecuteBlock(t, blockC) - - finalized, pending, err := engine.unexecutedBlocks() - require.NoError(t, err) - - unittest.IDsEqual(t, []flow.Identifier{}, finalized) - unittest.IDsEqual(t, []flow.Identifier{blockD.ID()}, pending) - }) - - t.Run("some finalized are executed and conflicting are executed", func(t *testing.T) { - ps := mocks.NewProtocolState() - - chain, result, seal := unittest.ChainFixture(4) - genesis, blockA, blockB, blockC, blockD := - chain[0], chain[1], chain[2], chain[3], chain[4] - - logChain(chain) - - require.NoError(t, ps.Bootstrap(genesis, result, seal)) - require.NoError(t, ps.Extend(blockA)) - require.NoError(t, ps.Extend(blockB)) - require.NoError(t, ps.Extend(blockC)) - require.NoError(t, ps.Extend(blockD)) - - require.NoError(t, ps.Finalize(blockC.ID())) - - es := newMockExecutionState(seal) - engine := newIngestionEngine(t, ps, es) - - es.ExecuteBlock(t, blockA) - es.ExecuteBlock(t, blockB) - es.ExecuteBlock(t, blockC) - - finalized, pending, err := engine.unexecutedBlocks() - require.NoError(t, err) - - unittest.IDsEqual(t, []flow.Identifier{}, finalized) - unittest.IDsEqual(t, []flow.Identifier{blockD.ID()}, pending) - }) - - t.Run("all pending executed", func(t *testing.T) { - ps := mocks.NewProtocolState() - - chain, result, seal := unittest.ChainFixture(4) - genesis, blockA, blockB, blockC, blockD := - chain[0], chain[1], chain[2], chain[3], chain[4] - - logChain(chain) - - require.NoError(t, ps.Bootstrap(genesis, result, seal)) - require.NoError(t, ps.Extend(blockA)) - require.NoError(t, ps.Extend(blockB)) - require.NoError(t, ps.Extend(blockC)) - require.NoError(t, ps.Extend(blockD)) - require.NoError(t, ps.Finalize(blockA.ID())) - - es := newMockExecutionState(seal) - engine := newIngestionEngine(t, ps, es) - - es.ExecuteBlock(t, blockA) - es.ExecuteBlock(t, blockB) - es.ExecuteBlock(t, blockC) - es.ExecuteBlock(t, blockD) - - finalized, pending, err := engine.unexecutedBlocks() - require.NoError(t, err) - - unittest.IDsEqual(t, []flow.Identifier{}, finalized) - unittest.IDsEqual(t, []flow.Identifier{}, pending) - }) - - t.Run("some fork is executed", func(t *testing.T) { - ps := mocks.NewProtocolState() - - // Genesis <- A <- B <- C (finalized) <- D <- E <- F - // ^--- G <- H - // ^-- I - // ^--- J <- K - chain, result, seal := unittest.ChainFixture(6) - genesis, blockA, blockB, blockC, blockD, blockE, blockF := - chain[0], chain[1], chain[2], 
chain[3], chain[4], chain[5], chain[6] - - fork1 := unittest.ChainFixtureFrom(2, blockD.Header) - blockG, blockH := fork1[0], fork1[1] - - fork2 := unittest.ChainFixtureFrom(1, blockC.Header) - blockI := fork2[0] - - fork3 := unittest.ChainFixtureFrom(2, blockB.Header) - blockJ, blockK := fork3[0], fork3[1] - - logChain(chain) - logChain(fork1) - logChain(fork2) - logChain(fork3) - - require.NoError(t, ps.Bootstrap(genesis, result, seal)) - require.NoError(t, ps.Extend(blockA)) - require.NoError(t, ps.Extend(blockB)) - require.NoError(t, ps.Extend(blockC)) - require.NoError(t, ps.Extend(blockI)) - require.NoError(t, ps.Extend(blockJ)) - require.NoError(t, ps.Extend(blockK)) - require.NoError(t, ps.Extend(blockD)) - require.NoError(t, ps.Extend(blockE)) - require.NoError(t, ps.Extend(blockF)) - require.NoError(t, ps.Extend(blockG)) - require.NoError(t, ps.Extend(blockH)) - - require.NoError(t, ps.Finalize(blockC.ID())) - - es := newMockExecutionState(seal) - - engine := newIngestionEngine(t, ps, es) - - es.ExecuteBlock(t, blockA) - es.ExecuteBlock(t, blockB) - es.ExecuteBlock(t, blockC) - es.ExecuteBlock(t, blockD) - es.ExecuteBlock(t, blockG) - es.ExecuteBlock(t, blockJ) - - finalized, pending, err := engine.unexecutedBlocks() - require.NoError(t, err) - - unittest.IDsEqual(t, []flow.Identifier{}, finalized) - unittest.IDsEqual(t, []flow.Identifier{ - blockI.ID(), // I is still pending, and unexecuted - blockE.ID(), - blockF.ID(), - // note K is not a pending block, but a conflicting block, even if it's not executed, - // it won't included - blockH.ID()}, - pending) - }) -} - -// TestExecutedBlockIsUploaded tests that the engine uploads the execution result -func TestExecutedBlockIsUploaded(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - - // A <- B - blockA := unittest.BlockHeaderFixture() - blockB := unittest.ExecutableBlockFixtureWithParent(nil, blockA, unittest.StateCommitmentPointerFixture()) - - ctx.mockHasWeightAtBlockID(blockA.ID(), true) - ctx.mockHasWeightAtBlockID(blockB.ID(), true) - ctx.mockSnapshot(blockB.Block.Header, unittest.IdentityListFixture(1)) - - parentBlockExecutionResultID := unittest.IdentifierFixture() - computationResultB := executionUnittest.ComputationResultForBlockFixture( - parentBlockExecutionResultID, - blockB) - - // configure upload manager with a single uploader - uploader1 := uploadermock.NewUploader(ctx.t) - uploader1.On("Upload", computationResultB).Return(nil).Once() - ctx.uploadMgr.AddUploader(uploader1) - - // blockA's start state is its parent's state commitment, - // and blockA's parent has been executed. 
- commits := make(map[flow.Identifier]flow.StateCommitment) - commits[blockB.Block.Header.ParentID] = *blockB.StartState - wg := sync.WaitGroup{} - ctx.mockStateCommitsWithMap(commits) - - ctx.state.On("Sealed").Return(ctx.snapshot) - ctx.snapshot.On("Head").Return(blockA, nil) - - ctx.assertSuccessfulBlockComputation( - commits, - func(blockID flow.Identifier, commit flow.StateCommitment) { - wg.Done() - }, - blockB, - parentBlockExecutionResultID, - true, - *blockB.StartState, - computationResultB) - - wg.Add(1) // wait for block B to be executed - err := ctx.engine.handleBlock(context.Background(), blockB.Block) - require.NoError(t, err) - - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) - - _, more := <-ctx.engine.Done() // wait for all the blocks to be processed - require.False(t, more) - - _, ok := commits[blockB.ID()] - require.True(t, ok) - - }) -} - -// TestExecutedBlockUploadedFailureDoesntBlock tests that block processing continues even the -// uploader fails with an error -func TestExecutedBlockUploadedFailureDoesntBlock(t *testing.T) { - runWithEngine(t, func(ctx testingContext) { - - // A <- B - blockA := unittest.BlockHeaderFixture() - blockB := unittest.ExecutableBlockFixtureWithParent(nil, blockA, unittest.StateCommitmentPointerFixture()) - - ctx.mockHasWeightAtBlockID(blockA.ID(), true) - ctx.mockHasWeightAtBlockID(blockB.ID(), true) - ctx.mockSnapshot(blockB.Block.Header, unittest.IdentityListFixture(1)) - - previousExecutionResultID := unittest.IdentifierFixture() - - computationResultB := executionUnittest.ComputationResultForBlockFixture( - previousExecutionResultID, - blockB) - - // configure upload manager with a single uploader that returns an error - uploader1 := uploadermock.NewUploader(ctx.t) - uploader1.On("Upload", computationResultB).Return(fmt.Errorf("error uploading")).Once() - ctx.uploadMgr.AddUploader(uploader1) - - // blockA's start state is its parent's state commitment, - // and blockA's parent has been executed. 
- commits := make(map[flow.Identifier]flow.StateCommitment)
- commits[blockB.Block.Header.ParentID] = *blockB.StartState
- wg := sync.WaitGroup{}
- ctx.mockStateCommitsWithMap(commits)
-
- ctx.state.On("Sealed").Return(ctx.snapshot)
- ctx.snapshot.On("Head").Return(blockA, nil)
-
- ctx.assertSuccessfulBlockComputation(
- commits,
- func(blockID flow.Identifier, commit flow.StateCommitment) {
- wg.Done()
- },
- blockB,
- previousExecutionResultID,
- true,
- *blockB.StartState,
- computationResultB)
-
- wg.Add(1) // wait for block B to be executed
- err := ctx.engine.handleBlock(context.Background(), blockB.Block)
- require.NoError(t, err)
-
- unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second)
-
- _, more := <-ctx.engine.Done() // wait for all the blocks to be processed
- require.False(t, more)
-
- _, ok := commits[blockB.ID()]
- require.True(t, ok)
-
- })
-}
diff --git a/engine/execution/ingestion/fetcher.go b/engine/execution/ingestion/fetcher.go
new file mode 100644
index 00000000000..1bf1e627a41
--- /dev/null
+++ b/engine/execution/ingestion/fetcher.go
@@ -0,0 +1,13 @@
+package ingestion
+
+import "github.com/onflow/flow-go/model/flow"
+
+// CollectionFetcher abstracts the details of how to fetch collections
+type CollectionFetcher interface {
+ // FetchCollection decides which collection nodes to fetch the collection from
+ // No error is expected during normal operation
+ FetchCollection(blockID flow.Identifier, height uint64, guarantee *flow.CollectionGuarantee) error
+
+ // Force forces the requests to be sent immediately
+ Force()
+}
diff --git a/engine/execution/ingestion/fetcher/access_fetcher.go b/engine/execution/ingestion/fetcher/access_fetcher.go
new file mode 100644
index 00000000000..75a19f3e4da
--- /dev/null
+++ b/engine/execution/ingestion/fetcher/access_fetcher.go
@@ -0,0 +1,180 @@
+package fetcher
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/rs/zerolog"
+ "github.com/sethvargo/go-retry"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "github.com/onflow/crypto"
+ "github.com/onflow/flow/protobuf/go/flow/access"
+
+ "github.com/onflow/flow-go/engine/common/requester"
+ commonrpc "github.com/onflow/flow-go/engine/common/rpc"
+ "github.com/onflow/flow-go/engine/common/rpc/convert"
+ "github.com/onflow/flow-go/engine/execution/ingestion"
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/module/component"
+ "github.com/onflow/flow-go/module/grpcclient"
+ "github.com/onflow/flow-go/module/irrecoverable"
+ "github.com/onflow/flow-go/utils/grpcutils"
+)
+
+type AccessCollectionFetcher struct {
+ *component.ComponentManager
+ log zerolog.Logger
+
+ handler requester.HandleFunc
+ client access.AccessAPIClient
+ chain flow.Chain
+ originID flow.Identifier
+
+ guaranteeInfos chan guaranteeInfo
+}
+
+type guaranteeInfo struct {
+ blockID flow.Identifier
+ height uint64
+ colID flow.Identifier
+}
+
+var _ ingestion.CollectionFetcher = (*AccessCollectionFetcher)(nil)
+var _ ingestion.CollectionRequester = (*AccessCollectionFetcher)(nil)
+
+func NewAccessCollectionFetcher(
+ logger zerolog.Logger, accessURL string, networkPubKey crypto.PublicKey, nodeID flow.Identifier, chain flow.Chain) (
+ *AccessCollectionFetcher, error) {
+
+ tlsConfig, err := grpcutils.DefaultClientTLSConfig(networkPubKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create tls config: %w", err)
+ }
+
+ accessAddress := convertAccessAddrFromState(accessURL)
+
+ lg := logger.With().Str("engine", "collection_fetcher").Logger()
+
+ 
lg.Info().Msgf("dailing access rpc at %s", accessAddress) + + collectionRPCConn, err := grpc.Dial( + accessAddress, + grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(commonrpc.DefaultMaxResponseMsgSize)), + ) + if err != nil { + return nil, fmt.Errorf("failed to connect to collection rpc: %w", err) + } + + lg.Info().Msgf("connected to access rpc at %s", accessAddress) + + // make a large enough buffer so that it is able to hold all the guarantees + // on startup and not block the main thread. + // this case would only happen if there are lots of un-executed finalized blocks. + // making sure the --enable-new-ingestion-engine=true flag is on to make use + // of the new ingestion engine for catching up, which loads less un-executed blocks + // during startup. + bufferSize := 100_000 + noopHandler := func(flow.Identifier, flow.Entity) {} + e := &AccessCollectionFetcher{ + log: lg, + handler: noopHandler, + client: access.NewAccessAPIClient(collectionRPCConn), + chain: chain, + originID: nodeID, + guaranteeInfos: make(chan guaranteeInfo, bufferSize), + } + + builder := component.NewComponentManagerBuilder().AddWorker(e.launchWorker) + + e.ComponentManager = builder.Build() + + return e, nil +} + +// port number depending on the insecureAccessAPI arg. +func convertAccessAddrFromState(address string) string { + // remove gossip port from access address and add respective secure or insecure port + var accessAddress strings.Builder + accessAddress.WriteString(strings.Split(address, ":")[0]) + + accessAddress.WriteString(fmt.Sprintf(":%s", grpcclient.DefaultAccessAPISecurePort)) + + return accessAddress.String() +} + +func (f *AccessCollectionFetcher) FetchCollection(blockID flow.Identifier, height uint64, guarantee *flow.CollectionGuarantee) error { + f.log.Debug().Hex("blockID", blockID[:]).Uint64("height", height).Hex("col_id", guarantee.CollectionID[:]). + Msgf("fetching collection guarantee") + + f.guaranteeInfos <- guaranteeInfo{ + blockID: blockID, + height: height, + colID: guarantee.CollectionID, + } + + return nil +} + +func (f *AccessCollectionFetcher) Force() { +} + +func (f *AccessCollectionFetcher) WithHandle(handler requester.HandleFunc) { + f.handler = handler +} + +func (f *AccessCollectionFetcher) launchWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + + f.log.Info().Msg("launching collection fetcher worker") + + for { + select { + case <-ctx.Done(): + return + case guaranteeInfo := <-f.guaranteeInfos: + err := f.fetchCollection(ctx, guaranteeInfo) + if err != nil { + ctx.Throw(fmt.Errorf("failed to fetch collection: %w", err)) + } + } + } +} + +func (f *AccessCollectionFetcher) fetchCollection(ctx irrecoverable.SignalerContext, g guaranteeInfo) error { + backoff := retry.NewConstant(3 * time.Second) + return retry.Do(ctx, backoff, func(ctx context.Context) error { + f.log.Debug().Hex("blockID", g.blockID[:]).Uint64("height", g.height).Hex("col_id", g.colID[:]).Msgf("fetching collection") + resp, err := f.client.GetFullCollectionByID(context.Background(), + &access.GetFullCollectionByIDRequest{ + Id: g.colID[:], + }) + if err != nil { + f.log.Error().Err(err).Hex("blockID", g.blockID[:]).Uint64("height", g.height). + Msgf("failed to fetch collection %v", g.colID) + return retry.RetryableError(err) + } + + col, err := convert.MessageToFullCollection(resp.Transactions, f.chain) + if err != nil { + f.log.Error().Err(err).Hex("blockID", g.blockID[:]).Uint64("height", g.height). 
+ Msgf("failed to convert collection %v", g.colID) + return err + } + + // the received collection should match with the guarantee, + // validate the collection before processing it + if col.ID() != g.colID { + f.log.Error().Hex("blockID", g.blockID[:]).Uint64("height", g.height). + Msgf("collection id mismatch %v != %v", col.ID(), g.colID) + return fmt.Errorf("collection id mismatch %v != %v", col.ID(), g.colID) + } + + f.handler(f.originID, col) + return nil + }) +} diff --git a/engine/execution/ingestion/fetcher/fetcher.go b/engine/execution/ingestion/fetcher/fetcher.go new file mode 100644 index 00000000000..0bb51f394f7 --- /dev/null +++ b/engine/execution/ingestion/fetcher/fetcher.go @@ -0,0 +1,88 @@ +package fetcher + +import ( + "fmt" + "regexp" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/state/protocol" +) + +var onlyOnflowRegex = regexp.MustCompile(`.*\.(onflow\.org|dapper-flow\.com):3569$`) + +type CollectionFetcher struct { + log zerolog.Logger + request module.Requester // used to request collections + state protocol.State + // This is included to temporarily work around an issue observed on a small number of ENs. + // It works around an issue where some collection nodes are not configured with enough + // file descriptors causing connection failures. + onflowOnlyLNs bool +} + +func NewCollectionFetcher( + log zerolog.Logger, + request module.Requester, + state protocol.State, + onflowOnlyLNs bool, +) *CollectionFetcher { + return &CollectionFetcher{ + log: log.With().Str("component", "ingestion_engine_collection_fetcher").Logger(), + request: request, + state: state, + onflowOnlyLNs: onflowOnlyLNs, + } +} + +// FetchCollection decides which collection nodes to fetch the collection from +// No error is expected during normal operation +func (e *CollectionFetcher) FetchCollection(blockID flow.Identifier, height uint64, guarantee *flow.CollectionGuarantee) error { + guarantors, err := protocol.FindGuarantors(e.state, guarantee) + if err != nil { + // execution node executes certified blocks, which means there is a quorum of consensus nodes who + // have validated the block payload. And that validation includes checking the guarantors are correct. + // Based on that assumption, failing to find guarantors for guarantees contained in an incorporated block + // should be treated as fatal error + e.log.Fatal().Err(err).Msgf("failed to find guarantors for collection %v at block %v, height %v", + guarantee.CollectionID, + blockID, + height, + ) + return fmt.Errorf("could not find guarantors: %w", err) + } + + filters := []flow.IdentityFilter[flow.Identity]{ + filter.HasNodeID[flow.Identity](guarantors...), + } + + // This is included to temporarily work around an issue observed on a small number of ENs. + // It works around an issue where some collection nodes are not configured with enough + // file descriptors causing connection failures. This will be removed once a + // proper fix is in place. + if e.onflowOnlyLNs { + // func(Identity("verification-049.mainnet20.nodes.onflow.org:3569")) => true + // func(Identity("verification-049.hello.org:3569")) => false + filters = append(filters, func(identity *flow.Identity) bool { + return onlyOnflowRegex.MatchString(identity.Address) + }) + } + + e.log.Debug().Bool("onflowOnlyLNs", e.onflowOnlyLNs). 
+ Msgf("queued collection %v for block %v, height %v from guarantors: %v", + guarantee.CollectionID, blockID, height, guarantors) + + // queue the collection to be requested from one of the guarantors + e.request.EntityByID(guarantee.CollectionID, filter.And( + filters..., + )) + + return nil +} + +func (e *CollectionFetcher) Force() { + e.request.Force() +} diff --git a/engine/execution/ingestion/fetcher/fetcher_test.go b/engine/execution/ingestion/fetcher/fetcher_test.go new file mode 100644 index 00000000000..165b4a3ec15 --- /dev/null +++ b/engine/execution/ingestion/fetcher/fetcher_test.go @@ -0,0 +1,66 @@ +package fetcher_test + +import ( + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine/execution/ingestion" + "github.com/onflow/flow-go/engine/execution/ingestion/fetcher" + "github.com/onflow/flow-go/model/flow" + modulemock "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/module/signature" + statemock "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +var _ ingestion.CollectionFetcher = (*fetcher.CollectionFetcher)(nil) + +func TestFetch(t *testing.T) { + // prepare data + blockID := unittest.IdentifierFixture() + height := uint64(10) + nodes := unittest.IdentityListFixture(10) + guarantee := unittest.CollectionGuaranteeFixture() + guarantee.ReferenceBlockID = unittest.IdentifierFixture() + signers, err := signature.EncodeSignersToIndices(nodes.NodeIDs(), flow.IdentifierList{nodes[0].NodeID, nodes[1].NodeID}) + require.NoError(t, err) + guarantee.SignerIndices = signers + + // mock depedencies + cluster := new(statemock.Cluster) + cluster.On("Members").Return(nodes.ToSkeleton()) + epoch := new(statemock.CommittedEpoch) + epoch.On("ClusterByChainID", guarantee.ClusterChainID).Return(cluster, nil) + epochs := new(statemock.EpochQuery) + epochs.On("Current").Return(epoch, nil) + snapshot := new(statemock.Snapshot) + snapshot.On("Epochs").Return(epochs) + state := new(statemock.State) + state.On("AtBlockID", guarantee.ReferenceBlockID).Return(snapshot).Times(1) + + request := new(modulemock.Requester) + var filter flow.IdentityFilter[flow.Identity] + request.On("EntityByID", guarantee.CollectionID, mock.Anything).Run( + func(args mock.Arguments) { + filter = args.Get(1).(flow.IdentityFilter[flow.Identity]) + }, + ).Return().Times(1) + + // create fetcher + fetcher := fetcher.NewCollectionFetcher(unittest.Logger(), request, state, false) + + // fetch collections + err = fetcher.FetchCollection(blockID, height, guarantee) + require.NoError(t, err) + + request.AssertExpectations(t) + state.AssertExpectations(t) + + // verify request.EntityByID is called with the right filter + for i, signer := range nodes { + isSigner := i == 0 || i == 1 + require.Equal(t, isSigner, filter(signer)) + } +} diff --git a/engine/execution/ingestion/ingest_rpc.go b/engine/execution/ingestion/ingest_rpc.go deleted file mode 100644 index a0c71c51db4..00000000000 --- a/engine/execution/ingestion/ingest_rpc.go +++ /dev/null @@ -1,20 +0,0 @@ -package ingestion - -import ( - "context" - - "github.com/onflow/flow-go/model/flow" -) - -// IngestRPC represents the RPC calls that the execution ingest engine exposes to support the Access Node API calls -type IngestRPC interface { - - // ExecuteScriptAtBlockID executes a script at the given Block id - ExecuteScriptAtBlockID(ctx context.Context, script []byte, arguments [][]byte, blockID flow.Identifier) ([]byte, error) - - // 
GetAccount returns the Account details at the given Block id
- GetAccount(ctx context.Context, address flow.Address, blockID flow.Identifier) (*flow.Account, error)
-
- // GetRegisterAtBlockID returns the value of a register at the given Block id (if available)
- GetRegisterAtBlockID(ctx context.Context, owner, key []byte, blockID flow.Identifier) ([]byte, error)
-}
diff --git a/engine/execution/ingestion/loader.go b/engine/execution/ingestion/loader.go
new file mode 100644
index 00000000000..410c131c647
--- /dev/null
+++ b/engine/execution/ingestion/loader.go
@@ -0,0 +1,11 @@
+package ingestion
+
+import (
+ "context"
+
+ "github.com/onflow/flow-go/model/flow"
+)
+
+type BlockLoader interface {
+ LoadUnexecuted(ctx context.Context) ([]flow.Identifier, error)
+}
diff --git a/engine/execution/ingestion/loader/unexecuted_loader.go b/engine/execution/ingestion/loader/unexecuted_loader.go
new file mode 100644
index 00000000000..caf90ec5347
--- /dev/null
+++ b/engine/execution/ingestion/loader/unexecuted_loader.go
@@ -0,0 +1,223 @@
+package loader
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/rs/zerolog"
+
+ "github.com/onflow/flow-go/engine/execution/state"
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/state/protocol"
+ "github.com/onflow/flow-go/storage"
+ "github.com/onflow/flow-go/utils/logging"
+)
+
+// Deprecated: Storehouse is going to use the unfinalized loader instead.
+type UnexecutedLoader struct {
+ log zerolog.Logger
+ state protocol.State
+ headers storage.Headers
+ execState state.ExecutionState
+}
+
+func NewUnexecutedLoader(
+ log zerolog.Logger,
+ state protocol.State,
+ headers storage.Headers,
+ execState state.ExecutionState,
+) *UnexecutedLoader {
+ return &UnexecutedLoader{
+ log: log.With().Str("component", "ingestion_engine_unexecuted_loader").Logger(),
+ state: state,
+ headers: headers,
+ execState: execState,
+ }
+}
+
+// LoadUnexecuted loads all unexecuted and validated blocks;
+// any errors returned are exceptions
+func (e *UnexecutedLoader) LoadUnexecuted(ctx context.Context) ([]flow.Identifier, error) {
+ // saving an executed block is currently not transactional, so it's possible
+ // the block is marked as executed but the receipt might not be saved during a crash.
+ // in order to mitigate this problem, we always re-execute the last executed and finalized
+ // block.
+ // there is an exception: if the last executed block is a root block, then don't execute it,
+ // because the root has already been executed during the bootstrapping phase. And re-executing
+ // a root block will fail, because the root block doesn't have a parent block, so the
+ // parent result it needs could not be retrieved.
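The `BlockLoader` abstraction added in `loader.go` above is deliberately small: a loader only reports which block IDs still need execution. As a minimal sketch of how a startup routine might consume any such loader (the `reloadOnStartup` function and the `enqueue` callback are hypothetical names, not part of this PR):

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

// BlockLoader mirrors the interface added in engine/execution/ingestion/loader.go.
type BlockLoader interface {
	LoadUnexecuted(ctx context.Context) ([]flow.Identifier, error)
}

// reloadOnStartup is a hypothetical startup hook: it asks the loader for all
// unexecuted blocks and re-enqueues each one for execution, preserving the
// order the loader returns (finalized blocks come before pending ones).
func reloadOnStartup(ctx context.Context, loader BlockLoader, enqueue func(flow.Identifier)) error {
	blockIDs, err := loader.LoadUnexecuted(ctx)
	if err != nil {
		return fmt.Errorf("could not load unexecuted blocks: %w", err)
	}
	for _, blockID := range blockIDs {
		enqueue(blockID)
	}
	return nil
}
```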
+ // TODO: remove this, when saving an executed block is transactional
+ lastExecutedHeight, lastExecutedID, err := e.execState.GetLastExecutedBlockID(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("could not get last executed: %w", err)
+ }
+
+ last, err := e.headers.ByBlockID(lastExecutedID)
+ if err != nil {
+ return nil, fmt.Errorf("could not get last executed finalized block by ID: %w", err)
+ }
+
+ // don't reload root block
+ rootBlock := e.state.Params().SealedRoot()
+
+ blockIDs := make([]flow.Identifier, 0)
+ isRoot := rootBlock.ID() == last.ID()
+ if !isRoot {
+ executed, err := e.execState.IsBlockExecuted(lastExecutedHeight, lastExecutedID)
+ if err != nil {
+ return nil, fmt.Errorf("cannot check if the last executed finalized block %v has been executed: %w", lastExecutedID, err)
+ }
+ if !executed {
+ // this should not happen, but if it does, execution should still work
+ e.log.Warn().
+ Hex("block_id", lastExecutedID[:]).
+ Msg("block marked as highest executed one, but not executable - internal inconsistency")
+
+ blockIDs = append(blockIDs, lastExecutedID)
+ }
+ }
+
+ finalized, pending, err := e.unexecutedBlocks(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("could not reload unexecuted blocks: %w", err)
+ }
+
+ unexecuted := append(finalized, pending...)
+
+ log := e.log.With().
+ Int("total", len(unexecuted)).
+ Int("finalized", len(finalized)).
+ Int("pending", len(pending)).
+ Uint64("last_executed", lastExecutedHeight).
+ Hex("last_executed_id", lastExecutedID[:]).
+ Logger()
+
+ log.Info().Msg("reloading unexecuted blocks")
+
+ for _, blockID := range unexecuted {
+ blockIDs = append(blockIDs, blockID)
+ e.log.Debug().Hex("block_id", blockID[:]).Msg("reloaded block")
+ }
+
+ log.Info().Msg("all unexecuted have been successfully reloaded")
+
+ return blockIDs, nil
+}
+
+func (e *UnexecutedLoader) unexecutedBlocks(ctx context.Context) (
+ finalized []flow.Identifier,
+ pending []flow.Identifier,
+ err error,
+) {
+ // pin the snapshot so that finalizedUnexecutedBlocks and pendingUnexecutedBlocks are based
+ // on the same snapshot.
+ snapshot := e.state.Final()
+
+ finalized, err = e.finalizedUnexecutedBlocks(ctx, snapshot)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not read finalized unexecuted blocks: %w", err)
+ }
+
+ pending, err = e.pendingUnexecutedBlocks(ctx, snapshot)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not read pending unexecuted blocks: %w", err)
+ }
+
+ return finalized, pending, nil
+}
+
+func (e *UnexecutedLoader) finalizedUnexecutedBlocks(ctx context.Context, finalized protocol.Snapshot) (
+ []flow.Identifier,
+ error,
+) {
+ // get finalized height
+ final, err := finalized.Head()
+ if err != nil {
+ return nil, fmt.Errorf("could not get finalized block: %w", err)
+ }
+
+ // find the first unexecuted and finalized block.
+ // We iterate from the last finalized, check if it has been executed,
+ // if not, keep going to the lower height, until we find an executed
+ // block, and then the next height is the first unexecuted.
+ // If there is only one finalized block and it's executed (i.e. root block),
+ // then the firstUnexecuted is an unfinalized block, which is ok,
+ // because the next loop will ensure it only iterates through finalized
+ // blocks.
+ lastExecuted := final.Height
+
+ // dynamically bootstrapped execution node will reload blocks from
+ // [sealedRoot.Height + 1, finalizedRoot.Height] and execute them on startup.
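The backward scan described in the comment just above (walk down from the latest finalized height until an executed block is found) can also be read in isolation. A condensed sketch of the same walk, with the storage lookup abstracted behind a function value (the signature is assumed here purely for illustration):

```go
package sketch

// firstUnexecutedHeight walks down from the last finalized height until it
// finds an executed block; the height right above it is the first unexecuted
// finalized height. rootHeight bounds the walk, because the sealed root block
// is always executed during bootstrapping.
func firstUnexecutedHeight(
	finalHeight, rootHeight uint64,
	isExecuted func(height uint64) (bool, error),
) (uint64, error) {
	h := finalHeight
	for ; h > rootHeight; h-- {
		executed, err := isExecuted(h)
		if err != nil {
			return 0, err
		}
		if executed {
			break
		}
	}
	// if nothing above the root is executed, this returns rootHeight+1,
	// matching the loader's behavior of reloading everything after the root
	return h + 1, nil
}
```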
+ rootBlock := e.state.Params().SealedRoot() + + for ; lastExecuted > rootBlock.Height; lastExecuted-- { + finalizedID, err := e.headers.BlockIDByHeight(lastExecuted) + if err != nil { + return nil, fmt.Errorf("could not get header at height: %v, %w", lastExecuted, err) + } + + executed, err := e.execState.IsBlockExecuted(lastExecuted, finalizedID) + if err != nil { + return nil, fmt.Errorf("could not check whether block is executed: %w", err) + } + + if executed { + break + } + } + + firstUnexecuted := lastExecuted + 1 + + unexecuted := make([]flow.Identifier, 0) + + // starting from the first unexecuted block, go through each unexecuted and finalized block + // reload its block to execution queues + for height := firstUnexecuted; height <= final.Height; height++ { + finalizedID, err := e.headers.BlockIDByHeight(height) + if err != nil { + return nil, fmt.Errorf("could not get header at height: %v, %w", height, err) + } + + unexecuted = append(unexecuted, finalizedID) + } + + e.log.Info(). + Uint64("last_finalized", final.Height). + Uint64("last_finalized_executed", lastExecuted). + Uint64("sealed_root_height", rootBlock.Height). + Hex("sealed_root_id", logging.Entity(rootBlock)). + Uint64("first_unexecuted", firstUnexecuted). + Int("total_finalized_unexecuted", len(unexecuted)). + Msgf("finalized unexecuted blocks") + + return unexecuted, nil +} + +func (e *UnexecutedLoader) pendingUnexecutedBlocks(ctx context.Context, finalized protocol.Snapshot) ( + []flow.Identifier, + error, +) { + pendings, err := finalized.Descendants() + if err != nil { + return nil, fmt.Errorf("could not get pending blocks: %w", err) + } + + unexecuted := make([]flow.Identifier, 0) + + for _, pending := range pendings { + p, err := e.headers.ByBlockID(pending) + if err != nil { + return nil, fmt.Errorf("could not get header by block id: %w", err) + } + executed, err := e.execState.IsBlockExecuted(p.Height, pending) + if err != nil { + return nil, fmt.Errorf("could not check block executed or not: %w", err) + } + + if !executed { + unexecuted = append(unexecuted, pending) + } + } + + return unexecuted, nil +} diff --git a/engine/execution/ingestion/loader/unexecuted_loader_test.go b/engine/execution/ingestion/loader/unexecuted_loader_test.go new file mode 100644 index 00000000000..00b31d684cd --- /dev/null +++ b/engine/execution/ingestion/loader/unexecuted_loader_test.go @@ -0,0 +1,377 @@ +package loader_test + +import ( + "context" + "sync" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine/execution/ingestion" + "github.com/onflow/flow-go/engine/execution/ingestion/loader" + stateMock "github.com/onflow/flow-go/engine/execution/state/mock" + "github.com/onflow/flow-go/model/flow" + storageerr "github.com/onflow/flow-go/storage" + storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +var _ ingestion.BlockLoader = (*loader.UnexecutedLoader)(nil) + +// ExecutionState is a mocked version of execution state that +// simulates some of its behavior for testing purpose +type mockExecutionState struct { + sync.Mutex + stateMock.ExecutionState + commits map[flow.Identifier]flow.StateCommitment +} + +func newMockExecutionState(seal *flow.Seal, genesis *flow.Block) *mockExecutionState { + commits := make(map[flow.Identifier]flow.StateCommitment) + commits[seal.BlockID] = seal.FinalState + es := &mockExecutionState{ + commits: commits, + } + 
genesisHeader := genesis.ToHeader()
+ es.On("GetLastExecutedBlockID", mock.Anything).Return(genesisHeader.Height, genesisHeader.ID(), nil)
+ return es
+}
+
+func (es *mockExecutionState) StateCommitmentByBlockID(
+ blockID flow.Identifier,
+) (
+ flow.StateCommitment,
+ error,
+) {
+ es.Lock()
+ defer es.Unlock()
+ commit, ok := es.commits[blockID]
+ if !ok {
+ return flow.DummyStateCommitment, storageerr.ErrNotFound
+ }
+
+ return commit, nil
+}
+
+func (es *mockExecutionState) IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) {
+ es.Lock()
+ defer es.Unlock()
+ _, ok := es.commits[blockID]
+ return ok, nil
+}
+
+func (es *mockExecutionState) ExecuteBlock(t *testing.T, block *flow.Block) {
+ parentExecuted, err := es.IsBlockExecuted(
+ block.Height,
+ block.ParentID)
+ require.NoError(t, err)
+ require.True(t, parentExecuted, "parent block not executed")
+
+ es.Lock()
+ defer es.Unlock()
+ es.commits[block.ID()] = unittest.StateCommitmentFixture()
+}
+
+func logChain(chain []*flow.Block) {
+ log := unittest.Logger()
+ for i, block := range chain {
+ log.Info().Msgf("block %v, height: %v, ID: %v", i, block.Height, block.ID())
+ }
+}
+
+func TestLoadingUnexecutedBlocks(t *testing.T) {
+ t.Run("only genesis", func(t *testing.T) {
+ ps := mocks.NewProtocolState()
+
+ chain, result, seal := unittest.ChainFixture(0)
+ genesis := chain[0]
+
+ logChain(chain)
+
+ require.NoError(t, ps.Bootstrap(genesis, result, seal))
+
+ es := newMockExecutionState(seal, genesis)
+ headers := new(storage.Headers)
+ headers.On("ByBlockID", genesis.ID()).Return(genesis.ToHeader(), nil).Once()
+ log := unittest.Logger()
+ loader := loader.NewUnexecutedLoader(log, ps, headers, es)
+
+ unexecuted, err := loader.LoadUnexecuted(context.Background())
+ require.NoError(t, err)
+
+ unittest.IDsEqual(t, []flow.Identifier{}, unexecuted)
+
+ headers.AssertExpectations(t)
+ })
+
+ t.Run("no finalized, nor pending unexecuted", func(t *testing.T) {
+ ps := mocks.NewProtocolState()
+
+ chain, result, seal := unittest.ChainFixture(4)
+ genesis, blockA, blockB, blockC, blockD :=
+ chain[0], chain[1], chain[2], chain[3], chain[4]
+
+ logChain(chain)
+
+ require.NoError(t, ps.Bootstrap(genesis, result, seal))
+ require.NoError(t, ps.Extend(blockA))
+ 
require.NoError(t, ps.Extend(blockB)) + require.NoError(t, ps.Extend(blockC)) + require.NoError(t, ps.Extend(blockD)) + + es := newMockExecutionState(seal, genesis) + headers := new(storage.Headers) + headers.On("ByBlockID", genesis.ID()).Return(genesis.ToHeader(), nil).Once() + headers.On("ByBlockID", blockA.ID()).Return(blockA.ToHeader(), nil).Once() + headers.On("ByBlockID", blockB.ID()).Return(blockB.ToHeader(), nil).Once() + headers.On("ByBlockID", blockC.ID()).Return(blockC.ToHeader(), nil).Once() + headers.On("ByBlockID", blockD.ID()).Return(blockD.ToHeader(), nil).Once() + + log := unittest.Logger() + loader := loader.NewUnexecutedLoader(log, ps, headers, es) + + es.ExecuteBlock(t, blockA) + es.ExecuteBlock(t, blockB) + + unexecuted, err := loader.LoadUnexecuted(context.Background()) + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{blockC.ID(), blockD.ID()}, unexecuted) + + headers.AssertExpectations(t) + }) + + t.Run("all finalized have been executed, and no pending executed", func(t *testing.T) { + ps := mocks.NewProtocolState() + + chain, result, seal := unittest.ChainFixture(4) + genesis, blockA, blockB, blockC, blockD := + chain[0], chain[1], chain[2], chain[3], chain[4] + + logChain(chain) + + require.NoError(t, ps.Bootstrap(genesis, result, seal)) + require.NoError(t, ps.Extend(blockA)) + require.NoError(t, ps.Extend(blockB)) + require.NoError(t, ps.Extend(blockC)) + require.NoError(t, ps.Extend(blockD)) + + require.NoError(t, ps.Finalize(blockC.ID())) + + es := newMockExecutionState(seal, genesis) + headers := new(storage.Headers) + headers.On("ByBlockID", genesis.ID()).Return(genesis.ToHeader(), nil).Once() + headers.On("ByBlockID", blockD.ID()).Return(blockD.ToHeader(), nil).Once() + + log := unittest.Logger() + loader := loader.NewUnexecutedLoader(log, ps, headers, es) + + // block C is the only finalized block, index its header by its height + headers.On("BlockIDByHeight", blockC.Height).Return(blockC.ID(), nil).Once() + + es.ExecuteBlock(t, blockA) + es.ExecuteBlock(t, blockB) + es.ExecuteBlock(t, blockC) + + unexecuted, err := loader.LoadUnexecuted(context.Background()) + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{blockD.ID()}, unexecuted) + + headers.AssertExpectations(t) + }) + + t.Run("some finalized are executed and conflicting are executed", func(t *testing.T) { + ps := mocks.NewProtocolState() + + chain, result, seal := unittest.ChainFixture(4) + genesis, blockA, blockB, blockC, blockD := + chain[0], chain[1], chain[2], chain[3], chain[4] + + logChain(chain) + + require.NoError(t, ps.Bootstrap(genesis, result, seal)) + require.NoError(t, ps.Extend(blockA)) + require.NoError(t, ps.Extend(blockB)) + require.NoError(t, ps.Extend(blockC)) + require.NoError(t, ps.Extend(blockD)) + + require.NoError(t, ps.Finalize(blockC.ID())) + + es := newMockExecutionState(seal, genesis) + headers := new(storage.Headers) + headers.On("ByBlockID", genesis.ID()).Return(genesis.ToHeader(), nil).Once() + headers.On("ByBlockID", blockD.ID()).Return(blockD.ToHeader(), nil).Once() + log := unittest.Logger() + loader := loader.NewUnexecutedLoader(log, ps, headers, es) + + // block C is finalized, index its header by its height + headers.On("BlockIDByHeight", blockC.Height).Return(blockC.ID(), nil).Once() + + es.ExecuteBlock(t, blockA) + es.ExecuteBlock(t, blockB) + es.ExecuteBlock(t, blockC) + + unexecuted, err := loader.LoadUnexecuted(context.Background()) + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{blockD.ID()}, 
unexecuted) + + headers.AssertExpectations(t) + }) + + t.Run("all pending executed", func(t *testing.T) { + ps := mocks.NewProtocolState() + + chain, result, seal := unittest.ChainFixture(4) + genesis, blockA, blockB, blockC, blockD := + chain[0], chain[1], chain[2], chain[3], chain[4] + + logChain(chain) + + require.NoError(t, ps.Bootstrap(genesis, result, seal)) + require.NoError(t, ps.Extend(blockA)) + require.NoError(t, ps.Extend(blockB)) + require.NoError(t, ps.Extend(blockC)) + require.NoError(t, ps.Extend(blockD)) + require.NoError(t, ps.Finalize(blockA.ID())) + + es := newMockExecutionState(seal, genesis) + headers := new(storage.Headers) + headers.On("ByBlockID", genesis.ID()).Return(genesis.ToHeader(), nil).Once() + headers.On("ByBlockID", blockB.ID()).Return(blockB.ToHeader(), nil).Once() + headers.On("ByBlockID", blockC.ID()).Return(blockC.ToHeader(), nil).Once() + headers.On("ByBlockID", blockD.ID()).Return(blockD.ToHeader(), nil).Once() + + log := unittest.Logger() + loader := loader.NewUnexecutedLoader(log, ps, headers, es) + + // block A is finalized, index its header by its height + headers.On("BlockIDByHeight", blockA.Height).Return(blockA.ID(), nil).Once() + + es.ExecuteBlock(t, blockA) + es.ExecuteBlock(t, blockB) + es.ExecuteBlock(t, blockC) + es.ExecuteBlock(t, blockD) + + unexecuted, err := loader.LoadUnexecuted(context.Background()) + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{}, unexecuted) + + headers.AssertExpectations(t) + }) + + t.Run("some fork is executed", func(t *testing.T) { + ps := mocks.NewProtocolState() + + // Genesis <- A <- B <- C (finalized) <- D <- E <- F + // ^--- G <- H + // ^-- I + // ^--- J <- K + chain, result, seal := unittest.ChainFixture(6) + genesis, blockA, blockB, blockC, blockD, blockE, blockF := + chain[0], chain[1], chain[2], chain[3], chain[4], chain[5], chain[6] + + fork1 := unittest.ChainFixtureFrom(2, blockD.ToHeader()) + blockG, blockH := fork1[0], fork1[1] + + fork2 := unittest.ChainFixtureFrom(1, blockC.ToHeader()) + blockI := fork2[0] + + fork3 := unittest.ChainFixtureFrom(2, blockB.ToHeader()) + blockJ, blockK := fork3[0], fork3[1] + + logChain(chain) + logChain(fork1) + logChain(fork2) + logChain(fork3) + + require.NoError(t, ps.Bootstrap(genesis, result, seal)) + require.NoError(t, ps.Extend(blockA)) + require.NoError(t, ps.Extend(blockB)) + require.NoError(t, ps.Extend(blockC)) + require.NoError(t, ps.Extend(blockI)) + require.NoError(t, ps.Extend(blockJ)) + require.NoError(t, ps.Extend(blockK)) + require.NoError(t, ps.Extend(blockD)) + require.NoError(t, ps.Extend(blockE)) + require.NoError(t, ps.Extend(blockF)) + require.NoError(t, ps.Extend(blockG)) + require.NoError(t, ps.Extend(blockH)) + + require.NoError(t, ps.Finalize(blockC.ID())) + + es := newMockExecutionState(seal, genesis) + headers := new(storage.Headers) + headers.On("ByBlockID", genesis.ID()).Return(genesis.ToHeader(), nil).Once() + headers.On("ByBlockID", blockD.ID()).Return(blockD.ToHeader(), nil).Once() + headers.On("ByBlockID", blockE.ID()).Return(blockE.ToHeader(), nil).Once() + headers.On("ByBlockID", blockF.ID()).Return(blockF.ToHeader(), nil).Once() + headers.On("ByBlockID", blockG.ID()).Return(blockG.ToHeader(), nil).Once() + headers.On("ByBlockID", blockH.ID()).Return(blockH.ToHeader(), nil).Once() + headers.On("ByBlockID", blockI.ID()).Return(blockI.ToHeader(), nil).Once() + + log := unittest.Logger() + loader := loader.NewUnexecutedLoader(log, ps, headers, es) + + // block C is finalized, index its header by its height + 
headers.On("BlockIDByHeight", blockC.Height).Return(blockC.ID(), nil).Once() + + es.ExecuteBlock(t, blockA) + es.ExecuteBlock(t, blockB) + es.ExecuteBlock(t, blockC) + es.ExecuteBlock(t, blockD) + es.ExecuteBlock(t, blockG) + es.ExecuteBlock(t, blockJ) + + unexecuted, err := loader.LoadUnexecuted(context.Background()) + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{ + blockI.ID(), // I is still pending, and unexecuted + blockE.ID(), + blockF.ID(), + // note K is not a pending block, but a conflicting block, even if it's not executed, + // it won't included + blockH.ID()}, + unexecuted) + + headers.AssertExpectations(t) + }) +} diff --git a/engine/execution/ingestion/loader/unfinalized_loader.go b/engine/execution/ingestion/loader/unfinalized_loader.go new file mode 100644 index 00000000000..5c78d40a688 --- /dev/null +++ b/engine/execution/ingestion/loader/unfinalized_loader.go @@ -0,0 +1,92 @@ +package loader + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +type UnfinalizedLoader struct { + log zerolog.Logger + state protocol.State + headers storage.Headers + execState state.FinalizedExecutionState +} + +// NewUnfinalizedLoader creates a new loader that loads all unfinalized and validated blocks +func NewUnfinalizedLoader( + log zerolog.Logger, + state protocol.State, + headers storage.Headers, + execState state.FinalizedExecutionState, +) *UnfinalizedLoader { + return &UnfinalizedLoader{ + log: log.With().Str("component", "ingestion_engine_unfinalized_loader").Logger(), + state: state, + headers: headers, + execState: execState, + } +} + +// LoadUnexecuted loads all unfinalized and validated blocks +// any error returned are exceptions +func (e *UnfinalizedLoader) LoadUnexecuted(ctx context.Context) ([]flow.Identifier, error) { + lastExecuted, err := e.execState.GetHighestFinalizedExecuted() + if err != nil { + return nil, fmt.Errorf("could not get highest finalized executed: %w", err) + } + + // get finalized height + finalized := e.state.Final() + final, err := finalized.Head() + if err != nil { + return nil, fmt.Errorf("could not get finalized block: %w", err) + } + + lg := e.log.With(). + Uint64("last_finalized", final.Height). + Uint64("last_finalized_executed", lastExecuted). + Logger() + + lg.Info().Msgf("start loading unfinalized blocks") + + // dynamically bootstrapped execution node will have highest finalized executed as sealed root, + // which is lower than finalized root. so we will reload blocks from + // [sealedRoot.Height + 1, finalizedRoot.Height] and execute them on startup. + unexecutedFinalized := make([]flow.Identifier, 0) + + // starting from the first unexecuted block, go through each unexecuted and finalized block + // reload its block to execution queues + // loading finalized blocks + for height := lastExecuted + 1; height <= final.Height; height++ { + finalizedID, err := e.headers.BlockIDByHeight(height) + if err != nil { + return nil, fmt.Errorf("could not get header at height: %v, %w", height, err) + } + + unexecutedFinalized = append(unexecutedFinalized, finalizedID) + } + + // loaded all pending blocks + pendings, err := finalized.Descendants() + if err != nil { + return nil, fmt.Errorf("could not get descendants of finalized block: %w", err) + } + + unexecuted := append(unexecutedFinalized, pendings...) + + lg.Info(). 
+ // Uint64("sealed_root_height", rootBlock.Height). + // Hex("sealed_root_id", logging.Entity(rootBlock)). + Int("total_finalized_unexecuted", len(unexecutedFinalized)). + Int("total_unexecuted", len(unexecuted)). + Msgf("finalized unexecuted blocks") + + return unexecuted, nil +} diff --git a/engine/execution/ingestion/loader/unfinalized_loader_test.go b/engine/execution/ingestion/loader/unfinalized_loader_test.go new file mode 100644 index 00000000000..13b872d9e33 --- /dev/null +++ b/engine/execution/ingestion/loader/unfinalized_loader_test.go @@ -0,0 +1,55 @@ +package loader_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine/execution/ingestion" + "github.com/onflow/flow-go/engine/execution/ingestion/loader" + stateMock "github.com/onflow/flow-go/engine/execution/state/mock" + "github.com/onflow/flow-go/model/flow" + storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +var _ ingestion.BlockLoader = (*loader.UnfinalizedLoader)(nil) + +func TestLoadingUnfinalizedBlocks(t *testing.T) { + ps := mocks.NewProtocolState() + + // Genesis <- A <- B <- C (finalized) <- D + chain, result, seal := unittest.ChainFixture(5) + genesis, blockA, blockB, blockC, blockD := + chain[0], chain[1], chain[2], chain[3], chain[4] + + logChain(chain) + + require.NoError(t, ps.Bootstrap(genesis, result, seal)) + require.NoError(t, ps.Extend(blockA)) + require.NoError(t, ps.Extend(blockB)) + require.NoError(t, ps.Extend(blockC)) + require.NoError(t, ps.Extend(blockD)) + require.NoError(t, ps.Finalize(blockC.ID())) + + es := new(stateMock.FinalizedExecutionState) + es.On("GetHighestFinalizedExecuted").Return(genesis.Height, nil) + headers := new(storage.Headers) + headers.On("BlockIDByHeight", blockA.Height).Return(blockA.ID(), nil) + headers.On("BlockIDByHeight", blockB.Height).Return(blockB.ID(), nil) + headers.On("BlockIDByHeight", blockC.Height).Return(blockC.ID(), nil) + + loader := loader.NewUnfinalizedLoader(unittest.Logger(), ps, headers, es) + + unexecuted, err := loader.LoadUnexecuted(context.Background()) + require.NoError(t, err) + + unittest.IDsEqual(t, []flow.Identifier{ + blockA.ID(), + blockB.ID(), + blockC.ID(), + blockD.ID(), + }, unexecuted) +} diff --git a/engine/execution/ingestion/machine.go b/engine/execution/ingestion/machine.go new file mode 100644 index 00000000000..194c12b8fea --- /dev/null +++ b/engine/execution/ingestion/machine.go @@ -0,0 +1,176 @@ +package ingestion + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/common/requester" + "github.com/onflow/flow-go/engine/execution" + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/engine/execution/ingestion/stop" + "github.com/onflow/flow-go/engine/execution/ingestion/uploader" + "github.com/onflow/flow-go/engine/execution/provider" + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/mempool/entity" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/events" + "github.com/onflow/flow-go/storage" +) + +// Machine forwards blocks and collections to the core for processing. 
+type Machine struct { + events.Noop // satisfy protocol events consumer interface + log zerolog.Logger + core *Core + throttle Throttle + broadcaster provider.ProviderEngine + uploader *uploader.Manager + execState state.ExecutionState + computationManager computation.ComputationManager +} + +type CollectionRequester interface { + module.ReadyDoneAware + WithHandle(requester.HandleFunc) +} + +func NewMachine( + logger zerolog.Logger, + protocolEvents *events.Distributor, + collectionRequester CollectionRequester, + + collectionFetcher CollectionFetcher, + headers storage.Headers, + blocks storage.Blocks, + collections storage.Collections, + execState state.ExecutionState, + state protocol.State, + metrics module.ExecutionMetrics, + computationManager computation.ComputationManager, + broadcaster provider.ProviderEngine, + uploader *uploader.Manager, + stopControl *stop.StopControl, +) (*Machine, *Core, error) { + + e := &Machine{ + log: logger.With().Str("engine", "ingestion_machine").Logger(), + broadcaster: broadcaster, + uploader: uploader, + execState: execState, + computationManager: computationManager, + } + + throttle, err := NewBlockThrottle( + logger, + state, + execState, + headers, + ) + + if err != nil { + return nil, nil, fmt.Errorf("failed to create block throttle: %w", err) + } + + core, err := NewCore( + logger, + throttle, + execState, + stopControl, + blocks, + collections, + e, + collectionFetcher, + e, + metrics, + ) + + if err != nil { + return nil, nil, fmt.Errorf("failed to create ingestion core: %w", err) + } + + e.throttle = throttle + e.core = core + + protocolEvents.AddConsumer(e) + collectionRequester.WithHandle(func(originID flow.Identifier, entity flow.Entity) { + collection, ok := entity.(*flow.Collection) + if !ok { + e.log.Error().Msgf("invalid entity type (%T)", entity) + return + } + e.core.OnCollection(collection) + }) + + return e, core, nil +} + +// Protocol Events implementation +func (e *Machine) BlockProcessable(header *flow.Header, qc *flow.QuorumCertificate) { + err := e.throttle.OnBlock(qc.BlockID, header.Height) + if err != nil { + e.log.Fatal().Err(err).Msgf("error processing block %v (qc.BlockID: %v, blockID: %v)", + header.Height, qc.BlockID, header.ID()) + } +} + +func (e *Machine) BlockFinalized(b *flow.Header) { + e.throttle.OnBlockFinalized(b.Height) +} + +// EventConsumer implementation +var _ EventConsumer = (*Machine)(nil) + +func (e *Machine) BeforeComputationResultSaved( + ctx context.Context, + result *execution.ComputationResult, +) { + err := e.uploader.Upload(ctx, result) + if err != nil { + e.log.Err(err).Msg("error while uploading block") + // continue processing. 
uploads should not block execution + } +} + +func (e *Machine) OnComputationResultSaved( + ctx context.Context, + result *execution.ComputationResult, +) string { + block := result.BlockExecutionResult.ExecutableBlock.Block + broadcasted, err := e.broadcaster.BroadcastExecutionReceipt( + ctx, block.Height, result.ExecutionReceipt) + if err != nil { + e.log.Err(err).Msg("critical: failed to broadcast the receipt") + } + return fmt.Sprintf("broadcasted: %v", broadcasted) +} + +// BlockExecutor implementation +var _ BlockExecutor = (*Machine)(nil) + +func (e *Machine) ExecuteBlock(ctx context.Context, executableBlock *entity.ExecutableBlock) (*execution.ComputationResult, error) { + block := executableBlock.Block + parentErID, err := e.execState.GetExecutionResultID(ctx, block.ParentID) + if err != nil { + return nil, fmt.Errorf("failed to get parent execution result ID %v: %w", block.ParentID, err) + } + + snapshot := e.execState.NewStorageSnapshot(*executableBlock.StartState, + block.ParentID, + block.Height-1, + ) + + computationResult, err := e.computationManager.ComputeBlock( + ctx, + parentErID, + executableBlock, + snapshot) + if err != nil { + return nil, fmt.Errorf("failed to compute block: %w", err) + } + + return computationResult, nil +} diff --git a/engine/execution/ingestion/mempool.go b/engine/execution/ingestion/mempool.go deleted file mode 100644 index 58d2b11f923..00000000000 --- a/engine/execution/ingestion/mempool.go +++ /dev/null @@ -1,29 +0,0 @@ -package ingestion - -//revive:disable:unexported-return - -import ( - "github.com/onflow/flow-go/module/mempool/stdmap" -) - -type Mempool struct { - ExecutionQueue *stdmap.Queues - BlockByCollection *stdmap.BlockByCollections -} - -func (m *Mempool) Run(f func(blockByCollection *stdmap.BlockByCollectionBackdata, executionQueue *stdmap.QueuesBackdata) error) error { - return m.ExecutionQueue.Run(func(queueBackdata *stdmap.QueuesBackdata) error { - return m.BlockByCollection.Run(func(blockByCollectionBackdata *stdmap.BlockByCollectionBackdata) error { - return f(blockByCollectionBackdata, queueBackdata) - }) - }) -} - -func newMempool() *Mempool { - m := &Mempool{ - BlockByCollection: stdmap.NewBlockByCollections(), - ExecutionQueue: stdmap.NewQueues(), - } - - return m -} diff --git a/engine/execution/ingestion/mock/ingest_rpc.go b/engine/execution/ingestion/mock/ingest_rpc.go deleted file mode 100644 index 0359b5e4a0c..00000000000 --- a/engine/execution/ingestion/mock/ingest_rpc.go +++ /dev/null @@ -1,109 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mock - -import ( - context "context" - - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) - -// IngestRPC is an autogenerated mock type for the IngestRPC type -type IngestRPC struct { - mock.Mock -} - -// ExecuteScriptAtBlockID provides a mock function with given fields: ctx, script, arguments, blockID -func (_m *IngestRPC) ExecuteScriptAtBlockID(ctx context.Context, script []byte, arguments [][]byte, blockID flow.Identifier) ([]byte, error) { - ret := _m.Called(ctx, script, arguments, blockID) - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, flow.Identifier) ([]byte, error)); ok { - return rf(ctx, script, arguments, blockID) - } - if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, flow.Identifier) []byte); ok { - r0 = rf(ctx, script, arguments, blockID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, []byte, [][]byte, flow.Identifier) error); ok { - r1 = rf(ctx, script, arguments, blockID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetAccount provides a mock function with given fields: ctx, address, blockID -func (_m *IngestRPC) GetAccount(ctx context.Context, address flow.Address, blockID flow.Identifier) (*flow.Account, error) { - ret := _m.Called(ctx, address, blockID) - - var r0 *flow.Account - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier) (*flow.Account, error)); ok { - return rf(ctx, address, blockID) - } - if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier) *flow.Account); ok { - r0 = rf(ctx, address, blockID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Account) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, flow.Address, flow.Identifier) error); ok { - r1 = rf(ctx, address, blockID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetRegisterAtBlockID provides a mock function with given fields: ctx, owner, key, blockID -func (_m *IngestRPC) GetRegisterAtBlockID(ctx context.Context, owner []byte, key []byte, blockID flow.Identifier) ([]byte, error) { - ret := _m.Called(ctx, owner, key, blockID) - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte, flow.Identifier) ([]byte, error)); ok { - return rf(ctx, owner, key, blockID) - } - if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte, flow.Identifier) []byte); ok { - r0 = rf(ctx, owner, key, blockID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, []byte, []byte, flow.Identifier) error); ok { - r1 = rf(ctx, owner, key, blockID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewIngestRPC interface { - mock.TestingT - Cleanup(func()) -} - -// NewIngestRPC creates a new instance of IngestRPC. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewIngestRPC(t mockConstructorTestingTNewIngestRPC) *IngestRPC {
- mock := &IngestRPC{}
- mock.Mock.Test(t)
-
- t.Cleanup(func() { mock.AssertExpectations(t) })
-
- return mock
-}
diff --git a/engine/execution/ingestion/mocks/block_store.go b/engine/execution/ingestion/mocks/block_store.go
new file mode 100644
index 00000000000..2145a0615aa
--- /dev/null
+++ b/engine/execution/ingestion/mocks/block_store.go
@@ -0,0 +1,135 @@
+package mocks
+
+import (
+ "fmt"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/flow-go/engine/execution"
+ executionUnittest "github.com/onflow/flow-go/engine/execution/state/unittest"
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/module/mempool/entity"
+ "github.com/onflow/flow-go/utils/unittest"
+)
+
+type BlockResult struct {
+ Block *entity.ExecutableBlock
+ Result *execution.ComputationResult
+}
+
+// MockBlockStore contains mocked block computation results.
+// It ensures that as block computation results are created, a block's start state
+// is the same as its parent block's end state.
+// It also stores which blocks are executed, so that the mock execution state or computer
+// can determine what result to return
+type MockBlockStore struct {
+ sync.Mutex
+ ResultByBlock map[flow.Identifier]*BlockResult
+ Executed map[flow.Identifier]struct{}
+ RootBlock *flow.Header
+}
+
+func NewMockBlockStore(t *testing.T) *MockBlockStore {
+ rootBlock := unittest.ExecutableBlockFixtureWithParent([][]flow.Identifier{{}},
+ unittest.BlockHeaderFixture(), unittest.StateCommitmentPointerFixture())
+ rootResult := executionUnittest.ComputationResultForBlockFixture(t,
+ unittest.IdentifierFixture(), rootBlock)
+
+ blockResult := &BlockResult{
+ Block: rootBlock,
+ Result: rootResult,
+ }
+
+ byBlock := make(map[flow.Identifier]*BlockResult)
+ byBlock[rootResult.Block.ID()] = blockResult
+
+ executed := make(map[flow.Identifier]struct{})
+ executed[rootResult.Block.ID()] = struct{}{}
+ return &MockBlockStore{
+ ResultByBlock: byBlock,
+ Executed: executed,
+ RootBlock: rootBlock.Block.ToHeader(),
+ }
+}
+
+func (bs *MockBlockStore) MarkExecuted(computationResult *execution.ComputationResult) error {
+ bs.Lock()
+ defer bs.Unlock()
+ blockID := computationResult.ExecutableBlock.Block.ID()
+ _, executed := bs.Executed[blockID]
+ if executed {
+ return fmt.Errorf("block %s already executed", blockID)
+ }
+
+ expected, exist := bs.ResultByBlock[blockID]
+ if !exist {
+ return fmt.Errorf("block %s not found", blockID)
+ }
+
+ if expected.Result != computationResult {
+ return fmt.Errorf("block %s expected %v, got %v", blockID, expected, computationResult)
+ }
+ bs.Executed[blockID] = struct{}{}
+ return nil
+}
+
+func (bs *MockBlockStore) CreateBlockAndMockResult(t *testing.T, block *entity.ExecutableBlock) *execution.ComputationResult {
+ bs.Lock()
+ defer bs.Unlock()
+ blockID := block.BlockID()
+ _, exist := bs.ResultByBlock[blockID]
+ require.False(t, exist, "block %s already exists", blockID)
+
+ parent := block.Block.ParentID
+ parentResult, ok := bs.ResultByBlock[parent]
+ require.True(t, ok, "parent block %s not found", parent)
+
+ previousExecutionResultID := parentResult.Result.ExecutionReceipt.ExecutionResult.ID()
+
+ previousCommit := parentResult.Result.CurrentEndState()
+
+ block.StartState = &previousCommit
+
+ // mock computation result
+ cr := executionUnittest.ComputationResultForBlockFixture(t,
+ previousExecutionResultID,
+ block,
+ )
+ result := &BlockResult{
+ Block: block,
+ Result: cr,
+ }
+ 
bs.ResultByBlock[blockID] = result + return cr +} + +func (bs *MockBlockStore) GetExecuted(blockID flow.Identifier) (*BlockResult, error) { + bs.Lock() + defer bs.Unlock() + _, exist := bs.Executed[blockID] + if !exist { + return nil, fmt.Errorf("block %s not executed", blockID) + } + + result, exist := bs.ResultByBlock[blockID] + if !exist { + return nil, fmt.Errorf("block %s not found", blockID) + } + return result, nil +} + +func (bs *MockBlockStore) AssertExecuted(t *testing.T, alias string, block flow.Identifier) { + bs.Lock() + defer bs.Unlock() + _, exist := bs.Executed[block] + require.True(t, exist, "block %s not executed", alias) +} + +func (bs *MockBlockStore) AssertNotExecuted(t *testing.T, alias string, block flow.Identifier) { + bs.Lock() + defer bs.Unlock() + _, exist := bs.Executed[block] + require.False(t, exist, "block %s executed", alias) +} diff --git a/engine/execution/ingestion/mocks/collection_store.go b/engine/execution/ingestion/mocks/collection_store.go new file mode 100644 index 00000000000..1404d5baf58 --- /dev/null +++ b/engine/execution/ingestion/mocks/collection_store.go @@ -0,0 +1,68 @@ +package mocks + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +var _ storage.Collections = (*MockCollectionStore)(nil) + +type MockCollectionStore struct { + byID map[flow.Identifier]*flow.Collection +} + +func NewMockCollectionStore() *MockCollectionStore { + return &MockCollectionStore{ + byID: make(map[flow.Identifier]*flow.Collection), + } +} + +func (m *MockCollectionStore) ByID(id flow.Identifier) (*flow.Collection, error) { + c, ok := m.byID[id] + if !ok { + return nil, fmt.Errorf("collection %s not found: %w", id, storage.ErrNotFound) + } + return c, nil +} + +func (m *MockCollectionStore) Store(c *flow.Collection) (*flow.LightCollection, error) { + m.byID[c.ID()] = c + return c.Light(), nil +} + +func (m *MockCollectionStore) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error { + panic("StoreLightAndIndexByTransaction not implemented") +} + +func (m *MockCollectionStore) StoreLight(collection *flow.LightCollection) error { + panic("StoreLight not implemented") +} + +func (m *MockCollectionStore) Remove(id flow.Identifier) error { + delete(m.byID, id) + return nil +} + +func (m *MockCollectionStore) LightByID(id flow.Identifier) (*flow.LightCollection, error) { + panic("LightByID not implemented") +} + +func (m *MockCollectionStore) LightByTransactionID(id flow.Identifier) (*flow.LightCollection, error) { + panic("LightByTransactionID not implemented") +} + +func (m *MockCollectionStore) BatchStoreLightAndIndexByTransaction(_ *flow.LightCollection, _ storage.ReaderBatchWriter) error { + panic("BatchStoreLightAndIndexByTransaction not implemented") +} + +func (m *MockCollectionStore) StoreAndIndexByTransaction(_ lockctx.Proof, collection *flow.Collection) (*flow.LightCollection, error) { + panic("StoreAndIndexByTransaction not implemented") +} + +func (m *MockCollectionStore) BatchStoreAndIndexByTransaction(_ lockctx.Proof, collection *flow.Collection, batch storage.ReaderBatchWriter) (*flow.LightCollection, error) { + panic("BatchStoreAndIndexByTransaction not implemented") +} diff --git a/engine/execution/ingestion/mocks/fetcher.go b/engine/execution/ingestion/mocks/fetcher.go new file mode 100644 index 00000000000..72d98a4084c --- /dev/null +++ b/engine/execution/ingestion/mocks/fetcher.go @@ -0,0 +1,26 @@ +package mocks + +import 
"github.com/onflow/flow-go/model/flow" + +type MockFetcher struct { + byID map[flow.Identifier]struct{} +} + +func NewMockFetcher() *MockFetcher { + return &MockFetcher{ + byID: make(map[flow.Identifier]struct{}), + } +} + +func (r *MockFetcher) FetchCollection(blockID flow.Identifier, height uint64, guarantee *flow.CollectionGuarantee) error { + r.byID[guarantee.ID()] = struct{}{} + return nil +} + +func (r *MockFetcher) Force() { +} + +func (r *MockFetcher) IsFetched(id flow.Identifier) bool { + _, ok := r.byID[id] + return ok +} diff --git a/engine/execution/ingestion/stop/stop_control.go b/engine/execution/ingestion/stop/stop_control.go new file mode 100644 index 00000000000..b9fecc68fb3 --- /dev/null +++ b/engine/execution/ingestion/stop/stop_control.go @@ -0,0 +1,702 @@ +package stop + +import ( + "errors" + "fmt" + "math" + "strings" + "sync" + "time" + + "github.com/coreos/go-semver/semver" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + psEvents "github.com/onflow/flow-go/state/protocol/events" + "github.com/onflow/flow-go/storage" +) + +const ( + // TODO: figure out an appropriate graceful stop time (is 10 min. enough?) + DefaultMaxGracefulStopDuration = 10 * time.Minute +) + +// StopControl is a specialized component used by ingestion.Engine to encapsulate +// control of stopping blocks execution. +// It is intended to work tightly with the Engine, not as a general mechanism or interface. +// +// StopControl can stop execution or crash the node at a specific block height. The stop +// height can be set manually or by the version beacon service event. This leads to some +// edge cases that are handled by the StopControl: +// +// 1. stop is already set manually and is set again manually. +// This is considered as an attempt to move the stop height. The resulting stop +// height is the new one. Note, the new height can be either lower or higher than +// previous value. +// 2. stop is already set manually and is set by the version beacon. +// The resulting stop height is the lower one. +// 3. stop is already set by the version beacon and is set manually. +// The resulting stop height is the lower one. +// 4. stop is already set by the version beacon and is set by the version beacon. +// This means version boundaries were edited. The resulting stop +// height is the new one. +type StopControl struct { + unit *engine.Unit + maxGracefulStopDuration time.Duration + + // Stop control needs to consume BlockFinalized events. + // adding psEvents.Noop makes it a protocol.Consumer + psEvents.Noop + sync.RWMutex + component.Component + + blockFinalizedChan chan *flow.Header + + headers StopControlHeaders + exeState state.ReadOnlyExecutionState + versionBeacons storage.VersionBeacons + + // stopped is true if node should no longer be executing blocks. + stopped bool + // stopBoundary is when the node should stop. + stopBoundary stopBoundary + // nodeVersion could be nil right now. See NewStopControl. 
+ nodeVersion *semver.Version + // last seen version beacon, used to detect version beacon changes + versionBeacon *flow.SealedVersionBeacon + // whether the node should crash when a version boundary from a version beacon is reached + crashOnVersionBoundaryReached bool + + log zerolog.Logger +} + +var _ protocol.Consumer = (*StopControl)(nil) + +var NoStopHeight = uint64(math.MaxUint64) + +type StopParameters struct { + // desired StopBeforeHeight, the first height at which the new version should be used, + // so this height WON'T be executed + StopBeforeHeight uint64 + + // whether the node should crash or just pause after reaching StopBeforeHeight + ShouldCrash bool +} + +func (p StopParameters) Set() bool { + return p.StopBeforeHeight != NoStopHeight +} + +type stopBoundarySource string + +const ( + stopBoundarySourceManual stopBoundarySource = "manual" + stopBoundarySourceVersionBeacon stopBoundarySource = "versionBeacon" +) + +type stopBoundary struct { + StopParameters + + // The stop control will prevent execution of blocks higher than StopBeforeHeight. + // Once this happens, the stop control is affecting execution and StopParameters can + // no longer be changed. + immutable bool + + // This is the block ID of the block that should be executed last. + stopAfterExecuting flow.Identifier + + // whether the stop parameters were set by the version beacon or manually + source stopBoundarySource +} + +// String returns a string in the format "crash@20023[versionBeacon]" or +// "stop@20023@blockID[manual]". +// The block ID is only present if stopAfterExecuting is set; +// it identifies the block that should be executed last, whose height is one +// less than StopBeforeHeight. +func (s stopBoundary) String() string { + if !s.Set() { + return "none" + } + + sb := strings.Builder{} + if s.ShouldCrash { + sb.WriteString("crash") + } else { + sb.WriteString("stop") + } + sb.WriteString("@") + sb.WriteString(fmt.Sprintf("%d", s.StopBeforeHeight)) + + if s.stopAfterExecuting != flow.ZeroID { + sb.WriteString("@") + sb.WriteString(s.stopAfterExecuting.String()) + } + + sb.WriteString("[") + sb.WriteString(string(s.source)) + sb.WriteString("]") + + return sb.String() +} + +// StopControlHeaders is an interface for fetching headers. +// It is just a small subset of storage.Headers; for comments see storage.Headers. +type StopControlHeaders interface { + BlockIDByHeight(height uint64) (flow.Identifier, error) +} + +// NewStopControl creates a new StopControl. +// +// We currently have no strong guarantee that the node version is a valid semver. +// See build.SemverV2 for more details. That is why nil is a valid input for node version; +// without a node version, the stop control can still be used for manual stopping. +func NewStopControl( + unit *engine.Unit, + maxGracefulStopDuration time.Duration, + log zerolog.Logger, + exeState state.ReadOnlyExecutionState, + headers StopControlHeaders, + versionBeacons storage.VersionBeacons, + nodeVersion *semver.Version, + latestFinalizedBlock *flow.Header, + withStoppedExecution bool, + crashOnVersionBoundaryReached bool, +) *StopControl { + // We should not miss block finalized events, and we should be able to handle them + // faster than they are produced anyway. + blockFinalizedChan := make(chan *flow.Header, 1000) + + sc := &StopControl{ + unit: unit, + maxGracefulStopDuration: maxGracefulStopDuration, + log: log.With(). + Str("component", "stop_control"). 
+ Logger(), + + blockFinalizedChan: blockFinalizedChan, + + exeState: exeState, + headers: headers, + nodeVersion: nodeVersion, + versionBeacons: versionBeacons, + stopped: withStoppedExecution, + crashOnVersionBoundaryReached: crashOnVersionBoundaryReached, + // the default is to never stop + stopBoundary: stopBoundary{ + StopParameters: StopParameters{ + StopBeforeHeight: NoStopHeight, + }, + }, + } + + if sc.nodeVersion != nil { + log = log.With(). + Stringer("node_version", sc.nodeVersion). + Bool("crash_on_version_boundary_reached", + sc.crashOnVersionBoundaryReached). + Logger() + } + + log.Info().Msg("Created") + + cm := component.NewComponentManagerBuilder() + cm.AddWorker(sc.processEvents) + cm.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + sc.checkInitialVersionBeacon(ctx, ready, latestFinalizedBlock) + }) + + sc.Component = cm.Build() + + // TODO: handle a version beacon that already indicates a stop. + // Right now the stop will happen on the first BlockFinalized, + // which is fine, but ideally we would stop right away. + + return sc +} + +// BlockFinalized is called when a block is finalized. +// +// This is a protocol event consumer. See protocol.Consumer. +func (s *StopControl) BlockFinalized(h *flow.Header) { + s.blockFinalizedChan <- h +} + +// processEvents is a worker that processes block finalized events. +func (s *StopControl) processEvents( + ctx irrecoverable.SignalerContext, + ready component.ReadyFunc, +) { + ready() + + for { + select { + case <-ctx.Done(): + return + case h := <-s.blockFinalizedChan: + s.blockFinalized(ctx, h) + } + } +} + +// BlockFinalizedForTesting is used for testing only. +func (s *StopControl) BlockFinalizedForTesting(h *flow.Header) { + s.blockFinalized(irrecoverable.MockSignalerContext{}, h) +} + +func (s *StopControl) checkInitialVersionBeacon( + ctx irrecoverable.SignalerContext, + ready component.ReadyFunc, + latestFinalizedBlock *flow.Header, +) { + // the component is not ready until we have checked the initial version beacon + defer ready() + + // the most straightforward way to check it is to simply pretend we just finalized the + // last finalized block + s.blockFinalized(ctx, latestFinalizedBlock) +} + +// IsExecutionStopped returns true if block execution has been stopped +func (s *StopControl) IsExecutionStopped() bool { + s.RLock() + defer s.RUnlock() + + return s.stopped +} + +// SetStopParameters sets new stop parameters manually. +// +// Expected error returns during normal operations: +// - ErrCannotChangeStop: this indicates that new stop parameters cannot be set. +// See stop.validateStopChange. +func (s *StopControl) SetStopParameters( + stop StopParameters, +) error { + s.Lock() + defer s.Unlock() + + boundary := stopBoundary{ + StopParameters: stop, + source: stopBoundarySourceManual, + } + + return s.setStopParameters(boundary) +} + +// setStopParameters sets new stop parameters. +// stopBoundary carries the new stop parameters. +// +// Expected error returns during normal operations: +// - ErrCannotChangeStop: this indicates that new stop parameters cannot be set. +// See stop.validateStopChange. +// +// Caller must acquire the lock. +func (s *StopControl) setStopParameters( + stopBoundary stopBoundary, +) error { + log := s.log.With(). + Stringer("old_stop", s.stopBoundary). + Stringer("new_stop", stopBoundary). 
+ Logger() + + err := s.validateStopChange(stopBoundary) + if err != nil { + log.Info().Err(err).Msg("cannot set stopHeight") + return err + } + + log.Info().Msg("new stop set") + s.stopBoundary = stopBoundary + + return nil +} + +var ErrCannotChangeStop = errors.New("cannot change stop control stopping parameters") + +// validateStopChange verifies whether the stop parameters can be changed, +// returning an error with the reason if they cannot be changed. +// +// Stop parameters cannot be changed if: +// 1. the node is already stopped +// 2. the stop parameters are immutable (because they are already affecting execution; see +// ShouldExecuteBlock) +// 3. the stop parameters are already set by a different source and the new stop is later than +// the existing one +// +// Expected error returns during normal operations: +// - ErrCannotChangeStop: this indicates that new stop parameters cannot be set. +// +// Caller must acquire the lock. +func (s *StopControl) validateStopChange( + newStopBoundary stopBoundary, +) error { + + errf := func(reason string) error { + return fmt.Errorf("%s: %w", reason, ErrCannotChangeStop) + } + + // 1. + if s.stopped { + return errf("cannot update stop parameters, already stopped") + } + + // 2. + if s.stopBoundary.immutable { + return errf( + fmt.Sprintf( + "cannot update stopHeight, stopping commenced for %s", + s.stopBoundary), + ) + } + + if !s.stopBoundary.Set() { + // if no stop is currently set, we can always update + return nil + } + + // 3. + if s.stopBoundary.source == newStopBoundary.source { + // if the stop was set by the same source, we can always update + return nil + } + + // 3. + // if one stop was set by the version beacon and the other one was manual, + // we can only update if the new stop is strictly earlier + if newStopBoundary.StopBeforeHeight < s.stopBoundary.StopBeforeHeight { + return nil + } + // This prevents users from moving the stopHeight forward when a version boundary + // is earlier, and prevents version beacons from moving the stopHeight forward + // when a manual stop is earlier. + return errf("cannot update stopHeight, " + + "new stop height is later than the current one") +} + +// GetStopParameters returns the upcoming stop parameters; +// if no stop is set, the returned parameters have StopBeforeHeight equal to NoStopHeight. +func (s *StopControl) GetStopParameters() StopParameters { + s.RLock() + defer s.RUnlock() + + return s.stopBoundary.StopParameters +} + +// ShouldExecuteBlock should be called when a new block can be executed. +// The block should not be executed if its height is above or equal to +// s.stopBoundary.StopBeforeHeight. +// +// It returns a boolean indicating if the block should be executed. +func (s *StopControl) ShouldExecuteBlock(blockID flow.Identifier, height uint64) bool { + s.Lock() + defer s.Unlock() + + // don't process any more blocks if stopped + if s.stopped { + return false + } + + // Skip blocks at or above the requested stopHeight; + // doing so means we have started the stopping process. + if height < s.stopBoundary.StopBeforeHeight { + return true + } + + s.log.Info(). + Msgf("Skipping execution of %s at height %d"+ + " because stop has been requested %s", + blockID, + height, + s.stopBoundary) + + // stopBoundary is now immutable, because it started affecting execution + s.stopBoundary.immutable = true + return false +} + +// blockFinalized is called when a block is marked as finalized. +// +// Once finalization reaches stopHeight, we can be sure no other fork will be valid at +// this height. If this block's parent has been executed, we are safe to stop. 
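+// (That parent is recorded as stopAfterExecuting: the last block allowed to execute.)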
+// This will happen during normal execution, where blocks are executed +// before they are finalized. However, it is possible that EN block computation +// progress can fall behind. In this case, we want to crash only after the execution +// reached the stopHeight. +func (s *StopControl) blockFinalized( + ctx irrecoverable.SignalerContext, + h *flow.Header, +) { + s.Lock() + defer s.Unlock() + + // already stopped, nothing to do + if s.stopped { + return + } + + // We already know the ID of the block that should be executed last; nothing to do. + // The node is stopping. + if s.stopBoundary.stopAfterExecuting != flow.ZeroID { + return + } + + handleErr := func(err error) { + s.log.Err(err). + Stringer("block_id", h.ID()). + Stringer("stop", s.stopBoundary). + Msg("Error in stop control BlockFinalized") + + ctx.Throw(err) + } + + s.processNewVersionBeacons(ctx, h.Height) + + // we are not at the stop yet, nothing to do + if h.Height < s.stopBoundary.StopBeforeHeight { + return + } + + parentID := h.ParentID + + if h.Height != s.stopBoundary.StopBeforeHeight { + // we are past the stop. This can happen if the stop was set before the + // last finalized block + s.log.Warn(). + Uint64("finalization_height", h.Height). + Stringer("block_id", h.ID()). + Stringer("stop", s.stopBoundary). + Msg("Block finalization already beyond stop.") + + // Let's find the ID of the block that should be executed last, + // which is the parent of the block at the stopHeight + finalizedID, err := s.headers.BlockIDByHeight(s.stopBoundary.StopBeforeHeight - 1) + if err != nil { + handleErr(fmt.Errorf("failed to get header by height: %w", err)) + return + } + parentID = finalizedID + } + + s.stopBoundary.stopAfterExecuting = parentID + + s.log.Info(). + Stringer("block_id", h.ID()). + Stringer("stop", s.stopBoundary). + Stringer("stop_after_executing", s.stopBoundary.stopAfterExecuting). + Msg("Found ID of the block that should be executed last") + + // if the parent block has already been executed, stop right away + executed, err := state.IsParentExecuted(s.exeState, h) + if err != nil { + handleErr(fmt.Errorf( + "failed to check if the block has been executed: %w", + err, + )) + return + } + + if executed { + // we already reached the point where we should stop + s.stopExecution() + return + } +} + +// OnBlockExecuted should be called after a block has finished execution. +func (s *StopControl) OnBlockExecuted(h *flow.Header) { + s.Lock() + defer s.Unlock() + + if s.stopped { + return + } + + if s.stopBoundary.stopAfterExecuting != h.ID() { + return + } + + // Double-check: even if the requested stopHeight has been changed multiple times, + // as long as it matches this block we are safe to terminate. + if h.Height != s.stopBoundary.StopBeforeHeight-1 { + s.log.Warn(). + Msgf( + "Inconsistent stopping state. "+ + "Scheduled to stop after executing block ID %s and height %d, "+ + "but this block has a height %d. ", + h.ID().String(), + s.stopBoundary.StopBeforeHeight-1, + h.Height, + ) + return + } + + s.stopExecution() +} + +// stopExecution stops the node execution and crashes the node if ShouldCrash is true. +// Caller must acquire the lock. +func (s *StopControl) stopExecution() { + log := s.log.With(). + Stringer("requested_stop", s.stopBoundary). + Uint64("last_executed_height", s.stopBoundary.StopBeforeHeight). + Stringer("last_executed_id", s.stopBoundary.stopAfterExecuting). + Logger() + + s.stopped = true + log.Warn().Msg("Stopping as finalization reached requested stop") + + if s.stopBoundary.ShouldCrash { + log.Info(). 
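+ // the bounded wait below gives the engine up to maxGracefulStopDuration to wind down before crashing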
+ Dur("max-graceful-stop-duration", s.maxGracefulStopDuration). + Msg("Attempting graceful stop as finalization reached requested stop") + doneChan := s.unit.Done() + select { + case <-doneChan: + log.Info().Msg("Engine gracefully stopped") + case <-time.After(s.maxGracefulStopDuration): + log.Info(). + Msg("Engine did not stop within max graceful stop duration") + } + log.Fatal().Msg("Crashing as finalization reached requested stop") + return + } +} + +// processNewVersionBeacons processes version beacons and updates the stop control stop +// height if needed. +// +// When a block is finalized, it is possible that a new version beacon is indexed. +// This new version beacon might have added/removed/moved a version boundary. +// The old version beacon is considered invalid, and the stop height must be updated +// according to the new version beacon. +// +// Caller must acquire the lock. +func (s *StopControl) processNewVersionBeacons( + ctx irrecoverable.SignalerContext, + height uint64, +) { + // TODO: remove when we can guarantee that the node will always have a valid version + if s.nodeVersion == nil { + return + } + + if s.versionBeacon != nil && s.versionBeacon.SealHeight >= height { + // already processed this or a higher version beacon + return + } + + vb, err := s.versionBeacons.Highest(height) + if err != nil { + s.log.Err(err). + Uint64("height", height). + Msg("Failed to get highest version beacon for stop control") + + ctx.Throw( + fmt.Errorf( + "failed to get highest version beacon for stop control: %w", + err)) + return + } + + if vb == nil { + // No version beacon found. + // This is unexpected, as there should always be at least the + // starting version beacon, but it is not fatal. + // It can happen if the node starts before bootstrap is finished. + // TODO: remove when we can guarantee that there will always be a version beacon + s.log.Info(). + Uint64("height", height). + Msg("No version beacon found for stop control") + return + } + + if s.versionBeacon != nil && s.versionBeacon.SealHeight >= vb.SealHeight { + // we already processed this or a higher version beacon + return + } + + lg := s.log.With(). + Str("node_version", s.nodeVersion.String()). + Str("beacon", vb.String()). + Uint64("vb_seal_height", vb.SealHeight). + Uint64("vb_sequence", vb.Sequence).Logger() + + // this is now the last handled version beacon + s.versionBeacon = vb + + // this is a new version beacon; check what boundary it sets + stopHeight, err := s.getVersionBeaconStopHeight(vb) + if err != nil { + s.log.Err(err). + Interface("version_beacon", vb). + Msg("Failed to get stop height from version beacon") + + ctx.Throw( + fmt.Errorf("failed to get stop height from version beacon: %w", err)) + return + } + + lg.Info(). + Uint64("stop_height", stopHeight). + Msg("New version beacon found") + + var newStop = stopBoundary{ + StopParameters: StopParameters{ + StopBeforeHeight: stopHeight, + ShouldCrash: s.crashOnVersionBoundaryReached, + }, + source: stopBoundarySourceVersionBeacon, + } + + err = s.setStopParameters(newStop) + if err != nil { + // This is just informational and is expected to sometimes happen during + // normal operation. The causes for this are described here: validateStopChange. + s.log.Info(). + Uint64("stop_height", stopHeight). + Err(err). 
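+ // e.g. an existing, earlier manual stop takes precedence over the new beacon-derived one (see validateStopChange)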
+ Msg("Cannot change stop boundary when detecting new version beacon") + } +} + +// getVersionBeaconStopHeight returns the stop height that should be set +// based on the version beacon +// +// No error is expected during normal operation since the version beacon +// should have been validated when indexing. +// +// Caller must acquire the lock. +func (s *StopControl) getVersionBeaconStopHeight( + vb *flow.SealedVersionBeacon, +) ( + uint64, + error, +) { + // version boundaries are sorted by version + for _, boundary := range vb.VersionBoundaries { + ver, err := boundary.Semver() + if err != nil || ver == nil { + // this should never happen as we already validated the version beacon + // when indexing it + return 0, fmt.Errorf("failed to parse semver: %w", err) + } + + // This condition can be tweaked in the future. For example if we guarantee that + // all nodes with the same major version have compatible execution, + // we can stop only on major version change. + if s.nodeVersion.LessThan(*ver) { + // we need to stop here + return boundary.BlockHeight, nil + } + } + + // no stop boundary should be set + return NoStopHeight, nil +} diff --git a/engine/execution/ingestion/stop/stop_control_test.go b/engine/execution/ingestion/stop/stop_control_test.go new file mode 100644 index 00000000000..6525ecb4612 --- /dev/null +++ b/engine/execution/ingestion/stop/stop_control_test.go @@ -0,0 +1,875 @@ +package stop + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/coreos/go-semver/semver" + testifyMock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/execution/state/mock" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + storageMock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// If stopping mechanism has caused any changes to execution flow +// (skipping execution of blocks) we disallow setting new values +func TestCannotSetNewValuesAfterStoppingCommenced(t *testing.T) { + + t.Run("when processing block at stop height", func(t *testing.T) { + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + nil, + nil, + nil, + nil, + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + + require.False(t, sc.GetStopParameters().Set()) + + // first update is always successful + stop := StopParameters{StopBeforeHeight: 21} + err := sc.SetStopParameters(stop) + require.NoError(t, err) + + require.Equal(t, stop, sc.GetStopParameters()) + + // no stopping has started yet, block below stop height + header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + require.True(t, sc.ShouldExecuteBlock(header.ID(), header.Height)) + + stop2 := StopParameters{StopBeforeHeight: 37} + err = sc.SetStopParameters(stop2) + require.NoError(t, err) + + // block at stop height, it should be skipped + header = unittest.BlockHeaderFixture(unittest.WithHeaderHeight(37)) + require.False(t, sc.ShouldExecuteBlock(header.ID(), header.Height)) + + // cannot set new stop height after stopping has started + err = sc.SetStopParameters(StopParameters{StopBeforeHeight: 2137}) + require.ErrorIs(t, err, ErrCannotChangeStop) + + // state did not change + require.Equal(t, stop2, sc.GetStopParameters()) + }) + + t.Run("when processing finalized blocks", func(t *testing.T) { + + execState := mock.NewExecutionState(t) + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + 
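// positional args that follow: log, exeState, headers, versionBeacons, nodeVersion, latestFinalizedBlock, withStoppedExecution, crashOnVersionBoundaryReached +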
unittest.Logger(), + execState, + nil, + nil, + nil, + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + + require.False(t, sc.GetStopParameters().Set()) + + // first update is always successful + stop := StopParameters{StopBeforeHeight: 21} + err := sc.SetStopParameters(stop) + require.NoError(t, err) + require.Equal(t, stop, sc.GetStopParameters()) + + // make the execution check pretend the block has been executed + execState.On("IsBlockExecuted", testifyMock.Anything, testifyMock.Anything).Return(true, nil) + + // no stopping has started yet, block below stop height + header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + sc.BlockFinalizedForTesting(header) + + stop2 := StopParameters{StopBeforeHeight: 37} + err = sc.SetStopParameters(stop2) + require.NoError(t, err) + require.Equal(t, stop2, sc.GetStopParameters()) + + // block at stop height, it should trigger the stop + header = unittest.BlockHeaderFixture(unittest.WithHeaderHeight(37)) + sc.BlockFinalizedForTesting(header) + + // since we set shouldCrash to false, execution should be stopped + require.True(t, sc.IsExecutionStopped()) + + err = sc.SetStopParameters(StopParameters{StopBeforeHeight: 2137}) + require.ErrorIs(t, err, ErrCannotChangeStop) + }) +} + +// TestExecutionFallingBehind checks if StopControl behaves properly even if EN runs behind +// and blocks are finalized before they are executed +func TestExecutionFallingBehind(t *testing.T) { + + execState := mock.NewExecutionState(t) + + headerA := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + headerB := unittest.BlockHeaderWithParentFixture(headerA) // 21 + headerC := unittest.BlockHeaderWithParentFixture(headerB) // 22 + headerD := unittest.BlockHeaderWithParentFixture(headerC) // 23 + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + execState, + nil, + nil, + nil, + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + + // set stop at 22, so 21 is the last height which should be processed + stop := StopParameters{StopBeforeHeight: 22} + err := sc.SetStopParameters(stop) + require.NoError(t, err) + require.Equal(t, stop, sc.GetStopParameters()) + + execState.On("IsBlockExecuted", headerC.Height-1, headerC.ParentID).Return(false, nil) + + // finalize blocks first + sc.BlockFinalizedForTesting(headerA) + sc.BlockFinalizedForTesting(headerB) + sc.BlockFinalizedForTesting(headerC) + sc.BlockFinalizedForTesting(headerD) + + // simulate execution + sc.OnBlockExecuted(headerA) + sc.OnBlockExecuted(headerB) + require.True(t, sc.IsExecutionStopped()) +} + +type stopControlMockHeaders struct { + headers map[uint64]*flow.Header +} + +func (m *stopControlMockHeaders) BlockIDByHeight(height uint64) (flow.Identifier, error) { + h, ok := m.headers[height] + if !ok { + return flow.ZeroID, fmt.Errorf("header not found") + } + return h.ID(), nil +} + +func TestAddStopForPastBlocks(t *testing.T) { + execState := mock.NewExecutionState(t) + + headerA := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + headerB := unittest.BlockHeaderWithParentFixture(headerA) // 21 + headerC := unittest.BlockHeaderWithParentFixture(headerB) // 22 + headerD := unittest.BlockHeaderWithParentFixture(headerC) // 23 + + headers := &stopControlMockHeaders{ + headers: map[uint64]*flow.Header{ + headerA.Height: headerA, + headerB.Height: headerB, + headerC.Height: headerC, + headerD.Height: headerD, + }, + } + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + 
execState, + headers, + nil, + nil, + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + + // finalize blocks first + sc.BlockFinalizedForTesting(headerA) + sc.BlockFinalizedForTesting(headerB) + sc.BlockFinalizedForTesting(headerC) + + // simulate execution + sc.OnBlockExecuted(headerA) + sc.OnBlockExecuted(headerB) + sc.OnBlockExecuted(headerC) + + // headerD's parent is already executed + execState.On("IsBlockExecuted", headerD.Height-1, headerD.ParentID).Return(true, nil) + + // set stop at 22, but finalization and execution are at 23, + // so stop right away + stop := StopParameters{StopBeforeHeight: 22} + err := sc.SetStopParameters(stop) + require.NoError(t, err) + require.Equal(t, stop, sc.GetStopParameters()) + + // finalize one more block after stop is set + sc.BlockFinalizedForTesting(headerD) + + require.True(t, sc.IsExecutionStopped()) +} + +func TestAddStopForPastBlocksExecutionFallingBehind(t *testing.T) { + execState := mock.NewExecutionState(t) + + headerA := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + headerB := unittest.BlockHeaderWithParentFixture(headerA) // 21 + headerC := unittest.BlockHeaderWithParentFixture(headerB) // 22 + headerD := unittest.BlockHeaderWithParentFixture(headerC) // 23 + + headers := &stopControlMockHeaders{ + headers: map[uint64]*flow.Header{ + headerA.Height: headerA, + headerB.Height: headerB, + headerC.Height: headerC, + headerD.Height: headerD, + }, + } + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + execState, + headers, + nil, + nil, + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + + execState.On("IsBlockExecuted", headerD.Height-1, headerD.ParentID).Return(false, nil) + + // finalize blocks first + sc.BlockFinalizedForTesting(headerA) + sc.BlockFinalizedForTesting(headerB) + sc.BlockFinalizedForTesting(headerC) + + // set stop at 22; finalization is at 23, so 21 + // is the last height which will be executed + stop := StopParameters{StopBeforeHeight: 22} + err := sc.SetStopParameters(stop) + require.NoError(t, err) + require.Equal(t, stop, sc.GetStopParameters()) + + // finalize one more block after stop is set + sc.BlockFinalizedForTesting(headerD) + + // simulate execution + sc.OnBlockExecuted(headerA) + sc.OnBlockExecuted(headerB) + require.True(t, sc.IsExecutionStopped()) +} + +func TestStopControlWithVersionControl(t *testing.T) { + t.Run("normal case", func(t *testing.T) { + execState := mock.NewExecutionState(t) + versionBeacons := new(storageMock.VersionBeacons) + + headerA := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + headerB := unittest.BlockHeaderWithParentFixture(headerA) // 21 + headerC := unittest.BlockHeaderWithParentFixture(headerB) // 22 + + headers := &stopControlMockHeaders{ + headers: map[uint64]*flow.Header{ + headerA.Height: headerA, + headerB.Height: headerB, + headerC.Height: headerC, + }, + } + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + execState, + headers, + versionBeacons, + semver.New("1.0.0"), + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + + // setting this means all finalized blocks are considered already executed + execState.On("IsBlockExecuted", headerC.Height-1, headerC.ParentID).Return(true, nil) + + versionBeacons. + On("Highest", testifyMock.Anything). 
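+ // each Highest expectation below uses .Once(), so each finalized block consumes the next mocked beacon in setup order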
+ Return(&flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + // zero boundary is expected if there + // is no boundary set by the contract yet + flow.VersionBoundary{ + BlockHeight: 0, + Version: "0.0.0", + }), + ), + SealHeight: headerA.Height, + }, nil).Once() + + // finalize first block + sc.BlockFinalizedForTesting(headerA) + require.False(t, sc.IsExecutionStopped()) + require.False(t, sc.GetStopParameters().Set()) + + // new version beacon + versionBeacons. + On("Highest", testifyMock.Anything). + Return(&flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + // zero boundary is expected if there + // is no boundary set by the contract yet + flow.VersionBoundary{ + BlockHeight: 0, + Version: "0.0.0", + }, flow.VersionBoundary{ + BlockHeight: 21, + Version: "1.0.0", + }), + ), + SealHeight: headerB.Height, + }, nil).Once() + + // finalize second block. we are still ok as the node version + // is the same as the version beacon one + sc.BlockFinalizedForTesting(headerB) + require.False(t, sc.IsExecutionStopped()) + require.False(t, sc.GetStopParameters().Set()) + + // new version beacon + versionBeacons. + On("Highest", testifyMock.Anything). + Return(&flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + // The previous version is included in the new version beacon + flow.VersionBoundary{ + BlockHeight: 21, + Version: "1.0.0", + }, flow.VersionBoundary{ + BlockHeight: 22, + Version: "2.0.0", + }), + ), + SealHeight: headerC.Height, + }, nil).Once() + sc.BlockFinalizedForTesting(headerC) + // should be stopped as this is height 22 and height 21 is already considered executed + require.True(t, sc.IsExecutionStopped()) + }) + + t.Run("version boundary removed", func(t *testing.T) { + + // future version boundaries can be removed + // in which case they will be missing from the version beacon + execState := mock.NewExecutionState(t) + versionBeacons := storageMock.NewVersionBeacons(t) + + headerA := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + headerB := unittest.BlockHeaderWithParentFixture(headerA) // 21 + headerC := unittest.BlockHeaderWithParentFixture(headerB) // 22 + + headers := &stopControlMockHeaders{ + headers: map[uint64]*flow.Header{ + headerA.Height: headerA, + headerB.Height: headerB, + headerC.Height: headerC, + }, + } + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + execState, + headers, + versionBeacons, + semver.New("1.0.0"), + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + + versionBeacons. + On("Highest", testifyMock.Anything). + Return(&flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + // set to stop at height 21 + flow.VersionBoundary{ + BlockHeight: 0, + Version: "0.0.0", + }, flow.VersionBoundary{ + BlockHeight: 21, + Version: "2.0.0", + }), + ), + SealHeight: headerA.Height, + }, nil).Once() + + // finalize first block + sc.BlockFinalizedForTesting(headerA) + require.False(t, sc.IsExecutionStopped()) + require.Equal(t, StopParameters{ + StopBeforeHeight: 21, + ShouldCrash: false, + }, sc.GetStopParameters()) + + // new version beacon + versionBeacons. + On("Highest", testifyMock.Anything). 
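+ // the boundary at height 21 is absent from this newer beacon, so the derived stop is expected to be cleared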
+ Return(&flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + // stop removed + flow.VersionBoundary{ + BlockHeight: 0, + Version: "0.0.0", + }), + ), + SealHeight: headerB.Height, + }, nil).Once() + + // finalize second block. We are still ok, as the node version + // is the same as the version beacon one + sc.BlockFinalizedForTesting(headerB) + require.False(t, sc.IsExecutionStopped()) + require.False(t, sc.GetStopParameters().Set()) + }) + + t.Run("manual not cleared by version beacon", func(t *testing.T) { + // future version boundaries can be removed, + // in which case they will be missing from the version beacon + execState := mock.NewExecutionState(t) + versionBeacons := storageMock.NewVersionBeacons(t) + + headerA := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + headerB := unittest.BlockHeaderWithParentFixture(headerA) // 21 + headerC := unittest.BlockHeaderWithParentFixture(headerB) // 22 + + headers := &stopControlMockHeaders{ + headers: map[uint64]*flow.Header{ + headerA.Height: headerA, + headerB.Height: headerB, + headerC.Height: headerC, + }, + } + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + execState, + headers, + versionBeacons, + semver.New("1.0.0"), + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + + versionBeacons. + On("Highest", testifyMock.Anything). + Return(&flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + // only the zero boundary is set, so no stop is derived yet + flow.VersionBoundary{ + BlockHeight: 0, + Version: "0.0.0", + }), + ), + SealHeight: headerA.Height, + }, nil).Once() + + // finalize first block + sc.BlockFinalizedForTesting(headerA) + require.False(t, sc.IsExecutionStopped()) + require.False(t, sc.GetStopParameters().Set()) + + // set manual stop + stop := StopParameters{ + StopBeforeHeight: 22, + ShouldCrash: false, + } + err := sc.SetStopParameters(stop) + require.NoError(t, err) + require.Equal(t, stop, sc.GetStopParameters()) + + // new version beacon + versionBeacons. + On("Highest", testifyMock.Anything). + Return(&flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + // stop removed + flow.VersionBoundary{ + BlockHeight: 0, + Version: "0.0.0", + }), + ), + SealHeight: headerB.Height, + }, nil).Once() + + sc.BlockFinalizedForTesting(headerB) + require.False(t, sc.IsExecutionStopped()) + // stop is not cleared due to being set manually + require.Equal(t, stop, sc.GetStopParameters()) + }) + + t.Run("version beacon not cleared by manual", func(t *testing.T) { + // a stop set by a version beacon cannot be overridden by a later manual stop + execState := mock.NewExecutionState(t) + versionBeacons := storageMock.NewVersionBeacons(t) + + headerA := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + headerB := unittest.BlockHeaderWithParentFixture(headerA) // 21 + + headers := &stopControlMockHeaders{ + headers: map[uint64]*flow.Header{ + headerA.Height: headerA, + headerB.Height: headerB, + }, + } + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + execState, + headers, + versionBeacons, + semver.New("1.0.0"), + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + + vbStop := StopParameters{ + StopBeforeHeight: 22, + ShouldCrash: false, + } + versionBeacons. + On("Highest", testifyMock.Anything). 
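+ // the beacon below derives a stop at vbStop.StopBeforeHeight; the later manual attempt in this test must fail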
+ Return(&flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + // set to stop at height 22 (vbStop.StopBeforeHeight) + flow.VersionBoundary{ + BlockHeight: 0, + Version: "0.0.0", + }, flow.VersionBoundary{ + BlockHeight: vbStop.StopBeforeHeight, + Version: "2.0.0", + }), + ), + SealHeight: headerA.Height, + }, nil).Once() + + // finalize first block + sc.BlockFinalizedForTesting(headerA) + require.False(t, sc.IsExecutionStopped()) + require.Equal(t, vbStop, sc.GetStopParameters()) + + // set manual stop + stop := StopParameters{ + StopBeforeHeight: 23, + ShouldCrash: false, + } + err := sc.SetStopParameters(stop) + require.ErrorIs(t, err, ErrCannotChangeStop) + // stop is not cleared due to being set earlier by a version beacon + require.Equal(t, vbStop, sc.GetStopParameters()) + }) +} + +// A StopControl created as stopped will keep that state +func TestStartingStopped(t *testing.T) { + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + nil, + nil, + nil, + nil, + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + true, + false, + ) + require.True(t, sc.IsExecutionStopped()) +} + +func TestStoppedStateRejectsAllBlocksAndChanged(t *testing.T) { + + // make sure we don't even query executed status if stopped; + // the mock should fail the test on any method call + execState := mock.NewExecutionState(t) + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + execState, + nil, + nil, + nil, + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + true, + false, + ) + require.True(t, sc.IsExecutionStopped()) + + err := sc.SetStopParameters(StopParameters{ + StopBeforeHeight: 2137, + ShouldCrash: true, + }) + require.ErrorIs(t, err, ErrCannotChangeStop) + + header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + + sc.BlockFinalizedForTesting(header) + require.True(t, sc.IsExecutionStopped()) +} + +func Test_StopControlWorkers(t *testing.T) { + + t.Run("start and stop, stopped = true", func(t *testing.T) { + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + nil, + nil, + nil, + nil, + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + true, + false, + ) + + ctx, cancel := context.WithCancel(context.Background()) + ictx := irrecoverable.NewMockSignalerContext(t, ctx) + + sc.Start(ictx) + + unittest.AssertClosesBefore(t, sc.Ready(), 10*time.Second) + + cancel() + + unittest.AssertClosesBefore(t, sc.Done(), 10*time.Second) + }) + + t.Run("start and stop, stopped = false", func(t *testing.T) { + + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + nil, + nil, + nil, + nil, + &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}, + false, + false, + ) + + ctx, cancel := context.WithCancel(context.Background()) + ictx := irrecoverable.NewMockSignalerContext(t, ctx) + + sc.Start(ictx) + + unittest.AssertClosesBefore(t, sc.Ready(), 10*time.Second) + + cancel() + + unittest.AssertClosesBefore(t, sc.Done(), 10*time.Second) + }) + + t.Run("start as stopped if execution is at version boundary", func(t *testing.T) { + + headerA := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + headerB := unittest.BlockHeaderWithParentFixture(headerA) // 21 + + versionBeacons := storageMock.NewVersionBeacons(t) + versionBeacons.On("Highest", headerB.Height). 
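+ // boundary right at the latest finalized height: node version 1.0.0 is below 2.0.0, so headerB must not be executed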
+ Return(&flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + flow.VersionBoundary{ + BlockHeight: headerB.Height, + Version: "2.0.0", + }, + ), + ), + SealHeight: 1, // sealed in the past + }, nil). + Once() + + execState := mock.NewExecutionState(t) + + execState.On("IsBlockExecuted", headerA.Height, headerA.ID()).Return(true, nil).Once() + + headers := &stopControlMockHeaders{ + headers: map[uint64]*flow.Header{ + headerA.Height: headerA, + headerB.Height: headerB, + }, + } + + // This is a likely scenario where the node stopped because of a version + // boundary but was restarted without being upgraded to the new version. + // In this case, the node should start as stopped. + sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + execState, + headers, + versionBeacons, + semver.New("1.0.0"), + headerB, + false, + false, + ) + + ctx, cancel := context.WithCancel(context.Background()) + ictx := irrecoverable.NewMockSignalerContext(t, ctx) + + sc.Start(ictx) + + unittest.AssertClosesBefore(t, sc.Ready(), 10*time.Second) + + // should start as stopped + require.True(t, sc.IsExecutionStopped()) + require.Equal(t, StopParameters{ + StopBeforeHeight: headerB.Height, + ShouldCrash: false, + }, sc.GetStopParameters()) + + cancel() + + unittest.AssertClosesBefore(t, sc.Done(), 10*time.Second) + }) + + t.Run("test stopping with block finalized events", func(t *testing.T) { + + headerA := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) + headerB := unittest.BlockHeaderWithParentFixture(headerA) // 21 + headerC := unittest.BlockHeaderWithParentFixture(headerB) // 22 + + vb := &flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + flow.VersionBoundary{ + BlockHeight: headerC.Height, + Version: "2.0.0", + }, + ), + ), + SealHeight: 1, // sealed in the past + } + + versionBeacons := storageMock.NewVersionBeacons(t) + versionBeacons.On("Highest", headerB.Height). + Return(vb, nil). + Once() + versionBeacons.On("Highest", headerC.Height). + Return(vb, nil). + Once() + + execState := mock.NewExecutionState(t) + execState.On("IsBlockExecuted", headerB.Height, headerB.ID()).Return(true, nil).Once() + + headers := &stopControlMockHeaders{ + headers: map[uint64]*flow.Header{ + headerA.Height: headerA, + headerB.Height: headerB, + headerC.Height: headerC, + }, + } + + // The stop is set by a previous version beacon and is one block away. 
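+ // Finalizing headerC (the stop height) should therefore stop execution without any new stop being set.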
+ sc := NewStopControl( + engine.NewUnit(), + time.Second, + unittest.Logger(), + execState, + headers, + versionBeacons, + semver.New("1.0.0"), + headerB, + false, + false, + ) + + ctx, cancel := context.WithCancel(context.Background()) + ictx := irrecoverable.NewMockSignalerContext(t, ctx) + + sc.Start(ictx) + + unittest.AssertClosesBefore(t, sc.Ready(), 10*time.Second) + + require.False(t, sc.IsExecutionStopped()) + require.Equal(t, StopParameters{ + StopBeforeHeight: headerC.Height, + ShouldCrash: false, + }, sc.GetStopParameters()) + + sc.BlockFinalized(headerC) + + done := make(chan struct{}) + go func() { + for !sc.IsExecutionStopped() { + <-time.After(100 * time.Millisecond) + } + close(done) + }() + + select { + case <-done: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for stop control to stop execution") + } + + cancel() + unittest.AssertClosesBefore(t, sc.Done(), 10*time.Second) + }) +} + +func TestPatchedVersion(t *testing.T) { + require.True(t, semver.New("0.31.20").LessThan(*semver.New("0.31.21"))) + require.True(t, semver.New("0.31.20-patch.1").LessThan(*semver.New("0.31.20"))) // be careful with this one + require.True(t, semver.New("0.31.20-without-adx").LessThan(*semver.New("0.31.20"))) + + // a special build created with "+" would not change the version priority for standard and pre-release versions + require.True(t, semver.New("0.31.20+without-adx").Equal(*semver.New("0.31.20"))) + require.True(t, semver.New("0.31.20-patch.1+without-adx").Equal(*semver.New("0.31.20-patch.1"))) + require.True(t, semver.New("0.31.20+without-netgo-without-adx").Equal(*semver.New("0.31.20"))) + require.True(t, semver.New("0.31.20+arm").Equal(*semver.New("0.31.20"))) +} diff --git a/engine/execution/ingestion/stop_control.go b/engine/execution/ingestion/stop_control.go deleted file mode 100644 index 49d09f07194..00000000000 --- a/engine/execution/ingestion/stop_control.go +++ /dev/null @@ -1,320 +0,0 @@ -package ingestion - -import ( - "context" - "fmt" - "sync" - - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/engine/execution/state" - "github.com/onflow/flow-go/model/flow" -) - -// StopControl is a specialized component used by ingestion.Engine to encapsulate -// control of pausing/stopping blocks execution. -// It is intended to work tightly with the Engine, not as a general mechanism or interface. -// StopControl follows states described in StopState -type StopControl struct { - sync.RWMutex - // desired stopHeight, the first value new version should be used, - // so this height WON'T be executed - stopHeight uint64 - - // if the node should crash or just pause after reaching stopHeight - crash bool - - // This is the block ID of the block that should be executed last. - stopAfterExecuting flow.Identifier - - log zerolog.Logger - state StopControlState - - // used to prevent setting stopHeight to block which has already been executed - highestExecutingHeight uint64 -} - -type StopControlState byte - -const ( - // StopControlOff default state, envisioned to be used most of the time. - // Stopping module is simply off, blocks will be processed "as usual". - StopControlOff StopControlState = iota - - // StopControlSet means stopHeight is set but not reached yet, - // and nothing related to stopping happened yet. - // We could still go back to StopControlOff or progress to StopControlCommenced. - StopControlSet - - // StopControlCommenced indicates that stopping process has commenced - // and no parameters can be changed anymore. 
- // For example, blocks at or above stopHeight has been received, - // but finalization didn't reach stopHeight yet. - // It can only progress to StopControlPaused - StopControlCommenced - - // StopControlPaused means EN has stopped processing blocks. - // It can happen by reaching the set stopping `stopHeight`, or - // if the node was started in pause mode. - // It is a final state and cannot be changed - StopControlPaused -) - -// NewStopControl creates new empty NewStopControl -func NewStopControl( - log zerolog.Logger, - paused bool, - lastExecutedHeight uint64, -) *StopControl { - state := StopControlOff - if paused { - state = StopControlPaused - } - log.Debug().Msgf("created StopControl module with paused = %t", paused) - return &StopControl{ - log: log, - state: state, - highestExecutingHeight: lastExecutedHeight, - } -} - -// GetState returns current state of StopControl module -func (s *StopControl) GetState() StopControlState { - s.RLock() - defer s.RUnlock() - return s.state -} - -// IsPaused returns true is block execution has been paused -func (s *StopControl) IsPaused() bool { - s.RLock() - defer s.RUnlock() - return s.state == StopControlPaused -} - -// SetStopHeight sets new stopHeight and crash mode, and return old values: -// - stopHeight -// - crash -// -// Returns error if the stopping process has already commenced, new values will be rejected. -func (s *StopControl) SetStopHeight( - height uint64, - crash bool, -) (uint64, bool, error) { - s.Lock() - defer s.Unlock() - - oldHeight := s.stopHeight - oldCrash := s.crash - - if s.state == StopControlCommenced { - return oldHeight, - oldCrash, - fmt.Errorf( - "cannot update stopHeight, "+ - "stopping commenced for stopHeight %d with crash=%t", - oldHeight, - oldCrash, - ) - } - - if s.state == StopControlPaused { - return oldHeight, - oldCrash, - fmt.Errorf("cannot update stopHeight, already paused") - } - - // cannot set stopHeight to block which is already executing - // so the lowest possible stopHeight is highestExecutingHeight+1 - if height <= s.highestExecutingHeight { - return oldHeight, - oldCrash, - fmt.Errorf( - "cannot update stopHeight, "+ - "given stopHeight %d below or equal to highest executing height %d", - height, - s.highestExecutingHeight, - ) - } - - s.log.Info(). - Int8("previous_state", int8(s.state)). - Int8("new_state", int8(StopControlSet)). - Uint64("stopHeight", height). - Bool("crash", crash). - Uint64("old_height", oldHeight). - Bool("old_crash", oldCrash). - Msg("new stopHeight set") - - s.state = StopControlSet - - s.stopHeight = height - s.crash = crash - s.stopAfterExecuting = flow.ZeroID - - return oldHeight, oldCrash, nil -} - -// GetStopHeight returns: -// - stopHeight -// - crash -// -// Values are undefined if they were not previously set -func (s *StopControl) GetStopHeight() (uint64, bool) { - s.RLock() - defer s.RUnlock() - - return s.stopHeight, s.crash -} - -// blockProcessable should be called when new block is processable. -// It returns boolean indicating if the block should be processed. -func (s *StopControl) blockProcessable(b *flow.Header) bool { - s.Lock() - defer s.Unlock() - - if s.state == StopControlOff { - return true - } - - if s.state == StopControlPaused { - return false - } - - // skips blocks at or above requested stopHeight - if b.Height >= s.stopHeight { - s.log.Warn(). - Int8("previous_state", int8(s.state)). - Int8("new_state", int8(StopControlCommenced)). 
- Msgf( - "Skipping execution of %s at height %d"+ - " because stop has been requested at height %d", - b.ID(), - b.Height, - s.stopHeight, - ) - - s.state = StopControlCommenced // if block was skipped, move into commenced state - return false - } - - return true -} - -// blockFinalized should be called when a block is marked as finalized -func (s *StopControl) blockFinalized( - ctx context.Context, - execState state.ReadOnlyExecutionState, - h *flow.Header, -) { - - s.Lock() - defer s.Unlock() - - if s.state == StopControlOff || s.state == StopControlPaused { - return - } - - // Once finalization reached stopHeight we can be sure no other fork will be valid at this height, - // if this block's parent has been executed, we are safe to stop or crash. - // This will happen during normal execution, where blocks are executed before they are finalized. - // However, it is possible that EN block computation progress can fall behind. In this case, - // we want to crash only after the execution reached the stopHeight. - if h.Height == s.stopHeight { - - executed, err := state.IsBlockExecuted(ctx, execState, h.ParentID) - if err != nil { - // any error here would indicate unexpected storage error, so we crash the node - // TODO: what if the error is due to the node being stopped? - // i.e. context cancelled? - s.log.Fatal(). - Err(err). - Str("block_id", h.ID().String()). - Msg("failed to check if the block has been executed") - return - } - - if executed { - s.stopExecution() - } else { - s.stopAfterExecuting = h.ParentID - s.log.Info(). - Msgf( - "Node scheduled to stop executing"+ - " after executing block %s at height %d", - s.stopAfterExecuting.String(), - h.Height-1, - ) - } - } -} - -// blockExecuted should be called after a block has finished execution -func (s *StopControl) blockExecuted(h *flow.Header) { - s.Lock() - defer s.Unlock() - - if s.state == StopControlPaused || s.state == StopControlOff { - return - } - - if s.stopAfterExecuting == h.ID() { - // double check. Even if requested stopHeight has been changed multiple times, - // as long as it matches this block we are safe to terminate - if h.Height == s.stopHeight-1 { - s.stopExecution() - } else { - s.log.Warn(). - Msgf( - "Inconsistent stopping state. "+ - "Scheduled to stop after executing block ID %s and height %d, "+ - "but this block has a height %d. ", - h.ID().String(), - s.stopHeight-1, - h.Height, - ) - } - } -} - -func (s *StopControl) stopExecution() { - if s.crash { - s.log.Fatal().Msgf( - "Crashing as finalization reached requested "+ - "stop height %d and the highest executed block is (%d - 1)", - s.stopHeight, - s.stopHeight, - ) - return - } - - s.log.Debug(). - Int8("previous_state", int8(s.state)). - Int8("new_state", int8(StopControlPaused)). - Msg("StopControl state transition") - - s.state = StopControlPaused - - s.log.Warn().Msgf( - "Pausing execution as finalization reached "+ - "the requested stop height %d", - s.stopHeight, - ) - -} - -// executingBlockHeight should be called while execution of height starts, -// used for internal tracking of the minimum possible value of stopHeight -func (s *StopControl) executingBlockHeight(height uint64) { - // TODO: should we lock here? - - if s.state == StopControlPaused { - return - } - - // updating the highest executing height, which will be used to reject setting - // stopHeight that is too low. 
- if height > s.highestExecutingHeight { - s.highestExecutingHeight = height - } -} diff --git a/engine/execution/ingestion/stop_control_test.go b/engine/execution/ingestion/stop_control_test.go deleted file mode 100644 index 500278f56f5..00000000000 --- a/engine/execution/ingestion/stop_control_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package ingestion - -import ( - "context" - "testing" - - "github.com/onflow/flow-go/storage" - - testifyMock "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/engine/execution/state/mock" - - "github.com/onflow/flow-go/utils/unittest" -) - -// If stopping mechanism has caused any changes to execution flow (skipping execution of blocks) -// we disallow setting new values -func TestCannotSetNewValuesAfterStoppingCommenced(t *testing.T) { - - t.Run("when processing block at stop height", func(t *testing.T) { - sc := NewStopControl(unittest.Logger(), false, 0) - - require.Equal(t, sc.GetState(), StopControlOff) - - // first update is always successful - _, _, err := sc.SetStopHeight(21, false) - require.NoError(t, err) - - require.Equal(t, sc.GetState(), StopControlSet) - - // no stopping has started yet, block below stop height - header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) - sc.blockProcessable(header) - - require.Equal(t, sc.GetState(), StopControlSet) - - _, _, err = sc.SetStopHeight(37, false) - require.NoError(t, err) - - // block at stop height, it should be skipped - header = unittest.BlockHeaderFixture(unittest.WithHeaderHeight(37)) - sc.blockProcessable(header) - - require.Equal(t, sc.GetState(), StopControlCommenced) - - _, _, err = sc.SetStopHeight(2137, false) - require.Error(t, err) - - // state did not change - require.Equal(t, sc.GetState(), StopControlCommenced) - }) - - t.Run("when processing finalized blocks", func(t *testing.T) { - - execState := new(mock.ReadOnlyExecutionState) - - sc := NewStopControl(unittest.Logger(), false, 0) - - require.Equal(t, sc.GetState(), StopControlOff) - - // first update is always successful - _, _, err := sc.SetStopHeight(21, false) - require.NoError(t, err) - require.Equal(t, sc.GetState(), StopControlSet) - - // make execution check pretends block has been executed - execState.On("StateCommitmentByBlockID", testifyMock.Anything, testifyMock.Anything).Return(nil, nil) - - // no stopping has started yet, block below stop height - header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) - sc.blockFinalized(context.TODO(), execState, header) - - _, _, err = sc.SetStopHeight(37, false) - require.NoError(t, err) - require.Equal(t, sc.GetState(), StopControlSet) - - // block at stop height, it should be trigger stop - header = unittest.BlockHeaderFixture(unittest.WithHeaderHeight(37)) - sc.blockFinalized(context.TODO(), execState, header) - - // since we set crash to false, execution should be paused - require.Equal(t, sc.GetState(), StopControlPaused) - - _, _, err = sc.SetStopHeight(2137, false) - require.Error(t, err) - - execState.AssertExpectations(t) - }) -} - -// TestExecutionFallingBehind check if StopControl behaves properly even if EN runs behind -// and blocks are finalized before they are executed -func TestExecutionFallingBehind(t *testing.T) { - - execState := new(mock.ReadOnlyExecutionState) - - headerA := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20)) - headerB := unittest.BlockHeaderWithParentFixture(headerA) // 21 - headerC := unittest.BlockHeaderWithParentFixture(headerB) // 22 - headerD := 
unittest.BlockHeaderWithParentFixture(headerC) // 23
-
-	sc := NewStopControl(unittest.Logger(), false, 0)
-
-	require.Equal(t, sc.GetState(), StopControlOff)
-
-	// set stop at 22, so 21 is the last height which should be processed
-	_, _, err := sc.SetStopHeight(22, false)
-	require.NoError(t, err)
-	require.Equal(t, sc.GetState(), StopControlSet)
-
-	execState.On("StateCommitmentByBlockID", testifyMock.Anything, headerC.ParentID).Return(nil, storage.ErrNotFound)
-
-	// finalize blocks first
-	sc.blockFinalized(context.TODO(), execState, headerA)
-	require.Equal(t, StopControlSet, sc.GetState())
-
-	sc.blockFinalized(context.TODO(), execState, headerB)
-	require.Equal(t, StopControlSet, sc.GetState())
-
-	sc.blockFinalized(context.TODO(), execState, headerC)
-	require.Equal(t, StopControlSet, sc.GetState())
-
-	sc.blockFinalized(context.TODO(), execState, headerD)
-	require.Equal(t, StopControlSet, sc.GetState())
-
-	// simulate execution
-	sc.blockExecuted(headerA)
-	require.Equal(t, StopControlSet, sc.GetState())
-
-	sc.blockExecuted(headerB)
-	require.Equal(t, StopControlPaused, sc.GetState())
-
-	execState.AssertExpectations(t)
-}
-
-// TestCannotSetHeightBelowLastExecuted check if StopControl
-// tracks last executed height and prevents from setting stop height
-// below or too close to it
-func TestCannotSetHeightBelowLastExecuted(t *testing.T) {
-
-	sc := NewStopControl(unittest.Logger(), false, 0)
-
-	require.Equal(t, sc.GetState(), StopControlOff)
-
-	sc.executingBlockHeight(20)
-	require.Equal(t, StopControlOff, sc.GetState())
-
-	_, _, err := sc.SetStopHeight(20, false)
-	require.Error(t, err)
-	require.Equal(t, StopControlOff, sc.GetState())
-
-	_, _, err = sc.SetStopHeight(25, false)
-	require.NoError(t, err)
-	require.Equal(t, StopControlSet, sc.GetState())
-}
-
-// StopControl started as paused will keep the state
-func TestStartingPaused(t *testing.T) {
-
-	sc := NewStopControl(unittest.Logger(), true, 0)
-	require.Equal(t, StopControlPaused, sc.GetState())
-}
-
-func TestPausedStateRejectsAllBlocksAndChanged(t *testing.T) {
-
-	sc := NewStopControl(unittest.Logger(), true, 0)
-	require.Equal(t, StopControlPaused, sc.GetState())
-
-	_, _, err := sc.SetStopHeight(2137, true)
-	require.Error(t, err)
-
-	// make sure we don't even query executed status if paused
-	// mock should fail test on any method call
-	execState := new(mock.ReadOnlyExecutionState)
-
-	header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(20))
-
-	sc.blockFinalized(context.TODO(), execState, header)
-	require.Equal(t, StopControlPaused, sc.GetState())
-
-	execState.AssertExpectations(t)
-}
diff --git a/engine/execution/ingestion/throttle.go b/engine/execution/ingestion/throttle.go
new file mode 100644
index 00000000000..bbf02dd46fb
--- /dev/null
+++ b/engine/execution/ingestion/throttle.go
@@ -0,0 +1,383 @@
+package ingestion
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine/execution/state"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+)
+
+// DefaultCatchUpThreshold is the catch-up threshold: if execution falls more than
+// this many blocks behind finalization, we lazily load only the next unexecuted
+// finalized blocks until execution has caught up.
+const DefaultCatchUpThreshold = 500
+
+// BlockIDHeight is a helper struct that holds the block ID and height
+type BlockIDHeight struct {
+	ID     flow.Identifier
+	Height uint64
+}
+
+func HeaderToBlockIDHeight(header *flow.Header) BlockIDHeight {
+	return BlockIDHeight{
+		ID:     header.ID(),
+		Height: header.Height,
+	}
+}
+
+// Throttle is used to throttle the blocks to be added to the processables channel
+type Throttle interface {
+	// Init initializes the throttle with the processables channel to forward the blocks
+	Init(processables chan<- BlockIDHeight, threshold int) error
+	// OnBlock is called when a block is received; the throttle checks whether execution
+	// is falling far behind finalization, and adds the block to the processables channel
+	// only if it is not falling far behind.
+	OnBlock(blockID flow.Identifier, height uint64) error
+	// OnBlockExecuted is called when a block is executed; the throttle checks whether
+	// execution has caught up with finalization, and if so allows all the remaining blocks
+	// to be added to the processables channel.
+	OnBlockExecuted(blockID flow.Identifier, height uint64) error
+	// OnBlockFinalized is called when a block is finalized; the throttle updates the
+	// finalized height.
+	OnBlockFinalized(height uint64)
+	// Done stops the throttle and stops sending new blocks to the processables channel
+	Done() error
+}
+
+var _ Throttle = (*BlockThrottle)(nil)
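// For orientation, a minimal sketch of how a caller could drive the Throttle
// interface above. This is illustrative wiring only; the consume loop and error
// handling are assumptions, not part of this change. In the engine, the ingestion
// core owns the processables channel and reports execution progress back.
func exampleThrottleUsage(throttle Throttle) error {
	// a generously buffered channel, so Init can push its initial batch without blocking
	processables := make(chan BlockIDHeight, 10000)

	// load at most DefaultCatchUpThreshold unexecuted finalized blocks up front
	if err := throttle.Init(processables, DefaultCatchUpThreshold); err != nil {
		return err
	}

	go func() {
		for b := range processables {
			// ... fetch collections and execute block b ...
			// reporting each execution lets the throttle release the next block,
			// one at a time, until execution has caught up with finalization
			_ = throttle.OnBlockExecuted(b.ID, b.Height)
		}
	}()
	return nil
}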
+// BlockThrottle is a helper struct that throttles the unexecuted blocks to be sent
+// to the block queue for execution.
+// It is useful for the case when execution is falling far behind finalization, in which
+// case we want to throttle the blocks sent to the block queue for fetching data to execute
+// them. Without throttling, the block queue would be flooded with blocks, the network
+// would be flooded with requests fetching collections, and the EN might quickly run out of memory.
+type BlockThrottle struct {
+	// when initialized, if the execution is falling far behind the finalization, then
+	// the throttle will only load the next `threshold` number of unexecuted blocks to processables,
+	// and ignore newly received blocks until the execution has caught up with finalization.
+	// During the catching-up phase, after a block is executed, the throttle will load the next block
+	// to processables, and keep doing so until the execution has caught up with finalization.
+	// Once caught up, the throttle will process all the remaining unexecuted blocks, including
+	// unfinalized blocks.
+	mu        sync.Mutex
+	stopped   bool   // whether the throttle is stopped; if true, no more blocks will be loaded
+	loadedAll bool   // whether all blocks have been loaded; if true, no block will be throttled
+	loaded    uint64 // the last block height pushed to processables; used to track whether execution has caught up
+	finalized uint64 // the last finalized height; used to track whether execution has caught up
+
+	// notifier
+	processables chan<- BlockIDHeight
+
+	// dependencies
+	log     zerolog.Logger
+	state   protocol.State
+	headers storage.Headers
+}
+
+func NewBlockThrottle(
+	log zerolog.Logger,
+	state protocol.State,
+	execState state.ExecutionState,
+	headers storage.Headers,
+) (*BlockThrottle, error) {
+	finalizedHead, err := state.Final().Head()
+	if err != nil {
+		return nil, fmt.Errorf("could not get finalized head: %w", err)
+	}
+
+	finalized := finalizedHead.Height
+	executed, err := execState.GetHighestFinalizedExecuted()
+	if err != nil {
+		return nil, fmt.Errorf("could not get highest finalized executed: %w", err)
+	}
+
+	if executed > finalized {
+		return nil, fmt.Errorf("executed finalized %v is greater than finalized %v", executed, finalized)
+	}
+
+	return &BlockThrottle{
+		loaded:    executed,
+		finalized: finalized,
+		stopped:   false,
+		loadedAll: false,
+
+		log:     log.With().Str("component", "block_throttle").Logger(),
+		state:   state,
+		headers: headers,
+	}, nil
+}
+
+// inited returns true if the throttle has been inited
+func (c *BlockThrottle) inited() bool {
+	return c.processables != nil
+}
+
+func (c *BlockThrottle) Init(processables chan<- BlockIDHeight, threshold int) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.inited() {
+		return fmt.Errorf("throttle already inited")
+	}
+
+	c.processables = processables
+
+	lastFinalizedToLoad := c.loaded + uint64(threshold)
+	if lastFinalizedToLoad > c.finalized {
+		lastFinalizedToLoad = c.finalized
+	}
+
+	loadedAll := lastFinalizedToLoad == c.finalized
+
+	lg := c.log.With().
+		Uint64("executed", c.loaded).
+		Uint64("finalized", c.finalized).
+		Uint64("lastFinalizedToLoad", lastFinalizedToLoad).
+		Int("threshold", threshold).
+		Bool("loadedAll", loadedAll).
+		Logger()
+
+	lg.Info().Msgf("finding finalized blocks")
+
+	unexecuted, err := findFinalized(c.state, c.headers, c.loaded, lastFinalizedToLoad)
+	if err != nil {
+		return err
+	}
+
+	if loadedAll {
+		pendings, err := findAllPendingBlocks(c.state, c.headers, c.finalized)
+		if err != nil {
+			return err
+		}
+		unexecuted = append(unexecuted, pendings...)
+	}
+
+	lg = lg.With().Int("unexecuted", len(unexecuted)).
+		Logger()
+
+	lg.Debug().Msgf("initializing throttle")
+
+	// the ingestion core engine must have initialized the 'processables' with 10000 (default) buffer size,
+	// and the 'unexecuted' will only contain up to DefaultCatchUpThreshold (500) blocks,
+	// so pushing all the unexecuted to processables won't be blocked.
+	for _, b := range unexecuted {
+		c.processables <- b
+		c.loaded = b.Height
+	}
+
+	c.loadedAll = loadedAll
+
+	lg.Info().Msgf("throttle initialized unexecuted blocks")
+
+	return nil
+}
+
+func (c *BlockThrottle) OnBlockExecuted(_ flow.Identifier, executed uint64) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if !c.inited() {
+		return fmt.Errorf("throttle not inited")
+	}
+
+	if c.stopped {
+		return nil
+	}
+
+	// we have already caught up, ignore
+	if c.caughtUp() {
+		return nil
+	}
+
+	// in this case, c.loaded must be < c.finalized
+	// so we must be able to load the next block
+	err := c.loadNextBlock(c.loaded)
+	if err != nil {
+		return fmt.Errorf("could not load next block: %w", err)
+	}
+
+	if !c.caughtUp() {
+		// even after loading the next block, execution is still behind the
+		// finalization height, so keep catching up one block at a time
+		return nil
+	}
+
+	c.log.Info().Uint64("executed", executed).Uint64("finalized", c.finalized).
+		Uint64("loaded", c.loaded).
+		Msgf("execution has caught up, processing remaining unexecuted blocks")
+
+	// the execution has just caught up close enough to the latest finalized blocks,
+	// so process all unexecuted blocks, including finalized unexecuted and pending unexecuted
+	unexecuted, err := findAllPendingBlocks(c.state, c.headers, c.finalized)
+	if err != nil {
+		return fmt.Errorf("could not find unexecuted blocks for processing: %w", err)
+	}
+
+	c.log.Info().Int("unexecuted", len(unexecuted)).Msgf("forwarding unexecuted blocks")
+
+	for _, block := range unexecuted {
+		c.processables <- block
+		c.loaded = block.Height
+	}
+
+	c.log.Info().Msgf("all unexecuted blocks have been processed")
+
+	return nil
+}
+
+// Done marks the throttle as done; no more blocks will be processed
+func (c *BlockThrottle) Done() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	c.log.Info().Msgf("throttle done")
+
+	if !c.inited() {
+		return fmt.Errorf("throttle not inited")
+	}
+
+	c.stopped = true
+
+	return nil
+}
+
+func (c *BlockThrottle) OnBlock(blockID flow.Identifier, height uint64) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.log.Debug().Msgf("received block (%v) height: %v", blockID, height)
+
+	if !c.inited() {
+		return fmt.Errorf("throttle not inited")
+	}
+
+	if c.stopped {
+		return nil
+	}
+
+	// ignore the block if execution has not caught up.
+	if !c.caughtUp() {
+		return nil
+	}
+
+	// if caught up, process the block
+	c.processables <- BlockIDHeight{
+		ID:     blockID,
+		Height: height,
+	}
+	c.loaded = height
+	c.log.Debug().Msgf("processed block (%v), height: %v", blockID, height)
+
+	return nil
+}
+
+func (c *BlockThrottle) OnBlockFinalized(finalizedHeight uint64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if !c.inited() {
+		return
+	}
+
+	if c.caughtUp() {
+		// once caught up, all unfinalized blocks will be loaded, and loadedAll will be set to true,
+		// which means we will always be caught up, so we don't need to update the finalized height any more.
+		return
+	}
+
+	if finalizedHeight <= c.finalized {
+		return
+	}
+
+	c.finalized = finalizedHeight
+}
+
+func (c *BlockThrottle) loadNextBlock(height uint64) error {
+	c.log.Debug().Uint64("height", height).Msg("loading next block")
+	// load next block
+	next := height + 1
+	blockID, err := c.headers.BlockIDByHeight(next)
+	if err != nil {
+		return fmt.Errorf("could not get block ID by height %v: %w", next, err)
+	}
+
+	c.processables <- BlockIDHeight{
+		ID:     blockID,
+		Height: next,
+	}
+	c.loaded = next
+	c.log.Debug().Uint64("height", next).Msg("loaded next block")
+
+	return nil
+}
+
+func (c *BlockThrottle) caughtUp() bool {
+	// loading all pending blocks should happen at most once.
+	// if the execution already caught up with finalization during initialization,
+	// then loadedAll is true, and we don't need to catch up again.
+	// if the execution was falling behind finalization and has caught up,
+	// then loadedAll is also true, and we don't need to catch up again, because
+	// otherwise we might load the same block twice.
+	if c.loadedAll {
+		return true
+	}
+
+	// in this case, the execution was falling behind finalization during initialization;
+	// whether the execution has caught up is determined by whether the loaded block is equal
+	// to or above the finalized block.
+ return c.loaded >= c.finalized +} + +func findFinalized(state protocol.State, headers storage.Headers, lastExecuted, finalizedHeight uint64) ([]BlockIDHeight, error) { + // get finalized height + finalized := state.AtHeight(finalizedHeight) + final, err := finalized.Head() + if err != nil { + return nil, fmt.Errorf("could not get finalized block: %w", err) + } + + // dynamically bootstrapped execution node will have highest finalized executed as sealed root, + // which is lower than finalized root. so we will reload blocks from + // [sealedRoot.Height + 1, finalizedRoot.Height] and execute them on startup. + unexecutedFinalized := make([]BlockIDHeight, 0) + + // starting from the first unexecuted block, go through each unexecuted and finalized block + for height := lastExecuted + 1; height <= final.Height; height++ { + finalizedID, err := headers.BlockIDByHeight(height) + if err != nil { + return nil, fmt.Errorf("could not get block ID by height %v: %w", height, err) + } + + unexecutedFinalized = append(unexecutedFinalized, BlockIDHeight{ + ID: finalizedID, + Height: height, + }) + } + + return unexecutedFinalized, nil +} + +func findAllPendingBlocks(state protocol.State, headers storage.Headers, finalizedHeight uint64) ([]BlockIDHeight, error) { + // loaded all pending blocks + pendings, err := state.AtHeight(finalizedHeight).Descendants() + if err != nil { + return nil, fmt.Errorf("could not get descendants of finalized block: %w", err) + } + + unexecuted := make([]BlockIDHeight, 0, len(pendings)) + for _, id := range pendings { + header, err := headers.ByBlockID(id) + if err != nil { + return nil, fmt.Errorf("could not get header by block ID %v: %w", id, err) + } + unexecuted = append(unexecuted, BlockIDHeight{ + ID: id, + Height: header.Height, + }) + } + + return unexecuted, nil +} diff --git a/engine/execution/ingestion/throttle_test.go b/engine/execution/ingestion/throttle_test.go new file mode 100644 index 00000000000..b3299cdabbb --- /dev/null +++ b/engine/execution/ingestion/throttle_test.go @@ -0,0 +1,254 @@ +package ingestion + +import ( + "fmt" + "sync" + "testing" + + "github.com/rs/zerolog/log" + "github.com/stretchr/testify/require" + + stateMock "github.com/onflow/flow-go/engine/execution/state/mock" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/mocks" +) + +// Given the following chain: +// 1 <- 2 <- 3 <- 4 <- 5 <- 6 <- 7 <- 8 <- 9 <- 10 +// Block 6 is the last executed, block 7 is the last finalized, +// Then block 7, 8, 9, 10 will be loaded. 
+// When 7, 8, 9, 10 are executed, no more blocks will be loaded
+func TestThrottleLoadAllBlocks(t *testing.T) {
+	blocks := makeBlocks(t, 0, 10)
+	headers := toHeaders(blocks)
+	require.Len(t, blocks, 10+1)
+	threshold, lastExecuted, lastFinalized := 3, 6, 7
+	throttle := createThrottle(t, blocks, headers, lastExecuted, lastFinalized)
+	var wg sync.WaitGroup
+	processables, consumer := makeProcessables(t, &wg, HeaderToBlockIDHeight(headers[lastExecuted]))
+
+	wg.Add(4) // load block 7,8,9,10
+	// verify that after Init, the last processable is 10 (all headers are loaded)
+	require.NoError(t, throttle.Init(processables, threshold))
+	wg.Wait()
+
+	require.Equal(t, HeaderToBlockIDHeight(headers[10]), consumer.LastProcessable())
+	total := consumer.Total()
+
+	// once everything is loaded, an execution event loads no more blocks
+	require.NoError(t, throttle.OnBlockExecuted(headers[6].ID(), headers[6].Height))
+	require.Equal(t, total, consumer.Total())
+
+	require.NoError(t, throttle.Done())
+}
+
+// Given the following chain:
+// 1 <- 2 <- 3 <- 4 <- 5 <- 6 <- 7 <- 8 <- 9 <- 10 <- 11
+// 10 and 11 are not received.
+// Block 1 is the last executed, Block 7 is the last finalized.
+// If the threshold is 3, then blocks 2, 3, 4 will be loaded.
+// When 2 is executed, block 5 will be loaded.
+// When 10 is received, no block will be loaded.
+// When 3 is executed, block 6 will be loaded.
+// When 4 is executed, blocks 7, 8, 9, 10 will be loaded.
+// When 5 is executed, no block is loaded.
+// When 11 is received, block 11 will be loaded.
+func TestThrottleFallBehindCatchUp(t *testing.T) {
+	allBlocks := makeBlocks(t, 0, 11)
+	blocks := allBlocks[:11]
+	require.Len(t, blocks, 10+1)
+	headers := toHeaders(blocks)
+	threshold, lastExecuted, lastFinalized := 3, 1, 7
+	throttle := createThrottle(t, blocks, headers, lastExecuted, lastFinalized)
+	var wg sync.WaitGroup
+	processables, consumer := makeProcessables(t, &wg, HeaderToBlockIDHeight(headers[lastExecuted]))
+
+	wg.Add(3) // load block 2,3,4
+	// verify that after Init, the last processable is 4 (only the next 3 finalized blocks are loaded)
+	require.NoError(t, throttle.Init(processables, threshold))
+	wg.Wait()
+	require.Equal(t, HeaderToBlockIDHeight(headers[4]), consumer.LastProcessable())
+
+	// when 2 is executed, verify block 5 is loaded
+	wg.Add(1)
+	require.NoError(t, throttle.OnBlockExecuted(headers[2].ID(), headers[2].Height))
+	wg.Wait()
+	require.Equal(t, HeaderToBlockIDHeight(headers[5]), consumer.LastProcessable())
+
+	// when 10 is received, no block is loaded
+	require.NoError(t, throttle.OnBlock(headers[10].ID(), headers[10].Height))
+	require.Equal(t, HeaderToBlockIDHeight(headers[5]), consumer.LastProcessable())
+
+	// when 3 is executed, verify block 6 is loaded
+	wg.Add(1)
+	require.NoError(t, throttle.OnBlockExecuted(headers[3].ID(), headers[3].Height))
+	wg.Wait()
+	require.Equal(t, HeaderToBlockIDHeight(headers[6]), consumer.LastProcessable())
+
+	// when 4 is executed, verify blocks 7, 8, 9, 10 are loaded
+	wg.Add(4)
+	require.NoError(t, throttle.OnBlockExecuted(headers[4].ID(), headers[4].Height))
+	wg.Wait()
+	require.Equal(t, HeaderToBlockIDHeight(headers[10]), consumer.LastProcessable())
+	require.Equal(t, 10, consumer.Total())
+
+	// when 4 through 10 are executed (4 again, then 5 to 10), no more blocks are loaded
+	for i := 4; i <= 10; i++ {
+		require.NoError(t, throttle.OnBlockExecuted(headers[i].ID(), headers[i].Height))
+	}
+	wg.Wait()
+	require.Equal(t, HeaderToBlockIDHeight(headers[10]), consumer.LastProcessable())
+	require.Equal(t, 10, consumer.Total())
+
+	// when
11 is received, verify block 11 is loaded + wg.Add(1) + require.NoError(t, throttle.OnBlock(allBlocks[11].ID(), allBlocks[11].Height)) + wg.Wait() + require.Equal(t, HeaderToBlockIDHeight(allBlocks[11].ToHeader()), consumer.LastProcessable()) + + require.NoError(t, throttle.Done()) +} + +func makeBlocks(t *testing.T, start, count int) []*flow.Block { + genesis := unittest.Block.Genesis(flow.Emulator) + blocks := unittest.ChainFixtureFrom(count, genesis.ToHeader()) + return append([]*flow.Block{genesis}, blocks...) +} + +func toHeaders(blocks []*flow.Block) []*flow.Header { + headers := make([]*flow.Header, len(blocks)) + for i, block := range blocks { + headers[i] = block.ToHeader() + } + return headers +} + +func createThrottle(t *testing.T, blocks []*flow.Block, headers []*flow.Header, lastExecuted, lastFinalized int) *BlockThrottle { + log := unittest.Logger() + headerStore := newHeadersWithBlocks(headers) + + state := mocks.NewProtocolState() + require.NoError(t, state.Bootstrap(blocks[0], nil, nil)) + for i := 1; i < len(blocks); i++ { + require.NoError(t, state.Extend(blocks[i])) + } + require.NoError(t, state.Finalize(blocks[lastFinalized].ID())) + + execState := stateMock.NewExecutionState(t) + execState.On("GetHighestFinalizedExecuted").Return(headers[lastExecuted].Height, nil) + + throttle, err := NewBlockThrottle(log, state, execState, headerStore) + require.NoError(t, err) + return throttle +} + +func makeProcessables(t *testing.T, wg *sync.WaitGroup, root BlockIDHeight) (chan<- BlockIDHeight, *processableConsumer) { + processables := make(chan BlockIDHeight, MaxProcessableBlocks) + consumer := &processableConsumer{ + headers: map[flow.Identifier]struct{}{ + root.ID: {}, + }, + last: root, + } + consumer.Consume(t, wg, processables) + return processables, consumer +} + +type processableConsumer struct { + last BlockIDHeight + headers map[flow.Identifier]struct{} +} + +func (c *processableConsumer) Consume(t *testing.T, wg *sync.WaitGroup, processables <-chan BlockIDHeight) { + go func() { + for block := range processables { + _, ok := c.headers[block.ID] + require.False(t, ok, "block %v is already processed", block.Height) + c.headers[block.ID] = struct{}{} + c.last = block + log.Info().Msgf("consuming block %v, c.last: %v", block.Height, c.last.Height) + wg.Done() + } + }() +} + +func (c *processableConsumer) Total() int { + return len(c.headers) +} + +func (c *processableConsumer) LastProcessable() BlockIDHeight { + return c.last +} + +type headerStore struct { + byID map[flow.Identifier]*flow.Header + byHeight map[uint64]*flow.Header +} + +var _ storage.Headers = (*headerStore)(nil) + +func newHeadersWithBlocks(headers []*flow.Header) *headerStore { + byID := make(map[flow.Identifier]*flow.Header, len(headers)) + byHeight := make(map[uint64]*flow.Header, len(headers)) + for _, header := range headers { + byID[header.ID()] = header + byHeight[header.Height] = header + } + return &headerStore{ + byID: byID, + byHeight: byHeight, + } +} + +func (h *headerStore) BlockIDByHeight(height uint64) (flow.Identifier, error) { + header, ok := h.byHeight[height] + if !ok { + return flow.Identifier{}, fmt.Errorf("block %d not found", height) + } + return header.ID(), nil +} + +func (h *headerStore) ByBlockID(blockID flow.Identifier) (*flow.Header, error) { + header, ok := h.byID[blockID] + if !ok { + return nil, fmt.Errorf("block %v not found", blockID) + } + return header, nil +} + +func (h *headerStore) ByHeight(height uint64) (*flow.Header, error) { + header, ok := h.byHeight[height] + 
if !ok { + return nil, fmt.Errorf("block %d not found", height) + } + return header, nil +} + +func (h *headerStore) Exists(blockID flow.Identifier) (bool, error) { + _, ok := h.byID[blockID] + return ok, nil +} + +func (h *headerStore) ByParentID(parentID flow.Identifier) ([]*flow.Header, error) { + return nil, nil +} + +func (h *headerStore) Store(proposal *flow.ProposalHeader) error { + return nil +} + +func (h *headerStore) ProposalByBlockID(blockID flow.Identifier) (*flow.ProposalHeader, error) { + return nil, nil +} + +func (h *headerStore) ByView(view uint64) (*flow.Header, error) { + // Find header with matching view + for _, header := range h.byID { + if header.View == view { + return header, nil + } + } + return nil, fmt.Errorf("no header found for view %d", view) +} diff --git a/engine/execution/ingestion/uploader/file_uploader.go b/engine/execution/ingestion/uploader/file_uploader.go index c8e8f5819d7..82623230be1 100644 --- a/engine/execution/ingestion/uploader/file_uploader.go +++ b/engine/execution/ingestion/uploader/file_uploader.go @@ -20,7 +20,7 @@ type FileUploader struct { } func (f *FileUploader) Upload(computationResult *execution.ComputationResult) error { - file, err := os.Create(path.Join(f.dir, fmt.Sprintf("%s.cbor", computationResult.ExecutableBlock.ID()))) + file, err := os.Create(path.Join(f.dir, fmt.Sprintf("%s.cbor", computationResult.ExecutableBlock.BlockID()))) if err != nil { return fmt.Errorf("cannot create file for writing block data: %w", err) } diff --git a/engine/execution/ingestion/uploader/gcp_uploader.go b/engine/execution/ingestion/uploader/gcp_uploader.go index 6deb89ea1c5..343b1bd7fa7 100644 --- a/engine/execution/ingestion/uploader/gcp_uploader.go +++ b/engine/execution/ingestion/uploader/gcp_uploader.go @@ -70,5 +70,5 @@ func (u *GCPBucketUploader) Upload(computationResult *execution.ComputationResul } func GCPBlockDataObjectName(computationResult *execution.ComputationResult) string { - return fmt.Sprintf("%s.cbor", computationResult.ExecutableBlock.ID().String()) + return fmt.Sprintf("%s.cbor", computationResult.ExecutableBlock.BlockID().String()) } diff --git a/engine/execution/ingestion/uploader/manager_test.go b/engine/execution/ingestion/uploader/manager_test.go index f90e0cd3aa0..d705ff6fd41 100644 --- a/engine/execution/ingestion/uploader/manager_test.go +++ b/engine/execution/ingestion/uploader/manager_test.go @@ -13,6 +13,7 @@ import ( executionUnittest "github.com/onflow/flow-go/engine/execution/state/unittest" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/utils/unittest" ) func TestManagerSetEnabled(t *testing.T) { @@ -28,7 +29,8 @@ func TestManagerSetEnabled(t *testing.T) { func TestManagerUploadsWithAllUploaders(t *testing.T) { result := executionUnittest.ComputationResultFixture( - flow.ZeroID, + t, + unittest.IdentifierFixture(), [][]flow.Identifier{ {flow.ZeroID}, {flow.ZeroID}, @@ -83,7 +85,7 @@ func TestRetryableUploader(t *testing.T) { uploadMgr.AddUploader(testRetryableUploader) err := uploadMgr.RetryUploads() - assert.Nil(t, err) + assert.NoError(t, err) require.True(t, testRetryableUploader.RetryUploadCalled()) } diff --git a/engine/execution/ingestion/uploader/mock/retryable_uploader_wrapper.go b/engine/execution/ingestion/uploader/mock/retryable_uploader_wrapper.go new file mode 100644 index 00000000000..441f9c46eba --- /dev/null +++ b/engine/execution/ingestion/uploader/mock/retryable_uploader_wrapper.go @@ -0,0 +1,63 @@ +// Code generated by mockery. 
DO NOT EDIT. + +package mock + +import ( + execution "github.com/onflow/flow-go/engine/execution" + mock "github.com/stretchr/testify/mock" +) + +// RetryableUploaderWrapper is an autogenerated mock type for the RetryableUploaderWrapper type +type RetryableUploaderWrapper struct { + mock.Mock +} + +// RetryUpload provides a mock function with no fields +func (_m *RetryableUploaderWrapper) RetryUpload() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RetryUpload") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Upload provides a mock function with given fields: computationResult +func (_m *RetryableUploaderWrapper) Upload(computationResult *execution.ComputationResult) error { + ret := _m.Called(computationResult) + + if len(ret) == 0 { + panic("no return value specified for Upload") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*execution.ComputationResult) error); ok { + r0 = rf(computationResult) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewRetryableUploaderWrapper creates a new instance of RetryableUploaderWrapper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRetryableUploaderWrapper(t interface { + mock.TestingT + Cleanup(func()) +}) *RetryableUploaderWrapper { + mock := &RetryableUploaderWrapper{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/ingestion/uploader/mock/uploader.go b/engine/execution/ingestion/uploader/mock/uploader.go index 32aea526dd8..6ac5d1da993 100644 --- a/engine/execution/ingestion/uploader/mock/uploader.go +++ b/engine/execution/ingestion/uploader/mock/uploader.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.13.1. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type Uploader struct { func (_m *Uploader) Upload(computationResult *execution.ComputationResult) error { ret := _m.Called(computationResult) + if len(ret) == 0 { + panic("no return value specified for Upload") + } + var r0 error if rf, ok := ret.Get(0).(func(*execution.ComputationResult) error); ok { r0 = rf(computationResult) @@ -26,13 +30,12 @@ func (_m *Uploader) Upload(computationResult *execution.ComputationResult) error return r0 } -type mockConstructorTestingTNewUploader interface { +// NewUploader creates a new instance of Uploader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUploader(t interface { mock.TestingT Cleanup(func()) -} - -// NewUploader creates a new instance of Uploader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
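// As an aside, a sketch of how the regenerated constructor is typically used in a
// test (hypothetical snippet, not part of this patch; the helper name and the
// stubbed call are assumptions):
func sketchUploaderMockUsage(tb interface {
	mock.TestingT
	Cleanup(func())
}) {
	u := NewUploader(tb) // AssertExpectations is registered automatically via Cleanup
	u.On("Upload", mock.Anything).Return(nil)

	// with the newer mockery template, calling a method whose return values were
	// never stubbed panics with "no return value specified" instead of silently
	// returning zero values
	_ = u.Upload(&execution.ComputationResult{})
}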
-func NewUploader(t mockConstructorTestingTNewUploader) *Uploader { +}) *Uploader { mock := &Uploader{} mock.Mock.Test(t) diff --git a/engine/execution/ingestion/uploader/model.go b/engine/execution/ingestion/uploader/model.go index ba01f27ca28..fc39dd08393 100644 --- a/engine/execution/ingestion/uploader/model.go +++ b/engine/execution/ingestion/uploader/model.go @@ -29,9 +29,10 @@ func ComputationResultToBlockData(computationResult *execution.ComputationResult txResults[i] = &AllResults[i] } - events := make([]*flow.Event, 0) - for _, e := range computationResult.AllEvents() { - events = append(events, &e) + eventsList := computationResult.AllEvents() + events := make([]*flow.Event, len(eventsList)) + for i := 0; i < len(eventsList); i++ { + events[i] = &eventsList[i] } trieUpdates := make( diff --git a/engine/execution/ingestion/uploader/model_test.go b/engine/execution/ingestion/uploader/model_test.go index c58979eb44f..5f78824ebe4 100644 --- a/engine/execution/ingestion/uploader/model_test.go +++ b/engine/execution/ingestion/uploader/model_test.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/pathfinder" "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -29,12 +30,23 @@ func Test_ComputationResultToBlockDataConversion(t *testing.T) { assert.Equal(t, result, *blockData.TxResults[i]) } - // ramtin: warning returned events are not preserving orders, - // but since we are going to depricate this part of logic, - // I'm not going to spend more time fixing this mess + // Since returned events are not preserving orders, + // use map with event.ID() as key to confirm all events + // are included. allEvents := cr.AllEvents() require.Equal(t, len(allEvents), len(blockData.Events)) + eventsInBlockData := make(map[flow.Identifier]flow.Event) + for _, e := range blockData.Events { + eventsInBlockData[e.ID()] = *e + } + + for _, expectedEvent := range allEvents { + event, ok := eventsInBlockData[expectedEvent.ID()] + require.True(t, ok) + require.Equal(t, expectedEvent, event) + } + assert.Equal(t, len(expectedTrieUpdates), len(blockData.TrieUpdates)) assert.Equal(t, cr.CurrentEndState(), blockData.FinalStateCommitment) diff --git a/engine/execution/ingestion/uploader/retryable_uploader_wrapper.go b/engine/execution/ingestion/uploader/retryable_uploader_wrapper.go index 2ce8914b65a..e3fedb79098 100644 --- a/engine/execution/ingestion/uploader/retryable_uploader_wrapper.go +++ b/engine/execution/ingestion/uploader/retryable_uploader_wrapper.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/engine/execution" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/storage" @@ -34,6 +35,7 @@ type BadgerRetryableUploaderWrapper struct { results storage.ExecutionResults transactionResults storage.TransactionResults uploadStatusStore storage.ComputationResultUploadStatus + component.Component } func NewBadgerRetryableUploaderWrapper( @@ -99,17 +101,10 @@ func NewBadgerRetryableUploaderWrapper( results: results, transactionResults: transactionResults, uploadStatusStore: uploadStatusStore, + Component: uploader, // delegate to the AsyncUploader } } -func (b *BadgerRetryableUploaderWrapper) Ready() <-chan struct{} { - return b.uploader.Ready() -} - -func (b 
*BadgerRetryableUploaderWrapper) Done() <-chan struct{} { - return b.uploader.Done() -} - func (b *BadgerRetryableUploaderWrapper) Upload(computationResult *execution.ComputationResult) error { if computationResult == nil || computationResult.ExecutableBlock == nil || computationResult.ExecutableBlock.Block == nil { @@ -181,7 +176,7 @@ func (b *BadgerRetryableUploaderWrapper) reconstructComputationResult( executionDataID := executionResult.ExecutionDataID // retrieving BlockExecutionData from EDS - executionData, err := b.execDataDownloader.Download(b.unit.Ctx(), executionDataID) + executionData, err := b.execDataDownloader.Get(b.unit.Ctx(), executionDataID) if executionData == nil || err != nil { log.Error().Err(err).Msgf( "failed to retrieve BlockExecutionData from EDS with ID %s", executionDataID.String()) @@ -204,7 +199,7 @@ func (b *BadgerRetryableUploaderWrapper) reconstructComputationResult( // grabbing collections and guarantees from BadgerDB guarantees := make([]*flow.CollectionGuarantee, 0) - if block != nil && block.Payload != nil { + if block != nil { guarantees = block.Payload.Guarantees } @@ -219,8 +214,8 @@ func (b *BadgerRetryableUploaderWrapper) reconstructComputationResult( } completeCollections[collectionID] = &entity.CompleteCollection{ - Guarantee: guarantees[inx], - Transactions: collection.Transactions, + Guarantee: guarantees[inx], + Collection: collection, } } @@ -242,6 +237,8 @@ func (b *BadgerRetryableUploaderWrapper) reconstructComputationResult( CompleteCollections: completeCollections, } + // NOTE(#6777): The entity ultimately uploaded by this component is [uploader.BlockData], + // which does not include chunks, so we do not need to implement version-aware chunk construction here. compRes := execution.NewEmptyComputationResult(executableBlock) eventsByTxIndex := make(map[int]flow.EventsList, 0) diff --git a/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go b/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go index a22147b862e..be1cf891e13 100644 --- a/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go +++ b/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go @@ -1,6 +1,7 @@ package uploader import ( + "context" "sync" "testing" "time" @@ -11,9 +12,9 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/executiondatasync/execution_data" executionDataMock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/utils/unittest" "github.com/stretchr/testify/mock" @@ -26,13 +27,15 @@ import ( ) func Test_Upload_invoke(t *testing.T) { + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background()) + defer cancel() wg := sync.WaitGroup{} uploaderCalled := false dummyUploader := &DummyUploader{ f: func() error { - wg.Done() uploaderCalled = true + wg.Done() return nil }, } @@ -40,7 +43,7 @@ func Test_Upload_invoke(t *testing.T) { 1*time.Nanosecond, 1, zerolog.Nop(), &metrics.NoopCollector{}) testRetryableUploaderWrapper := createTestBadgerRetryableUploaderWrapper(asyncUploader) - defer testRetryableUploaderWrapper.Done() + testRetryableUploaderWrapper.Start(ctx) // nil input - no call to Upload() err := testRetryableUploaderWrapper.Upload(nil) @@ -58,13 +61,15 @@ func Test_Upload_invoke(t *testing.T) { } func Test_RetryUpload(t *testing.T) { + ctx, 
cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background()) + defer cancel() wg := sync.WaitGroup{} wg.Add(1) uploaderCalled := false dummyUploader := &DummyUploader{ f: func() error { - wg.Done() uploaderCalled = true + wg.Done() return nil }, } @@ -72,7 +77,7 @@ func Test_RetryUpload(t *testing.T) { 1*time.Nanosecond, 1, zerolog.Nop(), &metrics.NoopCollector{}) testRetryableUploaderWrapper := createTestBadgerRetryableUploaderWrapper(asyncUploader) - defer testRetryableUploaderWrapper.Done() + testRetryableUploaderWrapper.Start(ctx) err := testRetryableUploaderWrapper.RetryUpload() wg.Wait() @@ -82,6 +87,8 @@ func Test_RetryUpload(t *testing.T) { } func Test_AsyncUploaderCallback(t *testing.T) { + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background()) + defer cancel() wgUploadCalleded := sync.WaitGroup{} wgUploadCalleded.Add(1) @@ -95,7 +102,7 @@ func Test_AsyncUploaderCallback(t *testing.T) { 1*time.Nanosecond, 1, zerolog.Nop(), &metrics.NoopCollector{}) testRetryableUploaderWrapper := createTestBadgerRetryableUploaderWrapper(asyncUploader) - defer testRetryableUploaderWrapper.Done() + testRetryableUploaderWrapper.Start(ctx) testComputationResult := createTestComputationResult() err := testRetryableUploaderWrapper.Upload(testComputationResult) @@ -118,19 +125,25 @@ func Test_ReconstructComputationResultFromStorage(t *testing.T) { }, } testEvents := []flow.Event{ - unittest.EventFixture(flow.EventAccountCreated, 0, 0, flow.HashToID([]byte{11, 22, 33}), 200), + unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountUpdated), + unittest.Event.WithTransactionIndex(0), + unittest.Event.WithEventIndex(0), + unittest.Event.WithTransactionID(flow.HashToID([]byte{11, 22, 33})), + ), } testCollectionID := flow.HashToID([]byte{0xA, 0xB, 0xC}) - testBlock := &flow.Block{ - Header: &flow.Header{}, - Payload: &flow.Payload{ - Guarantees: []*flow.CollectionGuarantee{ + + testBlock := unittest.BlockFixture( + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees([]*flow.CollectionGuarantee{ { CollectionID: testCollectionID, }, - }, - }, - } + }..., + )), + ), + ) testTransactionBody := &flow.TransactionBody{ Script: []byte("random script"), } @@ -169,7 +182,7 @@ func Test_ReconstructComputationResultFromStorage(t *testing.T) { mockComputationResultStorage.On("Upsert", testBlockID, mock.Anything).Return(nil) mockExecutionDataDowloader := new(executionDataMock.Downloader) - mockExecutionDataDowloader.On("Download", mock.Anything, testEDID).Return( + mockExecutionDataDowloader.On("Get", mock.Anything, testEDID).Return( &execution_data.BlockExecutionData{ BlockID: testBlockID, ChunkExecutionDatas: testChunkExecutionDatas, @@ -202,7 +215,9 @@ func Test_ReconstructComputationResultFromStorage(t *testing.T) { Guarantee: &flow.CollectionGuarantee{ CollectionID: testCollectionID, }, - Transactions: []*flow.TransactionBody{testTransactionBody}, + Collection: &flow.Collection{ + Transactions: []*flow.TransactionBody{testTransactionBody}, + }, } expectedTestEvents := make([]*flow.Event, len(testEvents)) @@ -230,10 +245,8 @@ func Test_ReconstructComputationResultFromStorage(t *testing.T) { // AsyncUploader instance and proper mock storage and EDS interfaces. 
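// The tests above all follow the same lifecycle pattern now that the wrapper is a
// component.Component: start with a mock signaler context, cancel to begin shutdown,
// then wait on Done(). Distilled into a sketch (the helper name and the component
// and time imports are assumptions for illustration):
func sketchComponentLifecycle(t *testing.T, c component.Component) {
	ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background())
	c.Start(ctx)

	// ... exercise the component ...

	cancel() // request shutdown
	unittest.RequireCloseBefore(t, c.Done(), time.Second, "component did not stop in time")
}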
func createTestBadgerRetryableUploaderWrapper(asyncUploader *AsyncUploader) *BadgerRetryableUploaderWrapper { mockBlocksStorage := new(storageMock.Blocks) - mockBlocksStorage.On("ByID", mock.Anything).Return(&flow.Block{ - Header: &flow.Header{}, - Payload: nil, - }, nil) + mockBlocksStorage.On("ByID", mock.Anything). + Return(unittest.BlockFixture(), nil) mockCommitsStorage := new(storageMock.Commits) mockCommitsStorage.On("ByBlockID", mock.Anything).Return(nil, nil) @@ -259,8 +272,7 @@ func createTestBadgerRetryableUploaderWrapper(asyncUploader *AsyncUploader) *Bad mockComputationResultStorage.On("Upsert", mock.Anything, mock.Anything).Return(nil) mockExecutionDataDowloader := new(executionDataMock.Downloader) - mockExecutionDataDowloader.On("Add", mock.Anything, mock.Anything).Return(flow.ZeroID, nil, nil) - mockExecutionDataDowloader.On("Download", mock.Anything, mock.Anything).Return( + mockExecutionDataDowloader.On("Get", mock.Anything, mock.Anything).Return( &execution_data.BlockExecutionData{ BlockID: flow.ZeroID, ChunkExecutionDatas: make([]*execution_data.ChunkExecutionData, 0), diff --git a/engine/execution/ingestion/uploader/uploader.go b/engine/execution/ingestion/uploader/uploader.go index f9486ea99d8..2abb6a9078b 100644 --- a/engine/execution/ingestion/uploader/uploader.go +++ b/engine/execution/ingestion/uploader/uploader.go @@ -6,12 +6,12 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/engine" + "github.com/sethvargo/go-retry" + "github.com/onflow/flow-go/engine/execution" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/utils/logging" - - "github.com/sethvargo/go-retry" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" ) type Uploader interface { @@ -26,74 +26,103 @@ func NewAsyncUploader(uploader Uploader, maxRetryNumber uint64, log zerolog.Logger, metrics module.ExecutionMetrics) *AsyncUploader { - return &AsyncUploader{ - unit: engine.NewUnit(), + a := &AsyncUploader{ uploader: uploader, log: log.With().Str("component", "block_data_uploader").Logger(), metrics: metrics, retryInitialTimeout: retryInitialTimeout, maxRetryNumber: maxRetryNumber, + // we use a channel rather than a Fifoqueue here because a Fifoqueue might drop items when full, + // but it is not acceptable to skip uploading an execution result + queue: make(chan *execution.ComputationResult, 20000), + } + builder := component.NewComponentManagerBuilder() + for i := 0; i < 10; i++ { + builder.AddWorker(a.UploadWorker) } + a.cm = builder.Build() + a.Component = a.cm + return a } // AsyncUploader wraps up another Uploader instance and make its upload asynchronous type AsyncUploader struct { - module.ReadyDoneAware - unit *engine.Unit uploader Uploader log zerolog.Logger metrics module.ExecutionMetrics retryInitialTimeout time.Duration maxRetryNumber uint64 onComplete OnCompleteFunc // callback function called after Upload is completed + queue chan *execution.ComputationResult + cm *component.ComponentManager + component.Component } -func (a *AsyncUploader) Ready() <-chan struct{} { - return a.unit.Ready() -} - -func (a *AsyncUploader) Done() <-chan struct{} { - return a.unit.Done() +// UploadWorker implements a component worker which asynchronously uploads computation results +// from the execution node (after a block is executed) to storage such as a GCP bucket or S3 bucket. 
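// The NewAsyncUploader constructor above is an instance of the standard
// ComponentManager worker-pool pattern: N workers all draining one shared queue
// until shutdown. Distilled into a generic sketch (the function and its
// parameters are assumptions for illustration, not part of this patch):
func sketchWorkerPool(
	queue chan *execution.ComputationResult,
	workers int,
	handle func(ctx context.Context, cr *execution.ComputationResult),
) component.Component {
	builder := component.NewComponentManagerBuilder()
	for i := 0; i < workers; i++ {
		builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
			ready() // signal readiness before entering the drain loop
			for {
				select {
				case <-ctx.Done():
					return
				case cr := <-queue:
					handle(ctx, cr)
				}
			}
		})
	}
	return builder.Build()
}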
+func (a *AsyncUploader) UploadWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + + done := ctx.Done() + for { + select { + case <-done: + return + case computationResult := <-a.queue: + a.UploadTask(ctx, computationResult) + } + } } func (a *AsyncUploader) SetOnCompleteCallback(onComplete OnCompleteFunc) { a.onComplete = onComplete } +// Upload adds the computation result to a queue to be processed asynchronously by workers, +// ensuring that multiple uploads can be run in parallel. +// No errors expected during normal operation. func (a *AsyncUploader) Upload(computationResult *execution.ComputationResult) error { + a.queue <- computationResult + return nil +} +// UploadTask implements retrying for uploading computation results. +// When the upload is complete, the callback will be called with the result (for example, +// to record that the upload was successful) and any error. +// No errors expected during normal operation. +func (a *AsyncUploader) UploadTask(ctx context.Context, computationResult *execution.ComputationResult) { backoff := retry.NewFibonacci(a.retryInitialTimeout) backoff = retry.WithMaxRetries(a.maxRetryNumber, backoff) - a.unit.Launch(func() { - a.metrics.ExecutionBlockDataUploadStarted() - start := time.Now() - - a.log.Debug().Msgf("computation result of block %s is being uploaded", - computationResult.ExecutableBlock.ID().String()) + a.metrics.ExecutionBlockDataUploadStarted() + start := time.Now() - err := retry.Do(a.unit.Ctx(), backoff, func(ctx context.Context) error { - err := a.uploader.Upload(computationResult) - if err != nil { - a.log.Warn().Err(err).Msg("error while uploading block data, retrying") - } - return retry.RetryableError(err) - }) + a.log.Debug().Msgf("computation result of block %s is being uploaded", + computationResult.ExecutableBlock.BlockID().String()) + err := retry.Do(ctx, backoff, func(ctx context.Context) error { + err := a.uploader.Upload(computationResult) if err != nil { - a.log.Error().Err(err). - Hex("block_id", logging.Entity(computationResult.ExecutableBlock)). - Msg("failed to upload block data") - } else { - a.log.Debug().Msgf("computation result of block %s was successfully uploaded", - computationResult.ExecutableBlock.ID().String()) + a.log.Warn().Err(err).Msg("error while uploading block data, retrying") } + return retry.RetryableError(err) + }) - a.metrics.ExecutionBlockDataUploadFinished(time.Since(start)) + // We only log upload errors here because the errors originate from an external cloud provider + // and the upload success is not critical to correct continued operation of the node + if err != nil { + blockID := computationResult.ExecutableBlock.BlockID() + a.log.Error().Err(err). + Hex("block_id", blockID[:]). 
+ Msg("failed to upload block data") + } else { + a.log.Debug().Msgf("computation result of block %s was successfully uploaded", + computationResult.ExecutableBlock.BlockID().String()) + } - if a.onComplete != nil { - a.onComplete(computationResult, err) - } - }) - return nil + a.metrics.ExecutionBlockDataUploadFinished(time.Since(start)) + + if a.onComplete != nil { + a.onComplete(computationResult, err) + } } diff --git a/engine/execution/ingestion/uploader/uploader_test.go b/engine/execution/ingestion/uploader/uploader_test.go index a580c22b865..73a72b91507 100644 --- a/engine/execution/ingestion/uploader/uploader_test.go +++ b/engine/execution/ingestion/uploader/uploader_test.go @@ -2,6 +2,7 @@ package uploader import ( "bytes" + "context" "fmt" "runtime/debug" "sync" @@ -13,16 +14,17 @@ import ( "go.uber.org/atomic" "github.com/onflow/flow-go/engine/execution" - "github.com/onflow/flow-go/engine/execution/state/unittest" + exeunittest "github.com/onflow/flow-go/engine/execution/state/unittest" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" - testutils "github.com/onflow/flow-go/utils/unittest" - unittest2 "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest" ) func Test_AsyncUploader(t *testing.T) { - computationResult := unittest.ComputationResultFixture( - testutils.IdentifierFixture(), + computationResult := exeunittest.ComputationResultFixture( + t, + unittest.IdentifierFixture(), nil) t.Run("uploads are run in parallel and emit metrics", func(t *testing.T) { @@ -45,6 +47,8 @@ func Test_AsyncUploader(t *testing.T) { metrics := &DummyCollector{} async := NewAsyncUploader(uploader, 1*time.Nanosecond, 1, zerolog.Nop(), metrics) + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background()) + async.Start(ctx) err := async.Upload(computationResult) require.NoError(t, err) @@ -62,6 +66,8 @@ func Test_AsyncUploader(t *testing.T) { wgContinueUpload.Done() //release all // shut down component + cancel() + unittest.AssertClosesBefore(t, async.Done(), 1*time.Second, "async uploader did not finish in time") <-async.Done() require.Equal(t, int64(0), metrics.Counter.Load()) @@ -88,6 +94,9 @@ func Test_AsyncUploader(t *testing.T) { } async := NewAsyncUploader(uploader, 1*time.Nanosecond, 5, zerolog.Nop(), &metrics.NoopCollector{}) + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background()) + async.Start(ctx) + defer cancel() err := async.Upload(computationResult) require.NoError(t, err) @@ -106,7 +115,7 @@ func Test_AsyncUploader(t *testing.T) { // 2. shut down async uploader right after upload initiated (not completed) // 3. 
assert that upload called only once even when trying to use retry mechanism
 	t.Run("stopping component stops retrying", func(t *testing.T) {
-		testutils.SkipUnless(t, testutils.TEST_FLAKY, "flaky")
+		unittest.SkipUnless(t, unittest.TEST_FLAKY, "flaky")
 
 		callCount := 0
 		t.Log("test started grID:", string(bytes.Fields(debug.Stack())[1]))
@@ -150,6 +159,8 @@ func Test_AsyncUploader(t *testing.T) {
 		}
 		t.Log("about to create NewAsyncUploader grID:", string(bytes.Fields(debug.Stack())[1]))
 		async := NewAsyncUploader(uploader, 1*time.Nanosecond, 5, zerolog.Nop(), &metrics.NoopCollector{})
+		ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background())
+		async.Start(ctx)
 		t.Log("about to call async.Upload() grID:", string(bytes.Fields(debug.Stack())[1]))
 		err := async.Upload(computationResult) // doesn't matter what we upload
 		require.NoError(t, err)
@@ -162,11 +173,11 @@ func Test_AsyncUploader(t *testing.T) {
 
 		// stop component and check that it's fully stopped
 		t.Log("about to initiate shutdown grID: ", string(bytes.Fields(debug.Stack())[1]))
-		c := async.Done()
+		cancel()
 		t.Log("about to notify upload() that shutdown started and can continue uploading grID:", string(bytes.Fields(debug.Stack())[1]))
 		wgShutdownStarted.Done()
 		t.Log("about to check async done channel is closed grID:", string(bytes.Fields(debug.Stack())[1]))
-		unittest2.RequireCloseBefore(t, c, 1*time.Second, "async uploader not closed in time")
+		unittest.RequireCloseBefore(t, async.Done(), 1*time.Second, "async uploader not closed in time")
 
 		t.Log("about to check if callCount is 1 grID:", string(bytes.Fields(debug.Stack())[1]))
 		require.Equal(t, 1, callCount)
@@ -189,12 +200,15 @@ func Test_AsyncUploader(t *testing.T) {
 		async.SetOnCompleteCallback(func(computationResult *execution.ComputationResult, err error) {
 			onCompleteCallbackCalled = true
 		})
+		ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background())
+		async.Start(ctx)
 
 		err := async.Upload(computationResult)
 		require.NoError(t, err)
 
-		wgUploadCalleded.Wait()
-		<-async.Done()
+		unittest.AssertReturnsBefore(t, wgUploadCalleded.Wait, time.Second)
+		cancel()
+		unittest.AssertClosesBefore(t, async.Done(), 1*time.Second, "async uploader not done in time")
 
 		require.True(t, onCompleteCallbackCalled)
 	})
diff --git a/engine/execution/messages.go b/engine/execution/messages.go
index 64763ff0a46..85f4401f230 100644
--- a/engine/execution/messages.go
+++ b/engine/execution/messages.go
@@ -5,13 +5,19 @@ import (
 	"github.com/onflow/flow-go/module/mempool/entity"
 )
 
+// ComputationResult captures the artifacts of executing a block's collections,
+// the collection attestation results, and the full execution receipt, as sent by the Execution Node.
+// CAUTION: This type is used to represent both a complete ComputationResult and a partially constructed ComputationResult.
+// TODO: Consider using a Builder type to represent the partially constructed model.
 type ComputationResult struct {
 	*BlockExecutionResult
 	*BlockAttestationResult
 
-	*flow.ExecutionReceipt
+	ExecutionReceipt *flow.ExecutionReceipt
 }
 
+// NewEmptyComputationResult creates an empty ComputationResult.
+// Constructing a ComputationResult is allowed only via this constructor.
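// With the receipt now a named field rather than an embedded *flow.ExecutionReceipt,
// access is explicit and the receipt's methods are no longer promoted onto
// ComputationResult. A sketch of the resulting access pattern (illustrative only;
// the helper is an assumption, not part of this patch):
func sketchReceiptAccess(cr *ComputationResult) flow.Identifier {
	// before: cr.ID() could resolve through the embedded receipt;
	// now the receipt is always addressed by name:
	return cr.ExecutionReceipt.ID()
}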
func NewEmptyComputationResult( block *entity.ExecutableBlock, ) *ComputationResult { diff --git a/engine/execution/mock/executed_finalized_wal.go b/engine/execution/mock/executed_finalized_wal.go new file mode 100644 index 00000000000..e1805e7b7f1 --- /dev/null +++ b/engine/execution/mock/executed_finalized_wal.go @@ -0,0 +1,95 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + execution "github.com/onflow/flow-go/engine/execution" + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// ExecutedFinalizedWAL is an autogenerated mock type for the ExecutedFinalizedWAL type +type ExecutedFinalizedWAL struct { + mock.Mock +} + +// Append provides a mock function with given fields: height, registers +func (_m *ExecutedFinalizedWAL) Append(height uint64, registers flow.RegisterEntries) error { + ret := _m.Called(height, registers) + + if len(ret) == 0 { + panic("no return value specified for Append") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64, flow.RegisterEntries) error); ok { + r0 = rf(height, registers) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetReader provides a mock function with given fields: height +func (_m *ExecutedFinalizedWAL) GetReader(height uint64) execution.WALReader { + ret := _m.Called(height) + + if len(ret) == 0 { + panic("no return value specified for GetReader") + } + + var r0 execution.WALReader + if rf, ok := ret.Get(0).(func(uint64) execution.WALReader); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(execution.WALReader) + } + } + + return r0 +} + +// Latest provides a mock function with no fields +func (_m *ExecutedFinalizedWAL) Latest() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Latest") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewExecutedFinalizedWAL creates a new instance of ExecutedFinalizedWAL. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutedFinalizedWAL(t interface { + mock.TestingT + Cleanup(func()) +}) *ExecutedFinalizedWAL { + mock := &ExecutedFinalizedWAL{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/mock/extendable_storage_snapshot.go b/engine/execution/mock/extendable_storage_snapshot.go new file mode 100644 index 00000000000..0a3a558bb4f --- /dev/null +++ b/engine/execution/mock/extendable_storage_snapshot.go @@ -0,0 +1,99 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + execution "github.com/onflow/flow-go/engine/execution" + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// ExtendableStorageSnapshot is an autogenerated mock type for the ExtendableStorageSnapshot type +type ExtendableStorageSnapshot struct { + mock.Mock +} + +// Commitment provides a mock function with no fields +func (_m *ExtendableStorageSnapshot) Commitment() flow.StateCommitment { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Commitment") + } + + var r0 flow.StateCommitment + if rf, ok := ret.Get(0).(func() flow.StateCommitment); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.StateCommitment) + } + } + + return r0 +} + +// Extend provides a mock function with given fields: newCommit, updatedRegisters +func (_m *ExtendableStorageSnapshot) Extend(newCommit flow.StateCommitment, updatedRegisters map[flow.RegisterID]flow.RegisterValue) execution.ExtendableStorageSnapshot { + ret := _m.Called(newCommit, updatedRegisters) + + if len(ret) == 0 { + panic("no return value specified for Extend") + } + + var r0 execution.ExtendableStorageSnapshot + if rf, ok := ret.Get(0).(func(flow.StateCommitment, map[flow.RegisterID]flow.RegisterValue) execution.ExtendableStorageSnapshot); ok { + r0 = rf(newCommit, updatedRegisters) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(execution.ExtendableStorageSnapshot) + } + } + + return r0 +} + +// Get provides a mock function with given fields: id +func (_m *ExtendableStorageSnapshot) Get(id flow.RegisterID) (flow.RegisterValue, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 flow.RegisterValue + var r1 error + if rf, ok := ret.Get(0).(func(flow.RegisterID) (flow.RegisterValue, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.RegisterID) flow.RegisterValue); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.RegisterValue) + } + } + + if rf, ok := ret.Get(1).(func(flow.RegisterID) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewExtendableStorageSnapshot creates a new instance of ExtendableStorageSnapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExtendableStorageSnapshot(t interface { + mock.TestingT + Cleanup(func()) +}) *ExtendableStorageSnapshot { + mock := &ExtendableStorageSnapshot{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/mock/finalized_reader.go b/engine/execution/mock/finalized_reader.go new file mode 100644 index 00000000000..d1117d4dfc1 --- /dev/null +++ b/engine/execution/mock/finalized_reader.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// FinalizedReader is an autogenerated mock type for the FinalizedReader type +type FinalizedReader struct { + mock.Mock +} + +// FinalizedBlockIDAtHeight provides a mock function with given fields: height +func (_m *FinalizedReader) FinalizedBlockIDAtHeight(height uint64) (flow.Identifier, error) { + ret := _m.Called(height) + + if len(ret) == 0 { + panic("no return value specified for FinalizedBlockIDAtHeight") + } + + var r0 flow.Identifier + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (flow.Identifier, error)); ok { + return rf(height) + } + if rf, ok := ret.Get(0).(func(uint64) flow.Identifier); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewFinalizedReader creates a new instance of FinalizedReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFinalizedReader(t interface { + mock.TestingT + Cleanup(func()) +}) *FinalizedReader { + mock := &FinalizedReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/mock/in_memory_register_store.go b/engine/execution/mock/in_memory_register_store.go new file mode 100644 index 00000000000..40975612100 --- /dev/null +++ b/engine/execution/mock/in_memory_register_store.go @@ -0,0 +1,169 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// InMemoryRegisterStore is an autogenerated mock type for the InMemoryRegisterStore type +type InMemoryRegisterStore struct { + mock.Mock +} + +// GetRegister provides a mock function with given fields: height, blockID, register +func (_m *InMemoryRegisterStore) GetRegister(height uint64, blockID flow.Identifier, register flow.RegisterID) (flow.RegisterValue, error) { + ret := _m.Called(height, blockID, register) + + if len(ret) == 0 { + panic("no return value specified for GetRegister") + } + + var r0 flow.RegisterValue + var r1 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier, flow.RegisterID) (flow.RegisterValue, error)); ok { + return rf(height, blockID, register) + } + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier, flow.RegisterID) flow.RegisterValue); ok { + r0 = rf(height, blockID, register) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.RegisterValue) + } + } + + if rf, ok := ret.Get(1).(func(uint64, flow.Identifier, flow.RegisterID) error); ok { + r1 = rf(height, blockID, register) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetUpdatedRegisters provides a mock function with given fields: height, blockID +func (_m *InMemoryRegisterStore) GetUpdatedRegisters(height uint64, blockID flow.Identifier) (flow.RegisterEntries, error) { + ret := _m.Called(height, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetUpdatedRegisters") + } + + var r0 flow.RegisterEntries + var r1 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (flow.RegisterEntries, error)); ok { + return rf(height, blockID) + } + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) flow.RegisterEntries); ok { + r0 = rf(height, 
blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.RegisterEntries) + } + } + + if rf, ok := ret.Get(1).(func(uint64, flow.Identifier) error); ok { + r1 = rf(height, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsBlockExecuted provides a mock function with given fields: height, blockID +func (_m *InMemoryRegisterStore) IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) { + ret := _m.Called(height, blockID) + + if len(ret) == 0 { + panic("no return value specified for IsBlockExecuted") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (bool, error)); ok { + return rf(height, blockID) + } + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) bool); ok { + r0 = rf(height, blockID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(uint64, flow.Identifier) error); ok { + r1 = rf(height, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Prune provides a mock function with given fields: finalizedHeight, finalizedBlockID +func (_m *InMemoryRegisterStore) Prune(finalizedHeight uint64, finalizedBlockID flow.Identifier) error { + ret := _m.Called(finalizedHeight, finalizedBlockID) + + if len(ret) == 0 { + panic("no return value specified for Prune") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) error); ok { + r0 = rf(finalizedHeight, finalizedBlockID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PrunedHeight provides a mock function with no fields +func (_m *InMemoryRegisterStore) PrunedHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PrunedHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// SaveRegisters provides a mock function with given fields: height, blockID, parentID, registers +func (_m *InMemoryRegisterStore) SaveRegisters(height uint64, blockID flow.Identifier, parentID flow.Identifier, registers flow.RegisterEntries) error { + ret := _m.Called(height, blockID, parentID, registers) + + if len(ret) == 0 { + panic("no return value specified for SaveRegisters") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier, flow.Identifier, flow.RegisterEntries) error); ok { + r0 = rf(height, blockID, parentID, registers) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewInMemoryRegisterStore creates a new instance of InMemoryRegisterStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewInMemoryRegisterStore(t interface { + mock.TestingT + Cleanup(func()) +}) *InMemoryRegisterStore { + mock := &InMemoryRegisterStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/mock/on_disk_register_store.go b/engine/execution/mock/on_disk_register_store.go new file mode 100644 index 00000000000..01e7bab3b53 --- /dev/null +++ b/engine/execution/mock/on_disk_register_store.go @@ -0,0 +1,111 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// OnDiskRegisterStore is an autogenerated mock type for the OnDiskRegisterStore type +type OnDiskRegisterStore struct { + mock.Mock +} + +// FirstHeight provides a mock function with no fields +func (_m *OnDiskRegisterStore) FirstHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FirstHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// Get provides a mock function with given fields: ID, height +func (_m *OnDiskRegisterStore) Get(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) { + ret := _m.Called(ID, height) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 flow.RegisterValue + var r1 error + if rf, ok := ret.Get(0).(func(flow.RegisterID, uint64) (flow.RegisterValue, error)); ok { + return rf(ID, height) + } + if rf, ok := ret.Get(0).(func(flow.RegisterID, uint64) flow.RegisterValue); ok { + r0 = rf(ID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.RegisterValue) + } + } + + if rf, ok := ret.Get(1).(func(flow.RegisterID, uint64) error); ok { + r1 = rf(ID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestHeight provides a mock function with no fields +func (_m *OnDiskRegisterStore) LatestHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// Store provides a mock function with given fields: entries, height +func (_m *OnDiskRegisterStore) Store(entries flow.RegisterEntries, height uint64) error { + ret := _m.Called(entries, height) + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.RegisterEntries, uint64) error); ok { + r0 = rf(entries, height) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewOnDiskRegisterStore creates a new instance of OnDiskRegisterStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOnDiskRegisterStore(t interface { + mock.TestingT + Cleanup(func()) +}) *OnDiskRegisterStore { + mock := &OnDiskRegisterStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/mock/register_store.go b/engine/execution/mock/register_store.go new file mode 100644 index 00000000000..3df9af78fd2 --- /dev/null +++ b/engine/execution/mock/register_store.go @@ -0,0 +1,139 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// RegisterStore is an autogenerated mock type for the RegisterStore type +type RegisterStore struct { + mock.Mock +} + +// GetRegister provides a mock function with given fields: height, blockID, register +func (_m *RegisterStore) GetRegister(height uint64, blockID flow.Identifier, register flow.RegisterID) (flow.RegisterValue, error) { + ret := _m.Called(height, blockID, register) + + if len(ret) == 0 { + panic("no return value specified for GetRegister") + } + + var r0 flow.RegisterValue + var r1 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier, flow.RegisterID) (flow.RegisterValue, error)); ok { + return rf(height, blockID, register) + } + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier, flow.RegisterID) flow.RegisterValue); ok { + r0 = rf(height, blockID, register) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.RegisterValue) + } + } + + if rf, ok := ret.Get(1).(func(uint64, flow.Identifier, flow.RegisterID) error); ok { + r1 = rf(height, blockID, register) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsBlockExecuted provides a mock function with given fields: height, blockID +func (_m *RegisterStore) IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) { + ret := _m.Called(height, blockID) + + if len(ret) == 0 { + panic("no return value specified for IsBlockExecuted") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (bool, error)); ok { + return rf(height, blockID) + } + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) bool); ok { + r0 = rf(height, blockID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(uint64, flow.Identifier) error); ok { + r1 = rf(height, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LastFinalizedAndExecutedHeight provides a mock function with no fields +func (_m *RegisterStore) LastFinalizedAndExecutedHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LastFinalizedAndExecutedHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// OnBlockFinalized provides a mock function with no fields +func (_m *RegisterStore) OnBlockFinalized() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OnBlockFinalized") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveRegisters provides a mock function with given fields: header, registers +func (_m *RegisterStore) SaveRegisters(header *flow.Header, registers flow.RegisterEntries) error { + ret := _m.Called(header, registers) + + if len(ret) == 0 { + panic("no return value specified for SaveRegisters") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*flow.Header, flow.RegisterEntries) error); ok { + r0 = rf(header, registers) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewRegisterStore creates a new instance of RegisterStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
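All of these mockery-generated mocks share the same return-dispatch idiom: each `ret.Get(i)` is first type-asserted against a function type, so `Return` may be given either fixed values or functions of the call's arguments. A hedged usage sketch of constructors like the one that follows (standard testify/mockery conventions; the package aliases are illustrative):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	exemock "github.com/onflow/flow-go/engine/execution/mock"
	"github.com/onflow/flow-go/model/flow"
)

func TestRegisterStoreMockUsage(t *testing.T) {
	// The generated constructor binds the mock to t and asserts all
	// expectations automatically via t.Cleanup.
	rs := exemock.NewRegisterStore(t)

	// Fixed return value.
	rs.On("LastFinalizedAndExecutedHeight").Return(uint64(42))

	// Function-valued returns: evaluated per call with the actual arguments,
	// dispatched by the generated `if rf, ok := ret.Get(0).(func(...))` branches.
	rs.On("IsBlockExecuted", mock.Anything, mock.Anything).Return(
		func(height uint64, _ flow.Identifier) bool { return height <= 42 },
		func(uint64, flow.Identifier) error { return nil },
	)

	require.Equal(t, uint64(42), rs.LastFinalizedAndExecutedHeight())
	executed, err := rs.IsBlockExecuted(10, flow.ZeroID)
	require.NoError(t, err)
	require.True(t, executed)
}
```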
+func NewRegisterStore(t interface { + mock.TestingT + Cleanup(func()) +}) *RegisterStore { + mock := &RegisterStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/mock/register_store_notifier.go b/engine/execution/mock/register_store_notifier.go new file mode 100644 index 00000000000..1d50c4c01d0 --- /dev/null +++ b/engine/execution/mock/register_store_notifier.go @@ -0,0 +1,29 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// RegisterStoreNotifier is an autogenerated mock type for the RegisterStoreNotifier type +type RegisterStoreNotifier struct { + mock.Mock +} + +// OnFinalizedAndExecutedHeightUpdated provides a mock function with given fields: height +func (_m *RegisterStoreNotifier) OnFinalizedAndExecutedHeightUpdated(height uint64) { + _m.Called(height) +} + +// NewRegisterStoreNotifier creates a new instance of RegisterStoreNotifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRegisterStoreNotifier(t interface { + mock.TestingT + Cleanup(func()) +}) *RegisterStoreNotifier { + mock := &RegisterStoreNotifier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/mock/script_executor.go b/engine/execution/mock/script_executor.go new file mode 100644 index 00000000000..ce31e1adb2d --- /dev/null +++ b/engine/execution/mock/script_executor.go @@ -0,0 +1,127 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// ScriptExecutor is an autogenerated mock type for the ScriptExecutor type +type ScriptExecutor struct { + mock.Mock +} + +// ExecuteScriptAtBlockID provides a mock function with given fields: ctx, script, arguments, blockID +func (_m *ScriptExecutor) ExecuteScriptAtBlockID(ctx context.Context, script []byte, arguments [][]byte, blockID flow.Identifier) ([]byte, uint64, error) { + ret := _m.Called(ctx, script, arguments, blockID) + + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockID") + } + + var r0 []byte + var r1 uint64 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, flow.Identifier) ([]byte, uint64, error)); ok { + return rf(ctx, script, arguments, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, flow.Identifier) []byte); ok { + r0 = rf(ctx, script, arguments, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, [][]byte, flow.Identifier) uint64); ok { + r1 = rf(ctx, script, arguments, blockID) + } else { + r1 = ret.Get(1).(uint64) + } + + if rf, ok := ret.Get(2).(func(context.Context, []byte, [][]byte, flow.Identifier) error); ok { + r2 = rf(ctx, script, arguments, blockID) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// GetAccount provides a mock function with given fields: ctx, address, blockID +func (_m *ScriptExecutor) GetAccount(ctx context.Context, address flow.Address, blockID flow.Identifier) (*flow.Account, error) { + ret := _m.Called(ctx, address, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + + var r0 *flow.Account + var r1 error + if rf, ok := 
ret.Get(0).(func(context.Context, flow.Address, flow.Identifier) (*flow.Account, error)); ok { + return rf(ctx, address, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, flow.Identifier) *flow.Account); ok { + r0 = rf(ctx, address, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Account) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, flow.Identifier) error); ok { + r1 = rf(ctx, address, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRegisterAtBlockID provides a mock function with given fields: ctx, owner, key, blockID +func (_m *ScriptExecutor) GetRegisterAtBlockID(ctx context.Context, owner []byte, key []byte, blockID flow.Identifier) ([]byte, error) { + ret := _m.Called(ctx, owner, key, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetRegisterAtBlockID") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte, flow.Identifier) ([]byte, error)); ok { + return rf(ctx, owner, key, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte, flow.Identifier) []byte); ok { + r0 = rf(ctx, owner, key, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, []byte, flow.Identifier) error); ok { + r1 = rf(ctx, owner, key, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewScriptExecutor creates a new instance of ScriptExecutor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewScriptExecutor(t interface { + mock.TestingT + Cleanup(func()) +}) *ScriptExecutor { + mock := &ScriptExecutor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/mock/wal_reader.go b/engine/execution/mock/wal_reader.go new file mode 100644 index 00000000000..9cc87a5401a --- /dev/null +++ b/engine/execution/mock/wal_reader.go @@ -0,0 +1,64 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// WALReader is an autogenerated mock type for the WALReader type +type WALReader struct { + mock.Mock +} + +// Next provides a mock function with no fields +func (_m *WALReader) Next() (uint64, flow.RegisterEntries, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Next") + } + + var r0 uint64 + var r1 flow.RegisterEntries + var r2 error + if rf, ok := ret.Get(0).(func() (uint64, flow.RegisterEntries, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() flow.RegisterEntries); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(flow.RegisterEntries) + } + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewWALReader creates a new instance of WALReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewWALReader(t interface { + mock.TestingT + Cleanup(func()) +}) *WALReader { + mock := &WALReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/provider/engine.go b/engine/execution/provider/engine.go index 2b1b94a1620..1bdab991ed1 100644 --- a/engine/execution/provider/engine.go +++ b/engine/execution/provider/engine.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "math/rand" "time" "github.com/rs/zerolog" @@ -25,11 +24,28 @@ import ( "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/rand" ) type ProviderEngine interface { network.MessageProcessor - BroadcastExecutionReceipt(context.Context, *flow.ExecutionReceipt) error + module.ReadyDoneAware + // BroadcastExecutionReceipt broadcasts an execution receipt to all nodes in the network. + // It skips broadcasting the receipt if the block is sealed, or the node is not authorized at the block. + // It returns true if the receipt is broadcasted, false otherwise. + BroadcastExecutionReceipt(context.Context, uint64, *flow.ExecutionReceipt) (bool, error) +} + +type NoopEngine struct { + module.NoopReadyDoneAware +} + +func (*NoopEngine) Process(channel channels.Channel, originID flow.Identifier, message interface{}) error { + return nil +} + +func (*NoopEngine) BroadcastExecutionReceipt(context.Context, uint64, *flow.ExecutionReceipt) (bool, error) { + return false, nil } const ( @@ -43,6 +59,8 @@ const ( DefaultChunkDataPackDeliveryTimeout = 10 * time.Second ) +var _ ProviderEngine = (*Engine)(nil) + // An Engine provides means of accessing data about execution state and broadcasts execution receipts to nodes in the network. // Also generates and saves execution receipts type Engine struct { @@ -73,7 +91,7 @@ type Engine struct { func New( logger zerolog.Logger, tracer module.Tracer, - net network.Network, + net network.EngineRegistry, state protocol.State, execState state.ReadOnlyExecutionState, metrics module.ExecutionMetrics, @@ -91,10 +109,10 @@ func New( engine.NewNotifier(), engine.Pattern{ // Match is called on every new message coming to this engine. - // Provider enigne only expects ChunkDataRequests. + // Provider engine only expects ChunkDataRequests. // Other message types are discarded by Match. Match: func(message *engine.Message) bool { - chdpReq, ok := message.Payload.(*messages.ChunkDataRequest) + chdpReq, ok := message.Payload.(*flow.ChunkDataRequest) if ok { log.Info(). Hex("chunk_id", logging.ID(chdpReq.ChunkID)). @@ -107,7 +125,7 @@ func New( // ChunkDataRequests. // It replaces the payload of message with requested chunk id. Map: func(message *engine.Message) (*engine.Message, bool) { - chdpReq := message.Payload.(*messages.ChunkDataRequest) + chdpReq := message.Payload.(*flow.ChunkDataRequest) return &engine.Message{ OriginID: message.OriginID, Payload: chdpReq.ChunkID, @@ -166,7 +184,7 @@ func (e *Engine) processQueuedChunkDataPackRequestsShovelerWorker(ctx irrecovera select { case <-e.chdpRequestHandler.GetNotifier(): // there is at list a single chunk data pack request queued up. 
- e.processAvailableMesssages(ctx) + e.processAvailableMessages(ctx) case <-ctx.Done(): // close the internal channel, the workers will drain the channel before exiting close(e.chdpRequestChannel) @@ -179,7 +197,7 @@ func (e *Engine) processQueuedChunkDataPackRequestsShovelerWorker(ctx irrecovera // processAvailableMesssages is a blocking method that reads all queued ChunkDataRequests till the queue gets empty. // Each ChunkDataRequest is processed by a single concurrent worker. However, there are limited number of such workers. // If there is no worker available for a request, the method blocks till one is available. -func (e *Engine) processAvailableMesssages(ctx irrecoverable.SignalerContext) { +func (e *Engine) processAvailableMessages(ctx irrecoverable.SignalerContext) { for { select { case <-ctx.Done(): @@ -199,7 +217,6 @@ func (e *Engine) processAvailableMesssages(ctx irrecoverable.SignalerContext) { // if it does happen, it means there is a bug in the queue implementation. ctx.Throw(fmt.Errorf("invalid chunk id type in chunk data pack request queue: %T", msg.Payload)) } - request := &mempool.ChunkDataPackRequest{ RequesterId: msg.OriginID, ChunkId: chunkId, @@ -311,12 +328,24 @@ func (e *Engine) deliverChunkDataResponse(chunkDataPack *flow.ChunkDataPack, req // sends requested chunk data pack to the requester deliveryStartTime := time.Now() + nonce, err := rand.Uint64() + if err != nil { + // TODO: this error should be returned by deliverChunkDataResponse + // it is logged for now, since the only possible error is a failure of the + // system entropy generation. Such an error causes failures in other + // components, where it is handled properly and leads to crashing the module. + lg.Error(). + Err(err). + Msg("could not generate nonce for chunk data response") + return + } + response := &messages.ChunkDataResponse{ - ChunkDataPack: *chunkDataPack, - Nonce: rand.Uint64(), + ChunkDataPack: flow.UntrustedChunkDataPack(*chunkDataPack), + Nonce: nonce, } - err := e.chunksConduit.Unicast(response, requesterId) + err = e.chunksConduit.Unicast(response, requesterId) if err != nil { lg.Warn(). Err(err). @@ -342,10 +371,36 @@ func (e *Engine) deliverChunkDataResponse(chunkDataPack *flow.ChunkDataPack, req lg.Info().Msg("chunk data pack request successfully replied") } -func (e *Engine) BroadcastExecutionReceipt(ctx context.Context, receipt *flow.ExecutionReceipt) error { +// BroadcastExecutionReceipt broadcasts an execution receipt to all nodes in the network. +// It skips broadcasting the receipt if the block is sealed, or the node is not authorized at the block. +// It returns true if the receipt is broadcasted, false otherwise. +func (e *Engine) BroadcastExecutionReceipt(ctx context.Context, height uint64, receipt *flow.ExecutionReceipt) (bool, error) { + // if the receipt is for a sealed block, then no need to broadcast it.
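+ // A receipt for an already-sealed block is redundant: sealing means consensus has + // already accepted a verified execution result for that block, so the recipients + // of this broadcast no longer need it.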
+ lastSealed, err := e.state.Sealed().Head() + if err != nil { + return false, fmt.Errorf("could not get sealed block before broadcasting: %w", err) + } + + isExecutedBlockSealed := height <= lastSealed.Height + + if isExecutedBlockSealed { + // no need to broadcast the receipt if the block is sealed + return false, nil + } + + blockID := receipt.ExecutionResult.BlockID + authorizedAtBlock, err := e.checkAuthorizedAtBlock(blockID) + if err != nil { + return false, fmt.Errorf("could not check staking status: %w", err) + } + + if !authorizedAtBlock { + return false, nil + } + finalState, err := receipt.ExecutionResult.FinalStateCommitment() if err != nil { - return fmt.Errorf("could not get final state: %w", err) + return false, fmt.Errorf("could not get final state: %w", err) } span, _ := e.tracer.StartSpanFromContext(ctx, trace.EXEBroadcastExecutionReceipt) @@ -357,16 +412,16 @@ func (e *Engine) BroadcastExecutionReceipt(ctx context.Context, receipt *flow.Ex Hex("final_state", finalState[:]). Msg("broadcasting execution receipt") - identities, err := e.state.Final().Identities(filter.HasRole(flow.RoleAccess, flow.RoleConsensus, + identities, err := e.state.Final().Identities(filter.HasRole[flow.Identity](flow.RoleAccess, flow.RoleConsensus, flow.RoleVerification)) if err != nil { - return fmt.Errorf("could not get consensus and verification identities: %w", err) + return false, fmt.Errorf("could not get consensus and verification identities: %w", err) } - err = e.receiptCon.Publish(receipt, identities.NodeIDs()...) + err = e.receiptCon.Publish((*messages.ExecutionReceipt)(receipt), identities.NodeIDs()...) if err != nil { - return fmt.Errorf("could not submit execution receipts: %w", err) + return false, fmt.Errorf("could not submit execution receipts: %w", err) } - return nil + return true, nil } diff --git a/engine/execution/provider/engine_test.go b/engine/execution/provider/engine_test.go index d47f4b0ccae..d1c441521cf 100644 --- a/engine/execution/provider/engine_test.go +++ b/engine/execution/provider/engine_test.go @@ -20,7 +20,7 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -28,51 +28,34 @@ import ( func TestProviderEngine_onChunkDataRequest(t *testing.T) { t.Run("non-existent chunk", func(t *testing.T) { - ps := mockprotocol.NewState(t) - net := mocknetwork.NewNetwork(t) + net := mocknetwork.NewEngineRegistry(t) chunkConduit := mocknetwork.NewConduit(t) - execState := state.NewExecutionState(t) - net.On("Register", channels.PushReceipts, mock.Anything).Return(&mocknetwork.Conduit{}, nil) net.On("Register", channels.ProvideChunks, mock.Anything).Return(chunkConduit, nil) + e, _, es, requestQueue := newTestEngine(t, net, true) - execState.On("ChunkDataPackByChunkID", mock.Anything).Return(nil, errors.New("not found!")) - requestQueue := queue.NewHeroStore(10, unittest.Logger(), metrics.NewNoopCollector()) - - e, err := New( - unittest.Logger(), - trace.NewNoopTracer(), - net, - ps, - execState, - metrics.NewNoopCollector(), - func(_ flow.Identifier) (bool, error) { return true, nil }, - requestQueue, - DefaultChunkDataPackRequestWorker, - DefaultChunkDataPackQueryTimeout, - DefaultChunkDataPackDeliveryTimeout) - require.NoError(t, err) + es.On("ChunkDataPackByChunkID",
mock.Anything).Return(nil, errors.New("not found!")) originIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) chunkID := unittest.IdentifierFixture() - req := &messages.ChunkDataRequest{ + req := &flow.ChunkDataRequest{ ChunkID: chunkID, Nonce: rand.Uint64(), } cancelCtx, cancel := context.WithCancel(context.Background()) defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) + ctx := irrecoverable.NewMockSignalerContext(t, cancelCtx) e.Start(ctx) // submit using non-existing origin ID unittest.RequireCloseBefore(t, e.Ready(), 100*time.Millisecond, "could not start engine") require.NoError(t, e.Process(channels.RequestChunks, originIdentity.NodeID, req)) require.Eventually(t, func() bool { - _, ok := requestQueue.Get() // ensuring all requests have been picked up from the queue. - return !ok + empty := requestQueue.Size() == 0 // ensuring all requests have been picked up from the queue. + return empty }, 1*time.Second, 10*time.Millisecond) cancel() @@ -83,38 +66,17 @@ func TestProviderEngine_onChunkDataRequest(t *testing.T) { }) t.Run("success", func(t *testing.T) { - ps := new(mockprotocol.State) - ss := new(mockprotocol.Snapshot) - net := new(mocknetwork.Network) + net := mocknetwork.NewEngineRegistry(t) chunkConduit := &mocknetwork.Conduit{} - execState := new(state.ExecutionState) - net.On("Register", channels.PushReceipts, mock.Anything).Return(&mocknetwork.Conduit{}, nil) net.On("Register", channels.ProvideChunks, mock.Anything).Return(chunkConduit, nil) - requestQueue := queue.NewHeroStore(10, unittest.Logger(), metrics.NewNoopCollector()) - - e, err := New( - unittest.Logger(), - trace.NewNoopTracer(), - net, - ps, - execState, - metrics.NewNoopCollector(), - func(_ flow.Identifier) (bool, error) { return true, nil }, - requestQueue, - DefaultChunkDataPackRequestWorker, - DefaultChunkDataPackQueryTimeout, - DefaultChunkDataPackDeliveryTimeout) - require.NoError(t, err) + e, _, es, requestQueue := newTestEngine(t, net, true) originIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) chunkID := unittest.IdentifierFixture() chunkDataPack := unittest.ChunkDataPackFixture(chunkID) - blockID := unittest.IdentifierFixture() - ps.On("AtBlockID", blockID).Return(ss).Once() - ss.On("Identity", originIdentity.NodeID).Return(originIdentity, nil) chunkConduit.On("Unicast", mock.Anything, originIdentity.NodeID). Run(func(args mock.Arguments) { res, ok := args[0].(*messages.ChunkDataResponse) @@ -125,24 +87,24 @@ func TestProviderEngine_onChunkDataRequest(t *testing.T) { }). Return(nil) - execState.On("ChunkDataPackByChunkID", chunkID).Return(chunkDataPack, nil) + es.On("ChunkDataPackByChunkID", chunkID).Return(chunkDataPack, nil) - req := &messages.ChunkDataRequest{ + req := &flow.ChunkDataRequest{ ChunkID: chunkID, Nonce: rand.Uint64(), } cancelCtx, cancel := context.WithCancel(context.Background()) defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) + ctx := irrecoverable.NewMockSignalerContext(t, cancelCtx) e.Start(ctx) // submit using non-existing origin ID unittest.RequireCloseBefore(t, e.Ready(), 100*time.Millisecond, "could not start engine") require.NoError(t, e.Process(channels.RequestChunks, originIdentity.NodeID, req)) require.Eventually(t, func() bool { - _, ok := requestQueue.Get() // ensuring all requests have been picked up from the queue. - return !ok + empty := requestQueue.Size() == 0 // ensuring all requests have been picked up from the queue. 
+ return empty }, 1*time.Second, 10*time.Millisecond) cancel() @@ -150,3 +112,93 @@ func TestProviderEngine_onChunkDataRequest(t *testing.T) { }) } + +func TestProviderEngine_BroadcastExecutionReceipt(t *testing.T) { + // prepare + net := mocknetwork.NewEngineRegistry(t) + chunkConduit := mocknetwork.NewConduit(t) + receiptConduit := mocknetwork.NewConduit(t) + net.On("Register", channels.PushReceipts, mock.Anything).Return(receiptConduit, nil) + net.On("Register", channels.ProvideChunks, mock.Anything).Return(chunkConduit, nil) + e, ps, _, _ := newTestEngine(t, net, true) + + sealedBlock := unittest.BlockHeaderFixture() + sealed := new(mockprotocol.Snapshot) + sealed.On("Head").Return(sealedBlock, nil) + ps.On("Sealed").Return(sealed) + sealedHeight := sealedBlock.Height + + receivers := unittest.IdentityListFixture(1) + snap := new(mockprotocol.Snapshot) + snap.On("Identities", mock.Anything).Return(receivers, nil) + ps.On("Final").Return(snap) + + // verify that a receipt above the sealed height will be broadcast + receipt1 := unittest.ExecutionReceiptFixture() + receipt1Msg := (*messages.ExecutionReceipt)(receipt1) + receiptConduit.On("Publish", receipt1Msg, receivers.NodeIDs()[0]).Return(nil) + + broadcasted, err := e.BroadcastExecutionReceipt(context.Background(), sealedHeight+1, receipt1) + require.NoError(t, err) + require.True(t, broadcasted) + + // verify that a receipt at the sealed height will NOT be broadcast + receipt2 := unittest.ExecutionReceiptFixture() + broadcasted, err = e.BroadcastExecutionReceipt(context.Background(), sealedHeight, receipt2) + require.NoError(t, err) + require.False(t, broadcasted) + + // verify that a receipt below the sealed height will NOT be broadcast + broadcasted, err = e.BroadcastExecutionReceipt(context.Background(), sealedHeight-1, receipt2) + require.NoError(t, err) + require.False(t, broadcasted) +} + +func TestProviderEngine_BroadcastExecutionUnauthorized(t *testing.T) { + net := mocknetwork.NewEngineRegistry(t) + chunkConduit := mocknetwork.NewConduit(t) + receiptConduit := mocknetwork.NewConduit(t) + net.On("Register", channels.PushReceipts, mock.Anything).Return(receiptConduit, nil) + net.On("Register", channels.ProvideChunks, mock.Anything).Return(chunkConduit, nil) + // make sure the node is not authorized for broadcasting + authorized := false + e, ps, _, _ := newTestEngine(t, net, authorized) + + sealedBlock := unittest.BlockHeaderFixture() + sealed := mockprotocol.NewSnapshot(t) + sealed.On("Head").Return(sealedBlock, nil) + ps.On("Sealed").Return(sealed) + sealedHeight := sealedBlock.Height + + // verify that an unstaked node will NOT broadcast + receipt2 := unittest.ExecutionReceiptFixture() + broadcasted, err := e.BroadcastExecutionReceipt(context.Background(), sealedHeight+1, receipt2) + require.NoError(t, err) + require.False(t, broadcasted) +} + +func newTestEngine(t *testing.T, net *mocknetwork.EngineRegistry, authorized bool) ( + *Engine, + *mockprotocol.State, + *state.ExecutionState, + *queue.HeroStore, +) { + ps := mockprotocol.NewState(t) + execState := state.NewExecutionState(t) + requestQueue := queue.NewHeroStore(10, unittest.Logger(), metrics.NewNoopCollector()) + + e, err := New( + unittest.Logger(), + trace.NewNoopTracer(), + net, + ps, + execState, + metrics.NewNoopCollector(), + func(_ flow.Identifier) (bool, error) { return authorized, nil }, + requestQueue, + DefaultChunkDataPackRequestWorker, + DefaultChunkDataPackQueryTimeout, + DefaultChunkDataPackDeliveryTimeout) + require.NoError(t, err) + return e, ps, execState, requestQueue
+} diff --git a/engine/execution/provider/mock/provider_engine.go b/engine/execution/provider/mock/provider_engine.go index 85d6cba1447..559292675b6 100644 --- a/engine/execution/provider/mock/provider_engine.go +++ b/engine/execution/provider/mock/provider_engine.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -17,15 +17,49 @@ type ProviderEngine struct { mock.Mock } -// BroadcastExecutionReceipt provides a mock function with given fields: _a0, _a1 -func (_m *ProviderEngine) BroadcastExecutionReceipt(_a0 context.Context, _a1 *flow.ExecutionReceipt) error { - ret := _m.Called(_a0, _a1) +// BroadcastExecutionReceipt provides a mock function with given fields: _a0, _a1, _a2 +func (_m *ProviderEngine) BroadcastExecutionReceipt(_a0 context.Context, _a1 uint64, _a2 *flow.ExecutionReceipt) (bool, error) { + ret := _m.Called(_a0, _a1, _a2) - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *flow.ExecutionReceipt) error); ok { - r0 = rf(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastExecutionReceipt") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, *flow.ExecutionReceipt) (bool, error)); ok { + return rf(_a0, _a1, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, *flow.ExecutionReceipt) bool); ok { + r0 = rf(_a0, _a1, _a2) } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, *flow.ExecutionReceipt) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Done provides a mock function with no fields +func (_m *ProviderEngine) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } } return r0 @@ -35,6 +69,10 @@ func (_m *ProviderEngine) BroadcastExecutionReceipt(_a0 context.Context, _a1 *fl func (_m *ProviderEngine) Process(channel channels.Channel, originID flow.Identifier, message interface{}) error { ret := _m.Called(channel, originID, message) + if len(ret) == 0 { + panic("no return value specified for Process") + } + var r0 error if rf, ok := ret.Get(0).(func(channels.Channel, flow.Identifier, interface{}) error); ok { r0 = rf(channel, originID, message) @@ -45,13 +83,32 @@ func (_m *ProviderEngine) Process(channel channels.Channel, originID flow.Identi return r0 } -type mockConstructorTestingTNewProviderEngine interface { - mock.TestingT - Cleanup(func()) +// Ready provides a mock function with no fields +func (_m *ProviderEngine) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 } // NewProviderEngine creates a new instance of ProviderEngine. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewProviderEngine(t mockConstructorTestingTNewProviderEngine) *ProviderEngine { +// The first argument is typically a *testing.T value. 
+func NewProviderEngine(t interface { + mock.TestingT + Cleanup(func()) +}) *ProviderEngine { mock := &ProviderEngine{} mock.Mock.Test(t) diff --git a/engine/execution/pruner/config.go b/engine/execution/pruner/config.go new file mode 100644 index 00000000000..1f0286b999c --- /dev/null +++ b/engine/execution/pruner/config.go @@ -0,0 +1,26 @@ +package pruner + +import ( + "math" + "time" +) + +type PruningConfig struct { + Threshold uint64 // The threshold is the number of blocks that we want to keep in the database. + BatchSize uint // The batch size is the number of blocks that we want to delete in one batch. + SleepAfterEachBatchCommit time.Duration // The sleep time after each batch commit. + SleepAfterEachIteration time.Duration // The sleep time after each iteration. +} + +var DefaultConfig = PruningConfig{ + Threshold: 30 * 60 * 60 * 24 * 1.2, // 30 days of blocks: 30 days * (60 * 60 * 24) seconds per day * 1.2 blocks per second + BatchSize: 1200, + // when choosing a value, consider the batch size and the block rate: + // for instance, + // at a block rate of 1.2 blocks/second and a batch size of 1200, + // a batch's worth of new blocks is produced every 1200 / 1.2 = 1000 seconds, + // so the sleep time should be smaller than 1000 seconds, otherwise + // the pruner is not able to keep up with the block generation. + SleepAfterEachBatchCommit: 12 * time.Second, + SleepAfterEachIteration: math.MaxInt64, // by default it's disabled so that we can slowly roll this feature out. +} diff --git a/engine/execution/pruner/core.go b/engine/execution/pruner/core.go new file mode 100644 index 00000000000..7040d80d1d1 --- /dev/null +++ b/engine/execution/pruner/core.go @@ -0,0 +1,200 @@ +package pruner + +import ( + "context" + "fmt" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/block_iterator" + "github.com/onflow/flow-go/module/block_iterator/executor" + "github.com/onflow/flow-go/module/block_iterator/latest" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" +) + +const NextHeightForUnprunedExecutionDataPackKey = "NextHeightForUnprunedExecutionDataPackKey" + +func LoopPruneExecutionDataFromRootToLatestSealed( + ctx context.Context, + log zerolog.Logger, + metrics module.ExecutionMetrics, + state protocol.State, + protocolDB storage.DB, + headers storage.Headers, + chunkDataPacks storage.ChunkDataPacks, + results storage.ExecutionResults, + chunkDataPacksDB *pebble.DB, + config PruningConfig, +) error { + + chunksDB := pebbleimpl.ToDB(chunkDataPacksDB) + // the creator can be reused to create a new block iterator that can iterate from the last + // checkpoint to the new latest (sealed) block. + creator, getNextAndLatest, err := makeBlockIteratorCreator(state, protocolDB, headers, chunksDB, config) + if err != nil { + return err + } + + pruner := NewChunkDataPackPruner(chunkDataPacks, results) + + // iterateAndPruneAll takes a block iterator and iterates through all the blocks + // and decides how to prune the chunk data packs.
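A quick sanity check of the defaults in `config.go` above (an illustrative sketch, separate from the engine code; the constants mirror `DefaultConfig`):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		batchSize      = 1200.0           // DefaultConfig.BatchSize
		blocksPerSec   = 1.2              // assumed block production rate
		sleepPerCommit = 12 * time.Second // DefaultConfig.SleepAfterEachBatchCommit
	)
	// time for a batch's worth of new blocks to be produced: 1200 / 1.2 = 1000s
	accumulation := time.Duration(batchSize / blocksPerSec * float64(time.Second))
	fmt.Printf("batch accumulates in %s; pruner sleeps %s per batch; keeps up: %v\n",
		accumulation, sleepPerCommit, sleepPerCommit < accumulation)
}
```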
+ iterateAndPruneAll := func(iter module.BlockIterator) error { + err := executor.IterateExecuteAndCommitInBatch( + ctx, log, metrics, iter, pruner, chunksDB, config.BatchSize, config.SleepAfterEachBatchCommit) + if err != nil { + return fmt.Errorf("failed to iterate, execute, and commit in batch: %w", err) + } + return nil + } + + for { + nextToPrune, latestToPrune, err := getNextAndLatest() + if err != nil { + return fmt.Errorf("failed to get next and latest to prune: %w", err) + } + + // report the target pruned height and last pruned height + lastPruned := nextToPrune - 1 + metrics.ExecutionLastChunkDataPackPrunedHeight(lastPruned) + metrics.ExecutionTargetChunkDataPackPrunedHeight(latestToPrune) + + if lastPruned > latestToPrune { + // this might happen if the threshold is increased after a restart in order to retain more data, + // which makes the latest prunable height go backwards. + + log.Warn(). + Uint64("threshold", config.Threshold). + Msgf("last pruned height %d is greater than latest to prune %d", lastPruned, latestToPrune) + } + + commitDuration := 2 * time.Millisecond // with default batch size 1200, the avg commit duration is 2ms + batchCount, totalDuration := EstimateBatchProcessing( + nextToPrune, latestToPrune, + config.BatchSize, config.SleepAfterEachBatchCommit, commitDuration) + + log.Info(). + Uint64("nextToPrune", nextToPrune). + Uint64("latestToPrune", latestToPrune). + Uint64("threshold", config.Threshold). + Uint("batchsize", config.BatchSize). + Dur("sleepAfterEachBatchCommit", config.SleepAfterEachBatchCommit). + Dur("sleepAfterEachIteration", config.SleepAfterEachIteration). + Uint64("batchCount", batchCount). + Str("totalDuration", totalDuration.String()). + Msgf("execution data pruning will start in %s at %s, complete at %s", + config.SleepAfterEachIteration, + time.Now().Add(config.SleepAfterEachIteration).UTC(), + time.Now().Add(config.SleepAfterEachIteration).Add(totalDuration).UTC(), + ) + + select { + case <-ctx.Done(): + return nil + // wait first so that we give the data pruning lower priority compared to other tasks. + // this also lets us disable the feature by setting the sleep time to a very large value, + // and it keeps the pruner responsive to context cancellation, meaning + // while the pruner is sleeping, it can be cancelled immediately. + case <-time.After(config.SleepAfterEachIteration): + } + + iter, hasNext, err := creator.Create() + if err != nil { + return fmt.Errorf("failed to create block iterator: %w", err) + } + + if !hasNext { + // no more blocks to iterate for now; wait for the next iteration. + continue + } + + err = iterateAndPruneAll(iter) + if err != nil { + return fmt.Errorf("failed to iterate, execute, and commit in batch: %w", err) + } + } +} + +// makeBlockIteratorCreator creates the block iterator creator +func makeBlockIteratorCreator( + state protocol.State, + protocolDB storage.DB, + headers storage.Headers, + chunkDataPacksDB storage.DB, + config PruningConfig, +) ( + module.IteratorCreator, + // this is for logging purposes, so that after each round of pruning, + // we can log and report metrics about the next and latest to prune + func() (nextToPrune uint64, latestToPrune uint64, err error), + error, // any error is an exception +) { + root := state.Params().SealedRoot() + sealedAndExecuted := latest.NewLatestSealedAndExecuted( + root, + state, + protocolDB, + ) + + // retrieves the latest sealed and executed block height. + // the threshold ensures that a certain number of blocks are retained for querying instead of being pruned.
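+ // e.g. with threshold T and latest sealed-and-executed height H, only blocks at + // height <= H - T are prunable, so the most recent T sealed blocks stay queryable.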
+ latest := &LatestPrunable{ + LatestSealedAndExecuted: sealedAndExecuted, + threshold: config.Threshold, + } + + initializer := store.NewConsumerProgress(chunkDataPacksDB, NextHeightForUnprunedExecutionDataPackKey) + + creator, err := block_iterator.NewHeightBasedCreator( + headers.BlockIDByHeight, + initializer, + root, + latest.Latest, + ) + + if err != nil { + return nil, nil, fmt.Errorf("failed to create height based block iterator creator: %w", err) + } + + stateReader := creator.IteratorState() + + return creator, func() (nextToPrune uint64, latestToPrune uint64, err error) { + next, err := stateReader.LoadState() + if err != nil { + return 0, 0, fmt.Errorf("failed to get next height to prune: %w", err) + } + + header, err := latest.Latest() + if err != nil { + return 0, 0, fmt.Errorf("failed to get latest prunable block: %w", err) + } + + return next, header.Height, nil + }, nil +} + +// EstimateBatchProcessing estimates the number of batches and the total duration. +// start and end are both inclusive. +func EstimateBatchProcessing( + start, end uint64, + batchSize uint, + sleepAfterEachBatchCommit time.Duration, + commitDuration time.Duration, +) ( + batchCount uint64, totalDuration time.Duration) { + if batchSize == 0 || start > end { + return 0, 0 + } + count := end - start + 1 + batchCount = (count + uint64(batchSize) - 1) / uint64(batchSize) + + totalDuration = time.Duration(batchCount-1)*sleepAfterEachBatchCommit + time.Duration(batchCount)*commitDuration + + return batchCount, totalDuration +} diff --git a/engine/execution/pruner/core_test.go b/engine/execution/pruner/core_test.go new file mode 100644 index 00000000000..16d1eaa5540 --- /dev/null +++ b/engine/execution/pruner/core_test.go @@ -0,0 +1,223 @@ +package pruner + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" + unittestMocks "github.com/onflow/flow-go/utils/unittest/mocks" +) + +func TestLoopPruneExecutionDataFromRootToLatestSealed(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + // create dependencies + ps := unittestMocks.NewProtocolState() + blocks, rootResult, rootSeal := unittest.ChainFixture(0) + genesis := blocks[0] + require.NoError(t, ps.Bootstrap(genesis, rootResult, rootSeal)) + + db := pebbleimpl.ToDB(pdb) + ctx, cancel := context.WithCancel(context.Background()) + metrics := metrics.NewNoopCollector() + all := store.InitAll(metrics, db) + headers := all.Headers + blockstore := all.Blocks + results := all.Results + + transactions := store.NewTransactions(metrics, db) + collections := store.NewCollections(db, transactions) + chunkDataPacks := store.NewChunkDataPacks(metrics, pebbleimpl.ToDB(pdb), collections, 1000) + + lastSealedHeight := 30 + lastFinalizedHeight := lastSealedHeight + 2 // 2 finalized but unsealed + // indexed by height + chunks := make([]*verification.VerifiableChunkData, lastFinalizedHeight+2) + parentID := genesis.ID() + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return
db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // By convention, root block has no proposer signature - implementation has to handle this edge case + return blockstore.BatchStore(lctx, rw, &flow.Proposal{Block: *genesis, ProposerSigData: nil}) + }) + }) + require.NoError(t, err) + + for i := 1; i <= lastFinalizedHeight; i++ { + chunk, block := unittest.VerifiableChunkDataFixture(0, func(headerBody *flow.HeaderBody) { + headerBody.Height = uint64(i) + headerBody.ParentID = parentID + }) + chunks[i] = chunk // index by height + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blockstore.BatchStore(lctx, rw, unittest.ProposalFromBlock(block)) + }) + }) + require.NoError(t, err) + err = unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, chunk.Header.Height, chunk.Header.ID()) + }) + }) + require.NoError(t, err) + require.NoError(t, results.Store(chunk.Result)) + require.NoError(t, results.Index(chunk.Result.BlockID, chunk.Result.ID())) + require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { + return chunkDataPacks.StoreByChunkID(lctx, []*flow.ChunkDataPack{chunk.ChunkDataPack}) + })) + _, storeErr := collections.Store(chunk.ChunkDataPack.Collection) + require.NoError(t, storeErr) + // verify that chunk data pack fixture can be found by the result + for _, c := range chunk.Result.Chunks { + chunkID := c.ID() + require.Equal(t, chunk.ChunkDataPack.ChunkID, chunkID) + _, err := chunkDataPacks.ByChunkID(chunkID) + require.NoError(t, err) + } + // verify the result can be found by block + _, err = results.ByBlockID(chunk.Header.ID()) + require.NoError(t, err) + + // Finalize block + require.NoError(t, ps.Extend(block)) + require.NoError(t, ps.Finalize(block.ID())) + parentID = block.ID() + } + + // update the index "latest executed block (max height)" to the latest finalized block + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpdateExecutedBlock(rw.Writer(), chunks[lastFinalizedHeight].Header.ID()) + })) + + lastSealed := chunks[lastSealedHeight].Header + require.NoError(t, ps.MakeSeal(lastSealed.ID())) + + // create config + cfg := PruningConfig{ + Threshold: 10, + BatchSize: 3, + SleepAfterEachBatchCommit: 1 * time.Millisecond, + SleepAfterEachIteration: 100 * time.Millisecond, + } + + // wait long enough for the chunk data packs to be pruned + go (func(cancel func()) { + time.Sleep(1 * time.Second) + // cancel the context to stop the loop + cancel() + })(cancel) + + require.NoError(t, LoopPruneExecutionDataFromRootToLatestSealed( + ctx, unittest.Logger(), metrics, ps, db, headers, chunkDataPacks, results, pdb, cfg, + )) + + // verify the chunk data packs beyond the threshold are pruned. + // for example, if lastSealedHeight were 2 and the threshold 1, then blocks at heights 1 and 2 would be stored, + // and only block 1 would be pruned, making the last pruned height 1 (block 2 is not pruned); + // so lastPrunedHeight is calculated as lastSealedHeight (2) - threshold (1) = 1 + lastPrunedHeight := lastSealedHeight - int(cfg.Threshold) // 30 - 10 = 20 + for i := 1; i <= lastPrunedHeight; i++ { + expected := chunks[i] + _, err := chunkDataPacks.ByChunkID(expected.ChunkDataPack.ChunkID) +
+			require.Errorf(t, err, "chunk data pack at height %v should be pruned, but was not", i)
+			require.ErrorIs(t, err, storage.ErrNotFound)
+		}
+
+		// verify the chunk data packs within the threshold are not pruned
+		for i := lastPrunedHeight + 1; i <= lastFinalizedHeight; i++ {
+			expected := chunks[i]
+			actual, err := chunkDataPacks.ByChunkID(expected.ChunkDataPack.ChunkID)
+			require.NoError(t, err)
+			require.Equal(t, expected.ChunkDataPack, actual)
+		}
+	})
+}
+
+func TestEstimateBatchProcessing(t *testing.T) {
+	tests := []struct {
+		name                      string
+		start, end                uint64
+		batchSize                 uint
+		sleepAfterEachBatchCommit time.Duration
+		commitDuration            time.Duration
+		expectedBatchCount        uint64
+		expectedTotalDuration     time.Duration
+	}{
+		{
+			name:                      "Normal case with multiple batches",
+			start:                     0,
+			end:                       100,
+			batchSize:                 10,
+			sleepAfterEachBatchCommit: time.Second,
+			commitDuration:            500 * time.Millisecond,
+			expectedBatchCount:        11,
+			expectedTotalDuration:     10*time.Second + 11*500*time.Millisecond,
+		},
+		{
+			name:                      "Single batch",
+			start:                     0,
+			end:                       5,
+			batchSize:                 10,
+			sleepAfterEachBatchCommit: time.Second,
+			commitDuration:            500 * time.Millisecond,
+			expectedBatchCount:        1,
+			expectedTotalDuration:     500 * time.Millisecond,
+		},
+		{
+			name:                      "Zero batch size",
+			start:                     0,
+			end:                       100,
+			batchSize:                 0,
+			sleepAfterEachBatchCommit: time.Second,
+			commitDuration:            500 * time.Millisecond,
+			expectedBatchCount:        0,
+			expectedTotalDuration:     0,
+		},
+		{
+			name:                      "Start greater than end",
+			start:                     100,
+			end:                       50,
+			batchSize:                 10,
+			sleepAfterEachBatchCommit: time.Second,
+			commitDuration:            500 * time.Millisecond,
+			expectedBatchCount:        0,
+			expectedTotalDuration:     0,
+		},
+		{
+			name:                      "Start equal to end",
+			start:                     50,
+			end:                       50,
+			batchSize:                 10,
+			sleepAfterEachBatchCommit: time.Second,
+			commitDuration:            500 * time.Millisecond,
+			expectedBatchCount:        1,
+			expectedTotalDuration:     500 * time.Millisecond,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			batchCount, totalDuration := EstimateBatchProcessing(tt.start, tt.end, tt.batchSize, tt.sleepAfterEachBatchCommit, tt.commitDuration)
+
+			if batchCount != tt.expectedBatchCount {
+				t.Errorf("expected batchCount %d, got %d", tt.expectedBatchCount, batchCount)
+			}
+			if totalDuration != tt.expectedTotalDuration {
+				t.Errorf("expected totalDuration %v, got %v", tt.expectedTotalDuration, totalDuration)
+			}
+		})
+	}
+}
diff --git a/engine/execution/pruner/engine.go b/engine/execution/pruner/engine.go
new file mode 100644
index 00000000000..c1d4e1a378a
--- /dev/null
+++ b/engine/execution/pruner/engine.go
@@ -0,0 +1,39 @@
+package pruner
+
+import (
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/component"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+)
+
+// NewChunkDataPackPruningEngine creates a component that prunes chunk data packs
+// from root to the latest sealed block.
+func NewChunkDataPackPruningEngine(
+	log zerolog.Logger,
+	metrics module.ExecutionMetrics,
+	state protocol.State,
+	protocolDB storage.DB,
+	headers storage.Headers,
+	chunkDataPacks storage.ChunkDataPacks,
+	results storage.ExecutionResults,
+	chunkDataPacksDB *pebble.DB,
+	config PruningConfig,
+) *component.ComponentManager {
+	return component.NewComponentManagerBuilder().
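+		// single worker: signal ready immediately, then run the pruning loop in the
+		// background until the component's context is cancelled or an error is thrown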
+		AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+			ready()
+
+			err := LoopPruneExecutionDataFromRootToLatestSealed(
+				ctx, log.With().Str("component", "CDP-pruner").Logger(), metrics,
+				state, protocolDB, headers, chunkDataPacks, results, chunkDataPacksDB, config)
+			if err != nil {
+				ctx.Throw(err)
+			}
+		}).
+		Build()
+}
diff --git a/engine/execution/pruner/executor.go b/engine/execution/pruner/executor.go
new file mode 100644
index 00000000000..3460ef8e98c
--- /dev/null
+++ b/engine/execution/pruner/executor.go
@@ -0,0 +1,24 @@
+package pruner
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/block_iterator/executor"
+	"github.com/onflow/flow-go/module/pruner/pruners"
+	"github.com/onflow/flow-go/storage"
+)
+
+type ChunkDataPackPruner struct {
+	*pruners.ChunkDataPackPruner
+}
+
+var _ executor.IterationExecutor = (*ChunkDataPackPruner)(nil)
+
+func NewChunkDataPackPruner(chunkDataPacks storage.ChunkDataPacks, results storage.ExecutionResults) *ChunkDataPackPruner {
+	return &ChunkDataPackPruner{
+		ChunkDataPackPruner: pruners.NewChunkDataPackPruner(chunkDataPacks, results),
+	}
+}
+
+func (c *ChunkDataPackPruner) ExecuteByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) (exception error) {
+	return c.PruneByBlockID(blockID, batch)
+}
diff --git a/engine/execution/pruner/prunable.go b/engine/execution/pruner/prunable.go
new file mode 100644
index 00000000000..da2cc2582d1
--- /dev/null
+++ b/engine/execution/pruner/prunable.go
@@ -0,0 +1,18 @@
+package pruner
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/block_iterator/latest"
+)
+
+// LatestPrunable decides which blocks are prunable.
+// We don't want to prune all of the sealed blocks, but rather keep a certain
+// number of them, so that the data is still available for querying.
+type LatestPrunable struct {
+	*latest.LatestSealedAndExecuted
+	threshold uint64 // the number of blocks to keep below the latest sealed and executed block
+}
+
+func (l *LatestPrunable) Latest() (*flow.Header, error) {
+	return l.LatestSealedAndExecuted.BelowLatest(l.threshold)
+}
diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go
index 407fc3bedef..d32d093c72d 100644
--- a/engine/execution/rpc/engine.go
+++ b/engine/execution/rpc/engine.go
@@ -6,32 +6,49 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"sort"
 	"strings"
 	"unicode/utf8"
 
+	"github.com/onflow/flow/protobuf/go/flow/entities"
+
 	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
 	"github.com/onflow/flow/protobuf/go/flow/execution"
 	"github.com/rs/zerolog"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	_ "google.golang.org/grpc/encoding/gzip" // required for gRPC compression
 	"google.golang.org/grpc/status"
 
+	_ "github.com/onflow/flow-go/engine/common/grpc/compressor/deflate" // required for gRPC compression
+	_ "github.com/onflow/flow-go/engine/common/grpc/compressor/snappy" // required for gRPC compression
+
 	"github.com/onflow/flow-go/consensus/hotstuff"
 	"github.com/onflow/flow-go/engine"
 	"github.com/onflow/flow-go/engine/common/rpc"
 	"github.com/onflow/flow-go/engine/common/rpc/convert"
-	"github.com/onflow/flow-go/engine/execution/ingestion"
+	exeEng "github.com/onflow/flow-go/engine/execution"
+	"github.com/onflow/flow-go/engine/execution/computation/metrics"
+	"github.com/onflow/flow-go/engine/execution/state"
 	fvmerrors "github.com/onflow/flow-go/fvm/errors"
 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/grpcserver"
"github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" ) +const DefaultMaxBlockRange = 300 + // Config defines the configurable options for the gRPC server. type Config struct { - ListenAddr string - MaxMsgSize uint // In bytes - RpcMetricsEnabled bool // enable GRPC metrics reporting + ListenAddr string + MaxRequestMsgSize uint // in bytes + MaxResponseMsgSize uint // in bytes + RpcMetricsEnabled bool // enable GRPC metrics reporting + + // holds value of deprecated MaxMsgSize flag for use during bootstrapping. + // will be removed in a future release. + DeprecatedMaxMsgSize uint // in bytes } // Engine implements a gRPC server with a simplified version of the Observation API. @@ -47,13 +64,14 @@ type Engine struct { func New( log zerolog.Logger, config Config, - e *ingestion.Engine, + scriptsExecutor exeEng.ScriptExecutor, headers storage.Headers, state protocol.State, - events storage.Events, - exeResults storage.ExecutionResults, - txResults storage.TransactionResults, - commits storage.Commits, + events storage.EventsReader, + exeResults storage.ExecutionResultsReader, + txResults storage.TransactionResultsReader, + commits storage.CommitsReader, + transactionMetrics metrics.TransactionExecutionMetricsProvider, chainID flow.ChainID, signerIndicesDecoder hotstuff.BlockSignerDecoder, apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, ExecuteScriptAtBlockID->300 @@ -61,8 +79,8 @@ func New( ) *Engine { log = log.With().Str("engine", "rpc").Logger() serverOptions := []grpc.ServerOption{ - grpc.MaxRecvMsgSize(int(config.MaxMsgSize)), - grpc.MaxSendMsgSize(int(config.MaxMsgSize)), + grpc.MaxRecvMsgSize(int(config.MaxRequestMsgSize)), + grpc.MaxSendMsgSize(int(config.MaxResponseMsgSize)), } var interceptors []grpc.UnaryServerInterceptor // ordered list of interceptors @@ -73,7 +91,7 @@ func New( if len(apiRatelimits) > 0 { // create a rate limit interceptor - rateLimitInterceptor := rpc.NewRateLimiterInterceptor(log, apiRatelimits, apiBurstLimits).UnaryServerInterceptor + rateLimitInterceptor := grpcserver.NewRateLimiterInterceptor(log, apiRatelimits, apiBurstLimits).UnaryServerInterceptor // append the rate limit interceptor to the list of interceptors interceptors = append(interceptors, rateLimitInterceptor) } @@ -88,7 +106,7 @@ func New( log: log, unit: engine.NewUnit(), handler: &handler{ - engine: e, + engine: scriptsExecutor, chain: chainID, headers: headers, state: state, @@ -97,7 +115,10 @@ func New( exeResults: exeResults, transactionResults: txResults, commits: commits, + transactionMetrics: transactionMetrics, log: log, + maxBlockRange: DefaultMaxBlockRange, + maxScriptSize: config.MaxRequestMsgSize, }, server: server, config: config, @@ -147,22 +168,28 @@ func (e *Engine) serve() { // handler implements a subset of the Observation API. 
type handler struct { - engine ingestion.IngestRPC + engine exeEng.ScriptExecutor chain flow.ChainID headers storage.Headers state protocol.State signerIndicesDecoder hotstuff.BlockSignerDecoder - events storage.Events - exeResults storage.ExecutionResults - transactionResults storage.TransactionResults + events storage.EventsReader + exeResults storage.ExecutionResultsReader + transactionResults storage.TransactionResultsReader log zerolog.Logger - commits storage.Commits + commits storage.CommitsReader + transactionMetrics metrics.TransactionExecutionMetricsProvider + maxBlockRange int + maxScriptSize uint } -var _ execution.ExecutionAPIServer = &handler{} +var _ execution.ExecutionAPIServer = (*handler)(nil) // Ping responds to requests when the server is up. -func (h *handler) Ping(_ context.Context, _ *execution.PingRequest) (*execution.PingResponse, error) { +func (h *handler) Ping( + _ context.Context, + _ *execution.PingRequest, +) (*execution.PingResponse, error) { return &execution.PingResponse{}, nil } @@ -170,20 +197,36 @@ func (h *handler) ExecuteScriptAtBlockID( ctx context.Context, req *execution.ExecuteScriptAtBlockIDRequest, ) (*execution.ExecuteScriptAtBlockIDResponse, error) { + script := req.GetScript() + arguments := req.GetArguments() + + if !rpc.CheckScriptSize(script, arguments, h.maxScriptSize) { + return nil, status.Error(codes.InvalidArgument, rpc.ErrScriptTooLarge.Error()) + } blockID, err := convert.BlockID(req.GetBlockId()) if err != nil { return nil, err } - value, err := h.engine.ExecuteScriptAtBlockID(ctx, req.GetScript(), req.GetArguments(), blockID) + // return a more user friendly error if block has not been executed + if _, err = h.commits.ByBlockID(blockID); err != nil { + if errors.Is(err, storage.ErrNotFound) { + return nil, status.Errorf(codes.NotFound, "block %s has not been executed by node or was pruned", blockID) + } + return nil, status.Errorf(codes.Internal, "state commitment for block ID %s could not be retrieved", blockID) + } + + value, compUsage, err := h.engine.ExecuteScriptAtBlockID(ctx, script, arguments, blockID) if err != nil { + // todo check the error code instead // return code 3 as this passes the litmus test in our context return nil, status.Errorf(codes.InvalidArgument, "failed to execute script: %v", err) } res := &execution.ExecuteScriptAtBlockIDResponse{ - Value: value, + Value: value, + ComputationUsage: compUsage, } return res, nil @@ -214,8 +257,10 @@ func (h *handler) GetRegisterAtBlockID( return res, nil } -func (h *handler) GetEventsForBlockIDs(_ context.Context, - req *execution.GetEventsForBlockIDsRequest) (*execution.GetEventsForBlockIDsResponse, error) { +func (h *handler) GetEventsForBlockIDs( + _ context.Context, + req *execution.GetEventsForBlockIDsRequest, +) (*execution.GetEventsForBlockIDsResponse, error) { // validate request blockIDs := req.GetBlockIds() @@ -229,6 +274,10 @@ func (h *handler) GetEventsForBlockIDs(_ context.Context, return nil, err } + if len(blockIDs) > h.maxBlockRange { + return nil, status.Errorf(codes.InvalidArgument, "too many block IDs requested: %d > %d", len(blockIDs), h.maxBlockRange) + } + results := make([]*execution.GetEventsForBlockIDsResponse_Result, len(blockIDs)) // collect all the events and create a EventsResponse_Result for each block @@ -236,27 +285,36 @@ func (h *handler) GetEventsForBlockIDs(_ context.Context, // Check if block has been executed if _, err := h.commits.ByBlockID(bID); err != nil { if errors.Is(err, storage.ErrNotFound) { - return nil, 
status.Errorf(codes.NotFound, "state commitment for block ID %s does not exist", bID) + return nil, status.Errorf(codes.NotFound, "block %s has not been executed by node or was pruned", bID) } return nil, status.Errorf(codes.Internal, "state commitment for block ID %s could not be retrieved", bID) } - // lookup events - blockEvents, err := h.events.ByBlockIDEventType(bID, flow.EventType(eType)) + // lookup all events for the block + blockAllEvents, err := h.getEventsByBlockID(bID) if err != nil { return nil, status.Errorf(codes.Internal, "failed to get events for block: %v", err) } + // filter events by type + eventType := flow.EventType(eType) + blockEvents := make([]flow.Event, 0, len(blockAllEvents)) + for _, event := range blockAllEvents { + if event.Type == eventType { + blockEvents = append(blockEvents, event) + } + } + result, err := h.eventResult(bID, blockEvents) if err != nil { return nil, err } results[i] = result - } return &execution.GetEventsForBlockIDsResponse{ - Results: results, + Results: results, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, }, nil } @@ -316,9 +374,10 @@ func (h *handler) GetTransactionResult( // compose a response with the events and the transaction error return &execution.GetTransactionResultResponse{ - StatusCode: statusCode, - ErrorMessage: errMsg, - Events: events, + StatusCode: statusCode, + ErrorMessage: errMsg, + Events: events, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, }, nil } @@ -374,9 +433,10 @@ func (h *handler) GetTransactionResultByIndex( // compose a response with the events and the transaction error return &execution.GetTransactionResultResponse{ - StatusCode: statusCode, - ErrorMessage: errMsg, - Events: events, + StatusCode: statusCode, + ErrorMessage: errMsg, + Events: events, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, }, nil } @@ -391,6 +451,15 @@ func (h *handler) GetTransactionResultsByBlockID( return nil, status.Errorf(codes.InvalidArgument, "invalid blockID: %v", err) } + // must verify block was locally executed first since transactionResults.ByBlockID will return + // an empty slice if block does not exist + if _, err = h.commits.ByBlockID(blockID); err != nil { + if errors.Is(err, storage.ErrNotFound) { + return nil, status.Errorf(codes.NotFound, "block %s has not been executed by node or was pruned", blockID) + } + return nil, status.Errorf(codes.Internal, "state commitment for block ID %s could not be retrieved", blockID) + } + // Get all tx results txResults, err := h.transactionResults.ByBlockID(blockID) if err != nil { @@ -402,14 +471,14 @@ func (h *handler) GetTransactionResultsByBlockID( } // get all events for a block - blockEvents, err := h.events.ByBlockID(blockID) + blockEvents, err := h.getEventsByBlockID(blockID) if err != nil { return nil, status.Errorf(codes.Internal, "failed to get events for block: %v", err) } responseTxResults := make([]*execution.GetTransactionResultResponse, len(txResults)) - eventsByTxIndex := make(map[uint32][]flow.Event, len(txResults)) //we will have at most as many buckets as tx results + eventsByTxIndex := make(map[uint32][]flow.Event, len(txResults)) // we will have at most as many buckets as tx results // re-partition events by tx index // it's not documented but events are stored indexed by (blockID, event.TransactionID, event.TransactionIndex, event.EventIndex) @@ -452,13 +521,174 @@ func (h *handler) GetTransactionResultsByBlockID( // compose a response return &execution.GetTransactionResultsResponse{ - TransactionResults: 
responseTxResults,
+		TransactionResults:   responseTxResults,
+		EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+	}, nil
+}
+
+// GetTransactionErrorMessage implements a grpc handler for getting a transaction error message by block ID and tx ID.
+// Expected error codes during normal operations:
+// - codes.InvalidArgument - invalid blockID, tx ID.
+// - codes.NotFound - transaction result by tx ID not found.
+func (h *handler) GetTransactionErrorMessage(
+	_ context.Context,
+	req *execution.GetTransactionErrorMessageRequest,
+) (*execution.GetTransactionErrorMessageResponse, error) {
+	reqBlockID := req.GetBlockId()
+	blockID, err := convert.BlockID(reqBlockID)
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "invalid blockID: %v", err)
+	}
+
+	reqTxID := req.GetTransactionId()
+	txID, err := convert.TransactionID(reqTxID)
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "invalid transactionID: %v", err)
+	}
+
+	// lookup any transaction error that might have occurred
+	txResult, err := h.transactionResults.ByBlockIDTransactionID(blockID, txID)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return nil, status.Error(codes.NotFound, "transaction result not found")
+		}
+
+		return nil, status.Errorf(codes.Internal, "failed to get transaction result: %v", err)
+	}
+
+	result := &execution.GetTransactionErrorMessageResponse{
+		TransactionId: convert.IdentifierToMessage(txResult.TransactionID),
+	}
+
+	if len(txResult.ErrorMessage) > 0 {
+		cadenceErrMessage := txResult.ErrorMessage
+		if !utf8.ValidString(cadenceErrMessage) {
+			h.log.Warn().
+				Str("block_id", blockID.String()).
+				Str("transaction_id", txID.String()).
+				Str("error_msg", fmt.Sprintf("%q", cadenceErrMessage)).
+				Msg("invalid character in Cadence error message")
+			// convert non UTF-8 string to a UTF-8 string for safe GRPC marshaling
+			cadenceErrMessage = strings.ToValidUTF8(txResult.ErrorMessage, "?")
+		}
+		result.ErrorMessage = cadenceErrMessage
+	}
+	return result, nil
+}
+
+// GetTransactionErrorMessageByIndex implements a grpc handler for getting a transaction error message by block ID and tx index.
+// Expected error codes during normal operations:
+// - codes.InvalidArgument - invalid blockID.
+// - codes.NotFound - transaction result at index not found.
+func (h *handler) GetTransactionErrorMessageByIndex(
+	_ context.Context,
+	req *execution.GetTransactionErrorMessageByIndexRequest,
+) (*execution.GetTransactionErrorMessageResponse, error) {
+	reqBlockID := req.GetBlockId()
+	blockID, err := convert.BlockID(reqBlockID)
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "invalid blockID: %v", err)
+	}
+
+	index := req.GetIndex()
+
+	// lookup any transaction error that might have occurred
+	txResult, err := h.transactionResults.ByBlockIDTransactionIndex(blockID, index)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return nil, status.Error(codes.NotFound, "transaction result not found")
+		}
+
+		return nil, status.Errorf(codes.Internal, "failed to get transaction result: %v", err)
+	}
+
+	result := &execution.GetTransactionErrorMessageResponse{
+		TransactionId: convert.IdentifierToMessage(txResult.TransactionID),
+	}
+
+	if len(txResult.ErrorMessage) > 0 {
+		cadenceErrMessage := txResult.ErrorMessage
+		if !utf8.ValidString(cadenceErrMessage) {
+			h.log.Warn().
+				Str("block_id", blockID.String()).
+				Str("transaction_id", txResult.TransactionID.String()).
+				Str("error_msg", fmt.Sprintf("%q", cadenceErrMessage)).
+				Msg("invalid character in Cadence error message")
+			// convert non UTF-8 string to a UTF-8 string for safe GRPC marshaling
+			cadenceErrMessage = strings.ToValidUTF8(txResult.ErrorMessage, "?")
+		}
+		result.ErrorMessage = cadenceErrMessage
+	}
+	return result, nil
+}
+
+// GetTransactionErrorMessagesByBlockID implements a grpc handler for getting transaction error messages by block ID.
+// Only failed transactions will be returned.
+// Expected error codes during normal operations:
+// - codes.InvalidArgument - invalid blockID.
+// - codes.NotFound - block was not executed or was pruned.
+func (h *handler) GetTransactionErrorMessagesByBlockID(
+	_ context.Context,
+	req *execution.GetTransactionErrorMessagesByBlockIDRequest,
+) (*execution.GetTransactionErrorMessagesResponse, error) {
+	reqBlockID := req.GetBlockId()
+	blockID, err := convert.BlockID(reqBlockID)
+	if err != nil {
+		return nil, status.Errorf(codes.InvalidArgument, "invalid blockID: %v", err)
+	}
+
+	// must verify block was locally executed first, since transactionResults.ByBlockID will return
+	// an empty slice if the block does not exist
+	if _, err = h.commits.ByBlockID(blockID); err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return nil, status.Errorf(codes.NotFound, "block %s has not been executed by node or was pruned", blockID)
+		}
+		return nil, status.Errorf(codes.Internal, "state commitment for block ID %s could not be retrieved", blockID)
+	}
+
+	// Get all tx results
+	txResults, err := h.transactionResults.ByBlockID(blockID)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return nil, status.Error(codes.NotFound, "transaction results not found")
+		}
+
+		return nil, status.Errorf(codes.Internal, "failed to get transaction results: %v", err)
+	}
+
+	var results []*execution.GetTransactionErrorMessagesResponse_Result
+	for index, txResult := range txResults {
+		if len(txResult.ErrorMessage) == 0 {
+			continue
+		}
+		txIndex := uint32(index)
+		cadenceErrMessage := txResult.ErrorMessage
+		if !utf8.ValidString(cadenceErrMessage) {
+			h.log.Warn().
+				Str("block_id", blockID.String()).
+				Uint32("index", txIndex).
+				Str("error_msg", fmt.Sprintf("%q", cadenceErrMessage)).
+ Msg("invalid character in Cadence error message") + // convert non UTF-8 string to a UTF-8 string for safe GRPC marshaling + cadenceErrMessage = strings.ToValidUTF8(txResult.ErrorMessage, "?") + } + results = append(results, &execution.GetTransactionErrorMessagesResponse_Result{ + TransactionId: convert.IdentifierToMessage(txResult.TransactionID), + Index: txIndex, + ErrorMessage: cadenceErrMessage, + }) + } + + return &execution.GetTransactionErrorMessagesResponse{ + Results: results, }, nil } // eventResult creates EventsResponse_Result from flow.Event for the given blockID -func (h *handler) eventResult(blockID flow.Identifier, - flowEvents []flow.Event) (*execution.GetEventsForBlockIDsResponse_Result, error) { +func (h *handler) eventResult( + blockID flow.Identifier, + flowEvents []flow.Event, +) (*execution.GetEventsForBlockIDsResponse_Result, error) { // convert events to event message events := convert.EventsToMessages(flowEvents) @@ -492,14 +722,28 @@ func (h *handler) GetAccountAtBlockID( return nil, status.Errorf(codes.InvalidArgument, "invalid address: %v", err) } - value, err := h.engine.GetAccount(ctx, flowAddress, blockFlowID) - if errors.Is(err, storage.ErrNotFound) { - return nil, status.Errorf(codes.NotFound, "account with address %s not found", flowAddress) - } - if fvmerrors.IsAccountNotFoundError(err) { - return nil, status.Errorf(codes.NotFound, "account not found") + // return a more user friendly error if block has not been executed + if _, err = h.commits.ByBlockID(blockFlowID); err != nil { + if errors.Is(err, storage.ErrNotFound) { + return nil, status.Errorf(codes.NotFound, "block %s has not been executed by node or was pruned", blockFlowID) + } + return nil, status.Errorf(codes.Internal, "state commitment for block ID %s could not be retrieved", blockFlowID) } + + value, err := h.engine.GetAccount(ctx, flowAddress, blockFlowID) if err != nil { + if errors.Is(err, state.ErrExecutionStatePruned) { + return nil, status.Errorf(codes.OutOfRange, "state for block ID %s not available", blockFlowID) + } + if errors.Is(err, state.ErrNotExecuted) { + return nil, status.Errorf(codes.NotFound, "block %s has not been executed by node or was pruned", blockFlowID) + } + if errors.Is(err, storage.ErrNotFound) { + return nil, status.Errorf(codes.NotFound, "block %s not found", blockFlowID) + } + if fvmerrors.IsAccountNotFoundError(err) { + return nil, status.Errorf(codes.NotFound, "account not found") + } return nil, status.Errorf(codes.Internal, "failed to get account: %v", err) } @@ -536,7 +780,10 @@ func (h *handler) GetLatestBlockHeader( header, err = h.state.Final().Head() } if err != nil { - return nil, status.Errorf(codes.NotFound, "not found: %v", err) + // this header MUST exist in the db, otherwise the node likely has inconsistent state. + // Don't crash as a result of an external API request, but other components will likely panic. + h.log.Err(err).Msg("failed to get latest block header. potentially inconsistent protocol state.") + return nil, status.Errorf(codes.Internal, "unable to get latest header: %v", err) } return h.blockHeaderResponse(header) @@ -574,3 +821,82 @@ func (h *handler) blockHeaderResponse(header *flow.Header) (*execution.BlockHead Block: msg, }, nil } + +// GetTransactionExecutionMetricsAfter gets the execution metrics for a transaction after a given block. 
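+// Results are returned for blocks above the given height, sorted by block height in descending order.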
+func (h *handler) GetTransactionExecutionMetricsAfter(
+	_ context.Context,
+	req *execution.GetTransactionExecutionMetricsAfterRequest,
+) (*execution.GetTransactionExecutionMetricsAfterResponse, error) {
+	height := req.GetBlockHeight()
+
+	metrics, err := h.transactionMetrics.GetTransactionExecutionMetricsAfter(height)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to get metrics after block height %v: %v", height, err)
+	}
+
+	response := &execution.GetTransactionExecutionMetricsAfterResponse{
+		Results: make([]*execution.GetTransactionExecutionMetricsAfterResponse_Result, 0, len(metrics)),
+	}
+
+	for blockHeight, blockMetrics := range metrics {
+		blockResponse := &execution.GetTransactionExecutionMetricsAfterResponse_Result{
+			BlockHeight:  blockHeight,
+			Transactions: make([]*execution.GetTransactionExecutionMetricsAfterResponse_Transaction, len(blockMetrics)),
+		}
+
+		for i, transactionMetrics := range blockMetrics {
+			transactionMetricsResponse := &execution.GetTransactionExecutionMetricsAfterResponse_Transaction{
+				TransactionId:          transactionMetrics.TransactionID[:],
+				ExecutionTime:          uint64(transactionMetrics.ExecutionTime.Nanoseconds()),
+				ExecutionEffortWeights: make([]*execution.GetTransactionExecutionMetricsAfterResponse_ExecutionEffortWeight, 0, len(transactionMetrics.ExecutionEffortWeights)),
+			}
+
+			for kind, weight := range transactionMetrics.ExecutionEffortWeights {
+				transactionMetricsResponse.ExecutionEffortWeights = append(
+					transactionMetricsResponse.ExecutionEffortWeights,
+					&execution.GetTransactionExecutionMetricsAfterResponse_ExecutionEffortWeight{
+						Kind:   uint64(kind),
+						Weight: uint64(weight),
+					},
+				)
+			}
+
+			blockResponse.Transactions[i] = transactionMetricsResponse
+		}
+		response.Results = append(response.Results, blockResponse)
+	}
+
+	// sort the response by block height in descending order
+	sort.Slice(response.Results, func(i, j int) bool {
+		return response.Results[i].BlockHeight > response.Results[j].BlockHeight
+	})
+
+	return response, nil
+}
+
+// getEventsByBlockID returns all events for the given block.
+// As a sanity check, when storage holds no events for the block, we verify against the
+// execution result that the block indeed produced no events: each chunk's EventCollection
+// must equal the default hash of an empty event collection, otherwise an error is returned.
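+// All failures to retrieve data are returned as codes.Internal errors.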
+func (h *handler) getEventsByBlockID(blockID flow.Identifier) ([]flow.Event, error) {
+	blockEvents, err := h.events.ByBlockID(blockID)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to get events for block: %v", err)
+	}
+
+	if len(blockEvents) == 0 {
+		executionResult, err := h.exeResults.ByBlockID(blockID)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "failed to get execution result for block %v: %v", blockID, err)
+		}
+
+		for _, chunk := range executionResult.Chunks {
+			if chunk.EventCollection != flow.EmptyEventCollectionID &&
+				executionResult.PreviousResultID != flow.ZeroID { // skip the root block
+				return nil, status.Errorf(codes.Internal, "events not found for block %s, but chunk %d has events", blockID, chunk.Index)
+			}
+		}
+	}
+
+	return blockEvents, nil
+}
diff --git a/engine/execution/rpc/engine_test.go b/engine/execution/rpc/engine_test.go
index ffbbd4b0a37..562d0e5ee34 100644
--- a/engine/execution/rpc/engine_test.go
+++ b/engine/execution/rpc/engine_test.go
@@ -3,6 +3,7 @@ package rpc
 import (
 	"context"
 	"errors"
+	"fmt"
 	"math/rand"
 	"testing"
 
@@ -17,8 +18,10 @@ import (
 	"github.com/onflow/flow/protobuf/go/flow/entities"
 	"github.com/onflow/flow/protobuf/go/flow/execution"
 
+	commonrpc "github.com/onflow/flow-go/engine/common/rpc"
 	"github.com/onflow/flow-go/engine/common/rpc/convert"
-	ingestion "github.com/onflow/flow-go/engine/execution/ingestion/mock"
+	mockEng "github.com/onflow/flow-go/engine/execution/mock"
+	"github.com/onflow/flow-go/engine/execution/state"
 	"github.com/onflow/flow-go/model/flow"
 	realstorage "github.com/onflow/flow-go/storage"
 	storage "github.com/onflow/flow-go/storage/mock"
@@ -41,21 +44,16 @@ func TestHandler(t *testing.T) {
 
 func (suite *Suite) SetupTest() {
 	suite.log = zerolog.Logger{}
-	suite.events = new(storage.Events)
-	suite.exeResults = new(storage.ExecutionResults)
-	suite.txResults = new(storage.TransactionResults)
-	suite.commits = new(storage.Commits)
-	suite.headers = new(storage.Headers)
+	suite.events = storage.NewEvents(suite.T())
+	suite.exeResults = storage.NewExecutionResults(suite.T())
+	suite.txResults = storage.NewTransactionResults(suite.T())
+	suite.commits = storage.NewCommits(suite.T())
+	suite.headers = storage.NewHeaders(suite.T())
 }
 
 // TestExecuteScriptAtBlockID tests the ExecuteScriptAtBlockID API call
 func (suite *Suite) TestExecuteScriptAtBlockID() {
-	// setup handler
-	mockEngine := new(ingestion.IngestRPC)
-	handler := &handler{
-		engine: mockEngine,
-		chain:  flow.Mainnet,
-	}
+	handler, mockEngine := suite.defaultHandler()
 
 	// setup dummy request/response
 	ctx := context.Background()
@@ -67,22 +65,33 @@ func (suite *Suite) TestExecuteScriptAtBlockID() {
 		Script: script,
 	}
 	scriptExecValue := []byte{9, 10, 11}
+	computationUsage := uint64(11)
 	executionResp := execution.ExecuteScriptAtBlockIDResponse{
-		Value: scriptExecValue,
+		Value:            scriptExecValue,
+		ComputationUsage: computationUsage,
 	}
 
 	suite.Run("happy path with successful script execution", func() {
+		suite.commits.On("ByBlockID", mockIdentifier).Return(nil, nil).Once()
 		mockEngine.On("ExecuteScriptAtBlockID", ctx, script, arguments, mockIdentifier).
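+			// the mocked executor now returns the script value together with its computation usage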
- Return(scriptExecValue, nil).Once() + Return(scriptExecValue, computationUsage, nil).Once() response, err := handler.ExecuteScriptAtBlockID(ctx, &executionReq) suite.Require().NoError(err) suite.Require().Equal(&executionResp, response) mockEngine.AssertExpectations(suite.T()) }) + suite.Run("valid request for unknown block", func() { + suite.commits.On("ByBlockID", mockIdentifier).Return(nil, realstorage.ErrNotFound).Once() + _, err := handler.ExecuteScriptAtBlockID(ctx, &executionReq) + suite.Require().Error(err) + errors.Is(err, status.Error(codes.NotFound, "")) + }) + suite.Run("valid request with script execution failure", func() { + suite.commits.On("ByBlockID", mockIdentifier).Return(nil, nil).Once() mockEngine.On("ExecuteScriptAtBlockID", ctx, script, arguments, mockIdentifier). - Return(nil, status.Error(codes.InvalidArgument, "")).Once() + Return(nil, uint64(0), status.Error(codes.InvalidArgument, "")).Once() _, err := handler.ExecuteScriptAtBlockID(ctx, &executionReq) suite.Require().Error(err) errors.Is(err, status.Error(codes.InvalidArgument, "")) @@ -113,44 +122,41 @@ func (suite *Suite) TestGetEventsForBlockIDs() { // setup the events storage mock for i := range blockIDs { - block := unittest.BlockFixture() - block.Header.Height = uint64(i) + block := unittest.BlockFixture( + unittest.Block.WithHeight(uint64(i + 1)), // avoiding edge case of height = 0 (genesis block) + ) id := block.ID() blockIDs[i] = id[:] eventsForBlock := make([]flow.Event, eventsPerBlock) eventMessages := make([]*entities.Event, eventsPerBlock) for j := range eventsForBlock { - e := unittest.EventFixture(flow.EventAccountCreated, uint32(j), uint32(j), unittest.IdentifierFixture(), 0) + e := unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithTransactionIndex(uint32(j)), + unittest.Event.WithEventIndex(uint32(j)), + ) eventsForBlock[j] = e eventMessages[j] = convert.EventToMessage(e) } // expect one call to lookup result for each block ID - //suite.exeResults.On("ByBlockID", id).Return(nil, nil).Once() suite.commits.On("ByBlockID", id).Return(nil, nil).Once() // expect one call to lookup events for each block ID - suite.events.On("ByBlockIDEventType", id, flow.EventAccountCreated).Return(eventsForBlock, nil).Once() + suite.events.On("ByBlockID", id).Return(eventsForBlock, nil).Once() // expect one call to lookup each block - suite.headers.On("ByBlockID", id).Return(block.Header, nil).Once() + suite.headers.On("ByBlockID", id).Return(block.ToHeader(), nil).Once() // create the expected result for this block expectedResult[i] = &execution.GetEventsForBlockIDsResponse_Result{ BlockId: id[:], - BlockHeight: block.Header.Height, + BlockHeight: block.Height, Events: eventMessages, } } // create the handler - handler := &handler{ - headers: suite.headers, - events: suite.events, - exeResults: suite.exeResults, - transactionResults: suite.txResults, - commits: suite.commits, - chain: flow.Mainnet, - } + handler, _ := suite.defaultHandler() concoctReq := func(errType string, blockIDs [][]byte) *execution.GetEventsForBlockIDsRequest { return &execution.GetEventsForBlockIDsRequest{ @@ -173,9 +179,6 @@ func (suite *Suite) TestGetEventsForBlockIDs() { actualResult := resp.GetResults() suite.Require().ElementsMatch(expectedResult, actualResult) - - // check that appropriate storage calls were made - suite.events.AssertExpectations(suite.T()) }) // failure path - empty even type in the request results in an error @@ -189,9 +192,6 @@ func (suite *Suite) 
TestGetEventsForBlockIDs() { // check that an error was received suite.Require().Error(err) errors.Is(err, status.Error(codes.InvalidArgument, "")) - - // check that no storage calls was made - suite.events.AssertExpectations(suite.T()) }) // failure path - empty block ids in request results in an error @@ -205,9 +205,6 @@ func (suite *Suite) TestGetEventsForBlockIDs() { // check that an error was received suite.Require().Error(err) errors.Is(err, status.Error(codes.InvalidArgument, "")) - - // check that no storage calls was made - suite.events.AssertExpectations(suite.T()) }) // failure path - non-existent block id in request results in an error @@ -226,9 +223,23 @@ func (suite *Suite) TestGetEventsForBlockIDs() { // check that an error was received suite.Require().Error(err) errors.Is(err, status.Error(codes.NotFound, "")) + }) + + // request for too many blocks - receives a InvalidArgument error + suite.Run("request for too many blocks", func() { + + // update range so it's smaller than list of blockIDs + handler.maxBlockRange = totalBlocks / 2 + + // create a valid API request + req := concoctReq(string(flow.EventAccountCreated), blockIDs) - // check that no storage calls was made - suite.events.AssertExpectations(suite.T()) + // execute the GetEventsForBlockIDs call + _, err := handler.GetEventsForBlockIDs(context.Background(), req) + + // check that an error was received + suite.Require().Error(err) + errors.Is(err, status.Error(codes.InvalidArgument, "")) }) } @@ -242,13 +253,7 @@ func (suite *Suite) TestGetAccountAtBlockID() { Address: serviceAddress, } - mockEngine := new(ingestion.IngestRPC) - - // create the handler - handler := &handler{ - engine: mockEngine, - chain: flow.Mainnet, - } + handler, mockEngine := suite.defaultHandler() createReq := func(id []byte, address []byte) *execution.GetAccountAtBlockIDRequest { return &execution.GetAccountAtBlockIDRequest{ @@ -260,6 +265,8 @@ func (suite *Suite) TestGetAccountAtBlockID() { suite.Run("happy path with valid request", func() { // setup mock expectations + suite.commits.On("ByBlockID", id).Return(nil, nil).Once() + mockEngine.On("GetAccount", mock.Anything, serviceAddress, id).Return(&serviceAccount, nil).Once() req := createReq(id[:], serviceAddress.Bytes()) @@ -272,7 +279,19 @@ func (suite *Suite) TestGetAccountAtBlockID() { suite.Require().NoError(err) suite.Require().Empty( cmp.Diff(expectedAccount, actualAccount, protocmp.Transform())) - mockEngine.AssertExpectations(suite.T()) + }) + + suite.Run("invalid request with unknown block id", func() { + + // setup mock expectations + suite.commits.On("ByBlockID", id).Return(nil, realstorage.ErrNotFound).Once() + + req := createReq(id[:], serviceAddress.Bytes()) + + _, err := handler.GetAccountAtBlockID(context.Background(), req) + + suite.Require().Error(err) + errors.Is(err, status.Error(codes.NotFound, "")) }) suite.Run("invalid request with nil block id", func() { @@ -282,6 +301,7 @@ func (suite *Suite) TestGetAccountAtBlockID() { _, err := handler.GetAccountAtBlockID(context.Background(), req) suite.Require().Error(err) + errors.Is(err, status.Error(codes.InvalidArgument, "")) }) suite.Run("invalid request with nil root address", func() { @@ -291,6 +311,28 @@ func (suite *Suite) TestGetAccountAtBlockID() { _, err := handler.GetAccountAtBlockID(context.Background(), req) suite.Require().Error(err) + errors.Is(err, status.Error(codes.InvalidArgument, "")) + }) + + suite.Run("valid request for unavailable data", func() { + suite.commits.On("ByBlockID", id).Return(nil, 
nil).Once() + + expectedErr := fmt.Errorf( + "failed to execute script at block (%s): %w (%s). "+ + "this error usually happens if the reference "+ + "block for this script is not set to a recent block.", + id, + state.ErrExecutionStatePruned, + unittest.IdentifierFixture(), + ) + + mockEngine.On("GetAccount", mock.Anything, serviceAddress, id).Return(nil, expectedErr).Once() + + req := createReq(id[:], serviceAddress.Bytes()) + + resp, err := handler.GetAccountAtBlockID(context.Background(), req) + suite.Assert().Nil(resp) + suite.Assert().Equal(codes.OutOfRange, status.Code(err)) }) } @@ -301,13 +343,7 @@ func (suite *Suite) TestGetRegisterAtBlockID() { serviceAddress := flow.Mainnet.Chain().ServiceAddress() validKey := []byte("exists") - mockEngine := new(ingestion.IngestRPC) - - // create the handler - handler := &handler{ - engine: mockEngine, - chain: flow.Mainnet, - } + handler, mockEngine := suite.defaultHandler() createReq := func(id, owner, key []byte) *execution.GetRegisterAtBlockIDRequest { return &execution.GetRegisterAtBlockIDRequest{ @@ -329,7 +365,6 @@ func (suite *Suite) TestGetRegisterAtBlockID() { value := resp.GetValue() suite.Require().NoError(err) suite.Require().True(len(value) > 0) - mockEngine.AssertExpectations(suite.T()) }) suite.Run("invalid request with bad address", func() { @@ -357,7 +392,11 @@ func (suite *Suite) TestGetTransactionResult() { eventsForTx := make([]flow.Event, totalEvents) eventMessages := make([]*entities.Event, totalEvents) for j := range eventsForTx { - e := unittest.EventFixture(flow.EventAccountCreated, uint32(j), uint32(j), unittest.IdentifierFixture(), 0) + e := unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithTransactionIndex(uint32(j)), + unittest.Event.WithEventIndex(uint32(j)), + ) eventsForTx[j] = e eventMessages[j] = convert.EventToMessage(e) } @@ -365,17 +404,10 @@ func (suite *Suite) TestGetTransactionResult() { // expect a call to lookup events by block ID and transaction ID suite.events.On("ByBlockIDTransactionID", bID, txID).Return(eventsForTx, nil) - // expect a call to lookup each block - suite.headers.On("ByID", block.ID()).Return(&block, true) - // create the handler createHandler := func(txResults *storage.TransactionResults) *handler { - handler := &handler{ - headers: suite.headers, - events: suite.events, - transactionResults: txResults, - chain: flow.Mainnet, - } + handler, _ := suite.defaultHandler() + handler.transactionResults = txResults return handler } @@ -412,7 +444,7 @@ func (suite *Suite) TestGetTransactionResult() { } // expect a call to lookup transaction result by block ID and transaction ID, return a result with no error - txResults := new(storage.TransactionResults) + txResults := storage.NewTransactionResults(suite.T()) txResult := flow.TransactionResult{ TransactionID: flow.Identifier{}, ErrorMessage: "", @@ -432,10 +464,6 @@ func (suite *Suite) TestGetTransactionResult() { // check that all fields in response are as expected assertEqual(expectedResult, actualResult) - - // check that appropriate storage calls were made - suite.events.AssertExpectations(suite.T()) - txResults.AssertExpectations(suite.T()) }) // happy path - valid requests receives all events for the given transaction by index @@ -449,7 +477,7 @@ func (suite *Suite) TestGetTransactionResult() { } // expect a call to lookup transaction result by block ID and transaction ID, return a result with no error - txResults := new(storage.TransactionResults) + txResults := 
storage.NewTransactionResults(suite.T()) txResult := flow.TransactionResult{ TransactionID: flow.Identifier{}, ErrorMessage: "", @@ -472,10 +500,6 @@ func (suite *Suite) TestGetTransactionResult() { // check that all fields in response are as expected assertEqual(expectedResult, actualResult) - - // check that appropriate storage calls were made - suite.events.AssertExpectations(suite.T()) - txResults.AssertExpectations(suite.T()) }) // happy path - valid requests receives all events and an error for the given transaction @@ -489,7 +513,7 @@ func (suite *Suite) TestGetTransactionResult() { } // setup the storage to return a transaction error - txResults := new(storage.TransactionResults) + txResults := storage.NewTransactionResults(suite.T()) txResult := flow.TransactionResult{ TransactionID: txID, ErrorMessage: "runtime error", @@ -509,10 +533,6 @@ func (suite *Suite) TestGetTransactionResult() { // check that all fields in response are as expected assertEqual(expectedResult, actualResult) - - // check that appropriate storage calls were made - suite.events.AssertExpectations(suite.T()) - txResults.AssertExpectations(suite.T()) }) // happy path - valid requests receives all events and an error for the given transaction @@ -526,7 +546,7 @@ func (suite *Suite) TestGetTransactionResult() { } // setup the storage to return a transaction error - txResults := new(storage.TransactionResults) + txResults := storage.NewTransactionResults(suite.T()) txResult := flow.TransactionResult{ TransactionID: txID, ErrorMessage: "runtime error", @@ -551,8 +571,6 @@ func (suite *Suite) TestGetTransactionResult() { assertEqual(expectedResult, actualResult) // check that appropriate storage calls were made - suite.events.AssertExpectations(suite.T()) - txResults.AssertExpectations(suite.T()) }) // failure path - nil transaction ID in the request results in an error @@ -561,11 +579,7 @@ func (suite *Suite) TestGetTransactionResult() { // create an API request with transaction ID as nil req := concoctReq(bID[:], nil) - // expect a call to lookup transaction result by block ID and transaction ID, return an error - txResults := new(storage.TransactionResults) - - txResults.On("ByBlockIDTransactionID", bID, nil).Return(nil, status.Error(codes.InvalidArgument, "")).Once() - + txResults := storage.NewTransactionResults(suite.T()) handler := createHandler(txResults) _, err := handler.GetTransactionResult(context.Background(), req) @@ -573,9 +587,6 @@ func (suite *Suite) TestGetTransactionResult() { // check that an error was received suite.Require().Error(err) errors.Is(err, status.Error(codes.InvalidArgument, "")) - - // check that no storage calls was made - suite.events.AssertExpectations(suite.T()) }) // failure path - nil block id in the request results in an error @@ -584,10 +595,7 @@ func (suite *Suite) TestGetTransactionResult() { // create an API request with a nil block id req := concoctReq(nil, txID[:]) - txResults := new(storage.TransactionResults) - - txResults.On("ByBlockIDTransactionID", nil, txID).Return(nil, status.Error(codes.InvalidArgument, "")).Once() - + txResults := storage.NewTransactionResults(suite.T()) handler := createHandler(txResults) _, err := handler.GetTransactionResult(context.Background(), req) @@ -595,9 +603,6 @@ func (suite *Suite) TestGetTransactionResult() { // check that an error was received suite.Require().Error(err) errors.Is(err, status.Error(codes.InvalidArgument, "")) - - // check that no storage calls was made - suite.events.AssertExpectations(suite.T()) }) // failure path 
- nil block id in the index request results in an error @@ -606,10 +611,7 @@ func (suite *Suite) TestGetTransactionResult() { // create an API request with a nil block id req := concoctIndexReq(nil, txIndex) - txResults := new(storage.TransactionResults) - - txResults.On("ByBlockIDTransactionIndex", nil, txID).Return(nil, status.Error(codes.InvalidArgument, "")).Once() - + txResults := storage.NewTransactionResults(suite.T()) handler := createHandler(txResults) _, err := handler.GetTransactionResultByIndex(context.Background(), req) @@ -617,9 +619,6 @@ func (suite *Suite) TestGetTransactionResult() { // check that an error was received suite.Require().Error(err) errors.Is(err, status.Error(codes.InvalidArgument, "")) - - // check that no storage calls was made - suite.events.AssertExpectations(suite.T()) }) // failure path - non-existent transaction ID in request results in an error @@ -631,8 +630,8 @@ func (suite *Suite) TestGetTransactionResult() { req := concoctReq(bID[:], wrongTxID[:]) // expect a storage call for the invalid tx ID but return an error - txResults := new(storage.TransactionResults) - txResults.On("ByBlockIDTransactionID", bID, wrongTxID).Return(nil, status.Error(codes.Internal, "")).Once() + txResults := storage.NewTransactionResults(suite.T()) + txResults.On("ByBlockIDTransactionID", bID, wrongTxID).Return(nil, realstorage.ErrNotFound).Once() handler := createHandler(txResults) @@ -640,10 +639,28 @@ func (suite *Suite) TestGetTransactionResult() { // check that an error was received suite.Require().Error(err) - errors.Is(err, status.Error(codes.Internal, "")) + errors.Is(err, status.Error(codes.NotFound, "")) + }) + + // failure path - non-existent transaction ID in request results in an exception + suite.Run("request with non-existent transaction ID, exception", func() { + + wrongTxID := unittest.IdentifierFixture() + + // create an API request with the invalid transaction ID + req := concoctReq(bID[:], wrongTxID[:]) + + // expect a storage call for the invalid tx ID but return an exception + txResults := storage.NewTransactionResults(suite.T()) + txResults.On("ByBlockIDTransactionID", bID, wrongTxID).Return(nil, errors.New("internal-error")).Once() - // check that one storage call was made - suite.events.AssertExpectations(suite.T()) + handler := createHandler(txResults) + + _, err := handler.GetTransactionResult(context.Background(), req) + + // check that an error was received + suite.Require().Error(err) + errors.Is(err, status.Error(codes.Internal, "")) }) // failure path - non-existent transaction index in request results in an error @@ -655,8 +672,8 @@ func (suite *Suite) TestGetTransactionResult() { req := concoctIndexReq(bID[:], wrongTxIndex) // expect a storage call for the invalid tx ID but return an error - txResults := new(storage.TransactionResults) - txResults.On("ByBlockIDTransactionIndex", bID, wrongTxIndex).Return(nil, status.Error(codes.Internal, "")).Once() + txResults := storage.NewTransactionResults(suite.T()) + txResults.On("ByBlockIDTransactionIndex", bID, wrongTxIndex).Return(nil, realstorage.ErrNotFound).Once() handler := createHandler(txResults) @@ -664,10 +681,28 @@ func (suite *Suite) TestGetTransactionResult() { // check that an error was received suite.Require().Error(err) - errors.Is(err, status.Error(codes.Internal, "")) + errors.Is(err, status.Error(codes.NotFound, "")) + }) - // check that one storage call was made - suite.events.AssertExpectations(suite.T()) + // failure path - non-existent transaction index in request results in an 
exception + suite.Run("request with non-existent transaction index, exception", func() { + + wrongTxIndex := txIndex + 1 + + // create an API request with the invalid transaction ID + req := concoctIndexReq(bID[:], wrongTxIndex) + + // expect a storage call for the invalid tx ID but return an exception + txResults := storage.NewTransactionResults(suite.T()) + txResults.On("ByBlockIDTransactionIndex", bID, wrongTxIndex).Return(nil, errors.New("internal-error")).Once() + + handler := createHandler(txResults) + + _, err := handler.GetTransactionResultByIndex(context.Background(), req) + + // check that an error was received + suite.Require().Error(err) + errors.Is(err, status.Error(codes.Internal, "")) }) } @@ -692,13 +727,23 @@ func (suite *Suite) TestGetTransactionResultsByBlockID() { convertedEventsForTx2 := make([]*entities.Event, len(eventsForTx2)) for j := 0; j < len(eventsForTx1); j++ { - e := unittest.EventFixture(flow.EventAccountCreated, uint32(0), uint32(j), tx1ID, 0) + e := unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithTransactionIndex(0), + unittest.Event.WithEventIndex(uint32(j)), + unittest.Event.WithTransactionID(tx1ID), + ) eventsForTx1[j] = e eventsForBlock[j] = e convertedEventsForTx1[j] = convert.EventToMessage(e) } for j := 0; j < len(eventsForTx2); j++ { - e := unittest.EventFixture(flow.EventAccountCreated, uint32(1), uint32(j), tx2ID, 0) + e := unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithTransactionIndex(1), + unittest.Event.WithEventIndex(uint32(j)), + unittest.Event.WithTransactionID(tx2ID), + ) eventsForTx2[j] = e eventsForBlock[len(eventsForTx1)+j] = e convertedEventsForTx2[j] = convert.EventToMessage(e) @@ -706,12 +751,8 @@ func (suite *Suite) TestGetTransactionResultsByBlockID() { // create the handler createHandler := func(txResults *storage.TransactionResults) *handler { - handler := &handler{ - headers: suite.headers, - events: suite.events, - transactionResults: txResults, - chain: flow.Mainnet, - } + handler, _ := suite.defaultHandler() + handler.transactionResults = txResults return handler } @@ -736,6 +777,8 @@ func (suite *Suite) TestGetTransactionResultsByBlockID() { // happy path - valid requests receives all events for the given transaction suite.Run("happy path with valid events and no transaction error", func() { + suite.commits.On("ByBlockID", bID).Return(nil, nil).Once() + // expect a call to lookup events by block ID and transaction ID suite.events.On("ByBlockID", bID).Return(eventsForBlock, nil).Once() @@ -756,7 +799,7 @@ func (suite *Suite) TestGetTransactionResultsByBlockID() { } // expect a call to lookup transaction result by block ID return a result with no error - txResultsMock := new(storage.TransactionResults) + txResultsMock := storage.NewTransactionResults(suite.T()) txResults := []flow.TransactionResult{ { TransactionID: tx1ID, @@ -782,15 +825,13 @@ func (suite *Suite) TestGetTransactionResultsByBlockID() { // check that all fields in response are as expected assertEqual(expectedResult, actualResult) - - // check that appropriate storage calls were made - suite.events.AssertExpectations(suite.T()) - txResultsMock.AssertExpectations(suite.T()) }) // happy path - valid requests receives all events and an error for the given transaction suite.Run("happy path with valid events and a transaction error", func() { + suite.commits.On("ByBlockID", bID).Return(nil, nil).Once() + // expect a call to lookup events by block ID and 
transaction ID suite.events.On("ByBlockID", bID).Return(eventsForBlock, nil).Once() @@ -811,7 +852,7 @@ func (suite *Suite) TestGetTransactionResultsByBlockID() { } // expect a call to lookup transaction result by block ID return a result with no error - txResultsMock := new(storage.TransactionResults) + txResultsMock := storage.NewTransactionResults(suite.T()) txResults := []flow.TransactionResult{ { TransactionID: tx1ID, @@ -837,10 +878,6 @@ func (suite *Suite) TestGetTransactionResultsByBlockID() { // check that all fields in response are as expected assertEqual(expectedResult, actualResult) - - // check that appropriate storage calls were made - suite.events.AssertExpectations(suite.T()) - txResultsMock.AssertExpectations(suite.T()) }) // failure path - nil block id in the request results in an error @@ -849,54 +886,487 @@ func (suite *Suite) TestGetTransactionResultsByBlockID() { // create an API request with a nil block id req := concoctReq(nil) - txResults := new(storage.TransactionResults) + txResults := storage.NewTransactionResults(suite.T()) + handler := createHandler(txResults) - txResults.On("ByBlockID", nil).Return(nil, status.Error(codes.InvalidArgument, "")).Once() + _, err := handler.GetTransactionResultsByBlockID(context.Background(), req) - handler := createHandler(txResults) + // check that an error was received + suite.Require().Error(err) + errors.Is(err, status.Error(codes.InvalidArgument, "")) + }) + + // failure path - nonexisting block id in the request results in not found error + suite.Run("request with nonexisting block ID", func() { + + suite.commits.On("ByBlockID", nonexistingBlockID).Return(nil, realstorage.ErrNotFound).Once() + + txResultsMock := storage.NewTransactionResults(suite.T()) + handler := createHandler(txResultsMock) + + // create a valid API request + req := concoctReq(nonexistingBlockID[:]) + // execute the GetTransactionResult call _, err := handler.GetTransactionResultsByBlockID(context.Background(), req) + // check that an error was received + suite.Require().Error(err) + errors.Is(err, status.Error(codes.NotFound, "")) + }) +} + +// TestGetTransactionErrorMessage tests the GetTransactionErrorMessage and GetTransactionErrorMessageByIndex API calls +func (suite *Suite) TestGetTransactionErrorMessage() { + block := unittest.BlockFixture() + tx := unittest.TransactionFixture() + bID := block.ID() + txID := tx.ID() + txIndex := rand.Uint32() + + // create the handler + createHandler := func(txResults *storage.TransactionResults) *handler { + handler, _ := suite.defaultHandler() + handler.transactionResults = txResults + return handler + } + + // concoctReq creates a GetTransactionErrorMessageRequest + concoctReq := func(bID []byte, tID []byte) *execution.GetTransactionErrorMessageRequest { + return &execution.GetTransactionErrorMessageRequest{ + BlockId: bID, + TransactionId: tID, + } + } + + // concoctIndexReq creates a GetTransactionErrorMessageByIndexRequest + concoctIndexReq := func(bID []byte, tIndex uint32) *execution.GetTransactionErrorMessageByIndexRequest { + return &execution.GetTransactionErrorMessageByIndexRequest{ + BlockId: bID, + Index: tIndex, + } + } + + suite.Run("happy path - by tx id - no transaction error", func() { + + // create the expected result + expectedResult := &execution.GetTransactionErrorMessageResponse{ + TransactionId: convert.IdentifierToMessage(txID), + ErrorMessage: "", + } + + // expect a call to lookup transaction result by block ID and transaction ID, return a result with no error + txResults := 
storage.NewTransactionResults(suite.T()) + txResult := flow.TransactionResult{ + TransactionID: txID, + ErrorMessage: "", + } + txResults.On("ByBlockIDTransactionID", bID, txID).Return(&txResult, nil).Once() + + handler := createHandler(txResults) + + // create a valid API request + req := concoctReq(bID[:], txID[:]) + + // execute the GetTransactionErrorMessage call + actualResult, err := handler.GetTransactionErrorMessage(context.Background(), req) + + // check that a successful response is received + suite.Require().NoError(err) + + // check that all fields in response are as expected + suite.Equal(expectedResult, actualResult) + }) + + suite.Run("happy path - at index - no transaction error", func() { + + // create the expected result + expectedResult := &execution.GetTransactionErrorMessageResponse{ + TransactionId: convert.IdentifierToMessage(txID), + ErrorMessage: "", + } + + // expect a call to lookup transaction result by block ID and transaction ID, return a result with no error + txResults := storage.NewTransactionResults(suite.T()) + txResult := flow.TransactionResult{ + TransactionID: txID, + ErrorMessage: "", + } + txResults.On("ByBlockIDTransactionIndex", bID, txIndex).Return(&txResult, nil).Once() + + handler := createHandler(txResults) + + // create a valid API request + req := concoctIndexReq(bID[:], txIndex) + + // execute the GetTransactionResult call + actualResult, err := handler.GetTransactionErrorMessageByIndex(context.Background(), req) + + // check that a successful response is received + suite.Require().NoError(err) + + // check that all fields in response are as expected + suite.Equal(expectedResult, actualResult) + }) + + suite.Run("happy path - by tx id - transaction error", func() { + + // create the expected result + expectedResult := &execution.GetTransactionErrorMessageResponse{ + TransactionId: convert.IdentifierToMessage(txID), + ErrorMessage: "runtime error", + } + + // setup the storage to return a transaction error + txResults := storage.NewTransactionResults(suite.T()) + txResult := flow.TransactionResult{ + TransactionID: txID, + ErrorMessage: "runtime error", + } + txResults.On("ByBlockIDTransactionID", bID, txID).Return(&txResult, nil).Once() + + handler := createHandler(txResults) + + // create a valid API request + req := concoctReq(bID[:], txID[:]) + + // execute the GetTransactionErrorMessage call + actualResult, err := handler.GetTransactionErrorMessage(context.Background(), req) + + // check that a successful response is received + suite.Require().NoError(err) + + // check that all fields in response are as expected + suite.Equal(expectedResult, actualResult) + }) + + suite.Run("happy path - at index - transaction error", func() { + + // create the expected result + expectedResult := &execution.GetTransactionErrorMessageResponse{ + TransactionId: convert.IdentifierToMessage(txID), + ErrorMessage: "runtime error", + } + + // setup the storage to return a transaction error + txResults := storage.NewTransactionResults(suite.T()) + txResult := flow.TransactionResult{ + TransactionID: txID, + ErrorMessage: "runtime error", + } + txResults.On("ByBlockIDTransactionIndex", bID, txIndex).Return(&txResult, nil).Once() + + handler := createHandler(txResults) + + // create a valid API request + req := concoctIndexReq(bID[:], txIndex) + + // execute the GetTransactionErrorMessageByIndex call + actualResult, err := handler.GetTransactionErrorMessageByIndex(context.Background(), req) + + // check that a successful response is received + 
suite.Require().NoError(err) + + // check that all fields in response are as expected + suite.Equal(expectedResult, actualResult) + }) + + // failure path - nil transaction ID in the request results in an error + suite.Run("request with nil tx ID", func() { + + // create an API request with transaction ID as nil + req := concoctReq(bID[:], nil) + + txResults := storage.NewTransactionResults(suite.T()) + handler := createHandler(txResults) + + _, err := handler.GetTransactionErrorMessage(context.Background(), req) + // check that an error was received suite.Require().Error(err) errors.Is(err, status.Error(codes.InvalidArgument, "")) + }) + + // failure path - nil block id in the request results in an error + suite.Run("request with nil block ID", func() { - // check that no storage calls was made - suite.events.AssertExpectations(suite.T()) + // create an API request with a nil block id + req := concoctReq(nil, txID[:]) + + txResults := storage.NewTransactionResults(suite.T()) + handler := createHandler(txResults) + + _, err := handler.GetTransactionErrorMessage(context.Background(), req) + + // check that an error was received + suite.Require().Error(err) + errors.Is(err, status.Error(codes.InvalidArgument, "")) }) - // failure path - nonexisting block id in the request results in valid, but empty - suite.Run("request with nonexisting block ID", func() { + // failure path - nil block id in the index request results in an error + suite.Run("index request with nil block ID", func() { - // expect a call to lookup events by block ID and transaction ID - suite.events.On("ByBlockID", nonexistingBlockID).Return(eventsForBlock, nil).Once() + // create an API request with a nil block id + req := concoctIndexReq(nil, txIndex) + + txResults := storage.NewTransactionResults(suite.T()) + handler := createHandler(txResults) + + _, err := handler.GetTransactionErrorMessageByIndex(context.Background(), req) + + // check that an error was received + suite.Require().Error(err) + errors.Is(err, status.Error(codes.InvalidArgument, "")) + }) + + // failure path - non-existent transaction ID in request results in an error + suite.Run("request with non-existent transaction ID", func() { + + wrongTxID := unittest.IdentifierFixture() + + // create an API request with the invalid transaction ID + req := concoctReq(bID[:], wrongTxID[:]) + + // expect a storage call for the invalid tx ID but return an error + txResults := storage.NewTransactionResults(suite.T()) + txResults.On("ByBlockIDTransactionID", bID, wrongTxID).Return(nil, realstorage.ErrNotFound).Once() + + handler := createHandler(txResults) + + _, err := handler.GetTransactionErrorMessage(context.Background(), req) + + // check that an error was received + suite.Require().Error(err) + errors.Is(err, status.Error(codes.NotFound, "")) + }) + + // failure path - non-existent transaction ID in request results in an exception + suite.Run("request with non-existent transaction ID, exception", func() { + + wrongTxID := unittest.IdentifierFixture() + + // create an API request with the invalid transaction ID + req := concoctReq(bID[:], wrongTxID[:]) + + // expect a storage call for the invalid tx ID but return an exception + txResults := storage.NewTransactionResults(suite.T()) + txResults.On("ByBlockIDTransactionID", bID, wrongTxID).Return(nil, errors.New("internal-error")).Once() + + handler := createHandler(txResults) + + _, err := handler.GetTransactionErrorMessage(context.Background(), req) + + // check that an error was received + suite.Require().Error(err) + 
errors.Is(err, status.Error(codes.Internal, ""))
+	})
+
+	// failure path - non-existent transaction index in request results in an error
+	suite.Run("request with non-existent transaction index", func() {
+
+		wrongTxIndex := txIndex + 1
+
+		// create an API request with the invalid transaction index
+		req := concoctIndexReq(bID[:], wrongTxIndex)
+
+		// expect a storage call for the invalid tx index but return an error
+		txResults := storage.NewTransactionResults(suite.T())
+		txResults.On("ByBlockIDTransactionIndex", bID, wrongTxIndex).Return(nil, realstorage.ErrNotFound).Once()
+
+		handler := createHandler(txResults)
+
+		_, err := handler.GetTransactionErrorMessageByIndex(context.Background(), req)
+
+		// check that an error was received
+		suite.Require().Error(err)
+		errors.Is(err, status.Error(codes.NotFound, ""))
+	})
+
+	// failure path - non-existent transaction index in request results in an exception
+	suite.Run("request with non-existent transaction index, exception", func() {
+
+		wrongTxIndex := txIndex + 1
+
+		// create an API request with the invalid transaction index
+		req := concoctIndexReq(bID[:], wrongTxIndex)
+
+		// expect a storage call for the invalid tx index but return an exception
+		txResults := storage.NewTransactionResults(suite.T())
+		txResults.On("ByBlockIDTransactionIndex", bID, wrongTxIndex).Return(nil, errors.New("internal-error")).Once()
+
+		handler := createHandler(txResults)
+
+		_, err := handler.GetTransactionErrorMessageByIndex(context.Background(), req)
+
+		// check that an error was received
+		suite.Require().Error(err)
+		errors.Is(err, status.Error(codes.Internal, ""))
+	})
+}
+
+// TestGetTransactionErrorMessagesByBlockID tests GetTransactionErrorMessagesByBlockID API calls
+func (suite *Suite) TestGetTransactionErrorMessagesByBlockID() {
+	block := unittest.BlockFixture()
+	tx := unittest.TransactionFixture()
+	bID := block.ID()
+	nonexistingBlockID := unittest.IdentifierFixture()
+	tx1ID := tx.ID()
+	tx2ID := tx.ID()
+	tx3ID := tx.ID()
+
+	// create the handler
+	createHandler := func(txResults *storage.TransactionResults) *handler {
+		handler, _ := suite.defaultHandler()
+		handler.transactionResults = txResults
+		return handler
+	}
+
+	// concoctReq creates a GetTransactionErrorMessagesByBlockIDRequest
+	concoctReq := func(bID []byte) *execution.GetTransactionErrorMessagesByBlockIDRequest {
+		return &execution.GetTransactionErrorMessagesByBlockIDRequest{
+			BlockId: bID,
+		}
+	}
+
+	// happy path - if no transaction errors are found, an empty list is returned
+	suite.Run("happy path with no transaction error", func() {
+		suite.commits.On("ByBlockID", bID).Return(nil, nil).Once()

		// create the expected result
-		expectedResult := &execution.GetTransactionResultsResponse{
-			TransactionResults: []*execution.GetTransactionResultResponse{},
+		expectedResult := &execution.GetTransactionErrorMessagesResponse{
+			Results: []*execution.GetTransactionErrorMessagesResponse_Result{},
		}

		// expect a call to lookup transaction result by block ID return a result with no error
-		txResultsMock := new(storage.TransactionResults)
-		var txResults []flow.TransactionResult
-		txResultsMock.On("ByBlockID", nonexistingBlockID).Return(txResults, nil).Once()
+		txResultsMock := storage.NewTransactionResults(suite.T())
+		txResults := []flow.TransactionResult{
+			{
+				TransactionID: tx1ID,
+				ErrorMessage:  "",
+			},
+			{
+				TransactionID: tx2ID,
+				ErrorMessage:  "",
+			},
+		}
+		txResultsMock.On("ByBlockID", bID).Return(txResults, nil).Once()

		handler := createHandler(txResultsMock)

+		// create a valid API request
-		req := concoctReq(nonexistingBlockID[:])
+		req := concoctReq(bID[:])

-		// execute the GetTransactionResult call
-		actualResult, err := handler.GetTransactionResultsByBlockID(context.Background(), req)
+		// execute the GetTransactionErrorMessagesByBlockID call
+		actualResult, err := handler.GetTransactionErrorMessagesByBlockID(context.Background(), req)

		// check that a successful response is received
		suite.Require().NoError(err)

		// check that all fields in response are as expected
-		assertEqual(expectedResult, actualResult)
+		suite.Assert().ElementsMatch(expectedResult.Results, actualResult.Results)
+	})

-		// check that appropriate storage calls were made
-		suite.events.AssertExpectations(suite.T())
-		txResultsMock.AssertExpectations(suite.T())
+	// happy path - a valid request receives error messages for all failed transactions.
+	suite.Run("happy path with transaction errors", func() {
+
+		suite.commits.On("ByBlockID", bID).Return(nil, nil).Once()
+
+		// create the expected result
+		expectedResult := &execution.GetTransactionErrorMessagesResponse{
+			Results: []*execution.GetTransactionErrorMessagesResponse_Result{
+				{
+					TransactionId: convert.IdentifierToMessage(tx2ID),
+					Index:         1,
+					ErrorMessage:  "runtime error",
+				},
+				{
+					TransactionId: convert.IdentifierToMessage(tx3ID),
+					Index:         2,
+					ErrorMessage:  "runtime error",
+				},
+			},
+		}
+
+		// expect a call to look up transaction results by block ID, returning results that include transaction errors
+		txResultsMock := storage.NewTransactionResults(suite.T())
+		txResults := []flow.TransactionResult{
+			{
+				TransactionID: tx1ID,
+				ErrorMessage:  "",
+			},
+			{
+				TransactionID: tx2ID,
+				ErrorMessage:  "runtime error",
+			},
+			{
+				TransactionID: tx3ID,
+				ErrorMessage:  "runtime error",
+			},
+		}
+		txResultsMock.On("ByBlockID", bID).Return(txResults, nil).Once()
+
+		handler := createHandler(txResultsMock)
+
+		// create a valid API request
+		req := concoctReq(bID[:])
+
+		// execute the GetTransactionErrorMessagesByBlockID call
+		actualResult, err := handler.GetTransactionErrorMessagesByBlockID(context.Background(), req)
+
+		// check that a successful response is received
+		suite.Require().NoError(err)
+
+		// check that all fields in response are as expected
+		suite.Assert().ElementsMatch(expectedResult.Results, actualResult.Results)
+	})
+
+	// failure path - nil block id in the request results in an error
+	suite.Run("request with nil block ID", func() {
+
+		// create an API request with a nil block id
+		req := concoctReq(nil)
+
+		txResults := storage.NewTransactionResults(suite.T())
+		handler := createHandler(txResults)
+
+		_, err := handler.GetTransactionErrorMessagesByBlockID(context.Background(), req)
+
+		// check that an error was received
+		suite.Require().Error(err)
+		errors.Is(err, status.Error(codes.InvalidArgument, ""))
	})
+
+	// failure path - nonexisting block id in the request results in not found error
+	suite.Run("request with nonexisting block ID", func() {
+
+		suite.commits.On("ByBlockID", nonexistingBlockID).Return(nil, realstorage.ErrNotFound).Once()
+
+		txResultsMock := storage.NewTransactionResults(suite.T())
+		handler := createHandler(txResultsMock)
+
+		// create a valid API request
+		req := concoctReq(nonexistingBlockID[:])
+
+		// execute the GetTransactionErrorMessagesByBlockID call
+		_, err := handler.GetTransactionErrorMessagesByBlockID(context.Background(), req)
+
+		// check that an error was received
+		suite.Require().Error(err)
+		errors.Is(err, status.Error(codes.NotFound, ""))
+	})
+}
+
+func (suite *Suite) defaultHandler() (*handler, 
*mockEng.ScriptExecutor) { + mockEngine := mockEng.NewScriptExecutor(suite.T()) + return &handler{ + engine: mockEngine, + chain: flow.Mainnet, + headers: suite.headers, + events: suite.events, + exeResults: suite.exeResults, + transactionResults: suite.txResults, + commits: suite.commits, + maxBlockRange: DefaultMaxBlockRange, + maxScriptSize: commonrpc.DefaultAccessMaxRequestSize, + }, mockEngine } diff --git a/engine/execution/scripts/engine.go b/engine/execution/scripts/engine.go new file mode 100644 index 00000000000..409bf23fec8 --- /dev/null +++ b/engine/execution/scripts/engine.go @@ -0,0 +1,97 @@ +package scripts + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/execution" + "github.com/onflow/flow-go/engine/execution/computation/query" + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/model/flow" +) + +type Engine struct { + unit *engine.Unit + log zerolog.Logger + queryExecutor query.Executor + execState state.ScriptExecutionState +} + +var _ execution.ScriptExecutor = (*Engine)(nil) + +func New( + logger zerolog.Logger, + queryExecutor query.Executor, + execState state.ScriptExecutionState, +) *Engine { + return &Engine{ + unit: engine.NewUnit(), + log: logger.With().Str("engine", "scripts").Logger(), + execState: execState, + queryExecutor: queryExecutor, + } +} + +func (e *Engine) Ready() <-chan struct{} { + return e.unit.Ready() +} + +func (e *Engine) Done() <-chan struct{} { + return e.unit.Done() +} + +func (e *Engine) ExecuteScriptAtBlockID( + ctx context.Context, + script []byte, + arguments [][]byte, + blockID flow.Identifier, +) ([]byte, uint64, error) { + + blockSnapshot, header, err := e.execState.CreateStorageSnapshot(blockID) + if err != nil { + return nil, 0, fmt.Errorf("failed to create storage snapshot: %w", err) + } + + return e.queryExecutor.ExecuteScript( + ctx, + script, + arguments, + header, + blockSnapshot) +} + +func (e *Engine) GetRegisterAtBlockID( + ctx context.Context, + owner, key []byte, + blockID flow.Identifier, +) ([]byte, error) { + + blockSnapshot, _, err := e.execState.CreateStorageSnapshot(blockID) + if err != nil { + return nil, fmt.Errorf("failed to create storage snapshot: %w", err) + } + + id := flow.NewRegisterID(flow.BytesToAddress(owner), string(key)) + data, err := blockSnapshot.Get(id) + if err != nil { + return nil, fmt.Errorf("failed to get the register (%s): %w", id, err) + } + + return data, nil +} + +func (e *Engine) GetAccount( + ctx context.Context, + addr flow.Address, + blockID flow.Identifier, +) (*flow.Account, error) { + blockSnapshot, header, err := e.execState.CreateStorageSnapshot(blockID) + if err != nil { + return nil, fmt.Errorf("failed to create storage snapshot: %w", err) + } + + return e.queryExecutor.GetAccount(ctx, addr, header, blockSnapshot) +} diff --git a/engine/execution/state/bootstrap/bootstrap.go b/engine/execution/state/bootstrap/bootstrap.go index 9f6f190c75b..19305dc257d 100644 --- a/engine/execution/state/bootstrap/bootstrap.go +++ b/engine/execution/state/bootstrap/bootstrap.go @@ -1,19 +1,23 @@ package bootstrap import ( + "context" "errors" "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/engine/execution/storehouse" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/storage/snapshot" 
"github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/operation" + pStorage "github.com/onflow/flow-go/storage/pebble" ) // an increased limit for bootstrapping @@ -36,9 +40,10 @@ func (b *Bootstrapper) BootstrapLedger( chain flow.Chain, opts ...fvm.BootstrapProcedureOption, ) (flow.StateCommitment, error) { + startCommit := flow.StateCommitment(ledger.InitialState()) storageSnapshot := state.NewLedgerStorageSnapshot( ledger, - flow.StateCommitment(ledger.InitialState())) + startCommit) vm := fvm.NewVirtualMachine() @@ -58,10 +63,11 @@ func (b *Bootstrapper) BootstrapLedger( return flow.DummyStateCommitment, err } - newStateCommitment, _, err := state.CommitDelta( + newStateCommitment, _, _, err := state.CommitDelta( ledger, executionSnapshot, - flow.StateCommitment(ledger.InitialState())) + storehouse.NewExecutingBlockSnapshot(storageSnapshot, startCommit), + ) if err != nil { return flow.DummyStateCommitment, err } @@ -71,18 +77,10 @@ func (b *Bootstrapper) BootstrapLedger( // IsBootstrapped returns whether the execution database has been bootstrapped, if yes, returns the // root statecommitment -func (b *Bootstrapper) IsBootstrapped(db *badger.DB) (flow.StateCommitment, bool, error) { +func (b *Bootstrapper) IsBootstrapped(db storage.DB) (flow.StateCommitment, bool, error) { var commit flow.StateCommitment - err := db.View(func(txn *badger.Txn) error { - err := operation.LookupStateCommitment(flow.ZeroID, &commit)(txn) - if err != nil { - return fmt.Errorf("could not lookup state commitment: %w", err) - } - - return nil - }) - + err := operation.LookupStateCommitment(db.Reader(), flow.ZeroID, &commit) if errors.Is(err, storage.ErrNotFound) { return flow.DummyStateCommitment, false, nil } @@ -94,37 +92,66 @@ func (b *Bootstrapper) IsBootstrapped(db *badger.DB) (flow.StateCommitment, bool return commit, true, nil } -func (b *Bootstrapper) BootstrapExecutionDatabase(db *badger.DB, commit flow.StateCommitment, genesis *flow.Header) error { +func (b *Bootstrapper) BootstrapExecutionDatabase( + manager lockctx.Manager, + db storage.DB, + rootSeal *flow.Seal, +) error { - err := operation.RetryOnConflict(db.Update, func(txn *badger.Txn) error { + lctx := manager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertOwnReceipt) + if err != nil { + return err + } - err := operation.InsertExecutedBlock(genesis.ID())(txn) + commit := rootSeal.FinalState + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + w := rw.Writer() + err := operation.UpdateExecutedBlock(w, rootSeal.BlockID) if err != nil { return fmt.Errorf("could not index initial genesis execution block: %w", err) } - err = operation.IndexStateCommitment(flow.ZeroID, commit)(txn) + err = operation.IndexExecutionResult(w, rootSeal.BlockID, rootSeal.ResultID) + if err != nil { + return fmt.Errorf("could not index result for root result: %w", err) + } + + err = operation.IndexStateCommitment(lctx, rw, flow.ZeroID, commit) if err != nil { return fmt.Errorf("could not index void state commitment: %w", err) } - err = operation.IndexStateCommitment(genesis.ID(), commit)(txn) + err = operation.IndexStateCommitment(lctx, rw, rootSeal.BlockID, commit) if err != nil { return fmt.Errorf("could not index genesis state commitment: %w", err) } snapshots := make([]*snapshot.ExecutionSnapshot, 0) - err = 
operation.InsertExecutionStateInteractions(genesis.ID(), snapshots)(txn) + err = operation.InsertExecutionStateInteractions(w, rootSeal.BlockID, snapshots) if err != nil { return fmt.Errorf("could not bootstrap execution state interactions: %w", err) } return nil }) +} + +func ImportRegistersFromCheckpoint(logger zerolog.Logger, checkpointFile string, checkpointHeight uint64, checkpointRootHash ledger.RootHash, pdb *pebble.DB, workerCount int) error { + logger.Info().Msgf("importing registers from checkpoint file %s at height %d with root hash: %v", checkpointFile, checkpointHeight, checkpointRootHash) + bootstrap, err := pStorage.NewRegisterBootstrap(pdb, checkpointFile, checkpointHeight, checkpointRootHash, logger) if err != nil { - return err + return fmt.Errorf("could not create registers bootstrapper: %w", err) + } + + // TODO: find a way to hook a context up to this to allow a graceful shutdown + err = bootstrap.IndexCheckpointFile(context.Background(), workerCount) + if err != nil { + return fmt.Errorf("could not load checkpoint file: %w", err) } + logger.Info().Msgf("finish importing registers from checkpoint file %s at height %d", checkpointFile, checkpointHeight) return nil } diff --git a/engine/execution/state/bootstrap/bootstrap_test.go b/engine/execution/state/bootstrap/bootstrap_test.go index 8e66b769423..f85e9a79e76 100644 --- a/engine/execution/state/bootstrap/bootstrap_test.go +++ b/engine/execution/state/bootstrap/bootstrap_test.go @@ -2,13 +2,17 @@ package bootstrap import ( "encoding/hex" + "fmt" "testing" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/engine/execution/storehouse" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/systemcontracts" completeLedger "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/wal/fixtures" "github.com/onflow/flow-go/model/flow" @@ -42,7 +46,7 @@ func TestBootstrapLedger(t *testing.T) { expectedStateCommitment := unittest.GenesisStateCommitment - if !assert.Equal(t, expectedStateCommitment, stateCommitment) { + if !assert.Equal(t, fmt.Sprint(expectedStateCommitment), fmt.Sprint(stateCommitment)) { t.Logf( "Incorrect state commitment: got %s, expected %s", hex.EncodeToString(stateCommitment[:]), @@ -53,7 +57,7 @@ func TestBootstrapLedger(t *testing.T) { } func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { - expectedStateCommitmentBytes, _ := hex.DecodeString("e3ef7950c868f03880e489aa4b1d84b3916a20a28d2a1dfc88292cad93153ddb") + expectedStateCommitmentBytes, _ := hex.DecodeString("882b0d6e4b69733234018d359c6b97d252ab1a0a521e1097ed65d69bd1357251") expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) require.NoError(t, err) @@ -79,7 +83,111 @@ func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { ) require.NoError(t, err) - if !assert.Equal(t, expectedStateCommitment, stateCommitment) { + if !assert.Equal(t, fmt.Sprint(expectedStateCommitment), fmt.Sprint(stateCommitment)) { + t.Logf( + "Incorrect state commitment: got %s, expected %s", + hex.EncodeToString(stateCommitment[:]), + hex.EncodeToString(expectedStateCommitment[:]), + ) + } + }) +} + +// TestBootstrapLedger_EmptyTransaction bootstraps a ledger with: +// - transaction fees +// - storage fees +// - minimum account balance +// - initial token supply +// Then runs an empty transaction to trigger the bookkeeping parts of a transaction: +// - payer has balance to 
cover the transaction fees check +// - account storage check +// - transaction fee deduction +// This tests that the state commitment has not changed for the bookkeeping parts of the transaction. +func TestBootstrapLedger_EmptyTransaction(t *testing.T) { + expectedStateCommitmentBytes, _ := hex.DecodeString("677a70ac17338286e65c6e2bef0a5eff8495ba10226e44d662386ccf358d3140") + expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) + require.NoError(t, err) + + unittest.RunWithTempDir(t, func(dbDir string) { + + chain := flow.Mainnet.Chain() + + metricsCollector := &metrics.NoopCollector{} + wal := &fixtures.NoopWAL{} + ls, err := completeLedger.NewLedger(wal, 100, metricsCollector, zerolog.Nop(), completeLedger.DefaultPathFinderVersion) + require.NoError(t, err) + compactor := fixtures.NewNoopCompactor(ls) + <-compactor.Ready() + defer func() { + <-ls.Done() + <-compactor.Done() + }() + + stateCommitment, err := NewBootstrapper(zerolog.Nop()).BootstrapLedger( + ls, + unittest.ServiceAccountPublicKey, + chain, + fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply), + fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), + fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), + fvm.WithTransactionFee(fvm.DefaultTransactionFees), + fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), + ) + require.NoError(t, err) + + storageSnapshot := state.NewLedgerStorageSnapshot(ls, stateCommitment) + vm := fvm.NewVirtualMachine() + + ctx := fvm.NewContext( + fvm.WithChain(chain), + fvm.WithTransactionFeesEnabled(true), + fvm.WithAccountStorageLimit(true), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithAuthorizationChecksEnabled(false), + ) + + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + // create an empty transaction + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(` + transaction() { + prepare() {} + execute {} + } + `)). + SetProposalKey(sc.FlowServiceAccount.Address, 0, 0). + SetPayer(sc.FlowServiceAccount.Address). + Build() + require.NoError(t, err) + + executionSnapshot, output, err := vm.Run(ctx, fvm.Transaction(txBody, 0), storageSnapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + // make sure we have the expected events + // all of these events are emitted by the fee deduction + eventNames := make([]string, 0, len(output.Events)) + for _, event := range output.Events { + eventNames = append(eventNames, string(event.Type)) + } + expectedEventNames := []string{ + "A.1654653399040a61.FlowToken.TokensWithdrawn", + "A.f233dcee88fe0abe.FungibleToken.Withdrawn", + "A.1654653399040a61.FlowToken.TokensDeposited", + "A.f233dcee88fe0abe.FungibleToken.Deposited", + "A.f919ee77447b7497.FlowFees.FeesDeducted", + } + require.Equal(t, expectedEventNames, eventNames) + + stateCommitment, _, _, err = state.CommitDelta( + ls, + executionSnapshot, + storehouse.NewExecutingBlockSnapshot(storageSnapshot, stateCommitment), + ) + require.NoError(t, err) + + if !assert.Equal(t, fmt.Sprint(expectedStateCommitment), fmt.Sprint(stateCommitment)) { t.Logf( "Incorrect state commitment: got %s, expected %s", hex.EncodeToString(stateCommitment[:]), diff --git a/engine/execution/state/mock/execution_state.go b/engine/execution/state/mock/execution_state.go index f847632cd94..5199ca35e26 100644 --- a/engine/execution/state/mock/execution_state.go +++ b/engine/execution/state/mock/execution_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
+// Code generated by mockery. DO NOT EDIT. package mock @@ -22,6 +22,10 @@ type ExecutionState struct { func (_m *ExecutionState) ChunkDataPackByChunkID(_a0 flow.Identifier) (*flow.ChunkDataPack, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ChunkDataPackByChunkID") + } + var r0 *flow.ChunkDataPack var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ChunkDataPack, error)); ok { @@ -44,10 +48,53 @@ func (_m *ExecutionState) ChunkDataPackByChunkID(_a0 flow.Identifier) (*flow.Chu return r0, r1 } +// CreateStorageSnapshot provides a mock function with given fields: blockID +func (_m *ExecutionState) CreateStorageSnapshot(blockID flow.Identifier) (snapshot.StorageSnapshot, *flow.Header, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for CreateStorageSnapshot") + } + + var r0 snapshot.StorageSnapshot + var r1 *flow.Header + var r2 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (snapshot.StorageSnapshot, *flow.Header, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) snapshot.StorageSnapshot); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(snapshot.StorageSnapshot) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) *flow.Header); ok { + r1 = rf(blockID) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*flow.Header) + } + } + + if rf, ok := ret.Get(2).(func(flow.Identifier) error); ok { + r2 = rf(blockID) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // GetExecutionResultID provides a mock function with given fields: _a0, _a1 func (_m *ExecutionState) GetExecutionResultID(_a0 context.Context, _a1 flow.Identifier) (flow.Identifier, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetExecutionResultID") + } + var r0 flow.Identifier var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (flow.Identifier, error)); ok { @@ -70,10 +117,42 @@ func (_m *ExecutionState) GetExecutionResultID(_a0 context.Context, _a1 flow.Ide return r0, r1 } -// GetHighestExecutedBlockID provides a mock function with given fields: _a0 -func (_m *ExecutionState) GetHighestExecutedBlockID(_a0 context.Context) (uint64, flow.Identifier, error) { +// GetHighestFinalizedExecuted provides a mock function with no fields +func (_m *ExecutionState) GetHighestFinalizedExecuted() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetHighestFinalizedExecuted") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLastExecutedBlockID provides a mock function with given fields: _a0 +func (_m *ExecutionState) GetLastExecutedBlockID(_a0 context.Context) (uint64, flow.Identifier, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for GetLastExecutedBlockID") + } + var r0 uint64 var r1 flow.Identifier var r2 error @@ -103,27 +182,45 @@ func (_m *ExecutionState) GetHighestExecutedBlockID(_a0 context.Context) (uint64 return r0, r1, r2 } -// HasState provides a mock function with given fields: _a0 -func (_m *ExecutionState) HasState(_a0 flow.StateCommitment) bool { - ret := _m.Called(_a0) +// 
IsBlockExecuted provides a mock function with given fields: height, blockID +func (_m *ExecutionState) IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) { + ret := _m.Called(height, blockID) + + if len(ret) == 0 { + panic("no return value specified for IsBlockExecuted") + } var r0 bool - if rf, ok := ret.Get(0).(func(flow.StateCommitment) bool); ok { - r0 = rf(_a0) + var r1 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (bool, error)); ok { + return rf(height, blockID) + } + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) bool); ok { + r0 = rf(height, blockID) } else { r0 = ret.Get(0).(bool) } - return r0 + if rf, ok := ret.Get(1).(func(uint64, flow.Identifier) error); ok { + r1 = rf(height, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// NewStorageSnapshot provides a mock function with given fields: _a0 -func (_m *ExecutionState) NewStorageSnapshot(_a0 flow.StateCommitment) snapshot.StorageSnapshot { - ret := _m.Called(_a0) +// NewStorageSnapshot provides a mock function with given fields: commit, blockID, height +func (_m *ExecutionState) NewStorageSnapshot(commit flow.StateCommitment, blockID flow.Identifier, height uint64) snapshot.StorageSnapshot { + ret := _m.Called(commit, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for NewStorageSnapshot") + } var r0 snapshot.StorageSnapshot - if rf, ok := ret.Get(0).(func(flow.StateCommitment) snapshot.StorageSnapshot); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(flow.StateCommitment, flow.Identifier, uint64) snapshot.StorageSnapshot); ok { + r0 = rf(commit, blockID, height) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(snapshot.StorageSnapshot) @@ -137,6 +234,10 @@ func (_m *ExecutionState) NewStorageSnapshot(_a0 flow.StateCommitment) snapshot. 
func (_m *ExecutionState) SaveExecutionResults(ctx context.Context, result *execution.ComputationResult) error { ret := _m.Called(ctx, result) + if len(ret) == 0 { + panic("no return value specified for SaveExecutionResults") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *execution.ComputationResult) error); ok { r0 = rf(ctx, result) @@ -147,25 +248,29 @@ func (_m *ExecutionState) SaveExecutionResults(ctx context.Context, result *exec return r0 } -// StateCommitmentByBlockID provides a mock function with given fields: _a0, _a1 -func (_m *ExecutionState) StateCommitmentByBlockID(_a0 context.Context, _a1 flow.Identifier) (flow.StateCommitment, error) { - ret := _m.Called(_a0, _a1) +// StateCommitmentByBlockID provides a mock function with given fields: _a0 +func (_m *ExecutionState) StateCommitmentByBlockID(_a0 flow.Identifier) (flow.StateCommitment, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for StateCommitmentByBlockID") + } var r0 flow.StateCommitment var r1 error - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (flow.StateCommitment, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.StateCommitment, error)); ok { + return rf(_a0) } - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) flow.StateCommitment); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(flow.Identifier) flow.StateCommitment); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(flow.StateCommitment) } } - if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -173,12 +278,16 @@ func (_m *ExecutionState) StateCommitmentByBlockID(_a0 context.Context, _a1 flow return r0, r1 } -// UpdateHighestExecutedBlockIfHigher provides a mock function with given fields: _a0, _a1 -func (_m *ExecutionState) UpdateHighestExecutedBlockIfHigher(_a0 context.Context, _a1 *flow.Header) error { +// UpdateLastExecutedBlock provides a mock function with given fields: _a0, _a1 +func (_m *ExecutionState) UpdateLastExecutedBlock(_a0 context.Context, _a1 flow.Identifier) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for UpdateLastExecutedBlock") + } + var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *flow.Header) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) error); ok { r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) @@ -187,13 +296,12 @@ func (_m *ExecutionState) UpdateHighestExecutedBlockIfHigher(_a0 context.Context return r0 } -type mockConstructorTestingTNewExecutionState interface { +// NewExecutionState creates a new instance of ExecutionState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionState(t interface { mock.TestingT Cleanup(func()) -} - -// NewExecutionState creates a new instance of ExecutionState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewExecutionState(t mockConstructorTestingTNewExecutionState) *ExecutionState { +}) *ExecutionState { mock := &ExecutionState{} mock.Mock.Test(t) diff --git a/engine/execution/state/mock/finalized_execution_state.go b/engine/execution/state/mock/finalized_execution_state.go new file mode 100644 index 00000000000..99826ff92af --- /dev/null +++ b/engine/execution/state/mock/finalized_execution_state.go @@ -0,0 +1,52 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// FinalizedExecutionState is an autogenerated mock type for the FinalizedExecutionState type +type FinalizedExecutionState struct { + mock.Mock +} + +// GetHighestFinalizedExecuted provides a mock function with no fields +func (_m *FinalizedExecutionState) GetHighestFinalizedExecuted() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetHighestFinalizedExecuted") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewFinalizedExecutionState creates a new instance of FinalizedExecutionState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFinalizedExecutionState(t interface { + mock.TestingT + Cleanup(func()) +}) *FinalizedExecutionState { + mock := &FinalizedExecutionState{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/execution/state/mock/read_only_execution_state.go b/engine/execution/state/mock/read_only_execution_state.go index 24f230ed316..8ebe4f18c33 100644 --- a/engine/execution/state/mock/read_only_execution_state.go +++ b/engine/execution/state/mock/read_only_execution_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -20,6 +20,10 @@ type ReadOnlyExecutionState struct { func (_m *ReadOnlyExecutionState) ChunkDataPackByChunkID(_a0 flow.Identifier) (*flow.ChunkDataPack, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ChunkDataPackByChunkID") + } + var r0 *flow.ChunkDataPack var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ChunkDataPack, error)); ok { @@ -42,10 +46,53 @@ func (_m *ReadOnlyExecutionState) ChunkDataPackByChunkID(_a0 flow.Identifier) (* return r0, r1 } +// CreateStorageSnapshot provides a mock function with given fields: blockID +func (_m *ReadOnlyExecutionState) CreateStorageSnapshot(blockID flow.Identifier) (snapshot.StorageSnapshot, *flow.Header, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for CreateStorageSnapshot") + } + + var r0 snapshot.StorageSnapshot + var r1 *flow.Header + var r2 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (snapshot.StorageSnapshot, *flow.Header, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) snapshot.StorageSnapshot); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(snapshot.StorageSnapshot) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) *flow.Header); ok { + r1 = rf(blockID) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*flow.Header) + } + } + + if rf, ok := ret.Get(2).(func(flow.Identifier) error); ok { + r2 = rf(blockID) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // GetExecutionResultID provides a mock function with given fields: _a0, _a1 func (_m *ReadOnlyExecutionState) GetExecutionResultID(_a0 context.Context, _a1 flow.Identifier) (flow.Identifier, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetExecutionResultID") + } + var r0 flow.Identifier var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (flow.Identifier, error)); ok { @@ -68,10 +115,14 @@ func (_m *ReadOnlyExecutionState) GetExecutionResultID(_a0 context.Context, _a1 return r0, r1 } -// GetHighestExecutedBlockID provides a mock function with given fields: _a0 -func (_m *ReadOnlyExecutionState) GetHighestExecutedBlockID(_a0 context.Context) (uint64, flow.Identifier, error) { +// GetLastExecutedBlockID provides a mock function with given fields: _a0 +func (_m *ReadOnlyExecutionState) GetLastExecutedBlockID(_a0 context.Context) (uint64, flow.Identifier, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for GetLastExecutedBlockID") + } + var r0 uint64 var r1 flow.Identifier var r2 error @@ -101,27 +152,45 @@ func (_m *ReadOnlyExecutionState) GetHighestExecutedBlockID(_a0 context.Context) return r0, r1, r2 } -// HasState provides a mock function with given fields: _a0 -func (_m *ReadOnlyExecutionState) HasState(_a0 flow.StateCommitment) bool { - ret := _m.Called(_a0) +// IsBlockExecuted provides a mock function with given fields: height, blockID +func (_m *ReadOnlyExecutionState) IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) { + ret := _m.Called(height, blockID) + + if len(ret) == 0 { + panic("no return value specified for IsBlockExecuted") + } var r0 bool - if rf, ok := ret.Get(0).(func(flow.StateCommitment) bool); ok { - r0 = rf(_a0) + var r1 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (bool, error)); ok { + return rf(height, blockID) + } + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) bool); ok { 
+ r0 = rf(height, blockID) } else { r0 = ret.Get(0).(bool) } - return r0 + if rf, ok := ret.Get(1).(func(uint64, flow.Identifier) error); ok { + r1 = rf(height, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// NewStorageSnapshot provides a mock function with given fields: _a0 -func (_m *ReadOnlyExecutionState) NewStorageSnapshot(_a0 flow.StateCommitment) snapshot.StorageSnapshot { - ret := _m.Called(_a0) +// NewStorageSnapshot provides a mock function with given fields: commit, blockID, height +func (_m *ReadOnlyExecutionState) NewStorageSnapshot(commit flow.StateCommitment, blockID flow.Identifier, height uint64) snapshot.StorageSnapshot { + ret := _m.Called(commit, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for NewStorageSnapshot") + } var r0 snapshot.StorageSnapshot - if rf, ok := ret.Get(0).(func(flow.StateCommitment) snapshot.StorageSnapshot); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(flow.StateCommitment, flow.Identifier, uint64) snapshot.StorageSnapshot); ok { + r0 = rf(commit, blockID, height) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(snapshot.StorageSnapshot) @@ -131,25 +200,29 @@ func (_m *ReadOnlyExecutionState) NewStorageSnapshot(_a0 flow.StateCommitment) s return r0 } -// StateCommitmentByBlockID provides a mock function with given fields: _a0, _a1 -func (_m *ReadOnlyExecutionState) StateCommitmentByBlockID(_a0 context.Context, _a1 flow.Identifier) (flow.StateCommitment, error) { - ret := _m.Called(_a0, _a1) +// StateCommitmentByBlockID provides a mock function with given fields: _a0 +func (_m *ReadOnlyExecutionState) StateCommitmentByBlockID(_a0 flow.Identifier) (flow.StateCommitment, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for StateCommitmentByBlockID") + } var r0 flow.StateCommitment var r1 error - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (flow.StateCommitment, error)); ok { - return rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.StateCommitment, error)); ok { + return rf(_a0) } - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) flow.StateCommitment); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(flow.Identifier) flow.StateCommitment); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(flow.StateCommitment) } } - if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -157,13 +230,12 @@ func (_m *ReadOnlyExecutionState) StateCommitmentByBlockID(_a0 context.Context, return r0, r1 } -type mockConstructorTestingTNewReadOnlyExecutionState interface { +// NewReadOnlyExecutionState creates a new instance of ReadOnlyExecutionState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReadOnlyExecutionState(t interface { mock.TestingT Cleanup(func()) -} - -// NewReadOnlyExecutionState creates a new instance of ReadOnlyExecutionState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewReadOnlyExecutionState(t mockConstructorTestingTNewReadOnlyExecutionState) *ReadOnlyExecutionState { +}) *ReadOnlyExecutionState { mock := &ReadOnlyExecutionState{} mock.Mock.Test(t) diff --git a/engine/execution/state/mock/register_updates_holder.go b/engine/execution/state/mock/register_updates_holder.go index 69c58edf06f..42bb218545b 100644 --- a/engine/execution/state/mock/register_updates_holder.go +++ b/engine/execution/state/mock/register_updates_holder.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -12,10 +12,34 @@ type RegisterUpdatesHolder struct { mock.Mock } -// UpdatedRegisters provides a mock function with given fields: +// UpdatedRegisterSet provides a mock function with no fields +func (_m *RegisterUpdatesHolder) UpdatedRegisterSet() map[flow.RegisterID]flow.RegisterValue { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for UpdatedRegisterSet") + } + + var r0 map[flow.RegisterID]flow.RegisterValue + if rf, ok := ret.Get(0).(func() map[flow.RegisterID]flow.RegisterValue); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[flow.RegisterID]flow.RegisterValue) + } + } + + return r0 +} + +// UpdatedRegisters provides a mock function with no fields func (_m *RegisterUpdatesHolder) UpdatedRegisters() flow.RegisterEntries { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for UpdatedRegisters") + } + var r0 flow.RegisterEntries if rf, ok := ret.Get(0).(func() flow.RegisterEntries); ok { r0 = rf() @@ -28,13 +52,12 @@ func (_m *RegisterUpdatesHolder) UpdatedRegisters() flow.RegisterEntries { return r0 } -type mockConstructorTestingTNewRegisterUpdatesHolder interface { +// NewRegisterUpdatesHolder creates a new instance of RegisterUpdatesHolder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRegisterUpdatesHolder(t interface { mock.TestingT Cleanup(func()) -} - -// NewRegisterUpdatesHolder creates a new instance of RegisterUpdatesHolder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRegisterUpdatesHolder(t mockConstructorTestingTNewRegisterUpdatesHolder) *RegisterUpdatesHolder { +}) *RegisterUpdatesHolder { mock := &RegisterUpdatesHolder{} mock.Mock.Test(t) diff --git a/engine/execution/state/mock/script_execution_state.go b/engine/execution/state/mock/script_execution_state.go new file mode 100644 index 00000000000..7632abe8f0a --- /dev/null +++ b/engine/execution/state/mock/script_execution_state.go @@ -0,0 +1,146 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + snapshot "github.com/onflow/flow-go/fvm/storage/snapshot" +) + +// ScriptExecutionState is an autogenerated mock type for the ScriptExecutionState type +type ScriptExecutionState struct { + mock.Mock +} + +// CreateStorageSnapshot provides a mock function with given fields: blockID +func (_m *ScriptExecutionState) CreateStorageSnapshot(blockID flow.Identifier) (snapshot.StorageSnapshot, *flow.Header, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for CreateStorageSnapshot") + } + + var r0 snapshot.StorageSnapshot + var r1 *flow.Header + var r2 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (snapshot.StorageSnapshot, *flow.Header, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) snapshot.StorageSnapshot); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(snapshot.StorageSnapshot) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) *flow.Header); ok { + r1 = rf(blockID) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*flow.Header) + } + } + + if rf, ok := ret.Get(2).(func(flow.Identifier) error); ok { + r2 = rf(blockID) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// IsBlockExecuted provides a mock function with given fields: height, blockID +func (_m *ScriptExecutionState) IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) { + ret := _m.Called(height, blockID) + + if len(ret) == 0 { + panic("no return value specified for IsBlockExecuted") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (bool, error)); ok { + return rf(height, blockID) + } + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) bool); ok { + r0 = rf(height, blockID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(uint64, flow.Identifier) error); ok { + r1 = rf(height, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewStorageSnapshot provides a mock function with given fields: commit, blockID, height +func (_m *ScriptExecutionState) NewStorageSnapshot(commit flow.StateCommitment, blockID flow.Identifier, height uint64) snapshot.StorageSnapshot { + ret := _m.Called(commit, blockID, height) + + if len(ret) == 0 { + panic("no return value specified for NewStorageSnapshot") + } + + var r0 snapshot.StorageSnapshot + if rf, ok := ret.Get(0).(func(flow.StateCommitment, flow.Identifier, uint64) snapshot.StorageSnapshot); ok { + r0 = rf(commit, blockID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(snapshot.StorageSnapshot) + } + } + + return r0 +} + +// StateCommitmentByBlockID provides a mock function with given fields: _a0 +func (_m *ScriptExecutionState) StateCommitmentByBlockID(_a0 flow.Identifier) (flow.StateCommitment, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for StateCommitmentByBlockID") + } + + var r0 flow.StateCommitment + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.StateCommitment, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) flow.StateCommitment); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.StateCommitment) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewScriptExecutionState 
creates a new instance of ScriptExecutionState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewScriptExecutionState(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *ScriptExecutionState {
+	mock := &ScriptExecutionState{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/engine/execution/state/state.go b/engine/execution/state/state.go
index d5ed9d9ab4c..47a7accb8fc 100644
--- a/engine/execution/state/state.go
+++ b/engine/execution/state/state.go
@@ -4,39 +4,73 @@ import (
	"context"
	"errors"
	"fmt"
+	"math"
	"sync"

-	"github.com/dgraph-io/badger/v2"
+	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/engine/execution"
+	"github.com/onflow/flow-go/engine/execution/storehouse"
	"github.com/onflow/flow-go/fvm/storage/snapshot"
	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/convert"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/trace"
	"github.com/onflow/flow-go/storage"
-	badgerstorage "github.com/onflow/flow-go/storage/badger"
-	"github.com/onflow/flow-go/storage/badger/operation"
-	"github.com/onflow/flow-go/storage/badger/procedure"
+	"github.com/onflow/flow-go/storage/operation"
)

+var ErrExecutionStatePruned = fmt.Errorf("execution state is pruned")
+var ErrNotExecuted = fmt.Errorf("block not executed")
+
 // ReadOnlyExecutionState allows to read the execution state
 type ReadOnlyExecutionState interface {
-	// NewStorageSnapshot creates a new ready-only view at the given state commitment.
-	NewStorageSnapshot(flow.StateCommitment) snapshot.StorageSnapshot
-
-	// StateCommitmentByBlockID returns the final state commitment for the provided block ID.
-	StateCommitmentByBlockID(context.Context, flow.Identifier) (flow.StateCommitment, error)
-
-	// HasState returns true if the state with the given state commitment exists in memory
-	HasState(flow.StateCommitment) bool
+	ScriptExecutionState

	// ChunkDataPackByChunkID retrieve a chunk data pack given the chunk ID.
	ChunkDataPackByChunkID(flow.Identifier) (*flow.ChunkDataPack, error)

	GetExecutionResultID(context.Context, flow.Identifier) (flow.Identifier, error)

-	GetHighestExecutedBlockID(context.Context) (uint64, flow.Identifier, error)
+	GetLastExecutedBlockID(context.Context) (uint64, flow.Identifier, error)
+}
+
+// ScriptExecutionState is a subset of the `state.ExecutionState` interface, restricted to accessing the state
+// used for script execution without mutating the execution state of the blockchain.
+type ScriptExecutionState interface {
+	// NewStorageSnapshot creates a new read-only view at the given block.
+	NewStorageSnapshot(commit flow.StateCommitment, blockID flow.Identifier, height uint64) snapshot.StorageSnapshot
+
+	// CreateStorageSnapshot creates a new read-only view at the given block.
+	// It returns:
+	// - (nil, nil, storage.ErrNotFound) if the block is unknown
+	// - (nil, nil, state.ErrNotExecuted) if the block is not executed
+	// - (nil, nil, state.ErrExecutionStatePruned) if the execution state has been pruned
+	CreateStorageSnapshot(blockID flow.Identifier) (snapshot.StorageSnapshot, *flow.Header, error)
+
+	// StateCommitmentByBlockID returns the final state commitment for the provided block ID.
+	StateCommitmentByBlockID(flow.Identifier) (flow.StateCommitment, error)
+
+	// Any error returned is an exception
+	IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error)
+}
+
+// IsParentExecuted returns true if and only if the parent of the given block (header) is executed.
+// TODO: Checking whether `header` is a root block is potentially flawed, because it only works for the genesis block.
+//
+// Neither spork root blocks nor dynamically bootstrapped Execution Nodes (with truncated history) are supported.
+func IsParentExecuted(state ReadOnlyExecutionState, header *flow.Header) (bool, error) {
+	// sanity check, caller should not pass a root block
+	if header.Height == 0 {
+		return false, fmt.Errorf("root block does not have parent block")
+	}
+	return state.IsBlockExecuted(header.Height-1, header.ParentID)
+}
+
+// FinalizedExecutionState is an interface used to access the finalized execution state
+type FinalizedExecutionState interface {
+	GetHighestFinalizedExecuted() (uint64, error)
 }

 // TODO Many operations here are should be transactional, so we need to refactor this
@@ -47,21 +81,17 @@ type ExecutionState interface {
	ReadOnlyExecutionState

-	UpdateHighestExecutedBlockIfHigher(context.Context, *flow.Header) error
+	UpdateLastExecutedBlock(context.Context, flow.Identifier) error

	SaveExecutionResults(
		ctx context.Context,
		result *execution.ComputationResult,
	) error
-}

-const (
-	KeyPartOwner = uint16(0)
-	// @deprecated - controller was used only by the very first
-	// version of cadence for access controll which was retired later on
-	// KeyPartController = uint16(1)
-	KeyPartKey = uint16(2)
-)
+	// only available with storehouse enabled
+	// panics when called with storehouse disabled (which would be a bug)
+	GetHighestFinalizedExecuted() (uint64, error)
+}

 type state struct {
	tracer module.Tracer
@@ -69,21 +99,20 @@ type state struct {
	commits            storage.Commits
	blocks             storage.Blocks
	headers            storage.Headers
-	collections        storage.Collections
	chunkDataPacks     storage.ChunkDataPacks
	results            storage.ExecutionResults
	myReceipts         storage.MyExecutionReceipts
	events             storage.Events
	serviceEvents      storage.ServiceEvents
	transactionResults storage.TransactionResults
-	db                 *badger.DB
-}
-
-func RegisterIDToKey(reg flow.RegisterID) ledger.Key {
-	return ledger.NewKey([]ledger.KeyPart{
-		ledger.NewKeyPart(KeyPartOwner, []byte(reg.Owner)),
-		ledger.NewKeyPart(KeyPartKey, []byte(reg.Key)),
-	})
+	db                 storage.DB
+	getLatestFinalized func() (uint64, error)
+	lockManager        lockctx.Manager
+
+	registerStore execution.RegisterStore
+	// when true, registers are stored in both the register store and the ledger,
+	// and register queries are served by the register store instead of the ledger
+	enableRegisterStore bool
 }

 // NewExecutionState returns a new execution state access layer for the given ledger storage.
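For orientation, a minimal sketch of how a caller might branch on the sentinel errors documented above for CreateStorageSnapshot. The function name and its placement inside this package are assumptions for illustration, not part of the patch:

func snapshotForBlock(es ScriptExecutionState, blockID flow.Identifier) (snapshot.StorageSnapshot, error) {
	snap, _, err := es.CreateStorageSnapshot(blockID)
	switch {
	case err == nil:
		return snap, nil
	case errors.Is(err, storage.ErrNotFound):
		return nil, fmt.Errorf("block %v is unknown: %w", blockID, err)
	case errors.Is(err, ErrNotExecuted):
		return nil, fmt.Errorf("block %v is not executed yet: %w", blockID, err)
	case errors.Is(err, ErrExecutionStatePruned):
		return nil, fmt.Errorf("state for block %v has been pruned: %w", blockID, err)
	default:
		// any other error is an exception
		return nil, fmt.Errorf("could not create snapshot for block %v: %w", blockID, err)
	}
}

Because the implementation wraps the sentinels with %w, errors.Is resolves them even through the added context.
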
@@ -92,37 +121,43 @@ func NewExecutionState( commits storage.Commits, blocks storage.Blocks, headers storage.Headers, - collections storage.Collections, chunkDataPacks storage.ChunkDataPacks, results storage.ExecutionResults, myReceipts storage.MyExecutionReceipts, events storage.Events, serviceEvents storage.ServiceEvents, transactionResults storage.TransactionResults, - db *badger.DB, + db storage.DB, + getLatestFinalized func() (uint64, error), tracer module.Tracer, + registerStore execution.RegisterStore, + enableRegisterStore bool, + lockManager lockctx.Manager, ) ExecutionState { return &state{ - tracer: tracer, - ls: ls, - commits: commits, - blocks: blocks, - headers: headers, - collections: collections, - chunkDataPacks: chunkDataPacks, - results: results, - myReceipts: myReceipts, - events: events, - serviceEvents: serviceEvents, - transactionResults: transactionResults, - db: db, + tracer: tracer, + ls: ls, + commits: commits, + blocks: blocks, + headers: headers, + chunkDataPacks: chunkDataPacks, + results: results, + myReceipts: myReceipts, + events: events, + serviceEvents: serviceEvents, + transactionResults: transactionResults, + db: db, + getLatestFinalized: getLatestFinalized, + registerStore: registerStore, + enableRegisterStore: enableRegisterStore, + lockManager: lockManager, } } func makeSingleValueQuery(commitment flow.StateCommitment, id flow.RegisterID) (*ledger.QuerySingleValue, error) { return ledger.NewQuerySingleValue(ledger.State(commitment), - RegisterIDToKey(id), + convert.RegisterIDToLedgerKey(id), ) } @@ -135,14 +170,12 @@ func RegisterEntriesToKeysValues( keys := make([]ledger.Key, len(entries)) values := make([]ledger.Value, len(entries)) for i, entry := range entries { - keys[i] = RegisterIDToKey(entry.Key) + keys[i] = convert.RegisterIDToLedgerKey(entry.Key) values[i] = entry.Value } return keys, values } -// TODO(patrick): revisit caching. readCache needs to be mutex guarded for -// parallel execution. 
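To make the query helpers above concrete, a minimal sketch of a single register read at a known commitment. readRegister is an assumed name, and the sketch assumes ledger.Ledger exposes GetSingleValue for the query built by makeSingleValueQuery:

func readRegister(ldg ledger.Ledger, commit flow.StateCommitment, id flow.RegisterID) (flow.RegisterValue, error) {
	// build a single-value query keyed by the converted register ID
	query, err := makeSingleValueQuery(commit, id)
	if err != nil {
		return nil, fmt.Errorf("cannot create ledger query: %w", err)
	}
	// resolve the value against the trie at the given state commitment
	value, err := ldg.GetSingleValue(query)
	if err != nil {
		return nil, fmt.Errorf("cannot read register %s: %w", id, err)
	}
	return flow.RegisterValue(value), nil
}
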
type LedgerStorageSnapshot struct { ledger ledger.Ledger commitment flow.StateCommitment @@ -223,43 +256,97 @@ func (storage *LedgerStorageSnapshot) Get( func (s *state) NewStorageSnapshot( commitment flow.StateCommitment, + blockID flow.Identifier, + height uint64, ) snapshot.StorageSnapshot { + if s.enableRegisterStore { + return storehouse.NewBlockEndStateSnapshot(s.registerStore, blockID, height) + } return NewLedgerStorageSnapshot(s.ls, commitment) } +func (s *state) CreateStorageSnapshot( + blockID flow.Identifier, +) (snapshot.StorageSnapshot, *flow.Header, error) { + header, err := s.headers.ByBlockID(blockID) + if err != nil { + return nil, nil, fmt.Errorf("cannot get header by block ID: %w", err) + } + + // make sure the block is executed + commit, err := s.commits.ByBlockID(blockID) + if err != nil { + // a missing state commitment means the block hasn't been executed yet + if errors.Is(err, storage.ErrNotFound) { + return nil, nil, fmt.Errorf("block %v is never executed: %w", blockID, ErrNotExecuted) + } + + return nil, header, fmt.Errorf("cannot get commit by block ID: %w", err) + } + + // make sure we have trie state for this block + ledgerHasState := s.ls.HasState(ledger.State(commit)) + if !ledgerHasState { + return nil, header, fmt.Errorf("state not found in ledger for commit %x (block %v): %w", commit, blockID, ErrExecutionStatePruned) + } + + if s.enableRegisterStore { + isExecuted, err := s.registerStore.IsBlockExecuted(header.Height, blockID) + if err != nil { + return nil, header, fmt.Errorf("cannot check if block %v is executed: %w", blockID, err) + } + if !isExecuted { + return nil, header, fmt.Errorf("block %v is not executed yet: %w", blockID, ErrNotExecuted) + } + } + + return s.NewStorageSnapshot(commit, blockID, header.Height), header, nil +} + type RegisterUpdatesHolder interface { UpdatedRegisters() flow.RegisterEntries + UpdatedRegisterSet() map[flow.RegisterID]flow.RegisterValue } -func CommitDelta(ldg ledger.Ledger, ruh RegisterUpdatesHolder, baseState flow.StateCommitment) (flow.StateCommitment, *ledger.TrieUpdate, error) { - keys, values := RegisterEntriesToKeysValues(ruh.UpdatedRegisters()) +// CommitDelta takes a base storage snapshot and creates a new storage snapshot +// with the register updates from the given RegisterUpdatesHolder. +// A new state commitment is returned from the ledger, along with the trie update. +// Any error returned is an exception. +func CommitDelta( + ldg ledger.Ledger, + ruh RegisterUpdatesHolder, + baseStorageSnapshot execution.ExtendableStorageSnapshot, +) (flow.StateCommitment, *ledger.TrieUpdate, execution.ExtendableStorageSnapshot, error) { + updatedRegisters := ruh.UpdatedRegisters() + keys, values := RegisterEntriesToKeysValues(updatedRegisters) + baseState := baseStorageSnapshot.Commitment() update, err := ledger.NewUpdate(ledger.State(baseState), keys, values) if err != nil { - return flow.DummyStateCommitment, nil, fmt.Errorf("cannot create ledger update: %w", err) + return flow.DummyStateCommitment, nil, nil, fmt.Errorf("cannot create ledger update: %w", err) } - commit, trieUpdate, err := ldg.Set(update) + newState, trieUpdate, err := ldg.Set(update) if err != nil { - return flow.DummyStateCommitment, nil, err + return flow.DummyStateCommitment, nil, nil, fmt.Errorf("could not update ledger: %w", err) } - return flow.StateCommitment(commit), trieUpdate, nil -} + newCommit := flow.StateCommitment(newState) + + newStorageSnapshot := baseStorageSnapshot.Extend(newCommit, ruh.UpdatedRegisterSet()) -func (s *state) 
HasState(commitment flow.StateCommitment) bool { - return s.ls.HasState(ledger.State(commitment)) + return newCommit, trieUpdate, newStorageSnapshot, nil } -func (s *state) StateCommitmentByBlockID(ctx context.Context, blockID flow.Identifier) (flow.StateCommitment, error) { +func (s *state) StateCommitmentByBlockID(blockID flow.Identifier) (flow.StateCommitment, error) { return s.commits.ByBlockID(blockID) } func (s *state) ChunkDataPackByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error) { chunkDataPack, err := s.chunkDataPacks.ByChunkID(chunkID) if err != nil { - return nil, fmt.Errorf("could not retrieve stored chunk data pack: %w", err) + return nil, fmt.Errorf("could not retrieve chunk data pack: %w", err) } return chunkDataPack, nil @@ -285,102 +372,198 @@ func (s *state) SaveExecutionResults( trace.EXEStateSaveExecutionResults) defer span.End() - header := result.ExecutableBlock.Block.Header - blockID := header.ID() + err := s.saveExecutionResults(ctx, result) + if err != nil { + return fmt.Errorf("could not save execution results: %w", err) + } - // Write Batch is BadgerDB feature designed for handling lots of writes - // in efficient and automatic manner, hence pushing all the updates we can - // as tightly as possible to let Badger manage it. - // Note, that it does not guarantee atomicity as transactions has size limit, - // but it's the closest thing to atomicity we could have - batch := badgerstorage.NewBatch(s.db) + if s.enableRegisterStore { + // save registers to register store + err = s.registerStore.SaveRegisters( + result.BlockExecutionResult.ExecutableBlock.Block.ToHeader(), + result.BlockExecutionResult.AllUpdatedRegisters(), + ) - for _, chunkDataPack := range result.AllChunkDataPacks() { - err := s.chunkDataPacks.BatchStore(chunkDataPack, batch) if err != nil { - return fmt.Errorf("cannot store chunk data pack: %w", err) + return fmt.Errorf("could not save updated registers: %w", err) } } - err := s.events.BatchStore(blockID, []flow.EventsList{result.AllEvents()}, batch) + // outside the batch because it requires read access + err = s.UpdateLastExecutedBlock(childCtx, result.ExecutableBlock.BlockID()) if err != nil { - return fmt.Errorf("cannot store events: %w", err) + return fmt.Errorf("cannot update highest executed block: %w", err) } + return nil +} - err = s.serviceEvents.BatchStore(blockID, result.AllServiceEvents(), batch) +// saveExecutionResults saves all data related to the execution of a block. +// It is safe for concurrent use. +func (s *state) saveExecutionResults( + ctx context.Context, + result *execution.ComputationResult, +) (err error) { + blockID := result.ExecutableBlock.BlockID() + + chunks, err := result.AllChunkDataPacks() if err != nil { - return fmt.Errorf("cannot store service events: %w", err) + return fmt.Errorf("cannot retrieve chunk data packs: %w", err) } - err = s.transactionResults.BatchStore( - blockID, - result.AllTransactionResults(), - batch) - if err != nil { - return fmt.Errorf("cannot store transaction result: %w", err) + // Acquire both locks to ensure concurrency safety when inserting the execution results and chunk data packs.
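+ // The receipt, result index, and state commitment are written in a single + // batch below; chunk data packs live in a separate database and are removed + // again via the batch callback if that batch fails.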
+ return storage.WithLocks(s.lockManager, []string{storage.LockInsertOwnReceipt, storage.LockInsertChunkDataPack}, func(lctx lockctx.Context) error { + err := s.chunkDataPacks.StoreByChunkID(lctx, chunks) + if err != nil { + return fmt.Errorf("cannot store chunk data packs: %w", err) + } + + // Save entire execution result (including all chunk data packs) within one batch to minimize + // the number of database interactions. + return s.db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { + batch.AddCallback(func(err error) { + // Roll back if an error occurs during batch operations. + // Chunk data packs are saved in a separate database, so there is a chance + // that the execution result failed to save while the chunk data packs were saved and + // didn't get removed. + // TODO(leo): when retrieving chunk data packs, we need to add a check to ensure the block + // has been executed before returning chunk data packs + if err != nil { + chunkIDs := make([]flow.Identifier, 0, len(chunks)) + for _, chunk := range chunks { + chunkIDs = append(chunkIDs, chunk.ChunkID) + } + _ = s.chunkDataPacks.Remove(chunkIDs) + } + }) + + err = s.events.BatchStore(blockID, []flow.EventsList{result.AllEvents()}, batch) + if err != nil { + return fmt.Errorf("cannot store events: %w", err) + } + + err = s.serviceEvents.BatchStore(blockID, result.AllServiceEvents(), batch) + if err != nil { + return fmt.Errorf("cannot store service events: %w", err) + } + + err = s.transactionResults.BatchStore( + blockID, + result.AllTransactionResults(), + batch) + if err != nil { + return fmt.Errorf("cannot store transaction result: %w", err) + } + + executionResult := &result.ExecutionReceipt.ExecutionResult + // saving my receipts will also save the execution result + err = s.myReceipts.BatchStoreMyReceipt(lctx, result.ExecutionReceipt, batch) + if err != nil { + return fmt.Errorf("could not persist execution result: %w", err) + } + + err = s.results.BatchIndex(blockID, executionResult.ID(), batch) + if err != nil { + return fmt.Errorf("cannot index execution result: %w", err) + } + + // the state commitment is the last data item to be stored, so that + // IsBlockExecuted can be implemented by checking whether state commitment exists + // in the database + err = s.commits.BatchStore(lctx, blockID, result.CurrentEndState(), batch) + if err != nil { + return fmt.Errorf("cannot store state commitment: %w", err) + } + + return nil + }) + }) +} + +func (s *state) UpdateLastExecutedBlock(ctx context.Context, executedID flow.Identifier) error { + return s.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpdateExecutedBlock(rw.Writer(), executedID) + }) +} + +// Deprecated: use storehouse's GetHighestFinalizedExecuted instead. +func (s *state) GetLastExecutedBlockID(ctx context.Context) (uint64, flow.Identifier, error) { + if s.enableRegisterStore { + // when storehouse is enabled, the highest executed block is considered to be + // the highest finalized and executed block + height, err := s.GetHighestFinalizedExecuted() + if err != nil { + return 0, flow.ZeroID, fmt.Errorf("could not get highest finalized executed: %w", err) + } + + finalizedID, err := s.headers.BlockIDByHeight(height) + if err != nil { + return 0, flow.ZeroID, fmt.Errorf("could not get header by height %v: %w", height, err) + } + return height, finalizedID, nil } - executionResult := &result.ExecutionReceipt.ExecutionResult - err = s.results.BatchStore(executionResult, batch) + var blockID flow.Identifier + err := 
operation.RetrieveExecutedBlock(s.db.Reader(), &blockID) if err != nil { - return fmt.Errorf("cannot store execution result: %w", err) + return 0, flow.ZeroID, err } - err = s.results.BatchIndex(blockID, executionResult.ID(), batch) + lastExecuted, err := s.headers.ByBlockID(blockID) if err != nil { - return fmt.Errorf("cannot index execution result: %w", err) + return 0, flow.ZeroID, fmt.Errorf("could not retrieve executed header %v: %w", blockID, err) } - err = s.myReceipts.BatchStoreMyReceipt(result.ExecutionReceipt, batch) + return lastExecuted.Height, blockID, nil +} + +func (s *state) GetHighestFinalizedExecuted() (uint64, error) { + if s.enableRegisterStore { + return s.registerStore.LastFinalizedAndExecutedHeight(), nil + } + + // last finalized height + finalizedHeight, err := s.getLatestFinalized() if err != nil { - return fmt.Errorf("could not persist execution result: %w", err) + return 0, fmt.Errorf("could not retrieve finalized: %w", err) } - // the state commitment is the last data item to be stored, so that - // IsBlockExecuted can be implemented by checking whether state commitment exists - // in the database - err = s.commits.BatchStore(blockID, result.CurrentEndState(), batch) + // last executed height + executedHeight, _, err := s.GetLastExecutedBlockID(context.Background()) if err != nil { - return fmt.Errorf("cannot store state commitment: %w", err) + return 0, fmt.Errorf("could not get highest executed block: %w", err) } - err = batch.Flush() + // the highest finalized and executed height is the min of the two + highest := uint64(math.Min(float64(finalizedHeight), float64(executedHeight))) + + // double-check the highest block is executed + blockID, err := s.headers.BlockIDByHeight(highest) if err != nil { - return fmt.Errorf("batch flush error: %w", err) + return 0, fmt.Errorf("could not get header by height %v: %w", highest, err) } - //outside batch because it requires read access - err = s.UpdateHighestExecutedBlockIfHigher(childCtx, header) + isExecuted, err := s.IsBlockExecuted(highest, blockID) if err != nil { - return fmt.Errorf("cannot update highest executed block: %w", err) + return 0, fmt.Errorf("could not check if block %v (height: %v) is executed: %w", blockID, highest, err) } - return nil -} -func (s *state) UpdateHighestExecutedBlockIfHigher(ctx context.Context, header *flow.Header) error { - if s.tracer != nil { - span, _ := s.tracer.StartSpanFromContext(ctx, trace.EXEUpdateHighestExecutedBlockIfHigher) - defer span.End() + if !isExecuted { + return 0, fmt.Errorf("block %v (height: %v) is not executed yet", blockID, highest) } - return operation.RetryOnConflict(s.db.Update, procedure.UpdateHighestExecutedBlockIfHigher(header)) + return highest, nil } -func (s *state) GetHighestExecutedBlockID(ctx context.Context) (uint64, flow.Identifier, error) { - var blockID flow.Identifier - var height uint64 - err := s.db.View(procedure.GetHighestExecutedBlock(&height, &blockID)) - if err != nil { - return 0, flow.ZeroID, err + // IsBlockExecuted returns true if the block is executed, which means registers, events, +// results, etc. are all stored. +// Otherwise it returns false. +func (s *state) IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) { + if s.enableRegisterStore { + return s.registerStore.IsBlockExecuted(height, blockID) } - return height, blockID, nil -} - -// IsBlockExecuted returns whether the block has been executed. -// it checks whether the state commitment exists in execution state. 
-func IsBlockExecuted(ctx context.Context, state ReadOnlyExecutionState, block flow.Identifier) (bool, error) { - _, err := state.StateCommitmentByBlockID(ctx, block) + // ledger-based execution state uses commitment to determine if a block has been executed + _, err := s.StateCommitmentByBlockID(blockID) // statecommitment exists means the block has been executed if err == nil { @@ -393,4 +576,5 @@ func IsBlockExecuted(ctx context.Context, state ReadOnlyExecutionState, block fl } return false, err + } diff --git a/engine/execution/state/state_storehouse_test.go b/engine/execution/state/state_storehouse_test.go new file mode 100644 index 00000000000..59b618e4031 --- /dev/null +++ b/engine/execution/state/state_storehouse_test.go @@ -0,0 +1,268 @@ +package state_test + +import ( + "context" + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/ipfs/go-cid" + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine/execution" + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/engine/execution/storehouse" + "github.com/onflow/flow-go/engine/execution/testutil" + "github.com/onflow/flow-go/fvm/meter" + "github.com/onflow/flow-go/fvm/storage/snapshot" + led "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/ledger/common/pathfinder" + ledger "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/ledger/complete/wal/fixtures" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/mempool/entity" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + pebblestorage "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/utils/unittest" +) + +func prepareStorehouseTest(f func(t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storagemock.Headers, commits *storagemock.Commits, finalized *testutil.MockFinalizedReader)) func(*testing.T) { + return func(t *testing.T) { + lockManager := storage.NewTestingLockManager() + unittest.RunWithPebbleDB(t, func(pebbleDB *pebble.DB) { + metricsCollector := &metrics.NoopCollector{} + diskWal := &fixtures.NoopWAL{} + ls, err := ledger.NewLedger(diskWal, 100, metricsCollector, zerolog.Nop(), ledger.DefaultPathFinderVersion) + require.NoError(t, err) + compactor := fixtures.NewNoopCompactor(ls) + <-compactor.Ready() + defer func() { + <-ls.Done() + <-compactor.Done() + }() + + stateCommitments := storagemock.NewCommits(t) + stateCommitments.On("BatchStore", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + headers := storagemock.NewHeaders(t) + blocks := storagemock.NewBlocks(t) + events := storagemock.NewEvents(t) + events.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil) + serviceEvents := storagemock.NewServiceEvents(t) + serviceEvents.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil) + txResults := storagemock.NewTransactionResults(t) + txResults.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(nil) + chunkDataPacks := storagemock.NewChunkDataPacks(t) + 
chunkDataPacks.On("StoreByChunkID", mock.Anything, mock.Anything).Return(nil) + results := storagemock.NewExecutionResults(t) + results.On("BatchIndex", mock.Anything, mock.Anything, mock.Anything).Return(nil) + myReceipts := storagemock.NewMyExecutionReceipts(t) + myReceipts.On("BatchStoreMyReceipt", mock.Anything, mock.Anything, mock.Anything).Return(nil) + + withRegisterStore(t, func(t *testing.T, + rs *storehouse.RegisterStore, + diskStore execution.OnDiskRegisterStore, + finalized *testutil.MockFinalizedReader, + rootHeight uint64, + endHeight uint64, + finalizedHeaders map[uint64]*flow.Header, + ) { + + rootID, err := finalized.FinalizedBlockIDAtHeight(10) + require.NoError(t, err) + + db := pebbleimpl.ToDB(pebbleDB) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpdateExecutedBlock(rw.Writer(), rootID) + })) + + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return pebbleimpl.ToDB(pebbleDB).WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, finalizedHeaders[10].ID(), finalizedHeaders[10]) + }) + }) + require.NoError(t, err) + + getLatestFinalized := func() (uint64, error) { + return rootHeight, nil + } + + es := state.NewExecutionState( + ls, stateCommitments, blocks, headers, chunkDataPacks, results, myReceipts, events, serviceEvents, txResults, pebbleimpl.ToDB(pebbleDB), + getLatestFinalized, + trace.NewNoopTracer(), + rs, + true, + lockManager, + ) + + f(t, es, ls, headers, stateCommitments, finalized) + + }) + }) + } +} + +func withRegisterStore(t *testing.T, fn func( + t *testing.T, + rs *storehouse.RegisterStore, + diskStore execution.OnDiskRegisterStore, + finalized *testutil.MockFinalizedReader, + rootHeight uint64, + endHeight uint64, + headers map[uint64]*flow.Header, +)) { + // block 10 is executed block + pebblestorage.RunWithRegistersStorageAtInitialHeights(t, 10, 10, func(diskStore *pebblestorage.Registers) { + log := unittest.Logger() + var wal execution.ExecutedFinalizedWAL + finalized, headerByHeight, highest := testutil.NewMockFinalizedReader(10, 100) + rs, err := storehouse.NewRegisterStore(diskStore, wal, finalized, log, storehouse.NewNoopNotifier()) + require.NoError(t, err) + fn(t, rs, diskStore, finalized, 10, highest, headerByHeight) + }) +} + +func TestExecutionStateWithStorehouse(t *testing.T) { + t.Run("commit write and read new state", prepareStorehouseTest(func( + t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storagemock.Headers, stateCommitments *storagemock.Commits, finalized *testutil.MockFinalizedReader) { + + // block 11 is the block to be executed + block11 := finalized.BlockAtHeight(11) + header11 := block11.ToHeader() + sc10 := flow.StateCommitment(l.InitialState()) + + reg1 := unittest.MakeOwnerReg("fruit", "apple") + reg2 := unittest.MakeOwnerReg("vegetable", "carrot") + executionSnapshot := &snapshot.ExecutionSnapshot{ + WriteSet: map[flow.RegisterID]flow.RegisterValue{ + reg1.Key: reg1.Value, + reg2.Key: reg2.Value, + }, + Meter: meter.NewMeter(meter.DefaultParameters()), + } + + // create Block 11's end statecommitment + sc2, update, sc2Snapshot, err := state.CommitDelta(l, executionSnapshot, + storehouse.NewExecutingBlockSnapshot(state.NewLedgerStorageSnapshot(l, sc10), sc10)) + require.NoError(t, err) + + // validate new snapshot + val, err := sc2Snapshot.Get(reg1.Key) + require.NoError(t, err) + require.Equal(t, reg1.Value, val) + + val, err = 
sc2Snapshot.Get(reg2.Key) + require.NoError(t, err) + require.Equal(t, reg2.Value, val) + + validateUpdate(t, update, sc10, executionSnapshot) + + // validate storage snapshot + completeBlock := &entity.ExecutableBlock{ + Block: block11, + CompleteCollections: map[flow.Identifier]*entity.CompleteCollection{}, + StartState: &sc10, + Executing: false, + } + + computationResult := makeComputationResult(t, completeBlock, executionSnapshot, sc2) + + // save result and store registers + require.NoError(t, es.SaveExecutionResults(context.Background(), computationResult)) + + storageSnapshot := es.NewStorageSnapshot(sc2, header11.ID(), header11.Height) + + // validate the storage snapshot has the registers + b1, err := storageSnapshot.Get(reg1.Key) + require.NoError(t, err) + b2, err := storageSnapshot.Get(reg2.Key) + require.NoError(t, err) + + require.Equal(t, flow.RegisterValue("apple"), b1) + require.Equal(t, flow.RegisterValue("carrot"), b2) + + // verify has state + require.True(t, l.HasState(led.State(sc2))) + require.False(t, l.HasState(led.State(unittest.StateCommitmentFixture()))) + })) +} + +func validateUpdate(t *testing.T, update *led.TrieUpdate, commit flow.StateCommitment, executionSnapshot *snapshot.ExecutionSnapshot) { + require.Equal(t, commit[:], update.RootHash[:]) + require.Len(t, update.Paths, len(executionSnapshot.WriteSet)) + require.Len(t, update.Payloads, len(executionSnapshot.WriteSet)) + + regs := executionSnapshot.UpdatedRegisters() + for i, reg := range regs { + key := convert.RegisterIDToLedgerKey(reg.Key) + path, err := pathfinder.KeyToPath(key, ledger.DefaultPathFinderVersion) + require.NoError(t, err) + + require.Equal(t, path, update.Paths[i]) + require.Equal(t, led.Value(reg.Value), update.Payloads[i].Value()) + } +} + +func makeComputationResult( + t *testing.T, + completeBlock *entity.ExecutableBlock, + executionSnapshot *snapshot.ExecutionSnapshot, + commit flow.StateCommitment, +) *execution.ComputationResult { + + computationResult := execution.NewEmptyComputationResult(completeBlock) + numberOfChunks := 1 + ceds := make([]*execution_data.ChunkExecutionData, numberOfChunks) + ceds[0] = unittest.ChunkExecutionDataFixture(t, 1024) + computationResult.CollectionExecutionResultAt(0).UpdateExecutionSnapshot(executionSnapshot) + computationResult.AppendCollectionAttestationResult( + *completeBlock.StartState, + commit, + []byte{'p'}, + unittest.IdentifierFixture(), + ceds[0], + ) + + bed := unittest.BlockExecutionDataFixture( + unittest.WithBlockExecutionDataBlockID(completeBlock.Block.ID()), + unittest.WithChunkExecutionDatas(ceds...), + ) + + executionDataID, err := execution_data.CalculateID(context.Background(), bed, execution_data.DefaultSerializer) + require.NoError(t, err) + + chunks, err := computationResult.AllChunks() + require.NoError(t, err) + + executionResult, err := flow.NewExecutionResult(flow.UntrustedExecutionResult{ + PreviousResultID: unittest.IdentifierFixture(), + BlockID: completeBlock.BlockID(), + Chunks: chunks, + ServiceEvents: flow.ServiceEventList{}, + ExecutionDataID: executionDataID, + }) + require.NoError(t, err) + + computationResult.BlockAttestationResult.BlockExecutionResult.ExecutionDataRoot = &flow.BlockExecutionDataRoot{ + BlockID: completeBlock.BlockID(), + ChunkExecutionDataIDs: []cid.Cid{flow.IdToCid(unittest.IdentifierFixture())}, + } + + computationResult.ExecutionReceipt = &flow.ExecutionReceipt{ + UnsignedExecutionReceipt: flow.UnsignedExecutionReceipt{ + ExecutionResult: *executionResult, + Spocks: 
unittest.SignaturesFixture(numberOfChunks), + }, + ExecutorSignature: unittest.SignatureFixture(), + } + return computationResult +} diff --git a/engine/execution/state/state_test.go b/engine/execution/state/state_test.go index 6d6833837f0..9cf96405024 100644 --- a/engine/execution/state/state_test.go +++ b/engine/execution/state/state_test.go @@ -1,33 +1,32 @@ package state_test import ( - "context" + "fmt" "testing" - "github.com/dgraph-io/badger/v2" - "github.com/golang/mock/gomock" + "github.com/cockroachdb/pebble/v2" "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - ledger2 "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/pathfinder" - "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/engine/execution/storehouse" "github.com/onflow/flow-go/fvm/storage/snapshot" + led "github.com/onflow/flow-go/ledger" ledger "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/wal/fixtures" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" + storageerr "github.com/onflow/flow-go/storage" storage "github.com/onflow/flow-go/storage/mock" - "github.com/onflow/flow-go/storage/mocks" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" "github.com/onflow/flow-go/utils/unittest" ) -func prepareTest(f func(t *testing.T, es state.ExecutionState, l *ledger.Ledger)) func(*testing.T) { +func prepareTest(f func(t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storage.Headers, commits *storage.Commits)) func(*testing.T) { return func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(badgerDB *badger.DB) { + lockManager := storageerr.NewTestingLockManager() + unittest.RunWithPebbleDB(t, func(pebbleDB *pebble.DB) { metricsCollector := &metrics.NoopCollector{} diskWal := &fixtures.NoopWAL{} ls, err := ledger.NewLedger(diskWal, 100, metricsCollector, zerolog.Nop(), ledger.DefaultPathFinderVersion) @@ -39,205 +38,265 @@ func prepareTest(f func(t *testing.T, es state.ExecutionState, l *ledger.Ledger) <-compactor.Done() }() - ctrl := gomock.NewController(t) - - stateCommitments := mocks.NewMockCommits(ctrl) - blocks := mocks.NewMockBlocks(ctrl) - headers := mocks.NewMockHeaders(ctrl) - collections := mocks.NewMockCollections(ctrl) - events := mocks.NewMockEvents(ctrl) - serviceEvents := mocks.NewMockServiceEvents(ctrl) - txResults := mocks.NewMockTransactionResults(ctrl) - - stateCommitment := ls.InitialState() - - stateCommitments.EXPECT().ByBlockID(gomock.Any()).Return(flow.StateCommitment(stateCommitment), nil) - - chunkDataPacks := new(storage.ChunkDataPacks) - - results := new(storage.ExecutionResults) - myReceipts := new(storage.MyExecutionReceipts) - + stateCommitments := storage.NewCommits(t) + headers := storage.NewHeaders(t) + blocks := storage.NewBlocks(t) + events := storage.NewEvents(t) + serviceEvents := storage.NewServiceEvents(t) + txResults := storage.NewTransactionResults(t) + chunkDataPacks := storage.NewChunkDataPacks(t) + results := storage.NewExecutionResults(t) + myReceipts := storage.NewMyExecutionReceipts(t) + + getLatestFinalized := func() (uint64, error) { + return 0, nil + } + + db := pebbleimpl.ToDB(pebbleDB) es := state.NewExecutionState( - ls, stateCommitments, blocks, headers, collections, chunkDataPacks, results, myReceipts, events, serviceEvents, txResults, badgerDB, trace.NewNoopTracer(), + ls, stateCommitments, blocks, headers, chunkDataPacks, 
results, myReceipts, events, serviceEvents, txResults, db, getLatestFinalized, trace.NewNoopTracer(), + nil, + false, + lockManager, ) - f(t, es, ls) + f(t, es, ls, headers, stateCommitments) }) } } func TestExecutionStateWithTrieStorage(t *testing.T) { - registerID1 := flow.NewRegisterID("fruit", "") - - registerID2 := flow.NewRegisterID("vegetable", "") - - t.Run("commit write and read new state", prepareTest(func(t *testing.T, es state.ExecutionState, l *ledger.Ledger) { - // TODO: use real block ID - sc1, err := es.StateCommitmentByBlockID(context.Background(), flow.Identifier{}) - assert.NoError(t, err) + t.Run("commit write and read new state", prepareTest(func( + t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storage.Headers, stateCommitments *storage.Commits) { + header1 := unittest.BlockHeaderFixture() + sc1 := flow.StateCommitment(l.InitialState()) + reg1 := unittest.MakeOwnerReg("fruit", "apple") + reg2 := unittest.MakeOwnerReg("vegetable", "carrot") executionSnapshot := &snapshot.ExecutionSnapshot{ WriteSet: map[flow.RegisterID]flow.RegisterValue{ - registerID1: flow.RegisterValue("apple"), - registerID2: flow.RegisterValue("carrot"), + reg1.Key: reg1.Value, + reg2.Key: reg2.Value, }, } - sc2, update, err := state.CommitDelta(l, executionSnapshot, sc1) - assert.NoError(t, err) - - assert.Equal(t, sc1[:], update.RootHash[:]) - assert.Len(t, update.Paths, 2) - assert.Len(t, update.Payloads, 2) - - key1 := ledger2.NewKey( - []ledger2.KeyPart{ - ledger2.NewKeyPart(0, []byte(registerID1.Owner)), - ledger2.NewKeyPart(2, []byte(registerID1.Key)), - }) - path1, err := pathfinder.KeyToPath(key1, ledger.DefaultPathFinderVersion) - assert.NoError(t, err) - - key2 := ledger2.NewKey( - []ledger2.KeyPart{ - ledger2.NewKeyPart(0, []byte(registerID2.Owner)), - ledger2.NewKeyPart(2, []byte(registerID2.Key)), - }) - path2, err := pathfinder.KeyToPath(key2, ledger.DefaultPathFinderVersion) - assert.NoError(t, err) - - assert.Equal(t, path1, update.Paths[0]) - assert.Equal(t, path2, update.Paths[1]) + sc2, update, sc2Snapshot, err := state.CommitDelta(l, executionSnapshot, + storehouse.NewExecutingBlockSnapshot(state.NewLedgerStorageSnapshot(l, sc1), sc1)) + require.NoError(t, err) - k1, err := update.Payloads[0].Key() + // validate new snapshot + val, err := sc2Snapshot.Get(reg1.Key) require.NoError(t, err) + require.Equal(t, reg1.Value, val) - k2, err := update.Payloads[1].Key() + val, err = sc2Snapshot.Get(reg2.Key) require.NoError(t, err) + require.Equal(t, reg2.Value, val) - assert.Equal(t, key1, k1) - assert.Equal(t, key2, k2) + require.Equal(t, sc1[:], update.RootHash[:]) + require.Len(t, update.Paths, 2) + require.Len(t, update.Payloads, 2) - assert.Equal(t, []byte("apple"), []byte(update.Payloads[0].Value())) - assert.Equal(t, []byte("carrot"), []byte(update.Payloads[1].Value())) + // validate sc2 + require.Equal(t, sc2, sc2Snapshot.Commitment()) + validateUpdate(t, update, sc1, executionSnapshot) - storageSnapshot := es.NewStorageSnapshot(sc2) + header2 := unittest.BlockHeaderWithParentFixture(header1) + storageSnapshot := es.NewStorageSnapshot(sc2, header2.ID(), header2.Height) + + b1, err := storageSnapshot.Get(reg1.Key) + require.NoError(t, err) + b2, err := storageSnapshot.Get(reg2.Key) + require.NoError(t, err) - b1, err := storageSnapshot.Get(registerID1) - assert.NoError(t, err) - b2, err := storageSnapshot.Get(registerID2) - assert.NoError(t, err) + require.Equal(t, flow.RegisterValue("apple"), b1) + require.Equal(t, flow.RegisterValue("carrot"), b2) - 
assert.Equal(t, flow.RegisterValue("apple"), b1) - assert.Equal(t, flow.RegisterValue("carrot"), b2) + // verify has state + require.True(t, l.HasState(led.State(sc2))) + require.False(t, l.HasState(led.State(unittest.StateCommitmentFixture()))) })) - t.Run("commit write and read previous state", prepareTest(func(t *testing.T, es state.ExecutionState, l *ledger.Ledger) { - // TODO: use real block ID - sc1, err := es.StateCommitmentByBlockID(context.Background(), flow.Identifier{}) - assert.NoError(t, err) + t.Run("commit write and read previous state", prepareTest(func( + t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storage.Headers, stateCommitments *storage.Commits) { + header1 := unittest.BlockHeaderFixture() + sc1 := flow.StateCommitment(l.InitialState()) + reg1 := unittest.MakeOwnerReg("fruit", "apple") executionSnapshot1 := &snapshot.ExecutionSnapshot{ WriteSet: map[flow.RegisterID]flow.RegisterValue{ - registerID1: []byte("apple"), + reg1.Key: reg1.Value, }, } - sc2, _, err := state.CommitDelta(l, executionSnapshot1, sc1) - assert.NoError(t, err) + sc2, _, sc2Snapshot, err := state.CommitDelta(l, executionSnapshot1, + storehouse.NewExecutingBlockSnapshot(state.NewLedgerStorageSnapshot(l, sc1), sc1), + ) + require.NoError(t, err) // update value and get resulting state commitment executionSnapshot2 := &snapshot.ExecutionSnapshot{ WriteSet: map[flow.RegisterID]flow.RegisterValue{ - registerID1: []byte("orange"), + reg1.Key: flow.RegisterValue("orange"), }, } - sc3, _, err := state.CommitDelta(l, executionSnapshot2, sc2) - assert.NoError(t, err) + sc3, _, _, err := state.CommitDelta(l, executionSnapshot2, sc2Snapshot) + require.NoError(t, err) + header2 := unittest.BlockHeaderWithParentFixture(header1) // create a view for previous state version - storageSnapshot3 := es.NewStorageSnapshot(sc2) + storageSnapshot3 := es.NewStorageSnapshot(sc2, header2.ID(), header2.Height) + header3 := unittest.BlockHeaderWithParentFixture(header1) // create a view for new state version - storageSnapshot4 := es.NewStorageSnapshot(sc3) + storageSnapshot4 := es.NewStorageSnapshot(sc3, header3.ID(), header3.Height) + + // header2 and header3 are different blocks + require.True(t, header2.ID() != (header3.ID())) // fetch the value at both versions - b1, err := storageSnapshot3.Get(registerID1) - assert.NoError(t, err) + b1, err := storageSnapshot3.Get(reg1.Key) + require.NoError(t, err) - b2, err := storageSnapshot4.Get(registerID1) - assert.NoError(t, err) + b2, err := storageSnapshot4.Get(reg1.Key) + require.NoError(t, err) - assert.Equal(t, flow.RegisterValue("apple"), b1) - assert.Equal(t, flow.RegisterValue("orange"), b2) + require.Equal(t, flow.RegisterValue("apple"), b1) + require.Equal(t, flow.RegisterValue("orange"), b2) })) - t.Run("commit delta and read new state", prepareTest(func(t *testing.T, es state.ExecutionState, l *ledger.Ledger) { - // TODO: use real block ID - sc1, err := es.StateCommitmentByBlockID(context.Background(), flow.Identifier{}) - assert.NoError(t, err) + t.Run("commit delta and read new state", prepareTest(func( + t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storage.Headers, stateCommitments *storage.Commits) { + header1 := unittest.BlockHeaderFixture() + sc1 := flow.StateCommitment(l.InitialState()) + reg1 := unittest.MakeOwnerReg("fruit", "apple") + reg2 := unittest.MakeOwnerReg("vegetable", "carrot") // set initial value executionSnapshot1 := &snapshot.ExecutionSnapshot{ WriteSet: map[flow.RegisterID]flow.RegisterValue{ - 
registerID1: []byte("apple"), - registerID2: []byte("apple"), + reg1.Key: reg1.Value, + reg2.Key: reg2.Value, }, } - sc2, _, err := state.CommitDelta(l, executionSnapshot1, sc1) - assert.NoError(t, err) + sc2, _, sc2Snapshot, err := state.CommitDelta(l, executionSnapshot1, + storehouse.NewExecutingBlockSnapshot(state.NewLedgerStorageSnapshot(l, sc1), sc1), + ) + require.NoError(t, err) // update value and get resulting state commitment executionSnapshot2 := &snapshot.ExecutionSnapshot{ WriteSet: map[flow.RegisterID]flow.RegisterValue{ - registerID1: nil, + reg1.Key: nil, }, } - sc3, _, err := state.CommitDelta(l, executionSnapshot2, sc2) - assert.NoError(t, err) + sc3, _, _, err := state.CommitDelta(l, executionSnapshot2, sc2Snapshot) + require.NoError(t, err) + header2 := unittest.BlockHeaderWithParentFixture(header1) // create a view for previous state version - storageSnapshot3 := es.NewStorageSnapshot(sc2) + storageSnapshot3 := es.NewStorageSnapshot(sc2, header2.ID(), header2.Height) + header3 := unittest.BlockHeaderWithParentFixture(header2) // create a view for new state version - storageSnapshot4 := es.NewStorageSnapshot(sc3) + storageSnapshot4 := es.NewStorageSnapshot(sc3, header3.ID(), header3.Height) // fetch the value at both versions - b1, err := storageSnapshot3.Get(registerID1) - assert.NoError(t, err) + b1, err := storageSnapshot3.Get(reg1.Key) + require.NoError(t, err) - b2, err := storageSnapshot4.Get(registerID1) - assert.NoError(t, err) + b2, err := storageSnapshot4.Get(reg1.Key) + require.NoError(t, err) - assert.Equal(t, flow.RegisterValue("apple"), b1) - assert.Empty(t, b2) + require.Equal(t, flow.RegisterValue("apple"), b1) + require.Empty(t, b2) })) - t.Run("commit delta and persist state commit for the second time should be OK", prepareTest(func(t *testing.T, es state.ExecutionState, l *ledger.Ledger) { - // TODO: use real block ID - sc1, err := es.StateCommitmentByBlockID(context.Background(), flow.Identifier{}) - assert.NoError(t, err) + t.Run("commit delta and persist state commit for the second time should be OK", prepareTest(func( + t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storage.Headers, stateCommitments *storage.Commits) { + sc1 := flow.StateCommitment(l.InitialState()) + reg1 := unittest.MakeOwnerReg("fruit", "apple") + reg2 := unittest.MakeOwnerReg("vegetable", "carrot") // set initial value executionSnapshot1 := &snapshot.ExecutionSnapshot{ WriteSet: map[flow.RegisterID]flow.RegisterValue{ - registerID1: flow.RegisterValue("apple"), - registerID2: flow.RegisterValue("apple"), + reg1.Key: reg1.Value, + reg2.Key: reg2.Value, }, } - sc2, _, err := state.CommitDelta(l, executionSnapshot1, sc1) - assert.NoError(t, err) + sc2, _, _, err := state.CommitDelta(l, executionSnapshot1, + storehouse.NewExecutingBlockSnapshot(state.NewLedgerStorageSnapshot(l, sc1), sc1), + ) + require.NoError(t, err) // committing for the second time should be OK - sc2Same, _, err := state.CommitDelta(l, executionSnapshot1, sc1) - assert.NoError(t, err) + sc2Same, _, _, err := state.CommitDelta(l, executionSnapshot1, + storehouse.NewExecutingBlockSnapshot(state.NewLedgerStorageSnapshot(l, sc1), sc1), + ) + require.NoError(t, err) require.Equal(t, sc2, sc2Same) })) + t.Run("commit write and create snapshot", prepareTest(func( + t *testing.T, es state.ExecutionState, l *ledger.Ledger, headers *storage.Headers, stateCommitments *storage.Commits) { + header1 := unittest.BlockHeaderFixture() + header2 := unittest.BlockHeaderWithParentFixture(header1) + sc1 := 
flow.StateCommitment(l.InitialState()) + + reg1 := unittest.MakeOwnerReg("fruit", "apple") + reg2 := unittest.MakeOwnerReg("vegetable", "carrot") + executionSnapshot := &snapshot.ExecutionSnapshot{ + WriteSet: map[flow.RegisterID]flow.RegisterValue{ + reg1.Key: reg1.Value, + reg2.Key: reg2.Value, + }, + } + + sc2, _, _, err := state.CommitDelta(l, executionSnapshot, + storehouse.NewExecutingBlockSnapshot(state.NewLedgerStorageSnapshot(l, sc1), sc1)) + require.NoError(t, err) + + // test CreateStorageSnapshot for known and executed block + headers.On("ByBlockID", header2.ID()).Return(header2, nil) + stateCommitments.On("ByBlockID", header2.ID()).Return(sc2, nil) + snapshot2, h2, err := es.CreateStorageSnapshot(header2.ID()) + require.NoError(t, err) + require.Equal(t, header2.ID(), h2.ID()) + + val, err := snapshot2.Get(reg1.Key) + require.NoError(t, err) + require.Equal(t, val, reg1.Value) + + val, err = snapshot2.Get(reg2.Key) + require.NoError(t, err) + require.Equal(t, val, reg2.Value) + + // test CreateStorageSnapshot for unknown block + unknown := unittest.BlockHeaderFixture() + headers.On("ByBlockID", unknown.ID()).Return(nil, fmt.Errorf("unknown: %w", storageerr.ErrNotFound)) + _, _, err = es.CreateStorageSnapshot(unknown.ID()) + require.ErrorIs(t, err, storageerr.ErrNotFound) + + // test CreateStorageSnapshot for known and unexecuted block + unexecuted := unittest.BlockHeaderFixture() + headers.On("ByBlockID", unexecuted.ID()).Return(unexecuted, nil) + stateCommitments.On("ByBlockID", unexecuted.ID()).Return(nil, fmt.Errorf("not found: %w", storageerr.ErrNotFound)) + _, _, err = es.CreateStorageSnapshot(unexecuted.ID()) + require.ErrorIs(t, err, state.ErrNotExecuted) + + // test CreateStorageSnapshot for pruned block + pruned := unittest.BlockHeaderFixture() + prunedState := unittest.StateCommitmentFixture() + headers.On("ByBlockID", pruned.ID()).Return(pruned, nil) + stateCommitments.On("ByBlockID", pruned.ID()).Return(prunedState, nil) + _, _, err = es.CreateStorageSnapshot(pruned.ID()) + require.ErrorIs(t, err, state.ErrExecutionStatePruned) + })) + } diff --git a/engine/execution/state/unittest/fixtures.go b/engine/execution/state/unittest/fixtures.go index b05b70d0cb1..f76d74dac9a 100644 --- a/engine/execution/state/unittest/fixtures.go +++ b/engine/execution/state/unittest/fixtures.go @@ -1,19 +1,28 @@ package unittest import ( - "github.com/onflow/flow-go/crypto" + "context" + "testing" + + "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/engine/execution" + "github.com/onflow/flow-go/fvm/meter" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/utils/unittest" ) func StateInteractionsFixture() *snapshot.ExecutionSnapshot { - return &snapshot.ExecutionSnapshot{} + return &snapshot.ExecutionSnapshot{ + Meter: meter.NewMeter(meter.DefaultParameters()), + } } func ComputationResultFixture( + t *testing.T, parentBlockExecutionResultID flow.Identifier, collectionsSignerIDs [][]flow.Identifier, ) *execution.ComputationResult { @@ -21,12 +30,13 @@ func ComputationResultFixture( startState := unittest.StateCommitmentFixture() block := unittest.ExecutableBlockFixture(collectionsSignerIDs, &startState) - return ComputationResultForBlockFixture( + return ComputationResultForBlockFixture(t, parentBlockExecutionResultID, block) } func ComputationResultForBlockFixture( + t *testing.T, 
parentBlockExecutionResultID flow.Identifier, completeBlock *entity.ExecutableBlock, ) *execution.ComputationResult { @@ -34,30 +44,68 @@ func ComputationResultForBlockFixture( computationResult := execution.NewEmptyComputationResult(completeBlock) numberOfChunks := len(collections) + 1 + ceds := make([]*execution_data.ChunkExecutionData, numberOfChunks) + startState := *completeBlock.StartState for i := 0; i < numberOfChunks; i++ { + ceds[i] = unittest.ChunkExecutionDataFixture(t, 1024) + endState := unittest.StateCommitmentFixture() computationResult.CollectionExecutionResultAt(i).UpdateExecutionSnapshot(StateInteractionsFixture()) computationResult.AppendCollectionAttestationResult( - *completeBlock.StartState, - *completeBlock.StartState, + startState, + endState, nil, unittest.IdentifierFixture(), - nil, + ceds[i], ) - + startState = endState } + bed := unittest.BlockExecutionDataFixture( + unittest.WithBlockExecutionDataBlockID(completeBlock.Block.ID()), + unittest.WithChunkExecutionDatas(ceds...), + ) + executionDataID, err := execution_data.CalculateID(context.Background(), bed, execution_data.DefaultSerializer) + require.NoError(t, err) - executionResult := flow.NewExecutionResult( - parentBlockExecutionResultID, - completeBlock.ID(), - computationResult.AllChunks(), - nil, - flow.ZeroID) - - computationResult.ExecutionReceipt = &flow.ExecutionReceipt{ - ExecutionResult: *executionResult, - Spocks: make([]crypto.Signature, numberOfChunks), - ExecutorSignature: crypto.Signature{}, + _, serviceEventEpochCommitProtocol := unittest.EpochCommitFixtureByChainID(flow.Localnet) + _, serviceEventEpochSetupProtocol := unittest.EpochSetupFixtureByChainID(flow.Localnet) + _, serviceEventEpochRecoverProtocol := unittest.EpochRecoverFixtureByChainID(flow.Localnet) + _, serviceEventVersionBeaconProtocol := unittest.VersionBeaconFixtureByChainID(flow.Localnet) + + convertedServiceEvents := flow.ServiceEventList{ + serviceEventEpochCommitProtocol.ServiceEvent(), + serviceEventEpochSetupProtocol.ServiceEvent(), + serviceEventEpochRecoverProtocol.ServiceEvent(), + serviceEventVersionBeaconProtocol.ServiceEvent(), } + chunks, err := computationResult.AllChunks() + require.NoError(t, err) + + executionResult, err := flow.NewExecutionResult(flow.UntrustedExecutionResult{ + PreviousResultID: parentBlockExecutionResultID, + BlockID: completeBlock.BlockID(), + Chunks: chunks, + ServiceEvents: convertedServiceEvents, + ExecutionDataID: executionDataID, + }) + require.NoError(t, err) + + unsignedExecutionReceipt, err := flow.NewUnsignedExecutionReceipt( + flow.UntrustedUnsignedExecutionReceipt{ + ExecutionResult: *executionResult, + ExecutorID: unittest.IdentifierFixture(), + Spocks: unittest.SignaturesFixture(numberOfChunks), + }, + ) + require.NoError(t, err) + receipt, err := flow.NewExecutionReceipt( + flow.UntrustedExecutionReceipt{ + UnsignedExecutionReceipt: *unsignedExecutionReceipt, + ExecutorSignature: unittest.SignatureFixture(), + }, + ) + require.NoError(t, err) + computationResult.ExecutionReceipt = receipt + return computationResult } diff --git a/engine/execution/storehouse.go b/engine/execution/storehouse.go new file mode 100644 index 00000000000..47864217c36 --- /dev/null +++ b/engine/execution/storehouse.go @@ -0,0 +1,126 @@ +package execution + +import ( + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/finalizedreader" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/pebble" +) + +// 
RegisterStore is the interface for the register store +// see implementation in engine/execution/storehouse/register_store.go +type RegisterStore interface { + // GetRegister first tries to get the register from the InMemoryRegisterStore, then the OnDiskRegisterStore. + // It returns: + // - (value, nil) if the register value is found at the given block + // - (nil, nil) if the register is not found + // - (nil, storage.ErrHeightNotIndexed) if the height is below the first height that is indexed. + // - (nil, storehouse.ErrNotExecuted) if the block is not executed yet + // - (nil, storehouse.ErrNotExecuted) if the block is conflicting with the finalized block + // - (nil, err) for any other exceptions + GetRegister(height uint64, blockID flow.Identifier, register flow.RegisterID) (flow.RegisterValue, error) + + // SaveRegisters saves to the InMemoryRegisterStore first, then triggers the same check as OnBlockFinalized. + // Depends on InMemoryRegisterStore.SaveRegisters. + // It returns: + // - nil if the registers are saved successfully + // - exception if the block is above the pruned height but does not connect to the pruned height (conflicting block). + // - exception if the block is below the pruned height + // - exception if the same block is saved again + // - exception for any other failure + SaveRegisters(header *flow.Header, registers flow.RegisterEntries) error + + // Depends on FinalizedReader's FinalizedBlockIDAtHeight. + // Depends on ExecutedFinalizedWAL.Append. + // Depends on OnDiskRegisterStore.SaveRegisters. + // OnBlockFinalized triggers the check of whether a block at the next height becomes finalized and executed. + // Note: This is a blocking call. + // The next height is the existing finalized and executed block's height + 1. + // If a block at the next height becomes finalized and executed, then: + // 1. write the registers to the write-ahead log + // 2. save the registers of the block to OnDiskRegisterStore + // 3. prune the height in InMemoryRegisterStore + // Any error returned is an exception. + OnBlockFinalized() error + + // LastFinalizedAndExecutedHeight returns the height of the last finalized and executed block, + // which has been saved in OnDiskRegisterStore + LastFinalizedAndExecutedHeight() uint64 + + // IsBlockExecuted returns whether the given block is executed. + // If a block is not executed, it does not distinguish whether the block exists or not. + // It returns: + // - (true, nil) if the block is executed, regardless of whether the registers of the block are pruned on disk or not + // - (false, nil) if the block is not executed + // - (false, exception) if running into any exception + IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) +} + +// RegisterStoreNotifier is the interface for the register store to notify when a block is finalized and executed +type RegisterStoreNotifier interface { + OnFinalizedAndExecutedHeightUpdated(height uint64) +} + +type FinalizedReader interface { + // FinalizedBlockIDAtHeight returns the block ID of the finalized block at the given height. 
+ // It returns storage.ErrNotFound if the given height has not been finalized yet; + // any other error returned is an exception + FinalizedBlockIDAtHeight(height uint64) (flow.Identifier, error) +} + +// finalizedreader.FinalizedReader is an implementation of FinalizedReader interface +var _ FinalizedReader = (*finalizedreader.FinalizedReader)(nil) + +// see implementation in engine/execution/storehouse/in_memory_register_store.go +type InMemoryRegisterStore interface { + Prune(finalizedHeight uint64, finalizedBlockID flow.Identifier) error + PrunedHeight() uint64 + + // GetRegister will return the latest updated value of the given register since the pruned height. + // It returns a PrunedError if the register is unknown or not updated since the pruned height. + // It returns an exception if the internal index is inconsistent. + GetRegister(height uint64, blockID flow.Identifier, register flow.RegisterID) (flow.RegisterValue, error) + GetUpdatedRegisters(height uint64, blockID flow.Identifier) (flow.RegisterEntries, error) + SaveRegisters( + height uint64, + blockID flow.Identifier, + parentID flow.Identifier, + registers flow.RegisterEntries, + ) error + + // IsBlockExecuted returns whether the given block is executed. + // It returns: + // - (true, nil) if the block is above the pruned height and is executed + // - (true, nil) if the block is the pruned block, since the pruned block is finalized and executed + // - (false, nil) if the block is above the pruned height and is not executed + // - (false, nil) if the block's height is the pruned height, but is different from the pruned block + // - (false, exception) if the block is below the pruned height + IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) +} + +type OnDiskRegisterStore = storage.RegisterIndex + +// pebble.Registers is an implementation of OnDiskRegisterStore interface +var _ OnDiskRegisterStore = (*pebble.Registers)(nil) + +type ExecutedFinalizedWAL interface { + Append(height uint64, registers flow.RegisterEntries) error + + // Latest returns the latest height in the WAL. + Latest() (uint64, error) + + GetReader(height uint64) WALReader +} + +type WALReader interface { + // Next returns the next height and register entries in the WAL. + // It returns EOF when there are no more entries. + Next() (height uint64, registers flow.RegisterEntries, err error) +} + +type ExtendableStorageSnapshot interface { + snapshot.StorageSnapshot + Extend(newCommit flow.StateCommitment, updatedRegisters map[flow.RegisterID]flow.RegisterValue) ExtendableStorageSnapshot + Commitment() flow.StateCommitment +} diff --git a/engine/execution/storehouse/block_end_snapshot.go b/engine/execution/storehouse/block_end_snapshot.go new file mode 100644 index 00000000000..bf7718a9543 --- /dev/null +++ b/engine/execution/storehouse/block_end_snapshot.go @@ -0,0 +1,88 @@ +package storehouse + +import ( + "errors" + "sync" + + "github.com/onflow/flow-go/engine/execution" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +var _ snapshot.StorageSnapshot = (*BlockEndStateSnapshot)(nil) + +// BlockEndStateSnapshot represents the storage at the end of a block. 
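+// +// A minimal usage sketch (names are placeholders; the snapshot is normally created via state.NewStorageSnapshot when the register store is enabled): +// +// snap := storehouse.NewBlockEndStateSnapshot(registerStore, blockID, height) +// value, err := snap.Get(regID) // first read goes to the RegisterStore +// value, err = snap.Get(regID) // repeated reads are served from readCache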
+type BlockEndStateSnapshot struct { + storage execution.RegisterStore + + blockID flow.Identifier + height uint64 + + mutex sync.RWMutex + readCache map[flow.RegisterID]flow.RegisterValue // cache the reads from storage at baseBlock +} + +// the caller must ensure the block height is for the given block +func NewBlockEndStateSnapshot( + storage execution.RegisterStore, + blockID flow.Identifier, + height uint64, +) *BlockEndStateSnapshot { + return &BlockEndStateSnapshot{ + storage: storage, + blockID: blockID, + height: height, + readCache: make(map[flow.RegisterID]flow.RegisterValue), + } +} + +// Get returns the value of the register with the given register ID. +// It returns: +// - (value, nil) if the register exists +// - (nil, nil) if the register does not exist +// - (nil, storage.ErrHeightNotIndexed) if the height is below the first height that is indexed. +// - (nil, storehouse.ErrNotExecuted) if the block is not executed yet +// - (nil, storehouse.ErrNotExecuted) if the block is conflicting with finalized block +// - (nil, err) for any other exceptions +func (s *BlockEndStateSnapshot) Get(id flow.RegisterID) (flow.RegisterValue, error) { + value, ok := s.getFromCache(id) + if ok { + return value, nil + } + + value, err := s.getFromStorage(id) + if err != nil { + return nil, err + } + + s.mutex.Lock() + defer s.mutex.Unlock() + + // TODO: consider adding a limit/eviction policy for the cache + s.readCache[id] = value + return value, err +} + +func (s *BlockEndStateSnapshot) getFromCache(id flow.RegisterID) (flow.RegisterValue, bool) { + s.mutex.RLock() + defer s.mutex.RUnlock() + + value, ok := s.readCache[id] + return value, ok +} + +func (s *BlockEndStateSnapshot) getFromStorage(id flow.RegisterID) (flow.RegisterValue, error) { + value, err := s.storage.GetRegister(s.height, s.blockID, id) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + // if the error is not found, we return a nil RegisterValue, + // in this case, the nil value can be cached, because the storage will not change it + return nil, nil + } + // if the error is not ErrNotFound, such as storage.ErrHeightNotIndexed, storehouse.ErrNotExecuted + // we return the error without caching + return nil, err + } + return value, nil +} diff --git a/engine/execution/storehouse/block_end_snapshot_test.go b/engine/execution/storehouse/block_end_snapshot_test.go new file mode 100644 index 00000000000..191affbf3fc --- /dev/null +++ b/engine/execution/storehouse/block_end_snapshot_test.go @@ -0,0 +1,98 @@ +package storehouse_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + executionMock "github.com/onflow/flow-go/engine/execution/mock" + "github.com/onflow/flow-go/engine/execution/storehouse" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestBlockEndSnapshot(t *testing.T) { + t.Run("Get register", func(t *testing.T) { + header := unittest.BlockHeaderFixture() + + // create mock for storage + store := executionMock.NewRegisterStore(t) + reg := unittest.MakeOwnerReg("key", "value") + store.On("GetRegister", header.Height, header.ID(), reg.Key).Return(reg.Value, nil).Once() + snapshot := storehouse.NewBlockEndStateSnapshot(store, header.ID(), header.Height) + + // test get from storage + value, err := snapshot.Get(reg.Key) + require.NoError(t, err) + require.Equal(t, reg.Value, value) + + // test get from cache + value, err = snapshot.Get(reg.Key) + require.NoError(t, err) + 
require.Equal(t, reg.Value, value) + + // test get non-existing register + unknownReg := unittest.MakeOwnerReg("unknown", "unknown") + store.On("GetRegister", header.Height, header.ID(), unknownReg.Key). + Return(nil, fmt.Errorf("fail: %w", storage.ErrNotFound)).Once() + + value, err = snapshot.Get(unknownReg.Key) + require.NoError(t, err) + require.Nil(t, value) + + // test get non-existing register from cache + _, err = snapshot.Get(unknownReg.Key) + require.NoError(t, err) + require.Nil(t, value) + + // test getting storage.ErrHeightNotIndexed error + heightNotIndexed := unittest.MakeOwnerReg("height not index", "height not index") + store.On("GetRegister", header.Height, header.ID(), heightNotIndexed.Key). + Return(nil, fmt.Errorf("fail: %w", storage.ErrHeightNotIndexed)). + Twice() // to verify the result is not cached + + // verify getting the correct error + _, err = snapshot.Get(heightNotIndexed.Key) + require.ErrorIs(t, err, storage.ErrHeightNotIndexed) + + // verify result is not cached + _, err = snapshot.Get(heightNotIndexed.Key) + require.ErrorIs(t, err, storage.ErrHeightNotIndexed) + + // test getting storehouse.ErrNotExecuted error + heightNotExecuted := unittest.MakeOwnerReg("height not executed", "height not executed") + counter := atomic.NewInt32(0) + store. + On("GetRegister", header.Height, header.ID(), heightNotExecuted.Key). + Return(func(uint64, flow.Identifier, flow.RegisterID) (flow.RegisterValue, error) { + counter.Inc() + // the first call should return an error + if counter.Load() == 1 { + return nil, fmt.Errorf("fail: %w", storehouse.ErrNotExecuted) + } + // the second call returns the value + return heightNotExecuted.Value, nil + }). + Times(2) + + // first time should return error + _, err = snapshot.Get(heightNotExecuted.Key) + require.ErrorIs(t, err, storehouse.ErrNotExecuted) + + // second time should return value + value, err = snapshot.Get(heightNotExecuted.Key) + require.NoError(t, err) + require.Equal(t, heightNotExecuted.Value, value) + + // third time should be cached + value, err = snapshot.Get(heightNotExecuted.Key) + require.NoError(t, err) + require.Equal(t, heightNotExecuted.Value, value) + + store.AssertExpectations(t) + }) + +} diff --git a/engine/execution/storehouse/executing_block_snapshot.go b/engine/execution/storehouse/executing_block_snapshot.go new file mode 100644 index 00000000000..e9e9b97c32b --- /dev/null +++ b/engine/execution/storehouse/executing_block_snapshot.go @@ -0,0 +1,76 @@ +package storehouse + +import ( + "github.com/onflow/flow-go/engine/execution" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/model/flow" +) + +var _ execution.ExtendableStorageSnapshot = (*ExecutingBlockSnapshot)(nil) + +// ExecutingBlockSnapshot is a snapshot of the storage at an executed collection. +// It starts with a storage snapshot at the end of the previous block. +// The register updates at the executed collection at baseHeight + 1 are cached in +// a map, such that retrieving register values at the snapshot will first check +// the cache, and then the storage. 
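+// +// For example (sketch; the commitments and update maps are placeholders): +// +// snap0 := NewExecutingBlockSnapshot(previousBlockSnapshot, baseCommit) +// snap1 := snap0.Extend(commitAfterCollection1, updates1) +// snap2 := snap1.Extend(commitAfterCollection2, updates2) +// // snap2.Get(reg) checks updates2 first, then updates1, then the previous block's snapshot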
+type ExecutingBlockSnapshot struct {
+	// the snapshot at the end of previous block
+	previous snapshot.StorageSnapshot
+
+	commitment      flow.StateCommitment
+	registerUpdates map[flow.RegisterID]flow.RegisterValue
+}
+
+// NewExecutingBlockSnapshot creates a new storage snapshot for an executed collection,
+// based on the block at height h - 1
+func NewExecutingBlockSnapshot(
+	previous snapshot.StorageSnapshot,
+	// the state commitment of a block at height h
+	commitment flow.StateCommitment,
+) *ExecutingBlockSnapshot {
+	return &ExecutingBlockSnapshot{
+		previous:        previous,
+		commitment:      commitment,
+		registerUpdates: make(map[flow.RegisterID]flow.RegisterValue),
+	}
+}
+
+// Get returns the register value at the snapshot.
+func (s *ExecutingBlockSnapshot) Get(id flow.RegisterID) (flow.RegisterValue, error) {
+	// get from latest updates first
+	value, ok := s.getFromUpdates(id)
+	if ok {
+		return value, nil
+	}
+
+	// get from the snapshot at the previous block (e.g. a BlockEndStateSnapshot)
+	value, err := s.previous.Get(id)
+	return value, err
+}
+
+func (s *ExecutingBlockSnapshot) getFromUpdates(id flow.RegisterID) (flow.RegisterValue, bool) {
+	value, ok := s.registerUpdates[id]
+	return value, ok
+}
+
+// Extend returns a new storage snapshot at the same block but for a different state commitment,
+// which contains the given register updates.
+// Usually it's used to create a new storage snapshot at the next executed collection.
+// The updates map contains the register updates of the executed collection.
+func (s *ExecutingBlockSnapshot) Extend(newCommit flow.StateCommitment, updates map[flow.RegisterID]flow.RegisterValue) execution.ExtendableStorageSnapshot {
+	// if there is no update, we can return the original snapshot directly
+	// instead of wrapping it with a new ExecutingBlockSnapshot that has no update
+	if len(updates) == 0 {
+		return s
+	}
+
+	return &ExecutingBlockSnapshot{
+		previous:        s,
+		commitment:      newCommit,
+		registerUpdates: updates,
+	}
+}
+
+func (s *ExecutingBlockSnapshot) Commitment() flow.StateCommitment {
+	return s.commitment
+}
diff --git a/engine/execution/storehouse/executing_block_snapshot_test.go b/engine/execution/storehouse/executing_block_snapshot_test.go
new file mode 100644
index 00000000000..616430ec858
--- /dev/null
+++ b/engine/execution/storehouse/executing_block_snapshot_test.go
@@ -0,0 +1,92 @@
+package storehouse_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/engine/execution/storehouse"
+	"github.com/onflow/flow-go/fvm/storage/snapshot"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestExtendingBlockSnapshot(t *testing.T) {
+	t.Run("Get register", func(t *testing.T) {
+		reg1 := makeReg("key1", "val1")
+		base := snapshot.MapStorageSnapshot{
+			reg1.Key: reg1.Value,
+		}
+		baseCommit := unittest.StateCommitmentFixture()
+		snap := storehouse.NewExecutingBlockSnapshot(base, baseCommit)
+
+		// should get value
+		value, err := snap.Get(reg1.Key)
+		require.NoError(t, err)
+		require.Equal(t, reg1.Value, value)
+
+		// should get nil for unknown register
+		unknown := makeReg("unknown", "unknownV")
+		value, err = snap.Get(unknown.Key)
+		require.NoError(t, err)
+		require.Equal(t, []byte(nil), value)
+	})
+
+	t.Run("Extend snapshot", func(t *testing.T) {
+		reg1 := makeReg("key1", "val1")
+		reg2 := makeReg("key2", "val2")
+		base := snapshot.MapStorageSnapshot{
+			reg1.Key: reg1.Value,
+			reg2.Key: reg2.Value,
+		}
+		// snap1: { key1: val1, key2: val2 }
+		snap1 := storehouse.NewExecutingBlockSnapshot(base, unittest.StateCommitmentFixture())
+
+		updatedReg2 := makeReg("key2", "val22")
+		reg3 := makeReg("key3", "val3")
+		// snap2: { key1: val1, key2: val22, key3: val3 }
+		snap2 := snap1.Extend(unittest.StateCommitmentFixture(), map[flow.RegisterID]flow.RegisterValue{
+			updatedReg2.Key: updatedReg2.Value,
+			reg3.Key:        reg3.Value,
+		})
+
+		// should get un-changed value
+		value, err := snap2.Get(reg1.Key)
+		require.NoError(t, err)
+		require.Equal(t, []byte("val1"), value)
+
+		value, err = snap2.Get(reg2.Key)
+		require.NoError(t, err)
+		require.Equal(t, []byte("val22"), value)
+
+		value, err = snap2.Get(reg3.Key)
+		require.NoError(t, err)
+		require.Equal(t, []byte("val3"), value)
+
+		// should get nil for unknown register
+		unknown := makeReg("unknown", "unknownV")
+		value, err = snap2.Get(unknown.Key)
+		require.NoError(t, err)
+		require.Equal(t, []byte(nil), value)
+
+		// create snap3 with reg3 updated
+		// snap3: { key1: val1, key2: val22, key3: val33 }
+		updatedReg3 := makeReg("key3", "val33")
+		snap3 := snap2.Extend(unittest.StateCommitmentFixture(), map[flow.RegisterID]flow.RegisterValue{
+			updatedReg3.Key: updatedReg3.Value,
+		})
+
+		// verify all keys
+		value, err = snap3.Get(reg1.Key)
+		require.NoError(t, err)
+		require.Equal(t, []byte("val1"), value)
+
+		value, err = snap3.Get(reg2.Key)
+		require.NoError(t, err)
+		require.Equal(t, []byte("val22"), value)
+
+		value, err = snap3.Get(reg3.Key)
+		require.NoError(t, err)
+		require.Equal(t, []byte("val33"), value)
+	})
+}
diff --git a/engine/execution/storehouse/in_memory_register_store.go b/engine/execution/storehouse/in_memory_register_store.go
new file mode 100644
index 00000000000..029e665cba8
--- /dev/null
+++ b/engine/execution/storehouse/in_memory_register_store.go
@@ -0,0 +1,351 @@
+package storehouse
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/onflow/flow-go/engine/execution"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+var _ execution.InMemoryRegisterStore = (*InMemoryRegisterStore)(nil)
+
+var ErrNotExecuted = fmt.Errorf("block is not executed")
+
+type PrunedError struct {
+	PrunedHeight uint64
+	PrunedID     flow.Identifier
+	Height       uint64
+}
+
+func NewPrunedError(height uint64, prunedHeight uint64, prunedID flow.Identifier) error {
+	return PrunedError{Height: height, PrunedHeight: prunedHeight, PrunedID: prunedID}
+}
+
+func (e PrunedError) Error() string {
+	return fmt.Sprintf("block is pruned at height %d", e.Height)
+}
+
+func IsPrunedError(err error) (PrunedError, bool) {
+	var e PrunedError
+	ok := errors.As(err, &e)
+	if ok {
+		return e, true
+	}
+	return PrunedError{}, false
+}
+
+type InMemoryRegisterStore struct {
+	sync.RWMutex
+	registersByBlockID map[flow.Identifier]map[flow.RegisterID]flow.RegisterValue // for storing the registers
+	parentByBlockID    map[flow.Identifier]flow.Identifier                        // for register updates to be fork-aware
+	blockIDsByHeight   map[uint64]map[flow.Identifier]struct{}                    // for pruning
+	prunedHeight       uint64          // registers at pruned height are pruned (not saved in registersByBlockID)
+	prunedID           flow.Identifier // to ensure all blocks are extending from pruned block (last finalized and executed block)
+}
+
+func NewInMemoryRegisterStore(lastHeight uint64, lastID flow.Identifier) *InMemoryRegisterStore {
+	return &InMemoryRegisterStore{
+		registersByBlockID: make(map[flow.Identifier]map[flow.RegisterID]flow.RegisterValue),
+		parentByBlockID:    make(map[flow.Identifier]flow.Identifier),
+		blockIDsByHeight:   make(map[uint64]map[flow.Identifier]struct{}),
+		prunedHeight:       lastHeight,
+		prunedID:           lastID,
+	}
+}
+
+// SaveRegisters saves the registers of a block to InMemoryRegisterStore.
+// It needs to ensure the block is above the pruned height and is connected to the pruned block.
+func (s *InMemoryRegisterStore) SaveRegisters(
+	height uint64,
+	blockID flow.Identifier,
+	parentID flow.Identifier,
+	registers flow.RegisterEntries,
+) error {
+	// preprocess data before acquiring the lock
+	regs := make(map[flow.RegisterID]flow.RegisterValue, len(registers))
+	for _, reg := range registers {
+		regs[reg.Key] = reg.Value
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	// ensure all saved registers are above the pruned height
+	if height <= s.prunedHeight {
+		return fmt.Errorf("saving pruned registers height %v <= pruned height %v", height, s.prunedHeight)
+	}
+
+	// ensure the block is not already saved
+	_, ok := s.registersByBlockID[blockID]
+	if ok {
+		// already exist
+		return fmt.Errorf("saving registers for block %s, but it already exists", blockID)
+	}
+
+	// make sure parent is a known block or the pruned block, which forms a fork
+	_, ok = s.registersByBlockID[parentID]
+	if !ok && parentID != s.prunedID {
+		return fmt.Errorf("saving registers for block %s, but its parent %s is not saved", blockID, parentID)
+	}
+
+	// update registers for the block
+	s.registersByBlockID[blockID] = regs
+
+	// update index on parent
+	s.parentByBlockID[blockID] = parentID
+
+	// update index on height
+	sameHeight, ok := s.blockIDsByHeight[height]
+	if !ok {
+		sameHeight = make(map[flow.Identifier]struct{})
+		s.blockIDsByHeight[height] = sameHeight
+	}
+
+	sameHeight[blockID] = struct{}{}
+	return nil
+}
+
+// GetRegister will return the latest updated value of the given register
+// since the pruned height.
+// It returns PrunedError if the register is unknown or not updated since the pruned height.
+// It can't return ErrNotFound, since we can't distinguish between not found and not updated since the pruned height.
+func (s *InMemoryRegisterStore) GetRegister(height uint64, blockID flow.Identifier, register flow.RegisterID) (flow.RegisterValue, error) {
+	s.RLock()
+	defer s.RUnlock()
+
+	if height <= s.prunedHeight {
+		return flow.RegisterValue{}, NewPrunedError(height, s.prunedHeight, s.prunedID)
+	}
+
+	_, ok := s.registersByBlockID[blockID]
+	if !ok {
+		return flow.RegisterValue{}, fmt.Errorf("cannot get register at height %d, block %v is not saved: %w", height, blockID, ErrNotExecuted)
+	}
+
+	// traverse the fork to find the latest updated value of the given register
+	// if not found, it means the register is not updated from the pruned block to the given block
+	block := blockID
+	for {
+		// TODO: do not hold the read lock when reading register from the updated register map
+		reg, ok := s.readRegisterAtBlockID(block, register)
+		if ok {
+			return reg, nil
+		}
+
+		// the register didn't get updated at this block, so check its parent
+
+		parent, ok := s.parentByBlockID[block]
+		if !ok {
+			// if the parent doesn't exist because the block itself is the pruned block,
+			// then it means the register is not updated since the pruned height.
+			// since we can't distinguish whether the register is not updated or does not exist at all,
+			// we just return a PrunedError along with the prunedHeight, so the
+			// caller could check with OnDiskRegisterStore to find whether this register has an updated value
+			// at an earlier height.
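+			//
+			// a sketch of how a caller owning both stores could consume this error
+			// (this mirrors RegisterStore.GetRegister further below; diskStore is an
+			// assumed execution.OnDiskRegisterStore):
+			//
+			//	val, err := memStore.GetRegister(height, blockID, regID)
+			//	if pe, ok := IsPrunedError(err); ok && height > pe.PrunedHeight {
+			//		val, err = diskStore.Get(regID, pe.PrunedHeight)
+			//	}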
+			if block == s.prunedID {
+				return flow.RegisterValue{}, NewPrunedError(height, s.prunedHeight, s.prunedID)
+			}
+
+			// in this case, it means the state of the in-memory register store is inconsistent,
+			// because every saved block must have its parent saved in `parentByBlockID`, and traversing
+			// its parents should eventually reach the pruned block, otherwise it's a bug.
+
+			return flow.RegisterValue{},
+				fmt.Errorf("inconsistent parent block index in in-memory-register-store, ancestor block %v is not found when getting register at block %v",
+					block, blockID)
+		}
+
+		block = parent
+	}
+}
+
+func (s *InMemoryRegisterStore) readRegisterAtBlockID(blockID flow.Identifier, register flow.RegisterID) (flow.RegisterValue, bool) {
+	registers, ok := s.registersByBlockID[blockID]
+	if !ok {
+		return flow.RegisterValue{}, false
+	}
+
+	value, ok := registers[register]
+	return value, ok
+}
+
+// GetUpdatedRegisters returns the updated registers of a block
+func (s *InMemoryRegisterStore) GetUpdatedRegisters(height uint64, blockID flow.Identifier) (flow.RegisterEntries, error) {
+	registerUpdates, err := s.getUpdatedRegisters(height, blockID)
+	if err != nil {
+		return nil, err
+	}
+
+	// since the registerUpdates won't be updated and registers for a block can only be set once,
+	// we don't need to hold the lock when converting it from map into slice.
+	registers := make(flow.RegisterEntries, 0, len(registerUpdates))
+	for regID, reg := range registerUpdates {
+		registers = append(registers, flow.RegisterEntry{
+			Key:   regID,
+			Value: reg,
+		})
+	}
+
+	return registers, nil
+}
+
+func (s *InMemoryRegisterStore) getUpdatedRegisters(height uint64, blockID flow.Identifier) (map[flow.RegisterID]flow.RegisterValue, error) {
+	s.RLock()
+	defer s.RUnlock()
+	if height <= s.prunedHeight {
+		return nil, fmt.Errorf("cannot get register at height %d, it is pruned (prunedHeight: %v)", height, s.prunedHeight)
+	}
+
+	registerUpdates, ok := s.registersByBlockID[blockID]
+	if !ok {
+		return nil, fmt.Errorf("cannot get register at height %d, block %s is not found: %w", height, blockID, ErrNotExecuted)
+	}
+	return registerUpdates, nil
+}
+
+// Prune prunes the register store to the given height.
+// The pruned height must be an executed block; the caller should ensure that by calling SaveRegisters before.
+//
+// Pruning is done by walking up the finalized fork from `s.prunedHeight` to `height`. At each height, prune all
+// other forks that begin at that height. This ensures that data for all conflicting forks is freed.
+//
+// TODO: pruning currently blocks the caller; move the pruning work to an async process
+func (s *InMemoryRegisterStore) Prune(height uint64, blockID flow.Identifier) error {
+	finalizedFork, err := s.findFinalizedFork(height, blockID)
+	if err != nil {
+		return fmt.Errorf("cannot find finalized fork: %w", err)
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	// prune each height starting at the lowest height in the fork. this will remove all blocks
+	// below the new pruned height along with any conflicting forks.
+	for i := len(finalizedFork) - 1; i >= 0; i-- {
+		blockID := finalizedFork[i]
+
+		err := s.pruneByHeight(s.prunedHeight+1, blockID)
+		if err != nil {
+			return fmt.Errorf("could not prune by height %v: %w", s.prunedHeight+1, err)
+		}
+	}
+
+	return nil
+}
+
+func (s *InMemoryRegisterStore) PrunedHeight() uint64 {
+	s.RLock()
+	defer s.RUnlock()
+	return s.prunedHeight
+}
+
+func (s *InMemoryRegisterStore) IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) {
+	s.RLock()
+	defer s.RUnlock()
+
+	// finalized and executed blocks are pruned,
+	// so if the height is below the pruned height, the in-memory register store cannot tell
+	// whether it's executed or not
+	if height < s.prunedHeight {
+		return false, fmt.Errorf("below pruned height")
+	}
+
+	// if the height is the pruned height, then it's executed only if the blockID is the prunedID,
+	// since the pruned block must be finalized and executed.
+	if height == s.prunedHeight {
+		return blockID == s.prunedID, nil
+	}
+
+	_, ok := s.registersByBlockID[blockID]
+	return ok, nil
+}
+
+// findFinalizedFork returns the finalized fork from higher height to lower height;
+// the last block's height is s.prunedHeight + 1
+func (s *InMemoryRegisterStore) findFinalizedFork(height uint64, blockID flow.Identifier) ([]flow.Identifier, error) {
+	s.RLock()
+	defer s.RUnlock()
+
+	if height < s.prunedHeight {
+		return nil, fmt.Errorf("cannot find finalized fork at height %d, it is pruned (prunedHeight: %v)", height, s.prunedHeight)
+	}
+
+	if height == s.prunedHeight {
+		if blockID != s.prunedID {
+			return nil, fmt.Errorf("cannot find finalized fork at height %d, it is pruned (prunedHeight: %v, prunedID: %v)", height, s.prunedHeight, s.prunedID)
+		}
+
+		return nil, nil
+	}
+
+	prunedHeight := height
+	block := blockID
+
+	// walk backwards from the provided finalized block to the last pruned block;
+	// the result must be a chain from height/blockID to s.prunedHeight/s.prunedID
+	fork := make([]flow.Identifier, 0, height-s.prunedHeight)
+	for {
+		fork = append(fork, block)
+		prunedHeight--
+
+		parent, ok := s.parentByBlockID[block]
+		if !ok {
+			return nil, fmt.Errorf("inconsistent parent block index in in-memory-register-store, ancestor block %s is not found when finding finalized fork at height %v", block, height)
+		}
+		if parent == s.prunedID {
+			break
+		}
+		block = parent
+	}
+
+	if prunedHeight != s.prunedHeight {
+		return nil, fmt.Errorf("inconsistent parent block index in in-memory-register-store, pruned height %d is not equal to %d", prunedHeight, s.prunedHeight)
+	}
+
+	return fork, nil
+}
+
+func (s *InMemoryRegisterStore) pruneByHeight(height uint64, finalized flow.Identifier) error {
+	s.removeBlock(height, finalized)
+
+	// remove conflicting forks
+	for blockID := range s.blockIDsByHeight[height] {
+		s.pruneFork(height, blockID)
+	}
+
+	if len(s.blockIDsByHeight[height]) > 0 {
+		return fmt.Errorf("all forks on the same height should have been pruned, but %v remain", len(s.blockIDsByHeight[height]))
+	}
+
+	delete(s.blockIDsByHeight, height)
+	s.prunedHeight = height
+	s.prunedID = finalized
+	return nil
+}
+
+func (s *InMemoryRegisterStore) removeBlock(height uint64, blockID flow.Identifier) {
+	delete(s.registersByBlockID, blockID)
+	delete(s.parentByBlockID, blockID)
+	delete(s.blockIDsByHeight[height], blockID)
+}
+
+// pruneFork prunes the provided block and all of its children;
+// all its children must be at height + 1, whose parent is blockID
+
+	nextHeight := height + 1
+	blocksAtNextHeight, ok := s.blockIDsByHeight[nextHeight]
+	if !ok {
+		return
+	}
+
+	for block := range blocksAtNextHeight {
+		isChild := s.parentByBlockID[block] == blockID
+		if isChild {
+			s.pruneFork(nextHeight, block)
+		}
+	}
+}
diff --git a/engine/execution/storehouse/in_memory_register_store_test.go b/engine/execution/storehouse/in_memory_register_store_test.go
new file mode 100644
index 00000000000..f7f417375da
--- /dev/null
+++ b/engine/execution/storehouse/in_memory_register_store_test.go
@@ -0,0 +1,689 @@
+package storehouse
+
+import (
+	"fmt"
+	"math/rand"
+	"sync"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// 1. SaveRegisters should fail if height is below or equal to pruned height
+func TestInMemoryRegisterStore(t *testing.T) {
+	t.Run("FailBelowOrEqualPrunedHeight", func(t *testing.T) {
+		t.Parallel()
+		// 1.
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+		err := store.SaveRegisters(
+			pruned-1, // below the pruned height, will fail
+			unittest.IdentifierFixture(),
+			unittest.IdentifierFixture(),
+			flow.RegisterEntries{},
+		)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "<= pruned height")
+
+		err = store.SaveRegisters(
+			pruned, // equal to the pruned height, will fail
+			lastID,
+			unittest.IdentifierFixture(),
+			flow.RegisterEntries{},
+		)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "<= pruned height")
+	})
+
+	// 2. SaveRegisters should fail if its parent block doesn't exist and it is not the pruned block
+	// SaveRegisters should succeed if height is above pruned height and block is not saved,
+	// the updates can be retrieved by GetUpdatedRegisters
+	// GetRegister should return PrunedError if the queried key is not updated since pruned height
+	// GetRegister should return PrunedError if the queried height is below pruned height
+	// GetRegister should return ErrNotExecuted if the block is unknown
+	t.Run("FailParentNotExist", func(t *testing.T) {
+		t.Parallel()
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		height := pruned + 1 // above the pruned height
+		blockID := unittest.IdentifierFixture()
+		notExistParent := unittest.IdentifierFixture()
+		reg := unittest.RegisterEntryFixture()
+		err := store.SaveRegisters(
+			height,
+			blockID,
+			notExistParent, // should fail because parent doesn't exist
+			flow.RegisterEntries{reg},
+		)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "but its parent")
+	})
+
+	t.Run("StoreOK", func(t *testing.T) {
+		t.Parallel()
+		// 3.
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		height := pruned + 1 // above the pruned height
+		blockID := unittest.IdentifierFixture()
+		reg := unittest.RegisterEntryFixture()
+		err := store.SaveRegisters(
+			height,
+			blockID,
+			lastID,
+			flow.RegisterEntries{reg},
+		)
+		require.NoError(t, err)
+
+		val, err := store.GetRegister(height, blockID, reg.Key)
+		require.NoError(t, err)
+		require.Equal(t, reg.Value, val)
+
+		// unknown key
+		_, err = store.GetRegister(height, blockID, unknownKey)
+		require.Error(t, err)
+		pe, ok := IsPrunedError(err)
+		require.True(t, ok)
+		require.Equal(t, pe.PrunedHeight, pruned)
+		require.Equal(t, pe.Height, height)
+
+		// unknown block with unknown height
+		_, err = store.GetRegister(height+1, unknownBlock, reg.Key)
+		require.Error(t, err)
+		require.ErrorIs(t, err, ErrNotExecuted)
+
+		// unknown block with known height
+		_, err = store.GetRegister(height, unknownBlock, reg.Key)
+		require.Error(t, err)
+		require.ErrorIs(t, err, ErrNotExecuted)
+
+		// too low height
+		_, err = store.GetRegister(height-1, unknownBlock, reg.Key)
+		require.Error(t, err)
+		pe, ok = IsPrunedError(err)
+		require.True(t, ok)
+		require.Equal(t, pe.PrunedHeight, pruned)
+		require.Equal(t, pe.Height, height-1)
+	})
+
+	// 3. SaveRegisters should fail if the block is already saved
+	t.Run("StoreFailAlreadyExist", func(t *testing.T) {
+		t.Parallel()
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		height := pruned + 1 // above the pruned height
+		blockID := unittest.IdentifierFixture()
+		reg := unittest.RegisterEntryFixture()
+		err := store.SaveRegisters(
+			height,
+			blockID,
+			lastID,
+			flow.RegisterEntries{reg},
+		)
+		require.NoError(t, err)
+
+		// saving again should fail
+		err = store.SaveRegisters(
+			height,
+			blockID,
+			lastID,
+			flow.RegisterEntries{reg},
+		)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "already exists")
+	})
+
+	// 4. SaveRegisters should succeed if a different block at the same height was saved before,
+	// updates for different blocks can be retrieved by their blockID
+	t.Run("StoreOKDifferentBlockSameParent", func(t *testing.T) {
+		t.Parallel()
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		// 10 <- A
+		//    ^- B
+		height := pruned + 1 // above the pruned height
+		blockA := unittest.IdentifierFixture()
+		regA := unittest.RegisterEntryFixture()
+		err := store.SaveRegisters(
+			height,
+			blockA,
+			lastID,
+			flow.RegisterEntries{regA},
+		)
+		require.NoError(t, err)
+
+		blockB := unittest.IdentifierFixture()
+		regB := unittest.RegisterEntryFixture()
+		err = store.SaveRegisters(
+			height,
+			blockB, // different block
+			lastID, // same parent
+			flow.RegisterEntries{regB},
+		)
+		require.NoError(t, err)
+
+		valA, err := store.GetRegister(height, blockA, regA.Key)
+		require.NoError(t, err)
+		require.Equal(t, regA.Value, valA)
+
+		valB, err := store.GetRegister(height, blockB, regB.Key)
+		require.NoError(t, err)
+		require.Equal(t, regB.Value, valB)
+	})
+
+	t.Run("IsBlockExecuted", func(t *testing.T) {
+		t.Parallel()
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		height := pruned + 1 // above the pruned height
+		blockID := unittest.IdentifierFixture()
+		reg := unittest.RegisterEntryFixture()
+		err := store.SaveRegisters(
+			height,
+			blockID,
+			lastID,
+			flow.RegisterEntries{reg},
+		)
+		require.NoError(t, err)
+
+		// above the pruned height and is executed
+		executed, err := store.IsBlockExecuted(height, blockID)
+		require.NoError(t, err)
+		require.True(t, executed)
+
+		// above the pruned height, and is not executed
+		executed, err = store.IsBlockExecuted(pruned+1, unittest.IdentifierFixture())
+		require.NoError(t, err)
+		require.False(t, executed)
+
+		executed, err = store.IsBlockExecuted(pruned+2, unittest.IdentifierFixture())
+		require.NoError(t, err)
+		require.False(t, executed)
+
+		// below the pruned height
+		_, err = store.IsBlockExecuted(pruned-1, unittest.IdentifierFixture())
+		require.Error(t, err)
+
+		// equal to the pruned height and is the pruned block
+		executed, err = store.IsBlockExecuted(pruned, lastID)
+		require.NoError(t, err)
+		require.True(t, executed)
+
+		// equal to the pruned height, but is not the pruned block
+		executed, err = store.IsBlockExecuted(pruned, unittest.IdentifierFixture())
+		require.NoError(t, err)
+		require.False(t, executed)
+
+		// prune a new block
+		require.NoError(t, store.Prune(height, blockID))
+		// equal to the pruned height and is the pruned block
+		executed, err = store.IsBlockExecuted(height, blockID)
+		require.NoError(t, err)
+		require.True(t, executed)
+
+		// equal to the pruned height, but is not the pruned block
+		executed, err = store.IsBlockExecuted(height, unittest.IdentifierFixture())
+		require.NoError(t, err)
+		require.False(t, executed)
+
+		// below the pruned height
+		_, err = store.IsBlockExecuted(pruned, lastID)
+		require.Error(t, err)
+	})
+
+	// 5. Given A(X: 1, Y: 2), GetRegister(A, X) should return 1, GetRegister(A, Y) should return 2
+	t.Run("GetRegistersOK", func(t *testing.T) {
+		t.Parallel()
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		// 10 <- A (X: 1, Y: 2)
+		height := pruned + 1 // above the pruned height
+		blockA := unittest.IdentifierFixture()
+		regX := makeReg("X", "1")
+		regY := makeReg("Y", "2")
+		err := store.SaveRegisters(
+			height,
+			blockA,
+			lastID,
+			flow.RegisterEntries{regX, regY},
+		)
+		require.NoError(t, err)
+
+		valX, err := store.GetRegister(height, blockA, regX.Key)
+		require.NoError(t, err)
+		require.Equal(t, regX.Value, valX)
+
+		valY, err := store.GetRegister(height, blockA, regY.Key)
+		require.NoError(t, err)
+		require.Equal(t, regY.Value, valY)
+	})
+
+	// 6. Given A(X: 1, Y: 2) <- B(Y: 3),
+	// GetRegister(B, X) should return 1, because X is not updated in B
+	// GetRegister(B, Y) should return 3, because Y is updated in B
+	// GetRegister(A, Y) should return 2, because the query queries the value at A, not B
+	// GetRegister(B, Z) should return PrunedError, because register is unknown
+	// GetRegister(C, X) should return BlockNotExecuted, because block C is not executed
+	t.Run("GetLatestValueOK", func(t *testing.T) {
+		t.Parallel()
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		// 10 <- A (X: 1, Y: 2) <- B (Y: 3)
+		blockA := unittest.IdentifierFixture()
+		regX := makeReg("X", "1")
+		regY := makeReg("Y", "2")
+		err := store.SaveRegisters(
+			pruned+1,
+			blockA,
+			lastID,
+			flow.RegisterEntries{regX, regY},
+		)
+		require.NoError(t, err)
+
+		blockB := unittest.IdentifierFixture()
+		regY3 := makeReg("Y", "3")
+		err = store.SaveRegisters(
+			pruned+2,
+			blockB,
+			blockA,
+			flow.RegisterEntries{regY3},
+		)
+		require.NoError(t, err)
+
+		val, err := store.GetRegister(pruned+2, blockB, regX.Key)
+		require.NoError(t, err)
+		require.Equal(t, regX.Value, val) // X is not updated in B
+
+		val, err = store.GetRegister(pruned+2, blockB, regY.Key)
+		require.NoError(t, err)
+		require.Equal(t, regY3.Value, val) // Y is updated in B
+
+		val, err = store.GetRegister(pruned+1, blockA, regY.Key)
+		require.NoError(t, err)
+		require.Equal(t, regY.Value, val) // Y's old value at A
+
+		_, err = store.GetRegister(pruned+2, blockB, unknownKey)
+		require.Error(t, err)
+		pe, ok := IsPrunedError(err)
+		require.True(t, ok)
+		require.Equal(t, pe.PrunedHeight, pruned)
+		require.Equal(t, pe.Height, pruned+2)
+
+		_, err = store.GetRegister(pruned+3, unittest.IdentifierFixture(), regX.Key)
+		require.Error(t, err)
+		require.ErrorIs(t, err, ErrNotExecuted) // unknown block
+	})
+
+	// 7. Given the following tree:
+	// Pruned <- A(X:1) <- B(Y:2)
+	// .......^- C(X:3) <- D(Y:4)
+	// GetRegister(D, X) should return 3
+	t.Run("StoreMultiForkOK", func(t *testing.T) {
+		t.Parallel()
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		// 10 <- A (X: 1) <- B (Y: 2)
+		//    ^- C (X: 3) <- D (Y: 4)
+		blockA := unittest.IdentifierFixture()
+		blockB := unittest.IdentifierFixture()
+		blockC := unittest.IdentifierFixture()
+		blockD := unittest.IdentifierFixture()
+
+		require.NoError(t, store.SaveRegisters(
+			pruned+1,
+			blockA,
+			lastID,
+			flow.RegisterEntries{makeReg("X", "1")},
+		))
+
+		require.NoError(t, store.SaveRegisters(
+			pruned+2,
+			blockB,
+			blockA,
+			flow.RegisterEntries{makeReg("Y", "2")},
+		))
+
+		require.NoError(t, store.SaveRegisters(
+			pruned+1,
+			blockC,
+			lastID,
+			flow.RegisterEntries{makeReg("X", "3")},
+		))
+
+		require.NoError(t, store.SaveRegisters(
+			pruned+2,
+			blockD,
+			blockC,
+			flow.RegisterEntries{makeReg("Y", "4")},
+		))
+
+		reg := makeReg("X", "3")
+		val, err := store.GetRegister(pruned+2, blockD, reg.Key)
+		require.NoError(t, err)
+		require.Equal(t, reg.Value, val)
+	})
+
+	// 8. Given the following tree:
+	// Pruned <- A(X:1) <- B(Y:2), B is not executed
+	// GetUpdatedRegisters(B) should return ErrNotExecuted
+	t.Run("GetUpdatedRegisters", func(t *testing.T) {
+		t.Parallel()
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		// 10 <- A (X: 1) <- B (Y: 2)
+		blockA := unittest.IdentifierFixture()
+		blockB := unittest.IdentifierFixture()
+
+		require.NoError(t, store.SaveRegisters(
+			pruned+1,
+			blockA,
+			lastID,
+			flow.RegisterEntries{makeReg("X", "1")},
+		))
+
+		reg, err := store.GetUpdatedRegisters(pruned+1, blockA)
+		require.NoError(t, err)
+		require.Equal(t, flow.RegisterEntries{makeReg("X", "1")}, reg)
+
+		_, err = store.GetUpdatedRegisters(pruned+2, blockB)
+		require.Error(t, err)
+		require.ErrorIs(t, err, ErrNotExecuted)
+	})
+
+	// 9. Prune should fail if the block is unknown
+	// Prune should succeed if the block is known, and GetUpdatedRegisters should return err
+	// Prune should prune up to the pruned height.
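+	//
+	// a small usage sketch of the pruning API (identifiers are illustrative):
+	//
+	//	store := NewInMemoryRegisterStore(10, finalizedID10)
+	//	_ = store.SaveRegisters(11, blockA, finalizedID10, updatesA)
+	//	_ = store.Prune(11, blockA)                   // 11 becomes the new pruned height
+	//	_ = store.PrunedHeight()                      // returns 11
+	//	_, err := store.GetRegister(11, blockA, reg)  // now returns a PrunedError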
+	// Given Pruned <- A(X:1) <- B(X:2) <- C(X:3) <- D(X:4)
+	// after Prune(B), GetRegister(C, X) should return 3, GetRegister(B, X) should return err
+	t.Run("StorePrune", func(t *testing.T) {
+		t.Parallel()
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		blockA := unittest.IdentifierFixture()
+		blockB := unittest.IdentifierFixture()
+		blockC := unittest.IdentifierFixture()
+		blockD := unittest.IdentifierFixture()
+
+		require.NoError(t, store.SaveRegisters(
+			pruned+1,
+			blockA,
+			lastID,
+			flow.RegisterEntries{makeReg("X", "1")},
+		))
+
+		require.NoError(t, store.SaveRegisters(
+			pruned+2,
+			blockB,
+			blockA,
+			flow.RegisterEntries{makeReg("X", "2")},
+		))
+
+		require.NoError(t, store.SaveRegisters(
+			pruned+3,
+			blockC,
+			blockB,
+			flow.RegisterEntries{makeReg("X", "3")},
+		))
+
+		require.NoError(t, store.SaveRegisters(
+			pruned+4,
+			blockD,
+			blockC,
+			flow.RegisterEntries{makeReg("X", "4")},
+		))
+
+		err := store.Prune(pruned+1, unknownBlock) // block is unknown
+		require.Error(t, err)
+
+		err = store.Prune(pruned+1, blockB) // block is known, but height is wrong
+		require.Error(t, err)
+
+		err = store.Prune(pruned+4, unknownBlock) // height is unknown
+		require.Error(t, err)
+
+		err = store.Prune(pruned+1, blockA) // prune next block
+		require.NoError(t, err)
+
+		require.Equal(t, pruned+1, store.PrunedHeight())
+
+		reg := makeReg("X", "3")
+		val, err := store.GetRegister(pruned+3, blockC, reg.Key)
+		require.NoError(t, err)
+		require.Equal(t, reg.Value, val)
+
+		_, err = store.GetRegister(pruned+1, blockA, reg.Key) // A is pruned
+		require.Error(t, err)
+		pe, ok := IsPrunedError(err)
+		require.True(t, ok)
+		require.Equal(t, pe.PrunedHeight, pruned+1)
+		require.Equal(t, pe.Height, pruned+1)
+
+		err = store.Prune(pruned+3, blockC) // prune both B and C
+		require.NoError(t, err)
+
+		require.Equal(t, pruned+3, store.PrunedHeight())
+
+		reg = makeReg("X", "4")
+		val, err = store.GetRegister(pruned+4, blockD, reg.Key) // can still get X at block D
+		require.NoError(t, err)
+		require.Equal(t, reg.Value, val)
+	})
+
+	// 10. Prune should prune conflicting forks
+	// Given Pruned <- A(X:1) <- B(X:2)
+	// .................. ^----- E(X:5)
+	// ............ ^- C(X:3) <- D(Y:4)
^- C(X:3) <- D(X:4) + // Prune(A) should prune C and D, and GetUpdatedRegisters(C) should return out of range error, + // GetUpdatedRegisters(D) should return NotFound + t.Run("PruneConflictingForks", func(t *testing.T) { + t.Parallel() + pruned := uint64(10) + lastID := unittest.IdentifierFixture() + store := NewInMemoryRegisterStore(pruned, lastID) + + blockA := unittest.IdentifierFixture() + blockB := unittest.IdentifierFixture() + blockC := unittest.IdentifierFixture() + blockD := unittest.IdentifierFixture() + blockE := unittest.IdentifierFixture() + + require.NoError(t, store.SaveRegisters( + pruned+1, + blockA, + lastID, + flow.RegisterEntries{makeReg("X", "1")}, + )) + + require.NoError(t, store.SaveRegisters( + pruned+2, + blockB, + blockA, + flow.RegisterEntries{makeReg("X", "2")}, + )) + + require.NoError(t, store.SaveRegisters( + pruned+1, + blockC, + lastID, + flow.RegisterEntries{makeReg("X", "3")}, + )) + + require.NoError(t, store.SaveRegisters( + pruned+2, + blockD, + blockC, + flow.RegisterEntries{makeReg("X", "4")}, + )) + + require.NoError(t, store.SaveRegisters( + pruned+2, + blockE, + blockA, + flow.RegisterEntries{makeReg("X", "5")}, + )) + + err := store.Prune(pruned+1, blockA) // prune A should prune C and D + require.NoError(t, err) + + _, err = store.GetUpdatedRegisters(pruned+2, blockD) + require.Error(t, err) + require.Contains(t, err.Error(), "not found") + + _, err = store.GetUpdatedRegisters(pruned+2, blockE) + require.NoError(t, err) + }) + + // 11. Concurrency: SaveRegisters can happen concurrently with GetUpdatedRegisters, and GetRegister + t.Run("ConcurrentSaveAndGet", func(t *testing.T) { + t.Parallel() + pruned := uint64(10) + lastID := unittest.IdentifierFixture() + store := NewInMemoryRegisterStore(pruned, lastID) + + // prepare a chain of 101 blocks with the first as lastID + count := 100 + blocks := make(map[uint64]flow.Identifier, count) + blocks[pruned] = lastID + for i := 1; i < count; i++ { + block := unittest.IdentifierFixture() + blocks[pruned+uint64(i)] = block + } + + reg := makeReg("X", "0") + + var wg sync.WaitGroup + for i := 1; i < count; i++ { + height := pruned + uint64(i) + require.NoError(t, store.SaveRegisters( + height, + blocks[height], + blocks[height-1], + flow.RegisterEntries{makeReg("X", fmt.Sprintf("%v", height))}, + )) + + // concurrently query get registers for past registers + wg.Add(1) + go func(i int) { + defer wg.Done() + + rdHeight := randBetween(pruned+1, pruned+uint64(i)+1) + val, err := store.GetRegister(rdHeight, blocks[rdHeight], reg.Key) + require.NoError(t, err) + r := makeReg("X", fmt.Sprintf("%v", rdHeight)) + require.Equal(t, r.Value, val) + }(i) + + // concurrently query updated registers + wg.Add(1) + go func(i int) { + defer wg.Done() + + rdHeight := randBetween(pruned+1, pruned+uint64(i)+1) + vals, err := store.GetUpdatedRegisters(rdHeight, blocks[rdHeight]) + require.NoError(t, err) + r := makeReg("X", fmt.Sprintf("%v", rdHeight)) + require.Equal(t, flow.RegisterEntries{r}, vals) + }(i) + } + + wg.Wait() + }) + + // 12. 
+	t.Run("ConcurrentSaveAndPrune", func(t *testing.T) {
+		t.Parallel()
+		pruned := uint64(10)
+		lastID := unittest.IdentifierFixture()
+		store := NewInMemoryRegisterStore(pruned, lastID)
+
+		// prepare a chain of 100 blocks, with the pruned block (lastID) as the first
+		count := 100
+		blocks := make(map[uint64]flow.Identifier, count)
+		blocks[pruned] = lastID
+		for i := 1; i < count; i++ {
+			block := unittest.IdentifierFixture()
+			blocks[pruned+uint64(i)] = block
+		}
+
+		var wg sync.WaitGroup
+		savedHeights := make(chan uint64, 100)
+
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+
+			lastPrunedHeight := pruned
+			for savedHeight := range savedHeights {
+				if savedHeight%10 != 0 {
+					continue
+				}
+				rdHeight := randBetween(lastPrunedHeight+1, savedHeight+1)
+				err := store.Prune(rdHeight, blocks[rdHeight])
+				require.NoError(t, err)
+				lastPrunedHeight = rdHeight
+			}
+		}()
+
+		// save the remaining blocks of the chain
+		for i := 1; i < count; i++ {
+			height := pruned + uint64(i)
+			require.NoError(t, store.SaveRegisters(
+				height,
+				blocks[height],
+				blocks[height-1],
+				flow.RegisterEntries{makeReg("X", fmt.Sprintf("%v", i))},
+			))
+			savedHeights <- height
+		}
+
+		close(savedHeights)
+
+		wg.Wait()
+	})
+
+	t.Run("PrunedError", func(t *testing.T) {
+		e := NewPrunedError(1, 2, unittest.IdentifierFixture())
+		pe, ok := IsPrunedError(e)
+		require.True(t, ok)
+		require.Equal(t, uint64(1), pe.Height)
+		require.Equal(t, uint64(2), pe.PrunedHeight)
+	})
+}
+
+func randBetween(min, max uint64) uint64 {
+	return uint64(rand.Intn(int(max)-int(min))) + min
+}
+
+func makeReg(key string, value string) flow.RegisterEntry {
+	return unittest.MakeOwnerReg(key, value)
+}
+
+var unknownBlock = unittest.IdentifierFixture()
+var unknownKey = flow.RegisterID{
+	Owner: "unknown",
+	Key:   "unknown",
+}
diff --git a/engine/execution/storehouse/register_engine.go b/engine/execution/storehouse/register_engine.go
new file mode 100644
index 00000000000..d34e28637e5
--- /dev/null
+++ b/engine/execution/storehouse/register_engine.go
@@ -0,0 +1,57 @@
+package storehouse
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/consensus/hotstuff/model"
+	"github.com/onflow/flow-go/engine"
+	"github.com/onflow/flow-go/module/component"
+	"github.com/onflow/flow-go/module/irrecoverable"
+)
+
+// RegisterEngine is a wrapper for RegisterStore in order to make the block finalization process
+// non-blocking.
+type RegisterEngine struct {
+	*component.ComponentManager
+	store                *RegisterStore
+	finalizationNotifier engine.Notifier
+}
+
+func NewRegisterEngine(store *RegisterStore) *RegisterEngine {
+	e := &RegisterEngine{
+		store:                store,
+		finalizationNotifier: engine.NewNotifier(),
+	}
+
+	// Add workers
+	e.ComponentManager = component.NewComponentManagerBuilder().
+		AddWorker(e.finalizationProcessingLoop).
+		Build()
+	return e
+}
+
+// OnBlockFinalized notifies the register store, via a single worker goroutine,
+// when a block is finalized.
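+//
+// A hypothetical wiring sketch; only NewRegisterEngine and OnBlockFinalized are
+// defined in this file, the context and block values are assumptions:
+//
+//	e := NewRegisterEngine(registerStore)
+//	e.Start(signalerCtx) // runs finalizationProcessingLoop as a worker
+//	<-e.Ready()
+//	e.OnBlockFinalized(finalizedBlock) // returns immediately; the worker picks up
+//	                                   // the notification and calls store.OnBlockFinalized()
+//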
+// This call is non-blocking in order to avoid blocking the consensus
+func (e *RegisterEngine) OnBlockFinalized(*model.Block) {
+	e.finalizationNotifier.Notify()
+}
+
+// finalizationProcessingLoop notifies the register store when a block is finalized
+// and handles the error if any
+func (e *RegisterEngine) finalizationProcessingLoop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+	ready()
+	notifier := e.finalizationNotifier.Channel()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-notifier:
+			err := e.store.OnBlockFinalized()
+			if err != nil {
+				ctx.Throw(fmt.Errorf("could not process finalized block: %w", err))
+			}
+		}
+	}
+}
diff --git a/engine/execution/storehouse/register_store.go b/engine/execution/storehouse/register_store.go
new file mode 100644
index 00000000000..e52e4d1fb5f
--- /dev/null
+++ b/engine/execution/storehouse/register_store.go
@@ -0,0 +1,294 @@
+package storehouse
+
+import (
+	"errors"
+	"fmt"
+
+	"go.uber.org/atomic"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine/execution"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+type RegisterStore struct {
+	memStore   *InMemoryRegisterStore
+	diskStore  execution.OnDiskRegisterStore
+	wal        execution.ExecutedFinalizedWAL
+	finalized  execution.FinalizedReader
+	log        zerolog.Logger
+	finalizing *atomic.Bool // making sure only one goroutine is finalizing at a time
+	notifier   execution.RegisterStoreNotifier
+}
+
+var _ execution.RegisterStore = (*RegisterStore)(nil)
+
+type NoopNotifier struct{}
+
+func NewNoopNotifier() *NoopNotifier { return &NoopNotifier{} }
+
+func (n *NoopNotifier) OnFinalizedAndExecutedHeightUpdated(height uint64) {}
+
+var _ execution.RegisterStoreNotifier = (*NoopNotifier)(nil)
+
+func NewRegisterStore(
+	diskStore execution.OnDiskRegisterStore,
+	wal execution.ExecutedFinalizedWAL,
+	finalized execution.FinalizedReader,
+	log zerolog.Logger,
+	notifier execution.RegisterStoreNotifier,
+) (*RegisterStore, error) {
+	if notifier == nil {
+		return nil, fmt.Errorf("notifier is empty, use NoopNotifier if you don't need it")
+	}
+
+	// replay the executed and finalized blocks from the write ahead logs
+	// to the OnDiskRegisterStore
+	height, err := syncDiskStore(wal, diskStore, log)
+	if err != nil {
+		return nil, fmt.Errorf("cannot sync disk store: %w", err)
+	}
+
+	// fetch the last executed and finalized block ID
+	finalizedID, err := finalized.FinalizedBlockIDAtHeight(height)
+	if err != nil {
+		return nil, fmt.Errorf("cannot get finalized block ID at height %d: %w", height, err)
+	}
+
+	// init the memStore with the last executed and finalized block ID
+	memStore := NewInMemoryRegisterStore(height, finalizedID)
+
+	log.Info().Msgf("initialized in memory register store at block %v, height %v", finalizedID, height)
+
+	return &RegisterStore{
+		memStore:   memStore,
+		diskStore:  diskStore,
+		wal:        wal,
+		finalized:  finalized,
+		finalizing: atomic.NewBool(false),
+		log:        log.With().Str("module", "register-store").Logger(),
+		notifier:   notifier,
+	}, nil
+}
+
+// GetRegister first tries to get the register from InMemoryRegisterStore, then OnDiskRegisterStore.
+// The queried block is in one of the following cases:
+// 1. below pruned height, and is conflicting
+// 2. below pruned height, and is finalized
+// 3. above pruned height, and is not executed
+// 4. above pruned height, and is executed, and register is updated
+// 5. above pruned height, and is executed, but register is not updated since pruned height
+// It returns:
+// - (value, nil) if the register value is found at the given block
+// - (nil, nil) if the register is not found
+// - (nil, storage.ErrHeightNotIndexed) if the height is below the first height that is indexed.
+// - (nil, storehouse.ErrNotExecuted) if the block is not executed yet
+// - (nil, storehouse.ErrNotExecuted) if the block is conflicting with the finalized block
+// - (nil, err) for any other exceptions
+func (r *RegisterStore) GetRegister(height uint64, blockID flow.Identifier, register flow.RegisterID) (flow.RegisterValue, error) {
+	reg, err := r.memStore.GetRegister(height, blockID, register)
+	// the height might be lower than the lowest height in memStore,
+	// or the register might not be found in memStore.
+	if err == nil {
+		// this register was updated before its block is finalized
+		return reg, nil
+	}
+
+	prunedError, ok := IsPrunedError(err)
+	if !ok {
+		// this means we ran into an exception. querying a register from the in-memory store should
+		// either return the register value or return a PrunedError.
+		return flow.RegisterValue{}, fmt.Errorf("cannot get register from memStore: %w", err)
+	}
+
+	// if the in-memory store returns a PrunedError, and the register height is above the pruned height,
+	// then it means the block is connected to the pruned block of the in-memory store, which is
+	// a finalized and executed block, so we can get its value from the on-disk store.
+	if height > prunedError.PrunedHeight {
+		return r.getAndConvertNotFoundErr(register, prunedError.PrunedHeight)
+	}
+
+	// if the block is below or equal to the pruned height, then there are two cases:
+	// the block is a finalized block, or a conflicting block.
+	// In order to distinguish, we need to query the finalized block ID at that height
+
+	var finalizedID flow.Identifier
+	if height == prunedError.PrunedHeight {
+		// if the block is at the pruned height, then the finalized ID is the pruned ID from the in-memory store;
+		// this saves a DB query
+		finalizedID = prunedError.PrunedID
+	} else {
+		// if the block is below the pruned height, we query the finalized ID from the finalized reader
+		finalizedID, err = r.finalized.FinalizedBlockIDAtHeight(height)
+		if err != nil {
+			return nil, fmt.Errorf("cannot get finalized block ID at height %d: %w", height, err)
+		}
+	}
+
+	isConflictingBlock := blockID != finalizedID
+	if isConflictingBlock {
+		// conflicting blocks are considered as un-executed
+		return flow.RegisterValue{}, fmt.Errorf("getting registers from conflicting block %v at height %v: %w", blockID, height, ErrNotExecuted)
+	}
+	return r.getAndConvertNotFoundErr(register, height)
+}
+
+// getAndConvertNotFoundErr returns nil if the register is not found in storage
+func (r *RegisterStore) getAndConvertNotFoundErr(register flow.RegisterID, height uint64) (flow.RegisterValue, error) {
+	val, err := r.diskStore.Get(register, height)
+	if errors.Is(err, storage.ErrNotFound) {
+		// FVM expects the error to be nil when a register is not found
+		return nil, nil
+	}
+	return val, err
+}
+
+// SaveRegisters saves to InMemoryRegisterStore first, then triggers the same check as OnBlockFinalized.
+// Depends on InMemoryRegisterStore.SaveRegisters
+// It returns:
+// - nil if the registers are saved successfully
+// - exception if the block is above the pruned height but does not connect to the pruned height (conflicting block).
+// - exception if the block is below the pruned height
+// - exception if the same block is saved again
+// - exception for any other exception
+func (r *RegisterStore) SaveRegisters(header *flow.Header, registers flow.RegisterEntries) error {
+	err := r.memStore.SaveRegisters(header.Height, header.ID(), header.ParentID, registers)
+	if err != nil {
+		return fmt.Errorf("cannot save register to memStore: %w", err)
+	}
+
+	err = r.OnBlockFinalized()
+	if err != nil {
+		return fmt.Errorf("cannot trigger OnBlockFinalized: %w", err)
+	}
+	return nil
+}
+
+// Depends on FinalizedReader's FinalizedBlockIDAtHeight
+// Depends on ExecutedFinalizedWAL.Append
+// Depends on OnDiskRegisterStore.SaveRegisters
+// OnBlockFinalized triggers the check of whether a block at the next height becomes finalized and executed.
+// The next height is the existing finalized and executed block's height + 1.
+// If a block at the next height becomes finalized and executed, then:
+// 1. write the registers to write ahead logs
+// 2. save the registers of the block to OnDiskRegisterStore
+// 3. prune the height in InMemoryRegisterStore
+func (r *RegisterStore) OnBlockFinalized() error {
+	// only one goroutine can execute OnBlockFinalized at a time
+	if !r.finalizing.CompareAndSwap(false, true) {
+		return nil
+	}
+
+	defer r.finalizing.Store(false)
+	return r.onBlockFinalized()
+}
+
+func (r *RegisterStore) onBlockFinalized() error {
+	latest := r.diskStore.LatestHeight()
+	next := latest + 1
+	blockID, err := r.finalized.FinalizedBlockIDAtHeight(next)
+	if errors.Is(err, storage.ErrNotFound) {
+		// next block is not finalized yet
+		return nil
+	}
+
+	regs, err := r.memStore.GetUpdatedRegisters(next, blockID)
+	if errors.Is(err, ErrNotExecuted) {
+		// next block is not executed yet
+		return nil
+	}
+
+	// TODO: append WAL
+	// err = r.wal.Append(next, regs)
+	// if err != nil {
+	// 	return fmt.Errorf("cannot write %v registers to write ahead logs for height %v: %w", len(regs), next, err)
+	// }
+
+	err = r.diskStore.Store(regs, next)
+	if err != nil {
+		return fmt.Errorf("cannot save %v registers to disk store for height %v: %w", len(regs), next, err)
+	}
+
+	r.notifier.OnFinalizedAndExecutedHeightUpdated(next)
+
+	err = r.memStore.Prune(next, blockID)
+	if err != nil {
+		return fmt.Errorf("cannot prune memStore for height %v: %w", next, err)
+	}
+
+	return r.onBlockFinalized() // check again until there is no more finalized block
+}
+
+// LastFinalizedAndExecutedHeight returns the height of the last finalized and executed block,
+// which has been saved in OnDiskRegisterStore
+func (r *RegisterStore) LastFinalizedAndExecutedHeight() uint64 {
+	// diskStore caches the latest height in memory
+	return r.diskStore.LatestHeight()
+}
+
+// IsBlockExecuted returns true if the block is executed, false if not executed.
+// Note: it returns (true, nil) even if the block has been pruned from the on-disk register store.
+func (r *RegisterStore) IsBlockExecuted(height uint64, blockID flow.Identifier) (bool, error) {
+	executed, err := r.memStore.IsBlockExecuted(height, blockID)
+	if err != nil {
+		// the only error memStore would return is when the given height is lower than the pruned height in memStore.
+		// Since the pruned height in memStore is a finalized and executed height, in order to know if the block
+		// is executed, we just need to check if this block is the finalized block at the given height.
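+		//
+		// summarized, the overall decision is (derived from the checks above and in memStore):
+		//	height >  prunedHeight: executed iff the block's registers are saved in memStore
+		//	height == prunedHeight: executed iff blockID is the pruned (finalized and executed) block
+		//	height <  prunedHeight: executed iff blockID is the finalized block at that height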
+		executed, err = r.isBlockFinalized(height, blockID)
+		return executed, err
+	}
+
+	return executed, nil
+}
+
+func (r *RegisterStore) isBlockFinalized(height uint64, blockID flow.Identifier) (bool, error) {
+	finalizedID, err := r.finalized.FinalizedBlockIDAtHeight(height)
+	if err != nil {
+		return false, fmt.Errorf("cannot get finalized block ID at height %d: %w", height, err)
+	}
+	return finalizedID == blockID, nil
+}
+
+// syncDiskStore replays the WAL to the disk store
+func syncDiskStore(
+	wal execution.ExecutedFinalizedWAL,
+	diskStore execution.OnDiskRegisterStore,
+	log zerolog.Logger,
+) (uint64, error) {
+	// TODO: replace diskStore.Latest with wal.Latest
+	// latest, err := r.wal.Latest()
+	var err error
+	latest := diskStore.LatestHeight() // tmp
+	if err != nil {
+		return 0, fmt.Errorf("cannot get latest height from write ahead logs: %w", err)
+	}
+
+	stored := diskStore.LatestHeight()
+
+	if stored > latest {
+		return 0, fmt.Errorf("latest height in storehouse %v is larger than latest height %v in write ahead logs", stored, latest)
+	}
+
+	if stored < latest {
+		// replay
+		reader := wal.GetReader(stored + 1)
+		for {
+			height, registers, err := reader.Next()
+			// TODO: to rename
+			if errors.Is(err, storage.ErrNotFound) {
+				break
+			}
+			if err != nil {
+				return 0, fmt.Errorf("cannot read registers from write ahead logs: %w", err)
+			}
+
+			err = diskStore.Store(registers, height)
+			if err != nil {
+				return 0, fmt.Errorf("cannot save registers to disk store at height %v: %w", height, err)
+			}
+		}
+	}
+
+	return latest, nil
+}
diff --git a/engine/execution/storehouse/register_store_metrics.go b/engine/execution/storehouse/register_store_metrics.go
new file mode 100644
index 00000000000..1542b29d384
--- /dev/null
+++ b/engine/execution/storehouse/register_store_metrics.go
@@ -0,0 +1,22 @@
+package storehouse
+
+import (
+	"github.com/onflow/flow-go/engine/execution"
+	"github.com/onflow/flow-go/module"
+)
+
+type RegisterStoreMetrics struct {
+	collector module.ExecutionMetrics
+}
+
+var _ execution.RegisterStoreNotifier = (*RegisterStoreMetrics)(nil)
+
+func NewRegisterStoreMetrics(collector module.ExecutionMetrics) *RegisterStoreMetrics {
+	return &RegisterStoreMetrics{
+		collector: collector,
+	}
+}
+
+func (m *RegisterStoreMetrics) OnFinalizedAndExecutedHeightUpdated(height uint64) {
+	m.collector.ExecutionLastFinalizedExecutedBlockHeight(height)
+}
diff --git a/engine/execution/storehouse/register_store_test.go b/engine/execution/storehouse/register_store_test.go
new file mode 100644
index 00000000000..c746a05dcba
--- /dev/null
+++ b/engine/execution/storehouse/register_store_test.go
@@ -0,0 +1,586 @@
+package storehouse_test
+
+import (
+	"fmt"
+	"sync"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/engine/execution"
+	"github.com/onflow/flow-go/engine/execution/storehouse"
+	"github.com/onflow/flow-go/engine/execution/testutil"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage/pebble"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+type notifier struct {
+	height uint64
+}
+
+func (n *notifier) OnFinalizedAndExecutedHeightUpdated(height uint64) {
+	n.height = height
+}
+
+func withRegisterStore(t *testing.T, fn func(
+	t *testing.T,
+	rs *storehouse.RegisterStore,
+	diskStore execution.OnDiskRegisterStore,
+	finalized *testutil.MockFinalizedReader,
+	rootHeight uint64,
+	endHeight uint64,
+	headers map[uint64]*flow.Header,
+	n *notifier,
+)) {
+	pebble.RunWithRegistersStorageAtInitialHeights(t, 10, 10, func(diskStore *pebble.Registers) {
+		log := unittest.Logger()
+		var wal execution.ExecutedFinalizedWAL
+		finalized, headerByHeight, highest := testutil.NewMockFinalizedReader(10, 100)
+		n := &notifier{height: 10}
+		rs, err := storehouse.NewRegisterStore(diskStore, wal, finalized, log, n)
+		require.NoError(t, err)
+		fn(t, rs, diskStore, finalized, 10, highest, headerByHeight, n)
+	})
+}
+
+// GetRegister should fail for
+// 1. unknown blockID
+// 2. height lower than OnDiskRegisterStore's root height
+// 3. height too high
+// 4. known block, but unknown register
+func TestRegisterStoreGetRegisterFail(t *testing.T) {
+	t.Parallel()
+	withRegisterStore(t, func(
+		t *testing.T,
+		rs *storehouse.RegisterStore,
+		diskStore execution.OnDiskRegisterStore,
+		finalized *testutil.MockFinalizedReader,
+		rootHeight uint64,
+		endHeight uint64,
+		headerByHeight map[uint64]*flow.Header,
+		n *notifier,
+	) {
+		// unknown block
+		_, err := rs.GetRegister(rootHeight+1, unknownBlock, unknownReg.Key)
+		require.Error(t, err)
+		require.ErrorIs(t, err, storehouse.ErrNotExecuted)
+
+		// too high
+		block11 := headerByHeight[rootHeight+1]
+		_, err = rs.GetRegister(rootHeight+1, block11.ID(), unknownReg.Key)
+		require.Error(t, err)
+		require.ErrorIs(t, err, storehouse.ErrNotExecuted)
+
+		// lower than root height
+		_, err = rs.GetRegister(rootHeight-1, unknownBlock, unknownReg.Key)
+		require.Error(t, err)
+		// TODO: enable it once implemented
+		// require.ErrorIs(t, err, storehouse.ErrPruned)
+
+		// known block, unknown register
+		rootBlock := headerByHeight[rootHeight]
+		val, err := rs.GetRegister(rootHeight, rootBlock.ID(), unknownReg.Key)
+		require.NoError(t, err)
+		require.Nil(t, val)
+	})
+}
+
+// SaveRegisters should fail for
+// 1. mismatching parent
+// 2. saved block
+func TestRegisterStoreSaveRegistersShouldFail(t *testing.T) {
+	t.Parallel()
+	withRegisterStore(t, func(
+		t *testing.T,
+		rs *storehouse.RegisterStore,
+		diskStore execution.OnDiskRegisterStore,
+		finalized *testutil.MockFinalizedReader,
+		rootHeight uint64,
+		endHeight uint64,
+		headerByHeight map[uint64]*flow.Header,
+		n *notifier,
+	) {
+		wrongParent := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(rootHeight + 1))
+		err := rs.SaveRegisters(wrongParent, flow.RegisterEntries{})
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "parent")
+
+		err = rs.SaveRegisters(headerByHeight[rootHeight], flow.RegisterEntries{})
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "pruned")
+	})
+}
+
+// SaveRegisters should succeed, and
+// 1. GetRegister can get saved registers,
+// 2. IsBlockExecuted should return true
+//
+// if SaveRegisters is called with empty registers, then
+// 1. LastFinalizedAndExecutedHeight should be updated
+// 2. IsBlockExecuted should return true
+func TestRegisterStoreSaveRegistersShouldOK(t *testing.T) {
+	t.Parallel()
+	withRegisterStore(t, func(
+		t *testing.T,
+		rs *storehouse.RegisterStore,
+		diskStore execution.OnDiskRegisterStore,
+		finalized *testutil.MockFinalizedReader,
+		rootHeight uint64,
+		endHeight uint64,
+		headerByHeight map[uint64]*flow.Header,
+		n *notifier,
+	) {
+		// not executed
+		executed, err := rs.IsBlockExecuted(rootHeight+1, headerByHeight[rootHeight+1].ID())
+		require.NoError(t, err)
+		require.False(t, executed)
+
+		// save block 11
+		reg := makeReg("X", "1")
+		err = rs.SaveRegisters(headerByHeight[rootHeight+1], flow.RegisterEntries{reg})
+		require.NoError(t, err)
+
+		// should get value
+		val, err := rs.GetRegister(rootHeight+1, headerByHeight[rootHeight+1].ID(), reg.Key)
+		require.NoError(t, err)
+		require.Equal(t, reg.Value, val)
+
+		// should become executed
+		executed, err = rs.IsBlockExecuted(rootHeight+1, headerByHeight[rootHeight+1].ID())
+		require.NoError(t, err)
+		require.True(t, executed)
+
+		// block 12 is empty
+		err = rs.SaveRegisters(headerByHeight[rootHeight+2], flow.RegisterEntries{})
+		require.NoError(t, err)
+
+		// should get same value
+		val, err = rs.GetRegister(rootHeight+1, headerByHeight[rootHeight+2].ID(), reg.Key)
+		require.NoError(t, err)
+		require.Equal(t, reg.Value, val)
+
+		// should become executed
+		executed, err = rs.IsBlockExecuted(rootHeight+1, headerByHeight[rootHeight+2].ID())
+		require.NoError(t, err)
+		require.True(t, executed)
+	})
+}
+
+// if 11 is the latest finalized block, then
+// 1. IsBlockExecuted should return true for finalized block 10
+// 2. IsBlockExecuted should return false for conflicting block 10
+// 3. IsBlockExecuted should return true for executed and unfinalized block 12
+// 4. IsBlockExecuted should return false for unexecuted block 13
+func TestRegisterStoreIsBlockExecuted(t *testing.T) {
+	t.Parallel()
+	withRegisterStore(t, func(
+		t *testing.T,
+		rs *storehouse.RegisterStore,
+		diskStore execution.OnDiskRegisterStore,
+		finalized *testutil.MockFinalizedReader,
+		rootHeight uint64,
+		endHeight uint64,
+		headerByHeight map[uint64]*flow.Header,
+		n *notifier,
+	) {
+		// save block 11
+		reg := makeReg("X", "1")
+		err := rs.SaveRegisters(headerByHeight[rootHeight+1], flow.RegisterEntries{reg})
+		require.NoError(t, err)
+
+		// save block 12
+		err = rs.SaveRegisters(headerByHeight[rootHeight+2], flow.RegisterEntries{makeReg("X", "2")})
+		require.NoError(t, err)
+
+		require.NoError(t, finalized.MockFinal(rootHeight+1))
+
+		require.NoError(t, rs.OnBlockFinalized()) // notify 11 is finalized
+
+		require.Equal(t, rootHeight+1, rs.LastFinalizedAndExecutedHeight())
+
+		executed, err := rs.IsBlockExecuted(rootHeight, headerByHeight[rootHeight].ID())
+		require.NoError(t, err)
+		require.True(t, executed)
+
+		executed, err = rs.IsBlockExecuted(rootHeight+1, headerByHeight[rootHeight+1].ID())
+		require.NoError(t, err)
+		require.True(t, executed)
+
+		executed, err = rs.IsBlockExecuted(rootHeight+2, headerByHeight[rootHeight+2].ID())
+		require.NoError(t, err)
+		require.True(t, executed)
+
+		executed, err = rs.IsBlockExecuted(rootHeight+3, headerByHeight[rootHeight+3].ID())
+		require.NoError(t, err)
+		require.False(t, executed)
+	})
+}
+
+// Test reading registers from finalized block
+func TestRegisterStoreReadingFromDisk(t *testing.T) {
+	t.Parallel()
+	withRegisterStore(t, func(
+		t *testing.T,
+		rs *storehouse.RegisterStore,
+		diskStore execution.OnDiskRegisterStore,
+		finalized *testutil.MockFinalizedReader,
+		rootHeight uint64,
+// Test reading registers from finalized block
+func TestRegisterStoreReadingFromDisk(t *testing.T) {
+	t.Parallel()
+	withRegisterStore(t, func(
+		t *testing.T,
+		rs *storehouse.RegisterStore,
+		diskStore execution.OnDiskRegisterStore,
+		finalized *testutil.MockFinalizedReader,
+		rootHeight uint64,
+		endHeight uint64,
+		headerByHeight map[uint64]*flow.Header,
+		n *notifier,
+	) {
+
+		// R <- 11 (X: 1, Y: 2) <- 12 (Y: 3) <- 13 (X: 4)
+		// save block 11
+		err := rs.SaveRegisters(headerByHeight[rootHeight+1], flow.RegisterEntries{makeReg("X", "1"), makeReg("Y", "2")})
+		require.NoError(t, err)
+
+		// save block 12
+		err = rs.SaveRegisters(headerByHeight[rootHeight+2], flow.RegisterEntries{makeReg("Y", "3")})
+		require.NoError(t, err)
+
+		// save block 13
+		err = rs.SaveRegisters(headerByHeight[rootHeight+3], flow.RegisterEntries{makeReg("X", "4")})
+		require.NoError(t, err)
+
+		require.Equal(t, rootHeight, n.height)
+
+		require.NoError(t, finalized.MockFinal(rootHeight+2))
+		require.NoError(t, rs.OnBlockFinalized()) // notify 12 is finalized
+
+		require.Equal(t, rootHeight+2, n.height)
+
+		val, err := rs.GetRegister(rootHeight+1, headerByHeight[rootHeight+1].ID(), makeReg("Y", "2").Key)
+		require.NoError(t, err)
+		// Y at block 11 is now stored in the OnDiskRegisterStore; its value is 2
+		require.Equal(t, makeReg("Y", "2").Value, val)
+
+		val, err = rs.GetRegister(rootHeight+2, headerByHeight[rootHeight+2].ID(), makeReg("X", "1").Key)
+		require.NoError(t, err)
+		// X was not updated at block 12, so reading at block 12 returns the on-disk value from block 11, which is 1
+		require.Equal(t, makeReg("X", "1").Value, val)
+
+		val, err = rs.GetRegister(rootHeight+3, headerByHeight[rootHeight+3].ID(), makeReg("Y", "3").Key)
+		require.NoError(t, err)
+		// Y was last updated at block 12 and stored in the OnDiskRegisterStore, so reading at block 13 returns 3
+		require.Equal(t, makeReg("Y", "3").Value, val)
+
+		// block 14 is neither executed nor finalized, so reading at its height fails
+		_, err = rs.GetRegister(rootHeight+4, headerByHeight[rootHeight+4].ID(), makeReg("Y", "3").Key)
+		require.Error(t, err)
+	})
+}
+
+func TestRegisterStoreReadingFromInMemStore(t *testing.T) {
+	t.Parallel()
+	withRegisterStore(t, func(
+		t *testing.T,
+		rs *storehouse.RegisterStore,
+		diskStore execution.OnDiskRegisterStore,
+		finalized *testutil.MockFinalizedReader,
+		rootHeight uint64,
+		endHeight uint64,
+		headerByHeight map[uint64]*flow.Header,
+		n *notifier,
+	) {
+
+		// R <- 11 (X: 1, Y: 2) <- 12 (Y: 3)
+		//   ^- 11' (X: 4)   (a conflicting fork at height 11)
+
+		// save block 11
+		err := rs.SaveRegisters(headerByHeight[rootHeight+1], flow.RegisterEntries{makeReg("X", "1"), makeReg("Y", "2")})
+		require.NoError(t, err)
+
+		// save block 12
+		err = rs.SaveRegisters(headerByHeight[rootHeight+2], flow.RegisterEntries{makeReg("Y", "3")})
+		require.NoError(t, err)
+
+		// save block 11 fork
+		block11Fork := unittest.BlockWithParentFixture(headerByHeight[rootHeight]).ToHeader()
+		err = rs.SaveRegisters(block11Fork, flow.RegisterEntries{makeReg("X", "4")})
+		require.NoError(t, err)
+
+		val, err := rs.GetRegister(rootHeight+1, headerByHeight[rootHeight+1].ID(), makeReg("X", "1").Key)
+		require.NoError(t, err)
+		require.Equal(t, makeReg("X", "1").Value, val)
+
+		val, err = rs.GetRegister(rootHeight+1, headerByHeight[rootHeight+1].ID(), makeReg("Y", "2").Key)
+		require.NoError(t, err)
+		require.Equal(t, makeReg("Y", "2").Value, val)
+
+		val, err = rs.GetRegister(rootHeight+2, headerByHeight[rootHeight+2].ID(), makeReg("X", "1").Key)
+		require.NoError(t, err)
+		require.Equal(t, makeReg("X", "1").Value, val)
+
+		val, err = rs.GetRegister(rootHeight+2, headerByHeight[rootHeight+2].ID(), makeReg("Y", "3").Key)
+		require.NoError(t, err)
+		require.Equal(t, makeReg("Y", "3").Value, val)
+
+		val, err = rs.GetRegister(rootHeight+1, block11Fork.ID(), makeReg("X", "4").Key)
+		require.NoError(t, err)
+		require.Equal(t, makeReg("X", "4").Value, val)
+
+		// finalizing block 11 should prune the conflicting block 11 fork, after which
+		// registers can no longer be read from the pruned fork
+		require.NoError(t, finalized.MockFinal(rootHeight+1))
+		require.NoError(t, rs.OnBlockFinalized()) // notify 11 is finalized
+
+		val, err = rs.GetRegister(rootHeight+1, block11Fork.ID(), makeReg("X", "4").Key)
+		require.Error(t, err, fmt.Sprintf("%v", val))
+		// pruned conflicting forks are considered not executed
+		require.ErrorIs(t, err, storehouse.ErrNotExecuted)
+	})
+}
+
+func TestRegisterStoreReadRegisterAtPrunedHeight(t *testing.T) {
+	t.Parallel()
+	withRegisterStore(t, func(
+		t *testing.T,
+		rs *storehouse.RegisterStore,
+		diskStore execution.OnDiskRegisterStore,
+		finalized *testutil.MockFinalizedReader,
+		rootHeight uint64,
+		endHeight uint64,
+		headerByHeight map[uint64]*flow.Header,
+		n *notifier,
+	) {
+
+		// R <- 11 (X: 1)
+
+		// if we execute first and finalize later, we should be able to read the register at the pruned height
+		// save block 11
+		err := rs.SaveRegisters(headerByHeight[rootHeight+1], flow.RegisterEntries{makeReg("X", "1")})
+		require.NoError(t, err)
+		require.Equal(t, 2, finalized.FinalizedCalled()) // called by SaveRegisters with height 11
+
+		// finalize block 11
+		require.NoError(t, finalized.MockFinal(rootHeight+1))
+		require.NoError(t, rs.OnBlockFinalized()) // notify 11 is finalized
+
+		val, err := rs.GetRegister(rootHeight+1, headerByHeight[rootHeight+1].ID(), makeReg("X", "").Key)
+		require.NoError(t, err)
+		require.Equal(t, makeReg("X", "1").Value, val)
+
+		// R <- 11 (X: 1) <- 12 (X: 2)
+		// if we finalize first and execute later, we should still be able to read the register
+		// at the pruned height: values at finalized heights are served from the on-disk store
+		// finalize block 12
+		require.NoError(t, finalized.MockFinal(rootHeight+2))
+		require.NoError(t, rs.OnBlockFinalized()) // notify 12 is finalized
+
+		// save block 12
+		err = rs.SaveRegisters(headerByHeight[rootHeight+2], flow.RegisterEntries{makeReg("X", "2")})
+		require.NoError(t, err)
+
+		val, err = rs.GetRegister(rootHeight+1, headerByHeight[rootHeight+1].ID(), makeReg("X", "").Key)
+		require.NoError(t, err)
+		require.Equal(t, makeReg("X", "1").Value, val)
+
+		val, err = rs.GetRegister(rootHeight+2, headerByHeight[rootHeight+2].ID(), makeReg("X", "").Key)
+		require.NoError(t, err)
+		require.Equal(t, makeReg("X", "2").Value, val)
+	})
+}
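Both orderings converge on the same read path: once a height is at or below the last finalized-and-executed height, reads are served from the on-disk store, which retains historical values, so pruning only discards in-memory fork data. A rough sketch of that routing, again with assumed helper signatures rather than the actual RegisterStore internals:

// Sketch only: the height-based read routing the two tests above exercise.
// diskLookup and memLookup are assumed helpers, not flow-go APIs.
func getRegisterSketch(
	height uint64,
	blockID flow.Identifier,
	key flow.RegisterID,
	lastFinalizedAndExecuted uint64,
	diskLookup func(uint64, flow.RegisterID) (flow.RegisterValue, error),
	memLookup func(uint64, flow.Identifier, flow.RegisterID) (flow.RegisterValue, error),
) (flow.RegisterValue, error) {
	if height <= lastFinalizedAndExecuted {
		// finalized history remains readable on disk even after the in-memory
		// store prunes everything at or below this height
		return diskLookup(height, key)
	}
	// unfinalized heights are fork-aware: the in-memory lookup walks the block's
	// ancestry and falls back to disk at the pruned boundary
	return memLookup(height, blockID, key)
}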
+
+// Test that FinalizedBlockIDAtHeight is not called when getting registers while executing
+// a finalized block, or when finalizing an already-executed block
+func TestRegisterStoreExecuteFinalizedBlockOrFinalizeExecutedBlockShouldNotCallFinalizedHeight(t *testing.T) {
+	t.Parallel()
+	withRegisterStore(t, func(
+		t *testing.T,
+		rs *storehouse.RegisterStore,
+		diskStore execution.OnDiskRegisterStore,
+		finalized *testutil.MockFinalizedReader,
+		rootHeight uint64,
+		endHeight uint64,
+		headerByHeight map[uint64]*flow.Header,
+		n *notifier,
+	) {
+
+		require.Equal(t, 1, finalized.FinalizedCalled()) // called by NewRegisterStore
+		// R <- 11 (X: 1)
+
+		val, err := rs.GetRegister(rootHeight, headerByHeight[rootHeight].ID(), makeReg("X", "").Key)
+		require.NoError(t, err)
+		require.Nil(t, val)
+		require.Equal(t, 1, finalized.FinalizedCalled()) // no FinalizedBlockIDAtHeight called
+
+		// if we execute first and finalize later, we should be able to read the register at the pruned height
+		// save block 11
+		err = rs.SaveRegisters(headerByHeight[rootHeight+1], flow.RegisterEntries{makeReg("X", "1")})
+		require.NoError(t, err)
+		require.Equal(t, 2, finalized.FinalizedCalled()) // called by SaveRegisters with height 11
+
+		// finalize block 11
+		require.NoError(t, finalized.MockFinal(rootHeight+1))
+		require.NoError(t, rs.OnBlockFinalized()) // notify 11 is finalized
+		require.Equal(t, 4, finalized.FinalizedCalled()) // called when checking whether heights 11 and 12 are finalized
+
+		// R <- 11 (X: 1) <- 12 (X: 2)
+		// if we finalize first and execute later, reads at the pruned height are still served from disk
+		// finalize block 12
+		require.NoError(t, finalized.MockFinal(rootHeight+2))
+		require.NoError(t, rs.OnBlockFinalized()) // notify 12 is finalized
+		require.Equal(t, 5, finalized.FinalizedCalled()) // called when checking whether heights 12 and 13 are finalized
+
+		val, err = rs.GetRegister(rootHeight+1, headerByHeight[rootHeight+1].ID(), makeReg("X", "").Key)
+		require.NoError(t, err)
+		require.Equal(t, makeReg("X", "1").Value, val)
+		require.Equal(t, 5, finalized.FinalizedCalled()) // no FinalizedBlockIDAtHeight called
+
+		// save block 12
+		err = rs.SaveRegisters(headerByHeight[rootHeight+2], flow.RegisterEntries{makeReg("X", "2")})
+		require.NoError(t, err)
+		require.Equal(t, 7, finalized.FinalizedCalled()) // called by SaveRegisters when checking heights 12 and 13
+	})
+}
+
+// Execute first then finalize later
+// SaveRegisters(1), SaveRegisters(2), SaveRegisters(3), then
+// OnBlockFinalized(1), OnBlockFinalized(2), OnBlockFinalized(3) should
+// 1. update LastFinalizedAndExecutedHeight
+// 2. InMemoryRegisterStore should have correct pruned height
+// 3. NewRegisterStore with the same OnDiskRegisterStore again should return correct LastFinalizedAndExecutedHeight
+func TestRegisterStoreExecuteFirstFinalizeLater(t *testing.T) {
+	t.Parallel()
+	withRegisterStore(t, func(
+		t *testing.T,
+		rs *storehouse.RegisterStore,
+		diskStore execution.OnDiskRegisterStore,
+		finalized *testutil.MockFinalizedReader,
+		rootHeight uint64,
+		endHeight uint64,
+		headerByHeight map[uint64]*flow.Header,
+		n *notifier,
+	) {
+		// save block 11
+		err := rs.SaveRegisters(headerByHeight[rootHeight+1], flow.RegisterEntries{makeReg("X", "1")})
+		require.NoError(t, err)
+		require.Equal(t, rootHeight, rs.LastFinalizedAndExecutedHeight())
+
+		// save block 12
+		err = rs.SaveRegisters(headerByHeight[rootHeight+2], flow.RegisterEntries{makeReg("X", "2")})
+		require.NoError(t, err)
+		require.Equal(t, rootHeight, rs.LastFinalizedAndExecutedHeight())
+
+		// save block 13
+		err = rs.SaveRegisters(headerByHeight[rootHeight+3], flow.RegisterEntries{makeReg("X", "3")})
+		require.NoError(t, err)
+		require.Equal(t, rootHeight, rs.LastFinalizedAndExecutedHeight())
+
+		require.Equal(t, rootHeight, n.height)
+
+		require.NoError(t, finalized.MockFinal(rootHeight+1))
+		require.NoError(t, rs.OnBlockFinalized()) // notify 11 is finalized
+		require.Equal(t, rootHeight+1, rs.LastFinalizedAndExecutedHeight())
+		require.Equal(t, rootHeight+1, n.height)
+
+		require.NoError(t, finalized.MockFinal(rootHeight+2))
+		require.NoError(t, rs.OnBlockFinalized()) // notify 12 is finalized
+		require.Equal(t, rootHeight+2, rs.LastFinalizedAndExecutedHeight())
+		require.Equal(t, rootHeight+2, n.height)
+
+		require.NoError(t, finalized.MockFinal(rootHeight+3))
+		require.NoError(t, rs.OnBlockFinalized()) // notify 13 is finalized
+		require.Equal(t, rootHeight+3, rs.LastFinalizedAndExecutedHeight())
+		require.Equal(t, rootHeight+3, n.height)
+	})
+}
+
+// Finalize first then execute later
+// OnBlockFinalized(1), OnBlockFinalized(2), OnBlockFinalized(3), then
+// SaveRegisters(1), SaveRegisters(2), SaveRegisters(3) should
+// 1. update LastFinalizedAndExecutedHeight
+// 2. InMemoryRegisterStore should have correct pruned height
+// 3. 
NewRegisterStore with the same OnDiskRegisterStore again should return correct LastFinalizedAndExecutedHeight +func TestRegisterStoreFinalizeFirstExecuteLater(t *testing.T) { + t.Parallel() + withRegisterStore(t, func( + t *testing.T, + rs *storehouse.RegisterStore, + diskStore execution.OnDiskRegisterStore, + finalized *testutil.MockFinalizedReader, + rootHeight uint64, + endHeight uint64, + headerByHeight map[uint64]*flow.Header, + n *notifier, + ) { + require.NoError(t, finalized.MockFinal(rootHeight+1)) + require.NoError(t, rs.OnBlockFinalized()) // notify 11 is finalized + require.Equal(t, rootHeight, rs.LastFinalizedAndExecutedHeight(), fmt.Sprintf("LastFinalizedAndExecutedHeight: %d", rs.LastFinalizedAndExecutedHeight())) + + require.NoError(t, finalized.MockFinal(rootHeight+2)) + require.NoError(t, rs.OnBlockFinalized()) // notify 12 is finalized + require.Equal(t, rootHeight, rs.LastFinalizedAndExecutedHeight(), fmt.Sprintf("LastFinalizedAndExecutedHeight: %d", rs.LastFinalizedAndExecutedHeight())) + + require.NoError(t, finalized.MockFinal(rootHeight+3)) + require.NoError(t, rs.OnBlockFinalized()) // notify 13 is finalized + require.Equal(t, rootHeight, rs.LastFinalizedAndExecutedHeight()) + + require.Equal(t, rootHeight, n.height) + + // save block 11 + err := rs.SaveRegisters(headerByHeight[rootHeight+1], flow.RegisterEntries{makeReg("X", "1")}) + require.NoError(t, err) + require.Equal(t, rootHeight+1, rs.LastFinalizedAndExecutedHeight()) + require.Equal(t, rootHeight+1, n.height) + + // save block 12 + err = rs.SaveRegisters(headerByHeight[rootHeight+2], flow.RegisterEntries{makeReg("X", "2")}) + require.NoError(t, err) + require.Equal(t, rootHeight+2, rs.LastFinalizedAndExecutedHeight()) + require.Equal(t, rootHeight+2, n.height) + + // save block 13 + err = rs.SaveRegisters(headerByHeight[rootHeight+3], flow.RegisterEntries{makeReg("X", "3")}) + require.NoError(t, err) + require.Equal(t, rootHeight+3, rs.LastFinalizedAndExecutedHeight()) + require.Equal(t, rootHeight+3, n.height) + }) +} + +// Finalize and Execute concurrently +// SaveRegisters(1), SaveRegisters(2), ... SaveRegisters(100), happen concurrently with +// OnBlockFinalized(1), OnBlockFinalized(2), ... 
OnBlockFinalized(100), should update LastFinalizedAndExecutedHeight +func TestRegisterStoreConcurrentFinalizeAndExecute(t *testing.T) { + t.Parallel() + withRegisterStore(t, func( + t *testing.T, + rs *storehouse.RegisterStore, + diskStore execution.OnDiskRegisterStore, + finalized *testutil.MockFinalizedReader, + rootHeight uint64, + endHeight uint64, + headerByHeight map[uint64]*flow.Header, + n *notifier, + ) { + + var wg sync.WaitGroup + savedHeights := make(chan uint64, len(headerByHeight)) // enough buffer so that producer won't be blocked + + wg.Add(1) + go func() { + defer wg.Done() + + for savedHeight := range savedHeights { + err := finalized.MockFinal(savedHeight) + require.NoError(t, err) + require.NoError(t, rs.OnBlockFinalized(), fmt.Sprintf("saved height %v", savedHeight)) + } + }() + + for height := rootHeight + 1; height <= endHeight; height++ { + if height >= 50 { + savedHeights <- height + } + + err := rs.SaveRegisters(headerByHeight[height], flow.RegisterEntries{makeReg("X", fmt.Sprintf("%d", height))}) + require.NoError(t, err) + } + close(savedHeights) + + wg.Wait() // wait until all heights are finalized + + // after all heights are executed and finalized, the LastFinalizedAndExecutedHeight should be the last height + require.Equal(t, endHeight, rs.LastFinalizedAndExecutedHeight()) + }) +} diff --git a/engine/execution/storehouse/storehouse_test.go b/engine/execution/storehouse/storehouse_test.go new file mode 100644 index 00000000000..9a1fbe378f2 --- /dev/null +++ b/engine/execution/storehouse/storehouse_test.go @@ -0,0 +1,19 @@ +package storehouse_test + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +var unknownBlock = unittest.IdentifierFixture() +var unknownReg = makeReg("unknown", "unknown") + +func makeReg(key string, value string) flow.RegisterEntry { + return flow.RegisterEntry{ + Key: flow.RegisterID{ + Owner: "owner", + Key: key, + }, + Value: []byte(value), + } +} diff --git a/engine/execution/testutil/finalized_reader.go b/engine/execution/testutil/finalized_reader.go new file mode 100644 index 00000000000..ddc8fa81268 --- /dev/null +++ b/engine/execution/testutil/finalized_reader.go @@ -0,0 +1,73 @@ +package testutil + +import ( + "fmt" + + "go.uber.org/atomic" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" +) + +type MockFinalizedReader struct { + headerByHeight map[uint64]*flow.Header + blockByHeight map[uint64]*flow.Block + lowest uint64 + highest uint64 + finalizedHeight *atomic.Uint64 + finalizedCalled *atomic.Int64 +} + +func NewMockFinalizedReader(initHeight uint64, count int) (*MockFinalizedReader, map[uint64]*flow.Header, uint64) { + root := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(initHeight)) + blocks := unittest.ChainFixtureFrom(count, root) + headerByHeight := make(map[uint64]*flow.Header, len(blocks)+1) + headerByHeight[root.Height] = root + + blockByHeight := make(map[uint64]*flow.Block, len(blocks)+1) + for _, b := range blocks { + headerByHeight[b.Height] = b.ToHeader() + blockByHeight[b.Height] = b + } + + highest := blocks[len(blocks)-1].Height + return &MockFinalizedReader{ + headerByHeight: headerByHeight, + blockByHeight: blockByHeight, + lowest: initHeight, + highest: highest, + finalizedHeight: atomic.NewUint64(initHeight), + finalizedCalled: atomic.NewInt64(0), + }, headerByHeight, highest +} + +func (r *MockFinalizedReader) FinalizedBlockIDAtHeight(height uint64) (flow.Identifier, 
error) { + r.finalizedCalled.Add(1) + finalized := r.finalizedHeight.Load() + if height > finalized { + return flow.Identifier{}, storage.ErrNotFound + } + + if height < r.lowest { + return flow.ZeroID, fmt.Errorf("height %d is out of range [%d, %d]", height, r.lowest, r.highest) + } + return r.headerByHeight[height].ID(), nil +} + +func (r *MockFinalizedReader) MockFinal(height uint64) error { + if height < r.lowest || height > r.highest { + return fmt.Errorf("height %d is out of range [%d, %d]", height, r.lowest, r.highest) + } + + r.finalizedHeight.Store(height) + return nil +} + +func (r *MockFinalizedReader) BlockAtHeight(height uint64) *flow.Block { + return r.blockByHeight[height] +} + +func (r *MockFinalizedReader) FinalizedCalled() int { + return int(r.finalizedCalled.Load()) +} diff --git a/engine/execution/testutil/fixtures.go b/engine/execution/testutil/fixtures.go index 57c125786f2..fb9ee6e398a 100644 --- a/engine/execution/testutil/fixtures.go +++ b/engine/execution/testutil/fixtures.go @@ -8,14 +8,19 @@ import ( "testing" "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/cadence/stdlib" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/engine/execution" "github.com/onflow/flow-go/engine/execution/utils" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + envMock "github.com/onflow/flow-go/fvm/environment/mock" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/pathfinder" @@ -23,34 +28,35 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/epochs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/state/protocol" + protocolMock "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" ) -func CreateContractDeploymentTransaction(contractName string, contract string, authorizer flow.Address, chain flow.Chain) *flow.TransactionBody { - +func CreateContractDeploymentTransaction(contractName string, contract string, authorizer flow.Address, chain flow.Chain) *flow.TransactionBodyBuilder { encoded := hex.EncodeToString([]byte(contract)) script := []byte(fmt.Sprintf(`transaction { - prepare(signer: AuthAccount, service: AuthAccount) { + prepare(signer: auth(AddContract) &Account, service: &Account) { signer.contracts.add(name: "%s", code: "%s".decodeHex()) } }`, contractName, encoded)) - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript(script). AddAuthorizer(authorizer). AddAuthorizer(chain.ServiceAddress()) - return txBody + return txBodyBuilder } -func UpdateContractDeploymentTransaction(contractName string, contract string, authorizer flow.Address, chain flow.Chain) *flow.TransactionBody { +func UpdateContractDeploymentTransaction(contractName string, contract string, authorizer flow.Address, chain flow.Chain) *flow.TransactionBodyBuilder { encoded := hex.EncodeToString([]byte(contract)) - return flow.NewTransactionBody(). + return flow.NewTransactionBodyBuilder(). 
SetScript([]byte(fmt.Sprintf(`transaction { - prepare(signer: AuthAccount, service: AuthAccount) { - signer.contracts.update__experimental(name: "%s", code: "%s".decodeHex()) + prepare(signer: auth(UpdateContract) &Account, service: &Account) { + signer.contracts.update(name: "%s", code: "%s".decodeHex()) } }`, contractName, encoded)), ). @@ -58,23 +64,23 @@ func UpdateContractDeploymentTransaction(contractName string, contract string, a AddAuthorizer(chain.ServiceAddress()) } -func UpdateContractUnathorizedDeploymentTransaction(contractName string, contract string, authorizer flow.Address) *flow.TransactionBody { +func UpdateContractUnathorizedDeploymentTransaction(contractName string, contract string, authorizer flow.Address) *flow.TransactionBodyBuilder { encoded := hex.EncodeToString([]byte(contract)) - return flow.NewTransactionBody(). + return flow.NewTransactionBodyBuilder(). SetScript([]byte(fmt.Sprintf(`transaction { - prepare(signer: AuthAccount) { - signer.contracts.update__experimental(name: "%s", code: "%s".decodeHex()) + prepare(signer: auth(UpdateContract) &Account) { + signer.contracts.update(name: "%s", code: "%s".decodeHex()) } }`, contractName, encoded)), ). AddAuthorizer(authorizer) } -func RemoveContractDeploymentTransaction(contractName string, authorizer flow.Address, chain flow.Chain) *flow.TransactionBody { - return flow.NewTransactionBody(). +func RemoveContractDeploymentTransaction(contractName string, authorizer flow.Address, chain flow.Chain) *flow.TransactionBodyBuilder { + return flow.NewTransactionBodyBuilder(). SetScript([]byte(fmt.Sprintf(`transaction { - prepare(signer: AuthAccount, service: AuthAccount) { + prepare(signer: auth(RemoveContract) &Account, service: &Account) { signer.contracts.remove(name: "%s") } }`, contractName)), @@ -83,10 +89,10 @@ func RemoveContractDeploymentTransaction(contractName string, authorizer flow.Ad AddAuthorizer(chain.ServiceAddress()) } -func RemoveContractUnathorizedDeploymentTransaction(contractName string, authorizer flow.Address) *flow.TransactionBody { - return flow.NewTransactionBody(). +func RemoveContractUnathorizedDeploymentTransaction(contractName string, authorizer flow.Address) *flow.TransactionBodyBuilder { + return flow.NewTransactionBodyBuilder(). SetScript([]byte(fmt.Sprintf(`transaction { - prepare(signer: AuthAccount) { + prepare(signer: auth(RemoveContract) &Account) { signer.contracts.remove(name: "%s") } }`, contractName)), @@ -94,12 +100,12 @@ func RemoveContractUnathorizedDeploymentTransaction(contractName string, authori AddAuthorizer(authorizer) } -func CreateUnauthorizedContractDeploymentTransaction(contractName string, contract string, authorizer flow.Address) *flow.TransactionBody { +func CreateUnauthorizedContractDeploymentTransaction(contractName string, contract string, authorizer flow.Address) *flow.TransactionBodyBuilder { encoded := hex.EncodeToString([]byte(contract)) - return flow.NewTransactionBody(). + return flow.NewTransactionBodyBuilder(). 
SetScript([]byte(fmt.Sprintf(`transaction { - prepare(signer: AuthAccount) { + prepare(signer: auth(AddContract) &Account) { signer.contracts.add(name: "%s", code: "%s".decodeHex()) } }`, contractName, encoded)), @@ -107,17 +113,24 @@ func CreateUnauthorizedContractDeploymentTransaction(contractName string, contra AddAuthorizer(authorizer) } -func SignPayload( - tx *flow.TransactionBody, - account flow.Address, +func SignTransaction( + tx *flow.TransactionBodyBuilder, + address flow.Address, privateKey flow.AccountPrivateKey, + seqNum uint64, ) error { + tx.SetProposalKey(address, 0, seqNum) + tx.SetPayer(address) + return SignEnvelope(tx, address, privateKey) +} + +func SignEnvelope(tx *flow.TransactionBodyBuilder, account flow.Address, privateKey flow.AccountPrivateKey) error { hasher, err := utils.NewHasher(privateKey.HashAlgo) if err != nil { return fmt.Errorf("failed to create hasher: %w", err) } - err = tx.SignPayload(account, 0, privateKey.PrivateKey, hasher) + err = tx.SignEnvelope(account, 0, privateKey.PrivateKey, hasher) if err != nil { return fmt.Errorf("failed to sign transaction: %w", err) @@ -126,13 +139,17 @@ func SignPayload( return nil } -func SignEnvelope(tx *flow.TransactionBody, account flow.Address, privateKey flow.AccountPrivateKey) error { +func SignPayload( + tx *flow.TransactionBodyBuilder, + account flow.Address, + privateKey flow.AccountPrivateKey, +) error { hasher, err := utils.NewHasher(privateKey.HashAlgo) if err != nil { return fmt.Errorf("failed to create hasher: %w", err) } - err = tx.SignEnvelope(account, 0, privateKey.PrivateKey, hasher) + err = tx.SignPayload(account, 0, privateKey.PrivateKey, hasher) if err != nil { return fmt.Errorf("failed to sign transaction: %w", err) @@ -141,18 +158,7 @@ func SignEnvelope(tx *flow.TransactionBody, account flow.Address, privateKey flo return nil } -func SignTransaction( - tx *flow.TransactionBody, - address flow.Address, - privateKey flow.AccountPrivateKey, - seqNum uint64, -) error { - tx.SetProposalKey(address, 0, seqNum) - tx.SetPayer(address) - return SignEnvelope(tx, address, privateKey) -} - -func SignTransactionAsServiceAccount(tx *flow.TransactionBody, seqNum uint64, chain flow.Chain) error { +func SignTransactionAsServiceAccount(tx *flow.TransactionBodyBuilder, seqNum uint64, chain flow.Chain) error { return SignTransaction(tx, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey, seqNum) } @@ -227,8 +233,8 @@ func CreateAccountsWithSimpleAddresses( scriptTemplate := ` transaction(publicKey: [UInt8]) { - prepare(signer: AuthAccount) { - let acct = AuthAccount(payer: signer) + prepare(signer: auth(AddKey, BorrowValue) &Account) { + let acct = Account(payer: signer) let publicKey2 = PublicKey( publicKey: publicKey, signatureAlgorithm: SignatureAlgorithm.%s @@ -258,10 +264,17 @@ func CreateAccountsWithSimpleAddresses( ), ) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript(script). + SetPayer(serviceAddress). AddArgument(encCadPublicKey). - AddAuthorizer(serviceAddress) + AddAuthorizer(serviceAddress). 
+ Build() + if err != nil { + return snapshot.SnapshotTree{}, nil, fmt.Errorf( + "failed to build transaction body: %w", + err) + } tx := fvm.Transaction(txBody, 0) executionSnapshot, output, err := vm.Run(ctx, tx, snapshotTree) @@ -281,13 +294,22 @@ func CreateAccountsWithSimpleAddresses( for _, event := range output.Events { if event.Type == flow.EventAccountCreated { - data, err := jsoncdc.Decode(nil, event.Payload) + data, err := ccf.Decode(nil, event.Payload) if err != nil { - return snapshotTree, nil, errors.New( - "error decoding events") + return snapshotTree, nil, errors.New("error decoding events") + } + + event, ok := data.(cadence.Event) + if !ok { + return snapshotTree, nil, errors.New("error decoding events") } - addr = flow.ConvertAddress( - data.(cadence.Event).Fields[0].(cadence.Address)) + + address := cadence.SearchFieldByName( + event, + stdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address) + + addr = flow.ConvertAddress(address) break } } @@ -335,13 +357,13 @@ func BytesToCadenceArray(l []byte) cadence.Array { values[i] = cadence.NewUInt8(b) } - return cadence.NewArray(values).WithType(cadence.NewVariableSizedArrayType(cadence.NewUInt8Type())) + return cadence.NewArray(values).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) } // CreateAccountCreationTransaction creates a transaction which will create a new account. // // This function returns a randomly generated private key and the transaction. -func CreateAccountCreationTransaction(t testing.TB, chain flow.Chain) (flow.AccountPrivateKey, *flow.TransactionBody) { +func CreateAccountCreationTransaction(t testing.TB, chain flow.Chain) (flow.AccountPrivateKey, *flow.TransactionBodyBuilder) { accountKey, err := GenerateAccountPrivateKey() require.NoError(t, err) encPublicKey := accountKey.PublicKey(1000).PublicKey.Encode() @@ -352,8 +374,8 @@ func CreateAccountCreationTransaction(t testing.TB, chain flow.Chain) (flow.Acco // define the cadence script script := fmt.Sprintf(` transaction(publicKey: [UInt8]) { - prepare(signer: AuthAccount) { - let acct = AuthAccount(payer: signer) + prepare(signer: auth(AddKey, BorrowValue) &Account) { + let acct = Account(payer: signer) let publicKey2 = PublicKey( publicKey: publicKey, signatureAlgorithm: SignatureAlgorithm.%s @@ -370,7 +392,7 @@ func CreateAccountCreationTransaction(t testing.TB, chain flow.Chain) (flow.Acco ) // create the transaction to create the account - tx := flow.NewTransactionBody(). + tx := flow.NewTransactionBodyBuilder(). SetScript([]byte(script)). AddArgument(encCadPublicKey). AddAuthorizer(chain.ServiceAddress()) @@ -381,7 +403,7 @@ func CreateAccountCreationTransaction(t testing.TB, chain flow.Chain) (flow.Acco // CreateMultiAccountCreationTransaction creates a transaction which will create many (n) new account. // // This function returns a randomly generated private key and the transaction. 
-func CreateMultiAccountCreationTransaction(t *testing.T, chain flow.Chain, n int) (flow.AccountPrivateKey, *flow.TransactionBody) { +func CreateMultiAccountCreationTransaction(t *testing.T, chain flow.Chain, n int) (flow.AccountPrivateKey, *flow.TransactionBodyBuilder) { accountKey, err := GenerateAccountPrivateKey() require.NoError(t, err) encPublicKey := accountKey.PublicKey(1000).PublicKey.Encode() @@ -392,10 +414,10 @@ func CreateMultiAccountCreationTransaction(t *testing.T, chain flow.Chain, n int // define the cadence script script := fmt.Sprintf(` transaction(publicKey: [UInt8]) { - prepare(signer: AuthAccount) { + prepare(signer: auth(AddKey, BorrowValue) &Account) { var i = 0 while i < %d { - let account = AuthAccount(payer: signer) + let account = Account(payer: signer) let publicKey2 = PublicKey( publicKey: publicKey, signatureAlgorithm: SignatureAlgorithm.%s @@ -415,7 +437,7 @@ func CreateMultiAccountCreationTransaction(t *testing.T, chain flow.Chain, n int ) // create the transaction to create the account - tx := flow.NewTransactionBody(). + tx := flow.NewTransactionBodyBuilder(). SetScript([]byte(script)). AddArgument(encCadPublicKey). AddAuthorizer(chain.ServiceAddress()) @@ -425,10 +447,10 @@ func CreateMultiAccountCreationTransaction(t *testing.T, chain flow.Chain, n int // CreateAddAnAccountKeyMultipleTimesTransaction generates a tx that adds a key several times to an account. // this can be used to exhaust an account's storage. -func CreateAddAnAccountKeyMultipleTimesTransaction(t *testing.T, accountKey *flow.AccountPrivateKey, counts int) *flow.TransactionBody { +func CreateAddAnAccountKeyMultipleTimesTransaction(t *testing.T, accountKey *flow.AccountPrivateKey, counts int) *flow.TransactionBodyBuilder { script := []byte(fmt.Sprintf(` transaction(counts: Int, key: [UInt8]) { - prepare(signer: AuthAccount) { + prepare(signer: auth(AddKey) &Account) { var i = 0 while i < counts { i = i + 1 @@ -454,11 +476,12 @@ func CreateAddAnAccountKeyMultipleTimesTransaction(t *testing.T, accountKey *flo arg2, err := jsoncdc.Encode(cadPublicKey) require.NoError(t, err) - addKeysTx := &flow.TransactionBody{ - Script: script, - } - addKeysTx = addKeysTx.AddArgument(arg1).AddArgument(arg2) - return addKeysTx + addKeysTxBuilder := flow.NewTransactionBodyBuilder(). + SetScript(script). + AddArgument(arg1). + AddArgument(arg2) + + return addKeysTxBuilder } // CreateAddAccountKeyTransaction generates a tx that adds a key to an account. 
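The fixtures in this file now return a *flow.TransactionBodyBuilder instead of a finished *flow.TransactionBody, leaving signing and finalization to the caller. A sketch of the resulting call pattern, using only builder and helper methods visible in this diff (the concrete fixture and arguments are illustrative):

// Sketch of how a test might consume the builder-returning fixtures above.
// The chosen fixture, authorizer, and sequence number are illustrative.
func buildAndSignSketch(t *testing.T, chain flow.Chain, authorizer flow.Address, seqNum uint64) *flow.TransactionBody {
	txBuilder := testutil.CreateContractDeploymentTransaction("Container", testutil.CounterContract, authorizer, chain)
	require.NoError(t, testutil.SignTransactionAsServiceAccount(txBuilder, seqNum, chain))

	// Build() validates the assembled fields and returns the immutable body
	txBody, err := txBuilder.Build()
	require.NoError(t, err)
	return txBody
}

The returned body can then be wrapped with fvm.Transaction(txBody, 0), as done elsewhere in this diff.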
@@ -467,8 +490,8 @@ func CreateAddAccountKeyTransaction(t *testing.T, accountKey *flow.AccountPrivat script := []byte(` transaction(key: [UInt8]) { - prepare(signer: AuthAccount) { - let acct = AuthAccount(payer: signer) + prepare(signer: auth(AddKey) &Account) { + let acct = Account(payer: signer) let publicKey2 = PublicKey( publicKey: key, signatureAlgorithm: SignatureAlgorithm.%s @@ -486,10 +509,9 @@ func CreateAddAccountKeyTransaction(t *testing.T, accountKey *flow.AccountPrivat require.NoError(t, err) addKeysTx := &flow.TransactionBody{ - Script: script, + Script: script, + Arguments: [][]byte{arg}, } - addKeysTx = addKeysTx.AddArgument(arg) - return addKeysTx } @@ -572,8 +594,14 @@ func ComputationResultFixture(t *testing.T) *execution.ComputationResult { blockExecResult := execution.NewPopulatedBlockExecutionResult(executableBlock) blockExecResult.CollectionExecutionResultAt(0).AppendTransactionResults( flow.EventsList{ - unittest.EventFixture("what", 0, 0, unittest.IdentifierFixture(), 2), - unittest.EventFixture("ever", 0, 1, unittest.IdentifierFixture(), 22), + unittest.EventFixture( + unittest.Event.WithTransactionIndex(0), + unittest.Event.WithEventIndex(0), + ), + unittest.EventFixture( + unittest.Event.WithTransactionIndex(0), + unittest.Event.WithEventIndex(1), + ), }, nil, nil, @@ -586,10 +614,22 @@ func ComputationResultFixture(t *testing.T) *execution.ComputationResult { ) blockExecResult.CollectionExecutionResultAt(1).AppendTransactionResults( flow.EventsList{ - unittest.EventFixture("what", 2, 0, unittest.IdentifierFixture(), 2), - unittest.EventFixture("ever", 2, 1, unittest.IdentifierFixture(), 22), - unittest.EventFixture("ever", 2, 2, unittest.IdentifierFixture(), 2), - unittest.EventFixture("ever", 2, 3, unittest.IdentifierFixture(), 22), + unittest.EventFixture( + unittest.Event.WithTransactionIndex(2), + unittest.Event.WithEventIndex(0), + ), + unittest.EventFixture( + unittest.Event.WithTransactionIndex(2), + unittest.Event.WithEventIndex(1), + ), + unittest.EventFixture( + unittest.Event.WithTransactionIndex(2), + unittest.Event.WithEventIndex(2), + ), + unittest.EventFixture( + unittest.Event.WithTransactionIndex(2), + unittest.Event.WithEventIndex(3), + ), }, nil, nil, @@ -600,6 +640,21 @@ func ComputationResultFixture(t *testing.T) *execution.ComputationResult { MemoryUsed: 22, }, ) + executionReceipt := &flow.ExecutionReceipt{ + UnsignedExecutionReceipt: flow.UnsignedExecutionReceipt{ + ExecutorID: unittest.IdentifierFixture(), + ExecutionResult: flow.ExecutionResult{ + Chunks: flow.ChunkList{ + {EndState: unittest.StateCommitmentFixture()}, + {EndState: unittest.StateCommitmentFixture()}, + {EndState: unittest.StateCommitmentFixture()}, + {EndState: unittest.StateCommitmentFixture()}, + }, + }, + Spocks: unittest.SignaturesFixture(1), + }, + ExecutorSignature: unittest.SignatureFixture(), + } return &execution.ComputationResult{ BlockExecutionResult: blockExecResult, @@ -613,15 +668,83 @@ func ComputationResultFixture(t *testing.T) *execution.ComputationResult { }, }, }, - ExecutionReceipt: &flow.ExecutionReceipt{ - ExecutionResult: flow.ExecutionResult{ - Chunks: flow.ChunkList{ - {EndState: unittest.StateCommitmentFixture()}, - {EndState: unittest.StateCommitmentFixture()}, - {EndState: unittest.StateCommitmentFixture()}, - {EndState: unittest.StateCommitmentFixture()}, - }, - }, + ExecutionReceipt: executionReceipt, + } +} + +// EntropyProviderFixture returns an entropy provider mock that +// supports RandomSource(). 
+// If input is nil, a random source fixture is generated. +func EntropyProviderFixture(source []byte) environment.EntropyProvider { + if source == nil { + source = unittest.SignatureFixture() + } + provider := envMock.EntropyProvider{} + provider.On("RandomSource").Return(source, nil) + return &provider +} + +// ProtocolStateWithSourceFixture returns a protocol state mock that only +// supports AtBlockID to return a snapshot mock. +// The snapshot mock only supports RandomSource(). +// If input is nil, a random source fixture is generated. +func ProtocolStateWithSourceFixture(source []byte) protocol.SnapshotExecutionSubsetProvider { + if source == nil { + source = unittest.SignatureFixture() + } + // For tests not explicitly testing version compatibility, always return latest protocol version + kvstore := &protocolMock.KVStoreReader{} + kvstore.On("GetProtocolStateVersion").Return(uint64(2)) + snapshot := mockSnapshotSubset{ + randomSourceFunc: func() ([]byte, error) { + return source, nil + }, + versionBeaconFunc: func() (*flow.SealedVersionBeacon, error) { + return &flow.SealedVersionBeacon{VersionBeacon: unittest.VersionBeaconFixture()}, nil + }, + kvStoreFunc: func() (protocol.KVStoreReader, error) { + return kvstore, nil + }, + } + + provider := mockProtocolStateSnapshotProvider{ + snapshotFunc: func(blockID flow.Identifier) protocol.SnapshotExecutionSubset { + return snapshot }, } + return provider +} + +type mockProtocolStateSnapshotProvider struct { + snapshotFunc func(blockID flow.Identifier) protocol.SnapshotExecutionSubset +} + +func (m mockProtocolStateSnapshotProvider) AtBlockID(blockID flow.Identifier) protocol.SnapshotExecutionSubset { + return m.snapshotFunc(blockID) } + +type mockSnapshotSubset struct { + randomSourceFunc func() ([]byte, error) + versionBeaconFunc func() (*flow.SealedVersionBeacon, error) + kvStoreFunc func() (protocol.KVStoreReader, error) +} + +func (m mockSnapshotSubset) RandomSource() ([]byte, error) { + if m.randomSourceFunc == nil { + return nil, errors.New("random source not implemented") + } + return m.randomSourceFunc() +} + +func (m mockSnapshotSubset) VersionBeacon() (*flow.SealedVersionBeacon, error) { + if m.versionBeaconFunc == nil { + return nil, errors.New("version beacon not implemented") + } + return m.versionBeaconFunc() +} + +func (m mockSnapshotSubset) ProtocolState() (protocol.KVStoreReader, error) { + return m.kvStoreFunc() +} + +var _ protocol.SnapshotExecutionSubset = (*mockSnapshotSubset)(nil) diff --git a/engine/execution/testutil/fixtures_checker_heavy_contract.go b/engine/execution/testutil/fixtures_checker_heavy_contract.go index 8baddba39ab..9740f654af8 100644 --- a/engine/execution/testutil/fixtures_checker_heavy_contract.go +++ b/engine/execution/testutil/fixtures_checker_heavy_contract.go @@ -6,7 +6,7 @@ import ( "github.com/onflow/flow-go/model/flow" ) -func DeployLocalReplayLimitedTransaction(authorizer flow.Address, chain flow.Chain) *flow.TransactionBody { +func DeployLocalReplayLimitedTransaction(authorizer flow.Address, chain flow.Chain) *flow.TransactionBodyBuilder { var builder strings.Builder builder.WriteString("let t = T") @@ -23,7 +23,7 @@ func DeployLocalReplayLimitedTransaction(authorizer flow.Address, chain flow.Cha ) } -func DeployGlobalReplayLimitedTransaction(authorizer flow.Address, chain flow.Chain) *flow.TransactionBody { +func DeployGlobalReplayLimitedTransaction(authorizer flow.Address, chain flow.Chain) *flow.TransactionBodyBuilder { var builder strings.Builder for j := 0; j < 2; j++ { diff --git 
a/engine/execution/testutil/fixtures_counter.go b/engine/execution/testutil/fixtures_counter.go index 8dd76d387ff..702c8797392 100644 --- a/engine/execution/testutil/fixtures_counter.go +++ b/engine/execution/testutil/fixtures_counter.go @@ -9,16 +9,16 @@ import ( const CounterContract = ` access(all) contract Container { access(all) resource Counter { - pub var count: Int + access(all) var count: Int init(_ v: Int) { self.count = v } - pub fun add(_ count: Int) { + access(all) fun add(_ count: Int) { self.count = self.count + count } } - pub fun createCounter(_ v: Int): @Counter { + access(all) fun createCounter(_ v: Int): @Counter { return <-create Counter(v) } } @@ -27,59 +27,59 @@ access(all) contract Container { const CounterContractV2 = ` access(all) contract Container { access(all) resource Counter { - pub var count: Int + access(all) var count: Int init(_ v: Int) { self.count = v } - pub fun add(_ count: Int) { + access(all) fun add(_ count: Int) { self.count = self.count + count } } - pub fun createCounter(_ v: Int): @Counter { + access(all) fun createCounter(_ v: Int): @Counter { return <-create Counter(v) } - pub fun createCounter2(_ v: Int): @Counter { + access(all) fun createCounter2(_ v: Int): @Counter { return <-create Counter(v) } } ` -func DeployCounterContractTransaction(authorizer flow.Address, chain flow.Chain) *flow.TransactionBody { +func DeployCounterContractTransaction(authorizer flow.Address, chain flow.Chain) *flow.TransactionBodyBuilder { return CreateContractDeploymentTransaction("Container", CounterContract, authorizer, chain) } -func DeployUnauthorizedCounterContractTransaction(authorizer flow.Address) *flow.TransactionBody { +func DeployUnauthorizedCounterContractTransaction(authorizer flow.Address) *flow.TransactionBodyBuilder { return CreateUnauthorizedContractDeploymentTransaction("Container", CounterContract, authorizer) } -func UpdateUnauthorizedCounterContractTransaction(authorizer flow.Address) *flow.TransactionBody { +func UpdateUnauthorizedCounterContractTransaction(authorizer flow.Address) *flow.TransactionBodyBuilder { return UpdateContractUnathorizedDeploymentTransaction("Container", CounterContractV2, authorizer) } -func RemoveUnauthorizedCounterContractTransaction(authorizer flow.Address) *flow.TransactionBody { +func RemoveUnauthorizedCounterContractTransaction(authorizer flow.Address) *flow.TransactionBodyBuilder { return RemoveContractUnathorizedDeploymentTransaction("Container", authorizer) } -func RemoveCounterContractTransaction(authorizer flow.Address, chain flow.Chain) *flow.TransactionBody { +func RemoveCounterContractTransaction(authorizer flow.Address, chain flow.Chain) *flow.TransactionBodyBuilder { return RemoveContractDeploymentTransaction("Container", authorizer, chain) } -func CreateCounterTransaction(counter, signer flow.Address) *flow.TransactionBody { - return flow.NewTransactionBody(). +func CreateCounterTransaction(counter, signer flow.Address) *flow.TransactionBodyBuilder { + return flow.NewTransactionBodyBuilder(). SetScript([]byte(fmt.Sprintf(` import 0x%s transaction { - prepare(acc: AuthAccount) { - var maybeCounter <- acc.load<@Container.Counter>(from: /storage/counter) + prepare(acc: auth(Storage) &Account) { + var maybeCounter <- acc.storage.load<@Container.Counter>(from: /storage/counter) if maybeCounter == nil { maybeCounter <-! Container.createCounter(3) } - acc.save(<-maybeCounter!, to: /storage/counter) + acc.storage.save(<-maybeCounter!, to: /storage/counter) } }`, counter)), ). 
@@ -88,35 +88,33 @@ func CreateCounterTransaction(counter, signer flow.Address) *flow.TransactionBod // CreateCounterPanicTransaction returns a transaction that will manipulate state by writing a new counter into storage // and then panic. It can be used to test whether execution state stays untouched/will revert -func CreateCounterPanicTransaction(counter, signer flow.Address) *flow.TransactionBody { - return &flow.TransactionBody{ - Script: []byte(fmt.Sprintf(` +func CreateCounterPanicTransaction(counter, signer flow.Address) *flow.TransactionBodyBuilder { + return flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf(` import 0x%s transaction { - prepare(acc: AuthAccount) { - if let existing <- acc.load<@Container.Counter>(from: /storage/counter) { + prepare(acc: auth(Storage) &Account) { + if let existing <- acc.storage.load<@Container.Counter>(from: /storage/counter) { destroy existing } panic("fail for testing purposes") } - }`, counter)), - Authorizers: []flow.Address{signer}, - } + }`, counter))). + AddAuthorizer(signer) } -func AddToCounterTransaction(counter, signer flow.Address) *flow.TransactionBody { - return &flow.TransactionBody{ - Script: []byte(fmt.Sprintf(` +func AddToCounterTransaction(counter, signer flow.Address) *flow.TransactionBodyBuilder { + return flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf(` import 0x%s transaction { - prepare(acc: AuthAccount) { - let counter = acc.borrow<&Container.Counter>(from: /storage/counter) + prepare(acc: auth(Storage) &Account) { + let counter = acc.storage.borrow<&Container.Counter>(from: /storage/counter) counter?.add(2) } - }`, counter)), - Authorizers: []flow.Address{signer}, - } + }`, counter))). + AddAuthorizer(signer) } diff --git a/engine/execution/testutil/fixtures_event.go b/engine/execution/testutil/fixtures_event.go index 376ae76903b..b976f96fba2 100644 --- a/engine/execution/testutil/fixtures_event.go +++ b/engine/execution/testutil/fixtures_event.go @@ -17,31 +17,31 @@ access(all) contract EventContract { } ` -func DeployEventContractTransaction(authorizer flow.Address, chain flow.Chain, eventValue int) *flow.TransactionBody { +func DeployEventContractTransaction(authorizer flow.Address, chain flow.Chain, eventValue int) *flow.TransactionBodyBuilder { contract := fmt.Sprintf(EventContract, eventValue) return CreateContractDeploymentTransaction("EventContract", contract, authorizer, chain) } -func UnauthorizedDeployEventContractTransaction(authorizer flow.Address, chain flow.Chain, eventValue int) *flow.TransactionBody { +func UnauthorizedDeployEventContractTransaction(authorizer flow.Address, chain flow.Chain, eventValue int) *flow.TransactionBodyBuilder { contract := fmt.Sprintf(EventContract, eventValue) return CreateUnauthorizedContractDeploymentTransaction("EventContract", contract, authorizer) } -func UpdateEventContractTransaction(authorizer flow.Address, chain flow.Chain, eventValue int) *flow.TransactionBody { +func UpdateEventContractTransaction(authorizer flow.Address, chain flow.Chain, eventValue int) *flow.TransactionBodyBuilder { contract := fmt.Sprintf(EventContract, eventValue) return UpdateContractDeploymentTransaction("EventContract", contract, authorizer, chain) } -func CreateEmitEventTransaction(contractAccount, signer flow.Address) *flow.TransactionBody { - return flow.NewTransactionBody(). +func CreateEmitEventTransaction(contractAccount, signer flow.Address) *flow.TransactionBodyBuilder { + return flow.NewTransactionBodyBuilder(). 
SetScript([]byte(fmt.Sprintf(` import EventContract from 0x%s transaction { - prepare(acc: AuthAccount) {} + prepare(acc: &Account) {} execute { - EventContract.EmitEvent() - } + EventContract.EmitEvent() + } }`, contractAccount)), ). AddAuthorizer(signer) diff --git a/engine/execution/testutil/fixtures_token.go b/engine/execution/testutil/fixtures_token.go index 2bbef170428..0c93c24b39c 100644 --- a/engine/execution/testutil/fixtures_token.go +++ b/engine/execution/testutil/fixtures_token.go @@ -6,33 +6,33 @@ import ( "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" ) -func CreateTokenTransferTransaction(chain flow.Chain, amount int, to flow.Address, signer flow.Address) *flow.TransactionBody { - return flow.NewTransactionBody(). +func CreateTokenTransferTransaction(chain flow.Chain, amount int, to flow.Address, signer flow.Address) *flow.TransactionBodyBuilder { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + return flow.NewTransactionBodyBuilder(). SetScript([]byte(fmt.Sprintf(` import FungibleToken from 0x%s import FlowToken from 0x%s transaction(amount: UFix64, to: Address) { - let sentVault: @FungibleToken.Vault + let sentVault: @{FungibleToken.Vault} - prepare(signer: AuthAccount) { - let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + prepare(signer: auth(BorrowValue) &Account) { + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner's Vault!") self.sentVault <- vaultRef.withdraw(amount: amount) } execute { let receiverRef = getAccount(to) - .getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() + .capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? panic("Could not borrow receiver reference to the recipient's Vault") receiverRef.deposit(from: <-self.sentVault) } - }`, fvm.FungibleTokenAddress(chain), fvm.FlowTokenAddress(chain)))). + }`, sc.FungibleToken.Address.Hex(), sc.FlowToken.Address.Hex()))). AddArgument(jsoncdc.MustEncode(cadence.UFix64(amount))). AddArgument(jsoncdc.MustEncode(cadence.NewAddress(to))). 
AddAuthorizer(signer) diff --git a/engine/execution/utils/hasher.go b/engine/execution/utils/hasher.go index 81cf55ddba1..1ef8b87e036 100644 --- a/engine/execution/utils/hasher.go +++ b/engine/execution/utils/hasher.go @@ -3,7 +3,8 @@ package utils import ( "fmt" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/module/signature" ) diff --git a/engine/ghost/client/ghost_client.go b/engine/ghost/client/ghost_client.go index 66e5555b002..219ddf230a8 100644 --- a/engine/ghost/client/ghost_client.go +++ b/engine/ghost/client/ghost_client.go @@ -108,8 +108,12 @@ func (fmsr *FlowMessageStreamReader) Next() (flow.Identifier, interface{}, error if err != nil { return flow.ZeroID, nil, fmt.Errorf("failed to decode event: %w", err) } + internal, err := event.ToInternal() + if err != nil { + return flow.ZeroID, nil, fmt.Errorf("failed to convert event to internal: %w", err) + } originID := flow.HashToID(msg.GetSenderID()) - return originID, event, nil + return originID, internal, nil } diff --git a/engine/ghost/engine/handler.go b/engine/ghost/engine/handler.go index 767ef53b561..11d757c06eb 100644 --- a/engine/ghost/engine/handler.go +++ b/engine/ghost/engine/handler.go @@ -47,7 +47,7 @@ func (h Handler) SendEvent(_ context.Context, req *ghost.SendEventRequest) (*emp message := req.GetMessage() - event, err := h.codec.Decode(message) + decodedMsg, err := h.codec.Decode(message) if err != nil { return nil, status.Error(codes.InvalidArgument, "failed to decode message") } @@ -61,7 +61,7 @@ func (h Handler) SendEvent(_ context.Context, req *ghost.SendEventRequest) (*emp } h.log.Info(). - Interface("event", event). + Interface("event", decodedMsg). Str("flow_ids", fmt.Sprintf("%v", flowIDs)). Str("target_ids", fmt.Sprintf("%v", targetIDs)). Msg("sending message") @@ -70,11 +70,11 @@ func (h Handler) SendEvent(_ context.Context, req *ghost.SendEventRequest) (*emp // TODO: there is an issue in the Publish method for the ghost node, // sometimes, it fails to deliver the message to the target without returning any error. // This becomes one of the big factors contributing to the tests flakeiness. - err = conduit.Publish(event, flowIDs...) + err = conduit.Publish(decodedMsg, flowIDs...) if err != nil { h.log.Error(). Err(err). - Interface("event", event). + Interface("message", decodedMsg). Str("flow_ids", fmt.Sprintf("%v", flowIDs)). Str("target_ids", fmt.Sprintf("%v", targetIDs)). Msg("error publishing message") diff --git a/engine/ghost/engine/rpc.go b/engine/ghost/engine/rpc.go index b859bb12a8e..f73c2b3ec18 100644 --- a/engine/ghost/engine/rpc.go +++ b/engine/ghost/engine/rpc.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/engine" ghost "github.com/onflow/flow-go/engine/ghost/protobuf" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" @@ -40,7 +41,7 @@ type RPC struct { } // New returns a new RPC engine. 
-func New(net network.Network, log zerolog.Logger, me module.Local, state protocol.State, config Config) (*RPC, error) { +func New(net network.EngineRegistry, log zerolog.Logger, me module.Local, state protocol.State, config Config) (*RPC, error) { log = log.With().Str("engine", "rpc").Logger() @@ -76,7 +77,7 @@ func New(net network.Network, log zerolog.Logger, me module.Local, state protoco } // registerConduits registers for ALL channels and returns a map of engine id to conduit -func registerConduits(net network.Network, state protocol.State, eng network.Engine) (map[channels.Channel]network.Conduit, error) { +func registerConduits(net network.EngineRegistry, state protocol.State, eng network.Engine) (map[channels.Channel]network.Conduit, error) { // create a list of all channels that don't change over time channelList := channels.ChannelList{ @@ -93,7 +94,10 @@ func registerConduits(net network.Network, state protocol.State, eng network.Eng // add channels that are dependent on protocol state and change over time // TODO need to update to register dynamic channels that are created on later epoch transitions - epoch := state.Final().Epochs().Current() + epoch, err := state.Final().Epochs().Current() + if err != nil { + return nil, fmt.Errorf("could not get current epoch: %w", err) + } clusters, err := epoch.Clustering() if err != nil { @@ -105,7 +109,7 @@ func registerConduits(net network.Network, state protocol.State, eng network.Eng if err != nil { return nil, fmt.Errorf("could not get cluster: %w", err) } - clusterID := cluster.RootBlock().Header.ChainID + clusterID := cluster.RootBlock().ChainID // add the dynamic channels for the cluster channelList = append( @@ -182,9 +186,12 @@ func (e *RPC) Process(channel channels.Channel, originID flow.Identifier, event } func (e *RPC) process(originID flow.Identifier, event interface{}) error { - + msg, err := messages.InternalToMessage(event) + if err != nil { + return fmt.Errorf("failed to convert event to message: %v", err) + } // json encode the message into bytes - encodedMsg, err := e.codec.Encode(event) + encodedMsg, err := e.codec.Encode(msg) if err != nil { return fmt.Errorf("failed to encode message: %v", err) } diff --git a/engine/notifier_test.go b/engine/notifier_test.go index cf5725fff04..673fa0a2ca3 100644 --- a/engine/notifier_test.go +++ b/engine/notifier_test.go @@ -1,12 +1,13 @@ package engine import ( + "context" "fmt" "sync" "testing" - "time" + "testing/synctest" - "github.com/stretchr/testify/require" + "github.com/stretchr/testify/assert" "go.uber.org/atomic" ) @@ -17,8 +18,8 @@ func TestNotifier_PassByValue(t *testing.T) { var sent sync.WaitGroup sent.Add(1) - go func(n Notifier) { - notifier.Notify() + go func(passedByValue Notifier) { + passedByValue.Notify() sent.Done() }(notifier) sent.Wait() @@ -38,7 +39,7 @@ func TestNotifier_NoNotificationsInitialization(t *testing.T) { select { case <-notifier.Channel(): t.Fail() - default: //expected + default: // expected } } @@ -52,12 +53,10 @@ func TestNotifier_ManyNotifications(t *testing.T) { notifier := NewNotifier() var counter sync.WaitGroup - for i := 0; i < 10; i++ { - counter.Add(1) - go func() { + for range 10 { + counter.Go(func() { notifier.Notify() - counter.Done() - }() + }) } counter.Wait() @@ -67,141 +66,132 @@ func TestNotifier_ManyNotifications(t *testing.T) { select { case <-c: // expected default: - t.Fail() + t.Error("expected one notification to be available") } - // attempt to consume first notification + // attempt to consume second notification // 
expect that no notification is available
 	select {
 	case <-c:
-		t.Fail()
+		t.Error("expected only one notification to be available")
-	default: //expected
+	default: // expected
 	}
 }
 
-// TestNotifier_ManyConsumers spans many worker routines and
-// sends just as many notifications with small delays. We require that
-// all workers eventually get a notification.
+// TestNotifier_ManyConsumers spawns many worker routines and sends just as many notifications.
+// We require that all workers eventually get a notification.
 func TestNotifier_ManyConsumers(t *testing.T) {
 	singleTestRun := func(t *testing.T) {
 		t.Parallel()
-		notifier := NewNotifier()
-		c := notifier.Channel()
-
-		// spawn 100 worker routines to each wait for a notification
-		var startingWorkers sync.WaitGroup
-		pendingWorkers := atomic.NewInt32(100)
-		for i := 0; i < 100; i++ {
-			startingWorkers.Add(1)
-			go func() {
-				startingWorkers.Done()
-				<-c
-				pendingWorkers.Dec()
-			}()
-		}
-		startingWorkers.Wait()
-
-		// send 100 notifications, with small delays
-		for i := 0; i < 100; i++ {
-			notifier.Notify()
-			time.Sleep(100 * time.Millisecond)
-		}
+		synctest.Test(t, func(t *testing.T) {
+			notifier := NewNotifier()
+			c := notifier.Channel()
+
+			// spawn 100 worker routines to each wait for a notification
+			pendingWorkers := atomic.NewInt32(100)
+			for range 100 {
+				go func() {
+					<-c
+					pendingWorkers.Dec()
+				}()
+			}
+
+			// wait until all workers are blocked on the notifier channel
+			synctest.Wait()
+
+			for range 100 {
+				notifier.Notify()
+
+				// wait for the previous notification to be consumed to ensure that the producer
+				// won't drop any notifications.
+				// NOTE: this is necessary because golang channels do not provide atomic consistency.
+				// Specifically, it means that when we send a notification while workers are waiting,
+				// it is not guaranteed that a worker will atomically receive that notification. In
+				// other words, the channel might behave as if there were no worker waiting and
+				// de-duplicate notifications. For details, see
+				// https://www.notion.so/flowfoundation/Golang-Channel-Consistency-19a1aee12324817699b1ff162921d8fc
+				synctest.Wait()
+			}
+
+			// wait until all workers are done
+			synctest.Wait()
 
-		// require that all workers got a notification
-		if !conditionEventuallySatisfied(func() bool { return pendingWorkers.Load() == 0 }, 3*time.Second, 100*time.Millisecond) {
-			require.Fail(t, "timed out", "still awaiting %d workers to get notification", pendingWorkers.Load())
-		}
+			// require that all workers got a notification
+			assert.Equal(t, int32(0), pendingWorkers.Load())
+		})
 	}
 
-	for r := 0; r < 100; r++ {
+	for r := range 100 {
 		t.Run(fmt.Sprintf("run %d", r), singleTestRun)
 	}
 }
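For context on the component under test: the Notifier exercised here is, in essence, a buffered channel of capacity one with a non-blocking send, so any burst of concurrent Notify() calls collapses into at most one pending notification. A minimal sketch consistent with the behavior these tests pin down (the real type lives in engine/notifier.go and may differ in detail):

// Sketch only: capacity-1 channel with a non-blocking send, so repeated
// Notify() calls de-duplicate into a single pending notification.
type notifierSketch struct {
	ch chan struct{}
}

func newNotifierSketch() notifierSketch {
	return notifierSketch{ch: make(chan struct{}, 1)}
}

// Notify never blocks: if a notification is already pending, it is a no-op.
func (n notifierSketch) Notify() {
	select {
	case n.ch <- struct{}{}: // buffer was empty: record a pending notification
	default: // buffer full: a notification is already pending, drop this one
	}
}

// Channel returns the channel that workers block on to await notifications.
func (n notifierSketch) Channel() <-chan struct{} {
	return n.ch
}

This de-duplication is also why TestNotifier_ManyNotifications above expects exactly one consumable notification after ten concurrent Notify() calls.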
-		var consumersAllReady sync.WaitGroup
-		consumersAllReady.Add(5)
-
-		// 5 routines consuming work
-		for i := 0; i < 5; i++ {
-			go func() {
-				consumersAllReady.Done()
-				for consumedWork.Load() < totalWork {
-					<-notifier.Channel()
-				L:
+		synctest.Test(t, func(t *testing.T) {
+			ctx, cancel := context.WithCancel(context.Background())
+
+			notifier := NewNotifier()
+
+			producerCount := int32(10) // number of producers
+			producerJobs := int32(10)  // number of tasks that each producer will queue up
+			pendingWorkQueue := make(chan struct{}, producerCount*producerJobs)
+			consumedWork := atomic.NewInt32(0)
+
+			// start the consumers first, otherwise we might finish pushing all jobs, before any of
+			// our consumers has started listening to the queue.
+
+			processAllPending := func() {
+				for {
+					select {
+					case <-pendingWorkQueue:
+						consumedWork.Inc()
+					default:
+						return
+					}
+				}
+			}
+
+			// 5 routines consuming work
+			for range 5 {
+				go func() {
 					for {
 						select {
-						case <-pendingWorkQueue:
-							consumedWork.Inc()
-						default:
-							break L
+						case <-ctx.Done():
+							return
+						case <-notifier.Channel():
+							processAllPending()
 						}
 					}
-				}
-			}()
-		}
-
-		// wait long enough for all consumer to be ready for new notification.
-		consumersAllReady.Wait()
-
-		var workersAllReady sync.WaitGroup
-		workersAllReady.Add(10)
-
-		// 10 routines pushing work
-		for i := 0; i < 10; i++ {
-			go func() {
-				workersAllReady.Done()
-				for scheduledWork.Inc() <= totalWork {
-					pendingWorkQueue <- struct{}{}
-					notifier.Notify()
-				}
-			}()
-		}
-
-		// wait long enough for all workers to be started.
-		workersAllReady.Wait()
+				}()
+			}
 
-		// require that all work is eventually consumed
-		if !conditionEventuallySatisfied(func() bool { return consumedWork.Load() == totalWork }, 3*time.Second, 100*time.Millisecond) {
-			require.Fail(t, "timed out", "only consumed %d units of work but expecting %d", consumedWork.Load(), totalWork)
-		}
-	}
+			// wait for all consumers to block on the notifier channel
+			synctest.Wait()
 
-	for r := 0; r < 100; r++ {
-		t.Run(fmt.Sprintf("run %d", r), singleTestRun)
-	}
-}
+			for range producerCount {
+				go func() {
+					for range producerJobs {
+						pendingWorkQueue <- struct{}{}
+						notifier.Notify()
+					}
+				}()
+			}
 
-func conditionEventuallySatisfied(condition func() bool, waitFor time.Duration, tick time.Duration) bool {
-	done := make(chan struct{})
+			// wait for all producers and consumers to block. At this point, all jobs should be consumed.
+ synctest.Wait() + assert.Equal(t, producerCount*producerJobs, consumedWork.Load()) - go func() { - for range time.Tick(tick) { - if condition() { - close(done) - return - } - } - }() + // shutdown blocked consumers and wait for them to complete + cancel() + synctest.Wait() + }) + } - select { - case <-time.After(waitFor): - return false - case <-done: - return true + for r := range 100 { + t.Run(fmt.Sprintf("run %d", r), singleTestRun) } } diff --git a/engine/protocol/api.go b/engine/protocol/api.go index 5f0451896d2..79122ce3937 100644 --- a/engine/protocol/api.go +++ b/engine/protocol/api.go @@ -3,17 +3,19 @@ package protocol import ( "context" - "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine/common/rpc" + accessmodel "github.com/onflow/flow-go/model/access" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" ) type NetworkAPI interface { - GetNetworkParameters(ctx context.Context) access.NetworkParameters + GetNetworkParameters(ctx context.Context) accessmodel.NetworkParameters GetLatestProtocolStateSnapshot(ctx context.Context) ([]byte, error) - GetNodeVersionInfo(ctx context.Context) (*access.NodeVersionInfo, error) + GetProtocolStateSnapshotByBlockID(ctx context.Context, blockID flow.Identifier) ([]byte, error) + GetProtocolStateSnapshotByHeight(ctx context.Context, blockHeight uint64) ([]byte, error) + GetNodeVersionInfo(ctx context.Context) (*accessmodel.NodeVersionInfo, error) } type API interface { diff --git a/engine/protocol/api_test.go b/engine/protocol/api_test.go index e2b7234eb42..42f1f23f45c 100644 --- a/engine/protocol/api_test.go +++ b/engine/protocol/api_test.go @@ -2,9 +2,7 @@ package protocol import ( "context" - "math/rand" "testing" - "time" "github.com/stretchr/testify/suite" @@ -37,7 +35,6 @@ func TestHandler(t *testing.T) { } func (suite *Suite) SetupTest() { - rand.Seed(time.Now().UnixNano()) suite.snapshot = new(protocol.Snapshot) suite.state = new(protocol.State) @@ -51,7 +48,7 @@ func (suite *Suite) TestGetLatestFinalizedBlock_Success() { // setup the mocks block := unittest.BlockFixture() - header := block.Header + header := block.ToHeader() suite.snapshot. On("Head"). @@ -60,7 +57,7 @@ func (suite *Suite) TestGetLatestFinalizedBlock_Success() { suite.blocks. On("ByID", header.ID()). - Return(&block, nil). + Return(block, nil). Once() backend := New(suite.state, suite.blocks, suite.headers, nil) @@ -70,7 +67,7 @@ func (suite *Suite) TestGetLatestFinalizedBlock_Success() { suite.checkResponse(responseBlock, err) // make sure we got the latest block - suite.Require().Equal(block, *responseBlock) + suite.Require().Equal(block, responseBlock) suite.assertAllExpectations() } @@ -80,7 +77,7 @@ func (suite *Suite) TestGetLatestSealedBlock_Success() { // setup the mocks block := unittest.BlockFixture() - header := block.Header + header := block.ToHeader() suite.snapshot. On("Head"). @@ -89,7 +86,7 @@ func (suite *Suite) TestGetLatestSealedBlock_Success() { suite.blocks. On("ByID", header.ID()). - Return(&block, nil). + Return(block, nil). 
Once() backend := New(suite.state, suite.blocks, suite.headers, nil) @@ -99,7 +96,7 @@ func (suite *Suite) TestGetLatestSealedBlock_Success() { suite.checkResponse(responseBlock, err) // make sure we got the latest block - suite.Require().Equal(block, *responseBlock) + suite.Require().Equal(block, responseBlock) suite.assertAllExpectations() } @@ -109,7 +106,7 @@ func (suite *Suite) TestGetLatestBlock_StorageNotFoundFailure() { // setup the mocks block := unittest.BlockFixture() - header := block.Header + header := block.ToHeader() suite.snapshot. On("Head"). @@ -131,7 +128,7 @@ func (suite *Suite) TestGetLatestBlock_CodesNotFoundFailure() { // setup the mocks block := unittest.BlockFixture() - header := block.Header + header := block.ToHeader() suite.snapshot. On("Head"). @@ -153,7 +150,7 @@ func (suite *Suite) TestGetLatestBlock_InternalFailure() { // setup the mocks block := unittest.BlockFixture() - header := block.Header + header := block.ToHeader() suite.snapshot. On("Head"). @@ -176,7 +173,7 @@ func (suite *Suite) TestGetBlockById_Success() { suite.blocks. On("ByID", block.ID()). - Return(&block, nil). + Return(block, nil). Once() backend := New(suite.state, suite.blocks, suite.headers, nil) @@ -197,7 +194,7 @@ func (suite *Suite) TestGetBlockById_StorageNotFoundFailure() { suite.blocks. On("ByID", block.ID()). - Return(&block, storage.ErrNotFound). + Return(block, storage.ErrNotFound). Once() backend := New(suite.state, suite.blocks, suite.headers, nil) @@ -216,7 +213,7 @@ func (suite *Suite) TestGetBlockById_CodesNotFoundFailure() { suite.blocks. On("ByID", block.ID()). - Return(&block, CodesNotFoundErr). + Return(block, CodesNotFoundErr). Once() backend := New(suite.state, suite.blocks, suite.headers, nil) @@ -235,7 +232,7 @@ func (suite *Suite) TestGetBlockById_InternalFailure() { suite.blocks. On("ByID", block.ID()). - Return(&block, InternalErr). + Return(block, InternalErr). Once() backend := New(suite.state, suite.blocks, suite.headers, nil) @@ -251,11 +248,11 @@ func (suite *Suite) TestGetBlockById_InternalFailure() { func (suite *Suite) TestGetBlockByHeight_Success() { // setup the mocks block := unittest.BlockFixture() - height := block.Header.Height + height := block.Height suite.blocks. On("ByHeight", height). - Return(&block, nil). + Return(block, nil). Once() backend := New(suite.state, suite.blocks, suite.headers, nil) @@ -273,11 +270,11 @@ func (suite *Suite) TestGetBlockByHeight_Success() { func (suite *Suite) TestGetBlockByHeight_StorageNotFoundFailure() { // setup the mocks block := unittest.BlockFixture() - height := block.Header.Height + height := block.Height suite.blocks. On("ByHeight", height). - Return(&block, StorageNotFoundErr). + Return(block, StorageNotFoundErr). Once() backend := New(suite.state, suite.blocks, suite.headers, nil) @@ -293,11 +290,11 @@ func (suite *Suite) TestGetBlockByHeight_StorageNotFoundFailure() { func (suite *Suite) TestGetBlockByHeight_CodesNotFoundFailure() { // setup the mocks block := unittest.BlockFixture() - height := block.Header.Height + height := block.Height suite.blocks. On("ByHeight", height). - Return(&block, CodesNotFoundErr). + Return(block, CodesNotFoundErr). Once() backend := New(suite.state, suite.blocks, suite.headers, nil) @@ -313,11 +310,11 @@ func (suite *Suite) TestGetBlockByHeight_CodesNotFoundFailure() { func (suite *Suite) TestGetBlockByHeight_InternalFailure() { // setup the mocks block := unittest.BlockFixture() - height := block.Header.Height + height := block.Height suite.blocks. 
On("ByHeight", height). - Return(&block, InternalErr). + Return(block, InternalErr). Once() backend := New(suite.state, suite.blocks, suite.headers, nil) @@ -426,7 +423,7 @@ func (suite *Suite) TestGetLatestBlockHeader_InternalFailure() { func (suite *Suite) TestGetBlockHeaderByID_Success() { // setup the mocks block := unittest.BlockFixture() - header := block.Header + header := block.ToHeader() suite.headers. On("ByBlockID", block.ID()). @@ -451,7 +448,7 @@ func (suite *Suite) TestGetBlockHeaderByID_Success() { func (suite *Suite) TestGetBlockHeaderByID_StorageNotFoundFailure() { // setup the mocks block := unittest.BlockFixture() - header := block.Header + header := block.ToHeader() suite.headers. On("ByBlockID", block.ID()). @@ -471,7 +468,7 @@ func (suite *Suite) TestGetBlockHeaderByID_StorageNotFoundFailure() { func (suite *Suite) TestGetBlockHeaderByID_CodesNotFoundFailure() { // setup the mocks block := unittest.BlockFixture() - header := block.Header + header := block.ToHeader() suite.headers. On("ByBlockID", block.ID()). @@ -491,7 +488,7 @@ func (suite *Suite) TestGetBlockHeaderByID_CodesNotFoundFailure() { func (suite *Suite) TestGetBlockHeaderByID_InternalFailure() { // setup the mocks block := unittest.BlockFixture() - header := block.Header + header := block.ToHeader() suite.headers. On("ByBlockID", block.ID()). diff --git a/engine/protocol/handler.go b/engine/protocol/handler.go index ef77ad70e43..6448d576b52 100644 --- a/engine/protocol/handler.go +++ b/engine/protocol/handler.go @@ -50,7 +50,7 @@ func (h *Handler) GetNetworkParameters( func (h *Handler) GetNodeVersionInfo( ctx context.Context, - request *access.GetNodeVersionInfoRequest, + _ *access.GetNodeVersionInfoRequest, ) (*access.GetNodeVersionInfoResponse, error) { nodeVersionInfo, err := h.api.GetNodeVersionInfo(ctx) if err != nil { @@ -59,10 +59,11 @@ func (h *Handler) GetNodeVersionInfo( return &access.GetNodeVersionInfoResponse{ Info: &entities.NodeVersionInfo{ - Semver: nodeVersionInfo.Semver, - Commit: nodeVersionInfo.Commit, - SporkId: nodeVersionInfo.SporkId[:], - ProtocolVersion: nodeVersionInfo.ProtocolVersion, + Semver: nodeVersionInfo.Semver, + Commit: nodeVersionInfo.Commit, + SporkId: nodeVersionInfo.SporkId[:], + ProtocolVersion: nodeVersionInfo.ProtocolVersion, + ProtocolStateVersion: nodeVersionInfo.ProtocolStateVersion, }, }, nil } @@ -79,6 +80,31 @@ func (h *Handler) GetLatestProtocolStateSnapshot(ctx context.Context, req *acces }, nil } +// GetProtocolStateSnapshotByBlockID returns serializable Snapshot by blockID +func (h *Handler) GetProtocolStateSnapshotByBlockID(ctx context.Context, req *access.GetProtocolStateSnapshotByBlockIDRequest) (*access.ProtocolStateSnapshotResponse, error) { + blockID := convert.MessageToIdentifier(req.GetBlockId()) + snapshot, err := h.api.GetProtocolStateSnapshotByBlockID(ctx, blockID) + if err != nil { + return nil, err + } + + return &access.ProtocolStateSnapshotResponse{ + SerializedSnapshot: snapshot, + }, nil +} + +// GetProtocolStateSnapshotByHeight returns serializable Snapshot by block height +func (h *Handler) GetProtocolStateSnapshotByHeight(ctx context.Context, req *access.GetProtocolStateSnapshotByHeightRequest) (*access.ProtocolStateSnapshotResponse, error) { + snapshot, err := h.api.GetProtocolStateSnapshotByHeight(ctx, req.GetBlockHeight()) + if err != nil { + return nil, err + } + + return &access.ProtocolStateSnapshotResponse{ + SerializedSnapshot: snapshot, + }, nil +} + // GetLatestBlockHeader gets the latest sealed block header. 
func (h *Handler) GetLatestBlockHeader( ctx context.Context, @@ -160,7 +186,7 @@ func (h *Handler) GetBlockByID( } func (h *Handler) blockResponse(block *flow.Block, fullResponse bool) (*access.BlockResponse, error) { - signerIDs, err := h.signerIndicesDecoder.DecodeSignerIDs(block.Header) + signerIDs, err := h.signerIndicesDecoder.DecodeSignerIDs(block.ToHeader()) if err != nil { return nil, err // the block was retrieved from local storage - so no errors are expected } diff --git a/engine/protocol/mock/api.go b/engine/protocol/mock/api.go index 6ece771befd..0597385edef 100644 --- a/engine/protocol/mock/api.go +++ b/engine/protocol/mock/api.go @@ -1,11 +1,11 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( context "context" - access "github.com/onflow/flow-go/access" + access "github.com/onflow/flow-go/model/access" flow "github.com/onflow/flow-go/model/flow" @@ -21,6 +21,10 @@ type API struct { func (_m *API) GetBlockByHeight(ctx context.Context, height uint64) (*flow.Block, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for GetBlockByHeight") + } + var r0 *flow.Block var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (*flow.Block, error)); ok { @@ -47,6 +51,10 @@ func (_m *API) GetBlockByHeight(ctx context.Context, height uint64) (*flow.Block func (_m *API) GetBlockByID(ctx context.Context, id flow.Identifier) (*flow.Block, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetBlockByID") + } + var r0 *flow.Block var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.Block, error)); ok { @@ -73,6 +81,10 @@ func (_m *API) GetBlockByID(ctx context.Context, id flow.Identifier) (*flow.Bloc func (_m *API) GetBlockHeaderByHeight(ctx context.Context, height uint64) (*flow.Header, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByHeight") + } + var r0 *flow.Header var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (*flow.Header, error)); ok { @@ -99,6 +111,10 @@ func (_m *API) GetBlockHeaderByHeight(ctx context.Context, height uint64) (*flow func (_m *API) GetBlockHeaderByID(ctx context.Context, id flow.Identifier) (*flow.Header, error) { ret := _m.Called(ctx, id) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByID") + } + var r0 *flow.Header var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.Header, error)); ok { @@ -125,6 +141,10 @@ func (_m *API) GetBlockHeaderByID(ctx context.Context, id flow.Identifier) (*flo func (_m *API) GetLatestBlock(ctx context.Context, isSealed bool) (*flow.Block, error) { ret := _m.Called(ctx, isSealed) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlock") + } + var r0 *flow.Block var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (*flow.Block, error)); ok { @@ -151,6 +171,10 @@ func (_m *API) GetLatestBlock(ctx context.Context, isSealed bool) (*flow.Block, func (_m *API) GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flow.Header, error) { ret := _m.Called(ctx, isSealed) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + var r0 *flow.Header var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (*flow.Header, error)); ok { @@ -177,6 +201,10 @@ func (_m *API) GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flow.H 
func (_m *API) GetLatestProtocolStateSnapshot(ctx context.Context) ([]byte, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetLatestProtocolStateSnapshot") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]byte, error)); ok { @@ -203,6 +231,10 @@ func (_m *API) GetLatestProtocolStateSnapshot(ctx context.Context) ([]byte, erro func (_m *API) GetNetworkParameters(ctx context.Context) access.NetworkParameters { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetNetworkParameters") + } + var r0 access.NetworkParameters if rf, ok := ret.Get(0).(func(context.Context) access.NetworkParameters); ok { r0 = rf(ctx) @@ -217,6 +249,10 @@ func (_m *API) GetNetworkParameters(ctx context.Context) access.NetworkParameter func (_m *API) GetNodeVersionInfo(ctx context.Context) (*access.NodeVersionInfo, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetNodeVersionInfo") + } + var r0 *access.NodeVersionInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*access.NodeVersionInfo, error)); ok { @@ -239,13 +275,72 @@ func (_m *API) GetNodeVersionInfo(ctx context.Context) (*access.NodeVersionInfo, return r0, r1 } -type mockConstructorTestingTNewAPI interface { - mock.TestingT - Cleanup(func()) +// GetProtocolStateSnapshotByBlockID provides a mock function with given fields: ctx, blockID +func (_m *API) GetProtocolStateSnapshotByBlockID(ctx context.Context, blockID flow.Identifier) ([]byte, error) { + ret := _m.Called(ctx, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByBlockID") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) ([]byte, error)); ok { + return rf(ctx, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) []byte); ok { + r0 = rf(ctx, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProtocolStateSnapshotByHeight provides a mock function with given fields: ctx, blockHeight +func (_m *API) GetProtocolStateSnapshotByHeight(ctx context.Context, blockHeight uint64) ([]byte, error) { + ret := _m.Called(ctx, blockHeight) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByHeight") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) ([]byte, error)); ok { + return rf(ctx, blockHeight) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) []byte); ok { + r0 = rf(ctx, blockHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, blockHeight) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // NewAPI creates a new instance of API. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAPI(t mockConstructorTestingTNewAPI) *API { +// The first argument is typically a *testing.T value. 
+func NewAPI(t interface { + mock.TestingT + Cleanup(func()) +}) *API { mock := &API{} mock.Mock.Test(t) diff --git a/engine/protocol/mock/network_api.go b/engine/protocol/mock/network_api.go new file mode 100644 index 00000000000..c2b907377fa --- /dev/null +++ b/engine/protocol/mock/network_api.go @@ -0,0 +1,170 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + access "github.com/onflow/flow-go/model/access" + + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// NetworkAPI is an autogenerated mock type for the NetworkAPI type +type NetworkAPI struct { + mock.Mock +} + +// GetLatestProtocolStateSnapshot provides a mock function with given fields: ctx +func (_m *NetworkAPI) GetLatestProtocolStateSnapshot(ctx context.Context) ([]byte, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestProtocolStateSnapshot") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) ([]byte, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) []byte); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetNetworkParameters provides a mock function with given fields: ctx +func (_m *NetworkAPI) GetNetworkParameters(ctx context.Context) access.NetworkParameters { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetNetworkParameters") + } + + var r0 access.NetworkParameters + if rf, ok := ret.Get(0).(func(context.Context) access.NetworkParameters); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(access.NetworkParameters) + } + + return r0 +} + +// GetNodeVersionInfo provides a mock function with given fields: ctx +func (_m *NetworkAPI) GetNodeVersionInfo(ctx context.Context) (*access.NodeVersionInfo, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetNodeVersionInfo") + } + + var r0 *access.NodeVersionInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*access.NodeVersionInfo, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *access.NodeVersionInfo); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.NodeVersionInfo) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProtocolStateSnapshotByBlockID provides a mock function with given fields: ctx, blockID +func (_m *NetworkAPI) GetProtocolStateSnapshotByBlockID(ctx context.Context, blockID flow.Identifier) ([]byte, error) { + ret := _m.Called(ctx, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByBlockID") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) ([]byte, error)); ok { + return rf(ctx, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) []byte); ok { + r0 = rf(ctx, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProtocolStateSnapshotByHeight provides a mock function with given fields: 
ctx, blockHeight +func (_m *NetworkAPI) GetProtocolStateSnapshotByHeight(ctx context.Context, blockHeight uint64) ([]byte, error) { + ret := _m.Called(ctx, blockHeight) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByHeight") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) ([]byte, error)); ok { + return rf(ctx, blockHeight) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) []byte); ok { + r0 = rf(ctx, blockHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, blockHeight) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewNetworkAPI creates a new instance of NetworkAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNetworkAPI(t interface { + mock.TestingT + Cleanup(func()) +}) *NetworkAPI { + mock := &NetworkAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/engine/testutil/mock/nodes.go b/engine/testutil/mock/nodes.go index fc3aa000746..4754f1f5081 100644 --- a/engine/testutil/mock/nodes.go +++ b/engine/testutil/mock/nodes.go @@ -8,6 +8,7 @@ import ( "time" "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -22,7 +23,7 @@ import ( "github.com/onflow/flow-go/engine/consensus/matching" "github.com/onflow/flow-go/engine/consensus/sealing" "github.com/onflow/flow-go/engine/execution/computation" - "github.com/onflow/flow-go/engine/execution/ingestion" + executionIngest "github.com/onflow/flow-go/engine/execution/ingestion" executionprovider "github.com/onflow/flow-go/engine/execution/provider" "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/engine/verification/assigner" @@ -46,7 +47,7 @@ import ( "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -54,11 +55,12 @@ import ( // as well as all of its backend dependencies. type StateFixture struct { DBDir string - PublicDB *badger.DB + PublicDB storage.DB SecretsDB *badger.DB - Storage *storage.All + Storage *store.All ProtocolEvents *events.Distributor State protocol.ParticipantState + LockManager lockctx.Manager } // GenericNode implements a generic in-process node for tests. 
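The regenerated mocks above follow newer mockery conventions: the constructor accepts any mock.TestingT with a Cleanup method and registers AssertExpectations automatically, and a call with no stubbed return values now panics with an explicit message instead of failing on a nil dereference. A hedged sketch of typical usage (the version string is illustrative):

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	protocolmock "github.com/onflow/flow-go/engine/protocol/mock"
	accessmodel "github.com/onflow/flow-go/model/access"
)

func TestNetworkAPIMock(t *testing.T) {
	// Cleanup and AssertExpectations are wired up by the generated constructor.
	api := protocolmock.NewNetworkAPI(t)

	// Stub exactly one expected call; any unstubbed method would panic loudly.
	api.On("GetNodeVersionInfo", mock.Anything).
		Return(&accessmodel.NodeVersionInfo{Semver: "v0.37.1"}, nil).
		Once()

	info, err := api.GetNodeVersionInfo(context.Background())
	require.NoError(t, err)
	require.Equal(t, "v0.37.1", info.Semver)
}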
@@ -71,14 +73,20 @@ type GenericNode struct { Log zerolog.Logger Metrics *metrics.NoopCollector Tracer module.Tracer - PublicDB *badger.DB + PublicDB storage.DB SecretsDB *badger.DB + LockManager lockctx.Manager Headers storage.Headers Guarantees storage.Guarantees Seals storage.Seals Payloads storage.Payloads Blocks storage.Blocks QuorumCertificates storage.QuorumCertificates + Results storage.ExecutionResults + Setups storage.EpochSetups + EpochCommits storage.EpochCommits + EpochProtocolState storage.EpochProtocolStateEntries + ProtocolKVStore storage.ProtocolKVStore State protocol.ParticipantState Index storage.Index Me module.Local @@ -135,6 +143,7 @@ func (n CollectionNode) Start(t *testing.T) { n.IngestionEngine.Start(n.Ctx) n.EpochManagerEngine.Start(n.Ctx) n.ProviderEngine.Start(n.Ctx) + n.PusherEngine.Start(n.Ctx) } func (n CollectionNode) Ready() <-chan struct{} { @@ -173,21 +182,38 @@ type ConsensusNode struct { MatchingEngine *matching.Engine } -func (cn ConsensusNode) Ready() { - <-cn.IngestionEngine.Ready() - <-cn.SealingEngine.Ready() +func (cn ConsensusNode) Start(t *testing.T) { + go unittest.FailOnIrrecoverableError(t, cn.Ctx.Done(), cn.Errs) + cn.IngestionEngine.Start(cn.Ctx) + cn.SealingEngine.Start(cn.Ctx) } -func (cn ConsensusNode) Done() { - <-cn.IngestionEngine.Done() - <-cn.SealingEngine.Done() +func (cn ConsensusNode) Ready() <-chan struct{} { + return util.AllReady( + cn.IngestionEngine, + cn.SealingEngine, + ) +} + +func (cn ConsensusNode) Done() <-chan struct{} { + done := make(chan struct{}) + go func() { + cn.GenericNode.Cancel() + <-util.AllDone( + cn.IngestionEngine, + cn.SealingEngine, + ) + cn.GenericNode.Done() + close(done) + }() + return done } // ExecutionNode implements a mocked execution node for tests. type ExecutionNode struct { GenericNode FollowerState protocol.FollowerState - IngestionEngine *ingestion.Engine + IngestionEngine *executionIngest.Core ExecutionEngine *computation.Manager RequestEngine *requester.Engine ReceiptsEngine *executionprovider.Engine @@ -195,7 +221,7 @@ type ExecutionNode struct { FollowerEngine *followereng.ComplianceEngine SyncEngine *synchronization.Engine Compactor *complete.Compactor - BadgerDB *badger.DB + ProtocolDB storage.DB VM fvm.VM ExecutionState state.ExecutionState Ledger ledger.Ledger @@ -203,16 +229,19 @@ type ExecutionNode struct { Collections storage.Collections Finalizer *consensus.Finalizer MyExecutionReceipts storage.MyExecutionReceipts + StorehouseEnabled bool } -func (en ExecutionNode) Ready(ctx context.Context) { +func (en ExecutionNode) Ready(t *testing.T, ctx context.Context) { // TODO: receipt engine has been migrated to the new component interface, hence // is using Start. Other engines' startup should be refactored once migrated to // new interface. 
- irctx, _ := irrecoverable.WithSignaler(ctx) + irctx := irrecoverable.NewMockSignalerContext(t, ctx) en.ReceiptsEngine.Start(irctx) + en.IngestionEngine.Start(irctx) en.FollowerCore.Start(irctx) en.FollowerEngine.Start(irctx) + en.SyncEngine.Start(irctx) <-util.AllReady( en.Ledger, @@ -231,7 +260,6 @@ func (en ExecutionNode) Done(cancelFunc context.CancelFunc) { // to stop all (deprecated) ready-done-aware <-util.AllDone( - en.IngestionEngine, en.IngestionEngine, en.ReceiptsEngine, en.Ledger, @@ -246,14 +274,25 @@ func (en ExecutionNode) Done(cancelFunc context.CancelFunc) { } func (en ExecutionNode) AssertHighestExecutedBlock(t *testing.T, header *flow.Header) { - - height, blockID, err := en.ExecutionState.GetHighestExecutedBlockID(context.Background()) + height, blockID, err := en.ExecutionState.GetLastExecutedBlockID(context.Background()) require.NoError(t, err) require.Equal(t, header.ID(), blockID) require.Equal(t, header.Height, height) } +func (en ExecutionNode) AssertBlockIsExecuted(t *testing.T, header *flow.Header) { + executed, err := en.ExecutionState.IsBlockExecuted(header.Height, header.ID()) + require.NoError(t, err) + require.True(t, executed) +} + +func (en ExecutionNode) AssertBlockNotExecuted(t *testing.T, header *flow.Header) { + executed, err := en.ExecutionState.IsBlockExecuted(header.Height, header.ID()) + require.NoError(t, err) + require.False(t, executed) +} + // VerificationNode implements an in-process verification node for tests. type VerificationNode struct { *GenericNode @@ -263,12 +302,12 @@ type VerificationNode struct { Receipts storage.ExecutionReceipts // chunk consumer and processor for fetcher engine - ProcessedChunkIndex storage.ConsumerProgress - ChunksQueue *bstorage.ChunksQueue + ProcessedChunkIndex storage.ConsumerProgressInitializer + ChunksQueue storage.ChunksQueue ChunkConsumer *chunkconsumer.ChunkConsumer // block consumer for chunk consumer - ProcessedBlockHeight storage.ConsumerProgress + ProcessedBlockHeight storage.ConsumerProgressInitializer BlockConsumer *blockconsumer.BlockConsumer VerifierEngine *verifier.Engine diff --git a/engine/testutil/mocklocal/local.go b/engine/testutil/mocklocal/local.go index a4a819797b5..377cd0767eb 100644 --- a/engine/testutil/mocklocal/local.go +++ b/engine/testutil/mocklocal/local.go @@ -1,11 +1,11 @@ package mocklocal import ( + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" ) @@ -44,8 +44,8 @@ func (m *MockLocal) MockNodeID(id flow.Identifier) { m.id = id } -func (m *MockLocal) NotMeFilter() flow.IdentityFilter { - return filter.Not(filter.HasNodeID(m.id)) +func (m *MockLocal) NotMeFilter() flow.IdentityFilter[flow.Identity] { + return filter.Not(filter.HasNodeID[flow.Identity](m.id)) } func (m *MockLocal) SignFunc(data []byte, hasher hash.Hasher, f func(crypto.PrivateKey, []byte, hash.Hasher) (crypto.Signature, diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index cb9c0d700e9..c0f969b43c9 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -2,21 +2,24 @@ package testutil import ( "context" - "encoding/json" "fmt" "math" "path/filepath" "testing" "time" + "github.com/cockroachdb/pebble/v2" + "github.com/coreos/go-semver/semver" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-datastore" dssync 
"github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.uber.org/atomic" + "golang.org/x/time/rate" + "github.com/onflow/flow-go/cmd/build" "github.com/onflow/flow-go/consensus" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/committees" @@ -24,10 +27,12 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/consensus/hotstuff/notifications" "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/collection/epochmgr" "github.com/onflow/flow-go/engine/collection/epochmgr/factories" + "github.com/onflow/flow-go/engine/collection/ingest" collectioningest "github.com/onflow/flow-go/engine/collection/ingest" + mockcollection "github.com/onflow/flow-go/engine/collection/mock" "github.com/onflow/flow-go/engine/collection/pusher" "github.com/onflow/flow-go/engine/common/follower" "github.com/onflow/flow-go/engine/common/provider" @@ -41,10 +46,14 @@ import ( "github.com/onflow/flow-go/engine/execution/computation/committer" "github.com/onflow/flow-go/engine/execution/computation/query" "github.com/onflow/flow-go/engine/execution/ingestion" + exeFetcher "github.com/onflow/flow-go/engine/execution/ingestion/fetcher" + "github.com/onflow/flow-go/engine/execution/ingestion/stop" "github.com/onflow/flow-go/engine/execution/ingestion/uploader" executionprovider "github.com/onflow/flow-go/engine/execution/provider" executionState "github.com/onflow/flow-go/engine/execution/state" bootstrapexec "github.com/onflow/flow-go/engine/execution/state/bootstrap" + esbootstrap "github.com/onflow/flow-go/engine/execution/state/bootstrap" + "github.com/onflow/flow-go/engine/execution/storehouse" testmock "github.com/onflow/flow-go/engine/testutil/mock" verificationassigner "github.com/onflow/flow-go/engine/verification/assigner" "github.com/onflow/flow-go/engine/verification/assigner/blockconsumer" @@ -57,6 +66,7 @@ import ( "github.com/onflow/flow-go/fvm/storage/derived" "github.com/onflow/flow-go/ledger/common/pathfinder" completeLedger "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" "github.com/onflow/flow-go/ledger/complete/wal" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" @@ -64,9 +74,11 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/chainsync" "github.com/onflow/flow-go/module/chunks" + "github.com/onflow/flow-go/module/compliance" "github.com/onflow/flow-go/module/executiondatasync/execution_data" exedataprovider "github.com/onflow/flow-go/module/executiondatasync/provider" mocktracker "github.com/onflow/flow-go/module/executiondatasync/tracker/mock" + "github.com/onflow/flow-go/module/finalizedreader" confinalizer "github.com/onflow/flow-go/module/finalizer/consensus" "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/irrecoverable" @@ -82,6 +94,7 @@ import ( "github.com/onflow/flow-go/module/signature" requesterunit "github.com/onflow/flow-go/module/state_synchronization/requester/unittest" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/module/updatable_configs" "github.com/onflow/flow-go/module/validation" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p/cache" @@ -92,7 +105,11 
@@ import ( "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/state/protocol/events/gadgets" "github.com/onflow/flow-go/state/protocol/util" - storage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + storagebadger "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + storagepebble "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -102,7 +119,7 @@ import ( // // CAUTION: Please use GenericNode instead for most use-cases so that multiple nodes // may share the same root state snapshot. -func GenericNodeFromParticipants(t testing.TB, hub *stub.Hub, identity *flow.Identity, participants []*flow.Identity, chainID flow.ChainID, +func GenericNodeFromParticipants(t testing.TB, hub *stub.Hub, identity bootstrap.NodeInfo, participants []*flow.Identity, chainID flow.ChainID, options ...func(protocol.State)) testmock.GenericNode { var i int var participant *flow.Identity @@ -119,7 +136,7 @@ func GenericNodeFromParticipants(t testing.TB, hub *stub.Hub, identity *flow.Ide metrics := metrics.NewNoopCollector() // creates state fixture and bootstrap it. - rootSnapshot := unittest.RootSnapshotFixture(participants) + rootSnapshot := unittest.RootSnapshotFixtureWithChainID(participants, chainID) stateFixture := CompleteStateFixture(t, log, metrics, tracer, rootSnapshot) require.NoError(t, err) @@ -136,7 +153,7 @@ func GenericNodeFromParticipants(t testing.TB, hub *stub.Hub, identity *flow.Ide func GenericNode( t testing.TB, hub *stub.Hub, - identity *flow.Identity, + identity bootstrap.NodeInfo, root protocol.Snapshot, ) testmock.GenericNode { @@ -159,13 +176,17 @@ func GenericNode( func GenericNodeWithStateFixture(t testing.TB, stateFixture *testmock.StateFixture, hub *stub.Hub, - identity *flow.Identity, + bootstrapInfo bootstrap.NodeInfo, log zerolog.Logger, metrics *metrics.NoopCollector, tracer module.Tracer, chainID flow.ChainID) testmock.GenericNode { - me := LocalFixture(t, identity) + identity := bootstrapInfo.Identity() + privateKeys, err := bootstrapInfo.PrivateKeys() + require.NoError(t, err) + me, err := local.New(identity.IdentitySkeleton, privateKeys.StakingKey) + require.NoError(t, err) net := stub.NewNetwork(t, identity.NodeID, hub) parentCtx, cancel := context.WithCancel(context.Background()) @@ -180,13 +201,20 @@ func GenericNodeWithStateFixture(t testing.TB, Tracer: tracer, PublicDB: stateFixture.PublicDB, SecretsDB: stateFixture.SecretsDB, - State: stateFixture.State, + LockManager: stateFixture.LockManager, Headers: stateFixture.Storage.Headers, Guarantees: stateFixture.Storage.Guarantees, Seals: stateFixture.Storage.Seals, Payloads: stateFixture.Storage.Payloads, Blocks: stateFixture.Storage.Blocks, QuorumCertificates: stateFixture.Storage.QuorumCertificates, + Results: stateFixture.Storage.Results, + Setups: stateFixture.Storage.EpochSetups, + EpochCommits: stateFixture.Storage.EpochCommits, + EpochProtocolState: stateFixture.Storage.EpochProtocolStateEntries, + ProtocolKVStore: stateFixture.Storage.ProtocolKVStore, + State: stateFixture.State, + Index: stateFixture.Storage.Index, Me: me, Net: net, DBDir: stateFixture.DBDir, @@ -195,28 +223,6 @@ func GenericNodeWithStateFixture(t testing.TB, } } -// LocalFixture creates and returns a Local module for given identity. 
-func LocalFixture(t testing.TB, identity *flow.Identity) module.Local { - - // Generates test signing oracle for the nodes - // Disclaimer: it should not be used for practical applications - // - // uses identity of node as its seed - seed, err := json.Marshal(identity) - require.NoError(t, err) - // creates signing key of the node - sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed[:64]) - require.NoError(t, err) - - // sets staking public key of the node - identity.StakingPubKey = sk.PublicKey() - - me, err := local.New(identity, sk) - require.NoError(t, err) - - return me -} - // CompleteStateFixture is a test helper that creates, bootstraps, and returns a StateFixture for sake of unit testing. func CompleteStateFixture( t testing.TB, @@ -229,22 +235,26 @@ func CompleteStateFixture( dataDir := unittest.TempDir(t) publicDBDir := filepath.Join(dataDir, "protocol") secretsDBDir := filepath.Join(dataDir, "secrets") - db := unittest.TypedBadgerDB(t, publicDBDir, storage.InitPublic) - s := storage.InitAll(metric, db) - secretsDB := unittest.TypedBadgerDB(t, secretsDBDir, storage.InitSecret) + pdb := unittest.TypedPebbleDB(t, publicDBDir, pebble.Open) + db := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() + s := store.InitAll(metric, db) + secretsDB := unittest.TypedBadgerDB(t, secretsDBDir, storagebadger.InitSecret) consumer := events.NewDistributor() state, err := badgerstate.Bootstrap( metric, db, + lockManager, s.Headers, s.Seals, s.Results, s.Blocks, s.QuorumCertificates, - s.Setups, + s.EpochSetups, s.EpochCommits, - s.Statuses, + s.EpochProtocolStateEntries, + s.ProtocolKVStore, s.VersionBeacons, rootSnapshot, ) @@ -270,30 +280,33 @@ func CompleteStateFixture( DBDir: dataDir, ProtocolEvents: consumer, State: mutableState, + LockManager: lockManager, } } // CollectionNode returns a mock collection node. 
func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, rootSnapshot protocol.Snapshot) testmock.CollectionNode { - - node := GenericNode(t, hub, identity.Identity(), rootSnapshot) + node := GenericNode(t, hub, identity, rootSnapshot) privKeys, err := identity.PrivateKeys() require.NoError(t, err) - node.Me, err = local.New(identity.Identity(), privKeys.StakingKey) + node.Me, err = local.New(identity.Identity().IdentitySkeleton, privKeys.StakingKey) require.NoError(t, err) pools := epochs.NewTransactionPools( func(_ uint64) mempool.Transactions { return herocache.NewTransactions(1000, node.Log, metrics.NewNoopCollector()) }) - transactions := storage.NewTransactions(node.Metrics, node.PublicDB) - collections := storage.NewCollections(node.PublicDB, transactions) - clusterPayloads := storage.NewClusterPayloads(node.Metrics, node.PublicDB) - ingestionEngine, err := collectioningest.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Metrics, node.Me, node.ChainID.Chain(), pools, collectioningest.DefaultConfig()) + db := node.PublicDB + transactions := store.NewTransactions(node.Metrics, db) + collections := store.NewCollections(db, transactions) + clusterPayloads := store.NewClusterPayloads(node.Metrics, db) + + ingestionEngine, err := collectioningest.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Metrics, node.Me, node.ChainID.Chain(), pools, collectioningest.DefaultConfig(), + ingest.NewAddressRateLimiter(rate.Limit(1), 10)) // 10 tps require.NoError(t, err) - selector := filter.HasRole(flow.RoleAccess, flow.RoleVerification) + selector := filter.HasRole[flow.Identity](flow.RoleAccess, flow.RoleVerification) retrieve := func(collID flow.Identifier) (flow.Entity, error) { coll, err := collections.ByID(collID) return coll, err @@ -311,24 +324,27 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro retrieve) require.NoError(t, err) - pusherEngine, err := pusher.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Me, collections, transactions) + pusherEngine, err := pusher.New(node.Log, node.Net, node.State, node.Metrics, node.Metrics, node.Me) require.NoError(t, err) clusterStateFactory, err := factories.NewClusterStateFactory( - node.PublicDB, + db, + node.LockManager, node.Metrics, node.Tracer, ) require.NoError(t, err) builderFactory, err := factories.NewBuilderFactory( - node.PublicDB, + db, node.State, + node.LockManager, node.Headers, node.Tracer, node.Metrics, pusherEngine, node.Log, + updatable_configs.DefaultBySealingLagRateLimiterConfigs(), ) require.NoError(t, err) @@ -338,7 +354,7 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro node.Me, node.Metrics, node.Metrics, node.Metrics, node.State, - transactions, + compliance.DefaultConfig(), ) require.NoError(t, err) @@ -356,10 +372,12 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro createMetrics := func(chainID flow.ChainID) module.HotstuffMetrics { return metrics.NewNoopCollector() } + hotstuffFactory, err := factories.NewHotStuffFactory( node.Log, node.Me, - node.PublicDB, + db, + node.LockManager, node.State, node.Metrics, node.Metrics, @@ -390,6 +408,8 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro rootQCVoter := new(mockmodule.ClusterRootQCVoter) rootQCVoter.On("Vote", mock.Anything, mock.Anything).Return(nil) + engineEventsDistributor := mockcollection.NewEngineEvents(t) + 
engineEventsDistributor.On("ActiveClustersChanged", mock.AnythingOfType("flow.ChainIDList")).Maybe() heights := gadgets.NewHeights() node.ProtocolEvents.AddConsumer(heights) @@ -401,6 +421,7 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro rootQCVoter, factory, heights, + engineEventsDistributor, ) require.NoError(t, err) node.ProtocolEvents.AddConsumer(epochManager) @@ -417,15 +438,15 @@ func CollectionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, ro } } -func ConsensusNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identities []*flow.Identity, chainID flow.ChainID) testmock.ConsensusNode { +func ConsensusNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, identities []*flow.Identity, chainID flow.ChainID) testmock.ConsensusNode { node := GenericNodeFromParticipants(t, hub, identity, identities, chainID) - resultsDB := storage.NewExecutionResults(node.Metrics, node.PublicDB) - receiptsDB := storage.NewExecutionReceipts(node.Metrics, node.PublicDB, resultsDB, storage.DefaultCacheSize) + db := node.PublicDB + resultsDB := store.NewExecutionResults(node.Metrics, db) + receiptsDB := store.NewExecutionReceipts(node.Metrics, db, resultsDB, storagebadger.DefaultCacheSize) - guarantees, err := stdmap.NewGuarantees(1000) - require.NoError(t, err) + guarantees := stdmap.NewGuarantees(1000) receipts := consensusMempools.NewExecutionTree() @@ -436,16 +457,22 @@ func ConsensusNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit node.Headers, guarantees) // receive collections ingestionEngine, err := consensusingest.New(node.Log, node.Metrics, node.Net, node.Me, ingestionCore) - require.Nil(t, err) + require.NoError(t, err) // request receipts from execution nodes - receiptRequester, err := requester.New(node.Log, node.Metrics, node.Net, node.Me, node.State, channels.RequestReceiptsByBlockID, filter.Any, func() flow.Entity { return &flow.ExecutionReceipt{} }) - require.Nil(t, err) + receiptRequester, err := requester.New(node.Log.With().Str("entity", "receipt").Logger(), node.Metrics, node.Net, node.Me, node.State, channels.RequestReceiptsByBlockID, filter.Any, func() flow.Entity { return new(flow.ExecutionReceipt) }) + require.NoError(t, err) assigner, err := chunks.NewChunkAssigner(flow.DefaultChunkAssignmentAlpha, node.State) - require.Nil(t, err) + require.NoError(t, err) - receiptValidator := validation.NewReceiptValidator(node.State, node.Headers, node.Index, resultsDB, node.Seals) + receiptValidator := validation.NewReceiptValidator( + node.State, + node.Headers, + node.Index, + resultsDB, + node.Seals, + ) sealingEngine, err := sealing.NewEngine( node.Log, @@ -509,42 +536,28 @@ func ConsensusNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit } } -func ConsensusNodes(t *testing.T, hub *stub.Hub, nNodes int, chainID flow.ChainID) []testmock.ConsensusNode { - conIdentities := unittest.IdentityListFixture(nNodes, unittest.WithRole(flow.RoleConsensus)) - for _, id := range conIdentities { - t.Log(id.String()) - } - - // add some extra dummy identities so we have one of each role - others := unittest.IdentityListFixture(5, unittest.WithAllRolesExcept(flow.RoleConsensus)) - - identities := append(conIdentities, others...) 
- - nodes := make([]testmock.ConsensusNode, 0, len(conIdentities)) - for _, identity := range conIdentities { - nodes = append(nodes, ConsensusNode(t, hub, identity, identities, chainID)) - } - - return nodes -} - type CheckerMock struct { notifications.NoopConsumer // satisfy the FinalizationConsumer interface } -func ExecutionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identities []*flow.Identity, syncThreshold int, chainID flow.ChainID) testmock.ExecutionNode { +func ExecutionNode(t *testing.T, hub *stub.Hub, identity bootstrap.NodeInfo, identities []*flow.Identity, syncThreshold int, chainID flow.ChainID) testmock.ExecutionNode { node := GenericNodeFromParticipants(t, hub, identity, identities, chainID) - transactionsStorage := storage.NewTransactions(node.Metrics, node.PublicDB) - collectionsStorage := storage.NewCollections(node.PublicDB, transactionsStorage) - eventsStorage := storage.NewEvents(node.Metrics, node.PublicDB) - serviceEventsStorage := storage.NewServiceEvents(node.Metrics, node.PublicDB) - txResultStorage := storage.NewTransactionResults(node.Metrics, node.PublicDB, storage.DefaultCacheSize) - commitsStorage := storage.NewCommits(node.Metrics, node.PublicDB) - chunkDataPackStorage := storage.NewChunkDataPacks(node.Metrics, node.PublicDB, collectionsStorage, 100) - results := storage.NewExecutionResults(node.Metrics, node.PublicDB) - receipts := storage.NewExecutionReceipts(node.Metrics, node.PublicDB, results, storage.DefaultCacheSize) - myReceipts := storage.NewMyExecutionReceipts(node.Metrics, node.PublicDB, receipts) + db := node.PublicDB + transactionsStorage := store.NewTransactions(node.Metrics, db) + collectionsStorage := store.NewCollections(db, transactionsStorage) + eventsStorage := store.NewEvents(node.Metrics, db) + serviceEventsStorage := store.NewServiceEvents(node.Metrics, db) + txResultStorage, err := store.NewTransactionResults(node.Metrics, db, storagebadger.DefaultCacheSize) + require.NoError(t, err) + commitsStorage := store.NewCommits(node.Metrics, db) + chunkDataPackStorage := store.NewChunkDataPacks(node.Metrics, db, collectionsStorage, 100) + results := store.NewExecutionResults(node.Metrics, db) + receipts := store.NewExecutionReceipts(node.Metrics, db, results, storagebadger.DefaultCacheSize) + myReceipts := store.NewMyExecutionReceipts(node.Metrics, db, receipts) + versionBeacons := store.NewVersionBeacons(db) + headersStorage := store.NewHeaders(node.Metrics, db) + checkAuthorizedAtBlock := func(blockID flow.Identifier) (bool, error) { return protocol.IsNodeAuthorizedAt(node.State.AtBlockID(blockID), node.Me.NodeID()) } @@ -575,10 +588,10 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit diskWal, err := wal.NewDiskWAL(node.Log.With().Str("subcomponent", "wal").Logger(), nil, metricsCollector, dbDir, capacity, pathfinder.PathByteSize, wal.SegmentSize) require.NoError(t, err) - ls, err := completeLedger.NewLedger(diskWal, capacity, metricsCollector, node.Log.With().Str("compontent", "ledger").Logger(), completeLedger.DefaultPathFinderVersion) + ls, err := completeLedger.NewLedger(diskWal, capacity, metricsCollector, node.Log.With().Str("component", "ledger").Logger(), completeLedger.DefaultPathFinderVersion) require.NoError(t, err) - compactor, err := completeLedger.NewCompactor(ls, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := completeLedger.NewCompactor(ls, diskWal, zerolog.Nop(), capacity, checkpointDistance, 
checkpointsToKeep, atomic.NewBool(false), metricsCollector)
 	require.NoError(t, err)
 	<-compactor.Ready() // Need to start compactor here because BootstrapLedger() updates ledger state.
@@ -594,18 +607,66 @@
 		fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply))
 	require.NoError(t, err)
 
-	err = bootstrapper.BootstrapExecutionDatabase(node.PublicDB, commit, genesisHead)
+	matchTrie, err := ls.FindTrieByStateCommit(commit)
+	require.NoError(t, err)
+	require.NotNil(t, matchTrie)
+
+	const bootstrapCheckpointFile = "bootstrap-checkpoint"
+	checkpointFile := filepath.Join(dbDir, bootstrapCheckpointFile)
+	err = wal.StoreCheckpointV6([]*trie.MTrie{matchTrie}, dbDir, bootstrapCheckpointFile, zerolog.Nop(), 1)
+	require.NoError(t, err)
+
+	rootResult, rootSeal, err := protoState.Sealed().SealedResult()
+	require.NoError(t, err)
+
+	require.Equal(t, fmt.Sprint(rootSeal.FinalState), fmt.Sprint(commit))
+	require.Equal(t, rootSeal.ResultID, rootResult.ID())
+
+	err = bootstrapper.BootstrapExecutionDatabase(node.LockManager, db, rootSeal)
+	require.NoError(t, err)
+
+	registerDir := unittest.TempPebblePath(t)
+	pebbledb, err := storagepebble.OpenRegisterPebbleDB(node.Log.With().Str("pebbledb", "registers").Logger(), registerDir)
+	require.NoError(t, err)
+
+	checkpointHeight := uint64(0)
+	require.NoError(t, esbootstrap.ImportRegistersFromCheckpoint(node.Log, checkpointFile, checkpointHeight, matchTrie.RootHash(), pebbledb, 2))
+
+	diskStore, err := storagepebble.NewRegisters(pebbledb, storagepebble.PruningDisabled)
+	require.NoError(t, err)
+
+	reader := finalizedreader.NewFinalizedReader(headersStorage, checkpointHeight)
+	registerStore, err := storehouse.NewRegisterStore(
+		diskStore,
+		nil, // TODO(leo): replace with real WAL
+		reader,
+		node.Log,
+		storehouse.NewNoopNotifier(),
+	)
 	require.NoError(t, err)
 
+	storehouseEnabled := true
+	getLatestFinalized := func() (uint64, error) {
+		final, err := protoState.Final().Head()
+		if err != nil {
+			return 0, err
+		}
+		return final.Height, nil
+	}
+
 	execState := executionState.NewExecutionState(
-		ls, commitsStorage, node.Blocks, node.Headers, collectionsStorage, chunkDataPackStorage, results, myReceipts, eventsStorage, serviceEventsStorage, txResultStorage, node.PublicDB, node.Tracer,
+		ls, commitsStorage, node.Blocks, node.Headers, chunkDataPackStorage, results, myReceipts, eventsStorage, serviceEventsStorage, txResultStorage, db, getLatestFinalized, node.Tracer,
+		// TODO: test with register store
+		registerStore,
+		storehouseEnabled,
+		node.LockManager,
 	)
 
 	requestEngine, err := requester.New(
-		node.Log, node.Metrics, node.Net, node.Me, node.State,
+		node.Log.With().Str("entity", "collection").Logger(), node.Metrics, node.Net, node.Me, node.State,
 		channels.RequestCollections,
-		filter.HasRole(flow.RoleCollection),
-		func() flow.Entity { return &flow.Collection{} },
+		filter.HasRole[flow.Identity](flow.RoleCollection),
+		func() flow.Entity { return new(flow.Collection) },
 	)
 	require.NoError(t, err)
@@ -649,13 +710,14 @@
 		node.Metrics,
 		node.Tracer,
 		node.Me,
-		node.State,
+		computation.NewProtocolStateWrapper(node.State),
 		vmCtx,
 		committer,
 		prov,
 		computation.ComputationConfig{
 			QueryConfig:          query.NewDefaultConfig(),
 			DerivedDataCacheSize: derived.DefaultDerivedDataCacheSize,
+			MaxConcurrency:       1,
 		},
 	)
 	require.NoError(t, err)
@@ -664,40 +726,52 @@ func ExecutionNode(t *testing.T, hub *stub.Hub,
identity *flow.Identity, identit require.NoError(t, err) followerDistributor := pubsub.NewFollowerDistributor() - - latestExecutedHeight, _, err := execState.GetHighestExecutedBlockID(context.TODO()) require.NoError(t, err) // disabled by default uploader := uploader.NewManager(node.Tracer) + _, err = build.Semver() + require.ErrorIs(t, err, build.UndefinedVersionError) + ver := semver.New("0.0.0") + + latestFinalizedBlock, err := node.State.Final().Head() + require.NoError(t, err) + + unit := engine.NewUnit() + stopControl := stop.NewStopControl( + unit, + time.Second, + node.Log, + execState, + node.Headers, + versionBeacons, + ver, + latestFinalizedBlock, + false, + true, + ) + + fetcher := exeFetcher.NewCollectionFetcher(node.Log, requestEngine, node.State, false) rootHead, rootQC := getRoot(t, &node) - ingestionEngine, err := ingestion.New( + _, ingestionCore, err := ingestion.NewMachine( node.Log, - node.Net, - node.Me, + node.ProtocolEvents, requestEngine, - node.State, + fetcher, + node.Headers, node.Blocks, collectionsStorage, - eventsStorage, - serviceEventsStorage, - txResultStorage, - computationEngine, - pusherEngine, execState, + node.State, node.Metrics, - node.Tracer, - false, - checkAuthorizedAtBlock, - nil, + computationEngine, + pusherEngine, uploader, - ingestion.NewStopControl(node.Log.With().Str("compontent", "stop_control").Logger(), false, latestExecutedHeight), + stopControl, ) require.NoError(t, err) - requestEngine.WithHandle(ingestionEngine.OnCollection) - - node.ProtocolEvents.AddConsumer(ingestionEngine) + node.ProtocolEvents.AddConsumer(stopControl) followerCore, finalizer := createFollowerCore(t, &node, followerState, followerDistributor, rootHead, rootQC) // mock out hotstuff validator @@ -727,11 +801,14 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit node.Headers, finalizedHeader, core, + compliance.DefaultConfig(), ) require.NoError(t, err) idCache, err := cache.NewProtocolStateIDCache(node.Log, node.State, events.NewDistributor()) require.NoError(t, err, "could not create finalized snapshot cache") + spamConfig, err := synchronization.NewSpamDetectionConfig() + require.NoError(t, err, "could not initialize spam detection config") syncEngine, err := synchronization.New( node.Log, node.Metrics, @@ -743,26 +820,28 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit syncCore, id.NewIdentityFilterIdentifierProvider( filter.And( - filter.HasRole(flow.RoleConsensus), - filter.Not(filter.HasNodeID(node.Me.NodeID())), + filter.HasRole[flow.Identity](flow.RoleConsensus), + filter.Not(filter.HasNodeID[flow.Identity](node.Me.NodeID())), ), idCache, ), + spamConfig, synchronization.WithPollInterval(time.Duration(0)), ) require.NoError(t, err) + followerDistributor.AddFinalizationConsumer(syncEngine) return testmock.ExecutionNode{ GenericNode: node, FollowerState: followerState, - IngestionEngine: ingestionEngine, + IngestionEngine: ingestionCore, FollowerCore: followerCore, FollowerEngine: followerEng, SyncEngine: syncEngine, ExecutionEngine: computationEngine, RequestEngine: requestEngine, ReceiptsEngine: pusherEngine, - BadgerDB: node.PublicDB, + ProtocolDB: node.PublicDB, VM: computationEngine.VM(), ExecutionState: execState, Ledger: ls, @@ -771,26 +850,27 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit Finalizer: finalizer, MyExecutionReceipts: myReceipts, Compactor: compactor, + StorehouseEnabled: storehouseEnabled, } } func getRoot(t *testing.T, node 
*testmock.GenericNode) (*flow.Header, *flow.QuorumCertificate) { - rootHead, err := node.State.Params().Root() - require.NoError(t, err) + rootHead := node.State.Params().FinalizedRoot() - signers, err := node.State.AtHeight(0).Identities(filter.HasRole(flow.RoleConsensus)) + signers, err := node.State.AtHeight(0).Identities(filter.HasRole[flow.Identity](flow.RoleConsensus)) require.NoError(t, err) signerIDs := signers.NodeIDs() signerIndices, err := signature.EncodeSignersToIndices(signerIDs, signerIDs) require.NoError(t, err) - rootQC := &flow.QuorumCertificate{ + rootQC, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ View: rootHead.View, BlockID: rootHead.ID(), SignerIndices: signerIndices, SigData: unittest.SignatureFixture(), - } + }) + require.NoError(t, err) return rootHead, rootQC } @@ -816,16 +896,16 @@ func (s *RoundRobinLeaderSelection) IdentityByBlock(_ flow.Identifier, participa return id, nil } -func (s *RoundRobinLeaderSelection) IdentitiesByEpoch(_ uint64) (flow.IdentityList, error) { - return s.identities, nil +func (s *RoundRobinLeaderSelection) IdentitiesByEpoch(view uint64) (flow.IdentitySkeletonList, error) { + return s.identities.ToSkeleton(), nil } -func (s *RoundRobinLeaderSelection) IdentityByEpoch(_ uint64, participantID flow.Identifier) (*flow.Identity, error) { +func (s *RoundRobinLeaderSelection) IdentityByEpoch(view uint64, participantID flow.Identifier) (*flow.IdentitySkeleton, error) { id, found := s.identities.ByNodeID(participantID) if !found { return nil, model.NewInvalidSignerErrorf("unknown participant %x", participantID) } - return id, nil + return &id.IdentitySkeleton, nil } func (s *RoundRobinLeaderSelection) LeaderForView(view uint64) (flow.Identifier, error) { @@ -833,11 +913,11 @@ func (s *RoundRobinLeaderSelection) LeaderForView(view uint64) (flow.Identifier, } func (s *RoundRobinLeaderSelection) QuorumThresholdForView(_ uint64) (uint64, error) { - return committees.WeightThresholdToBuildQC(s.identities.TotalWeight()), nil + return committees.WeightThresholdToBuildQC(s.identities.ToSkeleton().TotalWeight()), nil } func (s *RoundRobinLeaderSelection) TimeoutThresholdForView(_ uint64) (uint64, error) { - return committees.WeightThresholdToTimeout(s.identities.TotalWeight()), nil + return committees.WeightThresholdToTimeout(s.identities.ToSkeleton().TotalWeight()), nil } func (s *RoundRobinLeaderSelection) Self() flow.Identifier { @@ -856,13 +936,14 @@ func createFollowerCore( rootHead *flow.Header, rootQC *flow.QuorumCertificate, ) (module.HotStuffFollower, *confinalizer.Finalizer) { - finalizer := confinalizer.NewFinalizer(node.PublicDB, node.Headers, followerState, trace.NewNoopTracer()) + finalizer := confinalizer.NewFinalizer(node.PublicDB.Reader(), node.Headers, followerState, trace.NewNoopTracer()) - pending := make([]*flow.Header, 0) + pending := make([]*flow.ProposalHeader, 0) // creates a consensus follower with noop consumer as the notifier followerCore, err := consensus.NewFollower( node.Log, + node.Metrics, node.Headers, finalizer, notifier, @@ -893,7 +974,7 @@ func WithGenericNode(genericNode *testmock.GenericNode) VerificationOpt { // (integration) testing. func VerificationNode(t testing.TB, hub *stub.Hub, - verIdentity *flow.Identity, // identity of this verification node. + verIdentity bootstrap.NodeInfo, // identity of this verification node. participants flow.IdentityList, // identity of all nodes in system including this verification node. 
assigner module.ChunkAssigner, chunksLimit uint, @@ -917,7 +998,7 @@ func VerificationNode(t testing.TB, if node.ChunkStatuses == nil { node.ChunkStatuses = stdmap.NewChunkStatuses(chunksLimit) err = mempoolCollector.Register(metrics.ResourceChunkStatus, node.ChunkStatuses.Size) - require.Nil(t, err) + require.NoError(t, err) } if node.ChunkRequests == nil { @@ -927,26 +1008,27 @@ func VerificationNode(t testing.TB, } if node.Results == nil { - results := storage.NewExecutionResults(node.Metrics, node.PublicDB) + db := node.PublicDB + results := store.NewExecutionResults(node.Metrics, db) node.Results = results - node.Receipts = storage.NewExecutionReceipts(node.Metrics, node.PublicDB, results, storage.DefaultCacheSize) + node.Receipts = store.NewExecutionReceipts(node.Metrics, db, results, storagebadger.DefaultCacheSize) } if node.ProcessedChunkIndex == nil { - node.ProcessedChunkIndex = storage.NewConsumerProgress(node.PublicDB, module.ConsumeProgressVerificationChunkIndex) + node.ProcessedChunkIndex = store.NewConsumerProgress(node.PublicDB, module.ConsumeProgressVerificationChunkIndex) } if node.ChunksQueue == nil { - node.ChunksQueue = storage.NewChunkQueue(node.PublicDB) - ok, err := node.ChunksQueue.Init(chunkconsumer.DefaultJobIndex) + cq := store.NewChunkQueue(node.Metrics, node.PublicDB) + ok, err := cq.Init(chunkconsumer.DefaultJobIndex) require.NoError(t, err) require.True(t, ok) + node.ChunksQueue = cq } if node.ProcessedBlockHeight == nil { - node.ProcessedBlockHeight = storage.NewConsumerProgress(node.PublicDB, module.ConsumeProgressVerificationBlockHeight) + node.ProcessedBlockHeight = store.NewConsumerProgress(node.PublicDB, module.ConsumeProgressVerificationBlockHeight) } - if node.VerifierEngine == nil { vm := fvm.NewVirtualMachine() @@ -960,7 +1042,7 @@ func VerificationNode(t testing.TB, chunkVerifier := chunks.NewChunkVerifier(vm, vmCtx, node.Log) - approvalStorage := storage.NewResultApprovals(node.Metrics, node.PublicDB) + approvalStorage := store.NewResultApprovals(node.Metrics, node.PublicDB, node.LockManager) node.VerifierEngine, err = verifier.New(node.Log, collector, @@ -969,8 +1051,10 @@ func VerificationNode(t testing.TB, node.State, node.Me, chunkVerifier, - approvalStorage) - require.Nil(t, err) + approvalStorage, + node.LockManager, + ) + require.NoError(t, err) } if node.RequesterEngine == nil { @@ -1011,12 +1095,13 @@ func VerificationNode(t testing.TB, } if node.ChunkConsumer == nil { - node.ChunkConsumer = chunkconsumer.NewChunkConsumer(node.Log, + node.ChunkConsumer, err = chunkconsumer.NewChunkConsumer(node.Log, collector, node.ProcessedChunkIndex, node.ChunksQueue, node.FetcherEngine, chunkconsumer.DefaultChunkWorkers) // defaults number of workers to 3. + require.NoError(t, err) err = mempoolCollector.Register(metrics.ResourceChunkConsumer, node.ChunkConsumer.Size) require.NoError(t, err) } diff --git a/engine/unit.go b/engine/unit.go index 3dc6b4fd4c6..1450237f0d9 100644 --- a/engine/unit.go +++ b/engine/unit.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package engine import ( @@ -9,6 +7,7 @@ import ( ) // Unit handles synchronization management, startup, and shutdown for engines. +// New components should use component.ComponentManager rather than Unit. 
type Unit struct {
	admitLock sync.Mutex // used for synchronizing context cancellation with work admittance
diff --git a/engine/verification/Readme.md b/engine/verification/Readme.md
new file mode 100644
index 00000000000..ff527a432b0
--- /dev/null
+++ b/engine/verification/Readme.md
@@ -0,0 +1,170 @@
+# Verification Node
+The Verification Node in the Flow blockchain network is a critical component responsible for
+verifying `ExecutionResult`s and generating `ResultApproval`s.
+Its primary role is to ensure the integrity and validity of block execution by performing verification processes.
+In a nutshell, the Verification Node is responsible for the following:
+1. Following the chain for new finalized blocks (`Follower` engine).
+2. Processing the execution results in the finalized blocks and determining the chunks assigned to the node (`Assigner` engine).
+3. Requesting chunk data packs from Execution Nodes for the assigned chunks (`Fetcher` and `Requester` engines).
+4. Verifying the assigned chunks and emitting `ResultApproval`s for the verified chunks to Consensus Nodes (`Verifier` engine).
+![architecture.png](architecture.png)
+
+
+## Block Consumer ([consumer.go](verification%2Fassigner%2Fblockconsumer%2Fconsumer.go))
+The `blockconsumer` package manages the processing of finalized blocks in the Verification Node of the Flow blockchain.
+Specifically, it listens for notifications from the `Follower` engine about finalized blocks, and systematically
+queues these blocks for processing. The package employs parallel workers, each an instance of the `Assigner` engine,
+to fetch and process blocks from the queue. The `BlockConsumer` coordinates this process by only assigning
+a new block to a worker once it has completed processing its current block and signaled its availability.
+This ensures that the processing is not only methodical but also resilient to node crashes.
+In case of a crash, the `BlockConsumer` resumes from where it left off by reading the processed block index from storage and reassigning blocks from the queue to workers,
+thereby guaranteeing no loss of data.
+
+## Assigner Engine
+The `Assigner` [engine](verification%2Fassigner%2Fengine.go) is an integral part of the verification process in Flow,
+focusing on processing the execution results in the finalized blocks, performing chunk assignments on the results, and
+queuing the assigned chunks for further processing. The Assigner engine is a worker of the `BlockConsumer` engine,
+which assigns finalized blocks to the Assigner engine for processing.
+This engine reads the execution receipts included in each finalized block,
+determines which chunks are assigned to the node for verification,
+and stores the assigned chunks in the chunks queue for further processing (by the `Fetcher` engine).
+
+The core behavior of the Assigner engine is implemented in the `ProcessFinalizedBlock` function.
+This function initiates the process of execution receipt indexing, chunk assignment, and processing the assigned chunks.
+For every receipt in the block, the engine determines chunk assignments using the verifiable chunk assignment algorithm of Flow.
+Each assigned chunk is then processed by the `processChunk` method. This method is responsible for storing a chunk locator in the chunks queue,
+which is a crucial step for further processing of the chunks by the `Fetcher` engine.
+Deduplication of chunk locators is handled by the chunks queue. A simplified sketch of this per-chunk flow follows.
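+
+The following is a minimal, illustrative sketch rather than the engine's actual code: the `ChunksQueue` and `NewChunkListener` interfaces are simplified, assumed stand-ins for the engine's collaborators, while `chunks.NewLocator` and the locator fields mirror the engine's actual locator construction.
+
+```go
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/chunks"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// Simplified stand-ins for the engine's collaborators (assumed shapes).
+type ChunksQueue interface {
+	// StoreChunkLocator returns false if the locator is already queued (deduplication).
+	StoreChunkLocator(locator *chunks.Locator) (bool, error)
+}
+
+type NewChunkListener interface {
+	Check() // wakes the chunk consumer to look for new jobs
+}
+
+// processAssignedChunks stores a locator for every chunk of the given result that
+// was assigned to this node, and notifies the consumer for each newly queued one.
+func processAssignedChunks(resultID flow.Identifier, assigned flow.ChunkList, queue ChunksQueue, listener NewChunkListener) error {
+	for _, chunk := range assigned {
+		locator, err := chunks.NewLocator(chunks.UntrustedLocator{
+			ResultID: resultID,
+			Index:    chunk.Index,
+		})
+		if err != nil {
+			return fmt.Errorf("could not construct locator: %w", err)
+		}
+		stored, err := queue.StoreChunkLocator(locator)
+		if err != nil {
+			return fmt.Errorf("could not store locator: %w", err)
+		}
+		if stored {
+			// a Fetcher worker will eventually pick this locator up as a job
+			listener.Check()
+		}
+	}
+	return nil
+}
+```
+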
+The Assigner engine provides robustness by handling the situation where a node is not authorized at a specific block ID.
+It verifies the role of the result executor, checks if the node has been ejected, and assesses the node's staked weight before granting authorization.
+Lastly, once the Assigner engine has completed processing the receipts in a block, it sends a notification to the block consumer. This is in line with
+the Assigner engine's role as a worker of the block consumer: the notification informs the consumer that the engine is ready to process the next block.
+This ensures a smooth and efficient flow of data in the system, promoting consistency across different parts of the Flow architecture.
+
+### Chunk Locator
+A chunk locator in the Flow blockchain is an internal structure of the Verification Nodes that points to a specific chunk
+within a specific execution result of a block. It's an important part of the verification process in the Flow network,
+allowing verification nodes to efficiently identify, retrieve, and verify individual chunks of computation.
+
+```go
+type ChunkLocator struct {
+    ResultID flow.Identifier // The identifier of the ExecutionResult
+    Index    uint64          // Index of the chunk
+}
+```
+- `ResultID`: The identifier of the execution result that the chunk is a part of. The execution result contains a list of chunks, each representing a portion of the computation carried out by execution nodes. Each execution result is linked to a specific block in the blockchain.
+- `Index`: The index of the chunk within the execution result's list of chunks. It's a straightforward way to refer to a specific chunk within a specific execution result.
+
+**Note-1**: The `ChunkLocator` doesn't contain the chunk itself but points to where the chunk can be found. In the context of the `Assigner` engine, the `ChunkLocator` is stored in a queue after chunk assignment is done, so the `Fetcher` engine can later retrieve the chunk for verification.
+**Note-2**: The `ChunkLocator` is never meant to be sent over the networking layer to another Flow node. It's an internal structure of the verification nodes, and it's only used for internal communication between the `Assigner` and `Fetcher` engines.
+
+
+## ChunkConsumer
+The `ChunkConsumer` ([consumer](verification%2Ffetcher%2Fchunkconsumer%2Fconsumer.go)) package orchestrates the processing of chunks in the Verification Node of the Flow blockchain.
+Specifically, it keeps track of chunks that are assigned for processing by the `Assigner` engine and systematically enqueues these chunks for further handling.
+To expedite the processing, the package deploys parallel workers, with each worker being an instance of the `Fetcher` engine, which retrieves and processes the chunks from the queue.
+The `ChunkConsumer` administers this process by ensuring that a new chunk is assigned to a worker only after it has finished processing its current chunk and signaled that it is ready for more.
+This systematic approach guarantees not only efficiency but also robustness against node failures. In the event that a node crashes,
+the `ChunkConsumer` picks up right where it left off, redistributing chunks from the queue to the workers, ensuring that there is no loss of data or progress.
+
+## Fetcher Engine - The Journey of a `ChunkLocator` to a `VerifiableChunkData`
+The Fetcher [engine.go](fetcher%2Fengine.go) of the Verification Nodes focuses on the lifecycle of a `ChunkLocator` as it transitions into a `VerifiableChunkData`; the stages of that journey are sketched below and detailed in the following subsections.
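+
+The sketch linearizes the journey (the `VerifiableChunkData` structure is detailed in the next subsection). It is a simplification under stated assumptions: in the real engine the stages are asynchronous and spread across collaborating engines, and the function-typed parameters are hypothetical placeholders rather than flow-go APIs.
+
+```go
+import (
+	"github.com/onflow/flow-go/model/chunks"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/verification"
+)
+
+// The pipeline stages as plain function values, keeping the sketch self-contained.
+type (
+	// resolve the locator to the result, chunk, and block header it refers to
+	lookupFn func(*chunks.Locator) (*flow.ExecutionResult, *flow.Chunk, *flow.Header, error)
+	// obtain the chunk data pack via the Requester engine
+	requestFn func(*flow.Chunk, *flow.ExecutionResult) (*flow.ChunkDataPack, error)
+	// assemble everything into a VerifiableChunkData
+	buildFn func(*flow.Chunk, *flow.Header, *flow.ExecutionResult, *flow.ChunkDataPack) (*verification.VerifiableChunkData, error)
+	// hand the verifiable chunk off to the Verifier engine
+	verifyFn func(*verification.VerifiableChunkData) error
+)
+
+// handleAssignedChunk walks one ChunkLocator through the stages described in this section.
+func handleAssignedChunk(loc *chunks.Locator, lookup lookupFn, request requestFn, build buildFn, verify verifyFn) error {
+	result, chunk, header, err := lookup(loc) // chunk, result, and header referenced by the locator
+	if err != nil {
+		return err
+	}
+	chunkDataPack, err := request(chunk, result) // chunk data pack from an execution node
+	if err != nil {
+		return err
+	}
+	vchunk, err := build(chunk, header, result, chunkDataPack) // ready for verification
+	if err != nil {
+		return err
+	}
+	return verify(vchunk) // a ResultApproval is emitted on success
+}
+```
+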
+### `VerifiableChunkData`
+`VerifiableChunkData` is a data structure that encapsulates all the necessary components and resources required to
+verify a chunk within the Flow blockchain network. It represents a chunk that has undergone processing and is ready for verification.
+
+The `VerifiableChunkData` object contains the following key elements:
+```go
+type VerifiableChunkData struct {
+	IsSystemChunk     bool                  // indicates whether this is a system chunk
+	Chunk             *flow.Chunk           // the chunk to be verified
+	Header            *flow.Header          // BlockHeader that contains this chunk
+	Result            *flow.ExecutionResult // execution result of this block
+	ChunkDataPack     *flow.ChunkDataPack   // chunk data package needed to verify this chunk
+	EndState          flow.StateCommitment  // state commitment at the end of this chunk
+	TransactionOffset uint32                // index of the first transaction in a chunk within a block
+}
+```
+1. `IsSystemChunk`: A boolean value that indicates whether the chunk is a system chunk. The system chunk is a special chunk, typically the last chunk within an execution result.
+2. `Chunk`: The actual chunk that needs to be verified. It contains the relevant data and instructions related to the execution of transactions within the blockchain.
+3. `Header`: The `BlockHeader` associated with the chunk. It provides important contextual information about the block that the chunk belongs to.
+4. `Result`: The `ExecutionResult` object that corresponds to the execution of the block containing the chunk. It contains information about the execution status, including any errors or exceptions encountered during the execution process.
+5. `ChunkDataPack`: The `ChunkDataPack`, a package containing additional data and resources specific to the chunk being verified. It provides supplementary information required for the verification process.
+6. `EndState`: The state commitment at the end of the chunk. It represents the final state of the blockchain after executing all the transactions within the chunk.
+7. `TransactionOffset`: An index indicating the position of the first transaction within the chunk in relation to the entire block. This offset helps in locating and tracking individual transactions within the blockchain.
+
+By combining these elements, the `VerifiableChunkData` object forms a comprehensive representation of a chunk ready for verification. It serves as the input to the `Verifier` engine, which uses this data to perform the necessary checks and validations to ensure the integrity and correctness of the chunk within the Flow blockchain network.
+
+### The Journey of a `ChunkLocator` to a `VerifiableChunkData`
+Upon receiving the `ChunkLocator`, the `Fetcher` engine's `validateAuthorizedExecutionNodeAtBlockID` function is responsible
+for validating the authenticity of the sender. It evaluates whether the sender is an authorized execution node for the respective block.
+The function cross-references the sender's credentials against the state snapshot of the specific block.
+In the case of unauthorized or invalid credentials, an error is logged and the `ChunkLocator` is rejected.
+For authorized credentials, processing continues.
+
+Once authenticated, the `ChunkLocator` is used to retrieve the associated chunk data pack.
+The `requestChunkDataPack` function takes the chunk locator and generates a `ChunkDataPackRequest`.
+During this stage, the function segregates execution nodes into two categories: those that agree with the execution result (`agrees`) and those that do not (`disagrees`).
+This information is encapsulated within the `ChunkDataPackRequest` and is forwarded to the `Requester` engine.
+The `Requester` engine handles the retrieval of the `ChunkDataPack` from the network of execution nodes.
+
+After the chunk data pack is successfully retrieved by the `Requester` engine,
+the next phase involves structuring this data for verification and constructing a `VerifiableChunkData`.
+This construction must be performed accurately, so that the data ends up in a state that can be properly verified.
+
+The final step in the lifecycle is forwarding the `VerifiableChunkData` to the `Verifier` engine. The `Verifier` engine is tasked with the critical function
+of thoroughly analyzing and verifying the data. Depending on the outcome of this verification process,
+the chunk may either pass verification successfully or be rejected due to discrepancies.
+
+### Handling Sealed Chunks
+In parallel, the `Fetcher` engine keeps track of the sealing status of chunks.
+The `NotifyChunkDataPackSealed` function monitors the sealing status.
+If the Consensus Nodes seal a chunk, this function ensures that the `Fetcher` engine acknowledges this update and discards the respective
+`ChunkDataPack` from its processing pipeline, as it is now sealed (i.e., has been verified by an acceptable quota of Verification Nodes).
+
+## Requester Engine - Retrieving the `ChunkDataPack`
+The `Requester` [engine](requester%2Frequester.go) is responsible for handling the request and retrieval of chunk data packs in the Flow blockchain network.
+It acts as an intermediary between the `Fetcher` engine and the Execution Nodes, facilitating the communication and coordination required
+to obtain the necessary `ChunkDataPack` for verification.
+
+The `Requester` engine receives `ChunkDataPackRequest`s from the `Fetcher`.
+These requests contain information such as the chunk ID, block height, agree and disagree executors, and other relevant details.
+Upon receiving a `ChunkDataPackRequest`, the `Requester` engine adds it to the pending requests cache for tracking and further processing.
+The `Requester` engine periodically checks the pending chunk data pack requests and dispatches them to the Execution Nodes for retrieval.
+It ensures that only qualified requests are dispatched, based on criteria such as the chunk ID and request history.
+The dispatching process involves creating a `ChunkDataRequest` message and publishing it to the network.
+The request is sent to a selected number of Execution Nodes, determined by the `requestTargets` parameter.
+
+When an Execution Node receives a `ChunkDataPackRequest`, it processes the request and generates a `ChunkDataResponse`
+message containing the requested chunk data pack. The execution node sends this response back to the `Requester` engine.
+The `Requester` engine receives the chunk data pack response, verifies its integrity, and passes it to the registered `ChunkDataPackHandler`,
+i.e., the `Fetcher` engine.
+
+### Retry and Backoff Mechanism
+If a `ChunkDataPackRequest` does not receive a response within a certain period, the `Requester` engine retries the request to ensure data retrieval.
+It implements an exponential backoff mechanism for retrying failed requests, as sketched below.
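+
+This is a rough illustration of such a backoff schedule; the function and parameter names are assumptions for the sketch, not the `Requester`'s actual configuration:
+
+```go
+import "time"
+
+// retryAfter multiplies the base interval by `multiplier` once per prior
+// attempt (exponential backoff) and caps the result at `max`.
+func retryAfter(base time.Duration, multiplier float64, attempts uint64, max time.Duration) time.Duration {
+	wait := base
+	for i := uint64(0); i < attempts; i++ {
+		wait = time.Duration(float64(wait) * multiplier)
+		if wait >= max {
+			return max
+		}
+	}
+	return wait
+}
+```
+For example, with a one-second base interval and a multiplier of 2, a request that has already been attempted three times without a response would wait 8 seconds before the next dispatch.
+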
+The retry interval, backoff multiplier, and backoff intervals can be customized through the corresponding configuration parameters.
+
+### Handling Sealed Blocks
+If a `ChunkDataPackRequest` pertains to a block that has already been sealed, the `Requester` engine recognizes this and
+removes the corresponding request from the pending requests cache.
+It notifies the `ChunkDataPackHandler` (i.e., the `Fetcher` engine) about the sealing of the block to ensure proper handling.
+
+### Parallel Chunk Data Pack Retrieval
+The `Requester` processes a number of chunk data pack requests in parallel,
+dispatching them to execution nodes and handling the received responses.
+Without further precautions, a chunk data pack request that never receives a response from the execution nodes
+could leave the `Requester` engine stuck, waiting for the missing chunk data pack.
+The retry and backoff mechanism described above mitigates this: requests are retried at increasing intervals,
+which prevents prolonged waiting and allows the engine to continue processing other requests while a response is outstanding.
+
+## Verifier Engine - Verifying Chunks
+The `Verifier` [engine](verifier%2Fengine.go) is responsible for verifying chunks, generating `ResultApproval`s, and maintaining a cache of `ResultApproval`s.
+It receives verifiable chunks along with the necessary data for verification, and verifies chunks by constructing a partial trie,
+executing transactions, and checking the final state commitment and other chunk metadata.
+If the verification is successful, it generates a `ResultApproval` and broadcasts it to the consensus nodes.
+
+The `Verifier` engine offers the following key features:
+1. **Verification of Chunks**: The engine receives verifiable chunks, which include the chunk to be verified, the associated header, execution result, and chunk data pack. It performs the verification process, which involves constructing a partial trie, executing transactions, and checking the final state commitment. The verification process ensures the integrity and validity of the chunk.
+2. **Generation of Result Approvals**: If the verification process is successful, the engine generates a result approval for the verified chunk. The result approval includes the block ID, execution result ID, chunk index, attestation, approver ID, attestation signature, and SPoCK (Secure Proof of Confidential Knowledge) signature. The result approval provides a cryptographic proof of the chunk's validity and is used to seal the block.
+3. **Cache of Result Approvals**: The engine maintains a cache of result approvals for efficient retrieval and lookup. The result approvals are stored in a storage module, allowing quick access to the approvals associated with specific chunks and execution results.
diff --git a/engine/verification/architecture.png b/engine/verification/architecture.png
new file mode 100644
index 00000000000..a1a16dec61b
Binary files /dev/null and b/engine/verification/architecture.png differ
diff --git a/engine/verification/assigner/blockconsumer/consumer.go b/engine/verification/assigner/blockconsumer/consumer.go
index 982fe418688..89871fecb65 100644
--- a/engine/verification/assigner/blockconsumer/consumer.go
+++ b/engine/verification/assigner/blockconsumer/consumer.go
@@ -21,10 +21,9 @@ const DefaultBlockWorkers = uint64(2)
 // and notifies the consumer to check in the job queue
 // (i.e., its block reader) for new block jobs.
type BlockConsumer struct { - consumer module.JobConsumer - defaultIndex uint64 - unit *engine.Unit - metrics module.VerificationMetrics + consumer module.JobConsumer + unit *engine.Unit + metrics module.VerificationMetrics } // defaultProcessedIndex returns the last sealed block height from the protocol state. @@ -43,7 +42,7 @@ func defaultProcessedIndex(state protocol.State) (uint64, error) { // index for initializing the processed index in storage. func NewBlockConsumer(log zerolog.Logger, metrics module.VerificationMetrics, - processedHeight storage.ConsumerProgress, + processedHeight storage.ConsumerProgressInitializer, blocks storage.Blocks, state protocol.State, blockProcessor assigner.FinalizedBlockProcessor, @@ -59,17 +58,20 @@ func NewBlockConsumer(log zerolog.Logger, // the block reader is where the consumer reads new finalized blocks from (i.e., jobs). jobs := jobqueue.NewFinalizedBlockReader(state, blocks) - consumer := jobqueue.NewConsumer(lg, jobs, processedHeight, worker, maxProcessing, 0) defaultIndex, err := defaultProcessedIndex(state) if err != nil { return nil, 0, fmt.Errorf("could not read default processed index: %w", err) } + consumer, err := jobqueue.NewConsumer(lg, jobs, processedHeight, worker, maxProcessing, 0, defaultIndex) + if err != nil { + return nil, 0, fmt.Errorf("could not create block consumer: %w", err) + } + blockConsumer := &BlockConsumer{ - consumer: consumer, - defaultIndex: defaultIndex, - unit: engine.NewUnit(), - metrics: metrics, + consumer: consumer, + unit: engine.NewUnit(), + metrics: metrics, } worker.withBlockConsumer(blockConsumer) @@ -99,7 +101,7 @@ func (c *BlockConsumer) OnFinalizedBlock(*model.Block) { } func (c *BlockConsumer) Ready() <-chan struct{} { - err := c.consumer.Start(c.defaultIndex) + err := c.consumer.Start() if err != nil { panic(fmt.Errorf("could not start block consumer for finder engine: %w", err)) } diff --git a/engine/verification/assigner/blockconsumer/consumer_test.go b/engine/verification/assigner/blockconsumer/consumer_test.go index 2a2bff2a343..471aba2a328 100644 --- a/engine/verification/assigner/blockconsumer/consumer_test.go +++ b/engine/verification/assigner/blockconsumer/consumer_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -18,7 +18,8 @@ import ( "github.com/onflow/flow-go/module/jobqueue" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" - bstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -26,9 +27,9 @@ import ( // and its corresponding job can be converted back to the same block. 
func TestBlockToJob(t *testing.T) { block := unittest.BlockFixture() - actual, err := jobqueue.JobToBlock(jobqueue.BlockToJob(&block)) + actual, err := jobqueue.JobToBlock(jobqueue.BlockToJob(block)) require.NoError(t, err) - require.Equal(t, &block, actual) + require.Equal(t, block, actual) } func TestProduceConsume(t *testing.T) { @@ -117,10 +118,11 @@ func withConsumer( process func(notifier module.ProcessingNotifier, block *flow.Block), withBlockConsumer func(*blockconsumer.BlockConsumer, []*flow.Block), ) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { maxProcessing := uint64(workerCount) - processedHeight := bstorage.NewConsumerProgress(db, module.ConsumeProgressVerificationBlockHeight) + processedHeight := store.NewConsumerProgress(pebbleimpl.ToDB(pdb), module.ConsumeProgressVerificationBlockHeight) collector := &metrics.NoopCollector{} tracer := trace.NewNoopTracer() log := unittest.Logger() @@ -146,10 +148,21 @@ func withConsumer( // blocks (i.e., containing guarantees), and Cs are container blocks for their preceding reference block, // Container blocks only contain receipts of their preceding reference blocks. But they do not // hold any guarantees. - root, err := s.State.Params().Root() + root, err := s.State.Final().Head() + require.NoError(t, err) + rootProtocolState, err := s.State.Final().ProtocolState() require.NoError(t, err) - clusterCommittee := participants.Filter(filter.HasRole(flow.RoleCollection)) - results := vertestutils.CompleteExecutionReceiptChainFixture(t, root, blockCount/2, vertestutils.WithClusterCommittee(clusterCommittee)) + rootProtocolStateID := rootProtocolState.ID() + clusterCommittee := participants.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) + sources := unittest.RandomSourcesFixture(110) + results := vertestutils.CompleteExecutionReceiptChainFixture( + t, + root, + rootProtocolStateID, + blockCount/2, + sources, + vertestutils.WithClusterCommittee(clusterCommittee), + ) blocks := vertestutils.ExtendStateWithFinalizedBlocks(t, results, s.State) // makes sure that we generated a block chain of requested length. require.Len(t, blocks, blockCount) diff --git a/engine/verification/assigner/engine.go b/engine/verification/assigner/engine.go index c68beba4653..e0a7e237965 100644 --- a/engine/verification/assigner/engine.go +++ b/engine/verification/assigner/engine.go @@ -6,11 +6,10 @@ import ( "sync/atomic" "github.com/rs/zerolog" - "github.com/rs/zerolog/log" - "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/state/protocol" @@ -23,7 +22,6 @@ import ( // to me to verify, and then save it to the chunks job queue for the // fetcher engine to process. type Engine struct { - unit *engine.Unit log zerolog.Logger metrics module.VerificationMetrics tracer module.Tracer @@ -35,8 +33,11 @@ type Engine struct { blockConsumerNotifier module.ProcessingNotifier // to report a block has been processed. 
stopAtHeight uint64 stopAtBlockID atomic.Value + *module.NoopReadyDoneAware } +var _ module.ReadyDoneAware = (*Engine)(nil) + func New( log zerolog.Logger, metrics module.VerificationMetrics, @@ -49,7 +50,6 @@ func New( stopAtHeight uint64, ) *Engine { e := &Engine{ - unit: engine.NewUnit(), log: log.With().Str("engine", "assigner").Logger(), metrics: metrics, tracer: tracer, @@ -68,14 +68,6 @@ func (e *Engine) WithBlockConsumerNotifier(notifier module.ProcessingNotifier) { e.blockConsumerNotifier = notifier } -func (e *Engine) Ready() <-chan struct{} { - return e.unit.Ready() -} - -func (e *Engine) Done() <-chan struct{} { - return e.unit.Done() -} - // resultChunkAssignment receives an execution result that appears in a finalized incorporating block. // In case this verification node is authorized at the reference block of this execution receipt's result, // chunk assignment is computed for the result, and the list of assigned chunks returned. @@ -84,7 +76,7 @@ func (e *Engine) resultChunkAssignment(ctx context.Context, incorporatingBlock flow.Identifier, ) (flow.ChunkList, error) { resultID := result.ID() - log := log.With(). + log := e.log.With(). Hex("result_id", logging.ID(resultID)). Hex("executed_block_id", logging.ID(result.BlockID)). Hex("incorporating_block_id", logging.ID(incorporatingBlock)). @@ -108,8 +100,7 @@ func (e *Engine) resultChunkAssignment(ctx context.Context, } e.metrics.OnChunksAssignmentDoneAtAssigner(len(chunkList)) - // TODO: de-escalate to debug level on stable version. - log.Info(). + log.Debug(). Int("total_chunks", len(result.Chunks)). Int("total_assigned_chunks", len(chunkList)). Msg("chunk assignment done") @@ -132,9 +123,14 @@ func (e *Engine) processChunk(chunk *flow.Chunk, resultID flow.Identifier, block Uint64("block_height", blockHeight). Logger() - locator := &chunks.Locator{ - ResultID: resultID, - Index: chunk.Index, + locator, err := chunks.NewLocator( + chunks.UntrustedLocator{ + ResultID: resultID, + Index: chunk.Index, + }, + ) + if err != nil { + return false, fmt.Errorf("could not construct locator: %w", err) } // pushes chunk locator to the chunks queue @@ -151,7 +147,7 @@ func (e *Engine) processChunk(chunk *flow.Chunk, resultID flow.Identifier, block // notifies chunk queue consumer of a new chunk e.newChunkListener.Check() - lg.Info().Msg("chunk locator successfully pushed to chunks queue") + lg.Debug().Msg("chunk locator successfully pushed to chunks queue") return true, nil } @@ -163,7 +159,8 @@ func (e *Engine) processChunk(chunk *flow.Chunk, resultID flow.Identifier, block func (e *Engine) ProcessFinalizedBlock(block *flow.Block) { blockID := block.ID() - span, ctx := e.tracer.StartBlockSpan(e.unit.Ctx(), blockID, trace.VERProcessFinalizedBlock) + // We don't have any existing information and don't need cancellation, so use a background (empty) context + span, ctx := e.tracer.StartBlockSpan(context.Background(), blockID, trace.VERProcessFinalizedBlock) defer span.End() e.processFinalizedBlock(ctx, block) @@ -173,7 +170,7 @@ func (e *Engine) ProcessFinalizedBlock(block *flow.Block) { // processes the chunks assigned to this verification node by pushing them to the chunks consumer. 
func (e *Engine) processFinalizedBlock(ctx context.Context, block *flow.Block) { - if e.stopAtHeight > 0 && block.Header.Height == e.stopAtHeight { + if e.stopAtHeight > 0 && block.Height == e.stopAtHeight { e.stopAtBlockID.Store(block.ID()) } @@ -188,7 +185,7 @@ func (e *Engine) processFinalizedBlock(ctx context.Context, block *flow.Block) { lg := e.log.With(). Hex("block_id", logging.ID(blockID)). - Uint64("block_height", block.Header.Height). + Uint64("block_height", block.Height). Int("result_num", len(block.Payload.Results)).Logger() lg.Debug().Msg("new finalized block arrived") @@ -219,7 +216,7 @@ func (e *Engine) processFinalizedBlock(ctx context.Context, block *flow.Block) { Msgf("Chunk for block at finalized height %d received - stopping node", e.stopAtHeight) } - processed, err := e.processChunkWithTracing(ctx, chunk, resultID, block.Header.Height) + processed, err := e.processChunkWithTracing(ctx, chunk, resultID, block.Height) if err != nil { resultLog.Fatal(). Err(err). @@ -234,8 +231,8 @@ func (e *Engine) processFinalizedBlock(ctx context.Context, block *flow.Block) { } } - e.metrics.OnFinalizedBlockArrivedAtAssigner(block.Header.Height) - lg.Info(). + e.metrics.OnFinalizedBlockArrivedAtAssigner(block.Height) + lg.Debug(). Uint64("total_assigned_chunks", assignedChunksCount). Uint64("total_processed_chunks", processedChunksCount). Msg("finished processing finalized block") @@ -274,13 +271,8 @@ func authorizedAsVerification(state protocol.State, blockID flow.Identifier, ide return false, fmt.Errorf("node has an invalid role. expected: %s, got: %s", flow.RoleVerification, identity.Role) } - // checks identity has not been ejected - if identity.Ejected { - return false, nil - } - - // checks identity has weight - if identity.Weight == 0 { + // checks identity is an active epoch participant with positive weight + if !filter.IsValidCurrentEpochParticipant(identity) || identity.InitialWeight == 0 { return false, nil } diff --git a/engine/verification/assigner/engine_test.go b/engine/verification/assigner/engine_test.go index b1a4fe4c9e2..c60b74a33f9 100644 --- a/engine/verification/assigner/engine_test.go +++ b/engine/verification/assigner/engine_test.go @@ -87,21 +87,25 @@ func SetupTest(options ...func(suite *AssignerEngineTestSuite)) *AssignerEngineT // createContainerBlock creates and returns a block that contains an execution receipt, with its corresponding chunks assignment based // on the input options. -func createContainerBlock(options ...func(result *flow.ExecutionResult, assignments *chunks.Assignment)) (*flow.Block, *chunks.Assignment) { +func createContainerBlock(options ...func(result *flow.ExecutionResult, assignments *chunks.AssignmentBuilder)) (*flow.Block, *chunks.Assignment) { result, assignment := vertestutils.CreateExecutionResult(unittest.IdentifierFixture(), options...) 
receipt := &flow.ExecutionReceipt{ - ExecutorID: unittest.IdentifierFixture(), - ExecutionResult: *result, - } - // container block - header := unittest.BlockHeaderFixture() - block := &flow.Block{ - Header: header, - Payload: &flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, - Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, + UnsignedExecutionReceipt: flow.UnsignedExecutionReceipt{ + ExecutorID: unittest.IdentifierFixture(), + ExecutionResult: *result, }, + ExecutorSignature: unittest.SignatureFixture(), } + // container block + block := unittest.BlockFixture( + unittest.Block.WithPayload( + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receipt.Stub()}, + Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, + }, + ), + ) + return block, assignment } @@ -131,8 +135,8 @@ func TestAssignerEngine(t *testing.T) { t.Run("new block happy path", func(t *testing.T) { newBlockHappyPath(t) }) - t.Run("new block zero-weight", func(t *testing.T) { - newBlockZeroWeight(t) + t.Run("new block invalid identity", func(t *testing.T) { + newBlockVerifierNotAuthorized(t) }) t.Run("new block zero chunk", func(t *testing.T) { newBlockNoChunk(t) @@ -159,10 +163,16 @@ func newBlockHappyPath(t *testing.T) { // one assigned chunk to verification node. containerBlock, assignment := createContainerBlock( vertestutils.WithChunks( - vertestutils.WithAssignee(s.myID()))) + vertestutils.WithAssignee(t, s.myID()))) result := containerBlock.Payload.Results[0] s.mockStateAtBlockID(result.BlockID) - chunksNum := s.mockChunkAssigner(flow.NewIncorporatedResult(containerBlock.ID(), result), assignment) + + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: containerBlock.ID(), + Result: result, + }) + require.NoError(t, err) + chunksNum := s.mockChunkAssigner(incorporatedResult, assignment) require.Equal(t, chunksNum, 1) // one chunk should be assigned // mocks processing assigned chunks @@ -176,7 +186,7 @@ func newBlockHappyPath(t *testing.T) { s.metrics.On("OnAssignedChunkProcessedAtAssigner").Return().Once() // sends containerBlock containing receipt to assigner engine - s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Header.Height).Return().Once() + s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Height).Return().Once() s.metrics.On("OnExecutionResultReceivedAtAssignerEngine").Return().Once() e.ProcessFinalizedBlock(containerBlock) @@ -189,47 +199,71 @@ func newBlockHappyPath(t *testing.T) { s.notifier) } -// newBlockZeroWeight evaluates that when verification node has zero weight at a reference block, -// it drops the corresponding execution receipts for that block without performing any chunk assignment. +// newBlockVerifierNotAuthorized evaluates the behavior when the verification node is not authorized to participate at the reference block, covering the following cases: +// - the verification node is joining +// - the verification node is leaving +// - the verification node has zero initial weight. +// In each case, the engine drops the corresponding execution receipts for that block without performing any chunk assignment. // It also evaluates that the chunks queue is never called on any chunks of that receipt's result. -func newBlockZeroWeight(t *testing.T) { +func newBlockVerifierNotAuthorized(t *testing.T) { + + assertIdentityAtReferenceBlock := func(identity *flow.Identity) { + // creates an assigner engine for a non-active verification node.
+ s := SetupTest(WithIdentity(identity)) + e := NewAssignerEngine(s) + + // creates a container block, with a single receipt, that contains + // no assigned chunk to verification node. + containerBlock, _ := createContainerBlock( + vertestutils.WithChunks( // all chunks assigned to some (random) identifiers, but not this verification node + vertestutils.WithAssignee(t, unittest.IdentifierFixture()), + vertestutils.WithAssignee(t, unittest.IdentifierFixture()), + vertestutils.WithAssignee(t, unittest.IdentifierFixture()))) + result := containerBlock.Payload.Results[0] + s.mockStateAtBlockID(result.BlockID) + + // once assigner engine is done processing the block, it should notify the processing notifier. + s.notifier.On("Notify", containerBlock.ID()).Return().Once() + + // sends block containing receipt to assigner engine + s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Height).Return().Once() + s.metrics.On("OnExecutionResultReceivedAtAssignerEngine").Return().Once() + e.ProcessFinalizedBlock(containerBlock) + + // when the node has zero-weight at reference block id, chunk assigner should not be called, + // and nothing should be passed to chunks queue, and + // job listener should not be notified. + s.chunksQueue.AssertNotCalled(t, "StoreChunkLocator") + s.newChunkListener.AssertNotCalled(t, "Check") + s.assigner.AssertNotCalled(t, "Assign") + + mock.AssertExpectationsForObjects(t, + s.metrics, + s.assigner, + s.notifier) + } - // creates an assigner engine for zero-weight verification node. - s := SetupTest(WithIdentity( - unittest.IdentityFixture( + t.Run("verifier-joining", func(t *testing.T) { + identity := unittest.IdentityFixture( unittest.WithRole(flow.RoleVerification), - unittest.WithWeight(0)))) - e := NewAssignerEngine(s) - - // creates a container block, with a single receipt, that contains - // no assigned chunk to verification node. - containerBlock, _ := createContainerBlock( - vertestutils.WithChunks( // all chunks assigned to some (random) identifiers, but not this verification node - vertestutils.WithAssignee(unittest.IdentifierFixture()), - vertestutils.WithAssignee(unittest.IdentifierFixture()), - vertestutils.WithAssignee(unittest.IdentifierFixture()))) - result := containerBlock.Payload.Results[0] - s.mockStateAtBlockID(result.BlockID) - - // once assigner engine is done processing the block, it should notify the processing notifier. - s.notifier.On("Notify", containerBlock.ID()).Return().Once() - - // sends block containing receipt to assigner engine - s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Header.Height).Return().Once() - s.metrics.On("OnExecutionResultReceivedAtAssignerEngine").Return().Once() - e.ProcessFinalizedBlock(containerBlock) - - // when the node has zero-weight at reference block id, chunk assigner should not be called, - // and nothing should be passed to chunks queue, and - // job listener should not be notified. 
- s.chunksQueue.AssertNotCalled(t, "StoreChunkLocator") - s.newChunkListener.AssertNotCalled(t, "Check") - s.assigner.AssertNotCalled(t, "Assign") - - mock.AssertExpectationsForObjects(t, - s.metrics, - s.assigner, - s.notifier) + unittest.WithParticipationStatus(flow.EpochParticipationStatusJoining), + ) + assertIdentityAtReferenceBlock(identity) + }) + t.Run("verifier-leaving", func(t *testing.T) { + identity := unittest.IdentityFixture( + unittest.WithRole(flow.RoleVerification), + unittest.WithParticipationStatus(flow.EpochParticipationStatusLeaving), + ) + assertIdentityAtReferenceBlock(identity) + }) + t.Run("verifier-zero-weight", func(t *testing.T) { + identity := unittest.IdentityFixture( + unittest.WithRole(flow.RoleVerification), + unittest.WithInitialWeight(0), + ) + assertIdentityAtReferenceBlock(identity) + }) } // newBlockNoChunk evaluates passing a new finalized block to assigner engine that contains @@ -243,14 +277,19 @@ func newBlockNoChunk(t *testing.T) { containerBlock, assignment := createContainerBlock() result := containerBlock.Payload.Results[0] s.mockStateAtBlockID(result.BlockID) - chunksNum := s.mockChunkAssigner(flow.NewIncorporatedResult(containerBlock.ID(), result), assignment) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: containerBlock.ID(), + Result: result, + }) + require.NoError(t, err) + chunksNum := s.mockChunkAssigner(incorporatedResult, assignment) require.Equal(t, chunksNum, 0) // no chunk should be assigned // once assigner engine is done processing the block, it should notify the processing notifier. s.notifier.On("Notify", containerBlock.ID()).Return().Once() // sends block containing receipt to assigner engine - s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Header.Height).Return().Once() + s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Height).Return().Once() s.metrics.On("OnExecutionResultReceivedAtAssignerEngine").Return().Once() e.ProcessFinalizedBlock(containerBlock) @@ -276,21 +315,26 @@ func newBlockNoAssignedChunk(t *testing.T) { // none of them is assigned to this verification node. 
containerBlock, assignment := createContainerBlock( vertestutils.WithChunks( - vertestutils.WithAssignee(unittest.IdentifierFixture()), // assigned to others - vertestutils.WithAssignee(unittest.IdentifierFixture()), // assigned to others - vertestutils.WithAssignee(unittest.IdentifierFixture()), // assigned to others - vertestutils.WithAssignee(unittest.IdentifierFixture()), // assigned to others - vertestutils.WithAssignee(unittest.IdentifierFixture()))) // assigned to others + vertestutils.WithAssignee(t, unittest.IdentifierFixture()), // assigned to others + vertestutils.WithAssignee(t, unittest.IdentifierFixture()), // assigned to others + vertestutils.WithAssignee(t, unittest.IdentifierFixture()), // assigned to others + vertestutils.WithAssignee(t, unittest.IdentifierFixture()), // assigned to others + vertestutils.WithAssignee(t, unittest.IdentifierFixture()))) // assigned to others result := containerBlock.Payload.Results[0] s.mockStateAtBlockID(result.BlockID) - chunksNum := s.mockChunkAssigner(flow.NewIncorporatedResult(containerBlock.ID(), result), assignment) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: containerBlock.ID(), + Result: result, + }) + require.NoError(t, err) + chunksNum := s.mockChunkAssigner(incorporatedResult, assignment) require.Equal(t, chunksNum, 0) // no chunk should be assigned // once assigner engine is done processing the block, it should notify the processing notifier. s.notifier.On("Notify", containerBlock.ID()).Return().Once() // sends block containing receipt to assigner engine - s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Header.Height).Return().Once() + s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Height).Return().Once() s.metrics.On("OnExecutionResultReceivedAtAssignerEngine").Return().Once() e.ProcessFinalizedBlock(containerBlock) @@ -316,14 +360,19 @@ func newBlockMultipleAssignment(t *testing.T) { // only 3 of them is assigned to this verification node. 
containerBlock, assignment := createContainerBlock( vertestutils.WithChunks( - vertestutils.WithAssignee(unittest.IdentifierFixture()), // assigned to others - vertestutils.WithAssignee(s.myID()), // assigned to me - vertestutils.WithAssignee(s.myID()), // assigned to me - vertestutils.WithAssignee(unittest.IdentifierFixture()), // assigned to others - vertestutils.WithAssignee(s.myID()))) // assigned to me + vertestutils.WithAssignee(t, unittest.IdentifierFixture()), // assigned to others + vertestutils.WithAssignee(t, s.myID()), // assigned to me + vertestutils.WithAssignee(t, s.myID()), // assigned to me + vertestutils.WithAssignee(t, unittest.IdentifierFixture()), // assigned to others + vertestutils.WithAssignee(t, s.myID()))) // assigned to me result := containerBlock.Payload.Results[0] s.mockStateAtBlockID(result.BlockID) - chunksNum := s.mockChunkAssigner(flow.NewIncorporatedResult(containerBlock.ID(), result), assignment) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: containerBlock.ID(), + Result: result, + }) + require.NoError(t, err) + chunksNum := s.mockChunkAssigner(incorporatedResult, assignment) require.Equal(t, chunksNum, 3) // 3 chunks should be assigned // mocks processing assigned chunks @@ -337,7 +386,7 @@ func newBlockMultipleAssignment(t *testing.T) { s.notifier.On("Notify", containerBlock.ID()).Return().Once() // sends containerBlock containing receipt to assigner engine - s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Header.Height).Return().Once() + s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Height).Return().Once() s.metrics.On("OnExecutionResultReceivedAtAssignerEngine").Return().Once() e.ProcessFinalizedBlock(containerBlock) @@ -359,10 +408,15 @@ func chunkQueueUnhappyPathDuplicate(t *testing.T) { // creates a container block, with a single receipt, that contains a single chunk assigned // to verification node. 
containerBlock, assignment := createContainerBlock( - vertestutils.WithChunks(vertestutils.WithAssignee(s.myID()))) + vertestutils.WithChunks(vertestutils.WithAssignee(t, s.myID()))) result := containerBlock.Payload.Results[0] s.mockStateAtBlockID(result.BlockID) - chunksNum := s.mockChunkAssigner(flow.NewIncorporatedResult(containerBlock.ID(), result), assignment) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: containerBlock.ID(), + Result: result, + }) + require.NoError(t, err) + chunksNum := s.mockChunkAssigner(incorporatedResult, assignment) require.Equal(t, chunksNum, 1) // mocks processing assigned chunks @@ -373,7 +427,7 @@ func chunkQueueUnhappyPathDuplicate(t *testing.T) { s.notifier.On("Notify", containerBlock.ID()).Return().Once() // sends block containing receipt to assigner engine - s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Header.Height).Return().Once() + s.metrics.On("OnFinalizedBlockArrivedAtAssigner", containerBlock.Height).Return().Once() s.metrics.On("OnExecutionResultReceivedAtAssignerEngine").Return().Once() e.ProcessFinalizedBlock(containerBlock) diff --git a/engine/verification/fetcher/chunkconsumer/consumer.go b/engine/verification/fetcher/chunkconsumer/consumer.go index 97cccdb4ab2..66a71aa3dca 100644 --- a/engine/verification/fetcher/chunkconsumer/consumer.go +++ b/engine/verification/fetcher/chunkconsumer/consumer.go @@ -29,18 +29,21 @@ type ChunkConsumer struct { func NewChunkConsumer( log zerolog.Logger, metrics module.VerificationMetrics, - processedIndex storage.ConsumerProgress, // to persist the processed index + processedIndexInitializer storage.ConsumerProgressInitializer, // to persist the processed index chunksQueue storage.ChunksQueue, // to read jobs (chunks) from chunkProcessor fetcher.AssignedChunkProcessor, // to process jobs (chunks) maxProcessing uint64, // max number of jobs to be processed in parallel -) *ChunkConsumer { +) (*ChunkConsumer, error) { worker := NewWorker(chunkProcessor) chunkProcessor.WithChunkConsumerNotifier(worker) jobs := &ChunkJobs{locators: chunksQueue} lg := log.With().Str("module", "chunk_consumer").Logger() - consumer := jobqueue.NewConsumer(lg, jobs, processedIndex, worker, maxProcessing, 0) + consumer, err := jobqueue.NewConsumer(lg, jobs, processedIndexInitializer, worker, maxProcessing, 0, DefaultJobIndex) + if err != nil { + return nil, err + } chunkConsumer := &ChunkConsumer{ consumer: consumer, @@ -50,7 +53,7 @@ func NewChunkConsumer( worker.consumer = chunkConsumer - return chunkConsumer + return chunkConsumer, nil } func (c *ChunkConsumer) NotifyJobIsDone(jobID module.JobID) { @@ -68,7 +71,7 @@ func (c ChunkConsumer) Check() { } func (c *ChunkConsumer) Ready() <-chan struct{} { - err := c.consumer.Start(DefaultJobIndex) + err := c.consumer.Start() if err != nil { panic(fmt.Errorf("could not start the chunk consumer for match engine: %w", err)) } diff --git a/engine/verification/fetcher/chunkconsumer/consumer_test.go b/engine/verification/fetcher/chunkconsumer/consumer_test.go index 6ca73d10aba..e314a4627ae 100644 --- a/engine/verification/fetcher/chunkconsumer/consumer_test.go +++ b/engine/verification/fetcher/chunkconsumer/consumer_test.go @@ -6,7 +6,7 @@ import ( "sync" "testing" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/stretchr/testify/require" "go.uber.org/atomic" @@ -14,7 +14,9 @@ import ( "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/module" 
"github.com/onflow/flow-go/module/metrics" - storage "github.com/onflow/flow-go/storage/badger" + storage "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -40,7 +42,7 @@ func TestProduceConsume(t *testing.T) { defer lock.Unlock() called = append(called, locator) } - WithConsumer(t, neverFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue *storage.ChunksQueue) { + WithConsumer(t, neverFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue storage.ChunksQueue) { <-consumer.Ready() locators := unittest.ChunkLocatorListFixture(10) @@ -76,7 +78,7 @@ func TestProduceConsume(t *testing.T) { finishAll.Done() }() } - WithConsumer(t, alwaysFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue *storage.ChunksQueue) { + WithConsumer(t, alwaysFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue storage.ChunksQueue) { <-consumer.Ready() locators := unittest.ChunkLocatorListFixture(10) @@ -111,7 +113,7 @@ func TestProduceConsume(t *testing.T) { finishAll.Done() }() } - WithConsumer(t, alwaysFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue *storage.ChunksQueue) { + WithConsumer(t, alwaysFinish, func(consumer *chunkconsumer.ChunkConsumer, chunksQueue storage.ChunksQueue) { <-consumer.Ready() total := atomic.NewUint32(0) @@ -139,13 +141,15 @@ func TestProduceConsume(t *testing.T) { func WithConsumer( t *testing.T, process func(module.ProcessingNotifier, *chunks.Locator), - withConsumer func(*chunkconsumer.ChunkConsumer, *storage.ChunksQueue), + withConsumer func(*chunkconsumer.ChunkConsumer, storage.ChunksQueue), ) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + unittest.RunWithPebbleDB(t, func(pebbleDB *pebble.DB) { maxProcessing := uint64(3) + db := pebbleimpl.ToDB(pebbleDB) - processedIndex := storage.NewConsumerProgress(db, module.ConsumeProgressVerificationChunkIndex) - chunksQueue := storage.NewChunkQueue(db) + collector := &metrics.NoopCollector{} + processedIndex := store.NewConsumerProgress(db, module.ConsumeProgressVerificationChunkIndex) + chunksQueue := store.NewChunkQueue(collector, db) ok, err := chunksQueue.Init(chunkconsumer.DefaultJobIndex) require.NoError(t, err) require.True(t, ok) @@ -154,8 +158,7 @@ func WithConsumer( process: process, } - collector := &metrics.NoopCollector{} - consumer := chunkconsumer.NewChunkConsumer( + consumer, err := chunkconsumer.NewChunkConsumer( unittest.Logger(), collector, processedIndex, @@ -163,6 +166,7 @@ func WithConsumer( engine, maxProcessing, ) + require.NoError(t, err) withConsumer(consumer, chunksQueue) }) diff --git a/engine/verification/fetcher/engine.go b/engine/verification/fetcher/engine.go index 23d02c02474..957373f7f2e 100644 --- a/engine/verification/fetcher/engine.go +++ b/engine/verification/fetcher/engine.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/model/verification/convert" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/trace" @@ -34,7 +35,6 @@ import ( // to the verifier engine. type Engine struct { // common - unit *engine.Unit state protocol.State // used to verify the origin ID of chunk data response, and sealing status. 
// monitoring @@ -72,7 +72,6 @@ func New( stopAtHeight uint64, ) *Engine { e := &Engine{ - unit: engine.NewUnit(), metrics: metrics, tracer: tracer, log: log.With().Str("engine", "fetcher").Logger(), @@ -104,16 +103,12 @@ func (e *Engine) Ready() <-chan struct{} { if e.chunkConsumerNotifier == nil { e.log.Fatal().Msg("missing chunk consumer notifier callback in verification fetcher engine") } - return e.unit.Ready(func() { - <-e.requester.Ready() - }) + return e.requester.Ready() } // Done terminates the engine and returns a channel that is closed when the termination is done func (e *Engine) Done() <-chan struct{} { - return e.unit.Done(func() { - <-e.requester.Done() - }) + return e.requester.Done() } // ProcessAssignedChunk is the entry point of fetcher engine. @@ -169,7 +164,8 @@ func (e *Engine) ProcessAssignedChunk(locator *chunks.Locator) { // processAssignedChunkWithTracing encapsulates the logic of processing assigned chunk with tracing enabled. func (e *Engine) processAssignedChunkWithTracing(chunk *flow.Chunk, result *flow.ExecutionResult, chunkLocatorID flow.Identifier) (bool, uint64, error) { - span, _ := e.tracer.StartBlockSpan(e.unit.Ctx(), result.BlockID, trace.VERProcessAssignedChunk) + // We don't have any existing information and don't need cancellation, so use a background (empty) context + span, _ := e.tracer.StartBlockSpan(context.Background(), result.BlockID, trace.VERProcessAssignedChunk) span.SetAttributes(attribute.Int("collection_index", int(chunk.CollectionIndex))) defer span.End() @@ -205,7 +201,7 @@ func (e *Engine) processAssignedChunk(chunk *flow.Chunk, result *flow.ExecutionR ExecutionResult: result, BlockHeight: blockHeight, } - added := e.pendingChunks.Add(status) + added := e.pendingChunks.Add(chunkLocatorID, status) if !added { return false, blockHeight, nil } @@ -247,7 +243,8 @@ func (e *Engine) HandleChunkDataPack(originID flow.Identifier, response *verific e.metrics.OnChunkDataPackArrivedAtFetcher() // make sure we still need it - status, exists := e.pendingChunks.Get(response.Index, response.ResultID) + locatorID := response.Locator.ID() + status, exists := e.pendingChunks.Get(locatorID) if !exists { lg.Debug().Msg("could not fetch pending status from mempool, dropping chunk data") return @@ -259,7 +256,7 @@ func (e *Engine) HandleChunkDataPack(originID flow.Identifier, response *verific Uint64("block_height", status.BlockHeight). Hex("result_id", logging.ID(resultID)). Uint64("chunk_index", status.ChunkIndex). - Bool("system_chunk", IsSystemChunk(status.ChunkIndex, status.ExecutionResult)). + Bool("system_chunk", convert.IsSystemChunk(status.ChunkIndex, status.ExecutionResult)). 
Logger() span, ctx := e.tracer.StartBlockSpan(context.Background(), status.ExecutionResult.BlockID, trace.VERFetcherHandleChunkDataPack) @@ -281,7 +278,7 @@ func (e *Engine) HandleChunkDataPack(originID flow.Identifier, response *verific e.metrics.OnVerifiableChunkSentToVerifier() // we need to report that the job has been finished eventually - e.chunkConsumerNotifier.Notify(status.ChunkLocatorID()) + e.chunkConsumerNotifier.Notify(locatorID) lg.Info().Msg("verifiable chunk pushed to verifier engine") } @@ -297,18 +294,20 @@ func (e *Engine) handleChunkDataPackWithTracing( ctx context.Context, originID flow.Identifier, status *verification.ChunkStatus, - chunkDataPack *flow.ChunkDataPack) (bool, error) { - + chunkDataPack *flow.ChunkDataPack, +) (bool, error) { // make sure the chunk data pack is valid err := e.validateChunkDataPackWithTracing(ctx, status.ChunkIndex, originID, chunkDataPack, status.ExecutionResult) if err != nil { - return false, NewChunkDataPackValidationError(originID, + return false, NewChunkDataPackValidationError( + originID, status.ExecutionResult.ID(), status.ChunkIndex, chunkDataPack.ID(), chunkDataPack.ChunkID, chunkDataPack.Collection.ID(), - err) + err, + ) } processed, err := e.handleValidatedChunkDataPack(ctx, status, chunkDataPack) @@ -322,11 +321,22 @@ func (e *Engine) handleChunkDataPackWithTracing( // handleValidatedChunkDataPack receives a validated chunk data pack, removes its status from the memory, and pushes a verifiable chunk for it to // verifier engine. // Boolean return value determines whether verifiable chunk pushed to verifier or not. -func (e *Engine) handleValidatedChunkDataPack(ctx context.Context, +func (e *Engine) handleValidatedChunkDataPack( + ctx context.Context, status *verification.ChunkStatus, - chunkDataPack *flow.ChunkDataPack) (bool, error) { + chunkDataPack *flow.ChunkDataPack, +) (bool, error) { + locator, err := chunks.NewLocator( + chunks.UntrustedLocator{ + ResultID: status.ExecutionResult.ID(), + Index: status.ChunkIndex, + }, + ) + if err != nil { + return false, fmt.Errorf("could not construct locator: %w", err) + } - removed := e.pendingChunks.Remove(status.ChunkIndex, status.ExecutionResult.ID()) + removed := e.pendingChunks.Remove(locator.ID()) if !removed { // we deduplicate the chunk data responses at this point, reaching here means a // duplicate chunk data response is under process concurrently, so we give up @@ -336,7 +346,7 @@ func (e *Engine) handleValidatedChunkDataPack(ctx context.Context, // pushes chunk data pack to verifier, and waits for it to be verified. 
chunk := status.ExecutionResult.Chunks[status.ChunkIndex] - err := e.pushToVerifierWithTracing(ctx, chunk, status.ExecutionResult, chunkDataPack) + err = e.pushToVerifierWithTracing(ctx, chunk, status.ExecutionResult, chunkDataPack) if err != nil { return false, fmt.Errorf("could not push the chunk to verifier engine") } @@ -413,7 +423,7 @@ func (e Engine) validateCollectionID( result *flow.ExecutionResult, chunk *flow.Chunk) error { - if IsSystemChunk(chunk.Index, result) { + if convert.IsSystemChunk(chunk.Index, result) { return e.validateSystemChunkCollection(chunkDataPack) } @@ -480,20 +490,29 @@ func (e *Engine) NotifyChunkDataPackSealed(chunkIndex uint64, resultID flow.Iden Logger() // we need to report that the job has been finished eventually - status, exists := e.pendingChunks.Get(chunkIndex, resultID) + locator, err := chunks.NewLocator( + chunks.UntrustedLocator{ + ResultID: resultID, + Index: chunkIndex, + }, + ) + // TODO: update this engine to use SignallerContext and throw an exception here + if err != nil { + e.log.Fatal().Err(err).Msg("could not construct locator") + } + status, exists := e.pendingChunks.Get(locator.ID()) if !exists { lg.Debug(). Msg("could not fetch pending status for sealed chunk from mempool, dropping chunk data") return } - chunkLocatorID := status.ChunkLocatorID() lg = lg.With(). Uint64("block_height", status.BlockHeight). Hex("result_id", logging.ID(status.ExecutionResult.ID())).Logger() - removed := e.pendingChunks.Remove(chunkIndex, resultID) + removed := e.pendingChunks.Remove(locator.ID()) - e.chunkConsumerNotifier.Notify(chunkLocatorID) + e.chunkConsumerNotifier.Notify(locator.ID()) lg.Info(). Bool("removed", removed). Msg("discards fetching chunk of an already sealed block and notified consumer") @@ -526,8 +545,8 @@ func (e *Engine) pushToVerifier(chunk *flow.Chunk, if err != nil { return fmt.Errorf("could not get block: %w", err) } - - vchunk, err := e.makeVerifiableChunkData(chunk, header, result, chunkDataPack) + snapshot := e.state.AtBlockID(header.ID()) + vchunk, err := e.makeVerifiableChunkData(chunk, header, snapshot, result, chunkDataPack) if err != nil { return fmt.Errorf("could not verify chunk: %w", err) } @@ -545,32 +564,18 @@ func (e *Engine) pushToVerifier(chunk *flow.Chunk, // chunk data to verify it. func (e *Engine) makeVerifiableChunkData(chunk *flow.Chunk, header *flow.Header, + snapshot protocol.Snapshot, result *flow.ExecutionResult, chunkDataPack *flow.ChunkDataPack, ) (*verification.VerifiableChunkData, error) { - // system chunk is the last chunk - isSystemChunk := IsSystemChunk(chunk.Index, result) - - endState, err := EndStateCommitment(result, chunk.Index, isSystemChunk) - if err != nil { - return nil, fmt.Errorf("could not compute end state of chunk: %w", err) - } - - transactionOffset, err := TransactionOffsetForChunk(result.Chunks, chunk.Index) - if err != nil { - return nil, fmt.Errorf("cannot compute transaction offset for chunk: %w", err) - } - - return &verification.VerifiableChunkData{ - IsSystemChunk: isSystemChunk, - Chunk: chunk, - Header: header, - Result: result, - ChunkDataPack: chunkDataPack, - EndState: endState, - TransactionOffset: transactionOffset, - }, nil + return convert.FromChunkDataPack( + chunk, + chunkDataPack, + header, + snapshot, + result, + ) } // requestChunkDataPack creates and dispatches a chunk data pack request to the requester engine. 
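The hunks above replace the fetcher's composite (chunk index, result ID) lookups with a single mempool key: the ID of a `chunks.Locator`. A minimal sketch of that key derivation, using only the constructor and accessor visible in this diff (the helper name and package are ours):

```go
package sketch

import (
	"fmt"

	"github.com/onflow/flow-go/model/chunks"
	"github.com/onflow/flow-go/model/flow"
)

// pendingChunkKey derives the identifier under which the fetcher now tracks
// a pending chunk: the ID of its locator (execution result ID + chunk index).
// NewLocator validates the untrusted fields before the locator is used as a key.
func pendingChunkKey(resultID flow.Identifier, chunkIndex uint64) (flow.Identifier, error) {
	locator, err := chunks.NewLocator(chunks.UntrustedLocator{
		ResultID: resultID,
		Index:    chunkIndex,
	})
	if err != nil {
		return flow.ZeroID, fmt.Errorf("could not construct locator: %w", err)
	}
	return locator.ID(), nil
}
```

With one key shared by `Add`, `Get`, and `Remove`, the bespoke `ChunkStatuses` mempool can be swapped for the generic `mempool.Mempool[flow.Identifier, *verification.ChunkStatus]`, which is exactly the substitution the test suite that follows makes.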
@@ -585,23 +590,35 @@ func (e *Engine) requestChunkDataPack(chunkIndex uint64, chunkID flow.Identifier return fmt.Errorf("could not get header for block: %x", blockID) } - allExecutors, err := e.state.AtBlockID(blockID).Identities(filter.HasRole(flow.RoleExecution)) + allExecutors, err := e.state.AtBlockID(blockID).Identities(filter.HasRole[flow.Identity](flow.RoleExecution)) if err != nil { return fmt.Errorf("could not fetch execution node ids at block %x: %w", blockID, err) } - request := &verification.ChunkDataPackRequest{ - Locator: chunks.Locator{ + locator, err := chunks.NewLocator( + chunks.UntrustedLocator{ ResultID: resultID, Index: chunkIndex, }, - ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{ - ChunkID: chunkID, - Height: header.Height, - Agrees: agrees, - Disagrees: disagrees, - Targets: allExecutors, + ) + if err != nil { + return fmt.Errorf("could not construct locator: %w", err) + } + + request, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: *locator, + ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{ + ChunkID: chunkID, + Height: header.Height, + Agrees: agrees, + Disagrees: disagrees, + Targets: allExecutors, + }, }, + ) + if err != nil { + return fmt.Errorf("could not construct chunk data pack request: %w", err) } e.requester.Request(request) @@ -659,42 +676,3 @@ func executorsOf(receipts []*flow.ExecutionReceipt, resultID flow.Identifier) (f return agrees, disagrees } - -// EndStateCommitment computes the end state of the given chunk. -func EndStateCommitment(result *flow.ExecutionResult, chunkIndex uint64, systemChunk bool) (flow.StateCommitment, error) { - var endState flow.StateCommitment - if systemChunk { - var err error - // last chunk in a result is the system chunk and takes final state commitment - endState, err = result.FinalStateCommitment() - if err != nil { - return flow.DummyStateCommitment, fmt.Errorf("can not read final state commitment, likely a bug:%w", err) - } - } else { - // any chunk except last takes the subsequent chunk's start state - endState = result.Chunks[chunkIndex+1].StartState - } - - return endState, nil -} - -// TransactionOffsetForChunk calculates transaction offset for a given chunk which is the index of the first -// transaction of this chunk within the whole block -func TransactionOffsetForChunk(chunks flow.ChunkList, chunkIndex uint64) (uint32, error) { - if int(chunkIndex) > len(chunks)-1 { - return 0, fmt.Errorf("chunk list out of bounds, len %d asked for chunk %d", len(chunks), chunkIndex) - } - var offset uint32 = 0 - for i := 0; i < int(chunkIndex); i++ { - offset += uint32(chunks[i].NumberOfTransactions) - } - return offset, nil -} - -// IsSystemChunk returns true if `chunkIndex` points to a system chunk in `result`. -// Otherwise, it returns false. -// In the current version, a chunk is a system chunk if it is the last chunk of the -// execution result. 
-func IsSystemChunk(chunkIndex uint64, result *flow.ExecutionResult) bool { - return chunkIndex == uint64(len(result.Chunks)-1) -} diff --git a/engine/verification/fetcher/engine_test.go b/engine/verification/fetcher/engine_test.go index 80cd43e905c..3af15e04fc0 100644 --- a/engine/verification/fetcher/engine_test.go +++ b/engine/verification/fetcher/engine_test.go @@ -18,10 +18,11 @@ import ( "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/model/verification/convert" mempool "github.com/onflow/flow-go/module/mempool/mock" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" flowprotocol "github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" storage "github.com/onflow/flow-go/storage/mock" @@ -33,15 +34,15 @@ type FetcherEngineTestSuite struct { log zerolog.Logger metrics *module.VerificationMetrics tracer *trace.NoopTracer - verifier *mocknetwork.Engine // the verifier engine - state *protocol.State // used to verify the request origin - pendingChunks *mempool.ChunkStatuses // used to store all the pending chunks that assigned to this node - blocks *storage.Blocks // used to for verifying collection ID. - headers *storage.Headers // used for building verifiable chunk data. - chunkConsumerNotifier *module.ProcessingNotifier // to report a chunk has been processed - results *storage.ExecutionResults // to retrieve execution result of an assigned chunk - receipts *storage.ExecutionReceipts // used to find executor of the chunk - requester *mockfetcher.ChunkDataPackRequester // used to request chunk data packs from network + verifier *mocknetwork.Engine // the verifier engine + state *protocol.State // used to verify the request origin + pendingChunks *mempool.Mempool[flow.Identifier, *verification.ChunkStatus] // used to store all the pending chunks that are assigned to this node + blocks *storage.Blocks // used for verifying collection ID. + headers *storage.Headers // used for building verifiable chunk data. + chunkConsumerNotifier *module.ProcessingNotifier // to report a chunk has been processed + results *storage.ExecutionResults // to retrieve execution result of an assigned chunk + receipts *storage.ExecutionReceipts // used to find executor of the chunk + requester *mockfetcher.ChunkDataPackRequester // used to request chunk data packs from network } // setupTest initiates a test suite prior to each test. @@ -52,7 +53,7 @@ func setupTest() *FetcherEngineTestSuite { tracer: trace.NewNoopTracer(), verifier: &mocknetwork.Engine{}, state: &protocol.State{}, - pendingChunks: &mempool.ChunkStatuses{}, + pendingChunks: &mempool.Mempool[flow.Identifier, *verification.ChunkStatus]{}, headers: &storage.Headers{}, blocks: &storage.Blocks{}, chunkConsumerNotifier: &module.ProcessingNotifier{}, @@ -138,7 +139,7 @@ func testProcessAssignChunkHappyPath(t *testing.T, chunkNum int, assignedNum int s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return().Times(len(locators)) // the chunks belong to an unsealed block. - mockBlockSealingStatus(s.state, s.headers, block.Header, false) + mockBlockSealingStatus(s.state, s.headers, block.ToHeader(), false) // mocks resources on fetcher engine side.
mockResultsByIDs(s.results, []*flow.ExecutionResult{result}) @@ -199,14 +200,14 @@ func TestChunkResponse_RemovingStatusFails(t *testing.T) { // creates a result with specified 2 chunks and a single assigned chunk to this fetcher engine. block, result, statuses, _, collMap := completeChunkStatusListFixture(t, 2, 1) _, _, agrees, _ := mockReceiptsBlockID(t, block.ID(), s.receipts, result, 2, 2) - mockBlockSealingStatus(s.state, s.headers, block.Header, false) + mockBlockSealingStatus(s.state, s.headers, block.ToHeader(), false) mockResultsByIDs(s.results, []*flow.ExecutionResult{result}) mockBlocksStorage(s.blocks, s.headers, block) mockPendingChunksGet(s.pendingChunks, statuses) mockStateAtBlockIDForIdentities(s.state, block.ID(), agrees) - chunkLocatorID := statuses[0].ChunkLocatorID() + chunkLocatorID := unittest.ChunkLocatorFixture(statuses[0].ExecutionResult.ID(), statuses[0].ChunkIndex).ID() // trying to remove the pending status fails. mockPendingChunksRemove(t, s.pendingChunks, statuses, false) @@ -236,7 +237,7 @@ func TestProcessAssignChunkSealedAfterRequest(t *testing.T) { // also the chunk belongs to an unsealed block. block, result, statuses, locators, collMap := completeChunkStatusListFixture(t, 2, 1) _, _, agrees, disagrees := mockReceiptsBlockID(t, block.ID(), s.receipts, result, 2, 2) - mockBlockSealingStatus(s.state, s.headers, block.Header, false) + mockBlockSealingStatus(s.state, s.headers, block.ToHeader(), false) s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return().Times(len(locators)) // mocks resources on fetcher engine side. @@ -338,7 +339,27 @@ func TestChunkResponse_InvalidChunkDataPack(t *testing.T) { // we don't alter chunk data pack content }, mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) { - identity.Weight = 0 + identity.EpochParticipationStatus = flow.EpochParticipationStatusJoining + mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity}) + }, + msg: "participation-status-joining-origin-id", + }, + { + alterChunkDataResponse: func(cdp *flow.ChunkDataPack) { + // we don't alter chunk data pack content + }, + mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) { + identity.EpochParticipationStatus = flow.EpochParticipationStatusLeaving + mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity}) + }, + msg: "participation-status-leaving-origin-id", + }, + { + alterChunkDataResponse: func(cdp *flow.ChunkDataPack) { + // we don't alter chunk data pack content + }, + mockStateFunc: func(identity flow.Identity, state *protocol.State, blockID flow.Identifier) { + identity.InitialWeight = 0 mockStateAtBlockIDForIdentities(state, blockID, flow.IdentityList{&identity}) }, msg: "zero-weight-origin-id", @@ -384,7 +405,7 @@ func testInvalidChunkDataResponse(t *testing.T, mockPendingChunksGet(s.pendingChunks, statuses) mockBlocksStorage(s.blocks, s.headers, block) - chunkLocatorID := statuses[0].ChunkLocatorID() + chunkLocatorID := unittest.ChunkLocatorFixture(statuses[0].ExecutionResult.ID(), statuses[0].ChunkIndex).ID() responses, _ := verifiableChunksFixture(t, statuses, block, result, collMap) // alters chunk data pack so that it become invalid. 
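The new table cases above replace the single zero-weight check with three rejection scenarios. Read together, they imply the origin of a chunk data response must be an active epoch participant with non-zero initial weight. A hypothetical predicate capturing that reading; the engine applies the check through its identity query, so this helper is illustrative only, and `flow.EpochParticipationStatusActive` is our assumption for the accepted status:

```go
package sketch

import "github.com/onflow/flow-go/model/flow"

// isEligibleOrigin mirrors what the three new test cases reject: identities
// that are joining, leaving, or carry zero initial weight.
func isEligibleOrigin(identity *flow.Identity) bool {
	if identity.EpochParticipationStatus != flow.EpochParticipationStatusActive {
		// covers "participation-status-joining-origin-id" and
		// "participation-status-leaving-origin-id"
		return false
	}
	// covers "zero-weight-origin-id"
	return identity.InitialWeight != 0
}
```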
@@ -421,10 +442,12 @@ func TestChunkResponse_MissingStatus(t *testing.T) { status := statuses[0] responses, _ := verifiableChunksFixture(t, statuses, block, result, collMap) - chunkLocatorID := statuses[0].ChunkLocatorID() + chunkLocatorID := unittest.ChunkLocatorFixture(statuses[0].ExecutionResult.ID(), statuses[0].ChunkIndex).ID() + var zero *verification.ChunkStatus // mocks there is no pending status for this chunk at fetcher engine. - s.pendingChunks.On("Get", status.ChunkIndex, result.ID()).Return(nil, false) + locator := unittest.ChunkLocatorFixture(result.ID(), status.ChunkIndex) + s.pendingChunks.On("Get", locator.ID()).Return(zero, false) s.metrics.On("OnChunkDataPackArrivedAtFetcher").Return().Times(len(responses)) e.HandleChunkDataPack(unittest.IdentifierFixture(), responses[chunkLocatorID]) @@ -444,7 +467,7 @@ func TestChunkResponse_MissingStatus(t *testing.T) { } // TestSkipChunkOfSealedBlock evaluates that if fetcher engine receives a chunk belonging to a sealed block, -// it drops it without processing it any further and and notifies consumer +// it drops it without processing it any further and notifies consumer // that it is done with processing that chunk. func TestSkipChunkOfSealedBlock(t *testing.T) { s := setupTest() @@ -453,11 +476,11 @@ func TestSkipChunkOfSealedBlock(t *testing.T) { // creates a single chunk locator, and mocks its corresponding block sealed. block := unittest.BlockFixture() result := unittest.ExecutionResultFixture(unittest.WithExecutionResultBlockID(block.ID())) - statuses := unittest.ChunkStatusListFixture(t, block.Header.Height, result, 1) + statuses := unittest.ChunkStatusListFixture(t, block.Height, result, 1) locators := unittest.ChunkStatusListToChunkLocatorFixture(statuses) s.metrics.On("OnAssignedChunkReceivedAtFetcher").Return().Once() - mockBlockSealingStatus(s.state, s.headers, block.Header, true) + mockBlockSealingStatus(s.state, s.headers, block.ToHeader(), true) mockResultsByIDs(s.results, []*flow.ExecutionResult{result}) // expects processing notifier being invoked upon sealed chunk detected, @@ -501,37 +524,30 @@ func TestStopAtHeight(t *testing.T) { mockBlockSealingStatus(s.state, s.headers, headerB, false) mockResultsByIDs(s.results, []*flow.ExecutionResult{resultA, resultB}) - locatorA := chunks.Locator{ - ResultID: resultA.ID(), - Index: 0, - } - locatorB := chunks.Locator{ - ResultID: resultB.ID(), - Index: 0, - } + locatorA := unittest.ChunkLocatorFixture(resultA.ID(), 0) + locatorB := unittest.ChunkLocatorFixture(resultB.ID(), 0) // expects processing notifier being invoked upon sealed chunk detected, // which means the termination of processing a sealed chunk on fetcher engine // side. mockChunkConsumerNotifier(t, s.chunkConsumerNotifier, flow.GetIDs([]flow.Entity{locatorA, locatorB})) - s.pendingChunks.On("Add", mock.Anything).Run(func(args mock.Arguments) { - spew.Dump(args[0].(*verification.ChunkStatus).BlockHeight) + s.pendingChunks.On("Add", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + spew.Dump(args[1].(*verification.ChunkStatus).BlockHeight) }).Return(false) - e.ProcessAssignedChunk(&locatorA) - e.ProcessAssignedChunk(&locatorB) + e.ProcessAssignedChunk(locatorA) + e.ProcessAssignedChunk(locatorB) mock.AssertExpectationsForObjects(t, s.results, s.metrics) // we should not request a duplicate chunk status. 
s.requester.AssertNotCalled(t, "Request") - s.pendingChunks.AssertNotCalled(t, "Add", mock.MatchedBy(func(status *verification.ChunkStatus) bool { + s.pendingChunks.AssertNotCalled(t, "Add", mock.Anything, mock.MatchedBy(func(status *verification.ChunkStatus) bool { return status.BlockHeight == headerB.Height })) - - s.pendingChunks.AssertCalled(t, "Add", mock.MatchedBy(func(status *verification.ChunkStatus) bool { + s.pendingChunks.AssertCalled(t, "Add", mock.Anything, mock.MatchedBy(func(status *verification.ChunkStatus) bool { return status.BlockHeight == headerA.Height })) } @@ -613,23 +629,23 @@ func mockStateAtBlockIDForMissingIdentities(state *protocol.State, blockID flow. // mockPendingChunksAdd mocks the add method of pending chunks for expecting only the specified list of chunk statuses. // Each chunk status should be added only once. // It should return the specified added boolean variable as the result of mocking. -func mockPendingChunksAdd(t *testing.T, pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus, added bool) { +func mockPendingChunksAdd(t *testing.T, pendingChunks *mempool.Mempool[flow.Identifier, *verification.ChunkStatus], list []*verification.ChunkStatus, added bool) { mu := &sync.Mutex{} - pendingChunks.On("Add", mock.Anything). + pendingChunks.On("Add", mock.Anything, mock.Anything). Run(func(args mock.Arguments) { // to provide mutual exclusion under concurrent invocations. mu.Lock() defer mu.Unlock() - actual, ok := args[0].(*verification.ChunkStatus) + actual, ok := args[1].(*verification.ChunkStatus) require.True(t, ok) // there should be a matching chunk status with the received one. - actualLocatorID := actual.ChunkLocatorID() + actualLocatorID := unittest.ChunkLocatorFixture(actual.ExecutionResult.ID(), actual.ChunkIndex).ID() for _, expected := range list { - expectedLocatorID := expected.ChunkLocatorID() + expectedLocatorID := unittest.ChunkLocatorFixture(expected.ExecutionResult.ID(), expected.ChunkIndex).ID() if expectedLocatorID == actualLocatorID { require.Equal(t, expected.ExecutionResult, actual.ExecutionResult) return @@ -643,24 +659,22 @@ func mockPendingChunksAdd(t *testing.T, pendingChunks *mempool.ChunkStatuses, li // mockPendingChunksRemove mocks the remove method of pending chunks for expecting only the specified list of chunk statuses. // Each chunk status should be removed only once. // It should return the specified added boolean variable as the result of mocking. -func mockPendingChunksRemove(t *testing.T, pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus, removed bool) { +func mockPendingChunksRemove(t *testing.T, pendingChunks *mempool.Mempool[flow.Identifier, *verification.ChunkStatus], list []*verification.ChunkStatus, removed bool) { mu := &sync.Mutex{} - pendingChunks.On("Remove", mock.Anything, mock.Anything). + pendingChunks.On("Remove", mock.Anything). Run(func(args mock.Arguments) { // to provide mutual exclusion under concurrent invocations. mu.Lock() defer mu.Unlock() - actualIndex, ok := args[0].(uint64) - require.True(t, ok) - - actualResultID, ok := args[1].(flow.Identifier) + actualChunkLocatorID, ok := args[0].(flow.Identifier) require.True(t, ok) // there should be a matching chunk status with the received one. 
for _, expected := range list { - if expected.ChunkIndex == actualIndex && expected.ExecutionResult.ID() == actualResultID { + expectedLocatorID := unittest.ChunkLocatorFixture(expected.ExecutionResult.ID(), expected.ChunkIndex).ID() + if actualChunkLocatorID == expectedLocatorID { return } } @@ -670,25 +684,27 @@ func mockPendingChunksRemove(t *testing.T, pendingChunks *mempool.ChunkStatuses, } // mockPendingChunksGet mocks the Get method of pending chunks for expecting only the specified list of chunk statuses. -func mockPendingChunksGet(pendingChunks *mempool.ChunkStatuses, list []*verification.ChunkStatus) { +func mockPendingChunksGet(pendingChunks *mempool.Mempool[flow.Identifier, *verification.ChunkStatus], list []*verification.ChunkStatus) { mu := &sync.Mutex{} - pendingChunks.On("Get", mock.Anything, mock.Anything).Return( - func(chunkIndex uint64, resultID flow.Identifier) *verification.ChunkStatus { + pendingChunks.On("Get", mock.Anything).Return( + func(chunkLocatorID flow.Identifier) *verification.ChunkStatus { // to provide mutual exclusion under concurrent invocations. mu.Lock() defer mu.Unlock() for _, expected := range list { - if expected.ChunkIndex == chunkIndex && expected.ExecutionResult.ID() == resultID { + expectedLocatorID := unittest.ChunkLocatorFixture(expected.ExecutionResult.ID(), expected.ChunkIndex).ID() + if expectedLocatorID == chunkLocatorID { return expected } } return nil }, - func(chunkIndex uint64, resultID flow.Identifier) bool { + func(chunkLocatorID flow.Identifier) bool { for _, expected := range list { - if expected.ChunkIndex == chunkIndex && expected.ExecutionResult.ID() == resultID { + expectedLocatorID := unittest.ChunkLocatorFixture(expected.ExecutionResult.ID(), expected.ChunkIndex).ID() + if expectedLocatorID == chunkLocatorID { return true } } @@ -716,12 +732,14 @@ func mockVerifierEngine(t *testing.T, require.True(t, ok) // verifiable chunk data should be distinct. - _, ok = seen[chunks.ChunkLocatorID(vc.Result.ID(), vc.Chunk.Index)] + locatorID := unittest.ChunkLocatorFixture(vc.Result.ID(), vc.Chunk.Index).ID() + + _, ok = seen[locatorID] require.False(t, ok, "duplicated verifiable chunk received") - seen[chunks.ChunkLocatorID(vc.Result.ID(), vc.Chunk.Index)] = struct{}{} + seen[locatorID] = struct{}{} // we should expect this verifiable chunk and its fields should match our expectation - expected, ok := verifiableChunks[chunks.ChunkLocatorID(vc.Result.ID(), vc.Chunk.Index)] + expected, ok := verifiableChunks[locatorID] require.True(t, ok, "verifier engine received an unknown verifiable chunk data") if vc.IsSystemChunk { @@ -737,10 +755,10 @@ func mockVerifierEngine(t *testing.T, require.Equal(t, expected.Result.ID(), vc.Result.ID()) require.Equal(t, expected.Header.ID(), vc.Header.ID()) - isSystemChunk := fetcher.IsSystemChunk(vc.Chunk.Index, vc.Result) + isSystemChunk := convert.IsSystemChunk(vc.Chunk.Index, vc.Result) require.Equal(t, isSystemChunk, vc.IsSystemChunk) - endState, err := fetcher.EndStateCommitment(vc.Result, vc.Chunk.Index, isSystemChunk) + endState, err := convert.EndStateCommitment(vc.Result, vc.Chunk.Index, isSystemChunk) require.NoError(t, err) require.Equal(t, endState, vc.EndState) @@ -784,9 +802,7 @@ func mockBlockSealingStatus(state *protocol.State, headers *storage.Headers, hea // mockBlocksStorage mocks blocks and headers storages for given block. 
func mockBlocksStorage(blocks *storage.Blocks, headers *storage.Headers, block *flow.Block) { - blockID := block.ID() - blocks.On("ByID", blockID).Return(block, nil) - headers.On("ByBlockID", blockID).Return(block.Header, nil) + blocks.On("ByID", block.ID()).Return(block, nil) } // mockRequester mocks the chunk data pack requester with the given chunk data pack requests. @@ -839,7 +855,7 @@ func chunkDataPackResponsesFixture(t *testing.T, responses := make(map[flow.Identifier]*verification.ChunkDataPackResponse) for _, status := range statuses { - chunkLocatorID := status.ChunkLocatorID() + chunkLocatorID := unittest.ChunkLocatorFixture(status.ExecutionResult.ID(), status.ChunkIndex).ID() responses[chunkLocatorID] = chunkDataPackResponseFixture(t, status.Chunk(), collMap[status.Chunk().ID()], result) } @@ -852,16 +868,15 @@ func chunkDataPackResponseFixture(t *testing.T, collection *flow.Collection, result *flow.ExecutionResult) *verification.ChunkDataPackResponse { - require.Equal(t, collection != nil, !fetcher.IsSystemChunk(chunk.Index, result), "only non-system chunks must have a collection") + require.Equal(t, collection != nil, !convert.IsSystemChunk(chunk.Index, result), "only non-system chunks must have a collection") return &verification.ChunkDataPackResponse{ - Locator: chunks.Locator{ - ResultID: result.ID(), - Index: chunk.Index, - }, - Cdp: unittest.ChunkDataPackFixture(chunk.ID(), + Locator: *unittest.ChunkLocatorFixture(result.ID(), chunk.Index), + Cdp: unittest.ChunkDataPackFixture( + chunk.ID(), unittest.WithStartState(chunk.StartState), - unittest.WithChunkDataPackCollection(collection)), + unittest.WithChunkDataPackCollection(collection), + ), } } @@ -878,7 +893,7 @@ func verifiableChunksFixture(t *testing.T, verifiableChunks := make(map[flow.Identifier]*verification.VerifiableChunkData) for _, status := range statuses { - chunkLocatorID := status.ChunkLocatorID() + chunkLocatorID := unittest.ChunkLocatorFixture(status.ExecutionResult.ID(), status.ChunkIndex).ID() response, ok := responses[chunkLocatorID] require.True(t, ok, "missing chunk data response") @@ -897,13 +912,13 @@ func verifiableChunkFixture(t *testing.T, result *flow.ExecutionResult, chunkDataPack *flow.ChunkDataPack) *verification.VerifiableChunkData { - offsetForChunk, err := fetcher.TransactionOffsetForChunk(result.Chunks, chunk.Index) + offsetForChunk, err := convert.TransactionOffsetForChunk(result.Chunks, chunk.Index) require.NoError(t, err) // TODO: add end state return &verification.VerifiableChunkData{ Chunk: chunk, - Header: block.Header, + Header: block.ToHeader(), Result: result, ChunkDataPack: chunkDataPack, TransactionOffset: offsetForChunk, @@ -921,7 +936,7 @@ func chunkRequestsFixture( requests := make(map[flow.Identifier]*verification.ChunkDataPackRequest) for _, status := range statuses { - chunkLocatorID := status.ChunkLocatorID() + chunkLocatorID := unittest.ChunkLocatorFixture(status.ExecutionResult.ID(), status.ChunkIndex).ID() requests[chunkLocatorID] = chunkRequestFixture(resultID, status, agrees, disagrees) } @@ -932,16 +947,14 @@ func chunkRequestsFixture( // // Agrees and disagrees are the list of execution node identifiers that generate the same and contradicting execution result // with the execution result that chunks belong to, respectively. 
-func chunkRequestFixture(resultID flow.Identifier, +func chunkRequestFixture( + resultID flow.Identifier, status *verification.ChunkStatus, agrees flow.IdentityList, - disagrees flow.IdentityList) *verification.ChunkDataPackRequest { - + disagrees flow.IdentityList, +) *verification.ChunkDataPackRequest { return &verification.ChunkDataPackRequest{ - Locator: chunks.Locator{ - ResultID: resultID, - Index: status.ChunkIndex, - }, + Locator: *unittest.ChunkLocatorFixture(resultID, status.ChunkIndex), ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{ ChunkID: status.Chunk().ID(), Height: status.BlockHeight, @@ -969,18 +982,20 @@ func completeChunkStatusListFixture(t *testing.T, chunkCount int, statusCount in collections := unittest.CollectionListFixture(chunkCount) - block := unittest.BlockWithGuaranteesFixture( - unittest.CollectionGuaranteesWithCollectionIDFixture(collections), + block := unittest.BlockFixture( + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees(unittest.CollectionGuaranteesWithCollectionIDFixture(collections)...)), + ), ) result := unittest.ExecutionResultFixture( unittest.WithBlock(block), unittest.WithChunks(uint(chunkCount))) - statuses := unittest.ChunkStatusListFixture(t, block.Header.Height, result, statusCount) + statuses := unittest.ChunkStatusListFixture(t, block.Height, result, statusCount) locators := unittest.ChunkStatusListToChunkLocatorFixture(statuses) for _, status := range statuses { - if fetcher.IsSystemChunk(status.ChunkIndex, result) { + if convert.IsSystemChunk(status.ChunkIndex, result) { // system-chunk should have a nil collection continue } @@ -992,7 +1007,7 @@ func completeChunkStatusListFixture(t *testing.T, chunkCount int, statusCount in func TestTransactionOffsetForChunk(t *testing.T) { t.Run("first chunk index always returns zero offset", func(t *testing.T) { - offsetForChunk, err := fetcher.TransactionOffsetForChunk([]*flow.Chunk{nil}, 0) + offsetForChunk, err := convert.TransactionOffsetForChunk([]*flow.Chunk{nil}, 0) require.NoError(t, err) assert.Equal(t, uint32(0), offsetForChunk) }) @@ -1022,19 +1037,19 @@ func TestTransactionOffsetForChunk(t *testing.T) { }, } - offsetForChunk, err := fetcher.TransactionOffsetForChunk(chunksList, 0) + offsetForChunk, err := convert.TransactionOffsetForChunk(chunksList, 0) require.NoError(t, err) assert.Equal(t, uint32(0), offsetForChunk) - offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 1) + offsetForChunk, err = convert.TransactionOffsetForChunk(chunksList, 1) require.NoError(t, err) assert.Equal(t, uint32(1), offsetForChunk) - offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 2) + offsetForChunk, err = convert.TransactionOffsetForChunk(chunksList, 2) require.NoError(t, err) assert.Equal(t, uint32(3), offsetForChunk) - offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 3) + offsetForChunk, err = convert.TransactionOffsetForChunk(chunksList, 3) require.NoError(t, err) assert.Equal(t, uint32(6), offsetForChunk) }) @@ -1043,7 +1058,7 @@ func TestTransactionOffsetForChunk(t *testing.T) { chunksList := make([]*flow.Chunk, 2) - _, err := fetcher.TransactionOffsetForChunk(chunksList, 2) + _, err := convert.TransactionOffsetForChunk(chunksList, 2) require.Error(t, err) }) } diff --git a/engine/verification/fetcher/errors.go b/engine/verification/fetcher/errors.go index d7a605e0341..27b28800aec 100644 --- a/engine/verification/fetcher/errors.go +++ b/engine/verification/fetcher/errors.go @@ -17,14 +17,15 @@ type 
ChunkDataPackValidationError struct { err error } -func NewChunkDataPackValidationError(originID flow.Identifier, +func NewChunkDataPackValidationError( + originID flow.Identifier, resultID flow.Identifier, chunkIndex uint64, chunkDataPackID flow.Identifier, chunkID flow.Identifier, collectionID flow.Identifier, - err error) error { - + err error, +) error { return ChunkDataPackValidationError{ originID: originID, chunkDataPackID: chunkDataPackID, @@ -45,7 +46,8 @@ func (c ChunkDataPackValidationError) Error() string { c.chunkDataPackID, c.chunkID, c.collectionID, - c.err) + c.err, + ) } func IsChunkDataPackValidationError(err error) bool { diff --git a/engine/verification/fetcher/execution_fork_test.go b/engine/verification/fetcher/execution_fork_test.go index 5b46dc51b81..3c62b246024 100644 --- a/engine/verification/fetcher/execution_fork_test.go +++ b/engine/verification/fetcher/execution_fork_test.go @@ -39,7 +39,7 @@ func TestExecutionForkWithDuplicateAssignedChunks(t *testing.T) { mockStateAtBlockIDForIdentities(s.state, block.ID(), executorsA.Union(executorsB)) // the chunks belong to an unsealed block, so their chunk data pack is requested. - mockBlockSealingStatus(s.state, s.headers, block.Header, false) + mockBlockSealingStatus(s.state, s.headers, block.ToHeader(), false) // mocks resources on fetcher engine side. mockResultsByIDs(s.results, []*flow.ExecutionResult{resultA, resultB}) @@ -57,8 +57,8 @@ func TestExecutionForkWithDuplicateAssignedChunks(t *testing.T) { s.metrics.On("OnChunkDataPackRequestSentByFetcher").Return().Times(len(assignedChunkStatuses)) // each chunk data request is answered by requester engine on a distinct chunk data response - chunkALocatorID := statusA.ChunkLocatorID() - chunkBLocatorID := statusB.ChunkLocatorID() + chunkALocatorID := unittest.ChunkLocatorFixture(statusA.ExecutionResult.ID(), statusA.ChunkIndex).ID() + chunkBLocatorID := unittest.ChunkLocatorFixture(statusB.ExecutionResult.ID(), statusB.ChunkIndex).ID() chunkDataResponse := make(map[flow.Identifier]*verification.ChunkDataPackResponse) chunkDataResponse[chunkALocatorID] = chunkDataPackResponseFixture(t, statusA.Chunk(), collMap[statusA.Chunk().ID()], resultA) chunkDataResponse[chunkBLocatorID] = chunkDataPackResponseFixture(t, statusB.Chunk(), collMap[statusA.Chunk().ID()], resultB) @@ -83,16 +83,12 @@ func TestExecutionForkWithDuplicateAssignedChunks(t *testing.T) { processWG := &sync.WaitGroup{} processWG.Add(len(assignedChunkStatuses)) for _, status := range assignedChunkStatuses { - locator := &chunks.Locator{ - Index: status.ChunkIndex, - ResultID: status.ExecutionResult.ID(), - } + locator := unittest.ChunkLocatorFixture(status.ExecutionResult.ID(), status.ChunkIndex) go func(l *chunks.Locator) { e.ProcessAssignedChunk(l) processWG.Done() }(locator) - } unittest.RequireReturnsBefore(t, requesterWg.Wait, 100*time.Millisecond, "could not handle received chunk data pack on time") @@ -121,12 +117,12 @@ func executionResultForkFixture(t *testing.T) (*flow.Block, statusA := &verification.ChunkStatus{ ChunkIndex: 0, ExecutionResult: resultA, - BlockHeight: block.Header.Height, + BlockHeight: block.Height, } statusB := &verification.ChunkStatus{ ChunkIndex: 0, ExecutionResult: resultB, - BlockHeight: block.Header.Height, + BlockHeight: block.Height, } // keeps collections of assigned chunks diff --git a/engine/verification/fetcher/mock/assigned_chunk_processor.go b/engine/verification/fetcher/mock/assigned_chunk_processor.go index 193af0532a2..7b7f3018d61 100644 --- 
a/engine/verification/fetcher/mock/assigned_chunk_processor.go +++ b/engine/verification/fetcher/mock/assigned_chunk_processor.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockfetcher +package mock import ( chunks "github.com/onflow/flow-go/model/chunks" @@ -15,10 +15,14 @@ type AssignedChunkProcessor struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *AssignedChunkProcessor) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -36,10 +40,14 @@ func (_m *AssignedChunkProcessor) ProcessAssignedChunk(locator *chunks.Locator) _m.Called(locator) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *AssignedChunkProcessor) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -57,13 +65,12 @@ func (_m *AssignedChunkProcessor) WithChunkConsumerNotifier(notifier module.Proc _m.Called(notifier) } -type mockConstructorTestingTNewAssignedChunkProcessor interface { +// NewAssignedChunkProcessor creates a new instance of AssignedChunkProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAssignedChunkProcessor(t interface { mock.TestingT Cleanup(func()) -} - -// NewAssignedChunkProcessor creates a new instance of AssignedChunkProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAssignedChunkProcessor(t mockConstructorTestingTNewAssignedChunkProcessor) *AssignedChunkProcessor { +}) *AssignedChunkProcessor { mock := &AssignedChunkProcessor{} mock.Mock.Test(t) diff --git a/engine/verification/fetcher/mock/chunk_data_pack_handler.go b/engine/verification/fetcher/mock/chunk_data_pack_handler.go index c3675d3480c..dd6f5f3335b 100644 --- a/engine/verification/fetcher/mock/chunk_data_pack_handler.go +++ b/engine/verification/fetcher/mock/chunk_data_pack_handler.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockfetcher +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -24,13 +24,12 @@ func (_m *ChunkDataPackHandler) NotifyChunkDataPackSealed(chunkIndex uint64, res _m.Called(chunkIndex, resultID) } -type mockConstructorTestingTNewChunkDataPackHandler interface { +// NewChunkDataPackHandler creates a new instance of ChunkDataPackHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChunkDataPackHandler(t interface { mock.TestingT Cleanup(func()) -} - -// NewChunkDataPackHandler creates a new instance of ChunkDataPackHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewChunkDataPackHandler(t mockConstructorTestingTNewChunkDataPackHandler) *ChunkDataPackHandler { +}) *ChunkDataPackHandler { mock := &ChunkDataPackHandler{} mock.Mock.Test(t) diff --git a/engine/verification/fetcher/mock/chunk_data_pack_requester.go b/engine/verification/fetcher/mock/chunk_data_pack_requester.go index 2b3b42de6c4..fdf0973efe7 100644 --- a/engine/verification/fetcher/mock/chunk_data_pack_requester.go +++ b/engine/verification/fetcher/mock/chunk_data_pack_requester.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockfetcher +package mock import ( fetcher "github.com/onflow/flow-go/engine/verification/fetcher" @@ -14,10 +14,14 @@ type ChunkDataPackRequester struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *ChunkDataPackRequester) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -30,10 +34,14 @@ func (_m *ChunkDataPackRequester) Done() <-chan struct{} { return r0 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *ChunkDataPackRequester) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -56,13 +64,12 @@ func (_m *ChunkDataPackRequester) WithChunkDataPackHandler(handler fetcher.Chunk _m.Called(handler) } -type mockConstructorTestingTNewChunkDataPackRequester interface { +// NewChunkDataPackRequester creates a new instance of ChunkDataPackRequester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChunkDataPackRequester(t interface { mock.TestingT Cleanup(func()) -} - -// NewChunkDataPackRequester creates a new instance of ChunkDataPackRequester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChunkDataPackRequester(t mockConstructorTestingTNewChunkDataPackRequester) *ChunkDataPackRequester { +}) *ChunkDataPackRequester { mock := &ChunkDataPackRequester{} mock.Mock.Test(t) diff --git a/engine/verification/requester/requester.go b/engine/verification/requester/requester.go index 10f91780c72..0d166dbc63c 100644 --- a/engine/verification/requester/requester.go +++ b/engine/verification/requester/requester.go @@ -67,7 +67,7 @@ type Engine struct { func New(log zerolog.Logger, state protocol.State, - net network.Network, + net network.EngineRegistry, tracer module.Tracer, metrics module.VerificationMetrics, pendingRequests mempool.ChunkRequests, @@ -155,7 +155,7 @@ func (e *Engine) Done() <-chan struct{} { // the peer-to-peer network. 
func (e *Engine) process(originID flow.Identifier, event interface{}) error { switch resource := event.(type) { - case *messages.ChunkDataResponse: + case *flow.ChunkDataResponse: e.handleChunkDataPackWithTracing(originID, &resource.ChunkDataPack) default: return fmt.Errorf("invalid event type (%T)", event) @@ -198,12 +198,19 @@ func (e *Engine) handleChunkDataPack(originID flow.Identifier, chunkDataPack *fl } for _, locator := range locators { - response := verification.ChunkDataPackResponse{ - Locator: *locator, - Cdp: chunkDataPack, + response, err := verification.NewChunkDataPackResponse( + verification.UntrustedChunkDataPackResponse{ + Locator: *locator, + Cdp: chunkDataPack, + }, + ) + if err != nil { + // TODO: update this engine to use SignalerContext and throw an exception here + lg.Fatal().Err(err).Msg("could not construct chunk data pack response") + return } - e.handler.HandleChunkDataPack(originID, &response) + e.handler.HandleChunkDataPack(originID, response) e.metrics.OnChunkDataPackSentToFetcher() lg.Info(). Hex("result_id", logging.ID(locator.ResultID)). @@ -331,8 +338,11 @@ func (e *Engine) requestChunkDataPack(request *verification.ChunkDataPackRequest } // publishes the chunk data request to the network - targetIDs := request.SampleTargets(int(e.requestTargets)) - err := e.con.Publish(req, targetIDs...) + targetIDs, err := request.SampleTargets(int(e.requestTargets)) + if err != nil { + return fmt.Errorf("target sampling failed: %w", err) + } + err = e.con.Publish(req, targetIDs...) if err != nil { return fmt.Errorf("could not publish chunk data pack request for chunk (id=%s): %w", request.ChunkID, err) } diff --git a/engine/verification/requester/requester_test.go b/engine/verification/requester/requester_test.go index ea014d4e9e0..b4ad60917a9 100644 --- a/engine/verification/requester/requester_test.go +++ b/engine/verification/requester/requester_test.go @@ -22,7 +22,7 @@ import ( "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -66,7 +66,7 @@ func setupTest() *RequesterEngineTestSuite { // newRequesterEngine returns a requester engine for testing. func newRequesterEngine(t *testing.T, s *RequesterEngineTestSuite) *requester.Engine { - net := &mocknetwork.Network{} + net := &mocknetwork.EngineRegistry{} // mocking the network registration of the engine net.On("Register", channels.RequestChunks, testifymock.Anything). Return(s.con, nil). 
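The requester's `process` now consumes the internal `*flow.ChunkDataResponse` type rather than the raw wire message, so the tests below repeatedly convert fixtures via `ToInternal` plus a type assertion. That recurring pattern, extracted into a sketch (the helper name is ours; the `ToInternal` signature is inferred from its use in these hunks):

```go
package sketch

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/messages"
)

// toInternalChunkDataResponse converts an untrusted wire message into the
// validated internal representation expected by Engine.Process.
func toInternalChunkDataResponse(msg *messages.ChunkDataResponse) (*flow.ChunkDataResponse, error) {
	internal, err := msg.ToInternal()
	if err != nil {
		return nil, fmt.Errorf("could not convert chunk data response: %w", err)
	}
	response, ok := internal.(*flow.ChunkDataResponse)
	if !ok {
		return nil, fmt.Errorf("unexpected internal type %T", internal)
	}
	return response, nil
}
```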
@@ -114,30 +114,30 @@ func TestHandleChunkDataPack_HappyPath(t *testing.T) { s := setupTest() e := newRequesterEngine(t, s) - response := unittest.ChunkDataResponseMsgFixture(unittest.IdentifierFixture()) - request := unittest.ChunkDataPackRequestFixture(unittest.WithChunkID(response.ChunkDataPack.ChunkID)) + responseMsg := unittest.ChunkDataResponseMsgFixture(unittest.IdentifierFixture()) + internal, err := responseMsg.ToInternal() + require.NoError(t, err) + responseInternal, ok := internal.(*flow.ChunkDataResponse) + require.True(t, ok) + + request := unittest.ChunkDataPackRequestFixture(unittest.WithChunkID(responseMsg.ChunkDataPack.ChunkID)) originID := unittest.IdentifierFixture() // we remove pending request on receiving this response locators := chunks.LocatorMap{} - locators[chunks.ChunkLocatorID(request.ResultID, request.Index)] = &chunks.Locator{ - ResultID: request.ResultID, - Index: request.Index, - } - s.pendingRequests.On("PopAll", response.ChunkDataPack.ChunkID).Return(locators, true).Once() + locator := unittest.ChunkLocatorFixture(request.ResultID, request.Index) + locators[locator.ID()] = locator + s.pendingRequests.On("PopAll", responseMsg.ChunkDataPack.ChunkID).Return(locators, true).Once() s.handler.On("HandleChunkDataPack", originID, &verification.ChunkDataPackResponse{ - Locator: chunks.Locator{ - ResultID: request.ResultID, - Index: request.Index, - }, - Cdp: &response.ChunkDataPack, + Locator: *unittest.ChunkLocatorFixture(request.ResultID, request.Index), + Cdp: &responseInternal.ChunkDataPack, }).Return().Once() s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Once() s.metrics.On("OnChunkDataPackSentToFetcher").Return().Once() - err := e.Process(channels.RequestChunks, originID, response) - require.Nil(t, err) + err = e.Process(channels.RequestChunks, originID, responseInternal) + require.NoError(t, err) testifymock.AssertExpectationsForObjects(t, s.con, s.handler, s.pendingRequests, s.metrics) } @@ -154,19 +154,23 @@ func TestHandleChunkDataPack_HappyPath_Multiple(t *testing.T) { requests := unittest.ChunkDataPackRequestListFixture(count) originID := unittest.IdentifierFixture() chunkIDs := toChunkIDs(t, requests) - responses := unittest.ChunkDataResponseMessageListFixture(chunkIDs) + responsesMsg := unittest.ChunkDataResponseMessageListFixture(chunkIDs) // we remove pending request on receiving this response mockPendingRequestsPopAll(t, s.pendingRequests, requests) // we pass each chunk data pack and its collection to chunk data pack handler handlerWG := mockChunkDataPackHandler(t, s.handler, requests) - s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Times(len(responses)) - s.metrics.On("OnChunkDataPackSentToFetcher").Return().Times(len(responses)) + s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Times(len(responsesMsg)) + s.metrics.On("OnChunkDataPackSentToFetcher").Return().Times(len(responsesMsg)) - for _, response := range responses { - err := e.Process(channels.RequestChunks, originID, response) - require.Nil(t, err) + for _, response := range responsesMsg { + internal, err := response.ToInternal() + require.NoError(t, err) + responseInternal, ok := internal.(*flow.ChunkDataResponse) + require.True(t, ok) + err = e.Process(channels.RequestChunks, originID, responseInternal) + require.NoError(t, err) } unittest.RequireReturnsBefore(t, handlerWG.Wait, 100*time.Millisecond, "could not handle chunk data responses on time") @@ -182,17 +186,21 @@ func 
TestHandleChunkDataPack_FailedRequestRemoval(t *testing.T) { s := setupTest() e := newRequesterEngine(t, s) - response := unittest.ChunkDataResponseMsgFixture(unittest.IdentifierFixture()) + responseMsg := unittest.ChunkDataResponseMsgFixture(unittest.IdentifierFixture()) + internal, err := responseMsg.ToInternal() + require.NoError(t, err) + responseInternal, ok := internal.(*flow.ChunkDataResponse) + require.True(t, ok) originID := unittest.IdentifierFixture() // however by the time we try remove it, the request has gone. // this can happen when duplicate chunk data packs are coming concurrently. // the concurrency is safe with pending requests mempool's mutex lock. - s.pendingRequests.On("PopAll", response.ChunkDataPack.ChunkID).Return(nil, false).Once() + s.pendingRequests.On("PopAll", responseMsg.ChunkDataPack.ChunkID).Return(nil, false).Once() s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Once() - err := e.Process(channels.RequestChunks, originID, response) - require.Nil(t, err) + err = e.Process(channels.RequestChunks, originID, responseInternal) + require.NoError(t, err) testifymock.AssertExpectationsForObjects(t, s.pendingRequests, s.con, s.metrics) s.handler.AssertNotCalled(t, "HandleChunkDataPack") @@ -247,7 +255,12 @@ func TestCompleteRequestingUnsealedChunkLifeCycle(t *testing.T) { unittest.WithHeightGreaterThan(sealedHeight), unittest.WithAgrees(agrees), unittest.WithDisagrees(disagrees)) - response := unittest.ChunkDataResponseMsgFixture(requests[0].ChunkID) + + responseMsg := unittest.ChunkDataResponseMsgFixture(requests[0].ChunkID) + internal, err := responseMsg.ToInternal() + require.NoError(t, err) + responseInternal, ok := internal.(*flow.ChunkDataResponse) + require.True(t, ok) // mocks the requester pipeline vertestutils.MockLastSealedHeight(s.state, sealedHeight) @@ -270,14 +283,14 @@ func TestCompleteRequestingUnsealedChunkLifeCycle(t *testing.T) { unittest.RequireCloseBefore(t, e.Ready(), time.Second, "could not start engine on time") // we wait till the engine submits the chunk request to the network, and receive the response - conduitWG := mockConduitForChunkDataPackRequest(t, s.con, requests, 1, func(request *messages.ChunkDataRequest) { - err := e.Process(channels.RequestChunks, requests[0].Agrees[0], response) + conduitWG := mockConduitForChunkDataPackRequest(t, s.con, requests, 1, func(request *flow.ChunkDataRequest) { + err := e.Process(channels.RequestChunks, requests[0].Agrees[0], responseInternal) require.NoError(t, err) }) unittest.RequireReturnsBefore(t, requestHistoryWG.Wait, time.Duration(2)*s.retryInterval, "could not check chunk requests qualification on time") unittest.RequireReturnsBefore(t, updateHistoryWG.Wait, s.retryInterval, "could not update chunk request history on time") unittest.RequireReturnsBefore(t, conduitWG.Wait, time.Duration(2)*s.retryInterval, "could not request chunks from network") - unittest.RequireReturnsBefore(t, handlerWG.Wait, 100*time.Second, "could not handle chunk data responses on time") + unittest.RequireReturnsBefore(t, handlerWG.Wait, time.Second, "could not handle chunk data responses on time") unittest.RequireCloseBefore(t, e.Done(), time.Second, "could not stop engine on time") testifymock.AssertExpectationsForObjects(t, s.metrics) @@ -327,7 +340,7 @@ func TestRequestPendingChunkSealedBlock_Hybrid(t *testing.T) { mockPendingRequestsPopAll(t, s.pendingRequests, sealedRequests) notifierWG := mockNotifyBlockSealedHandler(t, s.handler, sealedRequests) // unsealed requests should be 
submitted to the network once - conduitWG := mockConduitForChunkDataPackRequest(t, s.con, unsealedRequests, 1, func(*messages.ChunkDataRequest) {}) + conduitWG := mockConduitForChunkDataPackRequest(t, s.con, unsealedRequests, 1, func(*flow.ChunkDataRequest) {}) unittest.RequireReturnsBefore(t, requestHistoryWG.Wait, time.Duration(2)*s.retryInterval, "could not check chunk requests qualification on time") unittest.RequireReturnsBefore(t, updateHistoryWG.Wait, s.retryInterval, "could not update chunk request history on time") @@ -351,7 +364,9 @@ func TestReceivingChunkDataResponseForDuplicateChunkRequests(t *testing.T) { resultA, _, _, _ := vertestutils.ExecutionResultForkFixture(t) duplicateChunkID := resultA.Chunks[0].ID() - responseA := unittest.ChunkDataResponseMsgFixture(duplicateChunkID) + responseMsgA := unittest.ChunkDataResponseMsgFixture(duplicateChunkID) + responseInternalA, err := responseMsgA.ToInternal() + require.NoError(t, err) requestA := unittest.ChunkDataPackRequestFixture(unittest.WithChunkID(duplicateChunkID)) requestB := unittest.ChunkDataPackRequestFixture(unittest.WithChunkID(duplicateChunkID)) @@ -365,8 +380,8 @@ func TestReceivingChunkDataResponseForDuplicateChunkRequests(t *testing.T) { s.metrics.On("OnChunkDataPackResponseReceivedFromNetworkByRequester").Return().Once() s.metrics.On("OnChunkDataPackSentToFetcher").Return().Twice() - err := e.Process(channels.RequestChunks, originID, responseA) - require.Nil(t, err) + err = e.Process(channels.RequestChunks, originID, responseInternalA) + require.NoError(t, err) unittest.RequireReturnsBefore(t, handlerWG.Wait, time.Second, "could not handle chunk data responses on time") testifymock.AssertExpectationsForObjects(t, s.con, s.metrics) @@ -457,7 +472,7 @@ func testRequestPendingChunkDataPack(t *testing.T, count int, attempts int) { unittest.RequireCloseBefore(t, e.Ready(), time.Second, "could not start engine on time") - conduitWG := mockConduitForChunkDataPackRequest(t, s.con, requests, attempts, func(*messages.ChunkDataRequest) {}) + conduitWG := mockConduitForChunkDataPackRequest(t, s.con, requests, attempts, func(*flow.ChunkDataRequest) {}) unittest.RequireReturnsBefore(t, requestHistory.Wait, time.Duration(2*attempts)*s.retryInterval, "could not check chunk requests qualification on time") unittest.RequireReturnsBefore(t, updateHistoryWG.Wait, s.retryInterval, "could not update chunk request history on time") unittest.RequireReturnsBefore(t, conduitWG.Wait, time.Duration(2*attempts)*s.retryInterval, "could not request and handle chunks on time") @@ -515,7 +530,7 @@ func TestDispatchingRequests_Hybrid(t *testing.T) { unittest.RequireCloseBefore(t, e.Ready(), time.Second, "could not start engine on time") // mocks only instantly qualified requests are dispatched in the network. - conduitWG := mockConduitForChunkDataPackRequest(t, s.con, instantQualifiedRequests, attempts, func(*messages.ChunkDataRequest) {}) + conduitWG := mockConduitForChunkDataPackRequest(t, s.con, instantQualifiedRequests, attempts, func(*flow.ChunkDataRequest) {}) s.metrics.On("OnChunkDataPackRequestDispatchedInNetworkByRequester").Return().Times(len(instantQualifiedRequests) * attempts) // each instantly qualified one is requested only once, hence the maximum is updated only once from 0 -> 1, and // is kept at 1 during all cycles of this test. 
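These dispatch tests exercise the publish path changed in `requester.go` above, where `SampleTargets` now returns an error instead of silently yielding an empty target list. A condensed sketch of that path under the same assumption (the wrapper function is ours; `Publish` is the conduit call used in the hunk):

```go
package sketch

import (
	"fmt"

	"github.com/onflow/flow-go/model/verification"
	"github.com/onflow/flow-go/network"
)

// publishChunkDataRequest samples target executors for a pending request and
// publishes the prepared wire message, propagating sampling failures instead
// of publishing to an empty target list.
func publishChunkDataRequest(con network.Conduit, req interface{}, request *verification.ChunkDataPackRequest, targets int) error {
	targetIDs, err := request.SampleTargets(targets)
	if err != nil {
		return fmt.Errorf("target sampling failed: %w", err)
	}
	if err := con.Publish(req, targetIDs...); err != nil {
		return fmt.Errorf("could not publish chunk data pack request for chunk (id=%s): %w", request.ChunkID, err)
	}
	return nil
}
```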
@@ -550,7 +565,7 @@ func mockConduitForChunkDataPackRequest(t *testing.T, con *mocknetwork.Conduit, reqList verification.ChunkDataPackRequestList, count int, - requestHandler func(*messages.ChunkDataRequest)) *sync.WaitGroup { + requestHandler func(*flow.ChunkDataRequest)) *sync.WaitGroup { // counts number of requests for each chunk data pack reqCount := make(map[flow.Identifier]int) @@ -588,7 +603,9 @@ func mockConduitForChunkDataPackRequest(t *testing.T, require.Contains(t, reqMap[req.ChunkID].Agrees, target2) go func() { - requestHandler(req) + internal, err := req.ToInternal() + require.NoError(t, err) + requestHandler(internal.(*flow.ChunkDataRequest)) wg.Done() }() @@ -616,7 +633,7 @@ func mockChunkDataPackHandler(t *testing.T, handler *mockfetcher.ChunkDataPackHa require.True(t, requests.ContainsChunkID(response.Cdp.ChunkID)) // invocation should be distinct per chunk ID - locatorID := chunks.ChunkLocatorID(response.ResultID, response.Index) + locatorID := unittest.ChunkLocatorFixture(response.ResultID, response.Index).ID() _, ok = handledLocators[locatorID] require.False(t, ok) @@ -647,7 +664,7 @@ func mockNotifyBlockSealedHandler(t *testing.T, handler *mockfetcher.ChunkDataPa require.True(t, requests.ContainsLocator(resultID, chunkIndex)) // invocation should be distinct per chunk ID - locatorID := chunks.ChunkLocatorID(resultID, chunkIndex) + locatorID := unittest.ChunkLocatorFixture(resultID, chunkIndex).ID() _, ok = seen[locatorID] require.False(t, ok) seen[locatorID] = struct{}{} diff --git a/engine/verification/utils/hasher.go b/engine/verification/utils/hasher.go index 56ab130aaf4..b2fef99c640 100644 --- a/engine/verification/utils/hasher.go +++ b/engine/verification/utils/hasher.go @@ -1,7 +1,8 @@ package utils import ( - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/module/signature" ) diff --git a/engine/verification/utils/mocked.go b/engine/verification/utils/mocked.go index 77038b71dc7..528108c6897 100644 --- a/engine/verification/utils/mocked.go +++ b/engine/verification/utils/mocked.go @@ -23,12 +23,15 @@ func (m *MockAssigner) Assign(result *flow.ExecutionResult, blockID flow.Identif if len(result.Chunks) == 0 { return nil, fmt.Errorf("assigner called with empty chunk list") } - a := chmodel.NewAssignment() + a := chmodel.NewAssignmentBuilder() for _, c := range result.Chunks { if m.isAssigned(c.Index) { - a.Add(c, flow.IdentifierList{m.me}) + err := a.Add(c.Index, flow.IdentifierList{m.me}) + if err != nil { + return nil, err + } } } - return a, nil + return a.Build(), nil } diff --git a/engine/verification/utils/unittest/fixture.go b/engine/verification/utils/unittest/fixture.go index dc572cc0622..c587fbfe2a6 100644 --- a/engine/verification/utils/unittest/fixture.go +++ b/engine/verification/utils/unittest/fixture.go @@ -5,16 +5,16 @@ import ( "math/rand" "testing" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/engine/execution/computation/committer" "github.com/onflow/flow-go/engine/execution/computation/computer" - "github.com/onflow/flow-go/engine/execution/state" + exstate "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/engine/execution/state/bootstrap" "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/fvm" @@ 
-39,6 +39,12 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) +const ( + // TODO: enable parallel execution once cadence type equivalence check issue + // is resolved. + testMaxConcurrency = 1 +) + // ExecutionReceiptData is a test helper struct that represents all data required // to verify the result of an execution receipt. type ExecutionReceiptData struct { @@ -71,7 +77,7 @@ func (c CompleteExecutionReceiptList) ChunkDataResponseOf(t *testing.T, chunkID // publishes the chunk data pack response to the network res := &messages.ChunkDataResponse{ - ChunkDataPack: *receiptData.ChunkDataPacks[chunkIndex], + ChunkDataPack: flow.UntrustedChunkDataPack(*receiptData.ChunkDataPacks[chunkIndex]), Nonce: rand.Uint64(), } @@ -183,19 +189,31 @@ func WithClusterCommittee(clusterCommittee flow.IdentityList) CompleteExecutionR // ExecutionResultFixture is a test helper that returns an execution result for the reference block header as well as the execution receipt data // for that result. -func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refBlkHeader *flow.Header, clusterCommittee flow.IdentityList) (*flow.ExecutionResult, - *ExecutionReceiptData) { +func ExecutionResultFixture(t *testing.T, + chunkCount int, + chain flow.Chain, + refBlkHeader *flow.Header, + protocolStateID flow.Identifier, + clusterCommittee flow.IdentityList, + source []byte, +) (*flow.ExecutionResult, *ExecutionReceiptData) { // setups up the first collection of block consists of three transactions - tx1 := testutil.DeployCounterContractTransaction(chain.ServiceAddress(), chain) - err := testutil.SignTransactionAsServiceAccount(tx1, 0, chain) + tx1Builder := testutil.DeployCounterContractTransaction(chain.ServiceAddress(), chain) + err := testutil.SignTransactionAsServiceAccount(tx1Builder, 0, chain) require.NoError(t, err) - tx2 := testutil.CreateCounterTransaction(chain.ServiceAddress(), chain.ServiceAddress()) - err = testutil.SignTransactionAsServiceAccount(tx2, 1, chain) + tx2Builder := testutil.CreateCounterTransaction(chain.ServiceAddress(), chain.ServiceAddress()) + err = testutil.SignTransactionAsServiceAccount(tx2Builder, 1, chain) + require.NoError(t, err) + tx3Builder := testutil.CreateCounterPanicTransaction(chain.ServiceAddress(), chain.ServiceAddress()) + err = testutil.SignTransactionAsServiceAccount(tx3Builder, 2, chain) + require.NoError(t, err) + tx1, err := tx1Builder.Build() require.NoError(t, err) - tx3 := testutil.CreateCounterPanicTransaction(chain.ServiceAddress(), chain.ServiceAddress()) - err = testutil.SignTransactionAsServiceAccount(tx3, 2, chain) + tx2, err := tx2Builder.Build() + require.NoError(t, err) + tx3, err := tx3Builder.Build() require.NoError(t, err) transactions := []*flow.TransactionBody{tx1, tx2, tx3} collection := flow.Collection{Transactions: transactions} @@ -203,7 +221,7 @@ func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refB clusterChainID := cluster.CanonicalClusterID(1, clusterCommittee.NodeIDs()) guarantee := unittest.CollectionGuaranteeFixture(unittest.WithCollection(&collection), unittest.WithCollRef(refBlkHeader.ParentID)) - guarantee.ChainID = clusterChainID + guarantee.ClusterChainID = clusterChainID indices, err := signature.EncodeSignersToIndices(clusterCommittee.NodeIDs(), clusterCommittee.NodeIDs()) require.NoError(t, err) guarantee.SignerIndices = indices @@ -213,7 +231,7 @@ func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refB log := zerolog.Nop() // setups execution outputs: - var 
referenceBlock flow.Block + var referenceBlock *flow.Block var spockSecrets [][]byte var chunkDataPacks []*flow.ChunkDataPack var result *flow.ExecutionResult @@ -256,7 +274,7 @@ func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refB ) // create state.View - snapshot := state.NewLedgerStorageSnapshot( + snapshot := exstate.NewLedgerStorageSnapshot( led, startStateCommitment) committer := committer.NewLedgerViewCommitter(led, trace.NewNoopTracer()) @@ -274,10 +292,12 @@ func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refB me := new(moduleMock.Local) me.On("NodeID").Return(unittest.IdentifierFixture()) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). Return(nil, nil) + protocolState := testutil.ProtocolStateWithSourceFixture(source) + // create BlockComputer bc, err := computer.NewBlockComputer( vm, @@ -288,44 +308,54 @@ func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refB committer, me, prov, - nil) + nil, + protocolState, + testMaxConcurrency) require.NoError(t, err) completeColls := make(map[flow.Identifier]*entity.CompleteCollection) - completeColls[guarantee.ID()] = &entity.CompleteCollection{ - Guarantee: guarantee, - Transactions: collection.Transactions, + completeColls[guarantee.CollectionID] = &entity.CompleteCollection{ + Guarantee: guarantee, + Collection: &collection, } for i := 1; i < chunkCount; i++ { - tx := testutil.CreateCounterTransaction(chain.ServiceAddress(), chain.ServiceAddress()) - err = testutil.SignTransactionAsServiceAccount(tx, 3+uint64(i), chain) + txBuilder := testutil.CreateCounterTransaction(chain.ServiceAddress(), chain.ServiceAddress()) + err = testutil.SignTransactionAsServiceAccount(txBuilder, 3+uint64(i), chain) + require.NoError(t, err) + txBody, err := txBuilder.Build() require.NoError(t, err) - collection := flow.Collection{Transactions: []*flow.TransactionBody{tx}} + collection := flow.Collection{Transactions: []*flow.TransactionBody{txBody}} guarantee := unittest.CollectionGuaranteeFixture(unittest.WithCollection(&collection), unittest.WithCollRef(refBlkHeader.ParentID)) guarantee.SignerIndices = indices - guarantee.ChainID = clusterChainID + guarantee.ClusterChainID = clusterChainID collections = append(collections, &collection) guarantees = append(guarantees, guarantee) - completeColls[guarantee.ID()] = &entity.CompleteCollection{ - Guarantee: guarantee, - Transactions: collection.Transactions, + completeColls[guarantee.CollectionID] = &entity.CompleteCollection{ + Guarantee: guarantee, + Collection: &collection, } } - payload := flow.Payload{ - Guarantees: guarantees, - } - referenceBlock = flow.Block{ - Header: refBlkHeader, - } - referenceBlock.SetPayload(payload) - + payload, err := flow.NewPayload( + flow.UntrustedPayload{ + Guarantees: guarantees, + ProtocolStateID: protocolStateID, + }, + ) + require.NoError(t, err) + referenceBlock, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: refBlkHeader.HeaderBody, + Payload: *payload, + }, + ) + require.NoError(t, err) executableBlock := &entity.ExecutableBlock{ - Block: &referenceBlock, + Block: referenceBlock, CompleteCollections: completeColls, StartState: &startStateCommitment, } @@ -341,12 +371,13 @@ func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refB spockSecrets = append(spockSecrets, snapshot.SpockSecret) } - 
chunkDataPacks = computationResult.AllChunkDataPacks() - result = &computationResult.ExecutionResult + chunkDataPacks, err = computationResult.AllChunkDataPacks() + require.NoError(t, err) + result = &computationResult.ExecutionReceipt.ExecutionResult }) return result, &ExecutionReceiptData{ - ReferenceBlock: &referenceBlock, + ReferenceBlock: referenceBlock, ChunkDataPacks: chunkDataPacks, SpockSecrets: spockSecrets, } @@ -360,7 +391,13 @@ func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refB // For sake of simplicity and test, container blocks (i.e., C) do not contain any guarantee. // // It returns a slice of complete execution receipt fixtures that contains a container block as well as all data to verify its contained receipts. -func CompleteExecutionReceiptChainFixture(t *testing.T, root *flow.Header, count int, opts ...CompleteExecutionReceiptBuilderOpt) []*CompleteExecutionReceipt { +func CompleteExecutionReceiptChainFixture(t *testing.T, + root *flow.Header, + rootProtocolStateID flow.Identifier, + count int, + sources [][]byte, + opts ...CompleteExecutionReceiptBuilderOpt, +) []*CompleteExecutionReceipt { completeERs := make([]*CompleteExecutionReceipt, 0, count) parent := root @@ -382,18 +419,21 @@ func CompleteExecutionReceiptChainFixture(t *testing.T, root *flow.Header, count require.GreaterOrEqual(t, len(builder.executorIDs), builder.executorCount, "number of executors in the tests should be greater than or equal to the number of receipts per block") + var sourcesIndex = 0 for i := 0; i < count; i++ { // Generates two blocks as parent <- R <- C where R is a reference block containing guarantees, // and C is a container block containing execution receipt for R. - receipts, allData, head := ExecutionReceiptsFromParentBlockFixture(t, parent, builder) - containerBlock := ContainerBlockFixture(head, receipts) + receipts, allData, head := ExecutionReceiptsFromParentBlockFixture(t, parent, rootProtocolStateID, builder, sources[sourcesIndex:]) + sourcesIndex += builder.resultsCount + containerBlock := ContainerBlockFixture(head, rootProtocolStateID, receipts, sources[sourcesIndex]) + sourcesIndex++ completeERs = append(completeERs, &CompleteExecutionReceipt{ ContainerBlock: containerBlock, Receipts: receipts, ReceiptsData: allData, }) - parent = containerBlock.Header + parent = containerBlock.ToHeader() } return completeERs } @@ -404,7 +444,11 @@ func CompleteExecutionReceiptChainFixture(t *testing.T, root *flow.Header, count // result (i.e., for the next result). // // Each result may appear in more than one receipt depending on the builder parameters. 
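The sources slice threaded through these fixtures is consumed incrementally: each of the count iterations above advances sourcesIndex by builder.resultsCount for the reference blocks and by one more for the container block, and each result additionally reads one source ahead for the child block's QC, which lands on the source the container block consumes next. As a rough sketch (this helper is purely illustrative, derived from the indexing above, and not part of the fixture API), the minimum slice length a caller must supply works out to:

// minSourcesNeeded is a hypothetical helper inferred from the indexing in
// CompleteExecutionReceiptChainFixture: count iterations, each consuming
// resultsCount sources for the reference blocks plus one for the container block.
func minSourcesNeeded(count, resultsCount int) int {
	return count * (resultsCount + 1)
}

The test helpers below size the slice generously instead, e.g. unittest.RandomSourcesFixture(30).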
-func ExecutionReceiptsFromParentBlockFixture(t *testing.T, parent *flow.Header, builder *CompleteExecutionReceiptBuilder) ( +func ExecutionReceiptsFromParentBlockFixture(t *testing.T, + parent *flow.Header, + protocolStateID flow.Identifier, + builder *CompleteExecutionReceiptBuilder, + sources [][]byte) ( []*flow.ExecutionReceipt, []*ExecutionReceiptData, *flow.Header) { @@ -412,35 +456,51 @@ func ExecutionReceiptsFromParentBlockFixture(t *testing.T, parent *flow.Header, allReceipts := make([]*flow.ExecutionReceipt, 0, builder.resultsCount*builder.executorCount) for i := 0; i < builder.resultsCount; i++ { - result, data := ExecutionResultFromParentBlockFixture(t, parent, builder) + result, data := ExecutionResultFromParentBlockFixture(t, parent, protocolStateID, builder, sources[i:]) // makes several copies of the same result for cp := 0; cp < builder.executorCount; cp++ { allReceipts = append(allReceipts, &flow.ExecutionReceipt{ - ExecutorID: builder.executorIDs[cp], - ExecutionResult: *result, + UnsignedExecutionReceipt: flow.UnsignedExecutionReceipt{ + ExecutorID: builder.executorIDs[cp], + ExecutionResult: *result, + Spocks: unittest.SignaturesFixture(1), + }, + ExecutorSignature: unittest.SignatureFixture(), }) allData = append(allData, data) } - parent = data.ReferenceBlock.Header + parent = data.ReferenceBlock.ToHeader() } return allReceipts, allData, parent } // ExecutionResultFromParentBlockFixture is a test helper that creates a child (reference) block from the parent, as well as an execution for it. -func ExecutionResultFromParentBlockFixture(t *testing.T, parent *flow.Header, builder *CompleteExecutionReceiptBuilder) (*flow.ExecutionResult, - *ExecutionReceiptData) { - refBlkHeader := unittest.BlockHeaderWithParentFixture(parent) - return ExecutionResultFixture(t, builder.chunksCount, builder.chain, refBlkHeader, builder.clusterCommittee) +func ExecutionResultFromParentBlockFixture(t *testing.T, + parent *flow.Header, + protocolStateID flow.Identifier, + builder *CompleteExecutionReceiptBuilder, + sources [][]byte, +) (*flow.ExecutionResult, *ExecutionReceiptData) { + // create the block header including a QC with the source at index `i` + refBlkHeader := unittest.BlockHeaderWithParentWithSoRFixture(parent, sources[0]) + // execute the block with the source at index `i+1` (which will be included later in the child block) + return ExecutionResultFixture(t, builder.chunksCount, builder.chain, refBlkHeader, protocolStateID, builder.clusterCommittee, sources[1]) } // ContainerBlockFixture builds and returns a block that contains input execution receipts.
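Condensed, the chain construction wires these fixtures together as follows; this is a sketch of the call pattern already shown in CompleteExecutionReceiptChainFixture above, with idx standing in for the caller's running source index:

receipts, data, head := ExecutionReceiptsFromParentBlockFixture(t, parent, protocolStateID, builder, sources[idx:])
idx += builder.resultsCount
container := ContainerBlockFixture(head, protocolStateID, receipts, sources[idx])
idx++
parent = container.ToHeader() // the next reference block extends the container block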
-func ContainerBlockFixture(parent *flow.Header, receipts []*flow.ExecutionReceipt) *flow.Block { +func ContainerBlockFixture(parent *flow.Header, protocolStateID flow.Identifier, receipts []*flow.ExecutionReceipt, source []byte) *flow.Block { // container block is the block that contains the execution receipt of reference block - containerBlock := unittest.BlockWithParentFixture(parent) - containerBlock.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipts...))) + containerBlock := unittest.BlockWithParentAndPayload( + parent, + unittest.PayloadFixture( + unittest.WithReceipts(receipts...), + unittest.WithProtocolStateID(protocolStateID), + ), + ) + containerBlock.ParentVoterSigData = unittest.QCSigDataWithSoRFixture(source) return containerBlock } @@ -452,8 +512,10 @@ func ContainerBlockFixture(parent *flow.Header, receipts []*flow.ExecutionReceip func ExecutionResultForkFixture(t *testing.T) (*flow.ExecutionResult, *flow.ExecutionResult, *flow.Collection, *flow.Block) { // collection and block collections := unittest.CollectionListFixture(1) - block := unittest.BlockWithGuaranteesFixture( - unittest.CollectionGuaranteesWithCollectionIDFixture(collections), + block := unittest.BlockFixture( + unittest.Block.WithPayload( + unittest.PayloadFixture(unittest.WithGuarantees(unittest.CollectionGuaranteesWithCollectionIDFixture(collections)...)), + ), ) // execution fork at block with resultA and resultB that share first chunk @@ -463,7 +525,7 @@ func ExecutionResultForkFixture(t *testing.T) (*flow.ExecutionResult, *flow.Exec resultB := &flow.ExecutionResult{ PreviousResultID: resultA.PreviousResultID, BlockID: resultA.BlockID, - Chunks: append(flow.ChunkList{resultA.Chunks[0]}, unittest.ChunkListFixture(1, resultA.BlockID)...), + Chunks: append(flow.ChunkList{resultA.Chunks[0]}, unittest.ChunkListFixture(1, resultA.BlockID, resultA.Chunks[0].EndState)...), ServiceEvents: nil, } diff --git a/engine/verification/utils/unittest/helper.go b/engine/verification/utils/unittest/helper.go index 7c6e6eec323..6582684b21f 100644 --- a/engine/verification/utils/unittest/helper.go +++ b/engine/verification/utils/unittest/helper.go @@ -7,21 +7,22 @@ import ( "testing" "time" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/testutil" enginemock "github.com/onflow/flow-go/engine/testutil/mock" "github.com/onflow/flow-go/engine/verification/assigner/blockconsumer" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/mock" @@ -29,10 +30,11 @@ import ( "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/protocol" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" 
"github.com/onflow/flow-go/utils/logging" "github.com/onflow/flow-go/utils/unittest" ) @@ -46,7 +48,7 @@ type MockChunkDataProviderFunc func(*testing.T, CompleteExecutionReceiptList, fl // requests should come from a verification node, and should has one of the assigned chunk IDs. Otherwise, it fails the test. func SetupChunkDataPackProvider(t *testing.T, hub *stub.Hub, - exeIdentity *flow.Identity, + exeIdentity bootstrap.NodeInfo, participants flow.IdentityList, chainID flow.ChainID, completeERs CompleteExecutionReceiptList, @@ -58,7 +60,7 @@ func SetupChunkDataPackProvider(t *testing.T, exeEngine := new(mocknetwork.Engine) exeChunkDataConduit, err := exeNode.Net.Register(channels.ProvideChunks, exeEngine) - assert.Nil(t, err) + assert.NoError(t, err) replied := make(map[flow.Identifier]struct{}) @@ -75,9 +77,9 @@ func SetupChunkDataPackProvider(t *testing.T, originID, ok := args[1].(flow.Identifier) require.True(t, ok) // request should be dispatched by a verification node. - require.Contains(t, participants.Filter(filter.HasRole(flow.RoleVerification)).NodeIDs(), originID) + require.Contains(t, participants.Filter(filter.HasRole[flow.Identity](flow.RoleVerification)).NodeIDs(), originID) - req, ok := args[2].(*messages.ChunkDataRequest) + req, ok := args[2].(*flow.ChunkDataRequest) require.True(t, ok) require.Contains(t, assignedChunkIDs, req.ChunkID) // only assigned chunks should be requested. @@ -107,7 +109,7 @@ func RespondChunkDataPackRequestImmediately(t *testing.T, res := completeERs.ChunkDataResponseOf(t, chunkID) err := con.Unicast(res, verID) - assert.Nil(t, err) + assert.NoError(t, err) log.Debug(). Hex("origin_id", logging.ID(verID)). @@ -129,7 +131,7 @@ func RespondChunkDataPackRequestAfterNTrials(n int) MockChunkDataProviderFunc { res := completeERs.ChunkDataResponseOf(t, chunkID) err := con.Unicast(res, verID) - assert.Nil(t, err) + assert.NoError(t, err) log.Debug(). Hex("origin_id", logging.ID(verID)). 
@@ -150,7 +152,7 @@ func RespondChunkDataPackRequestAfterNTrials(n int) MockChunkDataProviderFunc { func SetupMockConsensusNode(t *testing.T, log zerolog.Logger, hub *stub.Hub, - conIdentity *flow.Identity, + conIdentity bootstrap.NodeInfo, verIdentities flow.IdentityList, othersIdentity flow.IdentityList, completeERs CompleteExecutionReceiptList, @@ -252,7 +254,7 @@ func SetupMockConsensusNode(t *testing.T, }).Return(nil) _, err := conNode.Net.Register(channels.ReceiveApprovals, conEngine) - assert.Nil(t, err) + assert.NoError(t, err) return &conNode, conEngine, wg } @@ -263,23 +265,23 @@ func isSystemChunk(index uint64, chunkNum int) bool { return int(index) == chunkNum-1 } -func CreateExecutionResult(blockID flow.Identifier, options ...func(result *flow.ExecutionResult, assignments *chunks.Assignment)) (*flow.ExecutionResult, *chunks.Assignment) { +func CreateExecutionResult(blockID flow.Identifier, options ...func(result *flow.ExecutionResult, assignments *chunks.AssignmentBuilder)) (*flow.ExecutionResult, *chunks.Assignment) { result := &flow.ExecutionResult{ BlockID: blockID, Chunks: flow.ChunkList{}, } - assignments := chunks.NewAssignment() + assignmentsBuilder := chunks.NewAssignmentBuilder() for _, option := range options { - option(result, assignments) + option(result, assignmentsBuilder) } - return result, assignments + return result, assignmentsBuilder.Build() } -func WithChunks(setAssignees ...func(flow.Identifier, uint64, *chunks.Assignment) *flow.Chunk) func(*flow.ExecutionResult, *chunks.Assignment) { - return func(result *flow.ExecutionResult, assignment *chunks.Assignment) { +func WithChunks(setAssignees ...func(flow.Identifier, uint64, *chunks.AssignmentBuilder) *flow.Chunk) func(*flow.ExecutionResult, *chunks.AssignmentBuilder) { + return func(result *flow.ExecutionResult, assignmentBuilder *chunks.AssignmentBuilder) { for i, setAssignee := range setAssignees { - chunk := setAssignee(result.BlockID, uint64(i), assignment) + chunk := setAssignee(result.BlockID, uint64(i), assignmentBuilder) result.Chunks.Insert(chunk) } } @@ -298,21 +300,15 @@ func ChunkWithIndex(blockID flow.Identifier, index int) *flow.Chunk { return chunk } -func WithAssignee(assignee flow.Identifier) func(flow.Identifier, uint64, *chunks.Assignment) *flow.Chunk { - return func(blockID flow.Identifier, index uint64, assignment *chunks.Assignment) *flow.Chunk { +func WithAssignee(t *testing.T, assignee flow.Identifier) func(flow.Identifier, uint64, *chunks.AssignmentBuilder) *flow.Chunk { + return func(blockID flow.Identifier, index uint64, assignmentBuilder *chunks.AssignmentBuilder) *flow.Chunk { chunk := ChunkWithIndex(blockID, int(index)) fmt.Printf("with assignee: %v, chunk id: %v\n", index, chunk.ID()) - assignment.Add(chunk, flow.IdentifierList{assignee}) + require.NoError(t, assignmentBuilder.Add(chunk.Index, flow.IdentifierList{assignee})) return chunk } } -func FromChunkID(chunkID flow.Identifier) flow.ChunkDataPack { - return flow.ChunkDataPack{ - ChunkID: chunkID, - } -} - type ChunkAssignerFunc func(chunkIndex uint64, chunks int) bool // MockChunkAssignmentFixture is a test helper that mocks a chunk assigner for a set of verification nodes for the @@ -320,7 +316,8 @@ type ChunkAssignerFunc func(chunkIndex uint64, chunks int) bool // // It returns the list of chunk locator ids assigned to the input verification nodes, as well as the list of their chunk IDs. // All verification nodes are assigned the same chunks. 
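Its assignments, like those of the other helpers in this file, are built through the new builder API: chunks.NewAssignmentBuilder replaces direct mutation of a chunks.Assignment, Add registers a chunk index together with its (possibly empty) verifier list and returns an error instead of failing silently, and Build finalizes the assignment. A condensed sketch of the pattern as these fixtures use it (result, isAssigned, and verIDs are placeholder names):

a := chunks.NewAssignmentBuilder()
for _, chunk := range result.Chunks {
	verifiers := flow.IdentifierList{} // a chunk may be registered with no verifiers
	if isAssigned(chunk.Index, len(result.Chunks)) {
		verifiers = verIDs
	}
	require.NoError(t, a.Add(chunk.Index, verifiers))
}
assignment := a.Build()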
-func MockChunkAssignmentFixture(chunkAssigner *mock.ChunkAssigner, +func MockChunkAssignmentFixture(t *testing.T, + chunkAssigner *mock.ChunkAssigner, verIds flow.IdentityList, completeERs CompleteExecutionReceiptList, isAssigned ChunkAssignerFunc) (flow.IdentifierList, flow.IdentifierList) { @@ -333,7 +330,7 @@ func MockChunkAssignmentFixture(chunkAssigner *mock.ChunkAssigner, for _, completeER := range completeERs { for _, receipt := range completeER.Receipts { - a := chunks.NewAssignment() + a := chunks.NewAssignmentBuilder() _, duplicate := visited[receipt.ExecutionResult.ID()] if duplicate { @@ -343,18 +340,19 @@ func MockChunkAssignmentFixture(chunkAssigner *mock.ChunkAssigner, for _, chunk := range receipt.ExecutionResult.Chunks { if isAssigned(chunk.Index, len(receipt.ExecutionResult.Chunks)) { - locatorID := chunks.Locator{ - ResultID: receipt.ExecutionResult.ID(), - Index: chunk.Index, - }.ID() + locatorID := unittest.ChunkLocatorFixture(receipt.ExecutionResult.ID(), chunk.Index).ID() expectedLocatorIds = append(expectedLocatorIds, locatorID) expectedChunkIds = append(expectedChunkIds, chunk.ID()) - a.Add(chunk, verIds.NodeIDs()) + require.NoError(t, a.Add(chunk.Index, verIds.NodeIDs())) + } else { + // the chunk has no verifiers assigned + require.NoError(t, a.Add(chunk.Index, flow.IdentifierList{})) } } + assignment := a.Build() - chunkAssigner.On("Assign", &receipt.ExecutionResult, completeER.ContainerBlock.ID()).Return(a, nil) + chunkAssigner.On("Assign", &receipt.ExecutionResult, completeER.ContainerBlock.ID()).Return(assignment, nil) visited[receipt.ExecutionResult.ID()] = struct{}{} } } @@ -395,7 +393,7 @@ func ExtendStateWithFinalizedBlocks(t *testing.T, completeExecutionReceipts Comp continue } - err := state.Extend(context.Background(), receipt.ReferenceBlock) + err := state.Extend(context.Background(), unittest.ProposalFromBlock(receipt.ReferenceBlock)) require.NoError(t, err, fmt.Errorf("can not extend block %v: %w", receipt.ReferenceBlock.ID(), err)) err = state.Finalize(context.Background(), refBlockID) require.NoError(t, err) @@ -410,7 +408,7 @@ func ExtendStateWithFinalizedBlocks(t *testing.T, completeExecutionReceipts Comp // skips extending state with already duplicate container block continue } - err := state.Extend(context.Background(), completeER.ContainerBlock) + err := state.Extend(context.Background(), unittest.ProposalFromBlock(completeER.ContainerBlock)) require.NoError(t, err) err = state.Finalize(context.Background(), containerBlockID) require.NoError(t, err) @@ -478,23 +476,37 @@ func withConsumers(t *testing.T, log := zerolog.Nop() // bootstraps system with one node of each role. - s, verID, participants := bootstrapSystem(t, log, tracer, authorized) - exeID := participants.Filter(filter.HasRole(flow.RoleExecution))[0] - conID := participants.Filter(filter.HasRole(flow.RoleConsensus))[0] + s, verID, bootstrapNodesInfo := bootstrapSystem(t, log, tracer, authorized) + + participants := bootstrap.ToIdentityList(bootstrapNodesInfo) + exeIndex := slices.IndexFunc(bootstrapNodesInfo, func(info bootstrap.NodeInfo) bool { + return info.Role == flow.RoleExecution + }) + conIndex := slices.IndexFunc(bootstrapNodesInfo, func(info bootstrap.NodeInfo) bool { + return info.Role == flow.RoleConsensus + }) // generates a chain of blocks in the form of root <- R1 <- C1 <- R2 <- C2 <- ... 
where Rs are distinct reference // blocks (i.e., containing guarantees), and Cs are container blocks for their preceding reference block, // Container blocks only contain receipts of their preceding reference blocks. But they do not // hold any guarantees. root, err := s.State.Final().Head() require.NoError(t, err) + protocolState, err := s.State.Final().ProtocolState() + require.NoError(t, err) + protocolStateID := protocolState.ID() + chainID := root.ChainID ops = append(ops, WithExecutorIDs( - participants.Filter(filter.HasRole(flow.RoleExecution)).NodeIDs()), func(builder *CompleteExecutionReceiptBuilder) { + participants.Filter(filter.HasRole[flow.Identity](flow.RoleExecution)).NodeIDs()), func(builder *CompleteExecutionReceiptBuilder) { // needed for the guarantees to have the correct chainID and signer indices - builder.clusterCommittee = participants.Filter(filter.HasRole(flow.RoleCollection)) + builder.clusterCommittee = participants.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) }) - completeERs := CompleteExecutionReceiptChainFixture(t, root, blockCount, ops...) + // random sources for all blocks: + // - root block (block[0]) is executed with sources[0] (included in QC of child block[1]) + // - block[i] is executed with sources[i] (included in QC of child block[i+1]) + sources := unittest.RandomSourcesFixture(30) + completeERs := CompleteExecutionReceiptChainFixture(t, root, protocolStateID, blockCount, sources, ops...) blocks := ExtendStateWithFinalizedBlocks(t, completeERs, s.State) // chunk assignment @@ -502,8 +514,9 @@ func withConsumers(t *testing.T, assignedChunkIDs := flow.IdentifierList{} if authorized { // only authorized verification node has some chunks assigned to it. - _, assignedChunkIDs = MockChunkAssignmentFixture(chunkAssigner, - flow.IdentityList{verID}, + _, assignedChunkIDs = MockChunkAssignmentFixture(t, + chunkAssigner, + flow.IdentityList{verID.Identity()}, completeERs, EvenChunkIndexAssigner) } @@ -523,7 +536,7 @@ func withConsumers(t *testing.T, // execution node exeNode, exeEngine, exeWG := SetupChunkDataPackProvider(t, hub, - exeID, + bootstrapNodesInfo[exeIndex], participants, chainID, completeERs, @@ -534,8 +547,8 @@ func withConsumers(t *testing.T, conNode, conEngine, conWG := SetupMockConsensusNode(t, unittest.Logger(), hub, - conID, - flow.IdentityList{verID}, + bootstrapNodesInfo[conIndex], + flow.IdentityList{verID.Identity()}, participants, completeERs, chainID, @@ -591,10 +604,10 @@ func withConsumers(t *testing.T, } // verifies memory resources are cleaned up all over pipeline - assert.True(t, verNode.BlockConsumer.Size() == 0) - assert.True(t, verNode.ChunkConsumer.Size() == 0) - assert.True(t, verNode.ChunkStatuses.Size() == 0) - assert.True(t, verNode.ChunkRequests.Size() == 0) + assert.Zero(t, verNode.BlockConsumer.Size()) + assert.Zero(t, verNode.ChunkConsumer.Size()) + assert.Zero(t, verNode.ChunkStatuses.Size()) + assert.Zero(t, verNode.ChunkRequests.Size()) } // bootstrapSystem is a test helper that bootstraps a flow system with node of each main roles (except execution nodes that are two). 
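The EvenChunkIndexAssigner passed to MockChunkAssignmentFixture above is a ChunkAssignerFunc, deciding per chunk index whether the verification nodes under test are assigned that chunk. Its exact policy is not shown in this diff; a plausible sketch, reusing the isSystemChunk helper defined earlier in this file, would be:

// evenChunkIndexAssigner is a hypothetical ChunkAssignerFunc: it assigns every
// even-indexed chunk, plus the system chunk (the last chunk in the list).
func evenChunkIndexAssigner(index uint64, chunkNum int) bool {
	return index%2 == 0 || isSystemChunk(index, chunkNum)
}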
@@ -609,13 +622,21 @@ func bootstrapSystem( authorized bool, ) ( *enginemock.StateFixture, - *flow.Identity, - flow.IdentityList, + bootstrap.NodeInfo, + []bootstrap.NodeInfo, ) { - // creates identities to bootstrap system with - verID := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - identities := unittest.CompleteIdentitySet(verID) - identities = append(identities, unittest.IdentityFixture(unittest.WithRole(flow.RoleExecution))) // adds extra execution node + // creates bootstrapNodesInfo to bootstrap system with + bootstrapNodesInfo := make([]bootstrap.NodeInfo, 0) + var verID bootstrap.NodeInfo + for _, missingRole := range unittest.CompleteIdentitySet() { + nodeInfo := unittest.PrivateNodeInfoFixture(unittest.WithRole(missingRole.Role)) + if nodeInfo.Role == flow.RoleVerification { + verID = nodeInfo + } + bootstrapNodesInfo = append(bootstrapNodesInfo, nodeInfo) + } + bootstrapNodesInfo = append(bootstrapNodesInfo, unittest.PrivateNodeInfoFixture(unittest.WithRole(flow.RoleExecution))) // adds extra execution node + identities := bootstrap.ToIdentityList(bootstrapNodesInfo) collector := &metrics.NoopCollector{} rootSnapshot := unittest.RootSnapshotFixture(identities) @@ -624,14 +645,25 @@ func bootstrapSystem( if !authorized { // creates a new verification node identity that is unauthorized for this epoch - verID = unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - identities = identities.Union(flow.IdentityList{verID}) - - epochBuilder := unittest.NewEpochBuilder(t, stateFixture.State) + verID = unittest.PrivateNodeInfoFixture(unittest.WithRole(flow.RoleVerification)) + bootstrapNodesInfo = append(bootstrapNodesInfo, verID) + identities = append(identities, verID.Identity()) + + mutableProtocolState := protocol_state.NewMutableProtocolState( + log, + stateFixture.Storage.EpochProtocolStateEntries, + stateFixture.Storage.ProtocolKVStore, + stateFixture.State.Params(), + stateFixture.Storage.Headers, + stateFixture.Storage.Results, + stateFixture.Storage.EpochSetups, + stateFixture.Storage.EpochCommits, + ) + epochBuilder := unittest.NewEpochBuilder(t, mutableProtocolState, stateFixture.State) epochBuilder. - UsingSetupOpts(unittest.WithParticipants(identities)). + UsingSetupOpts(unittest.WithParticipants(identities.ToSkeleton())). BuildEpoch() } - return stateFixture, verID, identities + return stateFixture, verID, bootstrapNodesInfo } diff --git a/engine/verification/verifier/engine.go b/engine/verification/verifier/engine.go index 8b412dc2f66..2d35b9f3a45 100644 --- a/engine/verification/verifier/engine.go +++ b/engine/verification/verifier/engine.go @@ -4,11 +4,12 @@ import ( "context" "fmt" + "github.com/jordanschalm/lockctx" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/rs/zerolog" "go.opentelemetry.io/otel/attribute" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/engine/verification/utils" chmodels "github.com/onflow/flow-go/model/chunks" @@ -43,6 +44,7 @@ type Engine struct { chVerif module.ChunkVerifier // used to verify chunks spockHasher hash.Hasher // used for generating spocks approvals storage.ResultApprovals // used to store result approvals + lockManager lockctx.Manager } // New creates and returns a new instance of a verifier engine. 
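The engine's verify path, changed below, now receives chunk faults as ordinary errors from the ChunkVerifier, and only one fault class still yields a result approval. A sketch of the policy the type switch encodes (errors.As is used here for illustration; the engine itself type-switches on the error directly):

// shouldApprove sketches the approval decision made in verify below: a nil
// error means the chunk verified cleanly, and CFMissingRegisterTouch is the
// single fault that still yields an approval. Any other chunk fault is logged
// and dropped, and any non-fault error is returned to the caller as an exception.
func shouldApprove(err error) bool {
	if err == nil {
		return true
	}
	var missing *chmodels.CFMissingRegisterTouch
	return errors.As(err, &missing)
}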
@@ -50,11 +52,12 @@ func New( log zerolog.Logger, metrics module.VerificationMetrics, tracer module.Tracer, - net network.Network, + net network.EngineRegistry, state protocol.State, me module.Local, chVerif module.ChunkVerifier, approvals storage.ResultApprovals, + lockManager lockctx.Manager, ) (*Engine, error) { e := &Engine{ @@ -68,6 +71,7 @@ func New( approvalHasher: utils.NewResultApprovalHasher(), spockHasher: signature.NewBLSHasher(signature.SPOCKTag), approvals: approvals, + lockManager: lockManager, } var err error @@ -138,7 +142,7 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { switch resource := event.(type) { case *verification.VerifiableChunkData: err = e.verifiableChunkHandler(originID, resource) - case *messages.ApprovalRequest: + case *flow.ApprovalRequest: err = e.approvalRequestHandler(originID, resource) default: return fmt.Errorf("invalid event type (%T)", event) @@ -163,69 +167,115 @@ func (e *Engine) process(originID flow.Identifier, event interface{}) error { func (e *Engine) verify(ctx context.Context, originID flow.Identifier, vc *verification.VerifiableChunkData) error { // log it first - log := e.log.With().Timestamp(). - Hex("origin", logging.ID(originID)). + log := e.log.With(). Uint64("chunk_index", vc.Chunk.Index). Hex("result_id", logging.Entity(vc.Result)). + Uint64("block_height", vc.Header.Height). + Hex("block_id", vc.Chunk.ChunkBody.BlockID[:]). Logger() log.Info().Msg("verifiable chunk received by verifier engine") // only accept internal calls if originID != e.me.NodeID() { - return fmt.Errorf("invalid remote origin for verify") + return fmt.Errorf("invalid remote origin for verify: %v", originID) } - var err error - // extracts chunk ID ch, ok := vc.Result.Chunks.ByIndex(vc.Chunk.Index) if !ok { return engine.NewInvalidInputErrorf("chunk out of range requested: %v", vc.Chunk.Index) } - log.With().Hex("chunk_id", logging.Entity(ch)).Logger() + log = log.With(). + Hex("chunk_id", logging.Entity(ch)). + Logger() // execute the assigned chunk span, _ := e.tracer.StartSpanFromContext(ctx, trace.VERVerChunkVerify) - spockSecret, chFault, err := e.chVerif.Verify(vc) + spockSecret, err := e.chVerif.Verify(vc) span.End() - // Any err means that something went wrong when verify the chunk - // the outcome of the verification is captured inside the chFault and not the err + if err != nil { - return fmt.Errorf("cannot verify chunk: %w", err) - } + // any error besides a ChunkFaultError is a system error + if !chmodels.IsChunkFaultError(err) { + return fmt.Errorf("cannot verify chunk: %w", err) + } - // if any fault found with the chunk - if chFault != nil { - switch chFault.(type) { + // if any fault found with the chunk + switch chFault := err.(type) { case *chmodels.CFMissingRegisterTouch: - e.log.Warn().Msg(chFault.String()) + log.Warn(). + Str("chunk_fault_type", "missing_register_touch"). + Str("chunk_fault", chFault.Error()). + Msg("chunk fault found, could not verify chunk") // still create approvals for this case case *chmodels.CFNonMatchingFinalState: // TODO raise challenge - e.log.Warn().Msg(chFault.String()) + log.Warn(). + Str("chunk_fault_type", "final_state_mismatch"). + Str("chunk_fault", chFault.Error()). + Msg("chunk fault found, could not verify chunk") return nil case *chmodels.CFInvalidVerifiableChunk: // TODO raise challenge - e.log.Error().Msg(chFault.String()) + log.Error(). + Str("chunk_fault_type", "invalid_verifiable_chunk"). + Str("chunk_fault", chFault.Error()). 
+ Msg("chunk fault found, could not verify chunk") return nil case *chmodels.CFInvalidEventsCollection: // TODO raise challenge - e.log.Error().Msg(chFault.String()) + log.Error(). + Str("chunk_fault_type", "invalid_event_collection"). + Str("chunk_fault", chFault.Error()). + Msg("chunk fault found, could not verify chunk") + return nil + case *chmodels.CFSystemChunkIncludedCollection: + log.Error(). + Str("chunk_fault_type", "system_chunk_includes_collection"). + Str("chunk_fault", chFault.Error()). + Msg("chunk fault found, could not verify chunk") + return nil + case *chmodels.CFExecutionDataBlockIDMismatch: + log.Error(). + Str("chunk_fault_type", "execution_data_block_id_mismatch"). + Str("chunk_fault", chFault.Error()). + Msg("chunk fault found, could not verify chunk") + return nil + case *chmodels.CFExecutionDataChunksLengthMismatch: + log.Error(). + Str("chunk_fault_type", "execution_data_chunks_count_mismatch"). + Str("chunk_fault", chFault.Error()). + Msg("chunk fault found, could not verify chunk") + return nil + case *chmodels.CFExecutionDataInvalidChunkCID: + log.Error(). + Str("chunk_fault_type", "execution_data_chunk_cid_mismatch"). + Str("chunk_fault", chFault.Error()). + Msg("chunk fault found, could not verify chunk") + return nil + case *chmodels.CFInvalidExecutionDataID: + log.Error(). + Str("chunk_fault_type", "execution_data_root_cid_mismatch"). + Str("chunk_fault", chFault.Error()). + Msg("chunk fault found, could not verify chunk") return nil default: return engine.NewInvalidInputErrorf("unknown type of chunk fault is received (type: %T) : %v", - chFault, chFault.String()) + chFault, chFault.Error()) } } // Generate result approval span, _ = e.tracer.StartSpanFromContext(ctx, trace.VERVerGenerateResultApproval) - attestation := &flow.Attestation{ + attestation, err := flow.NewAttestation(flow.UntrustedAttestation{ BlockID: vc.Header.ID(), ExecutionResultID: vc.Result.ID(), ChunkIndex: vc.Chunk.Index, + }) + if err != nil { + return fmt.Errorf("could not build attestation: %w", err) } approval, err := GenerateResultApproval( e.me, @@ -239,27 +289,22 @@ func (e *Engine) verify(ctx context.Context, originID flow.Identifier, return fmt.Errorf("couldn't generate a result approval: %w", err) } - err = e.approvals.Store(approval) + err = e.storeApproval(approval) if err != nil { return fmt.Errorf("could not store approval: %w", err) } - err = e.approvals.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) - if err != nil { - return fmt.Errorf("could not index approval: %w", err) - } - // Extracting consensus node ids // TODO state extraction should be done based on block references consensusNodes, err := e.state.Final(). - Identities(filter.HasRole(flow.RoleConsensus)) + Identities(filter.HasRole[flow.Identity](flow.RoleConsensus)) if err != nil { // TODO this error needs more advance handling after MVP return fmt.Errorf("could not load consensus node IDs: %w", err) } // broadcast result approval to the consensus nodes - err = e.pushConduit.Publish(approval, consensusNodes.NodeIDs()...) + err = e.pushConduit.Publish((*messages.ResultApproval)(approval), consensusNodes.NodeIDs()...) if err != nil { // TODO this error needs more advance handling after MVP return fmt.Errorf("could not submit result approval: %w", err) @@ -271,6 +316,31 @@ func (e *Engine) verify(ctx context.Context, originID flow.Identifier, return nil } +// storeApproval stores the result approval in the database. 
+// Concurrency safe and guarantees that an approval for a result is never +// overwritten by a different one (enforcing protocol rule that Verifier +// must never publish two different approvals for the same chunk). +// No errors are expected during normal operations. +func (e *Engine) storeApproval(approval *flow.ResultApproval) error { + // create deferred operation for storing approval in the database + storing := e.approvals.StoreMyApproval(approval) + + lctx := e.lockManager.NewContext() + defer lctx.Release() + + err := lctx.AcquireLock(storage.LockIndexResultApproval) + if err != nil { + return fmt.Errorf("fail to acquire lock to insert result approval: %w", err) + } + + err = storing(lctx) + if err != nil { + return fmt.Errorf("could not store result approval: %w", err) + } + + return nil +} + // GenerateResultApproval generates result approval for specific chunk of an execution receipt. func GenerateResultApproval( me module.Local, @@ -294,11 +364,14 @@ func GenerateResultApproval( } // result approval body - body := flow.ResultApprovalBody{ + body, err := flow.NewResultApprovalBody(flow.UntrustedResultApprovalBody{ Attestation: *attestation, ApproverID: me.NodeID(), AttestationSignature: atstSign, Spock: spock, + }) + if err != nil { + return nil, fmt.Errorf("could not build result approval body: %w", err) } // generates a signature over result approval body @@ -308,10 +381,15 @@ func GenerateResultApproval( return nil, fmt.Errorf("could not sign result approval body: %w", err) } - return &flow.ResultApproval{ - Body: body, + resultApproval, err := flow.NewResultApproval(flow.UntrustedResultApproval{ + Body: *body, VerifierSignature: bodySign, - }, nil + }) + if err != nil { + return nil, fmt.Errorf("could not build result approval: %w", err) + } + + return resultApproval, nil } // verifiableChunkHandler acts as a wrapper around the verify method that captures its performance-related metrics @@ -347,7 +425,7 @@ func (e *Engine) verifiableChunkHandler(originID flow.Identifier, ch *verificati return nil } -func (e *Engine) approvalRequestHandler(originID flow.Identifier, req *messages.ApprovalRequest) error { +func (e *Engine) approvalRequestHandler(originID flow.Identifier, req *flow.ApprovalRequest) error { log := e.log.With(). Hex("origin_id", logging.ID(originID)). @@ -374,7 +452,7 @@ func (e *Engine) approvalRequestHandler(originID flow.Identifier, req *messages. 
response := &messages.ApprovalResponse{ Nonce: req.Nonce, - Approval: *approval, + Approval: flow.UntrustedResultApproval(*approval), } err = e.pullConduit.Unicast(response, originID) diff --git a/engine/verification/verifier/engine_test.go b/engine/verification/verifier/engine_test.go index 511b05bf828..627ca29cdbb 100644 --- a/engine/verification/verifier/engine_test.go +++ b/engine/verification/verifier/engine_test.go @@ -3,47 +3,53 @@ package verifier_test import ( "crypto/rand" "errors" + "sync/atomic" "testing" - "github.com/rs/zerolog" + "github.com/ipfs/go-cid" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/mock" testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/onflow/flow-go/engine/testutil/mocklocal" "github.com/onflow/flow-go/engine/verification/utils" "github.com/onflow/flow-go/engine/verification/verifier" chmodel "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/model/verification" realModule "github.com/onflow/flow-go/module" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" mockstorage "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" ) type VerifierEngineTestSuite struct { suite.Suite - net *mocknetwork.Network - tracer realModule.Tracer - state *protocol.State - ss *protocol.Snapshot - me *mocklocal.MockLocal - sk crypto.PrivateKey - hasher hash.Hasher - chain flow.Chain - pushCon *mocknetwork.Conduit // mocks con for submitting result approvals - pullCon *mocknetwork.Conduit - metrics *mockmodule.VerificationMetrics // mocks performance monitoring metrics - approvals *mockstorage.ResultApprovals + net *mocknetwork.EngineRegistry + tracer realModule.Tracer + state *protocol.State + ss *protocol.Snapshot + me *mocklocal.MockLocal + sk crypto.PrivateKey + hasher hash.Hasher + chain flow.Chain + pushCon *mocknetwork.Conduit // mocks con for submitting result approvals + pullCon *mocknetwork.Conduit + metrics *mockmodule.VerificationMetrics // mocks performance monitoring metrics + approvals *mockstorage.ResultApprovals + chunkVerifier *mockmodule.ChunkVerifier + lockManager lockctx.Manager } func TestVerifierEngine(t *testing.T) { @@ -51,19 +57,17 @@ func TestVerifierEngine(t *testing.T) { } func (suite *VerifierEngineTestSuite) SetupTest() { - - suite.state = &protocol.State{} - suite.net = &mocknetwork.Network{} + suite.lockManager = storage.NewTestingLockManager() + suite.state = new(protocol.State) + suite.net = mocknetwork.NewEngineRegistry(suite.T()) suite.tracer = trace.NewNoopTracer() - suite.ss = &protocol.Snapshot{} - suite.pushCon = &mocknetwork.Conduit{} - suite.pullCon = &mocknetwork.Conduit{} - suite.metrics = &mockmodule.VerificationMetrics{} + suite.ss = new(protocol.Snapshot) + suite.pushCon = mocknetwork.NewConduit(suite.T()) + suite.pullCon = mocknetwork.NewConduit(suite.T()) + suite.metrics = mockmodule.NewVerificationMetrics(suite.T()) suite.chain = flow.Testnet.Chain() - suite.approvals = 
&mockstorage.ResultApprovals{} - - suite.approvals.On("Store", mock.Anything).Return(nil) - suite.approvals.On("Index", mock.Anything, mock.Anything, mock.Anything).Return(nil) + suite.approvals = mockstorage.NewResultApprovals(suite.T()) + suite.chunkVerifier = mockmodule.NewChunkVerifier(suite.T()) suite.net.On("Register", channels.PushApprovals, testifymock.Anything). Return(suite.pushCon, nil). @@ -97,154 +101,210 @@ func (suite *VerifierEngineTestSuite) SetupTest() { suite.me = mocklocal.NewMockLocal(sk, verIdentity.NodeID, suite.T()) } -func (suite *VerifierEngineTestSuite) TestNewEngine() *verifier.Engine { +func (suite *VerifierEngineTestSuite) getTestNewEngine() *verifier.Engine { e, err := verifier.New( - zerolog.Logger{}, + unittest.Logger(), suite.metrics, suite.tracer, suite.net, suite.state, suite.me, - ChunkVerifierMock{}, - suite.approvals) - require.Nil(suite.T(), err) + suite.chunkVerifier, + suite.approvals, + suite.lockManager, + ) + require.NoError(suite.T(), err) suite.net.AssertExpectations(suite.T()) return e } -func (suite *VerifierEngineTestSuite) TestIncorrectResult() { - // TODO when ERs are verified -} - // TestVerifyHappyPath tests the verification path for a single verifiable chunk, which is // assigned to the verifier node, and is passed by the ingest engine // The tests evaluates that a result approval is emitted to all consensus nodes // about the input execution receipt func (suite *VerifierEngineTestSuite) TestVerifyHappyPath() { + eng := suite.getTestNewEngine() - eng := suite.TestNewEngine() - myID := unittest.IdentifierFixture() consensusNodes := unittest.IdentityListFixture(1, unittest.WithRole(flow.RoleConsensus)) - // creates a verifiable chunk - vChunk := unittest.VerifiableChunkDataFixture(uint64(0)) - - // mocking node ID using the LocalMock - suite.me.MockNodeID(myID) suite.ss.On("Identities", testifymock.Anything).Return(consensusNodes, nil) - // mocks metrics - // reception of verifiable chunk - suite.metrics.On("OnVerifiableChunkReceivedAtVerifierEngine").Return() - // emission of result approval - suite.metrics.On("OnResultApprovalDispatchedInNetworkByVerifier").Return() - - suite.pushCon. - On("Publish", testifymock.Anything, testifymock.Anything). - Return(nil). - Run(func(args testifymock.Arguments) { - // check that the approval matches the input execution result - ra, ok := args[0].(*flow.ResultApproval) - suite.Assert().True(ok) - suite.Assert().Equal(vChunk.Result.ID(), ra.Body.ExecutionResultID) - - // verifies the signatures - atstID := ra.Body.Attestation.ID() - suite.Assert().True(suite.sk.PublicKey().Verify(ra.Body.AttestationSignature, atstID[:], suite.hasher)) - bodyID := ra.Body.ID() - suite.Assert().True(suite.sk.PublicKey().Verify(ra.VerifierSignature, bodyID[:], suite.hasher)) - - // spock should be non-nil - suite.Assert().NotNil(ra.Body.Spock) - }). 
- Once() + vChunk, _ := unittest.VerifiableChunkDataFixture(uint64(0)) - err := eng.ProcessLocal(vChunk) - suite.Assert().NoError(err) - suite.ss.AssertExpectations(suite.T()) - suite.pushCon.AssertExpectations(suite.T()) + tests := []struct { + name string + err error + }{ + // tests that a valid verifiable chunk is verified and a result approval is emitted + { + name: "chunk verified successfully", + err: nil, + }, + // tests that a verifiable chunk that triggers a CFMissingRegisterTouch fault still emits a result approval + { + name: "chunk failed with missing register touch fault", + err: chmodel.NewCFMissingRegisterTouch( + []string{"test missing register touch"}, + vChunk.Chunk.Index, + vChunk.Result.ID(), + unittest.TransactionFixture().ID()), + }, + } + for _, test := range tests { + suite.Run(test.name, func() { + + var expectedApproval atomic.Pointer[messages.ResultApproval] + + suite.approvals. + On("StoreMyApproval", mock.Anything). + Return(func(ra *flow.ResultApproval) func(lockctx.Proof) error { + return func(lctx lockctx.Proof) error { + suite.Assert().True(lctx.HoldsLock(storage.LockIndexResultApproval)) + suite.Assert().Equal(vChunk.Chunk.BlockID, ra.Body.BlockID) + suite.Assert().Equal(vChunk.Result.ID(), ra.Body.ExecutionResultID) + suite.Assert().Equal(vChunk.Chunk.Index, ra.Body.ChunkIndex) + suite.Assert().Equal(suite.me.NodeID(), ra.Body.ApproverID) + + // verifies the signatures + atstID := ra.Body.Attestation.ID() + suite.Assert().True(suite.sk.PublicKey().Verify(ra.Body.AttestationSignature, atstID[:], suite.hasher)) + bodyID := ra.Body.ID() + suite.Assert().True(suite.sk.PublicKey().Verify(ra.VerifierSignature, bodyID[:], suite.hasher)) + + // spock should be non-nil + suite.Assert().NotNil(ra.Body.Spock) + + expectedApproval.Store((*messages.ResultApproval)(ra)) + return nil + } + }). + Once() + + suite.pushCon. + On("Publish", testifymock.Anything, testifymock.Anything). + Return(nil). + Run(func(args testifymock.Arguments) { + // check that the approval matches the input execution result + ra, ok := args[0].(*messages.ResultApproval) + suite.Require().True(ok) + suite.Assert().Equal(expectedApproval.Load(), ra) + + // note: mock includes each variadic argument as a separate element in slice + node, ok := args[1].(flow.Identifier) + suite.Require().True(ok) + suite.Assert().Equal(consensusNodes.NodeIDs()[0], node) + suite.Assert().Len(args, 2) // only a single node should be in the list + }). + Once() + + suite.metrics.On("OnVerifiableChunkReceivedAtVerifierEngine").Return().Once() + suite.metrics.On("OnResultApprovalDispatchedInNetworkByVerifier").Return().Once() + + suite.chunkVerifier.On("Verify", vChunk).Return(nil, test.err).Once() + + err := eng.ProcessLocal(vChunk) + suite.Assert().NoError(err) + }) + } } func (suite *VerifierEngineTestSuite) TestVerifyUnhappyPaths() { - eng := suite.TestNewEngine() - myID := unittest.IdentifierFixture() - consensusNodes := unittest.IdentityListFixture(1, unittest.WithRole(flow.RoleConsensus)) - - // mocking node ID using the LocalMock - suite.me.MockNodeID(myID) - suite.ss.On("Identities", testifymock.Anything).Return(consensusNodes, nil) - - // mocks metrics - // reception of verifiable chunk - suite.metrics.On("OnVerifiableChunkReceivedAtVerifierEngine").Return() - - // we shouldn't receive any result approval - suite.pushCon. - On("Publish", testifymock.Anything, testifymock.Anything). - Return(nil). 
- Run(func(args testifymock.Arguments) { - // TODO change this to check challeneges - _, ok := args[0].(*flow.ResultApproval) - // TODO change this to false when missing register is rolled back - suite.Assert().True(ok) - }) - - // emission of result approval - suite.metrics.On("OnResultApprovalDispatchedInNetworkByVerifier").Return() + eng := suite.getTestNewEngine() var tests = []struct { - vc *verification.VerifiableChunkData - expectedErr error + errFn func(vc *verification.VerifiableChunkData) error }{ - {unittest.VerifiableChunkDataFixture(uint64(1)), nil}, - {unittest.VerifiableChunkDataFixture(uint64(2)), nil}, - {unittest.VerifiableChunkDataFixture(uint64(3)), nil}, - } - for _, test := range tests { - err := eng.ProcessLocal(test.vc) - suite.Assert().NoError(err) + // Note: skipping CFMissingRegisterTouch because it does emit a result approval + { + errFn: func(vc *verification.VerifiableChunkData) error { + return chmodel.NewCFInvalidVerifiableChunk( + "test", + errors.New("test invalid verifiable chunk"), + vc.Chunk.Index, + vc.Result.ID()) + }, + }, + { + errFn: func(vc *verification.VerifiableChunkData) error { + return chmodel.NewCFNonMatchingFinalState( + unittest.StateCommitmentFixture(), + unittest.StateCommitmentFixture(), + vc.Chunk.Index, + vc.Result.ID()) + }, + }, + { + errFn: func(vc *verification.VerifiableChunkData) error { + return chmodel.NewCFInvalidEventsCollection( + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + vc.Chunk.Index, + vc.Result.ID(), + flow.EventsList{}) + }, + }, + { + errFn: func(vc *verification.VerifiableChunkData) error { + return chmodel.NewCFSystemChunkIncludedCollection(vc.Chunk.Index, vc.Result.ID()) + }, + }, + { + errFn: func(vc *verification.VerifiableChunkData) error { + return chmodel.NewCFExecutionDataBlockIDMismatch( + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + vc.Chunk.Index, + vc.Result.ID()) + }, + }, + { + errFn: func(vc *verification.VerifiableChunkData) error { + return chmodel.NewCFExecutionDataChunksLengthMismatch( + 0, + 0, + vc.Chunk.Index, + vc.Result.ID()) + }, + }, + { + errFn: func(vc *verification.VerifiableChunkData) error { + return chmodel.NewCFExecutionDataInvalidChunkCID( + cid.Cid{}, + cid.Cid{}, + vc.Chunk.Index, + vc.Result.ID()) + }, + }, + { + errFn: func(vc *verification.VerifiableChunkData) error { + return chmodel.NewCFInvalidExecutionDataID( + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + vc.Chunk.Index, + vc.Result.ID()) + }, + }, + { + errFn: func(vc *verification.VerifiableChunkData) error { + return errors.New("test error") + }, + }, } -} -type ChunkVerifierMock struct { -} + for i, test := range tests { + vc, _ := unittest.VerifiableChunkDataFixture(uint64(i)) + expectedErr := test.errFn(vc) -func (v ChunkVerifierMock) Verify(vc *verification.VerifiableChunkData) ([]byte, chmodel.ChunkFault, error) { - if vc.IsSystemChunk { - return nil, nil, nil - } + suite.chunkVerifier.On("Verify", vc).Return(nil, expectedErr).Once() - switch vc.Chunk.Index { - case 0: - return []byte{}, nil, nil - // return error - case 1: - return nil, chmodel.NewCFMissingRegisterTouch( - []string{"test missing register touch"}, - vc.Chunk.Index, - vc.Result.ID(), - unittest.TransactionFixture().ID()), nil - - case 2: - return nil, chmodel.NewCFInvalidVerifiableChunk( - "test", - errors.New("test invalid verifiable chunk"), - vc.Chunk.Index, - vc.Result.ID()), nil - - case 3: - return nil, chmodel.NewCFNonMatchingFinalState( - unittest.StateCommitmentFixture(), - 
unittest.StateCommitmentFixture(), - vc.Chunk.Index, - vc.Result.ID()), nil - - // TODO add cases for challenges - // return successful by default - default: - return nil, nil, nil - } + suite.chunkVerifier.On("Verify", vc).Return(nil, expectedErr).Once() + suite.metrics.On("OnVerifiableChunkReceivedAtVerifierEngine").Return().Once() + // note: we shouldn't publish any result approval or emit OnResultApprovalDispatchedInNetworkByVerifier err := eng.ProcessLocal(vc) + + // no error returned from the engine + suite.Assert().NoError(err) + } } diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go new file mode 100644 index 00000000000..afb49aa401a --- /dev/null +++ b/engine/verification/verifier/verifiers.go @@ -0,0 +1,359 @@ +package verifier + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/initialize" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/verification/convert" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/chunks" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + storagepebble "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/storage/store" +) + +// VerifyLastKHeight verifies the last k sealed blocks by verifying all chunks in the results. +// It assumes the latest sealed block has been executed, and the chunk data packs have not been +// pruned. +// Note: it returns nil if a certain block is not executed; in this case, a warning will be logged +func VerifyLastKHeight( + lockManager lockctx.Manager, + k uint64, + chainID flow.ChainID, + protocolDataDir string, + chunkDataPackDir string, + nWorker uint, + stopOnMismatch bool, + transactionFeesDisabled bool, + scheduledCallbacksEnabled bool, +) (err error) { + closer, storages, chunkDataPacks, state, verifier, err := initStorages(lockManager, chainID, protocolDataDir, chunkDataPackDir, transactionFeesDisabled, scheduledCallbacksEnabled) + if err != nil { + return fmt.Errorf("could not init storages: %w", err) + } + defer func() { + closerErr := closer() + if closerErr != nil { + err = errors.Join(err, closerErr) + } + }() + + lastSealed, err := state.Sealed().Head() + if err != nil { + return fmt.Errorf("could not get last sealed height: %w", err) + } + + root := state.Params().SealedRoot().Height + + // preventing overflow + if k > lastSealed.Height+1 { + return fmt.Errorf("k is greater than the number of sealed blocks, k: %d, last sealed height: %d", k, lastSealed.Height) + } + + from := lastSealed.Height - k + 1 + + // root block is not verifiable, because it's sealed already. + // the first verifiable is the next block of the root block + firstVerifiable := root + 1 + + if from < firstVerifiable { + from = firstVerifiable + } + to := lastSealed.Height + + log.Info().Msgf("verifying blocks from %d to %d", from, to) + + err = verifyConcurrently(from, to, nWorker, stopOnMismatch, storages.Headers, chunkDataPacks, storages.Results, state, verifier, verifyHeight) + if err != nil { + return err + } + + return nil +} + +// VerifyRange verifies all chunks in the results of the blocks in the given range.
+// VerifyRange verifies all chunks in the results of the blocks in the given range.
+// Note: it returns nil if a block has not been executed; in that case a warning is logged.
+func VerifyRange(
+	lockManager lockctx.Manager,
+	from, to uint64,
+	chainID flow.ChainID,
+	protocolDataDir string, chunkDataPackDir string,
+	nWorker uint,
+	stopOnMismatch bool,
+	transactionFeesDisabled bool,
+	scheduledCallbacksEnabled bool,
+) (err error) {
+	closer, storages, chunkDataPacks, state, verifier, err := initStorages(lockManager, chainID, protocolDataDir, chunkDataPackDir, transactionFeesDisabled, scheduledCallbacksEnabled)
+	if err != nil {
+		return fmt.Errorf("could not init storages: %w", err)
+	}
+	defer func() {
+		closerErr := closer()
+		if closerErr != nil {
+			err = errors.Join(err, closerErr)
+		}
+	}()
+
+	log.Info().Msgf("verifying blocks from %d to %d", from, to)
+
+	root := state.Params().SealedRoot().Height
+
+	if from <= root {
+		return fmt.Errorf("cannot verify blocks before the root block, from: %d, root: %d", from, root)
+	}
+
+	err = verifyConcurrently(from, to, nWorker, stopOnMismatch, storages.Headers, chunkDataPacks, storages.Results, state, verifier, verifyHeight)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func verifyConcurrently(
+	from, to uint64,
+	nWorker uint,
+	stopOnMismatch bool,
+	headers storage.Headers,
+	chunkDataPacks storage.ChunkDataPacks,
+	results storage.ExecutionResults,
+	state protocol.State,
+	verifier module.ChunkVerifier,
+	verifyHeight func(uint64, storage.Headers, storage.ChunkDataPacks, storage.ExecutionResults, protocol.State, module.ChunkVerifier, bool) error,
+) error {
+	tasks := make(chan uint64, int(nWorker))
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel() // Ensure cancel is called to release resources
+
+	var lowestErr error
+	var lowestErrHeight = ^uint64(0) // Initialize to max value of uint64
+	var mu sync.Mutex                // To protect access to lowestErr and lowestErrHeight
+
+	lg := util.LogProgress(
+		log.Logger,
+		util.DefaultLogProgressConfig(
+			fmt.Sprintf("verifying heights progress for [%v:%v]", from, to),
+			to+1-from,
+		),
+	)
+
+	// Worker function
+	worker := func() {
+		for {
+			select {
+			case <-ctx.Done():
+				return // Stop processing tasks if context is canceled
+			case height, ok := <-tasks:
+				if !ok {
+					return // Exit if the tasks channel is closed
+				}
+				log.Info().Uint64("height", height).Msg("verifying height")
+				err := verifyHeight(height, headers, chunkDataPacks, results, state, verifier, stopOnMismatch)
+				if err != nil {
+					log.Error().Uint64("height", height).Err(err).Msg("error encountered while verifying height")
+
+					// the error we just observed is not necessarily from the lowest failing
+					// height, so we cancel the context to stop dispatching further tasks and
+					// let the in-flight workers finish; under the mutex we keep only the
+					// error from the lowest failing height seen so far
+					mu.Lock()
+					if height < lowestErrHeight {
+						lowestErr = err
+						lowestErrHeight = height
+						cancel() // Cancel context to stop further task dispatch
+					}
+					mu.Unlock()
+				} else {
+					log.Info().Uint64("height", height).Msg("verified height successfully")
+				}
+
+				lg(1) // log progress
+			}
+		}
+	}
+
+	// Start nWorker workers
+	var wg sync.WaitGroup
+	for i := 0; i < int(nWorker); i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			worker()
+		}()
+	}
+
+	// Send tasks to workers
+	go func() {
+		defer close(tasks) // Close tasks channel once all tasks are pushed
+		for height := from; height <= to; height++ {
+			select {
+			case <-ctx.Done():
+				return // Stop pushing tasks if context is canceled
+			case tasks <- height:
+			}
+		}
+	}()
+
+	// Wait for all workers to complete
+	wg.Wait()
+
+	// Check if there was an error
+	if lowestErr != nil {
+		log.Error().Uint64("height", lowestErrHeight).Err(lowestErr).Msg("error encountered while verifying height")
+		return fmt.Errorf("could not verify height %d: %w", lowestErrHeight, lowestErr)
+	}
+
+	return nil
+}
+
+func initStorages(
+	lockManager lockctx.Manager,
+	chainID flow.ChainID,
+	dataDir string,
+	chunkDataPackDir string,
+	transactionFeesDisabled bool,
+	scheduledCallbacksEnabled bool,
+) (
+	func() error,
+	*store.All,
+	storage.ChunkDataPacks,
+	protocol.State,
+	module.ChunkVerifier,
+	error,
+) {
+	db, err := common.InitStorage(dataDir)
+	if err != nil {
+		return nil, nil, nil, nil, nil, fmt.Errorf("could not init storage database: %w", err)
+	}
+
+	storages := common.InitStorages(db)
+	state, err := common.OpenProtocolState(lockManager, db, storages)
+	if err != nil {
+		return nil, nil, nil, nil, nil, fmt.Errorf("could not open protocol state: %w", err)
+	}
+
+	// require that the chunk data pack data already exists before returning the storage module
+	chunkDataPackDB, err := storagepebble.ShouldOpenDefaultPebbleDB(
+		log.Logger.With().Str("pebbledb", "cdp").Logger(), chunkDataPackDir)
+	if err != nil {
+		return nil, nil, nil, nil, nil, fmt.Errorf("could not open chunk data pack DB: %w", err)
+	}
+	chunkDataPacks := store.NewChunkDataPacks(metrics.NewNoopCollector(),
+		pebbleimpl.ToDB(chunkDataPackDB), storages.Collections, 1000)
+
+	verifier := makeVerifier(log.Logger, chainID, storages.Headers, transactionFeesDisabled, scheduledCallbacksEnabled)
+	closer := func() error {
+		var dbErr, chunkDataPackDBErr error
+
+		if err := db.Close(); err != nil {
+			dbErr = fmt.Errorf("failed to close protocol db: %w", err)
+		}
+
+		if err := chunkDataPackDB.Close(); err != nil {
+			chunkDataPackDBErr = fmt.Errorf("failed to close chunk data pack db: %w", err)
+		}
+		return errors.Join(dbErr, chunkDataPackDBErr)
+	}
+	return closer, storages, chunkDataPacks, state, verifier, nil
+}
+
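Because the cancellation logic in `verifyConcurrently` is subtle, here is a self-contained sketch (illustrative, not part of this PR) that isolates the pattern: dispatch heights to a bounded worker pool, cancel dispatch on the first failure, and report the error from the lowest failing height. Unlike the PR code, workers here simply drain the channel rather than also selecting on the context:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

func processRange(from, to uint64, nWorker int, work func(height uint64) error) error {
	tasks := make(chan uint64, nWorker)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var (
		mu        sync.Mutex
		lowestErr error
		lowestH   = ^uint64(0) // max uint64: any real height is lower
	)

	var wg sync.WaitGroup
	for i := 0; i < nWorker; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// workers drain whatever was dispatched before cancellation
			for h := range tasks {
				if err := work(h); err != nil {
					mu.Lock()
					if h < lowestH {
						lowestErr, lowestH = err, h
						cancel() // stop dispatching further heights
					}
					mu.Unlock()
				}
			}
		}()
	}

	// dispatcher: stops early once the context is cancelled
	go func() {
		defer close(tasks)
		for h := from; h <= to; h++ {
			select {
			case <-ctx.Done():
				return
			case tasks <- h:
			}
		}
	}()

	wg.Wait()
	if lowestErr != nil {
		return fmt.Errorf("could not verify height %d: %w", lowestH, lowestErr)
	}
	return nil
}

func main() {
	err := processRange(1, 10, 3, func(h uint64) error {
		if h == 4 || h == 7 {
			return fmt.Errorf("mismatch at %d", h)
		}
		return nil
	})
	fmt.Println(err) // could not verify height 4: mismatch at 4
}
```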
+// verifyHeight verifies all chunks in the results of the block at the given height.
+// Note: it returns nil if the block has not been executed.
+func verifyHeight(
+	height uint64,
+	headers storage.Headers,
+	chunkDataPacks storage.ChunkDataPacks,
+	results storage.ExecutionResults,
+	state protocol.State,
+	verifier module.ChunkVerifier,
+	stopOnMismatch bool,
+) error {
+	header, err := headers.ByHeight(height)
+	if err != nil {
+		return fmt.Errorf("could not get block header by height %d: %w", height, err)
+	}
+
+	blockID := header.ID()
+
+	result, err := results.ByBlockID(blockID)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			log.Warn().Uint64("height", height).Hex("block_id", blockID[:]).Msg("execution result not found")
+			return nil
+		}
+
+		return fmt.Errorf("could not get execution result by block ID %s: %w", blockID, err)
+	}
+	snapshot := state.AtBlockID(blockID)
+
+	for i, chunk := range result.Chunks {
+		chunkDataPack, err := chunkDataPacks.ByChunkID(chunk.ID())
+		if err != nil {
+			return fmt.Errorf("could not get chunk data pack by chunk ID %s: %w", chunk.ID(), err)
+		}
+
+		vcd, err := convert.FromChunkDataPack(chunk, chunkDataPack, header, snapshot, result)
+		if err != nil {
+			return err
+		}
+
+		_, err = verifier.Verify(vcd)
+		if err != nil {
+			if stopOnMismatch {
+				return fmt.Errorf("could not verify chunk (index: %v) at block %v (%v): %w", i, height, blockID, err)
+			}
+
+			log.Error().Err(err).Msgf("could not verify chunk (index: %v) at block %v (%v)", i, height, blockID)
+		}
+	}
+	return nil
+}
+
+func makeVerifier(
+	logger zerolog.Logger,
+	chainID flow.ChainID,
+	headers storage.Headers,
+	transactionFeesDisabled bool,
+	scheduledCallbacksEnabled bool,
+) module.ChunkVerifier {
+
+	vm := fvm.NewVirtualMachine()
+	fvmOptions := initialize.InitFvmOptions(
+		chainID,
+		headers,
+		transactionFeesDisabled,
+	)
+	fvmOptions = append(
+		[]fvm.Option{fvm.WithLogger(logger)},
+		fvmOptions...,
+	)
+
+	// TODO(JanezP): cleanup creation of fvm context github.com/onflow/flow-go/issues/5249
+	fvmOptions = append(
+		fvmOptions,
+		computation.DefaultFVMOptions(
+			chainID,
+			false,
+			scheduledCallbacksEnabled,
+		)...,
+	)
+	vmCtx := fvm.NewContext(fvmOptions...)
+
+	chunkVerifier := chunks.NewChunkVerifier(vm, vmCtx, logger)
+	return chunkVerifier
+}
diff --git a/engine/verification/verifier/verifiers_test.go b/engine/verification/verifier/verifiers_test.go
new file mode 100644
index 00000000000..907d9b6915c
--- /dev/null
+++ b/engine/verification/verifier/verifiers_test.go
@@ -0,0 +1,86 @@
+package verifier
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/onflow/flow-go/module"
+	mockmodule "github.com/onflow/flow-go/module/mock"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/mock"
+	unittestMocks "github.com/onflow/flow-go/utils/unittest/mocks"
+)
+
+func TestVerifyConcurrently(t *testing.T) {
+
+	tests := []struct {
+		name        string
+		from        uint64
+		to          uint64
+		nWorker     uint
+		errors      map[uint64]error // Map of heights to errors
+		expectedErr error
+	}{
+		{
+			name:        "All heights verified successfully",
+			from:        1,
+			to:          5,
+			nWorker:     3,
+			errors:      nil,
+			expectedErr: nil,
+		},
+		{
+			name:        "Single error at a height",
+			from:        1,
+			to:          5,
+			nWorker:     3,
+			errors:      map[uint64]error{3: errors.New("mock error")},
+			expectedErr: fmt.Errorf("mock error"),
+		},
+		{
+			name:        "Multiple errors, lowest height returned",
+			from:        1,
+			to:          5,
+			nWorker:     3,
+			errors:      map[uint64]error{2: errors.New("error 2"), 4: errors.New("error 4")},
+			expectedErr: fmt.Errorf("error 2"),
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Define mockVerifyHeight fresh for each test case
+			mockVerifyHeight := func(
+				height uint64,
+				headers storage.Headers,
+				chunkDataPacks storage.ChunkDataPacks,
+				results storage.ExecutionResults,
+				state protocol.State,
+				verifier module.ChunkVerifier,
+				stopOnMismatch bool,
+			) error {
+				if err, ok := tt.errors[height]; ok {
+					return err
+				}
+				return nil
+			}
+
+			mockHeaders := mock.NewHeaders(t)
+			mockChunkDataPacks := mock.NewChunkDataPacks(t)
+			mockResults := mock.NewExecutionResults(t)
+			mockState := unittestMocks.NewProtocolState()
+			mockVerifier := mockmodule.NewChunkVerifier(t)
+
+			err := verifyConcurrently(tt.from, tt.to, tt.nWorker, true, mockHeaders, mockChunkDataPacks, mockResults, mockState, mockVerifier, mockVerifyHeight)
+			if tt.expectedErr != nil {
+				// the returned error wraps an ad-hoc error value, so match on the message
+				if err == nil || !strings.Contains(err.Error(), tt.expectedErr.Error()) {
+					t.Fatalf("expected error containing %q, got: %v", tt.expectedErr, err)
+				}
+			} else if err != nil {
+				t.Fatalf("expected no error, got: %v", err)
+			}
+		})
+	}
+}
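One note on the assertion above: the table's expected errors are ad-hoc `fmt.Errorf` values, so the check has to match on the error text. Had the table shared sentinel errors between `errors` and `expectedErr`, `errors.Is` would be the idiomatic check, since `verifyConcurrently` wraps the cause with `%w`. A hypothetical variant:

```go
// Hypothetical variant of the test above using a shared sentinel error.
var errMock = errors.New("mock error")

// table entry: errors: map[uint64]error{3: errMock}, expectedErr: errMock
if tt.expectedErr != nil && !errors.Is(err, tt.expectedErr) {
	t.Fatalf("expected error %v, got: %v", tt.expectedErr, err)
}
```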
diff --git a/flips/network-api.md b/flips/network-api.md
index b4307f1936a..a2caa57f54d 100644
--- a/flips/network-api.md
+++ b/flips/network-api.md
@@ -78,9 +78,9 @@ When the message is dequeued, the engine should check the `Context` to see wheth
 
 These can be combined into a [single context](https://github.com/teivah/onecontext) which can be used by the message processing business logic, so that the processing can be cancelled either by the network or by the engine. This will allow us to deprecate [`engine.Unit`](https://github.com/onflow/flow-go/blob/master/engine/unit.go), which uses a single `Context` for the entire engine.
 
-There are certain types of messages (e.g block proposals) which may transit between the private and public networks via relay nodes (e.g Access Nodes). Libp2p's [default message ID function](https://github.com/libp2p/go-libp2p-pubsub/blob/0c7092d1f50091ae88407ba93103ac5868da3d0a/pubsub.go#L1040-L1043) will treat a message originating from one network, relayed to the other network by `n` distinct relay nodes, as `n` distinct messages, causing unnacceptable message duplification / traffic amplification. In order to prevent this, we will need to define a [custom message ID function](https://pkg.go.dev/github.com/libp2p/go-libp2p-pubsub#WithMessageIdFn) which returns the hash of the message [`Payload`](https://github.com/onflow/flow-go/blob/698c77460bc33d1a8ee8a154f7fe4877bc518a02/network/message/message.proto#L13).
+There are certain types of messages (e.g block proposals) which may transit between the private and public networks via relay nodes (e.g Access Nodes). Libp2p's [default message ID function](https://github.com/libp2p/go-libp2p-pubsub/blob/0c7092d1f50091ae88407ba93103ac5868da3d0a/pubsub.go#L1040-L1043) will treat a message originating from one network, relayed to the other network by `n` distinct relay nodes, as `n` distinct messages, causing unacceptable message duplication / traffic amplification. In order to prevent this, we will need to define a [custom message ID function](https://pkg.go.dev/github.com/libp2p/go-libp2p-pubsub#WithMessageIdFn) which returns the hash of the message [`Payload`](https://github.com/onflow/flow-go/blob/698c77460bc33d1a8ee8a154f7fe4877bc518a02/network/message/message.proto#L13).
 
-In order to avoid making the message ID function deserialize the `Message` to access the `Payload`, we need to remove all other fields from the `Message` protobuf so that the message ID function can simply take the hash of the the pubsub [`Data`](https://github.com/libp2p/go-libp2p-pubsub/blob/0c7092d1f50091ae88407ba93103ac5868da3d0a/pb/rpc.pb.go#L145) field without needing to do any deserialization.
+In order to avoid making the message ID function deserialize the `Message` to access the `Payload`, we need to remove all other fields from the `Message` protobuf so that the message ID function can simply take the hash of the pubsub [`Data`](https://github.com/libp2p/go-libp2p-pubsub/blob/0c7092d1f50091ae88407ba93103ac5868da3d0a/pb/rpc.pb.go#L145) field without needing to do any deserialization.
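A minimal sketch of such a message ID function (illustrative; the hash choice is an open design decision, not something this FLIP has settled on):

```go
package example

import (
	"crypto/sha256"
	"encoding/hex"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	pb "github.com/libp2p/go-libp2p-pubsub/pb"
)

// messageID hashes only the raw pubsub Data field, so the same payload
// relayed into the other network by n relay nodes maps to a single ID.
func messageID(msg *pb.Message) string {
	sum := sha256.Sum256(msg.Data)
	return hex.EncodeToString(sum[:])
}

// wired in when constructing gossipsub, e.g.:
//   ps, err := pubsub.NewGossipSub(ctx, host, pubsub.WithMessageIdFn(messageID))
var _ = pubsub.WithMessageIdFn(messageID)
```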
 
 The `Multicast` implementation will need to be changed to make direct connections to the target peers instead of sending messages with a `TargetIDs` field via gossip.
 
@@ -90,4 +90,4 @@ The `Multicast` implementation will need to be changed to make direct connection
 
 - Since existing calls to `Multicast` only target 3 peers, changing the implementation to use direct connections instead of gossip will reduce traffic on the network and make it more efficient.
 - While `engine.Unit` provides some useful functionalities, it also uses the anti-pattern of [storing a `Context` inside a struct](https://github.com/onflow/flow-go/blob/b50f0ffe054103a82e4aa9e0c9e4610c2cbf2cc9/engine/unit.go#L117), something which is [specifically advised against](https://pkg.go.dev/context#:~:text=Do%20not%20store%20Contexts%20inside%20a%20struct%20type%3B%20instead%2C%20pass%20a%20Context%20explicitly%20to%20each%20function%20that%20needs%20it.%20The%20Context%20should%20be%20the%20first%20parameter%2C%20typically%20named%20ctx%3A) by [the developers of Go](https://go.dev/blog/context-and-structs#TOC_2.). Here is an [example](https://go.dev/blog/context-and-structs#:~:text=Storing%20context%20in%20structs%20leads%20to%20confusion) illustrating some of the problems with this approach.
-## Implementation (TODO)
\ No newline at end of file
+## Implementation (TODO)
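To make the context-in-struct advice cited above concrete, a small illustration (the types here are hypothetical stand-ins, not from the codebase):

```go
package example

import "context"

type message struct{}

// Anti-pattern (what engine.Unit does today): the Context lives in the
// struct, tying every operation to one engine-wide lifetime.
type unit struct {
	ctx context.Context
}

type engine struct{}

// Preferred: each operation receives its own Context, so a single
// message's processing can be cancelled independently.
func (e *engine) process(ctx context.Context, msg message) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		// ... handle msg ...
		return nil
	}
}
```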
diff --git a/follower/consensus_follower.go b/follower/consensus_follower.go
index 56863bcf530..2b9a60709ec 100644
--- a/follower/consensus_follower.go
+++ b/follower/consensus_follower.go
@@ -5,19 +5,20 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/dgraph-io/badger/v2"
+	"github.com/jordanschalm/lockctx"
+	"github.com/onflow/crypto"
 	"github.com/rs/zerolog"
 
 	"github.com/onflow/flow-go/cmd"
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
 	"github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub"
-	"github.com/onflow/flow-go/crypto"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/chainsync"
 	"github.com/onflow/flow-go/module/compliance"
 	"github.com/onflow/flow-go/module/component"
 	"github.com/onflow/flow-go/module/irrecoverable"
 	"github.com/onflow/flow-go/module/util"
+	"github.com/onflow/flow-go/storage"
 )
 
 // ConsensusFollower is a standalone module run by third parties which provides
@@ -36,24 +37,23 @@ type Config struct {
 	networkPrivKey   crypto.PrivateKey   // the network private key of this node
 	bootstrapNodes   []BootstrapNodeInfo // the bootstrap nodes to use
 	bindAddr         string              // address to bind on
-	db               *badger.DB          // the badger DB storage to use for the protocol state
-	dataDir          string              // directory to store the protocol state (if the badger storage is not provided)
+	protocolDB       storage.DB          // the database backing the protocol state
 	bootstrapDir     string              // path to the bootstrap directory
 	logLevel         string              // log level
 	exposeMetrics    bool                // whether to expose metrics
 	syncConfig       *chainsync.Config   // sync core configuration
 	complianceConfig *compliance.Config  // follower engine configuration
+	// lock manager for the follower; allows integration tests that run multiple
+	// followers to use different lock managers
+	lockManager lockctx.Manager
 }
 
 type Option func(c *Config)
 
-// WithDataDir sets the underlying directory to be used to store the database
-// If a database is supplied, then data directory will be set to empty string
-func WithDataDir(dataDir string) Option {
+// WithProtocolDB sets the underlying database used for the protocol state.
+// Currently used by Rosetta.
+func WithProtocolDB(db storage.DB) Option {
 	return func(cf *Config) {
-		if cf.db == nil {
-			cf.dataDir = dataDir
-		}
+		cf.protocolDB = db
 	}
 }
@@ -69,15 +69,6 @@ func WithLogLevel(level string) Option {
 	}
 }
 
-// WithDB sets the underlying database that will be used to store the chain state
-// WithDB takes precedence over WithDataDir and datadir will be set to empty if DB is set using this option
-func WithDB(db *badger.DB) Option {
-	return func(cf *Config) {
-		cf.db = db
-		cf.dataDir = ""
-	}
-}
-
 func WithExposeMetrics(expose bool) Option {
 	return func(c *Config) {
 		c.exposeMetrics = expose
@@ -96,6 +87,15 @@ func WithComplianceConfig(config *compliance.Config) Option {
 	}
 }
 
+func WithLockManager(lockManager lockctx.Manager) Option {
+	return func(c *Config) {
+		if c.lockManager != nil {
+			panic("lock manager already set, cannot overwrite")
+		}
+		c.lockManager = lockManager
+	}
+}
+
 // BootstrapNodeInfo contains the details about the upstream bootstrap peer the consensus follower uses
 type BootstrapNodeInfo struct {
 	Host             string           // ip or hostname
@@ -103,10 +103,10 @@ type BootstrapNodeInfo struct {
 	NetworkPublicKey crypto.PublicKey // the network public key of the bootstrap peer
 }
 
-func bootstrapIdentities(bootstrapNodes []BootstrapNodeInfo) flow.IdentityList {
-	ids := make(flow.IdentityList, len(bootstrapNodes))
+func bootstrapIdentities(bootstrapNodes []BootstrapNodeInfo) flow.IdentitySkeletonList {
+	ids := make(flow.IdentitySkeletonList, len(bootstrapNodes))
 	for i, b := range bootstrapNodes {
-		ids[i] = &flow.Identity{
+		ids[i] = &flow.IdentitySkeleton{
 			Role:          flow.RoleAccess,
 			NetworkPubKey: b.NetworkPublicKey,
 			Address:       fmt.Sprintf("%s:%d", b.Host, b.Port),
@@ -122,6 +122,7 @@ func getFollowerServiceOptions(config *Config) []FollowerOption {
 		WithBootStrapPeers(ids...),
 		WithBaseOptions(getBaseOptions(config)),
 		WithNetworkKey(config.networkPrivKey),
+		WithStorageLockManager(config.lockManager),
 	}
 }
 
@@ -133,18 +134,15 @@ func getBaseOptions(config *Config) []cmd.Option {
 	if config.bootstrapDir != "" {
 		options = append(options, cmd.WithBootstrapDir(config.bootstrapDir))
 	}
-	if config.dataDir != "" {
-		options = append(options, cmd.WithDataDir(config.dataDir))
-	}
 	if config.bindAddr != "" {
 		options = append(options, cmd.WithBindAddress(config.bindAddr))
 	}
+	if config.protocolDB != nil {
+		options = append(options, cmd.WithProtocolDB(config.protocolDB))
+	}
 	if config.logLevel != "" {
 		options = append(options, cmd.WithLogLevel(config.logLevel))
 	}
-	if config.db != nil {
-		options = append(options, cmd.WithDB(config.db))
-	}
 	if config.exposeMetrics {
 		options = append(options, cmd.WithMetricsEnabled(config.exposeMetrics))
 	}
@@ -187,8 +185,10 @@ func NewConsensusFollower(
 		networkPrivKey: networkPrivKey,
 		bootstrapNodes: bootstapIdentities,
 		bindAddr:       bindAddr,
+		protocolDB:     nil,
 		logLevel:       "info",
 		exposeMetrics:  false,
+		lockManager:    nil, // default to nil, can be set optionally with WithLockManager in tests
 	}
 
 	for _, opt := range opts {
@@ -206,6 +206,7 @@ func NewConsensusFollower(
 	anb.FollowerDistributor.AddOnBlockFinalizedConsumer(cf.onBlockFinalized)
 	cf.NodeConfig = anb.NodeConfig
 
+	// Build will initialize the database
 	cf.Component, err = anb.Build()
 	if err != nil {
 		return nil, err
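For readers tracking the option changes above, a sketch of how the new options compose; `db` and `lockMgr` are placeholders supplied by the embedding application or test harness:

```go
// Hypothetical wiring; WithProtocolDB replaces the removed WithDB/WithDataDir pair.
opts := []follower.Option{
	follower.WithProtocolDB(db),       // inject an existing storage.DB (the Rosetta use case)
	follower.WithLockManager(lockMgr), // e.g. integration tests running multiple followers
	follower.WithLogLevel("debug"),
}

// passed through to the constructor as before:
// follower.NewConsensusFollower(networkPrivKey, bindAddr, bootstrapNodes, opts...)
```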
diff --git a/follower/database/init.go b/follower/database/init.go
new file mode 100644
index 00000000000..eaff70b085b
--- /dev/null
+++ b/follower/database/init.go
@@ -0,0 +1,15 @@
+package database
+
+import (
+	"io"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/cmd/scaffold"
+)
+
+// InitPebbleDB is an alias for scaffold.InitPebbleDB.
+func InitPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, io.Closer, error) {
+	return scaffold.InitPebbleDB(logger, dir)
+}
diff --git a/follower/follower_builder.go b/follower/follower_builder.go
index d02e87fa55f..550ffb87582 100644
--- a/follower/follower_builder.go
+++ b/follower/follower_builder.go
@@ -1,19 +1,18 @@
 package follower
 
 import (
-	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"strings"
 
-	dht "github.com/libp2p/go-libp2p-kad-dht"
-	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/jordanschalm/lockctx"
 	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/libp2p/go-libp2p/core/routing"
+	"github.com/onflow/crypto"
 	"github.com/rs/zerolog"
 
 	"github.com/onflow/flow-go/cmd"
+	"github.com/onflow/flow-go/config"
 	"github.com/onflow/flow-go/consensus"
 	"github.com/onflow/flow-go/consensus/hotstuff"
 	"github.com/onflow/flow-go/consensus/hotstuff/committees"
@@ -23,7 +22,6 @@ import (
 	hotstuffvalidator "github.com/onflow/flow-go/consensus/hotstuff/validator"
 	"github.com/onflow/flow-go/consensus/hotstuff/verification"
 	recovery "github.com/onflow/flow-go/consensus/recovery/protocol"
-	"github.com/onflow/flow-go/crypto"
 	"github.com/onflow/flow-go/engine/common/follower"
 	synceng "github.com/onflow/flow-go/engine/common/synchronization"
 	"github.com/onflow/flow-go/model/encodable"
@@ -31,31 +29,26 @@ import (
 	"github.com/onflow/flow-go/model/flow/filter"
 	"github.com/onflow/flow-go/module"
 	synchronization "github.com/onflow/flow-go/module/chainsync"
-	"github.com/onflow/flow-go/module/compliance"
 	finalizer "github.com/onflow/flow-go/module/finalizer/consensus"
 	"github.com/onflow/flow-go/module/id"
 	"github.com/onflow/flow-go/module/local"
 	"github.com/onflow/flow-go/module/metrics"
 	"github.com/onflow/flow-go/module/upstream"
 	"github.com/onflow/flow-go/network"
+	alspmgr "github.com/onflow/flow-go/network/alsp/manager"
 	netcache "github.com/onflow/flow-go/network/cache"
 	"github.com/onflow/flow-go/network/channels"
 	cborcodec "github.com/onflow/flow-go/network/codec/cbor"
 	"github.com/onflow/flow-go/network/converter"
 	"github.com/onflow/flow-go/network/p2p"
 	"github.com/onflow/flow-go/network/p2p/cache"
-	p2pdht "github.com/onflow/flow-go/network/p2p/dht"
+	"github.com/onflow/flow-go/network/p2p/conduit"
 	"github.com/onflow/flow-go/network/p2p/keyutils"
-	"github.com/onflow/flow-go/network/p2p/middleware"
-	"github.com/onflow/flow-go/network/p2p/p2pbuilder"
-	p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config"
-	"github.com/onflow/flow-go/network/p2p/p2pbuilder/inspector"
-	"github.com/onflow/flow-go/network/p2p/subscription"
-	"github.com/onflow/flow-go/network/p2p/tracer"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
 	"github.com/onflow/flow-go/network/p2p/translator"
 	"github.com/onflow/flow-go/network/p2p/unicast/protocols"
-	"github.com/onflow/flow-go/network/p2p/utils"
 	"github.com/onflow/flow-go/network/slashing"
+	"github.com/onflow/flow-go/network/underlay"
 	"github.com/onflow/flow-go/network/validator"
 	"github.com/onflow/flow-go/state/protocol"
 	badgerState "github.com/onflow/flow-go/state/protocol/badger"
@@ -83,19 +76,15 @@ import (
 // For a node running as a standalone process, the
config fields will be populated from the command line params, // while for a node running as a library, the config fields are expected to be initialized by the caller. type FollowerServiceConfig struct { - bootstrapNodeAddresses []string - bootstrapNodePublicKeys []string - bootstrapIdentities flow.IdentityList // the identity list of bootstrap peers the node uses to discover other nodes - NetworkKey crypto.PrivateKey // the networking key passed in by the caller when being used as a library - baseOptions []cmd.Option + bootstrapIdentities flow.IdentitySkeletonList // the identity list of bootstrap peers the node uses to discover other nodes + NetworkKey crypto.PrivateKey // the networking key passed in by the caller when being used as a library + baseOptions []cmd.Option + lockManager lockctx.Manager // the lock manager used by the follower service, can be nil if not used } // DefaultFollowerServiceConfig defines all the default values for the FollowerServiceConfig func DefaultFollowerServiceConfig() *FollowerServiceConfig { - return &FollowerServiceConfig{ - bootstrapNodeAddresses: []string{}, - bootstrapNodePublicKeys: []string{}, - } + return &FollowerServiceConfig{} } // FollowerServiceBuilder provides the common functionality needed to bootstrap a Flow staked and observer @@ -112,7 +101,7 @@ type FollowerServiceBuilder struct { FollowerDistributor *pubsub.FollowerDistributor Committee hotstuff.DynamicCommittee Finalized *flow.Header - Pending []*flow.Header + Pending []*flow.ProposalHeader FollowerCore module.HotStuffFollower // for the observer, the sync engine participants provider is the libp2p peer store which is not // available until after the network has started. Hence, a factory function that needs to be called just before @@ -135,7 +124,7 @@ func (builder *FollowerServiceBuilder) deriveBootstrapPeerIdentities() error { return nil } - ids, err := BootstrapIdentities(builder.bootstrapNodeAddresses, builder.bootstrapNodePublicKeys) + ids, err := builder.DeriveBootstrapPeerIdentities() if err != nil { return fmt.Errorf("failed to derive bootstrap peer identities: %w", err) } @@ -212,10 +201,19 @@ func (builder *FollowerServiceBuilder) buildFollowerCore() *FollowerServiceBuild builder.Component("follower core", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { // create a finalizer that will handle updating the protocol // state when the follower detects newly finalized blocks - final := finalizer.NewFinalizer(node.DB, node.Storage.Headers, builder.FollowerState, node.Tracer) + final := finalizer.NewFinalizer(node.ProtocolDB.Reader(), node.Storage.Headers, builder.FollowerState, node.Tracer) - followerCore, err := consensus.NewFollower(node.Logger, node.Storage.Headers, final, - builder.FollowerDistributor, node.RootBlock.Header, node.RootQC, builder.Finalized, builder.Pending) + followerCore, err := consensus.NewFollower( + node.Logger, + node.Metrics.Mempool, + node.Storage.Headers, + final, + builder.FollowerDistributor, + node.FinalizedRootBlock.ToHeader(), + node.RootQC, + builder.Finalized, + builder.Pending, + ) if err != nil { return nil, fmt.Errorf("could not initialize follower core: %w", err) } @@ -255,18 +253,19 @@ func (builder *FollowerServiceBuilder) buildFollowerEngine() *FollowerServiceBui builder.FollowerEng, err = follower.NewComplianceLayer( node.Logger, - node.Network, + node.EngineRegistry, node.Me, node.Metrics.Engine, node.Storage.Headers, builder.Finalized, core, + node.ComplianceConfig, follower.WithChannel(channels.PublicReceiveBlocks), - 
follower.WithComplianceConfigOpt(compliance.WithSkipNewProposalsThreshold(node.ComplianceConfig.SkipNewProposalsThreshold)),
 		)
 		if err != nil {
 			return nil, fmt.Errorf("could not create follower engine: %w", err)
 		}
+		builder.FollowerDistributor.AddOnBlockFinalizedConsumer(builder.FollowerEng.OnFinalizedBlock)
 
 		return builder.FollowerEng, nil
 	})
@@ -276,21 +275,28 @@ func (builder *FollowerServiceBuilder) buildSyncEngine() *FollowerServiceBuilder
 	builder.Component("sync engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
+		spamConfig, err := synceng.NewSpamDetectionConfig()
+		if err != nil {
+			return nil, fmt.Errorf("could not initialize spam detection config: %w", err)
+		}
+
 		sync, err := synceng.New(
 			node.Logger,
 			node.Metrics.Engine,
-			node.Network,
+			node.EngineRegistry,
 			node.Me,
 			node.State,
 			node.Storage.Blocks,
 			builder.FollowerEng,
 			builder.SyncCore,
 			builder.SyncEngineParticipantsProviderFactory(),
+			spamConfig,
 		)
 		if err != nil {
 			return nil, fmt.Errorf("could not create synchronization engine: %w", err)
 		}
 		builder.SyncEng = sync
+		builder.FollowerDistributor.AddFinalizationConsumer(sync)
 
 		return builder.SyncEng, nil
 	})
@@ -313,7 +319,7 @@ func (builder *FollowerServiceBuilder) BuildConsensusFollower() cmd.NodeBuilder
 
 type FollowerOption func(*FollowerServiceConfig)
 
-func WithBootStrapPeers(bootstrapNodes ...*flow.Identity) FollowerOption {
+func WithBootStrapPeers(bootstrapNodes ...*flow.IdentitySkeleton) FollowerOption {
 	return func(config *FollowerServiceConfig) {
 		config.bootstrapIdentities = bootstrapNodes
 	}
@@ -325,6 +331,14 @@ func WithNetworkKey(key crypto.PrivateKey) FollowerOption {
 	}
 }
 
+// WithStorageLockManager sets the lock manager the follower service uses for storage
+// operations. Primarily useful for integration tests that run multiple followers with
+// separate lock managers.
+func WithStorageLockManager(lockManager lockctx.Manager) FollowerOption {
+	return func(config *FollowerServiceConfig) {
+		config.lockManager = lockManager
+	}
+}
+
 func WithBaseOptions(baseOptions []cmd.Option) FollowerOption {
 	return func(config *FollowerServiceConfig) {
 		config.baseOptions = baseOptions
@@ -346,40 +360,10 @@ func FlowConsensusFollowerService(opts ...FollowerOption) *FollowerServiceBuilde
 	// the observer gets a version of the root snapshot file that does not contain any node addresses
 	// hence skip all the root snapshot validations that involved an identity address
 	ret.FlowNodeBuilder.SkipNwAddressBasedValidations = true
+	ret.StorageLockMgr = config.lockManager
 	return ret
 }
 
-// initNetwork creates the network.Network implementation with the given metrics, middleware, initial list of network
-// participants and topology used to choose peers from the list of participants. The list of participants can later be
-// updated by calling network.SetIDs.
-func (builder *FollowerServiceBuilder) initNetwork(nodeID module.Local, - networkMetrics module.NetworkMetrics, - middleware network.Middleware, - topology network.Topology, - receiveCache *netcache.ReceiveCache, -) (*p2p.Network, error) { - - codec := cborcodec.NewCodec() - - // creates network instance - net, err := p2p.NewNetwork(&p2p.NetworkParameters{ - Logger: builder.Logger, - Codec: codec, - Me: nodeID, - MiddlewareFactory: func() (network.Middleware, error) { return builder.Middleware, nil }, - Topology: topology, - SubscriptionManager: subscription.NewChannelSubscriptionManager(middleware), - Metrics: networkMetrics, - IdentityProvider: builder.IdentityProvider, - ReceiveCache: receiveCache, - }) - if err != nil { - return nil, fmt.Errorf("could not initialize network: %w", err) - } - - return net, nil -} - func publicNetworkMsgValidators(log zerolog.Logger, idProvider module.IdentityProvider, selfID flow.Identifier) []network.MessageValidator { return []network.MessageValidator{ // filter out messages sent by this node itself @@ -398,13 +382,13 @@ func publicNetworkMsgValidators(log zerolog.Logger, idProvider module.IdentityPr // BootstrapIdentities converts the bootstrap node addresses and keys to a Flow Identity list where // each Flow Identity is initialized with the passed address, the networking key // and the Node ID set to ZeroID, role set to Access, 0 stake and no staking key. -func BootstrapIdentities(addresses []string, keys []string) (flow.IdentityList, error) { +func BootstrapIdentities(addresses []string, keys []string) (flow.IdentitySkeletonList, error) { if len(addresses) != len(keys) { return nil, fmt.Errorf("number of addresses and keys provided for the boostrap nodes don't match") } - ids := make([]*flow.Identity, len(addresses)) + ids := make(flow.IdentitySkeletonList, len(addresses)) for i, address := range addresses { key := keys[i] @@ -422,7 +406,7 @@ func BootstrapIdentities(addresses []string, keys []string) (flow.IdentityList, } // create the identity of the peer by setting only the relevant fields - ids[i] = &flow.Identity{ + ids[i] = &flow.IdentitySkeleton{ NodeID: flow.ZeroID, // the NodeID is the hash of the staking key and for the public network it does not apply Address: address, Role: flow.RoleAccess, // the upstream node has to be an access node @@ -465,13 +449,17 @@ func (builder *FollowerServiceBuilder) InitIDProviders() { } builder.IDTranslator = translator.NewHierarchicalIDTranslator(idCache, translator.NewPublicNetworkIDTranslator()) - builder.NodeDisallowListDistributor = cmd.BuildDisallowListNotificationDisseminator(builder.DisallowListNotificationCacheSize, builder.MetricsRegisterer, builder.Logger, builder.MetricsEnabled) - // The following wrapper allows to disallow-list byzantine nodes via an admin command: // the wrapper overrides the 'Ejected' flag of the disallow-listed nodes to true - builder.IdentityProvider, err = cache.NewNodeBlocklistWrapper(idCache, node.DB, builder.NodeDisallowListDistributor) + builder.IdentityProvider, err = cache.NewNodeDisallowListWrapper( + idCache, + node.ProtocolDB, + func() network.DisallowListNotificationConsumer { + return builder.NetworkUnderlay + }, + ) if err != nil { - return fmt.Errorf("could not initialize NodeBlockListWrapper: %w", err) + return fmt.Errorf("could not initialize NodeDisallowListWrapper: %w", err) } // use the default identifier provider @@ -488,7 +476,7 @@ func (builder *FollowerServiceBuilder) InitIDProviders() { if flowID, err := builder.IDTranslator.GetFlowID(pid); err != 
nil { // TODO: this is an instance of "log error and continue with best effort" anti-pattern - builder.Logger.Err(err).Str("peer", pid.String()).Msg("failed to translate to Flow ID") + builder.Logger.Debug().Str("peer", p2plogging.PeerId(pid)).Msg("failed to translate to Flow ID") } else { result = append(result, flowID) } @@ -500,14 +488,14 @@ func (builder *FollowerServiceBuilder) InitIDProviders() { return nil }) - - builder.Component("disallow list notification distributor", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - // distributor is returned as a component to be started and stopped. - return builder.NodeDisallowListDistributor, nil - }) } func (builder *FollowerServiceBuilder) Initialize() error { + // initialize default flow configuration + if err := config.Unmarshall(&builder.FlowConfig); err != nil { + return fmt.Errorf("failed to initialize flow config for follower builder: %w", err) + } + if err := builder.deriveBootstrapPeerIdentities(); err != nil { return err } @@ -548,98 +536,22 @@ func (builder *FollowerServiceBuilder) validateParams() error { if len(builder.bootstrapIdentities) > 0 { return nil } - if len(builder.bootstrapNodeAddresses) == 0 { + if len(builder.BootstrapNodeAddresses) == 0 { return errors.New("no bootstrap node address provided") } - if len(builder.bootstrapNodeAddresses) != len(builder.bootstrapNodePublicKeys) { + if len(builder.BootstrapNodeAddresses) != len(builder.BootstrapNodePublicKeys) { return errors.New("number of bootstrap node addresses and public keys should match") } return nil } -// initPublicLibP2PFactory creates the LibP2P factory function for the given node ID and network key for the observer. -// The factory function is later passed into the initMiddleware function to eventually instantiate the p2p.LibP2PNode instance -// The LibP2P host is created with the following options: -// - DHT as client and seeded with the given bootstrap peers -// - The specified bind address as the listen address -// - The passed in private key as the libp2p key -// - No connection gater -// - No connection manager -// - No peer manager -// - Default libp2p pubsub options -func (builder *FollowerServiceBuilder) initPublicLibP2PFactory(networkKey crypto.PrivateKey) p2p.LibP2PFactoryFunc { - return func() (p2p.LibP2PNode, error) { - var pis []peer.AddrInfo - - for _, b := range builder.bootstrapIdentities { - pi, err := utils.PeerAddressInfo(*b) - - if err != nil { - return nil, fmt.Errorf("could not extract peer address info from bootstrap identity %v: %w", b, err) - } - - pis = append(pis, pi) - } - - meshTracer := tracer.NewGossipSubMeshTracer( - builder.Logger, - builder.Metrics.Network, - builder.IdentityProvider, - builder.GossipSubConfig.LocalMeshLogInterval) - - rpcInspectorSuite, err := inspector.NewGossipSubInspectorBuilder(builder.Logger, builder.SporkID, builder.GossipSubConfig.RpcInspector). - SetPublicNetwork(p2p.PublicNetwork). - SetMetrics(&p2pconfig.MetricsConfig{ - HeroCacheFactory: builder.HeroCacheMetricsFactory(), - Metrics: builder.Metrics.Network, - }).Build() - if err != nil { - return nil, fmt.Errorf("failed to create gossipsub rpc inspectors for public libp2p node: %w", err) - } - - node, err := p2pbuilder.NewNodeBuilder( - builder.Logger, - builder.Metrics.Network, - builder.BaseConfig.BindAddr, - networkKey, - builder.SporkID, - builder.LibP2PResourceManagerConfig). - SetSubscriptionFilter( - subscription.NewRoleBasedFilter( - subscription.UnstakedRole, builder.IdentityProvider, - ), - ). 
- SetRoutingSystem(func(ctx context.Context, h host.Host) (routing.Routing, error) { - return p2pdht.NewDHT(ctx, h, protocols.FlowPublicDHTProtocolID(builder.SporkID), - builder.Logger, - builder.Metrics.Network, - p2pdht.AsClient(), - dht.BootstrapPeers(pis...), - ) - }). - SetStreamCreationRetryInterval(builder.UnicastCreateStreamRetryDelay). - SetGossipSubTracer(meshTracer). - SetGossipSubScoreTracerInterval(builder.GossipSubConfig.ScoreTracerInterval). - SetGossipSubRpcInspectorSuite(rpcInspectorSuite). - Build() - - if err != nil { - return nil, fmt.Errorf("could not build public libp2p node: %w", err) - } - - builder.LibP2PNode = node - - return builder.LibP2PNode, nil - } -} - // initObserverLocal initializes the observer's ID, network key and network address // Currently, it reads a node-info.priv.json like any other node. // TODO: read the node ID from the special bootstrap files func (builder *FollowerServiceBuilder) initObserverLocal() func(node *cmd.NodeConfig) error { return func(node *cmd.NodeConfig) error { // for an observer, set the identity here explicitly since it will not be found in the protocol state - self := &flow.Identity{ + self := flow.IdentitySkeleton{ NodeID: node.NodeID, NetworkPubKey: node.NetworkKey.PublicKey(), StakingPubKey: nil, // no staking key needed for the observer @@ -665,47 +577,69 @@ func (builder *FollowerServiceBuilder) Build() (cmd.Node, error) { // enqueuePublicNetworkInit enqueues the observer network component initialized for the observer func (builder *FollowerServiceBuilder) enqueuePublicNetworkInit() { - var libp2pNode p2p.LibP2PNode + var publicLibp2pNode p2p.LibP2PNode builder. Component("public libp2p node", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - libP2PFactory := builder.initPublicLibP2PFactory(node.NetworkKey) - var err error - libp2pNode, err = libP2PFactory() + publicLibp2pNode, err = builder.BuildPublicLibp2pNode(builder.BaseConfig.BindAddr, builder.bootstrapIdentities) if err != nil { - return nil, fmt.Errorf("could not create public libp2p node: %w", err) + return nil, fmt.Errorf("could not build public libp2p node: %w", err) } - return libp2pNode, nil + builder.LibP2PNode = publicLibp2pNode + return publicLibp2pNode, nil }). Component("public network", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - receiveCache := netcache.NewHeroReceiveCache(builder.NetworkReceivedMessageCacheSize, + receiveCache := netcache.NewHeroReceiveCache(builder.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, builder.Logger, - metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), p2p.PublicNetwork)) + metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) err := node.Metrics.Mempool.Register(metrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) if err != nil { return nil, fmt.Errorf("could not register networking receive cache metric: %w", err) } - msgValidators := publicNetworkMsgValidators(node.Logger, node.IdentityProvider, node.NodeID) - - builder.initMiddleware(node.NodeID, libp2pNode, msgValidators...) 
- - // topology is nil since it is automatically managed by libp2p - net, err := builder.initNetwork(builder.Me, builder.Metrics.Network, builder.Middleware, nil, receiveCache) + net, err := underlay.NewNetwork(&underlay.NetworkConfig{ + Logger: builder.Logger.With().Str("component", "public-network").Logger(), + Codec: cborcodec.NewCodec(), + Me: builder.Me, + Libp2pNode: publicLibp2pNode, + Topology: nil, // topology is nil since it is automatically managed by libp2p // TODO: can we use empty topology? + Metrics: builder.Metrics.Network, + BitSwapMetrics: builder.Metrics.Bitswap, + IdentityProvider: builder.IdentityProvider, + ReceiveCache: receiveCache, + ConduitFactory: conduit.NewDefaultConduitFactory(), + SporkId: builder.SporkID, + UnicastMessageTimeout: underlay.DefaultUnicastTimeout, + IdentityTranslator: builder.IDTranslator, + AlspCfg: &alspmgr.MisbehaviorReportManagerConfig{ + Logger: builder.Logger, + SpamRecordCacheSize: builder.FlowConfig.NetworkConfig.AlspConfig.SpamRecordCacheSize, + SpamReportQueueSize: builder.FlowConfig.NetworkConfig.AlspConfig.SpamReportQueueSize, + DisablePenalty: builder.FlowConfig.NetworkConfig.AlspConfig.DisablePenalty, + HeartBeatInterval: builder.FlowConfig.NetworkConfig.AlspConfig.HearBeatInterval, + AlspMetrics: builder.Metrics.Network, + HeroCacheMetricsFactory: builder.HeroCacheMetricsFactory(), + NetworkType: network.PublicNetwork, + }, + SlashingViolationConsumerFactory: func(adapter network.ConduitAdapter) network.ViolationsConsumer { + return slashing.NewSlashingViolationsConsumer(builder.Logger, builder.Metrics.Network, adapter) + }, + }, underlay.WithMessageValidators(publicNetworkMsgValidators(node.Logger, node.IdentityProvider, node.NodeID)...)) if err != nil { - return nil, err + return nil, fmt.Errorf("could not initialize network: %w", err) } - builder.Network = converter.NewNetwork(net, channels.SyncCommittee, channels.PublicSyncCommittee) + builder.NetworkUnderlay = net + builder.EngineRegistry = converter.NewNetwork(net, channels.SyncCommittee, channels.PublicSyncCommittee) builder.Logger.Info().Msgf("network will run on address: %s", builder.BindAddr) - idEvents := gadgets.NewIdentityDeltas(builder.Middleware.UpdateNodeAddresses) + idEvents := gadgets.NewIdentityDeltas(builder.NetworkUnderlay.UpdateNodeAddresses) builder.ProtocolEvents.AddConsumer(idEvents) - return builder.Network, nil + return builder.EngineRegistry, nil }) } @@ -720,28 +654,3 @@ func (builder *FollowerServiceBuilder) enqueueConnectWithStakedAN() { return upstream.NewUpstreamConnector(builder.bootstrapIdentities, builder.LibP2PNode, builder.Logger), nil }) } - -// initMiddleware creates the network.Middleware implementation with the libp2p factory function, metrics, peer update -// interval, and validators. The network.Middleware is then passed into the initNetwork function. 
-func (builder *FollowerServiceBuilder) initMiddleware(nodeID flow.Identifier, - libp2pNode p2p.LibP2PNode, - validators ...network.MessageValidator, -) network.Middleware { - slashingViolationsConsumer := slashing.NewSlashingViolationsConsumer(builder.Logger, builder.Metrics.Network) - mw := middleware.NewMiddleware( - builder.Logger, - libp2pNode, - nodeID, - builder.Metrics.Bitswap, - builder.SporkID, - middleware.DefaultUnicastTimeout, - builder.IDTranslator, - builder.CodecFactory(), - slashingViolationsConsumer, - middleware.WithMessageValidators(validators...), - // use default identifier provider - ) - builder.NodeDisallowListDistributor.AddConsumer(mw) - builder.Middleware = mw - return builder.Middleware -} diff --git a/fvm/README.md b/fvm/README.md index b30856d12fa..668290c4562 100644 --- a/fvm/README.md +++ b/fvm/README.md @@ -86,7 +86,7 @@ an immutable object, and any changes to a context must be made by spawning a new child context. ```go -vm := fvm.New(runtime.NewInterpreterRuntime()) +vm := fvm.New(runtime.NewRuntime()) globalCtx := fvm.NewContext() diff --git a/fvm/accounts_test.go b/fvm/accounts_test.go index ece44bf3ff4..0746f505efc 100644 --- a/fvm/accounts_test.go +++ b/fvm/accounts_test.go @@ -6,8 +6,10 @@ import ( "testing" "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime/format" + "github.com/onflow/cadence/format" + "github.com/onflow/cadence/stdlib" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -53,9 +55,12 @@ func createAccount( fvm.WithSequenceNumberCheckAndIncrementEnabled(false), ) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(createAccountTransaction)). - AddAuthorizer(chain.ServiceAddress()) + SetPayer(chain.ServiceAddress()). + AddAuthorizer(chain.ServiceAddress()). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -70,10 +75,17 @@ func createAccount( require.Len(t, accountCreatedEvents, 1) - data, err := jsoncdc.Decode(nil, accountCreatedEvents[0].Payload) + data, err := ccf.Decode(nil, accountCreatedEvents[0].Payload) require.NoError(t, err) + + event := data.(cadence.Event) + address := flow.ConvertAddress( - data.(cadence.Event).Fields[0].(cadence.Address)) + cadence.SearchFieldByName( + event, + stdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address), + ) return snapshotTree, address } @@ -102,17 +114,13 @@ func addAccountKey( publicKeyA, cadencePublicKey := newAccountKey(t, privateKey, apiVersion) - var addAccountKeyTx accountKeyAPIVersion - if apiVersion == accountKeyAPIVersionV1 { - addAccountKeyTx = addAccountKeyTransaction - } else { - addAccountKeyTx = addAccountKeyTransactionV2 - } - - txBody := flow.NewTransactionBody(). - SetScript([]byte(addAccountKeyTx)). + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(addAccountKeyTransaction)). + SetPayer(address). AddArgument(cadencePublicKey). - AddAuthorizer(address) + AddAuthorizer(address). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -141,9 +149,12 @@ func addAccountCreator( ), ) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript(script). - AddAuthorizer(chain.ServiceAddress()) + SetPayer(chain.ServiceAddress()). + AddAuthorizer(chain.ServiceAddress()). 
+ Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -171,9 +182,12 @@ func removeAccountCreator( ), ) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript(script). - AddAuthorizer(chain.ServiceAddress()) + SetPayer(chain.ServiceAddress()). + AddAuthorizer(chain.ServiceAddress()). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -187,100 +201,72 @@ func removeAccountCreator( const createAccountTransaction = ` transaction { - prepare(signer: AuthAccount) { - let account = AuthAccount(payer: signer) + prepare(signer: auth(BorrowValue) &Account) { + let account = Account(payer: signer) } } ` const createMultipleAccountsTransaction = ` transaction { - prepare(signer: AuthAccount) { - let accountA = AuthAccount(payer: signer) - let accountB = AuthAccount(payer: signer) - let accountC = AuthAccount(payer: signer) + prepare(signer: auth(BorrowValue) &Account) { + let accountA = Account(payer: signer) + let accountB = Account(payer: signer) + let accountC = Account(payer: signer) } } ` const addAccountKeyTransaction = ` transaction(key: [UInt8]) { - prepare(signer: AuthAccount) { - signer.addPublicKey(key) - } -} -` -const addAccountKeyTransactionV2 = ` -transaction(key: [UInt8]) { - prepare(signer: AuthAccount) { - let publicKey = PublicKey( - publicKey: key, - signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 - ) + prepare(signer: auth(AddKey) &Account) { + let publicKey = PublicKey( + publicKey: key, + signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 + ) signer.keys.add( - publicKey: publicKey, - hashAlgorithm: HashAlgorithm.SHA3_256, - weight: 1000.0 - ) + publicKey: publicKey, + hashAlgorithm: HashAlgorithm.SHA3_256, + weight: 1000.0 + ) } } ` const addMultipleAccountKeysTransaction = ` transaction(key1: [UInt8], key2: [UInt8]) { - prepare(signer: AuthAccount) { - signer.addPublicKey(key1) - signer.addPublicKey(key2) - } -} -` - -const addMultipleAccountKeysTransactionV2 = ` -transaction(key1: [UInt8], key2: [UInt8]) { - prepare(signer: AuthAccount) { - for key in [key1, key2] { - let publicKey = PublicKey( - publicKey: key, - signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 - ) - signer.keys.add( - publicKey: publicKey, - hashAlgorithm: HashAlgorithm.SHA3_256, - weight: 1000.0 - ) - } - } -} -` - -const removeAccountKeyTransaction = ` -transaction(key: Int) { - prepare(signer: AuthAccount) { - signer.removePublicKey(key) + prepare(signer: auth(AddKey) &Account) { + signer.keys.add( + publicKey: PublicKey( + publicKey: key1, + signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 + ), + hashAlgorithm: HashAlgorithm.SHA3_256, + weight: 1000.0 + ) + signer.keys.add( + publicKey: PublicKey( + publicKey: key2, + signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 + ), + hashAlgorithm: HashAlgorithm.SHA3_256, + weight: 1000.0 + ) } } ` const revokeAccountKeyTransaction = ` transaction(keyIndex: Int) { - prepare(signer: AuthAccount) { + prepare(signer: auth(RevokeKey) &Account) { signer.keys.revoke(keyIndex: keyIndex) } } ` -const removeMultipleAccountKeysTransaction = ` -transaction(key1: Int, key2: Int) { - prepare(signer: AuthAccount) { - signer.removePublicKey(key1) - signer.removePublicKey(key2) - } -} -` - const revokeMultipleAccountKeysTransaction = ` transaction(keyIndex1: Int, keyIndex2: Int) { - prepare(signer: AuthAccount) { + prepare(signer: auth(RevokeKey) &Account) { for keyIndex in [keyIndex1, keyIndex2] { signer.keys.revoke(keyIndex: keyIndex) } @@ -292,10 +278,10 @@ 
const removeAccountCreatorTransactionTemplate = ` import FlowServiceAccount from 0x%s transaction { let serviceAccountAdmin: &FlowServiceAccount.Administrator - prepare(signer: AuthAccount) { + prepare(signer: auth(BorrowValue) &Account) { // Borrow reference to FlowServiceAccount Administrator resource. // - self.serviceAccountAdmin = signer.borrow<&FlowServiceAccount.Administrator>(from: /storage/flowServiceAdmin) + self.serviceAccountAdmin = signer.storage.borrow<&FlowServiceAccount.Administrator>(from: /storage/flowServiceAdmin) ?? panic("Unable to borrow reference to administrator resource") } execute { @@ -312,10 +298,10 @@ const addAccountCreatorTransactionTemplate = ` import FlowServiceAccount from 0x%s transaction { let serviceAccountAdmin: &FlowServiceAccount.Administrator - prepare(signer: AuthAccount) { + prepare(signer: auth(BorrowValue) &Account) { // Borrow reference to FlowServiceAccount Administrator resource. // - self.serviceAccountAdmin = signer.borrow<&FlowServiceAccount.Administrator>(from: /storage/flowServiceAdmin) + self.serviceAccountAdmin = signer.storage.borrow<&FlowServiceAccount.Administrator>(from: /storage/flowServiceAdmin) ?? panic("Unable to borrow reference to administrator resource") } execute { @@ -330,19 +316,17 @@ transaction { const getAccountKeyTransaction = ` transaction(keyIndex: Int) { - prepare(signer: AuthAccount) { - var key :AccountKey? = signer.keys.get(keyIndex: keyIndex) - log(key) + prepare(signer: &Account) { + log(signer.keys.get(keyIndex: keyIndex)) } } ` const getMultipleAccountKeysTransaction = ` transaction(keyIndex1: Int, keyIndex2: Int) { - prepare(signer: AuthAccount) { + prepare(signer: &Account) { for keyIndex in [keyIndex1, keyIndex2] { - var key :AccountKey? = signer.keys.get(keyIndex: keyIndex) - log(key) + log(signer.keys.get(keyIndex: keyIndex)) } } } @@ -391,9 +375,12 @@ func TestCreateAccount(t *testing.T) { ctx, snapshotTree) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(createAccountTransaction)). - AddAuthorizer(payer) + SetPayer(payer). + AddAuthorizer(payer). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -407,12 +394,19 @@ func TestCreateAccount(t *testing.T) { accountCreatedEvents := filterAccountCreatedEvents(output.Events) require.Len(t, accountCreatedEvents, 1) - data, err := jsoncdc.Decode(nil, accountCreatedEvents[0].Payload) + data, err := ccf.Decode(nil, accountCreatedEvents[0].Payload) require.NoError(t, err) + + event := data.(cadence.Event) + address := flow.ConvertAddress( - data.(cadence.Event).Fields[0].(cadence.Address)) + cadence.SearchFieldByName( + event, + stdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address), + ) - account, err := vm.GetAccount(ctx, address, snapshotTree) + account, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) require.NotNil(t, account) }), @@ -430,9 +424,12 @@ func TestCreateAccount(t *testing.T) { ctx, snapshotTree) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(createMultipleAccountsTransaction)). - AddAuthorizer(payer) + SetPayer(payer). + AddAuthorizer(payer). 
+ Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -450,12 +447,18 @@ func TestCreateAccount(t *testing.T) { } accountCreatedEventCount += 1 - data, err := jsoncdc.Decode(nil, event.Payload) + data, err := ccf.Decode(nil, event.Payload) require.NoError(t, err) - address := flow.ConvertAddress( - data.(cadence.Event).Fields[0].(cadence.Address)) - account, err := vm.GetAccount(ctx, address, snapshotTree) + event := data.(cadence.Event) + + address := flow.ConvertAddress( + cadence.SearchFieldByName( + event, + stdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address), + ) + account, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) require.NotNil(t, account) } @@ -483,9 +486,12 @@ func TestCreateAccount_WithRestrictedAccountCreation(t *testing.T) { ctx, snapshotTree) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(createAccountTransaction)). - AddAuthorizer(payer) + SetPayer(payer). + AddAuthorizer(payer). + Build() + require.NoError(t, err) _, output, err := vm.Run( ctx, @@ -501,9 +507,12 @@ func TestCreateAccount_WithRestrictedAccountCreation(t *testing.T) { newVMTest().withContextOptions(options...). withBootstrapProcedureOptions(fvm.WithRestrictedAccountCreationEnabled(true)). run(func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(createAccountTransaction)). - AddAuthorizer(chain.ServiceAddress()) + SetPayer(chain.ServiceAddress()). + AddAuthorizer(chain.ServiceAddress()). + Build() + require.NoError(t, err) _, output, err := vm.Run( ctx, @@ -533,10 +542,12 @@ func TestCreateAccount_WithRestrictedAccountCreation(t *testing.T) { snapshotTree, payer) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(createAccountTransaction)). SetPayer(payer). - AddAuthorizer(payer) + AddAuthorizer(payer). + Build() + require.NoError(t, err) _, output, err := vm.Run( ctx, @@ -566,9 +577,12 @@ func TestCreateAccount_WithRestrictedAccountCreation(t *testing.T) { snapshotTree, payer) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(createAccountTransaction)). - AddAuthorizer(payer) + SetPayer(payer). + AddAuthorizer(payer). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -615,17 +629,13 @@ func TestAddAccountKey(t *testing.T) { singleKeyTests := []addKeyTest{ { source: addAccountKeyTransaction, - apiVersion: accountKeyAPIVersionV1, - }, - { - source: addAccountKeyTransactionV2, apiVersion: accountKeyAPIVersionV2, }, } for _, test := range singleKeyTests { - t.Run(fmt.Sprintf("Add to empty key list %s", test.apiVersion), + t.Run(fmt.Sprintf("Add to empty key list %s", accountKeyAPIVersionV2), newVMTest().withContextOptions(options...). 
run(func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { snapshotTree, address := createAccount( @@ -635,19 +645,22 @@ func TestAddAccountKey(t *testing.T) { ctx, snapshotTree) - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Empty(t, before.Keys) privateKey, err := unittest.AccountKeyDefaultFixture() require.NoError(t, err) - publicKeyA, cadencePublicKey := newAccountKey(t, privateKey, test.apiVersion) + publicKeyA, cadencePublicKey := newAccountKey(t, privateKey, accountKeyAPIVersionV2) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(test.source)). + SetPayer(address). AddArgument(cadencePublicKey). - AddAuthorizer(address) + AddAuthorizer(address). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -659,7 +672,7 @@ func TestAddAccountKey(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - after, err := vm.GetAccount(ctx, address, snapshotTree) + after, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) require.Len(t, after.Keys, 1) @@ -673,7 +686,7 @@ func TestAddAccountKey(t *testing.T) { }), ) - t.Run(fmt.Sprintf("Add to non-empty key list %s", test.apiVersion), + t.Run(fmt.Sprintf("Add to non-empty key list %s", accountKeyAPIVersionV2), newVMTest().withContextOptions(options...). run(func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { snapshotTree, address := createAccount( @@ -689,21 +702,24 @@ func TestAddAccountKey(t *testing.T) { ctx, snapshotTree, address, - test.apiVersion) + accountKeyAPIVersionV2) - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, before.Keys, 1) privateKey, err := unittest.AccountKeyDefaultFixture() require.NoError(t, err) - publicKey2, publicKey2Arg := newAccountKey(t, privateKey, test.apiVersion) + publicKey2, publicKey2Arg := newAccountKey(t, privateKey, accountKeyAPIVersionV2) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(test.source)). + SetPayer(address). AddArgument(publicKey2Arg). - AddAuthorizer(address) + AddAuthorizer(address). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -714,7 +730,7 @@ func TestAddAccountKey(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - after, err := vm.GetAccount(ctx, address, snapshotTree) + after, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) expectedKeys := []flow.AccountPublicKey{ @@ -726,7 +742,7 @@ func TestAddAccountKey(t *testing.T) { for i, expectedKey := range expectedKeys { actualKey := after.Keys[i] - assert.Equal(t, i, actualKey.Index) + assert.Equal(t, uint32(i), actualKey.Index) assert.Equal(t, expectedKey.PublicKey, actualKey.PublicKey) assert.Equal(t, expectedKey.SignAlgo, actualKey.SignAlgo) assert.Equal(t, expectedKey.HashAlgo, actualKey.HashAlgo) @@ -735,7 +751,7 @@ func TestAddAccountKey(t *testing.T) { }), ) - t.Run(fmt.Sprintf("Invalid key %s", test.apiVersion), + t.Run(fmt.Sprintf("Invalid key %s", accountKeyAPIVersionV2), newVMTest().withContextOptions(options...). 
run(func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { snapshotTree, address := createAccount( @@ -749,10 +765,13 @@ func TestAddAccountKey(t *testing.T) { invalidPublicKeyArg, err := jsoncdc.Encode(invalidPublicKey) require.NoError(t, err) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(test.source)). + SetPayer(address). AddArgument(invalidPublicKeyArg). - AddAuthorizer(address) + AddAuthorizer(address). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -763,7 +782,7 @@ func TestAddAccountKey(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - after, err := vm.GetAccount(ctx, address, snapshotTree) + after, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Empty(t, after.Keys) @@ -776,16 +795,12 @@ func TestAddAccountKey(t *testing.T) { multipleKeysTests := []addKeyTest{ { source: addMultipleAccountKeysTransaction, - apiVersion: accountKeyAPIVersionV1, - }, - { - source: addMultipleAccountKeysTransactionV2, apiVersion: accountKeyAPIVersionV2, }, } for _, test := range multipleKeysTests { - t.Run(fmt.Sprintf("Multiple keys %s", test.apiVersion), + t.Run(fmt.Sprintf("Multiple keys %s", accountKeyAPIVersionV2), newVMTest().withContextOptions(options...). run(func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { snapshotTree, address := createAccount( @@ -795,7 +810,7 @@ func TestAddAccountKey(t *testing.T) { ctx, snapshotTree) - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Empty(t, before.Keys) @@ -805,14 +820,17 @@ func TestAddAccountKey(t *testing.T) { privateKey2, err := unittest.AccountKeyDefaultFixture() require.NoError(t, err) - publicKey1, publicKey1Arg := newAccountKey(t, privateKey1, test.apiVersion) - publicKey2, publicKey2Arg := newAccountKey(t, privateKey2, test.apiVersion) + publicKey1, publicKey1Arg := newAccountKey(t, privateKey1, accountKeyAPIVersionV2) + publicKey2, publicKey2Arg := newAccountKey(t, privateKey2, accountKeyAPIVersionV2) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(test.source)). + SetPayer(address). AddArgument(publicKey1Arg). AddArgument(publicKey2Arg). - AddAuthorizer(address) + AddAuthorizer(address). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -823,7 +841,7 @@ func TestAddAccountKey(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - after, err := vm.GetAccount(ctx, address, snapshotTree) + after, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) expectedKeys := []flow.AccountPublicKey{ @@ -863,11 +881,11 @@ func TestAddAccountKey(t *testing.T) { _, publicKeyArg := newAccountKey(t, privateKey, accountKeyAPIVersionV2) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(fmt.Sprintf( ` transaction(key: [UInt8]) { - prepare(signer: AuthAccount) { + prepare(signer: auth(AddKey) &Account) { let publicKey = PublicKey( publicKey: key, signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 @@ -882,8 +900,11 @@ func TestAddAccountKey(t *testing.T) { `, hashAlgo, ))). + SetPayer(address). AddArgument(publicKeyArg). - AddAuthorizer(address) + AddAuthorizer(address). 
+ Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -899,7 +920,7 @@ func TestAddAccountKey(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - after, err := vm.GetAccount(ctx, address, snapshotTree) + after, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Empty(t, after.Keys) @@ -925,11 +946,6 @@ func TestRemoveAccountKey(t *testing.T) { // Remove a single key singleKeyTests := []removeKeyTest{ - { - source: removeAccountKeyTransaction, - apiVersion: accountKeyAPIVersionV1, - expectError: true, - }, { source: revokeAccountKeyTransaction, apiVersion: accountKeyAPIVersionV2, @@ -939,7 +955,7 @@ func TestRemoveAccountKey(t *testing.T) { for _, test := range singleKeyTests { - t.Run(fmt.Sprintf("Non-existent key %s", test.apiVersion), + t.Run(fmt.Sprintf("Non-existent key %s", accountKeyAPIVersionV2), newVMTest().withContextOptions(options...). run(func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { snapshotTree, address := createAccount( @@ -958,21 +974,24 @@ func TestRemoveAccountKey(t *testing.T) { ctx, snapshotTree, address, - test.apiVersion) + accountKeyAPIVersionV2) } - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, before.Keys, keyCount) - for _, keyIndex := range []int{-1, keyCount, keyCount + 1} { + for _, keyIndex := range []int{keyCount, keyCount + 1} { keyIndexArg, err := jsoncdc.Encode(cadence.NewInt(keyIndex)) require.NoError(t, err) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(test.source)). + SetPayer(address). AddArgument(keyIndexArg). - AddAuthorizer(address) + AddAuthorizer(address). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -989,7 +1008,7 @@ func TestRemoveAccountKey(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) } - after, err := vm.GetAccount(ctx, address, snapshotTree) + after, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, after.Keys, keyCount) @@ -999,7 +1018,7 @@ func TestRemoveAccountKey(t *testing.T) { }), ) - t.Run(fmt.Sprintf("Existing key %s", test.apiVersion), + t.Run(fmt.Sprintf("Existing key %s", accountKeyAPIVersionV2), newVMTest().withContextOptions(options...). run(func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { snapshotTree, address := createAccount( @@ -1019,20 +1038,23 @@ func TestRemoveAccountKey(t *testing.T) { ctx, snapshotTree, address, - test.apiVersion) + accountKeyAPIVersionV2) } - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, before.Keys, keyCount) keyIndexArg, err := jsoncdc.Encode(cadence.NewInt(keyIndex)) require.NoError(t, err) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(test.source)). + SetPayer(address). AddArgument(keyIndexArg). - AddAuthorizer(address) + AddAuthorizer(address). 
+ Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -1043,7 +1065,7 @@ func TestRemoveAccountKey(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - after, err := vm.GetAccount(ctx, address, snapshotTree) + after, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, after.Keys, keyCount) @@ -1055,7 +1077,7 @@ func TestRemoveAccountKey(t *testing.T) { }), ) - t.Run(fmt.Sprintf("Key added by a different api version %s", test.apiVersion), + t.Run(fmt.Sprintf("Key added by a different api version %s", accountKeyAPIVersionV2), newVMTest().withContextOptions(options...). run(func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { snapshotTree, address := createAccount( @@ -1068,14 +1090,6 @@ func TestRemoveAccountKey(t *testing.T) { const keyCount = 2 const keyIndex = keyCount - 1 - // Use one version of API to add the keys, and a different version of the API to revoke the keys. - var apiVersionForAdding accountKeyAPIVersion - if test.apiVersion == accountKeyAPIVersionV1 { - apiVersionForAdding = accountKeyAPIVersionV2 - } else { - apiVersionForAdding = accountKeyAPIVersionV1 - } - for i := 0; i < keyCount; i++ { snapshotTree, _ = addAccountKey( t, @@ -1083,20 +1097,23 @@ func TestRemoveAccountKey(t *testing.T) { ctx, snapshotTree, address, - apiVersionForAdding) + accountKeyAPIVersionV2) } - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, before.Keys, keyCount) keyIndexArg, err := jsoncdc.Encode(cadence.NewInt(keyIndex)) require.NoError(t, err) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(test.source)). + SetPayer(address). AddArgument(keyIndexArg). - AddAuthorizer(address) + AddAuthorizer(address). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -1107,7 +1124,7 @@ func TestRemoveAccountKey(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - after, err := vm.GetAccount(ctx, address, snapshotTree) + after, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, after.Keys, keyCount) @@ -1123,10 +1140,6 @@ func TestRemoveAccountKey(t *testing.T) { // Remove multiple keys multipleKeysTests := []removeKeyTest{ - { - source: removeMultipleAccountKeysTransaction, - apiVersion: accountKeyAPIVersionV1, - }, { source: revokeMultipleAccountKeysTransaction, apiVersion: accountKeyAPIVersionV2, @@ -1134,7 +1147,7 @@ func TestRemoveAccountKey(t *testing.T) { } for _, test := range multipleKeysTests { - t.Run(fmt.Sprintf("Multiple keys %s", test.apiVersion), + t.Run(fmt.Sprintf("Multiple keys %s", accountKeyAPIVersionV2), newVMTest().withContextOptions(options...). run(func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { snapshotTree, address := createAccount( @@ -1153,24 +1166,28 @@ func TestRemoveAccountKey(t *testing.T) { ctx, snapshotTree, address, - test.apiVersion) + accountKeyAPIVersionV2) } - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, before.Keys, keyCount) - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(test.source)). + SetPayer(address). 
AddAuthorizer(address) for i := 0; i < keyCount; i++ { keyIndexArg, err := jsoncdc.Encode(cadence.NewInt(i)) require.NoError(t, err) - txBody.AddArgument(keyIndexArg) + txBodyBuilder.AddArgument(keyIndexArg) } + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + executionSnapshot, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -1180,7 +1197,7 @@ func TestRemoveAccountKey(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - after, err := vm.GetAccount(ctx, address, snapshotTree) + after, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, after.Keys, keyCount) @@ -1222,18 +1239,21 @@ func TestGetAccountKey(t *testing.T) { accountKeyAPIVersionV2) } - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, before.Keys, keyCount) - for _, keyIndex := range []int{-1, keyCount, keyCount + 1} { + for _, keyIndex := range []int{keyCount, keyCount + 1} { keyIndexArg, err := jsoncdc.Encode(cadence.NewInt(keyIndex)) require.NoError(t, err) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(getAccountKeyTransaction)). + SetPayer(address). AddArgument(keyIndexArg). - AddAuthorizer(address) + AddAuthorizer(address). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -1274,17 +1294,20 @@ func TestGetAccountKey(t *testing.T) { accountKeyAPIVersionV2) } - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, before.Keys, keyCount) keyIndexArg, err := jsoncdc.Encode(cadence.NewInt(keyIndex)) require.NoError(t, err) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(getAccountKeyTransaction)). + SetPayer(address). AddArgument(keyIndexArg). - AddAuthorizer(address) + AddAuthorizer(address). + Build() + require.NoError(t, err) _, output, err := vm.Run( ctx, @@ -1335,20 +1358,23 @@ func TestGetAccountKey(t *testing.T) { ctx, snapshotTree, address, - accountKeyAPIVersionV1) + accountKeyAPIVersionV2) } - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, before.Keys, keyCount) keyIndexArg, err := jsoncdc.Encode(cadence.NewInt(keyIndex)) require.NoError(t, err) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(getAccountKeyTransaction)). + SetPayer(address). AddArgument(keyIndexArg). - AddAuthorizer(address) + AddAuthorizer(address). + Build() + require.NoError(t, err) _, output, err := vm.Run( ctx, @@ -1400,21 +1426,25 @@ func TestGetAccountKey(t *testing.T) { accountKeyAPIVersionV2) } - before, err := vm.GetAccount(ctx, address, snapshotTree) + before, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) assert.Len(t, before.Keys, keyCount) - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(getMultipleAccountKeysTransaction)). + SetPayer(address). 
AddAuthorizer(address) for i := 0; i < keyCount; i++ { keyIndexArg, err := jsoncdc.Encode(cadence.NewInt(i)) require.NoError(t, err) - txBody.AddArgument(keyIndexArg) + txBodyBuilder.AddArgument(keyIndexArg) } + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + _, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -1467,10 +1497,13 @@ func TestAccountBalanceFields(t *testing.T) { ctx, snapshotTree) - txBody := transferTokensTx(chain). + txBody, err := transferTokensTx(chain). + SetPayer(chain.ServiceAddress()). AddArgument(jsoncdc.MustEncode(cadence.UFix64(100_000_000))). AddArgument(jsoncdc.MustEncode(cadence.Address(account))). - AddAuthorizer(chain.ServiceAddress()) + AddAuthorizer(chain.ServiceAddress()). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -1482,7 +1515,7 @@ func TestAccountBalanceFields(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(): UFix64 { + access(all) fun main(): UFix64 { let acc = getAccount(0x%s) return acc.balance } @@ -1512,7 +1545,7 @@ func TestAccountBalanceFields(t *testing.T) { require.NoError(t, err) script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(): UFix64 { + access(all) fun main(): UFix64 { let acc = getAccount(0x%s) return acc.balance } @@ -1543,7 +1576,7 @@ func TestAccountBalanceFields(t *testing.T) { snapshotTree) script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(): UFix64 { + access(all) fun main(): UFix64 { let acc = getAccount(0x%s) return acc.balance } @@ -1581,10 +1614,13 @@ func TestAccountBalanceFields(t *testing.T) { ctx, snapshotTree) - txBody := transferTokensTx(chain). + txBody, err := transferTokensTx(chain). + SetPayer(chain.ServiceAddress()). AddArgument(jsoncdc.MustEncode(cadence.UFix64(100_000_000))). AddArgument(jsoncdc.MustEncode(cadence.Address(account))). - AddAuthorizer(chain.ServiceAddress()) + AddAuthorizer(chain.ServiceAddress()). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -1596,7 +1632,7 @@ func TestAccountBalanceFields(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(): UFix64 { + access(all) fun main(): UFix64 { let acc = getAccount(0x%s) return acc.availableBalance } @@ -1605,7 +1641,7 @@ func TestAccountBalanceFields(t *testing.T) { _, output, err = vm.Run(ctx, script, snapshotTree) assert.NoError(t, err) assert.NoError(t, output.Err) - assert.Equal(t, cadence.UFix64(9999_3120), output.Value) + assert.Equal(t, cadence.UFix64(99_990_900), output.Value) }), ) @@ -1623,7 +1659,7 @@ func TestAccountBalanceFields(t *testing.T) { require.NoError(t, err) script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(): UFix64 { + access(all) fun main(): UFix64 { let acc = getAccount(0x%s) return acc.availableBalance } @@ -1654,10 +1690,13 @@ func TestAccountBalanceFields(t *testing.T) { ctx, snapshotTree) - txBody := transferTokensTx(chain). + txBody, err := transferTokensTx(chain). + SetPayer(chain.ServiceAddress()). AddArgument(jsoncdc.MustEncode(cadence.UFix64(100_000_000))). AddArgument(jsoncdc.MustEncode(cadence.Address(account))). - AddAuthorizer(chain.ServiceAddress()) + AddAuthorizer(chain.ServiceAddress()). 
+ Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -1669,7 +1708,7 @@ func TestAccountBalanceFields(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(): UFix64 { + access(all) fun main(): UFix64 { let acc = getAccount(0x%s) return acc.availableBalance } @@ -1705,10 +1744,13 @@ func TestGetStorageCapacity(t *testing.T) { ctx, snapshotTree) - txBody := transferTokensTx(chain). + txBody, err := transferTokensTx(chain). + SetPayer(chain.ServiceAddress()). AddArgument(jsoncdc.MustEncode(cadence.UFix64(100_000_000))). AddArgument(jsoncdc.MustEncode(cadence.Address(account))). - AddAuthorizer(chain.ServiceAddress()) + AddAuthorizer(chain.ServiceAddress()). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -1720,9 +1762,9 @@ func TestGetStorageCapacity(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(): UInt64 { + access(all) fun main(): UInt64 { let acc = getAccount(0x%s) - return acc.storageCapacity + return acc.storage.capacity } `, account))) @@ -1749,9 +1791,9 @@ func TestGetStorageCapacity(t *testing.T) { require.NoError(t, err) script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(): UInt64 { + access(all) fun main(): UInt64 { let acc = getAccount(0x%s) - return acc.storageCapacity + return acc.storage.capacity } `, nonExistentAddress))) @@ -1777,9 +1819,9 @@ func TestGetStorageCapacity(t *testing.T) { address := chain.ServiceAddress() script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(): UInt64 { + access(all) fun main(): UInt64 { let acc = getAccount(0x%s) - return acc.storageCapacity + return acc.storage.capacity } `, address))) diff --git a/fvm/blueprints/bridge.go b/fvm/blueprints/bridge.go new file mode 100644 index 00000000000..17a7cadcb91 --- /dev/null +++ b/fvm/blueprints/bridge.go @@ -0,0 +1,440 @@ +package blueprints + +import ( + _ "embed" + + "github.com/onflow/cadence" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/flow-core-contracts/lib/go/templates" + + bridge "github.com/onflow/flow-evm-bridge" + + "github.com/onflow/flow-go/model/flow" +) + +// All the Cadence contracts that make up the core functionality +// of the Flow VM bridge. They are all needed for the +// bridge to function properly. 
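+// The contracts are listed with the shared utility contracts first, then the bridge
+// interfaces, and the core FlowEVMBridge contract last.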
+// Solidity contracts are handled elsewhere in the bootstrapping process +// See more info in the VM Bridge Repo +// https://github.com/onflow/flow-evm-bridge +// or FLIP +// https://github.com/onflow/flips/blob/main/application/20231222-evm-vm-bridge.md +var BridgeContracts = []string{ + "cadence/contracts/utils/ArrayUtils.cdc", + "cadence/contracts/utils/StringUtils.cdc", + "cadence/contracts/utils/ScopedFTProviders.cdc", + "cadence/contracts/utils/Serialize.cdc", + "cadence/contracts/utils/SerializeMetadata.cdc", + "cadence/contracts/bridge/interfaces/FlowEVMBridgeHandlerInterfaces.cdc", + "cadence/contracts/bridge/interfaces/IBridgePermissions.cdc", + "cadence/contracts/bridge/interfaces/ICrossVM.cdc", + "cadence/contracts/bridge/interfaces/ICrossVMAsset.cdc", + "cadence/contracts/bridge/interfaces/CrossVMNFT.cdc", + "cadence/contracts/bridge/interfaces/CrossVMToken.cdc", + "cadence/contracts/bridge/interfaces/IEVMBridgeNFTMinter.cdc", + "cadence/contracts/bridge/interfaces/IEVMBridgeTokenMinter.cdc", + "cadence/contracts/bridge/FlowEVMBridgeConfig.cdc", + "cadence/contracts/bridge/interfaces/IFlowEVMNFTBridge.cdc", + "cadence/contracts/bridge/interfaces/IFlowEVMTokenBridge.cdc", + "cadence/contracts/bridge/FlowEVMBridgeUtils.cdc", + "cadence/contracts/bridge/FlowEVMBridgeResolver.cdc", + "cadence/contracts/bridge/FlowEVMBridgeHandlers.cdc", + "cadence/contracts/bridge/FlowEVMBridgeNFTEscrow.cdc", + "cadence/contracts/bridge/FlowEVMBridgeTokenEscrow.cdc", + "cadence/contracts/bridge/FlowEVMBridgeTemplates.cdc", + "cadence/contracts/bridge/FlowEVMBridge.cdc", +} + +// CreateCOATransaction returns the transaction body for the create COA transaction +func CreateCOATransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, +) *flow.TransactionBodyBuilder { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/evm/create_account.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.UFix64(0.0))). + AddAuthorizer(service). + SetPayer(service) +} + +// DeployEVMContractTransaction returns the transaction body for +// the deploy EVM contract transaction +func DeployEVMContractTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + bytecode string, + gasLimit int, + deploymentValue float64, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/evm/deploy.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(bytecode))). + AddArgument(jsoncdc.MustEncode(cadence.UInt64(gasLimit))). + AddArgument(jsoncdc.MustEncode(cadence.UFix64(deploymentValue))). + AddAuthorizer(service). + SetPayer(service). + Build() +} + +// DeployFlowEVMBridgeUtilsContractTransaction returns the transaction body for +// the deploy FlowEVMBridgeUtils contract transaction +func DeployFlowEVMBridgeUtilsContractTransaction( + env templates.Environment, + bridgeEnv *bridge.Environment, + service flow.Address, + contract []byte, + contractName string, + factoryAddress string, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/deploy_bridge_utils.cdc", *bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(contractName))). 
+ AddArgument(jsoncdc.MustEncode(cadence.String(contract))). + AddArgument(jsoncdc.MustEncode(cadence.String(factoryAddress))). + AddAuthorizer(service). + SetPayer(service). + Build() +} + +// PauseBridgeTransaction returns the transaction body for the transaction +// to pause or unpause the VM bridge +func PauseBridgeTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + pause bool, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/pause/update_bridge_pause_status.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.Bool(pause))). + AddAuthorizer(service). + SetPayer(service). + Build() +} + +// SetRegistrarTransaction returns the transaction body for the transaction to set the factory as registrar +func SetRegistrarTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + registryAddress string, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/evm/set_registrar.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(registryAddress))). + AddAuthorizer(service). + SetPayer(service). + Build() +} + +// SetDeploymentRegistryTransaction returns the transaction body for the transaction +// to add the registry to the factory +func SetDeploymentRegistryTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + registryAddress string, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/evm/set_deployment_registry.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(registryAddress))). + AddAuthorizer(service). + SetPayer(service). + Build() +} + +// SetDelegatedDeployerTransaction returns the transaction body for the transaction +// to set a delegated deployer for a particular token type +func SetDelegatedDeployerTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + deployerAddress string, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/evm/set_delegated_deployer.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(deployerAddress))). + AddAuthorizer(service). + SetPayer(service). + Build() +} + +// AddDeployerTransaction returns the transaction body for the transaction +// to add a deployer for a particular token type +func AddDeployerTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + deployerTag, deployerAddress string, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/evm/add_deployer.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(deployerTag))). + AddArgument(jsoncdc.MustEncode(cadence.String(deployerAddress))). + AddAuthorizer(service). + SetPayer(service). 
+ Build() +} + +// DeployFlowEVMBridgeAccessorContractTransaction returns the transaction body for the deploy FlowEVMBridgeAccessor contract transaction +func DeployFlowEVMBridgeAccessorContractTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, +) (*flow.TransactionBody, error) { + contract, _ := bridge.GetCadenceContractCode("cadence/contracts/bridge/FlowEVMBridgeAccessor.cdc", bridgeEnv, env) + contractName := "FlowEVMBridgeAccessor" + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/deploy_bridge_accessor.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(contractName))). + AddArgument(jsoncdc.MustEncode(cadence.String(contract))). + AddArgument(jsoncdc.MustEncode(cadence.Address(service))). + AddAuthorizer(service). + SetPayer(service). + Build() +} + +// IntegrateEVMWithBridgeAccessorTransaction returns the transaction body for the transaction +// that claims the bridge accessor capability and saves the bridge router +func IntegrateEVMWithBridgeAccessorTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/evm-integration/claim_accessor_capability_and_save_router.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String("FlowEVMBridgeAccessor"))). + AddArgument(jsoncdc.MustEncode(cadence.Address(service))). + AddAuthorizer(service). + SetPayer(service). + Build() +} + +// UpdateOnboardFeeTransaction returns the transaction body for the transaction +// that updates the onboarding fees for the bridge +func UpdateOnboardFeeTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + fee float64, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/fee/update_onboard_fee.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.UFix64(fee))). + AddAuthorizer(service). + SetPayer(service). + Build() +} + +// UpdateBaseFeeTransaction returns the transaction body for the transaction +// that updates the base fees for the bridge +func UpdateBaseFeeTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + fee float64, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/fee/update_base_fee.cdc", bridgeEnv, env) + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.UFix64(fee))). + AddAuthorizer(service). + SetPayer(service). 
+ Build() +} + +// UpsertContractCodeChunksTransaction returns the transaction body for the transaction +// that adds the code chunks for the FT or NFT templates to the bridge +func UpsertContractCodeChunksTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + forTemplate string, + newChunks []string, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/templates/upsert_contract_code_chunks.cdc", bridgeEnv, env) + + chunks := make([]cadence.Value, len(newChunks)) + for i, chunk := range newChunks { + chunks[i] = cadence.String(chunk) + } + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(forTemplate))). + AddArgument(jsoncdc.MustEncode(cadence.NewArray(chunks))). + AddAuthorizer(service). + SetPayer(service). + Build() +} + +// CreateWFLOWTokenHandlerTransaction returns the transaction body for the transaction +// that creates a token handler for the WFLOW Solidity contract +func CreateWFLOWTokenHandlerTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + wflowEVMAddress string, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/token-handler/create_wflow_token_handler.cdc", bridgeEnv, env) + + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(wflowEVMAddress))). + SetProposalKey(service, 0, 0). + SetPayer(service). + AddAuthorizer(service). + Build() +} + +// EnableWFLOWTokenHandlerTransaction returns the transaction body for the transaction +// that enables the token handler for the WFLOW Solidity contract +func EnableWFLOWTokenHandlerTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + flowTokenType string, +) (*flow.TransactionBody, error) { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/admin/token-handler/enable_token_handler.cdc", bridgeEnv, env) + + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(flowTokenType))). + SetProposalKey(service, 0, 0). + SetPayer(service). + AddAuthorizer(service). + Build() +} + +// OnboardToBridgeByTypeIDTransaction returns the transaction body for the transaction +// that onboards a FT or NFT type to the bridge +func OnboardToBridgeByTypeIDTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + forType string, +) *flow.TransactionBodyBuilder { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/onboarding/onboard_by_type_identifier.cdc", bridgeEnv, env) + + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(forType))). + SetProposalKey(service, 0, 0). + SetPayer(service). + AddAuthorizer(service) +} + +// BridgeFTToEVMTransaction returns the transaction body for the transaction +// that bridges a fungible token from Cadence to EVM +func BridgeFTToEVMTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + forType string, + amount string, +) *flow.TransactionBodyBuilder { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/tokens/bridge_tokens_to_evm.cdc", bridgeEnv, env) + bridgeAmount, _ := cadence.NewUFix64(amount) + return flow.NewTransactionBodyBuilder(). 
+ SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(forType))). + AddArgument(jsoncdc.MustEncode(bridgeAmount)). + SetProposalKey(service, 0, 0). + SetPayer(service). + AddAuthorizer(service) +} + +// BridgeFTFromEVMTransaction returns the transaction body for the transaction +// that bridges a fungible token from EVM to Cadence +func BridgeFTFromEVMTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + forType string, + amount uint, +) *flow.TransactionBodyBuilder { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/tokens/bridge_tokens_from_evm.cdc", bridgeEnv, env) + + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(forType))). + AddArgument(jsoncdc.MustEncode(cadence.NewUInt256(amount))). + SetProposalKey(service, 0, 0). + SetPayer(service). + AddAuthorizer(service) +} + +// GetEscrowedTokenBalanceScript returns the script body for the script +// that gets the balance of an escrowed fungible token in the Cadence side of the VM bridge +func GetEscrowedTokenBalanceScript( + env templates.Environment, + bridgeEnv bridge.Environment, +) []byte { + script, _ := bridge.GetCadenceTransactionCode("cadence/scripts/escrow/get_locked_token_balance.cdc", bridgeEnv, env) + + return script +} + +// BridgeNFTToEVMTransaction returns the transaction body for the transaction +// that bridges a non-fungible token from Cadence to EVM +func BridgeNFTToEVMTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + forType string, + id cadence.UInt64, +) *flow.TransactionBodyBuilder { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/nft/bridge_nft_to_evm.cdc", bridgeEnv, env) + + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(forType))). + AddArgument(jsoncdc.MustEncode(id)). + SetProposalKey(service, 0, 0). + SetPayer(service). + AddAuthorizer(service) +} + +// BridgeNFTFromEVMTransaction returns the transaction body for the transaction +// that bridges a non-fungible token from EVM to Cadence +func BridgeNFTFromEVMTransaction( + env templates.Environment, + bridgeEnv bridge.Environment, + service flow.Address, + forType string, + id cadence.UInt256, +) *flow.TransactionBodyBuilder { + txScript, _ := bridge.GetCadenceTransactionCode("cadence/transactions/bridge/nft/bridge_nft_from_evm.cdc", bridgeEnv, env) + + return flow.NewTransactionBodyBuilder(). + SetScript(txScript). + AddArgument(jsoncdc.MustEncode(cadence.String(forType))). + AddArgument(jsoncdc.MustEncode(id)). + SetProposalKey(service, 0, 0). + SetPayer(service). 
+ AddAuthorizer(service) +} + +// GetIsNFTInEscrowScript returns the script body for the script +// that gets if an NFT is escrowed in the Cadence side of the VM bridge +func GetIsNFTInEscrowScript( + env templates.Environment, + bridgeEnv bridge.Environment, +) []byte { + script, _ := bridge.GetCadenceTransactionCode("cadence/scripts/escrow/is_nft_locked.cdc", bridgeEnv, env) + + return script +} diff --git a/fvm/blueprints/contracts.go b/fvm/blueprints/contracts.go index dee250b4bac..077182611dc 100644 --- a/fvm/blueprints/contracts.go +++ b/fvm/blueprints/contracts.go @@ -3,25 +3,23 @@ package blueprints import ( _ "embed" - "encoding/hex" - "github.com/onflow/cadence" + "github.com/onflow/cadence/common" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime/common" "github.com/onflow/flow-go/model/flow" ) var ContractDeploymentAuthorizedAddressesPath = cadence.Path{ - Domain: common.PathDomainStorage.Identifier(), + Domain: common.PathDomainStorage, Identifier: "authorizedAddressesToDeployContracts", } var ContractRemovalAuthorizedAddressesPath = cadence.Path{ - Domain: common.PathDomainStorage.Identifier(), + Domain: common.PathDomainStorage, Identifier: "authorizedAddressesToRemoveContracts", } var IsContractDeploymentRestrictedPath = cadence.Path{ - Domain: common.PathDomainStorage.Identifier(), + Domain: common.PathDomainStorage, Identifier: "isContractDeploymentRestricted", } @@ -32,15 +30,15 @@ var setContractOperationAuthorizersTransactionTemplate string var setIsContractDeploymentRestrictedTransactionTemplate string //go:embed scripts/deployContractTransactionTemplate.cdc -var deployContractTransactionTemplate string +var DeployContractTransactionTemplate []byte // SetContractDeploymentAuthorizersTransaction returns a transaction for updating list of authorized accounts allowed to deploy/update contracts -func SetContractDeploymentAuthorizersTransaction(serviceAccount flow.Address, authorized []flow.Address) (*flow.TransactionBody, error) { +func SetContractDeploymentAuthorizersTransaction(serviceAccount flow.Address, authorized []flow.Address) (*flow.TransactionBodyBuilder, error) { return setContractAuthorizersTransaction(ContractDeploymentAuthorizedAddressesPath, serviceAccount, authorized) } // SetContractRemovalAuthorizersTransaction returns a transaction for updating list of authorized accounts allowed to remove contracts -func SetContractRemovalAuthorizersTransaction(serviceAccount flow.Address, authorized []flow.Address) (*flow.TransactionBody, error) { +func SetContractRemovalAuthorizersTransaction(serviceAccount flow.Address, authorized []flow.Address) (*flow.TransactionBodyBuilder, error) { return setContractAuthorizersTransaction(ContractRemovalAuthorizedAddressesPath, serviceAccount, authorized) } @@ -48,7 +46,7 @@ func setContractAuthorizersTransaction( path cadence.Path, serviceAccount flow.Address, authorized []flow.Address, -) (*flow.TransactionBody, error) { +) (*flow.TransactionBodyBuilder, error) { addressValues := make([]cadence.Value, 0, len(authorized)) for _, address := range authorized { addressValues = append( @@ -66,8 +64,9 @@ func setContractAuthorizersTransaction( return nil, err } - return flow.NewTransactionBody(). + return flow.NewTransactionBodyBuilder(). SetScript([]byte(setContractOperationAuthorizersTransactionTemplate)). + SetPayer(serviceAccount). AddAuthorizer(serviceAccount). AddArgument(addressesArg). 
AddArgument(pathArg), nil @@ -85,18 +84,21 @@ func SetIsContractDeploymentRestrictedTransaction(serviceAccount flow.Address, r return nil, err } - return flow.NewTransactionBody(). + return flow.NewTransactionBodyBuilder(). SetScript([]byte(setIsContractDeploymentRestrictedTransactionTemplate)). + SetPayer(serviceAccount). AddAuthorizer(serviceAccount). AddArgument(argRestricted). - AddArgument(argPath), nil + AddArgument(argPath). + Build() } // TODO (ramtin) get rid of authorizers -func DeployContractTransaction(address flow.Address, contract []byte, contractName string) *flow.TransactionBody { - return flow.NewTransactionBody(). - SetScript([]byte(deployContractTransactionTemplate)). +func DeployContractTransaction(address flow.Address, contract []byte, contractName string) *flow.TransactionBodyBuilder { + return flow.NewTransactionBodyBuilder(). + SetScript(DeployContractTransactionTemplate). AddArgument(jsoncdc.MustEncode(cadence.String(contractName))). - AddArgument(jsoncdc.MustEncode(cadence.String(hex.EncodeToString(contract)))). + AddArgument(jsoncdc.MustEncode(cadence.String(contract))). + SetPayer(address). AddAuthorizer(address) } diff --git a/fvm/blueprints/epochs.go b/fvm/blueprints/epochs.go index 9a60f179854..f338133c52d 100644 --- a/fvm/blueprints/epochs.go +++ b/fvm/blueprints/epochs.go @@ -34,8 +34,8 @@ var fundAccountTemplate string var deployLockedTokensTemplate string // DeployEpochTransaction returns the transaction body for the deploy epoch transaction -func DeployEpochTransaction(service flow.Address, contract []byte, epochConfig epochs.EpochConfig) *flow.TransactionBody { - return flow.NewTransactionBody(). +func DeployEpochTransaction(service flow.Address, contract []byte, epochConfig epochs.EpochConfig) (*flow.TransactionBody, error) { + return flow.NewTransactionBodyBuilder(). SetScript([]byte( templates.ReplaceAddresses( deployEpochTransactionTemplate, @@ -44,6 +44,8 @@ func DeployEpochTransaction(service flow.Address, contract []byte, epochConfig e }, ), )). + SetPayer(service). + AddArgument(jsoncdc.MustEncode(cadence.String("FlowEpoch"))). AddArgument(jsoncdc.MustEncode(cadence.String(hex.EncodeToString(contract)))). AddArgument(jsoncdc.MustEncode(epochConfig.CurrentEpochCounter)). AddArgument(jsoncdc.MustEncode(epochConfig.NumViewsInEpoch)). @@ -53,7 +55,8 @@ func DeployEpochTransaction(service flow.Address, contract []byte, epochConfig e AddArgument(jsoncdc.MustEncode(epochConfig.FLOWsupplyIncreasePercentage)). AddArgument(jsoncdc.MustEncode(epochConfig.RandomSource)). AddArgument(epochs.EncodeClusterAssignments(epochConfig.CollectorClusters)). - AddAuthorizer(service) + AddAuthorizer(service). + Build() } // SetupAccountTransaction returns the transaction body for the setup account transaction @@ -61,8 +64,8 @@ func SetupAccountTransaction( fungibleToken flow.Address, flowToken flow.Address, accountAddress flow.Address, -) *flow.TransactionBody { - return flow.NewTransactionBody(). +) (*flow.TransactionBody, error) { + return flow.NewTransactionBodyBuilder(). SetScript([]byte( templates.ReplaceAddresses( setupAccountTemplate, @@ -72,17 +75,21 @@ func SetupAccountTransaction( }, ), )). - AddAuthorizer(accountAddress) + SetPayer(accountAddress). + AddAuthorizer(accountAddress). 
+ Build() } // DeployIDTableStakingTransaction returns the transaction body for the deploy id table staking transaction -func DeployIDTableStakingTransaction(service flow.Address, contract []byte, epochTokenPayout cadence.UFix64, rewardCut cadence.UFix64) *flow.TransactionBody { - return flow.NewTransactionBody(). +func DeployIDTableStakingTransaction(service flow.Address, contract []byte, epochTokenPayout cadence.UFix64, rewardCut cadence.UFix64) (*flow.TransactionBody, error) { + return flow.NewTransactionBodyBuilder(). SetScript([]byte(deployIDTableStakingTransactionTemplate)). + SetPayer(service). AddArgument(jsoncdc.MustEncode(cadence.String(hex.EncodeToString(contract)))). AddArgument(jsoncdc.MustEncode(epochTokenPayout)). AddArgument(jsoncdc.MustEncode(rewardCut)). - AddAuthorizer(service) + AddAuthorizer(service). + Build() } // FundAccountTransaction returns the transaction body for the fund account transaction @@ -91,14 +98,14 @@ func FundAccountTransaction( fungibleToken flow.Address, flowToken flow.Address, nodeAddress flow.Address, -) *flow.TransactionBody { +) (*flow.TransactionBody, error) { cdcAmount, err := cadence.NewUFix64(fmt.Sprintf("%d.0", 2_000_000)) if err != nil { panic(err) } - return flow.NewTransactionBody(). + return flow.NewTransactionBodyBuilder(). SetScript([]byte(templates.ReplaceAddresses( fundAccountTemplate, templates.Environment{ @@ -106,20 +113,24 @@ func FundAccountTransaction( FlowTokenAddress: flowToken.Hex(), }, ))). + SetPayer(service). AddArgument(jsoncdc.MustEncode(cdcAmount)). AddArgument(jsoncdc.MustEncode(cadence.NewAddress(nodeAddress))). - AddAuthorizer(service) + AddAuthorizer(service). + Build() } // DeployLockedTokensTransaction returns the transaction body for the deploy locked tokens transaction -func DeployLockedTokensTransaction(service flow.Address, contract []byte, publicKeys []cadence.Value) *flow.TransactionBody { - return flow.NewTransactionBody(). +func DeployLockedTokensTransaction(service flow.Address, contract []byte, publicKeys []cadence.Value) (*flow.TransactionBody, error) { + return flow.NewTransactionBodyBuilder(). SetScript([]byte( deployLockedTokensTemplate, )). + SetPayer(service). AddArgument(jsoncdc.MustEncode(cadence.NewArray(publicKeys))). AddArgument(jsoncdc.MustEncode(cadence.String(hex.EncodeToString(contract)))). - AddAuthorizer(service) + AddAuthorizer(service). + Build() } // RegisterNodeTransaction creates a new node struct object. @@ -128,12 +139,14 @@ func DeployLockedTokensTransaction(service flow.Address, contract []byte, public func RegisterNodeTransaction( service flow.Address, flowTokenAddress flow.Address, + fungibleTokenAddress flow.Address, nodeAddress flow.Address, - id *flow.Identity, + node bootstrap.NodeInfo, ) *flow.TransactionBody { env := templates.Environment{ FlowTokenAddress: flowTokenAddress.HexWithPrefix(), + FungibleTokenAddress: fungibleTokenAddress.HexWithPrefix(), IDTableAddress: service.HexWithPrefix(), QuorumCertificateAddress: service.HexWithPrefix(), DkgAddress: service.HexWithPrefix(), @@ -143,8 +156,8 @@ func RegisterNodeTransaction( // Use NetworkingKey as the public key of the machine account. // We do this for tests/localnet but normally it should be a separate key. 
accountKey := &flowsdk.AccountKey{ - PublicKey: id.NetworkPubKey, - SigAlgo: id.NetworkPubKey.Algorithm(), + PublicKey: node.NetworkPubKey(), + SigAlgo: node.NetworkPubKey().Algorithm(), HashAlgo: bootstrap.DefaultMachineAccountHashAlgo, Weight: 1000, } @@ -165,49 +178,69 @@ func RegisterNodeTransaction( panic(err) } - cdcNodeID, err := cadence.NewString(id.NodeID.String()) + cdcNodeID, err := cadence.NewString(node.NodeID.String()) if err != nil { panic(err) } - cdcAddress, err := cadence.NewString(id.Address) + cdcAddress, err := cadence.NewString(node.Address) if err != nil { panic(err) } - cdcNetworkPubKey, err := cadence.NewString(id.NetworkPubKey.String()[2:]) + cdcNetworkPubKey, err := cadence.NewString(node.NetworkPubKey().String()[2:]) if err != nil { panic(err) } - cdcStakingPubKey, err := cadence.NewString(id.StakingPubKey.String()[2:]) + cdcStakingPubKey, err := cadence.NewString(node.StakingPubKey().String()[2:]) + if err != nil { + panic(err) + } + + pop, err := node.StakingPoP() + if err != nil { + panic(err) + } + cdcStakingKeyPoP, err := cadence.NewString(pop.String()[2:]) if err != nil { panic(err) } // register node - return flow.NewTransactionBody(). + + txBody, err := flow.NewTransactionBodyBuilder(). SetScript(templates.GenerateEpochRegisterNodeScript(env)). + SetPayer(service). AddArgument(jsoncdc.MustEncode(cdcNodeID)). - AddArgument(jsoncdc.MustEncode(cadence.NewUInt8(uint8(id.Role)))). + AddArgument(jsoncdc.MustEncode(cadence.NewUInt8(uint8(node.Role)))). AddArgument(jsoncdc.MustEncode(cdcAddress)). AddArgument(jsoncdc.MustEncode(cdcNetworkPubKey)). AddArgument(jsoncdc.MustEncode(cdcStakingPubKey)). + AddArgument(jsoncdc.MustEncode(cdcStakingKeyPoP)). AddArgument(jsoncdc.MustEncode(cdcAmount)). AddArgument(jsoncdc.MustEncode(cadencePublicKeys)). - AddAuthorizer(nodeAddress) + AddAuthorizer(nodeAddress). + Build() + if err != nil { + panic(err) + } + + return txBody } // SetStakingAllowlistTransaction returns transaction body for set staking allowlist transaction -func SetStakingAllowlistTransaction(idTableStakingAddr flow.Address, allowedNodeIDs []flow.Identifier) *flow.TransactionBody { +func SetStakingAllowlistTransaction(idTableStakingAddr flow.Address, allowedNodeIDs []flow.Identifier) (*flow.TransactionBody, error) { env := templates.Environment{ IDTableAddress: idTableStakingAddr.HexWithPrefix(), } allowedNodesArg := SetStakingAllowlistTxArg(allowedNodeIDs) - return flow.NewTransactionBody(). + return flow.NewTransactionBodyBuilder(). SetScript(templates.GenerateSetApprovedNodesScript(env)). + SetPayer(idTableStakingAddr). AddArgument(jsoncdc.MustEncode(allowedNodesArg)). - AddAuthorizer(idTableStakingAddr) + AddAuthorizer(idTableStakingAddr). + Build() } // SetStakingAllowlistTxArg returns the transaction argument for setting the staking allow-list. 
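The blueprint changes above all follow the same migration: the infallible flow.NewTransactionBody() chain is replaced by flow.NewTransactionBodyBuilder(), with an explicit payer set before Build(), which returns an error. A minimal sketch of the resulting call pattern, using only builder methods that appear in this diff (the script bytes and the service address are illustrative placeholders):

	txBody, err := flow.NewTransactionBodyBuilder().
		SetScript(script).                                  // transaction source as []byte
		SetPayer(service).                                  // every migrated call site sets a payer before Build()
		AddArgument(jsoncdc.MustEncode(cadence.NewInt(0))). // arguments are JSON-CDC encoded, as elsewhere in this diff
		AddAuthorizer(service).
		Build() // returns an error that callers must handle (call sites here use require.NoError or wrap it with %w)
	if err != nil {
		return nil, fmt.Errorf("failed to build transaction body: %w", err)
	}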
diff --git a/fvm/blueprints/fees.go b/fvm/blueprints/fees.go index 72b6f1645d1..371b7520f1b 100644 --- a/fvm/blueprints/fees.go +++ b/fvm/blueprints/fees.go @@ -6,24 +6,23 @@ import ( "fmt" "github.com/onflow/cadence" + "github.com/onflow/cadence/common" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/flow-core-contracts/lib/go/contracts" "github.com/onflow/flow-core-contracts/lib/go/templates" "github.com/onflow/flow-go/model/flow" ) var TransactionFeesExecutionEffortWeightsPath = cadence.Path{ - Domain: common.PathDomainStorage.Identifier(), + Domain: common.PathDomainStorage, Identifier: "executionEffortWeights", } var TransactionFeesExecutionMemoryWeightsPath = cadence.Path{ - Domain: common.PathDomainStorage.Identifier(), + Domain: common.PathDomainStorage, Identifier: "executionMemoryWeights", } var TransactionFeesExecutionMemoryLimitPath = cadence.Path{ - Domain: common.PathDomainStorage.Identifier(), + Domain: common.PathDomainStorage, Identifier: "executionMemoryLimit", } @@ -36,27 +35,25 @@ var setupParametersTransactionTemplate string //go:embed scripts/setupStorageForServiceAccountsTemplate.cdc var setupStorageForServiceAccountsTemplate string +//go:embed scripts/setupStorageForAccount.cdc +var setupStorageForAccountTemplate string + //go:embed scripts/setupFeesTransactionTemplate.cdc var setupFeesTransactionTemplate string //go:embed scripts/setExecutionMemoryLimit.cdc var setExecutionMemoryLimit string -func DeployTxFeesContractTransaction(service, fungibleToken, flowToken, storageFees, flowFees flow.Address) *flow.TransactionBody { - contract := contracts.FlowFees( - fungibleToken.HexWithPrefix(), - flowToken.HexWithPrefix(), - storageFees.HexWithPrefix(), - ) - - return flow.NewTransactionBody(). +func DeployTxFeesContractTransaction(flowFees, service flow.Address, contract []byte) *flow.TransactionBodyBuilder { + return flow.NewTransactionBodyBuilder(). SetScript([]byte(deployTxFeesTransactionTemplate)). + SetPayer(service). AddArgument(jsoncdc.MustEncode(cadence.String(hex.EncodeToString(contract)))). AddAuthorizer(flowFees). AddAuthorizer(service) } -func DeployStorageFeesContractTransaction(service flow.Address, contract []byte) *flow.TransactionBody { +func DeployStorageFeesContractTransaction(service flow.Address, contract []byte) *flow.TransactionBodyBuilder { return DeployContractTransaction(service, contract, "FlowStorageFees") } @@ -66,7 +63,7 @@ func SetupParametersTransaction( minimumStorageReservation, storagePerFlow cadence.UFix64, restrictedAccountCreationEnabled cadence.Bool, -) *flow.TransactionBody { +) (*flow.TransactionBody, error) { addressCreationFeeArg, err := jsoncdc.Encode(addressCreationFee) if err != nil { panic(fmt.Sprintf("failed to encode address creation fee: %s", err.Error())) @@ -84,7 +81,7 @@ func SetupParametersTransaction( panic(fmt.Sprintf("failed to encode restrictedAccountCreationEnabled: %s", err.Error())) } - return flow.NewTransactionBody(). + return flow.NewTransactionBodyBuilder(). SetScript([]byte(templates.ReplaceAddresses(setupParametersTransactionTemplate, templates.Environment{ StorageFeesAddress: service.Hex(), @@ -95,13 +92,15 @@ func SetupParametersTransaction( AddArgument(minimumStorageReservationArg). AddArgument(storagePerFlowArg). AddArgument(restrictedAccountCreationEnabledArg). - AddAuthorizer(service) + SetPayer(service). + AddAuthorizer(service). 
+ Build() } func SetupStorageForServiceAccountsTransaction( service, fungibleToken, flowToken, feeContract flow.Address, -) *flow.TransactionBody { - return flow.NewTransactionBody(). +) (*flow.TransactionBody, error) { + return flow.NewTransactionBodyBuilder(). SetScript([]byte(templates.ReplaceAddresses(setupStorageForServiceAccountsTemplate, templates.Environment{ ServiceAccountAddress: service.Hex(), @@ -110,10 +109,30 @@ func SetupStorageForServiceAccountsTransaction( FlowTokenAddress: flowToken.Hex(), })), ). + SetPayer(service). AddAuthorizer(service). AddAuthorizer(fungibleToken). AddAuthorizer(flowToken). - AddAuthorizer(feeContract) + AddAuthorizer(feeContract). + Build() +} + +func SetupStorageForAccountTransaction( + account, service, fungibleToken, flowToken flow.Address, +) (*flow.TransactionBody, error) { + return flow.NewTransactionBodyBuilder(). + SetScript([]byte(templates.ReplaceAddresses(setupStorageForAccountTemplate, + templates.Environment{ + ServiceAccountAddress: service.Hex(), + StorageFeesAddress: service.Hex(), + FungibleTokenAddress: fungibleToken.Hex(), + FlowTokenAddress: flowToken.Hex(), + })), + ). + SetPayer(service). + AddAuthorizer(account). + AddAuthorizer(service). + Build() } func SetupFeesTransaction( @@ -122,7 +141,7 @@ func SetupFeesTransaction( surgeFactor, inclusionEffortCost, executionEffortCost cadence.UFix64, -) *flow.TransactionBody { +) (*flow.TransactionBody, error) { surgeFactorArg, err := jsoncdc.Encode(surgeFactor) if err != nil { panic(fmt.Sprintf("failed to encode surge factor: %s", err.Error())) @@ -136,25 +155,27 @@ func SetupFeesTransaction( panic(fmt.Sprintf("failed to encode execution effort cost: %s", err.Error())) } - return flow.NewTransactionBody(). + return flow.NewTransactionBodyBuilder(). SetScript([]byte(templates.ReplaceAddresses(setupFeesTransactionTemplate, templates.Environment{ FlowFeesAddress: flowFees.Hex(), })), ). + SetPayer(service). AddArgument(surgeFactorArg). AddArgument(inclusionEffortCostArg). AddArgument(executionEffortCostArg). - AddAuthorizer(service) + AddAuthorizer(service). + Build() } // SetExecutionEffortWeightsTransaction creates a transaction that sets up weights for the weighted Meter. func SetExecutionEffortWeightsTransaction( - service flow.Address, + parametersAccount flow.Address, weights map[uint]uint64, ) (*flow.TransactionBody, error) { return setExecutionWeightsTransaction( - service, + parametersAccount, weights, TransactionFeesExecutionEffortWeightsPath, ) @@ -162,18 +183,18 @@ func SetExecutionEffortWeightsTransaction( // SetExecutionMemoryWeightsTransaction creates a transaction that sets up weights for the weighted Meter. func SetExecutionMemoryWeightsTransaction( - service flow.Address, + parametersAccount flow.Address, weights map[uint]uint64, ) (*flow.TransactionBody, error) { return setExecutionWeightsTransaction( - service, + parametersAccount, weights, TransactionFeesExecutionMemoryWeightsPath, ) } func setExecutionWeightsTransaction( - service flow.Address, + parametersAccount flow.Address, weights map[uint]uint64, path cadence.Path, ) (*flow.TransactionBody, error) { @@ -196,20 +217,25 @@ func setExecutionWeightsTransaction( return nil, err } - tx := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(setExecutionWeightsScript)). + SetPayer(parametersAccount). AddArgument(newWeights). AddArgument(storagePath). - AddAuthorizer(service) + AddAuthorizer(parametersAccount). 
+	Build()
+	if err != nil {
+		return nil, err
+	}

-	return tx, nil
+	return txBody, nil
 }

 //go:embed scripts/setExecutionWeightsScript.cdc
 var setExecutionWeightsScript string

 func SetExecutionMemoryLimitTransaction(
-	service flow.Address,
+	parametersAccount flow.Address,
 	limit uint64,
 ) (*flow.TransactionBody, error) {
 	newLimit, err := jsoncdc.Encode(cadence.UInt64(limit))
@@ -222,11 +248,16 @@
 		return nil, err
 	}

-	tx := flow.NewTransactionBody().
+	txBody, err := flow.NewTransactionBodyBuilder().
 		SetScript([]byte(setExecutionMemoryLimit)).
+		SetPayer(parametersAccount).
 		AddArgument(newLimit).
 		AddArgument(storagePath).
-		AddAuthorizer(service)
+		AddAuthorizer(parametersAccount).
+		Build()
+	if err != nil {
+		return nil, err
+	}

-	return tx, nil
+	return txBody, nil
 }
diff --git a/fvm/blueprints/scheduled_callback.go b/fvm/blueprints/scheduled_callback.go
new file mode 100644
index 00000000000..e26f0dc9c64
--- /dev/null
+++ b/fvm/blueprints/scheduled_callback.go
@@ -0,0 +1,179 @@
+package blueprints
+
+import (
+	"fmt"
+
+	"github.com/onflow/cadence"
+	"github.com/onflow/cadence/encoding/ccf"
+	"github.com/rs/zerolog/log"
+
+	jsoncdc "github.com/onflow/cadence/encoding/json"
+	"github.com/onflow/flow-core-contracts/lib/go/templates"
+
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+const callbackTransactionGasLimit = flow.DefaultMaxTransactionGasLimit
+
+// SystemCollection returns the re-created system collection after it has already been executed,
+// using the events from the process callbacks transaction.
+func SystemCollection(chain flow.Chain, processEvents flow.EventsList) (*flow.Collection, error) {
+	process, err := ProcessCallbacksTransaction(chain)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct process callbacks transaction: %w", err)
+	}
+
+	executes, err := ExecuteCallbacksTransactions(chain, processEvents)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct execute callbacks transactions: %w", err)
+	}
+
+	systemTx, err := SystemChunkTransaction(chain)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct system chunk transaction: %w", err)
+	}
+
+	transactions := make([]*flow.TransactionBody, 0, len(executes)+2) // +2 process and system tx
+	transactions = append(transactions, process)
+	transactions = append(transactions, executes...)
+	transactions = append(transactions, systemTx)
+
+	collection, err := flow.NewCollection(flow.UntrustedCollection{
+		Transactions: transactions,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct system collection: %w", err)
+	}
+
+	return collection, nil
+}
+
+// ProcessCallbacksTransaction constructs a transaction for processing callbacks, for the given chain.
+// No errors are expected during normal operation.
+func ProcessCallbacksTransaction(chain flow.Chain) (*flow.TransactionBody, error) {
+	sc := systemcontracts.SystemContractsForChain(chain.ChainID())
+	script := templates.GenerateProcessTransactionScript(sc.AsTemplateEnv())
+
+	return flow.NewTransactionBodyBuilder().
+		AddAuthorizer(sc.FlowServiceAccount.Address).
+		SetScript(script).
+		SetComputeLimit(callbackTransactionGasLimit).Build()
+}
+
+// ExecuteCallbacksTransactions constructs a list of transactions to execute callbacks, for the given chain.
+// No errors are expected during normal operation.
+func ExecuteCallbacksTransactions(chain flow.Chain, processEvents flow.EventsList) ([]*flow.TransactionBody, error) {
+	txs := make([]*flow.TransactionBody, 0, len(processEvents))
+	sc := systemcontracts.SystemContractsForChain(chain.ChainID())
+	env := sc.AsTemplateEnv()
+
+	for _, event := range processEvents {
+		// TODO: check event.EventIndex to make sure the event order is indeed correct
+
+		// skip any fee events or other events that are not pending execution events
+		if !isPendingExecutionEvent(env, event) {
+			continue
+		}
+
+		id, effort, err := callbackArgsFromEvent(event)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get callback args from event: %w", err)
+		}
+
+		tx, err := executeCallbackTransaction(sc, env, id, effort)
+		if err != nil {
+			return nil, fmt.Errorf("failed to construct execute callback transactions: %w", err)
+		}
+		txs = append(txs, tx)
+	}
+
+	return txs, nil
+}
+
+func executeCallbackTransaction(
+	sc *systemcontracts.SystemContracts,
+	env templates.Environment,
+	id []byte,
+	effort uint64,
+) (*flow.TransactionBody, error) {
+	script := templates.GenerateExecuteTransactionScript(env)
+
+	return flow.NewTransactionBodyBuilder().
+		AddAuthorizer(sc.FlowServiceAccount.Address).
+		SetScript(script).
+		AddArgument(id).
+		SetComputeLimit(effort).
+		Build()
+}
+
+// callbackArgsFromEvent decodes the event payload and returns the callback ID and effort.
+//
+// The PendingExecution event is emitted by the process callbacks transaction of the
+// transaction scheduler contract and has the following signature:
+// event PendingExecution(id: UInt64, priority: UInt8, executionEffort: UInt64, fees: UFix64, callbackOwner: Address)
+func callbackArgsFromEvent(event flow.Event) ([]byte, uint64, error) {
+	const (
+		processedCallbackIDFieldName     = "id"
+		processedCallbackEffortFieldName = "executionEffort"
+	)
+
+	eventData, err := ccf.Decode(nil, event.Payload)
+	if err != nil {
+		return nil, 0, fmt.Errorf("failed to decode event: %w", err)
+	}
+
+	cadenceEvent, ok := eventData.(cadence.Event)
+	if !ok {
+		return nil, 0, fmt.Errorf("event data is not a cadence event")
+	}
+
+	idValue := cadence.SearchFieldByName(
+		cadenceEvent,
+		processedCallbackIDFieldName,
+	)
+
+	effortValue := cadence.SearchFieldByName(
+		cadenceEvent,
+		processedCallbackEffortFieldName,
+	)
+
+	id, ok := idValue.(cadence.UInt64)
+	if !ok {
+		return nil, 0, fmt.Errorf("id is not uint64")
+	}
+
+	cadenceEffort, ok := effortValue.(cadence.UInt64)
+	if !ok {
+		return nil, 0, fmt.Errorf("effort is not uint64")
+	}
+
+	effort := uint64(cadenceEffort)
+
+	if effort > flow.DefaultMaxTransactionGasLimit {
+		log.Warn().Uint64("effort", effort).Msg("effort is greater than max transaction gas limit, setting to max")
+		effort = flow.DefaultMaxTransactionGasLimit
+	}
+
+	encID, err := jsoncdc.Encode(id)
+	if err != nil {
+		return nil, 0, fmt.Errorf("failed to encode id: %w", err)
+	}
+
+	return encID, effort, nil
+}
+
+func isPendingExecutionEvent(env templates.Environment, event flow.Event) bool {
+	processedEventType := PendingExecutionEventType(env)
+	return event.Type == processedEventType
+}
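Review note: a small sketch of how a consumer might use the exported event type to pre-filter a transaction's events before building execute transactions (hypothetical helper; the function and its name are illustrative, the called APIs are the ones introduced in this diff):

```go
package example

import (
	"github.com/onflow/flow-go/fvm/blueprints"
	"github.com/onflow/flow-go/fvm/systemcontracts"
	"github.com/onflow/flow-go/model/flow"
)

// pendingExecutionEvents keeps only the scheduler's PendingExecution events,
// mirroring the isPendingExecutionEvent check above.
func pendingExecutionEvents(chainID flow.ChainID, events flow.EventsList) flow.EventsList {
	env := systemcontracts.SystemContractsForChain(chainID).AsTemplateEnv()
	want := blueprints.PendingExecutionEventType(env)

	var filtered flow.EventsList
	for _, ev := range events {
		if ev.Type == want {
			filtered = append(filtered, ev)
		}
	}
	return filtered
}
```

+
+// PendingExecutionEventType returns the event type of the FlowTransactionScheduler
+// PendingExecution event for the provided environment.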
+func PendingExecutionEventType(env templates.Environment) flow.EventType { + const processedEventTypeTemplate = "A.%v.FlowTransactionScheduler.PendingExecution" + + scheduledContractAddress := env.FlowTransactionSchedulerAddress + return flow.EventType(fmt.Sprintf(processedEventTypeTemplate, scheduledContractAddress)) +} diff --git a/fvm/blueprints/scheduled_callback_test.go b/fvm/blueprints/scheduled_callback_test.go new file mode 100644 index 00000000000..f86f7d3babc --- /dev/null +++ b/fvm/blueprints/scheduled_callback_test.go @@ -0,0 +1,436 @@ +package blueprints_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + cadenceCommon "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/ccf" + + jsoncdc "github.com/onflow/cadence/encoding/json" + + "github.com/onflow/flow-go/fvm/blueprints" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestProcessCallbacksTransaction(t *testing.T) { + t.Parallel() + + chain := flow.Mainnet.Chain() + tx, err := blueprints.ProcessCallbacksTransaction(chain) + require.NoError(t, err) + + assert.NotNil(t, tx) + assert.NotEmpty(t, tx.Script) + require.False(t, strings.Contains(string(tx.Script), `import "FlowTransactionScheduler"`), "should resolve callback scheduler import") + assert.Equal(t, uint64(flow.DefaultMaxTransactionGasLimit), tx.GasLimit) + assert.Equal(t, tx.Authorizers, []flow.Address{chain.ServiceAddress()}) + assert.Empty(t, tx.Arguments) +} + +func TestExecuteCallbacksTransactions(t *testing.T) { + t.Parallel() + + chain := flow.Mainnet.Chain() + + tests := []struct { + name string + events []flow.Event + expectedTxs int + expectError bool + errorMessage string + }{ + { + name: "no events", + events: []flow.Event{}, + expectedTxs: 0, + expectError: false, + }, + { + name: "single valid event", + events: []flow.Event{createValidCallbackEvent(t, 1, 100)}, + expectedTxs: 1, + expectError: false, + }, + { + name: "multiple valid events", + events: []flow.Event{ + createValidCallbackEvent(t, 1, 100), + createValidCallbackEvent(t, 2, 200), + createValidCallbackEvent(t, 3, 300), + }, + expectedTxs: 3, + expectError: false, + }, + { + name: "invalid event type ignored", + events: []flow.Event{createInvalidTypeEvent()}, + expectedTxs: 0, + expectError: false, + }, + { + name: "invalid event payload ignored", + events: []flow.Event{createInvalidPayloadEvent()}, + expectedTxs: 0, + expectError: false, + }, + { + name: "invalid CCF payload", + events: []flow.Event{createPendingExecutionEventWithPayload([]byte{0xFF, 0xAB, 0xCD})}, // Invalid CCF bytes + expectedTxs: 0, + expectError: true, + errorMessage: "failed to decode event", + }, + { + name: "non-cadence event data", + events: []flow.Event{createPendingExecutionEventWithEncodedValue(t, cadence.String("not an event"))}, + expectedTxs: 0, + expectError: true, + errorMessage: "event data is not a cadence event", + }, + { + name: "missing id field", + events: []flow.Event{createEventWithModifiedField(t, "id", nil)}, + expectedTxs: 0, + expectError: true, + errorMessage: "id is not uint64", + }, + { + name: "missing effort field", + events: []flow.Event{createEventWithModifiedField(t, "executionEffort", nil)}, + expectedTxs: 0, + expectError: true, + errorMessage: "effort is not uint64", + }, + { + name: "effort exceeding max gas limit", + events: 
[]flow.Event{createValidCallbackEvent(t, 1, flow.DefaultMaxTransactionGasLimit+1000)}, + expectedTxs: 1, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + txs, err := blueprints.ExecuteCallbacksTransactions(chain, tt.events) + + if tt.expectError { + assert.Errorf(t, err, tt.name) + assert.Contains(t, err.Error(), tt.errorMessage) + assert.Nil(t, txs) + return + } + + assert.NoError(t, err) + assert.Len(t, txs, tt.expectedTxs) + + // Verify gas limit for effort exceeding max case + if tt.name == "effort exceeding max gas limit" && len(txs) > 0 { + assert.Equal(t, uint64(flow.DefaultMaxTransactionGasLimit), txs[0].GasLimit) + } + + for i, tx := range txs { + assert.NotNil(t, tx) + assert.NotEmpty(t, tx.Script) + if tt.name != "effort exceeding max gas limit" { + expectedEffort := uint64(100 * (i + 1)) // Events created with efforts 100, 200, 300 + assert.Equal(t, expectedEffort, tx.GasLimit) + } else { + assert.Equal(t, uint64(flow.DefaultMaxTransactionGasLimit), tx.GasLimit) + } + assert.Equal(t, tx.Authorizers, []flow.Address{chain.ServiceAddress()}) + assert.Len(t, tx.Arguments, 1) + assert.NotEmpty(t, tx.Arguments[0]) + + t.Logf("Transaction %d: ID arg length: %d, GasLimit: %d", + i, len(tx.Arguments[0]), tx.GasLimit) + } + }) + } +} + +func TestExecuteCallbackTransaction(t *testing.T) { + t.Parallel() + + chain := flow.Mainnet.Chain() + + const id = 123 + const effort = 456 + event := createValidCallbackEvent(t, id, effort) + txs, err := blueprints.ExecuteCallbacksTransactions(chain, []flow.Event{event}) + + require.NoError(t, err) + require.Len(t, txs, 1) + + tx := txs[0] + assert.NotNil(t, tx) + assert.NotEmpty(t, tx.Script) + require.False(t, strings.Contains(string(tx.Script), `import "FlowTransactionScheduler"`), "should resolve callback scheduler import") + assert.Equal(t, uint64(effort), tx.GasLimit) + assert.Len(t, tx.Arguments, 1) + + expectedEncodedID, err := jsoncdc.Encode(cadence.NewUInt64(id)) + require.NoError(t, err) + assert.Equal(t, tx.Arguments[0], expectedEncodedID) + + assert.Equal(t, tx.GasLimit, uint64(effort)) +} + +func createValidCallbackEvent(t *testing.T, id uint64, effort uint64) flow.Event { + const processedEventTypeTemplate = "A.%v.FlowTransactionScheduler.PendingExecution" + env := systemcontracts.SystemContractsForChain(flow.Mainnet.Chain().ChainID()).AsTemplateEnv() + eventTypeString := fmt.Sprintf(processedEventTypeTemplate, env.FlowTransactionSchedulerAddress) + loc, err := cadenceCommon.HexToAddress(env.FlowTransactionSchedulerAddress) + require.NoError(t, err) + location := cadenceCommon.NewAddressLocation(nil, loc, "PendingExecution") + + eventType := cadence.NewEventType( + location, + "PendingExecution", + []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + {Identifier: "callbackOwner", Type: cadence.AddressType}, + }, + nil, + ) + + event := cadence.NewEvent( + []cadence.Value{ + cadence.NewUInt64(id), + cadence.NewUInt8(1), + cadence.NewUInt64(effort), + cadence.NewAddress([8]byte{}), + }, + ).WithType(eventType) + + payload, err := ccf.Encode(event) + require.NoError(t, err) + + return flow.Event{ + Type: flow.EventType(eventTypeString), + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: payload, + } +} + +func createInvalidTypeEvent() flow.Event { + return flow.Event{ + Type: 
flow.EventType("A.0000000000000000.SomeContract.WrongEvent"), + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: []byte("invalid"), + } +} + +func createInvalidPayloadEvent() flow.Event { + return flow.Event{ + Type: flow.EventType("A.0000000000000000.FlowTransactionScheduler.PendingExecution"), + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: []byte("not valid ccf"), + } +} + +func createPendingExecutionEventWithPayload(payload []byte) flow.Event { + const processedEventTypeTemplate = "A.%v.FlowTransactionScheduler.PendingExecution" + env := systemcontracts.SystemContractsForChain(flow.Mainnet.Chain().ChainID()).AsTemplateEnv() + eventTypeString := fmt.Sprintf(processedEventTypeTemplate, env.FlowTransactionSchedulerAddress) + + return flow.Event{ + Type: flow.EventType(eventTypeString), + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: payload, + } +} + +func createPendingExecutionEventWithEncodedValue(t *testing.T, value cadence.Value) flow.Event { + payload, err := ccf.Encode(value) + require.NoError(t, err) + return createPendingExecutionEventWithPayload(payload) +} + +func TestSystemCollection(t *testing.T) { + t.Parallel() + + chain := flow.Mainnet.Chain() + + tests := []struct { + name string + events []flow.Event + expectedTxCount int + errorMessage string + }{ + { + name: "no events", + events: []flow.Event{}, + expectedTxCount: 2, // process + system chunk + }, + { + name: "single valid callback event", + events: []flow.Event{createValidCallbackEvent(t, 1, 100)}, + expectedTxCount: 3, // process + execute + system chunk + }, + { + name: "multiple valid callback events", + events: []flow.Event{ + createValidCallbackEvent(t, 1, 100), + createValidCallbackEvent(t, 2, 200), + createValidCallbackEvent(t, 3, 300), + }, + expectedTxCount: 5, // process + 3 executes + system chunk + }, + { + name: "mixed events - valid callbacks and invalid types", + events: []flow.Event{ + createValidCallbackEvent(t, 1, 100), + createInvalidTypeEvent(), + createValidCallbackEvent(t, 2, 200), + createInvalidPayloadEvent(), + }, + expectedTxCount: 4, // process + 2 executes + system chunk + }, + { + name: "only invalid event types", + events: []flow.Event{createInvalidTypeEvent(), createInvalidPayloadEvent()}, + expectedTxCount: 2, // process + system chunk + }, + { + name: "invalid CCF payload in callback event", + events: []flow.Event{createPendingExecutionEventWithPayload([]byte{0xFF, 0xAB, 0xCD})}, + errorMessage: "failed to construct execute callbacks transactions", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + collection, err := blueprints.SystemCollection(chain, tt.events) + + if tt.errorMessage != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMessage) + assert.Nil(t, collection) + return + } + + require.NoError(t, err) + require.NotNil(t, collection) + + transactions := collection.Transactions + assert.Len(t, transactions, tt.expectedTxCount) + + if tt.expectedTxCount > 0 { + // First transaction should always be the process transaction + processTx := transactions[0] + assert.NotNil(t, processTx) + assert.NotEmpty(t, processTx.Script) + assert.Equal(t, uint64(flow.DefaultMaxTransactionGasLimit), processTx.GasLimit) + assert.Equal(t, []flow.Address{chain.ServiceAddress()}, processTx.Authorizers) + assert.Empty(t, processTx.Arguments) + + // Last transaction should always be the system chunk 
transaction + systemChunkTx := transactions[len(transactions)-1] + assert.NotNil(t, systemChunkTx) + assert.NotEmpty(t, systemChunkTx.Script) + assert.Equal(t, []flow.Address{chain.ServiceAddress()}, systemChunkTx.Authorizers) + + // Middle transactions should be execute callback transactions + executeCount := tt.expectedTxCount - 2 // subtract process and system chunk + if executeCount > 0 { + for i := 1; i < len(transactions)-1; i++ { + executeTx := transactions[i] + assert.NotNil(t, executeTx) + assert.NotEmpty(t, executeTx.Script) + assert.Equal(t, []flow.Address{chain.ServiceAddress()}, executeTx.Authorizers) + assert.Len(t, executeTx.Arguments, 1) + assert.NotEmpty(t, executeTx.Arguments[0]) + } + } + } + + // Verify collection properties + assert.NotEmpty(t, collection.ID()) + assert.Equal(t, len(transactions), len(collection.Transactions)) + }) + } +} + +func createEventWithModifiedField(t *testing.T, fieldName string, newValue cadence.Value) flow.Event { + const processedEventTypeTemplate = "A.%v.FlowTransactionScheduler.PendingExecution" + env := systemcontracts.SystemContractsForChain(flow.Mainnet.Chain().ChainID()).AsTemplateEnv() + eventTypeString := fmt.Sprintf(processedEventTypeTemplate, env.FlowTransactionSchedulerAddress) + loc, err := cadenceCommon.HexToAddress(env.FlowTransactionSchedulerAddress) + require.NoError(t, err) + location := cadenceCommon.NewAddressLocation(nil, loc, "PendingExecution") + + fields := []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "priority", Type: cadence.UInt8Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, + {Identifier: "callbackOwner", Type: cadence.AddressType}, + } + + values := []cadence.Value{ + cadence.NewUInt64(123), + cadence.NewUInt8(1), + cadence.NewUInt64(456), + cadence.NewAddress([8]byte{}), + } + + // Handle field modification or removal + if newValue == nil { + // Remove the field entirely + var filteredFields []cadence.Field + var filteredValues []cadence.Value + for i, field := range fields { + if field.Identifier != fieldName { + filteredFields = append(filteredFields, field) + filteredValues = append(filteredValues, values[i]) + } + } + fields = filteredFields + values = filteredValues + } else { + // Replace the field value with wrong type + switch fieldName { + case "id": + values[0] = newValue + case "executionEffort": + values[2] = newValue + } + } + + eventType := cadence.NewEventType( + location, + "PendingExecution", + fields, + nil, + ) + + event := cadence.NewEvent(values).WithType(eventType) + + payload, err := ccf.Encode(event) + require.NoError(t, err) + + return flow.Event{ + Type: flow.EventType(eventTypeString), + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: payload, + } +} diff --git a/fvm/blueprints/scripts/createFlowTokenMinterTransactionTemplate.cdc b/fvm/blueprints/scripts/createFlowTokenMinterTransactionTemplate.cdc index d9641454cdb..d237749a13b 100644 --- a/fvm/blueprints/scripts/createFlowTokenMinterTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/createFlowTokenMinterTransactionTemplate.cdc @@ -1,14 +1,14 @@ -import FlowToken from 0xFLOWTOKENADDRESS +import "FlowToken" transaction { - prepare(serviceAccount: AuthAccount) { + prepare(serviceAccount: auth(Storage) &Account) { /// Borrow a reference to the Flow Token Admin in the account storage - let flowTokenAdmin = serviceAccount.borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin) + let flowTokenAdmin = 
serviceAccount.storage.borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin) ?? panic("Could not borrow a reference to the Flow Token Admin resource") /// Create a flowTokenMinterResource let flowTokenMinter <- flowTokenAdmin.createNewMinter(allowedAmount: 1000000000.0) - serviceAccount.save(<-flowTokenMinter, to: /storage/flowTokenMinter) + serviceAccount.storage.save(<-flowTokenMinter, to: /storage/flowTokenMinter) } } diff --git a/fvm/blueprints/scripts/deployContractTransactionTemplate.cdc b/fvm/blueprints/scripts/deployContractTransactionTemplate.cdc index 02573e4342b..e91e5daf349 100644 --- a/fvm/blueprints/scripts/deployContractTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/deployContractTransactionTemplate.cdc @@ -1,5 +1,5 @@ transaction(name: String, code: String) { - prepare(signer: AuthAccount) { - signer.contracts.add(name: name, code: code.decodeHex()) + prepare(signer: auth(AddContract) &Account) { + signer.contracts.add(name: name, code: code.utf8) } } diff --git a/fvm/blueprints/scripts/deployEpochTransactionTemplate.cdc b/fvm/blueprints/scripts/deployEpochTransactionTemplate.cdc index 48903277868..93140580deb 100644 --- a/fvm/blueprints/scripts/deployEpochTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/deployEpochTransactionTemplate.cdc @@ -1,6 +1,7 @@ -import FlowClusterQC from 0xQCADDRESS +import "FlowClusterQC" transaction( + name: String, code: String, currentEpochCounter: UInt64, numViewsInEpoch: UInt64, @@ -10,7 +11,7 @@ transaction( FLOWsupplyIncreasePercentage: UFix64, randomSource: String, clusterWeights: [{String: UInt64}]) { - prepare(serviceAccount: AuthAccount) { + prepare(serviceAccount: auth(AddContract) &Account) { // first, construct Cluster objects from cluster weights let clusters: [FlowClusterQC.Cluster] = [] @@ -21,7 +22,7 @@ transaction( } serviceAccount.contracts.add( - name: "FlowEpoch", + name: name, code: code.decodeHex(), currentEpochCounter: currentEpochCounter, numViewsInEpoch: numViewsInEpoch, diff --git a/fvm/blueprints/scripts/deployFlowTokenTransactionTemplate.cdc b/fvm/blueprints/scripts/deployFlowTokenTransactionTemplate.cdc index 9f91d8b2c5d..fef906fc630 100644 --- a/fvm/blueprints/scripts/deployFlowTokenTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/deployFlowTokenTransactionTemplate.cdc @@ -1,5 +1,5 @@ transaction(code: String) { - prepare(flowTokenAccount: AuthAccount, serviceAccount: AuthAccount) { + prepare(flowTokenAccount: auth(AddContract) &Account, serviceAccount: auth(Storage, Capabilities) &Account) { let adminAccount = serviceAccount flowTokenAccount.contracts.add(name: "FlowToken", code: code.decodeHex(), adminAccount: adminAccount) } diff --git a/fvm/blueprints/scripts/deployIDTableStakingTransactionTemplate.cdc b/fvm/blueprints/scripts/deployIDTableStakingTransactionTemplate.cdc index e64ea463ece..2f11a4fc9fc 100644 --- a/fvm/blueprints/scripts/deployIDTableStakingTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/deployIDTableStakingTransactionTemplate.cdc @@ -1,6 +1,6 @@ // Note: uses a default large candidate limit transaction(code: String, epochTokenPayout: UFix64, rewardCut: UFix64) { - prepare(serviceAccount: AuthAccount) { + prepare(serviceAccount: auth(AddContract) &Account) { let candidateNodeLimits: {UInt8: UInt64} = {1: 10000, 2: 10000, 3: 10000, 4: 10000, 5: 10000} serviceAccount.contracts.add(name: "FlowIDTableStaking", code: code.decodeHex(), epochTokenPayout: epochTokenPayout, rewardCut: rewardCut, candidateNodeLimits: candidateNodeLimits) } diff --git 
a/fvm/blueprints/scripts/deployLockedTokensTemplate.cdc b/fvm/blueprints/scripts/deployLockedTokensTemplate.cdc index 4a677162611..6399bca0df4 100644 --- a/fvm/blueprints/scripts/deployLockedTokensTemplate.cdc +++ b/fvm/blueprints/scripts/deployLockedTokensTemplate.cdc @@ -1,6 +1,6 @@ transaction(publicKeys: [[UInt8]], code: String) { - prepare(admin: AuthAccount) { + prepare(admin: auth(AddContract, Storage) &Account) { admin.contracts.add(name: "LockedTokens", code: code.decodeHex(), admin) } diff --git a/fvm/blueprints/scripts/deployNodeVersionBeaconTransactionTemplate.cdc b/fvm/blueprints/scripts/deployNodeVersionBeaconTransactionTemplate.cdc index 24c05ac47c1..a7a80ef92bb 100644 --- a/fvm/blueprints/scripts/deployNodeVersionBeaconTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/deployNodeVersionBeaconTransactionTemplate.cdc @@ -1,5 +1,5 @@ transaction(code: String, versionThreshold: UInt64) { - prepare(serviceAccount: AuthAccount) { + prepare(serviceAccount: auth(AddContract) &Account) { serviceAccount.contracts.add(name: "NodeVersionBeacon", code: code.decodeHex(), versionUpdateBuffer: versionThreshold) } -} \ No newline at end of file +} diff --git a/fvm/blueprints/scripts/deployRandomBeaconHistoryTransactionTemplate.cdc b/fvm/blueprints/scripts/deployRandomBeaconHistoryTransactionTemplate.cdc new file mode 100644 index 00000000000..867f1f4e22a --- /dev/null +++ b/fvm/blueprints/scripts/deployRandomBeaconHistoryTransactionTemplate.cdc @@ -0,0 +1,5 @@ +transaction(code: String) { + prepare(serviceAccount: auth(AddContract) &Account) { + serviceAccount.contracts.add(name: "RandomBeaconHistory", code: code.utf8) + } +} diff --git a/fvm/blueprints/scripts/deployTxFeesTransactionTemplate.cdc b/fvm/blueprints/scripts/deployTxFeesTransactionTemplate.cdc index f62a2503209..4f34449551a 100644 --- a/fvm/blueprints/scripts/deployTxFeesTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/deployTxFeesTransactionTemplate.cdc @@ -1,5 +1,5 @@ transaction(code: String) { - prepare(flowFeesAccount: AuthAccount, serviceAccount: AuthAccount) { + prepare(flowFeesAccount: auth(AddContract) &Account, serviceAccount: auth(SaveValue) &Account) { let adminAccount = serviceAccount flowFeesAccount.contracts.add(name: "FlowFees", code: code.decodeHex(), adminAccount: adminAccount) } diff --git a/fvm/blueprints/scripts/fundAccountTemplate.cdc b/fvm/blueprints/scripts/fundAccountTemplate.cdc index baab6b90db4..fb495ebe26c 100644 --- a/fvm/blueprints/scripts/fundAccountTemplate.cdc +++ b/fvm/blueprints/scripts/fundAccountTemplate.cdc @@ -1,18 +1,20 @@ -import FungibleToken from 0xFUNGIBLETOKENADDRESS -import FlowToken from 0xFLOWTOKENADDRESS +import "FungibleToken" +import "FlowToken" transaction(amount: UFix64, recipient: Address) { - let sentVault: @FungibleToken.Vault - prepare(signer: AuthAccount) { - let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) - ?? panic("failed to borrow reference to sender vault") - self.sentVault <- vaultRef.withdraw(amount: amount) + + let sentVault: @{FungibleToken.Vault} + + prepare(signer: auth(BorrowValue) &Account) { + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) + ?? panic("failed to borrow reference to sender vault") + self.sentVault <- vaultRef.withdraw(amount: amount) } + execute { - let receiverRef = getAccount(recipient) - .getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() - ?? 
panic("failed to borrow reference to recipient vault") - receiverRef.deposit(from: <-self.sentVault) + let receiverRef = getAccount(recipient) + .capabilities.borrow<&FlowToken.Vault>(/public/flowTokenReceiver) + ?? panic("failed to borrow reference to recipient vault") + receiverRef.deposit(from: <-self.sentVault) } } diff --git a/fvm/blueprints/scripts/mintFlowTokenTransactionTemplate.cdc b/fvm/blueprints/scripts/mintFlowTokenTransactionTemplate.cdc index 91bc93484d6..e2e576edffc 100644 --- a/fvm/blueprints/scripts/mintFlowTokenTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/mintFlowTokenTransactionTemplate.cdc @@ -1,19 +1,18 @@ -import FungibleToken from 0xFUNGIBLETOKENADDRESS -import FlowToken from 0xFLOWTOKENADDRESS +import "FungibleToken" +import "FlowToken" transaction(amount: UFix64) { let tokenAdmin: &FlowToken.Administrator - let tokenReceiver: &FlowToken.Vault{FungibleToken.Receiver} + let tokenReceiver: &FlowToken.Vault - prepare(signer: AuthAccount) { - self.tokenAdmin = signer + prepare(signer: auth(BorrowValue) &Account) { + self.tokenAdmin = signer.storage .borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin) ?? panic("Signer is not the token admin") self.tokenReceiver = signer - .getCapability(/public/flowTokenReceiver) - .borrow<&FlowToken.Vault{FungibleToken.Receiver}>() + .capabilities.borrow<&FlowToken.Vault>(/public/flowTokenReceiver) ?? panic("Unable to borrow receiver reference for recipient") } diff --git a/fvm/blueprints/scripts/setContractOperationAuthorizersTransactionTemplate.cdc b/fvm/blueprints/scripts/setContractOperationAuthorizersTransactionTemplate.cdc index 14d64e119b1..9931e6abb19 100644 --- a/fvm/blueprints/scripts/setContractOperationAuthorizersTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/setContractOperationAuthorizersTransactionTemplate.cdc @@ -1,6 +1,6 @@ transaction(addresses: [Address], path: StoragePath) { - prepare(signer: AuthAccount) { - signer.load<[Address]>(from: path) - signer.save(addresses, to: path) + prepare(signer: auth(Storage) &Account) { + signer.storage.load<[Address]>(from: path) + signer.storage.save(addresses, to: path) } } diff --git a/fvm/blueprints/scripts/setExecutionMemoryLimit.cdc b/fvm/blueprints/scripts/setExecutionMemoryLimit.cdc index 210678fce10..2a7831dbf1c 100644 --- a/fvm/blueprints/scripts/setExecutionMemoryLimit.cdc +++ b/fvm/blueprints/scripts/setExecutionMemoryLimit.cdc @@ -1,6 +1,6 @@ transaction(newLimit: UInt64, path: StoragePath) { - prepare(signer: AuthAccount) { - signer.load<UInt64>(from: path) - signer.save(newLimit, to: path) + prepare(signer: auth(Storage) &Account) { + signer.storage.load<UInt64>(from: path) + signer.storage.save(newLimit, to: path) } } diff --git a/fvm/blueprints/scripts/setExecutionWeightsScript.cdc b/fvm/blueprints/scripts/setExecutionWeightsScript.cdc index dd46488377a..f53af9f1c6e 100644 --- a/fvm/blueprints/scripts/setExecutionWeightsScript.cdc +++ b/fvm/blueprints/scripts/setExecutionWeightsScript.cdc @@ -1,6 +1,6 @@ transaction(newWeights: {UInt64: UInt64}, path: StoragePath) { - prepare(signer: AuthAccount) { - signer.load<{UInt64: UInt64}>(from: path) - signer.save(newWeights, to: path) + prepare(signer: auth(Storage) &Account) { + signer.storage.load<{UInt64: UInt64}>(from: path) + signer.storage.save(newWeights, to: path) } } diff --git a/fvm/blueprints/scripts/setIsContractDeploymentRestrictedTransactionTemplate.cdc b/fvm/blueprints/scripts/setIsContractDeploymentRestrictedTransactionTemplate.cdc index fec1086af98..f1d075c59da 100644 --- 
a/fvm/blueprints/scripts/setIsContractDeploymentRestrictedTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/setIsContractDeploymentRestrictedTransactionTemplate.cdc @@ -1,6 +1,6 @@ transaction(restricted: Bool, path: StoragePath) { - prepare(signer: AuthAccount) { - signer.load<Bool>(from: path) - signer.save(restricted, to: path) + prepare(signer: auth(Storage) &Account) { + signer.storage.load<Bool>(from: path) + signer.storage.save(restricted, to: path) } } diff --git a/fvm/blueprints/scripts/setupAccountTemplate.cdc b/fvm/blueprints/scripts/setupAccountTemplate.cdc index 3cc88518ffb..d40abf5c2aa 100644 --- a/fvm/blueprints/scripts/setupAccountTemplate.cdc +++ b/fvm/blueprints/scripts/setupAccountTemplate.cdc @@ -2,30 +2,26 @@ // to add a Vault resource to their account // so that they can use the flowToken -import FungibleToken from 0xFUNGIBLETOKENADDRESS -import FlowToken from 0xFLOWTOKENADDRESS +import "FungibleToken" +import "FlowToken" transaction { - prepare(signer: AuthAccount) { + prepare(signer: auth(Storage, Capabilities) &Account) { - if signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) == nil { + if signer.storage.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) == nil { // Create a new flowToken Vault and put it in storage - signer.save(<-FlowToken.createEmptyVault(), to: /storage/flowTokenVault) + signer.storage.save(<-FlowToken.createEmptyVault(vaultType: Type<@FlowToken.Vault>()), to: /storage/flowTokenVault) // Create a public capability to the Vault that only exposes // the deposit function through the Receiver interface - signer.link<&FlowToken.Vault{FungibleToken.Receiver}>( - /public/flowTokenReceiver, - target: /storage/flowTokenVault - ) + let receiverCap = signer.capabilities.storage.issue<&FlowToken.Vault>(/storage/flowTokenVault) + signer.capabilities.publish(receiverCap, at: /public/flowTokenReceiver) // Create a public capability to the Vault that only exposes // the balance field through the Balance interface - signer.link<&FlowToken.Vault{FungibleToken.Balance}>( - /public/flowTokenBalance, - target: /storage/flowTokenVault - ) + let balanceCap = signer.capabilities.storage.issue<&FlowToken.Vault>(/storage/flowTokenVault) + signer.capabilities.publish(balanceCap, at: /public/flowTokenBalance) } } } diff --git a/fvm/blueprints/scripts/setupFeesTransactionTemplate.cdc b/fvm/blueprints/scripts/setupFeesTransactionTemplate.cdc index 0d15c0040ef..59bfa33dabd 100644 --- a/fvm/blueprints/scripts/setupFeesTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/setupFeesTransactionTemplate.cdc @@ -1,9 +1,9 @@ -import FlowFees from 0xFLOWFEESADDRESS +import "FlowFees" transaction(surgeFactor: UFix64, inclusionEffortCost: UFix64, executionEffortCost: UFix64) { - prepare(service: AuthAccount) { + prepare(service: auth(BorrowValue) &Account) { - let flowFeesAdmin = service.borrow<&FlowFees.Administrator>(from: /storage/flowFeesAdmin) + let flowFeesAdmin = service.storage.borrow<&FlowFees.Administrator>(from: /storage/flowFeesAdmin) ?? 
panic("Could not borrow reference to the flow fees admin!"); flowFeesAdmin.setFeeParameters(surgeFactor: surgeFactor, inclusionEffortCost: inclusionEffortCost, executionEffortCost: executionEffortCost) diff --git a/fvm/blueprints/scripts/setupParametersTransactionTemplate.cdc b/fvm/blueprints/scripts/setupParametersTransactionTemplate.cdc index 61cf11e1a85..6f8a8a1ddc7 100644 --- a/fvm/blueprints/scripts/setupParametersTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/setupParametersTransactionTemplate.cdc @@ -1,12 +1,13 @@ -import FlowStorageFees from 0xFLOWSTORAGEFEESADDRESS -import FlowServiceAccount from 0xFLOWSERVICEADDRESS +import "FlowStorageFees" +import "FlowServiceAccount" transaction(accountCreationFee: UFix64, minimumStorageReservation: UFix64, storageMegaBytesPerReservedFLOW: UFix64, restrictedAccountCreationEnabled: Bool) { - prepare(service: AuthAccount) { - let serviceAdmin = service.borrow<&FlowServiceAccount.Administrator>(from: /storage/flowServiceAdmin) + + prepare(service: auth(BorrowValue) &Account) { + let serviceAdmin = service.storage.borrow<&FlowServiceAccount.Administrator>(from: /storage/flowServiceAdmin) ?? panic("Could not borrow reference to the flow service admin!"); - let storageAdmin = service.borrow<&FlowStorageFees.Administrator>(from: /storage/storageFeesAdmin) + let storageAdmin = service.storage.borrow<&FlowStorageFees.Administrator>(from: /storage/storageFeesAdmin) ?? panic("Could not borrow reference to the flow storage fees admin!"); serviceAdmin.setAccountCreationFee(accountCreationFee) diff --git a/fvm/blueprints/scripts/setupStorageForAccount.cdc b/fvm/blueprints/scripts/setupStorageForAccount.cdc new file mode 100644 index 00000000000..724c02199b9 --- /dev/null +++ b/fvm/blueprints/scripts/setupStorageForAccount.cdc @@ -0,0 +1,32 @@ +import "FlowServiceAccount" +import "FlowStorageFees" +import "FungibleToken" +import "FlowToken" + +// This transaction sets up storage on a auth account. +// This is used during bootstrapping a local environment +transaction() { + prepare( + account: auth(SaveValue, Capabilities) &Account, + service: auth(BorrowValue) &Account + ) { + // take all the funds from the service account + let tokenVault = service.storage + .borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) + ?? panic("Unable to borrow reference to the default token vault") + + let storageReservation <- tokenVault.withdraw(amount: FlowStorageFees.minimumStorageReservation) as! @FlowToken.Vault + + let hasReceiver = account.capabilities + .get<&{FungibleToken.Receiver}>(/public/flowTokenReceiver).check() + if !hasReceiver { + FlowServiceAccount.initDefaultToken(account) + } + + let receiver = account.capabilities + .borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) + ?? 
panic("Could not borrow receiver reference to the recipient's Vault") + + receiver.deposit(from: <-storageReservation) + } +} diff --git a/fvm/blueprints/scripts/setupStorageForServiceAccountsTemplate.cdc b/fvm/blueprints/scripts/setupStorageForServiceAccountsTemplate.cdc index 0d0aada5164..33275ea9755 100644 --- a/fvm/blueprints/scripts/setupStorageForServiceAccountsTemplate.cdc +++ b/fvm/blueprints/scripts/setupStorageForServiceAccountsTemplate.cdc @@ -1,25 +1,35 @@ -import FlowServiceAccount from 0xFLOWSERVICEADDRESS -import FlowStorageFees from 0xFLOWSTORAGEFEESADDRESS -import FungibleToken from 0xFUNGIBLETOKENADDRESS -import FlowToken from 0xFLOWTOKENADDRESS +import "FlowServiceAccount" +import "FlowStorageFees" +import "FungibleToken" +import "FlowToken" // This transaction sets up storage on any auth accounts that were created before the storage fees. // This is used during bootstrapping a local environment transaction() { - prepare(service: AuthAccount, fungibleToken: AuthAccount, flowToken: AuthAccount, feeContract: AuthAccount) { - let authAccounts = [service, fungibleToken, flowToken, feeContract] - // take all the funds from the service account - let tokenVault = service.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + prepare( + service: auth(SaveValue, BorrowValue, Capabilities) &Account, + fungibleToken: auth(SaveValue, Capabilities) &Account, + flowToken: auth(SaveValue, Capabilities) &Account, + feeContract: auth(SaveValue, Capabilities) &Account, + ) { + + let authAccounts:[auth(SaveValue, Capabilities) &Account] = [service, fungibleToken, flowToken, feeContract] + + // Take all the funds from the service account. + let tokenVault: auth(FungibleToken.Withdraw) &FlowToken.Vault = service.storage + .borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("Unable to borrow reference to the default token vault") for account in authAccounts { let storageReservation <- tokenVault.withdraw(amount: FlowStorageFees.minimumStorageReservation) as! @FlowToken.Vault - let hasReceiver = account.getCapability(/public/flowTokenReceiver)!.check<&{FungibleToken.Receiver}>() - if !hasReceiver { + + let receiverCap = account.capabilities.get<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) + if !receiverCap.check() { FlowServiceAccount.initDefaultToken(account) } - let receiver = account.getCapability(/public/flowTokenReceiver)!.borrow<&{FungibleToken.Receiver}>() + + let receiver = account.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? panic("Could not borrow receiver reference to the recipient's Vault") receiver.deposit(from: <-storageReservation) diff --git a/fvm/blueprints/scripts/systemChunkTransactionTemplate.cdc b/fvm/blueprints/scripts/systemChunkTransactionTemplate.cdc index bdc083bddf2..8ce4750c7e4 100644 --- a/fvm/blueprints/scripts/systemChunkTransactionTemplate.cdc +++ b/fvm/blueprints/scripts/systemChunkTransactionTemplate.cdc @@ -1,15 +1,31 @@ -import FlowEpoch from 0xEPOCHADDRESS -import NodeVersionBeacon from 0xNODEVERSIONBEACONADDRESS +import "FlowEpoch" +import "NodeVersionBeacon" +import "RandomBeaconHistory" +import "EVM" +import Migration from "Migration" transaction { - prepare(serviceAccount: AuthAccount) { - let epochHeartbeat = serviceAccount.borrow<&FlowEpoch.Heartbeat>(from: FlowEpoch.heartbeatStoragePath) + prepare(serviceAccount: auth(BorrowValue) &Account) { + let epochHeartbeat = serviceAccount.storage.borrow<&FlowEpoch.Heartbeat>(from: FlowEpoch.heartbeatStoragePath) ?? 
panic("Could not borrow heartbeat from storage path") epochHeartbeat.advanceBlock() - let versionBeaconHeartbeat = serviceAccount.borrow<&NodeVersionBeacon.Heartbeat>( - from: NodeVersionBeacon.HeartbeatStoragePath) - ?? panic("Couldn't borrow NodeVersionBeacon.Heartbeat Resource") + let versionBeaconHeartbeat = serviceAccount.storage + .borrow<&NodeVersionBeacon.Heartbeat>(from: NodeVersionBeacon.HeartbeatStoragePath) + ?? panic("Couldn't borrow NodeVersionBeacon.Heartbeat Resource") versionBeaconHeartbeat.heartbeat() + + let randomBeaconHistoryHeartbeat = serviceAccount.storage + .borrow<&RandomBeaconHistory.Heartbeat>(from: RandomBeaconHistory.HeartbeatStoragePath) + ?? panic("Couldn't borrow RandomBeaconHistory.Heartbeat Resource") + randomBeaconHistoryHeartbeat.heartbeat(randomSourceHistory: randomSourceHistory()) + + let evmHeartbeat = serviceAccount.storage + .borrow<&EVM.Heartbeat>(from: /storage/EVMHeartbeat) + evmHeartbeat?.heartbeat() + + let migrationAdmin = serviceAccount.storage + .borrow<&Migration.Admin>(from: Migration.adminStoragePath) + migrationAdmin?.migrate() } } diff --git a/fvm/blueprints/scripts/systemChunkTransactionTemplateDualAuthorizer.cdc b/fvm/blueprints/scripts/systemChunkTransactionTemplateDualAuthorizer.cdc deleted file mode 100644 index 7c5d60d2a97..00000000000 --- a/fvm/blueprints/scripts/systemChunkTransactionTemplateDualAuthorizer.cdc +++ /dev/null @@ -1,18 +0,0 @@ -import FlowEpoch from 0xEPOCHADDRESS -import NodeVersionBeacon from 0xNODEVERSIONBEACONADDRESS - -transaction { - prepare(serviceAccount: AuthAccount, epochAccount: AuthAccount) { - let epochHeartbeat = - serviceAccount.borrow<&FlowEpoch.Heartbeat>(from: FlowEpoch.heartbeatStoragePath) ?? - epochAccount.borrow<&FlowEpoch.Heartbeat>(from: FlowEpoch.heartbeatStoragePath) ?? - panic("Could not borrow heartbeat from storage path") - epochHeartbeat.advanceBlock() - - let versionBeaconHeartbeat = - serviceAccount.borrow<&NodeVersionBeacon.Heartbeat>(from: NodeVersionBeacon.HeartbeatStoragePath) ?? - epochAccount.borrow<&NodeVersionBeacon.Heartbeat>(from: NodeVersionBeacon.HeartbeatStoragePath) ?? - panic("Couldn't borrow NodeVersionBeacon.Heartbeat Resource") - versionBeaconHeartbeat.heartbeat() - } -} diff --git a/fvm/blueprints/source_of_randomness.go b/fvm/blueprints/source_of_randomness.go new file mode 100644 index 00000000000..9399a695f55 --- /dev/null +++ b/fvm/blueprints/source_of_randomness.go @@ -0,0 +1,28 @@ +package blueprints + +import ( + _ "embed" + + "github.com/onflow/cadence" + jsoncdc "github.com/onflow/cadence/encoding/json" + + "github.com/onflow/flow-core-contracts/lib/go/contracts" + + "github.com/onflow/flow-go/model/flow" +) + +//go:embed scripts/deployRandomBeaconHistoryTransactionTemplate.cdc +var deployRandomBeaconHistoryTransactionTemplate string + +// DeployRandomBeaconHistoryTransaction returns the transaction body for the deployment +// of the RandomBeaconHistory contract transaction +func DeployRandomBeaconHistoryTransaction( + service flow.Address, +) (*flow.TransactionBody, error) { + return flow.NewTransactionBodyBuilder(). + SetScript([]byte(deployRandomBeaconHistoryTransactionTemplate)). + SetPayer(service). + AddArgument(jsoncdc.MustEncode(cadence.String(contracts.RandomBeaconHistory()))). + AddAuthorizer(service). 
+ Build() +} diff --git a/fvm/blueprints/system.go b/fvm/blueprints/system.go index f4c6893b34b..b400e62cf35 100644 --- a/fvm/blueprints/system.go +++ b/fvm/blueprints/system.go @@ -3,6 +3,7 @@ package blueprints import ( _ "embed" "fmt" + "strings" "github.com/onflow/flow-core-contracts/lib/go/templates" @@ -12,75 +13,41 @@ import ( const SystemChunkTransactionGasLimit = 100_000_000 -// TODO (Ramtin) after changes to this method are merged into master move them here. - // systemChunkTransactionTemplate looks for the epoch and version beacon heartbeat resources // and calls them. // //go:embed scripts/systemChunkTransactionTemplate.cdc var systemChunkTransactionTemplate string +const placeholderMigrationAddress = "\"Migration\"" + +func prepareSystemContractCode(chainID flow.ChainID) string { + sc := systemcontracts.SystemContractsForChain(chainID) + code := templates.ReplaceAddresses( + systemChunkTransactionTemplate, + sc.AsTemplateEnv(), + ) + code = strings.ReplaceAll( + code, + placeholderMigrationAddress, + sc.Migration.Address.HexWithPrefix(), + ) + return code +} + // SystemChunkTransaction creates and returns the transaction corresponding to the // system chunk for the given chain. func SystemChunkTransaction(chain flow.Chain) (*flow.TransactionBody, error) { - contracts, err := systemcontracts.SystemContractsForChain(chain.ChainID()) + // The heartbeat resources needed by the system tx have are on the service account, + // therefore, the service account is the only authorizer needed. + systemTxBody, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(prepareSystemContractCode(chain.ChainID()))). + SetComputeLimit(SystemChunkTransactionGasLimit). + AddAuthorizer(chain.ServiceAddress()). + Build() if err != nil { - return nil, fmt.Errorf("could not get system contracts for chain: %w", err) + return nil, fmt.Errorf("could not build system chunk transaction: %w", err) } - // this is only true for testnet, sandboxnet and mainnet. - if contracts.Epoch.Address != chain.ServiceAddress() { - // Temporary workaround because the heartbeat resources need to be moved - // to the service account: - // - the system chunk will attempt to load both Epoch and VersionBeacon - // resources from either the service account or the staking account - // - the service account committee can then safely move the resources - // at any time - // - once the resources are moved, this workaround should be removed - // after version v0.31.0 - return systemChunkTransactionDualAuthorizers(chain, contracts) - } - - tx := flow.NewTransactionBody(). - SetScript( - []byte(templates.ReplaceAddresses( - systemChunkTransactionTemplate, - templates.Environment{ - EpochAddress: contracts.Epoch.Address.Hex(), - NodeVersionBeaconAddress: contracts.NodeVersionBeacon.Address.Hex(), - }, - )), - ). - AddAuthorizer(contracts.Epoch.Address). - SetGasLimit(SystemChunkTransactionGasLimit) - - return tx, nil -} - -// systemChunkTransactionTemplateDualAuthorizer is the same as systemChunkTransactionTemplate -// but it looks for the heartbeat resources on two different accounts. -// -//go:embed scripts/systemChunkTransactionTemplateDualAuthorizer.cdc -var systemChunkTransactionTemplateDualAuthorizer string - -func systemChunkTransactionDualAuthorizers( - chain flow.Chain, - contracts *systemcontracts.SystemContracts, -) (*flow.TransactionBody, error) { - - tx := flow.NewTransactionBody(). 
-		SetScript(
-			[]byte(templates.ReplaceAddresses(
-				systemChunkTransactionTemplateDualAuthorizer,
-				templates.Environment{
-					EpochAddress:             contracts.Epoch.Address.Hex(),
-					NodeVersionBeaconAddress: contracts.NodeVersionBeacon.Address.Hex(),
-				},
-			)),
-		).
-		AddAuthorizer(chain.ServiceAddress()).
-		AddAuthorizer(contracts.Epoch.Address).
-		SetGasLimit(SystemChunkTransactionGasLimit)
-
-	return tx, nil
+	return systemTxBody, nil
 }
diff --git a/fvm/blueprints/system_test.go b/fvm/blueprints/system_test.go
new file mode 100644
index 00000000000..2e2f924b279
--- /dev/null
+++ b/fvm/blueprints/system_test.go
@@ -0,0 +1,61 @@
+package blueprints_test
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/fvm/blueprints"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// TestSystemChunkTransactionHash tests that the hash of the system chunk transaction does not change.
+// We currently give no guarantees about the system transaction hash not changing block-to-block, but the
+// community depends on the hash not changing. If the hash changes, the community should be notified in the release notes.
+func TestSystemChunkTransactionHash(t *testing.T) {
+	t.Parallel()
+
+	// this is formatted in a way that the resulting error message is easy to copy-paste into the test.
+	expectedHashes := []chainHash{
+		{chainId: "flow-mainnet", expectedHash: "3408f8b1aa1b33cfc3f78c3f15217272807b14cec4ef64168bcf313bc4174621"},
+		{chainId: "flow-testnet", expectedHash: "dadf3e1bf916f6cb2510cbea00ed9be78cc1b7d2b9ec29f0ef1d469ead2dda2d"},
+		{chainId: "flow-previewnet", expectedHash: "ecee9d431f3ab406c64bd31c2b574035a9971feabd872f5e8f31b55dd08978f3"},
+		{chainId: "flow-emulator", expectedHash: "d201f7b80ee8471754e2a1cad30f5ab888d4be3ba2c0a1cac5a3fcc0b34546a4"},
+	}
+
+	var actualHashes []chainHash
+
+	for _, expected := range expectedHashes {
+		chain := flow.ChainID(expected.chainId)
+
+		txBody, err := blueprints.SystemChunkTransaction(chain.Chain())
+		require.NoError(t, err)
+
+		actualID := txBody.ID().String()
+		actualHashes = append(actualHashes, chainHash{chainId: expected.chainId, expectedHash: actualID})
+	}
+
+	require.Equal(t, expectedHashes, actualHashes,
+		"Hashes of the system transactions have changed.\n"+
+			"The community should be notified!\n\n"+
+			"Update the expected hashes with the following values:\n%s", formatHashes(actualHashes))
+
+}
+
+type chainHash struct {
+	chainId      string
+	expectedHash string
+}
+
+func formatHashes(hashes []chainHash) string {
+	b := strings.Builder{}
+	for _, h := range hashes {
+		b.WriteString("{chainId: \"")
+		b.WriteString(h.chainId)
+		b.WriteString("\", expectedHash: \"")
+		b.WriteString(h.expectedHash)
+		b.WriteString("\"},\n")
+	}
+	return b.String()
+}
diff --git a/fvm/blueprints/token.go b/fvm/blueprints/token.go
index 92cc09e22c3..3044e2c9822 100644
--- a/fvm/blueprints/token.go
+++ b/fvm/blueprints/token.go
@@ -7,19 +7,87 @@ import (
 	"github.com/onflow/cadence"
 	jsoncdc "github.com/onflow/cadence/encoding/json"

+	"github.com/onflow/flow-core-contracts/lib/go/contracts"
 	"github.com/onflow/flow-core-contracts/lib/go/templates"

 	"github.com/onflow/flow-go/model/flow"
 )

-func DeployFungibleTokenContractTransaction(fungibleToken flow.Address) *flow.TransactionBody {
-	contract := contracts.FungibleToken()
+func DeployFungibleTokenContractTransaction(fungibleToken flow.Address, contract []byte) *flow.TransactionBodyBuilder {
 	contractName := "FungibleToken"
 	return DeployContractTransaction(
 		fungibleToken,
contract, - contractName) + contractName, + ) +} + +func DeployNonFungibleTokenContractTransaction(nonFungibleToken flow.Address, contract []byte) *flow.TransactionBodyBuilder { + contractName := "NonFungibleToken" + return DeployContractTransaction( + nonFungibleToken, + contract, + contractName, + ) +} + +func DeployMetadataViewsContractTransaction(nonFungibleToken flow.Address, contract []byte) *flow.TransactionBodyBuilder { + contractName := "MetadataViews" + return DeployContractTransaction( + nonFungibleToken, + contract, + contractName, + ) +} + +func DeployCrossVMMetadataViewsContractTransaction(nonFungibleToken flow.Address, contract []byte) *flow.TransactionBodyBuilder { + contractName := "CrossVMMetadataViews" + return DeployContractTransaction( + nonFungibleToken, + contract, + contractName, + ) +} + +func DeployViewResolverContractTransaction(nonFungibleToken flow.Address) *flow.TransactionBodyBuilder { + contract := contracts.ViewResolver() + contractName := "ViewResolver" + return DeployContractTransaction( + nonFungibleToken, + contract, + contractName, + ) +} + +func DeployBurnerContractTransaction(fungibleToken flow.Address) *flow.TransactionBodyBuilder { + contract := contracts.Burner() + contractName := "Burner" + return DeployContractTransaction( + fungibleToken, + contract, + contractName, + ) +} + +func DeployFungibleTokenMetadataViewsContractTransaction(fungibleToken flow.Address, contract []byte) *flow.TransactionBodyBuilder { + + contractName := "FungibleTokenMetadataViews" + return DeployContractTransaction( + fungibleToken, + contract, + contractName, + ) +} + +func DeployFungibleTokenSwitchboardContractTransaction(fungibleToken flow.Address, contract []byte) *flow.TransactionBodyBuilder { + + contractName := "FungibleTokenSwitchboard" + return DeployContractTransaction( + fungibleToken, + contract, + contractName, + ) } //go:embed scripts/deployFlowTokenTransactionTemplate.cdc @@ -31,46 +99,67 @@ var createFlowTokenMinterTransactionTemplate string //go:embed scripts/mintFlowTokenTransactionTemplate.cdc var mintFlowTokenTransactionTemplate string -func DeployFlowTokenContractTransaction(service, fungibleToken, flowToken flow.Address) *flow.TransactionBody { - contract := contracts.FlowToken(fungibleToken.HexWithPrefix()) - - return flow.NewTransactionBody(). +func DeployFlowTokenContractTransaction(service, flowToken flow.Address, contract []byte) (*flow.TransactionBody, error) { + return flow.NewTransactionBodyBuilder(). SetScript([]byte(deployFlowTokenTransactionTemplate)). + SetPayer(service). AddArgument(jsoncdc.MustEncode(cadence.String(hex.EncodeToString(contract)))). AddAuthorizer(flowToken). - AddAuthorizer(service) + AddAuthorizer(service). + Build() } // CreateFlowTokenMinterTransaction returns a transaction which creates a Flow // token Minter resource and stores it in the service account. This Minter is // expected to be stored here by the epoch smart contracts. -func CreateFlowTokenMinterTransaction(service, flowToken flow.Address) *flow.TransactionBody { - return flow.NewTransactionBody(). +func CreateFlowTokenMinterTransaction(service, flowToken flow.Address) (*flow.TransactionBody, error) { + return flow.NewTransactionBodyBuilder(). SetScript([]byte(templates.ReplaceAddresses( createFlowTokenMinterTransactionTemplate, templates.Environment{ FlowTokenAddress: flowToken.Hex(), })), ). - AddAuthorizer(service) + SetPayer(service). + AddAuthorizer(service). 
+		Build()
+}

 func MintFlowTokenTransaction(
 	fungibleToken, flowToken, service flow.Address,
 	initialSupply cadence.UFix64,
-) *flow.TransactionBody {
+) (*flow.TransactionBody, error) {
 	initialSupplyArg, err := jsoncdc.Encode(initialSupply)
 	if err != nil {
 		panic(fmt.Sprintf("failed to encode initial token supply: %s", err.Error()))
 	}

-	return flow.NewTransactionBody().
+	return flow.NewTransactionBodyBuilder().
 		SetScript([]byte(templates.ReplaceAddresses(mintFlowTokenTransactionTemplate,
 			templates.Environment{
 				FlowTokenAddress:     flowToken.Hex(),
 				FungibleTokenAddress: fungibleToken.Hex(),
 			})),
 		).
+		SetPayer(service).
 		AddArgument(initialSupplyArg).
-		AddAuthorizer(service)
+		AddAuthorizer(service).
+		Build()
+}
+
+func TransferFlowTokenTransaction(
+	env templates.Environment,
+	from, to flow.Address,
+	amount string,
+) *flow.TransactionBodyBuilder {
+	// an invalid amount is a programming error in bootstrapping code, so fail fast
+	cadenceAmount, err := cadence.NewUFix64(amount)
+	if err != nil {
+		panic(fmt.Sprintf("failed to parse transfer amount %q: %s", amount, err.Error()))
+	}
+	txScript := templates.GenerateTransferGenericVaultWithAddressScript(env)
+	return flow.NewTransactionBodyBuilder().
+		SetScript(txScript).
+		SetPayer(from).
+		AddArgument(jsoncdc.MustEncode(cadenceAmount)).
+		AddArgument(jsoncdc.MustEncode(cadence.NewAddress(to))).
+		AddArgument(jsoncdc.MustEncode(cadence.NewAddress(flow.HexToAddress(env.FlowTokenAddress)))).
+		AddArgument(jsoncdc.MustEncode(cadence.String("FlowToken"))).
+		AddAuthorizer(from)
 }
diff --git a/fvm/blueprints/version_beacon.go b/fvm/blueprints/version_beacon.go
index ba3535db728..6b6a38423d5 100644
--- a/fvm/blueprints/version_beacon.go
+++ b/fvm/blueprints/version_beacon.go
@@ -19,10 +19,12 @@ var deployNodeVersionBeaconTransactionTemplate string
 func DeployNodeVersionBeaconTransaction(
 	service flow.Address,
 	versionFreezePeriod cadence.UInt64,
-) *flow.TransactionBody {
-	return flow.NewTransactionBody().
+) (*flow.TransactionBody, error) {
+	return flow.NewTransactionBodyBuilder().
 		SetScript([]byte(deployNodeVersionBeaconTransactionTemplate)).
+		SetPayer(service).
 		AddArgument(jsoncdc.MustEncode(cadence.String(hex.EncodeToString(contracts.NodeVersionBeacon())))).
 		AddArgument(jsoncdc.MustEncode(versionFreezePeriod)).
-		AddAuthorizer(service)
+		AddAuthorizer(service).
+ Build() } diff --git a/fvm/bootstrap.go b/fvm/bootstrap.go index 72d75919927..fe91d1975b1 100644 --- a/fvm/bootstrap.go +++ b/fvm/bootstrap.go @@ -3,16 +3,26 @@ package fvm import ( "fmt" "math" + "strings" "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" "github.com/onflow/flow-core-contracts/lib/go/contracts" + "github.com/onflow/flow-core-contracts/lib/go/templates" + + bridge "github.com/onflow/flow-evm-bridge" + storefront "github.com/onflow/nft-storefront/lib/go/contracts" "github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/evm/stdlib" "github.com/onflow/flow-go/fvm/meter" + "github.com/onflow/flow-go/fvm/migration" "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/storage/logical" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/epochs" ) @@ -65,7 +75,7 @@ type BootstrapProcedure struct { } type BootstrapParams struct { - rootBlock *flow.Header + rootHeader *flow.Header // genesis parameters accountKeys BootstrapAccountKeys @@ -75,6 +85,8 @@ type BootstrapParams struct { minimumStorageReservation cadence.UFix64 storagePerFlow cadence.UFix64 restrictedAccountCreationEnabled cadence.Bool + setupEVMEnabled cadence.Bool + setupVMBridgeEnabled cadence.Bool // versionFreezePeriod is the number of blocks in the future where the version // changes are frozen. The Node version beacon manages the freeze period, @@ -98,7 +110,7 @@ type BootstrapParams struct { // list of initial network participants for whom we will create/stake flow // accounts and retrieve epoch-related resources - identities flow.IdentityList + nodes []bootstrap.NodeInfo } type BootstrapAccountKeys struct { @@ -184,14 +196,14 @@ func WithEpochConfig(epochConfig epochs.EpochConfig) BootstrapProcedureOption { func WithRootBlock(rootBlock *flow.Header) BootstrapProcedureOption { return func(bp *BootstrapProcedure) *BootstrapProcedure { - bp.rootBlock = rootBlock + bp.rootHeader = rootBlock return bp } } -func WithIdentities(identities flow.IdentityList) BootstrapProcedureOption { +func WithNodes(nodes []bootstrap.NodeInfo) BootstrapProcedureOption { return func(bp *BootstrapProcedure) *BootstrapProcedure { - bp.identities = identities + bp.nodes = nodes return bp } } @@ -210,6 +222,22 @@ func WithRestrictedAccountCreationEnabled(enabled cadence.Bool) BootstrapProcedu } } +func WithSetupEVMEnabled(enabled cadence.Bool) BootstrapProcedureOption { + return func(bp *BootstrapProcedure) *BootstrapProcedure { + bp.setupEVMEnabled = enabled + return bp + } +} + +// Option to deploy and setup the Flow VM bridge during bootstrapping +// so that assets can be bridged between Flow-Cadence and Flow-EVM +func WithSetupVMBridgeEnabled(enabled cadence.Bool) BootstrapProcedureOption { + return func(bp *BootstrapProcedure) *BootstrapProcedure { + bp.setupVMBridgeEnabled = enabled + return bp + } +} + func WithRestrictedContractDeployment(restricted *bool) BootstrapProcedureOption { return func(bp *BootstrapProcedure) *BootstrapProcedure { bp.restrictedContractDeployment = restricted @@ -234,6 +262,7 @@ func Bootstrap( transactionFees: BootstrapProcedureFeeParameters{0, 0, 0}, epochConfig: epochs.DefaultEpochConfig(), versionFreezePeriod: DefaultVersionFreezePeriod, + setupEVMEnabled: true, }, } @@ -277,6 +306,8 @@ type bootstrapExecutor struct { txnState 
storage.TransactionPreparer accountCreator environment.BootstrapAccountCreator + + output ProcedureOutput } func newBootstrapExecutor( @@ -290,6 +321,7 @@ func newBootstrapExecutor( ctx, WithContractDeploymentRestricted(false)), txnState: txnState, + output: ProcedureOutput{}, } } @@ -298,7 +330,7 @@ func (b *bootstrapExecutor) Cleanup() { } func (b *bootstrapExecutor) Output() ProcedureOutput { - return ProcedureOutput{} + return b.output } func (b *bootstrapExecutor) Preprocess() error { @@ -307,7 +339,14 @@ func (b *bootstrapExecutor) Preprocess() error { } func (b *bootstrapExecutor) Execute() error { - b.rootBlock = flow.Genesis(flow.ChainID(b.ctx.Chain.String())).Header + if b.rootHeader == nil { + header, err := b.genesisHeader() + if err != nil { + return fmt.Errorf("failed to create genesis header: %w", err) + } + + b.rootHeader = header + } // initialize the account addressing state b.accountCreator = environment.NewBootstrapAccountCreator( @@ -315,18 +354,70 @@ func (b *bootstrapExecutor) Execute() error { b.ctx.Chain, environment.NewAccounts(b.txnState)) + expectAccounts := func(n uint64) error { + ag := environment.NewAddressGenerator(b.txnState, b.ctx.Chain) + currentAddresses := ag.AddressCount() + if currentAddresses != n { + return fmt.Errorf("expected %d accounts, got %d", n, currentAddresses) + } + return nil + } + service := b.createServiceAccount() - fungibleToken := b.deployFungibleToken() - flowToken := b.deployFlowToken(service, fungibleToken) - storageFees := b.deployStorageFees(service, fungibleToken, flowToken) - feeContract := b.deployFlowFees(service, fungibleToken, flowToken, storageFees) + err := expectAccounts(1) + if err != nil { + return err + } + + env := templates.Environment{ + ServiceAccountAddress: service.String(), + } + + b.deployViewResolver(service, &env) + b.deployBurner(service, &env) + b.deployCrypto(service, &env) + + err = expectAccounts(1) + if err != nil { + return err + } + + fungibleToken := b.deployFungibleToken(&env) + + err = expectAccounts(systemcontracts.FungibleTokenAccountIndex) + if err != nil { + return err + } + + nonFungibleToken := b.deployNonFungibleToken(service, &env) + + b.deployMetadataViews(fungibleToken, nonFungibleToken, &env) + b.deployFungibleTokenSwitchboard(fungibleToken, &env) + + // deploys the NFTStorefrontV2 contract + b.deployNFTStorefrontV2(nonFungibleToken, &env) + + flowToken := b.deployFlowToken(service, &env) + err = expectAccounts(systemcontracts.FlowTokenAccountIndex) + if err != nil { + return err + } + + b.deployStorageFees(service, &env) + feeContract := b.deployFlowFees(service, &env) + err = expectAccounts(systemcontracts.FlowFeesAccountIndex) + if err != nil { + return err + } if b.initialTokenSupply > 0 { b.mintInitialTokens(service, fungibleToken, flowToken, b.initialTokenSupply) } - b.deployServiceAccount(service, fungibleToken, flowToken, feeContract) + b.deployExecutionParameters(fungibleToken, &env) + b.setupExecutionWeights(fungibleToken) + b.deployServiceAccount(service, &env) b.setupParameters( service, @@ -346,39 +437,88 @@ func (b *bootstrapExecutor) Execute() error { b.setContractDeploymentRestrictions(service, b.restrictedContractDeployment) - b.setupExecutionWeights(service) - b.setupStorageForServiceAccounts(service, fungibleToken, flowToken, feeContract) b.createMinter(service, flowToken) - b.deployDKG(service) + b.deployDKG(service, &env) + + b.deployQC(service, &env) - b.deployQC(service) + b.deployIDTableStaking(service, &env) - b.deployIDTableStaking(service, fungibleToken, 
flowToken, feeContract) + b.deployEpoch(service, &env) - b.deployEpoch(service, fungibleToken, flowToken, feeContract) + b.deployVersionBeacon(service, b.versionFreezePeriod, &env) - b.deployVersionBeacon(service, b.versionFreezePeriod) + b.deployRandomBeaconHistory(service, &env) // deploy staking proxy contract to the service account - b.deployStakingProxyContract(service) + b.deployStakingProxyContract(service, &env) // deploy locked tokens contract to the service account - b.deployLockedTokensContract(service, fungibleToken, flowToken) + b.deployLockedTokensContract(service, &env) // deploy staking collection contract to the service account - b.deployStakingCollection(service, fungibleToken, flowToken) + b.deployStakingCollection(service, &env) + + // deploy flow transaction scheduler contract to the service account + b.deployFlowTransactionScheduler(service, &env) + + // sets up the EVM environment + b.setupEVM(service, nonFungibleToken, fungibleToken, flowToken, &env) + b.setupVMBridge(service, &env) + + b.deployCrossVMMetadataViews(nonFungibleToken, &env) + + err = expectAccounts(systemcontracts.EVMStorageAccountIndex) + if err != nil { + return err + } b.registerNodes(service, fungibleToken, flowToken) // set the list of nodes which are allowed to stake in this network - b.setStakingAllowlist(service, b.identities.NodeIDs()) + b.setStakingAllowlist(service, bootstrap.ToIdentityList(b.nodes).NodeIDs()) + + b.deployMigrationContract(service) return nil } +// genesisHeader creates genesis block header with empty payload. +// +// This function must always return a structurally valid genesis block header. +func (b *bootstrapExecutor) genesisHeader() (*flow.Header, error) { + // create the raw content for the genesis block + payload := flow.NewEmptyPayload() + + // create the headerBody + headerBody, err := flow.NewRootHeaderBody( + flow.UntrustedHeaderBody{ + ChainID: b.ctx.Chain.ChainID(), + ParentID: flow.ZeroID, + Height: 0, + Timestamp: uint64(flow.GenesisTime.UnixMilli()), + View: 0, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create root header body: %w", err) + } + + header, err := flow.NewRootHeader( + flow.UntrustedHeader{ + HeaderBody: *headerBody, + PayloadHash: payload.Hash(), + }) + if err != nil { + return nil, fmt.Errorf("failed to create root header: %w", err) + } + + return header, nil +} + func (b *bootstrapExecutor) createAccount(publicKeys []flow.AccountPublicKey) flow.Address { address, err := b.accountCreator.CreateBootstrapAccount(publicKeys) if err != nil { @@ -398,189 +538,311 @@ func (b *bootstrapExecutor) createServiceAccount() flow.Address { return address } -func (b *bootstrapExecutor) deployFungibleToken() flow.Address { +func (b *bootstrapExecutor) deployFungibleToken(env *templates.Environment) flow.Address { fungibleToken := b.createAccount(b.accountKeys.FungibleTokenAccountPublicKeys) - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.DeployFungibleTokenContractTransaction(fungibleToken), - 0), - ) + contract := contracts.FungibleToken(*env) + + txBody, err := blueprints.DeployFungibleTokenContractTransaction(fungibleToken, contract).Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy fungible token transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.FungibleTokenAddress = fungibleToken.String() panicOnMetaInvokeErrf("failed to deploy fungible token contract: %s", txError, err) return fungibleToken } -func (b *bootstrapExecutor) 
deployFlowToken(service, fungibleToken flow.Address) flow.Address { +func (b *bootstrapExecutor) deployNonFungibleToken(deployTo flow.Address, env *templates.Environment) flow.Address { + contract := contracts.NonFungibleToken(*env) + + txBody, err := blueprints.DeployNonFungibleTokenContractTransaction(deployTo, contract).Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy non-fungible token transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.NonFungibleTokenAddress = deployTo.String() + panicOnMetaInvokeErrf("failed to deploy non-fungible token contract: %s", txError, err) + return deployTo +} + +func (b *bootstrapExecutor) deployViewResolver(deployTo flow.Address, env *templates.Environment) { + txBody, err := blueprints.DeployViewResolverContractTransaction(deployTo).Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy view resolver transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.ViewResolverAddress = deployTo.String() + panicOnMetaInvokeErrf("failed to deploy view resolver contract: %s", txError, err) +} + +func (b *bootstrapExecutor) deployBurner(deployTo flow.Address, env *templates.Environment) { + txBody, err := blueprints.DeployBurnerContractTransaction(deployTo).Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy burner contract transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.BurnerAddress = deployTo.String() + panicOnMetaInvokeErrf("failed to deploy burner contract: %s", txError, err) +} + +func (b *bootstrapExecutor) deployCrypto(deployTo flow.Address, env *templates.Environment) { + contract := contracts.Crypto() + + txBody, err := blueprints.DeployContractTransaction( + deployTo, + contract, + "Crypto").Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy crypto contract transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.CryptoAddress = deployTo.String() + panicOnMetaInvokeErrf("failed to deploy crypto contract: %s", txError, err) +} + +func (b *bootstrapExecutor) deployMetadataViews(fungibleToken, nonFungibleToken flow.Address, env *templates.Environment) { + mvContract := contracts.MetadataViews(*env) + + txBody, err := blueprints.DeployMetadataViewsContractTransaction(nonFungibleToken, mvContract).Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy metadata views transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.MetadataViewsAddress = nonFungibleToken.String() + panicOnMetaInvokeErrf("failed to deploy metadata views contract: %s", txError, err) + + ftmvContract := contracts.FungibleTokenMetadataViews(*env) + + txBody, err = blueprints.DeployFungibleTokenMetadataViewsContractTransaction(fungibleToken, ftmvContract).Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy fungible token metadata views transaction: %s", err)) + } + txError, err = b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.FungibleTokenMetadataViewsAddress = fungibleToken.String() + panicOnMetaInvokeErrf("failed to deploy fungible token metadata views contract: %s", txError, err) +} + +func (b *bootstrapExecutor) deployCrossVMMetadataViews(nonFungibleToken flow.Address, env *templates.Environment) { + if !bool(b.setupEVMEnabled) || + !bool(b.setupVMBridgeEnabled) || + !b.ctx.Chain.ChainID().Transient() { + 
return + } + + crossVMMVContract := contracts.CrossVMMetadataViews(*env) + + txBody, err := blueprints.DeployCrossVMMetadataViewsContractTransaction(nonFungibleToken, crossVMMVContract).Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy cross VM metadata views transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.CrossVMMetadataViewsAddress = nonFungibleToken.String() + panicOnMetaInvokeErrf("failed to deploy cross VM metadata views contract: %s", txError, err) +} + +func (b *bootstrapExecutor) deployFungibleTokenSwitchboard(deployTo flow.Address, env *templates.Environment) { + contract := contracts.FungibleTokenSwitchboard(*env) + txBody, err := blueprints.DeployFungibleTokenSwitchboardContractTransaction(deployTo, contract).Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy fungible token switchboard transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.FungibleTokenSwitchboardAddress = deployTo.String() + panicOnMetaInvokeErrf("failed to deploy fungible token switchboard contract: %s", txError, err) +} + +func (b *bootstrapExecutor) deployFlowToken(service flow.Address, env *templates.Environment) flow.Address { flowToken := b.createAccount(b.accountKeys.FlowTokenAccountPublicKeys) - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.DeployFlowTokenContractTransaction( - service, - fungibleToken, - flowToken), - 0), - ) + contract := contracts.FlowToken(*env) + txBody, err := blueprints.DeployFlowTokenContractTransaction(service, flowToken, contract) + if err != nil { + panic(fmt.Sprintf("failed to build deploy Flow token transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.FlowTokenAddress = flowToken.String() panicOnMetaInvokeErrf("failed to deploy Flow token contract: %s", txError, err) return flowToken } -func (b *bootstrapExecutor) deployFlowFees(service, fungibleToken, flowToken, storageFees flow.Address) flow.Address { +func (b *bootstrapExecutor) deployFlowFees(service flow.Address, env *templates.Environment) flow.Address { flowFees := b.createAccount(b.accountKeys.FlowFeesAccountPublicKeys) - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.DeployTxFeesContractTransaction( - service, - fungibleToken, - flowToken, - storageFees, - flowFees, - ), - 0), + contract := contracts.FlowFees( + *env, ) + + txBody, err := blueprints.DeployTxFeesContractTransaction(flowFees, service, contract).Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy FlowFees transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.FlowFeesAddress = flowFees.String() panicOnMetaInvokeErrf("failed to deploy fees contract: %s", txError, err) return flowFees } -func (b *bootstrapExecutor) deployStorageFees(service, fungibleToken, flowToken flow.Address) flow.Address { - contract := contracts.FlowStorageFees( - fungibleToken.HexWithPrefix(), - flowToken.HexWithPrefix(), - ) +func (b *bootstrapExecutor) deployStorageFees(deployTo flow.Address, env *templates.Environment) { + contract := contracts.FlowStorageFees(*env) + txBody, err := blueprints.DeployStorageFeesContractTransaction(deployTo, contract).Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy storage fees transaction: %s", err)) + } // deploy storage fees contract on the service account - txError, err :=
b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.DeployStorageFeesContractTransaction( - service, - contract), - 0), - ) + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.StorageFeesAddress = deployTo.String() panicOnMetaInvokeErrf("failed to deploy storage fees contract: %s", txError, err) - return service } func (b *bootstrapExecutor) createMinter(service, flowToken flow.Address) { - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.CreateFlowTokenMinterTransaction( - service, - flowToken), - 0), - ) + txBody, err := blueprints.CreateFlowTokenMinterTransaction(service, flowToken) + if err != nil { + panic(fmt.Sprintf("failed to build create flow token minter transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) panicOnMetaInvokeErrf("failed to create flow token minter: %s", txError, err) } -func (b *bootstrapExecutor) deployDKG(service flow.Address) { +func (b *bootstrapExecutor) deployDKG(deployTo flow.Address, env *templates.Environment) { contract := contracts.FlowDKG() - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.DeployContractTransaction(service, contract, "FlowDKG"), - 0, - ), - ) + txBody, err := blueprints.DeployContractTransaction(deployTo, contract, "FlowDKG").Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy DKG contract transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.DkgAddress = deployTo.String() panicOnMetaInvokeErrf("failed to deploy DKG contract: %s", txError, err) } -func (b *bootstrapExecutor) deployQC(service flow.Address) { +func (b *bootstrapExecutor) deployQC(deployTo flow.Address, env *templates.Environment) { contract := contracts.FlowQC() - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.DeployContractTransaction(service, contract, "FlowClusterQC"), - 0, - ), - ) + txBody, err := blueprints.DeployContractTransaction(deployTo, contract, "FlowClusterQC").Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy QC transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.QuorumCertificateAddress = deployTo.String() panicOnMetaInvokeErrf("failed to deploy QC contract: %s", txError, err) } -func (b *bootstrapExecutor) deployIDTableStaking(service, fungibleToken, flowToken, flowFees flow.Address) { - +func (b *bootstrapExecutor) deployIDTableStaking(deployTo flow.Address, env *templates.Environment) { contract := contracts.FlowIDTableStaking( - fungibleToken.HexWithPrefix(), - flowToken.HexWithPrefix(), - flowFees.HexWithPrefix(), - true) - - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.DeployIDTableStakingTransaction(service, - contract, - b.epochConfig.EpochTokenPayout, - b.epochConfig.RewardCut), - 0, - ), + *env, ) + txBody, err := blueprints.DeployIDTableStakingTransaction(deployTo, + contract, + b.epochConfig.EpochTokenPayout, + b.epochConfig.RewardCut) + if err != nil { + panic(fmt.Sprintf("failed to build deploy IDTableStaking transaction: %s", err)) + } + + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.IDTableAddress = deployTo.String() panicOnMetaInvokeErrf("failed to deploy IDTableStaking contract: %s", txError, err) } -func (b *bootstrapExecutor) deployEpoch(service, fungibleToken, flowToken, flowFees flow.Address) { - - contract := contracts.FlowEpoch( - 
fungibleToken.HexWithPrefix(), - flowToken.HexWithPrefix(), - service.HexWithPrefix(), - service.HexWithPrefix(), - service.HexWithPrefix(), - flowFees.HexWithPrefix(), - ) - +func (b *bootstrapExecutor) deployEpoch(deployTo flow.Address, env *templates.Environment) { + contract := contracts.FlowEpoch(*env) context := NewContextFromParent(b.ctx, - WithBlockHeader(b.rootBlock), + WithBlockHeader(b.rootHeader), WithBlocks(&environment.NoopBlockFinder{}), ) - txError, err := b.invokeMetaTransaction( - context, - Transaction( - blueprints.DeployEpochTransaction(service, contract, b.epochConfig), - 0, - ), - ) + txBody, err := blueprints.DeployEpochTransaction(deployTo, contract, b.epochConfig) + if err != nil { + panic(fmt.Sprintf("failed to build deploy Epoch transaction: %s", err)) + } + + txError, err := b.invokeMetaTransaction(context, Transaction(txBody, 0)) + env.EpochAddress = deployTo.String() panicOnMetaInvokeErrf("failed to deploy Epoch contract: %s", txError, err) } -func (b *bootstrapExecutor) deployServiceAccount(service, fungibleToken, flowToken, feeContract flow.Address) { +func (b *bootstrapExecutor) deployExecutionParameters(deployTo flow.Address, env *templates.Environment) { + contract := contracts.FlowExecutionParameters(*env) + txBody, err := blueprints.DeployContractTransaction(deployTo, contract, "FlowExecutionParameters").Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy FlowExecutionParameters transaction: %s", err)) + } + + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.FlowExecutionParametersAddress = deployTo.String() + panicOnMetaInvokeErrf("failed to deploy FlowExecutionParameters contract: %s", txError, err) +} + +func (b *bootstrapExecutor) deployServiceAccount(deployTo flow.Address, env *templates.Environment) { contract := contracts.FlowServiceAccount( - fungibleToken.HexWithPrefix(), - flowToken.HexWithPrefix(), - feeContract.HexWithPrefix(), - service.HexWithPrefix(), + *env, ) + txBody, err := blueprints.DeployContractTransaction( + deployTo, + contract, + "FlowServiceAccount").Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy service account transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + panicOnMetaInvokeErrf("failed to deploy service account contract: %s", txError, err) +} + +func (b *bootstrapExecutor) deployNFTStorefrontV2(deployTo flow.Address, env *templates.Environment) { + contract := storefront.NFTStorefrontV2( + env.FungibleTokenAddress, + env.NonFungibleTokenAddress) + txBody, err := blueprints.DeployContractTransaction(deployTo, contract, "NFTStorefrontV2").Build() + if err != nil { + panic(fmt.Sprintf("failed to build deploy NFTStorefrontV2 transaction: %s", err)) + } + + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + panicOnMetaInvokeErrf("failed to deploy NFTStorefrontV2 contract: %s", txError, err) +} + +func (b *bootstrapExecutor) deployFlowTransactionScheduler(deployTo flow.Address, env *templates.Environment) { + contract := contracts.FlowTransactionScheduler(*env) + txBody, err := blueprints.DeployContractTransaction(deployTo, contract, "FlowTransactionScheduler").Build() + if err != nil { + panic(fmt.Sprintf("failed to create FlowTransactionScheduler deploy transaction: %s", err)) + } txError, err := b.invokeMetaTransaction( b.ctx, - Transaction( - blueprints.DeployContractTransaction( - service, - contract, - "FlowServiceAccount"), - 0), + Transaction(txBody, 0), ) - 
panicOnMetaInvokeErrf("failed to deploy service account contract: %s", txError, err) + + env.FlowTransactionSchedulerAddress = deployTo.String() + panicOnMetaInvokeErrf("failed to deploy FlowTransactionScheduler contract: %s", txError, err) + + contract = contracts.FlowTransactionSchedulerUtils(*env) + txBody, err = blueprints.DeployContractTransaction(deployTo, contract, "FlowTransactionSchedulerUtils").Build() + if err != nil { + panic(fmt.Sprintf("failed to create FlowTransactionSchedulerUtils deploy transaction: %s", err)) + } + txError, err = b.invokeMetaTransaction( + b.ctx, + Transaction(txBody, 0), + ) + + env.FlowTransactionSchedulerUtilsAddress = deployTo.String() + panicOnMetaInvokeErrf("failed to deploy FlowTransactionSchedulerUtils contract: %s", txError, err) } func (b *bootstrapExecutor) mintInitialTokens( service, fungibleToken, flowToken flow.Address, initialSupply cadence.UFix64, ) { - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.MintFlowTokenTransaction( - fungibleToken, - flowToken, - service, - initialSupply), - 0), - ) + txBody, err := blueprints.MintFlowTokenTransaction( + fungibleToken, + flowToken, + service, + initialSupply) + if err != nil { + panic(fmt.Sprintf("failed to build mint initial token supplytransaction: %s", err)) + } + + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) panicOnMetaInvokeErrf("failed to mint initial token supply: %s", txError, err) } @@ -591,18 +853,18 @@ func (b *bootstrapExecutor) setupParameters( storagePerFlow cadence.UFix64, restrictedAccountCreationEnabled cadence.Bool, ) { - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.SetupParametersTransaction( - service, - addressCreationFee, - minimumStorageReservation, - storagePerFlow, - restrictedAccountCreationEnabled, - ), - 0), + txBody, err := blueprints.SetupParametersTransaction( + service, + addressCreationFee, + minimumStorageReservation, + storagePerFlow, + restrictedAccountCreationEnabled, ) + if err != nil { + panic(fmt.Sprintf("failed to build setup parameters transaction: %s", err)) + } + + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) panicOnMetaInvokeErrf("failed to setup parameters: %s", txError, err) } @@ -610,36 +872,36 @@ func (b *bootstrapExecutor) setupFees( service, flowFees flow.Address, surgeFactor, inclusionEffortCost, executionEffortCost cadence.UFix64, ) { - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.SetupFeesTransaction( - service, - flowFees, - surgeFactor, - inclusionEffortCost, - executionEffortCost, - ), - 0), + txBody, err := blueprints.SetupFeesTransaction( + service, + flowFees, + surgeFactor, + inclusionEffortCost, + executionEffortCost, ) + if err != nil { + panic(fmt.Sprintf("failed to build setup fees transaction: %s", err)) + } + + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) panicOnMetaInvokeErrf("failed to setup fees: %s", txError, err) } -func (b *bootstrapExecutor) setupExecutionWeights(service flow.Address) { +func (b *bootstrapExecutor) setupExecutionWeights(parametersAccount flow.Address) { // if executionEffortWeights were not set skip this part and just use the defaults. if b.executionEffortWeights != nil { - b.setupExecutionEffortWeights(service) + b.setupExecutionEffortWeights(parametersAccount) } // if executionMemoryWeights were not set skip this part and just use the defaults. 
if b.executionMemoryWeights != nil { - b.setupExecutionMemoryWeights(service) + b.setupExecutionMemoryWeights(parametersAccount) } if b.executionMemoryLimit != 0 { - b.setExecutionMemoryLimitTransaction(service) + b.setExecutionMemoryLimitTransaction(parametersAccount) } } -func (b *bootstrapExecutor) setupExecutionEffortWeights(service flow.Address) { +func (b *bootstrapExecutor) setupExecutionEffortWeights(parametersAccount flow.Address) { weights := b.executionEffortWeights uintWeights := make(map[uint]uint64, len(weights)) @@ -647,7 +909,7 @@ func (b *bootstrapExecutor) setupExecutionEffortWeights(service flow.Address) { uintWeights[uint(i)] = weight } - tb, err := blueprints.SetExecutionEffortWeightsTransaction(service, uintWeights) + tb, err := blueprints.SetExecutionEffortWeightsTransaction(parametersAccount, uintWeights) if err != nil { panic(fmt.Sprintf("failed to setup execution effort weights %s", err.Error())) } @@ -661,7 +923,7 @@ func (b *bootstrapExecutor) setupExecutionEffortWeights(service flow.Address) { panicOnMetaInvokeErrf("failed to setup execution effort weights: %s", txError, err) } -func (b *bootstrapExecutor) setupExecutionMemoryWeights(service flow.Address) { +func (b *bootstrapExecutor) setupExecutionMemoryWeights(parametersAccount flow.Address) { weights := b.executionMemoryWeights uintWeights := make(map[uint]uint64, len(weights)) @@ -669,49 +931,55 @@ func (b *bootstrapExecutor) setupExecutionMemoryWeights(service flow.Address) { uintWeights[uint(i)] = weight } - tb, err := blueprints.SetExecutionMemoryWeightsTransaction(service, uintWeights) + tb, err := blueprints.SetExecutionMemoryWeightsTransaction(parametersAccount, uintWeights) if err != nil { panic(fmt.Sprintf("failed to setup execution memory weights %s", err.Error())) } - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - tb, - 0), - ) + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(tb, 0)) panicOnMetaInvokeErrf("failed to setup execution memory weights: %s", txError, err) } -func (b *bootstrapExecutor) setExecutionMemoryLimitTransaction(service flow.Address) { +func (b *bootstrapExecutor) setExecutionMemoryLimitTransaction(parametersAccount flow.Address) { - tb, err := blueprints.SetExecutionMemoryLimitTransaction(service, b.executionMemoryLimit) + tb, err := blueprints.SetExecutionMemoryLimitTransaction(parametersAccount, b.executionMemoryLimit) if err != nil { panic(fmt.Sprintf("failed to setup execution memory limit %s", err.Error())) } - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - tb, - 0), - ) + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(tb, 0)) panicOnMetaInvokeErrf("failed to setup execution memory limit: %s", txError, err) } func (b *bootstrapExecutor) setupStorageForServiceAccounts( service, fungibleToken, flowToken, feeContract flow.Address, ) { - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.SetupStorageForServiceAccountsTransaction( - service, - fungibleToken, - flowToken, - feeContract), - 0), - ) + txBody, err := blueprints.SetupStorageForServiceAccountsTransaction( + service, + fungibleToken, + flowToken, + feeContract) + if err != nil { + panic(fmt.Sprintf("failed to build setup storage for service accounts transaction: %s", err)) + } + + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + panicOnMetaInvokeErrf("failed to setup storage for service accounts: %s", txError, err) +} + +func (b *bootstrapExecutor) setupStorageForAccount( + account, service, 
fungibleToken, flowToken flow.Address, +) { + txBody, err := blueprints.SetupStorageForAccountTransaction( + account, + service, + fungibleToken, + flowToken) + if err != nil { + panic(fmt.Sprintf("failed to build setup storage for account transaction: %s", err)) + } + + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) panicOnMetaInvokeErrf("failed to setup storage for account: %s", txError, err) } @@ -719,46 +987,393 @@ func (b *bootstrapExecutor) setStakingAllowlist( service flow.Address, allowedIDs []flow.Identifier, ) { + txBody, err := blueprints.SetStakingAllowlistTransaction(service, allowedIDs) + if err != nil { + panic(fmt.Sprintf("failed to build set staking allow-list transaction: %s", err)) + } - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.SetStakingAllowlistTransaction( - service, - allowedIDs, - ), - 0), - ) + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) panicOnMetaInvokeErrf("failed to set staking allow-list: %s", txError, err) } +func (b *bootstrapExecutor) setupEVM(serviceAddress, nonFungibleTokenAddress, fungibleTokenAddress, flowTokenAddress flow.Address, env *templates.Environment) { + if b.setupEVMEnabled { + // account for storage + // we don't need to deploy anything to this account, but it needs to exist + // so that we can store the EVM state on it + evmAcc := b.createAccount(nil) + b.setupStorageForAccount(evmAcc, serviceAddress, fungibleTokenAddress, flowTokenAddress) + + // deploy the EVM contract to the service account + txBody, err := blueprints.DeployContractTransaction( + serviceAddress, + stdlib.ContractCode(nonFungibleTokenAddress, fungibleTokenAddress, flowTokenAddress), + stdlib.ContractName, + ).Build() + if err != nil { + panic(fmt.Sprintf("failed to build EVM transaction: %s", err.Error())) + } + // WithEVMEnabled should only be used after we create an account for storage + txError, err := b.invokeMetaTransaction( + NewContextFromParent(b.ctx, WithEVMEnabled(true)), + Transaction(txBody, 0), + ) + panicOnMetaInvokeErrf("failed to deploy EVM contract: %s", txError, err) + + env.EVMAddress = env.ServiceAccountAddress + } +} + +type stubEntropyProvider struct{} + +func (stubEntropyProvider) RandomSource() ([]byte, error) { + return []byte{0}, nil +} + +func (b *bootstrapExecutor) setupVMBridge(serviceAddress flow.Address, env *templates.Environment) { + // only set up the VM bridge for transient networks + // this is because the EVM storage accounts for testnet and mainnet do not exist yet after bootstrapping + if !bool(b.setupEVMEnabled) || + !bool(b.setupVMBridgeEnabled) || + !b.ctx.Chain.ChainID().Transient() { + return + } + + bridgeEnv := bridge.Environment{ + CrossVMNFTAddress: env.ServiceAccountAddress, + CrossVMTokenAddress: env.ServiceAccountAddress, + FlowEVMBridgeHandlerInterfacesAddress: env.ServiceAccountAddress, + IBridgePermissionsAddress: env.ServiceAccountAddress, + ICrossVMAddress: env.ServiceAccountAddress, + ICrossVMAssetAddress: env.ServiceAccountAddress, + IEVMBridgeNFTMinterAddress: env.ServiceAccountAddress, + IEVMBridgeTokenMinterAddress: env.ServiceAccountAddress, + IFlowEVMNFTBridgeAddress: env.ServiceAccountAddress, + IFlowEVMTokenBridgeAddress: env.ServiceAccountAddress, + FlowEVMBridgeAddress: env.ServiceAccountAddress, + FlowEVMBridgeAccessorAddress: env.ServiceAccountAddress, + FlowEVMBridgeConfigAddress: env.ServiceAccountAddress, + FlowEVMBridgeHandlersAddress: env.ServiceAccountAddress, + FlowEVMBridgeNFTEscrowAddress:
env.ServiceAccountAddress, + FlowEVMBridgeResolverAddress: env.ServiceAccountAddress, + FlowEVMBridgeTemplatesAddress: env.ServiceAccountAddress, + FlowEVMBridgeTokenEscrowAddress: env.ServiceAccountAddress, + FlowEVMBridgeUtilsAddress: env.ServiceAccountAddress, + ArrayUtilsAddress: env.ServiceAccountAddress, + ScopedFTProvidersAddress: env.ServiceAccountAddress, + SerializeAddress: env.ServiceAccountAddress, + SerializeMetadataAddress: env.ServiceAccountAddress, + StringUtilsAddress: env.ServiceAccountAddress, + } + + ctx := NewContextFromParent(b.ctx, + WithBlockHeader(b.rootHeader), + WithEntropyProvider(stubEntropyProvider{}), + WithEVMEnabled(true), + ) + + txIndex := uint32(0) + events := flow.EventsList{} + + run := func(tx *flow.TransactionBody, failMSG string) ProcedureOutput { + txOutput, err := b.runMetaTransaction(ctx, Transaction(tx, txIndex)) + + if err != nil { + panic(fmt.Sprintf(failMSG, err.Error())) + } + if txOutput.Err != nil { + panic(fmt.Sprintf(failMSG, txOutput.Err.Error())) + } + txIndex += 1 + events = append(events, txOutput.Events...) + + return txOutput + } + + // Create a COA in the service account + txBody, err := blueprints.CreateCOATransaction(*env, bridgeEnv, serviceAddress).Build() + if err != nil { + panic(fmt.Sprintf("failed to build COA contract transaction: %s", err)) + } + _ = run(txBody, "failed to create COA in Service Account: %s") + + // Arbitrarily high gas limit that can be used for all the + // EVM transactions to ensure none of them run out of gas + gasLimit := 15000000 + deploymentValue := 0.0 + + // Retrieve the factory bytecode from the JSON args + factoryBytecode := bridge.GetBytecodeFromArgsJSON("cadence/args/deploy-factory-args.json") + + // deploy the Solidity Factory contract to the service account's COA + txBody, err = blueprints.DeployEVMContractTransaction(*env, bridgeEnv, serviceAddress, factoryBytecode, gasLimit, deploymentValue) + if err != nil { + panic(fmt.Sprintf("failed to build EVM contract transaction: %s", err)) + } + txOutput := run(txBody, "failed to deploy the Factory in the Service Account COA: %s") + + factoryAddress, err := getContractAddressFromEVMEvent(txOutput) + if err != nil { + panic(fmt.Sprintf("failed to deploy Solidity Factory contract: %s", err)) + } + + // Retrieve the registry bytecode from the JSON args + registryBytecode := bridge.GetBytecodeFromArgsJSON("cadence/args/deploy-deployment-registry-args.json") + + // deploy the Solidity Registry contract to the service account's COA + txBody, err = blueprints.DeployEVMContractTransaction(*env, bridgeEnv, serviceAddress, registryBytecode, gasLimit, deploymentValue) + if err != nil { + panic(fmt.Sprintf("failed to build Solidity Registry contract to the service account's COA transaction: %s", err)) + } + txOutput = run(txBody, "failed to deploy the Registry in the Service Account COA: %s") + + registryAddress, err := getContractAddressFromEVMEvent(txOutput) + if err != nil { + panic(fmt.Sprintf("failed to deploy Solidity Registry contract: %s", err)) + } + + // Retrieve the erc20Deployer bytecode from the JSON args + erc20DeployerBytecode := bridge.GetBytecodeFromArgsJSON("cadence/args/deploy-erc20-deployer-args.json") + + // deploy the Solidity ERC20 Deployer contract to the service account's COA + txBody, err = blueprints.DeployEVMContractTransaction(*env, bridgeEnv, serviceAddress, erc20DeployerBytecode, gasLimit, deploymentValue) + if err != nil { + panic(fmt.Sprintf("failed to build Solidity ERC20 Deployer contract to the service account's COA
transaction: %s", err)) + } + txOutput = run(txBody, "failed to deploy the ERC20 Deployer in the Service Account COA: %s") + + erc20DeployerAddress, err := getContractAddressFromEVMEvent(txOutput) + if err != nil { + panic(fmt.Sprintf("failed to deploy ERC20 deployer contract: %s", err)) + } + + erc721DeployerBytecode := bridge.GetBytecodeFromArgsJSON("cadence/args/deploy-erc721-deployer-args.json") + + // deploy the ERC721 deployer contract to the service account's COA + txBody, err = blueprints.DeployEVMContractTransaction(*env, bridgeEnv, serviceAddress, erc721DeployerBytecode, gasLimit, deploymentValue) + if err != nil { + panic(fmt.Sprintf("failed to build ERC721 deployer contract to the service account's COA transaction: %s", err)) + } + txOutput = run(txBody, "failed to deploy the ERC721 Deployer in the Service Account COA: %s") + + erc721DeployerAddress, err := getContractAddressFromEVMEvent(txOutput) + if err != nil { + panic(fmt.Sprintf("failed to deploy ERC 721 deployer contract: %s", err)) + } + + for _, path := range blueprints.BridgeContracts { + + contract, _ := bridge.GetCadenceContractCode(path, bridgeEnv, *env) + + slashSplit := strings.Split(path, "/") + nameWithCDC := slashSplit[len(slashSplit)-1] + name := nameWithCDC[:len(nameWithCDC)-4] + + if name == "FlowEVMBridgeUtils" { + txBody, err := blueprints.DeployFlowEVMBridgeUtilsContractTransaction(*env, &bridgeEnv, serviceAddress, contract, name, factoryAddress) + if err != nil { + panic(fmt.Sprintf("failed to build FlowEVMBridgeUtils transaction: %s", err)) + } + _ = run(txBody, "failed to deploy FlowEVMBridgeUtils contract: %s") + } else { + txBody, err := blueprints.DeployContractTransaction(serviceAddress, contract, name).Build() + if err != nil { + panic(fmt.Sprintf("failed to build "+name+" contract transaction: %s", err)) + } + _ = run(txBody, "failed to deploy "+name+" contract: %s") + } + } + + // Pause the bridge for setup + txBody, err = blueprints.PauseBridgeTransaction(*env, bridgeEnv, serviceAddress, true) + if err != nil { + panic(fmt.Sprintf("failed to build pause the bridge contracts transaction: %s", err)) + } + _ = run(txBody, "failed to pause the bridge contracts: %s") + + // Set the factory as registrar in the registry + txBody, err = blueprints.SetRegistrarTransaction(*env, bridgeEnv, serviceAddress, registryAddress) + if err != nil { + panic(fmt.Sprintf("failed to build set the factory as register transaction: %s", err)) + } + _ = run(txBody, "failed to set the factory as registrar: %s") + + // Add the registry to the factory + txBody, err = blueprints.SetDeploymentRegistryTransaction(*env, bridgeEnv, serviceAddress, registryAddress) + if err != nil { + panic(fmt.Sprintf("failed to build add the registry to the factory transaction: %s", err)) + } + _ = run(txBody, "failed to add the registry to the factory: %s") + + // Set the factory as delegated deployer in the ERC20 deployer + txBody, err = blueprints.SetDelegatedDeployerTransaction(*env, bridgeEnv, serviceAddress, erc20DeployerAddress) + if err != nil { + panic(fmt.Sprintf("failed to build set the erc20 deployer as delegated deployer transaction: %s", err)) + } + _ = run(txBody, "failed to set the erc20 deployer as delegated deployer: %s") + + // Set the factory as delegated deployer in the ERC721 deployer + txBody, err = blueprints.SetDelegatedDeployerTransaction(*env, bridgeEnv, serviceAddress, erc721DeployerAddress) + if err != nil { + panic(fmt.Sprintf("failed to build set the erc721 deployer as delegated deployer transaction: %s", err)) + 
} + _ = run(txBody, "failed to set the erc721 deployer as delegated deployer: %s") + + // Add the ERC20 Deployer as a deployer in the factory + txBody, err = blueprints.AddDeployerTransaction(*env, bridgeEnv, serviceAddress, "ERC20", erc20DeployerAddress) + if err != nil { + panic(fmt.Sprintf("failed to build add the erc20 deployer in the factory transaction: %s", err)) + } + _ = run(txBody, "failed to add the erc20 deployer in the factory: %s") + + // Add the ERC721 Deployer as a deployer in the factory + txBody, err = blueprints.AddDeployerTransaction(*env, bridgeEnv, serviceAddress, "ERC721", erc721DeployerAddress) + if err != nil { + panic(fmt.Sprintf("failed to build add the erc721 deployer in the factory transaction: %s", err)) + } + _ = run(txBody, "failed to add the erc721 deployer in the factory: %s") + + // --- EVM Contract Integration --- + + // Deploy FlowEVMBridgeAccessor, providing EVM contract host (network service account) as argument + txBody, err = blueprints.DeployFlowEVMBridgeAccessorContractTransaction(*env, bridgeEnv, serviceAddress) + if err != nil { + panic(fmt.Sprintf("failed to build deploy FlowEVMBridgeAccessor transaction: %s", err)) + } + _ = run(txBody, "failed to deploy FlowEVMBridgeAccessor contract: %s") + + // Integrate the EVM contract with the BridgeAccessor + txBody, err = blueprints.IntegrateEVMWithBridgeAccessorTransaction(*env, bridgeEnv, serviceAddress) + if err != nil { + panic(fmt.Sprintf("failed to build integrate the EVM contract with the BridgeAccessor transaction: %s", err)) + } + _ = run(txBody, "failed to integrate the EVM contract with the BridgeAccessor: %s") + + // Set the bridge onboarding fees + txBody, err = blueprints.UpdateOnboardFeeTransaction(*env, bridgeEnv, serviceAddress, 1.0) + if err != nil { + panic(fmt.Sprintf("failed to build update the bridge onboarding fees transaction: %s", err)) + } + _ = run(txBody, "failed to update the bridge onboarding fees: %s") + + // Set the bridge base fee + txBody, err = blueprints.UpdateBaseFeeTransaction(*env, bridgeEnv, serviceAddress, 0.001) + if err != nil { + panic(fmt.Sprintf("failed to build update the bridge base fees transaction: %s", err)) + } + _ = run(txBody, "failed to update the bridge base fees: %s") + + tokenChunks := bridge.GetCadenceTokenChunkedJSONArguments(false) + nftChunks := bridge.GetCadenceTokenChunkedJSONArguments(true) + + // Add the FT Template Cadence Code Chunks + txBody, err = blueprints.UpsertContractCodeChunksTransaction(*env, bridgeEnv, serviceAddress, "bridgedToken", tokenChunks) + if err != nil { + panic(fmt.Sprintf("failed to build add the FT template code chunks transaction: %s", err)) + } + _ = run(txBody, "failed to add the FT template code chunks: %s") + + // Add the NFT Template Cadence Code Chunks + txBody, err = blueprints.UpsertContractCodeChunksTransaction(*env, bridgeEnv, serviceAddress, "bridgedNFT", nftChunks) + if err != nil { + panic(fmt.Sprintf("failed to build add the NFT template code chunks transaction: %s", err)) + } + _ = run(txBody, "failed to add the NFT template code chunks: %s") + + // Retrieve the compiled WFLOW bytecode + wflowBytecode, err := bridge.GetSolidityContractCode("WFLOW") + if err != nil { + panic(fmt.Sprintf("failed to get WFLOW bytecode: %s", err)) + } + + // deploy the WFLOW contract to the service account's COA + txBody, err = blueprints.DeployEVMContractTransaction(*env, bridgeEnv, serviceAddress, wflowBytecode, gasLimit, deploymentValue) + if err != nil { + panic(fmt.Sprintf("failed to build WFLOW contract
in the Service Account COA transaction: %s", err)) + } + txOutput = run(txBody, "failed to deploy the WFLOW contract in the Service Account COA: %s") + + wflowAddress, err := getContractAddressFromEVMEvent(txOutput) + if err != nil { + panic(fmt.Sprintf("failed to deploy WFLOW contract: %s", err)) + } + + // Create WFLOW Token Handler, supplying the WFLOW EVM address + txBody, err = blueprints.CreateWFLOWTokenHandlerTransaction(*env, bridgeEnv, serviceAddress, wflowAddress) + if err != nil { + panic(fmt.Sprintf("failed to build create the WFLOW token handler transaction: %s", err)) + } + _ = run(txBody, "failed to create the WFLOW token handler: %s") + + // Enable WFLOW Token Handler, supplying the Cadence FlowToken.Vault type + flowVaultType := "A." + env.FlowTokenAddress + ".FlowToken.Vault" + + txBody, err = blueprints.EnableWFLOWTokenHandlerTransaction(*env, bridgeEnv, serviceAddress, flowVaultType) + if err != nil { + panic(fmt.Sprintf("failed to build enable the WFLOW token handler transaction: %s", err)) + } + _ = run(txBody, "failed to enable the WFLOW token handler: %s") + + // Unpause the bridge + txBody, err = blueprints.PauseBridgeTransaction(*env, bridgeEnv, serviceAddress, false) + if err != nil { + panic(fmt.Sprintf("failed to build un-pause the bridge contracts transaction: %s", err)) + } + _ = run(txBody, "failed to un-pause the bridge contracts: %s") + + b.output.Events = events +} + +// getContractAddressFromEVMEvent extracts the deployed contract address from the +// TransactionExecuted event of an EVM deployment transaction +func getContractAddressFromEVMEvent(output ProcedureOutput) (string, error) { + for _, event := range output.Events { + if strings.Contains(string(event.Type), "TransactionExecuted") { + // decode the event payload + data, _ := ccf.Decode(nil, event.Payload) + // get the contractAddress field from the event + contractAddr := cadence.SearchFieldByName( + data.(cadence.Event), + "contractAddress", + ).(cadence.String) + + if contractAddr.String() == "" { + return "", fmt.Errorf( + "contract address not found in event") + } + address := strings.ToLower(strings.TrimPrefix(contractAddr.String(), "0x")) + // the decoded cadence.String renders with surrounding quotation marks, + // so strip the first and last characters to leave only the bare address + return address[1 : len(address)-1], nil + } + } + return "", fmt.Errorf( + "no TransactionExecuted event found in the transaction output") + } + func (b *bootstrapExecutor) registerNodes(service, fungibleToken, flowToken flow.Address) { - for _, id := range b.identities { + for _, node := range b.nodes { // create a staking account for the node nodeAddress := b.createAccount(b.accountKeys.NodeAccountPublicKeys) // give a vault resource to the staking account - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.SetupAccountTransaction(fungibleToken, - flowToken, - nodeAddress), - 0, - ), - ) + txBody, err := blueprints.SetupAccountTransaction(fungibleToken, flowToken, nodeAddress) + if err != nil { + panic(fmt.Sprintf("failed to build setup machine account transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) panicOnMetaInvokeErrf("failed to setup machine account: %s", txError, err) // fund the staking account - txError, err = b.invokeMetaTransaction( - b.ctx, - Transaction(blueprints.FundAccountTransaction(service, - fungibleToken, - flowToken, - nodeAddress), - 0), - ) + txBody, err = blueprints.FundAccountTransaction( + service, + fungibleToken, + flowToken, + 
nodeAddress) + if err != nil { + panic(fmt.Sprintf("failed to build fund node staking account transaction: %s", err)) + } + txError, err = b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) panicOnMetaInvokeErrf("failed to fund node staking account: %s", txError, err) // register the node @@ -766,95 +1381,102 @@ func (b *bootstrapExecutor) registerNodes(service, fungibleToken, flowToken flow // and set it up with the QC/DKG participant resource txError, err = b.invokeMetaTransaction( b.ctx, - Transaction(blueprints.RegisterNodeTransaction(service, + Transaction(blueprints.RegisterNodeTransaction( + service, flowToken, + fungibleToken, nodeAddress, - id), + node), 0), ) panicOnMetaInvokeErrf("failed to register node: %s", txError, err) } } -func (b *bootstrapExecutor) deployStakingProxyContract(service flow.Address) { +func (b *bootstrapExecutor) deployStakingProxyContract(deployTo flow.Address, env *templates.Environment) { contract := contracts.FlowStakingProxy() - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.DeployContractTransaction(service, contract, "StakingProxy"), - 0, - ), - ) + txBody, err := blueprints.DeployContractTransaction(deployTo, contract, "StakingProxy").Build() + if err != nil { + panic(fmt.Sprintf("failed to build StakingProxy transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.StakingProxyAddress = deployTo.String() panicOnMetaInvokeErrf("failed to deploy StakingProxy contract: %s", txError, err) } func (b *bootstrapExecutor) deployVersionBeacon( - service flow.Address, + deployTo flow.Address, versionFreezePeriod cadence.UInt64, + env *templates.Environment, ) { - tx := blueprints.DeployNodeVersionBeaconTransaction(service, versionFreezePeriod) - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - tx, - 0, - ), - ) + txBody, err := blueprints.DeployNodeVersionBeaconTransaction(deployTo, versionFreezePeriod) + if err != nil { + panic(fmt.Sprintf("failed to build deploy NodeVersionBeacon transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.NodeVersionBeaconAddress = deployTo.String() panicOnMetaInvokeErrf("failed to deploy NodeVersionBeacon contract: %s", txError, err) } -func (b *bootstrapExecutor) deployLockedTokensContract( - service flow.Address, fungibleTokenAddress, - flowTokenAddress flow.Address, +func (b *bootstrapExecutor) deployRandomBeaconHistory( + deployTo flow.Address, + env *templates.Environment, ) { + txBody, err := blueprints.DeployRandomBeaconHistoryTransaction(deployTo) + if err != nil { + panic(fmt.Sprintf("failed to build deploy RandomBeaconHistory transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.RandomBeaconHistoryAddress = deployTo.String() + panicOnMetaInvokeErrf("failed to deploy RandomBeaconHistory contract: %s", txError, err) +} +func (b *bootstrapExecutor) deployLockedTokensContract( + deployTo flow.Address, + env *templates.Environment, ) { publicKeys, err := flow.EncodeRuntimeAccountPublicKeys(b.accountKeys.ServiceAccountPublicKeys) if err != nil { panic(err) } - contract := contracts.FlowLockedTokens( - fungibleTokenAddress.Hex(), - flowTokenAddress.Hex(), - service.Hex(), - service.Hex(), - service.Hex()) - - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.DeployLockedTokensTransaction(service, contract, publicKeys), - 0, - ), - ) - + contract :=
contracts.FlowLockedTokens(*env) + txBody, err := blueprints.DeployLockedTokensTransaction(deployTo, contract, publicKeys) + if err != nil { + panic(fmt.Sprintf("failed to build deploy LockedTokens transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.LockedTokensAddress = deployTo.String() panicOnMetaInvokeErrf("failed to deploy LockedTokens contract: %s", txError, err) } func (b *bootstrapExecutor) deployStakingCollection( - service flow.Address, - fungibleTokenAddress, flowTokenAddress flow.Address, + deployTo flow.Address, + env *templates.Environment, ) { - contract := contracts.FlowStakingCollection( - fungibleTokenAddress.Hex(), - flowTokenAddress.Hex(), - service.Hex(), - service.Hex(), - service.Hex(), - service.Hex(), - service.Hex(), - service.Hex(), - service.Hex()) - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - blueprints.DeployContractTransaction(service, contract, "FlowStakingCollection"), - 0, - ), - ) + contract := contracts.FlowStakingCollection(*env) + txBody, err := blueprints.DeployContractTransaction(deployTo, contract, "FlowStakingCollection").Build() + if err != nil { + panic(fmt.Sprintf("failed to build FlowStakingCollection transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + env.StakingCollectionAddress = deployTo.String() panicOnMetaInvokeErrf("failed to deploy FlowStakingCollection contract: %s", txError, err) } +func (b *bootstrapExecutor) deployMigrationContract(deployTo flow.Address) { + txBody, err := blueprints.DeployContractTransaction( + deployTo, + migration.ContractCode(), + migration.ContractName, + ).Build() + if err != nil { + panic(fmt.Sprintf("failed to build Migration transaction: %s", err)) + } + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) + panicOnMetaInvokeErrf("failed to deploy Migration contract: %s", txError, err) +} + func (b *bootstrapExecutor) setContractDeploymentRestrictions( service flow.Address, deployment *bool, @@ -867,13 +1489,7 @@ func (b *bootstrapExecutor) setContractDeploymentRestrictions( if err != nil { panic(err) } - txError, err := b.invokeMetaTransaction( - b.ctx, - Transaction( - txBody, - 0, - ), - ) + txError, err := b.invokeMetaTransaction(b.ctx, Transaction(txBody, 0)) panicOnMetaInvokeErrf("failed to deploy FlowStakingCollection contract: %s", txError, err) } @@ -886,16 +1502,6 @@ func panicOnMetaInvokeErrf(msg string, txError errors.CodedError, err error) { } } -func FungibleTokenAddress(chain flow.Chain) flow.Address { - address, _ := chain.AddressAtIndex(environment.FungibleTokenAccountIndex) - return address -} - -func FlowTokenAddress(chain flow.Chain) flow.Address { - address, _ := chain.AddressAtIndex(environment.FlowTokenAccountIndex) - return address -} - // invokeMetaTransaction invokes a meta transaction inside the context of an // outer transaction. // @@ -908,6 +1514,23 @@ func (b *bootstrapExecutor) invokeMetaTransaction( ) ( errors.CodedError, error, +) { + output, err := b.runMetaTransaction(parentCtx, tx) + if err != nil { + return nil, err + } + + return output.Err, err +} + +// runMetaTransaction invokes a meta transaction inside the context of an +// outer transaction. 
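+//
+// Unlike invokeMetaTransaction, it returns the full ProcedureOutput, so callers
+// can inspect the emitted events. Illustrative sketch (mirroring the run helper
+// in setupVMBridge above, not a new API):
+//
+//	output, err := b.runMetaTransaction(ctx, Transaction(txBody, txIndex))
+//	if err != nil || output.Err != nil {
+//		// fatal error or transaction error: abort bootstrapping
+//	}
+//	events = append(events, output.Events...)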
+func (b *bootstrapExecutor) runMetaTransaction( + parentCtx Context, + tx *TransactionProcedure, +) ( + ProcedureOutput, + error, ) { // do not deduct fees or check storage in meta transactions ctx := NewContextFromParent(parentCtx, @@ -924,5 +1547,5 @@ func (b *bootstrapExecutor) invokeMetaTransaction( executor := tx.NewExecutor(ctx, b.txnState) err := Run(executor) - return executor.Output().Err, err + return executor.Output(), err } diff --git a/fvm/context.go b/fvm/context.go index a1c25541360..4c5aff84278 100644 --- a/fvm/context.go +++ b/fvm/context.go @@ -13,6 +13,7 @@ import ( "github.com/onflow/flow-go/fvm/tracing" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/state/protocol" ) const ( @@ -21,6 +22,10 @@ const ( DefaultComputationLimit = 100_000 // 100K DefaultMemoryLimit = math.MaxUint64 DefaultMaxInteractionSize = 20_000_000 // ~20MB + + // DefaultScheduledCallbacksEnabled is the default value for the scheduled callbacks enabled flag + // used by Execution, Verification, and Access nodes. + DefaultScheduledCallbacksEnabled = true ) // A Context defines a set of execution parameters used by the virtual machine. @@ -28,6 +33,8 @@ type Context struct { // DisableMemoryAndInteractionLimits will override memory and interaction // limits and set them to MaxUint64, effectively disabling these limits. DisableMemoryAndInteractionLimits bool + EVMEnabled bool + ScheduleCallbacksEnabled bool ComputationLimit uint64 MemoryLimit uint64 MaxStateKeySize uint64 @@ -41,6 +48,10 @@ type Context struct { tracing.TracerSpan environment.EnvironmentParams + + // AllowProgramCacheWritesInScripts determines if the program cache can be written to in scripts + // By default, the program cache is only updated by transactions. + AllowProgramCacheWritesInScripts bool } // NewContext initializes a new execution context with the provided options. @@ -62,7 +73,7 @@ func newContext(ctx Context, opts ...Option) Context { } func defaultContext() Context { - return Context{ + ctx := Context{ DisableMemoryAndInteractionLimits: false, ComputationLimit: DefaultComputationLimit, MemoryLimit: DefaultMemoryLimit, @@ -72,6 +83,7 @@ func defaultContext() Context { TransactionExecutorParams: DefaultTransactionExecutorParams(), EnvironmentParams: environment.DefaultEnvironmentParams(), } + return ctx } // An Option sets a configuration parameter for a virtual machine context. @@ -85,8 +97,8 @@ func WithChain(chain flow.Chain) Option { } } -// WithGasLimit sets the computation limit for a virtual machine context. -// @depricated, please use WithComputationLimit instead. +// Deprecated: WithGasLimit sets the computation limit for a virtual machine context. +// Use WithComputationLimit instead. func WithGasLimit(limit uint64) Option { return func(ctx Context) Context { ctx.ComputationLimit = limit @@ -162,8 +174,7 @@ func WithEventCollectionSizeLimit(limit uint64) Option { // WithBlockHeader sets the block header for a virtual machine context. // -// The VM uses the header to provide current block information to the Cadence runtime, -// as well as to seed the pseudorandom number generator. +// The VM uses the header to provide current block information to the Cadence runtime. 
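+//
+// For example (illustrative sketch, mirroring deployEpoch and setupVMBridge in
+// fvm/bootstrap.go; parentCtx and rootHeader are placeholders):
+//
+//	ctx := NewContextFromParent(parentCtx,
+//		WithBlockHeader(rootHeader),
+//		WithBlocks(&environment.NoopBlockFinder{}),
+//	)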
func WithBlockHeader(header *flow.Header) Option { return func(ctx Context) Context { ctx.BlockHeader = header @@ -171,14 +182,6 @@ func WithBlockHeader(header *flow.Header) Option { } } -// WithServiceEventCollectionEnabled enables service event collection -func WithServiceEventCollectionEnabled() Option { - return func(ctx Context) Context { - ctx.ServiceEventCollectionEnabled = true - return ctx - } -} - // WithBlocks sets the block storage provider for a virtual machine context. // // The VM uses the block storage provider to provide historical block information to @@ -321,6 +324,15 @@ func WithTransactionFeesEnabled(enabled bool) Option { } } +// WithRandomSourceHistoryCallAllowed enables or disables calling the `entropy` function +// within cadence +func WithRandomSourceHistoryCallAllowed(allowed bool) Option { + return func(ctx Context) Context { + ctx.RandomSourceHistoryCallAllowed = allowed + return ctx + } +} + // WithReusableCadenceRuntimePool set the (shared) RedusableCadenceRuntimePool // use for creating the cadence runtime. func WithReusableCadenceRuntimePool( @@ -348,3 +360,59 @@ func WithEventEncoder(encoder environment.EventEncoder) Option { return ctx } } + +// WithEVMEnabled enables access to the evm environment +func WithEVMEnabled(enabled bool) Option { + return func(ctx Context) Context { + ctx.EVMEnabled = enabled + return ctx + } +} + +// WithAllowProgramCacheWritesInScriptsEnabled enables caching of programs accessed by scripts +func WithAllowProgramCacheWritesInScriptsEnabled(enabled bool) Option { + return func(ctx Context) Context { + ctx.AllowProgramCacheWritesInScripts = enabled + return ctx + } +} + +// WithEntropyProvider sets the entropy provider of a virtual machine context. +// +// The VM uses the input to provide entropy to the Cadence runtime randomness functions. +func WithEntropyProvider(source environment.EntropyProvider) Option { + return func(ctx Context) Context { + ctx.EntropyProvider = source + return ctx + } +} + +// WithExecutionVersionProvider sets the execution version provider of a virtual machine context. +// +// this is used to provide the execution version to the Cadence runtime. +func WithExecutionVersionProvider(provider environment.ExecutionVersionProvider) Option { + return func(ctx Context) Context { + ctx.ExecutionVersionProvider = provider + return ctx + } +} + +// WithProtocolStateSnapshot sets all the necessary components from a subset of the protocol state +// to the virtual machine context. +func WithProtocolStateSnapshot(snapshot protocol.SnapshotExecutionSubset) Option { + return func(ctx Context) Context { + + ctx = WithEntropyProvider(snapshot)(ctx) + + ctx = WithExecutionVersionProvider(environment.NewVersionBeaconExecutionVersionProvider(snapshot.VersionBeacon))(ctx) + return ctx + } +} + +// WithScheduleCallbacksEnabled enables execution of scheduled callbacks. 
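+//
+// Hypothetical usage sketch (not part of this change), wiring in the default
+// defined above at node startup:
+//
+//	ctx := NewContext(WithScheduleCallbacksEnabled(DefaultScheduledCallbacksEnabled))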
+func WithScheduleCallbacksEnabled(enabled bool) Option { + return func(ctx Context) Context { + ctx.ScheduleCallbacksEnabled = enabled + return ctx + } +} diff --git a/fvm/crypto/crypto.go b/fvm/crypto/crypto.go index 28d781f2801..c956894c5ae 100644 --- a/fvm/crypto/crypto.go +++ b/fvm/crypto/crypto.go @@ -5,11 +5,10 @@ import ( "fmt" "github.com/onflow/cadence/runtime" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/fvm/errors" - "github.com/onflow/flow-go/model/flow" msig "github.com/onflow/flow-go/module/signature" ) @@ -118,14 +117,14 @@ func ValidatePublicKey(signAlgo runtime.SignatureAlgorithm, pk []byte) error { return nil } -// VerifySignatureFromRuntime performs signature verification using raw values provided +// VerifySignatureFromRuntime performs signature verification using values provided // by the Cadence runtime. // // The signature/hash function combinations accepted are: // - ECDSA (on both curves P-256 and secp256k1) with any of SHA2-256/SHA3-256/Keccak256. // - BLS (on BLS12-381 curve) with the specific KMAC128 for BLS. // -// The tag is applied to the message depending on the hash function used. +// The tag is applied to the message within the implementation depending on the hash function used. // // The function errors: // - NewValueErrorf for any user error @@ -199,13 +198,14 @@ func VerifySignatureFromRuntime( return valid, nil } -// VerifySignatureFromRuntime performs signature verification using raw values provided -// by the Cadence runtime. +// VerifySignatureFromTransaction performs signature verification using values provided +// by the Transaction Verifier. // // The signature/hash function combinations accepted are: // - ECDSA (on both curves P-256 and secp256k1) with any of SHA2-256/SHA3-256. // -// The tag is applied to the message as a constant length prefix. +// No tagging is applied to the input `message` in the implementation. Any tagging/prefixing should be applied to the message prior +// to calling the function.
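+//
+// For example (illustrative only; assumes the flow package is imported by the
+// caller), a transaction verifier applies the domain tag itself before calling:
+//
+//	tagged := append(flow.TransactionDomainTag[:], message...)
+//	valid, err := VerifySignatureFromTransaction(signature, tagged, pk, hash.SHA3_256)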
// // The function errors: // - NewValueErrorf for any user error @@ -219,22 +219,23 @@ func VerifySignatureFromTransaction( // check ECDSA compatibilites if pk.Algorithm() != crypto.ECDSAP256 && pk.Algorithm() != crypto.ECDSASecp256k1 { - // TODO: check if we should panic - // This case only happens in production if there is a bug + // should not happen return false, errors.NewUnknownFailure(fmt.Errorf( pk.Algorithm().String(), "is not supported in transactions")) } // hashing compatibility if hashAlgo != hash.SHA2_256 && hashAlgo != hash.SHA3_256 { - // TODO: check if we should panic - // This case only happens in production if there is a bug + // should not happen return false, errors.NewUnknownFailure(fmt.Errorf( hashAlgo.String(), "is not supported in transactions")) } - hasher, err := NewPrefixedHashing(hashAlgo, flow.TransactionTagString) + // No prefix logic is implemented here; any prefixing should be applied directly to the `message` input + hasher, err := NewHashing(hashAlgo) if err != nil { - return false, errors.NewValueErrorf(err.Error(), "transaction verification failed") + // should not happen + return false, errors.NewUnknownFailure(fmt.Errorf( + hashAlgo.String(), "is not supported in transactions")) } valid, err := pk.Verify(signature, message, hasher) diff --git a/fvm/crypto/crypto_test.go b/fvm/crypto/crypto_test.go index fe6c400c1b4..bde4e317d2f 100644 --- a/fvm/crypto/crypto_test.go +++ b/fvm/crypto/crypto_test.go @@ -8,11 +8,11 @@ import ( "github.com/fxamacker/cbor/v2" "github.com/onflow/cadence/runtime" + onflowCrypto "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - gocrypto "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/fvm/crypto" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/model/flow" @@ -90,7 +90,7 @@ func TestVerifySignatureFromRuntime(t *testing.T) { seed := make([]byte, seedLength) _, err := rand.Read(seed) require.NoError(t, err) - pk, err := gocrypto.GeneratePrivateKey(crypto.RuntimeToCryptoSigningAlgorithm(s), seed) + pk, err := onflowCrypto.GeneratePrivateKey(crypto.RuntimeToCryptoSigningAlgorithm(s), seed) require.NoError(t, err) tag := "random_tag" @@ -182,7 +182,7 @@ func TestVerifySignatureFromRuntime(t *testing.T) { seed := make([]byte, seedLength) _, err := rand.Read(seed) require.NoError(t, err) - pk, err := gocrypto.GeneratePrivateKey(gocrypto.BLSBLS12381, seed) + pk, err := onflowCrypto.GeneratePrivateKey(onflowCrypto.BLSBLS12381, seed) require.NoError(t, err) hasher := msig.NewBLSHasher(string(c.signTag)) @@ -265,7 +265,7 @@ func TestVerifySignatureFromRuntime(t *testing.T) { seed := make([]byte, seedLength) _, err := rand.Read(seed) require.NoError(t, err) - pk, err := gocrypto.GeneratePrivateKey(crypto.RuntimeToCryptoSigningAlgorithm(s), seed) + pk, err := onflowCrypto.GeneratePrivateKey(crypto.RuntimeToCryptoSigningAlgorithm(s), seed) require.NoError(t, err) hasher, err := crypto.NewPrefixedHashing(crypto.RuntimeToCryptoHashingAlgorithm(h), c.signTag) @@ -294,16 +294,14 @@ func TestVerifySignatureFromRuntime(t *testing.T) { } func TestVerifySignatureFromTransaction(t *testing.T) { + seedLength := 32 - // make sure the seed length is larger than miniumum seed lengths of all signature schemes - seedLength := 64 - - correctCombinations := map[gocrypto.SigningAlgorithm]map[hash.HashingAlgorithm]struct{}{ - gocrypto.ECDSAP256: { + correctCombinations :=
map[onflowCrypto.SigningAlgorithm]map[hash.HashingAlgorithm]struct{}{ + onflowCrypto.ECDSAP256: { hash.SHA2_256: {}, hash.SHA3_256: {}, }, - gocrypto.ECDSASecp256k1: { + onflowCrypto.ECDSASecp256k1: { hash.SHA2_256: {}, hash.SHA3_256: {}, }, @@ -311,10 +309,10 @@ func TestVerifySignatureFromTransaction(t *testing.T) { t.Run("verify should fail on incorrect combinations", func(t *testing.T) { - signatureAlgos := []gocrypto.SigningAlgorithm{ - gocrypto.ECDSAP256, - gocrypto.ECDSASecp256k1, - gocrypto.BLSBLS12381, + signatureAlgos := []onflowCrypto.SigningAlgorithm{ + onflowCrypto.ECDSAP256, + onflowCrypto.ECDSASecp256k1, + onflowCrypto.BLSBLS12381, } hashAlgos := []hash.HashingAlgorithm{ hash.SHA2_256, @@ -331,110 +329,53 @@ func TestVerifySignatureFromTransaction(t *testing.T) { seed := make([]byte, seedLength) _, err := rand.Read(seed) require.NoError(t, err) - sk, err := gocrypto.GeneratePrivateKey(s, seed) + sk, err := onflowCrypto.GeneratePrivateKey(s, seed) require.NoError(t, err) tag := string(flow.TransactionDomainTag[:]) var hasher hash.Hasher if h != hash.KMAC128 { - hasher, err = crypto.NewPrefixedHashing(h, tag) + hasher, err = crypto.NewHashing(h) require.NoError(t, err) } else { hasher = msig.NewBLSHasher(tag) } - signature := make([]byte, 0) data := []byte("some_data") sig, err := sk.Sign(data, hasher) if _, shouldBeOk := correctCombinations[s][h]; shouldBeOk { require.NoError(t, err) } + signature := make([]byte, 0) if sig != nil { signature = sig.Bytes() } ok, err := crypto.VerifySignatureFromTransaction(signature, data, sk.PublicKey(), h) - if _, shouldBeOk := correctCombinations[s][h]; shouldBeOk { require.NoError(t, err) require.True(t, ok) } else { require.Error(t, err) + require.ErrorContains(t, err, "is not supported in transactions") require.False(t, ok) } }) } } }) - - t.Run("tag combinations", func(t *testing.T) { - - cases := []struct { - signTag string - require func(t *testing.T, sigOk bool, err error) - }{ - { - signTag: string(flow.TransactionDomainTag[:]), - require: func(t *testing.T, sigOk bool, err error) { - require.NoError(t, err) - require.True(t, sigOk) - }, - }, - { - signTag: "", - require: func(t *testing.T, sigOk bool, err error) { - require.NoError(t, err) - require.False(t, sigOk) - }, - }, { - signTag: "random_tag", - require: func(t *testing.T, sigOk bool, err error) { - require.NoError(t, err) - require.False(t, sigOk) - }, - }, - } - - for _, c := range cases { - for s, hMaps := range correctCombinations { - for h := range hMaps { - t.Run(fmt.Sprintf("sign tag: %v [%v, %v]", c.signTag, s, h), func(t *testing.T) { - seed := make([]byte, seedLength) - _, err := rand.Read(seed) - require.NoError(t, err) - sk, err := gocrypto.GeneratePrivateKey(s, seed) - require.NoError(t, err) - - hasher, err := crypto.NewPrefixedHashing(h, c.signTag) - require.NoError(t, err) - - data := []byte("some data") - sig, err := sk.Sign(data, hasher) - require.NoError(t, err) - signature := sig.Bytes() - - ok, err := crypto.VerifySignatureFromTransaction(signature, data, sk.PublicKey(), h) - c.require(t, ok, err) - }) - } - } - } - }) } func TestValidatePublicKey(t *testing.T) { - // make sure the seed length is larger than miniumum seed lengths of all signature schemes - seedLength := 64 - validPublicKey := func(t *testing.T, s runtime.SignatureAlgorithm) []byte { - seed := make([]byte, seedLength) + seed := make([]byte, onflowCrypto.KeyGenSeedMinLen) _, err := rand.Read(seed) require.NoError(t, err) - pk, err := 
gocrypto.GeneratePrivateKey(crypto.RuntimeToCryptoSigningAlgorithm(s), seed) + sk, err := onflowCrypto.GeneratePrivateKey(crypto.RuntimeToCryptoSigningAlgorithm(s), seed) require.NoError(t, err) - return pk.PublicKey().Encode() + return sk.PublicKey().Encode() } t.Run("Unknown algorithm should return false", func(t *testing.T) { @@ -463,12 +404,14 @@ func TestValidatePublicKey(t *testing.T) { runtime.SignatureAlgorithmBLS_BLS12_381, } for i, s := range signatureAlgos { + t.Run(fmt.Sprintf("case %v: %v", i, s), func(t *testing.T) { key := validPublicKey(t, s) + // This may cause flakiness depending on the public key + // deserialization scheme used!! key[0] ^= 1 // alter one bit of the valid key err := crypto.ValidatePublicKey(s, key) - require.Error(t, err) + require.Errorf(t, err, "key is %#x", key) }) } }) @@ -491,10 +434,10 @@ func TestHashingAlgorithmConversion(t *testing.T) { } func TestSigningAlgorithmConversion(t *testing.T) { - signingAlgoMapping := map[runtime.SignatureAlgorithm]gocrypto.SigningAlgorithm{ - runtime.SignatureAlgorithmECDSA_P256: gocrypto.ECDSAP256, - runtime.SignatureAlgorithmECDSA_secp256k1: gocrypto.ECDSASecp256k1, - runtime.SignatureAlgorithmBLS_BLS12_381: gocrypto.BLSBLS12381, + signingAlgoMapping := map[runtime.SignatureAlgorithm]onflowCrypto.SigningAlgorithm{ + runtime.SignatureAlgorithmECDSA_P256: onflowCrypto.ECDSAP256, + runtime.SignatureAlgorithmECDSA_secp256k1: onflowCrypto.ECDSASecp256k1, + runtime.SignatureAlgorithmBLS_BLS12_381: onflowCrypto.BLSBLS12381, } for runtimeAlgo, cryptoAlgo := range signingAlgoMapping { @@ -503,6 +446,19 @@ func TestSigningAlgorithmConversion(t *testing.T) { } } +func TestAuthenticationSchemeConversion(t *testing.T) { + schemeMapping := map[byte]string{ + 0x0: "PlainScheme", + 0x01: "WebAuthnScheme", + 0x02: "InvalidScheme", + 0x03: "InvalidScheme", + } + + for authSchemeByte, authSchemeName := range schemeMapping { + assert.Equal(t, authSchemeName, flow.AuthenticationSchemeFromByte(authSchemeByte).String()) + } +} + func TestVerifySignatureFromRuntime_error_handling_produces_valid_utf8_for_invalid_sign_algo(t *testing.T) { invalidSignatureAlgo := runtime.SignatureAlgorithm(164) diff --git a/fvm/crypto/hash.go b/fvm/crypto/hash.go index 49d8dd0000b..66245e22772 100644 --- a/fvm/crypto/hash.go +++ b/fvm/crypto/hash.go @@ -4,7 +4,8 @@ import ( "errors" "fmt" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/model/flow" ) @@ -14,7 +15,7 @@ const tagLength = flow.DomainTagLength // prefixedHashing embeds a crypto hasher and implements // hashing with a prefix : prefixedHashing(data) = hasher(prefix || data) // -// Prefixes are padded tags till 32 bytes to guarantee prefixedHashers are independant +// Prefixes are tags padded to 32 bytes to guarantee prefixedHashers are independent // hashers. // Prefixes are disabled with the particular tag value "" type prefixedHashing struct { @@ -41,8 +42,22 @@ var hasherCreators = map[hash.HashingAlgorithm](func() hash.Hasher){ hash.Keccak_256: hash.NewKeccak_256, } +// NewHashing returns a new hasher from the supported hash algorithms, without any prefixing logic. +// The output is equivalent to calling `NewPrefixedHashing` with an empty tag "". +// +// Supported algorithms are SHA2-256, SHA2-384, SHA3-256, SHA3-384 and Keccak256.
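To make the tagging semantics concrete: per the comments above, a prefixed hasher computes hasher(prefix || data) with the tag padded to 32 bytes, while NewHashing (whose body follows below) skips the prefix entirely. A minimal sketch using only the standard library, assuming zero-byte padding of the tag up to the 32-byte tagLength:

	import "crypto/sha256"

	// prefixedSHA256 mirrors prefixedHashing for SHA2-256:
	// hash(pad32(tag) || msg), with the prefix skipped for the empty tag,
	// matching the "" behavior documented for NewPrefixedHashing.
	// Tags longer than 32 bytes are simply truncated in this sketch.
	func prefixedSHA256(tag string, msg []byte) []byte {
		h := sha256.New()
		if tag != "" {
			var prefix [32]byte // assumed zero padding up to tagLength
			copy(prefix[:], tag)
			h.Write(prefix[:])
		}
		h.Write(msg)
		return h.Sum(nil)
	}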
+func NewHashing(algo hash.HashingAlgorithm) (hash.Hasher, error) { + hasherCreator, hasCreator := hasherCreators[algo] + if !hasCreator { + return nil, errors.New("hashing algorithm is not supported") + } + + return hasherCreator(), nil +} + // NewPrefixedHashing returns a new hasher that prefixes the tag for all // hash computations (only when tag is not empty). +// If tag is empty "", output is a simple hasher without tagging logic. // // Supported algorithms are SHA2-256, SHA2-384, SHA3-256, SHA3-384 and Keccak256. func NewPrefixedHashing(algo hash.HashingAlgorithm, tag string) (hash.Hasher, error) { diff --git a/fvm/crypto/hash_test.go b/fvm/crypto/hash_test.go index bb9bb64172b..6c8ba0354c8 100644 --- a/fvm/crypto/hash_test.go +++ b/fvm/crypto/hash_test.go @@ -1,17 +1,16 @@ package crypto_test import ( - "math/rand" - "testing" - + "crypto/rand" "crypto/sha256" "crypto/sha512" + "testing" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/crypto/sha3" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/fvm/crypto" "github.com/onflow/flow-go/model/flow" ) diff --git a/fvm/environment/account-key-metadata/digest.go b/fvm/environment/account-key-metadata/digest.go new file mode 100644 index 00000000000..6d863e64969 --- /dev/null +++ b/fvm/environment/account-key-metadata/digest.go @@ -0,0 +1,80 @@ +package accountkeymetadata + +import ( + "bytes" + "encoding/binary" + + "github.com/fxamacker/circlehash" + + "github.com/onflow/flow-go/model/flow" +) + +// FindDuplicateKey returns true with duplicate key index if duplicate key +// of the given key is found. However, detection rate is intentionally +// not 100% in order to limit the number of digests we store on chain. +// If a hash collision happens with given digest, this function returns +// SentinelFastDigest64 digest and duplicate key not found. +// Specifically, a duplicate key is found when these conditions are met: +// - computed digest isn't the predefined sentinel digest (0), +// - computed digest matches one of the stored digests in key metadata, and +// - given encodedKey also matches the stored key with the same digest. +func FindDuplicateKey( + keyMetadata *KeyMetadataAppender, + encodedKey []byte, + getKeyDigest func([]byte) uint64, + getStoredKey func(uint32) ([]byte, error), +) (digest uint64, found bool, duplicateStoredKeyIndex uint32, _ error) { + + // To balance tradeoffs, it is OK to have detection rate less than 100%, for the + // same reasons compression programs/libraries don't use max compression by default. + + // We use a fast non-cryptographic hash algorithm for efficiency, so + // we need to handle hash collisions (same digest from different hash inputs). + // When a hash collision is detected, sentinel digest (0) is stored in place + // of new key digest, and subsequent digest comparison excludes stored sentinel digest. + // This means keys with the sentinel digest will not be deduplicated and that is OK. + + digest = getKeyDigest(encodedKey) + + if digest == SentinelFastDigest64 { + // The new key digest matches the sentinel digest by coincidence or attack. + // Return early so the key will be stored without using deduplication. + return SentinelFastDigest64, false, 0, nil + } + + // Find duplicate stored digest by comparing computed digest against stored digests in key metadata section. 
+ found, duplicateStoredKeyIndex = keyMetadata.findDuplicateDigest(digest) + + // If no duplicate digest is found, we return duplicate not found. + if !found { + return digest, false, 0, nil + } + + // A duplicate digest is found, so we need to compare the stored key to + // the given encodedKey. + + // Get encoded key with duplicate digest. + encodedKeyWithDuplicateDigest, err := getStoredKey(duplicateStoredKeyIndex) + if err != nil { + return digest, false, 0, err + } + + // Compare the given encodedKey with stored key. + if bytes.Equal(encodedKeyWithDuplicateDigest, encodedKey) { + return digest, true, duplicateStoredKeyIndex, nil + } + + // Found hash collision. The given encodedKey and the stored key are different but + // they both produce the same 64-bit fast hash digest. + // Return SentinelFastDigest64, duplicate key not found, and no error. + return SentinelFastDigest64, false, 0, nil +} + +func GetPublicKeyDigest(owner flow.Address, encodedPublicKey []byte) uint64 { + seed := binary.BigEndian.Uint64(owner[:]) + return circlehash.Hash64(encodedPublicKey, seed) +} + +// SentinelFastDigest64 is the sentinel digest used for 64-bit fast hash collision handling. SentinelFastDigest64 +// is stored in key metadata's digest list as a placeholder. +const SentinelFastDigest64 uint64 = 0 // don't change this value (instead, declare new constant with new value if needed) diff --git a/fvm/environment/account-key-metadata/digest_test.go b/fvm/environment/account-key-metadata/digest_test.go new file mode 100644 index 00000000000..dfac8f062c1 --- /dev/null +++ b/fvm/environment/account-key-metadata/digest_test.go @@ -0,0 +1,113 @@ +package accountkeymetadata + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFindDuplicateKey(t *testing.T) { + testcases := []struct { + name string + deduplicated bool + data []byte + encodedKey []byte + getKeyDigest func([]byte) uint64 + getStoredKey func(uint32) ([]byte, error) + expectedDigest uint64 + expectedFound bool + expectedDuplicateKeyIndex uint32 + expectError bool + }{ + { + name: "no duplicate digest", + deduplicated: false, + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + encodedKey: []byte{0x01}, // not used in this test case + getKeyDigest: func(encodedKey []byte) uint64 { + require.Equal(t, []byte{0x01}, encodedKey) + return 1 + }, + getStoredKey: nil, // not used in this test case + expectedDigest: 1, + expectedFound: false, + }, + { + name: "digest collision", + deduplicated: false, + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + encodedKey: []byte{0x01}, + getKeyDigest: func(encodedKey []byte) uint64 { + require.Equal(t, []byte{0x01}, encodedKey) + return 2 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + require.Equal(t, uint32(1), keyIndex) + return []byte{0x02}, nil + }, + expectedDigest: SentinelFastDigest64, + expectedFound: false, + expectError: false, + }, + { + name: "duplicate key", + deduplicated: false, + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and 
revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + encodedKey: []byte{0x01}, + getKeyDigest: func(encodedKey []byte) uint64 { + require.Equal(t, []byte{0x01}, encodedKey) + return 3 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + require.Equal(t, uint32(2), keyIndex) + return []byte{0x01}, nil + }, + expectedDigest: 3, + expectedFound: true, + expectedDuplicateKeyIndex: 2, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + keyMetadata, err := NewKeyMetadataAppenderFromBytes(tc.data, tc.deduplicated, maxStoredDigests) + require.NoError(t, err) + + digest, found, duplicateStoredKeyIndex, err := FindDuplicateKey( + keyMetadata, + tc.encodedKey, + tc.getKeyDigest, + tc.getStoredKey, + ) + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.expectedDigest, digest) + require.Equal(t, tc.expectedFound, found) + require.Equal(t, tc.expectedDuplicateKeyIndex, duplicateStoredKeyIndex) + }) + } +} diff --git a/fvm/environment/account-key-metadata/encoder_util.go b/fvm/environment/account-key-metadata/encoder_util.go new file mode 100644 index 00000000000..373a67564b0 --- /dev/null +++ b/fvm/environment/account-key-metadata/encoder_util.go @@ -0,0 +1,109 @@ +package accountkeymetadata + +import ( + "encoding/binary" + + "github.com/onflow/flow-go/fvm/errors" +) + +const ( + lengthPrefixSize = 4 + runLengthSize = 2 + digestSize = 8 +) + +func parseWeightAndRevokedStatusFromKeyMetadataBytes(b []byte) ( + weightAndRevokedStatusBytes []byte, + rest []byte, + err error, +) { + if len(b) == 0 { + err = errors.NewKeyMetadataEmptyError("failed to parse weight and revoked status") + return + } + + return parseNextLengthPrefixedData(b) +} + +// parseStoredKeyMappingFromKeyMetadataBytes parses b and returns: +// start index for mapping, raw mapping bytes, trailing bytes, and error (if any). +// NOTE: b is expected to start with encoded start index for mapping. +func parseStoredKeyMappingFromKeyMetadataBytes(b []byte) ( + startIndexForMapping uint32, + mappingBytes []byte, + rest []byte, + err error, +) { + if len(b) < storedKeyIndexSize { + err = errors.NewKeyMetadataTooShortError( + "failed to parse start key index for mappings", + storedKeyIndexSize, + len(b), + ) + return + } + + // Get mapping start index + startIndexForMapping = binary.BigEndian.Uint32(b[:storedKeyIndexSize]) + + b = b[storedKeyIndexSize:] + + // Get mapping raw bytes + mappingBytes, rest, err = parseNextLengthPrefixedData(b) + return +} + +// parseDigestsFromKeyMetadataBytes parses b and returns: +// start index for digests, raw digest bytes, trailing bytes, and error (if any). +// NOTE: b is expected to start with encoded start index for digests. 
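All of the parsers above share one framing, implemented by parseNextLengthPrefixedData (defined later in this file): a 4-byte big-endian length followed by that many bytes, with anything left over returned as the remainder (parseDigestsFromKeyMetadataBytes, whose doc comment this follows, is defined next). A worked example on a concrete buffer:

	b := []byte{0, 0, 0, 2, 0xAA, 0xBB, 0xCC}
	next, rest, err := parseNextLengthPrefixedData(b)
	// err == nil, next == []byte{0xAA, 0xBB}, rest == []byte{0xCC}.
	// A buffer shorter than its declared length instead yields an error
	// from NewKeyMetadataTooShortError.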
+func parseDigestsFromKeyMetadataBytes(b []byte) ( + startIndexForDigests uint32, + digestBytes []byte, + rest []byte, + err error, +) { + if len(b) < storedKeyIndexSize { + err = errors.NewKeyMetadataTooShortError( + "failed to parse start key index for digests", + storedKeyIndexSize, + len(b), + ) + return + } + + // Get digest start index + startIndexForDigests = binary.BigEndian.Uint32(b[:storedKeyIndexSize]) + + b = b[storedKeyIndexSize:] + + // Get digests raw bytes + digestBytes, rest, err = parseNextLengthPrefixedData(b) + return +} + +func parseNextLengthPrefixedData(b []byte) (next []byte, rest []byte, err error) { + if len(b) < lengthPrefixSize { + return nil, nil, + errors.NewKeyMetadataTooShortError( + "failed to parse prefixed data", + lengthPrefixSize, + len(b), + ) + } + + length := binary.BigEndian.Uint32(b[:lengthPrefixSize]) + + // NOTE: here, int is always int64 (never int32) because this software cannot run on 32-bit platforms, + // so it is safe to cast length (uint32) to int (which is int64 on 64-bit platforms). + if len(b) < lengthPrefixSize+int(length) { + return nil, nil, + errors.NewKeyMetadataTooShortError( + "failed to parse length prefixed data", + lengthPrefixSize+int(length), + len(b), + ) + } + + b = b[lengthPrefixSize:] + return b[:length], b[length:], nil +} diff --git a/fvm/environment/account-key-metadata/key_index_mapping_group.go b/fvm/environment/account-key-metadata/key_index_mapping_group.go new file mode 100644 index 00000000000..6648e8a0203 --- /dev/null +++ b/fvm/environment/account-key-metadata/key_index_mapping_group.go @@ -0,0 +1,217 @@ +package accountkeymetadata + +import ( + "encoding/binary" + + "github.com/onflow/flow-go/fvm/errors" +) + +// Account Public Key Index to Stored Public Key Index Mappings + +// Key index mapping is encoded using RLE: +// - run length (2 bytes): consecutive group flag in high 1 bit and run length in 15 bits +// - value (4 bytes): stored key index +// NOTE: +// - If the number of elements in a run-length group exceeds maxRunLengthInMappingGroup, +// a new group is created with the remaining run-length and the same storedKeyIndex. +// - Consecutive groups are adjoining groups whose run-length is 1 and whose value increases by 1. +// - When the consecutive group flag is on, run-length is the number of consecutive groups, and value is the value of the first group. + +// We encode stored key indexes using a new variant of RLE designed to efficiently store +// long runs of unique integers that increment by 1. +// For example, suppose an account has 1000 keys where the first 2 keys are the same, followed +// by 998 unique keys (e.g., {key0, key0, key1, key2, ..., key998}). +// Using regular RLE would bloat the storage size by storing 999 mapping groups with run length 1. +// By comparison, our optimized encoding would only store 2 mapping groups. +// We support a maximum run length of 32767 because we use the low 15 bits of uint16 +// to store the run length (the high bit is used as a flag). + +const ( + maxRunLengthInMappingGroup = 1<<15 - 1 + + storedKeyIndexSize = 4 + mappingGroupSize = runLengthSize + storedKeyIndexSize + + consecutiveGroupFlagMask = 0x8000 + lengthMask = 0x7fff +) + +// getStoredKeyIndexFromMappings returns the stored key index of the given key index from encoded data. +// Received b is expected to only contain encoded mappings.
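A worked example of a single encoded mapping group, using the layout described above (getStoredKeyIndexFromMappings, which consumes these groups, follows below):

	b := []byte{0x80, 0x03, 0x00, 0x00, 0x00, 0x07}
	isConsecutive, runLength := parseMappingRunLength(b, 0) // high bit set: consecutive group, run length 3
	storedKeyIndex := parseMappingStoredKeyIndex(b, 2)      // value 7
	// This one group maps three account key indexes to stored key indexes
	// 7, 8 and 9. With the high bit clear (0x00, 0x03, ...), all three
	// would map to stored key index 7 instead.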
+func getStoredKeyIndexFromMappings(b []byte, keyIndex uint32) (uint32, error) { + remainingKeyIndex := keyIndex + + if len(b)%mappingGroupSize != 0 { + return 0, + errors.NewKeyMetadataUnexpectedLengthError( + "failed to get stored key index from mapping", + mappingGroupSize, + len(b), + ) + } + + for off := 0; off < len(b); off += mappingGroupSize { + isConsecutiveGroup, runLength := parseMappingRunLength(b, off) + + if remainingKeyIndex < uint32(runLength) { + storedKeyIndex := parseMappingStoredKeyIndex(b, off+runLengthSize) + + if isConsecutiveGroup { + return storedKeyIndex + remainingKeyIndex, nil + } + + return storedKeyIndex, nil + } + + remainingKeyIndex -= uint32(runLength) + } + + return 0, errors.NewKeyMetadataNotFoundError("failed to query stored key index from mapping", keyIndex) +} + +// appendStoredKeyIndexToMappings appends the given storedKeyIndex to the given encoded mappings (b). +// NOTE: b can be modified by this function. +func appendStoredKeyIndexToMappings(b []byte, storedKeyIndex uint32) (_ []byte, _ error) { + if len(b) == 0 { + return encodeKeyIndexToStoredKeyIndexMapping(false, 1, storedKeyIndex), nil + } + + if len(b)%mappingGroupSize != 0 { + return nil, + errors.NewKeyMetadataUnexpectedLengthError( + "failed to append stored key mapping", + mappingGroupSize, + len(b), + ) + } + + // Merge into the last group + lastGroupOff := len(b) - mappingGroupSize + lastGroup := parseMappingGroup(b, lastGroupOff) + + if lastGroup.TryMerge(storedKeyIndex) { + // Overwrite the last group in the given b since the + // given storedKeyIndex is merged into the last group. + b = append(b[:lastGroupOff], lastGroup.Encode()...) + return b, nil + } + + // Append new group + b = append(b, encodeKeyIndexToStoredKeyIndexMapping(false, 1, storedKeyIndex)...) + return b, nil +} + +// Utils + +type MappingGroup struct { + runLength uint16 + storedKeyIndex uint32 + consecutiveGroup bool +} + +func NewMappingGroup(runLength uint16, storedKeyIndex uint32, consecutive bool) *MappingGroup { + return &MappingGroup{ + runLength: runLength, + storedKeyIndex: storedKeyIndex, + consecutiveGroup: consecutive, + } +} + +// TryMerge returns true if the given storedKeyIndex is merged into g. +// The given storedKeyIndex is merged into regular group g if +// g's runLength is less than maxRunLengthInMappingGroup and +// either g's storedKeyIndex is the same as the given storedKeyIndex or +// g's runLength is 1 and g's storedKeyIndex + 1 is the same as the +// given storedKeyIndex. +// The given storedKeyIndex is merged into consecutive group g if +// g's runLength is less than maxRunLengthInMappingGroup and +// g's storedKeyIndex + g's runLength is the same as the given storedKeyIndex. +func (g *MappingGroup) TryMerge(storedKeyIndex uint32) bool { + if g.runLength == maxRunLengthInMappingGroup { + // Can't be merged because run length limit is reached. + return false + } + + if g.consecutiveGroup { + if g.storedKeyIndex+uint32(g.runLength) == storedKeyIndex { + // Merge into consecutive group + g.runLength++ + return true + } + // Can't be merged because new stored key index isn't consecutive. + return false + } + + if g.storedKeyIndex == storedKeyIndex { + // Merge into regular group + g.runLength++ + return true + } + + if g.runLength == 1 && g.storedKeyIndex+1 == storedKeyIndex { + // Convert the last group from a regular group to a consecutive group, + // and merge the given storedKeyIndex into it.
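+ // For example, a regular group {runLength: 1, storedKeyIndex: 4} merging + // storedKeyIndex 5 becomes the consecutive group {runLength: 2, storedKeyIndex: 4}, + // which covers stored key indexes 4 and 5.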
+ g.consecutiveGroup = true + g.runLength++ + return true + } + + return false +} + +func (g *MappingGroup) Encode() []byte { + return encodeKeyIndexToStoredKeyIndexMapping(g.consecutiveGroup, g.runLength, g.storedKeyIndex) +} + +type MappingGroups []*MappingGroup + +func (groups MappingGroups) Encode() []byte { + if len(groups) == 0 { + return nil + } + + buf := make([]byte, 0, len(groups)*(mappingGroupSize)) + for _, group := range groups { + buf = append(buf, group.Encode()...) + } + return buf +} + +func encodeKeyIndexToStoredKeyIndexMapping(isConsecutiveGroup bool, runLength uint16, storedKeyIndex uint32) []byte { + var b [mappingGroupSize]byte + + if isConsecutiveGroup { + runLength |= consecutiveGroupFlagMask + } + // Set runlength + binary.BigEndian.PutUint16(b[:], runLength) + + // Set value + binary.BigEndian.PutUint32(b[runLengthSize:], storedKeyIndex) + + return b[:] +} + +func parseMappingGroup(b []byte, off int) *MappingGroup { + _ = b[off+mappingGroupSize-1] // bounds check + isConsecutiveGroup, runLength := parseMappingRunLength(b, off) + storedKeyIndex := binary.BigEndian.Uint32(b[off+runLengthSize:]) + return &MappingGroup{ + runLength: runLength, + storedKeyIndex: storedKeyIndex, + consecutiveGroup: isConsecutiveGroup, + } +} + +func parseMappingRunLength(b []byte, off int) (isConsecutiveGroup bool, runLength uint16) { + _ = b[off+1] // bounds check + runLength = binary.BigEndian.Uint16(b[off : off+runLengthSize]) + isConsecutiveGroup = runLength&consecutiveGroupFlagMask > 0 + runLength &= lengthMask + return +} + +func parseMappingStoredKeyIndex(b []byte, off int) uint32 { + _ = b[off+3] // bounds check + return binary.BigEndian.Uint32(b[off : off+storedKeyIndexSize]) +} diff --git a/fvm/environment/account-key-metadata/key_index_mapping_group_test.go b/fvm/environment/account-key-metadata/key_index_mapping_group_test.go new file mode 100644 index 00000000000..ceff93a8577 --- /dev/null +++ b/fvm/environment/account-key-metadata/key_index_mapping_group_test.go @@ -0,0 +1,240 @@ +package accountkeymetadata + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/errors" +) + +func TestAppendAndGetStoredKeyIndexFromMapping(t *testing.T) { + t.Run("get from empty data", func(t *testing.T) { + _, err := getStoredKeyIndexFromMappings(nil, 0) + require.True(t, errors.IsKeyMetadataNotFoundError(err)) + }) + + t.Run("get from truncated data", func(t *testing.T) { + b := []byte{1} + + _, err := getStoredKeyIndexFromMappings(b, 1) + require.True(t, errors.IsKeyMetadataDecodingError(err)) + }) + + t.Run("append to truncated data", func(t *testing.T) { + b := []byte{1} + + _, err := appendStoredKeyIndexToMappings(b, 1) + require.True(t, errors.IsKeyMetadataDecodingError(err)) + }) + + testcases := []struct { + name string + mappings []uint32 + expected []byte + }{ + { + name: "1 group with run length 1", + mappings: []uint32{1}, + expected: []byte{ + 0, 1, 0, 0, 0, 1, + }, + }, + { + name: "2 groups with different run length", + mappings: []uint32{1, 1, 2}, + expected: []byte{ + 0, 2, 0, 0, 0, 1, + 0, 1, 0, 0, 0, 2, + }, + }, + { + name: "group value not consecutive", + mappings: []uint32{1, 3}, + expected: []byte{ + 0, 1, 0, 0, 0, 1, + 0, 1, 0, 0, 0, 3, + }, + }, + { + name: "consecutive group with run length 2", + mappings: []uint32{1, 2}, + expected: []byte{ + 0x80, 2, 0, 0, 0, 1, + }, + }, + { + name: "consecutive group with run length 3", + mappings: []uint32{1, 2, 3}, + expected: []byte{ + 0x80, 3, 0, 0, 0, 1, + }, + }, + { + name: 
"consecutive group followed by non-consecutive group", + mappings: []uint32{1, 2, 2}, + expected: []byte{ + 0x80, 2, 0, 0, 0, 1, + 0, 1, 0, 0, 0, 2, + }, + }, + { + name: "consecutive group followed by consecutive group", + mappings: []uint32{1, 2, 2, 3}, + expected: []byte{ + 0x80, 2, 0, 0, 0, 1, + 0x80, 2, 0, 0, 0, 2, + }, + }, + { + name: "consecutive groups mixed with non-consecutive groups", + mappings: []uint32{1, 3, 4, 5, 5, 5, 5, 6, 7, 7}, + expected: []byte{ + 0, 1, 0, 0, 0, 1, + 0x80, 3, 0, 0, 0, 3, + 0, 3, 0, 0, 0, 5, + 0x80, 2, 0, 0, 0, 6, + 0, 1, 0, 0, 0, 7, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + var b []byte + var err error + + for _, storedKeyIndex := range tc.mappings { + b, err = appendStoredKeyIndexToMappings(b, storedKeyIndex) + require.NoError(t, err) + } + require.Equal(t, tc.expected, b) + + for keyIndex, expectedStoredKeyIndex := range tc.mappings { + storedKeyIndex, err := getStoredKeyIndexFromMappings(b, uint32(keyIndex)) + require.NoError(t, err) + require.Equal(t, expectedStoredKeyIndex, storedKeyIndex) + } + }) + } + + t.Run("run length around max group count", func(t *testing.T) { + testcases := []struct { + name string + encodedExistingMappings []byte + mapping uint32 + expected []byte + expectedCount uint32 + expectedMapping uint32 + expectedStartMapping uint32 + isConsecutiveGroup bool + }{ + { + name: "regular group, run length maxRunLengthInMappingGroup - 1", + encodedExistingMappings: []byte{ + 0x7f, 0xfe, 0x00, 0x00, 0x00, 0x01, + }, + mapping: 1, + expected: []byte{ + 0x7f, 0xff, 0x00, 0x00, 0x00, 0x01, + }, + expectedCount: maxRunLengthInMappingGroup, + expectedMapping: 1, + }, + { + name: "regular group, run length maxRunLengthInMappingGroup", + encodedExistingMappings: []byte{ + 0x7f, 0xff, 0x00, 0x00, 0x00, 0x01, + }, + mapping: 1, + expected: []byte{ + 0x7f, 0xff, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + }, + expectedCount: maxRunLengthInMappingGroup + 1, + expectedMapping: 1, + }, + { + name: "regular group, run length maxRunLengthInMappingGroup + 1", + encodedExistingMappings: []byte{ + 0x7f, 0xff, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + }, + mapping: 1, + expected: []byte{ + 0x7f, 0xff, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, + }, + expectedCount: maxRunLengthInMappingGroup + 2, + expectedMapping: 1, + }, + { + name: "consecutive group, run length maxRunLengthInMappingGroup - 1", + encodedExistingMappings: []byte{ + 0xff, 0xfe, 0x00, 0x00, 0x00, 0x01, + }, + mapping: maxRunLengthInMappingGroup, + expected: []byte{ + 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, + }, + expectedCount: maxRunLengthInMappingGroup, + expectedStartMapping: 1, + isConsecutiveGroup: true, + }, + { + name: "consecutive group, run length maxRunLengthInMappingGroup", + encodedExistingMappings: []byte{ + 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, + }, + mapping: maxRunLengthInMappingGroup + 1, + expected: []byte{ + 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x01, 0x00, 0x00, 0x80, 0x00, + }, + expectedCount: maxRunLengthInMappingGroup + 1, + expectedStartMapping: 1, + isConsecutiveGroup: true, + }, + { + name: "consecutive group, run length maxRunLengthInMappingGroup + 1", + encodedExistingMappings: []byte{ + 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x01, 0x00, 0x00, 0x80, 0x00, + }, + mapping: maxRunLengthInMappingGroup + 2, + expected: []byte{ + 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, + 0x80, 0x02, 0x00, 0x00, 0x80, 0x00, + }, + expectedCount: maxRunLengthInMappingGroup + 
2, + expectedStartMapping: 1, + isConsecutiveGroup: true, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + + // Encode and append stored key index + b, err := appendStoredKeyIndexToMappings(tc.encodedExistingMappings, tc.mapping) + require.NoError(t, err) + require.Equal(t, tc.expected, b) + + // Get stored key index from mappings + if tc.isConsecutiveGroup { + for i := range tc.expectedCount { + retrievedStoredKeyIndex, err := getStoredKeyIndexFromMappings(b, i) + require.NoError(t, err) + require.Equal(t, tc.expectedStartMapping+i, retrievedStoredKeyIndex) + } + } else { + for i := range tc.expectedCount { + retrievedStoredKeyIndex, err := getStoredKeyIndexFromMappings(b, i) + require.NoError(t, err) + require.Equal(t, tc.expectedMapping, retrievedStoredKeyIndex) + } + } + }) + } + }) +} diff --git a/fvm/environment/account-key-metadata/metadata.go b/fvm/environment/account-key-metadata/metadata.go new file mode 100644 index 00000000000..f657abdf400 --- /dev/null +++ b/fvm/environment/account-key-metadata/metadata.go @@ -0,0 +1,348 @@ +package accountkeymetadata + +import ( + "bytes" + "encoding/binary" + "fmt" + "slices" + + "github.com/onflow/flow-go/fvm/errors" +) + +// GetRevokedStatus returns revoked status for account public key at the key index. +func GetRevokedStatus(b []byte, keyIndex uint32) (bool, error) { + // Key metadata only stores weight and revoked status for keys at index > 0. + + if keyIndex == 0 { + return false, errors.NewKeyMetadataUnexpectedKeyIndexError("failed to query revoked status", 0) + } + + weightAndRevokedStatusBytes, _, err := parseWeightAndRevokedStatusFromKeyMetadataBytes(b) + if err != nil { + return false, err + } + + revoked, _, err := getWeightAndRevokedStatus(weightAndRevokedStatusBytes, keyIndex-1) + return revoked, err +} + +// GetKeyMetadata returns weight, revoked status, and stored key index for given account public key index. +func GetKeyMetadata(b []byte, keyIndex uint32, deduplicated bool) ( + weight uint16, + revoked bool, + storedKeyIndex uint32, + err error, +) { + // Key metadata only stores weight and revoked status for keys at index > 0. + + if keyIndex == 0 { + err = errors.NewKeyMetadataUnexpectedKeyIndexError("failed to query key metadata", 0) + return + } + + // Get raw weight and revoked status bytes + weightAndRevokedStatusBytes, rest, err := parseWeightAndRevokedStatusFromKeyMetadataBytes(b) + if err != nil { + return 0, false, 0, err + } + + // Get weight and revoked status for given account key index + revoked, weight, err = getWeightAndRevokedStatus(weightAndRevokedStatusBytes, keyIndex-1) + if err != nil { + return 0, false, 0, err + } + + // If keys are not deduplicated, storedKeyIndex is the same as the given keyIndex. + if !deduplicated { + return weight, revoked, keyIndex, nil + } + + // Get raw key index mapping bytes + startIndexForMapping, mappingBytes, _, err := parseStoredKeyMappingFromKeyMetadataBytes(rest) + if err != nil { + return 0, false, 0, err + } + + // StoredKeyIndex is the same as the given keyIndex if deduplication happens afterwards. + if keyIndex < startIndexForMapping { + return weight, revoked, keyIndex, nil + } + + // Get stored key index from mapping + storedKeyIndex, err = getStoredKeyIndexFromMappings(mappingBytes, keyIndex-startIndexForMapping) + if err != nil { + return 0, false, 0, err + } + + return weight, revoked, storedKeyIndex, nil +} + +// SetRevokedStatus revokes key and returns encoded key metadata. +// NOTE: b may be modified. 
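The weight/revoked list that GetRevokedStatus and GetKeyMetadata read above is itself run-length encoded. Its decoder (getWeightAndRevokedStatus) lives outside this diff, but the test vectors in this change are consistent with each 4-byte group holding a 2-byte run length followed by a 2-byte field whose high bit is the revoked flag and whose low 15 bits are the weight. A hypothetical decoder for one group under that assumption (the SetRevokedStatus body itself follows below):

	import "encoding/binary"

	// decodeWeightAndRevokedGroup is illustrative only; the real helper is
	// defined elsewhere in this package, and the bit layout is inferred
	// from the test vectors (e.g. {0, 2, 3, 0xe8}: run length 2, weight
	// 1000, not revoked; {0, 1, 0x83, 0xe8}: run length 1, weight 1000, revoked).
	func decodeWeightAndRevokedGroup(group []byte) (runLength uint16, revoked bool, weight uint16) {
		runLength = binary.BigEndian.Uint16(group[0:2])
		wr := binary.BigEndian.Uint16(group[2:4])
		return runLength, wr&0x8000 != 0, wr & 0x7fff
	}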
+func SetRevokedStatus(b []byte, keyIndex uint32) ([]byte, error) { + // Key metadata only stores weight and revoked status for keys at index > 0. + + if keyIndex == 0 { + return nil, errors.NewKeyMetadataUnexpectedKeyIndexError("failed to set revoked status", 0) + } + + weightAndRevokedStatusBytes, rest, err := parseWeightAndRevokedStatusFromKeyMetadataBytes(b) + if err != nil { + return nil, err + } + + newWeightAndRevokedStatusBytes, err := setRevokedStatus(slices.Clone(weightAndRevokedStatusBytes), keyIndex-1) + if err != nil { + return nil, err + } + + newB := make([]byte, lengthPrefixSize+len(newWeightAndRevokedStatusBytes)+len(rest)) + off := 0 + + binary.BigEndian.PutUint32(newB, uint32(len(newWeightAndRevokedStatusBytes))) + off += 4 + + n := copy(newB[off:], newWeightAndRevokedStatusBytes) + off += n + + copy(newB[off:], rest) + return newB, nil +} + +type KeyMetadataAppender struct { + original []byte + weightAndRevokedStatusBytes []byte // copied + mappingBytes []byte // copied + digestBytes []byte // copied + startIndexForMapping uint32 + startIndexForDigests uint32 + maxStoredDigests uint32 + deduplicated bool +} + +func NewKeyMetadataAppender(key0Digest uint64, maxStoredDigests uint32) *KeyMetadataAppender { + keyMetadata := KeyMetadataAppender{ + maxStoredDigests: maxStoredDigests, + } + keyMetadata.appendDigest(key0Digest) + return &keyMetadata +} + +// NewKeyMetadataAppenderFromBytes returns a KeyMetadataAppender used to append new key metadata. +// NOTE: b can be modified. +func NewKeyMetadataAppenderFromBytes(b []byte, deduplicated bool, maxStoredDigests uint32) (*KeyMetadataAppender, error) { + if len(b) == 0 { + return nil, fmt.Errorf("failed to create KeyMetadataAppender with empty data: use NewKeyMetadataAppender() instead") + } + + keyMetadata := KeyMetadataAppender{ + original: b, + deduplicated: deduplicated, + maxStoredDigests: maxStoredDigests, + } + + var err error + + // Get revoked and weight raw bytes. + var weightAndRevokedStatusBytes []byte + weightAndRevokedStatusBytes, b, err = parseWeightAndRevokedStatusFromKeyMetadataBytes(b) + if err != nil { + return nil, err + } + keyMetadata.weightAndRevokedStatusBytes = slices.Clone(weightAndRevokedStatusBytes) + + // Get mapping raw bytes.
+ if deduplicated { + var mappingBytes []byte + keyMetadata.startIndexForMapping, mappingBytes, b, err = parseStoredKeyMappingFromKeyMetadataBytes(b) + if err != nil { + return nil, err + } + keyMetadata.mappingBytes = slices.Clone(mappingBytes) + } + + // Get digests list + var digestBytes []byte + keyMetadata.startIndexForDigests, digestBytes, b, err = parseDigestsFromKeyMetadataBytes(b) + if err != nil { + return nil, err + } + keyMetadata.digestBytes = slices.Clone(digestBytes) + + if len(b) != 0 { + return nil, + errors.NewKeyMetadataTrailingDataError( + "failed to parse key metadata", + len(b), + ) + } + + return &keyMetadata, nil +} + +// With deduplicated flag, account key metadata is encoded as: +// - length prefixed list of account public key weight and revoked status starting from key index 1 +// - startKeyIndex (4 bytes) + length prefixed list of account public key index mappings to stored key index +// - startStoredKeyIndex (4 bytes) + length prefixed list of last N stored key digests +// +// Without deduplicated flag, account key metadata is encoded as: +// - length prefixed list of account public key weight and revoked status starting from key index 1 +// - startStoredKeyIndex (4 bytes) + length prefixed list of last N stored key digests +func (m *KeyMetadataAppender) ToBytes() ([]byte, bool) { + size := lengthPrefixSize + len(m.weightAndRevokedStatusBytes) + // length prefixed weight and revoked status + storedKeyIndexSize + // start stored key index + lengthPrefixSize + len(m.digestBytes) // length prefixed digests + + if m.deduplicated { + size += storedKeyIndexSize + // start key index + lengthPrefixSize + len(m.mappingBytes) // length prefixed mappings + } + + var b []byte + if cap(m.original) >= size { + b = m.original[:size] + } else { + b = make([]byte, size) + } + + off := 0 + + // Encode length of encoded weight and revoked status + binary.BigEndian.PutUint32(b[off:], uint32(len(m.weightAndRevokedStatusBytes))) + off += 4 + + // Copy encoded weight and revoked status + n := copy(b[off:], m.weightAndRevokedStatusBytes) + off += n + + // Encode mapping if deduplication is on + if m.deduplicated { + // Encode account public key start index for mapping + binary.BigEndian.PutUint32(b[off:], m.startIndexForMapping) + off += 4 + + // Encode length of encoded mapping + binary.BigEndian.PutUint32(b[off:], uint32(len(m.mappingBytes))) + off += 4 + + // Copy encoded mapping + n := copy(b[off:], m.mappingBytes) + off += n + } + + // Encode digests + + // Encoded start index for digests + binary.BigEndian.PutUint32(b[off:], m.startIndexForDigests) + off += 4 + + // Encode length of encoded digests + binary.BigEndian.PutUint32(b[off:], uint32(len(m.digestBytes))) + off += 4 + + // Copy encoded digest + copy(b[off:], m.digestBytes) + + return b, m.deduplicated +} + +func (m *KeyMetadataAppender) appendDigest(digest uint64) { + digestCount := 1 + len(m.digestBytes)/digestSize + + if digestCount > int(m.maxStoredDigests) { + // Remove digest from front + removeCount := digestCount - int(m.maxStoredDigests) + m.digestBytes = slices.Delete(m.digestBytes, 0, removeCount*digestSize) + + // Adjust digest start index + m.startIndexForDigests += uint32(removeCount) + } + + var digestBytes [digestSize]byte + binary.BigEndian.PutUint64(digestBytes[:], digest) + + m.digestBytes = append(m.digestBytes, digestBytes[:]...) +} + +// AppendUniqueKeyMetadata appends unique key metadata. 
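Together with FindDuplicateKey from digest.go, the appender yields a simple two-path flow when a new account key is added. A sketch of how a caller might wire the pieces together (AppendUniqueKeyMetadata itself follows below; km, keyIndex, revoked, weight, encodedKey, and the two lookup callbacks are assumed to be supplied by the caller):

	digest, found, dupIndex, err := FindDuplicateKey(km, encodedKey, getKeyDigest, getStoredKey)
	if err != nil {
		return err
	}
	if found {
		// Reuse the already-stored key: record only weight/revoked and the mapping.
		err = km.AppendDuplicateKeyMetadata(keyIndex, dupIndex, revoked, weight)
	} else {
		// Store the key itself separately, then record its metadata and digest.
		// digest may be SentinelFastDigest64 after a hash collision, which simply
		// opts this key out of future deduplication.
		_, err = km.AppendUniqueKeyMetadata(revoked, weight, digest)
	}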
+func (m *KeyMetadataAppender) AppendUniqueKeyMetadata( + revoked bool, + weight uint16, + digest uint64, +) (storedKeyIndex uint32, err error) { + + storedKeyIndex = m.storedKeyCount() + + // Append revoked status and weight + m.weightAndRevokedStatusBytes, err = appendWeightAndRevokedStatus(m.weightAndRevokedStatusBytes, revoked, weight) + if err != nil { + return 0, err + } + + // Append digest + m.appendDigest(digest) + + if m.deduplicated { + // Append next stored key index to mapping + m.mappingBytes, err = appendStoredKeyIndexToMappings(m.mappingBytes, storedKeyIndex) + if err != nil { + return 0, err + } + } + + return storedKeyIndex, nil +} + +// AppendDuplicateKeyMetadata appends duplicate key metadata. +func (m *KeyMetadataAppender) AppendDuplicateKeyMetadata( + keyIndex uint32, + duplicateStoredKeyIndex uint32, + revoked bool, + weight uint16, +) (err error) { + + // Append revoked status and weight + m.weightAndRevokedStatusBytes, err = appendWeightAndRevokedStatus(m.weightAndRevokedStatusBytes, revoked, weight) + if err != nil { + return err + } + + if !m.deduplicated { + // Set deduplication flag + m.deduplicated = true + + // Save mapping start key index. + m.startIndexForMapping = keyIndex + } + + // Append duplicate stored key index to mapping + m.mappingBytes, err = appendStoredKeyIndexToMappings(m.mappingBytes, duplicateStoredKeyIndex) + if err != nil { + return err + } + + return nil +} + +func (m *KeyMetadataAppender) storedKeyCount() uint32 { + return m.startIndexForDigests + uint32(len(m.digestBytes)/digestSize) +} + +// findDuplicateDigest returns true and stored key index with duplicate digest +// if the given digest has a match in stored digests in key metadata section. +func (m *KeyMetadataAppender) findDuplicateDigest(digest uint64) (found bool, duplicateStoredKeyIndex uint32) { + if len(m.digestBytes) == 0 { + return false, 0 + } + + var digestBytes [digestSize]byte + binary.BigEndian.PutUint64(digestBytes[:], digest) + + for off, i := 0, uint32(0); off < len(m.digestBytes); off, i = off+digestSize, i+1 { + if bytes.Equal(digestBytes[:], m.digestBytes[off:off+digestSize]) { + return true, m.startIndexForDigests + i + } + } + + return false, 0 +} diff --git a/fvm/environment/account-key-metadata/metadata_test.go b/fvm/environment/account-key-metadata/metadata_test.go new file mode 100644 index 00000000000..dd57802a200 --- /dev/null +++ b/fvm/environment/account-key-metadata/metadata_test.go @@ -0,0 +1,643 @@ +package accountkeymetadata + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +const maxStoredDigests = 2 + +func TestGetRevokedStatusFromKeyMetadataBytes(t *testing.T) { + testcases := []struct { + name string + data []byte + expectedRevokedStatus map[uint32]bool + }{ + { + name: "1 public key (1 revoked group)", + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: false, + }, + }, + { + name: "2 public keys (1 revoked group)", + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: 
false, + 2: false, + }, + }, + { + name: "2 public keys (2 revoked groups)", + data: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x80, 0x01, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: false, + 2: true, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + for keyIndex, expected := range tc.expectedRevokedStatus { + revoked, err := GetRevokedStatus(tc.data, keyIndex) + require.NoError(t, err) + require.Equal(t, expected, revoked) + } + }) + } +} + +func TestSetRevokedStatusInKeyMetadata(t *testing.T) { + testcases := []struct { + name string + data []byte + keyIndexToRevoke uint32 + expected []byte + }{ + { + name: "revoke key in run length 1 group", + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + }, + keyIndexToRevoke: 1, + expected: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 0x83, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + }, + }, + { + name: "revoke first key in run length 2 group", + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 1, + expected: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 0x83, 0xe8, // weight and revoked group 1 + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + }, + { + name: "revoke second key in run length 2 group", + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 2, + expected: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x83, 0xe8, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + }, + { + name: "revoke key in run length 1 group (cannot merge with next group)", + data: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x80, 0x01, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 1, + expected: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 0x83, 
0xe8, // weight and revoked group 1 + 0, 1, 0x80, 0x01, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + }, + { + name: "no-op revoke", + data: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x80, 0x01, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 2, + expected: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x80, 0x01, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + }, + { + name: "revoke first key in run length 3 group (no previous group)", + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 3, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 1, + expected: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 0x83, 0xe8, // weight and revoked group 1 + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + }, + { + name: "revoke second key in run length 3 group", + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 3, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 2, + expected: []byte{ + 0, 0, 0, 0x0c, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x83, 0xe8, // weight and revoked group 1 + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + }, + { + name: "revoke last key in run length 3 group", + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 3, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 3, + expected: []byte{ + 0, 0, 0, 0x8, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x83, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + newData, err := SetRevokedStatus(tc.data, tc.keyIndexToRevoke) + require.NoError(t, err) + require.Equal(t, tc.expected, newData) + }) + } +} + +func TestGetKeyMetadata(t *testing.T) { + type keyMetadata struct { + weight uint16 + revoked bool + 
storedKeyIndex uint32 + } + + testcases := []struct { + name string + deduplicated bool + data []byte + expectedKeyMetadata map[uint32]keyMetadata + }{ + { + name: "not deduplicated, 2 account public keys", + deduplicated: false, + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + }, + expectedKeyMetadata: map[uint32]keyMetadata{ + 1: { + weight: 1000, + revoked: false, + storedKeyIndex: 1, + }, + }, + }, + { + name: "not deduplicated, 3 account public keys", + deduplicated: false, + data: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 1, 0x80, 0x01, // weight and revoked group + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + expectedKeyMetadata: map[uint32]keyMetadata{ + 1: { + weight: 1000, + revoked: false, + storedKeyIndex: 1, + }, + 2: { + weight: 1, + revoked: true, + storedKeyIndex: 2, + }, + }, + }, + { + name: "deduplicated, 2 account public keys, 1 stored key, deduplication from key at index 1", + deduplicated: true, + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0, 1, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 8, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + }, + expectedKeyMetadata: map[uint32]keyMetadata{ + 1: { + weight: 1000, + revoked: false, + storedKeyIndex: 0, + }, + }, + }, + { + name: "deduplicated, 3 account public keys, 2 stored keys, deduplication from key at index 1", + deduplicated: true, + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0x80, 2, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + }, + expectedKeyMetadata: map[uint32]keyMetadata{ + 1: { + weight: 1000, + revoked: false, + storedKeyIndex: 0, + }, + 2: { + weight: 1000, + revoked: false, + storedKeyIndex: 1, + }, + }, + }, + { + name: "deduplicated, 4 account public keys, 2 stored keys, deduplication from key at index 2", + deduplicated: true, + data: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group + 0, 1, 0x80, 0x01, // weight and revoked group + 0, 0, 0, 2, // start index for mapping + 0, 0, 0, 0x06, // length prefix for mapping + 0, 2, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 2, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + 0, 0, 0, 0, 0, 0, 0, 4, // digest 4 + }, + expectedKeyMetadata: map[uint32]keyMetadata{ + 1: { + weight: 1000, + revoked: false, + storedKeyIndex: 1, + }, + 2: { + weight: 1000, + revoked: false, + storedKeyIndex: 0, + }, + 3: { + weight: 1, + revoked: true, + storedKeyIndex: 0, + }, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + for keyIndex, expected := range 
tc.expectedKeyMetadata { + weight, revoked, storedKeyIndex, err := GetKeyMetadata(tc.data, keyIndex, tc.deduplicated) + require.NoError(t, err) + require.Equal(t, expected.weight, weight) + require.Equal(t, expected.revoked, revoked) + require.Equal(t, expected.storedKeyIndex, storedKeyIndex) + } + }) + } +} + +func TestAppendUniqueKeyMetadata(t *testing.T) { + testcases := []struct { + name string + deduplicated bool + data []byte + key0Digest uint64 + revoked bool + weight uint16 + digest uint64 + expectedData []byte + expectedStoredKeyIndex uint32 + expectedDeduplicated bool + }{ + { + name: "not deduplicated, append new key_1 to {key_0}", + deduplicated: false, + data: []byte{}, + key0Digest: 1, + revoked: false, + weight: 1000, + digest: 2, + expectedData: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + }, + expectedStoredKeyIndex: 1, + expectedDeduplicated: false, + }, + { + name: "not deduplicated, append new key_2 to {key_0, key_1}", + deduplicated: false, + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + }, + revoked: false, + weight: 1, + digest: 3, + expectedData: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0, 1, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + 0, 0, 0, 0, 0, 0, 0, 3, // key_2 digest + }, + expectedStoredKeyIndex: 2, + expectedDeduplicated: false, + }, + { + name: "deduplicated, append new key_1 to {key_0, key_0}", + deduplicated: true, + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0, 1, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 8, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + }, + revoked: false, + weight: 1000, + digest: 3, + expectedData: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0x80, 2, 0, 0, 0, 0, // mapping group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 3, // key_1 digest + }, + expectedStoredKeyIndex: 1, + expectedDeduplicated: true, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + var km *KeyMetadataAppender + var err error + + if len(tc.data) == 0 { + km = NewKeyMetadataAppender(tc.key0Digest, maxStoredDigests) + } else { + km, err = NewKeyMetadataAppenderFromBytes(tc.data, tc.deduplicated, maxStoredDigests) + require.NoError(t, err) + } + + storedKeyIndex, err := km.AppendUniqueKeyMetadata(tc.revoked, tc.weight, tc.digest) + require.NoError(t, err) + require.Equal(t, tc.expectedStoredKeyIndex, storedKeyIndex) + + newData, deduplicated := km.ToBytes() + require.Equal(t, 
tc.expectedData, newData) + require.Equal(t, tc.expectedDeduplicated, deduplicated) + }) + } +} + +func TestAppendDuplicateKeyMetadata(t *testing.T) { + testcases := []struct { + name string + deduplicated bool + data []byte + key0Digest uint64 + revoked bool + weight uint16 + keyIndex uint32 + duplicateStoredKeyIndex uint32 + expectedData []byte + expectedDeduplicated bool + }{ + { + name: "not deduplicated, append key_0 to {key_0}", + deduplicated: false, + data: []byte{}, + key0Digest: 1, + revoked: false, + weight: 1000, + keyIndex: 1, + duplicateStoredKeyIndex: 0, + expectedData: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0, 1, 0, 0, 0, 0, // mapping group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 8, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + }, + expectedDeduplicated: true, + }, + { + name: "not deduplicated, append key_0 to {key_0, key_1}", + deduplicated: false, + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + }, + revoked: false, + weight: 1, + keyIndex: 2, + duplicateStoredKeyIndex: 0, + expectedData: []byte{ + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0, 1, // weight and revoked group 2 + 0, 0, 0, 2, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0, 1, 0, 0, 0, 0, // mapping group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + }, + expectedDeduplicated: true, + }, + { + name: "deduplicated, append key_0 to {key_0, key_0}", + deduplicated: true, + data: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0, 1, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 8, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + }, + revoked: false, + weight: 1000, + keyIndex: 2, + duplicateStoredKeyIndex: 0, + expectedData: []byte{ + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0, 2, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 8, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + }, + expectedDeduplicated: true, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + var km *KeyMetadataAppender + var err error + + if len(tc.data) == 0 { + km = NewKeyMetadataAppender(tc.key0Digest, maxStoredDigests) + } else { + km, err = NewKeyMetadataAppenderFromBytes(tc.data, tc.deduplicated, maxStoredDigests) + require.NoError(t, err) + } + + err = km.AppendDuplicateKeyMetadata(tc.keyIndex, tc.duplicateStoredKeyIndex, tc.revoked, tc.weight) + require.NoError(t, err) + + newData, deduplicated := km.ToBytes() + require.Equal(t, tc.expectedData, newData) + require.Equal(t, tc.expectedDeduplicated, deduplicated) + }) + } +} diff --git 
a/fvm/environment/account-key-metadata/weight_and_revoked_group.go b/fvm/environment/account-key-metadata/weight_and_revoked_group.go new file mode 100644 index 00000000000..707746c1f55 --- /dev/null +++ b/fvm/environment/account-key-metadata/weight_and_revoked_group.go @@ -0,0 +1,321 @@ +package accountkeymetadata + +import ( + "encoding/binary" + "math" + "slices" + + "github.com/onflow/flow-go/fvm/errors" +) + +// Account public key weight and revoked status are encoded using RLE: +// - run length (2 bytes) +// - value (2 bytes): revoked status is the high bit and weight is the remaining 15 bits. +// NOTE: if the number of elements in a run-length group exceeds maxRunLengthInWeightAndRevokedStatusGroup, +// a new group is created with the remaining run-length and the same weight and revoked status. + +const ( + maxRunLengthInWeightAndRevokedStatusGroup = math.MaxUint16 + + weightAndRevokedStatusSize = 2 + weightAndRevokedStatusIndex = runLengthSize + weightAndRevokedStatusGroupSize = runLengthSize + weightAndRevokedStatusSize + + revokedMask = 0x8000 + weightMask = 0x7fff +) + +// getWeightAndRevokedStatus returns the weight and revoked status of the given keyIndex from encoded data. +// The received b is expected to contain only encoded weight and revoked status groups. +func getWeightAndRevokedStatus(b []byte, keyIndex uint32) (bool, uint16, error) { + if len(b)%weightAndRevokedStatusGroupSize != 0 { + return false, 0, + errors.NewKeyMetadataUnexpectedLengthError( + "failed to get weight and revoked status", + weightAndRevokedStatusGroupSize, + len(b), + ) + } + + remainingKeyIndex := keyIndex + + for off := 0; off < len(b); off += weightAndRevokedStatusGroupSize { + runLength := parseRunLength(b, off) + + if remainingKeyIndex < uint32(runLength) { + revoked, weight := parseWeightAndRevokedStatus(b, off+runLengthSize) + return revoked, weight, nil + } + + remainingKeyIndex -= uint32(runLength) + } + + return false, 0, errors.NewKeyMetadataNotFoundError("failed to query weight and revoked status", keyIndex) +} + +// appendWeightAndRevokedStatus appends the given weight and revoked status to the given data. +// The new weight and revoked status can be appended as a new run-length group, or included in +// the last group by incrementing that group's run-length. +// NOTE: this function can modify the given data. +func appendWeightAndRevokedStatus(b []byte, revoked bool, weight uint16) ([]byte, error) { + if len(b) == 0 { + return encodeWeightAndRevokedStatusGroup(1, revoked, weight), nil + } + + if len(b)%weightAndRevokedStatusGroupSize != 0 { + return nil, + errors.NewKeyMetadataUnexpectedLengthError( + "failed to append weight and revoked status", + weightAndRevokedStatusGroupSize, + len(b), + ) + } + + // Merge into the last group if possible + lastGroupOff := len(b) - weightAndRevokedStatusGroupSize + lastGroup := parseWeightAndRevokedStatusGroup(b, lastGroupOff) + + if lastGroup.tryMerge(revoked, weight) { + b = append(b[:lastGroupOff], lastGroup.encode()...) + return b, nil + } + + b = append(b, encodeWeightAndRevokedStatusGroup(1, revoked, weight)...) + return b, nil +} + +// setRevokedStatus sets revoked status for the given index in the given data. +// NOTE: this function can modify the given data.
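To make the documented layout concrete: each RLE group is 4 bytes, a big-endian run length followed by a value whose high bit is the revoked flag and whose low 15 bits are the weight. A minimal decoding sketch using the masks defined in this file (the group bytes are taken from the test vectors elsewhere in this diff):

	// Decode one RLE group: run length 3, i.e. three revoked keys of weight 1000.
	group := []byte{0x00, 0x03, 0x83, 0xe8}
	runLength := binary.BigEndian.Uint16(group[:2]) // 3
	value := binary.BigEndian.Uint16(group[2:])     // 0x83e8
	revoked := value&revokedMask != 0               // true: high bit is set
	weight := value & weightMask                    // 1000 (0x03e8)

This is also why setRevokedStatus below may split a group: per the tests, revoking index 1 in {0, 3, 0x03, 0xe8} yields {0, 1, 0x03, 0xe8, 0, 1, 0x83, 0xe8, 0, 1, 0x03, 0xe8}.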
+func setRevokedStatus(b []byte, index uint32) ([]byte, error) { + if len(b)%weightAndRevokedStatusGroupSize != 0 { + return nil, + errors.NewKeyMetadataUnexpectedLengthError( + "failed to set revoked status", + weightAndRevokedStatusGroupSize, + len(b), + ) + } + + foundGroup := false + curOff := 0 + for ; curOff < len(b); curOff += weightAndRevokedStatusGroupSize { + runLength := parseRunLength(b, curOff) + + // When this loop exits, index is guaranteed to be less than math.MaxUint16 (65535) + // because run length is a uint16. + if index < uint32(runLength) { + foundGroup = true + break + } + + index -= uint32(runLength) + } + + if !foundGroup { + return nil, errors.NewKeyMetadataNotFoundError("failed to set revoked status", index) + } + + curGroup := parseWeightAndRevokedStatusGroup(b, curOff) + + if curGroup.revoked { + // Revoked status is already true + return b, nil + } + + isFirstElementInGroup := index == 0 + isLastElementInGroup := index+1 == uint32(curGroup.runLength) + + // Set revoked status by splitting current group into multiple groups + newGroups := curGroup.setRevoke(uint16(index)) + + // Return early if there is only one group. + if len(b) == weightAndRevokedStatusGroupSize { + encodedNewGroups := encodeWeightAndRevokedStatusGroups(newGroups) + return encodedNewGroups, nil + } + + startOff := curOff + endOff := curOff + weightAndRevokedStatusGroupSize + + // Try to merge with previous group + if isFirstElementInGroup && curOff > 0 { + prevGroupOff := curOff - weightAndRevokedStatusGroupSize + prevGroup := parseWeightAndRevokedStatusGroup(b, prevGroupOff) + + firstNewGroup := newGroups[0] + if merged, modifiedFirstNewGroup := prevGroup.tryMergeGroup(firstNewGroup); merged { + startOff = prevGroupOff + + if modifiedFirstNewGroup == nil { + // First new group is completely merged with previous group, + // so replace first new group with previous group to be re-encoded. + newGroups[0] = prevGroup + } else { + // First new group is partially merged with previous group, + // so include both previous group and modified first new group to be re-encoded. + newGroups[0] = modifiedFirstNewGroup + newGroups = slices.Insert(newGroups, 0, prevGroup) + } + } + } + + // Try to merge with next group + if isLastElementInGroup { + nextGroupOff := curOff + weightAndRevokedStatusGroupSize + if nextGroupOff < len(b) { + nextGroup := parseWeightAndRevokedStatusGroup(b, nextGroupOff) + + lastNewGroup := newGroups[len(newGroups)-1] + if merged, modifiedNextGroup := lastNewGroup.tryMergeGroup(nextGroup); merged { + endOff = nextGroupOff + weightAndRevokedStatusGroupSize + + if modifiedNextGroup != nil { + // Next group is partially merged with new groups, + // so include modified next group in new groups to be re-encoded. + newGroups = append(newGroups, modifiedNextGroup) + } + } + } + } + + encodedNewGroups := encodeWeightAndRevokedStatusGroups(newGroups) + + if startOff == 0 && endOff == len(b) { + return encodedNewGroups, nil + } + + newSize := len(b) - (endOff - startOff) + len(encodedNewGroups) + newBuffer := make([]byte, 0, newSize) + newBuffer = append(newBuffer, b[:startOff]...) + newBuffer = append(newBuffer, encodedNewGroups...) + newBuffer = append(newBuffer, b[endOff:]...)
+ + return newBuffer, nil +} + +// Utils + +type weightAndRevokedStatusGroup struct { + runLength uint16 + weight uint16 + revoked bool +} + +func newWeightAndRevokedStatusGroup(runLength uint16, weight uint16, revoked bool) *weightAndRevokedStatusGroup { + return &weightAndRevokedStatusGroup{ + runLength: runLength, + weight: weight, + revoked: revoked, + } +} + +func (g *weightAndRevokedStatusGroup) tryMerge(revoked bool, weight uint16) bool { + if g.revoked != revoked || g.weight != weight || g.runLength == maxRunLengthInWeightAndRevokedStatusGroup { + return false + } + g.runLength++ + return true +} + +// tryMergeGroup returns true and nil if next group is completely merged into g. +// It returns true and the modified next group if next group is partially merged into g. +// It returns false and the unmodified next group if the merge fails. +// NOTE: next can be modified. +func (g *weightAndRevokedStatusGroup) tryMergeGroup(next *weightAndRevokedStatusGroup) (bool, *weightAndRevokedStatusGroup) { + // Current group reached run length limit + if g.runLength == maxRunLengthInWeightAndRevokedStatusGroup { + return false, next + } + + // Groups have different values. + if g.revoked != next.revoked || g.weight != next.weight { + return false, next + } + + totalRunLength := uint32(g.runLength) + uint32(next.runLength) + + // Merge second group into first group + if totalRunLength <= maxRunLengthInWeightAndRevokedStatusGroup { + g.runLength += next.runLength + return true, nil + } + + // Rebalance groups + g.runLength = maxRunLengthInWeightAndRevokedStatusGroup + next.runLength = uint16(totalRunLength - maxRunLengthInWeightAndRevokedStatusGroup) + return true, next +} + +func (g *weightAndRevokedStatusGroup) encode() []byte { + return encodeWeightAndRevokedStatusGroup(g.runLength, g.revoked, g.weight) +} + +func (g *weightAndRevokedStatusGroup) setRevoke(index uint16) []*weightAndRevokedStatusGroup { + if g.runLength == 1 { + return []*weightAndRevokedStatusGroup{newWeightAndRevokedStatusGroup(1, g.weight, true)} + } + + groups := make([]*weightAndRevokedStatusGroup, 0, 3) + + // Create group before revoked index + if index > 0 { + groups = append(groups, newWeightAndRevokedStatusGroup(index, g.weight, g.revoked)) + } + + // Create group for the revoked index + groups = append(groups, newWeightAndRevokedStatusGroup(1, g.weight, true)) + + // Create group after revoked index + if index+1 < g.runLength { + groups = append(groups, newWeightAndRevokedStatusGroup(g.runLength-index-1, g.weight, g.revoked)) + } + + return groups +} + +func encodeWeightAndRevokedStatusGroups(weightAndRevoked []*weightAndRevokedStatusGroup) []byte { + buf := make([]byte, 0, len(weightAndRevoked)*(weightAndRevokedStatusGroupSize)) + + for _, g := range weightAndRevoked { + b := g.encode() + buf = append(buf, b...)
+ } + + return buf +} + +func encodeWeightAndRevokedStatusGroup(runLength uint16, revoked bool, weight uint16) []byte { + var b [weightAndRevokedStatusGroupSize]byte + + // Set run length + binary.BigEndian.PutUint16(b[:], runLength) + + // Set value + value := weight + if revoked { + value |= revokedMask + } + binary.BigEndian.PutUint16(b[weightAndRevokedStatusIndex:], value) + + return b[:] +} + +func parseWeightAndRevokedStatusGroup(b []byte, off int) *weightAndRevokedStatusGroup { + _ = b[off+weightAndRevokedStatusGroupSize-1] // bounds check + runLength := parseRunLength(b, off) + revoked, weight := parseWeightAndRevokedStatus(b, off+runLengthSize) + return newWeightAndRevokedStatusGroup(runLength, weight, revoked) +} + +func parseRunLength(b []byte, off int) uint16 { + _ = b[off+1] // bounds check + return binary.BigEndian.Uint16(b[off:]) +} + +func parseWeightAndRevokedStatus(b []byte, off int) (revoked bool, weight uint16) { + _ = b[off+1] // bounds check + weightAndRevoked := binary.BigEndian.Uint16(b[off:]) + weight = weightAndRevoked & weightMask + revoked = (weightAndRevoked & revokedMask) > 0 + return revoked, weight +} diff --git a/fvm/environment/account-key-metadata/weight_and_revoked_group_test.go b/fvm/environment/account-key-metadata/weight_and_revoked_group_test.go new file mode 100644 index 00000000000..74d07c0ac27 --- /dev/null +++ b/fvm/environment/account-key-metadata/weight_and_revoked_group_test.go @@ -0,0 +1,669 @@ +package accountkeymetadata + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/errors" +) + +type weightAndRevokedStatus struct { + weight uint16 + revoked bool +} + +func TestAppendAndGetWeightAndRevokedStatus(t *testing.T) { + t.Run("get from empty data", func(t *testing.T) { + _, _, err := getWeightAndRevokedStatus(nil, 0) + require.True(t, errors.IsKeyMetadataNotFoundError(err)) + }) + + t.Run("get from truncated data", func(t *testing.T) { + b := []byte{1} + + _, _, err := getWeightAndRevokedStatus(b, 1) + require.True(t, errors.IsKeyMetadataDecodingError(err)) + }) + + t.Run("append to truncated data", func(t *testing.T) { + b := []byte{1} + + _, err := appendWeightAndRevokedStatus(b, false, 1) + require.True(t, errors.IsKeyMetadataDecodingError(err)) + }) + + // Some of the test cases are from migration test TestAccountPublicKeyWeightsAndRevokedStatusSerizliation + // in cmd/util/ledger/migrations/account_key_deduplication_encoder_test.go + testcases := []struct { + name string + status []weightAndRevokedStatus + expected []byte + }{ + { + name: "one group, run length 1", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: false}, + }, + expected: []byte{0, 1, 0x03, 0xe8}, + }, + { + name: "one group, run length 1, revoked", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: true}, + }, + expected: []byte{0, 1, 0x83, 0xe8}, + }, + { + name: "one group, run length 3", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + }, + expected: []byte{0, 3, 0x83, 0xe8}, + }, + { + name: "three groups, run length 1", + status: []weightAndRevokedStatus{ + {weight: 1, revoked: false}, + {weight: 2, revoked: false}, + {weight: 2, revoked: true}, + }, + expected: []byte{ + 0, 1, 0, 1, + 0, 1, 0, 2, + 0, 1, 0x80, 2, + }, + }, + { + name: "three groups, different run length", + status: []weightAndRevokedStatus{ + {weight: 1, revoked: false}, + {weight: 1, revoked: false}, + {weight: 2, revoked: true}, + {weight: 3,
revoked: true}, + {weight: 3, revoked: true}, + }, + expected: []byte{ + 0, 2, 0, 1, + 0, 1, 0x80, 2, + 0, 2, 0x80, 3, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + var b []byte + var err error + + // Encode and append status + for _, s := range tc.status { + b, err = appendWeightAndRevokedStatus(b, s.revoked, s.weight) + require.NoError(t, err) + } + require.Equal(t, tc.expected, b) + + // Get revoked and weight status + for i, s := range tc.status { + revoked, weight, err := getWeightAndRevokedStatus(b, uint32(i)) + require.NoError(t, err) + require.Equal(t, s.revoked, revoked) + require.Equal(t, s.weight, weight) + } + + _, _, err = getWeightAndRevokedStatus(b, uint32(len(tc.status))) + require.True(t, errors.IsKeyMetadataNotFoundError(err)) + }) + } + + t.Run("run length around max group count", func(t *testing.T) { + testcases := []struct { + name string + status weightAndRevokedStatus + count uint32 + expected []byte + }{ + { + name: "run length maxRunLengthInWeightAndRevokedStatusGroup - 1", + status: weightAndRevokedStatus{weight: 1000, revoked: true}, + count: maxRunLengthInWeightAndRevokedStatusGroup - 1, + expected: []byte{ + 0xff, 0xfe, 0x83, 0xe8, + }, + }, + { + name: "run length maxRunLengthInWeightAndRevokedStatusGroup", + status: weightAndRevokedStatus{weight: 1000, revoked: true}, + count: maxRunLengthInWeightAndRevokedStatusGroup, + expected: []byte{ + 0xff, 0xff, 0x83, 0xe8, + }, + }, + { + name: "run length maxRunLengthInWeightAndRevokedStatusGroup + 1", + status: weightAndRevokedStatus{weight: 1000, revoked: true}, + count: maxRunLengthInWeightAndRevokedStatusGroup + 1, + expected: []byte{ + 0xff, 0xff, 0x83, 0xe8, + 0x00, 0x01, 0x83, 0xe8, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + status := make([]weightAndRevokedStatus, tc.count) + for i := range len(status) { + status[i] = tc.status + } + + var b []byte + var err error + + // Encode and append status + for _, s := range status { + b, err = appendWeightAndRevokedStatus(b, s.revoked, s.weight) + require.NoError(t, err) + } + require.Equal(t, tc.expected, b) + + // Get revoked and weight status + for i, s := range status { + revoked, weight, err := getWeightAndRevokedStatus(b, uint32(i)) + require.NoError(t, err) + require.Equal(t, s.revoked, revoked) + require.Equal(t, s.weight, weight) + } + }) + } + }) +} + +func TestSetRevokeInWeightAndRevokedStatus(t *testing.T) { + testcases := []struct { + name string + status []weightAndRevokedStatus + expected []byte + index uint32 + }{ + { + name: "revoke in run-length 1 group", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: false}, + }, + expected: []byte{0, 1, 0x83, 0xe8}, + index: 0, + }, + { + name: "no-op revoke in run-length 1 group", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: true}, + }, + expected: []byte{0, 1, 0x83, 0xe8}, + index: 0, + }, + { + name: "revoke first of run-length 2 group (no prev group)", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + }, + expected: []byte{0, 1, 0x83, 0xe8, 0, 1, 0x03, 0xe8}, + index: 0, + }, + { + name: "revoke second of run-length 2 group (no next group)", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + }, + expected: []byte{0, 1, 0x03, 0xe8, 0, 1, 0x83, 0xe8}, + index: 1, + }, + { + name: "no-op revoke first of run-length 2 group", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: true}, + {weight: 1000,
revoked: true}, + }, + expected: []byte{0, 2, 0x83, 0xe8}, + index: 0, + }, + { + name: "no-op revoke second of run-length 2 group", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + }, + expected: []byte{0, 2, 0x83, 0xe8}, + index: 1, + }, + { + name: "revoke first of run-length 3 group (no prev group)", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + }, + expected: []byte{0, 1, 0x83, 0xe8, 0, 2, 0x03, 0xe8}, + index: 0, + }, + { + name: "revoke second of run-length 3 group", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + }, + expected: []byte{0, 1, 0x03, 0xe8, 0, 1, 0x83, 0xe8, 0, 1, 0x03, 0xe8}, + index: 1, + }, + { + name: "revoke third of run-length 3 group (no next group)", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + }, + expected: []byte{0, 2, 0x03, 0xe8, 0, 1, 0x83, 0xe8}, + index: 2, + }, + { + name: "no-op revoke first of run-length 3 group", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + }, + expected: []byte{0, 3, 0x83, 0xe8}, + index: 0, + }, + { + name: "no-op revoke second of run-length 3 group", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + }, + expected: []byte{0, 3, 0x83, 0xe8}, + index: 1, + }, + { + name: "no-op revoke last of run-length 3 group", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + }, + expected: []byte{0, 3, 0x83, 0xe8}, + index: 2, + }, + { + name: "revoke first of run-length 2 group (cannot merge with previous group)", + status: []weightAndRevokedStatus{ + {weight: 1, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + }, + expected: []byte{0, 1, 0, 1, 0, 1, 0x83, 0xe8, 0, 1, 0x03, 0xe8}, + index: 1, + }, + { + name: "revoke first of run-length 2 group (merge with previous group)", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: true}, + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + }, + expected: []byte{0, 2, 0x83, 0xe8, 0, 1, 0x03, 0xe8}, + index: 1, + }, + { + name: "revoke second of run-length 2 group (can't merge with next group)", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1, revoked: false}, + }, + expected: []byte{0, 1, 0x03, 0xe8, 0, 1, 0x83, 0xe8, 0, 1, 0, 1}, + index: 1, + }, + { + name: "revoke second of run-length 2 group (merge with next group)", + status: []weightAndRevokedStatus{ + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1000, revoked: true}, + }, + expected: []byte{0, 1, 0x03, 0xe8, 0, 2, 0x83, 0xe8}, + index: 1, + }, + { + name: "revoke middle of a large group", + status: []weightAndRevokedStatus{ + {weight: 1, revoked: false}, + {weight: 1, revoked: false}, + {weight: 1, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1, revoked: false}, + {weight: 1, revoked: false}, + {weight: 1, revoked: false}, + }, + expected: []byte{0, 3, 0, 1, 0, 2, 0x03, 0xe8,
0, 1, 0x83, 0xe8, 0, 1, 0x03, 0xe8, 0, 3, 0, 1}, + index: 5, + }, + { + name: "revoke in run-length 1 group (cannot merge with previous and next groups)", + status: []weightAndRevokedStatus{ + {weight: 1, revoked: false}, + {weight: 1, revoked: false}, + {weight: 1, revoked: false}, + {weight: 1000, revoked: false}, + {weight: 1, revoked: false}, + {weight: 1, revoked: false}, + {weight: 1, revoked: false}, + }, + expected: []byte{0, 3, 0, 1, 0, 1, 0x83, 0xe8, 0, 3, 0, 1}, + index: 3, + }, + { + name: "revoke in run-length 1 group (merge with both previous and next groups)", + status: []weightAndRevokedStatus{ + {weight: 1, revoked: false}, + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + {weight: 1000, revoked: false}, + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + {weight: 1000, revoked: true}, + {weight: 1, revoked: false}, + }, + expected: []byte{0, 1, 0, 1, 0, 7, 0x83, 0xe8, 0, 1, 0, 1}, + index: 4, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + var b []byte + var err error + + // Encode and append status + for _, s := range tc.status { + b, err = appendWeightAndRevokedStatus(b, s.revoked, s.weight) + require.NoError(t, err) + } + + // Revoke at given index + b, err = setRevokedStatus(b, tc.index) + require.NoError(t, err) + require.Equal(t, tc.expected, b) + + // Get revoked and weight status + for i, s := range tc.status { + revoked, weight, err := getWeightAndRevokedStatus(b, uint32(i)) + require.NoError(t, err) + if uint32(i) == tc.index { + require.Equal(t, true, revoked) + } else { + require.Equal(t, s.revoked, revoked) + } + require.Equal(t, s.weight, weight) + } + }) + } + + t.Run("can't merge with previous group due to run length limit", func(t *testing.T) { + status := make([]weightAndRevokedStatus, maxRunLengthInWeightAndRevokedStatusGroup) + for i := range len(status) { + status[i] = weightAndRevokedStatus{weight: 1000, revoked: true} + } + status = append(status, weightAndRevokedStatus{weight: 1000, revoked: false}) + status = append(status, weightAndRevokedStatus{weight: 1000, revoked: false}) + + revokeIndex := uint32(maxRunLengthInWeightAndRevokedStatusGroup) + + expected := []byte{ + 0xff, 0xff, 0x83, 0xe8, + 0x00, 0x02, 0x03, 0xe8, + } + + expectedAfterRevoke := []byte{ + 0xff, 0xff, 0x83, 0xe8, + 0x00, 0x01, 0x83, 0xe8, + 0x00, 0x01, 0x03, 0xe8, + } + + var b []byte + var err error + + // Encode and append status + for _, s := range status { + b, err = appendWeightAndRevokedStatus(b, s.revoked, s.weight) + require.NoError(t, err) + } + require.Equal(t, expected, b) + + // Revoke at given index + b, err = setRevokedStatus(b, revokeIndex) + require.NoError(t, err) + require.Equal(t, expectedAfterRevoke, b) + + // Get revoked and weight status + for i, s := range status { + revoked, weight, err := getWeightAndRevokedStatus(b, uint32(i)) + require.NoError(t, err) + if uint32(i) == revokeIndex { + require.Equal(t, true, revoked) + } else { + require.Equal(t, s.revoked, revoked) + } + require.Equal(t, s.weight, weight) + } + }) + + t.Run("merge with previous group at run length limit", func(t *testing.T) { + status := make([]weightAndRevokedStatus, maxRunLengthInWeightAndRevokedStatusGroup-1) + for i := range len(status) { + status[i] = weightAndRevokedStatus{weight: 1000, revoked: true} + } + status = append(status, weightAndRevokedStatus{weight: 1000, revoked: false}) + status = append(status, weightAndRevokedStatus{weight: 1000, revoked: false}) + + revokeIndex 
:= uint32(maxRunLengthInWeightAndRevokedStatusGroup - 1) + + expected := []byte{ + 0xff, 0xfe, 0x83, 0xe8, + 0x00, 0x02, 0x03, 0xe8, + } + + expectedAfterRevoke := []byte{ + 0xff, 0xff, 0x83, 0xe8, + 0x00, 0x01, 0x03, 0xe8, + } + + var b []byte + var err error + + // Encode and append status + for _, s := range status { + b, err = appendWeightAndRevokedStatus(b, s.revoked, s.weight) + require.NoError(t, err) + } + require.Equal(t, expected, b) + + // Revoke at given index + b, err = setRevokedStatus(b, revokeIndex) + require.NoError(t, err) + require.Equal(t, expectedAfterRevoke, b) + + // Get revoked and weight status + for i, s := range status { + revoked, weight, err := getWeightAndRevokedStatus(b, uint32(i)) + require.NoError(t, err) + if uint32(i) == revokeIndex { + require.Equal(t, true, revoked) + } else { + require.Equal(t, s.revoked, revoked) + } + require.Equal(t, s.weight, weight) + } + }) + + t.Run("partially merge with next group", func(t *testing.T) { + status := make([]weightAndRevokedStatus, maxRunLengthInWeightAndRevokedStatusGroup+2) + status[0] = weightAndRevokedStatus{weight: 1000, revoked: false} + status[1] = weightAndRevokedStatus{weight: 1000, revoked: false} + for i := 2; i < len(status); i++ { + status[i] = weightAndRevokedStatus{weight: 1000, revoked: true} + } + + revokeIndex := uint32(1) + + expected := []byte{ + 0x00, 0x02, 0x03, 0xe8, + 0xff, 0xff, 0x83, 0xe8, + } + + expectedAfterRevoke := []byte{ + 0x00, 0x01, 0x03, 0xe8, + 0xff, 0xff, 0x83, 0xe8, + 0x00, 0x01, 0x83, 0xe8, + } + + var b []byte + var err error + + // Encode and append status + for _, s := range status { + b, err = appendWeightAndRevokedStatus(b, s.revoked, s.weight) + require.NoError(t, err) + } + require.Equal(t, expected, b) + + // Revoke at given index + b, err = setRevokedStatus(b, revokeIndex) + require.NoError(t, err) + require.Equal(t, expectedAfterRevoke, b) + + // Get revoked and weight status + for i, s := range status { + revoked, weight, err := getWeightAndRevokedStatus(b, uint32(i)) + require.NoError(t, err) + if uint32(i) == revokeIndex { + require.Equal(t, true, revoked) + } else { + require.Equal(t, s.revoked, revoked) + } + require.Equal(t, s.weight, weight) + } + }) + + t.Run("cannot merge with previous group due to run length limit, partially merge with next group", func(t *testing.T) { + status := make([]weightAndRevokedStatus, 0, 2*maxRunLengthInWeightAndRevokedStatusGroup+1) + for range maxRunLengthInWeightAndRevokedStatusGroup { + status = append(status, weightAndRevokedStatus{weight: 1000, revoked: true}) + } + status = append(status, weightAndRevokedStatus{weight: 1000, revoked: false}) + for range maxRunLengthInWeightAndRevokedStatusGroup { + status = append(status, weightAndRevokedStatus{weight: 1000, revoked: true}) + } + + revokeIndex := uint32(maxRunLengthInWeightAndRevokedStatusGroup) + + expected := []byte{ + 0xff, 0xff, 0x83, 0xe8, + 0x00, 0x01, 0x03, 0xe8, + 0xff, 0xff, 0x83, 0xe8, + } + + expectedAfterRevoke := []byte{ + 0xff, 0xff, 0x83, 0xe8, + 0xff, 0xff, 0x83, 0xe8, + 0x00, 0x01, 0x83, 0xe8, + } + + var b []byte + var err error + + // Encode and append status + for _, s := range status { + b, err = appendWeightAndRevokedStatus(b, s.revoked, s.weight) + require.NoError(t, err) + } + require.Equal(t, expected, b) + + // Revoke at given index + b, err = setRevokedStatus(b, revokeIndex) + require.NoError(t, err) + require.Equal(t, expectedAfterRevoke, b) + + // Get revoked and weight status + for i, s := range status { + revoked, weight, err := 
getWeightAndRevokedStatus(b, uint32(i)) + require.NoError(t, err) + if uint32(i) == revokeIndex { + require.Equal(t, true, revoked) + } else { + require.Equal(t, s.revoked, revoked) + } + require.Equal(t, s.weight, weight) + } + }) + + t.Run("merge with previous group and next group", func(t *testing.T) { + status := make([]weightAndRevokedStatus, maxRunLengthInWeightAndRevokedStatusGroup-15) + for i := range len(status) { + status[i] = weightAndRevokedStatus{weight: 1000, revoked: true} + } + status = append(status, weightAndRevokedStatus{weight: 1000, revoked: false}) + for range 14 { + status = append(status, weightAndRevokedStatus{weight: 1000, revoked: true}) + } + + revokeIndex := uint32(maxRunLengthInWeightAndRevokedStatusGroup - 15) + + expected := []byte{ + 0xff, 0xf0, 0x83, 0xe8, + 0x00, 0x01, 0x03, 0xe8, + 0x00, 0x0e, 0x83, 0xe8, + } + + expectedAfterRevoke := []byte{ + 0xff, 0xff, 0x83, 0xe8, + } + + var b []byte + var err error + + // Encode and append status + for _, s := range status { + b, err = appendWeightAndRevokedStatus(b, s.revoked, s.weight) + require.NoError(t, err) + } + require.Equal(t, expected, b) + + // Revoke at given index + b, err = setRevokedStatus(b, revokeIndex) + require.NoError(t, err) + require.Equal(t, expectedAfterRevoke, b) + + // Get revoked and weight status + for i, s := range status { + revoked, weight, err := getWeightAndRevokedStatus(b, uint32(i)) + require.NoError(t, err) + if uint32(i) == revokeIndex { + require.Equal(t, true, revoked) + } else { + require.Equal(t, s.revoked, revoked) + } + require.Equal(t, s.weight, weight) + } + }) +} diff --git a/fvm/environment/account_creator.go b/fvm/environment/account_creator.go index 07612384d2c..f0212bdbd5b 100644 --- a/fvm/environment/account_creator.go +++ b/fvm/environment/account_creator.go @@ -3,7 +3,7 @@ package environment import ( "fmt" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage/state" @@ -12,12 +12,6 @@ import ( "github.com/onflow/flow-go/module/trace" ) -const ( - FungibleTokenAccountIndex = 2 - FlowTokenAccountIndex = 3 - FlowFeesAccountIndex = 4 -) - type AddressGenerator interface { Bytes() []byte NextAddress() (flow.Address, error) @@ -259,14 +253,19 @@ func (creator *accountCreator) CreateAccount( ) { defer creator.tracer.StartChildSpan(trace.FVMEnvCreateAccount).End() - err := creator.meter.MeterComputation(ComputationKindCreateAccount, 1) + err := creator.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindCreateAccount, + Intensity: 1, + }, + ) if err != nil { return common.Address{}, err } // don't enforce limit during account creation var address flow.Address - creator.txnState.RunWithAllLimitsDisabled(func() { + creator.txnState.RunWithMeteringDisabled(func() { address, err = creator.createAccount(flow.ConvertAddress(runtimePayer)) }) diff --git a/fvm/environment/account_info.go b/fvm/environment/account_info.go index 6af26a1940b..81a90106da9 100644 --- a/fvm/environment/account_info.go +++ b/fvm/environment/account_info.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/onflow/cadence" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/storage/state" "github.com/onflow/flow-go/fvm/tracing" @@ -15,12 +15,14 @@ import ( // AccountInfo exposes various account balance and storage statistics. type AccountInfo interface { // Cadence's runtime APIs. 
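One recurring pattern in the environment changes that follow: every metering call site gets the same mechanical rewrite, from the old two-argument MeterComputation(kind, 1) to a single common.ComputationUsage value. A rough sketch of the shape, with stand-in types rather than the actual Cadence/flow-go declarations:

	// Stand-ins for the real types; only the call shape is the point here.
	type ComputationKind uint64

	type ComputationUsage struct {
		Kind      ComputationKind
		Intensity uint64
	}

	type Meter interface {
		MeterComputation(usage ComputationUsage) error
	}

	func meterAccountCreation(m Meter, kind ComputationKind) error {
		// Old style (removed in this diff): m.MeterComputation(kind, 1)
		return m.MeterComputation(ComputationUsage{Kind: kind, Intensity: 1})
	}

Bundling kind and intensity into one struct lets callers pass richer usage values later without another signature change.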
- GetStorageUsed(runtimeaddress common.Address) (uint64, error) + GetStorageUsed(runtimeAddress common.Address) (uint64, error) GetStorageCapacity(runtimeAddress common.Address) (uint64, error) GetAccountBalance(runtimeAddress common.Address) (uint64, error) GetAccountAvailableBalance(runtimeAddress common.Address) (uint64, error) GetAccount(address flow.Address) (*flow.Account, error) + GetAccountKeys(address flow.Address) ([]flow.AccountPublicKey, error) + GetAccountKeyByIndex(address flow.Address, index uint32) (*flow.AccountPublicKey, error) } type ParseRestrictedAccountInfo struct { @@ -103,6 +105,35 @@ func (info ParseRestrictedAccountInfo) GetAccount( address) } +func (info ParseRestrictedAccountInfo) GetAccountKeys( + address flow.Address, +) ( + []flow.AccountPublicKey, + error, +) { + return parseRestrict1Arg1Ret( + info.txnState, + trace.FVMEnvGetAccountKeys, + info.impl.GetAccountKeys, + address) +} + +func (info ParseRestrictedAccountInfo) GetAccountKeyByIndex( + address flow.Address, + index uint32, +) ( + *flow.AccountPublicKey, + error, +) { + return parseRestrict2Arg1Ret( + info.txnState, + trace.FVMEnvGetAccountKey, + info.impl.GetAccountKeyByIndex, + address, + index, + ) +} + type accountInfo struct { tracer tracing.TracerSpan meter Meter @@ -137,7 +168,12 @@ func (info *accountInfo) GetStorageUsed( ) { defer info.tracer.StartChildSpan(trace.FVMEnvGetStorageUsed).End() - err := info.meter.MeterComputation(ComputationKindGetStorageUsed, 1) + err := info.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindGetStorageUsed, + Intensity: 1, + }, + ) if err != nil { return 0, fmt.Errorf("get storage used failed: %w", err) } @@ -152,11 +188,11 @@ func (info *accountInfo) GetStorageUsed( } // StorageMBUFixToBytesUInt converts the return type of storage capacity which -// is a UFix64 with the unit of megabytes to UInt with the unit of bytes +// is a UFix64 with the unit of megabytes to UInt with the unit of bytes. 
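The divide-by-100 in the function just below is the composition of two unit conversions: a UFix64 carries 8 fractional decimal digits (a scale of 1e8), and a megabyte here is 1e6 bytes, so bytes = raw / 1e8 * 1e6 = raw / 100, rounded down by the integer division. A quick worked check, assuming the raw fixed-point representation of cadence.UFix64:

	raw := uint64(150_000_000) // 1.5 MB as a UFix64 raw value: 1.5 * 1e8
	bytesUsed := raw / 100     // 1_500_000 bytes = 1.5 * 1e6, rounded down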
func StorageMBUFixToBytesUInt(result cadence.Value) uint64 { // Divide the unsigned int by (1e8 (the scale of Fix64) / 1e6 (for mega)) // to get bytes (rounded down) - return result.ToGoValue().(uint64) / 100 + return uint64(result.(cadence.UFix64) / 100) } func (info *accountInfo) GetStorageCapacity( @@ -167,7 +203,12 @@ func (info *accountInfo) GetStorageCapacity( ) { defer info.tracer.StartChildSpan(trace.FVMEnvGetStorageCapacity).End() - err := info.meter.MeterComputation(ComputationKindGetStorageCapacity, 1) + err := info.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindGetStorageCapacity, + Intensity: 1, + }, + ) if err != nil { return 0, fmt.Errorf("get storage capacity failed: %w", err) } @@ -192,17 +233,22 @@ func (info *accountInfo) GetAccountBalance( ) { defer info.tracer.StartChildSpan(trace.FVMEnvGetAccountBalance).End() - err := info.meter.MeterComputation(ComputationKindGetAccountBalance, 1) + err := info.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindGetAccountBalance, + Intensity: 1, + }, + ) if err != nil { return 0, fmt.Errorf("get account balance failed: %w", err) } - result, invokeErr := info.systemContracts.AccountBalance( - flow.ConvertAddress(runtimeAddress)) + result, invokeErr := info.systemContracts.AccountBalance(flow.ConvertAddress(runtimeAddress)) if invokeErr != nil { return 0, invokeErr } - return result.ToGoValue().(uint64), nil + + return uint64(result.(cadence.UFix64)), nil } func (info *accountInfo) GetAccountAvailableBalance( @@ -215,18 +261,21 @@ func (info *accountInfo) GetAccountAvailableBalance( trace.FVMEnvGetAccountAvailableBalance).End() err := info.meter.MeterComputation( - ComputationKindGetAccountAvailableBalance, - 1) + common.ComputationUsage{ + Kind: ComputationKindGetAccountAvailableBalance, + Intensity: 1, + }, + ) if err != nil { return 0, fmt.Errorf("get account available balance failed: %w", err) } - result, invokeErr := info.systemContracts.AccountAvailableBalance( - flow.ConvertAddress(runtimeAddress)) + result, invokeErr := info.systemContracts.AccountAvailableBalance(flow.ConvertAddress(runtimeAddress)) if invokeErr != nil { return 0, invokeErr } - return result.ToGoValue().(uint64), nil + + return uint64(result.(cadence.UFix64)), nil } func (info *accountInfo) GetAccount( @@ -254,3 +303,38 @@ func (info *accountInfo) GetAccount( return account, nil } + +func (info *accountInfo) GetAccountKeys( + address flow.Address, +) ( + []flow.AccountPublicKey, + error, +) { + defer info.tracer.StartChildSpan(trace.FVMEnvGetAccountKeys).End() + + accountKeys, err := info.accounts.GetAccountPublicKeys(address) + + if err != nil { + return nil, err + } + + return accountKeys, nil +} + +func (info *accountInfo) GetAccountKeyByIndex( + address flow.Address, + index uint32, +) ( + *flow.AccountPublicKey, + error, +) { + defer info.tracer.StartChildSpan(trace.FVMEnvGetAccountKey).End() + + accountKey, err := info.accounts.GetAccountPublicKey(address, index) + + if err != nil { + return nil, err + } + + return &accountKey, nil +} diff --git a/fvm/environment/account_key_reader.go b/fvm/environment/account_key_reader.go index 82ee3333cdf..7144ea1f171 100644 --- a/fvm/environment/account_key_reader.go +++ b/fvm/environment/account_key_reader.go @@ -3,8 +3,8 @@ package environment import ( "fmt" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" "github.com/onflow/flow-go/fvm/crypto" "github.com/onflow/flow-go/fvm/errors" @@ -23,12 
+23,12 @@ type AccountKeyReader interface { // exist, the provided index is not valid, or if the key retrieval fails. GetAccountKey( runtimeAddress common.Address, - keyIndex int, + keyIndex uint32, ) ( *runtime.AccountKey, error, ) - AccountKeysCount(runtimeAddress common.Address) (uint64, error) + AccountKeysCount(runtimeAddress common.Address) (uint32, error) } type ParseRestrictedAccountKeyReader struct { @@ -48,7 +48,7 @@ func NewParseRestrictedAccountKeyReader( func (reader ParseRestrictedAccountKeyReader) GetAccountKey( runtimeAddress common.Address, - keyIndex int, + keyIndex uint32, ) ( *runtime.AccountKey, error, @@ -64,7 +64,7 @@ func (reader ParseRestrictedAccountKeyReader) GetAccountKey( func (reader ParseRestrictedAccountKeyReader) AccountKeysCount( runtimeAddress common.Address, ) ( - uint64, + uint32, error, ) { return parseRestrict1Arg1Ret( @@ -96,7 +96,7 @@ func NewAccountKeyReader( func (reader *accountKeyReader) GetAccountKey( runtimeAddress common.Address, - keyIndex int, + keyIndex uint32, ) ( *runtime.AccountKey, error, @@ -107,22 +107,22 @@ func (reader *accountKeyReader) GetAccountKey( return nil, fmt.Errorf("getting account key failed: %w", err) } - err := reader.meter.MeterComputation(ComputationKindGetAccountKey, 1) + err := reader.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindGetAccountKey, + Intensity: 1, + }, + ) if err != nil { return formatErr(err) } - // Don't return an error for invalid key indices - if keyIndex < 0 { - return nil, nil - } - address := flow.ConvertAddress(runtimeAddress) // address verification is also done in this step - accountPublicKey, err := reader.accounts.GetPublicKey( + accountPublicKey, err := reader.accounts.GetRuntimeAccountPublicKey( address, - uint64(keyIndex)) + keyIndex) if err != nil { // If a key is not found at a given index, then return a nil key with // no errors. This is to be inline with the Cadence runtime. 
Otherwise, @@ -147,27 +147,34 @@ func (reader *accountKeyReader) GetAccountKey( func (reader *accountKeyReader) AccountKeysCount( runtimeAddress common.Address, ) ( - uint64, + uint32, error, ) { defer reader.tracer.StartChildSpan(trace.FVMEnvAccountKeysCount).End() - formatErr := func(err error) (uint64, error) { + formatErr := func(err error) (uint32, error) { return 0, fmt.Errorf("fetching account key count failed: %w", err) } - err := reader.meter.MeterComputation(ComputationKindAccountKeysCount, 1) + err := reader.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindAccountKeysCount, + Intensity: 1, + }, + ) if err != nil { return formatErr(err) } // address verification is also done in this step - return reader.accounts.GetPublicKeyCount( + keyCount, err := reader.accounts.GetAccountPublicKeyCount( flow.ConvertAddress(runtimeAddress)) + + return keyCount, err } func FlowToRuntimeAccountKey( - flowKey flow.AccountPublicKey, + flowKey flow.RuntimeAccountPublicKey, ) ( *runtime.AccountKey, error, diff --git a/fvm/environment/account_key_reader_test.go b/fvm/environment/account_key_reader_test.go index 8f91f7c1ec1..7a71a11cb2e 100644 --- a/fvm/environment/account_key_reader_test.go +++ b/fvm/environment/account_key_reader_test.go @@ -4,12 +4,13 @@ import ( "testing" "testing/quick" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" testMock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/environment/mock" "github.com/onflow/flow-go/fvm/tracing" @@ -17,11 +18,11 @@ import ( func newDummyAccountKeyReader( t *testing.T, - keyCount uint64, + keyCount uint32, ) environment.AccountKeyReader { tracer := tracing.NewTracerSpan() meter := mock.NewMeter(t) - meter.On("MeterComputation", testMock.Anything, testMock.Anything).Return(nil) + meter.On("MeterComputation", testMock.Anything).Return(nil) accounts := &FakeAccounts{keyCount: keyCount} return environment.NewAccountKeyReader(tracer, meter, accounts) } @@ -36,7 +37,7 @@ func TestKeyConversionValidAlgorithms(t *testing.T) { t.Run("invalid hash algo", func(t *testing.T) { t.Parallel() - accountKey := FakePublicKey{}.toAccountPublicKey() + accountKey := FakePublicKey{}.toRuntimeAccountPublicKey() accountKey.HashAlgo = hash.UnknownHashingAlgorithm rtKey, err := environment.FlowToRuntimeAccountKey(accountKey) @@ -47,7 +48,7 @@ func TestKeyConversionValidAlgorithms(t *testing.T) { t.Run("invalid sign algo", func(t *testing.T) { t.Parallel() - accountKey := FakePublicKey{}.toAccountPublicKey() + accountKey := FakePublicKey{}.toRuntimeAccountPublicKey() accountKey.SignAlgo = crypto.UnknownSigningAlgorithm rtKey, err := environment.FlowToRuntimeAccountKey(accountKey) @@ -58,7 +59,7 @@ func TestKeyConversionValidAlgorithms(t *testing.T) { t.Run("valid key", func(t *testing.T) { t.Parallel() - accountKey := FakePublicKey{}.toAccountPublicKey() + accountKey := FakePublicKey{}.toRuntimeAccountPublicKey() rtKey, err := environment.FlowToRuntimeAccountKey(accountKey) require.NoError(t, err) @@ -75,7 +76,7 @@ func TestAccountKeyReader_get_valid_key(t *testing.T) { require.NoError(t, err) expected, err := environment.FlowToRuntimeAccountKey( - FakePublicKey{}.toAccountPublicKey(), + FakePublicKey{}.toRuntimeAccountPublicKey(), ) require.NoError(t, err) @@ -89,7 +90,7 @@ 
func TestAccountKeyReader_get_out_of_range(t *testing.T) { res, err := newDummyAccountKeyReader(t, 0).GetAccountKey(address, 1000) // GetAccountKey should distinguish between an invalid index, and issues like failing to fetch a key from storage - require.Nil(t, err) + require.NoError(t, err) require.Nil(t, res) } @@ -97,8 +98,8 @@ func TestAccountKeyReader_get_key_count(t *testing.T) { t.Parallel() address := bytesToAddress(1, 2, 3, 4) - identity := func(n uint64) (uint64, error) { return n, nil } - prop := func(n uint64) (uint64, error) { + identity := func(n uint32) (uint32, error) { return n, nil } + prop := func(n uint32) (uint32, error) { return newDummyAccountKeyReader(t, n).AccountKeysCount(address) } diff --git a/fvm/environment/account_key_updater.go b/fvm/environment/account_key_updater.go index 96c601cb1aa..70679e98b44 100644 --- a/fvm/environment/account_key_updater.go +++ b/fvm/environment/account_key_updater.go @@ -4,12 +4,13 @@ import ( "encoding/hex" "fmt" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/sema" + "github.com/onflow/cadence/sema" + + fgcrypto "github.com/onflow/crypto" + fghash "github.com/onflow/crypto/hash" - fgcrypto "github.com/onflow/flow-go/crypto" - fghash "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/fvm/crypto" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage/state" @@ -22,7 +23,7 @@ import ( // public key. func NewAccountPublicKey(publicKey *runtime.PublicKey, hashAlgo sema.HashAlgorithm, - keyIndex int, + keyIndex uint32, weight int, ) ( *flow.AccountPublicKey, @@ -80,28 +81,6 @@ func NewAccountPublicKey(publicKey *runtime.PublicKey, // Note that scripts cannot modify account keys, but must expose the API in // compliance with the runtime environment interface. type AccountKeyUpdater interface { - // AddEncodedAccountKey adds an encoded public key to an existing account. - // - // This function returns an error if the specified account does not exist or - // if the key insertion fails. - // - // Note that the script variant will return OperationNotSupportedError. - AddEncodedAccountKey(runtimeAddress common.Address, publicKey []byte) error - - // RevokeEncodedAccountKey revokes a public key by index from an existing - // account. - // - // This function returns an error if the specified account does not exist, - // the provided key is invalid, or if key revoking fails. - // - // Note that the script variant will return OperationNotSupportedError. - RevokeEncodedAccountKey( - runtimeAddress common.Address, - index int, - ) ( - []byte, - error, - ) // AddAccountKey adds a public key to an existing account. // @@ -130,7 +109,7 @@ type AccountKeyUpdater interface { // Note that the script variant will return OperationNotSupportedError. 
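Two threads run through the updater changes that follow: key indices switch from int to uint32, which removes the old `if keyIndex < 0` special cases outright, and revocation becomes a single storage-level call instead of a get/mutate/set round trip. A before-and-after sketch with a stand-in accounts interface (not the actual flow-go one):

	// Old shape (deleted below): read the key, flip Revoked, write it back.
	//   key, _ := accounts.GetPublicKey(address, uint64(keyIndex))
	//   key.Revoked = true
	//   accounts.SetPublicKey(address, uint64(keyIndex), key)
	type Accounts interface {
		// New shape: one dedicated revocation call; uint32 indices make
		// negative values unrepresentable at the call site.
		RevokeAccountPublicKey(address [8]byte, keyIndex uint32) error
	}

	func revoke(accounts Accounts, address [8]byte, keyIndex uint32) error {
		return accounts.RevokeAccountPublicKey(address, keyIndex)
	}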
RevokeAccountKey( runtimeAddress common.Address, - keyIndex int, + keyIndex uint32, ) ( *runtime.AccountKey, error, @@ -152,33 +131,6 @@ func NewParseRestrictedAccountKeyUpdater( } } -func (updater ParseRestrictedAccountKeyUpdater) AddEncodedAccountKey( - runtimeAddress common.Address, - publicKey []byte, -) error { - return parseRestrict2Arg( - updater.txnState, - trace.FVMEnvAddEncodedAccountKey, - updater.impl.AddEncodedAccountKey, - runtimeAddress, - publicKey) -} - -func (updater ParseRestrictedAccountKeyUpdater) RevokeEncodedAccountKey( - runtimeAddress common.Address, - index int, -) ( - []byte, - error, -) { - return parseRestrict2Arg1Ret( - updater.txnState, - trace.FVMEnvRevokeEncodedAccountKey, - updater.impl.RevokeEncodedAccountKey, - runtimeAddress, - index) -} - func (updater ParseRestrictedAccountKeyUpdater) AddAccountKey( runtimeAddress common.Address, publicKey *runtime.PublicKey, @@ -200,7 +152,7 @@ func (updater ParseRestrictedAccountKeyUpdater) AddAccountKey( func (updater ParseRestrictedAccountKeyUpdater) RevokeAccountKey( runtimeAddress common.Address, - keyIndex int, + keyIndex uint32, ) ( *runtime.AccountKey, error, @@ -215,23 +167,6 @@ func (updater ParseRestrictedAccountKeyUpdater) RevokeAccountKey( type NoAccountKeyUpdater struct{} -func (NoAccountKeyUpdater) AddEncodedAccountKey( - runtimeAddress common.Address, - publicKey []byte, -) error { - return errors.NewOperationNotSupportedError("AddEncodedAccountKey") -} - -func (NoAccountKeyUpdater) RevokeEncodedAccountKey( - runtimeAddress common.Address, - index int, -) ( - []byte, - error, -) { - return nil, errors.NewOperationNotSupportedError("RevokeEncodedAccountKey") -} - func (NoAccountKeyUpdater) AddAccountKey( runtimeAddress common.Address, publicKey *runtime.PublicKey, @@ -246,7 +181,7 @@ func (NoAccountKeyUpdater) AddAccountKey( func (NoAccountKeyUpdater) RevokeAccountKey( runtimeAddress common.Address, - keyIndex int, + keyIndex uint32, ) ( *runtime.AccountKey, error, @@ -302,7 +237,7 @@ func (updater *accountKeyUpdater) addAccountKey( errors.NewAccountNotFoundError(address)) } - keyIndex, err := updater.accounts.GetPublicKeyCount(address) + keyIndex, err := updater.accounts.GetAccountPublicKeyCount(address) if err != nil { return nil, fmt.Errorf("adding account key failed: %w", err) } @@ -310,13 +245,13 @@ func (updater *accountKeyUpdater) addAccountKey( accountPublicKey, err := NewAccountPublicKey( publicKey, hashAlgo, - int(keyIndex), + keyIndex, weight) if err != nil { return nil, fmt.Errorf("adding account key failed: %w", err) } - err = updater.accounts.AppendPublicKey(address, *accountPublicKey) + err = updater.accounts.AppendAccountPublicKey(address, *accountPublicKey) if err != nil { return nil, fmt.Errorf("adding account key failed: %w", err) } @@ -341,7 +276,7 @@ func (updater *accountKeyUpdater) addAccountKey( // can be separated into another method func (updater *accountKeyUpdater) revokeAccountKey( address flow.Address, - keyIndex int, + keyIndex uint32, ) ( *runtime.AccountKey, error, @@ -357,15 +292,7 @@ func (updater *accountKeyUpdater) revokeAccountKey( errors.NewAccountNotFoundError(address)) } - // Don't return an error for invalid key indices - if keyIndex < 0 { - return nil, nil - } - - var publicKey flow.AccountPublicKey - publicKey, err = updater.accounts.GetPublicKey( - address, - uint64(keyIndex)) + err = updater.accounts.RevokeAccountPublicKey(address, keyIndex) if err != nil { // If a key is not found at a given index, then return a nil key with // no errors. 
This is to be inline with the Cadence runtime. Otherwise @@ -377,15 +304,9 @@ func (updater *accountKeyUpdater) revokeAccountKey( return nil, fmt.Errorf("revoking account key failed: %w", err) } - // mark this key as revoked - publicKey.Revoked = true - - _, err = updater.accounts.SetPublicKey( - address, - uint64(keyIndex), - publicKey) + publicKey, err := updater.accounts.GetRuntimeAccountPublicKey(address, keyIndex) if err != nil { - return nil, fmt.Errorf("revoking account key failed: %w", err) + return nil, err } // Prepare account key to return @@ -419,151 +340,6 @@ func (updater *accountKeyUpdater) revokeAccountKey( }, nil } -// InternalAddEncodedAccountKey adds an encoded public key to an existing -// account. -// -// This function returns following error -// * NewAccountNotFoundError - if the specified account does not exist -// * ValueError - if the provided encodedPublicKey is not valid public key -func (updater *accountKeyUpdater) InternalAddEncodedAccountKey( - address flow.Address, - encodedPublicKey []byte, -) error { - ok, err := updater.accounts.Exists(address) - if err != nil { - return fmt.Errorf("adding encoded account key failed: %w", err) - } - - if !ok { - return errors.NewAccountNotFoundError(address) - } - - var publicKey flow.AccountPublicKey - - publicKey, err = flow.DecodeRuntimeAccountPublicKey(encodedPublicKey, 0) - if err != nil { - hexEncodedPublicKey := hex.EncodeToString(encodedPublicKey) - return fmt.Errorf( - "adding encoded account key failed: %w", - errors.NewValueErrorf( - hexEncodedPublicKey, - "invalid encoded public key value: %w", - err)) - } - - err = updater.accounts.AppendPublicKey(address, publicKey) - if err != nil { - return fmt.Errorf("adding encoded account key failed: %w", err) - } - - return nil -} - -// RemoveAccountKey revokes a public key by index from an existing account. -// -// This function returns an error if the specified account does not exist, the -// provided key is invalid, or if key revoking fails. 
-func (updater *accountKeyUpdater) removeAccountKey( - address flow.Address, - keyIndex int, -) ( - []byte, - error, -) { - ok, err := updater.accounts.Exists(address) - if err != nil { - return nil, fmt.Errorf("remove account key failed: %w", err) - } - - if !ok { - issue := errors.NewAccountNotFoundError(address) - return nil, fmt.Errorf("remove account key failed: %w", issue) - } - - if keyIndex < 0 { - err = errors.NewValueErrorf( - fmt.Sprint(keyIndex), - "key index must be positive") - return nil, fmt.Errorf("remove account key failed: %w", err) - } - - var publicKey flow.AccountPublicKey - publicKey, err = updater.accounts.GetPublicKey( - address, - uint64(keyIndex)) - if err != nil { - return nil, fmt.Errorf("remove account key failed: %w", err) - } - - // mark this key as revoked - publicKey.Revoked = true - - encodedPublicKey, err := updater.accounts.SetPublicKey( - address, - uint64(keyIndex), - publicKey) - if err != nil { - return nil, fmt.Errorf("remove account key failed: %w", err) - } - - return encodedPublicKey, nil -} - -func (updater *accountKeyUpdater) AddEncodedAccountKey( - runtimeAddress common.Address, - publicKey []byte, -) error { - defer updater.tracer.StartChildSpan( - trace.FVMEnvAddEncodedAccountKey).End() - - err := updater.meter.MeterComputation( - ComputationKindAddEncodedAccountKey, - 1) - if err != nil { - return fmt.Errorf("add encoded account key failed: %w", err) - } - - address := flow.ConvertAddress(runtimeAddress) - - // TODO do a call to track the computation usage and memory usage - // - // don't enforce limit during adding a key - updater.txnState.RunWithAllLimitsDisabled(func() { - err = updater.InternalAddEncodedAccountKey(address, publicKey) - }) - - if err != nil { - return fmt.Errorf("add encoded account key failed: %w", err) - } - return nil -} - -func (updater *accountKeyUpdater) RevokeEncodedAccountKey( - runtimeAddress common.Address, - index int, -) ( - []byte, - error, -) { - defer updater.tracer.StartChildSpan(trace.FVMEnvRevokeEncodedAccountKey).End() - - err := updater.meter.MeterComputation( - ComputationKindRevokeEncodedAccountKey, - 1) - if err != nil { - return nil, fmt.Errorf("revoke encoded account key failed: %w", err) - } - - address := flow.ConvertAddress(runtimeAddress) - - encodedKey, err := updater.removeAccountKey(address, index) - if err != nil { - return nil, fmt.Errorf("revoke encoded account key failed: %w", err) - } - - return encodedKey, nil -} - func (updater *accountKeyUpdater) AddAccountKey( runtimeAddress common.Address, publicKey *runtime.PublicKey, @@ -576,8 +352,11 @@ func (updater *accountKeyUpdater) AddAccountKey( defer updater.tracer.StartChildSpan(trace.FVMEnvAddAccountKey).End() err := updater.meter.MeterComputation( - ComputationKindAddAccountKey, - 1) + common.ComputationUsage{ + Kind: ComputationKindAddAccountKey, + Intensity: 1, + }, + ) if err != nil { return nil, fmt.Errorf("add account key failed: %w", err) } @@ -596,7 +375,7 @@ func (updater *accountKeyUpdater) AddAccountKey( func (updater *accountKeyUpdater) RevokeAccountKey( runtimeAddress common.Address, - keyIndex int, + keyIndex uint32, ) ( *runtime.AccountKey, error, @@ -604,8 +383,11 @@ func (updater *accountKeyUpdater) RevokeAccountKey( defer updater.tracer.StartChildSpan(trace.FVMEnvRevokeAccountKey).End() err := updater.meter.MeterComputation( - ComputationKindRevokeAccountKey, - 1) + common.ComputationUsage{ + Kind: ComputationKindRevokeAccountKey, + Intensity: 1, + }, + ) if err != nil { return nil, fmt.Errorf("revoke account key 
failed: %w", err) } diff --git a/fvm/environment/account_key_updater_test.go b/fvm/environment/account_key_updater_test.go index 24c2404b917..b8642b20f8c 100644 --- a/fvm/environment/account_key_updater_test.go +++ b/fvm/environment/account_key_updater_test.go @@ -8,60 +8,17 @@ import ( "github.com/fxamacker/cbor/v2" "github.com/onflow/atree" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/sema" + "github.com/onflow/cadence/sema" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/errors" - "github.com/onflow/flow-go/fvm/tracing" "github.com/onflow/flow-go/model/flow" ) -func TestAddEncodedAccountKey_error_handling_produces_valid_utf8(t *testing.T) { - - akh := environment.NewAccountKeyUpdater( - tracing.NewTracerSpan(), - nil, - FakeAccounts{}, - nil, - nil) - - address := flow.BytesToAddress([]byte{1, 2, 3, 4}) - - // emulate encoded public key (which comes as a user input) - // containing bytes which are invalid UTF8 - - invalidEncodedKey := make([]byte, 64) - invalidUTF8 := []byte{0xc3, 0x28} - copy(invalidUTF8, invalidEncodedKey) - accountPublicKey := FakePublicKey{data: invalidEncodedKey}.toAccountPublicKey() - - encodedPublicKey, err := flow.EncodeRuntimeAccountPublicKey(accountPublicKey) - require.NoError(t, err) - - err = akh.InternalAddEncodedAccountKey(address, encodedPublicKey) - require.Error(t, err) - - require.True(t, errors.IsValueError(err)) - - errorString := err.Error() - assert.True(t, utf8.ValidString(errorString)) - - // check if they can encoded and decoded using CBOR - marshalledBytes, err := cbor.Marshal(errorString) - require.NoError(t, err) - - var unmarshalledString string - - err = cbor.Unmarshal(marshalledBytes, &unmarshalledString) - require.NoError(t, err) - - require.Equal(t, errorString, unmarshalledString) -} - func TestNewAccountKey_error_handling_produces_valid_utf8_and_sign_algo(t *testing.T) { invalidSignAlgo := runtime.SignatureAlgorithm(254) @@ -173,6 +130,17 @@ func (f FakePublicKey) toAccountPublicKey() flow.AccountPublicKey { } } +func (f FakePublicKey) toRuntimeAccountPublicKey() flow.RuntimeAccountPublicKey { + return flow.RuntimeAccountPublicKey{ + Index: 1, + PublicKey: f, + SignAlgo: crypto.ECDSASecp256k1, + HashAlgo: hash.SHA3_256, + Weight: 1000, + Revoked: false, + } +} + func (f FakePublicKey) Encode() []byte { return f.data } @@ -187,26 +155,53 @@ func (f FakePublicKey) EncodeCompressed() []byte { return nil } func (f FakePublicKey) Equals(key crypto.PublicKey) bool { return false } type FakeAccounts struct { - keyCount uint64 + keyCount uint32 } var _ environment.Accounts = &FakeAccounts{} func (f FakeAccounts) Exists(address flow.Address) (bool, error) { return true, nil } func (f FakeAccounts) Get(address flow.Address) (*flow.Account, error) { return &flow.Account{}, nil } -func (f FakeAccounts) GetPublicKeyCount(_ flow.Address) (uint64, error) { +func (f FakeAccounts) GetAccountPublicKeyCount(_ flow.Address) (uint32, error) { return f.keyCount, nil } -func (f FakeAccounts) AppendPublicKey(_ flow.Address, _ flow.AccountPublicKey) error { return nil } -func (f FakeAccounts) GetPublicKey(address flow.Address, keyIndex uint64) (flow.AccountPublicKey, error) { +func (f FakeAccounts) AppendAccountPublicKey(_ flow.Address, _ flow.AccountPublicKey) error { + return nil +} +func (f 
FakeAccounts) GetAccountPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) { if keyIndex >= f.keyCount { return flow.AccountPublicKey{}, errors.NewAccountPublicKeyNotFoundError(address, keyIndex) } return FakePublicKey{}.toAccountPublicKey(), nil } - -func (f FakeAccounts) SetPublicKey(_ flow.Address, _ uint64, _ flow.AccountPublicKey) ([]byte, error) { - return nil, nil +func (f FakeAccounts) GetRuntimeAccountPublicKey(address flow.Address, keyIndex uint32) (flow.RuntimeAccountPublicKey, error) { + if keyIndex >= f.keyCount { + return flow.RuntimeAccountPublicKey{}, errors.NewAccountPublicKeyNotFoundError(address, keyIndex) + } + return FakePublicKey{}.toRuntimeAccountPublicKey(), nil +} +func (f FakeAccounts) GetAccountPublicKeys(address flow.Address) ([]flow.AccountPublicKey, error) { + return make([]flow.AccountPublicKey, f.keyCount), nil +} +func (f FakeAccounts) GetAccountPublicKeyRevokedStatus(address flow.Address, keyIndex uint32) (bool, error) { + if keyIndex >= f.keyCount { + return false, errors.NewAccountPublicKeyNotFoundError(address, keyIndex) + } + return FakePublicKey{}.toAccountPublicKey().Revoked, nil +} +func (f FakeAccounts) GetAccountPublicKeySequenceNumber(address flow.Address, keyIndex uint32) (uint64, error) { + if keyIndex >= f.keyCount { + return 0, errors.NewAccountPublicKeyNotFoundError(address, keyIndex) + } + return FakePublicKey{}.toAccountPublicKey().SeqNumber, nil +} +func (f FakeAccounts) IncrementAccountPublicKeySequenceNumber(flow.Address, uint32) error { + // no-op + return nil +} +func (f FakeAccounts) RevokeAccountPublicKey(flow.Address, uint32) error { + // no-op + return nil } func (f FakeAccounts) GetContractNames(_ flow.Address) ([]string, error) { return nil, nil } func (f FakeAccounts) GetContract(_ string, _ flow.Address) ([]byte, error) { return nil, nil } @@ -217,6 +212,9 @@ func (f FakeAccounts) Create(_ []flow.AccountPublicKey, _ flow.Address) error { func (f FakeAccounts) GetValue(_ flow.RegisterID) (flow.RegisterValue, error) { return nil, nil } func (f FakeAccounts) GetStorageUsed(_ flow.Address) (uint64, error) { return 0, nil } func (f FakeAccounts) SetValue(_ flow.RegisterID, _ []byte) error { return nil } -func (f FakeAccounts) AllocateStorageIndex(_ flow.Address) (atree.StorageIndex, error) { - return atree.StorageIndex{}, nil +func (f FakeAccounts) AllocateSlabIndex(_ flow.Address) (atree.SlabIndex, error) { + return atree.SlabIndex{}, nil +} +func (f FakeAccounts) GenerateAccountLocalID(address flow.Address) (uint64, error) { + return 0, nil } diff --git a/fvm/environment/account_local_id_generator.go b/fvm/environment/account_local_id_generator.go new file mode 100644 index 00000000000..079f292ac3e --- /dev/null +++ b/fvm/environment/account_local_id_generator.go @@ -0,0 +1,82 @@ +package environment + +import ( + "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/tracing" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/trace" +) + +type AccountLocalIDGenerator interface { + GenerateAccountID(address common.Address) (uint64, error) +} + +type ParseRestrictedAccountLocalIDGenerator struct { + txnState state.NestedTransactionPreparer + impl AccountLocalIDGenerator +} + +func NewParseRestrictedAccountLocalIDGenerator( + txnState state.NestedTransactionPreparer, + impl AccountLocalIDGenerator, +) AccountLocalIDGenerator { + return ParseRestrictedAccountLocalIDGenerator{ + txnState: txnState, + impl: impl, + } 
+} + +func (generator ParseRestrictedAccountLocalIDGenerator) GenerateAccountID( + address common.Address, +) (uint64, error) { + return parseRestrict1Arg1Ret( + generator.txnState, + trace.FVMEnvGenerateAccountLocalID, + generator.impl.GenerateAccountID, + address) +} + +type accountLocalIDGenerator struct { + tracer tracing.TracerSpan + meter Meter + accounts Accounts +} + +func NewAccountLocalIDGenerator( + tracer tracing.TracerSpan, + meter Meter, + accounts Accounts, +) AccountLocalIDGenerator { + return &accountLocalIDGenerator{ + tracer: tracer, + meter: meter, + accounts: accounts, + } +} + +func (generator *accountLocalIDGenerator) GenerateAccountID( + runtimeAddress common.Address, +) ( + uint64, + error, +) { + defer generator.tracer.StartExtensiveTracingChildSpan( + trace.FVMEnvGenerateAccountLocalID, + ).End() + + err := generator.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindGenerateAccountLocalID, + Intensity: 1, + }, + ) + if err != nil { + return 0, err + } + + return generator.accounts.GenerateAccountLocalID( + flow.ConvertAddress(runtimeAddress), + ) +} diff --git a/fvm/environment/account_local_id_generator_test.go b/fvm/environment/account_local_id_generator_test.go new file mode 100644 index 00000000000..b0f91b0e699 --- /dev/null +++ b/fvm/environment/account_local_id_generator_test.go @@ -0,0 +1,93 @@ +package environment_test + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/fvm/environment" + envMock "github.com/onflow/flow-go/fvm/environment/mock" + "github.com/onflow/flow-go/fvm/tracing" + "github.com/onflow/flow-go/model/flow" +) + +func Test_accountLocalIDGenerator_GenerateAccountID(t *testing.T) { + address, err := common.HexToAddress("0x1") + require.NoError(t, err) + + t.Run("success", func(t *testing.T) { + meter := envMock.NewMeter(t) + meter.On( + "MeterComputation", + common.ComputationUsage{ + Kind: environment.ComputationKindGenerateAccountLocalID, + Intensity: 1, + }, + ).Return(nil) + + accounts := envMock.NewAccounts(t) + accounts.On("GenerateAccountLocalID", flow.ConvertAddress(address)). + Return(uint64(1), nil) + + generator := environment.NewAccountLocalIDGenerator( + tracing.NewMockTracerSpan(), + meter, + accounts, + ) + + id, err := generator.GenerateAccountID(address) + require.NoError(t, err) + require.Equal(t, uint64(1), id) + }) + t.Run("error in meter", func(t *testing.T) { + expectedErr := errors.New("error in meter") + + meter := envMock.NewMeter(t) + meter.On( + "MeterComputation", + common.ComputationUsage{ + Kind: environment.ComputationKindGenerateAccountLocalID, + Intensity: 1, + }, + ).Return(expectedErr) + + accounts := envMock.NewAccounts(t) + + generator := environment.NewAccountLocalIDGenerator( + tracing.NewMockTracerSpan(), + meter, + accounts, + ) + + _, err := generator.GenerateAccountID(address) + require.ErrorIs(t, err, expectedErr) + }) + t.Run("err in accounts", func(t *testing.T) { + expectedErr := errors.New("error in accounts") + + meter := envMock.NewMeter(t) + meter.On( + "MeterComputation", + common.ComputationUsage{ + Kind: environment.ComputationKindGenerateAccountLocalID, + Intensity: 1, + }, + ).Return(nil) + + accounts := envMock.NewAccounts(t) + accounts.On("GenerateAccountLocalID", flow.ConvertAddress(address)). 
+ Return(uint64(0), expectedErr) + + generator := environment.NewAccountLocalIDGenerator( + tracing.NewMockTracerSpan(), + meter, + accounts, + ) + + _, err := generator.GenerateAccountID(address) + require.ErrorIs(t, err, expectedErr) + }) +} diff --git a/fvm/environment/account_public_key_util.go b/fvm/environment/account_public_key_util.go new file mode 100644 index 00000000000..753413fe6a4 --- /dev/null +++ b/fvm/environment/account_public_key_util.go @@ -0,0 +1,355 @@ +package environment + +import ( + "fmt" + "math" + "slices" + + "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/model/flow" +) + +const ( + // NOTE: MaxPublicKeyCountInBatch can't be modified + MaxPublicKeyCountInBatch = 20 // 20 public key payload is ~1420 bytes +) + +// Account Public Key 0 + +func getAccountPublicKey0( + a Accounts, + address flow.Address, +) ( + flow.AccountPublicKey, + error, +) { + encodedPublicKey, err := a.GetValue(flow.AccountPublicKey0RegisterID(address)) + if err != nil { + return flow.AccountPublicKey{}, err + } + + const keyIndex = uint32(0) + + if len(encodedPublicKey) == 0 { + return flow.AccountPublicKey{}, errors.NewAccountPublicKeyNotFoundError( + address, + keyIndex) + } + + decodedPublicKey, err := flow.DecodeAccountPublicKey(encodedPublicKey, keyIndex) + if err != nil { + return flow.AccountPublicKey{}, fmt.Errorf( + "failed to decode account public key 0: %w", + err) + } + + return decodedPublicKey, nil +} + +func setAccountPublicKey0( + a Accounts, + address flow.Address, + publicKey flow.AccountPublicKey, +) error { + err := publicKey.Validate() + if err != nil { + encoded, _ := publicKey.MarshalJSON() + return errors.NewValueErrorf( + string(encoded), + "invalid public key value: %w", + err) + } + + encodedPublicKey, err := flow.EncodeAccountPublicKey(publicKey) + if err != nil { + encoded, _ := publicKey.MarshalJSON() + return errors.NewValueErrorf( + string(encoded), + "failed to encode account public key: %w", + err) + } + + err = a.SetValue( + flow.AccountPublicKey0RegisterID(address), + encodedPublicKey) + + return err +} + +func revokeAccountPublicKey0( + a Accounts, + address flow.Address, +) error { + accountPublicKey0RegisterID := flow.AccountPublicKey0RegisterID(address) + + publicKey, err := a.GetValue(accountPublicKey0RegisterID) + if err != nil { + return err + } + + const keyIndex = uint32(0) + + if len(publicKey) == 0 { + return errors.NewAccountPublicKeyNotFoundError( + address, + keyIndex) + } + + decodedPublicKey, err := flow.DecodeAccountPublicKey(publicKey, keyIndex) + if err != nil { + return fmt.Errorf( + "failed to decode account public key 0: %w", + err) + } + + decodedPublicKey.Revoked = true + + encodedPublicKey, err := flow.EncodeAccountPublicKey(decodedPublicKey) + if err != nil { + encoded, _ := decodedPublicKey.MarshalJSON() + return errors.NewValueErrorf( + string(encoded), + "failed to encode revoked account public key 0: %w", + err) + } + + return a.SetValue(accountPublicKey0RegisterID, encodedPublicKey) +} + +// Account Public Key Sequence Number + +func getAccountPublicKeySequenceNumber( + a Accounts, + address flow.Address, + keyIndex uint32, +) (uint64, error) { + if keyIndex == 0 { + decodedAccountPublicKey, err := getAccountPublicKey0(a, address) + if err != nil { + return 0, err + } + return decodedAccountPublicKey.SeqNumber, nil + } + + encodedSequenceNumber, err := a.GetValue(flow.AccountPublicKeySequenceNumberRegisterID(address, keyIndex)) + if err != nil { + return 0, err + } + + if len(encodedSequenceNumber) == 0 { 
+ return 0, nil + } + + return flow.DecodeSequenceNumber(encodedSequenceNumber) +} + +func incrementAccountPublicKeySequenceNumber( + a Accounts, + address flow.Address, + keyIndex uint32, +) error { + if keyIndex == 0 { + decodedAccountPublicKey, err := getAccountPublicKey0(a, address) + if err != nil { + return err + } + + decodedAccountPublicKey.SeqNumber++ + + return setAccountPublicKey0(a, address, decodedAccountPublicKey) + } + + seqNum, err := getAccountPublicKeySequenceNumber(a, address, keyIndex) + if err != nil { + return err + } + + seqNum++ + + return createAccountPublicKeySequenceNumber(a, address, keyIndex, seqNum) +} + +func createAccountPublicKeySequenceNumber( + a Accounts, + address flow.Address, + keyIndex uint32, + seqNum uint64, +) error { + if keyIndex == 0 { + return errors.NewKeyMetadataUnexpectedKeyIndexError("failed to create sequence number register", keyIndex) + } + + encodedSeqNum, err := flow.EncodeSequenceNumber(seqNum) + if err != nil { + return err + } + + return a.SetValue( + flow.AccountPublicKeySequenceNumberRegisterID(address, keyIndex), + encodedSeqNum) +} + +// Batch Public Key + +// A BatchPublicKey register contains up to MaxPublicKeyCountInBatch encoded public keys. +// Each public key is encoded as: +// - a 1-byte length prefix, followed by the encoded stored public key + +func getStoredPublicKey( + a Accounts, + address flow.Address, + storedKeyIndex uint32, +) (flow.StoredPublicKey, error) { + if storedKeyIndex == 0 { + // Stored key 0 is always account public key 0. + + accountKey, err := getAccountPublicKey0(a, address) + if err != nil { + return flow.StoredPublicKey{}, err + } + + return flow.StoredPublicKey{ + PublicKey: accountKey.PublicKey, + SignAlgo: accountKey.SignAlgo, + HashAlgo: accountKey.HashAlgo, + }, nil + } + + encodedPublicKey, err := getRawStoredPublicKey(a, address, storedKeyIndex) + if err != nil { + return flow.StoredPublicKey{}, err + } + + return flow.DecodeStoredPublicKey(encodedPublicKey) +} + +func getRawStoredPublicKey( + a Accounts, + address flow.Address, + storedKeyIndex uint32, +) ([]byte, error) { + + if storedKeyIndex == 0 { + // Stored key 0 is always account public key 0. + + accountKey, err := getAccountPublicKey0(a, address) + if err != nil { + return nil, err + } + + storedKey := flow.StoredPublicKey{ + PublicKey: accountKey.PublicKey, + SignAlgo: accountKey.SignAlgo, + HashAlgo: accountKey.HashAlgo, + } + + return flow.EncodeStoredPublicKey(storedKey) + } + + batchIndex := storedKeyIndex / MaxPublicKeyCountInBatch + keyIndexInBatch := storedKeyIndex % MaxPublicKeyCountInBatch + + batchRegisterKey := flow.AccountBatchPublicKeyRegisterID(address, batchIndex) + + b, err := a.GetValue(batchRegisterKey) + if err != nil { + return nil, err + } + + if len(b) == 0 { + return nil, errors.NewBatchPublicKeyNotFoundError("failed to get stored public key", address, batchIndex) + } + + for off, i := 0, uint32(0); off < len(b); i++ { + size := int(b[off]) + off++ + + if off+size > len(b) { + return nil, errors.NewBatchPublicKeyDecodingError( + fmt.Sprintf("%s register is too short", batchRegisterKey), + address, + batchIndex) + } + + if i == keyIndexInBatch { + encodedPublicKey := b[off : off+size] + return slices.Clone(encodedPublicKey), nil + } + + off += size + } + + return nil, errors.NewStoredPublicKeyNotFoundError( + fmt.Sprintf("%s register doesn't have key at index %d", batchRegisterKey, keyIndexInBatch), + address, + storedKeyIndex) +} + +func appendStoredKey( + a Accounts, + address flow.Address, + storedKeyIndex uint32,
encodedPublicKey []byte, +) error { + if storedKeyIndex == 0 { + return errors.NewStoredPublicKeyUnexpectedIndexError("failed to append stored key 0 to batch public key", address, storedKeyIndex) + } + + encodedBatchedPublicKey, err := encodeBatchedPublicKey(encodedPublicKey) + if err != nil { + return err + } + + batchNum := storedKeyIndex / MaxPublicKeyCountInBatch + indexInBatch := storedKeyIndex % MaxPublicKeyCountInBatch + + batchPublicKeyRegisterKey := flow.AccountBatchPublicKeyRegisterID(address, batchNum) + + if indexInBatch == 0 { + // Create new batch public key with 1 key + return a.SetValue(batchPublicKeyRegisterKey, encodedBatchedPublicKey) + } + + if batchNum == 0 && indexInBatch == 1 { + // Create new batch public key with 2 keys: + // - key 0 is nil (placeholder for account public key 0) + // - key 1 is the new key + // NOTE: key 0 in batch 0 is encoded as nil because key 0 is already stored in apk_0 register. + + var batchPublicKeyBytes []byte + batchPublicKeyBytes = append(batchPublicKeyBytes, encodedNilBatchedPublicKey...) + batchPublicKeyBytes = append(batchPublicKeyBytes, encodedBatchedPublicKey...) + + return a.SetValue(batchPublicKeyRegisterKey, batchPublicKeyBytes) + } + + existingBatchKeyPayload, err := a.GetValue(batchPublicKeyRegisterKey) + if err != nil { + return err + } + if len(existingBatchKeyPayload) == 0 { + return errors.NewBatchPublicKeyNotFoundError("failed to append stored public key", address, batchNum) + } + + // Append new key to existing batch public key register + batchPublicKeyBytes := append([]byte(nil), existingBatchKeyPayload...) + batchPublicKeyBytes = append(batchPublicKeyBytes, encodedBatchedPublicKey...) + + return a.SetValue(batchPublicKeyRegisterKey, batchPublicKeyBytes) +} + +var encodedNilBatchedPublicKey, _ = encodeBatchedPublicKey(nil) + +func encodeBatchedPublicKey(encodedPublicKey []byte) ([]byte, error) { + const maxEncodedKeySize = math.MaxUint8 + + if len(encodedPublicKey) > maxEncodedKeySize { + return nil, fmt.Errorf("failed to encode batched public key: encoded key size is %d bytes, exceeded max size %d", len(encodedPublicKey), maxEncodedKeySize) + } + + buf := make([]byte, 1+len(encodedPublicKey)) + buf[0] = byte(len(encodedPublicKey)) + copy(buf[1:], encodedPublicKey) + + return buf, nil +} diff --git a/fvm/environment/account_public_key_util_export_test.go b/fvm/environment/account_public_key_util_export_test.go new file mode 100644 index 00000000000..9bcc26e459e --- /dev/null +++ b/fvm/environment/account_public_key_util_export_test.go @@ -0,0 +1,13 @@ +package environment + +// Export functions from account_public_key_util.go for testing. 
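+// This file uses the standard Go export_test idiom: it belongs to package environment but is compiled only for tests, so the aliases below expose unexported helpers to the external environment_test package without widening the production API.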
+var ( + GetAccountPublicKey0 = getAccountPublicKey0 + SetAccountPublicKey0 = setAccountPublicKey0 + RevokeAccountPublicKey0 = revokeAccountPublicKey0 + GetAccountPublicKeySequenceNumber = getAccountPublicKeySequenceNumber + IncrementAccountPublicKeySequenceNumber = incrementAccountPublicKeySequenceNumber + GetStoredPublicKey = getStoredPublicKey + AppendStoredKey = appendStoredKey + EncodeBatchedPublicKey = encodeBatchedPublicKey +) diff --git a/fvm/environment/account_public_key_util_test.go b/fvm/environment/account_public_key_util_test.go new file mode 100644 index 00000000000..973f31316ec --- /dev/null +++ b/fvm/environment/account_public_key_util_test.go @@ -0,0 +1,394 @@ +package environment_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/environment" + envMock "github.com/onflow/flow-go/fvm/environment/mock" + "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestGetAccountPublicKey0(t *testing.T) { + address := flow.BytesToAddress([]byte{1}) + + t.Run("no key", func(t *testing.T) { + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(nil, nil) + + _, err := environment.GetAccountPublicKey0(accounts, address) + require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) + }) + + t.Run("invalid key", func(t *testing.T) { + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return([]byte{0, 1, 2}, nil) + + _, err := environment.GetAccountPublicKey0(accounts, address) + require.ErrorContains(t, err, "failed to decode account public key 0") + }) + + t.Run("has key", func(t *testing.T) { + expectedPublicKey := newAccountPublicKey(t, 1000) + encodedPublicKey, err := flow.EncodeAccountPublicKey(expectedPublicKey) + require.NoError(t, err) + + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(encodedPublicKey, nil) + + publicKey, err := environment.GetAccountPublicKey0(accounts, address) + require.NoError(t, err) + require.Equal(t, expectedPublicKey, publicKey) + }) +} + +func TestSetAccountPublicKey0(t *testing.T) { + address := flow.BytesToAddress([]byte{1}) + + expectedPublicKey := newAccountPublicKey(t, 1000) + encodedPublicKey, err := flow.EncodeAccountPublicKey(expectedPublicKey) + require.NoError(t, err) + + accounts := envMock.NewAccounts(t) + accounts.On("SetValue", flow.AccountPublicKey0RegisterID(address), encodedPublicKey). + Return(nil) + + err = environment.SetAccountPublicKey0(accounts, address, expectedPublicKey) + require.NoError(t, err) +} + +func TestRevokeAccountPublicKey0(t *testing.T) { + address := flow.BytesToAddress([]byte{1}) + + t.Run("no key", func(t *testing.T) { + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(nil, nil) + + err := environment.RevokeAccountPublicKey0(accounts, address) + require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) + }) + + t.Run("has key", func(t *testing.T) { + publicKey := newAccountPublicKey(t, 1000) + encodedPublicKey, err := flow.EncodeAccountPublicKey(publicKey) + require.NoError(t, err) + + publicKey.Revoked = true + encodedRevokedPublicKey, err := flow.EncodeAccountPublicKey(publicKey) + require.NoError(t, err) + + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). 
+ Return(encodedPublicKey, nil) + accounts.On("SetValue", flow.AccountPublicKey0RegisterID(address), encodedRevokedPublicKey). + Return(nil) + + err = environment.RevokeAccountPublicKey0(accounts, address) + require.NoError(t, err) + }) +} + +func TestGetSequenceNumber(t *testing.T) { + address := flow.BytesToAddress([]byte{1}) + + t.Run("no sequence number", func(t *testing.T) { + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(nil, nil) + accounts.On("GetValue", flow.AccountPublicKeySequenceNumberRegisterID(address, 1)). + Return(nil, nil) + + _, err := environment.GetAccountPublicKeySequenceNumber(accounts, address, 0) + require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) + + seqNum, err := environment.GetAccountPublicKeySequenceNumber(accounts, address, 1) + require.NoError(t, err) + require.Equal(t, uint64(0), seqNum) + }) + + t.Run("has sequence number", func(t *testing.T) { + expectedSeqNum0 := uint64(1) + expectedSeqNum1 := uint64(2) + + publicKey0 := newAccountPublicKey(t, 1000) + publicKey0.SeqNumber = expectedSeqNum0 + encodedPublicKey0, err := flow.EncodeAccountPublicKey(publicKey0) + require.NoError(t, err) + + encodedSeqNum1, err := flow.EncodeSequenceNumber(expectedSeqNum1) + require.NoError(t, err) + + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(encodedPublicKey0, nil) + accounts.On("GetValue", flow.AccountPublicKeySequenceNumberRegisterID(address, 1)). + Return(encodedSeqNum1, nil) + + seqNum0, err := environment.GetAccountPublicKeySequenceNumber(accounts, address, 0) + require.NoError(t, err) + require.Equal(t, expectedSeqNum0, seqNum0) + + seqNum1, err := environment.GetAccountPublicKeySequenceNumber(accounts, address, 1) + require.NoError(t, err) + require.Equal(t, expectedSeqNum1, seqNum1) + }) +} + +func TestIncrementSequenceNumber(t *testing.T) { + address := flow.BytesToAddress([]byte{1}) + + t.Run("no sequence number", func(t *testing.T) { + encodedSeqNum, err := flow.EncodeSequenceNumber(1) + require.NoError(t, err) + + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(nil, nil) + accounts.On("GetValue", flow.AccountPublicKeySequenceNumberRegisterID(address, 1)). + Return(nil, nil) + accounts.On("SetValue", flow.AccountPublicKeySequenceNumberRegisterID(address, 1), encodedSeqNum). + Return(nil, nil) + + err = environment.IncrementAccountPublicKeySequenceNumber(accounts, address, 0) + require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) + + err = environment.IncrementAccountPublicKeySequenceNumber(accounts, address, 1) + require.NoError(t, err) + }) + + t.Run("has sequence number", func(t *testing.T) { + storedSeqNum0 := uint64(1) + storedSeqNum1 := uint64(3) + + publicKey0 := newAccountPublicKey(t, 1000) + publicKey0.SeqNumber = storedSeqNum0 + encodedPublicKey0, err := flow.EncodeAccountPublicKey(publicKey0) + require.NoError(t, err) + + publicKey0.SeqNumber++ + encodedIncrementedPublicKey0, err := flow.EncodeAccountPublicKey(publicKey0) + require.NoError(t, err) + + encodedSeqNum1, err := flow.EncodeSequenceNumber(storedSeqNum1) + require.NoError(t, err) + + encodedIncrementedSeqNum1, err := flow.EncodeSequenceNumber(storedSeqNum1 + 1) + require.NoError(t, err) + + accounts := envMock.NewAccounts(t) + + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). 
+ Return(encodedPublicKey0, nil) + accounts.On("SetValue", flow.AccountPublicKey0RegisterID(address), encodedIncrementedPublicKey0). + Return(nil) + + accounts.On("GetValue", flow.AccountPublicKeySequenceNumberRegisterID(address, 1)). + Return(encodedSeqNum1, nil) + accounts.On("SetValue", flow.AccountPublicKeySequenceNumberRegisterID(address, 1), encodedIncrementedSeqNum1). + Return(nil) + + err = environment.IncrementAccountPublicKeySequenceNumber(accounts, address, 0) + require.NoError(t, err) + + err = environment.IncrementAccountPublicKeySequenceNumber(accounts, address, 1) + require.NoError(t, err) + }) +} + +func TestGetStoredPublicKey(t *testing.T) { + address := flow.BytesToAddress([]byte{1}) + + t.Run("0 key", func(t *testing.T) { + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(nil, nil) + + _, err := environment.GetStoredPublicKey(accounts, address, 0) + require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) + }) + + t.Run("1 key", func(t *testing.T) { + accountPublicKey1 := newAccountPublicKey(t, 1000) + encodedAccountPublicKey1, err := flow.EncodeAccountPublicKey(accountPublicKey1) + require.NoError(t, err) + + expectedStoredPublicKey1 := accountPublicKeyToStoredKey(accountPublicKey1) + + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(encodedAccountPublicKey1, nil) + + spk, err := environment.GetStoredPublicKey(accounts, address, 0) + require.NoError(t, err) + require.Equal(t, expectedStoredPublicKey1, spk) + }) + + t.Run("2 keys", func(t *testing.T) { + accountPublicKey1 := newAccountPublicKey(t, 1000) + encodedAccountPublicKey1, err := flow.EncodeAccountPublicKey(accountPublicKey1) + require.NoError(t, err) + + expectedStoredPublicKey1 := accountPublicKeyToStoredKey(accountPublicKey1) + expectedStoredPublicKey2 := accountPublicKeyToStoredKey(newAccountPublicKey(t, 1)) + + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(encodedAccountPublicKey1, nil) + accounts.On("GetValue", flow.AccountBatchPublicKeyRegisterID(address, 0)). + Return(newBatchPublicKey(t, []*flow.StoredPublicKey{nil, &expectedStoredPublicKey2}), nil) + + spk, err := environment.GetStoredPublicKey(accounts, address, 0) + require.NoError(t, err) + require.Equal(t, expectedStoredPublicKey1, spk) + + spk, err = environment.GetStoredPublicKey(accounts, address, 1) + require.NoError(t, err) + require.Equal(t, expectedStoredPublicKey2, spk) + }) + + t.Run("one full batch", func(t *testing.T) { + accountPublicKey1 := newAccountPublicKey(t, 1000) + encodedAccountPublicKey1, err := flow.EncodeAccountPublicKey(accountPublicKey1) + require.NoError(t, err) + + storedKeyCount := environment.MaxPublicKeyCountInBatch + expectedStoredKeys := make([]*flow.StoredPublicKey, storedKeyCount) + + for i := 1; i < environment.MaxPublicKeyCountInBatch; i++ { + key := accountPublicKeyToStoredKey(newAccountPublicKey(t, 1)) + expectedStoredKeys[i] = &key + } + + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(encodedAccountPublicKey1, nil) + accounts.On("GetValue", flow.AccountBatchPublicKeyRegisterID(address, 0)). 
+ Return(newBatchPublicKey(t, expectedStoredKeys), nil) + + spk, err := environment.GetStoredPublicKey(accounts, address, 0) + require.NoError(t, err) + require.Equal(t, accountPublicKeyToStoredKey(accountPublicKey1), spk) + + for i := 1; i < storedKeyCount; i++ { + spk, err = environment.GetStoredPublicKey(accounts, address, uint32(i)) + require.NoError(t, err) + require.Equal(t, *expectedStoredKeys[i], spk) + } + }) + + t.Run("more than one batch", func(t *testing.T) { + accountPublicKey1 := newAccountPublicKey(t, 1000) + encodedAccountPublicKey1, err := flow.EncodeAccountPublicKey(accountPublicKey1) + require.NoError(t, err) + + storedKeyCount := environment.MaxPublicKeyCountInBatch + 1 + + expectedStoredKeys := make([]*flow.StoredPublicKey, storedKeyCount) + + for i := 1; i < storedKeyCount; i++ { + key := accountPublicKeyToStoredKey(newAccountPublicKey(t, 1)) + expectedStoredKeys[i] = &key + } + + accounts := envMock.NewAccounts(t) + accounts.On("GetValue", flow.AccountPublicKey0RegisterID(address)). + Return(encodedAccountPublicKey1, nil) + accounts.On("GetValue", flow.AccountBatchPublicKeyRegisterID(address, 0)). + Return(newBatchPublicKey(t, expectedStoredKeys[:environment.MaxPublicKeyCountInBatch]), nil) + accounts.On("GetValue", flow.AccountBatchPublicKeyRegisterID(address, 1)). + Return(newBatchPublicKey(t, expectedStoredKeys[environment.MaxPublicKeyCountInBatch:]), nil) + + spk, err := environment.GetStoredPublicKey(accounts, address, 0) + require.NoError(t, err) + require.Equal(t, accountPublicKeyToStoredKey(accountPublicKey1), spk) + + for i := 1; i < storedKeyCount; i++ { + spk, err = environment.GetStoredPublicKey(accounts, address, uint32(i)) + require.NoError(t, err) + require.Equal(t, *expectedStoredKeys[i], spk) + } + }) +} + +func TestAppendStoredPublicKey(t *testing.T) { + address := flow.BytesToAddress([]byte{1}) + + storedKeyCount := environment.MaxPublicKeyCountInBatch + 2 + + expectedStoredKeys := make([]*flow.StoredPublicKey, storedKeyCount) + for i := 1; i < storedKeyCount; i++ { + key := accountPublicKeyToStoredKey(newAccountPublicKey(t, 1)) + expectedStoredKeys[i] = &key + } + + for i := 1; i < storedKeyCount; i++ { + batchNum := i / environment.MaxPublicKeyCountInBatch + keyIndexInBatch := i % environment.MaxPublicKeyCountInBatch + + startStoredKeyIndexInBatch := environment.MaxPublicKeyCountInBatch * batchNum + endStoredKeyIndexInBatch := startStoredKeyIndexInBatch + keyIndexInBatch + + batchRegisterID := flow.AccountBatchPublicKeyRegisterID(address, uint32(batchNum)) + + accounts := envMock.NewAccounts(t) + if !(batchNum == 0 && keyIndexInBatch == 1) && !(batchNum > 0 && keyIndexInBatch == 0) { + accounts.On( + "GetValue", + batchRegisterID). + Return(newBatchPublicKey(t, expectedStoredKeys[startStoredKeyIndexInBatch:endStoredKeyIndexInBatch]), nil) + } + accounts.On( + "SetValue", + batchRegisterID, + newBatchPublicKey(t, expectedStoredKeys[startStoredKeyIndexInBatch:endStoredKeyIndexInBatch+1])). 
+ Return(nil) + + encodedKey, err := flow.EncodeStoredPublicKey(*expectedStoredKeys[i]) + require.NoError(t, err) + + err = environment.AppendStoredKey(accounts, address, uint32(i), encodedKey) + require.NoError(t, err) + } +} + +func newAccountPublicKey(t *testing.T, weight int) flow.AccountPublicKey { + privateKey, err := unittest.AccountKeyDefaultFixture() + require.NoError(t, err) + + return privateKey.PublicKey(weight) +} + +func accountPublicKeyToStoredKey(apk flow.AccountPublicKey) flow.StoredPublicKey { + return flow.StoredPublicKey{ + PublicKey: apk.PublicKey, + SignAlgo: apk.SignAlgo, + HashAlgo: apk.HashAlgo, + } +} + +func newBatchPublicKey(t *testing.T, storedPublicKeys []*flow.StoredPublicKey) []byte { + var buf []byte + var err error + + for _, k := range storedPublicKeys { + var encodedKey []byte + + if k != nil { + encodedKey, err = flow.EncodeStoredPublicKey(*k) + require.NoError(t, err) + } + + b, err := environment.EncodeBatchedPublicKey(encodedKey) + require.NoError(t, err) + + buf = append(buf, b...) + } + return buf +} diff --git a/fvm/environment/accounts.go b/fvm/environment/accounts.go index 17a54a4549f..6af89260b0c 100644 --- a/fvm/environment/accounts.go +++ b/fvm/environment/accounts.go @@ -5,28 +5,40 @@ import ( "fmt" "math" "sort" + "strings" "github.com/fxamacker/cbor/v2" "github.com/onflow/atree" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + accountkeymetadata "github.com/onflow/flow-go/fvm/environment/account-key-metadata" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage/state" "github.com/onflow/flow-go/model/flow" ) const ( - MaxPublicKeyCount = math.MaxUint64 + MaxPublicKeyCount = math.MaxUint32 ) type Accounts interface { Exists(address flow.Address) (bool, error) Get(address flow.Address) (*flow.Account, error) - GetPublicKeyCount(address flow.Address) (uint64, error) - AppendPublicKey(address flow.Address, key flow.AccountPublicKey) error - GetPublicKey(address flow.Address, keyIndex uint64) (flow.AccountPublicKey, error) - SetPublicKey(address flow.Address, keyIndex uint64, publicKey flow.AccountPublicKey) ([]byte, error) + GetAccountPublicKeyCount(address flow.Address) (uint32, error) + AppendAccountPublicKey(address flow.Address, key flow.AccountPublicKey) error + GetRuntimeAccountPublicKey(address flow.Address, keyIndex uint32) (flow.RuntimeAccountPublicKey, error) + GetAccountPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) + GetAccountPublicKeys(address flow.Address) ([]flow.AccountPublicKey, error) + RevokeAccountPublicKey(address flow.Address, keyIndex uint32) error + GetAccountPublicKeyRevokedStatus(address flow.Address, keyIndex uint32) (bool, error) + GetAccountPublicKeySequenceNumber(address flow.Address, keyIndex uint32) (uint64, error) + // IncrementAccountPublicKeySequenceNumber increments the sequence number for the account's public key + // at the given key index. This update does not affect the account status, enabling concurrent execution + // of transactions that do not modify any data related to account status. + // Note: No additional storage is consumed, as the storage for the sequence number register + // was allocated when account public key was initially added to the account. 
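+ // For example (illustrative): a transaction signed with key 3 at sequence number 7 triggers IncrementAccountPublicKeySequenceNumber(addr, 3); for key indices above 0 this rewrites only that key's sequence number register (7 -> 8), leaving the account status register untouched.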
+ IncrementAccountPublicKeySequenceNumber(address flow.Address, keyIndex uint32) error GetContractNames(address flow.Address) ([]string, error) GetContract(contractName string, address flow.Address) ([]byte, error) ContractExists(contractName string, address flow.Address) (bool, error) @@ -36,7 +48,8 @@ type Accounts interface { GetValue(id flow.RegisterID) (flow.RegisterValue, error) GetStorageUsed(address flow.Address) (uint64, error) SetValue(id flow.RegisterID, value flow.RegisterValue) error - AllocateStorageIndex(address flow.Address) (atree.StorageIndex, error) + AllocateSlabIndex(address flow.Address) (atree.SlabIndex, error) + GenerateAccountLocalID(address flow.Address) (uint64, error) } var _ Accounts = &StatefulAccounts{} @@ -51,20 +64,20 @@ func NewAccounts(txnState state.NestedTransactionPreparer) *StatefulAccounts { } } -func (a *StatefulAccounts) AllocateStorageIndex( +func (a *StatefulAccounts) AllocateSlabIndex( address flow.Address, ) ( - atree.StorageIndex, + atree.SlabIndex, error, ) { // get status status, err := a.getAccountStatus(address) if err != nil { - return atree.StorageIndex{}, err + return atree.SlabIndex{}, err } // get and increment the index - index := status.StorageIndex() + index := status.SlabIndex() newIndexBytes := index.Next() // store nil so that the setValue for new allocated slabs would be faster @@ -72,13 +85,13 @@ func (a *StatefulAccounts) AllocateStorageIndex( // compute storage size changes) // this way the getValue would load this value from deltas key := atree.SlabIndexToLedgerKey(index) - a.txnState.RunWithAllLimitsDisabled(func() { + a.txnState.RunWithMeteringDisabled(func() { err = a.txnState.Set( - flow.NewRegisterID(string(address.Bytes()), string(key)), + flow.NewRegisterID(address, string(key)), []byte{}) }) if err != nil { - return atree.StorageIndex{}, fmt.Errorf( + return atree.SlabIndex{}, fmt.Errorf( "failed to allocate an storage index: %w", err) } @@ -87,7 +100,7 @@ func (a *StatefulAccounts) AllocateStorageIndex( status.SetStorageIndex(newIndexBytes) err = a.setAccountStatus(address, status) if err != nil { - return atree.StorageIndex{}, fmt.Errorf( + return atree.SlabIndex{}, fmt.Errorf( "failed to allocate an storage index: %w", err) } @@ -122,7 +135,7 @@ func (a *StatefulAccounts) Get(address flow.Address) (*flow.Account, error) { } var publicKeys []flow.AccountPublicKey - publicKeys, err = a.GetPublicKeys(address) + publicKeys, err = a.GetAccountPublicKeys(address) if err != nil { return nil, err } @@ -167,6 +180,15 @@ func (a *StatefulAccounts) Create( return errors.NewAccountAlreadyExistsError(newAddress) } + publicKeyCount := uint32(len(publicKeys)) + + if publicKeyCount >= MaxPublicKeyCount { + return errors.NewAccountPublicKeyLimitError( + newAddress, + publicKeyCount, + MaxPublicKeyCount) + } + accountStatus := NewAccountStatus() storageUsedByTheStatusItself := uint64(RegisterSize( flow.AccountStatusRegisterID(newAddress), @@ -179,53 +201,241 @@ func (a *StatefulAccounts) Create( return fmt.Errorf("failed to create a new account: %w", err) } - return a.SetAllPublicKeys(newAddress, publicKeys) + for i, publicKey := range publicKeys { + err := a.appendPublicKey(newAddress, publicKey, uint32(i)) + if err != nil { + return err + } + } + + // NOTE: do not include pre-allocated sequence number register size in storage used for account public key 0. + + if publicKeyCount <= 1 { + return nil + } + + // Adjust storage used for pre-allocated sequence number registers, starting from account public key 1. 
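+ // Sequence number registers are created lazily (only once a key's sequence number becomes non-zero), but their size is charged up front via PredefinedSequenceNumberPayloadSize so that the eventual write does not change the account's storage used.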
+ sequenceNumberPayloadSize := uint64(0) + for i := uint32(1); i < publicKeyCount; i++ { + sequenceNumberPayloadSize += PredefinedSequenceNumberPayloadSize(newAddress, i) + } + + status, err := a.getAccountStatus(newAddress) + if err != nil { + return err + } + + storageUsed := status.StorageUsed() + sequenceNumberPayloadSize + return a.setAccountStatusStorageUsed(newAddress, status, storageUsed) } -func (a *StatefulAccounts) GetPublicKey( +func (a *StatefulAccounts) GetAccountPublicKey( address flow.Address, - keyIndex uint64, + keyIndex uint32, ) ( flow.AccountPublicKey, error, ) { - publicKey, err := a.GetValue(flow.PublicKeyRegisterID(address, keyIndex)) + err := a.accountPublicKeyIndexInRange(address, keyIndex) if err != nil { return flow.AccountPublicKey{}, err } - if len(publicKey) == 0 { - return flow.AccountPublicKey{}, errors.NewAccountPublicKeyNotFoundError( - address, - keyIndex) + if keyIndex == 0 { + key, err := getAccountPublicKey0(a, address) + if err != nil { + return flow.AccountPublicKey{}, fmt.Errorf("failed to get account public key at index %d for %s: %w", keyIndex, address, err) + } + return key, nil } - decodedPublicKey, err := flow.DecodeAccountPublicKey(publicKey, keyIndex) + status, err := a.getAccountStatus(address) if err != nil { - return flow.AccountPublicKey{}, fmt.Errorf( - "failed to decode public key: %w", - err) + return flow.AccountPublicKey{}, err + } + + // Get account public key metadata. + weight, revoked, storedKeyIndex, err := status.AccountPublicKeyMetadata(keyIndex) + if err != nil { + return flow.AccountPublicKey{}, fmt.Errorf("failed to get account public key at index %d for %s: %w", keyIndex, address, err) + } + + // Get stored public key. + storedKey, err := getStoredPublicKey(a, address, storedKeyIndex) + if err != nil { + return flow.AccountPublicKey{}, fmt.Errorf("failed to get account public key at index %d for %s: %w", keyIndex, address, err) + } + + // Get sequence number. + sequenceNumber, err := getAccountPublicKeySequenceNumber(a, address, keyIndex) + if err != nil { + return flow.AccountPublicKey{}, fmt.Errorf("failed to get account public key at index %d for %s: %w", keyIndex, address, err) } - return decodedPublicKey, nil + return flow.AccountPublicKey{ + Index: keyIndex, + PublicKey: storedKey.PublicKey, + SignAlgo: storedKey.SignAlgo, + HashAlgo: storedKey.HashAlgo, + SeqNumber: sequenceNumber, + Weight: int(weight), + Revoked: revoked, + }, nil } -func (a *StatefulAccounts) GetPublicKeyCount( +func (a *StatefulAccounts) GetRuntimeAccountPublicKey( address flow.Address, + keyIndex uint32, ) ( - uint64, + flow.RuntimeAccountPublicKey, + error, +) { + err := a.accountPublicKeyIndexInRange(address, keyIndex) + if err != nil { + return flow.RuntimeAccountPublicKey{}, err + } + + if keyIndex == 0 { + key, err := getAccountPublicKey0(a, address) + if err != nil { + return flow.RuntimeAccountPublicKey{}, fmt.Errorf("failed to get account public key at index %d for %s: %w", keyIndex, address, err) + } + return flow.RuntimeAccountPublicKey{ + Index: keyIndex, + PublicKey: key.PublicKey, + SignAlgo: key.SignAlgo, + HashAlgo: key.HashAlgo, + Weight: key.Weight, + Revoked: key.Revoked, + }, nil + } + + status, err := a.getAccountStatus(address) + if err != nil { + return flow.RuntimeAccountPublicKey{}, err + } + + // Get account public key metadata. 
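+ // Key metadata (weight, revoked flag, and the stored-key index it points at) lives in the account status register; the indirection allows account keys sharing an identical public key to reference a single stored-key entry (see the saveKey flag in appendKeyMetadataToAccountStatusRegister).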
+ weight, revoked, storedKeyIndex, err := status.AccountPublicKeyMetadata(keyIndex) + if err != nil { + return flow.RuntimeAccountPublicKey{}, fmt.Errorf("failed to get account public key at index %d for %s: %w", keyIndex, address, err) + } + + // Get stored public key. + storedKey, err := getStoredPublicKey(a, address, storedKeyIndex) + if err != nil { + return flow.RuntimeAccountPublicKey{}, fmt.Errorf("failed to get account public key at index %d for %s: %w", keyIndex, address, err) + } + + return flow.RuntimeAccountPublicKey{ + Index: keyIndex, + PublicKey: storedKey.PublicKey, + SignAlgo: storedKey.SignAlgo, + HashAlgo: storedKey.HashAlgo, + Weight: int(weight), + Revoked: revoked, + }, nil +} + +func (a *StatefulAccounts) GetAccountPublicKeyRevokedStatus(address flow.Address, keyIndex uint32) (bool, error) { + err := a.accountPublicKeyIndexInRange(address, keyIndex) + if err != nil { + return false, err + } + + if keyIndex == 0 { + accountPublicKey, err := getAccountPublicKey0(a, address) + if err != nil { + return false, err + } + return accountPublicKey.Revoked, nil + } + + status, err := a.getAccountStatus(address) + if err != nil { + return false, err + } + + return status.AccountPublicKeyRevokedStatus(keyIndex) +} + +func (a *StatefulAccounts) RevokeAccountPublicKey( + address flow.Address, + keyIndex uint32, +) error { + err := a.accountPublicKeyIndexInRange(address, keyIndex) + if err != nil { + return err + } + + if keyIndex == 0 { + return revokeAccountPublicKey0(a, address) + } + + status, err := a.getAccountStatus(address) + if err != nil { + return err + } + + err = status.RevokeAccountPublicKey(keyIndex) + if err != nil { + return fmt.Errorf("failed to revoke public key at index %d for %s: %w", keyIndex, address, err) + } + + return a.setAccountStatusAfterAccountStatusSizeChange(address, status) +} + +func (a *StatefulAccounts) GetAccountPublicKeySequenceNumber(address flow.Address, keyIndex uint32) (uint64, error) { + err := a.accountPublicKeyIndexInRange(address, keyIndex) + if err != nil { + return 0, err + } + + if keyIndex == 0 { + accountPublicKey, err := getAccountPublicKey0(a, address) + if err != nil { + return 0, err + } + return accountPublicKey.SeqNumber, nil + } + + return getAccountPublicKeySequenceNumber(a, address, keyIndex) +} + +func (a *StatefulAccounts) IncrementAccountPublicKeySequenceNumber(address flow.Address, keyIndex uint32) error { + err := a.accountPublicKeyIndexInRange(address, keyIndex) + if err != nil { + return err + } + + if keyIndex == 0 { + accountPublicKey, err := getAccountPublicKey0(a, address) + if err != nil { + return err + } + accountPublicKey.SeqNumber++ + return setAccountPublicKey0(a, address, accountPublicKey) + } + + return incrementAccountPublicKeySequenceNumber(a, address, keyIndex) +} + +func (a *StatefulAccounts) GetAccountPublicKeyCount( + address flow.Address, +) ( + uint32, error, ) { status, err := a.getAccountStatus(address) if err != nil { return 0, fmt.Errorf("failed to get public key count: %w", err) } - return status.PublicKeyCount(), nil + return status.AccountPublicKeyCount(), nil } func (a *StatefulAccounts) setPublicKeyCount( address flow.Address, - count uint64, + count uint32, ) error { status, err := a.getAccountStatus(address) if err != nil { @@ -235,7 +445,7 @@ func (a *StatefulAccounts) setPublicKeyCount( err) } - status.SetPublicKeyCount(count) + status.SetAccountPublicKeyCount(count) err = a.setAccountStatus(address, status) if err != nil { @@ -247,22 +457,20 @@ func (a *StatefulAccounts) 
setPublicKeyCount( return nil } -func (a *StatefulAccounts) GetPublicKeys( +func (a *StatefulAccounts) GetAccountPublicKeys( address flow.Address, ) ( publicKeys []flow.AccountPublicKey, err error, ) { - count, err := a.GetPublicKeyCount(address) + count, err := a.GetAccountPublicKeyCount(address) if err != nil { - return nil, fmt.Errorf( - "failed to get public key count of account: %w", - err) + return nil, err } publicKeys = make([]flow.AccountPublicKey, count) - for i := uint64(0); i < count; i++ { - publicKey, err := a.GetPublicKey(address, i) + for i := uint32(0); i < count; i++ { + publicKey, err := a.GetAccountPublicKey(address, i) if err != nil { return nil, err } @@ -273,65 +481,51 @@ func (a *StatefulAccounts) GetPublicKeys( return publicKeys, nil } -func (a *StatefulAccounts) SetPublicKey( +func (a *StatefulAccounts) AppendAccountPublicKey( address flow.Address, - keyIndex uint64, publicKey flow.AccountPublicKey, -) (encodedPublicKey []byte, err error) { - err = publicKey.Validate() - if err != nil { - encoded, _ := publicKey.MarshalJSON() - return nil, errors.NewValueErrorf( - string(encoded), - "invalid public key value: %w", - err) - } - - encodedPublicKey, err = flow.EncodeAccountPublicKey(publicKey) +) error { + count, err := a.GetAccountPublicKeyCount(address) if err != nil { - encoded, _ := publicKey.MarshalJSON() - return nil, errors.NewValueErrorf( - string(encoded), - "invalid public key value: %w", - err) + return err } - err = a.SetValue( - flow.PublicKeyRegisterID(address, keyIndex), - encodedPublicKey) - - return encodedPublicKey, err -} - -func (a *StatefulAccounts) SetAllPublicKeys( - address flow.Address, - publicKeys []flow.AccountPublicKey, -) error { - - count := uint64(len(publicKeys)) - + newCount := count + 1 if count >= MaxPublicKeyCount { return errors.NewAccountPublicKeyLimitError( address, - count, + newCount, MaxPublicKeyCount) } - for i, publicKey := range publicKeys { - _, err := a.SetPublicKey(address, uint64(i), publicKey) + keyIndex := count + + err = a.appendPublicKey(address, publicKey, keyIndex) + if err != nil { + return err + } + + // Adjust storage used for pre-allocated sequence number for key at index > 0. 
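+ // (Key 0 is exempt: its sequence number is stored inline in the account public key 0 register rather than in a separate sequence number register.)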
+ if keyIndex > 0 { + sequenceNumberPayloadSize := PredefinedSequenceNumberPayloadSize(address, keyIndex) + + status, err := a.getAccountStatus(address) if err != nil { return err } + + storageUsed := status.StorageUsed() + sequenceNumberPayloadSize + return a.setAccountStatusStorageUsed(address, status, storageUsed) } - return a.setPublicKeyCount(address, count) + return nil } -func (a *StatefulAccounts) AppendPublicKey( +func (a *StatefulAccounts) appendPublicKey( address flow.Address, publicKey flow.AccountPublicKey, + keyIndex uint32, ) error { - if !IsValidAccountKeyHashAlgo(publicKey.HashAlgo) { return errors.NewValueErrorf( publicKey.HashAlgo.String(), @@ -344,24 +538,89 @@ func (a *StatefulAccounts) AppendPublicKey( "signature algorithm type not found") } - count, err := a.GetPublicKeyCount(address) + if keyIndex == 0 { + // Create account public key register for account public key at key index 0 + publicKey.Index = keyIndex + err := setAccountPublicKey0(a, address, publicKey) + if err != nil { + return err + } + + return a.setPublicKeyCount(address, keyIndex+1) + } + + storedKey := flow.StoredPublicKey{ + PublicKey: publicKey.PublicKey, + SignAlgo: publicKey.SignAlgo, + HashAlgo: publicKey.HashAlgo, + } + + encodedKey, err := flow.EncodeStoredPublicKey(storedKey) if err != nil { return err } - if count >= MaxPublicKeyCount { - return errors.NewAccountPublicKeyLimitError( - address, - count+1, - MaxPublicKeyCount) + storedKeyIndex, saveKey, err := a.appendKeyMetadataToAccountStatusRegister( + address, + publicKey.Revoked, + uint16(publicKey.Weight), + encodedKey, + ) + if err != nil { + return err + } + + // Store key if needed. + if saveKey { + err = appendStoredKey(a, address, storedKeyIndex, encodedKey) + if err != nil { + return err + } + } + + // Store sequence number if needed. 
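+ // A register is written only for a non-zero sequence number; reads of a missing sequence number register return 0 (see getAccountPublicKeySequenceNumber), so zero never needs to be stored.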
+ if publicKey.SeqNumber > 0 { + err = createAccountPublicKeySequenceNumber(a, address, keyIndex, publicKey.SeqNumber) + if err != nil { + return err + } } - _, err = a.SetPublicKey(address, count, publicKey) + return nil +} + +func (a *StatefulAccounts) appendKeyMetadataToAccountStatusRegister( + address flow.Address, + revoked bool, + weight uint16, + encodedKey []byte, +) (storedKeyIndex uint32, saveKey bool, _ error) { + status, err := a.getAccountStatus(address) if err != nil { - return err + return 0, false, err + } + + storedKeyIndex, saveKey, err = status.AppendAccountPublicKeyMetadata( + revoked, + weight, + encodedKey, + func(b []byte) uint64 { + return accountkeymetadata.GetPublicKeyDigest(address, b) + }, + func(storedKeyIndex uint32) ([]byte, error) { + return getRawStoredPublicKey(a, address, storedKeyIndex) + }, + ) + if err != nil { + return 0, false, err + } + + err = a.setAccountStatusAfterAccountStatusSizeChange(address, status) + if err != nil { + return 0, false, err } - return a.setPublicKeyCount(address, count+1) + return storedKeyIndex, saveKey, nil } func IsValidAccountKeySignAlgo(algo crypto.SigningAlgorithm) bool { @@ -430,6 +689,20 @@ func (a *StatefulAccounts) setContract( return nil } +func EncodeContractNames(contractNames contractNames) ([]byte, error) { + var buf bytes.Buffer + cborEncoder := cbor.NewEncoder(&buf) + err := cborEncoder.Encode(contractNames) + if err != nil { + return nil, errors.NewEncodingFailuref( + err, + "cannot encode contract names: %s", + contractNames, + ) + } + return buf.Bytes(), nil +} + func (a *StatefulAccounts) setContractNames( contractNames contractNames, address flow.Address, @@ -442,16 +715,11 @@ func (a *StatefulAccounts) setContractNames( if !ok { return errors.NewAccountNotFoundError(address) } - var buf bytes.Buffer - cborEncoder := cbor.NewEncoder(&buf) - err = cborEncoder.Encode(contractNames) + + newContractNames, err := EncodeContractNames(contractNames) if err != nil { - return errors.NewEncodingFailuref( - err, - "cannot encode contract names: %s", - contractNames) + return err } - newContractNames := buf.Bytes() id := flow.ContractNamesRegisterID(address) prevContractNames, err := a.GetValue(id) @@ -537,6 +805,11 @@ func (a *StatefulAccounts) updateRegisterSizeChange( // don't double check this to save time and prevent recursion return nil } + if strings.HasPrefix(id.Key, flow.SequenceNumberRegisterKeyPrefix) { + // Size of sequence number register is included when account public key is appended. + // Don't double count sequence number registers. + return nil + } oldValue, err := a.GetValue(id) if err != nil { return err @@ -575,15 +848,16 @@ func (a *StatefulAccounts) updateRegisterSizeChange( } func RegisterSize(id flow.RegisterID, value flow.RegisterValue) int { + // NOTE: RegisterSize() needs to be in sync with encodedKeyLength() in ledger/trie_encoder.go. 
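+ // The computed size is 2 (key-part count) + (4 + 2 + len(owner)) + (4 + 2 + len(key)) + len(value). For example, an 8-byte owner, a 9-byte key, and a 1-byte value yield 2 + 14 + 15 + 1 = 32 bytes.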
if len(value) == 0 { // registers with empty value won't (or don't) exist when stored return 0 } - size := 0 - // additional 2 is for len prefixes when encoding is happening - we might get rid of these 2s in the future - size += 2 + len(id.Owner) - size += 2 + len(id.Key) + size := 2 // number of key parts (2 bytes) + // Size for each key part: + // length prefix (4 bytes) + encoded key part type (2 bytes) + key part value + size += 4 + 2 + len(id.Owner) + size += 4 + 2 + len(id.Key) size += len(value) return size } @@ -606,20 +880,26 @@ func (a *StatefulAccounts) getContractNames( error, ) { // TODO return fatal error if can't fetch - encContractNames, err := a.GetValue(flow.ContractNamesRegisterID(address)) + encodedContractNames, err := a.GetValue(flow.ContractNamesRegisterID(address)) if err != nil { return nil, fmt.Errorf("cannot get deployed contract names: %w", err) } + + return DecodeContractNames(encodedContractNames) +} + +func DecodeContractNames(encodedContractNames []byte) ([]string, error) { identifiers := make([]string, 0) - if len(encContractNames) > 0 { - buf := bytes.NewReader(encContractNames) + if len(encodedContractNames) > 0 { + buf := bytes.NewReader(encodedContractNames) cborDecoder := cbor.NewDecoder(buf) - err = cborDecoder.Decode(&identifiers) + err := cborDecoder.Decode(&identifiers) if err != nil { return nil, fmt.Errorf( "cannot decode deployed contract names %x: %w", - encContractNames, - err) + encodedContractNames, + err, + ) } } return identifiers, nil @@ -698,6 +978,32 @@ func (a *StatefulAccounts) DeleteContract( return a.setContractNames(contractNames, address) } +// GenerateAccountLocalID generates a new account local id for an address. +// Ids are sequential and start at 1. +// Errors can happen if the account state cannot be read or written to. +func (a *StatefulAccounts) GenerateAccountLocalID( + address flow.Address, +) ( + uint64, + error, +) { + as, err := a.getAccountStatus(address) + if err != nil { + return 0, fmt.Errorf("failed to get account local id: %w", err) + } + id := as.AccountIdCounter() + // AccountLocalIDs are defined as non-zero, so return the incremented value + // see: https://github.com/onflow/cadence/blob/2081a601106baaf6ae695e3f2a84613160bb2166/runtime/interface.go#L149 + id += 1 + + as.SetAccountIdCounter(id) + err = a.setAccountStatus(address, as) + if err != nil { + return 0, fmt.Errorf("failed to increment account local id: %w", err) + } + return id, nil +} + func (a *StatefulAccounts) getAccountStatus( address flow.Address, ) ( @@ -733,6 +1039,93 @@ func (a *StatefulAccounts) setAccountStatus( return nil } +func (a *StatefulAccounts) accountPublicKeyIndexInRange( + address flow.Address, + keyIndex uint32, +) error { + publicKeyCount, err := a.GetAccountPublicKeyCount(address) + if err != nil { + return errors.NewAccountPublicKeyNotFoundError( + address, + keyIndex) + } + + if keyIndex >= publicKeyCount { + return errors.NewAccountPublicKeyNotFoundError( + address, + keyIndex) + } + + return nil +} + +// setAccountStatusAfterAccountStatusSizeChange adjusts and sets +// account storage used after the account status register size is changed. +// This function is needed because updateRegisterSizeChange() filters out +// the account status register to prevent recursion when computing storage used, +// so we need to explicitly update the account storage used when the +// account status register size is changed.
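+// The adjustment is derived from the difference between the old and new encoded sizes of the account status register itself, so callers don't need to track the size delta.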
+func (a *StatefulAccounts) setAccountStatusAfterAccountStatusSizeChange( + address flow.Address, + status *AccountStatus, +) error { + id := flow.AccountStatusRegisterID(address) + + oldAccountStatusValue, err := a.GetValue(id) + if err != nil { + return err + } + oldAccountStatusSize := len(oldAccountStatusValue) + + newAccountStatusValue := status.ToBytes() + newAccountStatusSize := len(newAccountStatusValue) + + sizeChange := newAccountStatusSize - oldAccountStatusSize + if sizeChange == 0 { + // Account status register size has not changed. + + // Set account status in underlying state + return a.txnState.Set(id, newAccountStatusValue) + } + + oldAccountStatus, err := AccountStatusFromBytes(oldAccountStatusValue) + if err != nil { + return err + } + oldStorageUsed := oldAccountStatus.StorageUsed() + + // Handle shrink and growth separately to avoid casting a negative int to uint64 + var newStorageUsed uint64 + if sizeChange < 0 { + absChange := uint64(-sizeChange) + if absChange > uint64(oldAccountStatusSize) { + // should never happen + return fmt.Errorf("storage would be negative for %s", id) + } + newStorageUsed = oldStorageUsed - absChange + } else { + absChange := uint64(sizeChange) + newStorageUsed = oldStorageUsed + absChange + } + + // Set updated storage used + status.SetStorageUsed(newStorageUsed) + + // Set account status in underlying state + return a.txnState.Set(id, status.ToBytes()) +} + +// PredefinedSequenceNumberPayloadSize returns the predefined size of a sequence number register. +func PredefinedSequenceNumberPayloadSize(address flow.Address, keyIndex uint32) uint64 { + // NOTE: A 1-byte register value is used as the predefined value size. + sequenceNumberValueUsedForStorageSizeComputation := []byte{0x01} + size := RegisterSize( + flow.AccountPublicKeySequenceNumberRegisterID(address, keyIndex), + sequenceNumberValueUsedForStorageSizeComputation, + ) + return uint64(size) +} + // contractNames container for a list of contract names. Should always be // sorted. To ensure this, don't sort while reading it from storage, but sort // it while adding/removing elements diff --git a/fvm/environment/accounts_status.go b/fvm/environment/accounts_status.go index c715c80e89e..48bf0561bc9 100644 --- a/fvm/environment/accounts_status.go +++ b/fvm/environment/accounts_status.go @@ -3,27 +3,62 @@ package environment import ( "encoding/binary" "encoding/hex" + "fmt" "github.com/onflow/atree" + accountkeymetadata "github.com/onflow/flow-go/fvm/environment/account-key-metadata" "github.com/onflow/flow-go/fvm/errors" ) const ( - flagSize = 1 - storageUsedSize = 8 - storageIndexSize = 8 - publicKeyCountsSize = 8 + flagSize = 1 + storageUsedSize = 8 + storageIndexSize = 8 + oldAccountPublicKeyCountsSize = 8 + accountPublicKeyCountsSize = 4 + addressIdCounterSize = 8 - accountStatusSize = flagSize + + // accountStatusSizeV1 is the size of the account status before the address + // id counter was added. After Crescendo, check if it can be removed as all accounts + // should then have the new status size. + accountStatusSizeV1 = flagSize + storageUsedSize + storageIndexSize + - publicKeyCountsSize + oldAccountPublicKeyCountsSize - flagIndex = 0 - storageUsedStartIndex = flagIndex + flagSize - storageIndexStartIndex = storageUsedStartIndex + storageUsedSize - publicKeyCountsStartIndex = storageIndexStartIndex + storageIndexSize + // accountStatusSizeV2 is the size of the account status before + // the public key count was changed from 8 to 4 bytes long.
+ // After Crescendo, check if it can be removed as all accounts + // should then have the new status size. + accountStatusSizeV2 = flagSize + + storageUsedSize + + storageIndexSize + + oldAccountPublicKeyCountsSize + + addressIdCounterSize + + accountStatusSizeV3 = flagSize + + storageUsedSize + + storageIndexSize + + accountPublicKeyCountsSize + + addressIdCounterSize + + flagIndex = 0 + storageUsedStartIndex = flagIndex + flagSize + storageIndexStartIndex = storageUsedStartIndex + storageUsedSize + accountPublicKeyCountsStartIndex = storageIndexStartIndex + storageIndexSize + addressIdCounterStartIndex = accountPublicKeyCountsStartIndex + accountPublicKeyCountsSize + + versionMask = 0xf0 + deduplicationFlagMask = 0x01 + + accountStatusV4DefaultVersionAndFlag = 0x40 + + AccountStatusMinSizeV4 = accountStatusSizeV3 +) + +const ( + maxStoredDigests = 2 // Account status register stores up to 2 digests from last 2 stored keys. ) // AccountStatus holds meta information about an account // // the first byte captures flags // the next 8 bytes (big-endian) captures storage used by an account // the next 8 bytes (big-endian) captures the storage index of an account -// and the last 8 bytes (big-endian) captures the number of public keys stored on this account -type AccountStatus [accountStatusSize]byte +// the next 4 bytes (big-endian) captures the number of public keys stored on this account +// the next 8 bytes (big-endian) captures the current address id counter +type accountStatusV3 [accountStatusSizeV3]byte + +type AccountStatus struct { + accountStatusV3 + keyMetadataBytes []byte +} // NewAccountStatus returns a new AccountStatus // sets the storage index to the init value func NewAccountStatus() *AccountStatus { - return &AccountStatus{ - 0, // initial empty flags - 0, 0, 0, 0, 0, 0, 0, 0, // init value for storage used + as := accountStatusV3{ + accountStatusV4DefaultVersionAndFlag, // version 4 with empty flags + 0, 0, 0, 0, 0, 0, 0, 0, // init value for storage used 0, 0, 0, 0, 0, 0, 0, 1, // init value for storage index - 0, 0, 0, 0, 0, 0, 0, 0, // init value for public key counts + 0, 0, 0, 0, // init value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // init value for address id counter + } + return &AccountStatus{ + accountStatusV3: as, } } // ToBytes converts AccountStatus to a byte slice // // this has been kept this way in case one day -// we decided to move on to use an struct to represent +// we decided to move on to use a struct to represent // account status. func (a *AccountStatus) ToBytes() []byte { - return a[:] + if len(a.keyMetadataBytes) == 0 { + return a.accountStatusV3[:] + } + return append(a.accountStatusV3[:], a.keyMetadataBytes...) } // AccountStatusFromBytes constructs an AccountStatus from the given byte slice func AccountStatusFromBytes(inp []byte) (*AccountStatus, error) { - var as AccountStatus - if len(inp) != accountStatusSize { - return &as, errors.NewValueErrorf(hex.EncodeToString(inp), "invalid account status size") + asv3, rest, err := accountStatusV3FromBytes(inp) + if err != nil { + return nil, err } + + // NOTE: both accountStatusV3 and keyMetadataBytes are copies.
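+ // Mutating the returned AccountStatus therefore never aliases the caller's input slice.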
+ return &AccountStatus{ + accountStatusV3: asv3, + keyMetadataBytes: append([]byte(nil), rest...), + }, nil +} + +func accountStatusV3FromBytes(inp []byte) (accountStatusV3, []byte, error) { + sizeChange := int64(0) + + // this is to migrate old account status to new account status on the fly + // TODO: remove this whole block after Crescendo, when a full migration will be made. + if len(inp) == accountStatusSizeV1 { + // migrate v1 to v2 + inp2 := make([]byte, accountStatusSizeV2) + + // pad the input with zeros + sizeIncrease := int64(accountStatusSizeV2 - accountStatusSizeV1) + + // But we also need to fix the storage used by the appropriate size because + // the storage used is part of the account status itself. + copy(inp2, inp) + sizeChange = sizeIncrease + + inp = inp2 + } + + // this is to migrate old account status to new account status on the fly + // TODO: remove this whole block after Crescendo, when a full migration will be made. + if len(inp) == accountStatusSizeV2 { + // migrate v2 to v3 + + inp2 := make([]byte, accountStatusSizeV2) + // copy the old account status first, so that we don't slice the input + copy(inp2, inp) + + // cut leading 4 bytes of old public key count. + cutStart := flagSize + + storageUsedSize + + storageIndexSize + + cutEnd := flagSize + + storageUsedSize + + storageIndexSize + + (oldAccountPublicKeyCountsSize - accountPublicKeyCountsSize) + + // check if the public key count is larger than 4 bytes + for i := cutStart; i < cutEnd; i++ { + if inp2[i] != 0 { + return accountStatusV3{}, nil, fmt.Errorf("cannot migrate account status from v2 to v3: public key count is larger than 4 bytes %v, %v", hex.EncodeToString(inp2[flagSize+ + storageUsedSize+ + storageIndexSize:flagSize+ + storageUsedSize+ + storageIndexSize+ + oldAccountPublicKeyCountsSize]), inp2[i]) + } + } + + inp2 = append(inp2[:cutStart], inp2[cutEnd:]...) + + sizeDecrease := int64(accountStatusSizeV2 - accountStatusSizeV3) + + // But we also need to fix the storage used by the appropriate size because + // the storage used is part of the account status itself. 
+ sizeChange -= sizeDecrease + + inp = inp2 + } + + if len(inp) < accountStatusSizeV3 { + return accountStatusV3{}, nil, errors.NewValueErrorf(hex.EncodeToString(inp), "invalid account status size") + } + + inp, rest := inp[:accountStatusSizeV3], inp[accountStatusSizeV3:] + + var as accountStatusV3 copy(as[:], inp) - return &as, nil + + if sizeChange != 0 { + used := as.StorageUsed() + + if sizeChange < 0 { + // check if the storage used is smaller than the size change + if used < uint64(-sizeChange) { + return accountStatusV3{}, nil, errors.NewValueErrorf(hex.EncodeToString(inp), "account would have negative storage used after migration") + } + + used = used - uint64(-sizeChange) + } + + if sizeChange > 0 { + used = used + uint64(sizeChange) + } + + as.SetStorageUsed(used) + } + + return as, rest, nil +} + +func (a *accountStatusV3) Version() uint8 { + return (a[0] & versionMask) >> 4 +} + +func (a *accountStatusV3) IsAccountKeyDeduplicated() bool { + return (a[0] & deduplicationFlagMask) != 0 +} + +func (a *accountStatusV3) setAccountKeyDeduplicationFlag() { + a[0] |= deduplicationFlagMask } // SetStorageUsed updates the storage used by the account -func (a *AccountStatus) SetStorageUsed(used uint64) { +func (a *accountStatusV3) SetStorageUsed(used uint64) { binary.BigEndian.PutUint64(a[storageUsedStartIndex:storageUsedStartIndex+storageUsedSize], used) } // StorageUsed returns the storage used by the account -func (a *AccountStatus) StorageUsed() uint64 { +func (a *accountStatusV3) StorageUsed() uint64 { return binary.BigEndian.Uint64(a[storageUsedStartIndex : storageUsedStartIndex+storageUsedSize]) } // SetStorageIndex updates the storage index of the account -func (a *AccountStatus) SetStorageIndex(index atree.StorageIndex) { +func (a *accountStatusV3) SetStorageIndex(index atree.SlabIndex) { copy(a[storageIndexStartIndex:storageIndexStartIndex+storageIndexSize], index[:storageIndexSize]) } -// StorageIndex returns the storage index of the account -func (a *AccountStatus) StorageIndex() atree.StorageIndex { - var index atree.StorageIndex +// SlabIndex returns the storage index of the account +func (a *accountStatusV3) SlabIndex() atree.SlabIndex { + var index atree.SlabIndex copy(index[:], a[storageIndexStartIndex:storageIndexStartIndex+storageIndexSize]) return index } -// SetPublicKeyCount updates the public key count of the account -func (a *AccountStatus) SetPublicKeyCount(count uint64) { - binary.BigEndian.PutUint64(a[publicKeyCountsStartIndex:publicKeyCountsStartIndex+publicKeyCountsSize], count) +// SetAccountPublicKeyCount updates the account public key count of the account +func (a *accountStatusV3) SetAccountPublicKeyCount(count uint32) { + binary.BigEndian.PutUint32(a[accountPublicKeyCountsStartIndex:accountPublicKeyCountsStartIndex+accountPublicKeyCountsSize], count) +} + +// AccountPublicKeyCount returns the account public key count of the account +func (a *accountStatusV3) AccountPublicKeyCount() uint32 { + return binary.BigEndian.Uint32(a[accountPublicKeyCountsStartIndex : accountPublicKeyCountsStartIndex+accountPublicKeyCountsSize]) +} + +// SetAccountIdCounter updates id counter of the account +func (a *accountStatusV3) SetAccountIdCounter(id uint64) { + binary.BigEndian.PutUint64(a[addressIdCounterStartIndex:addressIdCounterStartIndex+addressIdCounterSize], id) +} + +// AccountIdCounter returns id counter of the account +func (a *accountStatusV3) AccountIdCounter() uint64 { + return binary.BigEndian.Uint64(a[addressIdCounterStartIndex : 
addressIdCounterStartIndex+addressIdCounterSize]) +} + +// AccountPublicKeyRevokedStatus returns revoked status of account public key at the given key index stored in key metadata. +// NOTE: To avoid checking keyIndex range repeatedly at different levels, caller must ensure keyIndex > 0 and < AccountPublicKeyCount(). +func (a *AccountStatus) AccountPublicKeyRevokedStatus(keyIndex uint32) (bool, error) { + return accountkeymetadata.GetRevokedStatus(a.keyMetadataBytes, keyIndex) +} + +// AccountPublicKeyMetadata returns weight, revoked status, and stored key index of account public key at the given key index stored in key metadata. +// NOTE: To avoid checking keyIndex range repeatedly at different levels, caller must ensure keyIndex > 0 and < AccountPublicKeyCount(). +func (a *AccountStatus) AccountPublicKeyMetadata(keyIndex uint32) ( + weight uint16, + revoked bool, + storedKeyIndex uint32, + err error, +) { + return accountkeymetadata.GetKeyMetadata(a.keyMetadataBytes, keyIndex, a.IsAccountKeyDeduplicated()) +} + +// RevokeAccountPublicKey revokes account public key at the given key index stored in key metadata. +// NOTE: To avoid checking keyIndex range repeatedly at different levels, caller must ensure keyIndex > 0 and < AccountPublicKeyCount(). +func (a *AccountStatus) RevokeAccountPublicKey(keyIndex uint32) error { + var err error + a.keyMetadataBytes, err = accountkeymetadata.SetRevokedStatus(a.keyMetadataBytes, keyIndex) + if err != nil { + return err + } + + return nil +} + +// AppendAccountPublicKeyMetadata appends and deduplicates account public key metadata. +// NOTE: If AppendAccountPublicKeyMetadata returns true for saveKey, caller is responsible for +// saving the key corresponding to the given key metadata to storage. +func (a *AccountStatus) AppendAccountPublicKeyMetadata( + revoked bool, + weight uint16, + encodedKey []byte, + getKeyDigest func([]byte) uint64, + getStoredKey func(uint32) ([]byte, error), +) (storedKeyIndex uint32, saveKey bool, err error) { + + accountPublicKeyCount := a.AccountPublicKeyCount() + keyIndex := accountPublicKeyCount + + if keyIndex == 0 { + // First account public key's metadata is not saved in order to + // reduce storage overhead because most accounts only have one key. + + // Increment public key count. + a.SetAccountPublicKeyCount(accountPublicKeyCount + 1) + return 0, true, nil + } + + var keyMetadata *accountkeymetadata.KeyMetadataAppender + keyMetadata, storedKeyIndex, saveKey, err = a.appendAccountPublicKeyMetadata(keyIndex, revoked, weight, encodedKey, getKeyDigest, getStoredKey) + if err != nil { + return 0, false, err + } + + // Serialize key metadata and set account duplication flag if needed. + var deduplicated bool + a.keyMetadataBytes, deduplicated = keyMetadata.ToBytes() + if deduplicated { + a.setAccountKeyDeduplicationFlag() + } + + a.SetAccountPublicKeyCount(accountPublicKeyCount + 1) + + return storedKeyIndex, saveKey, nil } -// PublicKeyCount returns the public key count of the account -func (a *AccountStatus) PublicKeyCount() uint64 { - return binary.BigEndian.Uint64(a[publicKeyCountsStartIndex : publicKeyCountsStartIndex+publicKeyCountsSize]) +// appendAccountPublicKeyMetadata is the main implementation of AppendAccountPublicKeyMetadata() +// and it should only be called by that function. 
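+// It returns the in-memory KeyMetadataAppender (not yet serialized), the stored key index assigned to the new key, and whether the caller still needs to persist the encoded key.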
+func (a *AccountStatus) appendAccountPublicKeyMetadata( + keyIndex uint32, + revoked bool, + weight uint16, + encodedKey []byte, + getKeyDigest func([]byte) uint64, + getStoredKey func(uint32) ([]byte, error), +) ( + _ *accountkeymetadata.KeyMetadataAppender, + storedKeyIndex uint32, + saveKey bool, + err error, +) { + + var keyMetadata *accountkeymetadata.KeyMetadataAppender + + if len(a.keyMetadataBytes) == 0 { + // New key index must be 1 when key metadata is empty because + // key metadata at key index 0 is stored with public key at + // key index 0 in a separate register (apk_0). + // Most accounts only have 1 account public key and we + // special case for that as an optimization. + + if keyIndex != 1 { + return nil, 0, false, errors.NewKeyMetadataEmptyError(fmt.Sprintf("key metadata cannot be empty when appending new key metadata at index %d", keyIndex)) + } + + // To avoid storage overhead for most accounts, account key 0 digest is computed and stored when account key 1 is added. + // So if new key index is 1, we need to compute and append key 0 digest to keyMetadata before appending key 1 metadata. + + // Get public key 0. + var key0 []byte + key0, err = getStoredKey(0) + if err != nil { + return nil, 0, false, err + } + + // Get public key 0 digest. + key0Digest := getKeyDigest(key0) + + // Create empty KeyMetadataAppender with key 0 digest. + keyMetadata = accountkeymetadata.NewKeyMetadataAppender(key0Digest, maxStoredDigests) + } else { + // Create KeyMetadataAppender with stored key metadata bytes. + keyMetadata, err = accountkeymetadata.NewKeyMetadataAppenderFromBytes(a.keyMetadataBytes, a.IsAccountKeyDeduplicated(), maxStoredDigests) + if err != nil { + return nil, 0, false, err + } + } + + digest, isDuplicateKey, duplicateStoredKeyIndex, err := accountkeymetadata.FindDuplicateKey(keyMetadata, encodedKey, getKeyDigest, getStoredKey) + if err != nil { + return nil, 0, false, err + } + + // Whether the new public key is a duplicate or not, we store these items in the key metadata section: + // - new account public key's revoked status and weight + // - new public key's digest (we only store the last N digests, N=2 by default, to balance tradeoffs) + // If the new public key is a duplicate, we also store a mapping from account key index to stored key index. + // + // As a non-duplicate key example, if public key at index 1 is unique, we store: + // - new key's weight and revoked status, and + // - new key's digest + // + // As a duplicate key example, if public key at index 1 is a duplicate of public key at index 0, we store: + // - new key's weight and revoked status, + // - a mapping indicating public key at index 1 is the same as public key at index 0. + // - new key's digest + + // Handle duplicate key. + if isDuplicateKey { + err = keyMetadata.AppendDuplicateKeyMetadata(keyIndex, duplicateStoredKeyIndex, revoked, weight) + if err != nil { + return nil, 0, false, err + } + + return keyMetadata, duplicateStoredKeyIndex, false, nil + } + + // Handle non-duplicate key.
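+ // A unique key is assigned the next stored key index and its digest is appended to the rolling window of the last maxStoredDigests digests used for deduplication.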
+ storedKeyIndex, err = keyMetadata.AppendUniqueKeyMetadata(revoked, weight, digest) + if err != nil { + return nil, 0, false, err + } + + return keyMetadata, storedKeyIndex, true, nil } diff --git a/fvm/environment/accounts_status_test.go b/fvm/environment/accounts_status_test.go index 5d7a04ddff1..83251064e7e 100644 --- a/fvm/environment/accounts_status_test.go +++ b/fvm/environment/accounts_status_test.go @@ -15,15 +15,17 @@ func TestAccountStatus(t *testing.T) { s := environment.NewAccountStatus() t.Run("test setting values", func(t *testing.T) { - index := atree.StorageIndex{1, 2, 3, 4, 5, 6, 7, 8} + index := atree.SlabIndex{1, 2, 3, 4, 5, 6, 7, 8} s.SetStorageIndex(index) - s.SetPublicKeyCount(34) + s.SetAccountPublicKeyCount(34) s.SetStorageUsed(56) + s.SetAccountIdCounter(78) require.Equal(t, uint64(56), s.StorageUsed()) - returnedIndex := s.StorageIndex() + returnedIndex := s.SlabIndex() require.True(t, bytes.Equal(index[:], returnedIndex[:])) - require.Equal(t, uint64(34), s.PublicKeyCount()) + require.Equal(t, uint32(34), s.AccountPublicKeyCount()) + require.Equal(t, uint64(78), s.AccountIdCounter()) }) @@ -31,12 +33,1101 @@ func TestAccountStatus(t *testing.T) { b := append([]byte(nil), s.ToBytes()...) clone, err := environment.AccountStatusFromBytes(b) require.NoError(t, err) - require.Equal(t, s.StorageIndex(), clone.StorageIndex()) - require.Equal(t, s.PublicKeyCount(), clone.PublicKeyCount()) + require.Equal(t, s.SlabIndex(), clone.SlabIndex()) + require.Equal(t, s.AccountPublicKeyCount(), clone.AccountPublicKeyCount()) require.Equal(t, s.StorageUsed(), clone.StorageUsed()) + require.Equal(t, s.AccountIdCounter(), clone.AccountIdCounter()) // invalid size bytes _, err = environment.AccountStatusFromBytes([]byte{1, 2}) require.Error(t, err) }) + + t.Run("test serialization - v1 format", func(t *testing.T) { + // TODO: remove this test when we remove support for the old format + oldBytes := []byte{ + 0, // flags + 0, 0, 0, 0, 0, 0, 0, 7, // storage used + 0, 0, 0, 0, 0, 0, 0, 6, // storage index + 0, 0, 0, 0, 0, 0, 0, 5, // public key counts + } + + // The v1->v2 migration adds 8 bytes for the account id counter, and the + // v2->v3 migration shrinks the public key count by 4 bytes, so storage + // used increases by a net 4 bytes while migrating. + increaseInSize := uint64(4) + + migrated, err := environment.AccountStatusFromBytes(oldBytes) + require.NoError(t, err) + require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, migrated.SlabIndex()) + require.Equal(t, uint32(5), migrated.AccountPublicKeyCount()) + require.Equal(t, uint64(7)+increaseInSize, migrated.StorageUsed()) + require.Equal(t, uint64(0), migrated.AccountIdCounter()) + }) + + t.Run("test serialization - v2 format", func(t *testing.T) { + // TODO: remove this test when we remove support for the old format + oldBytes := []byte{ + 0, // flags + 0, 0, 0, 0, 0, 0, 0, 7, // storage used + 0, 0, 0, 0, 0, 0, 0, 6, // storage index + 0, 0, 0, 0, 0, 0, 0, 5, // public key counts + 0, 0, 0, 0, 0, 0, 0, 3, // account id counter + } + + // for v2->v3 migration, we are shrinking the public key counts from uint64 to uint32 + // so we need to decrease the storage used by 4 bytes + decreaseInSize := uint64(4) + + migrated, err := environment.AccountStatusFromBytes(oldBytes) + require.NoError(t, err) + require.Equal(t, atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}, migrated.SlabIndex()) + require.Equal(t, uint32(5), migrated.AccountPublicKeyCount()) + require.Equal(t, uint64(7)-decreaseInSize, migrated.StorageUsed())
+ require.Equal(t, uint64(3), migrated.AccountIdCounter()) + }) +} + +func TestAccountStatusV4AppendAndGetKeyMetadata(t *testing.T) { + + newAccountStatusBytes := []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 0, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + } + + accountStatusWithOneKeyBytes := []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 1, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + } + + accountStatusWithTwoKeyBytes := []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + } + + accountStatusWithOneKeyAndOneDuplicateKeyBytes := []byte{ + 0x41, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0, 1, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 8, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + } + + t.Run("new account status", func(t *testing.T) { + s := environment.NewAccountStatus() + require.Equal(t, uint8(4), s.Version()) + require.False(t, s.IsAccountKeyDeduplicated()) + require.Equal(t, uint32(0), s.AccountPublicKeyCount()) + + require.Equal(t, newAccountStatusBytes, s.ToBytes()) + + decoded, err := environment.AccountStatusFromBytes(newAccountStatusBytes) + require.NoError(t, err) + require.Equal(t, s, decoded) + }) + + type keyMetadata struct { + revoked bool + weight uint16 + storedKeyIndex uint32 + } + + testcases := []struct { + name string + accountStatusData []byte + revoked bool + weight uint16 + encodedKey []byte + getKeyDigest func([]byte) uint64 + getStoredKey func(uint32) ([]byte, error) + expectedStoredKeyIndex uint32 + expectedSaveKey bool + expectedPublicKeyCount uint32 + expectedDeduplication bool + expectedAccountStatusData []byte + keyMetadata map[uint32]keyMetadata + }{ + { + name: "append key_0", + accountStatusData: newAccountStatusBytes, + revoked: false, + weight: 1000, + encodedKey: []byte{1}, + getKeyDigest: func([]byte) uint64 { + require.Fail(t, "getKeyDigest shouldn't be called when appending first key") + return 0 + }, + getStoredKey: func(uint32) ([]byte, error) { + require.Fail(t, "getStoredKey shouldn't be called when appending first key") + return nil, nil + }, + expectedStoredKeyIndex: uint32(0), + expectedSaveKey: true, + expectedPublicKeyCount: uint32(1), + expectedDeduplication: false, + expectedAccountStatusData: accountStatusWithOneKeyBytes, + }, + { + name: "append key_1 to {key_0}", + accountStatusData: accountStatusWithOneKeyBytes, + 
revoked: false, + weight: uint16(1000), + encodedKey: []byte{2}, + getKeyDigest: func(encodedKey []byte) uint64 { + if bytes.Equal(encodedKey, []byte{1}) { + return 1 + } + if bytes.Equal(encodedKey, []byte{2}) { + return 2 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + if keyIndex == 0 { + return []byte{1}, nil + } + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(1), + expectedSaveKey: true, + expectedPublicKeyCount: uint32(2), + expectedDeduplication: false, + expectedAccountStatusData: accountStatusWithTwoKeyBytes, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + weight: uint16(1000), + storedKeyIndex: uint32(1), + }, + }, + }, + { + name: "append key_0 to {key_0}", + accountStatusData: accountStatusWithOneKeyBytes, + revoked: false, + weight: uint16(1000), + encodedKey: []byte{1}, + getKeyDigest: func(encodedKey []byte) uint64 { + if bytes.Equal(encodedKey, []byte{1}) { + return 1 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + if keyIndex == 0 { + return []byte{1}, nil + } + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(0), + expectedSaveKey: false, + expectedPublicKeyCount: uint32(2), + expectedDeduplication: true, + expectedAccountStatusData: accountStatusWithOneKeyAndOneDuplicateKeyBytes, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + weight: uint16(1000), + storedKeyIndex: uint32(0), + }, + }, + }, + { + name: "append key_0 to {key_0, key_1}", + accountStatusData: accountStatusWithTwoKeyBytes, + revoked: false, + weight: uint16(1), + encodedKey: []byte{1}, + getKeyDigest: func(encodedKey []byte) uint64 { + if bytes.Equal(encodedKey, []byte{1}) { + return 1 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + if keyIndex == 0 { + return []byte{1}, nil + } + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(0), + expectedSaveKey: false, + expectedPublicKeyCount: uint32(3), + expectedDeduplication: true, + expectedAccountStatusData: []byte{ + 0x41, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 1, 0, 1, // weight and revoked group + 0, 0, 0, 2, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0, 1, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + }, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + weight: uint16(1000), + storedKeyIndex: uint32(1), + }, + 2: { + revoked: false, + weight: uint16(1), + storedKeyIndex: uint32(0), + }, + }, + }, + { + name: "append key_1 to {key_0, key_0}", + accountStatusData: accountStatusWithOneKeyAndOneDuplicateKeyBytes, + revoked: false, + weight: uint16(1), + encodedKey: []byte{2}, + getKeyDigest: func(encodedKey []byte) uint64 { + if 
bytes.Equal(encodedKey, []byte{2}) { + return 2 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(1), + expectedSaveKey: true, + expectedPublicKeyCount: uint32(3), + expectedDeduplication: true, + expectedAccountStatusData: []byte{ + 0x41, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 1, 0, 1, // weight and revoked group + 0, 0, 0, 1, // start index for mapping + 0, 0, 0, 6, // length prefix for mapping + 0x80, 2, 0, 0, 0, 0, // mapping group 1 + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + }, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + weight: uint16(1000), + storedKeyIndex: uint32(0), + }, + 2: { + revoked: false, + weight: uint16(1), + storedKeyIndex: uint32(1), + }, + }, + }, + { + name: "append key_1 to {key_0}, key_0 has empty hash", + accountStatusData: accountStatusWithOneKeyBytes, + revoked: false, + weight: uint16(1000), + encodedKey: []byte{2}, + getKeyDigest: func(encodedKey []byte) uint64 { + if bytes.Equal(encodedKey, []byte{1}) { + return 0 + } + if bytes.Equal(encodedKey, []byte{2}) { + return 2 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + if keyIndex == 0 { + return []byte{1}, nil + } + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(1), + expectedSaveKey: true, + expectedPublicKeyCount: uint32(2), + expectedDeduplication: false, + expectedAccountStatusData: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 0, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + }, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + weight: uint16(1000), + storedKeyIndex: uint32(1), + }, + }, + }, + { + name: "append key_1 to {key_0}, key_1 has empty hash", + accountStatusData: accountStatusWithOneKeyBytes, + revoked: false, + weight: uint16(1000), + encodedKey: []byte{2}, + getKeyDigest: func(encodedKey []byte) uint64 { + if bytes.Equal(encodedKey, []byte{1}) { + return 1 + } + if bytes.Equal(encodedKey, []byte{2}) { + return 0 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + if keyIndex == 0 { + return []byte{1}, nil + } + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(1), + expectedSaveKey: true, + expectedPublicKeyCount: uint32(2), + expectedDeduplication: false, + 
expectedAccountStatusData: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 0, // key_1 digest + }, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + weight: uint16(1000), + storedKeyIndex: uint32(1), + }, + }, + }, + { + name: "append key_1 to {key_0}, key_0 and key_1 have empty hash", + accountStatusData: accountStatusWithOneKeyBytes, + revoked: false, + weight: uint16(1000), + encodedKey: []byte{2}, + getKeyDigest: func(encodedKey []byte) uint64 { + if bytes.Equal(encodedKey, []byte{1}) { + return 0 + } + if bytes.Equal(encodedKey, []byte{2}) { + return 0 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + if keyIndex == 0 { + return []byte{1}, nil + } + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(1), + expectedSaveKey: true, + expectedPublicKeyCount: uint32(2), + expectedDeduplication: false, + expectedAccountStatusData: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 0, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 0, // key_1 digest + }, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + weight: uint16(1000), + storedKeyIndex: uint32(1), + }, + }, + }, + { + name: "append key_1 to {key_0}, key_0 and key_1 have hash collision", + accountStatusData: accountStatusWithOneKeyBytes, + revoked: false, + weight: uint16(1000), + encodedKey: []byte{2}, + getKeyDigest: func(encodedKey []byte) uint64 { + if bytes.Equal(encodedKey, []byte{1}) { + return 1 + } + if bytes.Equal(encodedKey, []byte{2}) { + return 1 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + if keyIndex == 0 { + return []byte{1}, nil + } + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(1), + expectedSaveKey: true, + expectedPublicKeyCount: uint32(2), + expectedDeduplication: false, + expectedAccountStatusData: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 0, // key_1 digest + }, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + 
weight: uint16(1000), + storedKeyIndex: uint32(1), + }, + }, + }, + { + name: "append key_2 to {key_0, key_1}, key_1 has empty hash", + accountStatusData: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 0, // key_1 digest + }, + revoked: false, + weight: uint16(1), + encodedKey: []byte{3}, + getKeyDigest: func(encodedKey []byte) uint64 { + if bytes.Equal(encodedKey, []byte{3}) { + return 3 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(2), + expectedSaveKey: true, + expectedPublicKeyCount: uint32(3), + expectedDeduplication: false, + expectedAccountStatusData: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 1, 0, 1, // weight and revoked group + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 0, // key_1 digest + 0, 0, 0, 0, 0, 0, 0, 3, // key_2 digest + }, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + weight: uint16(1000), + storedKeyIndex: uint32(1), + }, + 2: { + revoked: false, + weight: uint16(1), + storedKeyIndex: uint32(2), + }, + }, + }, + { + name: "append key_2 to {key_0, key_1}, key_2 has empty hash", + accountStatusData: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + }, + revoked: false, + weight: uint16(1), + encodedKey: []byte{3}, + getKeyDigest: func(encodedKey []byte) uint64 { + if bytes.Equal(encodedKey, []byte{3}) { + return 0 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(2), + expectedSaveKey: true, + expectedPublicKeyCount: uint32(3), + expectedDeduplication: false, + expectedAccountStatusData: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 
0xe8, // weight and revoked group + 0, 1, 0, 1, // weight and revoked group + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + 0, 0, 0, 0, 0, 0, 0, 0, // key_2 digest + }, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + weight: uint16(1000), + storedKeyIndex: uint32(1), + }, + 2: { + revoked: false, + weight: uint16(1), + storedKeyIndex: uint32(2), + }, + }, + }, + { + name: "append key_2 to {key_0, key_1}, key_1 and key_2 have hash collision", + accountStatusData: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // key_0 digest + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + }, + revoked: false, + weight: uint16(1), + encodedKey: []byte{3}, + getKeyDigest: func(encodedKey []byte) uint64 { + if bytes.Equal(encodedKey, []byte{3}) { + return 2 + } + require.Fail(t, "getKeyDigest(%x) isn't expected", encodedKey) + return 0 + }, + getStoredKey: func(keyIndex uint32) ([]byte, error) { + if keyIndex == 1 { + return []byte{2}, nil + } + require.Fail(t, "getStoredKey(%d) isn't expected", keyIndex) + return nil, nil + }, + expectedStoredKeyIndex: uint32(2), + expectedSaveKey: true, + expectedPublicKeyCount: uint32(3), + expectedDeduplication: false, + expectedAccountStatusData: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 1, 0, 1, // weight and revoked group + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // key_1 digest + 0, 0, 0, 0, 0, 0, 0, 0, // key_2 digest + }, + keyMetadata: map[uint32]keyMetadata{ + 1: { + revoked: false, + weight: uint16(1000), + storedKeyIndex: uint32(1), + }, + 2: { + revoked: false, + weight: uint16(1), + storedKeyIndex: uint32(2), + }, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + s, err := environment.AccountStatusFromBytes(tc.accountStatusData) + require.NoError(t, err) + + storedKeyIndex, saveKey, err := s.AppendAccountPublicKeyMetadata( + tc.revoked, + tc.weight, + tc.encodedKey, + tc.getKeyDigest, + tc.getStoredKey, + ) + require.NoError(t, err) + require.Equal(t, tc.expectedStoredKeyIndex, storedKeyIndex) + require.Equal(t, tc.expectedSaveKey, saveKey) + require.Equal(t, tc.expectedPublicKeyCount, s.AccountPublicKeyCount()) + require.Equal(t, tc.expectedDeduplication, s.IsAccountKeyDeduplicated()) + + require.Equal(t, tc.expectedAccountStatusData, s.ToBytes()) + + decoded, err := environment.AccountStatusFromBytes(tc.expectedAccountStatusData) + require.NoError(t, err) + require.Equal(t, s, decoded) + require.Equal(t, tc.expectedPublicKeyCount, s.AccountPublicKeyCount()) + require.Equal(t, tc.expectedDeduplication, s.IsAccountKeyDeduplicated()) + + for keyIndex, expected := range tc.keyMetadata { + // Get revoked status + 
revoked, err := s.AccountPublicKeyRevokedStatus(keyIndex) + require.NoError(t, err) + require.Equal(t, expected.revoked, revoked) + + // Get key metadata + weight, revoked, storedKeyIndex, err := s.AccountPublicKeyMetadata(keyIndex) + require.NoError(t, err) + require.Equal(t, expected.revoked, revoked) + require.Equal(t, expected.weight, weight) + require.Equal(t, expected.storedKeyIndex, storedKeyIndex) + } + }) + } +} + +func TestAccountStatusV4RevokeKey(t *testing.T) { + + testcases := []struct { + name string + data []byte + keyIndexToRevoke uint32 + expected []byte + expectedRevokedStatus map[uint32]bool + }{ + { + name: "revoke only key in a revoked group of size 1", + data: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + }, + keyIndexToRevoke: 1, + expected: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 2, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 1, 0x83, 0xe8, // weight and revoked group + 0, 0, 0, 0, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 1, // digest 1 + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: true, + }, + }, + { + name: "revoke first key in a revoked group of size 2 (no prev group)", + data: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 1, + expected: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 0x83, 0xe8, // weight and revoked group 1 + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: true, + 2: false, + }, + }, + { + name: "revoke second key in a revoked group of size 2 (no next group)", + data: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for 
weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 2, + expected: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x83, 0xe8, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: false, + 2: true, + }, + }, + { + name: "revoke first key in a revoke group of size 1 (no prev group)", + data: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x80, 0x01, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 1, + expected: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 0x83, 0xe8, // weight and revoked group 1 + 0, 1, 0x80, 0x01, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: true, + 2: true, + }, + }, + { + name: "revoke first key in a revoke group of size 1 (has prev group)", + data: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x80, 0x01, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 2, + expected: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 3, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x80, 0x01, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 
0, 0, 0, 0, 3, // digest 3 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: false, + 2: true, + }, + }, + { + name: "revoke first key in a revoke group of size 3 (no prev group)", + data: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 4, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 3, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 1, + expected: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 4, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 8, // length prefix for weight and revoked list + 0, 1, 0x83, 0xe8, // weight and revoked group 1 + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: true, + 2: false, + 3: false, + }, + }, + { + name: "revoke second key in a revoke group of size 3", + data: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 4, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 3, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 2, + expected: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 4, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 0x0c, // length prefix for weight and revoked list + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x83, 0xe8, // weight and revoked group 1 + 0, 1, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: false, + 2: true, + 3: false, + }, + }, + { + name: "revoke third key in a revoke group of size 3", + data: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for storage index + 0, 0, 0, 4, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 4, // length prefix for weight and revoked list + 0, 3, 3, 0xe8, // weight and revoked group 1 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + keyIndexToRevoke: 3, + expected: []byte{ + 0x40, // version + flag + 0, 0, 0, 0, 0, 0, 0, 0, // value for storage used + 0, 0, 0, 0, 0, 0, 0, 1, // value for 
storage index + 0, 0, 0, 4, // value for public key counts + 0, 0, 0, 0, 0, 0, 0, 0, // value for address id counter + // key metadata + 0, 0, 0, 0x8, // length prefix for weight and revoked list + 0, 2, 3, 0xe8, // weight and revoked group 1 + 0, 1, 0x83, 0xe8, // weight and revoked group 2 + 0, 0, 0, 1, // start index for digests + 0, 0, 0, 0x10, // length prefix for digests + 0, 0, 0, 0, 0, 0, 0, 2, // digest 2 + 0, 0, 0, 0, 0, 0, 0, 3, // digest 3 + }, + expectedRevokedStatus: map[uint32]bool{ + 1: false, + 2: false, + 3: true, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + s, err := environment.AccountStatusFromBytes(tc.data) + require.NoError(t, err) + + err = s.RevokeAccountPublicKey(tc.keyIndexToRevoke) + require.NoError(t, err) + + require.Equal(t, tc.expected, s.ToBytes()) + + decoded, err := environment.AccountStatusFromBytes(tc.expected) + require.NoError(t, err) + require.Equal(t, s, decoded) + + for keyIndex, expected := range tc.expectedRevokedStatus { + revoked, err := decoded.AccountPublicKeyRevokedStatus(keyIndex) + require.NoError(t, err) + require.Equal(t, expected, revoked) + } + }) + } } diff --git a/fvm/environment/accounts_test.go b/fvm/environment/accounts_test.go index c10f3e5ed07..714de2847c2 100644 --- a/fvm/environment/accounts_test.go +++ b/fvm/environment/accounts_test.go @@ -1,6 +1,9 @@ package environment_test import ( + "maps" + "math/rand" + "slices" "testing" "github.com/onflow/atree" @@ -10,6 +13,8 @@ import ( "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/testutils" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" "github.com/onflow/flow-go/model/flow" ) @@ -42,6 +47,271 @@ func TestAccounts_Create(t *testing.T) { require.Error(t, err) }) + + t.Run("account with 0 keys", func(t *testing.T) { + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + } + registerIDs := snapshot.AllRegisterIDs() + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + value, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, value) + + decodedAccountStatus, err := environment.AccountStatusFromBytes(value) + require.NoError(t, err) + require.Equal(t, uint8(4), decodedAccountStatus.Version()) + require.Equal(t, uint32(0), decodedAccountStatus.AccountPublicKeyCount()) + + // Test account storage used + expectedStorageUsed := uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + value)) + require.Equal(t, expectedStorageUsed, decodedAccountStatus.StorageUsed()) + }) + + t.Run("account with 1 key", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + key0.SeqNumber = 1 + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create([]flow.AccountPublicKey{key0}, address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New
registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + } + registerIDs := snapshot.AllRegisterIDs() + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + decodedAccountStatus, err := environment.AccountStatusFromBytes(accountStatusValue) + require.NoError(t, err) + require.Equal(t, uint8(4), decodedAccountStatus.Version()) + require.Equal(t, uint32(1), decodedAccountStatus.AccountPublicKeyCount()) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + decodedKey0, err := flow.DecodeAccountPublicKey(key0Value, 0) + require.NoError(t, err) + require.Equal(t, key0, decodedKey0) + + // Test storage used + var expectedStorageUsed uint64 + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + require.Equal(t, expectedStorageUsed, decodedAccountStatus.StorageUsed()) + }) + + t.Run("account with 2 unique keys", func(t *testing.T) { + for _, hasSequenceNumber := range []bool{true, false} { + key0 := newAccountPublicKey(t, 1000) + key0.SeqNumber = 1 + + key1 := newAccountPublicKey(t, 1000) + if hasSequenceNumber { + key1.SeqNumber = 2 + } else { + key1.SeqNumber = 0 + } + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create([]flow.AccountPublicKey{key0, key1}, address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + flow.AccountBatchPublicKeyRegisterID(address, 0), + } + if hasSequenceNumber { + expectedNewRegisterIDs = append( + expectedNewRegisterIDs, + flow.AccountPublicKeySequenceNumberRegisterID(address, 1), + ) + } + registerIDs := snapshot.AllRegisterIDs() + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + decodedAccountStatus, err := environment.AccountStatusFromBytes(accountStatusValue) + require.NoError(t, err) + require.Equal(t, uint8(4), decodedAccountStatus.Version()) + require.Equal(t, uint32(2), decodedAccountStatus.AccountPublicKeyCount()) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + decodedKey0, err := flow.DecodeAccountPublicKey(key0Value, 0) + require.NoError(t, err) + require.Equal(t, key0, decodedKey0) + + // Test account public key 1 + key1Value, exists := snapshot.WriteSet[flow.AccountBatchPublicKeyRegisterID(address, 0)] + require.True(t, exists) + require.NotEmpty(t, key1Value) + + if hasSequenceNumber { + // 
Test sequence number 1 + seqNum1Value, exists := snapshot.WriteSet[flow.AccountPublicKeySequenceNumberRegisterID(address, 1)] + require.True(t, exists) + decodedSeqNum1, err := flow.DecodeSequenceNumber(seqNum1Value) + require.NoError(t, err) + require.Equal(t, key1.SeqNumber, decodedSeqNum1) + } + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include pk_b0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountBatchPublicKeyRegisterID(address, 1), + key1Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + require.Equal(t, expectedStorageUsed, decodedAccountStatus.StorageUsed()) + } + }) + + t.Run("account with 2 duplicate key", func(t *testing.T) { + for _, hasSequenceNumber := range []bool{true, false} { + key0 := newAccountPublicKey(t, 1000) + key0.SeqNumber = 1 + + key1 := key0 + if hasSequenceNumber { + key1.SeqNumber = 2 + } else { + key1.SeqNumber = 0 + } + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create([]flow.AccountPublicKey{key0, key1}, address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + } + if hasSequenceNumber { + expectedNewRegisterIDs = append( + expectedNewRegisterIDs, + flow.AccountPublicKeySequenceNumberRegisterID(address, 1), + ) + } + registerIDs := snapshot.AllRegisterIDs() + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + decodedAccountStatus, err := environment.AccountStatusFromBytes(accountStatusValue) + require.NoError(t, err) + require.Equal(t, uint8(4), decodedAccountStatus.Version()) + require.Equal(t, uint32(2), decodedAccountStatus.AccountPublicKeyCount()) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + decodedKey0, err := flow.DecodeAccountPublicKey(key0Value, 0) + require.NoError(t, err) + require.Equal(t, key0, decodedKey0) + + if hasSequenceNumber { + // Test sequence number 1 + seqNum1Value, exists := snapshot.WriteSet[flow.AccountPublicKeySequenceNumberRegisterID(address, 1)] + require.True(t, exists) + decodedSeqNum1, err := flow.DecodeSequenceNumber(seqNum1Value) + require.NoError(t, err) + require.Equal(t, key1.SeqNumber, decodedSeqNum1) + } + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + expectedStorageUsed 
+= uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + require.Equal(t, expectedStorageUsed, decodedAccountStatus.StorageUsed()) + } + }) } func TestAccounts_GetWithNoKeys(t *testing.T) { @@ -63,8 +333,8 @@ func TestAccounts_GetPublicKey(t *testing.T) { address := flow.HexToAddress("01") registerId := flow.NewRegisterID( - string(address.Bytes()), - "public_key_0") + address, + "apk_0") for _, value := range [][]byte{{}, nil} { txnState := testutils.NewSimpleTransaction( @@ -76,7 +346,7 @@ func TestAccounts_GetPublicKey(t *testing.T) { err := accounts.Create(nil, address) require.NoError(t, err) - _, err = accounts.GetPublicKey(address, 0) + _, err = accounts.GetAccountPublicKey(address, 0) require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) } }) @@ -88,7 +358,7 @@ func TestAccounts_GetPublicKeyCount(t *testing.T) { address := flow.HexToAddress("01") registerId := flow.NewRegisterID( - string(address.Bytes()), + address, "public_key_count") for _, value := range [][]byte{{}, nil} { @@ -101,9 +371,9 @@ func TestAccounts_GetPublicKeyCount(t *testing.T) { err := accounts.Create(nil, address) require.NoError(t, err) - count, err := accounts.GetPublicKeyCount(address) + count, err := accounts.GetAccountPublicKeyCount(address) require.NoError(t, err) - require.Equal(t, uint64(0), count) + require.Equal(t, uint32(0), count) } }) } @@ -114,7 +384,7 @@ func TestAccounts_GetPublicKeys(t *testing.T) { address := flow.HexToAddress("01") registerId := flow.NewRegisterID( - string(address.Bytes()), + address, "public_key_count") for _, value := range [][]byte{{}, nil} { @@ -128,7 +398,7 @@ func TestAccounts_GetPublicKeys(t *testing.T) { err := accounts.Create(nil, address) require.NoError(t, err) - keys, err := accounts.GetPublicKeys(address) + keys, err := accounts.GetAccountPublicKeys(address) require.NoError(t, err) require.Empty(t, keys) } @@ -224,6 +494,7 @@ func TestAccounts_SetContracts(t *testing.T) { } func TestAccount_StorageUsed(t *testing.T) { + emptyAccountSize := uint64(54) t.Run("Storage used on account creation is deterministic", func(t *testing.T) { txnState := testutils.NewSimpleTransaction(nil) @@ -235,14 +506,14 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(40), storageUsed) + require.Equal(t, emptyAccountSize, storageUsed) }) t.Run("Storage used on register set increases", func(t *testing.T) { txnState := testutils.NewSimpleTransaction(nil) accounts := environment.NewAccounts(txnState) address := flow.HexToAddress("01") - key := flow.NewRegisterID(string(address.Bytes()), "some_key") + key := flow.NewRegisterID(address, "some_key") err := accounts.Create(nil, address) require.NoError(t, err) @@ -252,14 +523,14 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(40+32), storageUsed) + require.Equal(t, emptyAccountSize+uint64(42), storageUsed) }) t.Run("Storage used, set twice on same register to same value, stays the same", func(t *testing.T) { txnState := testutils.NewSimpleTransaction(nil) accounts := environment.NewAccounts(txnState) address := flow.HexToAddress("01") - key := flow.NewRegisterID(string(address.Bytes()), "some_key") + key := flow.NewRegisterID(address, "some_key") 
err := accounts.Create(nil, address) require.NoError(t, err) @@ -271,14 +542,14 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(40+32), storageUsed) + require.Equal(t, emptyAccountSize+uint64(42), storageUsed) }) t.Run("Storage used, set twice on same register to larger value, increases", func(t *testing.T) { txnState := testutils.NewSimpleTransaction(nil) accounts := environment.NewAccounts(txnState) address := flow.HexToAddress("01") - key := flow.NewRegisterID(string(address.Bytes()), "some_key") + key := flow.NewRegisterID(address, "some_key") err := accounts.Create(nil, address) require.NoError(t, err) @@ -290,14 +561,14 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(40+33), storageUsed) + require.Equal(t, emptyAccountSize+uint64(43), storageUsed) }) t.Run("Storage used, set twice on same register to smaller value, decreases", func(t *testing.T) { txnState := testutils.NewSimpleTransaction(nil) accounts := environment.NewAccounts(txnState) address := flow.HexToAddress("01") - key := flow.NewRegisterID(string(address.Bytes()), "some_key") + key := flow.NewRegisterID(address, "some_key") err := accounts.Create(nil, address) require.NoError(t, err) @@ -309,14 +580,14 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(40+31), storageUsed) + require.Equal(t, emptyAccountSize+uint64(41), storageUsed) }) t.Run("Storage used, after register deleted, decreases", func(t *testing.T) { txnState := testutils.NewSimpleTransaction(nil) accounts := environment.NewAccounts(txnState) address := flow.HexToAddress("01") - key := flow.NewRegisterID(string(address.Bytes()), "some_key") + key := flow.NewRegisterID(address, "some_key") err := accounts.Create(nil, address) require.NoError(t, err) @@ -328,7 +599,7 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(40+0), storageUsed) + require.Equal(t, emptyAccountSize+uint64(0), storageUsed) }) t.Run("Storage used on a complex scenario has correct value", func(t *testing.T) { @@ -339,19 +610,19 @@ func TestAccount_StorageUsed(t *testing.T) { err := accounts.Create(nil, address) require.NoError(t, err) - key1 := flow.NewRegisterID(string(address.Bytes()), "some_key") + key1 := flow.NewRegisterID(address, "some_key") err = accounts.SetValue(key1, createByteArray(12)) require.NoError(t, err) err = accounts.SetValue(key1, createByteArray(11)) require.NoError(t, err) - key2 := flow.NewRegisterID(string(address.Bytes()), "some_key2") + key2 := flow.NewRegisterID(address, "some_key2") err = accounts.SetValue(key2, createByteArray(22)) require.NoError(t, err) err = accounts.SetValue(key2, createByteArray(23)) require.NoError(t, err) - key3 := flow.NewRegisterID(string(address.Bytes()), "some_key3") + key3 := flow.NewRegisterID(address, "some_key3") err = accounts.SetValue(key3, createByteArray(22)) require.NoError(t, err) err = accounts.SetValue(key3, createByteArray(0)) @@ -359,10 +630,51 @@ func TestAccount_StorageUsed(t *testing.T) { storageUsed, err := accounts.GetStorageUsed(address) require.NoError(t, err) - require.Equal(t, uint64(40+33+42), storageUsed) + require.Equal(t, emptyAccountSize+uint64(43+52), storageUsed) }) } +func 
TestStatefulAccounts_GenerateAccountLocalID(t *testing.T) { + + // Create 3 accounts + addressA := flow.HexToAddress("0x01") + addressB := flow.HexToAddress("0x02") + addressC := flow.HexToAddress("0x03") + txnState := testutils.NewSimpleTransaction(nil) + a := environment.NewAccounts(txnState) + err := a.Create(nil, addressA) + require.NoError(t, err) + err = a.Create(nil, addressB) + require.NoError(t, err) + err = a.Create(nil, addressC) + require.NoError(t, err) + + // setup some state + _, err = a.GenerateAccountLocalID(addressA) + require.NoError(t, err) + _, err = a.GenerateAccountLocalID(addressA) + require.NoError(t, err) + _, err = a.GenerateAccountLocalID(addressB) + require.NoError(t, err) + + // assert + + // addressA + id, err := a.GenerateAccountLocalID(addressA) + require.NoError(t, err) + require.Equal(t, uint64(3), id) + + // addressB + id, err = a.GenerateAccountLocalID(addressB) + require.NoError(t, err) + require.Equal(t, uint64(2), id) + + // addressC + id, err = a.GenerateAccountLocalID(addressC) + require.NoError(t, err) + require.Equal(t, uint64(1), id) +} + func createByteArray(size int) []byte { bytes := make([]byte, size) for i := range bytes { @@ -371,7 +683,7 @@ func createByteArray(size int) []byte { return bytes } -func TestAccounts_AllocateStorageIndex(t *testing.T) { +func TestAccounts_AllocateSlabIndex(t *testing.T) { txnState := testutils.NewSimpleTransaction(nil) accounts := environment.NewAccounts(txnState) address := flow.HexToAddress("01") @@ -380,17 +692,1390 @@ func TestAccounts_AllocateStorageIndex(t *testing.T) { require.NoError(t, err) // no register set case - i, err := accounts.AllocateStorageIndex(address) + i, err := accounts.AllocateSlabIndex(address) require.NoError(t, err) - require.Equal(t, i, atree.StorageIndex([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + require.Equal(t, i, atree.SlabIndex([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) // register already set case - i, err = accounts.AllocateStorageIndex(address) + i, err = accounts.AllocateSlabIndex(address) require.NoError(t, err) - require.Equal(t, i, atree.StorageIndex([8]byte{0, 0, 0, 0, 0, 0, 0, 2})) + require.Equal(t, i, atree.SlabIndex([8]byte{0, 0, 0, 0, 0, 0, 0, 2})) // register update successful - i, err = accounts.AllocateStorageIndex(address) + i, err = accounts.AllocateSlabIndex(address) require.NoError(t, err) - require.Equal(t, i, atree.StorageIndex([8]byte{0, 0, 0, 0, 0, 0, 0, 3})) + require.Equal(t, i, atree.SlabIndex([8]byte{0, 0, 0, 0, 0, 0, 0, 3})) +} + +func TestAccounts_AppendAndGetAccountPublicKey(t *testing.T) { + t.Run("account with 0 keys", func(t *testing.T) { + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(0), keyCount) + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + _, err = accounts.GetAccountPublicKey(address, 0) + require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + value, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, value) + + // Test account storage used + expectedStorageUsed := uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + value)) + require.Equal(t, expectedStorageUsed, 
storageUsed) + }) + + t.Run("account with 1 key", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + err = accounts.AppendAccountPublicKey(address, key0) + require.NoError(t, err) + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(1), keyCount) + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + retrievedKey0, err := accounts.GetAccountPublicKey(address, 0) + require.NoError(t, err) + require.Equal(t, key0, retrievedKey0) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + } + registerIDs := snapshot.AllRegisterIDs() + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test storage used + var expectedStorageUsed uint64 + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + t.Run("account with 2 unique keys", func(t *testing.T) { + for _, hasSequenceNumber := range []bool{true, false} { + key0 := newAccountPublicKey(t, 1000) + key0.SeqNumber = 1 + + key1 := newAccountPublicKey(t, 1000) + if hasSequenceNumber { + key1.SeqNumber = 2 + } else { + key1.SeqNumber = 0 + } + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + err = accounts.AppendAccountPublicKey(address, key0) + require.NoError(t, err) + + err = accounts.AppendAccountPublicKey(address, key1) + require.NoError(t, err) + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(2), keyCount) + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + retrievedKey0, err := accounts.GetAccountPublicKey(address, 0) + require.NoError(t, err) + require.Equal(t, key0, retrievedKey0) + + retrievedKey1, err := accounts.GetAccountPublicKey(address, 1) + require.NoError(t, err) + key1.Index = 1 + require.Equal(t, key1, retrievedKey1) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + flow.AccountBatchPublicKeyRegisterID(address, 0), + } + if hasSequenceNumber { + expectedNewRegisterIDs = append( + expectedNewRegisterIDs, + flow.AccountPublicKeySequenceNumberRegisterID(address, 1), + ) + } + registerIDs := slices.Collect(maps.Keys(snapshot.WriteSet)) + 
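// Register layout, as reflected in expectedNewRegisterIDs above: key 0 is stored whole in its own apk_0 register, later unique keys are packed into pk_b batch registers, and a per-key sequence number register is written only for a key appended with a nonzero sequence number. + 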
require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test account public key 1 + key1Value, exists := snapshot.WriteSet[flow.AccountBatchPublicKeyRegisterID(address, 0)] + require.True(t, exists) + require.NotEmpty(t, key1Value) + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include pk_b0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountBatchPublicKeyRegisterID(address, 1), + key1Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + require.Equal(t, expectedStorageUsed, storageUsed) + } + }) + + t.Run("account with 2 duplicate keys", func(t *testing.T) { + for _, hasSequenceNumber := range []bool{true, false} { + key0 := newAccountPublicKey(t, 1000) + key0.SeqNumber = 1 + + key1 := key0 + key1.Weight = 1 + if hasSequenceNumber { + key1.SeqNumber = 2 + } else { + key1.SeqNumber = 0 + } + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + err = accounts.AppendAccountPublicKey(address, key0) + require.NoError(t, err) + + err = accounts.AppendAccountPublicKey(address, key1) + require.NoError(t, err) + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(2), keyCount) + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + retrievedKey0, err := accounts.GetAccountPublicKey(address, 0) + require.NoError(t, err) + require.Equal(t, key0, retrievedKey0) + + retrievedKey1, err := accounts.GetAccountPublicKey(address, 1) + require.NoError(t, err) + key1.Index = 1 + require.Equal(t, key1, retrievedKey1) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + } + if hasSequenceNumber { + expectedNewRegisterIDs = append( + expectedNewRegisterIDs, + flow.AccountPublicKeySequenceNumberRegisterID(address, 1), + ) + } + registerIDs := slices.Collect(maps.Keys(snapshot.WriteSet)) + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, 
key0Value) + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + require.Equal(t, expectedStorageUsed, storageUsed) + } + }) + + t.Run("account with 3 duplicate key with mixed sequence number", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + key0.SeqNumber = 1 + + key1 := key0 + key1.Weight = 1 + key1.SeqNumber = 0 + + key2 := key1 + key2.Weight = 1000 + key2.SeqNumber = 3 + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + err = accounts.AppendAccountPublicKey(address, key0) + require.NoError(t, err) + + err = accounts.AppendAccountPublicKey(address, key1) + require.NoError(t, err) + + err = accounts.AppendAccountPublicKey(address, key2) + require.NoError(t, err) + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(3), keyCount) + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + retrievedKey0, err := accounts.GetAccountPublicKey(address, 0) + require.NoError(t, err) + require.Equal(t, key0, retrievedKey0) + + retrievedKey1, err := accounts.GetAccountPublicKey(address, 1) + require.NoError(t, err) + key1.Index = 1 + require.Equal(t, key1, retrievedKey1) + + retrievedKey2, err := accounts.GetAccountPublicKey(address, 2) + require.NoError(t, err) + key2.Index = 2 + require.Equal(t, key2, retrievedKey2) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + flow.AccountPublicKeySequenceNumberRegisterID(address, 2), + } + registerIDs := slices.Collect(maps.Keys(snapshot.WriteSet)) + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + // Include predefined sequence number 2 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 2) + require.Equal(t, 
expectedStorageUsed, storageUsed) + }) +} + +func TestAccounts_RevokeAndGetAccountPublicKey(t *testing.T) { + t.Run("account with 0 keys", func(t *testing.T) { + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(0), keyCount) + + _, err = accounts.GetAccountPublicKeyRevokedStatus(address, 0) + require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) + + err = accounts.RevokeAccountPublicKey(address, 0) + require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + value, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, value) + + // Test account storage used + expectedStorageUsed := uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + value)) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + t.Run("account with 1 key", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + err = accounts.AppendAccountPublicKey(address, key0) + require.NoError(t, err) + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(1), keyCount) + + revoked, err := accounts.GetAccountPublicKeyRevokedStatus(address, 0) + require.NoError(t, err) + require.False(t, revoked) + + err = accounts.RevokeAccountPublicKey(address, 0) + require.NoError(t, err) + + revoked, err = accounts.GetAccountPublicKeyRevokedStatus(address, 0) + require.NoError(t, err) + require.True(t, revoked) + + retrievedKey, err := accounts.GetAccountPublicKey(address, 0) + require.NoError(t, err) + key0.Index = 0 + key0.Revoked = true + require.Equal(t, key0, retrievedKey) + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + } + registerIDs := snapshot.AllRegisterIDs() + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test storage used + var expectedStorageUsed uint64 + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + t.Run("account with 2 unique key", func(t *testing.T) { + key0 := 
newAccountPublicKey(t, 1000) + key1 := newAccountPublicKey(t, 1000) + keys := []flow.AccountPublicKey{key0, key1} + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + for _, key := range keys { + err = accounts.AppendAccountPublicKey(address, key) + require.NoError(t, err) + } + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(len(keys)), keyCount) + + for i, key := range keys { + revoked, err := accounts.GetAccountPublicKeyRevokedStatus(address, uint32(i)) + require.NoError(t, err) + require.False(t, revoked) + + err = accounts.RevokeAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + + revoked, err = accounts.GetAccountPublicKeyRevokedStatus(address, uint32(i)) + require.NoError(t, err) + require.True(t, revoked) + + retrievedKey, err := accounts.GetAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + key.Index = uint32(i) + key.Revoked = true + require.Equal(t, key, retrievedKey) + } + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + flow.AccountBatchPublicKeyRegisterID(address, 0), + } + registerIDs := slices.Collect(maps.Keys(snapshot.WriteSet)) + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test account public key 1 + key1Value, exists := snapshot.WriteSet[flow.AccountBatchPublicKeyRegisterID(address, 0)] + require.True(t, exists) + require.NotEmpty(t, key1Value) + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include pk_b0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountBatchPublicKeyRegisterID(address, 1), + key1Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + t.Run("account with 2 duplicate key", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + key1 := key0 + key1.Weight = 1 + keys := []flow.AccountPublicKey{key0, key1} + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + for _, key := range keys { + err = accounts.AppendAccountPublicKey(address, key) + require.NoError(t, err) + } + + keyCount, err := 
accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(len(keys)), keyCount) + + for i, key := range keys { + revoked, err := accounts.GetAccountPublicKeyRevokedStatus(address, uint32(i)) + require.NoError(t, err) + require.False(t, revoked) + + err = accounts.RevokeAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + + revoked, err = accounts.GetAccountPublicKeyRevokedStatus(address, uint32(i)) + require.NoError(t, err) + require.True(t, revoked) + + retrievedKey, err := accounts.GetAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + key.Index = uint32(i) + key.Revoked = true + require.Equal(t, key, retrievedKey) + } + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + } + registerIDs := slices.Collect(maps.Keys(snapshot.WriteSet)) + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + t.Run("account with 3 duplicate key", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + key1 := key0 + key1.Weight = 1 + key2 := key1 + key2.Weight = 1000 + keys := []flow.AccountPublicKey{ + key0, key1, key2, + } + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + for _, key := range keys { + err = accounts.AppendAccountPublicKey(address, key) + require.NoError(t, err) + } + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(3), keyCount) + + for i, key := range keys { + revoked, err := accounts.GetAccountPublicKeyRevokedStatus(address, uint32(i)) + require.NoError(t, err) + require.False(t, revoked) + + err = accounts.RevokeAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + + revoked, err = accounts.GetAccountPublicKeyRevokedStatus(address, uint32(i)) + require.NoError(t, err) + require.True(t, revoked) + + retrievedKey, err := accounts.GetAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + key.Index = uint32(i) + key.Revoked = true + require.Equal(t, key, retrievedKey) + } + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := 
txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + } + registerIDs := slices.Collect(maps.Keys(snapshot.WriteSet)) + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + // Include predefined sequence number 2 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 2) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + testcases := []struct { + name string + uniqueKeyCount uint32 + duplicateKeyCount uint32 + }{ + { + name: "account with > 20 stored keys (all keys are unique)", + uniqueKeyCount: 21, + duplicateKeyCount: 0, // all keys are unique + }, + { + name: "account with > 20 stored key (some keys are duplicate)", + uniqueKeyCount: 21, + duplicateKeyCount: 10, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + uniqueKeys := make([]flow.AccountPublicKey, tc.uniqueKeyCount) + for i := range len(uniqueKeys) { + weight := rand.Intn(1001) + uniqueKeys[i] = newAccountPublicKey(t, weight) + } + + keys := uniqueKeys + for range tc.duplicateKeyCount { + insertPos := rand.Intn(len(keys)) + if insertPos == 0 { + keys = slices.Insert(keys, insertPos, keys[0]) + } else { + keys = slices.Insert(keys, insertPos, keys[insertPos-1]) + } + } + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + for _, key := range keys { + err = accounts.AppendAccountPublicKey(address, key) + require.NoError(t, err) + } + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, tc.uniqueKeyCount+tc.duplicateKeyCount, keyCount) + + for i, key := range keys { + revoked, err := accounts.GetAccountPublicKeyRevokedStatus(address, uint32(i)) + require.NoError(t, err) + require.False(t, revoked) + + err = accounts.RevokeAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + + revoked, err = accounts.GetAccountPublicKeyRevokedStatus(address, uint32(i)) + require.NoError(t, err) + require.True(t, revoked) + + retrievedKey, err := accounts.GetAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + key.Index = uint32(i) + key.Revoked = true + require.Equal(t, key, retrievedKey) + } + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := 
txnState.FinalizeMainTransaction() + require.NoError(t, err) + + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test storage used + var expectedStorageUsed uint64 + + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + + if tc.uniqueKeyCount > 0 { + // Include account public key 0 in storage used + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + + // Include pk_b registers in storage used, sized with the batch register ID they were read from + batchNum := uint32(0) + for { + batchValue, exists := snapshot.WriteSet[flow.AccountBatchPublicKeyRegisterID(address, batchNum)] + if !exists || len(batchValue) == 0 { + break + } + require.True(t, len(batchValue) > 1) + + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountBatchPublicKeyRegisterID(address, batchNum), + batchValue)) + + batchNum++ + } + } + + for i := 1; i < int(tc.uniqueKeyCount+tc.duplicateKeyCount); i++ { + // Include predefined sequence number in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, uint32(i)) + } + + require.Equal(t, expectedStorageUsed, storageUsed) + }) + } +} + +func TestAccounts_IncrementAndGetAccountPublicKeySequenceNumber(t *testing.T) { + t.Run("account with 0 keys", func(t *testing.T) { + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(0), keyCount) + + _, err = accounts.GetAccountPublicKeySequenceNumber(address, 0) + require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) + + err = accounts.IncrementAccountPublicKeySequenceNumber(address, 0) + require.True(t, errors.IsAccountPublicKeyNotFoundError(err)) + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + value, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, value) + + // Test account storage used + expectedStorageUsed := uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + value)) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + t.Run("account with 1 key", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + err = accounts.AppendAccountPublicKey(address, key0) + require.NoError(t, err) + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(1), keyCount) + + seqNum, err := accounts.GetAccountPublicKeySequenceNumber(address, 0) + require.NoError(t, err) + require.Equal(t, uint64(0), seqNum) + + err = accounts.IncrementAccountPublicKeySequenceNumber(address, 0) + require.NoError(t, err) + + seqNum, err = accounts.GetAccountPublicKeySequenceNumber(address, 0) + require.NoError(t, err) + require.Equal(t, uint64(1), 
seqNum) + + retrievedKey, err := accounts.GetAccountPublicKey(address, 0) + require.NoError(t, err) + key0.Index = 0 + key0.SeqNumber = 1 + require.Equal(t, key0, retrievedKey) + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + } + registerIDs := snapshot.AllRegisterIDs() + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test storage used + var expectedStorageUsed uint64 + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + t.Run("account with 2 unique key", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + key1 := newAccountPublicKey(t, 1000) + keys := []flow.AccountPublicKey{key0, key1} + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + for _, key := range keys { + err = accounts.AppendAccountPublicKey(address, key) + require.NoError(t, err) + } + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(len(keys)), keyCount) + + for i, key := range keys { + seqNum, err := accounts.GetAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint64(0), seqNum) + + err = accounts.IncrementAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + + seqNum, err = accounts.GetAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint64(1), seqNum) + + retrievedKey, err := accounts.GetAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + key.Index = uint32(i) + key.SeqNumber = 1 + require.Equal(t, key, retrievedKey) + } + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + flow.AccountBatchPublicKeyRegisterID(address, 0), + flow.AccountPublicKeySequenceNumberRegisterID(address, 1), + } + registerIDs := slices.Collect(maps.Keys(snapshot.WriteSet)) + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := 
snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test account public key 1 + key1Value, exists := snapshot.WriteSet[flow.AccountBatchPublicKeyRegisterID(address, 0)] + require.True(t, exists) + require.NotEmpty(t, key1Value) + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include pk_b0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountBatchPublicKeyRegisterID(address, 1), + key1Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + t.Run("account with 2 duplicate key", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + key1 := key0 + key1.Weight = 1 + keys := []flow.AccountPublicKey{key0, key1} + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + for _, key := range keys { + err = accounts.AppendAccountPublicKey(address, key) + require.NoError(t, err) + } + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(len(keys)), keyCount) + + for i, key := range keys { + seqNum, err := accounts.GetAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint64(0), seqNum) + + err = accounts.IncrementAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + + seqNum, err = accounts.GetAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint64(1), seqNum) + + retrievedKey, err := accounts.GetAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + key.Index = uint32(i) + key.SeqNumber = 1 + require.Equal(t, key, retrievedKey) + } + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + flow.AccountPublicKeySequenceNumberRegisterID(address, 1), + } + registerIDs := slices.Collect(maps.Keys(snapshot.WriteSet)) + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + 
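// No pk_b batch register exists here: the duplicate public key is stored only once, with its weight and sequence number tracked separately. + 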
expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + t.Run("account with 3 duplicate key", func(t *testing.T) { + key0 := newAccountPublicKey(t, 1000) + key1 := key0 + key1.Weight = 1 + key2 := key1 + key2.Weight = 1000 + keys := []flow.AccountPublicKey{ + key0, key1, key2, + } + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + for _, key := range keys { + err = accounts.AppendAccountPublicKey(address, key) + require.NoError(t, err) + } + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, uint32(3), keyCount) + + for i, key := range keys { + seqNum, err := accounts.GetAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint64(0), seqNum) + + err = accounts.IncrementAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + + seqNum, err = accounts.GetAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint64(1), seqNum) + + retrievedKey, err := accounts.GetAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + key.Index = uint32(i) + key.SeqNumber = 1 + require.Equal(t, key, retrievedKey) + } + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + // New registers + expectedNewRegisterIDs := []flow.RegisterID{ + flow.AccountStatusRegisterID(address), + flow.AccountPublicKey0RegisterID(address), + flow.AccountPublicKeySequenceNumberRegisterID(address, 1), + flow.AccountPublicKeySequenceNumberRegisterID(address, 2), + } + registerIDs := slices.Collect(maps.Keys(snapshot.WriteSet)) + require.Equal(t, len(expectedNewRegisterIDs), len(registerIDs)) + require.ElementsMatch(t, expectedNewRegisterIDs, registerIDs) + + // Test account status + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test account public key 0 + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + // Test storage used + var expectedStorageUsed uint64 + // Include account status register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + // Include apk_0 register in storage used + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + // Include predefined sequence number 1 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 1) + // Include predefined sequence number 2 register in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, 2) + require.Equal(t, expectedStorageUsed, storageUsed) + }) + + testcases := []struct { + name string + storedKeyCount uint32 + duplicateKeyCount uint32 + }{ + { + name: "account with > 20 stored keys (all keys are unique)", + storedKeyCount: 
21, + duplicateKeyCount: 0, + }, + { + name: "account with > 20 stored key (some keys are duplicate)", + storedKeyCount: 21, + duplicateKeyCount: 10, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + uniqueKeys := make([]flow.AccountPublicKey, tc.storedKeyCount) + for i := range len(uniqueKeys) { + weight := rand.Intn(1001) + uniqueKeys[i] = newAccountPublicKey(t, weight) + } + + keys := uniqueKeys + for range tc.duplicateKeyCount { + insertPos := rand.Intn(len(keys)) + if insertPos == 0 { + keys = slices.Insert(keys, insertPos, keys[0]) + } else { + keys = slices.Insert(keys, insertPos, keys[insertPos-1]) + } + } + + txnState := testutils.NewSimpleTransaction(nil) + accounts := environment.NewAccounts(txnState) + + address := flow.HexToAddress("01") + + err := accounts.Create(nil, address) + require.NoError(t, err) + + for _, key := range keys { + err = accounts.AppendAccountPublicKey(address, key) + require.NoError(t, err) + } + + keyCount, err := accounts.GetAccountPublicKeyCount(address) + require.NoError(t, err) + require.Equal(t, tc.storedKeyCount+tc.duplicateKeyCount, keyCount) + + for i, key := range keys { + seqNum, err := accounts.GetAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint64(0), seqNum) + + err = accounts.IncrementAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + + seqNum, err = accounts.GetAccountPublicKeySequenceNumber(address, uint32(i)) + require.NoError(t, err) + require.Equal(t, uint64(1), seqNum) + + retrievedKey, err := accounts.GetAccountPublicKey(address, uint32(i)) + require.NoError(t, err) + key.Index = uint32(i) + key.SeqNumber = 1 + require.Equal(t, key, retrievedKey) + } + + storageUsed, err := accounts.GetStorageUsed(address) + require.NoError(t, err) + + snapshot, err := txnState.FinalizeMainTransaction() + require.NoError(t, err) + + accountStatusValue, exists := snapshot.WriteSet[flow.AccountStatusRegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, accountStatusValue) + + // Test storage used + var expectedStorageUsed uint64 + + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountStatusRegisterID(address), + accountStatusValue)) + + if tc.storedKeyCount > 0 { + // Include account public key 0 in storage used + key0Value, exists := snapshot.WriteSet[flow.AccountPublicKey0RegisterID(address)] + require.True(t, exists) + require.NotEmpty(t, key0Value) + + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + key0Value)) + + // Include pk_b register in storage used + batchNum := uint32(0) + for { + batchValue, exists := snapshot.WriteSet[flow.AccountBatchPublicKeyRegisterID(address, batchNum)] + if !exists || len(batchValue) == 0 { + break + } + require.True(t, len(batchValue) > 1) + + expectedStorageUsed += uint64(environment.RegisterSize( + flow.AccountPublicKey0RegisterID(address), + batchValue)) + + batchNum++ + } + } + + for i := 1; i < int(tc.storedKeyCount+tc.duplicateKeyCount); i++ { + // Include predefined sequence number in storage used + expectedStorageUsed += environment.PredefinedSequenceNumberPayloadSize(address, uint32(i)) + } + + require.Equal(t, expectedStorageUsed, storageUsed) + }) + } +} + +func TestRegisterSize(t *testing.T) { + address := flow.Address{0x01} + + registerID := flow.AccountStatusRegisterID(address) + registerValue := environment.NewAccountStatus().ToBytes() + registerSize := environment.RegisterSize(registerID, registerValue) 
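+ // RegisterSize should report the same size as a ledger payload built from the equivalent key/value pair, which the assertion below verifies.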
+ + payload := ledger.NewPayload( + convert.RegisterIDToLedgerKey(registerID), + registerValue) + payloadSize := payload.Size() + + require.Equal(t, payloadSize, registerSize) } diff --git a/fvm/environment/block_info.go b/fvm/environment/block_info.go index 9e55a67c649..fec73407af4 100644 --- a/fvm/environment/block_info.go +++ b/fvm/environment/block_info.go @@ -3,6 +3,7 @@ package environment import ( "fmt" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/runtime" "github.com/onflow/flow-go/fvm/errors" @@ -103,8 +104,11 @@ func (info *blockInfo) GetCurrentBlockHeight() (uint64, error) { trace.FVMEnvGetCurrentBlockHeight).End() err := info.meter.MeterComputation( - ComputationKindGetCurrentBlockHeight, - 1) + common.ComputationUsage{ + Kind: ComputationKindGetCurrentBlockHeight, + Intensity: 1, + }, + ) if err != nil { return 0, fmt.Errorf("get current block height failed: %w", err) } @@ -126,22 +130,29 @@ func (info *blockInfo) GetBlockAtHeight( defer info.tracer.StartChildSpan(trace.FVMEnvGetBlockAtHeight).End() err := info.meter.MeterComputation( - ComputationKindGetBlockAtHeight, - 1) + common.ComputationUsage{ + Kind: ComputationKindGetBlockAtHeight, + Intensity: 1, + }, + ) if err != nil { return runtime.Block{}, false, fmt.Errorf( "get block at height failed: %w", err) } + if info.blockHeader != nil && height == info.blockHeader.Height { + return runtimeBlockFromHeader(info.blockHeader), true, nil + } + + if height+uint64(flow.DefaultTransactionExpiry) < info.blockHeader.Height { + return runtime.Block{}, false, nil + } + if info.blocks == nil { return runtime.Block{}, false, errors.NewOperationNotSupportedError( "GetBlockAtHeight") } - if info.blockHeader != nil && height == info.blockHeader.Height { + return runtimeBlockFromHeader(info.blockHeader), true, nil - } - header, err := info.blocks.ByHeightFrom(height, info.blockHeader) // TODO (ramtin): remove dependency on storage and move this if condition // to blockfinder diff --git a/fvm/environment/block_info_test.go b/fvm/environment/block_info_test.go new file mode 100644 index 00000000000..bc38cd2055a --- /dev/null +++ b/fvm/environment/block_info_test.go @@ -0,0 +1,69 @@ +package environment_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/tracing" + "github.com/onflow/flow-go/model/flow" + storageErr "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestBlockInfo(t *testing.T) { + tracer := tracing.NewMockTracerSpan() + meter := &util.NopMeter{} + blocks := &mockBlocks{ + blocks: make(map[uint64]*flow.Header), + } + height := uint64(flow.DefaultTransactionExpiry) + header := unittest.BlockHeaderWithHeight(height) + + bi := environment.NewBlockInfo(tracer, meter, header, blocks) + + // verify the current block exists + blocks.Add(header) + b, exists, err := bi.GetBlockAtHeight(height) + require.NoError(t, err) + require.True(t, exists) + require.Equal(t, header.Height, b.Height) + + // verify blocks that do not exist + b, exists, err = bi.GetBlockAtHeight(height + 1) + require.NoError(t, err) + require.False(t, exists) + + // verify that the block at the lowest accepted height exists + lowestAcceptedHeight := height - flow.DefaultTransactionExpiry + lowestHeader := unittest.BlockHeaderWithHeight(lowestAcceptedHeight) + blocks.Add(lowestHeader) + b, exists, err =
bi.GetBlockAtHeight(lowestAcceptedHeight) + require.NoError(t, err) + require.True(t, exists) + require.Equal(t, lowestHeader.Height, b.Height) + + // verify that the block at the height before the lowest accepted height does not exist + _, exists, err = bi.GetBlockAtHeight(lowestAcceptedHeight - 1) + require.NoError(t, err) + require.False(t, exists) +} + +type mockBlocks struct { + blocks map[uint64]*flow.Header +} + +func (m *mockBlocks) ByHeightFrom(height uint64, header *flow.Header) (*flow.Header, error) { + h, ok := m.blocks[height] + if !ok { + return nil, fmt.Errorf("block does not exist: %w", storageErr.ErrNotFound) + } + return h, nil +} + +func (m *mockBlocks) Add(h *flow.Header) { + m.blocks[h.Height] = h +} diff --git a/fvm/environment/blocks.go b/fvm/environment/blocks.go index 6430955a0fe..d89a672e264 100644 --- a/fvm/environment/blocks.go +++ b/fvm/environment/blocks.go @@ -3,7 +3,7 @@ package environment import ( "fmt" - "github.com/onflow/cadence/runtime/stdlib" + "github.com/onflow/cadence/stdlib" "github.com/onflow/cadence/runtime" @@ -95,6 +95,6 @@ func runtimeBlockFromHeader(header *flow.Header) runtime.Block { Height: header.Height, View: header.View, Hash: stdlib.BlockHash(header.ID()), - Timestamp: header.Timestamp.UnixNano(), + Timestamp: int64(header.Timestamp * 1e6), // CAUTION: `runtime.Block` has its timestamp in NANO-seconds, while `flow.Header` uses milliseconds } } diff --git a/fvm/environment/contract_reader.go b/fvm/environment/contract_reader.go index 2f21c4a9f92..d387cdae46c 100644 --- a/fvm/environment/contract_reader.go +++ b/fvm/environment/contract_reader.go @@ -3,9 +3,10 @@ package environment import ( "fmt" + "github.com/onflow/cadence/ast" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/ast" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/stdlib" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/tracing" @@ -15,21 +16,23 @@ import ( // ContractReader provides read access to contracts.
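// It also carries the chain-specific address of the Crypto contract so that identifier imports of Crypto can be resolved to a concrete address location (see ResolveLocation below).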
type ContractReader struct { - tracer tracing.TracerSpan - meter Meter - - accounts Accounts + tracer tracing.TracerSpan + meter Meter + accounts Accounts + cryptoContractAddress common.Address } func NewContractReader( tracer tracing.TracerSpan, meter Meter, accounts Accounts, + cryptoContractAddress common.Address, ) *ContractReader { return &ContractReader{ - tracer: tracer, - meter: meter, - accounts: accounts, + tracer: tracer, + meter: meter, + accounts: accounts, + cryptoContractAddress: cryptoContractAddress, } } @@ -43,8 +46,11 @@ func (reader *ContractReader) GetAccountContractNames( trace.FVMEnvGetAccountContractNames).End() err := reader.meter.MeterComputation( - ComputationKindGetAccountContractNames, - 1) + common.ComputationUsage{ + Kind: ComputationKindGetAccountContractNames, + Intensity: 1, + }, + ) if err != nil { return nil, fmt.Errorf("get account contract names failed: %w", err) } @@ -64,17 +70,47 @@ func (reader *ContractReader) ResolveLocation( defer reader.tracer.StartExtensiveTracingChildSpan( trace.FVMEnvResolveLocation).End() - err := reader.meter.MeterComputation(ComputationKindResolveLocation, 1) + err := reader.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindResolveLocation, + Intensity: 1, + }, + ) if err != nil { return nil, fmt.Errorf("resolve location failed: %w", err) } + return ResolveLocation( + identifiers, + location, + reader.accounts.GetContractNames, + reader.cryptoContractAddress, + ) +} + +func ResolveLocation( + identifiers []ast.Identifier, + location common.Location, + getContractNames func(flow.Address) ([]string, error), + cryptoContractAddress common.Address, +) ([]runtime.ResolvedLocation, error) { + addressLocation, isAddress := location.(common.AddressLocation) // if the location is not an address location, e.g. an identifier location - // (`import Crypto`), then return a single resolved location which declares - // all identifiers. + // then return a single resolved location which declares all identifiers. 
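+ // The Crypto contract is deployed at a different address on each chain, so the address injected into the reader is used to rewrite the identifier import before resolution continues.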
if !isAddress { + + // if the location is the Crypto contract, + // translate it to the address of the Crypto contract on the chain + + if location == stdlib.CryptoContractLocation { + location = common.AddressLocation{ + Address: cryptoContractAddress, + Name: string(stdlib.CryptoContractLocation), + } + } + return []runtime.ResolvedLocation{ { Location: location, @@ -87,9 +123,13 @@ func (reader *ContractReader) ResolveLocation( // and no specific identifiers where requested in the import statement, // then fetch all identifiers at this address if len(identifiers) == 0 { + if getContractNames == nil { + return nil, fmt.Errorf("no identifiers provided") + } + address := flow.ConvertAddress(addressLocation.Address) - contractNames, err := reader.accounts.GetContractNames(address) + contractNames, err := getContractNames(address) if err != nil { return nil, fmt.Errorf("resolving location failed: %w", err) } @@ -134,7 +174,12 @@ func (reader *ContractReader) getCode( ) { defer reader.tracer.StartChildSpan(trace.FVMEnvGetCode).End() - err := reader.meter.MeterComputation(ComputationKindGetCode, 1) + err := reader.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindGetCode, + Intensity: 1, + }, + ) if err != nil { return nil, fmt.Errorf("get code failed: %w", err) } @@ -173,8 +218,11 @@ func (reader *ContractReader) GetAccountContractCode( trace.FVMEnvGetAccountContractCode).End() err := reader.meter.MeterComputation( - ComputationKindGetAccountContractCode, - 1) + common.ComputationUsage{ + Kind: ComputationKindGetAccountContractCode, + Intensity: 1, + }, + ) if err != nil { return nil, fmt.Errorf("get account contract code failed: %w", err) } diff --git a/fvm/environment/contract_updater.go b/fvm/environment/contract_updater.go index 2185b4d09da..950e8c40b49 100644 --- a/fvm/environment/contract_updater.go +++ b/fvm/environment/contract_updater.go @@ -6,7 +6,7 @@ import ( "sort" "github.com/onflow/cadence" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/fvm/errors" @@ -194,7 +194,7 @@ func (impl *contractUpdaterStubsImpl) getIsContractDeploymentRestricted() ( common.MustBytesToAddress(service.Bytes()), blueprints.IsContractDeploymentRestrictedPath) if err != nil { - impl.logger.Logger(). + impl.logger. Debug(). Msg("Failed to read IsContractDeploymentRestricted from the " + "service account. Using value from context instead.") @@ -202,13 +202,14 @@ func (impl *contractUpdaterStubsImpl) getIsContractDeploymentRestricted() ( } restrictedCadence, ok := value.(cadence.Bool) if !ok { - impl.logger.Logger(). + impl.logger. Debug(). Msg("Failed to parse IsContractDeploymentRestricted from the " + "service account. Using value from context instead.") return false, false } - restricted = restrictedCadence.ToGoValue().(bool) + restricted = bool(restrictedCadence) + return restricted, true } @@ -244,12 +245,12 @@ func (impl *contractUpdaterStubsImpl) GetAuthorizedAccounts( "service account. using default behaviour instead." 
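// Read failures and parse failures below both degrade gracefully: a warning is logged and the default authorized accounts are returned instead of failing.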
if err != nil { - impl.logger.Logger().Warn().Msg(warningMsg) + impl.logger.Warn().Msg(warningMsg) return defaultAccounts } addresses, ok := cadenceValueToAddressSlice(value) if !ok { - impl.logger.Logger().Warn().Msg(warningMsg) + impl.logger.Warn().Msg(warningMsg) return defaultAccounts } return addresses @@ -323,8 +324,11 @@ func (updater *ContractUpdaterImpl) UpdateAccountContractCode( trace.FVMEnvUpdateAccountContractCode).End() err := updater.meter.MeterComputation( - ComputationKindUpdateAccountContractCode, - 1) + common.ComputationUsage{ + Kind: ComputationKindUpdateAccountContractCode, + Intensity: 1, + }, + ) if err != nil { return fmt.Errorf("update account contract code failed: %w", err) } @@ -347,8 +351,11 @@ func (updater *ContractUpdaterImpl) RemoveAccountContractCode( trace.FVMEnvRemoveAccountContractCode).End() err := updater.meter.MeterComputation( - ComputationKindRemoveAccountContractCode, - 1) + common.ComputationUsage{ + Kind: ComputationKindRemoveAccountContractCode, + Intensity: 1, + }, + ) if err != nil { return fmt.Errorf("remove account contract code failed: %w", err) } diff --git a/fvm/environment/contract_updater_test.go b/fvm/environment/contract_updater_test.go index 42ec371c04a..861b0bdb860 100644 --- a/fvm/environment/contract_updater_test.go +++ b/fvm/environment/contract_updater_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/onflow/cadence" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -360,7 +360,7 @@ func TestContract_ContractRemoval(t *testing.T) { // deploy contract with voucher err = contractUpdater.SetContract( location, - []byte("pub contract TestContract {}"), + []byte("access(all) contract TestContract {}"), []flow.Address{ flowAddress, }, @@ -385,7 +385,7 @@ func TestContract_ContractRemoval(t *testing.T) { // update should work err = contractUpdater.SetContract( location, - []byte("pub contract TestContract {}"), + []byte("access(all) contract TestContract {}"), []flow.Address{ flowAddress, }, @@ -416,7 +416,7 @@ func TestContract_ContractRemoval(t *testing.T) { // deploy contract with voucher err = contractUpdater.SetContract( location, - []byte("pub contract TestContract {}"), + []byte("access(all) contract TestContract {}"), []flow.Address{ flowAddress, }, @@ -441,7 +441,7 @@ func TestContract_ContractRemoval(t *testing.T) { // update should work err = contractUpdater.SetContract( location, - []byte("pub contract TestContract {}"), + []byte("access(all) contract TestContract {}"), []flow.Address{ flowAddress, }, diff --git a/fvm/environment/crypto_library.go b/fvm/environment/crypto_library.go index cbb2d24e1f5..c3fe05121fc 100644 --- a/fvm/environment/crypto_library.go +++ b/fvm/environment/crypto_library.go @@ -3,6 +3,7 @@ package environment import ( "fmt" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/runtime" "github.com/onflow/flow-go/fvm/crypto" @@ -181,7 +182,12 @@ func (lib *cryptoLibrary) Hash( ) { defer lib.tracer.StartChildSpan(trace.FVMEnvHash).End() - err := lib.meter.MeterComputation(ComputationKindHash, 1) + err := lib.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindHash, + Intensity: 1, + }, + ) if err != nil { return nil, fmt.Errorf("hash failed: %w", err) } @@ -203,7 +209,12 @@ func (lib *cryptoLibrary) VerifySignature( ) { defer lib.tracer.StartChildSpan(trace.FVMEnvVerifySignature).End() - err := lib.meter.MeterComputation(ComputationKindVerifySignature, 1) + 
err := lib.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindVerifySignature, + Intensity: 1, + }, + ) if err != nil { return false, fmt.Errorf("verify signature failed: %w", err) } @@ -227,7 +238,12 @@ func (lib *cryptoLibrary) VerifySignature( func (lib *cryptoLibrary) ValidatePublicKey(pk *runtime.PublicKey) error { defer lib.tracer.StartChildSpan(trace.FVMEnvValidatePublicKey).End() - err := lib.meter.MeterComputation(ComputationKindValidatePublicKey, 1) + err := lib.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindValidatePublicKey, + Intensity: 1, + }, + ) if err != nil { return fmt.Errorf("validate public key failed: %w", err) } @@ -244,7 +260,12 @@ func (lib *cryptoLibrary) BLSVerifyPOP( ) { defer lib.tracer.StartChildSpan(trace.FVMEnvBLSVerifyPOP).End() - err := lib.meter.MeterComputation(ComputationKindBLSVerifyPOP, 1) + err := lib.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindBLSVerifyPOP, + Intensity: 1, + }, + ) if err != nil { return false, fmt.Errorf("BLSVerifyPOP failed: %w", err) } @@ -260,7 +281,12 @@ func (lib *cryptoLibrary) BLSAggregateSignatures( ) { defer lib.tracer.StartChildSpan(trace.FVMEnvBLSAggregateSignatures).End() - err := lib.meter.MeterComputation(ComputationKindBLSAggregateSignatures, 1) + err := lib.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindBLSAggregateSignatures, + Intensity: 1, + }, + ) if err != nil { return nil, fmt.Errorf("BLSAggregateSignatures failed: %w", err) } @@ -276,7 +302,12 @@ func (lib *cryptoLibrary) BLSAggregatePublicKeys( ) { defer lib.tracer.StartChildSpan(trace.FVMEnvBLSAggregatePublicKeys).End() - err := lib.meter.MeterComputation(ComputationKindBLSAggregatePublicKeys, 1) + err := lib.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindBLSAggregatePublicKeys, + Intensity: 1, + }, + ) if err != nil { return nil, fmt.Errorf("BLSAggregatePublicKeys failed: %w", err) } diff --git a/fvm/environment/derived_data_invalidator.go b/fvm/environment/derived_data_invalidator.go index 5aa4bf05808..754a1e37513 100644 --- a/fvm/environment/derived_data_invalidator.go +++ b/fvm/environment/derived_data_invalidator.go @@ -1,11 +1,10 @@ package environment import ( - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/storage/derived" "github.com/onflow/flow-go/fvm/storage/snapshot" - "github.com/onflow/flow-go/model/flow" ) type ContractUpdate struct { @@ -26,51 +25,44 @@ func (u ContractUpdates) Any() bool { type DerivedDataInvalidator struct { ContractUpdates - MeterParamOverridesUpdated bool + ExecutionParametersUpdated bool } var _ derived.TransactionInvalidator = DerivedDataInvalidator{} func NewDerivedDataInvalidator( contractUpdates ContractUpdates, - serviceAddress flow.Address, executionSnapshot *snapshot.ExecutionSnapshot, + meterStateRead *snapshot.ExecutionSnapshot, ) DerivedDataInvalidator { return DerivedDataInvalidator{ ContractUpdates: contractUpdates, - MeterParamOverridesUpdated: meterParamOverridesUpdated( - serviceAddress, - executionSnapshot), + ExecutionParametersUpdated: executionParametersUpdated( + executionSnapshot, + meterStateRead), } } -func meterParamOverridesUpdated( - serviceAddress flow.Address, +// executionParametersUpdated returns true if the meter param overrides have been updated +// this is done by checking if the registers needed to compute the meter param overrides +// have been touched in the execution snapshot +func 
executionParametersUpdated( executionSnapshot *snapshot.ExecutionSnapshot, + meterStateRead *snapshot.ExecutionSnapshot, ) bool { - serviceAccount := string(serviceAddress.Bytes()) - storageDomain := common.PathDomainStorage.Identifier() - - for registerId := range executionSnapshot.WriteSet { - // The meter param override values are stored in the service account. - if registerId.Owner != serviceAccount { - continue + if len(executionSnapshot.WriteSet) > len(meterStateRead.ReadSet) { + for registerId := range meterStateRead.ReadSet { + _, ok := executionSnapshot.WriteSet[registerId] + if ok { + return true + } } - - // NOTE: This condition is empirically generated by running the - // MeterParamOverridesComputer to capture touched registers. - // - // The paramater settings are stored as regular fields in the service - // account. In general, each account's regular fields are stored in - // ordered map known only to cadence. Cadence encodes this map into - // bytes and split the bytes into slab chunks before storing the slabs - // into the ledger. Hence any changes to the stabs indicate changes - // the ordered map. - // - // The meter param overrides use storageDomain as input, so any - // changes to it must also invalidate the values. - if registerId.Key == storageDomain || registerId.IsSlabIndex() { - return true + } else { + for registerId := range executionSnapshot.WriteSet { + _, ok := meterStateRead.ReadSet[registerId] + if ok { + return true + } } } @@ -81,8 +73,8 @@ func (invalidator DerivedDataInvalidator) ProgramInvalidator() derived.ProgramIn return ProgramInvalidator{invalidator} } -func (invalidator DerivedDataInvalidator) MeterParamOverridesInvalidator() derived.MeterParamOverridesInvalidator { - return MeterParamOverridesInvalidator{invalidator} +func (invalidator DerivedDataInvalidator) ExecutionParametersInvalidator() derived.ExecutionParametersInvalidator { + return ExecutionParametersInvalidator{invalidator} } type ProgramInvalidator struct { @@ -90,16 +82,16 @@ type ProgramInvalidator struct { } func (invalidator ProgramInvalidator) ShouldInvalidateEntries() bool { - return invalidator.MeterParamOverridesUpdated || + return invalidator.ExecutionParametersUpdated || invalidator.ContractUpdates.Any() } func (invalidator ProgramInvalidator) ShouldInvalidateEntry( - location common.AddressLocation, + _ common.AddressLocation, program *derived.Program, - snapshot *snapshot.ExecutionSnapshot, + _ *snapshot.ExecutionSnapshot, ) bool { - if invalidator.MeterParamOverridesUpdated { + if invalidator.ExecutionParametersUpdated { // if meter parameters changed we need to invalidate all programs return true } @@ -132,18 +124,18 @@ func (invalidator ProgramInvalidator) ShouldInvalidateEntry( return false } -type MeterParamOverridesInvalidator struct { +type ExecutionParametersInvalidator struct { DerivedDataInvalidator } -func (invalidator MeterParamOverridesInvalidator) ShouldInvalidateEntries() bool { - return invalidator.MeterParamOverridesUpdated +func (invalidator ExecutionParametersInvalidator) ShouldInvalidateEntries() bool { + return invalidator.ExecutionParametersUpdated } -func (invalidator MeterParamOverridesInvalidator) ShouldInvalidateEntry( +func (invalidator ExecutionParametersInvalidator) ShouldInvalidateEntry( _ struct{}, - _ derived.MeterParamOverrides, + _ derived.StateExecutionParameters, _ *snapshot.ExecutionSnapshot, ) bool { - return invalidator.MeterParamOverridesUpdated + return invalidator.ExecutionParametersUpdated } diff --git 
a/fvm/environment/derived_data_invalidator_test.go b/fvm/environment/derived_data_invalidator_test.go index aa86aaeb258..9598d93f4b1 100644 --- a/fvm/environment/derived_data_invalidator_test.go +++ b/fvm/environment/derived_data_invalidator_test.go @@ -3,7 +3,7 @@ package environment_test import ( "testing" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm" @@ -82,7 +82,7 @@ func TestDerivedDataProgramInvalidator(t *testing.T) { }) t.Run("meter parameters invalidator invalidates all entries", func(t *testing.T) { invalidator := environment.DerivedDataInvalidator{ - MeterParamOverridesUpdated: true, + ExecutionParametersUpdated: true, }.ProgramInvalidator() require.True(t, invalidator.ShouldInvalidateEntries()) @@ -207,23 +207,23 @@ func TestDerivedDataProgramInvalidator(t *testing.T) { func TestMeterParamOverridesInvalidator(t *testing.T) { invalidator := environment.DerivedDataInvalidator{}. - MeterParamOverridesInvalidator() + ExecutionParametersInvalidator() require.False(t, invalidator.ShouldInvalidateEntries()) require.False(t, invalidator.ShouldInvalidateEntry( struct{}{}, - derived.MeterParamOverrides{}, + derived.StateExecutionParameters{}, nil)) invalidator = environment.DerivedDataInvalidator{ ContractUpdates: environment.ContractUpdates{}, - MeterParamOverridesUpdated: true, - }.MeterParamOverridesInvalidator() + ExecutionParametersUpdated: true, + }.ExecutionParametersInvalidator() require.True(t, invalidator.ShouldInvalidateEntries()) require.True(t, invalidator.ShouldInvalidateEntry( struct{}{}, - derived.MeterParamOverrides{}, + derived.StateExecutionParameters{}, nil)) } @@ -244,7 +244,7 @@ func TestMeterParamOverridesUpdated(t *testing.T) { snapshotTree := snapshot.NewSnapshotTree(nil) - ctx := fvm.NewContext(fvm.WithChain(flow.Testnet.Chain())) + ctx := fvm.NewContext(fvm.WithChain(flow.Emulator.Chain())) vm := fvm.NewVirtualMachine() executionSnapshot, _, err := vm.Run( @@ -265,7 +265,11 @@ func TestMeterParamOverridesUpdated(t *testing.T) { txnState, err := blockDatabase.NewTransaction(0, state.DefaultParameters()) require.NoError(t, err) - computer := fvm.NewMeterParamOverridesComputer(ctx, txnState) + computer := fvm.NewExecutionParametersComputer( + ctx.Logger, + ctx, + txnState, + ) overrides, err := computer.Compute(txnState, struct{}{}) require.NoError(t, err) @@ -283,6 +287,12 @@ func TestMeterParamOverridesUpdated(t *testing.T) { ctx.TxBody = &flow.TransactionBody{} + meterStateRead := &snapshot.ExecutionSnapshot{ + ReadSet: map[flow.RegisterID]struct{}{ + flow.NewRegisterID(ctx.Chain.ServiceAddress(), "meter"): {}, + }, + } + checkForUpdates := func(id flow.RegisterID, expected bool) { snapshot := &snapshot.ExecutionSnapshot{ WriteSet: map[flow.RegisterID]flow.RegisterValue{ @@ -292,27 +302,26 @@ func TestMeterParamOverridesUpdated(t *testing.T) { invalidator := environment.NewDerivedDataInvalidator( environment.ContractUpdates{}, - ctx.Chain.ServiceAddress(), - snapshot) - require.Equal(t, expected, invalidator.MeterParamOverridesUpdated) + snapshot, + meterStateRead) + require.Equal(t, expected, invalidator.ExecutionParametersUpdated) } executionSnapshot, err = txnState.FinalizeMainTransaction() require.NoError(t, err) + owner := ctx.Chain.ServiceAddress() + otherOwner := unittest.RandomAddressFixtureForChain(ctx.Chain.ChainID()) + for _, registerId := range executionSnapshot.AllRegisterIDs() { - checkForUpdates(registerId, true) + checkForUpdates(registerId, 
false) checkForUpdates( - flow.NewRegisterID("other owner", registerId.Key), + flow.NewRegisterID(otherOwner, registerId.Key), false) } - owner := string(ctx.Chain.ServiceAddress().Bytes()) - stabIndexKey := flow.NewRegisterID(owner, "$12345678") - require.True(t, stabIndexKey.IsSlabIndex()) - - checkForUpdates(stabIndexKey, true) - checkForUpdates(flow.NewRegisterID(owner, "other keys"), false) - checkForUpdates(flow.NewRegisterID("other owner", stabIndexKey.Key), false) - checkForUpdates(flow.NewRegisterID("other owner", "other key"), false) + checkForUpdates(flow.NewRegisterID(owner, "meter2"), false) + checkForUpdates(flow.NewRegisterID(owner, "meter"), true) + checkForUpdates(flow.NewRegisterID(otherOwner, "meter2"), false) + checkForUpdates(flow.NewRegisterID(otherOwner, "meter"), false) } diff --git a/fvm/environment/env.go b/fvm/environment/env.go index 886a82be701..eb1195316fd 100644 --- a/fvm/environment/env.go +++ b/fvm/environment/env.go @@ -3,13 +3,9 @@ package environment import ( "github.com/onflow/cadence" "github.com/onflow/cadence/runtime" - "github.com/rs/zerolog" - otelTrace "go.opentelemetry.io/otel/trace" reusableRuntime "github.com/onflow/flow-go/fvm/runtime" - "github.com/onflow/flow-go/fvm/tracing" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/trace" ) // Environment implements the accounts business logic and exposes cadence @@ -17,14 +13,12 @@ import ( type Environment interface { runtime.Interface - // Tracer - StartChildSpan( - name trace.SpanName, - options ...otelTrace.SpanStartOption, - ) tracing.TracerSpan + Tracer Meter + MetricsReporter + // Runtime BorrowCadenceRuntime() *reusableRuntime.ReusableCadenceRuntime ReturnCadenceRuntime(*reusableRuntime.ReusableCadenceRuntime) @@ -32,7 +26,7 @@ type Environment interface { TransactionInfo // ProgramLogger - Logger() *zerolog.Logger + LoggerProvider Logs() []string // EventEmitter @@ -41,6 +35,8 @@ type Environment interface { ConvertedServiceEvents() flow.ServiceEventList // SystemContracts + ContractFunctionInvoker + AccountsStorageCapacity( addresses []flow.Address, payer flow.Address, @@ -68,6 +64,12 @@ type Environment interface { // AccountInfo GetAccount(address flow.Address) (*flow.Account, error) + GetAccountKeys(address flow.Address) ([]flow.AccountPublicKey, error) + + // RandomSourceHistory is the current block's derived random source. + // This source is only used by the core-contract that tracks the random source + // history for commit-reveal schemes. 
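+ // The returned slice is RandomSourceHistoryLength (32) bytes, derived from the block's entropy through a dedicated PRG customizer (see history_random_source_provider.go).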
+ RandomSourceHistory() ([]byte, error) // FlushPendingUpdates flushes pending updates from the stateful environment // modules (i.e., ContractUpdater) to the state transaction, and return @@ -97,20 +99,29 @@ type EnvironmentParams struct { BlockInfoParams TransactionInfoParams + ScriptInfoParams + + EntropyProvider + ExecutionVersionProvider ContractUpdaterParams } func DefaultEnvironmentParams() EnvironmentParams { + const chainID = flow.Mainnet return EnvironmentParams{ - Chain: flow.Mainnet.Chain(), - ServiceAccountEnabled: true, - - RuntimeParams: DefaultRuntimeParams(), - ProgramLoggerParams: DefaultProgramLoggerParams(), - EventEmitterParams: DefaultEventEmitterParams(), - BlockInfoParams: DefaultBlockInfoParams(), - TransactionInfoParams: DefaultTransactionInfoParams(), - ContractUpdaterParams: DefaultContractUpdaterParams(), + Chain: chainID.Chain(), + ServiceAccountEnabled: true, + RuntimeParams: DefaultRuntimeParams(), + ProgramLoggerParams: DefaultProgramLoggerParams(), + EventEmitterParams: DefaultEventEmitterParams(), + BlockInfoParams: DefaultBlockInfoParams(), + TransactionInfoParams: DefaultTransactionInfoParams(), + ContractUpdaterParams: DefaultContractUpdaterParams(), + ExecutionVersionProvider: ZeroExecutionVersionProvider{}, } } + +func (env *EnvironmentParams) SetScriptInfoParams(info *ScriptInfoParams) { + env.ScriptInfoParams = *info +} diff --git a/fvm/environment/event_emitter.go b/fvm/environment/event_emitter.go index 366c2d81d36..7457b6041ad 100644 --- a/fvm/environment/event_emitter.go +++ b/fvm/environment/event_emitter.go @@ -3,6 +3,9 @@ package environment import ( "fmt" + "github.com/onflow/cadence/common" + "github.com/rs/zerolog" + "github.com/onflow/cadence" "github.com/onflow/flow-go/fvm/errors" @@ -12,6 +15,7 @@ import ( "github.com/onflow/flow-go/model/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/utils/logging" ) const ( @@ -19,16 +23,14 @@ const ( ) type EventEmitterParams struct { - ServiceEventCollectionEnabled bool - EventCollectionByteSizeLimit uint64 - EventEncoder EventEncoder + EventCollectionByteSizeLimit uint64 + EventEncoder EventEncoder } func DefaultEventEmitterParams() EventEmitterParams { return EventEmitterParams{ - ServiceEventCollectionEnabled: false, - EventCollectionByteSizeLimit: DefaultEventCollectionByteSizeLimit, - EventEncoder: NewCadenceEventEncoder(), + EventCollectionByteSizeLimit: DefaultEventCollectionByteSizeLimit, + EventEncoder: NewCadenceEventEncoder(), } } @@ -38,8 +40,10 @@ func DefaultEventEmitterParams() EventEmitterParams { // Note that scripts do not emit events, but must expose the API in compliance // with the runtime environment interface. type EventEmitter interface { - // Cadence's runtime API. Note that the script variant will return - // OperationNotSupportedError. + // EmitEvent satisfies Cadence's runtime API. + // This will encode the cadence event + // + // Note that the script variant will return OperationNotSupportedError. EmitEvent(event cadence.Event) error Events() flow.EventsList @@ -94,7 +98,7 @@ var _ EventEmitter = NoEventEmitter{} // where emitting an event does nothing. 
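// Scripts rely on this implementation: it exposes the EmitEvent API for interface compliance while discarding every event.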
type NoEventEmitter struct{} -func (NoEventEmitter) EmitEvent(event cadence.Event) error { +func (NoEventEmitter) EmitEvent(cadence.Event) error { return nil } @@ -114,6 +118,7 @@ func (NoEventEmitter) Reset() { } type eventEmitter struct { + log zerolog.Logger tracer tracing.TracerSpan meter Meter @@ -128,6 +133,7 @@ type eventEmitter struct { // NewEventEmitter constructs a new eventEmitter func NewEventEmitter( + log zerolog.Logger, tracer tracing.TracerSpan, meter Meter, chain flow.Chain, @@ -135,6 +141,7 @@ func NewEventEmitter( params EventEmitterParams, ) EventEmitter { emitter := &eventEmitter{ + log: log, tracer: tracer, meter: meter, chain: chain, @@ -159,54 +166,74 @@ func (emitter *eventEmitter) EventCollection() *EventCollection { } func (emitter *eventEmitter) EmitEvent(event cadence.Event) error { - defer emitter.tracer.StartExtensiveTracingChildSpan( - trace.FVMEnvEmitEvent).End() - - err := emitter.meter.MeterComputation(ComputationKindEmitEvent, 1) + err := emitter.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindEncodeEvent, + Intensity: 1, + }, + ) if err != nil { - return fmt.Errorf("emit event failed: %w", err) + return fmt.Errorf("emit event, event encoding failed: %w", err) } payload, err := emitter.EventEncoder.Encode(event) if err != nil { return errors.NewEventEncodingError(err) } + emitter.tracer.StartExtensiveTracingChildSpan(trace.FVMEnvEncodeEvent).End() + defer emitter.tracer.StartExtensiveTracingChildSpan(trace.FVMEnvEmitEvent).End() + + payloadSize := len(payload) + err = emitter.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindEmitEvent, + Intensity: uint64(payloadSize), + }, + ) + if err != nil { + return fmt.Errorf("emit event failed: %w", err) + } - payloadSize := uint64(len(payload)) - - flowEvent := flow.Event{ - Type: flow.EventType(event.EventType.ID()), - TransactionID: emitter.txID, - TransactionIndex: emitter.txIndex, - EventIndex: emitter.eventCollection.TotalEventCounter(), - Payload: payload, + eventType := flow.EventType(event.EventType.ID()) + flowEvent, err := flow.NewEvent( + flow.UntrustedEvent{ + Type: eventType, + TransactionID: emitter.txID, + TransactionIndex: emitter.txIndex, + EventIndex: emitter.eventCollection.TotalEventCounter(), + Payload: payload, + }, + ) + if err != nil { + return fmt.Errorf("could not construct event: %w", err) } // TODO: to set limit to maximum when it is service account and get rid of this flag isServiceAccount := emitter.payer == emitter.chain.ServiceAddress() - if emitter.ServiceEventCollectionEnabled { - ok, err := IsServiceEvent(event, emitter.chain.ChainID()) - if err != nil { - return fmt.Errorf("unable to check service event: %w", err) - } - if ok { - eventEmitError := emitter.eventCollection.AppendServiceEvent( - emitter.chain, - flowEvent, - payloadSize) - - // skip limit if payer is service account - // TODO skip only limit-related errors - if !isServiceAccount && eventEmitError != nil { + isServiceEvent, err := IsServiceEvent(eventType, emitter.chain.ChainID()) + if err != nil { + return fmt.Errorf("unable to check service event: %w", err) + } + if isServiceEvent { + eventEmitError := emitter.eventCollection.AppendServiceEvent( + emitter.chain, + *flowEvent, + uint64(payloadSize)) + + // skip limit if payer is service account + // TODO skip only limit-related errors + if eventEmitError != nil { + if isServiceAccount { + emitter.log.Error().Err(eventEmitError).Str(logging.KeySuspicious, "true").Msg("could not emit service event") + } else { 
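+ // For any other payer, a service event that exceeds the collection limits fails the emit.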
return eventEmitError } } - // We don't return and append the service event into event collection - // as well. } - eventEmitError := emitter.eventCollection.AppendEvent(flowEvent, payloadSize) + // Regardless of whether it is a service event, add to eventCollection + eventEmitError := emitter.eventCollection.AppendEvent(*flowEvent, uint64(payloadSize)) // skip limit if payer is service account if !isServiceAccount { return eventEmitError @@ -277,7 +304,6 @@ func (collection *EventCollection) AppendServiceEvent( collection.convertedServiceEvents = append( collection.convertedServiceEvents, *convertedEvent) - collection.eventCounter++ return collection.meter.MeterEmittedEvent(size) } @@ -289,20 +315,16 @@ func (collection *EventCollection) TotalEventCounter() uint32 { return collection.eventCounter } -// IsServiceEvent determines whether or not an emitted Cadence event is -// considered a service event for the given chain. -func IsServiceEvent(event cadence.Event, chain flow.ChainID) (bool, error) { +// IsServiceEvent determines whether an emitted Cadence event is considered a service event for the given chain. +// An event is a service event if it is defined in the `systemcontracts` package allow-list. +// Note that we have *removed* the prior constraint that service events can only be +// emitted in the system chunk. Now a system smart contract can emit service events +// as part of any transaction. +func IsServiceEvent(eventType flow.EventType, chain flow.ChainID) (bool, error) { // retrieve the service event information for this chain - events, err := systemcontracts.ServiceEventsForChain(chain) - if err != nil { - return false, fmt.Errorf( - "unknown system contracts for chain (%s): %w", - chain.String(), - err) - } + events := systemcontracts.ServiceEventsForChain(chain) - eventType := flow.EventType(event.EventType.ID()) for _, serviceEvent := range events.All() { if serviceEvent.EventType() == eventType { return true, nil diff --git a/fvm/environment/event_emitter_test.go b/fvm/environment/event_emitter_test.go index d0f83ebf656..ecd6e5ae17c 100644 --- a/fvm/environment/event_emitter_test.go +++ b/fvm/environment/event_emitter_test.go @@ -7,9 +7,9 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/cadence" - jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/stdlib" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/ccf" + "github.com/onflow/cadence/stdlib" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/meter" @@ -17,17 +17,17 @@ import ( "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/fvm/tracing" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) func Test_IsServiceEvent(t *testing.T) { chain := flow.Emulator - events, err := systemcontracts.ServiceEventsForChain(chain) - require.NoError(t, err) + events := systemcontracts.ServiceEventsForChain(chain) t.Run("correct", func(t *testing.T) { for _, event := range events.All() { - isServiceEvent, err := environment.IsServiceEvent(cadence.Event{ + event := cadence.Event{ EventType: &cadence.EventType{ Location: common.AddressLocation{ Address: common.MustBytesToAddress( @@ -35,14 +35,16 @@ func Test_IsServiceEvent(t *testing.T) { }, QualifiedIdentifier: event.QualifiedIdentifier(), }, - }, chain) + } + + isServiceEvent, err := environment.IsServiceEvent(flow.EventType(event.Type().ID()), chain) require.NoError(t, err) 
assert.True(t, isServiceEvent) } }) t.Run("wrong chain", func(t *testing.T) { - isServiceEvent, err := environment.IsServiceEvent(cadence.Event{ + event := cadence.Event{ EventType: &cadence.EventType{ Location: common.AddressLocation{ Address: common.MustBytesToAddress( @@ -50,13 +52,15 @@ func Test_IsServiceEvent(t *testing.T) { }, QualifiedIdentifier: events.EpochCommit.QualifiedIdentifier(), }, - }, chain) + } + + isServiceEvent, err := environment.IsServiceEvent(flow.EventType(event.Type().ID()), chain) require.NoError(t, err) assert.False(t, isServiceEvent) }) t.Run("wrong type", func(t *testing.T) { - isServiceEvent, err := environment.IsServiceEvent(cadence.Event{ + event := cadence.Event{ EventType: &cadence.EventType{ Location: common.AddressLocation{ Address: common.MustBytesToAddress( @@ -64,7 +68,9 @@ func Test_IsServiceEvent(t *testing.T) { }, QualifiedIdentifier: "SomeContract.SomeEvent", }, - }, chain) + } + + isServiceEvent, err := environment.IsServiceEvent(flow.EventType(event.Type().ID()), chain) require.NoError(t, err) assert.False(t, isServiceEvent) }) @@ -149,7 +155,6 @@ func Test_EmitEvent_Limit(t *testing.T) { err := eventEmitter.EmitEvent(cadenceEvent1) require.Error(t, err) }) - } func createTestEventEmitterWithLimit(chain flow.ChainID, address flow.Address, eventEmitLimit uint64) environment.EventEmitter { @@ -160,26 +165,26 @@ func createTestEventEmitterWithLimit(chain flow.ChainID, address flow.Address, e )) return environment.NewEventEmitter( + unittest.Logger(), tracing.NewTracerSpan(), environment.NewMeter(txnState), chain.Chain(), environment.TransactionInfoParams{ - TxId: flow.ZeroID, + TxId: unittest.IdentifierFixture(), TxIndex: 0, TxBody: &flow.TransactionBody{ Payer: address, }, }, environment.EventEmitterParams{ - ServiceEventCollectionEnabled: false, - EventCollectionByteSizeLimit: eventEmitLimit, - EventEncoder: environment.NewCadenceEventEncoder(), + EventCollectionByteSizeLimit: eventEmitLimit, + EventEncoder: environment.NewCadenceEventEncoder(), }, ) } func getCadenceEventPayloadByteSize(event cadence.Event) uint64 { - payload, err := jsoncdc.Encode(event) + payload, err := ccf.Encode(event) if err != nil { panic(err) } diff --git a/fvm/environment/event_encoder.go b/fvm/environment/event_encoder.go index 33fdbe20c95..36b1f4bd2cd 100644 --- a/fvm/environment/event_encoder.go +++ b/fvm/environment/event_encoder.go @@ -2,7 +2,7 @@ package environment import ( "github.com/onflow/cadence" - jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/cadence/encoding/ccf" ) type EventEncoder interface { @@ -16,5 +16,5 @@ func NewCadenceEventEncoder() *CadenceEventEncoder { } func (e *CadenceEventEncoder) Encode(event cadence.Event) ([]byte, error) { - return jsoncdc.Encode(event) + return ccf.Encode(event) } diff --git a/fvm/environment/facade_env.go b/fvm/environment/facade_env.go index d45fcdd5b6f..c885fea72e8 100644 --- a/fvm/environment/facade_env.go +++ b/fvm/environment/facade_env.go @@ -3,12 +3,15 @@ package environment import ( "context" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" + "github.com/onflow/cadence/ast" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/sema" "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/fvm/tracing" ) @@ -24,8 +27,9 @@ type 
facadeEnvironment struct { *ProgramLogger EventEmitter - UnsafeRandomGenerator + RandomGenerator CryptoLibrary + RandomSourceHistoryProvider BlockInfo AccountInfo @@ -34,8 +38,10 @@ type facadeEnvironment struct { ValueStore *SystemContracts + MinimumCadenceRequiredVersion UUIDGenerator + AccountLocalIDGenerator AccountCreator @@ -59,12 +65,15 @@ func newFacadeEnvironment( accounts := NewAccounts(txnState) logger := NewProgramLogger(tracer, params.ProgramLoggerParams) runtime := NewRuntime(params.RuntimeParams) + chain := params.Chain systemContracts := NewSystemContracts( - params.Chain, + chain, tracer, logger, runtime) + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + env := &facadeEnvironment{ Runtime: runtime, @@ -74,12 +83,8 @@ func newFacadeEnvironment( ProgramLogger: logger, EventEmitter: NoEventEmitter{}, - UnsafeRandomGenerator: NewUnsafeRandomGenerator( - tracer, - params.BlockHeader, - params.TxIndex, - ), - CryptoLibrary: NewCryptoLibrary(tracer, meter), + CryptoLibrary: NewCryptoLibrary(tracer, meter), + RandomSourceHistoryProvider: NewForbiddenRandomSourceHistoryProvider(), BlockInfo: NewBlockInfo( tracer, @@ -103,11 +108,22 @@ func newFacadeEnvironment( ), SystemContracts: systemContracts, + MinimumCadenceRequiredVersion: NewMinimumCadenceRequiredVersion( + params.ExecutionVersionProvider, + ), UUIDGenerator: NewUUIDGenerator( tracer, + params.Logger, meter, - txnState), + txnState, + params.BlockHeader, + params.TxIndex), + AccountLocalIDGenerator: NewAccountLocalIDGenerator( + tracer, + meter, + accounts, + ), AccountCreator: NoAccountCreator{}, @@ -122,6 +138,7 @@ func newFacadeEnvironment( tracer, meter, accounts, + common.Address(sc.Crypto.Address), ), ContractUpdater: NoContractUpdater{}, Programs: NewPrograms( @@ -166,9 +183,12 @@ func NewScriptEnv( params, txnState, NewCancellableMeter(ctx, txnState)) - + env.RandomGenerator = NewRandomGenerator( + tracer, + params.EntropyProvider, + params.ScriptInfoParams.ID[:], + ) env.addParseRestrictedChecks() - return env } @@ -190,6 +210,7 @@ func NewTransactionEnvironment( params.Chain.ServiceAddress(), ) env.EventEmitter = NewEventEmitter( + env.Logger(), tracer, env.Meter, params.Chain, @@ -223,6 +244,19 @@ func NewTransactionEnvironment( txnState, env) + env.RandomGenerator = NewRandomGenerator( + tracer, + params.EntropyProvider, + params.TxId[:], + ) + + env.RandomSourceHistoryProvider = NewRandomSourceHistoryProvider( + tracer, + env.Meter, + params.EntropyProvider, + params.TransactionInfoParams.RandomSourceHistoryCallAllowed, + ) + env.addParseRestrictedChecks() return env @@ -263,12 +297,18 @@ func (env *facadeEnvironment) addParseRestrictedChecks() { env.TransactionInfo = NewParseRestrictedTransactionInfo( env.txnState, env.TransactionInfo) - env.UnsafeRandomGenerator = NewParseRestrictedUnsafeRandomGenerator( + env.RandomGenerator = NewParseRestrictedRandomGenerator( env.txnState, - env.UnsafeRandomGenerator) + env.RandomGenerator) + env.RandomSourceHistoryProvider = NewParseRestrictedRandomSourceHistoryProvider( + env.txnState, + env.RandomSourceHistoryProvider) env.UUIDGenerator = NewParseRestrictedUUIDGenerator( env.txnState, env.UUIDGenerator) + env.AccountLocalIDGenerator = NewParseRestrictedAccountLocalIDGenerator( + env.txnState, + env.AccountLocalIDGenerator) env.ValueStore = NewParseRestrictedValueStore( env.txnState, env.ValueStore) @@ -287,19 +327,55 @@ func (env *facadeEnvironment) Reset() { env.Programs.Reset() } -// Miscellaneous cadence runtime.Interface API. 
-func (facadeEnvironment) ResourceOwnerChanged( +// Miscellaneous Cadence runtime.Interface API + +func (*facadeEnvironment) ResourceOwnerChanged( *interpreter.Interpreter, *interpreter.CompositeValue, common.Address, common.Address, ) { + // NO-OP } -func (env *facadeEnvironment) SetInterpreterSharedState(state *interpreter.SharedState) { - // NO-OP +func (env *facadeEnvironment) RecoverProgram(program *ast.Program, location common.Location) ([]byte, error) { + return RecoverProgram( + env.chain.ChainID(), + program, + location, + ) } -func (env *facadeEnvironment) GetInterpreterSharedState() *interpreter.SharedState { - return nil +func (env *facadeEnvironment) ValidateAccountCapabilitiesGet( + _ interpreter.AccountCapabilityGetValidationContext, + _ interpreter.LocationRange, + _ interpreter.AddressValue, + _ interpreter.PathValue, + wantedBorrowType *sema.ReferenceType, + _ *sema.ReferenceType, +) (bool, error) { + _, hasEntitlements := wantedBorrowType.Authorization.(sema.EntitlementSetAccess) + if hasEntitlements { + // TODO: maybe abort + //return false, interpreter.GetCapabilityError{ + // LocationRange: locationRange, + //} + return false, nil + } + return true, nil +} + +func (env *facadeEnvironment) ValidateAccountCapabilitiesPublish( + _ interpreter.AccountCapabilityPublishValidationContext, + _ interpreter.LocationRange, + _ interpreter.AddressValue, + _ interpreter.PathValue, + capabilityBorrowType *interpreter.ReferenceStaticType, +) (bool, error) { + _, isEntitledCapability := capabilityBorrowType.Authorization.(interpreter.EntitlementSetAuthorization) + if isEntitledCapability { + // TODO: maybe abort + return false, nil + } + return true, nil } diff --git a/fvm/environment/history_random_source_provider.go b/fvm/environment/history_random_source_provider.go new file mode 100644 index 00000000000..1548f166466 --- /dev/null +++ b/fvm/environment/history_random_source_provider.go @@ -0,0 +1,136 @@ +package environment + +import ( + "fmt" + + "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/tracing" + "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/state/protocol/prg" +) + +type RandomSourceHistoryProvider interface { + // RandomSourceHistory provides a source of entropy that can be + // expanded on-chain into randoms (using a pseudo-random generator). + // This random source is only destined to the history source core-contract + // to implement commit-reveal schemes. + // The returned slice should have at least 128 bits of entropy. + // The function doesn't error in normal operations, any + // error should be treated as an exception. + RandomSourceHistory() ([]byte, error) +} + +type ParseRestrictedRandomSourceHistoryProvider struct { + txnState state.NestedTransactionPreparer + impl RandomSourceHistoryProvider +} + +func NewParseRestrictedRandomSourceHistoryProvider( + txnState state.NestedTransactionPreparer, + impl RandomSourceHistoryProvider, +) RandomSourceHistoryProvider { + return ParseRestrictedRandomSourceHistoryProvider{ + txnState: txnState, + impl: impl, + } +} + +func (p ParseRestrictedRandomSourceHistoryProvider) RandomSourceHistory() ([]byte, error) { + return parseRestrict1Ret( + p.txnState, + trace.FVMEnvRandomSourceHistoryProvider, + p.impl.RandomSourceHistory, + ) +} + +// forbiddenRandomSourceHistoryProvider is a RandomSourceHistoryProvider that always returns an error. 
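+// Any call through it yields an OperationNotSupportedError; a serving provider is only installed when the call is explicitly allowed (see NewRandomSourceHistoryProvider below).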
+// This is the default implementation of RandomSourceHistoryProvider. +type forbiddenRandomSourceHistoryProvider struct { +} + +func NewForbiddenRandomSourceHistoryProvider() RandomSourceHistoryProvider { + return &forbiddenRandomSourceHistoryProvider{} +} + +func (b forbiddenRandomSourceHistoryProvider) RandomSourceHistory() ([]byte, error) { + return nil, errors.NewOperationNotSupportedError("RandomSourceHistory") +} + +type historySourceProvider struct { + tracer tracing.TracerSpan + meter Meter + EntropyProvider +} + +// NewRandomSourceHistoryProvider creates a new RandomSourceHistoryProvider. +// If randomSourceCallAllowed is true, the returned RandomSourceHistoryProvider will +// return a random source from the given EntropyProvider. +// If randomSourceCallAllowed is false, the returned RandomSourceHistoryProvider will +// always return an error. +func NewRandomSourceHistoryProvider( + tracer tracing.TracerSpan, + meter Meter, + entropyProvider EntropyProvider, + randomSourceCallAllowed bool, +) RandomSourceHistoryProvider { + if randomSourceCallAllowed { + return &historySourceProvider{ + tracer: tracer, + meter: meter, + EntropyProvider: entropyProvider, + } + } + + return NewForbiddenRandomSourceHistoryProvider() +} + +// RandomSourceHistoryLength is the byte-size of the random source in the history +// array. +// It must be at least 16 (128 bits) to make sure it includes enough entropy +// (assuming the randomness beacon also outputs more than 128 bits of entropy). +const RandomSourceHistoryLength = 32 + +func (b *historySourceProvider) RandomSourceHistory() ([]byte, error) { + defer b.tracer.StartExtensiveTracingChildSpan( + trace.FVMEnvRandomSourceHistoryProvider).End() + + err := b.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindGetRandomSourceHistory, + Intensity: 1, + }, + ) + if err != nil { + return nil, fmt.Errorf("get block randomSource failed: %w", err) + } + + source, err := b.RandomSource() + // `RandomSource` does not error in normal operations. + // Any error should be treated as an exception. + if err != nil { + return nil, errors.NewRandomSourceFailure(fmt.Errorf( + "get random source for block randomSource failed: %w", err)) + } + + // A method that derives `RandomSourceHistoryLength` bytes from `source` must: + // - extract and expand the entropy in `source` + // - produce output that is independent of the expanded bytes used for Cadence's `random` function + // + // The method chosen here is to rely on the same CSPRG used to derive randoms from the source entropy + // (but other methods are possible) + // - use the state/protocol/prg customizer defined for the execution random source history.
+ // (to ensure independence of seeds, the customizer must be different from the one used for Cadence's + // `random` in random_generator.go) + csprg, err := prg.New(source, prg.ExecutionRandomSourceHistory, nil) + if err != nil { + return nil, fmt.Errorf("failed to create a PRG from source: %w", err) + } + + historySource := make([]byte, RandomSourceHistoryLength) + csprg.Read(historySource) + + return historySource, nil +} diff --git a/fvm/environment/invoker.go b/fvm/environment/invoker.go new file mode 100644 index 00000000000..27f9c881b6b --- /dev/null +++ b/fvm/environment/invoker.go @@ -0,0 +1,28 @@ +package environment + +import ( + "github.com/onflow/cadence" + "github.com/onflow/cadence/sema" + + "github.com/onflow/flow-go/model/flow" +) + +// ContractFunctionSpec specifies all the information, except the function's +// address and arguments, needed to invoke the contract function. +type ContractFunctionSpec struct { + AddressFromChain func(flow.Chain) flow.Address + LocationName string + FunctionName string + ArgumentTypes []sema.Type +} + +// ContractFunctionInvoker invokes a contract function +type ContractFunctionInvoker interface { + Invoke( + spec ContractFunctionSpec, + arguments []cadence.Value, + ) ( + cadence.Value, + error, + ) +} diff --git a/fvm/environment/logger.go b/fvm/environment/logger.go new file mode 100644 index 00000000000..85b70e1520f --- /dev/null +++ b/fvm/environment/logger.go @@ -0,0 +1,8 @@ +package environment + +import "github.com/rs/zerolog" + +// LoggerProvider provides access to the logger used to collect logs +type LoggerProvider interface { + Logger() zerolog.Logger +} diff --git a/fvm/environment/meter.go b/fvm/environment/meter.go index d9d5dd280ed..38e740d75ae 100644 --- a/fvm/environment/meter.go +++ b/fvm/environment/meter.go @@ -3,7 +3,7 @@ package environment import ( "context" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/meter" @@ -12,54 +12,76 @@ const ( // [2_000, 3_000) reserved for the FVM - ComputationKindHash = 2001 - ComputationKindVerifySignature = 2002 - ComputationKindAddAccountKey = 2003 - ComputationKindAddEncodedAccountKey = 2004 - ComputationKindAllocateStorageIndex = 2005 - ComputationKindCreateAccount = 2006 - ComputationKindEmitEvent = 2007 - ComputationKindGenerateUUID = 2008 - ComputationKindGetAccountAvailableBalance = 2009 - ComputationKindGetAccountBalance = 2010 - ComputationKindGetAccountContractCode = 2011 - ComputationKindGetAccountContractNames = 2012 - ComputationKindGetAccountKey = 2013 - ComputationKindGetBlockAtHeight = 2014 - ComputationKindGetCode = 2015 - ComputationKindGetCurrentBlockHeight = 2016 - _ = 2017 - ComputationKindGetStorageCapacity = 2018 - ComputationKindGetStorageUsed = 2019 - ComputationKindGetValue = 2020 - ComputationKindRemoveAccountContractCode = 2021 - ComputationKindResolveLocation = 2022 - ComputationKindRevokeAccountKey = 2023 - ComputationKindRevokeEncodedAccountKey = 2024 - _ = 2025 - ComputationKindSetValue = 2026 - ComputationKindUpdateAccountContractCode = 2027 - ComputationKindValidatePublicKey = 2028 - ComputationKindValueExists = 2029 - ComputationKindAccountKeysCount = 2030 - ComputationKindBLSVerifyPOP = 2031 - ComputationKindBLSAggregateSignatures = 2032 - ComputationKindBLSAggregatePublicKeys = 2033 - ComputationKindGetOrLoadProgram = 2034 + ComputationKindHash = 2001 + iota + ComputationKindVerifySignature + ComputationKindAddAccountKey +
ComputationKindAddEncodedAccountKey +	ComputationKindAllocateSlabIndex +	ComputationKindCreateAccount +	ComputationKindEmitEvent +	ComputationKindGenerateUUID +	ComputationKindGetAccountAvailableBalance +	ComputationKindGetAccountBalance +	ComputationKindGetAccountContractCode +	ComputationKindGetAccountContractNames +	ComputationKindGetAccountKey +	ComputationKindGetBlockAtHeight +	ComputationKindGetCode +	ComputationKindGetCurrentBlockHeight +	_ +	ComputationKindGetStorageCapacity +	ComputationKindGetStorageUsed +	ComputationKindGetValue +	ComputationKindRemoveAccountContractCode +	ComputationKindResolveLocation +	ComputationKindRevokeAccountKey +	_ // removed, DO NOT REUSE +	_ // removed, DO NOT REUSE +	ComputationKindSetValue +	ComputationKindUpdateAccountContractCode +	ComputationKindValidatePublicKey +	ComputationKindValueExists +	ComputationKindAccountKeysCount +	ComputationKindBLSVerifyPOP +	ComputationKindBLSAggregateSignatures +	ComputationKindBLSAggregatePublicKeys +	ComputationKindGetOrLoadProgram +	ComputationKindGenerateAccountLocalID +	ComputationKindGetRandomSourceHistory +	ComputationKindEVMGasUsage +	ComputationKindRLPEncoding +	ComputationKindRLPDecoding +	ComputationKindEncodeEvent +	_ +	ComputationKindEVMEncodeABI +	ComputationKindEVMDecodeABI ) +// MainnetExecutionEffortWeights are the execution effort weights as configured +// on mainnet since the Crescendo spork. +var MainnetExecutionEffortWeights = meter.ExecutionEffortWeights{ +	common.ComputationKindStatement:          314, +	common.ComputationKindLoop:               314, +	common.ComputationKindFunctionInvocation: 314, +	ComputationKindGetValue:                  162, +	ComputationKindCreateAccount:             567534, +	ComputationKindSetValue:                  153, +	ComputationKindEVMGasUsage:               13, +} + type Meter interface { -	MeterComputation(common.ComputationKind, uint) error -	ComputationUsed() (uint64, error) -	ComputationIntensities() meter.MeteredComputationIntensities +	common.Gauge -	MeterMemory(usage common.MemoryUsage) error +	ComputationUsed() (uint64, error) 	MemoryUsed() (uint64, error) +	ComputationIntensities() meter.MeteredComputationIntensities +	ComputationAvailable(common.ComputationUsage) bool + 	MeterEmittedEvent(byteSize uint64) error 	TotalEmittedEventBytes() uint64 -	InteractionUsed() (uint64, error) +	RunWithMeteringDisabled(f func()) } type meterImpl struct { @@ -72,23 +94,24 @@ func NewMeter(txnState state.NestedTransactionPreparer) Meter { 	} } -func (meter *meterImpl) MeterComputation( -	kind common.ComputationKind, -	intensity uint, -) error { -	return meter.txnState.MeterComputation(kind, intensity) +func (meter *meterImpl) MeterComputation(usage common.ComputationUsage) error { +	return meter.txnState.MeterComputation(usage) } func (meter *meterImpl) ComputationIntensities() meter.MeteredComputationIntensities { 	return meter.txnState.ComputationIntensities() } +func (meter *meterImpl) ComputationAvailable(usage common.ComputationUsage) bool { +	return meter.txnState.ComputationAvailable(usage) +} + func (meter *meterImpl) ComputationUsed() (uint64, error) { 	return meter.txnState.TotalComputationUsed(), nil } func (meter *meterImpl) MeterMemory(usage common.MemoryUsage) error { -	return meter.txnState.MeterMemory(usage.Kind, uint(usage.Amount)) +	return meter.txnState.MeterMemory(usage) } func (meter *meterImpl) MemoryUsed() (uint64, error) { @@ -107,6 +130,10 @@ func (meter *meterImpl) TotalEmittedEventBytes() uint64 { 	return meter.txnState.TotalEmittedEventBytes() } +func (meter *meterImpl) RunWithMeteringDisabled(f func()) { + 
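// delegate to the transaction state, which runs f with metering disabled + // (descriptive comment: per the method name, no computation or memory usage is tracked while f executes) + 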
meter.txnState.RunWithMeteringDisabled(f) +} + type cancellableMeter struct { 	meterImpl @@ -125,10 +152,7 @@ func NewCancellableMeter( 	} } -func (meter *cancellableMeter) MeterComputation( -	kind common.ComputationKind, -	intensity uint, -) error { +func (meter *cancellableMeter) MeterComputation(usage common.ComputationUsage) error { 	// this method is called on every unit of operation, so 	// checking the context here is the most likely would capture 	// timeouts or cancellation as soon as they happen, though @@ -147,5 +171,5 @@ func (meter *cancellableMeter) MeterComputation( 		// do nothing 	} -	return meter.meterImpl.MeterComputation(kind, intensity) +	return meter.meterImpl.MeterComputation(usage) } diff --git a/fvm/environment/minimum_required_version.go b/fvm/environment/minimum_required_version.go new file mode 100644 index 00000000000..5fd4cb54a26 --- /dev/null +++ b/fvm/environment/minimum_required_version.go @@ -0,0 +1,165 @@ +package environment + +import ( +	"sync" + +	"github.com/coreos/go-semver/semver" + +	"github.com/onflow/flow-go/fvm/errors" +	"github.com/onflow/flow-go/model/flow" +) + +type ExecutionVersionProvider interface { +	ExecutionVersion() (semver.Version, error) +} + +type GetVersionBeaconFunc func() (*flow.SealedVersionBeacon, error) + +type VersionBeaconExecutionVersionProvider struct { +	getVersionBeacon GetVersionBeaconFunc + +	once          sync.Once +	cachedVersion semver.Version +	cachedErr     error +} + +// NewVersionBeaconExecutionVersionProvider creates a new VersionBeaconExecutionVersionProvider. +// It caches the result of the getVersionBeacon function. +// The assumption here is that the GetVersionBeaconFunc will not return a different result for the lifetime of the provider. +// This is safe to assume because version beacons change in between blocks, and a VersionBeaconExecutionVersionProvider is created +// for every block. +// +// This logic will go away once we switch to the cadence component version from the dynamic protocol state. +func NewVersionBeaconExecutionVersionProvider(getVersionBeacon GetVersionBeaconFunc) *VersionBeaconExecutionVersionProvider { +	return &VersionBeaconExecutionVersionProvider{ +		getVersionBeacon: getVersionBeacon, +	} +} + +func (v *VersionBeaconExecutionVersionProvider) ExecutionVersion() (semver.Version, error) { +	v.once.Do(func() { +		v.cachedVersion, v.cachedErr = v.queryExecutionVersion() +	}) + +	return v.cachedVersion, v.cachedErr } + +func (v *VersionBeaconExecutionVersionProvider) queryExecutionVersion() (semver.Version, error) { +	vb, err := v.getVersionBeacon() +	if err != nil { +		return semver.Version{}, err +	} +	// Special case. If there are no version boundaries, then the execution version is 0.0.0. +	if vb == nil || len(vb.VersionBoundaries) == 0 { +		return semver.Version{}, nil +	} + +	// by definition, the zero boundary is the most recent past boundary +	boundary := vb.VersionBoundaries[0] +	sv, err := boundary.Semver() +	if err != nil { +		return semver.Version{}, err +	} +	return *sv, nil +} + +type ZeroExecutionVersionProvider struct{} + +func (v ZeroExecutionVersionProvider) ExecutionVersion() (semver.Version, error) { +	return semver.Version{}, nil +} + +// MinimumCadenceRequiredVersion returns the minimum required cadence version for the current environment +// in semver format. 
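+// +// For example, using the values from the tests below: with a mapping entry of flow-go v0.37.0 -> cadence v1.0.0, +// an execution version of v0.36.9 maps to cadence "0.0.0", while v0.37.0 and v0.37.1 both map to "1.0.0". 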
+type MinimumCadenceRequiredVersion interface { +	MinimumRequiredVersion() (string, error) +} + +type minimumCadenceRequiredVersion struct { +	executionVersionProvider ExecutionVersionProvider +} + +func NewMinimumCadenceRequiredVersion( +	executionVersionProvider ExecutionVersionProvider, +) MinimumCadenceRequiredVersion { +	return minimumCadenceRequiredVersion{ +		executionVersionProvider: executionVersionProvider, +	} +} + +// MinimumRequiredVersion returns the cadence version that the cadence runtime uses to decide feature flag status. +// The feature flag in cadence allows ENs to produce consistent results even if running with +// different cadence versions at the same height, which is useful for rolling out a cadence +// upgrade without all ENs restarting all together. +// For instance, suppose we would like to upgrade cadence from v1 to v3, where v3 has a new cadence feature. +// We first make a cadence v2 that has the feature flag turned on only when the MinimumRequiredVersion() +// method returns v2 or above. +// So cadence v2 with the feature flag turned off will produce the same result as v1, which doesn't have the feature. +// And cadence v2 with the feature flag turned on will produce the same result as v3, which has the feature. +// The feature flag allows us to roll out cadence v2 to all ENs that were running v1. +// We then use MinimumRequiredVersion to control when the feature flag is switched from off to on, +// and the switch happens at the same height for all ENs. +// +// The height-based switch-over can be done by using the VersionBeacon; however, the VersionBeacon only +// defines the flow-go version, not the cadence version. +// So we first read the current minimum required flow-go version from the VersionBeacon, +// and map it to the cadence version to be used by cadence to decide the feature flag status. +// +// For instance, let’s say all ENs are running flow-go v0.37.0 with cadence v1. +// We first create a version mapping entry for flow-go v0.37.1 to cadence v2, and roll out v0.37.1 to all ENs. +// v0.37.1 ENs will produce the same result as v0.37.0 ENs, because the current version beacon still returns v0.37.0, +// which maps to the zero cadence version, and cadence will keep the feature flag off. +// +// After all ENs have upgraded to v0.37.1, we send out a version beacon to switch to v0.37.1 at a future height, +// let’s say height 1000. +// Then what happens is that: +// 1. ENs running v0.37.0 will crash after height 999, until upgraded to a higher version +// 2. ENs running v0.37.1 will execute with cadence v2 with the feature flag off up until height 999, and from height 1000, +// the feature flag will be on, which means all v0.37.1 ENs will again produce consistent results for blocks from height 1000 on. +// +// After height 1000 has been sealed, we can roll out v0.37.2 to all ENs with cadence v3, and it will produce +// results consistent with v0.37.1. 
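+// +// In short: when the current execution version is at or above the mapped flow-go version, the mapped +// cadence version is returned; otherwise the zero version "0.0.0" is returned (see mapToCadenceVersion below). 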
+func (c minimumCadenceRequiredVersion) MinimumRequiredVersion() (string, error) { +	executionVersion, err := c.executionVersionProvider.ExecutionVersion() +	if err != nil { +		return "", errors.NewExecutionVersionProviderFailure(err) +	} + +	// map the minimum required flow-go version to a minimum required cadence version +	cadenceVersion := mapToCadenceVersion(executionVersion, minimumFvmToMinimumCadenceVersionMapping) + +	return cadenceVersion.String(), nil +} + +func mapToCadenceVersion(flowGoVersion semver.Version, versionMapping FlowGoToCadenceVersionMapping) semver.Version { +	if versionGreaterThanOrEqualTo(flowGoVersion, versionMapping.FlowGoVersion) { +		return versionMapping.CadenceVersion +	} else { +		return semver.Version{} +	} +} + +func versionGreaterThanOrEqualTo(version semver.Version, other semver.Version) bool { +	return version.Compare(other) >= 0 +} + +type FlowGoToCadenceVersionMapping struct { +	FlowGoVersion  semver.Version +	CadenceVersion semver.Version +} + +// This could also be a map, but that is not needed because we only expect one entry at a given time: +// we won't be fixing 2 separate issues at 2 separate versions with one deploy. +var minimumFvmToMinimumCadenceVersionMapping = FlowGoToCadenceVersionMapping{ +	// Leaving this example in, so it's easier to understand +	// +	// FlowGoVersion:  *semver.New("0.37.0"), +	// CadenceVersion: *semver.New("1.0.0"), +	// +} + +func setFVMToCadenceVersionMappingForTestingOnly(mapping FlowGoToCadenceVersionMapping) { +	minimumFvmToMinimumCadenceVersionMapping = mapping +} + +var _ MinimumCadenceRequiredVersion = (*minimumCadenceRequiredVersion)(nil) diff --git a/fvm/environment/minimum_required_version_test.go b/fvm/environment/minimum_required_version_test.go new file mode 100644 index 00000000000..ca33305c441 --- /dev/null +++ b/fvm/environment/minimum_required_version_test.go @@ -0,0 +1,163 @@ +package environment + +import ( +	"testing" + +	"github.com/coreos/go-semver/semver" +	"github.com/stretchr/testify/require" + +	"github.com/onflow/flow-go/model/flow" +) + +func Test_MapToCadenceVersion(t *testing.T) { +	flowV0 := semver.Version{} +	cadenceV0 := semver.Version{} +	flowV1 := semver.Version{ +		Major: 0, +		Minor: 37, +		Patch: 0, +	} +	cadenceV1 := semver.Version{ +		Major: 1, +		Minor: 0, +		Patch: 0, +	} + +	mapping := FlowGoToCadenceVersionMapping{ +		FlowGoVersion:  flowV1, +		CadenceVersion: cadenceV1, +	} + +	t.Run("no mapping, v0", func(t *testing.T) { +		version := mapToCadenceVersion(flowV0, FlowGoToCadenceVersionMapping{}) + +		require.Equal(t, cadenceV0, version) +	}) + +	t.Run("v0", func(t *testing.T) { +		version := mapToCadenceVersion(flowV0, mapping) + +		require.Equal(t, semver.Version{}, version) +	}) +	t.Run("v1 - delta", func(t *testing.T) { + +		v := flowV1 +		v.Patch -= 1 + +		version := mapToCadenceVersion(v, mapping) + +		require.Equal(t, cadenceV0, version) +	}) +	t.Run("v1", func(t *testing.T) { +		version := mapToCadenceVersion(flowV1, mapping) + +		require.Equal(t, cadenceV1, version) +	}) +	t.Run("v1 + delta", func(t *testing.T) { + +		v := flowV1 +		v.BumpPatch() + +		version := mapToCadenceVersion(v, mapping) + +		require.Equal(t, cadenceV1, version) +	}) +} + +func Test_VersionBeaconAsDataSource(t *testing.T) { +	t.Run("no version beacon", func(t *testing.T) { +		versionBeacon := VersionBeaconExecutionVersionProvider{ +			getVersionBeacon: func() (*flow.SealedVersionBeacon, error) { +				return nil, nil +			}, +		} +		version, err := versionBeacon.ExecutionVersion() +		require.NoError(t, err) +		require.Equal(t, 
semver.Version{}, version) +	}) + +	t.Run("version beacon", func(t *testing.T) { +		versionBeacon := NewVersionBeaconExecutionVersionProvider( +			func() (*flow.SealedVersionBeacon, error) { +				return &flow.SealedVersionBeacon{ +					VersionBeacon: &flow.VersionBeacon{ +						VersionBoundaries: []flow.VersionBoundary{ +							{ +								BlockHeight: 10, +								Version:     semver.Version{Major: 0, Minor: 37, Patch: 0}.String(), +							}, +						}, +					}, +				}, nil +			}, +		) +		version, err := versionBeacon.ExecutionVersion() +		require.NoError(t, err) +		require.Equal(t, semver.Version{Major: 0, Minor: 37, Patch: 0}, version) +	}) + +	t.Run("version beacon, multiple boundaries", func(t *testing.T) { +		versionBeacon := NewVersionBeaconExecutionVersionProvider( +			func() (*flow.SealedVersionBeacon, error) { +				return &flow.SealedVersionBeacon{ +					VersionBeacon: &flow.VersionBeacon{ +						VersionBoundaries: []flow.VersionBoundary{ +							{ +								BlockHeight: 10, +								Version:     semver.Version{Major: 0, Minor: 37, Patch: 0}.String(), +							}, +							{ +								BlockHeight: 20, +								Version:     semver.Version{Major: 1, Minor: 0, Patch: 0}.String(), +							}, +						}, +					}, +				}, nil +			}, +		) + +		version, err := versionBeacon.ExecutionVersion() +		require.NoError(t, err) +		// the first boundary is by definition the newest past one and defines the version +		require.Equal(t, semver.Version{Major: 0, Minor: 37, Patch: 0}, version) +	}) +} + +func Test_MinimumCadenceRequiredVersion(t *testing.T) { +	t.Run("with version beacon", func(t *testing.T) { +		getCadenceVersion := func(executionVersion string) (string, error) { +			versionBeacon := NewVersionBeaconExecutionVersionProvider( +				func() (*flow.SealedVersionBeacon, error) { +					return &flow.SealedVersionBeacon{ +						VersionBeacon: &flow.VersionBeacon{ +							VersionBoundaries: []flow.VersionBoundary{ +								{ +									BlockHeight: 10, +									Version:     executionVersion, +								}, +							}, +						}, +					}, nil +				}, +			) +			cadenceVersion := NewMinimumCadenceRequiredVersion(versionBeacon) +			return cadenceVersion.MinimumRequiredVersion() +		} + +		setFVMToCadenceVersionMappingForTestingOnly(FlowGoToCadenceVersionMapping{ +			FlowGoVersion:  semver.Version{Major: 0, Minor: 37, Patch: 0}, +			CadenceVersion: semver.Version{Major: 1, Minor: 0, Patch: 0}, +		}) + +		requireExpectedSemver := func(t *testing.T, executionVersion semver.Version, expectedCadenceVersion semver.Version) { +			t.Helper() +			actualCadenceVersion, err := getCadenceVersion(executionVersion.String()) +			require.NoError(t, err) +			require.Equal(t, expectedCadenceVersion.String(), actualCadenceVersion) +		} + +		requireExpectedSemver(t, semver.Version{Major: 0, Minor: 36, Patch: 9}, semver.Version{Major: 0, Minor: 0, Patch: 0}) +		requireExpectedSemver(t, semver.Version{Major: 0, Minor: 37, Patch: 0}, semver.Version{Major: 1, Minor: 0, Patch: 0}) +		requireExpectedSemver(t, semver.Version{Major: 0, Minor: 37, Patch: 1}, semver.Version{Major: 1, Minor: 0, Patch: 0}) +	}) +} diff --git a/fvm/environment/mock/account_creator.go b/fvm/environment/mock/account_creator.go index 15b19b507b1..7c23cf079df 100644 --- a/fvm/environment/mock/account_creator.go +++ b/fvm/environment/mock/account_creator.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( - common "github.com/onflow/cadence/runtime/common" + common "github.com/onflow/cadence/common" mock "github.com/stretchr/testify/mock" ) @@ -17,6 +17,10 @@ type AccountCreator struct { func (_m *AccountCreator) CreateAccount(runtimePayer common.Address) (common.Address, error) { ret := _m.Called(runtimePayer) + if len(ret) == 0 { + panic("no return value specified for CreateAccount") + } + var r0 common.Address var r1 error if rf, ok := ret.Get(0).(func(common.Address) (common.Address, error)); ok { @@ -39,13 +43,12 @@ func (_m *AccountCreator) CreateAccount(runtimePayer common.Address) (common.Add return r0, r1 } -type mockConstructorTestingTNewAccountCreator interface { +// NewAccountCreator creates a new instance of AccountCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAccountCreator(t interface { mock.TestingT Cleanup(func()) -} - -// NewAccountCreator creates a new instance of AccountCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAccountCreator(t mockConstructorTestingTNewAccountCreator) *AccountCreator { +}) *AccountCreator { mock := &AccountCreator{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/account_info.go b/fvm/environment/mock/account_info.go index 4af71a34296..adeb91e4b60 100644 --- a/fvm/environment/mock/account_info.go +++ b/fvm/environment/mock/account_info.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( - common "github.com/onflow/cadence/runtime/common" + common "github.com/onflow/cadence/common" flow "github.com/onflow/flow-go/model/flow" @@ -19,6 +19,10 @@ type AccountInfo struct { func (_m *AccountInfo) GetAccount(address flow.Address) (*flow.Account, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(flow.Address) (*flow.Account, error)); ok { @@ -45,6 +49,10 @@ func (_m *AccountInfo) GetAccount(address flow.Address) (*flow.Account, error) { func (_m *AccountInfo) GetAccountAvailableBalance(runtimeAddress common.Address) (uint64, error) { ret := _m.Called(runtimeAddress) + if len(ret) == 0 { + panic("no return value specified for GetAccountAvailableBalance") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { @@ -69,6 +77,10 @@ func (_m *AccountInfo) GetAccountAvailableBalance(runtimeAddress common.Address) func (_m *AccountInfo) GetAccountBalance(runtimeAddress common.Address) (uint64, error) { ret := _m.Called(runtimeAddress) + if len(ret) == 0 { + panic("no return value specified for GetAccountBalance") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { @@ -89,10 +101,74 @@ func (_m *AccountInfo) GetAccountBalance(runtimeAddress common.Address) (uint64, return r0, r1 } +// GetAccountKeyByIndex provides a mock function with given fields: address, index +func (_m *AccountInfo) GetAccountKeyByIndex(address flow.Address, index uint32) (*flow.AccountPublicKey, error) { + ret := _m.Called(address, index) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyByIndex") + } + + var r0 *flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(flow.Address, uint32) (*flow.AccountPublicKey, 
error)); ok { + return rf(address, index) + } + if rf, ok := ret.Get(0).(func(flow.Address, uint32) *flow.AccountPublicKey); ok { + r0 = rf(address, index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(flow.Address, uint32) error); ok { + r1 = rf(address, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeys provides a mock function with given fields: address +func (_m *AccountInfo) GetAccountKeys(address flow.Address) ([]flow.AccountPublicKey, error) { + ret := _m.Called(address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeys") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(flow.Address) ([]flow.AccountPublicKey, error)); ok { + return rf(address) + } + if rf, ok := ret.Get(0).(func(flow.Address) []flow.AccountPublicKey); ok { + r0 = rf(address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(flow.Address) error); ok { + r1 = rf(address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetStorageCapacity provides a mock function with given fields: runtimeAddress func (_m *AccountInfo) GetStorageCapacity(runtimeAddress common.Address) (uint64, error) { ret := _m.Called(runtimeAddress) + if len(ret) == 0 { + panic("no return value specified for GetStorageCapacity") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { @@ -113,23 +189,27 @@ func (_m *AccountInfo) GetStorageCapacity(runtimeAddress common.Address) (uint64 return r0, r1 } -// GetStorageUsed provides a mock function with given fields: runtimeaddress -func (_m *AccountInfo) GetStorageUsed(runtimeaddress common.Address) (uint64, error) { - ret := _m.Called(runtimeaddress) +// GetStorageUsed provides a mock function with given fields: runtimeAddress +func (_m *AccountInfo) GetStorageUsed(runtimeAddress common.Address) (uint64, error) { + ret := _m.Called(runtimeAddress) + + if len(ret) == 0 { + panic("no return value specified for GetStorageUsed") + } var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { - return rf(runtimeaddress) + return rf(runtimeAddress) } if rf, ok := ret.Get(0).(func(common.Address) uint64); ok { - r0 = rf(runtimeaddress) + r0 = rf(runtimeAddress) } else { r0 = ret.Get(0).(uint64) } if rf, ok := ret.Get(1).(func(common.Address) error); ok { - r1 = rf(runtimeaddress) + r1 = rf(runtimeAddress) } else { r1 = ret.Error(1) } @@ -137,13 +217,12 @@ func (_m *AccountInfo) GetStorageUsed(runtimeaddress common.Address) (uint64, er return r0, r1 } -type mockConstructorTestingTNewAccountInfo interface { +// NewAccountInfo creates a new instance of AccountInfo. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAccountInfo(t interface { mock.TestingT Cleanup(func()) -} - -// NewAccountInfo creates a new instance of AccountInfo. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewAccountInfo(t mockConstructorTestingTNewAccountInfo) *AccountInfo { +}) *AccountInfo { mock := &AccountInfo{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/account_key_reader.go b/fvm/environment/mock/account_key_reader.go index 64cd803bcf1..c577304bb55 100644 --- a/fvm/environment/mock/account_key_reader.go +++ b/fvm/environment/mock/account_key_reader.go @@ -1,13 +1,13 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( - common "github.com/onflow/cadence/runtime/common" + common "github.com/onflow/cadence/common" mock "github.com/stretchr/testify/mock" - stdlib "github.com/onflow/cadence/runtime/stdlib" + runtime "github.com/onflow/cadence/runtime" ) // AccountKeyReader is an autogenerated mock type for the AccountKeyReader type @@ -16,18 +16,22 @@ type AccountKeyReader struct { } // AccountKeysCount provides a mock function with given fields: runtimeAddress -func (_m *AccountKeyReader) AccountKeysCount(runtimeAddress common.Address) (uint64, error) { +func (_m *AccountKeyReader) AccountKeysCount(runtimeAddress common.Address) (uint32, error) { ret := _m.Called(runtimeAddress) - var r0 uint64 + if len(ret) == 0 { + panic("no return value specified for AccountKeysCount") + } + + var r0 uint32 var r1 error - if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { + if rf, ok := ret.Get(0).(func(common.Address) (uint32, error)); ok { return rf(runtimeAddress) } - if rf, ok := ret.Get(0).(func(common.Address) uint64); ok { + if rf, ok := ret.Get(0).(func(common.Address) uint32); ok { r0 = rf(runtimeAddress) } else { - r0 = ret.Get(0).(uint64) + r0 = ret.Get(0).(uint32) } if rf, ok := ret.Get(1).(func(common.Address) error); ok { @@ -40,23 +44,27 @@ func (_m *AccountKeyReader) AccountKeysCount(runtimeAddress common.Address) (uin } // GetAccountKey provides a mock function with given fields: runtimeAddress, keyIndex -func (_m *AccountKeyReader) GetAccountKey(runtimeAddress common.Address, keyIndex int) (*stdlib.AccountKey, error) { +func (_m *AccountKeyReader) GetAccountKey(runtimeAddress common.Address, keyIndex uint32) (*runtime.AccountKey, error) { ret := _m.Called(runtimeAddress, keyIndex) - var r0 *stdlib.AccountKey + if len(ret) == 0 { + panic("no return value specified for GetAccountKey") + } + + var r0 *runtime.AccountKey var r1 error - if rf, ok := ret.Get(0).(func(common.Address, int) (*stdlib.AccountKey, error)); ok { + if rf, ok := ret.Get(0).(func(common.Address, uint32) (*runtime.AccountKey, error)); ok { return rf(runtimeAddress, keyIndex) } - if rf, ok := ret.Get(0).(func(common.Address, int) *stdlib.AccountKey); ok { + if rf, ok := ret.Get(0).(func(common.Address, uint32) *runtime.AccountKey); ok { r0 = rf(runtimeAddress, keyIndex) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*stdlib.AccountKey) + r0 = ret.Get(0).(*runtime.AccountKey) } } - if rf, ok := ret.Get(1).(func(common.Address, int) error); ok { + if rf, ok := ret.Get(1).(func(common.Address, uint32) error); ok { r1 = rf(runtimeAddress, keyIndex) } else { r1 = ret.Error(1) @@ -65,13 +73,12 @@ func (_m *AccountKeyReader) GetAccountKey(runtimeAddress common.Address, keyInde return r0, r1 } -type mockConstructorTestingTNewAccountKeyReader interface { +// NewAccountKeyReader creates a new instance of AccountKeyReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewAccountKeyReader(t interface { mock.TestingT Cleanup(func()) -} - -// NewAccountKeyReader creates a new instance of AccountKeyReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAccountKeyReader(t mockConstructorTestingTNewAccountKeyReader) *AccountKeyReader { +}) *AccountKeyReader { mock := &AccountKeyReader{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/account_key_updater.go b/fvm/environment/mock/account_key_updater.go index e495cf79a89..45e77f77dde 100644 --- a/fvm/environment/mock/account_key_updater.go +++ b/fvm/environment/mock/account_key_updater.go @@ -1,15 +1,13 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( - common "github.com/onflow/cadence/runtime/common" + common "github.com/onflow/cadence/common" mock "github.com/stretchr/testify/mock" - sema "github.com/onflow/cadence/runtime/sema" - - stdlib "github.com/onflow/cadence/runtime/stdlib" + runtime "github.com/onflow/cadence/runtime" ) // AccountKeyUpdater is an autogenerated mock type for the AccountKeyUpdater type @@ -18,23 +16,27 @@ type AccountKeyUpdater struct { } // AddAccountKey provides a mock function with given fields: runtimeAddress, publicKey, hashAlgo, weight -func (_m *AccountKeyUpdater) AddAccountKey(runtimeAddress common.Address, publicKey *stdlib.PublicKey, hashAlgo sema.HashAlgorithm, weight int) (*stdlib.AccountKey, error) { +func (_m *AccountKeyUpdater) AddAccountKey(runtimeAddress common.Address, publicKey *runtime.PublicKey, hashAlgo runtime.HashAlgorithm, weight int) (*runtime.AccountKey, error) { ret := _m.Called(runtimeAddress, publicKey, hashAlgo, weight) - var r0 *stdlib.AccountKey + if len(ret) == 0 { + panic("no return value specified for AddAccountKey") + } + + var r0 *runtime.AccountKey var r1 error - if rf, ok := ret.Get(0).(func(common.Address, *stdlib.PublicKey, sema.HashAlgorithm, int) (*stdlib.AccountKey, error)); ok { + if rf, ok := ret.Get(0).(func(common.Address, *runtime.PublicKey, runtime.HashAlgorithm, int) (*runtime.AccountKey, error)); ok { return rf(runtimeAddress, publicKey, hashAlgo, weight) } - if rf, ok := ret.Get(0).(func(common.Address, *stdlib.PublicKey, sema.HashAlgorithm, int) *stdlib.AccountKey); ok { + if rf, ok := ret.Get(0).(func(common.Address, *runtime.PublicKey, runtime.HashAlgorithm, int) *runtime.AccountKey); ok { r0 = rf(runtimeAddress, publicKey, hashAlgo, weight) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*stdlib.AccountKey) + r0 = ret.Get(0).(*runtime.AccountKey) } } - if rf, ok := ret.Get(1).(func(common.Address, *stdlib.PublicKey, sema.HashAlgorithm, int) error); ok { + if rf, ok := ret.Get(1).(func(common.Address, *runtime.PublicKey, runtime.HashAlgorithm, int) error); ok { r1 = rf(runtimeAddress, publicKey, hashAlgo, weight) } else { r1 = ret.Error(1) @@ -43,38 +45,28 @@ func (_m *AccountKeyUpdater) AddAccountKey(runtimeAddress common.Address, public return r0, r1 } -// AddEncodedAccountKey provides a mock function with given fields: runtimeAddress, publicKey -func (_m *AccountKeyUpdater) AddEncodedAccountKey(runtimeAddress common.Address, publicKey []byte) error { - ret := _m.Called(runtimeAddress, publicKey) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Address, []byte) error); ok { - r0 = rf(runtimeAddress, publicKey) - } else { - r0 = ret.Error(0) - } - - return r0 -} - // RevokeAccountKey provides a mock function with given fields: runtimeAddress, keyIndex -func (_m 
*AccountKeyUpdater) RevokeAccountKey(runtimeAddress common.Address, keyIndex int) (*stdlib.AccountKey, error) { +func (_m *AccountKeyUpdater) RevokeAccountKey(runtimeAddress common.Address, keyIndex uint32) (*runtime.AccountKey, error) { ret := _m.Called(runtimeAddress, keyIndex) - var r0 *stdlib.AccountKey + if len(ret) == 0 { + panic("no return value specified for RevokeAccountKey") + } + + var r0 *runtime.AccountKey var r1 error - if rf, ok := ret.Get(0).(func(common.Address, int) (*stdlib.AccountKey, error)); ok { + if rf, ok := ret.Get(0).(func(common.Address, uint32) (*runtime.AccountKey, error)); ok { return rf(runtimeAddress, keyIndex) } - if rf, ok := ret.Get(0).(func(common.Address, int) *stdlib.AccountKey); ok { + if rf, ok := ret.Get(0).(func(common.Address, uint32) *runtime.AccountKey); ok { r0 = rf(runtimeAddress, keyIndex) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*stdlib.AccountKey) + r0 = ret.Get(0).(*runtime.AccountKey) } } - if rf, ok := ret.Get(1).(func(common.Address, int) error); ok { + if rf, ok := ret.Get(1).(func(common.Address, uint32) error); ok { r1 = rf(runtimeAddress, keyIndex) } else { r1 = ret.Error(1) @@ -83,39 +75,12 @@ func (_m *AccountKeyUpdater) RevokeAccountKey(runtimeAddress common.Address, key return r0, r1 } -// RevokeEncodedAccountKey provides a mock function with given fields: runtimeAddress, index -func (_m *AccountKeyUpdater) RevokeEncodedAccountKey(runtimeAddress common.Address, index int) ([]byte, error) { - ret := _m.Called(runtimeAddress, index) - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(common.Address, int) ([]byte, error)); ok { - return rf(runtimeAddress, index) - } - if rf, ok := ret.Get(0).(func(common.Address, int) []byte); ok { - r0 = rf(runtimeAddress, index) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(common.Address, int) error); ok { - r1 = rf(runtimeAddress, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewAccountKeyUpdater interface { +// NewAccountKeyUpdater creates a new instance of AccountKeyUpdater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAccountKeyUpdater(t interface { mock.TestingT Cleanup(func()) -} - -// NewAccountKeyUpdater creates a new instance of AccountKeyUpdater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAccountKeyUpdater(t mockConstructorTestingTNewAccountKeyUpdater) *AccountKeyUpdater { +}) *AccountKeyUpdater { mock := &AccountKeyUpdater{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/account_local_id_generator.go b/fvm/environment/mock/account_local_id_generator.go new file mode 100644 index 00000000000..269d411a316 --- /dev/null +++ b/fvm/environment/mock/account_local_id_generator.go @@ -0,0 +1,56 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + common "github.com/onflow/cadence/common" + + mock "github.com/stretchr/testify/mock" +) + +// AccountLocalIDGenerator is an autogenerated mock type for the AccountLocalIDGenerator type +type AccountLocalIDGenerator struct { + mock.Mock +} + +// GenerateAccountID provides a mock function with given fields: address +func (_m *AccountLocalIDGenerator) GenerateAccountID(address common.Address) (uint64, error) { + ret := _m.Called(address) + + if len(ret) == 0 { + panic("no return value specified for GenerateAccountID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { + return rf(address) + } + if rf, ok := ret.Get(0).(func(common.Address) uint64); ok { + r0 = rf(address) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(common.Address) error); ok { + r1 = rf(address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewAccountLocalIDGenerator creates a new instance of AccountLocalIDGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAccountLocalIDGenerator(t interface { + mock.TestingT + Cleanup(func()) +}) *AccountLocalIDGenerator { + mock := &AccountLocalIDGenerator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/accounts.go b/fvm/environment/mock/accounts.go index 13a8dd34876..7113bb6b275 100644 --- a/fvm/environment/mock/accounts.go +++ b/fvm/environment/mock/accounts.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -15,20 +15,24 @@ type Accounts struct { mock.Mock } -// AllocateStorageIndex provides a mock function with given fields: address -func (_m *Accounts) AllocateStorageIndex(address flow.Address) (atree.StorageIndex, error) { +// AllocateSlabIndex provides a mock function with given fields: address +func (_m *Accounts) AllocateSlabIndex(address flow.Address) (atree.SlabIndex, error) { ret := _m.Called(address) - var r0 atree.StorageIndex + if len(ret) == 0 { + panic("no return value specified for AllocateSlabIndex") + } + + var r0 atree.SlabIndex var r1 error - if rf, ok := ret.Get(0).(func(flow.Address) (atree.StorageIndex, error)); ok { + if rf, ok := ret.Get(0).(func(flow.Address) (atree.SlabIndex, error)); ok { return rf(address) } - if rf, ok := ret.Get(0).(func(flow.Address) atree.StorageIndex); ok { + if rf, ok := ret.Get(0).(func(flow.Address) atree.SlabIndex); ok { r0 = rf(address) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(atree.StorageIndex) + r0 = ret.Get(0).(atree.SlabIndex) } } @@ -41,10 +45,14 @@ func (_m *Accounts) AllocateStorageIndex(address flow.Address) (atree.StorageInd return r0, r1 } -// AppendPublicKey provides a mock function with given fields: address, key -func (_m *Accounts) AppendPublicKey(address flow.Address, key flow.AccountPublicKey) error { +// AppendAccountPublicKey provides a mock function with given fields: address, key +func (_m *Accounts) AppendAccountPublicKey(address flow.Address, key flow.AccountPublicKey) error { ret := _m.Called(address, key) + if len(ret) == 0 { + panic("no return value specified for AppendAccountPublicKey") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Address, flow.AccountPublicKey) error); ok { r0 = rf(address, key) @@ -59,6 +67,10 @@ func (_m *Accounts) AppendPublicKey(address 
flow.Address, key flow.AccountPublic func (_m *Accounts) ContractExists(contractName string, address flow.Address) (bool, error) { ret := _m.Called(contractName, address) + if len(ret) == 0 { + panic("no return value specified for ContractExists") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(string, flow.Address) (bool, error)); ok { @@ -83,6 +95,10 @@ func (_m *Accounts) ContractExists(contractName string, address flow.Address) (b func (_m *Accounts) Create(publicKeys []flow.AccountPublicKey, newAddress flow.Address) error { ret := _m.Called(publicKeys, newAddress) + if len(ret) == 0 { + panic("no return value specified for Create") + } + var r0 error if rf, ok := ret.Get(0).(func([]flow.AccountPublicKey, flow.Address) error); ok { r0 = rf(publicKeys, newAddress) @@ -97,6 +113,10 @@ func (_m *Accounts) Create(publicKeys []flow.AccountPublicKey, newAddress flow.A func (_m *Accounts) DeleteContract(contractName string, address flow.Address) error { ret := _m.Called(contractName, address) + if len(ret) == 0 { + panic("no return value specified for DeleteContract") + } + var r0 error if rf, ok := ret.Get(0).(func(string, flow.Address) error); ok { r0 = rf(contractName, address) @@ -111,6 +131,10 @@ func (_m *Accounts) DeleteContract(contractName string, address flow.Address) er func (_m *Accounts) Exists(address flow.Address) (bool, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for Exists") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(flow.Address) (bool, error)); ok { @@ -131,10 +155,42 @@ func (_m *Accounts) Exists(address flow.Address) (bool, error) { return r0, r1 } +// GenerateAccountLocalID provides a mock function with given fields: address +func (_m *Accounts) GenerateAccountLocalID(address flow.Address) (uint64, error) { + ret := _m.Called(address) + + if len(ret) == 0 { + panic("no return value specified for GenerateAccountLocalID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(flow.Address) (uint64, error)); ok { + return rf(address) + } + if rf, ok := ret.Get(0).(func(flow.Address) uint64); ok { + r0 = rf(address) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(flow.Address) error); ok { + r1 = rf(address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Get provides a mock function with given fields: address func (_m *Accounts) Get(address flow.Address) (*flow.Account, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(flow.Address) (*flow.Account, error)); ok { @@ -157,10 +213,156 @@ func (_m *Accounts) Get(address flow.Address) (*flow.Account, error) { return r0, r1 } +// GetAccountPublicKey provides a mock function with given fields: address, keyIndex +func (_m *Accounts) GetAccountPublicKey(address flow.Address, keyIndex uint32) (flow.AccountPublicKey, error) { + ret := _m.Called(address, keyIndex) + + if len(ret) == 0 { + panic("no return value specified for GetAccountPublicKey") + } + + var r0 flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(flow.Address, uint32) (flow.AccountPublicKey, error)); ok { + return rf(address, keyIndex) + } + if rf, ok := ret.Get(0).(func(flow.Address, uint32) flow.AccountPublicKey); ok { + r0 = rf(address, keyIndex) + } else { + r0 = ret.Get(0).(flow.AccountPublicKey) + } + + if rf, ok := ret.Get(1).(func(flow.Address, uint32) error); ok { + r1 = rf(address, keyIndex) + 
} else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountPublicKeyCount provides a mock function with given fields: address +func (_m *Accounts) GetAccountPublicKeyCount(address flow.Address) (uint32, error) { + ret := _m.Called(address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountPublicKeyCount") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(flow.Address) (uint32, error)); ok { + return rf(address) + } + if rf, ok := ret.Get(0).(func(flow.Address) uint32); ok { + r0 = rf(address) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(flow.Address) error); ok { + r1 = rf(address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountPublicKeyRevokedStatus provides a mock function with given fields: address, keyIndex +func (_m *Accounts) GetAccountPublicKeyRevokedStatus(address flow.Address, keyIndex uint32) (bool, error) { + ret := _m.Called(address, keyIndex) + + if len(ret) == 0 { + panic("no return value specified for GetAccountPublicKeyRevokedStatus") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(flow.Address, uint32) (bool, error)); ok { + return rf(address, keyIndex) + } + if rf, ok := ret.Get(0).(func(flow.Address, uint32) bool); ok { + r0 = rf(address, keyIndex) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(flow.Address, uint32) error); ok { + r1 = rf(address, keyIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountPublicKeySequenceNumber provides a mock function with given fields: address, keyIndex +func (_m *Accounts) GetAccountPublicKeySequenceNumber(address flow.Address, keyIndex uint32) (uint64, error) { + ret := _m.Called(address, keyIndex) + + if len(ret) == 0 { + panic("no return value specified for GetAccountPublicKeySequenceNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(flow.Address, uint32) (uint64, error)); ok { + return rf(address, keyIndex) + } + if rf, ok := ret.Get(0).(func(flow.Address, uint32) uint64); ok { + r0 = rf(address, keyIndex) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(flow.Address, uint32) error); ok { + r1 = rf(address, keyIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountPublicKeys provides a mock function with given fields: address +func (_m *Accounts) GetAccountPublicKeys(address flow.Address) ([]flow.AccountPublicKey, error) { + ret := _m.Called(address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountPublicKeys") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(flow.Address) ([]flow.AccountPublicKey, error)); ok { + return rf(address) + } + if rf, ok := ret.Get(0).(func(flow.Address) []flow.AccountPublicKey); ok { + r0 = rf(address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(flow.Address) error); ok { + r1 = rf(address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetContract provides a mock function with given fields: contractName, address func (_m *Accounts) GetContract(contractName string, address flow.Address) ([]byte, error) { ret := _m.Called(contractName, address) + if len(ret) == 0 { + panic("no return value specified for GetContract") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(string, flow.Address) ([]byte, error)); ok { @@ -187,6 +389,10 @@ func (_m *Accounts) GetContract(contractName string, address 
flow.Address) ([]by func (_m *Accounts) GetContractNames(address flow.Address) ([]string, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for GetContractNames") + } + var r0 []string var r1 error if rf, ok := ret.Get(0).(func(flow.Address) ([]string, error)); ok { @@ -209,22 +415,26 @@ func (_m *Accounts) GetContractNames(address flow.Address) ([]string, error) { return r0, r1 } -// GetPublicKey provides a mock function with given fields: address, keyIndex -func (_m *Accounts) GetPublicKey(address flow.Address, keyIndex uint64) (flow.AccountPublicKey, error) { +// GetRuntimeAccountPublicKey provides a mock function with given fields: address, keyIndex +func (_m *Accounts) GetRuntimeAccountPublicKey(address flow.Address, keyIndex uint32) (flow.RuntimeAccountPublicKey, error) { ret := _m.Called(address, keyIndex) - var r0 flow.AccountPublicKey + if len(ret) == 0 { + panic("no return value specified for GetRuntimeAccountPublicKey") + } + + var r0 flow.RuntimeAccountPublicKey var r1 error - if rf, ok := ret.Get(0).(func(flow.Address, uint64) (flow.AccountPublicKey, error)); ok { + if rf, ok := ret.Get(0).(func(flow.Address, uint32) (flow.RuntimeAccountPublicKey, error)); ok { return rf(address, keyIndex) } - if rf, ok := ret.Get(0).(func(flow.Address, uint64) flow.AccountPublicKey); ok { + if rf, ok := ret.Get(0).(func(flow.Address, uint32) flow.RuntimeAccountPublicKey); ok { r0 = rf(address, keyIndex) } else { - r0 = ret.Get(0).(flow.AccountPublicKey) + r0 = ret.Get(0).(flow.RuntimeAccountPublicKey) } - if rf, ok := ret.Get(1).(func(flow.Address, uint64) error); ok { + if rf, ok := ret.Get(1).(func(flow.Address, uint32) error); ok { r1 = rf(address, keyIndex) } else { r1 = ret.Error(1) @@ -233,34 +443,14 @@ func (_m *Accounts) GetPublicKey(address flow.Address, keyIndex uint64) (flow.Ac return r0, r1 } -// GetPublicKeyCount provides a mock function with given fields: address -func (_m *Accounts) GetPublicKeyCount(address flow.Address) (uint64, error) { - ret := _m.Called(address) - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(flow.Address) (uint64, error)); ok { - return rf(address) - } - if rf, ok := ret.Get(0).(func(flow.Address) uint64); ok { - r0 = rf(address) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(flow.Address) error); ok { - r1 = rf(address) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetStorageUsed provides a mock function with given fields: address func (_m *Accounts) GetStorageUsed(address flow.Address) (uint64, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for GetStorageUsed") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(flow.Address) (uint64, error)); ok { @@ -282,19 +472,23 @@ func (_m *Accounts) GetStorageUsed(address flow.Address) (uint64, error) { } // GetValue provides a mock function with given fields: id -func (_m *Accounts) GetValue(id flow.RegisterID) ([]byte, error) { +func (_m *Accounts) GetValue(id flow.RegisterID) (flow.RegisterValue, error) { ret := _m.Called(id) - var r0 []byte + if len(ret) == 0 { + panic("no return value specified for GetValue") + } + + var r0 flow.RegisterValue var r1 error - if rf, ok := ret.Get(0).(func(flow.RegisterID) ([]byte, error)); ok { + if rf, ok := ret.Get(0).(func(flow.RegisterID) (flow.RegisterValue, error)); ok { return rf(id) } - if rf, ok := ret.Get(0).(func(flow.RegisterID) []byte); ok { + if rf, ok := ret.Get(0).(func(flow.RegisterID) flow.RegisterValue); 
ok { r0 = rf(id) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) + r0 = ret.Get(0).(flow.RegisterValue) } } @@ -307,13 +501,17 @@ func (_m *Accounts) GetValue(id flow.RegisterID) ([]byte, error) { return r0, r1 } -// SetContract provides a mock function with given fields: contractName, address, contract -func (_m *Accounts) SetContract(contractName string, address flow.Address, contract []byte) error { - ret := _m.Called(contractName, address, contract) +// IncrementAccountPublicKeySequenceNumber provides a mock function with given fields: address, keyIndex +func (_m *Accounts) IncrementAccountPublicKeySequenceNumber(address flow.Address, keyIndex uint32) error { + ret := _m.Called(address, keyIndex) + + if len(ret) == 0 { + panic("no return value specified for IncrementAccountPublicKeySequenceNumber") + } var r0 error - if rf, ok := ret.Get(0).(func(string, flow.Address, []byte) error); ok { - r0 = rf(contractName, address, contract) + if rf, ok := ret.Get(0).(func(flow.Address, uint32) error); ok { + r0 = rf(address, keyIndex) } else { r0 = ret.Error(0) } @@ -321,38 +519,52 @@ func (_m *Accounts) SetContract(contractName string, address flow.Address, contr return r0 } -// SetPublicKey provides a mock function with given fields: address, keyIndex, publicKey -func (_m *Accounts) SetPublicKey(address flow.Address, keyIndex uint64, publicKey flow.AccountPublicKey) ([]byte, error) { - ret := _m.Called(address, keyIndex, publicKey) +// RevokeAccountPublicKey provides a mock function with given fields: address, keyIndex +func (_m *Accounts) RevokeAccountPublicKey(address flow.Address, keyIndex uint32) error { + ret := _m.Called(address, keyIndex) - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(flow.Address, uint64, flow.AccountPublicKey) ([]byte, error)); ok { - return rf(address, keyIndex, publicKey) + if len(ret) == 0 { + panic("no return value specified for RevokeAccountPublicKey") } - if rf, ok := ret.Get(0).(func(flow.Address, uint64, flow.AccountPublicKey) []byte); ok { - r0 = rf(address, keyIndex, publicKey) + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Address, uint32) error); ok { + r0 = rf(address, keyIndex) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } + r0 = ret.Error(0) + } + + return r0 +} + +// SetContract provides a mock function with given fields: contractName, address, contract +func (_m *Accounts) SetContract(contractName string, address flow.Address, contract []byte) error { + ret := _m.Called(contractName, address, contract) + + if len(ret) == 0 { + panic("no return value specified for SetContract") } - if rf, ok := ret.Get(1).(func(flow.Address, uint64, flow.AccountPublicKey) error); ok { - r1 = rf(address, keyIndex, publicKey) + var r0 error + if rf, ok := ret.Get(0).(func(string, flow.Address, []byte) error); ok { + r0 = rf(contractName, address, contract) } else { - r1 = ret.Error(1) + r0 = ret.Error(0) } - return r0, r1 + return r0 } // SetValue provides a mock function with given fields: id, value -func (_m *Accounts) SetValue(id flow.RegisterID, value []byte) error { +func (_m *Accounts) SetValue(id flow.RegisterID, value flow.RegisterValue) error { ret := _m.Called(id, value) + if len(ret) == 0 { + panic("no return value specified for SetValue") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.RegisterID, []byte) error); ok { + if rf, ok := ret.Get(0).(func(flow.RegisterID, flow.RegisterValue) error); ok { r0 = rf(id, value) } else { r0 = ret.Error(0) @@ -361,13 +573,12 @@ func (_m *Accounts) SetValue(id 
flow.RegisterID, value []byte) error { return r0 } -type mockConstructorTestingTNewAccounts interface { +// NewAccounts creates a new instance of Accounts. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAccounts(t interface { mock.TestingT Cleanup(func()) -} - -// NewAccounts creates a new instance of Accounts. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAccounts(t mockConstructorTestingTNewAccounts) *Accounts { +}) *Accounts { mock := &Accounts{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/address_generator.go b/fvm/environment/mock/address_generator.go index 26f5e1158ac..f952d9bfa71 100644 --- a/fvm/environment/mock/address_generator.go +++ b/fvm/environment/mock/address_generator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -12,10 +12,14 @@ type AddressGenerator struct { mock.Mock } -// AddressCount provides a mock function with given fields: +// AddressCount provides a mock function with no fields func (_m *AddressGenerator) AddressCount() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for AddressCount") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -26,10 +30,14 @@ func (_m *AddressGenerator) AddressCount() uint64 { return r0 } -// Bytes provides a mock function with given fields: +// Bytes provides a mock function with no fields func (_m *AddressGenerator) Bytes() []byte { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Bytes") + } + var r0 []byte if rf, ok := ret.Get(0).(func() []byte); ok { r0 = rf() @@ -42,10 +50,14 @@ func (_m *AddressGenerator) Bytes() []byte { return r0 } -// CurrentAddress provides a mock function with given fields: +// CurrentAddress provides a mock function with no fields func (_m *AddressGenerator) CurrentAddress() flow.Address { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for CurrentAddress") + } + var r0 flow.Address if rf, ok := ret.Get(0).(func() flow.Address); ok { r0 = rf() @@ -58,10 +70,14 @@ func (_m *AddressGenerator) CurrentAddress() flow.Address { return r0 } -// NextAddress provides a mock function with given fields: +// NextAddress provides a mock function with no fields func (_m *AddressGenerator) NextAddress() (flow.Address, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NextAddress") + } + var r0 flow.Address var r1 error if rf, ok := ret.Get(0).(func() (flow.Address, error)); ok { @@ -84,13 +100,12 @@ func (_m *AddressGenerator) NextAddress() (flow.Address, error) { return r0, r1 } -type mockConstructorTestingTNewAddressGenerator interface { +// NewAddressGenerator creates a new instance of AddressGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAddressGenerator(t interface { mock.TestingT Cleanup(func()) -} - -// NewAddressGenerator creates a new instance of AddressGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewAddressGenerator(t mockConstructorTestingTNewAddressGenerator) *AddressGenerator { +}) *AddressGenerator { mock := &AddressGenerator{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/block_info.go b/fvm/environment/mock/block_info.go index 27e19e3206e..bbcc1caeb7c 100644 --- a/fvm/environment/mock/block_info.go +++ b/fvm/environment/mock/block_info.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( - stdlib "github.com/onflow/cadence/runtime/stdlib" + runtime "github.com/onflow/cadence/runtime" mock "github.com/stretchr/testify/mock" ) @@ -13,19 +13,23 @@ type BlockInfo struct { } // GetBlockAtHeight provides a mock function with given fields: height -func (_m *BlockInfo) GetBlockAtHeight(height uint64) (stdlib.Block, bool, error) { +func (_m *BlockInfo) GetBlockAtHeight(height uint64) (runtime.Block, bool, error) { ret := _m.Called(height) - var r0 stdlib.Block + if len(ret) == 0 { + panic("no return value specified for GetBlockAtHeight") + } + + var r0 runtime.Block var r1 bool var r2 error - if rf, ok := ret.Get(0).(func(uint64) (stdlib.Block, bool, error)); ok { + if rf, ok := ret.Get(0).(func(uint64) (runtime.Block, bool, error)); ok { return rf(height) } - if rf, ok := ret.Get(0).(func(uint64) stdlib.Block); ok { + if rf, ok := ret.Get(0).(func(uint64) runtime.Block); ok { r0 = rf(height) } else { - r0 = ret.Get(0).(stdlib.Block) + r0 = ret.Get(0).(runtime.Block) } if rf, ok := ret.Get(1).(func(uint64) bool); ok { @@ -43,10 +47,14 @@ func (_m *BlockInfo) GetBlockAtHeight(height uint64) (stdlib.Block, bool, error) return r0, r1, r2 } -// GetCurrentBlockHeight provides a mock function with given fields: +// GetCurrentBlockHeight provides a mock function with no fields func (_m *BlockInfo) GetCurrentBlockHeight() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetCurrentBlockHeight") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -67,13 +75,12 @@ func (_m *BlockInfo) GetCurrentBlockHeight() (uint64, error) { return r0, r1 } -type mockConstructorTestingTNewBlockInfo interface { +// NewBlockInfo creates a new instance of BlockInfo. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockInfo(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlockInfo creates a new instance of BlockInfo. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockInfo(t mockConstructorTestingTNewBlockInfo) *BlockInfo { +}) *BlockInfo { mock := &BlockInfo{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/blocks.go b/fvm/environment/mock/blocks.go index 51d1305c8a5..22f67e90dea 100644 --- a/fvm/environment/mock/blocks.go +++ b/fvm/environment/mock/blocks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -16,6 +16,10 @@ type Blocks struct { func (_m *Blocks) ByHeightFrom(height uint64, header *flow.Header) (*flow.Header, error) { ret := _m.Called(height, header) + if len(ret) == 0 { + panic("no return value specified for ByHeightFrom") + } + var r0 *flow.Header var r1 error if rf, ok := ret.Get(0).(func(uint64, *flow.Header) (*flow.Header, error)); ok { @@ -38,13 +42,12 @@ func (_m *Blocks) ByHeightFrom(height uint64, header *flow.Header) (*flow.Header return r0, r1 } -type mockConstructorTestingTNewBlocks interface { +// NewBlocks creates a new instance of Blocks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlocks(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlocks creates a new instance of Blocks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlocks(t mockConstructorTestingTNewBlocks) *Blocks { +}) *Blocks { mock := &Blocks{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/bootstrap_account_creator.go b/fvm/environment/mock/bootstrap_account_creator.go index 3fb8a316a18..a6da607bc35 100644 --- a/fvm/environment/mock/bootstrap_account_creator.go +++ b/fvm/environment/mock/bootstrap_account_creator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type BootstrapAccountCreator struct { func (_m *BootstrapAccountCreator) CreateBootstrapAccount(publicKeys []flow.AccountPublicKey) (flow.Address, error) { ret := _m.Called(publicKeys) + if len(ret) == 0 { + panic("no return value specified for CreateBootstrapAccount") + } + var r0 flow.Address var r1 error if rf, ok := ret.Get(0).(func([]flow.AccountPublicKey) (flow.Address, error)); ok { @@ -38,13 +42,12 @@ func (_m *BootstrapAccountCreator) CreateBootstrapAccount(publicKeys []flow.Acco return r0, r1 } -type mockConstructorTestingTNewBootstrapAccountCreator interface { +// NewBootstrapAccountCreator creates a new instance of BootstrapAccountCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBootstrapAccountCreator(t interface { mock.TestingT Cleanup(func()) -} - -// NewBootstrapAccountCreator creates a new instance of BootstrapAccountCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBootstrapAccountCreator(t mockConstructorTestingTNewBootstrapAccountCreator) *BootstrapAccountCreator { +}) *BootstrapAccountCreator { mock := &BootstrapAccountCreator{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/contract_function_invoker.go b/fvm/environment/mock/contract_function_invoker.go new file mode 100644 index 00000000000..357f29f3bda --- /dev/null +++ b/fvm/environment/mock/contract_function_invoker.go @@ -0,0 +1,58 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + cadence "github.com/onflow/cadence" + environment "github.com/onflow/flow-go/fvm/environment" + mock "github.com/stretchr/testify/mock" +) + +// ContractFunctionInvoker is an autogenerated mock type for the ContractFunctionInvoker type +type ContractFunctionInvoker struct { + mock.Mock +} + +// Invoke provides a mock function with given fields: spec, arguments +func (_m *ContractFunctionInvoker) Invoke(spec environment.ContractFunctionSpec, arguments []cadence.Value) (cadence.Value, error) { + ret := _m.Called(spec, arguments) + + if len(ret) == 0 { + panic("no return value specified for Invoke") + } + + var r0 cadence.Value + var r1 error + if rf, ok := ret.Get(0).(func(environment.ContractFunctionSpec, []cadence.Value) (cadence.Value, error)); ok { + return rf(spec, arguments) + } + if rf, ok := ret.Get(0).(func(environment.ContractFunctionSpec, []cadence.Value) cadence.Value); ok { + r0 = rf(spec, arguments) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(cadence.Value) + } + } + + if rf, ok := ret.Get(1).(func(environment.ContractFunctionSpec, []cadence.Value) error); ok { + r1 = rf(spec, arguments) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewContractFunctionInvoker creates a new instance of ContractFunctionInvoker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewContractFunctionInvoker(t interface { + mock.TestingT + Cleanup(func()) +}) *ContractFunctionInvoker { + mock := &ContractFunctionInvoker{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/contract_updater.go b/fvm/environment/mock/contract_updater.go index fa5fba0bb5c..99a545f3f7e 100644 --- a/fvm/environment/mock/contract_updater.go +++ b/fvm/environment/mock/contract_updater.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( - common "github.com/onflow/cadence/runtime/common" + common "github.com/onflow/cadence/common" environment "github.com/onflow/flow-go/fvm/environment" mock "github.com/stretchr/testify/mock" @@ -14,10 +14,14 @@ type ContractUpdater struct { mock.Mock } -// Commit provides a mock function with given fields: +// Commit provides a mock function with no fields func (_m *ContractUpdater) Commit() (environment.ContractUpdates, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 environment.ContractUpdates var r1 error if rf, ok := ret.Get(0).(func() (environment.ContractUpdates, error)); ok { @@ -42,6 +46,10 @@ func (_m *ContractUpdater) Commit() (environment.ContractUpdates, error) { func (_m *ContractUpdater) RemoveAccountContractCode(location common.AddressLocation) error { ret := _m.Called(location) + if len(ret) == 0 { + panic("no return value specified for RemoveAccountContractCode") + } + var r0 error if rf, ok := ret.Get(0).(func(common.AddressLocation) error); ok { r0 = rf(location) @@ -52,7 +60,7 @@ func (_m *ContractUpdater) RemoveAccountContractCode(location common.AddressLoca return r0 } -// Reset provides a mock function with given fields: +// Reset provides a mock function with no fields func (_m *ContractUpdater) Reset() { _m.Called() } @@ -61,6 +69,10 @@ func (_m *ContractUpdater) Reset() { func (_m *ContractUpdater) UpdateAccountContractCode(location common.AddressLocation, code []byte) error { ret := _m.Called(location, code) + if len(ret) == 0 { + panic("no return value specified for UpdateAccountContractCode") + } + var r0 error if rf, ok := ret.Get(0).(func(common.AddressLocation, []byte) error); ok { r0 = rf(location, code) @@ -71,13 +83,12 @@ func (_m *ContractUpdater) UpdateAccountContractCode(location common.AddressLoca return r0 } -type mockConstructorTestingTNewContractUpdater interface { +// NewContractUpdater creates a new instance of ContractUpdater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewContractUpdater(t interface { mock.TestingT Cleanup(func()) -} - -// NewContractUpdater creates a new instance of ContractUpdater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewContractUpdater(t mockConstructorTestingTNewContractUpdater) *ContractUpdater { +}) *ContractUpdater { mock := &ContractUpdater{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/contract_updater_stubs.go b/fvm/environment/mock/contract_updater_stubs.go index a7edad7ee14..5579a9b1e92 100644 --- a/fvm/environment/mock/contract_updater_stubs.go +++ b/fvm/environment/mock/contract_updater_stubs.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -19,6 +19,10 @@ type ContractUpdaterStubs struct { func (_m *ContractUpdaterStubs) GetAuthorizedAccounts(path cadence.Path) []flow.Address { ret := _m.Called(path) + if len(ret) == 0 { + panic("no return value specified for GetAuthorizedAccounts") + } + var r0 []flow.Address if rf, ok := ret.Get(0).(func(cadence.Path) []flow.Address); ok { r0 = rf(path) @@ -31,10 +35,14 @@ func (_m *ContractUpdaterStubs) GetAuthorizedAccounts(path cadence.Path) []flow. 
return r0 } -// RestrictedDeploymentEnabled provides a mock function with given fields: +// RestrictedDeploymentEnabled provides a mock function with no fields func (_m *ContractUpdaterStubs) RestrictedDeploymentEnabled() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RestrictedDeploymentEnabled") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -45,10 +53,14 @@ func (_m *ContractUpdaterStubs) RestrictedDeploymentEnabled() bool { return r0 } -// RestrictedRemovalEnabled provides a mock function with given fields: +// RestrictedRemovalEnabled provides a mock function with no fields func (_m *ContractUpdaterStubs) RestrictedRemovalEnabled() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RestrictedRemovalEnabled") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -59,13 +71,12 @@ func (_m *ContractUpdaterStubs) RestrictedRemovalEnabled() bool { return r0 } -type mockConstructorTestingTNewContractUpdaterStubs interface { +// NewContractUpdaterStubs creates a new instance of ContractUpdaterStubs. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewContractUpdaterStubs(t interface { mock.TestingT Cleanup(func()) -} - -// NewContractUpdaterStubs creates a new instance of ContractUpdaterStubs. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewContractUpdaterStubs(t mockConstructorTestingTNewContractUpdaterStubs) *ContractUpdaterStubs { +}) *ContractUpdaterStubs { mock := &ContractUpdaterStubs{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/crypto_library.go b/fvm/environment/mock/crypto_library.go index 32f794a4800..d0d5d91e039 100644 --- a/fvm/environment/mock/crypto_library.go +++ b/fvm/environment/mock/crypto_library.go @@ -1,12 +1,10 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( - sema "github.com/onflow/cadence/runtime/sema" + runtime "github.com/onflow/cadence/runtime" mock "github.com/stretchr/testify/mock" - - stdlib "github.com/onflow/cadence/runtime/stdlib" ) // CryptoLibrary is an autogenerated mock type for the CryptoLibrary type @@ -15,23 +13,27 @@ type CryptoLibrary struct { } // BLSAggregatePublicKeys provides a mock function with given fields: keys -func (_m *CryptoLibrary) BLSAggregatePublicKeys(keys []*stdlib.PublicKey) (*stdlib.PublicKey, error) { +func (_m *CryptoLibrary) BLSAggregatePublicKeys(keys []*runtime.PublicKey) (*runtime.PublicKey, error) { ret := _m.Called(keys) - var r0 *stdlib.PublicKey + if len(ret) == 0 { + panic("no return value specified for BLSAggregatePublicKeys") + } + + var r0 *runtime.PublicKey var r1 error - if rf, ok := ret.Get(0).(func([]*stdlib.PublicKey) (*stdlib.PublicKey, error)); ok { + if rf, ok := ret.Get(0).(func([]*runtime.PublicKey) (*runtime.PublicKey, error)); ok { return rf(keys) } - if rf, ok := ret.Get(0).(func([]*stdlib.PublicKey) *stdlib.PublicKey); ok { + if rf, ok := ret.Get(0).(func([]*runtime.PublicKey) *runtime.PublicKey); ok { r0 = rf(keys) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*stdlib.PublicKey) + r0 = ret.Get(0).(*runtime.PublicKey) } } - if rf, ok := ret.Get(1).(func([]*stdlib.PublicKey) error); ok { + if rf, ok := ret.Get(1).(func([]*runtime.PublicKey) error); ok { r1 = rf(keys) } else { r1 = ret.Error(1) @@ -44,6 +46,10 @@ func (_m *CryptoLibrary) BLSAggregatePublicKeys(keys []*stdlib.PublicKey) (*stdl func (_m *CryptoLibrary) BLSAggregateSignatures(sigs [][]byte) ([]byte, error) { ret := _m.Called(sigs) + if len(ret) == 0 { + panic("no return value specified for BLSAggregateSignatures") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func([][]byte) ([]byte, error)); ok { @@ -67,21 +73,25 @@ func (_m *CryptoLibrary) BLSAggregateSignatures(sigs [][]byte) ([]byte, error) { } // BLSVerifyPOP provides a mock function with given fields: pk, sig -func (_m *CryptoLibrary) BLSVerifyPOP(pk *stdlib.PublicKey, sig []byte) (bool, error) { +func (_m *CryptoLibrary) BLSVerifyPOP(pk *runtime.PublicKey, sig []byte) (bool, error) { ret := _m.Called(pk, sig) + if len(ret) == 0 { + panic("no return value specified for BLSVerifyPOP") + } + var r0 bool var r1 error - if rf, ok := ret.Get(0).(func(*stdlib.PublicKey, []byte) (bool, error)); ok { + if rf, ok := ret.Get(0).(func(*runtime.PublicKey, []byte) (bool, error)); ok { return rf(pk, sig) } - if rf, ok := ret.Get(0).(func(*stdlib.PublicKey, []byte) bool); ok { + if rf, ok := ret.Get(0).(func(*runtime.PublicKey, []byte) bool); ok { r0 = rf(pk, sig) } else { r0 = ret.Get(0).(bool) } - if rf, ok := ret.Get(1).(func(*stdlib.PublicKey, []byte) error); ok { + if rf, ok := ret.Get(1).(func(*runtime.PublicKey, []byte) error); ok { r1 = rf(pk, sig) } else { r1 = ret.Error(1) @@ -91,15 +101,19 @@ func (_m *CryptoLibrary) BLSVerifyPOP(pk *stdlib.PublicKey, sig []byte) (bool, e } // Hash provides a mock function with given fields: data, tag, hashAlgorithm -func (_m *CryptoLibrary) Hash(data []byte, tag string, hashAlgorithm sema.HashAlgorithm) ([]byte, error) { +func (_m *CryptoLibrary) Hash(data []byte, tag string, hashAlgorithm runtime.HashAlgorithm) ([]byte, error) { ret := _m.Called(data, tag, hashAlgorithm) + if len(ret) == 0 { + panic("no return value specified for Hash") + } + var r0 []byte var r1 error - if rf, ok := ret.Get(0).(func([]byte, string, sema.HashAlgorithm) ([]byte, error)); ok { + if rf, ok := 
ret.Get(0).(func([]byte, string, sema.HashAlgorithm) ([]byte, error)); ok { + if rf, ok := ret.Get(0).(func([]byte, string, runtime.HashAlgorithm) ([]byte, error)); ok { return rf(data, tag, hashAlgorithm) } - if rf, ok := ret.Get(0).(func([]byte, string, sema.HashAlgorithm) []byte); ok { + if rf, ok := ret.Get(0).(func([]byte, string, runtime.HashAlgorithm) []byte); ok { r0 = rf(data, tag, hashAlgorithm) } else { if ret.Get(0) != nil { @@ -107,7 +121,7 @@ func (_m *CryptoLibrary) Hash(data []byte, tag string, hashAlgorithm sema.HashAl } } - if rf, ok := ret.Get(1).(func([]byte, string, sema.HashAlgorithm) error); ok { + if rf, ok := ret.Get(1).(func([]byte, string, runtime.HashAlgorithm) error); ok { r1 = rf(data, tag, hashAlgorithm) } else { r1 = ret.Error(1) @@ -117,11 +131,15 @@ func (_m *CryptoLibrary) Hash(data []byte, tag string, hashAlgorithm sema.HashAl } // ValidatePublicKey provides a mock function with given fields: pk -func (_m *CryptoLibrary) ValidatePublicKey(pk *stdlib.PublicKey) error { +func (_m *CryptoLibrary) ValidatePublicKey(pk *runtime.PublicKey) error { ret := _m.Called(pk) + if len(ret) == 0 { + panic("no return value specified for ValidatePublicKey") + } + var r0 error - if rf, ok := ret.Get(0).(func(*stdlib.PublicKey) error); ok { + if rf, ok := ret.Get(0).(func(*runtime.PublicKey) error); ok { r0 = rf(pk) } else { r0 = ret.Error(0) @@ -131,21 +149,25 @@ func (_m *CryptoLibrary) ValidatePublicKey(pk *stdlib.PublicKey) error { } // VerifySignature provides a mock function with given fields: signature, tag, signedData, publicKey, signatureAlgorithm, hashAlgorithm -func (_m *CryptoLibrary) VerifySignature(signature []byte, tag string, signedData []byte, publicKey []byte, signatureAlgorithm sema.SignatureAlgorithm, hashAlgorithm sema.HashAlgorithm) (bool, error) { +func (_m *CryptoLibrary) VerifySignature(signature []byte, tag string, signedData []byte, publicKey []byte, signatureAlgorithm runtime.SignatureAlgorithm, hashAlgorithm runtime.HashAlgorithm) (bool, error) { ret := _m.Called(signature, tag, signedData, publicKey, signatureAlgorithm, hashAlgorithm) + if len(ret) == 0 { + panic("no return value specified for VerifySignature") + } + var r0 bool var r1 error - if rf, ok := ret.Get(0).(func([]byte, string, []byte, []byte, sema.SignatureAlgorithm, sema.HashAlgorithm) (bool, error)); ok { + if rf, ok := ret.Get(0).(func([]byte, string, []byte, []byte, runtime.SignatureAlgorithm, runtime.HashAlgorithm) (bool, error)); ok { return rf(signature, tag, signedData, publicKey, signatureAlgorithm, hashAlgorithm) } - if rf, ok := ret.Get(0).(func([]byte, string, []byte, []byte, sema.SignatureAlgorithm, sema.HashAlgorithm) bool); ok { + if rf, ok := ret.Get(0).(func([]byte, string, []byte, []byte, runtime.SignatureAlgorithm, runtime.HashAlgorithm) bool); ok { r0 = rf(signature, tag, signedData, publicKey, signatureAlgorithm, hashAlgorithm) } else { r0 = ret.Get(0).(bool) } - if rf, ok := ret.Get(1).(func([]byte, string, []byte, []byte, sema.SignatureAlgorithm, sema.HashAlgorithm) error); ok { + if rf, ok := ret.Get(1).(func([]byte, string, []byte, []byte, runtime.SignatureAlgorithm, runtime.HashAlgorithm) error); ok { r1 = rf(signature, tag, signedData, publicKey, signatureAlgorithm, hashAlgorithm) } else { r1 = ret.Error(1) @@ -154,13 +176,12 @@ func (_m *CryptoLibrary) VerifySignature(signature []byte, tag string, signedDat return r0, r1 } -type mockConstructorTestingTNewCryptoLibrary interface { +// NewCryptoLibrary creates a new instance of CryptoLibrary.
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCryptoLibrary(t interface { mock.TestingT Cleanup(func()) -} - -// NewCryptoLibrary creates a new instance of CryptoLibrary. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCryptoLibrary(t mockConstructorTestingTNewCryptoLibrary) *CryptoLibrary { +}) *CryptoLibrary { mock := &CryptoLibrary{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/entropy_provider.go b/fvm/environment/mock/entropy_provider.go new file mode 100644 index 00000000000..75c94d26177 --- /dev/null +++ b/fvm/environment/mock/entropy_provider.go @@ -0,0 +1,54 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// EntropyProvider is an autogenerated mock type for the EntropyProvider type +type EntropyProvider struct { + mock.Mock +} + +// RandomSource provides a mock function with no fields +func (_m *EntropyProvider) RandomSource() ([]byte, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RandomSource") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func() ([]byte, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEntropyProvider creates a new instance of EntropyProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEntropyProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *EntropyProvider { + mock := &EntropyProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/environment.go b/fvm/environment/mock/environment.go index 11ee326c3f5..ddffc2c3ca5 100644 --- a/fvm/environment/mock/environment.go +++ b/fvm/environment/mock/environment.go @@ -1,22 +1,24 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( atree "github.com/onflow/atree" - ast "github.com/onflow/cadence/runtime/ast" + ast "github.com/onflow/cadence/ast" attribute "go.opentelemetry.io/otel/attribute" cadence "github.com/onflow/cadence" - common "github.com/onflow/cadence/runtime/common" + common "github.com/onflow/cadence/common" environment "github.com/onflow/flow-go/fvm/environment" flow "github.com/onflow/flow-go/model/flow" - interpreter "github.com/onflow/cadence/runtime/interpreter" + fvmruntime "github.com/onflow/flow-go/fvm/runtime" + + interpreter "github.com/onflow/cadence/interpreter" meter "github.com/onflow/flow-go/fvm/meter" @@ -24,11 +26,9 @@ import ( oteltrace "go.opentelemetry.io/otel/trace" - runtime "github.com/onflow/flow-go/fvm/runtime" - - sema "github.com/onflow/cadence/runtime/sema" + runtime "github.com/onflow/cadence/runtime" - stdlib "github.com/onflow/cadence/runtime/stdlib" + sema "github.com/onflow/cadence/sema" time "time" @@ -45,21 +45,25 @@ type Environment struct { } // AccountKeysCount provides a mock function with given fields: address -func (_m *Environment) AccountKeysCount(address common.Address) (uint64, error) { +func (_m *Environment) AccountKeysCount(address runtime.Address) (uint32, error) { ret := _m.Called(address) - var r0 uint64 + if len(ret) == 0 { + panic("no return value specified for AccountKeysCount") + } + + var r0 uint32 var r1 error - if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { + if rf, ok := ret.Get(0).(func(runtime.Address) (uint32, error)); ok { return rf(address) } - if rf, ok := ret.Get(0).(func(common.Address) uint64); ok { + if rf, ok := ret.Get(0).(func(runtime.Address) uint32); ok { r0 = rf(address) } else { - r0 = ret.Get(0).(uint64) + r0 = ret.Get(0).(uint32) } - if rf, ok := ret.Get(1).(func(common.Address) error); ok { + if rf, ok := ret.Get(1).(func(runtime.Address) error); ok { r1 = rf(address) } else { r1 = ret.Error(1) @@ -72,6 +76,10 @@ func (_m *Environment) AccountKeysCount(address common.Address) (uint64, error) func (_m *Environment) AccountsStorageCapacity(addresses []flow.Address, payer flow.Address, maxTxFees uint64) (cadence.Value, error) { ret := _m.Called(addresses, payer, maxTxFees) + if len(ret) == 0 { + panic("no return value specified for AccountsStorageCapacity") + } + var r0 cadence.Value var r1 error if rf, ok := ret.Get(0).(func([]flow.Address, flow.Address, uint64) (cadence.Value, error)); ok { @@ -95,23 +103,27 @@ func (_m *Environment) AccountsStorageCapacity(addresses []flow.Address, payer f } // AddAccountKey provides a mock function with given fields: address, publicKey, hashAlgo, weight -func (_m *Environment) AddAccountKey(address common.Address, publicKey *stdlib.PublicKey, hashAlgo sema.HashAlgorithm, weight int) (*stdlib.AccountKey, error) { +func (_m *Environment) AddAccountKey(address runtime.Address, publicKey *runtime.PublicKey, hashAlgo runtime.HashAlgorithm, weight int) (*runtime.AccountKey, error) { ret := _m.Called(address, publicKey, hashAlgo, weight) - var r0 *stdlib.AccountKey + if len(ret) == 0 { + panic("no return value specified for AddAccountKey") + } + + var r0 *runtime.AccountKey var r1 error - if rf, ok := ret.Get(0).(func(common.Address, *stdlib.PublicKey, sema.HashAlgorithm, int) (*stdlib.AccountKey, error)); ok { + if rf, ok := ret.Get(0).(func(runtime.Address, *runtime.PublicKey, runtime.HashAlgorithm, int) (*runtime.AccountKey, error)); ok { return rf(address, publicKey, hashAlgo, weight) } - if rf, ok := ret.Get(0).(func(common.Address, *stdlib.PublicKey, 
sema.HashAlgorithm, int) *stdlib.AccountKey); ok { + if rf, ok := ret.Get(0).(func(runtime.Address, *runtime.PublicKey, runtime.HashAlgorithm, int) *runtime.AccountKey); ok { r0 = rf(address, publicKey, hashAlgo, weight) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*stdlib.AccountKey) + r0 = ret.Get(0).(*runtime.AccountKey) } } - if rf, ok := ret.Get(1).(func(common.Address, *stdlib.PublicKey, sema.HashAlgorithm, int) error); ok { + if rf, ok := ret.Get(1).(func(runtime.Address, *runtime.PublicKey, runtime.HashAlgorithm, int) error); ok { r1 = rf(address, publicKey, hashAlgo, weight) } else { r1 = ret.Error(1) @@ -120,34 +132,24 @@ func (_m *Environment) AddAccountKey(address common.Address, publicKey *stdlib.P return r0, r1 } -// AddEncodedAccountKey provides a mock function with given fields: address, publicKey -func (_m *Environment) AddEncodedAccountKey(address common.Address, publicKey []byte) error { - ret := _m.Called(address, publicKey) +// AllocateSlabIndex provides a mock function with given fields: owner +func (_m *Environment) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) { + ret := _m.Called(owner) - var r0 error - if rf, ok := ret.Get(0).(func(common.Address, []byte) error); ok { - r0 = rf(address, publicKey) - } else { - r0 = ret.Error(0) + if len(ret) == 0 { + panic("no return value specified for AllocateSlabIndex") } - return r0 -} - -// AllocateStorageIndex provides a mock function with given fields: owner -func (_m *Environment) AllocateStorageIndex(owner []byte) (atree.StorageIndex, error) { - ret := _m.Called(owner) - - var r0 atree.StorageIndex + var r0 atree.SlabIndex var r1 error - if rf, ok := ret.Get(0).(func([]byte) (atree.StorageIndex, error)); ok { + if rf, ok := ret.Get(0).(func([]byte) (atree.SlabIndex, error)); ok { return rf(owner) } - if rf, ok := ret.Get(0).(func([]byte) atree.StorageIndex); ok { + if rf, ok := ret.Get(0).(func([]byte) atree.SlabIndex); ok { r0 = rf(owner) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(atree.StorageIndex) + r0 = ret.Get(0).(atree.SlabIndex) } } @@ -161,23 +163,27 @@ func (_m *Environment) AllocateStorageIndex(owner []byte) (atree.StorageIndex, e } // BLSAggregatePublicKeys provides a mock function with given fields: publicKeys -func (_m *Environment) BLSAggregatePublicKeys(publicKeys []*stdlib.PublicKey) (*stdlib.PublicKey, error) { +func (_m *Environment) BLSAggregatePublicKeys(publicKeys []*runtime.PublicKey) (*runtime.PublicKey, error) { ret := _m.Called(publicKeys) - var r0 *stdlib.PublicKey + if len(ret) == 0 { + panic("no return value specified for BLSAggregatePublicKeys") + } + + var r0 *runtime.PublicKey var r1 error - if rf, ok := ret.Get(0).(func([]*stdlib.PublicKey) (*stdlib.PublicKey, error)); ok { + if rf, ok := ret.Get(0).(func([]*runtime.PublicKey) (*runtime.PublicKey, error)); ok { return rf(publicKeys) } - if rf, ok := ret.Get(0).(func([]*stdlib.PublicKey) *stdlib.PublicKey); ok { + if rf, ok := ret.Get(0).(func([]*runtime.PublicKey) *runtime.PublicKey); ok { r0 = rf(publicKeys) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*stdlib.PublicKey) + r0 = ret.Get(0).(*runtime.PublicKey) } } - if rf, ok := ret.Get(1).(func([]*stdlib.PublicKey) error); ok { + if rf, ok := ret.Get(1).(func([]*runtime.PublicKey) error); ok { r1 = rf(publicKeys) } else { r1 = ret.Error(1) @@ -190,6 +196,10 @@ func (_m *Environment) BLSAggregatePublicKeys(publicKeys []*stdlib.PublicKey) (* func (_m *Environment) BLSAggregateSignatures(signatures [][]byte) ([]byte, error) { ret := _m.Called(signatures) + if 
len(ret) == 0 { + panic("no return value specified for BLSAggregateSignatures") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func([][]byte) ([]byte, error)); ok { @@ -213,21 +223,25 @@ func (_m *Environment) BLSAggregateSignatures(signatures [][]byte) ([]byte, erro } // BLSVerifyPOP provides a mock function with given fields: publicKey, signature -func (_m *Environment) BLSVerifyPOP(publicKey *stdlib.PublicKey, signature []byte) (bool, error) { +func (_m *Environment) BLSVerifyPOP(publicKey *runtime.PublicKey, signature []byte) (bool, error) { ret := _m.Called(publicKey, signature) + if len(ret) == 0 { + panic("no return value specified for BLSVerifyPOP") + } + var r0 bool var r1 error - if rf, ok := ret.Get(0).(func(*stdlib.PublicKey, []byte) (bool, error)); ok { + if rf, ok := ret.Get(0).(func(*runtime.PublicKey, []byte) (bool, error)); ok { return rf(publicKey, signature) } - if rf, ok := ret.Get(0).(func(*stdlib.PublicKey, []byte) bool); ok { + if rf, ok := ret.Get(0).(func(*runtime.PublicKey, []byte) bool); ok { r0 = rf(publicKey, signature) } else { r0 = ret.Get(0).(bool) } - if rf, ok := ret.Get(1).(func(*stdlib.PublicKey, []byte) error); ok { + if rf, ok := ret.Get(1).(func(*runtime.PublicKey, []byte) error); ok { r1 = rf(publicKey, signature) } else { r1 = ret.Error(1) @@ -236,16 +250,20 @@ func (_m *Environment) BLSVerifyPOP(publicKey *stdlib.PublicKey, signature []byt return r0, r1 } -// BorrowCadenceRuntime provides a mock function with given fields: -func (_m *Environment) BorrowCadenceRuntime() *runtime.ReusableCadenceRuntime { +// BorrowCadenceRuntime provides a mock function with no fields +func (_m *Environment) BorrowCadenceRuntime() *fvmruntime.ReusableCadenceRuntime { ret := _m.Called() - var r0 *runtime.ReusableCadenceRuntime - if rf, ok := ret.Get(0).(func() *runtime.ReusableCadenceRuntime); ok { + if len(ret) == 0 { + panic("no return value specified for BorrowCadenceRuntime") + } + + var r0 *fvmruntime.ReusableCadenceRuntime + if rf, ok := ret.Get(0).(func() *fvmruntime.ReusableCadenceRuntime); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*runtime.ReusableCadenceRuntime) + r0 = ret.Get(0).(*fvmruntime.ReusableCadenceRuntime) } } @@ -256,6 +274,10 @@ func (_m *Environment) BorrowCadenceRuntime() *runtime.ReusableCadenceRuntime { func (_m *Environment) CheckPayerBalanceAndGetMaxTxFees(payer flow.Address, inclusionEffort uint64, executionEffort uint64) (cadence.Value, error) { ret := _m.Called(payer, inclusionEffort, executionEffort) + if len(ret) == 0 { + panic("no return value specified for CheckPayerBalanceAndGetMaxTxFees") + } + var r0 cadence.Value var r1 error if rf, ok := ret.Get(0).(func(flow.Address, uint64, uint64) (cadence.Value, error)); ok { @@ -278,10 +300,32 @@ func (_m *Environment) CheckPayerBalanceAndGetMaxTxFees(payer flow.Address, incl return r0, r1 } -// ComputationIntensities provides a mock function with given fields: +// ComputationAvailable provides a mock function with given fields: _a0 +func (_m *Environment) ComputationAvailable(_a0 common.ComputationUsage) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ComputationAvailable") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(common.ComputationUsage) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// ComputationIntensities provides a mock function with no fields func (_m *Environment) ComputationIntensities() meter.MeteredComputationIntensities { ret := _m.Called() + if 
len(ret) == 0 { + panic("no return value specified for ComputationIntensities") + } + var r0 meter.MeteredComputationIntensities if rf, ok := ret.Get(0).(func() meter.MeteredComputationIntensities); ok { r0 = rf() @@ -294,10 +338,14 @@ func (_m *Environment) ComputationIntensities() meter.MeteredComputationIntensit return r0 } -// ComputationUsed provides a mock function with given fields: +// ComputationUsed provides a mock function with no fields func (_m *Environment) ComputationUsed() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ComputationUsed") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -318,10 +366,14 @@ func (_m *Environment) ComputationUsed() (uint64, error) { return r0, r1 } -// ConvertedServiceEvents provides a mock function with given fields: +// ConvertedServiceEvents provides a mock function with no fields func (_m *Environment) ConvertedServiceEvents() flow.ServiceEventList { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ConvertedServiceEvents") + } + var r0 flow.ServiceEventList if rf, ok := ret.Get(0).(func() flow.ServiceEventList); ok { r0 = rf() @@ -335,23 +387,27 @@ func (_m *Environment) ConvertedServiceEvents() flow.ServiceEventList { } // CreateAccount provides a mock function with given fields: payer -func (_m *Environment) CreateAccount(payer common.Address) (common.Address, error) { +func (_m *Environment) CreateAccount(payer runtime.Address) (runtime.Address, error) { ret := _m.Called(payer) - var r0 common.Address + if len(ret) == 0 { + panic("no return value specified for CreateAccount") + } + + var r0 runtime.Address var r1 error - if rf, ok := ret.Get(0).(func(common.Address) (common.Address, error)); ok { + if rf, ok := ret.Get(0).(func(runtime.Address) (runtime.Address, error)); ok { return rf(payer) } - if rf, ok := ret.Get(0).(func(common.Address) common.Address); ok { + if rf, ok := ret.Get(0).(func(runtime.Address) runtime.Address); ok { r0 = rf(payer) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Address) + r0 = ret.Get(0).(runtime.Address) } } - if rf, ok := ret.Get(1).(func(common.Address) error); ok { + if rf, ok := ret.Get(1).(func(runtime.Address) error); ok { r1 = rf(payer) } else { r1 = ret.Error(1) @@ -364,6 +420,10 @@ func (_m *Environment) CreateAccount(payer common.Address) (common.Address, erro func (_m *Environment) DecodeArgument(argument []byte, argumentType cadence.Type) (cadence.Value, error) { ret := _m.Called(argument, argumentType) + if len(ret) == 0 { + panic("no return value specified for DecodeArgument") + } + var r0 cadence.Value var r1 error if rf, ok := ret.Get(0).(func([]byte, cadence.Type) (cadence.Value, error)); ok { @@ -390,6 +450,10 @@ func (_m *Environment) DecodeArgument(argument []byte, argumentType cadence.Type func (_m *Environment) DeductTransactionFees(payer flow.Address, inclusionEffort uint64, executionEffort uint64) (cadence.Value, error) { ret := _m.Called(payer, inclusionEffort, executionEffort) + if len(ret) == 0 { + panic("no return value specified for DeductTransactionFees") + } + var r0 cadence.Value var r1 error if rf, ok := ret.Get(0).(func(flow.Address, uint64, uint64) (cadence.Value, error)); ok { @@ -412,10 +476,24 @@ func (_m *Environment) DeductTransactionFees(payer flow.Address, inclusionEffort return r0, r1 } +// EVMBlockExecuted provides a mock function with given fields: txCount, totalGasUsed, totalSupplyInFlow +func (_m *Environment) 
EVMBlockExecuted(txCount int, totalGasUsed uint64, totalSupplyInFlow float64) { + _m.Called(txCount, totalGasUsed, totalSupplyInFlow) +} + +// EVMTransactionExecuted provides a mock function with given fields: gasUsed, isDirectCall, failed +func (_m *Environment) EVMTransactionExecuted(gasUsed uint64, isDirectCall bool, failed bool) { + _m.Called(gasUsed, isDirectCall, failed) +} + // EmitEvent provides a mock function with given fields: _a0 func (_m *Environment) EmitEvent(_a0 cadence.Event) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for EmitEvent") + } + var r0 error if rf, ok := ret.Get(0).(func(cadence.Event) error); ok { r0 = rf(_a0) @@ -426,10 +504,14 @@ func (_m *Environment) EmitEvent(_a0 cadence.Event) error { return r0 } -// Events provides a mock function with given fields: +// Events provides a mock function with no fields func (_m *Environment) Events() flow.EventsList { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Events") + } + var r0 flow.EventsList if rf, ok := ret.Get(0).(func() flow.EventsList); ok { r0 = rf() @@ -442,10 +524,14 @@ func (_m *Environment) Events() flow.EventsList { return r0 } -// FlushPendingUpdates provides a mock function with given fields: +// FlushPendingUpdates provides a mock function with no fields func (_m *Environment) FlushPendingUpdates() (environment.ContractUpdates, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for FlushPendingUpdates") + } + var r0 environment.ContractUpdates var r1 error if rf, ok := ret.Get(0).(func() (environment.ContractUpdates, error)); ok { @@ -466,10 +552,42 @@ func (_m *Environment) FlushPendingUpdates() (environment.ContractUpdates, error return r0, r1 } -// GenerateUUID provides a mock function with given fields: +// GenerateAccountID provides a mock function with given fields: address +func (_m *Environment) GenerateAccountID(address common.Address) (uint64, error) { + ret := _m.Called(address) + + if len(ret) == 0 { + panic("no return value specified for GenerateAccountID") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { + return rf(address) + } + if rf, ok := ret.Get(0).(func(common.Address) uint64); ok { + r0 = rf(address) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(common.Address) error); ok { + r1 = rf(address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GenerateUUID provides a mock function with no fields func (_m *Environment) GenerateUUID() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GenerateUUID") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -494,6 +612,10 @@ func (_m *Environment) GenerateUUID() (uint64, error) { func (_m *Environment) GetAccount(address flow.Address) (*flow.Account, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(flow.Address) (*flow.Account, error)); ok { @@ -520,6 +642,10 @@ func (_m *Environment) GetAccount(address flow.Address) (*flow.Account, error) { func (_m *Environment) GetAccountAvailableBalance(address common.Address) (uint64, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for GetAccountAvailableBalance") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(common.Address) 
(uint64, error)); ok { @@ -544,6 +670,10 @@ func (_m *Environment) GetAccountAvailableBalance(address common.Address) (uint6 func (_m *Environment) GetAccountBalance(address common.Address) (uint64, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for GetAccountBalance") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { @@ -568,6 +698,10 @@ func (_m *Environment) GetAccountBalance(address common.Address) (uint64, error) func (_m *Environment) GetAccountContractCode(location common.AddressLocation) ([]byte, error) { ret := _m.Called(location) + if len(ret) == 0 { + panic("no return value specified for GetAccountContractCode") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(common.AddressLocation) ([]byte, error)); ok { @@ -591,15 +725,19 @@ func (_m *Environment) GetAccountContractCode(location common.AddressLocation) ( } // GetAccountContractNames provides a mock function with given fields: address -func (_m *Environment) GetAccountContractNames(address common.Address) ([]string, error) { +func (_m *Environment) GetAccountContractNames(address runtime.Address) ([]string, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for GetAccountContractNames") + } + var r0 []string var r1 error - if rf, ok := ret.Get(0).(func(common.Address) ([]string, error)); ok { + if rf, ok := ret.Get(0).(func(runtime.Address) ([]string, error)); ok { return rf(address) } - if rf, ok := ret.Get(0).(func(common.Address) []string); ok { + if rf, ok := ret.Get(0).(func(runtime.Address) []string); ok { r0 = rf(address) } else { if ret.Get(0) != nil { @@ -607,7 +745,7 @@ func (_m *Environment) GetAccountContractNames(address common.Address) ([]string } } - if rf, ok := ret.Get(1).(func(common.Address) error); ok { + if rf, ok := ret.Get(1).(func(runtime.Address) error); ok { r1 = rf(address) } else { r1 = ret.Error(1) @@ -617,23 +755,27 @@ func (_m *Environment) GetAccountContractNames(address common.Address) ([]string } // GetAccountKey provides a mock function with given fields: address, index -func (_m *Environment) GetAccountKey(address common.Address, index int) (*stdlib.AccountKey, error) { +func (_m *Environment) GetAccountKey(address runtime.Address, index uint32) (*runtime.AccountKey, error) { ret := _m.Called(address, index) - var r0 *stdlib.AccountKey + if len(ret) == 0 { + panic("no return value specified for GetAccountKey") + } + + var r0 *runtime.AccountKey var r1 error - if rf, ok := ret.Get(0).(func(common.Address, int) (*stdlib.AccountKey, error)); ok { + if rf, ok := ret.Get(0).(func(runtime.Address, uint32) (*runtime.AccountKey, error)); ok { return rf(address, index) } - if rf, ok := ret.Get(0).(func(common.Address, int) *stdlib.AccountKey); ok { + if rf, ok := ret.Get(0).(func(runtime.Address, uint32) *runtime.AccountKey); ok { r0 = rf(address, index) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*stdlib.AccountKey) + r0 = ret.Get(0).(*runtime.AccountKey) } } - if rf, ok := ret.Get(1).(func(common.Address, int) error); ok { + if rf, ok := ret.Get(1).(func(runtime.Address, uint32) error); ok { r1 = rf(address, index) } else { r1 = ret.Error(1) @@ -642,20 +784,54 @@ func (_m *Environment) GetAccountKey(address common.Address, index int) (*stdlib return r0, r1 } +// GetAccountKeys provides a mock function with given fields: address +func (_m *Environment) GetAccountKeys(address flow.Address) ([]flow.AccountPublicKey, error) { + ret := 
_m.Called(address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeys") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(flow.Address) ([]flow.AccountPublicKey, error)); ok { + return rf(address) + } + if rf, ok := ret.Get(0).(func(flow.Address) []flow.AccountPublicKey); ok { + r0 = rf(address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(flow.Address) error); ok { + r1 = rf(address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetBlockAtHeight provides a mock function with given fields: height -func (_m *Environment) GetBlockAtHeight(height uint64) (stdlib.Block, bool, error) { +func (_m *Environment) GetBlockAtHeight(height uint64) (runtime.Block, bool, error) { ret := _m.Called(height) - var r0 stdlib.Block + if len(ret) == 0 { + panic("no return value specified for GetBlockAtHeight") + } + + var r0 runtime.Block var r1 bool var r2 error - if rf, ok := ret.Get(0).(func(uint64) (stdlib.Block, bool, error)); ok { + if rf, ok := ret.Get(0).(func(uint64) (runtime.Block, bool, error)); ok { return rf(height) } - if rf, ok := ret.Get(0).(func(uint64) stdlib.Block); ok { + if rf, ok := ret.Get(0).(func(uint64) runtime.Block); ok { r0 = rf(height) } else { - r0 = ret.Get(0).(stdlib.Block) + r0 = ret.Get(0).(runtime.Block) } if rf, ok := ret.Get(1).(func(uint64) bool); ok { @@ -674,15 +850,19 @@ func (_m *Environment) GetBlockAtHeight(height uint64) (stdlib.Block, bool, erro } // GetCode provides a mock function with given fields: location -func (_m *Environment) GetCode(location common.Location) ([]byte, error) { +func (_m *Environment) GetCode(location runtime.Location) ([]byte, error) { ret := _m.Called(location) + if len(ret) == 0 { + panic("no return value specified for GetCode") + } + var r0 []byte var r1 error - if rf, ok := ret.Get(0).(func(common.Location) ([]byte, error)); ok { + if rf, ok := ret.Get(0).(func(runtime.Location) ([]byte, error)); ok { return rf(location) } - if rf, ok := ret.Get(0).(func(common.Location) []byte); ok { + if rf, ok := ret.Get(0).(func(runtime.Location) []byte); ok { r0 = rf(location) } else { if ret.Get(0) != nil { @@ -690,7 +870,7 @@ func (_m *Environment) GetCode(location common.Location) ([]byte, error) { } } - if rf, ok := ret.Get(1).(func(common.Location) error); ok { + if rf, ok := ret.Get(1).(func(runtime.Location) error); ok { r1 = rf(location) } else { r1 = ret.Error(1) @@ -699,10 +879,14 @@ func (_m *Environment) GetCode(location common.Location) ([]byte, error) { return r0, r1 } -// GetCurrentBlockHeight provides a mock function with given fields: +// GetCurrentBlockHeight provides a mock function with no fields func (_m *Environment) GetCurrentBlockHeight() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetCurrentBlockHeight") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -723,40 +907,28 @@ func (_m *Environment) GetCurrentBlockHeight() (uint64, error) { return r0, r1 } -// GetInterpreterSharedState provides a mock function with given fields: -func (_m *Environment) GetInterpreterSharedState() *interpreter.SharedState { - ret := _m.Called() - - var r0 *interpreter.SharedState - if rf, ok := ret.Get(0).(func() *interpreter.SharedState); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*interpreter.SharedState) - } - } - - return r0 -} - // GetOrLoadProgram provides a mock 
function with given fields: location, load -func (_m *Environment) GetOrLoadProgram(location common.Location, load func() (*interpreter.Program, error)) (*interpreter.Program, error) { +func (_m *Environment) GetOrLoadProgram(location runtime.Location, load func() (*runtime.Program, error)) (*runtime.Program, error) { ret := _m.Called(location, load) - var r0 *interpreter.Program + if len(ret) == 0 { + panic("no return value specified for GetOrLoadProgram") + } + + var r0 *runtime.Program var r1 error - if rf, ok := ret.Get(0).(func(common.Location, func() (*interpreter.Program, error)) (*interpreter.Program, error)); ok { + if rf, ok := ret.Get(0).(func(runtime.Location, func() (*runtime.Program, error)) (*runtime.Program, error)); ok { return rf(location, load) } - if rf, ok := ret.Get(0).(func(common.Location, func() (*interpreter.Program, error)) *interpreter.Program); ok { + if rf, ok := ret.Get(0).(func(runtime.Location, func() (*runtime.Program, error)) *runtime.Program); ok { r0 = rf(location, load) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*interpreter.Program) + r0 = ret.Get(0).(*runtime.Program) } } - if rf, ok := ret.Get(1).(func(common.Location, func() (*interpreter.Program, error)) error); ok { + if rf, ok := ret.Get(1).(func(runtime.Location, func() (*runtime.Program, error)) error); ok { r1 = rf(location, load) } else { r1 = ret.Error(1) @@ -765,20 +937,24 @@ func (_m *Environment) GetOrLoadProgram(location common.Location, load func() (* return r0, r1 } -// GetSigningAccounts provides a mock function with given fields: -func (_m *Environment) GetSigningAccounts() ([]common.Address, error) { +// GetSigningAccounts provides a mock function with no fields +func (_m *Environment) GetSigningAccounts() ([]runtime.Address, error) { ret := _m.Called() - var r0 []common.Address + if len(ret) == 0 { + panic("no return value specified for GetSigningAccounts") + } + + var r0 []runtime.Address var r1 error - if rf, ok := ret.Get(0).(func() ([]common.Address, error)); ok { + if rf, ok := ret.Get(0).(func() ([]runtime.Address, error)); ok { return rf() } - if rf, ok := ret.Get(0).(func() []common.Address); ok { + if rf, ok := ret.Get(0).(func() []runtime.Address); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]common.Address) + r0 = ret.Get(0).([]runtime.Address) } } @@ -792,21 +968,25 @@ func (_m *Environment) GetSigningAccounts() ([]common.Address, error) { } // GetStorageCapacity provides a mock function with given fields: address -func (_m *Environment) GetStorageCapacity(address common.Address) (uint64, error) { +func (_m *Environment) GetStorageCapacity(address runtime.Address) (uint64, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for GetStorageCapacity") + } + var r0 uint64 var r1 error - if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { + if rf, ok := ret.Get(0).(func(runtime.Address) (uint64, error)); ok { return rf(address) } - if rf, ok := ret.Get(0).(func(common.Address) uint64); ok { + if rf, ok := ret.Get(0).(func(runtime.Address) uint64); ok { r0 = rf(address) } else { r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func(common.Address) error); ok { + if rf, ok := ret.Get(1).(func(runtime.Address) error); ok { r1 = rf(address) } else { r1 = ret.Error(1) @@ -816,21 +996,25 @@ func (_m *Environment) GetStorageCapacity(address common.Address) (uint64, error } // GetStorageUsed provides a mock function with given fields: address -func (_m *Environment) GetStorageUsed(address 
common.Address) (uint64, error) { +func (_m *Environment) GetStorageUsed(address runtime.Address) (uint64, error) { ret := _m.Called(address) + if len(ret) == 0 { + panic("no return value specified for GetStorageUsed") + } + var r0 uint64 var r1 error - if rf, ok := ret.Get(0).(func(common.Address) (uint64, error)); ok { + if rf, ok := ret.Get(0).(func(runtime.Address) (uint64, error)); ok { return rf(address) } - if rf, ok := ret.Get(0).(func(common.Address) uint64); ok { + if rf, ok := ret.Get(0).(func(runtime.Address) uint64); ok { r0 = rf(address) } else { r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func(common.Address) error); ok { + if rf, ok := ret.Get(1).(func(runtime.Address) error); ok { r1 = rf(address) } else { r1 = ret.Error(1) @@ -843,6 +1027,10 @@ func (_m *Environment) GetStorageUsed(address common.Address) (uint64, error) { func (_m *Environment) GetValue(owner []byte, key []byte) ([]byte, error) { ret := _m.Called(owner, key) + if len(ret) == 0 { + panic("no return value specified for GetValue") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func([]byte, []byte) ([]byte, error)); ok { @@ -866,15 +1054,19 @@ func (_m *Environment) GetValue(owner []byte, key []byte) ([]byte, error) { } // Hash provides a mock function with given fields: data, tag, hashAlgorithm -func (_m *Environment) Hash(data []byte, tag string, hashAlgorithm sema.HashAlgorithm) ([]byte, error) { +func (_m *Environment) Hash(data []byte, tag string, hashAlgorithm runtime.HashAlgorithm) ([]byte, error) { ret := _m.Called(data, tag, hashAlgorithm) + if len(ret) == 0 { + panic("no return value specified for Hash") + } + var r0 []byte var r1 error - if rf, ok := ret.Get(0).(func([]byte, string, sema.HashAlgorithm) ([]byte, error)); ok { + if rf, ok := ret.Get(0).(func([]byte, string, runtime.HashAlgorithm) ([]byte, error)); ok { return rf(data, tag, hashAlgorithm) } - if rf, ok := ret.Get(0).(func([]byte, string, sema.HashAlgorithm) []byte); ok { + if rf, ok := ret.Get(0).(func([]byte, string, runtime.HashAlgorithm) []byte); ok { r0 = rf(data, tag, hashAlgorithm) } else { if ret.Get(0) != nil { @@ -882,7 +1074,7 @@ func (_m *Environment) Hash(data []byte, tag string, hashAlgorithm sema.HashAlgo } } - if rf, ok := ret.Get(1).(func([]byte, string, sema.HashAlgorithm) error); ok { + if rf, ok := ret.Get(1).(func([]byte, string, runtime.HashAlgorithm) error); ok { r1 = rf(data, tag, hashAlgorithm) } else { r1 = ret.Error(1) @@ -895,6 +1087,10 @@ func (_m *Environment) Hash(data []byte, tag string, hashAlgorithm sema.HashAlgo func (_m *Environment) ImplementationDebugLog(message string) error { ret := _m.Called(message) + if len(ret) == 0 { + panic("no return value specified for ImplementationDebugLog") + } + var r0 error if rf, ok := ret.Get(0).(func(string) error); ok { r0 = rf(message) @@ -905,23 +1101,29 @@ func (_m *Environment) ImplementationDebugLog(message string) error { return r0 } -// InteractionUsed provides a mock function with given fields: -func (_m *Environment) InteractionUsed() (uint64, error) { - ret := _m.Called() +// Invoke provides a mock function with given fields: spec, arguments +func (_m *Environment) Invoke(spec environment.ContractFunctionSpec, arguments []cadence.Value) (cadence.Value, error) { + ret := _m.Called(spec, arguments) - var r0 uint64 + if len(ret) == 0 { + panic("no return value specified for Invoke") + } + + var r0 cadence.Value var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() + if rf, ok := 
ret.Get(0).(func(environment.ContractFunctionSpec, []cadence.Value) (cadence.Value, error)); ok { + return rf(spec, arguments) } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(environment.ContractFunctionSpec, []cadence.Value) cadence.Value); ok { + r0 = rf(spec, arguments) } else { - r0 = ret.Get(0).(uint64) + if ret.Get(0) != nil { + r0 = ret.Get(0).(cadence.Value) + } } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(environment.ContractFunctionSpec, []cadence.Value) error); ok { + r1 = rf(spec, arguments) } else { r1 = ret.Error(1) } @@ -929,10 +1131,14 @@ func (_m *Environment) InteractionUsed() (uint64, error) { return r0, r1 } -// IsServiceAccountAuthorizer provides a mock function with given fields: +// IsServiceAccountAuthorizer provides a mock function with no fields func (_m *Environment) IsServiceAccountAuthorizer() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsServiceAccountAuthorizer") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -943,10 +1149,14 @@ func (_m *Environment) IsServiceAccountAuthorizer() bool { return r0 } -// LimitAccountStorage provides a mock function with given fields: +// LimitAccountStorage provides a mock function with no fields func (_m *Environment) LimitAccountStorage() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LimitAccountStorage") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -957,26 +1167,32 @@ func (_m *Environment) LimitAccountStorage() bool { return r0 } -// Logger provides a mock function with given fields: -func (_m *Environment) Logger() *zerolog.Logger { +// Logger provides a mock function with no fields +func (_m *Environment) Logger() zerolog.Logger { ret := _m.Called() - var r0 *zerolog.Logger - if rf, ok := ret.Get(0).(func() *zerolog.Logger); ok { + if len(ret) == 0 { + panic("no return value specified for Logger") + } + + var r0 zerolog.Logger + if rf, ok := ret.Get(0).(func() zerolog.Logger); ok { r0 = rf() } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*zerolog.Logger) - } + r0 = ret.Get(0).(zerolog.Logger) } return r0 } -// Logs provides a mock function with given fields: +// Logs provides a mock function with no fields func (_m *Environment) Logs() []string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Logs") + } + var r0 []string if rf, ok := ret.Get(0).(func() []string); ok { r0 = rf() @@ -989,10 +1205,14 @@ func (_m *Environment) Logs() []string { return r0 } -// MemoryUsed provides a mock function with given fields: +// MemoryUsed provides a mock function with no fields func (_m *Environment) MemoryUsed() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for MemoryUsed") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -1013,13 +1233,17 @@ func (_m *Environment) MemoryUsed() (uint64, error) { return r0, r1 } -// MeterComputation provides a mock function with given fields: operationType, intensity -func (_m *Environment) MeterComputation(operationType common.ComputationKind, intensity uint) error { - ret := _m.Called(operationType, intensity) +// MeterComputation provides a mock function with given fields: usage +func (_m *Environment) MeterComputation(usage common.ComputationUsage) error { + ret := _m.Called(usage) + + if len(ret) == 0 { + panic("no return value specified for 
MeterComputation") + } var r0 error - if rf, ok := ret.Get(0).(func(common.ComputationKind, uint) error); ok { - r0 = rf(operationType, intensity) + if rf, ok := ret.Get(0).(func(common.ComputationUsage) error); ok { + r0 = rf(usage) } else { r0 = ret.Error(0) } @@ -1031,6 +1255,10 @@ func (_m *Environment) MeterComputation(operationType common.ComputationKind, in func (_m *Environment) MeterEmittedEvent(byteSize uint64) error { ret := _m.Called(byteSize) + if len(ret) == 0 { + panic("no return value specified for MeterEmittedEvent") + } + var r0 error if rf, ok := ret.Get(0).(func(uint64) error); ok { r0 = rf(byteSize) @@ -1045,6 +1273,10 @@ func (_m *Environment) MeterEmittedEvent(byteSize uint64) error { func (_m *Environment) MeterMemory(usage common.MemoryUsage) error { ret := _m.Called(usage) + if len(ret) == 0 { + panic("no return value specified for MeterMemory") + } + var r0 error if rf, ok := ret.Get(0).(func(common.MemoryUsage) error); ok { r0 = rf(usage) @@ -1055,10 +1287,42 @@ func (_m *Environment) MeterMemory(usage common.MemoryUsage) error { return r0 } +// MinimumRequiredVersion provides a mock function with no fields +func (_m *Environment) MinimumRequiredVersion() (string, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MinimumRequiredVersion") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func() (string, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ProgramLog provides a mock function with given fields: _a0 func (_m *Environment) ProgramLog(_a0 string) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ProgramLog") + } + var r0 error if rf, ok := ret.Get(0).(func(string) error); ok { r0 = rf(_a0) @@ -1069,15 +1333,97 @@ func (_m *Environment) ProgramLog(_a0 string) error { return r0 } -// RecordTrace provides a mock function with given fields: operation, location, duration, attrs -func (_m *Environment) RecordTrace(operation string, location common.Location, duration time.Duration, attrs []attribute.KeyValue) { - _m.Called(operation, location, duration, attrs) +// RandomSourceHistory provides a mock function with no fields +func (_m *Environment) RandomSourceHistory() ([]byte, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RandomSourceHistory") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func() ([]byte, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ReadRandom provides a mock function with given fields: _a0 +func (_m *Environment) ReadRandom(_a0 []byte) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ReadRandom") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]byte) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RecordTrace provides a mock function with given fields: operation, duration, attrs +func (_m *Environment) RecordTrace(operation string, duration time.Duration, attrs []attribute.KeyValue) { + _m.Called(operation, duration, attrs) +} + +// RecoverProgram provides a 
mock function with given fields: program, location +func (_m *Environment) RecoverProgram(program *ast.Program, location common.Location) ([]byte, error) { + ret := _m.Called(program, location) + + if len(ret) == 0 { + panic("no return value specified for RecoverProgram") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(*ast.Program, common.Location) ([]byte, error)); ok { + return rf(program, location) + } + if rf, ok := ret.Get(0).(func(*ast.Program, common.Location) []byte); ok { + r0 = rf(program, location) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(*ast.Program, common.Location) error); ok { + r1 = rf(program, location) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // RemoveAccountContractCode provides a mock function with given fields: location func (_m *Environment) RemoveAccountContractCode(location common.AddressLocation) error { ret := _m.Called(location) + if len(ret) == 0 { + panic("no return value specified for RemoveAccountContractCode") + } + var r0 error if rf, ok := ret.Get(0).(func(common.AddressLocation) error); ok { r0 = rf(location) @@ -1088,29 +1434,33 @@ func (_m *Environment) RemoveAccountContractCode(location common.AddressLocation return r0 } -// Reset provides a mock function with given fields: +// Reset provides a mock function with no fields func (_m *Environment) Reset() { _m.Called() } // ResolveLocation provides a mock function with given fields: identifiers, location -func (_m *Environment) ResolveLocation(identifiers []ast.Identifier, location common.Location) ([]sema.ResolvedLocation, error) { +func (_m *Environment) ResolveLocation(identifiers []runtime.Identifier, location runtime.Location) ([]runtime.ResolvedLocation, error) { ret := _m.Called(identifiers, location) - var r0 []sema.ResolvedLocation + if len(ret) == 0 { + panic("no return value specified for ResolveLocation") + } + + var r0 []runtime.ResolvedLocation var r1 error - if rf, ok := ret.Get(0).(func([]ast.Identifier, common.Location) ([]sema.ResolvedLocation, error)); ok { + if rf, ok := ret.Get(0).(func([]runtime.Identifier, runtime.Location) ([]runtime.ResolvedLocation, error)); ok { return rf(identifiers, location) } - if rf, ok := ret.Get(0).(func([]ast.Identifier, common.Location) []sema.ResolvedLocation); ok { + if rf, ok := ret.Get(0).(func([]runtime.Identifier, runtime.Location) []runtime.ResolvedLocation); ok { r0 = rf(identifiers, location) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]sema.ResolvedLocation) + r0 = ret.Get(0).([]runtime.ResolvedLocation) } } - if rf, ok := ret.Get(1).(func([]ast.Identifier, common.Location) error); ok { + if rf, ok := ret.Get(1).(func([]runtime.Identifier, runtime.Location) error); ok { r1 = rf(identifiers, location) } else { r1 = ret.Error(1) @@ -1125,28 +1475,32 @@ func (_m *Environment) ResourceOwnerChanged(_a0 *interpreter.Interpreter, resour } // ReturnCadenceRuntime provides a mock function with given fields: _a0 -func (_m *Environment) ReturnCadenceRuntime(_a0 *runtime.ReusableCadenceRuntime) { +func (_m *Environment) ReturnCadenceRuntime(_a0 *fvmruntime.ReusableCadenceRuntime) { _m.Called(_a0) } // RevokeAccountKey provides a mock function with given fields: address, index -func (_m *Environment) RevokeAccountKey(address common.Address, index int) (*stdlib.AccountKey, error) { +func (_m *Environment) RevokeAccountKey(address runtime.Address, index uint32) (*runtime.AccountKey, error) { ret := _m.Called(address, index) - var r0 
*stdlib.AccountKey + if len(ret) == 0 { + panic("no return value specified for RevokeAccountKey") + } + + var r0 *runtime.AccountKey var r1 error - if rf, ok := ret.Get(0).(func(common.Address, int) (*stdlib.AccountKey, error)); ok { + if rf, ok := ret.Get(0).(func(runtime.Address, uint32) (*runtime.AccountKey, error)); ok { return rf(address, index) } - if rf, ok := ret.Get(0).(func(common.Address, int) *stdlib.AccountKey); ok { + if rf, ok := ret.Get(0).(func(runtime.Address, uint32) *runtime.AccountKey); ok { r0 = rf(address, index) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*stdlib.AccountKey) + r0 = ret.Get(0).(*runtime.AccountKey) } } - if rf, ok := ret.Get(1).(func(common.Address, int) error); ok { + if rf, ok := ret.Get(1).(func(runtime.Address, uint32) error); ok { r1 = rf(address, index) } else { r1 = ret.Error(1) @@ -1155,36 +1509,49 @@ func (_m *Environment) RevokeAccountKey(address common.Address, index int) (*std return r0, r1 } -// RevokeEncodedAccountKey provides a mock function with given fields: address, index -func (_m *Environment) RevokeEncodedAccountKey(address common.Address, index int) ([]byte, error) { - ret := _m.Called(address, index) +// RunWithMeteringDisabled provides a mock function with given fields: f +func (_m *Environment) RunWithMeteringDisabled(f func()) { + _m.Called(f) +} - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(common.Address, int) ([]byte, error)); ok { - return rf(address, index) - } - if rf, ok := ret.Get(0).(func(common.Address, int) []byte); ok { - r0 = rf(address, index) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } +// RuntimeSetNumberOfAccounts provides a mock function with given fields: count +func (_m *Environment) RuntimeSetNumberOfAccounts(count uint64) { + _m.Called(count) +} - if rf, ok := ret.Get(1).(func(common.Address, int) error); ok { - r1 = rf(address, index) - } else { - r1 = ret.Error(1) - } +// RuntimeTransactionChecked provides a mock function with given fields: _a0 +func (_m *Environment) RuntimeTransactionChecked(_a0 time.Duration) { + _m.Called(_a0) +} - return r0, r1 +// RuntimeTransactionInterpreted provides a mock function with given fields: _a0 +func (_m *Environment) RuntimeTransactionInterpreted(_a0 time.Duration) { + _m.Called(_a0) +} + +// RuntimeTransactionParsed provides a mock function with given fields: _a0 +func (_m *Environment) RuntimeTransactionParsed(_a0 time.Duration) { + _m.Called(_a0) +} + +// RuntimeTransactionProgramsCacheHit provides a mock function with no fields +func (_m *Environment) RuntimeTransactionProgramsCacheHit() { + _m.Called() +} + +// RuntimeTransactionProgramsCacheMiss provides a mock function with no fields +func (_m *Environment) RuntimeTransactionProgramsCacheMiss() { + _m.Called() } -// ServiceEvents provides a mock function with given fields: +// ServiceEvents provides a mock function with no fields func (_m *Environment) ServiceEvents() flow.EventsList { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ServiceEvents") + } + var r0 flow.EventsList if rf, ok := ret.Get(0).(func() flow.EventsList); ok { r0 = rf() @@ -1197,15 +1564,19 @@ func (_m *Environment) ServiceEvents() flow.EventsList { return r0 } -// SetInterpreterSharedState provides a mock function with given fields: state -func (_m *Environment) SetInterpreterSharedState(state *interpreter.SharedState) { - _m.Called(state) +// SetNumberOfDeployedCOAs provides a mock function with given fields: count +func (_m *Environment) 
SetNumberOfDeployedCOAs(count uint64) { + _m.Called(count) } // SetValue provides a mock function with given fields: owner, key, value func (_m *Environment) SetValue(owner []byte, key []byte, value []byte) error { ret := _m.Called(owner, key, value) + if len(ret) == 0 { + panic("no return value specified for SetValue") + } + var r0 error if rf, ok := ret.Get(0).(func([]byte, []byte, []byte) error); ok { r0 = rf(owner, key, value) @@ -1227,6 +1598,10 @@ func (_m *Environment) StartChildSpan(name trace.SpanName, options ...oteltrace. _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for StartChildSpan") + } + var r0 tracing.TracerSpan if rf, ok := ret.Get(0).(func(trace.SpanName, ...oteltrace.SpanStartOption) tracing.TracerSpan); ok { r0 = rf(name, options...) @@ -1237,10 +1612,14 @@ func (_m *Environment) StartChildSpan(name trace.SpanName, options ...oteltrace. return r0 } -// TotalEmittedEventBytes provides a mock function with given fields: +// TotalEmittedEventBytes provides a mock function with no fields func (_m *Environment) TotalEmittedEventBytes() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TotalEmittedEventBytes") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -1251,10 +1630,14 @@ func (_m *Environment) TotalEmittedEventBytes() uint64 { return r0 } -// TransactionFeesEnabled provides a mock function with given fields: +// TransactionFeesEnabled provides a mock function with no fields func (_m *Environment) TransactionFeesEnabled() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TransactionFeesEnabled") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -1265,10 +1648,14 @@ func (_m *Environment) TransactionFeesEnabled() bool { return r0 } -// TxID provides a mock function with given fields: +// TxID provides a mock function with no fields func (_m *Environment) TxID() flow.Identifier { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TxID") + } + var r0 flow.Identifier if rf, ok := ret.Get(0).(func() flow.Identifier); ok { r0 = rf() @@ -1281,10 +1668,14 @@ func (_m *Environment) TxID() flow.Identifier { return r0 } -// TxIndex provides a mock function with given fields: +// TxIndex provides a mock function with no fields func (_m *Environment) TxIndex() uint32 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TxIndex") + } + var r0 uint32 if rf, ok := ret.Get(0).(func() uint32); ok { r0 = rf() @@ -1295,23 +1686,45 @@ func (_m *Environment) TxIndex() uint32 { return r0 } -// UnsafeRandom provides a mock function with given fields: -func (_m *Environment) UnsafeRandom() (uint64, error) { - ret := _m.Called() +// UpdateAccountContractCode provides a mock function with given fields: location, code +func (_m *Environment) UpdateAccountContractCode(location common.AddressLocation, code []byte) error { + ret := _m.Called(location, code) - var r0 uint64 + if len(ret) == 0 { + panic("no return value specified for UpdateAccountContractCode") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.AddressLocation, []byte) error); ok { + r0 = rf(location, code) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ValidateAccountCapabilitiesGet provides a mock function with given fields: context, locationRange, address, path, wantedBorrowType, capabilityBorrowType +func (_m *Environment) ValidateAccountCapabilitiesGet(context 
interpreter.AccountCapabilityGetValidationContext, locationRange interpreter.LocationRange, address interpreter.AddressValue, path interpreter.PathValue, wantedBorrowType *sema.ReferenceType, capabilityBorrowType *sema.ReferenceType) (bool, error) { + ret := _m.Called(context, locationRange, address, path, wantedBorrowType, capabilityBorrowType) + + if len(ret) == 0 { + panic("no return value specified for ValidateAccountCapabilitiesGet") + } + + var r0 bool var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() + if rf, ok := ret.Get(0).(func(interpreter.AccountCapabilityGetValidationContext, interpreter.LocationRange, interpreter.AddressValue, interpreter.PathValue, *sema.ReferenceType, *sema.ReferenceType) (bool, error)); ok { + return rf(context, locationRange, address, path, wantedBorrowType, capabilityBorrowType) } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(interpreter.AccountCapabilityGetValidationContext, interpreter.LocationRange, interpreter.AddressValue, interpreter.PathValue, *sema.ReferenceType, *sema.ReferenceType) bool); ok { + r0 = rf(context, locationRange, address, path, wantedBorrowType, capabilityBorrowType) } else { - r0 = ret.Get(0).(uint64) + r0 = ret.Get(0).(bool) } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(interpreter.AccountCapabilityGetValidationContext, interpreter.LocationRange, interpreter.AddressValue, interpreter.PathValue, *sema.ReferenceType, *sema.ReferenceType) error); ok { + r1 = rf(context, locationRange, address, path, wantedBorrowType, capabilityBorrowType) } else { r1 = ret.Error(1) } @@ -1319,26 +1732,44 @@ func (_m *Environment) UnsafeRandom() (uint64, error) { return r0, r1 } -// UpdateAccountContractCode provides a mock function with given fields: location, code -func (_m *Environment) UpdateAccountContractCode(location common.AddressLocation, code []byte) error { - ret := _m.Called(location, code) +// ValidateAccountCapabilitiesPublish provides a mock function with given fields: context, locationRange, address, path, capabilityBorrowType +func (_m *Environment) ValidateAccountCapabilitiesPublish(context interpreter.AccountCapabilityPublishValidationContext, locationRange interpreter.LocationRange, address interpreter.AddressValue, path interpreter.PathValue, capabilityBorrowType *interpreter.ReferenceStaticType) (bool, error) { + ret := _m.Called(context, locationRange, address, path, capabilityBorrowType) - var r0 error - if rf, ok := ret.Get(0).(func(common.AddressLocation, []byte) error); ok { - r0 = rf(location, code) + if len(ret) == 0 { + panic("no return value specified for ValidateAccountCapabilitiesPublish") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(interpreter.AccountCapabilityPublishValidationContext, interpreter.LocationRange, interpreter.AddressValue, interpreter.PathValue, *interpreter.ReferenceStaticType) (bool, error)); ok { + return rf(context, locationRange, address, path, capabilityBorrowType) + } + if rf, ok := ret.Get(0).(func(interpreter.AccountCapabilityPublishValidationContext, interpreter.LocationRange, interpreter.AddressValue, interpreter.PathValue, *interpreter.ReferenceStaticType) bool); ok { + r0 = rf(context, locationRange, address, path, capabilityBorrowType) } else { - r0 = ret.Error(0) + r0 = ret.Get(0).(bool) } - return r0 + if rf, ok := ret.Get(1).(func(interpreter.AccountCapabilityPublishValidationContext, interpreter.LocationRange, interpreter.AddressValue, 
interpreter.PathValue, *interpreter.ReferenceStaticType) error); ok { + r1 = rf(context, locationRange, address, path, capabilityBorrowType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // ValidatePublicKey provides a mock function with given fields: key -func (_m *Environment) ValidatePublicKey(key *stdlib.PublicKey) error { +func (_m *Environment) ValidatePublicKey(key *runtime.PublicKey) error { ret := _m.Called(key) + if len(ret) == 0 { + panic("no return value specified for ValidatePublicKey") + } + var r0 error - if rf, ok := ret.Get(0).(func(*stdlib.PublicKey) error); ok { + if rf, ok := ret.Get(0).(func(*runtime.PublicKey) error); ok { r0 = rf(key) } else { r0 = ret.Error(0) @@ -1351,6 +1782,10 @@ func (_m *Environment) ValidatePublicKey(key *stdlib.PublicKey) error { func (_m *Environment) ValueExists(owner []byte, key []byte) (bool, error) { ret := _m.Called(owner, key) + if len(ret) == 0 { + panic("no return value specified for ValueExists") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func([]byte, []byte) (bool, error)); ok { @@ -1372,21 +1807,25 @@ func (_m *Environment) ValueExists(owner []byte, key []byte) (bool, error) { } // VerifySignature provides a mock function with given fields: signature, tag, signedData, publicKey, signatureAlgorithm, hashAlgorithm -func (_m *Environment) VerifySignature(signature []byte, tag string, signedData []byte, publicKey []byte, signatureAlgorithm sema.SignatureAlgorithm, hashAlgorithm sema.HashAlgorithm) (bool, error) { +func (_m *Environment) VerifySignature(signature []byte, tag string, signedData []byte, publicKey []byte, signatureAlgorithm runtime.SignatureAlgorithm, hashAlgorithm runtime.HashAlgorithm) (bool, error) { ret := _m.Called(signature, tag, signedData, publicKey, signatureAlgorithm, hashAlgorithm) + if len(ret) == 0 { + panic("no return value specified for VerifySignature") + } + var r0 bool var r1 error - if rf, ok := ret.Get(0).(func([]byte, string, []byte, []byte, sema.SignatureAlgorithm, sema.HashAlgorithm) (bool, error)); ok { + if rf, ok := ret.Get(0).(func([]byte, string, []byte, []byte, runtime.SignatureAlgorithm, runtime.HashAlgorithm) (bool, error)); ok { return rf(signature, tag, signedData, publicKey, signatureAlgorithm, hashAlgorithm) } - if rf, ok := ret.Get(0).(func([]byte, string, []byte, []byte, sema.SignatureAlgorithm, sema.HashAlgorithm) bool); ok { + if rf, ok := ret.Get(0).(func([]byte, string, []byte, []byte, runtime.SignatureAlgorithm, runtime.HashAlgorithm) bool); ok { r0 = rf(signature, tag, signedData, publicKey, signatureAlgorithm, hashAlgorithm) } else { r0 = ret.Get(0).(bool) } - if rf, ok := ret.Get(1).(func([]byte, string, []byte, []byte, sema.SignatureAlgorithm, sema.HashAlgorithm) error); ok { + if rf, ok := ret.Get(1).(func([]byte, string, []byte, []byte, runtime.SignatureAlgorithm, runtime.HashAlgorithm) error); ok { r1 = rf(signature, tag, signedData, publicKey, signatureAlgorithm, hashAlgorithm) } else { r1 = ret.Error(1) @@ -1395,13 +1834,12 @@ func (_m *Environment) VerifySignature(signature []byte, tag string, signedData return r0, r1 } -type mockConstructorTestingTNewEnvironment interface { +// NewEnvironment creates a new instance of Environment. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEnvironment(t interface { mock.TestingT Cleanup(func()) -} - -// NewEnvironment creates a new instance of Environment. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEnvironment(t mockConstructorTestingTNewEnvironment) *Environment { +}) *Environment { mock := &Environment{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/event_emitter.go b/fvm/environment/mock/event_emitter.go index 5ff23d14d71..3bc17f8bf14 100644 --- a/fvm/environment/mock/event_emitter.go +++ b/fvm/environment/mock/event_emitter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -15,10 +15,14 @@ type EventEmitter struct { mock.Mock } -// ConvertedServiceEvents provides a mock function with given fields: +// ConvertedServiceEvents provides a mock function with no fields func (_m *EventEmitter) ConvertedServiceEvents() flow.ServiceEventList { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ConvertedServiceEvents") + } + var r0 flow.ServiceEventList if rf, ok := ret.Get(0).(func() flow.ServiceEventList); ok { r0 = rf() @@ -35,6 +39,10 @@ func (_m *EventEmitter) ConvertedServiceEvents() flow.ServiceEventList { func (_m *EventEmitter) EmitEvent(event cadence.Event) error { ret := _m.Called(event) + if len(ret) == 0 { + panic("no return value specified for EmitEvent") + } + var r0 error if rf, ok := ret.Get(0).(func(cadence.Event) error); ok { r0 = rf(event) @@ -45,10 +53,14 @@ func (_m *EventEmitter) EmitEvent(event cadence.Event) error { return r0 } -// Events provides a mock function with given fields: +// Events provides a mock function with no fields func (_m *EventEmitter) Events() flow.EventsList { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Events") + } + var r0 flow.EventsList if rf, ok := ret.Get(0).(func() flow.EventsList); ok { r0 = rf() @@ -61,15 +73,19 @@ func (_m *EventEmitter) Events() flow.EventsList { return r0 } -// Reset provides a mock function with given fields: +// Reset provides a mock function with no fields func (_m *EventEmitter) Reset() { _m.Called() } -// ServiceEvents provides a mock function with given fields: +// ServiceEvents provides a mock function with no fields func (_m *EventEmitter) ServiceEvents() flow.EventsList { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ServiceEvents") + } + var r0 flow.EventsList if rf, ok := ret.Get(0).(func() flow.EventsList); ok { r0 = rf() @@ -82,13 +98,12 @@ func (_m *EventEmitter) ServiceEvents() flow.EventsList { return r0 } -type mockConstructorTestingTNewEventEmitter interface { +// NewEventEmitter creates a new instance of EventEmitter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventEmitter(t interface { mock.TestingT Cleanup(func()) -} - -// NewEventEmitter creates a new instance of EventEmitter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEventEmitter(t mockConstructorTestingTNewEventEmitter) *EventEmitter { +}) *EventEmitter { mock := &EventEmitter{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/event_encoder.go b/fvm/environment/mock/event_encoder.go index a57384f1662..40c429b83ba 100644 --- a/fvm/environment/mock/event_encoder.go +++ b/fvm/environment/mock/event_encoder.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
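The regenerated constructors above replace the named mockConstructorTestingT* interfaces with an inline `interface { mock.TestingT; Cleanup(func()) }`, and the new `len(ret) == 0` guards make unstubbed calls panic instead of silently returning zero values. A minimal test sketch of the pattern; the package alias, test name, and stubbed value are illustrative, not part of this diff:

```go
package mock_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	envmock "github.com/onflow/flow-go/fvm/environment/mock"
	"github.com/onflow/flow-go/model/flow"
)

func TestEventEmitterMock(t *testing.T) {
	// *testing.T satisfies the inline constructor interface, so
	// AssertExpectations is registered automatically via t.Cleanup.
	em := envmock.NewEventEmitter(t)

	// Without this stub, the regenerated method would panic with
	// "no return value specified for Events".
	em.On("Events").Return(flow.EventsList{})

	require.Empty(t, em.Events())
}
```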
package mock @@ -17,6 +17,10 @@ type EventEncoder struct { func (_m *EventEncoder) Encode(event cadence.Event) ([]byte, error) { ret := _m.Called(event) + if len(ret) == 0 { + panic("no return value specified for Encode") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(cadence.Event) ([]byte, error)); ok { @@ -39,13 +43,12 @@ func (_m *EventEncoder) Encode(event cadence.Event) ([]byte, error) { return r0, r1 } -type mockConstructorTestingTNewEventEncoder interface { +// NewEventEncoder creates a new instance of EventEncoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventEncoder(t interface { mock.TestingT Cleanup(func()) -} - -// NewEventEncoder creates a new instance of EventEncoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEventEncoder(t mockConstructorTestingTNewEventEncoder) *EventEncoder { +}) *EventEncoder { mock := &EventEncoder{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/evm_metrics_reporter.go b/fvm/environment/mock/evm_metrics_reporter.go new file mode 100644 index 00000000000..a509235f64e --- /dev/null +++ b/fvm/environment/mock/evm_metrics_reporter.go @@ -0,0 +1,39 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// EVMMetricsReporter is an autogenerated mock type for the EVMMetricsReporter type +type EVMMetricsReporter struct { + mock.Mock +} + +// EVMBlockExecuted provides a mock function with given fields: txCount, totalGasUsed, totalSupplyInFlow +func (_m *EVMMetricsReporter) EVMBlockExecuted(txCount int, totalGasUsed uint64, totalSupplyInFlow float64) { + _m.Called(txCount, totalGasUsed, totalSupplyInFlow) +} + +// EVMTransactionExecuted provides a mock function with given fields: gasUsed, isDirectCall, failed +func (_m *EVMMetricsReporter) EVMTransactionExecuted(gasUsed uint64, isDirectCall bool, failed bool) { + _m.Called(gasUsed, isDirectCall, failed) +} + +// SetNumberOfDeployedCOAs provides a mock function with given fields: count +func (_m *EVMMetricsReporter) SetNumberOfDeployedCOAs(count uint64) { + _m.Called(count) +} + +// NewEVMMetricsReporter creates a new instance of EVMMetricsReporter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEVMMetricsReporter(t interface { + mock.TestingT + Cleanup(func()) +}) *EVMMetricsReporter { + mock := &EVMMetricsReporter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/execution_version_provider.go b/fvm/environment/mock/execution_version_provider.go new file mode 100644 index 00000000000..102942e50ab --- /dev/null +++ b/fvm/environment/mock/execution_version_provider.go @@ -0,0 +1,55 @@ +// Code generated by mockery. DO NOT EDIT. 
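The new EVMMetricsReporter mock above only records calls, since all of its methods are void. A hedged usage sketch; the gas value and flags are arbitrary:

```go
package mock_test

import (
	"testing"

	envmock "github.com/onflow/flow-go/fvm/environment/mock"
)

func TestEVMMetricsReporterMock(t *testing.T) {
	r := envmock.NewEVMMetricsReporter(t)

	// Void methods need no Return values; the cleanup hook registered by
	// the constructor asserts that the expectation was actually met.
	r.On("EVMTransactionExecuted", uint64(21000), false, false).Return()

	r.EVMTransactionExecuted(21000, false, false)
}
```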
+ +package mock + +import ( + semver "github.com/coreos/go-semver/semver" + mock "github.com/stretchr/testify/mock" +) + +// ExecutionVersionProvider is an autogenerated mock type for the ExecutionVersionProvider type +type ExecutionVersionProvider struct { + mock.Mock +} + +// ExecutionVersion provides a mock function with no fields +func (_m *ExecutionVersionProvider) ExecutionVersion() (semver.Version, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ExecutionVersion") + } + + var r0 semver.Version + var r1 error + if rf, ok := ret.Get(0).(func() (semver.Version, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() semver.Version); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(semver.Version) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewExecutionVersionProvider creates a new instance of ExecutionVersionProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionVersionProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *ExecutionVersionProvider { + mock := &ExecutionVersionProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/logger_provider.go b/fvm/environment/mock/logger_provider.go new file mode 100644 index 00000000000..5807cc20fa3 --- /dev/null +++ b/fvm/environment/mock/logger_provider.go @@ -0,0 +1,45 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + zerolog "github.com/rs/zerolog" + mock "github.com/stretchr/testify/mock" +) + +// LoggerProvider is an autogenerated mock type for the LoggerProvider type +type LoggerProvider struct { + mock.Mock +} + +// Logger provides a mock function with no fields +func (_m *LoggerProvider) Logger() zerolog.Logger { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Logger") + } + + var r0 zerolog.Logger + if rf, ok := ret.Get(0).(func() zerolog.Logger); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(zerolog.Logger) + } + + return r0 +} + +// NewLoggerProvider creates a new instance of LoggerProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLoggerProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *LoggerProvider { + mock := &LoggerProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/meter.go b/fvm/environment/mock/meter.go index 581edb4bbb4..5dc18528fee 100644 --- a/fvm/environment/mock/meter.go +++ b/fvm/environment/mock/meter.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
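The LoggerProvider mock above reflects the signature change from *zerolog.Logger to a zerolog.Logger value, which also removes the nil-check branch from the generated code. A small sketch of stubbing it with a no-op logger:

```go
package mock_test

import (
	"testing"

	"github.com/rs/zerolog"

	envmock "github.com/onflow/flow-go/fvm/environment/mock"
)

func TestLoggerProviderMock(t *testing.T) {
	lp := envmock.NewLoggerProvider(t)

	// Logger now returns zerolog.Logger by value rather than *zerolog.Logger,
	// so callers no longer need a nil guard around the returned logger.
	lp.On("Logger").Return(zerolog.Nop())

	log := lp.Logger()
	log.Debug().Msg("no-op logger returned by the stub")
}
```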
package mock import ( - common "github.com/onflow/cadence/runtime/common" + common "github.com/onflow/cadence/common" meter "github.com/onflow/flow-go/fvm/meter" @@ -15,10 +15,32 @@ type Meter struct { mock.Mock } -// ComputationIntensities provides a mock function with given fields: +// ComputationAvailable provides a mock function with given fields: _a0 +func (_m *Meter) ComputationAvailable(_a0 common.ComputationUsage) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ComputationAvailable") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(common.ComputationUsage) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// ComputationIntensities provides a mock function with no fields func (_m *Meter) ComputationIntensities() meter.MeteredComputationIntensities { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ComputationIntensities") + } + var r0 meter.MeteredComputationIntensities if rf, ok := ret.Get(0).(func() meter.MeteredComputationIntensities); ok { r0 = rf() @@ -31,34 +53,14 @@ func (_m *Meter) ComputationIntensities() meter.MeteredComputationIntensities { return r0 } -// ComputationUsed provides a mock function with given fields: +// ComputationUsed provides a mock function with no fields func (_m *Meter) ComputationUsed() (uint64, error) { ret := _m.Called() - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) + if len(ret) == 0 { + panic("no return value specified for ComputationUsed") } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// InteractionUsed provides a mock function with given fields: -func (_m *Meter) InteractionUsed() (uint64, error) { - ret := _m.Called() - var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -79,10 +81,14 @@ func (_m *Meter) InteractionUsed() (uint64, error) { return r0, r1 } -// MemoryUsed provides a mock function with given fields: +// MemoryUsed provides a mock function with no fields func (_m *Meter) MemoryUsed() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for MemoryUsed") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -103,13 +109,17 @@ func (_m *Meter) MemoryUsed() (uint64, error) { return r0, r1 } -// MeterComputation provides a mock function with given fields: _a0, _a1 -func (_m *Meter) MeterComputation(_a0 common.ComputationKind, _a1 uint) error { - ret := _m.Called(_a0, _a1) +// MeterComputation provides a mock function with given fields: usage +func (_m *Meter) MeterComputation(usage common.ComputationUsage) error { + ret := _m.Called(usage) + + if len(ret) == 0 { + panic("no return value specified for MeterComputation") + } var r0 error - if rf, ok := ret.Get(0).(func(common.ComputationKind, uint) error); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(common.ComputationUsage) error); ok { + r0 = rf(usage) } else { r0 = ret.Error(0) } @@ -121,6 +131,10 @@ func (_m *Meter) MeterComputation(_a0 common.ComputationKind, _a1 uint) error { func (_m *Meter) MeterEmittedEvent(byteSize uint64) error { ret := _m.Called(byteSize) + if len(ret) == 0 { + panic("no return value specified for MeterEmittedEvent") + } + var r0 error if rf, ok := ret.Get(0).(func(uint64) error); ok { r0 = 
rf(byteSize) @@ -135,6 +149,10 @@ func (_m *Meter) MeterEmittedEvent(byteSize uint64) error { func (_m *Meter) MeterMemory(usage common.MemoryUsage) error { ret := _m.Called(usage) + if len(ret) == 0 { + panic("no return value specified for MeterMemory") + } + var r0 error if rf, ok := ret.Get(0).(func(common.MemoryUsage) error); ok { r0 = rf(usage) @@ -145,10 +163,19 @@ func (_m *Meter) MeterMemory(usage common.MemoryUsage) error { return r0 } -// TotalEmittedEventBytes provides a mock function with given fields: +// RunWithMeteringDisabled provides a mock function with given fields: f +func (_m *Meter) RunWithMeteringDisabled(f func()) { + _m.Called(f) +} + +// TotalEmittedEventBytes provides a mock function with no fields func (_m *Meter) TotalEmittedEventBytes() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TotalEmittedEventBytes") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -159,13 +186,12 @@ func (_m *Meter) TotalEmittedEventBytes() uint64 { return r0 } -type mockConstructorTestingTNewMeter interface { +// NewMeter creates a new instance of Meter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMeter(t interface { mock.TestingT Cleanup(func()) -} - -// NewMeter creates a new instance of Meter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMeter(t mockConstructorTestingTNewMeter) *Meter { +}) *Meter { mock := &Meter{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/metrics_reporter.go b/fvm/environment/mock/metrics_reporter.go index 10369a3f4c5..9f9ddd48463 100644 --- a/fvm/environment/mock/metrics_reporter.go +++ b/fvm/environment/mock/metrics_reporter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
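The Meter mock above tracks the signature change from MeterComputation(kind, intensity) to a single common.ComputationUsage argument. A sketch under the assumption that ComputationUsage bundles a Kind and an Intensity field, as in recent Cadence releases:

```go
package mock_test

import (
	"testing"

	"github.com/onflow/cadence/common"
	"github.com/stretchr/testify/require"

	envmock "github.com/onflow/flow-go/fvm/environment/mock"
)

func TestMeterComputationMock(t *testing.T) {
	m := envmock.NewMeter(t)

	// Assumed field names: ComputationUsage carries the kind and intensity
	// that the old MeterComputation(kind, intensity) passed separately.
	usage := common.ComputationUsage{
		Kind:      common.ComputationKindStatement,
		Intensity: 1,
	}
	m.On("MeterComputation", usage).Return(nil)

	require.NoError(t, m.MeterComputation(usage))
}
```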
package mock @@ -13,6 +13,16 @@ type MetricsReporter struct { mock.Mock } +// EVMBlockExecuted provides a mock function with given fields: txCount, totalGasUsed, totalSupplyInFlow +func (_m *MetricsReporter) EVMBlockExecuted(txCount int, totalGasUsed uint64, totalSupplyInFlow float64) { + _m.Called(txCount, totalGasUsed, totalSupplyInFlow) +} + +// EVMTransactionExecuted provides a mock function with given fields: gasUsed, isDirectCall, failed +func (_m *MetricsReporter) EVMTransactionExecuted(gasUsed uint64, isDirectCall bool, failed bool) { + _m.Called(gasUsed, isDirectCall, failed) +} + // RuntimeSetNumberOfAccounts provides a mock function with given fields: count func (_m *MetricsReporter) RuntimeSetNumberOfAccounts(count uint64) { _m.Called(count) @@ -33,23 +43,27 @@ func (_m *MetricsReporter) RuntimeTransactionParsed(_a0 time.Duration) { _m.Called(_a0) } -// RuntimeTransactionProgramsCacheHit provides a mock function with given fields: +// RuntimeTransactionProgramsCacheHit provides a mock function with no fields func (_m *MetricsReporter) RuntimeTransactionProgramsCacheHit() { _m.Called() } -// RuntimeTransactionProgramsCacheMiss provides a mock function with given fields: +// RuntimeTransactionProgramsCacheMiss provides a mock function with no fields func (_m *MetricsReporter) RuntimeTransactionProgramsCacheMiss() { _m.Called() } -type mockConstructorTestingTNewMetricsReporter interface { - mock.TestingT - Cleanup(func()) +// SetNumberOfDeployedCOAs provides a mock function with given fields: count +func (_m *MetricsReporter) SetNumberOfDeployedCOAs(count uint64) { + _m.Called(count) } // NewMetricsReporter creates a new instance of MetricsReporter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMetricsReporter(t mockConstructorTestingTNewMetricsReporter) *MetricsReporter { +// The first argument is typically a *testing.T value. +func NewMetricsReporter(t interface { + mock.TestingT + Cleanup(func()) +}) *MetricsReporter { mock := &MetricsReporter{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/minimum_cadence_required_version.go b/fvm/environment/mock/minimum_cadence_required_version.go new file mode 100644 index 00000000000..d1634776e33 --- /dev/null +++ b/fvm/environment/mock/minimum_cadence_required_version.go @@ -0,0 +1,52 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// MinimumCadenceRequiredVersion is an autogenerated mock type for the MinimumCadenceRequiredVersion type +type MinimumCadenceRequiredVersion struct { + mock.Mock +} + +// MinimumRequiredVersion provides a mock function with no fields +func (_m *MinimumCadenceRequiredVersion) MinimumRequiredVersion() (string, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MinimumRequiredVersion") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func() (string, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewMinimumCadenceRequiredVersion creates a new instance of MinimumCadenceRequiredVersion. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMinimumCadenceRequiredVersion(t interface { + mock.TestingT + Cleanup(func()) +}) *MinimumCadenceRequiredVersion { + mock := &MinimumCadenceRequiredVersion{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/random_generator.go b/fvm/environment/mock/random_generator.go new file mode 100644 index 00000000000..7bc137e00ab --- /dev/null +++ b/fvm/environment/mock/random_generator.go @@ -0,0 +1,42 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// RandomGenerator is an autogenerated mock type for the RandomGenerator type +type RandomGenerator struct { + mock.Mock +} + +// ReadRandom provides a mock function with given fields: _a0 +func (_m *RandomGenerator) ReadRandom(_a0 []byte) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ReadRandom") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]byte) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewRandomGenerator creates a new instance of RandomGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRandomGenerator(t interface { + mock.TestingT + Cleanup(func()) +}) *RandomGenerator { + mock := &RandomGenerator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/random_source_history_provider.go b/fvm/environment/mock/random_source_history_provider.go new file mode 100644 index 00000000000..15c04515782 --- /dev/null +++ b/fvm/environment/mock/random_source_history_provider.go @@ -0,0 +1,54 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// RandomSourceHistoryProvider is an autogenerated mock type for the RandomSourceHistoryProvider type +type RandomSourceHistoryProvider struct { + mock.Mock +} + +// RandomSourceHistory provides a mock function with no fields +func (_m *RandomSourceHistoryProvider) RandomSourceHistory() ([]byte, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RandomSourceHistory") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func() ([]byte, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewRandomSourceHistoryProvider creates a new instance of RandomSourceHistoryProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRandomSourceHistoryProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *RandomSourceHistoryProvider { + mock := &RandomSourceHistoryProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/runtime_metrics_reporter.go b/fvm/environment/mock/runtime_metrics_reporter.go new file mode 100644 index 00000000000..0e895937e4c --- /dev/null +++ b/fvm/environment/mock/runtime_metrics_reporter.go @@ -0,0 +1,58 @@ +// Code generated by mockery. DO NOT EDIT. 
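The RandomGenerator mock above receives the caller's buffer, so a test can populate it through testify's Run hook before the stubbed return value is produced. An illustrative sketch:

```go
package mock_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	envmock "github.com/onflow/flow-go/fvm/environment/mock"
)

func TestRandomGeneratorMock(t *testing.T) {
	rg := envmock.NewRandomGenerator(t)

	// Fill the caller's buffer deterministically so the test is reproducible.
	rg.On("ReadRandom", mock.Anything).Run(func(args mock.Arguments) {
		b := args.Get(0).([]byte)
		for i := range b {
			b[i] = 0x42
		}
	}).Return(nil)

	buf := make([]byte, 8)
	require.NoError(t, rg.ReadRandom(buf))
	require.Equal(t, byte(0x42), buf[0])
}
```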
+ +package mock + +import ( + time "time" + + mock "github.com/stretchr/testify/mock" +) + +// RuntimeMetricsReporter is an autogenerated mock type for the RuntimeMetricsReporter type +type RuntimeMetricsReporter struct { + mock.Mock +} + +// RuntimeSetNumberOfAccounts provides a mock function with given fields: count +func (_m *RuntimeMetricsReporter) RuntimeSetNumberOfAccounts(count uint64) { + _m.Called(count) +} + +// RuntimeTransactionChecked provides a mock function with given fields: _a0 +func (_m *RuntimeMetricsReporter) RuntimeTransactionChecked(_a0 time.Duration) { + _m.Called(_a0) +} + +// RuntimeTransactionInterpreted provides a mock function with given fields: _a0 +func (_m *RuntimeMetricsReporter) RuntimeTransactionInterpreted(_a0 time.Duration) { + _m.Called(_a0) +} + +// RuntimeTransactionParsed provides a mock function with given fields: _a0 +func (_m *RuntimeMetricsReporter) RuntimeTransactionParsed(_a0 time.Duration) { + _m.Called(_a0) +} + +// RuntimeTransactionProgramsCacheHit provides a mock function with no fields +func (_m *RuntimeMetricsReporter) RuntimeTransactionProgramsCacheHit() { + _m.Called() +} + +// RuntimeTransactionProgramsCacheMiss provides a mock function with no fields +func (_m *RuntimeMetricsReporter) RuntimeTransactionProgramsCacheMiss() { + _m.Called() +} + +// NewRuntimeMetricsReporter creates a new instance of RuntimeMetricsReporter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRuntimeMetricsReporter(t interface { + mock.TestingT + Cleanup(func()) +}) *RuntimeMetricsReporter { + mock := &RuntimeMetricsReporter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/tracer.go b/fvm/environment/mock/tracer.go new file mode 100644 index 00000000000..c6dc6507cc9 --- /dev/null +++ b/fvm/environment/mock/tracer.go @@ -0,0 +1,56 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + mock "github.com/stretchr/testify/mock" + oteltrace "go.opentelemetry.io/otel/trace" + + trace "github.com/onflow/flow-go/module/trace" + + tracing "github.com/onflow/flow-go/fvm/tracing" +) + +// Tracer is an autogenerated mock type for the Tracer type +type Tracer struct { + mock.Mock +} + +// StartChildSpan provides a mock function with given fields: name, options +func (_m *Tracer) StartChildSpan(name trace.SpanName, options ...oteltrace.SpanStartOption) tracing.TracerSpan { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, name) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for StartChildSpan") + } + + var r0 tracing.TracerSpan + if rf, ok := ret.Get(0).(func(trace.SpanName, ...oteltrace.SpanStartOption) tracing.TracerSpan); ok { + r0 = rf(name, options...) + } else { + r0 = ret.Get(0).(tracing.TracerSpan) + } + + return r0 +} + +// NewTracer creates a new instance of Tracer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
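For the Tracer mock above, a call with no SpanStartOptions collapses to just the span name in _m.Called, so the expectation needs only that one argument. A sketch using a zero-value TracerSpan as a stand-in span; the span name is illustrative:

```go
package mock_test

import (
	"testing"

	envmock "github.com/onflow/flow-go/fvm/environment/mock"
	"github.com/onflow/flow-go/fvm/tracing"
	"github.com/onflow/flow-go/module/trace"
)

func TestTracerMock(t *testing.T) {
	tr := envmock.NewTracer(t)

	// trace.SpanName is a string type, so a conversion is enough for an
	// illustrative name; the zero TracerSpan stands in for a real span.
	tr.On("StartChildSpan", trace.SpanName("example.span")).
		Return(tracing.TracerSpan{})

	_ = tr.StartChildSpan(trace.SpanName("example.span"))
}
```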
+func NewTracer(t interface { + mock.TestingT + Cleanup(func()) +}) *Tracer { + mock := &Tracer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/transaction_info.go b/fvm/environment/mock/transaction_info.go index 4b838b5f513..f996a3b5ec1 100644 --- a/fvm/environment/mock/transaction_info.go +++ b/fvm/environment/mock/transaction_info.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( - common "github.com/onflow/cadence/runtime/common" + common "github.com/onflow/cadence/common" flow "github.com/onflow/flow-go/model/flow" @@ -15,10 +15,14 @@ type TransactionInfo struct { mock.Mock } -// GetSigningAccounts provides a mock function with given fields: +// GetSigningAccounts provides a mock function with no fields func (_m *TransactionInfo) GetSigningAccounts() ([]common.Address, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetSigningAccounts") + } + var r0 []common.Address var r1 error if rf, ok := ret.Get(0).(func() ([]common.Address, error)); ok { @@ -41,10 +45,14 @@ func (_m *TransactionInfo) GetSigningAccounts() ([]common.Address, error) { return r0, r1 } -// IsServiceAccountAuthorizer provides a mock function with given fields: +// IsServiceAccountAuthorizer provides a mock function with no fields func (_m *TransactionInfo) IsServiceAccountAuthorizer() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsServiceAccountAuthorizer") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -55,10 +63,14 @@ func (_m *TransactionInfo) IsServiceAccountAuthorizer() bool { return r0 } -// LimitAccountStorage provides a mock function with given fields: +// LimitAccountStorage provides a mock function with no fields func (_m *TransactionInfo) LimitAccountStorage() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LimitAccountStorage") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -69,10 +81,14 @@ func (_m *TransactionInfo) LimitAccountStorage() bool { return r0 } -// TransactionFeesEnabled provides a mock function with given fields: +// TransactionFeesEnabled provides a mock function with no fields func (_m *TransactionInfo) TransactionFeesEnabled() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TransactionFeesEnabled") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -83,10 +99,14 @@ func (_m *TransactionInfo) TransactionFeesEnabled() bool { return r0 } -// TxID provides a mock function with given fields: +// TxID provides a mock function with no fields func (_m *TransactionInfo) TxID() flow.Identifier { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TxID") + } + var r0 flow.Identifier if rf, ok := ret.Get(0).(func() flow.Identifier); ok { r0 = rf() @@ -99,10 +119,14 @@ func (_m *TransactionInfo) TxID() flow.Identifier { return r0 } -// TxIndex provides a mock function with given fields: +// TxIndex provides a mock function with no fields func (_m *TransactionInfo) TxIndex() uint32 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TxIndex") + } + var r0 uint32 if rf, ok := ret.Get(0).(func() uint32); ok { r0 = rf() @@ -113,13 +137,12 @@ func (_m *TransactionInfo) TxIndex() uint32 { return r0 } -type mockConstructorTestingTNewTransactionInfo interface 
{ +// NewTransactionInfo creates a new instance of TransactionInfo. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionInfo(t interface { mock.TestingT Cleanup(func()) -} - -// NewTransactionInfo creates a new instance of TransactionInfo. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTransactionInfo(t mockConstructorTestingTNewTransactionInfo) *TransactionInfo { +}) *TransactionInfo { mock := &TransactionInfo{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/unsafe_random_generator.go b/fvm/environment/mock/unsafe_random_generator.go deleted file mode 100644 index c92560981dd..00000000000 --- a/fvm/environment/mock/unsafe_random_generator.go +++ /dev/null @@ -1,49 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mock - -import mock "github.com/stretchr/testify/mock" - -// UnsafeRandomGenerator is an autogenerated mock type for the UnsafeRandomGenerator type -type UnsafeRandomGenerator struct { - mock.Mock -} - -// UnsafeRandom provides a mock function with given fields: -func (_m *UnsafeRandomGenerator) UnsafeRandom() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewUnsafeRandomGenerator interface { - mock.TestingT - Cleanup(func()) -} - -// NewUnsafeRandomGenerator creates a new instance of UnsafeRandomGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewUnsafeRandomGenerator(t mockConstructorTestingTNewUnsafeRandomGenerator) *UnsafeRandomGenerator { - mock := &UnsafeRandomGenerator{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/fvm/environment/mock/uuid_generator.go b/fvm/environment/mock/uuid_generator.go index 914f56808f9..5d67496b422 100644 --- a/fvm/environment/mock/uuid_generator.go +++ b/fvm/environment/mock/uuid_generator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -9,10 +9,14 @@ type UUIDGenerator struct { mock.Mock } -// GenerateUUID provides a mock function with given fields: +// GenerateUUID provides a mock function with no fields func (_m *UUIDGenerator) GenerateUUID() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GenerateUUID") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -33,13 +37,12 @@ func (_m *UUIDGenerator) GenerateUUID() (uint64, error) { return r0, r1 } -type mockConstructorTestingTNewUUIDGenerator interface { +// NewUUIDGenerator creates a new instance of UUIDGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUUIDGenerator(t interface { mock.TestingT Cleanup(func()) -} - -// NewUUIDGenerator creates a new instance of UUIDGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
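With the UnsafeRandomGenerator mock deleted above, callers of the old UnsafeRandom() (uint64, error) presumably move to the buffer-based ReadRandom. A hypothetical migration sketch, not taken from this diff:

```go
package mock_test

import (
	"encoding/binary"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	envmock "github.com/onflow/flow-go/fvm/environment/mock"
)

// Hypothetical migration: where code previously called UnsafeRandom() for a
// uint64, it can fill a buffer via ReadRandom and decode the width it needs.
func TestReadRandomReplacesUnsafeRandom(t *testing.T) {
	rg := envmock.NewRandomGenerator(t)
	rg.On("ReadRandom", mock.AnythingOfType("[]uint8")).Return(nil)

	buf := make([]byte, 8)
	require.NoError(t, rg.ReadRandom(buf))

	// The stub leaves buf zeroed, so r is 0 here; a real implementation
	// would have written random bytes into the buffer.
	r := binary.BigEndian.Uint64(buf)
	require.Zero(t, r)
}
```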
-func NewUUIDGenerator(t mockConstructorTestingTNewUUIDGenerator) *UUIDGenerator { +}) *UUIDGenerator { mock := &UUIDGenerator{} mock.Mock.Test(t) diff --git a/fvm/environment/mock/value_store.go b/fvm/environment/mock/value_store.go index acfc3918545..ad00d31f97f 100644 --- a/fvm/environment/mock/value_store.go +++ b/fvm/environment/mock/value_store.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -13,20 +13,24 @@ type ValueStore struct { mock.Mock } -// AllocateStorageIndex provides a mock function with given fields: owner -func (_m *ValueStore) AllocateStorageIndex(owner []byte) (atree.StorageIndex, error) { +// AllocateSlabIndex provides a mock function with given fields: owner +func (_m *ValueStore) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) { ret := _m.Called(owner) - var r0 atree.StorageIndex + if len(ret) == 0 { + panic("no return value specified for AllocateSlabIndex") + } + + var r0 atree.SlabIndex var r1 error - if rf, ok := ret.Get(0).(func([]byte) (atree.StorageIndex, error)); ok { + if rf, ok := ret.Get(0).(func([]byte) (atree.SlabIndex, error)); ok { return rf(owner) } - if rf, ok := ret.Get(0).(func([]byte) atree.StorageIndex); ok { + if rf, ok := ret.Get(0).(func([]byte) atree.SlabIndex); ok { r0 = rf(owner) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(atree.StorageIndex) + r0 = ret.Get(0).(atree.SlabIndex) } } @@ -43,6 +47,10 @@ func (_m *ValueStore) AllocateStorageIndex(owner []byte) (atree.StorageIndex, er func (_m *ValueStore) GetValue(owner []byte, key []byte) ([]byte, error) { ret := _m.Called(owner, key) + if len(ret) == 0 { + panic("no return value specified for GetValue") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func([]byte, []byte) ([]byte, error)); ok { @@ -69,6 +77,10 @@ func (_m *ValueStore) GetValue(owner []byte, key []byte) ([]byte, error) { func (_m *ValueStore) SetValue(owner []byte, key []byte, value []byte) error { ret := _m.Called(owner, key, value) + if len(ret) == 0 { + panic("no return value specified for SetValue") + } + var r0 error if rf, ok := ret.Get(0).(func([]byte, []byte, []byte) error); ok { r0 = rf(owner, key, value) @@ -83,6 +95,10 @@ func (_m *ValueStore) SetValue(owner []byte, key []byte, value []byte) error { func (_m *ValueStore) ValueExists(owner []byte, key []byte) (bool, error) { ret := _m.Called(owner, key) + if len(ret) == 0 { + panic("no return value specified for ValueExists") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func([]byte, []byte) (bool, error)); ok { @@ -103,13 +119,12 @@ func (_m *ValueStore) ValueExists(owner []byte, key []byte) (bool, error) { return r0, r1 } -type mockConstructorTestingTNewValueStore interface { +// NewValueStore creates a new instance of ValueStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewValueStore(t interface { mock.TestingT Cleanup(func()) -} - -// NewValueStore creates a new instance of ValueStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
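The ValueStore mock above follows atree's StorageIndex to SlabIndex rename. A brief stub sketch; the owner bytes are arbitrary:

```go
package mock_test

import (
	"testing"

	"github.com/onflow/atree"
	"github.com/stretchr/testify/require"

	envmock "github.com/onflow/flow-go/fvm/environment/mock"
)

func TestAllocateSlabIndexMock(t *testing.T) {
	vs := envmock.NewValueStore(t)

	owner := []byte{0x01}
	// The zero SlabIndex is a fine placeholder for a stubbed allocation.
	vs.On("AllocateSlabIndex", owner).Return(atree.SlabIndex{}, nil)

	idx, err := vs.AllocateSlabIndex(owner)
	require.NoError(t, err)
	require.Equal(t, atree.SlabIndex{}, idx)
}
```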
-func NewValueStore(t mockConstructorTestingTNewValueStore) *ValueStore { +}) *ValueStore { mock := &ValueStore{} mock.Mock.Test(t) diff --git a/fvm/environment/program_logger.go b/fvm/environment/program_logger.go index 44b7e859bcf..2f8f619c6cc 100644 --- a/fvm/environment/program_logger.go +++ b/fvm/environment/program_logger.go @@ -3,7 +3,7 @@ package environment import ( "time" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/rs/zerolog" "go.opentelemetry.io/otel/attribute" otelTrace "go.opentelemetry.io/otel/trace" @@ -12,9 +12,16 @@ import ( "github.com/onflow/flow-go/module/trace" ) -// MetricsReporter captures and reports metrics to back to the execution +// EVMMetricsReporter captures and reports EVM metrics back to the execution environment. +type EVMMetricsReporter interface { + SetNumberOfDeployedCOAs(count uint64) + EVMTransactionExecuted(gasUsed uint64, isDirectCall bool, failed bool) + EVMBlockExecuted(txCount int, totalGasUsed uint64, totalSupplyInFlow float64) +} + +// RuntimeMetricsReporter captures and reports runtime metrics back to the execution // environment; it is set up and passed to the context. -type MetricsReporter interface { +type RuntimeMetricsReporter interface { RuntimeTransactionParsed(time.Duration) RuntimeTransactionChecked(time.Duration) RuntimeTransactionInterpreted(time.Duration) @@ -23,9 +30,18 @@ type MetricsReporter interface { RuntimeTransactionProgramsCacheHit() } +// MetricsReporter captures and reports metrics back to the execution +// environment; it is set up and passed to the context. +type MetricsReporter interface { + EVMMetricsReporter + RuntimeMetricsReporter +} + // NoopMetricsReporter is a MetricsReporter that does nothing. type NoopMetricsReporter struct{} +var _ MetricsReporter = &NoopMetricsReporter{} + // RuntimeTransactionParsed is a noop func (NoopMetricsReporter) RuntimeTransactionParsed(time.Duration) {} @@ -44,6 +60,15 @@ func (NoopMetricsReporter) RuntimeTransactionProgramsCacheMiss() {} // RuntimeTransactionProgramsCacheHit is a noop func (NoopMetricsReporter) RuntimeTransactionProgramsCacheHit() {} +// SetNumberOfDeployedCOAs is a noop +func (NoopMetricsReporter) SetNumberOfDeployedCOAs(_ uint64) {} + +// EVMTransactionExecuted is a noop +func (NoopMetricsReporter) EVMTransactionExecuted(_ uint64, _ bool, _ bool) {} + +// EVMBlockExecuted is a noop +func (NoopMetricsReporter) EVMBlockExecuted(_ int, _ uint64, _ float64) {} + type ProgramLoggerParams struct { zerolog.Logger @@ -79,12 +104,12 @@ func NewProgramLogger( } } -func (logger *ProgramLogger) Logger() *zerolog.Logger { - return &logger.ProgramLoggerParams.Logger +func (logger *ProgramLogger) Logger() zerolog.Logger { + return logger.ProgramLoggerParams.Logger } func (logger *ProgramLogger) ImplementationDebugLog(message string) error { - logger.Logger().Debug().Msgf("Cadence: %s", message) + logger.Debug().Msgf("Cadence: %s", message) return nil } @@ -98,7 +123,7 @@ func (logger *ProgramLogger) ProgramLog(message string) error { // emulator or emulator based tools), // we log the message to the zerolog logger so that they can be tracked // while stepping through a transaction/script. - logger.Logger(). + logger. Debug().
Msgf("Cadence log: %s", message) @@ -113,14 +138,9 @@ func (logger *ProgramLogger) Logs() []string { func (logger *ProgramLogger) RecordTrace( operation string, - location common.Location, duration time.Duration, attrs []attribute.KeyValue, ) { - if location != nil { - attrs = append(attrs, attribute.String("location", location.String())) - } - end := time.Now() span := logger.tracer.StartChildSpan( @@ -135,7 +155,7 @@ func (logger *ProgramLogger) ProgramParsed( location common.Location, duration time.Duration, ) { - logger.RecordTrace("parseProgram", location, duration, nil) + logger.RecordTrace("parseProgram", duration, nil) // These checks prevent re-reporting durations, the metrics collection is // a bit counter-intuitive: @@ -157,7 +177,7 @@ func (logger *ProgramLogger) ProgramChecked( location common.Location, duration time.Duration, ) { - logger.RecordTrace("checkProgram", location, duration, nil) + logger.RecordTrace("checkProgram", duration, nil) // see the comment for ProgramParsed if location == nil { @@ -173,7 +193,7 @@ func (logger *ProgramLogger) ProgramInterpreted( location common.Location, duration time.Duration, ) { - logger.RecordTrace("interpretProgram", location, duration, nil) + logger.RecordTrace("interpretProgram", duration, nil) // see the comment for ProgramInterpreted if location == nil { @@ -186,10 +206,10 @@ func (logger *ProgramLogger) ProgramInterpreted( // ValueEncoded accumulates time spend on runtime value encoding func (logger *ProgramLogger) ValueEncoded(duration time.Duration) { - logger.RecordTrace("encodeValue", nil, duration, nil) + logger.RecordTrace("encodeValue", duration, nil) } // ValueDecoded accumulates time spend on runtime value decoding func (logger *ProgramLogger) ValueDecoded(duration time.Duration) { - logger.RecordTrace("decodeValue", nil, duration, nil) + logger.RecordTrace("decodeValue", duration, nil) } diff --git a/fvm/environment/program_recovery.go b/fvm/environment/program_recovery.go new file mode 100644 index 00000000000..834c4fd07b9 --- /dev/null +++ b/fvm/environment/program_recovery.go @@ -0,0 +1,441 @@ +package environment + +import ( + "fmt" + + "github.com/onflow/cadence/ast" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/sema" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" +) + +func RecoverProgram( + chainID flow.ChainID, + program *ast.Program, + location common.Location, +) ( + []byte, + error, +) { + addressLocation, ok := location.(common.AddressLocation) + if !ok { + return nil, nil + } + + sc := systemcontracts.SystemContractsForChain(chainID) + + fungibleTokenAddress := common.Address(sc.FungibleToken.Address) + nonFungibleTokenAddress := common.Address(sc.NonFungibleToken.Address) + + switch { + case isFungibleTokenContract(program, fungibleTokenAddress): + return RecoveredFungibleTokenCode(fungibleTokenAddress, addressLocation.Name), nil + + case isNonFungibleTokenContract(program, nonFungibleTokenAddress): + return RecoveredNonFungibleTokenCode(nonFungibleTokenAddress, addressLocation.Name), nil + } + + return nil, nil +} + +func RecoveredFungibleTokenCode(fungibleTokenAddress common.Address, contractName string) []byte { + return []byte(fmt.Sprintf( + //language=Cadence + ` + import FungibleToken from %[1]s + + access(all) + contract %[2]s: FungibleToken { + + access(self) + view fun recoveryPanic(_ functionName: String): Never { + return panic( + "%[3]s ".concat(functionName).concat(" is not available in recovered program.") + ) + } + + 
access(all) + var totalSupply: UFix64 + + init() { + self.totalSupply = 0.0 + } + + access(all) + view fun getContractViews(resourceType: Type?): [Type] { + %[2]s.recoveryPanic("getContractViews") + } + + access(all) + fun resolveContractView(resourceType: Type?, viewType: Type): AnyStruct? { + %[2]s.recoveryPanic("resolveContractView") + } + + access(all) + fun createEmptyVault(vaultType: Type): @{FungibleToken.Vault} { + %[2]s.recoveryPanic("createEmptyVault") + } + + access(all) + resource Vault: FungibleToken.Vault { + + access(all) + var balance: UFix64 + + init(balance: UFix64) { + self.balance = balance + } + + access(FungibleToken.Withdraw) + fun withdraw(amount: UFix64): @{FungibleToken.Vault} { + %[2]s.recoveryPanic("Vault.withdraw") + } + + access(all) + view fun isAvailableToWithdraw(amount: UFix64): Bool { + %[2]s.recoveryPanic("Vault.isAvailableToWithdraw") + } + + access(all) + fun deposit(from: @{FungibleToken.Vault}) { + %[2]s.recoveryPanic("Vault.deposit") + } + + access(all) + fun createEmptyVault(): @{FungibleToken.Vault} { + %[2]s.recoveryPanic("Vault.createEmptyVault") + } + + access(all) + view fun getViews(): [Type] { + %[2]s.recoveryPanic("Vault.getViews") + } + + access(all) + fun resolveView(_ view: Type): AnyStruct? { + %[2]s.recoveryPanic("Vault.resolveView") + } + } + } + `, + fungibleTokenAddress.HexWithPrefix(), + contractName, + fmt.Sprintf("Contract %s is no longer functional. "+ + "A version of the contract has been recovered to allow access to the fields declared in the FT standard.", + contractName, + ), + )) +} + +func RecoveredNonFungibleTokenCode(nonFungibleTokenAddress common.Address, contractName string) []byte { + return []byte(fmt.Sprintf( + //language=Cadence + ` + import NonFungibleToken from %[1]s + + access(all) + contract %[2]s: NonFungibleToken { + + access(self) + view fun recoveryPanic(_ functionName: String): Never { + return panic( + "%[3]s ".concat(functionName).concat(" is not available in recovered program.") + ) + } + + access(all) + view fun getContractViews(resourceType: Type?): [Type] { + %[2]s.recoveryPanic("getContractViews") + } + + access(all) + fun resolveContractView(resourceType: Type?, viewType: Type): AnyStruct? { + %[2]s.recoveryPanic("resolveContractView") + } + + access(all) + fun createEmptyCollection(nftType: Type): @{NonFungibleToken.Collection} { + %[2]s.recoveryPanic("createEmptyCollection") + } + + access(all) + resource NFT: NonFungibleToken.NFT { + + access(all) + let id: UInt64 + + init(id: UInt64) { + self.id = id + } + + access(all) + view fun getViews(): [Type] { + %[2]s.recoveryPanic("NFT.getViews") + } + + access(all) + fun resolveView(_ view: Type): AnyStruct? 
{ + %[2]s.recoveryPanic("NFT.resolveView") + } + + access(all) + fun createEmptyCollection(): @{NonFungibleToken.Collection} { + %[2]s.recoveryPanic("NFT.createEmptyCollection") + } + } + + access(all) + resource Collection: NonFungibleToken.Collection { + + access(all) + var ownedNFTs: @{UInt64: {NonFungibleToken.NFT}} + + init() { + self.ownedNFTs <- {} + } + + access(all) + fun deposit(token: @{NonFungibleToken.NFT}) { + %[2]s.recoveryPanic("Collection.deposit") + } + + access(all) + view fun getIDs(): [UInt64] { + return self.ownedNFTs.keys + } + + access(all) + view fun getSupportedNFTTypes(): {Type: Bool} { + %[2]s.recoveryPanic("Collection.getSupportedNFTTypes") + } + + access(all) + view fun isSupportedNFTType(type: Type): Bool { + %[2]s.recoveryPanic("Collection.isSupportedNFTType") + } + + access(NonFungibleToken.Withdraw) + fun withdraw(withdrawID: UInt64): @{NonFungibleToken.NFT} { + %[2]s.recoveryPanic("Collection.withdraw") + } + + access(all) + view fun borrowNFT(_ id: UInt64): &{NonFungibleToken.NFT}? { + %[2]s.recoveryPanic("Collection.borrowNFT") + } + + access(all) + fun createEmptyCollection(): @{NonFungibleToken.Collection} { + %[2]s.recoveryPanic("Collection.createEmptyCollection") + } + } + } + `, + nonFungibleTokenAddress.HexWithPrefix(), + contractName, + fmt.Sprintf("Contract %s is no longer functional. "+ + "A version of the contract has been recovered to allow access to the fields declared in the NFT standard.", + contractName, + ), + )) +} + +func importsAddressLocation(program *ast.Program, address common.Address, name string) bool { + importDeclarations := program.ImportDeclarations() + + // Check if the location is imported by any import declaration + for _, importDeclaration := range importDeclarations { + + // The import declaration imports from the same address + importedLocation, ok := importDeclaration.Location.(common.AddressLocation) + if !ok || importedLocation.Address != address { + continue + } + + // The import declaration imports all identifiers, so also the location + if len(importDeclaration.Imports) == 0 { + return true + } + + // The import declaration imports specific identifiers, so check if the location is imported + for _, imp := range importDeclaration.Imports { + if imp.Identifier.Identifier == name { + return true + } + } + } + + return false +} + +func declaresConformanceTo(conformingDeclaration ast.ConformingDeclaration, name string) bool { + for _, conformance := range conformingDeclaration.ConformanceList() { + if conformance.Identifier.Identifier == name { + return true + } + } + + return false +} + +func isNominalType(ty ast.Type, name string) bool { + nominalType, ok := ty.(*ast.NominalType) + return ok && + len(nominalType.NestedIdentifiers) == 0 && + nominalType.Identifier.Identifier == name +} + +const fungibleTokenTypeIdentifier = "FungibleToken" +const fungibleTokenTypeTotalSupplyFieldName = "totalSupply" +const fungibleTokenVaultTypeIdentifier = "Vault" +const fungibleTokenVaultTypeBalanceFieldName = "balance" + +const nonFungibleTokenTypeIdentifier = "NonFungibleToken" +const nonFungibleTokenTypeTotalSupplyFieldName = "totalSupply" +const nonFungibleTokenNFTTypeIdentifier = "NFT" +const nonFungibleTokenNFTTypeIDFieldName = "id" +const nonFungibleTokenCollectionTypeIdentifier = "Collection" +const nonFungibleTokenCollectionTypeOwnedNFTsFieldName = "ownedNFTs" + +func isFungibleTokenContract(program *ast.Program, fungibleTokenAddress common.Address) bool { + + // Check if the contract imports the FungibleToken contract + if 
!importsAddressLocation(program, fungibleTokenAddress, fungibleTokenTypeIdentifier) { + return false + } + + contractDeclaration := program.SoleContractDeclaration() + if contractDeclaration == nil { + return false + } + + // Check if the contract implements the FungibleToken interface + if !declaresConformanceTo(contractDeclaration, fungibleTokenTypeIdentifier) { + return false + } + + // Check if the contract has a totalSupply field + totalSupplyFieldDeclaration := getField(contractDeclaration, fungibleTokenTypeTotalSupplyFieldName) + if totalSupplyFieldDeclaration == nil { + return false + } + + // Check if the totalSupply field is of type UFix64 + if !isNominalType(totalSupplyFieldDeclaration.TypeAnnotation.Type, sema.UFix64TypeName) { + return false + } + + // Check if the contract has a Vault resource + + vaultDeclaration := contractDeclaration.Members.CompositesByIdentifier()[fungibleTokenVaultTypeIdentifier] + if vaultDeclaration == nil { + return false + } + + // Check if the Vault resource has a balance field + balanceFieldDeclaration := getField(vaultDeclaration, fungibleTokenVaultTypeBalanceFieldName) + if balanceFieldDeclaration == nil { + return false + } + + // Check if the balance field is of type UFix64 + if !isNominalType(balanceFieldDeclaration.TypeAnnotation.Type, sema.UFix64TypeName) { + return false + } + + return true +} + +func isNonFungibleTokenContract(program *ast.Program, nonFungibleTokenAddress common.Address) bool { + + // Check if the contract imports the NonFungibleToken contract + if !importsAddressLocation(program, nonFungibleTokenAddress, nonFungibleTokenTypeIdentifier) { + return false + } + + contractDeclaration := program.SoleContractDeclaration() + if contractDeclaration == nil { + return false + } + + // Check if the contract implements the NonFungibleToken interface + if !declaresConformanceTo(contractDeclaration, nonFungibleTokenTypeIdentifier) { + return false + } + + // Check if the contract has a totalSupply field + totalSupplyFieldDeclaration := getField(contractDeclaration, nonFungibleTokenTypeTotalSupplyFieldName) + if totalSupplyFieldDeclaration == nil { + return false + } + + // Check if the totalSupply field is of type UInt64 + if !isNominalType(totalSupplyFieldDeclaration.TypeAnnotation.Type, sema.UInt64TypeName) { + return false + } + + // Check if the contract has an NFT resource + + nestedComposites := contractDeclaration.Members.CompositesByIdentifier() + + nftDeclaration := nestedComposites[nonFungibleTokenNFTTypeIdentifier] + if nftDeclaration == nil { + return false + } + + // Check if the NFT resource has an id field + idFieldDeclaration := getField(nftDeclaration, nonFungibleTokenNFTTypeIDFieldName) + if idFieldDeclaration == nil { + return false + } + + // Check if the id field is of type UInt64 + if !isNominalType(idFieldDeclaration.TypeAnnotation.Type, sema.UInt64TypeName) { + return false + } + + // Check if the contract has a Collection resource + collectionDeclaration := nestedComposites[nonFungibleTokenCollectionTypeIdentifier] + if collectionDeclaration == nil { + return false + } + + // Check if the Collection resource has an ownedNFTs field + ownedNFTsFieldDeclaration := getField(collectionDeclaration, nonFungibleTokenCollectionTypeOwnedNFTsFieldName) + if ownedNFTsFieldDeclaration == nil { + return false + } + + // Check if the ownedNFTs field is of type {UInt64: NonFungibleToken.NFT} (NOTE: old syntax) + ownedNFTsFieldType := ownedNFTsFieldDeclaration.TypeAnnotation.Type + ownedNFTsFieldDictionaryType, ok := 
ownedNFTsFieldType.(*ast.DictionaryType) + if !ok || + !isNominalType(ownedNFTsFieldDictionaryType.KeyType, sema.UInt64TypeName) || + !isNonFungibleTokenNFTNominalType(ownedNFTsFieldDictionaryType.ValueType) { + + return false + } + + return true +} + +// isNonFungibleTokenNFTNominalType checks if the given type is a nominal type representing `NonFungibleToken.NFT` +func isNonFungibleTokenNFTNominalType(ty ast.Type) bool { + nominalType, ok := ty.(*ast.NominalType) + return ok && + nominalType.Identifier.Identifier == nonFungibleTokenTypeIdentifier && + len(nominalType.NestedIdentifiers) == 1 && + nominalType.NestedIdentifiers[0].Identifier == nonFungibleTokenNFTTypeIdentifier +} + +func getField(declaration *ast.CompositeDeclaration, name string) *ast.FieldDeclaration { + for _, fieldDeclaration := range declaration.Members.Fields() { + if fieldDeclaration.Identifier.Identifier == name { + return fieldDeclaration + } + } + + return nil +} diff --git a/fvm/environment/programs.go b/fvm/environment/programs.go index 16fe865015c..8051f7a1767 100644 --- a/fvm/environment/programs.go +++ b/fvm/environment/programs.go @@ -4,11 +4,12 @@ import ( "fmt" "github.com/hashicorp/go-multierror" + "github.com/onflow/cadence/runtime" + "golang.org/x/xerrors" "github.com/onflow/cadence" + "github.com/onflow/cadence/common" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage" @@ -18,6 +19,22 @@ import ( "github.com/onflow/flow-go/module/trace" ) +type ProgramLoadingError struct { + Err error + Location common.Location +} + +func (p ProgramLoadingError) Unwrap() error { + return p.Err +} + +var _ error = ProgramLoadingError{} +var _ xerrors.Wrapper = ProgramLoadingError{} + +func (p ProgramLoadingError) Error() string { + return fmt.Sprintf("error getting program %v: %s", p.Location, p.Err) +} + // Programs manages operations around cadence program parsing. // // Note that cadence guarantees that Get/Set methods are called in a LIFO @@ -34,7 +51,7 @@ type Programs struct { // NOTE: non-address programs are not reusable across transactions, hence // they are kept out of the derived data database. - nonAddressPrograms map[common.Location]*interpreter.Program + nonAddressPrograms map[common.Location]*runtime.Program // dependencyStack tracks programs currently being loaded and their dependencies. dependencyStack *dependencyStack @@ -54,7 +71,7 @@ func NewPrograms( metrics: metrics, txnState: txnState, accounts: accounts, - nonAddressPrograms: make(map[common.Location]*interpreter.Program), + nonAddressPrograms: make(map[common.Location]*runtime.Program), dependencyStack: newDependencyStack(), } } @@ -62,7 +79,7 @@ func NewPrograms( // Reset resets the program cache. // this is called if the transactions happy path fails. func (programs *Programs) Reset() { - programs.nonAddressPrograms = make(map[common.Location]*interpreter.Program) + programs.nonAddressPrograms = make(map[common.Location]*runtime.Program) programs.dependencyStack = newDependencyStack() } @@ -72,10 +89,15 @@ func (programs *Programs) Reset() { // to load the dependencies of the program. 
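// (The Cadence runtime re-enters GetOrLoadProgram for every import it resolves
// while loading a program, which is how nested dependencies end up on the
// dependency stack below; see the LIFO note above.)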
func (programs *Programs) GetOrLoadProgram( location common.Location, - load func() (*interpreter.Program, error), -) (*interpreter.Program, error) { + load func() (*runtime.Program, error), +) (*runtime.Program, error) { defer programs.tracer.StartChildSpan(trace.FVMEnvGetOrLoadProgram).End() - err := programs.meter.MeterComputation(ComputationKindGetOrLoadProgram, 1) + err := programs.meter.MeterComputation( + common.ComputationUsage{ + Kind: ComputationKindGetOrLoadProgram, + Intensity: 1, + }, + ) if err != nil { return nil, fmt.Errorf("get program failed: %w", err) } @@ -91,9 +113,8 @@ func (programs *Programs) GetOrLoadProgram( func (programs *Programs) getOrLoadAddressProgram( location common.AddressLocation, - load func() (*interpreter.Program, error), -) (*interpreter.Program, error) { - + load func() (*runtime.Program, error), +) (*runtime.Program, error) { top, err := programs.dependencyStack.top() if err != nil { return nil, err @@ -127,7 +148,10 @@ func (programs *Programs) getOrLoadAddressProgram( loader, ) if err != nil { - return nil, fmt.Errorf("error getting program: %w", err) + return nil, ProgramLoadingError{ + Err: err, + Location: location, + } } // Add dependencies to the stack. @@ -149,8 +173,8 @@ func (programs *Programs) getOrLoadAddressProgram( func (programs *Programs) getOrLoadNonAddressProgram( location common.Location, - load func() (*interpreter.Program, error), -) (*interpreter.Program, error) { + load func() (*runtime.Program, error), +) (*runtime.Program, error) { program, ok := programs.nonAddressPrograms[location] if ok { return program, nil @@ -197,7 +221,7 @@ func (programs *Programs) cacheMiss() { // programLoader is used to load a program from a location. type programLoader struct { - loadFunc func() (*interpreter.Program, error) + loadFunc func() (*runtime.Program, error) dependencyStack *dependencyStack called bool location common.AddressLocation @@ -206,7 +230,7 @@ type programLoader struct { var _ derived.ValueComputer[common.AddressLocation, *derived.Program] = (*programLoader)(nil) func newProgramLoader( - loadFunc func() (*interpreter.Program, error), + loadFunc func() (*runtime.Program, error), dependencyStack *dependencyStack, location common.AddressLocation, ) *programLoader { @@ -220,7 +244,7 @@ func newProgramLoader( } func (loader *programLoader) Compute( - txState state.NestedTransactionPreparer, + _ state.NestedTransactionPreparer, location common.AddressLocation, ) ( *derived.Program, @@ -263,14 +287,14 @@ func (loader *programLoader) Called() bool { func (loader *programLoader) loadWithDependencyTracking( address common.AddressLocation, - load func() (*interpreter.Program, error), + load func() (*runtime.Program, error), ) ( - *interpreter.Program, + *runtime.Program, derived.ProgramDependencies, error, ) { // this program is not in cache, so we need to load it into the cache. - // tho have proper invalidation, we need to track the dependencies of the program. + // to have proper invalidation, we need to track the dependencies of the program. // If this program depends on another program, // that program will be loaded before this one finishes loading (calls set). // That is why this is a stack. @@ -280,7 +304,13 @@ func (loader *programLoader) loadWithDependencyTracking( // Get collected dependencies of the loaded program. // Pop the dependencies from the stack even if loading errored. 
- stackLocation, dependencies, depErr := loader.dependencyStack.pop() + // + // In case of an error, the dependencies of the errored program should not be merged + // into the dependencies of the parent program. This is to prevent the parent program + // from thinking that this program was already loaded and is in the cache, + // if it requests it again. + merge := err == nil + stackLocation, dependencies, depErr := loader.dependencyStack.pop(merge) if depErr != nil { err = multierror.Append(err, depErr).ErrorOrNil() } @@ -371,7 +401,9 @@ func (s *dependencyStack) add(dependencies derived.ProgramDependencies) error { } // pop the last dependencies on the stack and return them. -func (s *dependencyStack) pop() (common.Location, derived.ProgramDependencies, error) { +// If merge is false, the dependencies are not merged into the parent tracker. +// This is used to pop the dependencies of a program that errored during loading. +func (s *dependencyStack) pop(merge bool) (common.Location, derived.ProgramDependencies, error) { if len(s.trackers) <= 1 { return nil, derived.NewProgramDependencies(), @@ -384,11 +416,13 @@ func (s *dependencyStack) pop() (common.Location, derived.ProgramDependencies, e tracker := s.trackers[len(s.trackers)-1] s.trackers = s.trackers[:len(s.trackers)-1] - // Add the dependencies of the popped tracker to the parent tracker - // This is an optimisation to avoid having to iterate through the entire stack - // everytime a dependency is pushed or added, instead we add the popped dependencies to the new top of the stack. - // (because if C depends on B which depends on A, A's dependencies include C). - s.trackers[len(s.trackers)-1].dependencies.Merge(tracker.dependencies) + if merge { + // Add the dependencies of the popped tracker to the parent tracker. + // This is an optimisation to avoid having to iterate through the entire stack + // every time a dependency is pushed or added; instead we add the popped dependencies to the new top of the stack. + // (because if C depends on B which depends on A, C's dependencies include A).
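+		// e.g. if C imports B and B imports A: when B finishes loading, B's
+		// tracker is popped and its dependency set {A, B} is merged into C's
+		// tracker, so C ends up tracking {A, B, C}.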
+ s.trackers[len(s.trackers)-1].dependencies.Merge(tracker.dependencies) + } return tracker.location, tracker.dependencies, nil } diff --git a/fvm/environment/programs_test.go b/fvm/environment/programs_test.go index d6016f08dd0..57ce7ee4b7c 100644 --- a/fvm/environment/programs_test.go +++ b/fvm/environment/programs_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm" @@ -16,6 +16,7 @@ import ( "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/state" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) var ( @@ -43,34 +44,54 @@ var ( } contractA0Code = ` - pub contract A { - pub fun hello(): String { + access(all) contract A { + access(all) struct interface Foo{} + + access(all) fun hello(): String { return "bad version" } } ` contractACode = ` - pub contract A { - pub fun hello(): String { + access(all) contract A { + access(all) struct interface Foo{} + + access(all) fun hello(): String { return "hello from A" } } ` contractA2Code = ` - pub contract A2 { - pub fun hello(): String { + access(all) contract A2 { + access(all) struct interface Foo{} + + access(all) fun hello(): String { return "hello from A2" } } ` + contractABreakingCode = ` + access(all) contract A { + access(all) struct interface Foo{ + access(all) fun hello() + } + + access(all) fun hello(): String { + return "hello from A with breaking change" + } + } + ` + contractBCode = ` import 0xa - pub contract B { - pub fun hello(): String { + access(all) contract B { + access(all) struct Bar : A.Foo {} + + access(all) fun hello(): String { return "hello from B but also ".concat(A.hello()) } } @@ -80,8 +101,10 @@ var ( import B from 0xb import A from 0xa - pub contract C { - pub fun hello(): String { + access(all) contract C { + access(all) struct Bar : A.Foo {} + + access(all) fun hello(): String { return "hello from C, ".concat(B.hello()) } } @@ -148,10 +171,12 @@ func Test_Programs(t *testing.T) { require.Empty(t, retrievedContractA) // deploy contract A0 + txBody, err := contractDeployTx("A", contractA0Code, addressA) + require.NoError(t, err) executionSnapshot, output, err := vm.Run( context, fvm.Transaction( - contractDeployTx("A", contractA0Code, addressA), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), mainSnapshot) require.NoError(t, err) @@ -167,10 +192,12 @@ func Test_Programs(t *testing.T) { require.Equal(t, contractA0Code, string(retrievedContractA)) // deploy contract A + txBody, err = updateContractTx("A", contractACode, addressA) + require.NoError(t, err) executionSnapshot, output, err = vm.Run( context, fvm.Transaction( - updateContractTx("A", contractACode, addressA), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), mainSnapshot) require.NoError(t, err) @@ -187,7 +214,7 @@ func Test_Programs(t *testing.T) { }) t.Run("register touches are captured for simple contract A", func(t *testing.T) { - fmt.Println("---------- Real transaction here ------------") + t.Log("---------- Real transaction here ------------") // run a TX using contract A @@ -204,10 +231,12 @@ func Test_Programs(t *testing.T) { return mainSnapshot.Get(id) }) + txBody, err := callTx("A", addressA) + require.NoError(t, err) executionSnapshotA, output, err := vm.Run( context, fvm.Transaction( - callTx("A", addressA), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), execASnapshot) require.NoError(t, err) @@ 
-248,10 +277,12 @@ func Test_Programs(t *testing.T) { return mainSnapshot.Get(id) }) + txBody, err = callTx("A", addressA) + require.NoError(t, err) executionSnapshotA2, output, err := vm.Run( context, fvm.Transaction( - callTx("A", addressA), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), execA2Snapshot) require.NoError(t, err) @@ -268,10 +299,12 @@ func Test_Programs(t *testing.T) { t.Run("deploying another contract invalidates dependant programs", func(t *testing.T) { // deploy contract B + txBody, err := contractDeployTx("B", contractBCode, addressB) + require.NoError(t, err) executionSnapshot, output, err := vm.Run( context, fvm.Transaction( - contractDeployTx("B", contractBCode, addressB), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), mainSnapshot) require.NoError(t, err) @@ -298,11 +331,12 @@ func Test_Programs(t *testing.T) { // programs should have no entries for A and B, as per previous test // run a TX using contract B - + txBody, err := callTx("B", addressB) + require.NoError(t, err) executionSnapshotB, output, err := vm.Run( context, fvm.Transaction( - callTx("B", addressB), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), mainSnapshot) require.NoError(t, err) @@ -353,10 +387,12 @@ func Test_Programs(t *testing.T) { return mainSnapshot.Get(id) }) + txBody, err = callTx("B", addressB) + require.NoError(t, err) executionSnapshotB2, output, err := vm.Run( context, fvm.Transaction( - callTx("B", addressB), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), execB2Snapshot) require.NoError(t, err) @@ -370,11 +406,13 @@ func Test_Programs(t *testing.T) { }) t.Run("deploying new contract A2 invalidates B because of * imports", func(t *testing.T) { - // deploy contract B + // deploy contract A2 + txBody, err := contractDeployTx("A2", contractA2Code, addressA) + require.NoError(t, err) executionSnapshot, output, err := vm.Run( context, fvm.Transaction( - contractDeployTx("A2", contractA2Code, addressA), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), mainSnapshot) require.NoError(t, err) @@ -400,11 +438,12 @@ func Test_Programs(t *testing.T) { // programs should have no entries for A and B, as per previous test // run a TX using contract B - + txBody, err := callTx("B", addressB) + require.NoError(t, err) executionSnapshotB, output, err := vm.Run( context, fvm.Transaction( - callTx("B", addressB), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), mainSnapshot) require.NoError(t, err) @@ -461,10 +500,12 @@ func Test_Programs(t *testing.T) { return mainSnapshot.Get(id) }) + txBody, err = callTx("B", addressB) + require.NoError(t, err) executionSnapshotB2, output, err := vm.Run( context, fvm.Transaction( - callTx("B", addressB), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), execB2Snapshot) require.NoError(t, err) @@ -492,10 +533,12 @@ func Test_Programs(t *testing.T) { }) // run a TX using contract A + txBody, err := callTx("A", addressA) + require.NoError(t, err) executionSnapshot, output, err := vm.Run( context, fvm.Transaction( - callTx("A", addressA), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), execASnapshot) require.NoError(t, err) @@ -512,10 +555,12 @@ func Test_Programs(t *testing.T) { require.NotNil(t, contractBSnapshot) // deploy contract C + txBody, err := contractDeployTx("C", contractCCode, addressC) + require.NoError(t, err) executionSnapshot, output, err := vm.Run( context, fvm.Transaction( - contractDeployTx("C", contractCCode, addressC), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), 
mainSnapshot) require.NoError(t, err) @@ -538,10 +583,12 @@ func Test_Programs(t *testing.T) { }) t.Run("importing C should chain-import B and A", func(t *testing.T) { + txBody, err := callTx("C", addressC) + require.NoError(t, err) executionSnapshot, output, err := vm.Run( context, fvm.Transaction( - callTx("C", addressC), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), mainSnapshot) require.NoError(t, err) @@ -595,10 +642,12 @@ func Test_ProgramsDoubleCounting(t *testing.T) { t.Run("deploy contracts and ensure cache is empty", func(t *testing.T) { // deploy contract A + txBody, err := contractDeployTx("A", contractACode, addressA) + require.NoError(t, err) executionSnapshot, output, err := vm.Run( context, fvm.Transaction( - contractDeployTx("A", contractACode, addressA), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), snapshotTree) require.NoError(t, err) @@ -607,10 +656,12 @@ func Test_ProgramsDoubleCounting(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) // deploy contract B + txBody, err = contractDeployTx("B", contractBCode, addressB) + require.NoError(t, err) executionSnapshot, output, err = vm.Run( context, fvm.Transaction( - contractDeployTx("B", contractBCode, addressB), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), snapshotTree) require.NoError(t, err) @@ -619,10 +670,12 @@ func Test_ProgramsDoubleCounting(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) // deploy contract C + txBody, err = contractDeployTx("C", contractCCode, addressC) + require.NoError(t, err) executionSnapshot, output, err = vm.Run( context, fvm.Transaction( - contractDeployTx("C", contractCCode, addressC), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), snapshotTree) require.NoError(t, err) @@ -631,10 +684,12 @@ func Test_ProgramsDoubleCounting(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) // deploy contract A2 last to clear any cache so far + txBody, err = contractDeployTx("A2", contractA2Code, addressA) + require.NoError(t, err) executionSnapshot, output, err = vm.Run( context, fvm.Transaction( - contractDeployTx("A2", contractA2Code, addressA), + txBody, derivedBlockData.NextTxIndexForTestingOnly()), snapshotTree) require.NoError(t, err) @@ -657,8 +712,8 @@ func Test_ProgramsDoubleCounting(t *testing.T) { }) callC := func(snapshotTree snapshot.SnapshotTree) snapshot.SnapshotTree { - procCallC := fvm.Transaction( - flow.NewTransactionBody().SetScript( + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript( []byte( ` import A from 0xa @@ -669,8 +724,12 @@ func Test_ProgramsDoubleCounting(t *testing.T) { log(C.hello()) } }`, - )), - derivedBlockData.NextTxIndexForTestingOnly()) + )). + SetPayer(unittest.RandomAddressFixture()). 
+ Build() + require.NoError(t, err) + + procCallC := fvm.Transaction(txBody, derivedBlockData.NextTxIndexForTestingOnly()) executionSnapshot, output, err := vm.Run( context, @@ -681,7 +740,7 @@ func Test_ProgramsDoubleCounting(t *testing.T) { require.Equal( t, - uint( + uint64( 1+ // import A 3+ // import B (import A, import A2) 4, // import C (import B (3), import A (already imported in this scope)) @@ -740,11 +799,89 @@ func Test_ProgramsDoubleCounting(t *testing.T) { require.Equal(t, 0, metrics.CacheMisses) }) + t.Run("update A to breaking change and ensure cache state", func(t *testing.T) { + // deploy contract A + txBody, err := updateContractTx("A", contractABreakingCode, addressA) + require.NoError(t, err) + executionSnapshot, output, err := vm.Run( + context, + fvm.Transaction( + txBody, + derivedBlockData.NextTxIndexForTestingOnly()), + snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + entryA := derivedBlockData.GetProgramForTestingOnly(contractALocation) + entryB := derivedBlockData.GetProgramForTestingOnly(contractBLocation) + entryC := derivedBlockData.GetProgramForTestingOnly(contractCLocation) + + require.Nil(t, entryA) + require.Nil(t, entryB) + require.Nil(t, entryC) + + cached := derivedBlockData.CachedPrograms() + require.Equal(t, 1, cached) + }) + + callCAfterItsBroken := func(snapshotTree snapshot.SnapshotTree) snapshot.SnapshotTree { + txBody, err := flow.NewTransactionBodyBuilder().SetScript( + []byte( + ` + import A from 0xa + import B from 0xb + import C from 0xc + transaction { + prepare() { + log(C.hello()) + } + }`, + )). + SetPayer(unittest.RandomAddressFixture()). + Build() + require.NoError(t, err) + + procCallC := fvm.Transaction(txBody, derivedBlockData.NextTxIndexForTestingOnly()) + + executionSnapshot, output, err := vm.Run( + context, + procCallC, + snapshotTree) + require.NoError(t, err) + require.Error(t, output.Err) + + entryA := derivedBlockData.GetProgramForTestingOnly(contractALocation) + entryA2 := derivedBlockData.GetProgramForTestingOnly(contractA2Location) + entryB := derivedBlockData.GetProgramForTestingOnly(contractBLocation) + entryC := derivedBlockData.GetProgramForTestingOnly(contractCLocation) + + require.NotNil(t, entryA) + require.NotNil(t, entryA2) // loaded due to "*" import in B + require.Nil(t, entryB) // failed to load + require.Nil(t, entryC) // failed to load + + cached := derivedBlockData.CachedPrograms() + require.Equal(t, 2, cached) + + return snapshotTree.Append(executionSnapshot) + } + + t.Run("Call C when broken", func(t *testing.T) { + metrics.Reset() + snapshotTree = callCAfterItsBroken(snapshotTree) + + // miss A, hit A, hit A2, hit A, hit A2, hit A + require.Equal(t, 5, metrics.CacheHits) + require.Equal(t, 1, metrics.CacheMisses) + }) + } -func callTx(name string, address flow.Address) *flow.TransactionBody { +func callTx(name string, address flow.Address) (*flow.TransactionBody, error) { - return flow.NewTransactionBody().SetScript( + return flow.NewTransactionBodyBuilder().SetScript( []byte(fmt.Sprintf(` import %s from %s transaction { @@ -752,31 +889,41 @@ func callTx(name string, address flow.Address) *flow.TransactionBody { log(%s.hello()) } }`, name, address.HexWithPrefix(), name)), - ) + ). + SetPayer(address). 
+ Build() } -func contractDeployTx(name, code string, address flow.Address) *flow.TransactionBody { +func contractDeployTx(name, code string, address flow.Address) (*flow.TransactionBody, error) { encoded := hex.EncodeToString([]byte(code)) - return flow.NewTransactionBody().SetScript( - []byte(fmt.Sprintf(`transaction { - prepare(signer: AuthAccount) { + return flow.NewTransactionBodyBuilder(). + SetScript( + []byte(fmt.Sprintf(`transaction { + prepare(signer: auth(AddContract) &Account) { signer.contracts.add(name: "%s", code: "%s".decodeHex()) } }`, name, encoded)), - ).AddAuthorizer(address) + ). + AddAuthorizer(address). + SetPayer(address). + Build() } -func updateContractTx(name, code string, address flow.Address) *flow.TransactionBody { +func updateContractTx(name, code string, address flow.Address) (*flow.TransactionBody, error) { encoded := hex.EncodeToString([]byte(code)) - return flow.NewTransactionBody().SetScript([]byte( - fmt.Sprintf(`transaction { - prepare(signer: AuthAccount) { - signer.contracts.update__experimental(name: "%s", code: "%s".decodeHex()) + return flow.NewTransactionBodyBuilder(). + SetScript( + []byte(fmt.Sprintf(`transaction { + prepare(signer: auth(UpdateContract) &Account) { + signer.contracts.update(name: "%s", code: "%s".decodeHex()) } }`, name, encoded)), - ).AddAuthorizer(address) + ). + AddAuthorizer(address). + SetPayer(address). + Build() } func compareExecutionSnapshots(t *testing.T, a, b *snapshot.ExecutionSnapshot) { @@ -798,6 +945,12 @@ func (m *metricsReporter) RuntimeTransactionInterpreted(duration time.Duration) func (m *metricsReporter) RuntimeSetNumberOfAccounts(count uint64) {} +func (m *metricsReporter) SetNumberOfDeployedCOAs(count uint64) {} + +func (m *metricsReporter) EVMTransactionExecuted(_ uint64, _ bool, _ bool) {} + +func (m *metricsReporter) EVMBlockExecuted(_ int, _ uint64, _ float64) {} + func (m *metricsReporter) RuntimeTransactionProgramsCacheMiss() { m.CacheMisses++ } diff --git a/fvm/environment/random_generator.go b/fvm/environment/random_generator.go new file mode 100644 index 00000000000..fde2c1a10f1 --- /dev/null +++ b/fvm/environment/random_generator.go @@ -0,0 +1,125 @@ +package environment + +import ( + "fmt" + + "github.com/onflow/crypto/random" + + "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/tracing" + "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/state/protocol/prg" +) + +// EntropyProvider represents an entropy (source of randomness) provider +type EntropyProvider interface { + // RandomSource provides a source of entropy that can be + // expanded into randoms (using a pseudo-random generator). + // The returned slice should have at least 128 bits of entropy. + // The function doesn't error in normal operations, any + // error should be treated as an exception. + RandomSource() ([]byte, error) +} + +type RandomGenerator interface { + // ReadRandom reads pseudo-random bytes into the input slice, using distributed randomness. 
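+	// All bytes are drawn from a single CSPRG seeded from the block's source
+	// of randomness and a per-transaction salt (see createPRG below).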
+ // The name follows the Cadence interface. + ReadRandom([]byte) error +} + +var _ RandomGenerator = (*randomGenerator)(nil) + +// randomGenerator implements RandomGenerator and is used +// for the transaction execution environment. +type randomGenerator struct { + tracer tracing.TracerSpan + entropySource EntropyProvider + salt []byte + prg random.Rand + isPRGCreated bool +} + +type ParseRestrictedRandomGenerator struct { + txnState state.NestedTransactionPreparer + impl RandomGenerator +} + +func NewParseRestrictedRandomGenerator( + txnState state.NestedTransactionPreparer, + impl RandomGenerator, +) RandomGenerator { + return ParseRestrictedRandomGenerator{ + txnState: txnState, + impl: impl, + } +} + +func (gen ParseRestrictedRandomGenerator) ReadRandom(buf []byte) error { + return parseRestrict1Arg( + gen.txnState, + trace.FVMEnvRandom, + gen.impl.ReadRandom, + buf) +} + +func NewRandomGenerator( + tracer tracing.TracerSpan, + entropySource EntropyProvider, + salt []byte, +) RandomGenerator { + gen := &randomGenerator{ + tracer: tracer, + entropySource: entropySource, + salt: salt, + isPRGCreated: false, // PRG is not created + } + + return gen +} + +func (gen *randomGenerator) createPRG() (random.Rand, error) { + // Use the protocol state source of randomness [SoR] for the current block's + // execution + source, err := gen.entropySource.RandomSource() + // `RandomSource` does not error in normal operations. + // Any error should be treated as an exception. + if err != nil { + return nil, fmt.Errorf("reading random source from state failed: %w", err) + } + + // Use the state/protocol PRG derivation from the source of randomness: + // - for the transaction execution case, the PRG used must be a CSPRG + // - use the state/protocol/prg customizer defined for the execution environment + // - use the salt as an extra diversifier of the CSPRG. Although this + // does not add any extra entropy to the output, it allows creating an independent + // PRG for each transaction or script. + csprg, err := prg.New(source, prg.ExecutionEnvironment, gen.salt) + if err != nil { + return nil, fmt.Errorf("failed to create a CSPRG from source: %w", err) + } + + return csprg, nil +} + +// ReadRandom reads pseudo-random bytes into the input slice using the underlying PRG (currently +// using a crypto-secure one). This function is not thread safe due to the gen.prg +// instance currently used. This is fine because a +// single transaction has a single RandomGenerator and is run in a single +// thread. +func (gen *randomGenerator) ReadRandom(buf []byte) error { + defer gen.tracer.StartExtensiveTracingChildSpan( + trace.FVMEnvRandom).End() + + // PRG creation is only done once.
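+	// It is done lazily on first use, since many transactions never request
+	// randomness and seeding the CSPRG is not free.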
+ if !gen.isPRGCreated { + newPRG, err := gen.createPRG() + if err != nil { + return err + } + gen.prg = newPRG + gen.isPRGCreated = true + } + + gen.prg.Read(buf) + return nil +} diff --git a/fvm/environment/random_generator_test.go b/fvm/environment/random_generator_test.go new file mode 100644 index 00000000000..85f1244e9fb --- /dev/null +++ b/fvm/environment/random_generator_test.go @@ -0,0 +1,96 @@ +package environment_test + +import ( + "encoding/binary" + "math" + mrand "math/rand" + "testing" + + "github.com/onflow/crypto/random" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/environment/mock" + "github.com/onflow/flow-go/fvm/tracing" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestRandomGenerator(t *testing.T) { + randomSourceHistoryProvider := &mock.EntropyProvider{} + randomSourceHistoryProvider.On("RandomSource").Return(unittest.RandomBytes(48), nil) + + getRandoms := func(txId []byte, N int) []uint64 { + // seed the RG with the same entropy source and transaction id + urg := environment.NewRandomGenerator( + tracing.NewTracerSpan(), + randomSourceHistoryProvider, + txId) + numbers := make([]uint64, N) + for i := 0; i < N; i++ { + var buffer [8]byte + err := urg.ReadRandom(buffer[:]) + require.NoError(t, err) + numbers[i] = binary.LittleEndian.Uint64(buffer[:]) + } + return numbers + } + + // basic randomness test to check outputs are "uniformly" spread over the + // output space + t.Run("randomness test", func(t *testing.T) { + for i := 0; i < 10; i++ { + txId := unittest.TransactionFixture().ID() + urg := environment.NewRandomGenerator( + tracing.NewTracerSpan(), + randomSourceHistoryProvider, + txId[:]) + + // make sure n is a power of 2 so that there is no bias in the last class + // n is a random power of 2 (from 2 to 2^10) + n := 1 << (1 + mrand.Intn(10)) + classWidth := (math.MaxUint64 / uint64(n)) + 1 + random.BasicDistributionTest(t, uint64(n), uint64(classWidth), func() (uint64, error) { + var buffer [8]byte + err := urg.ReadRandom(buffer[:]) + if err != nil { + return 0, err + } + return binary.LittleEndian.Uint64(buffer[:]), nil + }) + } + }) + + // tests that the generator is PRG-based and hence has deterministic outputs.
+ t.Run("PRG-based Random", func(t *testing.T) { + for i := 0; i < 10; i++ { + txId := unittest.TransactionFixture().ID() + N := 100 + r1 := getRandoms(txId[:], N) + r2 := getRandoms(txId[:], N) + require.Equal(t, r1, r2) + } + }) + + t.Run("transaction specific randomness", func(t *testing.T) { + txns := [][]uint64{} + for i := 0; i < 10; i++ { + txId := unittest.TransactionFixture().ID() + N := 2 + txns = append(txns, getRandoms(txId[:], N)) + } + + for i, txn := range txns { + for _, otherTxn := range txns[i+1:] { + require.NotEqual(t, txn, otherTxn) + } + } + }) +} + +func TestRandomSourceHistoryProvider(t *testing.T) { + t.Run("source length", func(t *testing.T) { + // Sanity check that entropy source is at least 128 bits + const minimumEntropy = 128 / 8 + require.GreaterOrEqual(t, environment.RandomSourceHistoryLength, minimumEntropy) + }) +} diff --git a/fvm/environment/script_info.go b/fvm/environment/script_info.go new file mode 100644 index 00000000000..638e5b61856 --- /dev/null +++ b/fvm/environment/script_info.go @@ -0,0 +1,32 @@ +package environment + +import ( + "github.com/onflow/flow-go/model/fingerprint" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/hash" +) + +type ScriptInfoParams struct { + ID flow.Identifier + Script []byte + Arguments [][]byte +} + +func (info ScriptInfoParams) Fingerprint() []byte { + return fingerprint.Fingerprint(struct { + Script []byte + Arguments [][]byte + }{ + Script: info.Script, + Arguments: info.Arguments, + }) +} + +func NewScriptInfoParams(code []byte, arguments [][]byte) *ScriptInfoParams { + info := &ScriptInfoParams{ + Script: code, + Arguments: arguments, + } + info.ID = flow.HashToID(hash.DefaultComputeHash(info.Fingerprint())) + return info +} diff --git a/fvm/environment/system_contracts.go b/fvm/environment/system_contracts.go index 06a14acd337..22860dd38ea 100644 --- a/fvm/environment/system_contracts.go +++ b/fvm/environment/system_contracts.go @@ -2,8 +2,8 @@ package environment import ( "github.com/onflow/cadence" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/sema" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/sema" "go.opentelemetry.io/otel/attribute" "github.com/onflow/flow-go/fvm/systemcontracts" @@ -12,15 +12,6 @@ import ( "github.com/onflow/flow-go/module/trace" ) -// ContractFunctionSpec specify all the information, except the function's -// address and arguments, needed to invoke the contract function. -type ContractFunctionSpec struct { - AddressFromChain func(flow.Chain) flow.Address - LocationName string - FunctionName string - ArgumentTypes []sema.Type -} - // SystemContracts provides methods for invoking system contract functions as // service account. type SystemContracts struct { @@ -75,8 +66,8 @@ func (sys *SystemContracts) Invoke( spec.ArgumentTypes, ) if err != nil { - sys.logger.Logger(). - Info(). + log := sys.logger.Logger() + log.Info(). Err(err). Str("contract", contractLocation.String()). Str("function", spec.FunctionName). 
@@ -86,8 +77,8 @@ func (sys *SystemContracts) Invoke( } func FlowFeesAddress(chain flow.Chain) flow.Address { - address, _ := chain.AddressAtIndex(FlowFeesAccountIndex) - return address + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + return sc.FlowFees.Address } func ServiceAddress(chain flow.Chain) flow.Address { @@ -99,7 +90,16 @@ var verifyPayersBalanceForTransactionExecutionSpec = ContractFunctionSpec{ LocationName: systemcontracts.ContractNameFlowFees, FunctionName: systemcontracts.ContractServiceAccountFunction_verifyPayersBalanceForTransactionExecution, ArgumentTypes: []sema.Type{ - sema.AuthAccountType, + sema.NewReferenceType( + nil, + sema.NewEntitlementSetAccess( + []*sema.EntitlementType{ + sema.BorrowValueType, + }, + sema.Conjunction, + ), + sema.AccountType, + ), sema.UInt64Type, sema.UInt64Type, }, @@ -131,7 +131,16 @@ var deductTransactionFeeSpec = ContractFunctionSpec{ LocationName: systemcontracts.ContractNameFlowFees, FunctionName: systemcontracts.ContractServiceAccountFunction_deductTransactionFee, ArgumentTypes: []sema.Type{ - sema.AuthAccountType, + sema.NewReferenceType( + nil, + sema.NewEntitlementSetAccess( + []*sema.EntitlementType{ + sema.BorrowValueType, + }, + sema.Conjunction, + ), + sema.AccountType, + ), sema.UInt64Type, sema.UInt64Type, }, @@ -160,8 +169,28 @@ var setupNewAccountSpec = ContractFunctionSpec{ LocationName: systemcontracts.ContractNameServiceAccount, FunctionName: systemcontracts.ContractServiceAccountFunction_setupNewAccount, ArgumentTypes: []sema.Type{ - sema.AuthAccountType, - sema.AuthAccountType, + sema.NewReferenceType( + nil, + sema.NewEntitlementSetAccess( + []*sema.EntitlementType{ + sema.SaveValueType, + sema.BorrowValueType, + sema.CapabilitiesType, + }, + sema.Conjunction, + ), + sema.AccountType, + ), + sema.NewReferenceType( + nil, + sema.NewEntitlementSetAccess( + []*sema.EntitlementType{ + sema.BorrowValueType, + }, + sema.Conjunction, + ), + sema.AccountType, + ), }, } @@ -207,7 +236,11 @@ var accountBalanceInvocationSpec = ContractFunctionSpec{ LocationName: systemcontracts.ContractNameServiceAccount, FunctionName: systemcontracts.ContractServiceAccountFunction_defaultTokenBalance, ArgumentTypes: []sema.Type{ - sema.PublicAccountType, + sema.NewReferenceType( + nil, + sema.UnauthorizedAccess, + sema.AccountType, + ), }, } diff --git a/fvm/environment/system_contracts_test.go b/fvm/environment/system_contracts_test.go index ca9ae5a23a5..9910f304614 100644 --- a/fvm/environment/system_contracts_test.go +++ b/fvm/environment/system_contracts_test.go @@ -4,9 +4,9 @@ import ( "testing" "github.com/onflow/cadence" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/sema" + "github.com/onflow/cadence/sema" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/environment" @@ -54,12 +54,14 @@ func TestSystemContractsInvoke(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + const chainID = flow.Mainnet + tracer := tracing.NewTracerSpan() runtimePool := reusableRuntime.NewCustomReusableCadenceRuntimePool( 0, runtime.Config{}, func(_ runtime.Config) runtime.Runtime { - return &testutil.TestInterpreterRuntime{ + return &testutil.TestRuntime{ InvokeContractFunc: tc.contractFunction, } }, @@ -70,7 +72,7 @@ func TestSystemContractsInvoke(t *testing.T) { }, ) invoker := environment.NewSystemContracts( - flow.Mainnet.Chain(), + chainID.Chain(), tracer, 
environment.NewProgramLogger( tracer, diff --git a/fvm/environment/tracer.go b/fvm/environment/tracer.go new file mode 100644 index 00000000000..f276286475e --- /dev/null +++ b/fvm/environment/tracer.go @@ -0,0 +1,16 @@ +package environment + +import ( + otelTrace "go.opentelemetry.io/otel/trace" + + "github.com/onflow/flow-go/fvm/tracing" + "github.com/onflow/flow-go/module/trace" +) + +// Tracer captures traces +type Tracer interface { + StartChildSpan( + name trace.SpanName, + options ...otelTrace.SpanStartOption, + ) tracing.TracerSpan +} diff --git a/fvm/environment/transaction_info.go b/fvm/environment/transaction_info.go index 25cf64baba4..6abfb367962 100644 --- a/fvm/environment/transaction_info.go +++ b/fvm/environment/transaction_info.go @@ -1,7 +1,7 @@ package environment import ( - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage/state" @@ -17,14 +17,18 @@ type TransactionInfoParams struct { TransactionFeesEnabled bool LimitAccountStorage bool + // RandomSourceHistoryCallAllowed is true if the transaction is allowed to call the `entropy` + // cadence function to get the entropy of that block. + RandomSourceHistoryCallAllowed bool } func DefaultTransactionInfoParams() TransactionInfoParams { // NOTE: TxIndex, TxId and TxBody are populated by NewTransactionEnv rather // than by Context. return TransactionInfoParams{ - TransactionFeesEnabled: false, - LimitAccountStorage: false, + TransactionFeesEnabled: false, + LimitAccountStorage: false, + RandomSourceHistoryCallAllowed: false, } } @@ -92,6 +96,8 @@ func (info ParseRestrictedTransactionInfo) GetSigningAccounts() ( info.impl.GetSigningAccounts) } +var _ TransactionInfo = &transactionInfo{} + type transactionInfo struct { params TransactionInfoParams diff --git a/fvm/environment/unsafe_random_generator.go b/fvm/environment/unsafe_random_generator.go deleted file mode 100644 index 548753d90ca..00000000000 --- a/fvm/environment/unsafe_random_generator.go +++ /dev/null @@ -1,156 +0,0 @@ -package environment - -import ( - "crypto/sha256" - "encoding/binary" - "fmt" - "hash" - "io" - "sync" - - "golang.org/x/crypto/hkdf" - - "github.com/onflow/flow-go/crypto/random" - "github.com/onflow/flow-go/fvm/errors" - "github.com/onflow/flow-go/fvm/storage/state" - "github.com/onflow/flow-go/fvm/tracing" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/trace" -) - -type UnsafeRandomGenerator interface { - // UnsafeRandom returns a random uint64 - UnsafeRandom() (uint64, error) -} - -type unsafeRandomGenerator struct { - tracer tracing.TracerSpan - - blockHeader *flow.Header - txnIndex uint32 - - prg random.Rand - createOnce sync.Once - createErr error -} - -type ParseRestrictedUnsafeRandomGenerator struct { - txnState state.NestedTransactionPreparer - impl UnsafeRandomGenerator -} - -func NewParseRestrictedUnsafeRandomGenerator( - txnState state.NestedTransactionPreparer, - impl UnsafeRandomGenerator, -) UnsafeRandomGenerator { - return ParseRestrictedUnsafeRandomGenerator{ - txnState: txnState, - impl: impl, - } -} - -func (gen ParseRestrictedUnsafeRandomGenerator) UnsafeRandom() ( - uint64, - error, -) { - return parseRestrict1Ret( - gen.txnState, - trace.FVMEnvUnsafeRandom, - gen.impl.UnsafeRandom) -} - -func NewUnsafeRandomGenerator( - tracer tracing.TracerSpan, - blockHeader *flow.Header, - txnIndex uint32, -) UnsafeRandomGenerator { - gen := &unsafeRandomGenerator{ - tracer: tracer, - blockHeader: 
blockHeader, - txnIndex: txnIndex, - } - - return gen -} - -func (gen *unsafeRandomGenerator) createRandomGenerator() ( - random.Rand, - error, -) { - if gen.blockHeader == nil { - return nil, nil - } - - // The block header ID is currently used as the entropy source. - // This should evolve to become the beacon signature (safer entropy - // source than the block ID) - source := gen.blockHeader.ID() - - // Provide additional randomness for each transaction. - salt := make([]byte, 4) - binary.LittleEndian.PutUint32(salt, gen.txnIndex) - - // Extract the entropy from the source and expand it into the required - // seed length. Note that we can use any implementation which provide - // similar properties. - hkdf := hkdf.New( - func() hash.Hash { return sha256.New() }, - source[:], - salt, - nil) - seed := make([]byte, random.Chacha20SeedLen) - _, err := io.ReadFull(hkdf, seed) - if err != nil { - return nil, fmt.Errorf("extracting seed with HKDF failed: %w", err) - } - - // initialize a fresh crypto-secure PRG with the seed (here ChaCha20) - // This PRG provides all outputs of Cadence UnsafeRandom. - prg, err := random.NewChacha20PRG(seed, []byte{}) - if err != nil { - return nil, fmt.Errorf("creating random generator failed: %w", err) - } - - return prg, nil -} - -// maybeCreateRandomGenerator seeds the pseudo-random number generator using the -// block header ID and transaction index as an entropy source. The seed -// function is currently called for each tranaction, the PRG is used to -// provide all the randoms the transaction needs through UnsafeRandom. -// -// This allows lazy seeding of the random number generator, since not a lot of -// transactions/scripts use it and the time it takes to seed it is not -// negligible. -func (gen *unsafeRandomGenerator) maybeCreateRandomGenerator() error { - gen.createOnce.Do(func() { - gen.prg, gen.createErr = gen.createRandomGenerator() - }) - - return gen.createErr -} - -// UnsafeRandom returns a random uint64 using the underlying PRG (currently -// using a crypto-secure one). This is not thread safe, due to the gen.prg -// instance currently used. Its also not thread safe because each thread needs -// to be deterministically seeded with a different seed. This is Ok because a -// single transaction has a single UnsafeRandomGenerator and is run in a single -// thread. -func (gen *unsafeRandomGenerator) UnsafeRandom() (uint64, error) { - defer gen.tracer.StartExtensiveTracingChildSpan( - trace.FVMEnvUnsafeRandom).End() - - // The internal seeding is only done once. 
- err := gen.maybeCreateRandomGenerator() - if err != nil { - return 0, err - } - - if gen.prg == nil { - return 0, errors.NewOperationNotSupportedError("UnsafeRandom") - } - - buf := make([]byte, 8) - gen.prg.Read(buf) // Note: prg.Read does not return error - return binary.LittleEndian.Uint64(buf), nil -} diff --git a/fvm/environment/unsafe_random_generator_test.go b/fvm/environment/unsafe_random_generator_test.go deleted file mode 100644 index bb6f13b87e0..00000000000 --- a/fvm/environment/unsafe_random_generator_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package environment_test - -import ( - "fmt" - "math" - mrand "math/rand" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gonum.org/v1/gonum/stat" - - "github.com/onflow/flow-go/fvm/environment" - "github.com/onflow/flow-go/fvm/tracing" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -// TODO: these functions are copied from flow-go/crypto/rand -// Once the new flow-go/crypto/ module version is tagged, flow-go would upgrade -// to the new version and import these functions -func BasicDistributionTest(t *testing.T, n uint64, classWidth uint64, randf func() (uint64, error)) { - // sample size should ideally be a high number multiple of `n` - // but if `n` is too small, we could use a small sample size so that the test - // isn't too slow - sampleSize := 1000 * n - if n < 100 { - sampleSize = (80000 / n) * n // highest multiple of n less than 80000 - } - distribution := make([]float64, n) - // populate the distribution - for i := uint64(0); i < sampleSize; i++ { - r, err := randf() - require.NoError(t, err) - if n*classWidth != 0 { - require.Less(t, r, n*classWidth) - } - distribution[r/classWidth] += 1.0 - } - EvaluateDistributionUniformity(t, distribution) -} - -func EvaluateDistributionUniformity(t *testing.T, distribution []float64) { - tolerance := 0.05 - stdev := stat.StdDev(distribution, nil) - mean := stat.Mean(distribution, nil) - assert.Greater(t, tolerance*mean, stdev, fmt.Sprintf("basic randomness test failed: n: %d, stdev: %v, mean: %v", len(distribution), stdev, mean)) -} - -func TestUnsafeRandomGenerator(t *testing.T) { - bh := unittest.BlockHeaderFixtureOnChain(flow.Mainnet.Chain().ChainID()) - - getRandoms := func(txnIndex uint32, N int) []uint64 { - // seed the RG with the same block header - urg := environment.NewUnsafeRandomGenerator( - tracing.NewTracerSpan(), - bh, - txnIndex) - numbers := make([]uint64, N) - for i := 0; i < N; i++ { - u, err := urg.UnsafeRandom() - require.NoError(t, err) - numbers[i] = u - } - return numbers - } - - // basic randomness test to check outputs are "uniformly" spread over the - // output space - t.Run("randomness test", func(t *testing.T) { - for txnIndex := uint32(0); txnIndex < 10; txnIndex++ { - urg := environment.NewUnsafeRandomGenerator( - tracing.NewTracerSpan(), - bh, - txnIndex) - - // make sure n is a power of 2 so that there is no bias in the last class - // n is a random power of 2 (from 2 to 2^10) - n := 1 << (1 + mrand.Intn(10)) - classWidth := (math.MaxUint64 / uint64(n)) + 1 - BasicDistributionTest(t, uint64(n), uint64(classWidth), urg.UnsafeRandom) - } - }) - - // tests that unsafeRandom is PRG based and hence has deterministic outputs. 
- t.Run("PRG-based UnsafeRandom", func(t *testing.T) { - for txnIndex := uint32(0); txnIndex < 10; txnIndex++ { - N := 100 - r1 := getRandoms(txnIndex, N) - r2 := getRandoms(txnIndex, N) - require.Equal(t, r1, r2) - } - }) - - t.Run("transaction specific randomness", func(t *testing.T) { - txns := [][]uint64{} - for txnIndex := uint32(0); txnIndex < 10; txnIndex++ { - N := 100 - txns = append(txns, getRandoms(txnIndex, N)) - } - - for i, txn := range txns { - for _, otherTxn := range txns[i+1:] { - require.NotEqual(t, txn, otherTxn) - } - } - }) -} diff --git a/fvm/environment/uuids.go b/fvm/environment/uuids.go index a6b13dcbf28..98e69e4dddc 100644 --- a/fvm/environment/uuids.go +++ b/fvm/environment/uuids.go @@ -1,9 +1,13 @@ package environment import ( + "crypto/sha256" "encoding/binary" "fmt" + "github.com/onflow/cadence/common" + "github.com/rs/zerolog" + "github.com/onflow/flow-go/fvm/storage/state" "github.com/onflow/flow-go/fvm/tracing" "github.com/onflow/flow-go/model/flow" @@ -11,6 +15,27 @@ import ( "github.com/onflow/flow-go/utils/slices" ) +// uuid is partitioned with 3rd byte for compatibility reasons. +// (database types and Javascript safe integer limits) +// +// counter(C) is 7 bytes, paritition(P) is 1 byte +// uuid is assembled by first reading the counter from the register value of the partitioned register, +// and then left shifting the 6th and 7th byte, and placing the partition byte at 6th byte: +// C7 C6 P C5 C4 C3 C2 C1 +// +// Until resource ids start filling the bits above the 48th one, dapps will have enough time +// to switch to a larger data type. + +const ( + // The max value for any is uuid partition is MaxUint56, since one byte + // in the uuid is used for partitioning. + MaxUint56 = (uint64(1) << 56) - 1 + + // Start warning when there's only a single high bit left. This should give + // us plenty of time to migrate to larger counters. + Uint56OverflowWarningThreshold = (uint64(1) << 55) - 1 +) + type UUIDGenerator interface { GenerateUUID() (uint64, error) } @@ -39,65 +64,155 @@ func (generator ParseRestrictedUUIDGenerator) GenerateUUID() (uint64, error) { type uUIDGenerator struct { tracer tracing.TracerSpan + log zerolog.Logger meter Meter txnState state.NestedTransactionPreparer + + blockHeader *flow.Header + txnIndex uint32 + + initialized bool + partition byte + registerId flow.RegisterID +} + +func uuidPartition(blockId flow.Identifier, txnIndex uint32) byte { + // Partitioning by txnIndex ensures temporally neighboring transactions do + // not share registers / conflict with each other. + // + // Since all blocks will have a transaction at txnIndex 0 but not + // necessarily a transaction at txnIndex 255, if we assign partition based + // only on txnIndex, partition 0's counter (and other low-valued + // partitions' counters) will fill up much more quickly than high-valued + // partitions' counters. Therefore, a deterministically random offset is + // used to ensure the partitioned counters are roughly balanced. Any byte + // in the sha hash is sufficiently random/uniform for this purpose (Note that + // block Id is already a sha hash, but its hash implementation may change + // underneath us). + // + // Note that since partition 0 reuses the legacy counter, its counter is + // much further ahead than the other partitions. If partition 0's counter + // is in danager of overflowing, use variants of "the power of two random + // choices" to shed load to other counters. + // + // The explicit mod is not really needed, but is there for completeness. 
+	partitionOffset := sha256.Sum256(blockId[:])[0]
+	return byte((uint32(partitionOffset) + txnIndex) % 256)
+}

 func NewUUIDGenerator(
 	tracer tracing.TracerSpan,
+	log zerolog.Logger,
 	meter Meter,
 	txnState state.NestedTransactionPreparer,
+	blockHeader *flow.Header,
+	txnIndex uint32,
 ) *uUIDGenerator {
 	return &uUIDGenerator{
-		tracer:   tracer,
-		meter:    meter,
-		txnState: txnState,
+		tracer:      tracer,
+		log:         log,
+		meter:       meter,
+		txnState:    txnState,
+		blockHeader: blockHeader,
+		txnIndex:    txnIndex,
+		initialized: false,
 	}
 }

-// GetUUID reads uint64 byte value for uuid from the state
-func (generator *uUIDGenerator) getUUID() (uint64, error) {
-	stateBytes, err := generator.txnState.Get(flow.UUIDRegisterID)
+// getCounter reads the uint64 value from the partitioned uuid register.
+func (generator *uUIDGenerator) getCounter() (uint64, error) {
+	stateBytes, err := generator.txnState.Get(generator.registerId)
 	if err != nil {
-		return 0, fmt.Errorf("cannot get uuid byte from state: %w", err)
+		return 0, fmt.Errorf(
+			"cannot get uuid partition %d byte from state: %w",
+			generator.partition,
+			err)
 	}
 	bytes := slices.EnsureByteSliceSize(stateBytes, 8)
 	return binary.BigEndian.Uint64(bytes), nil
 }

-// SetUUID sets a new uint64 byte value
-func (generator *uUIDGenerator) setUUID(uuid uint64) error {
+// setCounter sets a new uint56 value into the partitioned uuid register.
+func (generator *uUIDGenerator) setCounter(
+	value uint64,
+) error {
+	if value > Uint56OverflowWarningThreshold {
+		if value > MaxUint56 {
+			return fmt.Errorf(
+				"uuid partition %d overflowed",
+				generator.partition)
+		}
+
+		generator.log.Warn().
+			Int("partition", int(generator.partition)).
+			Uint64("value", value).
+			Msg("uuid partition is running out of bits")
+	}
+
 	bytes := make([]byte, 8)
-	binary.BigEndian.PutUint64(bytes, uuid)
-	err := generator.txnState.Set(flow.UUIDRegisterID, bytes)
+	binary.BigEndian.PutUint64(bytes, value)
+	err := generator.txnState.Set(generator.registerId, bytes)
 	if err != nil {
-		return fmt.Errorf("cannot set uuid byte to state: %w", err)
+		return fmt.Errorf(
+			"cannot set uuid partition %d byte to state: %w",
+			generator.partition,
+			err)
 	}
 	return nil
 }
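As an editorial aside, here is a small, self-contained sketch of the partition derivation added above. It is not part of the diff: the real code takes a `flow.Identifier` (already a 32-byte hash); the sketch substitutes a hypothetical 32-byte block ID.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// uuidPartitionSketch mirrors the uuidPartition logic from the diff:
// a deterministic per-block offset (the first byte of sha256 over the
// block ID) plus the transaction index, wrapping mod 256.
func uuidPartitionSketch(blockId [32]byte, txnIndex uint32) byte {
	partitionOffset := sha256.Sum256(blockId[:])[0]
	return byte((uint32(partitionOffset) + txnIndex) % 256)
}

func main() {
	blockId := sha256.Sum256([]byte("hypothetical block")) // stand-in block ID

	// Neighboring transactions land on neighboring partitions...
	fmt.Println(uuidPartitionSketch(blockId, 0), uuidPartitionSketch(blockId, 1))

	// ...and txnIndex 256 wraps around to the same partition as txnIndex 0.
	fmt.Println(uuidPartitionSketch(blockId, 0) == uuidPartitionSketch(blockId, 256)) // true
}
```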
+func (generator *uUIDGenerator) maybeInitializePartition() {
+	if generator.initialized {
+		return
+	}
+	generator.initialized = true
+
+	// NOTE: block header is not set for scripts. We'll just use partition 0 in
+	// this case.
+	if generator.blockHeader == nil {
+		generator.partition = 0
+	} else {
+		generator.partition = uuidPartition(
+			generator.blockHeader.ID(),
+			generator.txnIndex)
+	}
+
+	generator.registerId = flow.UUIDRegisterID(generator.partition)
+}
+
 // GenerateUUID generates a new uuid and persist the data changes into state
 func (generator *uUIDGenerator) GenerateUUID() (uint64, error) {
 	defer generator.tracer.StartExtensiveTracingChildSpan(
 		trace.FVMEnvGenerateUUID).End()

 	err := generator.meter.MeterComputation(
-		ComputationKindGenerateUUID,
-		1)
+		common.ComputationUsage{
+			Kind:      ComputationKindGenerateUUID,
+			Intensity: 1,
+		},
+	)
 	if err != nil {
 		return 0, fmt.Errorf("generate uuid failed: %w", err)
 	}

-	uuid, err := generator.getUUID()
+	generator.maybeInitializePartition()
+
+	counter, err := generator.getCounter()
 	if err != nil {
 		return 0, fmt.Errorf("cannot generate UUID: %w", err)
 	}

-	err = generator.setUUID(uuid + 1)
+	err = generator.setCounter(counter + 1)
 	if err != nil {
 		return 0, fmt.Errorf("cannot generate UUID: %w", err)
 	}
-	return uuid, nil
+
+	// Since the partition counter only goes up to MaxUint56, we can
+	// assemble a UUID value from the partition (P) and the counter (C).
+	// Note: partition (P) is represented by the 6th byte
+	// (C7 C6) | P | (C5 C4 C3 C2 C1)
+	return ((counter & 0xFF_FF00_0000_0000) << 8) | (uint64(generator.partition) << 40) | (counter & 0xFF_FFFF_FFFF), nil
+
 }
diff --git a/fvm/environment/uuids_test.go b/fvm/environment/uuids_test.go
index f1fd1b6ce10..8166f715cb1 100644
--- a/fvm/environment/uuids_test.go
+++ b/fvm/environment/uuids_test.go
@@ -1,71 +1,388 @@
 package environment

 import (
+	"fmt"
 	"testing"

+	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/require"

 	"github.com/onflow/flow-go/fvm/storage/state"
 	"github.com/onflow/flow-go/fvm/tracing"
+	"github.com/onflow/flow-go/model/flow"
 )

-func TestUUIDs_GetAndSetUUID(t *testing.T) {
+func TestUUIDPartition(t *testing.T) {
+	blockHeader := &flow.Header{}
+
+	usedPartitions := map[byte]struct{}{}
+
+	// With enough samples, all partitions should be used. (The first 1500 blocks
+	// only use 254 partitions)
+	for numBlocks := 0; numBlocks < 2500; numBlocks++ {
+		blockId := blockHeader.ID()
+
+		partition0 := uuidPartition(blockId, 0)
+		usedPartitions[partition0] = struct{}{}
+
+		for txnIndex := 0; txnIndex < 256; txnIndex++ {
+			partition := uuidPartition(blockId, uint32(txnIndex))
+
+			// Ensure neighboring transactions use neighboring partitions.
+			require.Equal(t, partition, partition0+byte(txnIndex))
+
+			// Ensure wrap around.
+ for i := 0; i < 5; i++ { + require.Equal( + t, + partition, + uuidPartition(blockId, uint32(txnIndex+i*256))) + } + } + + blockHeader.ParentID = blockId + } + + require.Len(t, usedPartitions, 256) +} + +func TestUUIDGeneratorInitializePartitionNoHeader(t *testing.T) { + for txnIndex := uint32(0); txnIndex < 256; txnIndex++ { + uuids := NewUUIDGenerator( + tracing.NewTracerSpan(), + zerolog.Nop(), + nil, + nil, + nil, + txnIndex) + require.False(t, uuids.initialized) + + uuids.maybeInitializePartition() + + require.True(t, uuids.initialized) + require.Equal(t, uuids.partition, byte(0)) + require.Equal(t, uuids.registerId, flow.UUIDRegisterID(byte(0))) + } +} + +func TestUUIDGeneratorInitializePartition(t *testing.T) { + blockHeader := &flow.Header{} + + for numBlocks := 0; numBlocks < 10; numBlocks++ { + blockId := blockHeader.ID() + + for txnIndex := uint32(0); txnIndex < 256; txnIndex++ { + uuids := NewUUIDGenerator( + tracing.NewTracerSpan(), + zerolog.Nop(), + nil, + nil, + blockHeader, + txnIndex) + require.False(t, uuids.initialized) + + uuids.maybeInitializePartition() + + require.True(t, uuids.initialized) + + expectedPartition := uuidPartition(blockId, txnIndex) + + require.Equal(t, uuids.partition, expectedPartition) + require.Equal( + t, + uuids.registerId, + flow.UUIDRegisterID(expectedPartition)) + } + + blockHeader.ParentID = blockId + } +} + +func TestUUIDGeneratorIdGeneration(t *testing.T) { + for txnIndex := uint32(0); txnIndex < 256; txnIndex++ { + testUUIDGenerator(t, &flow.Header{}, txnIndex) + } +} + +func testUUIDGenerator(t *testing.T, blockHeader *flow.Header, txnIndex uint32) { + generator := NewUUIDGenerator( + tracing.NewTracerSpan(), + zerolog.Nop(), + nil, + nil, + blockHeader, + txnIndex) + generator.maybeInitializePartition() + + partition := generator.partition + partitionMinValue := uint64(partition) << 40 + maxUint56 := uint64(0xFFFFFFFFFFFFFF) + maxUint56Split := uint64(0xFFFF00FFFFFFFFFF) + + t.Run( + fmt.Sprintf("basic get and set uint (partition: %d)", partition), + func(t *testing.T) { + txnState := state.NewTransactionState(nil, state.DefaultParameters()) + uuidsA := NewUUIDGenerator( + tracing.NewTracerSpan(), + zerolog.Nop(), + NewMeter(txnState), + txnState, + blockHeader, + txnIndex) + uuidsA.maybeInitializePartition() + + uuid, err := uuidsA.getCounter() // start from zero + require.NoError(t, err) + require.Equal(t, uint64(0), uuid) + + err = uuidsA.setCounter(5) + require.NoError(t, err) + + // create new UUIDs instance + uuidsB := NewUUIDGenerator( + tracing.NewTracerSpan(), + zerolog.Nop(), + NewMeter(txnState), + txnState, + blockHeader, + txnIndex) + uuidsB.maybeInitializePartition() + + uuid, err = uuidsB.getCounter() // should read saved value + require.NoError(t, err) + + require.Equal(t, uint64(5), uuid) + }) + + t.Run( + fmt.Sprintf("basic id generation (partition: %d)", partition), + func(t *testing.T) { + txnState := state.NewTransactionState(nil, state.DefaultParameters()) + genA := NewUUIDGenerator( + tracing.NewTracerSpan(), + zerolog.Nop(), + NewMeter(txnState), + txnState, + blockHeader, + txnIndex) + + uuidA, err := genA.GenerateUUID() + require.NoError(t, err) + uuidB, err := genA.GenerateUUID() + require.NoError(t, err) + uuidC, err := genA.GenerateUUID() + require.NoError(t, err) + + require.Equal(t, partitionMinValue, uuidA) + require.Equal(t, partitionMinValue+1, uuidB) + require.Equal(t, partitionMinValue|1, uuidB) + require.Equal(t, partitionMinValue+2, uuidC) + require.Equal(t, partitionMinValue|2, uuidC) + + // 
Create new generator instance from same ledger + genB := NewUUIDGenerator( + tracing.NewTracerSpan(), + zerolog.Nop(), + NewMeter(txnState), + txnState, + blockHeader, + txnIndex) + + uuidD, err := genB.GenerateUUID() + require.NoError(t, err) + uuidE, err := genB.GenerateUUID() + require.NoError(t, err) + uuidF, err := genB.GenerateUUID() + require.NoError(t, err) + + require.Equal(t, partitionMinValue+3, uuidD) + require.Equal(t, partitionMinValue|3, uuidD) + require.Equal(t, partitionMinValue+4, uuidE) + require.Equal(t, partitionMinValue|4, uuidE) + require.Equal(t, partitionMinValue+5, uuidF) + require.Equal(t, partitionMinValue|5, uuidF) + }) + + t.Run( + fmt.Sprintf("setCounter overflows (partition: %d)", partition), + func(t *testing.T) { + txnState := state.NewTransactionState(nil, state.DefaultParameters()) + uuids := NewUUIDGenerator( + tracing.NewTracerSpan(), + zerolog.Nop(), + NewMeter(txnState), + txnState, + blockHeader, + txnIndex) + uuids.maybeInitializePartition() + + err := uuids.setCounter(maxUint56) + require.NoError(t, err) + + value, err := uuids.getCounter() + require.NoError(t, err) + require.Equal(t, value, maxUint56) + + err = uuids.setCounter(maxUint56 + 1) + require.ErrorContains(t, err, "overflowed") + + value, err = uuids.getCounter() + require.NoError(t, err) + require.Equal(t, value, maxUint56) + }) + + t.Run( + fmt.Sprintf("id generation overflows (partition: %d)", partition), + func(t *testing.T) { + txnState := state.NewTransactionState(nil, state.DefaultParameters()) + uuids := NewUUIDGenerator( + tracing.NewTracerSpan(), + zerolog.Nop(), + NewMeter(txnState), + txnState, + blockHeader, + txnIndex) + uuids.maybeInitializePartition() + + err := uuids.setCounter(maxUint56 - 1) + require.NoError(t, err) + + value, err := uuids.GenerateUUID() + require.NoError(t, err) + require.Equal(t, value, partitionMinValue+maxUint56Split-1) + require.Equal(t, value, partitionMinValue|(maxUint56Split-1)) + + value, err = uuids.getCounter() + require.NoError(t, err) + require.Equal(t, value, maxUint56) + + _, err = uuids.GenerateUUID() + require.ErrorContains(t, err, "overflowed") + + value, err = uuids.getCounter() + require.NoError(t, err) + require.Equal(t, value, maxUint56) + }) +} + +func TestUUIDGeneratorHardcodedPartitionIdGeneration(t *testing.T) { txnState := state.NewTransactionState(nil, state.DefaultParameters()) - uuidsA := NewUUIDGenerator( + uuids := NewUUIDGenerator( tracing.NewTracerSpan(), + zerolog.Nop(), NewMeter(txnState), - txnState) + txnState, + nil, + 0) - uuid, err := uuidsA.getUUID() // start from zero + // Hardcoded the partition to check for exact bytes + uuids.initialized = true + uuids.partition = 0xde + uuids.registerId = flow.UUIDRegisterID(0xde) + + value, err := uuids.GenerateUUID() require.NoError(t, err) - require.Equal(t, uint64(0), uuid) + require.Equal(t, value, uint64(0x0000de0000000000)) - err = uuidsA.setUUID(5) + value, err = uuids.getCounter() require.NoError(t, err) + require.Equal(t, value, uint64(1)) - // create new UUIDs instance - uuidsB := NewUUIDGenerator( - tracing.NewTracerSpan(), - NewMeter(txnState), - txnState) - uuid, err = uuidsB.getUUID() // should read saved value + value, err = uuids.GenerateUUID() require.NoError(t, err) + require.Equal(t, value, uint64(0x0000de0000000001)) - require.Equal(t, uint64(5), uuid) -} + value, err = uuids.getCounter() + require.NoError(t, err) + require.Equal(t, value, uint64(2)) -func Test_GenerateUUID(t *testing.T) { - txnState := state.NewTransactionState(nil, 
state.DefaultParameters())
-	genA := NewUUIDGenerator(
-		tracing.NewTracerSpan(),
-		NewMeter(txnState),
-		txnState)
+	value, err = uuids.GenerateUUID()
+	require.NoError(t, err)
+	require.Equal(t, value, uint64(0x0000de0000000002))

-	uuidA, err := genA.GenerateUUID()
+	value, err = uuids.getCounter()
 	require.NoError(t, err)
-	uuidB, err := genA.GenerateUUID()
+	require.Equal(t, value, uint64(3))
+
+	// pretend we incremented the counter up to cafBad
+	cafBad := uint64(0x1c2a3f4b5a6d70)
+	decafBad := uint64(0x1c2ade3f4b5a6d70)
+
+	err = uuids.setCounter(cafBad)
 	require.NoError(t, err)
-	uuidC, err := genA.GenerateUUID()
+
+	for i := 0; i < 5; i++ {
+		value, err = uuids.GenerateUUID()
+		require.NoError(t, err)
+		require.Equal(t, value, decafBad+uint64(i))
+	}
+
+	value, err = uuids.getCounter()
 	require.NoError(t, err)
+	require.Equal(t, value, cafBad+uint64(5))

-	require.Equal(t, uint64(0), uuidA)
-	require.Equal(t, uint64(1), uuidB)
-	require.Equal(t, uint64(2), uuidC)
+	// pretend we incremented the counter up to overflow - 2
+	maxUint56Minus2 := uint64(0xfffffffffffffd)
+	err = uuids.setCounter(maxUint56Minus2)
+	require.NoError(t, err)
+
+	value, err = uuids.GenerateUUID()
+	require.NoError(t, err)
+	require.Equal(t, value, uint64(0xffffdefffffffffd))

-	// Create new generator instance from same ledger
-	genB := NewUUIDGenerator(
+	value, err = uuids.getCounter()
+	require.NoError(t, err)
+	require.Equal(t, value, maxUint56Minus2+1)
+
+	value, err = uuids.GenerateUUID()
+	require.NoError(t, err)
+	require.Equal(t, value, uint64(0xffffdefffffffffe))
+
+	value, err = uuids.getCounter()
+	require.NoError(t, err)
+	require.Equal(t, value, maxUint56Minus2+2)
+
+	_, err = uuids.GenerateUUID()
+	require.ErrorContains(t, err, "overflowed")
+
+	value, err = uuids.getCounter()
+	require.NoError(t, err)
+	require.Equal(t, value, maxUint56Minus2+2)
+}
+
+func TestContinuation(t *testing.T) {
+	txnState := state.NewTransactionState(nil, state.DefaultParameters())
+	uuids := NewUUIDGenerator(
 		tracing.NewTracerSpan(),
+		zerolog.Nop(),
 		NewMeter(txnState),
-		txnState)
+		txnState,
+		nil,
+		0)
+
+	// Hardcoded the partition to check for exact bytes
+	uuids.initialized = true
+	uuids.partition = 0x01
+	uuids.registerId = flow.UUIDRegisterID(0x01)
+
+	value, err := uuids.GenerateUUID()
+	require.NoError(t, err)
+	require.Equal(t, value, uint64(0x0000010000000000))

-	uuidD, err := genB.GenerateUUID()
+	err = uuids.setCounter(0xFFFFFFFFFF)
 	require.NoError(t, err)
-	uuidE, err := genB.GenerateUUID()
+
+	value, err = uuids.GenerateUUID()
 	require.NoError(t, err)
-	uuidF, err := genB.GenerateUUID()
+	require.Equal(t, value, uint64(0x000001FFFFFFFFFF))
+
+	value, err = uuids.GenerateUUID()
+	require.NoError(t, err)
+	require.Equal(t, value, uint64(0x0001010000000000))
+
+	value, err = uuids.GenerateUUID()
 	require.NoError(t, err)
+	require.Equal(t, value, uint64(0x0001010000000001))

-	require.Equal(t, uint64(3), uuidD)
-	require.Equal(t, uint64(4), uuidE)
-	require.Equal(t, uint64(5), uuidF)
 }
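To make the `(C7 C6) | P | (C5 C4 C3 C2 C1)` bit layout concrete, here is a standalone sketch of the assembly expression from `GenerateUUID`, checked against the vector used by the hardcoded-partition test above (counter `0x1c2a3f4b5a6d70` with partition `0xde` yields `0x1c2ade3f4b5a6d70`). The helper name is hypothetical; the real code inlines the expression.

```go
package main

import "fmt"

// assembleUUID mirrors the return expression in GenerateUUID: the top two
// counter bytes are shifted left by one byte, the partition byte is placed
// at the 6th byte, and the low five counter bytes stay in place.
func assembleUUID(counter uint64, partition byte) uint64 {
	return ((counter & 0xFF_FF00_0000_0000) << 8) |
		(uint64(partition) << 40) |
		(counter & 0xFF_FFFF_FFFF)
}

func main() {
	fmt.Printf("%#x\n", assembleUUID(0x1c2a3f4b5a6d70, 0xde)) // 0x1c2ade3f4b5a6d70
	fmt.Printf("%#x\n", assembleUUID(0, 0xde))                // 0xde0000000000
}
```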
diff --git a/fvm/environment/value_store.go b/fvm/environment/value_store.go
index 8113de6762c..795d56ebe23 100644
--- a/fvm/environment/value_store.go
+++ b/fvm/environment/value_store.go
@@ -1,9 +1,11 @@
 package environment

 import (
+	"bytes"
 	"fmt"

 	"github.com/onflow/atree"
+	"github.com/onflow/cadence/common"

 	"github.com/onflow/flow-go/fvm/errors"
 	"github.com/onflow/flow-go/fvm/storage/state"
@@ -20,7 +22,7 @@ type ValueStore interface {

 	ValueExists(owner []byte, key []byte) (bool, error)

-	AllocateStorageIndex(owner []byte) (atree.StorageIndex, error)
+	AllocateSlabIndex(owner []byte) (atree.SlabIndex, error)
 }

 type ParseRestrictedValueStore struct {
@@ -82,16 +84,16 @@ func (store ParseRestrictedValueStore) ValueExists(
 		key)
 }

-func (store ParseRestrictedValueStore) AllocateStorageIndex(
+func (store ParseRestrictedValueStore) AllocateSlabIndex(
 	owner []byte,
 ) (
-	atree.StorageIndex,
+	atree.SlabIndex,
 	error,
 ) {
 	return parseRestrict1Arg1Ret(
 		store.txnState,
-		trace.FVMEnvAllocateStorageIndex,
-		store.impl.AllocateStorageIndex,
+		trace.FVMEnvAllocateSlabIndex,
+		store.impl.AllocateSlabIndex,
 		owner)
 }

@@ -133,14 +135,18 @@ func (store *valueStore) GetValue(
 		return nil, fmt.Errorf("get value failed: %w", err)
 	}

-	err = store.meter.MeterComputation(ComputationKindGetValue, uint(len(v)))
+	err = store.meter.MeterComputation(
+		common.ComputationUsage{
+			Kind:      ComputationKindGetValue,
+			Intensity: uint64(len(v)),
+		},
+	)
 	if err != nil {
 		return nil, fmt.Errorf("get value failed: %w", err)
 	}
 	return v, nil
 }

-// TODO disable SetValue for scripts, right now the view changes are discarded
 func (store *valueStore) SetValue(
 	owner []byte,
 	keyBytes []byte,
@@ -153,9 +159,21 @@ func (store *valueStore) SetValue(
 		return errors.NewInvalidInternalStateAccessError(id, "modify")
 	}

-	err := store.meter.MeterComputation(
-		ComputationKindSetValue,
-		uint(len(value)))
+	oldValue, err := store.accounts.GetValue(id)
+	if err != nil {
+		return fmt.Errorf("get value failed: %w", err)
+	}
+	// no-op write
+	if bytes.Equal(oldValue, value) {
+		return nil
+	}
+
+	err = store.meter.MeterComputation(
+		common.ComputationUsage{
+			Kind:      ComputationKindSetValue,
+			Intensity: uint64(len(value)),
+		},
+	)
 	if err != nil {
 		return fmt.Errorf("set value failed: %w", err)
 	}
@@ -176,7 +194,12 @@ func (store *valueStore) ValueExists(
 ) {
 	defer store.tracer.StartChildSpan(trace.FVMEnvValueExists).End()

-	err = store.meter.MeterComputation(ComputationKindValueExists, 1)
+	err = store.meter.MeterComputation(
+		common.ComputationUsage{
+			Kind:      ComputationKindValueExists,
+			Intensity: 1,
+		},
+	)
 	if err != nil {
 		return false, fmt.Errorf("check value existence failed: %w", err)
 	}
@@ -189,26 +212,31 @@ func (store *valueStore) ValueExists(
 	return len(v) > 0, nil
 }
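One behavioral note on the `SetValue` change above: writes that leave the stored bytes unchanged now return early, before any computation is metered. A minimal standalone sketch of that guard (the map-backed store and helper name are hypothetical stand-ins for the real register state):

```go
package main

import (
	"bytes"
	"fmt"
)

// setIfChanged sketches the early-return guard added to SetValue above:
// compare against the currently stored bytes and skip both the write and
// its computation metering when nothing would change.
func setIfChanged(store map[string][]byte, key string, value []byte) bool {
	if bytes.Equal(store[key], value) {
		return false // no-op write: skipped entirely
	}
	store[key] = value
	return true
}

func main() {
	store := map[string][]byte{"balance": {0x01}}
	fmt.Println(setIfChanged(store, "balance", []byte{0x01})) // false (no-op)
	fmt.Println(setIfChanged(store, "balance", []byte{0x02})) // true (real write)
}
```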
-// AllocateStorageIndex allocates new storage index under the owner accounts
+// AllocateSlabIndex allocates a new slab index under the owner account
 // to store a new register.
-func (store *valueStore) AllocateStorageIndex(
+func (store *valueStore) AllocateSlabIndex(
 	owner []byte,
 ) (
-	atree.StorageIndex,
+	atree.SlabIndex,
 	error,
 ) {
-	defer store.tracer.StartChildSpan(trace.FVMEnvAllocateStorageIndex).End()
+	defer store.tracer.StartChildSpan(trace.FVMEnvAllocateSlabIndex).End()

-	err := store.meter.MeterComputation(ComputationKindAllocateStorageIndex, 1)
+	err := store.meter.MeterComputation(
+		common.ComputationUsage{
+			Kind:      ComputationKindAllocateSlabIndex,
+			Intensity: 1,
+		},
+	)
 	if err != nil {
-		return atree.StorageIndex{}, fmt.Errorf(
+		return atree.SlabIndex{}, fmt.Errorf(
 			"allocate storage index failed: %w", err)
 	}

-	v, err := store.accounts.AllocateStorageIndex(flow.BytesToAddress(owner))
+	v, err := store.accounts.AllocateSlabIndex(flow.BytesToAddress(owner))
 	if err != nil {
-		return atree.StorageIndex{}, fmt.Errorf(
+		return atree.SlabIndex{}, fmt.Errorf(
 			"storage address allocation failed: %w", err)
 	}
diff --git a/fvm/errors/account_key.go b/fvm/errors/account_key.go
new file mode 100644
index 00000000000..2a011abf922
--- /dev/null
+++ b/fvm/errors/account_key.go
@@ -0,0 +1,152 @@
+package errors
+
+import "github.com/onflow/flow-go/model/flow"
+
+// NewKeyMetadataEmptyError creates a new CodedFailure. It is returned
+// when key metadata cannot be parsed because it is unexpectedly empty.
+func NewKeyMetadataEmptyError(msgPrefix string) CodedFailure {
+	return NewCodedFailuref(
+		FailureCodeKeyMetadataDecodingFailure,
+		msgPrefix,
+		"key metadata is empty")
+}
+
+// NewKeyMetadataTooShortError creates a new CodedFailure. It is returned
+// when key metadata cannot be parsed because it is truncated.
+func NewKeyMetadataTooShortError(msgPrefix string, expectedLength, actualLength int) CodedFailure {
+	return NewCodedFailuref(
+		FailureCodeKeyMetadataDecodingFailure,
+		msgPrefix,
+		"key metadata is too short: expect %d bytes, got %d bytes",
+		expectedLength,
+		actualLength,
+	)
+}
+
+// NewKeyMetadataUnexpectedLengthError creates a new CodedFailure. It is returned
+// when key metadata cannot be parsed because its length is unexpected.
+func NewKeyMetadataUnexpectedLengthError(msgPrefix string, groupLength, actualLength int) CodedFailure {
+	return NewCodedFailuref(
+		FailureCodeKeyMetadataDecodingFailure,
+		msgPrefix,
+		"key metadata length is unexpected: expect multiples of %d, got %d bytes",
+		groupLength,
+		actualLength,
+	)
+}
+
+// NewKeyMetadataTrailingDataError creates a new CodedFailure. It is returned
+// when key metadata cannot be parsed because it has trailing data after expected content.
+func NewKeyMetadataTrailingDataError(msgPrefix string, trailingDataLength int) CodedFailure {
+	return NewCodedFailuref(
+		FailureCodeKeyMetadataDecodingFailure,
+		msgPrefix,
+		"key metadata has trailing data: expect no trailing data, got %d bytes",
+		trailingDataLength,
+	)
+}
+
+// NewKeyMetadataNotFoundError creates a new CodedFailure. It is returned
+// when key metadata cannot be found for the given stored key index.
+func NewKeyMetadataNotFoundError(msgPrefix string, storedKeyIndex uint32) CodedFailure {
+	return NewCodedFailuref(
+		FailureCodeKeyMetadataNotFoundFailure,
+		msgPrefix,
+		"key metadata not found at stored key index %d",
+		storedKeyIndex,
+	)
+}
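The constructors in this new file pair with the `Is...` predicates further down, which match on failure codes anywhere in a wrapped chain. A usage sketch against this package as modified in the diff (the address and index values are arbitrary):

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/fvm/errors"
	"github.com/onflow/flow-go/model/flow"
)

func classify(address flow.Address) bool {
	failure := errors.NewStoredPublicKeyNotFoundError("account key", address, 3)

	// Even after further wrapping, the predicate still finds the failure code.
	wrapped := fmt.Errorf("reading account: %w", failure)
	return errors.IsStoredPublicKeyNotFoundError(wrapped) // true
}
```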
+// NewKeyMetadataUnexpectedKeyIndexError creates a new CodedFailure. It is returned
+// when key metadata cannot be found for an unexpected stored key index.
+func NewKeyMetadataUnexpectedKeyIndexError(msgPrefix string, storedKeyIndex uint32) CodedFailure {
+	return NewCodedFailuref(
+		FailureCodeKeyMetadataUnexpectedKeyIndexFailure,
+		msgPrefix,
+		"unexpected key index %d",
+		storedKeyIndex,
+	)
+}
+
+// NewBatchPublicKeyDecodingError creates a new CodedFailure. It is returned
+// when batch public key payload cannot be decoded.
+func NewBatchPublicKeyDecodingError(msgPrefix string, address flow.Address, batchIndex uint32) CodedFailure {
+	return NewCodedFailuref(
+		FailureCodeBatchPublicKeyDecodingFailure,
+		msgPrefix,
+		"batch public key payload is malformed at batch index %d for address %s",
+		batchIndex,
+		address,
+	)
+}
+
+// NewStoredPublicKeyNotFoundError creates a new CodedFailure. It is returned
+// when a stored public key cannot be found for the given stored key index.
+func NewStoredPublicKeyNotFoundError(msgPrefix string, address flow.Address, storedKeyIndex uint32) CodedFailure {
+	return NewCodedFailuref(
+		FailureCodeStoredPublicKeyNotFoundFailure,
+		msgPrefix,
+		"stored public key not found at stored key index %d for address %s",
+		storedKeyIndex,
+		address,
+	)
+}
+
+// NewStoredPublicKeyUnexpectedIndexError creates a new CodedFailure. It is returned
+// when a stored public key cannot be found for an unexpected stored key index.
+func NewStoredPublicKeyUnexpectedIndexError(msgPrefix string, address flow.Address, storedKeyIndex uint32) CodedFailure {
+	return NewCodedFailuref(
+		FailureCodeStoredPublicKeyUnexpectedIndexFailure,
+		msgPrefix,
+		"unexpected stored key index %d for address %s",
+		storedKeyIndex,
+		address,
+	)
+}
+
+// NewBatchPublicKeyNotFoundError creates a new CodedFailure. It is returned
+// when batch public key payload cannot be found for the given batch index.
+func NewBatchPublicKeyNotFoundError(msgPrefix string, address flow.Address, batchIndex uint32) CodedFailure {
+	return NewCodedFailuref(
+		FailureCodeBatchPublicKeyNotFoundFailure,
+		msgPrefix,
+		"batch public key payload not found for address %s and batch index %d",
+		address,
+		batchIndex,
+	)
+}
+
+// IsKeyMetadataDecodingError returns true if error has FailureKeyMetadataDecodingFailure type.
+func IsKeyMetadataDecodingError(err error) bool {
+	return HasFailureCode(err, FailureCodeKeyMetadataDecodingFailure)
+}
+
+// IsKeyMetadataNotFoundError returns true if error has FailureKeyMetadataNotFoundFailure type.
+func IsKeyMetadataNotFoundError(err error) bool {
+	return HasFailureCode(err, FailureCodeKeyMetadataNotFoundFailure)
+}
+
+// IsKeyMetadataUnexpectedKeyIndexError returns true if error has FailureKeyMetadataUnexpectedKeyIndexFailure type.
+func IsKeyMetadataUnexpectedKeyIndexError(err error) bool {
+	return HasFailureCode(err, FailureCodeKeyMetadataUnexpectedKeyIndexFailure)
+}
+
+// IsStoredPublicKeyNotFoundError returns true if error has FailureStoredPublicKeyNotFoundFailure type.
+func IsStoredPublicKeyNotFoundError(err error) bool {
+	return HasFailureCode(err, FailureCodeStoredPublicKeyNotFoundFailure)
+}
+
+// IsStoredPublicKeyUnexpectedIndexError returns true if error has FailureStoredPublicKeyUnexpectedIndexFailure type.
+func IsStoredPublicKeyUnexpectedIndexError(err error) bool {
+	return HasFailureCode(err, FailureCodeStoredPublicKeyUnexpectedIndexFailure)
+}
+
+// IsBatchPublicKeyDecodingError returns true if error has FailureCodeBatchPublicKeyDecodingFailure type.
+func IsBatchPublicKeyDecodingError(err error) bool { + return HasFailureCode(err, FailureCodeBatchPublicKeyDecodingFailure) +} + +// IsBatchPublicKeyNotFoundError returns true if error has FailureBatchPublicKeyNotFoundFailure type. +func IsBatchPublicKeyNotFoundError(err error) bool { + return HasFailureCode(err, FailureCodeBatchPublicKeyNotFoundFailure) +} diff --git a/fvm/errors/accounts.go b/fvm/errors/accounts.go index 894b0974af6..fd1ee666b89 100644 --- a/fvm/errors/accounts.go +++ b/fvm/errors/accounts.go @@ -32,7 +32,7 @@ func NewAccountAlreadyExistsError(address flow.Address) CodedError { // when a public key not found for the given address and key index. func NewAccountPublicKeyNotFoundError( address flow.Address, - keyIndex uint64, + keyIndex uint32, ) CodedError { return NewCodedError( ErrCodeAccountPublicKeyNotFoundError, @@ -50,8 +50,8 @@ func IsAccountPublicKeyNotFoundError(err error) bool { // when an account tries to add public keys over the limit. func NewAccountPublicKeyLimitError( address flow.Address, - counts uint64, - limit uint64, + counts uint32, + limit uint32, ) CodedError { return NewCodedError( ErrCodeAccountPublicKeyLimitError, diff --git a/fvm/errors/codes.go b/fvm/errors/codes.go index 9737a0965ca..e73ee94dc2b 100644 --- a/fvm/errors/codes.go +++ b/fvm/errors/codes.go @@ -4,30 +4,41 @@ import "fmt" type ErrorCode uint16 -func (ec ErrorCode) IsFailure() bool { - return ec >= FailureCodeUnknownFailure -} - func (ec ErrorCode) String() string { - if ec.IsFailure() { - return fmt.Sprintf("[Failure Code: %d]", ec) - } return fmt.Sprintf("[Error Code: %d]", ec) } +type FailureCode uint16 + +func (fc FailureCode) String() string { + return fmt.Sprintf("[Failure Code: %d]", fc) +} + const ( - FailureCodeUnknownFailure ErrorCode = 2000 - FailureCodeEncodingFailure ErrorCode = 2001 - FailureCodeLedgerFailure ErrorCode = 2002 - FailureCodeStateMergeFailure ErrorCode = 2003 - FailureCodeBlockFinderFailure ErrorCode = 2004 - // Deprecated: No longer used. - FailureCodeHasherFailure ErrorCode = 2005 - FailureCodeParseRestrictedModeInvalidAccessFailure ErrorCode = 2006 - FailureCodePayerBalanceCheckFailure ErrorCode = 2007 - FailureCodeDerivedDataCacheImplementationFailure ErrorCode = 2008 - // Deprecated: No longer used. - FailureCodeMetaTransactionFailure ErrorCode = 2100 + FailureCodeUnknownFailure FailureCode = 2000 + FailureCodeEncodingFailure FailureCode = 2001 + FailureCodeLedgerFailure FailureCode = 2002 + FailureCodeStateMergeFailure FailureCode = 2003 + FailureCodeBlockFinderFailure FailureCode = 2004 + // Deprecated: No longer used. + FailureCodeHasherFailure FailureCode = 2005 + FailureCodeParseRestrictedModeInvalidAccessFailure FailureCode = 2006 + FailureCodePayerBalanceCheckFailure FailureCode = 2007 + FailureCodeDerivedDataCacheImplementationFailure FailureCode = 2008 + FailureCodeRandomSourceFailure FailureCode = 2009 + FailureCodeEVMFailure FailureCode = 2010 + FailureCodeExecutionVersionProvider FailureCode = 2011 + // Deprecated: No longer used. 
+ FailureCodeMetaTransactionFailure FailureCode = 2100 + + // Account key + FailureCodeKeyMetadataDecodingFailure FailureCode = 2200 + FailureCodeKeyMetadataNotFoundFailure FailureCode = 2201 + FailureCodeKeyMetadataUnexpectedKeyIndexFailure FailureCode = 2202 + FailureCodeStoredPublicKeyNotFoundFailure FailureCode = 2203 + FailureCodeStoredPublicKeyUnexpectedIndexFailure FailureCode = 2204 + FailureCodeBatchPublicKeyDecodingFailure FailureCode = 2205 + FailureCodeBatchPublicKeyNotFoundFailure FailureCode = 2206 ) const ( @@ -59,6 +70,7 @@ const ( ErrCodeAccountAuthorizationError ErrorCode = 1055 ErrCodeOperationAuthorizationError ErrorCode = 1056 ErrCodeOperationNotSupportedError ErrorCode = 1057 + ErrCodeBlockHeightOutOfRangeError ErrorCode = 1058 // execution errors 1100 - 1200 // Deprecated: No longer used. @@ -77,8 +89,8 @@ const ( ErrCodeComputationLimitExceededError ErrorCode = 1110 ErrCodeMemoryLimitExceededError ErrorCode = 1111 ErrCodeCouldNotDecodeExecutionParameterFromState ErrorCode = 1112 - ErrCodeScriptExecutionCancelledError ErrorCode = 1114 ErrCodeScriptExecutionTimedOutError ErrorCode = 1113 + ErrCodeScriptExecutionCancelledError ErrorCode = 1114 ErrCodeEventEncodingError ErrorCode = 1115 ErrCodeInvalidInternalStateAccessError ErrorCode = 1116 // 1117 was never deployed and is free to use @@ -102,4 +114,7 @@ const ( ErrCodeContractNotFoundError ErrorCode = 1251 // Deprecated: No longer used. ErrCodeContractNamesNotFoundError ErrorCode = 1252 + + // fvm std lib errors 1300-1400 + ErrEVMExecutionError ErrorCode = 1300 ) diff --git a/fvm/errors/errors.go b/fvm/errors/errors.go index 0ff5ee1b37a..ea8f3825864 100644 --- a/fvm/errors/errors.go +++ b/fvm/errors/errors.go @@ -5,13 +5,20 @@ import ( "fmt" "github.com/hashicorp/go-multierror" + "github.com/onflow/cadence/errors" "github.com/onflow/cadence/runtime" ) type Unwrappable interface { + error Unwrap() error } +type UnwrappableErrors interface { + error + Unwrap() []error +} + type CodedError interface { Code() ErrorCode @@ -19,6 +26,13 @@ type CodedError interface { error } +type CodedFailure interface { + Code() FailureCode + + Unwrappable + error +} + // Is is a utility function to call std error lib `Is` function for instance equality checks. func Is(err error, target error) bool { return stdErrors.Is(err, target) @@ -32,15 +46,14 @@ func As(err error, target interface{}) bool { return stdErrors.As(err, target) } -// findImportantCodedError recursively unwraps the error to search for important -// coded error: +// findRootCodedError recursively unwraps the error to search for the root (deepest) coded error: // 1. If err is nil, this returns (nil, false), // 2. If err has no error code, this returns (nil, true), -// 3. If err has a failure error code, this returns -// (<the shallowest failure coded error>, false), -// 4. If err has a non-failure error code, this returns -// (<the deepest, aka root cause, non-failure coded error>, false) -func findImportantCodedError(err error) (CodedError, bool) { +// 3. If err has an error code, this returns +// (<the deepest, aka root cause, coded error>, false) +// +// Note: This assumes the caller has already checked if the error contains a CodedFailure. 
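Before the implementation that follows, a short usage sketch of what the CodedError/CodedFailure split buys callers of `SplitErrorTypes` (assuming the fvm/errors package as modified in this diff):

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/fvm/errors"
)

func demo() {
	// A failure buried under an ordinary wrapping error still surfaces
	// on the failure side.
	e := fmt.Errorf("wrapped: %w", errors.NewLedgerFailure(fmt.Errorf("disk")))
	txErr, failure := errors.SplitErrorTypes(e)
	fmt.Println(txErr == nil, failure != nil) // true true

	// A plain coded error stays on the non-fatal side.
	txErr, failure = errors.SplitErrorTypes(
		errors.NewOperationNotSupportedError("Random"))
	fmt.Println(txErr != nil, failure == nil) // true true
}
```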
+func findRootCodedError(err error) (CodedError, bool) { if err == nil { return nil, false } @@ -51,10 +64,6 @@ func findImportantCodedError(err error) (CodedError, bool) { } for { - if coded.Code().IsFailure() { - return coded, false - } - var nextCoded CodedError if !As(coded.Unwrap(), &nextCoded) { return coded, false @@ -67,32 +76,45 @@ func findImportantCodedError(err error) (CodedError, bool) { // IsFailure returns true if the error is un-coded, or if the error contains // a failure code. func IsFailure(err error) bool { + return AsFailure(err) != nil +} + +func AsFailure(err error) CodedFailure { if err == nil { - return false + return nil + } + + var failure CodedFailure + if As(err, &failure) { + return failure + } + + var coded CodedError + if !As(err, &coded) { + return NewUnknownFailure(err) } - coded, isUnknown := findImportantCodedError(err) - return isUnknown || coded.Code().IsFailure() + return nil } // SplitErrorTypes splits the error into fatal (failures) and non-fatal errors -func SplitErrorTypes(inp error) (err CodedError, failure CodedError) { +func SplitErrorTypes(inp error) (err CodedError, failure CodedFailure) { if inp == nil { return nil, nil } - coded, isUnknown := findImportantCodedError(inp) - if isUnknown { - return nil, NewUnknownFailure(inp) - } - - if coded.Code().IsFailure() { - return nil, WrapCodedError( - coded.Code(), + if failure = AsFailure(inp); failure != nil { + return nil, WrapCodedFailure( + failure.Code(), inp, "failure caused by") } + coded, isUnknown := findRootCodedError(inp) + if isUnknown { + return nil, NewUnknownFailure(inp) + } + return WrapCodedError( coded.Code(), inp, @@ -117,38 +139,86 @@ func HandleRuntimeError(err error) error { return NewCadenceRuntimeError(runErr) } -// This returns true if the error or one of its nested errors matches the +// HasErrorCode returns true if the error or one of its nested errors matches the // specified error code. func HasErrorCode(err error, code ErrorCode) bool { return Find(err, code) != nil } -// This recursively unwraps the error and returns first CodedError that matches +// HasFailureCode returns true if the error or one of its nested errors matches the +// specified failure code. +func HasFailureCode(err error, code FailureCode) bool { + return FindFailure(err, code) != nil +} + +// Find recursively unwraps the error and returns the first CodedError that matches // the specified error code. func Find(originalErr error, code ErrorCode) CodedError { if originalErr == nil { return nil } - var unwrappable Unwrappable - if !As(originalErr, &unwrappable) { + // Handle non-chained errors + var unwrappedErrs []error + switch err := originalErr.(type) { + case *multierror.Error: + unwrappedErrs = err.WrappedErrors() + case UnwrappableErrors: + unwrappedErrs = err.Unwrap() + + // IMPORTANT: this check needs to run after *multierror.Error because multierror does implement + // the Unwrappable interface, however its implementation only visits the base errors in the list, + // and ignores their descendants. 
+ case Unwrappable: + coded, ok := err.(CodedError) + if ok && coded.Code() == code { + return coded + } + return Find(err.Unwrap(), code) + default: return nil } - coded, ok := unwrappable.(CodedError) - if ok && coded.Code() == code { - return coded + for _, innerErr := range unwrappedErrs { + coded := Find(innerErr, code) + if coded != nil { + return coded + } + } + + return nil +} + +// FindFailure recursively unwraps the error and returns the first CodedFailure that matches +// the specified error code. +func FindFailure(originalErr error, code FailureCode) CodedFailure { + if originalErr == nil { + return nil } - // NOTE: we need to special case multierror.Error since As() will only - // inspect the first error within multierror.Error. - errors, ok := unwrappable.(*multierror.Error) - if !ok { - return Find(unwrappable.Unwrap(), code) + // Handle non-chained errors + var unwrappedErrs []error + switch err := originalErr.(type) { + case *multierror.Error: + unwrappedErrs = err.WrappedErrors() + case UnwrappableErrors: + unwrappedErrs = err.Unwrap() + + // IMPORTANT: this check needs to run after *multierror.Error because multierror does implement + // the Unwrappable interface, however its implementation only visits the base errors in the list, + // and ignores their descendants. + case Unwrappable: + coded, ok := err.(CodedFailure) + if ok && coded.Code() == code { + return coded + } + return FindFailure(err.Unwrap(), code) + default: + return nil } - for _, innerErr := range errors.Errors { - coded = Find(innerErr, code) + for _, innerErr := range unwrappedErrs { + coded := FindFailure(innerErr, code) if coded != nil { return coded } @@ -157,6 +227,8 @@ func Find(originalErr error, code ErrorCode) CodedError { return nil } +var _ CodedError = (*codedError)(nil) + type codedError struct { code ErrorCode @@ -206,6 +278,69 @@ func (err codedError) Code() ErrorCode { return err.code } +var _ CodedFailure = (*codedFailure)(nil) + +type codedFailure struct { + code FailureCode + err error +} + +func newFailure( + code FailureCode, + rootCause error, +) codedFailure { + return codedFailure{ + code: code, + err: rootCause, + } +} + +func WrapCodedFailure( + code FailureCode, + err error, + prefixMsgFormat string, + formatArguments ...interface{}, +) codedFailure { + if prefixMsgFormat != "" { + msg := fmt.Sprintf(prefixMsgFormat, formatArguments...) + err = fmt.Errorf("%s: %w", msg, err) + } + return newFailure(code, err) +} + +func NewCodedFailure( + code FailureCode, + format string, + formatArguments ...interface{}, +) codedFailure { + return newFailure(code, fmt.Errorf(format, formatArguments...)) +} + +func NewCodedFailuref( + code FailureCode, + msgPrefix string, + format string, + formatArguments ...interface{}, +) codedFailure { + err := fmt.Errorf(format, formatArguments...) 
+ if msgPrefix != "" { + err = fmt.Errorf("%s: %w", msgPrefix, err) + } + return newFailure(code, err) +} + +func (err codedFailure) Unwrap() error { + return err.err +} + +func (err codedFailure) Error() string { + return fmt.Sprintf("%v %v", err.code, err.err) +} + +func (err codedFailure) Code() FailureCode { + return err.code +} + // NewEventEncodingError construct a new CodedError which indicates // that encoding event has failed func NewEventEncodingError(err error) CodedError { @@ -213,3 +348,30 @@ func NewEventEncodingError(err error) CodedError { ErrCodeEventEncodingError, "error while encoding emitted event: %w ", err) } + +// EVMError needs to satisfy the user error interface +// in order for Cadence to correctly handle the error +var _ errors.UserError = &(EVMError{}) + +// EVMError captures any non-fatal EVM error +type EVMError struct { + CodedError +} + +func (e EVMError) IsUserError() {} + +// NewEVMError constructs a new CodedError which captures a +// collection of errors provided by (non-fatal) evm runtime. +func NewEVMError(err error) EVMError { + return EVMError{ + WrapCodedError( + ErrEVMExecutionError, + err, + "evm runtime error"), + } +} + +// IsEVMError returns true if error is an EVM error +func IsEVMError(err error) bool { + return HasErrorCode(err, ErrEVMExecutionError) +} diff --git a/fvm/errors/errors_test.go b/fvm/errors/errors_test.go index d0a262e0147..923b86b77ff 100644 --- a/fvm/errors/errors_test.go +++ b/fvm/errors/errors_test.go @@ -4,6 +4,11 @@ import ( "fmt" "testing" + "github.com/hashicorp/go-multierror" + cadenceErr "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/sema" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" @@ -39,7 +44,7 @@ func TestErrorHandling(t *testing.T) { e5 := NewInvalidProposalSignatureError(flow.ProposalKey{}, e4) e6 := fmt.Errorf("wrapped: %w", e5) - expectedErr := WrapCodedError( + expectedErr := WrapCodedFailure( e3.Code(), // The shallowest failure's error code e6, // All the error message detail. 
"failure caused by") @@ -61,3 +66,334 @@ func TestErrorHandling(t *testing.T) { require.True(t, IsFailure(e1)) }) } + +func TestHandleRuntimeError(t *testing.T) { + baseErr := fmt.Errorf("base error") + tests := []struct { + name string + err error + errorCode ErrorCode + failureCode FailureCode + }{ + { + name: "nil error", + err: nil, + }, + { + name: "unknown error", + err: baseErr, + failureCode: FailureCodeUnknownFailure, + }, + { + name: "runtime error", + err: runtime.Error{Err: baseErr}, + errorCode: ErrCodeCadenceRunTimeError, + }, + { + name: "coded error in Unwrappable error", + err: runtime.Error{ + Err: cadenceErr.ExternalError{ + Recovered: NewScriptExecutionCancelledError(baseErr), + }, + }, + errorCode: ErrCodeScriptExecutionCancelledError, + }, + { + name: "coded error in ParentError error", + err: createCheckerErr([]error{ + fmt.Errorf("first error"), + NewScriptExecutionTimedOutError(), + }), + errorCode: ErrCodeScriptExecutionTimedOutError, + }, + { + name: "first coded error returned", + err: createCheckerErr([]error{ + fmt.Errorf("first error"), + NewScriptExecutionTimedOutError(), + NewScriptExecutionCancelledError(baseErr), + }), + errorCode: ErrCodeScriptExecutionTimedOutError, + }, + { + name: "failure returned", + err: createCheckerErr([]error{ + fmt.Errorf("first error"), + NewLedgerFailure(baseErr), + }), + failureCode: FailureCodeLedgerFailure, + }, + { + name: "error before failure returns failure", + err: createCheckerErr([]error{ + fmt.Errorf("first error"), + NewScriptExecutionTimedOutError(), + NewLedgerFailure(baseErr), + }), + failureCode: FailureCodeLedgerFailure, + }, + { + name: "embedded coded errors return deepest error", + err: createCheckerErr([]error{ + fmt.Errorf("first error"), + NewScriptExecutionCancelledError( + NewScriptExecutionTimedOutError(), + ), + }), + errorCode: ErrCodeScriptExecutionTimedOutError, + }, + { + name: "failure with embedded error returns failure", + err: createCheckerErr([]error{ + fmt.Errorf("first error"), + NewLedgerFailure( + NewScriptExecutionTimedOutError(), + ), + }), + failureCode: FailureCodeLedgerFailure, + }, + { + name: "coded error with embedded failure returns failure", + err: createCheckerErr([]error{ + fmt.Errorf("first error"), + NewScriptExecutionCancelledError( + NewLedgerFailure(baseErr), + ), + }), + failureCode: FailureCodeLedgerFailure, + }, + { + name: "error tree with failure returns failure", + err: createCheckerErr([]error{ + fmt.Errorf("first error"), + NewScriptExecutionCancelledError(baseErr), + createCheckerErr([]error{ + fmt.Errorf("first error"), + NewScriptExecutionCancelledError( + NewLedgerFailure(baseErr), + ), + }), + }), + failureCode: FailureCodeLedgerFailure, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + actual := HandleRuntimeError(tc.err) + if tc.err == nil { + assert.NoError(t, actual) + return + } + + actualCoded, failureCoded := SplitErrorTypes(actual) + + if tc.failureCode != 0 { + assert.NoError(t, actualCoded) + assert.Equalf(t, tc.failureCode, failureCoded.Code(), "error code mismatch: expected %d, got %d", tc.failureCode, failureCoded.Code()) + } else { + assert.NoError(t, failureCoded) + assert.Equalf(t, tc.errorCode, actualCoded.Code(), "error code mismatch: expected %d, got %d", tc.errorCode, actualCoded.Code()) + } + }) + } +} + +func TestFind(t *testing.T) { + targetCode := ErrCodeScriptExecutionCancelledError + baseErr := fmt.Errorf("base error") + + tests := []struct { + name string + err error + found bool + }{ + { + name: "nil 
error", + err: nil, + found: false, + }, + { + name: "plain error", + err: baseErr, + found: false, + }, + { + name: "wrapped plain error", + err: fmt.Errorf("wrapped: %w", baseErr), + found: false, + }, + { + name: "coded failure", + err: NewLedgerFailure(baseErr), + found: false, + }, + { + name: "incorrect coded error", + err: NewScriptExecutionTimedOutError(), + found: false, + }, + { + name: "found", + err: NewScriptExecutionCancelledError(baseErr), + found: true, + }, + { + name: "found with embedded errors", + err: NewScriptExecutionCancelledError(NewLedgerFailure(NewScriptExecutionTimedOutError())), + found: true, + }, + { + name: "found embedded in error", + err: NewDerivedDataCacheImplementationFailure(NewScriptExecutionCancelledError(baseErr)), + found: true, + }, + { + name: "found embedded in failure", + err: NewLedgerFailure(NewScriptExecutionCancelledError(baseErr)), + found: true, + }, + { + name: "found embedded with multierror", + err: &multierror.Error{ + Errors: []error{ + baseErr, + NewScriptExecutionTimedOutError(), + NewLedgerFailure(NewScriptExecutionCancelledError(baseErr)), + }, + }, + found: true, + }, + { + name: "found within embedded error tree", + err: createCheckerErr([]error{ + fmt.Errorf("first error"), + NewLedgerFailure(baseErr), + createCheckerErr([]error{ + fmt.Errorf("first error"), + NewLedgerFailure( + NewScriptExecutionCancelledError(baseErr), + ), + }), + }), + found: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + actual := Find(tc.err, targetCode) + if !tc.found { + assert.NoError(t, actual) + return + } + + require.Error(t, actual, "expected error but none found") + assert.Equalf(t, targetCode, actual.Code(), "error code mismatch: expected %d, got %d", targetCode, actual.Code()) + }) + } +} + +func TestFindFailure(t *testing.T) { + targetCode := FailureCodeLedgerFailure + baseErr := fmt.Errorf("base error") + tests := []struct { + name string + err error + found bool + }{ + { + name: "nil error", + err: nil, + found: false, + }, + { + name: "plain error", + err: baseErr, + found: false, + }, + { + name: "wrapped plain error", + err: fmt.Errorf("wrapped: %w", baseErr), + found: false, + }, + { + name: "coded error", + err: NewScriptExecutionTimedOutError(), + found: false, + }, + { + name: "incorrect coded failure", + err: NewStateMergeFailure(baseErr), + found: false, + }, + { + name: "found", + err: NewLedgerFailure(baseErr), + found: true, + }, + { + name: "found with embedded errors", + err: NewLedgerFailure(NewScriptExecutionCancelledError(NewScriptExecutionTimedOutError())), + found: true, + }, + { + name: "found embedded in error", + err: NewDerivedDataCacheImplementationFailure(NewLedgerFailure(baseErr)), + found: true, + }, + { + name: "found embedded in failure", + err: NewStateMergeFailure(NewLedgerFailure(baseErr)), + found: true, + }, + { + name: "found embedded with multierror", + err: &multierror.Error{ + Errors: []error{ + baseErr, + NewScriptExecutionTimedOutError(), + NewScriptExecutionCancelledError(NewLedgerFailure(baseErr)), + }, + }, + found: true, + }, + { + name: "found within embedded error tree", + err: createCheckerErr([]error{ + fmt.Errorf("first error"), + NewScriptExecutionCancelledError(baseErr), + createCheckerErr([]error{ + fmt.Errorf("first error"), + NewScriptExecutionCancelledError( + NewLedgerFailure(baseErr), + ), + }), + }), + found: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + actual := FindFailure(tc.err, targetCode) + if 
!tc.found {
+				assert.NoError(t, actual)
+				return
+			}
+
+			require.Error(t, actual, "expected error but none found")
+			assert.Equalf(t, targetCode, actual.Code(), "error code mismatch: expected %d, got %d", targetCode, actual.Code())
+		})
+	}
+}
+
+func createCheckerErr(errs []error) error {
+	return runtime.Error{
+		Err: cadenceErr.ExternalError{
+			Recovered: sema.CheckerError{
+				Errors: errs,
+			},
+		},
+	}
+}
diff --git a/fvm/errors/execution.go b/fvm/errors/execution.go
index f87c68a57cd..ec2f1c3d3fd 100644
--- a/fvm/errors/execution.go
+++ b/fvm/errors/execution.go
@@ -76,8 +76,8 @@ func IsInsufficientPayerBalanceError(err error) bool {
 func NewPayerBalanceCheckFailure(
 	payer flow.Address,
 	err error,
-) CodedError {
-	return WrapCodedError(
+) CodedFailure {
+	return WrapCodedFailure(
 		FailureCodePayerBalanceCheckFailure,
 		err,
 		"failed to check if the payer %s has sufficient balance",
@@ -88,13 +88,35 @@ func NewPayerBalanceCheckFailure(
 // the derived data cache.
 func NewDerivedDataCacheImplementationFailure(
 	err error,
-) CodedError {
-	return WrapCodedError(
+) CodedFailure {
+	return WrapCodedFailure(
 		FailureCodeDerivedDataCacheImplementationFailure,
 		err,
 		"implementation error in derived data cache")
 }

+// NewRandomSourceFailure indicates an implementation error in
+// the random source provider.
+func NewRandomSourceFailure(
+	err error,
+) CodedFailure {
+	return WrapCodedFailure(
+		FailureCodeRandomSourceFailure,
+		err,
+		"implementation error in random source provider")
+}
+
+// NewExecutionVersionProviderFailure indicates an irrecoverable failure in the execution
+// version provider.
+func NewExecutionVersionProviderFailure(
+	err error,
+) CodedFailure {
+	return WrapCodedFailure(
+		FailureCodeExecutionVersionProvider,
+		err,
+		"failure in execution version provider")
+}
+
 // NewComputationLimitExceededError constructs a new CodedError which indicates
 // that computation has exceeded its limit.
 func NewComputationLimitExceededError(limit uint64) CodedError {
@@ -226,6 +248,17 @@ func IsOperationNotSupportedError(err error) bool {
 	return HasErrorCode(err, ErrCodeOperationNotSupportedError)
 }

+func NewBlockHeightOutOfRangeError(height uint64) CodedError {
+	return NewCodedError(
+		ErrCodeBlockHeightOutOfRangeError,
+		"block height (%v) is out of queryable range",
+		height)
+}
+
+func IsBlockHeightOutOfRangeError(err error) bool {
+	return HasErrorCode(err, ErrCodeBlockHeightOutOfRangeError)
+}
+
 // NewScriptExecutionCancelledError construct a new CodedError which indicates
 // that Cadence Script execution has been cancelled (e.g. request connection
 // has been droped)
diff --git a/fvm/errors/failures.go b/fvm/errors/failures.go
index 322fd0ac117..df9b2c1104b 100644
--- a/fvm/errors/failures.go
+++ b/fvm/errors/failures.go
@@ -4,8 +4,8 @@ import (
 	"github.com/onflow/flow-go/module/trace"
 )

-func NewUnknownFailure(err error) CodedError {
-	return WrapCodedError(
+func NewUnknownFailure(err error) CodedFailure {
+	return WrapCodedFailure(
 		FailureCodeUnknownFailure,
 		err,
 		"unknown failure")
@@ -16,8 +16,8 @@ func NewEncodingFailuref(
 	err error,
 	msg string,
 	args ...interface{},
-) CodedError {
-	return WrapCodedError(
+) CodedFailure {
+	return WrapCodedFailure(
 		FailureCodeEncodingFailure,
 		err,
 		"encoding failed: "+msg,
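A quick round trip for the new block-height error added above, showing that the predicate survives further wrapping (a usage sketch; `getBlock` and its bounds are hypothetical):

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/fvm/errors"
)

// getBlock is a stand-in lookup that rejects heights below the queryable range.
func getBlock(height, lowest uint64) error {
	if height < lowest {
		return errors.NewBlockHeightOutOfRangeError(height)
	}
	return nil
}

func demo() {
	err := fmt.Errorf("script failed: %w", getBlock(10, 100))
	fmt.Println(errors.IsBlockHeightOutOfRangeError(err)) // true
}
```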
@@ -26,8 +26,8 @@ func NewEncodingFailuref(

 // NewLedgerFailure constructs a new CodedError which captures a fatal error
 // cause by ledger failures.
-func NewLedgerFailure(err error) CodedError {
-	return WrapCodedError(
+func NewLedgerFailure(err error) CodedFailure {
+	return WrapCodedFailure(
 		FailureCodeLedgerFailure,
 		err,
 		"ledger returns unsuccessful")
@@ -36,13 +36,13 @@ func NewLedgerFailure(err error) CodedError {
 // IsLedgerFailure returns true if the error or any of the wrapped errors is
 // a ledger failure
 func IsLedgerFailure(err error) bool {
-	return HasErrorCode(err, FailureCodeLedgerFailure)
+	return HasFailureCode(err, FailureCodeLedgerFailure)
 }

 // NewStateMergeFailure constructs a new CodedError which captures a fatal
 // caused by state merge.
-func NewStateMergeFailure(err error) CodedError {
-	return WrapCodedError(
+func NewStateMergeFailure(err error) CodedFailure {
+	return WrapCodedFailure(
 		FailureCodeStateMergeFailure,
 		err,
 		"can not merge the state")
@@ -50,8 +50,8 @@ func NewStateMergeFailure(err error) CodedError {

 // NewBlockFinderFailure constructs a new CodedError which captures a fatal
 // caused by block finder.
-func NewBlockFinderFailure(err error) CodedError {
-	return WrapCodedError(
+func NewBlockFinderFailure(err error) CodedFailure {
+	return WrapCodedFailure(
 		FailureCodeBlockFinderFailure,
 		err,
 		"can not retrieve the block")
@@ -62,9 +62,18 @@ func NewBlockFinderFailure(err error) CodedError {
 // operation while it is parsing programs.
 func NewParseRestrictedModeInvalidAccessFailure(
 	spanName trace.SpanName,
-) CodedError {
-	return NewCodedError(
+) CodedFailure {
+	return NewCodedFailure(
 		FailureCodeParseRestrictedModeInvalidAccessFailure,
 		"cannot access %s while cadence is in parse restricted mode",
 		spanName)
 }
+
+// NewEVMFailure constructs a new CodedFailure which captures a fatal error
+// caused by the EVM.
+func NewEVMFailure(err error) CodedFailure {
+	return WrapCodedFailure(
+		FailureCodeEVMFailure,
+		err,
+		"evm failure")
+}
diff --git a/fvm/evm/backends/wrappedEnv.go b/fvm/evm/backends/wrappedEnv.go
new file mode 100644
index 00000000000..b9c26c02aee
--- /dev/null
+++ b/fvm/evm/backends/wrappedEnv.go
@@ -0,0 +1,219 @@
+package backends
+
+import (
+	"github.com/onflow/atree"
+	"github.com/onflow/cadence"
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/runtime"
+	"github.com/rs/zerolog"
+	otelTrace "go.opentelemetry.io/otel/trace"
+
+	"github.com/onflow/flow-go/fvm/environment"
+	"github.com/onflow/flow-go/fvm/errors"
+	"github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/fvm/meter"
+	"github.com/onflow/flow-go/fvm/tracing"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/trace"
+)
+
+// WrappedEnvironment wraps an FVM environment, handles external errors,
+// and provides a backend to the EVM.
+type WrappedEnvironment struct {
+	env environment.Environment
+}
+
+// NewWrappedEnvironment constructs a new wrapped environment
+func NewWrappedEnvironment(env environment.Environment) types.Backend {
+	return &WrappedEnvironment{env}
+}
+
+var _ types.Backend = &WrappedEnvironment{}
+
+// GetValue gets a value from the storage for the given owner and key pair;
+// if the value is not found, an empty slice and no error are returned.
+func (we *WrappedEnvironment) GetValue(owner, key []byte) ([]byte, error) {
+	val, err := we.env.GetValue(owner, key)
+	return val, handleEnvironmentError(err)
+}
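Every method in this new file funnels its error through one translation step (`handleEnvironmentError`, defined at the end of the file): FVM failures become EVM fatal errors, everything else becomes a backend error. A minimal standalone sketch of the pattern, with hypothetical sentinel errors standing in for `types.FatalError` and `types.BackendError`:

```go
package main

import (
	"errors"
	"fmt"
)

var errFatal = errors.New("fatal")     // stand-in for types.FatalError
var errBackend = errors.New("backend") // stand-in for types.BackendError

// translate sketches handleEnvironmentError: fatal-ness is decided once,
// at the wrapping boundary, instead of at every call site.
func translate(err error, isFailure func(error) bool) error {
	if err == nil {
		return nil
	}
	if isFailure(err) {
		return fmt.Errorf("%w: %v", errFatal, err)
	}
	return fmt.Errorf("%w: %v", errBackend, err)
}

func main() {
	alwaysFatal := func(error) bool { return true }
	fmt.Println(translate(errors.New("ledger broke"), alwaysFatal)) // fatal: ...
}
```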
+// SetValue sets a value into the storage for the given owner and key pair.
+func (we *WrappedEnvironment) SetValue(owner, key, value []byte) error {
+	err := we.env.SetValue(owner, key, value)
+	return handleEnvironmentError(err)
+}
+
+// ValueExists checks if a value exists for the given owner and key pair.
+func (we *WrappedEnvironment) ValueExists(owner, key []byte) (bool, error) {
+	b, err := we.env.ValueExists(owner, key)
+	return b, handleEnvironmentError(err)
+}
+
+// AllocateSlabIndex allocates a slab index under the given account.
+func (we *WrappedEnvironment) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) {
+	index, err := we.env.AllocateSlabIndex(owner)
+	return index, handleEnvironmentError(err)
+}
+
+func (we *WrappedEnvironment) RunWithMeteringDisabled(f func()) {
+	we.env.RunWithMeteringDisabled(f)
+}
+
+// MeterComputation updates the total computation used based on the kind and intensity of the operation.
+func (we *WrappedEnvironment) MeterComputation(usage common.ComputationUsage) error {
+	err := we.env.MeterComputation(usage)
+	return handleEnvironmentError(err)
+}
+
+// ComputationUsed returns the computation used so far
+func (we *WrappedEnvironment) ComputationUsed() (uint64, error) {
+	val, err := we.env.ComputationUsed()
+	return val, handleEnvironmentError(err)
+}
+
+// ComputationIntensities returns the list of computation intensities
+func (we *WrappedEnvironment) ComputationIntensities() meter.MeteredComputationIntensities {
+	return we.env.ComputationIntensities()
+}
+
+// ComputationAvailable returns true if there is computation room
+// for the given kind and intensity operation.
+func (we *WrappedEnvironment) ComputationAvailable(usage common.ComputationUsage) bool {
+	return we.env.ComputationAvailable(usage)
+}
+
+// MeterMemory meters the memory usage of a new operation.
+func (we *WrappedEnvironment) MeterMemory(usage common.MemoryUsage) error {
+	err := we.env.MeterMemory(usage)
+	return handleEnvironmentError(err)
+}
+
+// MemoryUsed returns the total memory used so far.
+func (we *WrappedEnvironment) MemoryUsed() (uint64, error) {
+	val, err := we.env.MemoryUsed()
+	return val, handleEnvironmentError(err)
+}
+
+// MeterEmittedEvent meters a newly emitted event.
+func (we *WrappedEnvironment) MeterEmittedEvent(byteSize uint64) error {
+	err := we.env.MeterEmittedEvent(byteSize)
+	return handleEnvironmentError(err)
+}
+
+// TotalEmittedEventBytes returns the total byte size of events emitted so far.
+func (we *WrappedEnvironment) TotalEmittedEventBytes() uint64 {
+	return we.env.TotalEmittedEventBytes()
+}
+
+// EmitEvent emits an event.
+func (we *WrappedEnvironment) EmitEvent(event cadence.Event) error {
+	err := we.env.EmitEvent(event)
+	return handleEnvironmentError(err)
+}
+
+// Events returns the list of emitted events.
+func (we *WrappedEnvironment) Events() flow.EventsList {
+	return we.env.Events()
+
+}
+
+// ServiceEvents returns the list of emitted service events
+func (we *WrappedEnvironment) ServiceEvents() flow.EventsList {
+	return we.env.ServiceEvents()
+}
+
+// ConvertedServiceEvents returns the converted list of emitted service events.
+func (we *WrappedEnvironment) ConvertedServiceEvents() flow.ServiceEventList {
+	return we.env.ConvertedServiceEvents()
+}
+
+// Reset resets and discards all the changes to
+// all stateful environment modules (events, storage, ...)
+func (we *WrappedEnvironment) Reset() {
+	we.env.Reset()
+}
+
+// GetCurrentBlockHeight returns the current Flow block height
+func (we *WrappedEnvironment) GetCurrentBlockHeight() (uint64, error) {
+	val, err := we.env.GetCurrentBlockHeight()
+	return val, handleEnvironmentError(err)
+}
+
+// GetBlockAtHeight returns the block at the given height
+func (we *WrappedEnvironment) GetBlockAtHeight(height uint64) (
+	runtime.Block,
+	bool,
+	error,
+) {
+	val, found, err := we.env.GetBlockAtHeight(height)
+	return val, found, handleEnvironmentError(err)
+}
+
+// ReadRandom fills the given buffer with random bytes
+func (we *WrappedEnvironment) ReadRandom(buffer []byte) error {
+	err := we.env.ReadRandom(buffer)
+	return handleEnvironmentError(err)
+}
+
+// Invoke invokes a call inside the fvm env.
+func (we *WrappedEnvironment) Invoke(
+	spec environment.ContractFunctionSpec,
+	arguments []cadence.Value,
+) (
+	cadence.Value,
+	error,
+) {
+	val, err := we.env.Invoke(spec, arguments)
+	return val, handleEnvironmentError(err)
+}
+
+// GenerateUUID generates a uuid
+func (we *WrappedEnvironment) GenerateUUID() (uint64, error) {
+	uuid, err := we.env.GenerateUUID()
+	return uuid, handleEnvironmentError(err)
+}
+
+// StartChildSpan starts a new child open tracing span.
+func (we *WrappedEnvironment) StartChildSpan(
+	name trace.SpanName,
+	options ...otelTrace.SpanStartOption,
+) tracing.TracerSpan {
+	return we.env.StartChildSpan(name, options...)
+}
+
+func (we *WrappedEnvironment) SetNumberOfDeployedCOAs(count uint64) {
+	we.env.SetNumberOfDeployedCOAs(count)
+}
+
+func (we *WrappedEnvironment) EVMTransactionExecuted(
+	gasUsed uint64,
+	isDirectCall bool,
+	failed bool,
+) {
+	we.env.EVMTransactionExecuted(gasUsed, isDirectCall, failed)
+}
+
+func (we *WrappedEnvironment) EVMBlockExecuted(
+	txCount int,
+	totalGasUsed uint64,
+	totalSupplyInFlow float64,
+) {
+	we.env.EVMBlockExecuted(txCount, totalGasUsed, totalSupplyInFlow)
+}
+
+func (we *WrappedEnvironment) Logger() zerolog.Logger {
+	return we.env.Logger()
+}
+
+func handleEnvironmentError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	// fvm fatal errors
+	if errors.IsFailure(err) {
+		return types.NewFatalError(err)
+	}
+
+	return types.NewBackendError(err)
+}
diff --git a/fvm/evm/emulator/config.go b/fvm/evm/emulator/config.go
new file mode 100644
index 00000000000..baee338825f
--- /dev/null
+++ b/fvm/evm/emulator/config.go
@@ -0,0 +1,284 @@
+package emulator
+
+import (
+	"math/big"
+
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	gethCore "github.com/ethereum/go-ethereum/core"
+	gethVM "github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/eth/tracers"
+	gethParams "github.com/ethereum/go-ethereum/params"
+
+	"github.com/onflow/flow-go/fvm/evm/types"
+)
+
+var (
+	zero    = uint64(0)
+	bigZero = big.NewInt(0)
+)
+
+var (
+	PreviewnetPragueActivation = uint64(0)          // already on Prague for PreviewNet
+	TestnetPragueActivation    = uint64(1746723600) // Thu May 08 2025 17:00:00 GMT+0000 (10am PDT)
+	MainnetPragueActivation    = uint64(1747328400) // Thu May 15 2025 17:00:00 GMT+0000 (10am PDT)
+)
+
+// Config aggregates all the configuration (chain, evm, block, tx, ...)
+// needed while executing a transaction.
+type Config struct {
+ // Chain Config
+ ChainConfig *gethParams.ChainConfig
+ // EVM config
+ EVMConfig gethVM.Config
+ // block context
+ BlockContext *gethVM.BlockContext
+ // transaction context
+ TxContext *gethVM.TxContext
+ // base unit of gas for direct calls
+ DirectCallBaseGasUsage uint64
+ // captures extra precompiled calls
+ PCTracker *CallTracker
+ // BlockTxCountSoFar captures the total number of
+ // transactions included in this block so far
+ BlockTxCountSoFar uint
+ // BlockTotalGasUsedSoFar captures the total
+ // amount of gas used so far
+ BlockTotalGasUsedSoFar uint64
+ // PrecompiledContracts holds the applicable precompiled contracts
+ // for the current chain rules, as well as any extra precompiled
+ // contracts, such as Cadence Arch etc
+ PrecompiledContracts gethVM.PrecompiledContracts
+}
+
+// ChainRules returns the chain rules
+func (c *Config) ChainRules() gethParams.Rules {
+ return c.ChainConfig.Rules(
+ c.BlockContext.BlockNumber,
+ c.BlockContext.Random != nil,
+ c.BlockContext.Time)
+}
+
+// PreviewNetChainConfig is the chain config used by the previewnet
+var PreviewNetChainConfig = MakeChainConfig(types.FlowEVMPreviewNetChainID)
+
+// TestNetChainConfig is the chain config used by the testnet
+var TestNetChainConfig = MakeChainConfig(types.FlowEVMTestNetChainID)
+
+// MainNetChainConfig is the chain config used by the mainnet
+var MainNetChainConfig = MakeChainConfig(types.FlowEVMMainNetChainID)
+
+// DefaultChainConfig is the chain config used by the emulator
+var DefaultChainConfig = PreviewNetChainConfig
+
+// MakeChainConfig constructs a chain config.
+// It considers the majority of EVM upgrades (e.g. the Cancun update) as already applied.
+// This has been done by setting the heights of these changes to zero
+// and setting the times of some other changes to zero.
+// For future EVM changes, we need to update the EVM go mod version
+// and set a proper height for the specific release based on the Flow EVM heights
+// so it gets activated at the desired time.
+func MakeChainConfig(chainID *big.Int) *gethParams.ChainConfig {
+ chainConfig := &gethParams.ChainConfig{
+ ChainID: chainID,
+
+ // Fork scheduling based on block heights
+ HomesteadBlock:      bigZero,
+ DAOForkBlock:        bigZero,
+ DAOForkSupport:      false,
+ EIP150Block:         bigZero,
+ EIP155Block:         bigZero,
+ EIP158Block:         bigZero,
+ ByzantiumBlock:      bigZero, // already on Byzantium
+ ConstantinopleBlock: bigZero, // already on Constantinople
+ PetersburgBlock:     bigZero, // already on Petersburg
+ IstanbulBlock:       bigZero, // already on Istanbul
+ BerlinBlock:         bigZero, // already on Berlin
+ LondonBlock:         bigZero, // already on London
+ MuirGlacierBlock:    bigZero, // already on MuirGlacier
+
+ // Fork scheduling based on timestamps
+ ShanghaiTime: &zero, // already on Shanghai
+ CancunTime:   &zero, // already on Cancun
+ PragueTime:   nil,   // this is conditionally set below
+ VerkleTime:   nil,   // not on Verkle
+ }
+
+ if chainID.Cmp(types.FlowEVMPreviewNetChainID) == 0 {
+ chainConfig.PragueTime = &PreviewnetPragueActivation
+ } else if chainID.Cmp(types.FlowEVMTestNetChainID) == 0 {
+ chainConfig.PragueTime = &TestnetPragueActivation
+ } else if chainID.Cmp(types.FlowEVMMainNetChainID) == 0 {
+ chainConfig.PragueTime = &MainnetPragueActivation
+ }
+
+ return chainConfig
+}
+
+// The default config supports the dynamic fee structure (EIP-1559),
+// so it accepts both legacy transactions with a fixed gas price
+// and dynamic transactions with tip and cap.
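+// (Under EIP-1559 the effective price paid per gas is
+// min(gasFeeCap, baseFee + gasTipCap); this standard formula is noted
+// here for context only.)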
+// Yet the default config keeps the base fee at zero (no automatic adjustment)
+func defaultConfig() *Config {
+ return &Config{
+ ChainConfig: DefaultChainConfig,
+ EVMConfig: gethVM.Config{
+ // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls)
+ NoBaseFee: true,
+ },
+ TxContext: &gethVM.TxContext{
+ GasPrice:   new(big.Int),
+ BlobFeeCap: new(big.Int),
+ },
+ BlockContext: &gethVM.BlockContext{
+ CanTransfer: gethCore.CanTransfer,
+ Transfer:    gethCore.Transfer,
+ GasLimit:    types.DefaultBlockLevelGasLimit,
+ BaseFee:     types.DefaultBaseFee,
+ GetHash: func(n uint64) gethCommon.Hash {
+ return gethCommon.Hash{}
+ },
+ },
+ PCTracker: NewCallTracker(),
+ }
+}
+
+// NewConfig initializes a new config
+func NewConfig(opts ...Option) *Config {
+ ctx := defaultConfig()
+ for _, applyOption := range opts {
+ ctx = applyOption(ctx)
+ }
+ return ctx
+}
+
+type Option func(*Config) *Config
+
+// WithChainID sets the evm chain ID
+func WithChainID(chainID *big.Int) Option {
+ return func(c *Config) *Config {
+ switch chainID.Uint64() {
+ case types.FlowEVMPreviewNetChainIDInUInt64:
+ c.ChainConfig = PreviewNetChainConfig
+ case types.FlowEVMTestNetChainIDInUInt64:
+ c.ChainConfig = TestNetChainConfig
+ case types.FlowEVMMainNetChainIDInUInt64:
+ c.ChainConfig = MainNetChainConfig
+ }
+ return c
+ }
+}
+
+// WithOrigin sets the origin of the transaction (signer)
+func WithOrigin(origin gethCommon.Address) Option {
+ return func(c *Config) *Config {
+ c.TxContext.Origin = origin
+ return c
+ }
+}
+
+// WithGasPrice sets the gas price for the transaction (usually the one set by the sender)
+func WithGasPrice(gasPrice *big.Int) Option {
+ return func(c *Config) *Config {
+ c.TxContext.GasPrice = gasPrice
+ return c
+ }
+}
+
+// WithGasLimit sets the gas limit of the transaction
+func WithGasLimit(gasLimit uint64) Option {
+ return func(c *Config) *Config {
+ c.BlockContext.GasLimit = gasLimit
+ return c
+ }
+}
+
+// WithCoinbase sets the coinbase of the block (the address the fees are collected to)
+func WithCoinbase(coinbase gethCommon.Address) Option {
+ return func(c *Config) *Config {
+ c.BlockContext.Coinbase = coinbase
+ return c
+ }
+}
+
+// WithBlockNumber sets the block height in the block context
+func WithBlockNumber(blockNumber *big.Int) Option {
+ return func(c *Config) *Config {
+ c.BlockContext.BlockNumber = blockNumber
+ return c
+ }
+}
+
+// WithBlockTime sets the block time in the block context
+func WithBlockTime(time uint64) Option {
+ return func(c *Config) *Config {
+ c.BlockContext.Time = time
+ return c
+ }
+}
+
+// WithGetBlockHashFunction sets the functionality to look up a block hash by height
+func WithGetBlockHashFunction(getHash gethVM.GetHashFunc) Option {
+ return func(c *Config) *Config {
+ c.BlockContext.GetHash = getHash
+ return c
+ }
+}
+
+// WithDirectCallBaseGasUsage sets the base direct call gas usage
+func WithDirectCallBaseGasUsage(gas uint64) Option {
+ return func(c *Config) *Config {
+ c.DirectCallBaseGasUsage = gas
+ return c
+ }
+}
+
+// WithExtraPrecompiledContracts appends the precompiled contract list with extra precompiled contracts
+func WithExtraPrecompiledContracts(precompiledContracts []types.PrecompiledContract) Option {
+ return func(c *Config) *Config {
+ activePrecompiledContracts := gethVM.ActivePrecompiledContracts(c.ChainRules())
+ for _, pc := range precompiledContracts {
+ // wrap pcs for tracking
+ wpc := c.PCTracker.RegisterPrecompiledContract(pc)
+ activePrecompiledContracts[pc.Address().ToCommon()] = wpc
+ }
+ c.PrecompiledContracts = activePrecompiledContracts
+
+ return c
+ }
+}
+
+// WithRandom sets the block context random field
+func WithRandom(rand *gethCommon.Hash) Option {
+ return func(c *Config) *Config {
+ c.BlockContext.Random = rand
+ return c
+ }
+}
+
+// WithTransactionTracer sets a transaction tracer
+func WithTransactionTracer(tracer *tracers.Tracer) Option {
+ return func(c *Config) *Config {
+ if tracer != nil {
+ c.EVMConfig.Tracer = tracer.Hooks
+ }
+ return c
+ }
+}
+
+// WithBlockTxCountSoFar sets the total number of transactions
+// included in the current block so far
+func WithBlockTxCountSoFar(txCount uint) Option {
+ return func(c *Config) *Config {
+ c.BlockTxCountSoFar = txCount
+ return c
+ }
+}
+
+// WithBlockTotalGasUsedSoFar sets the total amount of gas used
+// for this block so far
+func WithBlockTotalGasUsedSoFar(gasUsed uint64) Option {
+ return func(c *Config) *Config {
+ c.BlockTotalGasUsedSoFar = gasUsed
+ return c
+ }
+}
diff --git a/fvm/evm/emulator/emulator.go b/fvm/evm/emulator/emulator.go
new file mode 100644
index 00000000000..5d74eee81e8
--- /dev/null
+++ b/fvm/evm/emulator/emulator.go
@@ -0,0 +1,726 @@
+package emulator
+
+import (
+ "errors"
+ "math/big"
+
+ gethCommon "github.com/ethereum/go-ethereum/common"
+ gethCore "github.com/ethereum/go-ethereum/core"
+ gethTracing "github.com/ethereum/go-ethereum/core/tracing"
+ gethTypes "github.com/ethereum/go-ethereum/core/types"
+ gethVM "github.com/ethereum/go-ethereum/core/vm"
+ gethCrypto "github.com/ethereum/go-ethereum/crypto"
+ gethParams "github.com/ethereum/go-ethereum/params"
+ "github.com/holiman/uint256"
+ "github.com/onflow/atree"
+ "github.com/onflow/crypto/hash"
+
+ "github.com/onflow/flow-go/fvm/evm/emulator/state"
+ "github.com/onflow/flow-go/fvm/evm/types"
+ "github.com/onflow/flow-go/model/flow"
+)
+
+// Emulator wraps an EVM runtime where EVM transactions
+// and direct calls are accepted.
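+//
+// A minimal usage sketch (hypothetical; assumes the caller already holds an
+// atree.Ledger and a root flow.Address):
+//
+//	em := NewEmulator(ledger, rootAddr)
+//	blk, err := em.NewBlockView(types.NewDefaultBlockContext(1))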
+type Emulator struct {
+ rootAddr flow.Address
+ ledger   atree.Ledger
+}
+
+var _ types.Emulator = &Emulator{}
+
+// NewEmulator constructs a new EVM Emulator
+func NewEmulator(
+ ledger atree.Ledger,
+ rootAddr flow.Address,
+) *Emulator {
+ return &Emulator{
+ rootAddr: rootAddr,
+ ledger:   ledger,
+ }
+}
+
+func newConfig(ctx types.BlockContext) *Config {
+ return NewConfig(
+ WithChainID(ctx.ChainID),
+ WithBlockNumber(new(big.Int).SetUint64(ctx.BlockNumber)),
+ WithBlockTime(ctx.BlockTimestamp),
+ WithCoinbase(ctx.GasFeeCollector.ToCommon()),
+ WithDirectCallBaseGasUsage(ctx.DirectCallBaseGasUsage),
+ WithExtraPrecompiledContracts(ctx.ExtraPrecompiledContracts),
+ WithGetBlockHashFunction(ctx.GetHashFunc),
+ WithRandom(&ctx.Random),
+ WithTransactionTracer(ctx.Tracer),
+ WithBlockTotalGasUsedSoFar(ctx.TotalGasUsedSoFar),
+ WithBlockTxCountSoFar(ctx.TxCountSoFar),
+ )
+}
+
+// NewReadOnlyBlockView constructs a new read-only block view
+func (em *Emulator) NewReadOnlyBlockView(ctx types.BlockContext) (types.ReadOnlyBlockView, error) {
+ execState, err := state.NewStateDB(em.ledger, em.rootAddr)
+ return &ReadOnlyBlockView{
+ state: execState,
+ }, err
+}
+
+// NewBlockView constructs a new block view (mutable)
+func (em *Emulator) NewBlockView(ctx types.BlockContext) (types.BlockView, error) {
+ return &BlockView{
+ config:   newConfig(ctx),
+ rootAddr: em.rootAddr,
+ ledger:   em.ledger,
+ }, nil
+}
+
+// ReadOnlyBlockView provides a read-only view of a block
+// and can be used for multiple queries against that block
+type ReadOnlyBlockView struct {
+ state types.StateDB
+}
+
+// BalanceOf returns the balance of the given address
+func (bv *ReadOnlyBlockView) BalanceOf(address types.Address) (*big.Int, error) {
+ bal := bv.state.GetBalance(address.ToCommon())
+ return bal.ToBig(), bv.state.Error()
+}
+
+// NonceOf returns the nonce of the given address
+func (bv *ReadOnlyBlockView) NonceOf(address types.Address) (uint64, error) {
+ return bv.state.GetNonce(address.ToCommon()), bv.state.Error()
+}
+
+// CodeOf returns the code of the given address
+func (bv *ReadOnlyBlockView) CodeOf(address types.Address) (types.Code, error) {
+ return bv.state.GetCode(address.ToCommon()), bv.state.Error()
+}
+
+// CodeHashOf returns the code hash of the given address
+func (bv *ReadOnlyBlockView) CodeHashOf(address types.Address) ([]byte, error) {
+ return bv.state.GetCodeHash(address.ToCommon()).Bytes(), bv.state.Error()
+}
+
+// BlockView allows mutation of the EVM state as part of a block;
+// the current version accepts only a single interaction per block view.
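+//
+// A hypothetical single-interaction sketch (tx being an already-built
+// *gethTypes.Transaction):
+//
+//	blk, _ := em.NewBlockView(ctx)
+//	res, err := blk.RunTransaction(tx)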
+type BlockView struct {
+ config   *Config
+ rootAddr flow.Address
+ ledger   atree.Ledger
+}
+
+// DirectCall executes a direct call
+func (bl *BlockView) DirectCall(call *types.DirectCall) (res *types.Result, err error) {
+ // construct a new procedure
+ proc, err := bl.newProcedure()
+ if err != nil {
+ return nil, err
+ }
+
+ // Set the nonce for the call (needed for some operations like deployment)
+ call.Nonce = proc.state.GetNonce(call.From.ToCommon())
+
+ // Call tx tracer
+ if proc.evm.Config.Tracer != nil && proc.evm.Config.Tracer.OnTxStart != nil {
+ proc.evm.Config.Tracer.OnTxStart(proc.evm.GetVMContext(), call.Transaction(), call.From.ToCommon())
+ defer func() {
+ if proc.evm.Config.Tracer.OnTxEnd != nil &&
+ err == nil && res != nil {
+ proc.evm.Config.Tracer.OnTxEnd(res.Receipt(), res.ValidationError)
+ }
+
+ // call OnLog tracer hook, upon successful call result
+ if proc.evm.Config.Tracer.OnLog != nil &&
+ err == nil && res != nil {
+ for _, log := range res.Logs {
+ proc.evm.Config.Tracer.OnLog(log)
+ }
+ }
+ }()
+ }
+
+ // re-route based on the subtype
+ switch call.SubType {
+ case types.DepositCallSubType:
+ return proc.mintTo(call)
+ case types.WithdrawCallSubType:
+ return proc.withdrawFrom(call)
+ case types.DeployCallSubType:
+ if !call.EmptyToField() {
+ return proc.deployAt(call)
+ }
+ fallthrough
+ default:
+ return proc.runDirect(call)
+ }
+}
+
+// RunTransaction runs an EVM transaction
+func (bl *BlockView) RunTransaction(
+ tx *gethTypes.Transaction,
+) (result *types.Result, err error) {
+ // create a new procedure
+ proc, err := bl.newProcedure()
+ if err != nil {
+ return nil, err
+ }
+
+ // construct a core.Message from the tx
+ msg, err := gethCore.TransactionToMessage(
+ tx,
+ GetSigner(bl.config),
+ proc.config.BlockContext.BaseFee)
+ if err != nil {
+ // this is not a fatal error (e.g.
due to bad signature)
+ // not a valid transaction
+ return types.NewInvalidResult(tx, err), nil
+ }
+
+ // call tracer
+ if proc.evm.Config.Tracer != nil && proc.evm.Config.Tracer.OnTxStart != nil {
+ proc.evm.Config.Tracer.OnTxStart(proc.evm.GetVMContext(), tx, msg.From)
+ }
+
+ // run msg
+ res, err := proc.run(msg, tx.Hash(), tx.Type())
+ if err != nil {
+ return nil, err
+ }
+
+ // all commit errors (StateDB errors) have to be returned
+ res.StateChangeCommitment, err = proc.commit(true)
+ if err != nil {
+ return nil, err
+ }
+
+ // call tracer on tx end
+ if proc.evm.Config.Tracer != nil &&
+ proc.evm.Config.Tracer.OnTxEnd != nil &&
+ res != nil {
+ proc.evm.Config.Tracer.OnTxEnd(res.Receipt(), res.ValidationError)
+ }
+
+ // call OnLog tracer hook, upon successful tx result
+ if proc.evm.Config.Tracer != nil &&
+ proc.evm.Config.Tracer.OnLog != nil &&
+ res != nil {
+ for _, log := range res.Logs {
+ proc.evm.Config.Tracer.OnLog(log)
+ }
+ }
+
+ return res, nil
+}
+
+// BatchRunTransactions runs a batch of EVM transactions
+func (bl *BlockView) BatchRunTransactions(txs []*gethTypes.Transaction) ([]*types.Result, error) {
+ batchResults := make([]*types.Result, len(txs))
+
+ // create a new procedure
+ proc, err := bl.newProcedure()
+ if err != nil {
+ return nil, err
+ }
+
+ for i, tx := range txs {
+ msg, err := gethCore.TransactionToMessage(
+ tx,
+ GetSigner(bl.config),
+ proc.config.BlockContext.BaseFee)
+ if err != nil {
+ batchResults[i] = types.NewInvalidResult(tx, err)
+ continue
+ }
+
+ // call tracer on tx start
+ if proc.evm.Config.Tracer != nil && proc.evm.Config.Tracer.OnTxStart != nil {
+ proc.evm.Config.Tracer.OnTxStart(proc.evm.GetVMContext(), tx, msg.From)
+ }
+
+ // run msg
+ res, err := proc.run(msg, tx.Hash(), tx.Type())
+ if err != nil {
+ return nil, err
+ }
+
+ // all commit errors (StateDB errors) have to be returned
+ res.StateChangeCommitment, err = proc.commit(false)
+ if err != nil {
+ return nil, err
+ }
+
+ // this clears state for any subsequent transaction runs
+ proc.state.Reset()
+
+ // collect result
+ batchResults[i] = res
+
+ // call tracer on tx end
+ if proc.evm.Config.Tracer != nil &&
+ proc.evm.Config.Tracer.OnTxEnd != nil &&
+ res != nil {
+ proc.evm.Config.Tracer.OnTxEnd(res.Receipt(), res.ValidationError)
+ }
+
+ // call OnLog tracer hook, upon successful tx result
+ if proc.evm.Config.Tracer != nil &&
+ proc.evm.Config.Tracer.OnLog != nil &&
+ res != nil {
+ for _, log := range res.Logs {
+ proc.evm.Config.Tracer.OnLog(log)
+ }
+ }
+ }
+
+ // finalize after all the batch transactions are executed to save resources
+ if err := proc.state.Finalize(); err != nil {
+ return nil, err
+ }
+
+ return batchResults, nil
+}
+
+// DryRunTransaction runs an unsigned transaction without persisting the state
+func (bl *BlockView) DryRunTransaction(
+ tx *gethTypes.Transaction,
+ from gethCommon.Address,
+) (*types.Result, error) {
+ // create a new procedure
+ proc, err := bl.newProcedure()
+ if err != nil {
+ return nil, err
+ }
+
+ // convert tx into message
+ msg, err := gethCore.TransactionToMessage(
+ tx,
+ GetSigner(bl.config),
+ proc.config.BlockContext.BaseFee,
+ )
+
+ // we can ignore invalid signature errors since we don't expect signed transactions
+ // (note the nil check: without it, a validly signed tx would return a nil result
+ // with a nil error)
+ if err != nil && !errors.Is(err, gethTypes.ErrInvalidSig) {
+ return nil, err
+ }
+
+ // use the from as the signer
+ msg.From = from
+ // we need to skip nonce/eoa check for dry run
+ msg.SkipNonceChecks = true
+ msg.SkipFromEOACheck = true
+
+ // run and return without committing the state changes
+ return proc.run(msg, tx.Hash(), tx.Type())
+}
+
+func (bl *BlockView) newProcedure() (*procedure, error) {
+ execState, err := state.NewStateDB(bl.ledger, bl.rootAddr)
+ if err != nil {
+ return nil, err
+ }
+ cfg := bl.config
+ evm := gethVM.NewEVM(
+ *cfg.BlockContext,
+ execState,
+ cfg.ChainConfig,
+ cfg.EVMConfig,
+ )
+ evm.SetTxContext(*cfg.TxContext)
+ // inject the applicable precompiled contracts for the current
+ // chain rules, as well as any extra precompiled contracts,
+ // such as Cadence Arch etc
+ evm.SetPrecompiles(cfg.PrecompiledContracts)
+
+ return &procedure{
+ config: cfg,
+ evm:    evm,
+ state:  execState,
+ }, nil
+}
+
+type procedure struct {
+ config *Config
+ evm    *gethVM.EVM
+ state  types.StateDB
+}
+
+// commit commits the changes to the state (with optional finalization)
+func (proc *procedure) commit(finalize bool) (hash.Hash, error) {
+ // Calling `StateDB.Finalise(true)` is currently a no-op, but
+ // we add it here to be more in line with how it's envisioned.
+ proc.state.Finalise(true)
+ stateUpdateCommitment, err := proc.state.Commit(finalize)
+ if err != nil {
+ // if known types (state errors) don't do anything and return
+ if types.IsAFatalError(err) || types.IsAStateError(err) || types.IsABackendError(err) {
+ return stateUpdateCommitment, err
+ }
+
+ // else is a new fatal error
+ return stateUpdateCommitment, types.NewFatalError(err)
+ }
+ return stateUpdateCommitment, nil
+}
+
+func (proc *procedure) mintTo(
+ call *types.DirectCall,
+) (*types.Result, error) {
+ // convert and check value
+ isValid, value := convertAndCheckValue(call.Value)
+ if !isValid {
+ return types.NewInvalidResult(
+ call.Transaction(),
+ types.ErrInvalidBalance), nil
+ }
+
+ // create the bridge account if it doesn't exist
+ bridge := call.From.ToCommon()
+ if !proc.state.Exist(bridge) {
+ proc.state.CreateAccount(bridge)
+ }
+
+ // add balance to the bridge account before transfer
+ proc.state.AddBalance(bridge, value, gethTracing.BalanceIncreaseWithdrawal)
+ // check state errors
+ if err := proc.state.Error(); err != nil {
+ return nil, err
+ }
+
+ // run the deposit call, moving the minted amount from the bridge account to the target
+ res, err := proc.run(call.Message(), call.Hash(), types.DirectCallTxType)
+ if err != nil {
+ return res, err
+ }
+
+ // if any error (invalid or vm) on the internal call, revert and don't commit any change
+ // this prevents cases where we add balance to the bridge but the transfer
+ // fails due to gas, etc.
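+ // (for example, a deposit into a contract whose receive function reverts;
+ // the deposit test in emulator_test.go below exercises this exact case)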
+ if res.Invalid() || res.Failed() {
+ // reset the state to revert the added balance
+ proc.state.Reset()
+ return res, nil
+ }
+
+ // commit and finalize the state and return any stateDB error
+ res.StateChangeCommitment, err = proc.commit(true)
+ return res, err
+}
+
+func (proc *procedure) withdrawFrom(
+ call *types.DirectCall,
+) (*types.Result, error) {
+ // convert and check value
+ isValid, value := convertAndCheckValue(call.Value)
+ if !isValid {
+ return types.NewInvalidResult(
+ call.Transaction(),
+ types.ErrInvalidBalance), nil
+ }
+
+ // check that the balance is not prone to rounding error
+ if types.BalanceConversionToUFix64ProneToRoundingError(call.Value) {
+ return types.NewInvalidResult(
+ call.Transaction(),
+ types.ErrWithdrawBalanceRounding), nil
+ }
+
+ // create the bridge account if it doesn't exist
+ bridge := call.To.ToCommon()
+ if !proc.state.Exist(bridge) {
+ proc.state.CreateAccount(bridge)
+ }
+
+ // withdraw the amount and move it to the bridge account
+ res, err := proc.run(call.Message(), call.Hash(), types.DirectCallTxType)
+ if err != nil {
+ return res, err
+ }
+
+ // if any error (invalid or vm) on the internal call, revert and don't commit any change
+ // this prevents cases where we deduct the balance from the account
+ // but don't return it as a vault.
+ if res.Invalid() || res.Failed() {
+ // reset the state to revert the changes
+ proc.state.Reset()
+ return res, nil
+ }
+
+ // now deduct the balance from the bridge
+ proc.state.SubBalance(bridge, value, gethTracing.BalanceIncreaseWithdrawal)
+
+ // commit and finalize the state and return any stateDB error
+ res.StateChangeCommitment, err = proc.commit(true)
+ return res, err
+}
+
+// deployAt deploys a contract at the given target address.
+// The behavior should be similar to what the evm.create internal method does, with
+// a few differences: we don't need to check for previous forks, given this
+// functionality was not available to anyone, and we don't need to
+// follow snapshotting, given we do commit/revert style in this code base.
+// In the future we might optimize this method by accepting deploy-ready bytecode
+// and skipping the interpreter call, gas calculations, and many checks.
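+//
+// A hypothetical invocation sketch (the constructor is used the same way in
+// the tests further below):
+//
+//	res, err := blk.DirectCall(
+//		types.NewDeployCallWithTargetAddress(caller, target, code, gasLimit, value, nonce),
+//	)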
+func (proc *procedure) deployAt(
+ call *types.DirectCall,
+) (*types.Result, error) {
+ // convert and check value
+ isValid, castedValue := convertAndCheckValue(call.Value)
+ if !isValid {
+ return types.NewInvalidResult(
+ call.Transaction(),
+ types.ErrInvalidBalance), nil
+ }
+
+ txHash := call.Hash()
+ res := &types.Result{
+ TxType: types.DirectCallTxType,
+ TxHash: txHash,
+ }
+
+ if proc.evm.Config.Tracer != nil {
+ tracer := proc.evm.Config.Tracer
+ if tracer.OnEnter != nil {
+ tracer.OnEnter(0, byte(gethVM.CREATE2), call.From.ToCommon(), call.To.ToCommon(), call.Data, call.GasLimit, call.Value)
+ }
+ if tracer.OnGasChange != nil {
+ tracer.OnGasChange(0, call.GasLimit, gethTracing.GasChangeCallInitialBalance)
+ }
+
+ defer func() {
+ if call.GasLimit != 0 && tracer.OnGasChange != nil {
+ tracer.OnGasChange(call.GasLimit, 0, gethTracing.GasChangeCallLeftOverReturned)
+ }
+ if tracer.OnExit != nil {
+ var reverted bool
+ if res.VMError != nil && !errors.Is(res.VMError, gethVM.ErrCodeStoreOutOfGas) {
+ reverted = true
+ }
+ tracer.OnExit(0, res.ReturnedData, call.GasLimit-res.GasConsumed, gethVM.VMErrorFromErr(res.VMError), reverted)
+ }
+ }()
+ }
+
+ addr := call.To.ToCommon()
+ // pre-check 1 - check the balance of the source
+ if call.Value.Sign() != 0 &&
+ !proc.evm.Context.CanTransfer(proc.state, call.From.ToCommon(), castedValue) {
+ res.SetValidationError(gethCore.ErrInsufficientFundsForTransfer)
+ return res, nil
+ }
+
+ // pre-check 2 - ensure no existing EOA or contract is deployed at the address
+ contractHash := proc.state.GetCodeHash(addr)
+ if proc.state.GetNonce(addr) != 0 ||
+ (contractHash != (gethCommon.Hash{}) && contractHash != gethTypes.EmptyCodeHash) {
+ res.VMError = gethVM.ErrContractAddressCollision
+ return res, nil
+ }
+
+ callerCommon := call.From.ToCommon()
+ // set up the caller account if it doesn't exist
+ if !proc.state.Exist(callerCommon) {
+ proc.state.CreateAccount(callerCommon)
+ }
+ // increment the nonce for the caller
+ proc.state.SetNonce(
+ callerCommon,
+ proc.state.GetNonce(callerCommon)+1,
+ gethTracing.NonceChangeContractCreator,
+ )
+
+ // setup account
+ proc.state.CreateAccount(addr)
+ proc.state.SetNonce(addr, 1, gethTracing.NonceChangeNewContract) // (EIP-158)
+ if call.Value.Sign() > 0 {
+ proc.evm.Context.Transfer( // transfer value
+ proc.state,
+ callerCommon,
+ addr,
+ uint256.MustFromBig(call.Value),
+ )
+ }
+
+ // run the code through the interpreter;
+ // this checks for errors and computes the final bytes to be stored under the account
+ var err error
+ contract := gethVM.NewContract(
+ callerCommon,
+ addr,
+ castedValue,
+ call.GasLimit,
+ nil,
+ )
+
+ contract.SetCallCode(gethCrypto.Keccak256Hash(call.Data), call.Data)
+ // update access list (Berlin)
+ proc.state.AddAddressToAccessList(addr)
+
+ ret, err := proc.evm.Run(contract, nil, false)
+ gasCost := uint64(len(ret)) * gethParams.CreateDataGas
+ res.GasConsumed = gasCost
+
+ // handle errors
+ if err != nil {
+ // for all errors except this one consume all the remaining gas (Homestead)
+ if err != gethVM.ErrExecutionReverted {
+ res.GasConsumed = call.GasLimit
+ }
+ res.VMError = err
+ return res, nil
+ }
+
+ // update gas usage
+ if gasCost > call.GasLimit {
+ // consume all the remaining gas (Homestead)
+ res.GasConsumed = call.GasLimit
+ res.VMError = gethVM.ErrCodeStoreOutOfGas
+ return res, nil
+ }
+
+ // check max code size (EIP-170)
+ if len(ret) > gethParams.MaxCodeSize {
+ // consume all the remaining gas (Homestead)
+ res.GasConsumed = call.GasLimit
+ res.VMError = gethVM.ErrMaxCodeSizeExceeded
+ return res, nil
+ }
+
+ // reject code starting with 0xEF (EIP-3541)
+ if len(ret) >= 1 && ret[0] == 0xEF {
+ // consume all the remaining gas (Homestead)
+ res.GasConsumed = call.GasLimit
+ res.VMError = gethVM.ErrInvalidCode
+ return res, nil
+ }
+
+ res.DeployedContractAddress = &call.To
+ res.CumulativeGasUsed = proc.config.BlockTotalGasUsedSoFar + res.GasConsumed
+
+ proc.state.SetCode(addr, ret)
+ res.StateChangeCommitment, err = proc.commit(true)
+ return res, err
+}
+
+func (proc *procedure) runDirect(
+ call *types.DirectCall,
+) (*types.Result, error) {
+ // run the msg
+ res, err := proc.run(
+ call.Message(),
+ call.Hash(),
+ types.DirectCallTxType,
+ )
+ if err != nil {
+ return nil, err
+ }
+ // commit and finalize the state and return any stateDB error
+ res.StateChangeCommitment, err = proc.commit(true)
+ return res, err
+}
+
+// run runs a geth core.Message and returns the result;
+// any validation or execution errors are captured inside the result,
+// while the remaining returned errors require extra handling
+// upstream (e.g. backend errors).
+func (proc *procedure) run(
+ msg *gethCore.Message,
+ txHash gethCommon.Hash,
+ txType uint8,
+) (*types.Result, error) {
+ var err error
+ res := types.Result{
+ TxType: txType,
+ TxHash: txHash,
+ }
+
+ // Negative values are not acceptable;
+ // although we check this condition on direct calls,
+ // it's worth an extra check here given some calls are
+ // coming from batch run, etc.
+ if msg.Value.Sign() < 0 {
+ res.SetValidationError(types.ErrInvalidBalance)
+ return &res, nil
+ }
+
+ // set the origin on the TxContext
+ proc.evm.TxContext.Origin = msg.From
+
+ // reset precompile tracking before the run
+ proc.config.PCTracker.Reset()
+
+ // Set the gas pool based on the block gas limit;
+ // if the block gas limit is ever set to anything other than max,
+ // we need to update this code.
+ gasPool := (*gethCore.GasPool)(&proc.config.BlockContext.GasLimit)
+
+ // transition the state
+ execResult, err := gethCore.ApplyMessage(
+ proc.evm,
+ msg,
+ gasPool,
+ )
+ if err != nil {
+ // if the error is a fatal error, a non-fatal state error, or a backend error, return it;
+ // this condition should never happen given all StateDB errors are withheld for the commit time.
+ if types.IsAFatalError(err) || types.IsAStateError(err) || types.IsABackendError(err) {
+ return nil, err
+ }
+ // otherwise it is a validation error (pre-check failure);
+ // no state change, wrap the error and return
+ res.SetValidationError(err)
+ return &res, nil
+ }
+
+ txIndex := proc.config.BlockTxCountSoFar
+ // if pre-checks are passed, the exec result won't be nil
+ if execResult != nil {
+ res.GasConsumed = execResult.UsedGas
+ res.MaxGasConsumed = execResult.MaxUsedGas
+ res.Index = uint16(txIndex)
+ res.CumulativeGasUsed = execResult.UsedGas + proc.config.BlockTotalGasUsedSoFar
+ res.PrecompiledCalls, err = proc.config.PCTracker.CapturedCalls()
+ if err != nil {
+ return nil, err
+ }
+
+ // we need to capture the returned value no matter the status;
+ // if the tx is reverted the error message is returned as the returned value
+ res.ReturnedData = execResult.ReturnData
+
+ // Update proc context
+ proc.config.BlockTotalGasUsedSoFar = res.CumulativeGasUsed
+ proc.config.BlockTxCountSoFar += 1
+
+ if !execResult.Failed() {
+ // If the transaction has created a contract,
+ // store the creation address in the receipt
+ if msg.To == nil {
+ deployedAddress := types.NewAddress(gethCrypto.CreateAddress(msg.From, msg.Nonce))
+ res.DeployedContractAddress = &deployedAddress
+ }
+ // collect logs
+ res.Logs = proc.state.Logs(
+ proc.config.BlockContext.BlockNumber.Uint64(),
+ txHash,
+ txIndex,
+ )
+ } else {
+ // execResult.Err holds VM errors (we don't return them as error)
+ res.VMError = execResult.Err
+ }
+ }
+ return &res, nil
+}
+
+func convertAndCheckValue(input *big.Int) (isValid bool, converted *uint256.Int) {
+ // check for negative input
+ if input.Sign() < 0 {
+ return false, nil
+ }
+ // convert value into uint256
+ value, overflow := uint256.FromBig(input)
+ if overflow {
+ return false, nil
+ }
+ return true, value
+}
diff --git a/fvm/evm/emulator/emulator_test.go b/fvm/evm/emulator/emulator_test.go
new file mode 100644
index 00000000000..8c4634c72ff
--- /dev/null
+++ b/fvm/evm/emulator/emulator_test.go
@@ -0,0 +1,1260 @@
+package emulator_test
+
+import (
+ "encoding/hex"
+ "fmt"
+ "math"
+ "math/big"
+ "strings"
+ "testing"
+
+ gethCommon "github.com/ethereum/go-ethereum/common"
+ gethTypes "github.com/ethereum/go-ethereum/core/types"
+ gethVM "github.com/ethereum/go-ethereum/core/vm"
+ gethParams "github.com/ethereum/go-ethereum/params"
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/flow-go/fvm/evm/emulator"
+ "github.com/onflow/flow-go/fvm/evm/precompiles"
+ "github.com/onflow/flow-go/fvm/evm/testutils"
+ "github.com/onflow/flow-go/fvm/evm/testutils/contracts"
+ "github.com/onflow/flow-go/fvm/evm/types"
+ "github.com/onflow/flow-go/model/flow"
+
+ _ "github.com/ethereum/go-ethereum/eth/tracers/native" // imported so callTracers is registered in init
+)
+
+var blockNumber = big.NewInt(10)
+var defaultCtx = types.NewDefaultBlockContext(blockNumber.Uint64())
+
+func RunWithNewEmulator(t testing.TB, backend *testutils.TestBackend, rootAddr flow.Address, f func(*emulator.Emulator)) {
+ env := emulator.NewEmulator(backend, rootAddr)
+ f(env)
+}
+
+func RunWithNewBlockView(t testing.TB, em *emulator.Emulator, f func(blk types.BlockView)) {
+ blk, err := em.NewBlockView(defaultCtx)
+ require.NoError(t, err)
+ f(blk)
+}
+
+func RunWithNewReadOnlyBlockView(t testing.TB, em *emulator.Emulator, f func(blk types.ReadOnlyBlockView)) {
+ blk, err := em.NewReadOnlyBlockView(defaultCtx)
+ require.NoError(t, err)
+ f(blk)
+}
+
+func requireSuccessfulExecution(t testing.TB, err error, res *types.Result) {
+ require.NoError(t, err)
+ require.NoError(t, res.VMError)
+ require.NoError(t, res.ValidationError)
+}
+
+func TestNativeTokenBridging(t *testing.T) {
+ testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) {
+ testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) {
+ originalBalance := types.MakeBigIntInFlow(3)
+ testAccount := types.NewAddressFromString("test")
+ bridgeAccount := types.NewAddressFromString("bridge")
+ testAccountNonce := uint64(0)
+
+ t.Run("mint tokens to the first account", func(t *testing.T) {
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ call := types.NewDepositCall(bridgeAccount, testAccount, originalBalance, 0)
+ res, err := blk.DirectCall(call)
+ requireSuccessfulExecution(t, err, res)
+ require.Equal(t, defaultCtx.DirectCallBaseGasUsage, res.GasConsumed)
+ require.Equal(t, call.Hash(), res.TxHash)
+ })
+ })
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) {
+ retBalance, err := blk.BalanceOf(testAccount)
+ require.NoError(t, err)
+ require.Equal(t, originalBalance, retBalance)
+
+ // check balance of bridgeAccount to be zero
+ retBalance, err = blk.BalanceOf(bridgeAccount)
+ require.NoError(t, err)
+ require.Equal(t, big.NewInt(0).Uint64(), retBalance.Uint64())
+ })
+ })
+ })
+ t.Run("tokens deposit to a smart contract that doesn't accept native tokens", func(t *testing.T) {
+ var testContract types.Address
+ // deploy contract
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ emptyContractByteCode, err := hex.DecodeString("6080604052348015600e575f80fd5b50603e80601a5f395ff3fe60806040525f80fdfea2646970667358221220093c3754c634ed147652afc2e8c4a2336be5c37cbc733839668aa5a11e713e6e64736f6c634300081a0033")
+ require.NoError(t, err)
+ call := types.NewDeployCall(
+ bridgeAccount,
+ emptyContractByteCode,
+ 100_000,
+ big.NewInt(0),
+ 1)
+ res, err := blk.DirectCall(call)
+ requireSuccessfulExecution(t, err, res)
+ testContract = *res.DeployedContractAddress
+ })
+ })
+
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ call := types.NewDepositCall(bridgeAccount, testContract, types.MakeBigIntInFlow(1), 0)
+ res, err := blk.DirectCall(call)
+ require.NoError(t, err)
+ require.NoError(t, res.ValidationError)
+ require.Equal(t, res.VMError, gethVM.ErrExecutionReverted)
+ })
+ })
+ })
+
+ t.Run("tokens withdraw", func(t *testing.T) {
+ amount := types.OneFlow()
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) {
+ retBalance, err := blk.BalanceOf(testAccount)
+ require.NoError(t, err)
+ require.Equal(t, originalBalance, retBalance)
+ retNonce, err := blk.NonceOf(testAccount)
+ require.NoError(t, err)
+ require.Equal(t, testAccountNonce, retNonce)
+ })
+ })
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ call := types.NewWithdrawCall(bridgeAccount, testAccount, amount, testAccountNonce)
+ res, err := blk.DirectCall(call)
+ requireSuccessfulExecution(t, err, res)
+ require.Equal(t, defaultCtx.DirectCallBaseGasUsage, res.GasConsumed)
+
require.Equal(t, call.Hash(), res.TxHash) + testAccountNonce += 1 + }) + }) + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) { + retBalance, err := blk.BalanceOf(testAccount) + require.NoError(t, err) + require.Equal(t, amount.Sub(originalBalance, amount), retBalance) + // check balance of bridgeAccount to be zero + + retBalance, err = blk.BalanceOf(bridgeAccount) + require.NoError(t, err) + require.Equal(t, big.NewInt(0).Uint64(), retBalance.Uint64()) + + retNonce, err := blk.NonceOf(testAccount) + require.NoError(t, err) + require.Equal(t, testAccountNonce, retNonce) + }) + }) + }) + t.Run("tokens withdraw that results in rounding error", func(t *testing.T) { + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + RunWithNewBlockView(t, env, func(blk types.BlockView) { + call := types.NewWithdrawCall(bridgeAccount, testAccount, big.NewInt(1000), testAccountNonce) + res, err := blk.DirectCall(call) + require.NoError(t, err) + require.Equal(t, res.ValidationError, types.ErrWithdrawBalanceRounding) + testAccountNonce += 1 + }) + }) + }) + + t.Run("tokens withdraw not having enough balance", func(t *testing.T) { + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + RunWithNewBlockView(t, env, func(blk types.BlockView) { + call := types.NewWithdrawCall(bridgeAccount, testAccount, types.MakeBigIntInFlow(3), testAccountNonce) + res, err := blk.DirectCall(call) + require.NoError(t, err) + require.True(t, + strings.Contains( + res.ValidationError.Error(), + "insufficient funds", + ), + ) + }) + }) + }) + }) + }) +} + +func TestContractInteraction(t *testing.T) { + t.Parallel() + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + + testContract := testutils.GetStorageTestContract(t) + + testAccount := types.NewAddressFromString("test") + bridgeAccount := types.NewAddressFromString("bridge") + testAccountNonce := uint64(0) + + amount := big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(gethParams.Ether)) + amountToBeTransfered := big.NewInt(0).Mul(big.NewInt(100), big.NewInt(gethParams.Ether)) + + // fund test account + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + RunWithNewBlockView(t, env, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, testAccount, amount, 0)) + require.NoError(t, err) + }) + }) + + var contractAddr types.Address + + t.Run("deploy contract", func(t *testing.T) { + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + RunWithNewBlockView(t, env, func(blk types.BlockView) { + call := types.NewDeployCall( + testAccount, + testContract.ByteCode, + math.MaxUint64, + amountToBeTransfered, + testAccountNonce) + res, err := blk.DirectCall(call) + requireSuccessfulExecution(t, err, res) + require.NotNil(t, res.DeployedContractAddress) + contractAddr = *res.DeployedContractAddress + require.Equal(t, call.Hash(), res.TxHash) + testAccountNonce += 1 + }) + RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) { + require.NotNil(t, contractAddr) + retCode, err := blk.CodeOf(contractAddr) + require.NoError(t, err) + require.NotEmpty(t, retCode) + + retBalance, err := blk.BalanceOf(contractAddr) + require.NoError(t, err) + require.Equal(t, amountToBeTransfered, retBalance) + + retBalance, err = blk.BalanceOf(testAccount) + require.NoError(t, err) + 
require.Equal(t, amount.Sub(amount, amountToBeTransfered), retBalance)
+
+ retNonce, err := blk.NonceOf(testAccount)
+ require.NoError(t, err)
+ require.Equal(t, testAccountNonce, retNonce)
+ })
+ })
+ })
+
+ t.Run("call contract", func(t *testing.T) {
+ num := big.NewInt(10)
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewContractCall(
+ testAccount,
+ contractAddr,
+ testContract.MakeCallData(t, "store", num),
+ 1_000_000,
+ big.NewInt(0), // this should be zero because the contract doesn't have receiver
+ testAccountNonce,
+ ),
+ )
+ requireSuccessfulExecution(t, err, res)
+ require.GreaterOrEqual(t, res.GasConsumed, uint64(40_000))
+ testAccountNonce += 1
+ require.Empty(t, res.PrecompiledCalls)
+ })
+ })
+
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewContractCall(
+ testAccount,
+ contractAddr,
+ testContract.MakeCallData(t, "retrieve"),
+ 1_000_000,
+ big.NewInt(0), // this should be zero because the contract doesn't have receiver
+ testAccountNonce,
+ ),
+ )
+ requireSuccessfulExecution(t, err, res)
+ testAccountNonce += 1
+
+ ret := new(big.Int).SetBytes(res.ReturnedData)
+ require.Equal(t, num, ret)
+ require.GreaterOrEqual(t, res.GasConsumed, uint64(23_000))
+ })
+ })
+
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewContractCall(
+ testAccount,
+ contractAddr,
+ testContract.MakeCallData(t, "blockNumber"),
+ 1_000_000,
+ big.NewInt(0), // this should be zero because the contract doesn't have receiver
+ testAccountNonce,
+ ),
+ )
+ requireSuccessfulExecution(t, err, res)
+ testAccountNonce += 1
+
+ ret := new(big.Int).SetBytes(res.ReturnedData)
+ require.Equal(t, blockNumber, ret)
+ })
+ })
+
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewContractCall(
+ testAccount,
+ contractAddr,
+ testContract.MakeCallData(t, "assertError"),
+ 1_000_000,
+ big.NewInt(0), // this should be zero because the contract doesn't have receiver
+ testAccountNonce,
+ ),
+ )
+ require.NoError(t, err)
+ testAccountNonce += 1
+ require.Error(t, res.VMError)
+ require.True(t, strings.Contains(string(res.ReturnedData), "Assert Error Message"))
+ })
+ })
+
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewContractCall(
+ testAccount,
+ contractAddr,
+ testContract.MakeCallData(t, "customError"),
+ 1_000_000,
+ big.NewInt(0), // this should be zero because the contract doesn't have receiver
+ testAccountNonce,
+ ),
+ )
+ require.NoError(t, err)
+ require.NoError(t, res.ValidationError)
+ testAccountNonce += 1
+ require.Error(t, res.VMError)
+ require.True(t, strings.Contains(string(res.ReturnedData), "Value is too low"))
+ })
+ })
+
+ RunWithNewEmulator(t, backend, rootAddr, func(em *emulator.Emulator) {
+ ctx := types.NewDefaultBlockContext(blockNumber.Uint64())
+ blk, err := em.NewBlockView(ctx)
+ require.NoError(t, err)
+ res, err := blk.DirectCall(
+ types.NewContractCall(
+ testAccount,
+ contractAddr,
+ testContract.MakeCallData(t, "chainID"),
+ 1_000_000,
+ big.NewInt(0), // this should be zero because the contract
doesn't have receiver + testAccountNonce, + ), + ) + requireSuccessfulExecution(t, err, res) + testAccountNonce += 1 + + ret := new(big.Int).SetBytes(res.ReturnedData) + require.Equal(t, types.FlowEVMPreviewNetChainID, ret) + }) + }) + + t.Run("test sending transactions (happy case)", func(t *testing.T) { + account := testutils.GetTestEOAAccount(t, testutils.EOATestAccount1KeyHex) + fAddr := account.Address() + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + RunWithNewBlockView(t, env, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, fAddr, amount, account.Nonce())) + require.NoError(t, err) + }) + }) + + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + ctx := types.NewDefaultBlockContext(blockNumber.Uint64()) + ctx.GasFeeCollector = types.NewAddressFromString("coinbase") + coinbaseOrgBalance := gethCommon.Big1 + // small amount of money to create account + RunWithNewBlockView(t, env, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, ctx.GasFeeCollector, coinbaseOrgBalance, 0)) + require.NoError(t, err) + }) + + blk, err := env.NewBlockView(ctx) + require.NoError(t, err) + tx := account.PrepareAndSignTx( + t, + testAccount.ToCommon(), // to + nil, // data + big.NewInt(1000), // amount + gethParams.TxGas, // gas limit + gethCommon.Big1, // gas fee + + ) + res, err := blk.RunTransaction(tx) + requireSuccessfulExecution(t, err, res) + require.Greater(t, res.GasConsumed, uint64(0)) + + // check the balance of coinbase + RunWithNewReadOnlyBlockView(t, env, func(blk2 types.ReadOnlyBlockView) { + bal, err := blk2.BalanceOf(ctx.GasFeeCollector) + require.NoError(t, err) + expected := gethParams.TxGas*gethCommon.Big1.Uint64() + gethCommon.Big1.Uint64() + require.Equal(t, expected, bal.Uint64()) + + nonce, err := blk2.NonceOf(fAddr) + require.NoError(t, err) + require.Equal(t, 1, int(nonce)) + }) + }) + }) + + t.Run("test batch running transactions", func(t *testing.T) { + account := testutils.GetTestEOAAccount(t, testutils.EOATestAccount1KeyHex) + account.SetNonce(account.Nonce() + 1) + fAddr := account.Address() + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + RunWithNewBlockView(t, env, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, fAddr, amount, account.Nonce())) + require.NoError(t, err) + }) + }) + + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + ctx := types.NewDefaultBlockContext(blockNumber.Uint64()) + ctx.GasFeeCollector = types.NewAddressFromString("coinbase-collector") + coinbaseOrgBalance := gethCommon.Big1 + // small amount of money to create account + RunWithNewBlockView(t, env, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, ctx.GasFeeCollector, coinbaseOrgBalance, 0)) + require.NoError(t, err) + }) + + blk, err := env.NewBlockView(ctx) + require.NoError(t, err) + + const batchSize = 3 + txs := make([]*gethTypes.Transaction, batchSize) + for i := range txs { + txs[i] = account.PrepareAndSignTx( + t, + testAccount.ToCommon(), // to + nil, // data + big.NewInt(1000), // amount + gethParams.TxGas, // gas limit + gethCommon.Big1, // gas fee + + ) + } + + results, err := blk.BatchRunTransactions(txs) + require.NoError(t, err) + for _, res := range results { + requireSuccessfulExecution(t, nil, res) + require.Greater(t, res.GasConsumed, uint64(0)) + } + + // check the balance of coinbase + 
RunWithNewReadOnlyBlockView(t, env, func(blk2 types.ReadOnlyBlockView) { + bal, err := blk2.BalanceOf(ctx.GasFeeCollector) + require.NoError(t, err) + expected := gethParams.TxGas*batchSize + gethCommon.Big1.Uint64() + require.Equal(t, expected, bal.Uint64()) + + nonce, err := blk2.NonceOf(fAddr) + require.NoError(t, err) + require.Equal(t, batchSize+1, int(nonce)) + }) + }) + }) + + t.Run("test running transactions with dynamic fees (happy case)", func(t *testing.T) { + account := testutils.GetTestEOAAccount(t, testutils.EOATestAccount1KeyHex) + fAddr := account.Address() + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + RunWithNewBlockView(t, env, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, fAddr, amount, account.Nonce())) + require.NoError(t, err) + }) + }) + account.SetNonce(account.Nonce() + 4) + + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + ctx := types.NewDefaultBlockContext(blockNumber.Uint64()) + ctx.GasFeeCollector = types.NewAddressFromString("coinbase") + coinbaseOrgBalance := gethCommon.Big1 + // small amount of money to create account + RunWithNewBlockView(t, env, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, ctx.GasFeeCollector, coinbaseOrgBalance, 1)) + require.NoError(t, err) + }) + + blk, err := env.NewBlockView(ctx) + require.NoError(t, err) + tx := account.SignTx( + t, + gethTypes.NewTx(&gethTypes.DynamicFeeTx{ + ChainID: types.FlowEVMPreviewNetChainID, + Nonce: account.Nonce(), + GasTipCap: big.NewInt(2), + GasFeeCap: big.NewInt(3), + Gas: gethParams.TxGas, + To: &gethCommon.Address{}, + Value: big.NewInt(1), + }), + ) + account.SetNonce(account.Nonce() + 1) + + res, err := blk.RunTransaction(tx) + requireSuccessfulExecution(t, err, res) + require.Greater(t, res.GasConsumed, uint64(0)) + }) + }) + + t.Run("test sending transactions (invalid nonce)", func(t *testing.T) { + account := testutils.GetTestEOAAccount(t, testutils.EOATestAccount1KeyHex) + fAddr := account.Address() + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + RunWithNewBlockView(t, env, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, fAddr, amount, account.Nonce())) + require.NoError(t, err) + }) + }) + + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + ctx := types.NewDefaultBlockContext(blockNumber.Uint64()) + blk, err := env.NewBlockView(ctx) + require.NoError(t, err) + tx := account.SignTx(t, + gethTypes.NewTransaction( + 100, // nonce + testAccount.ToCommon(), // to + big.NewInt(1000), // amount + gethParams.TxGas, // gas limit + gethCommon.Big1, // gas fee + nil, // data + ), + ) + res, err := blk.RunTransaction(tx) + require.NoError(t, err) + require.Error(t, res.ValidationError) + }) + }) + + t.Run("test sending transactions (bad signature)", func(t *testing.T) { + RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) { + ctx := types.NewDefaultBlockContext(blockNumber.Uint64()) + blk, err := env.NewBlockView(ctx) + require.NoError(t, err) + tx := gethTypes.NewTx(&gethTypes.LegacyTx{ + Nonce: 0, + GasPrice: gethCommon.Big1, + Gas: gethParams.TxGas, // gas limit + To: nil, // to + Value: big.NewInt(1000), // amount + Data: nil, // data + V: big.NewInt(1), + R: big.NewInt(2), + S: big.NewInt(3), + }) + res, err := blk.RunTransaction(tx) + require.NoError(t, err) + require.Error(t, res.ValidationError) + }) + }) + }) + }) +} + +func 
TestDeployAtFunctionality(t *testing.T) {
+ testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) {
+ testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) {
+ testContract := testutils.GetStorageTestContract(t)
+ testAccount := types.NewAddressFromString("test")
+ bridgeAccount := types.NewAddressFromString("bridge")
+
+ amount := big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(gethParams.Ether))
+ amountToBeTransfered := big.NewInt(0).Mul(big.NewInt(100), big.NewInt(gethParams.Ether))
+
+ // fund test account
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, testAccount, amount, 0))
+ require.NoError(t, err)
+ })
+ })
+
+ t.Run("deploy contract at target address", func(t *testing.T) {
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ target := types.Address{1, 2, 3}
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewDeployCallWithTargetAddress(
+ testAccount,
+ target,
+ testContract.ByteCode,
+ math.MaxUint64,
+ amountToBeTransfered,
+ 0,
+ ),
+ )
+ require.NoError(t, err)
+ require.NotNil(t, res.DeployedContractAddress)
+ require.Equal(t, target, *res.DeployedContractAddress)
+ })
+ RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) {
+ require.NotNil(t, target)
+ retCode, err := blk.CodeOf(target)
+ require.NoError(t, err)
+ require.NotEmpty(t, retCode)
+
+ retBalance, err := blk.BalanceOf(target)
+ require.NoError(t, err)
+ require.Equal(t, amountToBeTransfered, retBalance)
+
+ retBalance, err = blk.BalanceOf(testAccount)
+ require.NoError(t, err)
+ require.Equal(t, amount.Sub(amount, amountToBeTransfered), retBalance)
+ })
+ // test deployment to an address that already exists
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewDeployCallWithTargetAddress(
+ testAccount,
+ target,
+ testContract.ByteCode,
+ math.MaxUint64,
+ amountToBeTransfered,
+ 0),
+ )
+ require.NoError(t, err)
+ require.Equal(t, gethVM.ErrContractAddressCollision, res.VMError)
+ })
+ // test deployment with not enough gas
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewDeployCallWithTargetAddress(
+ testAccount,
+ types.Address{3, 4, 5},
+ testContract.ByteCode,
+ 100,
+ new(big.Int),
+ 0),
+ )
+ require.NoError(t, err)
+ require.Equal(t, fmt.Errorf("out of gas"), res.VMError)
+ })
+ })
+ })
+ })
+ })
+}
+
+// TestSelfdestruct deploys a contract with a selfdestruct function,
+// calls that function, and makes sure the balance the contract held
+// is returned to the address provided, while the contract data stays,
+// per EIP-6780 (https://eips.ethereum.org/EIPS/eip-6780), in the case
+// where selfdestruct is not called in the same transaction as the deployment.
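+//
+// (Under EIP-6780, selfdestruct deletes an account only when it runs in the
+// same transaction that created the contract; otherwise it merely transfers
+// the remaining balance, which is what this test asserts.)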
+func TestSelfdestruct(t *testing.T) {
+ testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) {
+ testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) {
+ testutils.RunWithEOATestAccount(t, backend, rootAddr, func(testAccount *testutils.EOATestAccount) {
+
+ testContract := testutils.GetStorageTestContract(t)
+ testAddress := types.NewAddressFromString("testaddr")
+ bridgeAccount := types.NewAddressFromString("bridge")
+
+ startBalance := big.NewInt(0).Mul(big.NewInt(1000), big.NewInt(gethParams.Ether))
+ deployBalance := big.NewInt(0).Mul(big.NewInt(10), big.NewInt(gethParams.Ether))
+ var contractAddr types.Address
+
+ // set up the test with a funded account and deploy a selfdestruct contract.
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, testAddress, startBalance, 0))
+ require.NoError(t, err)
+ requireSuccessfulExecution(t, err, res)
+ })
+
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewDeployCall(
+ testAddress,
+ testContract.ByteCode,
+ math.MaxUint64,
+ deployBalance,
+ 0),
+ )
+ requireSuccessfulExecution(t, err, res)
+ require.NotNil(t, res.DeployedContractAddress)
+ contractAddr = *res.DeployedContractAddress
+ })
+
+ RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) {
+ bal, err := blk.BalanceOf(testAddress)
+ require.NoError(t, err)
+ require.Equal(t, big.NewInt(0).Sub(startBalance, deployBalance), bal)
+
+ bal, err = blk.BalanceOf(contractAddr)
+ require.NoError(t, err)
+ require.Equal(t, deployBalance, bal)
+ })
+
+ // call the destroy method which executes the selfdestruct call.
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(&types.DirectCall{
+ Type:     types.DirectCallTxType,
+ From:     testAddress,
+ To:       contractAddr,
+ Data:     testContract.MakeCallData(t, "destroy"),
+ Value:    big.NewInt(0),
+ GasLimit: 100_000,
+ })
+ requireSuccessfulExecution(t, err, res)
+ })
+
+ // after calling selfdestruct the balance should be returned to the caller and
+ // equal the initial funded balance of the caller.
+ RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) {
+ bal, err := blk.BalanceOf(testAddress)
+ require.NoError(t, err)
+ require.Equal(t, startBalance, bal)
+
+ bal, err = blk.BalanceOf(contractAddr)
+ require.NoError(t, err)
+ require.Equal(t, big.NewInt(0).Uint64(), bal.Uint64())
+
+ nonce, err := blk.NonceOf(contractAddr)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), nonce)
+
+ code, err := blk.CodeOf(contractAddr)
+ require.NoError(t, err)
+ require.True(t, len(code) > 0)
+ })
+ })
+ })
+ })
+ })
+}
+
+// test factory patterns
+func TestFactoryPatterns(t *testing.T) {
+ testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) {
+ testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) {
+
+ var factoryAddress types.Address
+ factoryContract := testutils.GetFactoryTestContract(t)
+ factoryDeployer := types.NewAddressFromString("factoryDeployer")
+ factoryDeployerBalance := big.NewInt(0).Mul(big.NewInt(1000), big.NewInt(gethParams.Ether))
+ factoryBalance := big.NewInt(0).Mul(big.NewInt(100), big.NewInt(gethParams.Ether))
+
+ // set up the test with a funded account and deploy a factory contract.
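+ // (the factory deploys with a salt, i.e. via CREATE2; under EIP-1014 the
+ // deployed address is derived as
+ // keccak256(0xff ++ factoryAddress ++ salt ++ keccak256(initCode))[12:],
+ // which is why each sub-test below uses a distinct salt)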
+ RunWithNewEmulator(t, backend, rootAddr, func(env *emulator.Emulator) {
+ t.Run("test deploying factory", func(t *testing.T) {
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(types.NewDepositCall(types.EmptyAddress, factoryDeployer, factoryDeployerBalance, 0))
+ require.NoError(t, err)
+ requireSuccessfulExecution(t, err, res)
+ })
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewDeployCall(
+ factoryDeployer,
+ factoryContract.ByteCode,
+ math.MaxUint64,
+ factoryBalance,
+ 0),
+ )
+ requireSuccessfulExecution(t, err, res)
+ require.NotNil(t, res.DeployedContractAddress)
+ factoryAddress = *res.DeployedContractAddress
+ })
+ })
+
+ t.Run("test self-destruct to a contract that is already deployed",
+ func(t *testing.T) {
+ // first deploy via a factory call, then self-destruct in a later transaction
+ var deployed types.Address
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ salt := [32]byte{1}
+ res, err := blk.DirectCall(
+ types.NewContractCall(
+ factoryDeployer,
+ factoryAddress,
+ factoryContract.MakeCallData(t, "deploy", salt),
+ 250_000,
+ big.NewInt(0),
+ 0,
+ ),
+ )
+ requireSuccessfulExecution(t, err, res)
+
+ // decode address, data is left padded
+ deployed = types.Address(gethCommon.BytesToAddress(res.ReturnedData[12:]))
+ })
+
+ // deposit money into the contract
+ depositedBalance := big.NewInt(200)
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(types.NewDepositCall(
+ types.EmptyAddress,
+ deployed,
+ depositedBalance, 1))
+ require.NoError(t, err)
+ requireSuccessfulExecution(t, err, res)
+ })
+ // check balance of contract
+ RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) {
+ bal, err := blk.BalanceOf(deployed)
+ require.NoError(t, err)
+ require.Equal(t, depositedBalance, bal)
+ })
+
+ // set storage on deployed contract
+ storedValue := big.NewInt(12)
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewContractCall(
+ factoryDeployer,
+ types.Address(deployed),
+ testutils.MakeCallData(t,
+ contracts.FactoryDeployableContractABIJSON,
+ "set",
+ storedValue),
+ 120_000,
+ big.NewInt(0),
+ 0,
+ ),
+ )
+ requireSuccessfulExecution(t, err, res)
+ })
+
+ // call self-destruct on the deployed contract
+ refundAddress := testutils.RandomAddress(t)
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewContractCall(
+ factoryDeployer,
+ types.Address(deployed),
+ testutils.MakeCallData(t,
+ contracts.FactoryDeployableContractABIJSON,
+ "destroy",
+ refundAddress),
+ 120_000,
+ big.NewInt(0),
+ 0,
+ ),
+ )
+ requireSuccessfulExecution(t, err, res)
+ })
+
+ // check balance of the refund address and the contract;
+ // the balance should be transferred to the refund address
+ RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) {
+ bal, err := blk.BalanceOf(refundAddress)
+ require.NoError(t, err)
+ require.Equal(t, depositedBalance, bal)
+
+ bal, err = blk.BalanceOf(deployed)
+ require.NoError(t, err)
+ require.True(t, types.BalancesAreEqual(big.NewInt(0), bal))
+ })
+
+ // data should still be there
+ RunWithNewBlockView(t, env, func(blk types.BlockView) {
+ res, err := blk.DirectCall(
+ types.NewContractCall(
+ factoryDeployer,
+ types.Address(deployed),
+ testutils.MakeCallData(t,
+ contracts.FactoryDeployableContractABIJSON,
+ "get"),
+ 120_000,
+ big.NewInt(0),
+ 0,
+ ),
+ )
+ requireSuccessfulExecution(t, err, res)
+ require.Equal(t, storedValue, new(big.Int).SetBytes(res.ReturnedData))
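+ // (the stored value survives the self-destruct because, under
+ // EIP-6780, the contract was not created in the same transaction)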
storedValue, new(big.Int).SetBytes(res.ReturnedData)) + }) + }) + + t.Run("test deploy and destroy in a single call", + func(t *testing.T) { + var originalFactoryBalance types.Balance + var err error + RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) { + originalFactoryBalance, err = blk.BalanceOf(factoryAddress) + require.NoError(t, err) + }) + + storedValue := big.NewInt(100) + RunWithNewBlockView(t, env, func(blk types.BlockView) { + salt := [32]byte{2} + res, err := blk.DirectCall( + types.NewContractCall( + factoryDeployer, + factoryAddress, + factoryContract.MakeCallData(t, + "deployAndDestroy", + salt, + storedValue), + 400_000, + big.NewInt(0), + 0, + ), + ) + requireSuccessfulExecution(t, err, res) + }) + + // no balance change on the caller + RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) { + ret, err := blk.BalanceOf(factoryAddress) + require.NoError(t, err) + require.True(t, types.BalancesAreEqual(originalFactoryBalance, ret)) + }) + }) + t.Run("test deposit first to an address and then deploy in a single call", + func(t *testing.T) { + storedValue := big.NewInt(120) + balance := big.NewInt(80) + var deployed types.Address + RunWithNewBlockView(t, env, func(blk types.BlockView) { + salt := [32]byte{3} + res, err := blk.DirectCall( + types.NewContractCall( + factoryDeployer, + factoryAddress, + factoryContract.MakeCallData(t, "depositAndDeploy", salt, balance, storedValue), + 250_000, + big.NewInt(0), + 1, + ), + ) + requireSuccessfulExecution(t, err, res) + // decode address, data is left padded + deployed = types.Address(gethCommon.BytesToAddress(res.ReturnedData[12:])) + }) + + // the deployed contract should hold the deposited balance + RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) { + ret, err := blk.BalanceOf(deployed) + require.NoError(t, err) + require.True(t, types.BalancesAreEqual(balance, ret)) + }) + + // check stored data + RunWithNewBlockView(t, env, func(blk types.BlockView) { + res, err := blk.DirectCall( + types.NewContractCall( + factoryDeployer, + types.Address(deployed), + testutils.MakeCallData(t, + contracts.FactoryDeployableContractABIJSON, + "get"), + 120_000, + big.NewInt(0), + 0, + ), + ) + requireSuccessfulExecution(t, err, res) + require.Equal(t, storedValue, new(big.Int).SetBytes(res.ReturnedData)) + }) + }) + + t.Run("test deposit, deploy, destroy in a single call", + func(t *testing.T) { + var originalFactoryBalance types.Balance + var err error + RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) { + originalFactoryBalance, err = blk.BalanceOf(factoryAddress) + require.NoError(t, err) + }) + + RunWithNewBlockView(t, env, func(blk types.BlockView) { + salt := [32]byte{4} + res, err := blk.DirectCall( + types.NewContractCall( + factoryDeployer, + factoryAddress, + factoryContract.MakeCallData(t, "depositDeployAndDestroy", salt, big.NewInt(100), big.NewInt(10)), + 250_000, + big.NewInt(0), + 1, + ), + ) + requireSuccessfulExecution(t, err, res) + }) + // no balance change on the caller + RunWithNewReadOnlyBlockView(t, env, func(blk types.ReadOnlyBlockView) { + ret, err := blk.BalanceOf(factoryAddress) + require.NoError(t, err) + require.True(t, types.BalancesAreEqual(originalFactoryBalance, ret)) + }) + }) + }) + }) + }) +} + +func TestTransfers(t *testing.T) { + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + + testAccount1 := types.NewAddressFromString("test1") + testAccount2 =
types.NewAddressFromString("test2") + bridgeAccount := types.NewAddressFromString("bridge") + + amount := big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(gethParams.Ether)) + amountToBeTransfered := big.NewInt(0).Mul(big.NewInt(100), big.NewInt(gethParams.Ether)) + + RunWithNewEmulator(t, backend, rootAddr, func(em *emulator.Emulator) { + RunWithNewBlockView(t, em, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, testAccount1, amount, 0)) + require.NoError(t, err) + }) + }) + + RunWithNewEmulator(t, backend, rootAddr, func(em *emulator.Emulator) { + RunWithNewBlockView(t, em, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewTransferCall(testAccount1, testAccount2, amountToBeTransfered, 0)) + require.NoError(t, err) + }) + }) + + RunWithNewEmulator(t, backend, rootAddr, func(em *emulator.Emulator) { + RunWithNewReadOnlyBlockView(t, em, func(blk types.ReadOnlyBlockView) { + bal, err := blk.BalanceOf(testAccount2) + require.NoError(t, err) + require.Equal(t, amountToBeTransfered.Uint64(), bal.Uint64()) + + bal, err = blk.BalanceOf(testAccount1) + require.NoError(t, err) + require.Equal(t, new(big.Int).Sub(amount, amountToBeTransfered).Uint64(), bal.Uint64()) + }) + }) + }) + }) +} + +func TestStorageNoSideEffect(t *testing.T) { + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(flowEVMRoot flow.Address) { + var err error + em := emulator.NewEmulator(backend, flowEVMRoot) + testAccount := types.NewAddressFromString("test") + bridgeAccount := types.NewAddressFromString("bridge") + + amount := big.NewInt(10) + RunWithNewBlockView(t, em, func(blk types.BlockView) { + _, err = blk.DirectCall(types.NewDepositCall(bridgeAccount, testAccount, amount, 0)) + require.NoError(t, err) + }) + + orgSize := backend.TotalStorageSize() + RunWithNewBlockView(t, em, func(blk types.BlockView) { + _, err = blk.DirectCall(types.NewDepositCall(bridgeAccount, testAccount, amount, 0)) + require.NoError(t, err) + }) + require.Equal(t, orgSize, backend.TotalStorageSize()) + }) + }) +} + +func TestCallingExtraPrecompiles(t *testing.T) { + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(flowEVMRoot flow.Address) { + RunWithNewEmulator(t, backend, flowEVMRoot, func(em *emulator.Emulator) { + + testAccount := types.NewAddressFromString("test") + bridgeAccount := types.NewAddressFromString("bridge") + amount := big.NewInt(10_000_000) + RunWithNewBlockView(t, em, func(blk types.BlockView) { + _, err := blk.DirectCall(types.NewDepositCall(bridgeAccount, testAccount, amount, 0)) + require.NoError(t, err) + }) + + input := []byte{1, 2} + output := []byte{3, 4} + addr := testutils.RandomAddress(t) + capturedCall := &types.PrecompiledCalls{ + Address: addr, + RequiredGasCalls: []uint64{10}, + RunCalls: []types.RunCall{{ + Output: output, + ErrorMsg: "", + }}, + } + pc := &MockedPrecompiled{ + AddressFunc: func() types.Address { + return addr + }, + RequiredGasFunc: func(inp []byte) uint64 { + require.Equal(t, input, inp) + return uint64(10) + }, + RunFunc: func(inp []byte) ([]byte, error) { + require.Equal(t, input, inp) + return output, nil + }, + } + + ctx := types.NewDefaultBlockContext(blockNumber.Uint64()) + ctx.ExtraPrecompiledContracts = []types.PrecompiledContract{pc} + + blk, err := em.NewBlockView(ctx) + require.NoError(t, err) + + res, err := blk.DirectCall( + types.NewContractCall( + testAccount, + 
types.NewAddress(addr.ToCommon()), + input, + 1_000_000, + big.NewInt(0), // this should be zero because the contract doesn't have receiver + 0, + ), + ) + require.NoError(t, err) + require.Equal(t, output, res.ReturnedData) + require.NotEmpty(t, res.PrecompiledCalls) + + apc, err := types.AggregatedPrecompileCallsFromEncoded(res.PrecompiledCalls) + require.NoError(t, err) + require.Len(t, apc, 1) + require.Equal(t, *capturedCall, apc[0]) + }) + }) + }) +} + +func TestTxIndex(t *testing.T) { + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + RunWithNewEmulator(t, backend, rootAddr, func(em *emulator.Emulator) { + ctx := types.NewDefaultBlockContext(blockNumber.Uint64()) + expectedTxIndex := uint16(1) + ctx.TxCountSoFar = 1 + testAccount1 := testutils.RandomAddress(t) + testAccount2 := testutils.RandomAddress(t) + + blk, err := em.NewBlockView(ctx) + require.NoError(t, err) + + res, err := blk.DirectCall( + types.NewContractCall( + testAccount1, + testAccount2, + nil, + 1_000_000, + big.NewInt(0), + 0, + ), + ) + + require.NoError(t, err) + require.Equal(t, expectedTxIndex, res.Index) + expectedTxIndex += 1 + ctx.TxCountSoFar = 2 + + // create a test eoa account + account := testutils.GetTestEOAAccount(t, testutils.EOATestAccount1KeyHex) + fAddr := account.Address() + + blk, err = em.NewBlockView(ctx) + require.NoError(t, err) + res, err = blk.DirectCall( + types.NewDepositCall( + types.EmptyAddress, + fAddr, + types.OneFlow(), + account.Nonce(), + )) + requireSuccessfulExecution(t, err, res) + require.Equal(t, expectedTxIndex, res.Index) + expectedTxIndex += 1 + ctx.TxCountSoFar = 3 + + blk, err = em.NewBlockView(ctx) + require.NoError(t, err) + + tx := account.PrepareAndSignTx( + t, + testAccount1.ToCommon(), // to + nil, // data + big.NewInt(0), // amount + gethParams.TxGas, // gas limit + big.NewInt(0), + ) + + res, err = blk.RunTransaction(tx) + requireSuccessfulExecution(t, err, res) + require.Equal(t, expectedTxIndex, res.Index) + expectedTxIndex += 1 + ctx.TxCountSoFar = 4 + + blk, err = em.NewBlockView(ctx) + require.NoError(t, err) + + const batchSize = 3 + txs := make([]*gethTypes.Transaction, batchSize) + for i := range txs { + txs[i] = account.PrepareAndSignTx( + t, + testAccount1.ToCommon(), // to + nil, // data + big.NewInt(0), // amount + gethParams.TxGas, // gas limit + big.NewInt(0), + ) + } + results, err := blk.BatchRunTransactions(txs) + require.NoError(t, err) + for i, res := range results { + requireSuccessfulExecution(t, err, res) + require.Equal(t, expectedTxIndex+uint16(i), res.Index) + } + }) + }) + }) +} + +type MockedPrecompiled struct { + AddressFunc func() types.Address + RequiredGasFunc func(input []byte) uint64 + RunFunc func(input []byte) ([]byte, error) +} + +var _ types.PrecompiledContract = &MockedPrecompiled{} + +func (mp *MockedPrecompiled) Address() types.Address { + if mp.AddressFunc == nil { + panic("Address not set for the mocked precompiled contract") + } + return mp.AddressFunc() +} + +func (mp *MockedPrecompiled) RequiredGas(input []byte) uint64 { + if mp.RequiredGasFunc == nil { + panic("RequiredGas not set for the mocked precompiled contract") + } + return mp.RequiredGasFunc(input) +} + +func (mp *MockedPrecompiled) Run(input []byte) ([]byte, error) { + if mp.RunFunc == nil { + panic("Run not set for the mocked precompiled contract") + } + return mp.RunFunc(input) +} + +func (mp *MockedPrecompiled) Name() string { + return 
precompiles.CADENCE_ARCH_PRECOMPILE_NAME +} diff --git a/fvm/evm/emulator/signer.go b/fvm/evm/emulator/signer.go new file mode 100644 index 00000000000..44b2964f843 --- /dev/null +++ b/fvm/evm/emulator/signer.go @@ -0,0 +1,28 @@ +package emulator + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/core/types" +) + +var defaultBlockNumberForEVMRules = big.NewInt(1) // anything bigger than 0 + +// GetDefaultSigner returns a signer which is compatible with the default config +func GetDefaultSigner() types.Signer { + cfg := NewConfig(WithBlockNumber(defaultBlockNumberForEVMRules)) + return GetSigner(cfg) +} + +// GetSigner returns an EVM signer object that is compatible with the given config +// +// Despite its misleading name, a signer encapsulates transaction signature validation functionality and +// does not provide actual signing functionality. +// We kept the same name to be consistent with EVM naming. +func GetSigner(cfg *Config) types.Signer { + return types.MakeSigner( + cfg.ChainConfig, + cfg.BlockContext.BlockNumber, + cfg.BlockContext.Time, + ) +} diff --git a/fvm/evm/emulator/state/account.go b/fvm/evm/emulator/state/account.go new file mode 100644 index 00000000000..47640df1c98 --- /dev/null +++ b/fvm/evm/emulator/state/account.go @@ -0,0 +1,67 @@ +package state + +import ( + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/holiman/uint256" +) + +// Account holds the metadata of an address and provides (de)serialization functionality +// +// Note that the code and storage slots of an address are not part of this data structure +type Account struct { + // address + Address gethCommon.Address + // balance of the address + Balance *uint256.Int + // nonce of the address + Nonce uint64 + // hash of the code + // if there is no code, gethTypes.EmptyCodeHash is stored + CodeHash gethCommon.Hash + // the id of the collection that holds the storage slots for this account + // this value is nil for EOA accounts + CollectionID []byte +} + +// NewAccount constructs a new account +func NewAccount( + address gethCommon.Address, + balance *uint256.Int, + nonce uint64, + codeHash gethCommon.Hash, + collectionID []byte, +) *Account { + return &Account{ + Address: address, + Balance: balance, + Nonce: nonce, + CodeHash: codeHash, + CollectionID: collectionID, + } +} + +// HasCode returns true if the account has code +func (a *Account) HasCode() bool { + return a.CodeHash != gethTypes.EmptyCodeHash +} + +// HasStoredValues returns true if the account has stored values +func (a *Account) HasStoredValues() bool { + return len(a.CollectionID) != 0 +} + +// Encode encodes the account +func (a *Account) Encode() ([]byte, error) { + return rlp.EncodeToBytes(a) +} + +// DecodeAccount constructs a new account from the encoded data +func DecodeAccount(inp []byte) (*Account, error) { + if len(inp) == 0 { + return nil, nil + } + a := &Account{} + return a, rlp.DecodeBytes(inp, a) +} diff --git a/fvm/evm/emulator/state/account_test.go b/fvm/evm/emulator/state/account_test.go new file mode 100644 index 00000000000..4d8d31ff08d --- /dev/null +++ b/fvm/evm/emulator/state/account_test.go @@ -0,0 +1,28 @@ +package state_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/emulator/state" + "github.com/onflow/flow-go/fvm/evm/testutils" +) + +func TestAccountEncoding(t *testing.T) { + acc := state.NewAccount(
testutils.RandomCommonAddress(t), + testutils.RandomUint256Int(1000), + uint64(2), + common.BytesToHash([]byte{1}), + []byte{2}, + ) + + encoded, err := acc.Encode() + require.NoError(t, err) + + ret, err := state.DecodeAccount(encoded) + require.NoError(t, err) + require.Equal(t, acc, ret) +} diff --git a/fvm/evm/emulator/state/base.go b/fvm/evm/emulator/state/base.go new file mode 100644 index 00000000000..a0e85fee22e --- /dev/null +++ b/fvm/evm/emulator/state/base.go @@ -0,0 +1,832 @@ +package state + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/holiman/uint256" + "github.com/onflow/atree" + + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +const ( + // AccountsStorageIDKey is the path where we store the collection ID for accounts + AccountsStorageIDKey = "AccountsStorageIDKey" + // CodesStorageIDKey is the path where we store the collection ID for codes + CodesStorageIDKey = "CodesStorageIDKey" +) + +var EmptyHash = gethCommon.Hash{} + +// BaseView implements types.BaseView. +// It acts as the base layer of state queries for the stateDB; +// it stores accounts, codes and storage slots. +// +// under the hood it uses a set of collections: +// one for accounts' metadata, one for codes, +// and one for each account's storage space. +type BaseView struct { + rootAddress flow.Address + ledger atree.Ledger + collectionProvider *CollectionProvider + + // collections + accounts *Collection + codes *Collection + slots map[gethCommon.Address]*Collection + + // cached values + cachedAccounts map[gethCommon.Address]*Account + cachedCodes map[gethCommon.Address][]byte + cachedSlots map[types.SlotAddress]gethCommon.Hash + + // flags + accountSetupOnCommit bool + codeSetupOnCommit bool +} + +var _ types.BaseView = &BaseView{} + +// NewBaseView constructs a new base view +func NewBaseView(ledger atree.Ledger, rootAddress flow.Address) (*BaseView, error) { + cp, err := NewCollectionProvider(atree.Address(rootAddress), ledger) + if err != nil { + return nil, err + } + + view := &BaseView{ + ledger: ledger, + rootAddress: rootAddress, + collectionProvider: cp, + + slots: make(map[gethCommon.Address]*Collection), + + cachedAccounts: make(map[gethCommon.Address]*Account), + cachedCodes: make(map[gethCommon.Address][]byte), + cachedSlots: make(map[types.SlotAddress]gethCommon.Hash), + } + + // fetch the account collection; if it doesn't exist, create one + view.accounts, view.accountSetupOnCommit, err = view.fetchOrCreateCollection(AccountsStorageIDKey) + if err != nil { + return nil, fmt.Errorf("failed to fetch or create account collection with key %v: %w", AccountsStorageIDKey, err) + } + + // fetch the code collection; if it doesn't exist, create one + view.codes, view.codeSetupOnCommit, err = view.fetchOrCreateCollection(CodesStorageIDKey) + if err != nil { + return nil, fmt.Errorf("failed to fetch or create code collection with key %v: %w", CodesStorageIDKey, err) + } + + return view, nil +} + +// Exist returns true if the address exists in the state +func (v *BaseView) Exist(addr gethCommon.Address) (bool, error) { + acc, err := v.getAccount(addr) + return acc != nil, err +} + +// IsCreated returns true if the address has been created in the context of this transaction +func (v *BaseView) IsCreated(gethCommon.Address) bool { + return false +} + +// IsNewContract returns true if the
address is a new contract +func (v *BaseView) IsNewContract(gethCommon.Address) bool { + return false +} + +// HasSelfDestructed returns true if an address is flagged for destruction at the end of the transaction +func (v *BaseView) HasSelfDestructed(gethCommon.Address) (bool, *uint256.Int) { + return false, new(uint256.Int) +} + +// GetBalance returns the balance of an address +// +// for non-existent accounts it returns a balance of zero +func (v *BaseView) GetBalance(addr gethCommon.Address) (*uint256.Int, error) { + acc, err := v.getAccount(addr) + bal := uint256.NewInt(0) + if acc != nil { + bal = acc.Balance + } + return bal, err +} + +// GetNonce returns the nonce of an address +// +// for non-existent accounts it returns zero +func (v *BaseView) GetNonce(addr gethCommon.Address) (uint64, error) { + acc, err := v.getAccount(addr) + nonce := uint64(0) + if acc != nil { + nonce = acc.Nonce + } + return nonce, err +} + +// GetCode returns the code of an address +// +// for non-existent accounts or accounts without a code (e.g. EOAs) it returns nil +func (v *BaseView) GetCode(addr gethCommon.Address) ([]byte, error) { + return v.getCode(addr) +} + +// GetCodeHash returns the code hash of an address +// +// for non-existent accounts it returns gethCommon.Hash{} +// and for accounts without a code (e.g. EOAs) it returns the default empty +// hash value (gethTypes.EmptyCodeHash) +func (v *BaseView) GetCodeHash(addr gethCommon.Address) (gethCommon.Hash, error) { + acc, err := v.getAccount(addr) + codeHash := gethCommon.Hash{} + if acc != nil { + codeHash = acc.CodeHash + } + return codeHash, err +} + +// GetCodeSize returns the code size of an address +// +// for non-existent accounts or accounts without a code (e.g. EOAs) it returns zero +func (v *BaseView) GetCodeSize(addr gethCommon.Address) (int, error) { + code, err := v.GetCode(addr) + return len(code), err +} + +// GetState returns the value for a slot in the main storage +// +// for non-existent slots it returns the default empty hash value (gethCommon.Hash{}) +func (v *BaseView) GetState(sk types.SlotAddress) (gethCommon.Hash, error) { + return v.getSlot(sk) +} + +// GetStorageRoot returns some sort of storage root for the given address +// WARNING! the root that is returned is not a commitment to the state. +// It is mostly returned to satisfy the requirements of the EVM, +// where the returned value is compared against the empty hash and the empty root hash +// to determine smart contracts that already have data. +// +// Since BaseView doesn't construct a Merkle tree for each account, +// it uses the hash of the root slab as some sort of root hash. +// If the account doesn't exist, we return the empty hash. +// If the account exists but is not a smart contract, we return EmptyRootHash. +// If it is a contract, we return the hash of the root slab content (some sort of commitment). +func (v *BaseView) GetStorageRoot(addr common.Address) (common.Hash, error) { + account, err := v.getAccount(addr) + if err != nil { + return gethCommon.Hash{}, err + } + // account does not exist + if account == nil { + return gethCommon.Hash{}, nil + } + + // account is EOA + if len(account.CollectionID) == 0 { + return gethTypes.EmptyRootHash, nil + } + + // otherwise it is a smart contract account + // return the hash of the collection ID + // This is not a proper root as it doesn't have + // any commitment to the content.
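+	// (hashing the collection ID gives a deterministic, non-empty value per contract,
+	// which is all the EVM needs for its empty/non-empty storage checks)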
+	return gethCrypto.Keccak256Hash(account.CollectionID), nil +} + +// UpdateSlot updates the value for a slot +func (v *BaseView) UpdateSlot(sk types.SlotAddress, value gethCommon.Hash) error { + return v.storeSlot(sk, value) +} + +// GetRefund returns the total amount of (gas) refund +// +// this method always returns zero +func (v *BaseView) GetRefund() uint64 { + return 0 +} + +// GetTransientState returns the value for a slot in transient storage +// +// transient storage is not a functionality of the base view, so it always +// returns the default value for non-existent slots +func (v *BaseView) GetTransientState(types.SlotAddress) gethCommon.Hash { + return gethCommon.Hash{} +} + +// AddressInAccessList checks if an address is in the access list +// +// access list control is not a functionality of the base view +// it always returns false +func (v *BaseView) AddressInAccessList(gethCommon.Address) bool { + return false +} + +// SlotInAccessList checks if a slot is in the access list +// +// access list control is not a functionality of the base view +// it always returns false +func (v *BaseView) SlotInAccessList(types.SlotAddress) (addressOk bool, slotOk bool) { + return false, false +} + +// CreateAccount creates a new account +func (v *BaseView) CreateAccount( + addr gethCommon.Address, + balance *uint256.Int, + nonce uint64, + code []byte, + codeHash gethCommon.Hash, +) error { + var colID []byte + // if it is a smart contract account + if len(code) > 0 { + err := v.updateAccountCode(addr, code, codeHash) + if err != nil { + return err + } + } + + // create a new account and store it + acc := NewAccount(addr, balance, nonce, codeHash, colID) + + // no need to update the cache; storeAccount will update the cache + return v.storeAccount(acc) +} + +// UpdateAccount updates an account's metadata +func (v *BaseView) UpdateAccount( + addr gethCommon.Address, + balance *uint256.Int, + nonce uint64, + code []byte, + codeHash gethCommon.Hash, +) error { + acc, err := v.getAccount(addr) + if err != nil { + return err + } + // if update is called on a non-existing account, + // we gracefully fall back to creating the account + // TODO: but we might need to revisit this action in the future + if acc == nil { + return v.CreateAccount(addr, balance, nonce, code, codeHash) + } + + // update account code + err = v.updateAccountCode(addr, code, codeHash) + if err != nil { + return err + } + // TODO: maybe purge the state in the future as well; + // currently the stateDB behavior doesn't purge the data, + // so we don't check if the code is empty and purge the state; + // this is not possible right now. + + newAcc := NewAccount(addr, balance, nonce, codeHash, acc.CollectionID) + // no need to update the cache; storeAccount will update the cache + return v.storeAccount(newAcc) +} + +// DeleteAccount deletes an account's metadata, code, and +// storage slots associated with that address +func (v *BaseView) DeleteAccount(addr gethCommon.Address) error { + // 1. check account exists + acc, err := v.getAccount(addr) + if err != nil { + return err + } + if acc == nil { // if account doesn't exist return + return nil + } + + // 2. remove the code + if acc.HasCode() { + err = v.updateAccountCode(addr, nil, gethTypes.EmptyCodeHash) + if err != nil { + return err + } + } + + // 3. update the cache + delete(v.cachedAccounts, addr) + + // 4. remove from the accounts collection + err = v.accounts.Remove(addr.Bytes()) + if err != nil { + return err + } + + // 5.
remove storage slots + if len(acc.CollectionID) > 0 { + col, found := v.slots[addr] + if !found { + col, err = v.collectionProvider.CollectionByID(acc.CollectionID) + if err != nil { + return err + } + } + // delete all slots related to this account (EIP-6780) + keys, err := col.Destroy() + if err != nil { + return err + } + + delete(v.slots, addr) + + for _, key := range keys { + delete(v.cachedSlots, types.SlotAddress{ + Address: addr, + Key: gethCommon.BytesToHash(key), + }) + } + } + return nil +} + +// PurgeAllSlotsOfAnAccount purges all the slots related to an account +func (v *BaseView) PurgeAllSlotsOfAnAccount(addr gethCommon.Address) error { + acc, err := v.getAccount(addr) + if err != nil { + return err + } + // if the account doesn't exist, return + // len(acc.CollectionID) == 0 means the account is a non-smart-contract + // account, which has no slots to purge, so we naturally return + if acc == nil || len(acc.CollectionID) == 0 { + return nil + } + + // remove storage slots + col, found := v.slots[addr] + if !found { + col, err = v.collectionProvider.CollectionByID(acc.CollectionID) + if err != nil { + return err + } + } + + delete(v.slots, addr) + + keys := [][]byte{} + keysIterator, err := col.ReadOnlyIterator() + if err != nil { + return err + } + + key, _, err := keysIterator.Next() + if err != nil { + return err + } + + // we need to collect all the keys, before removing them, + // as per the ReadOnlyIterator's specification + for key != nil { + keys = append(keys, key) + delete(v.cachedSlots, types.SlotAddress{ + Address: addr, + Key: gethCommon.BytesToHash(key), + }) + key, _, err = keysIterator.Next() + if err != nil { + return err + } + } + + // remove slot keys from the account's collection + for _, key := range keys { + if err = col.Remove(key); err != nil { + return err + } + } + + return nil +} + +// Commit commits the changes to the underlying storage layers +func (v *BaseView) Commit() error { + // commit collection changes + err := v.collectionProvider.Commit() + if err != nil { + return err + } + + // if this is the first time we are setting up an + // account collection, store its collection id. + if v.accountSetupOnCommit { + err = v.ledger.SetValue(v.rootAddress[:], []byte(AccountsStorageIDKey), v.accounts.CollectionID()) + if err != nil { + return err + } + v.accountSetupOnCommit = false + } + + // if this is the first time we are setting up a + // code collection, store its collection id. + if v.codeSetupOnCommit { + err = v.ledger.SetValue(v.rootAddress[:], []byte(CodesStorageIDKey), v.codes.CollectionID()) + if err != nil { + return err + } + v.codeSetupOnCommit = false + } + return nil +} + +// NumberOfContracts returns the number of unique contracts +func (v *BaseView) NumberOfContracts() uint64 { + return v.codes.Size() +} + +// NumberOfAccounts returns the number of accounts +func (v *BaseView) NumberOfAccounts() uint64 { + return v.accounts.Size() +} + +// AccountIterator returns an account iterator +// +// Warning! this is an expensive operation and should only be used +// for testing and exporting state operations, while no changes +// are applied to accounts. Note that the iteration order is not guaranteed. +func (v *BaseView) AccountIterator() (*AccountIterator, error) { + itr, err := v.accounts.ReadOnlyIterator() + if err != nil { + return nil, err + } + return &AccountIterator{colIterator: itr}, nil +} + +// CodeIterator returns a code iterator +// +// Warning!
this is an expensive operation and should only be used +// for testing and exporting state operations, while no changes +// are applied to codes. Note that the iteration order is not guaranteed. +func (v *BaseView) CodeIterator() (*CodeIterator, error) { + itr, err := v.codes.ReadOnlyIterator() + if err != nil { + return nil, err + } + return &CodeIterator{colIterator: itr}, nil +} + +// AccountStorageIterator returns an account storage iterator +// for the given address +// +// Warning! this is an expensive operation and should only be used +// for testing and exporting state operations, while no changes +// are applied to accounts. Note that the iteration order is not guaranteed. +func (v *BaseView) AccountStorageIterator( + addr gethCommon.Address, +) (*AccountStorageIterator, error) { + acc, err := v.getAccount(addr) + if err != nil { + return nil, err + } + if acc == nil || !acc.HasStoredValues() { + return nil, fmt.Errorf("account %s has no stored value", addr.String()) + } + col, found := v.slots[addr] + if !found { + col, err = v.collectionProvider.CollectionByID(acc.CollectionID) + if err != nil { + return nil, fmt.Errorf("failed to load storage collection for account %s: %w", addr.String(), err) + } + } + itr, err := col.ReadOnlyIterator() + if err != nil { + return nil, err + } + return &AccountStorageIterator{ + address: addr, + colIterator: itr, + }, nil +} + +func (v *BaseView) fetchOrCreateCollection(path string) (collection *Collection, created bool, error error) { + collectionID, err := v.ledger.GetValue(v.rootAddress[:], []byte(path)) + if err != nil { + return nil, false, err + } + if len(collectionID) == 0 { + collection, err = v.collectionProvider.NewCollection() + if err != nil { + return collection, true, fmt.Errorf("fail to create collection with key %v: %w", path, err) + } + return collection, true, nil + } + collection, err = v.collectionProvider.CollectionByID(collectionID) + return collection, false, err +} + +func (v *BaseView) getAccount(addr gethCommon.Address) (*Account, error) { + // check cached accounts first + acc, found := v.cachedAccounts[addr] + if found { + return acc, nil + } + + // then collect it from the account collection + data, err := v.accounts.Get(addr.Bytes()) + if err != nil { + return nil, err + } + // decode it + acc, err = DecodeAccount(data) + if err != nil { + return nil, err + } + // cache it + if acc != nil { + v.cachedAccounts[addr] = acc + } + return acc, nil +} + +func (v *BaseView) storeAccount(acc *Account) error { + data, err := acc.Encode() + if err != nil { + return err + } + // update the cache + v.cachedAccounts[acc.Address] = acc + return v.accounts.Set(acc.Address.Bytes(), data) +} + +func (v *BaseView) getCode(addr gethCommon.Address) ([]byte, error) { + // check the cache first + code, found := v.cachedCodes[addr] + if found { + return code, nil + } + + // get account + acc, err := v.getAccount(addr) + if err != nil { + return nil, err + } + + if acc == nil || !acc.HasCode() { + return nil, nil + } + + // collect the container from the code collection by codeHash + encoded, err := v.codes.Get(acc.CodeHash.Bytes()) + if err != nil { + return nil, err + } + if len(encoded) == 0 { + return nil, nil + } + + codeCont, err := CodeContainerFromEncoded(encoded) + if err != nil { + return nil, err + } + code = codeCont.Code() + if len(code) > 0 { + v.cachedCodes[addr] = code + } + return code, nil +} + +func (v *BaseView) updateAccountCode(addr gethCommon.Address, code []byte, codeHash gethCommon.Hash) error { + // get account 
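+	// (the stored code hash lets us detect and skip no-op code updates below)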
+	acc, err := v.getAccount(addr) + if err != nil { + return err + } + // if it is a new account + if acc == nil { + if len(code) == 0 { + return nil + } + v.cachedCodes[addr] = code + return v.addCode(code, codeHash) + } + + // skip if it is the same code + if acc.CodeHash == codeHash { + return nil + } + + // clean up the old code first if it exists + if acc.HasCode() { + delete(v.cachedCodes, addr) + err = v.removeCode(acc.CodeHash) + if err != nil { + return err + } + } + + // add new code + if len(code) == 0 { + return nil + } + v.cachedCodes[addr] = code + return v.addCode(code, codeHash) +} + +func (v *BaseView) removeCode(codeHash gethCommon.Hash) error { + encoded, err := v.codes.Get(codeHash.Bytes()) + if err != nil { + return err + } + if len(encoded) == 0 { + return nil + } + + cc, err := CodeContainerFromEncoded(encoded) + if err != nil { + return err + } + if cc.DecRefCount() { + return v.codes.Remove(codeHash.Bytes()) + } + return v.codes.Set(codeHash.Bytes(), cc.Encode()) +} + +func (v *BaseView) addCode(code []byte, codeHash gethCommon.Hash) error { + encoded, err := v.codes.Get(codeHash.Bytes()) + if err != nil { + return err + } + // if this is the first time the code is being deployed + if len(encoded) == 0 { + return v.codes.Set(codeHash.Bytes(), NewCodeContainer(code).Encode()) + } + + // otherwise update the code container + cc, err := CodeContainerFromEncoded(encoded) + if err != nil { + return err + } + cc.IncRefCount() + return v.codes.Set(codeHash.Bytes(), cc.Encode()) +} + +func (v *BaseView) getSlot(sk types.SlotAddress) (gethCommon.Hash, error) { + value, found := v.cachedSlots[sk] + if found { + return value, nil + } + + acc, err := v.getAccount(sk.Address) + if err != nil { + return gethCommon.Hash{}, err + } + if acc == nil || len(acc.CollectionID) == 0 { + return gethCommon.Hash{}, nil + } + + col, err := v.getSlotCollection(acc) + if err != nil { + return gethCommon.Hash{}, err + } + + val, err := col.Get(sk.Key.Bytes()) + if err != nil { + return gethCommon.Hash{}, err + } + value = gethCommon.BytesToHash(val) + v.cachedSlots[sk] = value + return value, nil +} + +func (v *BaseView) storeSlot(sk types.SlotAddress, data gethCommon.Hash) error { + acc, err := v.getAccount(sk.Address) + if err != nil { + return err + } + if acc == nil { + return fmt.Errorf("slot belongs to a non-existing account") + } + col, err := v.getSlotCollection(acc) + if err != nil { + return err + } + + if data == EmptyHash { + delete(v.cachedSlots, sk) + return col.Remove(sk.Key.Bytes()) + } + v.cachedSlots[sk] = data + return col.Set(sk.Key.Bytes(), data.Bytes()) +} + +func (v *BaseView) getSlotCollection(acc *Account) (*Collection, error) { + var err error + + if len(acc.CollectionID) == 0 { + // create a new collection for slots + col, err := v.collectionProvider.NewCollection() + if err != nil { + return nil, err + } + // cache collection + v.slots[acc.Address] = col + // update account's collection ID + acc.CollectionID = col.CollectionID() + err = v.storeAccount(acc) + if err != nil { + return nil, err + } + return col, nil + } + + col, found := v.slots[acc.Address] + if !found { + col, err = v.collectionProvider.CollectionByID(acc.CollectionID) + if err != nil { + return nil, err + } + v.slots[acc.Address] = col + } + return col, nil +} + +// AccountIterator iterates over accounts +type AccountIterator struct { + colIterator *CollectionIterator +} + +// Next returns the next account +// if there are no more accounts, Next returns nil (no error) +func (ai *AccountIterator) Next() (*Account, error) { + _, value, err
:= ai.colIterator.Next() + if err != nil { + return nil, fmt.Errorf("account iteration failed: %w", err) + } + return DecodeAccount(value) +} + +// CodeIterator iterates over codes stored in EVM +// code storage only stores unique codes +type CodeIterator struct { + colIterator *CollectionIterator +} + +// Next returns the next code +// if there are no more codes, it returns nil (no error) +func (ci *CodeIterator) Next() ( + *CodeInContext, + error, +) { + ch, encodedCC, err := ci.colIterator.Next() + if err != nil { + return nil, fmt.Errorf("code iteration failed: %w", err) + } + // no more keys + if ch == nil { + return nil, nil + } + if len(encodedCC) == 0 { + return nil, + fmt.Errorf("encoded code container is empty (code hash: %x)", ch) + } + + codeCont, err := CodeContainerFromEncoded(encodedCC) + if err != nil { + return nil, fmt.Errorf("code container decoding failed (code hash: %x): %w", ch, err) + } + return &CodeInContext{ + Hash: gethCommon.BytesToHash(ch), + Code: codeCont.Code(), + RefCounts: codeCont.RefCount(), + }, nil +} + +// AccountStorageIterator iterates over slots of an account +type AccountStorageIterator struct { + address gethCommon.Address + colIterator *CollectionIterator +} + +// Next returns the next slot in the storage +// if there are no more keys, it returns nil (no error) +func (asi *AccountStorageIterator) Next() ( + *types.SlotEntry, + error, +) { + k, v, err := asi.colIterator.Next() + if err != nil { + return nil, fmt.Errorf("account storage iteration failed: %w", err) + } + // no more keys + if k == nil { + return nil, nil + } + return &types.SlotEntry{ + Address: asi.address, + Key: gethCommon.BytesToHash(k), + Value: gethCommon.BytesToHash(v), + }, nil +} diff --git a/fvm/evm/emulator/state/base_test.go b/fvm/evm/emulator/state/base_test.go new file mode 100644 index 00000000000..fec5bc6b082 --- /dev/null +++ b/fvm/evm/emulator/state/base_test.go @@ -0,0 +1,503 @@ +package state_test + +import ( + "testing" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/emulator/state" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +func TestBaseView(t *testing.T) { + t.Parallel() + + t.Run("test account functionalities", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + rootAddr := flow.Address{1, 2, 3, 4, 5, 6, 7, 8} + view, err := state.NewBaseView(ledger, rootAddr) + require.NoError(t, err) + + addr1 := testutils.RandomCommonAddress(t) + + // check data calls for a non-existent account + checkAccount(t, + view, + addr1, + false, + uint256.NewInt(0), + uint64(0), + nil, + gethCommon.Hash{}, + ) + + // create an account with code + newBal := uint256.NewInt(10) + newNonce := uint64(5) + newCode := []byte("some code") + newCodeHash := gethCommon.Hash{1, 2} + + err = view.CreateAccount(addr1, newBal, newNonce, newCode, newCodeHash) + require.NoError(t, err) + + // check data from cache + checkAccount(t, + view, + addr1, + true, + newBal, + newNonce, + newCode, + newCodeHash, + ) + + // commit the changes and create a new baseview + err = view.Commit() + require.NoError(t, err) + + view, err = state.NewBaseView(ledger, rootAddr) + require.NoError(t, err) + + checkAccount(t, + view, + addr1, + true, + newBal, + newNonce, + newCode, + newCodeHash, + ) + + // test update
account + + newBal = uint256.NewInt(12) + newNonce = uint64(6) + newCode = []byte("some new code") + newCodeHash = gethCommon.Hash{2, 3} + err = view.UpdateAccount(addr1, newBal, newNonce, newCode, newCodeHash) + require.NoError(t, err) + + // check data from cache + checkAccount(t, + view, + addr1, + true, + newBal, + newNonce, + newCode, + newCodeHash, + ) + + // commit the changes and create a new baseview + err = view.Commit() + require.NoError(t, err) + + view, err = state.NewBaseView(ledger, rootAddr) + require.NoError(t, err) + + checkAccount(t, + view, + addr1, + true, + newBal, + newNonce, + newCode, + newCodeHash, + ) + + // test delete account + + err = view.DeleteAccount(addr1) + require.NoError(t, err) + + // check from cache + checkAccount(t, + view, + addr1, + false, + uint256.NewInt(0), + uint64(0), + nil, + gethCommon.Hash{}, + ) + + // commit the changes and create a new baseview + err = view.Commit() + require.NoError(t, err) + + view, err = state.NewBaseView(ledger, rootAddr) + require.NoError(t, err) + + checkAccount(t, + view, + addr1, + false, + uint256.NewInt(0), + uint64(0), + nil, + gethCommon.Hash{}, + ) + }) + + t.Run("test slot storage", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + rootAddr := flow.Address{1, 2, 3, 4, 5, 6, 7, 8} + view, err := state.NewBaseView(ledger, rootAddr) + require.NoError(t, err) + + addr1 := testutils.RandomCommonAddress(t) + key1 := testutils.RandomCommonHash(t) + slot1 := types.SlotAddress{ + Address: addr1, + Key: key1, + } + + // non-existent account + value, err := view.GetState(slot1) + require.NoError(t, err) + require.Equal(t, value, gethCommon.Hash{}) + + // store a new value + newValue := testutils.RandomCommonHash(t) + + // updating a slot for a non-existent account should fail + err = view.UpdateSlot(slot1, newValue) + require.Error(t, err) + + // an account must have code in order to have slots + err = view.CreateAccount(addr1, uint256.NewInt(10), 0, []byte("ABC"), gethCommon.Hash{1, 2, 3}) + require.NoError(t, err) + + err = view.UpdateSlot(slot1, newValue) + require.NoError(t, err) + + // return result from the cache + value, err = view.GetState(slot1) + require.NoError(t, err) + require.Equal(t, newValue, value) + + // commit changes + err = view.Commit() + require.NoError(t, err) + + view2, err := state.NewBaseView(ledger, rootAddr) + require.NoError(t, err) + + // return state from ledger + value, err = view2.GetState(slot1) + require.NoError(t, err) + require.Equal(t, newValue, value) + }) + + t.Run("default values method calls", func(t *testing.T) { + // these method calls always return the same values + view, err := state.NewBaseView(testutils.GetSimpleValueStore(), flow.Address{1, 2, 3, 4}) + require.NoError(t, err) + + dest, bal := view.HasSelfDestructed(gethCommon.Address{}) + require.Equal(t, false, dest) + require.Equal(t, new(uint256.Int), bal) + require.Equal(t, false, view.IsCreated(gethCommon.Address{})) + require.Equal(t, uint64(0), view.GetRefund()) + require.Equal(t, gethCommon.Hash{}, view.GetTransientState(types.SlotAddress{})) + require.Equal(t, false, view.AddressInAccessList(gethCommon.Address{})) + addrFound, slotFound := view.SlotInAccessList(types.SlotAddress{}) + require.Equal(t, false, addrFound) + require.Equal(t, false, slotFound) + }) + + t.Run("test code storage", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + rootAddr := flow.Address{1, 2, 3, 4, 5, 6, 7, 8} + view, err := state.NewBaseView(ledger, rootAddr) + require.NoError(t, err) + + bal := new(uint256.Int)
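+			// the same zero balance and nonce are shared by all accounts in this sub-test; only the code differs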
+	nonce := uint64(0) + + addr1 := testutils.RandomCommonAddress(t) + var code1 []byte + codeHash1 := gethTypes.EmptyCodeHash + err = view.CreateAccount(addr1, bal, nonce, code1, codeHash1) + require.NoError(t, err) + + ret, err := view.GetCode(addr1) + require.NoError(t, err) + require.Equal(t, code1, ret) + + addr2 := testutils.RandomCommonAddress(t) + code2 := []byte("code2") + codeHash2 := gethCrypto.Keccak256Hash(code2) + err = view.CreateAccount(addr2, bal, nonce, code2, codeHash2) + require.NoError(t, err) + + ret, err = view.GetCode(addr2) + require.NoError(t, err) + require.Equal(t, code2, ret) + + err = view.Commit() + require.NoError(t, err) + orgSize := ledger.TotalStorageSize() + require.Equal(t, uint64(1), view.NumberOfContracts()) + + err = view.UpdateAccount(addr1, bal, nonce, code2, codeHash2) + require.NoError(t, err) + + err = view.Commit() + require.NoError(t, err) + require.Equal(t, orgSize, ledger.TotalStorageSize()) + require.Equal(t, uint64(1), view.NumberOfContracts()) + + ret, err = view.GetCode(addr1) + require.NoError(t, err) + require.Equal(t, code2, ret) + + // now remove the code from account 1 + err = view.UpdateAccount(addr1, bal, nonce, code1, codeHash1) + require.NoError(t, err) + + // there should not be any side effect on the code returned for account 2 + // and no impact on storage size + ret, err = view.GetCode(addr2) + require.NoError(t, err) + require.Equal(t, code2, ret) + + ret, err = view.GetCode(addr1) + require.NoError(t, err) + require.Equal(t, code1, ret) + + err = view.Commit() + require.NoError(t, err) + require.Equal(t, orgSize, ledger.TotalStorageSize()) + require.Equal(t, uint64(1), view.NumberOfContracts()) + + // now update account 2 and there should be a reduction in storage + err = view.UpdateAccount(addr2, bal, nonce, code1, codeHash1) + require.NoError(t, err) + + ret, err = view.GetCode(addr2) + require.NoError(t, err) + require.Equal(t, code1, ret) + + err = view.Commit() + require.NoError(t, err) + require.Greater(t, orgSize, ledger.TotalStorageSize()) + require.Equal(t, uint64(0), view.NumberOfContracts()) + + // delete account 2 + err = view.DeleteAccount(addr2) + require.NoError(t, err) + + ret, err = view.GetCode(addr2) + require.NoError(t, err) + require.Len(t, ret, 0) + + require.Greater(t, orgSize, ledger.TotalStorageSize()) + require.Equal(t, uint64(1), view.NumberOfAccounts()) + }) + + t.Run("test account iterator", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + rootAddr := flow.Address{1, 2, 3, 4, 5, 6, 7, 8} + view, err := state.NewBaseView(ledger, rootAddr) + require.NoError(t, err) + + accountCounts := 10 + nonces := make(map[gethCommon.Address]uint64) + balances := make(map[gethCommon.Address]*uint256.Int) + codeHashes := make(map[gethCommon.Address]gethCommon.Hash) + for i := 0; i < accountCounts; i++ { + addr := testutils.RandomCommonAddress(t) + balance := testutils.RandomUint256Int(1000) + nonce := testutils.RandomBigInt(1000).Uint64() + code := testutils.RandomData(t) + codeHash := testutils.RandomCommonHash(t) + + err = view.CreateAccount(addr, balance, nonce, code, codeHash) + require.NoError(t, err) + + nonces[addr] = nonce + balances[addr] = balance + codeHashes[addr] = codeHash + } + err = view.Commit() + require.NoError(t, err) + + ai, err := view.AccountIterator() + require.NoError(t, err) + + counter := 0 + for { + acc, err := ai.Next() + require.NoError(t, err) + if acc == nil { + break + } + require.Equal(t, nonces[acc.Address], acc.Nonce) + delete(nonces, acc.Address) +
require.Equal(t, balances[acc.Address].Uint64(), acc.Balance.Uint64()) + delete(balances, acc.Address) + require.Equal(t, codeHashes[acc.Address], acc.CodeHash) + delete(codeHashes, acc.Address) + counter += 1 + } + + require.Equal(t, accountCounts, counter) + }) + + t.Run("test code iterator", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + rootAddr := flow.Address{1, 2, 3, 4, 5, 6, 7, 8} + view, err := state.NewBaseView(ledger, rootAddr) + require.NoError(t, err) + + codeCounts := 10 + codeByCodeHash := make(map[gethCommon.Hash][]byte) + refCountByCodeHash := make(map[gethCommon.Hash]uint64) + for i := 0; i < codeCounts; i++ { + + code := testutils.RandomData(t) + codeHash := testutils.RandomCommonHash(t) + refCount := 0 + // we add each code couple of times through different accounts + for j := 1; j <= i+1; j++ { + addr := testutils.RandomCommonAddress(t) + balance := testutils.RandomUint256Int(1000) + nonce := testutils.RandomBigInt(1000).Uint64() + err = view.CreateAccount(addr, balance, nonce, code, codeHash) + require.NoError(t, err) + refCount += 1 + } + codeByCodeHash[codeHash] = code + refCountByCodeHash[codeHash] = uint64(refCount) + } + err = view.Commit() + require.NoError(t, err) + + ci, err := view.CodeIterator() + require.NoError(t, err) + + counter := 0 + for { + cic, err := ci.Next() + require.NoError(t, err) + if cic == nil { + break + } + require.Equal(t, codeByCodeHash[cic.Hash], cic.Code) + delete(codeByCodeHash, cic.Hash) + require.Equal(t, refCountByCodeHash[cic.Hash], cic.RefCounts) + delete(refCountByCodeHash, cic.Hash) + counter += 1 + } + + require.Equal(t, codeCounts, counter) + }) + + t.Run("test account storage iterator", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + rootAddr := flow.Address{1, 2, 3, 4, 5, 6, 7, 8} + view, err := state.NewBaseView(ledger, rootAddr) + require.NoError(t, err) + + addr := testutils.RandomCommonAddress(t) + code := []byte("code") + balance := testutils.RandomUint256Int(1000) + nonce := testutils.RandomBigInt(1000).Uint64() + codeHash := gethCrypto.Keccak256Hash(code) + err = view.CreateAccount(addr, balance, nonce, code, codeHash) + require.NoError(t, err) + + slotCounts := 10 + values := make(map[gethCommon.Hash]gethCommon.Hash) + + for i := 0; i < slotCounts; i++ { + key := testutils.RandomCommonHash(t) + value := testutils.RandomCommonHash(t) + + err = view.UpdateSlot( + types.SlotAddress{ + Address: addr, + Key: key, + }, value) + require.NoError(t, err) + values[key] = value + } + err = view.Commit() + require.NoError(t, err) + + asi, err := view.AccountStorageIterator(addr) + require.NoError(t, err) + + counter := 0 + for { + slot, err := asi.Next() + require.NoError(t, err) + if slot == nil { + break + } + require.Equal(t, addr, slot.Address) + require.Equal(t, values[slot.Key], slot.Value) + delete(values, slot.Key) + counter += 1 + } + + require.Equal(t, slotCounts, counter) + + // test non existing address + addr2 := testutils.RandomCommonAddress(t) + _, err = view.AccountStorageIterator(addr2) + require.Error(t, err) + + // test address without storage + err = view.CreateAccount(addr2, balance, nonce, code, codeHash) + require.NoError(t, err) + + err = view.Commit() + require.NoError(t, err) + + _, err = view.AccountStorageIterator(addr2) + require.Error(t, err) + }) + +} + +func checkAccount(t *testing.T, + view *state.BaseView, + addr gethCommon.Address, + exists bool, + balance *uint256.Int, + nonce uint64, + code []byte, + codeHash gethCommon.Hash, +) { + ex, err := 
view.Exist(addr) + require.NoError(t, err) + require.Equal(t, exists, ex) + + bal, err := view.GetBalance(addr) + require.NoError(t, err) + require.Equal(t, balance, bal) + + no, err := view.GetNonce(addr) + require.NoError(t, err) + require.Equal(t, nonce, no) + + cd, err := view.GetCode(addr) + require.NoError(t, err) + require.Equal(t, code, cd) + + cs, err := view.GetCodeSize(addr) + require.NoError(t, err) + require.Equal(t, len(code), cs) + + ch, err := view.GetCodeHash(addr) + require.NoError(t, err) + require.Equal(t, codeHash, ch) +} diff --git a/fvm/evm/emulator/state/code.go b/fvm/evm/emulator/state/code.go new file mode 100644 index 00000000000..1353cf5a69b --- /dev/null +++ b/fvm/evm/emulator/state/code.go @@ -0,0 +1,103 @@ +package state + +import ( + "encoding/binary" + "fmt" + + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" +) + +// CodeContainer contains a code and keeps +// track of its reference count +type CodeContainer struct { + code []byte + // keep the encoded form so we can reuse it later + buffer []byte + refCount uint64 +} + +// NewCodeContainer constructs a new code container +func NewCodeContainer(code []byte) *CodeContainer { + return &CodeContainer{ + code: code, + refCount: 1, + } +} + +// CodeContainerFromEncoded constructs a code container from the encoded data +func CodeContainerFromEncoded(encoded []byte) (*CodeContainer, error) { + if len(encoded) < 8 { + return nil, fmt.Errorf("invalid length for the encoded code container") + } + return &CodeContainer{ + refCount: binary.BigEndian.Uint64(encoded[:8]), + buffer: encoded, // keep encoded as buffer for future use + code: encoded[8:], + }, nil +} + +// Code returns the code part of the code container +func (cc *CodeContainer) Code() []byte { + return cc.code +} + +// RefCount returns the ref count +func (cc *CodeContainer) RefCount() uint64 { + return cc.refCount +} + +// IncRefCount increments the ref count +func (cc *CodeContainer) IncRefCount() { + cc.refCount++ +} + +// DecRefCount decrements the ref count and +// returns true if the ref count has reached zero +func (cc *CodeContainer) DecRefCount() bool { + // check if the ref count is already zero; + // this condition should never happen, + // but it's better to guard here to prevent underflow + if cc.refCount == 0 { + return true + } + cc.refCount-- + return cc.refCount == 0 +} + +// Encode returns the encoded content of the code container +func (cc *CodeContainer) Encode() []byte { + // try using the buffer if possible to avoid + // extra allocations + encodedLen := 8 + len(cc.code) + var encoded []byte + if len(cc.buffer) < encodedLen { + encoded = make([]byte, encodedLen) + } else { + encoded = cc.buffer[:encodedLen] + } + binary.BigEndian.PutUint64(encoded[:8], cc.refCount) + copy(encoded[8:], cc.code) + return encoded +} + +// CodeInContext captures a code in its context +type CodeInContext struct { + Hash gethCommon.Hash + Code []byte + RefCounts uint64 +} + +// Encode returns the encoded content of the code in context +func (cic *CodeInContext) Encode() ([]byte, error) { + return rlp.EncodeToBytes(cic) +} + +// CodeInContextFromEncoded constructs a code in context from the encoded data +func CodeInContextFromEncoded(encoded []byte) (*CodeInContext, error) { + if len(encoded) == 0 { + return nil, nil + } + cic := &CodeInContext{} + return cic, rlp.DecodeBytes(encoded, cic) +} diff --git a/fvm/evm/emulator/state/code_test.go b/fvm/evm/emulator/state/code_test.go new file mode 100644 index 00000000000..2a351e08073 --- /dev/null +++
b/fvm/evm/emulator/state/code_test.go @@ -0,0 +1,35 @@ +package state_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/emulator/state" +) + +func TestCodeContainer(t *testing.T) { + code := []byte("some code") + + // test construction + cc := state.NewCodeContainer(code) + require.Equal(t, uint64(1), cc.RefCount()) + require.Equal(t, code, cc.Code()) + + // test increment + cc.IncRefCount() + require.Equal(t, uint64(2), cc.RefCount()) + + // test encoding + encoded := cc.Encode() + cc, err := state.CodeContainerFromEncoded(encoded) + require.NoError(t, err) + require.Equal(t, uint64(2), cc.RefCount()) + require.Equal(t, code, cc.Code()) + + // test decrement + require.Equal(t, false, cc.DecRefCount()) + require.Equal(t, uint64(1), cc.RefCount()) + require.Equal(t, true, cc.DecRefCount()) + require.Equal(t, uint64(0), cc.RefCount()) +} diff --git a/fvm/evm/emulator/state/collection.go b/fvm/evm/emulator/state/collection.go new file mode 100644 index 00000000000..dea445c6524 --- /dev/null +++ b/fvm/evm/emulator/state/collection.go @@ -0,0 +1,483 @@ +package state + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + "runtime" + + "github.com/fxamacker/cbor/v2" + "github.com/onflow/atree" +) + +const ( + storageIDSize = 16 +) + +// CollectionProvider provides access to collections +type CollectionProvider struct { + rootAddr atree.Address + storage *atree.PersistentSlabStorage +} + +// NewCollectionProvider constructs a new CollectionProvider +func NewCollectionProvider( + rootAddr atree.Address, + ledger atree.Ledger, +) (*CollectionProvider, error) { + // empty address is not allowed (causes issues with atree) + if rootAddr == atree.AddressUndefined { + return nil, fmt.Errorf("empty address as root is not allowed") + } + baseStorage := atree.NewLedgerBaseStorage(ledger) + storage, err := NewPersistentSlabStorage(baseStorage) + return &CollectionProvider{ + rootAddr: rootAddr, + storage: storage, + }, err +} + +// CollectionByID returns the collection with the given collection ID +// +// if no collection is found with that collection ID, it returns an error +// Warning: this method should only be used once for each collection, and +// the returned pointer should be kept for future use.
+// calling it twice for the same collection might result in odd behaviours; +// currently the collection provider doesn't do any internal caching to protect against these cases +func (cp *CollectionProvider) CollectionByID(collectionID []byte) (*Collection, error) { + slabID, err := atree.NewSlabIDFromRawBytes(collectionID) + if err != nil { + return nil, err + } + + // TODO: expose SlabID.Address() in atree + + var address atree.Address + binary.BigEndian.PutUint64(address[:], slabID.AddressAsUint64()) + + // sanity check the storage ID address + if address != cp.rootAddr { + return nil, fmt.Errorf("root address mismatch %x != %x", address, cp.rootAddr) + } + + omap, err := atree.NewMapWithRootID(cp.storage, slabID, atree.NewDefaultDigesterBuilder()) + if err != nil { + return nil, err + } + return &Collection{ + omap: omap, + storage: cp.storage, + collectionID: collectionID, + }, nil +} + +// NewCollection constructs a new collection +func (cp *CollectionProvider) NewCollection() (*Collection, error) { + omap, err := atree.NewMap(cp.storage, cp.rootAddr, atree.NewDefaultDigesterBuilder(), emptyTypeInfo{}) + if err != nil { + return nil, err + } + storageIDBytes := make([]byte, storageIDSize) + _, err = omap.SlabID().ToRawBytes(storageIDBytes) + if err != nil { + return nil, err + } + return &Collection{ + storage: cp.storage, + omap: omap, + collectionID: storageIDBytes, // we reuse the storageID bytes as collectionID + }, nil +} + +// Commit commits all changes to the collections that have pending changes +func (cp *CollectionProvider) Commit() error { + return cp.storage.FastCommit(runtime.NumCPU()) +} + +// Collection provides a persistent and compact way of storing key/value pairs +// each collection has a unique collectionID that can be used to fetch the collection +// +// TODO(ramtin): we might not need any extra hashing on the atree side +// and could optimize this to just use the key, given the keys are already hashed?
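+// +// Under the hood each Collection is backed by an atree ordered map; the collectionID is the +// raw bytes of the map's root slab ID, so a collection can be re-fetched later from just that ID.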
+// Collection provides a persistent and compact way of storing key/value pairs;
+// each collection has a unique collection ID that can be used to fetch the
+// collection later
+//
+// TODO(ramtin): we might not need any extra hashing on the atree side and
+// could optimize this to just use the key directly, given the keys are
+// already hashed?
+type Collection struct {
+	omap         *atree.OrderedMap
+	storage      *atree.PersistentSlabStorage
+	collectionID []byte
+}
+
+// CollectionID returns the unique id for the collection
+func (c *Collection) CollectionID() []byte {
+	return c.collectionID
+}
+
+// Get gets the value for the given key
+//
+// if the key doesn't exist, it returns nil (no error)
+func (c *Collection) Get(key []byte) ([]byte, error) {
+	value, err := c.omap.Get(compare, hashInputProvider, NewByteStringValue(key))
+	if err != nil {
+		var keyNotFoundError *atree.KeyNotFoundError
+		if errors.As(err, &keyNotFoundError) {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	return value.(ByteStringValue).Bytes(), nil
+}
+
+// Set sets the value for the given key
+//
+// if a value is already stored at the given key, it is replaced
+func (c *Collection) Set(key, value []byte) error {
+	existingValueStorable, err := c.omap.Set(compare, hashInputProvider, NewByteStringValue(key), NewByteStringValue(value))
+	if err != nil {
+		return err
+	}
+
+	if id, ok := existingValueStorable.(atree.SlabIDStorable); ok {
+		// NOTE: deep remove isn't necessary because value is ByteStringValue (not container)
+		err := c.storage.Remove(atree.SlabID(id))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Remove removes a key from the collection
+//
+// if the key doesn't exist, it returns no error
+func (c *Collection) Remove(key []byte) error {
+	_, existingValueStorable, err := c.omap.Remove(compare, hashInputProvider, NewByteStringValue(key))
+	if err != nil {
+		var keyNotFoundError *atree.KeyNotFoundError
+		if errors.As(err, &keyNotFoundError) {
+			return nil
+		}
+		return err
+	}
+
+	if id, ok := existingValueStorable.(atree.SlabIDStorable); ok {
+		// NOTE: deep remove isn't necessary because value is ByteStringValue (not container)
+		err := c.storage.Remove(atree.SlabID(id))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Destroy destroys the whole collection
+func (c *Collection) Destroy() ([][]byte, error) {
+	var cachedErr error
+	keys := make([][]byte, c.omap.Count())
+	i := 0
+	err := c.omap.PopIterate(func(keyStorable atree.Storable, valueStorable atree.Storable) {
+		if id, ok := valueStorable.(atree.SlabIDStorable); ok {
+			err := c.storage.Remove(atree.SlabID(id))
+			if err != nil && cachedErr == nil {
+				cachedErr = err
+			}
+		}
+		key, err := keyStorable.StoredValue(c.omap.Storage)
+		if err != nil && cachedErr == nil {
+			cachedErr = err
+		}
+		keys[i] = key.(ByteStringValue).Bytes()
+		i++
+	})
+	if cachedErr != nil {
+		return keys, cachedErr
+	}
+	if err != nil {
+		return keys, err
+	}
+	return keys, c.storage.Remove(c.omap.SlabID())
+}
+
+// Size returns the number of items in the collection
+func (c *Collection) Size() uint64 {
+	return c.omap.Count()
+}
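Before the iterator is defined just below, here is a sketch of how it is typically consumed; the nil-key convention documented on Next is the loop's termination condition (illustrative only, not part of the patch):

package state_sketch

import "github.com/onflow/flow-go/fvm/evm/emulator/state"

// dumpCollection drains a read-only iterator into a map, stopping on the
// nil key that Next returns once all pairs have been visited.
func dumpCollection(c *state.Collection) (map[string][]byte, error) {
	out := make(map[string][]byte)
	iter, err := c.ReadOnlyIterator()
	if err != nil {
		return nil, err
	}
	for {
		k, v, err := iter.Next()
		if err != nil {
			return nil, err
		}
		if k == nil { // end of iteration
			break
		}
		out[string(k)] = v
	}
	return out, nil
}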
+// ReadOnlyIterator returns a collection iterator that
+// can be used to iterate over the key/value pairs in the collection
+//
+// Warning! Iteration is a fairly expensive operation and should only be used
+// for testing or data-export purposes. Also, the Collection must not be
+// mutated while iterating over its key/value pairs.
+func (c *Collection) ReadOnlyIterator() (*CollectionIterator, error) {
+	iterator, err := c.omap.ReadOnlyIterator()
+	if err != nil {
+		return nil, err
+	}
+	return &CollectionIterator{iter: iterator}, nil
+}
+
+// CollectionIterator allows iteration over the collection key/value pairs
+type CollectionIterator struct {
+	iter atree.MapIterator
+}
+
+// Next returns the next key/value pair; when no more elements are left, it
+// returns nil for both key and value (no error)
+func (ci *CollectionIterator) Next() (key []byte, value []byte, err error) {
+	k, v, err := ci.iter.Next()
+	if err != nil {
+		return nil, nil, fmt.Errorf("collection iteration failed: %w", err)
+	}
+	// no more keys
+	if k == nil {
+		return nil, nil, nil
+	}
+	return k.(ByteStringValue).Bytes(), v.(ByteStringValue).Bytes(), nil
+}
+
+type ByteStringValue struct {
+	data []byte
+	size uint32
+}
+
+var _ atree.Value = &ByteStringValue{}
+var _ atree.Storable = &ByteStringValue{}
+
+func NewByteStringValue(data []byte) ByteStringValue {
+	size := atree.GetUintCBORSize(uint64(len(data))) + uint32(len(data))
+	return ByteStringValue{data: data, size: size}
+}
+
+func (v ByteStringValue) ChildStorables() []atree.Storable {
+	return nil
+}
+
+func (v ByteStringValue) StoredValue(_ atree.SlabStorage) (atree.Value, error) {
+	return v, nil
+}
+
+func (v ByteStringValue) Storable(storage atree.SlabStorage, address atree.Address, maxInlineSize uint64) (atree.Storable, error) {
+	if uint64(v.ByteSize()) <= maxInlineSize {
+		return v, nil
+	}
+
+	// Create StorableSlab
+	return atree.NewStorableSlab(storage, address, v)
+}
+
+func (v ByteStringValue) Encode(enc *atree.Encoder) error {
+	return enc.CBOR.EncodeBytes(v.data)
+}
+
+func (v ByteStringValue) getHashInput(scratch []byte) ([]byte, error) {
+
+	const cborTypeByteString = 0x40
+
+	buf := scratch
+	if uint32(len(buf)) < v.size {
+		buf = make([]byte, v.size)
+	} else {
+		buf = buf[:v.size]
+	}
+
+	slen := len(v.data)
+
+	if slen <= 23 {
+		buf[0] = cborTypeByteString | byte(slen)
+		copy(buf[1:], v.data)
+		return buf, nil
+	}
+
+	if slen <= math.MaxUint8 {
+		buf[0] = cborTypeByteString | byte(24)
+		buf[1] = byte(slen)
+		copy(buf[2:], v.data)
+		return buf, nil
+	}
+
+	if slen <= math.MaxUint16 {
+		buf[0] = cborTypeByteString | byte(25)
+		binary.BigEndian.PutUint16(buf[1:], uint16(slen))
+		copy(buf[3:], v.data)
+		return buf, nil
+	}
+
+	if slen <= math.MaxUint32 {
+		buf[0] = cborTypeByteString | byte(26)
+		binary.BigEndian.PutUint32(buf[1:], uint32(slen))
+		copy(buf[5:], v.data)
+		return buf, nil
+	}
+
+	buf[0] = cborTypeByteString | byte(27)
+	binary.BigEndian.PutUint64(buf[1:], uint64(slen))
+	copy(buf[9:], v.data)
+	return buf, nil
+}
+
+func (v ByteStringValue) ByteSize() uint32 {
+	return v.size
+}
+
+func (v ByteStringValue) String() string {
+	return string(v.data)
+}
+
+func (v ByteStringValue) Bytes() []byte {
+	return v.data
+}
+
+func decodeStorable(dec *cbor.StreamDecoder, slabID atree.SlabID, inlinedExtraData []atree.ExtraData) (atree.Storable, error) {
+	t, err := dec.NextType()
+	if err != nil {
+		return nil, err
+	}
+
+	switch t {
+	case cbor.ByteStringType:
+		s, err := dec.DecodeBytes()
+		if err != nil {
+			return nil, err
+		}
+		return NewByteStringValue(s), nil
+
+	case cbor.TagType:
+		tagNumber, err := dec.DecodeTagNumber()
+		if err != nil {
+			return nil, err
+		}
+
+		switch tagNumber {
+
+		case
atree.CBORTagSlabID: + return atree.DecodeSlabIDStorable(dec) + + case atree.CBORTagInlinedArray: + return atree.DecodeInlinedArrayStorable( + dec, + decodeStorable, + slabID, + inlinedExtraData) + + case atree.CBORTagInlinedMap: + return atree.DecodeInlinedMapStorable( + dec, + decodeStorable, + slabID, + inlinedExtraData, + ) + + case atree.CBORTagInlinedCompactMap: + return atree.DecodeInlinedCompactMapStorable( + dec, + decodeStorable, + slabID, + inlinedExtraData, + ) + + default: + return nil, fmt.Errorf("invalid tag number %d", tagNumber) + } + + default: + return nil, fmt.Errorf("invalid cbor type %s for storable", t) + } +} + +func compare(storage atree.SlabStorage, value atree.Value, storable atree.Storable) (bool, error) { + switch v := value.(type) { + + case ByteStringValue: + other, ok := storable.(ByteStringValue) + if ok { + return bytes.Equal(other.data, v.data), nil + } + + // Retrieve value from storage + otherValue, err := storable.StoredValue(storage) + if err != nil { + return false, err + } + other, ok = otherValue.(ByteStringValue) + if ok { + return bytes.Equal(other.data, v.data), nil + } + + return false, nil + } + + return false, fmt.Errorf("value %T not supported for comparison", value) +} + +func hashInputProvider(value atree.Value, buffer []byte) ([]byte, error) { + switch v := value.(type) { + case ByteStringValue: + return v.getHashInput(buffer) + } + + return nil, fmt.Errorf("value %T not supported for hash input", value) +} + +func NewPersistentSlabStorage(baseStorage atree.BaseStorage) (*atree.PersistentSlabStorage, error) { + encMode, err := cbor.EncOptions{}.EncMode() + if err != nil { + return nil, err + } + + decMode, err := cbor.DecOptions{}.DecMode() + if err != nil { + return nil, err + } + + return atree.NewPersistentSlabStorage( + baseStorage, + encMode, + decMode, + decodeStorable, + decodeTypeInfo, + ), nil +} + +type emptyTypeInfo struct{} + +var _ atree.TypeInfo = emptyTypeInfo{} + +func (emptyTypeInfo) IsComposite() bool { + return false +} + +func (emptyTypeInfo) Identifier() string { + return "" +} + +func (e emptyTypeInfo) Copy() atree.TypeInfo { + return e +} + +func (emptyTypeInfo) Encode(e *cbor.StreamEncoder) error { + return e.EncodeNil() +} + +func (i emptyTypeInfo) Equal(other atree.TypeInfo) bool { + _, ok := other.(emptyTypeInfo) + return ok +} + +func decodeTypeInfo(dec *cbor.StreamDecoder) (atree.TypeInfo, error) { + ty, err := dec.NextType() + if err != nil { + return nil, err + } + switch ty { + case cbor.NilType: + err := dec.DecodeNil() + if err != nil { + return nil, err + } + return emptyTypeInfo{}, nil + default: + } + + return nil, fmt.Errorf("not supported type info") +} diff --git a/fvm/evm/emulator/state/collection_test.go b/fvm/evm/emulator/state/collection_test.go new file mode 100644 index 00000000000..526d9e94a3e --- /dev/null +++ b/fvm/evm/emulator/state/collection_test.go @@ -0,0 +1,69 @@ +package state_test + +import ( + "testing" + + "github.com/onflow/atree" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/emulator/state" + "github.com/onflow/flow-go/fvm/evm/testutils" +) + +func TestCollection(t *testing.T) { + + cp := setupTestCollection(t) + c1, err := cp.NewCollection() + require.NoError(t, err) + + key1 := []byte("A") + key2 := []byte("B") + value1 := []byte{1} + value2 := []byte{2} + + // get value for A + ret, err := c1.Get(key1) + require.NoError(t, err) + require.Empty(t, ret) + + // set value1 for A + err = c1.Set(key1, value1) + require.NoError(t, err) + + ret, err = 
c1.Get(key1) + require.NoError(t, err) + require.Equal(t, ret, value1) + + err = c1.Remove(key1) + require.NoError(t, err) + + ret, err = c1.Get(key1) + require.NoError(t, err) + require.Empty(t, ret) + + err = c1.Set(key2, value2) + require.NoError(t, err) + + c2, err := cp.CollectionByID(c1.CollectionID()) + require.NoError(t, err) + + ret, err = c2.Get(key2) + require.NoError(t, err) + require.Equal(t, value2, ret) + + // destroy + keys, err := c1.Destroy() + require.NoError(t, err) + require.Len(t, keys, 1) + require.Equal(t, key2, keys[0]) + + _, err = cp.CollectionByID(c1.CollectionID()) + require.Error(t, err) +} + +func setupTestCollection(t *testing.T) *state.CollectionProvider { + ledger := testutils.GetSimpleValueStore() + cp, err := state.NewCollectionProvider(atree.Address{1, 2, 3, 4, 5, 6, 7, 8}, ledger) + require.NoError(t, err) + return cp +} diff --git a/fvm/evm/emulator/state/delta.go b/fvm/evm/emulator/state/delta.go new file mode 100644 index 00000000000..6f3838dc7f9 --- /dev/null +++ b/fvm/evm/emulator/state/delta.go @@ -0,0 +1,586 @@ +package state + +import ( + "fmt" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/holiman/uint256" + + "github.com/onflow/flow-go/fvm/evm/types" +) + +// DeltaView captures the changes to the state during the execution +// +// for most of the read calls it checks its change logs and if no record is +// found it would redirect the call to the parent view. +type DeltaView struct { + parent types.ReadOnlyView + + // dirtyAddresses keeps a set of addresses with changes + dirtyAddresses map[gethCommon.Address]struct{} + // created keeps a set of recently created addresses + created map[gethCommon.Address]struct{} + // This is an EIP-6780 flag indicating whether the object is eligible for + // self-destruct according to EIP-6780. The flag could be set either when + // the contract is just created within the current transaction, or when the + // object was previously existent and is being deployed as a contract within + // the current transaction. 
+	newContract map[gethCommon.Address]struct{}
+	// toBeDestructed keeps a set of addresses flagged to be destructed at the
+	// end of the transaction; it also keeps the balance of each address as it
+	// was before destruction
+	toBeDestructed map[gethCommon.Address]*uint256.Int
+	// recreated keeps a set of accounts that have been flagged for
+	// destruction but were recreated later
+	recreated map[gethCommon.Address]struct{}
+	// balances keeps the changes to the account balances
+	balances map[gethCommon.Address]*uint256.Int
+	// nonces keeps the changes to the account nonces
+	nonces map[gethCommon.Address]uint64
+	// codes keeps the changes to the account codes
+	codes map[gethCommon.Address][]byte
+	// codeHashes keeps the changes to account code hashes
+	codeHashes map[gethCommon.Address]gethCommon.Hash
+
+	// slots keeps the set of slots that have been changed in this view
+	slots map[types.SlotAddress]gethCommon.Hash
+
+	// transient storage
+	transient map[types.SlotAddress]gethCommon.Hash
+
+	// access lists
+	accessListAddresses map[gethCommon.Address]struct{}
+	accessListSlots     map[types.SlotAddress]struct{}
+
+	// logs
+	logs []*gethTypes.Log
+
+	// preimages
+	preimages map[gethCommon.Hash][]byte
+
+	// refund
+	refund uint64
+}
+
+var _ types.HotView = &DeltaView{}
+
+// NewDeltaView constructs a new delta view
+func NewDeltaView(parent types.ReadOnlyView) *DeltaView {
+	return &DeltaView{
+		parent: parent,
+
+		dirtyAddresses: make(map[gethCommon.Address]struct{}),
+		created:        make(map[gethCommon.Address]struct{}),
+		newContract:    make(map[gethCommon.Address]struct{}),
+		toBeDestructed: make(map[gethCommon.Address]*uint256.Int),
+		recreated:      make(map[gethCommon.Address]struct{}),
+		balances:       make(map[gethCommon.Address]*uint256.Int),
+		nonces:         make(map[gethCommon.Address]uint64),
+		codes:          make(map[gethCommon.Address][]byte),
+		codeHashes:     make(map[gethCommon.Address]gethCommon.Hash),
+
+		slots: make(map[types.SlotAddress]gethCommon.Hash),
+
+		// for the refund we just copy the data
+		refund: parent.GetRefund(),
+	}
+}
+
+// NewChildView constructs a new delta view having the current view as parent
+func (d *DeltaView) NewChildView() *DeltaView {
+	return NewDeltaView(d)
+}
+
+// Exist returns true if the address exists
+//
+// it also returns true for newly created accounts and for accounts that have
+// been flagged for deletion
+func (d *DeltaView) Exist(addr gethCommon.Address) (bool, error) {
+	_, found := d.created[addr]
+	if found {
+		return true, nil
+	}
+	_, found = d.toBeDestructed[addr]
+	if found {
+		return true, nil
+	}
+	// if the address is dirty, it exists
+	_, found = d.dirtyAddresses[addr]
+	if found {
+		return true, nil
+	}
+	return d.parent.Exist(addr)
+}
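To make the parent fall-through concrete, a minimal hypothetical sketch of stacking views; here `base` stands for any types.ReadOnlyView, such as a base view over the ledger (illustrative only, not part of the patch):

package state_sketch

import (
	gethCommon "github.com/ethereum/go-ethereum/common"
	"github.com/holiman/uint256"

	"github.com/onflow/flow-go/fvm/evm/emulator/state"
	"github.com/onflow/flow-go/fvm/evm/types"
)

// layeredReads shows reads falling through unchanged layers to the parent.
func layeredReads(base types.ReadOnlyView, addr gethCommon.Address) error {
	txView := state.NewDeltaView(base) // collects the changes of one transaction
	callView := txView.NewChildView()  // collects the changes of a nested call

	// the write lands only in callView's own change log
	if err := callView.AddBalance(addr, uint256.NewInt(1)); err != nil {
		return err
	}

	// served from callView's local balances map
	if _, err := callView.GetBalance(addr); err != nil {
		return err
	}
	// callView's write is invisible here; the read falls through to base
	if _, err := txView.GetBalance(addr); err != nil {
		return err
	}
	return nil
}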
+// CreateAccount creates a new account for the given address
+//
+// if the address already exists (even if destructed), it carries over the
+// balance and resets the data from the original account.
+func (d *DeltaView) CreateAccount(addr gethCommon.Address) error {
+	// if it is already created, return
+	if d.IsCreated(addr) {
+		return nil
+	}
+	exist, err := d.Exist(addr)
+	if err != nil {
+		return err
+	}
+	if exist {
+		// check if already destructed
+		destructed, balance := d.HasSelfDestructed(addr)
+		if !destructed {
+			balance, err = d.GetBalance(addr)
+			if err != nil {
+				return err
+			}
+			err = d.SelfDestruct(addr)
+			if err != nil {
+				return err
+			}
+		}
+
+		d.nonces[addr] = 0
+		d.codes[addr] = nil
+		d.codeHashes[addr] = gethTypes.EmptyCodeHash
+		// carrying over the balance (legacy behavior of the Geth stateDB)
+		d.balances[addr] = balance
+
+		// flag addr as recreated; this flag helps with postponing the deletion
+		// of slabs, otherwise we would have to iterate over all slabs of this
+		// account and set them to nil
+		d.recreated[addr] = struct{}{}
+
+		// remove slots related to this account from the cache
+		for k := range d.slots {
+			if k.Address == addr {
+				delete(d.slots, k)
+			}
+		}
+	}
+	d.dirtyAddresses[addr] = struct{}{}
+	d.created[addr] = struct{}{}
+	return nil
+}
+
+// IsCreated returns true if the address has been created in this tx
+func (d *DeltaView) IsCreated(addr gethCommon.Address) bool {
+	_, found := d.created[addr]
+	if found {
+		return true
+	}
+	return d.parent.IsCreated(addr)
+}
+
+// CreateContract is used whenever a contract is created. This may be preceded
+// by CreateAccount, but that is not required if it already existed in the
+// state due to funds sent beforehand.
+func (d *DeltaView) CreateContract(addr gethCommon.Address) {
+	_, found := d.newContract[addr]
+	if !found {
+		d.newContract[addr] = struct{}{}
+	}
+}
+
+// IsNewContract returns true if the address has been created in this tx.
+// It's used to correctly handle EIP-6780 'delete-in-same-transaction' logic.
+func (d *DeltaView) IsNewContract(addr gethCommon.Address) bool {
+	_, found := d.newContract[addr]
+	if found {
+		return true
+	}
+	return d.parent.IsNewContract(addr)
+}
+
+// HasSelfDestructed returns true if the address has been flagged for
+// destruction; it also returns the balance of the address before the
+// destruction call
+func (d *DeltaView) HasSelfDestructed(addr gethCommon.Address) (bool, *uint256.Int) {
+	bal, found := d.toBeDestructed[addr]
+	if found {
+		return true, bal
+	}
+	return d.parent.HasSelfDestructed(addr)
+}
+
+// SelfDestruct sets a flag to destruct the account at the end of the
+// transaction; calling it for a non-existent account is a no-op
+func (d *DeltaView) SelfDestruct(addr gethCommon.Address) error {
+	// if it doesn't exist, return
+	exists, err := d.Exist(addr)
+	if err != nil {
+		return err
+	}
+	if !exists {
+		return nil
+	}
+
+	// if already set to be self destructed, return
+	_, found := d.toBeDestructed[addr]
+	if found {
+		return nil
+	}
+
+	// flag the account for destruction and capture the balance
+	// before destruction
+	d.toBeDestructed[addr], err = d.GetBalance(addr)
+	if err != nil {
+		return err
+	}
+	// flag the address as dirty
+	d.dirtyAddresses[addr] = struct{}{}
+
+	// set the balance to zero
+	d.balances[addr] = new(uint256.Int)
+	return nil
+}
+
+// GetBalance returns the balance of the given address
+func (d *DeltaView) GetBalance(addr gethCommon.Address) (*uint256.Int, error) {
+	val, found := d.balances[addr]
+	if found {
+		return val, nil
+	}
+	// if newly created and no balance is set yet
+	_, newlyCreated := d.created[addr]
+	if newlyCreated {
+		return uint256.NewInt(0), nil
+	}
+	return d.parent.GetBalance(addr)
+}
+
+// AddBalance adds the amount to the current balance of the given address
+func (d *DeltaView) AddBalance(addr gethCommon.Address, amount *uint256.Int) error {
+	// if the amount is 0, skip
+	if amount.Sign() == 0 {
+		return nil
+	}
+	// get the latest balance
+	orgBalance, err := d.GetBalance(addr)
+	if err != nil {
+		return err
+	}
+	// update the balance
+	newBalance := new(uint256.Int).Add(orgBalance, amount)
+	d.balances[addr] = newBalance
+
+	// flag the address as dirty
+	d.dirtyAddresses[addr] = struct{}{}
+	return nil
+}
+
+// SubBalance subtracts the amount from the current balance of the
given address +func (d *DeltaView) SubBalance(addr gethCommon.Address, amount *uint256.Int) error { + // if amount is 0 skip + if amount.Sign() == 0 { + return nil + } + + // get the latest balance + orgBalance, err := d.GetBalance(addr) + if err != nil { + return err + } + + // update the new balance + newBalance := new(uint256.Int).Sub(orgBalance, amount) + + // if new balance is negative error + if newBalance.Sign() < 0 { + return fmt.Errorf("account balance is negative %d", newBalance) + } + + // update the balance + d.balances[addr] = newBalance + + // flag the address as dirty + d.dirtyAddresses[addr] = struct{}{} + return nil +} + +// GetNonce returns the nonce of the given address +func (d *DeltaView) GetNonce(addr gethCommon.Address) (uint64, error) { + val, found := d.nonces[addr] + if found { + return val, nil + } + // if newly created + _, newlyCreated := d.created[addr] + if newlyCreated { + return 0, nil + } + return d.parent.GetNonce(addr) +} + +// SetNonce sets the nonce for the given address +func (d *DeltaView) SetNonce(addr gethCommon.Address, nonce uint64) error { + // update the nonce + d.nonces[addr] = nonce + + // flag the address as dirty + d.dirtyAddresses[addr] = struct{}{} + return nil +} + +// GetCode returns the code of the given address +func (d *DeltaView) GetCode(addr gethCommon.Address) ([]byte, error) { + code, found := d.codes[addr] + if found { + return code, nil + } + // if newly created + _, newlyCreated := d.created[addr] + if newlyCreated { + return nil, nil + } + return d.parent.GetCode(addr) +} + +// GetCodeSize returns the code size of the given address +func (d *DeltaView) GetCodeSize(addr gethCommon.Address) (int, error) { + code, err := d.GetCode(addr) + return len(code), err +} + +// GetCodeHash returns the code hash of the given address +func (d *DeltaView) GetCodeHash(addr gethCommon.Address) (gethCommon.Hash, error) { + codeHash, found := d.codeHashes[addr] + if found { + return codeHash, nil + } + // if newly created + _, newlyCreated := d.created[addr] + if newlyCreated { + return gethTypes.EmptyCodeHash, nil + } + return d.parent.GetCodeHash(addr) +} + +// SetCode sets the code for the given address +func (d *DeltaView) SetCode(addr gethCommon.Address, code []byte) error { + // update code + d.codes[addr] = code + + // update code hash + codeHash := gethTypes.EmptyCodeHash + if len(code) > 0 { + codeHash = gethCrypto.Keccak256Hash(code) + } + d.codeHashes[addr] = codeHash + + // flag the address as dirty + d.dirtyAddresses[addr] = struct{}{} + return nil +} + +// GetState returns the value of the slot of the main state +func (d *DeltaView) GetState(sk types.SlotAddress) (gethCommon.Hash, error) { + val, found := d.slots[sk] + if found { + return val, nil + } + // if address is deleted in the scope of this delta view, + // don't go backward. this has been done to skip the step to iterate + // over all the state slabs and delete them. + _, recreated := d.recreated[sk.Address] + if recreated { + return gethCommon.Hash{}, nil + } + return d.parent.GetState(sk) +} + +// SetState sets or adds a value for the given slot of the main storage. +// It returns the previous value in any case. 
+func (d *DeltaView) SetState(
+	sk types.SlotAddress,
+	value gethCommon.Hash,
+) (gethCommon.Hash, error) {
+	lastValue, err := d.GetState(sk)
+	if err != nil {
+		return gethCommon.Hash{}, err
+	}
+	// if the value hasn't changed, skip
+	if value == lastValue {
+		return lastValue, nil
+	}
+	d.slots[sk] = value
+
+	return lastValue, nil
+}
+
+// GetStorageRoot returns a placeholder storage root for the given address
+//
+// WARNING! The root that is returned is not a commitment to the state.
+// It is mostly returned to satisfy the requirements of the EVM, where the
+// returned value is compared against the empty hash and empty root hash
+// values to detect smart contracts that already have data.
+//
+// Here we return values for non-existing accounts, and redirect the call all
+// the way back to the base view. This means that the state root that is
+// returned ignores the updates to slots during the transaction.
+func (d *DeltaView) GetStorageRoot(addr gethCommon.Address) (gethCommon.Hash, error) {
+	exist, err := d.Exist(addr)
+	if err != nil {
+		return gethCommon.Hash{}, err
+	}
+	if exist {
+		code, err := d.GetCode(addr)
+		if err != nil {
+			return gethCommon.Hash{}, err
+		}
+		if len(code) == 0 {
+			return gethTypes.EmptyRootHash, nil
+		}
+		// else go back to the parent
+	}
+	// go back to the parents (until we reach the base view)
+	// Note that if storage is updated in deltas but not
+	// committed, the expected behavior is to return the root in the base view.
+	return d.parent.GetStorageRoot(addr)
+}
+
+// GetTransientState returns the value of the slot of the transient state
+func (d *DeltaView) GetTransientState(sk types.SlotAddress) gethCommon.Hash {
+	if d.transient != nil {
+		val, found := d.transient[sk]
+		if found {
+			return val
+		}
+	}
+	return d.parent.GetTransientState(sk)
+}
+
+// SetTransientState sets a value for the given slot of the transient storage
+func (d *DeltaView) SetTransientState(sk types.SlotAddress, value gethCommon.Hash) {
+	if d.transient == nil {
+		d.transient = make(map[types.SlotAddress]gethCommon.Hash)
+	}
+	d.transient[sk] = value
+}
+
+// GetRefund returns the total (gas) refund
+func (d *DeltaView) GetRefund() uint64 {
+	return d.refund
+}
+
+// AddRefund adds the amount to the total (gas) refund
+func (d *DeltaView) AddRefund(amount uint64) error {
+	d.refund += amount
+	return nil
+}
+
+// SubRefund subtracts the amount from the total (gas) refund
+func (d *DeltaView) SubRefund(amount uint64) error {
+	if amount > d.refund {
+		return fmt.Errorf("refund counter below zero (gas: %d > refund: %d)", amount, d.refund)
+	}
+	d.refund -= amount
+	return nil
+}
+
+// AddressInAccessList checks if the address is in the access list of
+// the current view.
+// NOTE: Due to resource constraints (such as CPU & memory), and the
+// high-frequency usage of this function from the EVM, we do not look up
+// the parents until the root view or until we find a view that has
+// the address in its local access list.
+// As an optimization, `StateDB.AddressInAccessList` is responsible
+// for optimally traversing the views, to check if the address is in
+// the access list.
+func (d *DeltaView) AddressInAccessList(addr gethCommon.Address) bool { + if d.accessListAddresses != nil { + _, addressFound := d.accessListAddresses[addr] + if addressFound { + return true + } + } + return false +} + +// AddAddressToAccessList adds an address to the access list +func (d *DeltaView) AddAddressToAccessList(addr gethCommon.Address) bool { + if d.accessListAddresses == nil { + d.accessListAddresses = make(map[gethCommon.Address]struct{}) + } + + addrPresent := d.AddressInAccessList(addr) + d.accessListAddresses[addr] = struct{}{} + return !addrPresent +} + +// SlotInAccessList checks if the slot is in the access list of the +// current view. +// NOTE: Due to resource constraints (such as CPU & memory), and the +// high-frequency usage of this function from EVM, we do not look up +// the parents until the root view or until we find a view that has +// the slot in its local access list. +// As an optimization, the `StateDB.SlotInAccessList` is responsible +// for optimally traversing the views, to check if the slot is in +// the access list. +func (d *DeltaView) SlotInAccessList(sk types.SlotAddress) (addressOk bool, slotOk bool) { + addressFound := d.AddressInAccessList(sk.Address) + if d.accessListSlots != nil { + _, slotFound := d.accessListSlots[sk] + if slotFound { + return addressFound, true + } + } + return addressFound, false +} + +// AddSlotToAccessList adds a slot to the access list +// it also adds the address to the address list +func (d *DeltaView) AddSlotToAccessList(sk types.SlotAddress) (addrAdded bool, slotAdded bool) { + addrPresent, slotPresent := d.SlotInAccessList(sk) + if d.accessListAddresses == nil { + d.accessListAddresses = make(map[gethCommon.Address]struct{}) + } + d.accessListAddresses[sk.Address] = struct{}{} + if d.accessListSlots == nil { + d.accessListSlots = make(map[types.SlotAddress]struct{}) + } + d.accessListSlots[sk] = struct{}{} + return !addrPresent, !slotPresent +} + +// AddLog appends a log to the log collection +func (d *DeltaView) AddLog(log *gethTypes.Log) { + if d.logs == nil { + d.logs = make([]*gethTypes.Log, 0) + } + d.logs = append(d.logs, log) +} + +// Logs returns the logs that has been captured in this view +func (d *DeltaView) Logs() []*gethTypes.Log { + return d.logs +} + +// AddPreimage adds a preimage +func (d *DeltaView) AddPreimage(hash gethCommon.Hash, preimage []byte) { + if d.preimages == nil { + d.preimages = make(map[gethCommon.Hash][]byte) + } + + // make a copy (legacy behaviour) + pi := make([]byte, len(preimage)) + copy(pi, preimage) + d.preimages[hash] = pi +} + +// Preimages returns a map of preimages +func (d *DeltaView) Preimages() map[gethCommon.Hash][]byte { + return d.preimages +} + +// DirtyAddresses returns a set of addresses that has been updated in this view +func (d *DeltaView) DirtyAddresses() map[gethCommon.Address]struct{} { + return d.dirtyAddresses +} + +// DirtySlots returns a set of slots that has been updated in this view +func (d *DeltaView) DirtySlots() map[types.SlotAddress]struct{} { + dirtySlots := make(map[types.SlotAddress]struct{}) + for sk := range d.slots { + dirtySlots[sk] = struct{}{} + } + return dirtySlots +} diff --git a/fvm/evm/emulator/state/delta_test.go b/fvm/evm/emulator/state/delta_test.go new file mode 100644 index 00000000000..9ca089888af --- /dev/null +++ b/fvm/evm/emulator/state/delta_test.go @@ -0,0 +1,871 @@ +package state_test + +import ( + "fmt" + "testing" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes 
"github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/emulator/state" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" +) + +var emptyRefund = func() uint64 { + return 0 +} + +func TestDeltaView(t *testing.T) { + t.Parallel() + + t.Run("test account exist/creation/self-destruct functionality", func(t *testing.T) { + addr1 := testutils.RandomCommonAddress(t) + addr2 := testutils.RandomCommonAddress(t) + addr3 := testutils.RandomCommonAddress(t) + + view := state.NewDeltaView( + &MockedReadOnlyView{ + // we need get refund for parent + GetRefundFunc: emptyRefund, + ExistFunc: func(addr gethCommon.Address) (bool, error) { + switch addr { + case addr1: + return true, nil + case addr2: + return false, nil + default: + return false, fmt.Errorf("some error") + } + }, + IsCreatedFunc: func(a gethCommon.Address) bool { + return false + }, + GetBalanceFunc: func(gethCommon.Address) (*uint256.Int, error) { + return new(uint256.Int), nil + }, + HasSelfDestructedFunc: func(gethCommon.Address) (bool, *uint256.Int) { + return false, new(uint256.Int) + }, + }) + + // check existing account on the parent + found, err := view.Exist(addr1) + require.NoError(t, err) + require.True(t, found) + + // account doesn't exist on parent + found, err = view.Exist(addr2) + require.NoError(t, err) + require.False(t, found) + + // handling error on the parent + _, err = view.Exist(addr3) + require.Error(t, err) + + // create a account at address 2 + err = view.CreateAccount(addr2) + require.NoError(t, err) + require.True(t, view.IsCreated(addr2)) + + // now it should be found + found, err = view.Exist(addr2) + require.NoError(t, err) + require.True(t, found) + + // test HasSelfDestructed first + success, _ := view.HasSelfDestructed(addr1) + require.False(t, success) + + // set addr1 for deletion + err = view.SelfDestruct(addr1) + require.NoError(t, err) + + // check HasSelfDestructed now + success, _ = view.HasSelfDestructed(addr1) + require.True(t, success) + + // addr1 should still exist after self destruct call + found, err = view.Exist(addr1) + require.NoError(t, err) + require.True(t, found) + }) + + t.Run("test account balance functionality", func(t *testing.T) { + addr1 := testutils.RandomCommonAddress(t) + addr1InitBal := uint256.NewInt(10) + addr2 := testutils.RandomCommonAddress(t) + addr2InitBal := uint256.NewInt(5) + addr3 := testutils.RandomCommonAddress(t) + + view := state.NewDeltaView( + &MockedReadOnlyView{ + // we need get refund for parent + GetRefundFunc: emptyRefund, + ExistFunc: func(addr gethCommon.Address) (bool, error) { + switch addr { + case addr1, addr2: + return true, nil + default: + return false, nil + } + }, + HasSelfDestructedFunc: func(a gethCommon.Address) (bool, *uint256.Int) { + return false, new(uint256.Int) + }, + IsCreatedFunc: func(a gethCommon.Address) bool { + return false + }, + GetBalanceFunc: func(addr gethCommon.Address) (*uint256.Int, error) { + switch addr { + case addr1: + return addr1InitBal, nil + case addr2: + return addr2InitBal, nil + default: + return nil, fmt.Errorf("some error") + } + }, + }) + + // get balance through parent + bal, err := view.GetBalance(addr1) + require.NoError(t, err) + require.Equal(t, addr1InitBal, bal) + + // call self destruct on addr + err = view.SelfDestruct(addr1) + require.NoError(t, err) + + // now it should return balance of zero + bal, err 
= view.GetBalance(addr1) + require.NoError(t, err) + require.Equal(t, uint256.NewInt(0), bal) + + // add balance to addr2 + amount := uint256.NewInt(7) + expected := new(uint256.Int).Add(addr2InitBal, amount) + err = view.AddBalance(addr2, amount) + require.NoError(t, err) + newBal, err := view.GetBalance(addr2) + require.NoError(t, err) + require.Equal(t, expected, newBal) + + // sub balance from addr2 + amount = uint256.NewInt(9) + expected = new(uint256.Int).Sub(newBal, amount) + err = view.SubBalance(addr2, amount) + require.NoError(t, err) + bal, err = view.GetBalance(addr2) + require.NoError(t, err) + require.Equal(t, expected, bal) + + // negative balance error + err = view.SubBalance(addr2, uint256.NewInt(100)) + require.Error(t, err) + + // handling error on the parent + _, err = view.GetBalance(addr3) + require.Error(t, err) + }) + + t.Run("test nonce functionality", func(t *testing.T) { + addr1 := testutils.RandomCommonAddress(t) + addr1InitNonce := uint64(1) + addr2 := testutils.RandomCommonAddress(t) + + view := state.NewDeltaView( + &MockedReadOnlyView{ + // we need get refund for parent + GetRefundFunc: emptyRefund, + ExistFunc: func(addr gethCommon.Address) (bool, error) { + switch addr { + case addr1: + return true, nil + default: + return false, nil + } + }, + HasSelfDestructedFunc: func(a gethCommon.Address) (bool, *uint256.Int) { + return false, new(uint256.Int) + }, + IsCreatedFunc: func(a gethCommon.Address) bool { + return false + }, + GetBalanceFunc: func(a gethCommon.Address) (*uint256.Int, error) { + return new(uint256.Int), nil + }, + GetNonceFunc: func(addr gethCommon.Address) (uint64, error) { + switch addr { + case addr1: + return addr1InitNonce, nil + default: + return 0, fmt.Errorf("some error") + } + }, + }) + + // get nonce through parent + nonce, err := view.GetNonce(addr1) + require.NoError(t, err) + require.Equal(t, addr1InitNonce, nonce) + + // set nonce + new := uint64(100) + err = view.SetNonce(addr1, new) + require.NoError(t, err) + nonce, err = view.GetNonce(addr1) + require.NoError(t, err) + require.Equal(t, new, nonce) + + // handling error on the parent + _, err = view.GetNonce(addr2) + require.Error(t, err) + + // create a new account at addr2 + err = view.CreateAccount(addr2) + require.NoError(t, err) + + // now the nonce should return 0 + nonce, err = view.GetNonce(addr2) + require.NoError(t, err) + require.Equal(t, uint64(0), nonce) + }) + + t.Run("test code functionality", func(t *testing.T) { + addr1 := testutils.RandomCommonAddress(t) + addr1InitCode := []byte("code1") + addr1IntiCodeHash := gethCommon.BytesToHash([]byte{1, 2}) + addr2 := testutils.RandomCommonAddress(t) + + view := state.NewDeltaView( + &MockedReadOnlyView{ + // we need get refund for parent + GetRefundFunc: emptyRefund, + ExistFunc: func(addr gethCommon.Address) (bool, error) { + switch addr { + case addr1: + return true, nil + default: + return false, nil + } + }, + HasSelfDestructedFunc: func(a gethCommon.Address) (bool, *uint256.Int) { + return false, new(uint256.Int) + }, + IsCreatedFunc: func(a gethCommon.Address) bool { + return false + }, + GetBalanceFunc: func(a gethCommon.Address) (*uint256.Int, error) { + return new(uint256.Int), nil + }, + GetCodeFunc: func(addr gethCommon.Address) ([]byte, error) { + switch addr { + case addr1: + return addr1InitCode, nil + default: + return nil, fmt.Errorf("some error") + } + }, + GetCodeSizeFunc: func(addr gethCommon.Address) (int, error) { + switch addr { + case addr1: + return len(addr1InitCode), nil + default: + return 
0, fmt.Errorf("some error") + } + }, + GetCodeHashFunc: func(addr gethCommon.Address) (gethCommon.Hash, error) { + switch addr { + case addr1: + return addr1IntiCodeHash, nil + default: + return gethCommon.Hash{}, fmt.Errorf("some error") + } + }, + }) + + // get code through parent + code, err := view.GetCode(addr1) + require.NoError(t, err) + require.Equal(t, addr1InitCode, code) + + // get code size through parent + codeSize, err := view.GetCodeSize(addr1) + require.NoError(t, err) + require.Equal(t, len(addr1InitCode), codeSize) + + // get code hash through parent + codeHash, err := view.GetCodeHash(addr1) + require.NoError(t, err) + require.Equal(t, addr1IntiCodeHash, codeHash) + + // set code for addr1 + newCode := []byte("new code") + err = view.SetCode(addr1, newCode) + require.NoError(t, err) + + code, err = view.GetCode(addr1) + require.NoError(t, err) + require.Equal(t, newCode, code) + + codeSize, err = view.GetCodeSize(addr1) + require.NoError(t, err) + require.Equal(t, len(newCode), codeSize) + + codeHash, err = view.GetCodeHash(addr1) + require.NoError(t, err) + require.Equal(t, gethCrypto.Keccak256Hash(code), codeHash) + + // handling error on the parent + _, err = view.GetCode(addr2) + require.Error(t, err) + + // create a new account at addr2 + err = view.CreateAccount(addr2) + require.NoError(t, err) + + // now the code should return empty code + code, err = view.GetCode(addr2) + require.NoError(t, err) + require.Len(t, code, 0) + + codeHash, err = view.GetCodeHash(addr2) + require.NoError(t, err) + require.Equal(t, gethTypes.EmptyCodeHash, codeHash) + }) + + t.Run("test state access functionality", func(t *testing.T) { + slot1 := types.SlotAddress{ + Address: testutils.RandomCommonAddress(t), + Key: gethCommon.BytesToHash([]byte{1, 2}), + } + + slot1InitValue := gethCommon.BytesToHash([]byte{3, 4}) + + slot2 := types.SlotAddress{ + Address: testutils.RandomCommonAddress(t), + Key: gethCommon.BytesToHash([]byte{5, 6}), + } + + view := state.NewDeltaView( + &MockedReadOnlyView{ + // we need get refund for parent + GetRefundFunc: emptyRefund, + + GetStateFunc: func(slot types.SlotAddress) (gethCommon.Hash, error) { + switch slot { + case slot1: + return slot1InitValue, nil + default: + return gethCommon.Hash{}, fmt.Errorf("some error") + } + }, + }) + + // get state through parent + value, err := view.GetState(slot1) + require.NoError(t, err) + require.Equal(t, slot1InitValue, value) + + // handle error from parent + _, err = view.GetState(slot2) + require.Error(t, err) + + // check dirty slots + dirtySlots := view.DirtySlots() + require.Empty(t, dirtySlots) + + // set slot1 with some new value + newValue := gethCommon.BytesToHash([]byte{9, 8}) + prevValue, err := view.SetState(slot1, newValue) + require.NoError(t, err) + require.Equal(t, value, prevValue) + + value, err = view.GetState(slot1) + require.NoError(t, err) + require.Equal(t, newValue, value) + + // check dirty slots + dirtySlots = view.DirtySlots() + require.Len(t, dirtySlots, 1) + + _, found := dirtySlots[slot1] + require.True(t, found) + }) + + t.Run("test transient state access functionality", func(t *testing.T) { + slot1 := types.SlotAddress{ + Address: testutils.RandomCommonAddress(t), + Key: gethCommon.BytesToHash([]byte{1, 2}), + } + + slot1InitValue := gethCommon.BytesToHash([]byte{3, 4}) + + view := state.NewDeltaView( + &MockedReadOnlyView{ + // we need get refund for parent + GetRefundFunc: emptyRefund, + GetTransientStateFunc: func(slot types.SlotAddress) gethCommon.Hash { + switch slot { + case 
slot1: + return slot1InitValue + default: + return gethCommon.Hash{} + } + }, + }) + + // get state through parent + value := view.GetTransientState(slot1) + require.Equal(t, slot1InitValue, value) + + // set slot1 with some new value + newValue := gethCommon.BytesToHash([]byte{9, 8}) + view.SetTransientState(slot1, newValue) + + value = view.GetTransientState(slot1) + require.Equal(t, newValue, value) + }) + + t.Run("test refund functionality", func(t *testing.T) { + initRefund := uint64(10) + view := state.NewDeltaView( + &MockedReadOnlyView{ + GetRefundFunc: func() uint64 { + return initRefund + }, + }) + + // get refund through parent + value := view.GetRefund() + require.Equal(t, initRefund, value) + + // add refund + addition := uint64(7) + err := view.AddRefund(addition) + require.NoError(t, err) + require.Equal(t, initRefund+addition, view.GetRefund()) + + // sub refund + subtract := uint64(2) + err = view.SubRefund(subtract) + require.NoError(t, err) + require.Equal(t, initRefund+addition-subtract, view.GetRefund()) + + // refund goes negative + err = view.SubRefund(1000) + require.Error(t, err) + }) + + t.Run("test access list functionality", func(t *testing.T) { + addr1 := testutils.RandomCommonAddress(t) + addr2 := testutils.RandomCommonAddress(t) + slot1 := types.SlotAddress{ + Address: testutils.RandomCommonAddress(t), + Key: gethCommon.BytesToHash([]byte{1, 2}), + } + + slot2 := types.SlotAddress{ + Address: testutils.RandomCommonAddress(t), + Key: gethCommon.BytesToHash([]byte{3, 4}), + } + + view := state.NewDeltaView( + &MockedReadOnlyView{ + GetRefundFunc: emptyRefund, + AddressInAccessListFunc: func(addr gethCommon.Address) bool { + switch addr { + case addr1: + return true + default: + return false + } + }, + SlotInAccessListFunc: func(slot types.SlotAddress) (addressOk bool, slotOk bool) { + switch slot { + case slot1: + return false, true + default: + return false, false + } + }, + }) + + // check address through parent + require.False(t, view.AddressInAccessList(addr1)) + + // add addr 2 to the list + require.False(t, view.AddressInAccessList(addr2)) + added := view.AddAddressToAccessList(addr2) + require.True(t, added) + require.True(t, view.AddressInAccessList(addr2)) + + // adding again + added = view.AddAddressToAccessList(addr2) + require.False(t, added) + + // check slot through parent + addrFound, slotFound := view.SlotInAccessList(slot1) + require.False(t, addrFound) + require.False(t, slotFound) + + // add slot 2 to the list + addrFound, slotFound = view.SlotInAccessList(slot2) + require.False(t, addrFound) + require.False(t, slotFound) + + addressAdded, slotAdded := view.AddSlotToAccessList(slot2) + require.True(t, addressAdded) + require.True(t, slotAdded) + + addrFound, slotFound = view.SlotInAccessList(slot2) + require.True(t, addrFound) + require.True(t, slotFound) + + // adding again + addressAdded, slotAdded = view.AddSlotToAccessList(slot2) + require.False(t, addressAdded) + require.False(t, slotAdded) + }) + + t.Run("test log functionality", func(t *testing.T) { + view := state.NewDeltaView( + &MockedReadOnlyView{ + GetRefundFunc: emptyRefund, + }) + + logs := view.Logs() + require.Empty(t, logs) + + log1 := &gethTypes.Log{ + Address: testutils.RandomCommonAddress(t), + } + view.AddLog(log1) + + log2 := &gethTypes.Log{ + Address: testutils.RandomCommonAddress(t), + } + view.AddLog(log2) + + logs = view.Logs() + require.Equal(t, []*gethTypes.Log{log1, log2}, logs) + }) + + t.Run("test preimage functionality", func(t *testing.T) { + view := 
state.NewDeltaView( + &MockedReadOnlyView{ + GetRefundFunc: emptyRefund, + }) + + preimages := view.Preimages() + require.Empty(t, preimages) + + preimage1 := []byte{1, 2} + hash1 := gethCommon.BytesToHash([]byte{2, 3}) + view.AddPreimage(hash1, preimage1) + + preimage2 := []byte{4, 5} + hash2 := gethCommon.BytesToHash([]byte{6, 7}) + view.AddPreimage(hash2, preimage2) + + expected := make(map[gethCommon.Hash][]byte) + expected[hash1] = preimage1 + expected[hash2] = preimage2 + + preimages = view.Preimages() + require.Equal(t, expected, preimages) + }) + + t.Run("test dirty addresses functionality", func(t *testing.T) { + addrCount := 6 + addresses := make([]gethCommon.Address, addrCount) + for i := 0; i < addrCount; i++ { + addresses[i] = testutils.RandomCommonAddress(t) + } + + view := state.NewDeltaView( + &MockedReadOnlyView{ + // we need get refund for parent + GetRefundFunc: emptyRefund, + ExistFunc: func(addr gethCommon.Address) (bool, error) { + return true, nil + }, + GetBalanceFunc: func(addr gethCommon.Address) (*uint256.Int, error) { + return uint256.NewInt(10), nil + }, + GetNonceFunc: func(addr gethCommon.Address) (uint64, error) { + return 0, nil + }, + IsCreatedFunc: func(a gethCommon.Address) bool { + return false + }, + HasSelfDestructedFunc: func(gethCommon.Address) (bool, *uint256.Int) { + return false, new(uint256.Int) + }, + }) + + // check dirty addresses + dirtyAddresses := view.DirtyAddresses() + require.Empty(t, dirtyAddresses) + + // create a account at address 1 + err := view.CreateAccount(addresses[0]) + require.NoError(t, err) + + // self destruct address 2 + err = view.SelfDestruct(addresses[1]) + require.NoError(t, err) + + // add balance for address 3 + err = view.AddBalance(addresses[2], uint256.NewInt(5)) + require.NoError(t, err) + + // sub balance for address 4 + err = view.AddBalance(addresses[3], uint256.NewInt(5)) + require.NoError(t, err) + + // set nonce for address 5 + err = view.SetNonce(addresses[4], 5) + require.NoError(t, err) + + // set code for address 6 + err = view.SetCode(addresses[5], []byte{1, 2}) + require.NoError(t, err) + + // now check dirty addresses + dirtyAddresses = view.DirtyAddresses() + require.Len(t, dirtyAddresses, addrCount) + for _, addr := range addresses { + _, found := dirtyAddresses[addr] + require.True(t, found) + } + }) + + t.Run("test account creation after selfdestruct call", func(t *testing.T) { + addr1 := testutils.RandomCommonAddress(t) + + view := state.NewDeltaView( + &MockedReadOnlyView{ + // we need get refund for parent + GetRefundFunc: emptyRefund, + ExistFunc: func(addr gethCommon.Address) (bool, error) { + return true, nil + }, + HasSelfDestructedFunc: func(gethCommon.Address) (bool, *uint256.Int) { + return true, uint256.NewInt(2) + }, + IsCreatedFunc: func(a gethCommon.Address) bool { + return false + }, + GetBalanceFunc: func(addr gethCommon.Address) (*uint256.Int, error) { + return new(uint256.Int), nil + }, + GetStateFunc: func(sa types.SlotAddress) (gethCommon.Hash, error) { + return gethCommon.Hash{}, nil + }, + }) + + found, err := view.Exist(addr1) + require.NoError(t, err) + require.True(t, found) + + // set balance + initBalance := uint256.NewInt(10) + err = view.AddBalance(addr1, initBalance) + require.NoError(t, err) + + bal, err := view.GetBalance(addr1) + require.NoError(t, err) + require.Equal(t, initBalance, bal) + + // set code + code := []byte{1, 2, 3} + err = view.SetCode(addr1, code) + require.NoError(t, err) + + ret, err := view.GetCode(addr1) + require.NoError(t, err) + 
require.Equal(t, code, ret) + + // set key values + key := testutils.RandomCommonHash(t) + value := testutils.RandomCommonHash(t) + sk := types.SlotAddress{Address: addr1, Key: key} + + stateValue, err := view.GetState(sk) + require.NoError(t, err) + + prevValue, err := view.SetState(sk, value) + require.NoError(t, err) + require.Equal(t, stateValue, prevValue) + + vret, err := view.GetState(sk) + require.NoError(t, err) + require.Equal(t, value, vret) + + err = view.SelfDestruct(addr1) + require.NoError(t, err) + + // balance should be returned zero + bal, err = view.GetBalance(addr1) + require.NoError(t, err) + require.Equal(t, new(uint256.Int), bal) + + // get code should still work + ret, err = view.GetCode(addr1) + require.NoError(t, err) + require.Equal(t, code, ret) + + // get state should also still work + vret, err = view.GetState(sk) + require.NoError(t, err) + require.Equal(t, value, vret) + + // now re-create account + err = view.CreateAccount(addr1) + require.NoError(t, err) + + // it should carry over the balance + bal, err = view.GetBalance(addr1) + require.NoError(t, err) + require.Equal(t, initBalance, bal) + + ret, err = view.GetCode(addr1) + require.NoError(t, err) + require.Len(t, ret, 0) + + vret, err = view.GetState(sk) + require.NoError(t, err) + emptyValue := gethCommon.Hash{} + require.Equal(t, emptyValue, vret) + }) +} + +type MockedReadOnlyView struct { + ExistFunc func(gethCommon.Address) (bool, error) + HasSelfDestructedFunc func(gethCommon.Address) (bool, *uint256.Int) + IsCreatedFunc func(gethCommon.Address) bool + IsNewContractFunc func(gethCommon.Address) bool + GetBalanceFunc func(gethCommon.Address) (*uint256.Int, error) + GetNonceFunc func(gethCommon.Address) (uint64, error) + GetCodeFunc func(gethCommon.Address) ([]byte, error) + GetCodeHashFunc func(gethCommon.Address) (gethCommon.Hash, error) + GetCodeSizeFunc func(gethCommon.Address) (int, error) + GetStateFunc func(types.SlotAddress) (gethCommon.Hash, error) + GetStorageRootFunc func(gethCommon.Address) (gethCommon.Hash, error) + GetTransientStateFunc func(types.SlotAddress) gethCommon.Hash + GetRefundFunc func() uint64 + AddressInAccessListFunc func(gethCommon.Address) bool + SlotInAccessListFunc func(types.SlotAddress) (addressOk bool, slotOk bool) +} + +var _ types.ReadOnlyView = &MockedReadOnlyView{} + +func (v *MockedReadOnlyView) Exist(addr gethCommon.Address) (bool, error) { + if v.ExistFunc == nil { + panic("Exist is not set in this mocked view") + } + return v.ExistFunc(addr) +} + +func (v *MockedReadOnlyView) IsCreated(addr gethCommon.Address) bool { + if v.IsCreatedFunc == nil { + panic("IsCreated is not set in this mocked view") + } + return v.IsCreatedFunc(addr) +} + +func (v *MockedReadOnlyView) IsNewContract(addr gethCommon.Address) bool { + if v.IsNewContractFunc == nil { + panic("IsNewContract is not set in this mocked view") + } + return v.IsNewContractFunc(addr) +} + +func (v *MockedReadOnlyView) HasSelfDestructed(addr gethCommon.Address) (bool, *uint256.Int) { + if v.HasSelfDestructedFunc == nil { + panic("HasSelfDestructed is not set in this mocked view") + } + return v.HasSelfDestructedFunc(addr) +} + +func (v *MockedReadOnlyView) GetBalance(addr gethCommon.Address) (*uint256.Int, error) { + if v.GetBalanceFunc == nil { + panic("GetBalance is not set in this mocked view") + } + return v.GetBalanceFunc(addr) +} + +func (v *MockedReadOnlyView) GetNonce(addr gethCommon.Address) (uint64, error) { + if v.GetNonceFunc == nil { + panic("GetNonce is not set in this mocked view") + } + 
return v.GetNonceFunc(addr) +} + +func (v *MockedReadOnlyView) GetCode(addr gethCommon.Address) ([]byte, error) { + if v.GetCodeFunc == nil { + panic("GetCode is not set in this mocked view") + } + return v.GetCodeFunc(addr) +} + +func (v *MockedReadOnlyView) GetCodeHash(addr gethCommon.Address) (gethCommon.Hash, error) { + if v.GetCodeHashFunc == nil { + panic("GetCodeHash is not set in this mocked view") + } + return v.GetCodeHashFunc(addr) +} + +func (v *MockedReadOnlyView) GetCodeSize(addr gethCommon.Address) (int, error) { + if v.GetCodeSizeFunc == nil { + panic("GetCodeSize is not set in this mocked view") + } + return v.GetCodeSizeFunc(addr) +} + +func (v *MockedReadOnlyView) GetState(slot types.SlotAddress) (gethCommon.Hash, error) { + if v.GetStateFunc == nil { + panic("GetState is not set in this mocked view") + } + return v.GetStateFunc(slot) +} + +func (v *MockedReadOnlyView) GetStorageRoot(addr gethCommon.Address) (gethCommon.Hash, error) { + if v.GetStorageRootFunc == nil { + panic("GetStorageRoot is not set in this mocked view") + } + return v.GetStorageRootFunc(addr) +} + +func (v *MockedReadOnlyView) GetTransientState(slot types.SlotAddress) gethCommon.Hash { + if v.GetTransientStateFunc == nil { + panic("GetTransientState is not set in this mocked view") + } + return v.GetTransientStateFunc(slot) +} + +func (v *MockedReadOnlyView) GetRefund() uint64 { + if v.GetRefundFunc == nil { + panic("GetRefund is not set in this mocked view") + } + return v.GetRefundFunc() +} + +func (v *MockedReadOnlyView) AddressInAccessList(addr gethCommon.Address) bool { + if v.AddressInAccessListFunc == nil { + panic("AddressInAccessList is not set in this mocked view") + } + return v.AddressInAccessListFunc(addr) +} + +func (v *MockedReadOnlyView) SlotInAccessList(slot types.SlotAddress) (addressOk bool, slotOk bool) { + if v.SlotInAccessListFunc == nil { + panic("SlotInAccessList is not set in this mocked view") + } + return v.SlotInAccessListFunc(slot) +} diff --git a/fvm/evm/emulator/state/diff.go b/fvm/evm/emulator/state/diff.go new file mode 100644 index 00000000000..bae539bd5db --- /dev/null +++ b/fvm/evm/emulator/state/diff.go @@ -0,0 +1,91 @@ +package state + +import ( + "bytes" + "fmt" +) + +func AccountEqual(a, b *Account) bool { + if a.Address != b.Address { + return false + } + if !bytes.Equal(a.Balance.Bytes(), b.Balance.Bytes()) { + return false + } + if a.Nonce != b.Nonce { + return false + } + if a.CodeHash != b.CodeHash { + return false + } + + // CollectionID could be different + return true +} + +// find the difference and return as error +func Diff(a *EVMState, b *EVMState) []error { + var differences []error + + // Compare Accounts + for addr, accA := range a.Accounts { + if accB, exists := b.Accounts[addr]; exists { + if !AccountEqual(accA, accB) { + differences = append(differences, fmt.Errorf("account %s differs, accA %v, accB %v", addr.Hex(), accA, accB)) + } + } else { + differences = append(differences, fmt.Errorf("account %s exists in a but not in b", addr.Hex())) + } + } + for addr := range b.Accounts { + if _, exists := a.Accounts[addr]; !exists { + differences = append(differences, fmt.Errorf("account %s exists in b but not in a", addr.Hex())) + } + } + + // Compare Slots + for addr, slotsA := range a.Slots { + slotsB, exists := b.Slots[addr] + if !exists { + differences = append(differences, fmt.Errorf("slots for address %s exist in a but not in b", addr.Hex())) + continue + } + for key, valueA := range slotsA { + if valueB, exists := slotsB[key]; exists { + if 
valueA.Value != valueB.Value { + differences = append(differences, fmt.Errorf("slot value for address %s and key %s differs", addr.Hex(), key.Hex())) + } + } else { + differences = append(differences, fmt.Errorf("slot with key %s for address %s exists in a but not in b", key.Hex(), addr.Hex())) + } + } + for key := range slotsB { + if _, exists := slotsA[key]; !exists { + differences = append(differences, fmt.Errorf("slot with key %s for address %s exists in b but not in a", key.Hex(), addr.Hex())) + } + } + } + for addr := range b.Slots { + if _, exists := a.Slots[addr]; !exists { + differences = append(differences, fmt.Errorf("slots for address %s exist in b but not in a", addr.Hex())) + } + } + + // Compare Codes + for hash, codeA := range a.Codes { + if codeB, exists := b.Codes[hash]; exists { + if !bytes.Equal(codeA.Code, codeB.Code) { + differences = append(differences, fmt.Errorf("code for hash %s differs", hash.Hex())) + } + } else { + differences = append(differences, fmt.Errorf("code with hash %s exists in a but not in b", hash.Hex())) + } + } + for hash := range b.Codes { + if _, exists := a.Codes[hash]; !exists { + differences = append(differences, fmt.Errorf("code with hash %s exists in b but not in a", hash.Hex())) + } + } + + return differences +} diff --git a/fvm/evm/emulator/state/diff_test.go b/fvm/evm/emulator/state/diff_test.go new file mode 100644 index 00000000000..4abb6868795 --- /dev/null +++ b/fvm/evm/emulator/state/diff_test.go @@ -0,0 +1,74 @@ +package state_test + +import ( + "fmt" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm" + "github.com/onflow/flow-go/fvm/evm/emulator/state" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/model/flow" +) + +func StateDiff(t *testing.T) { + offchainState, err := state.ImportEVMStateFromGob("/var/flow2/evm-state-from-gobs-218215348/") + require.NoError(t, err) + + enState, err := state.ImportEVMStateFromGob("/var/flow2/evm-state-from-gobs-218215348/") + require.NoError(t, err) + + differences := state.Diff(enState, offchainState) + + require.Len(t, differences, 0) +} + +func EVMStateDiff(t *testing.T) { + + state1 := EVMStateFromReplayGobDir(t, "/var/flow2/evm-state-from-gobs-218215348/", uint64(218215348)) + // state2 := EVMStateFromReplayGobDir(t, "/var/flow2/evm-state-from-gobs-218215348/", uint64(218215348)) + state2 := EVMStateFromCheckpointExtract(t, "/var/flow2/evm-state-from-checkpoint-218215348/") + + differences := state.Diff(state1, state2) + + for i, diff := range differences { + fmt.Printf("Difference %d: %v\n", i, diff) + } + + require.Len(t, differences, 0) +} + +func EVMStateFromCheckpointExtract(t *testing.T, dir string) *state.EVMState { + enState, err := state.ImportEVMStateFromGob("/var/flow2/evm-state-from-gobs-218215348/") + require.NoError(t, err) + return enState +} + +func EVMStateFromReplayGobDir(t *testing.T, gobDir string, flowHeight uint64) *state.EVMState { + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(gobDir, flowHeight) + chainID := flow.Testnet + + allocatorGobs, err := testutils.DeserializeAllocator(allocatorFileName) + require.NoError(t, err) + + storageRoot := evm.StorageAccountAddress(chainID) + valuesGob, err := testutils.DeserializeState(valueFileName) + require.NoError(t, err) + + store := testutils.GetSimpleValueStorePopulated(valuesGob, allocatorGobs) + + bv, err := state.NewBaseView(store, storageRoot) + require.NoError(t, err) + + evmState, err := 
+	evmState, err := state.Extract(storageRoot, bv)
+	require.NoError(t, err)
+	return evmState
+}
+
+func evmStateGobFileNamesByEndHeight(evmStateGobDir string, endHeight uint64) (string, string) {
+	valueFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("values-%d.gob", endHeight))
+	allocatorFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("allocators-%d.gob", endHeight))
+	return valueFileName, allocatorFileName
+}
diff --git a/fvm/evm/emulator/state/exporter.go b/fvm/evm/emulator/state/exporter.go
new file mode 100644
index 00000000000..32005fa53a9
--- /dev/null
+++ b/fvm/evm/emulator/state/exporter.go
@@ -0,0 +1,209 @@
+package state
+
+import (
+	"encoding/gob"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/onflow/atree"
+
+	"github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+const (
+	ExportedAccountsFileName = "accounts.bin"
+	ExportedCodesFileName    = "codes.bin"
+	ExportedSlotsFileName    = "slots.bin"
+	ExportedStateGobFileName = "state.gob"
+)
+
+type Exporter struct {
+	ledger   atree.Ledger
+	root     flow.Address
+	baseView *BaseView
+}
+
+// NewExporter constructs a new Exporter
+func NewExporter(ledger atree.Ledger, root flow.Address) (*Exporter, error) {
+	bv, err := NewBaseView(ledger, root)
+	if err != nil {
+		return nil, err
+	}
+	return &Exporter{
+		ledger:   ledger,
+		root:     root,
+		baseView: bv,
+	}, nil
+}
+
+func (e *Exporter) ExportGob(path string) error {
+	fileName := filepath.Join(path, ExportedStateGobFileName)
+	// Create the output file for writing
+	file, err := os.Create(fileName)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	state, err := Extract(e.root, e.baseView)
+	if err != nil {
+		return err
+	}
+
+	// Use gob to encode data
+	encoder := gob.NewEncoder(file)
+	err = encoder.Encode(state)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (e *Exporter) Export(path string) error {
+	af, err := os.Create(filepath.Join(path, ExportedAccountsFileName))
+	if err != nil {
+		return err
+	}
+	defer af.Close()
+
+	addrWithStorage, err := e.exportAccounts(af)
+	if err != nil {
+		return err
+	}
+
+	cf, err := os.Create(filepath.Join(path, ExportedCodesFileName))
+	if err != nil {
+		return err
+	}
+	defer cf.Close()
+
+	err = e.exportCodes(cf)
+	if err != nil {
+		return err
+	}
+
+	sf, err := os.Create(filepath.Join(path, ExportedSlotsFileName))
+	if err != nil {
+		return err
+	}
+	defer sf.Close()
+
+	err = e.exportSlots(addrWithStorage, sf)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// exportAccounts exports accounts and returns a list of addresses with non-empty storage
+func (e *Exporter) exportAccounts(writer io.Writer) ([]gethCommon.Address, error) {
+	itr, err := e.baseView.AccountIterator()
+	if err != nil {
+		return nil, err
+	}
+	// make a list of accounts with storage
+	addrWithSlots := make([]gethCommon.Address, 0)
+	for {
+		// TODO: we can optimize by returning the encoded value
+		acc, err := itr.Next()
+		if err != nil {
+			return nil, err
+		}
+		if acc == nil {
+			break
+		}
+		if acc.HasStoredValues() {
+			addrWithSlots = append(addrWithSlots, acc.Address)
+		}
+		encoded, err := acc.Encode()
+		if err != nil {
+			return nil, err
+		}
+
+		_, err = DecodeAccount(encoded)
+		if err != nil {
+			return nil, fmt.Errorf("account can not be decoded: %w", err)
+		}
+
+		// write every account on a new line
+		_, err = writer.Write(append(encoded, byte('\n')))
+		if err != nil {
+			return nil, err
+		}
+	}
+	return addrWithSlots, nil
+}
+
+// exportCodes exports codes
+func (e *Exporter) 
exportCodes(writer io.Writer) error { + itr, err := e.baseView.CodeIterator() + if err != nil { + return err + } + for { + cic, err := itr.Next() + if err != nil { + return err + } + if cic == nil { + break + } + encoded, err := cic.Encode() + if err != nil { + return err + } + + _, err = CodeInContextFromEncoded(encoded) + if err != nil { + return fmt.Errorf("error decoding code in context: %w", err) + } + + // write every codes on a new line + _, err = writer.Write(append(encoded, byte('\n'))) + if err != nil { + return err + } + } + return nil +} + +// exportSlots exports slots (key value pairs stored under accounts) +func (e *Exporter) exportSlots(addresses []gethCommon.Address, writer io.Writer) error { + for _, addr := range addresses { + itr, err := e.baseView.AccountStorageIterator(addr) + if err != nil { + return err + } + for { + slot, err := itr.Next() + if err != nil { + return err + } + if slot == nil { + break + } + encoded, err := slot.Encode() + if err != nil { + return err + } + + _, err = types.SlotEntryFromEncoded(encoded) + if err != nil { + return fmt.Errorf("error decoding slot entry: %w", err) + } + + // write every codes on a new line + _, err = writer.Write(append(encoded, byte('\n'))) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/fvm/evm/emulator/state/extract.go b/fvm/evm/emulator/state/extract.go new file mode 100644 index 00000000000..925c713ef6d --- /dev/null +++ b/fvm/evm/emulator/state/extract.go @@ -0,0 +1,82 @@ +package state + +import ( + gethCommon "github.com/ethereum/go-ethereum/common" + + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +func Extract( + root flow.Address, + baseView *BaseView, +) (*EVMState, error) { + + accounts := make(map[gethCommon.Address]*Account, 0) + + itr, err := baseView.AccountIterator() + + if err != nil { + return nil, err + } + // make a list of accounts with storage + addrWithSlots := make([]gethCommon.Address, 0) + for { + // TODO: we can optimize by returning the encoded value + acc, err := itr.Next() + if err != nil { + return nil, err + } + if acc == nil { + break + } + if acc.HasStoredValues() { + addrWithSlots = append(addrWithSlots, acc.Address) + } + accounts[acc.Address] = acc + } + + codes := make(map[gethCommon.Hash]*CodeInContext, 0) + codeItr, err := baseView.CodeIterator() + if err != nil { + return nil, err + } + for { + cic, err := codeItr.Next() + if err != nil { + return nil, err + } + if cic == nil { + break + } + codes[cic.Hash] = cic + } + + // account address -> key -> value + slots := make(map[gethCommon.Address]map[gethCommon.Hash]*types.SlotEntry) + + for _, addr := range addrWithSlots { + slots[addr] = make(map[gethCommon.Hash]*types.SlotEntry) + slotItr, err := baseView.AccountStorageIterator(addr) + if err != nil { + return nil, err + } + for { + slot, err := slotItr.Next() + if err != nil { + return nil, err + } + if slot == nil { + break + } + + slots[addr][slot.Key] = slot + } + } + + return &EVMState{ + Accounts: accounts, + Codes: codes, + Slots: slots, + }, nil +} diff --git a/fvm/evm/emulator/state/importer.go b/fvm/evm/emulator/state/importer.go new file mode 100644 index 00000000000..5eae814086d --- /dev/null +++ b/fvm/evm/emulator/state/importer.go @@ -0,0 +1,137 @@ +package state + +import ( + "encoding/gob" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + gethCommon "github.com/ethereum/go-ethereum/common" + + "github.com/onflow/flow-go/fvm/evm/types" +) + +type EVMState struct { + Accounts 
map[gethCommon.Address]*Account + Codes map[gethCommon.Hash]*CodeInContext + // account address -> key -> value + Slots map[gethCommon.Address]map[gethCommon.Hash]*types.SlotEntry +} + +func ToEVMState( + accounts map[gethCommon.Address]*Account, + codes []*CodeInContext, + slots []*types.SlotEntry, +) (*EVMState, error) { + state := &EVMState{ + Accounts: accounts, + Codes: make(map[gethCommon.Hash]*CodeInContext), + Slots: make(map[gethCommon.Address]map[gethCommon.Hash]*types.SlotEntry), + } + + // Process codes + for _, code := range codes { + if _, ok := state.Codes[code.Hash]; ok { + return nil, fmt.Errorf("duplicate code hash: %s", code.Hash) + } + state.Codes[code.Hash] = code + } + + // Process slots + for _, slot := range slots { + if _, ok := state.Slots[slot.Address]; !ok { + state.Slots[slot.Address] = make(map[gethCommon.Hash]*types.SlotEntry) + } + + if _, ok := state.Slots[slot.Address][slot.Key]; ok { + return nil, fmt.Errorf("duplicate slot key: %s", slot.Key) + } + + state.Slots[slot.Address][slot.Key] = slot + } + + return state, nil +} + +func ImportEVMStateFromGob(path string) (*EVMState, error) { + fileName := filepath.Join(path, ExportedStateGobFileName) + // Open the file for reading + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + // Prepare the map to store decoded data + var data EVMState + + // Use gob to decode data + decoder := gob.NewDecoder(file) + err = decoder.Decode(&data) + if err != nil { + return nil, err + } + + return &data, nil +} + +func ImportEVMState(path string) (*EVMState, error) { + accounts := make(map[gethCommon.Address]*Account) + var codes []*CodeInContext + var slots []*types.SlotEntry + // Import codes + codesData, err := ioutil.ReadFile(filepath.Join(path, ExportedCodesFileName)) + if err != nil { + return nil, fmt.Errorf("error opening codes file: %w", err) + } + codesLines := strings.Split(string(codesData), "\n") + for _, line := range codesLines { + if line == "" { + continue + } + code, err := CodeInContextFromEncoded([]byte(line)) + if err != nil { + return nil, fmt.Errorf("error decoding code in context: %w", err) + } + codes = append(codes, code) + } + + // Import slots + slotsData, err := ioutil.ReadFile(filepath.Join(path, ExportedSlotsFileName)) + if err != nil { + return nil, fmt.Errorf("error opening slots file: %w", err) + } + slotsLines := strings.Split(string(slotsData), "\n") + for _, line := range slotsLines { + if line == "" { + continue + } + slot, err := types.SlotEntryFromEncoded([]byte(line)) + if err != nil { + return nil, fmt.Errorf("error decoding slot entry: %w", err) + } + slots = append(slots, slot) + } + + // Import accounts + accountsData, err := ioutil.ReadFile(filepath.Join(path, ExportedAccountsFileName)) + if err != nil { + return nil, fmt.Errorf("error opening accounts file: %w", err) + } + accountsLines := strings.Split(string(accountsData), "\n") + for _, line := range accountsLines { + if line == "" { + continue + } + acc, err := DecodeAccount([]byte(line)) + if err != nil { + fmt.Println("error decoding account: ", err, line) + } else { + fmt.Println("decoded account", acc.Address) + accounts[acc.Address] = acc + } + } + return ToEVMState(accounts, codes, slots) +} diff --git a/fvm/evm/emulator/state/stateDB.go b/fvm/evm/emulator/state/stateDB.go new file mode 100644 index 00000000000..b0429ad7b7c --- /dev/null +++ b/fvm/evm/emulator/state/stateDB.go @@ -0,0 +1,680 @@ +package state + +import ( + "bytes" + stdErrors "errors" + "fmt" + "sort" + + 
gethCommon "github.com/ethereum/go-ethereum/common"
+	gethState "github.com/ethereum/go-ethereum/core/state"
+	gethStateless "github.com/ethereum/go-ethereum/core/stateless"
+	gethTracing "github.com/ethereum/go-ethereum/core/tracing"
+	gethTypes "github.com/ethereum/go-ethereum/core/types"
+	gethParams "github.com/ethereum/go-ethereum/params"
+	gethUtils "github.com/ethereum/go-ethereum/trie/utils"
+	"github.com/holiman/uint256"
+	"github.com/onflow/atree"
+	"github.com/onflow/crypto/hash"
+
+	"github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// StateDB implements a types.StateDB interface
+//
+// The stateDB interface defined by Geth doesn't support returning errors
+// from state calls, so this implementation caches the first error and
+// returns it at a later time (when Commit is called).
+// Warning: the current implementation of the StateDB is meant to be used
+// for a single EVM transaction execution and is not thread safe, yet the
+// current design supports adding concurrency in the future if needed.
+type StateDB struct {
+	ledger      atree.Ledger
+	root        flow.Address
+	baseView    types.BaseView
+	views       []*DeltaView
+	cachedError error
+}
+
+var _ types.StateDB = &StateDB{}
+
+// NewStateDB constructs a new StateDB
+func NewStateDB(ledger atree.Ledger, root flow.Address) (*StateDB, error) {
+	bv, err := NewBaseView(ledger, root)
+	if err != nil {
+		return nil, err
+	}
+	return &StateDB{
+		ledger:      ledger,
+		root:        root,
+		baseView:    bv,
+		views:       []*DeltaView{NewDeltaView(bv)},
+		cachedError: nil,
+	}, nil
+}
+
+// Exist returns true if the given address exists in state.
+//
+// This should also return true for self destructed accounts during the transaction execution.
+func (db *StateDB) Exist(addr gethCommon.Address) bool {
+	exist, err := db.latestView().Exist(addr)
+	db.handleError(err)
+	return exist
+}
+
+// Empty returns whether the given account is empty.
+//
+// Empty is defined according to EIP161 (balance = nonce = code = 0).
+func (db *StateDB) Empty(addr gethCommon.Address) bool {
+	if !db.Exist(addr) {
+		return true
+	}
+	return db.GetNonce(addr) == 0 &&
+		db.GetBalance(addr).Sign() == 0 &&
+		bytes.Equal(db.GetCodeHash(addr).Bytes(), gethTypes.EmptyCodeHash.Bytes())
+}
+
+// CreateAccount creates a new account for the given address
+// and sets its nonce to zero.
+func (db *StateDB) CreateAccount(addr gethCommon.Address) {
+	err := db.latestView().CreateAccount(addr)
+	db.handleError(err)
+}
+
+// IsCreated returns true if the address was recently created (within the current transaction)
+func (db *StateDB) IsCreated(addr gethCommon.Address) bool {
+	return db.latestView().IsCreated(addr)
+}
+
+// CreateContract is used whenever a contract is created. This may be preceded
+// by CreateAccount, but that is not required if it already existed in the
+// state due to funds sent beforehand.
+// This operation sets the 'newContract'-flag, which is required in order to
+// correctly handle EIP-6780 'delete-in-same-transaction' logic.
+func (db *StateDB) CreateContract(addr gethCommon.Address) {
+	db.latestView().CreateContract(addr)
+}
+
+// IsNewContract returns true if the address is a contract newly created within this transaction
+func (db *StateDB) IsNewContract(addr gethCommon.Address) bool {
+	return db.latestView().IsNewContract(addr)
+}
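+
+// Illustrative sketch (hypothetical usage, not part of this diff) of how the
+// creation flag above feeds the EIP-6780 self-destruct logic below:
+//
+//	db.CreateAccount(addr)             // plain account: IsNewContract(addr) == false
+//	db.CreateContract(addr)            // flags addr as a contract created in this tx
+//	_, ok := db.SelfDestruct6780(addr) // ok == true only because the flag was set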
+
+// SelfDestruct flags the address for deletion and returns the previous balance.
+//
+// While this address exists for the rest of the transaction,
+// the balance of this account is cleared after the SelfDestruct call.
+func (db *StateDB) SelfDestruct(addr gethCommon.Address) uint256.Int {
+	db.handleError(fmt.Errorf("legacy self destruct is not supported"))
+	return uint256.Int{}
+}
+
+// SelfDestruct6780 only follows the self destruct steps if the account is a new contract,
+// either just created, or the address had a balance before but got a contract deployed to it (in this tx).
+// Returns the previous balance and a boolean value denoting whether the address was self destructed.
+func (db *StateDB) SelfDestruct6780(addr gethCommon.Address) (uint256.Int, bool) {
+	balance, err := db.latestView().GetBalance(addr)
+	db.handleError(err)
+
+	if db.IsNewContract(addr) {
+		err := db.latestView().SelfDestruct(addr)
+		db.handleError(err)
+		return *balance, true
+	}
+
+	return *balance, false
+}
+
+// HasSelfDestructed returns true if address is flagged with self destruct.
+func (db *StateDB) HasSelfDestructed(addr gethCommon.Address) bool {
+	destructed, _ := db.latestView().HasSelfDestructed(addr)
+	return destructed
+}
+
+// SubBalance subtracts the amount from the balance of the given address
+// and returns the previous balance.
+func (db *StateDB) SubBalance(
+	addr gethCommon.Address,
+	amount *uint256.Int,
+	reason gethTracing.BalanceChangeReason,
+) uint256.Int {
+	// negative amounts are not accepted.
+	if amount.Sign() < 0 {
+		db.handleError(types.ErrInvalidBalance)
+		return uint256.Int{}
+	}
+	prevBalance, err := db.latestView().GetBalance(addr)
+	db.handleError(err)
+
+	err = db.latestView().SubBalance(addr, amount)
+	db.handleError(err)
+
+	return *prevBalance
+}
+
+// AddBalance adds the amount to the balance of the given address
+// and returns the previous balance.
+func (db *StateDB) AddBalance(
+	addr gethCommon.Address,
+	amount *uint256.Int,
+	reason gethTracing.BalanceChangeReason,
+) uint256.Int {
+	// negative amounts are not accepted.
+ if amount.Sign() < 0 { + db.handleError(types.ErrInvalidBalance) + return uint256.Int{} + } + prevBalance, err := db.latestView().GetBalance(addr) + db.handleError(err) + + err = db.latestView().AddBalance(addr, amount) + db.handleError(err) + + return *prevBalance +} + +// GetBalance returns the balance of the given address +func (db *StateDB) GetBalance(addr gethCommon.Address) *uint256.Int { + bal, err := db.latestView().GetBalance(addr) + db.handleError(err) + return bal +} + +// GetNonce returns the nonce of the given address +func (db *StateDB) GetNonce(addr gethCommon.Address) uint64 { + nonce, err := db.latestView().GetNonce(addr) + db.handleError(err) + return nonce +} + +// SetNonce sets the nonce value for the given address +func (db *StateDB) SetNonce( + addr gethCommon.Address, + nonce uint64, + reason gethTracing.NonceChangeReason, +) { + err := db.latestView().SetNonce(addr, nonce) + db.handleError(err) +} + +// GetCodeHash returns the code hash of the given address +func (db *StateDB) GetCodeHash(addr gethCommon.Address) gethCommon.Hash { + hash, err := db.latestView().GetCodeHash(addr) + db.handleError(err) + return hash +} + +// GetCode returns the code for the given address +func (db *StateDB) GetCode(addr gethCommon.Address) []byte { + code, err := db.latestView().GetCode(addr) + db.handleError(err) + return code +} + +// GetCodeSize returns the size of the code for the given address +func (db *StateDB) GetCodeSize(addr gethCommon.Address) int { + codeSize, err := db.latestView().GetCodeSize(addr) + db.handleError(err) + return codeSize +} + +// SetCode sets the code for the given address, and returns the +// previous code located at the given address, if any. +func (db *StateDB) SetCode(addr gethCommon.Address, code []byte) (prev []byte) { + prev = db.GetCode(addr) + err := db.latestView().SetCode(addr, code) + db.handleError(err) + + return prev +} + +// AddRefund adds the amount to the total (gas) refund +func (db *StateDB) AddRefund(amount uint64) { + err := db.latestView().AddRefund(amount) + db.handleError(err) +} + +// SubRefund subtracts the amount from the total (gas) refund +func (db *StateDB) SubRefund(amount uint64) { + err := db.latestView().SubRefund(amount) + db.handleError(err) +} + +// GetRefund returns the total (gas) refund +func (db *StateDB) GetRefund() uint64 { + return db.latestView().GetRefund() +} + +// GetCommittedState returns the value for the given storage slot considering only the committed state and not +// changes in the scope of current transaction. +func (db *StateDB) GetCommittedState(addr gethCommon.Address, key gethCommon.Hash) gethCommon.Hash { + value, err := db.baseView.GetState(types.SlotAddress{Address: addr, Key: key}) + db.handleError(err) + return value +} + +// GetStateAndCommittedState returns the current value and the original value. +func (db *StateDB) GetStateAndCommittedState( + addr gethCommon.Address, + key gethCommon.Hash, +) (gethCommon.Hash, gethCommon.Hash) { + origin := db.GetCommittedState(addr, key) + value := db.GetState(addr, key) + + return value, origin +} + +// GetState returns the value for the given storage slot +func (db *StateDB) GetState(addr gethCommon.Address, key gethCommon.Hash) gethCommon.Hash { + state, err := db.latestView().GetState(types.SlotAddress{Address: addr, Key: key}) + db.handleError(err) + return state +} + +// GetStorageRoot returns some sort of root for the given address. +// +// Warning! 
Since StateDB doesn't construct a Merkle tree under the hood,
+// the behavior of this endpoint is as follows:
+// - if an account doesn't exist it returns common.Hash{}
+// - if the account is an EOA it returns gethCommon.EmptyRootHash
+// - else it returns a unique hash value as the root, but this value
+//   is not a commitment to the account's storage contents
+//
+// This behavior is ok for this version of EVM as the only
+// use case in the EVM right now is here
+// https://github.com/ethereum/go-ethereum/blob/37590b2c5579c36d846c788c70861685b0ea240e/core/vm/evm.go#L480
+// where the returned value is compared to empty values to make sure the storage is empty.
+// This endpoint is added mostly to prevent the case where a smart contract is self-destructed
+// and a later transaction tries to deploy a contract to the same address.
+func (db *StateDB) GetStorageRoot(addr gethCommon.Address) gethCommon.Hash {
+	root, err := db.latestView().GetStorageRoot(addr)
+	db.handleError(err)
+	return root
+}
+
+// SetState sets a value for the given storage slot.
+// It returns the previous value in any case.
+func (db *StateDB) SetState(
+	addr gethCommon.Address,
+	key gethCommon.Hash,
+	value gethCommon.Hash,
+) gethCommon.Hash {
+	prevState, err := db.latestView().SetState(types.SlotAddress{Address: addr, Key: key}, value)
+	db.handleError(err)
+
+	return prevState
+}
+
+// GetTransientState returns the value for the given key of the transient storage
+func (db *StateDB) GetTransientState(addr gethCommon.Address, key gethCommon.Hash) gethCommon.Hash {
+	return db.latestView().GetTransientState(types.SlotAddress{Address: addr, Key: key})
+}
+
+// SetTransientState sets a value for the given key of the transient storage
+func (db *StateDB) SetTransientState(addr gethCommon.Address, key, value gethCommon.Hash) {
+	db.latestView().SetTransientState(types.SlotAddress{Address: addr, Key: key}, value)
+}
+
+// AddressInAccessList checks if an address is in the access list
+func (db *StateDB) AddressInAccessList(addr gethCommon.Address) bool {
+	// For each static call / call / delegate call, the EVM will create
+	// a snapshot, so that it can revert to it in case of execution errors,
+	// such as out of gas etc, using `Snapshot` & `RevertToSnapshot`.
+	// This can create a long list of views, in the order of 4K for certain
+	// large transactions. To avoid performance issues with DeltaView checking parents,
+	// which causes deep stacks and function call overhead, we use a plain for-loop instead.
+	// We iterate through the views in ascending order (from lowest to highest) as an optimization.
+	// Since addresses are typically added to the AccessList early during transaction execution,
+	// this allows us to return early when the needed addresses are found in the initial views.
+	end := len(db.views)
+	for i := range end {
+		view := db.views[i]
+		if view.AddressInAccessList(addr) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// SlotInAccessList checks if the given (address,slot) is in the access list
+func (db *StateDB) SlotInAccessList(addr gethCommon.Address, key gethCommon.Hash) (addressOk bool, slotOk bool) {
+	slotKey := types.SlotAddress{Address: addr, Key: key}
+
+	// For each static call / call / delegate call, the EVM will create
+	// a snapshot, so that it can revert to it in case of execution errors,
+	// such as out of gas etc, using `Snapshot` & `RevertToSnapshot`.
+	// This can create a long list of views, in the order of 4K for certain
+	// large transactions. To avoid performance issues with DeltaView checking parents,
+	// which causes deep stacks and function call overhead, we use a plain for-loop instead.
+	// We iterate through the views in ascending order (from lowest to highest) as an optimization.
+	// Since slots are typically added to the AccessList early during transaction execution,
+	// this allows us to return early when the needed slots are found in the initial views.
+	addressFound := false
+	end := len(db.views)
+	for i := range end {
+		view := db.views[i]
+		addrInList, slotInList := view.SlotInAccessList(slotKey)
+		if addrInList {
+			// remember that the address was seen, even if the slot wasn't found yet
+			// (assigning here avoids shadowing addressFound inside the loop)
+			addressFound = true
+		}
+		if slotInList {
+			return addressFound, true
+		}
+	}
+
+	return addressFound, false
+}
+
+// AddAddressToAccessList adds the given address to the access list.
+func (db *StateDB) AddAddressToAccessList(addr gethCommon.Address) {
+	db.latestView().AddAddressToAccessList(addr)
+}
+
+// AddSlotToAccessList adds the given (address,slot) to the access list.
+func (db *StateDB) AddSlotToAccessList(addr gethCommon.Address, key gethCommon.Hash) {
+	db.latestView().AddSlotToAccessList(types.SlotAddress{Address: addr, Key: key})
+}
+
+// AddLog appends a log to the collection of logs
+func (db *StateDB) AddLog(log *gethTypes.Log) {
+	db.latestView().AddLog(log)
+}
+
+// AddPreimage adds a pre-image to the collection of pre-images
+func (db *StateDB) AddPreimage(hash gethCommon.Hash, data []byte) {
+	db.latestView().AddPreimage(hash, data)
+}
+
+// RevertToSnapshot reverts the changes until we reach the given snapshot
+func (db *StateDB) RevertToSnapshot(index int) {
+	if index > len(db.views) {
+		db.cachedError = fmt.Errorf("invalid revert")
+		return
+	}
+	db.views = db.views[:index]
+}
+
+// Snapshot takes a snapshot of the state and returns an int
+// that can be used later for revert calls.
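+//
+// A minimal usage sketch (hypothetical names, mirroring the tests in this diff):
+//
+//	snap := db.Snapshot()               // opens a new child view
+//	db.AddBalance(addr, amount, reason) // mutates only the child view
+//	db.RevertToSnapshot(snap)           // drops the child view, undoing the change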
+func (db *StateDB) Snapshot() int {
+	newView := db.latestView().NewChildView()
+	db.views = append(db.views, newView)
+	return len(db.views) - 1
+}
+
+// Logs returns the list of logs;
+// it also updates each log with the block and tx info.
+func (db *StateDB) Logs(
+	blockNumber uint64,
+	txHash gethCommon.Hash,
+	txIndex uint,
+) []*gethTypes.Log {
+	allLogs := make([]*gethTypes.Log, 0)
+	for _, view := range db.views {
+		for _, log := range view.Logs() {
+			log.BlockNumber = blockNumber
+			log.TxHash = txHash
+			log.TxIndex = txIndex
+			allLogs = append(allLogs, log)
+		}
+	}
+	return allLogs
+}
+
+// Preimages returns a set of pre-images
+func (db *StateDB) Preimages() map[gethCommon.Hash][]byte {
+	preImages := make(map[gethCommon.Hash][]byte, 0)
+	for _, view := range db.views {
+		for k, v := range view.Preimages() {
+			preImages[k] = v
+		}
+	}
+	return preImages
+}
+
+// Commit commits state changes back to the underlying storage
+func (db *StateDB) Commit(finalize bool) (hash.Hash, error) {
+	// return error if any has been accumulated
+	if db.cachedError != nil {
+		return nil, wrapError(db.cachedError)
+	}
+
+	var err error
+
+	// iterate views and collect dirty addresses and slots
+	addresses := make(map[gethCommon.Address]struct{})
+	slots := make(map[types.SlotAddress]struct{})
+	for _, view := range db.views {
+		for key := range view.DirtyAddresses() {
+			addresses[key] = struct{}{}
+		}
+		for key := range view.DirtySlots() {
+			slots[key] = struct{}{}
+		}
+	}
+
+	// sort addresses
+	sortedAddresses := make([]gethCommon.Address, 0, len(addresses))
+	for addr := range addresses {
+		sortedAddresses = append(sortedAddresses, addr)
+	}
+
+	sort.Slice(sortedAddresses,
+		func(i, j int) bool {
+			return bytes.Compare(sortedAddresses[i][:], sortedAddresses[j][:]) < 0
+		})
+
+	updateCommitter := NewUpdateCommitter()
+	// update accounts
+	for _, addr := range sortedAddresses {
+		// self destructed accounts are deleted and skipped
+		if db.HasSelfDestructed(addr) {
+			err = db.baseView.DeleteAccount(addr)
+			if err != nil {
+				return nil, wrapError(err)
+			}
+			err = updateCommitter.DeleteAccount(addr)
+			if err != nil {
+				return nil, wrapError(err)
+			}
+			continue
+		}
+
+		bal := db.GetBalance(addr)
+		nonce := db.GetNonce(addr)
+		code := db.GetCode(addr)
+		codeHash := db.GetCodeHash(addr)
+		// create new accounts
+		if db.IsCreated(addr) {
+			err = db.baseView.CreateAccount(
+				addr,
+				bal,
+				nonce,
+				code,
+				codeHash,
+			)
+			if err != nil {
+				return nil, wrapError(err)
+			}
+			err = updateCommitter.CreateAccount(addr, bal, nonce, codeHash)
+			if err != nil {
+				return nil, wrapError(err)
+			}
+			continue
+		}
+		err = db.baseView.UpdateAccount(
+			addr,
+			bal,
+			nonce,
+			code,
+			codeHash,
+		)
+		if err != nil {
+			return nil, wrapError(err)
+		}
+		err = updateCommitter.UpdateAccount(addr, bal, nonce, codeHash)
+		if err != nil {
+			return nil, wrapError(err)
+		}
+	}
+
+	// sort slots
+	sortedSlots := make([]types.SlotAddress, 0, len(slots))
+	for slot := range slots {
+		sortedSlots = append(sortedSlots, slot)
+	}
+	sort.Slice(sortedSlots, func(i, j int) bool {
+		comp := bytes.Compare(sortedSlots[i].Address[:], sortedSlots[j].Address[:])
+		if comp == 0 {
+			return bytes.Compare(sortedSlots[i].Key[:], sortedSlots[j].Key[:]) < 0
+		}
+		return comp < 0
+	})
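+
+	// Note: both addresses and slots are visited in byte-wise sorted order, so
+	// the hash produced by the update committer is deterministic across nodes.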
+
+	// update slots
+	for _, sk := range sortedSlots {
+		// don't update slots if self destructed
+		if db.HasSelfDestructed(sk.Address) {
+			continue
+		}
+		val := db.GetState(sk.Address, sk.Key)
+		err = db.baseView.UpdateSlot(
+			sk,
+			val,
+		)
+		if err != nil {
+			return nil, wrapError(err)
+		}
+		err = updateCommitter.UpdateSlot(sk.Address, sk.Key, val)
+		if err != nil {
+			return nil, wrapError(err)
+		}
+	}
+
+	// don't purge views yet, the caller might still query logs etc.
+	updateCommit := updateCommitter.Commitment()
+	if finalize {
+		err := db.Finalize()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return updateCommit, nil
+}
+
+// Finalise is a no-op for our custom implementation of the StateDB interface,
+// since Commit() already handles finalization and deletion of empty
+// objects.
+func (db *StateDB) Finalise(deleteEmptyObjects bool) {}
+
+// Finalize flushes all the changes
+// to the permanent storage
+func (db *StateDB) Finalize() error {
+	err := db.baseView.Commit()
+	return wrapError(err)
+}
+
+// Prepare is high-level logic that sadly is considered to be part of the
+// stateDB interface and not the layers above.
+// Based on the parameters passed, it updates the access lists.
+func (db *StateDB) Prepare(rules gethParams.Rules, sender, coinbase gethCommon.Address, dest *gethCommon.Address, precompiles []gethCommon.Address, txAccesses gethTypes.AccessList) {
+	if rules.IsBerlin {
+		db.AddAddressToAccessList(sender)
+
+		if dest != nil {
+			db.AddAddressToAccessList(*dest)
+			// If it's a create-tx, the destination will be added inside the EVM's create path
+		}
+		for _, addr := range precompiles {
+			db.AddAddressToAccessList(addr)
+		}
+		for _, el := range txAccesses {
+			db.AddAddressToAccessList(el.Address)
+			for _, key := range el.StorageKeys {
+				db.AddSlotToAccessList(el.Address, key)
+			}
+		}
+		if rules.IsShanghai { // EIP-3651: warm coinbase
+			db.AddAddressToAccessList(coinbase)
+		}
+	}
+}
+
+// Reset resets uncommitted changes and transient artifacts such as error, logs,
+// pre-images, access lists, ...
+// The method is often called between executions of different transactions
+func (db *StateDB) Reset() {
+	db.views = []*DeltaView{NewDeltaView(db.baseView)}
+	db.cachedError = nil
+}
+
+// Error returns the memorized database failure that occurred earlier.
+func (s *StateDB) Error() error {
+	return wrapError(s.cachedError)
+}
+
+// PointCache is not supported and only needed
+// when EIP-4762 is enabled in future versions
+// (currently planned for after the Verkle fork).
+func (s *StateDB) PointCache() *gethUtils.PointCache {
+	return nil
+}
+
+// Witness is not supported and only needed
+// when witness collection is enabled (EnableWitnessCollection flag).
+// By definition it should return a set containing all trie nodes that have been accessed.
+// The returned map could be nil if the witness is empty.
+func (s *StateDB) Witness() *gethStateless.Witness {
+	return nil
+}
+
+// AccessEvents is not supported and only needed
+// when EIP-4762 is enabled in future versions
+// (currently planned for after the Verkle fork).
+// See: https://eips.ethereum.org/EIPS/eip-4762#access-events
+func (s *StateDB) AccessEvents() *gethState.AccessEvents {
+	return nil
+}
+
+func (db *StateDB) latestView() *DeltaView {
+	return db.views[len(db.views)-1]
+}
+
+// handleError captures the first non-nil error it is called with.
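+// Callers observe the captured error later, either via Error() or as the
+// wrapped error returned from Commit().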
+func (db *StateDB) handleError(err error) { + if err == nil { + return + } + if db.cachedError == nil { + db.cachedError = err + } +} + +func wrapError(err error) error { + if err == nil { + return nil + } + + var atreeUserError *atree.UserError + // if is an atree user error + if stdErrors.As(err, &atreeUserError) { + return types.NewStateError(err) + } + + var atreeFatalError *atree.FatalError + // if is a atree fatal error or + if stdErrors.As(err, &atreeFatalError) { + return types.NewFatalError(err) + } + + // if is a fatal error + if types.IsAFatalError(err) { + return err + } + + return types.NewStateError(err) +} diff --git a/fvm/evm/emulator/state/stateDB_test.go b/fvm/evm/emulator/state/stateDB_test.go new file mode 100644 index 00000000000..345d92f5dd4 --- /dev/null +++ b/fvm/evm/emulator/state/stateDB_test.go @@ -0,0 +1,475 @@ +package state_test + +import ( + "fmt" + "testing" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethTracing "github.com/ethereum/go-ethereum/core/tracing" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethParams "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" + "github.com/onflow/atree" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/emulator/state" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +var rootAddr = flow.Address{1, 2, 3, 4, 5, 6, 7, 8} + +func TestStateDB(t *testing.T) { + t.Parallel() + + t.Run("test Empty method", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + db, err := state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + addr1 := testutils.RandomCommonAddress(t) + // non-existent account + require.True(t, db.Empty(addr1)) + require.NoError(t, db.Error()) + + db.CreateAccount(addr1) + require.NoError(t, db.Error()) + + require.True(t, db.Empty(addr1)) + require.NoError(t, db.Error()) + + db.AddBalance(addr1, uint256.NewInt(10), gethTracing.BalanceChangeUnspecified) + require.NoError(t, db.Error()) + + require.False(t, db.Empty(addr1)) + }) + + t.Run("test create contract method", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + db, err := state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + addr1 := testutils.RandomCommonAddress(t) + require.False(t, db.IsNewContract(addr1)) + require.NoError(t, db.Error()) + + db.CreateContract(addr1) + require.NoError(t, db.Error()) + + require.True(t, db.IsNewContract(addr1)) + require.NoError(t, db.Error()) + }) + + t.Run("test commit functionality", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + db, err := state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + addr1 := testutils.RandomCommonAddress(t) + key1 := testutils.RandomCommonHash(t) + value1 := testutils.RandomCommonHash(t) + + db.CreateAccount(addr1) + require.NoError(t, db.Error()) + + db.AddBalance(addr1, uint256.NewInt(5), gethTracing.BalanceChangeUnspecified) + require.NoError(t, db.Error()) + + // should have code to be able to set state + db.SetCode(addr1, []byte{1, 2, 3}) + require.NoError(t, db.Error()) + + db.SetState(addr1, key1, value1) + + ret := db.GetState(addr1, key1) + require.Equal(t, value1, ret) + + ret = db.GetCommittedState(addr1, key1) + require.Equal(t, gethCommon.Hash{}, ret) + + currentState, originalState := db.GetStateAndCommittedState(addr1, key1) + require.Equal(t, value1, currentState) + require.Equal(t, gethCommon.Hash{}, originalState) + + commit, err 
:= db.Commit(true) + require.NoError(t, err) + require.NotEmpty(t, commit) + + ret = db.GetCommittedState(addr1, key1) + require.Equal(t, value1, ret) + + currentState, originalState = db.GetStateAndCommittedState(addr1, key1) + require.Equal(t, value1, currentState) + require.Equal(t, value1, originalState) + + // create a new db + db, err = state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + bal := db.GetBalance(addr1) + require.NoError(t, db.Error()) + require.Equal(t, uint256.NewInt(5), bal) + + val := db.GetState(addr1, key1) + require.NoError(t, db.Error()) + require.Equal(t, value1, val) + + currentState, originalState = db.GetStateAndCommittedState(addr1, key1) + require.Equal(t, value1, currentState) + require.Equal(t, value1, originalState) + }) + + t.Run("test snapshot and revert functionality", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + db, err := state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + addr1 := testutils.RandomCommonAddress(t) + require.False(t, db.Exist(addr1)) + require.NoError(t, db.Error()) + + snapshot1 := db.Snapshot() + require.Equal(t, 1, snapshot1) + + db.CreateAccount(addr1) + require.NoError(t, db.Error()) + + require.True(t, db.Exist(addr1)) + require.NoError(t, db.Error()) + + db.AddBalance(addr1, uint256.NewInt(5), gethTracing.BalanceChangeUnspecified) + require.NoError(t, db.Error()) + + bal := db.GetBalance(addr1) + require.NoError(t, db.Error()) + require.Equal(t, uint256.NewInt(5), bal) + + snapshot2 := db.Snapshot() + require.Equal(t, 2, snapshot2) + + db.AddBalance(addr1, uint256.NewInt(5), gethTracing.BalanceChangeUnspecified) + require.NoError(t, db.Error()) + + bal = db.GetBalance(addr1) + require.NoError(t, db.Error()) + require.Equal(t, uint256.NewInt(10), bal) + + // revert to snapshot 2 + db.RevertToSnapshot(snapshot2) + require.NoError(t, db.Error()) + + bal = db.GetBalance(addr1) + require.NoError(t, db.Error()) + require.Equal(t, uint256.NewInt(5), bal) + + // revert to snapshot 1 + db.RevertToSnapshot(snapshot1) + require.NoError(t, db.Error()) + + bal = db.GetBalance(addr1) + require.NoError(t, db.Error()) + require.Equal(t, uint256.NewInt(0), bal) + + // revert to an invalid snapshot + db.RevertToSnapshot(10) + require.Error(t, db.Error()) + }) + + t.Run("test log functionality", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + db, err := state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + logs := []*gethTypes.Log{ + testutils.GetRandomLogFixture(t), + testutils.GetRandomLogFixture(t), + testutils.GetRandomLogFixture(t), + testutils.GetRandomLogFixture(t), + } + + db.AddLog(logs[0]) + db.AddLog(logs[1]) + + _ = db.Snapshot() + + db.AddLog(logs[2]) + db.AddLog(logs[3]) + + snapshot := db.Snapshot() + db.AddLog(testutils.GetRandomLogFixture(t)) + db.RevertToSnapshot(snapshot) + + ret := db.Logs(1, gethCommon.Hash{}, 1) + require.Equal(t, ret, logs) + }) + + t.Run("test refund functionality", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + db, err := state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + require.Equal(t, uint64(0), db.GetRefund()) + db.AddRefund(10) + require.Equal(t, uint64(10), db.GetRefund()) + db.SubRefund(3) + require.Equal(t, uint64(7), db.GetRefund()) + + snap1 := db.Snapshot() + db.AddRefund(10) + require.Equal(t, uint64(17), db.GetRefund()) + + db.RevertToSnapshot(snap1) + require.Equal(t, uint64(7), db.GetRefund()) + }) + + t.Run("test Prepare functionality", func(t *testing.T) { + ledger := 
testutils.GetSimpleValueStore() + db, err := state.NewStateDB(ledger, rootAddr) + + sender := testutils.RandomCommonAddress(t) + coinbase := testutils.RandomCommonAddress(t) + dest := testutils.RandomCommonAddress(t) + precompiles := []gethCommon.Address{ + testutils.RandomCommonAddress(t), + testutils.RandomCommonAddress(t), + } + + txAccesses := gethTypes.AccessList([]gethTypes.AccessTuple{ + {Address: testutils.RandomCommonAddress(t), + StorageKeys: []gethCommon.Hash{ + testutils.RandomCommonHash(t), + testutils.RandomCommonHash(t), + }, + }, + }) + + rules := gethParams.Rules{ + IsBerlin: true, + IsShanghai: true, + } + + require.NoError(t, err) + db.Prepare(rules, sender, coinbase, &dest, precompiles, txAccesses) + + require.True(t, db.AddressInAccessList(sender)) + require.True(t, db.AddressInAccessList(coinbase)) + require.True(t, db.AddressInAccessList(dest)) + + for _, add := range precompiles { + require.True(t, db.AddressInAccessList(add)) + } + + for _, el := range txAccesses { + for _, key := range el.StorageKeys { + addrFound, slotFound := db.SlotInAccessList(el.Address, key) + require.True(t, addrFound) + require.True(t, slotFound) + } + } + }) + + t.Run("test non-fatal error handling", func(t *testing.T) { + ledger := &testutils.TestValueStore{ + GetValueFunc: func(owner, key []byte) ([]byte, error) { + return nil, nil + }, + SetValueFunc: func(owner, key, value []byte) error { + return atree.NewUserError(fmt.Errorf("key not found")) + }, + AllocateSlabIndexFunc: func(owner []byte) (atree.SlabIndex, error) { + return atree.SlabIndex{}, nil + }, + } + db, err := state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + db.CreateAccount(testutils.RandomCommonAddress(t)) + commit, err := db.Commit(true) + // ret := db.Error() + require.Error(t, err) + require.Empty(t, commit) + // check wrapping + require.True(t, types.IsAStateError(err)) + }) + + t.Run("test fatal error handling", func(t *testing.T) { + ledger := &testutils.TestValueStore{ + GetValueFunc: func(owner, key []byte) ([]byte, error) { + return nil, nil + }, + SetValueFunc: func(owner, key, value []byte) error { + return atree.NewFatalError(fmt.Errorf("key not found")) + }, + AllocateSlabIndexFunc: func(owner []byte) (atree.SlabIndex, error) { + return atree.SlabIndex{}, nil + }, + } + db, err := state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + db.CreateAccount(testutils.RandomCommonAddress(t)) + + commit, err := db.Commit(true) + // ret := db.Error() + require.Error(t, err) + require.Empty(t, commit) + // check wrapping + require.True(t, types.IsAFatalError(err)) + }) + + t.Run("test storage root functionality", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + db, err := state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + addr1 := testutils.RandomCommonAddress(t) + + // non existing account + require.False(t, db.Exist(addr1)) + require.NoError(t, db.Error()) + root := db.GetStorageRoot(addr1) + require.NoError(t, db.Error()) + require.Equal(t, gethCommon.Hash{}, root) + + // accounts without slots + db.CreateAccount(addr1) + require.NoError(t, db.Error()) + commit, err := db.Commit(true) + require.NoError(t, err) + require.NotEmpty(t, commit) + + root = db.GetStorageRoot(addr1) + require.NoError(t, db.Error()) + require.Equal(t, gethTypes.EmptyRootHash, root) + + db.AddBalance(addr1, uint256.NewInt(100), gethTracing.BalanceChangeTouchAccount) + require.NoError(t, db.Error()) + commit, err = db.Commit(true) + require.NoError(t, err) + require.NotEmpty(t, commit) 
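+
+		// an account with a balance but no code or slots still reports the empty root hash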
+ + root = db.GetStorageRoot(addr1) + require.NoError(t, db.Error()) + require.Equal(t, gethTypes.EmptyRootHash, root) + + // add slots to the account + key := testutils.RandomCommonHash(t) + value := testutils.RandomCommonHash(t) + db.SetCode(addr1, []byte("somecode")) + require.NoError(t, db.Error()) + db.SetState(addr1, key, value) + require.NoError(t, db.Error()) + commit, err = db.Commit(true) + require.NoError(t, err) + require.NotEmpty(t, commit) + + root = db.GetStorageRoot(addr1) + require.NoError(t, db.Error()) + require.NotEqual(t, gethCommon.Hash{}, root) + require.NotEqual(t, gethTypes.EmptyRootHash, root) + }) + + t.Run("test Selfdestruct6780 functionality", func(t *testing.T) { + ledger := testutils.GetSimpleValueStore() + db, err := state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + + // test 1 - an already existing contract + // fail for selfdestruction + addr1 := testutils.RandomCommonAddress(t) + balance1 := uint256.NewInt(100) + code1 := []byte("some code") + db.CreateAccount(addr1) + db.SetCode(addr1, code1) + db.AddBalance(addr1, balance1, gethTracing.BalanceChangeTransfer) + require.NoError(t, db.Error()) + commit, err := db.Commit(true) + require.NoError(t, err) + require.NotEmpty(t, commit) + // renew db + db, err = state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + // call self destruct + db.SelfDestruct6780(addr1) + require.NoError(t, db.Error()) + // noop is expected + require.Equal(t, balance1, db.GetBalance(addr1)) + require.Equal(t, code1, db.GetCode(addr1)) + require.NoError(t, db.Error()) + + // test 2 - account exist before with some balance + // but not a contract - selfdestruct should work + addr2 := testutils.RandomCommonAddress(t) + balance2 := uint256.NewInt(200) + db.CreateAccount(addr2) + db.AddBalance(addr2, balance2, gethTracing.BalanceChangeTransfer) + require.NoError(t, db.Error()) + // commit and renew db + commit, err = db.Commit(true) + require.NoError(t, err) + require.NotEmpty(t, commit) + db, err = state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + // call self destruct should not work + db.SelfDestruct6780(addr2) + require.NoError(t, db.Error()) + // still no impact + require.Equal(t, balance2, db.GetBalance(addr2)) + require.Empty(t, db.GetCode(addr2)) + require.NoError(t, db.Error()) + // commit and renew db + commit, err = db.Commit(true) + require.NoError(t, err) + require.NotEmpty(t, commit) + db, err = state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + // set code and call contract creation + db.SetCode(addr2, code1) + db.CreateContract(addr2) + require.Equal(t, code1, db.GetCode(addr2)) + // now calling selfdestruct should do the job + db.SelfDestruct6780(addr2) + require.NoError(t, db.Error()) + commit, err = db.Commit(true) + require.NoError(t, err) + require.NotEmpty(t, commit) + db, err = state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + // now query + require.Equal(t, uint256.NewInt(0), db.GetBalance(addr2)) + require.Empty(t, db.GetCode(addr2)) + require.NoError(t, db.Error()) + + // test 3 - create account and call self destruct in a single operation + // selfdestruct should work + db, err = state.NewStateDB(ledger, rootAddr) + require.NoError(t, err) + addr3 := testutils.RandomCommonAddress(t) + balance3 := uint256.NewInt(300) + key := testutils.RandomCommonHash(t) + value := testutils.RandomCommonHash(t) + db.CreateAccount(addr3) + db.CreateContract(addr3) + db.SetCode(addr3, code1) + db.SetState(addr3, key, value) + db.AddBalance(addr3, balance3, 
gethTracing.BalanceChangeTransfer)
+		require.NoError(t, db.Error())
+		// call self destruct
+		db.SelfDestruct6780(addr3)
+		require.NoError(t, db.Error())
+		// commit changes
+		commit, err = db.Commit(true)
+		require.NoError(t, err)
+		require.NotEmpty(t, commit)
+		// renew db
+		db, err = state.NewStateDB(ledger, rootAddr)
+		require.NoError(t, err)
+		// account should not exist
+		require.False(t, db.Exist(addr3))
+		require.Equal(t, uint256.NewInt(0), db.GetBalance(addr3))
+		require.Empty(t, db.GetCode(addr3))
+		require.Equal(t, gethCommon.Hash{}, db.GetState(addr3, key))
+		require.NoError(t, db.Error())
+	})
+}
diff --git a/fvm/evm/emulator/state/state_growth_test.go b/fvm/evm/emulator/state/state_growth_test.go
new file mode 100644
index 00000000000..a5ffbdea21e
--- /dev/null
+++ b/fvm/evm/emulator/state/state_growth_test.go
@@ -0,0 +1,200 @@
+package state_test
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/tracing"
+	"github.com/holiman/uint256"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/fvm/evm/emulator/state"
+	"github.com/onflow/flow-go/fvm/evm/testutils"
+	"github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/io"
+)
+
+const (
+	storageBytesMetric = "storage_size_bytes"
+	storageItemsMetric = "storage_items"
+	bytesReadMetric    = "bytes_read"
+	bytesWrittenMetric = "bytes_written"
+)
+
+// This storage test is designed to evaluate the impact of state modifications on storage size.
+// It measures the bytes used in the underlying storage, aiming to understand how storage size scales with changes in state.
+// While the specific operation details are not crucial for this benchmark, the primary goal is to analyze how the storage
+// size evolves in response to state modifications.
+
+type storageTest struct {
+	store        *testutils.TestValueStore
+	addressIndex uint64
+	metrics      *metrics
+}
+
+func newStorageTest() (*storageTest, error) {
+	simpleStore := testutils.GetSimpleValueStore()
+
+	return &storageTest{
+		store:        simpleStore,
+		addressIndex: 100,
+		metrics:      newMetrics(),
+	}, nil
+}
+
+func (s *storageTest) newAddress() common.Address {
+	s.addressIndex++
+	var addr common.Address
+	binary.BigEndian.PutUint64(addr[12:], s.addressIndex)
+	return addr
+}
+
+// run executes the provided runner with a newly created state, which gets committed after the runner
+// is finished. Storage metrics are recorded with each run.
+func (s *storageTest) run(runner func(state types.StateDB)) error {
+	state, err := state.NewStateDB(s.store, flow.Address{0x01})
+	if err != nil {
+		return err
+	}
+
+	runner(state)
+
+	_, err = state.Commit(true)
+	if err != nil {
+		return err
+	}
+
+	s.metrics.add(bytesWrittenMetric, s.store.TotalBytesWritten())
+	s.metrics.add(bytesReadMetric, s.store.TotalBytesRead())
+	s.metrics.add(storageItemsMetric, s.store.TotalStorageItems())
+	s.metrics.add(storageBytesMetric, s.store.TotalStorageSize())
+
+	return nil
+}
+
+// metrics supports recording custom metrics, plotting them against a provided x-axis,
+// and generating a CSV export for visualisation.
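+//
+// A small usage sketch (hypothetical metric values):
+//
+//	m := newMetrics()
+//	m.add("storage_size_bytes", 1024)
+//	m.plot("accounts,storage_size", 0, m.get("storage_size_bytes"))
+//	csv := m.chartCSV("accounts,storage_size")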
+type metrics struct { + data map[string]int + charts map[string][][2]int +} + +func newMetrics() *metrics { + return &metrics{ + data: make(map[string]int), + charts: make(map[string][][2]int), + } +} + +func (m *metrics) add(name string, value int) { + m.data[name] = value +} + +func (m *metrics) get(name string) int { + return m.data[name] +} + +func (m *metrics) plot(chartName string, x int, y int) { + if _, ok := m.charts[chartName]; !ok { + m.charts[chartName] = make([][2]int, 0) + } + m.charts[chartName] = append(m.charts[chartName], [2]int{x, y}) +} + +func (m *metrics) chartCSV(name string) string { + c, ok := m.charts[name] + if !ok { + return "" + } + + s := strings.Builder{} + s.WriteString(name + "\n") // header + for _, line := range c { + s.WriteString(fmt.Sprintf("%d,%d\n", line[0], line[1])) + } + + return s.String() +} + +func Test_AccountCreations(t *testing.T) { + if os.Getenv("benchmark") == "" { + t.Skip("Skipping benchmarking") + } + + tester, err := newStorageTest() + require.NoError(t, err) + + accountChart := "accounts,storage_size" + maxAccounts := 50_000 + for i := 0; i < maxAccounts; i++ { + err = tester.run(func(state types.StateDB) { + state.AddBalance(tester.newAddress(), uint256.NewInt(100), tracing.BalanceChangeUnspecified) + }) + require.NoError(t, err) + + if i%50 == 0 { // plot with resolution + tester.metrics.plot(accountChart, i, tester.metrics.get(storageBytesMetric)) + } + } + + csv := tester.metrics.chartCSV(accountChart) + err = io.WriteFile("./account_storage_size.csv", []byte(csv)) + require.NoError(t, err) +} + +func Test_AccountContractInteraction(t *testing.T) { + if os.Getenv("benchmark") == "" { + t.Skip("Skipping benchmarking") + } + + tester, err := newStorageTest() + require.NoError(t, err) + interactionChart := "interactions,storage_size_bytes" + + // build test contract storage state + contractState := make(map[common.Hash]common.Hash) + for i := 0; i < 10; i++ { + h := common.HexToHash(fmt.Sprintf("%d", i)) + v := common.HexToHash(fmt.Sprintf("%d %s", i, make([]byte, 32))) + contractState[h] = v + } + + // build test contract code, aprox kitty contract size + code := make([]byte, 50000) + + interactions := 50000 + for i := 0; i < interactions; i++ { + err = tester.run(func(state types.StateDB) { + // create a new account + accAddr := tester.newAddress() + state.AddBalance(accAddr, uint256.NewInt(100), tracing.BalanceChangeUnspecified) + + // create a contract + contractAddr := tester.newAddress() + state.AddBalance(contractAddr, uint256.NewInt(uint64(i)), tracing.BalanceChangeUnspecified) + state.SetCode(contractAddr, code) + + for k, v := range contractState { + state.SetState(contractAddr, k, v) + } + + // simulate interaction with contract state and account balance for fees + state.SetState(contractAddr, common.HexToHash("0x03"), common.HexToHash("0x40")) + state.AddBalance(accAddr, uint256.NewInt(1), tracing.BalanceChangeUnspecified) + }) + require.NoError(t, err) + + if i%50 == 0 { // plot with resolution + tester.metrics.plot(interactionChart, i, tester.metrics.get(storageBytesMetric)) + } + } + + csv := tester.metrics.chartCSV(interactionChart) + err = io.WriteFile("./interactions_storage_size.csv", []byte(csv)) + require.NoError(t, err) +} diff --git a/fvm/evm/emulator/state/updateCommitter.go b/fvm/evm/emulator/state/updateCommitter.go new file mode 100644 index 00000000000..389c5e6e001 --- /dev/null +++ b/fvm/evm/emulator/state/updateCommitter.go @@ -0,0 +1,133 @@ +package state + +import ( + "encoding/binary" + + gethCommon 
"github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" + "github.com/onflow/crypto/hash" +) + +type OpCode byte + +const ( + UnknownOpCode OpCode = 0 + + AccountCreationOpCode OpCode = 1 + AccountUpdateOpCode OpCode = 2 + AccountDeletionOpCode OpCode = 3 + SlotUpdateOpCode OpCode = 4 +) + +const ( + opcodeByteSize = 1 + addressByteSize = gethCommon.AddressLength + nonceByteSize = 8 + balanceByteSize = 32 + hashByteSize = gethCommon.HashLength + accountDeletionBufferSize = opcodeByteSize + addressByteSize + accountCreationBufferSize = opcodeByteSize + + addressByteSize + + nonceByteSize + + balanceByteSize + + hashByteSize + accountUpdateBufferSize = accountCreationBufferSize + slotUpdateBufferSize = opcodeByteSize + + addressByteSize + + hashByteSize + + hashByteSize +) + +// UpdateCommitter captures operations (delta) through +// a set of calls (order matters) and constructs a commitment over the state changes. +type UpdateCommitter struct { + hasher hash.Hasher +} + +// NewUpdateCommitter constructs a new UpdateCommitter +func NewUpdateCommitter() *UpdateCommitter { + return &UpdateCommitter{ + hasher: hash.NewSHA3_256(), + } +} + +// CreateAccount captures a create account operation +func (dc *UpdateCommitter) CreateAccount( + addr gethCommon.Address, + balance *uint256.Int, + nonce uint64, + codeHash gethCommon.Hash, +) error { + buffer := make([]byte, accountCreationBufferSize) + var index int + buffer[0] = byte(AccountCreationOpCode) + index += opcodeByteSize + copy(buffer[index:index+addressByteSize], addr.Bytes()) + index += addressByteSize + encodedBalance := balance.Bytes32() + copy(buffer[index:index+balanceByteSize], encodedBalance[:]) + index += balanceByteSize + binary.BigEndian.PutUint64(buffer[index:index+nonceByteSize], nonce) + index += nonceByteSize + copy(buffer[index:index+hashByteSize], codeHash.Bytes()) + _, err := dc.hasher.Write(buffer) + return err +} + +// UpdateAccount captures an update account operation +func (dc *UpdateCommitter) UpdateAccount( + addr gethCommon.Address, + balance *uint256.Int, + nonce uint64, + codeHash gethCommon.Hash, +) error { + buffer := make([]byte, accountUpdateBufferSize) + var index int + buffer[0] = byte(AccountUpdateOpCode) + index += opcodeByteSize + copy(buffer[index:index+addressByteSize], addr.Bytes()) + index += addressByteSize + encodedBalance := balance.Bytes32() + copy(buffer[index:index+balanceByteSize], encodedBalance[:]) + index += balanceByteSize + binary.BigEndian.PutUint64(buffer[index:index+nonceByteSize], nonce) + index += nonceByteSize + copy(buffer[index:index+hashByteSize], codeHash.Bytes()) + _, err := dc.hasher.Write(buffer) + return err +} + +// DeleteAccount captures a delete account operation +func (dc *UpdateCommitter) DeleteAccount(addr gethCommon.Address) error { + buffer := make([]byte, accountDeletionBufferSize) + var index int + buffer[0] = byte(AccountDeletionOpCode) + index += opcodeByteSize + copy(buffer[index:index+addressByteSize], addr.Bytes()) + _, err := dc.hasher.Write(buffer) + return err +} + +// UpdateSlot captures a update slot operation +func (dc *UpdateCommitter) UpdateSlot( + addr gethCommon.Address, + key gethCommon.Hash, + value gethCommon.Hash, +) error { + buffer := make([]byte, slotUpdateBufferSize) + var index int + buffer[0] = byte(SlotUpdateOpCode) + index += opcodeByteSize + copy(buffer[index:index+addressByteSize], addr.Bytes()) + index += addressByteSize + copy(buffer[index:index+hashByteSize], key.Bytes()) + index += hashByteSize + 
copy(buffer[index:index+hashByteSize], value.Bytes()) + _, err := dc.hasher.Write(buffer) + return err +} + +// Commitment calculates and returns the commitment +func (dc *UpdateCommitter) Commitment() hash.Hash { + return dc.hasher.SumHash() +} diff --git a/fvm/evm/emulator/state/updateCommitter_test.go b/fvm/evm/emulator/state/updateCommitter_test.go new file mode 100644 index 00000000000..ab0be67a08f --- /dev/null +++ b/fvm/evm/emulator/state/updateCommitter_test.go @@ -0,0 +1,129 @@ +package state_test + +import ( + "testing" + + "github.com/holiman/uint256" + "github.com/onflow/crypto/hash" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/emulator/state" + "github.com/onflow/flow-go/fvm/evm/testutils" +) + +func TestChangeCommitter(t *testing.T) { + + addr := testutils.RandomAddress(t).ToCommon() + balance := uint256.NewInt(200) + nonce := uint64(1) + nonceBytes := []byte{0, 0, 0, 0, 0, 0, 0, 1} + randomHash := testutils.RandomCommonHash(t) + key := testutils.RandomCommonHash(t) + value := testutils.RandomCommonHash(t) + + t.Run("test create account", func(t *testing.T) { + dc := state.NewUpdateCommitter() + err := dc.CreateAccount(addr, balance, nonce, randomHash) + require.NoError(t, err) + + hasher := hash.NewSHA3_256() + + input := []byte{byte(state.AccountCreationOpCode)} + input = append(input, addr.Bytes()...) + encodedBalance := balance.Bytes32() + input = append(input, encodedBalance[:]...) + input = append(input, nonceBytes...) + input = append(input, randomHash.Bytes()...) + + n, err := hasher.Write(input) + require.NoError(t, err) + require.Equal(t, 93, n) + + expectedCommit := hasher.SumHash() + commit := dc.Commitment() + require.Equal(t, expectedCommit, commit) + }) + + t.Run("test update account", func(t *testing.T) { + dc := state.NewUpdateCommitter() + err := dc.UpdateAccount(addr, balance, nonce, randomHash) + require.NoError(t, err) + + hasher := hash.NewSHA3_256() + input := []byte{byte(state.AccountUpdateOpCode)} + input = append(input, addr.Bytes()...) + encodedBalance := balance.Bytes32() + input = append(input, encodedBalance[:]...) + input = append(input, nonceBytes...) + input = append(input, randomHash.Bytes()...) + + n, err := hasher.Write(input) + require.NoError(t, err) + require.Equal(t, 93, n) + + expectedCommit := hasher.SumHash() + commit := dc.Commitment() + require.Equal(t, expectedCommit, commit) + }) + + t.Run("test delete account", func(t *testing.T) { + dc := state.NewUpdateCommitter() + err := dc.DeleteAccount(addr) + require.NoError(t, err) + + hasher := hash.NewSHA3_256() + input := []byte{byte(state.AccountDeletionOpCode)} + input = append(input, addr.Bytes()...) + + n, err := hasher.Write(input) + require.NoError(t, err) + require.Equal(t, 21, n) + + expectedCommit := hasher.SumHash() + commit := dc.Commitment() + require.Equal(t, expectedCommit, commit) + }) + + t.Run("test update slot", func(t *testing.T) { + dc := state.NewUpdateCommitter() + err := dc.UpdateSlot(addr, key, value) + require.NoError(t, err) + + hasher := hash.NewSHA3_256() + + input := []byte{byte(state.SlotUpdateOpCode)} + input = append(input, addr.Bytes()...) + input = append(input, key[:]...) + input = append(input, value[:]...) 
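+		// expected preimage: opcode (1) + address (20) + key (32) + value (32) = 85 bytes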
+		n, err := hasher.Write(input)
+		require.NoError(t, err)
+		require.Equal(t, 85, n)
+
+		expectedCommit := hasher.SumHash()
+		commit := dc.Commitment()
+		require.Equal(t, expectedCommit, commit)
+	})
+}
+
+func BenchmarkDeltaCommitter(b *testing.B) {
+	addr := testutils.RandomAddress(b)
+	balance := uint256.NewInt(200)
+	nonce := uint64(100)
+	randomHash := testutils.RandomCommonHash(b)
+	dc := state.NewUpdateCommitter()
+
+	numberOfAccountUpdates := 10
+	for i := 0; i < numberOfAccountUpdates; i++ {
+		err := dc.UpdateAccount(addr.ToCommon(), balance, nonce, randomHash)
+		require.NoError(b, err)
+	}
+
+	numberOfSlotUpdates := 10
+	for i := 0; i < numberOfSlotUpdates; i++ {
+		err := dc.UpdateSlot(addr.ToCommon(), randomHash, randomHash)
+		require.NoError(b, err)
+	}
+	com := dc.Commitment()
+	require.NotEmpty(b, com)
+}
diff --git a/fvm/evm/emulator/tracker.go b/fvm/evm/emulator/tracker.go
new file mode 100644
index 00000000000..e7f3f6fa09c
--- /dev/null
+++ b/fvm/evm/emulator/tracker.go
@@ -0,0 +1,125 @@
+package emulator
+
+import (
+	"bytes"
+	"sort"
+
+	"github.com/onflow/flow-go/fvm/evm/precompiles"
+	"github.com/onflow/flow-go/fvm/evm/types"
+)
+
+// CallTracker captures precompiled calls
+type CallTracker struct {
+	callsByAddress map[types.Address]*types.PrecompiledCalls
+}
+
+// NewCallTracker constructs a new CallTracker
+func NewCallTracker() *CallTracker {
+	return &CallTracker{}
+}
+
+// RegisterPrecompiledContract registers a precompiled contract for tracking
+func (ct *CallTracker) RegisterPrecompiledContract(pc types.PrecompiledContract) types.PrecompiledContract {
+	return &WrappedPrecompiledContract{
+		pc: pc,
+		ct: ct,
+	}
+}
+
+// CaptureRequiredGas captures a required gas call
+func (ct *CallTracker) CaptureRequiredGas(address types.Address, input []byte, output uint64) {
+	if ct.callsByAddress == nil {
+		ct.callsByAddress = make(map[types.Address]*types.PrecompiledCalls)
+	}
+	calls, found := ct.callsByAddress[address]
+	if !found {
+		calls = &types.PrecompiledCalls{
+			Address: address,
+		}
+		ct.callsByAddress[address] = calls
+	}
+
+	calls.RequiredGasCalls = append(calls.RequiredGasCalls, output)
+}
+
+// CaptureRun captures a run call
+func (ct *CallTracker) CaptureRun(address types.Address, input []byte, output []byte, err error) {
+	if ct.callsByAddress == nil {
+		ct.callsByAddress = make(map[types.Address]*types.PrecompiledCalls)
+	}
+	calls, found := ct.callsByAddress[address]
+	if !found {
+		calls = &types.PrecompiledCalls{
+			Address: address,
+		}
+		ct.callsByAddress[address] = calls
+	}
+	errMsg := ""
+	if err != nil {
+		errMsg = err.Error()
+	}
+	calls.RunCalls = append(calls.RunCalls, types.RunCall{
+		Output:   output,
+		ErrorMsg: errMsg,
+	})
+}
+
+// IsCalled returns true if any call has been captured
+func (ct *CallTracker) IsCalled() bool {
+	return len(ct.callsByAddress) != 0
+}
+
+// CapturedCalls returns an encoded representation of all captured calls,
+// or nil if no call has been captured.
+func (ct *CallTracker) CapturedCalls() ([]byte, error) {
+	if !ct.IsCalled() {
+		return nil, nil
+	}
+	// else constructs an aggregated precompiled calls
+	apc := make(types.AggregatedPrecompiledCalls, 0)
+
+	sortedAddresses := make([]types.Address, 0, len(ct.callsByAddress))
+	// we need to sort by address to stay deterministic
+	for addr := range ct.callsByAddress {
+		sortedAddresses = append(sortedAddresses, addr)
+	}
+
+	sort.Slice(sortedAddresses,
+		func(i, j int) bool {
+			return bytes.Compare(sortedAddresses[i][:], sortedAddresses[j][:]) < 0
+		})
+
+	for _, addr := range sortedAddresses {
+		apc = append(apc, *ct.callsByAddress[addr])
+	}
+
+	return apc.Encode()
+}
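+
+// Illustrative flow (a hedged sketch; the surrounding engine wiring is assumed,
+// and pc stands for any types.PrecompiledContract):
+//
+//	ct := NewCallTracker()
+//	pc = ct.RegisterPrecompiledContract(pc) // wrap before handing to the EVM
+//	// ... execute transactions ...
+//	if ct.IsCalled() {
+//		encoded, err := ct.CapturedCalls()
+//		_ = encoded // e.g. attach to the transaction result
+//		_ = err
+//	}
+//	ct.Reset()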
+// Reset resets the tracker
+func (ct *CallTracker) Reset() {
+	ct.callsByAddress = nil
+}
+
+// WrappedPrecompiledContract wraps a precompiled contract so that its calls
+// are reported to the CallTracker.
+type WrappedPrecompiledContract struct {
+	pc types.PrecompiledContract
+	ct *CallTracker
+}
+
+func (wpc *WrappedPrecompiledContract) Address() types.Address {
+	return wpc.pc.Address()
+}
+func (wpc *WrappedPrecompiledContract) RequiredGas(input []byte) uint64 {
+	output := wpc.pc.RequiredGas(input)
+	wpc.ct.CaptureRequiredGas(wpc.pc.Address(), input, output)
+	return output
+}
+
+func (wpc *WrappedPrecompiledContract) Run(input []byte) ([]byte, error) {
+	output, err := wpc.pc.Run(input)
+	wpc.ct.CaptureRun(wpc.pc.Address(), input, output, err)
+	return output, err
+}
+
+func (wpc *WrappedPrecompiledContract) Name() string {
+	return precompiles.CADENCE_ARCH_PRECOMPILE_NAME
+}
diff --git a/fvm/evm/emulator/tracker_test.go b/fvm/evm/emulator/tracker_test.go
new file mode 100644
index 00000000000..19f8ee57a28
--- /dev/null
+++ b/fvm/evm/emulator/tracker_test.go
@@ -0,0 +1,78 @@
+package emulator_test
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/fvm/evm/emulator"
+	"github.com/onflow/flow-go/fvm/evm/testutils"
+	"github.com/onflow/flow-go/fvm/evm/types"
+)
+
+func TestTracker(t *testing.T) {
+	apc := testutils.AggregatedPrecompiledCallsFixture(t)
+	var runCallCounter int
+	var requiredGasCallCounter int
+
+	reqGasCallInputs := make([][]byte, len(apc[0].RequiredGasCalls))
+	runCallInputs := make([][]byte, len(apc[0].RunCalls))
+
+	for i := range apc[0].RequiredGasCalls {
+		reqGasCallInputs[i] = testutils.RandomData(t)
+	}
+
+	for i := range apc[0].RunCalls {
+		runCallInputs[i] = testutils.RandomData(t)
+	}
+
+	pc := &MockedPrecompiled{
+		AddressFunc: func() types.Address {
+			return apc[0].Address
+		},
+		RequiredGasFunc: func(input []byte) uint64 {
+			res := apc[0].RequiredGasCalls[requiredGasCallCounter]
+			require.Equal(t, reqGasCallInputs[requiredGasCallCounter], input)
+			requiredGasCallCounter += 1
+			return res
+		},
+		RunFunc: func(input []byte) ([]byte, error) {
+			res := apc[0].RunCalls[runCallCounter]
+			require.Equal(t, runCallInputs[runCallCounter], input)
+			runCallCounter += 1
+			var err error
+			if len(res.ErrorMsg) > 0 {
+				err = errors.New(res.ErrorMsg)
+			}
+			return res.Output, err
+		},
+	}
+	tracker := emulator.NewCallTracker()
+	wpc := tracker.RegisterPrecompiledContract(pc)
+
+	require.Equal(t, apc[0].Address, wpc.Address())
+
+	for _, pc := range apc {
+		for i, call := range pc.RequiredGasCalls {
+			require.Equal(t, call, wpc.RequiredGas(reqGasCallInputs[i]))
+		}
+		for i, call := range pc.RunCalls {
+			ret, err := wpc.Run(runCallInputs[i])
+			require.Equal(t, call.Output, ret)
+			errMsg := ""
+			if err != nil {
+				errMsg = err.Error()
+			}
+			require.Equal(t, call.ErrorMsg, errMsg)
+		}
+
+	}
+	require.True(t, tracker.IsCalled())
+
+	expectedEncoded, err := apc.Encode()
+	require.NoError(t, err)
+	encoded, err := tracker.CapturedCalls()
+	require.NoError(t, err)
+	require.Equal(t, expectedEncoded, encoded)
+}
diff --git a/fvm/evm/events/events.go b/fvm/evm/events/events.go
new file mode 100644
index 00000000000..b9b2c288b0a
--- /dev/null
+++ b/fvm/evm/events/events.go
@@ -0,0 +1,282 @@
+package events
+
+import (
+	"fmt"
+
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/onflow/cadence"
+	"github.com/onflow/cadence/encoding/ccf"
+
+	"github.com/onflow/flow-go/fvm/evm/stdlib"
+	"github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+const (
flow.EventType = "EVM.BlockExecuted" + EventTypeTransactionExecuted flow.EventType = "EVM.TransactionExecuted" +) + +type EventPayload interface { + // ToCadence converts the event to Cadence event + ToCadence(chainID flow.ChainID) (cadence.Event, error) +} + +type Event struct { + Etype flow.EventType + Payload EventPayload +} + +// todo we might have to break this event into two (tx included /tx executed) if size becomes an issue + +type transactionEvent struct { + Payload []byte // transaction RLP-encoded payload + Result *types.Result // transaction execution result + BlockHeight uint64 +} + +// NewTransactionEvent creates a new transaction event with the given parameters +// - result: the result of the transaction execution +// - payload: the RLP-encoded payload of the transaction +// - blockHeight: the height of the block where the transaction is included +func NewTransactionEvent( + result *types.Result, + payload []byte, + blockHeight uint64, +) *Event { + return &Event{ + Etype: EventTypeTransactionExecuted, + Payload: &transactionEvent{ + BlockHeight: blockHeight, + Payload: payload, + Result: result, + }, + } +} + +func (p *transactionEvent) ToCadence(chainID flow.ChainID) (cadence.Event, error) { + encodedLogs, err := p.Result.RLPEncodedLogs() + if err != nil { + return cadence.Event{}, err + } + + eventType := stdlib.CadenceTypesForChain(chainID).TransactionExecuted + + return cadence.NewEvent([]cadence.Value{ + hashToCadenceArrayValue(p.Result.TxHash), + cadence.NewUInt16(p.Result.Index), + cadence.NewUInt8(p.Result.TxType), + bytesToCadenceUInt8ArrayValue(p.Payload), + cadence.NewUInt16(uint16(p.Result.ResultSummary().ErrorCode)), + cadence.String(p.Result.ErrorMessageWithRevertReason()), + cadence.NewUInt64(p.Result.GasConsumed), + cadence.String(p.Result.DeployedContractAddressString()), + bytesToCadenceUInt8ArrayValue(encodedLogs), + cadence.NewUInt64(p.BlockHeight), + bytesToCadenceUInt8ArrayValue(p.Result.ReturnedData), + bytesToCadenceUInt8ArrayValue(p.Result.PrecompiledCalls), + checksumToCadenceArrayValue(p.Result.StateChangeChecksum()), + }).WithType(eventType), nil +} + +type blockEvent struct { + *types.Block +} + +// NewBlockEvent creates a new block event with the given block as payload. 
+func NewBlockEvent(block *types.Block) *Event {
+	return &Event{
+		Etype:   EventTypeBlockExecuted,
+		Payload: &blockEvent{block},
+	}
+}
+
+func (p *blockEvent) ToCadence(chainID flow.ChainID) (cadence.Event, error) {
+	blockHash, err := p.Hash()
+	if err != nil {
+		return cadence.Event{}, err
+	}
+
+	eventType := stdlib.CadenceTypesForChain(chainID).BlockExecuted
+
+	return cadence.NewEvent([]cadence.Value{
+		cadence.NewUInt64(p.Height),
+		hashToCadenceArrayValue(blockHash),
+		cadence.NewUInt64(p.Timestamp),
+		cadence.NewIntFromBig(p.TotalSupply),
+		cadence.NewUInt64(p.TotalGasUsed),
+		hashToCadenceArrayValue(p.ParentBlockHash),
+		hashToCadenceArrayValue(p.ReceiptRoot),
+		hashToCadenceArrayValue(p.TransactionHashRoot),
+		hashToCadenceArrayValue(p.PrevRandao),
+	}).WithType(eventType), nil
+}
+
+type BlockEventPayload struct {
+	Height              uint64          `cadence:"height"`
+	Hash                gethCommon.Hash `cadence:"hash"`
+	Timestamp           uint64          `cadence:"timestamp"`
+	TotalSupply         cadence.Int     `cadence:"totalSupply"`
+	TotalGasUsed        uint64          `cadence:"totalGasUsed"`
+	ParentBlockHash     gethCommon.Hash `cadence:"parentHash"`
+	ReceiptRoot         gethCommon.Hash `cadence:"receiptRoot"`
+	TransactionHashRoot gethCommon.Hash `cadence:"transactionHashRoot"`
+	PrevRandao          gethCommon.Hash `cadence:"prevrandao"`
+}
+
+// blockEventPayloadV0 is the legacy format of the block event, without the prevrandao field
+type blockEventPayloadV0 struct {
+	Height              uint64          `cadence:"height"`
+	Hash                gethCommon.Hash `cadence:"hash"`
+	Timestamp           uint64          `cadence:"timestamp"`
+	TotalSupply         cadence.Int     `cadence:"totalSupply"`
+	TotalGasUsed        uint64          `cadence:"totalGasUsed"`
+	ParentBlockHash     gethCommon.Hash `cadence:"parentHash"`
+	ReceiptRoot         gethCommon.Hash `cadence:"receiptRoot"`
+	TransactionHashRoot gethCommon.Hash `cadence:"transactionHashRoot"`
+}
+
+// decodeLegacyBlockEventPayload decodes any legacy block format into
+// the current version of the block event payload.
+func decodeLegacyBlockEventPayload(event cadence.Event) (*BlockEventPayload, error) {
+	var lb blockEventPayloadV0
+	if err := cadence.DecodeFields(event, &lb); err != nil {
+		return nil, err
+	}
+
+	return &BlockEventPayload{
+		Height:              lb.Height,
+		Hash:                lb.Hash,
+		Timestamp:           lb.Timestamp,
+		TotalSupply:         lb.TotalSupply,
+		TotalGasUsed:        lb.TotalGasUsed,
+		ParentBlockHash:     lb.ParentBlockHash,
+		ReceiptRoot:         lb.ReceiptRoot,
+		TransactionHashRoot: lb.TransactionHashRoot,
+	}, nil
+}
+
+// DecodeBlockEventPayload decodes a Cadence event into a block event payload.
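+// If the event does not match the current shape, it falls back to the legacy
+// (v0) shape, which lacks the prevrandao field.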
+func DecodeBlockEventPayload(event cadence.Event) (*BlockEventPayload, error) {
+	var block BlockEventPayload
+	if err := cadence.DecodeFields(event, &block); err != nil {
+		if block, err := decodeLegacyBlockEventPayload(event); err == nil {
+			return block, nil
+		}
+		return nil, err
+	}
+	return &block, nil
+}
+
+type TransactionEventPayload struct {
+	Hash                gethCommon.Hash            `cadence:"hash"`
+	Index               uint16                     `cadence:"index"`
+	TransactionType     uint8                      `cadence:"type"`
+	Payload             []byte                     `cadence:"payload"`
+	ErrorCode           uint16                     `cadence:"errorCode"`
+	GasConsumed         uint64                     `cadence:"gasConsumed"`
+	ContractAddress     string                     `cadence:"contractAddress"`
+	Logs                []byte                     `cadence:"logs"`
+	BlockHeight         uint64                     `cadence:"blockHeight"`
+	ErrorMessage        string                     `cadence:"errorMessage"`
+	ReturnedData        []byte                     `cadence:"returnedData"`
+	PrecompiledCalls    []byte                     `cadence:"precompiledCalls"`
+	StateUpdateChecksum [types.ChecksumLength]byte `cadence:"stateUpdateChecksum"`
+}
+
+// transactionEventPayloadV0 is the legacy format of the transaction event, without the stateUpdateChecksum field
+type transactionEventPayloadV0 struct {
+	Hash             gethCommon.Hash `cadence:"hash"`
+	Index            uint16          `cadence:"index"`
+	TransactionType  uint8           `cadence:"type"`
+	Payload          []byte          `cadence:"payload"`
+	ErrorCode        uint16          `cadence:"errorCode"`
+	GasConsumed      uint64          `cadence:"gasConsumed"`
+	ContractAddress  string          `cadence:"contractAddress"`
+	Logs             []byte          `cadence:"logs"`
+	BlockHeight      uint64          `cadence:"blockHeight"`
+	ErrorMessage     string          `cadence:"errorMessage"`
+	ReturnedData     []byte          `cadence:"returnedData"`
+	PrecompiledCalls []byte          `cadence:"precompiledCalls"`
+}
+
+// decodeLegacyTransactionEventPayload decodes any legacy transaction format into
+// the current version of the transaction event payload.
+func decodeLegacyTransactionEventPayload(event cadence.Event) (*TransactionEventPayload, error) {
+	var tx transactionEventPayloadV0
+	if err := cadence.DecodeFields(event, &tx); err != nil {
+		return nil, err
+	}
+	return &TransactionEventPayload{
+		Hash:             tx.Hash,
+		Index:            tx.Index,
+		TransactionType:  tx.TransactionType,
+		Payload:          tx.Payload,
+		ErrorCode:        tx.ErrorCode,
+		GasConsumed:      tx.GasConsumed,
+		ContractAddress:  tx.ContractAddress,
+		Logs:             tx.Logs,
+		BlockHeight:      tx.BlockHeight,
+		ErrorMessage:     tx.ErrorMessage,
+		ReturnedData:     tx.ReturnedData,
+		PrecompiledCalls: tx.PrecompiledCalls,
+	}, nil
+}
+
+// DecodeTransactionEventPayload decodes a Cadence event into a transaction event payload.
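+// Legacy (v0) payloads, which lack the stateUpdateChecksum field, are handled
+// via a best-effort fallback.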
+func DecodeTransactionEventPayload(event cadence.Event) (*TransactionEventPayload, error) {
+	var tx TransactionEventPayload
+	if err := cadence.DecodeFields(event, &tx); err != nil {
+		if legTx, err := decodeLegacyTransactionEventPayload(event); err == nil {
+			return legTx, nil
+		}
+		return nil, err
+	}
+	return &tx, nil
+}
+
+// FLOWTokensDepositedEventPayload captures payloads for a FlowTokensDeposited event
+type FLOWTokensDepositedEventPayload struct {
+	Address                string         `cadence:"address"`
+	Amount                 cadence.UFix64 `cadence:"amount"`
+	DepositedUUID          uint64         `cadence:"depositedUUID"`
+	BalanceAfterInAttoFlow cadence.Int    `cadence:"balanceAfterInAttoFlow"`
+}
+
+// DecodeFLOWTokensDepositedEventPayload decodes a Flow FLOWTokensDeposited
+// event into a FLOWTokensDepositedEventPayload
+func DecodeFLOWTokensDepositedEventPayload(event cadence.Event) (*FLOWTokensDepositedEventPayload, error) {
+	var payload FLOWTokensDepositedEventPayload
+	err := cadence.DecodeFields(event, &payload)
+	return &payload, err
+}
+
+// FLOWTokensWithdrawnEventPayload captures payloads for a FlowTokensWithdrawn event
+type FLOWTokensWithdrawnEventPayload struct {
+	Address                string         `cadence:"address"`
+	Amount                 cadence.UFix64 `cadence:"amount"`
+	WithdrawnUUID          uint64         `cadence:"withdrawnUUID"`
+	BalanceAfterInAttoFlow cadence.Int    `cadence:"balanceAfterInAttoFlow"`
+}
+
+// DecodeFLOWTokensWithdrawnEventPayload decodes a Flow FLOWTokensWithdrawn
+// event into a FLOWTokensWithdrawnEventPayload
+func DecodeFLOWTokensWithdrawnEventPayload(event cadence.Event) (*FLOWTokensWithdrawnEventPayload, error) {
+	var payload FLOWTokensWithdrawnEventPayload
+	err := cadence.DecodeFields(event, &payload)
+	return &payload, err
+}
+
+func FlowEventToCadenceEvent(event flow.Event) (cadence.Event, error) {
+	ev, err := ccf.Decode(nil, event.Payload)
+	if err != nil {
+		return cadence.Event{}, err
+	}
+
+	cadenceEvent, ok := ev.(cadence.Event)
+	if !ok {
+		return cadence.Event{}, fmt.Errorf("event cannot be cast to a cadence event")
+	}
+	return cadenceEvent, nil
+}
diff --git a/fvm/evm/events/events_test.go b/fvm/evm/events/events_test.go
new file mode 100644
index 00000000000..cc36d30e35f
--- /dev/null
+++ b/fvm/evm/events/events_test.go
@@ -0,0 +1,200 @@
+package events_test
+
+import (
+	"encoding/hex"
+	"math/big"
+	"testing"
+
+	"github.com/onflow/flow-go/fvm/evm/events"
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+	"github.com/onflow/flow-go/model/flow"
+
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	gethTypes "github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/rlp"
+	cdcCommon "github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/encoding/ccf"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/fvm/evm/testutils"
+	"github.com/onflow/flow-go/fvm/evm/types"
+)
+
+func TestEVMBlockExecutedEventCCFEncodingDecoding(t *testing.T) {
+	t.Parallel()
+
+	block := &types.Block{
+		Height:              2,
+		Timestamp:           100,
+		TotalSupply:         big.NewInt(1500),
+		ParentBlockHash:     gethCommon.HexToHash("0x2813452cff514c3054ac9f40cd7ce1b016cc78ab7f99f1c6d49708837f6e06d1"),
+		ReceiptRoot:         gethCommon.Hash{},
+		TotalGasUsed:        15,
+		TransactionHashRoot: gethCommon.HexToHash("0x70b67ce6710355acf8d69b2ea013d34e212bc4824926c5d26f189c1ca9667246"),
+		PrevRandao:          testutils.RandomCommonHash(t),
+	}
+
+	event := events.NewBlockEvent(block)
+	ev, err := event.Payload.ToCadence(flow.Emulator)
+	require.NoError(t,
err) + + bep, err := events.DecodeBlockEventPayload(ev) + require.NoError(t, err) + + assert.Equal(t, bep.Height, block.Height) + + blockHash, err := block.Hash() + require.NoError(t, err) + assert.Equal(t, bep.Hash, blockHash) + + assert.Equal(t, bep.TotalSupply.Value, block.TotalSupply) + assert.Equal(t, bep.Timestamp, block.Timestamp) + assert.Equal(t, bep.TotalGasUsed, block.TotalGasUsed) + assert.Equal(t, bep.ParentBlockHash, block.ParentBlockHash) + assert.Equal(t, bep.ReceiptRoot, block.ReceiptRoot) + assert.Equal(t, bep.TransactionHashRoot, block.TransactionHashRoot) + assert.Equal(t, bep.PrevRandao, block.PrevRandao) + + v, err := ccf.Encode(ev) + require.NoError(t, err) + assert.Equal(t, ccf.HasMsgPrefix(v), true) + + evt, err := ccf.Decode(nil, v) + require.NoError(t, err) + + sc := systemcontracts.SystemContractsForChain(flow.Emulator) + + assert.Equal(t, + cdcCommon.NewAddressLocation( + nil, + cdcCommon.Address(sc.EVMContract.Address), + string(events.EventTypeBlockExecuted), + ).ID(), + evt.Type().ID(), + ) +} + +func TestEVMTransactionExecutedEventCCFEncodingDecoding(t *testing.T) { + t.Parallel() + + txEncoded := "fff83b81ff0194000000000000000000000000000000000000000094000000000000000000000000000000000000000180895150ae84a8cdf00000825208" + txBytes, err := hex.DecodeString(txEncoded) + require.NoError(t, err) + txHash := testutils.RandomCommonHash(t) + blockHash := testutils.RandomCommonHash(t) + data := "000000000000000000000000000000000000000000000000000000000000002a" + stateUpdateCommit := testutils.RandomCommonHash(t).Bytes() + dataBytes, err := hex.DecodeString(data) + require.NoError(t, err) + blockHeight := uint64(2) + deployedAddress := types.NewAddress(gethCommon.HexToAddress("0x99466ed2e37b892a2ee3e9cd55a98b68f5735db2")) + log := &gethTypes.Log{ + Index: 1, + BlockNumber: blockHeight, + BlockHash: blockHash, + TxHash: txHash, + TxIndex: 3, + Address: gethCommon.HexToAddress("0x99466ed2e37b892a2ee3e9cd55a98b68f5735db2"), + Data: dataBytes, + Topics: []gethCommon.Hash{ + gethCommon.HexToHash("0x24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da"), + }, + } + vmError := vm.ErrOutOfGas + txResult := &types.Result{ + VMError: vmError, + TxType: 255, + GasConsumed: 23200, + DeployedContractAddress: &deployedAddress, + ReturnedData: dataBytes, + Logs: []*gethTypes.Log{log}, + TxHash: txHash, + StateChangeCommitment: stateUpdateCommit, + } + + t.Run("evm.TransactionExecuted with failed status", func(t *testing.T) { + event := events.NewTransactionEvent(txResult, txBytes, blockHeight) + ev, err := event.Payload.ToCadence(flow.Emulator) + require.NoError(t, err) + + tep, err := events.DecodeTransactionEventPayload(ev) + require.NoError(t, err) + + assert.Equal(t, tep.BlockHeight, blockHeight) + assert.Equal(t, tep.Hash, txHash) + assert.Equal(t, tep.Payload, txBytes) + assert.Equal(t, types.ErrorCode(tep.ErrorCode), types.ExecutionErrCodeOutOfGas) + assert.Equal(t, tep.TransactionType, txResult.TxType) + assert.Equal(t, tep.GasConsumed, txResult.GasConsumed) + assert.Equal(t, tep.ErrorMessage, txResult.VMError.Error()) + assert.Equal(t, tep.ReturnedData, txResult.ReturnedData) + assert.Equal(t, tep.StateUpdateChecksum[:], stateUpdateCommit[:types.ChecksumLength]) + assert.Equal( + t, + tep.ContractAddress, + txResult.DeployedContractAddress.ToCommon().Hex(), + ) + + encodedLogs, err := rlp.EncodeToBytes(txResult.Logs) + require.NoError(t, err) + assert.Equal(t, tep.Logs, encodedLogs) + + v, err := ccf.Encode(ev) + require.NoError(t, err) + assert.Equal(t, 
ccf.HasMsgPrefix(v), true)
+
+		evt, err := ccf.Decode(nil, v)
+		require.NoError(t, err)
+
+		location := systemcontracts.SystemContractsForChain(flow.Emulator).EVMContract.Location()
+		assert.Equal(t,
+			string(location.TypeID(nil, "EVM.TransactionExecuted")),
+			evt.Type().ID(),
+		)
+	})
+
+	t.Run("evm.TransactionExecuted with non-failed status", func(t *testing.T) {
+		txResult.VMError = nil
+
+		event := events.NewTransactionEvent(txResult, txBytes, blockHeight)
+		ev, err := event.Payload.ToCadence(flow.Emulator)
+		require.NoError(t, err)
+
+		tep, err := events.DecodeTransactionEventPayload(ev)
+		require.NoError(t, err)
+
+		assert.Equal(t, tep.BlockHeight, blockHeight)
+		assert.Equal(t, tep.Hash, txHash)
+		assert.Equal(t, tep.Payload, txBytes)
+		assert.Equal(t, types.ErrCodeNoError, types.ErrorCode(tep.ErrorCode))
+		assert.Equal(t, tep.TransactionType, txResult.TxType)
+		assert.Equal(t, tep.GasConsumed, txResult.GasConsumed)
+		assert.Empty(t, tep.ErrorMessage)
+		assert.Equal(t, tep.ReturnedData, txResult.ReturnedData)
+		assert.NotNil(t, txResult.DeployedContractAddress)
+		assert.Equal(
+			t,
+			tep.ContractAddress,
+			txResult.DeployedContractAddress.ToCommon().Hex(),
+		)
+
+		encodedLogs, err := rlp.EncodeToBytes(txResult.Logs)
+		require.NoError(t, err)
+		assert.Equal(t, tep.Logs, encodedLogs)
+
+		v, err := ccf.Encode(ev)
+		require.NoError(t, err)
+		assert.Equal(t, ccf.HasMsgPrefix(v), true)
+
+		evt, err := ccf.Decode(nil, v)
+		require.NoError(t, err)
+
+		location := systemcontracts.SystemContractsForChain(flow.Emulator).EVMContract.Location()
+		assert.Equal(t,
+			string(location.TypeID(nil, "EVM.TransactionExecuted")),
+			evt.Type().ID(),
+		)
+	})
+}
diff --git a/fvm/evm/events/utils.go b/fvm/evm/events/utils.go
new file mode 100644
index 00000000000..c03fc240cae
--- /dev/null
+++ b/fvm/evm/events/utils.go
@@ -0,0 +1,47 @@
+package events
+
+import (
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/onflow/cadence"
+
+	"github.com/onflow/flow-go/fvm/evm/types"
+)
+
+// cadenceArrayTypeOfUInt8 is the Cadence type [UInt8]
+var cadenceArrayTypeOfUInt8 = cadence.NewVariableSizedArrayType(cadence.UInt8Type)
+
+// bytesToCadenceUInt8ArrayValue converts bytes into a Cadence array of type UInt8
+func bytesToCadenceUInt8ArrayValue(b []byte) cadence.Array {
+	values := make([]cadence.Value, len(b))
+	for i, v := range b {
+		values[i] = cadence.NewUInt8(v)
+	}
+	return cadence.NewArray(values).
+		WithType(cadenceArrayTypeOfUInt8)
+}
+
+// cadenceHashType is the Cadence type [UInt8;32]
+var cadenceHashType = cadence.NewConstantSizedArrayType(gethCommon.HashLength, cadence.UInt8Type)
+
+// hashToCadenceArrayValue converts an EVM hash ([32]byte) into a Cadence array of type [UInt8;32]
+func hashToCadenceArrayValue(hash gethCommon.Hash) cadence.Array {
+	values := make([]cadence.Value, len(hash))
+	for i, v := range hash {
+		values[i] = cadence.NewUInt8(v)
+	}
+	return cadence.NewArray(values).
+		WithType(cadenceHashType)
+}
+
+// checksumType is the Cadence type [UInt8;4]
+var checksumType = cadence.NewConstantSizedArrayType(types.ChecksumLength, cadence.UInt8Type)
+
+// checksumToCadenceArrayValue converts a checksum ([4]byte) into a Cadence array of type [UInt8;4]
+func checksumToCadenceArrayValue(checksum [types.ChecksumLength]byte) cadence.Array {
+	values := make([]cadence.Value, types.ChecksumLength)
+	for i := 0; i < types.ChecksumLength; i++ {
+		values[i] = cadence.NewUInt8(checksum[i])
+	}
+	return cadence.NewArray(values).
+ WithType(checksumType) +} diff --git a/fvm/evm/events/utils_test.go b/fvm/evm/events/utils_test.go new file mode 100644 index 00000000000..f306b6cb5e4 --- /dev/null +++ b/fvm/evm/events/utils_test.go @@ -0,0 +1,90 @@ +package events + +import ( + "testing" + + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/onflow/cadence" + "github.com/stretchr/testify/require" +) + +func TestBytesToCadenceUInt8ArrayValue(t *testing.T) { + t.Parallel() + + input := []byte{1, 2, 3, 4, 5} + + inCadence := bytesToCadenceUInt8ArrayValue(input) + + require.Equal(t, + cadence.NewArray([]cadence.Value{ + cadence.UInt8(1), + cadence.UInt8(2), + cadence.UInt8(3), + cadence.UInt8(4), + cadence.UInt8(5), + }).WithType(cadenceArrayTypeOfUInt8), + inCadence, + ) +} + +func TestHashToCadenceArrayValue(t *testing.T) { + t.Parallel() + + input := gethCommon.Hash{1, 2, 3, 4, 5} + + inCadence := hashToCadenceArrayValue(input) + + require.Equal(t, + cadence.NewArray([]cadence.Value{ + cadence.UInt8(1), + cadence.UInt8(2), + cadence.UInt8(3), + cadence.UInt8(4), + cadence.UInt8(5), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + cadence.UInt8(0), + }).WithType(cadenceHashType), + inCadence, + ) +} + +func TestHashToChecksumValue(t *testing.T) { + t.Parallel() + + checksum := [4]byte{1, 2, 3, 4} + inCadence := checksumToCadenceArrayValue(checksum) + require.Equal(t, + cadence.NewArray([]cadence.Value{ + cadence.UInt8(1), + cadence.UInt8(2), + cadence.UInt8(3), + cadence.UInt8(4), + }).WithType(checksumType), + inCadence, + ) +} diff --git a/fvm/evm/evm.go b/fvm/evm/evm.go new file mode 100644 index 00000000000..9b74ac17ba3 --- /dev/null +++ b/fvm/evm/evm.go @@ -0,0 +1,65 @@ +package evm + +import ( + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/runtime" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/backends" + evm "github.com/onflow/flow-go/fvm/evm/emulator" + "github.com/onflow/flow-go/fvm/evm/handler" + "github.com/onflow/flow-go/fvm/evm/impl" + "github.com/onflow/flow-go/fvm/evm/stdlib" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" +) + +func ContractAccountAddress(chainID flow.ChainID) flow.Address { + return systemcontracts.SystemContractsForChain(chainID).EVMContract.Address +} + +func StorageAccountAddress(chainID flow.ChainID) flow.Address { + return systemcontracts.SystemContractsForChain(chainID).EVMStorage.Address +} + +func SetupEnvironment( + chainID flow.ChainID, + fvmEnv environment.Environment, + runtimeEnv runtime.Environment, +) error { + sc := systemcontracts.SystemContractsForChain(chainID) + randomBeaconAddress := sc.RandomBeaconHistory.Address + flowTokenAddress := sc.FlowToken.Address + + backend := backends.NewWrappedEnvironment(fvmEnv) + emulator := evm.NewEmulator(backend, StorageAccountAddress(chainID)) + blockStore := handler.NewBlockStore(chainID, backend, StorageAccountAddress(chainID)) + addressAllocator := handler.NewAddressAllocator() + + evmContractAddress := ContractAccountAddress(chainID) + + 
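+	// wire the Cadence-facing EVM contract to the emulator, block store, and
+	// address allocator constructed above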
contractHandler := handler.NewContractHandler( + chainID, + evmContractAddress, + common.Address(flowTokenAddress), + randomBeaconAddress, + blockStore, + addressAllocator, + backend, + emulator, + ) + + internalEVMContractValue := impl.NewInternalEVMContractValue( + nil, + contractHandler, + evmContractAddress, + ) + + stdlib.SetupEnvironment( + runtimeEnv, + internalEVMContractValue, + evmContractAddress, + ) + + return nil +} diff --git a/fvm/evm/evm_test.go b/fvm/evm/evm_test.go new file mode 100644 index 00000000000..9e64b4fd17d --- /dev/null +++ b/fvm/evm/evm_test.go @@ -0,0 +1,3715 @@ +package evm_test + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethParams "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/onflow/cadence/encoding/ccf" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/json" + + "github.com/onflow/flow-go/engine/execution/testutil" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/crypto" + "github.com/onflow/flow-go/fvm/environment" + envMock "github.com/onflow/flow-go/fvm/environment/mock" + "github.com/onflow/flow-go/fvm/evm" + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/impl" + "github.com/onflow/flow-go/fvm/evm/stdlib" + . "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestEVMRun(t *testing.T) { + t.Parallel() + + chain := flow.Emulator.Chain() + + t.Run("testing EVM.run (happy case)", func(t *testing.T) { + + t.Parallel() + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(tx: [UInt8], coinbaseBytes: [UInt8; 20]){ + prepare(account: &Account) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + + assert(res.status == EVM.Status.successful, message: "unexpected status") + assert(res.errorCode == 0, message: "unexpected error code") + assert(res.deployedContract == nil, message: "unexpected deployed contract") + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + coinbaseAddr := types.Address{1, 2, 3} + coinbaseBalance := getEVMAccountBalance(t, ctx, vm, snapshot, coinbaseAddr) + require.Zero(t, types.BalanceToBigInt(coinbaseBalance).Uint64()) + + num := int64(12) + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "store", big.NewInt(num)), + big.NewInt(0), + uint64(100_000), + big.NewInt(1), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(coinbaseAddr.Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). 
+				AddAuthorizer(sc.FlowServiceAccount.Address).
+				AddArgument(json.MustEncode(innerTx)).
+				AddArgument(json.MustEncode(coinbase)).
+				Build()
+				require.NoError(t, err)
+
+				tx := fvm.Transaction(txBody, 0)
+
+				state, output, err := vm.Run(
+					ctx,
+					tx,
+					snapshot)
+				require.NoError(t, err)
+				require.NoError(t, output.Err)
+				require.NotEmpty(t, state.WriteSet)
+				snapshot = snapshot.Append(state)
+
+				// assert event fields are correct
+				require.Len(t, output.Events, 2)
+				txEvent := output.Events[0]
+				txEventPayload := TxEventToPayload(t, txEvent, sc.EVMContract.Address)
+				require.NoError(t, err)
+
+				// fee transfer event
+				feeTransferEvent := output.Events[1]
+				feeTransferEventPayload := TxEventToPayload(t, feeTransferEvent, sc.EVMContract.Address)
+				require.NoError(t, err)
+				require.Equal(t, uint16(types.ErrCodeNoError), feeTransferEventPayload.ErrorCode)
+				require.Equal(t, uint16(1), feeTransferEventPayload.Index)
+				require.Equal(t, uint64(21000), feeTransferEventPayload.GasConsumed)
+
+				// commit block
+				blockEventPayload, snapshot := callEVMHeartBeat(t,
+					ctx,
+					vm,
+					snapshot)
+
+				require.NotEmpty(t, blockEventPayload.Hash)
+				require.Equal(t, uint64(64785), blockEventPayload.TotalGasUsed)
+				require.NotEmpty(t, blockEventPayload.Hash)
+
+				txHashes := types.TransactionHashes{txEventPayload.Hash, feeTransferEventPayload.Hash}
+				require.Equal(t,
+					txHashes.RootHash(),
+					blockEventPayload.TransactionHashRoot,
+				)
+				require.NotEmpty(t, blockEventPayload.ReceiptRoot)
+
+				require.Equal(t, innerTxBytes, txEventPayload.Payload)
+				require.Equal(t, uint16(types.ErrCodeNoError), txEventPayload.ErrorCode)
+				require.Equal(t, uint16(0), txEventPayload.Index)
+				require.Equal(t, blockEventPayload.Height, txEventPayload.BlockHeight)
+				require.Equal(t, blockEventPayload.TotalGasUsed-feeTransferEventPayload.GasConsumed, txEventPayload.GasConsumed)
+				require.Empty(t, txEventPayload.ContractAddress)
+
+				// append the state
+				snapshot = snapshot.Append(state)
+
+				// check coinbase balance
+				coinbaseBalance = getEVMAccountBalance(t, ctx, vm, snapshot, coinbaseAddr)
+				require.Equal(t, types.BalanceToBigInt(coinbaseBalance).Uint64(), txEventPayload.GasConsumed)
+
+				// query the value
+				code = []byte(fmt.Sprintf(
+					`
+					import EVM from %s
+					access(all)
+					fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result {
+						let coinbase = EVM.EVMAddress(bytes: coinbaseBytes)
+						let res = EVM.run(tx: tx, coinbase: coinbase)
+
+						assert(res.status == EVM.Status.successful, message: "unexpected status")
+						assert(res.errorCode == 0, message: "unexpected error code")
+
+						return res
+					}
+					`,
+					sc.EVMContract.Address.HexWithPrefix(),
+				))
+
+				innerTxBytes = testAccount.PrepareSignAndEncodeTx(t,
+					testContract.DeployedAt.ToCommon(),
+					testContract.MakeCallData(t, "retrieve"),
+					big.NewInt(0),
+					uint64(100_000),
+					big.NewInt(0),
+				)
+
+				innerTx = cadence.NewArray(
+					unittest.BytesToCdcUInt8(innerTxBytes),
+				).WithType(stdlib.EVMTransactionBytesCadenceType)
+
+				script := fvm.Script(code).WithArguments(
+					json.MustEncode(innerTx),
+					json.MustEncode(coinbase),
+				)
+
+				_, output, err = vm.Run(
+					ctx,
+					script,
+					snapshot)
+				require.NoError(t, err)
+				require.NoError(t, output.Err)
+
+				res, err := impl.ResultSummaryFromEVMResultValue(output.Value)
+				require.NoError(t, err)
+				require.Equal(t, types.StatusSuccessful, res.Status)
+				require.Equal(t, types.ErrCodeNoError, res.ErrorCode)
+				require.Empty(t, res.ErrorMessage)
+				require.Nil(t, res.DeployedContractAddress)
+				require.Equal(t, uint64(23_520), res.GasConsumed)
+				require.Equal(t,
uint64(23_520), res.MaxGasConsumed) + require.Equal(t, num, new(big.Int).SetBytes(res.ReturnedData).Int64()) + }) + }) + + t.Run("testing EVM.run (failed)", func(t *testing.T) { + t.Parallel() + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(tx: [UInt8], coinbaseBytes: [UInt8; 20]){ + prepare(account: &Account) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + + assert(res.status == EVM.Status.failed, message: "unexpected status") + // ExecutionErrCodeExecutionReverted + assert(res.errorCode == %d, message: "unexpected error code") + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + types.ExecutionErrCodeExecutionReverted, + )) + + num := int64(12) + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "storeButRevert", big.NewInt(num)), + big.NewInt(0), + uint64(100_000), + big.NewInt(0), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(innerTx)). + AddArgument(json.MustEncode(coinbase)). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + state, output, err := vm.Run( + ctx, + tx, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + require.NotEmpty(t, state.WriteSet) + + snapshot = snapshot.Append(state) + + // query the value + code = []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.run(tx: tx, coinbase: coinbase) + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + innerTxBytes = testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "retrieve"), + big.NewInt(0), + uint64(100_000), + big.NewInt(0), + ) + + innerTx = cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + script := fvm.Script(code).WithArguments( + json.MustEncode(innerTx), + json.MustEncode(coinbase), + ) + + _, output, err = vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + res, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + require.Empty(t, res.ErrorMessage) + require.Equal(t, int64(0), new(big.Int).SetBytes(res.ReturnedData).Int64()) + }) + }) + + t.Run("testing EVM.run (with event emitted)", func(t *testing.T) { + t.Parallel() + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + 
transaction(tx: [UInt8], coinbaseBytes: [UInt8; 20]){ + prepare(account: &Account) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + + assert(res.status == EVM.Status.successful, message: "unexpected status") + assert(res.errorCode == 0, message: "unexpected error code") + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + num := int64(12) + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "storeWithLog", big.NewInt(num)), + big.NewInt(0), + uint64(100_000), + big.NewInt(0), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(innerTx)). + AddArgument(json.MustEncode(coinbase)). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + state, output, err := vm.Run( + ctx, + tx, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + require.NotEmpty(t, state.WriteSet) + + txEvent := output.Events[0] + txEventPayload := TxEventToPayload(t, txEvent, sc.EVMContract.Address) + + require.NotEmpty(t, txEventPayload.Hash) + + var logs []*gethTypes.Log + err = rlp.DecodeBytes(txEventPayload.Logs, &logs) + require.NoError(t, err) + require.Len(t, logs, 1) + log := logs[0] + last := log.Topics[len(log.Topics)-1] // last topic is the value set in the store method + assert.Equal(t, num, last.Big().Int64()) + }) + }) + + t.Run("testing EVM.run execution reverted with assert error", func(t *testing.T) { + + t.Parallel() + + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(tx: [UInt8], coinbaseBytes: [UInt8; 20]){ + prepare(account: &Account) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + + assert(res.status == EVM.Status.failed, message: "unexpected status") + assert(res.errorCode == 306, message: "unexpected error code") + assert(res.deployedContract == nil, message: "unexpected deployed contract") + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + coinbaseAddr := types.Address{1, 2, 3} + coinbaseBalance := getEVMAccountBalance(t, ctx, vm, snapshot, coinbaseAddr) + require.Zero(t, types.BalanceToBigInt(coinbaseBalance).Uint64()) + + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "assertError"), + big.NewInt(0), + uint64(100_000), + big.NewInt(1), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(coinbaseAddr.Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(innerTx)). 
+ AddArgument(json.MustEncode(coinbase)). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + state, output, err := vm.Run( + ctx, + tx, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + require.NotEmpty(t, state.WriteSet) + + // assert event fields are correct + require.Len(t, output.Events, 2) + txEvent := output.Events[0] + txEventPayload := TxEventToPayload(t, txEvent, sc.EVMContract.Address) + require.NoError(t, err) + + assert.Equal( + t, + "execution reverted: Assert Error Message", + txEventPayload.ErrorMessage, + ) + }, + ) + }) + + t.Run("testing EVM.run execution reverted with custom error", func(t *testing.T) { + + t.Parallel() + + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(tx: [UInt8], coinbaseBytes: [UInt8; 20]){ + prepare(account: &Account) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + + assert(res.status == EVM.Status.failed, message: "unexpected status") + assert(res.errorCode == 306, message: "unexpected error code") + assert(res.deployedContract == nil, message: "unexpected deployed contract") + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + coinbaseAddr := types.Address{1, 2, 3} + coinbaseBalance := getEVMAccountBalance(t, ctx, vm, snapshot, coinbaseAddr) + require.Zero(t, types.BalanceToBigInt(coinbaseBalance).Uint64()) + + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "customError"), + big.NewInt(0), + uint64(100_000), + big.NewInt(1), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(coinbaseAddr.Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(innerTx)). + AddArgument(json.MustEncode(coinbase)). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + state, output, err := vm.Run( + ctx, + tx, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + require.NotEmpty(t, state.WriteSet) + + // assert event fields are correct + require.Len(t, output.Events, 2) + txEvent := output.Events[0] + txEventPayload := TxEventToPayload(t, txEvent, sc.EVMContract.Address) + require.NoError(t, err) + + // Unlike assert errors, custom errors cannot be further examined + // or ABI decoded, as we do not have access to the contract's ABI. + assert.Equal( + t, + "execution reverted", + txEventPayload.ErrorMessage, + ) + }, + ) + }) +} + +func TestEVMBatchRun(t *testing.T) { + chain := flow.Emulator.Chain() + + // run a batch of valid transactions which update a value on the contract + // after the batch is run check that the value updated on the contract matches + // the last transaction update in the batch. 
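+	// (each transaction in the batch stores a different value; only the last
+	// stored value should be visible afterwards, as checked below)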
+ t.Run("Batch run multiple valid transactions", func(t *testing.T) { + t.Parallel() + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + batchRunCode := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(txs: [[UInt8]], coinbaseBytes: [UInt8; 20]) { + prepare(account: &Account) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let batchResults = EVM.batchRun(txs: txs, coinbase: coinbase) + + assert(batchResults.length == txs.length, message: "invalid result length") + for res in batchResults { + assert(res.status == EVM.Status.successful, message: "unexpected status") + assert(res.errorCode == 0, message: "unexpected error code") + } + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + coinbaseAddr := types.Address{1, 2, 3} + coinbaseBalance := getEVMAccountBalance(t, ctx, vm, snapshot, coinbaseAddr) + require.Zero(t, types.BalanceToBigInt(coinbaseBalance).Uint64()) + + batchCount := 5 + var storedValues []int64 + txBytes := make([]cadence.Value, batchCount) + for i := 0; i < batchCount; i++ { + num := int64(i) + storedValues = append(storedValues, num) + // prepare batch of transaction payloads + tx := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "storeWithLog", big.NewInt(num)), + big.NewInt(0), + uint64(100_000), + big.NewInt(1), + ) + + // build txs argument + txBytes[i] = cadence.NewArray( + unittest.BytesToCdcUInt8(tx), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + } + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(coinbaseAddr.Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txs := cadence.NewArray(txBytes). + WithType(cadence.NewVariableSizedArrayType( + stdlib.EVMTransactionBytesCadenceType, + )) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(batchRunCode). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(txs)). + AddArgument(json.MustEncode(coinbase)). 
+				Build()
+				require.NoError(t, err)
+
+				tx := fvm.Transaction(txBody, 0)
+
+				state, output, err := vm.Run(ctx, tx, snapshot)
+				require.NoError(t, err)
+				require.NoError(t, output.Err)
+				require.NotEmpty(t, state.WriteSet)
+
+				// append the state
+				snapshot = snapshot.Append(state)
+
+				require.Len(t, output.Events, batchCount+1)
+				txHashes := make(types.TransactionHashes, 0)
+				totalGasUsed := uint64(0)
+				for i, event := range output.Events {
+					if i == batchCount { // skip last one
+						continue
+					}
+
+					ev, err := ccf.Decode(nil, event.Payload)
+					require.NoError(t, err)
+					cadenceEvent, ok := ev.(cadence.Event)
+					require.True(t, ok)
+
+					event, err := events.DecodeTransactionEventPayload(cadenceEvent)
+					require.NoError(t, err)
+
+					txHashes = append(txHashes, event.Hash)
+					var logs []*gethTypes.Log
+					err = rlp.DecodeBytes(event.Logs, &logs)
+					require.NoError(t, err)
+
+					require.Len(t, logs, 1)
+
+					log := logs[0]
+					last := log.Topics[len(log.Topics)-1] // last topic is the value set in the store method
+					assert.Equal(t, storedValues[i], last.Big().Int64())
+					totalGasUsed += event.GasConsumed
+				}
+
+				// last event is fee transfer event
+				feeTransferEvent := output.Events[batchCount]
+				feeTransferEventPayload := TxEventToPayload(t, feeTransferEvent, sc.EVMContract.Address)
+				require.NoError(t, err)
+				require.Equal(t, uint16(types.ErrCodeNoError), feeTransferEventPayload.ErrorCode)
+				require.Equal(t, uint16(batchCount), feeTransferEventPayload.Index)
+				require.Equal(t, uint64(21000), feeTransferEventPayload.GasConsumed)
+				txHashes = append(txHashes, feeTransferEventPayload.Hash)
+
+				// check coinbase balance (note the gas price is 1)
+				coinbaseBalance = getEVMAccountBalance(t, ctx, vm, snapshot, coinbaseAddr)
+				require.Equal(t, types.BalanceToBigInt(coinbaseBalance).Uint64(), totalGasUsed)
+
+				// commit block
+				blockEventPayload, snapshot := callEVMHeartBeat(t,
+					ctx,
+					vm,
+					snapshot)
+
+				require.NotEmpty(t, blockEventPayload.Hash)
+				require.Equal(t, uint64(176_513), blockEventPayload.TotalGasUsed)
+				require.Equal(t,
+					txHashes.RootHash(),
+					blockEventPayload.TransactionHashRoot,
+				)
+
+				// retrieve the values
+				retrieveCode := []byte(fmt.Sprintf(
+					`
+					import EVM from %s
+					access(all)
+					fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result {
+						let coinbase = EVM.EVMAddress(bytes: coinbaseBytes)
+						return EVM.run(tx: tx, coinbase: coinbase)
+					}
+					`,
+					sc.EVMContract.Address.HexWithPrefix(),
+				))
+
+				innerTxBytes := testAccount.PrepareSignAndEncodeTx(t,
+					testContract.DeployedAt.ToCommon(),
+					testContract.MakeCallData(t, "retrieve"),
+					big.NewInt(0),
+					uint64(100_000),
+					big.NewInt(0),
+				)
+
+				innerTx := cadence.NewArray(
+					unittest.BytesToCdcUInt8(innerTxBytes),
+				).WithType(stdlib.EVMTransactionBytesCadenceType)
+
+				script := fvm.Script(retrieveCode).WithArguments(
+					json.MustEncode(innerTx),
+					json.MustEncode(coinbase),
+				)
+
+				_, output, err = vm.Run(
+					ctx,
+					script,
+					snapshot)
+				require.NoError(t, err)
+				require.NoError(t, output.Err)
+
+				// make sure the retrieved value is the same as the last value
+				// that was stored by transaction batch
+				res, err := impl.ResultSummaryFromEVMResultValue(output.Value)
+				require.NoError(t, err)
+				require.Equal(t, types.StatusSuccessful, res.Status)
+				require.Equal(t, types.ErrCodeNoError, res.ErrorCode)
+				require.Empty(t, res.ErrorMessage)
+				require.Equal(t, storedValues[len(storedValues)-1], new(big.Int).SetBytes(res.ReturnedData).Int64())
+			})
+	})
+
+	// run a batch with one invalid transaction that has an invalid nonce;
+	// this should produce an invalid result for that specific transaction,
+	// but the other transactions should successfully update the value on the contract
+	t.Run("Batch run with one invalid transaction", func(t *testing.T) {
+		t.Parallel()
+		RunWithNewEnvironment(t,
+			chain, func(
+				ctx fvm.Context,
+				vm fvm.VM,
+				snapshot snapshot.SnapshotTree,
+				testContract *TestContract,
+				testAccount *EOATestAccount,
+			) {
+				// we make the transaction at a specific index invalid so that it fails
+				const failedTxIndex = 3
+				sc := systemcontracts.SystemContractsForChain(chain.ChainID())
+				batchRunCode := []byte(fmt.Sprintf(
+					`
+					import EVM from %s
+
+					transaction(txs: [[UInt8]], coinbaseBytes: [UInt8; 20]) {
+						prepare(account: &Account) {
+							let coinbase = EVM.EVMAddress(bytes: coinbaseBytes)
+							let batchResults = EVM.batchRun(txs: txs, coinbase: coinbase)
+
+							assert(batchResults.length == txs.length, message: "invalid result length")
+							for i, res in batchResults {
+								if i != %d {
+									assert(res.status == EVM.Status.successful, message: "unexpected status")
+									assert(res.errorCode == 0, message: "unexpected error code")
+								} else {
+									assert(res.status == EVM.Status.invalid, message: "unexpected status")
+									assert(res.errorCode == 201, message: "unexpected error code")
+								}
+							}
+						}
+					}
+					`,
+					sc.EVMContract.Address.HexWithPrefix(),
+					failedTxIndex,
+				))
+
+				batchCount := 5
+				var num int64
+				txBytes := make([]cadence.Value, batchCount)
+				for i := 0; i < batchCount; i++ {
+					num = int64(i)
+
+					if i == failedTxIndex {
+						// make one transaction in the batch have an invalid nonce
+						testAccount.SetNonce(testAccount.Nonce() - 1)
+					}
+					// prepare batch of transaction payloads
+					tx := testAccount.PrepareSignAndEncodeTx(t,
+						testContract.DeployedAt.ToCommon(),
+						testContract.MakeCallData(t, "store", big.NewInt(num)),
+						big.NewInt(0),
+						uint64(100_000),
+						big.NewInt(0),
+					)
+
+					// build txs argument
+					txBytes[i] = cadence.NewArray(
+						unittest.BytesToCdcUInt8(tx),
+					).WithType(stdlib.EVMTransactionBytesCadenceType)
+				}
+
+				coinbase := cadence.NewArray(
+					unittest.BytesToCdcUInt8(testAccount.Address().Bytes()),
+				).WithType(stdlib.EVMAddressBytesCadenceType)
+
+				txs := cadence.NewArray(txBytes).
+					WithType(cadence.NewVariableSizedArrayType(
+						stdlib.EVMTransactionBytesCadenceType,
+					))
+
+				txBody, err := flow.NewTransactionBodyBuilder().
+				SetScript(batchRunCode).
+				SetPayer(sc.FlowServiceAccount.Address).
+				AddAuthorizer(sc.FlowServiceAccount.Address).
+				AddArgument(json.MustEncode(txs)).
+				AddArgument(json.MustEncode(coinbase)).
+				Build()
+				require.NoError(t, err)
+
+				tx := fvm.Transaction(txBody, 0)
+
+				state, output, err := vm.Run(ctx, tx, snapshot)
+				require.NoError(t, err)
+				require.NoError(t, output.Err)
+				require.NotEmpty(t, state.WriteSet)
+
+				// append the state
+				snapshot = snapshot.Append(state)
+
+				// retrieve the values
+				retrieveCode := []byte(fmt.Sprintf(
+					`
+					import EVM from %s
+					access(all)
+					fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result {
+						let coinbase = EVM.EVMAddress(bytes: coinbaseBytes)
+						return EVM.run(tx: tx, coinbase: coinbase)
+					}
+					`,
+					sc.EVMContract.Address.HexWithPrefix(),
+				))
+
+				innerTxBytes := testAccount.PrepareSignAndEncodeTx(t,
+					testContract.DeployedAt.ToCommon(),
+					testContract.MakeCallData(t, "retrieve"),
+					big.NewInt(0),
+					uint64(100_000),
+					big.NewInt(0),
+				)
+
+				innerTx := cadence.NewArray(
+					unittest.BytesToCdcUInt8(innerTxBytes),
+				).WithType(stdlib.EVMTransactionBytesCadenceType)
+
+				script := fvm.Script(retrieveCode).WithArguments(
+					json.MustEncode(innerTx),
+					json.MustEncode(coinbase),
+				)
+
+				_, output, err = vm.Run(
+					ctx,
+					script,
+					snapshot)
+				require.NoError(t, err)
+				require.NoError(t, output.Err)
+
+				// make sure the retrieved value is the same as the last value
+				// that was stored by transaction batch
+				res, err := impl.ResultSummaryFromEVMResultValue(output.Value)
+				require.NoError(t, err)
+				require.Equal(t, types.StatusSuccessful, res.Status)
+				require.Equal(t, types.ErrCodeNoError, res.ErrorCode)
+				require.Empty(t, res.ErrorMessage)
+				require.Equal(t, num, new(big.Int).SetBytes(res.ReturnedData).Int64())
+			})
+	})
+
+	// fail every other transaction with gas set too low for execution to succeed,
+	// but high enough to pass the intrinsic gas check; then check that the updated value
+	// on the contract matches the last successful transaction execution
+	t.Run("Batch run with failed transactions", func(t *testing.T) {
+		t.Parallel()
+		RunWithNewEnvironment(t,
+			chain, func(
+				ctx fvm.Context,
+				vm fvm.VM,
+				snapshot snapshot.SnapshotTree,
+				testContract *TestContract,
+				testAccount *EOATestAccount,
+			) {
+				sc := systemcontracts.SystemContractsForChain(chain.ChainID())
+				batchRunCode := []byte(fmt.Sprintf(
+					`
+					import EVM from %s
+
+					transaction(txs: [[UInt8]], coinbaseBytes: [UInt8; 20]) {
+						execute {
+							let coinbase = EVM.EVMAddress(bytes: coinbaseBytes)
+							let batchResults = EVM.batchRun(txs: txs, coinbase: coinbase)
+
+							log("results")
+							log(batchResults)
+							assert(batchResults.length == txs.length, message: "invalid result length")
+
+							for i, res in batchResults {
+								if i %% 2 != 0 {
+									assert(res.status == EVM.Status.successful, message: "unexpected success status")
+									assert(res.errorCode == 0, message: "unexpected error code")
+									assert(res.errorMessage == "", message: "unexpected error msg")
+								} else {
+									assert(res.status == EVM.Status.failed, message: "unexpected failed status")
+									assert(res.errorCode == 400, message: "unexpected error code")
+								}
+							}
+						}
+					}
+					`,
+					sc.EVMContract.Address.HexWithPrefix(),
+				))
+
+				batchCount := 6
+				var num int64
+				txBytes := make([]cadence.Value, batchCount)
+				for i := 0; i < batchCount; i++ {
+					gas := uint64(100_000)
+					if i%2 == 0 {
+						// fail with too low gas limit
+						gas = 22_000
+					} else {
+						// update number with only valid transactions
+						num = int64(i)
+					}
+
+					// prepare batch of transaction payloads
+					tx := testAccount.PrepareSignAndEncodeTx(t,
+						testContract.DeployedAt.ToCommon(),
+						testContract.MakeCallData(t, "store", big.NewInt(num)),
+						big.NewInt(0),
+						gas,
+						big.NewInt(0),
+					)
+
+					// build txs
argument + txBytes[i] = cadence.NewArray( + unittest.BytesToCdcUInt8(tx), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + } + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txs := cadence.NewArray(txBytes). + WithType(cadence.NewVariableSizedArrayType( + stdlib.EVMTransactionBytesCadenceType, + )) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(batchRunCode). + SetPayer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(txs)). + AddArgument(json.MustEncode(coinbase)). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + state, output, err := vm.Run(ctx, tx, snapshot) + + require.NoError(t, err) + require.NoError(t, output.Err) + //require.NotEmpty(t, state.WriteSet) + + // append the state + snapshot = snapshot.Append(state) + + // retrieve the values + retrieveCode := []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.run(tx: tx, coinbase: coinbase) + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "retrieve"), + big.NewInt(0), + uint64(100_000), + big.NewInt(0), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + script := fvm.Script(retrieveCode).WithArguments( + json.MustEncode(innerTx), + json.MustEncode(coinbase), + ) + + _, output, err = vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + // make sure the retrieved value is the same as the last value + // that was stored by transaction batch + res, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Empty(t, res.ErrorMessage) + require.Equal(t, num, new(big.Int).SetBytes(res.ReturnedData).Int64()) + }) + }) +} + +func TestEVMBlockData(t *testing.T) { + t.Parallel() + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + + // query the block timestamp + code := []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.run(tx: tx, coinbase: coinbase) + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "blockTime"), + big.NewInt(0), + uint64(100_000), + big.NewInt(0), + ) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + script := fvm.Script(code).WithArguments( + json.MustEncode(innerTx), + json.MustEncode(coinbase), + ) + + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + 
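// decode the Cadence EVM.Result value returned by the script into a Go ResultSummary + // so its fields can be asserted on directly; note the Flow block header timestamp is in + // milliseconds, hence the division by 1000 in the comparison below +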
res, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + require.Empty(t, res.ErrorMessage) + require.Equal(t, ctx.BlockHeader.Timestamp/1000, new(big.Int).SetBytes(res.ReturnedData).Uint64()) // EVM reports block time as Unix time in seconds + + }) +} + +func TestEVMAddressDeposit(t *testing.T) { + + t.Parallel() + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + + code := []byte(fmt.Sprintf( + ` + import EVM from %s + import FlowToken from %s + + transaction(addr: [UInt8; 20]) { + prepare(account: auth(BorrowValue) &Account) { + let admin = account.storage + .borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin)! + + let minter <- admin.createNewMinter(allowedAmount: 1.0) + let vault <- minter.mintTokens(amount: 1.0) + destroy minter + + let address = EVM.EVMAddress(bytes: addr) + address.deposit(from: <-vault) + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix(), + )) + + addr := RandomAddress(t) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(cadence.NewArray( + unittest.BytesToCdcUInt8(addr.Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType))). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + execSnap, output, err := vm.Run( + ctx, + tx, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshot = snapshot.Append(execSnap) + + expectedBalance := types.OneFlowBalance() + bal := getEVMAccountBalance(t, ctx, vm, snapshot, addr) + require.Equal(t, expectedBalance, bal) + + // tx executed event + txEvent := output.Events[2] + txEventPayload := TxEventToPayload(t, txEvent, sc.EVMContract.Address) + + // deposit event + depositEvent := output.Events[3] + depEv, err := events.FlowEventToCadenceEvent(depositEvent) + require.NoError(t, err) + + depEvPayload, err := events.DecodeFLOWTokensDepositedEventPayload(depEv) + require.NoError(t, err) + + require.Equal(t, types.OneFlow(), depEvPayload.BalanceAfterInAttoFlow.Value) + + // commit block + blockEventPayload, _ := callEVMHeartBeat(t, + ctx, + vm, + snapshot) + + require.NotEmpty(t, blockEventPayload.Hash) + require.Equal(t, uint64(21000), blockEventPayload.TotalGasUsed) + + txHashes := types.TransactionHashes{txEventPayload.Hash} + require.Equal(t, + txHashes.RootHash(), + blockEventPayload.TransactionHashRoot, + ) + }) +} + +func TestCOAAddressDeposit(t *testing.T) { + t.Parallel() + + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + code := []byte(fmt.Sprintf( + ` + import EVM from %s + import FlowToken from %s + + access(all) + fun main() { + let admin = getAuthAccount<auth(BorrowValue) &Account>(%s) + .storage.borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin)! 
+ let minter <- admin.createNewMinter(allowedAmount: 1.23) + let vault <- minter.mintTokens(amount: 1.23) + destroy minter + + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + cadenceOwnedAccount.deposit(from: <-vault) + destroy cadenceOwnedAccount + } + `, + sc.EVMContract.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix(), + sc.FlowServiceAccount.Address.HexWithPrefix(), + )) + + script := fvm.Script(code) + + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + }) +} + +func TestCadenceOwnedAccountFunctionalities(t *testing.T) { + t.Parallel() + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + t.Run("test coa setup", func(t *testing.T) { + t.Parallel() + + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + // create a flow account + flowAccount, _, snapshot := createAndFundFlowAccount( + t, + ctx, + vm, + snapshot, + ) + + var coaAddress types.Address + + initNonce := uint64(1) + // 10 Flow in UFix64 + initBalanceInUFix64 := uint64(1_000_000_000) + initBalance := types.NewBalanceFromUFix64(cadence.UFix64(initBalanceInUFix64)) + + coaAddress, snapshot = setupCOA( + t, + ctx, + vm, + snapshot, + flowAccount, + initBalanceInUFix64) + + bal := getEVMAccountBalance( + t, + ctx, + vm, + snapshot, + coaAddress) + require.Equal(t, initBalance, bal) + + nonce := getEVMAccountNonce( + t, + ctx, + vm, + snapshot, + coaAddress) + require.Equal(t, initNonce, nonce) + }) + }) + + t.Run("test coa withdraw", func(t *testing.T) { + t.Parallel() + + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + code := []byte(fmt.Sprintf( + ` + import EVM from %s + import FlowToken from %s + + transaction() { + prepare(account: auth(BorrowValue) &Account) { + let admin = account.storage + .borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin)! + + let minter <- admin.createNewMinter(allowedAmount: 2.34) + let vault <- minter.mintTokens(amount: 2.34) + destroy minter + + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + cadenceOwnedAccount.deposit(from: <-vault) + + let bal = EVM.Balance(attoflow: 0) + bal.setFLOW(flow: 1.23) + let vault2 <- cadenceOwnedAccount.withdraw(balance: bal) + let balance = vault2.balance + destroy cadenceOwnedAccount + destroy vault2 + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix(), + )) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). 
+ Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + _, output, err := vm.Run( + ctx, + tx, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + withdrawEvent := output.Events[7] + + ev, err := events.FlowEventToCadenceEvent(withdrawEvent) + require.NoError(t, err) + + evPayload, err := events.DecodeFLOWTokensWithdrawnEventPayload(ev) + require.NoError(t, err) + + // 2.34 - 1.23 = 1.11 + expectedBalanceAfterWithdraw := big.NewInt(1_110_000_000_000_000_000) + require.Equal(t, expectedBalanceAfterWithdraw, evPayload.BalanceAfterInAttoFlow.Value) + }) + }) + + t.Run("test coa transfer", func(t *testing.T) { + t.Parallel() + + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + code := []byte(fmt.Sprintf( + ` + import EVM from %s + import FlowToken from %s + + access(all) + fun main(address: [UInt8; 20]): UFix64 { + let admin = getAuthAccount<auth(BorrowValue) &Account>(%s) + .storage.borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin)! + + let minter <- admin.createNewMinter(allowedAmount: 2.34) + let vault <- minter.mintTokens(amount: 2.34) + destroy minter + + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + cadenceOwnedAccount.deposit(from: <-vault) + + let bal = EVM.Balance(attoflow: 0) + bal.setFLOW(flow: 1.23) + + let recipientEVMAddress = EVM.EVMAddress(bytes: address) + + let res = cadenceOwnedAccount.call( + to: recipientEVMAddress, + data: [], + gasLimit: 100_000, + value: bal, + ) + + assert(res.status == EVM.Status.successful, message: "transfer call was not successful") + + destroy cadenceOwnedAccount + return recipientEVMAddress.balance().inFLOW() + } + `, + sc.EVMContract.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix(), + sc.FlowServiceAccount.Address.HexWithPrefix(), + )) + + addr := cadence.NewArray( + unittest.BytesToCdcUInt8(RandomAddress(t).Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + script := fvm.Script(code).WithArguments( + json.MustEncode(addr), + ) + + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + require.Equal(t, uint64(123000000), uint64(output.Value.(cadence.UFix64))) + }) + }) + + t.Run("test coa deposit and withdraw in a single transaction", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + code := []byte(fmt.Sprintf( + ` + import EVM from %s + import FlowToken from %s + + access(all) + fun main(): UFix64 { + let admin = getAuthAccount<auth(Storage) &Account>(%s) + .storage.borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin)! 
+ + let minter <- admin.createNewMinter(allowedAmount: 2.34) + let vault <- minter.mintTokens(amount: 2.34) + destroy minter + + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + cadenceOwnedAccount.deposit(from: <-vault) + + let bal = EVM.Balance(attoflow: 0) + bal.setFLOW(flow: 1.23) + let vault2 <- cadenceOwnedAccount.withdraw(balance: bal) + let balance = vault2.balance + destroy cadenceOwnedAccount + destroy vault2 + + return balance + } + `, + sc.EVMContract.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix(), + sc.FlowServiceAccount.Address.HexWithPrefix(), + )) + + script := fvm.Script(code) + + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + }) + }) + + t.Run("test coa deploy", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + code := []byte(fmt.Sprintf( + ` + import EVM from %s + import FlowToken from %s + + access(all) + fun main(code: [UInt8]): EVM.Result { + let admin = getAuthAccount<auth(Storage) &Account>(%s) + .storage.borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin)! + let minter <- admin.createNewMinter(allowedAmount: 2.34) + let vault <- minter.mintTokens(amount: 2.34) + destroy minter + + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + cadenceOwnedAccount.deposit(from: <-vault) + + let res = cadenceOwnedAccount.deploy( + code: code, + gasLimit: 2_000_000, + value: EVM.Balance(attoflow: 1230000000000000000) + ) + destroy cadenceOwnedAccount + return res + } + `, + sc.EVMContract.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix(), + sc.FlowServiceAccount.Address.HexWithPrefix(), + )) + + script := fvm.Script(code). 
+ WithArguments(json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(testContract.ByteCode), + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)), + )) + + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + res, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + require.Empty(t, res.ErrorMessage) + require.NotNil(t, res.DeployedContractAddress) + // we strip away the first few bytes because they contain the deployment code + require.Equal(t, testContract.ByteCode[17:], []byte(res.ReturnedData)) + }) + }) + + t.Run("test coa dryCall", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(tx: [UInt8], coinbaseBytes: [UInt8; 20]){ + prepare(account: auth(Storage) &Account ) { + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + account.storage.save(<- cadenceOwnedAccount, to: /storage/evmCOA) + + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + + assert(res.status == EVM.Status.successful, message: "unexpected status") + assert(res.errorCode == 0, message: "unexpected error code") + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + num := int64(42) + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "store", big.NewInt(num)), + big.NewInt(0), + uint64(50_000), + big.NewInt(0), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(innerTx)). + AddArgument(json.MustEncode(coinbase)). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + state, output, err := vm.Run( + ctx, + tx, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + assert.Len(t, output.Events, 3) + assert.Len(t, state.UpdatedRegisterIDs(), 13) + assert.Equal( + t, + flow.EventType("A.f8d6e0586b0a20c7.EVM.TransactionExecuted"), + output.Events[0].Type, + ) + assert.Equal( + t, + flow.EventType("A.f8d6e0586b0a20c7.EVM.CadenceOwnedAccountCreated"), + output.Events[1].Type, + ) + assert.Equal( + t, + flow.EventType("A.f8d6e0586b0a20c7.EVM.TransactionExecuted"), + output.Events[2].Type, + ) + snapshot = snapshot.Append(state) + + code = []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(data: [UInt8], to: String, gasLimit: UInt64, value: UInt){ + prepare(account: auth(Storage) &Account) { + let coa = account.storage.borrow<&EVM.CadenceOwnedAccount>( + from: /storage/evmCOA + ) ??
panic("could not borrow COA reference!") + let res = coa.dryCall( + to: EVM.addressFromString(to), + data: data, + gasLimit: gasLimit, + value: EVM.Balance(attoflow: value) + ) + + assert(res.status == EVM.Status.successful, message: "unexpected status") + assert(res.errorCode == 0, message: "unexpected error code") + + let values = EVM.decodeABI(types: [Type<UInt256>()], data: res.data) + assert(values.length == 1) + + let number = values[0] as! UInt256 + assert(number == 42, message: String.encodeHex(res.data)) + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + data := json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(testContract.MakeCallData(t, "retrieve")), + ).WithType(stdlib.EVMTransactionBytesCadenceType), + ) + toAddress, err := cadence.NewString(testContract.DeployedAt.ToCommon().Hex()) + require.NoError(t, err) + to := json.MustEncode(toAddress) + + txBody, err = flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(data). + AddArgument(to). + AddArgument(json.MustEncode(cadence.NewUInt64(50_000))). + AddArgument(json.MustEncode(cadence.NewUInt(0))). + Build() + require.NoError(t, err) + + tx = fvm.Transaction(txBody, 0) + + state, output, err = vm.Run( + ctx, + tx, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + assert.Len(t, output.Events, 0) + assert.Len(t, state.UpdatedRegisterIDs(), 0) + }) + }) +} + +func TestDryRun(t *testing.T) { + t.Parallel() + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + evmAddress := sc.EVMContract.Address.HexWithPrefix() + + dryRunTx := func( + t *testing.T, + tx *gethTypes.Transaction, + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + ) *types.ResultSummary { + code := []byte(fmt.Sprintf(` + import EVM from %s + + access(all) + fun main(tx: [UInt8]): EVM.Result { + return EVM.dryRun( + tx: tx, + from: EVM.EVMAddress(bytes: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]) + ) + }`, + evmAddress, + )) + + innerTxBytes, err := tx.MarshalBinary() + require.NoError(t, err) + + script := fvm.Script(code).WithArguments( + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType), + ), + ) + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + result, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + return result + } + + // this test checks that gas limit is correctly used and gas usage correctly reported + t.Run("test dry run storing a value with different gas limits", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + data := testContract.MakeCallData(t, "store", big.NewInt(1337)) + + limit := uint64(math.MaxUint64 - 1) + tx := gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(0), + limit, + big.NewInt(0), + data, + ) + result := dryRunTx(t, tx, ctx, vm, snapshot) + require.Equal(t, types.ErrCodeNoError, result.ErrorCode) + require.Equal(t, types.StatusSuccessful, result.Status) + require.Greater(t, result.GasConsumed, uint64(0)) + require.Less(t, result.GasConsumed, limit) + + // gas limit too low, but still bigger than intrinsic gas value + limit = 
uint64(24_216) + tx = gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(0), + limit, + big.NewInt(0), + data, + ) + result = dryRunTx(t, tx, ctx, vm, snapshot) + require.Equal(t, types.ExecutionErrCodeOutOfGas, result.ErrorCode) + require.Equal(t, types.StatusFailed, result.Status) + require.Equal(t, result.GasConsumed, limit) // burn it all!!! + }) + }) + + t.Run("test dry run store current value", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + data := testContract.MakeCallData(t, "store", big.NewInt(0)) + tx := gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(0), + uint64(50_000), + big.NewInt(0), + data, + ) + dryRunResult := dryRunTx(t, tx, ctx, vm, snapshot) + + require.Equal(t, types.ErrCodeNoError, dryRunResult.ErrorCode) + require.Equal(t, types.StatusSuccessful, dryRunResult.Status) + require.Greater(t, dryRunResult.GasConsumed, uint64(0)) + + code := []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.run(tx: tx, coinbase: coinbase) + } + `, + evmAddress, + )) + + // Use the gas estimation from Evm.dryRun with some buffer + gasLimit := dryRunResult.GasConsumed + gethParams.SstoreSentryGasEIP2200 + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + data, + big.NewInt(0), + gasLimit, + big.NewInt(0), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + script := fvm.Script(code).WithArguments( + json.MustEncode(innerTx), + json.MustEncode(coinbase), + ) + + _, output, err := vm.Run( + ctx, + script, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + + res, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + require.Equal(t, res.GasConsumed, dryRunResult.GasConsumed) + }) + }) + + t.Run("test dry run store new value", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(tx: [UInt8], coinbaseBytes: [UInt8; 20]){ + prepare(account: &Account) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + + assert(res.status == EVM.Status.successful, message: "unexpected status") + assert(res.errorCode == 0, message: "unexpected error code") + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + num := int64(12) + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "store", big.NewInt(num)), + big.NewInt(0), + uint64(50_000), + big.NewInt(0), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + 
unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(innerTx)). + AddArgument(json.MustEncode(coinbase)). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + _, output, err := vm.Run( + ctx, + tx, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + + data := testContract.MakeCallData(t, "store", big.NewInt(100)) + tx1 := gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(0), + uint64(50_000), + big.NewInt(0), + data, + ) + dryRunResult := dryRunTx(t, tx1, ctx, vm, snapshot) + + require.Equal(t, types.ErrCodeNoError, dryRunResult.ErrorCode) + require.Equal(t, types.StatusSuccessful, dryRunResult.Status) + require.Greater(t, dryRunResult.GasConsumed, uint64(0)) + + code = []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.run(tx: tx, coinbase: coinbase) + } + `, + evmAddress, + )) + + // Decrease the nonce because we are using Cadence scripts, and not + // transactions, which means that no state change is persisted. + testAccount.SetNonce(testAccount.Nonce() - 1) + innerTxBytes = testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + data, + big.NewInt(0), + dryRunResult.GasConsumed, // use the gas estimation from Evm.dryRun + big.NewInt(0), + ) + + innerTx = cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase = cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + script := fvm.Script(code).WithArguments( + json.MustEncode(innerTx), + json.MustEncode(coinbase), + ) + + _, output, err = vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + res, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + require.Equal(t, res.GasConsumed, dryRunResult.GasConsumed) + }) + }) + + t.Run("test dry run clear current value", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(tx: [UInt8], coinbaseBytes: [UInt8; 20]){ + prepare(account: &Account) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + + assert(res.status == EVM.Status.successful, message: "unexpected status") + assert(res.errorCode == 0, message: "unexpected error code") + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + num := int64(100) + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "store", big.NewInt(num)), + big.NewInt(0), + uint64(50_000), + big.NewInt(0), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(innerTx)). + AddArgument(json.MustEncode(coinbase)). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + state, output, err := vm.Run( + ctx, + tx, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + snapshot = snapshot.Append(state) + + data := testContract.MakeCallData(t, "store", big.NewInt(0)) + tx1 := gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(0), + uint64(50_000), + big.NewInt(0), + data, + ) + dryRunResult := dryRunTx(t, tx1, ctx, vm, snapshot) + + require.Equal(t, types.ErrCodeNoError, dryRunResult.ErrorCode) + require.Equal(t, types.StatusSuccessful, dryRunResult.Status) + require.Greater(t, dryRunResult.GasConsumed, uint64(0)) + + code = []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.run(tx: tx, coinbase: coinbase) + } + `, + evmAddress, + )) + + // use the gas estimation from Evm.dryRun with the necessary buffer gas + gasLimit := dryRunResult.GasConsumed + gethParams.SstoreClearsScheduleRefundEIP3529 + innerTxBytes = testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + data, + big.NewInt(0), + gasLimit, + big.NewInt(0), + ) + + innerTx = cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase = cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + script := fvm.Script(code).WithArguments( + json.MustEncode(innerTx), + json.MustEncode(coinbase), + ) + + _, output, err = vm.Run( + ctx, + script, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + + res, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + require.Equal(t, res.GasConsumed, dryRunResult.GasConsumed) + }) + }) + + // this test makes sure the dry-run that updates the value on the contract + // doesn't persist the change, and that when the value is read afterwards it is not updated.
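+ // (the dry-run helper discards the execution snapshot it produces instead of appending + // it to the snapshot tree, so the follow-up read observes the original state)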
+ t.Run("test dry run for any side-effects", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + updatedValue := int64(1337) + data := testContract.MakeCallData(t, "store", big.NewInt(updatedValue)) + tx := gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(0), + uint64(1000000), + big.NewInt(0), + data, + ) + + result := dryRunTx(t, tx, ctx, vm, snapshot) + require.Equal(t, types.ErrCodeNoError, result.ErrorCode) + require.Equal(t, types.StatusSuccessful, result.Status) + require.Greater(t, result.GasConsumed, uint64(0)) + + // query the value make sure it's not updated + code := []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.run(tx: tx, coinbase: coinbase) + } + `, + evmAddress, + )) + + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "retrieve"), + big.NewInt(0), + uint64(100_000), + big.NewInt(0), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + script := fvm.Script(code).WithArguments( + json.MustEncode(innerTx), + json.MustEncode(coinbase), + ) + + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + res, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + // make sure the value we used in the dry-run is not the same as the value stored in contract + require.NotEqual(t, updatedValue, new(big.Int).SetBytes(res.ReturnedData).Int64()) + }) + }) + + t.Run("test dry run contract deployment", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + tx := gethTypes.NewContractCreation( + 0, + big.NewInt(0), + uint64(10_000_000), + big.NewInt(0), + testContract.ByteCode, + ) + + result := dryRunTx(t, tx, ctx, vm, snapshot) + require.Equal(t, types.ErrCodeNoError, result.ErrorCode) + require.Equal(t, types.StatusSuccessful, result.Status) + require.Greater(t, result.GasConsumed, uint64(0)) + require.NotNil(t, result.ReturnedData) + require.NotNil(t, result.DeployedContractAddress) + require.NotEmpty(t, result.DeployedContractAddress.String()) + }) + }) + + t.Run("test dry run validation error", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + tx := gethTypes.NewContractCreation( + 0, + big.NewInt(100), // more than available + uint64(1000000), + big.NewInt(0), + nil, + ) + + result := dryRunTx(t, tx, ctx, vm, snapshot) + assert.Equal(t, types.ValidationErrCodeInsufficientFunds, result.ErrorCode) + assert.Equal(t, types.StatusInvalid, result.Status) + assert.Equal(t, types.InvalidTransactionGasCost, int(result.GasConsumed)) + }) + }) +} + +func TestDryCall(t *testing.T) { + t.Parallel() + + 
chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + evmAddress := sc.EVMContract.Address.HexWithPrefix() + + dryCall := func( + t *testing.T, + tx *gethTypes.Transaction, + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + ) (*types.ResultSummary, *snapshot.ExecutionSnapshot) { + code := []byte(fmt.Sprintf(` + import EVM from %s + + access(all) + fun main(data: [UInt8], to: String, gasLimit: UInt64, value: UInt): EVM.Result { + return EVM.dryCall( + from: EVM.EVMAddress(bytes: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 15]), + to: EVM.addressFromString(to), + data: data, + gasLimit: gasLimit, + value: EVM.Balance(attoflow: value) + ) + }`, + evmAddress, + )) + + require.NotNil(t, tx.To()) + to := tx.To().Hex() + toAddress, err := cadence.NewString(to) + require.NoError(t, err) + + script := fvm.Script(code).WithArguments( + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(tx.Data()), + ).WithType(stdlib.EVMTransactionBytesCadenceType), + ), + json.MustEncode(toAddress), + json.MustEncode(cadence.NewUInt64(tx.Gas())), + json.MustEncode(cadence.NewUInt(uint(tx.Value().Uint64()))), + ) + execSnapshot, output, err := vm.Run( + ctx, + script, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + require.Len(t, output.Events, 0) + + result, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + return result, execSnapshot + } + + // this test checks that gas limit is correctly used and gas usage correctly reported + t.Run("test dryCall with different gas limits", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + data := testContract.MakeCallData(t, "store", big.NewInt(1337)) + + limit := uint64(50_000) + tx := gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(0), + limit, + big.NewInt(0), + data, + ) + result, _ := dryCall(t, tx, ctx, vm, snapshot) + require.Equal(t, types.ErrCodeNoError, result.ErrorCode) + require.Equal(t, types.StatusSuccessful, result.Status) + require.Greater(t, result.GasConsumed, uint64(0)) + require.Less(t, result.GasConsumed, limit) + + // gas limit too low, but still bigger than intrinsic gas value + limit = uint64(24_216) + tx = gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(0), + limit, + big.NewInt(0), + data, + ) + result, _ = dryCall(t, tx, ctx, vm, snapshot) + require.Equal(t, types.ExecutionErrCodeOutOfGas, result.ErrorCode) + require.Equal(t, types.StatusFailed, result.Status) + require.Equal(t, result.GasConsumed, limit) + }) + }) + + t.Run("test dryCall does not form EVM transactions", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(tx: [UInt8], coinbaseBytes: [UInt8; 20]){ + prepare(account: &Account) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + + assert(res.status == EVM.Status.successful, message: "unexpected status") + assert(res.errorCode == 0, message: "unexpected error code") + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + num := int64(42) + 
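// first store a value through a regular EVM.run transaction, which is expected to emit + // a TransactionExecuted event and to update registers; the dryCall read performed + // afterwards should emit no events and update no registers +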
innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "store", big.NewInt(num)), + big.NewInt(0), + uint64(50_000), + big.NewInt(0), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(json.MustEncode(innerTx)). + AddArgument(json.MustEncode(coinbase)). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + state, output, err := vm.Run( + ctx, + tx, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + assert.Len(t, output.Events, 1) + assert.Len(t, state.UpdatedRegisterIDs(), 4) + assert.Equal( + t, + flow.EventType("A.f8d6e0586b0a20c7.EVM.TransactionExecuted"), + output.Events[0].Type, + ) + snapshot = snapshot.Append(state) + + code = []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(data: [UInt8], to: String, gasLimit: UInt64, value: UInt){ + prepare(account: &Account) { + let res = EVM.dryCall( + from: EVM.EVMAddress(bytes: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 15]), + to: EVM.addressFromString(to), + data: data, + gasLimit: gasLimit, + value: EVM.Balance(attoflow: value) + ) + + assert(res.status == EVM.Status.successful, message: "unexpected status") + assert(res.errorCode == 0, message: "unexpected error code") + + let values = EVM.decodeABI(types: [Type<UInt256>()], data: res.data) + assert(values.length == 1) + + let number = values[0] as! UInt256 + assert(number == 42, message: String.encodeHex(res.data)) + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + data := json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(testContract.MakeCallData(t, "retrieve")), + ).WithType(stdlib.EVMTransactionBytesCadenceType), + ) + toAddress, err := cadence.NewString(testContract.DeployedAt.ToCommon().Hex()) + require.NoError(t, err) + to := json.MustEncode(toAddress) + + txBody, err = flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + AddArgument(data). + AddArgument(to). + AddArgument(json.MustEncode(cadence.NewUInt64(50_000))). + AddArgument(json.MustEncode(cadence.NewUInt(0))). + Build() + require.NoError(t, err) + + tx = fvm.Transaction(txBody, 0) + + state, output, err = vm.Run( + ctx, + tx, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + assert.Len(t, output.Events, 0) + assert.Len(t, state.UpdatedRegisterIDs(), 0) + }) + }) + + // this test makes sure the dryCall that updates the value on the contract + // doesn't persist the change, and that when the value is read afterwards it is not updated.
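+ // (the dryCall helper also returns the execution snapshot, so the test can assert + // directly that the call updated no registers)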
+ t.Run("test dryCall has no side-effects", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + updatedValue := int64(1337) + data := testContract.MakeCallData(t, "store", big.NewInt(updatedValue)) + tx := gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(0), + uint64(1000000), + big.NewInt(0), + data, + ) + + result, state := dryCall(t, tx, ctx, vm, snapshot) + require.Len(t, state.UpdatedRegisterIDs(), 0) + require.Equal(t, types.ErrCodeNoError, result.ErrorCode) + require.Equal(t, types.StatusSuccessful, result.Status) + require.Greater(t, result.GasConsumed, uint64(0)) + + // query the value make sure it's not updated + code := []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.run(tx: tx, coinbase: coinbase) + } + `, + evmAddress, + )) + + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "retrieve"), + big.NewInt(0), + uint64(100_000), + big.NewInt(0), + ) + + innerTx := cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + script := fvm.Script(code).WithArguments( + json.MustEncode(innerTx), + json.MustEncode(coinbase), + ) + + state, output, err := vm.Run( + ctx, + script, + snapshot, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + require.Len(t, state.UpdatedRegisterIDs(), 0) + + res, err := impl.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + // make sure the value we used in the dryCall is not the same as the value stored in contract + require.NotEqual(t, updatedValue, new(big.Int).SetBytes(res.ReturnedData).Int64()) + }) + }) + + t.Run("test dryCall validation error", func(t *testing.T) { + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + data := testContract.MakeCallData(t, "store", big.NewInt(10337)) + tx := gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(1000), // more than available + uint64(35_000), + big.NewInt(0), + data, + ) + + result, _ := dryCall(t, tx, ctx, vm, snapshot) + assert.Equal(t, types.ValidationErrCodeInsufficientFunds, result.ErrorCode) + assert.Equal(t, types.StatusInvalid, result.Status) + assert.Equal(t, types.InvalidTransactionGasCost, int(result.GasConsumed)) + + // random function selector + data = []byte{254, 234, 101, 199} + tx = gethTypes.NewTransaction( + 0, + testContract.DeployedAt.ToCommon(), + big.NewInt(0), + uint64(25_000), + big.NewInt(0), + data, + ) + + result, _ = dryCall(t, tx, ctx, vm, snapshot) + assert.Equal(t, types.ExecutionErrCodeExecutionReverted, result.ErrorCode) + assert.Equal(t, types.StatusFailed, result.Status) + assert.Equal(t, uint64(21331), result.GasConsumed) + }) + }) +} + +func TestCadenceArch(t *testing.T) { + t.Parallel() + + t.Run("testing calling Cadence arch - flow block height (happy case)", func(t *testing.T) { + 
chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + assert(res.status == EVM.Status.successful, message: "test failed: ".concat(res.errorCode.toString())) + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "verifyArchCallToFlowBlockHeight", ctx.BlockHeader.Height), + big.NewInt(0), + uint64(10_000_000), + big.NewInt(0), + ) + script := fvm.Script(code).WithArguments( + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType), + ), + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType), + ), + ) + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + }) + }) + + t.Run("testing calling Cadence arch - revertible random", func(t *testing.T) { + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): [UInt8] { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + assert(res.status == EVM.Status.successful, message: "test failed: ".concat(res.errorCode.toString())) + return res.data + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "verifyArchCallToRevertibleRandom"), + big.NewInt(0), + uint64(10_000_000), + big.NewInt(0), + ) + script := fvm.Script(code).WithArguments( + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType), + ), + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType), + ), + ) + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + res := make([]byte, 8) + vals := output.Value.(cadence.Array).Values + vals = vals[len(vals)-8:] // only the last 8 bytes are the value + for i := range res { + res[i] = byte(vals[i].(cadence.UInt8)) + } + + actualRand := binary.BigEndian.Uint64(res) + // because the PRG uses the script ID and the random source, we cannot predict the random value; + // we can fix the random source, but since the script ID is generated by hashing the + // script and args, and the arg is a signed transaction which always changes, + // we cannot fix the value + require.Greater(t, actualRand, uint64(0)) + }) + }) + + t.Run("testing calling Cadence arch - random source (happy case)", func(t *testing.T) { + chain := flow.Emulator.Chain() + sc :=
systemcontracts.SystemContractsForChain(chain.ChainID()) + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + entropy := []byte{13, 37} + // the source corresponding to the above entropy + source := []byte{0x5b, 0xa1, 0xce, 0xab, 0x64, 0x11, 0x8d, 0x2c, 0xd8, 0xae, 0x8c, 0xbb, 0xf7, 0x50, 0x5e, 0xf5, 0xdf, 0xad, 0xfc, 0xf7, 0x2d, 0x3a, 0x46, 0x78, 0xd5, 0xe5, 0x1d, 0xb7, 0xf2, 0xb8, 0xe5, 0xd6} + + // we must record a new heartbeat with a fixed block; we manually execute a transaction to do so, + // since doing this automatically would require a block computer and the whole execution setup + height := uint64(1) + block1 := unittest.BlockFixture( + unittest.Block.WithHeight(height), + ) + ctx.BlockHeader = block1.ToHeader() + ctx.EntropyProvider = testutil.EntropyProviderFixture(entropy) // fix the entropy + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf(` + import RandomBeaconHistory from %s + + transaction { + prepare(serviceAccount: auth(Capabilities, Storage) &Account) { + let randomBeaconHistoryHeartbeat = serviceAccount.storage.borrow<&RandomBeaconHistory.Heartbeat>( + from: RandomBeaconHistory.HeartbeatStoragePath) + ?? panic("Couldn't borrow RandomBeaconHistory.Heartbeat Resource") + randomBeaconHistoryHeartbeat.heartbeat(randomSourceHistory: randomSourceHistory()) + } + }`, sc.RandomBeaconHistory.Address.HexWithPrefix())), + ). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + Build() + require.NoError(t, err) + + s, out, err := vm.Run(ctx, fvm.Transaction(txBody, 0), snapshot) + require.NoError(t, err) + require.NoError(t, out.Err) + + snapshot = snapshot.Append(s) + + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): [UInt8] { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + assert(res.status == EVM.Status.successful, message: "evm tx wrong status") + return res.data + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + // we fake progressing to a new block height, since the random beacon checks that the + // current height (2) is bigger than the requested height (1) + block1.Height = 2 + ctx.BlockHeader = block1.ToHeader() + + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "verifyArchCallToRandomSource", height), + big.NewInt(0), + uint64(10_000_000), + big.NewInt(0), + ) + script := fvm.Script(code).WithArguments( + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType), + ), + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType), + ), + ) + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + res := make([]byte, environment.RandomSourceHistoryLength) + vals := output.Value.(cadence.Array).Values + require.Len(t, vals, environment.RandomSourceHistoryLength) + + for i := range res { + res[i] = byte(vals[i].(cadence.UInt8)) + } + require.Equal(t, source, res) + }) + }) + + t.Run("testing calling Cadence arch - random source (failed due to incorrect height)", func(t *testing.T) { + chain := flow.Emulator.Chain() + sc :=
systemcontracts.SystemContractsForChain(chain.ChainID()) + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + // we must record a new heartbeat with a fixed block; we manually execute a transaction to do so, + // since doing this automatically would require a block computer and the whole execution setup + height := uint64(1) + block1 := unittest.BlockFixture( + unittest.Block.WithHeight(height), + ) + ctx.BlockHeader = block1.ToHeader() + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf(` + import RandomBeaconHistory from %s + + transaction { + prepare(serviceAccount: auth(Capabilities, Storage) &Account) { + let randomBeaconHistoryHeartbeat = serviceAccount.storage.borrow<&RandomBeaconHistory.Heartbeat>( + from: RandomBeaconHistory.HeartbeatStoragePath) + ?? panic("Couldn't borrow RandomBeaconHistory.Heartbeat Resource") + randomBeaconHistoryHeartbeat.heartbeat(randomSourceHistory: randomSourceHistory()) + } + }`, sc.RandomBeaconHistory.Address.HexWithPrefix())), + ). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). + Build() + require.NoError(t, err) + + s, out, err := vm.Run(ctx, fvm.Transaction(txBody, 0), snapshot) + require.NoError(t, err) + require.NoError(t, out.Err) + + snapshot = snapshot.Append(s) + + height = 1337 // invalid + // we make sure the transaction fails due to the requested height being invalid + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + // we fake progressing to a new block height, since the random beacon checks that the + // current height (2) is bigger than the requested height (1) + block1.Height = 2 + ctx.BlockHeader = block1.ToHeader() + + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "verifyArchCallToRandomSource", height), + big.NewInt(0), + uint64(10_000_000), + big.NewInt(0), + ) + script := fvm.Script(code).WithArguments( + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType), + ), + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType), + ), + ) + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + // make sure the error is correct + require.ErrorContains(t, output.Err, "Source of randomness not yet recorded") + }) + }) + + t.Run("testing calling Cadence arch - COA ownership proof (happy case)", func(t *testing.T) { + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + // create a flow account + privateKey, err := testutil.GenerateAccountPrivateKey() + require.NoError(t, err) + + snapshot, accounts, err := testutil.CreateAccounts( + vm, + snapshot, + []flow.AccountPrivateKey{privateKey}, + chain) + require.NoError(t, err) + flowAccount := accounts[0] + + // create/store/link coa + coaAddress, snapshot := setupCOA( + t, + ctx, + vm, + snapshot, +
flowAccount, + 0, + ) + + data := RandomCommonHash(t) + + hasher, err := crypto.NewPrefixedHashing(privateKey.HashAlgo, "FLOW-V0.0-user") + require.NoError(t, err) + + sig, err := privateKey.PrivateKey.Sign(data.Bytes(), hasher) + require.NoError(t, err) + + validProof := types.COAOwnershipProof{ + KeyIndices: []uint64{0}, + Address: types.FlowAddress(flowAccount), + CapabilityPath: "coa", + Signatures: []types.Signature{types.Signature(sig)}, + } + + encodedValidProof, err := validProof.Encode() + require.NoError(t, err) + + // create transaction for proof verification + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + assert(res.status == EVM.Status.successful, message: "test failed: ".concat(res.errorCode.toString())) + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "verifyArchCallToVerifyCOAOwnershipProof", + true, + coaAddress.ToCommon(), + data, + encodedValidProof), + big.NewInt(0), + uint64(10_000_000), + big.NewInt(0), + ) + verifyScript := fvm.Script(code).WithArguments( + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType( + stdlib.EVMTransactionBytesCadenceType, + )), + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8( + testAccount.Address().Bytes(), + ), + ).WithType( + stdlib.EVMAddressBytesCadenceType, + ), + ), + ) + // run proof transaction + _, output, err := vm.Run( + ctx, + verifyScript, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + invalidProof := types.COAOwnershipProof{ + KeyIndices: []uint64{1000}, + Address: types.FlowAddress(flowAccount), + CapabilityPath: "coa", + Signatures: []types.Signature{types.Signature(sig)}, + } + + encodedInvalidProof, err := invalidProof.Encode() + require.NoError(t, err) + + // invalid proof tx + innerTxBytes = testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "verifyArchCallToVerifyCOAOwnershipProof", + true, + coaAddress.ToCommon(), + data, + encodedInvalidProof), + big.NewInt(0), + uint64(10_000_000), + big.NewInt(0), + ) + + verifyScript = fvm.Script(code).WithArguments( + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(innerTxBytes), + ).WithType( + stdlib.EVMTransactionBytesCadenceType, + )), + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8( + testAccount.Address().Bytes(), + ), + ).WithType( + stdlib.EVMAddressBytesCadenceType, + ), + ), + ) + // run proof transaction + _, output, err = vm.Run( + ctx, + verifyScript, + snapshot) + require.NoError(t, err) + require.Error(t, output.Err) + }) + }) +} + +func TestEVMFileSystemContract(t *testing.T) { + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + runFileSystemContract := func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + computeLimit uint64, + ) ( + *snapshot.ExecutionSnapshot, + fvm.ProcedureOutput, + ) { + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(tx: [UInt8], coinbaseBytes: [UInt8; 20]){ + prepare(account: &Account) { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + } + } + `, + 
sc.EVMContract.Address.HexWithPrefix(),
+		))
+
+		coinbaseAddr := types.Address{1, 2, 3}
+		coinbaseBalance := getEVMAccountBalance(t, ctx, vm, snapshot, coinbaseAddr)
+		require.Zero(t, types.BalanceToBigInt(coinbaseBalance).Uint64())
+
+		var buffer bytes.Buffer
+		address := common.HexToAddress("0xea02F564664A477286B93712829180be4764fAe2")
+		chunkHash := "0x2521660d04da85198d1cc71c20a69b7e875ebbf1682f6a5c6a3fec69068ccc13"
+		index := int64(0)
+		chunk := "T1ARYBYsPOJPVYoU7wVp+PYuXmdS6bgkLf2egcxa+1hP64wAxfkLXtnllU6DmEuj+Id4oWl1ZV4ftQ+ofQ3DQhOoxNlPZGOYbhoMLuzE"
+		for i := 0; i <= 500; i++ {
+			buffer.WriteString(chunk)
+		}
+		innerTxBytes := testAccount.PrepareSignAndEncodeTx(t,
+			testContract.DeployedAt.ToCommon(),
+			testContract.MakeCallData(
+				t,
+				"publishChunk",
+				address,
+				chunkHash,
+				big.NewInt(index),
+				buffer.String(),
+			),
+			big.NewInt(0),
+			uint64(2_132_171),
+			big.NewInt(1),
+		)
+
+		innerTx := cadence.NewArray(
+			unittest.BytesToCdcUInt8(innerTxBytes),
+		).WithType(stdlib.EVMTransactionBytesCadenceType)
+
+		coinbase := cadence.NewArray(
+			unittest.BytesToCdcUInt8(coinbaseAddr.Bytes()),
+		).WithType(stdlib.EVMAddressBytesCadenceType)
+
+		txBody, err := flow.NewTransactionBodyBuilder().
+			SetScript(code).
+			SetComputeLimit(computeLimit).
+			AddAuthorizer(sc.FlowServiceAccount.Address).
+			AddArgument(json.MustEncode(innerTx)).
+			AddArgument(json.MustEncode(coinbase)).
+			Build()
+		require.NoError(t, err)
+
+		tx := fvm.Transaction(
+			txBody,
+			0,
+		)
+
+		state, output, err := vm.Run(ctx, tx, snapshot)
+		require.NoError(t, err)
+		return state, output
+	}
+
+	t.Run("happy case", func(t *testing.T) {
+		RunContractWithNewEnvironment(
+			t,
+			chain,
+			GetFileSystemContract(t),
+			func(
+				ctx fvm.Context,
+				vm fvm.VM,
+				snapshot snapshot.SnapshotTree,
+				testContract *TestContract,
+				testAccount *EOATestAccount,
+			) {
+				state, output := runFileSystemContract(ctx, vm, snapshot, testContract, testAccount, 10001)
+
+				require.NoError(t, output.Err)
+				require.NotEmpty(t, state.WriteSet)
+				snapshot = snapshot.Append(state)
+
+				// assert event fields are correct
+				require.Len(t, output.Events, 2)
+				txEvent := output.Events[0]
+				txEventPayload := TxEventToPayload(t, txEvent, sc.EVMContract.Address)
+
+				// fee transfer event
+				feeTransferEvent := output.Events[1]
+				feeTransferEventPayload := TxEventToPayload(t, feeTransferEvent, sc.EVMContract.Address)
+				require.Equal(t, uint16(types.ErrCodeNoError), feeTransferEventPayload.ErrorCode)
+				require.Equal(t, uint16(1), feeTransferEventPayload.Index)
+				require.Equal(t, uint64(21000), feeTransferEventPayload.GasConsumed)
+
+				// commit block
+				blockEventPayload, _ := callEVMHeartBeat(t,
+					ctx,
+					vm,
+					snapshot,
+				)
+
+				require.NotEmpty(t, blockEventPayload.Hash)
+				require.Equal(t, uint64(2_132_170), blockEventPayload.TotalGasUsed)
+
+				txHashes := types.TransactionHashes{txEventPayload.Hash, feeTransferEventPayload.Hash}
+				require.Equal(t,
+					txHashes.RootHash(),
+					blockEventPayload.TransactionHashRoot,
+				)
+				require.NotEmpty(t, blockEventPayload.ReceiptRoot)
+
+				require.Equal(t, uint16(types.ErrCodeNoError), txEventPayload.ErrorCode)
+				require.Equal(t, uint16(0), txEventPayload.Index)
+				require.Equal(t, blockEventPayload.Height, txEventPayload.BlockHeight)
+				require.Equal(t, blockEventPayload.TotalGasUsed-feeTransferEventPayload.GasConsumed, txEventPayload.GasConsumed)
+				require.Empty(t, txEventPayload.ContractAddress)
+
+				require.Greater(t, int(output.ComputationUsed), 400)
+			},
+			fvm.WithExecutionEffortWeights(
+				environment.MainnetExecutionEffortWeights,
+			),
+		)
+	})
+
+	t.Run("insufficient FVM computation to execute EVM transaction", func(t *testing.T) {
+		RunContractWithNewEnvironment(
+			t,
+			chain,
+			GetFileSystemContract(t),
+			func(
+				ctx fvm.Context,
+				vm fvm.VM,
+				snapshot snapshot.SnapshotTree,
+				testContract *TestContract,
+				testAccount *EOATestAccount,
+			) {
+				state, output := runFileSystemContract(ctx, vm, snapshot, testContract, testAccount, 400)
+				snapshot = snapshot.Append(state)
+
+				require.Len(t, output.Events, 0)
+
+				// commit block
+				blockEventPayload, _ := callEVMHeartBeat(t,
+					ctx,
+					vm,
+					snapshot,
+				)
+
+				require.NotEmpty(t, blockEventPayload.Hash)
+				require.Equal(t, uint64(0), blockEventPayload.TotalGasUsed)
+
+				// only a small amount of computation was used, because the EVM transaction was never executed
+				require.Less(t, int(output.ComputationUsed), 20)
+			},
+			fvm.WithExecutionEffortWeights(
+				environment.MainnetExecutionEffortWeights,
+			),
+		)
+	})
+}
+
+func createAndFundFlowAccount(
+	t *testing.T,
+	ctx fvm.Context,
+	vm fvm.VM,
+	snapshot snapshot.SnapshotTree,
+) (flow.Address, flow.AccountPrivateKey, snapshot.SnapshotTree) {
+
+	privateKey, err := testutil.GenerateAccountPrivateKey()
+	require.NoError(t, err)
+
+	snapshot, accounts, err := testutil.CreateAccounts(
+		vm,
+		snapshot,
+		[]flow.AccountPrivateKey{privateKey},
+		ctx.Chain)
+	require.NoError(t, err)
+	flowAccount := accounts[0]
+
+	// fund the account with 100 tokens
+	sc := systemcontracts.SystemContractsForChain(ctx.Chain.ChainID())
+	code := []byte(fmt.Sprintf(
+		`
+		import FlowToken from %s
+		import FungibleToken from %s
+
+		transaction {
+			prepare(account: auth(BorrowValue) &Account) {
+				let admin = account.storage
+					.borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin)!
+
+				let minter <- admin.createNewMinter(allowedAmount: 100.0)
+				let vault <- minter.mintTokens(amount: 100.0)
+
+				let receiverRef = getAccount(%s).capabilities
+					.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver)
+					?? panic("Could not borrow receiver reference to the recipient's Vault")
+				receiverRef.deposit(from: <-vault)
+
+				destroy minter
+			}
+		}
+		`,
+		sc.FlowToken.Address.HexWithPrefix(),
+		sc.FungibleToken.Address.HexWithPrefix(),
+		flowAccount.HexWithPrefix(),
+	))
+
+	txBody, err := flow.NewTransactionBodyBuilder().
+		SetScript(code).
+		SetPayer(sc.FlowServiceAccount.Address).
+		AddAuthorizer(sc.FlowServiceAccount.Address).
+		Build()
+	require.NoError(t, err)
+
+	tx := fvm.Transaction(txBody, 0)
+
+	es, output, err := vm.Run(ctx, tx, snapshot)
+	require.NoError(t, err)
+	require.NoError(t, output.Err)
+	snapshot = snapshot.Append(es)
+
+	bal := getFlowAccountBalance(
+		t,
+		ctx,
+		vm,
+		snapshot,
+		flowAccount)
+	// 100 FLOW in UFix64 fixed-point representation
+	require.Equal(t, uint64(10_000_000_000), bal)
+
+	return flowAccount, privateKey, snapshot
+}
+
+func setupCOA(
+	t *testing.T,
+	ctx fvm.Context,
+	vm fvm.VM,
+	snap snapshot.SnapshotTree,
+	coaOwner flow.Address,
+	initialFund uint64,
+) (types.Address, snapshot.SnapshotTree) {
+
+	sc := systemcontracts.SystemContractsForChain(ctx.Chain.ChainID())
+	// create a COA and store it under the owner's flow account
+	script := []byte(fmt.Sprintf(
+		`
+		import EVM from %s
+		import FungibleToken from %s
+		import FlowToken from %s
+
+		transaction(amount: UFix64) {
+			prepare(account: auth(Capabilities, Storage) &Account) {
+				let cadenceOwnedAccount1 <- EVM.createCadenceOwnedAccount()
+
+				let vaultRef = account.storage
+					.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault)
panic("Could not borrow reference to the owner's Vault!") + + if amount > 0.0 { + let vault <- vaultRef.withdraw(amount: amount) as! @FlowToken.Vault + cadenceOwnedAccount1.deposit(from: <-vault) + } + + account.storage.save<@EVM.CadenceOwnedAccount>( + <-cadenceOwnedAccount1, + to: /storage/coa + ) + + let cap = account.capabilities.storage + .issue<&EVM.CadenceOwnedAccount>(/storage/coa) + account.capabilities.publish(cap, at: /public/coa) + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + sc.FungibleToken.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix(), + )) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(script). + SetPayer(coaOwner). + AddAuthorizer(coaOwner). + AddArgument(json.MustEncode(cadence.UFix64(initialFund))). + Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + es, output, err := vm.Run(ctx, tx, snap) + require.NoError(t, err) + require.NoError(t, output.Err) + snap = snap.Append(es) + + // 3rd event is the cadence owned account created event + coaAddress, err := types.COAAddressFromFlowCOACreatedEvent(sc.EVMContract.Address, output.Events[1]) + require.NoError(t, err) + + return coaAddress, snap +} + +func callEVMHeartBeat( + t *testing.T, + ctx fvm.Context, + vm fvm.VM, + snap snapshot.SnapshotTree, +) (*events.BlockEventPayload, snapshot.SnapshotTree) { + sc := systemcontracts.SystemContractsForChain(ctx.Chain.ChainID()) + + heartBeatCode := []byte(fmt.Sprintf( + ` + import EVM from %s + transaction { + prepare(serviceAccount: auth(BorrowValue) &Account) { + let evmHeartbeat = serviceAccount.storage + .borrow<&EVM.Heartbeat>(from: /storage/EVMHeartbeat) + ?? panic("Couldn't borrow EVM.Heartbeat Resource") + evmHeartbeat.heartbeat() + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript(heartBeatCode). + SetPayer(sc.FlowServiceAccount.Address). + AddAuthorizer(sc.FlowServiceAccount.Address). 
+ Build() + require.NoError(t, err) + + tx := fvm.Transaction(txBody, 0) + + state, output, err := vm.Run(ctx, tx, snap) + require.NoError(t, err) + require.NoError(t, output.Err) + require.NotEmpty(t, state.WriteSet) + snap = snap.Append(state) + + // validate block event + require.Len(t, output.Events, 1) + blockEvent := output.Events[0] + return BlockEventToPayload(t, blockEvent, sc.EVMContract.Address), snap +} + +func getFlowAccountBalance( + t *testing.T, + ctx fvm.Context, + vm fvm.VM, + snap snapshot.SnapshotTree, + address flow.Address, +) uint64 { + code := []byte(fmt.Sprintf( + ` + access(all) fun main(): UFix64 { + return getAccount(%s).balance + } + `, + address.HexWithPrefix(), + )) + + script := fvm.Script(code) + _, output, err := vm.Run( + ctx, + script, + snap) + require.NoError(t, err) + require.NoError(t, output.Err) + val, ok := output.Value.(cadence.UFix64) + require.True(t, ok) + return uint64(val) +} + +func getEVMAccountBalance( + t *testing.T, + ctx fvm.Context, + vm fvm.VM, + snap snapshot.SnapshotTree, + address types.Address, +) types.Balance { + code := []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(addr: [UInt8; 20]): UInt { + return EVM.EVMAddress(bytes: addr).balance().inAttoFLOW() + } + `, + systemcontracts.SystemContractsForChain( + ctx.Chain.ChainID(), + ).EVMContract.Address.HexWithPrefix(), + )) + + script := fvm.Script(code).WithArguments( + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(address.Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType), + ), + ) + _, output, err := vm.Run( + ctx, + script, + snap) + require.NoError(t, err) + require.NoError(t, output.Err) + val, ok := output.Value.(cadence.UInt) + require.True(t, ok) + return val.Big() +} + +func getEVMAccountNonce( + t *testing.T, + ctx fvm.Context, + vm fvm.VM, + snap snapshot.SnapshotTree, + address types.Address, +) uint64 { + code := []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(addr: [UInt8; 20]): UInt64 { + return EVM.EVMAddress(bytes: addr).nonce() + } + `, + systemcontracts.SystemContractsForChain( + ctx.Chain.ChainID(), + ).EVMContract.Address.HexWithPrefix(), + )) + + script := fvm.Script(code).WithArguments( + json.MustEncode( + cadence.NewArray( + unittest.BytesToCdcUInt8(address.Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType), + ), + ) + _, output, err := vm.Run( + ctx, + script, + snap) + require.NoError(t, err) + require.NoError(t, output.Err) + val, ok := output.Value.(cadence.UInt64) + require.True(t, ok) + return uint64(val) +} + +func RunWithNewEnvironment( + t *testing.T, + chain flow.Chain, + f func( + fvm.Context, + fvm.VM, + snapshot.SnapshotTree, + *TestContract, + *EOATestAccount, + ), +) { + rootAddr := evm.StorageAccountAddress(chain.ChainID()) + RunWithTestBackend(t, func(backend *TestBackend) { + RunWithDeployedContract(t, GetStorageTestContract(t), backend, rootAddr, func(testContract *TestContract) { + RunWithEOATestAccount(t, backend, rootAddr, func(testAccount *EOATestAccount) { + blocks := new(envMock.Blocks) + block1 := unittest.BlockFixture() + blocks.On("ByHeightFrom", + block1.Height, + block1.ToHeader(), + ).Return(block1.ToHeader(), nil) + + opts := []fvm.Option{ + fvm.WithChain(chain), + fvm.WithBlockHeader(block1.ToHeader()), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithEntropyProvider(testutil.EntropyProviderFixture(nil)), + fvm.WithRandomSourceHistoryCallAllowed(true), + fvm.WithBlocks(blocks), + 
fvm.WithCadenceLogging(true), + } + ctx := fvm.NewContext(opts...) + + vm := fvm.NewVirtualMachine() + snapshotTree := snapshot.NewSnapshotTree(backend) + + baseBootstrapOpts := []fvm.BootstrapProcedureOption{ + fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply), + } + + executionSnapshot, _, err := vm.Run( + ctx, + fvm.Bootstrap(unittest.ServiceAccountPublicKey, baseBootstrapOpts...), + snapshotTree) + require.NoError(t, err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + f( + fvm.NewContextFromParent(ctx, fvm.WithEVMEnabled(true)), + vm, + snapshotTree, + testContract, + testAccount, + ) + }) + }) + }) +} + +func RunContractWithNewEnvironment( + t *testing.T, + chain flow.Chain, + tc *TestContract, + f func( + fvm.Context, + fvm.VM, + snapshot.SnapshotTree, + *TestContract, + *EOATestAccount, + ), + bootstrapOpts ...fvm.BootstrapProcedureOption, +) { + rootAddr := evm.StorageAccountAddress(chain.ChainID()) + + RunWithTestBackend(t, func(backend *TestBackend) { + RunWithDeployedContract(t, tc, backend, rootAddr, func(testContract *TestContract) { + RunWithEOATestAccount(t, backend, rootAddr, func(testAccount *EOATestAccount) { + + blocks := new(envMock.Blocks) + block1 := unittest.BlockFixture() + header1 := block1.ToHeader() + blocks.On("ByHeightFrom", + header1.Height, + header1, + ).Return(header1, nil) + + opts := []fvm.Option{ + fvm.WithChain(chain), + fvm.WithBlockHeader(header1), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + fvm.WithEntropyProvider(testutil.EntropyProviderFixture(nil)), + fvm.WithRandomSourceHistoryCallAllowed(true), + fvm.WithBlocks(blocks), + fvm.WithCadenceLogging(true), + } + ctx := fvm.NewContext(opts...) + + vm := fvm.NewVirtualMachine() + snapshotTree := snapshot.NewSnapshotTree(backend) + + baseBootstrapOpts := []fvm.BootstrapProcedureOption{ + fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply), + } + baseBootstrapOpts = append(baseBootstrapOpts, bootstrapOpts...) + + executionSnapshot, _, err := vm.Run( + ctx, + fvm.Bootstrap(unittest.ServiceAccountPublicKey, baseBootstrapOpts...), + snapshotTree) + require.NoError(t, err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + f( + fvm.NewContextFromParent(ctx, fvm.WithEVMEnabled(true)), + vm, + snapshotTree, + testContract, + testAccount, + ) + }) + }) + }) +} diff --git a/fvm/evm/handler/addressAllocator.go b/fvm/evm/handler/addressAllocator.go new file mode 100644 index 00000000000..dc468246b15 --- /dev/null +++ b/fvm/evm/handler/addressAllocator.go @@ -0,0 +1,82 @@ +package handler + +import ( + "encoding/binary" + + "github.com/onflow/flow-go/fvm/evm/types" +) + +const ( + // `addressIndexMultiplierConstant` is used for mapping address indices + // into deterministic random-looking address postfixes. + // The constant must be an ODD number. + // It is a "nothing-up-my-sleeves" constant, chosen to be big enough so that + // the index and its corresponding address look less "related". + // Note that the least significant byte was set to "77" instead of "88" to force + // the odd parity. + // Look at `mapAddressIndex` for more details. 
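+	//
+	// A worked example of this mapping (matching the expected values in
+	// addressAllocator_test.go below): index 1 maps to 0xFFEEDDCCBBAA9977, and
+	// index 2 maps to 2 * 0xFFEEDDCCBBAA9977 mod 2^64 = 0xFFDDBB99775532EE,
+	// which is why the test's COA addresses end in exactly those suffixes.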
+	addressIndexMultiplierConstant = uint64(0xFFEEDDCCBBAA9977)
+)
+
+type AddressAllocator struct {
+}
+
+var _ types.AddressAllocator = &AddressAllocator{}
+
+// NewAddressAllocator constructs a new address allocator
+func NewAddressAllocator() *AddressAllocator {
+	return &AddressAllocator{}
+}
+
+func (aa *AddressAllocator) COAFactoryAddress() types.Address {
+	return MakeCOAAddress(0)
+}
+
+func (aa *AddressAllocator) NativeTokenBridgeAddress() types.Address {
+	return MakePrecompileAddress(0)
+}
+
+// AllocateCOAAddress allocates an address for a COA
+func (aa *AddressAllocator) AllocateCOAAddress(uuid uint64) types.Address {
+	return MakeCOAAddress(uuid)
+}
+
+func MakeCOAAddress(index uint64) types.Address {
+	return makePrefixedAddress(mapAddressIndex(index), types.FlowEVMCOAAddressPrefix)
+}
+
+func (aa *AddressAllocator) AllocatePrecompileAddress(index uint64) types.Address {
+	target := MakePrecompileAddress(index)
+	return target
+}
+
+func MakePrecompileAddress(index uint64) types.Address {
+	return makePrefixedAddress(index, types.FlowEVMExtendedPrecompileAddressPrefix)
+}
+
+func makePrefixedAddress(
+	index uint64,
+	prefix [types.FlowEVMSpecialAddressPrefixLen]byte,
+) types.Address {
+	var addr types.Address
+	copy(addr[:], prefix[:])
+	// only works if `len(addr) - len(prefix)` is exactly 8 bytes
+	binary.BigEndian.PutUint64(addr[len(prefix):], index)
+	return addr
+}
+
+// `mapAddressIndex` maps an index of 64 bits to a deterministic random-looking 64 bits.
+//
+// The mapping function must be an injective mapping (in this case bijective)
+// where every two indices always map to two different results. Multiple injective
+// mappings are possible.
+//
+// The current implementation uses a simple modular multiplication by a constant modulo 2^64.
+// The multiplier constant can be any odd number. Since odd numbers are co-prime with 2^64, they
+// have a multiplicative inverse modulo 2^64.
+// This makes multiplying by an odd number an injective function (and therefore bijective).
+//
+// Multiplying modulo 2^64 is implicitly implemented as a uint64 multiplication with a uint64 result.
+func mapAddressIndex(index uint64) uint64 {
+	return index * addressIndexMultiplierConstant
+}
diff --git a/fvm/evm/handler/addressAllocator_test.go b/fvm/evm/handler/addressAllocator_test.go
new file mode 100644
index 00000000000..794a4c9342e
--- /dev/null
+++ b/fvm/evm/handler/addressAllocator_test.go
@@ -0,0 +1,40 @@
+package handler_test
+
+import (
+	"testing"
+
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/fvm/evm/handler"
+	"github.com/onflow/flow-go/fvm/evm/types"
+)
+
+func TestAddressAllocator(t *testing.T) {
+	aa := handler.NewAddressAllocator()
+
+	adr := aa.AllocatePrecompileAddress(3)
+	expectedAddress := types.NewAddress(gethCommon.HexToAddress("0x0000000000000000000000010000000000000003"))
+	require.Equal(t, expectedAddress, adr)
+	// check conforming to types
+	require.False(t, types.IsACOAAddress(adr))
+
+	// COA allocation maps the index through `mapAddressIndex`
+	adr = aa.AllocateCOAAddress(1)
+	expectedAddress = types.NewAddress(gethCommon.HexToAddress("0x000000000000000000000002ffeeddccbbaa9977"))
+	require.Equal(t, expectedAddress, adr)
+	// check conforming to types
+	require.True(t, types.IsACOAAddress(adr))
+
+	// continuous allocation logic
+	adr = aa.AllocateCOAAddress(2)
+	expectedAddress = types.NewAddress(gethCommon.HexToAddress("0x000000000000000000000002ffddbb99775532ee"))
+	require.Equal(t, expectedAddress, adr)
+	// check conforming to types
+	require.True(t, types.IsACOAAddress(adr))
+
+	// factory
+	factory := aa.COAFactoryAddress()
+	expectedAddress = types.NewAddress(gethCommon.HexToAddress("0x0000000000000000000000020000000000000000"))
+	require.Equal(t, expectedAddress, factory)
+}
diff --git a/fvm/evm/handler/blockHashList.go b/fvm/evm/handler/blockHashList.go
new file mode 100644
index 00000000000..635e1b3fc82
--- /dev/null
+++ b/fvm/evm/handler/blockHashList.go
@@ -0,0 +1,274 @@
+package handler
+
+import (
+	"encoding/binary"
+	"fmt"
+	"strings"
+
+	gethCommon "github.com/ethereum/go-ethereum/common"
+
+	"github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+const (
+	blockHashListMetaKey         = "BlockHashListMeta"
+	blockHashListBucketKeyFormat = "BlockHashListBucket%d"
+
+	hashCountPerBucket   = 16
+	hashEncodingSize     = 32
+	capacityEncodingSize = 8
+	tailEncodingSize     = 8
+	countEncodingSize    = 8
+	heightEncodingSize   = 8
+	metaEncodingSize     = capacityEncodingSize +
+		tailEncodingSize +
+		countEncodingSize +
+		heightEncodingSize
+)
+
+func IsBlockHashListBucketKeyFormat(id flow.RegisterID) bool {
+	return strings.HasPrefix(id.Key, "BlockHashListBucket")
+}
+
+func IsBlockHashListMetaKey(id flow.RegisterID) bool {
+	return id.Key == blockHashListMetaKey
+}
+
+// BlockHashList stores the last `capacity` block hashes.
+//
+// Under the hood it breaks the list of hashes into
+// smaller fixed-size buckets to minimize the
+// number of bytes read and written during set/get operations.
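+//
+// To make the sizing concrete (derived from the constants above, not new
+// behavior): with hashCountPerBucket = 16 and 32-byte hashes, each bucket
+// register holds 512 bytes, so a capacity-256 list spans 16 bucket registers
+// plus one small metadata register; a Push rewrites a single 512-byte bucket
+// and the metadata instead of re-encoding all 8 KiB of stored hashes.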
+type BlockHashList struct {
+	backend     types.BackendStorage
+	rootAddress flow.Address
+
+	// cached metadata
+	capacity int
+	tail     int    // index to write to
+	count    int    // number of elements (count <= capacity)
+	height   uint64 // height of the last added block
+}
+
+// NewBlockHashList creates a block hash list.
+// It tries to load the metadata from the backend,
+// and if none exists it initializes fresh metadata.
+func NewBlockHashList(
+	backend types.BackendStorage,
+	rootAddress flow.Address,
+	capacity int,
+) (*BlockHashList, error) {
+	bhl := &BlockHashList{
+		backend:     backend,
+		rootAddress: rootAddress,
+		capacity:    capacity,
+		tail:        0,
+		count:       0,
+		height:      0,
+	}
+	err := bhl.loadMetaData()
+	if err != nil {
+		return nil, err
+	}
+	// check the loaded capacity against the one provided
+	if bhl.capacity != capacity {
+		return nil, fmt.Errorf(
+			"capacity doesn't match, expected: %d, got: %d",
+			bhl.capacity,
+			capacity,
+		)
+	}
+	return bhl, nil
+}
+
+// Push pushes a block hash for the next height to the list.
+// If the list has reached capacity, it overwrites the oldest element.
+func (bhl *BlockHashList) Push(height uint64, bh gethCommon.Hash) error {
+	// handle the very first block
+	if bhl.IsEmpty() && height != 0 {
+		return fmt.Errorf("out-of-order block hash, expected: 0, got: %d", height)
+	}
+	// check the block heights before pushing
+	if !bhl.IsEmpty() && height != bhl.height+1 {
+		return fmt.Errorf("out-of-order block hash, expected: %d, got: %d", bhl.height+1, height)
+	}
+
+	// update the block hash stored at the tail index
+	err := bhl.updateBlockHashAt(bhl.tail, bh)
+	if err != nil {
+		return err
+	}
+
+	// update metadata
+	bhl.tail = (bhl.tail + 1) % bhl.capacity
+	bhl.height = height
+	if bhl.count != bhl.capacity {
+		bhl.count++
+	}
+	return bhl.storeMetaData()
+}
+
+// IsEmpty returns true if the list is empty
+func (bhl *BlockHashList) IsEmpty() bool {
+	return bhl.count == 0
+}
+
+// LastAddedBlockHash returns the last block hash added to the list;
+// for an empty list it returns the empty hash value
+func (bhl *BlockHashList) LastAddedBlockHash() (gethCommon.Hash, error) {
+	if bhl.IsEmpty() {
+		// return empty hash
+		return gethCommon.Hash{}, nil
+	}
+	index := (bhl.tail + bhl.capacity - 1) % bhl.capacity
+	return bhl.getBlockHashAt(index)
+}
+
+// MinAvailableHeight returns the min available height in the list
+func (bhl *BlockHashList) MinAvailableHeight() uint64 {
+	if bhl.IsEmpty() {
+		return 0
+	}
+	return bhl.height - (uint64(bhl.count) - 1)
+}
+
+// MaxAvailableHeight returns the max available height in the list
+func (bhl *BlockHashList) MaxAvailableHeight() uint64 {
+	return bhl.height
+}
+
+// BlockHashByHeight returns the block hash for the given block height
+func (bhl *BlockHashList) BlockHashByHeight(height uint64) (found bool, bh gethCommon.Hash, err error) {
+	if bhl.IsEmpty() ||
+		height > bhl.MaxAvailableHeight() ||
+		height < bhl.MinAvailableHeight() {
+		return false, gethCommon.Hash{}, nil
+	}
+	// calculate the index to look up
+	diff := bhl.height - height
+	index := (bhl.tail - int(diff) - 1 + bhl.capacity) % bhl.capacity
+	bh, err = bhl.getBlockHashAt(index)
+	return true, bh, err
+}
+
+// updateBlockHashAt updates the block hash at the given index
+func (bhl *BlockHashList) updateBlockHashAt(idx int, bh gethCommon.Hash) error {
+	// fetch the bucket
+	bucketNumber := idx / hashCountPerBucket
+	bucket, err := bhl.fetchBucket(bucketNumber)
+	if err != nil {
+		return err
+	}
+
+	cpy := make([]byte, len(bucket))
+	copy(cpy, bucket)
+
+	// update the block hash
+	start := (idx % hashCountPerBucket) * hashEncodingSize
+	end := start + hashEncodingSize
+	copy(cpy[start:end], bh.Bytes())
+
+	// store bucket
+	return bhl.backend.SetValue(
+		bhl.rootAddress[:],
+		[]byte(fmt.Sprintf(blockHashListBucketKeyFormat, bucketNumber)),
+		cpy,
+	)
+}
+
+// fetchBucket fetches the bucket with the given number
+func (bhl *BlockHashList) fetchBucket(num int) ([]byte, error) {
+	data, err := bhl.backend.GetValue(
+		bhl.rootAddress[:],
+		[]byte(fmt.Sprintf(blockHashListBucketKeyFormat, num)),
+	)
+	if err != nil {
+		return nil, err
+	}
+	// if the bucket doesn't exist, create and return a new empty buffer
+	if len(data) == 0 {
+		return make([]byte, hashCountPerBucket*hashEncodingSize), nil
+	}
+	return data, err
+}
+
+// getBlockHashAt returns the block hash at the given index
+func (bhl *BlockHashList) getBlockHashAt(idx int) (gethCommon.Hash, error) {
+	// fetch the bucket first
+	bucket, err := bhl.fetchBucket(idx / hashCountPerBucket)
+	if err != nil {
+		return gethCommon.Hash{}, err
+	}
+	// return the hash from the bucket
+	start := (idx % hashCountPerBucket) * hashEncodingSize
+	end := start + hashEncodingSize
+	return gethCommon.BytesToHash(bucket[start:end]), nil
+}
+
+// loadMetaData loads the metadata from storage
+func (bhl *BlockHashList) loadMetaData() error {
+	data, err := bhl.backend.GetValue(
+		bhl.rootAddress[:],
+		[]byte(blockHashListMetaKey),
+	)
+	if err != nil {
+		return err
+	}
+	// if the data doesn't exist,
+	// return and keep the default values
+	if len(data) == 0 {
+		return nil
+	}
+	// check the data size
+	if len(data) < metaEncodingSize {
+		return fmt.Errorf("encoded input too short: %d < %d", len(data), metaEncodingSize)
+	}
+
+	pos := 0
+	// decode capacity
+	bhl.capacity = int(binary.BigEndian.Uint64(data[pos:]))
+	pos += capacityEncodingSize
+
+	// decode tail
+	bhl.tail = int(binary.BigEndian.Uint64(data[pos:]))
+	pos += tailEncodingSize
+
+	// decode count
+	bhl.count = int(binary.BigEndian.Uint64(data[pos:]))
+	pos += countEncodingSize
+
+	// decode height
+	bhl.height = binary.BigEndian.Uint64(data[pos:])
+
+	return nil
+}
+
+// storeMetaData stores the metadata into storage
+func (bhl *BlockHashList) storeMetaData() error {
+	// encode the metadata
+	buffer := make([]byte, metaEncodingSize)
+	pos := 0
+
+	// encode capacity
+	binary.BigEndian.PutUint64(buffer[pos:], uint64(bhl.capacity))
+	pos += capacityEncodingSize
+
+	// encode tail
+	binary.BigEndian.PutUint64(buffer[pos:], uint64(bhl.tail))
+	pos += tailEncodingSize
+
+	// encode count
+	binary.BigEndian.PutUint64(buffer[pos:], uint64(bhl.count))
+	pos += countEncodingSize
+
+	// encode height
+	binary.BigEndian.PutUint64(buffer[pos:], bhl.height)
+
+	// store the encoded data into the backend
+	return bhl.backend.SetValue(
+		bhl.rootAddress[:],
+		[]byte(blockHashListMetaKey),
+		buffer,
+	)
+}
diff --git a/fvm/evm/handler/blockHashList_test.go b/fvm/evm/handler/blockHashList_test.go
new file mode 100644
index 00000000000..ebf1b21e1c8
--- /dev/null
+++ b/fvm/evm/handler/blockHashList_test.go
@@ -0,0 +1,98 @@
+package handler_test
+
+import (
+	"testing"
+
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/fvm/evm/handler"
+	"github.com/onflow/flow-go/fvm/evm/testutils"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+func TestBlockHashList(t *testing.T) {
+	testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) {
+		testutils.RunWithTestFlowEVMRootAddress(t, backend, func(root flow.Address) {
+			capacity := 256
+			bhl, err :=
handler.NewBlockHashList(backend, root, capacity) + require.NoError(t, err) + require.True(t, bhl.IsEmpty()) + + h, err := bhl.LastAddedBlockHash() + require.NoError(t, err) + require.Equal(t, gethCommon.Hash{}, h) + + found, h, err := bhl.BlockHashByHeight(0) + require.False(t, found) + require.NoError(t, err) + require.Equal(t, gethCommon.Hash{}, h) + + // first add blocks for the full range of capacity + for i := 0; i < capacity; i++ { + err := bhl.Push(uint64(i), gethCommon.Hash{byte(i)}) + require.NoError(t, err) + require.Equal(t, uint64(0), bhl.MinAvailableHeight()) + require.Equal(t, uint64(i), bhl.MaxAvailableHeight()) + h, err := bhl.LastAddedBlockHash() + require.NoError(t, err) + require.Equal(t, gethCommon.Hash{byte(i)}, h) + } + + // check the value for all of them + for i := 0; i < capacity; i++ { + found, h, err := bhl.BlockHashByHeight(uint64(i)) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, gethCommon.Hash{byte(i)}, h) + } + h, err = bhl.LastAddedBlockHash() + require.NoError(t, err) + require.Equal(t, gethCommon.Hash{byte(capacity - 1)}, h) + + // over the border additions + for i := capacity; i < capacity+3; i++ { + err := bhl.Push(uint64(i), gethCommon.Hash{byte(i)}) + require.NoError(t, err) + require.Equal(t, uint64(i-capacity+1), bhl.MinAvailableHeight()) + require.Equal(t, uint64(i), bhl.MaxAvailableHeight()) + } + // check that old block has been replaced + for i := 0; i < 3; i++ { + found, _, err := bhl.BlockHashByHeight(uint64(i)) + require.NoError(t, err) + require.False(t, found) + } + // check the rest of blocks + for i := 3; i < capacity+3; i++ { + found, h, err := bhl.BlockHashByHeight(uint64(i)) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, gethCommon.Hash{byte(i)}, h) + } + h, err = bhl.LastAddedBlockHash() + require.NoError(t, err) + require.Equal(t, gethCommon.Hash{byte(capacity + 2)}, h) + + // construct a new one and check + bhl, err = handler.NewBlockHashList(backend, root, capacity) + require.NoError(t, err) + require.False(t, bhl.IsEmpty()) + + h2, err := bhl.LastAddedBlockHash() + require.NoError(t, err) + require.Equal(t, h, h2) + + require.Equal(t, uint64(3), bhl.MinAvailableHeight()) + require.Equal(t, uint64(capacity+2), bhl.MaxAvailableHeight()) + + // check all the stored blocks + for i := 3; i < capacity+3; i++ { + found, h, err := bhl.BlockHashByHeight(uint64(i)) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, gethCommon.Hash{byte(i)}, h) + } + }) + }) +} diff --git a/fvm/evm/handler/blockstore.go b/fvm/evm/handler/blockstore.go new file mode 100644 index 00000000000..3e1f2581c02 --- /dev/null +++ b/fvm/evm/handler/blockstore.go @@ -0,0 +1,203 @@ +package handler + +import ( + "fmt" + "time" + + gethCommon "github.com/ethereum/go-ethereum/common" + + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +const ( + BlockHashListCapacity = 256 + BlockStoreLatestBlockKey = "LatestBlock" + BlockStoreLatestBlockProposalKey = "LatestBlockProposal" +) + +type BlockStore struct { + chainID flow.ChainID + backend types.Backend + rootAddress flow.Address +} + +var _ types.BlockStore = &BlockStore{} + +// NewBlockStore constructs a new block store +func NewBlockStore( + chainID flow.ChainID, + backend types.Backend, + rootAddress flow.Address, +) *BlockStore { + return &BlockStore{ + chainID: chainID, + backend: backend, + rootAddress: rootAddress, + } +} + +// BlockProposal returns the block proposal to be updated by the handler +func (bs 
*BlockStore) BlockProposal() (*types.BlockProposal, error) { + // first fetch it from the storage + data, err := bs.backend.GetValue(bs.rootAddress[:], []byte(BlockStoreLatestBlockProposalKey)) + if err != nil { + return nil, err + } + if len(data) != 0 { + return types.NewBlockProposalFromBytes(data) + } + bp, err := bs.constructBlockProposal() + if err != nil { + return nil, err + } + // store block proposal + err = bs.UpdateBlockProposal(bp) + if err != nil { + return nil, err + } + return bp, nil +} + +func (bs *BlockStore) constructBlockProposal() (*types.BlockProposal, error) { + // if available construct a new one + cadenceHeight, err := bs.backend.GetCurrentBlockHeight() + if err != nil { + return nil, err + } + + cadenceBlock, found, err := bs.backend.GetBlockAtHeight(cadenceHeight) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("cadence block not found") + } + + lastExecutedBlock, err := bs.LatestBlock() + if err != nil { + return nil, err + } + + parentHash, err := lastExecutedBlock.Hash() + if err != nil { + return nil, err + } + + // cadence block timestamp is unix nanoseconds but evm blocks + // expect timestamps in unix seconds so we convert here + timestamp := uint64(cadenceBlock.Timestamp / int64(time.Second)) + + // read a random value for block proposal + prevrandao := gethCommon.Hash{} + err = bs.backend.ReadRandom(prevrandao[:]) + if err != nil { + return nil, err + } + + blockProposal := types.NewBlockProposal( + parentHash, + lastExecutedBlock.Height+1, + timestamp, + lastExecutedBlock.TotalSupply, + prevrandao, + ) + + return blockProposal, nil +} + +// UpdateBlockProposal updates the block proposal +func (bs *BlockStore) UpdateBlockProposal(bp *types.BlockProposal) error { + blockProposalBytes, err := bp.ToBytes() + if err != nil { + return types.NewFatalError(err) + } + + return bs.backend.SetValue( + bs.rootAddress[:], + []byte(BlockStoreLatestBlockProposalKey), + blockProposalBytes, + ) +} + +// CommitBlockProposal commits the block proposal to the chain +func (bs *BlockStore) CommitBlockProposal(bp *types.BlockProposal) error { + bp.PopulateRoots() + + blockBytes, err := bp.Block.ToBytes() + if err != nil { + return types.NewFatalError(err) + } + + err = bs.backend.SetValue(bs.rootAddress[:], []byte(BlockStoreLatestBlockKey), blockBytes) + if err != nil { + return err + } + + hash, err := bp.Block.Hash() + if err != nil { + return err + } + + bhl, err := bs.getBlockHashList() + if err != nil { + return err + } + err = bhl.Push(bp.Block.Height, hash) + if err != nil { + return err + } + + // construct a new block proposal and store + newBP, err := bs.constructBlockProposal() + if err != nil { + return err + } + err = bs.UpdateBlockProposal(newBP) + if err != nil { + return err + } + + return nil +} + +// LatestBlock returns the latest executed block +func (bs *BlockStore) LatestBlock() (*types.Block, error) { + data, err := bs.backend.GetValue(bs.rootAddress[:], []byte(BlockStoreLatestBlockKey)) + if err != nil { + return nil, err + } + if len(data) == 0 { + return types.GenesisBlock(bs.chainID), nil + } + return types.NewBlockFromBytes(data) +} + +// BlockHash returns the block hash for the last x blocks +func (bs *BlockStore) BlockHash(height uint64) (gethCommon.Hash, error) { + bhl, err := bs.getBlockHashList() + if err != nil { + return gethCommon.Hash{}, err + } + _, hash, err := bhl.BlockHashByHeight(height) + return hash, err +} + +func (bs *BlockStore) getBlockHashList() (*BlockHashList, error) { + bhl, err := 
NewBlockHashList(bs.backend, bs.rootAddress, BlockHashListCapacity) + if err != nil { + return nil, err + } + + if bhl.IsEmpty() { + err = bhl.Push( + types.GenesisBlock(bs.chainID).Height, + types.GenesisBlockHash(bs.chainID), + ) + if err != nil { + return nil, err + } + } + + return bhl, nil +} diff --git a/fvm/evm/handler/blockstore_benchmark_test.go b/fvm/evm/handler/blockstore_benchmark_test.go new file mode 100644 index 00000000000..013b6326b0a --- /dev/null +++ b/fvm/evm/handler/blockstore_benchmark_test.go @@ -0,0 +1,59 @@ +package handler_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/handler" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/model/flow" +) + +func BenchmarkProposalGrowth(b *testing.B) { benchmarkBlockProposalGrowth(b, 1000) } + +func benchmarkBlockProposalGrowth(b *testing.B, txCounts int) { + testutils.RunWithTestBackend(b, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(b, backend, func(rootAddr flow.Address) { + + bs := handler.NewBlockStore(flow.Testnet, backend, rootAddr) + for i := 0; i < txCounts; i++ { + bp, err := bs.BlockProposal() + require.NoError(b, err) + res := testutils.RandomResultFixture(b) + bp.AppendTransaction(res) + err = bs.UpdateBlockProposal(bp) + require.NoError(b, err) + } + + // check the impact of updating block proposal after x number of transactions + backend.ResetStats() + startTime := time.Now() + bp, err := bs.BlockProposal() + require.NoError(b, err) + res := testutils.RandomResultFixture(b) + bp.AppendTransaction(res) + err = bs.UpdateBlockProposal(bp) + require.NoError(b, err) + + b.ReportMetric(float64(time.Since(startTime).Nanoseconds()), "proposal_update_time_ns") + b.ReportMetric(float64(backend.TotalBytesRead()), "proposal_update_bytes_read") + b.ReportMetric(float64(backend.TotalBytesWritten()), "proposal_update_bytes_written") + b.ReportMetric(float64(backend.TotalStorageSize()), "proposal_update_total_storage_size") + + // check the impact of block commit after x number of transactions + backend.ResetStats() + startTime = time.Now() + bp, err = bs.BlockProposal() + require.NoError(b, err) + err = bs.CommitBlockProposal(bp) + require.NoError(b, err) + + b.ReportMetric(float64(time.Since(startTime).Nanoseconds()), "block_commit_time_ns") + b.ReportMetric(float64(backend.TotalBytesRead()), "block_commit_bytes_read") + b.ReportMetric(float64(backend.TotalBytesWritten()), "block_commit_bytes_written") + b.ReportMetric(float64(backend.TotalStorageSize()), "block_commit_total_storage_size") + }) + }) +} diff --git a/fvm/evm/handler/blockstore_test.go b/fvm/evm/handler/blockstore_test.go new file mode 100644 index 00000000000..c2e40135949 --- /dev/null +++ b/fvm/evm/handler/blockstore_test.go @@ -0,0 +1,98 @@ +package handler_test + +import ( + "math/big" + "testing" + + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/handler" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +func TestBlockStore(t *testing.T) { + + var chainID = flow.Testnet + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(root flow.Address) { + bs := handler.NewBlockStore(chainID, backend, root) + + // check the Genesis block + b, err := bs.LatestBlock() + require.NoError(t, 
err)
+			require.Equal(t, types.GenesisBlock(chainID), b)
+			h, err := bs.BlockHash(0)
+			require.NoError(t, err)
+			require.Equal(t, types.GenesisBlockHash(chainID), h)
+
+			// test block proposal construction from the genesis block
+			bp, err := bs.BlockProposal()
+			require.NoError(t, err)
+			require.Equal(t, uint64(1), bp.Height)
+			expectedParentHash, err := types.GenesisBlock(chainID).Hash()
+			require.NoError(t, err)
+			require.Equal(t, expectedParentHash, bp.ParentBlockHash)
+
+			// without a commit, calling BlockProposal again should return the same proposal
+			retbp, err := bs.BlockProposal()
+			require.NoError(t, err)
+			require.Equal(t, bp, retbp)
+
+			// update the block proposal
+			bp.TotalGasUsed += 100
+			err = bs.UpdateBlockProposal(bp)
+			require.NoError(t, err)
+
+			// reset the block store and check that it still returns the updated block proposal
+			bs = handler.NewBlockStore(chainID, backend, root)
+			retbp, err = bs.BlockProposal()
+			require.NoError(t, err)
+			require.Equal(t, bp, retbp)
+
+			// update the block proposal again
+			supply := big.NewInt(100)
+			bp.TotalSupply = supply
+			err = bs.UpdateBlockProposal(bp)
+			require.NoError(t, err)
+			// this should still return the genesis block
+			retb, err := bs.LatestBlock()
+			require.NoError(t, err)
+			require.Equal(t, types.GenesisBlock(chainID), retb)
+
+			// commit the changes
+			err = bs.CommitBlockProposal(bp)
+			require.NoError(t, err)
+			retb, err = bs.LatestBlock()
+			require.NoError(t, err)
+			require.Equal(t, supply, retb.TotalSupply)
+			require.Equal(t, uint64(1), retb.Height)
+
+			retbp, err = bs.BlockProposal()
+			require.NoError(t, err)
+			require.Equal(t, uint64(2), retbp.Height)
+
+			// check block hashes
+			// genesis
+			h, err = bs.BlockHash(0)
+			require.NoError(t, err)
+			require.Equal(t, types.GenesisBlockHash(chainID), h)
+
+			// block 1
+			h, err = bs.BlockHash(1)
+			require.NoError(t, err)
+			expected, err := bp.Hash()
+			require.NoError(t, err)
+			require.Equal(t, expected, h)
+
+			// block 2 (not yet committed)
+			h, err = bs.BlockHash(2)
+			require.NoError(t, err)
+			require.Equal(t, gethCommon.Hash{}, h)
+		})
+
+	})
+
+}
diff --git a/fvm/evm/handler/coa/coa.go b/fvm/evm/handler/coa/coa.go
new file mode 100644
index 00000000000..8be5394cff5
--- /dev/null
+++ b/fvm/evm/handler/coa/coa.go
@@ -0,0 +1,19 @@
+package coa
+
+import (
+	_ "embed"
+	"encoding/hex"
+)
+
+var ContractDeploymentRequiredGas = uint64(723_000)
+
+//go:embed coa_bytes.hex
+var contractBytesInHex string
+
+// ContractBytes is the compiled bytecode of the COA smart contract.
+var ContractBytes, _ = hex.DecodeString(contractBytesInHex)
+
+// ContractABIJSON is the JSON string of the ABI of the COA smart contract.
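+// As a hypothetical usage sketch (not part of this change), a consumer could
+// parse it with go-ethereum's abi package:
+//
+//	parsed, err := abi.JSON(strings.NewReader(coa.ContractABIJSON))
+//	// parsed.Methods["isValidSignature"] then describes the ERC-1271 entry point.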
+// +//go:embed coa_abi.json +var ContractABIJSON string diff --git a/fvm/evm/handler/coa/coa.sol b/fvm/evm/handler/coa/coa.sol new file mode 100644 index 00000000000..7c35f35c3cf --- /dev/null +++ b/fvm/evm/handler/coa/coa.sol @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: UNLICENSED + +pragma solidity >=0.7.0 <0.9.0; + +interface IERC165 { + function supportsInterface(bytes4 interfaceId) external view returns (bool); +} + +interface ERC721TokenReceiver { + function onERC721Received( + address _operator, + address _from, + uint256 _tokenId, + bytes calldata _data + ) external returns (bytes4); +} + +interface ERC777TokensRecipient { + function tokensReceived( + address operator, + address from, + address to, + uint256 amount, + bytes calldata data, + bytes calldata operatorData + ) external; +} + +interface ERC1155TokenReceiver { + + function onERC1155Received( + address _operator, + address _from, + uint256 _id, + uint256 _value, + bytes calldata _data + ) external returns (bytes4); + + function onERC1155BatchReceived( + address _operator, + address _from, + uint256[] calldata _ids, + uint256[] calldata _values, + bytes calldata _data + ) external returns (bytes4); + +} + +contract COA is ERC1155TokenReceiver, ERC777TokensRecipient, ERC721TokenReceiver, IERC165 { + address constant public cadenceArch = 0x0000000000000000000000010000000000000001; + + // bytes4(keccak256("onERC721Received(address,address,uint256,bytes)")) + bytes4 constant internal ERC721ReceivedIsSupported = 0x150b7a02; + + // bytes4(keccak256("onERC1155Received(address,address,uint256,uint256,bytes)")) + bytes4 constant internal ERC1155ReceivedIsSupported = 0xf23a6e61; + + // bytes4(keccak256("onERC1155BatchReceived(address,address,uint256[],uint256[],bytes)")) + bytes4 constant internal ERC1155BatchReceivedIsSupported = 0xbc197c81; + + // bytes4(keccak256("isValidSignature(bytes32,bytes)") + bytes4 constant internal ValidERC1271Signature = 0x1626ba7e; + bytes4 constant internal InvalidERC1271Signature = 0xffffffff; + + receive() external payable { + } + function supportsInterface(bytes4 id) external view virtual override returns (bool) { + return + id == type(ERC1155TokenReceiver).interfaceId || + id == type(ERC721TokenReceiver).interfaceId || + id == type(ERC777TokensRecipient).interfaceId || + id == type(IERC165).interfaceId; + } + + function tokensReceived( + address, + address, + address, + uint256, + bytes calldata, + bytes calldata + ) external pure override {} + + function onERC721Received( + address, + address, + uint256, + bytes calldata + ) external pure override returns (bytes4) { + return ERC721ReceivedIsSupported; + } + + function onERC1155Received( + address, + address, + uint256, + uint256, + bytes calldata + ) external pure override returns (bytes4) { + return ERC1155ReceivedIsSupported; + } + + function onERC1155BatchReceived( + address, + address, + uint256[] calldata, + uint256[] calldata, + bytes calldata + ) external pure override returns (bytes4) { + return ERC1155BatchReceivedIsSupported; + } + + // ERC1271 requirement + function isValidSignature( + bytes32 _hash, + bytes memory _sig + ) external view virtual returns (bytes4){ + (bool ok, bytes memory data) = cadenceArch.staticcall(abi.encodeWithSignature("verifyCOAOwnershipProof(address,bytes32,bytes)", address(this), _hash, _sig)); + require(ok); + bool output = abi.decode(data, (bool)); + if (output) { + return ValidERC1271Signature; + } + return InvalidERC1271Signature; + } +} \ No newline at end of file diff --git 
a/fvm/evm/handler/coa/coa_abi.json b/fvm/evm/handler/coa/coa_abi.json new file mode 100644 index 00000000000..3f46c1f4b8f --- /dev/null +++ b/fvm/evm/handler/coa/coa_abi.json @@ -0,0 +1,212 @@ +[ + { + "inputs": [], + "name": "cadenceArch", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "_hash", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "_sig", + "type": "bytes" + } + ], + "name": "isValidSignature", + "outputs": [ + { + "internalType": "bytes4", + "name": "", + "type": "bytes4" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "uint256[]", + "name": "", + "type": "uint256[]" + }, + { + "internalType": "uint256[]", + "name": "", + "type": "uint256[]" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "onERC1155BatchReceived", + "outputs": [ + { + "internalType": "bytes4", + "name": "", + "type": "bytes4" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "onERC1155Received", + "outputs": [ + { + "internalType": "bytes4", + "name": "", + "type": "bytes4" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "onERC721Received", + "outputs": [ + { + "internalType": "bytes4", + "name": "", + "type": "bytes4" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes4", + "name": "id", + "type": "bytes4" + } + ], + "name": "supportsInterface", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "name": "tokensReceived", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "stateMutability": "payable", + "type": "receive" + } +] \ No newline at end of file diff --git a/fvm/evm/handler/coa/coa_bytes.hex b/fvm/evm/handler/coa/coa_bytes.hex new file mode 100644 index 00000000000..7d63c2389f1 --- /dev/null +++ b/fvm/evm/handler/coa/coa_bytes.hex @@ -0,0 +1 @@ 
+608060405234801561000f575f80fd5b50610db98061001d5f395ff3fe608060405260043610610072575f3560e01c80631626ba7e1161004d5780631626ba7e1461011d578063bc197c8114610159578063d0d250bd14610195578063f23a6e61146101bf57610079565b806223de291461007d57806301ffc9a7146100a5578063150b7a02146100e157610079565b3661007957005b5f80fd5b348015610088575f80fd5b506100a3600480360381019061009e9190610641565b6101fb565b005b3480156100b0575f80fd5b506100cb60048036038101906100c69190610760565b610205565b6040516100d891906107a5565b60405180910390f35b3480156100ec575f80fd5b50610107600480360381019061010291906107be565b6103a5565b6040516101149190610851565b60405180910390f35b348015610128575f80fd5b50610143600480360381019061013e91906109d5565b6103b9565b6040516101509190610851565b60405180910390f35b348015610164575f80fd5b5061017f600480360381019061017a9190610a84565b610509565b60405161018c9190610851565b60405180910390f35b3480156101a0575f80fd5b506101a9610520565b6040516101b69190610b6a565b60405180910390f35b3480156101ca575f80fd5b506101e560048036038101906101e09190610b83565b61052d565b6040516101f29190610851565b60405180910390f35b5050505050505050565b5f7f4e2312e0000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff191614806102cf57507f150b7a02000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916145b8061033657507e23de29000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916145b8061039e57507f01ffc9a7000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916145b9050919050565b5f63150b7a0260e01b905095945050505050565b5f805f6801000000000000000173ffffffffffffffffffffffffffffffffffffffff163086866040516024016103f193929190610ca2565b6040516020818303038152906040527f5ee837e7000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505060405161047b9190610d18565b5f60405180830381855afa9150503d805f81146104b3576040519150601f19603f3d011682016040523d82523d5f602084013e6104b8565b606091505b5091509150816104c6575f80fd5b5f818060200190518101906104db9190610d58565b905080156104f557631626ba7e60e01b9350505050610503565b63ffffffff60e01b93505050505b92915050565b5f63bc197c8160e01b905098975050505050505050565b6801000000000000000181565b5f63f23a6e6160e01b90509695505050505050565b5f604051905090565b5f80fd5b5f80fd5b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f61057c82610553565b9050919050565b61058c81610572565b8114610596575f80fd5b50565b5f813590506105a781610583565b92915050565b5f819050919050565b6105bf816105ad565b81146105c9575f80fd5b50565b5f813590506105da816105b6565b92915050565b5f80fd5b5f80fd5b5f80fd5b5f8083601f840112610601576106006105e0565b5b8235905067ffffffffffffffff81111561061e5761061d6105e4565b5b60208301915083600182028301111561063a576106396105e8565b5b9250929050565b5f805f805f805f8060c0898b03121561065d5761065c61054b565b5b5f61066a8b828c01610599565b985050602061067b8b828c01610599565b975050604061068c8b828c01610599565b965050606061069d8b828c016105cc565b955050608089013567ffffffffffffffff8111156106be576106bd61054f565b5b6106ca8b828c016105ec565b945094505060a08901
3567ffffffffffffffff8111156106ed576106ec61054f565b5b6106f98b828c016105ec565b92509250509295985092959890939650565b5f7fffffffff0000000000000000000000000000000000000000000000000000000082169050919050565b61073f8161070b565b8114610749575f80fd5b50565b5f8135905061075a81610736565b92915050565b5f602082840312156107755761077461054b565b5b5f6107828482850161074c565b91505092915050565b5f8115159050919050565b61079f8161078b565b82525050565b5f6020820190506107b85f830184610796565b92915050565b5f805f805f608086880312156107d7576107d661054b565b5b5f6107e488828901610599565b95505060206107f588828901610599565b9450506040610806888289016105cc565b935050606086013567ffffffffffffffff8111156108275761082661054f565b5b610833888289016105ec565b92509250509295509295909350565b61084b8161070b565b82525050565b5f6020820190506108645f830184610842565b92915050565b5f819050919050565b61087c8161086a565b8114610886575f80fd5b50565b5f8135905061089781610873565b92915050565b5f80fd5b5f601f19601f8301169050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b6108e7826108a1565b810181811067ffffffffffffffff82111715610906576109056108b1565b5b80604052505050565b5f610918610542565b905061092482826108de565b919050565b5f67ffffffffffffffff821115610943576109426108b1565b5b61094c826108a1565b9050602081019050919050565b828183375f83830152505050565b5f61097961097484610929565b61090f565b9050828152602081018484840111156109955761099461089d565b5b6109a0848285610959565b509392505050565b5f82601f8301126109bc576109bb6105e0565b5b81356109cc848260208601610967565b91505092915050565b5f80604083850312156109eb576109ea61054b565b5b5f6109f885828601610889565b925050602083013567ffffffffffffffff811115610a1957610a1861054f565b5b610a25858286016109a8565b9150509250929050565b5f8083601f840112610a4457610a436105e0565b5b8235905067ffffffffffffffff811115610a6157610a606105e4565b5b602083019150836020820283011115610a7d57610a7c6105e8565b5b9250929050565b5f805f805f805f8060a0898b031215610aa057610a9f61054b565b5b5f610aad8b828c01610599565b9850506020610abe8b828c01610599565b975050604089013567ffffffffffffffff811115610adf57610ade61054f565b5b610aeb8b828c01610a2f565b9650965050606089013567ffffffffffffffff811115610b0e57610b0d61054f565b5b610b1a8b828c01610a2f565b9450945050608089013567ffffffffffffffff811115610b3d57610b3c61054f565b5b610b498b828c016105ec565b92509250509295985092959890939650565b610b6481610572565b82525050565b5f602082019050610b7d5f830184610b5b565b92915050565b5f805f805f8060a08789031215610b9d57610b9c61054b565b5b5f610baa89828a01610599565b9650506020610bbb89828a01610599565b9550506040610bcc89828a016105cc565b9450506060610bdd89828a016105cc565b935050608087013567ffffffffffffffff811115610bfe57610bfd61054f565b5b610c0a89828a016105ec565b92509250509295509295509295565b610c228161086a565b82525050565b5f81519050919050565b5f82825260208201905092915050565b5f5b83811015610c5f578082015181840152602081019050610c44565b5f8484015250505050565b5f610c7482610c28565b610c7e8185610c32565b9350610c8e818560208601610c42565b610c97816108a1565b840191505092915050565b5f606082019050610cb55f830186610b5b565b610cc26020830185610c19565b8181036040830152610cd48184610c6a565b9050949350505050565b5f81905092915050565b5f610cf282610c28565b610cfc8185610cde565b9350610d0c818560208601610c42565b80840191505092915050565b5f610d238284610ce8565b915081905092915050565b610d378161078b565b8114610d41575f80fd5b50565b5f81519050610d5281610d2e565b92915050565b5f60208284031215610d6d57610d6c61054b565b5b5f610d7a84828501610d44565b9150509291505056fea264697066735822122079a2b495dc3da197ff64bc2f601bc2ea89b1704c035aaebb9e4a19d8e71f691064736f6c63430008160033 \ No newline at end of 
file diff --git a/fvm/evm/handler/handler.go b/fvm/evm/handler/handler.go new file mode 100644 index 00000000000..5db940c2ead --- /dev/null +++ b/fvm/evm/handler/handler.go @@ -0,0 +1,942 @@ +package handler + +import ( + "fmt" + "math/big" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/onflow/cadence/common" + "go.opentelemetry.io/otel/attribute" + + "github.com/onflow/flow-go/fvm/environment" + fvmErrors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/handler/coa" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/trace" +) + +// ContractHandler is responsible for triggering calls to emulator, metering, +// event emission and updating the block +type ContractHandler struct { + flowChainID flow.ChainID + evmContractAddress flow.Address + flowTokenAddress common.Address + blockStore types.BlockStore + addressAllocator types.AddressAllocator + backend types.Backend + emulator types.Emulator + precompiledContracts []types.PrecompiledContract +} + +var _ types.ContractHandler = &ContractHandler{} + +// NewContractHandler constructs a new ContractHandler +func NewContractHandler( + flowChainID flow.ChainID, + evmContractAddress flow.Address, + flowTokenAddress common.Address, + randomBeaconAddress flow.Address, + blockStore types.BlockStore, + addressAllocator types.AddressAllocator, + backend types.Backend, + emulator types.Emulator, +) *ContractHandler { + return &ContractHandler{ + flowChainID: flowChainID, + evmContractAddress: evmContractAddress, + flowTokenAddress: flowTokenAddress, + blockStore: blockStore, + addressAllocator: addressAllocator, + backend: backend, + emulator: emulator, + precompiledContracts: preparePrecompiledContracts( + evmContractAddress, + randomBeaconAddress, + addressAllocator, + backend, + ), + } +} + +// FlowTokenAddress returns the address where the FlowToken contract is deployed +func (h *ContractHandler) FlowTokenAddress() common.Address { + return h.flowTokenAddress +} + +// EVMContractAddress returns the address where EVM contract is deployed +func (h *ContractHandler) EVMContractAddress() common.Address { + return common.Address(h.evmContractAddress) +} + +// DeployCOA deploys a cadence-owned-account and returns the address +func (h *ContractHandler) DeployCOA(uuid uint64) types.Address { + // capture open tracing traces + defer h.backend.StartChildSpan(trace.FVMEVMDeployCOA).End() + + res, err := h.deployCOA(uuid) + panicOnErrorOrInvalidOrFailedState(res, err) + + return *res.DeployedContractAddress +} + +func (h *ContractHandler) deployCOA(uuid uint64) (*types.Result, error) { + // step 1 - check enough computation is available + gaslimit := types.GasLimit(coa.ContractDeploymentRequiredGas) + err := h.checkGasLimit(gaslimit) + if err != nil { + return nil, err + } + + // step 2 - allocate a new address for the COA + target := h.addressAllocator.AllocateCOAAddress(uuid) + + // step 3 - create a COA deployment call + factory := h.addressAllocator.COAFactoryAddress() + factoryAccount := h.AccountByAddress(factory, false) + factoryNonce := factoryAccount.Nonce() + call := types.NewDeployCallWithTargetAddress( + factory, + target, + coa.ContractBytes, + uint64(gaslimit), + new(big.Int), + factoryNonce, + ) + + // step 4 - execute the call + res, err := h.executeAndHandleCall(call, nil, false) + if err != nil { + return nil, err + } + + // 
step 5 - if the deployment succeeded, update the deployed-COA metrics
+ h.backend.SetNumberOfDeployedCOAs(factoryNonce)
+ return res, nil
+}
+
+// AccountByAddress returns the account for the given address;
+// if isAuthorized is set, the account is controlled by the FVM (COAs)
+func (h *ContractHandler) AccountByAddress(addr types.Address, isAuthorized bool) types.Account {
+ return newAccount(h, addr, isAuthorized)
+}
+
+// LastExecutedBlock returns the last executed block
+func (h *ContractHandler) LastExecutedBlock() *types.Block {
+ block, err := h.blockStore.LatestBlock()
+ panicOnError(err)
+ return block
+}
+
+// RunOrPanic runs an rlp-encoded evm transaction and panics on any error,
+// as well as on an invalid or failed result.
+func (h *ContractHandler) RunOrPanic(rlpEncodedTx []byte, gasFeeCollector types.Address) {
+ // capture open tracing span
+ defer h.backend.StartChildSpan(trace.FVMEVMRun).End()
+
+ h.runWithGasFeeRefund(gasFeeCollector, func() {
+ res, err := h.run(rlpEncodedTx)
+ panicOnErrorOrInvalidOrFailedState(res, err)
+ })
+}
+
+// Run tries to run an rlp-encoded evm transaction,
+// collects the gas fees, and pays them to the gasFeeCollector address provided.
+func (h *ContractHandler) Run(rlpEncodedTx []byte, gasFeeCollector types.Address) *types.ResultSummary {
+ // capture open tracing span
+ defer h.backend.StartChildSpan(trace.FVMEVMRun).End()
+
+ var res *types.Result
+ var err error
+ h.runWithGasFeeRefund(gasFeeCollector, func() {
+ // run transaction
+ res, err = h.run(rlpEncodedTx)
+ panicOnError(err)
+ })
+ // return the result summary
+ return res.ResultSummary()
+}
+
+// runWithGasFeeRefund runs a method and transfers the balance changes of the
+// coinbase address to the provided gas fee collector
+func (h *ContractHandler) runWithGasFeeRefund(gasFeeCollector types.Address, f func()) {
+ // capture coinbase init balance
+ cb := h.AccountByAddress(types.CoinbaseAddress, true)
+ initCoinbaseBalance := cb.Balance()
+ f()
+ // transfer the gas fees collected to the gas fee collector address
+ afterBalance := cb.Balance()
+ diff := new(big.Int).Sub(afterBalance, initCoinbaseBalance)
+ if diff.Sign() > 0 {
+ cb.Transfer(gasFeeCollector, diff)
+ }
+ if diff.Sign() < 0 { // this should never happen, but guard just in case
+ panic(fvmErrors.NewEVMError(fmt.Errorf("negative balance change on coinbase")))
+ }
+}
+
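+// Illustrative sketch (an assumption for clarity, not part of the original
+// patch): seen from the caller's side, the fee-refund wrapper makes a run
+// behave roughly like the following, where `collector` is any EVM address:
+//
+//	coinbase := handler.AccountByAddress(types.CoinbaseAddress, true)
+//	before := coinbase.Balance()
+//	summary := handler.Run(rlpTx, collector) // fees accrue on the coinbase
+//	// any balance the coinbase gained during the run has been forwarded to
+//	// `collector`, so coinbase.Balance() is back to `before`
+//	_ = summary
+
+// BatchRun runs a batch of rlp-encoded transactions, collects the gas fees,
+// and pays them to the gasFeeCollector address provided.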
+// All transactions provided in the batch are included in a single block, +// except for invalid transactions +func (h *ContractHandler) BatchRun(rlpEncodedTxs [][]byte, gasFeeCollector types.Address) []*types.ResultSummary { + // capture open tracing + span := h.backend.StartChildSpan(trace.FVMEVMBatchRun) + span.SetAttributes(attribute.Int("tx_counts", len(rlpEncodedTxs))) + defer span.End() + + var results []*types.Result + var err error + h.runWithGasFeeRefund(gasFeeCollector, func() { + // batch run transactions and panic if any error + results, err = h.batchRun(rlpEncodedTxs) + panicOnError(err) + }) + + // convert results into result summaries + resSummaries := make([]*types.ResultSummary, len(results)) + for i, r := range results { + resSummaries[i] = r.ResultSummary() + } + return resSummaries +} + +func (h *ContractHandler) batchRun(rlpEncodedTxs [][]byte) ([]*types.Result, error) { + // step 1 - transaction decoding and compute total gas needed + // This is safe to be done before checking the gas + // as it has its own metering + var totalGasLimit types.GasLimit + batchLen := len(rlpEncodedTxs) + txs := make([]*gethTypes.Transaction, batchLen) + + for i, rlpEncodedTx := range rlpEncodedTxs { + tx, err := h.decodeTransaction(rlpEncodedTx) + // if any tx fails decoding revert the batch + if err != nil { + return nil, err + } + + txs[i] = tx + totalGasLimit += types.GasLimit(tx.Gas()) + } + + // step 2 - check if enough computation is available + // for the whole batch + err := h.checkGasLimit(totalGasLimit) + if err != nil { + return nil, err + } + + // step 3 - prepare block context + ctx, err := h.getBlockContext() + if err != nil { + return nil, err + } + + // step 4 - create a block view + blk, err := h.emulator.NewBlockView(ctx) + if err != nil { + return nil, err + } + + var res []*types.Result + // step 5 - batch run transactions + h.backend.RunWithMeteringDisabled( + func() { + res, err = blk.BatchRunTransactions(txs) + }, + ) + + if err != nil { + return nil, err + } + if len(res) == 0 { // safety check for result + return nil, types.ErrUnexpectedEmptyResult + } + + var hasAtLeastOneValid bool + // step 6 - meter all the transaction gas usage + // and append hashes to the block + for _, r := range res { + // meter gas anyway (even for invalid or failed states) + err = h.meterGasUsage(r) + if err != nil { + return nil, err + } + + // include it in a block only if valid (not invalid) + if !r.Invalid() { + hasAtLeastOneValid = true + } + } + + // step 7 - if there were no valid transactions + // skip the rest of steps + if !hasAtLeastOneValid { + return res, nil + } + + // load block proposal + bp, err := h.blockStore.BlockProposal() + if err != nil { + return nil, err + } + + // for valid transactions + for i, r := range res { + if r.Invalid() { // don't emit events for invalid tx + continue + } + + // step 8 - update block proposal + bp.AppendTransaction(r) + + // step 9 - emit transaction event + err = h.emitEvent(events.NewTransactionEvent( + r, + rlpEncodedTxs[i], + bp.Height, + )) + if err != nil { + return nil, err + } + + // step 10 - report metrics + h.backend.EVMTransactionExecuted( + r.GasConsumed, + false, + r.Failed(), + ) + } + + // update the block proposal + err = h.blockStore.UpdateBlockProposal(bp) + if err != nil { + return nil, err + } + + return res, nil +} + +// CommitBlockProposal commits the block proposal +// and add a new block to the EVM blockchain +func (h *ContractHandler) CommitBlockProposal() { + panicOnError(h.commitBlockProposal()) +} + 
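+// Illustrative usage sketch (an assumption, not part of the original patch):
+// a host typically runs any number of transactions against the open block
+// proposal and then commits once, sealing them all into the next EVM block:
+//
+//	for _, rlpTx := range rlpTxs {
+//		_ = handler.Run(rlpTx, gasFeeCollector)
+//	}
+//	handler.CommitBlockProposal()
+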
+func (h *ContractHandler) commitBlockProposal() error { + // load latest block proposal + bp, err := h.blockStore.BlockProposal() + if err != nil { + return err + } + + // commit the proposal + err = h.blockStore.CommitBlockProposal(bp) + if err != nil { + return err + } + + // emit block executed event + err = h.emitEvent(events.NewBlockEvent(&bp.Block)) + if err != nil { + return err + } + + // report metrics + h.backend.EVMBlockExecuted( + len(bp.TxHashes), + bp.TotalGasUsed, + types.UnsafeCastOfBalanceToFloat64(bp.TotalSupply), + ) + + // log evm block commitment + logger := h.backend.Logger() + logger.Info(). + Uint64("evm_height", bp.Height). + Int("tx_count", len(bp.TxHashes)). + Uint64("total_gas_used", bp.TotalGasUsed). + Uint64("total_supply", bp.TotalSupply.Uint64()). + Msg("EVM Block Committed") + + return nil +} + +func (h *ContractHandler) run(rlpEncodedTx []byte) (*types.Result, error) { + // step 1 - transaction decoding + tx, err := h.decodeTransaction(rlpEncodedTx) + if err != nil { + return nil, err + } + + // step 2 - check if enough computation is available + err = h.checkGasLimit(types.GasLimit(tx.Gas())) + if err != nil { + return nil, err + } + + // step 3 - prepare block context + ctx, err := h.getBlockContext() + if err != nil { + return nil, err + } + + // step 4 - create a block view + blk, err := h.emulator.NewBlockView(ctx) + if err != nil { + return nil, err + } + + // step 5 - run transaction + var res *types.Result + h.backend.RunWithMeteringDisabled( + func() { + res, err = blk.RunTransaction(tx) + }) + if err != nil { + return nil, err + } + if res == nil { // safety check for result + return nil, types.ErrUnexpectedEmptyResult + } + + // step 6 - meter gas anyway (even for invalid or failed states) + err = h.meterGasUsage(res) + if err != nil { + return nil, err + } + + // step 7 - skip the rest if is invalid tx + if res.Invalid() { + return res, nil + } + + // step 8 - update the block proposal + bp, err := h.blockStore.BlockProposal() + if err != nil { + return nil, err + } + bp.AppendTransaction(res) + err = h.blockStore.UpdateBlockProposal(bp) + if err != nil { + return nil, err + } + + // step 9 - emit transaction event + err = h.emitEvent( + events.NewTransactionEvent(res, rlpEncodedTx, bp.Height), + ) + + if err != nil { + return nil, err + } + + // step 10 - report metrics + h.backend.EVMTransactionExecuted( + res.GasConsumed, + false, + res.Failed(), + ) + + return res, nil +} + +// DryRun simulates execution of the provided RLP-encoded and unsigned transaction. 
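+// The simulation runs against the current block-proposal state: no events are
+// emitted, the block proposal is not updated, and (unlike Run) the consumed
+// gas is not metered against the enclosing Flow transaction.
+// Illustrative sketch (an assumption): a gas estimator built on top of DryRun
+// could look like:
+//
+//	sum := handler.DryRun(unsignedRLPTx, from)
+//	if sum.ErrorCode == types.ErrCodeNoError {
+//		estimate := sum.GasConsumed // caller may add a safety margin
+//		_ = estimate
+//	}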
+func (h *ContractHandler) DryRun( + rlpEncodedTx []byte, + from types.Address, +) *types.ResultSummary { + defer h.backend.StartChildSpan(trace.FVMEVMDryRun).End() + + res, err := h.dryRun(rlpEncodedTx, from) + panicOnError(err) + + return res.ResultSummary() +} + +func (h *ContractHandler) dryRun( + rlpEncodedTx []byte, + from types.Address, +) (*types.Result, error) { + // step 1 - transaction decoding + err := h.backend.MeterComputation( + common.ComputationUsage{ + Kind: environment.ComputationKindRLPDecoding, + Intensity: uint64(len(rlpEncodedTx)), + }, + ) + if err != nil { + return nil, err + } + + tx := gethTypes.Transaction{} + err = tx.UnmarshalBinary(rlpEncodedTx) + if err != nil { + return nil, err + } + + ctx, err := h.getBlockContext() + if err != nil { + return nil, err + } + + blk, err := h.emulator.NewBlockView(ctx) + if err != nil { + return nil, err + } + + res, err := blk.DryRunTransaction(&tx, from.ToCommon()) + if err != nil { + return nil, err + } + if res == nil { // safety check for result + return nil, types.ErrUnexpectedEmptyResult + } + + return res, nil +} + +// checkGasLimit checks if enough computation is left in the environment +// before attempting executing a evm operation +func (h *ContractHandler) checkGasLimit(limit types.GasLimit) error { + // check gas limit against what has been left on the transaction side + usage := common.ComputationUsage{ + Kind: environment.ComputationKindEVMGasUsage, + Intensity: uint64(limit), + } + if !h.backend.ComputationAvailable(usage) { + return types.ErrInsufficientComputation + } + return nil +} + +// decodeTransaction decodes RLP encoded transaction payload and meters the resources used. +func (h *ContractHandler) decodeTransaction(encodedTx []byte) (*gethTypes.Transaction, error) { + usage := common.ComputationUsage{ + Kind: environment.ComputationKindRLPDecoding, + Intensity: uint64(len(encodedTx)), + } + err := h.backend.MeterComputation(usage) + if err != nil { + return nil, err + } + + tx := gethTypes.Transaction{} + if err := tx.UnmarshalBinary(encodedTx); err != nil { + return nil, err + } + + return &tx, nil +} + +func (h *ContractHandler) meterGasUsage(res *types.Result) error { + usage := common.ComputationUsage{ + Kind: environment.ComputationKindEVMGasUsage, + Intensity: res.GasConsumed, + } + return h.backend.MeterComputation(usage) +} + +func (h *ContractHandler) emitEvent(event *events.Event) error { + ev, err := event.Payload.ToCadence(h.flowChainID) + if err != nil { + return err + } + return h.backend.EmitEvent(ev) +} + +func (h *ContractHandler) getBlockContext() (types.BlockContext, error) { + bp, err := h.blockStore.BlockProposal() + if err != nil { + return types.BlockContext{}, err + } + + return types.BlockContext{ + ChainID: types.EVMChainIDFromFlowChainID(h.flowChainID), + BlockNumber: bp.Height, + BlockTimestamp: bp.Timestamp, + DirectCallBaseGasUsage: types.DefaultDirectCallBaseGasUsage, + GetHashFunc: func(n uint64) gethCommon.Hash { + hash, err := h.blockStore.BlockHash(n) + panicOnError(err) // we have to handle it here given we can't continue with it even in try case + return hash + }, + ExtraPrecompiledContracts: h.precompiledContracts, + Random: bp.PrevRandao, + TxCountSoFar: uint(len(bp.TxHashes)), + TotalGasUsedSoFar: bp.TotalGasUsed, + GasFeeCollector: types.CoinbaseAddress, + }, nil +} + +func (h *ContractHandler) executeAndHandleCall( + call *types.DirectCall, + totalSupplyDiff *big.Int, + deductSupplyDiff bool, +) (*types.Result, error) { + // step 1 - check enough computation 
is available + if err := h.checkGasLimit(types.GasLimit(call.GasLimit)); err != nil { + return nil, err + } + + // step 2 - prepare the block context + ctx, err := h.getBlockContext() + if err != nil { + return nil, err + } + + // step 3 - create block view + blk, err := h.emulator.NewBlockView(ctx) + if err != nil { + return nil, err + } + + // step 4 - run direct call + var res *types.Result + h.backend.RunWithMeteringDisabled( + func() { + res, err = blk.DirectCall(call) + }) + // check backend errors first + if err != nil { + return nil, err + } + if res == nil { // safety check for result + return nil, types.ErrUnexpectedEmptyResult + } + + // step 5 - gas meter even invalid or failed status + err = h.meterGasUsage(res) + if err != nil { + return nil, err + } + + // step 6 - if is invalid skip the rest of states + if res.Invalid() { + return res, nil + } + + // step 7 - update block proposal + bp, err := h.blockStore.BlockProposal() + if err != nil { + return nil, err + } + + // append transaction to the block proposal + bp.AppendTransaction(res) + + // update total supply if applicable + if res.Successful() && totalSupplyDiff != nil { + if deductSupplyDiff { + bp.TotalSupply = new(big.Int).Sub(bp.TotalSupply, totalSupplyDiff) + if bp.TotalSupply.Sign() < 0 { + return nil, types.ErrInsufficientTotalSupply + } + } else { + bp.TotalSupply = new(big.Int).Add(bp.TotalSupply, totalSupplyDiff) + } + } + + // update the block proposal + err = h.blockStore.UpdateBlockProposal(bp) + if err != nil { + return nil, err + } + + // step 8 - emit transaction event + encoded, err := call.Encode() + if err != nil { + return nil, err + } + err = h.emitEvent( + events.NewTransactionEvent(res, encoded, bp.Height), + ) + if err != nil { + return nil, err + } + + // step 9 - report metrics + h.backend.EVMTransactionExecuted( + res.GasConsumed, + true, + res.Failed(), + ) + + return res, nil +} + +func (h *ContractHandler) GenerateResourceUUID() uint64 { + uuid, err := h.backend.GenerateUUID() + panicOnError(err) + return uuid +} + +type Account struct { + isAuthorized bool + address types.Address + fch *ContractHandler +} + +// newAccount creates a new evm account +func newAccount(fch *ContractHandler, addr types.Address, isAuthorized bool) *Account { + return &Account{ + isAuthorized: isAuthorized, + fch: fch, + address: addr, + } +} + +// Address returns the address associated with the account +func (a *Account) Address() types.Address { + return a.address +} + +// Nonce returns the nonce of this account +// +// Note: we don't meter any extra computation given reading data +// from the storage already translates into computation +func (a *Account) Nonce() uint64 { + nonce, err := a.nonce() + panicOnError(err) + return nonce +} + +func (a *Account) nonce() (uint64, error) { + ctx, err := a.fch.getBlockContext() + if err != nil { + return 0, err + } + + blk, err := a.fch.emulator.NewReadOnlyBlockView(ctx) + if err != nil { + return 0, err + } + + return blk.NonceOf(a.address) +} + +// Balance returns the balance of this account +// +// Note: we don't meter any extra computation given reading data +// from the storage already translates into computation +func (a *Account) Balance() types.Balance { + bal, err := a.balance() + panicOnError(err) + return bal +} + +func (a *Account) balance() (types.Balance, error) { + ctx, err := a.fch.getBlockContext() + if err != nil { + return nil, err + } + + blk, err := a.fch.emulator.NewReadOnlyBlockView(ctx) + if err != nil { + return nil, err + } + + bl, err := 
blk.BalanceOf(a.address)
+ return types.NewBalance(bl), err
+}
+
+// Code returns the code of this account
+//
+// Note: we don't meter any extra computation given reading data
+// from the storage already translates into computation
+func (a *Account) Code() types.Code {
+ code, err := a.code()
+ panicOnError(err)
+ return code
+}
+
+func (a *Account) code() (types.Code, error) {
+ ctx, err := a.fch.getBlockContext()
+ if err != nil {
+ return nil, err
+ }
+
+ blk, err := a.fch.emulator.NewReadOnlyBlockView(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return blk.CodeOf(a.address)
+}
+
+// CodeHash returns the code hash of this account
+//
+// Note: we don't meter any extra computation given reading data
+// from the storage already translates into computation
+func (a *Account) CodeHash() []byte {
+ codeHash, err := a.codeHash()
+ panicOnError(err)
+ return codeHash
+}
+
+func (a *Account) codeHash() ([]byte, error) {
+ ctx, err := a.fch.getBlockContext()
+ if err != nil {
+ return nil, err
+ }
+
+ blk, err := a.fch.emulator.NewReadOnlyBlockView(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return blk.CodeHashOf(a.address)
+}
+
+// Deposit deposits the token from the given vault into the Flow EVM main vault
+// and updates the account balance with the new amount
+func (a *Account) Deposit(v *types.FLOWTokenVault) {
+ defer a.fch.backend.StartChildSpan(trace.FVMEVMDeposit).End()
+
+ bridge := a.fch.addressAllocator.NativeTokenBridgeAddress()
+ bridgeAccount := a.fch.AccountByAddress(bridge, false)
+ // Note: it's not an authorized call
+ res, err := a.fch.executeAndHandleCall(
+ types.NewDepositCall(
+ bridge,
+ a.address,
+ v.Balance(),
+ bridgeAccount.Nonce(),
+ ),
+ v.Balance(),
+ false,
+ )
+ panicOnErrorOrInvalidOrFailedState(res, err)
+}
+
+// Withdraw deducts the balance from the account, withdraws it from the
+// EVM main vault, and returns it as a FLOW token vault.
+func (a *Account) Withdraw(b types.Balance) *types.FLOWTokenVault {
+ defer a.fch.backend.StartChildSpan(trace.FVMEVMWithdraw).End()
+
+ res, err := a.executeAndHandleAuthorizedCall(
+ types.NewWithdrawCall(
+ a.fch.addressAllocator.NativeTokenBridgeAddress(),
+ a.address,
+ b,
+ a.Nonce(),
+ ),
+ b,
+ true,
+ )
+ panicOnErrorOrInvalidOrFailedState(res, err)
+
+ return types.NewFlowTokenVault(b)
+}
+
+// Transfer transfers tokens between accounts
+func (a *Account) Transfer(to types.Address, balance types.Balance) {
+ res, err := a.executeAndHandleAuthorizedCall(
+ types.NewTransferCall(
+ a.address,
+ to,
+ balance,
+ a.Nonce(),
+ ),
+ nil,
+ false,
+ )
+ panicOnErrorOrInvalidOrFailedState(res, err)
+}
+
+// Deploy deploys a contract to the EVM environment.
+// The address of the newly deployed contract is carried in the returned
+// result summary; the deployed contract is not controlled by the caller account.
+func (a *Account) Deploy(code types.Code, gaslimit types.GasLimit, balance types.Balance) *types.ResultSummary {
+ // capture open tracing span
+ defer a.fch.backend.StartChildSpan(trace.FVMEVMDeploy).End()
+
+ res, err := a.executeAndHandleAuthorizedCall(
+ types.NewDeployCall(
+ a.address,
+ code,
+ uint64(gaslimit),
+ balance,
+ a.Nonce(),
+ ),
+ nil,
+ false,
+ )
+ panicOnError(err)
+
+ return res.ResultSummary()
+}
+
+// Call calls a smart contract function with the given data.
+// It caps the gas used at the provided gas limit, provided that limit does
+// not go beyond what the enclosing Flow transaction allows.
+// The provided balance is deducted from the calling account and transferred
+// to the target address.
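+//
+// Illustrative sketch (an assumption, not part of the original patch):
+// calling a view function on a deployed contract from a COA:
+//
+//	ret := coa.Call(contractAddr, calldata, 100_000, types.EmptyBalance)
+//	if ret.Status == types.StatusSuccessful {
+//		_ = ret.ReturnedData // ABI-encoded return value
+//	}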
+func (a *Account) Call(to types.Address, data types.Data, gaslimit types.GasLimit, balance types.Balance) *types.ResultSummary {
+ // capture open tracing span
+ defer a.fch.backend.StartChildSpan(trace.FVMEVMCall).End()
+
+ res, err := a.executeAndHandleAuthorizedCall(
+ types.NewContractCall(
+ a.address,
+ to,
+ data,
+ uint64(gaslimit),
+ balance,
+ a.Nonce(),
+ ),
+ nil,
+ false,
+ )
+ panicOnError(err)
+
+ return res.ResultSummary()
+}
+
+func (a *Account) executeAndHandleAuthorizedCall(
+ call *types.DirectCall,
+ totalSupplyDiff *big.Int,
+ deductSupplyDiff bool,
+) (*types.Result, error) {
+ if !a.isAuthorized {
+ return nil, types.ErrUnauthorizedMethodCall
+ }
+ return a.fch.executeAndHandleCall(call, totalSupplyDiff, deductSupplyDiff)
+}
+
+func panicOnErrorOrInvalidOrFailedState(res *types.Result, err error) {
+ if res != nil && res.Invalid() {
+ panic(fvmErrors.NewEVMError(res.ValidationError))
+ }
+
+ if res != nil && res.Failed() {
+ panic(fvmErrors.NewEVMError(res.VMError))
+ }
+
+ // this should never happen
+ if err == nil && res == nil {
+ panic(fvmErrors.NewEVMError(types.ErrUnexpectedEmptyResult))
+ }
+
+ panicOnError(err)
+}
+
+// panicOnError panics on any returned error, wrapping fatal errors as EVM
+// failures and all other non-backend errors as EVM errors
+func panicOnError(err error) {
+ if err == nil {
+ return
+ }
+
+ if types.IsAFatalError(err) {
+ panic(fvmErrors.NewEVMFailure(err))
+ }
+
+ if types.IsABackendError(err) {
+ // backend errors don't need wrapping
+ panic(err)
+ }
+
+ // any other returned errors are non-fatal errors
+ panic(fvmErrors.NewEVMError(err))
+}
diff --git a/fvm/evm/handler/handler_benchmark_test.go b/fvm/evm/handler/handler_benchmark_test.go
new file mode 100644
index 00000000000..136a431aec7
--- /dev/null
+++ b/fvm/evm/handler/handler_benchmark_test.go
@@ -0,0 +1,83 @@
+package handler_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/flow-go/fvm/evm/testutils"
+ "github.com/onflow/flow-go/fvm/evm/types"
+ "github.com/onflow/flow-go/model/flow"
+)
+
+func BenchmarkStorage(b *testing.B) { benchmarkStorageGrowth(b, 100, 100, 100) }
+
+// benchmarkStorageGrowth deploys a test contract, mints kitties from a pool
+// of COAs, and reports the backend's read/write and storage-size metrics
+func benchmarkStorageGrowth(b *testing.B, accountCount, setupKittyCount, txPerBlock int) {
+ testutils.RunWithTestBackend(b, func(backend *testutils.TestBackend) {
+ testutils.RunWithTestFlowEVMRootAddress(b, backend, func(rootAddr flow.Address) {
+ testutils.RunWithDeployedContract(b,
+ testutils.GetDummyKittyTestContract(b),
+ backend,
+ rootAddr,
+ func(tc *testutils.TestContract) {
+ handler := SetupHandler(b, backend, rootAddr)
+ accounts := make([]types.Account, accountCount)
+ // set up a number of accounts;
+ // note that trie growth is a function of the number of accounts
+ for i := 0; i < accountCount; i++ {
+ account := handler.AccountByAddress(handler.DeployCOA(uint64(i+1)), true)
+ account.Deposit(types.NewFlowTokenVault(types.NewBalanceFromUFix64(100)))
+ accounts[i] = account
+ }
+ backend.DropEvents()
+ // mint kitties
+ for i := 0; i < setupKittyCount; i++ {
+ account := accounts[i%accountCount]
+ matronId := testutils.RandomBigInt(1000)
+ sireId := testutils.RandomBigInt(1000)
+ generation := testutils.RandomBigInt(1000)
+ genes := testutils.RandomBigInt(1000)
+ require.NotNil(b, account)
+ account.Call(
+ tc.DeployedAt,
+ tc.MakeCallData(b,
+ "CreateKitty",
+ matronId,
+ sireId,
+ generation,
+ genes,
+ ),
+ 50_000,
+ types.NewBalanceFromUFix64(0),
+ )
+ require.Equal(b, 1, len(backend.Events()))
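+ // events and stats are cleared after each mint so that only storage
+ // growth, not event accumulation, shows up in the reported metrics: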
backend.DropEvents() // this would make things lighter + backend.ResetStats() // reset stats + + if i%txPerBlock == 0 { + handler.CommitBlockProposal() + backend.DropEvents() + } + } + + accounts[0].Call( + tc.DeployedAt, + tc.MakeCallData(b, + "CreateKitty", + testutils.RandomBigInt(1000), + testutils.RandomBigInt(1000), + testutils.RandomBigInt(1000), + testutils.RandomBigInt(1000), + ), + 50_000, + types.NewBalanceFromUFix64(0), + ) + + b.ReportMetric(float64(backend.TotalBytesRead()), "bytes_read") + b.ReportMetric(float64(backend.TotalBytesWritten()), "bytes_written") + b.ReportMetric(float64(backend.TotalStorageSize()), "total_storage_size") + }) + }) + }) +} diff --git a/fvm/evm/handler/handler_test.go b/fvm/evm/handler/handler_test.go new file mode 100644 index 00000000000..8dabc3a1646 --- /dev/null +++ b/fvm/evm/handler/handler_test.go @@ -0,0 +1,1325 @@ +package handler_test + +import ( + "fmt" + "math" + "math/big" + "testing" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethCore "github.com/ethereum/go-ethereum/core" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethVM "github.com/ethereum/go-ethereum/core/vm" + gethParams "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/onflow/cadence/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/evm/emulator" + "github.com/onflow/flow-go/fvm/evm/handler" + "github.com/onflow/flow-go/fvm/evm/handler/coa" + "github.com/onflow/flow-go/fvm/evm/precompiles" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/trace" +) + +var flowTokenAddress = common.MustBytesToAddress(systemcontracts.SystemContractsForChain(flow.Emulator).FlowToken.Address.Bytes()) +var randomBeaconAddress = systemcontracts.SystemContractsForChain(flow.Emulator).RandomBeaconHistory.Address + +const defaultChainID = flow.Testnet + +func TestHandler_TransactionRunOrPanic(t *testing.T) { + t.Parallel() + + t.Run("test RunOrPanic run (happy case)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + + sc := systemcontracts.SystemContractsForChain(flow.Emulator) + + bs := handler.NewBlockStore(defaultChainID, backend, rootAddr) + + aa := handler.NewAddressAllocator() + + result := &types.Result{ + ReturnedData: testutils.RandomData(t), + GasConsumed: testutils.RandomGas(1000), + Logs: []*gethTypes.Log{ + testutils.GetRandomLogFixture(t), + testutils.GetRandomLogFixture(t), + }, + } + + em := &testutils.TestEmulator{ + RunTransactionFunc: func(tx *gethTypes.Transaction) (*types.Result, error) { + return result, nil + }, + // this disables the fee collection step + BalanceOfFunc: func(address types.Address) (*big.Int, error) { + return new(big.Int), nil + }, + } + handler := handler.NewContractHandler(flow.Emulator, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + + coinbase := types.NewAddress(gethCommon.Address{}) + + tx := eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + 100_000, + big.NewInt(1), + ) + + // calculate tx id to match it + var evmTx 
gethTypes.Transaction + err := evmTx.UnmarshalBinary(tx) + require.NoError(t, err) + result.TxHash = evmTx.Hash() + + // successfully run (no-panic) + handler.RunOrPanic(tx, coinbase) + + // check event + events := backend.Events() + require.Len(t, events, 1) + + txEventPayload := testutils.TxEventToPayload(t, events[0], sc.EVMContract.Address) + + // check logs + var logs []*gethTypes.Log + err = rlp.DecodeBytes(txEventPayload.Logs, &logs) + require.NoError(t, err) + for i, l := range result.Logs { + assert.Equal(t, l, logs[i]) + } + + // form block + handler.CommitBlockProposal() + + // check block event + events = backend.Events() + require.Len(t, events, 2) + blockEventPayload := testutils.BlockEventToPayload(t, events[1], sc.EVMContract.Address) + // make sure the transaction id included in the block transaction list is the same as tx submitted + assert.Equal( + t, + types.TransactionHashes{txEventPayload.Hash}.RootHash(), + blockEventPayload.TransactionHashRoot, + ) + }) + }) + }) + }) + + t.Run("test RunOrPanic (unhappy non-fatal cases)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + bs := handler.NewBlockStore(defaultChainID, backend, rootAddr) + aa := handler.NewAddressAllocator() + em := &testutils.TestEmulator{ + RunTransactionFunc: func(tx *gethTypes.Transaction) (*types.Result, error) { + return &types.Result{ + ValidationError: fmt.Errorf("some sort of validation error"), + }, nil + }, + // this disables the fee collection step + BalanceOfFunc: func(address types.Address) (*big.Int, error) { + return new(big.Int), nil + }, + } + handler := handler.NewContractHandler(flow.Testnet, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + + coinbase := types.NewAddress(gethCommon.Address{}) + + // test RLP decoding (non fatal) + assertPanic(t, isNotFatal, func() { + // invalid RLP encoding + invalidTx := "badencoding" + handler.RunOrPanic([]byte(invalidTx), coinbase) + }) + + // test gas limit (non fatal) + assertPanic(t, isNotFatal, func() { + gasLimit := uint64(testutils.TestComputationLimit + 1) + tx := eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + gasLimit, + big.NewInt(1), + ) + + handler.RunOrPanic([]byte(tx), coinbase) + }) + + // tx validation error + assertPanic(t, isNotFatal, func() { + // tx execution failure + tx := eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + 100_000, + big.NewInt(1), + ) + + handler.RunOrPanic([]byte(tx), coinbase) + }) + }) + }) + }) + + t.Run("test RunOrPanic (fatal cases)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + bs := handler.NewBlockStore(defaultChainID, backend, rootAddr) + aa := handler.NewAddressAllocator() + em := &testutils.TestEmulator{ + RunTransactionFunc: func(tx *gethTypes.Transaction) (*types.Result, error) { + return &types.Result{}, types.NewFatalError(fmt.Errorf("Fatal error")) + }, + // this disables the fee collection step + BalanceOfFunc: func(address types.Address) (*big.Int, error) { + return new(big.Int), nil + }, + } + handler := handler.NewContractHandler(flow.Testnet, 
rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + assertPanic(t, errors.IsFailure, func() { + tx := eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + 100_000, + big.NewInt(1), + ) + handler.RunOrPanic([]byte(tx), types.NewAddress(gethCommon.Address{})) + }) + }) + }) + }) + }) + }) + + t.Run("test RunOrPanic (with integrated emulator)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + handler := SetupHandler(t, backend, rootAddr) + + eoa := testutils.GetTestEOAAccount(t, testutils.EOATestAccount1KeyHex) + + // deposit 1 Flow to the foa account + addr := handler.DeployCOA(1) + orgBalance := types.NewBalanceFromUFix64(types.OneFlowInUFix64) + vault := types.NewFlowTokenVault(orgBalance) + foa := handler.AccountByAddress(addr, true) + foa.Deposit(vault) + + // transfer 0.1 flow to the non-foa address + deduction := types.NewBalance(big.NewInt(1e17)) + foa.Call(eoa.Address(), nil, 400000, deduction) + expected, err := types.SubBalance(orgBalance, deduction) + require.NoError(t, err) + require.Equal(t, expected, foa.Balance()) + + // transfer 0.01 flow back to the foa through + addition := types.NewBalance(big.NewInt(1e16)) + + tx := eoa.PrepareSignAndEncodeTx( + t, + foa.Address().ToCommon(), + nil, + addition, + gethParams.TxGas*10, + big.NewInt(1e8), // high gas fee to test coinbase collection, + ) + + // setup coinbase + foa2 := handler.DeployCOA(2) + account2 := handler.AccountByAddress(foa2, true) + require.True(t, types.BalancesAreEqual( + types.NewBalanceFromUFix64(0), + account2.Balance(), + )) + + // no panic means success here + handler.RunOrPanic(tx, account2.Address()) + expected, err = types.SubBalance(orgBalance, deduction) + require.NoError(t, err) + expected, err = types.AddBalance(expected, addition) + require.NoError(t, err) + require.Equal(t, expected, foa.Balance()) + + require.NotEqual(t, types.NewBalanceFromUFix64(0), account2.Balance()) + }) + }) + }) +} + +func TestHandler_OpsWithoutEmulator(t *testing.T) { + t.Parallel() + + t.Run("test last executed block call", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + handler := SetupHandler(t, backend, rootAddr) + + // test call last executed block without initialization + b := handler.LastExecutedBlock() + require.Equal(t, types.GenesisBlock(defaultChainID), b) + + // do some changes + address := testutils.RandomAddress(t) + account := handler.AccountByAddress(address, true) + bal := types.OneFlowBalance() + account.Deposit(types.NewFlowTokenVault(bal)) + + handler.CommitBlockProposal() + // check if block height has been incremented + b = handler.LastExecutedBlock() + require.Equal(t, uint64(1), b.Height) + }) + }) + }) + + t.Run("test address allocation", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + h := SetupHandler(t, backend, rootAddr) + + coa := h.DeployCOA(12) + require.NotNil(t, coa) + + expectedAddress := handler.MakeCOAAddress(12) + require.Equal(t, expectedAddress, coa) + }) + }) + }) +} + +func TestHandler_COA(t *testing.T) { + t.Parallel() + + t.Run("test deposit/withdraw (with integrated emulator)", func(t *testing.T) { + 
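+ // exercises a full deposit/withdraw round trip against the integrated
+ // emulator, then verifies the emitted transaction and block events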
testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + sc := systemcontracts.SystemContractsForChain(flow.Emulator) + + handler := SetupHandler(t, backend, rootAddr) + + foa := handler.AccountByAddress(handler.DeployCOA(1), true) + require.NotNil(t, foa) + + zeroBalance := types.NewBalance(big.NewInt(0)) + require.True(t, types.BalancesAreEqual(zeroBalance, foa.Balance())) + + balance := types.OneFlowBalance() + vault := types.NewFlowTokenVault(balance) + + foa.Deposit(vault) + require.Equal(t, balance, foa.Balance()) + + v := foa.Withdraw(balance) + require.Equal(t, balance, v.Balance()) + + require.True(t, types.BalancesAreEqual( + zeroBalance, foa.Balance())) + + events := backend.Events() + require.Len(t, events, 3) + + // Block level expected values + txHashes := make(types.TransactionHashes, 0) + totalGasUsed := uint64(0) + + // deploy COA transaction event + txEventPayload := testutils.TxEventToPayload(t, events[0], sc.EVMContract.Address) + tx, err := types.DirectCallFromEncoded(txEventPayload.Payload) + require.NoError(t, err) + txHashes = append(txHashes, tx.Hash()) + totalGasUsed += txEventPayload.GasConsumed + + // deposit transaction event + txEventPayload = testutils.TxEventToPayload(t, events[1], sc.EVMContract.Address) + tx, err = types.DirectCallFromEncoded(txEventPayload.Payload) + require.NoError(t, err) + require.Equal(t, foa.Address(), tx.To) + require.Equal(t, types.BalanceToBigInt(balance), tx.Value) + txHashes = append(txHashes, tx.Hash()) + totalGasUsed += txEventPayload.GasConsumed + + // withdraw transaction event + txEventPayload = testutils.TxEventToPayload(t, events[2], sc.EVMContract.Address) + tx, err = types.DirectCallFromEncoded(txEventPayload.Payload) + require.NoError(t, err) + require.Equal(t, foa.Address(), tx.From) + require.Equal(t, types.BalanceToBigInt(balance), tx.Value) + txHashes = append(txHashes, tx.Hash()) + totalGasUsed += txEventPayload.GasConsumed + + // block event + handler.CommitBlockProposal() + events = backend.Events() + require.Len(t, events, 4) + + blockEventPayload := testutils.BlockEventToPayload(t, events[3], sc.EVMContract.Address) + assert.Equal( + t, + txHashes.RootHash(), + blockEventPayload.TransactionHashRoot, + ) + + require.Equal(t, totalGasUsed, blockEventPayload.TotalGasUsed) + + // check gas usage + computationUsed, err := backend.ComputationUsed() + require.NoError(t, err) + require.Greater(t, computationUsed, types.DefaultDirectCallBaseGasUsage*3) + + // Withdraw with invalid balance + assertPanic(t, types.IsWithdrawBalanceRoundingError, func() { + // deposit some money + foa.Deposit(vault) + // then withdraw invalid balance + foa.Withdraw(types.NewBalance(big.NewInt(1))) + }) + }) + }) + }) + + t.Run("test coa deployment", func(t *testing.T) { + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + h := SetupHandler(t, backend, rootAddr) + + coa1 := h.DeployCOA(1) + acc := h.AccountByAddress(coa1, true) + require.NotEmpty(t, acc.Code()) + + // make a second account with some money + coa2 := h.DeployCOA(2) + acc2 := h.AccountByAddress(coa2, true) + acc2.Deposit(types.NewFlowTokenVault(types.MakeABalanceInFlow(100))) + + // transfer money to COA + acc2.Transfer( + coa1, + types.MakeABalanceInFlow(1), + ) + + // make a call to the contract + ret := acc2.Call( + coa1, + testutils.MakeCallData(t, + coa.ContractABIJSON, 
+ "onERC721Received", + gethCommon.Address{1}, + gethCommon.Address{1}, + big.NewInt(0), + []byte{'A'}, + ), + types.GasLimit(3_000_000), + types.EmptyBalance) + + // 0x150b7a02 + expected := types.Data([]byte{ + 21, 11, 122, 2, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + }) + require.Equal(t, types.StatusSuccessful, ret.Status) + require.Equal(t, expected, ret.ReturnedData) + }) + }) + }) + + t.Run("test withdraw (unhappy case)", func(t *testing.T) { + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + bs := handler.NewBlockStore(defaultChainID, backend, rootAddr) + aa := handler.NewAddressAllocator() + + // Withdraw calls are only possible within FOA accounts + assertPanic(t, types.IsAUnauthorizedMethodCallError, func() { + em := &testutils.TestEmulator{ + NonceOfFunc: func(address types.Address) (uint64, error) { + return 0, nil + }, + } + + handler := handler.NewContractHandler(flow.Testnet, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + + account := handler.AccountByAddress(testutils.RandomAddress(t), false) + account.Withdraw(types.NewBalanceFromUFix64(1)) + }) + + // test insufficient total supply error + assertPanic(t, types.IsAInsufficientTotalSupplyError, func() { + em := &testutils.TestEmulator{ + NonceOfFunc: func(address types.Address) (uint64, error) { + return 0, nil + }, + DirectCallFunc: func(call *types.DirectCall) (*types.Result, error) { + return &types.Result{}, nil + }, + } + + handler := handler.NewContractHandler(flow.Testnet, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + account := handler.AccountByAddress(testutils.RandomAddress(t), true) + + account.Withdraw(types.NewBalanceFromUFix64(1)) + }) + + // test non fatal error of emulator + assertPanic(t, isNotFatal, func() { + em := &testutils.TestEmulator{ + NonceOfFunc: func(address types.Address) (uint64, error) { + return 0, nil + }, + DirectCallFunc: func(call *types.DirectCall) (*types.Result, error) { + return &types.Result{}, fmt.Errorf("some sort of error") + }, + } + + handler := handler.NewContractHandler(flow.Testnet, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + account := handler.AccountByAddress(testutils.RandomAddress(t), true) + + account.Withdraw(types.NewBalanceFromUFix64(0)) + }) + + // test fatal error of emulator + assertPanic(t, types.IsAFatalError, func() { + em := &testutils.TestEmulator{ + NonceOfFunc: func(address types.Address) (uint64, error) { + return 0, nil + }, + DirectCallFunc: func(call *types.DirectCall) (*types.Result, error) { + return &types.Result{}, types.NewFatalError(fmt.Errorf("some sort of fatal error")) + }, + } + + handler := handler.NewContractHandler(flow.Testnet, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + account := handler.AccountByAddress(testutils.RandomAddress(t), true) + + account.Withdraw(types.NewBalanceFromUFix64(0)) + }) + }) + }) + }) + }) + + t.Run("test deposit (unhappy case)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + bs := handler.NewBlockStore(defaultChainID, backend, rootAddr) + aa := 
handler.NewAddressAllocator() + + // test non fatal error of emulator + assertPanic(t, isNotFatal, func() { + em := &testutils.TestEmulator{ + NonceOfFunc: func(address types.Address) (uint64, error) { + return 0, nil + }, + DirectCallFunc: func(call *types.DirectCall) (*types.Result, error) { + return &types.Result{}, fmt.Errorf("some sort of error") + }, + } + + handler := handler.NewContractHandler(flow.Testnet, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + account := handler.AccountByAddress(testutils.RandomAddress(t), true) + + account.Deposit(types.NewFlowTokenVault(types.NewBalanceFromUFix64(1))) + }) + + // test fatal error of emulator + assertPanic(t, types.IsAFatalError, func() { + em := &testutils.TestEmulator{ + NonceOfFunc: func(address types.Address) (uint64, error) { + return 0, nil + }, + DirectCallFunc: func(call *types.DirectCall) (*types.Result, error) { + return &types.Result{}, types.NewFatalError(fmt.Errorf("some sort of fatal error")) + }, + } + + handler := handler.NewContractHandler(flow.Testnet, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + account := handler.AccountByAddress(testutils.RandomAddress(t), true) + + account.Deposit(types.NewFlowTokenVault(types.NewBalanceFromUFix64(1))) + }) + }) + }) + }) + }) + + t.Run("test deploy/call (with integrated emulator)", func(t *testing.T) { + t.Parallel() + + // TODO update this test with events, gas metering, etc + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + handler := SetupHandler(t, backend, rootAddr) + + foa := handler.AccountByAddress(handler.DeployCOA(1), true) + require.NotNil(t, foa) + + // deposit 10000 flow + bal := types.MakeABalanceInFlow(10000) + vault := types.NewFlowTokenVault(bal) + foa.Deposit(vault) + require.Equal(t, bal, foa.Balance()) + + testContract := testutils.GetStorageTestContract(t) + result := foa.Deploy(testContract.ByteCode, math.MaxUint64, types.NewBalanceFromUFix64(0)) + require.NotNil(t, result.DeployedContractAddress) + addr := *result.DeployedContractAddress + // skip first few bytes as they are deploy codes + assert.Equal(t, testContract.ByteCode[17:], []byte(result.ReturnedData)) + require.NotNil(t, addr) + + num := big.NewInt(22) + _ = foa.Call( + addr, + testContract.MakeCallData(t, "store", num), + math.MaxUint64, + types.NewBalanceFromUFix64(0)) + + res := foa.Call( + addr, + testContract.MakeCallData(t, "retrieve"), + math.MaxUint64, + types.NewBalanceFromUFix64(0)) + + require.Equal(t, num, res.ReturnedData.AsBigInt()) + }) + }) + }) + + t.Run("test call to cadence arch", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + blockHeight := uint64(123) + backend.GetCurrentBlockHeightFunc = func() (uint64, error) { + return blockHeight, nil + } + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + sc := systemcontracts.SystemContractsForChain(flow.Emulator) + + h := SetupHandler(t, backend, rootAddr) + + foa := h.AccountByAddress(h.DeployCOA(1), true) + require.NotNil(t, foa) + + vault := types.NewFlowTokenVault(types.MakeABalanceInFlow(10000)) + foa.Deposit(vault) + + arch := handler.MakePrecompileAddress(1) + + ret := foa.Call(arch, precompiles.FlowBlockHeightFuncSig[:], math.MaxUint64, types.NewBalanceFromUFix64(0)) + require.Equal(t, big.NewInt(int64(blockHeight)), new(big.Int).SetBytes(ret.ReturnedData)) + + events := backend.Events() + 
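+ // three events are expected at this point: the COA deployment, the
+ // deposit, and the cadence arch call itself; the aggregated precompile
+ // calls are attached to the last transaction event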
require.Len(t, events, 3) + // last transaction executed event + + event := events[2] + txEventPayload := testutils.TxEventToPayload(t, event, sc.EVMContract.Address) + + apc, err := types.AggregatedPrecompileCallsFromEncoded(txEventPayload.PrecompiledCalls) + require.NoError(t, err) + + require.False(t, apc.IsEmpty()) + pc := apc[0] + require.Equal(t, arch, pc.Address) + require.Len(t, pc.RequiredGasCalls, 1) + require.Equal(t, + pc.RequiredGasCalls[0], + precompiles.FlowBlockHeightFixedGas, + ) + require.Len(t, pc.RunCalls, 1) + require.Equal(t, + pc.RunCalls[0], + types.RunCall{ + Output: ret.ReturnedData, + ErrorMsg: "", + }, + ) + }) + }) + }) + + t.Run("test block.random call (with integrated emulator)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + random := testutils.RandomCommonHash(t) + backend.ReadRandomFunc = func(buffer []byte) error { + copy(buffer, random.Bytes()) + return nil + } + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + handler := SetupHandler(t, backend, rootAddr) + + foa := handler.AccountByAddress(handler.DeployCOA(1), true) + require.NotNil(t, foa) + + vault := types.NewFlowTokenVault(types.MakeABalanceInFlow(100)) + foa.Deposit(vault) + + testContract := testutils.GetStorageTestContract(t) + result := foa.Deploy(testContract.ByteCode, math.MaxUint64, types.EmptyBalance) + require.NotNil(t, result.DeployedContractAddress) + addr := *result.DeployedContractAddress + require.Equal(t, types.StatusSuccessful, result.Status) + require.Equal(t, types.ErrCodeNoError, result.ErrorCode) + + ret := foa.Call( + addr, + testContract.MakeCallData(t, "random"), + math.MaxUint64, + types.EmptyBalance) + + require.Equal(t, random.Bytes(), []byte(ret.ReturnedData)) + }) + }) + }) + + // TODO add test with test emulator for unhappy cases (emulator) +} + +func TestHandler_TransactionRun(t *testing.T) { + t.Parallel() + + const chainID = flow.Testnet + + t.Run("test - transaction run (success)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + + bs := handler.NewBlockStore(chainID, backend, rootAddr) + aa := handler.NewAddressAllocator() + + result := &types.Result{ + ReturnedData: testutils.RandomData(t), + GasConsumed: testutils.RandomGas(1000), + Logs: []*gethTypes.Log{ + testutils.GetRandomLogFixture(t), + testutils.GetRandomLogFixture(t), + }, + } + + gasFeeCollector := types.NewAddress(gethCommon.Address{1, 2, 3}) + firstBalanceCall := true + feeCollected := false + coinbaseInitBalance := big.NewInt(1) + coinbaseAfterBalance := big.NewInt(3) + coinbaseDiffBalance := big.NewInt(2) + em := &testutils.TestEmulator{ + RunTransactionFunc: func(tx *gethTypes.Transaction) (*types.Result, error) { + return result, nil + }, + BalanceOfFunc: func(address types.Address) (*big.Int, error) { + if firstBalanceCall { + firstBalanceCall = false + return coinbaseInitBalance, nil + } + return coinbaseAfterBalance, nil + }, + NonceOfFunc: func(address types.Address) (uint64, error) { + return 0, nil + }, + DirectCallFunc: func(call *types.DirectCall) (*types.Result, error) { + feeCollected = true + require.Equal(t, types.CoinbaseAddress, call.From) + require.Equal(t, gasFeeCollector, call.To) + require.True(t, types.BalancesAreEqual(call.Value, 
coinbaseDiffBalance)) + return &types.Result{ + GasConsumed: 21_000, + }, nil + }, + } + handler := handler.NewContractHandler(chainID, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + tx := eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + 100_000, + big.NewInt(1), + ) + + rs := handler.Run(tx, gasFeeCollector) + require.Equal(t, types.StatusSuccessful, rs.Status) + require.Equal(t, result.GasConsumed, rs.GasConsumed) + require.Equal(t, types.ErrCodeNoError, rs.ErrorCode) + require.True(t, feeCollected) + + }) + }) + }) + }) + + t.Run("test - transaction run (failed)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + + bs := handler.NewBlockStore(chainID, backend, rootAddr) + aa := handler.NewAddressAllocator() + + result := &types.Result{ + VMError: gethVM.ErrOutOfGas, + ReturnedData: testutils.RandomData(t), + GasConsumed: testutils.RandomGas(1000), + Logs: []*gethTypes.Log{ + testutils.GetRandomLogFixture(t), + testutils.GetRandomLogFixture(t), + }, + } + + em := &testutils.TestEmulator{ + RunTransactionFunc: func(tx *gethTypes.Transaction) (*types.Result, error) { + return result, nil + }, + BalanceOfFunc: func(address types.Address) (*big.Int, error) { + return new(big.Int), nil + }, + } + handler := handler.NewContractHandler(chainID, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + + tx := eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + 100_000, + big.NewInt(1), + ) + + rs := handler.Run(tx, types.NewAddress(gethCommon.Address{})) + require.Equal(t, types.StatusFailed, rs.Status) + require.Equal(t, result.GasConsumed, rs.GasConsumed) + require.Equal(t, types.ExecutionErrCodeOutOfGas, rs.ErrorCode) + + }) + }) + }) + }) + + t.Run("test - transaction run (unhappy cases)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + bs := handler.NewBlockStore(chainID, backend, rootAddr) + aa := handler.NewAddressAllocator() + evmErr := fmt.Errorf("%w: next nonce %v, tx nonce %v", gethCore.ErrNonceTooLow, 1, 0) + em := &testutils.TestEmulator{ + RunTransactionFunc: func(tx *gethTypes.Transaction) (*types.Result, error) { + return &types.Result{ValidationError: evmErr}, nil + }, + BalanceOfFunc: func(address types.Address) (*big.Int, error) { + return new(big.Int), nil + }, + } + handler := handler.NewContractHandler(chainID, rootAddr, flowTokenAddress, rootAddr, bs, aa, backend, em) + + coinbase := types.NewAddress(gethCommon.Address{}) + + gasLimit := uint64(testutils.TestComputationLimit + 1) + tx := eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + gasLimit, + big.NewInt(1), + ) + + assertPanic(t, isNotFatal, func() { + rs := handler.Run([]byte(tx), coinbase) + require.Equal(t, types.StatusInvalid, rs.Status) + }) + + tx = eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + 100, + big.NewInt(1), + ) + + rs := handler.Run([]byte(tx), coinbase) + require.Equal(t, types.StatusInvalid, rs.Status) + require.Equal(t, types.ValidationErrCodeNonceTooLow, rs.ErrorCode) + }) + }) + }) + }) + + t.Run("test - 
transaction batch run (success)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + sc := systemcontracts.SystemContractsForChain(chainID) + + bs := handler.NewBlockStore(chainID, backend, rootAddr) + aa := handler.NewAddressAllocator() + + gasConsumed := testutils.RandomGas(1000) + addr := testutils.RandomAddress(t) + result := func(tx *gethTypes.Transaction) *types.Result { + return &types.Result{ + DeployedContractAddress: &addr, + ReturnedData: testutils.RandomData(t), + GasConsumed: gasConsumed, + TxHash: tx.Hash(), + Logs: []*gethTypes.Log{ + testutils.GetRandomLogFixture(t), + testutils.GetRandomLogFixture(t), + }, + } + } + + gasFeeCollector := types.NewAddress(gethCommon.Address{1, 2, 3}) + firstBalanceCall := true + feeCollected := false + coinbaseInitBalance := big.NewInt(1) + coinbaseAfterBalance := big.NewInt(3) + coinbaseDiffBalance := big.NewInt(2) + var runResults []*types.Result + em := &testutils.TestEmulator{ + BatchRunTransactionFunc: func(txs []*gethTypes.Transaction) ([]*types.Result, error) { + runResults = make([]*types.Result, len(txs)) + for i, tx := range txs { + runResults[i] = result(tx) + } + return runResults, nil + }, + BalanceOfFunc: func(address types.Address) (*big.Int, error) { + if firstBalanceCall { + firstBalanceCall = false + return coinbaseInitBalance, nil + } + return coinbaseAfterBalance, nil + }, + NonceOfFunc: func(address types.Address) (uint64, error) { + return 0, nil + }, + DirectCallFunc: func(call *types.DirectCall) (*types.Result, error) { + feeCollected = true + require.Equal(t, types.CoinbaseAddress, call.From) + require.Equal(t, gasFeeCollector, call.To) + require.True(t, types.BalancesAreEqual(call.Value, coinbaseDiffBalance)) + return &types.Result{ + GasConsumed: 21_000, + }, nil + }, + } + handler := handler.NewContractHandler(chainID, rootAddr, flowTokenAddress, randomBeaconAddress, bs, aa, backend, em) + + gasLimit := uint64(100_000) + + // run multiple successful transactions + const batchSize = 3 + txs := make([][]byte, batchSize) + for i := range txs { + txs[i] = eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + gasLimit, + big.NewInt(1), + ) + } + + results := handler.BatchRun(txs, gasFeeCollector) + for _, rs := range results { + require.Equal(t, types.StatusSuccessful, rs.Status) + require.Equal(t, gasConsumed, rs.GasConsumed) + require.Equal(t, types.ErrCodeNoError, rs.ErrorCode) + } + require.True(t, feeCollected) + + handler.CommitBlockProposal() + events := backend.Events() + // +1 for fee collection +1 block event + require.Len(t, events, batchSize+1+1) + + for i, event := range events { + if i == batchSize { + break // don't check last block event + } + txEventPayload := testutils.TxEventToPayload(t, event, sc.EVMContract.Address) + + var logs []*gethTypes.Log + err := rlp.DecodeBytes(txEventPayload.Logs, &logs) + require.NoError(t, err) + + for k, l := range runResults[i].Logs { + assert.Equal(t, l, logs[k]) + } + } + + // run single transaction passed in as batch + txs = make([][]byte, 1) + for i := range txs { + txs[i] = eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + gasLimit, + big.NewInt(1), + ) + } + + results = handler.BatchRun(txs, gasFeeCollector) + for _, rs := range results { + require.Equal(t, types.StatusSuccessful, 
rs.Status) + } + }) + }) + }) + }) + + t.Run("test - transaction batch run (unhappy case)", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + bs := handler.NewBlockStore(chainID, backend, rootAddr) + aa := handler.NewAddressAllocator() + + gasConsumed := testutils.RandomGas(1000) + addr := testutils.RandomAddress(t) + result := func() *types.Result { + return &types.Result{ + DeployedContractAddress: &addr, + ReturnedData: testutils.RandomData(t), + GasConsumed: gasConsumed, + Logs: []*gethTypes.Log{ + testutils.GetRandomLogFixture(t), + testutils.GetRandomLogFixture(t), + }, + } + } + + em := &testutils.TestEmulator{ + BatchRunTransactionFunc: func(txs []*gethTypes.Transaction) ([]*types.Result, error) { + res := make([]*types.Result, len(txs)) + for i := range res { + res[i] = result() + } + return res, nil + }, + // this disables the fee collection step + BalanceOfFunc: func(address types.Address) (*big.Int, error) { + return new(big.Int), nil + }, + } + handler := handler.NewContractHandler(chainID, rootAddr, flowTokenAddress, randomBeaconAddress, bs, aa, backend, em) + coinbase := types.NewAddress(gethCommon.Address{}) + + // batch run empty transactions + txs := make([][]byte, 1) + assertPanic(t, isNotFatal, func() { + handler.BatchRun(txs, coinbase) + }) + + }) + }) + }) + }) + + t.Run("test dry run successful", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + + bs := handler.NewBlockStore(defaultChainID, backend, rootAddr) + aa := handler.NewAddressAllocator() + + nonce := uint64(1) + to := gethCommon.Address{1, 2} + amount := big.NewInt(13) + gasLimit := uint64(1337) + gasPrice := big.NewInt(2000) + data := []byte{1, 5} + from := types.Address{3, 4} + + tx := gethTypes.NewTransaction( + nonce, + to, + amount, + gasLimit, + gasPrice, + data, + ) + rlpTx, err := tx.MarshalBinary() + require.NoError(t, err) + + addr := testutils.RandomAddress(t) + result := &types.Result{ + DeployedContractAddress: &addr, + ReturnedData: testutils.RandomData(t), + GasConsumed: testutils.RandomGas(1000), + Logs: []*gethTypes.Log{ + testutils.GetRandomLogFixture(t), + testutils.GetRandomLogFixture(t), + }, + } + + called := false + em := &testutils.TestEmulator{ + DryRunTransactionFunc: func(tx *gethTypes.Transaction, address gethCommon.Address) (*types.Result, error) { + assert.Equal(t, nonce, tx.Nonce()) + assert.Equal(t, &to, tx.To()) + assert.Equal(t, gasLimit, tx.Gas()) + assert.Equal(t, gasPrice, tx.GasPrice()) + assert.Equal(t, data, tx.Data()) + assert.Equal(t, from.ToCommon(), address) + called = true + return result, nil + }, + } + + handler := handler.NewContractHandler(chainID, rootAddr, flowTokenAddress, randomBeaconAddress, bs, aa, backend, em) + + rs := handler.DryRun(rlpTx, from) + require.Equal(t, types.StatusSuccessful, rs.Status) + require.Equal(t, result.GasConsumed, rs.GasConsumed) + require.Equal(t, types.ErrCodeNoError, rs.ErrorCode) + require.True(t, called) + }) + }) + }) + }) + + t.Run("test - open tracing", func(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend 
*testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + + tx := gethTypes.NewTransaction( + uint64(1), + gethCommon.Address{1, 2}, + big.NewInt(13), + uint64(0), + big.NewInt(1000), + []byte{}, + ) + + rlpTx, err := tx.MarshalBinary() + require.NoError(t, err) + + handler := SetupHandler(t, backend, rootAddr) + + backend.ExpectedSpan(t, trace.FVMEVMDryRun) + handler.DryRun(rlpTx, types.EmptyAddress) + + backend.ExpectedSpan(t, trace.FVMEVMRun) + handler.Run(rlpTx, types.EmptyAddress) + + backend.ExpectedSpan(t, trace.FVMEVMBatchRun) + handler.BatchRun([][]byte{rlpTx}, types.EmptyAddress) + + backend.ExpectedSpan(t, trace.FVMEVMDeployCOA) + coa := handler.DeployCOA(1) + + acc := handler.AccountByAddress(coa, true) + + backend.ExpectedSpan(t, trace.FVMEVMCall) + acc.Call(types.EmptyAddress, nil, 1000, types.EmptyBalance) + + backend.ExpectedSpan(t, trace.FVMEVMDeposit) + acc.Deposit(types.NewFlowTokenVault(types.EmptyBalance)) + + backend.ExpectedSpan(t, trace.FVMEVMWithdraw) + acc.Withdraw(types.EmptyBalance) + + backend.ExpectedSpan(t, trace.FVMEVMDeploy) + acc.Deploy(nil, 1, types.EmptyBalance) + }) + }) + }) + }) +} + +func TestHandler_Metrics(t *testing.T) { + t.Parallel() + + testutils.RunWithTestBackend(t, func(backend *testutils.TestBackend) { + testutils.RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + testutils.RunWithEOATestAccount(t, backend, rootAddr, func(eoa *testutils.EOATestAccount) { + gasUsed := testutils.RandomGas(1000) + result := &types.Result{ + GasConsumed: gasUsed, + CumulativeGasUsed: gasUsed * 4, + DeployedContractAddress: &types.EmptyAddress, + } + em := &testutils.TestEmulator{ + RunTransactionFunc: func(tx *gethTypes.Transaction) (*types.Result, error) { + return result, nil + }, + BatchRunTransactionFunc: func(txs []*gethTypes.Transaction) ([]*types.Result, error) { + return []*types.Result{result, result}, nil + }, + DirectCallFunc: func(call *types.DirectCall) (*types.Result, error) { + return result, nil + }, + NonceOfFunc: func(address types.Address) (uint64, error) { + return 1, nil + }, + BalanceOfFunc: func(address types.Address) (*big.Int, error) { + return big.NewInt(0), nil + }, + } + handler := handler.NewContractHandler( + flow.Emulator, + rootAddr, + flowTokenAddress, + rootAddr, + handler.NewBlockStore(defaultChainID, backend, rootAddr), + handler.NewAddressAllocator(), + backend, + em, + ) + + tx := eoa.PrepareSignAndEncodeTx( + t, + gethCommon.Address{}, + nil, + nil, + 100_000, + big.NewInt(1), + ) + // run tx + called := 0 + backend.EVMTransactionExecutedFunc = func(gas uint64, isDirect, failed bool) { + require.Equal(t, result.GasConsumed, gas) + require.False(t, isDirect) + require.False(t, failed) + called += 1 + } + handler.Run(tx, types.EmptyAddress) + require.Equal(t, 1, called) + + // batch run + backend.EVMTransactionExecutedFunc = func(gas uint64, isDirect, failed bool) { + require.Equal(t, result.GasConsumed, gas) + require.False(t, isDirect) + require.False(t, failed) + called += 1 + } + handler.BatchRun([][]byte{tx, tx}, types.EmptyAddress) + require.Equal(t, 3, called) + + // Direct call + backend.EVMTransactionExecutedFunc = func(gas uint64, isDirect, failed bool) { + require.Equal(t, result.GasConsumed, gas) + require.True(t, isDirect) + require.False(t, failed) + called += 1 + } + coaCounter := 0 + backend.SetNumberOfDeployedCOAsFunc = func(count uint64) { 
+ coaCounter = int(count) + } + handler.DeployCOA(0) + require.Equal(t, 4, called) + require.Equal(t, 1, coaCounter) + + // form block + backend.EVMBlockExecutedFunc = func(txCount int, gasUsed uint64, totalSupply float64) { + require.Equal(t, 4, txCount) + require.Equal(t, result.GasConsumed*4, gasUsed) + require.Equal(t, float64(0), totalSupply) + called += 1 + } + handler.CommitBlockProposal() + require.Equal(t, 5, called) + }) + }) + }) +} + +// returns true if error passes the checks +type checkError func(error) bool + +var isNotFatal = func(err error) bool { + return !errors.IsFailure(err) +} + +func assertPanic(t *testing.T, check checkError, f func()) { + defer func() { + r := recover() + if r == nil { + t.Fatal("The code did not panic") + } + err, ok := r.(error) + if !ok { + t.Fatal("panic is not with an error type") + } + require.True(t, check(err)) + }() + f() +} + +func SetupHandler(t testing.TB, backend types.Backend, rootAddr flow.Address) *handler.ContractHandler { + return handler.NewContractHandler( + flow.Emulator, + rootAddr, + flowTokenAddress, + rootAddr, + handler.NewBlockStore(defaultChainID, backend, rootAddr), + handler.NewAddressAllocator(), + backend, + emulator.NewEmulator(backend, rootAddr), + ) +} diff --git a/fvm/evm/handler/precompiles.go b/fvm/evm/handler/precompiles.go new file mode 100644 index 00000000000..7919749cdfc --- /dev/null +++ b/fvm/evm/handler/precompiles.go @@ -0,0 +1,136 @@ +package handler + +import ( + "encoding/binary" + "fmt" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/sema" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/precompiles" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +func preparePrecompiledContracts( + evmContractAddress flow.Address, + randomBeaconAddress flow.Address, + addressAllocator types.AddressAllocator, + backend types.Backend, +) []types.PrecompiledContract { + archAddress := addressAllocator.AllocatePrecompileAddress(1) + archContract := precompiles.ArchContract( + archAddress, + blockHeightProvider(backend), + coaOwnershipProofValidator(evmContractAddress, backend), + randomSourceProvider(randomBeaconAddress, backend), + revertibleRandomGenerator(backend), + ) + return []types.PrecompiledContract{archContract} +} + +func blockHeightProvider(backend types.Backend) func() (uint64, error) { + return func() (uint64, error) { + h, err := backend.GetCurrentBlockHeight() + if types.IsAFatalError(err) || types.IsABackendError(err) { + panic(err) + } + return h, err + } +} + +const RandomSourceTypeValueFieldName = "value" + +func randomSourceProvider(contractAddress flow.Address, backend types.Backend) func(uint64) ([]byte, error) { + return func(blockHeight uint64) ([]byte, error) { + value, err := backend.Invoke( + environment.ContractFunctionSpec{ + AddressFromChain: func(_ flow.Chain) flow.Address { + return contractAddress + }, + LocationName: "RandomBeaconHistory", + FunctionName: "sourceOfRandomness", + ArgumentTypes: []sema.Type{ + sema.UInt64Type, + }, + }, + []cadence.Value{ + cadence.NewUInt64(blockHeight), + }, + ) + if err != nil { + if types.IsAFatalError(err) || types.IsABackendError(err) { + panic(err) + } + return nil, err + } + + data, ok := value.(cadence.Struct) + if !ok { + return nil, fmt.Errorf("invalid output data received from getRandomSource") + } + + cadenceArray := cadence.SearchFieldByName(data, RandomSourceTypeValueFieldName).(cadence.Array) + source := make([]byte, 
environment.RandomSourceHistoryLength) + for i := range source { + source[i] = byte(cadenceArray.Values[i].(cadence.UInt8)) + } + + return source, nil + } +} + +func revertibleRandomGenerator(backend types.Backend) func() (uint64, error) { + return func() (uint64, error) { + rand := make([]byte, 8) + err := backend.ReadRandom(rand) + if err != nil { + return 0, err + } + + return binary.BigEndian.Uint64(rand), nil + } +} + +const ValidationResultTypeIsValidFieldName = "isValid" + +func coaOwnershipProofValidator(contractAddress flow.Address, backend types.Backend) func(proof *types.COAOwnershipProofInContext) (bool, error) { + return func(proof *types.COAOwnershipProofInContext) (bool, error) { + value, err := backend.Invoke( + environment.ContractFunctionSpec{ + AddressFromChain: func(_ flow.Chain) flow.Address { + return contractAddress + }, + LocationName: "EVM", + FunctionName: "validateCOAOwnershipProof", + ArgumentTypes: []sema.Type{ + types.FlowAddressSemaType, + types.PublicPathSemaType, + types.SignedDataSemaType, + types.KeyIndicesSemaType, + types.SignaturesSemaType, + types.AddressBytesSemaType, + }, + }, + proof.ToCadenceValues(), + ) + if err != nil { + if types.IsAFatalError(err) || types.IsABackendError(err) { + panic(err) + } + return false, err + } + data, ok := value.(cadence.Struct) + if !ok { + return false, fmt.Errorf("invalid output data received from validateCOAOwnershipProof") + } + + isValidValue := cadence.SearchFieldByName(data, ValidationResultTypeIsValidFieldName) + if isValidValue == nil { + return false, fmt.Errorf("invalid output data received from validateCOAOwnershipProof") + } + + return bool(isValidValue.(cadence.Bool)), nil + } +} diff --git a/fvm/evm/impl/abi.go b/fvm/evm/impl/abi.go new file mode 100644 index 00000000000..3fb0e794dbc --- /dev/null +++ b/fvm/evm/impl/abi.go @@ -0,0 +1,1085 @@ +package impl + +import ( + "math" + "math/big" + "reflect" + "strings" + + gethABI "github.com/ethereum/go-ethereum/accounts/abi" + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/sema" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/stdlib" + "github.com/onflow/flow-go/fvm/evm/types" +) + +const abiEncodingByteSize = 32 + +// abiEncodingError +type abiEncodingError struct { + Type interpreter.StaticType + Message string +} + +var _ errors.UserError = abiEncodingError{} + +func (abiEncodingError) IsUserError() {} + +func (e abiEncodingError) Error() string { + var b strings.Builder + b.WriteString("failed to ABI encode value") + + ty := e.Type + if ty != nil { + b.WriteString(" of type ") + b.WriteString(ty.String()) + } + + message := e.Message + if message != "" { + b.WriteString(": ") + b.WriteString(message) + } + + return b.String() +} + +// abiDecodingError +type abiDecodingError struct { + Type interpreter.StaticType + Message string +} + +var _ errors.UserError = abiDecodingError{} + +func (abiDecodingError) IsUserError() {} + +func (e abiDecodingError) Error() string { + var b strings.Builder + b.WriteString("failed to ABI decode data") + + ty := e.Type + if ty != nil { + b.WriteString(" with type ") + b.WriteString(ty.String()) + } + + message := e.Message + if message != "" { + b.WriteString(": ") + b.WriteString(message) + } + + return b.String() +} + +type evmSpecialTypeIDs struct { + AddressTypeID common.TypeID + BytesTypeID common.TypeID + Bytes4TypeID 
common.TypeID + Bytes32TypeID common.TypeID +} + +func NewEVMSpecialTypeIDs( + gauge common.MemoryGauge, + location common.AddressLocation, +) evmSpecialTypeIDs { + return evmSpecialTypeIDs{ + AddressTypeID: location.TypeID(gauge, stdlib.EVMAddressTypeQualifiedIdentifier), + BytesTypeID: location.TypeID(gauge, stdlib.EVMBytesTypeQualifiedIdentifier), + Bytes4TypeID: location.TypeID(gauge, stdlib.EVMBytes4TypeQualifiedIdentifier), + Bytes32TypeID: location.TypeID(gauge, stdlib.EVMBytes32TypeQualifiedIdentifier), + } +} + +type abiEncodingContext interface { + interpreter.MemberAccessibleContext + interpreter.ValueTransferContext +} + +func reportABIEncodingComputation( + context abiEncodingContext, + locationRange interpreter.LocationRange, + values *interpreter.ArrayValue, + evmTypeIDs evmSpecialTypeIDs, + reportComputation func(intensity uint64), +) { + + values.Iterate( + context, + func(element interpreter.Value) (resume bool) { + switch value := element.(type) { + case *interpreter.StringValue: + // Dynamic variables, such as strings, are encoded + // in 2+ chunks of 32 bytes. The first chunk contains + // the offset where the information for the string begins, + // the second chunk contains the number of bytes the + // string occupies, and the remaining chunks contain the + // value of the string itself. + computation := uint64(2 * abiEncodingByteSize) + stringLength := len(value.Str) + chunks := math.Ceil(float64(stringLength) / float64(abiEncodingByteSize)) + computation += uint64(chunks * abiEncodingByteSize) + reportComputation(computation) + + case interpreter.BoolValue, + interpreter.UIntValue, + interpreter.UInt8Value, + interpreter.UInt16Value, + interpreter.UInt32Value, + interpreter.UInt64Value, + interpreter.UInt128Value, + interpreter.UInt256Value, + interpreter.IntValue, + interpreter.Int8Value, + interpreter.Int16Value, + interpreter.Int32Value, + interpreter.Int64Value, + interpreter.Int128Value, + interpreter.Int256Value: + + // Numeric and bool variables are also static variables + // with a fixed size of 32 bytes. + reportComputation(abiEncodingByteSize) + + case *interpreter.CompositeValue: + switch value.TypeID() { + case evmTypeIDs.AddressTypeID: + // EVM addresses are static variables with a fixed + // size of 32 bytes. + reportComputation(abiEncodingByteSize) + + case evmTypeIDs.BytesTypeID: + computation := uint64(2 * abiEncodingByteSize) + valueMember := value.GetMember(context, locationRange, stdlib.EVMBytesTypeValueFieldName) + bytesArray, ok := valueMember.(*interpreter.ArrayValue) + if !ok { + panic(abiEncodingError{ + Type: value.StaticType(context), + Message: "could not convert value field to array", + }) + } + bytesLength := bytesArray.Count() + chunks := math.Ceil(float64(bytesLength) / float64(abiEncodingByteSize)) + computation += uint64(chunks * abiEncodingByteSize) + reportComputation(computation) + + case evmTypeIDs.Bytes4TypeID: + reportComputation(abiEncodingByteSize) + + case evmTypeIDs.Bytes32TypeID: + reportComputation(abiEncodingByteSize) + + default: + panic(abiEncodingError{ + Type: value.StaticType(context), + }) + } + + case *interpreter.ArrayValue: + // Dynamic variables, such as arrays & slices, are encoded + // in 2+ chunks of 32 bytes. The first chunk contains + // the offset where the information for the array begins, + // the second chunk contains the number of bytes the + // array occupies, and the remaining chunks contain the + // values of the array itself.
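+ // For example, encoding a 5-element array of UInt8 values charges + // 2*32 = 64 bytes here for the offset and length words, and the + // recursive call below then charges another 32 bytes per element, + // since each element is padded to a full 32-byte word: 64 + 5*32 = + // 224 bytes of computation in total.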
+ computation := uint64(2 * abiEncodingByteSize) + reportComputation(computation) + reportABIEncodingComputation( + context, + locationRange, + value, + evmTypeIDs, + reportComputation, + ) + + default: + panic(abiEncodingError{ + Type: element.StaticType(context), + }) + } + + // continue iteration + return true + }, + false, + locationRange, + ) +} + +func newInternalEVMTypeEncodeABIFunction( + gauge common.MemoryGauge, + location common.AddressLocation, +) *interpreter.HostFunctionValue { + + evmSpecialTypeIDs := NewEVMSpecialTypeIDs(gauge, location) + + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeEncodeABIFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + // Get `values` argument + + valuesArray, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + reportABIEncodingComputation( + context, + locationRange, + valuesArray, + evmSpecialTypeIDs, + func(intensity uint64) { + common.UseComputation( + context, + common.ComputationUsage{ + Kind: environment.ComputationKindEVMEncodeABI, + Intensity: intensity, + }, + ) + }, + ) + + size := valuesArray.Count() + + values := make([]any, 0, size) + arguments := make(gethABI.Arguments, 0, size) + + valuesArray.Iterate( + context, + func(element interpreter.Value) (resume bool) { + value, ty, err := encodeABI( + context, + locationRange, + element, + element.StaticType(context), + evmSpecialTypeIDs, + ) + if err != nil { + panic(err) + } + + values = append(values, value) + arguments = append(arguments, gethABI.Argument{Type: ty}) + + // continue iteration + return true + }, + false, + locationRange, + ) + + encodedValues, err := arguments.Pack(values...) 
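+ // Pack produces the standard head/tail ABI encoding: static values + // are written inline, while dynamic values (strings, bytes, slices) + // are referenced by a 32-byte offset into the tail section.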
+ if err != nil { + panic( + abiEncodingError{ + Message: err.Error(), + }, + ) + } + + return interpreter.ByteSliceToByteArrayValue(context, encodedValues) + }, + ) +} + +var gethTypeString = gethABI.Type{T: gethABI.StringTy} + +var gethTypeBool = gethABI.Type{T: gethABI.BoolTy} + +var gethTypeUint = gethABI.Type{T: gethABI.UintTy, Size: 256} + +var gethTypeUint8 = gethABI.Type{T: gethABI.UintTy, Size: 8} + +var gethTypeUint16 = gethABI.Type{T: gethABI.UintTy, Size: 16} + +var gethTypeUint32 = gethABI.Type{T: gethABI.UintTy, Size: 32} + +var gethTypeUint64 = gethABI.Type{T: gethABI.UintTy, Size: 64} + +var gethTypeUint128 = gethABI.Type{T: gethABI.UintTy, Size: 128} + +var gethTypeUint256 = gethABI.Type{T: gethABI.UintTy, Size: 256} + +var gethTypeInt = gethABI.Type{T: gethABI.IntTy, Size: 256} + +var gethTypeInt8 = gethABI.Type{T: gethABI.IntTy, Size: 8} + +var gethTypeInt16 = gethABI.Type{T: gethABI.IntTy, Size: 16} + +var gethTypeInt32 = gethABI.Type{T: gethABI.IntTy, Size: 32} + +var gethTypeInt64 = gethABI.Type{T: gethABI.IntTy, Size: 64} + +var gethTypeInt128 = gethABI.Type{T: gethABI.IntTy, Size: 128} + +var gethTypeInt256 = gethABI.Type{T: gethABI.IntTy, Size: 256} + +var gethTypeAddress = gethABI.Type{T: gethABI.AddressTy, Size: 20} + +var gethTypeBytes = gethABI.Type{T: gethABI.BytesTy} + +var gethTypeBytes4 = gethABI.Type{T: gethABI.FixedBytesTy, Size: 4} + +var gethTypeBytes32 = gethABI.Type{T: gethABI.FixedBytesTy, Size: 32} + +func gethABIType( + staticType interpreter.StaticType, + evmTypeIDs evmSpecialTypeIDs, +) (gethABI.Type, bool) { + switch staticType { + case interpreter.PrimitiveStaticTypeString: + return gethTypeString, true + case interpreter.PrimitiveStaticTypeBool: + return gethTypeBool, true + case interpreter.PrimitiveStaticTypeUInt: + return gethTypeUint, true + case interpreter.PrimitiveStaticTypeUInt8: + return gethTypeUint8, true + case interpreter.PrimitiveStaticTypeUInt16: + return gethTypeUint16, true + case interpreter.PrimitiveStaticTypeUInt32: + return gethTypeUint32, true + case interpreter.PrimitiveStaticTypeUInt64: + return gethTypeUint64, true + case interpreter.PrimitiveStaticTypeUInt128: + return gethTypeUint128, true + case interpreter.PrimitiveStaticTypeUInt256: + return gethTypeUint256, true + case interpreter.PrimitiveStaticTypeInt: + return gethTypeInt, true + case interpreter.PrimitiveStaticTypeInt8: + return gethTypeInt8, true + case interpreter.PrimitiveStaticTypeInt16: + return gethTypeInt16, true + case interpreter.PrimitiveStaticTypeInt32: + return gethTypeInt32, true + case interpreter.PrimitiveStaticTypeInt64: + return gethTypeInt64, true + case interpreter.PrimitiveStaticTypeInt128: + return gethTypeInt128, true + case interpreter.PrimitiveStaticTypeInt256: + return gethTypeInt256, true + case interpreter.PrimitiveStaticTypeAddress: + return gethTypeAddress, true + } + + switch staticType := staticType.(type) { + case *interpreter.CompositeStaticType: + switch staticType.TypeID { + case evmTypeIDs.AddressTypeID: + return gethTypeAddress, true + case evmTypeIDs.BytesTypeID: + return gethTypeBytes, true + case evmTypeIDs.Bytes4TypeID: + return gethTypeBytes4, true + case evmTypeIDs.Bytes32TypeID: + return gethTypeBytes32, true + } + + case *interpreter.ConstantSizedStaticType: + elementGethABIType, ok := gethABIType( + staticType.ElementType(), + evmTypeIDs, + ) + if !ok { + break + } + + return gethABI.Type{ + T: gethABI.ArrayTy, + Elem: &elementGethABIType, + Size: int(staticType.Size), + }, true + + case 
*interpreter.VariableSizedStaticType: + elementGethABIType, ok := gethABIType( + staticType.ElementType(), + evmTypeIDs, + ) + if !ok { + break + } + + return gethABI.Type{ + T: gethABI.SliceTy, + Elem: &elementGethABIType, + }, true + + } + + return gethABI.Type{}, false +} + +func goType( + staticType interpreter.StaticType, + evmTypeIDs evmSpecialTypeIDs, +) (reflect.Type, bool) { + switch staticType { + case interpreter.PrimitiveStaticTypeString: + return reflect.TypeOf(""), true + case interpreter.PrimitiveStaticTypeBool: + return reflect.TypeOf(true), true + case interpreter.PrimitiveStaticTypeUInt: + return reflect.TypeOf((*big.Int)(nil)), true + case interpreter.PrimitiveStaticTypeUInt8: + return reflect.TypeOf(uint8(0)), true + case interpreter.PrimitiveStaticTypeUInt16: + return reflect.TypeOf(uint16(0)), true + case interpreter.PrimitiveStaticTypeUInt32: + return reflect.TypeOf(uint32(0)), true + case interpreter.PrimitiveStaticTypeUInt64: + return reflect.TypeOf(uint64(0)), true + case interpreter.PrimitiveStaticTypeUInt128: + return reflect.TypeOf((*big.Int)(nil)), true + case interpreter.PrimitiveStaticTypeUInt256: + return reflect.TypeOf((*big.Int)(nil)), true + case interpreter.PrimitiveStaticTypeInt: + return reflect.TypeOf((*big.Int)(nil)), true + case interpreter.PrimitiveStaticTypeInt8: + return reflect.TypeOf(int8(0)), true + case interpreter.PrimitiveStaticTypeInt16: + return reflect.TypeOf(int16(0)), true + case interpreter.PrimitiveStaticTypeInt32: + return reflect.TypeOf(int32(0)), true + case interpreter.PrimitiveStaticTypeInt64: + return reflect.TypeOf(int64(0)), true + case interpreter.PrimitiveStaticTypeInt128: + return reflect.TypeOf((*big.Int)(nil)), true + case interpreter.PrimitiveStaticTypeInt256: + return reflect.TypeOf((*big.Int)(nil)), true + case interpreter.PrimitiveStaticTypeAddress: + return reflect.TypeOf((*big.Int)(nil)), true + } + + switch staticType := staticType.(type) { + case *interpreter.ConstantSizedStaticType: + elementType, ok := goType(staticType.ElementType(), evmTypeIDs) + if !ok { + break + } + + return reflect.ArrayOf(int(staticType.Size), elementType), true + + case *interpreter.VariableSizedStaticType: + elementType, ok := goType(staticType.ElementType(), evmTypeIDs) + if !ok { + break + } + + return reflect.SliceOf(elementType), true + } + + switch staticType.ID() { + case evmTypeIDs.AddressTypeID: + return reflect.TypeOf(gethCommon.Address{}), true + case evmTypeIDs.BytesTypeID: + return reflect.SliceOf(reflect.TypeOf(byte(0))), true + case evmTypeIDs.Bytes4TypeID: + return reflect.ArrayOf(stdlib.EVMBytes4Length, reflect.TypeOf(byte(0))), true + case evmTypeIDs.Bytes32TypeID: + return reflect.ArrayOf(stdlib.EVMBytes32Length, reflect.TypeOf(byte(0))), true + } + + return nil, false +} + +func encodeABI( + context abiEncodingContext, + locationRange interpreter.LocationRange, + value interpreter.Value, + staticType interpreter.StaticType, + evmTypeIDs evmSpecialTypeIDs, +) ( + any, + gethABI.Type, + error, +) { + + switch value := value.(type) { + case *interpreter.StringValue: + if staticType == interpreter.PrimitiveStaticTypeString { + return value.Str, gethTypeString, nil + } + + case interpreter.BoolValue: + if staticType == interpreter.PrimitiveStaticTypeBool { + return bool(value), gethTypeBool, nil + } + + case interpreter.UIntValue: + if staticType == interpreter.PrimitiveStaticTypeUInt { + if value.BigInt.Cmp(sema.UInt256TypeMaxIntBig) > 0 || value.BigInt.Cmp(sema.UInt256TypeMinIntBig) < 0 { + return nil, gethABI.Type{}, 
abiEncodingError{ + Type: value.StaticType(context), + Message: "value outside the boundaries of uint256", + } + } + return value.BigInt, gethTypeUint, nil + } + + case interpreter.UInt8Value: + if staticType == interpreter.PrimitiveStaticTypeUInt8 { + return uint8(value), gethTypeUint8, nil + } + + case interpreter.UInt16Value: + if staticType == interpreter.PrimitiveStaticTypeUInt16 { + return uint16(value), gethTypeUint16, nil + } + + case interpreter.UInt32Value: + if staticType == interpreter.PrimitiveStaticTypeUInt32 { + return uint32(value), gethTypeUint32, nil + } + + case interpreter.UInt64Value: + if staticType == interpreter.PrimitiveStaticTypeUInt64 { + return uint64(value), gethTypeUint64, nil + } + + case interpreter.UInt128Value: + if staticType == interpreter.PrimitiveStaticTypeUInt128 { + return value.BigInt, gethTypeUint128, nil + } + + case interpreter.UInt256Value: + if staticType == interpreter.PrimitiveStaticTypeUInt256 { + return value.BigInt, gethTypeUint256, nil + } + + case interpreter.IntValue: + if staticType == interpreter.PrimitiveStaticTypeInt { + if value.BigInt.Cmp(sema.Int256TypeMaxIntBig) > 0 || value.BigInt.Cmp(sema.Int256TypeMinIntBig) < 0 { + return nil, gethABI.Type{}, abiEncodingError{ + Type: value.StaticType(context), + Message: "value outside the boundaries of int256", + } + } + return value.BigInt, gethTypeInt, nil + } + + case interpreter.Int8Value: + if staticType == interpreter.PrimitiveStaticTypeInt8 { + return int8(value), gethTypeInt8, nil + } + + case interpreter.Int16Value: + if staticType == interpreter.PrimitiveStaticTypeInt16 { + return int16(value), gethTypeInt16, nil + } + + case interpreter.Int32Value: + if staticType == interpreter.PrimitiveStaticTypeInt32 { + return int32(value), gethTypeInt32, nil + } + + case interpreter.Int64Value: + if staticType == interpreter.PrimitiveStaticTypeInt64 { + return int64(value), gethTypeInt64, nil + } + + case interpreter.Int128Value: + if staticType == interpreter.PrimitiveStaticTypeInt128 { + return value.BigInt, gethTypeInt128, nil + } + + case interpreter.Int256Value: + if staticType == interpreter.PrimitiveStaticTypeInt256 { + return value.BigInt, gethTypeInt256, nil + } + + case *interpreter.CompositeValue: + switch value.TypeID() { + case evmTypeIDs.AddressTypeID: + addressBytesArrayValue := value.GetMember(context, locationRange, stdlib.EVMAddressTypeBytesFieldName) + bytes, err := interpreter.ByteArrayValueToByteSlice( + context, + addressBytesArrayValue, + locationRange, + ) + if err != nil { + panic(err) + } + return gethCommon.Address(bytes), gethTypeAddress, nil + + case evmTypeIDs.BytesTypeID: + bytesValue := value.GetMember(context, locationRange, stdlib.EVMBytesTypeValueFieldName) + bytes, err := interpreter.ByteArrayValueToByteSlice( + context, + bytesValue, + locationRange, + ) + if err != nil { + panic(err) + } + return bytes, gethTypeBytes, nil + + case evmTypeIDs.Bytes4TypeID: + bytesValue := value.GetMember(context, locationRange, stdlib.EVMBytesTypeValueFieldName) + bytes, err := interpreter.ByteArrayValueToByteSlice( + context, + bytesValue, + locationRange, + ) + if err != nil { + panic(err) + } + return [stdlib.EVMBytes4Length]byte(bytes), gethTypeBytes4, nil + + case evmTypeIDs.Bytes32TypeID: + bytesValue := value.GetMember(context, locationRange, stdlib.EVMBytesTypeValueFieldName) + bytes, err := interpreter.ByteArrayValueToByteSlice( + context, + bytesValue, + locationRange, + ) + if err != nil { + panic(err) + } + return [stdlib.EVMBytes32Length]byte(bytes), 
gethTypeBytes32, nil + } + + case *interpreter.ArrayValue: + arrayStaticType := value.Type + + arrayGethABIType, ok := gethABIType(arrayStaticType, evmTypeIDs) + if !ok { + break + } + + elementStaticType := arrayStaticType.ElementType() + + elementGoType, ok := goType(elementStaticType, evmTypeIDs) + if !ok { + break + } + + var result reflect.Value + + switch arrayStaticType := arrayStaticType.(type) { + case *interpreter.ConstantSizedStaticType: + size := int(arrayStaticType.Size) + result = reflect.Indirect(reflect.New(reflect.ArrayOf(size, elementGoType))) + + case *interpreter.VariableSizedStaticType: + size := value.Count() + result = reflect.MakeSlice(reflect.SliceOf(elementGoType), size, size) + } + + var index int + value.Iterate( + context, + func(element interpreter.Value) (resume bool) { + + arrayElement, _, err := encodeABI( + context, + locationRange, + element, + element.StaticType(context), + evmTypeIDs, + ) + if err != nil { + panic(err) + } + + result.Index(index).Set(reflect.ValueOf(arrayElement)) + + index++ + + // continue iteration + return true + }, + false, + locationRange, + ) + + return result.Interface(), arrayGethABIType, nil + } + + return nil, gethABI.Type{}, abiEncodingError{ + Type: value.StaticType(context), + } +} +func newInternalEVMTypeDecodeABIFunction( + gauge common.MemoryGauge, + location common.AddressLocation, +) *interpreter.HostFunctionValue { + evmSpecialTypeIDs := NewEVMSpecialTypeIDs(gauge, location) + + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeDecodeABIFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + // Get `types` argument + + typesArray, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + // Get `data` argument + + dataValue, ok := invocation.Arguments[1].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + common.UseComputation( + context, + common.ComputationUsage{ + Kind: environment.ComputationKindEVMDecodeABI, + Intensity: uint64(dataValue.Count()), + }, + ) + + data, err := interpreter.ByteArrayValueToByteSlice(context, dataValue, locationRange) + if err != nil { + panic(err) + } + + var arguments gethABI.Arguments + typesArray.Iterate( + context, + func(element interpreter.Value) (resume bool) { + typeValue, ok := element.(interpreter.TypeValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + staticType := typeValue.Type + + gethABITy, ok := gethABIType(staticType, evmSpecialTypeIDs) + if !ok { + panic(abiDecodingError{ + Type: staticType, + }) + } + + arguments = append( + arguments, + gethABI.Argument{ + Type: gethABITy, + }, + ) + + // continue iteration + return true + }, + false, + locationRange, + ) + + decodedValues, err := arguments.Unpack(data) + if err != nil { + panic(abiDecodingError{}) + } + + var index int + values := make([]interpreter.Value, 0, len(decodedValues)) + + typesArray.Iterate( + context, + func(element interpreter.Value) (resume bool) { + typeValue, ok := element.(interpreter.TypeValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + staticType := typeValue.Type + + value, err := decodeABI( + context, + locationRange, + decodedValues[index], + staticType, + location, + evmSpecialTypeIDs, + ) + if err != nil { + panic(err) + } + + index++ + + values = append(values, value) + + // continue iteration + return true + }, + false, + locationRange, + ) 
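+ // The decoded values are handed back to Cadence as a single + // variable-sized array of AnyStruct, constructed below.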
+ + arrayType := interpreter.NewVariableSizedStaticType( + context, + interpreter.NewPrimitiveStaticType( + context, + interpreter.PrimitiveStaticTypeAnyStruct, + ), + ) + + return interpreter.NewArrayValue( + context, + locationRange, + arrayType, + common.ZeroAddress, + values..., + ) + }, + ) +} + +type memberAccessibleArrayCreationContext interface { + interpreter.MemberAccessibleContext + interpreter.ArrayCreationContext +} + +func decodeABI( + context memberAccessibleArrayCreationContext, + locationRange interpreter.LocationRange, + value any, + staticType interpreter.StaticType, + location common.AddressLocation, + evmTypeIDs evmSpecialTypeIDs, +) ( + interpreter.Value, + error, +) { + + switch staticType { + case interpreter.PrimitiveStaticTypeString: + value, ok := value.(string) + if !ok { + break + } + return interpreter.NewStringValue( + context, + common.NewStringMemoryUsage(len(value)), + func() string { + return value + }, + ), nil + + case interpreter.PrimitiveStaticTypeBool: + value, ok := value.(bool) + if !ok { + break + } + return interpreter.BoolValue(value), nil + + case interpreter.PrimitiveStaticTypeUInt: + value, ok := value.(*big.Int) + if !ok { + break + } + memoryUsage := common.NewBigIntMemoryUsage( + common.BigIntByteLength(value), + ) + return interpreter.NewUIntValueFromBigInt(context, memoryUsage, func() *big.Int { return value }), nil + + case interpreter.PrimitiveStaticTypeUInt8: + value, ok := value.(uint8) + if !ok { + break + } + return interpreter.NewUInt8Value(context, func() uint8 { return value }), nil + + case interpreter.PrimitiveStaticTypeUInt16: + value, ok := value.(uint16) + if !ok { + break + } + return interpreter.NewUInt16Value(context, func() uint16 { return value }), nil + + case interpreter.PrimitiveStaticTypeUInt32: + value, ok := value.(uint32) + if !ok { + break + } + return interpreter.NewUInt32Value(context, func() uint32 { return value }), nil + + case interpreter.PrimitiveStaticTypeUInt64: + value, ok := value.(uint64) + if !ok { + break + } + return interpreter.NewUInt64Value(context, func() uint64 { return value }), nil + + case interpreter.PrimitiveStaticTypeUInt128: + value, ok := value.(*big.Int) + if !ok { + break + } + return interpreter.NewUInt128ValueFromBigInt(context, func() *big.Int { return value }), nil + + case interpreter.PrimitiveStaticTypeUInt256: + value, ok := value.(*big.Int) + if !ok { + break + } + return interpreter.NewUInt256ValueFromBigInt(context, func() *big.Int { return value }), nil + + case interpreter.PrimitiveStaticTypeInt: + value, ok := value.(*big.Int) + if !ok { + break + } + memoryUsage := common.NewBigIntMemoryUsage( + common.BigIntByteLength(value), + ) + return interpreter.NewIntValueFromBigInt(context, memoryUsage, func() *big.Int { return value }), nil + + case interpreter.PrimitiveStaticTypeInt8: + value, ok := value.(int8) + if !ok { + break + } + return interpreter.NewInt8Value(context, func() int8 { return value }), nil + + case interpreter.PrimitiveStaticTypeInt16: + value, ok := value.(int16) + if !ok { + break + } + return interpreter.NewInt16Value(context, func() int16 { return value }), nil + + case interpreter.PrimitiveStaticTypeInt32: + value, ok := value.(int32) + if !ok { + break + } + return interpreter.NewInt32Value(context, func() int32 { return value }), nil + + case interpreter.PrimitiveStaticTypeInt64: + value, ok := value.(int64) + if !ok { + break + } + return interpreter.NewInt64Value(context, func() int64 { return value }), nil + + case 
interpreter.PrimitiveStaticTypeInt128: + value, ok := value.(*big.Int) + if !ok { + break + } + return interpreter.NewInt128ValueFromBigInt(context, func() *big.Int { return value }), nil + + case interpreter.PrimitiveStaticTypeInt256: + value, ok := value.(*big.Int) + if !ok { + break + } + return interpreter.NewInt256ValueFromBigInt(context, func() *big.Int { return value }), nil + } + + switch staticType := staticType.(type) { + case interpreter.ArrayStaticType: + array := reflect.ValueOf(value) + + elementStaticType := staticType.ElementType() + + size := array.Len() + + var index int + return interpreter.NewArrayValueWithIterator( + context, + staticType, + common.ZeroAddress, + uint64(size), + func() interpreter.Value { + if index >= size { + return nil + } + + element := array.Index(index).Interface() + + result, err := decodeABI( + context, + locationRange, + element, + elementStaticType, + location, + evmTypeIDs, + ) + if err != nil { + panic(err) + } + + index++ + + return result + }, + ), nil + + case *interpreter.CompositeStaticType: + switch staticType.TypeID { + case evmTypeIDs.AddressTypeID: + addr, ok := value.(gethCommon.Address) + if !ok { + break + } + + var address types.Address + copy(address[:], addr.Bytes()) + return NewEVMAddress( + context, + locationRange, + location, + address, + ), nil + + case evmTypeIDs.BytesTypeID: + bytes, ok := value.([]byte) + if !ok { + break + } + return NewEVMBytes( + context, + locationRange, + location, + bytes, + ), nil + + case evmTypeIDs.Bytes4TypeID: + bytes, ok := value.([stdlib.EVMBytes4Length]byte) + if !ok { + break + } + return NewEVMBytes4( + context, + locationRange, + location, + bytes, + ), nil + + case evmTypeIDs.Bytes32TypeID: + bytes, ok := value.([stdlib.EVMBytes32Length]byte) + if !ok { + break + } + return NewEVMBytes32( + context, + locationRange, + location, + bytes, + ), nil + } + } + + return nil, abiDecodingError{ + Type: staticType, + } +} diff --git a/fvm/evm/impl/impl.go b/fvm/evm/impl/impl.go new file mode 100644 index 00000000000..c588a49e5de --- /dev/null +++ b/fvm/evm/impl/impl.go @@ -0,0 +1,1264 @@ +package impl + +import ( + "fmt" + "math/big" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/sema" + + "github.com/onflow/flow-go/fvm/evm/stdlib" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" + + gethTypes "github.com/ethereum/go-ethereum/core/types" +) + +var internalEVMContractStaticType = interpreter.ConvertSemaCompositeTypeToStaticCompositeType( + nil, + stdlib.InternalEVMContractType, +) + +func NewInternalEVMContractValue( + gauge common.MemoryGauge, + handler types.ContractHandler, + contractAddress flow.Address, +) *interpreter.SimpleCompositeValue { + location := common.NewAddressLocation(nil, common.Address(contractAddress), stdlib.ContractName) + + return interpreter.NewSimpleCompositeValue( + gauge, + stdlib.InternalEVMContractType.ID(), + internalEVMContractStaticType, + stdlib.InternalEVMContractType.Fields, + map[string]interpreter.Value{ + stdlib.InternalEVMTypeRunFunctionName: newInternalEVMTypeRunFunction(gauge, handler), + stdlib.InternalEVMTypeBatchRunFunctionName: newInternalEVMTypeBatchRunFunction(gauge, handler), + stdlib.InternalEVMTypeCreateCadenceOwnedAccountFunctionName: newInternalEVMTypeCreateCadenceOwnedAccountFunction(gauge, handler), + stdlib.InternalEVMTypeCallFunctionName: 
newInternalEVMTypeCallFunction(gauge, handler), + stdlib.InternalEVMTypeDepositFunctionName: newInternalEVMTypeDepositFunction(gauge, handler), + stdlib.InternalEVMTypeWithdrawFunctionName: newInternalEVMTypeWithdrawFunction(gauge, handler), + stdlib.InternalEVMTypeDeployFunctionName: newInternalEVMTypeDeployFunction(gauge, handler), + stdlib.InternalEVMTypeBalanceFunctionName: newInternalEVMTypeBalanceFunction(gauge, handler), + stdlib.InternalEVMTypeNonceFunctionName: newInternalEVMTypeNonceFunction(gauge, handler), + stdlib.InternalEVMTypeCodeFunctionName: newInternalEVMTypeCodeFunction(gauge, handler), + stdlib.InternalEVMTypeCodeHashFunctionName: newInternalEVMTypeCodeHashFunction(gauge, handler), + stdlib.InternalEVMTypeEncodeABIFunctionName: newInternalEVMTypeEncodeABIFunction(gauge, location), + stdlib.InternalEVMTypeDecodeABIFunctionName: newInternalEVMTypeDecodeABIFunction(gauge, location), + stdlib.InternalEVMTypeCastToAttoFLOWFunctionName: newInternalEVMTypeCastToAttoFLOWFunction(gauge), + stdlib.InternalEVMTypeCastToFLOWFunctionName: newInternalEVMTypeCastToFLOWFunction(gauge), + stdlib.InternalEVMTypeGetLatestBlockFunctionName: newInternalEVMTypeGetLatestBlockFunction(gauge, handler), + stdlib.InternalEVMTypeDryRunFunctionName: newInternalEVMTypeDryRunFunction(gauge, handler), + stdlib.InternalEVMTypeDryCallFunctionName: newInternalEVMTypeDryCallFunction(gauge, handler), + stdlib.InternalEVMTypeCommitBlockProposalFunctionName: newInternalEVMTypeCommitBlockProposalFunction(gauge, handler), + }, + nil, + nil, + nil, + nil, + ) +} + +func newInternalEVMTypeGetLatestBlockFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeGetLatestBlockFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + latestBlock := handler.LastExecutedBlock() + + return NewEVMBlockValue( + handler, + gauge, + context, + locationRange, + latestBlock, + ) + }, + ) +} + +func NewEVMBlockValue( + handler types.ContractHandler, + gauge common.MemoryGauge, + context interpreter.MemberAccessibleContext, + locationRange interpreter.LocationRange, + block *types.Block, +) *interpreter.CompositeValue { + loc := common.NewAddressLocation(gauge, handler.EVMContractAddress(), stdlib.ContractName) + hash, err := block.Hash() + if err != nil { + panic(err) + } + + return interpreter.NewCompositeValue( + context, + locationRange, + loc, + stdlib.EVMBlockTypeQualifiedIdentifier, + common.CompositeKindStructure, + []interpreter.CompositeField{ + { + Name: "height", + Value: interpreter.UInt64Value(block.Height), + }, + { + Name: "hash", + Value: interpreter.NewStringValue( + context, + common.NewStringMemoryUsage(len(hash)), + func() string { + return hash.Hex() + }, + ), + }, + { + Name: "totalSupply", + Value: interpreter.NewIntValueFromBigInt( + context, + common.NewBigIntMemoryUsage(common.BigIntByteLength(block.TotalSupply)), + func() *big.Int { + return block.TotalSupply + }, + ), + }, + { + Name: "timestamp", + Value: interpreter.UInt64Value(block.Timestamp), + }, + }, + common.ZeroAddress, + ) +} + +func NewEVMAddress( + context interpreter.MemberAccessibleContext, + locationRange interpreter.LocationRange, + location common.AddressLocation, + address types.Address, +) *interpreter.CompositeValue { + return interpreter.NewCompositeValue( + context, + locationRange, + 
location, + stdlib.EVMAddressTypeQualifiedIdentifier, + common.CompositeKindStructure, + []interpreter.CompositeField{ + { + Name: stdlib.EVMAddressTypeBytesFieldName, + Value: EVMAddressToAddressBytesArrayValue(context, address), + }, + }, + common.ZeroAddress, + ) +} + +func NewEVMBytes( + context memberAccessibleArrayCreationContext, + locationRange interpreter.LocationRange, + location common.AddressLocation, + bytes []byte, +) *interpreter.CompositeValue { + return interpreter.NewCompositeValue( + context, + locationRange, + location, + stdlib.EVMBytesTypeQualifiedIdentifier, + common.CompositeKindStructure, + []interpreter.CompositeField{ + { + Name: stdlib.EVMBytesTypeValueFieldName, + Value: EVMBytesToBytesArrayValue(context, bytes), + }, + }, + common.ZeroAddress, + ) +} + +func NewEVMBytes4( + context memberAccessibleArrayCreationContext, + locationRange interpreter.LocationRange, + location common.AddressLocation, + bytes [4]byte, +) *interpreter.CompositeValue { + return interpreter.NewCompositeValue( + context, + locationRange, + location, + stdlib.EVMBytes4TypeQualifiedIdentifier, + common.CompositeKindStructure, + []interpreter.CompositeField{ + { + Name: stdlib.EVMBytesTypeValueFieldName, + Value: EVMBytes4ToBytesArrayValue(context, bytes), + }, + }, + common.ZeroAddress, + ) +} + +func NewEVMBytes32( + context memberAccessibleArrayCreationContext, + locationRange interpreter.LocationRange, + location common.AddressLocation, + bytes [32]byte, +) *interpreter.CompositeValue { + return interpreter.NewCompositeValue( + context, + locationRange, + location, + stdlib.EVMBytes32TypeQualifiedIdentifier, + common.CompositeKindStructure, + []interpreter.CompositeField{ + { + Name: stdlib.EVMBytesTypeValueFieldName, + Value: EVMBytes32ToBytesArrayValue(context, bytes), + }, + }, + common.ZeroAddress, + ) +} + +func AddressBytesArrayValueToEVMAddress( + context interpreter.ContainerMutationContext, + locationRange interpreter.LocationRange, + addressBytesValue *interpreter.ArrayValue, +) ( + result types.Address, + err error, +) { + // Convert + + var bytes []byte + bytes, err = interpreter.ByteArrayValueToByteSlice( + context, + addressBytesValue, + locationRange, + ) + if err != nil { + return result, err + } + + // Check length + + length := len(bytes) + const expectedLength = types.AddressLength + if length != expectedLength { + return result, errors.NewDefaultUserError( + "invalid address length: got %d, expected %d", + length, + expectedLength, + ) + } + + copy(result[:], bytes) + + return result, nil +} + +func EVMAddressToAddressBytesArrayValue( + context interpreter.ArrayCreationContext, + address types.Address, +) *interpreter.ArrayValue { + var index int + return interpreter.NewArrayValueWithIterator( + context, + stdlib.EVMAddressBytesStaticType, + common.ZeroAddress, + types.AddressLength, + func() interpreter.Value { + if index >= types.AddressLength { + return nil + } + result := interpreter.NewUInt8Value(context, func() uint8 { + return address[index] + }) + index++ + return result + }, + ) +} + +func EVMBytesToBytesArrayValue( + context interpreter.ArrayCreationContext, + bytes []byte, +) *interpreter.ArrayValue { + var index int + return interpreter.NewArrayValueWithIterator( + context, + stdlib.EVMBytesValueStaticType, + common.ZeroAddress, + uint64(len(bytes)), + func() interpreter.Value { + if index >= len(bytes) { + return nil + } + result := interpreter.NewUInt8Value(context, func() uint8 { + return bytes[index] + }) + index++ + return result + }, + ) +} + +func 
EVMBytes4ToBytesArrayValue( + context interpreter.ArrayCreationContext, + bytes [4]byte, +) *interpreter.ArrayValue { + var index int + return interpreter.NewArrayValueWithIterator( + context, + stdlib.EVMBytes4ValueStaticType, + common.ZeroAddress, + stdlib.EVMBytes4Length, + func() interpreter.Value { + if index >= stdlib.EVMBytes4Length { + return nil + } + result := interpreter.NewUInt8Value(context, func() uint8 { + return bytes[index] + }) + index++ + return result + }, + ) +} + +func EVMBytes32ToBytesArrayValue( + context interpreter.ArrayCreationContext, + bytes [32]byte, +) *interpreter.ArrayValue { + var index int + return interpreter.NewArrayValueWithIterator( + context, + stdlib.EVMBytes32ValueStaticType, + common.ZeroAddress, + stdlib.EVMBytes32Length, + func() interpreter.Value { + if index >= stdlib.EVMBytes32Length { + return nil + } + result := interpreter.NewUInt8Value(context, func() uint8 { + return bytes[index] + }) + index++ + return result + }, + ) +} + +func newInternalEVMTypeCreateCadenceOwnedAccountFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeCreateCadenceOwnedAccountFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + + uuid, ok := invocation.Arguments[0].(interpreter.UInt64Value) + if !ok { + panic(errors.NewUnreachableError()) + } + + address := handler.DeployCOA(uint64(uuid)) + + return EVMAddressToAddressBytesArrayValue(context, address) + }, + ) +} + +// newInternalEVMTypeCodeFunction returns the code of the account +func newInternalEVMTypeCodeFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeCodeFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + addressValue, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + address, err := AddressBytesArrayValueToEVMAddress(context, locationRange, addressValue) + if err != nil { + panic(err) + } + + const isAuthorized = false + account := handler.AccountByAddress(address, isAuthorized) + + return interpreter.ByteSliceToByteArrayValue(context, account.Code()) + }, + ) +} + +// newInternalEVMTypeNonceFunction returns the nonce of the account +func newInternalEVMTypeNonceFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeNonceFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + addressValue, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + address, err := AddressBytesArrayValueToEVMAddress(context, locationRange, addressValue) + if err != nil { + panic(err) + } + + const isAuthorized = false + account := handler.AccountByAddress(address, isAuthorized) + + return interpreter.UInt64Value(account.Nonce()) + }, + ) +} + +func newInternalEVMTypeCallFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + 
gauge, + stdlib.InternalEVMTypeCallFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + callArgs, err := parseCallArguments(invocation) + if err != nil { + panic(err) + } + + // Call + + const isAuthorized = true + account := handler.AccountByAddress(callArgs.from, isAuthorized) + result := account.Call(callArgs.to, callArgs.data, callArgs.gasLimit, callArgs.balance) + + return NewResultValue( + handler, + gauge, + context, + locationRange, + result, + ) + }, + ) +} + +func newInternalEVMTypeDryCallFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeDryCallFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + callArgs, err := parseCallArguments(invocation) + if err != nil { + panic(err) + } + to := callArgs.to.ToCommon() + + tx := gethTypes.NewTx(&gethTypes.LegacyTx{ + Nonce: 0, + To: &to, + Gas: uint64(callArgs.gasLimit), + Data: callArgs.data, + GasPrice: big.NewInt(0), + Value: callArgs.balance, + }) + + txPayload, err := tx.MarshalBinary() + if err != nil { + panic(err) + } + + // call contract function + + res := handler.DryRun(txPayload, callArgs.from) + return NewResultValue(handler, gauge, context, locationRange, res) + }, + ) +} + +const fungibleTokenVaultTypeBalanceFieldName = "balance" + +func newInternalEVMTypeDepositFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeDepositFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + // Get from vault + + fromValue, ok := invocation.Arguments[0].(*interpreter.CompositeValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + amountValue, ok := fromValue.GetField( + context, + fungibleTokenVaultTypeBalanceFieldName, + ).(interpreter.UFix64Value) + if !ok { + panic(errors.NewUnreachableError()) + } + + amount := types.NewBalanceFromUFix64(cadence.UFix64(amountValue.UFix64Value)) + + // Get to address + + toAddressValue, ok := invocation.Arguments[1].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + toAddress, err := AddressBytesArrayValueToEVMAddress(context, locationRange, toAddressValue) + if err != nil { + panic(err) + } + + // NOTE: We're intentionally not destroying the vault here, + // because the value of it is supposed to be "kept alive". + // Destroying would incorrectly be equivalent to a burn and decrease the total supply, + // and a withdrawal would then have to perform an actual mint of new tokens. 
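+ // The deposit below therefore only moves value: the vault's full + // balance is credited to the target EVM address, while the vault + // resource itself is left intact, as explained above.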
+ + // Deposit + + const isAuthorized = false + account := handler.AccountByAddress(toAddress, isAuthorized) + account.Deposit(types.NewFlowTokenVault(amount)) + + return interpreter.Void + }, + ) +} + +// newInternalEVMTypeBalanceFunction returns the Flow balance of the account +func newInternalEVMTypeBalanceFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeBalanceFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + addressValue, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + address, err := AddressBytesArrayValueToEVMAddress(context, locationRange, addressValue) + if err != nil { + panic(err) + } + + const isAuthorized = false + account := handler.AccountByAddress(address, isAuthorized) + + return interpreter.UIntValue{BigInt: account.Balance()} + }, + ) +} + +// newInternalEVMTypeCodeHashFunction returns the code hash of the account +func newInternalEVMTypeCodeHashFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeCodeHashFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + addressValue, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + address, err := AddressBytesArrayValueToEVMAddress(context, locationRange, addressValue) + if err != nil { + panic(err) + } + + const isAuthorized = false + account := handler.AccountByAddress(address, isAuthorized) + + return interpreter.ByteSliceToByteArrayValue(context, account.CodeHash()) + }, + ) +} + +func newInternalEVMTypeWithdrawFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeWithdrawFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + // Get from address + + fromAddressValue, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + fromAddress, err := AddressBytesArrayValueToEVMAddress(context, locationRange, fromAddressValue) + if err != nil { + panic(err) + } + + // Get amount + + amountValue, ok := invocation.Arguments[1].(interpreter.UIntValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + amount := types.NewBalance(amountValue.BigInt) + + // Withdraw + + const isAuthorized = true + account := handler.AccountByAddress(fromAddress, isAuthorized) + vault := account.Withdraw(amount) + + ufix, roundedOff, err := types.ConvertBalanceToUFix64(vault.Balance()) + if err != nil { + panic(err) + } + if roundedOff { + panic(types.ErrWithdrawBalanceRounding) + } + + // TODO: improve: maybe call actual constructor + return interpreter.NewCompositeValue( + context, + locationRange, + common.NewAddressLocation(gauge, handler.FlowTokenAddress(), "FlowToken"), + "FlowToken.Vault", + common.CompositeKindResource, + []interpreter.CompositeField{ + { + Name: "balance", + Value: interpreter.NewUFix64Value(gauge, func() uint64 
{ + return uint64(ufix) + }), + }, + { + Name: sema.ResourceUUIDFieldName, + Value: interpreter.NewUInt64Value(gauge, func() uint64 { + return handler.GenerateResourceUUID() + }), + }, + }, + common.ZeroAddress, + ) + }, + ) +} + +func newInternalEVMTypeDeployFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeDeployFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + // Get from address + + fromAddressValue, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + fromAddress, err := AddressBytesArrayValueToEVMAddress(context, locationRange, fromAddressValue) + if err != nil { + panic(err) + } + + // Get code + + codeValue, ok := invocation.Arguments[1].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + code, err := interpreter.ByteArrayValueToByteSlice(context, codeValue, locationRange) + if err != nil { + panic(err) + } + + // Get gas limit + + gasLimitValue, ok := invocation.Arguments[2].(interpreter.UInt64Value) + if !ok { + panic(errors.NewUnreachableError()) + } + + gasLimit := types.GasLimit(gasLimitValue) + + // Get value + + amountValue, ok := invocation.Arguments[3].(interpreter.UIntValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + amount := types.NewBalance(amountValue.BigInt) + + // Deploy + + const isAuthorized = true + account := handler.AccountByAddress(fromAddress, isAuthorized) + result := account.Deploy(code, gasLimit, amount) + + res := NewResultValue(handler, gauge, context, locationRange, result) + return res + }, + ) +} + +func newInternalEVMTypeCastToAttoFLOWFunction( + gauge common.MemoryGauge, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeCastToAttoFLOWFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + balanceValue, ok := invocation.Arguments[0].(interpreter.UFix64Value) + if !ok { + panic(errors.NewUnreachableError()) + } + balance := types.NewBalanceFromUFix64(cadence.UFix64(balanceValue.UFix64Value)) + return interpreter.UIntValue{BigInt: balance} + }, + ) +} + +func newInternalEVMTypeCastToFLOWFunction( + gauge common.MemoryGauge, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeCastToFLOWFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + balanceValue, ok := invocation.Arguments[0].(interpreter.UIntValue) + if !ok { + panic(errors.NewUnreachableError()) + } + balance := types.NewBalance(balanceValue.BigInt) + // ignore the rounding error and let the user handle it + v, _, err := types.ConvertBalanceToUFix64(balance) + if err != nil { + panic(err) + } + return interpreter.NewUFix64Value(gauge, func() uint64 { + return uint64(v) + }) + }, + ) +} + +func newInternalEVMTypeCommitBlockProposalFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeCommitBlockProposalFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + handler.CommitBlockProposal() + return interpreter.Void + }, + ) +} + +func newInternalEVMTypeRunFunction( + gauge common.MemoryGauge, + handler
types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeRunFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + // Get transaction argument + + transactionValue, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + transaction, err := interpreter.ByteArrayValueToByteSlice(context, transactionValue, locationRange) + if err != nil { + panic(err) + } + + // Get gas fee collector argument + + gasFeeCollectorValue, ok := invocation.Arguments[1].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + gasFeeCollector, err := interpreter.ByteArrayValueToByteSlice(context, gasFeeCollectorValue, locationRange) + if err != nil { + panic(err) + } + + // run the transaction + result := handler.Run(transaction, types.NewAddressFromBytes(gasFeeCollector)) + + return NewResultValue(handler, gauge, context, locationRange, result) + }, + ) +} + +func newInternalEVMTypeDryRunFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeDryRunFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + // Get transaction argument + + transactionValue, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + transaction, err := interpreter.ByteArrayValueToByteSlice(context, transactionValue, locationRange) + if err != nil { + panic(err) + } + + // Get from argument + + fromValue, ok := invocation.Arguments[1].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + from, err := interpreter.ByteArrayValueToByteSlice(context, fromValue, locationRange) + if err != nil { + panic(err) + } + + // dry-run the transaction + + res := handler.DryRun(transaction, types.NewAddressFromBytes(from)) + return NewResultValue(handler, gauge, context, locationRange, res) + }, + ) +} + +func newInternalEVMTypeBatchRunFunction( + gauge common.MemoryGauge, + handler types.ContractHandler, +) *interpreter.HostFunctionValue { + return interpreter.NewStaticHostFunctionValue( + gauge, + stdlib.InternalEVMTypeBatchRunFunctionType, + func(invocation interpreter.Invocation) interpreter.Value { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + // Get transactions batch argument + transactionsBatchValue, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + batchCount := transactionsBatchValue.Count() + var transactionBatch [][]byte + if batchCount > 0 { + transactionBatch = make([][]byte, batchCount) + i := 0 + transactionsBatchValue.Iterate(context, func(transactionValue interpreter.Value) (resume bool) { + t, err := interpreter.ByteArrayValueToByteSlice(context, transactionValue, locationRange) + if err != nil { + panic(err) + } + transactionBatch[i] = t + i++ + return true + }, false, locationRange) + } + + // Get gas fee collector argument + gasFeeCollectorValue, ok := invocation.Arguments[1].(*interpreter.ArrayValue) + if !ok { + panic(errors.NewUnreachableError()) + } + + gasFeeCollector, err := interpreter.ByteArrayValueToByteSlice(context, gasFeeCollectorValue,
locationRange) + if err != nil { + panic(err) + } + + // Batch run + batchResults := handler.BatchRun(transactionBatch, types.NewAddressFromBytes(gasFeeCollector)) + + values := newResultValues(handler, gauge, context, locationRange, batchResults) + + loc := common.NewAddressLocation(gauge, handler.EVMContractAddress(), stdlib.ContractName) + evmResultType := interpreter.NewVariableSizedStaticType( + context, + interpreter.NewCompositeStaticType( + nil, + loc, + stdlib.EVMResultTypeQualifiedIdentifier, + common.NewTypeIDFromQualifiedName( + nil, + loc, + stdlib.EVMResultTypeQualifiedIdentifier, + ), + ), + ) + + return interpreter.NewArrayValue( + context, + locationRange, + evmResultType, + common.ZeroAddress, + values..., + ) + }, + ) +} + +// newResultValues converts the batch run result summaries into a slice of Cadence EVM.Result struct values +func newResultValues( + handler types.ContractHandler, + gauge common.MemoryGauge, + context interpreter.MemberAccessibleContext, + locationRange interpreter.LocationRange, + results []*types.ResultSummary, +) []interpreter.Value { + var values []interpreter.Value + if len(results) > 0 { + values = make([]interpreter.Value, 0, len(results)) + for _, result := range results { + res := NewResultValue( + handler, + gauge, + context, + locationRange, + result, + ) + values = append(values, res) + } + } + return values +} + +func NewResultValue( + handler types.ContractHandler, + gauge common.MemoryGauge, + context interpreter.MemberAccessibleContext, + locationRange interpreter.LocationRange, + result *types.ResultSummary, +) *interpreter.CompositeValue { + + evmContractLocation := common.NewAddressLocation( + gauge, + handler.EVMContractAddress(), + stdlib.ContractName, + ) + + deployedContractAddress := result.DeployedContractAddress + deployedContractValue := interpreter.NilOptionalValue + if deployedContractAddress != nil { + deployedContractValue = interpreter.NewSomeValueNonCopying( + context, + NewEVMAddress( + context, + locationRange, + evmContractLocation, + *deployedContractAddress, + ), + ) + } + + fields := []interpreter.CompositeField{ + { + Name: "status", + Value: interpreter.NewEnumCaseValue( + context, + locationRange, + &sema.CompositeType{ + Location: evmContractLocation, + Identifier: stdlib.EVMStatusTypeQualifiedIdentifier, + Kind: common.CompositeKindEnum, + }, + interpreter.NewUInt8Value(gauge, func() uint8 { + return uint8(result.Status) + }), + nil, + ), + }, + { + Name: "errorCode", + Value: interpreter.NewUInt64Value(gauge, func() uint64 { + return uint64(result.ErrorCode) + }), + }, + { + Name: "errorMessage", + Value: interpreter.NewStringValue( + context, + common.NewStringMemoryUsage(len(result.ErrorMessage)), + func() string { + return result.ErrorMessage + }, + ), + }, + { + Name: "gasUsed", + Value: interpreter.NewUInt64Value(gauge, func() uint64 { + return result.GasConsumed + }), + }, + { + Name: "data", + Value: interpreter.ByteSliceToByteArrayValue(context, result.ReturnedData), + }, + { + Name: "deployedContract", + Value: deployedContractValue, + }, + } + + return interpreter.NewCompositeValue( + context, + locationRange, + evmContractLocation, + stdlib.EVMResultTypeQualifiedIdentifier, + common.CompositeKindStructure, + fields, + common.ZeroAddress, + ) +} + +func ResultSummaryFromEVMResultValue(val cadence.Value) (*types.ResultSummary, error) { + str, ok := val.(cadence.Struct) + if !ok { + return nil, fmt.Errorf("invalid input: unexpected value type") + } + + fields := cadence.FieldsMappedByName(str) + + const
expectedFieldCount = 6 + if len(fields) != expectedFieldCount { + return nil, fmt.Errorf( + "invalid input: field count mismatch: expected %d, got %d", + expectedFieldCount, + len(fields), + ) + } + + statusEnum, ok := fields[stdlib.EVMResultTypeStatusFieldName].(cadence.Enum) + if !ok { + return nil, fmt.Errorf("invalid input: unexpected type for status field") + } + + status, ok := cadence.FieldsMappedByName(statusEnum)[sema.EnumRawValueFieldName].(cadence.UInt8) + if !ok { + return nil, fmt.Errorf("invalid input: unexpected type for status field") + } + + errorCode, ok := fields[stdlib.EVMResultTypeErrorCodeFieldName].(cadence.UInt64) + if !ok { + return nil, fmt.Errorf("invalid input: unexpected type for error code field") + } + + errorMsg, ok := fields[stdlib.EVMResultTypeErrorMessageFieldName].(cadence.String) + if !ok { + return nil, fmt.Errorf("invalid input: unexpected type for error msg field") + } + + gasUsed, ok := fields[stdlib.EVMResultTypeGasUsedFieldName].(cadence.UInt64) + if !ok { + return nil, fmt.Errorf("invalid input: unexpected type for gas field") + } + + data, ok := fields[stdlib.EVMResultTypeDataFieldName].(cadence.Array) + if !ok { + return nil, fmt.Errorf("invalid input: unexpected type for data field") + } + + convertedData := make([]byte, len(data.Values)) + for i, value := range data.Values { + convertedData[i] = byte(value.(cadence.UInt8)) + } + + var convertedDeployedAddress *types.Address + + deployedAddressField, ok := fields[stdlib.EVMResultTypeDeployedContractFieldName].(cadence.Optional) + if !ok { + return nil, fmt.Errorf("invalid input: unexpected type for deployed contract field") + } + + if deployedAddressField.Value != nil { + evmAddress, ok := deployedAddressField.Value.(cadence.Struct) + if !ok { + return nil, fmt.Errorf("invalid input: unexpected type for deployed contract field") + } + + bytes, ok := cadence.SearchFieldByName(evmAddress, stdlib.EVMAddressTypeBytesFieldName).(cadence.Array) + if !ok { + return nil, fmt.Errorf("invalid input: unexpected type for deployed contract field") + } + + convertedAddress := make([]byte, len(bytes.Values)) + for i, value := range bytes.Values { + convertedAddress[i] = byte(value.(cadence.UInt8)) + } + addr := types.Address(convertedAddress) + convertedDeployedAddress = &addr + } + + return &types.ResultSummary{ + Status: types.Status(status), + ErrorCode: types.ErrorCode(errorCode), + ErrorMessage: string(errorMsg), + GasConsumed: uint64(gasUsed), + MaxGasConsumed: uint64(gasUsed), + ReturnedData: convertedData, + DeployedContractAddress: convertedDeployedAddress, + }, nil + +} + +type callArguments struct { + from types.Address + to types.Address + data []byte + gasLimit types.GasLimit + balance types.Balance +} + +func parseCallArguments(invocation interpreter.Invocation) ( + *callArguments, + error, +) { + context := invocation.InvocationContext + locationRange := invocation.LocationRange + + // Get from address + + fromAddressValue, ok := invocation.Arguments[0].(*interpreter.ArrayValue) + if !ok { + return nil, errors.NewUnreachableError() + } + + fromAddress, err := AddressBytesArrayValueToEVMAddress(context, locationRange, fromAddressValue) + if err != nil { + return nil, err + } + + // Get to address + + toAddressValue, ok := invocation.Arguments[1].(*interpreter.ArrayValue) + if !ok { + return nil, errors.NewUnreachableError() + } + + toAddress, err := AddressBytesArrayValueToEVMAddress(context, locationRange, toAddressValue) + if err != nil { + return nil, err + } + + // Get data + + dataValue, 
ok := invocation.Arguments[2].(*interpreter.ArrayValue) + if !ok { + return nil, errors.NewUnreachableError() + } + + data, err := interpreter.ByteArrayValueToByteSlice(context, dataValue, locationRange) + if err != nil { + return nil, err + } + + // Get gas limit + + gasLimitValue, ok := invocation.Arguments[3].(interpreter.UInt64Value) + if !ok { + return nil, errors.NewUnreachableError() + } + + gasLimit := types.GasLimit(gasLimitValue) + + // Get balance + + balanceValue, ok := invocation.Arguments[4].(interpreter.UIntValue) + if !ok { + return nil, errors.NewUnreachableError() + } + + balance := types.NewBalance(balanceValue.BigInt) + + return &callArguments{ + from: fromAddress, + to: toAddress, + data: data, + gasLimit: gasLimit, + balance: balance, + }, nil +} diff --git a/fvm/evm/offchain/blocks/block_context.go b/fvm/evm/offchain/blocks/block_context.go new file mode 100644 index 00000000000..680a0e04e34 --- /dev/null +++ b/fvm/evm/offchain/blocks/block_context.go @@ -0,0 +1,117 @@ +package blocks + +import ( + gethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth/tracers" + + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +// NewBlockContext creates a new block context for the given chain ID and height. +// This is for use in offchain re-execution of transactions. +// It includes special casing for some historical block heights: +// - On Mainnet and Testnet the block hash list was stuck in a loop of 256 block hashes until fixed. +// https://github.com/onflow/flow-go/issues/6552 +// - The coinbase address was different on testnet until https://github.com/onflow/flow-evm-gateway/pull/491. +func NewBlockContext( + chainID flow.ChainID, + height uint64, + timestamp uint64, + getHashByHeight func(uint64) gethCommon.Hash, + prevRandao gethCommon.Hash, + tracer *tracers.Tracer, +) (types.BlockContext, error) { + + // coinbase address fix + miner := types.CoinbaseAddress + if chainID == flow.Testnet && height < coinbaseAddressChangeEVMHeightTestnet { + miner = genesisCoinbaseAddressTestnet + } + + return types.BlockContext{ + ChainID: types.EVMChainIDFromFlowChainID(chainID), + BlockNumber: height, + BlockTimestamp: timestamp, + DirectCallBaseGasUsage: types.DefaultDirectCallBaseGasUsage, + DirectCallGasPrice: types.DefaultDirectCallGasPrice, + GasFeeCollector: miner, + GetHashFunc: func(hashHeight uint64) gethCommon.Hash { + // For block heights greater than or equal to the current, + // return an empty block hash. + if hashHeight >= height { + return gethCommon.Hash{} + } + // If the given block height is more than 256 blocks + // in the past, return an empty block hash. + if height-hashHeight > 256 { + return gethCommon.Hash{} + } + + hash, ok := UseBlockHashCorrection(chainID, height, hashHeight) + if ok { + return hash + } + + return getHashByHeight(hashHeight) + + }, + Random: prevRandao, + Tracer: tracer, + }, nil +} + +// UseBlockHashCorrection returns the block hash correction for the given chain ID, EVM height, and queried EVM height, and a boolean indicating whether the correction should be used. +func UseBlockHashCorrection(chainID flow.ChainID, evmHeightOfCurrentBlock uint64, queriedEVMHeight uint64) (gethCommon.Hash, bool) { + // For testnet & mainnet, we fetch the block hash from the hard-coded + // array of hashes.
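+ // Note: both branches below index a fixed 256-entry table with + // queriedEVMHeight%256, matching the 256-hash cycle that the on-chain + // block hash list repeated before the fix (see the doc comment on NewBlockContext).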
+ if chainID == flow.Mainnet && evmHeightOfCurrentBlock < blockHashListFixHCUEVMHeightMainnet { + return fixedHashes[flow.Mainnet][queriedEVMHeight%256], true + } else if chainID == flow.Testnet && evmHeightOfCurrentBlock < blockHashListFixHCUEVMHeightTestnet { + return fixedHashes[flow.Testnet][queriedEVMHeight%256], true + } + return gethCommon.Hash{}, false +} + +// Mainnet - Height Coordinated Upgrade, Nov 21, 2024 +// Flow Block: 94361765 4c9edc817afeaaa6aeb5e63504ed3f5ba8bcbba3931e53f5437d911a1129b431 +// PR: https://github.com/onflow/flow-go/pull/6734 +const blockHashListFixHCUEVMHeightMainnet = 8357079 + +// Testnet52 - Height Coordinated Upgrade 4, Nov 20, 2024 +// Flow Block: 228025500 7eb808b77f02c3e77c36d57dc893ed63adc5ff6113bb0f4b141bb39e44d634e6 +// PR: https://github.com/onflow/flow-go/pull/6734 +const blockHashListFixHCUEVMHeightTestnet = 16848829 + +// Testnet51 - Height Coordinated Upgrade 1 +// Flow Block: 212562161 1a520608c5457f228405c4c30fc39c8a0af7cf915fb2ede7ec5ccffc2a000f57 +// PR: https://github.com/onflow/flow-go/pull/6380 +const coinbaseAddressChangeEVMHeightTestnet = 1385490 + +var genesisCoinbaseAddressTestnet = types.Address(gethCommon.HexToAddress("0000000000000000000000021169100eecb7c1a6")) + +var fixedHashes map[flow.ChainID][256]gethCommon.Hash + +// generate the fixed hashes for mainnet and testnet +func generateFixedHashes() { + mainnetFixedHashes := [256]gethCommon.Hash{} + testnetFixedHashes := [256]gethCommon.Hash{} + + mainnetHashes := []string{"acb08ca38e1f155f1d038c6d2e1acc0a38915624b5772551c4f985f3ebc3a3e0", "5914c330c16ee5e6b8e60d0435d0390ef3a29cde3e177090c23cf34e111792eb", "89efffebedded274fc0c72a4d3e953d990b5f54b82b696c65390f87b2f9b331c", "824a13a4d2252ff045cc785aa77c6ab8f85b48a24aa5ac198417bc05248e3d20", "5c0eefa82e36a4a7bc8b67f4856f756407189f4011d74c1cc6125599bcd6a18d", "6e6435cf4a9dc503a213fee4c8e5909f32ef284b3dbe780fadf78ce2a70a6a56", "f312e8571dbd7e2c347a0032d7ac42b62966a833ddacf2ba1fd1b0c1dbf755c0", "e9ef75691eadf0e6e9ca88cc0dc9c29e520aa611dc21ba390eead58284949873", "bc2fedff2ca293a75dc36c577dd05742671549586a333e458c8c723a3d3ba40e", "04256e11dc4ecc63eee1b3ad22e31860d26a1cc2103e34f91f12f4a61cd3150f", "454081c5e315537eda05e5fdd8e5b34df7473386c16d140dcf0df9c35159310c", "f4a897310404d46b19a87a45f4e53743c12c1b4530383d3a8ecc972940461cf0", "81765ca144baff8e65ebe989403c8f86ede26cee5580ff5320817a108e54e887", "cb96415a6f8d3ac6abed34fcc83b2745775c0bdffb7561392e1eeab63c28bd19", "c70a0e0279c46e6fae534bf1dbee7796078ae1a9c214d7719b12dcfd4fbbf55c", "4668064ef2d42bbae07e44276b55922ff7830f8ff203d91f91854252bf42dcfc", "bc966f7acce679568f84c0f6164079a4b238b856bce15091fd62d3d94506b92e", "b6d1beb5b1be5715eb61f0b6528e339c75604f2ebf0605238905a0c1fc4f0594", "e367fba588c1fa71fec1e769963d3106a0e73d13e2ec441d2de44949673804b0", "2ea9607ed6160325c0fb6514ad2d0eb4397afe131c787a6d189e39907ade71ac", "5297cff89b9f573c2f7920be0c8d3e71c32f3016a1c893e9f41048e979533a70", "9f81c00947b14ddfb6793437a787d0bb8ba5692e264f7f5087bbd4e8bdc961f7", "6ef10778647ef844ce9c53b740890980a564619e3ce866faea7bd75b67971873", "db1d873cfb81b4aa32b7d575542d545176782737d7a7f3c9a288205124e91467", "cdab6dc09455023a24c38ae89232d4dd5e76d13935f38eb4d60a8ae3c2f87270", "9cc982be62369ee866334c1ce1660046cf23f109e7baea6a6525ac9fa2657143", "8ef919c45b46bbee779b7511a0dfe23cc9631bbd48103e2f9b7cbe9b492ac61e", "26da1293ebe6664711039a56d9f2fcb245a7b3803c4219cbd27430307624977b", "681c1001f30ebd45fe2ec071e168476c3d3367a18cbbb14784f5ad549b6f6c76", "03a921c3db624982c82090c5f562599f0bef0e542bf145c3784e055dafc43f75",
"e0304d9cd962ba44165e5dcd4c29bae6e9eeaa263413c8e0ca41d0cf883130a9", "a931939f13b5dc447464b115b487820467743016fb5bee8b43864ad836a98261", "c7ed304fca9a13944e3f81c5fe4e8df4a7327d1c89fd63de357551e99996d9bd", "80f7f4870cdd284f129babe4d08c002acb3a63a43b6d73ea39c28820c9002d20", "e2d09b3b3d27e1d5448c1772e9d300fa1615c791e2d8b4ebce3d4c24e6058cbf", "754869cba21c3bd12d94a23571a947f8a33dc369e6cf4ca18f2fd40e7b5f5a53", "e2dc7e12450ddbc50087fd76041623416c13010be0a804f4676af1768e5d61ac", "7bb9175b93b7cab1e02a3f571f399db950908b57b45f1d584f3a5ac1781496a8", "2e7e5f02e2c107b73dae8b20a0f52d52ea1aa7248c6b4876f70808e5168dada9", "e19d12c9f01d7b8cdf6831e269e0a011cd744a10aa1da65780f80a50c812eafb", "6bdaa6249d9616d1244a7e23995dc416b9f3cc464ca2d5941cccb8d5b1a1eac8", "38e68d98e93683c14c3c0cbf82298c329857503bd88e488c6cc8ce83436295bd", "e6149e3ed7747619bcba88919daf4e9dc167a276887e8bad88158fe796aff9a9", "e6c8562da3023e8d864f04545f26ec68f5a3d9ad984e225104ee63617e22cdec", "677b31d0b0fd1be96e13968e9cf679797082b5fe7a2c3a9934516f9d05a35c5d", "d894c76d4e18fdd1424435028631b008224fa480faf7dd4404faa26805f226f4", "38421bae5c3e39fb73178621e97fc5a8eeb1b6e25a8faba76aa6f808819aa263", "ac90729f29643e4107280ae8b69fe4e862c1cfbeef57a9f70579b16db9479410", "a671eff0c687d04816889d0c56e704796690771cb5a1eff8c83ae5ef417f79bf", "601fc2b0ca9979c4b092a812a88934f46381e42037278523f191e8207ad1e20b", "0dcadeeb37a0836c4aa287c43a7f1e6e72eaabc8fb0f5ad6209f87e69f2bf412", "02bd187372fe4c6bf894fabf7191afca7f2f052e5d42a2cc6fb7f2e6b1a07716", "39da57b24b312b1838a44de3286c51a0189608bfaa5904a7a2a35c675b947322", "fe16a19cdeacc8ce05bf38d3617c1e90579b6991775d3c0358bf3dc52aeae032", "9e7e8957797b6fb78679c60c249cb8b83e045e760a6ec24c506d565ae94c1730", "7ae42245a1611e7d32d573ddc2863f9f1902683a17d46b57794ec90ad61a9d6d", "f599ba650e87dcf51733485aef08e71f1d8f5e47c459250902daf3db9bb9f526", "7d914de318e12963c059aa04fe03cb45849b16620a1e7c2a883164bca65ad3e7", "d66014e30f72e1bb309235b6d9b8db6f6fe13b624c0ec81ed657ef215d13e29f", "7d25f2ab344c3ce12cad30a992264dae73668e694d8690bff158c0c66951b6eb", "c4eeb03288ac4ec6166d31909e3fdea938b76f637bdd4963910aa0cfedf4496f", "a30beb208f4ccaec67f83f72b72d18a428b5876ebf53184248ab3438c226729c", "67d9c883f3f8df5afdd8e897f54f4ddd4522d761d23429c1697034e3bebe8df6", "fffc4c5760e75dc839acb6654e0274afbe07b20ca46e90b0da0262432473955c", "3238927e1ff0d18a573cff7ea6d081579bd9ec9de9f6ba2f67301ef88d65057c", "3af6b7b1124dbabca4aa2734711484ff6fc6d76130cf81633e051ffdc01b3583", "0475c59145cad6563ed3f0cae8d03a09c73d4862c5a090f8d5ba5c43f3f744fe", "896c5230f74946f18dc31d879d228715303ddaf01d6c1050dc4cac1cab8f5273", "a0959444effc54fc3d04a31a87ec466063c510762b2b4e099cda3794c0d59c07", "0f7b8362a5f8bfe9104a2dbcbf25dac6dfaae4fd41862cb0f0e028062b7db9fd", "83303d47daa193a0e9f1cb38b7fef57508b6f8f80aa46d5663f64800c9bd25de", "82892728f36bf81b17e2fc6762444d938f5b8b6e80c09c7189e73a8b6b9b2b04", "39f93ea2ce0afb9ea531662a38cd65984ae38b10076a37ddd10fd45ed35674d0", "6783668b699abfe0b3bcbbc79988e7c1c5191038497cd73e52502702d18b8cdd", "6fb5147a8b6cff70490dd2dcbee8c26e32808034978f8989bd0d73ac1c5cd79b", "b3b550f194004cfb54f24f738fe901058fa4eea680d5f704a1e49996aa7df019", "2e81de6c3c6a1828322fea4a5d7add9c9c4bc940d37760cd78a15f7185bbec5e", "8ea70bc7c983074e7d32d9c47e24ebea9ddc0a04aa4061e82c40566cbb886061", "f0c2785d27868755124be6ae2cdde18804893493f53c2bf3b9ce3bc37a983afb", "f7e684111cb2b43644b5e2a07bcdfbb9231ba8647dae01103bb15ee84ed59dce", "a4c839a3ec06907bc87e18a06aab314e184bf55d8d66acf57012d81bed5f5a0a", "e6f94c1f935b7505d65b70571a169572a5586582dfcd7ec43614eb5a53169556", 
"5f67958fb79aec5be7e950deb0a9a86fbcd328eb75298946014f06c200fd8dcb", "ba54fe0ea8a35e899ec5e5ff9aa888ae7c5ed8630336a098c131809b6e3a815c", "11eea2b61707439bfcc198d3d483fc7fbc8f5c83f70b6190b6bd1bd11a0edfd4", "d1088b19e8814dca954f5a78f827ea6e20de2b8e0839d5f2d2ece9cc58d72c76", "c5e0a35346bc8c9a45338f5844cd13f5f5b94ae90494c8609b7fe2dd69925429", "32089be74f3bd2191d7e8116a742b40f613d75bd77765c28a11d937755ca52fe", "f584db14565d9abc212b02935724bf05da840670b46e83a64990d7463571f9c2", "a959dab01d61bfb54bfcb58bcdb609f42e2a062fcc63eb3d5b866e582fc589a8", "cf09a5617dde47025acdc7dc544f9d78fa396c383ecee103b5b74b532d9a586a", "03bbdbf7f22cd92a696f1ffac34c99d7e57c201edfdfc617826ef2f648d38475", "7f1a8c24c456052fcc3721707a56c457eb7d80ce8d83d8d23c5a9a0cb70eeaec", "2ed7147f47b4f12924358d18f778cec7d28dd53e9189a5096a7449f42a1ce29d", "c78b588dc0e967fc85abad5d5a18f2be86b7a77363ce701f245507a7043de3f2", "bcc0b4ed36d1512825bb2a2db5ed41cf5a7f5fa5634c8199415eae7a145ab772", "c520f97ac043cf2431641d4532c4a44f9664e728c08382ec798ac49997f19695", "a7825ca8bc2f6ac8556b88cc9a3c2a533504e5a8e011149cc15eaace9320c23d", "de8cc99029674ccd55105e8b5182b22e8c219a8a35e9e5fbb386d232e8e1ffd4", "2ae0a0239db0cdc5108215d38f30d783b4619824a5b420cbfca4fd6242586fa5", "3ce22aea444053e3456ca4edacb1060a5a355a7ca7e585af873388f99e654028", "f18aea7d73a0a8b2c313eaf7e742a08225e68341de787a4003fe49c06a5d4d13", "82dfb93809f99c59f6d41402e863580fe080278faca77cf2eddb651fedb77b05", "08c1d039a238c625ff6715aafc33ee8a1675bfa482ba6edbd0d9cc63d947b5b5", "a90aa55518cc9000eceade9b79644cf723c22f60caf849604dbb2ac22a8b5a86", "077cd67222a27be3640cd4d5cb3946bbc0f7df3fca7c5dd2ab66e4f9187f979e", "e8ccaf643c060c92fca26ab6adec347a6c3fee28bfb2089c5973bfc319cd8da5", "49e9c991a4d793b5ab62c3dc16290cfee8389fb12ab90e182964dbbefb72ad3e", "9ab6a29e6b5cc88f1791de37ab48ea5daa9e222365fb2b590c8f04109a372a5c", "8022544ade8da7ab8b34bf3bc8cd15e90697a4e72e760c809880830e2aadbdc5", "cc6b301ae355cdadf19e48e6cde96d98961c9aee896bf9ed815cb47dae0e1c22", "02b5781e6a697fdd26883f63ecb7d947e0789ed1fffb551bec429a139c0dcef8", "bfd97e8342aee5eee212638e37b691edac398b54bc535ae3458ca72c530ebba6", "1546f5334900491745f87d82fec8082c65cbf6e975b9474041cc7e22fe369130", "8d42b170698fc0c2662a2fc6d1017a45bb7af9c335e1c5a2cc107759d3aab7dd", "0cd379d9866856ed9fc3ad93190cee5d5ab8dc738b71fbb4bfa14e44d2b342bd", "b287e14cd59493d0f0a8d6a8a8ccb056da71527af9610bd38a80f89f43ba9e0e", "699f32bab442ea206544c0478fbf8e55093bdf246346014f242bdf1be60b9b9f", "ef55d74e0c1b660dd69bbef8b1d87ad827da29f2ab5169c14fd17e5ab3f2906e", "cef5074e106ea292c52651ae438bd80ff34b8ebfd31e00ab137190ac8829967f", "312dba438b767fb62feeed74223e5345241e3c9d078863b82c9768d52635d6e2", "102da56cd23259629a60db3c3e60eb2ddcf124ee47ed6e37e09b1cbd023a2a55", "d8a8e6bd81976810315c0950dffc466ae9ef5440629cbfdae970adf9be85a2eb", "d890f76eed51ff1f08b2e08c13123b5b59b92db93874c3a1774c22589ccdfbfa", "8b9a63cd3ff092638e11ed29b542cce0b5098f2f3ef965f5c0b4c18cae90bc69", "1abf154ec1d34306d97189ea9af96a6a33c4bdc597cbc14897b89decbf38661e", "07ca0710e82029b6385832a4b546e0336c587b8ea9280fc384afb611d80ab7ba", "b043f239fe9bb9e78e4102b7ed49d35beea61ed7d677eb53cdbcbbc2783b4079", "1b50849c36638c9afe17cb095d4bc978d8883404b1c58cd3acf2ed09f188c602", "51853c7a3fa6b70dde4f16610ab43241a89ebd3bcd0c473606833551358a8f7c", "350b1984c35d9d48f6f0dbf97e33e76edabb0125538b52927182ea00a4736021", "126ea9840493f9ecbdb8cf04327f0ab8c9315a7420772b2bfd263fd16d1e28b1", "b0190ad8ed68d4f8f91a54240ac7e2205be58b5f8ac5f23e8ffd280c3e554c96", "47e46e9f19a2088625dcb5a1a5c6210f3b4f30e748ba23c6391f314fea4f5bcb", 
"82b39c4162a1e38739942ca62fb80aa1de7f9a833c0de58d67796a243175b917", "d0cd963ed709c3573789cd8e4c35ae28692db1a6c99f7b38aceb7411a4f7be98", "37a325b033d3f6f1d56f27dd4c5169301f7eba32e8f4c8a8349cc7ece87ddd9d", "51c95c79b6819aa2efb727fd29cd73368488e828fbde2b64af4576e79bf242f6", "3bc469e4ad8a997d246006f09febb05acfe065db25c4a33c8f2437b0dfef0878", "b58599ffd76d2147235706a200780a5ec6195e2a5c13d2b7b8d242b7c1958d16", "140462e616516eb56075d1ea6c01661c2f2638e471a28ccbfcb5d5cf94eb3e74", "9d1cd56f1a33c62840af5b75f5b1e3b0a1475db362a7b8999b8897c8defe8579", "5adea11dd63543557d0f95028656284e482e894672342b664c2d483654c96271", "b01f5826fe1aaf8cfa9955d9f2d66fa2e896e8406117b87291a05c8c0b1510d2", "5974e67c55df5f4c6a0e3230d4322aa70ee9ff975a6e0c65b4fdcf6b84d4b31e", "329fdcec3d7c1e61b190fa5ab4c6d58cdce2441671c695470c95e00679390289", "a5f0189e64f96ef6e06f5208718ba903d1934eb7f0b85aa38fade6e45e1278bb", "0b4ab1c1a890edd1b714c390399293cf1e1d1deef68ee4ef005e3b68ff17ce6d", "5449ba71016c81101f874c61702fe7c472d50c2bff7c815028cc6c84d761aaf9", "d5d5fdd27c59b705652ba82caf7ec3ddd07d4e3168ec4006b3c21b431cf82971", "8a2e4c552b152c8b76cb7e07ea727f26c607b600bb382af4b9f066041156a7fe", "96a49a267355918ba085c665fefbcc6a53e29b35734ec8d1570cbbec61081154", "f040e21168602b67d8afba7aff7cf0aaa4acdb463aedab7a29fe2248f41582d3", "a83dc07b7b05d05954aebd19afb76ab9794e35d1f0bfeb0222f7434579a9fec8", "3259f7323e6e0a7ca95dbec594b4b7ce5f7350bd54ac97a3bdc35c333e1024f2", "c84287dce56c2837eb140485775c13645d3d7195bad44174497c1624e3d6bcf8", "5326aed27fbdb6a4e59a974bee60aa1ae71195aaa311bfeba212c152e0f56266", "8d83acde8c0c2606bbda85fef834620309546855d5917d6162a3f14683095b47", "cba4417044bed9ff8f494919f23661efce69821367fe850a837f7cdd64f5d814", "1bf83c9a48b54e8b4b095bee90f5bcc1ac8e8897b351d93205a64c133bc5bd7b", "0ebb774b03cadda941343d9b2bbf2e7075f049e6e309dd232cf44a36578935ad", "d193e2601554fb3d1fa0c638e147297a76e4a6ac2c02209bc65d7294dbf002e6", "a9b3ba41d99da589a8dd1dfd776d121e6d4ac4f1ee52d1cc3517d2226fc09ad9", "dd53cbd732125e3f22ef9fadd789685d10a49f88f21a6dd66c3790a4b7f2b85b", "6f827b1068f38167235778d893da3e6c7a949a6641fa5b0aa4a116449e7545ba", "80c4debfeb8d3433350b12856003a0378485b087a0e51d4a974ec88fe8b899b2", "addf88642352377a5d80a9f576e1ed7b8754c09aba6be508e2b8f3b1d7d9e042", "8c961cd106e03576e181925fa16dfda42302f96da8679ef61eb64c1a4742e5b6", "7c02dcbda0fa59f3e843836105151bc1a49a66e2a02fb5941595d23abdd376c6", "45da6f88684c89476755a45df16d1bb602fde60f95d8756311495bb53b441637", "3df1b14731bb4b7a070864eeade24fa37c3584475fe3cf199f41709710ac7f4a", "6638900a817ceda30dbfcc8931ab64d047b281c71ce9e7d203f8790fcea042b1", "b2378c5c9b4812924571836703eeae38364924c2c0430e0a671f2b3a8d338130", "f4825a9397baa4bf07ad69e8dc7e69c03a76c0d394160729542f1b46ff03f338", "50573280946a2c75b36064277f4bbb79875881c6f9f55dc834b0f408ce02be00", "3a6903db22957442e3bd81727d3038c69562403aa8584302f49c28e5f0f4f5ce", "081be91f15adc3c6591e317a188d524c1d16d01ba396508e5ed6a897c169e9a8", "84bddbda2880e71a37578cd427c7602c3580b6af74fe9640cdad994678ed6edc", "c1b6f2cf31192cf7a3643b57fa98ee056e0dd6c6f28eec65821f4fb5b6721971", "f9d11cea4b504a360c0d62c3d908d35f5742112588f2a9fa7eefb5d90c1383f5", "478adc2d34dce7af32071a0e2eedb8c7fb6ebb90bfa404f6ebe10776badf1fbd", "8d809a7afb8b0f327646e1efa6f00670642ba9dde2fa2569d67e5c11a2c822da", "0231b304c4325ac717cce997b2f33f885523062f931d812253035916abfb8e47", "f49b278ef762922930de0e7d4b8ada81b64d010539dbf5a2530e1f88c4a6ad29", "617f5ec465f421abd0e6291b6ca5f8e027f2d500b406d87b6056101bac98a1b4", "1081fddf73cb61f080a9fcef1d3ee2bdf466c3ed35876ee82482c1a49bdf2385", 
"25b819d32eb42de93e50bfbb656030051d7e4ff20d3c78e11506df28a64707ae", "97f38910f204943718d61a88cc539a3f281d540477b0fb2c7929aada1061a1aa", "bf46882478c2a7955093126c7072d7b7fe472967979de522c2c14739bbab7d07", "31a8a2038327e176933240df416d3035861e959eac4528560ff348347c716f27", "d827a95da4a08258897313e839a9613c62de031517db363580c29ccfccfaacc8", "b5de63a660dae61c272f9dc1e646da96eca8a62ef3764c2e3b0ae6b258532268", "60d8f10911e03d48eb7274864a09b19756096e0c28f5ca42a26c4f9b3b7fdc5c", "e5bfc9d179f5fb0810cacbed185cc2b2042b774b95dde4048e8c9b4b4043bd31", "c061bf4ed829c8a43e2c5aa336c67cb4e22635c8e15791cf67ab92e0efb73d30", "aea5b83e75a1dd4f705ef09097965dcd010806537361e228cbe275d783d03a6c", "61fab563337233435da3d3be1e8c0d2332edcbd5bb7085c931e5ed4de2f80ed4", "83044467ce97ee203e81fedac56db84ca469ecf40d278d6e18380db17a719cb6", "fc1dfdc26e01d3974267abd90281f512a6497cea25c198e79318c49a069987f6", "2190499382ade5b6211f7cb7ee8301140c25a8a1e9f95f78a253dd0cee72a9b3", "cdc317b64a7c7d6146d3e63d295b690cea5c8c5deed5e42b094361dcf2038614", "8496b471f706842289855bd5dad8e8ce5a45a0244a537407a62ae82bf28f283e", "dd68dde67735cf4fac77a75f658c01f30b3dd373b7443597c93cf1ee9e1c375c", "7d9fc45eb9727f3a1bc09abb274a904bd1c7c4a8b0ddc131a66d0c35fab12c6b", "d3212e0196e6716a17f83983cfd28a90d4ffd7e7aeb93659a85cc5585266d153", "529b13f078978955ed8c139326647f68298aad6515c978fd532d67814d68a819", "047170f4b389cb5ea020d89957aa1c263d00c7e5923c357fafe2a9539295a70a", "a78a5b14dcf7d45dc1147f12138a46aea7d74643f150947184121c4d8e83aacc", "5fc7cd475121963671bda69d4e83b5da3b915f94780f9b21ad11e14876e6a2ae", "ddf9d7f5b52966e8dc5643c2c7780ce8d5512b581859fad0f11d7862b9082a0e", "98d4c1b60953deba57b070f6686ad1e56dafabe4e0461ff823f7e4f1e2d68a6d", "da05a4b3332528d56f466d3eda964682bc31f90795155ad306960e85239d1570", "52da74b3f44371219361d635f8ec93f428b068aee1d49adfda3f1080b812c403", "03d5d11bb421694cf5829985b2d2ed69cdb66c59874e772f9133feba146e56fd", "95112eaea86e4518c06e90875d56fa96d2c2e1d279263b8aeb55e2ef609c0015", "7385b128fcd181847ccd65e61535a3b1e6c935085feb1f116d07b69f754797c9", "025829df5b0e89d33e50e4da9cbac3699faf423a17a01f82abb1dc5a4aeaf7bf", "b8d71572694b145ff3a891e14463c46bfc2a7f3ce66f4b72489dade529fede9c", "67106f52b3bebaf6148ca60c81bc8802050f299d8e3139a8045ef34a0ee8a83b", "c1e4c64335250f030a8dff08151d8631de4f1737973ece0a66ce5819a6bcdab9", "15ccfb66ee051bc937c87c622ffc726f5f6c9b2c83acf52ed0dc6c63d33e0764", "dabbdafa2406d76784fd51b3f5f4014f97e91a0293e96cac0d7252400793352d", "4c6fe6506950104f209a64e0975ced68826c9d6d5c604725c7cc38119741fe1e", "4c0da75b314859992796ac6fa932c9804e6cbc0372b8af03dc17ee487dd46a01", "126d57ea0faa1410e2bff97a97dee4bb95f931c65e424936a3c663136cf44b28", "7b2000fbbbcb50649b57f7de2fe8e0c2384c16839def35e4ca3b368306c737aa", "191a431907c471085ce9133b62f3ab70ad7ba440ac70790400981e68f46a3a34", "7c6b5159af1596f1b1116915f58686bf5943222da9e864f415626328ee0ae8f0", "c01fc7330f29cdc41647dc85b357fe1c734410628077db6c61f736f2288e91be", "c1c9811dc7c62642ae25fbecdbd276124bbb0b2b3ccde483d81831a092fe8940", "183760186863265934b5678d6701d33b02427f0260de63ab92620cdd0ea0a193", "91036fe1c4780fc9a73005bd4fd0e674d0fdd2c372c1ea036e03d89296322b08", "279f655e7eb78b83a915ebf71097429c2ce71ade9c0ef44f5342f7361dda1c1e", "5b9ea6fe50b0bc7338a425931d5587e7bf29ddc886f95a013dc265f9ad4e6a5f", "e58b9814df7395a036222c5154c090e1edb7413d786f744bc71d3a3e7d3ae51a", "72f05a38389a396e7e099943e7626432809e8fea44b2b59c7f5b1be6e544c477", "66efc642ef86130ae927b9a8211a7898a1a0d4633d800b069b8a435f38a87f2d", "57e2163c10bc4cf0291a22e157e30e2f3bd32774777d562d66b5a56785af16cd", 
"d8bb29af4ab87ee4c6a5f906da83b486b0cb68804d46520402560fd361f9c046", "08c384948e4a5437238b38307ef1433aad79196ccf3192061381fbe1cb2f95a6", "4961223a92ed9aac5200710c1fac16222cebf4f45d71f9bcf747772ebcc10624", "51749e1822fdbd6e3160abdeae195e281affc52170d4d350b3f205f742ed7b13", "14e8dc225152adf94b64a266a412317eb84fd518055718d4f8261e0fdf8a9826", "4c5dec521f84e603ac86babbe7763fe82125a9eaaa705d8cddd6eec95953a4b5", "8acce8dfac2236fafc944be02d072bfb63ddaea49045e31283d73ab38823fcb7", "12cddcbe68b1fabd5650ded7d323b80460ee122c96e3b58c8b5d29a17b917ec3", "d86759a0c43a2fde5e79adaaa167f9d05338aa8b2bc6fc5f9b1263164aa60343", "5267ab3dd6d646eb7bb1c04b9c23fa104287011a46714accc33f608d36d0f2e7", "d8d8d61f18ebffb56574b089b975016513abba64f68fe0da8c0f8d0a62e0416c", "b0f64d75d6754023267a8bd9dcdd975002ce1aea4d2e8103edf80ed391be3782", "b72d60462ce989b717868769b43678b933f239f977e22e2a0d61fa59721ee3a0", "be9a8aa7883625a2f43670b961827cb4d58edf21618af86e376abb6d743a54c0", "a233d9c85d895c54f9df1c93659ac3b1ad9f46458142a5310f40f11ee9bf6316", "75ee0e41d376721a8a59c7c9dd40282780a0ca863db78dee7a589cfc4c98b3e9", "8b34745c1c95a176ca7f21bd1350ab491763379a3ff99f60214003217f6a7118", "75e4c59a6469d9da7de866054c21689625786d6ced18cf6130aec6fd45766025"} + testnetHashes := []string{"fa857cb5d4b774e975d149a91dc47687ab6400301bba7fab1a70e82bd57ab33b", "57c87eeb449e976020fb60b3366b867ffc9d88ec5c0f10171af4c7c771462130", "1af58b777b8054a15f3e0c60ec1c0501bd7626003a4fabb2017e16f1f4f9b0aa", "155eb38e56a75c59863434446071a29df399e0b79a0f7627f3c0def08c0dee4a", "fc541a457aaacc00c4bbf2ddd296c212c7c7436a1b15fdf40971436f4679060b", "22461d010d68d2b67a7a3373782af7f75eb240a845c4b1fa1c399c48f7d3eaad", "e62881132d705937c2a0f88cd0e94f595e922e752f5a3225ecbb4e4f91f242e5", "61084954ebe8d12d9ac71a9ce32f2f72c5ab819ab3382215e0122b98ef98bf6d", "b65786186ff332a66cf502565101ed3fdf0a005d8ea847829a909cafc948cdb7", "6ec4b77f75ce5bd028a22f88049d856dbf83b34480f24eb13ed567de839e06f9", "c1db0ccc2f546863cda1e14da73d951e4fa4c788427f13500a1a7557709de271", "b8a6f83f59913bece208fbc481bdf8a0ac332433f8cb01a3c5c1b7ae377f2700", "9a8c588bb81d8c622b8c6d9073233c176440da4dce49433b56398c30239cfe8d", "a84205d415780ed3c0566f9f4578efeb6ec4ca51f8a93cc7f89a00ccce8dcb39", "c5d6591d91eef2ca446351e95dc4134438360c1b7389d975d636cbacba435280", "7be74dcff396c8abf98c6727659575a5b157c9ec98c6f1c9504732054f09aaf5", "a7dcad11df6d5778824decb3624953440a2e8f01036083c10adb36b4465ee14a", "ac6e904295a3d736e7f22ecb5698c1fd8964e3f0afc07ee2487e63ee606b9bbf", "d7c2cff7f8a08373b8aed134fe1fa80899ddaaa8dd7722fca9b2954228b25803", "580cf925b0d2ec1617e17f0be43402381d537e789bd5a08c3a681dcdcae2d731", "c71cde092dddca890f9f44567a651434a801119dfca6fa6a8ad6daa26ce4d6a2", "b010526b4edd19af408eae841184d97f1ad6e8955c4a6ac8240e32f75a26e5f9", "9278a4d8204e7b937c41c71b9f03c97c49203d4cc6e4e6d429be80ff1d11bf02", "d57366198709ee6be52ea72cb54cfb6282ddd6708e487839f74b93c06c9a994a", "1d17a3f34d23425ad6fa3b1f57cb1276d988c3064c727995cd6966af22323830", "660a0a66a46fae20c0a4f2b1a5f11c246ce39bc1338f641ea304cf2dc9bd0940", "e4562f14b6464d2ee4e92764b6126fab3b37b12c8b0ccb0cbc539a0f1d54318f", "3ed39df06d960213a978379790386ec1c6df288a524c9bc11dbc869d1133e86d", "f09abfcf424b6bcb7a54fc613828e5ff756b619c957c51457d833efbbfd9c601", "58b6fe973b269639c2a6dc768e1f1f328c3c1d098b6ded3511b1f8e3393f8344", "398fd65258285061025e5b53043496832acca2a6b61906046605df18767a9da3", "b933d1d819cdbeff8e3acf9cba0fe7b3e6db3bb582da027a0f1e432219bd6033", "99baada49d56352f2e221cf62116c70485a83c1174bcd50cf5ba62b35d1661a9", 
"19a47884389d1f995a37c7e2b19525d44a27a32a5df2c0b9c2954fe458655baf", "3820ea36958821d31b8f2eee80fc17e72dcf361f052c0399931ce979e9a10293", "3a3655c7bb4fb1814002b468d63f72c0626d4c7df4ceac28a68c970a3686712a", "bc181caec490ade2d715e7d0c82cb9ad3fd685dc962d8ffca00861d88f5366b4", "da92ccf74d37b40738c41222cee137c149889966c54d62d91472d2ea81be37f2", "e51d0d81a40598e0d6281d2bbc56a1d8c5aa3c8233f2bd9be2316ad6a24a2dc3", "6cb2e1aea92658471cc40ec0a4bfd64d8e76bc0b9bb5707306fe89d93158e7c4", "e4bb2e67f5ff721ecfac0df301bf3db9704d47a9d33c2f952be17dc23a113c45", "7d29bf4f9796573cf5274900ec667bced39cb0377409d281a2dbceaf99ec8fd9", "45b32bbc856daf25ad81206623f8a7fb53f0afbb488f72ffef4d8f0a9431e62b", "b5aca33f4af1f65d9e9e35035597b58896d99abd5b7954593ffc70c86a90c94d", "7a21bc1136bd1b288fb5be1fd43b39cdfeae9b424e3da274e241dbc1ac780d72", "95bd53bea9d44609b8b24ff5c30feb08c91d92f239632f8093fbb8f37a704112", "61551f4fb10bd3b97870af25c6c18d8582d6badef8e87e3c5297befd1331003a", "ba43a4bd43dcdf44ce163b58d35df3def39f2a2ed29cfcf76f3d7571827b8bc1", "329c277c2f0555d33e294377bf906c404a163ee653d0894661714a25b1d3c8fb", "6e143a6cf96b0b8eb695bd77b1e28f2a61f4dac8a47b3cf2b69d6737d8441242", "991bc0911f5914677f4ba476717a53b0b889b91cf178ae66c0625167f7ac0801", "541fb4e3a4fc928a017bdce01393ea8113b2236dafbb3809973f7b8352442d32", "9c9181ad53d6506666187974b6b9e3a9c0bee8d085d10cc79f50bfb4248ca129", "1cb89bb5668ac284574be9118a78d3fa5d674c84579c75d5596a47d2acce29f6", "116b1c4d1a8fef4cd852a8841b689fff4f1df3a0f5bbeb545942150f4b806646", "b54f3b2b235b816bda74453e228378fcf9b79a293534aac71dbfeb6b0ee1ecad", "9acb23972960f0b4c5d3c6b061a2a1c4af4f7a6d4a0cdd8ec7134ae7bd59f95d", "17f3d6c720bc5efd5ee8226d353d1b347828e621400a2a282a190f5b7bbdd0f0", "1838dc6001bb37cff89aa8675ec0ae8efdfd35c5dc8a793538c31d08df4b8232", "ad362ed3de8ac036d4a89d31282f26e10cb50fa900c6ad76f7ab06cb7155d234", "2bd6a5464607a39d0bcdd07e15d4752d1a52b644bf9a81d8d7e5f9cff0af30af", "44124bbba59755b9004d53c3e721820c40c1cc163b7639b4c1a03ce6955e292b", "f19520a13533371cea4cc20daeef421c31c0a88d4604e58b56ebef82288cdaaf", "c1796053a6e8847cf3d8a545670dd953d1273dd3d9a6e4df6e59e33950cc2890", "49aeb76ef737a04fe91c3a61dc8c7b87adad5978d8951f8d033ddeae6fa2b720", "bed2427fb70a9a9a576528569ccfd8fc86ab0ecd4ca7a932d5a8f39316f887a0", "a8da98fa12885b4165f7635906d9bc240c2eaa66079bf18f496dbecb68c7c49e", "cd7e523f67b5ab520d1c8972f78db9a8d283c66ccf000aa31cda8216fe2e508b", "1e29c627ce7b6402eb5115c59a48d561f4420c44748d7de2ed185142beab4a29", "5ddf101e94858f06934c6019eaa22b93d88eca16592720e9dcd982894ac27060", "c408705873fb0ab3fc4f5811e69ee20b0a1600f52bb4663e29362f4391601ebe", "ddf70a2c37e60622148124c22f8f0e96b4eba0af4d5b8b18015d574f33923a7e", "d6e1f406e0d96c486c1bcbb09768ff0e5577f18c97cdf2c3e86dda54b4007448", "656b861ba19271a6591c7468af61a9d29e331eccc9e526a3d25517d29bd69809", "24372783456ac149b4fd0dc41ee16d55500a3c433fc3b1bd3c1c45c8a93c89c5", "2bbbb4392ab7f1fd8a160a80163b69b5f8db16fdf97c2d8ee9e29df1d9ebd9fe", "cc9fd404792808740bdee891c8e93e3d41bfe56c2438396d1ca8a692dd5fb990", "38080ff661e3142133b82633be87af6db2d33f386d05f8439672a1984aa88d13", "22b7125bf763c17087306776783ab6d1c50084e8a7435b015207f99295aa1af9", "570c31b148e5f909873e8d2253401a64eace826993948cd2f3f4d03a798c6c54", "f0cb29da50bff805a3a1736dbe33ea139893534d0e25a98f354aa5f279adbc97", "cd6b07cee12ae00058b20a6d31173c934933e6339a00885554ccefde008b12e3", "323fa87c41960355883ada3b85bbc13303d8202761ea70d015841060c7f7fde7", "01c7c87db4a01af781695e2984e68b72f04a0f7859749bcfdcbee73466bf0990", "a79003be6397a1fac1d183ebd14d72f69cfd9ab310cd8f9cc9c3d835b05d7556", 
"50dcfbe053447768b56f6c3159cc6d37aa5791d87abfad32b2952e36de8a20c7", "21647bb0680b8b09b357a54518a50d6c4163d78889f26ef48bc93cfe43acb16d", "96dfe03bc8aa7dd74ef98b4cb7cad866c851b8fd145f4b5bdb54c7b799e58adf", "87037ff5508a2a31c62cbef1feb19f3ec22f44ade292e0a036e8a7d8ef3d13bd", "6e7336d4e63a744ae45cfd320ca237ba4b194d930bcbfbfde2d172616df367b8", "780126f3f77af11cac4a71371812160e436d50f09ee01eb312d6839b7dd4e3a3", "9373a2bdc426bc5bf3242c7f3ecc83a19f2cfc0772ecdeb846e423fc8ec40b5e", "0339e7901bccba1e3c8e05956536823b2b0e7189c66f5796b7602b63a8fd1ff9", "b213bb94b274991d4288a6405954059e99b4c4b891a74a1abcd83ea295331b18", "d0a7195ec0cd987709b4dc6416e0ed6fc9939054ecbf502da8c4c6a09836ed9c", "7b9c334b3aeb75a795f9d6c7c0ee01ab219f31860880eb3480921dcd2a057d2b", "9c4e722d126467603530d242fe91a19fa90ebd3a461ee38f36ef5eefa07e996c", "4306ac8ccd2ce6a880350f95c7d59635371ba3d78bb13353c5b7ff06f7c6fc40", "4b9360e2d86f20850d2c6ff222ed16c6a4252c00afad8d488c30c162b3a10da7", "927f20b9dcfbb80f4a6b5d6067a586835bdcb5f3e921ed87bec67fb5160181d1", "e620bc51fbeb8011f57324b0a7ae6f45c46050cd624887f0a50879880632fdaa", "ee7b749b81e86d46fa3e93b9aba29285bae38a91f175dbf7c619d05fcf91e857", "573d5039fa570ceb3fa136be73c432b49a19af00a7f109325b78160f7dc13db1", "9ab1936825e830d4eab7a945701528579f78a8d1702a76a774e7456ddd3a254e", "2b3538a6fed897c0143f51b82f7e9e1929cb698e7de8d88aa8b1d23cabd58fa9", "21e2f8ae0522da985262ccf8422d98d75068ccd448d15c4bfec9f793713c7644", "c02a276e24fbb64f5b35d4b6555d1d873095e076868cea8dcfdad9e606612f9b", "7756adb6b470c5126693a4de57c1d5b38afab4f7ffc4f982374e8466051bcfca", "f82cbe9343e63fa4bf486f8e4113f91abef7c994e6f7068b500942fede79f095", "782f9df4e3f669149a575922a7318d523b1ab8a5911a2b1c2850839d5762cf03", "89ef33e05604e28f762b3cdf2f20d876adcb104a87c2636c5facb61ec47d020c", "59e374462a0c7e32df5e087d4d250936ef54aa19ca824ebaa63b66406180719d", "11fc2b68e458f12e93398a453c5efac599691bd89d40c35e003dc594d87bf51d", "5f793edc159efab968da834bd44187fff951cec822ca1b8982b1f36d966956be", "da0d474d5e0ec5d0966e1986a5de3f085e0f491da67cdb43d52fdc9848b14314", "8d4eec56231819d18f3fb3ec6e6881b269c0ccb881eedecb5916d2b4ef82c6cf", "137e7ea7c47a724f8a4494a3e73e74f146282382935d64d25385dd720f537e98", "1a2a9c7707443c848897141a4f659fbd0b7fefa47365f2af43183777dcb4a8ef", "7747a6f738959e6d75f16fe6d0782b455258b9c93d0380a230722cd6ae11e0bb", "314e30caef6c7c09b2a85056610949febb6abbbf7702c5d6706cef658123d782", "9ab42848b175c62790b5aa4f256899bb609d05723d364b8d349160afadfd9f95", "853b07dda09eb155dcebbac23e2fa5d76c5f619f3cabfa5e25fd82706485bd25", "a2b0053632aafe21d4dff287c03c362cae2a1d3267cd87d82a7ba9a3795129c9", "7918541145cb2c5918b8fa20a31298a7bc9b8f43aebb69f046f78d070a7f22ef", "0827e91cf9ec4dbb95966d68cdeb90dc8399457f47922d1e53eb2972c87756ef", "6121dac0131fc1fe0f7652d6c2195141c0e6a9b7e5cb555647ec3bb2f90b912d", "134fae4eec772042a832efc19e2f3e449db962f3573c070f2920591c306967b1", "b9a716636f3d1dd47e61aa1216f55317230cf734e06c9f740552f2bbd6e8210a", "d5caa5c0bb57e75c78de5f6f132e19776b777dd205d37ff6c2179412caa32c40", "e11c15139b71e7078a664d430e115c631ac8cdd89a8f4b35e4bbbeb9ec85dc17", "cbff909b284e4b1858adff2a0cee75032a2b2411d805604dfe820e40e855d6b5", "5b4ce1b89dde6b8b5cbec1b454306b7f53a9dadcdbe5df429ea5a33635d989d3", "c06a55411e962e0bf9cc11c14e854be084906b374cc181868c29ebcab0b66775", "ad16c4f73055baa8c0c6f69e294019ea90e3e97ee90923c4478156e15180d19c", "76866d7b50747a469e9891c529b7a58a4b9082d113b7acbe2b46f6049a8d36c7", "df96c9eba4763a1c3a8a0d2eb14e57847ce679adeda80b04cb86ef4f40cf290a", "6421d33aed4529b00db819051abed4ae78f28778feab921177c24378d48b427b", 
"cb76cbf3c146f5890eef6a8e78349b9291b75d2ca3b947b027f52dab0acbcdd4", "cb9a9e1606d5d6cc59bce096733be7e6902d8c8de19d22cc0f5435ad4e719015", "d3b9005c6b93a657d8edd2312d4d59b8807ea7c509079dfd1e4a8cef3d6852ba", "ebc705fd3ee20a69c5e99b1bf063acff8c926eec9358a36294b8df0fdcd31eb5", "c99e64329e066cc19b2e9962bfa2eb474bb7f9bd1c797421878209c16ca85d80", "55a081aab8afb0cfe83873b812c4495a762bdfc866d74c038d64f73d26944db3", "d830b389b67743e2a2cec5d64af37ce1b991b2781bf2a3fb1e8283bc78e98495", "558d06ff221f4d6e5265465ef2928828a80b498f95d7b1853c4a93d842931ccd", "e967f7ac0177971566b44535eed88a5ffcd0b2ec09de03edbf817f8e110eaf5b", "404df2a8bcf278cae68d9a43b86ff9c2781461ccd227c20aa5e0c5b1db2c0cb1", "f8f5160a6d1e91a3cae676b1e8f8563da2e1cb92869df51c190f0d91f62c81b2", "30a23be3cb0e3feab447217745d537e6c5299f3a95172c234bb84de54169b694", "7c5e66106c5e7cb9e68cd6bec431acdb4b0c9394f2c000a60f0ec558b1667750", "be103be330df170331a747138325af15173704afb808abfd6fc5742c677de241", "d711f0d3914c1bee36324e055ade9058750f2b3d0206f516382702de8eda3757", "519658c8746832821044b074a40661ea1497ca50426888303d8eec43ae8b9d6a", "87cd56d2f6ff774a0c75b029c2a888df7b41319380336f3e4663fe5417229687", "2efe240e7018fd0443262223d286c04120199063f4ef194bdef9af0ab34fa4a8", "8c9a69c950bea4e4beecc286124bf44e2cc78614f767580d59dc22cf94bd23f6", "e7641851ddf32f8fa1937528a2c88a2ef512d45f0a7296c232df6584471ad7ba", "a5beb770e26085eb45a6c5e15acb5844fdda167261e92b20c87dc72c1e0d0a1d", "f54988150d2ba3327251b7a4672ec9bff6fe93f06a7a9f19030f17e693281f11", "6cbfa48ae32ef9b3798f0afe4b86798497a758735dc3ac3e0aa6b42710476f58", "35130215ec7db0e57d5964dacb9aa2ea858e70fc864edd08cf062334823a3ce8", "a935e9ddece310c12baa815a0077e151b300a293f88651d7715ea33151d4016e", "167e10bb4d35aa27a4916de2f846ff5d323a0090c9d37b9c35ca455272ab07be", "a85a1222927f535ca37587d38ab4db2bc940bfc0c6d703003119329d05469a75", "826ab7e279754c009dcd86421f3bdaaa3325bdfff8352788c9f8cfbdddfcfafe", "8336015f3f6ca5d69d5af6dfd521a3e3c024c08121bd42de3a25e5bffb417d42", "194125cbe3f428afbf59da1dd144062ad288011e10beca10ca534f935ea7290d", "3bb36e7a0165d3b6f51b628c18e6b4d9e355b05c5be7a616c881dc395c623c66", "c092f7add11cee0facec22c78badba46fb8688538df1443b7356ceea83bae10d", "31552a2bb308a5778e815fee39b007ce5a633d2e7ba27f08eee2bec6f8d387b7", "373533933e0aae2d2dcffb59b09c49fa64506606aa0359eddf00326ee7bbcc7e", "0f580299cabe89b2dc9809735d14fdabf60cc1b65824bb5f6b5cf283b68210ee", "138c02ce7b36a4d7e82f942a3291bbb357b2e8845b579189ce4c35e01e6b859f", "9dc184037f271c4043b1a6d01d9fbed5d2f156fb561ec2612e5b1cd6aa486083", "cb2ae942cd73059bfe666d9ef78cee5a557cda842c9503df0f7d6b00be815cc5", "f941433597eaa923318023f040798918f743db7bf6d33bc6a13bc8c2e8d3e711", "02a1f2c523e2705b1ab122a06c08bd64080ef76d09d517c56c4e64a3f6626021", "ac3dda90e10c66d26ebb6911924713785f48e8e3d2150aa06ae90db456e1c9a0", "61a39a58e915f953d1ea5c0483f3f45b33ed6f097d76ea6d03d7cf81616f33bd", "b3ff677201fa7543da2f635753305a128c4076409268f1ee53ee824989193e90", "3a2cf44822616731ce40cde80365738e4a4d9af161de3cc2bb3e4f4d3ced8009", "b1a3f23c441a6afece152c4b2e1f1da6fc952f997bc8711a6122e26afafeb5b1", "87123bd9968d64fead15b346ad4ef3b0918aebc596fb7ce8c016c09085985bbd", "eae98597fc685154c882a62073157e1538e37270573de17e7f9bd1af724e1164", "e6dc4cfe6c4b77ebc2a915a49157447a65f85c275ba6c888fddbfae95a2d1c2b", "45ffffa2166eff3624a6b83e5d953669e3639188556330a58656d51ac9008f15", "2b5658b7d00f6d34890e71cf1d57b520e934f6b4087cca5c50604a7c8190488d", "d9b516ec359cccafc8cd2c5721bed137cb0d4b7bb21ba4772baed786a9f059a6", "5005e282fff3675ff3ac18906d5cf9df5b992d0bd95fc9cd3258f386f1c5b5ea", 
"2dea763455c4ae2c662bd9db6529b85cfd397744cb3da1a639925b0fa2b048b4", "497399dc295066a487984ab67cbfec9bf3d65184bc424a7b96268f2c03e6557f", "8f87e5ab712b41e1bc6f74fd74bb8e96323f62f62bedb35ed578992ddbbd5f47", "a5504fbce2afcd7277b0bd94581050195607d5c6701cff8d8e25f05a2d50d81c", "205b534ee10a3633f87c8ab36590d114f516985470ef5851077ac5c95aa83f16", "0d2093c088c08840643f542a44d9e8c389694f03dc9c62a264445de5758e73c3", "b32c1de573b72b62ce6b77d628f758acbfe89ecaa17d3c4c94cad8dff45dd0c9", "6d75d744de2e5dd7ebd3fa47b22ca0d99d4255ee36b5e767567479e0134e0697", "d3228e2e8e5de7178f2afc4b6f86b13287469b55410a164397bc602a0e3bd2db", "5d0a5e9e280f90c7d1f69b69ff3b5bfb94bce299dde8799520fe92912afd2cff", "aafabddd3fe15559af9138aa113c2473fed25a41ee52877a05dc2f9b24416827", "00a9160b3ae08d4066e53992f3cc004b3f6bf3d840613d6e847fb16323ddb270", "1473078fe8d18a5e3f791064c1083783fdc19517a3f2af47777d8778bb2b2f89", "aa2b720f1b7fd016086641fa0c3a6f8133c5f7eb3e9a65cd01ad0b51e7c35719", "ecdd45371e9a284e97416f414d665afa0aec864277a03c333e785e4d6ba6d439", "66a7301e8f3d54360b15fc64610398888301a3caeb685dc71e0ec0fdd175937f", "bd156dc25f23d82eaef927957d4c8c883ec0c80de4c58310313764ccc701d281", "3e8aa53535920d5886779d30687c2350800e9c712c5c2414db463b9c99f3052e", "308a237dad23fa158e7590ce7c75e788ec3ae6be8f6972a867f2eb94f6417c96", "4b12d020e1df286f672fe5d2eac74d95f817d0bbb8bee15a7913ebd9c3a8014a", "303c6f66eaff75bf2145e3bcc343245bcbedb2df46af1fb1e8382473fd2ab402", "d21e974892bd9209a0e2333b22acb55ec2a4abc015755379640cb81d4ba38d82", "40bdb0c10ce735f5e6abf18bf46dd8ef5625ea828fbfc6e380b70809d7cf76dd", "c0b4d28f557f71bcc41eb3573e2afb6da0c127639972bbcb8f4962cff0896f7a", "d2e36f3773f4c313fafb160ac753f1a11b53783920d45552b693f7a37b80bbe2", "3fd160ad0045137801256a22fed09f5f31aacf31f1681fbf6d70bc03972d2253", "2c0c05796774bdbb27c0a6ec5559817b4cd48feee80dff2c540257f86733e397", "5f17ad7ebf06c9ee5f7c86716e2392fd65b773eb6c94f47ac1ea1e12afbacfd0", "0dc16b207a0a9a722cd0b6ce18419eeb2c7809a9f90f3ebca7cc084d6714469d", "8f576a107b37c1309055282825effed4d57dd7e96fd69595ad300c26f77b07a5", "b433f6a339e84a5dbd8e6638a4547dd029b642d1007199948678d7574350b64e", "738768e552067738d3ba97fabe8ea93c0a6ba3b64cc24fab0e9b0c2ce4842982", "533020acb857afd489d4766280665cc484d184ed8eeaacd031e8a5e70b5c4a88", "1d84007a810cb751a5f7207b36cffb1a7f50c1553cbcb0c922c7cb1ada8bb409", "a0b398eb392174cfa24948edcf03c50553a7367c7f6ed50970456484ea09680b", "f156f642f5fd502eb9d0fff911981506c32e6c40b12362e6b3082dffb7fc6550", "2338e90aafd734d44bd50aad3f4d0f4255e2d2505546925e810798626c79f4f4", "c141f87ed878c297468d5be367ce8df0c7d90be4b6be070059eb9345f8250b62", "57106030bb89bd435844ef9baf318c9696af10784a4cf09359bff4b22a4d74eb", "2419aa33614ded3307173c53d6f614b6567d6f50fdc9a99fd32a299efc3de982", "07e60b9438f0b0fc97151c34b781b2a6370cb4d6c48ecbbfe0016a24ebe7bf31", "c09518a1b22c36e3d599af9f956090609fff015a794680a12f730364c721aae4", "65ddb5cf2927237525c5b3d3613eb346660cba60d0478ea917b6f0aa4907d7d9", "1c8935ede01448904447520b90c742615062e404f3525fe5bd667e06f7341c13", "fcb9e121eb526413ec8c827a3dda5e619a85ffbcb7508f0525ac22a121a100f5", "6d0a6422309f64d722ba79f621a4fe3db0ecf16b40366313b146a97d95667307", "4ad7e9d2a199b2eb3cc1cf7bb35e7b03a0ca18bd7382ed29a18b97ed01cd63a0", "69e377941f0263ce3c585789ae6106782d1f15db0b1942a9627b2bd6fe83e13d", "bb51ed5948d59b0dcb2f5cb5f8a27d3f70b8c71660b0d6d4ab658b6a7ca2356c", "6695e79e0e07fde8c05da60736ba373d55271d5a7c6da2a2c7d30e957a46e7e7", "48bd888c98b158b5c82b148f091a91bb1881b9a1931227f0a5269649a8eebaff", "771382cfa5138ccd32fdddad18e3eb8f1a06eee10704248d1e4d49f32872afe6", 
"176bb2e118aeb292912fa1903470621ae385e819a50c580301b33165666f3c7d", "15596f8c5f8fb397e5214e6f5eaf286a813b6e5d8bebae2bad1d550511f92840", "bdacbb2d763783f1ac51fd2477276543f79db13a434697a2aedd8523a1427e1f", "ca0e3b746890e8d626840d445989bb0e703f3e4c792aaa49a6b8952ea7696063", "1319af4c3801a463f0e1b7a9cfe2cfbb79e769fb0daed1a2868ade7665765ea6", "172f67582c5270cf0ef8264ef64bc5e17a53aac87693eff1860dfe56aea4209e", "0462589f719e853654d1ca00038dfc806ae7acb9bb5a3f9e6d458f3d4206f532", "f7480a6f46b553517f41238cbd5a6069eab164fd1512e1685f9bddf5c1afa59c", "a5cfdbe5c0b38b0904b5fe6afc2ce583dce1dbc7b4cd88224cbd88efa30b0291", "6f07b548ced6405ef78693332d516d041780f85f0771cfbaba8bbb86a6cdfb7d", "de0184abac150e780e26f1e7de09da64dfee433e8c9a9efe8d93a673350016b8", "8e7cee539c6315ad939a9495e40e7e70e2d07f6b2920cdbcc689457cd9e11997", "0088ccc025bf814e8098607bfbd17448024495a62610700b6000ec448afc1ca3", "d3a0503fdb8802e979871dca7d3c10a928cedf1978e44f42ecb72b96ada13dc3", "add0b405d079dd0c682a1e5026ef1a5b989b0bdf044d2db28249b4d51a74c5dc"} + + // Convert each string to a [32]byte + for i := 0; i < 256; i++ { + // Decode hex string to bytes + mainnetFixedHashes[i] = gethCommon.HexToHash(mainnetHashes[i]) + testnetFixedHashes[i] = gethCommon.HexToHash(testnetHashes[i]) + } + + fixedHashes = make(map[flow.ChainID][256]gethCommon.Hash) + fixedHashes[flow.Mainnet] = mainnetFixedHashes + fixedHashes[flow.Testnet] = testnetFixedHashes +} + +func init() { + generateFixedHashes() +} diff --git a/fvm/evm/offchain/blocks/block_proposal.go b/fvm/evm/offchain/blocks/block_proposal.go new file mode 100644 index 00000000000..cd1d68ed517 --- /dev/null +++ b/fvm/evm/offchain/blocks/block_proposal.go @@ -0,0 +1,34 @@ +package blocks + +import ( + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/types" +) + +func ReconstructProposal( + blockEvent *events.BlockEventPayload, + results []*types.Result, +) *types.BlockProposal { + receipts := make([]types.LightReceipt, 0, len(results)) + txHashes := make(types.TransactionHashes, 0, len(results)) + + for _, result := range results { + receipts = append(receipts, *result.LightReceipt()) + txHashes = append(txHashes, result.TxHash) + } + + return &types.BlockProposal{ + Block: types.Block{ + ParentBlockHash: blockEvent.ParentBlockHash, + Height: blockEvent.Height, + Timestamp: blockEvent.Timestamp, + TotalSupply: blockEvent.TotalSupply.Big(), + ReceiptRoot: blockEvent.ReceiptRoot, + TransactionHashRoot: blockEvent.TransactionHashRoot, + TotalGasUsed: blockEvent.TotalGasUsed, + PrevRandao: blockEvent.PrevRandao, + }, + Receipts: receipts, + TxHashes: txHashes, + } +} diff --git a/fvm/evm/offchain/blocks/blocks.go b/fvm/evm/offchain/blocks/blocks.go new file mode 100644 index 00000000000..35b8c39638f --- /dev/null +++ b/fvm/evm/offchain/blocks/blocks.go @@ -0,0 +1,147 @@ +package blocks + +import ( + "fmt" + + gethCommon "github.com/ethereum/go-ethereum/common" + + "github.com/onflow/flow-go/fvm/evm/handler" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +const BlockStoreLatestBlockMetaKey = "LatestBlockMeta" + +// Blocks facilitates access to the recent block hash values +// and also the latest executed block meta data +type Blocks struct { + chainID flow.ChainID + storage types.BackendStorage + rootAddress flow.Address + bhl *handler.BlockHashList +} + +var _ types.BlockSnapshot = (*Blocks)(nil) + +// NewBlocks constructs a new blocks type +func NewBlocks( + chainID flow.ChainID, + rootAddress flow.Address, + storage 
types.BackendStorage, +) (*Blocks, error) { + var err error + blocks := &Blocks{ + chainID: chainID, + storage: storage, + rootAddress: rootAddress, + } + blocks.bhl, err = handler.NewBlockHashList( + storage, + rootAddress, + handler.BlockHashListCapacity, + ) + if err != nil { + return nil, err + } + // if empty insert genesis block hash + if blocks.bhl.IsEmpty() { + genesis := types.GenesisBlock(chainID) + err = blocks.PushBlockMeta( + NewMeta( + genesis.Height, + genesis.Timestamp, + genesis.PrevRandao, + )) + if err != nil { + return nil, err + } + // push block hash + err = blocks.PushBlockHash( + genesis.Height, + types.GenesisBlockHash(chainID)) + if err != nil { + return nil, err + } + } + return blocks, nil +} + +// PushBlockMeta pushes a new block meta into the storage +func (b *Blocks) PushBlockMeta( + meta *Meta, +) error { + // check height order + if meta.Height > 0 { + bm, err := b.LatestBlock() + if err != nil { + return err + } + if meta.Height != bm.Height+1 { + return fmt.Errorf("out of order block meta push! got: %d, expected: %d", meta.Height, bm.Height+1) + } + } + return b.storeBlockMetaData(meta) +} + +// PushBlockHash pushes a new block hash into the storage +func (b *Blocks) PushBlockHash( + height uint64, + hash gethCommon.Hash, +) error { + return b.bhl.Push(height, hash) +} + +func (b *Blocks) LatestBlock() (*Meta, error) { + return b.loadBlockMetaData() +} + +// BlockHash returns the block hash for the given height +func (b *Blocks) BlockHash(height uint64) (gethCommon.Hash, error) { + _, hash, err := b.bhl.BlockHashByHeight(height) + return hash, err +} + +// BlockContext constructs a block context for the latest block +func (b *Blocks) BlockContext() (types.BlockContext, error) { + bm, err := b.LatestBlock() + if err != nil { + return types.BlockContext{}, err + } + + return NewBlockContext( + b.chainID, + bm.Height, + bm.Timestamp, + func(n uint64) gethCommon.Hash { + hash, err := b.BlockHash(n) + if err != nil { + panic(err) + } + return hash + }, + bm.Random, + nil, + ) +} + +// storeBlockMetaData stores the block metadata into storage +func (b *Blocks) storeBlockMetaData(bm *Meta) error { + // store the encoded data into backend + return b.storage.SetValue( + b.rootAddress[:], + []byte(BlockStoreLatestBlockMetaKey), + bm.Encode(), + ) +} + +// loadBlockMetaData loads the block metadata from the storage +func (b *Blocks) loadBlockMetaData() (*Meta, error) { + data, err := b.storage.GetValue( + b.rootAddress[:], + []byte(BlockStoreLatestBlockMetaKey), + ) + if err != nil { + return nil, err + } + return MetaFromEncoded(data) +} diff --git a/fvm/evm/offchain/blocks/blocks_test.go b/fvm/evm/offchain/blocks/blocks_test.go new file mode 100644 index 00000000000..a5268ca66b6 --- /dev/null +++ b/fvm/evm/offchain/blocks/blocks_test.go @@ -0,0 +1,55 @@ +package blocks_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/offchain/blocks" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +func TestBlocks(t *testing.T) { + storage := testutils.GetSimpleValueStore() + chainID := flow.Emulator.Chain().ChainID() + rootAddr := flow.Address{1, 2, 3, 4} + blks, err := blocks.NewBlocks(chainID, rootAddr, storage) + require.NoError(t, err) + + // no insertion - genesis block + bm, err := blks.LatestBlock() + require.NoError(t, err) + genesis := types.GenesisBlock(chainID) + require.Equal(t, genesis.Height, bm.Height) +
require.Equal(t, genesis.Timestamp, bm.Timestamp) + require.Equal(t, genesis.PrevRandao, bm.Random) + + h, err := blks.BlockHash(0) + require.NoError(t, err) + expectedHash, err := genesis.Hash() + require.NoError(t, err) + require.Equal(t, expectedHash, h) + + // push next block + height := uint64(1) + timestamp := uint64(2) + random := testutils.RandomCommonHash(t) + hash := testutils.RandomCommonHash(t) + + err = blks.PushBlockMeta(blocks.NewMeta(height, timestamp, random)) + require.NoError(t, err) + err = blks.PushBlockHash(height, hash) + require.NoError(t, err) + + // check values + h, err = blks.BlockHash(1) + require.NoError(t, err) + require.Equal(t, hash, h) + bm, err = blks.LatestBlock() + require.NoError(t, err) + require.Equal(t, height, bm.Height) + require.Equal(t, timestamp, bm.Timestamp) + require.Equal(t, random, bm.Random) +} diff --git a/fvm/evm/offchain/blocks/meta.go b/fvm/evm/offchain/blocks/meta.go new file mode 100644 index 00000000000..9af31333a17 --- /dev/null +++ b/fvm/evm/offchain/blocks/meta.go @@ -0,0 +1,81 @@ +package blocks + +import ( + "encoding/binary" + "fmt" + + gethCommon "github.com/ethereum/go-ethereum/common" +) + +const ( + heightEncodingSize = 8 + timestampEncodingSize = 8 + randomEncodingSize = 32 + metaEncodingSize = heightEncodingSize + + timestampEncodingSize + + randomEncodingSize +) + +// Meta holds metadata about a block +type Meta struct { + Height uint64 + Timestamp uint64 + Random gethCommon.Hash +} + +// NewMeta constructs a new block meta +func NewMeta( + height uint64, + timestamp uint64, + random gethCommon.Hash, +) *Meta { + return &Meta{ + Height: height, + Timestamp: timestamp, + Random: random, + } +} + +// Encode encodes a meta +func (bm *Meta) Encode() []byte { + // encode metadata + buffer := make([]byte, metaEncodingSize) + pos := 0 + + // encode height + binary.BigEndian.PutUint64(buffer[pos:], bm.Height) + pos += heightEncodingSize + + // encode timestamp + binary.BigEndian.PutUint64(buffer[pos:], bm.Timestamp) + pos += timestampEncodingSize + + // encode random + copy(buffer[pos:pos+randomEncodingSize], bm.Random[:]) + + return buffer +} + +// MetaFromEncoded constructs a Meta from encoded data +func MetaFromEncoded(data []byte) (*Meta, error) { + // check the data size + if len(data) < metaEncodingSize { + return nil, fmt.Errorf("encoded input too short: %d < %d", len(data), metaEncodingSize) + } + + bm := &Meta{} + + pos := 0 + // decode height + bm.Height = binary.BigEndian.Uint64(data[pos:]) + pos += heightEncodingSize + + // decode timestamp + bm.Timestamp = binary.BigEndian.Uint64(data[pos:]) + pos += timestampEncodingSize + + // decode random + bm.Random = gethCommon.BytesToHash(data[pos:]) + + return bm, nil +} diff --git a/fvm/evm/offchain/blocks/meta_test.go b/fvm/evm/offchain/blocks/meta_test.go new file mode 100644 index 00000000000..a777200533d --- /dev/null +++ b/fvm/evm/offchain/blocks/meta_test.go @@ -0,0 +1,18 @@ +package blocks_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/offchain/blocks" + "github.com/onflow/flow-go/fvm/evm/testutils" +) + +func TestBlockMetaEncodingDecoding(t *testing.T) { + bm := blocks.NewMeta(1, 2, testutils.RandomCommonHash(t)) + + ret, err := blocks.MetaFromEncoded(bm.Encode()) + require.NoError(t, err) + require.Equal(t, bm, ret) +} diff --git a/fvm/evm/offchain/blocks/provider.go b/fvm/evm/offchain/blocks/provider.go new file mode 100644 index 00000000000..b9da39bd468 --- /dev/null +++
b/fvm/evm/offchain/blocks/provider.go
@@ -0,0 +1,118 @@
+package blocks
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/fvm/evm/events"
+	"github.com/onflow/flow-go/fvm/evm/handler"
+	"github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// BasicProvider implements a ledger-backed basic block snapshot provider.
+// It assumes sequential progress on blocks and expects an
+// OnBlockReceived call before block execution and
+// a follow-up OnBlockExecuted call after block execution.
+type BasicProvider struct {
+	chainID            flow.ChainID
+	blks               *Blocks
+	rootAddr           flow.Address
+	storage            types.BackendStorage
+	latestBlockPayload *events.BlockEventPayload
+}
+
+var _ types.BlockSnapshotProvider = (*BasicProvider)(nil)
+
+func NewBasicProvider(
+	chainID flow.ChainID,
+	storage types.BackendStorage,
+	rootAddr flow.Address,
+) (*BasicProvider, error) {
+	blks, err := NewBlocks(chainID, rootAddr, storage)
+	if err != nil {
+		return nil, err
+	}
+	return &BasicProvider{
+		chainID:  chainID,
+		blks:     blks,
+		rootAddr: rootAddr,
+		storage:  storage,
+	}, nil
+}
+
+// GetSnapshotAt returns a block snapshot at the given height.
+// A snapshot at a height is not available until `OnBlockReceived` is called for that height.
+func (p *BasicProvider) GetSnapshotAt(height uint64) (
+	types.BlockSnapshot,
+	error,
+) {
+	if p.latestBlockPayload.Height != height {
+		return nil, fmt.Errorf("active block height doesn't match expected: %d, got: %d", p.latestBlockPayload.Height, height)
+	}
+	return p.blks, nil
+}
+
+// OnBlockReceived should be called before executing blocks.
+func (p *BasicProvider) OnBlockReceived(blockEvent *events.BlockEventPayload) error {
+	p.latestBlockPayload = blockEvent
+	// push the new block meta
+	// it has to happen before execution so the block context
+	// can be constructed properly
+	return p.blks.PushBlockMeta(
+		NewMeta(
+			blockEvent.Height,
+			blockEvent.Timestamp,
+			blockEvent.PrevRandao,
+		),
+	)
+}
+
+// OnBlockExecuted should be called after executing blocks.
+func (p *BasicProvider) OnBlockExecuted(
+	height uint64,
+	resCol types.ReplayResultCollector,
+	blockProposal *types.BlockProposal,
+) error {
+	// we push the block hash after execution, so the behaviour of the
+	// blockhash is identical to the evm.handler.
+	if p.latestBlockPayload.Height != height {
+		return fmt.Errorf("active block height doesn't match expected: %d, got: %d", p.latestBlockPayload.Height, height)
+	}
+
+	blockBytes, err := blockProposal.Block.ToBytes()
+	if err != nil {
+		return types.NewFatalError(err)
+	}
+
+	// do the same as handler.CommitBlockProposal
+	err = p.storage.SetValue(
+		p.rootAddr[:],
+		[]byte(handler.BlockStoreLatestBlockKey),
+		blockBytes,
+	)
+	if err != nil {
+		return err
+	}
+
+	blockProposalBytes, err := blockProposal.ToBytes()
+	if err != nil {
+		return types.NewFatalError(err)
+	}
+
+	hash := p.latestBlockPayload.Hash
+	// update the latest block proposal
+	err = p.storage.SetValue(
+		p.rootAddr[:],
+		[]byte(handler.BlockStoreLatestBlockProposalKey),
+		blockProposalBytes,
+	)
+	if err != nil {
+		return err
+	}
+
+	// update block hash list
+	return p.blks.PushBlockHash(
+		p.latestBlockPayload.Height,
+		hash,
+	)
+}
diff --git a/fvm/evm/offchain/query/view.go b/fvm/evm/offchain/query/view.go
new file mode 100644
index 00000000000..24fff307f6f
--- /dev/null
+++ b/fvm/evm/offchain/query/view.go
@@ -0,0 +1,387 @@
+package query
+
+import (
+	"errors"
+	"fmt"
+	"math/big"
+
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	gethTypes "github.com/ethereum/go-ethereum/core/types"
+	gethCrypto "github.com/ethereum/go-ethereum/crypto"
+	gethTracers "github.com/ethereum/go-ethereum/eth/tracers"
+	"github.com/holiman/uint256"
+
+	"github.com/onflow/flow-go/fvm/evm/emulator"
+	"github.com/onflow/flow-go/fvm/evm/emulator/state"
+	"github.com/onflow/flow-go/fvm/evm/offchain/storage"
+	"github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// View provides query capabilities over
+// a specific state of the EVM chain.
+type View struct {
+	chainID         flow.ChainID
+	rootAddr        flow.Address
+	storage         *storage.EphemeralStorage
+	blockSnapshot   types.BlockSnapshot
+	tracer          *gethTracers.Tracer
+	extraPCs        []types.PrecompiledContract
+	maxCallGasLimit uint64
+}
+
+// NewView constructs a new view.
+func NewView(
+	chainID flow.ChainID,
+	rootAddr flow.Address,
+	storage *storage.EphemeralStorage,
+	blockSnapshot types.BlockSnapshot,
+	maxCallGasLimit uint64,
+) *View {
+	return &View{
+		chainID:         chainID,
+		rootAddr:        rootAddr,
+		storage:         storage,
+		blockSnapshot:   blockSnapshot,
+		maxCallGasLimit: maxCallGasLimit,
+	}
+}
+
+// GetBalance returns the balance for the given address;
+// can be used for the `eth_getBalance` endpoint
+func (v *View) GetBalance(addr gethCommon.Address) (*big.Int, error) {
+	bv, err := state.NewBaseView(v.storage, v.rootAddr)
+	if err != nil {
+		return nil, err
+	}
+	bal, err := bv.GetBalance(addr)
+	if err != nil {
+		return nil, err
+	}
+	return bal.ToBig(), nil
+}
+
+// GetNonce returns the nonce for the given address;
+// can be used for the `eth_getTransactionCount` endpoint
+func (v *View) GetNonce(addr gethCommon.Address) (uint64, error) {
+	bv, err := state.NewBaseView(v.storage, v.rootAddr)
+	if err != nil {
+		return 0, err
+	}
+	return bv.GetNonce(addr)
+}
+
+// GetCode returns the code for the given address;
+// can be used for the `eth_getCode` endpoint
+func (v *View) GetCode(addr gethCommon.Address) ([]byte, error) {
+	bv, err := state.NewBaseView(v.storage, v.rootAddr)
+	if err != nil {
+		return nil, err
+	}
+	return bv.GetCode(addr)
+}
+
+// GetCodeHash returns the code hash for the given address
+func (v *View) GetCodeHash(addr gethCommon.Address) (gethCommon.Hash, error) {
+	bv, err := state.NewBaseView(v.storage, v.rootAddr)
+	if err != nil {
+		return gethCommon.Hash{}, err
+	}
+	return bv.GetCodeHash(addr)
+}
+
+// GetSlab returns the storage slot value for the given address and key;
+// can be used for the `eth_getStorageAt` endpoint
+func (v *View) GetSlab(addr gethCommon.Address, key gethCommon.Hash) (gethCommon.Hash, error) {
+	bv, err := state.NewBaseView(v.storage, v.rootAddr)
+	if err != nil {
+		return gethCommon.Hash{}, err
+	}
+	return bv.GetState(types.SlotAddress{
+		Address: addr,
+		Key:     key,
+	})
+}
+
+// DryCall runs a call off-chain and returns the results;
+// it accepts state override and extra precompiled-contract options,
+// as well as a custom tracer.
+func (v *View) DryCall(
+	from gethCommon.Address,
+	to gethCommon.Address,
+	data []byte,
+	value *big.Int,
+	gasLimit uint64,
+	opts ...DryCallOption,
+) (*types.Result, error) {
+
+	if gasLimit > v.maxCallGasLimit {
+		return nil, fmt.Errorf(
+			"gas limit is bigger than max gas limit allowed %d > %d",
+			gasLimit, v.maxCallGasLimit,
+		)
+	}
+
+	// apply all the options
+	for _, op := range opts {
+		err := op(v)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// create context
+	ctx, err := v.blockSnapshot.BlockContext()
+	if err != nil {
+		return nil, err
+	}
+	ctx.Tracer = v.tracer
+	ctx.ExtraPrecompiledContracts = v.extraPCs
+
+	// create emulator
+	em := emulator.NewEmulator(v.storage, v.rootAddr)
+
+	// create a new block view
+	bv, err := em.NewBlockView(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := bv.DirectCall(
+		&types.DirectCall{
+			From:     types.NewAddress(from),
+			To:       types.NewAddress(to),
+			Data:     data,
+			Value:    value,
+			GasLimit: gasLimit,
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+// DryCallOption captures an option
+// to be applied before the execution of a dry call.
+type DryCallOption func(v *View) error
+
+// WithStateOverrideBalance constructs a dry call option
+// that replaces the balance of an address before the execution of a dry call.
+func WithStateOverrideBalance(
+	addr gethCommon.Address,
+	balance *big.Int,
+) DryCallOption {
+	return func(v *View) error {
+		baseView, err := state.NewBaseView(v.storage, v.rootAddr)
+		if err != nil {
+			return err
+		}
+		nonce, err := baseView.GetNonce(addr)
+		if err != nil {
+			return err
+		}
+		code, err := baseView.GetCode(addr)
+		if err != nil {
+			return err
+		}
+		codeHash, err := baseView.GetCodeHash(addr)
+		if err != nil {
+			return err
+		}
+
+		convertedBalance, overflow := uint256.FromBig(balance)
+		if overflow {
+			return errors.New("balance too large")
+		}
+
+		err = baseView.UpdateAccount(addr, convertedBalance, nonce, code, codeHash)
+		if err != nil {
+			return err
+		}
+		return baseView.Commit()
+	}
+}
+
+// WithStateOverrideNonce constructs a dry call option
+// that replaces the nonce of an address before the execution of a dry call.
+func WithStateOverrideNonce(
+	addr gethCommon.Address,
+	nonce uint64,
+) DryCallOption {
+	return func(v *View) error {
+		baseView, err := state.NewBaseView(v.storage, v.rootAddr)
+		if err != nil {
+			return err
+		}
+		balance, err := baseView.GetBalance(addr)
+		if err != nil {
+			return err
+		}
+		code, err := baseView.GetCode(addr)
+		if err != nil {
+			return err
+		}
+		codeHash, err := baseView.GetCodeHash(addr)
+		if err != nil {
+			return err
+		}
+		err = baseView.UpdateAccount(addr, balance, nonce, code, codeHash)
+		if err != nil {
+			return err
+		}
+		return baseView.Commit()
+	}
+}
+
+// WithStateOverrideCode constructs a dry call option
+// that replaces the code of an address before the dry call.
+func WithStateOverrideCode(
+	addr gethCommon.Address,
+	code []byte,
+) DryCallOption {
+	return func(v *View) error {
+		baseView, err := state.NewBaseView(v.storage, v.rootAddr)
+		if err != nil {
+			return err
+		}
+		balance, err := baseView.GetBalance(addr)
+		if err != nil {
+			return err
+		}
+		nonce, err := baseView.GetNonce(addr)
+		if err != nil {
+			return err
+		}
+		codeHash := gethTypes.EmptyCodeHash
+		if len(code) > 0 {
+			codeHash = gethCrypto.Keccak256Hash(code)
+		}
+		err = baseView.UpdateAccount(addr, balance, nonce, code, codeHash)
+		if err != nil {
+			return err
+		}
+		return baseView.Commit()
+	}
+}
+
+// WithStateOverrideState constructs a dry call option
+// that overrides all slots in the account storage before executing the call.
+func WithStateOverrideState(
+	addr gethCommon.Address,
+	slots map[gethCommon.Hash]gethCommon.Hash,
+) DryCallOption {
+	return func(v *View) error {
+		baseView, err := state.NewBaseView(v.storage, v.rootAddr)
+		if err != nil {
+			return err
+		}
+
+		// This forces the account of the slots to be created, otherwise we
+		// might add slots without its owner account being created.
+		if err := setupAccount(addr, baseView); err != nil {
+			return err
+		}
+
+		// purge all the slots
+		if err = baseView.PurgeAllSlotsOfAnAccount(addr); err != nil {
+			return err
+		}
+		// no need to sort; this is an off-chain operation
+		for k, v := range slots {
+			err = baseView.UpdateSlot(types.SlotAddress{
+				Address: addr,
+				Key:     k,
+			}, v)
+			if err != nil {
+				return err
+			}
+		}
+		return baseView.Commit()
+	}
+}
+
+// WithStateOverrideStateDiff constructs a dry call option
+// that overrides slots of an account before executing the call.
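+//
+// A minimal usage sketch (the addresses, slot, and value below are
+// hypothetical, not part of this API):
+//
+//	slot := gethCommon.Hash{0x0}
+//	val := gethCommon.HexToHash("0x2a")
+//	res, err := view.DryCall(
+//		from, to, calldata, big.NewInt(0), 100_000,
+//		query.WithStateOverrideStateDiff(
+//			contractAddr,
+//			map[gethCommon.Hash]gethCommon.Hash{slot: val},
+//		),
+//	)
+//
+// Unlike WithStateOverrideState above, which purges all existing slots of the
+// account first, this option only overwrites the listed slots and leaves the
+// rest of the account storage untouched.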
+func WithStateOverrideStateDiff( + addr gethCommon.Address, + slots map[gethCommon.Hash]gethCommon.Hash, +) DryCallOption { + return func(v *View) error { + baseView, err := state.NewBaseView(v.storage, v.rootAddr) + if err != nil { + return err + } + + // This forces the account of the slots to be created, otherwise we + // might add slots without its owner account being created. + if err := setupAccount(addr, baseView); err != nil { + return err + } + + // no need to be sorted this is off-chain operation + for k, v := range slots { + err = baseView.UpdateSlot(types.SlotAddress{ + Address: addr, + Key: k, + }, v) + if err != nil { + return err + } + } + return baseView.Commit() + } +} + +// WithTracer constructs a dry call option +// that allows running the dry call with the +// custom tracer. +func WithTracer( + tracer *gethTracers.Tracer, +) DryCallOption { + return func(v *View) error { + v.tracer = tracer + return nil + } +} + +// WithExtraPrecompiledContracts constructs a dry call option +// that allows adding the precompiled contracts +// while executing the dry-call. +// +// this method can be used with remote PC caller for cadence arch calls +func WithExtraPrecompiledContracts(pcs []types.PrecompiledContract) DryCallOption { + return func(v *View) error { + v.extraPCs = pcs + return nil + } +} + +// setupAccount updates an account's metadata. If the account does not exist, +// it will be created and initialized with the proper default values. +func setupAccount(addr gethCommon.Address, baseView *state.BaseView) error { + balance, err := baseView.GetBalance(addr) + if err != nil { + return err + } + nonce, err := baseView.GetNonce(addr) + if err != nil { + return err + } + code, err := baseView.GetCode(addr) + if err != nil { + return err + } + codeHash := gethTypes.EmptyCodeHash + if len(code) > 0 { + codeHash = gethCrypto.Keccak256Hash(code) + } + + err = baseView.UpdateAccount(addr, balance, nonce, code, codeHash) + if err != nil { + return err + } + + return nil +} diff --git a/fvm/evm/offchain/query/viewProvider.go b/fvm/evm/offchain/query/viewProvider.go new file mode 100644 index 00000000000..7ce0195e359 --- /dev/null +++ b/fvm/evm/offchain/query/viewProvider.go @@ -0,0 +1,61 @@ +package query + +import ( + "github.com/onflow/flow-go/fvm/evm/offchain/storage" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +// ViewProvider constructs views +// based on the requirements +type ViewProvider struct { + chainID flow.ChainID + rootAddr flow.Address + storageProvider types.StorageProvider + blockProvider types.BlockSnapshotProvider + maxCallGasLimit uint64 +} + +// NewViewProvider constructs a new ViewProvider +func NewViewProvider( + chainID flow.ChainID, + rootAddr flow.Address, + sp types.StorageProvider, + bp types.BlockSnapshotProvider, + maxCallGasLimit uint64, +) *ViewProvider { + return &ViewProvider{ + chainID: chainID, + storageProvider: sp, + blockProvider: bp, + rootAddr: rootAddr, + maxCallGasLimit: maxCallGasLimit, + } +} + +// GetBlockView returns the block view for the given height (at the end of a block!) +// The `GetSnapshotAt` function of `storageProvider`, will return +// the block state at its start, before any transaction executions. +// This is the intended functionality, when replaying & verifying blocks. +// However, when reading the state from a block, we are interested +// in its end state, after all transaction executions. +// That is why we fetch the block snapshot at the next height. 
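+//
+// A usage sketch for serving an `eth_call`-style read against a historic
+// block (the provider arguments and call parameters here are illustrative
+// assumptions):
+//
+//	vp := query.NewViewProvider(chainID, rootAddr, storageProvider, blockProvider, 5_000_000)
+//	view, err := vp.GetBlockView(height)
+//	if err != nil {
+//		return err
+//	}
+//	res, err := view.DryCall(from, to, calldata, big.NewInt(0), 1_000_000)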
+func (evp *ViewProvider) GetBlockView(height uint64) (*View, error) { + readOnly, err := evp.storageProvider.GetSnapshotAt(height + 1) + if err != nil { + return nil, err + } + blockSnapshot, err := evp.blockProvider.GetSnapshotAt(height) + if err != nil { + return nil, err + } + return &View{ + chainID: evp.chainID, + rootAddr: evp.rootAddr, + maxCallGasLimit: evp.maxCallGasLimit, + storage: storage.NewEphemeralStorage( + storage.NewReadOnlyStorage(readOnly), + ), + blockSnapshot: blockSnapshot, + }, nil +} diff --git a/fvm/evm/offchain/query/view_test.go b/fvm/evm/offchain/query/view_test.go new file mode 100644 index 00000000000..dc524203d24 --- /dev/null +++ b/fvm/evm/offchain/query/view_test.go @@ -0,0 +1,406 @@ +package query_test + +import ( + "bytes" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + + "github.com/onflow/flow-go/fvm/evm/handler" + "github.com/onflow/flow-go/fvm/evm/offchain/blocks" + "github.com/onflow/flow-go/fvm/evm/offchain/query" + "github.com/onflow/flow-go/fvm/evm/offchain/storage" + "github.com/onflow/flow-go/fvm/evm/precompiles" + . "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/testutils/contracts" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +func TestView(t *testing.T) { + + const chainID = flow.Emulator + RunWithTestBackend(t, func(backend *TestBackend) { + RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + RunWithDeployedContract(t, + GetStorageTestContract(t), backend, rootAddr, func(testContract *TestContract) { + RunWithEOATestAccount(t, backend, rootAddr, func(testAccount *EOATestAccount) { + + h := SetupHandler(chainID, backend, rootAddr) + + blks, err := blocks.NewBlocks(chainID, rootAddr, backend) + require.NoError(t, err) + + maxCallGasLimit := uint64(5_000_000) + view := query.NewView( + chainID, + rootAddr, + storage.NewEphemeralStorage( + backend, + ), + blks, + maxCallGasLimit, + ) + + // test make balance query + expectedBalance := h.AccountByAddress(testAccount.Address(), false).Balance() + bal, err := view.GetBalance(testAccount.Address().ToCommon()) + require.NoError(t, err) + require.True(t, types.BalancesAreEqual(expectedBalance, bal)) + + // test make nonce query + expectedNonce := h.AccountByAddress(testAccount.Address(), false).Nonce() + nonce, err := view.GetNonce(testAccount.Address().ToCommon()) + require.NoError(t, err) + require.Equal(t, expectedNonce, nonce) + + // test make code query + expectedCode := h.AccountByAddress(testContract.DeployedAt, false).Code() + code, err := view.GetCode(testContract.DeployedAt.ToCommon()) + require.NoError(t, err) + require.True(t, bytes.Equal(expectedCode[:], code[:])) + + // test make code hash query + expectedCodeHash := h.AccountByAddress(testContract.DeployedAt, false).CodeHash() + codeHash, err := view.GetCodeHash(testContract.DeployedAt.ToCommon()) + require.NoError(t, err) + require.True(t, bytes.Equal(expectedCodeHash[:], codeHash[:])) + + // test dry call + // make call to test contract - set + expectedFlowHeight := uint64(3) + pc := &TestPrecompiledContract{ + RequiredGasFunc: func(input []byte) uint64 { + return 1 + }, + RunFunc: func(input []byte) ([]byte, error) { + output := make([]byte, 32) + err := precompiles.EncodeUint64(expectedFlowHeight, output, 0) + return output, err + }, + AddressFunc: func() types.Address { + // cadence arch address + return handler.NewAddressAllocator().AllocatePrecompileAddress(1) + 
}, + } + res, err := view.DryCall( + testAccount.Address().ToCommon(), + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "verifyArchCallToFlowBlockHeight", expectedFlowHeight), + big.NewInt(0), + uint64(1_000_000), + query.WithExtraPrecompiledContracts( + []types.PrecompiledContract{pc}), + ) + require.NoError(t, err) + require.NoError(t, res.ValidationError) + require.NoError(t, res.VMError) + + // test dry call with balance state overrides + newBalance := big.NewInt(3000) + res, err = view.DryCall( + testAccount.Address().ToCommon(), + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, + "checkBalance", + testAccount.Address().ToCommon(), + newBalance, + ), + big.NewInt(0), + uint64(1_000_000), + query.WithStateOverrideBalance( + testAccount.Address().ToCommon(), + newBalance, + ), + ) + require.NoError(t, err) + require.NoError(t, res.ValidationError) + require.NoError(t, res.VMError) + + // test max gas limit + _, err = view.DryCall( + testAccount.Address().ToCommon(), + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, + "store", + big.NewInt(2), + ), + big.NewInt(0), + maxCallGasLimit+1, + ) + require.Error(t, err) + require.ErrorContains( + t, + err, + "gas limit is bigger than max gas limit allowed 5000001 > 5000000", + ) + }) + }) + }) + }) +} + +func TestViewStateOverrides(t *testing.T) { + + const chainID = flow.Emulator + RunWithTestBackend(t, func(backend *TestBackend) { + RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + RunWithDeployedContract(t, + GetStorageTestContract(t), backend, rootAddr, func(testContract *TestContract) { + RunWithEOATestAccount(t, backend, rootAddr, func(testAccount *EOATestAccount) { + + t.Run("DryCall with WithStateOverrideState for existing account", func(t *testing.T) { + blks, err := blocks.NewBlocks(chainID, rootAddr, backend) + require.NoError(t, err) + + maxCallGasLimit := uint64(5_000_000) + view := query.NewView( + chainID, + rootAddr, + storage.NewEphemeralStorage( + backend, + ), + blks, + maxCallGasLimit, + ) + + newNumberValue := common.HexToHash("0x32") // 50 in hex + res, err := view.DryCall( + testAccount.Address().ToCommon(), + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "retrieve"), + big.NewInt(0), + uint64(1_000_000), + query.WithStateOverrideState( + testContract.DeployedAt.ToCommon(), + map[common.Hash]common.Hash{ + {0x0}: newNumberValue, + }, + ), + ) + require.NoError(t, err) + require.NoError(t, res.ValidationError) + require.NoError(t, res.VMError) + require.Equal( + t, + newNumberValue, + common.BytesToHash(res.ReturnedData), + ) + }) + + t.Run("DryCall with WithStateOverrideStateDiff for existing account", func(t *testing.T) { + blks, err := blocks.NewBlocks(chainID, rootAddr, backend) + require.NoError(t, err) + + maxCallGasLimit := uint64(5_000_000) + view := query.NewView( + chainID, + rootAddr, + storage.NewEphemeralStorage( + backend, + ), + blks, + maxCallGasLimit, + ) + + newNumberValue := common.HexToHash("0x64") // 100 in hex + res, err := view.DryCall( + testAccount.Address().ToCommon(), + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "retrieve"), + big.NewInt(0), + uint64(1_000_000), + query.WithStateOverrideStateDiff( + testContract.DeployedAt.ToCommon(), + map[common.Hash]common.Hash{ + {0x0}: newNumberValue, + }, + ), + ) + require.NoError(t, err) + require.NoError(t, res.ValidationError) + require.NoError(t, res.VMError) + require.Equal( + t, + newNumberValue, + 
common.BytesToHash(res.ReturnedData), + ) + }) + + t.Run("DryCall with WithStateOverrideState for non-existing account", func(t *testing.T) { + blks, err := blocks.NewBlocks(chainID, rootAddr, backend) + require.NoError(t, err) + + maxCallGasLimit := uint64(5_000_000) + view := query.NewView( + chainID, + rootAddr, + storage.NewEphemeralStorage( + backend, + ), + blks, + maxCallGasLimit, + ) + + newNumberValue := common.HexToHash("0x32") // 50 in hex + res, err := view.DryCall( + testAccount.Address().ToCommon(), + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "retrieve"), + big.NewInt(0), + uint64(1_000_000), + query.WithStateOverrideState( + // this is a random address, without any code in it + common.HexToAddress("0xD370975A6257fE8CeF93101799D602D30838BAad"), + map[common.Hash]common.Hash{ + {0x0}: newNumberValue, + }, + ), + ) + require.NoError(t, err) + require.NoError(t, res.ValidationError) + require.NoError(t, res.VMError) + require.Equal( + t, + common.Hash{0x0}, + common.BytesToHash(res.ReturnedData), + ) + }) + + t.Run("DryCall with WithStateOverrideStateDiff for existing account", func(t *testing.T) { + blks, err := blocks.NewBlocks(chainID, rootAddr, backend) + require.NoError(t, err) + + maxCallGasLimit := uint64(5_000_000) + view := query.NewView( + chainID, + rootAddr, + storage.NewEphemeralStorage( + backend, + ), + blks, + maxCallGasLimit, + ) + + newNumberValue := common.HexToHash("0x64") // 100 in hex + res, err := view.DryCall( + testAccount.Address().ToCommon(), + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "retrieve"), + big.NewInt(0), + uint64(1_000_000), + query.WithStateOverrideStateDiff( + // this is a random address, without any code in it + common.HexToAddress("0xCebE1e78Db8C757fe18E7EdfE5a3D99B5ca45c8d"), + map[common.Hash]common.Hash{ + {0x0}: newNumberValue, + }, + ), + ) + require.NoError(t, err) + require.NoError(t, res.ValidationError) + require.NoError(t, res.VMError) + require.Equal( + t, + common.Hash{0x0}, + common.BytesToHash(res.ReturnedData), + ) + }) + + t.Run("DryCall with WithStateOverrideCode and WithStateOverrideState for non-existing account", func(t *testing.T) { + blks, err := blocks.NewBlocks(chainID, rootAddr, backend) + require.NoError(t, err) + + maxCallGasLimit := uint64(5_000_000) + view := query.NewView( + chainID, + rootAddr, + storage.NewEphemeralStorage( + backend, + ), + blks, + maxCallGasLimit, + ) + + newContractAddress := common.HexToAddress("0xD370975A6257fE8CeF93101799D602D30838BAad") + + newNumberValue := common.HexToHash("0x32") // 50 in hex + res, err := view.DryCall( + testAccount.Address().ToCommon(), + newContractAddress, + testContract.MakeCallData(t, "retrieve"), + big.NewInt(0), + uint64(1_000_000), + query.WithStateOverrideCode( + newContractAddress, + contracts.TestContractBytes[17:], // we need the deployed byte-code + ), + query.WithStateOverrideState( + newContractAddress, + map[common.Hash]common.Hash{ + {0x0}: newNumberValue, + }, + ), + ) + require.NoError(t, err) + require.NoError(t, res.ValidationError) + require.NoError(t, res.VMError) + require.Equal( + t, + newNumberValue, + common.BytesToHash(res.ReturnedData), + ) + }) + + t.Run("DryCall with WithStateOverrideCode and WithStateOverrideStateDiff for non-existing account", func(t *testing.T) { + blks, err := blocks.NewBlocks(chainID, rootAddr, backend) + require.NoError(t, err) + + maxCallGasLimit := uint64(5_000_000) + view := query.NewView( + chainID, + rootAddr, + storage.NewEphemeralStorage( + backend, + 
), + blks, + maxCallGasLimit, + ) + + newContractAddress := common.HexToAddress("0xD370975A6257fE8CeF93101799D602D30838BAad") + + newNumberValue := common.HexToHash("0x332") // 818 in hex + res, err := view.DryCall( + testAccount.Address().ToCommon(), + newContractAddress, + testContract.MakeCallData(t, "retrieve"), + big.NewInt(0), + uint64(1_000_000), + query.WithStateOverrideCode( + newContractAddress, + contracts.TestContractBytes[17:], // we need the deployed byte-code + ), + query.WithStateOverrideStateDiff( + newContractAddress, + map[common.Hash]common.Hash{ + {0x0}: newNumberValue, + }, + ), + ) + require.NoError(t, err) + require.NoError(t, res.ValidationError) + require.NoError(t, res.VMError) + require.Equal( + t, + newNumberValue, + common.BytesToHash(res.ReturnedData), + ) + }) + }) + }) + }) + }) +} diff --git a/fvm/evm/offchain/storage/ephemeral.go b/fvm/evm/offchain/storage/ephemeral.go new file mode 100644 index 00000000000..a1687460526 --- /dev/null +++ b/fvm/evm/offchain/storage/ephemeral.go @@ -0,0 +1,94 @@ +package storage + +import ( + "fmt" + + "github.com/onflow/atree" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +// EphemeralStorage holds on to register changes instead of applying them directly to +// the provided backend storage. It can be used for dry running transaction/calls +// or batching updates for atomic operations. +type EphemeralStorage struct { + parent types.BackendStorage + deltas map[flow.RegisterID]flow.RegisterValue +} + +// NewEphemeralStorage constructs a new EphemeralStorage +func NewEphemeralStorage(parent types.BackendStorage) *EphemeralStorage { + return &EphemeralStorage{ + parent: parent, + deltas: make(map[flow.RegisterID]flow.RegisterValue), + } +} + +var _ types.BackendStorage = (*EphemeralStorage)(nil) + +var _ types.ReplayResultCollector = (*EphemeralStorage)(nil) + +// GetValue reads a register value +func (s *EphemeralStorage) GetValue(owner []byte, key []byte) ([]byte, error) { + // check delta first + ret, found := s.deltas[RegisterID(owner, key)] + if found { + return ret, nil + } + return s.parent.GetValue(owner, key) +} + +// SetValue sets a register value +func (s *EphemeralStorage) SetValue(owner, key, value []byte) error { + s.deltas[RegisterID(owner, key)] = value + return nil +} + +// ValueExists checks if a register exists +func (s *EphemeralStorage) ValueExists(owner []byte, key []byte) (bool, error) { + ret, err := s.GetValue(owner, key) + return len(ret) > 0, err +} + +// AllocateSlabIndex allocates an slab index based on the given owner +func (s *EphemeralStorage) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) { + statusBytes, err := s.GetValue(owner, []byte(flow.AccountStatusKey)) + if err != nil { + return atree.SlabIndex{}, err + } + if len(statusBytes) == 0 { + return atree.SlabIndex{}, fmt.Errorf("state for account not found") + } + + status, err := environment.AccountStatusFromBytes(statusBytes) + if err != nil { + return atree.SlabIndex{}, err + } + + // get and increment the index + index := status.SlabIndex() + newIndexBytes := index.Next() + + // update the storageIndex bytes + status.SetStorageIndex(newIndexBytes) + err = s.SetValue(owner, []byte(flow.AccountStatusKey), status.ToBytes()) + if err != nil { + return atree.SlabIndex{}, err + } + return index, nil +} + +// StorageRegisterUpdates returns a map of register updates +func (s *EphemeralStorage) StorageRegisterUpdates() 
map[flow.RegisterID]flow.RegisterValue { + return s.deltas +} + +// RegisterID creates a RegisterID from owner and key +func RegisterID(owner []byte, key []byte) flow.RegisterID { + return flow.RegisterID{ + Owner: string(owner), + Key: string(key), + } +} diff --git a/fvm/evm/offchain/storage/ephemeral_test.go b/fvm/evm/offchain/storage/ephemeral_test.go new file mode 100644 index 00000000000..52a029737a0 --- /dev/null +++ b/fvm/evm/offchain/storage/ephemeral_test.go @@ -0,0 +1,71 @@ +package storage_test + +import ( + "testing" + + "github.com/onflow/atree" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/offchain/storage" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/model/flow" +) + +func TestEphemeralStorage(t *testing.T) { + + parent := testutils.GetSimpleValueStore() + // preset value + owner := []byte("owner") + key1 := []byte("key1") + value1 := []byte{1} + value2 := []byte{2} + err := parent.SetValue(owner, key1, value1) + require.NoError(t, err) + + s := storage.NewEphemeralStorage(parent) + ret, err := s.GetValue(owner, key1) + require.NoError(t, err) + require.Equal(t, value1, ret) + found, err := s.ValueExists(owner, key1) + require.NoError(t, err) + require.True(t, found) + + // test set value + err = s.SetValue(owner, key1, value2) + require.NoError(t, err) + ret, err = s.GetValue(owner, key1) + require.NoError(t, err) + require.Equal(t, value2, ret) + // the parent should still return the value1 + ret, err = parent.GetValue(owner, key1) + require.NoError(t, err) + require.Equal(t, value1, ret) + + // test allocate slab id + _, err = s.AllocateSlabIndex(owner) + require.Error(t, err) + + // setup account + err = s.SetValue(owner, []byte(flow.AccountStatusKey), environment.NewAccountStatus().ToBytes()) + require.NoError(t, err) + + sid, err := s.AllocateSlabIndex(owner) + require.NoError(t, err) + expected := atree.SlabIndex([8]byte{0, 0, 0, 0, 0, 0, 0, 1}) + require.Equal(t, expected, sid) + + sid, err = s.AllocateSlabIndex(owner) + require.NoError(t, err) + expected = atree.SlabIndex([8]byte{0, 0, 0, 0, 0, 0, 0, 2}) + require.Equal(t, expected, sid) + + // fetch delta + delta := s.StorageRegisterUpdates() + require.Len(t, delta, 2) + ret = delta[flow.RegisterID{ + Owner: string(owner), + Key: string(key1), + }] + require.Equal(t, value2, ret) +} diff --git a/fvm/evm/offchain/storage/readonly.go b/fvm/evm/offchain/storage/readonly.go new file mode 100644 index 00000000000..6c66e7c1e43 --- /dev/null +++ b/fvm/evm/offchain/storage/readonly.go @@ -0,0 +1,44 @@ +package storage + +import ( + "fmt" + + "github.com/onflow/atree" + + "github.com/onflow/flow-go/fvm/evm/types" +) + +// ReadOnlyStorage wraps an snapshot and only provides read functionality. 
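+//
+// A typical composition (a sketch of how the replayer uses it) is to wrap a
+// historic snapshot read-only and collect writes in an ephemeral overlay, so
+// the snapshot itself is never mutated:
+//
+//	base := storage.NewReadOnlyStorage(snapshot)
+//	overlay := storage.NewEphemeralStorage(base)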
+type ReadOnlyStorage struct { + snapshot types.BackendStorageSnapshot +} + +var _ types.BackendStorage = &ReadOnlyStorage{} + +// NewReadOnlyStorage constructs a new ReadOnlyStorage using the given snapshot +func NewReadOnlyStorage(snapshot types.BackendStorageSnapshot) *ReadOnlyStorage { + return &ReadOnlyStorage{ + snapshot, + } +} + +// GetValue reads a register value +func (s *ReadOnlyStorage) GetValue(owner []byte, key []byte) ([]byte, error) { + return s.snapshot.GetValue(owner, key) +} + +// SetValue returns an error if called +func (s *ReadOnlyStorage) SetValue(owner, key, value []byte) error { + return fmt.Errorf("unexpected call received for SetValue with owner: %x, key: %v, value: %x", owner, key, value) +} + +// ValueExists checks if a register exists +func (s *ReadOnlyStorage) ValueExists(owner []byte, key []byte) (bool, error) { + val, err := s.snapshot.GetValue(owner, key) + return len(val) > 0, err +} + +// AllocateSlabIndex returns an error if called +func (s *ReadOnlyStorage) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) { + return atree.SlabIndex{}, fmt.Errorf("unexpected call received for AllocateSlabIndex with owner: %x", owner) +} diff --git a/fvm/evm/offchain/storage/readonly_test.go b/fvm/evm/offchain/storage/readonly_test.go new file mode 100644 index 00000000000..6020a381156 --- /dev/null +++ b/fvm/evm/offchain/storage/readonly_test.go @@ -0,0 +1,36 @@ +package storage_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/offchain/storage" + "github.com/onflow/flow-go/fvm/evm/testutils" +) + +func TestReadOnlyStorage(t *testing.T) { + parent := testutils.GetSimpleValueStore() + + owner := []byte("owner") + key1 := []byte("key1") + value1 := []byte{1} + err := parent.SetValue(owner, key1, value1) + require.NoError(t, err) + + rs := storage.NewReadOnlyStorage(parent) + ret, err := rs.GetValue(owner, key1) + require.NoError(t, err) + require.Equal(t, value1, ret) + + found, err := rs.ValueExists(owner, key1) + require.NoError(t, err) + require.True(t, found) + + err = rs.SetValue(owner, key1, value1) + require.Error(t, err) + + _, err = rs.AllocateSlabIndex(owner) + require.Error(t, err) + +} diff --git a/fvm/evm/offchain/sync/replay.go b/fvm/evm/offchain/sync/replay.go new file mode 100644 index 00000000000..a9fe6b2f955 --- /dev/null +++ b/fvm/evm/offchain/sync/replay.go @@ -0,0 +1,196 @@ +package sync + +import ( + "bytes" + "fmt" + + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethTracer "github.com/ethereum/go-ethereum/eth/tracers" + gethTrie "github.com/ethereum/go-ethereum/trie" + "github.com/onflow/atree" + + "github.com/onflow/flow-go/fvm/evm/emulator" + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/precompiles" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +var emptyChecksum = [types.ChecksumLength]byte{0, 0, 0, 0} + +// ReplayBlockExecution re-executes transactions of a block using the +// events emitted when transactions where executed. 
+// it updates the state of the given ledger and uses the trace +func ReplayBlockExecution( + chainID flow.ChainID, + rootAddr flow.Address, + storage types.BackendStorage, + blockSnapshot types.BlockSnapshot, + tracer *gethTracer.Tracer, + transactionEvents []events.TransactionEventPayload, + blockEvent *events.BlockEventPayload, + validateResults bool, +) ([]*types.Result, error) { + // check the passed block event + if blockEvent == nil { + return nil, fmt.Errorf("nil block event has been passed") + } + + // create a base block context for all transactions + // tx related context values will be replaced during execution + ctx, err := blockSnapshot.BlockContext() + if err != nil { + return nil, err + } + // update the tracer + ctx.Tracer = tracer + + gasConsumedSoFar := uint64(0) + txHashes := make(types.TransactionHashes, len(transactionEvents)) + results := make([]*types.Result, 0, len(transactionEvents)) + for idx, tx := range transactionEvents { + result, err := replayTransactionExecution( + rootAddr, + ctx, + uint(idx), + gasConsumedSoFar, + storage, + &tx, + validateResults, + ) + if err != nil { + return nil, fmt.Errorf("transaction execution failed, txIndex: %d, err: %w", idx, err) + } + gasConsumedSoFar += tx.GasConsumed + txHashes[idx] = tx.Hash + + results = append(results, result) + } + + if validateResults { + // check transaction inclusion + txHashRoot := gethTypes.DeriveSha(txHashes, gethTrie.NewStackTrie(nil)) + if txHashRoot != blockEvent.TransactionHashRoot { + return nil, fmt.Errorf("transaction root hash doesn't match [%x] != [%x]", txHashRoot, blockEvent.TransactionHashRoot) + } + + // check total gas used + if blockEvent.TotalGasUsed != gasConsumedSoFar { + return nil, fmt.Errorf("total gas used doesn't match [%d] != [%d]", gasConsumedSoFar, blockEvent.TotalGasUsed) + } + // no need to check the receipt root hash given we have checked the logs and other + // values during tx execution. + } + + return results, nil +} + +func replayTransactionExecution( + rootAddr flow.Address, + ctx types.BlockContext, + txIndex uint, + gasUsedSoFar uint64, + ledger atree.Ledger, + txEvent *events.TransactionEventPayload, + validate bool, +) (*types.Result, error) { + + // create emulator + em := emulator.NewEmulator(ledger, rootAddr) + + // update block context with tx level info + ctx.TotalGasUsedSoFar = gasUsedSoFar + ctx.TxCountSoFar = txIndex + // populate precompiled calls + if len(txEvent.PrecompiledCalls) > 0 { + pcs, err := types.AggregatedPrecompileCallsFromEncoded(txEvent.PrecompiledCalls) + if err != nil { + return nil, fmt.Errorf("error decoding precompiled calls [%x]: %w", txEvent.Payload, err) + } + ctx.ExtraPrecompiledContracts = precompiles.AggregatedPrecompiledCallsToPrecompiledContracts(pcs) + } + + // create a new block view + bv, err := em.NewBlockView(ctx) + if err != nil { + return nil, err + } + + var res *types.Result + // check if the transaction payload is actually from a direct call, + // which is a special state transition in Flow EVM. 
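+	// Direct calls (e.g. COA deposits, withdrawals, and calls) are encoded
+	// with a Flow-EVM-specific scheme, so they are decoded with
+	// DirectCallFromEncoded below rather than as regular geth transactions.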
+	if txEvent.TransactionType == types.DirectCallTxType {
+		call, err := types.DirectCallFromEncoded(txEvent.Payload)
+		if err != nil {
+			return nil, fmt.Errorf("failed to RLP-decode direct call [%x]: %w", txEvent.Payload, err)
+		}
+		res, err = bv.DirectCall(call)
+		if err != nil {
+			return nil, fmt.Errorf("failed to execute direct call [%x]: %w", txEvent.Hash, err)
+		}
+	} else {
+		gethTx := &gethTypes.Transaction{}
+		if err := gethTx.UnmarshalBinary(txEvent.Payload); err != nil {
+			return nil, fmt.Errorf("failed to RLP-decode transaction [%x]: %w", txEvent.Payload, err)
+		}
+		res, err = bv.RunTransaction(gethTx)
+		if err != nil {
+			return nil, fmt.Errorf("failed to run transaction [%x]: %w", txEvent.Hash, err)
+		}
+	}
+
+	// validate results
+	if validate {
+		if err := ValidateResult(res, txEvent); err != nil {
+			return nil, fmt.Errorf("transaction replay failed (txHash %x): %w", txEvent.Hash, err)
+		}
+	}
+
+	return res, nil
+}
+
+func ValidateResult(
+	res *types.Result,
+	txEvent *events.TransactionEventPayload,
+) error {
+
+	// we should never produce an invalid transaction: if a transaction was emitted by the evm core,
+	// it must have either succeeded or failed; invalid transactions are not emitted
+	if res.Invalid() {
+		return fmt.Errorf("invalid transaction: %w", res.ValidationError)
+	}
+
+	// check gas consumed
+	if res.GasConsumed != txEvent.GasConsumed {
+		return fmt.Errorf("gas consumption mismatch %d != %d", res.GasConsumed, txEvent.GasConsumed)
+	}
+
+	// check error code
+	txEventErrorCode := types.ErrorCode(txEvent.ErrorCode)
+	if errorCode := res.ResultSummary().ErrorCode; errorCode != txEventErrorCode {
+		return fmt.Errorf("error code mismatch %d != %d", errorCode, txEventErrorCode)
+	}
+
+	// check encoded logs
+	encodedLogs, err := res.RLPEncodedLogs()
+	if err != nil {
+		return fmt.Errorf("failed to RLP-encode logs: %w", err)
+	}
+	if !bytes.Equal(encodedLogs, txEvent.Logs) {
+		return fmt.Errorf("encoded logs mismatch %x != %x", encodedLogs, txEvent.Logs)
+	}
+
+	// check deployed address
+	if deployedAddress := res.DeployedContractAddressString(); deployedAddress != txEvent.ContractAddress {
+		return fmt.Errorf("deployed address mismatch %s != %s", deployedAddress, txEvent.ContractAddress)
+	}
+
+	// check the state change checksum;
+	// skip if the event checksum is empty (supports blocks created before checksum integration)
+	if checksum := res.StateChangeChecksum(); txEvent.StateUpdateChecksum != emptyChecksum &&
+		checksum != txEvent.StateUpdateChecksum {
+		return fmt.Errorf("state change checksum mismatch %x != %x", checksum, txEvent.StateUpdateChecksum)
+	}
+
+	return nil
+}
diff --git a/fvm/evm/offchain/sync/replayer.go b/fvm/evm/offchain/sync/replayer.go
new file mode 100644
index 00000000000..e7d33234739
--- /dev/null
+++ b/fvm/evm/offchain/sync/replayer.go
@@ -0,0 +1,104 @@
+package sync
+
+import (
+	gethTracers "github.com/ethereum/go-ethereum/eth/tracers"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/fvm/evm/events"
+	"github.com/onflow/flow-go/fvm/evm/offchain/storage"
+	"github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// Replayer consumes EVM transaction and block events,
+// re-executes the EVM transactions, and follows the EVM chain.
+// This allows using different tracers and storage solutions.
+type Replayer struct {
+	chainID         flow.ChainID
+	rootAddr        flow.Address
+	logger          zerolog.Logger
+	storageProvider types.StorageProvider
+	blockProvider   types.BlockSnapshotProvider
+	tracer          *gethTracers.Tracer
+	validateResults bool
+}
+
+// NewReplayer constructs a new Replayer
+func NewReplayer(
+	chainID flow.ChainID,
+	rootAddr flow.Address,
+	sp types.StorageProvider,
+	bp types.BlockSnapshotProvider,
+	logger zerolog.Logger,
+	tracer *gethTracers.Tracer,
+	validateResults bool,
+) *Replayer {
+	return &Replayer{
+		chainID:         chainID,
+		rootAddr:        rootAddr,
+		storageProvider: sp,
+		blockProvider:   bp,
+		logger:          logger,
+		tracer:          tracer,
+		validateResults: validateResults,
+	}
+}
+
+// ReplayBlock replays the execution of the transactions of an EVM block
+func (cr *Replayer) ReplayBlock(
+	transactionEvents []events.TransactionEventPayload,
+	blockEvent *events.BlockEventPayload,
+) (types.ReplayResultCollector, error) {
+	res, _, err := cr.ReplayBlockEvents(transactionEvents, blockEvent)
+	return res, err
+}
+
+// ReplayBlockEvents replays the execution of the transactions of an EVM block
+// using the provided transactionEvents and blockEvents,
+// which include all the context data for re-executing the transactions, and returns
+// the replay result and the result of each transaction.
+// The replay result contains the register updates, and the per-transaction
+// results contain the execution outcome of each transaction, which is useful
+// for reconstructing the EVM block proposal.
+// This method can be called concurrently if the underlying storage,
+// tracer, and block snapshot provider support concurrency.
+//
+// Warning! The list of transaction events has to be sorted by execution order;
+// access nodes might sometimes return events out of order, in which case they
+// need to be sorted by txIndex and then by eventIndex.
+func (cr *Replayer) ReplayBlockEvents(
+	transactionEvents []events.TransactionEventPayload,
+	blockEvent *events.BlockEventPayload,
+) (types.ReplayResultCollector, []*types.Result, error) {
+	// prepare storage
+	st, err := cr.storageProvider.GetSnapshotAt(blockEvent.Height)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// create storage
+	state := storage.NewEphemeralStorage(storage.NewReadOnlyStorage(st))
+
+	// get block snapshot
+	bs, err := cr.blockProvider.GetSnapshotAt(blockEvent.Height)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// replay transactions
+	results, err := ReplayBlockExecution(
+		cr.chainID,
+		cr.rootAddr,
+		state,
+		bs,
+		cr.tracer,
+		transactionEvents,
+		blockEvent,
+		cr.validateResults,
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return state, results, nil
+}
diff --git a/fvm/evm/offchain/sync/replayer_test.go b/fvm/evm/offchain/sync/replayer_test.go
new file mode 100644
index 00000000000..6144c3e3ee8
--- /dev/null
+++ b/fvm/evm/offchain/sync/replayer_test.go
@@ -0,0 +1,201 @@
+package sync_test
+
+import (
+	"math"
+	"math/big"
+	"testing"
+
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/fvm/evm"
+	"github.com/onflow/flow-go/fvm/evm/events"
+	"github.com/onflow/flow-go/fvm/evm/offchain/blocks"
+	"github.com/onflow/flow-go/fvm/evm/offchain/storage"
+	"github.com/onflow/flow-go/fvm/evm/offchain/sync"
+	.
"github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +func TestChainReplay(t *testing.T) { + + const chainID = flow.Emulator + var snapshot *TestValueStore + RunWithTestBackend(t, func(backend *TestBackend) { + RunWithTestFlowEVMRootAddress(t, backend, func(rootAddr flow.Address) { + RunWithDeployedContract(t, + GetStorageTestContract(t), backend, rootAddr, func(testContract *TestContract) { + RunWithEOATestAccount(t, backend, rootAddr, func(testAccount *EOATestAccount) { + handler := SetupHandler(chainID, backend, rootAddr) + + // clone state before apply transactions + snapshot = backend.Clone() + gasFeeCollector := RandomAddress(t) + + totalTxCount := 0 + + // case: check sequential updates to a slot + for i := 0; i < 5; i++ { + tx := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "checkThenStore", big.NewInt(int64(i)), big.NewInt(int64(i+1))), + big.NewInt(0), + uint64(100_000), + big.NewInt(1), + ) + rs := handler.Run(tx, gasFeeCollector) + require.Equal(t, types.ErrorCode(0), rs.ErrorCode) + totalTxCount += 2 // one for tx, one for gas refund + } + + // case: add batch run BatchRun + batchSize := 4 + txBatch := make([][]byte, batchSize) + for i := 0; i < batchSize; i++ { + txBatch[i] = testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "store", big.NewInt(int64(i))), + big.NewInt(0), + uint64(100_000), + big.NewInt(1), + ) + } + rss := handler.BatchRun(txBatch, gasFeeCollector) + for _, rs := range rss { + require.Equal(t, types.ErrorCode(0), rs.ErrorCode) + } + totalTxCount += batchSize + 1 // plus one for gas refund + + // case: fetching evm block number + tx := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "checkBlockNumber", big.NewInt(1)), + big.NewInt(0), + uint64(100_000), + big.NewInt(1), + ) + rs := handler.Run(tx, gasFeeCollector) + require.Equal(t, types.ErrorCode(0), rs.ErrorCode) + totalTxCount += 2 // one for tx, one for gas refund + + // case: making a call to the cadence arch + expectedFlowHeight := uint64(3) + tx = testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "verifyArchCallToFlowBlockHeight", expectedFlowHeight), + big.NewInt(0), + uint64(2_000_000), + big.NewInt(1), + ) + rs = handler.Run(tx, gasFeeCollector) + require.Equal(t, types.ErrorCode(0), rs.ErrorCode) + totalTxCount += 2 // one for tx, one for gas refund + + // case: fetch evm block hash - last block + expected := types.GenesisBlockHash(chainID) + tx = testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "checkBlockHash", big.NewInt(0), expected), + big.NewInt(0), + uint64(100_000), + big.NewInt(1), + ) + rs = handler.Run(tx, gasFeeCollector) + require.Equal(t, types.ErrorCode(0), rs.ErrorCode) + totalTxCount += 2 // one for tx, one for gas refund + + // case: fetch evm block hash - current block + expected = gethCommon.Hash{} + tx = testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "checkBlockHash", big.NewInt(1), expected), + big.NewInt(0), + uint64(100_000), + big.NewInt(1), + ) + rs = handler.Run(tx, gasFeeCollector) + require.Equal(t, types.ErrorCode(0), rs.ErrorCode) + totalTxCount += 2 // one for tx, one for gas refund + + // case: coa operations + addr := 
handler.DeployCOA(100) + totalTxCount += 1 + + coa := handler.AccountByAddress(addr, true) + coa.Deposit(types.NewFlowTokenVault(types.MakeABalanceInFlow(10))) + totalTxCount += 1 + coa.Withdraw(types.NewBalance(types.MakeABalanceInFlow(4))) + totalTxCount += 1 + + expectedBalance := (*big.Int)(types.MakeABalanceInFlow(6)) + rs = coa.Call( + testContract.DeployedAt, + testContract.MakeCallData(t, "checkBalance", addr.ToCommon(), expectedBalance), + 100_000, + types.EmptyBalance, + ) + require.Equal(t, types.ErrorCode(0), rs.ErrorCode) + totalTxCount += 1 + + rs = coa.Deploy(testContract.ByteCode, math.MaxUint64, types.EmptyBalance) + require.Equal(t, types.ErrorCode(0), rs.ErrorCode) + totalTxCount += 1 + + // commit block + handler.CommitBlockProposal() + + // prepare events + txEventPayloads, blockEventPayload := prepareEvents(t, chainID, backend.Events()) + + // because we are doing direct calls, there is no extra + // events (e.g. COA created) events emitted. + require.Len(t, txEventPayloads, totalTxCount) + + // check replay + + bpStorage := storage.NewEphemeralStorage(snapshot) + bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) + require.NoError(t, err) + + err = bp.OnBlockReceived(blockEventPayload) + require.NoError(t, err) + + sp := NewTestStorageProvider(snapshot, 1) + cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true) + res, results, err := cr.ReplayBlockEvents(txEventPayloads, blockEventPayload) + require.NoError(t, err) + + require.Len(t, results, totalTxCount) + + proposal := blocks.ReconstructProposal(blockEventPayload, results) + + err = bp.OnBlockExecuted(blockEventPayload.Height, res, proposal) + require.NoError(t, err) + }) + }) + }) + }) +} + +func prepareEvents( + t *testing.T, + chainID flow.ChainID, + allEvents flow.EventsList) ( + []events.TransactionEventPayload, + *events.BlockEventPayload, +) { + evmContract := evm.ContractAccountAddress(chainID) + var blockEventPayload *events.BlockEventPayload + txEventPayloads := make([]events.TransactionEventPayload, len(allEvents)-1) + for i, event := range allEvents { + // last event is block event + if i == len(allEvents)-1 { + blockEventPayload = BlockEventToPayload(t, event, evmContract) + continue + } + txEventPayloads[i] = *TxEventToPayload(t, event, evmContract) + } + return txEventPayloads, blockEventPayload +} diff --git a/fvm/evm/offchain/utils/collection.go b/fvm/evm/offchain/utils/collection.go new file mode 100644 index 00000000000..143c2bd3eee --- /dev/null +++ b/fvm/evm/offchain/utils/collection.go @@ -0,0 +1,104 @@ +package utils + +import ( + "bufio" + "context" + "encoding/hex" + "encoding/json" + "fmt" + "math" + "os" + "path/filepath" + "sort" + + "github.com/onflow/flow-go-sdk/access/grpc" +) + +func CollectEventData(conf *Config, path string) error { + + flowClient, err := grpc.NewClient(conf.host) + if err != nil { + return err + } + outputFile := filepath.Join(path, "events.jsonl") + fi, err := os.Create(outputFile) + if err != nil { + return err + } + defer fi.Close() + + writer := bufio.NewWriter(fi) + defer writer.Flush() + + ctx := context.Background() + + txEventType := fmt.Sprintf("A.%s.EVM.TransactionExecuted", conf.evmContractAddress) + blockEventType := fmt.Sprintf("A.%s.EVM.BlockExecuted", conf.evmContractAddress) + + for height := conf.startHeight; height < conf.endHeight; height += conf.batchSize { + events := make([]Event, 0) + result, err := flowClient.GetEventsForHeightRange(ctx, txEventType, height, height+conf.batchSize-1) + if err != nil { 
+			return err
+		}
+		if len(result) > 0 {
+			for _, tEvent := range result {
+				evs := tEvent.Events
+				for _, e := range evs {
+					events = append(events, Event{
+						FlowBlockHeight: tEvent.Height,
+						EventType:       e.Type,
+						EventPayload:    hex.EncodeToString(e.Payload),
+						txIndex:         e.TransactionIndex,
+						eventIndex:      e.EventIndex,
+					})
+				}
+			}
+		}
+		result, err = flowClient.GetEventsForHeightRange(ctx, blockEventType, height, height+conf.batchSize-1)
+		if err != nil {
+			return err
+		}
+		if len(result) > 0 {
+			for _, bEvent := range result {
+				evs := bEvent.Events
+				for _, e := range evs {
+					events = append(events, Event{
+						FlowBlockHeight: bEvent.Height,
+						EventType:       e.Type,
+						EventPayload:    hex.EncodeToString(e.Payload),
+						// set to max int to make sure it is ordered as the last event of the evm block
+						txIndex: math.MaxInt,
+					})
+				}
+			}
+		}
+
+		// sort events by flow height, tx index and then event index
+		sort.Slice(events, func(i, j int) bool {
+			if events[i].FlowBlockHeight == events[j].FlowBlockHeight {
+				if events[i].txIndex == events[j].txIndex {
+					return events[i].eventIndex < events[j].eventIndex
+				}
+				return events[i].txIndex < events[j].txIndex
+			}
+			return events[i].FlowBlockHeight < events[j].FlowBlockHeight
+		})
+
+		for _, ev := range events {
+			jsonData, err := json.Marshal(ev)
+			if err != nil {
+				return err
+			}
+			_, err = writer.WriteString(string(jsonData) + "\n")
+			if err != nil {
+				return err
+			}
+			err = writer.Flush()
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return writer.Flush()
+}
diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go
new file mode 100644
index 00000000000..5dad9b86658
--- /dev/null
+++ b/fvm/evm/offchain/utils/collection_test.go
@@ -0,0 +1,225 @@
+package utils_test
+
+import (
+	"bufio"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/rs/zerolog/log"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/cadence"
+	"github.com/onflow/cadence/encoding/ccf"
+
+	"github.com/onflow/flow-go/fvm/environment"
+	"github.com/onflow/flow-go/fvm/evm"
+	"github.com/onflow/flow-go/fvm/evm/events"
+	"github.com/onflow/flow-go/fvm/evm/offchain/utils"
+	. "github.com/onflow/flow-go/fvm/evm/testutils"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+func TestTestnetBackwardCompatibility(t *testing.T) {
+	t.Skip("TIME CONSUMING TEST. Enable it with the events files saved locally")
+	// How to run these tests:
+	// Note: they are time consuming, so please run them locally
+	//
+	// 1) run the following CLI command to get the events files across different sporks
+
+	// flow events get A.8c5303eaa26202d6.EVM.TransactionExecuted A.8c5303eaa26202d6.EVM.BlockExecuted
+	// --start 211176670 --end 211176770 --network testnet --host access-001.devnet51.nodes.onflow.org:9000
+	// > ~/Downloads/events_devnet51_1.jsonl
+	// ...
+	//
+	// 2) comment out the above t.Skip, and update the events file paths and
+	// the evmStateGob dir to run the tests
+	BackwardCompatibleSinceEVMGenesisBlock(
+		t, flow.Testnet, []string{
+			"~/Downloads/events_devnet51_1.jsonl",
+			"~/Downloads/events_devnet51_2.jsonl",
+		},
+		"~/Downloads/",
+		0,
+	)
+}
+
+// BackwardCompatibleSinceEVMGenesisBlock ensures that the offchain package
+// can read EVM events from the provided file paths, replay blocks starting from
+// the EVM genesis block, and derive a consistent state matching the latest on-chain EVM state.
+//
+// The parameter `eventsFilePaths` is a list of file paths containing ordered EVM events in JSONL format.
+// These EVM event files can be generated using the Flow CLI query command, for example:
+//
+// flow events get A.8c5303eaa26202d6.EVM.TransactionExecuted A.8c5303eaa26202d6.EVM.BlockExecuted
+//
+// --start 211176670 --end 211176770 --network testnet --host access-001.devnet51.nodes.onflow.org:9000
+//
+// During the replay process, it will generate `values_<height>.gob` and
+// `allocators_<height>.gob` checkpoint files for each height. If these checkpoint gob files exist,
+// the corresponding event JSON files will be skipped to optimize replay.
+func BackwardCompatibleSinceEVMGenesisBlock(
+	t *testing.T,
+	chainID flow.ChainID,
+	eventsFilePaths []string, // ordered EVM events in JSONL format
+	evmStateGob string,
+	evmStateEndHeight uint64, // EVM height of an EVM state that an evmStateGob file was created for
+) {
+	// ensure that the list of event files is not empty
+	require.True(t, len(eventsFilePaths) > 0)
+
+	log.Info().Msgf("replaying EVM events from %v to %v, with evmStateGob file in %s, and evmStateEndHeight: %v",
+		eventsFilePaths[0], eventsFilePaths[len(eventsFilePaths)-1],
+		evmStateGob, evmStateEndHeight)
+
+	store, evmStateEndHeightOrZero := initStorageWithEVMStateGob(t, chainID, evmStateGob, evmStateEndHeight)
+
+	// the events to replay
+	nextHeight := evmStateEndHeightOrZero + 1
+
+	// replay each events file
+	for _, eventsFilePath := range eventsFilePaths {
+		log.Info().Msgf("replaying events from %v, nextHeight: %v", eventsFilePath, nextHeight)
+
+		evmStateEndHeight := replayEvents(t, chainID, store, eventsFilePath, evmStateGob, nextHeight)
+		nextHeight = evmStateEndHeight + 1
+	}
+
+	log.Info().
+		Msgf("successfully replayed all events; state changes are consistent with the onchain state change.
nextHeight: %v", nextHeight)
+}
+
+func initStorageWithEVMStateGob(t *testing.T, chainID flow.ChainID, evmStateGob string, evmStateEndHeight uint64) (
+	*TestValueStore, uint64,
+) {
+	rootAddr := evm.StorageAccountAddress(chainID)
+
+	// if there is no evmStateGob file, create an empty store and initialize the account status,
+	// then return 0 as the genesis height
+	if evmStateEndHeight == 0 {
+		store := GetSimpleValueStore()
+		as := environment.NewAccountStatus()
+		require.NoError(t, store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes()))
+
+		return store, 0
+	}
+
+	valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(evmStateGob, evmStateEndHeight)
+	values, err := DeserializeState(valueFileName)
+	require.NoError(t, err)
+	allocators, err := DeserializeAllocator(allocatorFileName)
+	require.NoError(t, err)
+	store := GetSimpleValueStorePopulated(values, allocators)
+	return store, evmStateEndHeight
+}
+
+func replayEvents(
+	t *testing.T,
+	chainID flow.ChainID,
+	store *TestValueStore, eventsFilePath string, evmStateGob string, initialNextHeight uint64) uint64 {

+	rootAddr := evm.StorageAccountAddress(chainID)
+
+	nextHeight := initialNextHeight
+
+	scanEventFilesAndRun(t, eventsFilePath,
+		func(blockEventPayload *events.BlockEventPayload, txEvents []events.TransactionEventPayload) error {
+			if blockEventPayload.Height != nextHeight {
+				return fmt.Errorf(
+					"expected height for next block event to be %v, but got %v",
+					nextHeight, blockEventPayload.Height)
+			}
+
+			_, _, err := utils.ReplayEVMEventsToStore(
+				log.Logger,
+				store,
+				chainID,
+				rootAddr,
+				blockEventPayload,
+				txEvents,
+			)
+			if err != nil {
+				return fmt.Errorf("failed to replay events: %w", err)
+			}
+			// the height check above verifies the block heights are sequential without gaps
+			nextHeight++
+
+			return nil
+		})
+
+	evmStateEndHeight := nextHeight - 1
+
+	log.Info().Msgf("finished replaying events from %v to %v, creating evm state gobs", initialNextHeight, evmStateEndHeight)
+	valuesFile, allocatorsFile := dumpEVMStateToGobFiles(t, store, evmStateGob, evmStateEndHeight)
+	log.Info().Msgf("evm state gobs created: %v, %v", valuesFile, allocatorsFile)
+
+	return evmStateEndHeight
+}
+
+func evmStateGobFileNamesByEndHeight(dir string, endHeight uint64) (string, string) {
+	return filepath.Join(dir, fmt.Sprintf("values_%d.gob", endHeight)),
+		filepath.Join(dir, fmt.Sprintf("allocators_%d.gob", endHeight))
+}
+
+func dumpEVMStateToGobFiles(t *testing.T, store *TestValueStore, dir string, evmStateEndHeight uint64) (string, string) {
+	valuesFileName, allocatorsFileName := evmStateGobFileNamesByEndHeight(dir, evmStateEndHeight)
+	values, allocators := store.Dump()
+
+	require.NoError(t, SerializeState(valuesFileName, values))
+	require.NoError(t, SerializeAllocator(allocatorsFileName, allocators))
+	return valuesFileName, allocatorsFileName
+}
+
+// scanEventFilesAndRun scans the given JSONL events file line by line and
+// invokes the handler once per EVM block, passing the block event payload and
+// the transaction event payloads accumulated since the previous block event.
+func scanEventFilesAndRun(
+	t *testing.T,
+	filePath string,
+	handler func(*events.BlockEventPayload, []events.TransactionEventPayload) error,
+) {
+	file, err := os.Open(filePath)
+	require.NoError(t, err)
+	defer file.Close()
+
+	scanner := bufio.NewScanner(file)
+
+	buf := make([]byte, 0, 64*1024)
+	scanner.Buffer(buf, 1024*1024)
+
+	txEvents := make([]events.TransactionEventPayload, 0)
+
+	for scanner.Scan() {
+		data := scanner.Bytes()
+		var e utils.Event
+		err := json.Unmarshal(data, &e)
+		require.NoError(t, err)
+		if strings.Contains(e.EventType, "BlockExecuted") {
+			temp, err := hex.DecodeString(e.EventPayload)
+			require.NoError(t, err)
+			ev, err
+            ev, err := ccf.Decode(nil, temp)
+            require.NoError(t, err)
+            blockEventPayload, err := events.DecodeBlockEventPayload(ev.(cadence.Event))
+            require.NoError(t, err)
+
+            require.NoError(t, handler(blockEventPayload, txEvents), fmt.Sprintf("failed to handle block at height %d",
+                blockEventPayload.Height))
+
+            txEvents = make([]events.TransactionEventPayload, 0)
+            continue
+        }
+
+        temp, err := hex.DecodeString(e.EventPayload)
+        require.NoError(t, err)
+        ev, err := ccf.Decode(nil, temp)
+        require.NoError(t, err)
+        txEv, err := events.DecodeTransactionEventPayload(ev.(cadence.Event))
+        require.NoError(t, err)
+        txEvents = append(txEvents, *txEv)
+    }
+    if err := scanner.Err(); err != nil {
+        t.Fatal(err)
+    }
+}
diff --git a/fvm/evm/offchain/utils/replay.go b/fvm/evm/offchain/utils/replay.go
new file mode 100644
index 00000000000..5aba8affcd1
--- /dev/null
+++ b/fvm/evm/offchain/utils/replay.go
@@ -0,0 +1,104 @@
+package utils
+
+import (
+    "github.com/rs/zerolog"
+
+    "github.com/onflow/flow-go/fvm/environment"
+    "github.com/onflow/flow-go/fvm/evm/events"
+    "github.com/onflow/flow-go/fvm/evm/offchain/blocks"
+    evmStorage "github.com/onflow/flow-go/fvm/evm/offchain/storage"
+    "github.com/onflow/flow-go/fvm/evm/offchain/sync"
+    "github.com/onflow/flow-go/fvm/evm/testutils"
+    "github.com/onflow/flow-go/model/flow"
+)
+
+func ReplayEVMEventsToStore(
+    log zerolog.Logger,
+    store environment.ValueStore,
+    chainID flow.ChainID,
+    rootAddr flow.Address,
+    evmBlockEvent *events.BlockEventPayload, // EVM block event
+    evmTxEvents []events.TransactionEventPayload, // EVM transaction events
+) (
+    map[flow.RegisterID]flow.RegisterValue, // EVM state transition updates
+    map[flow.RegisterID]flow.RegisterValue, // block provider updates
+    error,
+) {
+
+    bpStorage := evmStorage.NewEphemeralStorage(store)
+    bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    err = bp.OnBlockReceived(evmBlockEvent)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    sp := testutils.NewTestStorageProvider(store, evmBlockEvent.Height)
+    cr := sync.NewReplayer(chainID, rootAddr, sp, bp, log, nil, true)
+    res, results, err := cr.ReplayBlockEvents(evmTxEvents, evmBlockEvent)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    // commit all register changes from the EVM state transition
+    for k, v := range res.StorageRegisterUpdates() {
+        err = store.SetValue([]byte(k.Owner), []byte(k.Key), v)
+        if err != nil {
+            return nil, nil, err
+        }
+    }
+
+    blockProposal := blocks.ReconstructProposal(evmBlockEvent, results)
+
+    err = bp.OnBlockExecuted(evmBlockEvent.Height, res, blockProposal)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    // commit all register changes from the non-EVM state transition, such
+    // as block hash list changes
+    for k, v := range bpStorage.StorageRegisterUpdates() {
+        // the block hash list changes are committed here so that they are
+        // reflected in the trie update as well
+        err = store.SetValue([]byte(k.Owner), []byte(k.Key), v)
+        if err != nil {
+            return nil, nil, err
+        }
+    }
+
+    return res.StorageRegisterUpdates(), bpStorage.StorageRegisterUpdates(), nil
+}
+
+type EVMEventsAccumulator struct {
+    pendingEVMTxEvents []events.TransactionEventPayload
+}
+
+func NewEVMEventsAccumulator() *EVMEventsAccumulator {
+    return &EVMEventsAccumulator{
+        pendingEVMTxEvents: make([]events.TransactionEventPayload, 0),
+    }
+}
+
+func (a *EVMEventsAccumulator) HasBlockEvent(
+    evmBlockEvent *events.BlockEventPayload,
+    evmTxEvents []events.TransactionEventPayload) (
+    *events.BlockEventPayload,
+    []events.TransactionEventPayload,
+    bool, // true if there is an EVM block event
+) {
+    a.pendingEVMTxEvents = append(a.pendingEVMTxEvents, evmTxEvents...)
+
+    // if there is no EVM block event, keep accumulating the pending tx events
+    if evmBlockEvent == nil {
+        return evmBlockEvent, a.pendingEVMTxEvents, false
+    }
+
+    pendingEVMTxEvents := a.pendingEVMTxEvents
+    // reset pending events
+    a.pendingEVMTxEvents = make([]events.TransactionEventPayload, 0)
+    // if there is an EVM block event, return the EVM block and the accumulated tx events
+    return evmBlockEvent, pendingEVMTxEvents, true
+}
diff --git a/fvm/evm/offchain/utils/types.go b/fvm/evm/offchain/utils/types.go
new file mode 100644
index 00000000000..f40b19e4e91
--- /dev/null
+++ b/fvm/evm/offchain/utils/types.go
@@ -0,0 +1,33 @@
+package utils
+
+type Event struct {
+    FlowBlockHeight uint64 `json:"flow_height"`
+    EventType       string `json:"type"`
+    EventPayload    string `json:"payload"`
+    txIndex         int
+    eventIndex      int
+}
+
+type Config struct {
+    host               string
+    evmContractAddress string // no prefix
+    startHeight        uint64
+    endHeight          uint64
+    batchSize          uint64
+}
+
+var Devnet51Config = Config{
+    host:               "access-001.devnet51.nodes.onflow.org:9000",
+    evmContractAddress: "8c5303eaa26202d6",
+    startHeight:        uint64(211176670),
+    endHeight:          uint64(218215349),
+    batchSize:          uint64(50),
+}
+
+var Mainnet25Config = Config{
+    host:               "access-001.mainnet25.nodes.onflow.org:9000",
+    evmContractAddress: "e467b9dd11fa00df",
+    startHeight:        uint64(85981135),
+    endHeight:          uint64(88226266),
+    batchSize:          uint64(50),
+}
diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go
new file mode 100644
index 00000000000..9335beb6230
--- /dev/null
+++ b/fvm/evm/offchain/utils/verify.go
@@ -0,0 +1,293 @@
+package utils
+
+import (
+    "bytes"
+    "context"
+    "errors"
+    "fmt"
+    "strings"
+
+    "github.com/onflow/cadence"
+    "github.com/onflow/cadence/encoding/ccf"
+    "github.com/rs/zerolog"
+
+    "github.com/onflow/flow-go/fvm/environment"
+    "github.com/onflow/flow-go/fvm/evm"
+    "github.com/onflow/flow-go/fvm/evm/events"
+    "github.com/onflow/flow-go/ledger"
+    "github.com/onflow/flow-go/ledger/common/convert"
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/module/executiondatasync/execution_data"
+    "github.com/onflow/flow-go/storage"
+)
+
+// IsEVMRootHeight returns true if the given flow height is the EVM root height,
+// i.e. the first flow block that has an EVM block event with EVM block height 1.
+func IsEVMRootHeight(chainID flow.ChainID, flowHeight uint64) bool {
+    if chainID == flow.Testnet {
+        return flowHeight == 211176670
+    } else if chainID == flow.Mainnet {
+        return flowHeight == 85981135
+    }
+    return flowHeight == 1
+}
+
+// IsSporkHeight returns true if the given flow height is a spork height for the given chainID.
+// At a spork height, there are no EVM events.
+func IsSporkHeight(chainID flow.ChainID, flowHeight uint64) bool {
+    if IsEVMRootHeight(chainID, flowHeight) {
+        return true
+    }
+
+    if chainID == flow.Testnet {
+        return flowHeight == 218215349 // Testnet 52
+    } else if chainID == flow.Mainnet {
+        return flowHeight == 88226267 // Mainnet 26
+    }
+    return false
+}
+
+// OffchainReplayBackwardCompatibilityTest replays the offchain EVM state transition for a given range of flow blocks.
+// The replay also verifies the StateUpdateChecksum of the EVM state transition from each transaction execution.
+// The updated register values are saved to the given value store.
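+//
+// A minimal usage sketch (the height range, storage handles, and callback
+// below are illustrative assumptions, not fixed values):
+//
+//	err := OffchainReplayBackwardCompatibilityTest(
+//		log, flow.Testnet,
+//		startHeight, endHeight, // assumed flow height range
+//		headers, results, executionDataStore,
+//		store, // an environment.ValueStore implementation
+//		func(h uint64) error { log.Info().Msgf("replayed height %d", h); return nil },
+//	)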
+func OffchainReplayBackwardCompatibilityTest(
+    log zerolog.Logger,
+    chainID flow.ChainID,
+    flowStartHeight uint64,
+    flowEndHeight uint64,
+    headers storage.Headers,
+    results storage.ExecutionResults,
+    executionDataStore execution_data.ExecutionDataGetter,
+    store environment.ValueStore,
+    onHeightReplayed func(uint64) error,
+) error {
+    rootAddr := evm.StorageAccountAddress(chainID)
+    rootAddrStr := string(rootAddr.Bytes())
+
+    // pendingEVMEvents accumulates EVM transaction events from flow blocks that
+    // did not emit an EVM block event, which happens when the system transaction
+    // that emits the EVM block event fails. The pending tx events are replayed
+    // once a block with an EVM block event is encountered.
+    pendingEVMEvents := NewEVMEventsAccumulator()
+
+    for height := flowStartHeight; height <= flowEndHeight; height++ {
+        // account status initialization for the root account at the EVM root height
+        if IsEVMRootHeight(chainID, height) {
+            log.Info().Msgf("initializing EVM state for root height %d", height)
+
+            as := environment.NewAccountStatus()
+            err := store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes())
+            if err != nil {
+                return err
+            }
+
+            continue
+        }
+
+        if IsSporkHeight(chainID, height) {
+            // spork root blocks have no EVM events
+            continue
+        }
+
+        // get EVM events and register updates at the flow height
+        evmBlockEvent, evmTxEvents, registerUpdates, err := evmEventsAndRegisterUpdatesAtFlowHeight(
+            height,
+            headers, results, executionDataStore, rootAddrStr)
+        if err != nil {
+            return fmt.Errorf("failed to get EVM events and register updates at height %d: %w", height, err)
+        }
+
+        blockEvent, txEvents, hasBlockEvent := pendingEVMEvents.HasBlockEvent(evmBlockEvent, evmTxEvents)
+
+        if !hasBlockEvent {
+            log.Info().Msgf("flow block has no EVM block event, height: %v, txEvents: %v", height, len(evmTxEvents))
+
+            err = onHeightReplayed(height)
+            if err != nil {
+                return err
+            }
+            continue
+        }
+
+        evmUpdates, blockProviderUpdates, err := ReplayEVMEventsToStore(
+            log,
+            store,
+            chainID,
+            rootAddr,
+            blockEvent,
+            txEvents,
+        )
+        if err != nil {
+            return fmt.Errorf("failed to replay events: %w", err)
+        }
+
+        err = verifyEVMRegisterUpdates(registerUpdates, evmUpdates, blockProviderUpdates)
+        if err != nil {
+            return err
+        }
+
+        err = onHeightReplayed(height)
+        if err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
+
+func parseEVMEvents(evts flow.EventsList) (*events.BlockEventPayload, []events.TransactionEventPayload, error) {
+    var blockEvent *events.BlockEventPayload
+    txEvents := make([]events.TransactionEventPayload, 0)
+
+    for _, e := range evts {
+        evtType := string(e.Type)
+        if strings.Contains(evtType, "BlockExecuted") {
+            if blockEvent != nil {
+                return nil, nil, errors.New("multiple block events in a single block")
+            }
+
+            ev, err := ccf.Decode(nil, e.Payload)
+            if err != nil {
+                return nil, nil, err
+            }
+
+            blockEventPayload, err := events.DecodeBlockEventPayload(ev.(cadence.Event))
+            if err != nil {
+                return nil, nil, err
+            }
+            blockEvent = blockEventPayload
+        } else if strings.Contains(evtType, "TransactionExecuted") {
+            ev, err := ccf.Decode(nil, e.Payload)
+            if err != nil {
+                return nil, nil, err
+            }
+            txEv, err := events.DecodeTransactionEventPayload(ev.(cadence.Event))
+            if err != nil {
+                return nil, nil, err
+            }
+            txEvents = append(txEvents, *txEv)
+        }
+    }
+
+    return blockEvent, txEvents, nil
+}
+
+func evmEventsAndRegisterUpdatesAtFlowHeight(
+    flowHeight uint64,
+    headers storage.Headers,
+    results storage.ExecutionResults,
+    executionDataStore execution_data.ExecutionDataGetter,
+    rootAddr string,
+) (
+    *events.BlockEventPayload, // EVM block event, may be nil if there is no block event at this height
+    []events.TransactionEventPayload, // EVM transaction events
+    map[flow.RegisterID]flow.RegisterValue, // updated registers
+    error,
+) {
+
+    blockID, err := headers.BlockIDByHeight(flowHeight)
+    if err != nil {
+        return nil, nil, nil, err
+    }
+
+    result, err := results.ByBlockID(blockID)
+    if err != nil {
+        return nil, nil, nil, err
+    }
+
+    executionData, err := executionDataStore.Get(context.Background(), result.ExecutionDataID)
+    if err != nil {
+        return nil, nil, nil,
+            fmt.Errorf("could not get execution data %v for block %d: %w",
+                result.ExecutionDataID, flowHeight, err)
+    }
+
+    evts := flow.EventsList{}
+    payloads := []*ledger.Payload{}
+
+    for _, chunkData := range executionData.ChunkExecutionDatas {
+        evts = append(evts, chunkData.Events...)
+        payloads = append(payloads, chunkData.TrieUpdate.Payloads...)
+    }
+
+    updates := make(map[flow.RegisterID]flow.RegisterValue, len(payloads))
+    for i := len(payloads) - 1; i >= 0; i-- {
+        regID, regVal, err := convert.PayloadToRegister(payloads[i])
+        if err != nil {
+            return nil, nil, nil, err
+        }
+
+        // find the register updates for the root account
+        if regID.Owner == rootAddr {
+            updates[regID] = regVal
+        }
+    }
+
+    // parse EVM events
+    evmBlockEvent, evmTxEvents, err := parseEVMEvents(evts)
+    if err != nil {
+        return nil, nil, nil, err
+    }
+    return evmBlockEvent, evmTxEvents, updates, nil
+}
+
+func verifyEVMRegisterUpdates(
+    registerUpdates map[flow.RegisterID]flow.RegisterValue,
+    evmUpdates map[flow.RegisterID]flow.RegisterValue,
+    blockProviderUpdates map[flow.RegisterID]flow.RegisterValue,
+) error {
+    // skip the register-level validation,
+    // since the registers are not stored at the same slab IDs as the on-chain EVM;
+    // instead, we compare by exporting the logical EVM state, which contains
+    // accounts, codes and slots.
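+    // (VerifyRegisterUpdates below implements a byte-for-byte register diff,
+    // which can be used when a register-level comparison is appropriate.)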
+    return nil
+}
+
+func VerifyRegisterUpdates(expectedUpdates map[flow.RegisterID]flow.RegisterValue, actualUpdates map[flow.RegisterID]flow.RegisterValue) error {
+    missingUpdates := make(map[flow.RegisterID]flow.RegisterValue)
+    additionalUpdates := make(map[flow.RegisterID]flow.RegisterValue)
+    mismatchingUpdates := make(map[flow.RegisterID][2]flow.RegisterValue)
+
+    for k, v := range expectedUpdates {
+        if actualVal, ok := actualUpdates[k]; !ok {
+            missingUpdates[k] = v
+        } else if !bytes.Equal(v, actualVal) {
+            mismatchingUpdates[k] = [2]flow.RegisterValue{v, actualVal}
+        }
+
+        delete(actualUpdates, k)
+    }
+
+    for k, v := range actualUpdates {
+        additionalUpdates[k] = v
+    }
+
+    // Build a combined error message
+    var errorMessage strings.Builder
+
+    if len(missingUpdates) > 0 {
+        errorMessage.WriteString("Missing register updates:\n")
+        for id, value := range missingUpdates {
+            errorMessage.WriteString(fmt.Sprintf("  RegisterKey: %v, ExpectedValue: %x\n", id.Key, value))
+        }
+    }
+
+    if len(additionalUpdates) > 0 {
+        errorMessage.WriteString("Additional register updates:\n")
+        for id, value := range additionalUpdates {
+            errorMessage.WriteString(fmt.Sprintf("  RegisterKey: %v, ActualValue: %x\n", id.Key, value))
+        }
+    }
+
+    if len(mismatchingUpdates) > 0 {
+        errorMessage.WriteString("Mismatching register updates:\n")
+        for id, values := range mismatchingUpdates {
+            errorMessage.WriteString(fmt.Sprintf("  RegisterKey: %v, ExpectedValue: %x, ActualValue: %x\n", id.Key, values[0], values[1]))
+        }
+    }
+
+    if errorMessage.Len() > 0 {
+        return errors.New(errorMessage.String())
+    }
+
+    return nil
+}
diff --git a/fvm/evm/precompiles/abi.go b/fvm/evm/precompiles/abi.go
new file mode 100644
index 00000000000..3d805d9475f
--- /dev/null
+++ b/fvm/evm/precompiles/abi.go
@@ -0,0 +1,231 @@
+package precompiles
+
+import (
+    "encoding/binary"
+    "errors"
+    "math/big"
+
+    gethCommon "github.com/ethereum/go-ethereum/common"
+)
+
+// This package provides fast and efficient utilities needed for ABI encoding
+// and decoding. The encodings are mostly used for testing purposes; if more
+// complex encoding and decoding is needed, please use the abi package and
+// pass the ABIs, though that has a performance overhead.
+const (
+    FixedSizeUnitDataReadSize = 32
+    Bytes4DataReadSize        = 4
+    Bytes8DataReadSize        = 8
+    Bytes32DataReadSize       = 32
+    Uint64ByteSize            = 8
+
+    EncodedBoolSize    = FixedSizeUnitDataReadSize
+    EncodedAddressSize = FixedSizeUnitDataReadSize
+    EncodedBytes32Size = FixedSizeUnitDataReadSize
+    EncodedBytes4Size  = FixedSizeUnitDataReadSize
+    EncodedBytes8Size  = FixedSizeUnitDataReadSize
+    EncodedUint64Size  = FixedSizeUnitDataReadSize
+    EncodedUint256Size = FixedSizeUnitDataReadSize
+)
+
+var ErrInputDataTooSmall = errors.New("input data is too small for decoding")
+var ErrBufferTooSmall = errors.New("buffer too small for encoding")
+var ErrDataTooLarge = errors.New("input data is too large for encoding")
+
+// ReadAddress reads an address from the buffer at index
+func ReadAddress(buffer []byte, index int) (gethCommon.Address, error) {
+    if len(buffer) < index+FixedSizeUnitDataReadSize {
+        return gethCommon.Address{}, ErrInputDataTooSmall
+    }
+    paddedData := buffer[index : index+FixedSizeUnitDataReadSize]
+    // addresses are zero-padded on the left side.
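+    // For example, the address e592427a0aece92de3edee1f18e0157c05861564 is
+    // encoded as the 32-byte word
+    //   000000000000000000000000e592427a0aece92de3edee1f18e0157c05861564
+    // (12 zero bytes followed by the 20 address bytes).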
+    addr := gethCommon.BytesToAddress(
+        paddedData[FixedSizeUnitDataReadSize-gethCommon.AddressLength:])
+    return addr, nil
+}
+
+// EncodeAddress encodes the address and adds it to the buffer at the index
+func EncodeAddress(address gethCommon.Address, buffer []byte, index int) error {
+    if len(buffer) < index+EncodedAddressSize {
+        return ErrBufferTooSmall
+    }
+    copy(buffer[index:index+EncodedAddressSize],
+        gethCommon.LeftPadBytes(address[:], EncodedAddressSize))
+    return nil
+}
+
+// ReadBool reads a boolean from the buffer at the index
+func ReadBool(buffer []byte, index int) (bool, error) {
+    if len(buffer) < index+EncodedBoolSize {
+        return false, ErrInputDataTooSmall
+    }
+    // bools are zero-padded on the left side,
+    // so we only need to read the last byte
+    return uint8(buffer[index+EncodedBoolSize-1]) > 0, nil
+}
+
+// EncodeBool encodes a boolean into a fixed size unit of encoded data
+func EncodeBool(bitSet bool, buffer []byte, index int) error {
+    if len(buffer) < index+EncodedBoolSize {
+        return ErrBufferTooSmall
+    }
+    // bit set with left padding
+    for i := 0; i < EncodedBoolSize; i++ {
+        buffer[index+i] = 0
+    }
+    if bitSet {
+        buffer[index+EncodedBoolSize-1] = 1
+    }
+    return nil
+}
+
+// ReadUint64 reads a uint64 from the buffer at index
+func ReadUint64(buffer []byte, index int) (uint64, error) {
+    if len(buffer) < index+EncodedUint64Size {
+        return 0, ErrInputDataTooSmall
+    }
+    // data is expected to be big endian (zero-padded on the left side)
+    return binary.BigEndian.Uint64(
+        buffer[index+EncodedUint64Size-Uint64ByteSize : index+EncodedUint64Size]), nil
+}
+
+// EncodeUint64 encodes a uint64 into a fixed size unit of encoded data (zero-padded on the left side)
+func EncodeUint64(inp uint64, buffer []byte, index int) error {
+    if len(buffer) < index+EncodedUint64Size {
+        return ErrBufferTooSmall
+    }
+    encoded := make([]byte, 8)
+    binary.BigEndian.PutUint64(encoded, inp)
+    copy(buffer[index:index+EncodedUint64Size],
+        gethCommon.LeftPadBytes(encoded, EncodedUint64Size),
+    )
+    return nil
+}
+
+// ReadUint256 reads a uint256 from the buffer at index
+func ReadUint256(buffer []byte, index int) (*big.Int, error) {
+    if len(buffer) < index+EncodedUint256Size {
+        return nil, ErrInputDataTooSmall
+    }
+    // data is expected to be big endian (zero-padded on the left side)
+    return new(big.Int).SetBytes(buffer[index : index+EncodedUint256Size]), nil
+}
+
+// ReadBytes4 reads a 4-byte slice from the buffer at index
+func ReadBytes4(buffer []byte, index int) ([]byte, error) {
+    if len(buffer) < index+EncodedBytes4Size {
+        return nil, ErrInputDataTooSmall
+    }
+    // fixed-size byte values are zero-padded on the right side.
+    return buffer[index : index+Bytes4DataReadSize], nil
+}
+
+// ReadBytes8 reads an 8-byte slice from the buffer at index
+func ReadBytes8(buffer []byte, index int) ([]byte, error) {
+    if len(buffer) < index+EncodedBytes8Size {
+        return nil, ErrInputDataTooSmall
+    }
+    // fixed-size byte values are zero-padded on the right side.
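+    // e.g. the bytes8 value abcdef1200000000 occupies the first 8 bytes of a
+    // 32-byte word; the remaining 24 bytes are zero padding on the right.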
+    return buffer[index : index+Bytes8DataReadSize], nil
+}
+
+// ReadBytes32 reads a 32-byte slice from the buffer at index
+func ReadBytes32(buffer []byte, index int) ([]byte, error) {
+    if len(buffer) < index+Bytes32DataReadSize {
+        return nil, ErrInputDataTooSmall
+    }
+    return buffer[index : index+Bytes32DataReadSize], nil
+}
+
+// EncodeBytes32 encodes data into a bytes32 value
+func EncodeBytes32(data []byte, buffer []byte, index int) error {
+    if len(data) > EncodedBytes32Size {
+        return ErrDataTooLarge
+    }
+    if len(buffer) < index+EncodedBytes32Size {
+        return ErrBufferTooSmall
+    }
+    copy(buffer[index:index+EncodedBytes32Size],
+        gethCommon.RightPadBytes(data, EncodedBytes32Size),
+    )
+    return nil
+}
+
+// ReadBytes reads variable-length bytes from the buffer
+func ReadBytes(buffer []byte, index int) ([]byte, error) {
+    if len(buffer) < index+EncodedUint64Size {
+        return nil, ErrInputDataTooSmall
+    }
+    // read the offset (into a uint64) and adjust the index
+    offset, err := ReadUint64(buffer, index)
+    if err != nil {
+        return nil, err
+    }
+    index = int(offset)
+    if len(buffer) < index+EncodedUint64Size {
+        return nil, ErrInputDataTooSmall
+    }
+    // read the length of the byte slice
+    length, err := ReadUint64(buffer, index)
+    if err != nil {
+        return nil, err
+    }
+    index += EncodedUint64Size
+    if len(buffer) < index+int(length) {
+        return nil, ErrInputDataTooSmall
+    }
+    return buffer[index : index+int(length)], nil
+}
+
+// SizeNeededForBytesEncoding computes the number of bytes needed for bytes encoding
+func SizeNeededForBytesEncoding(data []byte) int {
+    if len(data) == 0 {
+        return EncodedUint64Size + EncodedUint64Size + FixedSizeUnitDataReadSize
+    }
+    paddedSize := (len(data) / FixedSizeUnitDataReadSize)
+    if len(data)%FixedSizeUnitDataReadSize != 0 {
+        paddedSize += 1
+    }
+    return EncodedUint64Size + EncodedUint64Size + paddedSize*FixedSizeUnitDataReadSize
+}
+
+// EncodeBytes encodes the data offset into the buffer at headerIndex and
+// appends the length and payload to the buffer starting at payloadIndex
+func EncodeBytes(data []byte, buffer []byte, headerIndex, payloadIndex int) error {
+    //// updating offset
+    if len(buffer) < headerIndex+EncodedUint64Size {
+        return ErrBufferTooSmall
+    }
+    dataSize := len(data)
+    // compute the padded data size (rounded up to a multiple of the 32-byte word size)
+    paddedSize := dataSize
+    if dataSize%FixedSizeUnitDataReadSize != 0 {
+        paddedSize = (dataSize/FixedSizeUnitDataReadSize + 1) * FixedSizeUnitDataReadSize
+    }
+    if len(buffer) < payloadIndex+EncodedUint64Size+paddedSize {
+        return ErrBufferTooSmall
+    }
+
+    err := EncodeUint64(uint64(payloadIndex), buffer, headerIndex)
+    if err != nil {
+        return err
+    }
+
+    //// updating payload
+    // pad the data to the padded size
+    if dataSize%FixedSizeUnitDataReadSize != 0 {
+        data = gethCommon.RightPadBytes(data, paddedSize)
+    }
+
+    // add the length
+    err = EncodeUint64(uint64(dataSize), buffer, payloadIndex)
+    if err != nil {
+        return err
+    }
+    payloadIndex += EncodedUint64Size
+    // add the data
+    copy(buffer[payloadIndex:payloadIndex+len(data)], data)
+    return nil
+}
diff --git a/fvm/evm/precompiles/abi_test.go b/fvm/evm/precompiles/abi_test.go
new file mode 100644
index 00000000000..f77804017ee
--- /dev/null
+++ b/fvm/evm/precompiles/abi_test.go
@@ -0,0 +1,131 @@
+package precompiles_test
+
+import (
+    "encoding/hex"
+    "math/big"
+    "testing"
+
+    gethCommon "github.com/ethereum/go-ethereum/common"
+    "github.com/stretchr/testify/require"
+
+    "github.com/onflow/flow-go/fvm/evm/precompiles"
+)
+
+func TestABIEncodingDecodingFunctions(t *testing.T) {
+    t.Parallel()
+
+    t.Run("test address", func(t *testing.T) {
+        encodedAddress, err := hex.DecodeString("000000000000000000000000e592427a0aece92de3edee1f18e0157c05861564")
+        require.NoError(t, err)
+        addr, err := precompiles.ReadAddress(encodedAddress, 0)
+        require.NoError(t, err)
+        expectedAddress := gethCommon.HexToAddress("e592427a0aece92de3edee1f18e0157c05861564")
+        require.Equal(t, expectedAddress, addr)
+        reEncoded := make([]byte, precompiles.EncodedAddressSize)
+        err = precompiles.EncodeAddress(addr, reEncoded, 0)
+        require.NoError(t, err)
+        require.Equal(t, encodedAddress, reEncoded)
+    })
+
+    t.Run("test boolean", func(t *testing.T) {
+        encodedBool, err := hex.DecodeString("0000000000000000000000000000000000000000000000000000000000000001")
+        require.NoError(t, err)
+        ret, err := precompiles.ReadBool(encodedBool, 0)
+        require.NoError(t, err)
+        require.True(t, ret)
+        reEncoded := make([]byte, precompiles.EncodedBoolSize)
+        err = precompiles.EncodeBool(ret, reEncoded, 0)
+        require.NoError(t, err)
+        require.Equal(t, encodedBool, reEncoded)
+    })
+
+    t.Run("test uint64", func(t *testing.T) {
+        encodedUint64, err := hex.DecodeString("0000000000000000000000000000000000000000000000000000000000000046")
+        require.NoError(t, err)
+        ret, err := precompiles.ReadUint64(encodedUint64, 0)
+        require.NoError(t, err)
+        expectedUint64 := uint64(70)
+        require.Equal(t, expectedUint64, ret)
+        reEncoded := make([]byte, precompiles.EncodedUint64Size)
+        err = precompiles.EncodeUint64(ret, reEncoded, 0)
+        require.NoError(t, err)
+        require.Equal(t, encodedUint64, reEncoded)
+    })
+
+    t.Run("test read uint256", func(t *testing.T) {
+        encodedUint256, err := hex.DecodeString("1000000000000000000000000000000000000000000000000000000000000046")
+        require.NoError(t, err)
+        ret, err := precompiles.ReadUint256(encodedUint256, 0)
+        require.NoError(t, err)
+        expectedValue, success := new(big.Int).SetString("7237005577332262213973186563042994240829374041602535252466099000494570602566", 10)
+        require.True(t, success)
+        require.Equal(t, expectedValue, ret)
+    })
+
+    t.Run("test fixed size bytes", func(t *testing.T) {
+        encodedFixedSizeBytes, err := hex.DecodeString("abcdef1200000000000000000000000000000000000000000000000000000000")
+        require.NoError(t, err)
+        ret, err := precompiles.ReadBytes4(encodedFixedSizeBytes, 0)
+        require.NoError(t, err)
+        require.Equal(t, encodedFixedSizeBytes[0:4], ret)
+
+        ret, err = precompiles.ReadBytes8(encodedFixedSizeBytes, 0)
+        require.NoError(t, err)
+        require.Equal(t, encodedFixedSizeBytes[0:8], ret)
+
+        ret, err = precompiles.ReadBytes32(encodedFixedSizeBytes, 0)
+        require.NoError(t, err)
+        require.Equal(t, encodedFixedSizeBytes[0:32], ret)
+
+        reEncoded := make([]byte, precompiles.EncodedBytes32Size)
+        err = precompiles.EncodeBytes32(ret, reEncoded, 0)
+        require.NoError(t, err)
+        require.Equal(t, encodedFixedSizeBytes, reEncoded)
+    })
+
+    t.Run("test read bytes (variable size)", func(t *testing.T) {
+        encodedData, err := hex.DecodeString("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000b48656c6c6f20576f726c64000000000000000000000000000000000000000000")
+        require.NoError(t, err)
+
+        ret, err := precompiles.ReadBytes(encodedData, 0)
+        require.NoError(t, err)
+        expectedData, err := hex.DecodeString("48656c6c6f20576f726c64")
+        require.NoError(t, err)
+        require.Equal(t, expectedData, ret)
+
+        bufferSize := precompiles.SizeNeededForBytesEncoding(expectedData)
+        buffer := make([]byte, bufferSize)
+        err = precompiles.EncodeBytes(expectedData, buffer, 0, precompiles.EncodedUint64Size)
+        require.NoError(t, err)
+        require.Equal(t, encodedData, buffer)
+    })
+
+    t.Run("test size needed for encoding bytes", func(t *testing.T) {
+        // len zero
+        data := []byte{}
+        ret := precompiles.SizeNeededForBytesEncoding(data)
+        offsetAndLenEncodingSize := precompiles.EncodedUint64Size + precompiles.EncodedUint64Size
+        expectedSize := offsetAndLenEncodingSize + precompiles.FixedSizeUnitDataReadSize
+        require.Equal(t, expectedSize, ret)
+
+        // data size 1
+        data = []byte{1}
+        ret = precompiles.SizeNeededForBytesEncoding(data)
+        expectedSize = offsetAndLenEncodingSize + precompiles.FixedSizeUnitDataReadSize
+        require.Equal(t, expectedSize, ret)
+
+        // data size 32
+        data = make([]byte, 32)
+        ret = precompiles.SizeNeededForBytesEncoding(data)
+        expectedSize = offsetAndLenEncodingSize + precompiles.FixedSizeUnitDataReadSize
+        require.Equal(t, expectedSize, ret)
+
+        // data size 33
+        data = make([]byte, 33)
+        ret = precompiles.SizeNeededForBytesEncoding(data)
+        expectedSize = offsetAndLenEncodingSize + precompiles.FixedSizeUnitDataReadSize*2
+        require.Equal(t, expectedSize, ret)
+    })
+
+}
diff --git a/fvm/evm/precompiles/arch.go b/fvm/evm/precompiles/arch.go
new file mode 100644
index 00000000000..bdbe7b0a621
--- /dev/null
+++ b/fvm/evm/precompiles/arch.go
@@ -0,0 +1,255 @@
+package precompiles
+
+import (
+    "fmt"
+
+    "github.com/onflow/flow-go/fvm/evm/types"
+)
+
+const CADENCE_ARCH_PRECOMPILE_NAME = "CADENCE_ARCH"
+
+var (
+    FlowBlockHeightFuncSig = ComputeFunctionSelector("flowBlockHeight", nil)
+
+    ProofVerifierFuncSig = ComputeFunctionSelector(
+        "verifyCOAOwnershipProof",
+        []string{"address", "bytes32", "bytes"},
+    )
+
+    RandomSourceFuncSig = ComputeFunctionSelector("getRandomSource", []string{"uint64"})
+
+    RevertibleRandomFuncSig = ComputeFunctionSelector("revertibleRandom", nil)
+
+    // FlowBlockHeightFixedGas is set to match the `number` opCode (0x43)
+    FlowBlockHeightFixedGas = uint64(2)
+    // ProofVerifierBaseGas covers the cost of decoding, checking the capability
+    // and the resource, and the rest of the operations excluding signature verification
+    ProofVerifierBaseGas = uint64(1_000)
+    // ProofVerifierGasMultiplerPerSignature is set to match `ECRECOVER`,
+    // but we might increase this in the future
+    ProofVerifierGasMultiplerPerSignature = uint64(3_000)
+
+    // RandomSourceGas covers the cost of obtaining random source bytes
+    RandomSourceGas = uint64(1_000)
+
+    // RevertibleRandomGas covers the cost of calculating revertible random bytes
+    RevertibleRandomGas = uint64(1_000)
+
+    // errUnexpectedInput is returned when a function that doesn't expect an
+    // input argument receives one
+    errUnexpectedInput = fmt.Errorf("unexpected input is provided")
+)
+
+// ArchContract returns a precompiled contract for the Cadence Arch contract,
+// which facilitates access from the Flow EVM environment into the Cadence
+// environment. For more details see FLIP 223.
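+//
+// A hedged wiring sketch (the address and the provider closures below are
+// illustrative placeholders, not the production implementations):
+//
+//	arch := ArchContract(
+//		types.Address{0x01},                       // assumed precompile address
+//		func() (uint64, error) { return 42, nil }, // flow block height provider
+//		func(p *types.COAOwnershipProofInContext) (bool, error) { return true, nil },
+//		func(h uint64) ([]byte, error) { return make([]byte, 32), nil },
+//		func() (uint64, error) { return 7, nil },
+//	)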
+func ArchContract(
+    address types.Address,
+    heightProvider func() (uint64, error),
+    proofVer func(*types.COAOwnershipProofInContext) (bool, error),
+    randomSourceProvider func(uint64) ([]byte, error),
+    revertibleRandomGenerator func() (uint64, error),
+) types.PrecompiledContract {
+    return MultiFunctionPrecompiledContract(
+        address,
+        []Function{
+            &flowBlockHeight{heightProvider},
+            &proofVerifier{proofVer},
+            &randomnessSource{randomSourceProvider},
+            &revertibleRandom{revertibleRandomGenerator},
+        },
+    )
+}
+
+type flowBlockHeight struct {
+    flowBlockHeightLookUp func() (uint64, error)
+}
+
+var _ Function = &flowBlockHeight{}
+
+func (c *flowBlockHeight) FunctionSelector() FunctionSelector {
+    return FlowBlockHeightFuncSig
+}
+
+func (c *flowBlockHeight) ComputeGas(input []byte) uint64 {
+    return FlowBlockHeightFixedGas
+}
+
+func (c *flowBlockHeight) Run(input []byte) ([]byte, error) {
+    if len(input) > 0 {
+        return nil, errUnexpectedInput
+    }
+    bh, err := c.flowBlockHeightLookUp()
+    if err != nil {
+        return nil, err
+    }
+    // EVM works natively in 256-bit words;
+    // encoding to 256 bits is the common practice to prevent extra gas consumption for masking.
+    buffer := make([]byte, EncodedUint64Size)
+    return buffer, EncodeUint64(bh, buffer, 0)
+}
+
+type proofVerifier struct {
+    proofVerifier func(*types.COAOwnershipProofInContext) (bool, error)
+}
+
+var _ Function = &proofVerifier{}
+
+func (f *proofVerifier) FunctionSelector() FunctionSelector {
+    return ProofVerifierFuncSig
+}
+
+func (f *proofVerifier) ComputeGas(input []byte) uint64 {
+    // we compute the gas using a fixed base fee plus an extra fee per
+    // signature. Note that the input data is already trimmed of the function
+    // selector and the remainder is the ABI encoding of the inputs.
+
+    // skip to the encoded signature part of the args (skip the address and bytes32 data parts)
+    index := EncodedAddressSize + Bytes32DataReadSize
+    // read the encoded signature bytes
+    encodedSignature, err := ReadBytes(input, index)
+    if err != nil {
+        // if decoding fails, Run would fail anyway, so returning any non-zero value here is fine
+        return ProofVerifierBaseGas
+    }
+    // this method returns the number of signatures from the encoded signature data,
+    // which saves the extra time needed for full decoding;
+    // given that ComputeGas is called before charging the gas, we need to keep
+    // this function as light as possible
+    count, err := types.COAOwnershipProofSignatureCountFromEncoded(encodedSignature)
+    if err != nil {
+        // if decoding fails, Run would fail anyway, so returning any non-zero value here is fine
+        return ProofVerifierBaseGas
+    }
+    return ProofVerifierBaseGas + uint64(count)*ProofVerifierGasMultiplerPerSignature
+}
+
+func (f *proofVerifier) Run(input []byte) ([]byte, error) {
+    proof, err := DecodeABIEncodedProof(input)
+    if err != nil {
+        return nil, err
+    }
+    verified, err := f.proofVerifier(proof)
+    if err != nil {
+        return nil, err
+    }
+
+    buffer := make([]byte, EncodedBoolSize)
+    return buffer, EncodeBool(verified, buffer, 0)
+}
+
+var _ Function = &randomnessSource{}
+
+type randomnessSource struct {
+    randomSourceProvider func(uint64) ([]byte, error)
+}
+
+func (r *randomnessSource) FunctionSelector() FunctionSelector {
+    return RandomSourceFuncSig
+}
+
+func (r *randomnessSource) ComputeGas(input []byte) uint64 {
+    return RandomSourceGas
+}
+
+func (r *randomnessSource) Run(input []byte) ([]byte, error) {
+    height, err := ReadUint64(input, 0)
+    if err != nil {
+        return nil, err
+    }
+    rand, err := r.randomSourceProvider(height)
+    if err != nil {
+        return nil, err
+    }
+
+    buf := make([]byte, EncodedBytes32Size)
+    err = EncodeBytes32(rand, buf, 0)
+    if err != nil {
+        return nil, err
+    }
+
+    return buf, nil
+}
+
+var _ Function = &revertibleRandom{}
+
+type revertibleRandom struct {
+    revertibleRandomGenerator func() (uint64, error)
+}
+
+func (r *revertibleRandom) FunctionSelector() FunctionSelector {
+    return RevertibleRandomFuncSig
+}
+
+func (r *revertibleRandom) ComputeGas(input []byte) uint64 {
+    return RevertibleRandomGas
+}
+
+func (r *revertibleRandom) Run(input []byte) ([]byte, error) {
+    rand, err := r.revertibleRandomGenerator()
+    if err != nil {
+        return nil, err
+    }
+
+    buf := make([]byte, EncodedUint64Size)
+    err = EncodeUint64(rand, buf, 0)
+    if err != nil {
+        return nil, err
+    }
+
+    return buf, nil
+}
+
+func DecodeABIEncodedProof(input []byte) (*types.COAOwnershipProofInContext, error) {
+    index := 0
+    caller, err := ReadAddress(input, index)
+    index += FixedSizeUnitDataReadSize
+    if err != nil {
+        return nil, err
+    }
+
+    hash, err := ReadBytes32(input, index)
+    index += Bytes32DataReadSize
+    if err != nil {
+        return nil, err
+    }
+
+    encodedProof, err := ReadBytes(input, index)
+    if err != nil {
+        return nil, err
+    }
+
+    return types.NewCOAOwnershipProofInContext(
+        hash,
+        types.Address(caller),
+        encodedProof,
+    )
+}
+
+func ABIEncodeProof(proof *types.COAOwnershipProofInContext) ([]byte, error) {
+    encodedProof, err := proof.COAOwnershipProof.Encode()
+    if err != nil {
+        return nil, err
+    }
+    bufferSize := EncodedAddressSize +
+        EncodedBytes32Size +
+        SizeNeededForBytesEncoding(encodedProof)
+
+    abiEncodedData := make([]byte, bufferSize)
+    index := 0
+    err = EncodeAddress(proof.EVMAddress.ToCommon(), abiEncodedData, index)
+    if err != nil {
+        return nil, err
+    }
+    index += EncodedAddressSize
+    err = EncodeBytes32(proof.SignedData, abiEncodedData, index)
+    if err != nil {
+        return nil, err
+    }
+    index += EncodedBytes32Size
+    err = EncodeBytes(encodedProof, abiEncodedData, index, index+EncodedUint64Size)
+    if err != nil {
+        return nil, err
+    }
+    return abiEncodedData, nil
+}
diff --git a/fvm/evm/precompiles/arch_test.go b/fvm/evm/precompiles/arch_test.go
new file mode 100644
index 00000000000..840cf1bd5e9
--- /dev/null
+++ b/fvm/evm/precompiles/arch_test.go
@@ -0,0 +1,132 @@
+package precompiles_test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/require"
+
+    "github.com/onflow/flow-go/fvm/environment"
+    "github.com/onflow/flow-go/fvm/evm/precompiles"
+    "github.com/onflow/flow-go/fvm/evm/testutils"
+    "github.com/onflow/flow-go/fvm/evm/types"
+)
+
+func TestArchContract(t *testing.T) {
+
+    t.Run("test block height", func(t *testing.T) {
+        address := testutils.RandomAddress(t)
+        height := uint64(12)
+        pc := precompiles.ArchContract(
+            address,
+            func() (uint64, error) {
+                return height, nil
+            },
+            nil,
+            nil,
+            nil,
+        )
+
+        input := precompiles.FlowBlockHeightFuncSig.Bytes()
+        require.Equal(t, address, pc.Address())
+        require.Equal(t, precompiles.FlowBlockHeightFixedGas, pc.RequiredGas(input))
+        ret, err := pc.Run(input)
+        require.NoError(t, err)
+
+        expected := make([]byte, 32)
+        expected[31] = 12
+        require.Equal(t, expected, ret)
+
+        _, err = pc.Run([]byte{1, 2, 3})
+        require.Error(t, err)
+    })
+
+    t.Run("test get random source", func(t *testing.T) {
+        address := testutils.RandomAddress(t)
+        rand := make([]byte, environment.RandomSourceHistoryLength)
+        err := precompiles.EncodeBytes32([]byte{13, 23}, rand, 0)
+        require.NoError(t, err)
+
+        pc := precompiles.ArchContract(
+            address,
+            nil,
+            nil,
+            func(u uint64) ([]byte, error) {
+                return rand, nil
+            },
+            nil,
+        )
+
+        require.Equal(t, address, pc.Address())
+
+        height := make([]byte, 32)
+        require.NoError(t, precompiles.EncodeUint64(13, height, 0))
+
+        input := append(precompiles.RandomSourceFuncSig.Bytes(), height...)
+        require.Equal(t, precompiles.RandomSourceGas, pc.RequiredGas(input))
+
+        ret, err := pc.Run(input)
+        require.Len(t, ret, environment.RandomSourceHistoryLength)
+        require.NoError(t, err)
+
+        resultRand, err := precompiles.ReadBytes32(ret, 0)
+        require.NoError(t, err)
+        require.Equal(t, rand, resultRand)
+    })
+
+    t.Run("test revertible random", func(t *testing.T) {
+        address := testutils.RandomAddress(t)
+        rand := uint64(1337)
+        pc := precompiles.ArchContract(
+            address,
+            nil,
+            nil,
+            nil,
+            func() (uint64, error) {
+                return rand, nil
+            },
+        )
+
+        require.Equal(t, address, pc.Address())
+
+        input := precompiles.RevertibleRandomFuncSig.Bytes()
+        require.Equal(t, precompiles.RevertibleRandomGas, pc.RequiredGas(input))
+
+        ret, err := pc.Run(input)
+        require.NoError(t, err)
+
+        resultRand, err := precompiles.ReadUint64(ret, 0)
+        require.NoError(t, err)
+        require.Equal(t, rand, resultRand)
+    })
+
+    t.Run("test proof verification", func(t *testing.T) {
+        proof := testutils.COAOwnershipProofInContextFixture(t)
+        pc := precompiles.ArchContract(
+            testutils.RandomAddress(t),
+            nil,
+            func(p *types.COAOwnershipProofInContext) (bool, error) {
+                require.Equal(t, proof, p)
+                return true, nil
+            },
+            nil,
+            nil,
+        )
+
+        abiEncodedData, err := precompiles.ABIEncodeProof(proof)
+        require.NoError(t, err)
+
+        // add function selector to the input
+        input := append(precompiles.ProofVerifierFuncSig.Bytes(), abiEncodedData...)
+
+        expectedGas := precompiles.ProofVerifierBaseGas +
+            uint64(len(proof.KeyIndices))*precompiles.ProofVerifierGasMultiplerPerSignature
+        require.Equal(t, expectedGas, pc.RequiredGas(input))
+
+        ret, err := pc.Run(input)
+        require.NoError(t, err)
+
+        expected := make([]byte, 32)
+        expected[31] = 1
+        require.Equal(t, expected, ret)
+    })
+}
diff --git a/fvm/evm/precompiles/precompile.go b/fvm/evm/precompiles/precompile.go
new file mode 100644
index 00000000000..070eab83328
--- /dev/null
+++ b/fvm/evm/precompiles/precompile.go
@@ -0,0 +1,79 @@
+package precompiles
+
+import (
+    "errors"
+
+    "github.com/onflow/flow-go/fvm/evm/types"
+)
+
+// InvalidMethodCallGasUsage captures how much gas we charge for an invalid method call
+const InvalidMethodCallGasUsage = uint64(1)
+
+// ErrInvalidMethodCall is returned when the method is not available on the contract
+var ErrInvalidMethodCall = errors.New("invalid method call")
+
+// Function is an interface for a function in a multi-function precompile contract
+type Function interface {
+    // FunctionSelector returns the function selector bytes for this function
+    FunctionSelector() FunctionSelector
+
+    // ComputeGas computes the gas needed for the given input
+    ComputeGas(input []byte) uint64
+
+    // Run runs the function on the given data
+    Run(input []byte) ([]byte, error)
+}
+
+// MultiFunctionPrecompiledContract constructs a multi-function precompile smart contract
+func MultiFunctionPrecompiledContract(
+    address types.Address,
+    functions []Function,
+) types.PrecompiledContract {
+    pc := &precompile{
+        functions: make(map[FunctionSelector]Function),
+        address:   address,
+    }
+    for _, f := range functions {
+        pc.functions[f.FunctionSelector()] = f
+    }
+    return pc
+}
+
+type precompile struct {
+    address   types.Address
+    functions map[FunctionSelector]Function
+}
+
+func (p *precompile) Address() types.Address {
+    return p.address
+}
+
+// RequiredGas calculates the contract gas use
+func (p *precompile) RequiredGas(input []byte) (output uint64) {
+    if len(input) < FunctionSelectorLength {
+        return InvalidMethodCallGasUsage
+    }
+    sig, data := SplitFunctionSelector(input)
+    callable, found := p.functions[sig]
+    if !found {
+        return InvalidMethodCallGasUsage
+    }
+    return callable.ComputeGas(data)
+}
+
+// Run runs the precompiled contract
+func (p *precompile) Run(input []byte) (output []byte, err error) {
+    if len(input) < FunctionSelectorLength {
+        return nil, ErrInvalidMethodCall
+    }
+    sig, data := SplitFunctionSelector(input)
+    callable, found := p.functions[sig]
+    if !found {
+        return nil, ErrInvalidMethodCall
+    }
+    return callable.Run(data)
+}
+
+func (p *precompile) Name() string {
+    return CADENCE_ARCH_PRECOMPILE_NAME
+}
diff --git a/fvm/evm/precompiles/precompile_test.go b/fvm/evm/precompiles/precompile_test.go
new file mode 100644
index 00000000000..bdfabd9a1c6
--- /dev/null
+++ b/fvm/evm/precompiles/precompile_test.go
@@ -0,0 +1,73 @@
+package precompiles_test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/require"
+
+    "github.com/onflow/flow-go/fvm/evm/precompiles"
+    "github.com/onflow/flow-go/fvm/evm/testutils"
+)
+
+func TestMultiFunctionContract(t *testing.T) {
+    t.Parallel()
+
+    address := testutils.RandomAddress(t)
+    sig := precompiles.FunctionSelector{1, 2, 3, 4}
+    data := "data"
+    input := append(sig[:], data...)
+    gas := uint64(20)
+    output := []byte("output")
+
+    pc := precompiles.MultiFunctionPrecompiledContract(address, []precompiles.Function{
+        &mockedFunction{
+            FunctionSelectorFunc: func() precompiles.FunctionSelector {
+                return sig
+            },
+            ComputeGasFunc: func(inp []byte) uint64 {
+                require.Equal(t, []byte(data), inp)
+                return gas
+            },
+            RunFunc: func(inp []byte) ([]byte, error) {
+                require.Equal(t, []byte(data), inp)
+                return output, nil
+            },
+        }})
+
+    require.Equal(t, address, pc.Address())
+    require.Equal(t, gas, pc.RequiredGas(input))
+    ret, err := pc.Run(input)
+    require.NoError(t, err)
+    require.Equal(t, output, ret)
+
+    input2 := []byte("non existing signature and data")
+    _, err = pc.Run(input2)
+    require.Equal(t, precompiles.ErrInvalidMethodCall, err)
+}
+
+type mockedFunction struct {
+    FunctionSelectorFunc func() precompiles.FunctionSelector
+    ComputeGasFunc       func(input []byte) uint64
+    RunFunc              func(input []byte) ([]byte, error)
+}
+
+func (mf *mockedFunction) FunctionSelector() precompiles.FunctionSelector {
+    if mf.FunctionSelectorFunc == nil {
+        panic("method not set for mocked function")
+    }
+    return mf.FunctionSelectorFunc()
+}
+
+func (mf *mockedFunction) ComputeGas(input []byte) uint64 {
+    if mf.ComputeGasFunc == nil {
+        panic("method not set for mocked function")
+    }
+    return mf.ComputeGasFunc(input)
+}
+
+func (mf *mockedFunction) Run(input []byte) ([]byte, error) {
+    if mf.RunFunc == nil {
+        panic("method not set for mocked function")
+    }
+    return mf.RunFunc(input)
+}
diff --git a/fvm/evm/precompiles/replayer.go b/fvm/evm/precompiles/replayer.go
new file mode 100644
index 00000000000..61bedecbaf3
--- /dev/null
+++ b/fvm/evm/precompiles/replayer.go
@@ -0,0 +1,81 @@
+package precompiles
+
+import (
+    "errors"
+    "fmt"
+
+    "github.com/onflow/flow-go/fvm/evm/types"
+)
+
+var (
+    // errInvalidPrecompiledContractCalls is returned when an invalid list of
+    // precompiled contract calls is passed
+    errInvalidPrecompiledContractCalls = fmt.Errorf("invalid list of precompiled contract calls")
+    // errUnexpectedCall is returned when a call to the precompile is not expected
+    errUnexpectedCall = fmt.Errorf("unexpected call")
+)
+
+// AggregatedPrecompiledCallsToPrecompiledContracts
+// converts an aggregated set of precompile calls
+// into a list of replayer precompiled contracts
+func AggregatedPrecompiledCallsToPrecompiledContracts(apc types.AggregatedPrecompiledCalls) []types.PrecompiledContract {
+    res := make([]types.PrecompiledContract, 0)
+    for i := range apc {
+        // take the address of the slice element (not the loop variable),
+        // so each contract references a distinct set of expected calls
+        res = append(res, NewReplayerPrecompiledContract(&apc[i]))
+    }
+    return res
+}
+
+// ReplayerPrecompiledContract is a precompiled contract
+// that replays the outputs based on the input
+type ReplayerPrecompiledContract struct {
+    expectedCalls              *types.PrecompiledCalls
+    requiredGasIndex, runIndex int
+}
+
+// NewReplayerPrecompiledContract constructs a ReplayerPrecompiledContract
+func NewReplayerPrecompiledContract(
+    expectedCalls *types.PrecompiledCalls,
+) *ReplayerPrecompiledContract {
+    if expectedCalls == nil {
+        panic(errInvalidPrecompiledContractCalls)
+    }
+    return &ReplayerPrecompiledContract{
+        expectedCalls: expectedCalls,
+    }
+}
+
+func (p *ReplayerPrecompiledContract) Address() types.Address {
+    return p.expectedCalls.Address
+}
+
+func (p *ReplayerPrecompiledContract) RequiredGas(input []byte) (output uint64) {
+    if p.requiredGasIndex >= len(p.expectedCalls.RequiredGasCalls) {
+        panic(errUnexpectedCall)
+    }
+    output = p.expectedCalls.RequiredGasCalls[p.requiredGasIndex]
+    p.requiredGasIndex++
+    return
+}
+
+func (p *ReplayerPrecompiledContract) Run(input []byte) (output []byte, err error) {
+    if p.runIndex >= len(p.expectedCalls.RunCalls) {
+        panic(errUnexpectedCall)
+    }
+    output = p.expectedCalls.RunCalls[p.runIndex].Output
+    errMsg := p.expectedCalls.RunCalls[p.runIndex].ErrorMsg
+    if len(errMsg) > 0 {
+        err = errors.New(errMsg)
+    }
+    p.runIndex++
+    return
+}
+
+func (p *ReplayerPrecompiledContract) HasReplayedAll() bool {
+    return len(p.expectedCalls.RequiredGasCalls) == p.requiredGasIndex &&
+        len(p.expectedCalls.RunCalls) == p.runIndex
+}
+
+func (p *ReplayerPrecompiledContract) Name() string {
+    return CADENCE_ARCH_PRECOMPILE_NAME
+}
diff --git a/fvm/evm/precompiles/replayer_test.go b/fvm/evm/precompiles/replayer_test.go
new file mode 100644
index 00000000000..02ee281a74c
--- /dev/null
+++ b/fvm/evm/precompiles/replayer_test.go
@@ -0,0 +1,61 @@
+package precompiles_test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+
+    "github.com/onflow/flow-go/fvm/evm/precompiles"
+    "github.com/onflow/flow-go/fvm/evm/testutils"
+    "github.com/onflow/flow-go/fvm/evm/types"
+)
+
+func TestReplayer(t *testing.T) {
+
+    address := testutils.RandomAddress(t)
+    input1 := []byte{0, 1}
+    input2 := []byte{2, 3}
+    gas1 := uint64(1)
+    gas2 := uint64(2)
+    output1 := []byte{4, 5}
+    output2 := []byte{}
+    errMsg2 := "some error message"
+
+    pc := &types.PrecompiledCalls{
+        Address: address,
+        RequiredGasCalls: []uint64{
+            gas1,
+            gas2,
+        },
+        RunCalls: []types.RunCall{
+            {
+                Output: output1,
+            },
+            {
+                Output:   output2,
+                ErrorMsg: errMsg2,
+            },
+        },
+    }
+
+    rep := precompiles.NewReplayerPrecompiledContract(pc)
+    require.Equal(t, address, rep.Address())
+    require.False(t, rep.HasReplayedAll())
+
+    require.Equal(t, gas1, rep.RequiredGas(input1))
+    ret, err := rep.Run(input1)
+    require.NoError(t, err)
+    require.Equal(t, output1, ret)
+    require.False(t, rep.HasReplayedAll())
+
+    require.Equal(t, gas2, rep.RequiredGas(input2))
+    ret, err = rep.Run(input2)
+    require.Equal(t, errMsg2, err.Error())
+    require.Equal(t, output2, ret)
+
+    require.True(t, rep.HasReplayedAll())
+
+    assert.Panics(t, func() { _ = rep.RequiredGas(input2) }, "expected to panic")
+    assert.Panics(t, func() { _, _ = rep.Run(input2) }, "expected to panic")
+}
diff --git a/fvm/evm/precompiles/selector.go b/fvm/evm/precompiles/selector.go
new file mode 100644
index 00000000000..a62c8f5b9ac
--- /dev/null
+++ b/fvm/evm/precompiles/selector.go
@@ -0,0 +1,35 @@
+package precompiles
+
+import (
+    "fmt"
+    "strings"
+
+    gethCrypto "github.com/ethereum/go-ethereum/crypto"
+)
+
+const FunctionSelectorLength = 4
+
+// FunctionSelector is derived as the first 4 bytes of the Keccak hash of the
+// ASCII form of the signature of the method
+type FunctionSelector [FunctionSelectorLength]byte
+
+func (fs FunctionSelector) Bytes() []byte {
+    return fs[:]
+}
+
+// ComputeFunctionSelector computes the function selector
+// given the canonical name of the function and its args;
+// for example, the canonical format for int is int256
+func ComputeFunctionSelector(name string, args []string) FunctionSelector {
+    var sig FunctionSelector
+    input := fmt.Sprintf("%v(%v)", name, strings.Join(args, ","))
+    copy(sig[0:FunctionSelectorLength], gethCrypto.Keccak256([]byte(input))[:FunctionSelectorLength])
+    return sig
+}
+
+// SplitFunctionSelector splits the function selector from the input data and
+// returns the rest of the data
+func SplitFunctionSelector(input []byte) (FunctionSelector, []byte) {
+    var funcSig FunctionSelector
+    copy(funcSig[:], input[0:FunctionSelectorLength])
+    return funcSig, input[FunctionSelectorLength:]
+}
diff --git a/fvm/evm/precompiles/selector_test.go b/fvm/evm/precompiles/selector_test.go
new file mode 100644
index 00000000000..d6f36b9fffe
--- /dev/null
+++ b/fvm/evm/precompiles/selector_test.go
@@ -0,0 +1,27 @@
+package precompiles_test
+
+import (
+    "testing"
+
+    gethCrypto "github.com/ethereum/go-ethereum/crypto"
+    "github.com/stretchr/testify/require"
+
+    "github.com/onflow/flow-go/fvm/evm/precompiles"
+)
+
+func TestFunctionSelector(t *testing.T) {
+    t.Parallel()
+
+    expected := gethCrypto.Keccak256([]byte("test()"))[:4]
+    require.Equal(t, expected, precompiles.ComputeFunctionSelector("test", nil).Bytes())
+
+    expected = gethCrypto.Keccak256([]byte("test(uint32,uint16)"))[:precompiles.FunctionSelectorLength]
+    require.Equal(t, expected,
+        precompiles.ComputeFunctionSelector("test", []string{"uint32", "uint16"}).Bytes())
+
+    selector := []byte{1, 2, 3, 4}
+    data := []byte{5, 6, 7, 8}
+    retSelector, retData := precompiles.SplitFunctionSelector(append(selector, data...))
+    require.Equal(t, selector, retSelector[:])
+    require.Equal(t, data, retData)
+}
diff --git a/fvm/evm/stdlib/checking.go b/fvm/evm/stdlib/checking.go
new file mode 100644
index 00000000000..28ec292f3bc
--- /dev/null
+++ b/fvm/evm/stdlib/checking.go
@@ -0,0 +1,70 @@
+package stdlib
+
+import (
+    "github.com/onflow/cadence/common"
+    "github.com/onflow/cadence/runtime"
+
+    "github.com/onflow/flow-go/fvm/environment"
+)
+
+// checkingInterface is a runtime.Interface implementation
+// that can be used for ParseAndCheckProgram.
+// It is not suitable for execution.
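+//
+// A minimal usage sketch (rt, contractCode, codes, and location are assumed
+// placeholders; the exact ParseAndCheckProgram call shape may differ):
+//
+//	iface := &checkingInterface{SystemContractCodes: codes}
+//	program, err := rt.ParseAndCheckProgram(
+//		contractCode,
+//		runtime.Context{Interface: iface, Location: location},
+//	)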
+type checkingInterface struct {
+    runtime.EmptyRuntimeInterface
+    SystemContractCodes   map[common.Location][]byte
+    Programs              map[runtime.Location]*runtime.Program
+    cryptoContractAddress common.Address
+}
+
+var _ runtime.Interface = &checkingInterface{}
+
+func (i *checkingInterface) ResolveLocation(
+    identifiers []runtime.Identifier,
+    location runtime.Location,
+) (
+    []runtime.ResolvedLocation,
+    error,
+) {
+    return environment.ResolveLocation(
+        identifiers,
+        location,
+        nil,
+        i.cryptoContractAddress,
+    )
+}
+
+func (i *checkingInterface) GetOrLoadProgram(
+    location runtime.Location,
+    load func() (*runtime.Program, error),
+) (
+    program *runtime.Program,
+    err error,
+) {
+    if i.Programs == nil {
+        i.Programs = map[runtime.Location]*runtime.Program{}
+    }
+
+    var ok bool
+    program, ok = i.Programs[location]
+    if ok {
+        return
+    }
+
+    program, err = load()
+
+    // NOTE: important: still set the program (even an empty one),
+    // even if an error occurred
+    i.Programs[location] = program
+
+    return
+}
+
+func (i *checkingInterface) GetCode(location common.Location) ([]byte, error) {
+    return i.SystemContractCodes[location], nil
+}
+
+func (i *checkingInterface) GetAccountContractCode(location common.AddressLocation) (code []byte, err error) {
+    return i.SystemContractCodes[location], nil
+}
diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc
new file mode 100644
index 00000000000..66eb01b40a7
--- /dev/null
+++ b/fvm/evm/stdlib/contract.cdc
@@ -0,0 +1,1031 @@
+import Crypto
+import "NonFungibleToken"
+import "FungibleToken"
+import "FlowToken"
+
+/*
+
+    The Flow EVM contract defines important types and functionality
+    to allow Cadence code and Flow SDKs to interface
+    with the Ethereum Virtual Machine environment on Flow.
+
+    The EVM contract emits events when relevant actions happen in Flow EVM,
+    such as creating new blocks, executing transactions, and bridging FLOW.
+
+    This contract also defines Cadence-Owned Account functionality,
+    which is currently the only way for Cadence code to interact with Flow EVM.
+
+    Additionally, functionality is provided for common EVM types
+    such as addresses, balances, ABIs, transaction results, and more.
+
+    The EVM contract is deployed to the Flow Service Account on every network,
+    and much of its functionality is directly connected to the protocol software
+    to allow interaction with the EVM.
+
+    See additional EVM documentation here: https://developers.flow.com/evm/about
+
+*/
+
+access(all) contract EVM {
+
+    /// The block executed event is emitted when a new block is created,
+    /// which always happens when a transaction is executed.
+    access(all) event BlockExecuted (
+        // height or number of the block
+        height: UInt64,
+        // hash of the block
+        hash: [UInt8; 32],
+        // timestamp of the block creation
+        timestamp: UInt64,
+        // total Flow supply
+        totalSupply: Int,
+        // all gas used in the block by transactions included
+        totalGasUsed: UInt64,
+        // parent block hash
+        parentHash: [UInt8; 32],
+        // root hash of all the transaction receipts
+        receiptRoot: [UInt8; 32],
+        // root hash of all the transaction hashes
+        transactionHashRoot: [UInt8; 32],
+        /// value returned for the PREVRANDAO opcode
+        prevrandao: [UInt8; 32],
+    )
+
+    /// The transaction executed event is emitted every time a transaction
+    /// is executed by the EVM (even if it failed).
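+    /// Note: a failed transaction is still included in a block;
+    /// see the Status enum in this contract for the distinction
+    /// between invalid and failed transactions.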
+    access(all) event TransactionExecuted (
+        // hash of the transaction
+        hash: [UInt8; 32],
+        // index of the transaction in a block
+        index: UInt16,
+        // type of the transaction
+        type: UInt8,
+        // RLP encoded transaction payload
+        payload: [UInt8],
+        // code indicating a specific validation (201-300) or execution (301-400) error
+        errorCode: UInt16,
+        // a human-readable message about the error (if any)
+        errorMessage: String,
+        // the amount of gas the transaction used
+        gasConsumed: UInt64,
+        // if the transaction was a deployment, contains the newly deployed contract address
+        contractAddress: String,
+        // RLP encoded logs
+        logs: [UInt8],
+        // block height in which the transaction was included
+        blockHeight: UInt64,
+        /// captures the hex encoded data that is returned from
+        /// the evm. For contract deployments
+        /// it returns the code deployed to
+        /// the address provided in the contractAddress field.
+        /// in case of revert, the smart contract custom error message
+        /// is also returned here (see EIP-140 for more details).
+        returnedData: [UInt8],
+        /// captures the input and output of the calls (rlp encoded) to the extra
+        /// precompiled contracts (e.g. Cadence Arch) during the transaction execution.
+        /// This data helps to replay the transactions without the need to
+        /// have access to the full cadence state data.
+        precompiledCalls: [UInt8],
+        /// stateUpdateChecksum provides a means to validate
+        /// the updates to the storage when re-executing a transaction off-chain.
+        stateUpdateChecksum: [UInt8; 4]
+    )
+
+    /// FLOWTokensDeposited is emitted when FLOW tokens are bridged
+    /// into the EVM environment. Note that this event is not emitted
+    /// for transfers of flow tokens between two EVM addresses.
+    /// Similar to the FungibleToken.Deposited event,
+    /// this event includes a depositedUUID that captures the
+    /// uuid of the source vault.
+    access(all) event FLOWTokensDeposited (
+        address: String,
+        amount: UFix64,
+        depositedUUID: UInt64,
+        balanceAfterInAttoFlow: UInt
+    )
+
+    /// FLOWTokensWithdrawn is emitted when FLOW tokens are bridged
+    /// out of the EVM environment. Note that this event is not emitted
+    /// for transfers of flow tokens between two EVM addresses.
+    /// Similar to the FungibleToken.Withdrawn event,
+    /// this event includes a withdrawnUUID that captures the
+    /// uuid of the returning vault.
+    access(all) event FLOWTokensWithdrawn (
+        address: String,
+        amount: UFix64,
+        withdrawnUUID: UInt64,
+        balanceAfterInAttoFlow: UInt
+    )
+
+    /// BridgeAccessorUpdated is emitted when the BridgeAccessor Capability
+    /// is updated in the stored BridgeRouter along with identifying
+    /// information about both.
+    access(all) event BridgeAccessorUpdated (
+        routerType: Type,
+        routerUUID: UInt64,
+        routerAddress: Address,
+        accessorType: Type,
+        accessorUUID: UInt64,
+        accessorAddress: Address
+    )
+
+    /// EVMBlock contains information about an executed block.
+    access(all) struct EVMBlock {
+        access(all) let height: UInt64
+
+        access(all) let hash: String
+
+        access(all) let totalSupply: Int
+
+        access(all) let timestamp: UInt64
+
+        init(height: UInt64, hash: String, totalSupply: Int, timestamp: UInt64) {
+            self.height = height
+            self.hash = hash
+            self.totalSupply = totalSupply
+            self.timestamp = timestamp
+        }
+    }
+
+    /// Returns the latest executed block.
+    access(all)
+    fun getLatestBlock(): EVMBlock {
+        return InternalEVM.getLatestBlock() as! EVMBlock
+    }
+
+    /// EVMAddress is an EVM-compatible address
+    access(all) struct EVMAddress {
+
+        /// Bytes of the address
+        access(all) let bytes: [UInt8; 20]
+
+        /// Constructs a new EVM address from the given byte representation
+        view init(bytes: [UInt8; 20]) {
+            self.bytes = bytes
+        }
+
+        /// Balance of the address
+        access(all)
+        view fun balance(): Balance {
+            let balance = InternalEVM.balance(
+                address: self.bytes
+            )
+            return Balance(attoflow: balance)
+        }
+
+        /// Nonce of the address
+        access(all)
+        fun nonce(): UInt64 {
+            return InternalEVM.nonce(
+                address: self.bytes
+            )
+        }
+
+        /// Code of the address
+        access(all)
+        fun code(): [UInt8] {
+            return InternalEVM.code(
+                address: self.bytes
+            )
+        }
+
+        /// CodeHash of the address
+        access(all)
+        fun codeHash(): [UInt8] {
+            return InternalEVM.codeHash(
+                address: self.bytes
+            )
+        }
+
+        /// Deposits the given vault into the EVM account with the given address
+        access(all)
+        fun deposit(from: @FlowToken.Vault) {
+            let amount = from.balance
+            if amount == 0.0 {
+                destroy from
+                return
+            }
+            let depositedUUID = from.uuid
+            InternalEVM.deposit(
+                from: <-from,
+                to: self.bytes
+            )
+            emit FLOWTokensDeposited(
+                address: self.toString(),
+                amount: amount,
+                depositedUUID: depositedUUID,
+                balanceAfterInAttoFlow: self.balance().attoflow
+            )
+        }
+
+        /// Serializes the address to a hex string without the 0x prefix
+        /// Future implementations should pass data to InternalEVM for native serialization
+        access(all)
+        view fun toString(): String {
+            return String.encodeHex(self.bytes.toVariableSized())
+        }
+
+        /// Compares the address with another address
+        access(all)
+        view fun equals(_ other: EVMAddress): Bool {
+            return self.bytes == other.bytes
+        }
+    }
+
+    /// Converts a hex string to an EVM address if the string is a valid hex string
+    /// Future implementations should pass data to InternalEVM for native deserialization
+    access(all)
+    fun addressFromString(_ asHex: String): EVMAddress {
+        pre {
+            asHex.length == 40 || asHex.length == 42:
+                "EVM.addressFromString(): Invalid hex string length for an EVM address. The provided string is \(asHex.length), but the length must be 40 or 42."
+        }
+        // Strip the 0x prefix if it exists
+        var withoutPrefix = (asHex[1] == "x" ? asHex.slice(from: 2, upTo: asHex.length) : asHex).toLower()
+        let bytes = withoutPrefix.decodeHex().toConstantSized<[UInt8; 20]>()!
+        return EVMAddress(bytes: bytes)
+    }
+
+    /// EVMBytes is a type wrapper used for ABI encoding/decoding into the
+    /// Solidity `bytes` type
+    access(all) struct EVMBytes {
+
+        /// Byte array representing the `bytes` value
+        access(all) let value: [UInt8]
+
+        view init(value: [UInt8]) {
+            self.value = value
+        }
+    }
+
+    /// EVMBytes4 is a type wrapper used for ABI encoding/decoding into the
+    /// Solidity `bytes4` type
+    access(all) struct EVMBytes4 {
+
+        /// Byte array representing the `bytes4` value
+        access(all) let value: [UInt8; 4]
+
+        view init(value: [UInt8; 4]) {
+            self.value = value
+        }
+    }
+
+    /// EVMBytes32 is a type wrapper used for ABI encoding/decoding into the
+    /// Solidity `bytes32` type
+    access(all) struct EVMBytes32 {
+
+        /// Byte array representing the `bytes32` value
+        access(all) let value: [UInt8; 32]
+
+        view init(value: [UInt8; 32]) {
+            self.value = value
+        }
+    }
+
+    access(all) struct Balance {
+
+        /// The balance in atto-FLOW
+        /// Atto-FLOW is the smallest denomination of FLOW (1 FLOW = 1e18 atto-FLOW)
+        /// that is used to store account balances inside EVM,
+        /// similar to the way WEI is used to store ETH divisible to 18 decimal places.
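+        /// For example, 1.0 FLOW is stored as 1_000_000_000_000_000_000 atto-FLOW.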
+        access(all) var attoflow: UInt
+
+        /// Constructs a new balance
+        access(all)
+        view init(attoflow: UInt) {
+            self.attoflow = attoflow
+        }
+
+        /// Sets the balance by a UFix64 (8 decimal places), the format
+        /// that is used in Cadence to store FLOW tokens.
+        access(all)
+        fun setFLOW(flow: UFix64){
+            self.attoflow = InternalEVM.castToAttoFLOW(balance: flow)
+        }
+
+        /// Casts the balance to a UFix64 (rounding down)
+        /// Warning! Casting a balance to a UFix64, which supports a lower level of
+        /// precision (8 decimal places compared to 18), might result in a rounding-down error.
+        /// Use the inAttoFLOW function if you need more accuracy.
+        access(all)
+        view fun inFLOW(): UFix64 {
+            return InternalEVM.castToFLOW(balance: self.attoflow)
+        }
+
+        /// Returns the balance in atto-FLOW
+        access(all)
+        view fun inAttoFLOW(): UInt {
+            return self.attoflow
+        }
+
+        /// Returns true if the balance is zero
+        access(all)
+        fun isZero(): Bool {
+            return self.attoflow == 0
+        }
+    }
+
+    /// Reports the status of EVM execution.
+    access(all) enum Status: UInt8 {
+        /// Returned (rarely) when the status is unknown
+        /// and something has gone very wrong.
+        access(all) case unknown
+
+        /// Returned when execution of an EVM transaction/call
+        /// has failed at the validation step (e.g. nonce mismatch).
+        /// An invalid transaction/call is rejected and is neither executed
+        /// nor included in a block.
+        access(all) case invalid
+
+        /// Returned when execution of an EVM transaction/call
+        /// has been successful but the VM has reported an error in
+        /// the outcome of execution (e.g. running out of gas).
+        /// A failed tx/call is included in a block.
+        /// Note that resubmission of a failed transaction would
+        /// result in an invalid status on the second attempt, given
+        /// that the nonce would become invalid.
+        access(all) case failed
+
+        /// Returned when execution of an EVM transaction/call
+        /// has been successful and no error is reported by the VM.
+        access(all) case successful
+    }
+
+    /// Reports the outcome of an EVM transaction/call execution attempt
+    access(all) struct Result {
+        /// status of the execution
+        access(all) let status: Status
+
+        /// error code (error code zero means no error)
+        access(all) let errorCode: UInt64
+
+        /// error message
+        access(all) let errorMessage: String
+
+        /// the amount of gas metered during
+        /// EVM execution
+        access(all) let gasUsed: UInt64
+
+        /// the data that is returned from
+        /// the EVM for the call. For coa.deploy
+        /// calls it returns the code deployed to
+        /// the address provided in the contractAddress field.
+        /// In case of revert, the smart contract custom error message
+        /// is also returned here (see EIP-140 for more details).
+        access(all) let data: [UInt8]
+
+        /// the newly deployed contract address
+        /// if the transaction caused such a deployment;
+        /// otherwise the value is nil.
+        access(all) let deployedContract: EVMAddress?
+
+        init(
+            status: Status,
+            errorCode: UInt64,
+            errorMessage: String,
+            gasUsed: UInt64,
+            data: [UInt8],
+            contractAddress: [UInt8; 20]?
+        ) {
+            self.status = status
+            self.errorCode = errorCode
+            self.errorMessage = errorMessage
+            self.gasUsed = gasUsed
+            self.data = data
+
+            if let addressBytes = contractAddress {
+                self.deployedContract = EVMAddress(bytes: addressBytes)
+            } else {
+                self.deployedContract = nil
+            }
+        }
+    }
+
+    /*
+    Cadence-Owned Accounts (COA)
+    A COA is a natively supported EVM smart contract wallet type
+    that allows a Cadence resource to own and control an EVM address.
+    This native wallet provides the primitives needed to bridge
+    or control assets across Flow EVM and Cadence.
+    From the EVM perspective, COAs are smart contract wallets
+    that accept native token transfers and support several ERCs
+    including ERC-165, ERC-721, ERC-777, ERC-1155, ERC-1271.
+
+    COAs are not controlled by a key.
+    Instead, every COA account has a unique resource accessible
+    on the Cadence side, and anyone who owns that resource can submit transactions
+    on behalf of this address. These direct transactions have the COA's EVM address
+    as the tx.origin, and a new EVM transaction type (TxType = 0xff)
+    is used to differentiate these transactions from other types
+    of EVM transactions (e.g., DynamicFeeTxType (0x02)).
+
+    Because of this, users are never able to access a key for their account,
+    meaning that they cannot control their COA's address on other EVM blockchains.
+    */
+
+    /* Entitlements enabling finer-grained access control on a CadenceOwnedAccount */
+
+    /// Allows validating ownership of a COA
+    access(all) entitlement Validate
+
+    /// Allows withdrawing FLOW from the COA back to Cadence
+    access(all) entitlement Withdraw
+
+    /// Allows sending Call transactions from the COA
+    access(all) entitlement Call
+
+    /// Allows sending deploy contract transactions from the COA
+    access(all) entitlement Deploy
+
+    /// Allows access to all the privileged functionality on a COA
+    access(all) entitlement Owner
+
+    /// Allows access to all bridging functionality for COAs
+    access(all) entitlement Bridge
+
+    /// Event that indicates when a new COA is created
+    access(all) event CadenceOwnedAccountCreated(address: String, uuid: UInt64)
+
+    /// Interface for types that have an associated EVM address
+    access(all) resource interface Addressable {
+        /// Gets the EVM address
+        access(all)
+        view fun address(): EVMAddress
+    }
+
+    access(all) resource CadenceOwnedAccount: Addressable {
+
+        access(self) var addressBytes: [UInt8; 20]
+
+        init() {
+            // address is initially set to zero
+            // but updated through initAddress later.
+            // We have to do this since we need the resource id (uuid)
+            // to calculate the EVM address for this cadence owned account
+            self.addressBytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+        }
+
+        /// Sets the EVM address for the COA. Only callable once on initial creation.
+        ///
+        /// @param addressBytes: The 20 byte EVM address
+        access(contract)
+        fun initAddress(addressBytes: [UInt8; 20]) {
+            // only allow setting the address the first time:
+            // check that the address is empty
+            pre {
+                self.addressBytes == [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]:
+                    "EVM.CadenceOwnedAccount.initAddress(): Cannot initialize the address bytes if it has already been set!"
+            }
+            self.addressBytes = addressBytes
+        }
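+
+        /// Example (illustrative, not part of the resource): a COA is typically
+        /// created and stored from a transaction; the storage path is an assumption:
+        ///
+        ///     let coa <- EVM.createCadenceOwnedAccount()
+        ///     signer.storage.save(<-coa, to: /storage/evm)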
+
+        /// Gets the EVM address of the cadence owned account
+        access(all)
+        view fun address(): EVMAddress {
+            // Always create a new EVMAddress instance
+            return EVMAddress(bytes: self.addressBytes)
+        }
+
+        /// Gets the balance of the cadence owned account
+        access(all)
+        view fun balance(): Balance {
+            return self.address().balance()
+        }
+
+        /// Deposits the given vault into the cadence owned account's balance
+        ///
+        /// @param from: The FlowToken Vault to deposit to this cadence owned account
+        access(all)
+        fun deposit(from: @FlowToken.Vault) {
+            self.address().deposit(from: <-from)
+        }
+
+        /// Gets the EVM address of the cadence owned account behind an entitlement,
+        /// acting as proof of access
+        access(Owner | Validate)
+        view fun protectedAddress(): EVMAddress {
+            return self.address()
+        }
+
+        /// Withdraws the balance from the cadence owned account's balance.
+        /// Note that amounts smaller than 10nF (1e-8) can't be withdrawn,
+        /// given that Flow Token Vaults use UFix64s to store balances.
+        /// If converting the given balance to a UFix64 results in a
+        /// rounding error, this function will fail.
+        ///
+        /// @param balance: The EVM balance to withdraw
+        ///
+        /// @return A FlowToken Vault with the requested balance
+        access(Owner | Withdraw)
+        fun withdraw(balance: Balance): @FlowToken.Vault {
+            if balance.isZero() {
+                return <-FlowToken.createEmptyVault(vaultType: Type<@FlowToken.Vault>())
+            }
+            let vault <- InternalEVM.withdraw(
+                from: self.addressBytes,
+                amount: balance.attoflow
+            ) as! @FlowToken.Vault
+            emit FLOWTokensWithdrawn(
+                address: self.address().toString(),
+                amount: balance.inFLOW(),
+                withdrawnUUID: vault.uuid,
+                balanceAfterInAttoFlow: self.balance().attoflow
+            )
+            return <-vault
+        }
+
+        /// Deploys a contract to the EVM environment.
+        /// Returns the result, which contains the address of
+        /// the newly deployed contract
+        ///
+        /// @param code: The bytecode of the Solidity contract
+        /// @param gasLimit: The EVM gas limit for the deployment transaction
+        /// @param value: The value, as an EVM.Balance object, to send with the deployment
+        ///
+        /// @return The EVM transaction result
+        access(Owner | Deploy)
+        fun deploy(
+            code: [UInt8],
+            gasLimit: UInt64,
+            value: Balance
+        ): Result {
+            return InternalEVM.deploy(
+                from: self.addressBytes,
+                code: code,
+                gasLimit: gasLimit,
+                value: value.attoflow
+            ) as! Result
+        }
+
+        /// Calls a function with the given data.
+        /// The execution is limited by the given amount of gas
+        access(Owner | Call)
+        fun call(
+            to: EVMAddress,
+            data: [UInt8],
+            gasLimit: UInt64,
+            value: Balance
+        ): Result {
+            return InternalEVM.call(
+                from: self.addressBytes,
+                to: to.bytes,
+                data: data,
+                gasLimit: gasLimit,
+                value: value.attoflow
+            ) as! Result
+        }
+
+        /// Calls a contract function with the given data.
+        /// The execution is limited by the given amount of gas.
+        /// The transaction state changes are not persisted.
+        access(all)
+        fun dryCall(
+            to: EVMAddress,
+            data: [UInt8],
+            gasLimit: UInt64,
+            value: Balance,
+        ): Result {
+            return InternalEVM.dryCall(
+                from: self.addressBytes,
+                to: to.bytes,
+                data: data,
+                gasLimit: gasLimit,
+                value: value.attoflow
+            ) as! Result
+        }
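+
+        /// Example (illustrative): estimating the outcome of a call without
+        /// persisting state changes; the target address and calldata are assumptions:
+        ///
+        ///     let res = coa.dryCall(
+        ///         to: EVM.addressFromString("0x0000000000000000000000000000000000000001"),
+        ///         data: EVM.encodeABIWithSignature("totalSupply()", []),
+        ///         gasLimit: 100_000,
+        ///         value: EVM.Balance(attoflow: 0)
+        ///     )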
+
+        /// Bridges the given NFT to the EVM environment, requiring a Provider
+        /// from which to withdraw a fee to fulfill the bridge request
+        ///
+        /// @param nft: The NFT to bridge to the COA's address in Flow EVM
+        /// @param feeProvider: A Withdraw entitled Provider reference to a FlowToken Vault
+        ///     that contains the fees to be taken to pay for bridging
+        access(all)
+        fun depositNFT(
+            nft: @{NonFungibleToken.NFT},
+            feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider}
+        ) {
+            EVM.borrowBridgeAccessor().depositNFT(nft: <-nft, to: self.address(), feeProvider: feeProvider)
+        }
+
+        /// Bridges the given NFT from the EVM environment, requiring a Provider
+        /// from which to withdraw a fee to fulfill the bridge request.
+        /// Note: the caller has to own the requested NFT in EVM
+        ///
+        /// @param type: The Cadence type of the NFT to withdraw
+        /// @param id: The EVM ERC721 ID of the NFT to withdraw
+        /// @param feeProvider: A Withdraw entitled Provider reference to a FlowToken Vault
+        ///     that contains the fees to be taken to pay for bridging
+        ///
+        /// @return The requested NFT
+        access(Owner | Bridge)
+        fun withdrawNFT(
+            type: Type,
+            id: UInt256,
+            feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider}
+        ): @{NonFungibleToken.NFT} {
+            return <- EVM.borrowBridgeAccessor().withdrawNFT(
+                caller: &self as auth(Call) &CadenceOwnedAccount,
+                type: type,
+                id: id,
+                feeProvider: feeProvider
+            )
+        }
+
+        /// Bridges the given Vault to the EVM environment
+        access(all)
+        fun depositTokens(
+            vault: @{FungibleToken.Vault},
+            feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider}
+        ) {
+            EVM.borrowBridgeAccessor().depositTokens(vault: <-vault, to: self.address(), feeProvider: feeProvider)
+        }
+
+        /// Bridges the given fungible tokens from the EVM environment, requiring a Provider from which to
+        /// withdraw a fee to fulfill the bridge request. Note: the caller must own a sufficient balance of
+        /// the requested tokens in EVM
+        access(Owner | Bridge)
+        fun withdrawTokens(
+            type: Type,
+            amount: UInt256,
+            feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider}
+        ): @{FungibleToken.Vault} {
+            return <- EVM.borrowBridgeAccessor().withdrawTokens(
+                caller: &self as auth(Call) &CadenceOwnedAccount,
+                type: type,
+                amount: amount,
+                feeProvider: feeProvider
+            )
+        }
+    }
+
+    /// Creates a new cadence owned account
+    access(all)
+    fun createCadenceOwnedAccount(): @CadenceOwnedAccount {
+        let acc <-create CadenceOwnedAccount()
+        let addr = InternalEVM.createCadenceOwnedAccount(uuid: acc.uuid)
+        acc.initAddress(addressBytes: addr)
+
+        emit CadenceOwnedAccountCreated(address: acc.address().toString(), uuid: acc.uuid)
+        return <-acc
+    }
+
+    /// Runs an RLP-encoded EVM transaction, deducts the gas fees,
+    /// and deposits the gas fees into the provided coinbase address.
+    ///
+    /// @param tx: The RLP-encoded transaction to run
+    /// @param coinbase: The address of the entity to receive the transaction fees
+    ///     for relaying the transaction
+    ///
+    /// @return: The transaction result
+    access(all)
+    fun run(tx: [UInt8], coinbase: EVMAddress): Result {
+        return InternalEVM.run(
+            tx: tx,
+            coinbase: coinbase.bytes
+        ) as! Result
+    }
+
+    /// mustRun runs the transaction using EVM.run.
+    /// It will roll back if the tx execution status is unknown or invalid.
+    /// Note that this method does not roll back if the transaction
+    /// is executed but a VM error is reported as the outcome
+    /// of the execution (status: failed).
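+    ///
+    /// Example (illustrative): `rlpEncodedTx` below is an assumption,
+    /// standing in for a signed, RLP-encoded EVM transaction:
+    ///
+    ///     let result = EVM.mustRun(tx: rlpEncodedTx, coinbase: coa.address())
+    ///     assert(result.status == EVM.Status.successful, message: "tx execution failed")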
+    access(all)
+    fun mustRun(tx: [UInt8], coinbase: EVMAddress): Result {
+        let runResult = self.run(tx: tx, coinbase: coinbase)
+        assert(
+            runResult.status == Status.failed || runResult.status == Status.successful,
+            message: "EVM.mustRun(): The provided transaction is not valid for execution"
+        )
+        return runResult
+    }
+
+    /// Simulates running an unsigned RLP-encoded transaction using
+    /// the from address as the signer.
+    /// The transaction state changes are not persisted.
+    /// This is useful for gas estimation or calling view contract functions.
+    access(all)
+    fun dryRun(tx: [UInt8], from: EVMAddress): Result {
+        return InternalEVM.dryRun(
+            tx: tx,
+            from: from.bytes,
+        ) as! Result
+    }
+
+    /// Calls a contract function with the given data.
+    /// The execution is limited by the given amount of gas.
+    /// The transaction state changes are not persisted.
+    access(all)
+    fun dryCall(
+        from: EVMAddress,
+        to: EVMAddress,
+        data: [UInt8],
+        gasLimit: UInt64,
+        value: Balance,
+    ): Result {
+        return InternalEVM.dryCall(
+            from: from.bytes,
+            to: to.bytes,
+            data: data,
+            gasLimit: gasLimit,
+            value: value.attoflow
+        ) as! Result
+    }
+
+    /// Runs a batch of RLP-encoded EVM transactions, deducts the gas fees,
+    /// and deposits the gas fees into the provided coinbase address.
+    /// An invalid transaction is not executed and not included in the block.
+    access(all)
+    fun batchRun(txs: [[UInt8]], coinbase: EVMAddress): [Result] {
+        return InternalEVM.batchRun(
+            txs: txs,
+            coinbase: coinbase.bytes,
+        ) as! [Result]
+    }
+
+    access(all)
+    fun encodeABI(_ values: [AnyStruct]): [UInt8] {
+        return InternalEVM.encodeABI(values)
+    }
+
+    access(all)
+    fun decodeABI(types: [Type], data: [UInt8]): [AnyStruct] {
+        return InternalEVM.decodeABI(types: types, data: data)
+    }
+
+    access(all)
+    fun encodeABIWithSignature(
+        _ signature: String,
+        _ values: [AnyStruct]
+    ): [UInt8] {
+        let methodID = HashAlgorithm.KECCAK_256.hash(
+            signature.utf8
+        ).slice(from: 0, upTo: 4)
+        let arguments = InternalEVM.encodeABI(values)
+
+        return methodID.concat(arguments)
+    }
+
+    access(all)
+    fun decodeABIWithSignature(
+        _ signature: String,
+        types: [Type],
+        data: [UInt8]
+    ): [AnyStruct] {
+        let methodID = HashAlgorithm.KECCAK_256.hash(
+            signature.utf8
+        ).slice(from: 0, upTo: 4)
+
+        for byte in methodID {
+            if byte != data.removeFirst() {
+                panic("EVM.decodeABIWithSignature(): Cannot decode! The signature does not match the provided data.")
+            }
+        }
+
+        return InternalEVM.decodeABI(types: types, data: data)
+    }
+
+    /// ValidationResult is the result of COA ownership proof validation
+    access(all) struct ValidationResult {
+
+        access(all) let isValid: Bool
+
+        /// If there was a problem with validation, this describes
+        /// what the problem was
+        access(all) let problem: String?
+
+        init(isValid: Bool, problem: String?) {
+            self.isValid = isValid
+            self.problem = problem
+        }
+    }
+
+    /// validateCOAOwnershipProof validates a COA ownership proof
+    access(all)
+    fun validateCOAOwnershipProof(
+        address: Address,
+        path: PublicPath,
+        signedData: [UInt8],
+        keyIndices: [UInt64],
+        signatures: [[UInt8]],
+        evmAddress: [UInt8; 20]
+    ): ValidationResult {
+        // make the signature set first:
+        // check that the number of signatures matches the number of key indices
+        if keyIndices.length != signatures.length {
+            return ValidationResult(
+                isValid: false,
+                problem: "EVM.validateCOAOwnershipProof(): Key indices array length"
+                    .concat(" doesn't match the signatures array length!")
+            )
+        }
+
+        // fetch the account
+        let acc = getAccount(address)
+
+        var signatureSet: [Crypto.KeyListSignature] = []
+        let keyList = Crypto.KeyList()
+        var keyListLength = 0
+        let seenAccountKeyIndices: {Int: Int} = {}
+        for signatureIndex, signature in signatures {
+            // index of the key on the account
+            let accountKeyIndex = Int(keyIndices[signatureIndex]!)
+            // index of the key in the key list
+            var keyListIndex = 0
+
+            if !seenAccountKeyIndices.containsKey(accountKeyIndex) {
+                // fetch the account key with accountKeyIndex
+                if let key = acc.keys.get(keyIndex: accountKeyIndex) {
+                    if key.isRevoked {
+                        return ValidationResult(
+                            isValid: false,
+                            problem: "EVM.validateCOAOwnershipProof(): Cannot validate COA ownership"
+                                .concat(" for Cadence account \(address). The account key at index \(accountKeyIndex) is revoked.")
+                        )
+                    }
+
+                    keyList.add(
+                        key.publicKey,
+                        hashAlgorithm: key.hashAlgorithm,
+                        // normalization factor. We need to divide by 1000 because the
+                        // `Crypto.KeyList.verify()` function expects the weight to be
+                        // in the range [0, 1]. 1000 is the key weight threshold.
+                        weight: key.weight / 1000.0,
+                    )
+
+                    keyListIndex = keyListLength
+                    keyListLength = keyListLength + 1
+                    seenAccountKeyIndices[accountKeyIndex] = keyListIndex
+                } else {
+                    return ValidationResult(
+                        isValid: false,
+                        problem: "EVM.validateCOAOwnershipProof(): Cannot validate COA ownership"
+                            .concat(" for Cadence account \(address). The key index \(accountKeyIndex) is invalid.")
+                    )
+                }
+            } else {
+                // if we have already seen this accountKeyIndex, use the keyListIndex
+                // that was previously assigned to it.
+                // `Crypto.KeyList.verify()` knows how to handle duplicate keys
+                keyListIndex = seenAccountKeyIndices[accountKeyIndex]!
+            }
+
+            signatureSet.append(Crypto.KeyListSignature(
+                keyIndex: keyListIndex,
+                signature: signature
+            ))
+        }
+
+        let isValid = keyList.verify(
+            signatureSet: signatureSet,
+            signedData: signedData,
+            domainSeparationTag: "FLOW-V0.0-user"
+        )
+
+        if !isValid {
+            return ValidationResult(
+                isValid: false,
+                problem: "EVM.validateCOAOwnershipProof(): Cannot validate COA ownership"
+                    .concat(" for Cadence account \(address). The given signatures are not valid or do not provide enough weight.")
+            )
+        }
+
+        let coaRef = acc.capabilities.borrow<&EVM.CadenceOwnedAccount>(path)
+        if coaRef == nil {
+            return ValidationResult(
+                isValid: false,
+                problem: "EVM.validateCOAOwnershipProof(): Cannot validate COA ownership. "
+                    .concat("Could not borrow the COA resource for account \(address).")
+            )
+        }
+
+        // verify that the EVM address matches
+        let addr = coaRef!.address()
+        for index, item in addr.bytes {
+            if item != evmAddress[index] {
+                return ValidationResult(
+                    isValid: false,
+                    problem: "EVM.validateCOAOwnershipProof(): Cannot validate COA ownership."
+ .concat("The provided evm address does not match the account's COA address.") + ) + } + } + + return ValidationResult( + isValid: true, + problem: nil + ) + } + + /// Interface for a resource which acts as an entrypoint to the VM bridge + access(all) resource interface BridgeAccessor { + + /// Endpoint enabling the bridging of an NFT to EVM + access(Bridge) + fun depositNFT( + nft: @{NonFungibleToken.NFT}, + to: EVMAddress, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ) + + /// Endpoint enabling the bridging of an NFT from EVM + access(Bridge) + fun withdrawNFT( + caller: auth(Call) &CadenceOwnedAccount, + type: Type, + id: UInt256, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ): @{NonFungibleToken.NFT} + + /// Endpoint enabling the bridging of a fungible token vault to EVM + access(Bridge) + fun depositTokens( + vault: @{FungibleToken.Vault}, + to: EVMAddress, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ) + + /// Endpoint enabling the bridging of fungible tokens from EVM + access(Bridge) + fun withdrawTokens( + caller: auth(Call) &CadenceOwnedAccount, + type: Type, + amount: UInt256, + feeProvider: auth(FungibleToken.Withdraw) &{FungibleToken.Provider} + ): @{FungibleToken.Vault} + } + + /// Interface which captures a Capability to the bridge Accessor, + /// saving it within the BridgeRouter resource + access(all) resource interface BridgeRouter { + + /// Returns a reference to the BridgeAccessor designated + /// for internal bridge requests + access(Bridge) view fun borrowBridgeAccessor(): auth(Bridge) &{BridgeAccessor} + + /// Sets the BridgeAccessor Capability in the BridgeRouter + access(Bridge) fun setBridgeAccessor(_ accessor: Capability<auth(Bridge) &{BridgeAccessor}>) { + pre { + accessor.check(): + "EVM.setBridgeAccessor(): Invalid BridgeAccessor Capability provided" + emit BridgeAccessorUpdated( + routerType: self.getType(), + routerUUID: self.uuid, + routerAddress: self.owner?.address ?? panic("EVM.setBridgeAccessor(): Router must be stored in an account's storage"), + accessorType: accessor.borrow()!.getType(), + accessorUUID: accessor.borrow()!.uuid, + accessorAddress: accessor.address + ) + } + } + } + + /// Returns a reference to the BridgeAccessor designated for internal bridge requests + access(self) + view fun borrowBridgeAccessor(): auth(Bridge) &{BridgeAccessor} { + return self.account.storage.borrow<auth(Bridge) &{BridgeRouter}>(from: /storage/evmBridgeRouter) + ?.borrowBridgeAccessor() + ?? panic("EVM.borrowBridgeAccessor(): Could not borrow a reference to the EVM bridge.") + } + + /// The Heartbeat resource controls the block production. + /// It is stored in the storage and used in the Flow protocol + /// to call the heartbeat function once per block. + access(all) resource Heartbeat { + /// heartbeat calls commit block proposals and forms new blocks + /// including all the recently executed transactions. + /// The Flow protocol makes sure to call this function + /// once per block as a system call. + access(all) + fun heartbeat() { + InternalEVM.commitBlockProposal() + } + } + + /// setupHeartbeat creates a heartbeat resource and saves it to storage. + /// The function is called once during the contract initialization. + /// + /// The heartbeat resource is used to control the block production, + /// and used in the Flow protocol to call the heartbeat function once per block. 
+ /// + /// The function can be called by anyone, but only once: + /// the function will fail if the resource already exists. + /// + /// The resulting resource is stored in the account storage, + /// and is only accessible by the account, not the caller of the function. + access(all) + fun setupHeartbeat() { + self.account.storage.save(<-create Heartbeat(), to: /storage/EVMHeartbeat) + } + + init() { + self.setupHeartbeat() + } +} diff --git a/fvm/evm/stdlib/contract.go b/fvm/evm/stdlib/contract.go new file mode 100644 index 00000000000..74ea2c5dfb4 --- /dev/null +++ b/fvm/evm/stdlib/contract.go @@ -0,0 +1,660 @@ +package stdlib + +import ( + _ "embed" + "fmt" + "regexp" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/sema" + "github.com/onflow/cadence/stdlib" + + "github.com/onflow/flow-go/model/flow" +) + +//go:embed contract.cdc +var contractCode string + +//go:embed contract_minimal.cdc +var ContractMinimalCode string + +var nftImportPattern = regexp.MustCompile(`(?m)^import "NonFungibleToken"`) +var fungibleTokenImportPattern = regexp.MustCompile(`(?m)^import "FungibleToken"`) +var flowTokenImportPattern = regexp.MustCompile(`(?m)^import "FlowToken"`) + +func ContractCode(nonFungibleTokenAddress, fungibleTokenAddress, flowTokenAddress flow.Address) []byte { + evmContract := nftImportPattern.ReplaceAllString( + contractCode, + fmt.Sprintf("import NonFungibleToken from %s", nonFungibleTokenAddress.HexWithPrefix()), + ) + evmContract = fungibleTokenImportPattern.ReplaceAllString( + evmContract, + fmt.Sprintf("import FungibleToken from %s", fungibleTokenAddress.HexWithPrefix()), + ) + evmContract = flowTokenImportPattern.ReplaceAllString( + evmContract, + fmt.Sprintf("import FlowToken from %s", flowTokenAddress.HexWithPrefix()), + ) + return []byte(evmContract) +} + +const ( + ContractName = "EVM" + + EVMAddressTypeBytesFieldName = "bytes" + + EVMBytesTypeValueFieldName = "value" + + EVMAddressTypeQualifiedIdentifier = "EVM.EVMAddress" + + EVMBalanceTypeQualifiedIdentifier = "EVM.Balance" + + EVMBytesTypeQualifiedIdentifier = "EVM.EVMBytes" + + EVMBytes4TypeQualifiedIdentifier = "EVM.EVMBytes4" + + EVMBytes32TypeQualifiedIdentifier = "EVM.EVMBytes32" + + EVMResultTypeQualifiedIdentifier = "EVM.Result" + EVMResultTypeStatusFieldName = "status" + EVMResultTypeErrorCodeFieldName = "errorCode" + EVMResultTypeErrorMessageFieldName = "errorMessage" + EVMResultTypeGasUsedFieldName = "gasUsed" + EVMResultTypeDataFieldName = "data" + EVMResultTypeDeployedContractFieldName = "deployedContract" + + EVMStatusTypeQualifiedIdentifier = "EVM.Status" + + EVMBlockTypeQualifiedIdentifier = "EVM.EVMBlock" +) + +const ( + EVMAddressLength = 20 + EVMBytes4Length = 4 + EVMBytes32Length = 32 +) + +var ( + EVMTransactionBytesCadenceType = cadence.NewVariableSizedArrayType(cadence.UInt8Type) + + EVMTransactionBytesType = sema.NewVariableSizedType(nil, sema.UInt8Type) + EVMTransactionsBatchBytesType = sema.NewVariableSizedType(nil, EVMTransactionBytesType) + EVMAddressBytesType = sema.NewConstantSizedType(nil, sema.UInt8Type, EVMAddressLength) + + EVMAddressBytesStaticType = interpreter.ConvertSemaArrayTypeToStaticArrayType(nil, EVMAddressBytesType) + + EVMBytesValueStaticType = interpreter.ConvertSemaArrayTypeToStaticArrayType(nil, EVMTransactionBytesType) + + EVMBytes4ValueStaticType = interpreter.ConvertSemaArrayTypeToStaticArrayType( + nil, + sema.NewConstantSizedType(nil, 
sema.UInt8Type, EVMBytes4Length), + ) + + EVMBytes32ValueStaticType = interpreter.ConvertSemaArrayTypeToStaticArrayType( + nil, + sema.NewConstantSizedType(nil, sema.UInt8Type, EVMBytes32Length), + ) + + EVMAddressBytesCadenceType = cadence.NewConstantSizedArrayType(EVMAddressLength, cadence.UInt8Type) +) + +// InternalEVM.encodeABI + +const InternalEVMTypeEncodeABIFunctionName = "encodeABI" + +var InternalEVMTypeEncodeABIFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: sema.ArgumentLabelNotRequired, + Identifier: "values", + TypeAnnotation: sema.NewTypeAnnotation( + sema.NewVariableSizedType(nil, sema.AnyStructType), + ), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.ByteArrayType), +} + +// InternalEVM.decodeABI + +const InternalEVMTypeDecodeABIFunctionName = "decodeABI" + +var InternalEVMTypeDecodeABIFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Identifier: "types", + TypeAnnotation: sema.NewTypeAnnotation( + sema.NewVariableSizedType(nil, sema.MetaType), + ), + }, + { + Label: "data", + TypeAnnotation: sema.NewTypeAnnotation(sema.ByteArrayType), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation( + sema.NewVariableSizedType(nil, sema.AnyStructType), + ), +} + +// InternalEVM.run + +const InternalEVMTypeRunFunctionName = "run" + +var InternalEVMTypeRunFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "tx", + TypeAnnotation: sema.NewTypeAnnotation(EVMTransactionBytesType), + }, + { + Label: "coinbase", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + }, + // Actually EVM.Result, but cannot refer to it here + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.AnyStructType), +} + +// InternalEVM.dryRun + +const InternalEVMTypeDryRunFunctionName = "dryRun" + +var InternalEVMTypeDryRunFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "tx", + TypeAnnotation: sema.NewTypeAnnotation(EVMTransactionBytesType), + }, + { + Label: "from", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + }, + // Actually EVM.Result, but cannot refer to it here + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.AnyStructType), +} + +// InternalEVM.batchRun + +const InternalEVMTypeBatchRunFunctionName = "batchRun" + +var InternalEVMTypeBatchRunFunctionType *sema.FunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "txs", + TypeAnnotation: sema.NewTypeAnnotation(EVMTransactionsBatchBytesType), + }, + { + Label: "coinbase", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + }, + // Actually [EVM.Result], but cannot refer to it here + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.NewVariableSizedType(nil, sema.AnyStructType)), +} + +// InternalEVM.call + +const InternalEVMTypeCallFunctionName = "call" + +var InternalEVMTypeCallFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "from", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + { + Label: "to", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + { + Label: "data", + TypeAnnotation: sema.NewTypeAnnotation(sema.ByteArrayType), + }, + { + Label: "gasLimit", + TypeAnnotation: sema.NewTypeAnnotation(sema.UInt64Type), + }, + { + Label: "value", + TypeAnnotation: sema.NewTypeAnnotation(sema.UIntType), + }, + }, + // Actually EVM.Result, but cannot refer to it here + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.AnyStructType), +} + +// InternalEVM.dryCall + +const 
InternalEVMTypeDryCallFunctionName = "dryCall" + +var InternalEVMTypeDryCallFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "from", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + { + Label: "to", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + { + Label: "data", + TypeAnnotation: sema.NewTypeAnnotation(sema.ByteArrayType), + }, + { + Label: "gasLimit", + TypeAnnotation: sema.NewTypeAnnotation(sema.UInt64Type), + }, + { + Label: "value", + TypeAnnotation: sema.NewTypeAnnotation(sema.UIntType), + }, + }, + // Actually EVM.Result, but cannot refer to it here + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.AnyStructType), +} + +// InternalEVM.createCadenceOwnedAccount + +const InternalEVMTypeCreateCadenceOwnedAccountFunctionName = "createCadenceOwnedAccount" + +var InternalEVMTypeCreateCadenceOwnedAccountFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "uuid", + TypeAnnotation: sema.NewTypeAnnotation(sema.UInt64Type), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), +} + +// InternalEVM.deposit + +const InternalEVMTypeDepositFunctionName = "deposit" + +var InternalEVMTypeDepositFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "from", + TypeAnnotation: sema.NewTypeAnnotation(sema.AnyResourceType), + }, + { + Label: "to", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.VoidType), +} + +// InternalEVM.balance + +const InternalEVMTypeBalanceFunctionName = "balance" + +var InternalEVMTypeBalanceFunctionType = &sema.FunctionType{ + Purity: sema.FunctionPurityView, + Parameters: []sema.Parameter{ + { + Label: "address", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.UIntType), +} + +// InternalEVM.nonce + +const InternalEVMTypeNonceFunctionName = "nonce" + +var InternalEVMTypeNonceFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "address", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.UInt64Type), +} + +// InternalEVM.code + +const InternalEVMTypeCodeFunctionName = "code" + +var InternalEVMTypeCodeFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "address", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.ByteArrayType), +} + +// InternalEVM.codeHash + +const InternalEVMTypeCodeHashFunctionName = "codeHash" + +var InternalEVMTypeCodeHashFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "address", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.ByteArrayType), +} + +// InternalEVM.withdraw + +const InternalEVMTypeWithdrawFunctionName = "withdraw" + +var InternalEVMTypeWithdrawFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { + Label: "from", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + { + Label: "amount", + TypeAnnotation: sema.NewTypeAnnotation(sema.UIntType), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.AnyResourceType), +} + +// InternalEVM.deploy + +const InternalEVMTypeDeployFunctionName = "deploy" + +var InternalEVMTypeDeployFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{ + { 
+ Label: "from", + TypeAnnotation: sema.NewTypeAnnotation(EVMAddressBytesType), + }, + { + Label: "code", + TypeAnnotation: sema.NewTypeAnnotation(sema.ByteArrayType), + }, + { + Label: "gasLimit", + TypeAnnotation: sema.NewTypeAnnotation(sema.UInt64Type), + }, + { + Label: "value", + TypeAnnotation: sema.NewTypeAnnotation(sema.UIntType), + }, + }, + // Actually EVM.Result, but cannot refer to it here + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.AnyStructType), +} + +// InternalEVM.castToAttoFLOW + +const InternalEVMTypeCastToAttoFLOWFunctionName = "castToAttoFLOW" + +var InternalEVMTypeCastToAttoFLOWFunctionType = &sema.FunctionType{ + Purity: sema.FunctionPurityView, + Parameters: []sema.Parameter{ + { + Label: "balance", + TypeAnnotation: sema.NewTypeAnnotation(sema.UFix64Type), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.UIntType), +} + +// InternalEVM.castToFLOW + +const InternalEVMTypeCastToFLOWFunctionName = "castToFLOW" + +var InternalEVMTypeCastToFLOWFunctionType = &sema.FunctionType{ + Purity: sema.FunctionPurityView, + Parameters: []sema.Parameter{ + { + Label: "balance", + TypeAnnotation: sema.NewTypeAnnotation(sema.UIntType), + }, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.UFix64Type), +} + +// InternalEVM.commitBlockProposal + +const InternalEVMTypeCommitBlockProposalFunctionName = "commitBlockProposal" + +var InternalEVMTypeCommitBlockProposalFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{}, + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.VoidType), +} + +// InternalEVM.getLatestBlock + +const InternalEVMTypeGetLatestBlockFunctionName = "getLatestBlock" + +var InternalEVMTypeGetLatestBlockFunctionType = &sema.FunctionType{ + Parameters: []sema.Parameter{}, + // Actually EVM.Block, but cannot refer to it here + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.AnyStructType), +} + +// InternalEVM + +const InternalEVMContractName = "InternalEVM" + +var InternalEVMContractType = func() *sema.CompositeType { + ty := &sema.CompositeType{ + Identifier: InternalEVMContractName, + Kind: common.CompositeKindContract, + } + + ty.Members = sema.MembersAsMap([]*sema.Member{ + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeRunFunctionName, + InternalEVMTypeRunFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeDryRunFunctionName, + InternalEVMTypeDryRunFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeBatchRunFunctionName, + InternalEVMTypeBatchRunFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeCreateCadenceOwnedAccountFunctionName, + InternalEVMTypeCreateCadenceOwnedAccountFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeCallFunctionName, + InternalEVMTypeCallFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeDryCallFunctionName, + InternalEVMTypeDryCallFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeDepositFunctionName, + InternalEVMTypeDepositFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeWithdrawFunctionName, + InternalEVMTypeWithdrawFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeDeployFunctionName, + InternalEVMTypeDeployFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeCastToAttoFLOWFunctionName, + InternalEVMTypeCastToAttoFLOWFunctionType, + "", + ), + 
sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeCastToFLOWFunctionName, + InternalEVMTypeCastToFLOWFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeBalanceFunctionName, + InternalEVMTypeBalanceFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeNonceFunctionName, + InternalEVMTypeNonceFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeCodeFunctionName, + InternalEVMTypeCodeFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeCodeHashFunctionName, + InternalEVMTypeCodeHashFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeEncodeABIFunctionName, + InternalEVMTypeEncodeABIFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeDecodeABIFunctionName, + InternalEVMTypeDecodeABIFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeGetLatestBlockFunctionName, + InternalEVMTypeGetLatestBlockFunctionType, + "", + ), + sema.NewUnmeteredPublicFunctionMember( + ty, + InternalEVMTypeCommitBlockProposalFunctionName, + InternalEVMTypeCommitBlockProposalFunctionType, + "", + ), + }) + return ty +}() + +func newInternalEVMStandardLibraryValue( + value interpreter.Value, +) stdlib.StandardLibraryValue { + return stdlib.StandardLibraryValue{ + Name: InternalEVMContractName, + Type: InternalEVMContractType, + Value: value, + Kind: common.DeclarationKindContract, + } +} + +var internalEVMStandardLibraryType = stdlib.StandardLibraryType{ + Name: InternalEVMContractName, + Type: InternalEVMContractType, + Kind: common.DeclarationKindContract, +} + +func SetupEnvironment( + env runtime.Environment, + internalEVMValue interpreter.Value, + contractAddress flow.Address, +) { + location := common.NewAddressLocation(nil, common.Address(contractAddress), ContractName) + + env.DeclareType( + internalEVMStandardLibraryType, + location, + ) + env.DeclareValue( + newInternalEVMStandardLibraryValue(internalEVMValue), + location, + ) +} + +func NewEVMAddressCadenceType(address common.Address) *cadence.StructType { + return cadence.NewStructType( + common.NewAddressLocation(nil, address, ContractName), + EVMAddressTypeQualifiedIdentifier, + []cadence.Field{ + { + Identifier: "bytes", + Type: EVMAddressBytesCadenceType, + }, + }, + nil, + ) +} + +func NewBalanceCadenceType(address common.Address) *cadence.StructType { + return cadence.NewStructType( + common.NewAddressLocation(nil, address, ContractName), + EVMBalanceTypeQualifiedIdentifier, + []cadence.Field{ + { + Identifier: "attoflow", + Type: cadence.UIntType, + }, + }, + nil, + ) +} + +func NewEVMBlockCadenceType(address common.Address) *cadence.StructType { + return cadence.NewStructType( + common.NewAddressLocation(nil, address, ContractName), + EVMBlockTypeQualifiedIdentifier, + []cadence.Field{ + { + Identifier: "height", + Type: cadence.UInt64Type, + }, + { + Identifier: "hash", + Type: cadence.StringType, + }, + { + Identifier: "totalSupply", + Type: cadence.IntType, + }, + { + Identifier: "timestamp", + Type: cadence.UInt64Type, + }, + }, + nil, + ) +} diff --git a/fvm/evm/stdlib/contract_minimal.cdc b/fvm/evm/stdlib/contract_minimal.cdc new file mode 100644 index 00000000000..45378726215 --- /dev/null +++ b/fvm/evm/stdlib/contract_minimal.cdc @@ -0,0 +1,60 @@ +access(all) +contract EVM { + + /// EVMAddress is an EVM-compatible address + access(all) + struct EVMAddress { + + /// Bytes of the 
address + access(all) + let bytes: [UInt8; 20] + + /// Constructs a new EVM address from the given byte representation + init(bytes: [UInt8; 20]) { + self.bytes = bytes + } + + } + + access(all) + fun encodeABI(_ values: [AnyStruct]): [UInt8] { + return InternalEVM.encodeABI(values) + } + + access(all) + fun decodeABI(types: [Type], data: [UInt8]): [AnyStruct] { + return InternalEVM.decodeABI(types: types, data: data) + } + + access(all) + fun encodeABIWithSignature( + _ signature: String, + _ values: [AnyStruct] + ): [UInt8] { + let methodID = HashAlgorithm.KECCAK_256.hash( + signature.utf8 + ).slice(from: 0, upTo: 4) + let arguments = InternalEVM.encodeABI(values) + + return methodID.concat(arguments) + } + + access(all) + fun decodeABIWithSignature( + _ signature: String, + types: [Type], + data: [UInt8] + ): [AnyStruct] { + let methodID = HashAlgorithm.KECCAK_256.hash( + signature.utf8 + ).slice(from: 0, upTo: 4) + + for byte in methodID { + if byte != data.removeFirst() { + panic("signature mismatch") + } + } + + return InternalEVM.decodeABI(types: types, data: data) + } +} diff --git a/fvm/evm/stdlib/contract_test.go b/fvm/evm/stdlib/contract_test.go new file mode 100644 index 00000000000..33b29beac40 --- /dev/null +++ b/fvm/evm/stdlib/contract_test.go @@ -0,0 +1,6140 @@ +package stdlib_test + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "math/big" + "strings" + "testing" + + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/json" + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/sema" + cadenceStdlib "github.com/onflow/cadence/stdlib" + . "github.com/onflow/cadence/test_utils/common_utils" + . "github.com/onflow/cadence/test_utils/runtime_utils" + coreContracts "github.com/onflow/flow-core-contracts/lib/go/contracts" + coreContractstemplates "github.com/onflow/flow-core-contracts/lib/go/templates" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/blueprints" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/impl" + "github.com/onflow/flow-go/fvm/evm/stdlib" + . 
"github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/fvm/meter" + "github.com/onflow/flow-go/model/flow" +) + +func newLocationResolver( + cryptoContractAddress flow.Address, +) func( + identifiers []runtime.Identifier, + location runtime.Location, +) ([]runtime.ResolvedLocation, error) { + cryptoContractAddress2 := common.Address(cryptoContractAddress) + return func( + identifiers []runtime.Identifier, + location runtime.Location, + ) ([]runtime.ResolvedLocation, error) { + return environment.ResolveLocation( + identifiers, + location, + nil, + cryptoContractAddress2, + ) + } +} + +type testContractHandler struct { + flowTokenAddress common.Address + evmContractAddress common.Address + deployCOA func(uint64) types.Address + accountByAddress func(types.Address, bool) types.Account + lastExecutedBlock func() *types.Block + run func(tx []byte, coinbase types.Address) *types.ResultSummary + batchRun func(txs [][]byte, coinbase types.Address) []*types.ResultSummary + generateResourceUUID func() uint64 + dryRun func(tx []byte, from types.Address) *types.ResultSummary + commitBlockProposal func() +} + +var _ types.ContractHandler = &testContractHandler{} + +func (t *testContractHandler) FlowTokenAddress() common.Address { + return t.flowTokenAddress +} + +func (t *testContractHandler) EVMContractAddress() common.Address { + return t.evmContractAddress +} + +func (t *testContractHandler) DeployCOA(uuid uint64) types.Address { + if t.deployCOA == nil { + var address types.Address + binary.LittleEndian.PutUint64(address[:], uuid) + return address + } + return t.deployCOA(uuid) +} + +func (t *testContractHandler) AccountByAddress(addr types.Address, isAuthorized bool) types.Account { + if t.accountByAddress == nil { + panic("unexpected AccountByAddress") + } + return t.accountByAddress(addr, isAuthorized) +} + +func (t *testContractHandler) LastExecutedBlock() *types.Block { + if t.lastExecutedBlock == nil { + panic("unexpected LastExecutedBlock") + } + return t.lastExecutedBlock() +} + +func (t *testContractHandler) Run(tx []byte, coinbase types.Address) *types.ResultSummary { + if t.run == nil { + panic("unexpected Run") + } + return t.run(tx, coinbase) +} + +func (t *testContractHandler) DryRun(tx []byte, from types.Address) *types.ResultSummary { + if t.dryRun == nil { + panic("unexpected DryRun") + } + return t.dryRun(tx, from) +} + +func (t *testContractHandler) BatchRun(txs [][]byte, coinbase types.Address) []*types.ResultSummary { + if t.batchRun == nil { + panic("unexpected BatchRun") + } + return t.batchRun(txs, coinbase) +} + +func (t *testContractHandler) GenerateResourceUUID() uint64 { + if t.generateResourceUUID == nil { + panic("unexpected GenerateResourceUUID") + } + return t.generateResourceUUID() +} + +func (t *testContractHandler) CommitBlockProposal() { + if t.commitBlockProposal == nil { + panic("unexpected CommitBlockProposal") + } + t.commitBlockProposal() +} + +type testFlowAccount struct { + address types.Address + balance func() types.Balance + code func() types.Code + codeHash func() []byte + nonce func() uint64 + transfer func(address types.Address, balance types.Balance) + deposit func(vault *types.FLOWTokenVault) + withdraw func(balance types.Balance) *types.FLOWTokenVault + deploy func(code types.Code, limit types.GasLimit, balance types.Balance) *types.ResultSummary + call func(address types.Address, data types.Data, limit types.GasLimit, balance types.Balance) *types.ResultSummary +} + +var _ 
types.Account = &testFlowAccount{} + +func (t *testFlowAccount) Address() types.Address { + return t.address +} + +func (t *testFlowAccount) Balance() types.Balance { + if t.balance == nil { + return types.NewBalanceFromUFix64(0) + } + return t.balance() +} + +func (t *testFlowAccount) Code() types.Code { + if t.code == nil { + return types.Code{} + } + return t.code() +} + +func (t *testFlowAccount) CodeHash() []byte { + if t.codeHash == nil { + return nil + } + return t.codeHash() +} + +func (t *testFlowAccount) Nonce() uint64 { + if t.nonce == nil { + return 0 + } + return t.nonce() +} + +func (t *testFlowAccount) Transfer(address types.Address, balance types.Balance) { + if t.transfer == nil { + panic("unexpected Transfer") + } + t.transfer(address, balance) +} + +func (t *testFlowAccount) Deposit(vault *types.FLOWTokenVault) { + if t.deposit == nil { + panic("unexpected Deposit") + } + t.deposit(vault) +} + +func (t *testFlowAccount) Withdraw(balance types.Balance) *types.FLOWTokenVault { + if t.withdraw == nil { + panic("unexpected Withdraw") + } + return t.withdraw(balance) +} + +func (t *testFlowAccount) Deploy(code types.Code, limit types.GasLimit, balance types.Balance) *types.ResultSummary { + if t.deploy == nil { + panic("unexpected Deploy") + } + return t.deploy(code, limit, balance) +} + +func (t *testFlowAccount) Call(address types.Address, data types.Data, limit types.GasLimit, balance types.Balance) *types.ResultSummary { + if t.call == nil { + panic("unexpected Call") + } + return t.call(address, data, limit, balance) +} + +func requireEqualEventAddress(t *testing.T, event cadence.Event, address types.Address) { + actual := cadence.SearchFieldByName(event, types.CadenceOwnedAccountCreatedTypeAddressFieldName) + strippedHex := strings.TrimPrefix(address.String(), "0x") + expected, err := cadence.NewString(strippedHex) + if err != nil { + require.NoError(t, err) + } + require.Equal(t, expected, actual) +} + +func deployContracts( + t *testing.T, + rt runtime.Runtime, + contractsAddress flow.Address, + runtimeInterface *TestRuntimeInterface, + transactionEnvironment runtime.Environment, + nextTransactionLocation func() common.TransactionLocation, +) { + + contractsAddressHex := contractsAddress.Hex() + + env := coreContractstemplates.Environment{ + ServiceAccountAddress: contractsAddressHex, + ViewResolverAddress: contractsAddressHex, + BurnerAddress: contractsAddressHex, + FungibleTokenAddress: contractsAddressHex, + NonFungibleTokenAddress: contractsAddressHex, + MetadataViewsAddress: contractsAddressHex, + FungibleTokenMetadataViewsAddress: contractsAddressHex, + CryptoAddress: contractsAddressHex, + } + + contracts := []struct { + name string + code []byte + deployTx []byte + }{ + { + name: "Crypto", + code: coreContracts.Crypto(), + }, + { + name: "ViewResolver", + code: coreContracts.ViewResolver(), + }, + { + name: "Burner", + code: coreContracts.Burner(), + }, + { + name: "FungibleToken", + code: coreContracts.FungibleToken( + env, + ), + }, + { + name: "NonFungibleToken", + code: coreContracts.NonFungibleToken( + env, + ), + }, + { + name: "MetadataViews", + code: coreContracts.MetadataViews( + env, + ), + }, + { + name: "FungibleTokenMetadataViews", + code: coreContracts.FungibleTokenMetadataViews( + env, + ), + }, + { + name: "FlowToken", + code: coreContracts.FlowToken( + env, + ), + deployTx: []byte(` + transaction(name: String, code: String) { + prepare(signer: auth(AddContract, Storage, Capabilities) &Account) { + signer.contracts.add(name: name, code: 
code.utf8, signer) + } + } + `), + }, + { + name: stdlib.ContractName, + code: stdlib.ContractCode(contractsAddress, contractsAddress, contractsAddress), + }, + } + + for _, contract := range contracts { + + deployTx := contract.deployTx + if len(deployTx) == 0 { + deployTx = blueprints.DeployContractTransactionTemplate + } + + err := rt.ExecuteTransaction( + runtime.Script{ + Source: deployTx, + Arguments: EncodeArgs([]cadence.Value{ + cadence.String(contract.name), + cadence.String(contract.code), + }), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: transactionEnvironment, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + } + +} + +func newEVMTransactionEnvironment(handler types.ContractHandler, contractAddress flow.Address) runtime.Environment { + transactionEnvironment := runtime.NewBaseInterpreterEnvironment(runtime.Config{}) + + internalEVMValue := impl.NewInternalEVMContractValue( + nil, + handler, + contractAddress, + ) + + stdlib.SetupEnvironment( + transactionEnvironment, + internalEVMValue, + contractAddress, + ) + + return transactionEnvironment +} + +func newEVMScriptEnvironment(handler types.ContractHandler, contractAddress flow.Address) runtime.Environment { + scriptEnvironment := runtime.NewScriptInterpreterEnvironment(runtime.Config{}) + + internalEVMValue := impl.NewInternalEVMContractValue( + nil, + handler, + contractAddress, + ) + + stdlib.SetupEnvironment( + scriptEnvironment, + internalEVMValue, + contractAddress, + ) + + return scriptEnvironment +} + +func TestEVMEncodeABI(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + return EVM.encodeABI(["John Doe", UInt64(33), false]) + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: 
runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + abiBytes := []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x60, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x8, 0x4a, 0x6f, 0x68, 0x6e, 0x20, 0x44, 0x6f, 0x65, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + } + cdcBytes := make([]cadence.Value, 0) + for _, bt := range abiBytes { + cdcBytes = append(cdcBytes, cadence.UInt8(bt)) + } + encodedABI := cadence.NewArray( + cdcBytes, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + assert.Equal(t, + encodedABI, + result, + ) + assert.Equal(t, uint64(len(cdcBytes)), gauge.TotalComputationUsed()) +} + +func TestEVMEncodeABIByteTypes(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + contractsAddress := flow.BytesToAddress([]byte{0x1}) + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + t.Run("ABI encode into `bytes` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let bytes: EVM.EVMBytes = EVM.EVMBytes(value: [5, 10, 15, 20, 25]) + return EVM.encodeABI([bytes]) + } + `) + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + 
Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + abiBytes := []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5, 0x5, + 0xa, 0xf, 0x14, 0x19, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, + } + expected := "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000005050a0f1419000000000000000000000000000000000000000000000000000000" + assert.Equal( + t, + expected, + hex.EncodeToString(abiBytes), + ) + cdcBytes := make([]cadence.Value, 0) + for _, bt := range abiBytes { + cdcBytes = append(cdcBytes, cadence.UInt8(bt)) + } + encodedABI := cadence.NewArray( + cdcBytes, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + assert.Equal(t, + encodedABI, + result, + ) + assert.Equal(t, uint64(len(cdcBytes)), gauge.TotalComputationUsed()) + }) + + t.Run("ABI encode into `bytes[]` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let bytesArray: [EVM.EVMBytes] = [ + EVM.EVMBytes(value: [5]), + EVM.EVMBytes(value: [10]) + ] + return EVM.encodeABI([bytesArray]) + } + `) + + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + abiBytes := []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x40, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x1, 0x5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xa, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, + } + expected := 
"00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000000" + assert.Equal( + t, + expected, + hex.EncodeToString(abiBytes), + ) + cdcBytes := make([]cadence.Value, 0) + for _, bt := range abiBytes { + cdcBytes = append(cdcBytes, cadence.UInt8(bt)) + } + encodedABI := cadence.NewArray( + cdcBytes, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + assert.Equal(t, + encodedABI, + result, + ) + assert.Equal(t, uint64(len(cdcBytes)), gauge.TotalComputationUsed()) + }) + + t.Run("ABI encode into `bytes4` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let bytes: EVM.EVMBytes4 = EVM.EVMBytes4(value: [5, 10, 15, 20]) + return EVM.encodeABI([bytes]) + } + `) + + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + abiBytes := []byte{ + 0x5, 0xa, 0xf, 0x14, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + } + expected := "050a0f1400000000000000000000000000000000000000000000000000000000" + assert.Equal( + t, + expected, + hex.EncodeToString(abiBytes), + ) + cdcBytes := make([]cadence.Value, 0) + for _, bt := range abiBytes { + cdcBytes = append(cdcBytes, cadence.UInt8(bt)) + } + encodedABI := cadence.NewArray( + cdcBytes, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + assert.Equal(t, + encodedABI, + result, + ) + assert.Equal(t, uint64(len(cdcBytes)), gauge.TotalComputationUsed()) + }) + + t.Run("ABI encode into `bytes4[]` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let bytesArray: [EVM.EVMBytes4] = [ + EVM.EVMBytes4(value: [5, 10, 15, 20]), + EVM.EVMBytes4(value: [25, 30, 35, 40]) + ] + return EVM.encodeABI([bytesArray]) + } + `) + + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + abiBytes := []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x2, 0x5, 0xa, 0xf, 0x14, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x19, 0x1e, 0x23, 0x28, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + } + expected := "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002050a0f1400000000000000000000000000000000000000000000000000000000191e232800000000000000000000000000000000000000000000000000000000" + assert.Equal( + t, + expected, + hex.EncodeToString(abiBytes), + ) + cdcBytes := make([]cadence.Value, 0) + for _, bt := range abiBytes { + cdcBytes = append(cdcBytes, cadence.UInt8(bt)) + } + encodedABI := cadence.NewArray( + cdcBytes, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + assert.Equal(t, + encodedABI, + result, + ) + assert.Equal(t, uint64(len(cdcBytes)), gauge.TotalComputationUsed()) + }) + + t.Run("ABI encode into `bytes32` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let bytes: EVM.EVMBytes32 = EVM.EVMBytes32( + value: [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32 + ] + ) + return EVM.encodeABI([bytes]) + } + `) + + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + abiBytes := []byte{ + 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, + 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + } + expected := "0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20" + assert.Equal( + t, + expected, + hex.EncodeToString(abiBytes), + ) + cdcBytes := make([]cadence.Value, 0) + for _, bt := range abiBytes { + cdcBytes = append(cdcBytes, cadence.UInt8(bt)) + } + encodedABI := cadence.NewArray( + cdcBytes, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + assert.Equal(t, + encodedABI, + result, + ) + assert.Equal(t, uint64(len(cdcBytes)), gauge.TotalComputationUsed()) + }) + + t.Run("ABI encode into `bytes32[]` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let bytesA: EVM.EVMBytes32 = EVM.EVMBytes32( + value: [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32 + ] + ) + let bytesB: EVM.EVMBytes32 = EVM.EVMBytes32( + value: [ + 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, + 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, + 2, 1 + ] + ) + let bytesArray: [EVM.EVMBytes32] = [bytesA, bytesB] + return EVM.encodeABI([bytesArray]) + } + `) + + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + 
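The weight 1 << meter.MeterExecutionInternalPrecisionBytes recurs in every gauge in this file. It appears to cancel the meter's internal fixed-point shift, so that one unit of metered intensity is reported as exactly one unit by TotalComputationUsed; that is why these tests can assert that computation equals the encoded byte count. A minimal sketch of the arithmetic, assuming a precision of 16 (the constant's actual value is an assumption here):

package main

import "fmt"

// Illustrative only: "precision" stands in for meter.MeterExecutionInternalPrecisionBytes,
// assumed here to be 16. The meter accumulates computation in a left-shifted internal
// representation, so a weight of 1 << precision makes one unit of metered intensity
// surface as exactly one unit of TotalComputationUsed.
const precision = 16

func main() {
	weight := uint64(1) << precision   // the weight assigned to ComputationKindEVMEncodeABI
	intensity := uint64(832)           // e.g. the number of ABI-encoded bytes
	internal := intensity * weight     // what the meter accumulates internally
	fmt.Println(internal >> precision) // 832, matching len(cdcBytes.Values) in the assertions
}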
environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + abiBytes := []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x1, + 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, + 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, + 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x20, 0x1f, 0x1e, 0x1d, + 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, + 0x11, 0x10, 0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9, 0x8, 0x7, 0x6, 0x5, + 0x4, 0x3, 0x2, 0x1, + } + expected := "000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000020102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20201f1e1d1c1b1a191817161514131211100f0e0d0c0b0a090807060504030201" + assert.Equal( + t, + expected, + hex.EncodeToString(abiBytes), + ) + cdcBytes := make([]cadence.Value, 0) + for _, bt := range abiBytes { + cdcBytes = append(cdcBytes, cadence.UInt8(bt)) + } + encodedABI := cadence.NewArray( + cdcBytes, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + assert.Equal(t, + encodedABI, + result, + ) + assert.Equal(t, uint64(len(cdcBytes)), gauge.TotalComputationUsed()) + }) +} + +func TestEVMEncodeABIBytesRoundtrip(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + contractsAddress := flow.BytesToAddress([]byte{0x1}) + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + t.Run("ABI encode/decode into `bytes` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let bytes: EVM.EVMBytes = EVM.EVMBytes(value: [5, 
10, 15, 20, 25]) + let encodedData = EVM.encodeABI([bytes]) + let types = [Type<EVM.EVMBytes>()] + let values = EVM.decodeABI(types: types, data: encodedData) + + assert(values.length == 1) + let evmBytes = values[0] as! EVM.EVMBytes + assert(evmBytes.value == [5, 10, 15, 20, 25]) + + return true + } + `) + + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + assert.Equal(t, + cadence.Bool(true), + result, + ) + + assert.Equal(t, uint64(96), gauge.TotalComputationUsed()) + }) + + t.Run("ABI encode/decode into `bytes[]` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let bytes: EVM.EVMBytes = EVM.EVMBytes(value: [5, 10, 15, 20, 25]) + let bytesArray: [EVM.EVMBytes] = [bytes] + let encodedData = EVM.encodeABI([bytesArray]) + let types = [Type<[EVM.EVMBytes]>()] + let values = EVM.decodeABI(types: types, data: encodedData) + + assert(values.length == 1) + let evmBytes = values[0] as! [EVM.EVMBytes] + assert(evmBytes[0].value == [5, 10, 15, 20, 25]) + + return true + } + `) + + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + assert.Equal(t, + cadence.Bool(true), + result, + ) + + assert.Equal(t, uint64(160), gauge.TotalComputationUsed()) + }) + + t.Run("ABI encode/decode into `bytes4` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let bytes: EVM.EVMBytes4 = EVM.EVMBytes4(value: [5, 10, 15, 20]) + let encodedData = EVM.encodeABI([bytes]) + let types = [Type<EVM.EVMBytes4>()] + let values = EVM.decodeABI(types: types, data: encodedData) + + assert(values.length == 1) + let evmBytes = values[0] as! 
EVM.EVMBytes4 + assert(evmBytes.value == [5, 10, 15, 20]) + + return true + } + `) + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + assert.Equal(t, + cadence.Bool(true), + result, + ) + + assert.Equal(t, uint64(32), gauge.TotalComputationUsed()) + }) + + t.Run("ABI encode/decode into `bytes4[]` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let bytes: EVM.EVMBytes4 = EVM.EVMBytes4(value: [5, 10, 15, 20]) + let bytesArray: [EVM.EVMBytes4] = [bytes] + let encodedData = EVM.encodeABI([bytesArray]) + let types = [Type<[EVM.EVMBytes4]>()] + let values = EVM.decodeABI(types: types, data: encodedData) + + assert(values.length == 1) + let evmBytes = values[0] as! [EVM.EVMBytes4] + assert(evmBytes[0].value == [5, 10, 15, 20]) + + return true + } + `) + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + assert.Equal(t, + cadence.Bool(true), + result, + ) + + assert.Equal(t, uint64(96), gauge.TotalComputationUsed()) + }) + + t.Run("ABI encode/decode into `bytes32` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let bytes: EVM.EVMBytes32 = EVM.EVMBytes32( + value: [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32 + ] + ) + let encodedData = EVM.encodeABI([bytes]) + let types = [Type<EVM.EVMBytes32>()] + let values = EVM.decodeABI(types: types, data: encodedData) + + assert(values.length == 1) + let evmBytes = values[0] as! 
EVM.EVMBytes32 + assert(evmBytes.value == [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32 + ]) + + return true + } + `) + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + assert.Equal(t, + cadence.Bool(true), + result, + ) + + assert.Equal(t, uint64(32), gauge.TotalComputationUsed()) + }) + + t.Run("ABI encode/decode into `bytes32[]` Solidity type", func(t *testing.T) { + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let bytes: EVM.EVMBytes32 = EVM.EVMBytes32( + value: [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32 + ] + ) + let bytesArray: [EVM.EVMBytes32] = [bytes] + let encodedData = EVM.encodeABI([bytesArray]) + let types = [Type<[EVM.EVMBytes32]>()] + let values = EVM.decodeABI(types: types, data: encodedData) + + assert(values.length == 1) + let evmBytes = values[0] as! [EVM.EVMBytes32] + assert(evmBytes[0].value == [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32 + ]) + + return true + } + `) + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + assert.Equal(t, + cadence.Bool(true), + result, + ) + + assert.Equal(t, uint64(96), gauge.TotalComputationUsed()) + }) +} + +func TestEVMEncodeABIComputation(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let address = EVM.EVMAddress( + bytes: "7A58c0Be72BE218B41C608b7Fe7C5bB630736C71" + .decodeHex() + .toConstantSized<[UInt8; 20]>()! 
+ ) + let arr: [UInt8] = [1, 2, 3, 4, 5] + + return EVM.encodeABI([ + "John Doe", + UInt64(33), + false, + address, + [arr], + ["one", "two", "three"] + ]) + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + cdcBytes, ok := result.(cadence.Array) + require.True(t, ok) + // computation & len(cdcBytes.Values) is equal to 832 + assert.Equal(t, uint64(len(cdcBytes.Values)), gauge.TotalComputationUsed()) +} + +func TestEVMEncodeABIComputationEmptyDynamicVariables(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + return EVM.encodeABI([ + "", + [[""], [] as [String]], + [] as [UInt8], + ["", "", ""] + ]) + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := 
NewScriptLocationGenerator()
+
+	// Deploy contracts
+
+	deployContracts(
+		t,
+		rt,
+		contractsAddress,
+		runtimeInterface,
+		transactionEnvironment,
+		nextTransactionLocation,
+	)
+
+	gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{
+		environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes,
+	}))
+
+	// Run script
+
+	result, err := rt.ExecuteScript(
+		runtime.Script{
+			Source:    script,
+			Arguments: [][]byte{},
+		},
+		runtime.Context{
+			Interface:        runtimeInterface,
+			Environment:      scriptEnvironment,
+			Location:         nextScriptLocation(),
+			MemoryGauge:      gauge,
+			ComputationGauge: gauge,
+		},
+	)
+	require.NoError(t, err)
+
+	cdcBytes, ok := result.(cadence.Array)
+	require.True(t, ok)
+	// the metered computation equals len(cdcBytes.Values), i.e. the number of ABI-encoded bytes
+	assert.Equal(t, uint64(len(cdcBytes.Values)), gauge.TotalComputationUsed())
+}
+
+func TestEVMEncodeABIComputationDynamicVariablesAboveChunkSize(t *testing.T) {
+
+	t.Parallel()
+
+	handler := &testContractHandler{}
+
+	contractsAddress := flow.BytesToAddress([]byte{0x1})
+
+	transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress)
+	scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress)
+
+	rt := runtime.NewRuntime(runtime.Config{})
+
+	script := []byte(`
+		import EVM from 0x1
+
+		access(all)
+		fun main(): [UInt8] {
+			let str = "abcdefghijklmnopqrstuvwxyz"
+			let arr: [UInt64] = [
+				1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+				20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+				37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53
+			]
+
+			return EVM.encodeABI([
+				str,
+				str.concat(str).concat(str),
+				[[str]],
+				arr,
+				[arr],
+				arr.concat(arr).concat(arr)
+			])
+		}
+	`)
+
+	accountCodes := map[common.Location][]byte{}
+	var events []cadence.Event
+
+	runtimeInterface := &TestRuntimeInterface{
+		Storage: NewTestLedger(nil, nil),
+		OnGetSigningAccounts: func() ([]runtime.Address, error) {
+			return []runtime.Address{runtime.Address(contractsAddress)}, nil
+		},
+		OnResolveLocation: newLocationResolver(contractsAddress),
+		OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error {
+			accountCodes[location] = code
+			return nil
+		},
+		OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) {
+			code = accountCodes[location]
+			return code, nil
+		},
+		OnEmitEvent: func(event cadence.Event) error {
+			events = append(events, event)
+			return nil
+		},
+		OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) {
+			return json.Decode(nil, b)
+		},
+	}
+
+	nextTransactionLocation := NewTransactionLocationGenerator()
+	nextScriptLocation := NewScriptLocationGenerator()
+
+	// Deploy contracts
+
+	deployContracts(
+		t,
+		rt,
+		contractsAddress,
+		runtimeInterface,
+		transactionEnvironment,
+		nextTransactionLocation,
+	)
+
+	gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{
+		environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes,
+	}))
+
+	// Run script
+
+	result, err := rt.ExecuteScript(
+		runtime.Script{
+			Source:    script,
+			Arguments: [][]byte{},
+		},
+		runtime.Context{
+			Interface:        runtimeInterface,
+			Environment:      scriptEnvironment,
+			Location:         nextScriptLocation(),
+			MemoryGauge:      gauge,
+			ComputationGauge: gauge,
+		},
+	)
+	require.NoError(t, err)
+
+	cdcBytes, ok := result.(cadence.Array)
+	require.True(t, ok)
+	// the metered computation equals len(cdcBytes.Values), i.e. the number of ABI-encoded bytes
+	assert.Equal(t, uint64(len(cdcBytes.Values)), gauge.TotalComputationUsed())
+}
+
+func TestEVMDecodeABI(t *testing.T) {
+
+	t.Parallel()
+
+	handler := &testContractHandler{}
+
+	contractsAddress := flow.BytesToAddress([]byte{0x1})
+
+	transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress)
+	scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress)
+
+	rt := runtime.NewRuntime(runtime.Config{})
+
+	script := []byte(`
+		import EVM from 0x1
+
+		access(all)
+		fun main(data: [UInt8]): Bool {
+			let types = [Type<String>(), Type<UInt64>(), Type<Bool>()]
+			let values = EVM.decodeABI(types: types, data: data)
+
+			assert(values.length == 3)
+			assert((values[0] as! String) == "John Doe")
+			assert((values[1] as! UInt64) == UInt64(33))
+			assert((values[2] as! Bool) == false)
+
+			return true
+		}
+	`)
+
+	accountCodes := map[common.Location][]byte{}
+	var events []cadence.Event
+
+	runtimeInterface := &TestRuntimeInterface{
+		Storage: NewTestLedger(nil, nil),
+		OnGetSigningAccounts: func() ([]runtime.Address, error) {
+			return []runtime.Address{runtime.Address(contractsAddress)}, nil
+		},
+		OnResolveLocation: newLocationResolver(contractsAddress),
+		OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error {
+			accountCodes[location] = code
+			return nil
+		},
+		OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) {
+			code = accountCodes[location]
+			return code, nil
+		},
+		OnEmitEvent: func(event cadence.Event) error {
+			events = append(events, event)
+			return nil
+		},
+		OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) {
+			return json.Decode(nil, b)
+		},
+	}
+
+	nextTransactionLocation := NewTransactionLocationGenerator()
+	nextScriptLocation := NewScriptLocationGenerator()
+
+	// Deploy contracts
+
+	deployContracts(
+		t,
+		rt,
+		contractsAddress,
+		runtimeInterface,
+		transactionEnvironment,
+		nextTransactionLocation,
+	)
+
+	gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{
+		environment.ComputationKindEVMDecodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes,
+	}))
+
+	// Run script
+	abiBytes := []byte{
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x60, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x21, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x8, 0x4a, 0x6f, 0x68, 0x6e, 0x20, 0x44, 0x6f, 0x65, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+		0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+	}
+	cdcBytes := make([]cadence.Value, 0)
+	for _, bt := range abiBytes {
+		cdcBytes = append(cdcBytes, cadence.UInt8(bt))
+	}
+	encodedABI := cadence.NewArray(
+		cdcBytes,
+	).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type))
+
+	result, err := rt.ExecuteScript(
+		runtime.Script{
+			Source: script,
+			Arguments: EncodeArgs([]cadence.Value{
+				encodedABI,
+			}),
+		},
+		runtime.Context{
+			Interface:   runtimeInterface,
+			Environment: scriptEnvironment,
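These tests repeatedly expand a Go byte slice into a Cadence [UInt8] array value by hand before comparing it against a script result or passing it as a script argument. A hypothetical helper (not present in the file) capturing that recurring pattern:

package main

import (
	"fmt"

	"github.com/onflow/cadence"
)

// toCadenceUInt8Array is a hypothetical helper for the conversion pattern
// repeated throughout these tests: a Go byte slice becomes a Cadence [UInt8]
// array value, suitable for comparing against script results.
func toCadenceUInt8Array(bs []byte) cadence.Array {
	values := make([]cadence.Value, 0, len(bs))
	for _, b := range bs {
		values = append(values, cadence.UInt8(b))
	}
	return cadence.NewArray(values).
		WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type))
}

func main() {
	arr := toCadenceUInt8Array([]byte{0x4a, 0x6f, 0x68, 0x6e})
	fmt.Println(len(arr.Values)) // 4
}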
Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + assert.Equal(t, cadence.NewBool(true), result) + assert.Equal(t, uint64(len(cdcBytes)), gauge.TotalComputationUsed()) +} + +func TestEVMDecodeABIComputation(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let address = EVM.EVMAddress( + bytes: "7A58c0Be72BE218B41C608b7Fe7C5bB630736C71" + .decodeHex() + .toConstantSized<[UInt8; 20]>()! + ) + let arr: [UInt8] = [1, 2, 3, 4, 5] + + let data = EVM.encodeABI([ + "John Doe", + UInt64(33), + true, + address, + [arr], + ["one", "two", "three"] + ]) + + let types = [ + Type<String>(), Type<UInt64>(), Type<Bool>(), Type<EVM.EVMAddress>(), + Type<[[UInt8]]>(), Type<[String]>() + ] + let values = EVM.decodeABI(types: types, data: data) + + return data + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + cdcBytes, ok := result.(cadence.Array) + require.True(t, ok) + // computation & len(cdcBytes.Values) is equal to 832 + assert.Equal(t, uint64(len(cdcBytes.Values)), gauge.TotalComputationUsed()) +} + +func TestEVMEncodeDecodeABIRoundtripForUintIntTypes(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + contractsAddress := flow.BytesToAddress([]byte{0x1}) + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := 
&TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + OnHash: func( + data []byte, + tag string, + hashAlgorithm runtime.HashAlgorithm, + ) ([]byte, error) { + return crypto.Keccak256(data), nil + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + t.Run("with values between the boundaries", func(t *testing.T) { + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + // Check UInt/Int encode/decode + let amount: UInt256 = 18446744073709551615 + let minBalance: Int256 = -18446744073709551615 + let data = EVM.encodeABIWithSignature( + "withdraw(uint,int)", + [UInt(amount), Int(minBalance)] + ) + let values = EVM.decodeABIWithSignature( + "withdraw(uint,int)", + types: [Type<UInt>(), Type<Int>()], + data: data + ) + assert((values[0] as! UInt) == UInt(amount)) + assert((values[1] as! Int) == Int(minBalance)) + + return true + } + `) + + // Run script + + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + assert.Equal(t, cadence.Bool(true), result) + }) + + t.Run("with values at the boundaries", func(t *testing.T) { + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + // Check UInt*/Int* encode/decode + let data = EVM.encodeABIWithSignature( + "withdraw(uint,int,uint,int)", + [UInt(UInt256.max), Int(Int256.max),UInt(UInt256.min), Int(Int256.min)] + ) + let values = EVM.decodeABIWithSignature( + "withdraw(uint,int,uint,int)", + types: [Type<UInt>(), Type<Int>(),Type<UInt>(), Type<Int>()], + data: data + ) + assert((values[0] as! UInt) == UInt(UInt256.max)) + assert((values[1] as! Int) == Int(Int256.max)) + assert((values[2] as! UInt) == UInt(UInt256.min)) + assert((values[3] as! 
Int) == Int(Int256.min)) + + return true + } + `) + + // Run script + + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + assert.Equal(t, cadence.Bool(true), result) + }) + + t.Run("with UInt values outside the boundaries", func(t *testing.T) { + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let data = EVM.encodeABIWithSignature( + "withdraw(uint)", + [UInt(UInt256.max)+10] + ) + + return true + } + `) + + // Run script + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.Error(t, err) + + assert.ErrorContains( + t, + err, + "failed to ABI encode value of type UInt: value outside the boundaries of uint256", + ) + }) + + t.Run("with Int values outside the max boundary", func(t *testing.T) { + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let data = EVM.encodeABIWithSignature( + "withdraw(int)", + [Int(Int256.max)+10] + ) + + return true + } + `) + + // Run script + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.Error(t, err) + + assert.ErrorContains( + t, + err, + "failed to ABI encode value of type Int: value outside the boundaries of int256", + ) + }) + + t.Run("with Int values outside the min boundary", func(t *testing.T) { + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let data = EVM.encodeABIWithSignature( + "withdraw(int)", + [Int(Int256.min)-10] + ) + + return true + } + `) + + // Run script + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.Error(t, err) + + assert.ErrorContains( + t, + err, + "failed to ABI encode value of type Int: value outside the boundaries of int256", + ) + }) +} + +func TestEVMEncodeDecodeABIRoundtrip(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + // Check EVM.EVMAddress encode/decode + let address = EVM.EVMAddress( + bytes: "7A58c0Be72BE218B41C608b7Fe7C5bB630736C71" + .decodeHex() + .toConstantSized<[UInt8; 20]>()! + ) + var data = EVM.encodeABI([address]) + var values = EVM.decodeABI(types: [Type<EVM.EVMAddress>()], data: data) + assert(values.length == 1) + assert((values[0] as! EVM.EVMAddress).bytes == address.bytes) + + // Check String encode/decode + data = EVM.encodeABI(["John Doe", ""]) + values = EVM.decodeABI(types: [Type<String>(), Type<String>()], data: data) + assert((values[0] as! String) == "John Doe") + assert((values[1] as! 
String) == "") + + // Check Bool encode/decode + data = EVM.encodeABI([true, false]) + values = EVM.decodeABI(types: [Type<Bool>(), Type<Bool>()], data: data) + assert((values[0] as! Bool) == true) + assert((values[1] as! Bool) == false) + + // Check UInt*/Int* encode/decode + data = EVM.encodeABI([ + UInt8(33), + UInt16(33), + UInt32(33), + UInt64(33), + UInt128(33), + UInt256(33), + Int8(-33), + Int16(-33), + Int32(-33), + Int64(-33), + Int128(-33), + Int256(-33), + UInt(33), + Int(-33) + ]) + values = EVM.decodeABI( + types: [ + Type<UInt8>(), + Type<UInt16>(), + Type<UInt32>(), + Type<UInt64>(), + Type<UInt128>(), + Type<UInt256>(), + Type<Int8>(), + Type<Int16>(), + Type<Int32>(), + Type<Int64>(), + Type<Int128>(), + Type<Int256>(), + Type<UInt>(), + Type<Int>() + ], + data: data + ) + assert((values[0] as! UInt8) == 33) + assert((values[1] as! UInt16) == 33) + assert((values[2] as! UInt32) == 33) + assert((values[3] as! UInt64) == 33) + assert((values[4] as! UInt128) == 33) + assert((values[5] as! UInt256) == 33) + assert((values[6] as! Int8) == -33) + assert((values[7] as! Int16) == -33) + assert((values[8] as! Int32) == -33) + assert((values[9] as! Int64) == -33) + assert((values[10] as! Int128) == -33) + assert((values[11] as! Int256) == -33) + assert((values[12] as! UInt) == 33) + assert((values[13] as! Int) == -33) + + // Check variable-size array of leaf types encode/decode + data = EVM.encodeABI([ + ["one", "two"], + [true, false], + [5, 10] as [UInt8], + [5, 10] as [UInt16], + [5, 10] as [UInt32], + [5, 10] as [UInt64], + [5, 10] as [UInt128], + [5, 10] as [UInt256], + [-5, -10] as [Int8], + [-5, -10] as [Int16], + [-5, -10] as [Int32], + [-5, -10] as [Int64], + [-5, -10] as [Int128], + [-5, -10] as [Int256], + [address] as [EVM.EVMAddress] + ]) + values = EVM.decodeABI( + types: [ + Type<[String]>(), + Type<[Bool]>(), + Type<[UInt8]>(), + Type<[UInt16]>(), + Type<[UInt32]>(), + Type<[UInt64]>(), + Type<[UInt128]>(), + Type<[UInt256]>(), + Type<[Int8]>(), + Type<[Int16]>(), + Type<[Int32]>(), + Type<[Int64]>(), + Type<[Int128]>(), + Type<[Int256]>(), + Type<[EVM.EVMAddress]>() + ], + data: data + ) + assert((values[0] as! [String]) == ["one", "two"]) + assert((values[1] as! [Bool]) == [true, false]) + assert((values[2] as! [UInt8]) == [5, 10]) + assert((values[3] as! [UInt16]) == [5, 10]) + assert((values[4] as! [UInt32]) == [5, 10]) + assert((values[5] as! [UInt64]) == [5, 10]) + assert((values[6] as! [UInt128]) == [5, 10]) + assert((values[7] as! [UInt256]) == [5, 10]) + assert((values[8] as! [Int8]) == [-5, -10]) + assert((values[9] as! [Int16]) == [-5, -10]) + assert((values[10] as! [Int32]) == [-5, -10]) + assert((values[11] as! [Int64]) == [-5, -10]) + assert((values[12] as! [Int128]) == [-5, -10]) + assert((values[13] as! [Int256]) == [-5, -10]) + assert((values[14] as! 
[EVM.EVMAddress])[0].bytes == [address][0].bytes) + + // Check constant-size array of leaf types encode/decode + data = EVM.encodeABI([ + ["one", "two"] as [String; 2], + [true, false] as [Bool; 2], + [5, 10] as [UInt8; 2], + [5, 10] as [UInt16; 2], + [5, 10] as [UInt32; 2], + [5, 10] as [UInt64; 2], + [5, 10] as [UInt128; 2], + [5, 10] as [UInt256; 2], + [-5, -10] as [Int8; 2], + [-5, -10] as [Int16; 2], + [-5, -10] as [Int32; 2], + [-5, -10] as [Int64; 2], + [-5, -10] as [Int128; 2], + [-5, -10] as [Int256; 2], + [address] as [EVM.EVMAddress; 1] + ]) + values = EVM.decodeABI( + types: [ + Type<[String; 2]>(), + Type<[Bool; 2]>(), + Type<[UInt8; 2]>(), + Type<[UInt16; 2]>(), + Type<[UInt32; 2]>(), + Type<[UInt64; 2]>(), + Type<[UInt128; 2]>(), + Type<[UInt256; 2]>(), + Type<[Int8; 2]>(), + Type<[Int16; 2]>(), + Type<[Int32; 2]>(), + Type<[Int64; 2]>(), + Type<[Int128; 2]>(), + Type<[Int256; 2]>(), + Type<[EVM.EVMAddress; 1]>() + ], + data: data + ) + assert((values[0] as! [String; 2]) == ["one", "two"]) + assert((values[1] as! [Bool; 2]) == [true, false]) + assert((values[2] as! [UInt8; 2]) == [5, 10]) + assert((values[3] as! [UInt16; 2]) == [5, 10]) + assert((values[4] as! [UInt32; 2]) == [5, 10]) + assert((values[5] as! [UInt64; 2]) == [5, 10]) + assert((values[6] as! [UInt128; 2]) == [5, 10]) + assert((values[7] as! [UInt256; 2]) == [5, 10]) + assert((values[8] as! [Int8; 2]) == [-5, -10]) + assert((values[9] as! [Int16; 2]) == [-5, -10]) + assert((values[10] as! [Int32; 2]) == [-5, -10]) + assert((values[11] as! [Int64; 2]) == [-5, -10]) + assert((values[12] as! [Int128; 2]) == [-5, -10]) + assert((values[13] as! [Int256; 2]) == [-5, -10]) + assert((values[14] as! [EVM.EVMAddress; 1])[0].bytes == [address][0].bytes) + + // Check partial decoding of encoded data + data = EVM.encodeABI(["Peter", UInt64(9999)]) + values = EVM.decodeABI(types: [Type<String>()], data: data) + assert(values.length == 1) + assert((values[0] as! String) == "Peter") + + // Check nested arrays of leaf values + data = EVM.encodeABI([[["Foo", "Bar"], ["Baz", "Qux"]]]) + values = EVM.decodeABI(types: [Type<[[String]]>()], data: data) + assert(values.length == 1) + assert((values[0] as! 
[[String]]) == [["Foo", "Bar"], ["Baz", "Qux"]]) + + return true + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + assert.Equal(t, + cadence.Bool(true), + result, + ) +} + +func TestEVMEncodeDecodeABIErrors(t *testing.T) { + + t.Parallel() + + t.Run("encodeABI with unsupported Address type", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let address: Address = 0x045a1763c93006ca + let data = EVM.encodeABI([address]) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + 
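Each subtest in TestEVMEncodeDecodeABIErrors exercises one Cadence value that EVM.encodeABI rejects. A hypothetical table-driven condensation of those cases, with the expressions copied from the subtests (a sketch for reference, not a suggested replacement for the full subtests):

package main

import "fmt"

// Every expression listed below is rejected by EVM.encodeABI with a
// "failed to ABI encode value of type ..." error, as the subtests assert.
func main() {
	cases := []struct{ typ, expr string }{
		{"Address", "0x045a1763c93006ca"},
		{"UFix64", "0.2"},
		{"{Int: Bool}", "{0: false, 1: true}"},
		{"Character", `["a", "b", "c"] as [Character]`},
		{"custom composite", "Token(id: 9, balance: 150)"},
	}
	for _, c := range cases {
		fmt.Printf("encodeABI rejects %s (example value: %s)\n", c.typ, c.expr)
	}
}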
) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI encode value of type Address", + ) + }) + + t.Run("encodeABI with unsupported fixed-point number type", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let data = EVM.encodeABI([0.2]) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI encode value of type UFix64", + ) + }) + + t.Run("encodeABI with unsupported dictionary type", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + 
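deployContracts, defined near the top of this file, falls back to blueprints.DeployContractTransactionTemplate and passes each contract's name and source code as two string transaction arguments. A sketch of how such arguments are JSON-CDC encoded, mirroring what the EncodeArgs helper used by these tests produces (the contract body shown is a placeholder, not the real EVM contract source):

package main

import (
	"fmt"

	"github.com/onflow/cadence"
	jsoncdc "github.com/onflow/cadence/encoding/json"
)

func main() {
	// The two arguments the generic deployment transaction expects:
	// the contract name and its source code, both as Cadence strings.
	args := []cadence.Value{
		cadence.String("EVM"),
		cadence.String("access(all) contract EVM { /* ... */ }"),
	}
	for _, v := range args {
		encoded, err := jsoncdc.Encode(v)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(encoded))
	}
}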
deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let dict: {Int: Bool} = {0: false, 1: true} + let data = EVM.encodeABI([dict]) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI encode value of type {Int: Bool}", + ) + }) + + t.Run("encodeABI with unsupported array element type", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let chars: [Character] = ["a", "b", "c"] + let data = EVM.encodeABI([chars]) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI encode value of type Character", + ) + }) + + t.Run("encodeABI with unsupported custom composite type", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location 
common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) struct Token { + access(all) let id: Int + access(all) var balance: UInt + + init(id: Int, balance: UInt) { + self.id = id + self.balance = balance + } + } + + access(all) + fun main(): Bool { + let token = Token(id: 9, balance: 150) + let data = EVM.encodeABI([token]) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI encode value of type s.0100000000000000000000000000000000000000000000000000000000000000.Token", + ) + }) + + t.Run("decodeABI with mismatched type", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let data = EVM.encodeABI(["Peter"]) + let values = EVM.decodeABI(types: [Type<Bool>()], data: data) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI decode 
data", + ) + }) + + t.Run("decodeABI with surplus of types", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let data = EVM.encodeABI(["Peter"]) + let values = EVM.decodeABI(types: [Type<String>(), Type<Bool>()], data: data) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI decode data", + ) + }) + + t.Run("decodeABI with unsupported fixed-point number type", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + 
runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let data = EVM.encodeABI(["Peter"]) + let values = EVM.decodeABI(types: [Type<UFix64>()], data: data) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI decode data with type UFix64", + ) + }) + + t.Run("decodeABI with unsupported dictionary type", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let data = EVM.encodeABI(["Peter"]) + let values = EVM.decodeABI(types: [Type<{Int: Bool}>()], data: data) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI decode data with type {Int: Bool}", + ) + }) + + t.Run("decodeABI with unsupported array element type", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location 
common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): Bool { + let data = EVM.encodeABI(["Peter"]) + let values = EVM.decodeABI(types: [Type<[Character]>()], data: data) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI decode data with type [Character]", + ) + }) + + t.Run("decodeABI with unsupported custom composite type", func(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) struct Token { + access(all) let id: Int + access(all) var balance: UInt + + init(id: Int, balance: UInt) { + self.id = id + self.balance = balance + } + } + + access(all) + fun main(): Bool { + let data = EVM.encodeABI(["Peter"]) + let values = EVM.decodeABI(types: [Type<Token>()], data: data) + + return true + } + `) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + RequireError(t, err) + assert.ErrorContains( + t, + err, + "failed to ABI decode data 
with type s.0100000000000000000000000000000000000000000000000000000000000000.Token", + ) + }) +} + +func TestEVMEncodeABIWithSignature(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let address = EVM.EVMAddress( + bytes: "7A58c0Be72BE218B41C608b7Fe7C5bB630736C71" + .decodeHex() + .toConstantSized<[UInt8; 20]>()! + ) + + return EVM.encodeABIWithSignature( + "withdraw(address,uint256)", + [address, UInt256(250)] + ) + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + OnHash: func( + data []byte, + tag string, + hashAlgorithm runtime.HashAlgorithm, + ) ([]byte, error) { + return crypto.Keccak256(data), nil + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMEncodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: [][]byte{}, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + abiBytes := []byte{ + 0xf3, 0xfe, 0xf3, 0xa3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x7a, 0x58, 0xc0, 0xbe, 0x72, 0xbe, 0x21, 0x8b, 0x41, + 0xc6, 0x8, 0xb7, 0xfe, 0x7c, 0x5b, 0xb6, 0x30, 0x73, 0x6c, 0x71, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0xfa, + } + cdcBytes := make([]cadence.Value, 0) + for _, bt := range abiBytes { + cdcBytes = append(cdcBytes, cadence.UInt8(bt)) + } + encodedABI := cadence.NewArray( + cdcBytes, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + assert.Equal(t, + encodedABI, + result, + ) + // The method ID is a byte array of length 4 + assert.Equal(t, uint64(len(cdcBytes)), gauge.TotalComputationUsed()+4) +} + +func TestEVMDecodeABIWithSignature(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := 
flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(data: [UInt8]): Bool { + let values = EVM.decodeABIWithSignature( + "withdraw(address,uint256)", + types: [Type<EVM.EVMAddress>(), Type<UInt256>()], + data: data + ) + + // bytes for address 0x7A58c0Be72BE218B41C608b7Fe7C5bB630736C71 + let address = EVM.EVMAddress( + bytes: [ + 122, 88, 192, 190, 114, 190, 33, 139, 65, 198, + 8, 183, 254, 124, 91, 182, 48, 115, 108, 113 + ] + ) + + assert(values.length == 2) + assert((values[0] as! EVM.EVMAddress).bytes == address.bytes) + assert((values[1] as! UInt256) == UInt256(250)) + + return true + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + OnHash: func( + data []byte, + tag string, + hashAlgorithm runtime.HashAlgorithm, + ) ([]byte, error) { + return crypto.Keccak256(data), nil + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + gauge := meter.NewMeter(meter.DefaultParameters().WithComputationWeights(meter.ExecutionEffortWeights{ + environment.ComputationKindEVMDecodeABI: 1 << meter.MeterExecutionInternalPrecisionBytes, + })) + + // Run script + abiBytes := []byte{ + 0xf3, 0xfe, 0xf3, 0xa3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x7a, 0x58, 0xc0, 0xbe, 0x72, 0xbe, 0x21, 0x8b, 0x41, + 0xc6, 0x8, 0xb7, 0xfe, 0x7c, 0x5b, 0xb6, 0x30, 0x73, 0x6c, 0x71, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0xfa, + } + cdcBytes := make([]cadence.Value, 0) + for _, bt := range abiBytes { + cdcBytes = append(cdcBytes, cadence.UInt8(bt)) + } + encodedABI := cadence.NewArray( + cdcBytes, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: EncodeArgs([]cadence.Value{ + encodedABI, + }), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + MemoryGauge: gauge, + ComputationGauge: gauge, + }, + ) + require.NoError(t, err) + + assert.Equal(t, cadence.NewBool(true), result) + // The method ID is a byte array of length 4 + assert.Equal(t, uint64(len(cdcBytes)), gauge.TotalComputationUsed()+4) +} + +func 
TestEVMDecodeABIWithSignatureMismatch(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(data: [UInt8]): Bool { + // The data was encoded for the function "withdraw(address,uint256)", + // but we pass a different function signature + let values = EVM.decodeABIWithSignature( + "deposit(uint256, address)", + types: [Type<UInt256>(), Type<EVM.EVMAddress>()], + data: data + ) + + return true + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + OnHash: func( + data []byte, + tag string, + hashAlgorithm runtime.HashAlgorithm, + ) ([]byte, error) { + return crypto.Keccak256(data), nil + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + abiBytes := []byte{ + 0xf3, 0xfe, 0xf3, 0xa3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x7a, 0x58, 0xc0, 0xbe, 0x72, 0xbe, 0x21, 0x8b, 0x41, + 0xc6, 0x8, 0xb7, 0xfe, 0x7c, 0x5b, 0xb6, 0x30, 0x73, 0x6c, 0x71, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0xfa, + } + cdcBytes := make([]cadence.Value, 0) + for _, bt := range abiBytes { + cdcBytes = append(cdcBytes, cadence.UInt8(bt)) + } + encodedABI := cadence.NewArray( + cdcBytes, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: EncodeArgs([]cadence.Value{ + encodedABI, + }), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.Error(t, err) + assert.ErrorContains(t, err, "EVM.decodeABIWithSignature(): Cannot decode! 
The signature does not match the provided data.") +} + +func TestEVMAddressConstructionAndReturn(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(_ bytes: [UInt8; 20]): EVM.EVMAddress { + return EVM.EVMAddress(bytes: bytes) + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + addressBytesArray := cadence.NewArray([]cadence.Value{ + cadence.UInt8(1), cadence.UInt8(1), + cadence.UInt8(2), cadence.UInt8(2), + cadence.UInt8(3), cadence.UInt8(3), + cadence.UInt8(4), cadence.UInt8(4), + cadence.UInt8(5), cadence.UInt8(5), + cadence.UInt8(6), cadence.UInt8(6), + cadence.UInt8(7), cadence.UInt8(7), + cadence.UInt8(8), cadence.UInt8(8), + cadence.UInt8(9), cadence.UInt8(9), + cadence.UInt8(10), cadence.UInt8(10), + }).WithType(stdlib.EVMAddressBytesCadenceType) + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: EncodeArgs([]cadence.Value{ + addressBytesArray, + }), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + evmAddressCadenceType := stdlib.NewEVMAddressCadenceType(common.Address(contractsAddress)) + + assert.Equal(t, + cadence.NewStruct([]cadence.Value{ + addressBytesArray, + }).WithType(evmAddressCadenceType), + result, + ) +} + +func TestEVMAddressSerializationAndDeserialization(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + addressFromBytesScript := []byte(` + import EVM from 0x1 + + access(all) + fun main(_ bytes: [UInt8; 20]): EVM.EVMAddress { + return EVM.EVMAddress(bytes: bytes) + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return 
[]runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + sourceBytes := []byte{ + 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, + 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, + } + + // construct the address as a cadence value from sourceBytes + addressBytesArray := cadence.NewArray([]cadence.Value{ + cadence.UInt8(sourceBytes[0]), cadence.UInt8(sourceBytes[1]), + cadence.UInt8(sourceBytes[2]), cadence.UInt8(sourceBytes[3]), + cadence.UInt8(sourceBytes[4]), cadence.UInt8(sourceBytes[5]), + cadence.UInt8(sourceBytes[6]), cadence.UInt8(sourceBytes[7]), + cadence.UInt8(sourceBytes[8]), cadence.UInt8(sourceBytes[9]), + cadence.UInt8(sourceBytes[10]), cadence.UInt8(sourceBytes[11]), + cadence.UInt8(sourceBytes[12]), cadence.UInt8(sourceBytes[13]), + cadence.UInt8(sourceBytes[14]), cadence.UInt8(sourceBytes[15]), + cadence.UInt8(sourceBytes[16]), cadence.UInt8(sourceBytes[17]), + cadence.UInt8(sourceBytes[18]), cadence.UInt8(sourceBytes[19]), + }).WithType(stdlib.EVMAddressBytesCadenceType) + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + constructAddrResult, err := rt.ExecuteScript( + runtime.Script{ + Source: addressFromBytesScript, + Arguments: EncodeArgs([]cadence.Value{ + addressBytesArray, + }), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + evmAddressCadenceType := stdlib.NewEVMAddressCadenceType(common.Address(contractsAddress)) + evmAddress := cadence.NewStruct([]cadence.Value{ + addressBytesArray, + }).WithType(evmAddressCadenceType) + + assert.Equal(t, + evmAddress, + constructAddrResult, + ) + + // Attempt to serialize the address + + addressSerializationScript := []byte(` + import EVM from 0x1 + + access(all) + fun main(address: EVM.EVMAddress): String { + return address.toString() + } + `) + + serializeAddrResult, err := rt.ExecuteScript( + runtime.Script{ + Source: addressSerializationScript, + Arguments: EncodeArgs([]cadence.Value{ + evmAddress, + }), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + + require.NoError(t, err) + + // Hex-encode the sourceBytes array to build the expected value to compare the result against + + expectedHex, _ := cadence.NewString(hex.EncodeToString(sourceBytes)) + + assert.Equal(t, + expectedHex, + serializeAddrResult, + ) + + // Attempt to deserialize the address + + addressDeserializationScript := []byte(` + import EVM from 0x1 + + access(all) + fun main(hexString: String): EVM.EVMAddress { + return EVM.addressFromString(hexString) + } + `) + + deserializeAddrResult, err := rt.ExecuteScript( + runtime.Script{ + Source: addressDeserializationScript, +
Arguments: EncodeArgs([]cadence.Value{ + serializeAddrResult, + }), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + + require.NoError(t, err) + + assert.Equal(t, + evmAddress, + deserializeAddrResult, + ) +} + +func TestBalanceConstructionAndReturn(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(_ attoflow: UInt): EVM.Balance { + return EVM.Balance(attoflow: attoflow) + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + flowValue := cadence.NewUInt(1230000000000000000) + + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: EncodeArgs([]cadence.Value{ + flowValue, + }), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + evmBalanceCadenceType := stdlib.NewBalanceCadenceType(common.Address(contractsAddress)) + + assert.Equal(t, + cadence.NewStruct([]cadence.Value{ + flowValue, + }).WithType(evmBalanceCadenceType), + result, + ) +} + +func TestEVMRun(t *testing.T) { + + t.Parallel() + + evmTx := cadence.NewArray([]cadence.Value{ + cadence.UInt8(1), + cadence.UInt8(2), + cadence.UInt8(3), + }).WithType(stdlib.EVMTransactionBytesCadenceType) + + coinbase := cadence.NewArray([]cadence.Value{ + cadence.UInt8(1), cadence.UInt8(1), + cadence.UInt8(2), cadence.UInt8(2), + cadence.UInt8(3), cadence.UInt8(3), + cadence.UInt8(4), cadence.UInt8(4), + cadence.UInt8(5), cadence.UInt8(5), + cadence.UInt8(6), cadence.UInt8(6), + cadence.UInt8(7), cadence.UInt8(7), + cadence.UInt8(8), cadence.UInt8(8), + cadence.UInt8(9), cadence.UInt8(9), + cadence.UInt8(10), cadence.UInt8(10), + }).WithType(stdlib.EVMAddressBytesCadenceType) + + runCalled := false + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + handler := &testContractHandler{ + evmContractAddress: common.Address(contractsAddress), + run: func(tx []byte, coinbase types.Address) *types.ResultSummary { + runCalled = true + + assert.Equal(t, []byte{1, 2, 3}, tx) + assert.Equal(t, + 
types.Address{ + 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, + }, + coinbase, + ) + return &types.ResultSummary{ + Status: types.StatusSuccessful, + } + }, + } + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): UInt8 { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.run(tx: tx, coinbase: coinbase) + let st = res.status + return st.rawValue + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + val, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: EncodeArgs([]cadence.Value{evmTx, coinbase}), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + assert.Equal(t, types.StatusSuccessful, types.Status(val.(cadence.UInt8))) + assert.True(t, runCalled) + + // test EVM.mustRun with the same transaction + script = []byte(` + import EVM from 0x1 + + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): UInt8 { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + let res = EVM.mustRun(tx: tx, coinbase: coinbase) + let st = res.status + return st.rawValue + } + `) + val, err = rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: EncodeArgs([]cadence.Value{evmTx, coinbase}), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + assert.Equal(t, types.StatusSuccessful, types.Status(val.(cadence.UInt8))) + assert.True(t, runCalled) +} + +func TestEVMDryRun(t *testing.T) { + + t.Parallel() + + dryRunCalled := false + evmTx := cadence.NewArray([]cadence.Value{ + cadence.UInt8(1), + cadence.UInt8(2), + cadence.UInt8(3), + }).WithType(stdlib.EVMTransactionBytesCadenceType) + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + handler := &testContractHandler{ + evmContractAddress: common.Address(contractsAddress), + dryRun: func(tx []byte, from types.Address) *types.ResultSummary { + dryRunCalled = true + assert.Equal(t, types.Address{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}, from) + assert.Equal(t, []byte{1, 2, 3}, tx) + + return &types.ResultSummary{ +
Status: types.StatusSuccessful, + } + }, + } + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(tx: [UInt8]): EVM.Result { + return EVM.dryRun( + tx: tx, + from: EVM.EVMAddress(bytes: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), // random address + ) + } + `) + + val, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: EncodeArgs([]cadence.Value{evmTx}), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + res, err := impl.ResultSummaryFromEVMResultValue(val) + require.NoError(t, err) + assert.Equal(t, types.StatusSuccessful, res.Status) + assert.True(t, dryRunCalled) +} + +func TestEVMDryCall(t *testing.T) { + + t.Parallel() + + dryCallCalled := false + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + handler := &testContractHandler{ + evmContractAddress: common.Address(contractsAddress), + dryRun: func(tx []byte, from types.Address) *types.ResultSummary { + dryCallCalled = true + gethTx := &gethTypes.Transaction{} + if err := gethTx.UnmarshalBinary(tx); err != nil { + require.Fail(t, err.Error()) + } + + require.NotNil(t, gethTx.To()) + + assert.Equal( + t, + types.Address{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 10}, + from, + ) + assert.Equal( + t, + types.Address{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 15}, + types.NewAddress(*gethTx.To()), + ) + assert.Equal(t, []byte{255, 107, 204, 122}, gethTx.Data()) + assert.Equal(t, uint64(33_000), gethTx.Gas()) + assert.Equal(t, big.NewInt(150), gethTx.Value()) + + return &types.ResultSummary{ + Status: types.StatusSuccessful, + } + }, + } + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return 
[]runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): EVM.Result { + return EVM.dryCall( + from: EVM.EVMAddress(bytes: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 10]), + to: EVM.EVMAddress(bytes: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 15]), + data: [255, 107, 204, 122], + gasLimit: 33000, + value: EVM.Balance(attoflow: 150) + ) + } + `) + + val, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: nil, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + res, err := impl.ResultSummaryFromEVMResultValue(val) + require.NoError(t, err) + assert.Equal(t, types.StatusSuccessful, res.Status) + assert.True(t, dryCallCalled) +} + +func TestEVMBatchRun(t *testing.T) { + + t.Parallel() + + evmTxs := cadence.NewArray([]cadence.Value{ + cadence.NewArray([]cadence.Value{cadence.UInt8(1), cadence.UInt8(2), cadence.UInt8(3)}), + cadence.NewArray([]cadence.Value{cadence.UInt8(4), cadence.UInt8(5), cadence.UInt8(6)}), + cadence.NewArray([]cadence.Value{cadence.UInt8(7), cadence.UInt8(8), cadence.UInt8(9)}), + }).WithType(cadence.NewVariableSizedArrayType(cadence.NewVariableSizedArrayType(cadence.UInt8Type))) + + coinbase := cadence.NewArray([]cadence.Value{ + cadence.UInt8(1), cadence.UInt8(1), + cadence.UInt8(2), cadence.UInt8(2), + cadence.UInt8(3), cadence.UInt8(3), + cadence.UInt8(4), cadence.UInt8(4), + cadence.UInt8(5), cadence.UInt8(5), + cadence.UInt8(6), cadence.UInt8(6), + cadence.UInt8(7), cadence.UInt8(7), + cadence.UInt8(8), cadence.UInt8(8), + cadence.UInt8(9), cadence.UInt8(9), + cadence.UInt8(10), cadence.UInt8(10), + }).WithType(stdlib.EVMAddressBytesCadenceType) + + runCalled := false + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + handler := &testContractHandler{ + evmContractAddress: common.Address(contractsAddress), + batchRun: func(txs [][]byte, coinbase types.Address) []*types.ResultSummary { + runCalled = true + + assert.EqualValues(t, [][]byte{ + {1, 2, 3}, {4, 5, 6}, {7, 8, 9}, + }, txs) + assert.Equal(t, + types.Address{ + 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, + }, + coinbase, + ) + + results := make([]*types.ResultSummary, 3) + for i := range results { + results[i] = &types.ResultSummary{ + Status: types.StatusSuccessful, + } + } + + return results + }, + } + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + 
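+ // The script below hands the batch of raw transaction payloads to
+ // EVM.batchRun and returns one EVM.Result per transaction; the batchRun
+ // stub above asserts that all three payloads and the coinbase address
+ // arrive unchanged, and reports StatusSuccessful for each of them.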
script := []byte(` + import EVM from 0x1 + + access(all) + fun main(txs: [[UInt8]], coinbaseBytes: [UInt8; 20]): [EVM.Result] { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.batchRun(txs: txs, coinbase: coinbase) + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + val, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: EncodeArgs([]cadence.Value{evmTxs, coinbase}), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + resultsCadence, ok := val.(cadence.Array) + require.True(t, ok) + + for _, v := range resultsCadence.Values { + res, err := impl.ResultSummaryFromEVMResultValue(v) + require.NoError(t, err) + assert.Equal(t, types.StatusSuccessful, res.Status) + } + assert.True(t, runCalled) +} + +func TestEVMCreateCadenceOwnedAccount(t *testing.T) { + + t.Parallel() + + uuidCounter := uint64(0) + handler := &testContractHandler{ + deployCOA: func(uuid uint64) types.Address { + require.Equal(t, uuidCounter, uuid) + return types.Address{uint8(uuidCounter)} + }, + } + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8; 20] { + let cadenceOwnedAccount1 <- EVM.createCadenceOwnedAccount() + destroy cadenceOwnedAccount1 + + let cadenceOwnedAccount2 <- EVM.createCadenceOwnedAccount() + let bytes = cadenceOwnedAccount2.address().bytes + destroy cadenceOwnedAccount2 + + return bytes + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = 
append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + OnGenerateUUID: func() (uint64, error) { + uuidCounter++ + return uuidCounter, nil + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // reset events + events = make([]cadence.Event, 0) + + // Run script + actual, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + expected := cadence.NewArray([]cadence.Value{ + cadence.UInt8(5), cadence.UInt8(0), + cadence.UInt8(0), cadence.UInt8(0), + cadence.UInt8(0), cadence.UInt8(0), + cadence.UInt8(0), cadence.UInt8(0), + cadence.UInt8(0), cadence.UInt8(0), + cadence.UInt8(0), cadence.UInt8(0), + cadence.UInt8(0), cadence.UInt8(0), + cadence.UInt8(0), cadence.UInt8(0), + cadence.UInt8(0), cadence.UInt8(0), + cadence.UInt8(0), cadence.UInt8(0), + }).WithType(cadence.NewConstantSizedArrayType( + types.AddressLength, + cadence.UInt8Type, + )) + + require.Equal(t, expected, actual) + + // check emitted event types + expectedEventTypes := []string{ + "EVM.CadenceOwnedAccountCreated", + "EVM.CadenceOwnedAccountCreated", + } + CheckCadenceEventTypes(t, events, expectedEventTypes) + + // check cadence owned account created events + expectedCoaAddress := types.Address{4} + requireEqualEventAddress(t, events[0], expectedCoaAddress) + + expectedCoaAddress = types.Address{5} + requireEqualEventAddress(t, events[1], expectedCoaAddress) +} + +func TestCadenceOwnedAccountCall(t *testing.T) { + + t.Parallel() + + expectedBalance, err := cadence.NewUFix64FromParts(1, 23000000) + require.NoError(t, err) + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + handler := &testContractHandler{ + evmContractAddress: common.Address(contractsAddress), + accountByAddress: func(fromAddress types.Address, isAuthorized bool) types.Account { + assert.Equal(t, types.Address{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fromAddress) + assert.True(t, isAuthorized) + + return &testFlowAccount{ + address: fromAddress, + call: func( + toAddress types.Address, + data types.Data, + limit types.GasLimit, + balance types.Balance, + ) *types.ResultSummary { + assert.Equal(t, types.Address{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, toAddress) + assert.Equal(t, types.Data{4, 5, 6}, data) + assert.Equal(t, types.GasLimit(9999), limit) + assert.Equal(t, types.NewBalanceFromUFix64(expectedBalance), balance) + + return &types.ResultSummary{ + Status: types.StatusSuccessful, + ReturnedData: types.Data{3, 1, 4}, + } + }, + } + }, + } + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + let bal = EVM.Balance(attoflow: 0) + bal.setFLOW(flow: 1.23) + let response = cadenceOwnedAccount.call( + to: EVM.EVMAddress( + bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ), + data: [4, 5, 6], + gasLimit: 9999, +
value: bal + ) + destroy cadenceOwnedAccount + return response.data + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + actual, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + expected := cadence.NewArray([]cadence.Value{ + cadence.UInt8(3), + cadence.UInt8(1), + cadence.UInt8(4), + }).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + require.Equal(t, expected, actual) +} + +func TestCadenceOwnedAccountDryCall(t *testing.T) { + + t.Parallel() + + dryCallCalled := false + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + handler := &testContractHandler{ + evmContractAddress: common.Address(contractsAddress), + dryRun: func(tx []byte, from types.Address) *types.ResultSummary { + dryCallCalled = true + gethTx := &gethTypes.Transaction{} + if err := gethTx.UnmarshalBinary(tx); err != nil { + require.Fail(t, err.Error()) + } + + require.NotNil(t, gethTx.To()) + + assert.Equal( + t, + types.Address{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + from, + ) + assert.Equal( + t, + types.Address{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 15}, + types.NewAddress(*gethTx.To()), + ) + assert.Equal(t, []byte{4, 5, 6}, gethTx.Data()) + assert.Equal(t, uint64(33_000), gethTx.Gas()) + assert.Equal(t, big.NewInt(1230000000000000000), gethTx.Value()) + + return &types.ResultSummary{ + Status: types.StatusSuccessful, + ReturnedData: []byte{3, 1, 4}, + } + }, + } + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + let bal = EVM.Balance(attoflow: 0) + bal.setFLOW(flow: 1.23) + let response = cadenceOwnedAccount.dryCall( + to: EVM.EVMAddress( + bytes: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 15] + ), + data: [4, 5, 6], + gasLimit: 33000, + value: bal + ) + destroy cadenceOwnedAccount + return response.data + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + 
OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + actual, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + expected := cadence.NewArray([]cadence.Value{ + cadence.UInt8(3), + cadence.UInt8(1), + cadence.UInt8(4), + }).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + require.Equal(t, expected, actual) + require.True(t, dryCallCalled) +} + +func TestEVMAddressDeposit(t *testing.T) { + + t.Parallel() + + expectedBalanceInUFix64, err := cadence.NewUFix64FromParts(1, 23000000) + require.NoError(t, err) + expectedBalance := types.NewBalanceFromUFix64(expectedBalanceInUFix64) + + var deposited bool + + handler := &testContractHandler{ + + accountByAddress: func(fromAddress types.Address, isAuthorized bool) types.Account { + assert.Equal(t, types.Address{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fromAddress) + assert.False(t, isAuthorized) + + return &testFlowAccount{ + address: fromAddress, + deposit: func(vault *types.FLOWTokenVault) { + deposited = true + assert.Equal( + t, + expectedBalance, + vault.Balance(), + ) + }, + } + }, + } + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + import FlowToken from 0x1 + + access(all) + fun main() { + let admin = getAuthAccount<auth(Storage) &Account>(0x1) + .storage.borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin)! 
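+            // Mint exactly 1.23 FLOW and deposit the resulting vault straight
+            // into the target EVM address; the deposit stub in the handler
+            // asserts that the received vault balance matches.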
+ let minter <- admin.createNewMinter(allowedAmount: 1.23) + let vault <- minter.mintTokens(amount: 1.23) + destroy minter + + let address = EVM.EVMAddress( + bytes: [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + address.deposit(from: <-vault) + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + _, err = rt.ExecuteScript( + runtime.Script{ + Source: script, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + require.True(t, deposited) +} + +func TestCOADeposit(t *testing.T) { + + t.Parallel() + + expectedBalance, err := cadence.NewUFix64FromParts(1, 23000000) + require.NoError(t, err) + + var deposited bool + + var expectedCoaAddress = types.Address{6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + + handler := &testContractHandler{ + + accountByAddress: func(fromAddress types.Address, isAuthorized bool) types.Account { + assert.Equal(t, expectedCoaAddress, fromAddress) + assert.False(t, isAuthorized) + + return &testFlowAccount{ + address: fromAddress, + deposit: func(vault *types.FLOWTokenVault) { + deposited = true + assert.Equal( + t, + types.NewBalanceFromUFix64(expectedBalance), + vault.Balance(), + ) + }, + } + }, + } + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + import FlowToken from 0x1 + + access(all) + fun main() { + let admin = getAuthAccount<auth(BorrowValue) &Account>(0x1) + .storage.borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin)! 
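+ // mint 1.23 FLOW and deposit it into a newly created COA; the test handler asserts the balance of the vault it receives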
+ let minter <- admin.createNewMinter(allowedAmount: 1.23) + let vault <- minter.mintTokens(amount: 1.23) + destroy minter + + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + cadenceOwnedAccount.deposit(from: <-vault) + destroy cadenceOwnedAccount + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + // reset events before script execution + events = make([]cadence.Event, 0) + + _, err = rt.ExecuteScript( + runtime.Script{ + Source: script, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + require.True(t, deposited) + + // check deposit event + expectedEventTypes := []string{ + "FlowToken.MinterCreated", + "FlowToken.TokensMinted", + "EVM.CadenceOwnedAccountCreated", + "EVM.FLOWTokensDeposited", + } + CheckCadenceEventTypes(t, events, expectedEventTypes) + + // token deposit event + tokenDepositEvent := events[3] + tokenDepositEventFields := cadence.FieldsMappedByName(tokenDepositEvent) + + requireEqualEventAddress(t, tokenDepositEvent, expectedCoaAddress) + + // check amount + require.Equal(t, + expectedBalance, + tokenDepositEventFields["amount"], + ) + + // check depositedUUID, based on the transaction content: + // it is expected that UUID 5 is allocated to the source vault.
+ expectedDepositedUUID := cadence.UInt64(5) + require.Equal(t, + expectedDepositedUUID, + tokenDepositEventFields["depositedUUID"], + ) +} + +func TestCadenceOwnedAccountWithdraw(t *testing.T) { + + t.Parallel() + + expectedDepositBalance, err := cadence.NewUFix64FromParts(2, 34000000) + require.NoError(t, err) + + expectedWithdrawBalance, err := cadence.NewUFix64FromParts(1, 23000000) + require.NoError(t, err) + + var deposited bool + var withdrew bool + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + var nextUUID uint64 = 1 + + var expectedCoaAddress = types.Address{6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + + handler := &testContractHandler{ + flowTokenAddress: common.Address(contractsAddress), + accountByAddress: func(fromAddress types.Address, isAuthorized bool) types.Account { + assert.Equal(t, expectedCoaAddress, fromAddress) + + return &testFlowAccount{ + address: fromAddress, + deposit: func(vault *types.FLOWTokenVault) { + deposited = true + assert.Equal(t, + types.NewBalanceFromUFix64(expectedDepositBalance), + vault.Balance(), + ) + }, + withdraw: func(balance types.Balance) *types.FLOWTokenVault { + assert.Equal(t, + types.NewBalanceFromUFix64(expectedWithdrawBalance), + balance, + ) + withdrew = true + return types.NewFlowTokenVault(balance) + }, + } + }, + generateResourceUUID: func() uint64 { + uuid := nextUUID + nextUUID++ + return uuid + }, + } + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + import FlowToken from 0x1 + + access(all) + fun main(): UFix64 { + let admin = getAuthAccount<auth(BorrowValue) &Account>(0x1) + .storage.borrow<&FlowToken.Administrator>(from: /storage/flowTokenAdmin)! 
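+ // mint 2.34 FLOW, deposit it into the COA, then withdraw 1.23 FLOW (1230000000000000000 attoflow) back into a Cadence vault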
+ let minter <- admin.createNewMinter(allowedAmount: 2.34) + let vault <- minter.mintTokens(amount: 2.34) + destroy minter + + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + cadenceOwnedAccount.deposit(from: <-vault) + + let vault2 <- cadenceOwnedAccount.withdraw(balance: EVM.Balance(attoflow: 1230000000000000000)) + let balance = vault2.balance + log(vault2.uuid) + + destroy cadenceOwnedAccount + destroy vault2 + + return balance + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + var logs []string + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + OnProgramLog: func(s string) { + logs = append(logs, s) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // reset events + events = make([]cadence.Event, 0) + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + assert.True(t, deposited) + assert.True(t, withdrew) + assert.Equal(t, expectedWithdrawBalance, result) + + assert.Equal(t, []string{"1"}, logs) + + // check deposit and withdraw events + expectedEventTypes := []string{ + "FlowToken.MinterCreated", + "FlowToken.TokensMinted", + "EVM.CadenceOwnedAccountCreated", + "EVM.FLOWTokensDeposited", + "EVM.FLOWTokensWithdrawn", + } + CheckCadenceEventTypes(t, events, expectedEventTypes) + + // token withdraw event + tokenWithdrawEvent := events[4] + tokenWithdrawEventFields := cadence.FieldsMappedByName(tokenWithdrawEvent) + + requireEqualEventAddress(t, tokenWithdrawEvent, expectedCoaAddress) + + // check amount + require.Equal(t, + expectedWithdrawBalance, + tokenWithdrawEventFields["amount"], + ) + + // check withdrawnUUID; + // the last allocated UUID is 1 + expectedWithdrawnUUID := cadence.UInt64(1) + require.Equal(t, + expectedWithdrawnUUID, + tokenWithdrawEventFields["withdrawnUUID"], + ) +} + +func TestCadenceOwnedAccountDeploy(t *testing.T) { + + t.Parallel() + + var deployed bool + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + expectedBalance, err := cadence.NewUFix64FromParts(1, 23000000) + require.NoError(t, err) + + handler := &testContractHandler{ + evmContractAddress: common.Address(contractsAddress), + accountByAddress: func(fromAddress types.Address, isAuthorized bool) types.Account { + assert.Equal(t, types.Address{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fromAddress) + assert.True(t, isAuthorized) + + return &testFlowAccount{ + address: fromAddress, + deploy: func(code
types.Code, limit types.GasLimit, balance types.Balance) *types.ResultSummary { + deployed = true + assert.Equal(t, types.Code{4, 5, 6}, code) + assert.Equal(t, types.GasLimit(9999), limit) + assert.Equal(t, types.NewBalanceFromUFix64(expectedBalance), balance) + + return &types.ResultSummary{ + Status: types.StatusSuccessful, + DeployedContractAddress: &types.Address{5}, + ReturnedData: types.Data{5}, + } + }, + } + }, + } + + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + let res = cadenceOwnedAccount.deploy( + code: [4, 5, 6], + gasLimit: 9999, + value: EVM.Balance(attoflow: 1230000000000000000) + ) + destroy cadenceOwnedAccount + + assert(res.deployedContract?.bytes == [5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + return res.data + } + `) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + actual, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + expected := cadence. + NewArray([]cadence.Value{cadence.UInt8(5)}). 
+ WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + require.Equal(t, expected, actual) + + require.True(t, deployed) +} + +func RunEVMScript( + t *testing.T, + handler *testContractHandler, + script []byte, + expectedValue cadence.Value, +) { + contractsAddress := flow.Address(handler.evmContractAddress) + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + actual, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.NoError(t, err) + + require.Equal(t, expectedValue, actual) +} + +func TestEVMAccountBalance(t *testing.T) { + t.Parallel() + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + expectedBalanceValue := cadence.NewUInt(1013370000000000000) + expectedBalance := cadence. + NewStruct([]cadence.Value{expectedBalanceValue}). 
+ WithType(stdlib.NewBalanceCadenceType(common.Address(contractsAddress))) + + handler := &testContractHandler{ + flowTokenAddress: common.Address(contractsAddress), + evmContractAddress: common.Address(contractsAddress), + accountByAddress: func(fromAddress types.Address, isAuthorized bool) types.Account { + assert.Equal(t, types.Address{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fromAddress) + assert.False(t, isAuthorized) + + return &testFlowAccount{ + address: fromAddress, + balance: func() types.Balance { + return types.NewBalance(expectedBalanceValue.Value) + }, + } + }, + } + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): EVM.Balance { + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + let balance = cadenceOwnedAccount.balance() + destroy cadenceOwnedAccount + return balance + } + `) + RunEVMScript(t, handler, script, expectedBalance) +} + +func TestEVMAccountNonce(t *testing.T) { + t.Parallel() + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + expectedNonceValue := cadence.NewUInt64(2000) + handler := &testContractHandler{ + flowTokenAddress: common.Address(contractsAddress), + evmContractAddress: common.Address(contractsAddress), + accountByAddress: func(fromAddress types.Address, isAuthorized bool) types.Account { + assert.Equal(t, types.Address{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fromAddress) + assert.False(t, isAuthorized) + + return &testFlowAccount{ + address: fromAddress, + nonce: func() uint64 { + return uint64(expectedNonceValue) + }, + } + }, + } + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): UInt64 { + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + let nonce = cadenceOwnedAccount.address().nonce() + destroy cadenceOwnedAccount + return nonce + } + `) + + RunEVMScript(t, handler, script, expectedNonceValue) +} + +func TestEVMAccountCode(t *testing.T) { + t.Parallel() + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + expectedCodeRaw := []byte{1, 2, 3} + expectedCodeValue := cadence.NewArray( + []cadence.Value{cadence.UInt8(1), cadence.UInt8(2), cadence.UInt8(3)}, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + handler := &testContractHandler{ + flowTokenAddress: common.Address(contractsAddress), + evmContractAddress: common.Address(contractsAddress), + accountByAddress: func(fromAddress types.Address, isAuthorized bool) types.Account { + assert.Equal(t, types.Address{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fromAddress) + assert.False(t, isAuthorized) + + return &testFlowAccount{ + address: fromAddress, + code: func() types.Code { + return expectedCodeRaw + }, + } + }, + } + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + let code = cadenceOwnedAccount.address().code() + destroy cadenceOwnedAccount + return code + } + `) + + RunEVMScript(t, handler, script, expectedCodeValue) +} + +func TestEVMAccountCodeHash(t *testing.T) { + t.Parallel() + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + expectedCodeHashRaw := []byte{1, 2, 3} + expectedCodeHashValue := cadence.NewArray( + []cadence.Value{cadence.UInt8(1), cadence.UInt8(2), cadence.UInt8(3)}, + ).WithType(cadence.NewVariableSizedArrayType(cadence.UInt8Type)) + + handler := &testContractHandler{ + flowTokenAddress: common.Address(contractsAddress), + evmContractAddress: common.Address(contractsAddress), + accountByAddress: 
func(fromAddress types.Address, isAuthorized bool) types.Account { + assert.Equal(t, types.Address{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, fromAddress) + assert.False(t, isAuthorized) + + return &testFlowAccount{ + address: fromAddress, + codeHash: func() []byte { + return expectedCodeHashRaw + }, + } + }, + } + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): [UInt8] { + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + let codeHash = cadenceOwnedAccount.address().codeHash() + destroy cadenceOwnedAccount + return codeHash + } + `) + + RunEVMScript(t, handler, script, expectedCodeHashValue) +} + +func TestEVMValidateCOAOwnershipProof(t *testing.T) { + t.Parallel() + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + type onGetAccountKeyFunc func( + addr runtime.Address, + index uint32, + ) (*cadenceStdlib.AccountKey, error) + + type onVerifySignatureFunc func( + signature []byte, + tag string, + sd, + publicKey []byte, + signatureAlgorithm runtime.SignatureAlgorithm, + hashAlgorithm runtime.HashAlgorithm, + ) (bool, error) + + validate := func( + proof *types.COAOwnershipProofInContext, + onGetAccountKey onGetAccountKeyFunc, + onVerifySignature onVerifySignatureFunc, + ) (cadence.Value, error) { + handler := &testContractHandler{ + deployCOA: func(_ uint64) types.Address { + return proof.EVMAddress + }, + } + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + + rt := runtime.NewRuntime(runtime.Config{}) + + accountCodes := map[common.Location][]byte{} + var events []cadence.Event + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + OnGetAccountKey: onGetAccountKey, + OnVerifySignature: onVerifySignature, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + setupTx := []byte(` + import EVM from 0x1 + + transaction { + prepare(account: auth(Capabilities, SaveValue) &Account) { + let cadenceOwnedAccount <- EVM.createCadenceOwnedAccount() + + account.storage.save( + <-cadenceOwnedAccount, + to: /storage/coa + ) + + let cap = account.capabilities.storage + .issue<&EVM.CadenceOwnedAccount>(/storage/coa) + account.capabilities.publish(cap, at: /public/coa) + } + }`) + + err := rt.ExecuteTransaction( + runtime.Script{ + Source: setupTx, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: transactionEnvironment, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main( + address: Address, + path: 
PublicPath, + signedData: [UInt8], + keyIndices: [UInt64], + signatures: [[UInt8]], + evmAddress: [UInt8; 20] + ): EVM.ValidationResult { + return EVM.validateCOAOwnershipProof( + address: address, + path: path, + signedData: signedData, + keyIndices: keyIndices, + signatures: signatures, + evmAddress: evmAddress + ) + } + `) + + // Run script + result, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + Arguments: EncodeArgs(proof.ToCadenceValues()), + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + + return result, err + } + + t.Run("Single key", func(t *testing.T) { + proof := &types.COAOwnershipProofInContext{ + COAOwnershipProof: types.COAOwnershipProof{ + Address: types.FlowAddress(contractsAddress), + CapabilityPath: "coa", + Signatures: []types.Signature{[]byte("signature")}, + KeyIndices: []uint64{0}, + }, + SignedData: []byte("signedData"), + EVMAddress: RandomAddress(t), + } + + result, err := validate( + proof, + func( + addr runtime.Address, + index uint32, + ) (*cadenceStdlib.AccountKey, error) { + require.Equal(t, proof.Address[:], addr[:]) + return &cadenceStdlib.AccountKey{ + PublicKey: &cadenceStdlib.PublicKey{}, + KeyIndex: index, + Weight: 1000, + HashAlgo: sema.HashAlgorithmKECCAK_256, + IsRevoked: false, + }, nil + }, + func( + signature []byte, + tag string, + sd, + publicKey []byte, + signatureAlgorithm runtime.SignatureAlgorithm, + hashAlgorithm runtime.HashAlgorithm, + ) (bool, error) { + return true, nil + }, + ) + + require.NoError(t, err) + + isValid := result.(cadence.Struct).SearchFieldByName("isValid").(cadence.Bool) + require.True(t, bool(isValid)) + }) + + t.Run("Two keys", func(t *testing.T) { + proof := &types.COAOwnershipProofInContext{ + COAOwnershipProof: types.COAOwnershipProof{ + Address: types.FlowAddress(contractsAddress), + CapabilityPath: "coa", + Signatures: []types.Signature{[]byte("signature2"), []byte("signature0")}, + KeyIndices: []uint64{2, 0}, + }, + SignedData: []byte("signedData"), + EVMAddress: RandomAddress(t), + } + + result, err := validate( + proof, + func(addr runtime.Address, index uint32) (*cadenceStdlib.AccountKey, error) { + require.Equal(t, proof.Address[:], addr[:]) + return &cadenceStdlib.AccountKey{ + PublicKey: &cadenceStdlib.PublicKey{ + // encode the key index into the public key + PublicKey: []byte{byte(index)}, + }, + KeyIndex: index, + Weight: 1000, + HashAlgo: sema.HashAlgorithmKECCAK_256, + IsRevoked: false, + }, nil + }, + func( + signature []byte, + tag string, + sd, + publicKey []byte, + signatureAlgorithm runtime.SignatureAlgorithm, + hashAlgorithm runtime.HashAlgorithm, + ) (bool, error) { + if bytes.Equal(signature, []byte("signature2")) { + require.Equal(t, byte(2), publicKey[0]) + return true, nil + } else if bytes.Equal(signature, []byte("signature0")) { + require.Equal(t, byte(0), publicKey[0]) + return true, nil + } else { + return false, nil + } + }, + ) + + require.NoError(t, err) + + isValid := result.(cadence.Struct).SearchFieldByName("isValid").(cadence.Bool) + require.True(t, bool(isValid)) + }) + + t.Run("Two keys insufficient weight", func(t *testing.T) { + proof := &types.COAOwnershipProofInContext{ + COAOwnershipProof: types.COAOwnershipProof{ + Address: types.FlowAddress(contractsAddress), + CapabilityPath: "coa", + Signatures: []types.Signature{[]byte("signature2"), []byte("signature0")}, + KeyIndices: []uint64{2, 0}, + }, + SignedData: []byte("signedData"), + EVMAddress: RandomAddress(t), + } + + 
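+ // each key in the mock below carries weight 499, so the two valid + // signatures only sum to 998, under the required 1000 weight threshold, + // hence validation should fail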
result, err := validate( + proof, + func(addr runtime.Address, index uint32) (*cadenceStdlib.AccountKey, error) { + require.Equal(t, proof.Address[:], addr[:]) + return &cadenceStdlib.AccountKey{ + PublicKey: &cadenceStdlib.PublicKey{ + // encode the key index into the public key + PublicKey: []byte{byte(index)}, + }, + KeyIndex: index, + Weight: 499, + HashAlgo: sema.HashAlgorithmKECCAK_256, + IsRevoked: false, + }, nil + }, + func( + signature []byte, + tag string, + sd, + publicKey []byte, + signatureAlgorithm runtime.SignatureAlgorithm, + hashAlgorithm runtime.HashAlgorithm, + ) (bool, error) { + if bytes.Equal(signature, []byte("signature2")) { + require.Equal(t, byte(2), publicKey[0]) + return true, nil + } else if bytes.Equal(signature, []byte("signature0")) { + require.Equal(t, byte(0), publicKey[0]) + return true, nil + } else { + return false, nil + } + }, + ) + + require.NoError(t, err) + + isValid := result.(cadence.Struct).SearchFieldByName("isValid").(cadence.Bool) + require.False(t, bool(isValid)) + message := result.(cadence.Struct). + SearchFieldByName("problem").(cadence.Optional). + Value.(cadence.String).String() + require.Equal(t, "\"EVM.validateCOAOwnershipProof(): Cannot validate COA ownership for Cadence account 0x0000000000000001. The given signatures are not valid or provide enough weight.\"", message) + }) +} + +func TestInternalEVMAccess(t *testing.T) { + + t.Parallel() + + handler := &testContractHandler{} + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + transactionEnvironment := newEVMTransactionEnvironment(handler, contractsAddress) + scriptEnvironment := newEVMScriptEnvironment(handler, contractsAddress) + rt := runtime.NewRuntime(runtime.Config{}) + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main() { + let a = InternalEVM.createBridgedAccount() + } + `) + + accountCodes := map[common.Location][]byte{} + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]runtime.Address, error) { + return []runtime.Address{runtime.Address(contractsAddress)}, nil + }, + OnResolveLocation: newLocationResolver(contractsAddress), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + return nil + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + nextScriptLocation := NewScriptLocationGenerator() + + // Deploy contracts + + deployContracts( + t, + rt, + contractsAddress, + runtimeInterface, + transactionEnvironment, + nextTransactionLocation, + ) + + // Run script + + _, err := rt.ExecuteScript( + runtime.Script{ + Source: script, + }, + runtime.Context{ + Interface: runtimeInterface, + Environment: scriptEnvironment, + Location: nextScriptLocation(), + }, + ) + require.Error(t, err) +} + +func TestEVMGetLatestBlock(t *testing.T) { + t.Parallel() + + contractsAddress := flow.BytesToAddress([]byte{0x1}) + + latestBlock := &types.Block{ + Height: uint64(2), + TotalSupply: big.NewInt(1500000000000000000), + Timestamp: uint64(1337), + } + handler := &testContractHandler{ + evmContractAddress: common.Address(contractsAddress), + lastExecutedBlock: func() *types.Block { + return 
latestBlock + }, + } + + script := []byte(` + import EVM from 0x1 + + access(all) + fun main(): EVM.EVMBlock { + return EVM.getLatestBlock() + } + `) + + evmBlockCadenceType := stdlib.NewEVMBlockCadenceType( + common.Address(contractsAddress), + ) + + blockHeight := cadence.NewUInt64(latestBlock.Height) + hash, err := latestBlock.Hash() + require.NoError(t, err) + blockHash, err := cadence.NewString(hash.Hex()) + require.NoError(t, err) + blockTotalSupply := cadence.NewIntFromBig(latestBlock.TotalSupply) + timestamp := cadence.NewUInt64(latestBlock.Timestamp) + + expectedEVMBlock := cadence.NewStruct([]cadence.Value{ + blockHeight, + blockHash, + blockTotalSupply, + timestamp, + }).WithType(evmBlockCadenceType) + + RunEVMScript(t, handler, script, expectedEVMBlock) +} diff --git a/fvm/evm/stdlib/type.go b/fvm/evm/stdlib/type.go new file mode 100644 index 00000000000..c1381b92f82 --- /dev/null +++ b/fvm/evm/stdlib/type.go @@ -0,0 +1,138 @@ +package stdlib + +import ( + "fmt" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/sema" + coreContracts "github.com/onflow/flow-core-contracts/lib/go/contracts" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" +) + +func newContractType(chainID flow.ChainID) *sema.CompositeType { + + contracts := systemcontracts.SystemContractsForChain(chainID) + + evmCode := ContractCode( + contracts.NonFungibleToken.Address, + contracts.FungibleToken.Address, + contracts.FlowToken.Address, + ) + + evmContractAddress := contracts.EVMContract.Address + + evmContractLocation := common.AddressLocation{ + Address: common.Address(evmContractAddress), + Name: ContractName, + } + + templatesEnv := contracts.AsTemplateEnv() + + cryptoContractLocation := contracts.Crypto.Location() + + runtimeInterface := &checkingInterface{ + cryptoContractAddress: cryptoContractLocation.Address, + SystemContractCodes: map[common.Location][]byte{ + contracts.ViewResolver.Location(): coreContracts.ViewResolver(), + contracts.Burner.Location(): coreContracts.Burner(), + contracts.FungibleToken.Location(): coreContracts.FungibleToken(templatesEnv), + contracts.NonFungibleToken.Location(): coreContracts.NonFungibleToken(templatesEnv), + contracts.MetadataViews.Location(): coreContracts.MetadataViews(templatesEnv), + contracts.FlowToken.Location(): coreContracts.FlowToken(templatesEnv), + contracts.FungibleTokenMetadataViews.Location(): coreContracts.FungibleTokenMetadataViews(templatesEnv), + cryptoContractLocation: coreContracts.Crypto(), + }, + } + + env := runtime.NewBaseInterpreterEnvironment(runtime.Config{}) + env.Configure( + runtimeInterface, + runtime.NewCodesAndPrograms(), + nil, + nil, + nil, + nil, + ) + + SetupEnvironment(env, nil, evmContractAddress) + + program, err := env.ParseAndCheckProgram(evmCode, evmContractLocation, false) + if err != nil { + panic(err) + } + + evmContractTypeID := evmContractLocation.TypeID(nil, ContractName) + + return program.Elaboration.CompositeType(evmContractTypeID) +} + +var contractTypes = map[flow.ChainID]*sema.CompositeType{} + +type CadenceTypes struct { + TransactionExecuted *cadence.EventType + BlockExecuted *cadence.EventType +} + +var cadenceTypes = map[flow.ChainID]CadenceTypes{} + +func exportCadenceEventType(contractType *sema.CompositeType, name string) (*cadence.EventType, error) { + transactionEventType, ok := contractType.GetNestedTypes().Get(name) + if !ok { + return nil, fmt.Errorf("missing %s type", 
name) + } + exportedType := runtime.ExportType( + transactionEventType, + map[sema.TypeID]cadence.Type{}, + ) + + eventType, ok := exportedType.(*cadence.EventType) + if !ok { + return nil, fmt.Errorf("type %s is not an event", name) + } + + return eventType, nil +} + +func init() { + for _, chain := range flow.AllChainIDs() { + contractType := newContractType(chain) + contractTypes[chain] = contractType + + transactionExecutedEvent, err := exportCadenceEventType(contractType, "TransactionExecuted") + if err != nil { + panic(err) + } + + blockExecutedEvent, err := exportCadenceEventType(contractType, "BlockExecuted") + if err != nil { + panic(err) + } + + cadenceTypes[chain] = CadenceTypes{ + TransactionExecuted: transactionExecutedEvent, + BlockExecuted: blockExecutedEvent, + } + } +} + +func ContractTypeForChain(chainID flow.ChainID) *sema.CompositeType { + contractType, ok := contractTypes[chainID] + if !ok { + // this is a panic, since it can only happen if the code is wrong + panic(fmt.Sprintf("unknown chain: %s", chainID)) + } + return contractType +} + +func CadenceTypesForChain(chainID flow.ChainID) CadenceTypes { + cadenceTypes, ok := cadenceTypes[chainID] + if !ok { + // this is a panic, since it can only happen if the code is wrong + panic(fmt.Sprintf("unknown chain: %s", chainID)) + } + return cadenceTypes +} diff --git a/fvm/evm/stdlib/type_test.go b/fvm/evm/stdlib/type_test.go new file mode 100644 index 00000000000..743b6a0a596 --- /dev/null +++ b/fvm/evm/stdlib/type_test.go @@ -0,0 +1,31 @@ +package stdlib_test + +import ( + "testing" + + "github.com/onflow/cadence/sema" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/stdlib" + "github.com/onflow/flow-go/model/flow" +) + +func TestContractTypeForChain(t *testing.T) { + t.Parallel() + + comp := stdlib.ContractTypeForChain(flow.Emulator) + require.NotNil(t, comp) + + nestedTypes := comp.GetNestedTypes() + + blockExecutedType, present := nestedTypes.Get("BlockExecuted") + require.True(t, present) + + require.IsType(t, &sema.CompositeType{}, blockExecutedType) + blockExecutedEventType := blockExecutedType.(*sema.CompositeType) + + require.Equal(t, + "EVM.BlockExecuted", + blockExecutedEventType.QualifiedIdentifier(), + ) +} diff --git a/fvm/evm/testutils/accounts.go b/fvm/evm/testutils/accounts.go new file mode 100644 index 00000000000..ea24d351c89 --- /dev/null +++ b/fvm/evm/testutils/accounts.go @@ -0,0 +1,155 @@ +package testutils + +import ( + "bytes" + "crypto/ecdsa" + "io" + "math/big" + "sync" + "testing" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" + + "github.com/onflow/atree" + + "github.com/onflow/flow-go/fvm/evm/emulator" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +// address: 658bdf435d810c91414ec09147daa6db62406379 +const EOATestAccount1KeyHex = "9c647b8b7c4e7c3490668fb6c11473619db80c93704c70893d3813af4090c39c" + +type EOATestAccount struct { + address gethCommon.Address + key *ecdsa.PrivateKey + nonce uint64 + signer gethTypes.Signer + lock sync.Mutex +} + +func (a *EOATestAccount) Address() types.Address { + return types.Address(a.address) +} + +func (a *EOATestAccount) PrepareSignAndEncodeTx( + t testing.TB, + to gethCommon.Address, + data []byte, + amount *big.Int, + gasLimit uint64, + gasPrice *big.Int, +) []byte { + tx := a.PrepareAndSignTx(t, to, data, amount, gasLimit, 
gasPrice) + var b bytes.Buffer + writer := io.Writer(&b) + err := tx.EncodeRLP(writer) + require.NoError(t, err) + return b.Bytes() +} + +func (a *EOATestAccount) PrepareAndSignTx( + t testing.TB, + to gethCommon.Address, + data []byte, + amount *big.Int, + gasLimit uint64, + gasPrice *big.Int, +) *gethTypes.Transaction { + a.lock.Lock() + defer a.lock.Unlock() + + tx := a.signTx( + t, + gethTypes.NewTransaction( + a.nonce, + to, + amount, + gasLimit, + gasPrice, + data, + ), + ) + a.nonce++ + + return tx +} + +func (a *EOATestAccount) SignTx( + t testing.TB, + tx *gethTypes.Transaction, +) *gethTypes.Transaction { + a.lock.Lock() + defer a.lock.Unlock() + + return a.signTx(t, tx) +} + +func (a *EOATestAccount) signTx( + t testing.TB, + tx *gethTypes.Transaction, +) *gethTypes.Transaction { + tx, err := gethTypes.SignTx(tx, a.signer, a.key) + require.NoError(t, err) + return tx +} + +func (a *EOATestAccount) Nonce() uint64 { + return a.nonce +} + +func (a *EOATestAccount) SetNonce(nonce uint64) { + a.lock.Lock() + defer a.lock.Unlock() + + a.nonce = nonce +} + +func GetTestEOAAccount(t testing.TB, keyHex string) *EOATestAccount { + key, _ := gethCrypto.HexToECDSA(keyHex) + address := gethCrypto.PubkeyToAddress(key.PublicKey) + signer := emulator.GetDefaultSigner() + return &EOATestAccount{ + address: address, + key: key, + signer: signer, + lock: sync.Mutex{}, + } +} + +func RunWithEOATestAccount(t testing.TB, led atree.Ledger, flowEVMRootAddress flow.Address, f func(*EOATestAccount)) { + account := FundAndGetEOATestAccount(t, led, flowEVMRootAddress) + f(account) +} + +func FundAndGetEOATestAccount(t testing.TB, led atree.Ledger, flowEVMRootAddress flow.Address) *EOATestAccount { + account := GetTestEOAAccount(t, EOATestAccount1KeyHex) + + // fund account + e := emulator.NewEmulator(led, flowEVMRootAddress) + + blk, err := e.NewBlockView(types.NewDefaultBlockContext(2)) + require.NoError(t, err) + + _, err = blk.DirectCall( + types.NewDepositCall( + RandomAddress(t), // any random non-empty address works here + account.Address(), + new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1000)), + account.nonce, + ), + ) + require.NoError(t, err) + + blk2, err := e.NewReadOnlyBlockView(types.NewDefaultBlockContext(2)) + require.NoError(t, err) + + bal, err := blk2.BalanceOf(account.Address()) + require.NoError(t, err) + require.Greater(t, bal.Uint64(), uint64(0)) + + return account +} diff --git a/fvm/evm/testutils/backend.go b/fvm/evm/testutils/backend.go new file mode 100644 index 00000000000..be206c4d7ca --- /dev/null +++ b/fvm/evm/testutils/backend.go @@ -0,0 +1,680 @@ +package testutils + +import ( + "crypto/rand" + "encoding/binary" + "fmt" + "testing" + + "github.com/onflow/cadence/stdlib" + "github.com/rs/zerolog" + otelTrace "go.opentelemetry.io/otel/trace" + + "github.com/onflow/atree" + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/ccf" + "github.com/onflow/cadence/runtime" + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/fvm/meter" + "github.com/onflow/flow-go/fvm/tracing" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/utils/unittest" +) + +var TestFlowEVMRootAddress = flow.Address{1, 2, 3, 4} +var TestComputationLimit = uint64(100_000_000) + +func RunWithTestFlowEVMRootAddress(t testing.TB, backend atree.Ledger, f 
func(flow.Address)) { + as := environment.NewAccountStatus() + err := backend.SetValue(TestFlowEVMRootAddress[:], []byte(flow.AccountStatusKey), as.ToBytes()) + require.NoError(t, err) + f(TestFlowEVMRootAddress) +} + +func RunWithTestBackend(t testing.TB, f func(*TestBackend)) { + tb := &TestBackend{ + TestValueStore: GetSimpleValueStore(), + testEventEmitter: getSimpleEventEmitter(), + testMeter: getSimpleMeter(), + TestBlockInfo: getSimpleBlockStore(), + TestRandomGenerator: getSimpleRandomGenerator(), + TestContractFunctionInvoker: &TestContractFunctionInvoker{}, + TestTracer: &TestTracer{}, + TestMetricsReporter: &TestMetricsReporter{}, + TestLoggerProvider: &TestLoggerProvider{}, + } + f(tb) +} + +func fullKey(owner, key []byte) string { + return fmt.Sprintf("%x~%s", owner, key) +} + +func GetSimpleValueStore() *TestValueStore { + return GetSimpleValueStorePopulated( + make(map[string][]byte), + make(map[string]uint64), + ) +} + +func GetSimpleValueStorePopulated( + data map[string][]byte, + allocator map[string]uint64, +) *TestValueStore { + bytesRead := 0 + bytesWritten := 0 + return &TestValueStore{ + GetValueFunc: func(owner, key []byte) ([]byte, error) { + fk := fullKey(owner, key) + value := data[fk] + bytesRead += len(fk) + len(value) + return value, nil + }, + SetValueFunc: func(owner, key, value []byte) error { + fk := fullKey(owner, key) + data[fk] = value + bytesWritten += len(fk) + len(value) + return nil + }, + ValueExistsFunc: func(owner, key []byte) (bool, error) { + fk := fullKey(owner, key) + value := data[fk] + bytesRead += len(fk) + len(value) + return len(value) > 0, nil + }, + AllocateSlabIndexFunc: func(owner []byte) (atree.SlabIndex, error) { + index := allocator[string(owner)] + // TODO: figure out why it results in a collision + if index == 0 { + index = 10 + } + var data [8]byte + allocator[string(owner)] = index + 1 + binary.BigEndian.PutUint64(data[:], index) + bytesRead += len(owner) + 8 + bytesWritten += len(owner) + 8 + return atree.SlabIndex(data), nil + }, + TotalStorageSizeFunc: func() int { + size := 0 + for key, item := range data { + size += len(item) + len([]byte(key)) + } + for key := range allocator { + size += len(key) + 8 + } + return size + }, + TotalBytesReadFunc: func() int { + return bytesRead + }, + TotalBytesWrittenFunc: func() int { + return bytesWritten + }, + TotalStorageItemsFunc: func() int { + return len(maps.Keys(data)) + len(maps.Keys(allocator)) + }, + ResetStatsFunc: func() { + bytesRead = 0 + bytesWritten = 0 + }, + + CloneFunc: func() *TestValueStore { + // clone data + newData := make(map[string][]byte) + for k, v := range data { + newData[k] = v + } + // clone allocator + newAllocator := make(map[string]uint64) + for k, v := range allocator { + newAllocator[k] = v + } + return GetSimpleValueStorePopulated(newData, newAllocator) + }, + + DumpFunc: func() (map[string][]byte, map[string]uint64) { + // clone data + newData := make(map[string][]byte) + for k, v := range data { + newData[k] = v + } + newAllocator := make(map[string]uint64) + for k, v := range allocator { + newAllocator[k] = v + } + return newData, newAllocator + }, + } +} + +func getSimpleEventEmitter() *testEventEmitter { + events := make(flow.EventsList, 0) + return &testEventEmitter{ + emitEvent: func(event cadence.Event) error { + payload, err := ccf.Encode(event) + if err != nil { + return err + } + e, err := flow.NewEvent( + flow.UntrustedEvent{ + Type: flow.EventType(event.EventType.ID()), + TransactionID: unittest.IdentifierFixture(), + TransactionIndex:
0, + EventIndex: 0, + Payload: payload, + }, + ) + if err != nil { + return fmt.Errorf("could not construct event: %w", err) + } + + events = append(events, *e) + return nil + }, + events: func() flow.EventsList { + return events + }, + reset: func() { + events = make(flow.EventsList, 0) + }, + } +} + +func getSimpleMeter() *testMeter { + compUsed := uint64(0) + return &testMeter{ + meterComputation: func(usage common.ComputationUsage) error { + compUsed += usage.Intensity + if compUsed > TestComputationLimit { + return fmt.Errorf("computation limit has hit %d", TestComputationLimit) + } + return nil + }, + hasComputationCapacity: func(usage common.ComputationUsage) bool { + return compUsed+usage.Intensity < TestComputationLimit + }, + computationUsed: func() (uint64, error) { + return compUsed, nil + }, + } +} + +func getSimpleBlockStore() *TestBlockInfo { + var index int64 = 1 + return &TestBlockInfo{ + GetCurrentBlockHeightFunc: func() (uint64, error) { + index++ + return uint64(index), nil + }, + GetBlockAtHeightFunc: func(height uint64) (runtime.Block, bool, error) { + return runtime.Block{ + Height: height, + View: 0, + Hash: stdlib.BlockHash{}, + Timestamp: int64(height), + }, true, nil + }, + } +} + +type TestBackend struct { + *TestValueStore + *testMeter + *testEventEmitter + *TestBlockInfo + *TestRandomGenerator + *TestContractFunctionInvoker + *testUUIDGenerator + *TestTracer + *TestMetricsReporter + *TestLoggerProvider +} + +var _ types.Backend = &TestBackend{} + +func (tb *TestBackend) TotalStorageSize() int { + if tb.TotalStorageSizeFunc == nil { + panic("method not set") + } + return tb.TotalStorageSizeFunc() +} + +func (tb *TestBackend) DropEvents() { + if tb.reset == nil { + panic("method not set") + } + tb.reset() +} + +func (tb *TestBackend) Get(id flow.RegisterID) (flow.RegisterValue, error) { + return tb.GetValue([]byte(id.Owner), []byte(id.Key)) +} + +type TestValueStore struct { + GetValueFunc func(owner, key []byte) ([]byte, error) + SetValueFunc func(owner, key, value []byte) error + ValueExistsFunc func(owner, key []byte) (bool, error) + AllocateSlabIndexFunc func(owner []byte) (atree.SlabIndex, error) + TotalStorageSizeFunc func() int + TotalBytesReadFunc func() int + TotalBytesWrittenFunc func() int + TotalStorageItemsFunc func() int + ResetStatsFunc func() + CloneFunc func() *TestValueStore + DumpFunc func() (map[string][]byte, map[string]uint64) +} + +var _ environment.ValueStore = &TestValueStore{} + +func (vs *TestValueStore) GetValue(owner, key []byte) ([]byte, error) { + getValueFunc := vs.GetValueFunc + if getValueFunc == nil { + panic("method not set") + } + return getValueFunc(owner, key) +} + +func (vs *TestValueStore) SetValue(owner, key, value []byte) error { + setValueFunc := vs.SetValueFunc + if setValueFunc == nil { + panic("method not set") + } + return setValueFunc(owner, key, value) +} + +func (vs *TestValueStore) ValueExists(owner, key []byte) (bool, error) { + valueExistsFunc := vs.ValueExistsFunc + if valueExistsFunc == nil { + panic("method not set") + } + return valueExistsFunc(owner, key) +} + +func (vs *TestValueStore) AllocateSlabIndex(owner []byte) (atree.SlabIndex, error) { + allocateSlabIndexFunc := vs.AllocateSlabIndexFunc + if allocateSlabIndexFunc == nil { + panic("method not set") + } + return allocateSlabIndexFunc(owner) +} + +func (vs *TestValueStore) TotalBytesRead() int { + totalBytesReadFunc := vs.TotalBytesReadFunc + if totalBytesReadFunc == nil { + panic("method not set") + } + return totalBytesReadFunc() +} + +func (vs 
*TestValueStore) TotalBytesWritten() int { + totalBytesWrittenFunc := vs.TotalBytesWrittenFunc + if totalBytesWrittenFunc == nil { + panic("method not set") + } + return totalBytesWrittenFunc() +} + +func (vs *TestValueStore) TotalStorageSize() int { + totalStorageSizeFunc := vs.TotalStorageSizeFunc + if totalStorageSizeFunc == nil { + panic("method not set") + } + return totalStorageSizeFunc() +} + +func (vs *TestValueStore) TotalStorageItems() int { + totalStorageItemsFunc := vs.TotalStorageItemsFunc + if totalStorageItemsFunc == nil { + panic("method not set") + } + return totalStorageItemsFunc() +} + +func (vs *TestValueStore) ResetStats() { + resetStatsFunc := vs.ResetStatsFunc + if resetStatsFunc == nil { + panic("method not set") + } + resetStatsFunc() +} + +func (vs *TestValueStore) Clone() *TestValueStore { + cloneFunc := vs.CloneFunc + if cloneFunc == nil { + panic("method not set") + } + return cloneFunc() +} + +func (vs *TestValueStore) Dump() (map[string][]byte, map[string]uint64) { + dumpFunc := vs.DumpFunc + if dumpFunc == nil { + panic("method not set") + } + return dumpFunc() +} + +type testMeter struct { + meterComputation func(usage common.ComputationUsage) error + hasComputationCapacity func(common.ComputationUsage) bool + computationUsed func() (uint64, error) + computationIntensities func() meter.MeteredComputationIntensities + + meterMemory func(usage common.MemoryUsage) error + memoryUsed func() (uint64, error) + + meterEmittedEvent func(byteSize uint64) error + totalEmittedEventBytes func() uint64 + + interactionUsed func() (uint64, error) + + disabled bool +} + +var _ environment.Meter = &testMeter{} + +func (m *testMeter) MeterComputation(usage common.ComputationUsage) error { + if m.disabled { + return nil + } + meterComputation := m.meterComputation + if meterComputation == nil { + panic("method not set") + } + return meterComputation(usage) +} + +func (m *testMeter) ComputationAvailable(usage common.ComputationUsage) bool { + hasComputationCapacity := m.hasComputationCapacity + if hasComputationCapacity == nil { + panic("method not set") + } + return hasComputationCapacity(usage) +} + +func (m *testMeter) ComputationIntensities() meter.MeteredComputationIntensities { + computationIntensities := m.computationIntensities + if computationIntensities == nil { + panic("method not set") + } + return computationIntensities() +} + +func (m *testMeter) ComputationUsed() (uint64, error) { + computationUsed := m.computationUsed + if computationUsed == nil { + panic("method not set") + } + return computationUsed() +} + +func (m *testMeter) RunWithMeteringDisabled(f func()) { + disabled := m.disabled + m.disabled = true + f() + m.disabled = disabled +} + +func (m *testMeter) MeterMemory(usage common.MemoryUsage) error { + if m.disabled { + return nil + } + meterMemory := m.meterMemory + if meterMemory == nil { + panic("method not set") + } + return meterMemory(usage) +} + +func (m *testMeter) MemoryUsed() (uint64, error) { + memoryUsed := m.memoryUsed + if memoryUsed == nil { + panic("method not set") + } + return memoryUsed() +} + +func (m *testMeter) InteractionUsed() (uint64, error) { + interactionUsed := m.interactionUsed + if interactionUsed == nil { + panic("method not set") + } + return interactionUsed() +} + +func (m *testMeter) MeterEmittedEvent(byteSize uint64) error { + if m.disabled { + return nil + } + meterEmittedEvent := m.meterEmittedEvent + if meterEmittedEvent == nil { + panic("method not set") + } + return meterEmittedEvent(byteSize) +} + +func (m 
*testMeter) TotalEmittedEventBytes() uint64 { + totalEmittedEventBytes := m.totalEmittedEventBytes + if totalEmittedEventBytes == nil { + panic("method not set") + } + return totalEmittedEventBytes() +} + +type testEventEmitter struct { + emitEvent func(event cadence.Event) error + events func() flow.EventsList + serviceEvents func() flow.EventsList + convertedServiceEvents func() flow.ServiceEventList + reset func() +} + +var _ environment.EventEmitter = &testEventEmitter{} + +func (vs *testEventEmitter) EmitEvent(event cadence.Event) error { + emitEvent := vs.emitEvent + if emitEvent == nil { + panic("method not set") + } + return emitEvent(event) +} + +func (vs *testEventEmitter) Events() flow.EventsList { + events := vs.events + if events == nil { + panic("method not set") + } + return events() +} + +func (vs *testEventEmitter) ServiceEvents() flow.EventsList { + serviceEvents := vs.serviceEvents + if serviceEvents == nil { + panic("method not set") + } + return serviceEvents() +} + +func (vs *testEventEmitter) ConvertedServiceEvents() flow.ServiceEventList { + convertedServiceEvents := vs.convertedServiceEvents + if convertedServiceEvents == nil { + panic("method not set") + } + return convertedServiceEvents() +} + +func (vs *testEventEmitter) Reset() { + reset := vs.reset + if reset == nil { + panic("method not set") + } + reset() +} + +type TestBlockInfo struct { + GetCurrentBlockHeightFunc func() (uint64, error) + GetBlockAtHeightFunc func(height uint64) (runtime.Block, bool, error) +} + +var _ environment.BlockInfo = &TestBlockInfo{} + +// GetCurrentBlockHeight returns the current block height. +func (tb *TestBlockInfo) GetCurrentBlockHeight() (uint64, error) { + getCurrentBlockHeightFunc := tb.GetCurrentBlockHeightFunc + if getCurrentBlockHeightFunc == nil { + panic("GetCurrentBlockHeight method is not set") + } + return getCurrentBlockHeightFunc() +} + +// GetBlockAtHeight returns the block at the given height. 
+func (tb *TestBlockInfo) GetBlockAtHeight(height uint64) (runtime.Block, bool, error) { + getBlockAtHeightFunc := tb.GetBlockAtHeightFunc + if getBlockAtHeightFunc == nil { + panic("GetBlockAtHeight method is not set") + } + return getBlockAtHeightFunc(height) +} + +type TestRandomGenerator struct { + ReadRandomFunc func([]byte) error +} + +var _ environment.RandomGenerator = &TestRandomGenerator{} + +func (t *TestRandomGenerator) ReadRandom(buffer []byte) error { + readRandomFunc := t.ReadRandomFunc + if readRandomFunc == nil { + panic("ReadRandomFunc method is not set") + } + return readRandomFunc(buffer) +} + +func getSimpleRandomGenerator() *TestRandomGenerator { + return &TestRandomGenerator{ + ReadRandomFunc: func(buffer []byte) error { + _, err := rand.Read(buffer) + return err + }, + } +} + +type TestContractFunctionInvoker struct { + InvokeFunc func( + spec environment.ContractFunctionSpec, + arguments []cadence.Value, + ) ( + cadence.Value, + error, + ) +} + +var _ environment.ContractFunctionInvoker = &TestContractFunctionInvoker{} + +func (t *TestContractFunctionInvoker) Invoke( + spec environment.ContractFunctionSpec, + arguments []cadence.Value, +) ( + cadence.Value, + error, +) { + invokeFunc := t.InvokeFunc + if invokeFunc == nil { + panic("InvokeFunc method is not set") + } + return invokeFunc(spec, arguments) +} + +type testUUIDGenerator struct { + generateUUID func() (uint64, error) +} + +var _ environment.UUIDGenerator = &testUUIDGenerator{} + +func (t *testUUIDGenerator) GenerateUUID() (uint64, error) { + generateUUID := t.generateUUID + if generateUUID == nil { + panic("generateUUID method is not set") + } + return generateUUID() +} + +type TestTracer struct { + StartChildSpanFunc func(trace.SpanName, ...otelTrace.SpanStartOption) tracing.TracerSpan +} + +var _ environment.Tracer = &TestTracer{} + +func (tt *TestTracer) StartChildSpan( + name trace.SpanName, + options ...otelTrace.SpanStartOption, +) tracing.TracerSpan { + // if not set we use noop tracer + startChildSpanFunc := tt.StartChildSpanFunc + if startChildSpanFunc == nil { + return tracing.NewMockTracerSpan() + } + return startChildSpanFunc(name, options...) 
+} + +func (tt *TestTracer) ExpectedSpan(t *testing.T, expected trace.SpanName) { + tt.StartChildSpanFunc = func( + sn trace.SpanName, + sso ...otelTrace.SpanStartOption, + ) tracing.TracerSpan { + require.Equal(t, expected, sn) + return tracing.NewMockTracerSpan() + } +} + +type TestMetricsReporter struct { + SetNumberOfDeployedCOAsFunc func(uint64) + EVMTransactionExecutedFunc func(uint64, bool, bool) + EVMBlockExecutedFunc func(int, uint64, float64) +} + +var _ environment.EVMMetricsReporter = &TestMetricsReporter{} + +func (tmr *TestMetricsReporter) SetNumberOfDeployedCOAs(count uint64) { + // call the method if available, otherwise skip + setNumberOfDeployedCOAsFunc := tmr.SetNumberOfDeployedCOAsFunc + if setNumberOfDeployedCOAsFunc != nil { + setNumberOfDeployedCOAsFunc(count) + } +} + +func (tmr *TestMetricsReporter) EVMTransactionExecuted(gasUsed uint64, isDirectCall bool, failed bool) { + // call the method if available, otherwise skip + evmTransactionExecutedFunc := tmr.EVMTransactionExecutedFunc + if evmTransactionExecutedFunc != nil { + evmTransactionExecutedFunc(gasUsed, isDirectCall, failed) + } +} + +func (tmr *TestMetricsReporter) EVMBlockExecuted(txCount int, totalGasUsed uint64, totalSupplyInFlow float64) { + // call the method if available, otherwise skip + evmBlockExecutedFunc := tmr.EVMBlockExecutedFunc + if evmBlockExecutedFunc != nil { + evmBlockExecutedFunc(txCount, totalGasUsed, totalSupplyInFlow) + } +} + +type TestLoggerProvider struct { + LoggerFunc func() zerolog.Logger +} + +func (tlp *TestLoggerProvider) Logger() zerolog.Logger { + // call the method if set, otherwise return a noop logger + loggerFunc := tlp.LoggerFunc + if loggerFunc != nil { + return loggerFunc() + } + return zerolog.Nop() +} diff --git a/fvm/evm/testutils/cadence.go b/fvm/evm/testutils/cadence.go new file mode 100644 index 00000000000..12f4889a428 --- /dev/null +++ b/fvm/evm/testutils/cadence.go @@ -0,0 +1,29 @@ +package testutils + +import ( + "fmt" + "testing" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/json" + "github.com/stretchr/testify/require" +) + +func EncodeArgs(argValues []cadence.Value) [][]byte { + args := make([][]byte, len(argValues)) + for i, arg := range argValues { + var err error + args[i], err = json.Encode(arg) + if err != nil { + panic(fmt.Errorf("broken test: invalid argument: %w", err)) + } + } + return args +} + +func CheckCadenceEventTypes(t testing.TB, events []cadence.Event, expectedTypes []string) { + require.Equal(t, len(expectedTypes), len(events)) + for i, ev := range events { + require.Equal(t, expectedTypes[i], ev.EventType.QualifiedIdentifier) + } +} diff --git a/fvm/evm/testutils/contract.go b/fvm/evm/testutils/contract.go new file mode 100644 index 00000000000..d9286e62688 --- /dev/null +++ b/fvm/evm/testutils/contract.go @@ -0,0 +1,121 @@ +package testutils + +import ( + "math" + "math/big" + "strings" + "testing" + + gethABI "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/onflow/atree" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/emulator" + "github.com/onflow/flow-go/fvm/evm/testutils/contracts" + "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/model/flow" +) + +type TestContract struct { + ABI string + ByteCode []byte + DeployedAt types.Address +} + +func MakeCallData(t testing.TB, abiString string, name string, args ...interface{}) []byte { + abi, err := gethABI.JSON(strings.NewReader(abiString)) + require.NoError(t, err) + call, err := abi.Pack(name, args...)
+ require.NoError(t, err) + return call +} + +func (tc *TestContract) MakeCallData(t testing.TB, name string, args ...interface{}) []byte { + return MakeCallData(t, tc.ABI, name, args...) +} + +func (tc *TestContract) SetDeployedAt(deployedAt types.Address) { + tc.DeployedAt = deployedAt +} + +func GetStorageTestContract(tb testing.TB) *TestContract { + return &TestContract{ + ABI: contracts.TestContractABIJSON, + ByteCode: contracts.TestContractBytes, + } +} + +func GetDummyKittyTestContract(t testing.TB) *TestContract { + return &TestContract{ + ABI: contracts.DummyKittyContractABIJSON, + ByteCode: contracts.DummyKittyContractBytes, + } +} + +func GetProxyContract(t testing.TB) *TestContract { + return &TestContract{ + ABI: contracts.ProxyContractABIJSON, + ByteCode: contracts.ProxyContractBytes, + } +} + +func GetFactoryTestContract(t testing.TB) *TestContract { + return &TestContract{ + ABI: contracts.FactoryContractABIJSON, + ByteCode: contracts.FactoryContractBytes, + } +} + +func GetFileSystemContract(t testing.TB) *TestContract { + return &TestContract{ + ABI: contracts.FileSystemContractABIJSON, + ByteCode: contracts.FileSystemContractBytes, + } +} + +func RunWithDeployedContract(t testing.TB, tc *TestContract, led atree.Ledger, flowEVMRootAddress flow.Address, f func(*TestContract)) { + DeployContract(t, RandomAddress(t), tc, led, flowEVMRootAddress) + f(tc) +} + +func DeployContract(t testing.TB, caller types.Address, tc *TestContract, led atree.Ledger, flowEVMRootAddress flow.Address) { + // deploy contract + e := emulator.NewEmulator(led, flowEVMRootAddress) + + ctx := types.NewDefaultBlockContext(2) + + bl, err := e.NewReadOnlyBlockView(ctx) + require.NoError(t, err) + + nonce, err := bl.NonceOf(caller) + require.NoError(t, err) + + blk, err := e.NewBlockView(ctx) + require.NoError(t, err) + + _, err = blk.DirectCall( + types.NewDepositCall( + RandomAddress(t), // any random non-empty address works here + caller, + new(big.Int).Mul(big.NewInt(1e18), big.NewInt(1000)), + nonce, + ), + ) + require.NoError(t, err) + + blk2, err := e.NewBlockView(types.NewDefaultBlockContext(3)) + require.NoError(t, err) + + res, err := blk2.DirectCall( + types.NewDeployCall( + caller, + tc.ByteCode, + math.MaxUint64, + big.NewInt(0), + nonce+1, + ), + ) + require.NoError(t, err) + require.NotNil(t, res.DeployedContractAddress) + tc.SetDeployedAt(*res.DeployedContractAddress) +} diff --git a/fvm/evm/testutils/contracts/contracts.go b/fvm/evm/testutils/contracts/contracts.go new file mode 100644 index 00000000000..dcdaf96072c --- /dev/null +++ b/fvm/evm/testutils/contracts/contracts.go @@ -0,0 +1,49 @@ +package contracts + +import ( + _ "embed" + "encoding/hex" +) + +//go:embed test_bytes.hex +var testContractBytesInHex string + +var TestContractBytes, _ = hex.DecodeString(testContractBytesInHex) + +//go:embed test_abi.json +var TestContractABIJSON string + +//go:embed dummy_kitty_bytes.hex +var dummyKittyContractBytesInHex string + +var DummyKittyContractBytes, _ = hex.DecodeString(dummyKittyContractBytesInHex) + +//go:embed dummy_kitty_abi.json +var DummyKittyContractABIJSON string + +//go:embed proxy_bytes.hex +var proxyContractBytesInHex string + +var ProxyContractBytes, _ = hex.DecodeString(proxyContractBytesInHex) + +//go:embed proxy_abi.json +var ProxyContractABIJSON string + +//go:embed factory_bytes.hex +var factoryContractBytesInHex string + +var FactoryContractBytes, _ = hex.DecodeString(factoryContractBytesInHex) + +//go:embed factory_abi.json +var FactoryContractABIJSON string + 
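+// FactoryDeployableContractABIJSON is the ABI of the Deployable helper contract; no bytecode
+// file is embedded for it because instances are deployed by the Factory contract itself.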
+//go:embed factory_deployable_abi.json
+var FactoryDeployableContractABIJSON string
+
+//go:embed file_system_bytes.hex
+var fileSystemContractBytesInHex string
+
+var FileSystemContractBytes, _ = hex.DecodeString(fileSystemContractBytesInHex[2:]) // skip the file's leading "0x", which hex.DecodeString rejects
+
+//go:embed file_system_abi.json
+var FileSystemContractABIJSON string
diff --git a/fvm/evm/testutils/contracts/dummy_kitty.sol b/fvm/evm/testutils/contracts/dummy_kitty.sol
new file mode 100644
index 00000000000..e93570da7f1
--- /dev/null
+++ b/fvm/evm/testutils/contracts/dummy_kitty.sol
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-3.0
+
+contract DummyKitty {
+    event BirthEvent(address owner, uint256 kittyId, uint256 matronId, uint256 sireId, uint256 genes);
+    event TransferEvent(address from, address to, uint256 tokenId);
+
+    struct Kitty {
+        uint256 genes;
+        uint64 birthTime;
+        uint32 matronId;
+        uint32 sireId;
+        uint16 generation;
+    }
+
+    uint256 idCounter;
+
+    /// @dev all kitties
+    Kitty[] kitties;
+
+    /// @dev a mapping from cat IDs to the address that owns them.
+    mapping (uint256 => address) public kittyIndexToOwner;
+
+    /// @dev a mapping from owner address to count of tokens that address owns.
+    mapping (address => uint256) ownershipTokenCount;
+
+    /// @dev a method to transfer a kitty
+    function Transfer(address _from, address _to, uint256 _tokenId) external {
+        // Since the number of kittens is capped to 2^32 we can't overflow this
+        ownershipTokenCount[_to]++;
+        // transfer ownership
+        kittyIndexToOwner[_tokenId] = _to;
+        // When creating new kittens _from is 0x0; we don't track a token count for that address.
+        if (_from != address(0)) {
+            ownershipTokenCount[_from]--;
+        }
+        // Emit the transfer event.
+        emit TransferEvent(_from, _to, _tokenId);
+    }
+
+    /// @dev a method callable by anyone to create a kitty
+    function CreateKitty(
+        uint256 _matronId,
+        uint256 _sireId,
+        uint256 _generation,
+        uint256 _genes
+    )
+        external
+        returns (uint)
+    {
+
+        require(_matronId == uint256(uint32(_matronId)));
+        require(_sireId == uint256(uint32(_sireId)));
+        require(_generation == uint256(uint16(_generation)));
+
+        Kitty memory _kitty = Kitty({
+            genes: _genes,
+            birthTime: uint64(block.timestamp),
+            matronId: uint32(_matronId),
+            sireId: uint32(_sireId),
+            generation: uint16(_generation)
+        });
+
+        kitties.push(_kitty);
+
+        emit BirthEvent(
+            msg.sender,
+            idCounter,
+            uint256(_kitty.matronId),
+            uint256(_kitty.sireId),
+            _kitty.genes
+        );
+
+        this.Transfer(address(0), msg.sender, idCounter);
+
+        idCounter++;
+
+        return idCounter;
+    }
+}
\ No newline at end of file
diff --git a/fvm/evm/testutils/contracts/dummy_kitty_abi.json b/fvm/evm/testutils/contracts/dummy_kitty_abi.json
new file mode 100644
index 00000000000..44294463ca1
--- /dev/null
+++ b/fvm/evm/testutils/contracts/dummy_kitty_abi.json
@@ -0,0 +1,140 @@
+[
+  {
+    "anonymous": false,
+    "inputs": [
+      {
+        "indexed": false,
+        "internalType": "address",
+        "name": "owner",
+        "type": "address"
+      },
+      {
+        "indexed": false,
+        "internalType": "uint256",
+        "name": "kittyId",
+        "type": "uint256"
+      },
+      {
+        "indexed": false,
+        "internalType": "uint256",
+        "name": "matronId",
+        "type": "uint256"
+      },
+      {
+        "indexed": false,
+        "internalType": "uint256",
+        "name": "sireId",
+        "type": "uint256"
+      },
+      {
+        "indexed": false,
+        "internalType": "uint256",
+        "name": "genes",
+        "type": "uint256"
+      }
+    ],
+    "name": "BirthEvent",
+    "type": "event"
+  },
+  {
+    "anonymous": false,
+    "inputs": [
+      {
+        "indexed": false,
+        "internalType": "address",
+        "name": "from",
+        "type": "address"
+      },
+      {
+        "indexed": false,
+        
"internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "TransferEvent", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_matronId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_sireId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_generation", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_genes", + "type": "uint256" + } + ], + "name": "CreateKitty", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_from", + "type": "address" + }, + { + "internalType": "address", + "name": "_to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_tokenId", + "type": "uint256" + } + ], + "name": "Transfer", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "kittyIndexToOwner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/dummy_kitty_bytes.hex b/fvm/evm/testutils/contracts/dummy_kitty_bytes.hex new file mode 100644 index 00000000000..81d8760e5db --- /dev/null +++ b/fvm/evm/testutils/contracts/dummy_kitty_bytes.hex @@ -0,0 +1 @@ +608060405234801561001057600080fd5b506107dd806100206000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c8063a45f4bfc14610046578063d0b169d114610076578063ddf252ad146100a6575b600080fd5b610060600480360381019061005b91906104e4565b6100c2565b60405161006d9190610552565b60405180910390f35b610090600480360381019061008b919061056d565b6100f5565b60405161009d91906105e3565b60405180910390f35b6100c060048036038101906100bb919061062a565b610338565b005b60026020528060005260406000206000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008463ffffffff16851461010957600080fd5b8363ffffffff16841461011b57600080fd5b8261ffff16831461012b57600080fd5b60006040518060a001604052808481526020014267ffffffffffffffff1681526020018763ffffffff1681526020018663ffffffff1681526020018561ffff16815250905060018190806001815401808255809150506001900390600052602060002090600202016000909190919091506000820151816000015560208201518160010160006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555060408201518160010160086101000a81548163ffffffff021916908363ffffffff160217905550606082015181600101600c6101000a81548163ffffffff021916908363ffffffff16021790555060808201518160010160106101000a81548161ffff021916908361ffff16021790555050507fc1e409485f45287e73ab1623a8f2ef17af5eac1b4c792ee9ec466e8795e7c09133600054836040015163ffffffff16846060015163ffffffff16856000015160405161029995949392919061067d565b60405180910390a13073ffffffffffffffffffffffffffffffffffffffff1663ddf252ad6000336000546040518463ffffffff1660e01b81526004016102e1939291906106d0565b600060405180830381600087803b1580156102fb57600080fd5b505af115801561030f573d6000803e3d6000fd5b5050505060008081548092919061032590610736565b9190505550600054915050949350505050565b600360008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600081548092919061038890610736565b919050555081600260008381526020019
0815260200160002060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550600073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff161461046957600360008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008154809291906104639061077e565b91905055505b7feaf1c4b3ce0f4f62a2bae7eb3e68225c75f7e6ff4422073b7437b9a78d25f17083838360405161049c939291906106d0565b60405180910390a1505050565b600080fd5b6000819050919050565b6104c1816104ae565b81146104cc57600080fd5b50565b6000813590506104de816104b8565b92915050565b6000602082840312156104fa576104f96104a9565b5b6000610508848285016104cf565b91505092915050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b600061053c82610511565b9050919050565b61054c81610531565b82525050565b60006020820190506105676000830184610543565b92915050565b60008060008060808587031215610587576105866104a9565b5b6000610595878288016104cf565b94505060206105a6878288016104cf565b93505060406105b7878288016104cf565b92505060606105c8878288016104cf565b91505092959194509250565b6105dd816104ae565b82525050565b60006020820190506105f860008301846105d4565b92915050565b61060781610531565b811461061257600080fd5b50565b600081359050610624816105fe565b92915050565b600080600060608486031215610643576106426104a9565b5b600061065186828701610615565b935050602061066286828701610615565b9250506040610673868287016104cf565b9150509250925092565b600060a0820190506106926000830188610543565b61069f60208301876105d4565b6106ac60408301866105d4565b6106b960608301856105d4565b6106c660808301846105d4565b9695505050505050565b60006060820190506106e56000830186610543565b6106f26020830185610543565b6106ff60408301846105d4565b949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610741826104ae565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff820361077357610772610707565b5b600182019050919050565b6000610789826104ae565b91506000820361079c5761079b610707565b5b60018203905091905056fea2646970667358221220ab35c07ec72cc064a663de06ec7f5f919b1a499a25cf6ef0c63a45fdd4a1e91e64736f6c63430008120033 \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/factory.sol b/fvm/evm/testutils/contracts/factory.sol new file mode 100644 index 00000000000..39ffea4e7be --- /dev/null +++ b/fvm/evm/testutils/contracts/factory.sol @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: UNLICENSED + +pragma solidity >=0.7.0 <0.9.0; + +contract Factory { + constructor() payable {} + + function deploy(bytes32 salt) public returns (address) { + new Deployable{salt: salt}(); + return _getCreate2Address(salt, keccak256(abi.encodePacked(type(Deployable).creationCode))); + } + + function deployAndDestroy(bytes32 salt, uint256 value) public { + Deployable dep = new Deployable{salt: salt}(); + dep.set(value); + dep.destroy(address(this)); + } + + function depositAndDeploy(bytes32 salt, uint256 amount, uint256 stored) public returns (address) { + address addr = _getCreate2Address(salt, keccak256(abi.encodePacked(type(Deployable).creationCode))); + bool success; + assembly { + success := call(gas(), addr, amount, 0, 0, 0, 0) + } + require(success); + Deployable dep = new Deployable{salt: salt}(); + dep.set(stored); + return _getCreate2Address(salt, keccak256(abi.encodePacked(type(Deployable).creationCode))); + } + + function depositDeployAndDestroy(bytes32 salt, uint256 amount, uint256 stored) public { + address addr = _getCreate2Address(salt, 
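+        // the CREATE2 address is a pure function of the deployer address, the salt, and the
+        // creation-code hash, so the target address can receive a deposit before deployment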
keccak256(abi.encodePacked(type(Deployable).creationCode))); + bool success; + assembly { + success := call(gas(), addr, amount, 0, 0, 0, 0) + } + require(success); + Deployable dep = new Deployable{salt: salt}(); + dep.set(stored); + dep.destroy(address(this)); + } + + function _getCreate2Address(bytes32 salt, bytes32 codeHash) internal view returns (address) { + return address(uint160(uint256(keccak256(abi.encodePacked(bytes1(0xff), address(this), salt, codeHash))))); + } +} + +contract Deployable { + uint256 number; + constructor() payable {} + function set(uint256 num) public { + number = num; + } + function get() public view returns (uint256){ + return number; + } + function destroy(address etherDestination) external { + selfdestruct(payable(etherDestination)); + } + + receive() external payable { + } +} \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/factory_abi.json b/fvm/evm/testutils/contracts/factory_abi.json new file mode 100644 index 00000000000..924407ab38a --- /dev/null +++ b/fvm/evm/testutils/contracts/factory_abi.json @@ -0,0 +1,96 @@ +[ + { + "inputs": [], + "stateMutability": "payable", + "type": "constructor" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "salt", + "type": "bytes32" + } + ], + "name": "deploy", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "salt", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "deployAndDestroy", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "salt", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "stored", + "type": "uint256" + } + ], + "name": "depositAndDeploy", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "salt", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "stored", + "type": "uint256" + } + ], + "name": "depositDeployAndDestroy", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/factory_bytes.hex b/fvm/evm/testutils/contracts/factory_bytes.hex new file mode 100644 index 00000000000..ebe208d2232 --- /dev/null +++ b/fvm/evm/testutils/contracts/factory_bytes.hex @@ -0,0 +1 @@ 
+6080604052610af6806100115f395ff3fe608060405234801561000f575f80fd5b506004361061004a575f3560e01c80632b85ba381461004e578063624ff5c41461007e5780639e3bb99e1461009a578063cf4fe092146100b6575b5f80fd5b610068600480360381019061006391906105b4565b6100e6565b604051610075919061061e565b60405180910390f35b6100986004803603810190610093919061066a565b61016d565b005b6100b460048036038101906100af91906106ba565b6102d8565b005b6100d060048036038101906100cb919061066a565b6103d6565b6040516100dd919061061e565b60405180910390f35b5f816040516100f490610570565b8190604051809103905ff5905080158015610111573d5f803e3d5ffd5b5050610166826040518060200161012790610570565b6020820181038252601f19601f8201166040525060405160200161014b919061074a565b60405160208183030381529060405280519060200120610534565b9050919050565b5f6101c1846040518060200161018290610570565b6020820181038252601f19601f820116604052506040516020016101a6919061074a565b60405160208183030381529060405280519060200120610534565b90505f805f805f87865af19050806101d7575f80fd5b5f856040516101e590610570565b8190604051809103905ff5905080158015610202573d5f803e3d5ffd5b5090508073ffffffffffffffffffffffffffffffffffffffff166360fe47b1856040518263ffffffff1660e01b815260040161023e919061076f565b5f604051808303815f87803b158015610255575f80fd5b505af1158015610267573d5f803e3d5ffd5b505050508073ffffffffffffffffffffffffffffffffffffffff1662f55d9d306040518263ffffffff1660e01b81526004016102a3919061061e565b5f604051808303815f87803b1580156102ba575f80fd5b505af11580156102cc573d5f803e3d5ffd5b50505050505050505050565b5f826040516102e690610570565b8190604051809103905ff5905080158015610303573d5f803e3d5ffd5b5090508073ffffffffffffffffffffffffffffffffffffffff166360fe47b1836040518263ffffffff1660e01b815260040161033f919061076f565b5f604051808303815f87803b158015610356575f80fd5b505af1158015610368573d5f803e3d5ffd5b505050508073ffffffffffffffffffffffffffffffffffffffff1662f55d9d306040518263ffffffff1660e01b81526004016103a4919061061e565b5f604051808303815f87803b1580156103bb575f80fd5b505af11580156103cd573d5f803e3d5ffd5b50505050505050565b5f8061042b85604051806020016103ec90610570565b6020820181038252601f19601f82011660405250604051602001610410919061074a565b60405160208183030381529060405280519060200120610534565b90505f805f805f88865af1905080610441575f80fd5b5f8660405161044f90610570565b8190604051809103905ff590508015801561046c573d5f803e3d5ffd5b5090508073ffffffffffffffffffffffffffffffffffffffff166360fe47b1866040518263ffffffff1660e01b81526004016104a8919061076f565b5f604051808303815f87803b1580156104bf575f80fd5b505af11580156104d1573d5f803e3d5ffd5b5050505061052887604051806020016104e990610570565b6020820181038252601f19601f8201166040525060405160200161050d919061074a565b60405160208183030381529060405280519060200120610534565b93505050509392505050565b5f60ff60f81b3084846040516020016105509493929190610838565b604051602081830303815290604052805190602001205f1c905092915050565b61023b8061088683390190565b5f80fd5b5f819050919050565b61059381610581565b811461059d575f80fd5b50565b5f813590506105ae8161058a565b92915050565b5f602082840312156105c9576105c861057d565b5b5f6105d6848285016105a0565b91505092915050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f610608826105df565b9050919050565b610618816105fe565b82525050565b5f6020820190506106315f83018461060f565b92915050565b5f819050919050565b61064981610637565b8114610653575f80fd5b50565b5f8135905061066481610640565b92915050565b5f805f606084860312156106815761068061057d565b5b5f61068e868287016105a0565b935050602061069f86828701610656565b92505060406106b086828701610656565b9150509250925092565b5f80604083850312156106d0576106cf61057d565b5b5f6106dd858286016105a0565b9250
5060206106ee85828601610656565b9150509250929050565b5f81519050919050565b5f81905092915050565b8281835e5f83830152505050565b5f610724826106f8565b61072e8185610702565b935061073e81856020860161070c565b80840191505092915050565b5f610755828461071a565b915081905092915050565b61076981610637565b82525050565b5f6020820190506107825f830184610760565b92915050565b5f7fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b5f819050919050565b6107cd6107c882610788565b6107b3565b82525050565b5f8160601b9050919050565b5f6107e9826107d3565b9050919050565b5f6107fa826107df565b9050919050565b61081261080d826105fe565b6107f0565b82525050565b5f819050919050565b61083261082d82610581565b610818565b82525050565b5f61084382876107bc565b6001820191506108538286610801565b6014820191506108638285610821565b6020820191506108738284610821565b6020820191508190509594505050505056fe608060405261022a806100115f395ff3fe608060405260043610610036575f3560e01c8062f55d9d1461004157806360fe47b1146100695780636d4ce63c146100915761003d565b3661003d57005b5f80fd5b34801561004c575f80fd5b5061006760048036038101906100629190610143565b6100bb565b005b348015610074575f80fd5b5061008f600480360381019061008a91906101a1565b6100d4565b005b34801561009c575f80fd5b506100a56100dd565b6040516100b291906101db565b60405180910390f35b8073ffffffffffffffffffffffffffffffffffffffff16ff5b805f8190555050565b5f8054905090565b5f80fd5b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f610112826100e9565b9050919050565b61012281610108565b811461012c575f80fd5b50565b5f8135905061013d81610119565b92915050565b5f60208284031215610158576101576100e5565b5b5f6101658482850161012f565b91505092915050565b5f819050919050565b6101808161016e565b811461018a575f80fd5b50565b5f8135905061019b81610177565b92915050565b5f602082840312156101b6576101b56100e5565b5b5f6101c38482850161018d565b91505092915050565b6101d58161016e565b82525050565b5f6020820190506101ee5f8301846101cc565b9291505056fea2646970667358221220cbb07f58eed2fb91caf5b496fe4f528d45cf25085898c4f2735c5dcf5bd2fc9f64736f6c634300081a0033a2646970667358221220d1e0a588152bd25c8a5276df79dde8e504c854e590f1dbb00ec9808aee242b4164736f6c634300081a0033 \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/factory_deployable_abi.json b/fvm/evm/testutils/contracts/factory_deployable_abi.json new file mode 100644 index 00000000000..f33f2acc2fa --- /dev/null +++ b/fvm/evm/testutils/contracts/factory_deployable_abi.json @@ -0,0 +1,50 @@ +[ + { + "inputs": [], + "stateMutability": "payable", + "type": "constructor" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "etherDestination", + "type": "address" + } + ], + "name": "destroy", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "get", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "num", + "type": "uint256" + } + ], + "name": "set", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "stateMutability": "payable", + "type": "receive" + } +] \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/file_system.sol b/fvm/evm/testutils/contracts/file_system.sol new file mode 100644 index 00000000000..7a30b13dac8 --- /dev/null +++ b/fvm/evm/testutils/contracts/file_system.sol @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: AGPL-3.0 + +pragma solidity >=0.7.0 <0.9.0; + +/** + * @title FileSystem + * @dev File system representation. 
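+ * Chunks are stored per (namespace, hash, index); a chunk can be rewritten by its namespace
+ * owner until it is locked, after which it becomes immutable and readable via readChunk.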
+ */
+contract FileSystem {
+
+  address public immutable deployer = 0xea02F564664A477286B93712829180be4764fAe2;
+  address public immutable twitter = 0x7525Fe558b4EafA9e6346846E4027ffAB32F80A2;
+  string public hijess = "ikirshu";
+  string public _name = "File System";
+  mapping( address => mapping( string => mapping( uint256 => string ) ) ) public chunks;
+  mapping( address => mapping( string => mapping( uint256 => bool) ) ) public lock;
+  mapping( address => mapping( string => uint256 ) ) public length;
+  constructor() {}
+
+  /**
+   * @dev Returns the name of the contract.
+   */
+  function name(
+  ) public view virtual returns (string memory) {
+    return _name;
+  }
+
+  /**
+   * @dev Check owner.
+   * @param _namespace Address owning the hash.
+   */
+  function checkOwner(
+    address _namespace)
+    public
+    view {
+    require( msg.sender == _namespace );
+  }
+
+  /**
+   * @dev Returns total chunks for a file.
+   * @param _namespace Address owning the hash.
+   * @param _hash Hash of the file the chunk belongs to.
+   */
+  function getLength(
+    address _namespace,
+    string memory _hash) public view virtual returns (uint256) {
+    return length[_namespace][_hash];
+  }
+
+  /**
+   * @dev Check chunk unlock state.
+   * @param _namespace Address owning the hash.
+   * @param _hash Hash of the file the chunk belongs to.
+   * @param _index Which chunk are you checking.
+   */
+  function checkUnlocked(
+    address _namespace,
+    string memory _hash,
+    uint256 _index)
+    public
+    view {
+    require( ! lock[_namespace][_hash][_index] );
+  }
+
+  /**
+   * @dev Check chunk lock state.
+   * @param _namespace Address owning the hash.
+   * @param _hash Hash of the file the chunk belongs to.
+   * @param _index Which chunk are you checking.
+   */
+  function checkLocked(
+    address _namespace,
+    string memory _hash,
+    uint256 _index)
+    public
+    view {
+    require(
+      lock[_namespace][_hash][_index]
+    );
+  }
+
+  /**
+   * @dev Publish chunk.
+   * @param _namespace Namespace for the file definition.
+   * @param _hash Hash of the file the chunk belongs to.
+   * @param _index Which chunk are you setting.
+   * @param _chunk Content of the chunk.
+   */
+  function publishChunk(
+    address _namespace,
+    string memory _hash,
+    uint256 _index,
+    string memory _chunk) public {
+    checkOwner(
+      _namespace);
+    checkUnlocked(
+      _namespace,
+      _hash,
+      _index);
+    chunks[_namespace][_hash][_index] = _chunk;
+    if ( _index > length[msg.sender][_hash] ) {
+      length[_namespace][_hash] = _index;
+    }
+  }
+
+  /**
+   * @dev Lock the chunk.
+   * @param _namespace Address owning the hash.
+   * @param _hash Hash of the file.
+   * @param _index Which chunk to lock.
+   */
+  function lockChunk(
+    address _namespace,
+    string memory _hash,
+    uint256 _index)
+    public
+  {
+    checkOwner(
+      _namespace
+    );
+    checkUnlocked(
+      _namespace,
+      _hash,
+      _index);
+    lock[_namespace][_hash][_index] = true;
+  }
+
+  /**
+   * @dev Read a chunk.
+   * @param _namespace Where the file resides.
+   * @param _hash Hash of the file.
+   * @param _index Which chunk.
+   */
+  function readChunk(
+    address _namespace,
+    string memory _hash,
+    uint256 _index)
+    public
+    view
+    returns (string memory)
+  {
+    checkLocked(
+      _namespace,
+      _hash,
+      _index
+    );
+    return chunks[_namespace][_hash][_index];
+  }
+
+  /**
+   * @dev Verify a chunk.
+   * @param _namespace Where the file resides.
+   * @param _hash Hash of the file.
+   * @param _index Which chunk.
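+   * @return The sha256 digest of the ABI-encoded chunk.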
+ */ + function verifyChunk( + address _namespace, + string memory _hash, + uint256 _index) + public + view + returns (bytes32) + { + return sha256( + abi.encode( + chunks[_namespace][_hash][_index])); + } +} \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/file_system_abi.json b/fvm/evm/testutils/contracts/file_system_abi.json new file mode 100644 index 00000000000..2f0490873ea --- /dev/null +++ b/fvm/evm/testutils/contracts/file_system_abi.json @@ -0,0 +1,346 @@ +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "_name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_namespace", + "type": "address" + }, + { + "internalType": "string", + "name": "_hash", + "type": "string" + }, + { + "internalType": "uint256", + "name": "_index", + "type": "uint256" + } + ], + "name": "checkLocked", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_namespace", + "type": "address" + } + ], + "name": "checkOwner", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_namespace", + "type": "address" + }, + { + "internalType": "string", + "name": "_hash", + "type": "string" + }, + { + "internalType": "uint256", + "name": "_index", + "type": "uint256" + } + ], + "name": "checkUnlocked", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "string", + "name": "", + "type": "string" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "chunks", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "deployer", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_namespace", + "type": "address" + }, + { + "internalType": "string", + "name": "_hash", + "type": "string" + } + ], + "name": "getLength", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "hijess", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "name": "length", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + }, + { + "internalType": "string", + "name": "", + "type": "string" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "lock", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": 
"address", + "name": "_namespace", + "type": "address" + }, + { + "internalType": "string", + "name": "_hash", + "type": "string" + }, + { + "internalType": "uint256", + "name": "_index", + "type": "uint256" + } + ], + "name": "lockChunk", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_namespace", + "type": "address" + }, + { + "internalType": "string", + "name": "_hash", + "type": "string" + }, + { + "internalType": "uint256", + "name": "_index", + "type": "uint256" + }, + { + "internalType": "string", + "name": "_chunk", + "type": "string" + } + ], + "name": "publishChunk", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_namespace", + "type": "address" + }, + { + "internalType": "string", + "name": "_hash", + "type": "string" + }, + { + "internalType": "uint256", + "name": "_index", + "type": "uint256" + } + ], + "name": "readChunk", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "twitter", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_namespace", + "type": "address" + }, + { + "internalType": "string", + "name": "_hash", + "type": "string" + }, + { + "internalType": "uint256", + "name": "_index", + "type": "uint256" + } + ], + "name": "verifyChunk", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/file_system_bytes.hex b/fvm/evm/testutils/contracts/file_system_bytes.hex new file mode 100644 index 00000000000..eea85c2a429 --- /dev/null +++ b/fvm/evm/testutils/contracts/file_system_bytes.hex @@ -0,0 +1 @@ 
+0x60c060405273ea02f564664a477286b93712829180be4764fae273ffffffffffffffffffffffffffffffffffffffff1660809073ffffffffffffffffffffffffffffffffffffffff16815250737525fe558b4eafa9e6346846e4027ffab32f80a273ffffffffffffffffffffffffffffffffffffffff1660a09073ffffffffffffffffffffffffffffffffffffffff168152506040518060400160405280600781526020017f696b6972736875000000000000000000000000000000000000000000000000008152505f90816100d59190610369565b506040518060400160405280600b81526020017f46696c652053797374656d0000000000000000000000000000000000000000008152506001908161011a9190610369565b50348015610126575f5ffd5b50610438565b5f81519050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602260045260245ffd5b5f60028204905060018216806101a757607f821691505b6020821081036101ba576101b9610163565b5b50919050565b5f819050815f5260205f209050919050565b5f6020601f8301049050919050565b5f82821b905092915050565b5f6008830261021c7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff826101e1565b61022686836101e1565b95508019841693508086168417925050509392505050565b5f819050919050565b5f819050919050565b5f61026a6102656102608461023e565b610247565b61023e565b9050919050565b5f819050919050565b61028383610250565b61029761028f82610271565b8484546101ed565b825550505050565b5f5f905090565b6102ae61029f565b6102b981848461027a565b505050565b5b818110156102dc576102d15f826102a6565b6001810190506102bf565b5050565b601f821115610321576102f2816101c0565b6102fb846101d2565b8101602085101561030a578190505b61031e610316856101d2565b8301826102be565b50505b505050565b5f82821c905092915050565b5f6103415f1984600802610326565b1980831691505092915050565b5f6103598383610332565b9150826002028217905092915050565b6103728261012c565b67ffffffffffffffff81111561038b5761038a610136565b5b6103958254610190565b6103a08282856102e0565b5f60209050601f8311600181146103d1575f84156103bf578287015190505b6103c9858261034e565b865550610430565b601f1984166103df866101c0565b5f5b82811015610406578489015182556001820191506020850194506020810190506103e1565b86831015610423578489015161041f601f891682610332565b8355505b6001600288020188555050505b505050505050565b60805160a05161152a6104595f395f61098e01525f610af8015261152a5ff3fe608060405234801561000f575f5ffd5b50600436106100fd575f3560e01c8063891a8b9111610095578063d28d885211610064578063d28d8852146102cf578063d5f39488146102ed578063e0e3671c1461030b578063ef1f3b5214610327576100fd565b8063891a8b9114610235578063abfaeee014610265578063b697114914610283578063d1e0f2d3146102b3576100fd565b806325509cfa116100d157806325509cfa1461019d5780632ebbadbd146101b957806338e611b3146101e957806360a8936d14610219576100fd565b806226675a1461010157806306fdde03146101315780631110e5911461014f5780631adbeefb1461017f575b5f5ffd5b61011b60048036038101906101169190610dc7565b610343565b6040516101289190610e4b565b60405180910390f35b610139610420565b6040516101469190610ec4565b60405180910390f35b61016960048036038101906101649190610dc7565b6104b0565b6040516101769190610ec4565b60405180910390f35b6101876105b5565b6040516101949190610ec4565b60405180910390f35b6101b760048036038101906101b29190610dc7565b610640565b005b6101d360048036038101906101ce9190610ee4565b6106c3565b6040516101e09190610f4d565b60405180910390f35b61020360048036038101906101fe9190610dc7565b610726565b6040516102109190610f80565b60405180910390f35b610233600480360381019061022e9190610f99565b610773565b005b61024f600480360381019061024a9190610dc7565b6108c3565b60405161025c9190610ec4565b60405180910390f35b61026d61098c565b60405161027a9190611044565b60405180910390f35b61029d60048036038101906102989190610ee4
565b6109b0565b6040516102aa9190610f4d565b60405180910390f35b6102cd60048036038101906102c89190610dc7565b6109e8565b005b6102d7610a6a565b6040516102e49190610ec4565b60405180910390f35b6102f5610af6565b6040516103029190611044565b60405180910390f35b6103256004803603810190610320919061105d565b610b1a565b005b610341600480360381019061033c9190610dc7565b610b54565b005b5f6002805f8673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f208460405161039091906110c2565b90815260200160405180910390205f8481526020019081526020015f206040516020016103bd91906111c8565b6040516020818303038152906040526040516103d9919061122c565b602060405180830381855afa1580156103f4573d5f5f3e3d5ffd5b5050506040513d601f19601f82011682018060405250810190610417919061126c565b90509392505050565b60606001805461042f90611105565b80601f016020809104026020016040519081016040528092919081815260200182805461045b90611105565b80156104a65780601f1061047d576101008083540402835291602001916104a6565b820191905f5260205f20905b81548152906001019060200180831161048957829003601f168201915b5050505050905090565b60606104bd8484846109e8565b60025f8573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f208360405161050891906110c2565b90815260200160405180910390205f8381526020019081526020015f20805461053090611105565b80601f016020809104026020016040519081016040528092919081815260200182805461055c90611105565b80156105a75780601f1061057e576101008083540402835291602001916105a7565b820191905f5260205f20905b81548152906001019060200180831161058a57829003601f168201915b505050505090509392505050565b5f80546105c190611105565b80601f01602080910402602001604051908101604052809291908181526020018280546105ed90611105565b80156106385780601f1061060f57610100808354040283529160200191610638565b820191905f5260205f20905b81548152906001019060200180831161061b57829003601f168201915b505050505081565b60035f8473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f208260405161068b91906110c2565b90815260200160405180910390205f8281526020019081526020015f205f9054906101000a900460ff16156106be575f5ffd5b505050565b5f60045f8473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f208260405161070f91906110c2565b908152602001604051809103902054905092915050565b6003602052825f5260405f2082805160208101820180518482526020830160208501208183528095505050505050602052805f5260405f205f92509250509054906101000a900460ff1681565b61077c84610b1a565b610787848484610640565b8060025f8673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f20846040516107d391906110c2565b90815260200160405180910390205f8481526020019081526020015f2090816107fc9190611425565b5060045f3373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f208360405161084891906110c2565b9081526020016040518091039020548211156108bd578160045f8673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f20846040516108aa91906110c2565b9081526020016040518091039020819055505b50505050565b6002602052825f5260405f2082805160208101820180518482526020830160208501208183528095505050505050602052805f5260405f205f925092505050805461090d90611105565b80601f016020809104026020016040519081016040528092919081815260200182805461093990611105565b80156109845780601f1061095b57610100808354040283529160200191610984565b820191905f5260205f20905b81548152906001019060200180831161096757829003601f168201915b5050505
05081565b7f000000000000000000000000000000000000000000000000000000000000000081565b6004602052815f5260405f20818051602081018201805184825260208301602085012081835280955050505050505f91509150505481565b60035f8473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f2082604051610a3391906110c2565b90815260200160405180910390205f8281526020019081526020015f205f9054906101000a900460ff16610a65575f5ffd5b505050565b60018054610a7790611105565b80601f0160208091040260200160405190810160405280929190818152602001828054610aa390611105565b8015610aee5780601f10610ac557610100808354040283529160200191610aee565b820191905f5260205f20905b815481529060010190602001808311610ad157829003601f168201915b505050505081565b7f000000000000000000000000000000000000000000000000000000000000000081565b8073ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610b51575f5ffd5b50565b610b5d83610b1a565b610b68838383610640565b600160035f8573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020015f2083604051610bb591906110c2565b90815260200160405180910390205f8381526020019081526020015f205f6101000a81548160ff021916908315150217905550505050565b5f604051905090565b5f5ffd5b5f5ffd5b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f610c2782610bfe565b9050919050565b610c3781610c1d565b8114610c41575f5ffd5b50565b5f81359050610c5281610c2e565b92915050565b5f5ffd5b5f5ffd5b5f601f19601f8301169050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b610ca682610c60565b810181811067ffffffffffffffff82111715610cc557610cc4610c70565b5b80604052505050565b5f610cd7610bed565b9050610ce38282610c9d565b919050565b5f67ffffffffffffffff821115610d0257610d01610c70565b5b610d0b82610c60565b9050602081019050919050565b828183375f83830152505050565b5f610d38610d3384610ce8565b610cce565b905082815260208101848484011115610d5457610d53610c5c565b5b610d5f848285610d18565b509392505050565b5f82601f830112610d7b57610d7a610c58565b5b8135610d8b848260208601610d26565b91505092915050565b5f819050919050565b610da681610d94565b8114610db0575f5ffd5b50565b5f81359050610dc181610d9d565b92915050565b5f5f5f60608486031215610dde57610ddd610bf6565b5b5f610deb86828701610c44565b935050602084013567ffffffffffffffff811115610e0c57610e0b610bfa565b5b610e1886828701610d67565b9250506040610e2986828701610db3565b9150509250925092565b5f819050919050565b610e4581610e33565b82525050565b5f602082019050610e5e5f830184610e3c565b92915050565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f610e9682610e64565b610ea08185610e6e565b9350610eb0818560208601610e7e565b610eb981610c60565b840191505092915050565b5f6020820190508181035f830152610edc8184610e8c565b905092915050565b5f5f60408385031215610efa57610ef9610bf6565b5b5f610f0785828601610c44565b925050602083013567ffffffffffffffff811115610f2857610f27610bfa565b5b610f3485828601610d67565b9150509250929050565b610f4781610d94565b82525050565b5f602082019050610f605f830184610f3e565b92915050565b5f8115159050919050565b610f7a81610f66565b82525050565b5f602082019050610f935f830184610f71565b92915050565b5f5f5f5f60808587031215610fb157610fb0610bf6565b5b5f610fbe87828801610c44565b945050602085013567ffffffffffffffff811115610fdf57610fde610bfa565b5b610feb87828801610d67565b9350506040610ffc87828801610db3565b925050606085013567ffffffffffffffff81111561101d5761101c610bfa565b5b61102987828801610d67565b91505092959194509250565b61103e81610c1d565b82525050565b5f6020820190506110575f830184611035565b92915050565b5f6020828403121561107257611071610bf6565b5b5f61107f848285
01610c44565b91505092915050565b5f81905092915050565b5f61109c82610e64565b6110a68185611088565b93506110b6818560208601610e7e565b80840191505092915050565b5f6110cd8284611092565b915081905092915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602260045260245ffd5b5f600282049050600182168061111c57607f821691505b60208210810361112f5761112e6110d8565b5b50919050565b5f819050815f5260205f209050919050565b5f815461115381611105565b61115d8186610e6e565b9450600182165f8114611177576001811461118d576111bf565b60ff1983168652811515602002860193506111bf565b61119685611135565b5f5b838110156111b757815481890152600182019150602081019050611198565b808801955050505b50505092915050565b5f6020820190508181035f8301526111e08184611147565b905092915050565b5f81519050919050565b5f81905092915050565b5f611206826111e8565b61121081856111f2565b9350611220818560208601610e7e565b80840191505092915050565b5f61123782846111fc565b915081905092915050565b61124b81610e33565b8114611255575f5ffd5b50565b5f8151905061126681611242565b92915050565b5f6020828403121561128157611280610bf6565b5b5f61128e84828501611258565b91505092915050565b5f6020601f8301049050919050565b5f82821b905092915050565b5f600883026112e17fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff826112a6565b6112eb86836112a6565b95508019841693508086168417925050509392505050565b5f819050919050565b5f61132661132161131c84610d94565b611303565b610d94565b9050919050565b5f819050919050565b61133f8361130c565b61135361134b8261132d565b8484546112b2565b825550505050565b5f5f905090565b61136a61135b565b611375818484611336565b505050565b5b818110156113985761138d5f82611362565b60018101905061137b565b5050565b601f8211156113dd576113ae81611135565b6113b784611297565b810160208510156113c6578190505b6113da6113d285611297565b83018261137a565b50505b505050565b5f82821c905092915050565b5f6113fd5f19846008026113e2565b1980831691505092915050565b5f61141583836113ee565b9150826002028217905092915050565b61142e82610e64565b67ffffffffffffffff81111561144757611446610c70565b5b6114518254611105565b61145c82828561139c565b5f60209050601f83116001811461148d575f841561147b578287015190505b611485858261140a565b8655506114ec565b601f19841661149b86611135565b5f5b828110156114c25784890151825560018201915060208501945060208101905061149d565b868310156114df57848901516114db601f8916826113ee565b8355505b6001600288020188555050505b50505050505056fea26469706673582212205224816f22605aa903e793d9ba4308a3c7e2585c62a3924705070cc59ff0f8b064736f6c634300081c0033 \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/proxy.sol b/fvm/evm/testutils/contracts/proxy.sol new file mode 100644 index 00000000000..5dc3f80107b --- /dev/null +++ b/fvm/evm/testutils/contracts/proxy.sol @@ -0,0 +1,25 @@ +pragma solidity ^0.8.0; + +contract Proxy { + address private _implementation; + + function setImplementation(address implementation) external { + _implementation = implementation; + } + + fallback() external payable { + address impl = _implementation; + assembly { + calldatacopy(0, 0, calldatasize()) + let result := delegatecall(gas(), impl, 0, calldatasize(), 0, 0) + returndatacopy(0, 0, returndatasize()) + switch result + case 0 { + revert(0, returndatasize()) + } + default { + return(0, returndatasize()) + } + } + } +} \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/proxy_abi.json b/fvm/evm/testutils/contracts/proxy_abi.json new file mode 100644 index 00000000000..aaba81ba703 --- /dev/null +++ b/fvm/evm/testutils/contracts/proxy_abi.json @@ -0,0 +1,19 @@ +[ + { + "stateMutability": "payable", + "type": "fallback" + }, + { + "inputs": [ + { + "internalType": "address", 
+ "name": "implementation", + "type": "address" + } + ], + "name": "setImplementation", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/proxy_bytes.hex b/fvm/evm/testutils/contracts/proxy_bytes.hex new file mode 100644 index 00000000000..700cc41f67d --- /dev/null +++ b/fvm/evm/testutils/contracts/proxy_bytes.hex @@ -0,0 +1 @@ +6080604052348015600e575f80fd5b5061018e8061001c5f395ff3fe608060405260043610610021575f3560e01c8063d784d4261461006557610022565b5b5f805f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050365f80375f80365f845af43d5f803e805f8114610061573d5ff35b3d5ffd5b348015610070575f80fd5b5061008b6004803603810190610086919061012d565b61008d565b005b805f806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b5f80fd5b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6100fc826100d3565b9050919050565b61010c816100f2565b8114610116575f80fd5b50565b5f8135905061012781610103565b92915050565b5f60208284031215610142576101416100cf565b5b5f61014f84828501610119565b9150509291505056fea2646970667358221220dc83726b5fd29997fb44f2a424f0959fe05c9736efc3bf92f405182d14cdb5fb64736f6c634300081a0033 \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/test.sol b/fvm/evm/testutils/contracts/test.sol new file mode 100644 index 00000000000..d5deadd9b37 --- /dev/null +++ b/fvm/evm/testutils/contracts/test.sol @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity >=0.7.0 <0.9.0; + +contract Storage { + + address constant public cadenceArch = 0x0000000000000000000000010000000000000001; + event NewStore(address indexed caller, uint256 indexed value); + + error MyCustomError(uint value, string message); + + uint256 number; + + constructor() payable { + } + + function store(uint256 num) public { + number = num; + } + + function checkThenStore(uint256 prev, uint256 num)public { + require(number == prev, "stored value check failed"); + number = num; + } + + function storeWithLog(uint256 num) public { + emit NewStore(msg.sender, num); + number = num; + } + + function storeButRevert(uint256 num) public { + number = num; + revert(); + } + + function retrieve() public view returns (uint256){ + return number; + } + + function blockNumber() public view returns (uint256) { + return block.number; + } + + function checkBlockNumber(uint expected) public view { + require(expected == block.number, "block number check failed"); + } + + function blockTime() public view returns (uint) { + return block.timestamp; + } + + function blockHash(uint num) public view returns (bytes32) { + return blockhash(num); + } + + function checkBlockHash(uint num, bytes32 expected) public view { + require(expected == blockhash(num), "hash check failed"); + } + + function checkBalance(address addr, uint expected) public view{ + require(expected == addr.balance, "balance check failed"); + } + + function random() public view returns (uint256) { + return block.prevrandao; + } + + function chainID() public view returns (uint256) { + return block.chainid; + } + + function destroy() public { + selfdestruct(payable(msg.sender)); + } + + function assertError() public pure{ + require(false, "Assert Error Message"); + } + + function customError() public pure{ + revert MyCustomError(5, "Value is too low"); + } + + function verifyArchCallToRandomSource(uint64 height) public view returns (bytes32) { + (bool ok, bytes memory data) = 
cadenceArch.staticcall(abi.encodeWithSignature("getRandomSource(uint64)", height)); + require(ok, "unsuccessful call to arch "); + bytes32 output = abi.decode(data, (bytes32)); + return output; + } + + function verifyArchCallToRevertibleRandom() public view returns (uint64) { + (bool ok, bytes memory data) = cadenceArch.staticcall(abi.encodeWithSignature("revertibleRandom()")); + require(ok, "unsuccessful call to arch"); + uint64 output = abi.decode(data, (uint64)); + return output; + } + + function verifyArchCallToFlowBlockHeight(uint64 expected) public view returns (uint64){ + (bool ok, bytes memory data) = cadenceArch.staticcall(abi.encodeWithSignature("flowBlockHeight()")); + require(ok, "unsuccessful call to arch "); + uint64 output = abi.decode(data, (uint64)); + require(expected == output, "output doesnt match the expected value"); + return output; + } + + function verifyArchCallToVerifyCOAOwnershipProof(bool expected, address arg0 , bytes32 arg1 , bytes memory arg2 ) public view returns (bool){ + (bool ok, bytes memory data) = cadenceArch.staticcall(abi.encodeWithSignature("verifyCOAOwnershipProof(address,bytes32,bytes)", arg0, arg1, arg2)); + require(ok, "unsuccessful call to arch"); + bool output = abi.decode(data, (bool)); + require(expected == output, "output doesnt match the expected value"); + return output; + } +} \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/test_abi.json b/fvm/evm/testutils/contracts/test_abi.json new file mode 100644 index 00000000000..aff2b6afe16 --- /dev/null +++ b/fvm/evm/testutils/contracts/test_abi.json @@ -0,0 +1,351 @@ +[ + { + "inputs": [], + "stateMutability": "payable", + "type": "constructor" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "string", + "name": "message", + "type": "string" + } + ], + "name": "MyCustomError", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "caller", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "NewStore", + "type": "event" + }, + { + "inputs": [], + "name": "assertError", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "num", + "type": "uint256" + } + ], + "name": "blockHash", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "blockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "blockTime", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "cadenceArch", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "chainID", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "addr", + "type": "address" + }, + { + "internalType": "uint256", + "name": "expected", + "type": "uint256" + } + ], + "name": "checkBalance", + 
"outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "num", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "expected", + "type": "bytes32" + } + ], + "name": "checkBlockHash", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "expected", + "type": "uint256" + } + ], + "name": "checkBlockNumber", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "prev", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "num", + "type": "uint256" + } + ], + "name": "checkThenStore", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "customError", + "outputs": [], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "destroy", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "random", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "retrieve", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "num", + "type": "uint256" + } + ], + "name": "store", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "num", + "type": "uint256" + } + ], + "name": "storeButRevert", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "num", + "type": "uint256" + } + ], + "name": "storeWithLog", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "expected", + "type": "uint64" + } + ], + "name": "verifyArchCallToFlowBlockHeight", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "height", + "type": "uint64" + } + ], + "name": "verifyArchCallToRandomSource", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "verifyArchCallToRevertibleRandom", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bool", + "name": "expected", + "type": "bool" + }, + { + "internalType": "address", + "name": "arg0", + "type": "address" + }, + { + "internalType": "bytes32", + "name": "arg1", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "arg2", + "type": "bytes" + } + ], + "name": "verifyArchCallToVerifyCOAOwnershipProof", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/fvm/evm/testutils/contracts/test_bytes.hex b/fvm/evm/testutils/contracts/test_bytes.hex new file mode 100644 index 00000000000..b7a74895f9f --- /dev/null +++ 
b/fvm/evm/testutils/contracts/test_bytes.hex @@ -0,0 +1 @@ +608060405261170d806100115f395ff3fe608060405234801561000f575f80fd5b5060043610610135575f3560e01c806383197ef0116100b6578063b2821c8f1161007a578063b2821c8f14610325578063cbaff5f914610343578063d0d250bd1461034d578063d462f09b1461036b578063d695cdca14610387578063dda3a7bd146103a357610135565b806383197ef01461028157806385df51fd1461028b578063911007b4146102bb578063a7b93d28146102eb578063adc879e91461030757610135565b806357e871e7116100fd57806357e871e7146101dd5780635ec01e4d146101fb5780636057361d146102195780636babb22414610235578063828dd0481461025157610135565b80632e64cec11461013957806348b15166146101575780634cbefa6a146101755780634d7b9bd51461019157806352e24024146101ad575b5f80fd5b6101416103ad565b60405161014e9190610c41565b60405180910390f35b61015f6103b5565b60405161016c9190610c41565b60405180910390f35b61018f600480360381019061018a9190610c95565b6103bc565b005b6101ab60048036038101906101a69190610d1a565b6103c5565b005b6101c760048036038101906101c29190610d95565b610422565b6040516101d49190610dcf565b60405180910390f35b6101e56105cf565b6040516101f29190610c41565b60405180910390f35b6102036105d6565b6040516102109190610c41565b60405180910390f35b610233600480360381019061022e9190610c95565b6105dd565b005b61024f600480360381019061024a9190610c95565b6105e6565b005b61026b60048036038101906102669190610f8c565b610633565b604051610278919061101b565b60405180910390f35b6102896107e2565b005b6102a560048036038101906102a09190610c95565b6107fb565b6040516102b29190611043565b60405180910390f35b6102d560048036038101906102d09190610d95565b610805565b6040516102e29190611043565b60405180910390f35b6103056004803603810190610300919061105c565b610967565b005b61030f6109b4565b60405161031c9190610c41565b60405180910390f35b61032d6109bb565b60405161033a9190610dcf565b60405180910390f35b61034b610b10565b005b610355610b52565b60405161036291906110a9565b60405180910390f35b610385600480360381019061038091906110c2565b610b5f565b005b6103a1600480360381019061039c9190610c95565b610ba6565b005b6103ab610beb565b005b5f8054905090565b5f42905090565b805f8190555f80fd5b8173ffffffffffffffffffffffffffffffffffffffff1631811461041e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016104159061115a565b60405180910390fd5b5050565b5f805f6801000000000000000173ffffffffffffffffffffffffffffffffffffffff166040516024016040516020818303038152906040527f53e87d66000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050506040516104d591906111ca565b5f60405180830381855afa9150503d805f811461050d576040519150601f19603f3d011682016040523d82523d5f602084013e610512565b606091505b509150915081610557576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161054e9061122a565b60405180910390fd5b5f8180602001905181019061056c919061125c565b90508067ffffffffffffffff168567ffffffffffffffff16146105c4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016105bb906112f7565b60405180910390fd5b809350505050919050565b5f43905090565b5f44905090565b805f8190555050565b803373ffffffffffffffffffffffffffffffffffffffff167f043cc306157a91d747b36aba0e235bbbc5771d75aba162f6e5540767d22673c660405160405180910390a3805f8190555050565b5f805f6801000000000000000173ffffffffffffffffffffffffffffffffffffffff1686868660405160240161066b9392919061135d565b6040516020818303038152906040527f5ee837e7000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bf
fffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050506040516106f591906111ca565b5f60405180830381855afa9150503d805f811461072d576040519150601f19603f3d011682016040523d82523d5f602084013e610732565b606091505b509150915081610777576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161076e906113e3565b60405180910390fd5b5f8180602001905181019061078c9190611415565b9050801515881515146107d4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016107cb906112f7565b60405180910390fd5b809350505050949350505050565b3373ffffffffffffffffffffffffffffffffffffffff16ff5b5f81409050919050565b5f805f6801000000000000000173ffffffffffffffffffffffffffffffffffffffff16846040516024016108399190610dcf565b6040516020818303038152906040527f78a75fbe000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff83818316178352505050506040516108c391906111ca565b5f60405180830381855afa9150503d805f81146108fb576040519150601f19603f3d011682016040523d82523d5f602084013e610900565b606091505b509150915081610945576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161093c9061122a565b60405180910390fd5b5f8180602001905181019061095a9190611454565b9050809350505050919050565b815f54146109aa576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016109a1906114c9565b60405180910390fd5b805f819055505050565b5f46905090565b5f805f6801000000000000000173ffffffffffffffffffffffffffffffffffffffff166040516024016040516020818303038152906040527f705fab20000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050604051610a6e91906111ca565b5f60405180830381855afa9150503d805f8114610aa6576040519150601f19603f3d011682016040523d82523d5f602084013e610aab565b606091505b509150915081610af0576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610ae7906113e3565b60405180910390fd5b5f81806020019051810190610b05919061125c565b905080935050505090565b5f610b50576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b4790611531565b60405180910390fd5b565b6801000000000000000181565b81408114610ba2576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b9990611599565b60405180910390fd5b5050565b438114610be8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610bdf90611601565b60405180910390fd5b50565b60056040517f9195785a000000000000000000000000000000000000000000000000000000008152600401610c2091906116ab565b60405180910390fd5b5f819050919050565b610c3b81610c29565b82525050565b5f602082019050610c545f830184610c32565b92915050565b5f604051905090565b5f80fd5b5f80fd5b610c7481610c29565b8114610c7e575f80fd5b50565b5f81359050610c8f81610c6b565b92915050565b5f60208284031215610caa57610ca9610c63565b5b5f610cb784828501610c81565b91505092915050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f610ce982610cc0565b9050919050565b610cf981610cdf565b8114610d03575f80fd5b50565b5f81359050610d1481610cf0565b92915050565b5f8060408385031215610d3057610d2f610c63565b5b5f610d3d85828601610d06565b9250506020610d4e85828601610c81565b9150509250929050565b5f67ffffffffffffffff82169050919050565b610d7481610d58565b8114610d7e575f80fd5b50565b5f81359050610d8f81610d6b565b92915050565b5f60208284031215610daa57610da9610c63565b5b5f610db78482850161
0d81565b91505092915050565b610dc981610d58565b82525050565b5f602082019050610de25f830184610dc0565b92915050565b5f8115159050919050565b610dfc81610de8565b8114610e06575f80fd5b50565b5f81359050610e1781610df3565b92915050565b5f819050919050565b610e2f81610e1d565b8114610e39575f80fd5b50565b5f81359050610e4a81610e26565b92915050565b5f80fd5b5f80fd5b5f601f19601f8301169050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b610e9e82610e58565b810181811067ffffffffffffffff82111715610ebd57610ebc610e68565b5b80604052505050565b5f610ecf610c5a565b9050610edb8282610e95565b919050565b5f67ffffffffffffffff821115610efa57610ef9610e68565b5b610f0382610e58565b9050602081019050919050565b828183375f83830152505050565b5f610f30610f2b84610ee0565b610ec6565b905082815260208101848484011115610f4c57610f4b610e54565b5b610f57848285610f10565b509392505050565b5f82601f830112610f7357610f72610e50565b5b8135610f83848260208601610f1e565b91505092915050565b5f805f8060808587031215610fa457610fa3610c63565b5b5f610fb187828801610e09565b9450506020610fc287828801610d06565b9350506040610fd387828801610e3c565b925050606085013567ffffffffffffffff811115610ff457610ff3610c67565b5b61100087828801610f5f565b91505092959194509250565b61101581610de8565b82525050565b5f60208201905061102e5f83018461100c565b92915050565b61103d81610e1d565b82525050565b5f6020820190506110565f830184611034565b92915050565b5f806040838503121561107257611071610c63565b5b5f61107f85828601610c81565b925050602061109085828601610c81565b9150509250929050565b6110a381610cdf565b82525050565b5f6020820190506110bc5f83018461109a565b92915050565b5f80604083850312156110d8576110d7610c63565b5b5f6110e585828601610c81565b92505060206110f685828601610e3c565b9150509250929050565b5f82825260208201905092915050565b7f62616c616e636520636865636b206661696c65640000000000000000000000005f82015250565b5f611144601483611100565b915061114f82611110565b602082019050919050565b5f6020820190508181035f83015261117181611138565b9050919050565b5f81519050919050565b5f81905092915050565b8281835e5f83830152505050565b5f6111a482611178565b6111ae8185611182565b93506111be81856020860161118c565b80840191505092915050565b5f6111d5828461119a565b915081905092915050565b7f756e7375636365737366756c2063616c6c20746f2061726368200000000000005f82015250565b5f611214601a83611100565b915061121f826111e0565b602082019050919050565b5f6020820190508181035f83015261124181611208565b9050919050565b5f8151905061125681610d6b565b92915050565b5f6020828403121561127157611270610c63565b5b5f61127e84828501611248565b91505092915050565b7f6f757470757420646f65736e74206d61746368207468652065787065637465645f8201527f2076616c75650000000000000000000000000000000000000000000000000000602082015250565b5f6112e1602683611100565b91506112ec82611287565b604082019050919050565b5f6020820190508181035f83015261130e816112d5565b9050919050565b5f82825260208201905092915050565b5f61132f82611178565b6113398185611315565b935061134981856020860161118c565b61135281610e58565b840191505092915050565b5f6060820190506113705f83018661109a565b61137d6020830185611034565b818103604083015261138f8184611325565b9050949350505050565b7f756e7375636365737366756c2063616c6c20746f2061726368000000000000005f82015250565b5f6113cd601983611100565b91506113d882611399565b602082019050919050565b5f6020820190508181035f8301526113fa816113c1565b9050919050565b5f8151905061140f81610df3565b92915050565b5f6020828403121561142a57611429610c63565b5b5f61143784828501611401565b91505092915050565b5f8151905061144e81610e26565b92915050565b5f6020828403121561146957611468610c63565b5b5f61147684828501611440565b91505092915050565b7f73746f7265642076616c756520636865636b206661696c6564000000000000005f8201525
0565b5f6114b3601983611100565b91506114be8261147f565b602082019050919050565b5f6020820190508181035f8301526114e0816114a7565b9050919050565b7f417373657274204572726f72204d6573736167650000000000000000000000005f82015250565b5f61151b601483611100565b9150611526826114e7565b602082019050919050565b5f6020820190508181035f8301526115488161150f565b9050919050565b7f6861736820636865636b206661696c65640000000000000000000000000000005f82015250565b5f611583601183611100565b915061158e8261154f565b602082019050919050565b5f6020820190508181035f8301526115b081611577565b9050919050565b7f626c6f636b206e756d62657220636865636b206661696c6564000000000000005f82015250565b5f6115eb601983611100565b91506115f6826115b7565b602082019050919050565b5f6020820190508181035f830152611618816115df565b9050919050565b5f819050919050565b5f819050919050565b5f61164b6116466116418461161f565b611628565b610c29565b9050919050565b61165b81611631565b82525050565b7f56616c756520697320746f6f206c6f77000000000000000000000000000000005f82015250565b5f611695601083611100565b91506116a082611661565b602082019050919050565b5f6040820190506116be5f830184611652565b81810360208301526116cf81611689565b90509291505056fea26469706673582212200ade6e0552548fb1b9355b0e891417786eff51bed669555dec499c0a4512075c64736f6c634300081a0033 \ No newline at end of file diff --git a/fvm/evm/testutils/emulator.go b/fvm/evm/testutils/emulator.go new file mode 100644 index 00000000000..37d3f9fcb5e --- /dev/null +++ b/fvm/evm/testutils/emulator.go @@ -0,0 +1,98 @@ +package testutils + +import ( + "math/big" + + gethCommon "github.com/ethereum/go-ethereum/common" + + gethTypes "github.com/ethereum/go-ethereum/core/types" + + "github.com/onflow/flow-go/fvm/evm/types" +) + +type TestEmulator struct { + BalanceOfFunc func(address types.Address) (*big.Int, error) + NonceOfFunc func(address types.Address) (uint64, error) + CodeOfFunc func(address types.Address) (types.Code, error) + CodeHashOfFunc func(address types.Address) ([]byte, error) + DirectCallFunc func(call *types.DirectCall) (*types.Result, error) + RunTransactionFunc func(tx *gethTypes.Transaction) (*types.Result, error) + DryRunTransactionFunc func(tx *gethTypes.Transaction, address gethCommon.Address) (*types.Result, error) + BatchRunTransactionFunc func(txs []*gethTypes.Transaction) ([]*types.Result, error) +} + +var _ types.Emulator = &TestEmulator{} + +// NewBlock returns a new block +func (em *TestEmulator) NewBlockView(_ types.BlockContext) (types.BlockView, error) { + return em, nil +} + +// NewBlock returns a new block view +func (em *TestEmulator) NewReadOnlyBlockView(_ types.BlockContext) (types.ReadOnlyBlockView, error) { + return em, nil +} + +// BalanceOf returns the balance of this address +func (em *TestEmulator) BalanceOf(address types.Address) (*big.Int, error) { + if em.BalanceOfFunc == nil { + panic("method not set") + } + return em.BalanceOfFunc(address) +} + +// NonceOfFunc returns the nonce for this address +func (em *TestEmulator) NonceOf(address types.Address) (uint64, error) { + if em.NonceOfFunc == nil { + panic("method not set") + } + return em.NonceOfFunc(address) +} + +// CodeOf returns the code for this address +func (em *TestEmulator) CodeOf(address types.Address) (types.Code, error) { + if em.CodeOfFunc == nil { + panic("method not set") + } + return em.CodeOfFunc(address) +} + +// CodeHashOf returns the code hash for this address +func (em *TestEmulator) CodeHashOf(address types.Address) ([]byte, error) { + if em.CodeHashOfFunc == nil { + panic("method not set") + } + return em.CodeHashOfFunc(address) +} + +// DirectCall executes a 
direct call
+func (em *TestEmulator) DirectCall(call *types.DirectCall) (*types.Result, error) {
+ if em.DirectCallFunc == nil {
+  panic("method not set")
+ }
+ return em.DirectCallFunc(call)
+}
+
+// RunTransaction runs a transaction and collects gas fees for the coinbase account
+func (em *TestEmulator) RunTransaction(tx *gethTypes.Transaction) (*types.Result, error) {
+ if em.RunTransactionFunc == nil {
+  panic("method not set")
+ }
+ return em.RunTransactionFunc(tx)
+}
+
+// BatchRunTransactions runs a batch of transactions and collects gas fees for the coinbase account
+func (em *TestEmulator) BatchRunTransactions(txs []*gethTypes.Transaction) ([]*types.Result, error) {
+ if em.BatchRunTransactionFunc == nil {
+  panic("method not set")
+ }
+ return em.BatchRunTransactionFunc(txs)
+}
+
+// DryRunTransaction simulates transaction execution
+func (em *TestEmulator) DryRunTransaction(tx *gethTypes.Transaction, address gethCommon.Address) (*types.Result, error) {
+ if em.DryRunTransactionFunc == nil {
+  panic("method not set")
+ }
+ return em.DryRunTransactionFunc(tx, address)
+} diff --git a/fvm/evm/testutils/event.go b/fvm/evm/testutils/event.go new file mode 100644 index 00000000000..6614bd22ee6 --- /dev/null +++ b/fvm/evm/testutils/event.go @@ -0,0 +1,55 @@ +package testutils
+
+import (
+ "testing"
+
+ "github.com/onflow/cadence"
+ "github.com/onflow/cadence/common"
+ "github.com/onflow/cadence/encoding/ccf"
+ "github.com/stretchr/testify/require"
+ "gotest.tools/assert"
+
+ "github.com/onflow/flow-go/fvm/evm/events"
+ "github.com/onflow/flow-go/model/flow"
+)
+
+func flowToCadenceEvent(t testing.TB, event flow.Event) cadence.Event {
+ ev, err := ccf.Decode(nil, event.Payload)
+ require.NoError(t, err)
+ cadenceEvent, ok := ev.(cadence.Event)
+ require.True(t, ok)
+ return cadenceEvent
+}
+
+func TxEventToPayload(t testing.TB, event flow.Event, evmContract flow.Address) *events.TransactionEventPayload {
+ assert.Equal(
+  t,
+  common.NewAddressLocation(
+   nil,
+   common.Address(evmContract),
+   string(events.EventTypeTransactionExecuted),
+  ).ID(),
+  string(event.Type),
+ )
+ cadenceEvent := flowToCadenceEvent(t, event)
+ txEventPayload, err := events.DecodeTransactionEventPayload(cadenceEvent)
+ require.NoError(t, err)
+ return txEventPayload
+}
+
+func BlockEventToPayload(t testing.TB, event flow.Event, evmContract flow.Address) *events.BlockEventPayload {
+ assert.Equal(
+  t,
+  common.NewAddressLocation(
+   nil,
+   common.Address(evmContract),
+   string(events.EventTypeBlockExecuted),
+  ).ID(),
+  string(event.Type),
+ )
+
+ cadenceEvent := flowToCadenceEvent(t, event)
+ blockEventPayload, err := events.DecodeBlockEventPayload(cadenceEvent)
+ require.NoError(t, err)
+ return blockEventPayload
+} diff --git a/fvm/evm/testutils/gob.go b/fvm/evm/testutils/gob.go new file mode 100644 index 00000000000..1c944a1e9e3 --- /dev/null +++ b/fvm/evm/testutils/gob.go @@ -0,0 +1,88 @@ +package testutils
+
+import (
+ "encoding/gob"
+ "os"
+)
+
+// SerializeState saves map data to a file
+func SerializeState(filename string, data map[string][]byte) error {
+ // Create a file to save data
+ file, err := os.Create(filename)
+ if err != nil {
+  return err
+ }
+ defer file.Close()
+
+ // Use gob to encode data
+ encoder := gob.NewEncoder(file)
+ err = encoder.Encode(data)
+ if err != nil {
+  return err
+ }
+
+ return nil
+}
+
+// DeserializeState reads map data from a file
+func DeserializeState(filename string) (map[string][]byte, error) {
+ // Open the file for reading
+ file, err := os.Open(filename)
+ if err != nil {
+  return nil, err
+ }
+ defer file.Close()
+
+ // Prepare the map to store decoded data
+ var data map[string][]byte
+
+ // Use gob to decode data
+ decoder := gob.NewDecoder(file)
+ err = decoder.Decode(&data)
+ if err != nil {
+  return nil, err
+ }
+
+ return data, nil
+}
+
+// SerializeAllocator saves map data to a file
+func SerializeAllocator(filename string, data map[string]uint64) error {
+ // Create a file to save data
+ file, err := os.Create(filename)
+ if err != nil {
+  return err
+ }
+ defer file.Close()
+
+ // Use gob to encode data
+ encoder := gob.NewEncoder(file)
+ err = encoder.Encode(data)
+ if err != nil {
+  return err
+ }
+
+ return nil
+}
+
+// DeserializeAllocator reads map data from a file
+func DeserializeAllocator(filename string) (map[string]uint64, error) {
+ // Open the file for reading
+ file, err := os.Open(filename)
+ if err != nil {
+  return nil, err
+ }
+ defer file.Close()
+
+ // Prepare the map to store decoded data
+ var data map[string]uint64
+
+ // Use gob to decode data
+ decoder := gob.NewDecoder(file)
+ err = decoder.Decode(&data)
+ if err != nil {
+  return nil, err
+ }
+
+ return data, nil
+} diff --git a/fvm/evm/testutils/handler.go b/fvm/evm/testutils/handler.go new file mode 100644 index 00000000000..ba9085ee2c5 --- /dev/null +++ b/fvm/evm/testutils/handler.go @@ -0,0 +1,65 @@ +package testutils
+
+import (
+ "github.com/onflow/cadence/common"
+
+ "github.com/onflow/flow-go/fvm/evm/emulator"
+ "github.com/onflow/flow-go/fvm/evm/handler"
+ "github.com/onflow/flow-go/fvm/evm/precompiles"
+ "github.com/onflow/flow-go/fvm/evm/types"
+ "github.com/onflow/flow-go/fvm/systemcontracts"
+ "github.com/onflow/flow-go/model/flow"
+)
+
+func SetupHandler(
+ chainID flow.ChainID,
+ backend types.Backend,
+ rootAddr flow.Address,
+) *handler.ContractHandler {
+ return handler.NewContractHandler(
+  chainID,
+  rootAddr,
+  common.MustBytesToAddress(systemcontracts.SystemContractsForChain(chainID).FlowToken.Address.Bytes()),
+  rootAddr,
+  handler.NewBlockStore(chainID, backend, rootAddr),
+  handler.NewAddressAllocator(),
+  backend,
+  emulator.NewEmulator(backend, rootAddr),
+ )
+}
+
+type TestPrecompiledContract struct {
+ RequiredGasFunc func(input []byte) uint64
+ RunFunc func(input []byte) ([]byte, error)
+ AddressFunc func() types.Address
+}
+
+var _ types.PrecompiledContract = &TestPrecompiledContract{}
+
+// RequiredGas returns the contract gas use
+func (pc *TestPrecompiledContract) RequiredGas(input []byte) uint64 {
+ if pc.RequiredGasFunc == nil {
+  panic("RequiredGasFunc is not set for the test precompiled contract")
+ }
+ return pc.RequiredGasFunc(input)
+}
+
+// Run runs the precompiled contract
+func (pc *TestPrecompiledContract) Run(input []byte) ([]byte, error) {
+ if pc.RunFunc == nil {
+  panic("RunFunc is not set for the test precompiled contract")
+ }
+ return pc.RunFunc(input)
+}
+
+// Address returns the address that this contract is deployed to
+func (pc *TestPrecompiledContract) Address() types.Address {
+ if pc.AddressFunc == nil {
+  panic("AddressFunc is not set for the test precompiled contract")
+ }
+ return pc.AddressFunc()
+}
+
+func (pc *TestPrecompiledContract) Name() string {
+ return precompiles.CADENCE_ARCH_PRECOMPILE_NAME
+} diff --git a/fvm/evm/testutils/misc.go b/fvm/evm/testutils/misc.go new file mode 100644 index 00000000000..32e625ee44f --- /dev/null +++ b/fvm/evm/testutils/misc.go @@ -0,0 +1,120 @@ +package testutils
+
+import (
+ cryptoRand "crypto/rand"
+ "math/big"
+ "math/rand"
+ "testing"
gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/types" +) + +func RandomCommonHash(t testing.TB) gethCommon.Hash { + ret := gethCommon.Hash{} + _, err := cryptoRand.Read(ret[:gethCommon.HashLength]) + require.NoError(t, err) + return ret +} + +func RandomUint256Int(limit int64) *uint256.Int { + return uint256.NewInt(uint64(rand.Int63n(limit) + 1)) +} + +func RandomBigInt(limit int64) *big.Int { + return big.NewInt(rand.Int63n(limit) + 1) +} + +func RandomAddress(t testing.TB) types.Address { + return types.NewAddress(RandomCommonAddress(t)) +} + +func RandomCommonAddress(t testing.TB) gethCommon.Address { + ret := gethCommon.Address{} + _, err := cryptoRand.Read(ret[:gethCommon.AddressLength]) + require.NoError(t, err) + return ret +} + +func RandomGas(limit int64) uint64 { + return uint64(rand.Int63n(limit) + 1) +} + +func RandomData(t testing.TB) []byte { + // byte size [1, 100] + size := rand.Intn(100) + 1 + ret := make([]byte, size) + _, err := cryptoRand.Read(ret[:]) + require.NoError(t, err) + return ret +} + +func GetRandomLogFixture(t testing.TB) *gethTypes.Log { + return &gethTypes.Log{ + Address: RandomCommonAddress(t), + Topics: []gethCommon.Hash{ + RandomCommonHash(t), + RandomCommonHash(t), + }, + Data: RandomData(t), + } +} + +func COAOwnershipProofFixture(t testing.TB) *types.COAOwnershipProof { + return &types.COAOwnershipProof{ + Address: types.FlowAddress{1, 2, 3}, + CapabilityPath: "path", + KeyIndices: types.KeyIndices{1, 2}, + Signatures: types.Signatures{ + types.Signature("sig1"), + types.Signature("sig2"), + }, + } +} + +func COAOwnershipProofInContextFixture(t testing.TB) *types.COAOwnershipProofInContext { + signedMsg := RandomCommonHash(t) + return &types.COAOwnershipProofInContext{ + COAOwnershipProof: *COAOwnershipProofFixture(t), + SignedData: types.SignedData(signedMsg[:]), + EVMAddress: RandomAddress(t), + } +} + +func RandomResultFixture(t testing.TB) *types.Result { + contractAddress := RandomAddress(t) + return &types.Result{ + Index: 1, + TxType: 1, + TxHash: RandomCommonHash(t), + ReturnedData: RandomData(t), + GasConsumed: RandomGas(1000), + DeployedContractAddress: &contractAddress, + Logs: []*gethTypes.Log{ + GetRandomLogFixture(t), + GetRandomLogFixture(t), + }, + } +} + +func AggregatedPrecompiledCallsFixture(t testing.TB) types.AggregatedPrecompiledCalls { + return types.AggregatedPrecompiledCalls{ + types.PrecompiledCalls{ + Address: RandomAddress(t), + RequiredGasCalls: []uint64{2}, + RunCalls: []types.RunCall{ + { + Output: RandomData(t), + }, + { + Output: []byte{}, + ErrorMsg: "Some error msg", + }, + }, + }, + } +} diff --git a/fvm/evm/testutils/offchain.go b/fvm/evm/testutils/offchain.go new file mode 100644 index 00000000000..5a5e675798a --- /dev/null +++ b/fvm/evm/testutils/offchain.go @@ -0,0 +1,37 @@ +package testutils + +import ( + "fmt" + + "github.com/onflow/flow-go/fvm/evm/offchain/storage" + "github.com/onflow/flow-go/fvm/evm/types" +) + +// TestStorageProvider constructs a new +// storage provider that only provides +// storage for an specific height +type TestStorageProvider struct { + storage types.BackendStorage + height uint64 +} + +var _ types.StorageProvider = &TestStorageProvider{} + +// NewTestStorageProvider constructs a new TestStorageProvider +func NewTestStorageProvider( + initSnapshot types.BackendStorageSnapshot, + height uint64, +) 
+ return &TestStorageProvider{
+  storage: storage.NewEphemeralStorage(storage.NewReadOnlyStorage(initSnapshot)),
+  height: height,
+ }
+}
+
+// GetSnapshotAt returns the snapshot at a specific block height
+func (sp *TestStorageProvider) GetSnapshotAt(height uint64) (types.BackendStorageSnapshot, error) {
+ if height != sp.height {
+  return nil, fmt.Errorf("storage for the given height (%d) is not available", height)
+ }
+ return sp.storage, nil
+} diff --git a/fvm/evm/testutils/uploader.go b/fvm/evm/testutils/uploader.go new file mode 100644 index 00000000000..1c4569fb417 --- /dev/null +++ b/fvm/evm/testutils/uploader.go @@ -0,0 +1,13 @@ +package testutils
+
+import (
+ "encoding/json"
+)
+
+type MockUploader struct {
+ UploadFunc func(string, json.RawMessage) error
+}
+
+func (m MockUploader) Upload(id string, data json.RawMessage) error {
+ return m.UploadFunc(id, data)
+} diff --git a/fvm/evm/types/account.go b/fvm/evm/types/account.go new file mode 100644 index 00000000000..08b3555420a --- /dev/null +++ b/fvm/evm/types/account.go @@ -0,0 +1,56 @@ +package types
+
+// Account is an EVM account. Currently,
+// three types of accounts are supported on Flow EVM:
+// externally owned accounts (EOAs), smart contract accounts, and Cadence-owned accounts.
+// A Cadence-owned account (COA) is a new type of account in the environment
+// that, instead of being managed by a public key,
+// is managed by a resource owned by a Flow account.
+//
+// In other words, the FVM account that owns the COA resource
+// can bridge native tokens to and from the account associated with the COA,
+// deploy contracts to the environment,
+// or call methods on contracts without the need to sign a transaction.
+type Account interface {
+ // Address returns the address of this account
+ Address() Address
+
+ // Balance returns the balance of this account
+ Balance() Balance
+
+ // Code returns the code of this account
+ Code() Code
+
+ // CodeHash returns the code hash of this account
+ CodeHash() []byte
+
+ // Nonce returns the nonce of this account
+ Nonce() uint64
+
+ // Deposit deposits the token from the given vault into this account
+ Deposit(*FLOWTokenVault)
+
+ // Withdraw withdraws the balance from the account and
+ // returns it as a FLOWTokenVault;
+ // works only for COAs
+ Withdraw(Balance) *FLOWTokenVault
+
+ // Transfer is a utility method on top of call for transferring tokens to another account
+ Transfer(to Address, balance Balance)
+
+ // Deploy deploys a contract to the environment;
+ // the newly deployed contract would be at the returned
+ // result address, and the contract data is not controlled by the COA;
+ // works only for COAs
+ Deploy(Code, GasLimit, Balance) *ResultSummary
+
+ // Call calls a smart contract function with the given data.
+ // The gas usage is limited by the given gas limit,
+ // and the Flow transaction's computation limit.
+ // The fees are deducted from the COA
+ // and are transferred to the target address.
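+ //
+ // For example (an illustrative sketch; `coa`, `target`, `data`, and
+ // `bal` are placeholder names, not values defined in this package):
+ //
+ //  res := coa.Call(target, data, GasLimit(100_000), bal)
+ //  _ = res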
+ // If no data is provided, it behaves like a transfer of tokens to the
+ // target address;
+ // works only for COAs
+ Call(Address, Data, GasLimit, Balance) *ResultSummary
+} diff --git a/fvm/evm/types/address.go b/fvm/evm/types/address.go new file mode 100644 index 00000000000..cc27dac5577 --- /dev/null +++ b/fvm/evm/types/address.go @@ -0,0 +1,150 @@ +package types
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+
+ gethCommon "github.com/ethereum/go-ethereum/common"
+ "github.com/onflow/cadence"
+ "github.com/onflow/cadence/encoding/ccf"
+ "github.com/onflow/cadence/sema"
+
+ "github.com/onflow/flow-go/model/flow"
+)
+
+// FlowEVMSpecialAddressPrefixLen captures the number of prefix bytes with constant values for special accounts (extended precompiles and COAs).
+//
+// The prefix length should ensure a high-enough level of security against finding a pre-image using the hash
+// function used for EVM addresses generation (Keccak256). This is required to avoid finding an EVM address
+// that is also a valid FlowEVM address.
+// The target (minimal) security in this case is the security level provided by EVM addresses.
+// Since EVM addresses are 160-bits long, they offer only 80 bits of security (collision resistance
+// offers the lowest level).
+// A pre-image resistance of 80 bits requires the prefix to be at least 80-bits long (i.e. 10 bytes).
+// When used as a prefix in EVM addresses (20-bytes long), a prefix length of 12 bytes
+// leaves a variable part of 8 bytes (64 bits).
+const FlowEVMSpecialAddressPrefixLen = 12
+
+const COAAddressTemplate = "A.%v.EVM.CadenceOwnedAccountCreated"
+
+var (
+ // Using leading zeros for prefix helps with the storage compactness.
+ //
+ // Prefix for the built-in EVM precompiles
+ FlowEVMNativePrecompileAddressPrefix = [FlowEVMSpecialAddressPrefixLen]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ // Prefix for the extended precompiles
+ FlowEVMExtendedPrecompileAddressPrefix = [FlowEVMSpecialAddressPrefixLen]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
+ // Prefix for the COA addresses
+ FlowEVMCOAAddressPrefix = [FlowEVMSpecialAddressPrefixLen]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}
+ // Coinbase address
+ CoinbaseAddress = Address{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}
+)
+
+// Address is an EVM-compatible address
+type Address gethCommon.Address
+
+// AddressLength holds the number of bytes used for each EVM address
+const AddressLength = gethCommon.AddressLength
+
+// NewAddress constructs a new Address
+func NewAddress(addr gethCommon.Address) Address {
+ return Address(addr)
+}
+
+// EmptyAddress is an empty EVM address
+var EmptyAddress = Address(gethCommon.Address{})
+
+// Bytes returns a byte slice for the address
+func (fa Address) Bytes() []byte {
+ return fa[:]
+}
+
+// String returns the hex encoding of the address;
+// it returns an empty string if the address is empty
+func (fa Address) String() string {
+ if fa == EmptyAddress {
+  return ""
+ }
+ return fa.ToCommon().Hex()
+}
+
+// ToCommon returns the geth address
+func (fa Address) ToCommon() gethCommon.Address {
+ return gethCommon.Address(fa)
+}
+
+// NewAddressFromBytes constructs a new address from bytes
+func NewAddressFromBytes(inp []byte) Address {
+ return Address(gethCommon.BytesToAddress(inp))
+}
+
+const CadenceOwnedAccountCreatedTypeAddressFieldName = "address"
+
+func COAAddressFromFlowCOACreatedEvent(evmContractAddress flow.Address, event flow.Event) (Address, error) {
+ // check the type first
+ if string(event.Type) != fmt.Sprintf(COAAddressTemplate, evmContractAddress.Hex()) {
+  return Address{}, fmt.Errorf("wrong event type is passed")
+ }
+
+ // then decode
+ eventData, err := ccf.Decode(nil, event.Payload)
+ if err != nil {
+  return Address{}, err
+ }
+
+ cadenceEvent, ok := eventData.(cadence.Event)
+ if !ok {
+  return Address{}, fmt.Errorf("event data is not a cadence event")
+ }
+
+ addressValue := cadence.SearchFieldByName(
+  cadenceEvent,
+  CadenceOwnedAccountCreatedTypeAddressFieldName,
+ )
+
+ addressString, ok := addressValue.(cadence.String)
+ if !ok {
+  return Address{}, fmt.Errorf("address is not a string")
+ }
+
+ addressBytes, err := hex.DecodeString(string(addressString))
+ if err != nil {
+  return Address{}, err
+ }
+
+ return NewAddressFromBytes(addressBytes), nil
+}
+
+// NewAddressFromString constructs a new address from a string
+func NewAddressFromString(str string) Address {
+ return NewAddressFromBytes([]byte(str))
+}
+
+var AddressBytesCadenceType = cadence.NewConstantSizedArrayType(AddressLength, cadence.UInt8Type)
+var AddressBytesSemaType = sema.ByteArrayType
+
+func (a Address) ToCadenceValue() cadence.Array {
+ values := make([]cadence.Value, len(a))
+ for i, v := range a {
+  values[i] = cadence.NewUInt8(v)
+ }
+ return cadence.NewArray(values).
+  WithType(AddressBytesCadenceType)
+}
+
+// IsACOAAddress returns true if the address is a COA address
+//
+// This test ensures `addr` has been generated as a COA address with high probability.
+// Brute-forcing an EVM address `addr` to pass the `IsACOAAddress` test is as hard as the bit-length
+// of `FlowEVMCOAAddressPrefix` (here 96 bits).
+// Although this is lower than the protocol-wide security level in Flow (128 bits), it remains
+// higher than the EVM addresses security (80 bits when considering collision attacks)
+func IsACOAAddress(addr Address) bool {
+ return bytes.HasPrefix(addr[:], FlowEVMCOAAddressPrefix[:])
+}
+
+// IsAnExtendedPrecompileAddress returns true if the address is an extended precompile address
+func IsAnExtendedPrecompileAddress(addr Address) bool {
+ return bytes.HasPrefix(addr[:], FlowEVMExtendedPrecompileAddressPrefix[:])
+} diff --git a/fvm/evm/types/backend.go b/fvm/evm/types/backend.go new file mode 100644 index 00000000000..985a8bc29e1 --- /dev/null +++ b/fvm/evm/types/backend.go @@ -0,0 +1,26 @@ +package types
+
+import (
+ "github.com/onflow/flow-go/fvm/environment"
+)
+
+// BackendStorage provides an interface for storage of registers
+type BackendStorage interface {
+ environment.ValueStore
+}
+
+// Backend provides a subset of the FVM environment functionality.
+// Any error returned by a Backend is expected to be a `FatalError` or
+// a `BackendError`.
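+//
+// As an illustrative (non-normative) sketch of that error contract, a caller
+// that needs to distinguish the two kinds of failure might do something along
+// these lines (assuming the `IsAFatalError` helper defined alongside these
+// types):
+//
+//  v, err := backend.GetValue(owner, key)
+//  if err != nil && IsAFatalError(err) {
+//   // unrecoverable; halt execution
+//  }
+//  _ = v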
+type Backend interface {
+ BackendStorage
+ environment.Meter
+ environment.EventEmitter
+ environment.BlockInfo
+ environment.RandomGenerator
+ environment.ContractFunctionInvoker
+ environment.UUIDGenerator
+ environment.Tracer
+ environment.EVMMetricsReporter
+ environment.LoggerProvider
+} diff --git a/fvm/evm/types/balance.go b/fvm/evm/types/balance.go new file mode 100644 index 00000000000..2dfdfc53ca6 --- /dev/null +++ b/fvm/evm/types/balance.go @@ -0,0 +1,128 @@ +package types
+
+import (
+ "fmt"
+ "math"
+ "math/big"
+
+ "github.com/onflow/cadence"
+ "github.com/onflow/cadence/fixedpoint"
+)
+
+var (
+ AttoScale = 18
+ UFixedScale = fixedpoint.Fix64Scale
+ UFixedToAttoConversionScale = AttoScale - UFixedScale
+ UFixToAttoConversionMultiplier = new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(UFixedToAttoConversionScale)), nil)
+
+ OneFlowInUFix64 = cadence.UFix64(uint64(math.Pow(10, float64(UFixedScale))))
+ EmptyBalance = Balance(new(big.Int))
+)
+
+// Balance represents the balance of an address
+// in the EVM environment (Flow EVM). Balances are kept in atto-flow (1e-18 flow),
+// the smallest denomination of the FLOW token (similar to how Wei is used to store Eth),
+// but a Cadence FLOW vault uses a Cadence.UFix64 to store values in Flow, which means
+// 1e-8 is the smallest value that can be stored in the vault.
+// The balance here uses the highest precision (atto-flow), and utility
+// functions are provided for conversion from/to UFix64 to prevent accidental
+// conversion errors and to deal with rounding errors.
+type Balance *big.Int
+
+// BalancesAreEqual returns true if balances are equal
+func BalancesAreEqual(bal1, bal2 Balance) bool {
+ return (*big.Int)(bal1).Cmp(bal2) == 0
+}
+
+// NewBalance constructs a new balance from an atto-flow value
+func NewBalance(inp *big.Int) Balance {
+ return Balance(inp)
+}
+
+// NewBalanceFromUFix64 constructs a new balance from a flow value (how it's stored in Cadence Flow)
+func NewBalanceFromUFix64(inp cadence.UFix64) Balance {
+ return new(big.Int).Mul(
+  new(big.Int).SetUint64(uint64(inp)),
+  UFixToAttoConversionMultiplier)
+}
+
+// CopyBalance creates a copy of the balance
+func CopyBalance(inp Balance) Balance {
+ return Balance(new(big.Int).Set(inp))
+}
+
+// BalanceToBigInt converts a balance into a big int
+func BalanceToBigInt(bal Balance) *big.Int {
+ return (*big.Int)(bal)
+}
+
+// UnsafeCastOfBalanceToFloat64 tries to cast the balance into a float64.
+//
+// Warning! This method is only provided for logging and metric reporting
+// purposes; using float64 for any actual computation results in non-determinism.
+func UnsafeCastOfBalanceToFloat64(bal Balance) float64 {
+ res, _ := new(big.Float).Quo(
+  new(big.Float).SetInt(bal),
+  new(big.Float).SetInt(
+   new(big.Int).Exp(
+    big.NewInt(10),
+    big.NewInt(int64(AttoScale)),
+    nil,
+   ),
+  ),
+ ).Float64()
+ return res
+}
+
+// ConvertBalanceToUFix64 casts the balance into a UFix64.
+//
+// Warning! The smallest unit of Flow token that a FlowVault (Cadence) can store is 1e-8,
+// so transferring smaller values (or values with small fractions) could result in a loss during
+// conversion. The roundedOff flag should be used to prevent loss of assets.
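+//
+// As a worked example of the scales involved (mirroring TestBalance in
+// balance_test.go): UFix64 carries 8 decimals while atto-flow carries 18,
+// so the conversion divides by 10^10 (UFixToAttoConversionMultiplier).
+// 100000200000000000000 atto-flow converts to 100.00020000 with no rounding,
+// while 1 atto-flow is below 1e-8 Flow and converts to 0 with roundedOff set
+// to true:
+//
+//  bal := NewBalance(big.NewInt(1))
+//  v, roundedOff, err := ConvertBalanceToUFix64(bal)
+//  // v == cadence.UFix64(0), roundedOff == true, err == nil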
+func ConvertBalanceToUFix64(bal Balance) (value cadence.UFix64, roundedOff bool, err error) {
+ converted := new(big.Int).Div(bal, UFixToAttoConversionMultiplier)
+ if !converted.IsUint64() {
+  // this should never happen
+  err = fmt.Errorf("balance can't be cast to a uint64")
+ }
+ return cadence.UFix64(converted.Uint64()), BalanceConversionToUFix64ProneToRoundingError(bal), err
+}
+
+// BalanceConversionToUFix64ProneToRoundingError returns true
+// if casting to UFix64 could result in rounding error
+func BalanceConversionToUFix64ProneToRoundingError(bal Balance) bool {
+ return new(big.Int).Mod(bal, UFixToAttoConversionMultiplier).BitLen() != 0
+}
+
+// SubBalance subtracts balance 2 from balance 1 and returns the result as a new balance
+func SubBalance(bal1 Balance, bal2 Balance) (Balance, error) {
+ if (*big.Int)(bal1).Cmp(bal2) == -1 {
+  return nil, ErrInvalidBalance
+ }
+ return new(big.Int).Sub(bal1, bal2), nil
+}
+
+// AddBalance adds balance 2 to balance 1 and returns the result as a new balance
+func AddBalance(bal1 Balance, bal2 Balance) (Balance, error) {
+ return new(big.Int).Add(bal1, bal2), nil
+}
+
+// MakeABalanceInFlow makes a balance object that has `amount` Flow Token in it
+func MakeABalanceInFlow(amount uint64) Balance {
+ return NewBalance(MakeBigIntInFlow(amount))
+}
+
+// MakeBigIntInFlow makes a big int containing `amount` of Flow
+func MakeBigIntInFlow(amount uint64) *big.Int {
+ return new(big.Int).Mul(OneFlowBalance(), new(big.Int).SetUint64(amount))
+}
+
+// OneFlow creates a big int containing one flow
+func OneFlow() *big.Int {
+ return new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(AttoScale)), nil)
+}
+
+// OneFlowBalance creates a new balance containing one flow
+func OneFlowBalance() Balance {
+ return Balance(OneFlow())
+} diff --git a/fvm/evm/types/balance_test.go b/fvm/evm/types/balance_test.go new file mode 100644 index 00000000000..f8381d1513f --- /dev/null +++ b/fvm/evm/types/balance_test.go @@ -0,0 +1,49 @@ +package types_test
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/cadence"
+
+ "github.com/onflow/flow-go/fvm/evm/types"
+)
+
+func TestBalance(t *testing.T) {
+ // test attoflow to flow
+ bal := types.OneFlowBalance()
+ require.Equal(t, bal, types.NewBalanceFromUFix64(types.OneFlowInUFix64))
+
+ // 100.0002 Flow
+ u, err := cadence.NewUFix64("100.0002")
+ require.NoError(t, err)
+ require.Equal(t, "100.00020000", u.String())
+
+ bb := types.NewBalanceFromUFix64(u)
+ require.Equal(t, "100000200000000000000", types.BalanceToBigInt(bb).String())
+ require.False(t, types.BalanceConversionToUFix64ProneToRoundingError(bb))
+ bret, roundedOff, err := types.ConvertBalanceToUFix64(bb)
+ require.NoError(t, err)
+ require.Equal(t, u, bret)
+ require.False(t, roundedOff)
+
+ // rounded off flag
+ bal = types.NewBalance(big.NewInt(1))
+ require.NoError(t, err)
+ require.True(t, types.BalanceConversionToUFix64ProneToRoundingError(bal))
+ bret, roundedOff, err = types.ConvertBalanceToUFix64(bal)
+ require.NoError(t, err)
+ require.Equal(t, cadence.UFix64(0), bret)
+ require.True(t, roundedOff)
+
+ // test unsafe conversion to float64
+ u, err = cadence.NewUFix64("100.0002")
+ require.NoError(t, err)
+ bal = types.NewBalanceFromUFix64(u)
+ require.NoError(t, err)
+ ret := types.UnsafeCastOfBalanceToFloat64(bal)
+ require.Equal(t, 100.0002, ret)
+
+} diff --git a/fvm/evm/types/block.go b/fvm/evm/types/block.go new file mode 100644 index 00000000000..a9f48f581c9 --- /dev/null +++ b/fvm/evm/types/block.go @@ -0,0 +1,323 @@
+package types
+
+import (
+ "bytes"
+ "math/big"
+ "time"
+
+ gethCommon "github.com/ethereum/go-ethereum/common"
+ gethTypes "github.com/ethereum/go-ethereum/core/types"
+ gethCrypto "github.com/ethereum/go-ethereum/crypto"
+ gethRLP "github.com/ethereum/go-ethereum/rlp"
+ gethTrie "github.com/ethereum/go-ethereum/trie"
+
+ "github.com/onflow/flow-go/model/flow"
+)
+
+// Block represents an EVM block.
+// It captures block info such as height and state.
+type Block struct {
+ // ParentBlockHash is the hash of the parent block
+ ParentBlockHash gethCommon.Hash
+
+ // Height is the height of this block
+ Height uint64
+
+ // Timestamp is a Unix timestamp in seconds at which the block was created
+ // Note that this value must be provided from the FVM Block
+ Timestamp uint64
+
+ // TotalSupply holds the total amount of the native token deposited on the EVM side (in attoflow)
+ TotalSupply *big.Int
+
+ // ReceiptRoot is the root hash of the receipts emitted in this block
+ // Note that this value won't be unique to each block; for example, in the
+ // case of an empty trie of receipts, or of a single receipt with no logs and failed state,
+ // the same receipt root would be reported for the block.
+ ReceiptRoot gethCommon.Hash
+
+ // TransactionHashRoot is the root hash of the transaction hashes
+ // included in this block.
+ // Note that despite similar functionality this is a bit different than the TransactionRoot
+ // provided by Ethereum. TransactionRoot constructs a Merkle proof with leafs holding
+ // encoded transactions as values. But TransactionHashRoot uses transaction hash
+ // values as node values. Proofs are still compatible but might require an extra hashing step.
+ TransactionHashRoot gethCommon.Hash
+
+ // TotalGasUsed stores gas used by all transactions included in the block.
+ TotalGasUsed uint64
+
+ // PrevRandao is the value returned for the block.prevrandao opcode
+ PrevRandao gethCommon.Hash
+}
+
+// ToBytes encodes the block into bytes
+func (b *Block) ToBytes() ([]byte, error) {
+ return gethRLP.EncodeToBytes(b)
+}
+
+// Hash returns the hash of the block
+func (b *Block) Hash() (gethCommon.Hash, error) {
+ data, err := b.ToBytes()
+ return gethCrypto.Keccak256Hash(data), err
+}
+
+// NewBlock constructs a new block
+func NewBlock(
+ parentBlockHash gethCommon.Hash,
+ height uint64,
+ timestamp uint64,
+ totalSupply *big.Int,
+ prevRandao gethCommon.Hash,
+) *Block {
+ return &Block{
+  ParentBlockHash: parentBlockHash,
+  Height: height,
+  Timestamp: timestamp,
+  TotalSupply: totalSupply,
+  ReceiptRoot: gethTypes.EmptyReceiptsHash,
+  TransactionHashRoot: gethTypes.EmptyRootHash,
+  PrevRandao: prevRandao,
+ }
+}
+
+// NewBlockFromBytes constructs a new block from encoded data
+func NewBlockFromBytes(encoded []byte) (*Block, error) {
+ res := &Block{}
+ err := gethRLP.DecodeBytes(encoded, res)
+ if err != nil {
+  res = decodeBlockBreakingChanges(encoded)
+  if res == nil {
+   return nil, err
+  }
+ }
+ return res, nil
+}
+
+// GenesisTimestamp returns the block timestamp for the EVM genesis block
+func GenesisTimestamp(flowChainID flow.ChainID) uint64 {
+ switch flowChainID {
+ case flow.Testnet:
+  return uint64(time.Date(2024, time.August, 1, 0, 0, 0, 0, time.UTC).Unix())
+ case flow.Mainnet:
+  return uint64(time.Date(2024, time.September, 1, 0, 0, 0, 0, time.UTC).Unix())
+ default:
+  return 0
+ }
+}
+
+// GenesisBlock returns the genesis block in the EVM environment
+func GenesisBlock(chainID flow.ChainID) *Block {
+ return &Block{
+  ParentBlockHash: gethCommon.Hash{},
+  Height: uint64(0),
+  Timestamp: GenesisTimestamp(chainID),
+  TotalSupply: new(big.Int),
+  ReceiptRoot: gethTypes.EmptyRootHash,
+  TransactionHashRoot: gethTypes.EmptyRootHash,
+  TotalGasUsed: 0,
+  PrevRandao: gethCommon.Hash{},
+ }
+}
+
+// When testnet was launched, the block structure
+// didn't have the PrevRandao field, so the hash in the emitted event
+// differs from the hash of the current genesis block struct.
+var TestNetGenesisHash = gethCommon.Hash{
+ 60, 220, 118, 103, 27, 85, 73, 205,
+ 46, 2, 83, 105, 179, 240, 255, 14,
+ 55, 21, 42, 211, 55, 87, 177, 115,
+ 118, 144, 125, 37, 146, 116, 168, 229,
+}
+
+// GenesisBlockHash returns the genesis block hash in the EVM environment
+func GenesisBlockHash(chainID flow.ChainID) gethCommon.Hash {
+ // for the case of testnet, the block didn't initially
+ // have the PrevRandao field, and it was not part of the hash calculation.
+ if chainID == flow.Testnet {
+  return TestNetGenesisHash
+ }
+ h, err := GenesisBlock(chainID).Hash()
+ if err != nil { // this never happens
+  panic(err)
+ }
+ return h
+}
+
+// BlockProposal is an EVM block proposal,
+// holding all the interim data of a block before commitment
+type BlockProposal struct {
+ Block
+
+ // Receipts keeps an ordered list of light receipts generated during block execution
+ Receipts []LightReceipt
+
+ // TxHashes keeps transaction hashes included in this block proposal
+ TxHashes TransactionHashes
+}
+
+// AppendTransaction appends a transaction hash to the list of transaction hashes of the block
+// and also updates the receipts
+func (b *BlockProposal) AppendTransaction(res *Result) {
+ // we don't append invalid transactions to blocks
+ if res == nil || res.Invalid() {
+  return
+ }
+ b.TxHashes = append(b.TxHashes, res.TxHash)
+ r := res.LightReceipt()
+ if r == nil {
+  return
+ }
+ b.Receipts = append(b.Receipts, *r)
+ b.TotalGasUsed = r.CumulativeGasUsed
+}
+
+// PopulateRoots populates receiptRoot and transactionHashRoot
+func (b *BlockProposal) PopulateRoots() {
+ // TODO: we can make this concurrent if needed in the future
+ // to improve the block production speed
+ b.PopulateTransactionHashRoot()
+ b.PopulateReceiptRoot()
+}
+
+// PopulateTransactionHashRoot sets the transactionHashRoot
+func (b *BlockProposal) PopulateTransactionHashRoot() {
+ if len(b.TransactionHashRoot) == 0 {
+  b.TransactionHashRoot = gethTypes.EmptyRootHash
+  return
+ }
+ b.TransactionHashRoot = b.TxHashes.RootHash()
+}
+
+// PopulateReceiptRoot sets the receiptRoot
+func (b *BlockProposal) PopulateReceiptRoot() {
+ if len(b.Receipts) == 0 {
+  b.ReceiptRoot = gethTypes.EmptyReceiptsHash
+  return
+ }
+ receipts := make(gethTypes.Receipts, len(b.Receipts))
+ for i, lr := range b.Receipts {
+  receipts[i] = lr.ToReceipt()
+ }
+ b.ReceiptRoot = gethTypes.DeriveSha(receipts, gethTrie.NewStackTrie(nil))
+}
+
+// ToBytes encodes the block proposal into bytes
+func (b *BlockProposal) ToBytes() ([]byte, error) {
+ return gethRLP.EncodeToBytes(b)
+}
+
+// NewBlockProposalFromBytes constructs a new block proposal from encoded data
+func NewBlockProposalFromBytes(encoded []byte) (*BlockProposal, error) {
+ res := &BlockProposal{}
+ err := gethRLP.DecodeBytes(encoded, res)
+ if err != nil {
+  res = decodeBlockProposalBreakingChanges(encoded)
+  if res == nil {
+   return nil, err
+  }
+ }
+ return res, nil
+}
+
+func NewBlockProposal(
+ parentBlockHash gethCommon.Hash,
+ height uint64,
+ timestamp uint64,
+ totalSupply *big.Int,
+ prevRandao gethCommon.Hash,
+) *BlockProposal {
+ return &BlockProposal{
+  Block: Block{
+   ParentBlockHash: parentBlockHash,
+   Height: height,
+   Timestamp: timestamp,
+   TotalSupply: totalSupply,
+   ReceiptRoot: gethTypes.EmptyRootHash,
+   PrevRandao: prevRandao,
+  },
+  Receipts: make([]LightReceipt, 0),
+  TxHashes: make([]gethCommon.Hash, 0),
+ }
+}
+
+type TransactionHashes []gethCommon.Hash
+
+func (t TransactionHashes) Len() int {
+ return len(t)
+}
+
+func (t TransactionHashes) EncodeIndex(index int, buffer *bytes.Buffer) {
+ buffer.Write(t[index].Bytes())
+}
+
+func (t TransactionHashes) RootHash() gethCommon.Hash {
+ return gethTypes.DeriveSha(t, gethTrie.NewStackTrie(nil))
+}
+
+// The block type section below defines earlier block types;
+// it is used to decode blocks that were stored
+// before the block type changed. It allows us to still decode
+// a block that would otherwise be invalid if decoded into
+// the latest version of the above Block type.
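+//
+// For example (a sketch mirroring Test_DecodeHistoricBlocks below): bytes
+// produced by RLP-encoding a BlockV0 fail to decode directly into Block
+// (the field counts differ), so NewBlockFromBytes falls back to the legacy
+// type and returns a migrated Block with a zero PrevRandao:
+//
+//  old, _ := gethRLP.EncodeToBytes(&BlockV0{Height: 1})
+//  b, err := NewBlockFromBytes(old)
+//  // err == nil, b.Height == 1, b.PrevRandao == gethCommon.Hash{}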
+ +// before adding PrevRandao to the block +type BlockV0 struct { + ParentBlockHash gethCommon.Hash + Height uint64 + Timestamp uint64 + TotalSupply *big.Int + ReceiptRoot gethCommon.Hash + TransactionHashRoot gethCommon.Hash + TotalGasUsed uint64 +} + +type BlockProposalV0 struct { + BlockV0 + Receipts []LightReceipt + TxHashes TransactionHashes +} + +// decodeBlockBreakingChanges will try to decode the bytes into all +// previous versions of block type, if it succeeds it will return the +// migrated block, otherwise it will return nil. +func decodeBlockBreakingChanges(encoded []byte) *Block { + b0 := &BlockV0{} + if err := gethRLP.DecodeBytes(encoded, b0); err == nil { + return &Block{ + ParentBlockHash: b0.ParentBlockHash, + Height: b0.Height, + Timestamp: b0.Timestamp, + TotalSupply: b0.TotalSupply, + ReceiptRoot: b0.ReceiptRoot, + TransactionHashRoot: b0.TransactionHashRoot, + TotalGasUsed: b0.TotalGasUsed, + PrevRandao: gethCommon.Hash{}, + } + } + return nil +} + +// decodeBlockProposalBreakingChanges will try to decode the bytes into all +// previous versions of block proposal type, if it succeeds it will return the +// migrated block, otherwise it will return nil. +func decodeBlockProposalBreakingChanges(encoded []byte) *BlockProposal { + bp0 := &BlockProposalV0{} + if err := gethRLP.DecodeBytes(encoded, bp0); err == nil { + return &BlockProposal{ + Block: Block{ + ParentBlockHash: bp0.ParentBlockHash, + Height: bp0.Height, + Timestamp: bp0.Timestamp, + TotalSupply: bp0.TotalSupply, + ReceiptRoot: bp0.ReceiptRoot, + TransactionHashRoot: bp0.TransactionHashRoot, + TotalGasUsed: bp0.TotalGasUsed, + PrevRandao: gethCommon.Hash{}, + }, + Receipts: bp0.Receipts, + TxHashes: bp0.TxHashes, + } + } + return nil +} diff --git a/fvm/evm/types/block_test.go b/fvm/evm/types/block_test.go new file mode 100644 index 00000000000..03e6ca530f7 --- /dev/null +++ b/fvm/evm/types/block_test.go @@ -0,0 +1,126 @@ +package types + +import ( + "math/big" + "testing" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethRLP "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" +) + +func Test_GenesisBlock(t *testing.T) { + testnetGenesis := GenesisBlock(flow.Testnet) + require.Equal(t, testnetGenesis.Timestamp, GenesisTimestamp(flow.Testnet)) + testnetGenesisHash := GenesisBlockHash(flow.Testnet) + require.Equal(t, TestNetGenesisHash, testnetGenesisHash) + + mainnetGenesis := GenesisBlock(flow.Mainnet) + require.Equal(t, mainnetGenesis.Timestamp, GenesisTimestamp(flow.Mainnet)) + mainnetGenesisHash := GenesisBlockHash(flow.Mainnet) + h, err := mainnetGenesis.Hash() + require.NoError(t, err) + require.Equal(t, h, mainnetGenesisHash) + + assert.NotEqual(t, testnetGenesisHash, mainnetGenesisHash) +} + +func Test_BlockHash(t *testing.T) { + b := Block{ + ParentBlockHash: gethCommon.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + Height: 1, + TotalSupply: big.NewInt(1000), + ReceiptRoot: gethCommon.Hash{0x2, 0x3, 0x4}, + TotalGasUsed: 135, + TransactionHashRoot: gethCommon.Hash{0x5, 0x6, 0x7}, + } + + h1, err := b.Hash() + require.NoError(t, err) + + b.Height = 2 + + h2, err := b.Hash() + require.NoError(t, err) + + // hashes should not equal if any data is changed + assert.NotEqual(t, h1, h2) +} + +func Test_BlockProposal(t *testing.T) { + bp := NewBlockProposal(gethCommon.Hash{1}, 1, 0, nil, gethCommon.Hash{1, 
2, 3}) + + bp.AppendTransaction(nil) + require.Empty(t, bp.TxHashes) + require.Equal(t, uint64(0), bp.TotalGasUsed) + + bp.PopulateRoots() + require.Equal(t, gethTypes.EmptyReceiptsHash, bp.ReceiptRoot) + require.Equal(t, gethTypes.EmptyRootHash, bp.TransactionHashRoot) + + res := &Result{ + TxHash: gethCommon.Hash{2}, + GasConsumed: 10, + CumulativeGasUsed: 20, + } + bp.AppendTransaction(res) + require.Equal(t, res.TxHash, bp.TxHashes[0]) + require.Equal(t, res.CumulativeGasUsed, bp.TotalGasUsed) + require.Equal(t, *res.LightReceipt(), bp.Receipts[0]) + + bp.PopulateRoots() + require.NotEqual(t, gethTypes.EmptyReceiptsHash, bp.ReceiptRoot) +} + +func Test_DecodeHistoricBlocks(t *testing.T) { + bv0 := BlockV0{ + ParentBlockHash: gethCommon.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + Height: 1, + Timestamp: 2, + TotalSupply: big.NewInt(3), + ReceiptRoot: gethCommon.Hash{0x04}, + TransactionHashRoot: gethCommon.Hash{0x05}, + TotalGasUsed: 0, + } + b0, err := gethRLP.EncodeToBytes(bv0) + require.NoError(t, err) + + b := decodeBlockBreakingChanges(b0) + require.Equal(t, b.ParentBlockHash, bv0.ParentBlockHash) + require.Equal(t, b.Height, bv0.Height) + require.Equal(t, b.Timestamp, bv0.Timestamp) + require.Equal(t, b.TotalSupply.Uint64(), bv0.TotalSupply.Uint64()) + require.Equal(t, b.ReceiptRoot, bv0.ReceiptRoot) + require.Equal(t, b.TransactionHashRoot, bv0.TransactionHashRoot) + require.Equal(t, b.TotalGasUsed, bv0.TotalGasUsed) + require.Empty(t, b.PrevRandao) + + bpv0 := BlockProposalV0{ + BlockV0: bv0, + Receipts: []LightReceipt{ + {CumulativeGasUsed: 10}, + {CumulativeGasUsed: 2}, + }, + TxHashes: []gethCommon.Hash{{1, 2}, {3, 4}, {5, 6}}, + } + + bp0, err := gethRLP.EncodeToBytes(bpv0) + require.NoError(t, err) + + bp, err := NewBlockProposalFromBytes(bp0) + require.NoError(t, err) + require.Equal(t, bp.ParentBlockHash, bpv0.ParentBlockHash) + require.Equal(t, bp.Height, bpv0.Height) + require.Equal(t, bp.Timestamp, bpv0.Timestamp) + require.Equal(t, bp.TotalSupply.Uint64(), bpv0.TotalSupply.Uint64()) + require.Equal(t, bp.ReceiptRoot, bpv0.ReceiptRoot) + require.Equal(t, bp.TransactionHashRoot, bpv0.TransactionHashRoot) + require.Equal(t, bp.TotalGasUsed, bpv0.TotalGasUsed) + require.Empty(t, bp.PrevRandao) + require.Len(t, bp.Receipts, 2) + require.Len(t, bp.TxHashes, 3) +} diff --git a/fvm/evm/types/call.go b/fvm/evm/types/call.go new file mode 100644 index 00000000000..c8dd7efae97 --- /dev/null +++ b/fvm/evm/types/call.go @@ -0,0 +1,277 @@ +package types + +import ( + "fmt" + "math/big" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethCore "github.com/ethereum/go-ethereum/core" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethParams "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +const ( + // tx type 255 is used for direct calls from COAs + DirectCallTxType = byte(255) + + UnknownCallSubType = byte(0) + DepositCallSubType = byte(1) + WithdrawCallSubType = byte(2) + TransferCallSubType = byte(3) + DeployCallSubType = byte(4) + ContractCallSubType = byte(5) + + // Note that these gas values might need to change if we + // change the transaction (e.g. add access list), + // then it has to be updated to use Intrinsic function + // to calculate the minimum gas needed to run the transaction. 
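+ //
+ // As a concrete reading of the constants below (gethParams.TxGas is
+ // 21_000 in go-ethereum): IntrinsicFeeForTokenTransfer = 21_000 and
+ // DefaultGasLimitForTokenTransfer = 21_000 + 2_300 = 23_300, where
+ // 2_300 is the gas stipend available to receive/fallback methods.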
+ IntrinsicFeeForTokenTransfer = gethParams.TxGas
+
+ // 21_000 is the minimum for a transaction + max gas allowed for receive/fallback methods
+ DefaultGasLimitForTokenTransfer = IntrinsicFeeForTokenTransfer + 2_300
+
+ // the value is set to the gas limit for transfer to facilitate transfers
+ // to smart contract addresses.
+ DepositCallGasLimit = DefaultGasLimitForTokenTransfer
+ WithdrawCallGasLimit = DefaultGasLimitForTokenTransfer
+)
+
+// DirectCall captures all the data related to a direct call to the EVM.
+// Direct calls are similar to transactions, but they don't have
+// signatures and don't need sequence number checks.
+// Note that while we don't check the nonce, it impacts
+// hash calculation and also impacts the address of the resulting contract
+// when deployed through direct calls.
+// Users don't have to worry about the nonce; the handler sets
+// it to the right value.
+type DirectCall struct {
+ Type byte
+ SubType byte
+ From Address
+ To Address
+ Data []byte
+ Value *big.Int
+ GasLimit uint64
+ Nonce uint64
+}
+
+// DirectCallFromEncoded constructs a DirectCall from encoded data
+func DirectCallFromEncoded(encoded []byte) (*DirectCall, error) {
+ if encoded[0] != DirectCallTxType {
+  return nil, fmt.Errorf("tx type mismatch")
+ }
+ dc := &DirectCall{}
+ return dc, rlp.DecodeBytes(encoded[1:], dc)
+}
+
+// Encode encodes the direct call; it also adds the type
+// as the very first byte, similar to how the EVM encodes types.
+func (dc *DirectCall) Encode() ([]byte, error) {
+ encoded, err := rlp.EncodeToBytes(dc)
+ return append([]byte{dc.Type}, encoded...), err
+}
+
+// Hash computes the hash of a direct call
+func (dc *DirectCall) Hash() gethCommon.Hash {
+ // we use the geth transaction hash calculation since the direct call hash is included in the
+ // block transaction hashes, and thus observed as any other transaction.
+ // We construct this Legacy tx type so that external 3rd-party tools
+ // don't have to support a new type for the purpose of hash computation.
+ return dc.Transaction().Hash()
+}
+
+// Message constructs a core.Message from the direct call
+func (dc *DirectCall) Message() *gethCore.Message {
+ return &gethCore.Message{
+  From: dc.From.ToCommon(),
+  To: dc.to(),
+  Value: dc.Value,
+  Data: dc.Data,
+  Nonce: dc.Nonce,
+  GasLimit: dc.GasLimit,
+  GasPrice: big.NewInt(0), // price is set to zero for direct calls
+  GasTipCap: big.NewInt(0), // also known as maxPriorityFeePerGas (in GWei)
+  GasFeeCap: big.NewInt(0), // also known as maxFeePerGas (in GWei)
+  // TODO: maybe revisit setting the access list
+  // AccessList: tx.AccessList(),
+  // When SkipNonceChecks is true, the message nonce is
+  // not checked against the account nonce in state.
+  // When SkipFromEOACheck is true, it disables checking
+  // that the sender is an EOA.
+  // Since we use direct calls for COAs, we set
+  // the nonce, and the COA is a smart contract.
+  SkipNonceChecks: true,
+  SkipFromEOACheck: true,
+ }
+}
+
+// Transaction constructs a geth.Transaction from the direct call
+func (dc *DirectCall) Transaction() *gethTypes.Transaction {
+ // Since a direct call doesn't have a valid signature,
+ // and we need to somehow include the From field for the purpose
+ // of hash calculation, we define the canonical format by
+ // using the FROM bytes to set the bytes for the R part of the tx (big endian);
+ // S captures the subtype of the transaction, and V is set to DirectCallTxType (255).
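+ //
+ // As a worked example (matching the assertions in TestDirectCall): for a
+ // deposit direct call, RawSignatureValues() on the resulting legacy tx
+ // yields R == big.Int of the 20 From bytes (big endian), S == big.Int of
+ // []byte{DepositCallSubType} (0x01), and V == big.Int of
+ // []byte{DirectCallTxType} (0xff == 255).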
+	return gethTypes.NewTx(&gethTypes.LegacyTx{
+		GasPrice: big.NewInt(0),
+		Gas:      dc.GasLimit,
+		To:       dc.to(),
+		Value:    dc.Value,
+		Data:     dc.Data,
+		Nonce:    dc.Nonce,
+		R:        new(big.Int).SetBytes(dc.From.Bytes()),
+		S:        new(big.Int).SetBytes([]byte{dc.SubType}),
+		V:        new(big.Int).SetBytes([]byte{DirectCallTxType}),
+	})
+}
+
+// EmptyToField returns true if the `to` field contains an empty address
+func (dc *DirectCall) EmptyToField() bool {
+	return dc.To == EmptyAddress
+}
+
+func (dc *DirectCall) to() *gethCommon.Address {
+	if !dc.EmptyToField() {
+		ct := dc.To.ToCommon()
+		return &ct
+	}
+	return nil
+}
+
+// NewDepositCall constructs a new deposit direct call
+func NewDepositCall(
+	bridge Address,
+	address Address,
+	amount *big.Int,
+	nonce uint64,
+) *DirectCall {
+	return &DirectCall{
+		Type:     DirectCallTxType,
+		SubType:  DepositCallSubType,
+		From:     bridge,
+		To:       address,
+		Data:     nil,
+		Value:    amount,
+		GasLimit: DepositCallGasLimit,
+		Nonce:    nonce,
+	}
+}
+
+// NewWithdrawCall constructs a new withdraw direct call
+func NewWithdrawCall(
+	bridge Address,
+	address Address,
+	amount *big.Int,
+	nonce uint64,
+) *DirectCall {
+	return &DirectCall{
+		Type:     DirectCallTxType,
+		SubType:  WithdrawCallSubType,
+		From:     address,
+		To:       bridge,
+		Data:     nil,
+		Value:    amount,
+		GasLimit: WithdrawCallGasLimit,
+		Nonce:    nonce,
+	}
+}
+
+// NewTransferCall constructs a new transfer direct call
+func NewTransferCall(
+	from Address,
+	to Address,
+	amount *big.Int,
+	nonce uint64,
+) *DirectCall {
+	return &DirectCall{
+		Type:     DirectCallTxType,
+		SubType:  TransferCallSubType,
+		From:     from,
+		To:       to,
+		Data:     nil,
+		Value:    amount,
+		GasLimit: DefaultGasLimitForTokenTransfer,
+		Nonce:    nonce,
+	}
+}
+
+// NewDeployCall constructs a new deploy direct call
+func NewDeployCall(
+	caller Address,
+	code Code,
+	gasLimit uint64,
+	value *big.Int,
+	nonce uint64,
+) *DirectCall {
+	return &DirectCall{
+		Type:     DirectCallTxType,
+		SubType:  DeployCallSubType,
+		From:     caller,
+		To:       EmptyAddress,
+		Data:     code,
+		Value:    value,
+		GasLimit: gasLimit,
+		Nonce:    nonce,
+	}
+}
+
+// NewDeployCallWithTargetAddress constructs a new deployment call
+// for the given target address.
+//
+// Warning! This subtype should only be used internally for
+// deploying contracts at given addresses (e.g. COA account init setup)
+// and should not be used for other purposes.
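+// As an illustrative note (an assumption about the handler's internals):
+// the COA init flow mentioned above would first allocate an address for
+// the resource and then use this call to place the COA contract code at
+// exactly that address.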
+func NewDeployCallWithTargetAddress( + caller Address, + to Address, + code Code, + gasLimit uint64, + value *big.Int, + nonce uint64, +) *DirectCall { + return &DirectCall{ + Type: DirectCallTxType, + SubType: DeployCallSubType, + From: caller, + To: to, + Data: code, + Value: value, + GasLimit: gasLimit, + Nonce: nonce, + } +} + +// NewContractCall constructs a new contract call +func NewContractCall( + caller Address, + to Address, + data Data, + gasLimit uint64, + value *big.Int, + nonce uint64, +) *DirectCall { + return &DirectCall{ + Type: DirectCallTxType, + SubType: ContractCallSubType, + From: caller, + To: to, + Data: data, + Value: value, + GasLimit: gasLimit, + Nonce: nonce, + } +} + +// GasLimit sets the limit for the total gas used by a transaction +type GasLimit uint64 + +// Code holds an smart contract code +type Code []byte + +// Data holds the data passed as part of a call +type Data []byte + +// AsBigInt process the data and return it as a big integer +func (d Data) AsBigInt() *big.Int { + return new(big.Int).SetBytes(d) +} diff --git a/fvm/evm/types/call_test.go b/fvm/evm/types/call_test.go new file mode 100644 index 00000000000..0bc63b9d017 --- /dev/null +++ b/fvm/evm/types/call_test.go @@ -0,0 +1,66 @@ +package types + +import ( + "bytes" + "io" + "math/big" + "testing" + + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDirectCall(t *testing.T) { + dc := &DirectCall{ + Type: DirectCallTxType, + SubType: DepositCallSubType, + From: Address{0x1, 0x2}, + To: Address{0x3, 0x4}, + Data: []byte{0xf, 0xa, 0xb}, + Value: big.NewInt(5), + GasLimit: 100, + } + + t.Run("calculate hash", func(t *testing.T) { + h := dc.Hash() + assert.Equal(t, "0xed76124cc3c59f13e1113f5c380e2a67dab9bf616afc645073d2491fe3aecb62", h.Hex()) + + // the hash should stay the same after RLP encoding and decoding + var b bytes.Buffer + writer := io.Writer(&b) + err := dc.Transaction().EncodeRLP(writer) + require.NoError(t, err) + + reconstructedTx := &gethTypes.Transaction{} + err = reconstructedTx.DecodeRLP(rlp.NewStream(io.Reader(&b), 1000)) + require.NoError(t, err) + + h = reconstructedTx.Hash() + assert.Equal(t, "0xed76124cc3c59f13e1113f5c380e2a67dab9bf616afc645073d2491fe3aecb62", h.Hex()) + }) + + t.Run("same content except `from` should result in different hashes", func(t *testing.T) { + h := dc.Hash() + dc.From = Address{0x4, 0x5} + h2 := dc.Hash() + assert.NotEqual(t, h2.Hex(), h.Hex()) + }) + + t.Run("construct transaction", func(t *testing.T) { + tx := dc.Transaction() + h := dc.Hash() + assert.Equal(t, dc.Value, tx.Value()) + assert.Equal(t, dc.To.ToCommon(), *tx.To()) + assert.Equal(t, h, tx.Hash()) + assert.Equal(t, dc.GasLimit, tx.Gas()) + assert.Equal(t, dc.Data, tx.Data()) + assert.Equal(t, uint64(0), tx.Nonce()) // no nonce exists for direct call + + v, r, s := tx.RawSignatureValues() + require.Equal(t, dc.From.Bytes(), r.Bytes()) + require.Equal(t, []byte{dc.SubType}, s.Bytes()) + require.Equal(t, []byte{DirectCallTxType}, v.Bytes()) + }) +} diff --git a/fvm/evm/types/chainIDs.go b/fvm/evm/types/chainIDs.go new file mode 100644 index 00000000000..0414e7bef89 --- /dev/null +++ b/fvm/evm/types/chainIDs.go @@ -0,0 +1,29 @@ +package types + +import ( + "math/big" + + "github.com/onflow/flow-go/model/flow" +) + +var ( + FlowEVMPreviewNetChainID = big.NewInt(646) + FlowEVMTestNetChainID = big.NewInt(545) + FlowEVMMainNetChainID = big.NewInt(747) + + 
FlowEVMPreviewNetChainIDInUInt64 = FlowEVMPreviewNetChainID.Uint64() + FlowEVMTestNetChainIDInUInt64 = FlowEVMTestNetChainID.Uint64() + FlowEVMMainNetChainIDInUInt64 = FlowEVMMainNetChainID.Uint64() +) + +func EVMChainIDFromFlowChainID(flowChainID flow.ChainID) *big.Int { + // default evm chain ID is previewNet + switch flowChainID { + case flow.Mainnet: + return FlowEVMMainNetChainID + case flow.Testnet: + return FlowEVMTestNetChainID + default: + return FlowEVMPreviewNetChainID + } +} diff --git a/fvm/evm/types/codeFinder.go b/fvm/evm/types/codeFinder.go new file mode 100644 index 00000000000..8f987f3ecb3 --- /dev/null +++ b/fvm/evm/types/codeFinder.go @@ -0,0 +1,161 @@ +package types + +import ( + "errors" + "fmt" + + gethCore "github.com/ethereum/go-ethereum/core" + gethVM "github.com/ethereum/go-ethereum/core/vm" +) + +func ValidationErrorCode(err error) ErrorCode { + // direct errors that are returned by the evm + switch err { + case gethVM.ErrGasUintOverflow: + return ValidationErrCodeGasUintOverflow + } + + // wrapped errors return from the evm + nested := errors.Unwrap(err) + switch nested { + case gethCore.ErrNonceTooLow: + return ValidationErrCodeNonceTooLow + case gethCore.ErrNonceTooHigh: + return ValidationErrCodeNonceTooHigh + case gethCore.ErrNonceMax: + return ValidationErrCodeNonceMax + case gethCore.ErrGasLimitReached: + return ValidationErrCodeGasLimitReached + case gethCore.ErrInsufficientFundsForTransfer: + return ValidationErrCodeInsufficientFundsForTransfer + case gethCore.ErrMaxInitCodeSizeExceeded: + return ValidationErrCodeMaxInitCodeSizeExceeded + case gethCore.ErrInsufficientFunds: + return ValidationErrCodeInsufficientFunds + case gethCore.ErrIntrinsicGas: + return ValidationErrCodeIntrinsicGas + case gethCore.ErrTxTypeNotSupported: + return ValidationErrCodeTxTypeNotSupported + case gethCore.ErrTipAboveFeeCap: + return ValidationErrCodeTipAboveFeeCap + case gethCore.ErrTipVeryHigh: + return ValidationErrCodeTipVeryHigh + case gethCore.ErrFeeCapVeryHigh: + return ValidationErrCodeFeeCapVeryHigh + case gethCore.ErrFeeCapTooLow: + return ValidationErrCodeFeeCapTooLow + case gethCore.ErrSenderNoEOA: + return ValidationErrCodeSenderNoEOA + case gethCore.ErrBlobFeeCapTooLow: + return ValidationErrCodeBlobFeeCapTooLow + default: + return ValidationErrCodeMisc + } +} + +func ExecutionErrorCode(err error) ErrorCode { + // execution VM errors are never wrapped + switch err { + case gethVM.ErrOutOfGas: + return ExecutionErrCodeOutOfGas + case gethVM.ErrCodeStoreOutOfGas: + return ExecutionErrCodeCodeStoreOutOfGas + case gethVM.ErrDepth: + return ExecutionErrCodeDepth + case gethVM.ErrInsufficientBalance: + return ExecutionErrCodeInsufficientBalance + case gethVM.ErrContractAddressCollision: + return ExecutionErrCodeContractAddressCollision + case gethVM.ErrExecutionReverted: + return ExecutionErrCodeExecutionReverted + case gethVM.ErrMaxInitCodeSizeExceeded: + return ExecutionErrCodeMaxInitCodeSizeExceeded + case gethVM.ErrMaxCodeSizeExceeded: + return ExecutionErrCodeMaxCodeSizeExceeded + case gethVM.ErrInvalidJump: + return ExecutionErrCodeInvalidJump + case gethVM.ErrWriteProtection: + return ExecutionErrCodeWriteProtection + case gethVM.ErrReturnDataOutOfBounds: + return ExecutionErrCodeReturnDataOutOfBounds + case gethVM.ErrGasUintOverflow: + return ExecutionErrCodeGasUintOverflow + case gethVM.ErrInvalidCode: + return ExecutionErrCodeInvalidCode + case gethVM.ErrNonceUintOverflow: + return ExecutionErrCodeNonceUintOverflow + default: + return ExecutionErrCodeMisc 
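+		// Note: ErrorFromCode below provides the inverse mapping; for example,
+		// round-tripping preserves the error:
+		//
+		//	code := ExecutionErrorCode(gethVM.ErrOutOfGas) // ExecutionErrCodeOutOfGas
+		//	err := ErrorFromCode(code)                     // gethVM.ErrOutOfGas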
+ } +} + +func ErrorFromCode(errorCode ErrorCode) error { + switch errorCode { + case ValidationErrCodeGasUintOverflow: + return gethVM.ErrGasUintOverflow + case ValidationErrCodeNonceTooLow: + return gethCore.ErrNonceTooLow + case ValidationErrCodeNonceTooHigh: + return gethCore.ErrNonceTooHigh + case ValidationErrCodeNonceMax: + return gethCore.ErrNonceMax + case ValidationErrCodeGasLimitReached: + return gethCore.ErrGasLimitReached + case ValidationErrCodeInsufficientFundsForTransfer: + return gethCore.ErrInsufficientFundsForTransfer + case ValidationErrCodeMaxInitCodeSizeExceeded: + return gethCore.ErrMaxInitCodeSizeExceeded + case ValidationErrCodeInsufficientFunds: + return gethCore.ErrInsufficientFunds + case ValidationErrCodeIntrinsicGas: + return gethCore.ErrIntrinsicGas + case ValidationErrCodeTxTypeNotSupported: + return gethCore.ErrTxTypeNotSupported + case ValidationErrCodeTipAboveFeeCap: + return gethCore.ErrTipAboveFeeCap + case ValidationErrCodeTipVeryHigh: + return gethCore.ErrTipVeryHigh + case ValidationErrCodeFeeCapVeryHigh: + return gethCore.ErrFeeCapVeryHigh + case ValidationErrCodeFeeCapTooLow: + return gethCore.ErrFeeCapTooLow + case ValidationErrCodeSenderNoEOA: + return gethCore.ErrSenderNoEOA + case ValidationErrCodeBlobFeeCapTooLow: + return gethCore.ErrBlobFeeCapTooLow + case ExecutionErrCodeOutOfGas: + return gethVM.ErrOutOfGas + case ExecutionErrCodeCodeStoreOutOfGas: + return gethVM.ErrCodeStoreOutOfGas + case ExecutionErrCodeDepth: + return gethVM.ErrDepth + case ExecutionErrCodeInsufficientBalance: + return gethVM.ErrInsufficientBalance + case ExecutionErrCodeContractAddressCollision: + return gethVM.ErrContractAddressCollision + case ExecutionErrCodeExecutionReverted: + return gethVM.ErrExecutionReverted + case ExecutionErrCodeMaxInitCodeSizeExceeded: + return gethVM.ErrMaxInitCodeSizeExceeded + case ExecutionErrCodeMaxCodeSizeExceeded: + return gethVM.ErrMaxCodeSizeExceeded + case ExecutionErrCodeInvalidJump: + return gethVM.ErrInvalidJump + case ExecutionErrCodeWriteProtection: + return gethVM.ErrWriteProtection + case ExecutionErrCodeReturnDataOutOfBounds: + return gethVM.ErrReturnDataOutOfBounds + case ExecutionErrCodeGasUintOverflow: + return gethVM.ErrGasUintOverflow + case ExecutionErrCodeInvalidCode: + return gethVM.ErrInvalidCode + case ExecutionErrCodeNonceUintOverflow: + return gethVM.ErrNonceUintOverflow + case ValidationErrCodeMisc: + return fmt.Errorf("validation error: %d", errorCode) + case ExecutionErrCodeMisc: + return fmt.Errorf("execution error: %d", errorCode) + } + + return fmt.Errorf("unknown error code: %d", errorCode) +} diff --git a/fvm/evm/types/emulator.go b/fvm/evm/types/emulator.go new file mode 100644 index 00000000000..9ec1636acf6 --- /dev/null +++ b/fvm/evm/types/emulator.go @@ -0,0 +1,109 @@ +package types + +import ( + "math" + "math/big" + + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethCrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers" +) + +var ( + // DefaultBlockLevelGasLimit is the default value for the block gas limit + // currently set to maximum and we don't consider any limit + // given number of included EVM transactions are naturally + // limited by the Flow block production limits. 
+ DefaultBlockLevelGasLimit = uint64(math.MaxUint64) + // DefaultBaseFee is the default base fee value for the block + // is set to zero but can be updated by the config + DefaultBaseFee = big.NewInt(0) + + // DefaultDirectCallBaseGasUsage holds the minimum gas + // charge for direct calls + DefaultDirectCallBaseGasUsage = uint64(21_000) + // DefaultDirectCallGasPrice captures the default + // gas price for the direct call. + // its set to zero currently given that we charge + // computation but we don't need to refund to any + // coinbase account. + DefaultDirectCallGasPrice = uint64(0) + + // anything block number above 0 works here + BlockNumberForEVMRules = big.NewInt(1) +) + +// BlockContext holds the context needed for the emulator operations +type BlockContext struct { + ChainID *big.Int + BlockNumber uint64 + BlockTimestamp uint64 + DirectCallBaseGasUsage uint64 + DirectCallGasPrice uint64 + TxCountSoFar uint + TotalGasUsedSoFar uint64 + GasFeeCollector Address + GetHashFunc func(n uint64) gethCommon.Hash + Random gethCommon.Hash + Tracer *tracers.Tracer + + // a set of extra precompiled contracts to be injected + ExtraPrecompiledContracts []PrecompiledContract +} + +// NewDefaultBlockContext returns a new default block context +func NewDefaultBlockContext(BlockNumber uint64) BlockContext { + return BlockContext{ + ChainID: FlowEVMPreviewNetChainID, + BlockNumber: BlockNumber, + DirectCallBaseGasUsage: DefaultDirectCallBaseGasUsage, + DirectCallGasPrice: DefaultDirectCallGasPrice, + GetHashFunc: func(n uint64) gethCommon.Hash { // default returns some random hash values + return gethCommon.BytesToHash(gethCrypto.Keccak256([]byte(new(big.Int).SetUint64(n).String()))) + }, + } +} + +// ReadOnlyBlockView provides a read only view of a block +type ReadOnlyBlockView interface { + // BalanceOf returns the balance of this address + BalanceOf(address Address) (*big.Int, error) + // NonceOf returns the nonce of this address + NonceOf(address Address) (uint64, error) + // CodeOf returns the code for this address + CodeOf(address Address) (Code, error) + // CodeHashOf returns the code hash for this address + CodeHashOf(address Address) ([]byte, error) +} + +// BlockView facilitates execution of a transaction or a direct evm call in the context of a block +// Any error returned by any of the methods (e.g. stateDB errors) if non-fatal stops the outer flow transaction +// if fatal stops the node. +// EVM validation errors and EVM execution errors are part of the returned result +// and should be handled separately. +type BlockView interface { + // DirectCall executes a direct call + DirectCall(call *DirectCall) (*Result, error) + + // RunTransaction executes an evm transaction + RunTransaction(tx *gethTypes.Transaction) (*Result, error) + + // DryRunTransaction executes unsigned transaction but does not persist the state changes, + // since transaction is not signed, from address is used as the signer. + DryRunTransaction(tx *gethTypes.Transaction, from gethCommon.Address) (*Result, error) + + // BatchRunTransactions executes a batch of evm transactions producing + // a slice of execution Result where each result corresponds to each + // item in the txs slice. 
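+	//
+	// An illustrative usage sketch (emulator, ctx, and txs are assumed to be
+	// provided by the caller):
+	//
+	//	bv, _ := emulator.NewBlockView(ctx)
+	//	results, _ := bv.BatchRunTransactions(txs)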
+	BatchRunTransactions(txs []*gethTypes.Transaction) ([]*Result, error)
+}
+
+// Emulator emulates an evm-compatible chain
+type Emulator interface {
+	// NewReadOnlyBlockView constructs a new read-only block view
+	NewReadOnlyBlockView(ctx BlockContext) (ReadOnlyBlockView, error)
+
+	// NewBlockView constructs a new block view
+	NewBlockView(ctx BlockContext) (BlockView, error)
+}
diff --git a/fvm/evm/types/errors.go b/fvm/evm/types/errors.go
new file mode 100644
index 00000000000..420da1be689
--- /dev/null
+++ b/fvm/evm/types/errors.go
@@ -0,0 +1,217 @@
+package types
+
+import (
+	"errors"
+	"fmt"
+)
+
+type ErrorCode uint16
+
+// internal error codes
+const (
+	// code reserved for no error
+	ErrCodeNoError ErrorCode = 0
+
+	// covers all other validation cases that don't have a specific code
+	ValidationErrCodeMisc ErrorCode = 100
+
+	// general execution error returned for cases that don't have a specific code
+	ExecutionErrCodeMisc ErrorCode = 400
+)
+
+// geth evm core errors (reserved range: [201-300) )
+const (
+	// the nonce of the tx is lower than expected
+	ValidationErrCodeNonceTooLow ErrorCode = iota + 201
+	// the nonce of the tx is higher than expected
+	ValidationErrCodeNonceTooHigh
+	// the tx sender account has reached the maximum nonce
+	ValidationErrCodeNonceMax
+	// not enough gas is available in the block to include this transaction
+	ValidationErrCodeGasLimitReached
+	// the transaction sender doesn't have enough funds for the transfer (topmost call only)
+	ValidationErrCodeInsufficientFundsForTransfer
+	// a creation transaction provides init code bigger than the init code size limit
+	ValidationErrCodeMaxInitCodeSizeExceeded
+	// the total cost of executing a transaction is higher than the balance of the user's account
+	ValidationErrCodeInsufficientFunds
+	// overflow detected when calculating the gas usage
+	ValidationErrCodeGasUintOverflow
+	// the transaction is specified to use less gas than required to start the invocation
+	ValidationErrCodeIntrinsicGas
+	// the transaction is not supported in the current network configuration
+	ValidationErrCodeTxTypeNotSupported
+	// the tip was set higher than the total fee cap
+	ValidationErrCodeTipAboveFeeCap
+	// an extremely big number is set for the tip field
+	ValidationErrCodeTipVeryHigh
+	// an extremely big number is set for the fee cap field
+	ValidationErrCodeFeeCapVeryHigh
+	// the transaction fee cap is less than the base fee of the block
+	ValidationErrCodeFeeCapTooLow
+	// the sender of a transaction is a contract
+	ValidationErrCodeSenderNoEOA
+	// the transaction fee cap is less than the blob gas fee of the block
+	ValidationErrCodeBlobFeeCapTooLow
+)
+
+// evm execution errors (reserved range: [301-400) )
+const (
+	// execution ran out of gas
+	ExecutionErrCodeOutOfGas ErrorCode = iota + 301
+	// contract creation code storage out of gas
+	ExecutionErrCodeCodeStoreOutOfGas
+	// max call depth exceeded
+	ExecutionErrCodeDepth
+	// insufficient balance for transfer
+	ExecutionErrCodeInsufficientBalance
+	// contract address collision
+	ExecutionErrCodeContractAddressCollision
+	// execution reverted
+	ExecutionErrCodeExecutionReverted
+	// max initcode size exceeded
+	ExecutionErrCodeMaxInitCodeSizeExceeded
+	// max code size exceeded
+	ExecutionErrCodeMaxCodeSizeExceeded
+	// invalid jump destination
+	ExecutionErrCodeInvalidJump
+	// write protection
+	ExecutionErrCodeWriteProtection
+	// return data out of bounds
+	ExecutionErrCodeReturnDataOutOfBounds
+	// gas uint64 overflow
+	ExecutionErrCodeGasUintOverflow
+	// invalid code: must not begin with 0xef
+	ExecutionErrCodeInvalidCode
+	// nonce uint64 overflow
+	ExecutionErrCodeNonceUintOverflow
+)
+
+var (
+	// ErrInvalidBalance is returned when an invalid amount is provided for transfer or balance change (e.g. negative)
+	ErrInvalidBalance = errors.New("invalid amount for transfer or balance change")
+
+	// ErrInsufficientComputation is returned when not enough computation is
+	// left in the context of the flow transaction to execute the evm operation.
+	ErrInsufficientComputation = errors.New("insufficient computation")
+
+	// ErrUnauthorizedMethodCall is returned for unauthorized method calls, usually emitted when calls are made on EOA accounts
+	ErrUnauthorizedMethodCall = errors.New("unauthorized method call")
+
+	// ErrWithdrawBalanceRounding is returned when a withdraw call has a balance that could
+	// result in a rounding error, i.e. the balance contains fractions smaller than 10^-8 FLOW (the smallest unit allowed to transfer).
+	ErrWithdrawBalanceRounding = errors.New("withdraw failed! the balance is susceptible to the rounding error")
+
+	// ErrUnexpectedEmptyResult is returned when a result is expected to be returned by the emulator
+	// but nil has been returned. This should never happen and is a safety error.
+	ErrUnexpectedEmptyResult = errors.New("unexpected empty result has been returned")
+
+	// ErrInsufficientTotalSupply is returned when a flow token
+	// withdraw request is received but there is not enough balance in the EVM native token vault;
+	// this should never happen, but it's a safety measure to protect Flow against EVM issues.
+	ErrInsufficientTotalSupply = NewFatalError(errors.New("insufficient total supply"))
+
+	// ErrNotImplemented is a fatal error raised when functionality is called that is not implemented
+	ErrNotImplemented = NewFatalError(errors.New("a functionality is called that is not implemented"))
+)
+
+// StateError is a non-fatal error, returned when a state operation
+// has failed (e.g. reaching the storage interaction limit)
+type StateError struct {
+	err error
+}
+
+// NewStateError returns a new StateError
+func NewStateError(rootCause error) StateError {
+	return StateError{
+		err: rootCause,
+	}
+}
+
+// Unwrap unwraps the underlying evm error
+func (err StateError) Unwrap() error {
+	return err.err
+}
+
+func (err StateError) Error() string {
+	return fmt.Sprintf("state error: %v", err.err)
+}
+
+// IsAStateError returns true if the error or any underlying errors
+// is a state error
+func IsAStateError(err error) bool {
+	return errors.As(err, &StateError{})
+}
+
+// FatalError is used for any error that is not user-related and where something
+// unusual has happened. Usually we stop the node when this happens,
+// given it might have a non-deterministic root cause.
+type FatalError struct {
+	err error
+}
+
+// NewFatalError returns a new FatalError
+func NewFatalError(rootCause error) FatalError {
+	return FatalError{
+		err: rootCause,
+	}
+}
+
+// Unwrap unwraps the underlying fatal error
+func (err FatalError) Unwrap() error {
+	return err.err
+}
+
+func (err FatalError) Error() string {
+	return fmt.Sprintf("fatal error: %v", err.err)
+}
+
+// IsAFatalError returns true if the error or underlying error
+// is of fatal type.
+func IsAFatalError(err error) bool {
+	return errors.As(err, &FatalError{})
+}
+
+// IsAInsufficientTotalSupplyError returns true if the
+// error type is InsufficientTotalSupplyError
+func IsAInsufficientTotalSupplyError(err error) bool {
+	return errors.Is(err, ErrInsufficientTotalSupply)
+}
+
+// IsWithdrawBalanceRoundingError returns true if the error type is
+// ErrWithdrawBalanceRounding
+func IsWithdrawBalanceRoundingError(err error) bool {
+	return errors.Is(err, ErrWithdrawBalanceRounding)
+}
+
+// IsAUnauthorizedMethodCallError returns true if the error type is
+// UnauthorizedMethodCallError
+func IsAUnauthorizedMethodCallError(err error) bool {
+	return errors.Is(err, ErrUnauthorizedMethodCall)
+}
+
+// BackendError is a non-fatal error that wraps errors returned from the backend
+type BackendError struct {
+	err error
+}
+
+// NewBackendError returns a new BackendError
+func NewBackendError(rootCause error) BackendError {
+	return BackendError{
+		err: rootCause,
+	}
+}
+
+// Unwrap unwraps the underlying evm error
+func (err BackendError) Unwrap() error {
+	return err.err
+}
+
+func (err BackendError) Error() string {
+	return fmt.Sprintf("backend error: %v", err.err)
+}
+
+// IsABackendError returns true if the error or
+// any underlying errors is a backend error
+func IsABackendError(err error) bool {
+	return errors.As(err, &BackendError{})
+}
diff --git a/fvm/evm/types/handler.go b/fvm/evm/types/handler.go
new file mode 100644
index 00000000000..4e4548e9c93
--- /dev/null
+++ b/fvm/evm/types/handler.go
@@ -0,0 +1,95 @@
+package types
+
+import (
+	gethCommon "github.com/ethereum/go-ethereum/common"
+	"github.com/onflow/cadence/common"
+)
+
+// EVM is an account inside FVM with special access to the underlying infrastructure,
+// which allows running a virtual EVM-based blockchain inside FVM.
+//
+// There are two ways to interact with this environment:
+//
+// First, passing a signed transaction (EOA account) to the `EVM.run` Cadence function
+// creates a new block, updates the internal merkle tree, and emits a new root hash.
+//
+// The second way is through a new form of account called cadence-owned-accounts (COAs),
+// which is represented and controlled through a resource, owned by a Flow account.
+// The owner of the COA resource can interact with the evm environment on behalf of the address stored on the resource. +// +// The evm environment shares the same native token as Flow, there are no new tokens minted. +// Other ERC-20 fungible tokens can be bridged between COA resources and Flow accounts. + +// ContractHandler handles operations on the evm environment +type ContractHandler interface { + // DeployCOA deploys a Cadence owned account and return the address + DeployCOA(uuid uint64) Address + + // AccountByAddress returns an account by address + // if isAuthorized is set, it allows for functionality like `call`, `deploy` + // should only be set for the cadence owned accounts only. + AccountByAddress(address Address, isAuthorized bool) Account + + // LastExecutedBlock returns information about the last executed block + LastExecutedBlock() *Block + + // Run runs a transaction in the evm environment, + // collects the gas fees, and transfers it to the gasFeeCollector account + Run(tx []byte, gasFeeCollector Address) *ResultSummary + + // DryRun simulates execution of the provided RLP-encoded and unsigned transaction. + // Because the transaction is unsigned the from address is required, since + // from address is normally derived from the transaction signature. + // The function should not have any persisted changes made to the state. + DryRun(tx []byte, from Address) *ResultSummary + + // BatchRun runs transaction batch in the evm environment, + // collect all the gas fees and transfers the gas fees to the gasFeeCollector account. + BatchRun(txs [][]byte, gasFeeCollector Address) []*ResultSummary + + // FlowTokenAddress returns the address where FLOW token is deployed + FlowTokenAddress() common.Address + + // EVMContractAddress returns the address where EVM is deployed + EVMContractAddress() common.Address + + // GenerateResourceUUID generates a new UUID for a resource + GenerateResourceUUID() uint64 + + // Constructs and commits a new block from the block proposal + CommitBlockProposal() +} + +// AddressAllocator allocates addresses, used by the handler +type AddressAllocator interface { + // AllocateAddress allocates an address to be used by a COA resource + AllocateCOAAddress(uuid uint64) Address + + // COAFactoryAddress returns the address for the COA factory + COAFactoryAddress() Address + + // NativeTokenBridgeAddress returns the address for the native token bridge + // used for deposit and withdraw calls + NativeTokenBridgeAddress() Address + + // AllocateAddress allocates an address by index to be used by a precompile contract + AllocatePrecompileAddress(index uint64) Address +} + +// BlockStore stores the chain of blocks +type BlockStore interface { + // LatestBlock returns the latest appended block + LatestBlock() (*Block, error) + + // BlockHash returns the hash of the block at the given height + BlockHash(height uint64) (gethCommon.Hash, error) + + // BlockProposal returns the active block proposal + BlockProposal() (*BlockProposal, error) + + // UpdateBlockProposal replaces the current block proposal with the ones passed + UpdateBlockProposal(*BlockProposal) error + + // CommitBlockProposal commits the block proposal and update the chain of blocks + CommitBlockProposal(*BlockProposal) error +} diff --git a/fvm/evm/types/offchain.go b/fvm/evm/types/offchain.go new file mode 100644 index 00000000000..5fc91e4aaea --- /dev/null +++ b/fvm/evm/types/offchain.go @@ -0,0 +1,42 @@ +package types + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// 
BackendStorageSnapshot provides a read only view of registers +type BackendStorageSnapshot interface { + GetValue(owner []byte, key []byte) ([]byte, error) +} + +// StorageProvider provides access to storage at +// specific time point in history of the EVM chain +type StorageProvider interface { + // GetSnapshotAt returns a readonly snapshot of storage + // at specific block (start state of the block before executing transactions) + GetSnapshotAt(evmBlockHeight uint64) (BackendStorageSnapshot, error) +} + +// BlockSnapshot provides access to the block information +// at specific block height +type BlockSnapshot interface { + // BlockContext constructs and returns the block context for the block + // + // Warning! the block hash provider on this one has to return empty + // for the current block to stay compatible with how on-chain EVM + // behaves. so if we are on block 10, and we query for the block hash on block + // 10 it should return empty hash. + BlockContext() (BlockContext, error) +} + +type BlockSnapshotProvider interface { + // GetSnapshotAt returns a readonly snapshot of block given evm block height + GetSnapshotAt(evmBlockHeight uint64) (BlockSnapshot, error) +} + +// ReplayResultCollector collects results of replay a block +type ReplayResultCollector interface { + // StorageRegisterUpdates returns the set of register changes + // (only the EVM-related registers) + StorageRegisterUpdates() map[flow.RegisterID]flow.RegisterValue +} diff --git a/fvm/evm/types/precompiled.go b/fvm/evm/types/precompiled.go new file mode 100644 index 00000000000..1344280b69a --- /dev/null +++ b/fvm/evm/types/precompiled.go @@ -0,0 +1,132 @@ +package types + +import ( + "bytes" + "fmt" + + gethVM "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rlp" +) + +// PrecompiledContract wraps gethVM precompiles with +// functionality to return where the contract is deployed +type PrecompiledContract interface { + // PrecompiledContract provides an interface for + // calling requiredGas and run + gethVM.PrecompiledContract + // Address returns the address where the precompile is deployed + Address() Address +} + +// RunCall captures a call to the Run method of a precompiled contract +type RunCall struct { + Output []byte + ErrorMsg string +} + +// PrecompiledCalls captures all the calls to a precompiled contract +type PrecompiledCalls struct { + Address Address + RequiredGasCalls []uint64 + RunCalls []RunCall +} + +// IsEmpty returns true if no requiredGas or run calls is captured +func (pc *PrecompiledCalls) IsEmpty() bool { + return len(pc.RequiredGasCalls) == 0 && len(pc.RunCalls) == 0 +} + +const ( + AggregatedPrecompiledCallsEncodingByteSize int = 1 + AggregatedPrecompiledCallsEncodingVersion uint8 = 2 // current version +) + +// AggregatedPrecompiledCalls aggregates a list of precompiled calls +// the list should be sorted by the address +type AggregatedPrecompiledCalls []PrecompiledCalls + +// IsEmpty returns true if all of the underlying precompiled calls are empty +func (apc AggregatedPrecompiledCalls) IsEmpty() bool { + isEmpty := true + for _, ap := range apc { + if !ap.IsEmpty() { + isEmpty = false + } + } + return isEmpty +} + +// Encode encodes the aggregated precompile calls using rlp encoding +// if there is no underlying call, we encode to empty bytes to save +// space on transaction results (common case) +// TODO: In the future versions of the encoding we might skip encoding the inputs +// given it just takes space and not needed during execution time +func (apc 
AggregatedPrecompiledCalls) Encode() ([]byte, error) { + if apc.IsEmpty() { + return []byte{}, nil + } + buffer := bytes.NewBuffer(make([]byte, 0)) + // write the encoding version + buffer.WriteByte(AggregatedPrecompiledCallsEncodingVersion) + // then RLP encode + err := rlp.Encode(buffer, apc) + return buffer.Bytes(), err +} + +// AggregatedPrecompileCallsFromEncoded constructs an AggregatedPrecompileCalls from encoded data +func AggregatedPrecompileCallsFromEncoded(encoded []byte) (AggregatedPrecompiledCalls, error) { + apc := make([]PrecompiledCalls, 0) + if len(encoded) == 0 { + return apc, nil + } + switch int(encoded[0]) { + case 1: + return decodePrecompiledCallsV1(encoded) + case 2: + return apc, rlp.DecodeBytes(encoded[AggregatedPrecompiledCallsEncodingByteSize:], &apc) + default: + return nil, fmt.Errorf("unknown type for encoded AggregatedPrecompiledCalls received %d", int(encoded[0])) + } +} + +func decodePrecompiledCallsV1(encoded []byte) (AggregatedPrecompiledCalls, error) { + legacy := make([]precompiledCallsV1, 0) + err := rlp.DecodeBytes(encoded[AggregatedPrecompiledCallsEncodingByteSize:], &legacy) + if err != nil { + return nil, err + } + apc := make([]PrecompiledCalls, len(legacy)) + for i, ap := range legacy { + reqCalls := make([]uint64, len(ap.RequiredGasCalls)) + for j, rc := range ap.RequiredGasCalls { + reqCalls[j] = rc.Output + } + runCalls := make([]RunCall, len(ap.RunCalls)) + for j, rc := range ap.RunCalls { + runCalls[j] = RunCall{ + Output: rc.Output, + ErrorMsg: rc.ErrorMsg, + } + } + apc[i] = PrecompiledCalls{ + Address: ap.Address, + RequiredGasCalls: reqCalls, + RunCalls: runCalls, + } + } + return apc, nil +} + +// legacy encoding types +type precompiledCallsV1 struct { + Address Address + RequiredGasCalls []struct { + Input []byte + Output uint64 + } + RunCalls []struct { + Input []byte + Output []byte + ErrorMsg string + } +} diff --git a/fvm/evm/types/precompiled_test.go b/fvm/evm/types/precompiled_test.go new file mode 100644 index 00000000000..9c5febb7d1a --- /dev/null +++ b/fvm/evm/types/precompiled_test.go @@ -0,0 +1,107 @@ +package types_test + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" +) + +func TestPrecompiledCallsEncoding(t *testing.T) { + t.Run("test latest version of encoding", func(t *testing.T) { + // empty precompiled calls + empty := types.AggregatedPrecompiledCalls{ + types.PrecompiledCalls{ + Address: testutils.RandomAddress(t), + }, + types.PrecompiledCalls{ + Address: testutils.RandomAddress(t), + }, + } + + encoded, err := empty.Encode() + require.NoError(t, err) + require.Empty(t, encoded) + + apc := types.AggregatedPrecompiledCalls{ + types.PrecompiledCalls{ + Address: testutils.RandomAddress(t), + RequiredGasCalls: []uint64{2}, + RunCalls: []types.RunCall{}, + }, + } + + encoded, err = apc.Encode() + require.NoError(t, err) + require.NotEmpty(t, encoded) + + ret, err := types.AggregatedPrecompileCallsFromEncoded(encoded) + require.NoError(t, err) + require.Equal(t, apc, ret) + + apc = types.AggregatedPrecompiledCalls{ + types.PrecompiledCalls{ + Address: testutils.RandomAddress(t), + RequiredGasCalls: []uint64{2}, + RunCalls: []types.RunCall{ + { + Output: []byte{5, 6}, + }, + { + Output: []byte{}, + ErrorMsg: "Some error msg", + }, + }, + }, + } + + encoded, err = apc.Encode() + require.NoError(t, err) + require.NotEmpty(t, encoded) + + ret, err = 
types.AggregatedPrecompileCallsFromEncoded(encoded) + require.NoError(t, err) + require.Equal(t, apc, ret) + + }) + + t.Run("test latest version of encoding v1", func(t *testing.T) { + encodedV1, err := hex.DecodeString("01f7f69408190002143239aaaed0d52d4a5bf218d62453ffc3c20102dcc782030482050680d3820304808e536f6d65206572726f72206d7367") + require.NoError(t, err) + expected := types.AggregatedPrecompiledCalls{ + types.PrecompiledCalls{ + Address: types.Address{0x8, 0x19, 0x0, 0x2, 0x14, 0x32, 0x39, 0xaa, 0xae, 0xd0, 0xd5, 0x2d, 0x4a, 0x5b, 0xf2, 0x18, 0xd6, 0x24, 0x53, 0xff}, + RequiredGasCalls: []uint64{2}, + RunCalls: []types.RunCall{ + { + Output: []byte{5, 6}, + }, + { + Output: []byte{}, + ErrorMsg: "Some error msg", + }, + }, + }, + } + + apc, err := types.AggregatedPrecompileCallsFromEncoded(encodedV1) + require.NoError(t, err) + require.Equal(t, len(expected), len(apc)) + for i := range expected { + require.Equal(t, expected[i].Address, apc[i].Address) + require.Equal(t, len(expected[i].RequiredGasCalls), len(apc[i].RequiredGasCalls)) + for j := range expected[i].RequiredGasCalls { + require.Equal(t, expected[i].RequiredGasCalls[j], apc[i].RequiredGasCalls[j]) + } + require.Equal(t, len(expected[i].RunCalls), len(apc[i].RunCalls)) + for j := range expected[i].RunCalls { + require.Equal(t, expected[i].RunCalls[j].ErrorMsg, apc[i].RunCalls[j].ErrorMsg) + require.Equal(t, expected[i].RunCalls[j].Output, apc[i].RunCalls[j].Output) + } + } + }) + +} diff --git a/fvm/evm/types/proof.go b/fvm/evm/types/proof.go new file mode 100644 index 00000000000..6d2ed3803ab --- /dev/null +++ b/fvm/evm/types/proof.go @@ -0,0 +1,169 @@ +package types + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/rlp" + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/sema" + cadenceRLP "github.com/onflow/cadence/stdlib/rlp" + + "github.com/onflow/flow-go/model/flow" +) + +type FlowAddress flow.Address + +var FlowAddressCadenceType = cadence.AddressType +var FlowAddressSemaType = sema.TheAddressType + +func (addr FlowAddress) ToCadenceValue() cadence.Address { + return cadence.Address(addr) +} + +type PublicPath string + +var PublicPathCadenceType = cadence.PathType +var PublicPathSemaType = sema.PathType + +func (p PublicPath) ToCadenceValue() cadence.Path { + return cadence.Path{ + Domain: common.PathDomainPublic, + Identifier: string(p), + } +} + +type SignedData []byte + +var SignedDataCadenceType = cadence.NewVariableSizedArrayType(cadence.UInt8Type) +var SignedDataSemaType = sema.ByteArrayType + +func (sd SignedData) ToCadenceValue() cadence.Array { + values := make([]cadence.Value, len(sd)) + for i, v := range sd { + values[i] = cadence.NewUInt8(v) + } + return cadence.NewArray(values).WithType(SignedDataCadenceType) +} + +type KeyIndices []uint64 + +var KeyIndicesCadenceType = cadence.NewVariableSizedArrayType(cadence.UInt64Type) +var KeyIndicesSemaType = &sema.VariableSizedType{Type: sema.UInt64Type} + +func (ki KeyIndices) ToCadenceValue() cadence.Array { + values := make([]cadence.Value, len(ki)) + for i, v := range ki { + values[i] = cadence.NewUInt64(v) + } + return cadence.NewArray(values).WithType(KeyIndicesCadenceType) +} + +func (ki KeyIndices) Count() int { + return len(ki) +} + +type Signature []byte + +var SignatureCadenceType = cadence.NewVariableSizedArrayType(cadence.UInt8Type) + +func (s Signature) ToCadenceValue() cadence.Array { + values := make([]cadence.Value, len(s)) + for i, v := range s { + values[i] = cadence.NewUInt8(v) + } + return 
cadence.NewArray(values).WithType(SignatureCadenceType) +} + +type Signatures []Signature + +var SignaturesCadenceType = cadence.NewVariableSizedArrayType(SignatureCadenceType) +var SignaturesSemaType = sema.ByteArrayArrayType + +func (ss Signatures) ToCadenceValue() cadence.Array { + values := make([]cadence.Value, len(ss)) + for i, s := range ss { + values[i] = s.ToCadenceValue() + } + return cadence.NewArray(values).WithType(SignaturesCadenceType) +} + +func (ss Signatures) Count() int { + return len(ss) +} + +// COAOwnershipProofInContext contains all the data +// needed to verify a COAOwnership proof. +// The proof is verified by checking the signatures over the +// input signed data (SignedData), then loading the resource +// capability from the provided path in the proof, and +// at last checking if the EVMAddress of the resource matches +// the provided one. +type COAOwnershipProofInContext struct { + COAOwnershipProof + SignedData SignedData + EVMAddress Address +} + +func NewCOAOwnershipProofInContext(sd []byte, addr Address, encodedProof []byte) (*COAOwnershipProofInContext, error) { + proof, err := COAOwnershipProofFromEncoded(encodedProof) + if err != nil { + return nil, err + } + return &COAOwnershipProofInContext{ + COAOwnershipProof: *proof, + SignedData: sd, + EVMAddress: addr, + }, nil +} + +func (proof *COAOwnershipProofInContext) ToCadenceValues() []cadence.Value { + return []cadence.Value{ + proof.Address.ToCadenceValue(), + proof.CapabilityPath.ToCadenceValue(), + proof.SignedData.ToCadenceValue(), + proof.KeyIndices.ToCadenceValue(), + proof.Signatures.ToCadenceValue(), + proof.EVMAddress.ToCadenceValue(), + } +} + +// COAOwnershipProof is a proof that a flow account +// controls a COA resource. To do so, the flow +// account (Address is address of this account) +// provides signatures (with proper total weights) over an arbitrary data input +// set by proof requester. KeyIndices captures, +// which account keys has been used for signatures. +// Beside signatures, it provides the CapabilityPath +// where the resource EVMAddress capability is stored. 
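+//
+// An illustrative encode/decode round trip (proof is assumed to be a
+// populated COAOwnershipProof):
+//
+//	encoded, _ := proof.Encode()
+//	p, _ := COAOwnershipProofFromEncoded(encoded)
+//	n, _ := COAOwnershipProofSignatureCountFromEncoded(encoded)
+//	// n == p.KeyIndices.Count()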
+type COAOwnershipProof struct { + KeyIndices KeyIndices + Address FlowAddress + CapabilityPath PublicPath + Signatures Signatures +} + +func (p *COAOwnershipProof) Encode() ([]byte, error) { + return rlp.EncodeToBytes(p) +} + +func COAOwnershipProofSignatureCountFromEncoded(data []byte) (int, error) { + // first break into proof encoded items + encodedItems, _, err := cadenceRLP.DecodeList(data, 0) + if err != nil { + return 0, err + } + // first encoded item is KeyIndices + // so reading number of elements in the key indices + // should return the count without the need to fully decode + KeyIndices, _, err := cadenceRLP.DecodeList(encodedItems[0], 0) + return len(KeyIndices), err +} + +func COAOwnershipProofFromEncoded(data []byte) (*COAOwnershipProof, error) { + if len(data) == 0 { + return nil, fmt.Errorf("empty proof") + } + p := &COAOwnershipProof{} + return p, rlp.DecodeBytes(data, p) +} diff --git a/fvm/evm/types/proof_test.go b/fvm/evm/types/proof_test.go new file mode 100644 index 00000000000..b4ad31c8030 --- /dev/null +++ b/fvm/evm/types/proof_test.go @@ -0,0 +1,24 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" +) + +func TestProof(t *testing.T) { + proof := testutils.COAOwnershipProofFixture(t) + encoded, err := proof.Encode() + require.NoError(t, err) + + ret, err := types.COAOwnershipProofFromEncoded(encoded) + require.NoError(t, err) + require.Equal(t, proof, ret) + + count, err := types.COAOwnershipProofSignatureCountFromEncoded(encoded) + require.NoError(t, err) + require.Equal(t, 2, count) +} diff --git a/fvm/evm/types/result.go b/fvm/evm/types/result.go new file mode 100644 index 00000000000..19e54311957 --- /dev/null +++ b/fvm/evm/types/result.go @@ -0,0 +1,355 @@ +package types + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethVM "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rlp" +) + +// InvalidTransactionGasCost is a gas cost we charge when +// a transaction or call fails at validation step. +// in typical evm environment this doesn't exist given +// if a transaction is invalid it won't be included +// and no fees can be charged for users even though +// the validation has used some resources, in our case +// given we charge the fees on flow transaction and we +// are doing on chain validation we can/should charge the +// user for the validation fee. +const InvalidTransactionGasCost = 1_000 + +// ChecksumLength captures number of bytes a checksum uses +const ChecksumLength = 4 + +// Status captures the status of an interaction to the emulator +type Status uint8 + +var ( + StatusUnknown Status = 0 + // StatusInvalid shows that the transaction was not a valid + // transaction and rejected to be executed and included in any block. 
+	StatusInvalid Status = 1
+	// StatusFailed shows that the transaction has been executed,
+	// but the output of the execution was an error;
+	// for this case a block is formed and receipts are available
+	StatusFailed Status = 2
+	// StatusSuccessful shows that the transaction has been executed and the execution has returned success;
+	// for this case a block is formed and receipts are available
+	StatusSuccessful Status = 3
+)
+
+// ResultSummary summarizes the outcome of an EVM call or tx run
+type ResultSummary struct {
+	Status                  Status
+	ErrorCode               ErrorCode
+	ErrorMessage            string
+	GasConsumed             uint64
+	MaxGasConsumed          uint64
+	DeployedContractAddress *Address
+	ReturnedData            Data
+}
+
+// NewInvalidResult creates a new result that holds a transaction validation
+// error as well as the defined gas cost for validation.
+func NewInvalidResult(tx *gethTypes.Transaction, err error) *Result {
+	return &Result{
+		TxType:          tx.Type(),
+		TxHash:          tx.Hash(),
+		ValidationError: err,
+		GasConsumed:     InvalidTransactionGasCost,
+	}
+}
+
+// Result captures the result of an interaction with the emulator;
+// it could be the output of a direct call or the output of running an
+// evm transaction.
+// It's more comprehensive than a typical evm receipt: usually
+// receipt generation requires some extra calculation (e.g. the deployed contract address),
+// but we take a different approach here and include more data, so that
+// it requires less work for anyone who tracks and consumes results.
+type Result struct {
+	// captures error returned during validation step (pre-checks)
+	ValidationError error
+	// captures error returned by the EVM
+	VMError error
+	// type of transaction defined by the evm package;
+	// see DirectCallTxType as an extra type we added for direct calls.
+	TxType uint8
+	// total gas consumed during execution, not including the refunded gas
+	GasConsumed uint64
+	// maximum gas consumed during execution, excluding gas refunds
+	MaxGasConsumed uint64
+	// total gas used by the block after this tx execution
+	CumulativeGasUsed uint64
+	// the address where the contract is deployed (if any)
+	DeployedContractAddress *Address
+	// returned data from a function call
+	ReturnedData []byte
+	// EVM logs (events that are emitted by evm)
+	Logs []*gethTypes.Log
+	// TxHash holds the cached value of the tx hash
+	TxHash gethCommon.Hash
+	// transaction block inclusion index
+	Index uint16
+	// PrecompiledCalls captures an encoded list of calls to the precompile
+	// during the execution of the transaction
+	PrecompiledCalls []byte
+	// StateChangeCommitment captures a commitment over the state change (delta)
+	StateChangeCommitment []byte
+}
+
+// Invalid returns true if the transaction has been rejected
+func (res *Result) Invalid() bool {
+	return res.ValidationError != nil
+}
+
+// Failed returns true if the transaction has been executed but the VM has returned an error
+func (res *Result) Failed() bool {
+	return res.VMError != nil
+}
+
+// Successful returns true if the transaction has been executed without any errors
+func (res *Result) Successful() bool {
+	return !res.Failed() && !res.Invalid()
+}
+
+// SetValidationError sets the validation error
+// and also sets the gas used to the fixed invalid gas usage
+func (res *Result) SetValidationError(err error) {
+	res.ValidationError = err
+	// for invalid transactions we only set the gasConsumed
+	// for metering reasons, yet we do not set the CumulativeGasUsed
+	// since we won't consider them for block construction purposes
+	res.GasConsumed = InvalidTransactionGasCost
+}
+
+// VMErrorString returns the VM error as a string; if there is no error it returns an empty string
+func (res *Result) VMErrorString() string {
+	if res.VMError != nil {
+		return res.VMError.Error()
+	}
+	return ""
+}
+
+// ErrorMsg returns the error message of any VM or validation error.
+// Both errors should never happen at the same time,
+// but if they do, the validation error takes priority.
+func (res *Result) ErrorMsg() string {
+	errorMsg := ""
+	if res.VMError != nil {
+		errorMsg = res.VMError.Error()
+	}
+	if res.ValidationError != nil {
+		errorMsg = res.ValidationError.Error()
+	}
+	return errorMsg
+}
+
+// ErrorMessageWithRevertReason returns the error message, if any VM or validation
+// error occurred. Execution reverts coming from `assert` or `require` Solidity
+// statements are parsed into their human-friendly representation.
+func (res *Result) ErrorMessageWithRevertReason() string {
+	errorMessage := res.ErrorMsg()
+
+	if res.ResultSummary().ErrorCode == ExecutionErrCodeExecutionReverted {
+		reason, errUnpack := abi.UnpackRevert(res.ReturnedData)
+		if errUnpack == nil {
+			errorMessage = fmt.Sprintf("%v: %v", gethVM.ErrExecutionReverted.Error(), reason)
+		}
+	}
+
+	return errorMessage
+}
+
+// RLPEncodedLogs returns the rlp encoding of the logs
+func (res *Result) RLPEncodedLogs() ([]byte, error) {
+	var encodedLogs []byte
+	var err error
+	if len(res.Logs) > 0 {
+		encodedLogs, err = rlp.EncodeToBytes(res.Logs)
+		if err != nil {
+			return encodedLogs, err
+		}
+	}
+	return encodedLogs, nil
+}
+
+// DeployedContractAddressString returns a string of the deployed address;
+// it returns an empty string if the deployed address is nil
+func (res *Result) DeployedContractAddressString() string {
+	deployedAddress := ""
+	if res.DeployedContractAddress != nil {
+		deployedAddress = res.DeployedContractAddress.String()
+	}
+	return deployedAddress
+}
+
+// StateChangeChecksum constructs a checksum
+// based on the state change commitment on the result
+func (res *Result) StateChangeChecksum() [ChecksumLength]byte {
+	return SliceToChecksumLength(res.StateChangeCommitment)
+}
+
+// SliceToChecksumLength cuts the first 4 bytes of the input and converts it into a checksum
+func SliceToChecksumLength(input []byte) [ChecksumLength]byte {
+	// the first 4 bytes of StateChangeCommitment are used as the checksum
+	var checksum [ChecksumLength]byte
+	if len(input) >= ChecksumLength {
+		copy(checksum[:ChecksumLength], input[:ChecksumLength])
+	}
+	return checksum
+}
+
+// Receipt constructs an EVM-style receipt that
+// can be used by json-rpc and other integrations.
+//
+// This method is also used to construct the block receipt root hash,
+// which requires the returned receipt to satisfy RLP encoding and cover these fields:
+// Type (txType), PostState or Status, CumulativeGasUsed, Logs and Logs Bloom,
+// and for each log: Address, Topics, Data (consensus fields).
+// During execution we also fill in BlockNumber, TxIndex, Index (event index).
+func (res *Result) Receipt() *gethTypes.Receipt {
+	if res.Invalid() {
+		return nil
+	}
+
+	receipt := &gethTypes.Receipt{
+		TxHash:            res.TxHash,
+		GasUsed:           res.GasConsumed,
+		CumulativeGasUsed: res.CumulativeGasUsed,
+		Logs:              res.Logs,
+	}
+
+	// only add the tx type if not a direct call
+	if res.TxType != DirectCallTxType {
+		receipt.Type = res.TxType
+	}
+
+	if res.DeployedContractAddress != nil {
+		receipt.ContractAddress = res.DeployedContractAddress.ToCommon()
+	}
+	if res.Failed() {
+		receipt.Status = gethTypes.ReceiptStatusFailed
+	} else {
+		receipt.Status = gethTypes.ReceiptStatusSuccessful
+	}
+
+	receipt.Bloom = gethTypes.CreateBloom(receipt)
+	return receipt
+}
+
+// LightReceipt constructs a light receipt from the result
+// that is used for storing in the block proposal.
+func (res *Result) LightReceipt() *LightReceipt {
+	if res.Invalid() {
+		return nil
+	}
+
+	receipt := &LightReceipt{
+		CumulativeGasUsed: res.CumulativeGasUsed,
+	}
+
+	receipt.Logs = make([]LightLog, len(res.Logs))
+	for i, l := range res.Logs {
+		receipt.Logs[i] = LightLog{
+			Address: l.Address,
+			Topics:  l.Topics,
+			Data:    l.Data,
+		}
+	}
+
+	// only add the tx type if not a direct call
+	if res.TxType != DirectCallTxType {
+		receipt.Type = res.TxType
+	}
+
+	// add status
+	if res.Failed() {
+		receipt.Status = uint8(gethTypes.ReceiptStatusFailed)
+	} else {
+		receipt.Status = uint8(gethTypes.ReceiptStatusSuccessful)
+	}
+
+	return receipt
+}
+
+// ResultSummary constructs a result summary
+func (res *Result) ResultSummary() *ResultSummary {
+	rs := &ResultSummary{
+		GasConsumed:             res.GasConsumed,
+		MaxGasConsumed:          res.MaxGasConsumed,
+		DeployedContractAddress: res.DeployedContractAddress,
+		ReturnedData:            res.ReturnedData,
+		Status:                  StatusSuccessful,
+	}
+
+	if res.Invalid() {
+		rs.ErrorCode = ValidationErrorCode(res.ValidationError)
+		rs.ErrorMessage = res.ValidationError.Error()
+		rs.Status = StatusInvalid
+		return rs
+	}
+
+	if res.Failed() {
+		rs.ErrorCode = ExecutionErrorCode(res.VMError)
+		rs.ErrorMessage = res.VMError.Error()
+		rs.Status = StatusFailed
+		return rs
+	}
+
+	return rs
+}
+
+// LightLog captures only the consensus fields of an EVM log,
+// used by the LightReceipt
+type LightLog struct {
+	// address of the contract that generated the event
+	Address gethCommon.Address
+	// list of topics provided by the contract.
+	Topics []gethCommon.Hash
+	// supplied by the contract, usually ABI-encoded
+	Data []byte
+}
+
+// LightReceipt captures only the consensus fields of
+// a receipt, making storage of receipts for the purpose
+// of trie building more storage efficient.
+//
+// Note that we don't store Bloom, as we can reconstruct it
+// later. We don't have PostState, and we use a uint8 for
+// Status as it currently only acts as a boolean.
+// Data shows that using the light receipt results in a 60% storage reduction
+// for block proposals, and the extra overheads are manageable.
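+//
+// As an illustrative sketch of the intended round trip (mirroring the
+// accompanying tests), reconstructing receipts from light receipts
+// preserves the receipt root hash:
+//
+//	full := res.Receipt()
+//	light := res.LightReceipt().ToReceipt()
+//	// gethTypes.DeriveSha over both receipt sets yields the same root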
+type LightReceipt struct { + Type uint8 + Status uint8 + CumulativeGasUsed uint64 + Logs []LightLog +} + +// ToReceipt constructs a Receipt from the LightReceipt +// Warning, this only populates the consensus fields +// and if you want the full data, use the receipt +// from the result. +func (lr *LightReceipt) ToReceipt() *gethTypes.Receipt { + receipt := &gethTypes.Receipt{ + Type: lr.Type, + Status: uint64(lr.Status), + CumulativeGasUsed: lr.CumulativeGasUsed, + } + + receipt.Logs = make([]*gethTypes.Log, len(lr.Logs)) + for i, l := range lr.Logs { + receipt.Logs[i] = &gethTypes.Log{ + Address: l.Address, + Topics: l.Topics, + Data: l.Data, + } + } + + receipt.Bloom = gethTypes.CreateBloom(receipt) + return receipt +} diff --git a/fvm/evm/types/result_test.go b/fvm/evm/types/result_test.go new file mode 100644 index 00000000000..dcdfaf71000 --- /dev/null +++ b/fvm/evm/types/result_test.go @@ -0,0 +1,28 @@ +package types_test + +import ( + "testing" + + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethTrie "github.com/ethereum/go-ethereum/trie" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/testutils" +) + +func TestLightReceipts(t *testing.T) { + resCount := 10 + receipts := make(gethTypes.Receipts, resCount) + reconstructedReceipts := make(gethTypes.Receipts, resCount) + var totalGas uint64 + for i := 0; i < resCount; i++ { + res := testutils.RandomResultFixture(t) + receipts[i] = res.Receipt() + reconstructedReceipts[i] = res.LightReceipt().ToReceipt() + totalGas += res.GasConsumed + } + // the root hash for reconstructed receipts should match the receipts + root1 := gethTypes.DeriveSha(receipts, gethTrie.NewStackTrie(nil)) + root2 := gethTypes.DeriveSha(reconstructedReceipts, gethTrie.NewStackTrie(nil)) + require.Equal(t, root1, root2) +} diff --git a/fvm/evm/types/state.go b/fvm/evm/types/state.go new file mode 100644 index 00000000000..b7bdeb56b2a --- /dev/null +++ b/fvm/evm/types/state.go @@ -0,0 +1,193 @@ +package types + +import ( + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + gethVM "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rlp" + "github.com/holiman/uint256" + "github.com/onflow/crypto/hash" +) + +// StateDB acts as the main interface to the EVM runtime +type StateDB interface { + gethVM.StateDB + + // Commit commits the changes and + // returns a commitment over changes + // setting `finalize` flag + // calls a subsequent call to Finalize + // deferring finalization and calling it once at the end + // improves efficiency of batch operations. + Commit(finalize bool) (hash.Hash, error) + + // Finalize flushes all the changes + // to the permanent storage + Finalize() error + + // Logs collects and prepares logs + Logs( + blockNumber uint64, + txHash gethCommon.Hash, + txIndex uint, + ) []*gethTypes.Log + + // Preimages returns a map of preimages + Preimages() map[gethCommon.Hash][]byte + + // Reset resets uncommitted changes and transient artifacts such as error, logs, + // preimages, access lists, ... + // The method is often called between execution of different transactions + Reset() + + // Error returns any error that has been cached so far by the state. 
+	Error() error
+}
+
+// ReadOnlyView provides a readonly view of the state
+type ReadOnlyView interface {
+	// Exist returns true if the address exists in the state
+	Exist(gethCommon.Address) (bool, error)
+	// IsCreated returns true if the address has been created in this tx
+	IsCreated(gethCommon.Address) bool
+	// IsNewContract returns true if the address is a new contract:
+	// either it is a new account, or it had balance but no code before
+	IsNewContract(addr gethCommon.Address) bool
+	// HasSelfDestructed returns true if an address has self-destructed;
+	// it also returns the balance of the address before the self-destruct call
+	HasSelfDestructed(gethCommon.Address) (bool, *uint256.Int)
+	// GetBalance returns the balance of an address
+	GetBalance(gethCommon.Address) (*uint256.Int, error)
+	// GetNonce returns the nonce of an address
+	GetNonce(gethCommon.Address) (uint64, error)
+	// GetCode returns the code of an address
+	GetCode(gethCommon.Address) ([]byte, error)
+	// GetCodeHash returns the code hash of an address
+	GetCodeHash(gethCommon.Address) (gethCommon.Hash, error)
+	// GetCodeSize returns the code size of an address
+	GetCodeSize(gethCommon.Address) (int, error)
+	// GetState returns the value for a slot in the main storage
+	GetState(SlotAddress) (gethCommon.Hash, error)
+	// GetStorageRoot returns some sort of root for the given address.
+	// Warning! Since StateDB doesn't construct a Merkle tree under the hood,
+	// the behavior of this endpoint is as follows:
+	// - if an account doesn't exist it returns common.Hash{}
+	// - if the account is an EOA it returns gethCommon.EmptyRootHash
+	// - else it returns a unique hash value as the root, but this returned
+	//   value is not a real root of the account's storage trie
+	GetStorageRoot(gethCommon.Address) (gethCommon.Hash, error)
+	// GetTransientState returns the value for a slot in the transient storage
+	GetTransientState(SlotAddress) gethCommon.Hash
+	// GetRefund returns the total amount of (gas) refund
+	GetRefund() uint64
+	// AddressInAccessList checks if an address is in the access list
+	AddressInAccessList(gethCommon.Address) bool
+	// SlotInAccessList checks if a slot is in the access list
+	SlotInAccessList(SlotAddress) (addressOk bool, slotOk bool)
+}
+
+// HotView captures a high-level mutable view of the state
+type HotView interface {
+	ReadOnlyView
+
+	// CreateAccount creates a new account
+	CreateAccount(gethCommon.Address) error
+	// CreateContract is used whenever a contract is created. This may be preceded
+	// by CreateAccount, but that is not required if it already existed in the
+	// state due to funds sent beforehand.
+	CreateContract(gethCommon.Address)
+	// SelfDestruct sets the flag for destruction of the account after execution
+	SelfDestruct(gethCommon.Address) error
+
+	// SubBalance subtracts the amount from the balance of the given address
+	SubBalance(gethCommon.Address, *uint256.Int) error
+	// AddBalance adds the amount to the balance of the given address
+	AddBalance(gethCommon.Address, *uint256.Int) error
+	// SetNonce sets the nonce for the given address
+	SetNonce(gethCommon.Address, uint64) error
+	// SetCode sets the code for the given address
+	SetCode(gethCommon.Address, []byte) error
+
+	// SetState sets a value for the given slot in the main storage
+	// and returns the previous value.
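+	//
+	// For example (an illustrative sketch; `view`, `addr`, `key` and
+	// `newVal` are placeholders):
+	//
+	//	prev, err := view.SetState(SlotAddress{Address: addr, Key: key}, newVal)
+	//	// prev holds the value stored in the slot before this call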
+	SetState(SlotAddress, gethCommon.Hash) (gethCommon.Hash, error)
+	// SetTransientState sets a value for the given slot in the transient storage
+	SetTransientState(SlotAddress, gethCommon.Hash)
+
+	// AddRefund adds the amount to the total (gas) refund
+	AddRefund(uint64) error
+	// SubRefund subtracts the amount from the total (gas) refund
+	SubRefund(uint64) error
+
+	// AddAddressToAccessList adds an address to the per-transaction access list
+	AddAddressToAccessList(addr gethCommon.Address) (addressAdded bool)
+	// AddSlotToAccessList adds a slot to the per-transaction access list
+	AddSlotToAccessList(SlotAddress) (addressAdded, slotAdded bool)
+
+	// AddLog appends a log to the log collection
+	AddLog(*gethTypes.Log)
+	// AddPreimage adds a preimage to the list of preimages (input -> hash mapping)
+	AddPreimage(gethCommon.Hash, []byte)
+}
+
+// BaseView is a low-level mutable view of the state;
+// the base view is usually updated by the commit calls of the higher-level view
+type BaseView interface {
+	ReadOnlyView
+
+	// CreateAccount creates a new account
+	CreateAccount(
+		addr gethCommon.Address,
+		balance *uint256.Int,
+		nonce uint64,
+		code []byte,
+		codeHash gethCommon.Hash,
+	) error
+
+	// UpdateAccount updates an account
+	UpdateAccount(
+		addr gethCommon.Address,
+		balance *uint256.Int,
+		nonce uint64,
+		code []byte,
+		codeHash gethCommon.Hash,
+	) error
+
+	// DeleteAccount deletes an account
+	DeleteAccount(addr gethCommon.Address) error
+
+	// UpdateSlot updates the value for the given slot in the main storage
+	UpdateSlot(
+		slot SlotAddress,
+		value gethCommon.Hash,
+	) error
+
+	// Commit commits the changes
+	Commit() error
+}
+
+// SlotAddress captures an address to a storage slot
+type SlotAddress struct {
+	Address gethCommon.Address
+	Key     gethCommon.Hash
+}
+
+// SlotEntry captures an address to a storage slot and the value stored in it
+type SlotEntry struct {
+	Address gethCommon.Address
+	Key     gethCommon.Hash
+	Value   gethCommon.Hash
+}
+
+// Encode returns the encoded content of the slot entry
+func (se *SlotEntry) Encode() ([]byte, error) {
+	return rlp.EncodeToBytes(se)
+}
+
+// SlotEntryFromEncoded constructs a slot entry from the encoded data
+func SlotEntryFromEncoded(encoded []byte) (*SlotEntry, error) {
+	if len(encoded) == 0 {
+		return nil, nil
+	}
+	se := &SlotEntry{}
+	return se, rlp.DecodeBytes(encoded, se)
+}
diff --git a/fvm/evm/types/tokenVault.go b/fvm/evm/types/tokenVault.go
new file mode 100644
index 00000000000..77eb4a67f3e
--- /dev/null
+++ b/fvm/evm/types/tokenVault.go
@@ -0,0 +1,30 @@
+package types
+
+import "math/big"
+
+// FLOWTokenVault holds a balance of FLOW tokens
+type FLOWTokenVault struct {
+	balance Balance
+}
+
+func NewFlowTokenVault(balance Balance) *FLOWTokenVault {
+	return &FLOWTokenVault{balance: balance}
+}
+
+func (t *FLOWTokenVault) Balance() Balance {
+	return t.balance
+}
+
+func (t *FLOWTokenVault) Withdraw(b Balance) (*FLOWTokenVault, error) {
+	var err error
+	t.balance, err = SubBalance(t.balance, b)
+	return NewFlowTokenVault(b), err
+}
+
+func (t *FLOWTokenVault) Deposit(inp *FLOWTokenVault) error {
+	var err error
+	t.balance, err = AddBalance(t.balance, inp.balance)
+	// reset the balance of inp in case the vault gets reused
+	inp.balance = new(big.Int)
+	return err
+}
diff --git a/fvm/evm/types/tokenVault_test.go b/fvm/evm/types/tokenVault_test.go
new file mode 100644
index 00000000000..02a57ecfe07
--- /dev/null
+++ b/fvm/evm/types/tokenVault_test.go
@@ -0,0 +1,25 @@
+package types_test
+
+import (
+	"testing"
+
"github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/evm/types" +) + +func TestVault(t *testing.T) { + vault1 := types.NewFlowTokenVault(types.MakeABalanceInFlow(3)) + + vault2, err := vault1.Withdraw(types.OneFlowBalance()) + require.NoError(t, err) + + require.Equal(t, types.MakeABalanceInFlow(2), vault1.Balance()) + require.Equal(t, types.OneFlowBalance(), vault2.Balance()) + + toBeDeposited := types.NewFlowTokenVault(types.OneFlowBalance()) + err = vault1.Deposit(toBeDeposited) + require.NoError(t, err) + require.Equal(t, types.MakeABalanceInFlow(3), vault1.Balance()) + require.Equal(t, types.EmptyBalance, toBeDeposited.Balance()) +} diff --git a/fvm/executionParameters.go b/fvm/executionParameters.go index 46b64382b73..a23c8360804 100644 --- a/fvm/executionParameters.go +++ b/fvm/executionParameters.go @@ -5,8 +5,12 @@ import ( "fmt" "math" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/cadence" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/fvm/environment" @@ -15,6 +19,7 @@ import ( "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/storage/derived" "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/systemcontracts" ) func ProcedureStateParameters( @@ -50,74 +55,80 @@ func getBasicMeterParameters( return params } -// getBodyMeterParameters returns the set of meter parameters used for -// transaction/script body execution. -func getBodyMeterParameters( +// getExecutionParameters returns the set of meter parameters used for +// transaction/script body execution and the minimum required version as defined by the +// NodeVersionBeacon contract. +func getExecutionParameters( + log zerolog.Logger, ctx Context, proc Procedure, txnState storage.TransactionPreparer, -) ( - meter.MeterParameters, - error, -) { - procParams := getBasicMeterParameters(ctx, proc) +) (state.ExecutionParameters, *snapshot.ExecutionSnapshot, error) { + meterParams := getBasicMeterParameters(ctx, proc) - overrides, err := txnState.GetMeterParamOverrides( + executionParams, executionParamsStateRead, err := txnState.GetStateExecutionParameters( txnState, - NewMeterParamOverridesComputer(ctx, txnState)) + NewExecutionParametersComputer(log, ctx, txnState)) if err != nil { - return procParams, err + return state.ExecutionParameters{ + MeterParameters: meterParams, + }, nil, err } - if overrides.ComputationWeights != nil { - procParams = procParams.WithComputationWeights( - overrides.ComputationWeights) + if executionParams.ComputationWeights != nil { + meterParams = meterParams.WithComputationWeights( + executionParams.ComputationWeights) } - if overrides.MemoryWeights != nil { - procParams = procParams.WithMemoryWeights(overrides.MemoryWeights) + if executionParams.MemoryWeights != nil { + meterParams = meterParams.WithMemoryWeights(executionParams.MemoryWeights) } - if overrides.MemoryLimit != nil { - procParams = procParams.WithMemoryLimit(*overrides.MemoryLimit) + if executionParams.MemoryLimit != nil { + meterParams = meterParams.WithMemoryLimit(*executionParams.MemoryLimit) } // NOTE: The memory limit (and interaction limit) may be overridden by the // environment. We need to ignore the override in that case. if proc.ShouldDisableMemoryAndInteractionLimits(ctx) { - procParams = procParams.WithMemoryLimit(math.MaxUint64). + meterParams = meterParams.WithMemoryLimit(math.MaxUint64). 
WithStorageInteractionLimit(math.MaxUint64) } - return procParams, nil + return state.ExecutionParameters{ + MeterParameters: meterParams, + }, executionParamsStateRead, nil } -type MeterParamOverridesComputer struct { +type ExecutionParametersComputer struct { + log zerolog.Logger ctx Context txnState storage.TransactionPreparer } -func NewMeterParamOverridesComputer( +func NewExecutionParametersComputer( + log zerolog.Logger, ctx Context, txnState storage.TransactionPreparer, -) MeterParamOverridesComputer { - return MeterParamOverridesComputer{ +) ExecutionParametersComputer { + return ExecutionParametersComputer{ + log: log, ctx: ctx, txnState: txnState, } } -func (computer MeterParamOverridesComputer) Compute( +func (computer ExecutionParametersComputer) Compute( _ state.NestedTransactionPreparer, _ struct{}, ) ( - derived.MeterParamOverrides, + derived.StateExecutionParameters, error, ) { - var overrides derived.MeterParamOverrides + var overrides derived.StateExecutionParameters var err error - computer.txnState.RunWithAllLimitsDisabled(func() { - overrides, err = computer.getMeterParamOverrides() + computer.txnState.RunWithMeteringDisabled(func() { + overrides, err = computer.getExecutionParameters() }) if err != nil { @@ -129,14 +140,18 @@ func (computer MeterParamOverridesComputer) Compute( return overrides, nil } -func (computer MeterParamOverridesComputer) getMeterParamOverrides() ( - derived.MeterParamOverrides, +func (computer ExecutionParametersComputer) getExecutionParameters() ( + derived.StateExecutionParameters, error, ) { - // Check that the service account exists because all the settings are - // stored in it - serviceAddress := computer.ctx.Chain.ServiceAddress() - service := common.Address(serviceAddress) + sc := systemcontracts.SystemContractsForChain(computer.ctx.Chain.ChainID()) + + // The execution parameters are stored in the ExecutionParametersAccount. This is + // just the service account for all networks except mainnet and testnet. + // For mainnet and testnet, the execution parameters are stored in a separate + // account, so that they are separated from the frequently changing data on the + // service account. 
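+	// For example (an illustrative sketch):
+	//
+	//	sc := systemcontracts.SystemContractsForChain(flow.Mainnet)
+	//	// sc.ExecutionParametersAccount differs from the service account on
+	//	// mainnet and testnet; on all other chains the two are the same.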
+ service := common.Address(sc.ExecutionParametersAccount.Address) env := environment.NewScriptEnv( context.Background(), @@ -144,7 +159,7 @@ func (computer MeterParamOverridesComputer) getMeterParamOverrides() ( computer.ctx.EnvironmentParams, computer.txnState) - overrides := derived.MeterParamOverrides{} + overrides := derived.StateExecutionParameters{} // set the property if no error, but if the error is a fatal error then // return it @@ -268,7 +283,7 @@ func cadenceValueToWeights(value cadence.Value) (map[uint]uint64, bool) { return nil, false } - result[uint(key.ToGoValue().(uint64))] = uint64(value) + result[uint(key)] = uint64(value) } return result, true @@ -331,5 +346,5 @@ func GetExecutionMemoryLimit( blueprints.TransactionFeesExecutionMemoryLimitPath.String()) } - return memoryLimitRaw.ToGoValue().(uint64), nil + return uint64(memoryLimitRaw), nil } diff --git a/fvm/executionParameters_test.go b/fvm/executionParameters_test.go index 7775d010f2f..d9331478679 100644 --- a/fvm/executionParameters_test.go +++ b/fvm/executionParameters_test.go @@ -8,8 +8,8 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/cadence" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/blueprints" @@ -32,7 +32,7 @@ func TestGetExecutionMemoryWeights(t *testing.T) { envMock := &fvmmock.Environment{} envMock.On("BorrowCadenceRuntime", mock.Anything).Return( reusableRuntime.NewReusableCadenceRuntime( - &testutil.TestInterpreterRuntime{ + &testutil.TestRuntime{ ReadStoredFunc: readStored, }, runtime.Config{}, @@ -162,7 +162,7 @@ func TestGetExecutionEffortWeights(t *testing.T) { envMock := &fvmmock.Environment{} envMock.On("BorrowCadenceRuntime", mock.Anything).Return( reusableRuntime.NewReusableCadenceRuntime( - &testutil.TestInterpreterRuntime{ + &testutil.TestRuntime{ ReadStoredFunc: readStored, }, runtime.Config{}, diff --git a/fvm/fvm.go b/fvm/fvm.go index 557cf2f7599..ae92d08d945 100644 --- a/fvm/fvm.go +++ b/fvm/fvm.go @@ -5,9 +5,10 @@ import ( "fmt" "github.com/onflow/cadence" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/environment" - errors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/meter" "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/storage/logical" @@ -83,7 +84,7 @@ func Run(executor ProcedureExecutor) error { return executor.Execute() } -// An Procedure is an operation (or set of operations) that reads or writes ledger state. +// A Procedure is an operation (or set of operations) that reads or writes ledger state. type Procedure interface { NewExecutor( ctx Context, @@ -103,6 +104,12 @@ type Procedure interface { // VM runs procedures type VM interface { + NewExecutor( + Context, + Procedure, + storage.TransactionPreparer, + ) ProcedureExecutor + Run( Context, Procedure, @@ -112,8 +119,6 @@ type VM interface { ProcedureOutput, error, ) - - GetAccount(Context, flow.Address, snapshot.StorageSnapshot) (*flow.Account, error) } var _ VM = (*VirtualMachine)(nil) @@ -126,6 +131,14 @@ func NewVirtualMachine() *VirtualMachine { return &VirtualMachine{} } +func (vm *VirtualMachine) NewExecutor( + ctx Context, + proc Procedure, + txn storage.TransactionPreparer, +) ProcedureExecutor { + return proc.NewExecutor(ctx, txn) +} + // Run runs a procedure against a ledger in the given context. 
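+//
+// Typical usage (an illustrative sketch; error handling elided and `ctx`,
+// `proc` and `storageSnapshot` are prepared by the caller):
+//
+//	vm := NewVirtualMachine()
+//	executionSnapshot, output, err := vm.Run(ctx, proc, storageSnapshot)
+//	// output carries events, logs and any procedure error, while
+//	// executionSnapshot captures the register reads and writes.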
 func (vm *VirtualMachine) Run(
 	ctx Context,
@@ -147,7 +160,12 @@ func (vm *VirtualMachine) Run(
 	var err error
 	switch proc.Type() {
 	case ScriptProcedureType:
-		storageTxn = blockDatabase.NewSnapshotReadTransaction(stateParameters)
+		if ctx.AllowProgramCacheWritesInScripts {
+			// if configured, allow scripts to update the programs cache
+			storageTxn, err = blockDatabase.NewCachingSnapshotReadTransaction(stateParameters)
+		} else {
+			storageTxn = blockDatabase.NewSnapshotReadTransaction(stateParameters)
+		}
 	case TransactionProcedureType, BootstrapProcedureType:
 		storageTxn, err = blockDatabase.NewTransaction(
 			proc.ExecutionTime(),
@@ -184,7 +202,7 @@ func (vm *VirtualMachine) Run(
 }
 
 // GetAccount returns an account by address or an error if none exists.
-func (vm *VirtualMachine) GetAccount(
+func GetAccount(
 	ctx Context,
 	address flow.Address,
 	storageSnapshot snapshot.StorageSnapshot,
@@ -192,6 +210,103 @@ func (vm *VirtualMachine) GetAccount(
 	*flow.Account,
 	error,
 ) {
+	env, _ := getScriptEnvironment(ctx, storageSnapshot)
+
+	account, err := env.GetAccount(address)
+	if err != nil {
+		if errors.IsLedgerFailure(err) {
+			return nil, fmt.Errorf(
+				"cannot get account, this error usually happens if the "+
+					"reference block for this query is not set to a recent "+
+					"block: %w",
+				err)
+		}
+		return nil, fmt.Errorf("cannot get account: %w", err)
+	}
+	return account, nil
+}
+
+// GetAccountBalance returns an account's balance by address or an error if none exists.
+func GetAccountBalance(
+	ctx Context,
+	address flow.Address,
+	storageSnapshot snapshot.StorageSnapshot,
+) (
+	uint64,
+	error,
+) {
+	env, _ := getScriptEnvironment(ctx, storageSnapshot)
+
+	accountBalance, err := env.GetAccountBalance(common.MustBytesToAddress(address.Bytes()))
+
+	if err != nil {
+		return 0, fmt.Errorf("cannot get account balance: %w", err)
+	}
+	return accountBalance, nil
+}
+
+// GetAccountAvailableBalance returns an account's available balance by address or an error if none exists.
+func GetAccountAvailableBalance(
+	ctx Context,
+	address flow.Address,
+	storageSnapshot snapshot.StorageSnapshot,
+) (
+	uint64,
+	error,
+) {
+	env, _ := getScriptEnvironment(ctx, storageSnapshot)
+
+	accountBalance, err := env.GetAccountAvailableBalance(common.MustBytesToAddress(address.Bytes()))
+
+	if err != nil {
+		return 0, fmt.Errorf("cannot get account available balance: %w", err)
+	}
+	return accountBalance, nil
+}
+
+// GetAccountKeys returns an account's keys by address or an error if none exists.
+func GetAccountKeys(
+	ctx Context,
+	address flow.Address,
+	storageSnapshot snapshot.StorageSnapshot,
+) (
+	[]flow.AccountPublicKey,
+	error,
+) {
+	_, accountInfo := getScriptEnvironment(ctx, storageSnapshot)
+	accountKeys, err := accountInfo.GetAccountKeys(address)
+
+	if err != nil {
+		return nil, fmt.Errorf("cannot get account keys: %w", err)
+	}
+	return accountKeys, nil
+}
+
+// GetAccountKey returns an account key by address and index or an error if none exists.
+func GetAccountKey(
+	ctx Context,
+	address flow.Address,
+	keyIndex uint32,
+	storageSnapshot snapshot.StorageSnapshot,
+) (
+	*flow.AccountPublicKey,
+	error,
+) {
+	_, accountInfo := getScriptEnvironment(ctx, storageSnapshot)
+	accountKey, err := accountInfo.GetAccountKeyByIndex(address, keyIndex)
+
+	if err != nil {
+		return nil, fmt.Errorf("cannot get account key: %w", err)
+	}
+
+	return accountKey, nil
+}
+
+// getScriptEnvironment is a helper that initializes the environment and
+// account-info components shared by the account query functions above.
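+//
+// Callers use the exported wrappers rather than this helper directly, e.g.
+// (an illustrative sketch; `ctx`, `address` and `snap` are prepared by the
+// caller):
+//
+//	account, err := GetAccount(ctx, address, snap)
+//	balance, err := GetAccountBalance(ctx, address, snap)
+//	key, err := GetAccountKey(ctx, address, 0, snap)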
+func getScriptEnvironment( + ctx Context, + storageSnapshot snapshot.StorageSnapshot, +) (environment.Environment, environment.AccountInfo) { blockDatabase := storage.NewBlockDatabase( storageSnapshot, 0, @@ -210,16 +325,6 @@ func (vm *VirtualMachine) GetAccount( ctx.TracerSpan, ctx.EnvironmentParams, storageTxn) - account, err := env.GetAccount(address) - if err != nil { - if errors.IsLedgerFailure(err) { - return nil, fmt.Errorf( - "cannot get account, this error usually happens if the "+ - "reference block for this query is not set to a recent "+ - "block: %w", - err) - } - return nil, fmt.Errorf("cannot get account: %w", err) - } - return account, nil + + return env, env.AccountInfo } diff --git a/fvm/fvm_bench_test.go b/fvm/fvm_bench_test.go index c5eee155c06..77f9c851b67 100644 --- a/fvm/fvm_bench_test.go +++ b/fvm/fvm_bench_test.go @@ -2,24 +2,28 @@ package fvm_test import ( "context" + "encoding/hex" "encoding/json" "fmt" "io" + "math/big" "strings" "testing" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/rs/zerolog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - + "github.com/onflow/atree" "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/stdlib" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" - flow2 "github.com/onflow/flow-go-sdk" + flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/templates" "github.com/onflow/flow-go/engine/execution" @@ -30,9 +34,15 @@ import ( bootstrapexec "github.com/onflow/flow-go/engine/execution/state/bootstrap" "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" reusableRuntime "github.com/onflow/flow-go/fvm/runtime" "github.com/onflow/flow-go/fvm/storage/derived" "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/fvm/tracing" completeLedger "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/wal/fixtures" "github.com/onflow/flow-go/model/flow" @@ -72,19 +82,21 @@ func (account *TestBenchAccount) RetAndIncSeqNumber() uint64 { func (account *TestBenchAccount) DeployContract(b *testing.B, blockExec TestBenchBlockExecutor, contractName string, contract string) { serviceAccount := blockExec.ServiceAccount(b) - txBody := testutil.CreateContractDeploymentTransaction( + txBodyBuilder := testutil.CreateContractDeploymentTransaction( contractName, contract, account.Address, - blockExec.Chain(b)) + blockExec.Chain(b)). + SetProposalKey(serviceAccount.Address, 0, serviceAccount.RetAndIncSeqNumber()). 
+ SetPayer(serviceAccount.Address) - txBody.SetProposalKey(serviceAccount.Address, 0, serviceAccount.RetAndIncSeqNumber()) - txBody.SetPayer(serviceAccount.Address) + err := testutil.SignPayload(txBodyBuilder, account.Address, account.PrivateKey) + require.NoError(b, err) - err := testutil.SignPayload(txBody, account.Address, account.PrivateKey) + err = testutil.SignEnvelope(txBodyBuilder, serviceAccount.Address, serviceAccount.PrivateKey) require.NoError(b, err) - err = testutil.SignEnvelope(txBody, serviceAccount.Address, serviceAccount.PrivateKey) + txBody, err := txBodyBuilder.Build() require.NoError(b, err) computationResult := blockExec.ExecuteCollections(b, [][]*flow.TransactionBody{{txBody}}) @@ -93,12 +105,12 @@ func (account *TestBenchAccount) DeployContract(b *testing.B, blockExec TestBenc func (account *TestBenchAccount) AddArrayToStorage(b *testing.B, blockExec TestBenchBlockExecutor, list []string) { serviceAccount := blockExec.ServiceAccount(b) - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction(list: [String]) { - prepare(acct: AuthAccount) { - acct.load<[String]>(from: /storage/test) - acct.save(list, to: /storage/test) + prepare(acct: auth(Storage) &Account) { + acct.storage.load<[String]>(from: /storage/test) + acct.storage.save(list, to: /storage/test) } execute {} } @@ -111,17 +123,18 @@ func (account *TestBenchAccount) AddArrayToStorage(b *testing.B, blockExec TestB } cadenceArray, err := jsoncdc.Encode(cadence.NewArray(cadenceArrayValues)) require.NoError(b, err) - txBody.AddArgument(cadenceArray) - - txBody.SetProposalKey(serviceAccount.Address, 0, serviceAccount.RetAndIncSeqNumber()) - txBody.SetPayer(serviceAccount.Address) + txBodyBuilder.AddArgument(cadenceArray). + SetProposalKey(serviceAccount.Address, 0, serviceAccount.RetAndIncSeqNumber()). + SetPayer(serviceAccount.Address) if account.Address != serviceAccount.Address { - err = testutil.SignPayload(txBody, account.Address, account.PrivateKey) + err = testutil.SignPayload(txBodyBuilder, account.Address, account.PrivateKey) require.NoError(b, err) } - err = testutil.SignEnvelope(txBody, serviceAccount.Address, serviceAccount.PrivateKey) + err = testutil.SignEnvelope(txBodyBuilder, serviceAccount.Address, serviceAccount.PrivateKey) + require.NoError(b, err) + txBody, err := txBodyBuilder.Build() require.NoError(b, err) computationResult := blockExec.ExecuteCollections(b, [][]*flow.TransactionBody{{txBody}}) @@ -157,6 +170,7 @@ func NewBasicBlockExecutor(tb testing.TB, chain flow.Chain, logger zerolog.Logge runtime.Config{}, ), ), + fvm.WithEVMEnabled(true), } fvmContext := fvm.NewContext(opts...) @@ -209,7 +223,7 @@ func NewBasicBlockExecutor(tb testing.TB, chain flow.Chain, logger zerolog.Logge me := new(moduleMock.Local) me.On("NodeID").Return(unittest.IdentifierFixture()) - me.On("Sign", mock.Anything, mock.Anything).Return(nil, nil) + me.On("Sign", mock.Anything, mock.Anything).Return(unittest.SignatureFixture(), nil) me.On("SignFunc", mock.Anything, mock.Anything, mock.Anything). 
Return(nil, nil) @@ -223,7 +237,9 @@ func NewBasicBlockExecutor(tb testing.TB, chain flow.Chain, logger zerolog.Logge ledgerCommitter, me, prov, - nil) + nil, + testutil.ProtocolStateWithSourceFixture(nil), + 1) // We're interested in fvm's serial execution time require.NoError(tb, err) activeSnapshot := snapshot.NewSnapshotTree( @@ -257,7 +273,7 @@ func (b *BasicBlockExecutor) ExecuteCollections(tb testing.TB, collections [][]* executableBlock.StartState = &b.activeStateCommitment derivedBlockData := b.derivedChainData.GetOrCreateDerivedBlockData( - executableBlock.ID(), + executableBlock.BlockID(), executableBlock.ParentID()) computationResult, err := b.blockComputer.ExecuteBlock( @@ -277,28 +293,50 @@ func (b *BasicBlockExecutor) ExecuteCollections(tb testing.TB, collections [][]* return computationResult } +func (b *BasicBlockExecutor) RunWithLedger(tb testing.TB, f func(ledger atree.Ledger)) { + ts := state.NewTransactionState(b.activeSnapshot, state.DefaultParameters()) + + accounts := environment.NewAccounts(ts) + meter := environment.NewMeter(ts) + + valueStore := environment.NewValueStore( + tracing.NewMockTracerSpan(), + meter, + accounts, + ) + + f(valueStore) + + newSnapshot, err := ts.FinalizeMainTransaction() + require.NoError(tb, err) + + b.activeSnapshot = b.activeSnapshot.Append(newSnapshot) +} + func (b *BasicBlockExecutor) SetupAccounts(tb testing.TB, privateKeys []flow.AccountPrivateKey) []TestBenchAccount { accounts := make([]TestBenchAccount, 0) serviceAddress := b.Chain(tb).ServiceAddress() for _, privateKey := range privateKeys { - accountKey := flow2.NewAccountKey(). + accountKey := flowsdk.NewAccountKey(). FromPrivateKey(privateKey.PrivateKey). SetWeight(fvm.AccountKeyWeightThreshold). SetHashAlgo(privateKey.HashAlgo). SetSigAlgo(privateKey.SignAlgo) - sdkTX, err := templates.CreateAccount([]*flow2.AccountKey{accountKey}, []templates.Contract{}, flow2.BytesToAddress(serviceAddress.Bytes())) + sdkTX, err := templates.CreateAccount([]*flowsdk.AccountKey{accountKey}, []templates.Contract{}, flowsdk.BytesToAddress(serviceAddress.Bytes())) require.NoError(tb, err) - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript(sdkTX.Script). SetArguments(sdkTX.Arguments). AddAuthorizer(serviceAddress). SetProposalKey(serviceAddress, 0, b.ServiceAccount(tb).RetAndIncSeqNumber()). 
SetPayer(serviceAddress) - err = testutil.SignEnvelope(txBody, b.Chain(tb).ServiceAddress(), unittest.ServiceAccountPrivateKey) + err = testutil.SignEnvelope(txBodyBuilder, b.Chain(tb).ServiceAddress(), unittest.ServiceAccountPrivateKey) + require.NoError(tb, err) + txBody, err := txBodyBuilder.Build() require.NoError(tb, err) computationResult := b.ExecuteCollections(tb, [][]*flow.TransactionBody{{txBody}}) @@ -308,12 +346,17 @@ func (b *BasicBlockExecutor) SetupAccounts(tb testing.TB, privateKeys []flow.Acc for _, event := range computationResult.AllEvents() { if event.Type == flow.EventAccountCreated { - data, err := jsoncdc.Decode(nil, event.Payload) + data, err := ccf.Decode(nil, event.Payload) if err != nil { tb.Fatal("setup account failed, error decoding events") } - addr = flow.ConvertAddress( - data.(cadence.Event).Fields[0].(cadence.Address)) + + address := cadence.SearchFieldByName( + data.(cadence.Event), + stdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address) + + addr = flow.ConvertAddress(address) break } } @@ -371,6 +414,11 @@ func (l *logExtractor) Write(p []byte) (n int, err error) { var _ io.Writer = &logExtractor{} +type benchTransactionContext struct { + EvmTestContract *testutils.TestContract + EvmTestAccount *testutils.EOATestAccount +} + // BenchmarkRuntimeEmptyTransaction simulates executing blocks with `transactionsPerBlock` // where each transaction is an empty transaction func BenchmarkRuntimeTransaction(b *testing.B) { @@ -385,8 +433,15 @@ func BenchmarkRuntimeTransaction(b *testing.B) { TimeSpent: map[string]uint64{}, InteractionUsed: map[string]uint64{}, } + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) - benchTransaction := func(b *testing.B, tx string) { + testContractAddress, err := chain.AddressAtIndex(systemcontracts.LastSystemAccountIndex + 1) + require.NoError(b, err) + + benchTransaction := func( + b *testing.B, + txStringFunc func(b *testing.B, context benchTransactionContext) string, + ) { logger := zerolog.New(logE).Level(zerolog.DebugLevel) @@ -405,8 +460,12 @@ func BenchmarkRuntimeTransaction(b *testing.B) { for _, account := range accounts { addrs = append(addrs, account.Address) } + evmAddress, err := chain.AddressAtIndex(systemcontracts.EVMStorageAccountIndex) + require.NoError(b, err) + addrs = append(addrs, evmAddress) + // fund all accounts so not to run into storage problems - fundAccounts(b, blockExecutor, cadence.UFix64(10_000_000_000), addrs...) + fundAccounts(b, blockExecutor, cadence.UFix64(1_000_000_000_000), addrs...) 
accounts[0].DeployContract(b, blockExecutor, "TestContract", ` access(all) contract TestContract { @@ -415,40 +474,62 @@ func BenchmarkRuntimeTransaction(b *testing.B) { access(all) fun empty() { } - access(all) fun emit() { + access(all) fun emitEvent() { emit SomeEvent() } } `) + require.Equal(b, testContractAddress, accounts[0].Address, + "test contract should be deployed to first available account index") accounts[0].AddArrayToStorage(b, blockExecutor, []string{longString, longString, longString, longString, longString}) - btx := []byte(tx) + tc := testutils.GetStorageTestContract(b) + var evmTestAccount *testutils.EOATestAccount + blockExecutor.RunWithLedger(b, func(ledger atree.Ledger) { + testutils.DeployContract(b, types.EmptyAddress, tc, ledger, chain.ServiceAddress()) + evmTestAccount = testutils.FundAndGetEOATestAccount(b, ledger, chain.ServiceAddress()) + }) + + benchTransactionContext := benchTransactionContext{ + EvmTestContract: tc, + EvmTestAccount: evmTestAccount, + } benchmarkAccount := &accounts[0] b.ResetTimer() // setup done, lets start measuring + b.StopTimer() for i := 0; i < b.N; i++ { transactions := make([]*flow.TransactionBody, transactionsPerBlock) for j := 0; j < transactionsPerBlock; j++ { - txBody := flow.NewTransactionBody(). + tx := txStringFunc(b, benchTransactionContext) + + btx := []byte(tx) + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript(btx). AddAuthorizer(benchmarkAccount.Address). SetProposalKey(benchmarkAccount.Address, 0, benchmarkAccount.RetAndIncSeqNumber()). SetPayer(benchmarkAccount.Address) - err = testutil.SignEnvelope(txBody, benchmarkAccount.Address, benchmarkAccount.PrivateKey) + err = testutil.SignEnvelope(txBodyBuilder, benchmarkAccount.Address, benchmarkAccount.PrivateKey) + require.NoError(b, err) + + txBody, err := txBodyBuilder.Build() require.NoError(b, err) transactions[j] = txBody } - + b.StartTimer() computationResult := blockExecutor.ExecuteCollections(b, [][]*flow.TransactionBody{transactions}) + b.StopTimer() totalInteractionUsed := uint64(0) totalComputationUsed := uint64(0) - for _, txRes := range computationResult.AllTransactionResults() { + results := computationResult.AllTransactionResults() + // not interested in the system transaction + for _, txRes := range results[0 : len(results)-1] { require.Empty(b, txRes.ErrorMessage) - totalInteractionUsed += logE.InteractionUsed[txRes.ID().String()] + totalInteractionUsed += logE.InteractionUsed[txRes.TransactionID.String()] totalComputationUsed += txRes.ComputationUsed } b.ReportMetric(float64(totalInteractionUsed/uint64(transactionsPerBlock)), "interactions") @@ -457,130 +538,264 @@ func BenchmarkRuntimeTransaction(b *testing.B) { } templateTx := func(rep int, prepare string) string { - return fmt.Sprintf(` + return fmt.Sprintf( + ` import FungibleToken from 0x%s import FlowToken from 0x%s import TestContract from 0x%s + import EVM from 0x%s transaction(){ - prepare(signer: AuthAccount){ + prepare(signer: auth(Storage, Capabilities) &Account){ var i = 0 while i < %d { - i = i + 1 + i = i + 1 %s } } - }`, fvm.FungibleTokenAddress(chain), fvm.FlowTokenAddress(chain), "754aed9de6197641", rep, prepare) + }`, + sc.FungibleToken.Address.Hex(), + sc.FlowToken.Address.Hex(), + testContractAddress, + sc.EVMContract.Address.Hex(), + rep, + prepare, + ) } b.Run("reference tx", func(b *testing.B) { - benchTransaction(b, templateTx(100, "")) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, "") + }, + ) }) 
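+	// Each case below follows the same pattern: the closure builds the
+	// transaction source lazily for every block while the benchmark timer is
+	// stopped, so only ExecuteCollections is measured. The shape of a case
+	// (illustrative):
+	//
+	//	b.Run("case", func(b *testing.B) {
+	//		benchTransaction(b, func(b *testing.B, context benchTransactionContext) string {
+	//			return templateTx(100, `/* Cadence statement under test */`)
+	//		})
+	//	})
+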
b.Run("convert int to string", func(b *testing.B) { - benchTransaction(b, templateTx(100, `i.toString()`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, `i.toString()`) + }, + ) }) b.Run("convert int to string and concatenate it", func(b *testing.B) { - benchTransaction(b, templateTx(100, `"x".concat(i.toString())`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, `"x".concat(i.toString())`) + }, + ) }) b.Run("get signer address", func(b *testing.B) { - benchTransaction(b, templateTx(100, `signer.address`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, `signer.address`) + }, + ) }) b.Run("get public account", func(b *testing.B) { - benchTransaction(b, templateTx(100, `getAccount(signer.address)`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, `getAccount(signer.address)`) + }, + ) }) b.Run("get account and get balance", func(b *testing.B) { - benchTransaction(b, templateTx(100, `getAccount(signer.address).balance`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, `getAccount(signer.address).balance`) + }, + ) }) b.Run("get account and get available balance", func(b *testing.B) { - benchTransaction(b, templateTx(100, `getAccount(signer.address).availableBalance`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, `getAccount(signer.address).availableBalance`) + }, + ) }) b.Run("get account and get storage used", func(b *testing.B) { - benchTransaction(b, templateTx(100, `getAccount(signer.address).storageUsed`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, `getAccount(signer.address).storage.used`) + }, + ) }) b.Run("get account and get storage capacity", func(b *testing.B) { - benchTransaction(b, templateTx(100, `getAccount(signer.address).storageCapacity`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, `getAccount(signer.address).storage.capacity`) + }, + ) }) b.Run("get signer vault", func(b *testing.B) { benchTransaction( b, - templateTx(100, `let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault)!`), + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, + `let vaultRef = signer.storage.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault)!`) + }, ) }) b.Run("get signer receiver", func(b *testing.B) { benchTransaction( b, - templateTx(100, `let receiverRef = getAccount(signer.address) - .getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>()!`), + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, + `let receiverRef = getAccount(signer.address) + .capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver)!`) + }, ) }) b.Run("transfer tokens", func(b *testing.B) { benchTransaction( b, - templateTx(100, ` - let receiverRef = getAccount(signer.address) - .getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>()! + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, ` + let receiverRef = getAccount(signer.address) + .capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver)! 
- let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault)! + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault)! - receiverRef.deposit(from: <-vaultRef.withdraw(amount: 0.00001)) - `), + receiverRef.deposit(from: <-vaultRef.withdraw(amount: 0.00001)) + `) + }, ) }) b.Run("load and save empty string on signers address", func(b *testing.B) { benchTransaction( b, - templateTx(100, ` - signer.load<String>(from: /storage/testpath) - signer.save("", to: /storage/testpath) - `), + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, ` + signer.storage.load<String>(from: /storage/testpath) + signer.storage.save("", to: /storage/testpath) + `) + }, ) }) b.Run("load and save long string on signers address", func(b *testing.B) { benchTransaction( b, - templateTx(100, fmt.Sprintf(` - signer.load<String>(from: /storage/testpath) - signer.save("%s", to: /storage/testpath) - `, longString)), + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, fmt.Sprintf(` + signer.storage.load<String>(from: /storage/testpath) + signer.storage.save("%s", to: /storage/testpath) + `, longString)) + }, ) }) b.Run("create new account", func(b *testing.B) { - benchTransaction(b, templateTx(50, `let acct = AuthAccount(payer: signer)`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(50, `let acct = Account(payer: signer)`) + }, + ) }) b.Run("call empty contract function", func(b *testing.B) { - benchTransaction(b, templateTx(100, `TestContract.empty()`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, `TestContract.empty()`) + }, + ) }) b.Run("emit event", func(b *testing.B) { - benchTransaction(b, templateTx(100, `TestContract.emit()`)) + benchTransaction(b, + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, `TestContract.emitEvent()`) + }, + ) }) b.Run("borrow array from storage", func(b *testing.B) { benchTransaction( b, - templateTx(100, ` - let strings = signer.borrow<&[String]>(from: /storage/test)! - var i = 0 - while (i < strings.length) { - log(strings[i]) - i = i +1 - } - `), + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, ` + let strings = signer.storage.borrow<&[String]>(from: /storage/test)! + var i = 0 + while (i < strings.length) { + log(strings[i]) + i = i +1 + } + `) + }, ) }) b.Run("copy array from storage", func(b *testing.B) { benchTransaction( b, - templateTx(100, ` - let strings = signer.copy<[String]>(from: /storage/test)! - var i = 0 - while (i < strings.length) { - log(strings[i]) - i = i +1 + func(b *testing.B, context benchTransactionContext) string { + return templateTx(100, ` + let strings = signer.storage.copy<[String]>(from: /storage/test)! 
+				var i = 0
+				while (i < strings.length) {
+					log(strings[i])
+					i = i +1
+				}
+				`)
+			},
+		)
+	})
+
+	benchEvm := func(b *testing.B, control bool) {
+		// benchEvm builds one Flow transaction that embeds a batch of EVM
+		// transactions. When `control` is false, the EVM.run calls are left
+		// commented out, so the control run measures everything except the
+		// EVM.run call itself and serves as the baseline.
+		benchTransaction(
+			b,
+			func(b *testing.B, context benchTransactionContext) string {
+				coinbaseBytes := context.EvmTestAccount.Address().Bytes()
+				transactionBody := fmt.Sprintf(`
+					let coinbaseBytesRaw = "%s".decodeHex()
+					let coinbaseBytes: [UInt8; 20] = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+					for j, v in coinbaseBytesRaw {
+						coinbaseBytes[j] = v
+					}
+					let coinbase = EVM.EVMAddress(bytes: coinbaseBytes)
+				`, hex.EncodeToString(coinbaseBytes))
+
+				num := int64(12)
+				gasLimit := uint64(100_000)
+
+				// add 100 EVM transactions to the Flow transaction body
+				for i := 0; i < 100; i++ {
+					txBytes := context.EvmTestAccount.PrepareSignAndEncodeTx(b,
+						context.EvmTestContract.DeployedAt.ToCommon(),
+						context.EvmTestContract.MakeCallData(b, "store", big.NewInt(num)),
+						big.NewInt(0),
+						gasLimit,
+						big.NewInt(0),
+					)
+					if control {
+						transactionBody += fmt.Sprintf(`
+							let txBytes%[1]d = "%[2]s".decodeHex()
+							EVM.run(tx: txBytes%[1]d, coinbase: coinbase)
+						`,
+							i,
+							hex.EncodeToString(txBytes),
+						)
+					} else {
+						// don't run the EVM transaction but do the hex conversion
+						transactionBody += fmt.Sprintf(`
+							let txBytes%[1]d = "%[2]s".decodeHex()
+							//EVM.run(tx: txBytes%[1]d, coinbase: coinbase)
+						`,
+							i,
+							hex.EncodeToString(txBytes),
+						)
+					}
+				}
-			}
-			`),
+
+				return templateTx(1, transactionBody)
+			},
 		)
+	}
+
+	b.Run("evm", func(b *testing.B) {
+		benchEvm(b, true)
+	})
+
+	b.Run("evm control", func(b *testing.B) {
+		benchEvm(b, false)
+	})
+
 }
 
 const TransferTxTemplate = `
@@ -590,8 +805,8 @@ const TransferTxTemplate = `
 	transaction(testTokenIDs: [UInt64], recipientAddress: Address) {
 		let transferTokens: @NonFungibleToken.Collection
 
-		prepare(acct: AuthAccount) {
-			let ref = acct.borrow<&BatchNFT.Collection>(from: /storage/TestTokenCollection)!
+		prepare(acct: auth(BorrowValue) &Account) {
+			let ref = acct.storage.borrow<&BatchNFT.Collection>(from: /storage/TestTokenCollection)!
 			self.transferTokens <- ref.batchWithdraw(ids: testTokenIDs)
 		}
 
@@ -599,8 +814,7 @@
 			// get the recipient's public account object
 			let recipient = getAccount(recipientAddress)
 			// get the Collection reference for the receiver
-			let receiverRef = recipient.getCapability(/public/TestTokenCollection)
-				.borrow<&{BatchNFT.TestTokenCollectionPublic}>()!
+			let receiverRef = recipient.capabilities.borrow<&{BatchNFT.TestTokenCollectionPublic}>(/public/TestTokenCollection)!
 			// deposit the NFT in the receivers collection
 			receiverRef.batchDeposit(tokens: <-self.transferTokens)
 		}
SetPayer(serviceAccount.Address) - err = testutil.SignPayload(txBody, accounts[1].Address, accounts[1].PrivateKey) + err = testutil.SignPayload(txBodyBuilder, accounts[1].Address, accounts[1].PrivateKey) + require.NoError(b, err) + + err = testutil.SignEnvelope(txBodyBuilder, serviceAccount.Address, serviceAccount.PrivateKey) require.NoError(b, err) - err = testutil.SignEnvelope(txBody, serviceAccount.Address, serviceAccount.PrivateKey) + txBody, err := txBodyBuilder.Build() require.NoError(b, err) transactions[j] = txBody } computationResult = blockExecutor.ExecuteCollections(b, [][]*flow.TransactionBody{transactions}) - for _, txRes := range computationResult.AllTransactionResults() { + results := computationResult.AllTransactionResults() + // not interested in the system transaction + for _, txRes := range results[0 : len(results)-1] { require.Empty(b, txRes.ErrorMessage) } } @@ -705,30 +924,29 @@ func setupReceiver(b *testing.B, be TestBenchBlockExecutor, nftAccount, batchNFT import BatchNFT from 0x%s transaction { - prepare(signer: AuthAccount) { + prepare(signer: auth(SaveValue) &Account) { signer.save( <-BatchNFT.createEmptyCollection(), to: /storage/TestTokenCollection ) - signer.link<&BatchNFT.Collection>( - /public/TestTokenCollection, - target: /storage/TestTokenCollection - ) } }` setupTx := []byte(fmt.Sprintf(setUpReceiverTemplate, nftAccount.Address.Hex(), batchNFTAccount.Address.Hex())) - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript(setupTx). SetProposalKey(serviceAccount.Address, 0, serviceAccount.RetAndIncSeqNumber()). AddAuthorizer(targetAccount.Address). SetPayer(serviceAccount.Address) - err := testutil.SignPayload(txBody, targetAccount.Address, targetAccount.PrivateKey) + err := testutil.SignPayload(txBodyBuilder, targetAccount.Address, targetAccount.PrivateKey) require.NoError(b, err) - err = testutil.SignEnvelope(txBody, serviceAccount.Address, serviceAccount.PrivateKey) + err = testutil.SignEnvelope(txBodyBuilder, serviceAccount.Address, serviceAccount.PrivateKey) + require.NoError(b, err) + + txBody, err := txBodyBuilder.Build() require.NoError(b, err) computationResult := be.ExecuteCollections(b, [][]*flow.TransactionBody{{txBody}}) @@ -740,31 +958,34 @@ func mintNFTs(b *testing.B, be TestBenchBlockExecutor, batchNFTAccount *TestBenc mintScriptTemplate := ` import BatchNFT from 0x%s transaction { - prepare(signer: AuthAccount) { - let adminRef = signer.borrow<&BatchNFT.Admin>(from: /storage/BatchNFTAdmin)! + prepare(signer: auth(BorrowValue) &Account) { + let adminRef = signer.storage.borrow<&BatchNFT.Admin>(from: /storage/BatchNFTAdmin)! let playID = adminRef.createPlay(metadata: {"name": "Test"}) let setID = BatchNFT.nextSetID adminRef.createSet(name: "Test") let setRef = adminRef.borrowSet(setID: setID) setRef.addPlay(playID: playID) let testTokens <- setRef.batchMintTestToken(playID: playID, quantity: %d) - signer.borrow<&BatchNFT.Collection>(from: /storage/TestTokenCollection)! + signer.storage.borrow<&BatchNFT.Collection>(from: /storage/TestTokenCollection)! .batchDeposit(tokens: <-testTokens) } }` mintScript := []byte(fmt.Sprintf(mintScriptTemplate, batchNFTAccount.Address.Hex(), size)) - txBody := flow.NewTransactionBody(). - SetGasLimit(999999). + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetComputeLimit(999999). SetScript(mintScript). SetProposalKey(serviceAccount.Address, 0, serviceAccount.RetAndIncSeqNumber()). AddAuthorizer(batchNFTAccount.Address). 
SetPayer(serviceAccount.Address) - err := testutil.SignPayload(txBody, batchNFTAccount.Address, batchNFTAccount.PrivateKey) + err := testutil.SignPayload(txBodyBuilder, batchNFTAccount.Address, batchNFTAccount.PrivateKey) + require.NoError(b, err) + + err = testutil.SignEnvelope(txBodyBuilder, serviceAccount.Address, serviceAccount.PrivateKey) require.NoError(b, err) - err = testutil.SignEnvelope(txBody, serviceAccount.Address, serviceAccount.PrivateKey) + txBody, err := txBodyBuilder.Build() require.NoError(b, err) computationResult := be.ExecuteCollections(b, [][]*flow.TransactionBody{{txBody}}) @@ -774,14 +995,17 @@ func mintNFTs(b *testing.B, be TestBenchBlockExecutor, batchNFTAccount *TestBenc func fundAccounts(b *testing.B, be TestBenchBlockExecutor, value cadence.UFix64, accounts ...flow.Address) { serviceAccount := be.ServiceAccount(b) for _, a := range accounts { - txBody := transferTokensTx(be.Chain(b)) - txBody.SetProposalKey(serviceAccount.Address, 0, serviceAccount.RetAndIncSeqNumber()) - txBody.AddArgument(jsoncdc.MustEncode(value)) - txBody.AddArgument(jsoncdc.MustEncode(cadence.Address(a))) - txBody.AddAuthorizer(serviceAccount.Address) - txBody.SetPayer(serviceAccount.Address) - - err := testutil.SignEnvelope(txBody, serviceAccount.Address, serviceAccount.PrivateKey) + txBodyBuilder := transferTokensTx(be.Chain(b)) + txBodyBuilder.SetProposalKey(serviceAccount.Address, 0, serviceAccount.RetAndIncSeqNumber()). + AddArgument(jsoncdc.MustEncode(value)). + AddArgument(jsoncdc.MustEncode(cadence.Address(a))). + AddAuthorizer(serviceAccount.Address). + SetPayer(serviceAccount.Address) + + err := testutil.SignEnvelope(txBodyBuilder, serviceAccount.Address, serviceAccount.PrivateKey) + require.NoError(b, err) + + txBody, err := txBodyBuilder.Build() require.NoError(b, err) computationResult := be.ExecuteCollections(b, [][]*flow.TransactionBody{{txBody}}) @@ -794,29 +1018,28 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return fmt.Sprintf(` import NonFungibleToken from 0x%s - pub contract BatchNFT: NonFungibleToken { - pub event ContractInitialized() - pub event PlayCreated(id: UInt32, metadata: {String:String}) - pub event NewSeriesStarted(newCurrentSeries: UInt32) - pub event SetCreated(setID: UInt32, series: UInt32) - pub event PlayAddedToSet(setID: UInt32, playID: UInt32) - pub event PlayRetiredFromSet(setID: UInt32, playID: UInt32, numTestTokens: UInt32) - pub event SetLocked(setID: UInt32) - pub event TestTokenMinted(testTokenID: UInt64, playID: UInt32, setID: UInt32, serialNumber: UInt32) - pub event Withdraw(id: UInt64, from: Address?) - pub event Deposit(id: UInt64, to: Address?) - pub event TestTokenDestroyed(id: UInt64) - pub var currentSeries: UInt32 + access(all) contract BatchNFT: NonFungibleToken { + access(all) event ContractInitialized() + access(all) event PlayCreated(id: UInt32, metadata: {String:String}) + access(all) event NewSeriesStarted(newCurrentSeries: UInt32) + access(all) event SetCreated(setID: UInt32, series: UInt32) + access(all) event PlayAddedToSet(setID: UInt32, playID: UInt32) + access(all) event PlayRetiredFromSet(setID: UInt32, playID: UInt32, numTestTokens: UInt32) + access(all) event SetLocked(setID: UInt32) + access(all) event TestTokenMinted(testTokenID: UInt64, playID: UInt32, setID: UInt32, serialNumber: UInt32) + access(all) event Withdraw(id: UInt64, from: Address?) + access(all) event Deposit(id: UInt64, to: Address?) 
+ access(all) var currentSeries: UInt32 access(self) var playDatas: {UInt32: Play} access(self) var setDatas: {UInt32: SetData} access(self) var sets: @{UInt32: Set} - pub var nextPlayID: UInt32 - pub var nextSetID: UInt32 - pub var totalSupply: UInt64 + access(all) var nextPlayID: UInt32 + access(all) var nextSetID: UInt32 + access(all) var totalSupply: UInt64 - pub struct Play { - pub let playID: UInt32 - pub let metadata: {String: String} + access(all) struct Play { + access(all) let playID: UInt32 + access(all) let metadata: {String: String} init(metadata: {String: String}) { pre { @@ -830,10 +1053,10 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } } - pub struct SetData { - pub let setID: UInt32 - pub let name: String - pub let series: UInt32 + access(all) struct SetData { + access(all) let setID: UInt32 + access(all) let name: String + access(all) let series: UInt32 init(name: String) { pre { name.length > 0: "New Set name cannot be empty" @@ -846,12 +1069,12 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } } - pub resource Set { - pub let setID: UInt32 - pub var plays: [UInt32] - pub var retired: {UInt32: Bool} - pub var locked: Bool - pub var numberMintedPerPlay: {UInt32: UInt32} + access(all) resource Set { + access(all) let setID: UInt32 + access(all) var plays: [UInt32] + access(all) var retired: {UInt32: Bool} + access(all) var locked: Bool + access(all) var numberMintedPerPlay: {UInt32: UInt32} init(name: String) { self.setID = BatchNFT.nextSetID @@ -863,7 +1086,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc BatchNFT.setDatas[self.setID] = SetData(name: name) } - pub fun addPlay(playID: UInt32) { + access(all) fun addPlay(playID: UInt32) { pre { BatchNFT.playDatas[playID] != nil: "Cannot add the Play to Set: Play doesn't exist" !self.locked: "Cannot add the play to the Set after the set has been locked" @@ -876,13 +1099,13 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc emit PlayAddedToSet(setID: self.setID, playID: playID) } - pub fun addPlays(playIDs: [UInt32]) { + access(all) fun addPlays(playIDs: [UInt32]) { for play in playIDs { self.addPlay(playID: play) } } - pub fun retirePlay(playID: UInt32) { + access(all) fun retirePlay(playID: UInt32) { pre { self.retired[playID] != nil: "Cannot retire the Play: Play doesn't exist in this set!" 
} @@ -894,20 +1117,20 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } } - pub fun retireAll() { + access(all) fun retireAll() { for play in self.plays { self.retirePlay(playID: play) } } - pub fun lock() { + access(all) fun lock() { if !self.locked { self.locked = true emit SetLocked(setID: self.setID) } } - pub fun mintTestToken(playID: UInt32): @NFT { + access(all) fun mintTestToken(playID: UInt32): @NFT { pre { self.retired[playID] != nil: "Cannot mint the testToken: This play doesn't exist" !self.retired[playID]!: "Cannot mint the testToken from this play: This play has been retired" @@ -922,7 +1145,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return <-newTestToken } - pub fun batchMintTestToken(playID: UInt32, quantity: UInt64): @Collection { + access(all) fun batchMintTestToken(playID: UInt32, quantity: UInt64): @Collection { let newCollection <- create Collection() var i: UInt64 = 0 @@ -935,10 +1158,10 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } } - pub struct TestTokenData { - pub let setID: UInt32 - pub let playID: UInt32 - pub let serialNumber: UInt32 + access(all) struct TestTokenData { + access(all) let setID: UInt32 + access(all) let playID: UInt32 + access(all) let serialNumber: UInt32 init(setID: UInt32, playID: UInt32, serialNumber: UInt32) { self.setID = setID @@ -948,9 +1171,9 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } - pub resource NFT: NonFungibleToken.INFT { - pub let id: UInt64 - pub let data: TestTokenData + access(all) resource NFT: NonFungibleToken.INFT { + access(all) let id: UInt64 + access(all) let data: TestTokenData init(serialNumber: UInt32, playID: UInt32, setID: UInt32) { BatchNFT.totalSupply = BatchNFT.totalSupply + UInt64(1) @@ -961,14 +1184,10 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc emit TestTokenMinted(testTokenID: self.id, playID: playID, setID: self.data.setID, serialNumber: self.data.serialNumber) } - - destroy() { - emit TestTokenDestroyed(id: self.id) - } } - pub resource Admin { - pub fun createPlay(metadata: {String: String}): UInt32 { + access(all) resource Admin { + access(all) fun createPlay(metadata: {String: String}): UInt32 { var newPlay = Play(metadata: metadata) let newID = newPlay.playID @@ -977,20 +1196,20 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return newID } - pub fun createSet(name: String) { + access(all) fun createSet(name: String) { var newSet <- create Set(name: name) BatchNFT.sets[newSet.setID] <-! newSet } - pub fun borrowSet(setID: UInt32): &Set { + access(all) fun borrowSet(setID: UInt32): &Set { pre { BatchNFT.sets[setID] != nil: "Cannot borrow Set: The Set doesn't exist" } return (&BatchNFT.sets[setID] as &Set?)! 
} - pub fun startNewSeries(): UInt32 { + access(all) fun startNewSeries(): UInt32 { BatchNFT.currentSeries = BatchNFT.currentSeries + UInt32(1) emit NewSeriesStarted(newCurrentSeries: BatchNFT.currentSeries) @@ -998,17 +1217,17 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return BatchNFT.currentSeries } - pub fun createNewAdmin(): @Admin { + access(all) fun createNewAdmin(): @Admin { return <-create Admin() } } - pub resource interface TestTokenCollectionPublic { - pub fun deposit(token: @NonFungibleToken.NFT) - pub fun batchDeposit(tokens: @NonFungibleToken.Collection) - pub fun getIDs(): [UInt64] - pub fun borrowNFT(id: UInt64): &NonFungibleToken.NFT - pub fun borrowTestToken(id: UInt64): &BatchNFT.NFT? { + access(all) resource interface TestTokenCollectionPublic { + access(all) fun deposit(token: @NonFungibleToken.NFT) + access(all) fun batchDeposit(tokens: @NonFungibleToken.Collection) + access(all) fun getIDs(): [UInt64] + access(all) fun borrowNFT(id: UInt64): &NonFungibleToken.NFT + access(all) fun borrowTestToken(id: UInt64): &BatchNFT.NFT? { post { (result == nil) || (result?.id == id): "Cannot borrow TestToken reference: The ID of the returned reference is incorrect" @@ -1016,14 +1235,14 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } } - pub resource Collection: TestTokenCollectionPublic, NonFungibleToken.Provider, NonFungibleToken.Receiver, NonFungibleToken.CollectionPublic { - pub var ownedNFTs: @{UInt64: NonFungibleToken.NFT} + access(all) resource Collection: TestTokenCollectionPublic, NonFungibleToken.Provider, NonFungibleToken.Receiver, NonFungibleToken.CollectionPublic { + access(all) var ownedNFTs: @{UInt64: NonFungibleToken.NFT} init() { self.ownedNFTs <- {} } - pub fun withdraw(withdrawID: UInt64): @NonFungibleToken.NFT { + access(all) fun withdraw(withdrawID: UInt64): @NonFungibleToken.NFT { let token <- self.ownedNFTs.remove(key: withdrawID) ?? panic("Cannot withdraw: TestToken does not exist in the collection") @@ -1032,7 +1251,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return <-token } - pub fun batchWithdraw(ids: [UInt64]): @NonFungibleToken.Collection { + access(all) fun batchWithdraw(ids: [UInt64]): @NonFungibleToken.Collection { var batchCollection <- create Collection() for id in ids { @@ -1041,7 +1260,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return <-batchCollection } - pub fun deposit(token: @NonFungibleToken.NFT) { + access(all) fun deposit(token: @NonFungibleToken.NFT) { let token <- token as! @BatchNFT.NFT let id = token.id @@ -1054,7 +1273,7 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc destroy oldToken } - pub fun batchDeposit(tokens: @NonFungibleToken.Collection) { + access(all) fun batchDeposit(tokens: @NonFungibleToken.Collection) { let keys = tokens.getIDs() for key in keys { @@ -1063,15 +1282,15 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc destroy tokens } - pub fun getIDs(): [UInt64] { + access(all) fun getIDs(): [UInt64] { return self.ownedNFTs.keys } - pub fun borrowNFT(id: UInt64): &NonFungibleToken.NFT { + access(all) fun borrowNFT(id: UInt64): &NonFungibleToken.NFT { return (&self.ownedNFTs[id] as &NonFungibleToken.NFT?)! } - pub fun borrowTestToken(id: UInt64): &BatchNFT.NFT? { + access(all) fun borrowTestToken(id: UInt64): &BatchNFT.NFT? 
{ if self.ownedNFTs[id] != nil { let ref = (&self.ownedNFTs[id] as auth &NonFungibleToken.NFT?)! return ref as! &BatchNFT.NFT @@ -1079,24 +1298,21 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc return nil } } - destroy() { - destroy self.ownedNFTs - } } - pub fun createEmptyCollection(): @NonFungibleToken.Collection { + access(all) fun createEmptyCollection(): @NonFungibleToken.Collection { return <-create BatchNFT.Collection() } - pub fun getAllPlays(): [BatchNFT.Play] { + access(all) fun getAllPlays(): [BatchNFT.Play] { return BatchNFT.playDatas.values } - pub fun getPlayMetaData(playID: UInt32): {String: String}? { + access(all) fun getPlayMetaData(playID: UInt32): {String: String}? { return self.playDatas[playID]?.metadata } - pub fun getPlayMetaDataByField(playID: UInt32, field: String): String? { + access(all) fun getPlayMetaDataByField(playID: UInt32, field: String): String? { if let play = BatchNFT.playDatas[playID] { return play.metadata[field] } else { @@ -1104,15 +1320,15 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } } - pub fun getSetName(setID: UInt32): String? { + access(all) fun getSetName(setID: UInt32): String? { return BatchNFT.setDatas[setID]?.name } - pub fun getSetSeries(setID: UInt32): UInt32? { + access(all) fun getSetSeries(setID: UInt32): UInt32? { return BatchNFT.setDatas[setID]?.series } - pub fun getSetIDsByName(setName: String): [UInt32]? { + access(all) fun getSetIDsByName(setName: String): [UInt32]? { var setIDs: [UInt32] = [] for setData in BatchNFT.setDatas.values { @@ -1128,11 +1344,11 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } } - pub fun getPlaysInSet(setID: UInt32): [UInt32]? { + access(all) fun getPlaysInSet(setID: UInt32): [UInt32]? { return BatchNFT.sets[setID]?.plays } - pub fun isEditionRetired(setID: UInt32, playID: UInt32): Bool? { + access(all) fun isEditionRetired(setID: UInt32, playID: UInt32): Bool? { if let setToRead <- BatchNFT.sets.remove(key: setID) { let retired = setToRead.retired[playID] BatchNFT.sets[setID] <-! setToRead @@ -1142,11 +1358,11 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc } } - pub fun isSetLocked(setID: UInt32): Bool? { + access(all) fun isSetLocked(setID: UInt32): Bool? { return BatchNFT.sets[setID]?.locked } - pub fun getNumTestTokensInEdition(setID: UInt32, playID: UInt32): UInt32? { + access(all) fun getNumTestTokensInEdition(setID: UInt32, playID: UInt32): UInt32? { if let setToRead <- BatchNFT.sets.remove(key: setID) { let amount = setToRead.numberMintedPerPlay[playID] BatchNFT.sets[setID] <-! 
setToRead @@ -1165,9 +1381,12 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc self.nextSetID = 1 self.totalSupply = 0 - self.account.save<@Collection>(<- create Collection(), to: /storage/TestTokenCollection) - self.account.link<&{TestTokenCollectionPublic}>(/public/TestTokenCollection, target: /storage/TestTokenCollection) - self.account.save<@Admin>(<- create Admin(), to: /storage/BatchNFTAdmin) + self.account.storage.save<@Collection>(<- create Collection(), to: /storage/TestTokenCollection) + + let collectionCap = self.account.capabilities.storage.issue<&{TestTokenCollectionPublic}>(/storage/TestTokenCollection) + self.account.capabilities.publish(collectionCap, at: /public/TestTokenCollection) + + self.account.storage.save<@Admin>(<- create Admin(), to: /storage/BatchNFTAdmin) emit ContractInitialized() } } @@ -1178,44 +1397,44 @@ func deployBatchNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAcc func deployNFT(b *testing.B, be TestBenchBlockExecutor, owner *TestBenchAccount) { const nftContract = ` - pub contract interface NonFungibleToken { - pub var totalSupply: UInt64 - pub event ContractInitialized() - pub event Withdraw(id: UInt64, from: Address?) - pub event Deposit(id: UInt64, to: Address?) - pub resource interface INFT { - pub let id: UInt64 + access(all) contract interface NonFungibleToken { + access(all) var totalSupply: UInt64 + access(all) event ContractInitialized() + access(all) event Withdraw(id: UInt64, from: Address?) + access(all) event Deposit(id: UInt64, to: Address?) + access(all) resource interface INFT { + access(all) let id: UInt64 } - pub resource NFT: INFT { - pub let id: UInt64 + access(all) resource NFT: INFT { + access(all) let id: UInt64 } - pub resource interface Provider { - pub fun withdraw(withdrawID: UInt64): @NFT { + access(all) resource interface Provider { + access(all) fun withdraw(withdrawID: UInt64): @NFT { post { result.id == withdrawID: "The ID of the withdrawn token must be the same as the requested ID" } } } - pub resource interface Receiver { - pub fun deposit(token: @NFT) + access(all) resource interface Receiver { + access(all) fun deposit(token: @NFT) } - pub resource interface CollectionPublic { - pub fun deposit(token: @NFT) - pub fun getIDs(): [UInt64] - pub fun borrowNFT(id: UInt64): &NFT + access(all) resource interface CollectionPublic { + access(all) fun deposit(token: @NFT) + access(all) fun getIDs(): [UInt64] + access(all) fun borrowNFT(id: UInt64): &NFT } - pub resource Collection: Provider, Receiver, CollectionPublic { - pub var ownedNFTs: @{UInt64: NFT} - pub fun withdraw(withdrawID: UInt64): @NFT - pub fun deposit(token: @NFT) - pub fun getIDs(): [UInt64] - pub fun borrowNFT(id: UInt64): &NFT { + access(all) resource Collection: Provider, Receiver, CollectionPublic { + access(all) var ownedNFTs: @{UInt64: NFT} + access(all) fun withdraw(withdrawID: UInt64): @NFT + access(all) fun deposit(token: @NFT) + access(all) fun getIDs(): [UInt64] + access(all) fun borrowNFT(id: UInt64): &NFT { pre { self.ownedNFTs[id] != nil: "NFT does not exist in the collection!" } } } - pub fun createEmptyCollection(): @Collection { + access(all) fun createEmptyCollection(): @Collection { post { result.getIDs().length == 0: "The created collection must be empty!" 
} diff --git a/fvm/fvm_blockcontext_test.go b/fvm/fvm_blockcontext_test.go index bb94ad2abb9..4974acc22b9 100644 --- a/fvm/fvm_blockcontext_test.go +++ b/fvm/fvm_blockcontext_test.go @@ -10,26 +10,31 @@ import ( "testing" "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/stdlib" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/blueprints" envMock "github.com/onflow/flow-go/fvm/environment/mock" - errors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) -func transferTokensTx(chain flow.Chain) *flow.TransactionBody { - return flow.NewTransactionBody(). - SetScript([]byte(fmt.Sprintf(` +func transferTokensTx(chain flow.Chain) *flow.TransactionBodyBuilder { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + return flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf( + ` // This transaction is a template for a transaction that // could be used by anyone to send tokens to another account // that has been set up to receive tokens. @@ -43,12 +48,12 @@ func transferTokensTx(chain flow.Chain) *flow.TransactionBody { transaction(amount: UFix64, to: Address) { // The Vault resource that holds the tokens that are being transferred - let sentVault: @FungibleToken.Vault + let sentVault: @{FungibleToken.Vault} - prepare(signer: AuthAccount) { + prepare(signer: auth(BorrowValue) &Account) { // Get a reference to the signer's stored vault - let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner's Vault!") // Withdraw tokens from the signer's stored vault @@ -61,14 +66,16 @@ func transferTokensTx(chain flow.Chain) *flow.TransactionBody { let recipient = getAccount(to) // Get a reference to the recipient's Receiver - let receiverRef = recipient.getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() + let receiverRef = recipient.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? panic("Could not borrow receiver reference to the recipient's Vault") // Deposit the withdrawn tokens in the recipient's receiver receiverRef.deposit(from: <-self.sentVault) } - }`, fvm.FungibleTokenAddress(chain), fvm.FlowTokenAddress(chain))), + }`, + sc.FungibleToken.Address.Hex(), + sc.FlowToken.Address.Hex(), + )), ) } @@ -96,15 +103,18 @@ func TestBlockContext_ExecuteTransaction(t *testing.T) { ) t.Run("Success", func(t *testing.T) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { - prepare(signer: AuthAccount) {} + prepare(signer: &Account) {} } `)). 
AddAuthorizer(unittest.AddressFixture()) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -116,12 +126,12 @@ func TestBlockContext_ExecuteTransaction(t *testing.T) { }) t.Run("Failure", func(t *testing.T) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { var x: Int - prepare(signer: AuthAccount) { + prepare(signer: &Account) { self.x = 0 } @@ -135,7 +145,10 @@ func TestBlockContext_ExecuteTransaction(t *testing.T) { } `)) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -147,7 +160,7 @@ func TestBlockContext_ExecuteTransaction(t *testing.T) { }) t.Run("Logs", func(t *testing.T) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { execute { @@ -157,7 +170,10 @@ func TestBlockContext_ExecuteTransaction(t *testing.T) { } `)) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -171,17 +187,20 @@ func TestBlockContext_ExecuteTransaction(t *testing.T) { }) t.Run("Events", func(t *testing.T) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { - prepare(signer: AuthAccount) { - AuthAccount(payer: signer) + prepare(signer: auth(BorrowValue) &Account) { + Account(payer: signer) } } `)). AddAuthorizer(chain.ServiceAddress()) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -221,20 +240,22 @@ func TestBlockContext_DeployContract(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.DeployCounterContractTransaction(accounts[0], chain) + txBodyBuilder := testutil.DeployCounterContractTransaction(accounts[0], chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). + SetPayer(chain.ServiceAddress()) - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(chain.ServiceAddress()) - - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + _, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -258,20 +279,22 @@ func TestBlockContext_DeployContract(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.DeployCounterContractTransaction(accounts[0], chain) - - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(chain.ServiceAddress()) + txBodyBuilder := testutil.DeployCounterContractTransaction(accounts[0], chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). 
+ SetPayer(chain.ServiceAddress()) - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + executionSnapshot, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -282,12 +305,16 @@ func TestBlockContext_DeployContract(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) // transaction will panic if `contracts.names` is incorrect - txBody = flow.NewTransactionBody(). + txBodyBuilder = flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { - prepare(signer: AuthAccount) { + prepare(signer: &Account) { var s : String = "" - for name in signer.contracts.names { + let names = signer.contracts.names + var i = 0 + while i < names.length { + let name = names[i] + i = i + 1 s = s.concat(name).concat(",") } if s != "Container," { @@ -296,20 +323,22 @@ func TestBlockContext_DeployContract(t *testing.T) { } } `)). - AddAuthorizer(accounts[0]) + AddAuthorizer(accounts[0]). + SetProposalKey(chain.ServiceAddress(), 0, 1). + SetPayer(chain.ServiceAddress()) - txBody.SetProposalKey(chain.ServiceAddress(), 0, 1) - txBody.SetPayer(chain.ServiceAddress()) - - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + _, output, err = vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -332,22 +361,22 @@ func TestBlockContext_DeployContract(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.DeployLocalReplayLimitedTransaction( - accounts[0], - chain) + txBodyBuilder := testutil.DeployLocalReplayLimitedTransaction(accounts[0], chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). + SetPayer(chain.ServiceAddress()) - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(chain.ServiceAddress()) - - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + _, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -376,22 +405,24 @@ func TestBlockContext_DeployContract(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.DeployGlobalReplayLimitedTransaction( + txBodyBuilder := testutil.DeployGlobalReplayLimitedTransaction( accounts[0], - chain) + chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). 
+ SetPayer(chain.ServiceAddress()) - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(chain.ServiceAddress()) - - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + _, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -420,10 +451,13 @@ func TestBlockContext_DeployContract(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.DeployUnauthorizedCounterContractTransaction( + txBodyBuilder := testutil.DeployUnauthorizedCounterContractTransaction( accounts[0]) - err = testutil.SignTransaction(txBody, accounts[0], privateKeys[0], 0) + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKeys[0], 0) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -465,12 +499,14 @@ func TestBlockContext_DeployContract(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.DeployUnauthorizedCounterContractTransaction( - accounts[0]) - txBody.SetProposalKey(accounts[0], 0, 0) - txBody.SetPayer(accounts[0]) + txBodyBuilder := testutil.DeployUnauthorizedCounterContractTransaction(accounts[0]). + SetProposalKey(accounts[0], 0, 0). + SetPayer(accounts[0]) - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -507,12 +543,13 @@ func TestBlockContext_DeployContract(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.DeployUnauthorizedCounterContractTransaction( - accounts[0]) - txBody.SetProposalKey(accounts[0], 0, 0) - txBody.SetPayer(accounts[0]) + txBodyBuilder := testutil.DeployUnauthorizedCounterContractTransaction(accounts[0]). + SetProposalKey(accounts[0], 0, 0). + SetPayer(accounts[0]) + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -537,19 +574,22 @@ func TestBlockContext_DeployContract(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.DeployCounterContractTransaction(accounts[0], chain) - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(chain.ServiceAddress()) + txBodyBuilder := testutil.DeployCounterContractTransaction(accounts[0], chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). 
+ SetPayer(chain.ServiceAddress()) - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + executionSnapshot, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -559,12 +599,13 @@ func TestBlockContext_DeployContract(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - txBody = testutil.UpdateUnauthorizedCounterContractTransaction( - accounts[0]) - txBody.SetProposalKey(accounts[0], 0, 0) - txBody.SetPayer(accounts[0]) + txBodyBuilder = testutil.UpdateUnauthorizedCounterContractTransaction(accounts[0]). + SetProposalKey(accounts[0], 0, 0). + SetPayer(accounts[0]) + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + txBody, err = txBodyBuilder.Build() require.NoError(t, err) _, output, err = vm.Run( @@ -590,19 +631,22 @@ func TestBlockContext_DeployContract(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.DeployCounterContractTransaction(accounts[0], chain) - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(chain.ServiceAddress()) + txBodyBuilder := testutil.DeployCounterContractTransaction(accounts[0], chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). + SetPayer(chain.ServiceAddress()) - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + executionSnapshot, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -612,12 +656,14 @@ func TestBlockContext_DeployContract(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - txBody = testutil.RemoveUnauthorizedCounterContractTransaction( - accounts[0]) - txBody.SetProposalKey(accounts[0], 0, 0) - txBody.SetPayer(accounts[0]) + txBodyBuilder = testutil.RemoveUnauthorizedCounterContractTransaction(accounts[0]). + SetProposalKey(accounts[0], 0, 0). + SetPayer(accounts[0]) + + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + txBody, err = txBodyBuilder.Build() require.NoError(t, err) _, output, err = vm.Run( @@ -648,19 +694,22 @@ func TestBlockContext_DeployContract(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.DeployCounterContractTransaction(accounts[0], chain) - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(chain.ServiceAddress()) + txBodyBuilder := testutil.DeployCounterContractTransaction(accounts[0], chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). 
+ SetPayer(chain.ServiceAddress()) - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + executionSnapshot, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -670,19 +719,22 @@ func TestBlockContext_DeployContract(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - txBody = testutil.RemoveCounterContractTransaction(accounts[0], chain) - txBody.SetProposalKey(accounts[0], 0, 0) - txBody.SetPayer(chain.ServiceAddress()) + txBodyBuilder = testutil.RemoveCounterContractTransaction(accounts[0], chain). + SetProposalKey(accounts[0], 0, 0). + SetPayer(chain.ServiceAddress()) - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + _, output, err = vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -706,19 +758,22 @@ func TestBlockContext_DeployContract(t *testing.T) { require.NoError(t, err) // setup a new authorizer account - authTxBody, err := blueprints.SetContractDeploymentAuthorizersTransaction( + authTxBodyBuilder, err := blueprints.SetContractDeploymentAuthorizersTransaction( chain.ServiceAddress(), []flow.Address{chain.ServiceAddress(), accounts[0]}) require.NoError(t, err) - authTxBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - authTxBody.SetPayer(chain.ServiceAddress()) + authTxBodyBuilder.SetProposalKey(chain.ServiceAddress(), 0, 0). + SetPayer(chain.ServiceAddress()) err = testutil.SignEnvelope( - authTxBody, + authTxBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + authTxBody, err := authTxBodyBuilder.Build() + require.NoError(t, err) + executionSnapshot, output, err := vm.Run( ctx, fvm.Transaction(authTxBody, 0), @@ -729,11 +784,14 @@ func TestBlockContext_DeployContract(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) // test deploying a new contract (not authorized by service account) - txBody := testutil.DeployUnauthorizedCounterContractTransaction(accounts[0]) - txBody.SetProposalKey(accounts[0], 0, 0) - txBody.SetPayer(accounts[0]) + txBodyBuilder := testutil.DeployUnauthorizedCounterContractTransaction(accounts[0]). + SetProposalKey(accounts[0], 0, 0). + SetPayer(accounts[0]) + + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err = vm.Run( @@ -801,7 +859,7 @@ func TestBlockContext_ExecuteTransaction_WithArguments(t *testing.T) { label: "Parameters and authorizer", script: ` transaction(x: Int, y: String) { - prepare(acct: AuthAccount) { log(acct.address) } + prepare(acct: &Account) { log(acct.address) } execute { log(x); log(y) } }`, args: [][]byte{arg1, arg2}, @@ -818,15 +876,17 @@ func TestBlockContext_ExecuteTransaction_WithArguments(t *testing.T) { for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { - txBody := flow.NewTransactionBody(). 
+ txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(tt.script)). SetArguments(tt.args) for _, authorizer := range tt.authorizers { - txBody.AddAuthorizer(authorizer) + txBodyBuilder.AddAuthorizer(authorizer) } - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -841,7 +901,7 @@ func TestBlockContext_ExecuteTransaction_WithArguments(t *testing.T) { } func gasLimitScript(depth int) string { return fmt.Sprintf(` - pub fun foo(_ i: Int) { + access(all) fun foo(_ i: Int) { if i <= 0 { return } @@ -900,11 +960,13 @@ func TestBlockContext_ExecuteTransaction_GasLimit(t *testing.T) { for _, tt := range tests { t.Run(tt.label, func(t *testing.T) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(tt.script)). - SetGasLimit(tt.gasLimit) + SetComputeLimit(tt.gasLimit) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -930,7 +992,7 @@ func TestBlockContext_ExecuteTransaction_StorageLimit(t *testing.T) { script := fmt.Sprintf(` access(all) contract Container { access(all) resource Counter { - pub var longString: String + access(all) var longString: String init() { self.longString = "%s" } @@ -962,24 +1024,26 @@ func TestBlockContext_ExecuteTransaction_StorageLimit(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.CreateContractDeploymentTransaction( + txBodyBuilder := testutil.CreateContractDeploymentTransaction( "Container", script, accounts[0], - chain) - - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(chain.ServiceAddress()) + chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). + SetPayer(chain.ServiceAddress()) - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + _, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -1008,41 +1072,47 @@ func TestBlockContext_ExecuteTransaction_StorageLimit(t *testing.T) { chain) require.NoError(t, err) + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) // deposit more flow to increase capacity - txBody := flow.NewTransactionBody(). - SetScript([]byte(fmt.Sprintf(` + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf( + ` import FungibleToken from %s import FlowToken from %s transaction { - prepare(signer: AuthAccount, service: AuthAccount) { + prepare(signer: auth(AddContract) &Account, service: auth(BorrowValue) &Account) { signer.contracts.add(name: "%s", code: "%s".decodeHex()) - let vaultRef = service.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault)! + let vaultRef = service.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault)! // deposit additional flow let payment <- vaultRef.withdraw(amount: 10.0) as! 
@FlowToken.Vault - let receiver = signer.getCapability(/public/flowTokenReceiver)!.borrow<&{FungibleToken.Receiver}>() + let receiver = signer.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? panic("Could not borrow receiver reference to the recipient's Vault") receiver.deposit(from: <-payment) } - }`, fvm.FungibleTokenAddress(chain).HexWithPrefix(), - fvm.FlowTokenAddress(chain).HexWithPrefix(), + }`, + sc.FungibleToken.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix(), "Container", - hex.EncodeToString([]byte(script))))). + hex.EncodeToString([]byte(script)), + ))). AddAuthorizer(accounts[0]). AddAuthorizer(chain.ServiceAddress()). SetProposalKey(chain.ServiceAddress(), 0, 0). SetPayer(chain.ServiceAddress()) - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) _, output, err := vm.Run( ctx, @@ -1065,7 +1135,7 @@ func TestBlockContext_ExecuteTransaction_InteractionLimitReached(t *testing.T) { script := fmt.Sprintf(` access(all) contract Container { access(all) resource Counter { - pub var longString: String + access(all) var longString: String init() { self.longString = "%s" } @@ -1102,7 +1172,7 @@ func TestBlockContext_ExecuteTransaction_InteractionLimitReached(t *testing.T) { return uint64(sn) } // fund account so the payer can pay for the next transaction. - txBody := transferTokensTx(chain). + txBodyBuilder := transferTokensTx(chain). SetProposalKey(chain.ServiceAddress(), 0, seqNum()). AddAuthorizer(chain.ServiceAddress()). AddArgument( @@ -1112,10 +1182,12 @@ func TestBlockContext_ExecuteTransaction_InteractionLimitReached(t *testing.T) { SetPayer(chain.ServiceAddress()) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -1128,22 +1200,23 @@ func TestBlockContext_ExecuteTransaction_InteractionLimitReached(t *testing.T) { ctx.MaxStateInteractionSize = 500_000 - txBody = testutil.CreateContractDeploymentTransaction( + txBodyBuilder = testutil.CreateContractDeploymentTransaction( "Container", script, accounts[0], - chain) - - txBody.SetProposalKey(chain.ServiceAddress(), 0, seqNum()) - txBody.SetPayer(accounts[0]) + chain). + SetProposalKey(chain.ServiceAddress(), 0, seqNum()). + SetPayer(accounts[0]) err = testutil.SignPayload( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) + txBody, err = txBodyBuilder.Build() require.NoError(t, err) _, output, err = vm.Run( @@ -1176,23 +1249,24 @@ func TestBlockContext_ExecuteTransaction_InteractionLimitReached(t *testing.T) { chain) require.NoError(t, err) - txBody := testutil.CreateContractDeploymentTransaction( + txBodyBuilder := testutil.CreateContractDeploymentTransaction( "Container", script, accounts[0], - chain) - - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(chain.ServiceAddress()) + chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). 
+ SetPayer(chain.ServiceAddress()) - err = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) _, output, err := vm.Run( ctx, @@ -1224,21 +1298,23 @@ func TestBlockContext_ExecuteTransaction_InteractionLimitReached(t *testing.T) { chain) require.NoError(t, err) - _, txBody := testutil.CreateMultiAccountCreationTransaction( + _, txBodyBuilder := testutil.CreateMultiAccountCreationTransaction( t, chain, 40) - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(accounts[0]) + txBodyBuilder.SetProposalKey(chain.ServiceAddress(), 0, 0). + SetPayer(accounts[0]) err = testutil.SignPayload( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1254,8 +1330,8 @@ func TestBlockContext_ExecuteTransaction_InteractionLimitReached(t *testing.T) { var createAccountScript = []byte(` transaction { - prepare(signer: AuthAccount) { - let acct = AuthAccount(payer: signer) + prepare(signer: auth(BorrowValue) &Account) { + let acct = Account(payer: signer) } } `) @@ -1273,7 +1349,7 @@ func TestBlockContext_ExecuteScript(t *testing.T) { t.Run("script success", func(t *testing.T) { code := []byte(` - pub fun main(): Int { + access(all) fun main(): Int { return 42 } `) @@ -1289,7 +1365,7 @@ func TestBlockContext_ExecuteScript(t *testing.T) { t.Run("script failure", func(t *testing.T) { code := []byte(` - pub fun main(): Int { + access(all) fun main(): Int { assert(1 == 2) return 42 } @@ -1306,7 +1382,7 @@ func TestBlockContext_ExecuteScript(t *testing.T) { t.Run("script logs", func(t *testing.T) { code := []byte(` - pub fun main(): Int { + access(all) fun main(): Int { log("foo") log("bar") return 42 @@ -1342,17 +1418,17 @@ func TestBlockContext_ExecuteScript(t *testing.T) { // Deploy the test contract const contract = ` - pub contract Test { + access(all) contract Test { - pub struct Foo {} + access(all) struct Foo {} - pub let foos: [Foo] + access(all) let foos: [Foo] init() { self.foos = [] } - pub fun add() { + access(all) fun add() { self.foos.append(Foo()) } } @@ -1360,23 +1436,24 @@ func TestBlockContext_ExecuteScript(t *testing.T) { address := accounts[0] - txBody := testutil.CreateContractDeploymentTransaction( + txBodyBuilder := testutil.CreateContractDeploymentTransaction( "Test", contract, address, - chain) + chain). + SetProposalKey(chain.ServiceAddress(), 0, 0). 
+ SetPayer(chain.ServiceAddress()) - txBody.SetProposalKey(chain.ServiceAddress(), 0, 0) - txBody.SetPayer(chain.ServiceAddress()) - - err = testutil.SignPayload(txBody, address, privateKeys[0]) + err = testutil.SignPayload(txBodyBuilder, address, privateKeys[0]) require.NoError(t, err) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -1393,7 +1470,7 @@ func TestBlockContext_ExecuteScript(t *testing.T) { ` import Test from 0x%s - pub fun main() { + access(all) fun main() { Test.add() } `, @@ -1421,19 +1498,19 @@ func TestBlockContext_GetBlockInfo(t *testing.T) { blocks := new(envMock.Blocks) block1 := unittest.BlockFixture() - block2 := unittest.BlockWithParentFixture(block1.Header) - block3 := unittest.BlockWithParentFixture(block2.Header) + block2 := unittest.BlockWithParentFixture(block1.ToHeader()) + block3 := unittest.BlockWithParentFixture(block2.ToHeader()) - blocks.On("ByHeightFrom", block1.Header.Height, block1.Header).Return(block1.Header, nil) - blocks.On("ByHeightFrom", block2.Header.Height, block1.Header).Return(block2.Header, nil) + blocks.On("ByHeightFrom", block1.Height, block1.ToHeader()).Return(block1.ToHeader(), nil) + blocks.On("ByHeightFrom", block2.Height, block1.ToHeader()).Return(block2.ToHeader(), nil) type logPanic struct{} - blocks.On("ByHeightFrom", block3.Header.Height, block1.Header).Run(func(args mock.Arguments) { panic(logPanic{}) }) + blocks.On("ByHeightFrom", block3.Height, block1.ToHeader()).Run(func(args mock.Arguments) { panic(logPanic{}) }) - blockCtx := fvm.NewContextFromParent(ctx, fvm.WithBlocks(blocks), fvm.WithBlockHeader(block1.Header)) + blockCtx := fvm.NewContextFromParent(ctx, fvm.WithBlocks(blocks), fvm.WithBlockHeader(block1.ToHeader())) t.Run("works as transaction", func(t *testing.T) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). 
SetScript([]byte(` transaction { execute { @@ -1446,7 +1523,9 @@ func TestBlockContext_GetBlockInfo(t *testing.T) { } `)) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1461,10 +1540,10 @@ func TestBlockContext_GetBlockInfo(t *testing.T) { t, fmt.Sprintf( "Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", - block1.Header.Height, - block1.Header.View, + block1.Height, + block1.View, block1.ID(), - float64(block1.Header.Timestamp.Unix()), + float64(block1.Timestamp/1000), // Unix time in seconds ), output.Logs[0], ) @@ -1472,10 +1551,10 @@ func TestBlockContext_GetBlockInfo(t *testing.T) { t, fmt.Sprintf( "Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", - block2.Header.Height, - block2.Header.View, + block2.Height, + block2.View, block2.ID(), - float64(block2.Header.Timestamp.Unix()), + float64(block2.Timestamp/1000), // Unix time in seconds ), output.Logs[1], ) @@ -1483,7 +1562,7 @@ func TestBlockContext_GetBlockInfo(t *testing.T) { t.Run("works as script", func(t *testing.T) { code := []byte(` - pub fun main() { + access(all) fun main() { let block = getCurrentBlock() log(block) @@ -1503,10 +1582,10 @@ func TestBlockContext_GetBlockInfo(t *testing.T) { require.Equal(t, fmt.Sprintf( "Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", - block1.Header.Height, - block1.Header.View, + block1.Height, + block1.View, block1.ID(), - float64(block1.Header.Timestamp.Unix()), + float64(block1.Timestamp/1000), // Unix time in seconds ), output.Logs[0], ) @@ -1514,17 +1593,17 @@ func TestBlockContext_GetBlockInfo(t *testing.T) { t, fmt.Sprintf( "Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", - block2.Header.Height, - block2.Header.View, + block2.Height, + block2.View, block2.ID(), - float64(block2.Header.Timestamp.Unix()), + float64(block2.Timestamp/1000), // Unix time in seconds ), output.Logs[1], ) }) t.Run("panics if external function panics in transaction", func(t *testing.T) { - tx := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). 
SetScript([]byte(` transaction { execute { @@ -1534,12 +1613,14 @@ func TestBlockContext_GetBlockInfo(t *testing.T) { } `)) - err := testutil.SignTransactionAsServiceAccount(tx, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( blockCtx, - fvm.Transaction(tx, 0), + fvm.Transaction(txBody, 0), testutil.RootBootstrappedLedger(vm, ctx)) require.NoError(t, err) require.Error(t, output.Err) @@ -1547,7 +1628,7 @@ func TestBlockContext_GetBlockInfo(t *testing.T) { t.Run("panics if external function panics in script", func(t *testing.T) { script := []byte(` - pub fun main() { + access(all) fun main() { let block = getCurrentBlock() let nextBlock = getBlock(at: block.height + UInt64(2)) } @@ -1579,17 +1660,17 @@ func TestBlockContext_GetAccount(t *testing.T) { sequenceNumber := uint64(0) createAccount := func() (flow.Address, crypto.PublicKey) { - privateKey, txBody := testutil.CreateAccountCreationTransaction( + privateKey, txBodyBuilder := testutil.CreateAccountCreationTransaction( t, chain) - txBody.SetProposalKey(chain.ServiceAddress(), 0, sequenceNumber) - txBody.SetPayer(chain.ServiceAddress()) + txBodyBuilder.SetProposalKey(chain.ServiceAddress(), 0, sequenceNumber) + txBodyBuilder.SetPayer(chain.ServiceAddress()) sequenceNumber++ rootHasher := hash.NewSHA2_256() - err := txBody.SignEnvelope( + err := txBodyBuilder.SignEnvelope( chain.ServiceAddress(), 0, unittest.ServiceAccountPrivateKey.PrivateKey, @@ -1597,6 +1678,9 @@ func TestBlockContext_GetAccount(t *testing.T) { ) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + // execute the transaction executionSnapshot, output, err := vm.Run( ctx, @@ -1613,18 +1697,21 @@ func TestBlockContext_GetAccount(t *testing.T) { // read the address of the account created (e.g. 
"0x01" and convert it // to flow.address) - data, err := jsoncdc.Decode(nil, accountCreatedEvents[0].Payload) + data, err := ccf.Decode(nil, accountCreatedEvents[0].Payload) require.NoError(t, err) - address := flow.ConvertAddress( - data.(cadence.Event).Fields[0].(cadence.Address)) - return address, privateKey.PublicKey( - fvm.AccountKeyWeightThreshold).PublicKey + address := flow.ConvertAddress( + cadence.SearchFieldByName( + data.(cadence.Event), + stdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address), + ) + return address, privateKey.PublicKey(fvm.AccountKeyWeightThreshold).PublicKey } addressGen := chain.NewAddressGenerator() // skip the addresses of 4 reserved accounts - for i := 0; i < 4; i++ { + for i := 0; i < systemcontracts.LastSystemAccountIndex; i++ { _, err := addressGen.NextAddress() require.NoError(t, err) } @@ -1643,7 +1730,7 @@ func TestBlockContext_GetAccount(t *testing.T) { // happy path - get each of the created account and check if it is the right one t.Run("get accounts", func(t *testing.T) { for address, expectedKey := range accounts { - account, err := vm.GetAccount(ctx, address, snapshotTree) + account, err := fvm.GetAccount(ctx, address, snapshotTree) require.NoError(t, err) require.Len(t, account.Keys, 1) @@ -1657,38 +1744,40 @@ func TestBlockContext_GetAccount(t *testing.T) { address, err := addressGen.NextAddress() require.NoError(t, err) - account, err := vm.GetAccount(ctx, address, snapshotTree) + account, err := fvm.GetAccount(ctx, address, snapshotTree) require.True(t, errors.IsAccountNotFoundError(err)) require.Nil(t, account) }) } -func TestBlockContext_UnsafeRandom(t *testing.T) { - - t.Parallel() - +func TestBlockContext_Random(t *testing.T) { chain, vm := createChainAndVm(flow.Mainnet) - - header := &flow.Header{Height: 42} - + header := &flow.Header{HeaderBody: flow.HeaderBody{Height: 42}} + source := testutil.EntropyProviderFixture(nil) ctx := fvm.NewContext( fvm.WithChain(chain), fvm.WithBlockHeader(header), + fvm.WithEntropyProvider(source), fvm.WithCadenceLogging(true), ) - t.Run("works as transaction", func(t *testing.T) { - txBody := flow.NewTransactionBody(). 
- SetScript([]byte(` - transaction { - execute { - let rand = unsafeRandom() - log(rand) - } - } - `)) + txCode := []byte(` + transaction { + execute { + let rand1 = revertibleRandom<UInt64>() + log(rand1) + let rand2 = revertibleRandom<UInt64>() + log(rand2) + } + } + `) + + getTxRandoms := func(t *testing.T) [2]uint64 { + txBodyBuilder := flow.NewTransactionBodyBuilder().SetScript(txCode) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1697,13 +1786,78 @@ func TestBlockContext_UnsafeRandom(t *testing.T) { testutil.RootBootstrappedLedger(vm, ctx)) require.NoError(t, err) require.NoError(t, output.Err) + require.Len(t, output.Logs, 2) - require.Len(t, output.Logs, 1) + r1, err := strconv.ParseUint(output.Logs[0], 10, 64) + require.NoError(t, err) + r2, err := strconv.ParseUint(output.Logs[1], 10, 64) + require.NoError(t, err) + return [2]uint64{r1, r2} + } + + // - checks that revertibleRandom works on transactions + // - (sanity) checks that two successive randoms aren't equal + t.Run("single transaction", func(t *testing.T) { + randoms := getTxRandoms(t) + require.NotEqual(t, randoms[1], randoms[0], "extremely unlikely to be equal") + }) + + // checks that two transactions with different IDs do not generate the same randoms + t.Run("two transactions", func(t *testing.T) { + // getTxRandoms generates different tx IDs because the envelope signature is randomized + randoms1 := getTxRandoms(t) + randoms2 := getTxRandoms(t) + require.NotEqual(t, randoms1[0], randoms2[0], "extremely unlikely to be equal") + }) - num, err := strconv.ParseUint(output.Logs[0], 10, 64) + scriptCode := ` + access(all) + fun main(a: Int8) { + let rand = revertibleRandom<UInt64>() + log(rand) + let rand%[1]d = revertibleRandom<UInt64>() + log(rand%[1]d) + } + ` + + getScriptRandoms := func(t *testing.T, codeSalt int, arg int) [2]uint64 { + script := fvm.Script([]byte(fmt.Sprintf(scriptCode, codeSalt))).
+ WithArguments(jsoncdc.MustEncode(cadence.Int8(arg))) + + _, output, err := vm.Run(ctx, script, testutil.RootBootstrappedLedger(vm, ctx)) require.NoError(t, err) - require.Equal(t, uint64(0x7515f254adc6f8af), num) + require.NoError(t, output.Err) + + r1, err := strconv.ParseUint(output.Logs[0], 10, 64) + require.NoError(t, err) + r2, err := strconv.ParseUint(output.Logs[1], 10, 64) + require.NoError(t, err) + return [2]uint64{r1, r2} + } + + // - checks that revertibleRandom works on scripts + // - (sanity) checks that two successive randoms aren't equal + t.Run("single script", func(t *testing.T) { + randoms := getScriptRandoms(t, 1, 0) + require.NotEqual(t, randoms[1], randoms[0], "extremely unlikely to be equal") + }) + + // checks that two scripts with different codes do not generate the same randoms + t.Run("two script codes", func(t *testing.T) { + // getScriptRandoms generates different script IDs using different codes + randoms1 := getScriptRandoms(t, 1, 0) + randoms2 := getScriptRandoms(t, 2, 0) + require.NotEqual(t, randoms1[0], randoms2[0], "extremely unlikely to be equal") }) + + // checks that two scripts with the same code but different arguments do not generate the same randoms + t.Run("same script codes different arguments", func(t *testing.T) { + // getScriptRandoms generates different script IDs using different arguments + randoms1 := getScriptRandoms(t, 1, 0) + randoms2 := getScriptRandoms(t, 1, 1) + require.NotEqual(t, randoms1[0], randoms2[0], "extremely unlikely to be equal") + }) + } func TestBlockContext_ExecuteTransaction_CreateAccount_WithMonotonicAddresses(t *testing.T) { @@ -1716,11 +1870,13 @@ func TestBlockContext_ExecuteTransaction_CreateAccount_WithMonotonicAddresses(t fvm.WithChain(chain), ) - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript(createAccountScript).
AddAuthorizer(chain.ServiceAddress()) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1734,12 +1890,19 @@ func TestBlockContext_ExecuteTransaction_CreateAccount_WithMonotonicAddresses(t require.Len(t, accountCreatedEvents, 1) - data, err := jsoncdc.Decode(nil, accountCreatedEvents[0].Payload) + data, err := ccf.Decode(nil, accountCreatedEvents[0].Payload) require.NoError(t, err) + address := flow.ConvertAddress( - data.(cadence.Event).Fields[0].(cadence.Address)) + cadence.SearchFieldByName( + data.(cadence.Event), + stdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address), + ) - require.Equal(t, flow.HexToAddress("05"), address) + // convert LastSystemAccountIndex + 1 to a flow.Address + expected := flow.HexToAddress(fmt.Sprintf("0x%02x", systemcontracts.LastSystemAccountIndex+1)) + require.Equal(t, expected, address) } func TestBlockContext_ExecuteTransaction_FailingTransactions(t *testing.T) { @@ -1751,19 +1914,23 @@ func TestBlockContext_ExecuteTransaction_FailingTransactions(t *testing.T) { address flow.Address, ) uint64 { - code := []byte(fmt.Sprintf(` + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` import FungibleToken from 0x%s import FlowToken from 0x%s - pub fun main(account: Address): UFix64 { + access(all) fun main(account: Address): UFix64 { let acct = getAccount(account) - let vaultRef = acct.getCapability(/public/flowTokenBalance) - .borrow<&FlowToken.Vault{FungibleToken.Balance}>() + let vaultRef = acct.capabilities.borrow<&FlowToken.Vault>(/public/flowTokenBalance) ?? panic("Could not borrow Balance reference to the Vault") return vaultRef.balance } - `, fvm.FungibleTokenAddress(chain), fvm.FlowTokenAddress(chain))) + `, + sc.FungibleToken.Address.Hex(), + sc.FlowToken.Address.Hex(), + )) script := fvm.Script(code).WithArguments( jsoncdc.MustEncode(cadence.NewAddress(address)), ) @@ -1771,7 +1938,7 @@ func TestBlockContext_ExecuteTransaction_FailingTransactions(t *testing.T) { _, output, err := vm.Run(ctx, script, storageSnapshot) require.NoError(t, err) require.NoError(t, output.Err) - return output.Value.ToGoValue().(uint64) + return uint64(output.Value.(cadence.UFix64)) } t.Run("Transaction fails because of storage", newVMTest().withBootstrapProcedureOptions( @@ -1779,6 +1946,8 @@ func TestBlockContext_ExecuteTransaction_FailingTransactions(t *testing.T) { fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), fvm.WithExecutionMemoryLimit(math.MaxUint64), + // The evm account has a storage exception, and if we don't bootstrap with evm, + // the first created account will have that address. ).run( func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { ctx.LimitAccountStorage = true // this test requires storage limits to be enforced @@ -1803,16 +1972,16 @@ func TestBlockContext_ExecuteTransaction_FailingTransactions(t *testing.T) { snapshotTree, accounts[0]) - txBody := transferTokensTx(chain). + txBodyBuilder := transferTokensTx(chain). AddAuthorizer(accounts[0]). AddArgument(jsoncdc.MustEncode(cadence.UFix64(1))). - AddArgument(jsoncdc.MustEncode( - cadence.NewAddress(chain.ServiceAddress()))) + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(chain.ServiceAddress()))). 
+ SetProposalKey(accounts[0], 0, 0). + SetPayer(accounts[0]) - txBody.SetProposalKey(accounts[0], 0, 0) - txBody.SetPayer(accounts[0]) - - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) executionSnapshot, output, err := vm.Run( @@ -1870,15 +2039,16 @@ func TestBlockContext_ExecuteTransaction_FailingTransactions(t *testing.T) { accounts[0]) // transfer tokens to non-existent account - txBody := transferTokensTx(chain). + txBodyBuilder := transferTokensTx(chain). AddAuthorizer(accounts[0]). AddArgument(jsoncdc.MustEncode(cadence.UFix64(1))). - AddArgument(jsoncdc.MustEncode(cadence.NewAddress(lastAddress))) + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(lastAddress))). + SetProposalKey(accounts[0], 0, 0). + SetPayer(accounts[0]) - txBody.SetProposalKey(accounts[0], 0, 0) - txBody.SetPayer(accounts[0]) - - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) executionSnapshot, output, err := vm.Run( @@ -1924,17 +2094,17 @@ func TestBlockContext_ExecuteTransaction_FailingTransactions(t *testing.T) { chain) require.NoError(t, err) - txBody := transferTokensTx(chain). + txBodyBuilder := transferTokensTx(chain). AddAuthorizer(accounts[0]). AddArgument(jsoncdc.MustEncode(cadence.UFix64(1_0000_0000_0000))). AddArgument(jsoncdc.MustEncode( - cadence.NewAddress(chain.ServiceAddress()))) + cadence.NewAddress(chain.ServiceAddress()))). + SetProposalKey(accounts[0], 0, 10). // set wrong sequence number + SetPayer(accounts[0]) - // set wrong sequence number - txBody.SetProposalKey(accounts[0], 0, 10) - txBody.SetPayer(accounts[0]) - - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1983,17 +2153,18 @@ func TestBlockContext_ExecuteTransaction_FailingTransactions(t *testing.T) { chain) require.NoError(t, err) - txBody := transferTokensTx(chain). + txBodyBuilder := transferTokensTx(chain). AddAuthorizer(accounts[0]). AddArgument(jsoncdc.MustEncode( cadence.UFix64(1_0000_0000_0000))). AddArgument(jsoncdc.MustEncode( - cadence.NewAddress(chain.ServiceAddress()))) + cadence.NewAddress(chain.ServiceAddress()))). + SetProposalKey(accounts[0], 0, 0). 
+ SetPayer(accounts[0]) - txBody.SetProposalKey(accounts[0], 0, 0) - txBody.SetPayer(accounts[0]) - - err = testutil.SignEnvelope(txBody, accounts[0], privateKeys[0]) + err = testutil.SignEnvelope(txBodyBuilder, accounts[0], privateKeys[0]) + require.NoError(t, err) + txBody, err := txBodyBuilder.Build() require.NoError(t, err) executionSnapshot, output, err := vm.Run( diff --git a/fvm/fvm_fuzz_test.go b/fvm/fvm_fuzz_test.go index 392e82e7696..1adf52cc6e7 100644 --- a/fvm/fvm_fuzz_test.go +++ b/fvm/fvm_fuzz_test.go @@ -5,9 +5,11 @@ import ( "math" "testing" + "github.com/onflow/cadence/stdlib" "github.com/stretchr/testify/require" "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/onflow/flow-go/engine/execution/testutil" @@ -16,6 +18,7 @@ import ( "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/meter" "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -34,13 +37,13 @@ func FuzzTransactionComputationLimit(f *testing.F) { vmt.run(func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { // create the transaction - txBody := tt.createTxBody(t, tctx) + txBodyBuilder := tt.createTxBody(t, tctx) // set the computation limit - txBody.SetGasLimit(computationLimit) + txBodyBuilder.SetComputeLimit(computationLimit) // sign the transaction err := testutil.SignEnvelope( - txBody, + txBodyBuilder, tctx.address, tctx.privateKey, ) @@ -53,6 +56,9 @@ func FuzzTransactionComputationLimit(f *testing.F) { var output fvm.ProcedureOutput + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + // run the transaction require.NotPanics(t, func() { _, output, err = vm.Run( @@ -82,7 +88,7 @@ type transactionTypeContext struct { } type transactionType struct { - createTxBody func(t *testing.T, tctx transactionTypeContext) *flow.TransactionBody + createTxBody func(t *testing.T, tctx transactionTypeContext) *flow.TransactionBodyBuilder require func(t *testing.T, tctx transactionTypeContext, results fuzzResults) } @@ -91,20 +97,19 @@ var fuzzTransactionTypes = []transactionType{ // Token transfer of 0 tokens. // should succeed if no limits are hit. // fees should be deducted no matter what. - createTxBody: func(t *testing.T, tctx transactionTypeContext) *flow.TransactionBody { + createTxBody: func(t *testing.T, tctx transactionTypeContext) *flow.TransactionBodyBuilder { txBody := transferTokensTx(tctx.chain). AddAuthorizer(tctx.address). AddArgument(jsoncdc.MustEncode(cadence.UFix64(0))). // 0 value transferred - AddArgument(jsoncdc.MustEncode(cadence.NewAddress(tctx.chain.ServiceAddress()))) - - txBody.SetProposalKey(tctx.address, 0, 0) - txBody.SetPayer(tctx.address) + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(tctx.chain.ServiceAddress()))). + SetProposalKey(tctx.address, 0, 0). 
+ SetPayer(tctx.address) return txBody }, require: func(t *testing.T, tctx transactionTypeContext, results fuzzResults) { // if there is an error, it should be computation exceeded if results.output.Err != nil { - require.Len(t, results.output.Events, 3) + require.Len(t, results.output.Events, 5) unittest.EnsureEventsIndexSeq(t, results.output.Events, tctx.chain.ChainID()) codes := []errors.ErrorCode{ errors.ErrCodeComputationLimitExceededError, @@ -117,7 +122,11 @@ var fuzzTransactionTypes = []transactionType{ // fees should be deducted no matter the input fees, deducted := getDeductedFees(t, tctx, results) require.True(t, deducted, "Fees should be deducted.") - require.GreaterOrEqual(t, fees.ToGoValue().(uint64), fuzzTestsInclusionFees) + require.GreaterOrEqual( + t, + uint64(fees), + fuzzTestsInclusionFees, + ) unittest.EnsureEventsIndexSeq(t, results.output.Events, tctx.chain.ChainID()) }, }, @@ -125,14 +134,13 @@ var fuzzTransactionTypes = []transactionType{ // Token transfer of too many tokens. // Should never succeed. // fees should be deducted no matter what. - createTxBody: func(t *testing.T, tctx transactionTypeContext) *flow.TransactionBody { + createTxBody: func(t *testing.T, tctx transactionTypeContext) *flow.TransactionBodyBuilder { txBody := transferTokensTx(tctx.chain). AddAuthorizer(tctx.address). - AddArgument(jsoncdc.MustEncode(cadence.UFix64(2 * tctx.addressFunds))). // too much value transferred - AddArgument(jsoncdc.MustEncode(cadence.NewAddress(tctx.chain.ServiceAddress()))) - - txBody.SetProposalKey(tctx.address, 0, 0) - txBody.SetPayer(tctx.address) + AddArgument(jsoncdc.MustEncode(cadence.UFix64(2*tctx.addressFunds))). // too much value transferred + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(tctx.chain.ServiceAddress()))). + SetProposalKey(tctx.address, 0, 0). + SetPayer(tctx.address) return txBody }, require: func(t *testing.T, tctx transactionTypeContext, results fuzzResults) { @@ -149,7 +157,10 @@ var fuzzTransactionTypes = []transactionType{ // fees should be deducted no matter the input fees, deducted := getDeductedFees(t, tctx, results) require.True(t, deducted, "Fees should be deducted.") - require.GreaterOrEqual(t, fees.ToGoValue().(uint64), fuzzTestsInclusionFees) + require.GreaterOrEqual(t, + uint64(fees), + fuzzTestsInclusionFees, + ) unittest.EnsureEventsIndexSeq(t, results.output.Events, tctx.chain.ChainID()) }, }, @@ -157,11 +168,11 @@ var fuzzTransactionTypes = []transactionType{ // Transaction that calls panic. // Should never succeed. // fees should be deducted no matter what. - createTxBody: func(t *testing.T, tctx transactionTypeContext) *flow.TransactionBody { + createTxBody: func(t *testing.T, tctx transactionTypeContext) *flow.TransactionBodyBuilder { // empty transaction - txBody := flow.NewTransactionBody().SetScript([]byte("transaction(){prepare(){};execute{panic(\"some panic\")}}")) - txBody.SetProposalKey(tctx.address, 0, 0) - txBody.SetPayer(tctx.address) + txBody := flow.NewTransactionBodyBuilder().SetScript([]byte("transaction(){prepare(){};execute{panic(\"some panic\")}}")). + SetProposalKey(tctx.address, 0, 0). 
+ SetPayer(tctx.address) return txBody }, require: func(t *testing.T, tctx transactionTypeContext, results fuzzResults) { @@ -178,17 +189,21 @@ var fuzzTransactionTypes = []transactionType{ // fees should be deducted no matter the input fees, deducted := getDeductedFees(t, tctx, results) require.True(t, deducted, "Fees should be deducted.") - require.GreaterOrEqual(t, fees.ToGoValue().(uint64), fuzzTestsInclusionFees) + require.GreaterOrEqual(t, + uint64(fees), + fuzzTestsInclusionFees, + ) unittest.EnsureEventsIndexSeq(t, results.output.Events, tctx.chain.ChainID()) }, }, { - createTxBody: func(t *testing.T, tctx transactionTypeContext) *flow.TransactionBody { + createTxBody: func(t *testing.T, tctx transactionTypeContext) *flow.TransactionBodyBuilder { // create account - txBody := flow.NewTransactionBody().SetScript(createAccountScript). - AddAuthorizer(tctx.address) - txBody.SetProposalKey(tctx.address, 0, 0) - txBody.SetPayer(tctx.address) + txBody := flow.NewTransactionBodyBuilder(). + SetScript(createAccountScript). + AddAuthorizer(tctx.address). + SetProposalKey(tctx.address, 0, 0). + SetPayer(tctx.address) return txBody }, require: func(t *testing.T, tctx transactionTypeContext, results fuzzResults) { @@ -207,7 +222,10 @@ var fuzzTransactionTypes = []transactionType{ // fees should be deducted no matter the input fees, deducted := getDeductedFees(t, tctx, results) require.True(t, deducted, "Fees should be deducted.") - require.GreaterOrEqual(t, fees.ToGoValue().(uint64), fuzzTestsInclusionFees) + require.GreaterOrEqual(t, + uint64(fees), + fuzzTestsInclusionFees, + ) unittest.EnsureEventsIndexSeq(t, results.output.Events, tctx.chain.ChainID()) }, }, @@ -219,22 +237,30 @@ const fuzzTestsInclusionFees = uint64(1_000) func getDeductedFees(tb testing.TB, tctx transactionTypeContext, results fuzzResults) (fees cadence.UFix64, deducted bool) { tb.Helper() + sc := systemcontracts.SystemContractsForChain(tctx.chain.ChainID()) + var ok bool var feesDeductedEvent cadence.Event for _, e := range results.output.Events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowFees.FeesDeducted", environment.FlowFeesAddress(tctx.chain)) { - data, err := jsoncdc.Decode(nil, e.Payload) + if string(e.Type) == fmt.Sprintf("A.%s.FlowFees.FeesDeducted", sc.FlowFees.Address.Hex()) { + data, err := ccf.Decode(nil, e.Payload) require.NoError(tb, err) feesDeductedEvent, ok = data.(cadence.Event) - require.True(tb, ok, "Event payload should be of type cadence event.") + require.True(tb, ok, "Event payload should be of type cadence event") } } if feesDeductedEvent.Type() == nil { return 0, false } - fees, ok = feesDeductedEvent.Fields[0].(cadence.UFix64) - require.True(tb, ok, "FeesDeducted[0] event should be of type cadence.UFix64.") - return fees, true + + feesValue := cadence.SearchFieldByName(feesDeductedEvent, "amount") + require.IsType(tb, + cadence.UFix64(0), + feesValue, + "FeesDeducted event amount field should be of type cadence.UFix64", + ) + + return feesValue.(cadence.UFix64), true } func bootstrapFuzzStateAndTxContext(tb testing.TB) (bootstrappedVmTest, transactionTypeContext) { @@ -246,7 +272,7 @@ func bootstrapFuzzStateAndTxContext(tb testing.TB) (bootstrappedVmTest, transact bootstrappedVMTest, err := newVMTest().withBootstrapProcedureOptions( fvm.WithTransactionFee(fvm.DefaultTransactionFees), fvm.WithExecutionMemoryLimit(math.MaxUint32), - fvm.WithExecutionEffortWeights(mainnetExecutionEffortWeights), + fvm.WithExecutionEffortWeights(environment.MainnetExecutionEffortWeights), 
fvm.WithExecutionMemoryWeights(meter.DefaultMemoryWeights), fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), @@ -256,14 +282,17 @@ func bootstrapFuzzStateAndTxContext(tb testing.TB) (bootstrappedVmTest, transact fvm.WithAccountStorageLimit(true), ).bootstrapWith(func(vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) (snapshot.SnapshotTree, error) { // ==== Create an account ==== - var txBody *flow.TransactionBody - privateKey, txBody = testutil.CreateAccountCreationTransaction(tb, chain) + var txBodyBuilder *flow.TransactionBodyBuilder + privateKey, txBodyBuilder = testutil.CreateAccountCreationTransaction(tb, chain) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) if err != nil { return snapshotTree, err } + txBody, err := txBodyBuilder.Build() + require.NoError(tb, err) + executionSnapshot, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -276,28 +305,34 @@ func bootstrapFuzzStateAndTxContext(tb testing.TB) (bootstrappedVmTest, transact accountCreatedEvents := filterAccountCreatedEvents(output.Events) // read the address of the account created (e.g. "0x01" and convert it to flow.address) - data, err := jsoncdc.Decode(nil, accountCreatedEvents[0].Payload) + data, err := ccf.Decode(nil, accountCreatedEvents[0].Payload) require.NoError(tb, err) address = flow.ConvertAddress( - data.(cadence.Event).Fields[0].(cadence.Address)) + cadence.SearchFieldByName( + data.(cadence.Event), + stdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address), + ) // ==== Transfer tokens to new account ==== - txBody = transferTokensTx(chain). + txBodyBuilder = transferTokensTx(chain). AddAuthorizer(chain.ServiceAddress()). AddArgument(jsoncdc.MustEncode(cadence.UFix64(1_000_000_000))). // 10 FLOW - AddArgument(jsoncdc.MustEncode(cadence.NewAddress(address))) - - txBody.SetProposalKey(chain.ServiceAddress(), 0, 1) - txBody.SetPayer(chain.ServiceAddress()) + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(address))). + SetProposalKey(chain.ServiceAddress(), 0, 1). 
+ SetPayer(chain.ServiceAddress()) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey, ) require.NoError(tb, err) + txBody, err = txBodyBuilder.Build() + require.NoError(tb, err) + executionSnapshot, output, err = vm.Run( ctx, fvm.Transaction(txBody, 0), diff --git a/fvm/fvm_signature_test.go b/fvm/fvm_signature_test.go index 6a4e20ad284..8712f972279 100644 --- a/fvm/fvm_signature_test.go +++ b/fvm/fvm_signature_test.go @@ -10,8 +10,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/fvm" @@ -27,6 +27,12 @@ var createMessage = func(m string) (signableMessage []byte, message cadence.Arra return signableMessage, message } +var uint8ArrayArrayType = cadence.NewVariableSizedArrayType( + cadence.NewVariableSizedArrayType( + cadence.UInt8Type, + ), +) + func TestKeyListSignature(t *testing.T) { t.Parallel() @@ -57,7 +63,7 @@ func TestKeyListSignature(t *testing.T) { "SHA3_256", func(tag string) hash.Hasher { hasher, err := fvmCrypto.NewPrefixedHashing(hash.SHA3_256, tag) - require.Nil(t, err) + require.NoError(t, err) return hasher }, }, @@ -65,7 +71,7 @@ func TestKeyListSignature(t *testing.T) { "SHA2_256", func(tag string) hash.Hasher { hasher, err := fvmCrypto.NewPrefixedHashing(hash.SHA2_256, tag) - require.Nil(t, err) + require.NoError(t, err) return hasher }, }, @@ -73,7 +79,7 @@ func TestKeyListSignature(t *testing.T) { "KECCAK_256", func(tag string) hash.Hasher { hasher, err := fvmCrypto.NewPrefixedHashing(hash.Keccak_256, tag) - require.Nil(t, err) + require.NoError(t, err) return hasher }, }, @@ -86,7 +92,8 @@ func TestKeyListSignature(t *testing.T) { ` import Crypto - pub fun main( + access(all) + fun main( rawPublicKeys: [[UInt8]], message: [UInt8], signatures: [[UInt8]], @@ -121,11 +128,13 @@ func TestKeyListSignature(t *testing.T) { return keyList.verify( signatureSet: signatureSet, signedData: message, + domainSeparationTag: "%s" ) } `, signatureAlgorithm.name, hashAlgorithm.name, + tag, ), ) @@ -403,7 +412,8 @@ func TestBLSMultiSignature(t *testing.T) { ` import Crypto - pub fun main( + access(all) + fun main( publicKey: [UInt8], proof: [UInt8] ): Bool { @@ -512,7 +522,7 @@ func TestBLSMultiSignature(t *testing.T) { ` import Crypto - pub fun main( + access(all) fun main( signatures: [[UInt8]], ): [UInt8]? { return BLS.aggregateSignatures(signatures)! 
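The uint8ArrayArrayType variable introduced above gives the static type of a Cadence [[UInt8]] value a single definition; the hunks that follow replace every hand-rolled nested VariableSizedArrayType literal with it. As a rough sketch of the encoding pattern those call sites share (the encodeByteSlices helper is illustrative only, not part of this patch, and assumes the test file's existing cadence and jsoncdc imports):

// encodeByteSlices wraps raw byte slices as cadence.UInt8 arrays and
// encodes the outer array with the shared uint8ArrayArrayType, rather
// than restating the nested VariableSizedArrayType at each call site.
func encodeByteSlices(slices [][]byte) []byte {
	outer := make([]cadence.Value, 0, len(slices))
	for _, s := range slices {
		inner := make([]cadence.Value, 0, len(s))
		for _, b := range s {
			inner = append(inner, cadence.NewUInt8(b))
		}
		outer = append(outer, cadence.NewArray(inner))
	}
	// cadence.Array carries both the values and their static array type.
	return jsoncdc.MustEncode(cadence.Array{
		Values:    outer,
		ArrayType: uint8ArrayArrayType,
	})
}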
@@ -548,12 +558,8 @@ func TestBLSMultiSignature(t *testing.T) { script := fvm.Script(code).WithArguments( jsoncdc.MustEncode(cadence.Array{ - Values: signatures, - ArrayType: &cadence.VariableSizedArrayType{ - ElementType: &cadence.VariableSizedArrayType{ - ElementType: cadence.UInt8Type{}, - }, - }, + Values: signatures, + ArrayType: uint8ArrayArrayType, }), ) @@ -580,12 +586,8 @@ func TestBLSMultiSignature(t *testing.T) { script := fvm.Script(code).WithArguments( jsoncdc.MustEncode(cadence.Array{ - Values: signatures, - ArrayType: &cadence.VariableSizedArrayType{ - ElementType: &cadence.VariableSizedArrayType{ - ElementType: cadence.UInt8Type{}, - }, - }, + Values: signatures, + ArrayType: uint8ArrayArrayType, }), ) @@ -603,12 +605,8 @@ func TestBLSMultiSignature(t *testing.T) { signatures := []cadence.Value{} script := fvm.Script(code).WithArguments( jsoncdc.MustEncode(cadence.Array{ - Values: signatures, - ArrayType: &cadence.VariableSizedArrayType{ - ElementType: &cadence.VariableSizedArrayType{ - ElementType: cadence.UInt8Type{}, - }, - }, + Values: signatures, + ArrayType: uint8ArrayArrayType, }), ) @@ -637,7 +635,7 @@ func TestBLSMultiSignature(t *testing.T) { ` import Crypto - pub fun main( + access(all) fun main( publicKeys: [[UInt8]] ): [UInt8]? { let pks: [PublicKey] = [] @@ -673,12 +671,8 @@ func TestBLSMultiSignature(t *testing.T) { script := fvm.Script(code(BLSSignatureAlgorithm)).WithArguments( jsoncdc.MustEncode(cadence.Array{ - Values: publicKeys, - ArrayType: &cadence.VariableSizedArrayType{ - ElementType: &cadence.VariableSizedArrayType{ - ElementType: cadence.UInt8Type{}, - }, - }, + Values: publicKeys, + ArrayType: uint8ArrayArrayType, }), ) @@ -707,12 +701,8 @@ func TestBLSMultiSignature(t *testing.T) { script := fvm.Script(code(signatureAlgorithm)).WithArguments( jsoncdc.MustEncode(cadence.Array{ - Values: publicKeys, - ArrayType: &cadence.VariableSizedArrayType{ - ElementType: &cadence.VariableSizedArrayType{ - ElementType: cadence.UInt8Type{}, - }, - }, + Values: publicKeys, + ArrayType: uint8ArrayArrayType, }), ) @@ -727,12 +717,8 @@ func TestBLSMultiSignature(t *testing.T) { var publicKeys []cadence.Value script := fvm.Script(code(BLSSignatureAlgorithm)).WithArguments( jsoncdc.MustEncode(cadence.Array{ - Values: publicKeys, - ArrayType: &cadence.VariableSizedArrayType{ - ElementType: &cadence.VariableSizedArrayType{ - ElementType: cadence.UInt8Type{}, - }, - }, + Values: publicKeys, + ArrayType: uint8ArrayArrayType, }), ) @@ -761,7 +747,7 @@ func TestBLSMultiSignature(t *testing.T) { code := []byte(` import Crypto - pub fun main( + access(all) fun main( publicKeys: [[UInt8]], signatures: [[UInt8]], message: [UInt8], @@ -807,20 +793,12 @@ func TestBLSMultiSignature(t *testing.T) { script := fvm.Script(code).WithArguments( jsoncdc.MustEncode(cadence.Array{ // keys - Values: publicKeys, - ArrayType: &cadence.VariableSizedArrayType{ - ElementType: &cadence.VariableSizedArrayType{ - ElementType: cadence.UInt8Type{}, - }, - }, + Values: publicKeys, + ArrayType: uint8ArrayArrayType, }), jsoncdc.MustEncode(cadence.Array{ // signatures - Values: signatures, - ArrayType: &cadence.VariableSizedArrayType{ - ElementType: &cadence.VariableSizedArrayType{ - ElementType: cadence.UInt8Type{}, - }, - }, + Values: signatures, + ArrayType: uint8ArrayArrayType, }), jsoncdc.MustEncode(cadenceMessage), jsoncdc.MustEncode(cadence.String(tag)), diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 1acca029284..c5939f6a27b 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -1,44 +1,59 
@@ package fvm_test import ( + "context" "crypto/rand" + "encoding/binary" "encoding/hex" "fmt" "math" "strings" "testing" + "github.com/stretchr/testify/assert" + mockery "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/ccf" jsoncdc "github.com/onflow/cadence/encoding/json" + cadenceErrors "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/crypto" + "github.com/onflow/cadence/sema" + cadenceStdlib "github.com/onflow/cadence/stdlib" + "github.com/onflow/cadence/test_utils/runtime_utils" + "github.com/onflow/crypto" + "github.com/onflow/flow-core-contracts/lib/go/contracts" + bridge "github.com/onflow/flow-evm-bridge" + flowsdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/test" "github.com/onflow/flow-go/engine/execution/testutil" exeUtils "github.com/onflow/flow-go/engine/execution/utils" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/blueprints" fvmCrypto "github.com/onflow/flow-go/fvm/crypto" "github.com/onflow/flow-go/fvm/environment" - errors "github.com/onflow/flow-go/fvm/errors" + envMock "github.com/onflow/flow-go/fvm/environment/mock" + "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/handler" + "github.com/onflow/flow-go/fvm/evm/stdlib" + "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/fvm/meter" reusableRuntime "github.com/onflow/flow-go/fvm/runtime" "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/storage/snapshot/mock" + "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/storage/testutils" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/fvm/tracing" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) -// from 18.8.2022 -var mainnetExecutionEffortWeights = meter.ExecutionEffortWeights{ - common.ComputationKindStatement: 1569, - common.ComputationKindLoop: 1569, - common.ComputationKindFunctionInvocation: 1569, - environment.ComputationKindGetValue: 808, - environment.ComputationKindCreateAccount: 2837670, - environment.ComputationKindSetValue: 765, -} - type vmTest struct { bootstrapOptions []fvm.BootstrapProcedureOption contextOptions []fvm.Option @@ -69,6 +84,7 @@ func (vmt vmTest) run( baseOpts := []fvm.Option{ // default chain is Testnet fvm.WithChain(flow.Testnet.Chain()), + fvm.WithEntropyProvider(testutil.EntropyProviderFixture(nil)), } opts := append(baseOpts, vmt.contextOptions...) 
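The import rework above also deletes the file-local mainnetExecutionEffortWeights table; later hunks read the same weights from the shared environment.MainnetExecutionEffortWeights instead. Beyond that, the hunks below repeat one mechanical migration: tests no longer mutate a *flow.TransactionBody in place, but assemble a *flow.TransactionBodyBuilder, sign it, and only then finalize it with Build(), whose error must be checked. A minimal sketch of the new shape, using only names that appear elsewhere in this patch:

// Assemble the transaction fluently. Note that SetComputeLimit
// replaces the old SetGasLimit name.
txBodyBuilder := flow.NewTransactionBodyBuilder().
	SetScript([]byte(`transaction {}`)).
	SetProposalKey(chain.ServiceAddress(), 0, 0).
	SetPayer(chain.ServiceAddress()).
	AddAuthorizer(chain.ServiceAddress()).
	SetComputeLimit(fvm.DefaultComputationLimit)

// Sign while the builder is still mutable...
err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain)
require.NoError(t, err)

// ...then finalize; Build validates the body and returns an error.
txBody, err := txBodyBuilder.Build()
require.NoError(t, err)

// The built, immutable body is what gets executed.
_, output, err := vm.Run(ctx, fvm.Transaction(txBody, 0), snapshotTree)
require.NoError(t, err)
require.NoError(t, output.Err)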
@@ -173,7 +189,8 @@ func TestHashing(t *testing.T) { ` import Crypto - pub fun main(data: [UInt8]): [UInt8] { + access(all) + fun main(data: [UInt8]): [UInt8] { return Crypto.hash(data, algorithm: HashAlgorithm.%s) } `, hashName)) @@ -183,7 +200,7 @@ func TestHashing(t *testing.T) { ` import Crypto - pub fun main(data: [UInt8], tag: String): [UInt8] { + access(all) fun main(data: [UInt8], tag: String): [UInt8] { return Crypto.hashWithTag(data, tag: tag, algorithm: HashAlgorithm.%s) } `, hashName)) @@ -347,7 +364,7 @@ func TestHashing(t *testing.T) { if err == nil && output.Err == nil { cadenceArray := output.Value.(cadence.Array) for _, value := range cadenceArray.Values { - byteResult = append(byteResult, value.(cadence.UInt8).ToGoValue().(uint8)) + byteResult = append(byteResult, uint8(value.(cadence.UInt8))) } } @@ -379,7 +396,7 @@ func TestHashing(t *testing.T) { result1 := make([]byte, 0) cadenceArray := output.Value.(cadence.Array) for _, value := range cadenceArray.Values { - result1 = append(result1, value.(cadence.UInt8).ToGoValue().(uint8)) + result1 = append(result1, uint8(value.(cadence.UInt8))) } code = hashScript(algo.Name()) @@ -394,7 +411,7 @@ func TestHashing(t *testing.T) { result2 := make([]byte, 0) cadenceArray = output.Value.(cadence.Array) for _, value := range cadenceArray.Values { - result2 = append(result2, value.(cadence.UInt8).ToGoValue().(uint8)) + result2 = append(result2, uint8(value.(cadence.UInt8))) } result3, err := fvmCrypto.HashWithTag(fvmCrypto.RuntimeToCryptoHashingAlgorithm(algo), "", data) @@ -420,9 +437,12 @@ func TestWithServiceAccount(t *testing.T) { snapshotTree := snapshot.NewSnapshotTree(nil) - txBody := flow.NewTransactionBody(). - SetScript([]byte(`transaction { prepare(signer: AuthAccount) { AuthAccount(payer: signer) } }`)). - AddAuthorizer(chain.ServiceAddress()) + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(`transaction { prepare(signer: auth(BorrowValue) &Account) { Account(payer: signer) } }`)). + SetPayer(chain.ServiceAddress()). + AddAuthorizer(chain.ServiceAddress()). + Build() + require.NoError(t, err) t.Run("With service account enabled", func(t *testing.T) { executionSnapshot, output, err := vm.Run( @@ -481,7 +501,7 @@ func TestEventLimits(t *testing.T) { deployingContractScriptTemplate := ` transaction { - prepare(signer: AuthAccount) { + prepare(signer: auth(AddContract) &Account) { let code = "%s".decodeHex() signer.contracts.add( name: "TestContract", @@ -495,10 +515,12 @@ func TestEventLimits(t *testing.T) { ctx, fvm.WithEventCollectionSizeLimit(2)) - txBody := flow.NewTransactionBody(). + txBody, err := flow.NewTransactionBodyBuilder(). SetScript([]byte(fmt.Sprintf(deployingContractScriptTemplate, hex.EncodeToString([]byte(testContract))))). SetPayer(chain.ServiceAddress()). - AddAuthorizer(chain.ServiceAddress()) + AddAuthorizer(chain.ServiceAddress()). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -509,16 +531,19 @@ func TestEventLimits(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - txBody = flow.NewTransactionBody(). + txBody, err = flow.NewTransactionBodyBuilder(). SetScript([]byte(fmt.Sprintf(` import TestContract from 0x%s transaction { - prepare(acct: AuthAccount) {} + prepare(acct: &Account) {} execute { TestContract.EmitEvent() } }`, chain.ServiceAddress()))). - AddAuthorizer(chain.ServiceAddress()) + SetPayer(chain.ServiceAddress()). + AddAuthorizer(chain.ServiceAddress()). 
+ Build() + require.NoError(t, err) t.Run("With limits", func(t *testing.T) { txBody.Payer = unittest.RandomAddressFixture() @@ -570,19 +595,20 @@ func TestHappyPathTransactionSigning(t *testing.T) { chain) require.NoError(t, err) - txBody := flow.NewTransactionBody(). - SetScript([]byte(`transaction(){}`)) - - txBody.SetProposalKey(accounts[0], 0, 0) - txBody.SetPayer(accounts[0]) + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript([]byte(`transaction(){}`)). + SetProposalKey(accounts[0], 0, 0). + SetPayer(accounts[0]) hasher, err := exeUtils.NewHasher(privateKey.HashAlgo) require.NoError(t, err) - sig, err := txBody.Sign(txBody.EnvelopeMessage(), privateKey.PrivateKey, hasher) + sig, err := txBodyBuilder.Sign(txBodyBuilder.EnvelopeMessage(), privateKey.PrivateKey, hasher) require.NoError(t, err) - txBody.AddEnvelopeSignature(accounts[0], 0, sig) + txBodyBuilder.AddEnvelopeSignature(accounts[0], 0, sig) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) _, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -595,20 +621,23 @@ func TestHappyPathTransactionSigning(t *testing.T) { func TestTransactionFeeDeduction(t *testing.T) { getBalance := func(vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree, address flow.Address) uint64 { - - code := []byte(fmt.Sprintf(` + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + code := []byte(fmt.Sprintf( + ` import FungibleToken from 0x%s import FlowToken from 0x%s - pub fun main(account: Address): UFix64 { + access(all) fun main(account: Address): UFix64 { let acct = getAccount(account) - let vaultRef = acct.getCapability(/public/flowTokenBalance) - .borrow<&FlowToken.Vault{FungibleToken.Balance}>() + let vaultRef = acct.capabilities.borrow<&FlowToken.Vault>(/public/flowTokenBalance) ?? 
panic("Could not borrow Balance reference to the Vault") return vaultRef.balance } - `, fvm.FungibleTokenAddress(chain), fvm.FlowTokenAddress(chain))) + `, + sc.FungibleToken.Address.Hex(), + sc.FlowToken.Address.Hex(), + )) script := fvm.Script(code).WithArguments( jsoncdc.MustEncode(cadence.NewAddress(address)), ) @@ -616,7 +645,7 @@ func TestTransactionFeeDeduction(t *testing.T) { _, output, err := vm.Run(ctx, script, snapshotTree) require.NoError(t, err) require.NoError(t, output.Err) - return output.Value.ToGoValue().(uint64) + return uint64(output.Value.(cadence.UFix64)) } type testCase struct { @@ -630,7 +659,13 @@ func TestTransactionFeeDeduction(t *testing.T) { txFees := uint64(1_000) // 0.00001 fundingAmount := uint64(100_000_000) // 1.0 transferAmount := uint64(123_456) - minimumStorageReservation := fvm.DefaultMinimumStorageReservation.ToGoValue().(uint64) + minimumStorageReservation := uint64(fvm.DefaultMinimumStorageReservation) + + chain := flow.Testnet.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + depositedEvent := fmt.Sprintf("A.%s.FlowToken.TokensDeposited", sc.FlowToken.Address) + withdrawnEvent := fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", sc.FlowToken.Address) + feesDeductedEvent := fmt.Sprintf("A.%s.FlowFees.FeesDeducted", sc.FlowFees.Address) testCases := []testCase{ { @@ -654,10 +689,10 @@ func TestTransactionFeeDeduction(t *testing.T) { chain := flow.Testnet.Chain() for _, e := range output.Events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } @@ -686,7 +721,7 @@ func TestTransactionFeeDeduction(t *testing.T) { var feeDeduction flow.Event // fee deduction event for _, e := range output.Events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowFees.FeesDeducted", environment.FlowFeesAddress(chain)) { + if string(e.Type) == feesDeductedEvent { feeDeduction = e break } @@ -694,16 +729,28 @@ func TestTransactionFeeDeduction(t *testing.T) { unittest.EnsureEventsIndexSeq(t, output.Events, chain.ChainID()) require.NotEmpty(t, feeDeduction.Payload) - payload, err := jsoncdc.Decode(nil, feeDeduction.Payload) + payload, err := ccf.Decode(nil, feeDeduction.Payload) require.NoError(t, err) event := payload.(cadence.Event) - require.Equal(t, txFees, event.Fields[0].ToGoValue()) + fields := cadence.FieldsMappedByName(event) + + actualTXFees := fields["amount"] + actualExecutionEffort := fields["executionEffort"] + actualInclusionEffort := fields["inclusionEffort"] + + require.Equal(t, + txFees, + uint64(actualTXFees.(cadence.UFix64)), + ) // Inclusion effort should be equivalent to 1.0 UFix64 - require.Equal(t, uint64(100_000_000), event.Fields[1].ToGoValue()) + require.Equal(t, + uint64(100_000_000), + uint64(actualInclusionEffort.(cadence.UFix64)), + ) // Execution effort should be non-0 - require.Greater(t, event.Fields[2].ToGoValue(), uint64(0)) + require.Greater(t, actualExecutionEffort, uint64(0)) }, }, @@ -749,10 +796,10 @@ func TestTransactionFeeDeduction(t *testing.T) { chain := flow.Testnet.Chain() for _, e := range output.Events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == 
fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } @@ -776,10 +823,10 @@ func TestTransactionFeeDeduction(t *testing.T) { chain := flow.Testnet.Chain() for _, e := range output.Events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } @@ -814,10 +861,10 @@ func TestTransactionFeeDeduction(t *testing.T) { chain := flow.Testnet.Chain() for _, e := range output.Events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } @@ -867,10 +914,10 @@ func TestTransactionFeeDeduction(t *testing.T) { chain := flow.Testnet.Chain() for _, e := range output.Events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } @@ -894,10 +941,10 @@ func TestTransactionFeeDeduction(t *testing.T) { chain := flow.Testnet.Chain() for _, e := range output.Events { - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == depositedEvent { deposits = append(deposits, e) } - if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(chain)) { + if string(e.Type) == withdrawnEvent { withdraws = append(withdraws, e) } } @@ -912,11 +959,13 @@ func TestTransactionFeeDeduction(t *testing.T) { runTx := func(tc testCase) func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { return func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { // ==== Create an account ==== - privateKey, txBody := testutil.CreateAccountCreationTransaction(t, chain) + privateKey, txBodyBuilder := testutil.CreateAccountCreationTransaction(t, chain) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) require.NoError(t, err) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -926,7 +975,7 @@ func TestTransactionFeeDeduction(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - require.Len(t, output.Events, 10) + require.Len(t, output.Events, 20) unittest.EnsureEventsIndexSeq(t, output.Events, chain.ChainID()) accountCreatedEvents := filterAccountCreatedEvents(output.Events) @@ -934,27 +983,33 @@ func TestTransactionFeeDeduction(t *testing.T) { require.Len(t, accountCreatedEvents, 1) // read the address of the account created (e.g. 
"0x01" and convert it to flow.address) - data, err := jsoncdc.Decode(nil, accountCreatedEvents[0].Payload) + data, err := ccf.Decode(nil, accountCreatedEvents[0].Payload) require.NoError(t, err) + address := flow.ConvertAddress( - data.(cadence.Event).Fields[0].(cadence.Address)) + cadence.SearchFieldByName( + data.(cadence.Event), + cadenceStdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address), + ) // ==== Transfer tokens to new account ==== - txBody = transferTokensTx(chain). + txBodyBuilder = transferTokensTx(chain). AddAuthorizer(chain.ServiceAddress()). AddArgument(jsoncdc.MustEncode(cadence.UFix64(tc.fundWith))). - AddArgument(jsoncdc.MustEncode(cadence.NewAddress(address))) - - txBody.SetProposalKey(chain.ServiceAddress(), 0, 1) - txBody.SetPayer(chain.ServiceAddress()) + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(address))). + SetProposalKey(chain.ServiceAddress(), 0, 1). + SetPayer(chain.ServiceAddress()) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey, ) require.NoError(t, err) + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) executionSnapshot, output, err = vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -968,27 +1023,28 @@ func TestTransactionFeeDeduction(t *testing.T) { // ==== Transfer tokens from new account ==== - txBody = transferTokensTx(chain). + txBodyBuilder = transferTokensTx(chain). AddAuthorizer(address). AddArgument(jsoncdc.MustEncode(cadence.UFix64(tc.tryToTransfer))). - AddArgument(jsoncdc.MustEncode(cadence.NewAddress(chain.ServiceAddress()))) - - txBody.SetProposalKey(address, 0, 0) - txBody.SetPayer(address) + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(chain.ServiceAddress()))). + SetProposalKey(address, 0, 0). + SetPayer(address) if tc.gasLimit == 0 { - txBody.SetGasLimit(fvm.DefaultComputationLimit) + txBodyBuilder.SetComputeLimit(fvm.DefaultComputationLimit) } else { - txBody.SetGasLimit(tc.gasLimit) + txBodyBuilder.SetComputeLimit(tc.gasLimit) } err = testutil.SignEnvelope( - txBody, + txBodyBuilder, address, privateKey, ) require.NoError(t, err) + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) executionSnapshot, output, err = vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -1012,10 +1068,11 @@ func TestTransactionFeeDeduction(t *testing.T) { t.Run(fmt.Sprintf("Transaction Fees %d: %s", i, tc.name), newVMTest().withBootstrapProcedureOptions( fvm.WithTransactionFee(fvm.DefaultTransactionFees), fvm.WithExecutionMemoryLimit(math.MaxUint64), - fvm.WithExecutionEffortWeights(mainnetExecutionEffortWeights), + fvm.WithExecutionEffortWeights(environment.MainnetExecutionEffortWeights), fvm.WithExecutionMemoryWeights(meter.DefaultMemoryWeights), ).withContextOptions( fvm.WithTransactionFeesEnabled(true), + fvm.WithChain(chain), ).run( runTx(tc)), ) @@ -1028,11 +1085,12 @@ func TestTransactionFeeDeduction(t *testing.T) { fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), fvm.WithExecutionMemoryLimit(math.MaxUint64), - fvm.WithExecutionEffortWeights(mainnetExecutionEffortWeights), + fvm.WithExecutionEffortWeights(environment.MainnetExecutionEffortWeights), fvm.WithExecutionMemoryWeights(meter.DefaultMemoryWeights), ).withContextOptions( fvm.WithTransactionFeesEnabled(true), fvm.WithAccountStorageLimit(true), + fvm.WithChain(chain), ).run( runTx(tc)), ) @@ -1041,7 +1099,11 @@ func TestTransactionFeeDeduction(t *testing.T) { func TestSettingExecutionWeights(t *testing.T) { + 
// change the chain so that the metering settings are read from the service account + chain := flow.Emulator.Chain() + t.Run("transaction should fail with high weights", newVMTest().withBootstrapProcedureOptions( + fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), @@ -1050,13 +1112,15 @@ func TestSettingExecutionWeights(t *testing.T) { common.ComputationKindLoop: 100_000 << meter.MeterExecutionInternalPrecisionBytes, }, ), + ).withContextOptions( + fvm.WithChain(chain), ).run( func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { - prepare(signer: AuthAccount) { + prepare(signer: &Account) { var a = 0 while a < 100 { a = a + 1 @@ -1068,7 +1132,10 @@ func TestSettingExecutionWeights(t *testing.T) { AddAuthorizer(chain.ServiceAddress()). SetPayer(chain.ServiceAddress()) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1098,6 +1165,7 @@ func TestSettingExecutionWeights(t *testing.T) { ), ).withContextOptions( fvm.WithMemoryLimit(10_000_000_000), + fvm.WithChain(chain), ).run( func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { // Create an account private key. @@ -1113,10 +1181,10 @@ func TestSettingExecutionWeights(t *testing.T) { chain) require.NoError(t, err) - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { - prepare(signer: AuthAccount) { + prepare(signer: &Account) { var a = 1 } } @@ -1125,7 +1193,10 @@ func TestSettingExecutionWeights(t *testing.T) { AddAuthorizer(accounts[0]). SetPayer(accounts[0]) - err = testutil.SignTransaction(txBody, accounts[0], privateKeys[0], 0) + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKeys[0], 0) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1148,13 +1219,14 @@ func TestSettingExecutionWeights(t *testing.T) { ), ).withContextOptions( fvm.WithMemoryLimit(10_000_000_000), + fvm.WithChain(chain), ).run( func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { - prepare(signer: AuthAccount) { + prepare(signer: &Account) { var a = 1 } } @@ -1163,7 +1235,10 @@ func TestSettingExecutionWeights(t *testing.T) { AddAuthorizer(chain.ServiceAddress()). 
SetPayer(chain.ServiceAddress()) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1192,6 +1267,8 @@ func TestSettingExecutionWeights(t *testing.T) { fvm.WithExecutionMemoryWeights( memoryWeights, ), + ).withContextOptions( + fvm.WithChain(chain), ).run( func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { privateKeys, err := testutil.GenerateAccountPrivateKeys(1) @@ -1207,10 +1284,10 @@ func TestSettingExecutionWeights(t *testing.T) { // This transaction is specially designed to use a lot of breaks // as the weight for breaks is much higher than usual. // putting a `while true {break}` in a loop does not use the same amount of memory. - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { - prepare(signer: AuthAccount) { + prepare(signer: &Account) { while true {break};while true {break};while true {break};while true {break};while true {break}; while true {break};while true {break};while true {break};while true {break};while true {break}; while true {break};while true {break};while true {break};while true {break};while true {break}; @@ -1235,7 +1312,10 @@ func TestSettingExecutionWeights(t *testing.T) { } `)) - err = testutil.SignTransaction(txBody, accounts[0], privateKeys[0], 0) + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKeys[0], 0) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1259,13 +1339,15 @@ func TestSettingExecutionWeights(t *testing.T) { environment.ComputationKindCreateAccount: (fvm.DefaultComputationLimit + 1) << meter.MeterExecutionInternalPrecisionBytes, }, ), + ).withContextOptions( + fvm.WithChain(chain), ).run( func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { - prepare(signer: AuthAccount) { - AuthAccount(payer: signer) + prepare(signer: auth(BorrowValue) &Account) { + Account(payer: signer) } } `)). @@ -1273,7 +1355,10 @@ func TestSettingExecutionWeights(t *testing.T) { AddAuthorizer(chain.ServiceAddress()). SetPayer(chain.ServiceAddress()) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1295,14 +1380,16 @@ func TestSettingExecutionWeights(t *testing.T) { environment.ComputationKindCreateAccount: 100_000_000 << meter.MeterExecutionInternalPrecisionBytes, }, ), + ).withContextOptions( + fvm.WithChain(chain), ).run( func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { - prepare(signer: AuthAccount) { - AuthAccount(payer: signer) + prepare(signer: auth(BorrowValue) &Account) { + Account(payer: signer) } } `)). @@ -1310,7 +1397,10 @@ func TestSettingExecutionWeights(t *testing.T) { AddAuthorizer(chain.ServiceAddress()). 
SetPayer(chain.ServiceAddress()) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1332,13 +1422,15 @@ func TestSettingExecutionWeights(t *testing.T) { environment.ComputationKindCreateAccount: 100_000_000 << meter.MeterExecutionInternalPrecisionBytes, }, ), + ).withContextOptions( + fvm.WithChain(chain), ).run( func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(` transaction { - prepare(signer: AuthAccount) { - AuthAccount(payer: signer) + prepare(signer: auth(BorrowValue) &Account) { + Account(payer: signer) } } `)). @@ -1346,7 +1438,10 @@ func TestSettingExecutionWeights(t *testing.T) { AddAuthorizer(chain.ServiceAddress()). SetPayer(chain.ServiceAddress()) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) _, output, err := vm.Run( @@ -1375,21 +1470,26 @@ func TestSettingExecutionWeights(t *testing.T) { fvm.WithAccountStorageLimit(true), fvm.WithTransactionFeesEnabled(true), fvm.WithMemoryLimit(math.MaxUint64), + fvm.WithChain(chain), ).run( func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { // Use the maximum amount of computation so that the transaction still passes. - loops := uint64(997) + loops := uint64(996) + executionEffortNeededToCheckStorage := uint64(1) maxExecutionEffort := uint64(997) - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript([]byte(fmt.Sprintf(` - transaction() {prepare(signer: AuthAccount){var i=0; while i < %d {i = i +1 } } execute{}} + transaction() {prepare(signer: &Account){var i=0; while i < %d {i = i +1 } } execute{}} `, loops))). SetProposalKey(chain.ServiceAddress(), 0, 0). AddAuthorizer(chain.ServiceAddress()). SetPayer(chain.ServiceAddress()). - SetGasLimit(maxExecutionEffort) + SetComputeLimit(maxExecutionEffort) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() require.NoError(t, err) executionSnapshot, output, err := vm.Run( @@ -1401,21 +1501,24 @@ func TestSettingExecutionWeights(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - // expected used is number of loops. - require.Equal(t, loops, output.ComputationUsed) + // expected computation used is number of loops + 1 (from the storage limit check). + require.Equal(t, loops+executionEffortNeededToCheckStorage, output.ComputationUsed) // increasing the number of loops should fail the transaction. loops = loops + 1 - txBody = flow.NewTransactionBody(). + txBodyBuilder = flow.NewTransactionBodyBuilder(). SetScript([]byte(fmt.Sprintf(` - transaction() {prepare(signer: AuthAccount){var i=0; while i < %d {i = i +1 } } execute{}} + transaction() {prepare(signer: &Account){var i=0; while i < %d {i = i +1 } } execute{}} `, loops))). SetProposalKey(chain.ServiceAddress(), 0, 1). AddAuthorizer(chain.ServiceAddress()). SetPayer(chain.ServiceAddress()). 
- SetGasLimit(maxExecutionEffort) + SetComputeLimit(maxExecutionEffort) + + err = testutil.SignTransactionAsServiceAccount(txBodyBuilder, 1, chain) + require.NoError(t, err) - err = testutil.SignTransactionAsServiceAccount(txBody, 1, chain) + txBody, err = txBodyBuilder.Build() require.NoError(t, err) _, output, err = vm.Run( @@ -1425,23 +1528,139 @@ func TestSettingExecutionWeights(t *testing.T) { require.NoError(t, err) require.ErrorContains(t, output.Err, "computation exceeds limit (997)") - // computation used should the actual computation used. - require.Equal(t, loops, output.ComputationUsed) + // expected computation used is still number of loops + 1 (from the storage limit check). + require.Equal(t, loops+executionEffortNeededToCheckStorage, output.ComputationUsed) for _, event := range output.Events { // the fee deduction event should only contain the max gas worth of execution effort. if strings.Contains(string(event.Type), "FlowFees.FeesDeducted") { - ev, err := jsoncdc.Decode(nil, event.Payload) + v, err := ccf.Decode(nil, event.Payload) require.NoError(t, err) + + ev := v.(cadence.Event) + + actualExecutionEffort := cadence.SearchFieldByName(ev, "executionEffort") + require.Equal( t, maxExecutionEffort, - ev.(cadence.Event).Fields[2].ToGoValue().(uint64)) + uint64(actualExecutionEffort.(cadence.UFix64)), + ) } } unittest.EnsureEventsIndexSeq(t, output.Events, chain.ChainID()) }, )) + + t.Run("transaction with more accounts touched uses more computation", newVMTest().withBootstrapProcedureOptions( + fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), + fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), + fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), + fvm.WithTransactionFee(fvm.DefaultTransactionFees), + fvm.WithExecutionEffortWeights( + meter.ExecutionEffortWeights{ + common.ComputationKindStatement: 0, + // only count loops + // the storage check has a loop + common.ComputationKindLoop: 1 << meter.MeterExecutionInternalPrecisionBytes, + common.ComputationKindFunctionInvocation: 0, + }, + ), + ).withContextOptions( + fvm.WithAccountStorageLimit(true), + fvm.WithTransactionFeesEnabled(true), + fvm.WithMemoryLimit(math.MaxUint64), + fvm.WithChain(chain), + ).run( + func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { + // Create an account private key. + privateKeys, err := testutil.GenerateAccountPrivateKeys(5) + require.NoError(t, err) + + // Bootstrap a ledger, creating accounts with the provided + // private keys and the root account. + snapshotTree, accounts, err := testutil.CreateAccounts( + vm, + snapshotTree, + privateKeys, + chain) + require.NoError(t, err) + + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + // create a transaction without loops so only the looping in the storage check is counted. + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf(` + import FungibleToken from 0x%s + import FlowToken from 0x%s + + transaction() { + let sentVault: @{FungibleToken.Vault} + + prepare(signer: auth(BorrowValue) &Account) { + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) + ?? 
panic("Could not borrow reference to the owner's Vault!") + + self.sentVault <- vaultRef.withdraw(amount: 5.0) + } + + execute { + let recipient1 = getAccount(%s) + let recipient2 = getAccount(%s) + let recipient3 = getAccount(%s) + let recipient4 = getAccount(%s) + let recipient5 = getAccount(%s) + + let receiverRef1 = recipient1.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) + ?? panic("Could not borrow receiver reference to the recipient's Vault") + let receiverRef2 = recipient2.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) + ?? panic("Could not borrow receiver reference to the recipient's Vault") + let receiverRef3 = recipient3.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) + ?? panic("Could not borrow receiver reference to the recipient's Vault") + let receiverRef4 = recipient4.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) + ?? panic("Could not borrow receiver reference to the recipient's Vault") + let receiverRef5 = recipient5.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) + ?? panic("Could not borrow receiver reference to the recipient's Vault") + + receiverRef1.deposit(from: <-self.sentVault.withdraw(amount: 1.0)) + receiverRef2.deposit(from: <-self.sentVault.withdraw(amount: 1.0)) + receiverRef3.deposit(from: <-self.sentVault.withdraw(amount: 1.0)) + receiverRef4.deposit(from: <-self.sentVault.withdraw(amount: 1.0)) + receiverRef5.deposit(from: <-self.sentVault.withdraw(amount: 1.0)) + + destroy self.sentVault + } + }`, + sc.FungibleToken.Address, + sc.FlowToken.Address, + accounts[0].HexWithPrefix(), + accounts[1].HexWithPrefix(), + accounts[2].HexWithPrefix(), + accounts[3].HexWithPrefix(), + accounts[4].HexWithPrefix(), + ))). + SetProposalKey(chain.ServiceAddress(), 0, 0). + AddAuthorizer(chain.ServiceAddress()). + SetPayer(chain.ServiceAddress()) + + err = testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + _, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + + // The storage check should loop once for each of the five accounts created + + // once for the service account + require.Equal(t, uint64(5+1), output.ComputationUsed) + }, + )) } func TestStorageUsed(t *testing.T) { @@ -1455,7 +1674,7 @@ func TestStorageUsed(t *testing.T) { ) code := []byte(` - pub fun main(): UInt64 { + access(all) fun main(): UInt64 { var addresses: [Address]= [ 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, @@ -1474,7 +1693,7 @@ func TestStorageUsed(t *testing.T) { var storageUsed: UInt64 = 0 for address in addresses { let account = getAccount(address) - storageUsed = account.storageUsed + storageUsed = account.storage.used } return storageUsed @@ -1589,14 +1808,19 @@ func TestEnforcingComputationLimit(t *testing.T) { ), ) - txBody := flow.NewTransactionBody(). + txBodyBuilder := flow.NewTransactionBodyBuilder(). SetScript(script). - SetGasLimit(computationLimit) + SetComputeLimit(computationLimit) if test.payerIsServAcc { - txBody.SetPayer(chain.ServiceAddress()). - SetGasLimit(0) + txBodyBuilder.SetPayer(chain.ServiceAddress()). 
+ SetComputeLimit(0) + } else { + txBodyBuilder.SetPayer(unittest.RandomAddressFixture()) } + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + tx := fvm.Transaction(txBody, 0) _, output, err := vm.Run(ctx, tx, nil) @@ -1645,13 +1869,14 @@ func TestStorageCapacity(t *testing.T) { snapshotTree) // Transfer FLOW from service account to test accounts - - transferTxBody := transferTokensTx(chain). + transferTxBody, err := transferTokensTx(chain). AddAuthorizer(service). AddArgument(jsoncdc.MustEncode(cadence.UFix64(1_000_000))). AddArgument(jsoncdc.MustEncode(cadence.NewAddress(signer))). SetProposalKey(service, 0, 0). - SetPayer(service) + SetPayer(service). + Build() + require.NoError(t, err) executionSnapshot, output, err := vm.Run( ctx, @@ -1662,12 +1887,14 @@ func TestStorageCapacity(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) - transferTxBody = transferTokensTx(chain). + transferTxBody, err = transferTokensTx(chain). AddAuthorizer(service). AddArgument(jsoncdc.MustEncode(cadence.UFix64(1_000_000))). AddArgument(jsoncdc.MustEncode(cadence.NewAddress(target))). SetProposalKey(service, 0, 0). - SetPayer(service) + SetPayer(service). + Build() + require.NoError(t, err) executionSnapshot, output, err = vm.Run( ctx, @@ -1679,37 +1906,41 @@ func TestStorageCapacity(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) // Perform test + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) - txBody := flow.NewTransactionBody(). - SetScript([]byte(fmt.Sprintf(` + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf( + ` import FungibleToken from 0x%s import FlowToken from 0x%s transaction(target: Address) { - prepare(signer: AuthAccount) { + prepare(signer: auth(BorrowValue) &Account) { let receiverRef = getAccount(target) - .getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() + .capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? panic("Could not borrow receiver reference to the recipient''s Vault") - let vaultRef = signer - .borrow<&{FungibleToken.Provider}>(from: /storage/flowTokenVault) + let vaultRef = signer.storage + .borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner''s Vault!") - var cap0: UInt64 = signer.storageCapacity + var cap0: UInt64 = signer.storage.capacity receiverRef.deposit(from: <- vaultRef.withdraw(amount: 0.0000001)) - var cap1: UInt64 = signer.storageCapacity + var cap1: UInt64 = signer.storage.capacity log(cap0 - cap1) } }`, - fvm.FungibleTokenAddress(chain), - fvm.FlowTokenAddress(chain), + sc.FungibleToken.Address.Hex(), + sc.FlowToken.Address.Hex(), ))). + SetPayer(signer). AddArgument(jsoncdc.MustEncode(cadence.NewAddress(target))). - AddAuthorizer(signer) + AddAuthorizer(signer). 
+ Build() + require.NoError(t, err) _, output, err = vm.Run( ctx, @@ -1747,11 +1978,11 @@ func TestScriptContractMutationsFailure(t *testing.T) { scriptCtx := fvm.NewContextFromParent(ctx) - contract := "pub contract Foo {}" + contract := "access(all) contract Foo {}" script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(account: Address) { - let acc = getAuthAccount(account) + access(all) fun main(account: Address) { + let acc = getAuthAccount<auth(AddContract) &Account>(account) acc.contracts.add(name: "Foo", code: "%s".decodeHex()) }`, hex.EncodeToString([]byte(contract))), )).WithArguments( @@ -1789,11 +2020,11 @@ func TestScriptContractMutationsFailure(t *testing.T) { subCtx := fvm.NewContextFromParent(ctx) - contract := "pub contract Foo {}" + contract := "access(all) contract Foo {}" - txBody := flow.NewTransactionBody().SetScript([]byte(fmt.Sprintf(` + txBodyBuilder := flow.NewTransactionBodyBuilder().SetScript([]byte(fmt.Sprintf(` transaction { - prepare(signer: AuthAccount, service: AuthAccount) { + prepare(signer: auth(AddContract) &Account, service: &Account) { signer.contracts.add(name: "Foo", code: "%s".decodeHex()) } } @@ -1803,12 +2034,15 @@ func TestScriptContractMutationsFailure(t *testing.T) { SetPayer(chain.ServiceAddress()). SetProposalKey(chain.ServiceAddress(), 0, 0) - _ = testutil.SignPayload(txBody, account, privateKey) + _ = testutil.SignPayload(txBodyBuilder, account, privateKey) _ = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + executionSnapshot, output, err := vm.Run( subCtx, fvm.Transaction(txBody, 0), @@ -1819,8 +2053,8 @@ func TestScriptContractMutationsFailure(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) script := fvm.Script([]byte(` - pub fun main(account: Address) { - let acc = getAuthAccount(account) + access(all) fun main(account: Address) { + let acc = getAuthAccount<auth(RemoveContract) &Account>(account) let n = acc.contracts.names[0] acc.contracts.remove(name: n) }`, @@ -1859,11 +2093,11 @@ func TestScriptContractMutationsFailure(t *testing.T) { subCtx := fvm.NewContextFromParent(ctx) - contract := "pub contract Foo {}" + contract := "access(all) contract Foo {}" - txBody := flow.NewTransactionBody().SetScript([]byte(fmt.Sprintf(` + txBodyBuilder := flow.NewTransactionBodyBuilder().SetScript([]byte(fmt.Sprintf(` transaction { - prepare(signer: AuthAccount, service: AuthAccount) { + prepare(signer: auth(AddContract) &Account, service: &Account) { signer.contracts.add(name: "Foo", code: "%s".decodeHex()) } } @@ -1873,12 +2107,15 @@ func TestScriptContractMutationsFailure(t *testing.T) { SetPayer(chain.ServiceAddress()). 
SetProposalKey(chain.ServiceAddress(), 0, 0) - _ = testutil.SignPayload(txBody, account, privateKey) + _ = testutil.SignPayload(txBodyBuilder, account, privateKey) _ = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + executionSnapshot, output, err := vm.Run( subCtx, fvm.Transaction(txBody, 0), @@ -1889,10 +2126,10 @@ func TestScriptContractMutationsFailure(t *testing.T) { snapshotTree = snapshotTree.Append(executionSnapshot) script := fvm.Script([]byte(fmt.Sprintf(` - pub fun main(account: Address) { - let acc = getAuthAccount(account) + access(all) fun main(account: Address) { + let acc = getAuthAccount<auth(UpdateContract) &Account>(account) let n = acc.contracts.names[0] - acc.contracts.update__experimental(name: n, code: "%s".decodeHex()) + acc.contracts.update(name: n, code: "%s".decodeHex()) }`, hex.EncodeToString([]byte(contract))))).WithArguments( jsoncdc.MustEncode(address), ) @@ -1937,9 +2174,16 @@ func TestScriptAccountKeyMutationsFailure(t *testing.T) { privateKey, _ := crypto.GeneratePrivateKey(crypto.ECDSAP256, seed) script := fvm.Script([]byte(` - pub fun main(account: Address, k: [UInt8]) { - let acc = getAuthAccount(account) - acc.addPublicKey(k) + access(all) fun main(account: Address, k: [UInt8]) { + let acc = getAuthAccount<auth(AddKey) &Account>(account) + acc.keys.add( + publicKey: PublicKey( + publicKey: k, + signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 + ), + hashAlgorithm: HashAlgorithm.SHA3_256, + weight: 100.0 + ) }`, )).WithArguments( jsoncdc.MustEncode(address), @@ -1979,9 +2223,9 @@ func TestScriptAccountKeyMutationsFailure(t *testing.T) { scriptCtx := fvm.NewContextFromParent(ctx) script := fvm.Script([]byte(` - pub fun main(account: Address) { - let acc = getAuthAccount(account) - acc.removePublicKey(0) + access(all) fun main(account: Address) { + let acc = getAuthAccount<auth(RevokeKey) &Account>(account) + acc.keys.revoke(keyIndex: 0) }`, )).WithArguments( jsoncdc.MustEncode(address), @@ -1998,6 +2242,91 @@ func TestScriptAccountKeyMutationsFailure(t *testing.T) { ) } +func TestScriptExecutionLimit(t *testing.T) { + + t.Parallel() + + chain := flow.Emulator.Chain() + + script := fvm.Script([]byte(` + access(all) fun main() { + var s: Int256 = 1024102410241024 + var i: Int256 = 0 + var a: Int256 = 7 + var b: Int256 = 5 + var c: Int256 = 2 + + while i < 150000 { + s = s * a + s = s / b + s = s / c + i = i + 1 + } + } + `)) + + bootstrapProcedureOptions := []fvm.BootstrapProcedureOption{ + fvm.WithTransactionFee(fvm.DefaultTransactionFees), + fvm.WithExecutionMemoryLimit(math.MaxUint32), + fvm.WithExecutionEffortWeights(map[common.ComputationKind]uint64{ + common.ComputationKindStatement: 1569, + common.ComputationKindLoop: 1569, + common.ComputationKindFunctionInvocation: 1569, + environment.ComputationKindGetValue: 808, + environment.ComputationKindCreateAccount: 2837670, + environment.ComputationKindSetValue: 765, + }), + fvm.WithExecutionMemoryWeights(meter.DefaultMemoryWeights), + fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), + fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), + fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), + } + + t.Run("Exceeding computation limit", + newVMTest().withBootstrapProcedureOptions( + bootstrapProcedureOptions..., + ).withContextOptions( + fvm.WithTransactionFeesEnabled(true), + fvm.WithAccountStorageLimit(true), + fvm.WithComputationLimit(10000), + 
fvm.WithChain(chain), + ).run( + func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { + scriptCtx := fvm.NewContextFromParent(ctx) + + _, output, err := vm.Run(scriptCtx, script, snapshotTree) + require.NoError(t, err) + require.Error(t, output.Err) + require.True(t, errors.IsComputationLimitExceededError(output.Err)) + require.ErrorContains(t, output.Err, "computation exceeds limit (10000)") + require.GreaterOrEqual(t, output.ComputationUsed, uint64(10000)) + require.GreaterOrEqual(t, output.MemoryEstimate, uint64(548020260)) + }, + ), + ) + + t.Run("Sufficient computation limit", + newVMTest().withBootstrapProcedureOptions( + bootstrapProcedureOptions..., + ).withContextOptions( + fvm.WithTransactionFeesEnabled(true), + fvm.WithAccountStorageLimit(true), + fvm.WithComputationLimit(20000), + fvm.WithChain(chain), + ).run( + func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { + scriptCtx := fvm.NewContextFromParent(ctx) + + _, output, err := vm.Run(scriptCtx, script, snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + require.GreaterOrEqual(t, output.ComputationUsed, uint64(17955)) + require.GreaterOrEqual(t, output.MemoryEstimate, uint64(984017413)) + }, + ), + ) +} + func TestInteractionLimit(t *testing.T) { type testCase struct { name string @@ -2011,7 +2340,7 @@ func TestInteractionLimit(t *testing.T) { interactionLimit: math.MaxUint64, require: func(t *testing.T, output fvm.ProcedureOutput) { require.NoError(t, output.Err) - require.Len(t, output.Events, 5) + require.Len(t, output.Events, 9) }, }, { @@ -2019,7 +2348,7 @@ func TestInteractionLimit(t *testing.T) { interactionLimit: fvm.DefaultMaxInteractionSize, require: func(t *testing.T, output fvm.ProcedureOutput) { require.NoError(t, output.Err) - require.Len(t, output.Events, 5) + require.Len(t, output.Events, 9) unittest.EnsureEventsIndexSeq(t, output.Events, flow.Testnet.Chain().ChainID()) }, }, @@ -2028,16 +2357,16 @@ func TestInteractionLimit(t *testing.T) { interactionLimit: 170000, require: func(t *testing.T, output fvm.ProcedureOutput) { require.NoError(t, output.Err) - require.Len(t, output.Events, 5) + require.Len(t, output.Events, 9) unittest.EnsureEventsIndexSeq(t, output.Events, flow.Testnet.Chain().ChainID()) }, }, { - name: "even lower low limit fails, and has only 3 events", + name: "even lower low limit fails, and has only 5 events", interactionLimit: 5000, require: func(t *testing.T, output fvm.ProcedureOutput) { require.Error(t, output.Err) - require.Len(t, output.Events, 3) + require.Len(t, output.Events, 5) unittest.EnsureEventsIndexSeq(t, output.Events, flow.Testnet.Chain().ChainID()) }, }, @@ -2059,14 +2388,17 @@ func TestInteractionLimit(t *testing.T) { ).bootstrapWith( func(vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) (snapshot.SnapshotTree, error) { // ==== Create an account ==== - var txBody *flow.TransactionBody - privateKey, txBody = testutil.CreateAccountCreationTransaction(t, chain) + var txBodyBuilder *flow.TransactionBodyBuilder + privateKey, txBodyBuilder = testutil.CreateAccountCreationTransaction(t, chain) - err := testutil.SignTransactionAsServiceAccount(txBody, 0, chain) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) if err != nil { return snapshotTree, err } + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + executionSnapshot, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -2084,24 
+2416,28 @@ func TestInteractionLimit(t *testing.T) { accountCreatedEvents := filterAccountCreatedEvents(output.Events) // read the address of the account created (e.g. "0x01") and convert it to flow.Address - data, err := jsoncdc.Decode(nil, accountCreatedEvents[0].Payload) + data, err := ccf.Decode(nil, accountCreatedEvents[0].Payload) if err != nil { return snapshotTree, err } + address = flow.ConvertAddress( - data.(cadence.Event).Fields[0].(cadence.Address)) + cadence.SearchFieldByName( + data.(cadence.Event), + cadenceStdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address), + ) // ==== Transfer tokens to new account ==== - txBody = transferTokensTx(chain). + txBodyBuilder = transferTokensTx(chain). AddAuthorizer(chain.ServiceAddress()). AddArgument(jsoncdc.MustEncode(cadence.UFix64(1_000_000))). - AddArgument(jsoncdc.MustEncode(cadence.NewAddress(address))) - - txBody.SetProposalKey(chain.ServiceAddress(), 0, 1) - txBody.SetPayer(chain.ServiceAddress()) + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(address))). + SetProposalKey(chain.ServiceAddress(), 0, 1). + SetPayer(chain.ServiceAddress()) err = testutil.SignEnvelope( - txBody, + txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey, ) @@ -2109,6 +2445,9 @@ func TestInteractionLimit(t *testing.T) { return snapshotTree, err } + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + executionSnapshot, output, err = vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -2126,24 +2465,26 @@ func TestInteractionLimit(t *testing.T) { t.Run(tc.name, vmt.run( func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { // ==== Transfer funds with lower interaction limit ==== - txBody := transferTokensTx(chain). + txBodyBuilder := transferTokensTx(chain). AddAuthorizer(address). AddArgument(jsoncdc.MustEncode(cadence.UFix64(1))). - AddArgument(jsoncdc.MustEncode(cadence.NewAddress(chain.ServiceAddress()))) - - txBody.SetProposalKey(address, 0, 0) - txBody.SetPayer(address) + AddArgument(jsoncdc.MustEncode(cadence.NewAddress(chain.ServiceAddress()))). + SetProposalKey(address, 0, 0). + SetPayer(address) hasher, err := exeUtils.NewHasher(privateKey.HashAlgo) require.NoError(t, err) - sig, err := txBody.Sign(txBody.EnvelopeMessage(), privateKey.PrivateKey, hasher) + sig, err := txBodyBuilder.Sign(txBodyBuilder.EnvelopeMessage(), privateKey.PrivateKey, hasher) require.NoError(t, err) - txBody.AddEnvelopeSignature(address, 0, sig) + txBodyBuilder.AddEnvelopeSignature(address, 0, sig) // ==== IMPORTANT LINE ==== ctx.MaxStateInteractionSize = tc.interactionLimit + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + _, output, err := vm.Run( ctx, fvm.Transaction(txBody, 0), @@ -2155,296 +2496,1739 @@ func TestInteractionLimit(t *testing.T) { } }
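// The hunks above migrate these tests from mutating a flow.TransactionBody in
// place to a build-then-sign flow. A minimal sketch of the new pattern,
// assuming only the TransactionBodyBuilder calls that appear in this diff
// (the script and key indices are placeholders):

txBodyBuilder := flow.NewTransactionBodyBuilder().
	SetScript([]byte(`transaction { prepare(signer: &Account) {} }`)).
	SetProposalKey(chain.ServiceAddress(), 0, 0).
	SetPayer(chain.ServiceAddress()).
	AddAuthorizer(chain.ServiceAddress())

// sign the builder first; Build() then validates and produces the final body
err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain)
require.NoError(t, err)
txBody, err := txBodyBuilder.Build()
require.NoError(t, err)

_, output, err := vm.Run(ctx, fvm.Transaction(txBody, 0), snapshotTree)
require.NoError(t, err)
require.NoError(t, output.Err)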
- privateKeys, err := testutil.GenerateAccountPrivateKeys(1) - privateKey := privateKeys[0] - require.NoError(t, err) - - // Bootstrap a ledger, creating accounts with the - // provided private keys and the root account. - snapshotTree, accounts, err := testutil.CreateAccounts( - vm, - snapshotTree, - privateKeys, - chain) - require.NoError(t, err) - account := accounts[0] - - var pragma string - if allowAccountLinking { - pragma = "#allowAccountLinking" - } - - code := fmt.Sprintf( - ` - %s - transaction { - prepare(acct: AuthAccount) { - acct.linkAccount(/private/foo) - } - } - `, - pragma, - ) - - txBody := flow.NewTransactionBody(). - SetScript([]byte(code)). - AddAuthorizer(account). - SetPayer(chain.ServiceAddress()). - SetProposalKey(chain.ServiceAddress(), 0, 0) - - _ = testutil.SignPayload(txBody, account, privateKey) - _ = testutil.SignEnvelope( - txBody, - chain.ServiceAddress(), - unittest.ServiceAccountPrivateKey) - - _, output, err := vm.Run( - ctx, - fvm.Transaction(txBody, 0), - snapshotTree) - require.NoError(t, err) - if allowAccountLinking { - require.NoError(t, output.Err) - } else { - require.Error(t, output.Err) - } - }, - )(t) - } - - t.Run("account linking allowed", func(t *testing.T) { - test(t, true) - }) +func TestAttachments(t *testing.T) { - t.Run("account linking disallowed", func(t *testing.T) { - test(t, false) - }) - }) + newVMTest(). + withBootstrapProcedureOptions(). + run( + func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + script := fvm.Script([]byte(` - t.Run("contract", func(t *testing.T) { + access(all) resource R {} - t.Parallel() + access(all) attachment A for R {} - test := func(t *testing.T, allowAccountLinking bool) { - newVMTest(). - withBootstrapProcedureOptions(). - withContextOptions( - fvm.WithReusableCadenceRuntimePool( - reusableRuntime.NewReusableCadenceRuntimePool( - 1, - runtime.Config{ - AccountLinkingEnabled: true, - }, - ), - ), - fvm.WithContractDeploymentRestricted(false), - ). - run( - func( - t *testing.T, - vm fvm.VM, - chain flow.Chain, - ctx fvm.Context, - snapshotTree snapshot.SnapshotTree, - ) { - // Create two private keys - privateKeys, err := testutil.GenerateAccountPrivateKeys(2) - require.NoError(t, err) - - // Bootstrap a ledger, creating accounts with the provided private keys and the root account. - snapshotTree, accounts, err := testutil.CreateAccounts( - vm, - snapshotTree, - privateKeys, - chain) - require.NoError(t, err) - - // Deploy contract - contractCode := ` - pub contract AccountLinker { - pub fun link(_ account: AuthAccount) { - account.linkAccount(/private/acct) - } - } - ` - - deployingContractScriptTemplate := ` - transaction { - prepare(signer: AuthAccount) { - signer.contracts.add( - name: "AccountLinker", - code: "%s".decodeHex() - ) - } - } - ` - - txBody := flow.NewTransactionBody(). - SetScript([]byte(fmt.Sprintf( - deployingContractScriptTemplate, - hex.EncodeToString([]byte(contractCode)), - ))). - SetPayer(chain.ServiceAddress()). - SetProposalKey(chain.ServiceAddress(), 0, 0). 
- AddAuthorizer(accounts[0]) - _ = testutil.SignPayload(txBody, accounts[0], privateKeys[0]) - _ = testutil.SignEnvelope(txBody, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) - - executionSnapshot, output, err := vm.Run( - ctx, - fvm.Transaction(txBody, 0), - snapshotTree) - require.NoError(t, err) - require.NoError(t, output.Err) - - snapshotTree = snapshotTree.Append(executionSnapshot) - - // Use contract - - var pragma string - if allowAccountLinking { - pragma = "#allowAccountLinking" + access(all) fun main() { + let r <- create R() + r[A] + destroy r } + `)) - code := fmt.Sprintf( - ` - %s - import AccountLinker from %s - transaction { - prepare(acct: AuthAccount) { - AccountLinker.link(acct) - } - } - `, - pragma, - accounts[0].HexWithPrefix(), - ) - - txBody = flow.NewTransactionBody(). - SetScript([]byte(code)). - AddAuthorizer(accounts[1]). - SetPayer(chain.ServiceAddress()). - SetProposalKey(chain.ServiceAddress(), 0, 1) - - _ = testutil.SignPayload(txBody, accounts[1], privateKeys[1]) - _ = testutil.SignEnvelope(txBody, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) - - _, output, err = vm.Run( - ctx, - fvm.Transaction(txBody, 1), - snapshotTree) - require.NoError(t, err) - if allowAccountLinking { - require.NoError(t, output.Err) - - require.Len(t, output.Events, 1) - require.Equal( - t, - flow.EventType("flow.AccountLinked"), - output.Events[0].Type) - } else { - require.Error(t, output.Err) - } - }, - )(t) - } + _, output, err := vm.Run(ctx, script, snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) - t.Run("account linking allowed", func(t *testing.T) { - test(t, true) - }) + }, + )(t) - t.Run("account linking disallowed", func(t *testing.T) { - test(t, false) - }) - }) } -func TestAttachments(t *testing.T) { - test := func(t *testing.T, attachmentsEnabled bool) { +func TestCapabilityControllers(t *testing.T) { + test := func(t *testing.T) { newVMTest(). withBootstrapProcedureOptions(). withContextOptions( fvm.WithReusableCadenceRuntimePool( reusableRuntime.NewReusableCadenceRuntimePool( 1, - runtime.Config{ - AttachmentsEnabled: attachmentsEnabled, - }, + runtime.Config{}, ), ), ). - run( - func( - t *testing.T, - vm fvm.VM, - chain flow.Chain, - ctx fvm.Context, - snapshotTree snapshot.SnapshotTree, - ) { - script := fvm.Script([]byte(` - - pub resource R {} - - pub attachment A for R {} - - pub fun main() { - let r <- create R() - r[A] - destroy r + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript([]byte(` + transaction { + prepare(signer: auth(Capabilities) &Account) { + let cap = signer.capabilities.storage.issue<&Int>(/storage/foo) + assert(cap.id == 7) + + let cap2 = signer.capabilities.storage.issue<&String>(/storage/bar) + assert(cap2.id == 8) + } } - `)) + `)). + SetProposalKey(chain.ServiceAddress(), 0, 0). + AddAuthorizer(chain.ServiceAddress()). 
+ SetPayer(chain.ServiceAddress()) - _, output, err := vm.Run(ctx, script, snapshotTree) - require.NoError(t, err) + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) - if attachmentsEnabled { - require.NoError(t, output.Err) - } else { - require.Error(t, output.Err) - require.ErrorContains( - t, - output.Err, - "attachments are not enabled") - } - }, + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + _, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + }, + )(t) + } + + test(t) + +} + +func TestStorageIterationWithBrokenValues(t *testing.T) { + + t.Parallel() + + newVMTest(). + withBootstrapProcedureOptions(). + withContextOptions( + fvm.WithReusableCadenceRuntimePool( + reusableRuntime.NewReusableCadenceRuntimePool( + 1, + runtime.Config{}, + ), + ), + fvm.WithContractDeploymentRestricted(false), + ). + run( + func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + // Create a private key + privateKeys, err := testutil.GenerateAccountPrivateKeys(1) + require.NoError(t, err) + + // Bootstrap a ledger, creating an account with the provided private key and the root account. + snapshotTree, accounts, err := testutil.CreateAccounts( + vm, + snapshotTree, + privateKeys, + chain, + ) + require.NoError(t, err) + + contractA := ` + access(all) contract A { + access(all) struct interface Foo{} + } + ` + + updatedContractA := ` + access(all) contract A { + access(all) struct interface Foo{ + access(all) fun hello() + } + } + ` + + contractB := fmt.Sprintf(` + import A from %s + + access(all) contract B { + access(all) struct Bar : A.Foo {} + + access(all) struct interface Foo2{} + }`, + accounts[0].HexWithPrefix(), + ) + + contractC := fmt.Sprintf(` + import B from %s + import A from %s + + access(all) contract C { + access(all) struct Bar : A.Foo, B.Foo2 {} + + access(all) struct interface Foo3{} + }`, + accounts[0].HexWithPrefix(), + accounts[0].HexWithPrefix(), + ) + + contractD := fmt.Sprintf(` + import C from %s + import B from %s + import A from %s + + access(all) contract D { + access(all) struct Bar : A.Foo, B.Foo2, C.Foo3 {} + }`, + accounts[0].HexWithPrefix(), + accounts[0].HexWithPrefix(), + accounts[0].HexWithPrefix(), + ) + + var sequenceNumber uint64 = 0 + + runTransaction := func(code []byte) { + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript(code). + SetPayer(chain.ServiceAddress()). + SetProposalKey(chain.ServiceAddress(), 0, sequenceNumber). 
+ AddAuthorizer(accounts[0]) + + _ = testutil.SignPayload(txBodyBuilder, accounts[0], privateKeys[0]) + _ = testutil.SignEnvelope(txBodyBuilder, chain.ServiceAddress(), unittest.ServiceAccountPrivateKey) + + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree, + ) + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // increment sequence number + sequenceNumber++ + } + + // Deploy `A` + runTransaction(runtime_utils.DeploymentTransaction( + "A", + []byte(contractA), + )) + + // Deploy `B` + runTransaction(runtime_utils.DeploymentTransaction( + "B", + []byte(contractB), + )) + + // Deploy `C` + runTransaction(runtime_utils.DeploymentTransaction( + "C", + []byte(contractC), + )) + + // Deploy `D` + runTransaction(runtime_utils.DeploymentTransaction( + "D", + []byte(contractD), + )) + + // Store values + runTransaction([]byte(fmt.Sprintf( + ` + import D from %s + import C from %s + import B from %s + + transaction { + prepare(signer: auth(Capabilities, Storage) &Account) { + signer.storage.save("Hello, World!", to: /storage/a) + signer.storage.save(["one", "two", "three"], to: /storage/b) + signer.storage.save(D.Bar(), to: /storage/c) + signer.storage.save(C.Bar(), to: /storage/d) + signer.storage.save(B.Bar(), to: /storage/e) + + let aCap = signer.capabilities.storage.issue<&String>(/storage/a) + signer.capabilities.publish(aCap, at: /public/a) + + let bCap = signer.capabilities.storage.issue<&[String]>(/storage/b) + signer.capabilities.publish(bCap, at: /public/b) + + let cCap = signer.capabilities.storage.issue<&D.Bar>(/storage/c) + signer.capabilities.publish(cCap, at: /public/c) + + let dCap = signer.capabilities.storage.issue<&C.Bar>(/storage/d) + signer.capabilities.publish(dCap, at: /public/d) + + let eCap = signer.capabilities.storage.issue<&B.Bar>(/storage/e) + signer.capabilities.publish(eCap, at: /public/e) + } + }`, + accounts[0].HexWithPrefix(), + accounts[0].HexWithPrefix(), + accounts[0].HexWithPrefix(), + ))) + + // Update `A`, such that `B`, `C` and `D` are now broken. + runTransaction(runtime_utils.UpdateTransaction( + "A", + []byte(updatedContractA), + )) + + // Iterate stored values + runTransaction([]byte( + ` + transaction { + prepare(account: auth(Storage) &Account) { + var total = 0 + account.storage.forEachPublic(fun (path: PublicPath, type: Type): Bool { + let cap = account.capabilities.get<&AnyStruct>(path) + if cap.check() { + total = total + 1 + } + return true + }) + assert(total == 2, message:"found ".concat(total.toString())) + + total = 0 + account.storage.forEachStored(fun (path: StoragePath, type: Type): Bool { + if account.storage.check<AnyStruct>(from: path) { + account.storage.copy<AnyStruct>(from: path) + total = total + 1 + } + return true + }) + + assert(total == 2, message:"found ".concat(total.toString())) + } + }`, + )) + }, + )(t) +} + +func TestEntropyCallOnlyOkIfAllowed(t *testing.T) { + source := testutil.EntropyProviderFixture(nil) + + test := func(t *testing.T, allowed bool) { + newVMTest(). + withBootstrapProcedureOptions(). + withContextOptions( + fvm.WithRandomSourceHistoryCallAllowed(allowed), + fvm.WithEntropyProvider(source), + ). + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + txBodyBuilder := flow.NewTransactionBodyBuilder(). 
+ SetScript([]byte(` + transaction { + prepare() { + randomSourceHistory() + } + } + `)). + SetProposalKey(chain.ServiceAddress(), 0, 0). + SetPayer(chain.ServiceAddress()) + + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + _, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + require.NoError(t, err) + + if allowed { + require.NoError(t, output.Err) + } else { + require.Error(t, output.Err) + require.True(t, errors.HasErrorCode(output.Err, errors.ErrCodeOperationNotSupportedError)) + } + }, )(t) } - t.Run("attachments enabled", func(t *testing.T) { + t.Run("enabled", func(t *testing.T) { test(t, true) }) - t.Run("attachments disabled", func(t *testing.T) { + t.Run("disabled", func(t *testing.T) { test(t, false) }) } + +func TestEntropyCallExpectsNoParameters(t *testing.T) { + source := testutil.EntropyProviderFixture(nil) + newVMTest(). + withBootstrapProcedureOptions(). + withContextOptions( + fvm.WithRandomSourceHistoryCallAllowed(true), + fvm.WithEntropyProvider(source), + ). + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript([]byte(` + transaction { + prepare() { + randomSourceHistory("foo") + } + } + `)). + SetProposalKey(chain.ServiceAddress(), 0, 0). + SetPayer(chain.ServiceAddress()) + + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + _, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + require.NoError(t, err) + + require.ErrorContains(t, output.Err, "too many arguments") + }, + )(t) +} + +func TestTransientNetworkCoreContractAddresses(t *testing.T) { + + // This test ensures that the transient networks have the correct core contract addresses. + newVMTest(). + run( + func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + for _, contract := range sc.All() { + txnState := testutils.NewSimpleTransaction(snapshotTree) + accounts := environment.NewAccounts(txnState) + + yes, err := accounts.ContractExists(contract.Name, contract.Address) + require.NoError(t, err) + require.True(t, yes, "contract %s does not exist", contract.Name) + } + }) +} + +func TestFlowCallbackScheduler(t *testing.T) { + ctxOpts := []fvm.Option{ + fvm.WithScheduleCallbacksEnabled(true), + } + + newVMTest(). + withContextOptions(ctxOpts...). + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + require.NotNil(t, sc.FlowCallbackScheduler.Address) + require.NotNil(t, sc.FlowCallbackScheduler.Name) + + script := fvm.Script([]byte(fmt.Sprintf(` + import FlowTransactionScheduler from %s + access(all) fun main(): FlowTransactionScheduler.Status? 
{ + return FlowTransactionScheduler.getStatus(id: 1) + } + `, sc.FlowCallbackScheduler.Address.HexWithPrefix()))) + + _, output, err := vm.Run(ctx, script, snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + require.NotNil(t, output.Value) + require.Equal(t, output.Value, cadence.NewOptional(nil)) + + script = fvm.Script([]byte(fmt.Sprintf(` + import FlowTransactionScheduler from %s + access(all) fun main(): UInt64 { + return FlowTransactionScheduler.getSlotAvailableEffort(timestamp: 1.0, priority: FlowTransactionScheduler.Priority.High) + } + `, sc.FlowCallbackScheduler.Address.HexWithPrefix()))) + + const maxEffortAvailable = 30_000 // FLIP 330 + _, output, err = vm.Run(ctx, script, snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + require.NotNil(t, output.Value) + require.Equal(t, cadence.UInt64(maxEffortAvailable), output.Value) + }, + )(t) +} + +func TestEVM(t *testing.T) { + blocks := new(envMock.Blocks) + block1 := unittest.BlockFixture() + blocks.On("ByHeightFrom", + block1.Height, + block1.ToHeader(), + ).Return(block1.ToHeader(), nil) + + ctxOpts := []fvm.Option{ + // default is testnet, but testnet has a special EVM storage contract location + // so we have to use emulator here so that the EVM storage contract is deployed + // to the 5th address + fvm.WithChain(flow.Emulator.Chain()), + fvm.WithEVMEnabled(true), + fvm.WithBlocks(blocks), + fvm.WithBlockHeader(block1.ToHeader()), + fvm.WithCadenceLogging(true), + } + + t.Run("successful transaction", newVMTest(). + withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). + withContextOptions(ctxOpts...). + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + // generate test address + genArr := make([]cadence.Value, 20) + for i := range genArr { + genArr[i] = cadence.UInt8(i) + } + addrBytes := cadence.NewArray(genArr).WithType(stdlib.EVMAddressBytesCadenceType) + encodedArg, err := jsoncdc.Encode(addrBytes) + require.NoError(t, err) + + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf(` + import EVM from %s + + transaction(bytes: [UInt8; 20]) { + execute { + let addr = EVM.EVMAddress(bytes: bytes) + log(addr) + } + } + `, sc.EVMContract.Address.HexWithPrefix()))). + SetProposalKey(chain.ServiceAddress(), 0, 0). + SetPayer(chain.ServiceAddress()). + AddArgument(encodedArg) + + err = testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + _, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + require.Len(t, output.Logs, 1) + require.Equal(t, output.Logs[0], fmt.Sprintf( + "A.%s.EVM.EVMAddress(bytes: %s)", + sc.EVMContract.Address, + addrBytes.String(), + )) + }), + ) + + // this test makes sure the execution error is correctly handled and returned as a correct type + t.Run("execution reverted", newVMTest(). + withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). + withContextOptions(ctxOpts...). 
+ run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + script := fvm.Script([]byte(fmt.Sprintf(` + import EVM from %s + + access(all) fun main() { + let bal = EVM.Balance(attoflow: 1000000000000000000) + let acc <- EVM.createCadenceOwnedAccount() + + // withdraw insufficient balance + destroy acc.withdraw(balance: bal) + destroy acc + } + `, sc.EVMContract.Address.HexWithPrefix()))) + + _, output, err := vm.Run(ctx, script, snapshotTree) + + require.NoError(t, err) + require.Error(t, output.Err) + require.True(t, errors.IsEVMError(output.Err)) + + // make sure error is not treated as internal error by Cadence + var internal cadenceErrors.InternalError + require.False(t, errors.As(output.Err, &internal)) + }), + ) + + // this test makes sure an internal EVM error is correctly returned and has the correct type + // we have implemented a snapshot wrapper to return an error from the EVM + t.Run("internal evm error handling", newVMTest(). + withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). + withContextOptions(ctxOpts...). + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + tests := []struct { + err error + errChecker func(error) bool + }{{ + types.ErrNotImplemented, + types.IsAFatalError, + }, { + types.NewStateError(fmt.Errorf("test state error")), + types.IsAStateError, + }} + + for _, e := range tests { + // this mock returns the error provided by the test case once the EVM starts to access address allocator registers + // that is done to make sure the error is coming out of EVM execution + errStorage := &mock.StorageSnapshot{} + errStorage. + On("Get", mockery.AnythingOfType("flow.RegisterID")). + Return(func(id flow.RegisterID) (flow.RegisterValue, error) { + if id.Key == "LatestBlock" || id.Key == "LatestBlockProposal" { + return nil, e.err + } + return snapshotTree.Get(id) + }) + + script := fvm.Script([]byte(fmt.Sprintf(` + import EVM from %s + + access(all) + fun main() { + destroy <- EVM.createCadenceOwnedAccount() + } + `, sc.EVMContract.Address.HexWithPrefix()))) + + _, output, err := vm.Run(ctx, script, errStorage) + + require.NoError(t, output.Err) + require.Error(t, err) + // make sure the error is of the right type + require.True(t, e.errChecker(err), "error is not of the right type") + } + }), + ) + + t.Run("deploy contract code", newVMTest(). + withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). + withContextOptions(ctxOpts...). + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf(` + import FungibleToken from %s + import FlowToken from %s + import EVM from %s + + transaction() { + prepare(acc: auth(Storage) &Account) { + let vaultRef = acc.storage + .borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) + ?? panic("Could not borrow reference to the owner's Vault!") + + let evmHeartbeat = acc.storage + .borrow<&EVM.Heartbeat>(from: /storage/EVMHeartbeat) + ?? panic("Couldn't borrow EVM.Heartbeat Resource") + + let acc <- EVM.createCadenceOwnedAccount() + let amount <- vaultRef.withdraw(amount: 0.0000001) as! 
@FlowToken.Vault + acc.deposit(from: <- amount) + destroy acc + + // commit blocks + evmHeartbeat.heartbeat() + } + }`, + sc.FungibleToken.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix(), + sc.FlowServiceAccount.Address.HexWithPrefix(), // TODO this should be sc.EVM.Address not found there??? + ))). + SetProposalKey(chain.ServiceAddress(), 0, 0). + AddAuthorizer(chain.ServiceAddress()). + SetPayer(chain.ServiceAddress()) + + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + ctx = fvm.NewContextFromParent(ctx, fvm.WithEVMEnabled(true)) + _, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + require.Len(t, output.Events, 6) + + txExe, blockExe := output.Events[3], output.Events[5] + txExecutedID := common.NewAddressLocation( + nil, + common.Address(sc.EVMContract.Address), + string(events.EventTypeTransactionExecuted), + ).ID() + blockExecutedID := common.NewAddressLocation( + nil, + common.Address(sc.EVMContract.Address), + string(events.EventTypeBlockExecuted), + ).ID() + assert.Equal(t, txExecutedID, string(txExe.Type)) + assert.Equal(t, blockExecutedID, string(blockExe.Type)) + + // convert events to type ids + eventTypeIDs := make([]common.TypeID, 0, len(output.Events)) + + for _, event := range output.Events { + eventTypeIDs = append(eventTypeIDs, common.TypeID(event.Type)) + } + + assert.ElementsMatch( + t, + []common.TypeID{ + common.TypeID(txExecutedID), + "A.f8d6e0586b0a20c7.EVM.CadenceOwnedAccountCreated", + "A.ee82856bf20e2aa6.FungibleToken.Withdrawn", + common.TypeID(txExecutedID), + "A.f8d6e0586b0a20c7.EVM.FLOWTokensDeposited", + common.TypeID(blockExecutedID), + }, + eventTypeIDs, + ) + }), + ) +} + +func TestVMBridge(t *testing.T) { + blocks := new(envMock.Blocks) + block1 := unittest.BlockFixture() + blocks.On("ByHeightFrom", + block1.Height, + block1.ToHeader(), + ).Return(block1.ToHeader(), nil) + + ctxOpts := []fvm.Option{ + // default is testnet, but testnet has a special EVM storage contract location + // so we have to use emulator here so that the EVM storage contract is deployed + // to the 5th address + fvm.WithChain(flow.Emulator.Chain()), + fvm.WithEVMEnabled(true), + fvm.WithBlocks(blocks), + fvm.WithBlockHeader(block1.ToHeader()), + fvm.WithCadenceLogging(true), + fvm.WithContractDeploymentRestricted(false), + } + + t.Run("successful FT Type Onboarding and Bridging", newVMTest(). + withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true), fvm.WithSetupVMBridgeEnabled(true)). + withContextOptions(ctxOpts...). 
+ run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + env := sc.AsTemplateEnv() + + bridgeEnv := bridge.Environment{ + CrossVMNFTAddress: env.ServiceAccountAddress, + CrossVMTokenAddress: env.ServiceAccountAddress, + FlowEVMBridgeHandlerInterfacesAddress: env.ServiceAccountAddress, + IBridgePermissionsAddress: env.ServiceAccountAddress, + ICrossVMAddress: env.ServiceAccountAddress, + ICrossVMAssetAddress: env.ServiceAccountAddress, + IEVMBridgeNFTMinterAddress: env.ServiceAccountAddress, + IEVMBridgeTokenMinterAddress: env.ServiceAccountAddress, + IFlowEVMNFTBridgeAddress: env.ServiceAccountAddress, + IFlowEVMTokenBridgeAddress: env.ServiceAccountAddress, + FlowEVMBridgeAddress: env.ServiceAccountAddress, + FlowEVMBridgeAccessorAddress: env.ServiceAccountAddress, + FlowEVMBridgeConfigAddress: env.ServiceAccountAddress, + FlowEVMBridgeHandlersAddress: env.ServiceAccountAddress, + FlowEVMBridgeNFTEscrowAddress: env.ServiceAccountAddress, + FlowEVMBridgeResolverAddress: env.ServiceAccountAddress, + FlowEVMBridgeTemplatesAddress: env.ServiceAccountAddress, + FlowEVMBridgeTokenEscrowAddress: env.ServiceAccountAddress, + FlowEVMBridgeUtilsAddress: env.ServiceAccountAddress, + ArrayUtilsAddress: env.ServiceAccountAddress, + ScopedFTProvidersAddress: env.ServiceAccountAddress, + SerializeAddress: env.ServiceAccountAddress, + SerializeMetadataAddress: env.ServiceAccountAddress, + StringUtilsAddress: env.ServiceAccountAddress, + } + + // Create an account private key. + privateKey, err := testutil.GenerateAccountPrivateKey() + require.NoError(t, err) + + // Create accounts with the provided private + // key and the root account. + snapshotTree, accounts, err := testutil.CreateAccounts( + vm, + snapshotTree, + []flow.AccountPrivateKey{privateKey}, + chain) + require.NoError(t, err) + + txBodyBuilder := blueprints.TransferFlowTokenTransaction(env, chain.ServiceAddress(), accounts[0], "2.0") + + err = testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Deploy the ExampleToken contract + tokenContract := contracts.ExampleToken(env) + tokenContractName := "ExampleToken" + txBodyBuilder = blueprints.DeployContractTransaction( + accounts[0], + tokenContract, + tokenContractName, + ) + + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 0) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Onboard the Fungible Token Type + typeToOnboard := "A." + accounts[0].String() + "." 
+ tokenContractName + ".Vault" + + txBodyBuilder = blueprints.OnboardToBridgeByTypeIDTransaction(env, bridgeEnv, accounts[0], typeToOnboard) + + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 1) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + require.NoError(t, err) + require.NoError(t, output.Err) + require.Len(t, output.Events, 7) + for _, event := range output.Events { + if strings.Contains(string(event.Type), "Onboarded") { + // decode the event payload + data, _ := ccf.Decode(nil, event.Payload) + // get the contractAddress field from the event + typeOnboarded := cadence.SearchFieldByName( + data.(cadence.Event), + "type", + ).(cadence.String) + + require.Equal(t, typeToOnboard, typeOnboarded.String()[1:len(typeOnboarded)+1]) + } + } + + // Create COA in the new account + txBodyBuilder = blueprints.CreateCOATransaction(env, bridgeEnv, accounts[0]) + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 2) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Bridge the Fungible Token to EVM + txBodyBuilder = blueprints.BridgeFTToEVMTransaction(env, bridgeEnv, accounts[0], typeToOnboard, "1.0") + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 3) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Confirm that the FT is escrowed + script := blueprints.GetEscrowedTokenBalanceScript(env, bridgeEnv) + + arguments := []cadence.Value{ + cadence.String(typeToOnboard), + } + + encodedArguments := make([][]byte, 0, len(arguments)) + for _, argument := range arguments { + encodedArguments = append(encodedArguments, jsoncdc.MustEncode(argument)) + } + + _, output, err = vm.Run( + ctx, + fvm.Script(script). 
+ WithArguments(encodedArguments...), + snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + + result := output.Value.(cadence.Optional).Value + expected, _ := cadence.NewUFix64("1.0") + require.Equal(t, expected, result) + + // Bridge the tokens back to Cadence + txBodyBuilder = blueprints.BridgeFTFromEVMTransaction(env, bridgeEnv, accounts[0], typeToOnboard, 1000000000000000000) + + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 4) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Confirm that the FT is no longer escrowed + script = blueprints.GetEscrowedTokenBalanceScript(env, bridgeEnv) + + arguments = []cadence.Value{ + cadence.String(typeToOnboard), + } + + encodedArguments = make([][]byte, 0, len(arguments)) + for _, argument := range arguments { + encodedArguments = append(encodedArguments, jsoncdc.MustEncode(argument)) + } + + _, output, err = vm.Run( + ctx, + fvm.Script(script). + WithArguments(encodedArguments...), + snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + + result = output.Value.(cadence.Optional).Value + expected, _ = cadence.NewUFix64("0.0") + require.Equal(t, expected, result) + }), + ) + + t.Run("successful NFT Type Onboarding and Bridging", newVMTest(). + withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true), fvm.WithSetupVMBridgeEnabled(true)). + withContextOptions(ctxOpts...). + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + env := sc.AsTemplateEnv() + + bridgeEnv := bridge.Environment{ + CrossVMNFTAddress: env.ServiceAccountAddress, + CrossVMTokenAddress: env.ServiceAccountAddress, + FlowEVMBridgeHandlerInterfacesAddress: env.ServiceAccountAddress, + IBridgePermissionsAddress: env.ServiceAccountAddress, + ICrossVMAddress: env.ServiceAccountAddress, + ICrossVMAssetAddress: env.ServiceAccountAddress, + IEVMBridgeNFTMinterAddress: env.ServiceAccountAddress, + IEVMBridgeTokenMinterAddress: env.ServiceAccountAddress, + IFlowEVMNFTBridgeAddress: env.ServiceAccountAddress, + IFlowEVMTokenBridgeAddress: env.ServiceAccountAddress, + FlowEVMBridgeAddress: env.ServiceAccountAddress, + FlowEVMBridgeAccessorAddress: env.ServiceAccountAddress, + FlowEVMBridgeConfigAddress: env.ServiceAccountAddress, + FlowEVMBridgeHandlersAddress: env.ServiceAccountAddress, + FlowEVMBridgeNFTEscrowAddress: env.ServiceAccountAddress, + FlowEVMBridgeResolverAddress: env.ServiceAccountAddress, + FlowEVMBridgeTemplatesAddress: env.ServiceAccountAddress, + FlowEVMBridgeTokenEscrowAddress: env.ServiceAccountAddress, + FlowEVMBridgeUtilsAddress: env.ServiceAccountAddress, + ArrayUtilsAddress: env.ServiceAccountAddress, + ScopedFTProvidersAddress: env.ServiceAccountAddress, + SerializeAddress: env.ServiceAccountAddress, + SerializeMetadataAddress: env.ServiceAccountAddress, + StringUtilsAddress: env.ServiceAccountAddress, + } + + // Create an account private key. + privateKey, err := testutil.GenerateAccountPrivateKey() + require.NoError(t, err) + + // Create accounts with the provided private + // key and the root account. 
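// The bridge tests above and below repeatedly decode a CCF-encoded event
// payload and extract a single field by name. A sketch of that recurring
// pattern as a helper (the helper name is hypothetical; the ccf and cadence
// calls are the ones used in this diff):

func eventField(t *testing.T, payload []byte, fieldName string) cadence.Value {
	data, err := ccf.Decode(nil, payload)
	require.NoError(t, err)
	return cadence.SearchFieldByName(data.(cadence.Event), fieldName)
}

// e.g. eventField(t, event.Payload, "type") for the Onboarded events,
// or eventField(t, event.Payload, "id") for the Minted event below.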
+ snapshotTree, accounts, err := testutil.CreateAccounts( + vm, + snapshotTree, + []flow.AccountPrivateKey{privateKey}, + chain) + require.NoError(t, err) + + txBodyBuilder := blueprints.TransferFlowTokenTransaction(env, chain.ServiceAddress(), accounts[0], "2.0") + + err = testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Deploy the ExampleNFT contract + nftContract := contracts.ExampleNFT(env) + nftContractName := "ExampleNFT" + txBodyBuilder = blueprints.DeployContractTransaction( + accounts[0], + nftContract, + nftContractName, + ) + + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 0) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Onboard the Non-Fungible Token Type + typeToOnboard := "A." + accounts[0].String() + "." + nftContractName + ".NFT" + + txBodyBuilder = blueprints.OnboardToBridgeByTypeIDTransaction(env, bridgeEnv, accounts[0], typeToOnboard) + + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 1) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + require.Len(t, output.Events, 7) + for _, event := range output.Events { + if strings.Contains(string(event.Type), "Onboarded") { + // decode the event payload + data, _ := ccf.Decode(nil, event.Payload) + // get the contractAddress field from the event + typeOnboarded := cadence.SearchFieldByName( + data.(cadence.Event), + "type", + ).(cadence.String) + + require.Equal(t, typeToOnboard, typeOnboarded.String()[1:len(typeOnboarded)+1]) + } + } + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Create COA in the new account + txBodyBuilder = blueprints.CreateCOATransaction(env, bridgeEnv, accounts[0]) + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 2) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Mint an NFT + txBodyBuilder = flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf( + ` + import NonFungibleToken from 0x%s + import ExampleNFT from 0x%s + import MetadataViews from 0x%s + import FungibleToken from 0x%s + + transaction { + + /// local variable for storing the minter reference + let minter: &ExampleNFT.NFTMinter + + /// Reference to the receiver's collection + let recipientCollectionRef: &{NonFungibleToken.Receiver} + + prepare(signer: auth(BorrowValue) &Account) { + + let collectionData = ExampleNFT.resolveContractView(resourceType: nil, viewType: Type<MetadataViews.NFTCollectionData>()) as! MetadataViews.NFTCollectionData? + ?? panic("Could not resolve NFTCollectionData view. 
The ExampleNFT contract needs to implement the NFTCollectionData Metadata view in order to execute this transaction") + + // borrow a reference to the NFTMinter resource in storage + self.minter = signer.storage.borrow<&ExampleNFT.NFTMinter>(from: ExampleNFT.MinterStoragePath) + ?? panic("The signer does not store an ExampleNFT.Minter object at the path " + .concat(ExampleNFT.MinterStoragePath.toString()) + .concat("The signer must initialize their account with this minter resource first!")) + + // Borrow the recipient's public NFT collection reference + self.recipientCollectionRef = getAccount(0x%s).capabilities.borrow<&{NonFungibleToken.Receiver}>(collectionData.publicPath) + ?? panic("The recipient does not have a NonFungibleToken Receiver at " + .concat(collectionData.publicPath.toString()) + .concat(" that is capable of receiving an NFT.") + .concat("The recipient must initialize their account with this collection and receiver first!")) + } + + execute { + // Mint the NFT and deposit it to the recipient's collection + let mintedNFT <- self.minter.mintNFT( + name: "BridgeTestNFT", + description: "", + thumbnail: "", + royalties: [] + ) + self.recipientCollectionRef.deposit(token: <-mintedNFT) + } + } + `, + env.NonFungibleTokenAddress, accounts[0].String(), env.NonFungibleTokenAddress, env.FungibleTokenAddress, accounts[0].String(), + ))).AddAuthorizer(accounts[0]) + + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 3) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + id := cadence.UInt64(0) + + for _, event := range output.Events { + if strings.Contains(string(event.Type), "Minted") { + // decode the event payload + data, _ := ccf.Decode(nil, event.Payload) + // get the contractAddress field from the event + id = cadence.SearchFieldByName( + data.(cadence.Event), + "id", + ).(cadence.UInt64) + } + } + + // Bridge the NFT to EVM + txBodyBuilder = blueprints.BridgeNFTToEVMTransaction(env, bridgeEnv, accounts[0], typeToOnboard, id) + + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 4) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Confirm that the NFT is escrowed + script := blueprints.GetIsNFTInEscrowScript(env, bridgeEnv) + + arguments := []cadence.Value{ + cadence.String(typeToOnboard), + id, + } + + encodedArguments := make([][]byte, 0, len(arguments)) + for _, argument := range arguments { + encodedArguments = append(encodedArguments, jsoncdc.MustEncode(argument)) + } + + _, output, err = vm.Run( + ctx, + fvm.Script(script). 
+ WithArguments(encodedArguments...), + snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + + result := output.Value.(cadence.Bool) + require.Equal(t, cadence.Bool(true), result) + + id256 := cadence.NewUInt256(uint(id)) + + // Bridge the NFT back to Cadence + txBodyBuilder = blueprints.BridgeNFTFromEVMTransaction(env, bridgeEnv, accounts[0], typeToOnboard, id256) + + err = testutil.SignTransaction(txBodyBuilder, accounts[0], privateKey, 5) + require.NoError(t, err) + + txBody, err = txBodyBuilder.Build() + require.NoError(t, err) + + executionSnapshot, output, err = vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + + snapshotTree = snapshotTree.Append(executionSnapshot) + + // Confirm that the NFT is no longer escrowed + + _, output, err = vm.Run( + ctx, + fvm.Script(script). + WithArguments(encodedArguments...), + snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + + result = output.Value.(cadence.Bool) + require.Equal(t, cadence.Bool(false), result) + }), + ) +} + +func TestAccountCapabilitiesGetEntitledRejection(t *testing.T) { + + // Note: This cannot be tested anymore using a transaction, + // because publish method also aborts when trying to publish an entitled capability. + // Therefore, test the functionality of the `ValidateAccountCapabilitiesGet` function. + + t.Run("entitled capability", func(t *testing.T) { + + env := environment.NewScriptEnv( + context.TODO(), + tracing.NewMockTracerSpan(), + environment.DefaultEnvironmentParams(), + nil, + ) + + valid, err := env.ValidateAccountCapabilitiesGet( + nil, + interpreter.EmptyLocationRange, + interpreter.AddressValue(common.ZeroAddress), + interpreter.NewUnmeteredPathValue(common.PathDomainPublic, "dummy_value"), + sema.NewReferenceType( + nil, + sema.NewEntitlementSetAccess( + []*sema.EntitlementType{ + sema.MutateType, + }, + sema.Conjunction, + ), + sema.IntType, + ), + nil, + ) + assert.NoError(t, err) + assert.False(t, valid) + }) + + t.Run("non-entitled capability", func(t *testing.T) { + + env := environment.NewScriptEnv( + context.TODO(), + tracing.NewMockTracerSpan(), + environment.DefaultEnvironmentParams(), + nil, + ) + + valid, err := env.ValidateAccountCapabilitiesGet( + nil, + interpreter.EmptyLocationRange, + interpreter.AddressValue(common.ZeroAddress), + interpreter.NewUnmeteredPathValue(common.PathDomainPublic, "dummy_value"), + sema.NewReferenceType( + nil, + sema.UnauthorizedAccess, + sema.IntType, + ), + nil, + ) + assert.NoError(t, err) + assert.True(t, valid) + }) +} + +func TestAccountCapabilitiesPublishEntitledRejection(t *testing.T) { + + t.Run("entitled capability", newVMTest(). + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + + serviceAddress := chain.ServiceAddress() + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript([]byte(` + transaction { + prepare(signer: auth(Capabilities, Storage) &Account) { + signer.storage.save(42, to: /storage/number) + let cap = signer.capabilities.storage.issue<auth(Insert) &Int>(/storage/number) + signer.capabilities.publish(cap, at: /public/number) + } + } + `)). + AddAuthorizer(serviceAddress). + SetProposalKey(serviceAddress, 0, 0). 
+ SetPayer(serviceAddress) + + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + _, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + + var publishingError *interpreter.EntitledCapabilityPublishingError + require.ErrorAs(t, output.Err, &publishingError) + }), + ) + + t.Run("non entitled capability", newVMTest(). + run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + + serviceAddress := chain.ServiceAddress() + txBodyBuilder := flow.NewTransactionBodyBuilder(). + SetScript([]byte(` + transaction { + prepare(signer: auth(Capabilities, Storage) &Account) { + signer.storage.save(42, to: /storage/number) + let cap = signer.capabilities.storage.issue<&Int>(/storage/number) + signer.capabilities.publish(cap, at: /public/number) + } + } + `)). + AddAuthorizer(serviceAddress). + SetProposalKey(serviceAddress, 0, 0). + SetPayer(serviceAddress) + + err := testutil.SignTransactionAsServiceAccount(txBodyBuilder, 0, chain) + require.NoError(t, err) + + txBody, err := txBodyBuilder.Build() + require.NoError(t, err) + + _, output, err := vm.Run( + ctx, + fvm.Transaction(txBody, 0), + snapshotTree) + + require.NoError(t, err) + require.NoError(t, output.Err) + }), + ) +} + +func TestCrypto(t *testing.T) { + t.Parallel() + + const chainID = flow.Testnet + + test := func(t *testing.T, importDecl string) { + + chain, vm := createChainAndVm(chainID) + + ctx := fvm.NewContext( + fvm.WithChain(chain), + fvm.WithCadenceLogging(true), + ) + + script := []byte(fmt.Sprintf( + ` + %s + + access(all) + fun main( + rawPublicKeys: [String], + weights: [UFix64], + domainSeparationTag: String, + signatures: [String], + toAddress: Address, + fromAddress: Address, + amount: UFix64 + ): Bool { + let keyList = Crypto.KeyList() + + var i = 0 + for rawPublicKey in rawPublicKeys { + keyList.add( + PublicKey( + publicKey: rawPublicKey.decodeHex(), + signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 + ), + hashAlgorithm: HashAlgorithm.SHA3_256, + weight: weights[i], + ) + i = i + 1 + } + + let signatureSet: [Crypto.KeyListSignature] = [] + + var j = 0 + for signature in signatures { + signatureSet.append( + Crypto.KeyListSignature( + keyIndex: j, + signature: signature.decodeHex() + ) + ) + j = j + 1 + } + + // assemble the same message in cadence + let message = toAddress.toBytes() + .concat(fromAddress.toBytes()) + .concat(amount.toBigEndianBytes()) + + return keyList.verify( + signatureSet: signatureSet, + signedData: message, + domainSeparationTag: domainSeparationTag + ) + } + `, + importDecl, + )) + + accountKeys := test.AccountKeyGenerator() + + // create the keys + keyAlice, signerAlice := accountKeys.NewWithSigner() + keyBob, signerBob := accountKeys.NewWithSigner() + + // create the message that will be signed + addresses := test.AddressGenerator() + + toAddress := cadence.Address(addresses.New()) + fromAddress := cadence.Address(addresses.New()) + + amount, err := cadence.NewUFix64("100.00") + require.NoError(t, err) + + var message []byte + message = append(message, toAddress.Bytes()...) + message = append(message, fromAddress.Bytes()...) + message = append(message, amount.ToBigEndianBytes()...) 
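// The Cadence script above must verify exactly the bytes signed here, so both
// sides assemble the message identically: toAddress bytes, then fromAddress
// bytes, then the UFix64 amount in big-endian encoding. A sketch of that
// assembly as a helper (hypothetical name; same calls as the test code):

func assembleMessage(to, from cadence.Address, amount cadence.UFix64) []byte {
	msg := append([]byte{}, to.Bytes()...)
	msg = append(msg, from.Bytes()...)
	return append(msg, amount.ToBigEndianBytes()...)
}

// Each key is added with weight 0.5, so keyList.verify only reaches the 1.0
// threshold when both Alice's and Bob's signatures are present.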
+ + // sign the message with Alice and Bob + signatureAlice, err := flowsdk.SignUserMessage(signerAlice, message) + require.NoError(t, err) + + signatureBob, err := flowsdk.SignUserMessage(signerBob, message) + require.NoError(t, err) + + publicKeys := cadence.NewArray([]cadence.Value{ + cadence.String(hex.EncodeToString(keyAlice.PublicKey.Encode())), + cadence.String(hex.EncodeToString(keyBob.PublicKey.Encode())), + }) + + // each signature has half weight + weightAlice, err := cadence.NewUFix64("0.5") + require.NoError(t, err) + + weightBob, err := cadence.NewUFix64("0.5") + require.NoError(t, err) + + weights := cadence.NewArray([]cadence.Value{ + weightAlice, + weightBob, + }) + + signatures := cadence.NewArray([]cadence.Value{ + cadence.String(hex.EncodeToString(signatureAlice)), + cadence.String(hex.EncodeToString(signatureBob)), + }) + + domainSeparationTag := cadence.String("FLOW-V0.0-user") + + arguments := []cadence.Value{ + publicKeys, + weights, + domainSeparationTag, + signatures, + toAddress, + fromAddress, + amount, + } + + encodedArguments := make([][]byte, 0, len(arguments)) + for _, argument := range arguments { + encodedArguments = append(encodedArguments, jsoncdc.MustEncode(argument)) + } + + snapshotTree := testutil.RootBootstrappedLedger(vm, ctx) + + _, output, err := vm.Run( + ctx, + fvm.Script(script). + WithArguments(encodedArguments...), + snapshotTree) + require.NoError(t, err) + + require.NoError(t, output.Err) + + result := output.Value + + assert.Equal(t, + cadence.NewBool(true), + result, + ) + } + + t.Run("identifier location", func(t *testing.T) { + t.Parallel() + + test(t, "import Crypto") + }) + + t.Run("address location", func(t *testing.T) { + t.Parallel() + + sc := systemcontracts.SystemContractsForChain(chainID) + cryptoContractAddress := sc.Crypto.Address.HexWithPrefix() + + test(t, fmt.Sprintf("import Crypto from %s", cryptoContractAddress)) + }) +} + +func Test_BlockHashListShouldWriteOnPush(t *testing.T) { + + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + push := func(bhl *handler.BlockHashList, height uint64) { + buffer := make([]byte, 32) + pos := 0 + + // encode height as block hash + binary.BigEndian.PutUint64(buffer[pos:], height) + err := bhl.Push(height, [32]byte(buffer)) + require.NoError(t, err) + } + + t.Run("block hash list write on push", newVMTest(). + withContextOptions( + fvm.WithChain(chain), + fvm.WithEVMEnabled(true), + ). 
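// The expected write set below implies the storage layout this sketch
// assumes: with capacity 256, block hashes live in registers named
// "BlockHashListBucket0", "BlockHashListBucket1", ... holding 16 32-byte
// hashes each, and heights wrap modulo the capacity:

func bucketAndSlot(height uint64) (register string, slot uint64) {
	const capacity, hashesPerBucket = 256, 16
	idx := height % capacity
	return fmt.Sprintf("BlockHashListBucket%d", idx/hashesPerBucket), idx % hashesPerBucket
}

// bucketAndSlot(256) == ("BlockHashListBucket0", 0): pushing height 256 wraps
// around and overwrites the oldest entry, which is why the first hash in
// bucket 0 becomes 0x...0100 (256) in the expected bytes asserted below.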
+ run(func( + t *testing.T, + vm fvm.VM, + chain flow.Chain, + ctx fvm.Context, + snapshotTree snapshot.SnapshotTree, + ) { + capacity := 256 + + // for the setup we make sure all the block hash list buckets exist + + ts := state.NewTransactionState(snapshotTree, state.DefaultParameters()) + accounts := environment.NewAccounts(ts) + envMeter := environment.NewMeter(ts) + + valueStore := environment.NewValueStore( + tracing.NewMockTracerSpan(), + envMeter, + accounts, + ) + + bhl, err := handler.NewBlockHashList(valueStore, sc.EVMStorage.Address, capacity) + require.NoError(t, err) + + // fill the block hash list + height := uint64(0) + for ; height < uint64(capacity); height++ { + push(bhl, height) + } + + es, err := ts.FinalizeMainTransaction() + require.NoError(t, err) + snapshotTree = snapshotTree.Append(es) + + // end of test setup + + ts = state.NewTransactionState(snapshotTree, state.DefaultParameters()) + accounts = environment.NewAccounts(ts) + envMeter = environment.NewMeter(ts) + + valueStore = environment.NewValueStore( + tracing.NewMockTracerSpan(), + envMeter, + accounts, + ) + + bhl, err = handler.NewBlockHashList(valueStore, sc.EVMStorage.Address, capacity) + require.NoError(t, err) + + // after we push the changes should be applied and the first block hash in the bucket should be capacity+1 instead of 0 + push(bhl, height) + + es, err = ts.FinalizeMainTransaction() + require.NoError(t, err) + + // the write set should have both block metadata and block hash list bucket + require.Len(t, es.WriteSet, 2) + newBlockHashListBucket, ok := es.WriteSet[flow.NewRegisterID(sc.EVMStorage.Address, "BlockHashListBucket0")] + require.True(t, ok) + // full expected block hash list bucket split by individual block hashes + // first block hash is the capacity+1 instead of 0 (00 00 00 00 00 00 01 00) + expectedBlockHashListBucket, err := hex.DecodeString( + "0000000000000100000000000000000000000000000000000000000000000000" + + "0000000000000001000000000000000000000000000000000000000000000000" + + "0000000000000002000000000000000000000000000000000000000000000000" + + "0000000000000003000000000000000000000000000000000000000000000000" + + "0000000000000004000000000000000000000000000000000000000000000000" + + "0000000000000005000000000000000000000000000000000000000000000000" + + "0000000000000006000000000000000000000000000000000000000000000000" + + "0000000000000007000000000000000000000000000000000000000000000000" + + "0000000000000008000000000000000000000000000000000000000000000000" + + "0000000000000009000000000000000000000000000000000000000000000000" + + "000000000000000a000000000000000000000000000000000000000000000000" + + "000000000000000b000000000000000000000000000000000000000000000000" + + "000000000000000c000000000000000000000000000000000000000000000000" + + "000000000000000d000000000000000000000000000000000000000000000000" + + "000000000000000e000000000000000000000000000000000000000000000000" + + "000000000000000f000000000000000000000000000000000000000000000000") + require.NoError(t, err) + require.Equal(t, expectedBlockHashListBucket, newBlockHashListBucket) + })) +} diff --git a/fvm/initialize/options.go b/fvm/initialize/options.go new file mode 100644 index 00000000000..e484dcaccfd --- /dev/null +++ b/fvm/initialize/options.go @@ -0,0 +1,45 @@ +package initialize + +import ( + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InitFvmOptions initializes the FVM options 
based on the chain ID and headers. +// This function is extracted so that it can be reused in multiple places, +// and ensure that the FVM options are consistent across different components. +func InitFvmOptions( + chainID flow.ChainID, + headers storage.Headers, + transactionFeesDisabled bool, +) []fvm.Option { + blockFinder := environment.NewBlockFinder(headers) + vmOpts := []fvm.Option{ + fvm.WithChain(chainID.Chain()), + fvm.WithBlocks(blockFinder), + fvm.WithAccountStorageLimit(true), + } + switch chainID { + case flow.Testnet, + flow.Sandboxnet, + flow.Previewnet, + flow.Mainnet: + feesEnabled := !transactionFeesDisabled + vmOpts = append(vmOpts, + fvm.WithTransactionFeesEnabled(feesEnabled), + ) + } + switch chainID { + case flow.Testnet, + flow.Sandboxnet, + flow.Previewnet, + flow.Localnet, + flow.Benchnet: + vmOpts = append(vmOpts, + fvm.WithContractDeploymentRestricted(false), + ) + } + return vmOpts +} diff --git a/fvm/meter/computation_meter.go b/fvm/meter/computation_meter.go index d16b9eedc08..f153fb2c76b 100644 --- a/fvm/meter/computation_meter.go +++ b/fvm/meter/computation_meter.go @@ -3,12 +3,12 @@ package meter import ( "math" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/errors" ) -type MeteredComputationIntensities map[common.ComputationKind]uint +type MeteredComputationIntensities map[common.ComputationKind]uint64 var ( // DefaultComputationWeights is the default weights for computation intensities @@ -29,6 +29,15 @@ const MeterExecutionInternalPrecisionBytes = 16 type ExecutionEffortWeights map[common.ComputationKind]uint64 +func (weights ExecutionEffortWeights) ComputationFromIntensities(intensities MeteredComputationIntensities) uint64 { + var result uint64 + for kind, weight := range weights { + intensity := uint64(intensities[kind]) + result += weight * intensity + } + return result >> MeterExecutionInternalPrecisionBytes +} + type ComputationMeterParameters struct { computationLimit uint64 computationWeights ExecutionEffortWeights @@ -60,8 +69,8 @@ func (params ComputationMeterParameters) ComputationWeights() ExecutionEffortWei } // TotalComputationLimit returns the total computation limit -func (params ComputationMeterParameters) TotalComputationLimit() uint { - return uint(params.computationLimit >> MeterExecutionInternalPrecisionBytes) +func (params ComputationMeterParameters) TotalComputationLimit() uint64 { + return params.computationLimit >> MeterExecutionInternalPrecisionBytes } type ComputationMeter struct { @@ -79,16 +88,16 @@ func NewComputationMeter(params ComputationMeterParameters) ComputationMeter { } // MeterComputation captures computation usage and returns an error if it goes beyond the limit -func (m *ComputationMeter) MeterComputation( - kind common.ComputationKind, - intensity uint, -) error { +func (m *ComputationMeter) MeterComputation(usage common.ComputationUsage) error { + kind := usage.Kind + intensity := usage.Intensity + m.computationIntensities[kind] += intensity w, ok := m.params.computationWeights[kind] if !ok { return nil } - m.computationUsed += w * uint64(intensity) + m.computationUsed += w * intensity if m.computationUsed > m.params.computationLimit { return errors.NewComputationLimitExceededError( uint64(m.params.TotalComputationLimit())) @@ -96,6 +105,19 @@ func (m *ComputationMeter) MeterComputation( return nil } +// ComputationAvailable returns true if enough computation is left in the transaction for the given intensity and type +func (m *ComputationMeter) 
ComputationAvailable(usage common.ComputationUsage) bool { + w, ok := m.params.computationWeights[usage.Kind] + // if the kind has no configured weight, report available capacity, + // matching the behaviour of MeterComputation, which ignores intensities without a set weight + if !ok { + return true + } + + potentialComputationUsage := m.computationUsed + w*usage.Intensity + return potentialComputationUsage <= m.params.computationLimit +} + // ComputationIntensities returns all the measured computational intensities func (m *ComputationMeter) ComputationIntensities() MeteredComputationIntensities { return m.computationIntensities diff --git a/fvm/meter/memory_meter.go b/fvm/meter/memory_meter.go index dfc93b43301..5994be03533 100644 --- a/fvm/meter/memory_meter.go +++ b/fvm/meter/memory_meter.go @@ -3,7 +3,7 @@ package meter import ( "math" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/errors" ) @@ -21,26 +21,25 @@ var ( common.MemoryKindNumberValue: 8, // weights for these values include the cost of the Go struct itself (first number) // as well as the overhead for creation of the underlying atree (second number) - common.MemoryKindArrayValueBase: 57 + 48, - common.MemoryKindDictionaryValueBase: 33 + 96, - common.MemoryKindCompositeValueBase: 233 + 96, - common.MemoryKindSimpleCompositeValue: 73, - common.MemoryKindSimpleCompositeValueBase: 89, - common.MemoryKindOptionalValue: 41, - common.MemoryKindTypeValue: 17, - common.MemoryKindPathValue: 24, - common.MemoryKindStorageCapabilityValue: 1, - common.MemoryKindPathLinkValue: 1, - common.MemoryKindAccountLinkValue: 1, - common.MemoryKindAccountReferenceValue: 1, - common.MemoryKindPublishedValue: 1, - common.MemoryKindStorageReferenceValue: 41, - common.MemoryKindEphemeralReferenceValue: 41, - common.MemoryKindInterpretedFunctionValue: 128, - common.MemoryKindHostFunctionValue: 41, - common.MemoryKindBoundFunctionValue: 25, - common.MemoryKindBigInt: 50, - common.MemoryKindVoidExpression: 1, + common.MemoryKindArrayValueBase: 57 + 48, + common.MemoryKindDictionaryValueBase: 33 + 96, + common.MemoryKindCompositeValueBase: 233 + 96, + common.MemoryKindSimpleCompositeValue: 73, + common.MemoryKindSimpleCompositeValueBase: 89, + common.MemoryKindOptionalValue: 41, + common.MemoryKindTypeValue: 17, + common.MemoryKindPathValue: 24, + common.MemoryKindCapabilityValue: 1, // TODO: update with proper weight + common.MemoryKindStorageCapabilityControllerValue: 32, + common.MemoryKindAccountCapabilityControllerValue: 32, + common.MemoryKindPublishedValue: 1, + common.MemoryKindStorageReferenceValue: 41, + common.MemoryKindEphemeralReferenceValue: 41, + common.MemoryKindInterpretedFunctionValue: 128, + common.MemoryKindHostFunctionValue: 41, + common.MemoryKindBoundFunctionValue: 25, + common.MemoryKindBigInt: 50, + common.MemoryKindVoidExpression: 1, // Atree @@ -62,64 +61,66 @@ var ( common.MemoryKindConstantSizedStaticType: 25, common.MemoryKindDictionaryStaticType: 33, common.MemoryKindOptionalStaticType: 17, - common.MemoryKindRestrictedStaticType: 41, + common.MemoryKindIntersectionStaticType: 41, common.MemoryKindReferenceStaticType: 41, common.MemoryKindCapabilityStaticType: 17, common.MemoryKindFunctionStaticType: 9, // Cadence Values - common.MemoryKindCadenceVoidValue: 1, - common.MemoryKindCadenceOptionalValue: 17, - common.MemoryKindCadenceBoolValue: 8, - common.MemoryKindCadenceStringValue: 16, - common.MemoryKindCadenceCharacterValue: 16, - common.MemoryKindCadenceAddressValue: 8, - 
common.MemoryKindCadenceIntValue: 50, - common.MemoryKindCadenceNumberValue: 1, - common.MemoryKindCadenceArrayValueBase: 41, - common.MemoryKindCadenceArrayValueLength: 16, - common.MemoryKindCadenceDictionaryValue: 41, - common.MemoryKindCadenceKeyValuePair: 33, - common.MemoryKindCadenceStructValueBase: 33, - common.MemoryKindCadenceStructValueSize: 16, - common.MemoryKindCadenceResourceValueBase: 33, - common.MemoryKindCadenceResourceValueSize: 16, - common.MemoryKindCadenceEventValueBase: 33, - common.MemoryKindCadenceEventValueSize: 16, - common.MemoryKindCadenceContractValueBase: 33, - common.MemoryKindCadenceContractValueSize: 16, - common.MemoryKindCadenceEnumValueBase: 33, - common.MemoryKindCadenceEnumValueSize: 16, - common.MemoryKindCadencePathLinkValue: 1, - common.MemoryKindCadencePathValue: 33, - common.MemoryKindCadenceTypeValue: 17, - common.MemoryKindCadenceStorageCapabilityValue: 1, - common.MemoryKindCadenceFunctionValue: 1, - common.MemoryKindCadenceAttachmentValueBase: 33, - common.MemoryKindCadenceAttachmentValueSize: 16, + common.MemoryKindCadenceVoidValue: 1, + common.MemoryKindCadenceOptionalValue: 17, + common.MemoryKindCadenceBoolValue: 8, + common.MemoryKindCadenceStringValue: 16, + common.MemoryKindCadenceCharacterValue: 16, + common.MemoryKindCadenceAddressValue: 8, + common.MemoryKindCadenceIntValue: 50, + common.MemoryKindCadenceNumberValue: 1, + common.MemoryKindCadenceArrayValueBase: 41, + common.MemoryKindCadenceArrayValueLength: 16, + common.MemoryKindCadenceDictionaryValue: 41, + common.MemoryKindCadenceKeyValuePair: 33, + common.MemoryKindCadenceStructValueBase: 33, + common.MemoryKindCadenceStructValueSize: 16, + common.MemoryKindCadenceResourceValueBase: 33, + common.MemoryKindCadenceResourceValueSize: 16, + common.MemoryKindCadenceEventValueBase: 33, + common.MemoryKindCadenceEventValueSize: 16, + common.MemoryKindCadenceContractValueBase: 33, + common.MemoryKindCadenceContractValueSize: 16, + common.MemoryKindCadenceEnumValueBase: 33, + common.MemoryKindCadenceEnumValueSize: 16, + common.MemoryKindCadencePathValue: 33, + common.MemoryKindCadenceTypeValue: 17, + common.MemoryKindCadenceCapabilityValue: 1, // TODO: update with proper weight + common.MemoryKindCadenceDeprecatedPathCapabilityType: 1, // TODO: remove, deprecated. 
Also has a wrong name + common.MemoryKindCadenceFunctionValue: 1, + common.MemoryKindCadenceAttachmentValueBase: 33, + common.MemoryKindCadenceAttachmentValueSize: 16, // Cadence Types - common.MemoryKindCadenceOptionalType: 17, - common.MemoryKindCadenceVariableSizedArrayType: 17, - common.MemoryKindCadenceConstantSizedArrayType: 25, - common.MemoryKindCadenceDictionaryType: 33, - common.MemoryKindCadenceField: 33, - common.MemoryKindCadenceParameter: 49, - common.MemoryKindCadenceStructType: 81, - common.MemoryKindCadenceResourceType: 81, - common.MemoryKindCadenceEventType: 81, - common.MemoryKindCadenceContractType: 81, - common.MemoryKindCadenceStructInterfaceType: 81, - common.MemoryKindCadenceResourceInterfaceType: 81, - common.MemoryKindCadenceContractInterfaceType: 81, - common.MemoryKindCadenceFunctionType: 41, - common.MemoryKindCadenceReferenceType: 25, - common.MemoryKindCadenceRestrictedType: 57, - common.MemoryKindCadenceCapabilityType: 17, - common.MemoryKindCadenceEnumType: 97, - common.MemoryKindCadenceAttachmentType: 81, + common.MemoryKindCadenceTypeParameter: 17, + common.MemoryKindCadenceOptionalType: 17, + common.MemoryKindCadenceVariableSizedArrayType: 17, + common.MemoryKindCadenceConstantSizedArrayType: 25, + common.MemoryKindCadenceDictionaryType: 33, + common.MemoryKindCadenceField: 33, + common.MemoryKindCadenceParameter: 49, + common.MemoryKindCadenceStructType: 81, + common.MemoryKindCadenceResourceType: 81, + common.MemoryKindCadenceEventType: 81, + common.MemoryKindCadenceContractType: 81, + common.MemoryKindCadenceStructInterfaceType: 81, + common.MemoryKindCadenceResourceInterfaceType: 81, + common.MemoryKindCadenceContractInterfaceType: 81, + common.MemoryKindCadenceFunctionType: 41, + common.MemoryKindCadenceReferenceType: 25, + common.MemoryKindCadenceIntersectionType: 57, + common.MemoryKindCadenceDeprecatedRestrictedType: 57, + common.MemoryKindCadenceCapabilityType: 17, + common.MemoryKindCadenceEnumType: 97, + common.MemoryKindCadenceAttachmentType: 81, // Misc @@ -154,18 +155,22 @@ var ( common.MemoryKindMembers: 276, common.MemoryKindTypeAnnotation: 25, common.MemoryKindDictionaryEntry: 33, - - common.MemoryKindFunctionDeclaration: 49, - common.MemoryKindCompositeDeclaration: 65, - common.MemoryKindInterfaceDeclaration: 41, - common.MemoryKindEnumCaseDeclaration: 25, - common.MemoryKindFieldDeclaration: 41, - common.MemoryKindTransactionDeclaration: 81, - common.MemoryKindImportDeclaration: 41, - common.MemoryKindVariableDeclaration: 97, - common.MemoryKindSpecialFunctionDeclaration: 17, - common.MemoryKindPragmaDeclaration: 17, - common.MemoryKindAttachmentDeclaration: 70, + common.MemoryKindSwitchCase: 17, + + common.MemoryKindFunctionDeclaration: 49, + common.MemoryKindCompositeDeclaration: 65, + common.MemoryKindInterfaceDeclaration: 41, + common.MemoryKindEnumCaseDeclaration: 25, + common.MemoryKindFieldDeclaration: 41, + common.MemoryKindTransactionDeclaration: 81, + common.MemoryKindImportDeclaration: 41, + common.MemoryKindVariableDeclaration: 97, + common.MemoryKindSpecialFunctionDeclaration: 17, + common.MemoryKindPragmaDeclaration: 17, + common.MemoryKindAttachmentDeclaration: 70, + common.MemoryKindEntitlementDeclaration: 33, + common.MemoryKindEntitlementMappingElement: 17, + common.MemoryKindEntitlementMappingDeclaration: 57, common.MemoryKindAssignmentStatement: 41, common.MemoryKindBreakStatement: 1, @@ -180,28 +185,29 @@ var ( common.MemoryKindWhileStatement: 25, common.MemoryKindRemoveStatement: 33, - 
common.MemoryKindBooleanExpression: 9, - common.MemoryKindNilExpression: 1, - common.MemoryKindStringExpression: 17, - common.MemoryKindIntegerExpression: 33, - common.MemoryKindFixedPointExpression: 49, - common.MemoryKindArrayExpression: 25, - common.MemoryKindDictionaryExpression: 25, - common.MemoryKindIdentifierExpression: 1, - common.MemoryKindInvocationExpression: 49, - common.MemoryKindMemberExpression: 25, - common.MemoryKindIndexExpression: 33, - common.MemoryKindConditionalExpression: 49, - common.MemoryKindUnaryExpression: 25, - common.MemoryKindBinaryExpression: 41, - common.MemoryKindFunctionExpression: 25, - common.MemoryKindCastingExpression: 41, - common.MemoryKindCreateExpression: 9, - common.MemoryKindDestroyExpression: 17, - common.MemoryKindReferenceExpression: 33, - common.MemoryKindForceExpression: 17, - common.MemoryKindPathExpression: 1, - common.MemoryKindAttachExpression: 33, + common.MemoryKindBooleanExpression: 9, + common.MemoryKindNilExpression: 1, + common.MemoryKindStringExpression: 17, + common.MemoryKindStringTemplateExpression: 49, + common.MemoryKindIntegerExpression: 33, + common.MemoryKindFixedPointExpression: 49, + common.MemoryKindArrayExpression: 25, + common.MemoryKindDictionaryExpression: 25, + common.MemoryKindIdentifierExpression: 1, + common.MemoryKindInvocationExpression: 49, + common.MemoryKindMemberExpression: 25, + common.MemoryKindIndexExpression: 33, + common.MemoryKindConditionalExpression: 49, + common.MemoryKindUnaryExpression: 25, + common.MemoryKindBinaryExpression: 41, + common.MemoryKindFunctionExpression: 25, + common.MemoryKindCastingExpression: 41, + common.MemoryKindCreateExpression: 9, + common.MemoryKindDestroyExpression: 17, + common.MemoryKindReferenceExpression: 33, + common.MemoryKindForceExpression: 17, + common.MemoryKindPathExpression: 1, + common.MemoryKindAttachExpression: 33, common.MemoryKindConstantSizedType: 25, common.MemoryKindDictionaryType: 33, @@ -210,7 +216,7 @@ var ( common.MemoryKindNominalType: 25, common.MemoryKindOptionalType: 17, common.MemoryKindReferenceType: 25, - common.MemoryKindRestrictedType: 41, + common.MemoryKindIntersectionType: 41, common.MemoryKindVariableSizedType: 17, common.MemoryKindPosition: 25, @@ -220,18 +226,43 @@ var ( common.MemoryKindElaboration: 501, // sema types - common.MemoryKindVariableSizedSemaType: 51, - common.MemoryKindConstantSizedSemaType: 59, - common.MemoryKindDictionarySemaType: 67, - common.MemoryKindOptionalSemaType: 17, - common.MemoryKindRestrictedSemaType: 75, - common.MemoryKindReferenceSemaType: 25, - common.MemoryKindCapabilitySemaType: 51, + common.MemoryKindVariableSizedSemaType: 51, + common.MemoryKindConstantSizedSemaType: 59, + common.MemoryKindDictionarySemaType: 67, + common.MemoryKindOptionalSemaType: 17, + common.MemoryKindIntersectionSemaType: 75, + common.MemoryKindReferenceSemaType: 25, + common.MemoryKindCapabilitySemaType: 51, + common.MemoryKindEntitlementSemaType: 49, + common.MemoryKindEntitlementMapSemaType: 73, + common.MemoryKindEntitlementRelationSemaType: 73, // ordered-map common.MemoryKindOrderedMap: 17, common.MemoryKindOrderedMapEntryList: 50, common.MemoryKindOrderedMapEntry: 64, + + // Entitlement access + common.MemoryKindEntitlementSetStaticAccess: 17, + common.MemoryKindEntitlementMapStaticAccess: 17, + common.MemoryKindCadenceEntitlementSetAccess: 33, + common.MemoryKindCadenceEntitlementMapAccess: 17, + + // InclusiveRange + common.MemoryKindInclusiveRangeStaticType: 17, + common.MemoryKindCadenceInclusiveRangeValue: 81, 
+ common.MemoryKindCadenceInclusiveRangeType: 33, + common.MemoryKindInclusiveRangeSemaType: 17, + + common.MemoryKindContractVariable: 17, + common.MemoryKindGoSliceLength: 17, + + common.MemoryKindCompiler: 17, + common.MemoryKindCompilerGlobal: 17, + common.MemoryKindCompilerConstant: 17, + + common.MemoryKindBoundFunctionVMValue: 17, + common.MemoryKindImplicitReferenceVMValue: 17, } ) @@ -242,7 +273,7 @@ func _() { } type ExecutionMemoryWeights map[common.MemoryKind]uint64 -type MeteredMemoryIntensities map[common.MemoryKind]uint +type MeteredMemoryAmounts map[common.MemoryKind]uint64 type MemoryMeterParameters struct { memoryLimit uint64 @@ -282,33 +313,36 @@ func (params MeterParameters) WithMemoryWeights( type MemoryMeter struct { params MemoryMeterParameters - memoryIntensities MeteredMemoryIntensities - memoryEstimate uint64 + memoryAmounts MeteredMemoryAmounts + memoryEstimate uint64 } -// MemoryIntensities returns all the measured memory intensities -func (m *MemoryMeter) MemoryIntensities() MeteredMemoryIntensities { - return m.memoryIntensities +// MemoryAmounts returns all the measured memory amounts +func (m *MemoryMeter) MemoryAmounts() MeteredMemoryAmounts { + return m.memoryAmounts } // NewMemoryMeter constructs a new Meter func NewMemoryMeter(params MemoryMeterParameters) MemoryMeter { m := MemoryMeter{ - params: params, - memoryIntensities: make(MeteredMemoryIntensities), + params: params, + memoryAmounts: make(MeteredMemoryAmounts), } return m } // MeterMemory captures memory usage and returns an error if it goes beyond the limit -func (m *MemoryMeter) MeterMemory(kind common.MemoryKind, intensity uint) error { - m.memoryIntensities[kind] += intensity +func (m *MemoryMeter) MeterMemory(usage common.MemoryUsage) error { + kind := usage.Kind + amount := usage.Amount + + m.memoryAmounts[kind] += amount w, ok := m.params.memoryWeights[kind] if !ok { return nil } - m.memoryEstimate += w * uint64(intensity) + m.memoryEstimate += w * amount if m.memoryEstimate > m.params.memoryLimit { return errors.NewMemoryLimitExceededError(m.params.TotalMemoryLimit()) } @@ -324,7 +358,7 @@ func (m *MemoryMeter) TotalMemoryEstimate() uint64 { func (m *MemoryMeter) Merge(child MemoryMeter) { m.memoryEstimate = m.memoryEstimate + child.TotalMemoryEstimate() - for key, intensity := range child.memoryIntensities { - m.memoryIntensities[key] += intensity + for key, intensity := range child.memoryAmounts { + m.memoryAmounts[key] += intensity } } diff --git a/fvm/meter/meter_test.go b/fvm/meter/meter_test.go index 1ad60f343c0..99b069aa219 100644 --- a/fvm/meter/meter_test.go +++ b/fvm/meter/meter_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/meter" @@ -23,7 +23,7 @@ func TestWeightedComputationMetering(t *testing.T) { WithComputationLimit(1). WithMemoryLimit(2), ) - require.Equal(t, uint(1), m.TotalComputationLimit()) + require.Equal(t, uint64(1), m.TotalComputationLimit()) require.Equal(t, uint64(2), m.TotalMemoryLimit()) }) @@ -33,7 +33,7 @@ func TestWeightedComputationMetering(t *testing.T) { WithComputationLimit(math.MaxUint32). 
WithMemoryLimit(math.MaxUint32), ) - require.Equal(t, uint(math.MaxUint32), m.TotalComputationLimit()) + require.Equal(t, uint64(math.MaxUint32), m.TotalComputationLimit()) require.Equal(t, uint64(math.MaxUint32), m.TotalMemoryLimit()) }) @@ -47,28 +47,58 @@ func TestWeightedComputationMetering(t *testing.T) { WithMemoryWeights(map[common.MemoryKind]uint64{0: 1}), ) - err := m.MeterComputation(0, 1) + err := m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(1), m.TotalComputationUsed()) - err = m.MeterComputation(0, 2) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 2, + }, + ) require.NoError(t, err) require.Equal(t, uint64(1+2), m.TotalComputationUsed()) - err = m.MeterComputation(0, 8) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 8, + }, + ) require.Error(t, err) require.True(t, errors.IsComputationLimitExceededError(err)) require.Equal(t, err.Error(), errors.NewComputationLimitExceededError(10).Error()) - err = m.MeterMemory(0, 2) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 0, + Amount: 2, + }, + ) require.NoError(t, err) require.Equal(t, uint64(2), m.TotalMemoryEstimate()) - err = m.MeterMemory(0, 3) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 0, + Amount: 3, + }, + ) require.NoError(t, err) require.Equal(t, uint64(2+3), m.TotalMemoryEstimate()) - err = m.MeterMemory(0, 8) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 0, + Amount: 8, + }, + ) require.Error(t, err) require.True(t, errors.IsMemoryLimitExceededError(err)) require.Equal(t, err.Error(), errors.NewMemoryLimitExceededError(10).Error()) @@ -84,15 +114,25 @@ func TestWeightedComputationMetering(t *testing.T) { WithMemoryWeights(map[common.MemoryKind]uint64{0: 17}), ) - err := m.MeterComputation(0, 1) + err := m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(13), m.TotalComputationUsed()) - require.Equal(t, uint(1), m.ComputationIntensities()[0]) + require.Equal(t, uint64(1), m.ComputationIntensities()[0]) - err = m.MeterMemory(0, 2) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 0, + Amount: 2, + }, + ) require.NoError(t, err) require.Equal(t, uint64(34), m.TotalMemoryEstimate()) - require.Equal(t, uint(2), m.MemoryIntensities()[0]) + require.Equal(t, uint64(2), m.MemoryAmounts()[0]) }) t.Run("meter computation with weights lower than MeterInternalPrecisionBytes", func(t *testing.T) { @@ -104,17 +144,100 @@ func TestWeightedComputationMetering(t *testing.T) { WithMemoryWeights(map[common.MemoryKind]uint64{0: 1}), ) - internalPrecisionMinusOne := uint((1 << meter.MeterExecutionInternalPrecisionBytes) - 1) + internalPrecisionMinusOne := uint64((1 << meter.MeterExecutionInternalPrecisionBytes) - 1) - err := m.MeterComputation(0, internalPrecisionMinusOne) + err := m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: internalPrecisionMinusOne, + }, + ) require.NoError(t, err) require.Equal(t, uint64(0), m.TotalComputationUsed()) require.Equal(t, internalPrecisionMinusOne, m.ComputationIntensities()[0]) - err = m.MeterComputation(0, 1) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 1, + }, + ) + require.NoError(t, err) + require.Equal(t, uint64(1), m.TotalComputationUsed()) + require.Equal(t, uint64(1<<meter.MeterExecutionInternalPrecisionBytes), m.ComputationIntensities()[0]) + }) + + t.Run("check computation capacity", func(t 
*testing.T) { + m := meter.NewMeter( + meter.DefaultParameters(). + WithComputationLimit(10). + WithComputationWeights( + map[common.ComputationKind]uint64{0: 1 << meter.MeterExecutionInternalPrecisionBytes}), + ) + + hasCapacity := m.ComputationAvailable( + common.ComputationUsage{ + Kind: 0, + Intensity: 1, + }, + ) + require.True(t, hasCapacity) + + err := m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(1), m.TotalComputationUsed()) - require.Equal(t, uint(1<<meter.MeterExecutionInternalPrecisionBytes), m.ComputationIntensities()[0]) + + require.True(t, m.ComputationAvailable( + common.ComputationUsage{ + Kind: 0, + Intensity: 9, + }, + )) + require.False(t, m.ComputationAvailable( + common.ComputationUsage{ + Kind: 0, + Intensity: 10, + }, + )) + + // test a type without a weight (default zero) + require.True(t, m.ComputationAvailable( + common.ComputationUsage{ + Kind: 1, + Intensity: 10, + }, + )) + }) + + t.Run("check computation available", func(t *testing.T) { + m := meter.NewMeter( + meter.DefaultParameters(). + WithComputationLimit(10). + WithComputationWeights( + map[common.ComputationKind]uint64{0: 1 << meter.MeterExecutionInternalPrecisionBytes}), + ) + + available := m.ComputationAvailable(common.ComputationUsage{Kind: 0, Intensity: 10}) + require.True(t, available) + + err := m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 1, + }, + ) + require.NoError(t, err) + require.Equal(t, uint64(1), m.TotalComputationUsed()) + + require.False(t, m.ComputationAvailable(common.ComputationUsage{Kind: 0, Intensity: 10})) + + // test a type without a weight (default MaxUint64) + require.True(t, m.ComputationAvailable(common.ComputationUsage{Kind: 1, Intensity: math.MaxUint64})) }) t.Run("merge meters", func(t *testing.T) { @@ -128,36 +251,61 @@ func TestWeightedComputationMetering(t *testing.T) { WithMemoryWeights(map[common.MemoryKind]uint64{0: 1}), ) - err := m.MeterComputation(compKind, 1) + err := m.MeterComputation( + common.ComputationUsage{ + Kind: compKind, + Intensity: 1, + }, + ) require.NoError(t, err) child1 := meter.NewMeter(m.MeterParameters) - err = child1.MeterComputation(compKind, 2) + err = child1.MeterComputation( + common.ComputationUsage{ + Kind: compKind, + Intensity: 2, + }, + ) require.NoError(t, err) child2 := meter.NewMeter(m.MeterParameters) - err = child2.MeterComputation(compKind, 3) + err = child2.MeterComputation( + common.ComputationUsage{ + Kind: compKind, + Intensity: 3, + }, + ) require.NoError(t, err) child3 := meter.NewMeter(m.MeterParameters) - err = child3.MeterComputation(compKind, 4) + err = child3.MeterComputation( + common.ComputationUsage{ + Kind: compKind, + Intensity: 4, + }, + ) require.NoError(t, err) m.MergeMeter(child1) require.Equal(t, uint64(1+2), m.TotalComputationUsed()) - require.Equal(t, uint(1+2), m.ComputationIntensities()[compKind]) + require.Equal(t, uint64(1+2), m.ComputationIntensities()[compKind]) m.MergeMeter(child2) require.Equal(t, uint64(1+2+3), m.TotalComputationUsed()) - require.Equal(t, uint(1+2+3), m.ComputationIntensities()[compKind]) + require.Equal(t, uint64(1+2+3), m.ComputationIntensities()[compKind]) // merge hits limit, but is accepted. 
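// Aside (a minimal sketch, not part of the diff): computation weights are
// fixed-point values scaled by 1<<MeterExecutionInternalPrecisionBytes
// (i.e. 1<<16), and the new ExecutionEffortWeights.ComputationFromIntensities
// helper shifts the weighted sum back down, so a weight of 1<<16 charges
// exactly one unit of effort per intensity point:
//
//	weights := meter.ExecutionEffortWeights{
//		0: 1 << meter.MeterExecutionInternalPrecisionBytes,
//	}
//	intensities := meter.MeteredComputationIntensities{0: 3}
//	effort := weights.ComputationFromIntensities(intensities) // effort == 3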
m.MergeMeter(child3) require.Equal(t, uint64(1+2+3+4), m.TotalComputationUsed()) - require.Equal(t, uint(1+2+3+4), m.ComputationIntensities()[compKind]) + require.Equal(t, uint64(1+2+3+4), m.ComputationIntensities()[compKind]) // error after merge (hitting limit) - err = m.MeterComputation(compKind, 0) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: compKind, + Intensity: 0, + }, + ) require.Error(t, err) require.True(t, errors.IsComputationLimitExceededError(err)) require.Equal(t, err.Error(), errors.NewComputationLimitExceededError(9).Error()) @@ -172,17 +320,27 @@ func TestWeightedComputationMetering(t *testing.T) { WithComputationWeights(map[common.ComputationKind]uint64{0: 1 << meter.MeterExecutionInternalPrecisionBytes}), ) - err := m.MeterComputation(compKind, 1) + err := m.MeterComputation( + common.ComputationUsage{ + Kind: compKind, + Intensity: 1, + }, + ) require.NoError(t, err) child := meter.NewMeter(m.MeterParameters) - err = child.MeterComputation(compKind, 1) + err = child.MeterComputation( + common.ComputationUsage{ + Kind: compKind, + Intensity: 1, + }, + ) require.NoError(t, err) // hitting limit and ignoring it m.MergeMeter(child) require.Equal(t, uint64(1+1), m.TotalComputationUsed()) - require.Equal(t, uint(1+1), m.ComputationIntensities()[compKind]) + require.Equal(t, uint64(1+1), m.ComputationIntensities()[compKind]) }) t.Run("merge meters - large values - computation", func(t *testing.T) { @@ -194,16 +352,31 @@ func TestWeightedComputationMetering(t *testing.T) { }), ) - err := m.MeterComputation(0, 1) + err := m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 1, + }, + ) require.NoError(t, err) child1 := meter.NewMeter(m.MeterParameters) - err = child1.MeterComputation(0, 1) + err = child1.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 1, + }, + ) require.NoError(t, err) m.MergeMeter(child1) - err = m.MeterComputation(0, 0) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 0, + }, + ) require.True(t, errors.IsComputationLimitExceededError(err)) }) @@ -216,16 +389,31 @@ func TestWeightedComputationMetering(t *testing.T) { }), ) - err := m.MeterMemory(0, 1) + err := m.MeterMemory( + common.MemoryUsage{ + Kind: 0, + Amount: 1, + }, + ) require.NoError(t, err) child1 := meter.NewMeter(m.MeterParameters) - err = child1.MeterMemory(0, 1) + err = child1.MeterMemory( + common.MemoryUsage{ + Kind: 0, + Amount: 1, + }, + ) require.NoError(t, err) m.MergeMeter(child1) - err = m.MeterMemory(0, 0) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 0, + Amount: 0, + }, + ) require.Error(t, err) require.True(t, errors.IsMemoryLimitExceededError(err)) require.Equal(t, err.Error(), errors.NewMemoryLimitExceededError(math.MaxUint32).Error()) @@ -247,52 +435,112 @@ func TestWeightedComputationMetering(t *testing.T) { } reset() - err := m.MeterComputation(0, 1) + err := m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(0), m.TotalComputationUsed()) reset() - err = m.MeterComputation(0, 1<<meter.MeterExecutionInternalPrecisionBytes) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: 1 << meter.MeterExecutionInternalPrecisionBytes, + }, + ) require.NoError(t, err) require.Equal(t, uint64(0), m.TotalComputationUsed()) reset() - err = m.MeterComputation(0, math.MaxUint32) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 0, + Intensity: math.MaxUint32, + }, + ) require.NoError(t, 
err) require.Equal(t, uint64(0), m.TotalComputationUsed()) reset() - err = m.MeterComputation(1, 1) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 1, + Intensity: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(0), m.TotalComputationUsed()) reset() - err = m.MeterComputation(1, 1<<meter.MeterExecutionInternalPrecisionBytes) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 1, + Intensity: 1 << meter.MeterExecutionInternalPrecisionBytes, + }, + ) require.NoError(t, err) require.Equal(t, uint64(1), m.TotalComputationUsed()) reset() - err = m.MeterComputation(1, math.MaxUint32) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 1, + Intensity: math.MaxUint32, + }, + ) require.NoError(t, err) require.Equal(t, uint64(1<<16-1), m.TotalComputationUsed()) reset() - err = m.MeterComputation(2, 1) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 2, + Intensity: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(1), m.TotalComputationUsed()) reset() - err = m.MeterComputation(2, 1<<meter.MeterExecutionInternalPrecisionBytes) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 2, + Intensity: 1 << meter.MeterExecutionInternalPrecisionBytes, + }, + ) require.NoError(t, err) require.Equal(t, uint64(1<<16), m.TotalComputationUsed()) reset() - err = m.MeterComputation(2, math.MaxUint32) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 2, + Intensity: math.MaxUint32, + }, + ) require.NoError(t, err) require.Equal(t, uint64(math.MaxUint32), m.TotalComputationUsed()) reset() - err = m.MeterComputation(3, 1) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 3, + Intensity: 1, + }, + ) require.True(t, errors.IsComputationLimitExceededError(err)) reset() - err = m.MeterComputation(3, 1<<meter.MeterExecutionInternalPrecisionBytes) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 3, + Intensity: 1 << meter.MeterExecutionInternalPrecisionBytes, + }, + ) require.True(t, errors.IsComputationLimitExceededError(err)) reset() - err = m.MeterComputation(3, math.MaxUint32) + err = m.MeterComputation( + common.ComputationUsage{ + Kind: 3, + Intensity: math.MaxUint32, + }, + ) require.True(t, errors.IsComputationLimitExceededError(err)) }) @@ -312,51 +560,111 @@ func TestWeightedComputationMetering(t *testing.T) { } reset() - err := m.MeterMemory(0, 1) + err := m.MeterMemory( + common.MemoryUsage{ + Kind: 0, + Amount: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(0), m.TotalMemoryEstimate()) reset() - err = m.MeterMemory(0, 1) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 0, + Amount: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(0), m.TotalMemoryEstimate()) reset() - err = m.MeterMemory(0, math.MaxUint32) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 0, + Amount: math.MaxUint32, + }, + ) require.NoError(t, err) require.Equal(t, uint64(0), m.TotalMemoryEstimate()) reset() - err = m.MeterMemory(1, 1) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 1, + Amount: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(1), m.TotalMemoryEstimate()) reset() - err = m.MeterMemory(1, 1) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 1, + Amount: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(1), m.TotalMemoryEstimate()) reset() - err = m.MeterMemory(1, math.MaxUint32) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 1, + Amount: math.MaxUint32, + }, + ) require.NoError(t, err) require.Equal(t, uint64(math.MaxUint32), 
m.TotalMemoryEstimate()) reset() - err = m.MeterMemory(2, 1) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 2, + Amount: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(2), m.TotalMemoryEstimate()) reset() - err = m.MeterMemory(2, 1) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 2, + Amount: 1, + }, + ) require.NoError(t, err) require.Equal(t, uint64(2), m.TotalMemoryEstimate()) reset() - err = m.MeterMemory(2, math.MaxUint32) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 2, + Amount: math.MaxUint32, + }, + ) require.True(t, errors.IsMemoryLimitExceededError(err)) reset() - err = m.MeterMemory(3, 1) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 3, + Amount: 1, + }, + ) require.True(t, errors.IsMemoryLimitExceededError(err)) reset() - err = m.MeterMemory(3, 1) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 3, + Amount: 1, + }, + ) require.True(t, errors.IsMemoryLimitExceededError(err)) reset() - err = m.MeterMemory(3, math.MaxUint32) + err = m.MeterMemory( + common.MemoryUsage{ + Kind: 3, + Amount: math.MaxUint32, + }, + ) require.True(t, errors.IsMemoryLimitExceededError(err)) }) } @@ -386,7 +694,7 @@ func TestStorageLimits(t *testing.T) { meter.DefaultParameters(), ) - key1 := flow.NewRegisterID("", "1") + key1 := flow.NewRegisterID(flow.EmptyAddress, "1") val1 := []byte{0x1, 0x2, 0x3} size1 := meter.GetStorageKeyValueSizeForTesting(key1, val1) @@ -401,7 +709,7 @@ func TestStorageLimits(t *testing.T) { require.Equal(t, meter1.TotalBytesReadFromStorage(), size1) // first read of key2 - key2 := flow.NewRegisterID("", "2") + key2 := flow.NewRegisterID(flow.EmptyAddress, "2") val2 := []byte{0x3, 0x2, 0x1} size2 := meter.GetStorageKeyValueSizeForTesting(key2, val2) @@ -415,7 +723,7 @@ func TestStorageLimits(t *testing.T) { meter.DefaultParameters(), ) - key1 := flow.NewRegisterID("", "1") + key1 := flow.NewRegisterID(flow.EmptyAddress, "1") val1 := []byte{0x1, 0x2, 0x3} val2 := []byte{0x1, 0x2, 0x3, 0x4} @@ -430,7 +738,7 @@ func TestStorageLimits(t *testing.T) { require.Equal(t, meter1.TotalBytesWrittenToStorage(), meter.GetStorageKeyValueSizeForTesting(key1, val2)) // first write of key2 - key2 := flow.NewRegisterID("", "2") + key2 := flow.NewRegisterID(flow.EmptyAddress, "2") err = meter1.MeterStorageWrite(key2, val2, false) require.NoError(t, err) require.Equal(t, meter1.TotalBytesWrittenToStorage(), @@ -442,7 +750,7 @@ func TestStorageLimits(t *testing.T) { meter.DefaultParameters().WithStorageInteractionLimit(1), ) - key1 := flow.NewRegisterID("", "1") + key1 := flow.NewRegisterID(flow.EmptyAddress, "1") val1 := []byte{0x1, 0x2, 0x3} err := meter1.MeterStorageRead(key1, val1, false /* not enforced */) @@ -456,7 +764,7 @@ func TestStorageLimits(t *testing.T) { meter.DefaultParameters().WithStorageInteractionLimit(testLimit), ) - key1 := flow.NewRegisterID("", "1") + key1 := flow.NewRegisterID(flow.EmptyAddress, "1") val1 := []byte{0x1, 0x2, 0x3} err := meter1.MeterStorageRead(key1, val1, true /* enforced */) @@ -474,7 +782,7 @@ func TestStorageLimits(t *testing.T) { meter.DefaultParameters().WithStorageInteractionLimit(testLimit), ) - key1 := flow.NewRegisterID("", "1") + key1 := flow.NewRegisterID(flow.EmptyAddress, "1") val1 := []byte{0x1, 0x2, 0x3} err := meter1.MeterStorageWrite(key1, val1, false /* not enforced */) @@ -487,7 +795,7 @@ func TestStorageLimits(t *testing.T) { meter.DefaultParameters().WithStorageInteractionLimit(testLimit), ) - key1 := flow.NewRegisterID("", "1") + key1 := flow.NewRegisterID(flow.EmptyAddress, "1") val1 := 
[]byte{0x1, 0x2, 0x3} err := meter1.MeterStorageWrite(key1, val1, true /* enforced */) @@ -504,8 +812,8 @@ func TestStorageLimits(t *testing.T) { meter.DefaultParameters(), ) - key1 := flow.NewRegisterID("", "1") - key2 := flow.NewRegisterID("", "2") + key1 := flow.NewRegisterID(flow.EmptyAddress, "1") + key2 := flow.NewRegisterID(flow.EmptyAddress, "2") val1 := []byte{0x1, 0x2, 0x3} val2 := []byte{0x1, 0x2, 0x3, 0x4} size1 := meter.GetStorageKeyValueSizeForTesting(key1, val1) @@ -525,8 +833,8 @@ func TestStorageLimits(t *testing.T) { }) t.Run("metering storage read and written - exceeding limit - not enforced", func(t *testing.T) { - key1 := flow.NewRegisterID("", "1") - key2 := flow.NewRegisterID("", "2") + key1 := flow.NewRegisterID(flow.EmptyAddress, "1") + key2 := flow.NewRegisterID(flow.EmptyAddress, "2") val1 := []byte{0x1, 0x2, 0x3} val2 := []byte{0x1, 0x2, 0x3, 0x4} size1 := meter.GetStorageKeyValueSizeForTesting(key1, val1) @@ -550,8 +858,8 @@ func TestStorageLimits(t *testing.T) { }) t.Run("metering storage read and written - exceeding limit - enforced", func(t *testing.T) { - key1 := flow.NewRegisterID("", "1") - key2 := flow.NewRegisterID("", "2") + key1 := flow.NewRegisterID(flow.EmptyAddress, "1") + key2 := flow.NewRegisterID(flow.EmptyAddress, "2") val1 := []byte{0x1, 0x2, 0x3} val2 := []byte{0x1, 0x2, 0x3, 0x4} size1 := meter.GetStorageKeyValueSizeForTesting(key1, val1) @@ -581,13 +889,13 @@ func TestStorageLimits(t *testing.T) { meter1 := meter.NewMeter( meter.DefaultParameters(), ) - readKey1 := flow.NewRegisterID("", "r1") + readKey1 := flow.NewRegisterID(flow.EmptyAddress, "r1") readVal1 := []byte{0x1, 0x2, 0x3} readSize1 := meter.GetStorageKeyValueSizeForTesting(readKey1, readVal1) err := meter1.MeterStorageRead(readKey1, readVal1, false) require.NoError(t, err) - writeKey1 := flow.NewRegisterID("", "w1") + writeKey1 := flow.NewRegisterID(flow.EmptyAddress, "w1") writeVal1 := []byte{0x1, 0x2, 0x3, 0x4} writeSize1 := meter.GetStorageKeyValueSizeForTesting(writeKey1, writeVal1) err = meter1.MeterStorageWrite(writeKey1, writeVal1, false) @@ -598,7 +906,7 @@ func TestStorageLimits(t *testing.T) { meter.DefaultParameters(), ) - writeKey2 := flow.NewRegisterID("", "w2") + writeKey2 := flow.NewRegisterID(flow.EmptyAddress, "w2") writeVal2 := []byte{0x1, 0x2, 0x3, 0x4, 0x5} writeSize2 := meter.GetStorageKeyValueSizeForTesting(writeKey2, writeVal2) diff --git a/fvm/migration/Migration.cdc b/fvm/migration/Migration.cdc new file mode 100644 index 00000000000..57631bc3b9e --- /dev/null +++ b/fvm/migration/Migration.cdc @@ -0,0 +1,31 @@ + + +access(all) +contract Migration { + + access(all) + resource Admin { + + access(all) + fun migrate() { + Migration.migrate() + } + } + + access(all) + let adminStoragePath: StoragePath + + init() { + self.adminStoragePath = /storage/migrationAdmin + + self.account.storage.save( + <-create Admin(), + to: self.adminStoragePath + ) + } + + access(contract) + fun migrate() { + // NO-OP + } +} diff --git a/fvm/migration/migration.go b/fvm/migration/migration.go new file mode 100644 index 00000000000..32fa5378bfc --- /dev/null +++ b/fvm/migration/migration.go @@ -0,0 +1,14 @@ +package migration + +import ( + _ "embed" +) + +//go:embed Migration.cdc +var contractCode string + +const ContractName = "Migration" + +func ContractCode() []byte { + return []byte(contractCode) +} diff --git a/fvm/mock/bootstrap_procedure_option.go b/fvm/mock/bootstrap_procedure_option.go deleted file mode 100644 index ea5010b451e..00000000000 --- 
a/fvm/mock/bootstrap_procedure_option.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mock - -import ( - fvm "github.com/onflow/flow-go/fvm" - mock "github.com/stretchr/testify/mock" -) - -// BootstrapProcedureOption is an autogenerated mock type for the BootstrapProcedureOption type -type BootstrapProcedureOption struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0 -func (_m *BootstrapProcedureOption) Execute(_a0 *fvm.BootstrapProcedure) *fvm.BootstrapProcedure { - ret := _m.Called(_a0) - - var r0 *fvm.BootstrapProcedure - if rf, ok := ret.Get(0).(func(*fvm.BootstrapProcedure) *fvm.BootstrapProcedure); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*fvm.BootstrapProcedure) - } - } - - return r0 -} - -type mockConstructorTestingTNewBootstrapProcedureOption interface { - mock.TestingT - Cleanup(func()) -} - -// NewBootstrapProcedureOption creates a new instance of BootstrapProcedureOption. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBootstrapProcedureOption(t mockConstructorTestingTNewBootstrapProcedureOption) *BootstrapProcedureOption { - mock := &BootstrapProcedureOption{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/fvm/mock/option.go b/fvm/mock/option.go deleted file mode 100644 index 3e306aae44b..00000000000 --- a/fvm/mock/option.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mock - -import ( - fvm "github.com/onflow/flow-go/fvm" - mock "github.com/stretchr/testify/mock" -) - -// Option is an autogenerated mock type for the Option type -type Option struct { - mock.Mock -} - -// Execute provides a mock function with given fields: ctx -func (_m *Option) Execute(ctx fvm.Context) fvm.Context { - ret := _m.Called(ctx) - - var r0 fvm.Context - if rf, ok := ret.Get(0).(func(fvm.Context) fvm.Context); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(fvm.Context) - } - - return r0 -} - -type mockConstructorTestingTNewOption interface { - mock.TestingT - Cleanup(func()) -} - -// NewOption creates a new instance of Option. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewOption(t mockConstructorTestingTNewOption) *Option { - mock := &Option{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/fvm/mock/procedure.go b/fvm/mock/procedure.go index f4c2929490f..51420176b0a 100644 --- a/fvm/mock/procedure.go +++ b/fvm/mock/procedure.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -19,6 +19,10 @@ type Procedure struct { func (_m *Procedure) ComputationLimit(ctx fvm.Context) uint64 { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ComputationLimit") + } + var r0 uint64 if rf, ok := ret.Get(0).(func(fvm.Context) uint64); ok { r0 = rf(ctx) @@ -29,10 +33,14 @@ func (_m *Procedure) ComputationLimit(ctx fvm.Context) uint64 { return r0 } -// ExecutionTime provides a mock function with given fields: +// ExecutionTime provides a mock function with no fields func (_m *Procedure) ExecutionTime() logical.Time { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ExecutionTime") + } + var r0 logical.Time if rf, ok := ret.Get(0).(func() logical.Time); ok { r0 = rf() @@ -47,6 +55,10 @@ func (_m *Procedure) ExecutionTime() logical.Time { func (_m *Procedure) MemoryLimit(ctx fvm.Context) uint64 { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for MemoryLimit") + } + var r0 uint64 if rf, ok := ret.Get(0).(func(fvm.Context) uint64); ok { r0 = rf(ctx) @@ -61,6 +73,10 @@ func (_m *Procedure) MemoryLimit(ctx fvm.Context) uint64 { func (_m *Procedure) NewExecutor(ctx fvm.Context, txnState storage.TransactionPreparer) fvm.ProcedureExecutor { ret := _m.Called(ctx, txnState) + if len(ret) == 0 { + panic("no return value specified for NewExecutor") + } + var r0 fvm.ProcedureExecutor if rf, ok := ret.Get(0).(func(fvm.Context, storage.TransactionPreparer) fvm.ProcedureExecutor); ok { r0 = rf(ctx, txnState) @@ -77,6 +93,10 @@ func (_m *Procedure) NewExecutor(ctx fvm.Context, txnState storage.TransactionPr func (_m *Procedure) ShouldDisableMemoryAndInteractionLimits(ctx fvm.Context) bool { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for ShouldDisableMemoryAndInteractionLimits") + } + var r0 bool if rf, ok := ret.Get(0).(func(fvm.Context) bool); ok { r0 = rf(ctx) @@ -87,10 +107,14 @@ func (_m *Procedure) ShouldDisableMemoryAndInteractionLimits(ctx fvm.Context) bo return r0 } -// Type provides a mock function with given fields: +// Type provides a mock function with no fields func (_m *Procedure) Type() fvm.ProcedureType { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Type") + } + var r0 fvm.ProcedureType if rf, ok := ret.Get(0).(func() fvm.ProcedureType); ok { r0 = rf() @@ -101,13 +125,12 @@ func (_m *Procedure) Type() fvm.ProcedureType { return r0 } -type mockConstructorTestingTNewProcedure interface { +// NewProcedure creates a new instance of Procedure. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProcedure(t interface { mock.TestingT Cleanup(func()) -} - -// NewProcedure creates a new instance of Procedure. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewProcedure(t mockConstructorTestingTNewProcedure) *Procedure { +}) *Procedure { mock := &Procedure{} mock.Mock.Test(t) diff --git a/fvm/mock/procedure_executor.go b/fvm/mock/procedure_executor.go index f649e1816ef..14fb580b89b 100644 --- a/fvm/mock/procedure_executor.go +++ b/fvm/mock/procedure_executor.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
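// Usage sketch (illustrative test code, not from the diff; `t` is a *testing.T
// and `ctx` an fvm.Context, both assumed): the regenerated mocks panic when a
// method with a return value is called without a configured expectation,
// instead of silently returning a zero value:
//
//	p := mock.NewProcedure(t)
//	p.On("ComputationLimit", ctx).Return(uint64(100))
//	limit := p.ComputationLimit(ctx) // limit == 100
//	// calling p.ExecutionTime() without a matching p.On("ExecutionTime")
//	// expectation would now panic: "no return value specified for ExecutionTime"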
package mock @@ -12,15 +12,19 @@ type ProcedureExecutor struct { mock.Mock } -// Cleanup provides a mock function with given fields: +// Cleanup provides a mock function with no fields func (_m *ProcedureExecutor) Cleanup() { _m.Called() } -// Execute provides a mock function with given fields: +// Execute provides a mock function with no fields func (_m *ProcedureExecutor) Execute() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Execute") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -31,10 +35,14 @@ func (_m *ProcedureExecutor) Execute() error { return r0 } -// Output provides a mock function with given fields: +// Output provides a mock function with no fields func (_m *ProcedureExecutor) Output() fvm.ProcedureOutput { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Output") + } + var r0 fvm.ProcedureOutput if rf, ok := ret.Get(0).(func() fvm.ProcedureOutput); ok { r0 = rf() @@ -45,10 +53,14 @@ func (_m *ProcedureExecutor) Output() fvm.ProcedureOutput { return r0 } -// Preprocess provides a mock function with given fields: +// Preprocess provides a mock function with no fields func (_m *ProcedureExecutor) Preprocess() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Preprocess") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -59,13 +71,12 @@ func (_m *ProcedureExecutor) Preprocess() error { return r0 } -type mockConstructorTestingTNewProcedureExecutor interface { +// NewProcedureExecutor creates a new instance of ProcedureExecutor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProcedureExecutor(t interface { mock.TestingT Cleanup(func()) -} - -// NewProcedureExecutor creates a new instance of ProcedureExecutor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewProcedureExecutor(t mockConstructorTestingTNewProcedureExecutor) *ProcedureExecutor { +}) *ProcedureExecutor { mock := &ProcedureExecutor{} mock.Mock.Test(t) diff --git a/fvm/mock/vm.go b/fvm/mock/vm.go index 73736ace35b..12e27262997 100644 --- a/fvm/mock/vm.go +++ b/fvm/mock/vm.go @@ -1,14 +1,14 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( fvm "github.com/onflow/flow-go/fvm" - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" snapshot "github.com/onflow/flow-go/fvm/storage/snapshot" + + storage "github.com/onflow/flow-go/fvm/storage" ) // VM is an autogenerated mock type for the VM type @@ -16,36 +16,34 @@ type VM struct { mock.Mock } -// GetAccount provides a mock function with given fields: _a0, _a1, _a2 -func (_m *VM) GetAccount(_a0 fvm.Context, _a1 flow.Address, _a2 snapshot.StorageSnapshot) (*flow.Account, error) { +// NewExecutor provides a mock function with given fields: _a0, _a1, _a2 +func (_m *VM) NewExecutor(_a0 fvm.Context, _a1 fvm.Procedure, _a2 storage.TransactionPreparer) fvm.ProcedureExecutor { ret := _m.Called(_a0, _a1, _a2) - var r0 *flow.Account - var r1 error - if rf, ok := ret.Get(0).(func(fvm.Context, flow.Address, snapshot.StorageSnapshot) (*flow.Account, error)); ok { - return rf(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for NewExecutor") } - if rf, ok := ret.Get(0).(func(fvm.Context, flow.Address, snapshot.StorageSnapshot) *flow.Account); ok { + + var r0 fvm.ProcedureExecutor + if rf, ok := ret.Get(0).(func(fvm.Context, fvm.Procedure, storage.TransactionPreparer) fvm.ProcedureExecutor); ok { r0 = rf(_a0, _a1, _a2) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Account) + r0 = ret.Get(0).(fvm.ProcedureExecutor) } } - if rf, ok := ret.Get(1).(func(fvm.Context, flow.Address, snapshot.StorageSnapshot) error); ok { - r1 = rf(_a0, _a1, _a2) - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } // Run provides a mock function with given fields: _a0, _a1, _a2 func (_m *VM) Run(_a0 fvm.Context, _a1 fvm.Procedure, _a2 snapshot.StorageSnapshot) (*snapshot.ExecutionSnapshot, fvm.ProcedureOutput, error) { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for Run") + } + var r0 *snapshot.ExecutionSnapshot var r1 fvm.ProcedureOutput var r2 error @@ -75,13 +73,12 @@ func (_m *VM) Run(_a0 fvm.Context, _a1 fvm.Procedure, _a2 snapshot.StorageSnapsh return r0, r1, r2 } -type mockConstructorTestingTNewVM interface { +// NewVM creates a new instance of VM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVM(t interface { mock.TestingT Cleanup(func()) -} - -// NewVM creates a new instance of VM. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVM(t mockConstructorTestingTNewVM) *VM { +}) *VM { mock := &VM{} mock.Mock.Test(t) diff --git a/fvm/runtime/reusable_cadence_runtime.go b/fvm/runtime/reusable_cadence_runtime.go index 057dfa65ea0..7c6c3c318f0 100644 --- a/fvm/runtime/reusable_cadence_runtime.go +++ b/fvm/runtime/reusable_cadence_runtime.go @@ -2,33 +2,107 @@ package runtime import ( "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/sema" + "github.com/onflow/cadence/sema" + "github.com/onflow/cadence/stdlib" + + "github.com/onflow/flow-go/fvm/errors" ) // Note: this is a subset of environment.Environment, redeclared to handle a // circular dependency. 
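// A consumer-side interface declaration like the one below breaks the import
// cycle because Go interfaces are satisfied structurally: fvm/environment
// never needs to import fvm/runtime to declare conformance. A minimal sketch
// of the pattern (hypothetical names):
//
//	// Package b cannot import package a (a already imports b), so b
//	// declares just the methods it calls; a's concrete type satisfies
//	// this implicitly, with no import edge from b back to a.
//	type Doer interface{ Do() error }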
type Environment interface { runtime.Interface + common.Gauge + + RandomSourceHistory() ([]byte, error) +} + +// randomSourceFunctionType is the type of the `randomSourceHistory` function. +// This defines the signature as `func(): [UInt8]` +var randomSourceFunctionType = &sema.FunctionType{ + ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.ByteArrayType), } type ReusableCadenceRuntime struct { runtime.Runtime - runtime.Environment + TxRuntimeEnv runtime.Environment + ScriptRuntimeEnv runtime.Environment fvmEnv Environment } -func NewReusableCadenceRuntime(rt runtime.Runtime, config runtime.Config) *ReusableCadenceRuntime { +func NewReusableCadenceRuntime( + rt runtime.Runtime, + config runtime.Config, +) *ReusableCadenceRuntime { reusable := &ReusableCadenceRuntime{ - Runtime: rt, - Environment: runtime.NewBaseInterpreterEnvironment(config), + Runtime: rt, + TxRuntimeEnv: runtime.NewBaseInterpreterEnvironment(config), + ScriptRuntimeEnv: runtime.NewScriptInterpreterEnvironment(config), } + reusable.declareRandomSourceHistory() + return reusable } +func (reusable *ReusableCadenceRuntime) declareRandomSourceHistory() { + + // Declare the `randomSourceHistory` function. This function is **only** used by the + // System transaction, to fill the `RandomBeaconHistory` contract via the heartbeat + // resource. This allows the `RandomBeaconHistory` contract to be a standard contract, + // without any special parts. + // Since the `randomSourceHistory` function is only used by the System transaction, + // it is not part of the cadence standard library, and can just be injected from here. + // It also doesn't need user documentation, since it is not (and should not be) + // called by the user. If it is called by the user it will panic. + functionType := randomSourceFunctionType + + blockRandomSource := stdlib.StandardLibraryValue{ + Name: "randomSourceHistory", + Type: functionType, + Kind: common.DeclarationKindFunction, + Value: interpreter.NewUnmeteredStaticHostFunctionValue( + functionType, + func(invocation interpreter.Invocation) interpreter.Value { + + actualArgumentCount := len(invocation.Arguments) + expectedArgumentCount := len(functionType.Parameters) + + if actualArgumentCount != expectedArgumentCount { + panic(errors.NewInvalidArgumentErrorf( + "incorrect number of arguments: got %d, expected %d", + actualArgumentCount, + expectedArgumentCount, + )) + } + + var err error + var source []byte + fvmEnv := reusable.fvmEnv + if fvmEnv != nil { + source, err = fvmEnv.RandomSourceHistory() + } else { + err = errors.NewOperationNotSupportedError("randomSourceHistory") + } + + if err != nil { + panic(err) + } + + return interpreter.ByteSliceToByteArrayValue( + invocation.InvocationContext, + source) + }, + ), + } + + reusable.TxRuntimeEnv.DeclareValue(blockRandomSource, nil) +} + func (reusable *ReusableCadenceRuntime) SetFvmEnvironment(fvmEnv Environment) { reusable.fvmEnv = fvmEnv } @@ -44,8 +118,10 @@ func (reusable *ReusableCadenceRuntime) ReadStored( address, path, runtime.Context{ - Interface: reusable.fvmEnv, - Environment: reusable.Environment, + Interface: reusable.fvmEnv, + Environment: reusable.TxRuntimeEnv, + MemoryGauge: reusable.fvmEnv, + ComputationGauge: reusable.fvmEnv, }, ) } @@ -65,8 +141,10 @@ func (reusable *ReusableCadenceRuntime) InvokeContractFunction( arguments, argumentTypes, runtime.Context{ - Interface: reusable.fvmEnv, - Environment: reusable.Environment, + Interface: reusable.fvmEnv, + Environment: reusable.TxRuntimeEnv, + MemoryGauge: reusable.fvmEnv, + 
ComputationGauge: reusable.fvmEnv, }, ) } @@ -78,9 +156,11 @@ func (reusable *ReusableCadenceRuntime) NewTransactionExecutor( return reusable.Runtime.NewTransactionExecutor( script, runtime.Context{ - Interface: reusable.fvmEnv, - Location: location, - Environment: reusable.Environment, + Interface: reusable.fvmEnv, + Location: location, + Environment: reusable.TxRuntimeEnv, + MemoryGauge: reusable.fvmEnv, + ComputationGauge: reusable.fvmEnv, }, ) } @@ -95,8 +175,11 @@ func (reusable *ReusableCadenceRuntime) ExecuteScript( return reusable.Runtime.ExecuteScript( script, runtime.Context{ - Interface: reusable.fvmEnv, - Location: location, + Interface: reusable.fvmEnv, + Location: location, + Environment: reusable.ScriptRuntimeEnv, + MemoryGauge: reusable.fvmEnv, + ComputationGauge: reusable.fvmEnv, }, ) } @@ -109,7 +192,7 @@ type ReusableCadenceRuntimePool struct { config runtime.Config // When newCustomRuntime is nil, the pool will create standard cadence - // interpreter runtimes via runtime.NewInterpreterRuntime. Otherwise, the + // interpreter runtimes via runtime.NewRuntime. Otherwise, the // pool will create runtimes using this function. // // Note that this is primarily used for testing. @@ -160,7 +243,7 @@ func (pool ReusableCadenceRuntimePool) newRuntime() runtime.Runtime { if pool.newCustomRuntime != nil { return pool.newCustomRuntime(pool.config) } - return runtime.NewInterpreterRuntime(pool.config) + return runtime.NewRuntime(pool.config) } func (pool ReusableCadenceRuntimePool) Borrow( diff --git a/fvm/runtime/reusable_cadence_runtime_test.go b/fvm/runtime/reusable_cadence_runtime_test.go index 758fa2f7426..cf6a2a44867 100644 --- a/fvm/runtime/reusable_cadence_runtime_test.go +++ b/fvm/runtime/reusable_cadence_runtime_test.go @@ -59,7 +59,7 @@ func TestReusableCadenceRuntimePoolSharing(t *testing.T) { default: } - var otherPool ReusableCadenceRuntimePool = pool + var otherPool = pool entry := otherPool.Borrow(nil) require.NotNil(t, entry) diff --git a/fvm/runtime/testutil/runtime.go b/fvm/runtime/testutil/runtime.go index 58bafd926f3..bcc0b133c2f 100644 --- a/fvm/runtime/testutil/runtime.go +++ b/fvm/runtime/testutil/runtime.go @@ -2,93 +2,91 @@ package testutil import ( "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" - "github.com/onflow/cadence/runtime/sema" + "github.com/onflow/cadence/sema" ) -var _ runtime.Runtime = &TestInterpreterRuntime{} +var _ runtime.Runtime = &TestRuntime{} -type TestInterpreterRuntime struct { - ReadStoredFunc func(address common.Address, path cadence.Path, context runtime.Context) (cadence.Value, error) - InvokeContractFunc func(a common.AddressLocation, s string, values []cadence.Value, types []sema.Type, ctx runtime.Context) (cadence.Value, error) +type TestRuntime struct { + ReadStoredFunc func( + address common.Address, + path cadence.Path, + context runtime.Context, + ) (cadence.Value, error) + InvokeContractFunc func( + a common.AddressLocation, + s string, + values []cadence.Value, + types []sema.Type, + ctx runtime.Context, + ) (cadence.Value, error) } -func (t *TestInterpreterRuntime) Config() runtime.Config { +func (t *TestRuntime) Config() runtime.Config { panic("Config not defined") } -func (t *TestInterpreterRuntime) NewScriptExecutor(script runtime.Script, context runtime.Context) runtime.Executor { +func (t *TestRuntime) NewScriptExecutor(_ 
runtime.Script, _ runtime.Context) runtime.Executor { panic("NewScriptExecutor not defined") } -func (t *TestInterpreterRuntime) NewTransactionExecutor(script runtime.Script, context runtime.Context) runtime.Executor { +func (t *TestRuntime) NewTransactionExecutor(_ runtime.Script, _ runtime.Context) runtime.Executor { panic("NewTransactionExecutor not defined") } -func (t *TestInterpreterRuntime) NewContractFunctionExecutor(contractLocation common.AddressLocation, functionName string, arguments []cadence.Value, argumentTypes []sema.Type, context runtime.Context) runtime.Executor { +func (t *TestRuntime) NewContractFunctionExecutor( + _ common.AddressLocation, + _ string, + _ []cadence.Value, + _ []sema.Type, + _ runtime.Context, +) runtime.Executor { panic("NewContractFunctionExecutor not defined") } -func (t *TestInterpreterRuntime) SetDebugger(debugger *interpreter.Debugger) { +func (t *TestRuntime) SetDebugger(_ *interpreter.Debugger) { panic("SetDebugger not defined") } -func (t *TestInterpreterRuntime) ExecuteScript(runtime.Script, runtime.Context) (cadence.Value, error) { +func (t *TestRuntime) ExecuteScript(_ runtime.Script, _ runtime.Context) (cadence.Value, error) { panic("ExecuteScript not defined") } -func (t *TestInterpreterRuntime) ExecuteTransaction(runtime.Script, runtime.Context) error { +func (t *TestRuntime) ExecuteTransaction(_ runtime.Script, _ runtime.Context) error { panic("ExecuteTransaction not defined") } -func (t *TestInterpreterRuntime) InvokeContractFunction(a common.AddressLocation, s string, values []cadence.Value, types []sema.Type, ctx runtime.Context) (cadence.Value, error) { +func (t *TestRuntime) InvokeContractFunction( + a common.AddressLocation, + s string, + values []cadence.Value, + types []sema.Type, + ctx runtime.Context, +) (cadence.Value, error) { if t.InvokeContractFunc == nil { panic("InvokeContractFunction not defined") } return t.InvokeContractFunc(a, s, values, types, ctx) } -func (t *TestInterpreterRuntime) ParseAndCheckProgram([]byte, runtime.Context) (*interpreter.Program, error) { +func (t *TestRuntime) ParseAndCheckProgram(_ []byte, _ runtime.Context) (*interpreter.Program, error) { panic("ParseAndCheckProgram not defined") } -func (t *TestInterpreterRuntime) SetCoverageReport(*runtime.CoverageReport) { - panic("SetCoverageReport not defined") -} - -func (t *TestInterpreterRuntime) SetContractUpdateValidationEnabled(bool) { - panic("SetContractUpdateValidationEnabled not defined") -} - -func (t *TestInterpreterRuntime) SetAtreeValidationEnabled(bool) { - panic("SetAtreeValidationEnabled not defined") -} - -func (t *TestInterpreterRuntime) SetTracingEnabled(bool) { - panic("SetTracingEnabled not defined") -} - -func (t *TestInterpreterRuntime) SetInvalidatedResourceValidationEnabled(bool) { - panic("SetInvalidatedResourceValidationEnabled not defined") -} - -func (t *TestInterpreterRuntime) SetResourceOwnerChangeHandlerEnabled(bool) { - panic("SetResourceOwnerChangeHandlerEnabled not defined") -} - -func (t *TestInterpreterRuntime) ReadStored(address common.Address, path cadence.Path, context runtime.Context) (cadence.Value, error) { +func (t *TestRuntime) ReadStored( + address common.Address, + path cadence.Path, + context runtime.Context, +) (cadence.Value, error) { if t.ReadStoredFunc == nil { panic("ReadStored not defined") } return t.ReadStoredFunc(address, path, context) } -func (t *TestInterpreterRuntime) ReadLinked(common.Address, cadence.Path, runtime.Context) (cadence.Value, error) { - panic("ReadLinked not defined") -} - -func 
(*TestInterpreterRuntime) Storage(runtime.Context) (*runtime.Storage, *interpreter.Interpreter, error) { - panic("not implemented") +func (*TestRuntime) Storage(_ runtime.Context) (*runtime.Storage, *interpreter.Interpreter, error) { + panic("Storage not defined") } diff --git a/fvm/runtime/wrapped_cadence_runtime.go b/fvm/runtime/wrapped_cadence_runtime.go index 9e8c695d0a5..b57512509d4 100644 --- a/fvm/runtime/wrapped_cadence_runtime.go +++ b/fvm/runtime/wrapped_cadence_runtime.go @@ -2,10 +2,10 @@ package runtime import ( "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" - "github.com/onflow/cadence/runtime/sema" + "github.com/onflow/cadence/sema" "github.com/onflow/flow-go/fvm/errors" ) @@ -67,11 +67,6 @@ func (wr WrappedCadenceRuntime) ReadStored(address common.Address, path cadence. return v, errors.HandleRuntimeError(err) } -func (wr WrappedCadenceRuntime) ReadLinked(address common.Address, path cadence.Path, context runtime.Context) (cadence.Value, error) { - v, err := wr.Runtime.ReadLinked(address, path, context) - return v, errors.HandleRuntimeError(err) -} - func (wr WrappedCadenceRuntime) Storage(context runtime.Context) (*runtime.Storage, *interpreter.Interpreter, error) { s, i, err := wr.Runtime.Storage(context) return s, i, errors.HandleRuntimeError(err) diff --git a/fvm/script.go b/fvm/script.go index 10bd5d68717..e79b8c5ff0f 100644 --- a/fvm/script.go +++ b/fvm/script.go @@ -4,11 +4,13 @@ import ( "context" "fmt" + "github.com/hashicorp/go-multierror" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/storage/logical" "github.com/onflow/flow-go/model/flow" @@ -23,7 +25,7 @@ type ScriptProcedure struct { } func Script(code []byte) *ScriptProcedure { - scriptHash := hash.DefaultHasher.ComputeHash(code) + scriptHash := hash.DefaultComputeHash(code) return &ScriptProcedure{ Script: code, @@ -57,7 +59,7 @@ func NewScriptWithContextAndArgs( reqContext context.Context, args ...[]byte, ) *ScriptProcedure { - scriptHash := hash.DefaultHasher.ComputeHash(code) + scriptHash := hash.DefaultComputeHash(code) return &ScriptProcedure{ ID: flow.HashToID(scriptHash), Script: code, @@ -120,6 +122,10 @@ func newScriptExecutor( proc *ScriptProcedure, txnState storage.TransactionPreparer, ) *scriptExecutor { + // update `ctx.EnvironmentParams` with the script info before + // creating the executor + scriptInfo := environment.NewScriptInfoParams(proc.Script, proc.Arguments) + ctx.EnvironmentParams.SetScriptInfoParams(scriptInfo) return &scriptExecutor{ ctx: ctx, proc: proc, @@ -166,7 +172,8 @@ func (executor *scriptExecutor) Execute() error { } func (executor *scriptExecutor) execute() error { - meterParams, err := getBodyMeterParameters( + executionParams, _, err := getExecutionParameters( + executor.env.Logger(), executor.ctx, executor.proc, executor.txnState) @@ -175,7 +182,7 @@ func (executor *scriptExecutor) execute() error { } txnId, err := executor.txnState.BeginNestedTransactionWithMeterParams( - meterParams) + executionParams) if err != nil { return err } @@ -193,16 +200,31 @@ func (executor *scriptExecutor) executeScript() error { rt := 
executor.env.BorrowCadenceRuntime() defer executor.env.ReturnCadenceRuntime(rt) + chainID := executor.ctx.Chain.ChainID() + + if executor.ctx.EVMEnabled { + err := evm.SetupEnvironment( + chainID, + executor.env, + rt.ScriptRuntimeEnv, + ) + if err != nil { + return err + } + } + value, err := rt.ExecuteScript( runtime.Script{ Source: executor.proc.Script, Arguments: executor.proc.Arguments, }, - common.ScriptLocation(executor.proc.ID)) + common.ScriptLocation(executor.proc.ID), + ) + populateErr := executor.output.PopulateEnvironmentValues(executor.env) if err != nil { - return err + return multierror.Append(err, populateErr) } executor.output.Value = value - return executor.output.PopulateEnvironmentValues(executor.env) + return populateErr } diff --git a/fvm/storage/block_database.go b/fvm/storage/block_database.go index de0cddde909..96bc3ba157c 100644 --- a/fvm/storage/block_database.go +++ b/fvm/storage/block_database.go @@ -66,6 +66,7 @@ func (database *BlockDatabase) NewTransaction( }, nil } +// NewSnapshotReadTransaction creates a new readonly transaction. func (database *BlockDatabase) NewSnapshotReadTransaction( parameters state.StateParameters, ) Transaction { @@ -78,15 +79,29 @@ func (database *BlockDatabase) NewSnapshotReadTransaction( } } +// NewCachingSnapshotReadTransaction creates a new readonly transaction that allows writing to the +// derived transaction data table. +func (database *BlockDatabase) NewCachingSnapshotReadTransaction( + parameters state.StateParameters, +) (Transaction, error) { + return &transaction{ + TransactionData: database.BlockData.NewCachingSnapshotReadTransactionData(parameters), + DerivedTransactionData: database.DerivedBlockData.NewCachingSnapshotReadDerivedTransactionData(), + }, nil +} + func (txn *transaction) Validate() error { - err := txn.TransactionData.Validate() + err := txn.DerivedTransactionData.Validate() if err != nil { - return fmt.Errorf("primary index validate failed: %w", err) + return fmt.Errorf("derived indices validate failed: %w", err) } - err = txn.DerivedTransactionData.Validate() + // NOTE: Since the primary txn's SnapshotTime() is exposed to the user, + // the primary txn should be validated last to prevent the primary txn's + // snapshot time advancement in case of derived txn validation failure. + err = txn.TransactionData.Validate() if err != nil { - return fmt.Errorf("derived indices validate failed: %w", err) + return fmt.Errorf("primary index validate failed: %w", err) } return nil @@ -98,14 +113,17 @@ func (txn *transaction) Finalize() error { } func (txn *transaction) Commit() (*snapshot.ExecutionSnapshot, error) { - executionSnapshot, err := txn.TransactionData.Commit() + err := txn.DerivedTransactionData.Commit() if err != nil { - return nil, fmt.Errorf("primary index commit failed: %w", err) + return nil, fmt.Errorf("derived indices commit failed: %w", err) } - err = txn.DerivedTransactionData.Commit() + // NOTE: Since the primary txn's SnapshotTime() is exposed to the user, + // the primary txn should be committed last to prevent the primary txn's + // snapshot time advancement in case of derived txn commit failure.
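The two NOTE comments here encode one invariant: the primary index's SnapshotTime() is user-observable, so it must be touched last, after every fallible derived-index step. A minimal, self-contained sketch of that pattern, using a hypothetical committer interface rather than the flow-go types:

package sketch

import "fmt"

type committer interface{ Commit() error }

// commitAll commits the non-observable derived index first and the
// user-observable primary index last, so a derived-index failure can
// never advance the primary index's visible snapshot time.
func commitAll(derived, primary committer) error {
	if err := derived.Commit(); err != nil {
		return fmt.Errorf("derived indices commit failed: %w", err)
	}
	if err := primary.Commit(); err != nil {
		return fmt.Errorf("primary index commit failed: %w", err)
	}
	return nil
}

The same argument, with Validate in place of Commit, is why Validate above checks the derived indices before the primary one.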
+ executionSnapshot, err := txn.TransactionData.Commit() if err != nil { - return nil, fmt.Errorf("derived indices commit failed: %w", err) + return nil, fmt.Errorf("primary index commit failed: %w", err) } return executionSnapshot, nil diff --git a/fvm/storage/derived/dependencies.go b/fvm/storage/derived/dependencies.go index c5ba3b85e29..65ece9dc173 100644 --- a/fvm/storage/derived/dependencies.go +++ b/fvm/storage/derived/dependencies.go @@ -1,7 +1,7 @@ package derived import ( - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" ) // ProgramDependencies are the locations of programs that a program depends on. diff --git a/fvm/storage/derived/dependencies_test.go b/fvm/storage/derived/dependencies_test.go index 90bb1e09482..e94eb742395 100644 --- a/fvm/storage/derived/dependencies_test.go +++ b/fvm/storage/derived/dependencies_test.go @@ -3,7 +3,7 @@ package derived_test import ( "testing" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/storage/derived" diff --git a/fvm/storage/derived/derived_block_data.go b/fvm/storage/derived/derived_block_data.go index f39c3a1553a..b86801d7b6d 100644 --- a/fvm/storage/derived/derived_block_data.go +++ b/fvm/storage/derived/derived_block_data.go @@ -3,10 +3,11 @@ package derived import ( "fmt" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/runtime" "github.com/onflow/flow-go/fvm/storage/logical" + "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/state" ) @@ -21,11 +22,13 @@ type DerivedTransactionPreparer interface { ) GetProgram(location common.AddressLocation) (*Program, bool) - GetMeterParamOverrides( + // GetStateExecutionParameters returns parameters needed for execution from the state. + GetStateExecutionParameters( txnState state.NestedTransactionPreparer, - getMeterParamOverrides ValueComputer[struct{}, MeterParamOverrides], + getMeterParamOverrides ValueComputer[struct{}, StateExecutionParameters], ) ( - MeterParamOverrides, + StateExecutionParameters, + *snapshot.ExecutionSnapshot, error, ) @@ -33,7 +36,7 @@ type DerivedTransactionPreparer interface { } type Program struct { - *interpreter.Program + *runtime.Program Dependencies ProgramDependencies } @@ -43,7 +46,7 @@ type Program struct { type DerivedBlockData struct { programs *DerivedDataTable[common.AddressLocation, *Program] - meterParamOverrides *DerivedDataTable[struct{}, MeterParamOverrides] + meterParamOverrides *DerivedDataTable[struct{}, StateExecutionParameters] } // DerivedTransactionData is the derived data scratch space for a single @@ -56,7 +59,7 @@ type DerivedTransactionData struct { // There's only a single entry in this table. For simplicity, we'll use // struct{} as the entry's key. 
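The struct{} key mentioned in the comment above is the standard Go idiom for a single-entry generic table: struct{} is zero-sized and has exactly one value, so the map can never hold more than one entry and the key costs nothing. A toy version of the idiom, illustrative only and not the actual table implementation:

package sketch

// singleton stores at most one TVal, keyed by the only possible
// struct{} value; struct{}{} adds no memory overhead as a key.
type singleton[TVal any] struct {
	entries map[struct{}]TVal
}

func newSingleton[TVal any]() *singleton[TVal] {
	return &singleton[TVal]{entries: map[struct{}]TVal{}}
}

func (s *singleton[TVal]) set(v TVal) {
	s.entries[struct{}{}] = v
}

func (s *singleton[TVal]) get() (TVal, bool) {
	v, ok := s.entries[struct{}{}]
	return v, ok
}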
- meterParamOverrides *TableTransaction[struct{}, MeterParamOverrides] + executionParameters *TableTransaction[struct{}, StateExecutionParameters] } func NewEmptyDerivedBlockData( @@ -69,7 +72,7 @@ func NewEmptyDerivedBlockData( ](initialSnapshotTime), meterParamOverrides: NewEmptyTable[ struct{}, - MeterParamOverrides, + StateExecutionParameters, ](initialSnapshotTime), } } @@ -82,13 +85,16 @@ func (block *DerivedBlockData) NewChildDerivedBlockData() *DerivedBlockData { } func (block *DerivedBlockData) NewSnapshotReadDerivedTransactionData() *DerivedTransactionData { - txnPrograms := block.programs.NewSnapshotReadTableTransaction() - - txnMeterParamOverrides := block.meterParamOverrides.NewSnapshotReadTableTransaction() + return &DerivedTransactionData{ + programs: block.programs.NewSnapshotReadTableTransaction(), + executionParameters: block.meterParamOverrides.NewSnapshotReadTableTransaction(), + } +} +func (block *DerivedBlockData) NewCachingSnapshotReadDerivedTransactionData() *DerivedTransactionData { return &DerivedTransactionData{ - programs: txnPrograms, - meterParamOverrides: txnMeterParamOverrides, + programs: block.programs.NewCachingSnapshotReadTableTransaction(), + executionParameters: block.meterParamOverrides.NewCachingSnapshotReadTableTransaction(), } } @@ -115,7 +121,7 @@ func (block *DerivedBlockData) NewDerivedTransactionData( return &DerivedTransactionData{ programs: txnPrograms, - meterParamOverrides: txnMeterParamOverrides, + executionParameters: txnMeterParamOverrides, }, nil } @@ -172,18 +178,19 @@ func (transaction *DerivedTransactionData) AddInvalidator( } transaction.programs.AddInvalidator(invalidator.ProgramInvalidator()) - transaction.meterParamOverrides.AddInvalidator( - invalidator.MeterParamOverridesInvalidator()) + transaction.executionParameters.AddInvalidator( + invalidator.ExecutionParametersInvalidator()) } -func (transaction *DerivedTransactionData) GetMeterParamOverrides( +func (transaction *DerivedTransactionData) GetStateExecutionParameters( txnState state.NestedTransactionPreparer, - getMeterParamOverrides ValueComputer[struct{}, MeterParamOverrides], + getMeterParamOverrides ValueComputer[struct{}, StateExecutionParameters], ) ( - MeterParamOverrides, + StateExecutionParameters, + *snapshot.ExecutionSnapshot, error, ) { - return transaction.meterParamOverrides.GetOrCompute( + return transaction.executionParameters.GetWithStateOrCompute( txnState, struct{}{}, getMeterParamOverrides) @@ -195,7 +202,7 @@ func (transaction *DerivedTransactionData) Validate() error { return fmt.Errorf("programs validate failed: %w", err) } - err = transaction.meterParamOverrides.Validate() + err = transaction.executionParameters.Validate() if err != nil { return fmt.Errorf("meter param overrides validate failed: %w", err) } @@ -209,7 +216,7 @@ func (transaction *DerivedTransactionData) Commit() error { return fmt.Errorf("programs commit failed: %w", err) } - err = transaction.meterParamOverrides.Commit() + err = transaction.executionParameters.Commit() if err != nil { return fmt.Errorf("meter param overrides commit failed: %w", err) } diff --git a/fvm/storage/derived/derived_chain_data.go b/fvm/storage/derived/derived_chain_data.go index a3ec9a488df..8bcf0b03d00 100644 --- a/fvm/storage/derived/derived_chain_data.go +++ b/fvm/storage/derived/derived_chain_data.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - "github.com/hashicorp/golang-lru/simplelru" + "github.com/hashicorp/golang-lru/v2/simplelru" "github.com/onflow/flow-go/model/flow" ) @@ -21,11 +21,11 @@ type 
DerivedChainData struct { // on Get. mutex sync.Mutex - lru *simplelru.LRU + lru *simplelru.LRU[flow.Identifier, *DerivedBlockData] } func NewDerivedChainData(chainCacheSize uint) (*DerivedChainData, error) { - lru, err := simplelru.NewLRU(int(chainCacheSize), nil) + lru, err := simplelru.NewLRU[flow.Identifier, *DerivedBlockData](int(chainCacheSize), nil) if err != nil { return nil, fmt.Errorf("cannot create LRU cache: %w", err) } @@ -40,7 +40,7 @@ func (chain *DerivedChainData) unsafeGet( ) *DerivedBlockData { currentEntry, ok := chain.lru.Get(currentBlockId) if ok { - return currentEntry.(*DerivedBlockData) + return currentEntry } return nil @@ -70,7 +70,7 @@ func (chain *DerivedChainData) GetOrCreateDerivedBlockData( var current *DerivedBlockData parentEntry, ok := chain.lru.Get(parentBlockId) if ok { - current = parentEntry.(*DerivedBlockData).NewChildDerivedBlockData() + current = parentEntry.NewChildDerivedBlockData() } else { current = NewEmptyDerivedBlockData(0) } diff --git a/fvm/storage/derived/derived_chain_data_test.go b/fvm/storage/derived/derived_chain_data_test.go index 0c79af2f603..959874928f1 100644 --- a/fvm/storage/derived/derived_chain_data_test.go +++ b/fvm/storage/derived/derived_chain_data_test.go @@ -3,8 +3,8 @@ package derived import ( "testing" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/runtime" "github.com/stretchr/testify/require" @@ -40,7 +40,7 @@ func TestDerivedChainData(t *testing.T) { loc1 := testLocation("0a") prog1 := &Program{ - Program: &interpreter.Program{}, + Program: &runtime.Program{}, } txn, err := block1.NewDerivedTransactionData(0, 0) @@ -75,7 +75,7 @@ func TestDerivedChainData(t *testing.T) { loc2 := testLocation("0b") prog2 := &Program{ - Program: &interpreter.Program{}, + Program: &runtime.Program{}, } txn, err = block2.NewDerivedTransactionData(0, 0) diff --git a/fvm/storage/derived/invalidator.go b/fvm/storage/derived/invalidator.go index 63e3ee290d5..6e7d2918fe8 100644 --- a/fvm/storage/derived/invalidator.go +++ b/fvm/storage/derived/invalidator.go @@ -1,7 +1,7 @@ package derived import ( - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/meter" ) @@ -12,17 +12,22 @@ type MeterParamOverrides struct { MemoryLimit *uint64 // nil indicates no override } +// StateExecutionParameters are parameters needed for execution defined in the execution state. +type StateExecutionParameters struct { + MeterParamOverrides +} + type ProgramInvalidator TableInvalidator[ common.AddressLocation, *Program, ] -type MeterParamOverridesInvalidator TableInvalidator[ +type ExecutionParametersInvalidator TableInvalidator[ struct{}, - MeterParamOverrides, + StateExecutionParameters, ] type TransactionInvalidator interface { ProgramInvalidator() ProgramInvalidator - MeterParamOverridesInvalidator() MeterParamOverridesInvalidator + ExecutionParametersInvalidator() ExecutionParametersInvalidator } diff --git a/fvm/storage/derived/table.go b/fvm/storage/derived/table.go index 91d7153dcb4..2c9a70631ea 100644 --- a/fvm/storage/derived/table.go +++ b/fvm/storage/derived/table.go @@ -77,6 +77,11 @@ type TableTransaction[TKey comparable, TVal any] struct { // When isSnapshotReadTransaction is true, invalidators must be empty. 
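The simplelru changes above are the mechanical half of the hashicorp/golang-lru v1-to-v2 migration: v2 is generic, so the cache is constructed with explicit key and value types and Get no longer needs a type assertion. A standalone sketch of the v2 API, with toy string/int types standing in for the flow-go ones:

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

func main() {
	// NewLRU is parameterized by key and value type; nil means no eviction callback.
	lru, err := simplelru.NewLRU[string, int](2, nil)
	if err != nil {
		panic(err)
	}
	lru.Add("a", 1)
	if v, ok := lru.Get("a"); ok {
		fmt.Println(v + 1) // v is already an int; no v.(int) assertion needed
	}
}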
isSnapshotReadTransaction bool invalidators chainedTableInvalidators[TKey, TVal] + + // ignoreLatestCommitExecutionTime is used to bypass latestCommitExecutionTime checks during + // commit. This is used when operating in caching mode with scripts since "commits" are all done + // at the end of the block and are not expected to progress the execution time. + ignoreLatestCommitExecutionTime bool } func NewEmptyTable[ @@ -198,25 +203,55 @@ func (table *DerivedDataTable[TKey, TVal]) unsafeValidate( applicable := table.invalidators.ApplicableInvalidators( txn.toValidateTime) - if applicable.ShouldInvalidateEntries() { - for key, entry := range txn.writeSet { - if applicable.ShouldInvalidateEntry( + shouldInvalidateEntries := applicable.ShouldInvalidateEntries() + + for key, entry := range txn.writeSet { + current, ok := table.items[key] + if ok && current != entry { + // The derived data table must always return the same item for a given key, + // otherwise the cadence runtime will have issues comparing resolved cadence types. + // + // for example: + // two transactions are run concurrently, first loads (cadence contracts) + // A and B where B depends on A. The second transaction also loads A and C, + // where C depends on A. The first transaction commits first. + // The A from the second transaction is equivalent to the A from + // the first transaction but it is not the same object. + // + // Overwriting A with the A from the second transaction will cause program B + // to break because it will not know the types from A returned from + // the cache in the future. + // Not overwriting A will cause program C to break because it will not know + // the types from A returned from the cache in the future. + // + // The solution is to treat this as a conflict and retry the transaction. + // When the transaction is retried, the A from the first transaction will + // be used to load C in the second transaction. + + return errors.NewRetryableConflictError( + "invalid TableTransaction: write conflict") + } + + if !shouldInvalidateEntries || + !applicable.ShouldInvalidateEntry( key, entry.Value, - entry.ExecutionSnapshot) { - - if txn.snapshotTime == txn.executionTime { - // This should never happen since the transaction is - // sequentially executed. - return fmt.Errorf( - "invalid TableTransaction: unrecoverable outdated " + - "write set") - } + entry.ExecutionSnapshot, + ) { + continue + } - return errors.NewRetryableConflictError( - "invalid TableTransaction: outdated write set") - } + if txn.snapshotTime == txn.executionTime { + // This should never happen since the transaction is + // sequentially executed. 
+ return fmt.Errorf( + "invalid TableTransaction: unrecoverable outdated " + + "write set") } + + return errors.NewRetryableConflictError( + "invalid TableTransaction: outdated write set") + } txn.toValidateTime = table.latestCommitExecutionTime + 1 @@ -240,6 +275,7 @@ func (table *DerivedDataTable[TKey, TVal]) commit( defer table.lock.Unlock() if !txn.isSnapshotReadTransaction && + !txn.ignoreLatestCommitExecutionTime && table.latestCommitExecutionTime+1 < txn.snapshotTime { return fmt.Errorf( @@ -298,15 +334,17 @@ func (table *DerivedDataTable[TKey, TVal]) newTableTransaction( snapshotTime logical.Time, executionTime logical.Time, isSnapshotReadTransaction bool, + ignoreLatestCommitExecutionTime bool, ) *TableTransaction[TKey, TVal] { return &TableTransaction[TKey, TVal]{ - table: table, - snapshotTime: snapshotTime, - executionTime: executionTime, - toValidateTime: snapshotTime, - readSet: map[TKey]*invalidatableEntry[TVal]{}, - writeSet: map[TKey]*invalidatableEntry[TVal]{}, - isSnapshotReadTransaction: isSnapshotReadTransaction, + table: table, + snapshotTime: snapshotTime, + executionTime: executionTime, + toValidateTime: snapshotTime, + readSet: map[TKey]*invalidatableEntry[TVal]{}, + writeSet: map[TKey]*invalidatableEntry[TVal]{}, + isSnapshotReadTransaction: isSnapshotReadTransaction, + ignoreLatestCommitExecutionTime: ignoreLatestCommitExecutionTime, } } @@ -314,6 +352,15 @@ func (table *DerivedDataTable[TKey, TVal]) NewSnapshotReadTableTransaction() *Ta return table.newTableTransaction( logical.EndOfBlockExecutionTime, logical.EndOfBlockExecutionTime, + true, + false) +} + +func (table *DerivedDataTable[TKey, TVal]) NewCachingSnapshotReadTableTransaction() *TableTransaction[TKey, TVal] { + return table.newTableTransaction( + logical.EndOfBlockExecutionTime, + logical.EndOfBlockExecutionTime, + false, true) } @@ -342,6 +389,7 @@ func (table *DerivedDataTable[TKey, TVal]) NewTableTransaction( return table.newTableTransaction( snapshotTime, executionTime, + false, false), nil } @@ -418,6 +466,27 @@ func (txn *TableTransaction[TKey, TVal]) GetOrCompute( ) ( TVal, error, +) { + val, _, err := txn.GetWithStateOrCompute(txnState, key, computer) + return val, err +} + +// GetWithStateOrCompute returns the key's value and the execution snapshot used to +// compute it. If a pre-computed value is available, +// then the pre-computed value is returned and the cached state is replayed on +// txnState. Otherwise, the value is computed using computer; both the value +// and the states used to compute the value are captured. +// +// Note: computer must be an idempotent function and it must not modify +// txnState's values.
+func (txn *TableTransaction[TKey, TVal]) GetWithStateOrCompute( + txnState state.NestedTransactionPreparer, + key TKey, + computer ValueComputer[TKey, TVal], +) ( + TVal, + *snapshot.ExecutionSnapshot, + error, ) { var defaultVal TVal @@ -425,17 +494,17 @@ func (txn *TableTransaction[TKey, TVal]) GetOrCompute( if ok { err := txnState.AttachAndCommitNestedTransaction(state) if err != nil { - return defaultVal, fmt.Errorf( + return defaultVal, nil, fmt.Errorf( "failed to replay cached state: %w", err) } - return val, nil + return val, state, nil } nestedTxId, err := txnState.BeginNestedTransaction() if err != nil { - return defaultVal, fmt.Errorf("failed to start nested txn: %w", err) + return defaultVal, nil, fmt.Errorf("failed to start nested txn: %w", err) } val, err = computer.Compute(txnState, key) @@ -449,12 +518,12 @@ func (txn *TableTransaction[TKey, TVal]) GetOrCompute( } if err != nil { - return defaultVal, fmt.Errorf("failed to derive value: %w", err) + return defaultVal, nil, fmt.Errorf("failed to derive value: %w", err) } txn.set(key, val, committedState) - return val, nil + return val, committedState, nil } func (txn *TableTransaction[TKey, TVal]) AddInvalidator( diff --git a/fvm/storage/derived/table_test.go b/fvm/storage/derived/table_test.go index 2d131c0f500..1089a03f4ad 100644 --- a/fvm/storage/derived/table_test.go +++ b/fvm/storage/derived/table_test.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/state" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) func newEmptyTestBlock() *DerivedDataTable[string, *string] { @@ -482,7 +483,7 @@ func TestDerivedDataTableCommitWriteOnlyTransactionWithInvalidation(t *testing.T require.Equal(t, 0, len(block.EntriesForTestingOnly())) } -func TestDerivedDataTableCommitUseOriginalEntryOnDuplicateWriteEntries(t *testing.T) { +func TestDerivedDataTableCommitErrorOnDuplicateWriteEntries(t *testing.T) { block := newEmptyTestBlock() testSetupTxn, err := block.NewTableTransaction(0, 11) @@ -514,7 +515,9 @@ func TestDerivedDataTableCommitUseOriginalEntryOnDuplicateWriteEntries(t *testin testTxn.SetForTestingOnly(key, otherValue, otherSnapshot) err = testTxn.Commit() - require.NoError(t, err) + + require.Error(t, err) + require.True(t, errors.IsRetryableConflictError(err)) entries = block.EntriesForTestingOnly() require.Equal(t, 1, len(entries)) @@ -523,11 +526,6 @@ func TestDerivedDataTableCommitUseOriginalEntryOnDuplicateWriteEntries(t *testin require.True(t, ok) require.Same(t, expectedEntry, actualEntry) - require.False(t, actualEntry.isInvalid) - require.Same(t, expectedValue, actualEntry.Value) - require.Same(t, expectedSnapshot, actualEntry.ExecutionSnapshot) - require.NotSame(t, otherValue, actualEntry.Value) - require.NotSame(t, otherSnapshot, actualEntry.ExecutionSnapshot) } func TestDerivedDataTableCommitReadOnlyTransactionNoInvalidation(t *testing.T) { @@ -965,7 +963,7 @@ func (computer *testValueComputer) Compute( func TestDerivedDataTableGetOrCompute(t *testing.T) { blockDerivedData := NewEmptyTable[flow.RegisterID, int](0) - key := flow.NewRegisterID("addr", "key") + key := flow.NewRegisterID(unittest.RandomAddressFixture(), "key") value := 12345 t.Run("compute value", func(t *testing.T) { @@ -1003,7 +1001,7 @@ func TestDerivedDataTableGetOrCompute(t *testing.T) { // Commit to setup the next test. 
err = txnDerivedData.Commit() - assert.Nil(t, err) + assert.NoError(t, err) }) t.Run("get value", func(t *testing.T) { diff --git a/fvm/storage/primary/block_data.go b/fvm/storage/primary/block_data.go index bf5c3d7aa58..df5d08978fd 100644 --- a/fvm/storage/primary/block_data.go +++ b/fvm/storage/primary/block_data.go @@ -86,7 +86,7 @@ func (block *BlockData) NewTransactionData( executionTime > logical.LargestNormalTransactionExecutionTime { return nil, fmt.Errorf( - "invalid tranaction: execution time out of bound") + "invalid transaction: execution time out of bound") } txn := block.newTransactionData( @@ -104,6 +104,15 @@ func (block *BlockData) NewTransactionData( return txn, nil } +func (block *BlockData) NewCachingSnapshotReadTransactionData( + parameters state.StateParameters, +) *TransactionData { + return block.newTransactionData( + false, + logical.EndOfBlockExecutionTime, + parameters) +} + func (block *BlockData) NewSnapshotReadTransactionData( parameters state.StateParameters, ) *TransactionData { diff --git a/fvm/storage/snapshot/execution_snapshot.go b/fvm/storage/snapshot/execution_snapshot.go index 89cabec443a..9b26e4e07b6 100644 --- a/fvm/storage/snapshot/execution_snapshot.go +++ b/fvm/storage/snapshot/execution_snapshot.go @@ -1,6 +1,8 @@ package snapshot import ( + "strings" + "golang.org/x/exp/slices" "github.com/onflow/flow-go/fvm/meter" @@ -29,14 +31,22 @@ func (snapshot *ExecutionSnapshot) UpdatedRegisters() flow.RegisterEntries { entries = append(entries, flow.RegisterEntry{Key: key, Value: value}) } - slices.SortFunc(entries, func(a, b flow.RegisterEntry) bool { - return (a.Key.Owner < b.Key.Owner) || - (a.Key.Owner == b.Key.Owner && a.Key.Key < b.Key.Key) + slices.SortFunc(entries, func(a, b flow.RegisterEntry) int { + ownerCmp := strings.Compare(a.Key.Owner, b.Key.Owner) + if ownerCmp != 0 { + return ownerCmp + } + return strings.Compare(a.Key.Key, b.Key.Key) }) return entries } +// UpdatedRegisterSet returns all registers that were updated by this view. +func (snapshot *ExecutionSnapshot) UpdatedRegisterSet() map[flow.RegisterID]flow.RegisterValue { + return snapshot.WriteSet +} + // UpdatedRegisterIDs returns all register ids that were updated by this // view. The returned ids are unsorted. func (snapshot *ExecutionSnapshot) UpdatedRegisterIDs() []flow.RegisterID { diff --git a/fvm/storage/snapshot/mock/peeker.go b/fvm/storage/snapshot/mock/peeker.go new file mode 100644 index 00000000000..d20fc001ac3 --- /dev/null +++ b/fvm/storage/snapshot/mock/peeker.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// Peeker is an autogenerated mock type for the Peeker type +type Peeker struct { + mock.Mock +} + +// Peek provides a mock function with given fields: id +func (_m *Peeker) Peek(id flow.RegisterID) (flow.RegisterValue, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Peek") + } + + var r0 flow.RegisterValue + var r1 error + if rf, ok := ret.Get(0).(func(flow.RegisterID) (flow.RegisterValue, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.RegisterID) flow.RegisterValue); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.RegisterValue) + } + } + + if rf, ok := ret.Get(1).(func(flow.RegisterID) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewPeeker creates a new instance of Peeker. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPeeker(t interface { + mock.TestingT + Cleanup(func()) +}) *Peeker { + mock := &Peeker{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/storage/snapshot/mock/storage_snapshot.go b/fvm/storage/snapshot/mock/storage_snapshot.go new file mode 100644 index 00000000000..4164561d381 --- /dev/null +++ b/fvm/storage/snapshot/mock/storage_snapshot.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// StorageSnapshot is an autogenerated mock type for the StorageSnapshot type +type StorageSnapshot struct { + mock.Mock +} + +// Get provides a mock function with given fields: id +func (_m *StorageSnapshot) Get(id flow.RegisterID) (flow.RegisterValue, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 flow.RegisterValue + var r1 error + if rf, ok := ret.Get(0).(func(flow.RegisterID) (flow.RegisterValue, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.RegisterID) flow.RegisterValue); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.RegisterValue) + } + } + + if rf, ok := ret.Get(1).(func(flow.RegisterID) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewStorageSnapshot creates a new instance of StorageSnapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewStorageSnapshot(t interface { + mock.TestingT + Cleanup(func()) +}) *StorageSnapshot { + mock := &StorageSnapshot{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/storage/snapshot/snapshot_tree_test.go b/fvm/storage/snapshot/snapshot_tree_test.go index 5ccf83481e6..0395e861a7f 100644 --- a/fvm/storage/snapshot/snapshot_tree_test.go +++ b/fvm/storage/snapshot/snapshot_tree_test.go @@ -10,10 +10,10 @@ import ( ) func TestSnapshotTree(t *testing.T) { - id1 := flow.NewRegisterID("1", "") - id2 := flow.NewRegisterID("2", "") - id3 := flow.NewRegisterID("3", "") - missingId := flow.NewRegisterID("missing", "") + id1 := flow.NewRegisterID(flow.HexToAddress("0x1"), "") + id2 := flow.NewRegisterID(flow.HexToAddress("0x2"), "") + id3 := flow.NewRegisterID(flow.HexToAddress("0x3"), "") + missingId := flow.NewRegisterID(flow.HexToAddress("0x99"), "") value1v0 := flow.RegisterValue("1v0") diff --git a/fvm/storage/state/execution_state.go b/fvm/storage/state/execution_state.go index 8f9a03f2dab..425b9f4d60e 100644 --- a/fvm/storage/state/execution_state.go +++ b/fvm/storage/state/execution_state.go @@ -3,7 +3,8 @@ package state import ( "fmt" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" + "github.com/onflow/crypto/hash" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/meter" @@ -39,6 +40,10 @@ type StateParameters struct { maxValueSizeAllowed uint64 } +type ExecutionParameters struct { + meter.MeterParameters +} + func DefaultParameters() StateParameters { return StateParameters{ MeterParameters: meter.DefaultParameters(), @@ -75,38 +80,51 @@ func (params StateParameters) WithMaxValueSizeAllowed( } type limitsController struct { - enforceLimits bool + meteringEnabled bool maxKeySizeAllowed uint64 maxValueSizeAllowed uint64 } func newLimitsController(params StateParameters) *limitsController { return &limitsController{ - enforceLimits: true, + meteringEnabled: true, maxKeySizeAllowed: params.maxKeySizeAllowed, maxValueSizeAllowed: params.maxValueSizeAllowed, } } -func (controller *limitsController) RunWithAllLimitsDisabled(f func()) { +func (controller *limitsController) RunWithMeteringDisabled(f func()) { if f == nil { return } - current := controller.enforceLimits - controller.enforceLimits = false + current := controller.meteringEnabled + controller.meteringEnabled = false f() - controller.enforceLimits = current + controller.meteringEnabled = current } // NewExecutionState constructs a new state func NewExecutionState( snapshot snapshot.StorageSnapshot, params StateParameters, +) *ExecutionState { + return NewExecutionStateWithSpockStateHasher( + snapshot, + params, + DefaultSpockSecretHasher, + ) +} + +// NewExecutionStateWithSpockStateHasher constructs a new state with a custom hasher +func NewExecutionStateWithSpockStateHasher( + snapshot snapshot.StorageSnapshot, + params StateParameters, + getHasher func() hash.Hasher, ) *ExecutionState { m := meter.NewMeter(params.MeterParameters) return &ExecutionState{ finalized: false, - spockState: newSpockState(snapshot), + spockState: newSpockState(snapshot, getHasher), meter: m, limitsController: newLimitsController(params), } @@ -115,19 +133,19 @@ // NewChildWithMeterParams generates a new child state using the provided meter // parameters.
func (state *ExecutionState) NewChildWithMeterParams( - params meter.MeterParameters, + params ExecutionParameters, ) *ExecutionState { return &ExecutionState{ finalized: false, spockState: state.spockState.NewChild(), - meter: meter.NewMeter(params), + meter: meter.NewMeter(params.MeterParameters), limitsController: state.limitsController, } } // NewChild generates a new child state using the parent's meter parameters. func (state *ExecutionState) NewChild() *ExecutionState { - return state.NewChildWithMeterParams(state.meter.MeterParameters) + return state.NewChildWithMeterParams(state.ExecutionParameters()) } // InteractionUsed returns the amount of ledger interaction (total ledger byte read + total ledger byte written) @@ -157,7 +175,7 @@ func (state *ExecutionState) Get(id flow.RegisterID) (flow.RegisterValue, error) var value []byte var err error - if state.enforceLimits { + if state.meteringEnabled { if err = state.checkSize(id, []byte{}); err != nil { return nil, err } @@ -170,7 +188,7 @@ func (state *ExecutionState) Get(id flow.RegisterID) (flow.RegisterValue, error) return nil, fmt.Errorf("failed to read %s: %w", id, getError) } - err = state.meter.MeterStorageRead(id, value, state.enforceLimits) + err = state.meter.MeterStorageRead(id, value, state.meteringEnabled) return value, err } @@ -180,7 +198,7 @@ func (state *ExecutionState) Set(id flow.RegisterID, value flow.RegisterValue) e return fmt.Errorf("cannot Set on a finalized state") } - if state.enforceLimits { + if state.meteringEnabled { if err := state.checkSize(id, value); err != nil { return err } @@ -193,21 +211,34 @@ func (state *ExecutionState) Set(id flow.RegisterID, value flow.RegisterValue) e return fmt.Errorf("failed to update %s: %w", id, setError) } - return state.meter.MeterStorageWrite(id, value, state.enforceLimits) + return state.meter.MeterStorageWrite(id, value, state.meteringEnabled) } // MeterComputation meters computation usage -func (state *ExecutionState) MeterComputation(kind common.ComputationKind, intensity uint) error { +func (state *ExecutionState) MeterComputation(usage common.ComputationUsage) error { if state.finalized { return fmt.Errorf("cannot MeterComputation on a finalized state") } - if state.enforceLimits { - return state.meter.MeterComputation(kind, intensity) + if state.meteringEnabled { + return state.meter.MeterComputation(usage) } return nil } +// ComputationAvailable checks if enough computation capacity is available without metering +func (state *ExecutionState) ComputationAvailable(usage common.ComputationUsage) bool { + if state.finalized { + // if state is finalized return false + return false + } + + if state.meteringEnabled { + return state.meter.ComputationAvailable(usage) + } + return true +} + // TotalComputationUsed returns total computation used func (state *ExecutionState) TotalComputationUsed() uint64 { return state.meter.TotalComputationUsed() @@ -219,26 +250,26 @@ func (state *ExecutionState) ComputationIntensities() meter.MeteredComputationIn } // TotalComputationLimit returns total computation limit -func (state *ExecutionState) TotalComputationLimit() uint { +func (state *ExecutionState) TotalComputationLimit() uint64 { return state.meter.TotalComputationLimit() } // MeterMemory meters memory usage -func (state *ExecutionState) MeterMemory(kind common.MemoryKind, intensity uint) error { +func (state *ExecutionState) MeterMemory(usage common.MemoryUsage) error { if state.finalized { return fmt.Errorf("cannot MeterMemory on a finalized state") } - if 
state.enforceLimits { - return state.meter.MeterMemory(kind, intensity) + if state.meteringEnabled { + return state.meter.MeterMemory(usage) } return nil } -// MemoryIntensities returns computation intensities -func (state *ExecutionState) MemoryIntensities() meter.MeteredMemoryIntensities { - return state.meter.MemoryIntensities() +// MemoryAmounts returns memory amounts +func (state *ExecutionState) MemoryAmounts() meter.MeteredMemoryAmounts { + return state.meter.MemoryAmounts() } // TotalMemoryEstimate returns total memory used @@ -256,7 +287,7 @@ func (state *ExecutionState) MeterEmittedEvent(byteSize uint64) error { return fmt.Errorf("cannot MeterEmittedEvent on a finalized state") } - if state.enforceLimits { + if state.meteringEnabled { return state.meter.MeterEmittedEvent(byteSize) } @@ -310,6 +341,12 @@ func (state *ExecutionState) checkSize( return nil } +func (state *ExecutionState) ExecutionParameters() ExecutionParameters { + return ExecutionParameters{ + MeterParameters: state.meter.MeterParameters, + } +} + func (state *ExecutionState) readSetSize() int { return state.spockState.readSetSize() } diff --git a/fvm/storage/state/execution_state_test.go b/fvm/storage/state/execution_state_test.go index 84184f1f4f7..29961fa7e98 100644 --- a/fvm/storage/state/execution_state_test.go +++ b/fvm/storage/state/execution_state_test.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/fvm/meter" "github.com/onflow/flow-go/fvm/storage/state" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) func createByteArray(size int) []byte { @@ -23,12 +24,12 @@ func TestExecutionState_Finalize(t *testing.T) { child := parent.NewChild() - readId := flow.NewRegisterID("0", "x") + readId := flow.NewRegisterID(unittest.RandomAddressFixture(), "x") _, err := child.Get(readId) require.NoError(t, err) - writeId := flow.NewRegisterID("1", "y") + writeId := flow.NewRegisterID(unittest.RandomAddressFixture(), "y") writeValue := flow.RegisterValue("a") err = child.Set(writeId, writeValue) @@ -39,7 +40,7 @@ func TestExecutionState_Finalize(t *testing.T) { require.Equal( t, map[flow.RegisterID]struct{}{ - readId: struct{}{}, + readId: {}, }, childSnapshot.ReadSet) @@ -65,8 +66,10 @@ func TestExecutionState_Finalize(t *testing.T) { func TestExecutionState_ChildMergeFunctionality(t *testing.T) { st := state.NewExecutionState(nil, state.DefaultParameters()) + owner := unittest.RandomAddressFixture() + t.Run("test read from parent state (backoff)", func(t *testing.T) { - key := flow.NewRegisterID("address", "key1") + key := flow.NewRegisterID(owner, "key1") value := createByteArray(1) // set key1 on parent err := st.Set(key, value) @@ -80,7 +83,7 @@ func TestExecutionState_ChildMergeFunctionality(t *testing.T) { }) t.Run("test write to child (no merge)", func(t *testing.T) { - key := flow.NewRegisterID("address", "key2") + key := flow.NewRegisterID(owner, "key2") value := createByteArray(2) stChild := st.NewChild() @@ -95,7 +98,7 @@ func TestExecutionState_ChildMergeFunctionality(t *testing.T) { }) t.Run("test write to child and merge", func(t *testing.T) { - key := flow.NewRegisterID("address", "key3") + key := flow.NewRegisterID(owner, "key3") value := createByteArray(3) stChild := st.NewChild() @@ -119,7 +122,7 @@ func TestExecutionState_ChildMergeFunctionality(t *testing.T) { }) t.Run("test write to ledger", func(t *testing.T) { - key := flow.NewRegisterID("address", "key4") + key := flow.NewRegisterID(owner, "key4") value := createByteArray(4) // set key4 on parent err := 
st.Set(key, value) @@ -138,7 +141,7 @@ func TestExecutionState_MaxValueSize(t *testing.T) { nil, state.DefaultParameters().WithMaxValueSizeAllowed(6)) - key := flow.NewRegisterID("address", "key") + key := flow.NewRegisterID(unittest.RandomAddressFixture(), "key") // update should pass value := createByteArray(5) @@ -157,8 +160,8 @@ func TestExecutionState_MaxKeySize(t *testing.T) { // Note: owners are always 8 bytes state.DefaultParameters().WithMaxKeySizeAllowed(8+2)) - key1 := flow.NewRegisterID("1", "23") - key2 := flow.NewRegisterID("123", "234") + key1 := flow.NewRegisterID(unittest.RandomAddressFixture(), "23") + key2 := flow.NewRegisterID(unittest.RandomAddressFixture(), "234") // read _, err := st.Get(key1) @@ -179,19 +182,19 @@ func TestExecutionState_MaxKeySize(t *testing.T) { } func TestExecutionState_MaxInteraction(t *testing.T) { - key1 := flow.NewRegisterID("1", "2") + key1 := flow.NewRegisterID(unittest.RandomAddressFixture(), "2") key1Size := uint64(8 + 1) value1 := []byte("A") value1Size := uint64(1) - key2 := flow.NewRegisterID("123", "23") + key2 := flow.NewRegisterID(unittest.RandomAddressFixture(), "23") key2Size := uint64(8 + 2) - key3 := flow.NewRegisterID("234", "345") + key3 := flow.NewRegisterID(unittest.RandomAddressFixture(), "345") key3Size := uint64(8 + 3) - key4 := flow.NewRegisterID("3", "4567") + key4 := flow.NewRegisterID(unittest.RandomAddressFixture(), "4567") key4Size := uint64(8 + 4) st := state.NewExecutionState( diff --git a/fvm/storage/state/spock_state.go b/fvm/storage/state/spock_state.go index 9a47ac08710..56244e9b257 100644 --- a/fvm/storage/state/spock_state.go +++ b/fvm/storage/state/spock_state.go @@ -4,7 +4,8 @@ import ( "encoding/binary" "fmt" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" ) @@ -23,6 +24,8 @@ type spockState struct { spockSecretHasher hash.Hasher + getHasher func() hash.Hasher + // NOTE: spockState is no longer accessible once Finalize is called. We // can't support access after Finalize since spockSecretHasher.SumHash is // not idempotent. 
Repeated calls to SumHash (without modifying the input) @@ -30,17 +33,26 @@ finalizedSpockSecret []byte } -func newSpockState(base snapshot.StorageSnapshot) *spockState { +// DefaultSpockSecretHasher returns a new SHA3_256 hasher +var DefaultSpockSecretHasher = func() hash.Hasher { + return hash.NewSHA3_256() +} + +// newSpockState creates a new spock state +// getHasher will be called to create a new hasher for the spock state and each child state +func newSpockState(base snapshot.StorageSnapshot, getHasher func() hash.Hasher) *spockState { return &spockState{ storageState: newStorageState(base), - spockSecretHasher: hash.NewSHA3_256(), + spockSecretHasher: getHasher(), + getHasher: getHasher, } } func (state *spockState) NewChild() *spockState { return &spockState{ storageState: state.storageState.NewChild(), - spockSecretHasher: hash.NewSHA3_256(), + spockSecretHasher: state.getHasher(), + getHasher: state.getHasher, } } diff --git a/fvm/storage/state/spock_state_test.go b/fvm/storage/state/spock_state_test.go index eafd30c1305..bbf9b6ccb07 100644 --- a/fvm/storage/state/spock_state_test.go +++ b/fvm/storage/state/spock_state_test.go @@ -9,10 +9,13 @@ import ( "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/rand" + "github.com/onflow/flow-go/utils/unittest" ) type spockTestOp func(*testing.T, *spockState) +var fooOwner = unittest.RandomAddressFixture() + func chainSpockTestOps(prevOps spockTestOp, op spockTestOp) spockTestOp { return func(t *testing.T, state *spockState) { if prevOps != nil { @@ -28,8 +31,8 @@ func testSpock( ) []*spockState { resultStates := []*spockState{} for _, experiment := range counterfactualExperiments { - run1 := newSpockState(snapshot.MapStorageSnapshot{}) - run2 := newSpockState(snapshot.MapStorageSnapshot{}) + run1 := newSpockState(snapshot.MapStorageSnapshot{}, DefaultSpockSecretHasher) + run2 := newSpockState(snapshot.MapStorageSnapshot{}, DefaultSpockSecretHasher) if experiment != nil { experiment(t, run1) @@ -50,7 +53,8 @@ } func TestSpockStateGet(t *testing.T) { - registerId := flow.NewRegisterID("foo", "bar") + otherOwner := unittest.RandomAddressFixture() + registerId := flow.NewRegisterID(fooOwner, "bar") states := testSpock( t, @@ -71,11 +75,11 @@ }, // Reading different register ids will result in different spock func(t *testing.T, state *spockState) { - _, err := state.Get(flow.NewRegisterID("fo0", "bar")) + _, err := state.Get(flow.NewRegisterID(otherOwner, "bar")) require.NoError(t, err) }, func(t *testing.T, state *spockState) { - _, err := state.Get(flow.NewRegisterID("foo", "baR")) + _, err := state.Get(flow.NewRegisterID(fooOwner, "baR")) require.NoError(t, err) }, }) @@ -94,7 +98,7 @@ } func TestSpockStateGetDifferentUnderlyingStorage(t *testing.T) { - badRegisterId := flow.NewRegisterID("foo", "bad") + badRegisterId := flow.NewRegisterID(fooOwner, "bad") value1 := flow.RegisterValue([]byte("abc")) value2 := flow.RegisterValue([]byte("blah")) @@ -102,12 +106,16 @@ state1 := newSpockState( snapshot.MapStorageSnapshot{ badRegisterId: value1, - }) + }, + DefaultSpockSecretHasher, + ) state2 := newSpockState( snapshot.MapStorageSnapshot{ badRegisterId: value2, - }) + }, + DefaultSpockSecretHasher, + ) value, err := state1.Get(badRegisterId) require.NoError(t, err) @@ -127,7 +135,7 @@ func 
TestSpockStateGetDifferentUnderlyingStorage(t *testing.T) { } func TestSpockStateGetVsSetNil(t *testing.T) { - registerId := flow.NewRegisterID("foo", "bar") + registerId := flow.NewRegisterID(fooOwner, "bar") _ = testSpock( t, @@ -144,7 +152,8 @@ func TestSpockStateGetVsSetNil(t *testing.T) { } func TestSpockStateSet(t *testing.T) { - registerId := flow.NewRegisterID("foo", "bar") + otherOwner := unittest.RandomAddressFixture() + registerId := flow.NewRegisterID(fooOwner, "bar") value := flow.RegisterValue([]byte("value")) states := testSpock( @@ -166,11 +175,11 @@ func TestSpockStateSet(t *testing.T) { }, // Setting different register id will result in different spock func(t *testing.T, state *spockState) { - err := state.Set(flow.NewRegisterID("foo", "baR"), value) + err := state.Set(flow.NewRegisterID(fooOwner, "baR"), value) require.NoError(t, err) }, func(t *testing.T, state *spockState) { - err := state.Set(flow.NewRegisterID("foO", "bar"), value) + err := state.Set(flow.NewRegisterID(otherOwner, "bar"), value) require.NoError(t, err) }, // Setting different register value will result in different spock @@ -194,8 +203,8 @@ func TestSpockStateSet(t *testing.T) { } func TestSpockStateSetValueInjection(t *testing.T) { - registerId1 := flow.NewRegisterID("foo", "injection") - registerId2 := flow.NewRegisterID("foo", "inject") + registerId1 := flow.NewRegisterID(fooOwner, "injection") + registerId2 := flow.NewRegisterID(fooOwner, "inject") _ = testSpock( t, @@ -213,7 +222,7 @@ func TestSpockStateSetValueInjection(t *testing.T) { func TestSpockStateMerge(t *testing.T) { readSet := map[flow.RegisterID]struct{}{ - flow.NewRegisterID("foo", "bar"): struct{}{}, + flow.NewRegisterID(fooOwner, "bar"): struct{}{}, } states := testSpock( @@ -265,13 +274,13 @@ func TestSpockStateMerge(t *testing.T) { require.ErrorContains(t, err, "cannot Merge on a finalized state") } func TestSpockStateDropChanges(t *testing.T) { - registerId := flow.NewRegisterID("foo", "read") + registerId := flow.NewRegisterID(fooOwner, "read") setup := func(t *testing.T, state *spockState) { _, err := state.Get(registerId) require.NoError(t, err) - err = state.Set(flow.NewRegisterID("foo", "write"), []byte("blah")) + err = state.Set(flow.NewRegisterID(fooOwner, "write"), []byte("blah")) require.NoError(t, err) } @@ -331,7 +340,7 @@ func TestSpockStateRandomOps(t *testing.T) { chain[len(chain)-1], func(t *testing.T, state *spockState) { _, err := state.Get( - flow.NewRegisterID("", fmt.Sprintf("%d", id))) + flow.NewRegisterID(flow.EmptyAddress, fmt.Sprintf("%d", id))) require.NoError(t, err) })) case uint(1): @@ -347,7 +356,7 @@ func TestSpockStateRandomOps(t *testing.T) { chain[len(chain)-1], func(t *testing.T, state *spockState) { err := state.Set( - flow.NewRegisterID("", fmt.Sprintf("%d", id)), + flow.NewRegisterID(flow.EmptyAddress, fmt.Sprintf("%d", id)), []byte(fmt.Sprintf("%d", value))) require.NoError(t, err) })) @@ -383,23 +392,28 @@ func TestSpockStateRandomOps(t *testing.T) { _ = testSpock(t, chain) } func TestSpockStateNewChild(t *testing.T) { - baseRegisterId := flow.NewRegisterID("", "base") + baseRegisterId := flow.NewRegisterID(flow.EmptyAddress, "base") baseValue := flow.RegisterValue([]byte("base")) - parentRegisterId1 := flow.NewRegisterID("parent", "1") + parentOwner := unittest.RandomAddressFixture() + childOwner := unittest.RandomAddressFixture() + + parentRegisterId1 := flow.NewRegisterID(parentOwner, "1") parentValue := flow.RegisterValue([]byte("parent")) - parentRegisterId2 := 
flow.NewRegisterID("parent", "2") + parentRegisterId2 := flow.NewRegisterID(parentOwner, "2") - childRegisterId1 := flow.NewRegisterID("child", "1") + childRegisterId1 := flow.NewRegisterID(childOwner, "1") childValue := flow.RegisterValue([]byte("child")) - childRegisterId2 := flow.NewRegisterID("child", "2") + childRegisterId2 := flow.NewRegisterID(childOwner, "2") parent := newSpockState( snapshot.MapStorageSnapshot{ baseRegisterId: baseValue, - }) + }, + DefaultSpockSecretHasher, + ) err := parent.Set(parentRegisterId1, parentValue) require.NoError(t, err) diff --git a/fvm/storage/state/storage_state_test.go b/fvm/storage/state/storage_state_test.go index 87ff6a195ac..ab852bd91f5 100644 --- a/fvm/storage/state/storage_state_test.go +++ b/fvm/storage/state/storage_state_test.go @@ -7,13 +7,16 @@ import ( "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) func TestStorageStateSet(t *testing.T) { - registerId1 := flow.NewRegisterID("foo", "1") + fooOwner := unittest.RandomAddressFixture() + + registerId1 := flow.NewRegisterID(fooOwner, "1") value1 := flow.RegisterValue([]byte("value1")) - registerId2 := flow.NewRegisterID("foo", "2") + registerId2 := flow.NewRegisterID(fooOwner, "2") value2 := flow.RegisterValue([]byte("value2")) state := newStorageState(nil) @@ -40,13 +43,13 @@ func TestStorageStateSet(t *testing.T) { func TestStorageStateGetFromNilBase(t *testing.T) { state := newStorageState(nil) - value, err := state.Get(flow.NewRegisterID("foo", "bar")) + value, err := state.Get(flow.NewRegisterID(unittest.RandomAddressFixture(), "bar")) require.NoError(t, err) require.Nil(t, value) } func TestStorageStateGetFromBase(t *testing.T) { - registerId := flow.NewRegisterID("", "base") + registerId := flow.NewRegisterID(flow.EmptyAddress, "base") baseValue := flow.RegisterValue([]byte("base")) state := newStorageState( @@ -89,7 +92,7 @@ func TestStorageStateGetFromBase(t *testing.T) { } func TestStorageStateGetFromWriteSet(t *testing.T) { - registerId := flow.NewRegisterID("", "base") + registerId := flow.NewRegisterID(flow.EmptyAddress, "base") expectedValue := flow.RegisterValue([]byte("base")) state := newStorageState(nil) @@ -112,22 +115,25 @@ func TestStorageStateGetFromWriteSet(t *testing.T) { } func TestStorageStateMerge(t *testing.T) { - baseRegisterId := flow.NewRegisterID("", "base") + parentOwner := unittest.RandomAddressFixture() + childOwner := unittest.RandomAddressFixture() + + baseRegisterId := flow.NewRegisterID(flow.EmptyAddress, "base") baseValue := flow.RegisterValue([]byte("base")) - parentRegisterId1 := flow.NewRegisterID("parent", "1") + parentRegisterId1 := flow.NewRegisterID(parentOwner, "1") parentValue := flow.RegisterValue([]byte("parent")) - parentRegisterId2 := flow.NewRegisterID("parent", "2") + parentRegisterId2 := flow.NewRegisterID(parentOwner, "2") - parentRegisterId3 := flow.NewRegisterID("parent", "3") + parentRegisterId3 := flow.NewRegisterID(parentOwner, "3") originalParentValue3 := flow.RegisterValue([]byte("parent value")) updatedParentValue3 := flow.RegisterValue([]byte("child value")) - childRegisterId1 := flow.NewRegisterID("child", "1") + childRegisterId1 := flow.NewRegisterID(childOwner, "1") childValue1 := flow.RegisterValue([]byte("child")) - childRegisterId2 := flow.NewRegisterID("child", "2") + childRegisterId2 := flow.NewRegisterID(childOwner, "2") parent := newStorageState( snapshot.MapStorageSnapshot{ diff --git a/fvm/storage/state/transaction_state.go 
b/fvm/storage/state/transaction_state.go index 602fa282585..a24fb7c680c 100644 --- a/fvm/storage/state/transaction_state.go +++ b/fvm/storage/state/transaction_state.go @@ -3,7 +3,7 @@ package state import ( "fmt" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/onflow/flow-go/fvm/meter" "github.com/onflow/flow-go/fvm/storage/snapshot" @@ -20,13 +20,14 @@ func (id NestedTransactionId) StateForTestingOnly() *ExecutionState { } type Meter interface { - MeterComputation(kind common.ComputationKind, intensity uint) error + MeterComputation(usage common.ComputationUsage) error + ComputationAvailable(usage common.ComputationUsage) bool ComputationIntensities() meter.MeteredComputationIntensities - TotalComputationLimit() uint + TotalComputationLimit() uint64 TotalComputationUsed() uint64 - MeterMemory(kind common.MemoryKind, intensity uint) error - MemoryIntensities() meter.MeteredMemoryIntensities + MeterMemory(usage common.MemoryUsage) error + MemoryAmounts() meter.MeteredMemoryAmounts TotalMemoryEstimate() uint64 InteractionUsed() uint64 @@ -34,8 +35,13 @@ type Meter interface { MeterEmittedEvent(byteSize uint64) error TotalEmittedEventBytes() uint64 - // RunWithAllLimitsDisabled runs f with limits disabled - RunWithAllLimitsDisabled(f func()) + // RunWithMeteringDisabled runs f with limits disabled + // This function can be used to run a function that fits one of these cases: + // - the function should not fail due to metering limits + // - the function is not invokable by the user and has a constant execution time (e.g. fee deduction) + // - the function is metered once before calling `RunWithMeteringDisabled` with a special weight (e.g. create account) + // and doesn't need additional metering inside the function + RunWithMeteringDisabled(f func()) } // NestedTransactionPreparer provides active transaction states and facilitates @@ -43,6 +49,9 @@ type Meter interface { type NestedTransactionPreparer interface { Meter + // ExecutionParameters returns the execution parameters + ExecutionParameters() ExecutionParameters + // NumNestedTransactions returns the number of uncommitted nested // transactions. Note that the main transaction is not considered a // nested transaction. @@ -82,7 +91,7 @@ type NestedTransactionPreparer interface { // the provided meter parameters. This returns error if the current nested // transaction is program restricted. BeginNestedTransactionWithMeterParams( - params meter.MeterParameters, + params ExecutionParameters, ) ( NestedTransactionId, error, @@ -176,9 +185,17 @@ func NewTransactionState( params StateParameters, ) NestedTransactionPreparer { startState := NewExecutionState(snapshot, params) + return NewTransactionStateFromExecutionState(startState) +} + +// NewTransactionStateFromExecutionState constructs a new state transaction directly +// from an execution state. 
+func NewTransactionStateFromExecutionState( + startState *ExecutionState, +) NestedTransactionPreparer { return &transactionState{ nestedTransactions: []nestedTransactionStackFrame{ - nestedTransactionStackFrame{ + { ExecutionState: startState, parseRestriction: nil, }, @@ -190,6 +207,10 @@ func (txnState *transactionState) current() nestedTransactionStackFrame { return txnState.nestedTransactions[txnState.NumNestedTransactions()] } +func (txnState *transactionState) ExecutionParameters() ExecutionParameters { + return txnState.current().ExecutionParameters() +} + func (txnState *transactionState) NumNestedTransactions() int { return len(txnState.nestedTransactions) - 1 } @@ -257,7 +278,7 @@ func (txnState *transactionState) BeginNestedTransaction() ( } func (txnState *transactionState) BeginNestedTransactionWithMeterParams( - params meter.MeterParameters, + params ExecutionParameters, ) ( NestedTransactionId, error, @@ -428,25 +449,23 @@ func (txnState *transactionState) Set( return txnState.current().Set(id, value) } -func (txnState *transactionState) MeterComputation( - kind common.ComputationKind, - intensity uint, -) error { - return txnState.current().MeterComputation(kind, intensity) +func (txnState *transactionState) MeterComputation(usage common.ComputationUsage) error { + return txnState.current().MeterComputation(usage) } -func (txnState *transactionState) MeterMemory( - kind common.MemoryKind, - intensity uint, -) error { - return txnState.current().MeterMemory(kind, intensity) +func (txnState *transactionState) ComputationAvailable(usage common.ComputationUsage) bool { + return txnState.current().ComputationAvailable(usage) +} + +func (txnState *transactionState) MeterMemory(usage common.MemoryUsage) error { + return txnState.current().MeterMemory(usage) } func (txnState *transactionState) ComputationIntensities() meter.MeteredComputationIntensities { return txnState.current().ComputationIntensities() } -func (txnState *transactionState) TotalComputationLimit() uint { +func (txnState *transactionState) TotalComputationLimit() uint64 { return txnState.current().TotalComputationLimit() } @@ -454,8 +473,8 @@ func (txnState *transactionState) TotalComputationUsed() uint64 { return txnState.current().TotalComputationUsed() } -func (txnState *transactionState) MemoryIntensities() meter.MeteredMemoryIntensities { - return txnState.current().MemoryIntensities() +func (txnState *transactionState) MemoryAmounts() meter.MeteredMemoryAmounts { + return txnState.current().MemoryAmounts() } func (txnState *transactionState) TotalMemoryEstimate() uint64 { @@ -474,6 +493,6 @@ func (txnState *transactionState) TotalEmittedEventBytes() uint64 { return txnState.current().TotalEmittedEventBytes() } -func (txnState *transactionState) RunWithAllLimitsDisabled(f func()) { - txnState.current().RunWithAllLimitsDisabled(f) +func (txnState *transactionState) RunWithMeteringDisabled(f func()) { + txnState.current().RunWithMeteringDisabled(f) } diff --git a/fvm/storage/state/transaction_state_test.go b/fvm/storage/state/transaction_state_test.go index 5f91fe8b4b5..f8b18b5d186 100644 --- a/fvm/storage/state/transaction_state_test.go +++ b/fvm/storage/state/transaction_state_test.go @@ -4,12 +4,13 @@ import ( "math" "testing" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm/meter" "github.com/onflow/flow-go/fvm/storage/state" "github.com/onflow/flow-go/model/flow" + 
"github.com/onflow/flow-go/utils/unittest" ) func newTestTransactionState() state.NestedTransactionPreparer { @@ -50,7 +51,7 @@ func TestUnrestrictedNestedTransactionBasic(t *testing.T) { // Ensure the values are written to the correctly nested state - key := flow.NewRegisterID("address", "key") + key := flow.NewRegisterID(unittest.RandomAddressFixture(), "key") val := createByteArray(2) err = txn.Set(key, val) @@ -103,7 +104,9 @@ func TestUnrestrictedNestedTransactionDifferentMeterParams(t *testing.T) { require.Equal(t, uint(math.MaxUint), mainState.TotalMemoryLimit()) id1, err := txn.BeginNestedTransactionWithMeterParams( - meter.DefaultParameters().WithMemoryLimit(1)) + state.ExecutionParameters{ + MeterParameters: meter.DefaultParameters().WithMemoryLimit(1), + }) require.NoError(t, err) nestedState1 := id1.StateForTestingOnly() @@ -111,7 +114,9 @@ func TestUnrestrictedNestedTransactionDifferentMeterParams(t *testing.T) { require.Equal(t, uint(1), nestedState1.TotalMemoryLimit()) id2, err := txn.BeginNestedTransactionWithMeterParams( - meter.DefaultParameters().WithMemoryLimit(2)) + state.ExecutionParameters{ + MeterParameters: meter.DefaultParameters().WithMemoryLimit(2), + }) require.NoError(t, err) nestedState2 := id2.StateForTestingOnly() @@ -173,7 +178,7 @@ func TestParseRestrictedNestedTransactionBasic(t *testing.T) { // Sanity check - key := flow.NewRegisterID("address", "key") + key := flow.NewRegisterID(unittest.RandomAddressFixture(), "key") v, err := restrictedNestedState2.Get(key) require.NoError(t, err) @@ -280,7 +285,7 @@ func TestRestartNestedTransaction(t *testing.T) { id, err := txn.BeginNestedTransaction() require.NoError(t, err) - key := flow.NewRegisterID("address", "key") + key := flow.NewRegisterID(unittest.RandomAddressFixture(), "key") val := createByteArray(2) for i := 0; i < 10; i++ { @@ -332,7 +337,7 @@ func TestRestartNestedTransactionWithInvalidId(t *testing.T) { id, err := txn.BeginNestedTransaction() require.NoError(t, err) - key := flow.NewRegisterID("address", "key") + key := flow.NewRegisterID(unittest.RandomAddressFixture(), "key") val := createByteArray(2) err = txn.Set(key, val) @@ -498,7 +503,7 @@ func TestFinalizeMainTransaction(t *testing.T) { id1, err := txn.BeginNestedTransaction() require.NoError(t, err) - registerId := flow.NewRegisterID("foo", "bar") + registerId := flow.NewRegisterID(unittest.RandomAddressFixture(), "bar") value, err := txn.Get(registerId) require.NoError(t, err) @@ -531,18 +536,21 @@ func TestInterimReadSet(t *testing.T) { // Setup test with a bunch of outstanding nested transaction. 
- readRegisterId1 := flow.NewRegisterID("read", "1") - readRegisterId2 := flow.NewRegisterID("read", "2") - readRegisterId3 := flow.NewRegisterID("read", "3") - readRegisterId4 := flow.NewRegisterID("read", "4") + readOwner := unittest.RandomAddressFixture() + writeOwner := unittest.RandomAddressFixture() + + readRegisterId1 := flow.NewRegisterID(readOwner, "1") + readRegisterId2 := flow.NewRegisterID(readOwner, "2") + readRegisterId3 := flow.NewRegisterID(readOwner, "3") + readRegisterId4 := flow.NewRegisterID(readOwner, "4") - writeRegisterId1 := flow.NewRegisterID("write", "1") + writeRegisterId1 := flow.NewRegisterID(writeOwner, "1") writeValue1 := flow.RegisterValue([]byte("value1")) - writeRegisterId2 := flow.NewRegisterID("write", "2") + writeRegisterId2 := flow.NewRegisterID(writeOwner, "2") writeValue2 := flow.RegisterValue([]byte("value2")) - writeRegisterId3 := flow.NewRegisterID("write", "3") + writeRegisterId3 := flow.NewRegisterID(writeOwner, "3") writeValue3 := flow.RegisterValue([]byte("value3")) err := txn.Set(writeRegisterId1, writeValue1) diff --git a/fvm/storage/transaction.go b/fvm/storage/transaction.go index 58b98de7b44..cd89eb302da 100644 --- a/fvm/storage/transaction.go +++ b/fvm/storage/transaction.go @@ -2,6 +2,7 @@ package storage import ( "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/fvm/storage/logical" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/fvm/storage/state" ) @@ -14,6 +15,9 @@ type TransactionPreparer interface { type Transaction interface { TransactionPreparer + // SnapshotTime returns the transaction's current snapshot time. + SnapshotTime() logical.Time + // Finalize convert transaction preparer's intermediate state into // committable state. Finalize() error @@ -27,9 +31,3 @@ type Transaction interface { // transaction is not committed. Commit() (*snapshot.ExecutionSnapshot, error) } - -// TODO(patrick): implement proper transaction. 
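The nested-transaction tests above show the new calling convention for custom limits: meter parameters are now wrapped in state.ExecutionParameters instead of being passed bare. A small sketch of the pattern (the uint64 limit type is inferred from the interface changes earlier in this diff):

```go
import (
	"github.com/onflow/flow-go/fvm/meter"
	"github.com/onflow/flow-go/fvm/storage/state"
)

// beginWithMemoryLimit starts a nested transaction whose memory limit differs
// from the parent's, mirroring the updated tests.
func beginWithMemoryLimit(
	txn state.NestedTransactionPreparer,
	limit uint64,
) (state.NestedTransactionId, error) {
	return txn.BeginNestedTransactionWithMeterParams(
		state.ExecutionParameters{
			MeterParameters: meter.DefaultParameters().WithMemoryLimit(limit),
		})
}
```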
-type SerialTransaction struct {
-	state.NestedTransactionPreparer
-	*derived.DerivedTransactionData
-}
diff --git a/fvm/systemcontracts/system_contracts.go b/fvm/systemcontracts/system_contracts.go
index fa416bdb715..d8145bf5a69 100644
--- a/fvm/systemcontracts/system_contracts.go
+++ b/fvm/systemcontracts/system_contracts.go
@@ -16,26 +16,52 @@ package systemcontracts
 import (
 	"fmt"
 
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/flow-core-contracts/lib/go/templates"
+
 	"github.com/onflow/flow-go/model/flow"
 )
 
 const (
-	// Unqualified names of system smart contracts (not including address prefix)
-	ContractNameEpoch             = "FlowEpoch"
-	ContractNameClusterQC         = "FlowClusterQC"
-	ContractNameDKG               = "FlowDKG"
-	ContractNameServiceAccount    = "FlowServiceAccount"
-	ContractNameFlowFees          = "FlowFees"
-	ContractNameStorageFees       = "FlowStorageFees"
-	ContractNameNodeVersionBeacon = "NodeVersionBeacon"
+	ContractNameEpoch                      = "FlowEpoch"
+	ContractNameIDTableStaking             = "FlowIDTableStaking"
+	ContractNameClusterQC                  = "FlowClusterQC"
+	ContractNameDKG                        = "FlowDKG"
+	ContractNameServiceAccount             = "FlowServiceAccount"
+	ContractNameFlowFees                   = "FlowFees"
+	ContractNameStorageFees                = "FlowStorageFees"
+	ContractNameFlowCallbackScheduler      = "FlowTransactionScheduler"
+	ContractNameNodeVersionBeacon          = "NodeVersionBeacon"
+	ContractNameRandomBeaconHistory        = "RandomBeaconHistory"
+	ContractNameFungibleToken              = "FungibleToken"
+	ContractNameFlowToken                  = "FlowToken"
+	ContractNameFungibleTokenSwitchboard   = "FungibleTokenSwitchboard"
+	ContractNameFungibleTokenMetadataViews = "FungibleTokenMetadataViews"
+	ContractNameNonFungibleToken           = "NonFungibleToken"
+	ContractNameMetadataViews              = "MetadataViews"
+	ContractNameViewResolver               = "ViewResolver"
+	ContractNameCrossVMMetadataViews       = "CrossVMMetadataViews"
+	ContractNameEVM                        = "EVM"
+	ContractNameBurner                     = "Burner"
+	ContractNameCrypto                     = "Crypto"
+	ContractNameMigration                  = "Migration"
+
+	// AccountNameEVMStorage is not a contract, but a special account that is used to store EVM state
+	AccountNameEVMStorage = "EVMStorageAccount"
+	// AccountNameExecutionParametersAccount is not a contract, but a special account that is used to store execution parameters.
+	// It is kept as a separate account on all networks in order to isolate it
+	// from the frequently changing data on the service account.
+	AccountNameExecutionParametersAccount = "ExecutionParametersAccount"
 
 	// Unqualified names of service events (not including address prefix or contract name)
-	EventNameEpochSetup    = "EpochSetup"
-	EventNameEpochCommit   = "EpochCommit"
-	EventNameVersionBeacon = "VersionBeacon"
+	EventNameEpochSetup                  = "EpochSetup"
+	EventNameEpochCommit                 = "EpochCommit"
+	EventNameEpochRecover                = "EpochRecover"
+	EventNameVersionBeacon               = "VersionBeacon"               // VersionBeacon only controls version of ENs, describing software compatibility via semantic versioning
+	EventNameProtocolStateVersionUpgrade = "ProtocolStateVersionUpgrade" // Protocol State version applies to all nodes and uses an _integer version_ of the _protocol state_
 
 	// Unqualified names of service event contract functions (not including address prefix or contract name)
@@ -46,6 +72,46 @@ const (
 	ContractStorageFeesFunction_calculateAccountCapacity                      = "calculateAccountCapacity"
 	ContractStorageFeesFunction_getAccountsCapacityForTransactionStorageCheck = "getAccountsCapacityForTransactionStorageCheck"
 	ContractStorageFeesFunction_defaultTokenAvailableBalance                  = "defaultTokenAvailableBalance"
+
+	// These are the account indexes of system contracts as deployed by the default bootstrapping.
+	// On long-running networks some of these contracts might have been deployed after bootstrapping,
+	// and therefore might not be at these indexes.
+
+	FungibleTokenAccountIndex = 2
+	FlowTokenAccountIndex     = 3
+	FlowFeesAccountIndex      = 4
+	EVMStorageAccountIndex    = 5
+
+	// LastSystemAccountIndex is the last index of a system account.
+	// Other addresses will be created after this one.
+	LastSystemAccountIndex = EVMStorageAccountIndex
+)
+
+// Well-known addresses for system contracts on long-running networks.
+// For now, all system contracts tracked by this package are deployed to the same
+// address (per chain) as the staking contract.
+//
+// Ref: https://docs.onflow.org/core-contracts/staking-contract-reference/
+var (
+	// stakingContractAddressMainnet is the address of the FlowIDTableStaking contract on Mainnet
+	stakingContractAddressMainnet = flow.HexToAddress("8624b52f9ddcd04a")
+	// stakingContractAddressTestnet is the address of the FlowIDTableStaking contract on Testnet
+	stakingContractAddressTestnet = flow.HexToAddress("9eca2b38b18b5dfe")
+
+	// nftTokenAddressMainnet is the address of the NonFungibleToken contract on Mainnet
+	nftTokenAddressMainnet = flow.HexToAddress("1d7e57aa55817448")
+	// nftTokenAddressTestnet is the address of the NonFungibleToken contract on Testnet
+	nftTokenAddressTestnet = flow.HexToAddress("631e88ae7f1d7c20")
+
+	// evmStorageAddressTestnet is the address of the EVM state storage contract on Testnet
+	evmStorageAddressTestnet = flow.HexToAddress("1a54ed2be7552821")
+	// evmStorageAddressMainnet is the address of the EVM state storage contract on Mainnet
+	evmStorageAddressMainnet = flow.HexToAddress("d421a63faae318f9")
+
+	// executionParametersAddressTestnet is the address of the execution parameters account on Testnet
+	executionParametersAddressTestnet = flow.HexToAddress("6997a2f2cf57b73a")
+	// executionParametersAddressMainnet is the address of the execution parameters account on Mainnet
+	executionParametersAddressMainnet = flow.HexToAddress("f426ff57ee8f6110")
 )
 
 // SystemContract represents a system contract on a particular chain.
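Since the account-index constants above pin where default bootstrapping places these accounts, the corresponding addresses can be derived rather than hard-coded on transient chains. A sketch using the same AddressAtIndex helper the init code below relies on (subject to the caveat in the comment above about post-bootstrapping deployments):

```go
import (
	"fmt"

	"github.com/onflow/flow-go/fvm/systemcontracts"
	"github.com/onflow/flow-go/model/flow"
)

// flowTokenAddress derives the FlowToken account for a chain from its
// bootstrapping index. Illustrative sketch only.
func flowTokenAddress(chainID flow.ChainID) (flow.Address, error) {
	addr, err := chainID.Chain().AddressAtIndex(systemcontracts.FlowTokenAccountIndex)
	if err != nil {
		return flow.EmptyAddress, fmt.Errorf("no address at index %d: %w", systemcontracts.FlowTokenAccountIndex, err)
	}
	return addr, nil
}
```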
@@ -54,6 +120,19 @@ type SystemContract struct { Name string } +func (c SystemContract) Location() common.AddressLocation { + return common.AddressLocation{ + Address: common.Address(c.Address), + Name: c.Name, + } +} + +// SystemAccount represents an address used by the system. +type SystemAccount struct { + Address flow.Address + Name string +} + // ServiceEvent represents a service event on a particular chain. type ServiceEvent struct { Address flow.Address @@ -75,17 +154,119 @@ func (se ServiceEvent) EventType() flow.EventType { // SystemContracts is a container for all system contracts on a particular chain. type SystemContracts struct { - Epoch SystemContract - ClusterQC SystemContract - DKG SystemContract - NodeVersionBeacon SystemContract + // epoch related contracts + Epoch SystemContract + IDTableStaking SystemContract + ClusterQC SystemContract + DKG SystemContract + + // service account related contracts + FlowServiceAccount SystemContract + FlowCallbackScheduler SystemContract + NodeVersionBeacon SystemContract + RandomBeaconHistory SystemContract + FlowStorageFees SystemContract + ExecutionParametersAccount SystemContract + + // token related contracts + FlowFees SystemContract + FlowToken SystemContract + FungibleToken SystemContract + FungibleTokenSwitchboard SystemContract + FungibleTokenMetadataViews SystemContract + + // NFT related contracts + NonFungibleToken SystemContract + MetadataViews SystemContract + ViewResolver SystemContract + CrossVMMetadataViews SystemContract + + // EVM related contracts + EVMContract SystemContract + EVMStorage SystemAccount + + // Utility contracts + Burner SystemContract + Crypto SystemContract + + // Migration contracts + Migration SystemContract +} + +// AsTemplateEnv returns a template environment with all system contracts filled in. +// This is useful for generating Cadence code from templates. +func (c SystemContracts) AsTemplateEnv() templates.Environment { + return templates.Environment{ + EpochAddress: c.Epoch.Address.Hex(), + IDTableAddress: c.IDTableStaking.Address.Hex(), + QuorumCertificateAddress: c.ClusterQC.Address.Hex(), + DkgAddress: c.DKG.Address.Hex(), + + ServiceAccountAddress: c.FlowServiceAccount.Address.Hex(), + NodeVersionBeaconAddress: c.NodeVersionBeacon.Address.Hex(), + RandomBeaconHistoryAddress: c.RandomBeaconHistory.Address.Hex(), + StorageFeesAddress: c.FlowStorageFees.Address.Hex(), + EVMAddress: c.EVMContract.Address.Hex(), + + FlowFeesAddress: c.FlowFees.Address.Hex(), + FlowTokenAddress: c.FlowToken.Address.Hex(), + FlowTransactionSchedulerAddress: c.FlowCallbackScheduler.Address.Hex(), + FungibleTokenAddress: c.FungibleToken.Address.Hex(), + FungibleTokenSwitchboardAddress: c.FungibleTokenSwitchboard.Address.Hex(), + FungibleTokenMetadataViewsAddress: c.FungibleTokenMetadataViews.Address.Hex(), + + NonFungibleTokenAddress: c.NonFungibleToken.Address.Hex(), + MetadataViewsAddress: c.MetadataViews.Address.Hex(), + ViewResolverAddress: c.ViewResolver.Address.Hex(), + CrossVMMetadataViewsAddress: c.CrossVMMetadataViews.Address.Hex(), + + BurnerAddress: c.Burner.Address.Hex(), + CryptoAddress: c.Crypto.Address.Hex(), + } +} + +// All returns all system contracts as a slice. 
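To make the two new accessors concrete: Location() yields the common.AddressLocation under which a contract's code is imported, and AsTemplateEnv() produces the environment consumed by flow-core-contracts template generation. A usage sketch, grounded only in the fields visible in this diff:

```go
import (
	"github.com/onflow/flow-go/fvm/systemcontracts"
	"github.com/onflow/flow-go/model/flow"
)

func templateEnvExample() {
	sc := systemcontracts.SystemContractsForChain(flow.Testnet)

	// AddressLocation combines the deployment address and contract name.
	loc := sc.FlowToken.Location()
	_ = loc

	// The template environment carries hex addresses for code generation.
	env := sc.AsTemplateEnv()
	_ = env.FlowTokenAddress
}
```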
+func (c SystemContracts) All() []SystemContract { + return []SystemContract{ + c.Epoch, + c.IDTableStaking, + c.ClusterQC, + c.DKG, + + c.FlowServiceAccount, + c.FlowCallbackScheduler, + c.NodeVersionBeacon, + c.RandomBeaconHistory, + c.FlowStorageFees, + + c.FlowFees, + c.FlowToken, + c.FungibleToken, + c.FungibleTokenMetadataViews, + c.FungibleTokenSwitchboard, + + c.NonFungibleToken, + c.MetadataViews, + c.ViewResolver, + c.CrossVMMetadataViews, + + c.EVMContract, + // EVMStorage is not included here, since it is not a contract + + c.Burner, + c.Crypto, + + c.Migration, + } } // ServiceEvents is a container for all service events on a particular chain. type ServiceEvents struct { - EpochSetup ServiceEvent - EpochCommit ServiceEvent - VersionBeacon ServiceEvent + EpochSetup ServiceEvent + EpochCommit ServiceEvent + EpochRecover ServiceEvent + VersionBeacon ServiceEvent + ProtocolStateVersionUpgrade ServiceEvent } // All returns all service events as a slice. @@ -93,126 +274,238 @@ func (se ServiceEvents) All() []ServiceEvent { return []ServiceEvent{ se.EpochSetup, se.EpochCommit, + se.EpochRecover, se.VersionBeacon, + se.ProtocolStateVersionUpgrade, } } // SystemContractsForChain returns the system contract configuration for the given chain. -func SystemContractsForChain(chainID flow.ChainID) (*SystemContracts, error) { - addresses, ok := contractAddressesByChainID[chainID] +// Panics if the chain is unknown. +func SystemContractsForChain(chainID flow.ChainID) *SystemContracts { + contracts, ok := systemContractsForChain[chainID] if !ok { - return nil, fmt.Errorf("unknown chain id (%s)", chainID.String()) - } - - contracts := &SystemContracts{ - Epoch: SystemContract{ - Address: addresses[ContractNameEpoch], - Name: ContractNameEpoch, - }, - ClusterQC: SystemContract{ - Address: addresses[ContractNameClusterQC], - Name: ContractNameClusterQC, - }, - DKG: SystemContract{ - Address: addresses[ContractNameDKG], - Name: ContractNameDKG, - }, - NodeVersionBeacon: SystemContract{ - Address: addresses[ContractNameNodeVersionBeacon], - Name: ContractNameNodeVersionBeacon, - }, - } - - return contracts, nil + // this is a panic, since it can only happen if the code is wrong + panic(fmt.Sprintf("unknown chain: %s", chainID)) + } + return contracts } +var systemContractsForChain = map[flow.ChainID]*SystemContracts{} + // ServiceEventsForChain returns the service event confirmation for the given chain. -func ServiceEventsForChain(chainID flow.ChainID) (*ServiceEvents, error) { - addresses, ok := contractAddressesByChainID[chainID] +// Panics if the chain is unknown. +func ServiceEventsForChain(chainID flow.ChainID) *ServiceEvents { + events, ok := serviceEventsForChain[chainID] if !ok { - return nil, fmt.Errorf("unknown chain id (%s)", chainID.String()) - } - - events := &ServiceEvents{ - EpochSetup: ServiceEvent{ - Address: addresses[ContractNameEpoch], - ContractName: ContractNameEpoch, - Name: EventNameEpochSetup, - }, - EpochCommit: ServiceEvent{ - Address: addresses[ContractNameEpoch], - ContractName: ContractNameEpoch, - Name: EventNameEpochCommit, - }, - VersionBeacon: ServiceEvent{ - Address: addresses[ContractNameNodeVersionBeacon], - ContractName: ContractNameNodeVersionBeacon, - Name: EventNameVersionBeacon, - }, - } - - return events, nil + // this is a panic, since it can only happen if the code is wrong + panic(fmt.Sprintf("unknown chain: %s", chainID)) + } + return events } -// contractAddressesByChainID stores the default system smart contract -// addresses for each chain. 
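Note the changed failure mode: both SystemContractsForChain and ServiceEventsForChain now panic on an unknown chain instead of returning an error, so any caller handling untrusted chain IDs should validate first. A sketch of such a guard, using the same flow.AllChainIDs helper the updated tests use:

```go
import "github.com/onflow/flow-go/model/flow"

// isKnownChain reports whether the panicking accessors are safe to call
// with the given id. Illustrative sketch only.
func isKnownChain(id flow.ChainID) bool {
	for _, known := range flow.AllChainIDs() {
		if known == id {
			return true
		}
	}
	return false
}
```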
-var contractAddressesByChainID map[flow.ChainID]map[string]flow.Address
+var serviceEventsForChain = map[flow.ChainID]*ServiceEvents{}
 
-// Well-known addresses for system contracts on long-running networks.
-// For now, all system contracts tracked by this package are deployed to the same
-// address (per chain) as the staking contract.
-//
-// Ref: https://docs.onflow.org/core-contracts/staking-contract-reference/
-var (
-	// stakingContractAddressMainnet is the address of the FlowIDTableStaking contract on Mainnet
-	stakingContractAddressMainnet = flow.HexToAddress("8624b52f9ddcd04a")
-	// stakingContractAddressTestnet is the address of the FlowIDTableStaking contract on Testnet
-	stakingContractAddressTestnet = flow.HexToAddress("9eca2b38b18b5dfe")
-)
+var contractAddressFunc = map[string]func(id flow.ChainID) flow.Address{}
 
 func init() {
-	contractAddressesByChainID = make(map[flow.ChainID]map[string]flow.Address)
-
-	// Main Flow network
-	// All system contracts are deployed to the account of the staking contract
-	mainnet := map[string]flow.Address{
-		ContractNameEpoch:             stakingContractAddressMainnet,
-		ContractNameClusterQC:         stakingContractAddressMainnet,
-		ContractNameDKG:               stakingContractAddressMainnet,
-		ContractNameNodeVersionBeacon: flow.Emulator.Chain().ServiceAddress(),
-	}
-	contractAddressesByChainID[flow.Mainnet] = mainnet
-
-	// Long-lived test networks
-	// All system contracts are deployed to the account of the staking contract
-	testnet := map[string]flow.Address{
-		ContractNameEpoch:             stakingContractAddressTestnet,
-		ContractNameClusterQC:         stakingContractAddressTestnet,
-		ContractNameDKG:               stakingContractAddressTestnet,
-		ContractNameNodeVersionBeacon: flow.Emulator.Chain().ServiceAddress(),
-	}
-	contractAddressesByChainID[flow.Testnet] = testnet
-
-	// Sandboxnet test network
-	// All system contracts are deployed to the service account
-	sandboxnet := map[string]flow.Address{
-		ContractNameEpoch:             flow.Sandboxnet.Chain().ServiceAddress(),
-		ContractNameClusterQC:         flow.Sandboxnet.Chain().ServiceAddress(),
-		ContractNameDKG:               flow.Sandboxnet.Chain().ServiceAddress(),
-		ContractNameNodeVersionBeacon: flow.Emulator.Chain().ServiceAddress(),
-	}
-	contractAddressesByChainID[flow.Sandboxnet] = sandboxnet
-
-	// Transient test networks
-	// All system contracts are deployed to the service account
-	transient := map[string]flow.Address{
-		ContractNameEpoch:             flow.Emulator.Chain().ServiceAddress(),
-		ContractNameClusterQC:         flow.Emulator.Chain().ServiceAddress(),
-		ContractNameDKG:               flow.Emulator.Chain().ServiceAddress(),
-		ContractNameNodeVersionBeacon: flow.Emulator.Chain().ServiceAddress(),
-	}
-	contractAddressesByChainID[flow.Emulator] = transient
-	contractAddressesByChainID[flow.Localnet] = transient
-	contractAddressesByChainID[flow.BftTestnet] = transient
-	contractAddressesByChainID[flow.Benchnet] = transient
+
+	serviceAddressFunc := func(chain flow.ChainID) flow.Address {
+		return chain.Chain().ServiceAddress()
+	}
+
+	// epoch contracts are deployed on a separate account on mainnet and testnet
+	epochAddressFunc := func(chain flow.ChainID) flow.Address {
+		switch chain {
+		case flow.Mainnet:
+			return stakingContractAddressMainnet
+		case flow.Testnet:
+			return stakingContractAddressTestnet
+		default:
+			return chain.Chain().ServiceAddress()
+		}
+	}
+
+	// some contracts are always at an address with a predetermined index
+	nthAddressFunc := func(index uint64) func(chain flow.ChainID) flow.Address {
+		return func(chain flow.ChainID) flow.Address {
+			address, err := chain.Chain().AddressAtIndex(index)
+			if err != nil {
+				// this can only happen if the code is wrong
+				panic(fmt.Sprintf("failed to get address at index %d: %v", index, err))
+			}
+			return address
+		}
+	}
+
+	nftTokenAddressFunc := func(chain flow.ChainID) flow.Address {
+		switch chain {
+		case flow.Mainnet:
+			return nftTokenAddressMainnet
+		case flow.Testnet:
+			return nftTokenAddressTestnet
+		default:
+			return chain.Chain().ServiceAddress()
+		}
+	}
+
+	evmStorageEVMFunc := func(chain flow.ChainID) flow.Address {
+		switch chain {
+		case flow.Mainnet:
+			return evmStorageAddressMainnet
+		case flow.Testnet:
+			return evmStorageAddressTestnet
+		default:
+			return nthAddressFunc(EVMStorageAccountIndex)(chain)
+		}
+	}
+
+	burnerAddressFunc := func(chain flow.ChainID) flow.Address {
+		switch chain {
+		case flow.Mainnet, flow.Testnet:
+			return nthAddressFunc(FungibleTokenAccountIndex)(chain)
+		default:
+			return serviceAddressFunc(chain)
+		}
+	}
+
+	executionParametersAccountFunc := func(chain flow.ChainID) flow.Address {
+		switch chain {
+		case flow.Mainnet:
+			return executionParametersAddressMainnet
+		case flow.Testnet:
+			return executionParametersAddressTestnet
+		default:
+			return nthAddressFunc(FungibleTokenAccountIndex)(chain)
+		}
+	}
+
+	contractAddressFunc = map[string]func(id flow.ChainID) flow.Address{
+		ContractNameIDTableStaking: epochAddressFunc,
+		ContractNameEpoch:          epochAddressFunc,
+		ContractNameClusterQC:      epochAddressFunc,
+		ContractNameDKG:            epochAddressFunc,
+
+		ContractNameNodeVersionBeacon:         serviceAddressFunc,
+		ContractNameRandomBeaconHistory:       serviceAddressFunc,
+		ContractNameServiceAccount:            serviceAddressFunc,
+		ContractNameStorageFees:               serviceAddressFunc,
+		ContractNameFlowCallbackScheduler:     serviceAddressFunc,
+		AccountNameExecutionParametersAccount: executionParametersAccountFunc,
+
+		ContractNameFlowFees:                   nthAddressFunc(FlowFeesAccountIndex),
+		ContractNameFungibleToken:              nthAddressFunc(FungibleTokenAccountIndex),
+		ContractNameFlowToken:                  nthAddressFunc(FlowTokenAccountIndex),
+		ContractNameFungibleTokenSwitchboard:   nthAddressFunc(FungibleTokenAccountIndex),
+		ContractNameFungibleTokenMetadataViews: nthAddressFunc(FungibleTokenAccountIndex),
+
+		ContractNameNonFungibleToken:     nftTokenAddressFunc,
+		ContractNameMetadataViews:        nftTokenAddressFunc,
+		ContractNameViewResolver:         nftTokenAddressFunc,
+		ContractNameCrossVMMetadataViews: nftTokenAddressFunc,
+
+		ContractNameEVM:       serviceAddressFunc,
+		AccountNameEVMStorage: evmStorageEVMFunc,
+
+		ContractNameBurner: burnerAddressFunc,
+		ContractNameCrypto: serviceAddressFunc,
+
+		ContractNameMigration: serviceAddressFunc,
+	}
+
+	getSystemContractsForChain := func(chainID flow.ChainID) *SystemContracts {
+
+		addressOfContract := func(name string) SystemContract {
+			addressFunc, ok := contractAddressFunc[name]
+			if !ok {
+				// this is a panic, since it can only happen if the code is wrong
+				panic(fmt.Sprintf("unknown system contract name: %s", name))
+			}
+
+			return SystemContract{
+				Address: addressFunc(chainID),
+				Name:    name,
+			}
+		}
+
+		addressOfAccount := func(name string) SystemAccount {
+			addressFunc, ok := contractAddressFunc[name]
+			if !ok {
+				// this is a panic, since it can only happen if the code is wrong
+				panic(fmt.Sprintf("unknown system account name: %s", name))
+			}
+
+			return SystemAccount{
+				Address: addressFunc(chainID),
+				Name:    name,
+			}
+		}
+
+		contracts := &SystemContracts{
+			Epoch:          addressOfContract(ContractNameEpoch),
+			IDTableStaking: addressOfContract(ContractNameIDTableStaking),
+			ClusterQC:
addressOfContract(ContractNameClusterQC), + DKG: addressOfContract(ContractNameDKG), + + FlowServiceAccount: addressOfContract(ContractNameServiceAccount), + FlowCallbackScheduler: addressOfContract(ContractNameFlowCallbackScheduler), + NodeVersionBeacon: addressOfContract(ContractNameNodeVersionBeacon), + RandomBeaconHistory: addressOfContract(ContractNameRandomBeaconHistory), + FlowStorageFees: addressOfContract(ContractNameStorageFees), + ExecutionParametersAccount: addressOfContract(AccountNameExecutionParametersAccount), + + FlowFees: addressOfContract(ContractNameFlowFees), + FlowToken: addressOfContract(ContractNameFlowToken), + FungibleToken: addressOfContract(ContractNameFungibleToken), + FungibleTokenMetadataViews: addressOfContract(ContractNameFungibleTokenMetadataViews), + FungibleTokenSwitchboard: addressOfContract(ContractNameFungibleTokenSwitchboard), + + NonFungibleToken: addressOfContract(ContractNameNonFungibleToken), + MetadataViews: addressOfContract(ContractNameMetadataViews), + ViewResolver: addressOfContract(ContractNameViewResolver), + CrossVMMetadataViews: addressOfContract(ContractNameCrossVMMetadataViews), + + EVMContract: addressOfContract(ContractNameEVM), + EVMStorage: addressOfAccount(AccountNameEVMStorage), + + Burner: addressOfContract(ContractNameBurner), + Crypto: addressOfContract(ContractNameCrypto), + + Migration: addressOfContract(ContractNameMigration), + } + + return contracts + } + + getServiceEventsForChain := func(chainID flow.ChainID) *ServiceEvents { + + event := func(contractName, eventName string) ServiceEvent { + addressFunc, ok := contractAddressFunc[contractName] + if !ok { + // this is a panic, since it can only happen if the code is wrong + panic(fmt.Sprintf("unknown system contract name: %s", contractName)) + } + + return ServiceEvent{ + Address: addressFunc(chainID), + ContractName: contractName, + Name: eventName, + } + } + + events := &ServiceEvents{ + EpochSetup: event(ContractNameEpoch, EventNameEpochSetup), + EpochCommit: event(ContractNameEpoch, EventNameEpochCommit), + EpochRecover: event(ContractNameEpoch, EventNameEpochRecover), + VersionBeacon: event(ContractNameNodeVersionBeacon, EventNameVersionBeacon), + ProtocolStateVersionUpgrade: event(ContractNameNodeVersionBeacon, EventNameProtocolStateVersionUpgrade), + } + return events + } + + // pre-populate the system contracts and service events for all chains for fast access + for _, chain := range flow.AllChainIDs() { + serviceEventsForChain[chain] = getServiceEventsForChain(chain) + systemContractsForChain[chain] = getSystemContractsForChain(chain) + } } diff --git a/fvm/systemcontracts/system_contracts_test.go b/fvm/systemcontracts/system_contracts_test.go index bae3308aac0..0c42b9b6508 100644 --- a/fvm/systemcontracts/system_contracts_test.go +++ b/fvm/systemcontracts/system_contracts_test.go @@ -13,18 +13,10 @@ import ( // TestSystemContract_Address tests that we can retrieve a canonical address // for all accepted chains and contracts. 
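Because init() pre-populates systemContractsForChain and serviceEventsForChain for every chain ID, the accessors reduce to plain map reads and are cheap enough for hot paths. For example:

```go
import (
	"fmt"

	"github.com/onflow/flow-go/fvm/systemcontracts"
	"github.com/onflow/flow-go/model/flow"
)

func printEpochAddress() {
	// No computation happens here beyond a map lookup: the per-chain
	// structs were built once, at package initialization.
	sc := systemcontracts.SystemContractsForChain(flow.Mainnet)
	fmt.Println(sc.Epoch.Address.Hex()) // 8624b52f9ddcd04a, per the constants above
}
```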
func TestSystemContracts(t *testing.T) { - chains := []flow.ChainID{ - flow.Mainnet, - flow.Testnet, - flow.Sandboxnet, - flow.Benchnet, - flow.Localnet, - flow.Emulator, - } + chains := flow.AllChainIDs() for _, chain := range chains { - _, err := SystemContractsForChain(chain) - require.NoError(t, err) + require.NotPanics(t, func() { SystemContractsForChain(chain) }) checkSystemContracts(t, chain) } } @@ -34,47 +26,30 @@ func TestSystemContracts(t *testing.T) { func TestSystemContract_InvalidChainID(t *testing.T) { invalidChain := flow.ChainID("invalid-chain") - _, err := SystemContractsForChain(invalidChain) - assert.Error(t, err) + require.Panics(t, func() { SystemContractsForChain(invalidChain) }) } // TestServiceEvents tests that we can retrieve service events for all accepted // chains and contracts. func TestServiceEvents(t *testing.T) { - chains := []flow.ChainID{ - flow.Mainnet, - flow.Testnet, - flow.Sandboxnet, - flow.Benchnet, - flow.Localnet, - flow.Emulator, - } + chains := flow.AllChainIDs() for _, chain := range chains { - _, err := ServiceEventsForChain(chain) + require.NotPanics(t, func() { ServiceEventsForChain(chain) }) checkServiceEvents(t, chain) - require.NoError(t, err) } } // TestServiceEventLookup_Consistency sanity checks consistency of the lookup // method, in case an update to ServiceEvents forgets to update the lookup. func TestServiceEventAll_Consistency(t *testing.T) { - chains := []flow.ChainID{ - flow.Mainnet, - flow.Testnet, - flow.Sandboxnet, - flow.Benchnet, - flow.Localnet, - flow.Emulator, - } + chains := flow.AllChainIDs() fields := reflect.TypeOf(ServiceEvents{}).NumField() for _, chain := range chains { - events, err := ServiceEventsForChain(chain) - require.NoError(t, err) + events := ServiceEventsForChain(chain) - // ensure all events are returns + // ensure all events are present all := events.All() assert.Equal(t, fields, len(all)) } @@ -85,39 +60,42 @@ func TestServiceEventAll_Consistency(t *testing.T) { func TestServiceEvents_InvalidChainID(t *testing.T) { invalidChain := flow.ChainID("invalid-chain") - _, err := ServiceEventsForChain(invalidChain) - assert.Error(t, err) + require.Panics(t, func() { ServiceEventsForChain(invalidChain) }) } func checkSystemContracts(t *testing.T, chainID flow.ChainID) { - contracts, err := SystemContractsForChain(chainID) - require.NoError(t, err) + contracts := SystemContractsForChain(chainID) - addresses, ok := contractAddressesByChainID[chainID] - require.True(t, ok, "missing chain %s", chainID.String()) + address := func(name string) flow.Address { + f, ok := contractAddressFunc[name] + require.True(t, ok, "missing contract %s for chain %s", name, chainID.String()) + return f(chainID) + } // entries may not be empty - assert.NotEqual(t, flow.EmptyAddress, addresses[ContractNameEpoch]) - assert.NotEqual(t, flow.EmptyAddress, addresses[ContractNameClusterQC]) - assert.NotEqual(t, flow.EmptyAddress, addresses[ContractNameDKG]) - assert.NotEqual(t, flow.EmptyAddress, addresses[ContractNameNodeVersionBeacon]) + assert.NotEqual(t, flow.EmptyAddress, address(ContractNameEpoch)) + assert.NotEqual(t, flow.EmptyAddress, address(ContractNameClusterQC)) + assert.NotEqual(t, flow.EmptyAddress, address(ContractNameDKG)) + assert.NotEqual(t, flow.EmptyAddress, address(ContractNameNodeVersionBeacon)) // entries must match internal mapping - assert.Equal(t, addresses[ContractNameEpoch], contracts.Epoch.Address) - assert.Equal(t, addresses[ContractNameClusterQC], contracts.ClusterQC.Address) - assert.Equal(t, 
addresses[ContractNameDKG], contracts.DKG.Address) - assert.Equal(t, addresses[ContractNameNodeVersionBeacon], contracts.NodeVersionBeacon.Address) + assert.Equal(t, address(ContractNameEpoch), contracts.Epoch.Address) + assert.Equal(t, address(ContractNameClusterQC), contracts.ClusterQC.Address) + assert.Equal(t, address(ContractNameDKG), contracts.DKG.Address) + assert.Equal(t, address(ContractNameNodeVersionBeacon), contracts.NodeVersionBeacon.Address) } func checkServiceEvents(t *testing.T, chainID flow.ChainID) { - events, err := ServiceEventsForChain(chainID) - require.NoError(t, err) + events := ServiceEventsForChain(chainID) - addresses, ok := contractAddressesByChainID[chainID] - require.True(t, ok, "missing chain %w", chainID.String()) + address := func(name string) flow.Address { + f, ok := contractAddressFunc[name] + require.True(t, ok, "missing contract %s for chain %s", name, chainID.String()) + return f(chainID) + } - epochContractAddr := addresses[ContractNameEpoch] - versionContractAddr := addresses[ContractNameNodeVersionBeacon] + epochContractAddr := address(ContractNameEpoch) + versionContractAddr := address(ContractNameNodeVersionBeacon) // entries may not be empty assert.NotEqual(t, flow.EmptyAddress, epochContractAddr) assert.NotEqual(t, flow.EmptyAddress, versionContractAddr) @@ -126,4 +104,5 @@ func checkServiceEvents(t *testing.T, chainID flow.ChainID) { assert.Equal(t, epochContractAddr, events.EpochSetup.Address) assert.Equal(t, epochContractAddr, events.EpochCommit.Address) assert.Equal(t, versionContractAddr, events.VersionBeacon.Address) + assert.Equal(t, versionContractAddr, events.ProtocolStateVersionUpgrade.Address) } diff --git a/fvm/transactionInvoker.go b/fvm/transactionInvoker.go index 2e46664f13f..db78e86aa80 100644 --- a/fvm/transactionInvoker.go +++ b/fvm/transactionInvoker.go @@ -4,14 +4,15 @@ import ( "fmt" "strconv" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" "github.com/rs/zerolog" "go.opentelemetry.io/otel/attribute" otelTrace "go.opentelemetry.io/otel/trace" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/evm" reusableRuntime "github.com/onflow/flow-go/fvm/runtime" "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/storage/derived" @@ -64,6 +65,11 @@ type transactionExecutor struct { startedTransactionBodyExecution bool nestedTxnId state.NestedTransactionId + // the state reads needed to compute the metering parameters + // this is used to invalidate the metering parameters if a transaction + // writes to any of those registers + executionStateRead *snapshot.ExecutionSnapshot + cadenceRuntime *reusableRuntime.ReusableCadenceRuntime txnBodyExecutor runtime.Executor @@ -180,16 +186,41 @@ func (executor *transactionExecutor) preprocess() error { // infrequently modified and are expensive to compute. For now this includes // reading meter parameter overrides and parsing programs. 
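The executionStateRead field introduced above is what makes cached execution parameters safely invalidatable: if a later transaction writes any register that the parameter computation read, the cache must be recomputed. A minimal sketch of that overlap check, assuming snapshot.ExecutionSnapshot exposes a ReadSet alongside the WriteSet used elsewhere in this diff (the real wiring goes through environment.NewDerivedDataInvalidator):

```go
import "github.com/onflow/flow-go/fvm/storage/snapshot"

// touchesParameterReads reports whether a transaction's writes overlap the
// registers read while computing execution parameters. Illustrative only.
func touchesParameterReads(paramRead, txn *snapshot.ExecutionSnapshot) bool {
	for id := range txn.WriteSet {
		if _, ok := paramRead.ReadSet[id]; ok {
			return true
		}
	}
	return false
}
```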
func (executor *transactionExecutor) preprocessTransactionBody() error { - meterParams, err := getBodyMeterParameters( + chainID := executor.ctx.Chain.ChainID() + + // setup EVM + if executor.ctx.EVMEnabled { + err := evm.SetupEnvironment( + chainID, + executor.env, + executor.cadenceRuntime.TxRuntimeEnv, + ) + if err != nil { + return err + } + } + + // get meter parameters + executionParameters, executionStateRead, err := getExecutionParameters( + executor.env.Logger(), executor.ctx, executor.proc, executor.txnState) if err != nil { - return fmt.Errorf("error gettng meter parameters: %w", err) + return fmt.Errorf("error getting execution parameters: %w", err) + } + + if len(executionStateRead.WriteSet) != 0 { + // this should never happen + // and indicates an implementation error + panic("getting execution parameters should not write to registers") } + // we need to save the execution state read for invalidation purposes + executor.executionStateRead = executionStateRead + txnId, err := executor.txnState.BeginNestedTransactionWithMeterParams( - meterParams) + executionParameters) if err != nil { return err } @@ -224,6 +255,20 @@ func (executor *transactionExecutor) execute() error { } func (executor *transactionExecutor) ExecuteTransactionBody() error { + chainID := executor.ctx.Chain.ChainID() + + // setup EVM + if executor.ctx.EVMEnabled { + err := evm.SetupEnvironment( + chainID, + executor.env, + executor.cadenceRuntime.TxRuntimeEnv, + ) + if err != nil { + return err + } + } + var invalidator derived.TransactionInvalidator if !executor.errs.CollectedError() { @@ -236,7 +281,7 @@ func (executor *transactionExecutor) ExecuteTransactionBody() error { if executor.errs.CollectedError() { invalidator = nil - executor.txnState.RunWithAllLimitsDisabled(executor.errorExecution) + executor.txnState.RunWithMeteringDisabled(executor.errorExecution) if executor.errs.CollectedFailure() { return executor.errs.ErrorOrNil() } @@ -256,7 +301,7 @@ func (executor *transactionExecutor) deductTransactionFees() (err error) { return nil } - computationLimit := uint64(executor.txnState.TotalComputationLimit()) + computationLimit := executor.txnState.TotalComputationLimit() computationUsed, err := executor.env.ComputationUsed() if err != nil { @@ -286,24 +331,25 @@ func (executor *transactionExecutor) deductTransactionFees() (err error) { // logExecutionIntensities logs execution intensities of the transaction func (executor *transactionExecutor) logExecutionIntensities() { - if !executor.env.Logger().Debug().Enabled() { + log := executor.env.Logger() + if !log.Debug().Enabled() { return } computation := zerolog.Dict() - for s, u := range executor.txnState.ComputationIntensities() { - computation.Uint(strconv.FormatUint(uint64(s), 10), u) + for kind, intensity := range executor.txnState.ComputationIntensities() { + computation.Uint64(strconv.FormatUint(uint64(kind), 10), intensity) } memory := zerolog.Dict() - for s, u := range executor.txnState.MemoryIntensities() { - memory.Uint(strconv.FormatUint(uint64(s), 10), u) + for kind, amount := range executor.txnState.MemoryAmounts() { + memory.Uint64(strconv.FormatUint(uint64(kind), 10), amount) } - executor.env.Logger().Debug(). + log.Debug(). Uint64("ledgerInteractionUsed", executor.txnState.InteractionUsed()). Uint64("computationUsed", executor.txnState.TotalComputationUsed()). Uint64("memoryEstimate", executor.txnState.TotalMemoryEstimate()). Dict("computationIntensities", computation). - Dict("memoryIntensities", memory). + Dict("memoryAmounts", memory). 
Msg("transaction execution data") } @@ -314,7 +360,7 @@ func (executor *transactionExecutor) normalExecution() ( var maxTxFees uint64 // run with limits disabled since this is a static cost check // and should be accounted for in the inclusion cost. - executor.txnState.RunWithAllLimitsDisabled(func() { + executor.txnState.RunWithMeteringDisabled(func() { maxTxFees, err = executor.CheckPayerBalanceAndReturnMaxFees( executor.proc, executor.txnState, @@ -357,31 +403,28 @@ func (executor *transactionExecutor) normalExecution() ( invalidator = environment.NewDerivedDataInvalidator( contractUpdates, - executor.ctx.Chain.ServiceAddress(), - bodySnapshot) + bodySnapshot, + executor.executionStateRead, + ) // Check if all account storage limits are ok // - // disable the computation/memory limit checks on storage checks, - // so we don't error from computation/memory limits on this part. - // // The storage limit check is performed for all accounts that were touched during the transaction. // The storage capacity of an account depends on its balance and should be higher than the accounts storage used. // The payer account is special cased in this check and its balance is considered max_fees lower than its // actual balance, for the purpose of calculating storage capacity, because the payer will have to pay for this tx. - executor.txnState.RunWithAllLimitsDisabled(func() { - err = executor.CheckStorageLimits( - executor.env, - bodySnapshot, - executor.proc.Transaction.Payer, - maxTxFees) - }) + err = executor.CheckStorageLimits( + executor.ctx, + executor.env, + bodySnapshot, + executor.proc.Transaction.Payer, + maxTxFees) if err != nil { return } - executor.txnState.RunWithAllLimitsDisabled(func() { + executor.txnState.RunWithMeteringDisabled(func() { err = executor.deductTransactionFees() }) @@ -391,7 +434,8 @@ func (executor *transactionExecutor) normalExecution() ( // Clear changes and try to deduct fees again. func (executor *transactionExecutor) errorExecution() { // log transaction as failed - executor.env.Logger().Info(). + log := executor.env.Logger() + log.Info(). Err(executor.errs.ErrorOrNil()). Msg("transaction executed with error") @@ -408,7 +452,7 @@ func (executor *transactionExecutor) errorExecution() { // if fee deduction fails just do clean up and exit if feesError != nil { - executor.env.Logger().Info(). + log.Info(). Err(feesError). 
Msg("transaction fee deduction executed with error") diff --git a/fvm/transactionInvoker_test.go b/fvm/transactionInvoker_test.go index f58c609f130..78feb7f3ce4 100644 --- a/fvm/transactionInvoker_test.go +++ b/fvm/transactionInvoker_test.go @@ -5,10 +5,10 @@ import ( "testing" "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" - "github.com/onflow/cadence/runtime/sema" + "github.com/onflow/cadence/sema" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -36,7 +36,7 @@ func TestSafetyCheck(t *testing.T) { executor := proc.NewExecutor(context, txnState) err := fvm.Run(executor) - require.Nil(t, err) + require.NoError(t, err) require.Error(t, executor.Output().Err) require.NotContains(t, buffer.String(), "programs") @@ -62,7 +62,7 @@ func TestSafetyCheck(t *testing.T) { executor := proc.NewExecutor(context, txnState) err := fvm.Run(executor) - require.Nil(t, err) + require.NoError(t, err) require.Error(t, executor.Output().Err) require.NotContains(t, buffer.String(), "programs") @@ -134,10 +134,6 @@ func (e *ErrorReturningRuntime) ReadStored(_ common.Address, _ cadence.Path, _ r return nil, nil } -func (e *ErrorReturningRuntime) ReadLinked(_ common.Address, _ cadence.Path, _ runtime.Context) (cadence.Value, error) { - panic("ReadLinked not expected") -} - func (e *ErrorReturningRuntime) InvokeContractFunction(_ common.AddressLocation, _ string, _ []cadence.Value, _ []sema.Type, _ runtime.Context) (cadence.Value, error) { panic("InvokeContractFunction not expected") } diff --git a/fvm/transactionPayerBalanceChecker.go b/fvm/transactionPayerBalanceChecker.go index 96618582863..5f3bbf067eb 100644 --- a/fvm/transactionPayerBalanceChecker.go +++ b/fvm/transactionPayerBalanceChecker.go @@ -12,6 +12,52 @@ import ( type TransactionPayerBalanceChecker struct{} +const VerifyPayerBalanceResultTypeCanExecuteTransactionFieldName = "canExecuteTransaction" +const VerifyPayerBalanceResultTypeRequiredBalanceFieldName = "requiredBalance" +const VerifyPayerBalanceResultTypeMaximumTransactionFeesFieldName = "maximumTransactionFees" + +// DecodeVerifyPayerBalanceResult decodes the VerifyPayerBalanceResult struct +// https://github.com/onflow/flow-core-contracts/blob/7c70c6a1d33c2879b60c78e363fa68fc6fce13b9/contracts/FlowFees.cdc#L75 +func DecodeVerifyPayerBalanceResult(resultValue cadence.Value) ( + canExecuteTransaction cadence.Bool, + requiredBalance cadence.UFix64, + maximumTransactionFees cadence.UFix64, + err error, +) { + result, ok := resultValue.(cadence.Struct) + if !ok { + return false, 0, 0, fmt.Errorf("invalid VerifyPayerBalanceResult value: not a struct") + } + + fields := cadence.FieldsMappedByName(result) + + canExecuteTransaction, ok = fields[VerifyPayerBalanceResultTypeCanExecuteTransactionFieldName].(cadence.Bool) + if !ok { + return false, 0, 0, fmt.Errorf( + "invalid VerifyPayerBalanceResult field: %s", + VerifyPayerBalanceResultTypeCanExecuteTransactionFieldName, + ) + } + + requiredBalance, ok = fields[VerifyPayerBalanceResultTypeRequiredBalanceFieldName].(cadence.UFix64) + if !ok { + return false, 0, 0, fmt.Errorf( + "invalid VerifyPayerBalanceResult field: %s", + VerifyPayerBalanceResultTypeRequiredBalanceFieldName, + ) + } + + maximumTransactionFees, ok = fields[VerifyPayerBalanceResultTypeMaximumTransactionFeesFieldName].(cadence.UFix64) + if !ok { + return false, 0, 0, fmt.Errorf( + 
"invalid VerifyPayerBalanceResult field: %s", + VerifyPayerBalanceResultTypeMaximumTransactionFeesFieldName, + ) + } + + return canExecuteTransaction, requiredBalance, maximumTransactionFees, nil +} + func (_ TransactionPayerBalanceChecker) CheckPayerBalanceAndReturnMaxFees( proc *TransactionProcedure, txnState storage.TransactionPreparer, @@ -27,7 +73,7 @@ func (_ TransactionPayerBalanceChecker) CheckPayerBalanceAndReturnMaxFees( var resultValue cadence.Value var err error - txnState.RunWithAllLimitsDisabled(func() { + txnState.RunWithMeteringDisabled(func() { // Don't meter the payer balance check. // It has a static cost, and its cost should be part of the inclusion fees, not part of execution fees. resultValue, err = env.CheckPayerBalanceAndGetMaxTxFees( @@ -40,21 +86,14 @@ func (_ TransactionPayerBalanceChecker) CheckPayerBalanceAndReturnMaxFees( return 0, errors.NewPayerBalanceCheckFailure(proc.Transaction.Payer, err) } - // parse expected result from the Cadence runtime - // https://github.com/onflow/flow-core-contracts/blob/7c70c6a1d33c2879b60c78e363fa68fc6fce13b9/contracts/FlowFees.cdc#L75 - result, ok := resultValue.(cadence.Struct) - if ok && len(result.Fields) == 3 { - payerCanPay, okBool := result.Fields[0].(cadence.Bool) - requiredBalance, okBalance := result.Fields[1].(cadence.UFix64) - maxFees, okFees := result.Fields[2].(cadence.UFix64) + payerCanPay, requiredBalance, maxFees, err := DecodeVerifyPayerBalanceResult(resultValue) + if err != nil { + return 0, errors.NewPayerBalanceCheckFailure(proc.Transaction.Payer, err) + } - if okBool && okBalance && okFees { - if !payerCanPay { - return 0, errors.NewInsufficientPayerBalanceError(proc.Transaction.Payer, requiredBalance) - } - return uint64(maxFees), nil - } + if !payerCanPay { + return 0, errors.NewInsufficientPayerBalanceError(proc.Transaction.Payer, requiredBalance) } - return 0, errors.NewPayerBalanceCheckFailure(proc.Transaction.Payer, fmt.Errorf("invalid result type")) + return uint64(maxFees), nil } diff --git a/fvm/transactionPayerBalanceChecker_test.go b/fvm/transactionPayerBalanceChecker_test.go index 931f2984bd1..2dc3ca677b5 100644 --- a/fvm/transactionPayerBalanceChecker_test.go +++ b/fvm/transactionPayerBalanceChecker_test.go @@ -16,6 +16,28 @@ import ( "github.com/onflow/flow-go/model/flow" ) +var verifyPayerBalanceResultType = cadence.NewStructType( + // TODO: location + nil, + // TODO: qualified identifier + "", + []cadence.Field{ + { + Identifier: fvm.VerifyPayerBalanceResultTypeCanExecuteTransactionFieldName, + Type: cadence.BoolType, + }, + { + Identifier: fvm.VerifyPayerBalanceResultTypeRequiredBalanceFieldName, + Type: cadence.UFix64Type, + }, + { + Identifier: fvm.VerifyPayerBalanceResultTypeMaximumTransactionFeesFieldName, + Type: cadence.UFix64Type, + }, + }, + nil, +) + func TestTransactionPayerBalanceChecker(t *testing.T) { payer := flow.HexToAddress("1") t.Run("TransactionFeesEnabled == false disables the balance check", func(t *testing.T) { @@ -52,7 +74,7 @@ func TestTransactionPayerBalanceChecker(t *testing.T) { d := fvm.TransactionPayerBalanceChecker{} maxFees, err := d.CheckPayerBalanceAndReturnMaxFees(proc, txnState, env) require.Error(t, err) - require.True(t, errors.HasErrorCode(err, errors.FailureCodePayerBalanceCheckFailure)) + require.True(t, errors.HasFailureCode(err, errors.FailureCodePayerBalanceCheckFailure)) require.ErrorIs(t, err, someError) require.Equal(t, uint64(0), maxFees) }) @@ -73,22 +95,26 @@ func TestTransactionPayerBalanceChecker(t *testing.T) { d := 
fvm.TransactionPayerBalanceChecker{} maxFees, err := d.CheckPayerBalanceAndReturnMaxFees(proc, txnState, env) require.Error(t, err) - require.True(t, errors.HasErrorCode(err, errors.FailureCodePayerBalanceCheckFailure)) + require.True(t, errors.HasFailureCode(err, errors.FailureCodePayerBalanceCheckFailure)) require.Equal(t, uint64(0), maxFees) }) t.Run("if payer can pay return max fees", func(t *testing.T) { env := &fvmmock.Environment{} env.On("TransactionFeesEnabled").Return(true) - env.On("CheckPayerBalanceAndGetMaxTxFees", mock.Anything, mock.Anything, mock.Anything).Return( - cadence.Struct{ - Fields: []cadence.Value{ - cadence.NewBool(true), - cadence.UFix64(100), - cadence.UFix64(100), - }, - }, - nil) + env.On( + "CheckPayerBalanceAndGetMaxTxFees", + mock.Anything, + mock.Anything, + mock.Anything, + ).Return( + cadence.NewStruct([]cadence.Value{ + cadence.NewBool(true), + cadence.UFix64(100), + cadence.UFix64(100), + }).WithType(verifyPayerBalanceResultType), + nil, + ) proc := &fvm.TransactionProcedure{} proc.Transaction = &flow.TransactionBody{} @@ -105,15 +131,19 @@ func TestTransactionPayerBalanceChecker(t *testing.T) { t.Run("if payer cannot pay return insufficient balance error", func(t *testing.T) { env := &fvmmock.Environment{} env.On("TransactionFeesEnabled").Return(true) - env.On("CheckPayerBalanceAndGetMaxTxFees", mock.Anything, mock.Anything, mock.Anything).Return( - cadence.Struct{ - Fields: []cadence.Value{ - cadence.NewBool(false), - cadence.UFix64(100), - cadence.UFix64(101), - }, - }, - nil) + env.On( + "CheckPayerBalanceAndGetMaxTxFees", + mock.Anything, + mock.Anything, + mock.Anything, + ).Return( + cadence.NewStruct([]cadence.Value{ + cadence.NewBool(false), + cadence.UFix64(100), + cadence.UFix64(101), + }).WithType(verifyPayerBalanceResultType), + nil, + ) proc := &fvm.TransactionProcedure{} proc.Transaction = &flow.TransactionBody{} diff --git a/fvm/transactionSequenceNum.go b/fvm/transactionSequenceNum.go index 81b77e4868f..bb750d13876 100644 --- a/fvm/transactionSequenceNum.go +++ b/fvm/transactionSequenceNum.go @@ -7,7 +7,6 @@ import ( "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage" "github.com/onflow/flow-go/fvm/tracing" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/trace" ) @@ -20,7 +19,7 @@ func (c TransactionSequenceNumberChecker) CheckAndIncrementSequenceNumber( ) error { // TODO(Janez): verification is part of inclusion fees, not execution fees. var err error - txnState.RunWithAllLimitsDisabled(func() { + txnState.RunWithMeteringDisabled(func() { err = c.checkAndIncrementSequenceNumber(tracer, proc, txnState) }) @@ -54,29 +53,30 @@ func (c TransactionSequenceNumberChecker) checkAndIncrementSequenceNumber( accounts := environment.NewAccounts(txnState) proposalKey := proc.Transaction.ProposalKey - var accountKey flow.AccountPublicKey - - accountKey, err = accounts.GetPublicKey(proposalKey.Address, proposalKey.KeyIndex) + revoked, err := accounts.GetAccountPublicKeyRevokedStatus(proposalKey.Address, proposalKey.KeyIndex) if err != nil { return errors.NewInvalidProposalSignatureError(proposalKey, err) } - if accountKey.Revoked { + if revoked { return errors.NewInvalidProposalSignatureError( proposalKey, fmt.Errorf("proposal key has been revoked")) } + seqNumber, err := accounts.GetAccountPublicKeySequenceNumber(proposalKey.Address, proposalKey.KeyIndex) + if err != nil { + return err + } + // Note that proposal key verification happens at the txVerifier and not here. 
- valid := accountKey.SeqNumber == proposalKey.SequenceNumber + valid := seqNumber == proposalKey.SequenceNumber if !valid { - return errors.NewInvalidProposalSeqNumberError(proposalKey, accountKey.SeqNumber) + return errors.NewInvalidProposalSeqNumberError(proposalKey, seqNumber) } - accountKey.SeqNumber++ - - _, err = accounts.SetPublicKey(proposalKey.Address, proposalKey.KeyIndex, accountKey) + err = accounts.IncrementAccountPublicKeySequenceNumber(proposalKey.Address, proposalKey.KeyIndex) if err != nil { restartError := txnState.RestartNestedTransaction(nestedTxnId) if restartError != nil { diff --git a/fvm/transactionSequenceNum_test.go b/fvm/transactionSequenceNum_test.go index c711e30d7cc..f56c0511906 100644 --- a/fvm/transactionSequenceNum_test.go +++ b/fvm/transactionSequenceNum_test.go @@ -26,8 +26,13 @@ func TestTransactionSequenceNumProcess(t *testing.T) { err = accounts.Create([]flow.AccountPublicKey{privKey.PublicKey(1000)}, address) require.NoError(t, err) - tx := flow.TransactionBody{} - tx.SetProposalKey(address, 0, 0) + tx := flow.TransactionBody{ + ProposalKey: flow.ProposalKey{ + Address: address, + KeyIndex: 0, + SequenceNumber: 0, + }, + } proc := fvm.Transaction(&tx, 0) seqChecker := fvm.TransactionSequenceNumberChecker{} @@ -38,9 +43,9 @@ func TestTransactionSequenceNumProcess(t *testing.T) { require.NoError(t, err) // get fetch the sequence number and it should be updated - key, err := accounts.GetPublicKey(address, 0) + seqNumber, err := accounts.GetAccountPublicKeySequenceNumber(address, 0) require.NoError(t, err) - require.Equal(t, key.SeqNumber, uint64(1)) + require.Equal(t, seqNumber, uint64(1)) }) t.Run("invalid sequence number", func(t *testing.T) { txnState := testutils.NewSimpleTransaction(nil) @@ -53,9 +58,14 @@ func TestTransactionSequenceNumProcess(t *testing.T) { err = accounts.Create([]flow.AccountPublicKey{privKey.PublicKey(1000)}, address) require.NoError(t, err) - tx := flow.TransactionBody{} - // invalid sequence number is 2 - tx.SetProposalKey(address, 0, 2) + tx := flow.TransactionBody{ + // invalid sequence number is 2 + ProposalKey: flow.ProposalKey{ + Address: address, + KeyIndex: 0, + SequenceNumber: 2, + }, + } proc := fvm.Transaction(&tx, 0) seqChecker := fvm.TransactionSequenceNumberChecker{} @@ -67,9 +77,9 @@ func TestTransactionSequenceNumProcess(t *testing.T) { require.True(t, errors.HasErrorCode(err, errors.ErrCodeInvalidProposalSeqNumberError)) // get fetch the sequence number and check it to be unchanged - key, err := accounts.GetPublicKey(address, 0) + seqNumber, err := accounts.GetAccountPublicKeySequenceNumber(address, 0) require.NoError(t, err) - require.Equal(t, key.SeqNumber, uint64(0)) + require.Equal(t, seqNumber, uint64(0)) }) t.Run("invalid address", func(t *testing.T) { txnState := testutils.NewSimpleTransaction(nil) @@ -82,9 +92,14 @@ func TestTransactionSequenceNumProcess(t *testing.T) { err = accounts.Create([]flow.AccountPublicKey{privKey.PublicKey(1000)}, address) require.NoError(t, err) - tx := flow.TransactionBody{} // wrong address - tx.SetProposalKey(flow.HexToAddress("2222"), 0, 0) + tx := flow.TransactionBody{ + ProposalKey: flow.ProposalKey{ + Address: flow.HexToAddress("2222"), + KeyIndex: 0, + SequenceNumber: 0, + }, + } proc := fvm.Transaction(&tx, 0) seqChecker := &fvm.TransactionSequenceNumberChecker{} @@ -95,8 +110,8 @@ func TestTransactionSequenceNumProcess(t *testing.T) { require.Error(t, err) // get fetch the sequence number and check it to be unchanged - key, err := accounts.GetPublicKey(address, 0) 
+ seqNumber, err := accounts.GetAccountPublicKeySequenceNumber(address, 0) require.NoError(t, err) - require.Equal(t, key.SeqNumber, uint64(0)) + require.Equal(t, seqNumber, uint64(0)) }) } diff --git a/fvm/transactionStorageLimiter.go b/fvm/transactionStorageLimiter.go index 9d504adf7bf..37ebf12ddee 100644 --- a/fvm/transactionStorageLimiter.go +++ b/fvm/transactionStorageLimiter.go @@ -5,12 +5,13 @@ import ( "fmt" "github.com/onflow/cadence" - "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/common" "golang.org/x/exp/slices" "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/trace" ) @@ -33,6 +34,7 @@ type TransactionStorageLimiter struct{} // The payers balance is considered to be maxTxFees lower that its actual balance, due to the fact that // the fee deduction step happens after the storage limit check. func (limiter TransactionStorageLimiter) CheckStorageLimits( + ctx Context, env environment.Environment, snapshot *snapshot.ExecutionSnapshot, payer flow.Address, @@ -44,7 +46,7 @@ func (limiter TransactionStorageLimiter) CheckStorageLimits( defer env.StartChildSpan(trace.FVMTransactionStorageUsedCheck).End() - err := limiter.checkStorageLimits(env, snapshot, payer, maxTxFees) + err := limiter.checkStorageLimits(ctx, env, snapshot, payer, maxTxFees) if err != nil { return fmt.Errorf("storage limit check failed: %w", err) } @@ -55,6 +57,7 @@ func (limiter TransactionStorageLimiter) CheckStorageLimits( // storage limit is exceeded. The returned list include addresses of updated // registers (and the payer's address). func (limiter TransactionStorageLimiter) getStorageCheckAddresses( + ctx Context, snapshot *snapshot.ExecutionSnapshot, payer flow.Address, maxTxFees uint64, @@ -71,12 +74,17 @@ func (limiter TransactionStorageLimiter) getStorageCheckAddresses( addresses = append(addresses, payer) } + sc := systemcontracts.SystemContractsForChain(ctx.Chain.ChainID()) for id := range snapshot.WriteSet { address, ok := addressFromRegisterId(id) if !ok { continue } + if limiter.shouldSkipSpecialAddress(ctx, address, sc) { + continue + } + _, ok = dedup[address] if ok { continue @@ -88,10 +96,10 @@ func (limiter TransactionStorageLimiter) getStorageCheckAddresses( slices.SortFunc( addresses, - func(a flow.Address, b flow.Address) bool { + func(a flow.Address, b flow.Address) int { // reverse order to maintain compatibility with previous // implementation. - return bytes.Compare(a[:], b[:]) >= 0 + return bytes.Compare(b[:], a[:]) }) return addresses } @@ -99,12 +107,13 @@ func (limiter TransactionStorageLimiter) getStorageCheckAddresses( // checkStorageLimits checks if the transaction changed the storage of any // address and exceeded the storage limit. func (limiter TransactionStorageLimiter) checkStorageLimits( + ctx Context, env environment.Environment, snapshot *snapshot.ExecutionSnapshot, payer flow.Address, maxTxFees uint64, ) error { - addresses := limiter.getStorageCheckAddresses(snapshot, payer, maxTxFees) + addresses := limiter.getStorageCheckAddresses(ctx, snapshot, payer, maxTxFees) usages := make([]uint64, len(addresses)) @@ -155,3 +164,18 @@ func (limiter TransactionStorageLimiter) checkStorageLimits( return nil } + +// shouldSkipSpecialAddress returns true if the address is a special address where storage +// limits are not enforced. 
+// This is currently only the EVM storage address. This is a temporary solution. +func (limiter TransactionStorageLimiter) shouldSkipSpecialAddress( + ctx Context, + address flow.Address, + sc *systemcontracts.SystemContracts, +) bool { + if !ctx.EVMEnabled { + return false + } + + return sc.EVMStorage.Address == address +} diff --git a/fvm/transactionStorageLimiter_test.go b/fvm/transactionStorageLimiter_test.go index b9b2a87ec3a..aa97fd2b4cd 100644 --- a/fvm/transactionStorageLimiter_test.go +++ b/fvm/transactionStorageLimiter_test.go @@ -8,9 +8,11 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" fvmmock "github.com/onflow/flow-go/fvm/environment/mock" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/fvm/tracing" "github.com/onflow/flow-go/model/flow" ) @@ -19,15 +21,19 @@ func TestTransactionStorageLimiter(t *testing.T) { owner := flow.HexToAddress("1") executionSnapshot := &snapshot.ExecutionSnapshot{ WriteSet: map[flow.RegisterID]flow.RegisterValue{ - flow.NewRegisterID(string(owner[:]), "a"): flow.RegisterValue("foo"), - flow.NewRegisterID(string(owner[:]), "b"): flow.RegisterValue("bar"), + flow.NewRegisterID(owner, "a"): flow.RegisterValue("foo"), + flow.NewRegisterID(owner, "b"): flow.RegisterValue("bar"), + }, + } + + ctx := fvm.Context{ + EnvironmentParams: environment.EnvironmentParams{ + Chain: flow.Emulator.Chain(), }, } t.Run("capacity > storage -> OK", func(t *testing.T) { - chain := flow.Mainnet.Chain() env := &fvmmock.Environment{} - env.On("Chain").Return(chain) env.On("LimitAccountStorage").Return(true) env.On("StartChildSpan", mock.Anything).Return( tracing.NewMockTracerSpan()) @@ -40,13 +46,11 @@ func TestTransactionStorageLimiter(t *testing.T) { ) d := &fvm.TransactionStorageLimiter{} - err := d.CheckStorageLimits(env, executionSnapshot, flow.EmptyAddress, 0) + err := d.CheckStorageLimits(ctx, env, executionSnapshot, flow.EmptyAddress, 0) require.NoError(t, err, "Transaction with higher capacity than storage used should work") }) t.Run("capacity = storage -> OK", func(t *testing.T) { - chain := flow.Mainnet.Chain() env := &fvmmock.Environment{} - env.On("Chain").Return(chain) env.On("LimitAccountStorage").Return(true) env.On("StartChildSpan", mock.Anything).Return( tracing.NewMockTracerSpan()) @@ -59,13 +63,11 @@ func TestTransactionStorageLimiter(t *testing.T) { ) d := &fvm.TransactionStorageLimiter{} - err := d.CheckStorageLimits(env, executionSnapshot, flow.EmptyAddress, 0) + err := d.CheckStorageLimits(ctx, env, executionSnapshot, flow.EmptyAddress, 0) require.NoError(t, err, "Transaction with equal capacity than storage used should work") }) t.Run("capacity = storage -> OK (dedup payer)", func(t *testing.T) { - chain := flow.Mainnet.Chain() env := &fvmmock.Environment{} - env.On("Chain").Return(chain) env.On("LimitAccountStorage").Return(true) env.On("StartChildSpan", mock.Anything).Return( tracing.NewMockTracerSpan()) @@ -78,13 +80,11 @@ func TestTransactionStorageLimiter(t *testing.T) { ) d := &fvm.TransactionStorageLimiter{} - err := d.CheckStorageLimits(env, executionSnapshot, owner, 0) + err := d.CheckStorageLimits(ctx, env, executionSnapshot, owner, 0) require.NoError(t, err, "Transaction with equal capacity than storage used should work") }) t.Run("capacity < storage -> Not OK", func(t *testing.T) { - chain := flow.Mainnet.Chain() env := &fvmmock.Environment{} - 
env.On("Chain").Return(chain) env.On("LimitAccountStorage").Return(true) env.On("StartChildSpan", mock.Anything).Return( tracing.NewMockTracerSpan()) @@ -97,13 +97,11 @@ func TestTransactionStorageLimiter(t *testing.T) { ) d := &fvm.TransactionStorageLimiter{} - err := d.CheckStorageLimits(env, executionSnapshot, flow.EmptyAddress, 0) + err := d.CheckStorageLimits(ctx, env, executionSnapshot, flow.EmptyAddress, 0) require.Error(t, err, "Transaction with lower capacity than storage used should fail") }) t.Run("capacity > storage -> OK (payer not updated)", func(t *testing.T) { - chain := flow.Mainnet.Chain() env := &fvmmock.Environment{} - env.On("Chain").Return(chain) env.On("LimitAccountStorage").Return(true) env.On("StartChildSpan", mock.Anything).Return( tracing.NewMockTracerSpan()) @@ -118,13 +116,11 @@ func TestTransactionStorageLimiter(t *testing.T) { executionSnapshot = &snapshot.ExecutionSnapshot{} d := &fvm.TransactionStorageLimiter{} - err := d.CheckStorageLimits(env, executionSnapshot, owner, 1) + err := d.CheckStorageLimits(ctx, env, executionSnapshot, owner, 1) require.NoError(t, err, "Transaction with higher capacity than storage used should work") }) t.Run("capacity < storage -> Not OK (payer not updated)", func(t *testing.T) { - chain := flow.Mainnet.Chain() env := &fvmmock.Environment{} - env.On("Chain").Return(chain) env.On("LimitAccountStorage").Return(true) env.On("StartChildSpan", mock.Anything).Return( tracing.NewMockTracerSpan()) @@ -139,13 +135,11 @@ func TestTransactionStorageLimiter(t *testing.T) { executionSnapshot = &snapshot.ExecutionSnapshot{} d := &fvm.TransactionStorageLimiter{} - err := d.CheckStorageLimits(env, executionSnapshot, owner, 1000) + err := d.CheckStorageLimits(ctx, env, executionSnapshot, owner, 1000) require.Error(t, err, "Transaction with lower capacity than storage used should fail") }) t.Run("if ctx LimitAccountStorage false-> OK", func(t *testing.T) { - chain := flow.Mainnet.Chain() env := &fvmmock.Environment{} - env.On("Chain").Return(chain) env.On("LimitAccountStorage").Return(false) env.On("StartChildSpan", mock.Anything).Return( tracing.NewMockTracerSpan()) @@ -159,27 +153,70 @@ func TestTransactionStorageLimiter(t *testing.T) { ) d := &fvm.TransactionStorageLimiter{} - err := d.CheckStorageLimits(env, executionSnapshot, flow.EmptyAddress, 0) + err := d.CheckStorageLimits(ctx, env, executionSnapshot, flow.EmptyAddress, 0) require.NoError(t, err, "Transaction with higher capacity than storage used should work") }) - t.Run("non existing accounts or any other errors on fetching storage used -> Not OK", func(t *testing.T) { - chain := flow.Mainnet.Chain() + t.Run( + "non existing accounts or any other errors on fetching storage used -> Not OK", + func(t *testing.T) { + env := &fvmmock.Environment{} + env.On("LimitAccountStorage").Return(true) + env.On("StartChildSpan", mock.Anything).Return( + tracing.NewMockTracerSpan()) + env.On("GetStorageUsed", mock.Anything). 
+ Return(uint64(0), errors.NewAccountNotFoundError(owner)) + env.On("AccountsStorageCapacity", mock.Anything, mock.Anything, mock.Anything).Return( + cadence.NewArray([]cadence.Value{ + bytesToUFix64(100), + }), + nil, + ) + + d := &fvm.TransactionStorageLimiter{} + err := d.CheckStorageLimits(ctx, env, executionSnapshot, flow.EmptyAddress, 0) + require.Error( + t, + err, + "check storage used on non existing account (not general registers) should fail", + ) + }, + ) + t.Run("special account is skipped", func(t *testing.T) { + sc := systemcontracts.SystemContractsForChain(ctx.Chain.ChainID()) + evm := sc.EVMStorage.Address + + executionSnapshot := &snapshot.ExecutionSnapshot{ + WriteSet: map[flow.RegisterID]flow.RegisterValue{ + flow.NewRegisterID(evm, "a"): flow.RegisterValue("foo"), + }, + } + env := &fvmmock.Environment{} - env.On("Chain").Return(chain) env.On("LimitAccountStorage").Return(true) env.On("StartChildSpan", mock.Anything).Return( tracing.NewMockTracerSpan()) - env.On("GetStorageUsed", mock.Anything).Return(uint64(0), errors.NewAccountNotFoundError(owner)) - env.On("AccountsStorageCapacity", mock.Anything, mock.Anything, mock.Anything).Return( - cadence.NewArray([]cadence.Value{ - bytesToUFix64(100), - }), - nil, - ) + env.On("GetStorageUsed", mock.Anything). + Return(uint64(0), errors.NewAccountNotFoundError(owner)) + env.On("AccountsStorageCapacity", mock.Anything, mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + require.Len(t, args.Get(0).([]flow.Address), 0) + }). + Return( + // since the special account is skipped, the resulting array from AccountsStorageCapacity should be empty + cadence.NewArray([]cadence.Value{}), + nil, + ) d := &fvm.TransactionStorageLimiter{} - err := d.CheckStorageLimits(env, executionSnapshot, flow.EmptyAddress, 0) - require.Error(t, err, "check storage used on non existing account (not general registers) should fail") + + // if EVM is disabled don't skip the storage check + err := d.CheckStorageLimits(ctx, env, executionSnapshot, flow.EmptyAddress, 0) + require.Error(t, err) + + // if EVM is enabled skip the storage check + ctx := fvm.NewContextFromParent(ctx, fvm.WithEVMEnabled(true)) + err = d.CheckStorageLimits(ctx, env, executionSnapshot, flow.EmptyAddress, 0) + require.NoError(t, err) }) } diff --git a/fvm/transactionVerifier.go b/fvm/transactionVerifier.go index 67c3b76db5f..c8e1a4f1a3b 100644 --- a/fvm/transactionVerifier.go +++ b/fvm/transactionVerifier.go @@ -17,7 +17,7 @@ import ( ) type signatureType struct { - message []byte + payload []byte errorBuilder func(flow.TransactionSignature, error) errors.CodedError @@ -38,7 +38,7 @@ type signatureContinuation struct { signatureEntry // accountKey is set by getAccountKeys(). - accountKey flow.AccountPublicKey + accountKey flow.RuntimeAccountPublicKey // invokedVerify and verifyErr are set by verifyAccountSignatures(). 
Note
 	// that verifyAccountSignatures() is always called after getAccountKeys()
@@ -66,9 +66,15 @@ func (entry *signatureContinuation) verify() errors.CodedError {
 	entry.invokedVerify = true
+	valid, message := entry.ValidateExtensionDataAndReconstructMessage(entry.payload)
+	if !valid {
+		entry.verifyErr = entry.newError(fmt.Errorf("signature extension data is not valid"))
+		return entry.verifyErr
+	}
+
 	valid, err := crypto.VerifySignatureFromTransaction(
 		entry.Signature,
-		entry.message,
+		message,
 		entry.accountKey.PublicKey,
 		entry.accountKey.HashAlgo,
 	)
@@ -124,7 +129,7 @@ func newSignatureEntries(
 	type uniqueKey struct {
 		address flow.Address
-		index   uint64
+		index   uint32
 	}
 	duplicate := make(map[uniqueKey]struct{}, numSignatures)
@@ -173,7 +178,7 @@ func (v *TransactionVerifier) CheckAuthorization(
 ) error {
 	// TODO(Janez): verification is part of inclusion fees, not execution fees.
 	var err error
-	txnState.RunWithAllLimitsDisabled(func() {
+	txnState.RunWithMeteringDisabled(func() {
 		err = v.verifyTransaction(tracer, proc, txnState, keyWeightThreshold)
 	})
 	if err != nil {
@@ -206,7 +211,8 @@ func (v *TransactionVerifier) verifyTransaction(
 		tx.PayloadSignatures,
 		tx.PayloadMessage(),
 		tx.EnvelopeSignatures,
-		tx.EnvelopeMessage())
+		tx.EnvelopeMessage(),
+	)
 	if err != nil {
 		return err
 	}
@@ -259,14 +265,14 @@ func (v *TransactionVerifier) verifyTransaction(
 // getAccountKeys gets the signatures' account keys and populates the account
 // keys into the signature continuation structs.
 func (v *TransactionVerifier) getAccountKeys(
-	txnState storage.TransactionPreparer,
+	_ storage.TransactionPreparer,
 	accounts environment.Accounts,
 	signatures []*signatureContinuation,
 	proposalKey flow.ProposalKey,
 ) error {
 	foundProposalSignature := false
 	for _, signature := range signatures {
-		accountKey, err := accounts.GetPublicKey(
+		accountKey, err := accounts.GetRuntimeAccountPublicKey(
 			signature.Address,
 			signature.KeyIndex)
 		if err != nil {
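[Editor's note — a minimal sketch of the verify() control flow introduced above, under the assumption (reflected in the early return applied here) that verification bails out when the extension data is malformed. The types and helpers below (entry, validateAndReconstruct, the byte-equality "verification") are illustrative stand-ins for signatureContinuation, ValidateExtensionDataAndReconstructMessage, and crypto.VerifySignatureFromTransaction, not flow-go APIs:

	package main

	import (
		"bytes"
		"errors"
		"fmt"
	)

	// entry stands in for signatureContinuation: a signature plus the payload it covers.
	type entry struct {
		payload       []byte
		extensionData []byte
		signature     []byte
	}

	// validateAndReconstruct stands in for ValidateExtensionDataAndReconstructMessage:
	// reject malformed extension data, otherwise return the message to verify.
	func (e *entry) validateAndReconstruct() (bool, []byte) {
		if len(e.extensionData) > 64 { // illustrative validity rule only
			return false, nil
		}
		return true, append(append([]byte{}, e.extensionData...), e.payload...)
	}

	// verify mirrors the order of operations above: validate and reconstruct first,
	// return early on bad extension data, then verify the signature against the
	// reconstructed message (byte equality stands in for real signature verification).
	func (e *entry) verify() error {
		valid, message := e.validateAndReconstruct()
		if !valid {
			return errors.New("signature extension data is not valid")
		}
		if !bytes.Equal(e.signature, message) {
			return errors.New("invalid signature")
		}
		return nil
	}

	func main() {
		e := &entry{payload: []byte("envelope"), extensionData: []byte("ext")}
		e.signature = []byte("extenvelope")
		fmt.Println(e.verify()) // <nil>
	}

]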
diff --git a/fvm/transactionVerifier_test.go b/fvm/transactionVerifier_test.go
index 3fb0e5d9aa8..7d222bac013 100644
--- a/fvm/transactionVerifier_test.go
+++ b/fvm/transactionVerifier_test.go
@@ -1,6 +1,7 @@
 package fvm_test
 import (
+	"fmt"
 	"testing"
 	"github.com/stretchr/testify/require"
@@ -34,8 +35,6 @@ func TestTransactionVerification(t *testing.T) {
 	err = accounts.Create([]flow.AccountPublicKey{privKey2.PublicKey(1000)}, address2)
 	require.NoError(t, err)
-	tx := &flow.TransactionBody{}
-
 	run := func(
 		body *flow.TransactionBody,
 		ctx fvm.Context,
@@ -55,10 +54,15 @@ func TestTransactionVerification(t *testing.T) {
 			KeyIndex:    0,
 		}
-		tx.SetProposalKey(address1, 0, 0)
-		tx.SetPayer(address1)
-
-		tx.PayloadSignatures = []flow.TransactionSignature{sig, sig}
+		tx := &flow.TransactionBody{
+			ProposalKey: flow.ProposalKey{
+				Address:        address1,
+				KeyIndex:       0,
+				SequenceNumber: 0,
+			},
+			Payer:             address1,
+			PayloadSignatures: []flow.TransactionSignature{sig, sig},
+		}
 		ctx := fvm.NewContext(
 			fvm.WithAuthorizationChecksEnabled(true),
@@ -79,11 +83,16 @@ func TestTransactionVerification(t *testing.T) {
 			KeyIndex:    0,
 		}
-		tx.SetProposalKey(address1, 0, 0)
-		tx.SetPayer(address1)
-
-		tx.PayloadSignatures = []flow.TransactionSignature{sig}
-		tx.EnvelopeSignatures = []flow.TransactionSignature{sig}
+		tx := &flow.TransactionBody{
+			ProposalKey: flow.ProposalKey{
+				Address:        address1,
+				KeyIndex:       0,
+				SequenceNumber: 0,
+			},
+			Payer:              address1,
+			PayloadSignatures:  []flow.TransactionSignature{sig},
+			EnvelopeSignatures: []flow.TransactionSignature{sig},
+		}
 		ctx := fvm.NewContext(
 			fvm.WithAuthorizationChecksEnabled(true),
@@ -98,8 +107,14 @@ func TestTransactionVerification(t *testing.T) {
 	})
 	t.Run("invalid envelope signature", func(t *testing.T) {
-		tx.SetProposalKey(address1, 0, 0)
-		tx.SetPayer(address2)
+		tx := &flow.TransactionBody{
+			ProposalKey: flow.ProposalKey{
+				Address:        address1,
+				KeyIndex:       0,
+				SequenceNumber: 0,
+			},
+			Payer: address2,
+		}
 		// assign a valid payload signature
 		hasher1, err := crypto.NewPrefixedHashing(privKey1.HashAlgo, flow.TransactionTagString)
@@ -135,8 +150,6 @@ func TestTransactionVerification(t *testing.T) {
 	})
 	t.Run("invalid payload signature", func(t *testing.T) {
-		tx.SetProposalKey(address1, 0, 0)
-		tx.SetPayer(address2)
 		sig1 := flow.TransactionSignature{
 			Address:     address1,
@@ -145,6 +158,15 @@
 			// invalid signature
 		}
+		tx := &flow.TransactionBody{
+			ProposalKey: flow.ProposalKey{
+				Address:        address1,
+				KeyIndex:       0,
+				SequenceNumber: 0,
+			},
+			Payer: address2,
+		}
+
 		// assign a valid envelope signature
 		hasher2, err := crypto.NewPrefixedHashing(privKey2.HashAlgo, flow.TransactionTagString)
 		require.NoError(t, err)
@@ -175,8 +197,6 @@ func TestTransactionVerification(t *testing.T) {
 	// TODO: this test expects a Payload error but should be updated to expect an Envelope error.
 	// The test should be updated once the FVM updates the order of validating signatures:
 	// envelope needs to be checked first and payload later.
-		tx.SetProposalKey(address1, 0, 0)
-		tx.SetPayer(address2)
 		sig1 := flow.TransactionSignature{
 			Address:     address1,
@@ -192,8 +212,16 @@
 			// invalid signature
 		}
-		tx.PayloadSignatures = []flow.TransactionSignature{sig1}
-		tx.EnvelopeSignatures = []flow.TransactionSignature{sig2}
+		tx := &flow.TransactionBody{
+			ProposalKey: flow.ProposalKey{
+				Address:        address1,
+				KeyIndex:       0,
+				SequenceNumber: 0,
+			},
+			Payer:              address2,
+			PayloadSignatures:  []flow.TransactionSignature{sig1},
+			EnvelopeSignatures: []flow.TransactionSignature{sig2},
+		}
 		ctx := fvm.NewContext(
 			fvm.WithAuthorizationChecksEnabled(true),
@@ -206,4 +234,68 @@
 		// TODO: update to InvalidEnvelopeSignatureError once FVM verifier is updated.
 		require.True(t, errors.IsInvalidPayloadSignatureError(err))
 	})
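	// [Editor's note — context for the "tag combinations" test added below, not part
	// of the patch: Flow transaction signatures are domain-separated. The hasher
	// returned by crypto.NewPrefixedHashing prefixes the message with the given tag
	// before hashing, so only signatures produced with flow.TransactionDomainTag
	// verify successfully. Using the test's own helpers, a correctly tagged envelope
	// signature is built roughly like this:
	//
	//	hasher, _ := crypto.NewPrefixedHashing(privKey1.HashAlgo, string(flow.TransactionDomainTag[:]))
	//	sig, _ := privKey1.PrivateKey.Sign(tx.EnvelopeMessage(), hasher)
	//	tx.EnvelopeSignatures[0].Signature = sig
	//
	// An empty or random tag signs a different prefixed message, so verification
	// fails with IsInvalidEnvelopeSignatureError, which is exactly what the cases
	// below assert.]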
+
+	// test that Transaction Signature verification uses the correct domain tag for verification,
+	// i.e. the message verification reconstruction logic uses the right tag (check signatureContinuation.verify())
+	t.Run("tag combinations", func(t *testing.T) {
+		cases := []struct {
+			signTag  string
+			validity bool
+		}{
+			{
+				signTag:  string(flow.TransactionDomainTag[:]), // only valid tag
+				validity: true,
+			},
+			{
+				signTag:  "", // invalid tag
+				validity: false,
+			}, {
+				signTag:  "random_tag", // invalid tag
+				validity: false,
+			},
+		}
+
+		sig := flow.TransactionSignature{
+			Address:     address1,
+			SignerIndex: 0,
+			KeyIndex:    0,
+		}
+
+		tx := &flow.TransactionBody{
+			ProposalKey: flow.ProposalKey{
+				Address:        address1,
+				KeyIndex:       0,
+				SequenceNumber: 0,
+			},
+			Payer:              address1,
+			EnvelopeSignatures: []flow.TransactionSignature{sig},
+		}
+
+		for _, c := range cases {
+			t.Run(fmt.Sprintf("sign tag: %v", c.signTag), func(t *testing.T) {
+
+				// generate an envelope signature using the test tag
+				hasher, err := crypto.NewPrefixedHashing(privKey1.HashAlgo, c.signTag)
+				require.NoError(t, err)
+				sig, err := privKey1.PrivateKey.Sign(tx.EnvelopeMessage(), hasher)
+				require.NoError(t, err)
+
+				// set the signature into the transaction
+				tx.EnvelopeSignatures[0].Signature = sig
+
+				ctx := fvm.NewContext(
+					fvm.WithAuthorizationChecksEnabled(true),
+					fvm.WithAccountKeyWeightThreshold(1000),
+					fvm.WithSequenceNumberCheckAndIncrementEnabled(false),
+					fvm.WithTransactionBodyExecutionEnabled(false))
+				err = run(tx, ctx, txnState)
+				if c.validity {
+					require.NoError(t, err)
+				} else {
+					require.Error(t, err)
+					require.True(t, errors.IsInvalidEnvelopeSignatureError(err))
+				}
+			})
+		}
+	})
 }
diff --git a/go.mod b/go.mod
index 602fb4c15fd..771521f637a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,280 +1,366 @@
 module github.com/onflow/flow-go
-go 1.19
+go 1.25.0
 require (
-	cloud.google.com/go/compute/metadata v0.2.3
+	cloud.google.com/go/compute/metadata v0.8.0
 	cloud.google.com/go/profiler v0.3.0
-	cloud.google.com/go/storage v1.28.1
+	cloud.google.com/go/storage v1.50.0
 	github.com/antihax/optional v1.0.0
-	github.com/aws/aws-sdk-go-v2/config v1.18.19
+	github.com/aws/aws-sdk-go-v2/config v1.31.9
 	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0
-	github.com/btcsuite/btcd/btcec/v2 v2.2.1
-	github.com/davecgh/go-spew v1.1.1
+	github.com/btcsuite/btcd/btcec/v2 v2.3.4
+	github.com/cockroachdb/pebble/v2 v2.0.6
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 	github.com/dgraph-io/badger/v2 v2.2007.4
 	github.com/ef-ds/deque v1.0.4
-	github.com/ethereum/go-ethereum v1.9.13
-	github.com/fxamacker/cbor/v2 v2.4.1-0.20220515183430-ad2eae63303f
-	github.com/gammazero/workerpool v1.1.2
+	github.com/ethereum/go-ethereum v1.16.3
+	github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829
+	github.com/gammazero/workerpool v1.1.3
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/mock v1.6.0
-	github.com/golang/protobuf v1.5.2
-	github.com/google/go-cmp v0.5.9
-	github.com/google/pprof v0.0.0-20221219190121-3cb0bae90811
-	github.com/google/uuid v1.3.0
-	github.com/gorilla/mux v1.8.0
-	github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2 v2.0.0-rc.2
-	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbfea
+	github.com/golang/protobuf v1.5.4
+	github.com/google/go-cmp v0.7.0
+	github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad
+	github.com/google/uuid
v1.6.0 + github.com/gorilla/mux v1.8.1 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/golang-lru v0.5.4 github.com/improbable-eng/grpc-web v0.15.0 - github.com/ipfs/go-block-format v0.0.3 - github.com/ipfs/go-blockservice v0.4.0 - github.com/ipfs/go-cid v0.3.2 - github.com/ipfs/go-datastore v0.6.0 - github.com/ipfs/go-ds-badger2 v0.1.3 - github.com/ipfs/go-ipfs-blockstore v1.2.0 - github.com/ipfs/go-ipfs-provider v0.7.0 - github.com/ipfs/go-ipld-format v0.3.0 + github.com/ipfs/go-block-format v0.2.0 + github.com/ipfs/go-cid v0.4.1 + github.com/ipfs/go-datastore v0.8.2 + github.com/ipfs/go-ds-pebble v0.5.0 + github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log v1.0.5 github.com/ipfs/go-log/v2 v2.5.1 github.com/libp2p/go-addr-util v0.1.0 - github.com/libp2p/go-libp2p v0.24.2 - github.com/libp2p/go-libp2p-kad-dht v0.19.0 - github.com/libp2p/go-libp2p-kbucket v0.5.0 - github.com/libp2p/go-libp2p-pubsub v0.8.2-0.20221201175637-3d2eab35722e - github.com/m4ksio/wal v1.0.1-0.20221209164835-154a17396e4c - github.com/montanaflynn/stats v0.6.6 - github.com/multiformats/go-multiaddr v0.8.0 - github.com/multiformats/go-multiaddr-dns v0.3.1 - github.com/multiformats/go-multihash v0.2.1 - github.com/onflow/atree v0.5.0 - github.com/onflow/cadence v0.38.1 - github.com/onflow/flow v0.3.4 - github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 - github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 - github.com/onflow/flow-go-sdk v0.40.0 - github.com/onflow/flow-go/crypto v0.24.7 - github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230428213521-89bcc9e8517e - github.com/onflow/go-bitswap v0.0.0-20221017184039-808c5791a8a8 + github.com/libp2p/go-libp2p v0.38.2 + github.com/libp2p/go-libp2p-kad-dht v0.25.2 + github.com/libp2p/go-libp2p-kbucket v0.6.3 + github.com/libp2p/go-libp2p-pubsub v0.13.0 + github.com/montanaflynn/stats v0.7.1 + github.com/multiformats/go-multiaddr v0.14.0 + github.com/multiformats/go-multiaddr-dns v0.4.1 + github.com/multiformats/go-multihash v0.2.3 + github.com/onflow/atree v0.10.1 + github.com/onflow/cadence v1.7.1 + github.com/onflow/crypto v0.25.3 + github.com/onflow/flow v0.4.15 + github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.0 + github.com/onflow/flow-core-contracts/lib/go/templates v1.9.0 + github.com/onflow/flow-go-sdk v1.8.4 + github.com/onflow/flow/protobuf/go/flow v0.4.16 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 - github.com/pierrec/lz4 v2.6.1+incompatible github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 - github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_golang v1.20.5 github.com/rs/cors v1.8.0 github.com/rs/zerolog v1.29.0 - github.com/schollz/progressbar/v3 v3.8.3 + github.com/schollz/progressbar/v3 v3.18.0 github.com/sethvargo/go-retry v0.2.3 github.com/shirou/gopsutil/v3 v3.22.2 - github.com/spf13/cobra v1.6.1 - github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.8.2 - github.com/vmihailenco/msgpack v4.0.4+incompatible + github.com/spf13/cobra v1.8.1 + github.com/spf13/pflag v1.0.6 + github.com/spf13/viper v1.15.0 + github.com/stretchr/testify v1.11.1 github.com/vmihailenco/msgpack/v4 v4.3.11 - go.opentelemetry.io/otel v1.8.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0 - 
go.opentelemetry.io/otel/sdk v1.8.0 - go.opentelemetry.io/otel/trace v1.8.0 - go.uber.org/atomic v1.10.0 - go.uber.org/multierr v1.9.0 - golang.org/x/crypto v0.4.0 - golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15 - golang.org/x/sync v0.1.0 - golang.org/x/sys v0.6.0 - golang.org/x/text v0.8.0 - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac - golang.org/x/tools v0.6.0 - google.golang.org/api v0.114.0 - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 - google.golang.org/grpc v1.53.0 + go.opentelemetry.io/otel v1.37.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 + go.opentelemetry.io/otel/sdk v1.37.0 + go.opentelemetry.io/otel/trace v1.37.0 + go.uber.org/atomic v1.11.0 + go.uber.org/multierr v1.11.0 + golang.org/x/crypto v0.41.0 + golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 + golang.org/x/sync v0.16.0 + golang.org/x/sys v0.35.0 + golang.org/x/text v0.28.0 + golang.org/x/time v0.12.0 + golang.org/x/tools v0.36.0 + google.golang.org/api v0.247.0 + google.golang.org/genproto v0.0.0-20250603155806-513f23925822 + google.golang.org/grpc v1.75.1 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 - google.golang.org/protobuf v1.30.0 + google.golang.org/protobuf v1.36.9 gotest.tools v2.2.0+incompatible - pgregory.net/rapid v0.4.7 + pgregory.net/rapid v1.1.0 ) require ( github.com/coreos/go-semver v0.3.0 - github.com/slok/go-http-metrics v0.10.0 - gonum.org/v1/gonum v0.8.2 + github.com/docker/go-units v0.5.0 + github.com/dustin/go-humanize v1.0.1 + github.com/fxamacker/golang-lru/v2 v2.0.0-20250716153046-22c8d17dc4ee + github.com/go-playground/validator/v10 v10.19.0 + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e + github.com/gorilla/websocket v1.5.3 + github.com/hashicorp/golang-lru/v2 v2.0.7 + github.com/holiman/uint256 v1.3.2 + github.com/huandu/go-clone/generic v1.7.2 + github.com/ipfs/boxo v0.17.1-0.20240131173518-89bceff34bf1 + github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 + github.com/libp2p/go-libp2p-routing-helpers v0.7.4 + github.com/mitchellh/mapstructure v1.5.0 + github.com/onflow/flow-evm-bridge v0.1.0 + github.com/onflow/go-ethereum v1.13.4 + github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 + github.com/onflow/wal v1.0.2 + github.com/pierrec/lz4/v4 v4.1.22 + github.com/slok/go-http-metrics v0.12.0 + github.com/sony/gobreaker v0.5.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 + google.golang.org/genproto/googleapis/bytestream v0.0.0-20250804133106-a7a43d27e69b + gopkg.in/yaml.v2 v2.4.0 ) require ( - cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect - cloud.google.com/go/iam v0.12.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.17.7 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.18 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 // indirect + github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect + github.com/emicklei/dot v1.6.2 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.0 // 
indirect + github.com/ferranbt/fastssz v0.1.4 // indirect +) + +require ( + cel.dev/expr v0.24.0 // indirect + cloud.google.com/go v0.120.0 // indirect + cloud.google.com/go/auth v0.16.4 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/OneOfOne/xxhash v1.2.8 // indirect + github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc // indirect + github.com/StackExchange/wmi v1.2.1 // indirect + github.com/VictoriaMetrics/fastcache v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2 v1.39.1 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.13 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 // indirect - github.com/aws/smithy-go v1.13.5 // indirect - github.com/benbjohnson/clock v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 // indirect + github.com/aws/smithy-go v1.23.0 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.3.0 // indirect - github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/bits-and-blooms/bitset v1.24.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/containerd/cgroups v1.0.4 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect + github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/consensys/gnark-crypto v0.18.0 // indirect + github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/crate-crypto/go-ipa 
v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/deckarep/golang-set/v2 v2.6.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect - github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect + github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/elastic/gosigar v0.14.2 // indirect + github.com/elastic/gosigar v0.14.3 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/ethereum/go-verkle v0.2.2 // indirect github.com/felixge/fgprof v0.9.3 // indirect - github.com/flynn/noise v1.0.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/filecoin-project/go-clock v0.1.0 // indirect + github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/fxamacker/circlehash v0.3.0 // indirect - github.com/gammazero/deque v0.1.0 // indirect - github.com/ghodss/yaml v1.0.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fxamacker/circlehash v0.3.0 + github.com/gabriel-vasile/mimetype v1.4.6 // indirect + github.com/gammazero/deque v1.0.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect - github.com/go-test/deep v1.0.8 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/golang/glog v1.0.0 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/gofrs/flock v0.12.1 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.1 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/huin/goupnp v1.0.3 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/huandu/go-clone v1.6.0 // indirect + 
github.com/huin/goupnp v1.3.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/go-bitswap v0.9.0 // indirect github.com/ipfs/go-cidutil v0.1.0 // indirect - github.com/ipfs/go-fetcher v1.5.0 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect - github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect - github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect - github.com/ipfs/go-ipfs-pq v0.0.2 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect - github.com/ipfs/go-ipns v0.2.0 // indirect + github.com/ipfs/go-ipfs-pq v0.0.3 // indirect + github.com/ipfs/go-ipfs-util v0.0.3 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect - github.com/ipfs/go-peertaskqueue v0.7.0 // indirect - github.com/ipfs/go-verifcid v0.0.1 // indirect - github.com/ipld/go-ipld-prime v0.14.1 // indirect + github.com/ipfs/go-peertaskqueue v0.8.2 // indirect + github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/kevinburke/go-bindata v3.23.0+incompatible // indirect - github.com/klauspost/compress v1.15.13 // indirect - github.com/klauspost/cpuid/v2 v2.2.2 // indirect - github.com/koron/go-ssdp v0.0.3 // indirect + github.com/k0kubun/pp/v3 v3.5.0 // indirect + github.com/kevinburke/go-bindata v3.24.0+incompatible // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect - github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect - github.com/libp2p/go-libp2p-core v0.20.1 // indirect + github.com/libp2p/go-flow-metrics v0.2.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-record v0.2.0 // indirect - github.com/libp2p/go-msgio v0.2.0 // indirect - github.com/libp2p/go-nat v0.1.0 // indirect - github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-openssl v0.1.0 // indirect - github.com/libp2p/go-reuseport v0.2.0 // indirect - github.com/libp2p/go-yamux/v4 v4.0.0 // indirect - github.com/logrusorgru/aurora v2.0.3+incompatible // indirect - github.com/lucas-clemente/quic-go v0.31.1 // indirect + github.com/libp2p/go-msgio v0.3.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect + github.com/libp2p/go-netroute v0.2.2 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.1 // indirect + github.com/logrusorgru/aurora/v4 v4.0.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/magiconair/properties v1.8.6 // indirect - github.com/marten-seemann/qtls-go1-18 v0.1.3 // indirect - github.com/marten-seemann/qtls-go1-19 v0.1.1 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-pointer v0.0.1 // indirect - github.com/mattn/go-runewidth v0.0.13 // 
indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.50 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect - github.com/minio/sha256-simd v1.0.0 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.7.0 // indirect - github.com/multiformats/go-multistream v0.3.3 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-multistream v0.6.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onflow/flow-ft/lib/go/contracts v0.7.0 // indirect - github.com/onflow/sdks v0.5.0 // indirect - github.com/onsi/ginkgo/v2 v2.6.1 // indirect - github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/onflow/fixed-point v0.1.1 // indirect + github.com/onflow/flow-ft/lib/go/contracts v1.0.1 // indirect + github.com/onflow/flow-ft/lib/go/templates v1.0.1 // indirect + github.com/onflow/flow-nft/lib/go/contracts v1.3.0 // indirect + github.com/onflow/flow-nft/lib/go/templates v1.3.0 // indirect + github.com/onflow/sdks v0.6.0-preview.1 // indirect + github.com/onsi/ginkgo/v2 v2.22.0 // indirect + github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.2 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect + github.com/pelletier/go-toml/v2 v2.2.1 // indirect + github.com/pion/datachannel v1.5.10 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/ice/v2 v2.3.37 // indirect + github.com/pion/interceptor v0.1.37 // indirect + github.com/pion/logging v0.2.2 // indirect + github.com/pion/mdns v0.0.12 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtp v1.8.10 // indirect + github.com/pion/sctp v1.8.35 // indirect + github.com/pion/sdp/v3 v3.0.9 // indirect + github.com/pion/srtp/v2 v2.0.20 // indirect + github.com/pion/stun v0.6.1 // indirect + github.com/pion/stun/v2 v2.0.0 // indirect + github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect + github.com/pion/turn/v2 v2.1.6 // indirect + github.com/pion/webrtc/v3 v3.3.5 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/polydawn/refmt v0.89.0 // indirect github.com/power-devops/perfstat 
v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.39.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.61.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/psiemens/sconfig v0.1.0 // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.48.2 // indirect + github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/rivo/uniseg v0.2.1-0.20211004051800-57c86be7915a // indirect - github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.9.0 // indirect + github.com/spf13/afero v1.10.0 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/supranational/blst v0.3.14 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c // indirect - github.com/tklauser/go-sysconf v0.3.9 // indirect - github.com/tklauser/numcpus v0.3.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect - github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect + github.com/wlynxg/anet v0.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - github.com/zeebo/blake3 v0.2.3 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect + github.com/zeebo/errs v1.4.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 // indirect - go.opentelemetry.io/proto/otlp v0.18.0 // indirect - go.uber.org/dig v1.15.0 // indirect - go.uber.org/fx v1.18.2 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/mod v0.8.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/appengine v1.6.7 // indirect - gopkg.in/ini.v1 v1.66.6 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect + 
go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.23.0 // indirect + go.uber.org/mock v0.5.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/term v0.34.0 // indirect + gonum.org/v1/gonum v0.16.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.1.7 // indirect - nhooyr.io/websocket v1.8.6 // indirect + lukechampine.com/blake3 v1.4.1 // indirect + nhooyr.io/websocket v1.8.7 // indirect ) + +// Using custom fork until https://github.com/onflow/flow-go/issues/5338 is resolved +replace github.com/ipfs/boxo => github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 + +// Using custom fork until https://github.com/ipfs/go-ds-pebble/issues/64 is merged +replace github.com/ipfs/go-ds-pebble => github.com/onflow/go-ds-pebble v0.0.0-20251003225212-131edca3a897 diff --git a/go.sum b/go.sum index ed305eed14f..9252c67ea5b 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -31,8 +33,12 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= +cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= +cloud.google.com/go/auth v0.16.4 h1:fXOAIQmkApVvcIn7Pc2+5J8QTMVbUGLscnSVNl11su8= +cloud.google.com/go/auth v0.16.4/go.mod h1:j10ncYwjX/g3cdX7GpEzsdM+d+ZNsXAbb6qXA7p1Y5M= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -44,16 +50,19 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= 
-cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.8.0 h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA= +cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= +cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= cloud.google.com/go/profiler v0.3.0 h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo= cloud.google.com/go/profiler v0.3.0/go.mod h1:9wYk9eY4iZHsev8TQb61kh3wiOiSyz/xOYixWPzweCU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -67,161 +76,153 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= -cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= +cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git 
v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
-github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
-github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE=
+github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0 h1:nNMpRpnkWDAaqcpxMJvxa/Ud98gjbYwayJY4/9bdjiU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
+github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU=
+github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
-github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc h1:DCHzPQOcU/7gwDTWbFQZc5qHMPS1g0xTO56k8NXsv9M=
+github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc/go.mod h1:LJM5a3zcIJ/8TmZwlUczvROEJT8ntOdhdG9jjcR1B0I=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE=
+github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
+github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
+github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
 github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
-github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
+github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo=
+github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo=
 github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
-github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
-github.com/aws/aws-sdk-go-v2 v1.17.7 h1:CLSjnhJSTSogvqUGhIC6LqFKATMRexcxLZ0i/Nzk9Eg=
-github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
+github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
 github.com/aws/aws-sdk-go-v2/config v1.8.0/go.mod h1:w9+nMZ7soXCe5nT46Ri354SNhXDQ6v+V5wqDjnZE+GY=
-github.com/aws/aws-sdk-go-v2/config v1.18.19 h1:AqFK6zFNtq4i1EYu+eC7lcKHYnZagMn6SW171la0bGw=
-github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY=
+github.com/aws/aws-sdk-go-v2/config v1.31.9 h1:Q+9hVk8kmDGlC7XcDout/vs0FZhHnuPCPv+TRAYDans=
+github.com/aws/aws-sdk-go-v2/config v1.31.9/go.mod h1:OpMrPn6rRbHKU4dAVNCk/EQx8sEQJI7hl9GZZ5u/Y+U=
 github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.18 h1:EQMdtHwz0ILTW1hoP+EwuWhwCG1hD6l3+RWFQABET4c=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.13 h1:gkpEm65/ZfrGJ3wbFH++Ki7DyaWtsWbK9idX6OXCo2E=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.13/go.mod h1:eVTHz1yI2/WIlXTE8f70mcrSxNafXD5sJpTIM9f+kmo=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0/go.mod h1:CpNzHK9VEFUCknu50kkB8z58AH2B5DvPP7ea1LHve/Y=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 h1:gt57MN3liKiyGopcqgNzJb2+d9MJaKT/q1OksHNXVE4=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 h1:Is2tPmieqGS2edBnmOJIbdvOA6Op+rRpaYR60iBAwXM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7/go.mod h1:F1i5V5421EGci570yABvpIXgRIBPb5JM+lSkHF6Dq5w=
 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 h1:VGkV9KmhGqOQWnHyi4gLG98kE6OecT42fdrCGFWxJsc=
 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1/go.mod h1:PLlnMiki//sGnCJiW+aVpvP/C8Kcm8mEj/IVm9+9qk4=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 h1:sJLYcS+eZn5EeNINGHSCRAwUJMFVqklwkH36Vbyai7M=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 h1:1mnRASEKnkqsntcxHaysxwgVoUUp5dkiB+l3llKnqyg=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2/go.mod h1:BQV0agm+JEhqR+2RT5e1XTFIDcAAV0eW6z2trp+iduw=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 h1:p5luUImdIqywn6JpQsW3tq5GNOxKmOnEpybzPx+d1lk=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 h1:5LHn8JQ0qvjD9L9JhMtylnkcw7j05GDZqM9Oin6hpr0=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 h1:mLgc5QIgOy26qyh5bvW+nDoAppxgn3J2WV3m9ewq7+8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7/go.mod h1:wXb/eQnqt8mDQIQTTmcw58B5mYGxzLGZGK8PWNFZ0BA=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 h1:HWsM0YQWX76V6MOp07YuTYacm8k7h69ObJuw7Nck+og=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0/go.mod h1:LKb3cKNQIMh+itGnEpKGcnL/6OIjPZqrtYah1w5f+3o=
 github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 h1:nPLfLPfglacc29Y949sDxpr3X/blaY40s3B85WT2yZU=
 github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0/go.mod h1:Iv2aJVtVSm/D22rFoX99cLG4q4uB7tppuCsulGe98k4=
 github.com/aws/aws-sdk-go-v2/service/sso v1.4.0/go.mod h1:+1fpWnL96DL23aXPpMGbsmKe8jLTEfbjuQoA4WS1VaA=
-github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 h1:5V7DWLBd7wTELVz5bPpwzYy/sikk0gsgZfj40X+l5OI=
-github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 h1:B8cauxOH1W1v7rd8RdI/MWnoR4Ze0wIHWrb90qczxj4=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 h1:7PKX3VYsZ8LUWceVRuv0+PU+E7OtQb1lgmi5vmUE9CM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.3/go.mod h1:Ql6jE9kyyWI5JHn+61UT/Y5Z0oyVJGmgmJbZD5g4unY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5 h1:gBBZmSuIySGqDLtXdZiYpwyzbJKXQD2jjT0oDY6ywbo=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5/go.mod h1:XclEty74bsGBCr1s0VSaA11hQ4ZidK4viWK7rRfO88I=
 github.com/aws/aws-sdk-go-v2/service/sts v1.7.0/go.mod h1:0qcSMCyASQPN2sk/1KQLQ2Fh6yq8wm0HSDAimPhzCoM=
-github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 h1:bWNgNdRko2x6gqa0blfATqAZKZokPIeM1vfmQt2pnvM=
-github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 h1:PR00NXRYgY4FWHqOGx3fC3lhVKjsp1GdloDv2ynMSd8=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.4/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8=
 github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
-github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
-github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
+github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
 github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bits-and-blooms/bitset v1.3.0 h1:h7mv5q31cthBTd7V4kLAZaIThj1e8vPGcSqpPue9KVI=
-github.com/bits-and-blooms/bitset v1.3.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bitset v1.24.0 h1:H4x4TuulnokZKvHLfzVRTHJfFfnHEeSYJizujEZvmAM=
+github.com/bits-and-blooms/bitset v1.24.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
-github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
-github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
-github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
-github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
-github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
-github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
-github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94=
-github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E=
-github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8=
-github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
-github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
-github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
-github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
-github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
-github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
-github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
+github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
+github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
 github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bytedance/sonic v1.11.5 h1:G00FYjjqll5iQ1PYXynbg/hyzqBqavH8Mo9/oTopd9k=
+github.com/bytedance/sonic v1.11.5/go.mod h1:X2PC2giUdj/Cv2lliWFLk6c/DUQok5rViJSemeB0wDw=
+github.com/bytedance/sonic/loader v0.1.0 h1:skjHJ2Bi9ibbq3Dwzh1w42MQ7wZJrXmEZr/uqUn3f0Q=
+github.com/bytedance/sonic/loader v0.1.0/go.mod h1:UmRT+IRTGKz/DAkzcEGzyVqQFJ7H9BqwBO3pm9H/+HY=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
-github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM=
+github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U=
+github.com/cloudwego/base64x v0.1.3 h1:b5J/l8xolB7dyDTTmhJP2oTs5LdrjyrUFuNxdfq5hAg=
+github.com/cloudwego/base64x v0.1.3/go.mod h1:1+1K5BUHIQzyapgpF7LwvOGAEDicKtt1umPV+aN8pi8=
+github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
+github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -231,11 +232,37 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 h1:bvJv505UUfjzbaIPdNS4AEkHreDqQk6yuNpsdRHpwFA=
+github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056 h1:slXychO2uDM6hYRu4c0pD0udNI8uObfeKN6UInWViS8=
+github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
+github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=
+github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
+github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4=
+github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M=
+github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
+github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
+github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
+github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
+github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw=
+github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo=
+github.com/cockroachdb/pebble/v2 v2.0.6 h1:eL54kX2AKp1ePJ/8vq4IO3xIEPpvVjlSP12dlLYilyE=
+github.com/cockroachdb/pebble/v2 v2.0.6/go.mod h1:un1DXG73PKw3F7Ndd30YactyvsFviI9Fuhe0tENdnyA=
+github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
+github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
+github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA=
+github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
+github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
+github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0=
+github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c=
 github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
-github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
@@ -254,46 +281,41 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/crate-crypto/go-eth-kzg v1.3.0 h1:05GrhASN9kDAidaFJOda6A4BEvgvuXbazXg/0E3OOdI=
+github.com/crate-crypto/go-eth-kzg v1.3.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
+github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg=
+github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=
 github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis=
-github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
 github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
-github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
-github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
-github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
+github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
+github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
+github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
+github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
 github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I=
 github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
-github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
-github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
-github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
-github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU=
-github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
 github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o=
 github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk=
-github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
-github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA=
 github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
@@ -301,14 +323,14 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
 github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
 github.com/ef-ds/deque v1.0.4 h1:iFAZNmveMT9WERAkqLJ+oaABF9AcVQ5AjXem/hroniI=
 github.com/ef-ds/deque v1.0.4/go.mod h1:gXDnTC3yqvBcHbq2lcExjtAcVrOnJCbMcZXmuj8Z4tg=
-github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
 github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
-github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
-github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
+github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A=
+github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
 github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -319,52 +341,75 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
+github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ethereum/go-ethereum v1.9.13 h1:rOPqjSngvs1VSYH2H+PMPiWt4VEulvNRbFgqiGqJM3E=
-github.com/ethereum/go-ethereum v1.9.13/go.mod h1:qwN9d1GLyDh0N7Ab8bMGd0H9knaji2jOBm2RrMGjXls=
-github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w=
+github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E=
+github.com/ethereum/go-ethereum v1.16.3 h1:nDoBSrmsrPbrDIVLTkDQCy1U9KdHN+F2PzvMbDoS42Q=
+github.com/ethereum/go-ethereum v1.16.3/go.mod h1:Lrsc6bt9Gm9RyvhfFK53vboCia8kpF9nv+2Ukntnl+8=
+github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
+github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
 github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
-github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY=
+github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg=
+github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU=
+github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
-github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
-github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
-github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
+github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
 github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
 github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
-github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/fxamacker/cbor/v2 v2.4.1-0.20220515183430-ad2eae63303f h1:dxTR4AaxCwuQv9LAVTAC2r1szlS+epeuPT5ClLKT6ZY=
-github.com/fxamacker/cbor/v2 v2.4.1-0.20220515183430-ad2eae63303f/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829 h1:qOglMkJ5YBwog/GU/NXhP9gFqxUGMuqnmCkbj65JMhk=
+github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
 github.com/fxamacker/circlehash v0.3.0 h1:XKdvTtIJV9t7DDUtsf0RIpC1OcxZtPbmgIH7ekx28WA=
 github.com/fxamacker/circlehash v0.3.0/go.mod h1:3aq3OfVvsWtkWMb6A1owjOQFA+TLsD5FgJflnaQwtMM=
-github.com/gammazero/deque v0.1.0 h1:f9LnNmq66VDeuAlSAapemq/U7hJ2jpIWa4c09q8Dlik=
-github.com/gammazero/deque v0.1.0/go.mod h1:KQw7vFau1hHuM8xmI9RbgKFbAsQFWmBpqQ2KenFLk6M=
-github.com/gammazero/workerpool v1.1.2 h1:vuioDQbgrz4HoaCi2q1HLlOXdpbap5AET7xu5/qj87g=
-github.com/gammazero/workerpool v1.1.2/go.mod h1:UelbXcO0zCIGFcufcirHhq2/xtLXJdQ29qZNlXG9OjQ=
-github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
-github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/fxamacker/golang-lru/v2 v2.0.0-20250716153046-22c8d17dc4ee h1:9RFHOj6xUdQRi1lz/BJXwi0IloXtv6Y2tp7rdSC7SQk=
+github.com/fxamacker/golang-lru/v2 v2.0.0-20250716153046-22c8d17dc4ee/go.mod h1:1FYBKLDzpfjjoWMTK1cIOxsTomg/n35DWNLu6FoYEb8=
+github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc=
+github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc=
+github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34=
+github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo=
+github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q=
+github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc=
+github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
+github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
+github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=
+github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
 github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
 github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
 github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
-github.com/gin-gonic/gin v1.7.4 h1:QmUZXrvJ9qZ3GfWvQ+2wnW/1ePrTEJqPKMYEU3lD/DM=
+github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
+github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
 github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
+github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
@@ -375,58 +420,63 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
+github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
+github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
 github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
-github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
 github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
 github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
-github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
 github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
 github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
-github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
-github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
+github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4=
+github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
-github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
 github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=
 github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
 github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
 github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
 github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
 github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
+github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM=
+github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
+github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
 github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I=
+github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -443,9 +493,7 @@ github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3K
 github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
 github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
@@ -460,13 +508,14 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc=
+github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -483,12 +532,13 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
 github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
 github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
@@ -496,7 +546,8 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
+github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -514,17 +565,18 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
 github.com/google/pprof v0.0.0-20220412212628-83db2b799d1f/go.mod h1:Pt31oes+eGImORns3McJn8zHefuQl2rG8l6xQjGYB4U=
-github.com/google/pprof v0.0.0-20221219190121-3cb0bae90811 h1:wORs2YN3R3ona/CXYuTvLM31QlgoNKHvlCNuArCDDCU=
-github.com/google/pprof v0.0.0-20221219190121-3cb0bae90811/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
 github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
 github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -534,43 +586,37 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0
 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
 github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
 github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
-github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A=
-github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
+github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
 github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
-github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2 v2.0.0-rc.2 h1:uxUHSMwWDJ/9jVPHNumRC8WZOi3hrBL22ObVOoLg4ww=
-github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2 v2.0.0-rc.2/go.mod h1:BL7w7qd2l/j9jgY6WMhYutfOFQc0I8RTVwtjpnAMoTM=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbfea h1:1Tk1IbruXbunEnaIZEFb+Hpv9BIZti3OxKwKn5wWyKk=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbfea/go.mod h1:GugMBs30ZSAkckqXEAIEGyYdDH6EgqowG8ppA3Zt+AY=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
-github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
-github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
 github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
 github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -589,510 +635,213 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
+github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
+github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
+github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c=
+github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U=
+github.com/huandu/go-clone v1.6.0 h1:HMo5uvg4wgfiy5FoGOqlFLQED/VGRm2D9Pi8g1FXPGc=
+github.com/huandu/go-clone v1.6.0/go.mod h1:ReGivhG6op3GYr+UY3lS6mxjKp7MIGTknuU5TbTVaXE=
+github.com/huandu/go-clone/generic v1.7.2 h1:47pQphxs1Xc9cVADjOHN+Bm5D0hNagwH9UXErbxgVKA=
+github.com/huandu/go-clone/generic v1.7.2/go.mod h1:xgd9ZebcMsBWWcBx5mVMCoqMX24gLWr5lQicr+nVXNs=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
-github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
-github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
-github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
-github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
-github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
+github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
+github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
 github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ=
 github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI=
 github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
 github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
-github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM=
-github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI=
-github.com/ipfs/go-bitswap v0.5.0/go.mod h1:WwyyYD33RHCpczgHjpx+xjWYIy8l41K+l5EMy4/ctSM=
-github.com/ipfs/go-bitswap v0.9.0 h1:/dZi/XhUN/aIk78pI4kaZrilUglJ+7/SCmOHWIpiy8E=
-github.com/ipfs/go-bitswap v0.9.0/go.mod h1:zkfBcGWp4dQTQd0D0akpudhpOVUAJT9GbH9tDmR8/s4=
-github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc=
-github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
-github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc=
-github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
-github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
-github.com/ipfs/go-blockservice v0.2.0/go.mod h1:Vzvj2fAnbbyly4+T7D5+p9n3+ZKVHA2bRMMo1QoILtQ=
-github.com/ipfs/go-blockservice v0.4.0 h1:7MUijAW5SqdsqEW/EhnNFRJXVF8mGU5aGhZ3CQaCWbY=
-github.com/ipfs/go-blockservice v0.4.0/go.mod h1:kRjO3wlGW9mS1aKuiCeGhx9K1DagQ10ACpVO59qgAx4=
-github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
-github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
-github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
-github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
-github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
-github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
+github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs=
+github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM=
 github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
-github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o=
-github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc=
-github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw=
-github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s=
+github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
+github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
 github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q=
 github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
-github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
-github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
-github.com/ipfs/go-datastore v0.1.1/go.mod
h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= -github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= -github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= -github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U= +github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= -github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= -github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= -github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger2 v0.1.3 h1:Zo9JicXJ1DmXTN4KOw7oPXkspZ0AWHcAFCP1tQKnegg= -github.com/ipfs/go-ds-badger2 v0.1.3/go.mod h1:TPhhljfrgewjbtuL/tczP8dNrBYwwk+SdPYbms/NO9w= -github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= -github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-fetcher v1.5.0 h1:oreKTKBzja3S09rSmoZlA3KGVlRiUbJ1pQjtB4K6y3w= -github.com/ipfs/go-fetcher v1.5.0/go.mod h1:5pDZ0393oRF/fHiLmtFZtpMNBQfHOYNPtryWedVuSWE= -github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v0.2.0/go.mod h1:SNeEpz/ICnMYZQYr7KNZTjdn7tEPB/99xpe8xI1RW7o= -github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= -github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= -github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= -github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= -github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod 
h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= -github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y= -github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= -github.com/ipfs/go-ipfs-exchange-offline v0.1.0/go.mod h1:YdJXa+yPF1na+gfYHYejtLwHFpuKv22eatApNiSfanM= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= -github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= -github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-provider v0.7.0 h1:5GpHv46eIS8h2mbbKg1ckU5paajDYJtE4GA/SBepOQg= -github.com/ipfs/go-ipfs-provider v0.7.0/go.mod h1:mgjsWgDt9j19N1REPxRa31p+eRIQmjNt5McNdQQ5CsA= -github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= -github.com/ipfs/go-ipfs-routing v0.2.0/go.mod h1:384byD/LHKhAgKE3NmwOjXCpDzhczROMBzidoYV7tfM= -github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= -github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= -github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= -github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= -github.com/ipfs/go-ipld-format v0.3.0 h1:Mwm2oRLzIuUwEPewWAWyMuuBQUsn3awfFEYVb8akMOQ= -github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipns v0.2.0 h1:BgmNtQhqOw5XEZ8RAfWEpK4DhqaYiuP6h71MhIp7xXU= -github.com/ipfs/go-ipns v0.2.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= +github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= +github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= +github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= +github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= +github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= +github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= -github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= -github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0/go.mod 
h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= -github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= -github.com/ipfs/go-peertaskqueue v0.7.0 h1:VyO6G4sbzX80K58N60cCaHsSsypbUNs1GjO5seGNsQ0= -github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= -github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= -github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= -github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.14.1 h1:n9obcUnuqPK34HlfbiB+o9GhXE/x59uue4z9YTsaoj4= -github.com/ipld/go-ipld-prime v0.14.1/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= -github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= -github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU= +github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= -github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= -github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= -github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= 
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 h1:4iii8SOozVG1lpkdPELRsjPEBhU4DeFPz2r2Fjj3UDU= +github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956/go.mod h1:qsnXMryYP9X7JbzskIn0+N40sE6XNXLr9kYRRP6rwXU= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= -github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= -github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/kevinburke/go-bindata v3.23.0+incompatible h1:rqNOXZlqrYhMVVAsQx8wuc+LaA73YcfbQ407wAykyS8= -github.com/kevinburke/go-bindata v3.23.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= +github.com/k0kubun/pp/v3 v3.5.0 h1:iYNlYA5HJAJvkD4ibuf9c8y6SHM0QFhaBuCqm1zHp0w= +github.com/k0kubun/pp/v3 v3.5.0/go.mod h1:5lzno5ZZeEeTV/Ky6vs3g6d1U3WarDrH8k240vMtGro= +github.com/kevinburke/go-bindata v3.24.0+incompatible h1:qajFA3D0pH94OTLU4zcCCKCDgR+Zr2cZK/RPJHDdFoY= +github.com/kevinburke/go-bindata v3.24.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck 
v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0= -github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.2 h1:xPMwiykqNK9VK0NYC3+jTMYv9I6Vl3YdjZgPZKG3zO0= -github.com/klauspost/cpuid/v2 v2.2.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= -github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= -github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/libp2p/go-addr-util v0.1.0 h1:acKsntI33w2bTU7tC9a0SaPimJGfSI0bFKC18ChxeVI= github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= -github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= -github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= -github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= -github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= -github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= -github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= -github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= -github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= -github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= -github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= -github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= -github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= -github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.24.2 h1:iMViPIcLY0D6zr/f+1Yq9EavCZu2i7eDstsr1nEwSAk= -github.com/libp2p/go-libp2p v0.24.2/go.mod h1:WuxtL2V8yGjam03D93ZBC19tvOUiPpewYv1xdFGWu1k= 
-github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= -github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= -github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= -github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= -github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= -github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= -github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= -github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= -github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= -github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= -github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= -github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= -github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= -github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= -github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= -github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= -github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= -github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= -github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= -github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= -github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= -github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.2/go.mod 
h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.20.1 h1:fQz4BJyIFmSZAiTbKV8qoYhEH5Dtv/cVhZbG3Ib/+Cw= -github.com/libp2p/go-libp2p-core v0.20.1/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= -github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= -github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= -github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= -github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-kad-dht v0.19.0 h1:2HuiInHZTm9ZvQajaqdaPLHr0PCKKigWiflakimttE0= -github.com/libp2p/go-libp2p-kad-dht v0.19.0/go.mod h1:qPIXdiZsLczhV4/+4EO1jE8ae0YCW4ZOogc4WVIyTEU= -github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= -github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= -github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= -github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= -github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= -github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= -github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= -github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= -github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= -github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= -github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= -github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= -github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= -github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= -github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= -github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= -github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= -github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod 
h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= -github.com/libp2p/go-libp2p-pubsub v0.8.2-0.20221201175637-3d2eab35722e h1:phmi6mEoO5y2AQP68+4vZhNpHtZ4dum2ieFtWdmjXak= -github.com/libp2p/go-libp2p-pubsub v0.8.2-0.20221201175637-3d2eab35722e/go.mod h1:e4kT+DYjzPUYGZeWk4I+oxCSYTXizzXii5LDRRhjKSw= -github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw= +github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc= +github.com/libp2p/go-libp2p v0.38.2 h1:9SZQDOCi82A25An4kx30lEtr6kGTxrtoaDkbs5xrK5k= +github.com/libp2p/go-libp2p v0.38.2/go.mod h1:QWV4zGL3O9nXKdHirIC59DoRcZ446dfkjbOJ55NEWFo= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= +github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= +github.com/libp2p/go-libp2p-pubsub v0.13.0 h1:RmFQ2XAy3zQtbt2iNPy7Tt0/3fwTnHpCQSSnmGnt1Ps= +github.com/libp2p/go-libp2p-pubsub v0.13.0/go.mod h1:m0gpUOyrXKXdE7c8FNQ9/HLfWbxaEw7xku45w+PaqZo= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= -github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= -github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= -github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= -github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= -github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= -github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= -github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= -github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= -github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= -github.com/libp2p/go-libp2p-testing 
v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= -github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84= +github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= -github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= -github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= -github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= -github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= -github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= -github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= -github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= -github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= -github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= -github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= -github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= -github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= -github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= -github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= -github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= -github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= -github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= -github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= -github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= -github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= -github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= -github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= 
-github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= -github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= -github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= -github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= -github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= -github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= -github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= -github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= -github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= -github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= -github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= -github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= -github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= -github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= -github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= -github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= -github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= -github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= -github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= -github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= -github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= -github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= -github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= -github.com/libp2p/go-ws-transport v0.3.0/go.mod 
h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= -github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ= -github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= +github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8= +github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= +github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= -github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.31.1 h1:O8Od7hfioqq0PMYHDyBkxU2aA7iZ2W9pjbrWuja2YR4= -github.com/lucas-clemente/quic-go v0.31.1/go.mod h1:0wFbizLgYzqHqtlyxyCaJKlE7bYgE6JQ+54TLd/Dq2g= +github.com/logrusorgru/aurora/v4 v4.0.0 h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA= +github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/m4ksio/wal v1.0.1-0.20221209164835-154a17396e4c 
h1:OqVcb1Dkheracn4fgCjxlfhuSnM8jmPbrWkJbRIC4fo= -github.com/m4ksio/wal v1.0.1-0.20221209164835-154a17396e4c/go.mod h1:5/Yq7mnb+VdE44ff+FL8LSOPEquOVqm/7Hz40U4VUZo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= -github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= -github.com/marten-seemann/qpack v0.3.0 h1:UiWstOgT8+znlkDPOg2+3rIuYXJ2CnGDkGUXN6ki6hE= -github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-18 v0.1.3 h1:R4H2Ks8P6pAtUagjFty2p7BVHn3XiwDAl7TTQf5h7TI= -github.com/marten-seemann/qtls-go1-18 v0.1.3/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= -github.com/marten-seemann/qtls-go1-19 v0.1.1 h1:mnbxeq3oEyQxQXwI4ReCgW9DPoPR94sNlqWoDZnjRIE= -github.com/marten-seemann/qtls-go1-19 v0.1.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/marten-seemann/webtransport-go v0.4.3 h1:vkt5o/Ci+luknRteWdYGYH1KcB7ziup+J+1PzZJIvmg= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod 
h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0=
-github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
-github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
 github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
-github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
+github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
 github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
 github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
 github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@@ -1100,13 +849,9 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKo
 github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
 github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
-github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
-github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
-github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
 github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
-github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
@@ -1125,87 +870,51 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
-github.com/montanaflynn/stats v0.6.6 h1:Duep6KMIDpY4Yo11iFsvyqJDyfzLF9+sndUKT+v64GQ=
-github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
+github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
 github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
-github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
 github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
 github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
 github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
 github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
 github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
-github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM=
 github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
 github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
 github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
 github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
-github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
-github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
-github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
-github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
 github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
-github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
-github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE=
 github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y=
-github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI=
-github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc=
 github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0=
-github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
-github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
-github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
-github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
-github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0=
-github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
-github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
-github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q=
+github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU=
+github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4=
+github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
+github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
 github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
 github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
-github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU=
-github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
-github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
-github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y=
-github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
-github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
-github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
-github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA=
-github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
 github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
-github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
-github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
-github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ=
-github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ=
-github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw=
-github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
-github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
+github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
 github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
-github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
 github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
 github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
-github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
-github.com/multiformats/go-multihash v0.0.16/go.mod h1:zhfEIgVnB/rPMfxgFw15ZmGoNaKyNUIE4IWHG/kC+Ag=
-github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84=
-github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
-github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
-github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
-github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38=
-github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
-github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
-github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs=
-github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o=
-github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg=
-github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
-github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA=
+github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg=
 github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
 github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
 github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo=
-github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
-github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
 github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
 github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
 github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
@@ -1216,52 +925,71 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
 github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
 github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
 github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onflow/atree v0.5.0 h1:y3lh8hY2fUo8KVE2ALVcz0EiNTq0tXJ6YTXKYVDA+3E=
-github.com/onflow/atree v0.5.0/go.mod h1:gBHU0M05qCbv9NN0kijLWMgC47gHVNBIp4KmsVFi0tc=
-github.com/onflow/cadence v0.38.1 h1:8YpnE1ixAGB8hF3t+slkHGhjfIBJ95dqUS+sEHrM2kY=
-github.com/onflow/cadence v0.38.1/go.mod h1:SpfjNhPsJxGIHbOthE9JD/e8JFaFY73joYLPsov+PY4=
-github.com/onflow/flow v0.3.4 h1:FXUWVdYB90f/rjNcY0Owo30gL790tiYff9Pb/sycXYE=
-github.com/onflow/flow v0.3.4/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c=
-github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 h1:wV+gcgOY0oJK4HLZQYQoK+mm09rW1XSxf83yqJwj0n4=
-github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3/go.mod h1:Osvy81E/+tscQM+d3kRFjktcIcZj2bmQ9ESqRQWDEx8=
-github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 h1:X25A1dNajNUtE+KoV76wQ6BR6qI7G65vuuRXxDDqX7E=
-github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3/go.mod h1:dqAUVWwg+NlOhsuBHex7bEWmsUjsiExzhe/+t4xNH6A=
-github.com/onflow/flow-ft/lib/go/contracts v0.7.0 h1:XEKE6qJUw3luhsYmIOteXP53gtxNxrwTohgxJXCYqBE=
-github.com/onflow/flow-ft/lib/go/contracts v0.7.0/go.mod h1:kTMFIySzEJJeupk+7EmXs0EJ6CBWY/MV9fv9iYQk+RU=
-github.com/onflow/flow-go-sdk v0.40.0 h1:s8uwoyTquN8tjdXpqGmNkXTjf79yUII8JExc5QEl4Xw=
-github.com/onflow/flow-go-sdk v0.40.0/go.mod h1:34dxXk9Hp/bQw6Zy6+H44Xo0kQU+aJyQoqdDxq00rJM=
-github.com/onflow/flow-go/crypto v0.24.7 h1:RCLuB83At4z5wkAyUCF7MYEnPoIIOHghJaODuJyEoW0=
-github.com/onflow/flow-go/crypto v0.24.7/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0=
-github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230428213521-89bcc9e8517e h1:QYEd3KWTt309YGBch4IGK6vJ6b7cOGx2NStEnd5NeHM=
-github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230428213521-89bcc9e8517e/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk=
-github.com/onflow/go-bitswap v0.0.0-20221017184039-808c5791a8a8 h1:XcSR/n2aSVO7lOEsKScYALcpHlfowLwicZ9yVbL6bnA=
-github.com/onflow/go-bitswap v0.0.0-20221017184039-808c5791a8a8/go.mod h1:73C8FlT4L/Qe4Cf5iXUNL8b2pvu4zs5dJMMJ5V2TjUI=
-github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8=
-github.com/onflow/sdks v0.5.0/go.mod h1:F0dj0EyHC55kknLkeD10js4mo14yTdMotnWMslPirrU=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/onflow/atree v0.10.1 h1:8sixWP3l3LitcyuKkVepbIsLbfr7JN3cCB/iA1j2JD8=
+github.com/onflow/atree v0.10.1/go.mod h1:+BuiL0XuIigHJqwkdIuDNzxXvyDx1jYUog/w+iZhcE8=
+github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 h1:LpiQhTAfM9CAmNVEs0n//cBBgCg+vJSiIxTHYUklZ84=
+github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80=
+github.com/onflow/cadence v1.7.1 h1:VpPiC13e4qrdpyqaagd+dpHtQwfJ/djr97FgT3SovWA=
+github.com/onflow/cadence v1.7.1/go.mod h1:1lKdLNVHIoO0jEjkRPMtOmBWYCG1An9TXSoiCuGIIpo=
+github.com/onflow/crypto v0.25.3 h1:XQ3HtLsw8h1+pBN+NQ1JYM9mS2mVXTyg55OldaAIF7U=
+github.com/onflow/crypto v0.25.3/go.mod h1:+1igaXiK6Tjm9wQOBD1EGwW7bYWMUGKtwKJ/2QL/OWs=
+github.com/onflow/fixed-point v0.1.1 h1:j0jYZVO8VGyk1476alGudEg7XqCkeTVxb5ElRJRKS90=
+github.com/onflow/fixed-point v0.1.1/go.mod h1:gJdoHqKtToKdOZbvryJvDZfcpzC7d2fyWuo3ZmLtcGY=
+github.com/onflow/flow v0.4.15 h1:MdrhULSE5iSYNyLCihH8DI4uab5VciVZUKDFON6zylY=
+github.com/onflow/flow v0.4.15/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c=
+github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.0 h1:m6lHp0xDdmVWbpbTpFlq6XxVrB+2J8qwnzMV30zdZeM=
+github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.0/go.mod h1:jBDqVep0ICzhXky56YlyO4aiV2Jl/5r7wnqUPpvi7zE=
+github.com/onflow/flow-core-contracts/lib/go/templates v1.9.0 h1:8jn4Lxp/dpyWdgJ+5XEDUkYOf2aveObZtHtkdnYIEco=
+github.com/onflow/flow-core-contracts/lib/go/templates v1.9.0/go.mod h1:twSVyUt3rNrgzAmxtBX+1Gw64QlPemy17cyvnXYy1Ug=
+github.com/onflow/flow-evm-bridge v0.1.0 h1:7X2osvo4NnQgHj8aERUmbYtv9FateX8liotoLnPL9nM=
+github.com/onflow/flow-evm-bridge v0.1.0/go.mod h1:5UYwsnu6WcBNrwitGFxphCl5yq7fbWYGYuiCSTVF6pk=
+github.com/onflow/flow-ft/lib/go/contracts v1.0.1 h1:Ts5ob+CoCY2EjEd0W6vdLJ7hLL3SsEftzXG2JlmSe24=
+github.com/onflow/flow-ft/lib/go/contracts v1.0.1/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A=
+github.com/onflow/flow-ft/lib/go/templates v1.0.1 h1:FDYKAiGowABtoMNusLuRCILIZDtVqJ/5tYI4VkF5zfM=
+github.com/onflow/flow-ft/lib/go/templates v1.0.1/go.mod h1:uQ8XFqmMK2jxyBSVrmyuwdWjTEb+6zGjRYotfDJ5pAE=
+github.com/onflow/flow-go-sdk v1.8.4 h1:WHtVjryOU6ZJx0jUSjBPOrWoGqGDr+eEejyIkfbiBCE=
+github.com/onflow/flow-go-sdk v1.8.4/go.mod h1:Jli9sI78LAnoC3OVGeAs0ngOezoLTfE/GrKOAB9TbTw=
+github.com/onflow/flow-nft/lib/go/contracts v1.3.0 h1:DmNop+O0EMyicZvhgdWboFG57xz5t9Qp81FKlfKyqJc=
+github.com/onflow/flow-nft/lib/go/contracts v1.3.0/go.mod h1:eZ9VMMNfCq0ho6kV25xJn1kXeCfxnkhj3MwF3ed08gY=
+github.com/onflow/flow-nft/lib/go/templates v1.3.0 h1:uGIBy4GEY6Z9hKP7sm5nA5kwvbvLWW4nWx5NN9Wg0II=
+github.com/onflow/flow-nft/lib/go/templates v1.3.0/go.mod h1:gVbb5fElaOwKhV5UEUjM+JQTjlsguHg2jwRupfM/nng=
+github.com/onflow/flow/protobuf/go/flow v0.4.16 h1:UADQeq/mpuqFk+EkwqDNoF70743raWQKmB/Dm/eKt2Q=
+github.com/onflow/flow/protobuf/go/flow v0.4.16/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk=
+github.com/onflow/go-ds-pebble v0.0.0-20251003225212-131edca3a897 h1:ZtFYJ3OSR00aiKMMxgm3fRYWqYzjvDXeoBGQm6yC8DE=
+github.com/onflow/go-ds-pebble v0.0.0-20251003225212-131edca3a897/go.mod h1:aiCRVcj3K60sxc6k5C+HO9C6rouqiSkjR/WKnbTcMfQ=
+github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc=
+github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY=
+github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 h1:sxyWLqGm/p4EKT6DUlQESDG1ZNMN9GjPCm1gTq7NGfc=
+github.com/onflow/nft-storefront/lib/go/contracts v1.0.0/go.mod h1:kMeq9zUwCrgrSojEbTUTTJpZ4WwacVm2pA7LVFr+glk=
+github.com/onflow/sdks v0.6.0-preview.1 h1:mb/cUezuqWEP1gFZNAgUI4boBltudv4nlfxke1KBp9k=
+github.com/onflow/sdks v0.6.0-preview.1/go.mod h1:F0dj0EyHC55kknLkeD10js4mo14yTdMotnWMslPirrU=
+github.com/onflow/wal v1.0.2 h1:5bgsJVf2O3cfMNK12fiiTyYZ8cOrUiELt3heBJfHOhc=
+github.com/onflow/wal v1.0.2/go.mod h1:iMC8gkLqu4nkbkAla5HkSBb+FGyQOZiWz3DYm2wSXCk=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo/v2 v2.6.1 h1:1xQPCjcqYw/J5LchOcp4/2q/jzJFjiAOc25chhnDw+Q=
-github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
-github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
+github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E=
+github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8=
+github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
 github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
 github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
 github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -1277,19 +1005,62 @@ github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIw
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
-github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
-github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw=
-github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI=
+github.com/pelletier/go-toml/v2 v2.2.1 h1:9TA9+T8+8CUCO2+WYnDLCgrYi9+omqKXyjDtosvtEhg=
+github.com/pelletier/go-toml/v2 v2.2.1/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
 github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
-github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
 github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
-github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
+github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
+github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
+github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
+github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
+github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
+github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
+github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
+github.com/pion/ice/v2 v2.3.37 h1:ObIdaNDu1rCo7hObhs34YSBcO7fjslJMZV0ux+uZWh0=
+github.com/pion/ice/v2 v2.3.37/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ=
+github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
+github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
+github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
+github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8=
+github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk=
+github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
+github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
+github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
+github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
+github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
+github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
+github.com/pion/rtp v1.8.10 h1:puphjdbjPB+L+NFaVuZ5h6bt1g5q4kFIoI+r5q/g0CU=
+github.com/pion/rtp v1.8.10/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
+github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA=
+github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg=
+github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
+github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
+github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk=
+github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA=
+github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
+github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
+github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0=
+github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ=
+github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
+github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
+github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
+github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
+github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
+github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
+github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
+github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
+github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
+github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
+github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
+github.com/pion/webrtc/v3 v3.3.5 h1:ZsSzaMz/i9nblPdiAkZoP+E6Kmjw+jnyq3bEmU3EtRg=
+github.com/pion/webrtc/v3 v3.3.5/go.mod h1:liNa+E1iwyzyXqNUwvoMRNQ10x8h8FOeJKL8RkIbamE=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -1298,10 +1069,13 @@ github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6J
 github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
 github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
 github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls=
-github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
+github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
@@ -1312,17 +1086,16 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -1331,9 +1104,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
-github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
 github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -1342,33 +1114,35 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4=
+github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs=
 github.com/psiemens/sconfig v0.1.0 h1:xfWqW+TRpih7mXZIqKYTmpRhlZLQ1kbxV8EjllPv76s=
 github.com/psiemens/sconfig v0.1.0/go.mod h1:+MLKqdledP/8G3rOBpknbLh0IclCf4WneJUtS26JB2U=
+github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
+github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
+github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE=
+github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
+github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=
+github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
 github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
 github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.2.1-0.20211004051800-57c86be7915a h1:s7GrsqeorVkFR1vGmQ6WVL9nup0eyQCC+YVUeSQLH/Q=
-github.com/rivo/uniseg v0.2.1-0.20211004051800-57c86be7915a/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
-github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so=
 github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
-github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
 github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
 github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w=
 github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -1376,12 +1150,14 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/schollz/progressbar/v3 v3.8.3 h1:FnLGl3ewlDUP+YdSwveXBaXs053Mem/du+wr7XSYKl8=
-github.com/schollz/progressbar/v3 v3.8.3/go.mod h1:pWnVCjSBZsT2X3nx9HfRdnCDrpbevliMeoEVhStwHko=
+github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA=
+github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sethvargo/go-retry v0.2.3 h1:oYlgvIvsju3jNbottWABtbnoLC+GDtLdBHxKWxQm/iU=
 github.com/sethvargo/go-retry v0.2.3/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
+github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
+github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/shirou/gopsutil/v3 v3.22.2 h1:wCrArWFkHYIdDxx/FSfF5RB4dpJYW6t7rcp3+zL8uks=
 github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY=
 github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
@@ -1411,58 +1187,58 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/slok/go-http-metrics v0.10.0 h1:rh0LaYEKza5eaYRGDXujKrOln57nHBi4TtVhmNEpbgM=
-github.com/slok/go-http-metrics v0.10.0/go.mod h1:lFqdaS4kWMfUKCSukjC47PdCeTk+hXDUVm8kLHRqJ38=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/slok/go-http-metrics v0.12.0 h1:mAb7hrX4gB4ItU6NkFoKYdBslafg3o60/HbGBRsKaG8=
+github.com/slok/go-http-metrics v0.12.0/go.mod h1:Ee/mdT9BYvGrlGzlClkK05pP2hRHmVbRF9dtUVS8LNA=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
+github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
+github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
+github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
+github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg=
+github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
 github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
 github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
-github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0=
-github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
-github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
 github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.9.0 h1:sFSLUHgxdnN32Qy38hK3QkYBFXZj9DKjVjCUCtD7juY=
-github.com/spf13/afero v1.9.0/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
+github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
 github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
-github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
-github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
-github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc=
-github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
-github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
-github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
 github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -1470,62 +1246,61 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs=
-github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo=
-github.com/supranational/blst v0.3.10 h1:CMciDZ/h4pXDDXQASe8ZGTNKUiVNxVVA5hpci2Uuhuk=
-github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
-github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo=
+github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
 github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg=
 github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c/go.mod h1:JlzghshsemAMDGZLytTFY8C1JQxQPhnatWqNwUXjggo=
-github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo=
 github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
-github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
+github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
 github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
+github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
+github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d h1:5JInRQbk5UBX8JfUvKh2oYTLMVwj3p6n+wapDDm7hko=
 github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d/go.mod h1:Nlx5Y115XQvNcIdIy7dZXaNSUpzwBSge4/Ivk93/Yog=
-github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
+github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
 github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
 github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
 github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
+github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
 github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
-github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
-github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
 github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE=
 github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
 github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY=
 github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
-github.com/warpfork/go-testmark v0.3.0 h1:Q81c4u7hT+BR5kNfNQhEF0VT2pmL7+Kk0wD+ORYl7iA=
-github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0=
-github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w=
-github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
+github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s=
+github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y=
+github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
+github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
 github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
 github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
 github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
-github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE=
-github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8=
-github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA=
-github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4=
-github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4=
-github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI=
-github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=
-github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg=
-github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
-github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
+github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
+github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
@@ -1535,12 +1310,15 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
 github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
-github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
-github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg=
-github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ=
+github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
+github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
 github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
 github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@@ -1551,7 +1329,6 @@ go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -1559,93 +1336,91 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg=
-go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 h1:ao8CJIShCaIbaMsGxy+jp2YHSudketpDgDRcbirov78=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 h1:LrHL1A3KqIgAgi6mK7Q0aczmzU414AONAGT5xtnp+uo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0/go.mod h1:w8aZL87GMOvOBa2lU/JlVXE1q4chk/0FX+8ai4513bw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0 h1:00hCSGLIxdYK/Z7r8GkaX0QIlfvgU3tmnLlQvcnix6U=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0/go.mod h1:twhIvtDQW2sWP1O2cT1N8nkSBgKCRZv2z6COTTBrf8Q=
-go.opentelemetry.io/otel/sdk v1.8.0 h1:xwu69/fNuwbSHWe/0PGS888RmjWY181OmcXDQKu7ZQk=
-go.opentelemetry.io/otel/sdk v1.8.0/go.mod h1:uPSfc+yfDH2StDM/Rm35WE8gXSNdvCg023J6HeGNO0c=
-go.opentelemetry.io/otel/trace v1.8.0 h1:cSy0DF9eGI5WIfNwZ1q2iUyGj00tGzP24dE1lOlHrfY=
-go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
+go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 h1:VhlEQAPp9R1ktYfrPk5SOryw1e9LDDTZCbIPFrho0ec=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0/go.mod h1:kB3ufRbfU+CQ4MlUcqtW8Z7YEOBeK2DJ6CmR5rYYF3E=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
+go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
+go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
+go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v0.18.0 h1:W5hyXNComRa23tGpKwG+FRAc4rfF6ZUg1JReK+QHS80=
-go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
-go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/dig v1.15.0 h1:vq3YWr8zRj1eFGC7Gvf907hE0eRjPTZ1d3xHadD6liE=
-go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM=
-go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU=
-go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY=
-go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
+go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg=
+go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
 go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
+go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
-go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
-go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
 go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
 go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc=
+golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
 golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
-golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
-golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod
h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= -golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= @@ -1656,9 +1431,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15 h1:5oN1Pz/eDhCpbMbLstvIPa0b/BEQo6g6nwV3pLjfM6w= -golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1685,13 +1459,13 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1709,13 +1483,11 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1731,6 +1503,7 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1741,17 
+1514,22 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1773,8 +1551,8 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1787,8 +1565,10 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1801,19 +1581,14 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1821,7 +1596,6 @@ golang.org/x/sys 
v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1850,6 +1624,7 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1857,19 +1632,14 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1881,10 +1651,8 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025112917-711f33c9992c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1895,16 +1663,29 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1914,23 +1695,25 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1943,18 +1726,15 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1990,22 +1770,19 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools 
v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -2047,8 +1824,8 @@ google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.247.0 h1:tSd/e0QrUlLsrwMKmkbQhYVa109qIintOls2Wh6bngc= +google.golang.org/api v0.247.0/go.mod h1:r1qZOPmxXffXg6xS5uhx16Fa/UFY8QU/K4bfKrnvovM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2057,8 +1834,9 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -2143,8 +1921,14 @@ google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250804133106-a7a43d27e69b h1:YzmLjVBzUKrr0zPM1KkGPEicd3WHSccw1k9RivnvngU= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2161,7 +1945,6 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -2180,12 +1963,11 @@ google.golang.org/grpc 
v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= @@ -2203,8 +1985,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2218,15 +2000,11 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= -gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= 
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2253,15 +2031,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= -lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= -pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/insecure/.mockery.yaml b/insecure/.mockery.yaml new file mode 100644 index 00000000000..e48e549f352 --- /dev/null +++ b/insecure/.mockery.yaml @@ -0,0 +1,16 @@ +with-expecter: False +include-auto-generated: False +disable-func-mocks: True +dir: "{{.InterfaceDir}}/mock" +outpkg: "mock" +filename: "{{.InterfaceName | snakecase}}.go" +mockname: "{{.InterfaceName}}" +all: True + +# Suppress warnings +issue-845-fix: True +disable-version-string: True +resolve-type-alias: False + +packages: + github.com/onflow/flow-go/insecure: diff --git a/insecure/Makefile b/insecure/Makefile index 72a38cf4b4d..982676938f0 100644 --- a/insecure/Makefile +++ b/insecure/Makefile @@ -1,6 +1,10 @@ # Name of the cover profile COVER_PROFILE := cover.out +# By default, this will run all tests in all packages, but we have a way to override this in CI so that we can +# dynamically split up CI jobs into smaller jobs that can be run in parallel +GO_TEST_PACKAGES := ./... 
+ # allows CI to specify whether to have race detection on / off ifeq ($(RACE_DETECTOR),1) RACE_FLAG := -race @@ -8,7 +12,24 @@ else RACE_FLAG := endif +# set `CRYPTO_FLAG` when building natively (not cross-compiling) +include ../crypto_adx_flag.mk + # runs all unit tests of the insecure module .PHONY: test test: - go test $(if $(VERBOSE),-v,) -coverprofile=$(COVER_PROFILE) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) --tags relic ./... + CGO_CFLAGS=$(CRYPTO_FLAG) go test $(if $(VERBOSE),-v,) -coverprofile=$(COVER_PROFILE) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(GO_TEST_PACKAGES) + +.PHONY: lint +lint: tidy + # revive -config revive.toml -exclude storage/ledger/trie ./... + ../tools/custom-gcl run -v + +# this ensures there is no unused dependency being added by accident +.PHONY: tidy +tidy: + go mod tidy -v + cd integration; go mod tidy -v + cd crypto; go mod tidy -v + cd insecure; go mod tidy -v + git diff --exit-code diff --git a/insecure/cmd/access/main.go b/insecure/cmd/access/main.go index 836f35cbf67..bd59326c769 100644 --- a/insecure/cmd/access/main.go +++ b/insecure/cmd/access/main.go @@ -1,6 +1,8 @@ package main import ( + "context" + nodebuilder "github.com/onflow/flow-go/cmd/access/node_builder" insecmd "github.com/onflow/flow-go/insecure/cmd" "github.com/onflow/flow-go/model/flow" @@ -31,5 +33,5 @@ func main() { builder.Logger.Fatal().Err(err).Send() } - node.Run() + node.Run(context.Background()) } diff --git a/insecure/cmd/corrupted_builder.go b/insecure/cmd/corrupted_builder.go index fc346c6528f..a4b26211c21 100644 --- a/insecure/cmd/corrupted_builder.go +++ b/insecure/cmd/corrupted_builder.go @@ -11,8 +11,11 @@ import ( "github.com/onflow/flow-go/insecure/corruptnet" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" - p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" + p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config" + "github.com/onflow/flow-go/network/p2p/connection" "github.com/onflow/flow-go/network/p2p/unicast/ratelimit" "github.com/onflow/flow-go/utils/logging" ) @@ -70,24 +73,24 @@ func (cnb *CorruptedNodeBuilder) enqueueNetworkingLayer() { myAddr = cnb.FlowNodeBuilder.BaseConfig.BindAddr } - uniCfg := &p2pconfig.UnicastConfig{ - StreamRetryInterval: cnb.UnicastCreateStreamRetryDelay, + uniCfg := &p2pbuilderconfig.UnicastConfig{ RateLimiterDistributor: cnb.UnicastRateLimiterDistributor, + Unicast: cnb.FlowConfig.NetworkConfig.Unicast, } - connGaterCfg := &p2pconfig.ConnectionGaterConfig{ + connGaterCfg := &p2pbuilderconfig.ConnectionGaterConfig{ InterceptPeerDialFilters: []p2p.PeerFilter{}, // disable connection gater onInterceptPeerDialFilters InterceptSecuredFilters: []p2p.PeerFilter{}, // disable connection gater onInterceptSecuredFilters } - peerManagerCfg := &p2pconfig.PeerManagerConfig{ - ConnectionPruning: cnb.NetworkConnectionPruning, - UpdateInterval: cnb.PeerUpdateInterval, + peerManagerCfg := &p2pbuilderconfig.PeerManagerConfig{ + ConnectionPruning: cnb.FlowConfig.NetworkConfig.NetworkConnectionPruning, + UpdateInterval: cnb.FlowConfig.NetworkConfig.PeerUpdateInterval, + ConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(), } // create default libp2p factory if corrupt node should enable the topic validator - libP2PNodeFactory := corruptlibp2p.NewCorruptLibP2PNodeFactory( - 
cnb.Logger, cnb.RootChainID, myAddr, cnb.NetworkKey, @@ -96,28 +99,28 @@ func (cnb *CorruptedNodeBuilder) enqueueNetworkingLayer() { cnb.Metrics.Network, cnb.Resolver, cnb.BaseConfig.NodeRole, - connGaterCfg, - // run peer manager with the specified interval and let it also prune connections + connGaterCfg, + // run peer manager with the specified interval and let it also prune connections peerManagerCfg, uniCfg, - cnb.GossipSubConfig, + cnb.FlowConfig.NetworkConfig, + &p2p.DisallowListCacheConfig{ + MaxSize: cnb.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, + Metrics: metrics.DisallowListCacheMetricsFactory(cnb.HeroCacheMetricsFactory(), network.PrivateNetwork), + }, cnb.TopicValidatorDisabled, cnb.WithPubSubMessageSigning, - cnb.WithPubSubStrictSignatureVerification, - ) - - libp2pNode, err := libP2PNodeFactory() + cnb.WithPubSubStrictSignatureVerification) if err != nil { return nil, fmt.Errorf("failed to create libp2p node: %w", err) } - cnb.LibP2PNode = libp2pNode + cnb.LibP2PNode = corruptLibp2pNode cnb.Logger.Info(). Hex("node_id", logging.ID(cnb.NodeID)). Str("address", myAddr). Bool("topic_validator_disabled", cnb.TopicValidatorDisabled). Msg("corrupted libp2p node initialized") - return libp2pNode, nil + return corruptLibp2pNode, nil }) cnb.FlowNodeBuilder.OverrideComponent(cmd.NetworkComponent, func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { myAddr := cnb.FlowNodeBuilder.NodeConfig.Me.Address() @@ -142,21 +145,14 @@ func (cnb *CorruptedNodeBuilder) enqueueNetworkingLayer() { // initializes corruptible network that acts as a wrapper around the original flow network of the node, hence // allowing a remote attacker to control the ingress and egress traffic of the node. - corruptibleNetwork, err := corruptnet.NewCorruptNetwork( - cnb.Logger, - cnb.RootChainID, - address, - cnb.Me, - cnb.CodecFactory(), - flowNetwork, - ccf) + corruptibleNetwork, err := corruptnet.NewCorruptNetwork(cnb.Logger, cnb.RootChainID, address, cnb.Me, cnb.CodecFactory(), flowNetwork, ccf) if err != nil { return nil, fmt.Errorf("could not create corruptible network: %w", err) } cnb.Logger.Info().Hex("node_id", logging.ID(cnb.NodeID)).Str("address", address).Msg("corruptible network initiated") // override the original flow network with the corruptible network.
- cnb.Network = corruptibleNetwork + cnb.EngineRegistry = corruptibleNetwork return corruptibleNetwork, nil }) diff --git a/insecure/cmd/execution/main.go b/insecure/cmd/execution/main.go index 1a998fd8351..0bba64f5364 100644 --- a/insecure/cmd/execution/main.go +++ b/insecure/cmd/execution/main.go @@ -1,6 +1,8 @@ package main import ( + "context" + "github.com/onflow/flow-go/cmd" insecmd "github.com/onflow/flow-go/insecure/cmd" "github.com/onflow/flow-go/model/flow" @@ -23,5 +25,5 @@ func main() { if err != nil { corruptedExecutionBuilder.Logger.Fatal().Err(err).Send() } - node.Run() + node.Run(context.Background()) } diff --git a/insecure/cmd/mods_override.sh b/insecure/cmd/mods_override.sh index 6f6b4d4a6a7..c78d39a0daa 100755 --- a/insecure/cmd/mods_override.sh +++ b/insecure/cmd/mods_override.sh @@ -5,8 +5,5 @@ mv insecure/go.mod insecure/go2.mod cp ./go.mod ./go2.mod cp ./go.sum ./go2.sum -# inject forked libp2p-pubsub into main module to allow building corrupt Docker images -echo "require github.com/yhassanzadeh13/go-libp2p-pubsub v0.6.2-0.20221208234712-b44d9133e4ee" >> ./go.mod - # update go.sum since added new dependency go mod tidy diff --git a/insecure/cmd/verification/main.go b/insecure/cmd/verification/main.go index 91c876dde5a..1a6db4adc51 100644 --- a/insecure/cmd/verification/main.go +++ b/insecure/cmd/verification/main.go @@ -1,6 +1,8 @@ package main import ( + "context" + "github.com/onflow/flow-go/cmd" insecmd "github.com/onflow/flow-go/insecure/cmd" "github.com/onflow/flow-go/model/flow" @@ -23,5 +25,5 @@ func main() { if err != nil { corruptedVerificationBuilder.Logger.Fatal().Err(err).Send() } - node.Run() + node.Run(context.Background()) } diff --git a/insecure/corruptlibp2p/fixtures.go b/insecure/corruptlibp2p/fixtures.go index 599d1bcefe1..e6cb9fac9a6 100644 --- a/insecure/corruptlibp2p/fixtures.go +++ b/insecure/corruptlibp2p/fixtures.go @@ -1,108 +1,14 @@ package corruptlibp2p import ( + corrupt "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub" pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" - corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/utils/unittest" ) -const ( - // topicIDFixtureLen is the length of the topic ID fixture for testing. - topicIDFixtureLen = 10 - // messageIDFixtureLen is the length of the message ID fixture for testing. - messageIDFixtureLen = 10 -) - -type GossipSubCtrlOption func(*pubsubpb.ControlMessage) - -// GossipSubCtrlFixture returns a ControlMessage with the given options. -func GossipSubCtrlFixture(opts ...GossipSubCtrlOption) *pubsubpb.ControlMessage { - msg := &pubsubpb.ControlMessage{} - for _, opt := range opts { - opt(msg) - } - return msg -} - -// WithIHave adds iHave control messages of the given size and number to the control message. -func WithIHave(msgCount int, msgSize int) GossipSubCtrlOption { - return func(msg *pubsubpb.ControlMessage) { - iHaves := make([]*pubsubpb.ControlIHave, msgCount) - for i := 0; i < msgCount; i++ { - topicId := GossipSubTopicIdFixture() - iHaves[i] = &pubsubpb.ControlIHave{ - TopicID: &topicId, - MessageIDs: gossipSubMessageIdsFixture(msgSize), - } - } - msg.Ihave = iHaves - } -} - -// WithIWant adds iWant control messages of the given size and number to the control message. 
-func WithIWant(msgCount int, msgSize int) GossipSubCtrlOption { - return func(msg *pubsubpb.ControlMessage) { - iWants := make([]*pubsubpb.ControlIWant, msgCount) - for i := 0; i < msgCount; i++ { - iWants[i] = &pubsubpb.ControlIWant{ - MessageIDs: gossipSubMessageIdsFixture(msgSize), - } - } - msg.Iwant = iWants - } -} - -// WithGraft adds GRAFT control messages with given topicID to the control message. -func WithGraft(msgCount int, topicId string) GossipSubCtrlOption { - return func(msg *pubsubpb.ControlMessage) { - grafts := make([]*pubsubpb.ControlGraft, msgCount) - for i := 0; i < msgCount; i++ { - grafts[i] = &pubsubpb.ControlGraft{ - TopicID: &topicId, - } - } - msg.Graft = grafts - } -} - -// WithPrune adds PRUNE control messages with given topicID to the control message. -func WithPrune(msgCount int, topicId string) GossipSubCtrlOption { - return func(msg *pubsubpb.ControlMessage) { - prunes := make([]*pubsubpb.ControlPrune, msgCount) - for i := 0; i < msgCount; i++ { - prunes[i] = &pubsubpb.ControlPrune{ - TopicID: &topicId, - } - } - msg.Prune = prunes - } -} - -// gossipSubMessageIdFixture returns a random gossipSub message ID. -func gossipSubMessageIdFixture() string { - // TODO: messageID length should be a parameter. - return unittest.GenerateRandomStringWithLen(messageIDFixtureLen) -} - -// GossipSubTopicIdFixture returns a random gossipSub topic ID. -func GossipSubTopicIdFixture() string { - // TODO: topicID length should be a parameter. - return unittest.GenerateRandomStringWithLen(topicIDFixtureLen) -} - -// gossipSubMessageIdsFixture returns a slice of random gossipSub message IDs of the given size. -func gossipSubMessageIdsFixture(count int) []string { - msgIds := make([]string, count) - for i := 0; i < count; i++ { - msgIds[i] = gossipSubMessageIdFixture() - } - return msgIds -} - // CorruptInspectorFunc wraps a normal RPC inspector with a corrupt inspector func by translating corrupt.RPC -> pubsubpb.RPC // before calling Inspect func. func CorruptInspectorFunc(inspector p2p.GossipSubRPCInspector) func(id peer.ID, rpc *corrupt.RPC) error { diff --git a/insecure/corruptlibp2p/gossipsub_spammer.go b/insecure/corruptlibp2p/gossipsub_spammer.go index 08b9821409f..94e8c8557c8 100644 --- a/insecure/corruptlibp2p/gossipsub_spammer.go +++ b/insecure/corruptlibp2p/gossipsub_spammer.go @@ -5,13 +5,14 @@ import ( "testing" "time" + corrupt "github.com/libp2p/go-libp2p-pubsub" pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" - corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub" "github.com/onflow/flow-go/insecure/internal" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" p2ptest "github.com/onflow/flow-go/network/p2p/test" ) @@ -24,9 +25,37 @@ type GossipSubRouterSpammer struct { SpammerId flow.Identity } -// NewGossipSubRouterSpammer is the main method tests call for spamming attacks. -func NewGossipSubRouterSpammer(t *testing.T, sporkId flow.Identifier, role flow.Role) *GossipSubRouterSpammer { - spammerNode, spammerId, router := createSpammerNode(t, sporkId, role) +// NewGossipSubRouterSpammer creates a new GossipSubRouterSpammer. +// Args: +// - t: the test object. +// - sporkId: the spork ID of the network under test. +// - role: the role of the spammer node. +// - provider: the identity provider. +// Returns: +// - the GossipSubRouterSpammer.
+func NewGossipSubRouterSpammer(t *testing.T, sporkId flow.Identifier, role flow.Role, provider module.IdentityProvider) *GossipSubRouterSpammer { + return NewGossipSubRouterSpammerWithRpcInspector(t, sporkId, role, provider, func(id peer.ID, rpc *corrupt.RPC) error { + return nil // no-op + }) +} + +// NewGossipSubRouterSpammerWithRpcInspector creates a new GossipSubRouterSpammer with a custom RPC inspector. +// The RPC inspector is called before each incoming RPC is processed by the router. +// If the inspector returns an error, the RPC is dropped. +// Args: +// - t: the test object. +// - sporkId: the spork ID of the network under test. +// - role: the role of the spammer node. +// - provider: the identity provider. +// - inspector: the RPC inspector. +// Returns: +// - the GossipSubRouterSpammer. +func NewGossipSubRouterSpammerWithRpcInspector(t *testing.T, + sporkId flow.Identifier, + role flow.Role, + provider module.IdentityProvider, + inspector func(id peer.ID, rpc *corrupt.RPC) error) *GossipSubRouterSpammer { + spammerNode, spammerId, router := newSpammerNodeWithRpcInspector(t, sporkId, role, provider, inspector) return &GossipSubRouterSpammer{ router: router, SpammerNode: spammerNode, @@ -36,18 +65,18 @@ func NewGossipSubRouterSpammer(t *testing.T, sporkId flow.Identifier, role flow. // SpamControlMessage spams the victim with junk control messages. // ctlMessages is the list of spam messages to send to the victim node. -func (s *GossipSubRouterSpammer) SpamControlMessage(t *testing.T, victim p2p.LibP2PNode, ctlMessages []pb.ControlMessage) { +func (s *GossipSubRouterSpammer) SpamControlMessage(t *testing.T, victim p2p.LibP2PNode, ctlMessages []pb.ControlMessage, msgs ...*pb.Message) { for _, ctlMessage := range ctlMessages { - require.True(t, s.router.Get().SendControl(victim.Host().ID(), &ctlMessage)) + s.router.Get().SendControl(victim.ID(), &ctlMessage, msgs...) } } // GenerateCtlMessages generates control messages before they are sent so the test can prepare // to expect receiving them before they are sent by the spammer. -func (s *GossipSubRouterSpammer) GenerateCtlMessages(msgCount int, opts ...GossipSubCtrlOption) []pb.ControlMessage { +func (s *GossipSubRouterSpammer) GenerateCtlMessages(msgCount int, opts ...p2ptest.GossipSubCtrlOption) []pb.ControlMessage { var ctlMgs []pb.ControlMessage for i := 0; i < msgCount; i++ { - ctlMsg := GossipSubCtrlFixture(opts...) + ctlMsg := p2ptest.GossipSubCtrlFixture(opts...) ctlMgs = append(ctlMgs, *ctlMsg) } return ctlMgs @@ -63,21 +92,42 @@ func (s *GossipSubRouterSpammer) Start(t *testing.T) { s.router.set(s.router.Get()) } -func createSpammerNode(t *testing.T, sporkId flow.Identifier, role flow.Role) (p2p.LibP2PNode, flow.Identity, *atomicRouter) { +// newSpammerNodeWithRpcInspector creates a new spammer node, which is capable of sending spam control and actual messages to other nodes. +// It also creates a new atomic router that allows us to set the router to a new instance of the corrupt router. +// Args: +// - sporkId: the spork id of the network the spammer joins. +// - role: the role of the spammer node. +// - provider: the identity provider of the spammer node. +// - inspector: the inspector function that is called when a message is received by the spammer node. +// Returns: +// - p2p.LibP2PNode: the spammer node. +// - flow.Identity: the identity of the spammer node. +// - *atomicRouter: the atomic router that allows us to set the router to a new instance of the corrupt router.
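As an aside on the inspector hook documented above: the contract is that the inspector runs on every incoming RPC before the router sees it, and a non-nil error silently drops that RPC. The following self-contained sketch models that drop-on-error contract; `PeerID`, `RPC`, and `Inspector` are simplified illustrative stand-ins, not the real pubsub or flow-go types.

```go
package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for the pubsub types; the real corrupt.RPC
// carries full control messages and payloads.
type PeerID string

type RPC struct{ IHaveCount int }

// Inspector mirrors the hook's contract: it sees every incoming RPC
// before the router does, and a non-nil error drops the RPC.
type Inspector func(from PeerID, rpc *RPC) error

// handleIncoming models the drop-on-error behaviour.
func handleIncoming(from PeerID, rpc *RPC, inspect Inspector) bool {
	if err := inspect(from, rpc); err != nil {
		fmt.Printf("dropped RPC from %s: %v\n", from, err)
		return false // the RPC never reaches the router
	}
	return true // the router would process the RPC here
}

func main() {
	// An inspector that rejects RPCs advertising too many iHaves, loosely
	// analogous to what a victim-side test inspector might enforce.
	limitIHaves := func(from PeerID, rpc *RPC) error {
		if rpc.IHaveCount > 10 {
			return errors.New("too many iHave advertisements")
		}
		return nil
	}

	fmt.Println(handleIncoming("spammer", &RPC{IHaveCount: 50}, limitIHaves)) // false
	fmt.Println(handleIncoming("honest", &RPC{IHaveCount: 2}, limitIHaves))   // true
}
```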
+func newSpammerNodeWithRpcInspector( + t *testing.T, + sporkId flow.Identifier, + role flow.Role, + provider module.IdentityProvider, + inspector func(id peer.ID, rpc *corrupt.RPC) error) (p2p.LibP2PNode, flow.Identity, *atomicRouter) { router := newAtomicRouter() + var opts []p2ptest.NodeFixtureParameterOption + opts = append(opts, + p2ptest.WithRole(role), + p2ptest.WithValidateQueueSize(10_000), // set a high limit to avoid dropping messages + internal.WithCorruptGossipSub( + CorruptGossipSubFactory(func(r *corrupt.GossipSubRouter) { + require.NotNil(t, r) + router.set(r) + }), + CorruptGossipSubConfigFactoryWithInspector(inspector), + ), + ) spammerNode, spammerId := p2ptest.NodeFixture( t, sporkId, t.Name(), - p2ptest.WithRole(role), - internal.WithCorruptGossipSub(CorruptGossipSubFactory(func(r *corrupt.GossipSubRouter) { - require.NotNil(t, r) - router.set(r) - }), - CorruptGossipSubConfigFactoryWithInspector(func(id peer.ID, rpc *corrupt.RPC) error { - // here we can inspect the incoming RPC message to the spammer node - return nil - })), + provider, + opts..., ) return spammerNode, spammerId, router } diff --git a/insecure/corruptlibp2p/libp2p_node_factory.go b/insecure/corruptlibp2p/libp2p_node_factory.go index 80c7ca4bdfe..4b63968e680 100644 --- a/insecure/corruptlibp2p/libp2p_node_factory.go +++ b/insecure/corruptlibp2p/libp2p_node_factory.go @@ -4,80 +4,140 @@ import ( "context" "fmt" + corrupt "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" madns "github.com/multiformats/go-multiaddr-dns" "github.com/rs/zerolog" - corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub" - fcrypto "github.com/onflow/flow-go/crypto" + fcrypto "github.com/onflow/crypto" + + "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/codec/cbor" + "github.com/onflow/flow-go/network/netconf" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2pbuilder" - p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" + p2pbuilder "github.com/onflow/flow-go/network/p2p/builder" + p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config" + p2pnode "github.com/onflow/flow-go/network/p2p/node" ) -// NewCorruptLibP2PNodeFactory wrapper around the original DefaultLibP2PNodeFactory. Nodes returned from this factory func will be corrupted libp2p nodes. -func NewCorruptLibP2PNodeFactory( +// InitCorruptLibp2pNode initializes and returns a corrupt libp2p node that should only be used for BFT testing in +// the BFT testnet. This node is corrupt in the sense that it runs the corrupt gossipsub adapter and router instead of +// the stock gossipsub stack used by the other nodes in the network, and is therefore not protocol-compatible with them. +// This node should only be used for testing purposes. +// Args: +// - log: logger +// - chainID: chain id of the network this node is being used for (should be BFT testnet) +// - address: address of the node in the form of /ip4/ ... /tcp/ ... /p2p/ ... (see libp2p documentation for more info) +// - flowKey: private key of the node used for signing messages and establishing secure connections +// - sporkId: spork id of the network this node is being used for. +// - idProvider: identity provider used for translating peer ids to flow ids.
+// - metricsCfg: metrics configuration used for initializing the metrics collector +// - resolver: resolver used for resolving multiaddresses to ip addresses +// - role: role of the node (a valid Flow role). +// - connGaterCfg: connection gater configuration used for initializing the connection gater +// - peerManagerCfg: peer manager configuration used for initializing the peer manager +// - uniCfg: unicast configuration used for initializing unicast communication +// - netConfig: network configuration (gossipsub, resource manager, connection manager); disallowListCacheCfg: disallow list cache configuration +// - topicValidatorDisabled: whether the topic validator is disabled +// - withMessageSigning: whether message signing is enabled +// - withStrictSignatureVerification: whether strict signature verification is enabled +// Returns: +// - p2p.LibP2PNode: initialized corrupt libp2p node +// - error: error if any. Any error returned from this function is fatal. func InitCorruptLibp2pNode( log zerolog.Logger, chainID flow.ChainID, address string, flowKey fcrypto.PrivateKey, sporkId flow.Identifier, idProvider module.IdentityProvider, - metricsCfg module.LibP2PMetrics, + metricsCfg module.NetworkMetrics, resolver madns.BasicResolver, role string, - connGaterCfg *p2pconfig.ConnectionGaterConfig, - peerManagerCfg *p2pconfig.PeerManagerConfig, - uniCfg *p2pconfig.UnicastConfig, - gossipSubCfg *p2pbuilder.GossipSubConfig, + connGaterCfg *p2pbuilderconfig.ConnectionGaterConfig, + peerManagerCfg *p2pbuilderconfig.PeerManagerConfig, + uniCfg *p2pbuilderconfig.UnicastConfig, + netConfig *netconf.Config, + disallowListCacheCfg *p2p.DisallowListCacheConfig, topicValidatorDisabled, withMessageSigning, withStrictSignatureVerification bool, -) p2p.LibP2PFactoryFunc { - return func() (p2p.LibP2PNode, error) { - if chainID != flow.BftTestnet { - panic("illegal chain id for using corrupt libp2p node") - } +) (p2p.LibP2PNode, error) { + if chainID != flow.BftTestnet { + panic("illegal chain id for using corrupt libp2p node") + } - builder, err := p2pbuilder.DefaultNodeBuilder( - log, - address, - flowKey, - sporkId, - idProvider, - &p2pconfig.MetricsConfig{ - HeroCacheFactory: metrics.NewNoopHeroCacheMetricsFactory(), - Metrics: metricsCfg, - }, - resolver, - role, - connGaterCfg, - peerManagerCfg, - gossipSubCfg, - p2pbuilder.DefaultResourceManagerConfig(), - uniCfg) + metCfg := &p2pbuilderconfig.MetricsConfig{ + HeroCacheFactory: metrics.NewNoopHeroCacheMetricsFactory(), + Metrics: metricsCfg, + } - if err != nil { - return nil, fmt.Errorf("could not create corrupt libp2p node builder: %w", err) - } - if topicValidatorDisabled { - builder.SetCreateNode(NewCorruptLibP2PNode) - } + dhtActivationStatus, err := cmd.DhtSystemActivationStatus(role, true) + if err != nil { + return nil, fmt.Errorf("could not get dht system activation status: %w", err) + } + builder, err := p2pbuilder.DefaultNodeBuilder( + log, + address, + network.PrivateNetwork, + flowKey, + sporkId, + idProvider, + metCfg, + resolver, + role, + connGaterCfg, + peerManagerCfg, + &netConfig.GossipSub, + &netConfig.ResourceManager, + uniCfg, + &netConfig.ConnectionManager, + disallowListCacheCfg, + dhtActivationStatus) - overrideWithCorruptGossipSub(builder, WithMessageSigning(withMessageSigning), WithStrictSignatureVerification(withStrictSignatureVerification)) - return builder.Build() + if err != nil { + return nil, fmt.Errorf("could not create corrupt libp2p node builder: %w", err) } + if topicValidatorDisabled { + builder.OverrideNodeConstructor(func(config *p2p.NodeConfig) (p2p.LibP2PNode, error)
{ + node, err := p2pnode.NewNode(&p2p.NodeConfig{ + Logger: config.Logger, + Host: config.Host, + PeerManager: config.PeerManager, + Parameters: config.Parameters, + DisallowListCacheCfg: disallowListCacheCfg, + }) + + if err != nil { + return nil, fmt.Errorf("could not create libp2p node part of the corrupt libp2p: %w", err) + } + + return &CorruptP2PNode{Node: node, logger: config.Logger.With().Str("component", "corrupt_libp2p").Logger(), codec: cbor.NewCodec()}, nil + }) + } + + overrideWithCorruptGossipSub( + builder, + WithMessageSigning(withMessageSigning), + WithStrictSignatureVerification(withStrictSignatureVerification)) + return builder.Build() } -// CorruptGossipSubFactory returns a factory function that creates a new instance of the forked gossipsub module from -// github.com/yhassanzadeh13/go-libp2p-pubsub for the purpose of BFT testing and attack vector implementation. +// CorruptGossipSubFactory returns a factory function that creates a new instance of the corrupt +// gossipsub adapter for the purpose of BFT testing and attack vector implementation. func CorruptGossipSubFactory(routerOpts ...func(*corrupt.GossipSubRouter)) p2p.GossipSubFactoryFunc { - factory := func(ctx context.Context, logger zerolog.Logger, host host.Host, cfg p2p.PubSubAdapterConfig) (p2p.PubSubAdapter, error) { - adapter, router, err := NewCorruptGossipSubAdapter(ctx, logger, host, cfg) + factory := func( + ctx context.Context, + logger zerolog.Logger, + host host.Host, + cfg p2p.PubSubAdapterConfig, + clusterChangeConsumer p2p.CollectionClusterChangesConsumer) (p2p.PubSubAdapter, error) { + adapter, router, err := NewCorruptGossipSubAdapter(ctx, logger, host, cfg, clusterChangeConsumer) for _, opt := range routerOpts { opt(router) } @@ -86,16 +146,16 @@ func CorruptGossipSubFactory(routerOpts ...func(*corrupt.GossipSubRouter)) p2p.G return factory } -// CorruptGossipSubConfigFactory returns a factory function that creates a new instance of the forked gossipsub config -// from github.com/yhassanzadeh13/go-libp2p-pubsub for the purpose of BFT testing and attack vector implementation. +// CorruptGossipSubConfigFactory returns a factory function that creates a new instance of the corrupt +// gossipsub config for the purpose of BFT testing and attack vector implementation. func CorruptGossipSubConfigFactory(opts ...CorruptPubSubAdapterConfigOption) p2p.GossipSubAdapterConfigFunc { return func(base *p2p.BasePubSubAdapterConfig) p2p.PubSubAdapterConfig { return NewCorruptPubSubAdapterConfig(base, opts...) } } -// CorruptGossipSubConfigFactoryWithInspector returns a factory function that creates a new instance of the forked gossipsub config -// from github.com/yhassanzadeh13/go-libp2p-pubsub for the purpose of BFT testing and attack vector implementation. +// CorruptGossipSubConfigFactoryWithInspector returns a factory function that creates a new instance +// of the corrupt gossipsub config for the purpose of BFT testing and attack vector implementation. 
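The factory helpers above follow a factory-of-factories pattern: router options are captured in a closure and applied to every router the returned factory builds, which is how tests obtain a handle on the corrupt router at construction time. Below is a minimal, self-contained sketch of that pattern; `Router` and `RouterOption` are illustrative stand-ins, not flow-go or pubsub types.

```go
package main

import "fmt"

// Router stands in for the corrupt gossipsub router that a test wants
// a handle on; RouterOption stands in for routerOpts above.
type Router struct{ name string }

type RouterOption func(*Router)

// NewRouterFactory returns a constructor with the options baked in,
// mirroring how the factory applies routerOpts to each new router.
func NewRouterFactory(opts ...RouterOption) func(name string) *Router {
	return func(name string) *Router {
		r := &Router{name: name}
		for _, opt := range opts {
			opt(r)
		}
		return r
	}
}

func main() {
	// The option captures a pointer to the router as it is built, the same
	// trick the spammer uses to store the router in its atomicRouter.
	var captured *Router
	factory := NewRouterFactory(func(r *Router) { captured = r })

	r := factory("spammer-router")
	fmt.Println(captured == r, captured.name) // true spammer-router
}
```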
func CorruptGossipSubConfigFactoryWithInspector(inspector func(peer.ID, *corrupt.RPC) error) p2p.GossipSubAdapterConfigFunc { return func(base *p2p.BasePubSubAdapterConfig) p2p.PubSubAdapterConfig { return NewCorruptPubSubAdapterConfig(base, WithInspector(inspector)) @@ -104,5 +164,5 @@ func CorruptGossipSubConfigFactoryWithInspector(inspector func(peer.ID, *corrupt func overrideWithCorruptGossipSub(builder p2p.NodeBuilder, opts ...CorruptPubSubAdapterConfigOption) { factory := CorruptGossipSubFactory() - builder.SetGossipSubFactory(factory, CorruptGossipSubConfigFactory(opts...)) + builder.OverrideGossipSubFactory(factory, CorruptGossipSubConfigFactory(opts...)) } diff --git a/insecure/corruptlibp2p/p2p_node.go b/insecure/corruptlibp2p/p2p_node.go index 143e1a9e938..8d16cfdb69b 100644 --- a/insecure/corruptlibp2p/p2p_node.go +++ b/insecure/corruptlibp2p/p2p_node.go @@ -4,16 +4,14 @@ import ( "context" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/rs/zerolog" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2pnode" + p2pnode "github.com/onflow/flow-go/network/p2p/node" validator "github.com/onflow/flow-go/network/validator/pubsub" ) @@ -49,9 +47,3 @@ func (n *CorruptP2PNode) Subscribe(topic channels.Topic, _ p2p.TopicValidatorFun topicValidator := AcceptAllTopicValidator() return n.Node.Subscribe(topic, topicValidator) } - -// NewCorruptLibP2PNode returns corrupted libP2PNode that will subscribe to topics using the AcceptAllTopicValidator. -func NewCorruptLibP2PNode(logger zerolog.Logger, host host.Host, pCache p2p.ProtocolPeerCache, peerManager p2p.PeerManager) p2p.LibP2PNode { - node := p2pnode.NewNode(logger, host, pCache, peerManager) - return &CorruptP2PNode{Node: node, logger: logger, codec: cbor.NewCodec()} -} diff --git a/insecure/corruptlibp2p/pubsub_adapter.go b/insecure/corruptlibp2p/pubsub_adapter.go index c059bb0e3f1..0a191dcfb56 100644 --- a/insecure/corruptlibp2p/pubsub_adapter.go +++ b/insecure/corruptlibp2p/pubsub_adapter.go @@ -4,33 +4,31 @@ import ( "context" "fmt" + corrupt "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/rs/zerolog" - corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub" "github.com/onflow/flow-go/insecure/internal" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" "github.com/onflow/flow-go/utils/logging" ) -// CorruptGossipSubAdapter is a wrapper around the forked pubsub topic from -// github.com/yhassanzadeh13/go-libp2p-pubsub that implements the p2p.PubSubAdapter. -// This is needed because in order to use the forked pubsub module, we need to -// use the entire dependency tree of the forked module which is resolved to -// github.com/yhassanzadeh13/go-libp2p-pubsub. This means that we cannot use -// the original libp2p pubsub module in the same package. 
-// Note: we use the forked pubsub module for sake of BFT testing and attack vector -// implementation, it is designed to be completely isolated in the "insecure" package, and -// totally separated from the rest of the codebase. +// CorruptGossipSubAdapter is a wrapper that implements the p2p.PubSubAdapter and injects some additional +// observability. type CorruptGossipSubAdapter struct { component.Component - gossipSub *corrupt.PubSub - router *corrupt.GossipSubRouter - logger zerolog.Logger + gossipSub *corrupt.PubSub + router *corrupt.GossipSubRouter + logger zerolog.Logger + clusterChangeConsumer p2p.CollectionClusterChangesConsumer + peerScoreExposer p2p.PeerScoreExposer } var _ p2p.PubSubAdapter = (*CorruptGossipSubAdapter)(nil) @@ -62,7 +60,7 @@ func (c *CorruptGossipSubAdapter) RegisterTopicValidator(topic string, topicVali c.logger.Fatal(). Bool(logging.KeySuspicious, true). Str("topic", topic). - Str("origin_peer", from.String()). + Str("origin_peer", p2plogging.PeerId(from)). Str("result", fmt.Sprintf("%v", result)). Str("message_type", fmt.Sprintf("%T", message.Data)). Msgf("invalid validation result, should be a bug in the topic validator") @@ -71,7 +69,7 @@ func (c *CorruptGossipSubAdapter) RegisterTopicValidator(topic string, topicVali c.logger.Warn(). Bool(logging.KeySuspicious, true). Str("topic", topic). - Str("origin_peer", from.String()). + Str("origin_peer", p2plogging.PeerId(from)). Str("result", fmt.Sprintf("%v", result)). Str("message_type", fmt.Sprintf("%T", message.Data)). Msg("invalid validation result, returning reject") @@ -104,7 +102,35 @@ func (c *CorruptGossipSubAdapter) ListPeers(topic string) []peer.ID { return c.gossipSub.ListPeers(topic) } -func NewCorruptGossipSubAdapter(ctx context.Context, logger zerolog.Logger, h host.Host, cfg p2p.PubSubAdapterConfig) (p2p.PubSubAdapter, *corrupt.GossipSubRouter, error) { +func (c *CorruptGossipSubAdapter) GetLocalMeshPeers(topic channels.Topic) []peer.ID { + // this method is a no-op in the corrupt gossipsub; as the corrupt gossipsub is solely used for testing, it does not come with a mesh tracer. + return []peer.ID{} +} + +func (c *CorruptGossipSubAdapter) ActiveClustersChanged(lst flow.ChainIDList) { + c.clusterChangeConsumer.ActiveClustersChanged(lst) +} + +// PeerScoreExposer returns the peer score exposer for the gossipsub adapter. The exposer is a read-only interface +// for querying peer scores and returns the local scoring table of the underlying gossipsub node. +// The exposer is only available if the gossipsub adapter was configured with a score tracer. +// If the gossipsub adapter was not configured with a score tracer, the exposer will be nil. +// Args: +// +// None. +// +// Returns: +// +// The peer score exposer for the gossipsub adapter. +func (c *CorruptGossipSubAdapter) PeerScoreExposer() p2p.PeerScoreExposer { + return c.peerScoreExposer +} + +func NewCorruptGossipSubAdapter(ctx context.Context, + logger zerolog.Logger, + h host.Host, + cfg p2p.PubSubAdapterConfig, + clusterChangeConsumer p2p.CollectionClusterChangesConsumer) (p2p.PubSubAdapter, *corrupt.GossipSubRouter, error) { gossipSubConfig, ok := cfg.(*CorruptPubSubAdapterConfig) if !ok { return nil, nil, fmt.Errorf("invalid gossipsub config type: %T", cfg) @@ -119,18 +145,34 @@ func NewCorruptGossipSubAdapter(ctx context.Context, logger zerolog.Logger, h ho return nil, nil, fmt.Errorf("failed to create corrupt gossipsub: %w", err) } - builder := component.NewComponentManagerBuilder(). 
- AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - ready() - <-ctx.Done() - }).Build() - + builder := component.NewComponentManagerBuilder() adapter := &CorruptGossipSubAdapter{ - Component: builder, - gossipSub: gossipSub, - router: router, - logger: logger, + gossipSub: gossipSub, + router: router, + logger: logger, + clusterChangeConsumer: clusterChangeConsumer, + } + + if scoreTracer := gossipSubConfig.ScoreTracer(); scoreTracer != nil { + builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + logger.Debug().Str("component", "corrupt-gossipsub_score_tracer").Msg("starting score tracer") + scoreTracer.Start(ctx) + logger.Debug().Str("component", "corrupt-gossipsub_score_tracer").Msg("score tracer started") + + <-scoreTracer.Done() + logger.Debug().Str("component", "corrupt-gossipsub_score_tracer").Msg("score tracer stopped") + }) + adapter.peerScoreExposer = scoreTracer } + builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + // it is likely that this adapter is configured without a score tracer, so we need to + // wait for the context to be done in order to prevent premature shutdown. + <-ctx.Done() + }) + + adapter.Component = builder.Build() return adapter, router, nil } diff --git a/insecure/corruptlibp2p/pubsub_adapter_config.go b/insecure/corruptlibp2p/pubsub_adapter_config.go index 002e18608e0..275a8e69497 100644 --- a/insecure/corruptlibp2p/pubsub_adapter_config.go +++ b/insecure/corruptlibp2p/pubsub_adapter_config.go @@ -1,29 +1,24 @@ package corruptlibp2p import ( + "time" + + corrupt "github.com/libp2p/go-libp2p-pubsub" pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" discoveryRouting "github.com/libp2p/go-libp2p/p2p/discovery/routing" - corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub" "github.com/onflow/flow-go/network/p2p" ) -// CorruptPubSubAdapterConfig is a wrapper around the forked pubsub topic from -// github.com/yhassanzadeh13/go-libp2p-pubsub that implements the p2p.PubSubAdapterConfig. -// This is needed because in order to use the forked pubsub module, we need to -// use the entire dependency tree of the forked module which is resolved to -// github.com/yhassanzadeh13/go-libp2p-pubsub. This means that we cannot use -// the original libp2p pubsub module in the same package. -// Note: we use the forked pubsub module for sake of BFT testing and attack vector -// implementation, it is designed to be completely isolated in the "insecure" package, and -// totally separated from the rest of the codebase. +// CorruptPubSubAdapterConfig is a wrapper that implements the p2p.PubSubAdapterConfig. type CorruptPubSubAdapterConfig struct { options []corrupt.Option inspector func(peer.ID, *corrupt.RPC) error withMessageSigning bool withStrictSignatureVerification bool + scoreTracer p2p.PeerScoreTracer } type CorruptPubSubAdapterConfigOption func(config *CorruptPubSubAdapterConfig) @@ -78,8 +73,56 @@ func (c *CorruptPubSubAdapterConfig) WithSubscriptionFilter(filter p2p.Subscript c.options = append(c.options, corrupt.WithSubscriptionFilter(filter)) } -func (c *CorruptPubSubAdapterConfig) WithScoreOption(_ p2p.ScoreOptionBuilder) { - // CorruptPubSub does not support score options. This is a no-op.
+func (c *CorruptPubSubAdapterConfig) WithScoreOption(option p2p.ScoreOptionBuilder) { + params, thresholds := option.BuildFlowPubSubScoreOption() + // convert flow pubsub score option to corrupt pubsub score option + corruptParams := &corrupt.PeerScoreParams{ + SkipAtomicValidation: params.SkipAtomicValidation, Topics: make(map[string]*corrupt.TopicScoreParams), // initialize the topics map: assigning topic params into a nil map below would panic + TopicScoreCap: params.TopicScoreCap, + AppSpecificScore: params.AppSpecificScore, + AppSpecificWeight: params.AppSpecificWeight, + IPColocationFactorWeight: params.IPColocationFactorWeight, + IPColocationFactorThreshold: params.IPColocationFactorThreshold, + IPColocationFactorWhitelist: params.IPColocationFactorWhitelist, + BehaviourPenaltyWeight: params.BehaviourPenaltyWeight, + BehaviourPenaltyThreshold: params.BehaviourPenaltyThreshold, + BehaviourPenaltyDecay: params.BehaviourPenaltyDecay, + DecayInterval: params.DecayInterval, + DecayToZero: params.DecayToZero, + RetainScore: params.RetainScore, + SeenMsgTTL: params.SeenMsgTTL, + } + corruptThresholds := &corrupt.PeerScoreThresholds{ + SkipAtomicValidation: thresholds.SkipAtomicValidation, + GossipThreshold: thresholds.GossipThreshold, + PublishThreshold: thresholds.PublishThreshold, + GraylistThreshold: thresholds.GraylistThreshold, + AcceptPXThreshold: thresholds.AcceptPXThreshold, + OpportunisticGraftThreshold: thresholds.OpportunisticGraftThreshold, + } + for topic, topicParams := range params.Topics { + corruptParams.Topics[topic] = &corrupt.TopicScoreParams{ + SkipAtomicValidation: topicParams.SkipAtomicValidation, + TopicWeight: topicParams.TopicWeight, + TimeInMeshWeight: topicParams.TimeInMeshWeight, + TimeInMeshQuantum: topicParams.TimeInMeshQuantum, + TimeInMeshCap: topicParams.TimeInMeshCap, + FirstMessageDeliveriesWeight: topicParams.FirstMessageDeliveriesWeight, + FirstMessageDeliveriesDecay: topicParams.FirstMessageDeliveriesDecay, + FirstMessageDeliveriesCap: topicParams.FirstMessageDeliveriesCap, + MeshMessageDeliveriesWeight: topicParams.MeshMessageDeliveriesWeight, + MeshMessageDeliveriesDecay: topicParams.MeshMessageDeliveriesDecay, + MeshMessageDeliveriesCap: topicParams.MeshMessageDeliveriesCap, + MeshMessageDeliveriesThreshold: topicParams.MeshMessageDeliveriesThreshold, + MeshMessageDeliveriesWindow: topicParams.MeshMessageDeliveriesWindow, + MeshMessageDeliveriesActivation: topicParams.MeshMessageDeliveriesActivation, + MeshFailurePenaltyWeight: topicParams.MeshFailurePenaltyWeight, + MeshFailurePenaltyDecay: topicParams.MeshFailurePenaltyDecay, + InvalidMessageDeliveriesWeight: topicParams.InvalidMessageDeliveriesWeight, + InvalidMessageDeliveriesDecay: topicParams.InvalidMessageDeliveriesDecay, + } + } + c.options = append(c.options, corrupt.WithPeerScore(corruptParams, corruptThresholds)) } func (c *CorruptPubSubAdapterConfig) WithTracer(_ p2p.PubSubTracer) { @@ -93,14 +136,29 @@ func (c *CorruptPubSubAdapterConfig) WithMessageIdFunction(f func([]byte) string })) } -func (c *CorruptPubSubAdapterConfig) WithScoreTracer(_ p2p.PeerScoreTracer) { - // CorruptPubSub does not support score tracer. This is a no-op.
+func (c *CorruptPubSubAdapterConfig) WithScoreTracer(tracer p2p.PeerScoreTracer) { + c.scoreTracer = tracer + c.options = append(c.options, corrupt.WithPeerScoreInspect(func(snapshot map[peer.ID]*corrupt.PeerScoreSnapshot) { + tracer.UpdatePeerScoreSnapshots(convertPeerScoreSnapshots(snapshot)) + }, tracer.UpdateInterval())) } -func (c *CorruptPubSubAdapterConfig) WithInspectorSuite(_ p2p.GossipSubInspectorSuite) { +func (c *CorruptPubSubAdapterConfig) ScoreTracer() p2p.PeerScoreTracer { + return c.scoreTracer +} + +func (c *CorruptPubSubAdapterConfig) WithRpcInspector(_ p2p.GossipSubRPCInspector) { // CorruptPubSub does not support inspector suite. This is a no-op. } +func (c *CorruptPubSubAdapterConfig) WithPeerGater(_ map[string]float64, _ time.Duration) { + // CorruptPubSub does not need peer gater. This is a no-op. +} + +func (c *CorruptPubSubAdapterConfig) WithValidateQueueSize(size int) { + c.options = append(c.options, corrupt.WithValidateQueueSize(size)) +} + func (c *CorruptPubSubAdapterConfig) Build() []corrupt.Option { return c.options } @@ -112,3 +170,43 @@ func defaultCorruptPubsubOptions(base *p2p.BasePubSubAdapterConfig, withMessageS corrupt.WithMaxMessageSize(base.MaxMessageSize), } } + +// convertPeerScoreSnapshots converts a libp2p pubsub peer score snapshot to a Flow peer score snapshot. +// Args: +// - snapshot: the libp2p pubsub peer score snapshot. +// +// Returns: +// - map[peer.ID]*p2p.PeerScoreSnapshot: the Flow peer score snapshot. +func convertPeerScoreSnapshots(snapshot map[peer.ID]*corrupt.PeerScoreSnapshot) map[peer.ID]*p2p.PeerScoreSnapshot { + newSnapshot := make(map[peer.ID]*p2p.PeerScoreSnapshot) + for id, snap := range snapshot { + newSnapshot[id] = &p2p.PeerScoreSnapshot{ + Topics: convertTopicScoreSnapshot(snap.Topics), + Score: snap.Score, + AppSpecificScore: snap.AppSpecificScore, + BehaviourPenalty: snap.BehaviourPenalty, + IPColocationFactor: snap.IPColocationFactor, + } + } + return newSnapshot +} + +// convertTopicScoreSnapshot converts a libp2p pubsub topic score snapshot to a Flow topic score snapshot. +// Args: +// - snapshot: the libp2p pubsub topic score snapshot. +// +// Returns: +// - map[string]*p2p.TopicScoreSnapshot: the Flow topic score snapshot. 
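Both conversion helpers here follow the same defensive pattern: allocate a fresh map and copy each snapshot field by field, so the Flow-side tracer never aliases the pubsub layer's internal structs. A self-contained sketch of the pattern, using two-field stand-in types (the real conversion copies many more fields per peer and per topic):

```go
package main

import "fmt"

// Illustrative stand-ins for the two snapshot types being bridged.
type libp2pSnapshot struct {
	Score            float64
	BehaviourPenalty float64
}

type flowSnapshot struct {
	Score            float64
	BehaviourPenalty float64
}

// convert mirrors the shape of convertPeerScoreSnapshots: build a fresh
// map and copy values, so callers cannot mutate the source structs.
func convert(in map[string]*libp2pSnapshot) map[string]*flowSnapshot {
	out := make(map[string]*flowSnapshot, len(in))
	for id, snap := range in {
		out[id] = &flowSnapshot{
			Score:            snap.Score,
			BehaviourPenalty: snap.BehaviourPenalty,
		}
	}
	return out
}

func main() {
	in := map[string]*libp2pSnapshot{"peer-1": {Score: -12.5, BehaviourPenalty: 3}}
	fmt.Printf("%+v\n", *convert(in)["peer-1"]) // {Score:-12.5 BehaviourPenalty:3}
}
```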
+func convertTopicScoreSnapshot(snapshot map[string]*corrupt.TopicScoreSnapshot) map[string]*p2p.TopicScoreSnapshot { + newSnapshot := make(map[string]*p2p.TopicScoreSnapshot) + for topic, snap := range snapshot { + newSnapshot[topic] = &p2p.TopicScoreSnapshot{ + TimeInMesh: snap.TimeInMesh, + FirstMessageDeliveries: snap.FirstMessageDeliveries, + MeshMessageDeliveries: snap.MeshMessageDeliveries, + InvalidMessageDeliveries: snap.InvalidMessageDeliveries, + } + } + + return newSnapshot +} diff --git a/insecure/corruptlibp2p/spam_test.go b/insecure/corruptlibp2p/spam_test.go index c99c07f308f..dfad4c6e2aa 100644 --- a/insecure/corruptlibp2p/spam_test.go +++ b/insecure/corruptlibp2p/spam_test.go @@ -2,18 +2,20 @@ package corruptlibp2p_test import ( "context" + "fmt" "sync" "testing" "time" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/network/channels" pb "github.com/libp2p/go-libp2p-pubsub/pb" + corrupt "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" - corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub" "github.com/onflow/flow-go/insecure/corruptlibp2p" "github.com/onflow/flow-go/insecure/internal" @@ -30,17 +32,18 @@ func TestSpam_IHave(t *testing.T) { const messagesToSpam = 3 sporkId := unittest.IdentifierFixture() role := flow.RoleConsensus - - gsrSpammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkId, role) + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) + gsrSpammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkId, role, idProvider) allSpamIHavesReceived := sync.WaitGroup{} allSpamIHavesReceived.Add(messagesToSpam) var iHaveReceivedCtlMsgs []pb.ControlMessage - victimNode, _ := p2ptest.NodeFixture( + victimNode, victimIdentity := p2ptest.NodeFixture( t, sporkId, t.Name(), + idProvider, p2ptest.WithRole(role), internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(func(id peer.ID, rpc *corrupt.RPC) error { @@ -54,28 +57,28 @@ func TestSpam_IHave(t *testing.T) { return nil })), ) - + idProvider.SetIdentities(flow.IdentityList{&victimIdentity, &gsrSpammer.SpammerId}) // starts nodes ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) defer cancel() nodes := []p2p.LibP2PNode{gsrSpammer.SpammerNode, victimNode} - p2ptest.StartNodes(t, signalerCtx, nodes, 5*time.Second) - defer p2ptest.StopNodes(t, nodes, cancel, 5*time.Second) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) gsrSpammer.Start(t) // prior to the test we should ensure that spammer and victim connect. // this is vital as the spammer will circumvent the normal pubsub subscription mechanism and send iHAVE messages directly to the victim. // without a prior connection established, directly spamming pubsub messages may cause a race condition in the pubsub implementation. 
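The ordering constraint in the comment above (connect first, then spam) is worth spelling out: deliveries from a peer the victim has not yet registered are silently dropped, so assertions on received spam would flake. The toy model below makes that race concrete; the names are illustrative, not flow-go or p2ptest APIs.

```go
package main

import (
	"fmt"
	"sync"
)

// victim is a toy stand-in for the victim node: deliveries from peers it
// has not yet registered are silently dropped, as in pubsub.
type victim struct {
	mu        sync.Mutex
	connected map[string]bool
	received  int
}

func (v *victim) connect(peer string) {
	v.mu.Lock()
	defer v.mu.Unlock()
	v.connected[peer] = true
}

func (v *victim) deliver(peer string) {
	v.mu.Lock()
	defer v.mu.Unlock()
	if v.connected[peer] {
		v.received++
	}
}

func main() {
	v := &victim{connected: make(map[string]bool)}

	v.deliver("spammer")    // dropped: no prior connection, asserting on this would flake
	v.connect("spammer")    // the test synchronizes on the connection first...
	v.deliver("spammer")    // ...so subsequent spam is reliably observed
	fmt.Println(v.received) // 1
}
```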
- p2ptest.EnsureConnected(t, ctx, nodes) - p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) - return unittest.ProposalFixture(), blockTopic + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return (*messages.Proposal)(unittest.ProposalFixture()) }) // prepare to spam - generate iHAVE control messages - iHaveSentCtlMsgs := gsrSpammer.GenerateCtlMessages(messagesToSpam, corruptlibp2p.WithIHave(messagesToSpam, 5)) + iHaveSentCtlMsgs := gsrSpammer.GenerateCtlMessages(messagesToSpam, p2ptest.WithIHave(messagesToSpam, 5, fmt.Sprintf("%s/%s", channels.PushBlocks, sporkId))) // start spamming the victim peer gsrSpammer.SpamControlMessage(t, victimNode, iHaveSentCtlMsgs) diff --git a/insecure/corruptnet/conduit.go b/insecure/corruptnet/conduit.go index eb38cad9c0e..edd920a0989 100644 --- a/insecure/corruptnet/conduit.go +++ b/insecure/corruptnet/conduit.go @@ -4,10 +4,9 @@ import ( "context" "fmt" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" ) diff --git a/insecure/corruptnet/conduit_factory.go b/insecure/corruptnet/conduit_factory.go index c62ab0b2340..4a0ba877cb2 100644 --- a/insecure/corruptnet/conduit_factory.go +++ b/insecure/corruptnet/conduit_factory.go @@ -18,7 +18,7 @@ const networkingProtocolTCP = "tcp" // ConduitFactory implements a corrupt conduit factory, that creates corrupt conduits. type ConduitFactory struct { logger zerolog.Logger - adapter network.Adapter + adapter network.ConduitAdapter egressController insecure.EgressController } @@ -36,10 +36,10 @@ func NewCorruptConduitFactory(logger zerolog.Logger, chainId flow.ChainID) *Cond return factory } -// RegisterAdapter sets the Adapter component of the factory. -// The Adapter is a wrapper around the Network layer that only exposes the set of methods +// RegisterAdapter sets the ConduitAdapter component of the factory. +// The ConduitAdapter is a wrapper around the Network layer that only exposes the set of methods that are needed by a conduit. -func (c *ConduitFactory) RegisterAdapter(adapter network.Adapter) error { +func (c *ConduitFactory) RegisterAdapter(adapter network.ConduitAdapter) error { if c.adapter != nil { return fmt.Errorf("could not register a new network adapter, one already exists") } @@ -61,7 +61,7 @@ func (c *ConduitFactory) RegisterEgressController(controller insecure.EgressCont } // NewConduit creates a conduit on the specified channel. -// Prior to creating any conduit, the factory requires an Adapter to be registered with it. +// Prior to creating any conduit, the factory requires a ConduitAdapter to be registered with it.
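The registration contract these ConduitFactory changes preserve (exactly one ConduitAdapter may be registered, and conduit creation fails until a registered adapter exists) can be modeled compactly. The sketch below is self-contained; the error strings echo the diff, and every type is an illustrative stand-in rather than the real flow-go interfaces.

```go
package main

import (
	"errors"
	"fmt"
)

// adapter models the ConduitAdapter dependency: the narrow slice of the
// network layer that a conduit needs.
type adapter interface {
	Publish(channel string, event any) error
}

type conduitFactory struct {
	adapter adapter
}

// RegisterAdapter may succeed at most once, matching the factory above.
func (f *conduitFactory) RegisterAdapter(a adapter) error {
	if f.adapter != nil {
		return errors.New("could not register a new network adapter, one already exists")
	}
	f.adapter = a
	return nil
}

// NewConduit fails until an adapter has been registered.
func (f *conduitFactory) NewConduit(channel string) (func(event any) error, error) {
	if f.adapter == nil {
		return nil, errors.New("could not create a new conduit, missing a registered network adapter")
	}
	a := f.adapter
	return func(event any) error { return a.Publish(channel, event) }, nil
}

type noopAdapter struct{}

func (noopAdapter) Publish(string, any) error { return nil }

func main() {
	f := &conduitFactory{}
	if _, err := f.NewConduit("test-channel"); err != nil {
		fmt.Println(err) // missing a registered network adapter
	}
	fmt.Println(f.RegisterAdapter(noopAdapter{})) // <nil>
	fmt.Println(f.RegisterAdapter(noopAdapter{})) // one already exists
}
```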
func (c *ConduitFactory) NewConduit(ctx context.Context, channel channels.Channel) (network.Conduit, error) { if c.adapter == nil { return nil, fmt.Errorf("could not create a new conduit, missing a registered network adapter") diff --git a/insecure/corruptnet/conduit_factory_test.go b/insecure/corruptnet/conduit_factory_test.go index 4a170b7d1b6..ba1d7ad3dac 100644 --- a/insecure/corruptnet/conduit_factory_test.go +++ b/insecure/corruptnet/conduit_factory_test.go @@ -10,7 +10,7 @@ import ( mockinsecure "github.com/onflow/flow-go/insecure/mock" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -20,7 +20,7 @@ func TestNewConduit_HappyPath(t *testing.T) { ccf := NewCorruptConduitFactory(unittest.Logger(), flow.BftTestnet) channel := channels.TestNetworkChannel require.NoError(t, ccf.RegisterEgressController(&mockinsecure.EgressController{})) - require.NoError(t, ccf.RegisterAdapter(&mocknetwork.Adapter{})) + require.NoError(t, ccf.RegisterAdapter(&mocknetwork.ConduitAdapter{})) c, err := ccf.NewConduit(context.Background(), channel) require.NoError(t, err) @@ -30,7 +30,7 @@ func TestNewConduit_HappyPath(t *testing.T) { // TestRegisterAdapter_FailDoubleRegistration checks that CorruptibleConduitFactory can be registered with only one adapter. func TestRegisterAdapter_FailDoubleRegistration(t *testing.T) { ccf := NewCorruptConduitFactory(unittest.Logger(), flow.BftTestnet) - adapter := mocknetwork.NewAdapter(t) + adapter := mocknetwork.NewConduitAdapter(t) // registering adapter should be successful require.NoError(t, ccf.RegisterAdapter(adapter)) @@ -69,7 +69,7 @@ func TestNewConduit_MissingAdapter(t *testing.T) { func TestNewConduit_MissingEgressController(t *testing.T) { ccf := NewCorruptConduitFactory(unittest.Logger(), flow.BftTestnet) channel := channels.TestNetworkChannel - require.NoError(t, ccf.RegisterAdapter(&mocknetwork.Adapter{})) + require.NoError(t, ccf.RegisterAdapter(&mocknetwork.ConduitAdapter{})) c, err := ccf.NewConduit(context.Background(), channel) require.ErrorContains(t, err, "missing a registered egress controller") diff --git a/insecure/corruptnet/message_processor.go b/insecure/corruptnet/message_processor.go index 324388544d7..4a4556368e5 100644 --- a/insecure/corruptnet/message_processor.go +++ b/insecure/corruptnet/message_processor.go @@ -7,6 +7,7 @@ import ( "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" ) @@ -37,7 +38,11 @@ func (m *MessageProcessor) Process(channel channels.Channel, originID flow.Ident Str("origin_id", fmt.Sprintf("%v", originID)). Str("flow_protocol_event", fmt.Sprintf("%T", event)).Logger() lg.Debug().Msg("processing new incoming event") - attackerRegistered := m.ingressController.HandleIncomingEvent(event, channel, originID) + msg, err := messages.InternalToMessage(event) + if err != nil { + return fmt.Errorf("failed to convert event %T to message: %v", event, err) + } + attackerRegistered := m.ingressController.HandleIncomingEvent(msg, channel, originID) if !attackerRegistered { // No attack orchestrator registered yet, hence pass the ingress message back to the original processor. 
err := m.originalProcessor.Process(channel, originID, event) diff --git a/insecure/corruptnet/message_processor_test.go b/insecure/corruptnet/message_processor_test.go index 2f24f2ff79b..c63577e09c4 100644 --- a/insecure/corruptnet/message_processor_test.go +++ b/insecure/corruptnet/message_processor_test.go @@ -3,13 +3,12 @@ package corruptnet import ( "testing" - "github.com/onflow/flow-go/network/mocknetwork" - "github.com/stretchr/testify/require" mockinsecure "github.com/onflow/flow-go/insecure/mock" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/network/channels" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -28,7 +27,10 @@ func TestProcess_AttackerRegistered(t *testing.T) { ingressController.On("HandleIncomingEvent", msg, channel, originId).Return(true) messageProcessor := NewCorruptMessageProcessor(unittest.Logger(), originalProcessor, ingressController) - err := messageProcessor.Process(channel, originId, msg) + internal, err := msg.ToInternal() + require.NoError(t, err) + + err = messageProcessor.Process(channel, originId, internal) require.NoError(t, err) } @@ -48,11 +50,14 @@ func TestProcess_AttackerNotRegistered(t *testing.T) { corruptChannel := channels.TestNetworkChannel ingressMsg := &message.TestMessage{Text: "this is a test msg"} + internalIngress, err := ingressMsg.ToInternal() + require.NoError(t, err) + // this simulates the corrupt message processor sending the message on the original message processor when an attacker is not registered - originalProcessor.On("Process", corruptChannel, originId, ingressMsg).Return(nil) + originalProcessor.On("Process", corruptChannel, originId, internalIngress).Return(nil) messageProcessor := NewCorruptMessageProcessor(unittest.Logger(), originalProcessor, ingressController) - err := messageProcessor.Process(corruptChannel, originId, ingressMsg) + err = messageProcessor.Process(corruptChannel, originId, internalIngress) require.NoError(t, err) } diff --git a/insecure/corruptnet/network.go b/insecure/corruptnet/network.go index 14486a1c286..f59e6c9b6de 100644 --- a/insecure/corruptnet/network.go +++ b/insecure/corruptnet/network.go @@ -14,20 +14,22 @@ import ( "github.com/rs/zerolog" "google.golang.org/grpc" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/engine/execution/computation/computer" "github.com/onflow/flow-go/engine/execution/utils" verutils "github.com/onflow/flow-go/engine/verification/utils" "github.com/onflow/flow-go/engine/verification/verifier" "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/unittest" ) // Network is a wrapper around the original flow network, that allows a remote attack orchestrator @@ -47,8 +49,8 @@ type Network struct { codec flownet.Codec mu sync.Mutex me module.Local - flowNetwork flownet.Network // original flow network of the node. - server *grpc.Server // touch point of orchestrator network to this factory. + flowNetwork flownet.EngineRegistry // original flow network of the node. 
+ server *grpc.Server // touch point of orchestrator network to this factory. gRPCListenAddress net.Addr conduitFactory insecure.CorruptConduitFactory attackerInboundStream insecure.CorruptNetwork_ConnectAttackerServer // inbound stream to attack orchestrator @@ -63,7 +65,7 @@ type Network struct { approvalHasher hash.Hasher } -var _ flownet.Network = (*Network)(nil) +var _ flownet.EngineRegistry = (*Network)(nil) var _ insecure.EgressController = (*Network)(nil) var _ insecure.IngressController = (*Network)(nil) var _ insecure.CorruptNetworkServer = (*Network)(nil) @@ -74,7 +76,7 @@ func NewCorruptNetwork( address string, me module.Local, codec flownet.Codec, - flowNetwork flownet.Network, + flowNetwork flownet.EngineRegistry, conduitFactory insecure.CorruptConduitFactory) (*Network, error) { if chainId != flow.BftTestnet { panic("illegal chain id for using corrupt network") @@ -250,7 +252,11 @@ func (n *Network) processAttackerIngressMessage(msg *insecure.IngressMessage) er lg.Fatal().Msg("corrupt network received ingress message for an unknown channel") } - err = originalProcessor.(flownet.MessageProcessor).Process(channels.Channel(msg.ChannelID), senderId, event) + internal, err := event.ToInternal() + if err != nil { + lg.Fatal().Err(err).Msg("failed to convert event to internal") + } + err = originalProcessor.(flownet.MessageProcessor).Process(channels.Channel(msg.ChannelID), senderId, internal) if err != nil { lg.Fatal().Err(err).Msg("could not relay ingress message to original processor") } @@ -277,7 +283,7 @@ func (n *Network) processAttackerEgressMessage(msg *insecure.Message) error { Str("flow_protocol_event_type", fmt.Sprintf("%T", event)).Logger() switch e := event.(type) { - case *flow.ExecutionReceipt: + case *messages.ExecutionReceipt: if len(e.ExecutorSignature) == 0 { // empty signature field on execution receipt means attack orchestrator is dictating a result to // CCF, and the receipt fields must be filled out locally. @@ -288,10 +294,10 @@ func (n *Network) processAttackerEgressMessage(msg *insecure.Message) error { Msg("could not generate receipt for attack orchestrator's dictated result") return fmt.Errorf("could not generate execution receipt for attack orchestrator's result: %w", err) } - event = receipt // swaps event with the receipt. + event = (*messages.ExecutionReceipt)(receipt) // swaps event with the receipt. } - case *flow.ResultApproval: + case *messages.ResultApproval: if len(e.VerifierSignature) == 0 { // empty signature field on result approval means attack orchestrator is dictating an attestation to // CCF, and the approval fields must be filled out locally. @@ -304,7 +310,7 @@ func (n *Network) processAttackerEgressMessage(msg *insecure.Message) error { Msg("could not generate result approval for attack orchestrator's dictated attestation") return fmt.Errorf("could not generate result approval for attack orchestrator's attestation: %w", err) } - event = approval // swaps event with the receipt. + event = (*messages.ResultApproval)(approval) // swaps event with the receipt. } } @@ -424,7 +430,7 @@ func (n *Network) eventToIngressMessage(event interface{}, channel channels.Chan func (n *Network) generateExecutionReceipt(result *flow.ExecutionResult) (*flow.ExecutionReceipt, error) { // TODO: fill spock secret with dictated spock data from attack orchestrator. 
- return computer.GenerateExecutionReceipt(n.me, n.receiptHasher, result, []crypto.Signature{}) + return computer.GenerateExecutionReceipt(n.me, n.receiptHasher, result, unittest.SignaturesFixture(1)) } func (n *Network) generateResultApproval(attestation *flow.Attestation) (*flow.ResultApproval, error) { diff --git a/insecure/corruptnet/network_common_test.go b/insecure/corruptnet/network_common_test.go index 20da42c69a3..987dc2dfcfc 100644 --- a/insecure/corruptnet/network_common_test.go +++ b/insecure/corruptnet/network_common_test.go @@ -15,7 +15,7 @@ import ( "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/utils/unittest" @@ -28,7 +28,7 @@ func TestEngineClosingChannel(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock adapter that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock adapter that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. ) { channel := channels.TestNetworkChannel @@ -58,7 +58,7 @@ func processAttackerMessage_EmptyEgressIngressMessage_Exit(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock adapter that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock adapter that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. ) { corruptedEventDispatchedOnFlowNetWg := sync.WaitGroup{} @@ -99,7 +99,7 @@ func processAttackerMessage_NotEmptyEgressIngressMessage_Exit(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock adapter that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock adapter that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. 
) { // creates a corrupted event that attacker is sending on the flow network through the diff --git a/insecure/corruptnet/network_egress_test.go b/insecure/corruptnet/network_egress_test.go index c2b807990fa..adc9d88659b 100644 --- a/insecure/corruptnet/network_egress_test.go +++ b/insecure/corruptnet/network_egress_test.go @@ -7,18 +7,19 @@ import ( "testing" "time" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/network/channels" "github.com/golang/protobuf/ptypes/empty" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/engine/testutil" "github.com/onflow/flow-go/insecure" mockinsecure "github.com/onflow/flow-go/insecure/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" - "github.com/onflow/flow-go/network/mocknetwork" + "github.com/onflow/flow-go/module/local" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -26,16 +27,20 @@ import ( // The attacker is mocked out in this test. func TestHandleOutgoingEvent_AttackerRegistered(t *testing.T) { codec := unittest.NetworkCodec() - corruptedIdentity := unittest.IdentityFixture(unittest.WithAddress(insecure.DefaultAddress)) - flowNetwork := mocknetwork.NewNetwork(t) + corruptedIdentity := unittest.PrivateNodeInfoFixture(unittest.WithAddress(insecure.DefaultAddress)) + flowNetwork := mocknetwork.NewEngineRegistry(t) ccf := mockinsecure.NewCorruptConduitFactory(t) ccf.On("RegisterEgressController", mock.Anything).Return(nil) + privateKeys, err := corruptedIdentity.PrivateKeys() + require.NoError(t, err) + me, err := local.New(corruptedIdentity.Identity().IdentitySkeleton, privateKeys.StakingKey) + require.NoError(t, err) corruptNetwork, err := NewCorruptNetwork( unittest.Logger(), flow.BftTestnet, insecure.DefaultAddress, - testutil.LocalFixture(t, corruptedIdentity), + me, codec, flowNetwork, ccf) @@ -88,7 +93,7 @@ func TestHandleOutgoingEvent_NoAttacker_UnicastOverNetwork(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock adapter that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock adapter that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. ) { msg := &message.TestMessage{Text: "this is a test msg"} @@ -111,7 +116,7 @@ func TestHandleOutgoingEvent_NoAttacker_PublishOverNetwork(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock adapter that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock adapter that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. ) { msg := &message.TestMessage{Text: "this is a test msg"} @@ -138,7 +143,7 @@ func TestHandleOutgoingEvent_NoAttacker_MulticastOverNetwork(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock adapter that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock adapter that ccf uses to communicate with authorized flow nodes. 
stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. ) { msg := &message.TestMessage{Text: "this is a test msg"} @@ -164,7 +169,7 @@ func TestProcessAttackerMessage_MessageSentOnFlowNetwork(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock adapter that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock adapter that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. ) { // creates a corrupted event that attacker is sending on the flow network through the @@ -204,7 +209,7 @@ func TestProcessAttackerMessage_ResultApproval_Dictated(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock adapter that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock adapter that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. ) { // creates a corrupted result approval that attacker is sending on the flow network through the @@ -212,7 +217,7 @@ func TestProcessAttackerMessage_ResultApproval_Dictated(t *testing.T) { // corrupted result approval dictated by attacker needs to only have the attestation field, as the rest will be // filled up by the CCF. dictatedAttestation := *unittest.AttestationFixture() - msg, _, _ := insecure.EgressMessageFixture(t, unittest.NetworkCodec(), insecure.Protocol_PUBLISH, &flow.ResultApproval{ + msg, _, _ := insecure.EgressMessageFixture(t, unittest.NetworkCodec(), insecure.Protocol_PUBLISH, &messages.ResultApproval{ Body: flow.ResultApprovalBody{ Attestation: dictatedAttestation, }, @@ -228,7 +233,7 @@ func TestProcessAttackerMessage_ResultApproval_Dictated(t *testing.T) { corruptedEventDispatchedOnFlowNetWg := sync.WaitGroup{} corruptedEventDispatchedOnFlowNetWg.Add(1) adapter.On("PublishOnChannel", params...).Run(func(args mock.Arguments) { - approval, ok := args[1].(*flow.ResultApproval) + approval, ok := args[1].(*messages.ResultApproval) require.True(t, ok) // attestation part of the approval must be the same as attacker dictates. @@ -276,11 +281,11 @@ func TestProcessAttackerMessage_ResultApproval_PassThrough(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock flow network that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock flow network that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. 
) { - passThroughApproval := unittest.ResultApprovalFixture() + passThroughApproval := (*messages.ResultApproval)(unittest.ResultApprovalFixture()) msg, _, _ := insecure.EgressMessageFixture(t, unittest.NetworkCodec(), insecure.Protocol_PUBLISH, passThroughApproval) params := []interface{}{channels.Channel(msg.Egress.ChannelID), mock.Anything} @@ -293,7 +298,7 @@ func TestProcessAttackerMessage_ResultApproval_PassThrough(t *testing.T) { corruptedEventDispatchedOnFlowNetWg := sync.WaitGroup{} corruptedEventDispatchedOnFlowNetWg.Add(1) adapter.On("PublishOnChannel", params...).Run(func(args mock.Arguments) { - approval, ok := args[1].(*flow.ResultApproval) + approval, ok := args[1].(*messages.ResultApproval) require.True(t, ok) // attestation part of the approval must be the same as attacker dictates. @@ -320,7 +325,7 @@ func TestProcessAttackerMessage_ExecutionReceipt_Dictated(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock flow network that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock flow network that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. ) { // creates a corrupted execution receipt that attacker is sending on the flow network through the @@ -328,8 +333,8 @@ func TestProcessAttackerMessage_ExecutionReceipt_Dictated(t *testing.T) { // corrupted execution receipt dictated by attacker needs to only have the result field, as the rest will be // filled up by the CCF. dictatedResult := *unittest.ExecutionResultFixture() - msg, _, _ := insecure.EgressMessageFixture(t, unittest.NetworkCodec(), insecure.Protocol_PUBLISH, &flow.ExecutionReceipt{ - ExecutionResult: dictatedResult, + msg, _, _ := insecure.EgressMessageFixture(t, unittest.NetworkCodec(), insecure.Protocol_PUBLISH, &messages.ExecutionReceipt{ + UnsignedExecutionReceipt: flow.UnsignedExecutionReceipt{ExecutionResult: dictatedResult}, }) params := []interface{}{channels.Channel(msg.Egress.ChannelID), mock.Anything} @@ -342,7 +347,7 @@ func TestProcessAttackerMessage_ExecutionReceipt_Dictated(t *testing.T) { corruptedEventDispatchedOnFlowNetWg := sync.WaitGroup{} corruptedEventDispatchedOnFlowNetWg.Add(1) adapter.On("PublishOnChannel", params...).Run(func(args mock.Arguments) { - receipt, ok := args[1].(*flow.ExecutionReceipt) + receipt, ok := args[1].(*messages.ExecutionReceipt) require.True(t, ok) // result part of the receipt must be the same as attacker dictates. @@ -352,8 +357,8 @@ func TestProcessAttackerMessage_ExecutionReceipt_Dictated(t *testing.T) { require.Equal(t, corruptedId.NodeID, receipt.ExecutorID) // receipt should have a valid signature from corrupted node - id := receipt.ID() - valid, err := corruptedId.StakingPubKey.Verify(receipt.ExecutorSignature, id[:], corruptNetwork.receiptHasher) + unsignedReceiptID := receipt.UnsignedExecutionReceipt.ID() + valid, err := corruptedId.StakingPubKey.Verify(receipt.ExecutorSignature, unsignedReceiptID[:], corruptNetwork.receiptHasher) require.NoError(t, err) require.True(t, valid) @@ -380,11 +385,10 @@ func TestProcessAttackerMessage_ExecutionReceipt_PassThrough(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock flow network that ccf uses to communicate with authorized flow nodes. 
+ adapter *mocknetwork.ConduitAdapter, // mock flow network that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to the corrupt network. ) { - - passThroughReceipt := unittest.ExecutionReceiptFixture() + passThroughReceipt := (*messages.ExecutionReceipt)(unittest.ExecutionReceiptFixture()) msg, _, _ := insecure.EgressMessageFixture(t, unittest.NetworkCodec(), insecure.Protocol_PUBLISH, passThroughReceipt) params := []interface{}{channels.Channel(msg.Egress.ChannelID), mock.Anything} @@ -397,7 +401,7 @@ func TestProcessAttackerMessage_ExecutionReceipt_PassThrough(t *testing.T) { corruptedEventDispatchedOnFlowNetWg := sync.WaitGroup{} corruptedEventDispatchedOnFlowNetWg.Add(1) adapter.On("PublishOnChannel", params...).Run(func(args mock.Arguments) { - receipt, ok := args[1].(*flow.ExecutionReceipt) + receipt, ok := args[1].(*messages.ExecutionReceipt) require.True(t, ok) // receipt should be completely intact. diff --git a/insecure/corruptnet/network_ingress_test.go b/insecure/corruptnet/network_ingress_test.go index 6c69250ac41..06df7951cc5 100644 --- a/insecure/corruptnet/network_ingress_test.go +++ b/insecure/corruptnet/network_ingress_test.go @@ -12,7 +12,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -23,7 +23,7 @@ func TestHandleIncomingEvent_AttackerRegistered(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock adapter that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock adapter that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. ) { codec := unittest.NetworkCodec() @@ -74,7 +74,7 @@ func TestHandleIncomingEvent_NoAttacker(t *testing.T) { func( corruptedId flow.Identity, // identity of ccf corruptNetwork *Network, - adapter *mocknetwork.Adapter, // mock adapter that ccf uses to communicate with authorized flow nodes. + adapter *mocknetwork.ConduitAdapter, // mock adapter that ccf uses to communicate with authorized flow nodes. stream insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. 
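Stepping back to the egress hunks above: the fixtures migrate from flow.ResultApproval / flow.ExecutionReceipt to their messages counterparts via a plain pointer conversion, and the executor signature is now verified against the ID of the embedded UnsignedExecutionReceipt rather than the full receipt. A rough sketch of both moves, assuming the types behave as this diff shows (identical underlying structs make the conversion legal, and crypto.PublicKey / hash.Hasher come from github.com/onflow/crypto); the helper name is hypothetical:

package example

import (
	"github.com/onflow/crypto"
	"github.com/onflow/crypto/hash"

	"github.com/onflow/flow-go/model/messages"
	"github.com/onflow/flow-go/utils/unittest"
)

// verifyPassThroughReceipt shows the wrapper-type conversion and the
// unsigned-receipt signature check used in the receipt tests above.
func verifyPassThroughReceipt(pk crypto.PublicKey, hasher hash.Hasher) (bool, error) {
	// a direct pointer conversion works because messages.ExecutionReceipt is
	// assumed to share the underlying struct of flow.ExecutionReceipt
	receipt := (*messages.ExecutionReceipt)(unittest.ExecutionReceiptFixture())
	// the signature covers only the unsigned portion of the receipt
	unsignedID := receipt.UnsignedExecutionReceipt.ID()
	return pk.Verify(receipt.ExecutorSignature, unsignedID[:], hasher)
}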
) { originId := unittest.IdentifierFixture() diff --git a/insecure/corruptnet/network_test_helper.go b/insecure/corruptnet/network_test_helper.go index 11b45734575..229b47c572f 100644 --- a/insecure/corruptnet/network_test_helper.go +++ b/insecure/corruptnet/network_test_helper.go @@ -9,21 +9,18 @@ import ( "testing" "time" - "github.com/stretchr/testify/mock" - "github.com/rs/zerolog" - + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "google.golang.org/grpc" grpcinsecure "google.golang.org/grpc/credentials/insecure" "github.com/onflow/flow-go/insecure" - "github.com/onflow/flow-go/module/irrecoverable" - - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/engine/testutil" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network/mocknetwork" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/local" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -31,17 +28,17 @@ import ( // By default, no attacker is registered on this corruptible network. // This function is not meant to be used by tests directly because it expects the corrupt network to be properly started and stopped. // Otherwise, it will throw mock expectations errors. -func corruptNetworkFixture(t *testing.T, logger zerolog.Logger, corruptedID ...*flow.Identity) (*Network, *mocknetwork.Adapter) { +func corruptNetworkFixture(t *testing.T, logger zerolog.Logger, corruptedID ...flow.Identifier) (*Network, *mocknetwork.ConduitAdapter, bootstrap.NodeInfo) { // create corruptible network with no attacker registered codec := unittest.NetworkCodec() - corruptedIdentity := unittest.IdentityFixture(unittest.WithAddress(insecure.DefaultAddress)) + corruptedIdentity := unittest.PrivateNodeInfoFixture(unittest.WithAddress(insecure.DefaultAddress)) // some tests will want to create corruptible network with a specific ID if len(corruptedID) > 0 { - corruptedIdentity = corruptedID[0] + corruptedIdentity.NodeID = corruptedID[0] } - flowNetwork := mocknetwork.NewNetwork(t) + flowNetwork := mocknetwork.NewEngineRegistry(t) flowNetwork.On("Start", mock.Anything).Return() // mock flow network will pretend to be ready when required @@ -61,22 +58,26 @@ func corruptNetworkFixture(t *testing.T, logger zerolog.Logger, corruptedID ...* // set up adapter, so we can check that it called the expected method. 
// It will be checked automatically without having to remember to call mock.AssertExpectationsForObjects() - adapter := mocknetwork.NewAdapter(t) + adapter := mocknetwork.NewConduitAdapter(t) err := ccf.RegisterAdapter(adapter) require.NoError(t, err) + private, err := corruptedIdentity.PrivateKeys() + require.NoError(t, err) + me, err := local.New(corruptedIdentity.Identity().IdentitySkeleton, private.StakingKey) + require.NoError(t, err) corruptibleNetwork, err := NewCorruptNetwork( logger, flow.BftTestnet, insecure.DefaultAddress, - testutil.LocalFixture(t, corruptedIdentity), + me, codec, flowNetwork, ccf) require.NoError(t, err) // return adapter so callers can set up test specific expectations - return corruptibleNetwork, adapter + return corruptibleNetwork, adapter, corruptedIdentity } // runCorruptNetworkTest creates and starts a corruptible network, runs the "run" function of a simulated attacker and then @@ -85,12 +86,10 @@ func runCorruptNetworkTest(t *testing.T, logger zerolog.Logger, run func( flow.Identity, // identity of ccf *Network, // corruptible network - *mocknetwork.Adapter, // mock adapter that corrupted network uses to communicate with authorized flow nodes. + *mocknetwork.ConduitAdapter, // mock adapter that corrupted network uses to communicate with authorized flow nodes. insecure.CorruptNetwork_ProcessAttackerMessageClient, // gRPC interface that orchestrator network uses to send messages to this ccf. )) { - corruptedIdentity := unittest.IdentityFixture(unittest.WithAddress(insecure.DefaultAddress)) - // life-cycle management of corruptible network ctx, cancel := context.WithCancel(context.Background()) ccfCtx, errChan := irrecoverable.WithSignaler(ctx) @@ -103,7 +102,8 @@ func runCorruptNetworkTest(t *testing.T, logger zerolog.Logger, } }() - corruptibleNetwork, adapter := corruptNetworkFixture(t, logger, corruptedIdentity) + corruptedIdentifier := unittest.IdentifierFixture() + corruptibleNetwork, adapter, corruptedIdentity := corruptNetworkFixture(t, logger, corruptedIdentifier) // start corruptible network corruptibleNetwork.Start(ccfCtx) @@ -124,7 +124,7 @@ func runCorruptNetworkTest(t *testing.T, logger zerolog.Logger, stream, err := client.ProcessAttackerMessage(context.Background()) require.NoError(t, err) - run(*corruptedIdentity, corruptibleNetwork, adapter, stream) + run(*corruptedIdentity.Identity(), corruptibleNetwork, adapter, stream) // terminates orchestratorNetwork cancel() diff --git a/insecure/dependency_test.go b/insecure/dependency_test.go new file mode 100644 index 00000000000..a2375847be9 --- /dev/null +++ b/insecure/dependency_test.go @@ -0,0 +1,8 @@ +package insecure + +import "github.com/btcsuite/btcd/chaincfg/chainhash" + +// this is added to resolve the ambiguous chainhash import issue; +// the code is not used, but it is needed to force go.mod to specify and retain the chainhash version. +// workaround for issue: https://github.com/golang/go/issues/27899 +var _ = chainhash.Hash{} diff --git a/insecure/fixtures.go b/insecure/fixtures.go index 3abe3392f8c..6292a7ac316 100644 --- a/insecure/fixtures.go +++ b/insecure/fixtures.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" + flownetmsg "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/utils/unittest" ) @@ -40,7 +41,7 @@ func EgressMessageFixture(t *testing.T, codec network.Codec, protocol Protocol, // encodes event to create payload payload, err := 
codec.Encode(content) require.NoError(t, err) - eventIDHash, err := network.EventId(channel, payload) + eventIDHash, err := flownetmsg.EventId(channel, payload) require.NoError(t, err) eventID := flow.HashToID(eventIDHash) diff --git a/insecure/go.mod b/insecure/go.mod index 73398c2b192..56cfc97a0d4 100644 --- a/insecure/go.mod +++ b/insecure/go.mod @@ -1,271 +1,353 @@ module github.com/onflow/flow-go/insecure -go 1.19 +go 1.25.0 require ( - github.com/golang/protobuf v1.5.2 + github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 + github.com/golang/protobuf v1.5.4 github.com/hashicorp/go-multierror v1.1.1 - github.com/ipfs/go-datastore v0.6.0 - github.com/libp2p/go-libp2p v0.24.2 - github.com/libp2p/go-libp2p-pubsub v0.8.2 - github.com/multiformats/go-multiaddr-dns v0.3.1 - github.com/onflow/flow-go v0.29.8 - github.com/onflow/flow-go/crypto v0.24.7 + github.com/ipfs/go-datastore v0.8.2 + github.com/libp2p/go-libp2p v0.38.2 + github.com/libp2p/go-libp2p-pubsub v0.13.0 + github.com/multiformats/go-multiaddr-dns v0.4.1 + github.com/onflow/crypto v0.25.3 + github.com/onflow/flow-go v0.36.2-0.20240717162253-d5d2e606ef53 github.com/rs/zerolog v1.29.0 - github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 - github.com/yhassanzadeh13/go-libp2p-pubsub v0.6.2-0.20221208234712-b44d9133e4ee - go.uber.org/atomic v1.10.0 - google.golang.org/grpc v1.53.0 - google.golang.org/protobuf v1.30.0 + github.com/spf13/pflag v1.0.6 + github.com/stretchr/testify v1.11.1 + go.uber.org/atomic v1.11.0 + google.golang.org/grpc v1.75.1 + google.golang.org/protobuf v1.36.9 ) require ( - cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.12.0 // indirect - cloud.google.com/go/storage v1.28.1 // indirect - github.com/aws/aws-sdk-go-v2 v1.17.7 // indirect - github.com/aws/aws-sdk-go-v2/config v1.18.19 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.18 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 // indirect + cel.dev/expr v0.24.0 // indirect + cloud.google.com/go v0.120.0 // indirect + cloud.google.com/go/auth v0.16.4 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.8.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + cloud.google.com/go/storage v1.50.0 // indirect + github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc // indirect + github.com/StackExchange/wmi v1.2.1 // indirect + github.com/VictoriaMetrics/fastcache v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2 v1.39.1 // indirect + github.com/aws/aws-sdk-go-v2/config v1.31.9 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.13 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 // indirect - 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 // indirect github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 // indirect - github.com/aws/smithy-go v1.13.5 // indirect - github.com/benbjohnson/clock v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 // indirect + github.com/aws/smithy-go v1.23.0 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.3.0 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect - github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/bits-and-blooms/bitset v1.24.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/containerd/cgroups v1.0.4 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect + github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble/v2 v2.0.6 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/consensys/gnark-crypto v0.18.0 // indirect + github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/cskr/pubsub v1.0.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/deckarep/golang-set/v2 v2.6.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // indirect - 
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect + github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/ef-ds/deque v1.0.4 // indirect - github.com/elastic/gosigar v0.14.2 // indirect - github.com/ethereum/go-ethereum v1.9.13 // indirect - github.com/flynn/noise v1.0.0 // indirect + github.com/elastic/gosigar v0.14.3 // indirect + github.com/emicklei/dot v1.6.2 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.0 // indirect + github.com/ethereum/go-ethereum v1.16.3 // indirect + github.com/ethereum/go-verkle v0.2.2 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/ferranbt/fastssz v0.1.4 // indirect + github.com/filecoin-project/go-clock v0.1.0 // indirect + github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/fxamacker/cbor/v2 v2.4.1-0.20220515183430-ad2eae63303f // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829 // indirect github.com/fxamacker/circlehash v0.3.0 // indirect - github.com/gammazero/deque v0.1.0 // indirect - github.com/gammazero/workerpool v1.1.2 // indirect - github.com/ghodss/yaml v1.0.0 // indirect + github.com/fxamacker/golang-lru/v2 v2.0.0-20250716153046-22c8d17dc4ee // indirect + github.com/gabriel-vasile/mimetype v1.4.6 // indirect + github.com/gammazero/deque v1.0.0 // indirect + github.com/gammazero/workerpool v1.1.3 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect - github.com/go-test/deep v1.0.8 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.19.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.0.0 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/golang/glog v1.2.5 // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20221219190121-3cb0bae90811 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.1 // indirect - github.com/gorilla/mux v1.8.0 // indirect - 
github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2 v2.0.0-rc.2 // indirect - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbfea // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/huin/goupnp v1.0.3 // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/holiman/uint256 v1.3.2 // indirect + github.com/huandu/go-clone v1.7.2 // indirect + github.com/huandu/go-clone/generic v1.7.2 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/go-block-format v0.0.3 // indirect - github.com/ipfs/go-blockservice v0.4.0 // indirect - github.com/ipfs/go-cid v0.3.2 // indirect + github.com/ipfs/boxo v0.27.2 // indirect + github.com/ipfs/go-block-format v0.2.0 // indirect + github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-cidutil v0.1.0 // indirect - github.com/ipfs/go-ds-badger2 v0.1.3 // indirect - github.com/ipfs/go-fetcher v1.5.0 // indirect - github.com/ipfs/go-ipfs-blockstore v1.2.0 // indirect + github.com/ipfs/go-ds-pebble v0.5.0 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect - github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect - github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect - github.com/ipfs/go-ipfs-pq v0.0.2 // indirect - github.com/ipfs/go-ipfs-provider v0.7.0 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect - github.com/ipfs/go-ipld-format v0.3.0 // indirect - github.com/ipfs/go-ipns v0.2.0 // indirect + github.com/ipfs/go-ipfs-pq v0.0.3 // indirect + github.com/ipfs/go-ipfs-util v0.0.3 // indirect + github.com/ipfs/go-ipld-format v0.6.0 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect - github.com/ipfs/go-peertaskqueue v0.7.0 // indirect - github.com/ipfs/go-verifcid v0.0.1 // indirect - github.com/ipld/go-ipld-prime v0.14.1 // indirect + github.com/ipfs/go-peertaskqueue v0.8.2 // indirect + github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/kevinburke/go-bindata v3.23.0+incompatible // indirect - github.com/klauspost/compress v1.15.13 // indirect - github.com/klauspost/cpuid/v2 v2.2.2 // indirect - 
github.com/koron/go-ssdp v0.0.3 // indirect + github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 // indirect + github.com/k0kubun/pp/v3 v3.5.0 // indirect + github.com/kevinburke/go-bindata v3.24.0+incompatible // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/libp2p/go-addr-util v0.1.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect - github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect - github.com/libp2p/go-libp2p-core v0.20.1 // indirect - github.com/libp2p/go-libp2p-kad-dht v0.19.0 // indirect - github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect + github.com/libp2p/go-flow-metrics v0.2.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.28.2 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.4 // indirect github.com/libp2p/go-libp2p-record v0.2.0 // indirect - github.com/libp2p/go-msgio v0.2.0 // indirect - github.com/libp2p/go-nat v0.1.0 // indirect - github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-openssl v0.1.0 // indirect - github.com/libp2p/go-reuseport v0.2.0 // indirect - github.com/libp2p/go-yamux/v4 v4.0.0 // indirect - github.com/logrusorgru/aurora v2.0.3+incompatible // indirect - github.com/lucas-clemente/quic-go v0.31.1 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.4 // indirect + github.com/libp2p/go-msgio v0.3.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect + github.com/libp2p/go-netroute v0.2.2 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.1 // indirect + github.com/logrusorgru/aurora/v4 v4.0.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/m4ksio/wal v1.0.1-0.20221209164835-154a17396e4c // indirect - github.com/magiconair/properties v1.8.6 // indirect - github.com/marten-seemann/qtls-go1-18 v0.1.3 // indirect - github.com/marten-seemann/qtls-go1-19 v0.1.1 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-pointer v0.0.1 // indirect - github.com/mattn/go-runewidth v0.0.13 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.50 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect - github.com/minio/sha256-simd v1.0.0 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr 
v0.8.0 // indirect + github.com/multiformats/go-multiaddr v0.14.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.7.0 // indirect - github.com/multiformats/go-multihash v0.2.1 // indirect - github.com/multiformats/go-multistream v0.3.3 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-multistream v0.6.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onflow/atree v0.5.0 // indirect - github.com/onflow/cadence v0.38.1 // indirect - github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 // indirect - github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 // indirect - github.com/onflow/flow-ft/lib/go/contracts v0.7.0 // indirect - github.com/onflow/flow-go-sdk v0.40.0 // indirect - github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230428213521-89bcc9e8517e // indirect - github.com/onflow/go-bitswap v0.0.0-20221017184039-808c5791a8a8 // indirect - github.com/onflow/sdks v0.5.0 // indirect - github.com/onsi/ginkgo/v2 v2.6.1 // indirect - github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/onflow/atree v0.10.1 // indirect + github.com/onflow/cadence v1.7.1 // indirect + github.com/onflow/fixed-point v0.1.1 // indirect + github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.0 // indirect + github.com/onflow/flow-core-contracts/lib/go/templates v1.9.0 // indirect + github.com/onflow/flow-evm-bridge v0.1.0 // indirect + github.com/onflow/flow-ft/lib/go/contracts v1.0.1 // indirect + github.com/onflow/flow-ft/lib/go/templates v1.0.1 // indirect + github.com/onflow/flow-go-sdk v1.8.4 // indirect + github.com/onflow/flow-nft/lib/go/contracts v1.3.0 // indirect + github.com/onflow/flow-nft/lib/go/templates v1.3.0 // indirect + github.com/onflow/flow/protobuf/go/flow v0.4.16 // indirect + github.com/onflow/go-ethereum v1.16.2 // indirect + github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 // indirect + github.com/onflow/sdks v0.6.0-preview.1 // indirect + github.com/onflow/wal v1.0.2 // indirect + github.com/onsi/ginkgo/v2 v2.22.0 // indirect + github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.2 // indirect - github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/pelletier/go-toml/v2 v2.2.1 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect + github.com/pion/datachannel v1.5.10 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/ice/v2 v2.3.37 // indirect + github.com/pion/interceptor v0.1.37 // indirect + github.com/pion/logging v0.2.2 // indirect + github.com/pion/mdns v0.0.12 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtp v1.8.10 // indirect + github.com/pion/sctp v1.8.35 // indirect + github.com/pion/sdp/v3 v3.0.9 // indirect + github.com/pion/srtp/v2 v2.0.20 // indirect + github.com/pion/stun v0.6.1 // indirect + github.com/pion/stun/v2 v2.0.0 // indirect + 
github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect + github.com/pion/turn/v2 v2.1.6 // indirect + github.com/pion/webrtc/v3 v3.3.5 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/polydawn/refmt v0.89.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.39.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.61.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/psiemens/sconfig v0.1.0 // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.48.2 // indirect + github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/rivo/uniseg v0.2.1-0.20211004051800-57c86be7915a // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/rs/cors v1.8.0 // indirect - github.com/schollz/progressbar/v3 v3.8.3 // indirect + github.com/schollz/progressbar/v3 v3.18.0 // indirect github.com/sethvargo/go-retry v0.2.3 // indirect + github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/shirou/gopsutil/v3 v3.22.2 // indirect - github.com/slok/go-http-metrics v0.10.0 // indirect - github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect + github.com/slok/go-http-metrics v0.12.0 // indirect + github.com/sony/gobreaker v0.5.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.9.0 // indirect + github.com/spf13/afero v1.10.0 // indirect github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.6.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/viper v1.12.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.0 // indirect + github.com/spf13/viper v1.15.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/supranational/blst v0.3.14 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c // indirect - github.com/tklauser/go-sysconf v0.3.9 // indirect - github.com/tklauser/numcpus v0.3.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d // indirect - github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v4 v4.3.11 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect - 
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect + github.com/wlynxg/anet v0.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - github.com/zeebo/blake3 v0.2.3 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect + github.com/zeebo/errs v1.4.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0 // indirect - go.opentelemetry.io/otel/sdk v1.8.0 // indirect - go.opentelemetry.io/otel/trace v1.8.0 // indirect - go.opentelemetry.io/proto/otlp v0.18.0 // indirect - go.uber.org/dig v1.15.0 // indirect - go.uber.org/fx v1.18.2 // indirect - go.uber.org/multierr v1.9.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.4.0 // indirect - golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15 // indirect - golang.org/x/mod v0.8.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect - golang.org/x/tools v0.6.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.114.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.23.0 // indirect + go.uber.org/mock v0.5.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.36.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + gonum.org/v1/gonum v0.16.0 // indirect + google.golang.org/api v0.247.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250804133106-a7a43d27e69b // indirect google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 // indirect - gopkg.in/ini.v1 v1.66.6 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.1.7 // indirect - nhooyr.io/websocket v1.8.6 // indirect + lukechampine.com/blake3 v1.4.1 // indirect + nhooyr.io/websocket v1.8.7 // indirect ) replace github.com/onflow/flow-go => ../ + +// Using custom fork until https://github.com/onflow/flow-go/issues/5338 is resolved +replace github.com/ipfs/boxo => github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 + +// Using custom fork until https://github.com/ipfs/go-ds-pebble/issues/64 is merged +replace github.com/ipfs/go-ds-pebble => github.com/onflow/go-ds-pebble v0.0.0-20251003225212-131edca3a897 + +replace github.com/hashicorp/golang-lru/v2 => github.com/fxamacker/golang-lru/v2 v2.0.0-20250430153159-6f72f038a30f diff --git a/insecure/go.sum b/insecure/go.sum index 129d83cb596..c1921e45eb4 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -1,3 +1,5 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -19,24 +21,32 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= +cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= +cloud.google.com/go/auth v0.16.4 h1:fXOAIQmkApVvcIn7Pc2+5J8QTMVbUGLscnSVNl11su8= +cloud.google.com/go/auth v0.16.4/go.mod h1:j10ncYwjX/g3cdX7GpEzsdM+d+ZNsXAbb6qXA7p1Y5M= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.8.0 
h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA= +cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= +cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= cloud.google.com/go/profiler v0.3.0 h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo= +cloud.google.com/go/profiler v0.3.0/go.mod h1:9wYk9eY4iZHsev8TQb61kh3wiOiSyz/xOYixWPzweCU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -47,172 +57,187 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= -cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= +cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom 
v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE= +github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0 h1:nNMpRpnkWDAaqcpxMJvxa/Ud98gjbYwayJY4/9bdjiU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= 
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= -github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= +github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc h1:DCHzPQOcU/7gwDTWbFQZc5qHMPS1g0xTO56k8NXsv9M= +github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc/go.mod h1:LJM5a3zcIJ/8TmZwlUczvROEJT8ntOdhdG9jjcR1B0I= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo= +github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= 
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.17.7 h1:CLSjnhJSTSogvqUGhIC6LqFKATMRexcxLZ0i/Nzk9Eg= -github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE= +github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= github.com/aws/aws-sdk-go-v2/config v1.8.0/go.mod h1:w9+nMZ7soXCe5nT46Ri354SNhXDQ6v+V5wqDjnZE+GY= -github.com/aws/aws-sdk-go-v2/config v1.18.19 h1:AqFK6zFNtq4i1EYu+eC7lcKHYnZagMn6SW171la0bGw= -github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY= +github.com/aws/aws-sdk-go-v2/config v1.31.9 h1:Q+9hVk8kmDGlC7XcDout/vs0FZhHnuPCPv+TRAYDans= +github.com/aws/aws-sdk-go-v2/config v1.31.9/go.mod h1:OpMrPn6rRbHKU4dAVNCk/EQx8sEQJI7hl9GZZ5u/Y+U= github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY= -github.com/aws/aws-sdk-go-v2/credentials v1.13.18 h1:EQMdtHwz0ILTW1hoP+EwuWhwCG1hD6l3+RWFQABET4c= -github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM= +github.com/aws/aws-sdk-go-v2/credentials v1.18.13 h1:gkpEm65/ZfrGJ3wbFH++Ki7DyaWtsWbK9idX6OXCo2E= +github.com/aws/aws-sdk-go-v2/credentials v1.18.13/go.mod h1:eVTHz1yI2/WIlXTE8f70mcrSxNafXD5sJpTIM9f+kmo= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0/go.mod h1:CpNzHK9VEFUCknu50kkB8z58AH2B5DvPP7ea1LHve/Y= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 h1:gt57MN3liKiyGopcqgNzJb2+d9MJaKT/q1OksHNXVE4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 h1:Is2tPmieqGS2edBnmOJIbdvOA6Op+rRpaYR60iBAwXM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7/go.mod h1:F1i5V5421EGci570yABvpIXgRIBPb5JM+lSkHF6Dq5w= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 h1:VGkV9KmhGqOQWnHyi4gLG98kE6OecT42fdrCGFWxJsc= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1/go.mod h1:PLlnMiki//sGnCJiW+aVpvP/C8Kcm8mEj/IVm9+9qk4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 h1:sJLYcS+eZn5EeNINGHSCRAwUJMFVqklwkH36Vbyai7M= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 h1:1mnRASEKnkqsntcxHaysxwgVoUUp5dkiB+l3llKnqyg= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 
h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2/go.mod h1:BQV0agm+JEhqR+2RT5e1XTFIDcAAV0eW6z2trp+iduw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 h1:p5luUImdIqywn6JpQsW3tq5GNOxKmOnEpybzPx+d1lk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 h1:5LHn8JQ0qvjD9L9JhMtylnkcw7j05GDZqM9Oin6hpr0= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 h1:mLgc5QIgOy26qyh5bvW+nDoAppxgn3J2WV3m9ewq7+8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7/go.mod h1:wXb/eQnqt8mDQIQTTmcw58B5mYGxzLGZGK8PWNFZ0BA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 h1:HWsM0YQWX76V6MOp07YuTYacm8k7h69ObJuw7Nck+og= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0/go.mod h1:LKb3cKNQIMh+itGnEpKGcnL/6OIjPZqrtYah1w5f+3o= github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 h1:nPLfLPfglacc29Y949sDxpr3X/blaY40s3B85WT2yZU= github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0/go.mod h1:Iv2aJVtVSm/D22rFoX99cLG4q4uB7tppuCsulGe98k4= github.com/aws/aws-sdk-go-v2/service/sso v1.4.0/go.mod h1:+1fpWnL96DL23aXPpMGbsmKe8jLTEfbjuQoA4WS1VaA= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 h1:5V7DWLBd7wTELVz5bPpwzYy/sikk0gsgZfj40X+l5OI= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 h1:B8cauxOH1W1v7rd8RdI/MWnoR4Ze0wIHWrb90qczxj4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 h1:7PKX3VYsZ8LUWceVRuv0+PU+E7OtQb1lgmi5vmUE9CM= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.3/go.mod h1:Ql6jE9kyyWI5JHn+61UT/Y5Z0oyVJGmgmJbZD5g4unY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5 h1:gBBZmSuIySGqDLtXdZiYpwyzbJKXQD2jjT0oDY6ywbo= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5/go.mod h1:XclEty74bsGBCr1s0VSaA11hQ4ZidK4viWK7rRfO88I= github.com/aws/aws-sdk-go-v2/service/sts v1.7.0/go.mod h1:0qcSMCyASQPN2sk/1KQLQ2Fh6yq8wm0HSDAimPhzCoM= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 h1:bWNgNdRko2x6gqa0blfATqAZKZokPIeM1vfmQt2pnvM= 
-github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 h1:PR00NXRYgY4FWHqOGx3fC3lhVKjsp1GdloDv2ynMSd8= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.4/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= -github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.3.0 h1:h7mv5q31cthBTd7V4kLAZaIThj1e8vPGcSqpPue9KVI= -github.com/bits-and-blooms/bitset v1.3.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.24.0 h1:H4x4TuulnokZKvHLfzVRTHJfFfnHEeSYJizujEZvmAM= +github.com/bits-and-blooms/bitset v1.24.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= -github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= -github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= -github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= -github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= 
-github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= -github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= -github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 h1:KdUfX2zKommPRa+PD0sWZUyXe9w277ABlgELO7H04IM= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bytedance/sonic v1.11.5 h1:G00FYjjqll5iQ1PYXynbg/hyzqBqavH8Mo9/oTopd9k= +github.com/bytedance/sonic v1.11.5/go.mod h1:X2PC2giUdj/Cv2lliWFLk6c/DUQok5rViJSemeB0wDw= +github.com/bytedance/sonic/loader v0.1.0 h1:skjHJ2Bi9ibbq3Dwzh1w42MQ7wZJrXmEZr/uqUn3f0Q= +github.com/bytedance/sonic/loader v0.1.0/go.mod h1:UmRT+IRTGKz/DAkzcEGzyVqQFJ7H9BqwBO3pm9H/+HY= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM= +github.com/chengxilo/virtualterm v1.0.4/go.mod 
h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= +github.com/cloudwego/base64x v0.1.3 h1:b5J/l8xolB7dyDTTmhJP2oTs5LdrjyrUFuNxdfq5hAg= +github.com/cloudwego/base64x v0.1.3/go.mod h1:1+1K5BUHIQzyapgpF7LwvOGAEDicKtt1umPV+aN8pi8= +github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 h1:bvJv505UUfjzbaIPdNS4AEkHreDqQk6yuNpsdRHpwFA= +github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056 h1:slXychO2uDM6hYRu4c0pD0udNI8uObfeKN6UInWViS8= +github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= +github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= 
+github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA= +github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA= +github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw= +github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo= +github.com/cockroachdb/pebble/v2 v2.0.6 h1:eL54kX2AKp1ePJ/8vq4IO3xIEPpvVjlSP12dlLYilyE= +github.com/cockroachdb/pebble/v2 v2.0.6/go.mod h1:un1DXG73PKw3F7Ndd30YactyvsFviI9Fuhe0tENdnyA= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA= +github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= -github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -231,46 +256,41 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-eth-kzg v1.3.0 h1:05GrhASN9kDAidaFJOda6A4BEvgvuXbazXg/0E3OOdI= +github.com/crate-crypto/go-eth-kzg v1.3.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 
github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= -github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= -github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= -github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= +github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= -github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= -github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= -github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= -github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 
h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -278,65 +298,89 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/ef-ds/deque v1.0.4 h1:iFAZNmveMT9WERAkqLJ+oaABF9AcVQ5AjXem/hroniI= github.com/ef-ds/deque v1.0.4/go.mod h1:gXDnTC3yqvBcHbq2lcExjtAcVrOnJCbMcZXmuj8Z4tg= -github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= -github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= +github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.9.13 h1:rOPqjSngvs1VSYH2H+PMPiWt4VEulvNRbFgqiGqJM3E= -github.com/ethereum/go-ethereum v1.9.13/go.mod h1:qwN9d1GLyDh0N7Ab8bMGd0H9knaji2jOBm2RrMGjXls= -github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w= +github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E= +github.com/ethereum/go-ethereum v1.16.3 h1:nDoBSrmsrPbrDIVLTkDQCy1U9KdHN+F2PzvMbDoS42Q= +github.com/ethereum/go-ethereum v1.16.3/go.mod h1:Lrsc6bt9Gm9RyvhfFK53vboCia8kpF9nv+2Ukntnl+8= +github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= +github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= +github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU= +github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= -github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= -github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= +github.com/flynn/noise v1.1.0/go.mod 
h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fxamacker/cbor/v2 v2.4.1-0.20220515183430-ad2eae63303f h1:dxTR4AaxCwuQv9LAVTAC2r1szlS+epeuPT5ClLKT6ZY= -github.com/fxamacker/cbor/v2 v2.4.1-0.20220515183430-ad2eae63303f/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829 h1:qOglMkJ5YBwog/GU/NXhP9gFqxUGMuqnmCkbj65JMhk= +github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/fxamacker/circlehash v0.3.0 h1:XKdvTtIJV9t7DDUtsf0RIpC1OcxZtPbmgIH7ekx28WA= github.com/fxamacker/circlehash v0.3.0/go.mod h1:3aq3OfVvsWtkWMb6A1owjOQFA+TLsD5FgJflnaQwtMM= -github.com/gammazero/deque v0.1.0 h1:f9LnNmq66VDeuAlSAapemq/U7hJ2jpIWa4c09q8Dlik= -github.com/gammazero/deque v0.1.0/go.mod h1:KQw7vFau1hHuM8xmI9RbgKFbAsQFWmBpqQ2KenFLk6M= -github.com/gammazero/workerpool v1.1.2 h1:vuioDQbgrz4HoaCi2q1HLlOXdpbap5AET7xu5/qj87g= -github.com/gammazero/workerpool v1.1.2/go.mod h1:UelbXcO0zCIGFcufcirHhq2/xtLXJdQ29qZNlXG9OjQ= -github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/fxamacker/golang-lru/v2 v2.0.0-20250430153159-6f72f038a30f h1:/gqGg2NQVvwiLXs7ppw2uneC5AAd2Z9OTp0zgu42zNI= +github.com/fxamacker/golang-lru/v2 v2.0.0-20250430153159-6f72f038a30f/go.mod h1:qnbgnNzfydwuHjSCApF4bdul+tZ8T3y1MkZG/OFczLA= +github.com/fxamacker/golang-lru/v2 v2.0.0-20250716153046-22c8d17dc4ee h1:9RFHOj6xUdQRi1lz/BJXwi0IloXtv6Y2tp7rdSC7SQk= +github.com/fxamacker/golang-lru/v2 v2.0.0-20250716153046-22c8d17dc4ee/go.mod h1:1FYBKLDzpfjjoWMTK1cIOxsTomg/n35DWNLu6FoYEb8= +github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc= +github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc= +github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34= +github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo= +github.com/gammazero/workerpool v1.1.3 
h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= +github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/gin-gonic/gin v1.7.4 h1:QmUZXrvJ9qZ3GfWvQ+2wnW/1ePrTEJqPKMYEU3lD/DM= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -347,57 +391,63 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= 
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= -github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4= +github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= -github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= 
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -413,9 +463,7 @@ github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71 github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf 
v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= @@ -429,13 +477,14 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc= +github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -451,20 +500,21 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 
h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -475,57 +525,51 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20221219190121-3cb0bae90811 h1:wORs2YN3R3ona/CXYuTvLM31QlgoNKHvlCNuArCDDCU= -github.com/google/pprof v0.0.0-20221219190121-3cb0bae90811/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod 
h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= -github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2 v2.0.0-rc.2 h1:uxUHSMwWDJ/9jVPHNumRC8WZOi3hrBL22ObVOoLg4ww= -github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2 
v2.0.0-rc.2/go.mod h1:BL7w7qd2l/j9jgY6WMhYutfOFQc0I8RTVwtjpnAMoTM=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbfea h1:1Tk1IbruXbunEnaIZEFb+Hpv9BIZti3OxKwKn5wWyKk=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbfea/go.mod h1:GugMBs30ZSAkckqXEAIEGyYdDH6EgqowG8ppA3Zt+AY=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
-github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
-github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -544,507 +588,212 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.0.0-20160813221303-0a025b7e63ad/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
+github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
+github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
+github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c=
+github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U=
+github.com/huandu/go-clone v1.7.2 h1:3+Aq0Ed8XK+zKkLjE2dfHg0XrpIfcohBE1K+c8Usxoo=
+github.com/huandu/go-clone v1.7.2/go.mod h1:ReGivhG6op3GYr+UY3lS6mxjKp7MIGTknuU5TbTVaXE=
+github.com/huandu/go-clone/generic v1.7.2 h1:47pQphxs1Xc9cVADjOHN+Bm5D0hNagwH9UXErbxgVKA=
+github.com/huandu/go-clone/generic v1.7.2/go.mod h1:xgd9ZebcMsBWWcBx5mVMCoqMX24gLWr5lQicr+nVXNs=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
-github.com/huin/goupnp v0.0.0-20161224104101-679507af18f3/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
-github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
-github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
-github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
-github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
+github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
+github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ=
github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
-github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM=
-github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI=
-github.com/ipfs/go-bitswap v0.5.0/go.mod h1:WwyyYD33RHCpczgHjpx+xjWYIy8l41K+l5EMy4/ctSM=
-github.com/ipfs/go-bitswap v0.9.0 h1:/dZi/XhUN/aIk78pI4kaZrilUglJ+7/SCmOHWIpiy8E=
-github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc=
-github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
-github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc=
-github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk=
-github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
-github.com/ipfs/go-blockservice v0.2.0/go.mod h1:Vzvj2fAnbbyly4+T7D5+p9n3+ZKVHA2bRMMo1QoILtQ=
-github.com/ipfs/go-blockservice v0.4.0 h1:7MUijAW5SqdsqEW/EhnNFRJXVF8mGU5aGhZ3CQaCWbY=
-github.com/ipfs/go-blockservice v0.4.0/go.mod h1:kRjO3wlGW9mS1aKuiCeGhx9K1DagQ10ACpVO59qgAx4=
-github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
-github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
-github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
-github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M=
-github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
-github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
+github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs=
+github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM=
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
-github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o=
-github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc=
-github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw=
-github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s=
+github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
+github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q=
github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA=
-github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
-github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
-github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
-github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
-github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
-github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
-github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs=
-github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk=
-github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk=
-github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
-github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
+github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U=
+github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
-github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8=
-github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s=
-github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE=
-github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk=
-github.com/ipfs/go-ds-badger2 v0.1.3 h1:Zo9JicXJ1DmXTN4KOw7oPXkspZ0AWHcAFCP1tQKnegg=
-github.com/ipfs/go-ds-badger2 v0.1.3/go.mod h1:TPhhljfrgewjbtuL/tczP8dNrBYwwk+SdPYbms/NO9w=
-github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc=
-github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
-github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
-github.com/ipfs/go-fetcher v1.5.0 h1:oreKTKBzja3S09rSmoZlA3KGVlRiUbJ1pQjtB4K6y3w=
-github.com/ipfs/go-fetcher v1.5.0/go.mod h1:5pDZ0393oRF/fHiLmtFZtpMNBQfHOYNPtryWedVuSWE=
-github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
-github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ=
-github.com/ipfs/go-ipfs-blockstore v0.2.0/go.mod h1:SNeEpz/ICnMYZQYr7KNZTjdn7tEPB/99xpe8xI1RW7o=
-github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw=
-github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE=
github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
-github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
-github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo=
-github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs=
-github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q=
-github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU=
-github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM=
-github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI=
-github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y=
-github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y=
-github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0=
-github.com/ipfs/go-ipfs-exchange-offline v0.1.0/go.mod h1:YdJXa+yPF1na+gfYHYejtLwHFpuKv22eatApNiSfanM=
-github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA=
-github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY=
-github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY=
-github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY=
-github.com/ipfs/go-ipfs-provider v0.7.0 h1:5GpHv46eIS8h2mbbKg1ckU5paajDYJtE4GA/SBepOQg=
-github.com/ipfs/go-ipfs-provider v0.7.0/go.mod h1:mgjsWgDt9j19N1REPxRa31p+eRIQmjNt5McNdQQ5CsA=
-github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY=
-github.com/ipfs/go-ipfs-routing v0.2.0/go.mod h1:384byD/LHKhAgKE3NmwOjXCpDzhczROMBzidoYV7tfM=
-github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY=
-github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
-github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
-github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
-github.com/ipfs/go-ipld-format v0.3.0 h1:Mwm2oRLzIuUwEPewWAWyMuuBQUsn3awfFEYVb8akMOQ=
-github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM=
-github.com/ipfs/go-ipns v0.2.0 h1:BgmNtQhqOw5XEZ8RAfWEpK4DhqaYiuP6h71MhIp7xXU=
-github.com/ipfs/go-ipns v0.2.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24=
+github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE=
+github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4=
+github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0=
+github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs=
+github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U=
+github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
-github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk=
-github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A=
-github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs=
github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo=
-github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
-github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
-github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
-github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM=
github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
-github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g=
-github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
-github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U=
-github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY=
-github.com/ipfs/go-peertaskqueue v0.7.0 h1:VyO6G4sbzX80K58N60cCaHsSsypbUNs1GjO5seGNsQ0=
-github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU=
-github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E=
-github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0=
-github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8=
-github.com/ipld/go-ipld-prime v0.14.1 h1:n9obcUnuqPK34HlfbiB+o9GhXE/x59uue4z9YTsaoj4=
-github.com/ipld/go-ipld-prime v0.14.1/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0=
-github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
-github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
-github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU=
+github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA=
+github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew=
+github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI=
+github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E=
+github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
-github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
-github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
-github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
-github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
-github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 h1:4iii8SOozVG1lpkdPELRsjPEBhU4DeFPz2r2Fjj3UDU=
+github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956/go.mod h1:qsnXMryYP9X7JbzskIn0+N40sE6XNXLr9kYRRP6rwXU=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
-github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0=
-github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
-github.com/kevinburke/go-bindata v3.23.0+incompatible h1:rqNOXZlqrYhMVVAsQx8wuc+LaA73YcfbQ407wAykyS8=
-github.com/kevinburke/go-bindata v3.23.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM=
+github.com/k0kubun/pp/v3 v3.5.0 h1:iYNlYA5HJAJvkD4ibuf9c8y6SHM0QFhaBuCqm1zHp0w=
+github.com/k0kubun/pp/v3 v3.5.0/go.mod h1:5lzno5ZZeEeTV/Ky6vs3g6d1U3WarDrH8k240vMtGro=
+github.com/kevinburke/go-bindata v3.24.0+incompatible h1:qajFA3D0pH94OTLU4zcCCKCDgR+Zr2cZK/RPJHDdFoY=
+github.com/kevinburke/go-bindata v3.24.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0=
-github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
-github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
-github.com/klauspost/cpuid/v2 v2.2.2 h1:xPMwiykqNK9VK0NYC3+jTMYv9I6Vl3YdjZgPZKG3zO0=
-github.com/klauspost/cpuid/v2 v2.2.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
+github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
-github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
-github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8=
-github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA=
+github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
+github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
+github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
+github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
-github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
-github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ=
-github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E=
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
+github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/libp2p/go-addr-util v0.1.0 h1:acKsntI33w2bTU7tC9a0SaPimJGfSI0bFKC18ChxeVI=
github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw=
-github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
-github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
-github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc=
-github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU=
-github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70=
-github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4=
-github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8=
-github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8=
-github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
-github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
-github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
-github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8=
-github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54=
-github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k=
-github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
-github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
-github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo=
-github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0=
-github.com/libp2p/go-libp2p v0.24.2 h1:iMViPIcLY0D6zr/f+1Yq9EavCZu2i7eDstsr1nEwSAk=
-github.com/libp2p/go-libp2p v0.24.2/go.mod h1:WuxtL2V8yGjam03D93ZBC19tvOUiPpewYv1xdFGWu1k=
-github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw=
-github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI=
-github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8=
-github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE=
-github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI=
-github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI=
-github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A=
-github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
-github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
-github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro=
-github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU=
-github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ=
-github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8=
-github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU=
-github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo=
-github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA=
-github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
-github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco=
-github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE=
-github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I=
-github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI=
-github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0=
-github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
-github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
-github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII=
-github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0=
-github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0=
-github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y=
-github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y=
-github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM=
-github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
-github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
-github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo=
-github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
-github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
-github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
-github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
-github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8=
-github.com/libp2p/go-libp2p-core v0.20.1 h1:fQz4BJyIFmSZAiTbKV8qoYhEH5Dtv/cVhZbG3Ib/+Cw=
-github.com/libp2p/go-libp2p-core v0.20.1/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY=
-github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI=
-github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g=
-github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg=
-github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw=
-github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug=
-github.com/libp2p/go-libp2p-kad-dht v0.19.0 h1:2HuiInHZTm9ZvQajaqdaPLHr0PCKKigWiflakimttE0=
-github.com/libp2p/go-libp2p-kad-dht v0.19.0/go.mod h1:qPIXdiZsLczhV4/+4EO1jE8ae0YCW4ZOogc4WVIyTEU=
-github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA=
-github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U=
-github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8=
-github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90=
-github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo=
-github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE=
-github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo=
-github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek=
-github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw=
-github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g=
-github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY=
-github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE=
-github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw=
-github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ=
-github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
-github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM=
-github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q=
-github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY=
-github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY=
-github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI=
-github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ=
-github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA=
-github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA=
-github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
-github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s=
-github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA=
-github.com/libp2p/go-libp2p-pubsub v0.8.2 h1:QLGUmkgKmwEVxVDYGsqc5t9CykOMY2Y21cXQHjR462I=
-github.com/libp2p/go-libp2p-pubsub v0.8.2/go.mod h1:e4kT+DYjzPUYGZeWk4I+oxCSYTXizzXii5LDRRhjKSw=
-github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA=
-github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
+github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
+github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
+github.com/libp2p/go-libp2p v0.38.2 h1:9SZQDOCi82A25An4kx30lEtr6kGTxrtoaDkbs5xrK5k=
+github.com/libp2p/go-libp2p v0.38.2/go.mod h1:QWV4zGL3O9nXKdHirIC59DoRcZ446dfkjbOJ55NEWFo=
+github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
+github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
+github.com/libp2p/go-libp2p-kad-dht v0.28.2 h1:/VivUl/Ru0tVgkWNhDDBy8pK6q+gRdI+z8VfqmSUJWo=
+github.com/libp2p/go-libp2p-kad-dht v0.28.2/go.mod h1:sUR/qh4p/5+YFXBtwOiCmIBeBA2YD94ttmL+Xk8+pTE=
+github.com/libp2p/go-libp2p-kbucket v0.6.4 h1:OjfiYxU42TKQSB8t8WYd8MKhYhMJeO2If+NiuKfb6iQ=
+github.com/libp2p/go-libp2p-kbucket v0.6.4/go.mod h1:jp6w82sczYaBsAypt5ayACcRJi0lgsba7o4TzJKEfWA=
+github.com/libp2p/go-libp2p-pubsub v0.13.0 h1:RmFQ2XAy3zQtbt2iNPy7Tt0/3fwTnHpCQSSnmGnt1Ps=
+github.com/libp2p/go-libp2p-pubsub v0.13.0/go.mod h1:m0gpUOyrXKXdE7c8FNQ9/HLfWbxaEw7xku45w+PaqZo=
github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0=
github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk=
-github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8=
-github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g=
-github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8=
-github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY=
-github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4=
-github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU=
-github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM=
-github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM=
-github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
-github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw=
-github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4=
-github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
-github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
-github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
-github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
-github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
-github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc=
-github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g=
-github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0=
+github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84=
+github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
-github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M=
-github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA=
-github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns=
-github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o=
-github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s=
-github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk=
-github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8=
-github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI=
-github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw=
-github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA=
-github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU=
-github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4=
-github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30=
-github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po=
-github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4=
-github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE=
-github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
-github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M=
+github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU=
-github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0=
-github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU=
-github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
-github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk=
-github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
-github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ=
-github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
-github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
-github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA=
-github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU=
-github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY=
-github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI=
-github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo=
-github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU=
-github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg=
-github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM=
-github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
-github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
-github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A=
-github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ=
-github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
-github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
-github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0=
-github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
-github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
-github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
-github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
-github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo=
-github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc=
-github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA=
-github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ=
-github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560=
-github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k=
-github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs=
-github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM=
-github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw=
-github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
-github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
-github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k=
-github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14=
-github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc=
-github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA=
-github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc=
-github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY=
-github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0=
-github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M=
-github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
-github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo=
-github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM=
-github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk=
-github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA=
-github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
-github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
-github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
-github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
-github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
-github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
-github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
-github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
-github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U=
-github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ=
-github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ=
-github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
+github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
+github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
+github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
+github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
+github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
+github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
+github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
+github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
+github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
+github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
-github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
-github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8=
-github.com/lucas-clemente/quic-go v0.31.1 h1:O8Od7hfioqq0PMYHDyBkxU2aA7iZ2W9pjbrWuja2YR4=
-github.com/lucas-clemente/quic-go v0.31.1/go.mod h1:0wFbizLgYzqHqtlyxyCaJKlE7bYgE6JQ+54TLd/Dq2g=
+github.com/logrusorgru/aurora/v4 v4.0.0 h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA=
+github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
-github.com/m4ksio/wal v1.0.1-0.20221209164835-154a17396e4c h1:OqVcb1Dkheracn4fgCjxlfhuSnM8jmPbrWkJbRIC4fo=
-github.com/m4ksio/wal v1.0.1-0.20221209164835-154a17396e4c/go.mod h1:5/Yq7mnb+VdE44ff+FL8LSOPEquOVqm/7Hz40U4VUZo=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
-github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
-github.com/marten-seemann/qpack v0.3.0 h1:UiWstOgT8+znlkDPOg2+3rIuYXJ2CnGDkGUXN6ki6hE=
-github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs=
-github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
-github.com/marten-seemann/qtls-go1-18 v0.1.3 h1:R4H2Ks8P6pAtUagjFty2p7BVHn3XiwDAl7TTQf5h7TI=
-github.com/marten-seemann/qtls-go1-18 v0.1.3/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4=
-github.com/marten-seemann/qtls-go1-19 v0.1.1 h1:mnbxeq3oEyQxQXwI4ReCgW9DPoPR94sNlqWoDZnjRIE=
-github.com/marten-seemann/qtls-go1-19 v0.1.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
-github.com/marten-seemann/webtransport-go v0.4.3 h1:vkt5o/Ci+luknRteWdYGYH1KcB7ziup+J+1PzZJIvmg=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0=
-github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
-github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
-github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
+github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@@ -1052,13 +801,9 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKo
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
-github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
-github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
-github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
-github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
@@ -1077,85 +822,49 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
-github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
-github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
-github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
-github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
-github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
-github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
-github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
-github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE=
github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y=
-github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI=
-github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc=
github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0=
-github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
-github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
-github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
-github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
-github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0=
-github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
-github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
-github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q=
+github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU=
+github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4=
+github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
+github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
-github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU=
-github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
-github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
-github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y=
-github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
-github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
-github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
-github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA=
-github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
-github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
-github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
-github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ=
-github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ=
-github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw=
-github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
-github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
+github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
-github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
-github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg=
-github.com/multiformats/go-multihash v0.0.16/go.mod h1:zhfEIgVnB/rPMfxgFw15ZmGoNaKyNUIE4IWHG/kC+Ag=
-github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84=
-github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
-github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc=
-github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
-github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38=
-github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
-github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
-github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs=
-github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o=
-github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg=
-github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
-github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA=
+github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg=
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo=
-github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
-github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
@@ -1166,50 +875,69 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onflow/atree v0.5.0 h1:y3lh8hY2fUo8KVE2ALVcz0EiNTq0tXJ6YTXKYVDA+3E=
-github.com/onflow/atree v0.5.0/go.mod h1:gBHU0M05qCbv9NN0kijLWMgC47gHVNBIp4KmsVFi0tc=
-github.com/onflow/cadence v0.38.1 h1:8YpnE1ixAGB8hF3t+slkHGhjfIBJ95dqUS+sEHrM2kY=
-github.com/onflow/cadence v0.38.1/go.mod h1:SpfjNhPsJxGIHbOthE9JD/e8JFaFY73joYLPsov+PY4=
-github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 h1:wV+gcgOY0oJK4HLZQYQoK+mm09rW1XSxf83yqJwj0n4=
-github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3/go.mod h1:Osvy81E/+tscQM+d3kRFjktcIcZj2bmQ9ESqRQWDEx8=
-github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 h1:X25A1dNajNUtE+KoV76wQ6BR6qI7G65vuuRXxDDqX7E=
-github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3/go.mod h1:dqAUVWwg+NlOhsuBHex7bEWmsUjsiExzhe/+t4xNH6A=
-github.com/onflow/flow-ft/lib/go/contracts v0.7.0 h1:XEKE6qJUw3luhsYmIOteXP53gtxNxrwTohgxJXCYqBE=
-github.com/onflow/flow-ft/lib/go/contracts v0.7.0/go.mod h1:kTMFIySzEJJeupk+7EmXs0EJ6CBWY/MV9fv9iYQk+RU=
-github.com/onflow/flow-go-sdk v0.40.0 h1:s8uwoyTquN8tjdXpqGmNkXTjf79yUII8JExc5QEl4Xw=
-github.com/onflow/flow-go-sdk v0.40.0/go.mod h1:34dxXk9Hp/bQw6Zy6+H44Xo0kQU+aJyQoqdDxq00rJM=
-github.com/onflow/flow-go/crypto v0.24.7 h1:RCLuB83At4z5wkAyUCF7MYEnPoIIOHghJaODuJyEoW0=
-github.com/onflow/flow-go/crypto v0.24.7/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0=
-github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230428213521-89bcc9e8517e h1:QYEd3KWTt309YGBch4IGK6vJ6b7cOGx2NStEnd5NeHM=
-github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230428213521-89bcc9e8517e/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk=
-github.com/onflow/go-bitswap v0.0.0-20221017184039-808c5791a8a8 h1:XcSR/n2aSVO7lOEsKScYALcpHlfowLwicZ9yVbL6bnA=
-github.com/onflow/go-bitswap v0.0.0-20221017184039-808c5791a8a8/go.mod h1:73C8FlT4L/Qe4Cf5iXUNL8b2pvu4zs5dJMMJ5V2TjUI=
-github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8=
-github.com/onflow/sdks v0.5.0/go.mod h1:F0dj0EyHC55kknLkeD10js4mo14yTdMotnWMslPirrU=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/onflow/atree v0.10.1 h1:8sixWP3l3LitcyuKkVepbIsLbfr7JN3cCB/iA1j2JD8=
+github.com/onflow/atree v0.10.1/go.mod h1:+BuiL0XuIigHJqwkdIuDNzxXvyDx1jYUog/w+iZhcE8=
+github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 h1:LpiQhTAfM9CAmNVEs0n//cBBgCg+vJSiIxTHYUklZ84=
+github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80= +github.com/onflow/cadence v1.7.1 h1:VpPiC13e4qrdpyqaagd+dpHtQwfJ/djr97FgT3SovWA= +github.com/onflow/cadence v1.7.1/go.mod h1:1lKdLNVHIoO0jEjkRPMtOmBWYCG1An9TXSoiCuGIIpo= +github.com/onflow/crypto v0.25.3 h1:XQ3HtLsw8h1+pBN+NQ1JYM9mS2mVXTyg55OldaAIF7U= +github.com/onflow/crypto v0.25.3/go.mod h1:+1igaXiK6Tjm9wQOBD1EGwW7bYWMUGKtwKJ/2QL/OWs= +github.com/onflow/fixed-point v0.1.1 h1:j0jYZVO8VGyk1476alGudEg7XqCkeTVxb5ElRJRKS90= +github.com/onflow/fixed-point v0.1.1/go.mod h1:gJdoHqKtToKdOZbvryJvDZfcpzC7d2fyWuo3ZmLtcGY= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.0 h1:m6lHp0xDdmVWbpbTpFlq6XxVrB+2J8qwnzMV30zdZeM= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.0/go.mod h1:jBDqVep0ICzhXky56YlyO4aiV2Jl/5r7wnqUPpvi7zE= +github.com/onflow/flow-core-contracts/lib/go/templates v1.9.0 h1:8jn4Lxp/dpyWdgJ+5XEDUkYOf2aveObZtHtkdnYIEco= +github.com/onflow/flow-core-contracts/lib/go/templates v1.9.0/go.mod h1:twSVyUt3rNrgzAmxtBX+1Gw64QlPemy17cyvnXYy1Ug= +github.com/onflow/flow-evm-bridge v0.1.0 h1:7X2osvo4NnQgHj8aERUmbYtv9FateX8liotoLnPL9nM= +github.com/onflow/flow-evm-bridge v0.1.0/go.mod h1:5UYwsnu6WcBNrwitGFxphCl5yq7fbWYGYuiCSTVF6pk= +github.com/onflow/flow-ft/lib/go/contracts v1.0.1 h1:Ts5ob+CoCY2EjEd0W6vdLJ7hLL3SsEftzXG2JlmSe24= +github.com/onflow/flow-ft/lib/go/contracts v1.0.1/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= +github.com/onflow/flow-ft/lib/go/templates v1.0.1 h1:FDYKAiGowABtoMNusLuRCILIZDtVqJ/5tYI4VkF5zfM= +github.com/onflow/flow-ft/lib/go/templates v1.0.1/go.mod h1:uQ8XFqmMK2jxyBSVrmyuwdWjTEb+6zGjRYotfDJ5pAE= +github.com/onflow/flow-go-sdk v1.8.4 h1:WHtVjryOU6ZJx0jUSjBPOrWoGqGDr+eEejyIkfbiBCE= +github.com/onflow/flow-go-sdk v1.8.4/go.mod h1:Jli9sI78LAnoC3OVGeAs0ngOezoLTfE/GrKOAB9TbTw= +github.com/onflow/flow-nft/lib/go/contracts v1.3.0 h1:DmNop+O0EMyicZvhgdWboFG57xz5t9Qp81FKlfKyqJc= +github.com/onflow/flow-nft/lib/go/contracts v1.3.0/go.mod h1:eZ9VMMNfCq0ho6kV25xJn1kXeCfxnkhj3MwF3ed08gY= +github.com/onflow/flow-nft/lib/go/templates v1.3.0 h1:uGIBy4GEY6Z9hKP7sm5nA5kwvbvLWW4nWx5NN9Wg0II= +github.com/onflow/flow-nft/lib/go/templates v1.3.0/go.mod h1:gVbb5fElaOwKhV5UEUjM+JQTjlsguHg2jwRupfM/nng= +github.com/onflow/flow/protobuf/go/flow v0.4.16 h1:UADQeq/mpuqFk+EkwqDNoF70743raWQKmB/Dm/eKt2Q= +github.com/onflow/flow/protobuf/go/flow v0.4.16/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/go-ds-pebble v0.0.0-20251003225212-131edca3a897 h1:ZtFYJ3OSR00aiKMMxgm3fRYWqYzjvDXeoBGQm6yC8DE= +github.com/onflow/go-ds-pebble v0.0.0-20251003225212-131edca3a897/go.mod h1:aiCRVcj3K60sxc6k5C+HO9C6rouqiSkjR/WKnbTcMfQ= +github.com/onflow/go-ethereum v1.16.2 h1:yhC3DA5PTNmUmu7ziq8GmWyQ23KNjle4jCabxpKYyNk= +github.com/onflow/go-ethereum v1.16.2/go.mod h1:1vsrG/9APHPqt+mVFni60hIXkqkVdU9WQayNjYi/Ah4= +github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 h1:sxyWLqGm/p4EKT6DUlQESDG1ZNMN9GjPCm1gTq7NGfc= +github.com/onflow/nft-storefront/lib/go/contracts v1.0.0/go.mod h1:kMeq9zUwCrgrSojEbTUTTJpZ4WwacVm2pA7LVFr+glk= +github.com/onflow/sdks v0.6.0-preview.1 h1:mb/cUezuqWEP1gFZNAgUI4boBltudv4nlfxke1KBp9k= +github.com/onflow/sdks v0.6.0-preview.1/go.mod h1:F0dj0EyHC55kknLkeD10js4mo14yTdMotnWMslPirrU= +github.com/onflow/wal v1.0.2 h1:5bgsJVf2O3cfMNK12fiiTyYZ8cOrUiELt3heBJfHOhc= +github.com/onflow/wal v1.0.2/go.mod h1:iMC8gkLqu4nkbkAla5HkSBb+FGyQOZiWz3DYm2wSXCk= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo/v2 v2.6.1 h1:1xQPCjcqYw/J5LchOcp4/2q/jzJFjiAOc25chhnDw+Q= -github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -1225,29 +953,75 @@ github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIw github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= -github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= 
-github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= +github.com/pelletier/go-toml/v2 v2.2.1 h1:9TA9+T8+8CUCO2+WYnDLCgrYi9+omqKXyjDtosvtEhg= +github.com/pelletier/go-toml/v2 v2.2.1/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= +github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= +github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v2 v2.3.37 h1:ObIdaNDu1rCo7hObhs34YSBcO7fjslJMZV0ux+uZWh0= +github.com/pion/ice/v2 v2.3.37/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= +github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI= +github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y= +github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= +github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= +github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/rtp v1.8.10 h1:puphjdbjPB+L+NFaVuZ5h6bt1g5q4kFIoI+r5q/g0CU= +github.com/pion/rtp v1.8.10/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4= +github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA= +github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg= +github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= +github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= +github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= +github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/stun v0.6.1 
h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= +github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0= +github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= +github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= +github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/webrtc/v3 v3.3.5 h1:ZsSzaMz/i9nblPdiAkZoP+E6Kmjw+jnyq3bEmU3EtRg= +github.com/pion/webrtc/v3 v3.3.5/go.mod h1:liNa+E1iwyzyXqNUwvoMRNQ10x8h8FOeJKL8RkIbamE= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= @@ -1258,17 +1032,16 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf 
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1277,9 +1050,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1288,33 +1060,34 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod 
h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= github.com/psiemens/sconfig v0.1.0 h1:xfWqW+TRpih7mXZIqKYTmpRhlZLQ1kbxV8EjllPv76s= github.com/psiemens/sconfig v0.1.0/go.mod h1:+MLKqdledP/8G3rOBpknbLh0IclCf4WneJUtS26JB2U= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= +github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= +github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.2.1-0.20211004051800-57c86be7915a h1:s7GrsqeorVkFR1vGmQ6WVL9nup0eyQCC+YVUeSQLH/Q= -github.com/rivo/uniseg v0.2.1-0.20211004051800-57c86be7915a/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod 
h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so= github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= -github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -1322,12 +1095,14 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/schollz/progressbar/v3 v3.8.3 h1:FnLGl3ewlDUP+YdSwveXBaXs053Mem/du+wr7XSYKl8= -github.com/schollz/progressbar/v3 v3.8.3/go.mod h1:pWnVCjSBZsT2X3nx9HfRdnCDrpbevliMeoEVhStwHko= +github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= +github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sethvargo/go-retry v0.2.3 h1:oYlgvIvsju3jNbottWABtbnoLC+GDtLdBHxKWxQm/iU= github.com/sethvargo/go-retry v0.2.3/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.22.2 h1:wCrArWFkHYIdDxx/FSfF5RB4dpJYW6t7rcp3+zL8uks= github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= @@ -1357,138 +1132,137 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/slok/go-http-metrics v0.10.0 h1:rh0LaYEKza5eaYRGDXujKrOln57nHBi4TtVhmNEpbgM= -github.com/slok/go-http-metrics v0.10.0/go.mod h1:lFqdaS4kWMfUKCSukjC47PdCeTk+hXDUVm8kLHRqJ38= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/slok/go-http-metrics v0.12.0 h1:mAb7hrX4gB4ItU6NkFoKYdBslafg3o60/HbGBRsKaG8= +github.com/slok/go-http-metrics v0.12.0/go.mod h1:Ee/mdT9BYvGrlGzlClkK05pP2hRHmVbRF9dtUVS8LNA= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= +github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.0 h1:sFSLUHgxdnN32Qy38hK3QkYBFXZj9DKjVjCUCtD7juY= -github.com/spf13/afero v1.9.0/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= 
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= -github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= -github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= -github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= -github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= -github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod 
h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= -github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= -github.com/supranational/blst v0.3.10 h1:CMciDZ/h4pXDDXQASe8ZGTNKUiVNxVVA5hpci2Uuhuk= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo= +github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg= github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c/go.mod h1:JlzghshsemAMDGZLytTFY8C1JQxQPhnatWqNwUXjggo= -github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d h1:5JInRQbk5UBX8JfUvKh2oYTLMVwj3p6n+wapDDm7hko= github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d/go.mod h1:Nlx5Y115XQvNcIdIy7dZXaNSUpzwBSge4/Ivk93/Yog= -github.com/tyler-smith/go-bip39 
v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= -github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.11 h1:Q47CePddpNGNhk4GCnAx9DDtASi2rasatE0cd26cZoE= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/warpfork/go-testmark v0.3.0 h1:Q81c4u7hT+BR5kNfNQhEF0VT2pmL7+Kk0wD+ORYl7iA= -github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= +github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= -github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= -github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= -github.com/whyrusleeping/mafmt 
v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= -github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= -github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= -github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yhassanzadeh13/go-libp2p-pubsub v0.6.2-0.20221208234712-b44d9133e4ee h1:yFB2xjfswpuRh8FHagdBMKcBMltjr5u/XKzX6fkJO5E= -github.com/yhassanzadeh13/go-libp2p-pubsub v0.6.2-0.20221208234712-b44d9133e4ee/go.mod h1:Tylw4k1H86gbJx84i3r7qahN/mBaeMpUBvHY0Igshfw= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= -github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= -github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.etcd.io/bbolt v1.3.2/go.mod 
h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1499,97 +1273,93 @@ go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg= -go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 h1:ao8CJIShCaIbaMsGxy+jp2YHSudketpDgDRcbirov78= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 h1:LrHL1A3KqIgAgi6mK7Q0aczmzU414AONAGT5xtnp+uo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0/go.mod h1:w8aZL87GMOvOBa2lU/JlVXE1q4chk/0FX+8ai4513bw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0 h1:00hCSGLIxdYK/Z7r8GkaX0QIlfvgU3tmnLlQvcnix6U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0/go.mod h1:twhIvtDQW2sWP1O2cT1N8nkSBgKCRZv2z6COTTBrf8Q= -go.opentelemetry.io/otel/sdk v1.8.0 h1:xwu69/fNuwbSHWe/0PGS888RmjWY181OmcXDQKu7ZQk= -go.opentelemetry.io/otel/sdk v1.8.0/go.mod h1:uPSfc+yfDH2StDM/Rm35WE8gXSNdvCg023J6HeGNO0c= -go.opentelemetry.io/otel/trace v1.8.0 h1:cSy0DF9eGI5WIfNwZ1q2iUyGj00tGzP24dE1lOlHrfY= -go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.18.0 h1:W5hyXNComRa23tGpKwG+FRAc4rfF6ZUg1JReK+QHS80= -go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod 
h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.15.0 h1:vq3YWr8zRj1eFGC7Gvf907hE0eRjPTZ1d3xHadD6liE= -go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= -go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU= -go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= -go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg= +go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod 
h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= -golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= -golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1601,8 +1371,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15 h1:5oN1Pz/eDhCpbMbLstvIPa0b/BEQo6g6nwV3pLjfM6w= -golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1628,13 +1398,13 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1652,13 +1422,11 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1674,6 +1442,7 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1682,13 +1451,17 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ 
-1700,9 +1473,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1715,8 +1487,10 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1729,19 +1503,14 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1749,7 +1518,6 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1778,6 +1546,7 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1785,59 +1554,66 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025112917-711f33c9992c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1851,18 +1627,15 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1893,18 +1666,19 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.6.0 
h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -1928,8 +1702,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.247.0 h1:tSd/e0QrUlLsrwMKmkbQhYVa109qIintOls2Wh6bngc= +google.golang.org/api v0.247.0/go.mod h1:r1qZOPmxXffXg6xS5uhx16Fa/UFY8QU/K4bfKrnvovM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1938,8 +1712,9 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1969,7 +1744,6 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -1984,9 +1758,14 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250804133106-a7a43d27e69b h1:YzmLjVBzUKrr0zPM1KkGPEicd3WHSccw1k9RivnvngU= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2003,21 +1782,16 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2033,8 +1807,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2048,20 +1822,15 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod 
h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= -gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2083,12 +1852,13 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= -lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/insecure/integration/functional/test/gossipsub/rpc_inspector/utils.go b/insecure/integration/functional/test/gossipsub/rpc_inspector/utils.go new file mode 100644 index 00000000000..a4073b64218 --- /dev/null +++ b/insecure/integration/functional/test/gossipsub/rpc_inspector/utils.go @@ -0,0 +1,68 @@ +package rpc_inspector + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" 
+ + "github.com/onflow/flow-go/config" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/p2p" + p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/network/p2p/tracer" + "github.com/onflow/flow-go/utils/unittest" +) + +// StartNodesAndEnsureConnected starts the victim and spammer node and ensures they are both connected. +func startNodesAndEnsureConnected(t *testing.T, ctx irrecoverable.SignalerContext, nodes []p2p.LibP2PNode, sporkID flow.Identifier) { + p2ptest.StartNodes(t, ctx, nodes) + // prior to the test we should ensure that spammer and victim connect. + // this is vital as the spammer will circumvent the normal pubsub subscription mechanism and send iHAVE messages directly to the victim. + // without a prior connection established, directly spamming pubsub messages may cause a race condition in the pubsub implementation. + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkID) + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return (*messages.Proposal)(unittest.ProposalFixture()) + }) +} + +func stopComponents(t *testing.T, cancel context.CancelFunc, nodes []p2p.LibP2PNode, components ...module.ReadyDoneAware) { + p2ptest.StopNodes(t, nodes, cancel) + unittest.RequireComponentsDoneBefore(t, time.Second, components...) +} + +func randomClusterPrefixedTopic() channels.Topic { + return channels.Topic(channels.SyncCluster(flow.ChainID(fmt.Sprintf("%d", rand.Uint64())))) +} + +func randomClusterPrefixedTopics(n int) []string { + topics := make([]string, n) + for i := 0; i < n; i++ { + topics[i] = randomClusterPrefixedTopic().String() + } + return topics +} + +func meshTracerFixture(flowConfig *config.FlowConfig, idProvider module.IdentityProvider) *tracer.GossipSubMeshTracer { + meshTracerCfg := &tracer.GossipSubMeshTracerConfig{ + Logger: unittest.Logger(), + Metrics: metrics.NewNoopCollector(), + IDProvider: idProvider, + LoggerInterval: time.Second, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + RpcSentTracker: tracer.RpcSentTrackerConfig{ + CacheSize: flowConfig.NetworkConfig.GossipSub.RpcTracer.RPCSentTrackerCacheSize, + WorkerQueueCacheSize: flowConfig.NetworkConfig.GossipSub.RpcTracer.RPCSentTrackerQueueCacheSize, + WorkerQueueNumber: flowConfig.NetworkConfig.GossipSub.RpcTracer.RpcSentTrackerNumOfWorkers, + }, + DuplicateMessageTrackerCacheConfig: flowConfig.NetworkConfig.GossipSub.RpcTracer.DuplicateMessageTrackerConfig, + } + return tracer.NewGossipSubMeshTracer(meshTracerCfg) +} diff --git a/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go b/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go new file mode 100644 index 00000000000..8a5431adb2f --- /dev/null +++ b/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go @@ -0,0 +1,1110 @@ +package rpc_inspector + +import ( + "context" + "fmt" + "math" + "testing" + "time" + + corrupt "github.com/libp2p/go-libp2p-pubsub" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pb "github.com/libp2p/go-libp2p-pubsub/pb" + pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/libp2p/go-libp2p/core/peer" + 
mockery "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/config" + "github.com/onflow/flow-go/insecure/corruptlibp2p" + "github.com/onflow/flow-go/insecure/internal" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/inspector/validation" + p2pmsg "github.com/onflow/flow-go/network/p2p/message" + mockp2p "github.com/onflow/flow-go/network/p2p/mock" + p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/utils/rand" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestValidationInspector_InvalidTopicId_Detection ensures that when an RPC control message contains an invalid topic ID an invalid control message +// notification is disseminated with the expected error. +// An invalid topic ID could have any of the following properties: +// - unknown topic: the topic is not a known Flow topic +// - malformed topic: topic is malformed in some way +// - invalid spork ID: spork ID prepended to topic and current spork ID do not match +func TestValidationInspector_InvalidTopicId_Detection(t *testing.T) { + role := flow.RoleConsensus + sporkID := unittest.IdentifierFixture() + flowConfig, err := config.DefaultConfig() + require.NoError(t, err) + inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation + + messageCount := 100 + inspectorConfig.InspectionQueue.NumberOfWorkers = 1 + controlMessageCount := int64(1) + + count := atomic.NewUint64(0) + invGraftNotifCount := atomic.NewUint64(0) + invPruneNotifCount := atomic.NewUint64(0) + invIHaveNotifCount := atomic.NewUint64(0) + done := make(chan struct{}) + expectedNumOfTotalNotif := 9 + + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t) + consumer.On("OnInvalidControlMessageNotification", mockery.Anything).Run(func(args mockery.Arguments) { + count.Inc() + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") + require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) + require.True(t, validation.IsInvalidTopicIDThresholdExceeded(notification.Error)) + switch notification.MsgType { + case p2pmsg.CtrlMsgGraft: + invGraftNotifCount.Inc() + case p2pmsg.CtrlMsgPrune: + invPruneNotifCount.Inc() + case p2pmsg.CtrlMsgIHave: + invIHaveNotifCount.Inc() + default: + require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) + } + if count.Load() == uint64(expectedNumOfTotalNotif) { + close(done) + } + }).Return().Times(expectedNumOfTotalNotif) + + meshTracer := meshTracerFixture(flowConfig, idProvider) + topicProvider := p2ptest.NewUpdatableTopicProviderFixture() + validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: 
unittest.Logger(), + SporkID: sporkID, + Config: &inspectorConfig, + IdProvider: idProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + InspectorMetrics: metrics.NewNoopCollector(), + RpcTracker: meshTracer, + NetworkingType: network.PrivateNetwork, + InvalidControlMessageNotificationConsumer: consumer, + TopicOracle: func() p2p.TopicProvider { + return topicProvider + }, + }) + require.NoError(t, err) + corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(validationInspector) + victimNode, victimIdentity := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithRole(role), + internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc))) + idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() + idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() + + // create unknown topic + unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", p2ptest.GossipSubTopicIdFixture(), sporkID)) + // create malformed topic + malformedTopic, err := rand.GenerateRandomString(100) + require.NoError(t, err) + // a topic's spork ID is considered invalid if it does not match the current spork ID + invalidSporkIDTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, unittest.IdentifierFixture())) + + // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation + topicProvider.UpdateTopics([]string{unknownTopic.String(), malformedTopic, invalidSporkIDTopic.String()}) + + validationInspector.Start(signalerCtx) + nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) + spammer.Start(t) + defer stopComponents(t, cancel, nodes, validationInspector) + + // prepare to spam - generate control messages + graftCtlMsgsWithUnknownTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGraft(messageCount, unknownTopic.String())) + graftCtlMsgsWithMalformedTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGraft(messageCount, malformedTopic)) + graftCtlMsgsInvalidSporkIDTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGraft(messageCount, invalidSporkIDTopic.String())) + + pruneCtlMsgsWithUnknownTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithPrune(messageCount, unknownTopic.String())) + pruneCtlMsgsWithMalformedTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithPrune(messageCount, malformedTopic)) + pruneCtlMsgsInvalidSporkIDTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithPrune(messageCount, invalidSporkIDTopic.String())) + + iHaveCtlMsgsWithUnknownTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithIHave(messageCount, 1000, unknownTopic.String())) + iHaveCtlMsgsWithMalformedTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithIHave(messageCount, 1000, malformedTopic)) + iHaveCtlMsgsInvalidSporkIDTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithIHave(messageCount, 1000, invalidSporkIDTopic.String())) + + // spam the victim peer with invalid graft messages + spammer.SpamControlMessage(t, victimNode, graftCtlMsgsWithUnknownTopic) + spammer.SpamControlMessage(t, victimNode, graftCtlMsgsWithMalformedTopic) + spammer.SpamControlMessage(t, victimNode, graftCtlMsgsInvalidSporkIDTopic) + + // spam
the victim peer with invalid prune messages + spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsWithUnknownTopic) + spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsWithMalformedTopic) + spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsInvalidSporkIDTopic) + + // spam the victim peer with invalid ihave messages + spammer.SpamControlMessage(t, victimNode, iHaveCtlMsgsWithUnknownTopic) + spammer.SpamControlMessage(t, victimNode, iHaveCtlMsgsWithMalformedTopic) + spammer.SpamControlMessage(t, victimNode, iHaveCtlMsgsInvalidSporkIDTopic) + + unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") + + // ensure we receive the expected number of invalid control message notifications for the graft, prune, and iHave control message types + // for each type we send 3 control messages, one per invalid topic + require.Equal(t, uint64(3), invGraftNotifCount.Load()) + require.Equal(t, uint64(3), invPruneNotifCount.Load()) + require.Equal(t, uint64(3), invIHaveNotifCount.Load()) +} + +// TestValidationInspector_DuplicateTopicId_Detection ensures that when an RPC control message contains a duplicate topic ID, an invalid control message +// notification is disseminated with the expected error. +func TestValidationInspector_DuplicateTopicId_Detection(t *testing.T) { + role := flow.RoleConsensus + sporkID := unittest.IdentifierFixture() + flowConfig, err := config.DefaultConfig() + require.NoError(t, err) + inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation + + inspectorConfig.InspectionQueue.NumberOfWorkers = 1 + + // sets the message count to two above the max of the iHave and graft/prune duplicate topic id thresholds to ensure + // a successful attack + messageCount := int(math.Max(float64(inspectorConfig.IHave.DuplicateTopicIdThreshold), float64(inspectorConfig.GraftPrune.DuplicateTopicIdThreshold))) + 2 + controlMessageCount := int64(1) + + count := atomic.NewInt64(0) + done := make(chan struct{}) + expectedNumOfTotalNotif := 3 // one notification per control message type (graft, prune, iHave) + invGraftNotifCount := atomic.NewUint64(0) + invPruneNotifCount := atomic.NewUint64(0) + invIHaveNotifCount := atomic.NewUint64(0) + + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t) + consumer.On("OnInvalidControlMessageNotification", mockery.Anything).Run(func(args mockery.Arguments) { + count.Inc() + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") + require.True(t, validation.IsDuplicateTopicIDThresholdExceeded(notification.Error)) + require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) + switch notification.MsgType { + case p2pmsg.CtrlMsgGraft: + invGraftNotifCount.Inc() + case p2pmsg.CtrlMsgPrune: + invPruneNotifCount.Inc() + case p2pmsg.CtrlMsgIHave: + invIHaveNotifCount.Inc() + default: + require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) + } + + if count.Load() == int64(expectedNumOfTotalNotif) { + close(done) + } + }).Return().Times(expectedNumOfTotalNotif) + + meshTracer := meshTracerFixture(flowConfig, idProvider) + topicProvider := p2ptest.NewUpdatableTopicProviderFixture() +
validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: unittest.Logger(), + SporkID: sporkID, + Config: &inspectorConfig, + IdProvider: idProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + InspectorMetrics: metrics.NewNoopCollector(), + RpcTracker: meshTracer, + NetworkingType: network.PrivateNetwork, + InvalidControlMessageNotificationConsumer: consumer, + TopicOracle: func() p2p.TopicProvider { + return topicProvider + }, + }) + require.NoError(t, err) + + corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(validationInspector) + victimNode, victimIdentity := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithRole(role), + internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc))) + idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() + idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() + + // create a topic with a valid spork ID; each control message below repeats this topic ID enough times to exceed the duplicate topic ID threshold + duplicateTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, sporkID)) + // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation + topicProvider.UpdateTopics([]string{duplicateTopic.String()}) + + validationInspector.Start(signalerCtx) + nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) + spammer.Start(t) + defer stopComponents(t, cancel, nodes, validationInspector) + + // prepare to spam - generate control messages + graftCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGraft(messageCount, duplicateTopic.String())) + ihaveCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithIHave(messageCount, 10, duplicateTopic.String())) + pruneCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithPrune(messageCount, duplicateTopic.String())) + + // start spamming the victim peer + spammer.SpamControlMessage(t, victimNode, graftCtlMsgsDuplicateTopic) + spammer.SpamControlMessage(t, victimNode, ihaveCtlMsgsDuplicateTopic) + spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsDuplicateTopic) + + unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") + // ensure we receive the expected number of invalid control message notifications for the graft, prune, and iHave control message types + require.Equal(t, uint64(1), invGraftNotifCount.Load()) + require.Equal(t, uint64(1), invPruneNotifCount.Load()) + require.Equal(t, uint64(1), invIHaveNotifCount.Load()) +} + +// TestValidationInspector_IHaveDuplicateMessageId_Detection ensures that when an RPC iHave control message contains a duplicate message ID for a single topic, an invalid control message +// notification is disseminated with the expected error.
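+// As an illustrative sketch of the expected behavior (the concrete threshold value is hypothetical; the flow mirrors the test body below): with a duplicate message ID threshold of 5, an RPC whose iHaves advertise one and the same message ID more than 5 times on each of two topics should yield exactly one invalid control message notification for the RPC as a whole, not one notification per offending topic.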
+func TestValidationInspector_IHaveDuplicateMessageId_Detection(t *testing.T) { + role := flow.RoleConsensus + sporkID := unittest.IdentifierFixture() + flowConfig, err := config.DefaultConfig() + require.NoError(t, err) + inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation + inspectorConfig.InspectionQueue.NumberOfWorkers = 1 + + count := atomic.NewInt64(0) + done := make(chan struct{}) + expectedNumOfTotalNotif := 1 + invIHaveNotifCount := atomic.NewUint64(0) + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t) + consumer.On("OnInvalidControlMessageNotification", mockery.Anything).Run(func(args mockery.Arguments) { + count.Inc() + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") + require.True(t, validation.IsDuplicateMessageIDErr(notification.Error)) + require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) + require.True(t, notification.MsgType == p2pmsg.CtrlMsgIHave, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) + invIHaveNotifCount.Inc() + + if count.Load() == int64(expectedNumOfTotalNotif) { + close(done) + } + }).Return().Times(expectedNumOfTotalNotif) + + meshTracer := meshTracerFixture(flowConfig, idProvider) + topicProvider := p2ptest.NewUpdatableTopicProviderFixture() + validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: unittest.Logger(), + SporkID: sporkID, + Config: &inspectorConfig, + IdProvider: idProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + InspectorMetrics: metrics.NewNoopCollector(), + RpcTracker: meshTracer, + NetworkingType: network.PrivateNetwork, + InvalidControlMessageNotificationConsumer: consumer, + TopicOracle: func() p2p.TopicProvider { + return topicProvider + }, + }) + require.NoError(t, err) + + corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(validationInspector) + victimNode, victimIdentity := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithRole(role), + internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc))) + idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() + idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() + + // prepare to spam - generate control messages + pushBlocks := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, sporkID)) + reqChunks := channels.Topic(fmt.Sprintf("%s/%s", channels.RequestChunks, sporkID)) + // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation + topicProvider.UpdateTopics([]string{pushBlocks.String(), reqChunks.String()}) + + validationInspector.Start(signalerCtx) + nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) + // to suppress peers provider not set + p2ptest.RegisterPeerProviders(t, nodes) + spammer.Start(t) + defer stopComponents(t, cancel, nodes, 
validationInspector) + + // generate a list of duplicate message IDs: DuplicateMessageIdThreshold + 2 copies of a single message ID + messageIdCount := inspectorConfig.IHave.DuplicateMessageIdThreshold + 2 + messageIds := unittest.IdentifierListFixture(1) + for i := 0; i < messageIdCount; i++ { + messageIds = append(messageIds, messageIds[0]) + } + // prepare a single control message with iHave messages for two different topics, each carrying the duplicate message IDs + ihaveCtlMsgs1 := spammer.GenerateCtlMessages( + 1, + p2ptest.WithIHaveMessageIDs(messageIds.Strings(), pushBlocks.String()), + p2ptest.WithIHaveMessageIDs(messageIds.Strings(), reqChunks.String())) + + // start spamming the victim peer + spammer.SpamControlMessage(t, victimNode, ihaveCtlMsgs1) + + unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") + // ensure we receive the expected number of invalid control message notifications + require.Equal(t, uint64(1), invIHaveNotifCount.Load()) +} + +// TestValidationInspector_UnknownClusterId_Detection ensures that when an RPC control message contains a topic with an unknown cluster ID, an invalid control message +// notification is disseminated with the expected error. +func TestValidationInspector_UnknownClusterId_Detection(t *testing.T) { + role := flow.RoleConsensus + sporkID := unittest.IdentifierFixture() + flowConfig, err := config.DefaultConfig() + require.NoError(t, err) + inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation + // set hard threshold to 0 so that in the case of invalid cluster ID + // we force the inspector to return an error + inspectorConfig.ClusterPrefixedMessage.HardThreshold = 0 + inspectorConfig.InspectionQueue.NumberOfWorkers = 1 + // set invalid topic id threshold to 0 so that inspector returns error early + inspectorConfig.GraftPrune.InvalidTopicIdThreshold = 0 + + // ensure we send a number of messages with unknown cluster ids higher than the invalid topic ids threshold; + // restricting the control message count to 1 allows us to aggregate only a single error when the error is logged in the inspector. 
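+ // for context (based on the override above): with GraftPrune.InvalidTopicIdThreshold set to 0, a single unknown-cluster topic ID already exceeds the threshold; 60 messages simply make the spam unambiguous while keeping it within one control message per type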
+ messageCount := 60 + controlMessageCount := int64(1) + + count := atomic.NewInt64(0) + done := make(chan struct{}) + expectedNumOfTotalNotif := 2 + invGraftNotifCount := atomic.NewUint64(0) + invPruneNotifCount := atomic.NewUint64(0) + + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t) + consumer.On("OnInvalidControlMessageNotification", mockery.Anything).Run(func(args mockery.Arguments) { + count.Inc() + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgTopicTypeClusterPrefixed) + require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) + require.True(t, validation.IsInvalidTopicIDThresholdExceeded(notification.Error)) + switch notification.MsgType { + case p2pmsg.CtrlMsgGraft: + invGraftNotifCount.Inc() + case p2pmsg.CtrlMsgPrune: + invPruneNotifCount.Inc() + default: + require.Fail(t, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) + } + + if count.Load() == int64(expectedNumOfTotalNotif) { + close(done) + } + }).Return().Times(expectedNumOfTotalNotif) + + meshTracer := meshTracerFixture(flowConfig, idProvider) + topicProvider := p2ptest.NewUpdatableTopicProviderFixture() + validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: unittest.Logger(), + SporkID: sporkID, + Config: &inspectorConfig, + IdProvider: idProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + InspectorMetrics: metrics.NewNoopCollector(), + RpcTracker: meshTracer, + NetworkingType: network.PrivateNetwork, + InvalidControlMessageNotificationConsumer: consumer, + TopicOracle: func() p2p.TopicProvider { + return topicProvider + }, + }) + require.NoError(t, err) + + corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(validationInspector) + victimNode, victimIdentity := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithRole(role), + internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc))) + idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() + idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true) + + // set up a cluster prefixed topic with an invalid cluster ID + unknownClusterID := channels.Topic(channels.SyncCluster("unknown-cluster-ID")) + // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation + topicProvider.UpdateTopics([]string{unknownClusterID.String()}) + + // consume cluster ID update so that the active cluster IDs are set + validationInspector.ActiveClustersChanged(flow.ChainIDList{"known-cluster-id"}) + + validationInspector.Start(signalerCtx) + nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) + spammer.Start(t) + defer stopComponents(t, cancel, nodes, validationInspector) + + // prepare to spam - generate control messages + graftCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGraft(messageCount, unknownClusterID.String())) + pruneCtlMsgsDuplicateTopic := 
spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithPrune(messageCount, unknownClusterID.String())) + + // start spamming the victim peer + spammer.SpamControlMessage(t, victimNode, graftCtlMsgsDuplicateTopic) + spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsDuplicateTopic) + + unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") + // ensure we receive the expected number of invalid control message notifications for graft and prune control message types + require.Equal(t, uint64(1), invGraftNotifCount.Load()) + require.Equal(t, uint64(1), invPruneNotifCount.Load()) +} + +// TestValidationInspector_ActiveClusterIdsNotSet_Graft_Detection ensures that an error is returned only after the cluster prefixed topics received for a peer exceed the configured +// cluster prefix hard threshold when the active cluster IDs are not set, and that an invalid control message notification is disseminated with the expected error. +// This test involves Graft control messages. +func TestValidationInspector_ActiveClusterIdsNotSet_Graft_Detection(t *testing.T) { + role := flow.RoleConsensus + sporkID := unittest.IdentifierFixture() + flowConfig, err := config.DefaultConfig() + require.NoError(t, err) + inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation + inspectorConfig.GraftPrune.InvalidTopicIdThreshold = 0 + inspectorConfig.ClusterPrefixedMessage.HardThreshold = 5 + inspectorConfig.InspectionQueue.NumberOfWorkers = 1 + + count := atomic.NewInt64(0) + done := make(chan struct{}) + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t) + consumer.On("OnInvalidControlMessageNotification", mockery.Anything).Run(func(args mockery.Arguments) { + count.Inc() + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgTopicTypeClusterPrefixed) + require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) + require.True(t, validation.IsInvalidTopicIDThresholdExceeded(notification.Error)) + require.Equal(t, notification.MsgType, p2pmsg.CtrlMsgGraft) + if count.Load() == 1 { + close(done) + } + }).Return().Once() + meshTracer := meshTracerFixture(flowConfig, idProvider) + + topicProvider := p2ptest.NewUpdatableTopicProviderFixture() + validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: unittest.Logger(), + SporkID: sporkID, + Config: &inspectorConfig, + IdProvider: idProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + InspectorMetrics: metrics.NewNoopCollector(), + RpcTracker: meshTracer, + NetworkingType: network.PrivateNetwork, + InvalidControlMessageNotificationConsumer: consumer, + TopicOracle: func() p2p.TopicProvider { + return topicProvider + }, + }) + require.NoError(t, err) + + corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(validationInspector) + victimNode, victimIdentity := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithRole(role), + internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc))) + idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() + 
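+ // note on the mechanics below: the spammer grafts HardThreshold+1 distinct cluster-prefixed topics; while no active cluster IDs are set, the inspector tolerates the first HardThreshold of them, and the next one pushes the peer over the limit and triggers the notification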
idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true) + topics := randomClusterPrefixedTopics(int(inspectorConfig.ClusterPrefixedMessage.HardThreshold) + 1) + // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation + topicProvider.UpdateTopics(topics) + + // we deliberately avoid setting the cluster IDs so that we eventually receive errors after we have exceeded the allowed cluster + // prefixed hard threshold + validationInspector.Start(signalerCtx) + nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) + spammer.Start(t) + defer stopComponents(t, cancel, nodes, validationInspector) + // generate multiple control messages with GRAFT's for randomly generated + // cluster prefixed channels, this ensures we do not encounter duplicate topic ID errors + ctlMsgs := spammer.GenerateCtlMessages(1, p2ptest.WithGrafts(topics...)) + // start spamming the victim peer + spammer.SpamControlMessage(t, victimNode, ctlMsgs) + + unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") +} + +// TestValidationInspector_ActiveClusterIdsNotSet_Prune_Detection ensures that an error is returned only after the cluster prefixed topics received for a peer exceed the configured +// cluster prefix hard threshold when the active cluster IDs not set and an invalid control message notification is disseminated with the expected error. +// This test involves Prune control messages. +func TestValidationInspector_ActiveClusterIdsNotSet_Prune_Detection(t *testing.T) { + role := flow.RoleConsensus + sporkID := unittest.IdentifierFixture() + flowConfig, err := config.DefaultConfig() + require.NoError(t, err) + inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation + inspectorConfig.GraftPrune.InvalidTopicIdThreshold = 0 + inspectorConfig.ClusterPrefixedMessage.HardThreshold = 5 + inspectorConfig.InspectionQueue.NumberOfWorkers = 1 + + count := atomic.NewInt64(0) + done := make(chan struct{}) + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t) + consumer.On("OnInvalidControlMessageNotification", mockery.Anything).Run(func(args mockery.Arguments) { + count.Inc() + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgTopicTypeClusterPrefixed) + require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) + require.True(t, validation.IsInvalidTopicIDThresholdExceeded(notification.Error)) + require.Equal(t, notification.MsgType, p2pmsg.CtrlMsgPrune) + if count.Load() == 1 { + close(done) + } + }).Return().Once() + meshTracer := meshTracerFixture(flowConfig, idProvider) + topicProvider := p2ptest.NewUpdatableTopicProviderFixture() + validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: unittest.Logger(), + SporkID: sporkID, + Config: &inspectorConfig, + IdProvider: idProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + InspectorMetrics: metrics.NewNoopCollector(), + RpcTracker: meshTracer, + NetworkingType: network.PrivateNetwork, + InvalidControlMessageNotificationConsumer: consumer, + TopicOracle: func() 
p2p.TopicProvider { + return topicProvider + }, + }) + require.NoError(t, err) + + corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(validationInspector) + victimNode, victimIdentity := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithRole(role), + internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc))) + idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() + idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true) + topics := randomClusterPrefixedTopics(int(inspectorConfig.ClusterPrefixedMessage.HardThreshold) + 1) + // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation + topicProvider.UpdateTopics(topics) + + // we deliberately avoid setting the cluster IDs so that we eventually receive errors after we have exceeded the allowed cluster + // prefixed hard threshold + validationInspector.Start(signalerCtx) + nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) + spammer.Start(t) + defer stopComponents(t, cancel, nodes, validationInspector) + // generate multiple control messages with PRUNEs for randomly generated + // cluster prefixed channels; this ensures we do not encounter duplicate topic ID errors + ctlMsgs := spammer.GenerateCtlMessages(1, p2ptest.WithPrunes(topics...)) + // start spamming the victim peer + spammer.SpamControlMessage(t, victimNode, ctlMsgs) + + unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") +} + +// TestValidationInspector_UnstakedNode_Detection ensures that the RPC control message inspector disseminates an invalid control message notification when an unstaked peer +// sends an RPC. 
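+// As exercised below, the inspector resolves the sender against its identity provider; a peer ID with no known identity yields a notification with an ErrUnstakedPeer error and MsgType CtrlMsgRPC, regardless of the RPC contents.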
+func TestValidationInspector_UnstakedNode_Detection(t *testing.T) { + role := flow.RoleConsensus + sporkID := unittest.IdentifierFixture() + flowConfig, err := config.DefaultConfig() + require.NoError(t, err) + inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation + inspectorConfig.InspectionQueue.NumberOfWorkers = 1 + controlMessageCount := int64(1) + + count := atomic.NewInt64(0) + done := make(chan struct{}) + + idProvider := mock.NewIdentityProvider(t) + inspectorIDProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + unstakedPeerID := unittest.PeerIdFixture(t) + consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t) + consumer.On("OnInvalidControlMessageNotification", mockery.Anything).Run(func(args mockery.Arguments) { + count.Inc() + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType) + require.Equal(t, unstakedPeerID, notification.PeerID) + require.True(t, validation.IsErrUnstakedPeer(notification.Error)) + require.Equal(t, notification.MsgType, p2pmsg.CtrlMsgRPC) + + if count.Load() == 2 { + close(done) + } + }).Return() + + meshTracer := meshTracerFixture(flowConfig, idProvider) + topicProvider := p2ptest.NewUpdatableTopicProviderFixture() + validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: unittest.Logger(), + SporkID: sporkID, + Config: &inspectorConfig, + IdProvider: inspectorIDProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + InspectorMetrics: metrics.NewNoopCollector(), + RpcTracker: meshTracer, + NetworkingType: network.PrivateNetwork, + InvalidControlMessageNotificationConsumer: consumer, + TopicOracle: func() p2p.TopicProvider { + return topicProvider + }, + }) + require.NoError(t, err) + corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(validationInspector) + // we need to wait until nodes are connected before we can start returning unstaked identity. 
+ nodesConnected := atomic.NewBool(false) + victimNode, victimIdentity := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithRole(role), + internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(func(id peer.ID, rpc *corrupt.RPC) error { + if nodesConnected.Load() { + // after nodes are connected invoke corrupt callback with an unstaked peer ID + return corruptInspectorFunc(unstakedPeerID, rpc) + } + return corruptInspectorFunc(id, rpc) + }))) + idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() + idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() + inspectorIDProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true) + inspectorIDProvider.On("ByPeerID", unstakedPeerID).Return(nil, false) + + validationInspector.Start(signalerCtx) + nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) + nodesConnected.Store(true) + spammer.Start(t) + defer stopComponents(t, cancel, nodes, validationInspector) + + // prepare to spam - generate control messages each of which will be immediately rejected because the sender is unstaked + graftCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithGraft(10, "")) + pruneCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithPrune(10, "")) + + // start spamming the victim peer + spammer.SpamControlMessage(t, victimNode, graftCtlMsgsDuplicateTopic) + spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsDuplicateTopic) + + unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") +} + +// TestValidationInspector_InspectIWants_CacheMissThreshold ensures that expected invalid control message notification is disseminated when the number of iWant message Ids +// without a corresponding iHave message sent with the same message ID exceeds the configured cache miss threshold. 
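+// A cache miss here is an iWant message ID that the victim never advertised in one of its own iHave messages (tracked by the RPC sent tracker); the test below first simulates advertising a subset of IDs via meshTracer.SendRPC, then spams iWants for the full set so that the untracked remainder counts as misses.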
+func TestValidationInspector_InspectIWants_CacheMissThreshold(t *testing.T) { + role := flow.RoleConsensus + sporkID := unittest.IdentifierFixture() + // create our RPC validation inspector + flowConfig, err := config.DefaultConfig() + require.NoError(t, err) + inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation + inspectorConfig.InspectionQueue.NumberOfWorkers = 1 + inspectorConfig.IWant.CacheMissThreshold = 10 + messageCount := 10 + controlMessageCount := int64(1) + cacheMissThresholdNotifCount := atomic.NewUint64(0) + done := make(chan struct{}) + + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t) + consumer.On("OnInvalidControlMessageNotification", mockery.Anything).Run(func(args mockery.Arguments) { + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") + require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) + require.True(t, notification.MsgType == p2pmsg.CtrlMsgIWant, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) + require.True(t, validation.IsIWantCacheMissThresholdErr(notification.Error)) + + cacheMissThresholdNotifCount.Inc() + if cacheMissThresholdNotifCount.Load() == 1 { + close(done) + } + }).Return().Once() + + meshTracer := meshTracerFixture(flowConfig, idProvider) + topicProvider := p2ptest.NewUpdatableTopicProviderFixture() + validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: unittest.Logger(), + SporkID: sporkID, + Config: &inspectorConfig, + IdProvider: idProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + InspectorMetrics: metrics.NewNoopCollector(), + RpcTracker: meshTracer, + NetworkingType: network.PrivateNetwork, + InvalidControlMessageNotificationConsumer: consumer, + TopicOracle: func() p2p.TopicProvider { + return topicProvider + }, + }) + require.NoError(t, err) + corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(validationInspector) + victimNode, victimIdentity := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithRole(role), + internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc))) + idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() + idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() + + messageIDs := p2ptest.GossipSubMessageIdsFixture(10) + + // create control message with iWant that contains 6 message IDs that were not tracked + ctlWithIWants := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithIWant(messageCount, messageCount)) + ctlWithIWants[0].Iwant[0].MessageIDs = messageIDs // the first 6 message ids will not have a corresponding iHave + topic := channels.PushBlocks + // create control message with iHave that contains only the last 4 message IDs, which will force cache misses for the other 6 message IDs + ctlWithIhaves := spammer.GenerateCtlMessages(int(controlMessageCount), p2ptest.WithIHave(messageCount, 
messageCount, topic.String())) + ctlWithIhaves[0].Ihave[0].MessageIDs = messageIDs[6:] + // set topic oracle + topicProvider.UpdateTopics([]string{topic.String()}) + validationInspector.Start(signalerCtx) + nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) + spammer.Start(t) + meshTracer.Start(signalerCtx) + defer stopComponents(t, cancel, nodes, validationInspector, meshTracer) + + // simulate tracking some message IDs + meshTracer.SendRPC(&pubsub.RPC{ + RPC: pb.RPC{ + Control: &ctlWithIhaves[0], + }, + }, "") + + // spam the victim with an iWant message that contains message IDs that do not have a corresponding iHave + spammer.SpamControlMessage(t, victimNode, ctlWithIWants) + + unittest.RequireCloseBefore(t, done, 2*time.Second, "failed to inspect RPC messages on time") +} + +// TestValidationInspector_InspectRpcPublishMessages ensures that the expected invalid control message notification is disseminated when the number of errors encountered during +// RPC publish message validation exceeds the configured error threshold. +func TestValidationInspector_InspectRpcPublishMessages(t *testing.T) { + role := flow.RoleConsensus + sporkID := unittest.IdentifierFixture() + // create our RPC validation inspector + flowConfig, err := config.DefaultConfig() + require.NoError(t, err) + inspectorConfig := flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation + inspectorConfig.InspectionQueue.NumberOfWorkers = 1 + + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role, idProvider) + + controlMessageCount := int64(1) + notificationCount := atomic.NewUint64(0) + done := make(chan struct{}) + validTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.TestNetworkChannel.String(), sporkID)).String() + // create unknown topic + unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", p2ptest.GossipSubTopicIdFixture(), sporkID)).String() + // create malformed topic + malformedTopic, err := rand.GenerateRandomString(100) + require.NoError(t, err) + // a topic's spork ID is considered invalid if it does not match the current spork ID + invalidSporkIDTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, unittest.IdentifierFixture())).String() + + // unknown peer ID + unknownPeerID := unittest.PeerIdFixture(t) + + // ejected identity + ejectedIdentityPeerID := unittest.PeerIdFixture(t) + ejectedIdentity := unittest.IdentityFixture() + ejectedIdentity.EpochParticipationStatus = flow.EpochParticipationStatusEjected + + // invalid messages; these should force a notification to be disseminated + invalidPublishMsgs := []*pb.Message{ + {Topic: &unknownTopic, From: []byte(spammer.SpammerNode.ID())}, + {Topic: &malformedTopic, From: []byte(spammer.SpammerNode.ID())}, + {Topic: &malformedTopic, From: []byte(spammer.SpammerNode.ID())}, + {Topic: &malformedTopic, From: []byte(spammer.SpammerNode.ID())}, + {Topic: &invalidSporkIDTopic, From: []byte(spammer.SpammerNode.ID())}, + {Topic: &validTopic, From: []byte(unknownPeerID)}, + {Topic: &validTopic, From: []byte(ejectedIdentityPeerID)}, + } + topic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, sporkID)) + // first create 4 valid messages + publishMsgs := unittest.GossipSubMessageFixtures(4, topic.String(), unittest.WithFrom(spammer.SpammerNode.ID())) + publishMsgs = append(publishMsgs, invalidPublishMsgs...) 
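+ // tally for the assertion below: 4 valid messages plus 7 invalid ones (1 unknown topic, 3 malformed, 1 invalid spork ID, 1 unstaked sender, 1 ejected sender); with the error threshold set to 6 further down, the 7 errors exceed it and exactly one notification is disseminated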
+ + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t) + consumer.On("OnInvalidControlMessageNotification", mockery.Anything).Run(func(args mockery.Arguments) { + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "IsClusterPrefixed is expected to be false, no RPC with cluster prefixed topic sent in this test") + require.Equal(t, spammer.SpammerNode.ID(), notification.PeerID) + require.True(t, notification.MsgType == p2pmsg.RpcPublishMessage, fmt.Sprintf("unexpected control message type %s error: %s", notification.MsgType, notification.Error)) + require.True(t, validation.IsInvalidRpcPublishMessagesErr(notification.Error)) + require.Contains(t, + notification.Error.Error(), + fmt.Sprintf("%d error(s) encountered", len(invalidPublishMsgs)), + fmt.Sprintf("expected %d errors, an error for each invalid pubsub message", len(invalidPublishMsgs))) + require.Contains(t, notification.Error.Error(), fmt.Sprintf("unstaked peer: %s", unknownPeerID)) + require.Contains(t, notification.Error.Error(), fmt.Sprintf("ejected peer: %s", ejectedIdentityPeerID)) + notificationCount.Inc() + if notificationCount.Load() == 1 { + close(done) + } + }).Return().Once() + + meshTracer := meshTracerFixture(flowConfig, idProvider) + topicProvider := p2ptest.NewUpdatableTopicProviderFixture() + validationInspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: unittest.Logger(), + SporkID: sporkID, + Config: &inspectorConfig, + IdProvider: idProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + InspectorMetrics: metrics.NewNoopCollector(), + RpcTracker: meshTracer, + NetworkingType: network.PrivateNetwork, + InvalidControlMessageNotificationConsumer: consumer, + TopicOracle: func() p2p.TopicProvider { + return topicProvider + }, + }) + require.NoError(t, err) + // set topic oracle to return list with all topics to avoid hasSubscription failures and force topic validation + topics := make([]string, len(publishMsgs)) + for i := 0; i < len(publishMsgs); i++ { + topics[i] = publishMsgs[i].GetTopic() + } + + topicProvider.UpdateTopics(topics) + // set the error threshold to 6; the 7 invalid publish messages above will exceed it and force a single notification + inspectorConfig.PublishMessages.ErrorThreshold = 6 + corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(validationInspector) + victimNode, victimIdentity := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithRole(role), + internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc))) + idProvider.On("ByPeerID", victimNode.ID()).Return(&victimIdentity, true).Maybe() + idProvider.On("ByPeerID", spammer.SpammerNode.ID()).Return(&spammer.SpammerId, true).Maybe() + + // return nil for the unknown peer ID, indicating an unstaked peer + idProvider.On("ByPeerID", unknownPeerID).Return(nil, false).Once() + // returning an ejected identity for this peer ID will force message validation failure + idProvider.On("ByPeerID", ejectedIdentityPeerID).Return(ejectedIdentity, true).Once() + + validationInspector.Start(signalerCtx) + nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) + spammer.Start(t) + meshTracer.Start(signalerCtx) + defer stopComponents(t, cancel, nodes, 
validationInspector, meshTracer) + + // prepare to spam - generate control messages + ctlMsg := spammer.GenerateCtlMessages(int(controlMessageCount)) + // start spamming the victim peer + spammer.SpamControlMessage(t, victimNode, ctlMsg, publishMsgs...) + + unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") + // ensure we receive the expected number of invalid control message notifications for RPC publish messages + require.Equal(t, uint64(1), notificationCount.Load()) +} + +// TestGossipSubSpamMitigationIntegration_Grafts tests that the spam mitigation feature of GossipSub is working as expected for Graft control messages. +func TestGossipSubSpamMitigationIntegration_Grafts(t *testing.T) { + testGossipSubSpamMitigationIntegration(t, p2pmsg.CtrlMsgGraft) +} + +// TestGossipSubSpamMitigationIntegration_Prunes tests that the spam mitigation feature of GossipSub is working as expected for Prune control messages. +func TestGossipSubSpamMitigationIntegration_Prunes(t *testing.T) { + testGossipSubSpamMitigationIntegration(t, p2pmsg.CtrlMsgPrune) +} + +// TestGossipSubSpamMitigationIntegration_IHaves tests that the spam mitigation feature of GossipSub is working as expected for iHave control messages. +func TestGossipSubSpamMitigationIntegration_IHaves(t *testing.T) { + testGossipSubSpamMitigationIntegration(t, p2pmsg.CtrlMsgIHave) +} + +// testGossipSubSpamMitigationIntegration tests that the spam mitigation feature of GossipSub is working as expected. +// The test puts together the spam detection (through the GossipSubInspector) and the spam mitigation (through the +// scoring system) and ensures that the mitigation is triggered when the spam detection detects spam. +// The test scenario involves a spammer node that sends a large number of control messages for the specified control message type to a victim node. +// The victim node is configured to use the GossipSubInspector to detect spam and the scoring system to mitigate spam. +// The test ensures that the victim node is disconnected from the spammer node on the GossipSub mesh after the spam detection is triggered. 
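+// In rough terms the expected pipeline is: spammer control messages -> validation inspector flags them -> invalid control message notifications feed the app-specific score -> spammer score sinks below the (negative) graylist threshold -> victim drops the spammer from its mesh, which the final EnsureNoPubsubExchangeBetweenGroups assertion verifies.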
+func testGossipSubSpamMitigationIntegration(t *testing.T, msgType p2pmsg.ControlMessageType) { + idProvider := mock.NewIdentityProvider(t) + sporkID := unittest.IdentifierFixture() + spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, flow.RoleConsensus, idProvider) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + cfg, err := config.DefaultConfig() + require.NoError(t, err) + // set the scoring parameters to be more aggressive to speed up the test + cfg.NetworkConfig.GossipSub.RpcTracer.ScoreTracerInterval = 100 * time.Millisecond + cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond + cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor = .99 + + victimNode, victimId := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithRole(flow.RoleConsensus), + p2ptest.OverrideFlowConfig(cfg)) + + ids := flow.IdentityList{&victimId, &spammer.SpammerId} + idProvider.On("ByPeerID", mockery.Anything).Return(func(peerId peer.ID) *flow.Identity { + switch peerId { + case victimNode.ID(): + return &victimId + case spammer.SpammerNode.ID(): + return &spammer.SpammerId + default: + return nil + } + + }, func(peerId peer.ID) bool { + switch peerId { + case victimNode.ID(): + fallthrough + case spammer.SpammerNode.ID(): + return true + default: + return false + } + }) + + spamRpcCount := 1000 // total number of individual rpc messages to send + spamCtrlMsgCount := int64(1000) // total number of control messages to send on each RPC + + // unknownTopic is an unknown topic to the victim node but shaped like a valid topic (i.e., it has the correct prefix and spork ID). + unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", p2ptest.GossipSubTopicIdFixture(), sporkID)) + + // malformedTopic is a topic that is not shaped like a valid topic (i.e., it does not have the correct prefix and spork ID). + malformedTopic := channels.Topic("!@#$%^&**((") + + // invalidSporkIDTopic is a topic that has a valid prefix but an invalid spork ID (i.e., not the current spork ID). + invalidSporkIDTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, unittest.IdentifierFixture())) + + // duplicateTopic is a valid topic that is used to send duplicate spam messages. + duplicateTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, sporkID)) + + // starting the nodes. + nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) + spammer.Start(t) + + // wait for the nodes to discover each other + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + + // as nodes started fresh and no spamming has happened yet, the nodes should be able to exchange messages on the topic. 
+ blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkID) + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return (*messages.Proposal)(unittest.ProposalFixture()) + }) + + var unknownTopicSpam []pubsub_pb.ControlMessage + var malformedTopicSpam []pubsub_pb.ControlMessage + var invalidSporkIDTopicSpam []pubsub_pb.ControlMessage + var duplicateTopicSpam []pubsub_pb.ControlMessage + switch msgType { + case p2pmsg.CtrlMsgGraft: + unknownTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), p2ptest.WithGraft(spamRpcCount, unknownTopic.String())) + malformedTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), p2ptest.WithGraft(spamRpcCount, malformedTopic.String())) + invalidSporkIDTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), p2ptest.WithGraft(spamRpcCount, invalidSporkIDTopic.String())) + duplicateTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), // sets duplicates to +2 above the threshold to ensure that the victim node will penalize the spammer node + p2ptest.WithGraft(cfg.NetworkConfig.GossipSub.RpcInspector.Validation.GraftPrune.DuplicateTopicIdThreshold+2, duplicateTopic.String())) + case p2pmsg.CtrlMsgPrune: + unknownTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), p2ptest.WithPrune(spamRpcCount, unknownTopic.String())) + malformedTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), p2ptest.WithPrune(spamRpcCount, malformedTopic.String())) + invalidSporkIDTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), p2ptest.WithPrune(spamRpcCount, invalidSporkIDTopic.String())) + duplicateTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), // sets duplicates to +2 above the threshold to ensure that the victim node will penalize the spammer node + p2ptest.WithPrune(cfg.NetworkConfig.GossipSub.RpcInspector.Validation.GraftPrune.DuplicateTopicIdThreshold+2, duplicateTopic.String())) + case p2pmsg.CtrlMsgIHave: + unknownTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), p2ptest.WithIHave(spamRpcCount, 100, unknownTopic.String())) + malformedTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), p2ptest.WithIHave(spamRpcCount, 100, malformedTopic.String())) + invalidSporkIDTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), p2ptest.WithIHave(spamRpcCount, 100, invalidSporkIDTopic.String())) + duplicateTopicSpam = spammer.GenerateCtlMessages(int(spamCtrlMsgCount), // sets duplicates to spamRpcCount above the threshold to ensure that the victim node will penalize the spammer node + p2ptest.WithIHave(cfg.NetworkConfig.GossipSub.RpcInspector.Validation.IHave.DuplicateTopicIdThreshold+spamRpcCount, 100, duplicateTopic.String())) + default: + t.Fatal("invalid control message type, expected graft, prune, or ihave") + } + + // start spamming the victim peer + spammer.SpamControlMessage(t, victimNode, unknownTopicSpam) + spammer.SpamControlMessage(t, victimNode, malformedTopicSpam) + spammer.SpamControlMessage(t, victimNode, invalidSporkIDTopicSpam) + spammer.SpamControlMessage(t, victimNode, duplicateTopicSpam) + scoreOptParameters := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds + // wait for three GossipSub heartbeat intervals to ensure that the victim node has penalized the spammer node. 
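+ // note: the GossipSub score thresholds here are negative, so requiring score < 2*Graylist below means the spammer's score must sink to at least twice the depth of the graylist cutoff, a comfortable margin for the assertion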
+ require.Eventually(t, func() bool { + score, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.ID()) + return ok && score < 2*scoreOptParameters.Graylist + }, 5*time.Second, 100*time.Millisecond, "expected victim node to penalize spammer node") + + // now we expect the detection and mitigation to kick in and the victim node to disconnect from the spammer node. + // so the spammer and victim nodes should not be able to exchange messages on the topic. + p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, + ctx, + []p2p.LibP2PNode{victimNode}, + flow.IdentifierList{victimId.NodeID}, + []p2p.LibP2PNode{spammer.SpammerNode}, + flow.IdentifierList{spammer.SpammerId.NodeID}, + blockTopic, + 1, + func() interface{} { + return (*messages.Proposal)(unittest.ProposalFixture()) + }) +} diff --git a/insecure/integration/functional/test/gossipsub/scoring/ihave_spam_test.go b/insecure/integration/functional/test/gossipsub/scoring/ihave_spam_test.go new file mode 100644 index 00000000000..212c4394dd8 --- /dev/null +++ b/insecure/integration/functional/test/gossipsub/scoring/ihave_spam_test.go @@ -0,0 +1,495 @@ +package scoring + +import ( + "context" + "fmt" + "testing" + "time" + + corrupt "github.com/libp2p/go-libp2p-pubsub" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/config" + "github.com/onflow/flow-go/insecure/corruptlibp2p" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/p2p" + p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/utils/concurrentmap" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestGossipSubIHaveBrokenPromises_Below_Threshold tests that as long as the spammer stays below the ihave spam thresholds, it is not caught and +// penalized by the victim node. +// The thresholds are: +// - Maximum messages that include iHave per heartbeat is: 10 (gossipsub parameter; GossipSubMaxIHaveMessages), after which iHave messages are dropped. +// - Threshold for broken promises of iHave per heartbeat is: 10 (Flow parameter; defaultBehaviourPenaltyThreshold). It means that GossipSub samples one iHave id out of the +// entire RPC and if that iHave id is not eventually delivered within 3 seconds (gossipsub parameter, GossipSubIWantFollowupTime), then the promise is considered broken. We set +// this threshold to 10 meaning that the first 10 broken promises are ignored. This is to allow for some network churn. +// - Per heartbeat (gossipsub parameter GossipSubHeartbeatInterval, 1 second), the spammer is allowed to send at most 5000 ihave messages (gossipsub parameter; GossipSubMaxIHaveLength) on aggregate, and +// excess messages are dropped (without being counted as broken promises). 
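+// Back-of-the-envelope for this test: 10 spam RPCs can be counted as at most 10 broken promises (GossipSub samples one iHave ID per RPC); with the behaviour penalty threshold also set to 10 below, the excess over the threshold stays around zero, and since the behaviour penalty enters the score roughly as PenaltyWeight * excess^2, the spammer's score should remain effectively untouched.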
+func TestGossipSubIHaveBrokenPromises_Below_Threshold(t *testing.T) { + role := flow.RoleConsensus + sporkId := unittest.IdentifierFixture() + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + + receivedIWants := concurrentmap.New[string, struct{}]() + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) + spammer := corruptlibp2p.NewGossipSubRouterSpammerWithRpcInspector(t, sporkId, role, idProvider, func(id peer.ID, rpc *corrupt.RPC) error { + // override rpc inspector of the spammer node to keep track of the iwants it has received. + if rpc.RPC.Control == nil || rpc.RPC.Control.Iwant == nil { + return nil + } + for _, iwant := range rpc.RPC.Control.Iwant { + for _, msgId := range iwant.MessageIDs { + receivedIWants.Add(msgId, struct{}{}) + } + } + return nil + }) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + // we override some of the default scoring parameters in order to speed up the test in a time-efficient manner. + blockTopicOverrideParams := defaultTopicScoreParams(t) + blockTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup. + // we disable invalid message delivery parameters, as the way we implement spammer, when it spams ihave messages, it does not sign them. Hence, without decaying the invalid message deliveries, + // the node would be penalized for invalid message delivery way sooner than it can mount an ihave broken-promises spam attack. + blockTopicOverrideParams.InvalidMessageDeliveriesWeight = 0.0 + blockTopicOverrideParams.InvalidMessageDeliveriesDecay = 0.0 + + conf, err := config.DefaultConfig() + require.NoError(t, err) + // we override the decay interval to 1 second so that the score is updated within 1 second intervals. + conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.DecayInterval = 1 * time.Second + // score tracer interval is set to 500 milliseconds to speed up the test, it should be shorter than the heartbeat interval (1 second) of gossipsub to catch the score updates in time. + conf.NetworkConfig.GossipSub.RpcTracer.ScoreTracerInterval = 500 * time.Millisecond + + // relaxing the scoring parameters to fit the test scenario. 
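+ // reading of the overrides below: PenaltyDecay 0.99 keeps the broken-promise counter from decaying away mid-test, PenaltyThreshold 10 forgives the first 10 broken promises, and PenaltyWeight -1 turns the squared excess over the threshold into a score penalty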
+ conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyDecay = 0.99 + conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyThreshold = 10 + conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyWeight = -1 + + victimNode, victimIdentity := p2ptest.NodeFixture( + t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithRole(role), + p2ptest.OverrideFlowConfig(conf), + p2ptest.EnablePeerScoringWithOverride(&p2p.PeerScoringConfigOverride{ + TopicScoreParams: map[channels.Topic]*pubsub.TopicScoreParams{ + blockTopic: blockTopicOverrideParams, + }, + }), + ) + + ids := flow.IdentityList{&spammer.SpammerId, &victimIdentity} + idProvider.SetIdentities(ids) + nodes := []p2p.LibP2PNode{spammer.SpammerNode, victimNode} + // to suppress the logs of "peer provider has not set" + victimNode.WithPeersProvider(func() peer.IDSlice { + return peer.IDSlice{spammer.SpammerNode.ID()} + }) + spammer.SpammerNode.WithPeersProvider(func() peer.IDSlice { + return peer.IDSlice{victimNode.ID()} + }) + + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) + + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + + // checks end-to-end message delivery works on GossipSub + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return (*messages.Proposal)(unittest.ProposalFixture()) + }) + + // creates 10 RPCs each with 10 iHave messages, each iHave message has 50 message ids, hence overall, we have 5000 iHave message ids. + spamIHaveBrokenPromise(t, spammer, blockTopic.String(), receivedIWants, victimNode) + + // wait till victim counts the spam iHaves as broken promises (one per RPC for a total of 10). + initialBehavioralPenalty := float64(0) // keeps track of the initial behavioral penalty of the spammer node for decay testing. + require.Eventually(t, func() bool { + behavioralPenalty, ok := victimNode.PeerScoreExposer().GetBehaviourPenalty(spammer.SpammerNode.ID()) + if !ok { + return false + } + // ideally, we should have 10, but we give it a buffer of 2.5 to account for 25% discrepancy due to scoring decays, timer asynchrony, and floating point errors. + if behavioralPenalty < 7.5 { + t.Logf("pending on behavioral penalty %f", behavioralPenalty) + return false + } + t.Logf("success on behavioral penalty %f", behavioralPenalty) + initialBehavioralPenalty = behavioralPenalty + return true + // Note: we have to wait at least 3 seconds for an iHave to be considered as broken promise (gossipsub parameters), we set it to 10 + // seconds to be on the safe side. + // Also, the internal heartbeat of GossipSub is 1 second, hence, there is no need to have ticks shorter than 500 milliseconds. + }, 10*time.Second, 500*time.Millisecond) + + scoreParams := conf.NetworkConfig.GossipSub.ScoringParameters + + spammerScore, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.ID()) + require.True(t, ok, "sanity check failed, we should have a score for the spammer node") + // since spammer is not yet considered to be penalized, its score must be greater than the gossipsub health thresholds. 
+ require.Greaterf(t, + spammerScore, + scoreParams.PeerScoring.Internal.Thresholds.Gossip, + "sanity check failed, the score of the spammer node must be greater than gossip threshold: %f, actual: %f", + scoreParams.PeerScoring.Internal.Thresholds.Gossip, + spammerScore) + require.Greaterf(t, + spammerScore, + scoreParams.PeerScoring.Internal.Thresholds.Publish, + "sanity check failed, the score of the spammer node must be greater than publish threshold: %f, actual: %f", + scoreParams.PeerScoring.Internal.Thresholds.Publish, + spammerScore) + require.Greaterf(t, + spammerScore, + scoreParams.PeerScoring.Internal.Thresholds.Graylist, + "sanity check failed, the score of the spammer node must be greater than graylist threshold: %f, actual: %f", + scoreParams.PeerScoring.Internal.Thresholds.Graylist, + spammerScore) + + // eventually, after a heartbeat the spammer behavioral counter must be decayed + require.Eventually(t, func() bool { + behavioralPenalty, ok := victimNode.PeerScoreExposer().GetBehaviourPenalty(spammer.SpammerNode.ID()) + if !ok { + return false + } + if behavioralPenalty >= initialBehavioralPenalty { // after a heartbeat the spammer behavioral counter must be decayed. + return false + } + + return true + }, 2*time.Second, 500*time.Millisecond, "sanity check failed, the spammer behavioral counter must be decayed after a heartbeat") + + // since spammer stays below the threshold, it should be able to exchange messages with the victim node over pubsub. + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return (*messages.Proposal)(unittest.ProposalFixture()) + }) +} + +// TestGossipSubIHaveBrokenPromises_Above_Threshold tests that a continuous stream of spam iHave broken promises will +// eventually cause the spammer node to be graylisted (i.e., no incoming RPCs from the spammer node will be accepted, and +// no outgoing RPCs to the spammer node will be sent). +// The test performs 3 rounds of attacks: each round with 10 RPCs, each RPC with 1 iHave message, each iHave message with 500 message ids; hence overall, we have 5000 iHave message ids. +// Note that based on GossipSub parameters 5000 iHave is the most one can send within one heartbeat. +// The first round of attack keeps the spammer's broken promises below the threshold of 10 RPCs (broken promises are counted per RPC), hence no degradation of the spammer's score. +// The second round of attack pushes the spammer's broken promises above the threshold of 10 RPCs, hence a degradation of the spammer's score. +// The third round of attack pushes the spammer's broken promises to around 20 RPCs above the threshold, which causes the graylisting of the spammer node. +func TestGossipSubIHaveBrokenPromises_Above_Threshold(t *testing.T) { + role := flow.RoleConsensus + sporkId := unittest.IdentifierFixture() + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + + receivedIWants := concurrentmap.New[string, struct{}]() + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) + spammer := corruptlibp2p.NewGossipSubRouterSpammerWithRpcInspector(t, sporkId, role, idProvider, func(id peer.ID, rpc *corrupt.RPC) error { + // override rpc inspector of the spammer node to keep track of the iwants it has received. 
+ if rpc.RPC.Control == nil || rpc.RPC.Control.Iwant == nil { + return nil + } + for _, iwant := range rpc.RPC.Control.Iwant { + for _, msgId := range iwant.MessageIDs { + receivedIWants.Add(msgId, struct{}{}) + } + } + return nil + }) + + conf, err := config.DefaultConfig() + require.NoError(t, err) + // overcompensate for RPC truncation + conf.NetworkConfig.GossipSub.RpcInspector.Validation.IHave.MessageCountThreshold = 10000 + conf.NetworkConfig.GossipSub.RpcInspector.Validation.IHave.MessageIdCountThreshold = 10000 + // we override the decay interval to 1 second so that the score is updated within 1 second intervals. + conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.DecayInterval = 1 * time.Second + // score tracer interval is set to 500 milliseconds to speed up the test; it should be shorter than the heartbeat interval (1 second) of gossipsub to catch the score updates in time. + conf.NetworkConfig.GossipSub.RpcTracer.ScoreTracerInterval = 500 * time.Millisecond + + // relaxing the scoring parameters to fit the test scenario. + conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyDecay = 0.99 + conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyThreshold = 10 + conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyWeight = -1 + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + // we override some of the default scoring parameters in order to speed up the test in a time-efficient manner. + blockTopicOverrideParams := defaultTopicScoreParams(t) + blockTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup. + // we disable invalid message delivery parameters, as the way we implement spammer, when it spams ihave messages, it does not sign them. Hence, without decaying the invalid message deliveries, + // the node would be penalized for invalid message delivery way sooner than it can mount an ihave broken-promises spam attack. + blockTopicOverrideParams.InvalidMessageDeliveriesWeight = 0.0 + blockTopicOverrideParams.InvalidMessageDeliveriesDecay = 0.0 + 
victimNode, victimIdentity := p2ptest.NodeFixture( + t, + sporkId, + t.Name(), + idProvider, + p2ptest.OverrideFlowConfig(conf), + p2ptest.WithRole(role), + p2ptest.EnablePeerScoringWithOverride(&p2p.PeerScoringConfigOverride{ + TopicScoreParams: map[channels.Topic]*pubsub.TopicScoreParams{ + blockTopic: blockTopicOverrideParams, + }, + }), + ) + + ids := flow.IdentityList{&spammer.SpammerId, &victimIdentity} + idProvider.SetIdentities(ids) + nodes := []p2p.LibP2PNode{spammer.SpammerNode, victimNode} + // to suppress the logs of "peer provider has not set" + victimNode.WithPeersProvider(func() peer.IDSlice { + return peer.IDSlice{spammer.SpammerNode.ID()} + }) + spammer.SpammerNode.WithPeersProvider(func() peer.IDSlice { + return peer.IDSlice{victimNode.ID()} + }) + + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) + + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + + // checks end-to-end message delivery works on GossipSub + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return (*messages.Proposal)(unittest.ProposalFixture()) + }) + + initScore, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.ID()) + require.True(t, ok, "score for spammer node must be present") + + // FIRST ROUND OF ATTACK: spammer sends 10 RPCs to the victim node, each containing an iHave with 500 message ids. + spamIHaveBrokenPromise(t, spammer, blockTopic.String(), receivedIWants, victimNode) + t.Log("first round of attack finished") + + // wait till victim counts the spam iHaves as broken promises for the first round of attack (one per RPC for a total of 10). + require.Eventually(t, func() bool { + behavioralPenalty, ok := victimNode.PeerScoreExposer().GetBehaviourPenalty(spammer.SpammerNode.ID()) + if !ok { + return false + } + + // ideally, we should have 10, but we give it a buffer of 2.5 to account for 25% discrepancy due to scoring decays, timer asynchrony, and floating point errors. + if behavioralPenalty < 7.5 { + t.Logf("[first round] pending on behavioral penalty %f", behavioralPenalty) + return false + } + + t.Logf("[first round] success on behavioral penalty %f", behavioralPenalty) + return true + // Note: we have to wait at least 3 seconds for an iHave to be considered as broken promise (gossipsub parameters), we set it to 10 seconds to be on the safe side. + // Also, the internal heartbeat of GossipSub is 1 second, hence, there is no need to have ticks shorter than 500 milliseconds. + }, 10*time.Second, 500*time.Millisecond) + + scoreAfterFirstRound, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.ID()) + require.True(t, ok, "score for spammer node must be present") + // spammer score after first round must not be decreased severely; we allow up to a 10% drop due to under-performing + // (on sending fresh new messages, since that is not part of the test). + require.Greater(t, scoreAfterFirstRound, 0.9*initScore) + + // SECOND ROUND OF ATTACK: spammer sends 10 RPCs to the victim node, each containing an iHave with 500 message ids. 
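+ // expected effect of the second round (approximate, ignoring decay): the broken-promise counter lands around 20, i.e. roughly 10 over the threshold, so the behaviour penalty contributes on the order of PenaltyWeight * excess^2 = -1 * 10^2 to the internal score -- enough to dent the score, but not yet enough to cross the graylist threshold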
+	spamIHaveBrokenPromise(t, spammer, blockTopic.String(), receivedIWants, victimNode)
+	t.Log("second round of attack finished")
+	// wait till the victim counts the spam iHaves as broken promises for the second round of attack (one per RPC for a total of 10).
+	require.Eventually(t, func() bool {
+		behavioralPenalty, ok := victimNode.PeerScoreExposer().GetBehaviourPenalty(spammer.SpammerNode.ID())
+		if !ok {
+			return false
+		}
+
+		// ideally, we should have 20 (10 from the first round, 10 from the second round), but we give it a buffer of 5 to account for a 25% discrepancy due to scoring decays, timer asynchrony, and floating point errors.
+		if behavioralPenalty < 15 {
+			t.Logf("[second round] pending on behavioral penalty %f", behavioralPenalty)
+			return false
+		}
+
+		t.Logf("[second round] success on behavioral penalty %f", behavioralPenalty)
+		return true
+		// Note: we have to wait at least 3 seconds for an iHave to be considered a broken promise (gossipsub parameter); we set it to 10 seconds to be on the safe side.
+		// Also, the internal heartbeat of GossipSub is 1 second, hence there is no need for ticks shorter than 500 milliseconds.
+	}, 10*time.Second, 500*time.Millisecond)
+
+	scoreParams := conf.NetworkConfig.GossipSub.ScoringParameters
+
+	spammerScore, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.ID())
+	require.True(t, ok, "sanity check failed, we should have a score for the spammer node")
+	// after the second round of the attack, the spammer is about 10 broken promises above the threshold (total ~20 broken promises, but the first 10 are not counted).
+	// we expect the score to drop to initScore - 10 * 10 * 0.01 * scoring.MaxAppSpecificReward; however, instead of 10, we consider only 5 above the threshold, to account for decays.
+	require.LessOrEqual(t,
+		spammerScore,
+		initScore-5*5*0.01*scoreParams.PeerScoring.Protocol.AppSpecificScore.MaxAppSpecificReward,
+		"sanity check failed, the score of the spammer node must be less than the initial score minus 5 * 5 * 0.01 * scoring.MaxAppSpecificReward: %f, actual: %f",
+		initScore-5*5*0.01*scoreParams.PeerScoring.Protocol.AppSpecificScore.MaxAppSpecificReward,
+		spammerScore)
+	require.Greaterf(t,
+		spammerScore,
+		scoreParams.PeerScoring.Internal.Thresholds.Gossip,
+		"sanity check failed, the score of the spammer node must be greater than gossip threshold: %f, actual: %f",
+		scoreParams.PeerScoring.Internal.Thresholds.Gossip,
+		spammerScore)
+	require.Greaterf(t,
+		spammerScore,
+		scoreParams.PeerScoring.Internal.Thresholds.Publish,
+		"sanity check failed, the score of the spammer node must be greater than publish threshold: %f, actual: %f",
+		scoreParams.PeerScoring.Internal.Thresholds.Publish,
+		spammerScore)
+	require.Greaterf(t,
+		spammerScore,
+		scoreParams.PeerScoring.Internal.Thresholds.Graylist,
+		"sanity check failed, the score of the spammer node must be greater than graylist threshold: %f, actual: %f",
+		scoreParams.PeerScoring.Internal.Thresholds.Graylist,
+		spammerScore)
+
+	// since the spammer score is above the gossip, graylist, and publish thresholds, it should still be able to exchange messages with the victim.
+	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
+		return (*messages.Proposal)(unittest.ProposalFixture())
+	})
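The bound asserted above is worth spelling out. With PenaltyWeight = -1, an excess of 5 broken promises over the threshold costs 5² penalty units, which the test scales by 0.01 · MaxAppSpecificReward. A hedged back-of-envelope with stand-in numbers (not flow-go code):

```go
package main

import "fmt"

func main() {
	const (
		initScore = 100.0 // stand-in for the observed initial score
		maxReward = 100.0 // stand-in for MaxAppSpecificReward
		excess    = 5.0   // conservative broken-promise excess over the threshold
	)
	bound := initScore - excess*excess*0.01*maxReward
	fmt.Printf("spammer score must be <= %.2f\n", bound) // 75.00 with these stand-ins
}
```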
+	// THIRD ROUND OF ATTACK: spammer sends 10 RPCs to the victim node, each containing 500 iHave messages; we expect the spammer to be graylisted.
+	spamIHaveBrokenPromise(t, spammer, blockTopic.String(), receivedIWants, victimNode)
+	t.Log("third round of attack finished")
+	// wait till the victim counts the spam iHaves as broken promises for the third round of attack (one per RPC for a total of 10).
+	require.Eventually(t, func() bool {
+		behavioralPenalty, ok := victimNode.PeerScoreExposer().GetBehaviourPenalty(spammer.SpammerNode.ID())
+		if !ok {
+			return false
+		}
+		// ideally, we should have 30 (10 from each round), but we give it a buffer of 7.5 to account for a 25% discrepancy due to scoring decays, timer asynchrony, and floating point errors.
+		if behavioralPenalty < 22.5 {
+			t.Logf("[third round] pending on behavioral penalty %f", behavioralPenalty)
+			return false
+		}
+
+		t.Logf("[third round] success on behavioral penalty %f", behavioralPenalty)
+		return true
+		// Note: we have to wait at least 3 seconds for an iHave to be considered a broken promise (gossipsub parameter); we set it to 10 seconds to be on the safe side.
+		// Also, the internal heartbeat of GossipSub is 1 second, hence there is no need for ticks shorter than 500 milliseconds.
+	}, 10*time.Second, 500*time.Millisecond)
+
+	spammerScore, ok = victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.ID())
+	require.True(t, ok, "sanity check failed, we should have a score for the spammer node")
+	// after the third round of the attack, the spammer is about 20 broken promises above the threshold (total ~30 broken promises), hence its overall score must be below the gossip, publish, and graylist thresholds.
+	// This means the victim will no longer exchange messages with it, and that it will be graylisted: all incoming and outgoing RPCs to and from the spammer will be dropped by the victim.
+	require.Lessf(t,
+		spammerScore,
+		scoreParams.PeerScoring.Internal.Thresholds.Gossip,
+		"sanity check failed, the score of the spammer node must be less than gossip threshold: %f, actual: %f",
+		scoreParams.PeerScoring.Internal.Thresholds.Gossip,
+		spammerScore)
+	require.Lessf(t,
+		spammerScore,
+		scoreParams.PeerScoring.Internal.Thresholds.Publish,
+		"sanity check failed, the score of the spammer node must be less than publish threshold: %f, actual: %f",
+		scoreParams.PeerScoring.Internal.Thresholds.Publish,
+		spammerScore)
+	require.Lessf(t,
+		spammerScore,
+		scoreParams.PeerScoring.Internal.Thresholds.Graylist,
+		"sanity check failed, the score of the spammer node must be less than graylist threshold: %f, actual: %f",
+		scoreParams.PeerScoring.Internal.Thresholds.Graylist,
+		spammerScore)
+
+	// since the spammer score is below the gossip, graylist, and publish thresholds, it should no longer be able to exchange messages with the victim.
+	p2ptest.EnsureNoPubsubExchangeBetweenGroups(
+		t,
+		ctx,
+		[]p2p.LibP2PNode{spammer.SpammerNode},
+		flow.IdentifierList{spammer.SpammerId.NodeID},
+		[]p2p.LibP2PNode{victimNode},
+		flow.IdentifierList{victimIdentity.NodeID},
+		blockTopic,
+		1,
+		func() interface{} {
+			return (*messages.Proposal)(unittest.ProposalFixture())
+		})
+}
+
+// spamIHaveBrokenPromise is a test utility function that is exclusive to the TestGossipSubIHaveBrokenPromises_.* tests.
+// It creates and sends 10 RPCs, each with 1 iHave message, and each iHave message carries 500 message ids; hence overall, we have 5000 iHave message ids.
+// It then sends those iHave spams to the victim node and waits till the victim node responds with iWants for all the spam iHaves.
+// There are some notes to consider:
+// - we can't send more than one iHave message per RPC in this test, as each iHave should have a distinct topic, and we only have one subscribed topic in the TestGossipSubIHaveBrokenPromises_.* tests.
+// - we can't send more than 10 RPCs containing iHave messages per heartbeat (1 sec). This is a gossipsub parameter (GossipSubMaxIHaveMessages). Hence, we choose 10 RPCs to always stay at the threshold.
+// - we can't send more than 5000 iHave message ids per heartbeat (1 sec). This is a gossipsub parameter (GossipSubMaxIHaveLength). Hence, we choose 500 message ids per iHave message to always stay at the threshold (10 * 500 = 5000).
+// - Note that the victim node picks one iHave id out of the entire RPC, and if that iHave id is not eventually delivered within 3 seconds (gossipsub parameter, GossipSubIWantFollowupTime), then the promise is considered broken. Hence, broken promises are counted per RPC (not per iHave message).
+// Args:
+// - t: the test instance.
+// - spammer: the spammer node.
+// - topic: the topic to spam.
+// - receivedIWants: a map to keep track of the iWants received by the victim node (exclusive to TestGossipSubIHaveBrokenPromises).
+// - victimNode: the victim node.
+func spamIHaveBrokenPromise(t *testing.T,
+	spammer *corruptlibp2p.GossipSubRouterSpammer,
+	topic string,
+	receivedIWants *concurrentmap.Map[string, struct{}],
+	victimNode p2p.LibP2PNode) {
+	rpcCount := 10
+	// we can't send more than one iHave per RPC in this test, as each iHave should have a distinct topic, and we only have one subscribed topic.
+	// when the node does not have a topic subscription, it will discard the iHave message.
+	iHavesPerRPC := 1
+	// there is a cap on the max iHave message ids a gossipsub node processes per heartbeat (1 sec); we don't want to exceed that (currently 5000 per heartbeat).
+	messageIdsPerIHave := 500
+	spamCtrlMsgs := spammer.GenerateCtlMessages(rpcCount, p2ptest.WithIHave(iHavesPerRPC, messageIdsPerIHave, topic))
+
+	// sanity check
+	require.Len(t, spamCtrlMsgs, rpcCount)
+	var sentIHaves []string
+
+	// checks that iHave message ids are not duplicated
+	for _, msg := range spamCtrlMsgs {
+		// sanity check
+		require.Len(t, msg.Ihave, iHavesPerRPC)
+		for _, iHave := range msg.Ihave {
+			// sanity check
+			require.Len(t, iHave.MessageIDs, messageIdsPerIHave)
+			for _, msgId := range iHave.MessageIDs {
+				require.NotContains(t, sentIHaves, msgId)
+				sentIHaves = append(sentIHaves, msgId)
+			}
+		}
+	}
+
+	// spams the victim node with iHave messages; since the iHave messages advertise junk message ids, the spammer will never
+	// answer the victim's iWant requests. Hence, the victim must eventually count this towards 10 broken promises.
+	// Note that the victim node picks one iHave id out of the entire RPC, and if that iHave id is not eventually delivered within 3 seconds (gossipsub parameter, GossipSubIWantFollowupTime),
+	// then the promise is considered broken. Hence, broken promises are counted per RPC (not per iHave message).
+	// This sums up to 10 broken promises (1 per RPC).
+	for i := 0; i < len(spamCtrlMsgs); i++ {
+		spammer.SpamControlMessage(t, victimNode, []pb.ControlMessage{spamCtrlMsgs[i]})
+		// we wait 50 milliseconds between RPCs to add an artificial delay; this reduces the chance that all RPCs arrive in the same heartbeat and the victim node drops some of them.
+		time.Sleep(50 * time.Millisecond)
+	}
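For reference, a minimal sketch of the shape of control message this utility generates, using the protobuf types from go-libp2p-pubsub (the topic label and id format here are hypothetical stand-ins, not what GenerateCtlMessages necessarily produces):

```go
package main

import (
	"fmt"

	pb "github.com/libp2p/go-libp2p-pubsub/pb"
)

// junkIHave builds one RPC's worth of iHave spam: a single iHave on the given
// topic, advertising n junk message ids that will never be delivered.
func junkIHave(topic string, n int) pb.ControlMessage {
	ids := make([]string, 0, n)
	for i := 0; i < n; i++ {
		ids = append(ids, fmt.Sprintf("junk-message-id-%d", i))
	}
	return pb.ControlMessage{
		Ihave: []*pb.ControlIHave{{TopicID: &topic, MessageIDs: ids}},
	}
}

func main() {
	msg := junkIHave("example-topic", 500)    // hypothetical topic label
	fmt.Println(len(msg.Ihave[0].MessageIDs)) // 500 ids per iHave; 10 RPCs stay at the 5000/heartbeat cap
}
```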
+	// wait till all the spam iHaves are responded with iWants.
+	require.Eventually(t,
+		func() bool {
+			for _, msgId := range sentIHaves {
+				if _, ok := receivedIWants.Get(msgId); !ok {
+					return false
+				}
+			}
+
+			return true
+		}, 10*time.Second,
+		100*time.Millisecond,
+		fmt.Sprintf("sanity check failed, we should have received all the iWants for the spam iHaves, expected: %d, actual: %d", len(sentIHaves), receivedIWants.Size()))
+}
diff --git a/insecure/integration/functional/test/gossipsub/scoring/scoring_test.go b/insecure/integration/functional/test/gossipsub/scoring/scoring_test.go
new file mode 100644
index 00000000000..bc35964732c
--- /dev/null
+++ b/insecure/integration/functional/test/gossipsub/scoring/scoring_test.go
@@ -0,0 +1,632 @@
+package scoring
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/insecure/corruptlibp2p"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/messages"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/module/mock"
+	"github.com/onflow/flow-go/network/channels"
+	"github.com/onflow/flow-go/network/p2p"
+	p2ptest "github.com/onflow/flow-go/network/p2p/test"
+	validator "github.com/onflow/flow-go/network/validator/pubsub"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestGossipSubInvalidMessageDelivery_Integration tests that when a victim peer is spammed with invalid messages from
+// a spammer peer, the victim will eventually penalize the spammer and stop receiving messages from them.
+// Note: the term integration is used here because it requires integrating all components of the libp2p stack.
+func TestGossipSubInvalidMessageDelivery_Integration(t *testing.T) { + tt := []struct { + name string + spamMsgFactory func(spammerId peer.ID, victimId peer.ID, topic channels.Topic) *pubsub_pb.Message + }{ + { + name: "unknown peer, invalid signature", + spamMsgFactory: func(spammerId peer.ID, _ peer.ID, topic channels.Topic) *pubsub_pb.Message { + return p2ptest.PubsubMessageFixture(t, p2ptest.WithTopic(topic.String())) + }, + }, + { + name: "unknown peer, missing signature", + spamMsgFactory: func(spammerId peer.ID, _ peer.ID, topic channels.Topic) *pubsub_pb.Message { + return p2ptest.PubsubMessageFixture(t, p2ptest.WithTopic(topic.String()), p2ptest.WithoutSignature()) + }, + }, + { + name: "known peer, invalid signature", + spamMsgFactory: func(spammerId peer.ID, _ peer.ID, topic channels.Topic) *pubsub_pb.Message { + return p2ptest.PubsubMessageFixture(t, p2ptest.WithFrom(spammerId), p2ptest.WithTopic(topic.String())) + }, + }, + { + name: "known peer, missing signature", + spamMsgFactory: func(spammerId peer.ID, _ peer.ID, topic channels.Topic) *pubsub_pb.Message { + return p2ptest.PubsubMessageFixture(t, p2ptest.WithFrom(spammerId), p2ptest.WithTopic(topic.String()), p2ptest.WithoutSignature()) + }, + }, + { + name: "self-origin, invalid signature", // bounce back our own messages + spamMsgFactory: func(_ peer.ID, victimId peer.ID, topic channels.Topic) *pubsub_pb.Message { + return p2ptest.PubsubMessageFixture(t, p2ptest.WithFrom(victimId), p2ptest.WithTopic(topic.String())) + }, + }, + { + name: "self-origin, no signature", // bounce back our own messages + spamMsgFactory: func(_ peer.ID, victimId peer.ID, topic channels.Topic) *pubsub_pb.Message { + return p2ptest.PubsubMessageFixture(t, p2ptest.WithFrom(victimId), p2ptest.WithTopic(topic.String()), p2ptest.WithoutSignature()) + }, + }, + { + name: "no sender", + spamMsgFactory: func(_ peer.ID, victimId peer.ID, topic channels.Topic) *pubsub_pb.Message { + return p2ptest.PubsubMessageFixture(t, p2ptest.WithoutSignerId(), p2ptest.WithTopic(topic.String())) + }, + }, + { + name: "no sender, missing signature", + spamMsgFactory: func(_ peer.ID, victimId peer.ID, topic channels.Topic) *pubsub_pb.Message { + return p2ptest.PubsubMessageFixture(t, p2ptest.WithoutSignerId(), p2ptest.WithTopic(topic.String()), p2ptest.WithoutSignature()) + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + testGossipSubInvalidMessageDeliveryScoring(t, tc.spamMsgFactory) + }) + } +} + +// testGossipSubInvalidMessageDeliveryScoring tests that when a victim peer is spammed with invalid messages from +// a spammer peer, the victim will eventually penalize the spammer and stop receiving messages from them. +// Args: +// - t: the test instance. +// - spamMsgFactory: a function that creates unique invalid messages to spam the victim with. 
+func testGossipSubInvalidMessageDeliveryScoring(t *testing.T, spamMsgFactory func(peer.ID, peer.ID, channels.Topic) *pubsub_pb.Message) {
+	role := flow.RoleConsensus
+	sporkId := unittest.IdentifierFixture()
+	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
+
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
+	spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkId, role, idProvider)
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// we override the decay interval to 1 second so that the score is updated within 1-second intervals.
+	cfg.NetworkConfig.GossipSub.RpcTracer.ScoreTracerInterval = 1 * time.Second
+	cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.InvalidMessageDeliveriesDecay = .99
+
+	victimNode, victimIdentity := p2ptest.NodeFixture(
+		t,
+		sporkId,
+		t.Name(),
+		idProvider,
+		p2ptest.WithRole(role),
+		p2ptest.OverrideFlowConfig(cfg),
+		p2ptest.WithValidateQueueSize(3000), // prevent node from dropping messages on slow CI machines
+	)
+
+	ids := flow.IdentityList{&spammer.SpammerId, &victimIdentity}
+	idProvider.SetIdentities(ids)
+	nodes := []p2p.LibP2PNode{spammer.SpammerNode, victimNode}
+
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)
+
+	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
+	p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes)
+
+	// checks end-to-end message delivery works on GossipSub
+	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
+		return (*messages.Proposal)(unittest.ProposalFixture())
+	})
+
+	// generates 3000 spam messages to send to the victim node; based on default-config.yaml, ~1400 of these messages are enough to
+	// penalize the spammer node into being disconnected from the victim node.
+	totalSpamMessages := 3000
+	msgs := make([]*pubsub_pb.Message, 0)
+	for i := 0; i < totalSpamMessages; i++ {
+		msgs = append(msgs, spamMsgFactory(spammer.SpammerNode.ID(), victimNode.ID(), blockTopic))
+	}
+
+	// sends all 3000 spam messages to the victim node over 1 RPC.
+	spammer.SpamControlMessage(t, victimNode,
+		spammer.GenerateCtlMessages(1), msgs...)
+
+	thresholds := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds
+
+	// wait for at most 5 seconds for the victim node to penalize the spammer node.
+	// Each heartbeat is 1 second, so a few heartbeats should be enough to penalize the spammer node.
+	// Ideally, we should wait for 1 heartbeat, but the score may not be updated immediately after the heartbeat.
+	details := ""
+	require.Eventually(t, func() bool {
+		spammerScore, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.ID())
+		if !ok {
+			details = "failed to get spammer score"
+			return false
+		}
+		details = fmt.Sprintf("spammer score: %f", spammerScore)
+		if spammerScore >= thresholds.Gossip {
+			// the score is not yet low enough to stop gossip from being routed by the victim node to the spammer node.
+			return false
+		}
+		details = fmt.Sprintf("%s, gossip threshold: %f", details, thresholds.Gossip)
+		if spammerScore >= thresholds.Publish {
+			// the score is not yet low enough to stop the victim node's published messages from being routed to the spammer node.
+			return false
+		}
+		details = fmt.Sprintf("%s, publish threshold: %f", details, thresholds.Publish)
+		if spammerScore >= thresholds.Graylist {
+			// the score is not yet low enough for the victim node to stop accepting RPC messages from the spammer node.
+			return false
+		}
+		details = fmt.Sprintf("%s, graylist threshold: %f", details, thresholds.Graylist)
+
+		return true
+	}, 5*time.Second, 100*time.Millisecond, details)
+
+	spammerScore, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.ID())
+	require.True(t, ok)
+	t.Logf("spammer score: %f", spammerScore)
+	t.Logf("gossip threshold: %f", thresholds.Gossip)
+	t.Logf("publish threshold: %f", thresholds.Publish)
+	t.Logf("graylist threshold: %f", thresholds.Graylist)
+
+	topicsSnapshot, ok := victimNode.PeerScoreExposer().GetTopicScores(spammer.SpammerNode.ID())
+	require.True(t, ok)
+	require.NotNil(t, topicsSnapshot, "topic scores must not be nil")
+	require.NotEmpty(t, topicsSnapshot, "topic scores must not be empty")
+	blkTopicSnapshot, ok := topicsSnapshot[blockTopic.String()]
+	require.True(t, ok)
+
+	// ensure that the topic snapshot of the spammer contains a record of at least 40% of the spam messages sent. The 40% accounts for the messages
+	// delivered before the score was updated and after the spammer was PRUNED, as well as for decay.
+	require.True(t, blkTopicSnapshot.InvalidMessageDeliveries > 0.4*float64(totalSpamMessages),
+		"invalid message deliveries must be greater than %f. invalid message deliveries: %f", 0.4*float64(totalSpamMessages),
+		blkTopicSnapshot.InvalidMessageDeliveries)
+
+	p2ptest.EnsureNoPubsubExchangeBetweenGroups(
+		t,
+		ctx,
+		[]p2p.LibP2PNode{victimNode},
+		flow.IdentifierList{victimIdentity.NodeID},
+		[]p2p.LibP2PNode{spammer.SpammerNode},
+		flow.IdentifierList{spammer.SpammerId.NodeID},
+		blockTopic,
+		1,
+		func() interface{} {
+			return (*messages.Proposal)(unittest.ProposalFixture())
+		})
+}
+
+// TestGossipSubMeshDeliveryScoring_UnderDelivery_SingleTopic tests that when a peer is under-performing in a topic mesh, its score is (slightly) penalized.
+func TestGossipSubMeshDeliveryScoring_UnderDelivery_SingleTopic(t *testing.T) {
+	t.Parallel()
+
+	role := flow.RoleConsensus
+	sporkId := unittest.IdentifierFixture()
+
+	idProvider := mock.NewIdentityProvider(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
+
+	// we override some of the default scoring parameters in order to speed up the test in a time-efficient manner.
+	blockTopicOverrideParams := defaultTopicScoreParams(t)
+	blockTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup.
+
+	conf, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// we override the decay interval to 1 second so that the score is updated within 1-second intervals.
+	conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.DecayInterval = 1 * time.Second
+	conf.NetworkConfig.GossipSub.RpcTracer.ScoreTracerInterval = 1 * time.Second
+	thisNode, thisId := p2ptest.NodeFixture( // this node is the one that will be penalizing the under-performer node.
+		t,
+		sporkId,
+		t.Name(),
+		idProvider,
+		p2ptest.WithRole(role),
+		p2ptest.OverrideFlowConfig(conf),
+		p2ptest.EnablePeerScoringWithOverride(
+			&p2p.PeerScoringConfigOverride{
+				TopicScoreParams: map[channels.Topic]*pubsub.TopicScoreParams{
+					blockTopic: blockTopicOverrideParams,
+				},
+			}),
+	)
+
+	underPerformerNode, underPerformerId := p2ptest.NodeFixture(
+		t,
+		sporkId,
+		t.Name(),
+		idProvider,
+		p2ptest.WithRole(role),
+	)
+
+	idProvider.On("ByPeerID", thisNode.ID()).Return(&thisId, true).Maybe()
+	idProvider.On("ByPeerID", underPerformerNode.ID()).Return(&underPerformerId, true).Maybe()
+	ids := flow.IdentityList{&underPerformerId, &thisId}
+	nodes := []p2p.LibP2PNode{underPerformerNode, thisNode}
+
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)
+
+	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
+	p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes)
+
+	// initially both nodes should be able to publish and receive messages from each other in the topic mesh.
+	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
+		return (*messages.Proposal)(unittest.ProposalFixture())
+	})
+
+	scoreParams := conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol
+
+	// Also, initially the under-performing node should have a score at least equal to the MaxAppSpecificReward.
+	// The reason is that in our scoring system, we reward staked nodes with MaxAppSpecificReward, and the under-performing node is considered staked
+	// as it is in the id provider of thisNode.
+	require.Eventually(t, func() bool {
+		underPerformingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(underPerformerNode.ID())
+		if !ok {
+			return false
+		}
+		if underPerformingNodeScore < scoreParams.AppSpecificScore.MaxAppSpecificReward {
+			// ensure the score is high enough so that gossip is routed by this node to the under-performing node.
+			return false
+		}
+
+		return true
+	}, 1*time.Second, 100*time.Millisecond)
+
+	// however, after one decay interval, we expect the score of the under-performing node to be penalized by -0.05 * MaxAppSpecificReward, as
+	// it has not been able to deliver messages to this node in the topic mesh since the past decay interval.
+	require.Eventually(t, func() bool {
+		underPerformingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(underPerformerNode.ID())
+		if !ok {
+			return false
+		}
+		if underPerformingNodeScore > 0.96*scoreParams.AppSpecificScore.MaxAppSpecificReward { // score must be penalized by -0.05 * MaxAppSpecificReward.
+			// 0.96 is to account for floating point errors.
+			return false
+		}
+		if underPerformingNodeScore < conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Gossip { // even though the node is slightly penalized, it should still be able to gossip with this node.
+			return false
+		}
+		if underPerformingNodeScore < conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Publish { // even though the node is slightly penalized, it should still be able to publish to this node.
+			return false
+		}
+		if underPerformingNodeScore < conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Graylist { // even though the node is slightly penalized, it should still be able to establish an rpc connection with this node.
+			return false
+		}
+
+		return true
+	}, 3*time.Second, 100*time.Millisecond)
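The ~0.05 · MaxAppSpecificReward penalty observed above comes from GossipSub's mesh message delivery component: once the activation window has passed, a peer delivering fewer messages than the mesh delivery threshold accrues a penalty proportional to the squared deficit, scaled by a negative weight and the topic weight. A spec-level sketch (not flow-go code; parameter values are stand-ins), which also illustrates why idling in two meshes, as in the next test, roughly doubles the hit:

```go
package main

import "fmt"

// meshDeliveryPenalty mirrors, at spec level, gossipsub's per-topic mesh
// message delivery component: squared deficit below the threshold, scaled by
// a negative weight and the topic weight. It is zero while the peer meets
// the threshold (and, in the real protocol, before the activation window).
func meshDeliveryPenalty(deliveries, threshold, weight, topicWeight float64) float64 {
	if deliveries >= threshold {
		return 0
	}
	deficit := threshold - deliveries
	return topicWeight * weight * deficit * deficit
}

func main() {
	// hypothetical stand-in parameters, identical for every mesh in this sketch
	const threshold, weight, topicWeight = 10.0, -0.05, 1.0
	oneTopic := meshDeliveryPenalty(0, threshold, weight, topicWeight)
	twoTopics := 2 * oneTopic        // per-topic penalties simply sum across meshes
	fmt.Println(oneTopic, twoTopics) // -5 and -10 with these stand-ins
}
```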
+	// even though the under-performing node is penalized, it should still be able to publish and receive messages from this node in the topic mesh.
+	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
+		return (*messages.Proposal)(unittest.ProposalFixture())
+	})
+}
+
+// TestGossipSubMeshDeliveryScoring_UnderDelivery_TwoTopics tests that when a peer is under-performing in two topics, it is penalized in both topics.
+func TestGossipSubMeshDeliveryScoring_UnderDelivery_TwoTopics(t *testing.T) {
+	t.Parallel()
+
+	role := flow.RoleConsensus
+	sporkId := unittest.IdentifierFixture()
+
+	idProvider := mock.NewIdentityProvider(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
+	dkgTopic := channels.TopicFromChannel(channels.DKGCommittee, sporkId)
+
+	// we override some of the default scoring parameters in order to speed up the test in a time-efficient manner.
+	blockTopicOverrideParams := defaultTopicScoreParams(t)
+	blockTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup.
+	dkgTopicOverrideParams := defaultTopicScoreParams(t)
+	dkgTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup.
+
+	conf, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// we override the decay interval to 1 second so that the score is updated within 1-second intervals.
+	conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.DecayInterval = 1 * time.Second
+	conf.NetworkConfig.GossipSub.RpcTracer.ScoreTracerInterval = 1 * time.Second
+	thisNode, thisId := p2ptest.NodeFixture( // this node is the one that will be penalizing the under-performer node.
+		t,
+		sporkId,
+		t.Name(),
+		idProvider,
+		p2ptest.WithRole(role),
+		p2ptest.OverrideFlowConfig(conf),
+		p2ptest.EnablePeerScoringWithOverride(
+			&p2p.PeerScoringConfigOverride{
+				TopicScoreParams: map[channels.Topic]*pubsub.TopicScoreParams{
+					blockTopic: blockTopicOverrideParams,
+					dkgTopic:   dkgTopicOverrideParams,
+				},
+			}),
+	)
+
+	underPerformerNode, underPerformerId := p2ptest.NodeFixture(
+		t,
+		sporkId,
+		t.Name(),
+		idProvider,
+		p2ptest.WithRole(role),
+	)
+
+	idProvider.On("ByPeerID", thisNode.ID()).Return(&thisId, true).Maybe()
+	idProvider.On("ByPeerID", underPerformerNode.ID()).Return(&underPerformerId, true).Maybe()
+	ids := flow.IdentityList{&underPerformerId, &thisId}
+	nodes := []p2p.LibP2PNode{underPerformerNode, thisNode}
+
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)
+
+	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
+	p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes)
+
+	// subscribe to the topics.
+	for _, node := range nodes {
+		for _, topic := range []channels.Topic{blockTopic, dkgTopic} {
+			_, err := node.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter()))
+			require.NoError(t, err)
+		}
+	}
+
+	scoreParams := conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore
+
+	// Initially the under-performing node should have a score at least equal to the MaxAppSpecificReward.
+	// The reason is that in our scoring system, we reward staked nodes with MaxAppSpecificReward, and the under-performing node is considered staked
+	// as it is in the id provider of thisNode.
+	require.Eventually(t, func() bool {
+		underPerformingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(underPerformerNode.ID())
+		if !ok {
+			return false
+		}
+		if underPerformingNodeScore < scoreParams.MaxAppSpecificReward {
+			// ensure the score is high enough so that gossip is routed by this node to the under-performing node.
+			return false
+		}
+
+		return true
+	}, 2*time.Second, 100*time.Millisecond)
+
+	// Intentionally, no message delivery happens, so that the under-performing node is penalized.
+
+	// however, after one decay interval, we expect the score of the under-performing node to be penalized by ~ 2 * -0.05 * MaxAppSpecificReward.
+	require.Eventually(t, func() bool {
+		underPerformingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(underPerformerNode.ID())
+		if !ok {
+			return false
+		}
+		if underPerformingNodeScore > 0.91*scoreParams.MaxAppSpecificReward { // score must be penalized by ~ 2 * -0.05 * MaxAppSpecificReward.
+			// 0.91 is to account for floating point errors.
+			return false
+		}
+		if underPerformingNodeScore < conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Gossip { // even though the node is slightly penalized, it should still be able to gossip with this node.
+			return false
+		}
+		if underPerformingNodeScore < conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Publish { // even though the node is slightly penalized, it should still be able to publish to this node.
+			return false
+		}
+		if underPerformingNodeScore < conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Graylist { // even though the node is slightly penalized, it should still be able to establish an rpc connection with this node.
+			return false
+		}
+
+		return true
+	}, 3*time.Second, 100*time.Millisecond)
+
+	// even though the under-performing node is penalized, it should still be able to publish and receive messages from this node in both topic meshes.
+	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
+		return (*messages.Proposal)(unittest.ProposalFixture())
+	})
+	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, dkgTopic, 1, func() interface{} {
+		return unittest.DKGMessageFixture()
+	})
+}
+
+// TestGossipSubMeshDeliveryScoring_Replay_Will_Not_Counted tests that replayed messages are not counted towards the mesh message deliveries.
+func TestGossipSubMeshDeliveryScoring_Replay_Will_Not_Counted(t *testing.T) {
+	t.Parallel()
+
+	role := flow.RoleConsensus
+	sporkId := unittest.IdentifierFixture()
+
+	idProvider := mock.NewIdentityProvider(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
+
+	// we override some of the default scoring parameters in order to speed up the test in a time-efficient manner.
+	conf, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// we override the decay interval to 1 second so that the score is updated within 1-second intervals.
+	conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.DecayInterval = 1 * time.Second
+	conf.NetworkConfig.GossipSub.RpcTracer.ScoreTracerInterval = 1 * time.Second
+	blockTopicOverrideParams := defaultTopicScoreParams(t)
+	blockTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup.
+	// this node is the one that will be penalizing the replaying node.
+	thisNode, thisId := p2ptest.NodeFixture(
+		t,
+		sporkId,
+		t.Name(),
+		idProvider,
+		p2ptest.WithRole(role),
+		p2ptest.OverrideFlowConfig(conf),
+		p2ptest.EnablePeerScoringWithOverride(
+			&p2p.PeerScoringConfigOverride{
+				TopicScoreParams: map[channels.Topic]*pubsub.TopicScoreParams{
+					blockTopic: blockTopicOverrideParams,
+				},
+			}),
+	)
+
+	replayingNode, replayingId := p2ptest.NodeFixture(
+		t,
+		sporkId,
+		t.Name(),
+		idProvider,
+		p2ptest.WithRole(role),
+	)
+
+	idProvider.On("ByPeerID", thisNode.ID()).Return(&thisId, true).Maybe()
+	idProvider.On("ByPeerID", replayingNode.ID()).Return(&replayingId, true).Maybe()
+	ids := flow.IdentityList{&replayingId, &thisId}
+	nodes := []p2p.LibP2PNode{replayingNode, thisNode}
+
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)
+
+	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
+	p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes)
+
+	// initially both nodes should be able to publish and receive messages from each other in the block topic mesh.
+	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
+		return (*messages.Proposal)(unittest.ProposalFixture())
+	})
+
+	scoreParams := conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore
+
+	// Initially the replaying node should have a score at least equal to the MaxAppSpecificReward.
+	// The reason is that in our scoring system, we reward staked nodes with MaxAppSpecificReward, and initially every node is considered staked
+	// as it is in the id provider of thisNode.
+	initialReplayingNodeScore := float64(0)
+	require.Eventually(t, func() bool {
+		replayingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(replayingNode.ID())
+		if !ok {
+			return false
+		}
+		if replayingNodeScore < scoreParams.MaxAppSpecificReward {
+			// ensure the score is high enough so that gossip is routed by this node to the replaying node.
+			return false
+		}
+
+		initialReplayingNodeScore = replayingNodeScore
+		return true
+	}, 2*time.Second, 100*time.Millisecond)
+
+	// the replaying node acts honestly and sends 200 block proposals on the topic mesh. This is twice the
+	// defaultTopicMeshMessageDeliveryThreshold, which prevents the replaying node from being penalized.
+	proposalList := make([]*messages.Proposal, 200)
+	for i := 0; i < len(proposalList); i++ {
+		proposalList[i] = (*messages.Proposal)(unittest.ProposalFixture())
+	}
+	i := -1
+	p2ptest.EnsurePubsubMessageExchangeFromNode(t, ctx, replayingNode, thisNode, thisId.NodeID, blockTopic, len(proposalList), func() interface{} {
+		i += 1
+		return proposalList[i]
+	})
+
+	// as the replaying node is not penalized, we expect its score to remain equal to the initial score.
+	require.Eventually(t, func() bool {
+		replayingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(replayingNode.ID())
+		if !ok {
+			return false
+		}
+		if replayingNodeScore < scoreParams.MaxAppSpecificReward {
+			// ensure the score is high enough so that gossip is routed by this node to the replaying node.
+			return false
+		}
+		if replayingNodeScore != initialReplayingNodeScore {
+			// ensure the score is not penalized.
+			return false
+		}
+
+		initialReplayingNodeScore = replayingNodeScore
+		return true
+	}, 2*time.Second, 100*time.Millisecond)
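Why replays earn no credit: gossipsub keeps per-peer delivery records keyed by message id, and only a first delivery (or a near-duplicate within the short mesh-delivery window) counts toward mesh message deliveries. A simplified sketch of that bookkeeping (assumed semantics, not flow-go code; the duplicate-within-window case is omitted):

```go
package main

import "fmt"

// meshDeliveryCounter is a toy stand-in for gossipsub's per-peer delivery
// records: a message id only earns credit the first time it is delivered.
type meshDeliveryCounter struct {
	seen  map[string]bool
	count int
}

func (c *meshDeliveryCounter) onDeliver(msgID string) {
	if c.seen[msgID] {
		return // replay: already recorded, no additional credit
	}
	c.seen[msgID] = true
	c.count++
}

func main() {
	c := &meshDeliveryCounter{seen: map[string]bool{}}
	for round := 0; round < 2; round++ { // second round replays the same 200 ids
		for i := 0; i < 200; i++ {
			c.onDeliver(fmt.Sprintf("proposal-%d", i))
		}
	}
	fmt.Println(c.count) // 200: the replay earned no extra mesh-delivery credit
}
```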
+	// now the replaying node acts maliciously and just replays the same messages again.
+	i = -1
+	p2ptest.EnsureNoPubsubMessageExchange(
+		t,
+		ctx,
+		[]p2p.LibP2PNode{replayingNode},
+		[]p2p.LibP2PNode{thisNode},
+		flow.IdentifierList{thisId.NodeID},
+		blockTopic,
+		len(proposalList),
+		func() interface{} {
+			i += 1
+			return proposalList[i]
+		})
+
+	// since the last decay interval, the replaying node has not delivered anything new, so its score should be penalized for under-performing.
+	require.Eventually(t, func() bool {
+		replayingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(replayingNode.ID())
+		if !ok {
+			return false
+		}
+
+		if replayingNodeScore >= initialReplayingNodeScore {
+			// the node must be penalized for just replaying the same messages.
+			return false
+		}
+
+		if replayingNodeScore >= scoreParams.MaxAppSpecificReward {
+			// the node must be penalized for just replaying the same messages.
+			return false
+		}
+
+		// the following if-statements check that even though the node is penalized, it is not penalized too much, and
+		// can still participate in the network. We don't want to disallow-list a node merely for under-performing.
+		if replayingNodeScore < conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Gossip {
+			return false
+		}
+
+		if replayingNodeScore < conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Publish {
+			return false
+		}
+
+		if replayingNodeScore < conf.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Graylist {
+			return false
+		}
+
+		initialReplayingNodeScore = replayingNodeScore
+		return true
+	}, 2*time.Second, 100*time.Millisecond)
+
+	// even though the replaying node is penalized, it should still be able to publish and receive messages from this node in the topic mesh.
+	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
+		return (*messages.Proposal)(unittest.ProposalFixture())
+	})
+}
+
+// defaultTopicScoreParams returns the default score params for topics.
+func defaultTopicScoreParams(t *testing.T) *pubsub.TopicScoreParams { + defaultConfig, err := config.DefaultConfig() + require.NoError(t, err) + topicScoreParams := defaultConfig.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters + p := &pubsub.TopicScoreParams{ + TopicWeight: topicScoreParams.TopicWeight, + SkipAtomicValidation: topicScoreParams.SkipAtomicValidation, + InvalidMessageDeliveriesWeight: topicScoreParams.InvalidMessageDeliveriesWeight, + InvalidMessageDeliveriesDecay: topicScoreParams.InvalidMessageDeliveriesDecay, + TimeInMeshQuantum: topicScoreParams.TimeInMeshQuantum, + MeshMessageDeliveriesWeight: topicScoreParams.MeshDeliveriesWeight, + MeshMessageDeliveriesDecay: topicScoreParams.MeshMessageDeliveriesDecay, + MeshMessageDeliveriesCap: topicScoreParams.MeshMessageDeliveriesCap, + MeshMessageDeliveriesThreshold: topicScoreParams.MeshMessageDeliveryThreshold, + MeshMessageDeliveriesWindow: topicScoreParams.MeshMessageDeliveriesWindow, + MeshMessageDeliveriesActivation: topicScoreParams.MeshMessageDeliveryActivation, + } + return p +} diff --git a/insecure/integration/test/composability_test.go b/insecure/integration/tests/composability_test.go similarity index 91% rename from insecure/integration/test/composability_test.go rename to insecure/integration/tests/composability_test.go index c7ba04d3b7a..feca9a2b198 100644 --- a/insecure/integration/test/composability_test.go +++ b/insecure/integration/tests/composability_test.go @@ -1,4 +1,4 @@ -package test +package tests import ( "context" @@ -9,17 +9,17 @@ import ( "github.com/stretchr/testify/mock" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/engine/testutil" "github.com/onflow/flow-go/insecure" "github.com/onflow/flow-go/insecure/corruptnet" "github.com/onflow/flow-go/insecure/orchestrator" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/local" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/utils/unittest" @@ -86,19 +86,25 @@ func TestCorruptNetworkFrameworkHappyPath(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(2) // wait for both egress and ingress events to be received. + internalCorruptedEgressEvent, err := corruptedEgressEvent.ToInternal() + require.NoError(t, err) + // we expect to receive the corrupted egress event on the honest node. // event must arrive at the channel set by orchestrator. // origin id of the message must be the corrupted node. // content of event must be swapped with corrupted event. - honestEngine.On("Process", testChannel, corruptedIdentity.NodeID, corruptedEgressEvent).Return(nil).Run(func(args mock.Arguments) { + honestEngine.On("Process", testChannel, corruptedIdentity.NodeID, internalCorruptedEgressEvent).Return(nil).Run(func(args mock.Arguments) { wg.Done() }) + internalCorruptedIngressEvent, err := corruptedIngressEvent.ToInternal() + require.NoError(t, err) + // we expect to receive the corrupted ingress event on the corrupted node. // event must arrive at the channel set by orchestrator. // origin id of the message must be the honest node. // content of event must be swapped with corrupted event. 
- corruptedEngine.On("Process", testChannel, honestIdentity.NodeID, corruptedIngressEvent).Return(nil).Run(func(args mock.Arguments) { + corruptedEngine.On("Process", testChannel, honestIdentity.NodeID, internalCorruptedIngressEvent).Return(nil).Run(func(args mock.Arguments) { // simulate the Process logic of the corrupted engine on reception of message from underlying network. wg.Done() }) @@ -122,7 +128,7 @@ func TestCorruptNetworkFrameworkHappyPath(t *testing.T) { // withCorruptNetwork creates a real corrupt network, starts it, runs the "run" function, and then stops it. func withCorruptNetwork(t *testing.T, run func(*testing.T, flow.Identity, *corruptnet.Network, *stub.Hub)) { codec := unittest.NetworkCodec() - corruptedIdentity := unittest.IdentityFixture(unittest.WithAddress(insecure.DefaultAddress)) + corruptedIdentity := unittest.PrivateNodeInfoFixture(unittest.WithAddress(insecure.DefaultAddress)) // life-cycle management of orchestratorNetwork. ctx, cancel := context.WithCancel(context.Background()) @@ -138,11 +144,16 @@ func withCorruptNetwork(t *testing.T, run func(*testing.T, flow.Identity, *corru hub := stub.NewNetworkHub() ccf := corruptnet.NewCorruptConduitFactory(unittest.Logger(), flow.BftTestnet) flowNetwork := stub.NewNetwork(t, corruptedIdentity.NodeID, hub, stub.WithConduitFactory(ccf)) + + privateKeys, err := corruptedIdentity.PrivateKeys() + require.NoError(t, err) + me, err := local.New(corruptedIdentity.Identity().IdentitySkeleton, privateKeys.StakingKey) + require.NoError(t, err) corruptNetwork, err := corruptnet.NewCorruptNetwork( unittest.Logger(), flow.BftTestnet, insecure.DefaultAddress, - testutil.LocalFixture(t, corruptedIdentity), + me, codec, flowNetwork, ccf) @@ -161,7 +172,7 @@ func withCorruptNetwork(t *testing.T, run func(*testing.T, flow.Identity, *corru flowNetwork.StartConDev(100*time.Millisecond, true) }, 100*time.Millisecond, "failed to start corrupted node network") - run(t, *corruptedIdentity, corruptNetwork, hub) + run(t, *corruptedIdentity.Identity(), corruptNetwork, hub) // terminates orchestratorNetwork cancel() diff --git a/insecure/integration/test/mock_orchestrator.go b/insecure/integration/tests/mock_orchestrator.go similarity index 99% rename from insecure/integration/test/mock_orchestrator.go rename to insecure/integration/tests/mock_orchestrator.go index b992e1d6c20..5d11879d6df 100644 --- a/insecure/integration/test/mock_orchestrator.go +++ b/insecure/integration/tests/mock_orchestrator.go @@ -1,4 +1,4 @@ -package test +package tests import ( "github.com/onflow/flow-go/insecure" diff --git a/insecure/internal/subscription.go b/insecure/internal/subscription.go index 60df67de909..f0d1abdc952 100644 --- a/insecure/internal/subscription.go +++ b/insecure/internal/subscription.go @@ -3,21 +3,15 @@ package internal import ( "context" + corrupt "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub" - corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub" "github.com/onflow/flow-go/network/p2p" ) -// CorruptSubscription is a wrapper around the forked pubsub subscription from -// github.com/yhassanzadeh13/go-libp2p-pubsub that implements the p2p.Subscription. -// This is needed because in order to use the forked pubsub module, we need to -// use the entire dependency tree of the forked module which is resolved to -// github.com/yhassanzadeh13/go-libp2p-pubsub. This means that we cannot use -// the original libp2p pubsub module in the same package. 
-// Note: we use the forked pubsub module for sake of BFT testing and attack vector
-// implementation, it is designed to be completely isolated in the "insecure" package, and
-// totally separated from the rest of the codebase.
+// CorruptSubscription is a wrapper that implements the p2p.Subscription.
+// This was previously needed because we used a forked pubsub module. That is no longer the case,
+// so we could refactor this in the future to remove the wrapper.
 type CorruptSubscription struct {
 	s *corrupt.Subscription
 }
diff --git a/insecure/internal/topic.go b/insecure/internal/topic.go
index 0b0c9c77156..82dba5365f7 100644
--- a/insecure/internal/topic.go
+++ b/insecure/internal/topic.go
@@ -3,20 +3,14 @@ package internal
 import (
 	"context"
 
-	corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub"
+	corrupt "github.com/libp2p/go-libp2p-pubsub"
 
 	"github.com/onflow/flow-go/network/p2p"
 )
 
-// CorruptTopic is a wrapper around the forked pubsub topic from
-// github.com/yhassanzadeh13/go-libp2p-pubsub that implements the p2p.Topic.
-// This is needed because in order to use the forked pubsub module, we need to
-// use the entire dependency tree of the forked module which is resolved to
-// github.com/yhassanzadeh13/go-libp2p-pubsub. This means that we cannot use
-// the original libp2p pubsub module in the same package.
-// Note: we use the forked pubsub module for sake of BFT testing and attack vector
-// implementation, it is designed to be completely isolated in the "insecure" package, and
-// totally separated from the rest of the codebase.
+// CorruptTopic is a wrapper that implements the p2p.Topic.
+// This was previously needed because we used a forked pubsub module. That is no longer the case,
+// so we could refactor this in the future to remove the wrapper.
 type CorruptTopic struct {
 	t *corrupt.Topic
 }
diff --git a/insecure/mock/attack_orchestrator.go b/insecure/mock/attack_orchestrator.go
index 8e89b466e39..eefce7f82dc 100644
--- a/insecure/mock/attack_orchestrator.go
+++ b/insecure/mock/attack_orchestrator.go
@@ -1,6 +1,6 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
 
-package mockinsecure
+package mock
 
 import (
 	insecure "github.com/onflow/flow-go/insecure"
@@ -16,6 +16,10 @@ type AttackOrchestrator struct {
 func (_m *AttackOrchestrator) HandleEgressEvent(_a0 *insecure.EgressEvent) error {
 	ret := _m.Called(_a0)
 
+	if len(ret) == 0 {
+		panic("no return value specified for HandleEgressEvent")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func(*insecure.EgressEvent) error); ok {
 		r0 = rf(_a0)
@@ -30,6 +34,10 @@ func (_m *AttackOrchestrator) HandleEgressEvent(_a0 *insecure.EgressEvent) error
 func (_m *AttackOrchestrator) HandleIngressEvent(_a0 *insecure.IngressEvent) error {
 	ret := _m.Called(_a0)
 
+	if len(ret) == 0 {
+		panic("no return value specified for HandleIngressEvent")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func(*insecure.IngressEvent) error); ok {
 		r0 = rf(_a0)
@@ -45,13 +53,12 @@ func (_m *AttackOrchestrator) Register(_a0 insecure.OrchestratorNetwork) {
 	_m.Called(_a0)
 }
 
-type mockConstructorTestingTNewAttackOrchestrator interface {
+// NewAttackOrchestrator creates a new instance of AttackOrchestrator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewAttackOrchestrator(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewAttackOrchestrator creates a new instance of AttackOrchestrator.
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAttackOrchestrator(t mockConstructorTestingTNewAttackOrchestrator) *AttackOrchestrator { +}) *AttackOrchestrator { mock := &AttackOrchestrator{} mock.Mock.Test(t) diff --git a/insecure/mock/conduit_controller.go b/insecure/mock/conduit_controller.go deleted file mode 100644 index 3db99ab9c3a..00000000000 --- a/insecure/mock/conduit_controller.go +++ /dev/null @@ -1,67 +0,0 @@ -// Code generated by mockery v2.13.1. DO NOT EDIT. - -package mockinsecure - -import ( - flow "github.com/onflow/flow-go/model/flow" - channels "github.com/onflow/flow-go/network/channels" - - insecure "github.com/onflow/flow-go/insecure" - - mock "github.com/stretchr/testify/mock" -) - -// ConduitController is an autogenerated mock type for the ConduitController type -type ConduitController struct { - mock.Mock -} - -// EngineClosingChannel provides a mock function with given fields: _a0 -func (_m *ConduitController) EngineClosingChannel(_a0 channels.Channel) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// HandleIncomingEvent provides a mock function with given fields: _a0, _a1, _a2, _a3, _a4 -func (_m *ConduitController) HandleIncomingEvent(_a0 interface{}, _a1 channels.Channel, _a2 insecure.Protocol, _a3 uint32, _a4 ...flow.Identifier) error { - _va := make([]interface{}, len(_a4)) - for _i := range _a4 { - _va[_i] = _a4[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2, _a3) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}, channels.Channel, insecure.Protocol, uint32, ...flow.Identifier) error); ok { - r0 = rf(_a0, _a1, _a2, _a3, _a4...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewConduitController interface { - mock.TestingT - Cleanup(func()) -} - -// NewConduitController creates a new instance of ConduitController. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConduitController(t mockConstructorTestingTNewConduitController) *ConduitController { - mock := &ConduitController{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/insecure/mock/corrupt_conduit_factory.go b/insecure/mock/corrupt_conduit_factory.go index 5e51f6e832c..6d66aa4f5ea 100644 --- a/insecure/mock/corrupt_conduit_factory.go +++ b/insecure/mock/corrupt_conduit_factory.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mockinsecure +package mock import ( context "context" @@ -25,6 +25,10 @@ type CorruptConduitFactory struct { func (_m *CorruptConduitFactory) NewConduit(_a0 context.Context, _a1 channels.Channel) (network.Conduit, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for NewConduit") + } + var r0 network.Conduit var r1 error if rf, ok := ret.Get(0).(func(context.Context, channels.Channel) (network.Conduit, error)); ok { @@ -48,11 +52,15 @@ func (_m *CorruptConduitFactory) NewConduit(_a0 context.Context, _a1 channels.Ch } // RegisterAdapter provides a mock function with given fields: _a0 -func (_m *CorruptConduitFactory) RegisterAdapter(_a0 network.Adapter) error { +func (_m *CorruptConduitFactory) RegisterAdapter(_a0 network.ConduitAdapter) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for RegisterAdapter") + } + var r0 error - if rf, ok := ret.Get(0).(func(network.Adapter) error); ok { + if rf, ok := ret.Get(0).(func(network.ConduitAdapter) error); ok { r0 = rf(_a0) } else { r0 = ret.Error(0) @@ -65,6 +73,10 @@ func (_m *CorruptConduitFactory) RegisterAdapter(_a0 network.Adapter) error { func (_m *CorruptConduitFactory) RegisterEgressController(_a0 insecure.EgressController) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for RegisterEgressController") + } + var r0 error if rf, ok := ret.Get(0).(func(insecure.EgressController) error); ok { r0 = rf(_a0) @@ -86,6 +98,10 @@ func (_m *CorruptConduitFactory) SendOnFlowNetwork(_a0 interface{}, _a1 channels _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for SendOnFlowNetwork") + } + var r0 error if rf, ok := ret.Get(0).(func(interface{}, channels.Channel, insecure.Protocol, uint, ...flow.Identifier) error); ok { r0 = rf(_a0, _a1, _a2, _a3, _a4...) @@ -100,6 +116,10 @@ func (_m *CorruptConduitFactory) SendOnFlowNetwork(_a0 interface{}, _a1 channels func (_m *CorruptConduitFactory) UnregisterChannel(_a0 channels.Channel) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for UnregisterChannel") + } + var r0 error if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { r0 = rf(_a0) @@ -110,13 +130,12 @@ func (_m *CorruptConduitFactory) UnregisterChannel(_a0 channels.Channel) error { return r0 } -type mockConstructorTestingTNewCorruptConduitFactory interface { +// NewCorruptConduitFactory creates a new instance of CorruptConduitFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCorruptConduitFactory(t interface { mock.TestingT Cleanup(func()) -} - -// NewCorruptConduitFactory creates a new instance of CorruptConduitFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptConduitFactory(t mockConstructorTestingTNewCorruptConduitFactory) *CorruptConduitFactory { +}) *CorruptConduitFactory { mock := &CorruptConduitFactory{} mock.Mock.Test(t) diff --git a/insecure/mock/corrupt_network__connect_attacker_client.go b/insecure/mock/corrupt_network__connect_attacker_client.go deleted file mode 100644 index 05dd0eca7ce..00000000000 --- a/insecure/mock/corrupt_network__connect_attacker_client.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mockinsecure - -import ( - context "context" - - insecure "github.com/onflow/flow-go/insecure" - metadata "google.golang.org/grpc/metadata" - - mock "github.com/stretchr/testify/mock" -) - -// CorruptNetwork_ConnectAttackerClient is an autogenerated mock type for the CorruptNetwork_ConnectAttackerClient type -type CorruptNetwork_ConnectAttackerClient struct { - mock.Mock -} - -// CloseSend provides a mock function with given fields: -func (_m *CorruptNetwork_ConnectAttackerClient) CloseSend() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Context provides a mock function with given fields: -func (_m *CorruptNetwork_ConnectAttackerClient) Context() context.Context { - ret := _m.Called() - - var r0 context.Context - if rf, ok := ret.Get(0).(func() context.Context); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(context.Context) - } - } - - return r0 -} - -// Header provides a mock function with given fields: -func (_m *CorruptNetwork_ConnectAttackerClient) Header() (metadata.MD, error) { - ret := _m.Called() - - var r0 metadata.MD - var r1 error - if rf, ok := ret.Get(0).(func() (metadata.MD, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() metadata.MD); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(metadata.MD) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Recv provides a mock function with given fields: -func (_m *CorruptNetwork_ConnectAttackerClient) Recv() (*insecure.Message, error) { - ret := _m.Called() - - var r0 *insecure.Message - var r1 error - if rf, ok := ret.Get(0).(func() (*insecure.Message, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *insecure.Message); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*insecure.Message) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RecvMsg provides a mock function with given fields: m -func (_m *CorruptNetwork_ConnectAttackerClient) RecvMsg(m interface{}) error { - ret := _m.Called(m) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(m) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SendMsg provides a mock function with given fields: m -func (_m *CorruptNetwork_ConnectAttackerClient) SendMsg(m interface{}) error { - ret := _m.Called(m) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(m) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Trailer provides a mock function with given fields: -func (_m *CorruptNetwork_ConnectAttackerClient) Trailer() metadata.MD { - ret := _m.Called() - - var r0 metadata.MD - if rf, ok := ret.Get(0).(func() metadata.MD); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(metadata.MD) - } - } - - return r0 -} - -type mockConstructorTestingTNewCorruptNetwork_ConnectAttackerClient interface { - mock.TestingT - Cleanup(func()) -} - -// NewCorruptNetwork_ConnectAttackerClient creates a new instance of CorruptNetwork_ConnectAttackerClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewCorruptNetwork_ConnectAttackerClient(t mockConstructorTestingTNewCorruptNetwork_ConnectAttackerClient) *CorruptNetwork_ConnectAttackerClient { - mock := &CorruptNetwork_ConnectAttackerClient{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/insecure/mock/corrupt_network__connect_attacker_server.go b/insecure/mock/corrupt_network__connect_attacker_server.go deleted file mode 100644 index f36128847bd..00000000000 --- a/insecure/mock/corrupt_network__connect_attacker_server.go +++ /dev/null @@ -1,123 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockinsecure - -import ( - context "context" - - insecure "github.com/onflow/flow-go/insecure" - metadata "google.golang.org/grpc/metadata" - - mock "github.com/stretchr/testify/mock" -) - -// CorruptNetwork_ConnectAttackerServer is an autogenerated mock type for the CorruptNetwork_ConnectAttackerServer type -type CorruptNetwork_ConnectAttackerServer struct { - mock.Mock -} - -// Context provides a mock function with given fields: -func (_m *CorruptNetwork_ConnectAttackerServer) Context() context.Context { - ret := _m.Called() - - var r0 context.Context - if rf, ok := ret.Get(0).(func() context.Context); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(context.Context) - } - } - - return r0 -} - -// RecvMsg provides a mock function with given fields: m -func (_m *CorruptNetwork_ConnectAttackerServer) RecvMsg(m interface{}) error { - ret := _m.Called(m) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(m) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Send provides a mock function with given fields: _a0 -func (_m *CorruptNetwork_ConnectAttackerServer) Send(_a0 *insecure.Message) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*insecure.Message) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SendHeader provides a mock function with given fields: _a0 -func (_m *CorruptNetwork_ConnectAttackerServer) SendHeader(_a0 metadata.MD) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(metadata.MD) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SendMsg provides a mock function with given fields: m -func (_m *CorruptNetwork_ConnectAttackerServer) SendMsg(m interface{}) error { - ret := _m.Called(m) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(m) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetHeader provides a mock function with given fields: _a0 -func (_m *CorruptNetwork_ConnectAttackerServer) SetHeader(_a0 metadata.MD) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(metadata.MD) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetTrailer provides a mock function with given fields: _a0 -func (_m *CorruptNetwork_ConnectAttackerServer) SetTrailer(_a0 metadata.MD) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewCorruptNetwork_ConnectAttackerServer interface { - mock.TestingT - Cleanup(func()) -} - -// NewCorruptNetwork_ConnectAttackerServer creates a new instance of CorruptNetwork_ConnectAttackerServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewCorruptNetwork_ConnectAttackerServer(t mockConstructorTestingTNewCorruptNetwork_ConnectAttackerServer) *CorruptNetwork_ConnectAttackerServer { - mock := &CorruptNetwork_ConnectAttackerServer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/insecure/mock/corrupt_network__process_attacker_message_client.go b/insecure/mock/corrupt_network__process_attacker_message_client.go deleted file mode 100644 index ef61ab21a14..00000000000 --- a/insecure/mock/corrupt_network__process_attacker_message_client.go +++ /dev/null @@ -1,174 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockinsecure - -import ( - context "context" - - insecure "github.com/onflow/flow-go/insecure" - emptypb "google.golang.org/protobuf/types/known/emptypb" - - metadata "google.golang.org/grpc/metadata" - - mock "github.com/stretchr/testify/mock" -) - -// CorruptNetwork_ProcessAttackerMessageClient is an autogenerated mock type for the CorruptNetwork_ProcessAttackerMessageClient type -type CorruptNetwork_ProcessAttackerMessageClient struct { - mock.Mock -} - -// CloseAndRecv provides a mock function with given fields: -func (_m *CorruptNetwork_ProcessAttackerMessageClient) CloseAndRecv() (*emptypb.Empty, error) { - ret := _m.Called() - - var r0 *emptypb.Empty - var r1 error - if rf, ok := ret.Get(0).(func() (*emptypb.Empty, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *emptypb.Empty); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*emptypb.Empty) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CloseSend provides a mock function with given fields: -func (_m *CorruptNetwork_ProcessAttackerMessageClient) CloseSend() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Context provides a mock function with given fields: -func (_m *CorruptNetwork_ProcessAttackerMessageClient) Context() context.Context { - ret := _m.Called() - - var r0 context.Context - if rf, ok := ret.Get(0).(func() context.Context); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(context.Context) - } - } - - return r0 -} - -// Header provides a mock function with given fields: -func (_m *CorruptNetwork_ProcessAttackerMessageClient) Header() (metadata.MD, error) { - ret := _m.Called() - - var r0 metadata.MD - var r1 error - if rf, ok := ret.Get(0).(func() (metadata.MD, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() metadata.MD); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(metadata.MD) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RecvMsg provides a mock function with given fields: m -func (_m *CorruptNetwork_ProcessAttackerMessageClient) RecvMsg(m interface{}) error { - ret := _m.Called(m) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(m) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Send provides a mock function with given fields: _a0 -func (_m *CorruptNetwork_ProcessAttackerMessageClient) Send(_a0 *insecure.Message) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*insecure.Message) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SendMsg provides a mock function with 
given fields: m -func (_m *CorruptNetwork_ProcessAttackerMessageClient) SendMsg(m interface{}) error { - ret := _m.Called(m) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(m) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Trailer provides a mock function with given fields: -func (_m *CorruptNetwork_ProcessAttackerMessageClient) Trailer() metadata.MD { - ret := _m.Called() - - var r0 metadata.MD - if rf, ok := ret.Get(0).(func() metadata.MD); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(metadata.MD) - } - } - - return r0 -} - -type mockConstructorTestingTNewCorruptNetwork_ProcessAttackerMessageClient interface { - mock.TestingT - Cleanup(func()) -} - -// NewCorruptNetwork_ProcessAttackerMessageClient creates a new instance of CorruptNetwork_ProcessAttackerMessageClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptNetwork_ProcessAttackerMessageClient(t mockConstructorTestingTNewCorruptNetwork_ProcessAttackerMessageClient) *CorruptNetwork_ProcessAttackerMessageClient { - mock := &CorruptNetwork_ProcessAttackerMessageClient{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/insecure/mock/corrupt_network__process_attacker_message_server.go b/insecure/mock/corrupt_network__process_attacker_message_server.go deleted file mode 100644 index 00339fd8a42..00000000000 --- a/insecure/mock/corrupt_network__process_attacker_message_server.go +++ /dev/null @@ -1,151 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockinsecure - -import ( - context "context" - - insecure "github.com/onflow/flow-go/insecure" - emptypb "google.golang.org/protobuf/types/known/emptypb" - - metadata "google.golang.org/grpc/metadata" - - mock "github.com/stretchr/testify/mock" -) - -// CorruptNetwork_ProcessAttackerMessageServer is an autogenerated mock type for the CorruptNetwork_ProcessAttackerMessageServer type -type CorruptNetwork_ProcessAttackerMessageServer struct { - mock.Mock -} - -// Context provides a mock function with given fields: -func (_m *CorruptNetwork_ProcessAttackerMessageServer) Context() context.Context { - ret := _m.Called() - - var r0 context.Context - if rf, ok := ret.Get(0).(func() context.Context); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(context.Context) - } - } - - return r0 -} - -// Recv provides a mock function with given fields: -func (_m *CorruptNetwork_ProcessAttackerMessageServer) Recv() (*insecure.Message, error) { - ret := _m.Called() - - var r0 *insecure.Message - var r1 error - if rf, ok := ret.Get(0).(func() (*insecure.Message, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *insecure.Message); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*insecure.Message) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RecvMsg provides a mock function with given fields: m -func (_m *CorruptNetwork_ProcessAttackerMessageServer) RecvMsg(m interface{}) error { - ret := _m.Called(m) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(m) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SendAndClose provides a mock function with given fields: _a0 -func (_m *CorruptNetwork_ProcessAttackerMessageServer) SendAndClose(_a0 *emptypb.Empty) error { - ret := _m.Called(_a0) - - var r0 error - 
if rf, ok := ret.Get(0).(func(*emptypb.Empty) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SendHeader provides a mock function with given fields: _a0 -func (_m *CorruptNetwork_ProcessAttackerMessageServer) SendHeader(_a0 metadata.MD) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(metadata.MD) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SendMsg provides a mock function with given fields: m -func (_m *CorruptNetwork_ProcessAttackerMessageServer) SendMsg(m interface{}) error { - ret := _m.Called(m) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(m) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetHeader provides a mock function with given fields: _a0 -func (_m *CorruptNetwork_ProcessAttackerMessageServer) SetHeader(_a0 metadata.MD) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(metadata.MD) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetTrailer provides a mock function with given fields: _a0 -func (_m *CorruptNetwork_ProcessAttackerMessageServer) SetTrailer(_a0 metadata.MD) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewCorruptNetwork_ProcessAttackerMessageServer interface { - mock.TestingT - Cleanup(func()) -} - -// NewCorruptNetwork_ProcessAttackerMessageServer creates a new instance of CorruptNetwork_ProcessAttackerMessageServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptNetwork_ProcessAttackerMessageServer(t mockConstructorTestingTNewCorruptNetwork_ProcessAttackerMessageServer) *CorruptNetwork_ProcessAttackerMessageServer { - mock := &CorruptNetwork_ProcessAttackerMessageServer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/insecure/mock/corrupt_network_client.go b/insecure/mock/corrupt_network_client.go deleted file mode 100644 index b7f1b3c0f00..00000000000 --- a/insecure/mock/corrupt_network_client.go +++ /dev/null @@ -1,100 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockinsecure - -import ( - context "context" - - grpc "google.golang.org/grpc" - emptypb "google.golang.org/protobuf/types/known/emptypb" - - insecure "github.com/onflow/flow-go/insecure" - - mock "github.com/stretchr/testify/mock" -) - -// CorruptNetworkClient is an autogenerated mock type for the CorruptNetworkClient type -type CorruptNetworkClient struct { - mock.Mock -} - -// ConnectAttacker provides a mock function with given fields: ctx, in, opts -func (_m *CorruptNetworkClient) ConnectAttacker(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (insecure.CorruptNetwork_ConnectAttackerClient, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 insecure.CorruptNetwork_ConnectAttackerClient - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *emptypb.Empty, ...grpc.CallOption) (insecure.CorruptNetwork_ConnectAttackerClient, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *emptypb.Empty, ...grpc.CallOption) insecure.CorruptNetwork_ConnectAttackerClient); ok { - r0 = rf(ctx, in, opts...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(insecure.CorruptNetwork_ConnectAttackerClient) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *emptypb.Empty, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ProcessAttackerMessage provides a mock function with given fields: ctx, opts -func (_m *CorruptNetworkClient) ProcessAttackerMessage(ctx context.Context, opts ...grpc.CallOption) (insecure.CorruptNetwork_ProcessAttackerMessageClient, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 insecure.CorruptNetwork_ProcessAttackerMessageClient - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ...grpc.CallOption) (insecure.CorruptNetwork_ProcessAttackerMessageClient, error)); ok { - return rf(ctx, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, ...grpc.CallOption) insecure.CorruptNetwork_ProcessAttackerMessageClient); ok { - r0 = rf(ctx, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(insecure.CorruptNetwork_ProcessAttackerMessageClient) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ...grpc.CallOption) error); ok { - r1 = rf(ctx, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewCorruptNetworkClient interface { - mock.TestingT - Cleanup(func()) -} - -// NewCorruptNetworkClient creates a new instance of CorruptNetworkClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptNetworkClient(t mockConstructorTestingTNewCorruptNetworkClient) *CorruptNetworkClient { - mock := &CorruptNetworkClient{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/insecure/mock/corrupt_network_server.go b/insecure/mock/corrupt_network_server.go deleted file mode 100644 index 3ba497383d8..00000000000 --- a/insecure/mock/corrupt_network_server.go +++ /dev/null @@ -1,58 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mockinsecure - -import ( - insecure "github.com/onflow/flow-go/insecure" - emptypb "google.golang.org/protobuf/types/known/emptypb" - - mock "github.com/stretchr/testify/mock" -) - -// CorruptNetworkServer is an autogenerated mock type for the CorruptNetworkServer type -type CorruptNetworkServer struct { - mock.Mock -} - -// ConnectAttacker provides a mock function with given fields: _a0, _a1 -func (_m *CorruptNetworkServer) ConnectAttacker(_a0 *emptypb.Empty, _a1 insecure.CorruptNetwork_ConnectAttackerServer) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(*emptypb.Empty, insecure.CorruptNetwork_ConnectAttackerServer) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ProcessAttackerMessage provides a mock function with given fields: _a0 -func (_m *CorruptNetworkServer) ProcessAttackerMessage(_a0 insecure.CorruptNetwork_ProcessAttackerMessageServer) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(insecure.CorruptNetwork_ProcessAttackerMessageServer) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewCorruptNetworkServer interface { - mock.TestingT - Cleanup(func()) -} - -// NewCorruptNetworkServer creates a new instance of CorruptNetworkServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptNetworkServer(t mockConstructorTestingTNewCorruptNetworkServer) *CorruptNetworkServer { - mock := &CorruptNetworkServer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/insecure/mock/corrupted_node_connection.go b/insecure/mock/corrupted_node_connection.go index b5839b26941..102a76a57a0 100644 --- a/insecure/mock/corrupted_node_connection.go +++ b/insecure/mock/corrupted_node_connection.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockinsecure +package mock import ( insecure "github.com/onflow/flow-go/insecure" @@ -12,10 +12,14 @@ type CorruptedNodeConnection struct { mock.Mock } -// CloseConnection provides a mock function with given fields: +// CloseConnection provides a mock function with no fields func (_m *CorruptedNodeConnection) CloseConnection() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for CloseConnection") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -30,6 +34,10 @@ func (_m *CorruptedNodeConnection) CloseConnection() error { func (_m *CorruptedNodeConnection) SendMessage(_a0 *insecure.Message) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for SendMessage") + } + var r0 error if rf, ok := ret.Get(0).(func(*insecure.Message) error); ok { r0 = rf(_a0) @@ -40,13 +48,12 @@ func (_m *CorruptedNodeConnection) SendMessage(_a0 *insecure.Message) error { return r0 } -type mockConstructorTestingTNewCorruptedNodeConnection interface { +// NewCorruptedNodeConnection creates a new instance of CorruptedNodeConnection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCorruptedNodeConnection(t interface { mock.TestingT Cleanup(func()) -} - -// NewCorruptedNodeConnection creates a new instance of CorruptedNodeConnection. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptedNodeConnection(t mockConstructorTestingTNewCorruptedNodeConnection) *CorruptedNodeConnection { +}) *CorruptedNodeConnection { mock := &CorruptedNodeConnection{} mock.Mock.Test(t) diff --git a/insecure/mock/corrupted_node_connector.go b/insecure/mock/corrupted_node_connector.go index 93b5535a6b8..e55c4852ef3 100644 --- a/insecure/mock/corrupted_node_connector.go +++ b/insecure/mock/corrupted_node_connector.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockinsecure +package mock import ( insecure "github.com/onflow/flow-go/insecure" @@ -20,6 +20,10 @@ type CorruptedNodeConnector struct { func (_m *CorruptedNodeConnector) Connect(_a0 irrecoverable.SignalerContext, _a1 flow.Identifier) (insecure.CorruptedNodeConnection, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Connect") + } + var r0 insecure.CorruptedNodeConnection var r1 error if rf, ok := ret.Get(0).(func(irrecoverable.SignalerContext, flow.Identifier) (insecure.CorruptedNodeConnection, error)); ok { @@ -47,13 +51,12 @@ func (_m *CorruptedNodeConnector) WithIncomingMessageHandler(_a0 func(*insecure. _m.Called(_a0) } -type mockConstructorTestingTNewCorruptedNodeConnector interface { +// NewCorruptedNodeConnector creates a new instance of CorruptedNodeConnector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCorruptedNodeConnector(t interface { mock.TestingT Cleanup(func()) -} - -// NewCorruptedNodeConnector creates a new instance of CorruptedNodeConnector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCorruptedNodeConnector(t mockConstructorTestingTNewCorruptedNodeConnector) *CorruptedNodeConnector { +}) *CorruptedNodeConnector { mock := &CorruptedNodeConnector{} mock.Mock.Test(t) diff --git a/insecure/mock/egress_controller.go b/insecure/mock/egress_controller.go index 8f332bdf74e..9fd8e9f25a1 100644 --- a/insecure/mock/egress_controller.go +++ b/insecure/mock/egress_controller.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockinsecure +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -20,6 +20,10 @@ type EgressController struct { func (_m *EgressController) EngineClosingChannel(_a0 channels.Channel) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for EngineClosingChannel") + } + var r0 error if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { r0 = rf(_a0) @@ -41,6 +45,10 @@ func (_m *EgressController) HandleOutgoingEvent(_a0 interface{}, _a1 channels.Ch _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for HandleOutgoingEvent") + } + var r0 error if rf, ok := ret.Get(0).(func(interface{}, channels.Channel, insecure.Protocol, uint32, ...flow.Identifier) error); ok { r0 = rf(_a0, _a1, _a2, _a3, _a4...) @@ -51,13 +59,12 @@ func (_m *EgressController) HandleOutgoingEvent(_a0 interface{}, _a1 channels.Ch return r0 } -type mockConstructorTestingTNewEgressController interface { +// NewEgressController creates a new instance of EgressController. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEgressController(t interface { mock.TestingT Cleanup(func()) -} - -// NewEgressController creates a new instance of EgressController. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEgressController(t mockConstructorTestingTNewEgressController) *EgressController { +}) *EgressController { mock := &EgressController{} mock.Mock.Test(t) diff --git a/insecure/mock/ingress_controller.go b/insecure/mock/ingress_controller.go index 16efd7a1f17..8cdf1c0ab3d 100644 --- a/insecure/mock/ingress_controller.go +++ b/insecure/mock/ingress_controller.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockinsecure +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -18,6 +18,10 @@ type IngressController struct { func (_m *IngressController) HandleIncomingEvent(_a0 interface{}, _a1 channels.Channel, _a2 flow.Identifier) bool { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for HandleIncomingEvent") + } + var r0 bool if rf, ok := ret.Get(0).(func(interface{}, channels.Channel, flow.Identifier) bool); ok { r0 = rf(_a0, _a1, _a2) @@ -28,13 +32,12 @@ func (_m *IngressController) HandleIncomingEvent(_a0 interface{}, _a1 channels.C return r0 } -type mockConstructorTestingTNewIngressController interface { +// NewIngressController creates a new instance of IngressController. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIngressController(t interface { mock.TestingT Cleanup(func()) -} - -// NewIngressController creates a new instance of IngressController. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewIngressController(t mockConstructorTestingTNewIngressController) *IngressController { +}) *IngressController { mock := &IngressController{} mock.Mock.Test(t) diff --git a/insecure/mock/orchestrator_network.go b/insecure/mock/orchestrator_network.go index c00c42d6185..760bbe7a9d1 100644 --- a/insecure/mock/orchestrator_network.go +++ b/insecure/mock/orchestrator_network.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mockinsecure +package mock import ( insecure "github.com/onflow/flow-go/insecure" @@ -14,10 +14,14 @@ type OrchestratorNetwork struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *OrchestratorNetwork) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -35,10 +39,14 @@ func (_m *OrchestratorNetwork) Observe(_a0 *insecure.Message) { _m.Called(_a0) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *OrchestratorNetwork) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -55,6 +63,10 @@ func (_m *OrchestratorNetwork) Ready() <-chan struct{} { func (_m *OrchestratorNetwork) SendEgress(_a0 *insecure.EgressEvent) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for SendEgress") + } + var r0 error if rf, ok := ret.Get(0).(func(*insecure.EgressEvent) error); ok { r0 = rf(_a0) @@ -69,6 +81,10 @@ func (_m *OrchestratorNetwork) SendEgress(_a0 *insecure.EgressEvent) error { func (_m *OrchestratorNetwork) SendIngress(_a0 *insecure.IngressEvent) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for SendIngress") + } + var r0 error if rf, ok := ret.Get(0).(func(*insecure.IngressEvent) error); ok { r0 = rf(_a0) @@ -84,13 +100,12 @@ func (_m *OrchestratorNetwork) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewOrchestratorNetwork interface { +// NewOrchestratorNetwork creates a new instance of OrchestratorNetwork. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOrchestratorNetwork(t interface { mock.TestingT Cleanup(func()) -} - -// NewOrchestratorNetwork creates a new instance of OrchestratorNetwork. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewOrchestratorNetwork(t mockConstructorTestingTNewOrchestratorNetwork) *OrchestratorNetwork { +}) *OrchestratorNetwork { mock := &OrchestratorNetwork{} mock.Mock.Test(t) diff --git a/insecure/network.pb.go b/insecure/network.pb.go index 8461d3aff54..461c20b6643 100644 --- a/insecure/network.pb.go +++ b/insecure/network.pb.go @@ -6,12 +6,13 @@ package insecure import ( context "context" fmt "fmt" + math "math" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" emptypb "google.golang.org/protobuf/types/known/emptypb" - math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
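Taken together, the mock regenerations above make three mechanical changes: the package is renamed from mockinsecure to mock, the one-off mockConstructorTestingT* interfaces are inlined into the constructor signatures, and every mocked call now panics with "no return value specified for <Method>" when an expectation matches but no return value was configured, instead of silently yielding zero values. Below is a minimal sketch of driving one of the regenerated mocks under testify; the test name, package clause, and message value are illustrative and not taken from this repository:

```go
package insecure_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/insecure"
	// the generated package is now named "mock", so alias it to avoid
	// colliding with testify's mock package
	insecuremock "github.com/onflow/flow-go/insecure/mock"
)

func TestCorruptedNodeConnection_SendMessage(t *testing.T) {
	// The constructor accepts any value satisfying mock.TestingT plus
	// Cleanup(func()) — typically *testing.T — and registers
	// AssertExpectations via t.Cleanup.
	conn := insecuremock.NewCorruptedNodeConnection(t)

	// A return value must be configured: with the regenerated code, a
	// matched expectation with no configured returns panics
	// ("no return value specified for SendMessage").
	conn.On("SendMessage", mock.AnythingOfType("*insecure.Message")).Return(nil)

	require.NoError(t, conn.SendMessage(&insecure.Message{}))
}
```

The panic guard trades a silent zero-value return for a fail-fast signal, so a partially configured mock surfaces immediately in the failing test rather than as a confusing downstream nil result.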
diff --git a/insecure/orchestrator/network.go b/insecure/orchestrator/network.go index a41a4781c2c..11e24874e79 100644 --- a/insecure/orchestrator/network.go +++ b/insecure/orchestrator/network.go @@ -13,6 +13,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" + flownetmsg "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/utils/logging" ) @@ -159,7 +160,7 @@ func (on *Network) processEgressMessage(message *insecure.EgressMessage) error { channel := channels.Channel(message.ChannelID) - egressEventIDHash, err := network.EventId(channel, message.Payload) + egressEventIDHash, err := flownetmsg.EventId(channel, message.Payload) if err != nil { return fmt.Errorf("could not create egress event ID: %w", err) } @@ -205,7 +206,7 @@ func (on *Network) processIngressMessage(message *insecure.IngressMessage) error defer on.orchestratorMutex.Unlock() channel := channels.Channel(message.ChannelID) - ingressEventIDHash, err := network.EventId(channel, message.Payload) + ingressEventIDHash, err := flownetmsg.EventId(channel, message.Payload) if err != nil { return fmt.Errorf("could not create ingress event ID: %w", err) } diff --git a/insecure/rpc_inspector/metrics_inspector_test.go b/insecure/rpc_inspector/metrics_inspector_test.go deleted file mode 100644 index 4b7147d946b..00000000000 --- a/insecure/rpc_inspector/metrics_inspector_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package rpc_inspector - -import ( - "context" - "testing" - "time" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" - - "github.com/onflow/flow-go/insecure/corruptlibp2p" - "github.com/onflow/flow-go/insecure/internal" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/inspector" - mockp2p "github.com/onflow/flow-go/network/p2p/mock" - p2ptest "github.com/onflow/flow-go/network/p2p/test" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestMetricsInspector_ObserveRPC ensures that the gossipsub rpc metrics inspector observes metrics for control messages as expected. -func TestMetricsInspector_ObserveRPC(t *testing.T) { - t.Parallel() - role := flow.RoleConsensus - sporkID := unittest.IdentifierFixture() - spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role) - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - messageCount := 100 - controlMessageCount := 5 - - metricsObservedCount := atomic.NewInt64(0) - mockMetricsObserver := mockp2p.NewGossipSubControlMetricsObserver(t) - mockMetricsObserver.On("ObserveRPC", mock.Anything, mock.Anything). 
- Run(func(args mock.Arguments) { - peerID, ok := args.Get(0).(peer.ID) - require.True(t, ok) - require.Equal(t, spammer.SpammerNode.Host().ID(), peerID) - rpc, ok := args.Get(1).(*pubsub.RPC) - require.True(t, ok) - // there are some default rpc messages exchanged between the nodes on startup - // we can ignore those rpc messages not configured directly by this test - if len(rpc.GetControl().GetPrune()) != 100 { - return - } - require.True(t, messageCount == len(rpc.GetControl().GetPrune())) - require.True(t, messageCount == len(rpc.GetControl().GetGraft())) - require.True(t, messageCount == len(rpc.GetControl().GetIhave())) - metricsObservedCount.Inc() - }) - metricsInspector := inspector.NewControlMsgMetricsInspector(unittest.Logger(), mockMetricsObserver, 2) - corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(metricsInspector) - victimNode, _ := p2ptest.NodeFixture( - t, - sporkID, - t.Name(), - p2ptest.WithRole(role), - internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), - corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc)), - ) - metricsInspector.Start(signalerCtx) - nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} - startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) - spammer.Start(t) - defer stopNodesAndInspector(t, cancel, nodes, metricsInspector) - // prepare to spam - generate control messages - ctlMsgs := spammer.GenerateCtlMessages(controlMessageCount, - corruptlibp2p.WithGraft(messageCount, channels.PushBlocks.String()), - corruptlibp2p.WithPrune(messageCount, channels.PushBlocks.String()), - corruptlibp2p.WithIHave(messageCount, 1000)) - - // start spamming the victim peer - spammer.SpamControlMessage(t, victimNode, ctlMsgs) - - // eventually we should process each spammed control message and observe metrics for them - require.Eventually(t, func() bool { - return metricsObservedCount.Load() == int64(controlMessageCount) - }, 5*time.Second, 10*time.Millisecond, "did not observe metrics for all control messages on time") -} diff --git a/insecure/rpc_inspector/utils.go b/insecure/rpc_inspector/utils.go deleted file mode 100644 index 02cf9492f7c..00000000000 --- a/insecure/rpc_inspector/utils.go +++ /dev/null @@ -1,32 +0,0 @@ -package rpc_inspector - -import ( - "context" - "testing" - "time" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/p2p" - p2ptest "github.com/onflow/flow-go/network/p2p/test" - "github.com/onflow/flow-go/utils/unittest" -) - -// StartNodesAndEnsureConnected starts the victim and spammer node and ensures they are both connected. -func startNodesAndEnsureConnected(t *testing.T, ctx irrecoverable.SignalerContext, nodes []p2p.LibP2PNode, sporkID flow.Identifier) { - p2ptest.StartNodes(t, ctx, nodes, 5*time.Second) - // prior to the test we should ensure that spammer and victim connect. - // this is vital as the spammer will circumvent the normal pubsub subscription mechanism and send iHAVE messages directly to the victim. - // without a prior connection established, directly spamming pubsub messages may cause a race condition in the pubsub implementation. 
- p2ptest.EnsureConnected(t, ctx, nodes) - p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkID) - return unittest.ProposalFixture(), blockTopic - }) -} - -func stopNodesAndInspector(t *testing.T, cancel context.CancelFunc, nodes []p2p.LibP2PNode, inspector p2p.GossipSubRPCInspector) { - p2ptest.StopNodes(t, nodes, cancel, 5*time.Second) - unittest.RequireComponentsDoneBefore(t, time.Second, inspector) -} diff --git a/insecure/rpc_inspector/validation_inspector_test.go b/insecure/rpc_inspector/validation_inspector_test.go deleted file mode 100644 index b0363fc1214..00000000000 --- a/insecure/rpc_inspector/validation_inspector_test.go +++ /dev/null @@ -1,448 +0,0 @@ -package rpc_inspector - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - pb "github.com/libp2p/go-libp2p-pubsub/pb" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/rs/zerolog" - mockery "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub" - "go.uber.org/atomic" - - "github.com/onflow/flow-go/insecure/corruptlibp2p" - "github.com/onflow/flow-go/insecure/internal" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/id" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/inspector/validation" - mockp2p "github.com/onflow/flow-go/network/p2p/mock" - "github.com/onflow/flow-go/network/p2p/p2pbuilder/inspector" - p2ptest "github.com/onflow/flow-go/network/p2p/test" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestValidationInspector_SafetyThreshold ensures that when RPC control message count is below the configured safety threshold the control message validation inspector -// does not return any errors and validation is skipped. 
-func TestValidationInspector_SafetyThreshold(t *testing.T) { - t.Parallel() - role := flow.RoleConsensus - sporkID := unittest.IdentifierFixture() - spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role) - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - // if GRAFT/PRUNE message count is lower than safety threshold the RPC validation should pass - safetyThreshold := uint64(10) - // create our RPC validation inspector - inspectorConfig := inspector.DefaultRPCValidationConfig() - inspectorConfig.NumberOfWorkers = 1 - inspectorConfig.GraftValidationCfg.SafetyThreshold = safetyThreshold - inspectorConfig.PruneValidationCfg.SafetyThreshold = safetyThreshold - - messageCount := 5 - controlMessageCount := int64(2) - - // expected log message logged when valid number GRAFT control messages spammed under safety threshold - graftExpectedMessageStr := fmt.Sprintf("control message %s inspection passed 5 is below configured safety threshold", p2p.CtrlMsgGraft) - // expected log message logged when valid number PRUNE control messages spammed under safety threshold - pruneExpectedMessageStr := fmt.Sprintf("control message %s inspection passed 5 is below configured safety threshold", p2p.CtrlMsgGraft) - - graftInfoLogsReceived := atomic.NewInt64(0) - pruneInfoLogsReceived := atomic.NewInt64(0) - - // setup logger hook, we expect info log validation is skipped - hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.TraceLevel { - if message == graftExpectedMessageStr { - graftInfoLogsReceived.Inc() - } - - if message == pruneExpectedMessageStr { - pruneInfoLogsReceived.Inc() - } - } - }) - logger := zerolog.New(os.Stdout).Hook(hook) - distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) - mockDistributorReadyDoneAware(distributor) - defer distributor.AssertNotCalled(t, "Distribute", mockery.Anything) - inspector := validation.NewControlMsgValidationInspector(logger, sporkID, inspectorConfig, distributor) - corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(inspector) - victimNode, _ := p2ptest.NodeFixture( - t, - sporkID, - t.Name(), - p2ptest.WithRole(role), - internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), - corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc)), - ) - inspector.Start(signalerCtx) - nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} - startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) - spammer.Start(t) - defer stopNodesAndInspector(t, cancel, nodes, inspector) - // prepare to spam - generate control messages - ctlMsgs := spammer.GenerateCtlMessages(int(controlMessageCount), - corruptlibp2p.WithGraft(messageCount, channels.PushBlocks.String()), - corruptlibp2p.WithPrune(messageCount, channels.PushBlocks.String())) - - // start spamming the victim peer - spammer.SpamControlMessage(t, victimNode, ctlMsgs) - - // eventually we should receive 2 info logs each for GRAFT inspection and PRUNE inspection - require.Eventually(t, func() bool { - return graftInfoLogsReceived.Load() == controlMessageCount && pruneInfoLogsReceived.Load() == controlMessageCount - }, 2*time.Second, 10*time.Millisecond) -} - -// TestValidationInspector_DiscardThreshold ensures that when RPC control message count is above the configured discard threshold the control message validation inspector -// returns the expected error. 
-func TestValidationInspector_DiscardThreshold(t *testing.T) { - t.Parallel() - role := flow.RoleConsensus - sporkID := unittest.IdentifierFixture() - spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role) - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - // if GRAFT/PRUNE message count is higher than discard threshold the RPC validation should fail and expected error should be returned - discardThreshold := uint64(10) - // create our RPC validation inspector - inspectorConfig := inspector.DefaultRPCValidationConfig() - inspectorConfig.NumberOfWorkers = 1 - inspectorConfig.GraftValidationCfg.DiscardThreshold = discardThreshold - inspectorConfig.PruneValidationCfg.DiscardThreshold = discardThreshold - - messageCount := 50 - controlMessageCount := int64(1) - logger := unittest.Logger() - distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) - mockDistributorReadyDoneAware(distributor) - count := atomic.NewInt64(0) - done := make(chan struct{}) - distributor.On("Distribute", mockery.Anything). - Twice(). - Run(func(args mockery.Arguments) { - count.Inc() - notification, ok := args[0].(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - require.Equal(t, spammer.SpammerNode.Host().ID(), notification.PeerID) - require.True(t, validation.IsErrDiscardThreshold(notification.Err)) - require.Equal(t, uint64(messageCount), notification.Count) - require.True(t, notification.MsgType == p2p.CtrlMsgGraft || notification.MsgType == p2p.CtrlMsgPrune) - if count.Load() == 2 { - close(done) - } - }).Return(nil) - inspector := validation.NewControlMsgValidationInspector(logger, sporkID, inspectorConfig, distributor) - // we use inline inspector here so that we can check the error type when we inspect an RPC and - // track which control message type the error involves - inlineInspector := func(id peer.ID, rpc *corrupt.RPC) error { - pubsubRPC := corruptlibp2p.CorruptRPCToPubSubRPC(rpc) - return inspector.Inspect(id, pubsubRPC) - } - victimNode, _ := p2ptest.NodeFixture( - t, - sporkID, - t.Name(), - p2ptest.WithRole(role), - internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), - corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(inlineInspector)), - ) - - inspector.Start(signalerCtx) - nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} - startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) - spammer.Start(t) - defer stopNodesAndInspector(t, cancel, nodes, inspector) - - // prepare to spam - generate control messages - graftCtlMsgs := spammer.GenerateCtlMessages(int(controlMessageCount), corruptlibp2p.WithGraft(messageCount, channels.PushBlocks.String())) - pruneCtlMsgs := spammer.GenerateCtlMessages(int(controlMessageCount), corruptlibp2p.WithPrune(messageCount, channels.PushBlocks.String())) - - // start spamming the victim peer - spammer.SpamControlMessage(t, victimNode, graftCtlMsgs) - spammer.SpamControlMessage(t, victimNode, pruneCtlMsgs) - - unittest.RequireCloseBefore(t, done, 2*time.Second, "failed to inspect RPC messages on time") -} - -// TestValidationInspector_RateLimitedPeer ensures that the control message validation inspector rate limits peers per control message type as expected. 
-func TestValidationInspector_RateLimitedPeer(t *testing.T) { - t.Parallel() - role := flow.RoleConsensus - sporkID := unittest.IdentifierFixture() - spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role) - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - // create our RPC validation inspector - inspectorConfig := inspector.DefaultRPCValidationConfig() - inspectorConfig.NumberOfWorkers = 1 - - // here we set the message count to the amount of flow channels - // so that we can generate a valid ctl msg with all valid topics. - flowChannels := channels.Channels() - messageCount := flowChannels.Len() - controlMessageCount := int64(1) - - distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) - mockDistributorReadyDoneAware(distributor) - count := atomic.NewInt64(0) - done := make(chan struct{}) - distributor.On("Distribute", mockery.Anything). - Times(4). - Run(func(args mockery.Arguments) { - count.Inc() - notification, ok := args[0].(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - require.Equal(t, spammer.SpammerNode.Host().ID(), notification.PeerID) - require.True(t, validation.IsErrRateLimitedControlMsg(notification.Err)) - require.Equal(t, uint64(messageCount), notification.Count) - require.True(t, notification.MsgType == p2p.CtrlMsgGraft || notification.MsgType == p2p.CtrlMsgPrune) - if count.Load() == 4 { - close(done) - } - }).Return(nil) - inspector := validation.NewControlMsgValidationInspector(unittest.Logger(), sporkID, inspectorConfig, distributor) - corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(inspector) - victimNode, _ := p2ptest.NodeFixture( - t, - sporkID, - t.Name(), - p2ptest.WithRole(role), - internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), - corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc)), - ) - - inspector.Start(signalerCtx) - nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} - startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) - spammer.Start(t) - defer stopNodesAndInspector(t, cancel, nodes, inspector) - - // the first time we spam this message it will be processed completely so we need to ensure - // all topics are valid and no duplicates exists. - validCtlMsgs := spammer.GenerateCtlMessages(int(controlMessageCount), func(message *pb.ControlMessage) { - grafts := make([]*pb.ControlGraft, messageCount) - prunes := make([]*pb.ControlPrune, messageCount) - for i := 0; i < messageCount; i++ { - topic := fmt.Sprintf("%s/%s", flowChannels[i].String(), sporkID) - grafts[i] = &pb.ControlGraft{TopicID: &topic} - prunes[i] = &pb.ControlPrune{TopicID: &topic} - } - message.Graft = grafts - message.Prune = prunes - }) - - // start spamming the victim peer - for i := 0; i < 3; i++ { - spammer.SpamControlMessage(t, victimNode, validCtlMsgs) - } - - unittest.RequireCloseBefore(t, done, 2*time.Second, "failed to inspect RPC messages on time") -} - -// TestValidationInspector_InvalidTopicID ensures that when an RPC control message contains an invalid topic ID the expected error is logged. 
-func TestValidationInspector_InvalidTopicID(t *testing.T) { - t.Parallel() - role := flow.RoleConsensus - sporkID := unittest.IdentifierFixture() - spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, role) - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - // if GRAFT/PRUNE message count is higher than discard threshold the RPC validation should fail and expected error should be returned - // create our RPC validation inspector - inspectorConfig := inspector.DefaultRPCValidationConfig() - inspectorConfig.PruneValidationCfg.SafetyThreshold = 0 - inspectorConfig.GraftValidationCfg.SafetyThreshold = 0 - inspectorConfig.NumberOfWorkers = 1 - - // SafetyThreshold < messageCount < DiscardThreshold ensures that the RPC message will be further inspected and topic IDs will be checked - // restricting the message count to 1 allows us to only aggregate a single error when the error is logged in the inspector. - messageCount := inspectorConfig.GraftValidationCfg.SafetyThreshold + 1 - controlMessageCount := int64(1) - unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", corruptlibp2p.GossipSubTopicIdFixture(), sporkID)) - malformedTopic := channels.Topic("!@#$%^&**((") - // a topics spork ID is considered invalid if it does not match the current spork ID - invalidSporkIDTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, unittest.IdentifierFixture())) - duplicateTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, sporkID)) - - distributor := mockp2p.NewGossipSubInspectorNotificationDistributor(t) - mockDistributorReadyDoneAware(distributor) - count := atomic.NewInt64(0) - done := make(chan struct{}) - distributor.On("Distribute", mockery.Anything). - Times(8). 
- Run(func(args mockery.Arguments) { - count.Inc() - notification, ok := args[0].(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - require.Equal(t, spammer.SpammerNode.Host().ID(), notification.PeerID) - require.True(t, validation.IsErrInvalidTopic(notification.Err) || validation.IsErrDuplicateTopic(notification.Err)) - require.True(t, messageCount == notification.Count || notification.Count == 3) - require.True(t, notification.MsgType == p2p.CtrlMsgGraft || notification.MsgType == p2p.CtrlMsgPrune) - if count.Load() == 8 { - close(done) - } - }).Return(nil) - inspector := validation.NewControlMsgValidationInspector(unittest.Logger(), sporkID, inspectorConfig, distributor) - corruptInspectorFunc := corruptlibp2p.CorruptInspectorFunc(inspector) - victimNode, _ := p2ptest.NodeFixture( - t, - sporkID, - t.Name(), - p2ptest.WithRole(role), - internal.WithCorruptGossipSub(corruptlibp2p.CorruptGossipSubFactory(), - corruptlibp2p.CorruptGossipSubConfigFactoryWithInspector(corruptInspectorFunc)), - ) - - inspector.Start(signalerCtx) - nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} - startNodesAndEnsureConnected(t, signalerCtx, nodes, sporkID) - spammer.Start(t) - defer stopNodesAndInspector(t, cancel, nodes, inspector) - - // prepare to spam - generate control messages - graftCtlMsgsWithUnknownTopic := spammer.GenerateCtlMessages(int(controlMessageCount), corruptlibp2p.WithGraft(int(messageCount), unknownTopic.String())) - graftCtlMsgsWithMalformedTopic := spammer.GenerateCtlMessages(int(controlMessageCount), corruptlibp2p.WithGraft(int(messageCount), malformedTopic.String())) - graftCtlMsgsInvalidSporkIDTopic := spammer.GenerateCtlMessages(int(controlMessageCount), corruptlibp2p.WithGraft(int(messageCount), invalidSporkIDTopic.String())) - graftCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(controlMessageCount), corruptlibp2p.WithGraft(3, duplicateTopic.String())) - - pruneCtlMsgsWithUnknownTopic := spammer.GenerateCtlMessages(int(controlMessageCount), corruptlibp2p.WithPrune(int(messageCount), unknownTopic.String())) - pruneCtlMsgsWithMalformedTopic := spammer.GenerateCtlMessages(int(controlMessageCount), corruptlibp2p.WithPrune(int(messageCount), malformedTopic.String())) - pruneCtlMsgsInvalidSporkIDTopic := spammer.GenerateCtlMessages(int(controlMessageCount), corruptlibp2p.WithGraft(int(messageCount), invalidSporkIDTopic.String())) - pruneCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(controlMessageCount), corruptlibp2p.WithPrune(3, duplicateTopic.String())) - - // start spamming the victim peer - spammer.SpamControlMessage(t, victimNode, graftCtlMsgsWithUnknownTopic) - spammer.SpamControlMessage(t, victimNode, graftCtlMsgsWithMalformedTopic) - spammer.SpamControlMessage(t, victimNode, graftCtlMsgsInvalidSporkIDTopic) - spammer.SpamControlMessage(t, victimNode, graftCtlMsgsDuplicateTopic) - - spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsWithUnknownTopic) - spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsWithMalformedTopic) - spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsInvalidSporkIDTopic) - spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsDuplicateTopic) - - unittest.RequireCloseBefore(t, done, 5*time.Second, "failed to inspect RPC messages on time") -} - -// TestGossipSubSpamMitigationIntegration tests that the spam mitigation feature of GossipSub is working as expected. 
-// The test puts together the spam detection (through the GossipSubInspector) and the spam mitigation (through the -// scoring system) and ensures that the mitigation is triggered when the spam detection detects spam. -// The test scenario involves a spammer node that sends a large number of control messages to a victim node. -// The victim node is configured to use the GossipSubInspector to detect spam and the scoring system to mitigate spam. -// The test ensures that the victim node is disconnected from the spammer node on the GossipSub mesh after the spam detection is triggered. -func TestGossipSubSpamMitigationIntegration(t *testing.T) { - t.Parallel() - idProvider := mock.NewIdentityProvider(t) - sporkID := unittest.IdentifierFixture() - spammer := corruptlibp2p.NewGossipSubRouterSpammer(t, sporkID, flow.RoleConsensus) - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - victimNode, victimId := p2ptest.NodeFixture( - t, - sporkID, - t.Name(), - p2ptest.WithRole(flow.RoleConsensus), - p2ptest.WithPeerScoringEnabled(idProvider), - ) - - ids := flow.IdentityList{&victimId, &spammer.SpammerId} - provider := id.NewFixedIdentityProvider(ids) - idProvider.On("ByPeerID", mockery.Anything).Return( - func(peerId peer.ID) *flow.Identity { - identity, _ := provider.ByPeerID(peerId) - return identity - }, func(peerId peer.ID) bool { - _, ok := provider.ByPeerID(peerId) - return ok - }) - - spamRpcCount := 10 // total number of individual rpc messages to send - spamCtrlMsgCount := int64(10) // total number of control messages to send on each RPC - - // unknownTopic is an unknown topic to the victim node but shaped like a valid topic (i.e., it has the correct prefix and spork ID). - unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", corruptlibp2p.GossipSubTopicIdFixture(), sporkID)) - - // malformedTopic is a topic that is not shaped like a valid topic (i.e., it does not have the correct prefix and spork ID). - malformedTopic := channels.Topic("!@#$%^&**((") - - // invalidSporkIDTopic is a topic that has a valid prefix but an invalid spork ID (i.e., not the current spork ID). - invalidSporkIDTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, unittest.IdentifierFixture())) - - // duplicateTopic is a valid topic that is used to send duplicate spam messages. - duplicateTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, sporkID)) - - // starting the nodes. - nodes := []p2p.LibP2PNode{victimNode, spammer.SpammerNode} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 2*time.Second) - spammer.Start(t) - - // wait for the nodes to discover each other - p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) - - // as nodes started fresh and no spamming has happened yet, the nodes should be able to exchange messages on the topic. - p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkID) - return unittest.ProposalFixture(), blockTopic - }) - - // prepares spam graft and prune messages with different strategies.
- graftCtlMsgsWithUnknownTopic := spammer.GenerateCtlMessages(int(spamCtrlMsgCount), corruptlibp2p.WithGraft(int(spamRpcCount), unknownTopic.String())) - graftCtlMsgsWithMalformedTopic := spammer.GenerateCtlMessages(int(spamCtrlMsgCount), corruptlibp2p.WithGraft(int(spamRpcCount), malformedTopic.String())) - graftCtlMsgsInvalidSporkIDTopic := spammer.GenerateCtlMessages(int(spamCtrlMsgCount), corruptlibp2p.WithGraft(int(spamRpcCount), invalidSporkIDTopic.String())) - graftCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(spamCtrlMsgCount), corruptlibp2p.WithGraft(3, duplicateTopic.String())) - - pruneCtlMsgsWithUnknownTopic := spammer.GenerateCtlMessages(int(spamCtrlMsgCount), corruptlibp2p.WithPrune(int(spamRpcCount), unknownTopic.String())) - pruneCtlMsgsWithMalformedTopic := spammer.GenerateCtlMessages(int(spamCtrlMsgCount), corruptlibp2p.WithPrune(int(spamRpcCount), malformedTopic.String())) - pruneCtlMsgsInvalidSporkIDTopic := spammer.GenerateCtlMessages(int(spamCtrlMsgCount), corruptlibp2p.WithGraft(int(spamRpcCount), invalidSporkIDTopic.String())) - pruneCtlMsgsDuplicateTopic := spammer.GenerateCtlMessages(int(spamCtrlMsgCount), corruptlibp2p.WithPrune(3, duplicateTopic.String())) - - // start spamming the victim peer - spammer.SpamControlMessage(t, victimNode, graftCtlMsgsWithUnknownTopic) - spammer.SpamControlMessage(t, victimNode, graftCtlMsgsWithMalformedTopic) - spammer.SpamControlMessage(t, victimNode, graftCtlMsgsInvalidSporkIDTopic) - spammer.SpamControlMessage(t, victimNode, graftCtlMsgsDuplicateTopic) - - spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsWithUnknownTopic) - spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsWithMalformedTopic) - spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsInvalidSporkIDTopic) - spammer.SpamControlMessage(t, victimNode, pruneCtlMsgsDuplicateTopic) - - // wait for two GossipSub heartbeat intervals to ensure that the victim node has penalized the spammer node. - time.Sleep(2 * time.Second) - - // now we expect the detection and mitigation to kick in and the victim node to disconnect from the spammer node. - // so the spammer and victim nodes should not be able to exchange messages on the topic. - p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, []p2p.LibP2PNode{victimNode}, []p2p.LibP2PNode{spammer.SpammerNode}, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkID) - return unittest.ProposalFixture(), blockTopic - }) -} - -// mockDistributorReadyDoneAware mocks the Ready and Done methods of the distributor to return a channel that is already closed, -// so that the distributor is considered ready and done when the test needs. 
-func mockDistributorReadyDoneAware(d *mockp2p.GossipSubInspectorNotificationDistributor) { - d.On("Start", mockery.Anything).Return().Maybe() - d.On("Ready").Return(func() <-chan struct{} { - ch := make(chan struct{}) - close(ch) - return ch - }()).Maybe() - d.On("Done").Return(func() <-chan struct{} { - ch := make(chan struct{}) - close(ch) - return ch - }()).Maybe() -} diff --git a/insecure/wintermute/attackOrchestrator.go b/insecure/wintermute/attackOrchestrator.go index 40b7b60616e..3853c6dc294 100644 --- a/insecure/wintermute/attackOrchestrator.go +++ b/insecure/wintermute/attackOrchestrator.go @@ -75,7 +75,7 @@ func (o *Orchestrator) HandleEgressEvent(event *insecure.EgressEvent) error { switch event.FlowProtocolEvent.(type) { - case *flow.ExecutionReceipt: + case *messages.ExecutionReceipt: // orchestrator received execution receipt from corrupted EN after EN executed a block. if err := o.handleExecutionReceiptEvent(event); err != nil { return fmt.Errorf("could not handle execution receipt event: %w", err) @@ -90,7 +90,7 @@ func (o *Orchestrator) HandleEgressEvent(event *insecure.EgressEvent) error { if err := o.handleChunkDataPackResponseEvent(event); err != nil { return fmt.Errorf("could not handle chunk data pack response event: %w", err) } - case *flow.ResultApproval: + case *messages.ResultApproval: // orchestrator receives a result approval from corrupted VN. If it is an approval for the original result, it should // be wintermuted, i.e., a corrupted VN must not approve any conflicting result with the corrupted result (otherwise, it // causes a sealing halt at consensus nodes). @@ -144,14 +144,14 @@ func (o *Orchestrator) corruptExecutionResult(receipt *flow.ExecutionReceipt) *f BlockID: receipt.ExecutionResult.BlockID, // replace all chunks with new ones to simulate chunk corruption Chunks: flow.ChunkList{ - unittest.ChunkFixture(receipt.ExecutionResult.BlockID, 0, unittest.WithChunkStartState(receiptStartState)), + unittest.ChunkFixture(receipt.ExecutionResult.BlockID, 0, receiptStartState), }, ServiceEvents: receipt.ExecutionResult.ServiceEvents, ExecutionDataID: receipt.ExecutionResult.ExecutionDataID, } if chunksNum > 1 { - result.Chunks = append(result.Chunks, unittest.ChunkListFixture(uint(chunksNum-1), receipt.ExecutionResult.BlockID)...) + result.Chunks = append(result.Chunks, unittest.ChunkListFixture(uint(chunksNum-1), receipt.ExecutionResult.BlockID, result.Chunks[0].EndState)...) 
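// The hunks above and below migrate the orchestrator from internal flow.* event
// types to wire-level messages.* types, which are converted back with ToInternal()
// before their fields are used. A minimal sketch of that conversion pattern,
// assuming only names that appear in this diff (the helper function itself is
// hypothetical, not part of the patch):

// receiptFromEvent converts a wire-level execution receipt event back to the
// internal representation, mirroring the checks handleExecutionReceiptEvent performs.
func receiptFromEvent(event interface{}) (*flow.ExecutionReceipt, error) {
	// the orchestrator now receives the wire-level wrapper type
	msg, ok := event.(*messages.ExecutionReceipt)
	if !ok {
		return nil, fmt.Errorf("protocol event is not an execution receipt: %T", event)
	}
	// convert back to the internal type before touching fields such as ExecutionResult
	internal, err := msg.ToInternal()
	if err != nil {
		return nil, fmt.Errorf("failed to convert event to internal: %w", err)
	}
	receipt, ok := internal.(*flow.ExecutionReceipt)
	if !ok {
		return nil, fmt.Errorf("unexpected internal type: %T", internal)
	}
	return receipt, nil
}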
} return result @@ -179,7 +179,7 @@ func (o *Orchestrator) handleExecutionReceiptEvent(receiptEvent *insecure.Egress return fmt.Errorf("wrong sender role for execution receipt: %s", corruptedIdentity.Role.String()) } - receipt, ok := receiptEvent.FlowProtocolEvent.(*flow.ExecutionReceipt) + receipt, ok := receiptEvent.FlowProtocolEvent.(*messages.ExecutionReceipt) if !ok { return fmt.Errorf("protocol event is not an execution receipt: %T", receiptEvent.FlowProtocolEvent) } @@ -213,12 +213,17 @@ func (o *Orchestrator) handleExecutionReceiptEvent(receiptEvent *insecure.Egress return nil } + internalMsg, err := receipt.ToInternal() + if err != nil { + lg.Fatal().Err(err).Msg("failed to convert event to internal") + } + // replace honest receipt with corrupted receipt - corruptedResult := o.corruptExecutionResult(receipt) + corruptedResult := o.corruptExecutionResult(internalMsg.(*flow.ExecutionReceipt)) corruptedExecutionIds := o.allNodeIds.Filter( - filter.And(filter.HasRole(flow.RoleExecution), - filter.HasNodeID(o.corruptedNodeIds...))).NodeIDs() + filter.And(filter.HasRole[flow.Identity](flow.RoleExecution), + filter.HasNodeID[flow.Identity](o.corruptedNodeIds...))).NodeIDs() // sends corrupted execution result to all corrupted execution nodes. for _, corruptedExecutionId := range corruptedExecutionIds { @@ -232,7 +237,7 @@ func (o *Orchestrator) handleExecutionReceiptEvent(receiptEvent *insecure.Egress TargetIds: receiptEvent.TargetIds, // wrapping execution result in an execution receipt for sake of encoding and decoding. - FlowProtocolEvent: &flow.ExecutionReceipt{ExecutionResult: *corruptedResult}, + FlowProtocolEvent: &messages.ExecutionReceipt{UnsignedExecutionReceipt: flow.UnsignedExecutionReceipt{ExecutionResult: *corruptedResult}}, }) if err != nil { return fmt.Errorf("could not send rpc on channel: %w", err) @@ -340,7 +345,7 @@ func (o *Orchestrator) handleChunkDataPackResponseEvent(chunkDataPackReplyEvent } o.logger.Debug(). Hex("corrupted_id", logging.ID(chunkDataPackReplyEvent.CorruptOriginId)). - Hex("chunk_id", logging.ID(cdpRep.ChunkDataPack.ID())). + Hex("chunk_id", logging.ID(cdpRep.ChunkDataPack.ChunkID)). Msg("chunk data pack response passed through") return nil } @@ -350,7 +355,7 @@ func (o *Orchestrator) handleChunkDataPackResponseEvent(chunkDataPackReplyEvent func (o *Orchestrator) handleResultApprovalEvent(resultApprovalEvent *insecure.EgressEvent) error { // non-nil state means a result has been corrupted, hence checking whether the approval // belongs to the chunks of the original (non-corrupted) result. - approval := resultApprovalEvent.FlowProtocolEvent.(*flow.ResultApproval) + approval := resultApprovalEvent.FlowProtocolEvent.(*messages.ResultApproval) lg := o.logger.With(). Hex("result_id", logging.ID(approval.Body.ExecutionResultID)). Uint64("chunk_index", approval.Body.ChunkIndex). @@ -394,7 +399,7 @@ func (o *Orchestrator) replyWithAttestation(chunkDataPackRequestEvent *insecure. } // sends an attestation on behalf of verification node to all consensus nodes - consensusIds := o.allNodeIds.Filter(filter.HasRole(flow.RoleConsensus)).NodeIDs() + consensusIds := o.allNodeIds.Filter(filter.HasRole[flow.Identity](flow.RoleConsensus)).NodeIDs() err = o.network.SendEgress(&insecure.EgressEvent{ CorruptOriginId: chunkDataPackRequestEvent.CorruptOriginId, Channel: channels.PushApprovals, @@ -403,7 +408,7 @@ func (o *Orchestrator) replyWithAttestation(chunkDataPackRequestEvent *insecure. 
TargetIds: consensusIds, // wrapping attestation in a result approval for sake of encoding and decoding. - FlowProtocolEvent: &flow.ResultApproval{Body: flow.ResultApprovalBody{Attestation: *attestation}}, + FlowProtocolEvent: &messages.ResultApproval{Body: flow.ResultApprovalBody{Attestation: *attestation}}, }) if err != nil { return false, fmt.Errorf("could not send attestation for corrupted chunk: %w", err) diff --git a/insecure/wintermute/attackOrchestrator_test.go b/insecure/wintermute/attackOrchestrator_test.go index 1c5d46f6899..bba9530d42b 100644 --- a/insecure/wintermute/attackOrchestrator_test.go +++ b/insecure/wintermute/attackOrchestrator_test.go @@ -27,13 +27,13 @@ func TestSingleExecutionReceipt(t *testing.T) { rootStateFixture, allIds, corruptedIds := bootstrapWintermuteFlowSystem(t) // identities of nodes who are expected targets of an execution receipt. - receiptTargetIds, err := rootStateFixture.State.Final().Identities(filter.HasRole(flow.RoleAccess, flow.RoleConsensus, flow.RoleVerification)) + receiptTargetIds, err := rootStateFixture.State.Final().Identities(filter.HasRole[flow.Identity](flow.RoleAccess, flow.RoleConsensus, flow.RoleVerification)) require.NoError(t, err) corruptedExecutionIds := flow.IdentifierList( allIds.Filter( - filter.And(filter.HasRole(flow.RoleExecution), - filter.HasNodeID(corruptedIds...)), + filter.And(filter.HasRole[flow.Identity](flow.RoleExecution), + filter.HasNodeID[flow.Identity](corruptedIds...)), ).NodeIDs()) eventMap, receipts := receiptsWithSameResultFixture(t, 1, corruptedExecutionIds[0:1], receiptTargetIds.NodeIDs()) @@ -140,11 +140,11 @@ func testConcurrentExecutionReceipts(t *testing.T, rootStateFixture, allIds, corruptedIds := bootstrapWintermuteFlowSystem(t) corruptedExecutionIds := flow.IdentifierList( allIds.Filter( - filter.And(filter.HasRole(flow.RoleExecution), - filter.HasNodeID(corruptedIds...)), + filter.And(filter.HasRole[flow.Identity](flow.RoleExecution), + filter.HasNodeID[flow.Identity](corruptedIds...)), ).NodeIDs()) // identities of nodes who are expected targets of an execution receipt. 
- receiptTargetIds, err := rootStateFixture.State.Final().Identities(filter.HasRole(flow.RoleAccess, flow.RoleConsensus, flow.RoleVerification)) + receiptTargetIds, err := rootStateFixture.State.Final().Identities(filter.HasRole[flow.Identity](flow.RoleAccess, flow.RoleConsensus, flow.RoleVerification)) require.NoError(t, err) var eventMap map[flow.Identifier]*insecure.EgressEvent @@ -250,7 +250,7 @@ func mockOrchestratorNetworkForCorruptedExecutionResult( // make sure message being sent on correct channel require.Equal(t, channels.PushReceipts, event.Channel) - corruptedResult, ok := event.FlowProtocolEvent.(*flow.ExecutionReceipt) + corruptedResult, ok := event.FlowProtocolEvent.(*messages.ExecutionReceipt) require.True(t, ok) // make sure the original uncorrupted execution receipt is NOT sent to orchestrator @@ -270,8 +270,8 @@ func TestRespondingWithCorruptedAttestation(t *testing.T) { _, allIds, corruptedIds := bootstrapWintermuteFlowSystem(t) corruptedVerIds := flow.IdentifierList( allIds.Filter( - filter.And(filter.HasRole(flow.RoleVerification), - filter.HasNodeID(corruptedIds...)), + filter.And(filter.HasRole[flow.Identity](flow.RoleVerification), + filter.HasNodeID[flow.Identity](corruptedIds...)), ).NodeIDs()) wintermuteOrchestrator := NewOrchestrator(unittest.Logger(), corruptedIds, allIds) @@ -295,7 +295,7 @@ func TestRespondingWithCorruptedAttestation(t *testing.T) { // output of orchestrator for a corrupted chunk request from a corrupted verification node // should be a result approval containing a dictated attestation. - approval, ok := event.FlowProtocolEvent.(*flow.ResultApproval) + approval, ok := event.FlowProtocolEvent.(*messages.ResultApproval) require.True(t, ok) attestation := approval.Body.Attestation @@ -351,8 +351,8 @@ func TestPassingThroughChunkDataRequests(t *testing.T) { _, allIds, corruptedIds := bootstrapWintermuteFlowSystem(t) corruptedVerIds := flow.IdentifierList( allIds.Filter( - filter.And(filter.HasRole(flow.RoleVerification), - filter.HasNodeID(corruptedIds...)), + filter.And(filter.HasRole[flow.Identity](flow.RoleVerification), + filter.HasNodeID[flow.Identity](corruptedIds...)), ).NodeIDs()) wintermuteOrchestrator := NewOrchestrator(unittest.Logger(), corruptedIds, allIds) @@ -440,7 +440,7 @@ func TestPassingThroughChunkDataResponse_WithAttack(t *testing.T) { func testPassingThroughChunkDataResponse(t *testing.T, state *attackState) { totalChunks := 10 _, allIds, corruptedIds := bootstrapWintermuteFlowSystem(t) - verIds := flow.IdentifierList(allIds.Filter(filter.HasRole(flow.RoleVerification)).NodeIDs()) + verIds := flow.IdentifierList(allIds.Filter(filter.HasRole[flow.Identity](flow.RoleVerification)).NodeIDs()) wintermuteOrchestrator := NewOrchestrator(unittest.Logger(), corruptedIds, allIds) wintermuteOrchestrator.state = state @@ -510,8 +510,8 @@ func TestWintermuteChunkResponseForCorruptedChunks(t *testing.T) { _, allIds, corruptedIds := bootstrapWintermuteFlowSystem(t) honestVnIds := flow.IdentifierList( allIds.Filter(filter.And( - filter.HasRole(flow.RoleVerification), - filter.Not(filter.HasNodeID(corruptedIds...)))).NodeIDs()) + filter.HasRole[flow.Identity](flow.RoleVerification), + filter.Not(filter.HasNodeID[flow.Identity](corruptedIds...)))).NodeIDs()) wintermuteOrchestrator := NewOrchestrator(unittest.Logger(), corruptedIds, allIds) originalResult := unittest.ExecutionResultFixture() @@ -566,7 +566,7 @@ func TestPassingThroughMiscellaneousEvents(t *testing.T) { Protocol: insecure.Protocol_MULTICAST, TargetNum: 3, TargetIds: 
unittest.IdentifierListFixture(10), - FlowProtocolEvent: unittest.BlockFixture(), + FlowProtocolEvent: *unittest.BlockFixture(), } eventPassThrough := &sync.WaitGroup{} @@ -643,7 +643,7 @@ func TestPassingThrough_ResultApproval(t *testing.T) { Protocol: insecure.Protocol_MULTICAST, TargetNum: 3, TargetIds: unittest.IdentifierListFixture(10), - FlowProtocolEvent: approval, + FlowProtocolEvent: (*messages.ResultApproval)(approval), } approvalPassThrough := &sync.WaitGroup{} @@ -659,7 +659,7 @@ func TestPassingThrough_ResultApproval(t *testing.T) { require.True(t, ok) // passed through event must be a result approval - _, ok = event.FlowProtocolEvent.(*flow.ResultApproval) + _, ok = event.FlowProtocolEvent.(*messages.ResultApproval) require.True(t, ok) // response must be a pass through @@ -717,9 +717,9 @@ func TestWintermute_ResultApproval(t *testing.T) { Protocol: insecure.Protocol_MULTICAST, TargetNum: 3, TargetIds: unittest.IdentifierListFixture(10), - FlowProtocolEvent: unittest.ResultApprovalFixture( + FlowProtocolEvent: (*messages.ResultApproval)(unittest.ResultApprovalFixture( unittest.WithExecutionResultID(originalResult.ID()), - unittest.WithChunk(0)), + unittest.WithChunk(0))), } // mocks orchestrator network diff --git a/insecure/wintermute/helpers.go b/insecure/wintermute/helpers.go index 3aedee317ed..8c9d201f7a9 100644 --- a/insecure/wintermute/helpers.go +++ b/insecure/wintermute/helpers.go @@ -102,7 +102,7 @@ func receiptsWithSameResultFixture( require.Equal(t, result.ID(), receipt.ExecutionResult.ID()) - event := executionReceiptEvent(receipt, targetIds) + event := executionReceiptEvent((*messages.ExecutionReceipt)(receipt), targetIds) _, ok := eventMap[receipt.ID()] require.False(t, ok) // check for duplicate receipts. @@ -117,7 +117,7 @@ func receiptsWithSameResultFixture( } // executionReceiptEvent creates the orchestrator network event of the corresponding execution receipt. -func executionReceiptEvent(receipt *flow.ExecutionReceipt, targetIds flow.IdentifierList) *insecure.EgressEvent { +func executionReceiptEvent(receipt *messages.ExecutionReceipt, targetIds flow.IdentifierList) *insecure.EgressEvent { return &insecure.EgressEvent{ CorruptOriginId: receipt.ExecutorID, Channel: channels.PushReceipts, @@ -143,7 +143,7 @@ func chunkDataPackResponseForReceipts(receipts []*flow.ExecutionReceipt, verIds } cdpRep := &messages.ChunkDataResponse{ - ChunkDataPack: *unittest.ChunkDataPackFixture(chunkId), + ChunkDataPack: flow.UntrustedChunkDataPack(*unittest.ChunkDataPackFixture(chunkId)), } chunkIds = chunkIds.Union(flow.IdentifierList{chunkId}) @@ -207,14 +207,21 @@ func orchestratorOutputSanityCheck( for _, outputEvent := range outputEvents { switch event := outputEvent.FlowProtocolEvent.(type) { - case *flow.ExecutionReceipt: + case *messages.ExecutionReceipt: if len(event.ExecutorSignature.Bytes()) != 0 { // a receipt with a non-empty signature is a pass-through receipt. // makes sure sender is a corrupted execution node. ok := corrEnIds.Contains(outputEvent.CorruptOriginId) require.True(t, ok) + + internalEvent, err := event.ToInternal() + require.NoError(t, err) + + receipt, ok := internalEvent.(*flow.ExecutionReceipt) + require.True(t, ok) + // uses union to avoid adding duplicate. - passThroughReceipts = passThroughReceipts.Union(flow.IdentifierList{event.ID()}) + passThroughReceipts = passThroughReceipts.Union(flow.IdentifierList{receipt.ID()}) } else { // a receipt with an empty signature contains a dictated result from wintermute orchestrator. 
// the rest of receipt will be filled by the corrupted node @@ -263,7 +270,7 @@ func receiptsWithDistinctResultFixture( for i := 0; i < count; i++ { for _, exeId := range exeIds { receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(exeId)) - event := executionReceiptEvent(receipt, targetIds) + event := executionReceiptEvent((*messages.ExecutionReceipt)(receipt), targetIds) _, ok := eventMap[receipt.ID()] require.False(t, ok) // checks for duplicate receipts. diff --git a/integration/Makefile b/integration/Makefile index a4f354c7e4d..26652e4ba14 100644 --- a/integration/Makefile +++ b/integration/Makefile @@ -1,6 +1,8 @@ # Name of the cover profile COVER_PROFILE := cover.out +GO_TEST_PACKAGES := `go list ./... | grep -v -e integration/tests` + # allows CI to specify whether to have race detection on / off ifeq ($(RACE_DETECTOR),1) RACE_FLAG := -race @@ -8,67 +10,101 @@ else RACE_FLAG := endif +# set `CRYPTO_FLAG` when building natively (not cross-compiling) +include ../crypto_adx_flag.mk + # Run the integration test suite .PHONY: integration-test integration-test: access-tests ghost-tests mvp-tests execution-tests verification-tests upgrades-tests collection-tests epochs-tests network-tests consensus-tests -.PHONY: ci-integration-test -ci-integration-test: access-tests ghost-tests mvp-tests epochs-tests consensus-tests execution-tests verification-tests upgrades-tests network-tests collection-tests - -############################################################################################ -# CAUTION: DO NOT MODIFY THE TARGETS BELOW! DOING SO WILL BREAK THE FLAKY TEST MONITOR -# In particular, do not skip tests by commenting them out here. - # Run unit tests for test utilities in this module .PHONY: test test: - go test $(if $(VERBOSE),-v,) -tags relic -coverprofile=$(COVER_PROFILE) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) `go list ./... | grep -v -e integration/tests` + CGO_CFLAGS=$(CRYPTO_FLAG) go test $(if $(VERBOSE),-v,) -coverprofile=$(COVER_PROFILE) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) $(GO_TEST_PACKAGES) .PHONY: access-tests -access-tests: - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/access/... +access-tests: access-cohort1-tests access-cohort2-tests access-cohort3-tests access-cohort4-tests + +.PHONY: access-cohort1-tests +access-cohort1-tests: + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/access/cohort1/... + +.PHONY: access-cohort2-tests +access-cohort2-tests: + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/access/cohort2/... + +.PHONY: access-cohort3-tests +access-cohort3-tests: + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/access/cohort3/... + +.PHONY: access-cohort4-tests +access-cohort4-tests: + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/access/cohort4/... .PHONY: collection-tests collection-tests: - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/collection/... 
+ CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/collection/... .PHONY: consensus-tests consensus-tests: - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/consensus/... + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/consensus/... .PHONY: epochs-tests -epochs-tests: +epochs-tests: epochs-cohort1-tests epochs-cohort2-tests + +.PHONY: epochs-cohort1-tests +epochs-cohort1-tests: # Use a higher timeout of 20m for the suite of tests which span full epochs - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic -timeout 30m ./tests/epochs/... + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -timeout 20m ./tests/epochs/cohort1/... + +.PHONY: epochs-cohort2-tests +epochs-cohort2-tests: + # Use a higher timeout of 20m for the suite of tests which span full epochs + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -timeout 20m ./tests/epochs/cohort2/... .PHONY: ghost-tests ghost-tests: - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/ghost/... + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/ghost/... .PHONY: mvp-tests mvp-tests: - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/mvp/... + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/mvp/... .PHONY: execution-tests execution-tests: - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/execution/... + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/execution/... .PHONY: verification-tests verification-tests: - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/verification/... + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/verification/... +# upgrades-tests tests need to be run sequentially (-p 1) due to interference between different Docker networks when tests are run in parallel .PHONY: upgrades-tests upgrades-tests: - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/upgrades/... + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/upgrades/... -p 1 .PHONY: network-tests network-tests: - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/network/... 
+ CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/network/... # BFT tests need to be run sequentially (-p 1) due to interference between different Docker networks when tests are run in parallel +.PHONY: bft-framework-tests +bft-framework-tests: + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/bft/framework/... -p 1 +.PHONY: bft-protocol-tests +bft-protocol-tests: + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/bft/protocol/... -p 1 +.PHONY: bft-gossipsub-tests +bft-gossipsub-tests: + CGO_CFLAGS=$(CRYPTO_FLAG) go test -failfast $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) ./tests/bft/gossipsub/... -p 1 + .PHONY: bft-tests -bft-tests: - go test $(if $(VERBOSE),-v,) $(RACE_FLAG) $(if $(JSON_OUTPUT),-json,) $(if $(NUM_RUNS),-count $(NUM_RUNS),) -tags relic ./tests/bft/... -p 1 +bft-tests: bft-framework-tests bft-protocol-tests bft-gossipsub-tests +.PHONY: lint +lint: + ../tools/custom-gcl run -v ./... -############################################################################################ +.PHONY: fix-lint +fix-lint: + ../tools/custom-gcl run -v --fix ./... diff --git a/integration/README.md b/integration/README.md index b6b59f4fa82..8b479f06477 100644 --- a/integration/README.md +++ b/integration/README.md @@ -14,15 +14,15 @@ Since the test cases run docker instances as a network of nodes, we need to ensu To ensure the latest docker images have been built, you can run: ``` -make docker-build-access -make docker-build-collection -make docker-build-consensus -make docker-build-execution -make docker-build-verification -make docker-build-ghost +make docker-native-build-access +make docker-native-build-collection +make docker-native-build-consensus +make docker-native-build-execution +make docker-native-build-verification +make docker-native-build-ghost ``` -Or simply run `make docker-build-flow` +Or simply run `make docker-native-build-flow` After images have been built, we can run the integration tests: ``` @@ -65,11 +65,11 @@ Because launching a full execution node in the consensus integration tests will ### Rebuild image when debugging During test cases debugging, you might want to update some code. However, if you run `make integration-test` after updating the code, the new change will not be included, because the integration tests still use the old code from the docker image, which was built before adding the changes. -So you need to rebuild all the images by running `make docker-build-flow` again before re-running the integration tests. +So you need to rebuild all the images by running `make docker-native-build-flow` again before re-running the integration tests. Rebuilding all images takes quite some time, here is a shortcut: -If consensus's code was changed, then only consensus's image need to be rebuilt, so simply run `make docker-build-consensus` instead of rebuilding all the images. +If consensus's code was changed, then only consensus's image need to be rebuilt, so simply run `make docker-native-build-consensus` instead of rebuilding all the images. ### Organization @@ -81,4 +81,4 @@ in the Makefile. To send random transactions, for example to load test a network, run `cd integration/localnet; make load`. 
-In order to build a docker container with the benchmarking binary, run `make docker-build-loader` from the root of this repository.
+In order to build a docker container with the benchmarking binary, run `make docker-native-build-loader` from the root of this repository.
diff --git a/integration/benchmark/account/account.go b/integration/benchmark/account/account.go
index 81a938d93b3..9cce1304056 100644
--- a/integration/benchmark/account/account.go
+++ b/integration/benchmark/account/account.go
@@ -12,62 +12,70 @@ import (
 )
 
 type FlowAccount struct {
-	Address *flowsdk.Address
-	ID      int
-
-	keys *keystore
+	Address    flowsdk.Address
+	keys       *keystore
+	PrivateKey crypto.PrivateKey
+	HashAlgo   crypto.HashAlgorithm
 }
 
-func New(i int, address *flowsdk.Address, privKey crypto.PrivateKey, accountKeys []*flowsdk.AccountKey) (*FlowAccount, error) {
-	keys := make([]*accountKey, 0, len(accountKeys))
+func New(
+	address flowsdk.Address,
+	privateKey crypto.PrivateKey,
+	hashAlgo crypto.HashAlgorithm,
+	accountKeys []flowsdk.AccountKey,
+) (*FlowAccount, error) {
+	keys := make([]*AccountKey, 0, len(accountKeys))
 	for _, key := range accountKeys {
-		signer, err := crypto.NewInMemorySigner(privKey, key.HashAlgo)
+		// signers are not thread-safe, so we need to create a new signer for each key
+		signer, err := crypto.NewInMemorySigner(privateKey, hashAlgo)
 		if err != nil {
-			return nil, fmt.Errorf("error while creating signer: %w", err)
+			return nil, fmt.Errorf("error while creating in-memory signer: %w", err)
 		}
 
-		keys = append(keys, &accountKey{
-			AccountKey: *key,
+		keys = append(keys, &AccountKey{
+			AccountKey: key,
 			Address:    address,
 			Signer:     signer,
 		})
 	}
 
 	return &FlowAccount{
-		Address: address,
-		ID:      i,
-		keys:    newKeystore(keys),
+		Address:    address,
+		keys:       newKeystore(keys),
+		PrivateKey: privateKey,
+		HashAlgo:   hashAlgo,
 	}, nil
 }
 
-func LoadServiceAccount(
+func LoadAccount(
 	ctx context.Context,
 	flowClient access.Client,
-	servAccAddress *flowsdk.Address,
-	servAccPrivKeyHex string,
+	address flowsdk.Address,
+	privateKey crypto.PrivateKey,
+	hashAlgo crypto.HashAlgorithm,
 ) (*FlowAccount, error) {
-	acc, err := flowClient.GetAccount(ctx, *servAccAddress)
+	acc, err := flowClient.GetAccount(ctx, address)
 	if err != nil {
-		return nil, fmt.Errorf("error while calling get account for service account: %w", err)
+		return nil, fmt.Errorf("error while calling get account for account %s: %w", address, err)
 	}
 
-	privateKey, err := crypto.DecodePrivateKeyHex(acc.Keys[0].SigAlgo, servAccPrivKeyHex)
-	if err != nil {
-		return nil, fmt.Errorf("error while decoding serice account private key hex: %w", err)
+	keys := make([]flowsdk.AccountKey, len(acc.Keys))
+	for i, key := range acc.Keys {
+		keys[i] = *key
 	}
 
-	return New(0, servAccAddress, privateKey, acc.Keys)
+	return New(address, privateKey, hashAlgo, keys)
 }
 
 func (acc *FlowAccount) NumKeys() int {
 	return acc.keys.Size()
 }
 
-func (acc *FlowAccount) GetKey() (*accountKey, error) {
+func (acc *FlowAccount) GetKey() (*AccountKey, error) {
 	return acc.keys.getKey()
 }
 
-// randomPrivateKey returns a randomly generated ECDSA P-256 private key.
+// RandomPrivateKey returns a randomly generated ECDSA P-256 private key.
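// A usage sketch of the reworked constructor above, assuming the address is
// already created and funded on chain. The helper function and the example
// address literal are hypothetical; all other names come from this file:

func exampleNewAccount() (*FlowAccount, error) {
	priv := RandomPrivateKey()
	sdkKey := flowsdk.NewAccountKey().
		FromPrivateKey(priv).
		SetHashAlgo(crypto.SHA3_256).
		SetWeight(flowsdk.AccountKeyWeightThreshold)
	// one FlowAccount can hold many keys; here a single-key account is built,
	// and New derives one non-thread-safe signer per key internally
	return New(flowsdk.HexToAddress("f8d6e0586b0a20c7"), priv, crypto.SHA3_256, []flowsdk.AccountKey{*sdkKey})
}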
 func RandomPrivateKey() crypto.PrivateKey {
 	seed := make([]byte, crypto.MinSeedLength)
diff --git a/integration/benchmark/account/account_loader.go b/integration/benchmark/account/account_loader.go
new file mode 100644
index 00000000000..595cabfaf67
--- /dev/null
+++ b/integration/benchmark/account/account_loader.go
@@ -0,0 +1,67 @@
+package account
+
+import (
+	"context"
+
+	"github.com/rs/zerolog"
+
+	flowsdk "github.com/onflow/flow-go-sdk"
+	"github.com/onflow/flow-go-sdk/access"
+	"github.com/onflow/flow-go-sdk/crypto"
+)
+
+type Loader interface {
+	Load(
+		address flowsdk.Address,
+		privateKey crypto.PrivateKey,
+		hashAlgo crypto.HashAlgorithm,
+	) (*FlowAccount, error)
+}
+
+type ClientAccountLoader struct {
+	log        zerolog.Logger
+	ctx        context.Context
+	flowClient access.Client
+}
+
+func NewClientAccountLoader(
+	log zerolog.Logger,
+	ctx context.Context,
+	flowClient access.Client,
+) *ClientAccountLoader {
+	return &ClientAccountLoader{
+		log:        log.With().Str("component", "account_loader").Logger(),
+		ctx:        ctx,
+		flowClient: flowClient,
+	}
+}
+
+func (c *ClientAccountLoader) Load(
+	address flowsdk.Address,
+	privateKey crypto.PrivateKey,
+	hashAlgo crypto.HashAlgorithm,
+) (*FlowAccount, error) {
+	acc, err := LoadAccount(c.ctx, c.flowClient, address, privateKey, hashAlgo)
+	// check the error before logging: acc is nil when loading fails
+	if err != nil {
+		return nil, err
+	}
+
+	c.log.Debug().
+		Str("address", address.String()).
+		Int("keys", acc.NumKeys()).
+		Msg("Loaded account")
+
+	return acc, nil
+}
+
+func ReloadAccount(c Loader, acc *FlowAccount) error {
+	newAcc, err := c.Load(acc.Address, acc.PrivateKey, acc.HashAlgo)
+	if err != nil {
+		return err
+	}
+
+	acc.keys = newAcc.keys
+	return nil
+}
+
+var _ Loader = (*ClientAccountLoader)(nil)
diff --git a/integration/benchmark/account/account_provider.go b/integration/benchmark/account/account_provider.go
new file mode 100644
index 00000000000..d46ecd42dbe
--- /dev/null
+++ b/integration/benchmark/account/account_provider.go
@@ -0,0 +1,249 @@
+package account
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/onflow/cadence"
+	"github.com/rs/zerolog"
+	"golang.org/x/sync/errgroup"
+
+	flowsdk "github.com/onflow/flow-go-sdk"
+
+	"github.com/onflow/flow-go/module/util"
+
+	"github.com/onflow/flow-go-sdk/crypto"
+
+	"github.com/onflow/flow-go/fvm/blueprints"
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+	"github.com/onflow/flow-go/integration/benchmark/common"
+	"github.com/onflow/flow-go/integration/benchmark/scripts"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+var ErrNoAccountsAvailable = errors.New("no accounts available")
+
+type AccountProvider interface {
+	// BorrowAvailableAccount borrows an account from the account provider.
+	// It doesn't block.
+	// If no account is available, it returns ErrNoAccountsAvailable.
+	BorrowAvailableAccount() (*FlowAccount, error)
+	// ReturnAvailableAccount returns an account to the account provider, so it can be reused.
+	ReturnAvailableAccount(*FlowAccount)
+	// GetAddresses returns the addresses of the first n available accounts.
+ GetAddresses(uint) ([]flowsdk.Address, error) +} + +type provider struct { + log zerolog.Logger + availableAccounts chan *FlowAccount + numberOfAccounts int + accountCreationBatchSize int +} + +var _ AccountProvider = (*provider)(nil) + +func (p *provider) BorrowAvailableAccount() (*FlowAccount, error) { + select { + case account := <-p.availableAccounts: + return account, nil + default: + return nil, ErrNoAccountsAvailable + } +} + +func (p *provider) ReturnAvailableAccount(account *FlowAccount) { + select { + case p.availableAccounts <- account: + default: + } +} + +func (p *provider) GetAddresses(u uint) ([]flowsdk.Address, error) { + addresses := make([]flowsdk.Address, 0, u) + for i := uint(0); i < u; i++ { + select { + case account := <-p.availableAccounts: + addresses = append(addresses, account.Address) + p.availableAccounts <- account + default: + return addresses, ErrNoAccountsAvailable + } + } + return addresses, nil +} + +func SetupProvider( + log zerolog.Logger, + ctx context.Context, + numberOfAccounts int, + fundAmount uint64, + rb common.ReferenceBlockProvider, + creator *FlowAccount, + sender common.TransactionSender, + chain flow.Chain, +) (AccountProvider, error) { + p := &provider{ + log: log.With().Str("component", "AccountProvider").Logger(), + availableAccounts: make(chan *FlowAccount, numberOfAccounts), + numberOfAccounts: numberOfAccounts, + accountCreationBatchSize: 25, + } + + err := p.init(ctx, fundAmount, rb, creator, sender, chain) + if err != nil { + return nil, fmt.Errorf("failed to initialize account provider: %w", err) + } + + return p, nil +} + +func (p *provider) init( + ctx context.Context, + fundAmount uint64, + rb common.ReferenceBlockProvider, + creator *FlowAccount, + sender common.TransactionSender, + chain flow.Chain, +) error { + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(creator.NumKeys()) + + progress := util.LogProgress(p.log, + util.DefaultLogProgressConfig( + "creating accounts", + p.numberOfAccounts, + )) + + p.log.Info(). + Int("number_of_accounts", p.numberOfAccounts). + Int("account_creation_batch_size", p.accountCreationBatchSize). + Int("number_of_keys", creator.NumKeys()). + Msg("creating accounts") + + for i := 0; i < p.numberOfAccounts; i += p.accountCreationBatchSize { + i := i + g.Go(func() error { + select { + case <-ctx.Done(): + return nil + default: + } + + num := p.accountCreationBatchSize + if i+p.accountCreationBatchSize > p.numberOfAccounts { + num = p.numberOfAccounts - i + } + + defer func() { progress(num) }() + + err := p.createAccountBatch(num, fundAmount, rb, creator, sender, chain) + if err != nil { + p.log. + Err(err). + Int("batch_size", num). + Int("index", i). + Msg("error creating accounts") + return err + } + + return nil + }) + } + err := g.Wait() + if err != nil { + return fmt.Errorf("error creating accounts: %w", err) + } + return nil +} + +func (p *provider) createAccountBatch( + num int, + fundAmount uint64, + rb common.ReferenceBlockProvider, + creator *FlowAccount, + sender common.TransactionSender, + chain flow.Chain, +) error { + wrapErr := func(err error) error { + return fmt.Errorf("error in create accounts: %w", err) + } + + privKey := RandomPrivateKey() + accountKey := flowsdk.NewAccountKey(). + FromPrivateKey(privKey). + SetHashAlgo(crypto.SHA3_256). + SetWeight(flowsdk.AccountKeyWeightThreshold) + + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + // Generate an account creation script + createAccountTx := flowsdk.NewTransaction(). 
+		SetScript(scripts.CreateAccountsTransaction(
+			flowsdk.BytesToAddress(sc.FungibleToken.Address.Bytes()),
+			flowsdk.BytesToAddress(sc.FlowToken.Address.Bytes()))).
+		SetReferenceBlockID(rb.ReferenceBlockID())
+
+	publicKey := blueprints.BytesToCadenceArray(accountKey.PublicKey.Encode())
+	count := cadence.NewInt(num)
+
+	initialTokenAmount := cadence.UFix64(fundAmount)
+
+	err := createAccountTx.AddArgument(publicKey)
+	if err != nil {
+		return wrapErr(err)
+	}
+
+	err = createAccountTx.AddArgument(count)
+	if err != nil {
+		return wrapErr(err)
+	}
+
+	err = createAccountTx.AddArgument(initialTokenAmount)
+	if err != nil {
+		return wrapErr(err)
+	}
+
+	key, err := creator.GetKey()
+	if err != nil {
+		return wrapErr(err)
+	}
+	defer key.Done()
+
+	err = key.SetProposerPayerAndSign(createAccountTx)
+	if err != nil {
+		return wrapErr(err)
+	}
+
+	result, err := sender.Send(createAccountTx)
+	if err == nil || errors.Is(err, common.TransactionError{}) {
+		key.IncrementSequenceNumber()
+	}
+	if err != nil {
+		return wrapErr(err)
+	}
+
+	var accountsCreated int
+	for _, event := range result.Events {
+		if event.Type != flowsdk.EventAccountCreated {
+			continue
+		}
+
+		accountCreatedEvent := flowsdk.AccountCreatedEvent(event)
+		accountAddress := accountCreatedEvent.Address()
+
+		newAcc, err := New(accountAddress, privKey, crypto.SHA3_256, []flowsdk.AccountKey{*accountKey})
+		if err != nil {
+			return fmt.Errorf("failed to create account: %w", err)
+		}
+		accountsCreated++
+
+		p.availableAccounts <- newAcc
+	}
+	if accountsCreated != num {
+		return fmt.Errorf("failed to create enough accounts, expected: %d, created: %d",
+			num, accountsCreated)
+	}
+	return nil
+}
diff --git a/integration/benchmark/account/keys.go b/integration/benchmark/account/keys.go
index 82d90e59c81..e2cf568c300 100644
--- a/integration/benchmark/account/keys.go
+++ b/integration/benchmark/account/keys.go
@@ -1,9 +1,17 @@
 package account
 
 import (
+	"errors"
 	"fmt"
 	"sync"
 
+	"github.com/onflow/cadence"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/fvm/blueprints"
+	"github.com/onflow/flow-go/integration/benchmark/common"
+	"github.com/onflow/flow-go/integration/benchmark/scripts"
+
 	flowsdk "github.com/onflow/flow-go-sdk"
 
 	"github.com/onflow/flow-go-sdk/crypto"
@@ -11,25 +19,25 @@ import (
 
 var ErrNoKeysAvailable = fmt.Errorf("no keys available")
 
-type accountKey struct {
+type AccountKey struct {
 	flowsdk.AccountKey
 	mu      sync.Mutex
 	ks      *keystore
-	Address *flowsdk.Address
-	Signer  crypto.InMemorySigner
+	Address flowsdk.Address
+	Signer  crypto.Signer
 	inuse   bool
 }
 
 type keystore struct {
-	availableKeys chan *accountKey
+	availableKeys chan *AccountKey
 	size          int
 }
 
-func newKeystore(keys []*accountKey) *keystore {
+func newKeystore(keys []*AccountKey) *keystore {
 	ks := &keystore{}
 
-	availableKeys := make(chan *accountKey, len(keys))
+	availableKeys := make(chan *AccountKey, len(keys))
 	for _, key := range keys {
 		key.ks = ks
 		availableKeys <- key
@@ -44,7 +52,7 @@ func (k *keystore) Size() int {
 	return k.size
 }
 
-func (k *keystore) getKey() (*accountKey, error) {
+func (k *keystore) getKey() (*AccountKey, error) {
 	select {
 	case key := <-k.availableKeys:
 		key.mu.Lock()
@@ -61,7 +69,7 @@
 	}
 }
 
-func (k *accountKey) markUnused() {
+func (k *AccountKey) markUnused() {
 	k.mu.Lock()
 	defer k.mu.Unlock()
 
@@ -69,14 +77,14 @@
 }
 
 // Done unlocks a key after use and puts it back into the pool.
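// A sketch of the borrow/sign/return discipline the keystore above is built for,
// as used by createAccountBatch and AddKeysToAccount in this diff. The wrapper
// function and its parameters are hypothetical; GetKey, Done,
// SetProposerPayerAndSign, IncrementSequenceNumber, and common.TransactionError
// are all names from this patch:

func signWithPooledKey(acc *FlowAccount, tx *flowsdk.Transaction, sender common.TransactionSender) error {
	// borrow a free key; this fails fast with ErrNoKeysAvailable instead of blocking
	key, err := acc.GetKey()
	if err != nil {
		return err
	}
	// always return the key to the pool so other workers can reuse it
	defer key.Done()

	if err := key.SetProposerPayerAndSign(tx); err != nil {
		return err
	}

	_, err = sender.Send(tx)
	// the sequence number only advances when the transaction was actually
	// accepted: on success, or on an execution-level TransactionError
	if err == nil || errors.Is(err, common.TransactionError{}) {
		key.IncrementSequenceNumber()
	}
	return err
}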
-func (k *accountKey) Done() { +func (k *AccountKey) Done() { k.markUnused() k.ks.availableKeys <- k } // IncrementSequenceNumber is called when a key was successfully used to sign a transaction as the proposer. // It increments the sequence number. -func (k *accountKey) IncrementSequenceNumber() { +func (k *AccountKey) IncrementSequenceNumber() { k.mu.Lock() defer k.mu.Unlock() @@ -86,17 +94,93 @@ func (k *accountKey) IncrementSequenceNumber() { k.SequenceNumber++ } -func (k *accountKey) SignPayload(tx *flowsdk.Transaction) error { - return tx.SignPayload(*k.Address, k.Index, k.Signer) +func (k *AccountKey) SignPayload(tx *flowsdk.Transaction) error { + return tx.SignPayload(k.Address, k.Index, k.Signer) } -func (k *accountKey) SignTx(tx *flowsdk.Transaction) error { +func (k *AccountKey) SetProposerPayerAndSign(tx *flowsdk.Transaction) error { if len(tx.Authorizers) == 0 { - tx = tx.AddAuthorizer(*k.Address) + tx = tx.AddAuthorizer(k.Address) } return tx. - SetProposalKey(*k.Address, k.Index, k.SequenceNumber). - SetPayer(*k.Address). - SignEnvelope(*k.Address, k.Index, k.Signer) + SetProposalKey(k.Address, k.Index, k.SequenceNumber). + SetPayer(k.Address). + SignEnvelope(k.Address, k.Index, k.Signer) +} + +func EnsureAccountHasKeys( + log zerolog.Logger, + account *FlowAccount, + num int, + referenceBlockProvider common.ReferenceBlockProvider, + sender common.TransactionSender, +) error { + if account.NumKeys() >= num { + return nil + } + + numberOfKeysToAdd := num - account.NumKeys() + + return AddKeysToAccount(log, account, numberOfKeysToAdd, referenceBlockProvider, sender) +} + +func AddKeysToAccount( + log zerolog.Logger, + account *FlowAccount, + numberOfKeysToAdd int, + referenceBlockProvider common.ReferenceBlockProvider, + sender common.TransactionSender, +) error { + log.Debug(). + Int("number_of_keys_to_add", numberOfKeysToAdd). + Str("account", account.Address.String()). + Msg("adding keys to account") + + key, err := account.GetKey() + if err != nil { + return err + } + defer key.Done() + + wrapErr := func(err error) error { + return fmt.Errorf("error adding keys to account %s: %w", account.Address, err) + } + accountKeys := make([]flowsdk.AccountKey, numberOfKeysToAdd) + for i := 0; i < numberOfKeysToAdd; i++ { + accountKey := key.AccountKey + accountKey.Index = uint32(i + account.NumKeys()) + accountKey.SequenceNumber = 0 + accountKeys[i] = accountKey + } + + cadenceKeys := make([]cadence.Value, numberOfKeysToAdd) + for i := 0; i < numberOfKeysToAdd; i++ { + cadenceKeys[i] = blueprints.BytesToCadenceArray(accountKeys[i].PublicKey.Encode()) + } + cadenceKeysArray := cadence.NewArray(cadenceKeys) + + addKeysTx := flowsdk.NewTransaction(). + SetScript(scripts.AddKeysToAccountTransaction). 
+ SetReferenceBlockID(referenceBlockProvider.ReferenceBlockID()) + + err = addKeysTx.AddArgument(cadenceKeysArray) + if err != nil { + return err + } + + err = key.SetProposerPayerAndSign(addKeysTx) + if err != nil { + return wrapErr(err) + } + + _, err = sender.Send(addKeysTx) + if err == nil || errors.Is(err, common.TransactionError{}) { + key.IncrementSequenceNumber() + } + if err != nil { + return wrapErr(err) + } + + return nil } diff --git a/integration/benchmark/cmd/ci/adjuster.go b/integration/benchmark/adjuster.go similarity index 79% rename from integration/benchmark/cmd/ci/adjuster.go rename to integration/benchmark/adjuster.go index ed96d9b53ab..5e05eac7d3f 100644 --- a/integration/benchmark/cmd/ci/adjuster.go +++ b/integration/benchmark/adjuster.go @@ -1,17 +1,17 @@ -package main +package benchmark import ( "context" + "errors" "fmt" + "math" "time" "github.com/rs/zerolog" "go.einride.tech/pid" - - "github.com/onflow/flow-go/integration/benchmark" ) -type adjuster struct { +type Adjuster struct { ctx context.Context cancel context.CancelFunc done chan struct{} @@ -19,8 +19,8 @@ type adjuster struct { controller *pid.Controller params AdjusterParams - lg *benchmark.ContLoadGenerator - workerStatsTracker *benchmark.WorkerStatsTracker + lg *ContLoadGenerator + workerStatsTracker *WorkerStatsTracker log zerolog.Logger } type AdjusterParams struct { @@ -38,18 +38,18 @@ type adjusterState struct { executed uint timedout uint - targetTPS uint + targetTPS float64 } func NewTPSAdjuster( ctx context.Context, log zerolog.Logger, - lg *benchmark.ContLoadGenerator, - workerStatsTracker *benchmark.WorkerStatsTracker, + lg *ContLoadGenerator, + workerStatsTracker *WorkerStatsTracker, params AdjusterParams, -) *adjuster { +) *Adjuster { ctx, cancel := context.WithCancel(ctx) - a := &adjuster{ + a := &Adjuster{ ctx: ctx, cancel: cancel, done: make(chan struct{}), @@ -77,34 +77,34 @@ func NewTPSAdjuster( go func() { defer close(a.done) - log.Info().Dur("delayInMS", params.Delay).Msg("Waiting before starting TPS adjuster") + log.Info().Dur("delayInMS", params.Delay).Msg("Waiting before starting TPS Adjuster") select { case <-time.After(params.Delay): - log.Info().Msg("starting TPS adjuster") + log.Info().Msg("starting TPS Adjuster") case <-ctx.Done(): return } err := a.adjustTPSForever() - if err != nil && err != context.Canceled { - log.Error().Err(err).Msg("adjuster failed") + if err != nil && !errors.Is(err, context.Canceled) { + log.Error().Err(err).Msg("Adjuster failed") } }() return a } -func (a *adjuster) Stop() { +func (a *Adjuster) Stop() { a.cancel() <-a.done } -func (a *adjuster) adjustTPSForever() (err error) { +func (a *Adjuster) adjustTPSForever() (err error) { initialStats := a.workerStatsTracker.GetStats() lastState := adjusterState{ timestamp: time.Now(), tps: 0, - targetTPS: a.params.InitialTPS, + targetTPS: float64(a.params.InitialTPS), executed: uint(initialStats.TxsExecuted), timedout: uint(initialStats.TxsTimedout), } @@ -133,7 +133,7 @@ func (a *adjuster) adjustTPSForever() (err error) { // compared to the last round. // // Target TPS is always bounded by [minTPS, maxTPS]. -func (a *adjuster) adjustOnce(nowTs time.Time, lastState adjusterState) (adjusterState, error) { +func (a *Adjuster) adjustOnce(nowTs time.Time, lastState adjusterState) (adjusterState, error) { timeDiff := nowTs.Sub(lastState.timestamp) currentStats := a.workerStatsTracker.GetStats() @@ -146,25 +146,26 @@ func (a *adjuster) adjustOnce(nowTs time.Time, lastState adjusterState) (adjuste ratio := 1. 
+ a.controller.State.ControlSignal targetInflight := inflight * ratio - unboundedTPS := uint(float64(lastState.targetTPS) * ratio) + unboundedTPS := float64(lastState.targetTPS) * ratio boundedTPS := boundTPS(unboundedTPS, a.params.MinTPS, a.params.MaxTPS) + roundedTPS := uint(math.Round(boundedTPS)) // number of timed out transactions in the last interval txsTimedout := currentStats.TxsTimedout - int(lastState.timedout) currentTPS := float64(currentStats.TxsExecuted-int(lastState.executed)) / timeDiff.Seconds() a.log.Info(). - Uint("lastTargetTPS", lastState.targetTPS). + Float64("lastTargetTPS", lastState.targetTPS). Float64("lastTPS", lastState.tps). Float64("currentTPS", currentTPS). - Uint("unboundedTPS", unboundedTPS). - Uint("targetTPS", boundedTPS). + Float64("unboundedTPS", unboundedTPS). + Uint("targetTPS", roundedTPS). Interface("pid", a.controller.State). Float64("targetInflight", targetInflight). Float64("inflight", inflight). Int("txsTimedout", txsTimedout). Msg("adjusting TPS") - err := a.lg.SetTPS(boundedTPS) + err := a.lg.SetTPS(roundedTPS) if err != nil { return lastState, fmt.Errorf("unable to set tps: %w", err) } @@ -179,12 +180,12 @@ func (a *adjuster) adjustOnce(nowTs time.Time, lastState adjusterState) (adjuste }, nil } -func boundTPS(tps, min, max uint) uint { +func boundTPS(tps float64, min, max uint) float64 { switch { - case tps < min: - return min - case tps > max: - return max + case tps < float64(min): + return float64(min) + case tps > float64(max): + return float64(max) default: return tps } diff --git a/integration/benchmark/cmd/ci/main.go b/integration/benchmark/cmd/ci/main.go index adab61e1f4c..7cee95ee32d 100644 --- a/integration/benchmark/cmd/ci/main.go +++ b/integration/benchmark/cmd/ci/main.go @@ -3,23 +3,24 @@ package main import ( "context" "flag" - "net" "os" "strings" "time" - "github.com/prometheus/client_golang/prometheus" - "github.com/rs/zerolog" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "gopkg.in/yaml.v3" + + "github.com/prometheus/client_golang/prometheus" flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/access" client "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go/integration/benchmark" - pb "github.com/onflow/flow-go/integration/benchmark/proto" + "github.com/onflow/flow-go/integration/benchmark/load" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" ) @@ -30,10 +31,9 @@ type BenchmarkInfo struct { // Hardcoded CI values const ( - loadType = "token-transfer" + defaultLoadType = load.TokenTransferLoadType metricport = uint(8080) accessNodeAddress = "127.0.0.1:4001" - pushgateway = "127.0.0.1:9091" accountMultiplier = 50 feedbackEnabled = true serviceAccountPrivateKeyHex = unittest.ServiceAccountPrivateKeyHex @@ -42,37 +42,43 @@ const ( defaultMetricCollectionInterval = 20 * time.Second // gRPC constants - defaultMaxMsgSize = 1024 * 1024 * 16 // 16 MB - defaultGRPCAddress = "127.0.0.1:4777" + defaultMaxMsgSize = 1024 * 1024 * 16 // 16 MB ) func main() { logLvl := flag.String("log-level", "info", "set log level") // CI relevant flags - grpcAddressFlag := flag.String("grpc-address", defaultGRPCAddress, "listen address for gRPC server") initialTPSFlag := flag.Int("tps-initial", 10, "starting transactions per second") maxTPSFlag := flag.Int("tps-max", *initialTPSFlag, "maximum transactions per second allowed") minTPSFlag := flag.Int("tps-min", *initialTPSFlag, "minimum transactions per second 
allowed") + loadTypeFlag := flag.String("load-type", string(defaultLoadType), "load type (token-transfer / const-exec / evm) from the load config file") + loadConfigFileLocationFlag := flag.String("load-config", "", "load config file location. If not provided, default config will be used.") + adjustIntervalFlag := flag.Duration("tps-adjust-interval", defaultAdjustInterval, "interval for adjusting TPS") adjustDelayFlag := flag.Duration("tps-adjust-delay", 120*time.Second, "delay before adjusting TPS") - statIntervalFlag := flag.Duration("stat-interval", defaultMetricCollectionInterval, "") durationFlag := flag.Duration("duration", 10*time.Minute, "test duration") + + statIntervalFlag := flag.Duration("stat-interval", defaultMetricCollectionInterval, "") gitRepoPathFlag := flag.String("git-repo-path", "../..", "git repo path of the filesystem") gitRepoURLFlag := flag.String("git-repo-url", "https://github.com/onflow/flow-go.git", "git repo URL") bigQueryUpload := flag.Bool("bigquery-upload", true, "whether to upload results to BigQuery (true / false)") - bigQueryProjectFlag := flag.String("bigquery-project", "dapperlabs-data", "project name for the bigquery uploader") - bigQueryDatasetFlag := flag.String("bigquery-dataset", "dev_src_flow_tps_metrics", "dataset name for the bigquery uploader") + pushgateway := flag.String("pushgateway", "disabled", "host:port for pushgateway") + bigQueryProjectFlag := flag.String("bigquery-project", "ff-data-platform", "project name for the bigquery uploader") + bigQueryDatasetFlag := flag.String("bigquery-dataset", "dev_src_flow_performance_metrics", "dataset name for the bigquery uploader") bigQueryRawTableFlag := flag.String("bigquery-raw-table", "rawResults", "table name for the bigquery raw results") flag.Parse() - // parse log level and apply to logger - log := zerolog.New(os.Stderr).With().Timestamp().Logger().Output(zerolog.ConsoleWriter{Out: os.Stderr}) - lvl, err := zerolog.ParseLevel(strings.ToLower(*logLvl)) - if err != nil { - log.Fatal().Err(err).Str("strLevel", *logLvl).Msg("invalid log level") - } - log = log.Level(lvl) + log := setupLogger(logLvl) + + loadConfig := getLoadConfig( + log, + *loadConfigFileLocationFlag, + *loadTypeFlag, + *minTPSFlag, + *maxTPSFlag, + *initialTPSFlag, + ) if *gitRepoPathFlag == "" { flag.PrintDefaults() @@ -86,29 +92,11 @@ func main() { <-server.Ready() loaderMetrics := metrics.NewLoaderCollector() - grpcServerOptions := []grpc.ServerOption{ - grpc.MaxRecvMsgSize(defaultMaxMsgSize), - grpc.MaxSendMsgSize(defaultMaxMsgSize), - } - grpcServer := grpc.NewServer(grpcServerOptions...) 
- defer grpcServer.Stop() - - pb.RegisterBenchmarkServer(grpcServer, &benchmarkServer{}) - - grpcListener, err := net.Listen("tcp", *grpcAddressFlag) - if err != nil { - log.Fatal().Err(err).Str("address", *grpcAddressFlag).Msg("failed to listen") + if *pushgateway != "disabled" { + sp := benchmark.NewStatsPusher(ctx, log, *pushgateway, "loader", prometheus.DefaultGatherer) + defer sp.Stop() } - go func() { - if err := grpcServer.Serve(grpcListener); err != nil { - log.Fatal().Err(err).Msg("failed to serve") - } - }() - - sp := benchmark.NewStatsPusher(ctx, log, pushgateway, "loader", prometheus.DefaultGatherer) - defer sp.Stop() - addressGen := flowsdk.NewAddressGenerator(flowsdk.Emulator) serviceAccountAddress := addressGen.NextAddress() fungibleTokenAddress := addressGen.NextAddress() @@ -121,11 +109,13 @@ func main() { flowClient, err := client.NewClient( accessNodeAddress, - grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(defaultMaxMsgSize), - grpc.MaxCallSendMsgSize(defaultMaxMsgSize), + client.WithGRPCDialOptions( + grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(defaultMaxMsgSize), + grpc.MaxCallSendMsgSize(defaultMaxMsgSize), + ), + grpc.WithTransportCredentials(insecure.NewCredentials()), ), - grpc.WithTransportCredentials(insecure.NewCredentials()), ) if err != nil { log.Fatal().Err(err).Msg("unable to initialize Flow client") @@ -136,10 +126,7 @@ func main() { // prepare load generator log.Info(). - Str("load_type", loadType). - Int("initialTPS", *initialTPSFlag). - Int("minTPS", *minTPSFlag). - Int("maxTPS", *maxTPSFlag). + Interface("loadConfig", loadConfig). Dur("duration", *durationFlag). Msg("Running load case") @@ -148,10 +135,16 @@ func main() { workerStatsTracker := benchmark.NewWorkerStatsTracker(bCtx) defer workerStatsTracker.Stop() - statsLogger := benchmark.NewPeriodicStatsLogger(workerStatsTracker, log) + statsLogger := benchmark.NewPeriodicStatsLogger(ctx, workerStatsTracker, log) statsLogger.Start() defer statsLogger.Stop() + loadParams := benchmark.LoadParams{ + NumberOfAccounts: maxInflight, + LoadConfig: loadConfig, + FeedbackEnabled: feedbackEnabled, + } + lg, err := benchmark.New( bCtx, log, @@ -159,51 +152,38 @@ func main() { loaderMetrics, []access.Client{flowClient}, benchmark.NetworkParams{ - ServAccPrivKeyHex: serviceAccountPrivateKeyHex, - ServiceAccountAddress: &serviceAccountAddress, - FungibleTokenAddress: &fungibleTokenAddress, - FlowTokenAddress: &flowTokenAddress, - }, - benchmark.LoadParams{ - NumberOfAccounts: maxInflight, - LoadType: benchmark.LoadType(loadType), - FeedbackEnabled: feedbackEnabled, + ServAccPrivKeyHex: serviceAccountPrivateKeyHex, + ChainId: flow.Emulator, }, - // We do support only one load type for now. 
-		benchmark.ConstExecParams{},
+		loadParams,
 	)
-	if err != nil {
-		log.Fatal().Err(err).Msg("unable to create new cont load generator")
-	}
-
-	err = lg.Init()
-	if err != nil {
-		log.Fatal().Err(err).Msg("unable to init loader")
-	}
-
-	// run load
-	err = lg.SetTPS(uint(*initialTPSFlag))
 	if err != nil {
-		log.Fatal().Err(err).Msg("unable to set tps")
+		log.Fatal().Err(err).Msg("unable to create new cont load generator")
 	}
 
-	adjuster := NewTPSAdjuster(
-		bCtx,
+	adjuster := benchmark.NewTPSAdjuster(
+		ctx,
 		log,
 		lg,
 		workerStatsTracker,
-
-		AdjusterParams{
+		benchmark.AdjusterParams{
 			Delay:       *adjustDelayFlag,
 			Interval:    *adjustIntervalFlag,
-			InitialTPS:  uint(*initialTPSFlag),
-			MinTPS:      uint(*minTPSFlag),
-			MaxTPS:      uint(*maxTPSFlag),
-			MaxInflight: uint(maxInflight / 2),
+			InitialTPS:  uint(loadParams.LoadConfig.TPSInitial),
+			MinTPS:      uint(loadParams.LoadConfig.TpsMin),
+			MaxTPS:      uint(loadParams.LoadConfig.TpsMax),
+			MaxInflight: uint(loadParams.NumberOfAccounts / 2),
 		},
 	)
 	defer adjuster.Stop()
 
+	// start the load with the initial TPS
+	err = lg.SetTPS(uint(loadConfig.TPSInitial))
+	if err != nil {
+		log.Fatal().Err(err).Msg("unable to set tps")
+	}
+
 	recorder := NewTPSRecorder(bCtx, workerStatsTracker, *statIntervalFlag)
 	defer recorder.Stop()
@@ -217,7 +197,6 @@ func main() {
 	log.Info().Msg("Cancelling benchmark context")
 	bCancel()
 	recorder.Stop()
-	adjuster.Stop()
 
 	log.Info().Msg("Stopping load generator")
 	lg.Stop()
@@ -227,7 +206,7 @@ func main() {
 	// only upload valid data
 	if *bigQueryUpload {
 		repoInfo := MustGetRepoInfo(log, *gitRepoURLFlag, *gitRepoPathFlag)
-		mustUploadData(ctx, log, recorder, repoInfo, *bigQueryProjectFlag, *bigQueryDatasetFlag, *bigQueryRawTableFlag)
+		mustUploadData(ctx, log, recorder, repoInfo, *bigQueryProjectFlag, *bigQueryDatasetFlag, *bigQueryRawTableFlag, loadConfig.LoadName)
 	} else {
 		log.Info().Int("raw_tps_size", len(recorder.BenchmarkResults.RawTPS)).Msg("logging tps results locally")
 		// log results locally when not uploading to BigQuery
@@ -237,21 +216,92 @@ func main() {
 	}
 }
 
+func getLoadConfig(
+	log zerolog.Logger,
+	loadConfigLocation string,
+	load string,
+	minTPS int,
+	maxTPS int,
+	initialTPS int,
+) benchmark.LoadConfig {
+	if loadConfigLocation == "" {
+		lc := benchmark.LoadConfig{
+			LoadName:   load,
+			LoadType:   load,
+			TpsMax:     maxTPS,
+			TpsMin:     minTPS,
+			TPSInitial: initialTPS,
+		}
+
+		log.Info().
+			Interface("loadConfig", lc).
+			Msg("Load config file not provided, using parameters supplied in TPS flags")
+		return lc
+	}
+
+	var loadConfigs map[string]benchmark.LoadConfig
+
+	// check if the file exists
+	if _, err := os.Stat(loadConfigLocation); os.IsNotExist(err) {
+		log.Fatal().Err(err).Str("loadConfigLocation", loadConfigLocation).Msg("load config file not found")
+	}
+
+	yamlFile, err := os.ReadFile(loadConfigLocation)
+	if err != nil {
+		log.Fatal().Err(err).Str("loadConfigLocation", loadConfigLocation).Msg("failed to read load config file")
+	}
+
+	err = yaml.Unmarshal(yamlFile, &loadConfigs)
+	if err != nil {
+		log.Fatal().Err(err).Str("loadConfigLocation", loadConfigLocation).Msg("failed to unmarshal load config file")
+	}
+
+	lc, ok := loadConfigs[load]
+	if !ok {
+		log.Fatal().Str("load", load).Msg("load not found in load config file")
+	}
+	lc.LoadName = load
+
+	return lc
+}
+
+// setupLogger parses the log level and applies it to the logger
+func setupLogger(logLvl *string) zerolog.Logger {
+	log := zerolog.New(os.Stderr).
+		With().
+		Timestamp().
+		Logger().
+ Output(zerolog.ConsoleWriter{Out: os.Stderr}) + + lvl, err := zerolog.ParseLevel(strings.ToLower(*logLvl)) + if err != nil { + log.Fatal().Err(err).Str("strLevel", *logLvl).Msg("invalid log level") + } + log = log.Level(lvl) + return log +} + func mustUploadData( ctx context.Context, log zerolog.Logger, - recorder *tpsRecorder, + recorder *TPSRecorder, repoInfo *RepoInfo, bigQueryProject string, bigQueryDataset string, bigQueryRawTable string, + loadName string, ) { log.Info().Msg("Initializing BigQuery") db, err := NewDB(ctx, log, bigQueryProject) if err != nil { log.Fatal().Err(err).Msg("failed to create bigquery client") } - defer db.Close() + defer func(db *DB) { + err := db.Close() + if err != nil { + log.Fatal().Err(err).Msg("failed to close bigquery client") + } + }(db) err = db.createTable(ctx, bigQueryDataset, bigQueryRawTable, RawRecord{}) if err != nil { @@ -265,7 +315,7 @@ func mustUploadData( bigQueryRawTable, recorder.BenchmarkResults, *repoInfo, - BenchmarkInfo{BenchmarkType: loadType}, + BenchmarkInfo{BenchmarkType: loadName}, MustGetDefaultEnvironment(), ) if err != nil { @@ -273,7 +323,7 @@ func mustUploadData( } } -func mustValidateData(log zerolog.Logger, recorder *tpsRecorder) { +func mustValidateData(log zerolog.Logger, recorder *TPSRecorder) { log.Info().Msg("Validating data") var totalTPS float64 for _, record := range recorder.BenchmarkResults.RawTPS { diff --git a/integration/benchmark/cmd/ci/recorder.go b/integration/benchmark/cmd/ci/recorder.go index 80c03440f5e..6e482be6b14 100644 --- a/integration/benchmark/cmd/ci/recorder.go +++ b/integration/benchmark/cmd/ci/recorder.go @@ -39,7 +39,7 @@ type BenchmarkResults struct { RawTPS []RawTPSRecord } -type tpsRecorder struct { +type TPSRecorder struct { BenchmarkResults lastStats benchmark.WorkerStats @@ -56,10 +56,10 @@ func NewTPSRecorder( ctx context.Context, workerStatsTracker *benchmark.WorkerStatsTracker, statInterval time.Duration, -) *tpsRecorder { +) *TPSRecorder { ctx, cancel := context.WithCancel(ctx) - r := &tpsRecorder{ + r := &TPSRecorder{ BenchmarkResults: BenchmarkResults{ Status: StatusUnknown, StartTime: time.Now(), @@ -90,7 +90,7 @@ func NewTPSRecorder( return r } -func (r *tpsRecorder) Stop() { +func (r *TPSRecorder) Stop() { r.stopOnce.Do(func() { r.cancel() <-r.done @@ -103,11 +103,11 @@ func (r *tpsRecorder) Stop() { }) } -func (r *tpsRecorder) SetStatus(status Status) { +func (r *TPSRecorder) SetStatus(status Status) { r.Status = status } -func (r *tpsRecorder) record(nowTs time.Time, stats benchmark.WorkerStats) { +func (r *TPSRecorder) record(nowTs time.Time, stats benchmark.WorkerStats) { if !r.lastTs.IsZero() { r.RawTPS = append(r.RawTPS, r.statsToRawTPS(nowTs, stats)) } @@ -116,7 +116,7 @@ func (r *tpsRecorder) record(nowTs time.Time, stats benchmark.WorkerStats) { r.lastTs = nowTs } -func (r *tpsRecorder) statsToRawTPS(nowTs time.Time, stats benchmark.WorkerStats) RawTPSRecord { +func (r *TPSRecorder) statsToRawTPS(nowTs time.Time, stats benchmark.WorkerStats) RawTPSRecord { timeDiff := nowTs.Sub(r.lastTs).Seconds() return RawTPSRecord{ diff --git a/integration/benchmark/cmd/ci/server.go b/integration/benchmark/cmd/ci/server.go deleted file mode 100644 index b3420a203e6..00000000000 --- a/integration/benchmark/cmd/ci/server.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "context" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" - - pb "github.com/onflow/flow-go/integration/benchmark/proto" -) - -type 
benchmarkServer struct { - pb.UnimplementedBenchmarkServer -} - -func (s *benchmarkServer) StartMacroBenchmark(req *pb.StartMacroBenchmarkRequest, stream pb.Benchmark_StartMacroBenchmarkServer) error { - return status.Errorf(codes.Unimplemented, "method StartMacroBenchmark not implemented") -} -func (s *benchmarkServer) GetMacroBenchmark(context.Context, *pb.GetMacroBenchmarkRequest) (*pb.GetMacroBenchmarkResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMacroBenchmark not implemented") -} -func (s *benchmarkServer) ListMacroBenchmarks(context.Context, *emptypb.Empty) (*pb.ListMacroBenchmarksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListMacroBenchmarks not implemented") -} -func (s *benchmarkServer) Status(context.Context, *emptypb.Empty) (*pb.StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} diff --git a/integration/benchmark/cmd/manual/Dockerfile b/integration/benchmark/cmd/manual/Dockerfile index 1ad38985a43..094f93a7882 100644 --- a/integration/benchmark/cmd/manual/Dockerfile +++ b/integration/benchmark/cmd/manual/Dockerfile @@ -1,23 +1,14 @@ # syntax = docker/dockerfile:experimental # NOTE: Must be run in the context of the repo's root directory -FROM golang:1.19-buster AS build-setup +FROM golang:1.25-bookworm AS build-setup RUN apt-get update -RUN apt-get -y install cmake zip - -## (1) Build Relic first to maximize caching -FROM build-setup AS build-relic +RUN apt-get -y install zip RUN mkdir /build WORKDIR /build -# Copy over the crypto package -COPY crypto ./crypto - -# Build Relic (this places build artifacts in /build/relic/build) -RUN cd ./crypto/ && go generate - ## (2) Build the app binary FROM build-setup AS build-env @@ -35,12 +26,12 @@ ARG TARGET COPY . . -# Copy over Relic build artifacts -COPY --from=build-relic /build/crypto/relic/build ./crypto/relic/build - FROM build-env as build-production WORKDIR /app +# CGO_FLAG can be overwritten +ARG CGO_FLAG + # Keep Go's build cache between builds. 
# https://github.com/golang/go/issues/27719#issuecomment-514747274
# Also, allow ssh access
@@ -48,7 +39,7 @@ RUN --mount=type=cache,sharing=locked,target=/go/pkg/mod \
     --mount=type=cache,target=/root/.cache/go-build \
     --mount=type=ssh \
     cd integration && \
-    CGO_ENABLED=1 go build --tags relic -ldflags "-extldflags -static" -o ./app ./${TARGET}
+    CGO_ENABLED=1 CGO_CFLAGS="${CGO_FLAG}" go build -ldflags "-extldflags -static" -o ./app ./${TARGET}
 
 RUN mv /app/integration/app /app/app
 
diff --git a/integration/benchmark/cmd/manual/main.go b/integration/benchmark/cmd/manual/main.go
index 9250b2a1521..1a5c7f8be6d 100644
--- a/integration/benchmark/cmd/manual/main.go
+++ b/integration/benchmark/cmd/manual/main.go
@@ -35,7 +35,7 @@ const (
 func main() {
 	sleep := flag.Duration("sleep", 0, "duration to sleep before benchmarking starts")
-	loadTypeFlag := flag.String("load-type", "token-transfer", "type of loads (\"token-transfer\", \"add-keys\", \"computation-heavy\", \"event-heavy\", \"ledger-heavy\", \"const-exec\")")
+	loadTypeFlag := flag.String("load-type", "token-transfer", "type of loads (\"token-transfer\", \"add-keys\", \"computation-heavy\", \"event-heavy\", \"ledger-heavy\", \"const-exec\", \"exec-data-heavy\")")
 	tpsFlag := flag.String("tps", "1", "transactions per second (TPS) to send, accepts a comma separated list of values if used in conjunction with `tps-durations`")
 	tpsDurationsFlag := flag.String("tps-durations", "0", "duration that each load test will run, accepts a comma separated list that will be applied to multiple values of the `tps` flag (defaults to infinite if not provided, meaning only the first tps case will be tested; additional values will be ignored)")
 	chainIDStr := flag.String("chain", string(flowsdk.Emulator), "chain ID")
@@ -47,13 +47,9 @@ func main() {
 	_ = flag.Bool("track-txs", false, "deprecated")
 	accountMultiplierFlag := flag.Int("account-multiplier", 100, "number of accounts to create per load tps")
 	feedbackEnabled := flag.Bool("feedback-enabled", true, "wait for transaction execution before submitting new transaction")
-	maxConstExecTxSizeInBytes := flag.Uint("const-exec-max-tx-size", flow.DefaultMaxTransactionByteSize/10, "max byte size of constant exec transaction size to generate")
-	authAccNumInConstExecTx := flag.Uint("const-exec-num-authorizer", 1, "num of authorizer for each constant exec transaction to generate")
-	argSizeInByteInConstExecTx := flag.Uint("const-exec-arg-size", 100, "byte size of tx argument for each constant exec transaction to generate")
-	payerKeyCountInConstExecTx := flag.Uint("const-exec-payer-key-count", 2, "num of payer keys for each constant exec transaction to generate")
 	flag.Parse()
 
-	chainID := flowsdk.ChainID([]byte(*chainIDStr))
+	chainID := flowsdk.ChainID(*chainIDStr)
 
 	// parse log level and apply to logger
 	log := zerolog.New(os.Stderr).With().Timestamp().Logger().Output(zerolog.ConsoleWriter{Out: os.Stderr})
@@ -70,8 +66,10 @@ func main() {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	sp := benchmark.NewStatsPusher(ctx, log, *pushgateway, "loader", prometheus.DefaultGatherer)
-	defer sp.Stop()
+	if *pushgateway != "disabled" {
+		sp := benchmark.NewStatsPusher(ctx, log, *pushgateway, "loader", prometheus.DefaultGatherer)
+		defer sp.Stop()
+	}
 
 	addressGen := flowsdk.NewAddressGenerator(chainID)
 	serviceAccountAddress := addressGen.NextAddress()
@@ -92,11 +90,13 @@ func main() {
 	for _, addr := range accessNodeAddrs {
 		client, err := client.NewClient(
 			addr,
-			grpc.WithDefaultCallOptions(
-
grpc.MaxCallRecvMsgSize(defaultMaxMsgSize), - grpc.MaxCallSendMsgSize(defaultMaxMsgSize), + client.WithGRPCDialOptions( + grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(defaultMaxMsgSize), + grpc.MaxCallSendMsgSize(defaultMaxMsgSize), + ), + grpc.WithTransportCredentials(insecure.NewCredentials()), ), - grpc.WithTransportCredentials(insecure.NewCredentials()), ) if err != nil { log.Fatal().Str("addr", addr).Err(err).Msgf("unable to initialize flow client") @@ -116,7 +116,7 @@ func main() { workerStatsTracker := benchmark.NewWorkerStatsTracker(ctx) defer workerStatsTracker.Stop() - statsLogger := benchmark.NewPeriodicStatsLogger(workerStatsTracker, log) + statsLogger := benchmark.NewPeriodicStatsLogger(context.TODO(), workerStatsTracker, log) statsLogger.Start() defer statsLogger.Stop() @@ -127,21 +127,19 @@ func main() { loaderMetrics, clients, benchmark.NetworkParams{ - ServAccPrivKeyHex: *serviceAccountPrivateKeyHex, - ServiceAccountAddress: &serviceAccountAddress, - FungibleTokenAddress: &fungibleTokenAddress, - FlowTokenAddress: &flowTokenAddress, + ServAccPrivKeyHex: *serviceAccountPrivateKeyHex, + ChainId: flow.ChainID(chainID), }, benchmark.LoadParams{ NumberOfAccounts: int(maxTPS) * *accountMultiplierFlag, - LoadType: benchmark.LoadType(*loadTypeFlag), - FeedbackEnabled: *feedbackEnabled, - }, - benchmark.ConstExecParams{ - MaxTxSizeInByte: *maxConstExecTxSizeInBytes, - AuthAccountNum: *authAccNumInConstExecTx, - ArgSizeInByte: *argSizeInByteInConstExecTx, - PayerKeyCount: *payerKeyCountInConstExecTx, + LoadConfig: benchmark.LoadConfig{ + LoadName: *loadTypeFlag, + LoadType: *loadTypeFlag, + TpsMax: int(maxTPS), + TpsMin: int(maxTPS), + TPSInitial: int(maxTPS), + }, + FeedbackEnabled: *feedbackEnabled, }, ) if err != nil { @@ -149,11 +147,6 @@ func main() { } defer lg.Stop() - err = lg.Init() - if err != nil { - log.Fatal().Err(err).Msg("unable to init loader") - } - for i, c := range loadCases { log.Info(). Str("load_type", *loadTypeFlag). diff --git a/integration/benchmark/common/errors.go b/integration/benchmark/common/errors.go new file mode 100644 index 00000000000..cd8341a3760 --- /dev/null +++ b/integration/benchmark/common/errors.go @@ -0,0 +1,19 @@ +package common + +import "fmt" + +type TransactionError struct { + Err error +} + +func (m TransactionError) Error() string { + return fmt.Sprintf("TransactionError: %s", m.Err) +} + +func (m TransactionError) Unwrap() error { + return m.Err +} + +func NewTransactionError(err error) *TransactionError { + return &TransactionError{Err: err} +} diff --git a/integration/benchmark/common/reference_block_provider.go b/integration/benchmark/common/reference_block_provider.go new file mode 100644 index 00000000000..c03af0cab21 --- /dev/null +++ b/integration/benchmark/common/reference_block_provider.go @@ -0,0 +1,8 @@ +package common + +import flowsdk "github.com/onflow/flow-go-sdk" + +type ReferenceBlockProvider interface { + // ReferenceBlockID returns the reference block ID of a recent block. + ReferenceBlockID() flowsdk.Identifier +} diff --git a/integration/benchmark/common/transaction_sender.go b/integration/benchmark/common/transaction_sender.go new file mode 100644 index 00000000000..4f14693a5ea --- /dev/null +++ b/integration/benchmark/common/transaction_sender.go @@ -0,0 +1,10 @@ +package common + +import flowsdk "github.com/onflow/flow-go-sdk" + +type TransactionSender interface { + // Send sends a transaction to the network. + // It blocks until the transaction result is received or an error occurs. 
+ // If the transaction execution fails, the returned error type is TransactionError. + Send(tx *flowsdk.Transaction) (flowsdk.TransactionResult, error) +} diff --git a/integration/benchmark/contLoadGenerator.go b/integration/benchmark/contLoadGenerator.go index 941e061acca..7f1c31562cf 100644 --- a/integration/benchmark/contLoadGenerator.go +++ b/integration/benchmark/contLoadGenerator.go @@ -5,50 +5,24 @@ import ( "errors" "fmt" "sync" - "time" - "github.com/onflow/cadence" "github.com/rs/zerolog" - "golang.org/x/sync/errgroup" - - "github.com/onflow/flow-go/integration/benchmark/account" - "github.com/onflow/flow-go/module/metrics" flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/access" "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go/model/flow" -) - -type LoadType string - -const ( - TokenTransferLoadType LoadType = "token-transfer" - TokenAddKeysLoadType LoadType = "add-keys" - CompHeavyLoadType LoadType = "computation-heavy" - EventHeavyLoadType LoadType = "event-heavy" - LedgerHeavyLoadType LoadType = "ledger-heavy" - ConstExecCostLoadType LoadType = "const-exec" // for an empty transactions with various tx arguments -) -const lostTransactionThreshold = 90 * time.Second - -var accountCreationBatchSize = 750 // a higher number would hit max gRPC message size - -const ( - // flow testnets only have 10e6 total supply, so we choose a small amounts here - tokensPerTransfer = 0.000001 - tokensPerAccount = 10 + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/integration/benchmark/account" + "github.com/onflow/flow-go/integration/benchmark/common" + "github.com/onflow/flow-go/integration/benchmark/load" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/utils/unittest" ) -// ConstExecParam hosts all parameters for const-exec load type -type ConstExecParams struct { - MaxTxSizeInByte uint - AuthAccountNum uint - ArgSizeInByte uint - PayerKeyCount uint -} +const lostTransactionThreshold = 180 * time.Second // ContLoadGenerator creates a continuous load of transactions to the network // by creating many accounts and transfer flow tokens between them @@ -58,12 +32,7 @@ type ContLoadGenerator struct { log zerolog.Logger loaderMetrics *metrics.LoaderCollector loadParams LoadParams - networkParams NetworkParams - constExecParams ConstExecParams flowClient access.Client - serviceAccount *account.FlowAccount - favContractAddress *flowsdk.Address - availableAccounts chan *account.FlowAccount // queue with accounts available for workers workerStatsTracker *WorkerStatsTracker stoppedChannel chan struct{} follower TxFollower @@ -71,21 +40,27 @@ type ContLoadGenerator struct { workersMutex sync.Mutex workers []*Worker - - accountsMutex sync.Mutex - accounts []*account.FlowAccount } type NetworkParams struct { - ServAccPrivKeyHex string - ServiceAccountAddress *flowsdk.Address - FungibleTokenAddress *flowsdk.Address - FlowTokenAddress *flowsdk.Address + ServAccPrivKeyHex string + ChainId flow.ChainID +} + +type LoadConfig struct { + // LoadName is the name of the load. This can be different from the LoadType + // and is used to identify the load in the results. The use case is when a single + // load type is used to run multiple loads with different parameters. 
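The yaml-tagged `LoadConfig` fields that follow map each entry of the `--load-config` file onto one named load, keyed by load name (see `getLoadConfig` earlier in this diff, where the top-level key becomes `LoadName`). A hypothetical config file and the matching unmarshalling step, assuming `gopkg.in/yaml.v3`; the entry names and TPS numbers are illustrative only:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// loadConfig mirrors the yaml mapping of benchmark.LoadConfig;
// LoadName is deliberately absent because it comes from the map key.
type loadConfig struct {
	LoadType   string `yaml:"load_type"`
	TpsMax     int    `yaml:"tps_max"`
	TpsMin     int    `yaml:"tps_min"`
	TPSInitial int    `yaml:"tps_initial"`
}

const exampleFile = `
token-transfer-low:
  load_type: token-transfer
  tps_max: 100
  tps_min: 10
  tps_initial: 50
add-keys-burst:
  load_type: add-keys
  tps_max: 20
  tps_min: 1
  tps_initial: 5
`

func main() {
	var configs map[string]loadConfig
	if err := yaml.Unmarshal([]byte(exampleFile), &configs); err != nil {
		panic(err)
	}
	// the top-level key selects the load, e.g. --load token-transfer-low
	fmt.Printf("%+v\n", configs["token-transfer-low"])
}
```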
+	LoadName   string `yaml:"-"`
+	LoadType   string `yaml:"load_type"`
+	TpsMax     int    `yaml:"tps_max"`
+	TpsMin     int    `yaml:"tps_min"`
+	TPSInitial int    `yaml:"tps_initial"`
 }
 
 type LoadParams struct {
 	NumberOfAccounts int
-	LoadType         LoadType
+	LoadConfig       LoadConfig
 
 	// TODO(rbtz): inject a TxFollower
 	FeedbackEnabled bool
@@ -100,15 +75,21 @@ func New(
 	flowClients []access.Client,
 	networkParams NetworkParams,
 	loadParams LoadParams,
-	constExecParams ConstExecParams,
 ) (*ContLoadGenerator, error) {
 	if len(flowClients) == 0 {
 		return nil, errors.New("no flow clients available")
 	}
-	// TODO(rbtz): add loadbalancing between multiple clients
+
 	flowClient := flowClients[0]
 
-	servAcc, err := account.LoadServiceAccount(ctx, flowClient, networkParams.ServiceAccountAddress, networkParams.ServAccPrivKeyHex)
+	sc := systemcontracts.SystemContractsForChain(networkParams.ChainId)
+
+	privateKey, err := crypto.DecodePrivateKeyHex(unittest.ServiceAccountPrivateKey.SignAlgo, networkParams.ServAccPrivKeyHex)
+	if err != nil {
+		return nil, fmt.Errorf("error while decoding service account private key hex: %w", err)
+	}
+
+	servAcc, err := account.LoadAccount(ctx, flowClient, flowsdk.BytesToAddress(sc.FlowServiceAccount.Address.Bytes()), privateKey, unittest.ServiceAccountPrivateKey.HashAlgo)
 	if err != nil {
 		return nil, fmt.Errorf("error loading service account %w", err)
 	}
@@ -123,270 +104,104 @@ func New(
 		return nil, err
 	}
 
-	// check and cap params for const-exec mode
-	if loadParams.LoadType == ConstExecCostLoadType {
-		if constExecParams.MaxTxSizeInByte > flow.DefaultMaxTransactionByteSize {
-			errMsg := fmt.Sprintf("MaxTxSizeInByte(%d) is larger than DefaultMaxTransactionByteSize(%d).",
-				constExecParams.MaxTxSizeInByte,
-				flow.DefaultMaxTransactionByteSize)
-			log.Error().Msg(errMsg)
-
-			return nil, errors.New(errMsg)
-		}
-
-		// accounts[0] will be used as the proposer\payer
-		if constExecParams.AuthAccountNum > uint(loadParams.NumberOfAccounts-1) {
-			errMsg := fmt.Sprintf("Number of authorizer(%d) is larger than max possible(%d).",
-				constExecParams.AuthAccountNum,
-				loadParams.NumberOfAccounts-1)
-			log.Error().Msg(errMsg)
-
-			return nil, errors.New(errMsg)
-		}
-
-		if constExecParams.ArgSizeInByte > flow.DefaultMaxTransactionByteSize {
-			errMsg := fmt.Sprintf("ArgSizeInByte(%d) is larger than DefaultMaxTransactionByteSize(%d).",
-				constExecParams.ArgSizeInByte,
-				flow.DefaultMaxTransactionByteSize)
-			log.Error().Msg(errMsg)
-			return nil, errors.New(errMsg)
-		}
-	}
-
 	lg := &ContLoadGenerator{
 		ctx:                ctx,
 		log:                log,
 		loaderMetrics:      loaderMetrics,
 		loadParams:         loadParams,
-		networkParams:      networkParams,
-		constExecParams:    constExecParams,
 		flowClient:         flowClient,
-		serviceAccount:     servAcc,
-		accounts:           make([]*account.FlowAccount, 0),
-		availableAccounts:  make(chan *account.FlowAccount, loadParams.NumberOfAccounts),
 		workerStatsTracker: workerStatsTracker,
 		follower:           follower,
 		stoppedChannel:     make(chan struct{}),
 	}
 
-	lg.log.Info().Int("num_keys", lg.serviceAccount.NumKeys()).Msg("service account loaded")
-
-	// TODO(rbtz): hide load implementation behind an interface
-	switch loadParams.LoadType {
-	case TokenTransferLoadType:
-		lg.workFunc = lg.sendTokenTransferTx
-	case TokenAddKeysLoadType:
-		lg.workFunc = lg.sendAddKeyTx
-	case ConstExecCostLoadType:
-		lg.workFunc = lg.sendConstExecCostTx
-	case CompHeavyLoadType, EventHeavyLoadType, LedgerHeavyLoadType:
-		lg.workFunc = lg.sendFavContractTx
-	default:
-		return nil, fmt.Errorf("unknown load type: %s", loadParams.LoadType)
-	}
-
-	return lg, nil
-}
-
-func (lg *ContLoadGenerator) stopped() bool {
-	select {
-	case <-lg.stoppedChannel:
-		return true
-	default:
-		return false
-	}
-}
+	lg.log.Info().Int("num_keys", servAcc.NumKeys()).Msg("service account loaded")
 
-func (lg *ContLoadGenerator) populateServiceAccountKeys(num int) error {
-	if lg.serviceAccount.NumKeys() >= num {
-		return nil
+	ts := &transactionSender{
+		ctx:                      ctx,
+		log:                      log,
+		flowClient:               flowClient,
+		loaderMetrics:            loaderMetrics,
+		workerStatsTracker:       workerStatsTracker,
+		follower:                 follower,
+		lostTransactionThreshold: lostTransactionThreshold,
 	}
 
-	key1, _ := lg.serviceAccount.GetKey()
-	lg.log.Info().
-		Stringer("HashAlgo", key1.HashAlgo).
-		Stringer("SigAlgo", key1.SigAlgo).
-		Int("Index", key1.Index).
-		Int("Weight", key1.Weight).
-		Msg("service account info")
-	key1.Done()
-
-	numberOfKeysToAdd := num - lg.serviceAccount.NumKeys()
+	accountLoader := account.NewClientAccountLoader(lg.log, ctx, flowClient)
 
-	lg.log.Info().Int("num_keys_to_add", numberOfKeysToAdd).Msg("adding keys to service account")
-
-	addKeysTx, err := lg.createAddKeyTx(*lg.serviceAccount.Address, uint(numberOfKeysToAdd))
+	err = account.EnsureAccountHasKeys(lg.log, servAcc, 100, lg.follower, ts)
 	if err != nil {
-		return fmt.Errorf("error creating add key tx: %w", err)
+		return nil, fmt.Errorf("error ensuring service account has keys: %w", err)
 	}
 
-	addKeysTx.SetReferenceBlockID(lg.follower.BlockID())
+	// we need to wait for the tx adding keys to be sealed, otherwise the client
+	// won't pick up the changes
+	// TODO: add a better way to wait for txs to be sealed
+	time.Sleep(10 * time.Second)
 
-	key, err := lg.serviceAccount.GetKey()
+	err = account.ReloadAccount(accountLoader, servAcc)
 	if err != nil {
-		return fmt.Errorf("error getting service account key: %w", err)
+		return nil, fmt.Errorf("error reloading service account: %w", err)
 	}
-	defer key.Done()
 
-	err = key.SignTx(addKeysTx)
+	ap, err := account.SetupProvider(
+		lg.log,
+		ctx,
+		loadParams.NumberOfAccounts,
+		100_000_000_000,
+		lg.follower,
+		servAcc,
+		ts,
+		networkParams.ChainId.Chain(),
+	)
 	if err != nil {
-		return fmt.Errorf("error signing transaction: %w", err)
+		return nil, fmt.Errorf("error setting up account provider: %w", err)
 	}
 
-	ch, err := lg.sendTx(0, addKeysTx)
-	if err != nil {
-		return fmt.Errorf("error sending transaction: %w", err)
+	lc := load.LoadContext{
+		ChainID: networkParams.ChainId,
+		WorkerContext: load.WorkerContext{
+			WorkerID: -1,
+		},
+		AccountProvider:        ap,
+		TransactionSender:      ts,
+		ReferenceBlockProvider: lg.follower,
+		Proposer:               servAcc,
 	}
-	defer key.IncrementSequenceNumber()
 
-	var result flowsdk.TransactionResult
-	select {
-	case result = <-ch:
-	case <-lg.Done():
-		return fmt.Errorf("load generator stopped")
-	}
+	l := load.CreateLoadType(log, load.LoadType(loadParams.LoadConfig.LoadType))
 
-	lg.log.Info().Stringer("result", result.Status).Msg("add key tx")
-	if result.Error != nil {
-		return fmt.Errorf("error adding keys to service account: %w", result.Error)
-	}
-
-	// reload service account until it has enough keys
-	timeout := time.After(30 * time.Second)
-	for {
-		select {
-		case <-timeout:
-			return fmt.Errorf("timeout waiting for service account to have %d keys", num)
-		case <-lg.Done():
-			return fmt.Errorf("load generator stopped")
-		default:
-		}
-
-		lg.serviceAccount, err = account.LoadServiceAccount(lg.ctx, lg.flowClient, lg.serviceAccount.Address, lg.networkParams.ServAccPrivKeyHex)
-		if err != nil {
-			return fmt.Errorf("error loading service account %w", err)
-		}
-		lg.log.Info().Int("num_keys", 
lg.serviceAccount.NumKeys()).Msg("service account reloaded") - - if lg.serviceAccount.NumKeys() >= num { - break - } - - time.Sleep(1 * time.Second) - } - - return nil -} - -// TODO(rbtz): make part of New -func (lg *ContLoadGenerator) Init() error { - err := lg.populateServiceAccountKeys(50) + err = l.Setup(log, lc) if err != nil { - return fmt.Errorf("error populating service account keys: %w", err) - } - - g := errgroup.Group{} - for i := 0; i < lg.loadParams.NumberOfAccounts; i += accountCreationBatchSize { - i := i - g.Go(func() error { - if lg.stopped() { - return lg.ctx.Err() - } - - num := lg.loadParams.NumberOfAccounts - i - if num > accountCreationBatchSize { - num = accountCreationBatchSize - } - - lg.log.Info().Int("cumulative", i).Int("num", num).Int("numberOfAccounts", lg.loadParams.NumberOfAccounts).Msg("creating accounts") - for { - err := lg.createAccounts(num) - if errors.Is(err, account.ErrNoKeysAvailable) { - lg.log.Warn().Err(err).Msg("error creating accounts, retrying...") - time.Sleep(1 * time.Second) - continue - } - return err - } - }) - // This is needed to avoid hitting the gRPC message size limit. - time.Sleep(1 * time.Second) + return nil, fmt.Errorf("error setting up load: %w", err) } - if err := g.Wait(); err != nil { - return fmt.Errorf("error creating accounts: %w", err) - } + lg.workFunc = func(workerID int) { + wlc := lc + wlc.WorkerContext.WorkerID = workerID - // TODO(rbtz): create an interface for different load types: Setup() - if lg.loadParams.LoadType != ConstExecCostLoadType { - err := lg.setupFavContract() - if err != nil { - lg.log.Error().Err(err).Msg("failed to setup fav contract") - return err - } - } else { - lg.log.Info().Int("numberOfAccountsCreated", len(lg.accounts)). - Msg("new accounts created. Grabbing the first as the proposer/payer " + - "and adding multiple keys to that account") - - err := lg.addKeysToProposerAccount(lg.accounts[0]) - if err != nil { - lg.log.Error().Msg("failed to create add-key transaction for const-exec") - return err + log := lg.log.With().Int("workerID", workerID).Logger() + err := l.Load(log, wlc) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error().Err(err).Msg("error running load") } } - return nil + return lg, nil } -func (lg *ContLoadGenerator) setupFavContract() error { - // take one of the accounts - if len(lg.accounts) == 0 { - return errors.New("can't setup fav contract, zero accounts available") - } - - acc := lg.accounts[0] - - lg.log.Trace().Msg("creating fav contract deployment script") - deployScript := DeployingMyFavContractScript() - - lg.log.Trace().Msg("creating fav contract deployment transaction") - deploymentTx := flowsdk.NewTransaction(). - SetReferenceBlockID(lg.follower.BlockID()). - SetScript(deployScript). 
- SetGasLimit(9999) - - lg.log.Trace().Msg("signing transaction") - - key, err := acc.GetKey() - if err != nil { - lg.log.Error().Err(err).Msg("error getting key") - return err - } - defer key.Done() - - err = key.SignTx(deploymentTx) - if err != nil { - lg.log.Error().Err(err).Msg("error signing transaction") - return err - } - - ch, err := lg.sendTx(-1, deploymentTx) - if err != nil { - return err +func (lg *ContLoadGenerator) stopped() bool { + select { + case <-lg.stoppedChannel: + return true + default: + return false } - defer key.IncrementSequenceNumber() - - <-ch - lg.workerStatsTracker.IncTxExecuted() - - lg.favContractAddress = acc.Address - return nil } func (lg *ContLoadGenerator) startWorkers(num int) error { for i := 0; i < num; i++ { - worker := NewWorker(len(lg.workers), 1*time.Second, lg.workFunc) + worker := NewWorker(lg.ctx, len(lg.workers), 1*time.Second, lg.workFunc) lg.log.Trace().Int("workerID", worker.workerID).Msg("starting worker") worker.Start() lg.workers = append(lg.workers, worker) @@ -470,497 +285,52 @@ func (lg *ContLoadGenerator) Done() <-chan struct{} { return lg.stoppedChannel } -func (lg *ContLoadGenerator) createAccounts(num int) error { - privKey := account.RandomPrivateKey() - accountKey := flowsdk.NewAccountKey(). - FromPrivateKey(privKey). - SetHashAlgo(crypto.SHA3_256). - SetWeight(flowsdk.AccountKeyWeightThreshold) - - // Generate an account creation script - createAccountTx := flowsdk.NewTransaction(). - SetScript(CreateAccountsScript(*lg.networkParams.FungibleTokenAddress, *lg.networkParams.FlowTokenAddress)). - SetReferenceBlockID(lg.follower.BlockID()). - SetGasLimit(999999) - - publicKey := bytesToCadenceArray(accountKey.PublicKey.Encode()) - count := cadence.NewInt(num) - - initialTokenAmount, err := cadence.NewUFix64FromParts( - tokensPerAccount, - 0, - ) - if err != nil { - return err - } - - err = createAccountTx.AddArgument(publicKey) - if err != nil { - return err - } - - err = createAccountTx.AddArgument(count) - if err != nil { - return err - } - - err = createAccountTx.AddArgument(initialTokenAmount) - if err != nil { - return err - } - - key, err := lg.serviceAccount.GetKey() - if err != nil { - lg.log.Error().Err(err).Msg("error getting key") - return err - } - defer key.Done() - - err = key.SignTx(createAccountTx) - if err != nil { - return err - } - - // Do not wait for the transaction to be sealed. 
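The removed account-creation path below recovers the new addresses by scanning the sealed transaction result for `AccountCreated` events; that SDK pattern still applies wherever accounts are created in bulk. A compact sketch using the flow-go-sdk event wrapper (the result is assumed to come from an executed transaction):

```go
package loader

import flowsdk "github.com/onflow/flow-go-sdk"

// createdAddresses extracts the addresses of all accounts created by a
// transaction, using the same event parsing the removed createAccounts used.
func createdAddresses(result flowsdk.TransactionResult) []flowsdk.Address {
	var addrs []flowsdk.Address
	for _, event := range result.Events {
		if event.Type == flowsdk.EventAccountCreated {
			// AccountCreatedEvent wraps the raw event and exposes Address()
			addrs = append(addrs, flowsdk.AccountCreatedEvent(event).Address())
		}
	}
	return addrs
}
```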
- ch, err := lg.sendTx(-1, createAccountTx) - if err != nil { - return err - } - defer key.IncrementSequenceNumber() - - var result flowsdk.TransactionResult - select { - case result = <-ch: - lg.workerStatsTracker.IncTxExecuted() - case <-time.After(60 * time.Second): - return fmt.Errorf("timeout waiting for account creation tx to be executed") - case <-lg.Done(): - return fmt.Errorf("loader stopped while waiting for account creation tx to be executed") - } - - log := lg.log.With().Str("tx_id", createAccountTx.ID().String()).Logger() - log.Trace().Str("status", result.Status.String()).Msg("account creation tx executed") - if result.Error != nil { - log.Error().Err(result.Error).Msg("account creation tx failed") - } - - var accountsCreated int - for _, event := range result.Events { - log.Trace().Str("event_type", event.Type).Str("event", event.String()).Msg("account creation tx event") - - if event.Type == flowsdk.EventAccountCreated { - accountCreatedEvent := flowsdk.AccountCreatedEvent(event) - accountAddress := accountCreatedEvent.Address() - - log.Trace().Hex("address", accountAddress.Bytes()).Msg("new account created") - - newAcc, err := account.New(accountsCreated, &accountAddress, privKey, []*flowsdk.AccountKey{accountKey}) - if err != nil { - return fmt.Errorf("failed to create account: %w", err) - } - accountsCreated++ - - lg.accountsMutex.Lock() - lg.accounts = append(lg.accounts, newAcc) - lg.accountsMutex.Unlock() - lg.availableAccounts <- newAcc - - log.Trace().Hex("address", accountAddress.Bytes()).Msg("new account added") - } - } - if accountsCreated != num { - return fmt.Errorf("failed to create enough contracts, expected: %d, created: %d", - num, accountsCreated) - } - return nil +type transactionSender struct { + ctx context.Context + log zerolog.Logger + flowClient access.Client + loaderMetrics *metrics.LoaderCollector + workerStatsTracker *WorkerStatsTracker + follower TxFollower + lostTransactionThreshold time.Duration } -func (lg *ContLoadGenerator) createAddKeyTx(accountAddress flowsdk.Address, numberOfKeysToAdd uint) (*flowsdk.Transaction, error) { +func (t *transactionSender) Send(tx *flowsdk.Transaction) (flowsdk.TransactionResult, error) { + // Add follower before sending the transaction to avoid race condition + ch := t.follower.Follow(tx.ID()) - key, err := lg.serviceAccount.GetKey() + err := t.flowClient.SendTransaction(t.ctx, *tx) if err != nil { - return nil, err + return flowsdk.TransactionResult{}, fmt.Errorf("error sending transaction: %w", err) } - key.Done() // we don't actually need it - cadenceKeys := make([]cadence.Value, numberOfKeysToAdd) - for i := uint(0); i < numberOfKeysToAdd; i++ { - accountKey := key.AccountKey - cadenceKeys[i] = bytesToCadenceArray(accountKey.PublicKey.Encode()) - } - cadenceKeysArray := cadence.NewArray(cadenceKeys) - - addKeysScript, err := AddKeyToAccountScript() - if err != nil { - lg.log.Error().Err(err).Msg("error getting add key to account script") - return nil, err - } - - addKeysTx := flowsdk.NewTransaction(). - SetScript(addKeysScript). - AddAuthorizer(accountAddress). - SetReferenceBlockID(lg.follower.BlockID()). 
- SetGasLimit(9999) - - err = addKeysTx.AddArgument(cadenceKeysArray) - if err != nil { - lg.log.Error().Err(err).Msg("error constructing add keys to account transaction") - return nil, err - } - - return addKeysTx, nil -} - -func (lg *ContLoadGenerator) sendAddKeyTx(workerID int) { - log := lg.log.With().Int("workerID", workerID).Logger() - - // TODO move this as a configurable parameter - numberOfKeysToAdd := uint(50) - - log.Trace().Msg("getting next available account") - - acc := <-lg.availableAccounts - defer func() { lg.availableAccounts <- acc }() - - log.Trace().Msg("creating add proposer key script") - - addKeysTx, err := lg.createAddKeyTx(*acc.Address, numberOfKeysToAdd) - if err != nil { - log.Error().Err(err).Msg("error creating AddKey transaction") - return - } - - log.Trace().Msg("creating transaction") - - addKeysTx.SetReferenceBlockID(lg.follower.BlockID()) - - log.Trace().Msg("signing transaction") - - key, err := acc.GetKey() - if err != nil { - log.Error().Err(err).Msg("error getting service account key") - return - } - defer key.Done() - - err = key.SignTx(addKeysTx) - if err != nil { - log.Error().Err(err).Msg("error signing transaction") - return - } - - ch, err := lg.sendTx(workerID, addKeysTx) - if err != nil { - return - } - defer key.IncrementSequenceNumber() - <-ch - lg.workerStatsTracker.IncTxExecuted() -} - -func (lg *ContLoadGenerator) addKeysToProposerAccount(proposerPayerAccount *account.FlowAccount) error { - if proposerPayerAccount == nil { - return errors.New("proposerPayerAccount is nil") - } - - addKeysToPayerTx, err := lg.createAddKeyTx(*lg.accounts[0].Address, lg.constExecParams.PayerKeyCount) - if err != nil { - lg.log.Error().Msg("failed to create add-key transaction for const-exec") - return err - } - addKeysToPayerTx.SetReferenceBlockID(lg.follower.BlockID()) - - lg.log.Info().Msg("signing the add-key transaction for const-exec") - - key, err := lg.accounts[0].GetKey() - if err != nil { - lg.log.Error().Err(err).Msg("error getting key") - return err - } - defer key.Done() - - err = key.SignTx(addKeysToPayerTx) - if err != nil { - lg.log.Error().Err(err).Msg("error signing the add-key transaction for const-exec") - return err - } - - lg.log.Info().Msg("issuing the add-key transaction for const-exec") - ch, err := lg.sendTx(0, addKeysToPayerTx) - if err != nil { - return err - } - defer key.IncrementSequenceNumber() - - <-ch - lg.workerStatsTracker.IncTxExecuted() - - lg.log.Info().Msg("the add-key transaction for const-exec is done") - return nil -} - -func (lg *ContLoadGenerator) sendConstExecCostTx(workerID int) { - log := lg.log.With().Int("workerID", workerID).Logger() - - txScriptNoComment := ConstExecCostTransaction(lg.constExecParams.AuthAccountNum, 0) - - proposerKey, err := lg.accounts[0].GetKey() - if err != nil { - log.Error().Err(err).Msg("error getting key") - return - } - defer proposerKey.Done() - - tx := flowsdk.NewTransaction(). - SetReferenceBlockID(lg.follower.BlockID()). - SetScript(txScriptNoComment). - SetGasLimit(10). // const-exec tx has empty transaction - SetProposalKey(*proposerKey.Address, proposerKey.Index, proposerKey.SequenceNumber). - SetPayer(*proposerKey.Address) - - txArgStr := generateRandomStringWithLen(lg.constExecParams.ArgSizeInByte) - txArg, err := cadence.NewString(txArgStr) - if err != nil { - log.Trace().Msg("Failed to generate cadence String parameter. Using empty string.") - } - err = tx.AddArgument(txArg) - if err != nil { - log.Trace().Msg("Failed to add argument. 
Skipping.") - } - - // Add authorizers. lg.accounts[0] used as proposer\payer - log.Trace().Msg("Adding tx authorizers") - for i := uint(1); i < lg.constExecParams.AuthAccountNum+1; i++ { - tx = tx.AddAuthorizer(*lg.accounts[i].Address) - } - - log.Trace().Msg("Authorizers signing tx") - for i := uint(1); i < lg.constExecParams.AuthAccountNum+1; i++ { - key, err := lg.accounts[i].GetKey() - if err != nil { - log.Error().Err(err).Msg("error getting key") - return - } - - err = key.SignPayload(tx) - key.Done() // authorizers don't need to increment their sequence number - - if err != nil { - log.Error().Err(err).Msg("error signing payload") - return - } - } - - log.Trace().Msg("Payer signing tx") - for i := uint(1); i < lg.constExecParams.PayerKeyCount; i++ { - key, err := lg.accounts[i].GetKey() - if err != nil { - log.Error().Err(err).Msg("error getting key") - return - } - - err = tx.SignEnvelope(*key.Address, key.Index, key.Signer) - key.Done() // payers don't need to increment their sequence number - - if err != nil { - log.Error().Err(err).Msg("error signing transaction") - return - } - } - - // calculate RLP-encoded binary size of the transaction without comment - txSizeWithoutComment := uint(len(tx.Encode())) - if txSizeWithoutComment > lg.constExecParams.MaxTxSizeInByte { - log.Error().Msg(fmt.Sprintf("current tx size(%d) without comment "+ - "is larger than max tx size configured(%d)", - txSizeWithoutComment, lg.constExecParams.MaxTxSizeInByte)) - return - } - - // now adding comment to fulfill the final transaction size - commentSizeInByte := lg.constExecParams.MaxTxSizeInByte - txSizeWithoutComment - txScriptWithComment := ConstExecCostTransaction(lg.constExecParams.AuthAccountNum, commentSizeInByte) - tx = tx.SetScript(txScriptWithComment) - - txSizeWithComment := uint(len(tx.Encode())) - log.Trace().Uint("Max Tx Size", lg.constExecParams.MaxTxSizeInByte). - Uint("Actual Tx Size", txSizeWithComment). - Uint("Tx Arg Size", lg.constExecParams.ArgSizeInByte). - Uint("Num of Authorizers", lg.constExecParams.AuthAccountNum). - Uint("Num of payer keys", lg.constExecParams.PayerKeyCount). - Uint("Script comment length", commentSizeInByte). - Msg("Generating one const-exec transaction") - - log.Trace().Msg("Issuing tx") - ch, err := lg.sendTx(workerID, tx) - if err != nil { - log.Error().Err(err).Msg("const-exec tx failed") - return - } - defer proposerKey.IncrementSequenceNumber() - - <-ch - lg.workerStatsTracker.IncTxExecuted() - - log.Trace().Msg("const-exec tx suceeded") -} - -func (lg *ContLoadGenerator) sendTokenTransferTx(workerID int) { - log := lg.log.With().Int("workerID", workerID).Logger() - - log.Trace(). - Int("availableAccounts", len(lg.availableAccounts)). - Msg("getting next available account") - - var acc *account.FlowAccount - - select { - case acc = <-lg.availableAccounts: - default: - log.Error().Msg("next available account channel empty; skipping send") - return - } - defer func() { lg.availableAccounts <- acc }() - nextAcc := lg.accounts[(acc.ID+1)%len(lg.accounts)] - - log.Trace(). - Float64("tokens", tokensPerTransfer). - Hex("srcAddress", acc.Address.Bytes()). - Hex("dstAddress", nextAcc.Address.Bytes()). - Int("srcAccount", acc.ID). - Int("dstAccount", nextAcc.ID). 
- Msg("creating transfer script") - - transferTx, err := TokenTransferTransaction( - lg.networkParams.FungibleTokenAddress, - lg.networkParams.FlowTokenAddress, - nextAcc.Address, - tokensPerTransfer) - if err != nil { - log.Error().Err(err).Msg("error creating token transfer script") - return - } - - log.Trace().Msg("creating token transfer transaction") - transferTx = transferTx. - SetReferenceBlockID(lg.follower.BlockID()). - SetGasLimit(9999) - - log.Trace().Msg("signing transaction") - - key, err := acc.GetKey() - if err != nil { - log.Error().Err(err).Msg("error getting key") - return - } - defer key.Done() - - err = key.SignTx(transferTx) - if err != nil { - log.Error().Err(err).Msg("error signing transaction") - return - } + t.workerStatsTracker.IncTxSent() + t.loaderMetrics.TransactionSent() + timer := time.NewTimer(t.lostTransactionThreshold) + defer timer.Stop() startTime := time.Now() - ch, err := lg.sendTx(workerID, transferTx) - if err != nil { - return - } - defer key.IncrementSequenceNumber() - - log = log.With().Hex("tx_id", transferTx.ID().Bytes()).Logger() - log.Trace().Msg("transaction sent") - - t := time.NewTimer(lostTransactionThreshold) - defer t.Stop() select { + case <-t.ctx.Done(): + return flowsdk.TransactionResult{}, t.ctx.Err() case result := <-ch: + t.workerStatsTracker.IncTxExecuted() + if result.Error != nil { - lg.workerStatsTracker.IncTxFailed() + t.workerStatsTracker.IncTxFailed() + return result, common.NewTransactionError(result.Error) } - log.Trace(). - Dur("duration", time.Since(startTime)). - Err(result.Error). - Str("status", result.Status.String()). - Msg("transaction confirmed") - case <-t.C: - lg.loaderMetrics.TransactionLost() - log.Warn(). + + return result, nil + case <-timer.C: + t.loaderMetrics.TransactionLost() + t.log.Warn(). Dur("duration", time.Since(startTime)). - Int("availableAccounts", len(lg.availableAccounts)). Msg("transaction lost") - lg.workerStatsTracker.IncTxTimedout() - case <-lg.Done(): - return - } - lg.workerStatsTracker.IncTxExecuted() -} - -// TODO update this to include loadtype -func (lg *ContLoadGenerator) sendFavContractTx(workerID int) { - log := lg.log.With().Int("workerID", workerID).Logger() - log.Trace().Msg("getting next available account") - - acc := <-lg.availableAccounts - defer func() { lg.availableAccounts <- acc }() - var txScript []byte - - switch lg.loadParams.LoadType { - case CompHeavyLoadType: - txScript = ComputationHeavyScript(*lg.favContractAddress) - case EventHeavyLoadType: - txScript = EventHeavyScript(*lg.favContractAddress) - case LedgerHeavyLoadType: - txScript = LedgerHeavyScript(*lg.favContractAddress) - default: - log.Error().Msg("unknown load type") - return - } - - log.Trace().Msg("creating transaction") - tx := flowsdk.NewTransaction(). - SetReferenceBlockID(lg.follower.BlockID()). - SetScript(txScript). 
- SetGasLimit(9999) - - log.Trace().Msg("signing transaction") - - key, err := acc.GetKey() - if err != nil { - log.Error().Err(err).Msg("error getting key") - return + t.workerStatsTracker.IncTxTimedOut() + return flowsdk.TransactionResult{}, fmt.Errorf("transaction lost") } - defer key.Done() - - err = key.SignTx(tx) - if err != nil { - log.Error().Err(err).Msg("error signing transaction") - return - } - - ch, err := lg.sendTx(workerID, tx) - if err != nil { - return - } - defer key.IncrementSequenceNumber() - <-ch } -func (lg *ContLoadGenerator) sendTx(workerID int, tx *flowsdk.Transaction) (<-chan flowsdk.TransactionResult, error) { - log := lg.log.With().Int("workerID", workerID).Str("tx_id", tx.ID().String()).Logger() - log.Trace().Msg("sending transaction") - - // Add watcher before sending the transaction to avoid race condition - ch := lg.follower.Follow(tx.ID()) - - err := lg.flowClient.SendTransaction(lg.ctx, *tx) - if err != nil { - log.Error().Err(err).Msg("error sending transaction") - return nil, err - } - - lg.workerStatsTracker.IncTxSent() - lg.loaderMetrics.TransactionSent() - return ch, err -} +var _ common.TransactionSender = (*transactionSender)(nil) diff --git a/integration/benchmark/follower.go b/integration/benchmark/follower.go index 933a528622d..0681570b491 100644 --- a/integration/benchmark/follower.go +++ b/integration/benchmark/follower.go @@ -6,14 +6,18 @@ import ( "sync" "time" + "github.com/onflow/flow-go/integration/benchmark/common" + flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/access" + "github.com/onflow/flow-go/module/metrics" "github.com/rs/zerolog" ) type TxFollower interface { + common.ReferenceBlockProvider // Follow returns a channel that is closed when the transaction is complete. Follow(ID flowsdk.Identifier) <-chan flowsdk.TransactionResult @@ -27,17 +31,17 @@ type TxFollower interface { Stop() } -type followerOption func(f *txFollowerImpl) +type FollowerOption func(f *txFollowerImpl) -func WithLogger(logger zerolog.Logger) followerOption { +func WithLogger(logger zerolog.Logger) FollowerOption { return func(f *txFollowerImpl) { f.logger = logger } } -func WithInteval(interval time.Duration) followerOption { +func WithInteval(interval time.Duration) FollowerOption { return func(f *txFollowerImpl) { f.interval = interval } } -func WithMetrics(m *metrics.LoaderCollector) followerOption { +func WithMetrics(m *metrics.LoaderCollector) FollowerOption { return func(f *txFollowerImpl) { f.metrics = m } } @@ -72,7 +76,7 @@ type txInfo struct { // NewTxFollower creates a new follower that tracks the current block height // and can notify on transaction completion. -func NewTxFollower(ctx context.Context, client access.Client, opts ...followerOption) (TxFollower, error) { +func NewTxFollower(ctx context.Context, client access.Client, opts ...FollowerOption) (TxFollower, error) { newCtx, cancel := context.WithCancel(ctx) f := &txFollowerImpl{ @@ -282,13 +286,17 @@ func (f *txFollowerImpl) Stop() { f.txToChan = make(map[flowsdk.Identifier]txInfo) } +func (f *txFollowerImpl) ReferenceBlockID() flowsdk.Identifier { + return f.BlockID() +} + type nopTxFollower struct { *txFollowerImpl } // NewNopTxFollower creates a new follower that tracks the current block height and ID // but does not notify on transaction completion. 
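The reworked `transactionSender.Send` above is built around one pattern: register with the follower before sending (so a fast result cannot be missed), then select over the result channel, a loss timer, and context cancellation. A stripped-down sketch of that wait loop, with names following the surrounding `benchmark` package and the loss threshold left as a parameter:

```go
package benchmark

import (
	"context"
	"fmt"
	"time"

	flowsdk "github.com/onflow/flow-go-sdk"
	"github.com/onflow/flow-go-sdk/access"
)

// waitForResult subscribes to the transaction's result before sending it,
// then waits for completion, loss timeout, or cancellation.
func waitForResult(
	ctx context.Context,
	follower TxFollower,
	client access.Client,
	tx *flowsdk.Transaction,
	lostAfter time.Duration,
) (flowsdk.TransactionResult, error) {
	ch := follower.Follow(tx.ID()) // subscribe first to avoid a race

	if err := client.SendTransaction(ctx, *tx); err != nil {
		return flowsdk.TransactionResult{}, fmt.Errorf("error sending transaction: %w", err)
	}

	timer := time.NewTimer(lostAfter)
	defer timer.Stop()

	select {
	case result := <-ch:
		return result, nil
	case <-timer.C:
		return flowsdk.TransactionResult{}, fmt.Errorf("transaction lost")
	case <-ctx.Done():
		return flowsdk.TransactionResult{}, ctx.Err()
	}
}
```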
-func NewNopTxFollower(ctx context.Context, client access.Client, opts ...followerOption) (TxFollower, error) { +func NewNopTxFollower(ctx context.Context, client access.Client, opts ...FollowerOption) (TxFollower, error) { f, err := NewTxFollower(ctx, client, opts...) if err != nil { return nil, err diff --git a/integration/benchmark/follower_test.go b/integration/benchmark/follower_test.go index 1b5b942e497..8ae218a6885 100644 --- a/integration/benchmark/follower_test.go +++ b/integration/benchmark/follower_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" flowsdk "github.com/onflow/flow-go-sdk" + mockClient "github.com/onflow/flow-go/integration/benchmark/mock" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/integration/benchmark/load/add_keys_load.go b/integration/benchmark/load/add_keys_load.go new file mode 100644 index 00000000000..95325e961cd --- /dev/null +++ b/integration/benchmark/load/add_keys_load.go @@ -0,0 +1,43 @@ +package load + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/integration/benchmark/account" +) + +type AddKeysLoad struct { + numberOfKeysToAdd int +} + +func NewAddKeysLoad() *AddKeysLoad { + return &AddKeysLoad{ + 10, + } +} + +var _ Load = (*AddKeysLoad)(nil) + +func (l *AddKeysLoad) Type() LoadType { + return AddKeysLoadType +} + +func (l *AddKeysLoad) Setup(_ zerolog.Logger, _ LoadContext) error { + return nil +} + +func (l *AddKeysLoad) Load(log zerolog.Logger, lc LoadContext) error { + wrapErr := func(err error) error { + return fmt.Errorf("failed to send load: %w", err) + } + + acc, err := lc.BorrowAvailableAccount() + if err != nil { + return wrapErr(err) + } + defer lc.ReturnAvailableAccount(acc) + + return account.AddKeysToAccount(log, acc, l.numberOfKeysToAdd, lc, lc) +} diff --git a/integration/benchmark/load/common.go b/integration/benchmark/load/common.go new file mode 100644 index 00000000000..4ce4e589fc8 --- /dev/null +++ b/integration/benchmark/load/common.go @@ -0,0 +1,72 @@ +package load + +import ( + "errors" + "fmt" + + "github.com/rs/zerolog" + + flowsdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/integration/benchmark/common" + + "github.com/onflow/flow-go/integration/benchmark/account" +) + +// transactionFunc is a function that creates a transaction. +// It is used by sendSimpleTransaction. +type transactionFunc func( + log zerolog.Logger, + lc LoadContext, + acc *account.FlowAccount, +) (*flowsdk.Transaction, error) + +// sendSimpleTransaction is a helper function for sending a transaction. +// It +// - borrows an account, +// - creates a transaction, +// - sets the reference block ID, +// - sets the proposer and payer and one authorizer (if not already set), +// - signs it with the account, +// - sends the transaction to the network. +// - waits for the transaction result. 
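Callers of `Send` need to tell "the transaction never made it on chain" apart from "it was committed but failed during execution", because only the latter consumes the proposer key's sequence number. A sketch of that check against the `common.TransactionError` wrapper introduced above (`sequenceNumberConsumed` is a hypothetical helper, not part of the diff):

```go
package load

import (
	"errors"

	"github.com/onflow/flow-go/integration/benchmark/common"
)

// sequenceNumberConsumed reports whether a Send error still consumed the
// proposer key's sequence number: nil means the transaction executed, and a
// *common.TransactionError means it was committed but failed during execution.
func sequenceNumberConsumed(err error) bool {
	if err == nil {
		return true
	}
	var txErr *common.TransactionError
	// errors.As matches the *TransactionError returned by NewTransactionError
	return errors.As(err, &txErr)
}
```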
+func sendSimpleTransaction(log zerolog.Logger, lc LoadContext, txFN transactionFunc) error {
+	wrapErr := func(err error) error {
+		return fmt.Errorf("error in send simple transaction: %w", err)
+	}
+
+	acc, err := lc.BorrowAvailableAccount()
+	if err != nil {
+		return wrapErr(err)
+	}
+	defer lc.ReturnAvailableAccount(acc)
+
+	tx, err := txFN(log, lc, acc)
+	if err != nil {
+		return wrapErr(err)
+	}
+
+	tx.SetReferenceBlockID(lc.ReferenceBlockID())
+
+	key, err := acc.GetKey()
+	if err != nil {
+		return wrapErr(err)
+	}
+	defer key.Done()
+
+	err = key.SetProposerPayerAndSign(tx)
+	if err != nil {
+		return wrapErr(err)
+	}
+
+	_, err = lc.Send(tx)
+	// a *common.TransactionError means the transaction was committed but failed
+	// during execution, so the proposer key's sequence number was still consumed
+	var txErr *common.TransactionError
+	if err == nil || errors.As(err, &txErr) {
+		key.IncrementSequenceNumber()
+	}
+	if err != nil {
+		return wrapErr(err)
+	}
+
+	return nil
+}
diff --git a/integration/benchmark/load/create_account_load.go b/integration/benchmark/load/create_account_load.go
new file mode 100644
index 00000000000..ce71426b62a
--- /dev/null
+++ b/integration/benchmark/load/create_account_load.go
@@ -0,0 +1,53 @@
+package load
+
+import (
+	"github.com/rs/zerolog"
+
+	flowsdk "github.com/onflow/flow-go-sdk"
+
+	"github.com/onflow/flow-go/integration/benchmark/account"
+)
+
+type CreateAccountLoad struct {
+}
+
+func NewCreateAccountLoad() *CreateAccountLoad {
+	return &CreateAccountLoad{}
+}
+
+func (c CreateAccountLoad) Type() LoadType {
+	return CreateAccount
+}
+
+func (c CreateAccountLoad) Setup(zerolog.Logger, LoadContext) error {
+	// no setup needed
+	return nil
+}
+
+func (c CreateAccountLoad) Load(log zerolog.Logger, lc LoadContext) error {
+	return sendSimpleTransaction(
+		log,
+		lc,
+		func(
+			log zerolog.Logger,
+			lc LoadContext,
+			acc *account.FlowAccount,
+		) (*flowsdk.Transaction, error) {
+			tx := flowsdk.NewTransaction().
+ SetScript( + []byte(` + transaction() { + prepare(signer: auth(BorrowValue) &Account) { + Account(payer: signer) + } + }`, + ), + ) + + return tx, nil + + }) +} + +var _ Load = (*CreateAccountLoad)(nil) diff --git a/integration/benchmark/load/evm_batch_load.go b/integration/benchmark/load/evm_batch_load.go new file mode 100644 index 00000000000..146136fa789 --- /dev/null +++ b/integration/benchmark/load/evm_batch_load.go @@ -0,0 +1,443 @@ +package load + +import ( + "bytes" + "context" + "crypto/ecdsa" + "fmt" + "io" + "math/big" + "time" + + gethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" + "github.com/onflow/cadence" + "github.com/rs/zerolog" + "go.uber.org/atomic" + "golang.org/x/sync/errgroup" + + flowsdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/fvm/blueprints" + "github.com/onflow/flow-go/fvm/evm/emulator" + "github.com/onflow/flow-go/fvm/evm/stdlib" + EVMBatchTypes "github.com/onflow/flow-go/fvm/evm/types" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/integration/benchmark/account" + "github.com/onflow/flow-go/module/util" +) + +type EVMBatchTransferLoad struct { + PreCreateEOAAccounts int + TransfersPerTransaction uint64 + + log zerolog.Logger + tokensPerTransfer cadence.UFix64 + + eoaChan chan *eoa + doneChan chan struct{} + + transfers atomic.Uint64 + creations atomic.Uint64 + + bridgedAcountAddress flowsdk.Address +} + +func NewEVMBatchTransferLoad(log zerolog.Logger) *EVMBatchTransferLoad { + load := &EVMBatchTransferLoad{ + log: log.With().Str("component", "EVMBatchTransferLoad").Logger(), + tokensPerTransfer: cadence.UFix64(100), + // really large channel, + // it's going to get filled as needed + eoaChan: make(chan *eoa, 1_000_000), + doneChan: make(chan struct{}), + PreCreateEOAAccounts: 5000, + TransfersPerTransaction: 150, + } + + go load.reportStatus() + + return load +} + +var _ Load = (*EVMBatchTransferLoad)(nil) +var _ io.Closer = (*EVMBatchTransferLoad)(nil) + +func (l *EVMBatchTransferLoad) Close() error { + close(l.doneChan) + return nil +} + +func (l *EVMBatchTransferLoad) reportStatus() { + // report status every 10 seconds until done + for { + select { + case <-l.doneChan: + return + case <-time.After(10 * time.Second): + l.log.Info(). + Uint64("transfers", l.transfers.Load()). + Uint64("creations", l.creations.Load()). 
+ Msg("EVMBatchTransferLoad status report") + } + } + +} + +func (l *EVMBatchTransferLoad) Type() LoadType { + return EVMBatchTransferLoadType +} + +func (l *EVMBatchTransferLoad) Setup(log zerolog.Logger, lc LoadContext) error { + + // create a shared bridged account + err := sendSimpleTransaction( + log, + lc, + func( + log zerolog.Logger, + lc LoadContext, + acc *account.FlowAccount, + ) (*flowsdk.Transaction, error) { + l.bridgedAcountAddress = acc.Address + + sc := systemcontracts.SystemContractsForChain(lc.ChainID) + + contractName := "BridgedAccountContract" + + contract := fmt.Sprintf(` + import EVM from %s + import FlowToken from %s + + access(all) contract BridgedAccountContract { + access(self) var acc: @EVM.CadenceOwnedAccount + + access(all) + fun address() : EVM.EVMAddress { + return self.acc.address() + } + + access(all) + fun call( + to: EVM.EVMAddress, + data: [UInt8], + gasLimit: UInt64, + value: EVM.Balance + ): EVM.Result { + return self.acc.call( + to: to, + data: data, + gasLimit: gasLimit, + value: value + ) + } + + access(all) + fun deposit(from: @FlowToken.Vault) { + self.acc.deposit(from: <-from) + } + + init() { + self.acc <- EVM.createCadenceOwnedAccount() + } + } + `, + sc.EVMContract.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix()) + + tx := flowsdk.NewTransaction(). + SetScript(blueprints.DeployContractTransactionTemplate) + + err := tx.AddArgument(cadence.String(contractName)) + if err != nil { + return nil, err + } + err = tx.AddArgument(cadence.String(contract)) + if err != nil { + return nil, err + } + + return tx, nil + }) + if err != nil { + return fmt.Errorf("error creating shared bridged account: %w", err) + } + + // create some EOA ahead of time to get a better result for the benchmark + createEOA := l.PreCreateEOAAccounts + + g, ctx := errgroup.WithContext(context.Background()) + g.SetLimit(lc.Proposer.NumKeys()) + + progress := util.LogProgress(l.log, + util.DefaultLogProgressConfig( + "creating and funding EOC accounts", + createEOA, + )) + + l.log.Info(). + Int("number_of_accounts", createEOA). + Int("number_of_keys", lc.Proposer.NumKeys()). + Msg("creating and funding EOC accounts") + + for i := 0; i < createEOA; i += 1 { + i := i + g.Go(func() error { + select { + case <-ctx.Done(): + return nil + default: + } + defer func() { progress(1) }() + + eoa, err := l.setupTransaction(log, lc) + if err != nil { + return err + } + + if err != nil { + l.log. + Err(err). + Int("index", i). 
+ Msg("error creating EOA accounts") + return err + } + + l.creations.Add(1) + + select { + case l.eoaChan <- eoa: + default: + } + + return nil + }) + } + err = g.Wait() + if err != nil { + return fmt.Errorf("error creating EOC accounts: %w", err) + } + return nil +} + +func (l *EVMBatchTransferLoad) Load(log zerolog.Logger, lc LoadContext) error { + select { + case eoa := <-l.eoaChan: + if eoa == nil { + return nil + } + err := l.transferTransaction(log, lc, eoa) + if err == nil { + eoa.nonce += l.TransfersPerTransaction + } + + l.transfers.Add(1) + + select { + case l.eoaChan <- eoa: + default: + } + + return err + default: + // no eoa available, create a new one + eoa, err := l.setupTransaction(log, lc) + if err != nil { + return err + } + l.creations.Add(1) + + select { + case l.eoaChan <- eoa: + default: + } + + return nil + } +} + +func (l *EVMBatchTransferLoad) setupTransaction( + log zerolog.Logger, + lc LoadContext, +) (*eoa, error) { + eoa := &eoa{} + + privateKey, err := crypto.GenerateKey() + if err != nil { + return nil, err + } + + publicKey := privateKey.Public() + publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("error casting public key to ECDSA") + } + eoa.pk = privateKey + eoa.adress = crypto.PubkeyToAddress(*publicKeyECDSA) + + addressCadenceBytes := make([]cadence.Value, 20) + for i := range addressCadenceBytes { + addressCadenceBytes[i] = cadence.UInt8(eoa.adress[i]) + } + + eoa.addressArg = cadence.NewArray(addressCadenceBytes).WithType(stdlib.EVMAddressBytesCadenceType) + + err = sendSimpleTransaction( + log, + lc, + func( + log zerolog.Logger, + lc LoadContext, + acc *account.FlowAccount, + ) (*flowsdk.Transaction, error) { + sc := systemcontracts.SystemContractsForChain(lc.ChainID) + + amountArg, err := cadence.NewUFix64("1.0") + if err != nil { + return nil, err + } + + // Fund evm address + txBody := flowsdk.NewTransaction(). + SetScript([]byte(fmt.Sprintf( + ` + import EVM from %s + import FungibleToken from %s + import FlowToken from %s + import BridgedAccountContract from 0x%s + + transaction(address: [UInt8; 20], amount: UFix64) { + let fundVault: @FlowToken.Vault + + prepare(signer: auth(Storage) &Account) { + let vaultRef = signer.storage + .borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>( + from: /storage/flowTokenVault + ) + ?? panic("Could not borrow reference to the owner's Vault!") + + // 1.0 Flow for the EVM gass fees + self.fundVault <- vaultRef.withdraw(amount: amount+1.0) as! 
@FlowToken.Vault + } + + execute { + BridgedAccountContract.deposit(from: <-self.fundVault) + let fundAddress = EVM.EVMAddress(bytes: address) + var balance = EVM.Balance(attoflow: 0) + balance.setFLOW(flow: amount) + BridgedAccountContract.call( + to: fundAddress, + data: [], + gasLimit: 21000, + value: balance) + } + } + `, + sc.FlowServiceAccount.Address.HexWithPrefix(), + sc.FungibleToken.Address.HexWithPrefix(), + sc.FlowToken.Address.HexWithPrefix(), + l.bridgedAcountAddress.Hex(), + ))) + + err = txBody.AddArgument(eoa.addressArg) + if err != nil { + return nil, err + } + err = txBody.AddArgument(amountArg) + if err != nil { + return nil, err + } + + return txBody, nil + }) + if err != nil { + return nil, err + } + return eoa, nil +} + +func (l *EVMBatchTransferLoad) transferTransaction( + log zerolog.Logger, + lc LoadContext, + eoa *eoa, +) error { + return sendSimpleTransaction( + log, + lc, + func( + log zerolog.Logger, + lc LoadContext, + acc *account.FlowAccount, + ) (*flowsdk.Transaction, error) { + nonce := eoa.nonce + to := gethcommon.HexToAddress("") + gasPrice := big.NewInt(0) + + oneFlow := cadence.UFix64(100_000_000) + amount := new(big.Int).Div(EVMBatchTypes.OneFlowBalance(), big.NewInt(int64(oneFlow))) + + batchCount := int(l.TransfersPerTransaction) + + txBytes := make([]cadence.Value, batchCount) + for i := 0; i < batchCount; i++ { + + EVMBatchTx := types.NewTx(&types.LegacyTx{Nonce: nonce + uint64(i), To: &to, Value: amount, Gas: params.TxGas, GasPrice: gasPrice, Data: nil}) + + signed, err := types.SignTx(EVMBatchTx, emulator.GetDefaultSigner(), eoa.pk) + if err != nil { + return nil, fmt.Errorf("error signing EVM transaction: %w", err) + } + var encoded bytes.Buffer + err = signed.EncodeRLP(&encoded) + if err != nil { + return nil, fmt.Errorf("error encoding EVM transaction: %w", err) + } + + encodedCadence := make([]cadence.Value, 0) + for _, b := range encoded.Bytes() { + encodedCadence = append(encodedCadence, cadence.UInt8(b)) + } + transactionBytes := cadence.NewArray(encodedCadence).WithType(stdlib.EVMTransactionBytesCadenceType) + + txBytes[i] = transactionBytes + } + + txs := cadence.NewArray(txBytes). + WithType(cadence.NewVariableSizedArrayType( + stdlib.EVMTransactionBytesCadenceType, + )) + + sc := systemcontracts.SystemContractsForChain(lc.ChainID) + txBody := flowsdk.NewTransaction(). 
+				SetScript([]byte(fmt.Sprintf(
+					`
+import EVM from %s
+import FungibleToken from %s
+import FlowToken from %s
+
+transaction(txs: [[UInt8]], address: [UInt8; 20]) {
+	prepare(signer: &Account){}
+	execute {
+		EVM.batchRun(txs: txs, coinbase: EVM.EVMAddress(bytes: address))
+	}
+}
+					`,
+					sc.EVMContract.Address.HexWithPrefix(),
+					sc.FungibleToken.Address.HexWithPrefix(),
+					sc.FlowToken.Address.HexWithPrefix(),
+				)))
+
+			err := txBody.AddArgument(txs)
+			if err != nil {
+				return nil, fmt.Errorf("error adding argument to transaction: %w", err)
+			}
+			err = txBody.AddArgument(eoa.addressArg)
+			if err != nil {
+				return nil, fmt.Errorf("error adding argument to transaction: %w", err)
+			}
+
+			return txBody, nil
+		})
+}
diff --git a/integration/benchmark/load/evm_load.go b/integration/benchmark/load/evm_load.go
new file mode 100644
index 00000000000..ed592a5773a
--- /dev/null
+++ b/integration/benchmark/load/evm_load.go
@@ -0,0 +1,435 @@
+package load
+
+import (
+	"bytes"
+	"context"
+	"crypto/ecdsa"
+	"fmt"
+	"io"
+	"math/big"
+	"time"
+
+	gethcommon "github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/onflow/cadence"
+	"github.com/rs/zerolog"
+	"go.uber.org/atomic"
+	"golang.org/x/sync/errgroup"
+
+	flowsdk "github.com/onflow/flow-go-sdk"
+
+	"github.com/onflow/flow-go/fvm/blueprints"
+	"github.com/onflow/flow-go/fvm/evm/emulator"
+	"github.com/onflow/flow-go/fvm/evm/stdlib"
+	evmTypes "github.com/onflow/flow-go/fvm/evm/types"
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+	"github.com/onflow/flow-go/integration/benchmark/account"
+	"github.com/onflow/flow-go/module/util"
+)
+
+// eoa represents an EVM externally owned account (EOA).
+type eoa struct {
+	addressArg cadence.Value
+	nonce      uint64
+	pk         *ecdsa.PrivateKey
+	address    gethcommon.Address
+}
+
+type EVMTransferLoad struct {
+	PreCreateEOAAccounts int
+
+	log               zerolog.Logger
+	tokensPerTransfer cadence.UFix64
+
+	eoaChan  chan *eoa
+	doneChan chan struct{}
+
+	transfers atomic.Uint64
+	creations atomic.Uint64
+
+	bridgedAccountAddress flowsdk.Address
+}
+
+func NewEVMTransferLoad(log zerolog.Logger) *EVMTransferLoad {
+	load := &EVMTransferLoad{
+		log:               log.With().Str("component", "EVMTransferLoad").Logger(),
+		tokensPerTransfer: cadence.UFix64(100),
+		// really large channel;
+		// it's going to get filled as needed
+		eoaChan:              make(chan *eoa, 1_000_000),
+		doneChan:             make(chan struct{}),
+		PreCreateEOAAccounts: 5000,
+	}
+
+	go load.reportStatus()
+
+	return load
+}
+
+var _ Load = (*EVMTransferLoad)(nil)
+var _ io.Closer = (*EVMTransferLoad)(nil)
+
+func (l *EVMTransferLoad) Close() error {
+	close(l.eoaChan)
+	close(l.doneChan)
+	return nil
+}
+
+func (l *EVMTransferLoad) reportStatus() {
+	// report status every 10 seconds until done
+	for {
+		select {
+		case <-l.doneChan:
+			return
+		case <-time.After(10 * time.Second):
+			l.log.Info().
+				Uint64("transfers", l.transfers.Load()).
+				Uint64("creations", l.creations.Load()).
+				Msg("EVMTransferLoad status report")
+		}
+	}
+}
+
+func (l *EVMTransferLoad) Type() LoadType {
+	return EVMTransferLoadType
+}
+
+func (l *EVMTransferLoad) Setup(log zerolog.Logger, lc LoadContext) error {
+
+	// create a shared bridged account
+	err := sendSimpleTransaction(
+		log,
+		lc,
+		func(
+			log zerolog.Logger,
+			lc LoadContext,
+			acc *account.FlowAccount,
+		) (*flowsdk.Transaction, error) {
+			l.bridgedAccountAddress = acc.Address
+
+			sc := systemcontracts.SystemContractsForChain(lc.ChainID)
+
+			contractName := "BridgedAccountContract"
+
+			contract := fmt.Sprintf(`
+				import EVM from %s
+				import FlowToken from %s
+
+				access(all) contract BridgedAccountContract {
+					access(self) var acc: @EVM.CadenceOwnedAccount
+
+					access(all)
+					fun address() : EVM.EVMAddress {
+						return self.acc.address()
+					}
+
+					access(all)
+					fun call(
+						to: EVM.EVMAddress,
+						data: [UInt8],
+						gasLimit: UInt64,
+						value: EVM.Balance
+					): EVM.Result {
+						return self.acc.call(
+							to: to,
+							data: data,
+							gasLimit: gasLimit,
+							value: value
+						)
+					}
+
+					access(all)
+					fun deposit(from: @FlowToken.Vault) {
+						self.acc.deposit(from: <-from)
+					}
+
+					init() {
+						self.acc <- EVM.createCadenceOwnedAccount()
+					}
+				}
+			`,
+				sc.EVMContract.Address.HexWithPrefix(),
+				sc.FlowToken.Address.HexWithPrefix())
+
+			tx := flowsdk.NewTransaction().
+				SetScript(blueprints.DeployContractTransactionTemplate)
+
+			err := tx.AddArgument(cadence.String(contractName))
+			if err != nil {
+				return nil, err
+			}
+			err = tx.AddArgument(cadence.String(contract))
+			if err != nil {
+				return nil, err
+			}
+
+			return tx, nil
+		})
+	if err != nil {
+		return fmt.Errorf("error creating shared bridged account: %w", err)
+	}
+
+	// create some EOAs ahead of time to get a better result for the benchmark
+	createEOA := l.PreCreateEOAAccounts
+
+	g, ctx := errgroup.WithContext(context.Background())
+	g.SetLimit(lc.Proposer.NumKeys())
+
+	progress := util.LogProgress(l.log,
+		util.DefaultLogProgressConfig(
+			"creating and funding EOA accounts",
+			createEOA,
+		))
+
+	l.log.Info().
+		Int("number_of_accounts", createEOA).
+		Int("number_of_keys", lc.Proposer.NumKeys()).
+		Msg("creating and funding EOA accounts")
+
+	for i := 0; i < createEOA; i += 1 {
+		i := i
+		g.Go(func() error {
+			select {
+			case <-ctx.Done():
+				return nil
+			default:
+			}
+			defer func() { progress(1) }()
+
+			eoa, err := l.setupTransaction(log, lc)
+			if err != nil {
+				l.log.
+					Err(err).
+					Int("index", i).
+					Msg("error creating EOA accounts")
+				return err
+			}
+
+			l.creations.Add(1)
+
+			select {
+			case l.eoaChan <- eoa:
+			default:
+			}
+
+			return nil
+		})
+	}
+	err = g.Wait()
+	if err != nil {
+		return fmt.Errorf("error creating EOA accounts: %w", err)
+	}
+	return nil
+}
+
+func (l *EVMTransferLoad) Load(log zerolog.Logger, lc LoadContext) error {
+	select {
+	case eoa := <-l.eoaChan:
+		if eoa == nil {
+			return nil
+		}
+		err := l.transferTransaction(log, lc, eoa)
+		if err == nil {
+			eoa.nonce += 1
+		}
+
+		l.transfers.Add(1)
+
+		select {
+		case l.eoaChan <- eoa:
+		default:
+		}
+
+		return err
+	default:
+		// no eoa available, create a new one
+		eoa, err := l.setupTransaction(log, lc)
+		if err != nil {
+			return err
+		}
+		l.creations.Add(1)
+
+		select {
+		case l.eoaChan <- eoa:
+		default:
+		}
+
+		return nil
+	}
+}
+
+func (l *EVMTransferLoad) setupTransaction(
+	log zerolog.Logger,
+	lc LoadContext,
+) (*eoa, error) {
+	eoa := &eoa{}
+
+	privateKey, err := crypto.GenerateKey()
+	if err != nil {
+		return nil, err
+	}
+
+	publicKey := privateKey.Public()
+	publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
+	if !ok {
+		return nil, fmt.Errorf("error casting public key to ECDSA")
+	}
+	eoa.pk = privateKey
+	eoa.address = crypto.PubkeyToAddress(*publicKeyECDSA)
+
+	addressCadenceBytes := make([]cadence.Value, 20)
+	for i := range addressCadenceBytes {
+		addressCadenceBytes[i] = cadence.UInt8(eoa.address[i])
+	}
+
+	eoa.addressArg = cadence.NewArray(addressCadenceBytes).WithType(stdlib.EVMAddressBytesCadenceType)
+
+	err = sendSimpleTransaction(
+		log,
+		lc,
+		func(
+			log zerolog.Logger,
+			lc LoadContext,
+			acc *account.FlowAccount,
+		) (*flowsdk.Transaction, error) {
+			sc := systemcontracts.SystemContractsForChain(lc.ChainID)
+
+			amountArg, err := cadence.NewUFix64("1.0")
+			if err != nil {
+				return nil, err
+			}
+
+			// Fund evm address
+			txBody := flowsdk.NewTransaction().
+				SetScript([]byte(fmt.Sprintf(
+					`
+					import EVM from %s
+					import FungibleToken from %s
+					import FlowToken from %s
+					import BridgedAccountContract from 0x%s
+
+					transaction(address: [UInt8; 20], amount: UFix64) {
+						let fundVault: @FlowToken.Vault
+
+						prepare(signer: auth(Storage) &Account) {
+							let vaultRef = signer.storage
+								.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(
+									from: /storage/flowTokenVault
+								)
+								?? panic("Could not borrow reference to the owner's Vault!")
+
+							// withdraw an extra 1.0 FLOW to cover the EVM gas fees
+							self.fundVault <- vaultRef.withdraw(amount: amount+1.0) as!
@FlowToken.Vault
+						}
+
+						execute {
+							BridgedAccountContract.deposit(from: <-self.fundVault)
+							let fundAddress = EVM.EVMAddress(bytes: address)
+							var balance = EVM.Balance(attoflow: 0)
+							balance.setFLOW(flow: amount)
+							BridgedAccountContract.call(
+								to: fundAddress,
+								data: [],
+								gasLimit: 21000,
+								value: balance)
+						}
+					}
+				`,
+				sc.FlowServiceAccount.Address.HexWithPrefix(),
+				sc.FungibleToken.Address.HexWithPrefix(),
+				sc.FlowToken.Address.HexWithPrefix(),
+				l.bridgedAccountAddress.Hex(),
+			)))
+
+			err = txBody.AddArgument(eoa.addressArg)
+			if err != nil {
+				return nil, err
+			}
+			err = txBody.AddArgument(amountArg)
+			if err != nil {
+				return nil, err
+			}
+
+			return txBody, nil
+		})
+	if err != nil {
+		return nil, err
+	}
+	return eoa, nil
+}
+
+func (l *EVMTransferLoad) transferTransaction(
+	log zerolog.Logger,
+	lc LoadContext,
+	eoa *eoa,
+) error {
+	return sendSimpleTransaction(
+		log,
+		lc,
+		func(
+			log zerolog.Logger,
+			lc LoadContext,
+			acc *account.FlowAccount,
+		) (*flowsdk.Transaction, error) {
+			nonce := eoa.nonce
+			// transfers are sent to the zero address; the destination does not matter for load generation
+			to := gethcommon.HexToAddress("")
+			gasPrice := big.NewInt(0)
+
+			// 1 FLOW expressed in UFix64 fixed-point (8 decimal places)
+			oneFlow := cadence.UFix64(100_000_000)
+			amount := new(big.Int).Div(evmTypes.OneFlowBalance(), big.NewInt(int64(oneFlow)))
+			evmTx := types.NewTx(&types.LegacyTx{Nonce: nonce, To: &to, Value: amount, Gas: params.TxGas, GasPrice: gasPrice, Data: nil})
+
+			signed, err := types.SignTx(evmTx, emulator.GetDefaultSigner(), eoa.pk)
+			if err != nil {
+				return nil, fmt.Errorf("error signing EVM transaction: %w", err)
+			}
+			var encoded bytes.Buffer
+			err = signed.EncodeRLP(&encoded)
+			if err != nil {
+				return nil, fmt.Errorf("error encoding EVM transaction: %w", err)
+			}
+
+			encodedCadence := make([]cadence.Value, 0)
+			for _, b := range encoded.Bytes() {
+				encodedCadence = append(encodedCadence, cadence.UInt8(b))
+			}
+			transactionBytes := cadence.NewArray(encodedCadence).WithType(stdlib.EVMTransactionBytesCadenceType)
+
+			sc := systemcontracts.SystemContractsForChain(lc.ChainID)
+			txBody := flowsdk.NewTransaction().
+				SetScript([]byte(fmt.Sprintf(
+					`
+import EVM from %s
+import FungibleToken from %s
+import FlowToken from %s
+
+transaction(encodedTx: [UInt8], address: [UInt8; 20]) {
+	prepare(signer: &Account){}
+	execute {
+		EVM.run(tx: encodedTx, coinbase: EVM.EVMAddress(bytes: address))
+	}
+}
+					`,
+					sc.EVMContract.Address.HexWithPrefix(),
+					sc.FungibleToken.Address.HexWithPrefix(),
+					sc.FlowToken.Address.HexWithPrefix(),
+				)))
+
+			err = txBody.AddArgument(transactionBytes)
+			if err != nil {
+				return nil, fmt.Errorf("error adding argument to transaction: %w", err)
+			}
+			err = txBody.AddArgument(eoa.addressArg)
+			if err != nil {
+				return nil, fmt.Errorf("error adding argument to transaction: %w", err)
+			}
+			return txBody, nil
+		})
+}
diff --git a/integration/benchmark/load/load_type.go b/integration/benchmark/load/load_type.go
new file mode 100644
index 00000000000..df3580f12eb
--- /dev/null
+++ b/integration/benchmark/load/load_type.go
@@ -0,0 +1,103 @@
+package load
+
+import (
+	_ "embed"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/integration/benchmark/account"
+	"github.com/onflow/flow-go/integration/benchmark/common"
+	"github.com/onflow/flow-go/integration/benchmark/scripts"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+type LoadType string
+
+const (
+	CompHeavyLoadType     LoadType = "computation-heavy"
+	EventHeavyLoadType    LoadType = "event-heavy"
+	ExecDataHeavyLoadType LoadType = "exec-data-heavy"
+	LedgerHeavyLoadType   LoadType = "ledger-heavy"
+
+	// TODO: port this load type from old code
+	// ConstExecCostLoadType LoadType = "const-exec" // for empty transactions with various tx arguments
+
+	TokenTransferLoadType      LoadType = "token-transfer"
+	TokenTransferMultiLoadType LoadType = "token-transfer-multi"
+	AddKeysLoadType            LoadType = "add-keys"
+	EVMTransferLoadType        LoadType = "evm-transfer"
+	CreateAccount              LoadType = "create-account"
+	EVMBatchTransferLoadType   LoadType = "evm-batch-transfer"
+)
+
+type LoadContext struct {
+	ChainID flow.ChainID
+	WorkerContext
+	account.AccountProvider
+	common.TransactionSender
+	common.ReferenceBlockProvider
+	Proposer *account.FlowAccount
+}
+
+type WorkerContext struct {
+	WorkerID int
+}
+
+type Load interface {
+	Type() LoadType
+	// Setup is called once before the load starts.
+	Setup(log zerolog.Logger, lc LoadContext) error
+	// Load is called repeatedly from multiple goroutines.
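+	// Implementations must therefore be safe for concurrent use.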
+ Load(log zerolog.Logger, lc LoadContext) error +} + +var CompHeavyLoad = NewSimpleLoadType( + CompHeavyLoadType, + "ComputationHeavy", + scripts.ComputationHeavyContractTemplate, + scripts.ComputationHeavyScriptTemplate) + +var EventHeavyLoad = NewSimpleLoadType( + EventHeavyLoadType, + "EventHeavy", + scripts.EventHeavyContractTemplate, + scripts.EventHeavyScriptTemplate) + +var LedgerHeavyLoad = NewSimpleLoadType( + LedgerHeavyLoadType, + "LedgerHeavy", + scripts.LedgerHeavyContractTemplate, + scripts.LedgerHeavyScriptTemplate) + +var ExecDataHeavyLoad = NewSimpleLoadType( + ExecDataHeavyLoadType, + "DataHeavy", + scripts.DataHeavyContractTemplate, + scripts.DataHeavyScriptTemplate) + +func CreateLoadType(log zerolog.Logger, t LoadType) Load { + switch t { + case CompHeavyLoadType: + return CompHeavyLoad + case EventHeavyLoadType: + return EventHeavyLoad + case LedgerHeavyLoadType: + return LedgerHeavyLoad + case ExecDataHeavyLoadType: + return ExecDataHeavyLoad + case TokenTransferLoadType: + return NewTokenTransferLoad() + case TokenTransferMultiLoadType: + return NewTokenTransferMultiLoad() + case AddKeysLoadType: + return NewAddKeysLoad() + case EVMTransferLoadType: + return NewEVMTransferLoad(log) + case EVMBatchTransferLoadType: + return NewEVMBatchTransferLoad(log) + case CreateAccount: + return NewCreateAccountLoad() + default: + panic("unknown load type") + } +} diff --git a/integration/benchmark/load/load_type_test.go b/integration/benchmark/load/load_type_test.go new file mode 100644 index 00000000000..d40385c87b6 --- /dev/null +++ b/integration/benchmark/load/load_type_test.go @@ -0,0 +1,348 @@ +package load_test + +import ( + "context" + "fmt" + "sync" + "testing" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + sdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/crypto" + + cadenceCommon "github.com/onflow/cadence/common" + + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/engine/execution/testutil" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + envMock "github.com/onflow/flow-go/fvm/environment/mock" + "github.com/onflow/flow-go/fvm/meter" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/integration/benchmark/account" + "github.com/onflow/flow-go/integration/benchmark/common" + "github.com/onflow/flow-go/integration/benchmark/load" + "github.com/onflow/flow-go/integration/convert" + "github.com/onflow/flow-go/integration/internal/emulator" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestLoadTypes(t *testing.T) { + + log := zerolog.New(zerolog.NewTestWriter(t)) + + evmLoad := load.NewEVMTransferLoad(log) + // don't create that many accounts for the test + evmLoad.PreCreateEOAAccounts = 20 + + evmBatchLoad := load.NewEVMBatchTransferLoad(log) + // don't create that many accounts for the test + evmBatchLoad.PreCreateEOAAccounts = 20 + + loads := []load.Load{ + load.CompHeavyLoad, + load.EventHeavyLoad, + load.LedgerHeavyLoad, + load.ExecDataHeavyLoad, + load.NewTokenTransferLoad(), + load.NewTokenTransferMultiLoad(), + load.NewAddKeysLoad(), + evmLoad, + evmBatchLoad, + load.NewCreateAccountLoad(), + } + + for _, l := range loads { + t.Run(string(l.Type()), testLoad(log, l)) + } +} + +func testLoad(log zerolog.Logger, l load.Load) func(t *testing.T) { + + return func(t *testing.T) { + + chain := 
flow.Benchnet.Chain() + + vm, ctx, snapshotTree := bootstrapVM(t, chain) + testSnapshotTree := &testSnapshotTree{snapshot: snapshotTree} + + blockProvider := noopReferenceBlockProvider{} + transactionSender := &testTransactionSender{ + t: t, + log: log.With().Str("component", "testTransactionSender").Logger(), + vm: vm, + ctx: ctx, + snapshot: testSnapshotTree, + } + accountLoader := &TestAccountLoader{ + ctx: ctx, + vm: vm, + snapshot: testSnapshotTree, + } + + serviceAccount, err := accountLoader.Load(sdk.ServiceAddress(sdk.ChainID(chain.ChainID())), unittest.ServiceAccountPrivateKey.PrivateKey, unittest.ServiceAccountPrivateKey.HashAlgo) + require.NoError(t, err) + + err = account.EnsureAccountHasKeys(log, serviceAccount, 50, blockProvider, transactionSender) + require.NoError(t, err) + + err = account.ReloadAccount(accountLoader, serviceAccount) + require.NoError(t, err) + + accountProvider, err := account.SetupProvider( + log, + context.Background(), + 100, + 10_000_000_000, + blockProvider, + serviceAccount, + transactionSender, + chain, + ) + require.NoError(t, err) + + lc := load.LoadContext{ + ChainID: chain.ChainID(), + AccountProvider: accountProvider, + ReferenceBlockProvider: blockProvider, + TransactionSender: transactionSender, + WorkerContext: load.WorkerContext{ + WorkerID: 0, + }, + Proposer: serviceAccount, + } + + err = l.Setup(log, lc) + require.NoError(t, err) + for i := 0; i < 100; i++ { + err = l.Load(log, lc) + require.NoError(t, err) + } + } +} + +func bootstrapVM(t *testing.T, chain flow.Chain) (*fvm.VirtualMachine, fvm.Context, snapshot.SnapshotTree) { + source := testutil.EntropyProviderFixture(nil) + + blocks := new(envMock.Blocks) + block1 := unittest.BlockFixture() + blocks.On("ByHeightFrom", + block1.Height, + block1.ToHeader(), + ).Return(block1.ToHeader(), nil) + + opts := computation.DefaultFVMOptions( + chain.ChainID(), + false, + false, + ) + opts = append(opts, + fvm.WithTransactionFeesEnabled(true), + fvm.WithAccountStorageLimit(true), + fvm.WithContractDeploymentRestricted(false), + fvm.WithEntropyProvider(source), + fvm.WithBlocks(blocks), + fvm.WithBlockHeader(block1.ToHeader()), + ) + + ctx := fvm.NewContext(opts...) + + vm := fvm.NewVirtualMachine() + snapshotTree := snapshot.NewSnapshotTree(nil) + bootstrapOpts := []fvm.BootstrapProcedureOption{ + fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply), + fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), + fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), + fvm.WithTransactionFee(fvm.DefaultTransactionFees), + fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), + } + + executionSnapshot, _, err := vm.Run( + ctx, + fvm.Bootstrap(unittest.ServiceAccountPublicKey, bootstrapOpts...), + snapshotTree) + require.NoError(t, err) + snapshotTree = snapshotTree.Append(executionSnapshot) + + return vm, ctx, snapshotTree +} + +type noopReferenceBlockProvider struct{} + +func (n noopReferenceBlockProvider) ReferenceBlockID() sdk.Identifier { + return sdk.EmptyID +} + +var _ common.ReferenceBlockProvider = noopReferenceBlockProvider{} + +type testTransactionSender struct { + t *testing.T + log zerolog.Logger + vm *fvm.VirtualMachine + ctx fvm.Context + snapshot *testSnapshotTree +} + +var _ common.TransactionSender = (*testTransactionSender)(nil) + +func (t *testTransactionSender) Send(tx *sdk.Transaction) (sdk.TransactionResult, error) { + txBodyBuilder := + flow.NewTransactionBodyBuilder(). + SetScript(tx.Script). 
+ SetReferenceBlockID(convert.IDFromSDK(tx.ReferenceBlockID)). + SetComputeLimit(tx.GasLimit). + SetProposalKey( + flow.BytesToAddress(tx.ProposalKey.Address.Bytes()), + tx.ProposalKey.KeyIndex, + tx.ProposalKey.SequenceNumber, + ). + SetPayer(flow.BytesToAddress(tx.Payer.Bytes())) + + for _, auth := range tx.Authorizers { + txBodyBuilder.AddAuthorizer(flow.BytesToAddress(auth.Bytes())) + } + for _, arg := range tx.Arguments { + txBodyBuilder.AddArgument(arg) + } + for _, sig := range tx.PayloadSignatures { + txBodyBuilder.AddPayloadSignatureWithExtensionData( + flow.BytesToAddress(sig.Address.Bytes()), + sig.KeyIndex, + sig.Signature, + sig.ExtensionData, + ) + } + for _, sig := range tx.EnvelopeSignatures { + txBodyBuilder.AddEnvelopeSignatureWithExtensionData( + flow.BytesToAddress(sig.Address.Bytes()), + sig.KeyIndex, + sig.Signature, + sig.ExtensionData, + ) + } + + txBody, err := txBodyBuilder.Build() + if err != nil { + return sdk.TransactionResult{}, fmt.Errorf("failed to build transaction body: %w", err) + } + + require.Equal(t.t, string(tx.PayloadMessage()), string(txBody.PayloadMessage())) + require.Equal(t.t, string(tx.EnvelopeMessage()), string(txBody.EnvelopeMessage())) + + proc := fvm.Transaction(txBody, 0) + + t.snapshot.Lock() + defer t.snapshot.Unlock() + + executionSnapshot, result, err := t.vm.Run(t.ctx, proc, t.snapshot) + if err != nil { + return sdk.TransactionResult{}, err + } + // Update the snapshot + t.snapshot.Append(executionSnapshot) + + // temporarily hardcode the weights as they are not confirmed yet + executionEffortWeights := meter.ExecutionEffortWeights{ + cadenceCommon.ComputationKindStatement: 314, + cadenceCommon.ComputationKindLoop: 314, + cadenceCommon.ComputationKindFunctionInvocation: 314, + environment.ComputationKindGetValue: 162, + environment.ComputationKindCreateAccount: 567534, + environment.ComputationKindSetValue: 153, + environment.ComputationKindEVMGasUsage: 13, + } + + computationUsed := executionEffortWeights.ComputationFromIntensities(result.ComputationIntensities) + t.log.Debug().Uint64("computation", computationUsed).Msg("Transaction applied") + + sdkResult := sdk.TransactionResult{ + Status: sdk.TransactionStatusSealed, + Error: result.Err, + BlockID: sdk.EmptyID, + BlockHeight: 0, + TransactionID: emulator.FlowIdentifierToSDK(txBody.ID()), + CollectionID: sdk.EmptyID, + } + + for _, event := range result.Events { + decoded, err := ccf.Decode(nil, event.Payload) + if err != nil { + return sdkResult, fmt.Errorf("error decoding event payload: %w", err) + } + + sdkResult.Events = append(sdkResult.Events, sdk.Event{ + Type: string(event.Type), + TransactionID: sdk.Identifier{}, + TransactionIndex: 0, + EventIndex: int(event.EventIndex), + Value: decoded.(cadence.Event), + Payload: event.Payload, + }) + } + + if result.Err != nil { + return sdkResult, common.NewTransactionError(result.Err) + } + + return sdkResult, nil +} + +type TestAccountLoader struct { + ctx fvm.Context + vm *fvm.VirtualMachine + snapshot *testSnapshotTree +} + +var _ account.Loader = (*TestAccountLoader)(nil) + +func (t *TestAccountLoader) Load( + address sdk.Address, + privateKey crypto.PrivateKey, + hashAlgo crypto.HashAlgorithm) (*account.FlowAccount, error) { + wrapErr := func(err error) error { + return fmt.Errorf("error while loading account: %w", err) + } + + t.snapshot.Lock() + defer t.snapshot.Unlock() + + acc, err := fvm.GetAccount(t.ctx, flow.ConvertAddress(address), t.snapshot) + if err != nil { + return nil, wrapErr(err) + } + + keys := 
make([]sdk.AccountKey, 0, len(acc.Keys)) + for _, key := range acc.Keys { + keys = append(keys, sdk.AccountKey{ + Index: key.Index, + PublicKey: key.PublicKey, + SigAlgo: key.SignAlgo, + HashAlgo: key.HashAlgo, + Weight: key.Weight, + SequenceNumber: key.SeqNumber, + Revoked: key.Revoked, + }) + } + + return account.New(address, privateKey, hashAlgo, keys) +} + +type testSnapshotTree struct { + snapshot snapshot.SnapshotTree + sync.Mutex +} + +func (t *testSnapshotTree) Get(id flow.RegisterID) (flow.RegisterValue, error) { + return t.snapshot.Get(id) +} + +var _ snapshot.StorageSnapshot = (*testSnapshotTree)(nil) + +func (t *testSnapshotTree) Append(snapshot *snapshot.ExecutionSnapshot) { + t.snapshot = t.snapshot.Append(snapshot) +} diff --git a/integration/benchmark/load/simple_load.go b/integration/benchmark/load/simple_load.go new file mode 100644 index 00000000000..b1b05cf9005 --- /dev/null +++ b/integration/benchmark/load/simple_load.go @@ -0,0 +1,96 @@ +package load + +import ( + "fmt" + + "github.com/onflow/cadence" + "github.com/rs/zerolog" + + flowsdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/fvm/blueprints" + "github.com/onflow/flow-go/integration/benchmark/account" +) + +// SimpleLoad is a load that at setup deploys a contract, +// and at load sends a transaction using that contract. +type SimpleLoad struct { + loadType LoadType + contractName string + contractTemplate string + scriptTemplate string + + contractAddress flowsdk.Address +} + +var _ Load = (*SimpleLoad)(nil) + +// NewSimpleLoadType creates a new SimpleLoad. +// - loadType is the type of the load. +// - contractName is the name of the contract. +// - contractTemplate is the template of the contract. +// - scriptTemplate is the template of the script. It should contain a %s placeholder for +// the contract address. +func NewSimpleLoadType( + loadType LoadType, + contractName string, + contractTemplate string, + scriptTemplate string, +) *SimpleLoad { + return &SimpleLoad{ + loadType: loadType, + contractName: contractName, + contractTemplate: contractTemplate, + scriptTemplate: scriptTemplate, + } +} + +func (l *SimpleLoad) Type() LoadType { + return l.loadType +} + +func (l *SimpleLoad) Setup(log zerolog.Logger, lc LoadContext) error { + return sendSimpleTransaction( + log, + lc, + func( + log zerolog.Logger, + lc LoadContext, + acc *account.FlowAccount, + ) (*flowsdk.Transaction, error) { + // this is going to be the contract address + l.contractAddress = acc.Address + + deploymentTx := flowsdk.NewTransaction(). + SetReferenceBlockID(lc.ReferenceBlockID()). + SetScript(blueprints.DeployContractTransactionTemplate) + + err := deploymentTx.AddArgument(cadence.String(l.contractName)) + if err != nil { + return nil, err + } + err = deploymentTx.AddArgument(cadence.String(l.contractTemplate)) + if err != nil { + return nil, err + } + + return deploymentTx, nil + }, + ) +} + +func (l *SimpleLoad) Load(log zerolog.Logger, lc LoadContext) error { + return sendSimpleTransaction( + log, + lc, + func( + log zerolog.Logger, + lc LoadContext, + acc *account.FlowAccount, + ) (*flowsdk.Transaction, error) { + txScript := fmt.Sprintf(l.scriptTemplate, l.contractAddress) + return flowsdk.NewTransaction(). 
+ SetScript([]byte(txScript)), nil + }, + ) +} diff --git a/integration/benchmark/load/token_transfer_load.go b/integration/benchmark/load/token_transfer_load.go new file mode 100644 index 00000000000..703b013900b --- /dev/null +++ b/integration/benchmark/load/token_transfer_load.go @@ -0,0 +1,73 @@ +package load + +import ( + "github.com/onflow/cadence" + "github.com/rs/zerolog" + + flowsdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/integration/benchmark/account" + "github.com/onflow/flow-go/integration/benchmark/scripts" + "github.com/onflow/flow-go/model/flow" +) + +type TokenTransferLoad struct { + tokensPerTransfer cadence.UFix64 +} + +func NewTokenTransferLoad() *TokenTransferLoad { + return &TokenTransferLoad{ + tokensPerTransfer: cadence.UFix64(100), + } +} + +var _ Load = (*TokenTransferLoad)(nil) + +func (l *TokenTransferLoad) Type() LoadType { + return TokenTransferLoadType +} + +func (l *TokenTransferLoad) Setup(_ zerolog.Logger, _ LoadContext) error { + return nil +} + +func (l *TokenTransferLoad) Load(log zerolog.Logger, lc LoadContext) error { + return sendSimpleTransaction( + log, + lc, + func( + log zerolog.Logger, + lc LoadContext, + acc *account.FlowAccount, + ) (*flowsdk.Transaction, error) { + sc := systemcontracts.SystemContractsForChain(lc.ChainID) + + // get another account to send tokens to + var destinationAddress flow.Address + acc2, err := lc.BorrowAvailableAccount() + if err != nil { + if !errors.Is(err, account.ErrNoAccountsAvailable) { + return nil, err + } + // if no accounts are available, just send to the service account + destinationAddress = sc.FlowServiceAccount.Address + } else { + destinationAddress = flow.ConvertAddress(acc2.Address) + lc.ReturnAvailableAccount(acc2) + } + + transferTx, err := scripts.TokenTransferTransaction( + sc.FungibleToken.Address, + sc.FlowToken.Address, + destinationAddress, + l.tokensPerTransfer) + if err != nil { + return nil, err + } + + return transferTx, nil + }, + ) +} diff --git a/integration/benchmark/load/token_transfer_multiple_load.go b/integration/benchmark/load/token_transfer_multiple_load.go new file mode 100644 index 00000000000..05bd4d6ca5b --- /dev/null +++ b/integration/benchmark/load/token_transfer_multiple_load.go @@ -0,0 +1,94 @@ +package load + +import ( + "sync" + + "github.com/onflow/cadence" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + + flowsdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/integration/benchmark/account" + "github.com/onflow/flow-go/integration/benchmark/scripts" +) + +type TokenTransferMultiLoad struct { + tokensPerTransfer cadence.UFix64 + accountsPerTransfer uint + + destinationAddresses []flow.Address + setupDestinationAddresses sync.Once +} + +func NewTokenTransferMultiLoad() *TokenTransferMultiLoad { + return &TokenTransferMultiLoad{ + tokensPerTransfer: cadence.UFix64(100), + accountsPerTransfer: 100, + } +} + +var _ Load = (*TokenTransferMultiLoad)(nil) + +func (l *TokenTransferMultiLoad) Type() LoadType { + return TokenTransferMultiLoadType +} + +func (l *TokenTransferMultiLoad) Setup(_ zerolog.Logger, _ LoadContext) error { + return nil +} + +func (l *TokenTransferMultiLoad) Load(log zerolog.Logger, lc LoadContext) error { + return sendSimpleTransaction( + log, + lc, + func( + log zerolog.Logger, + lc LoadContext, + acc *account.FlowAccount, + ) (*flowsdk.Transaction, error) 
{
+			sc := systemcontracts.SystemContractsForChain(lc.ChainID)
+
+			// we only need to get the destination addresses once
+			l.setupDestinationAddresses.Do(func() {
+				destinationSDKAddresses, err := lc.GetAddresses(l.accountsPerTransfer)
+				if err != nil {
+					log.Warn().Err(err).Msg("failed to get destination addresses")
+					// fall back to sending everything to the service account,
+					// and return so the fallback is not overwritten below
+					l.destinationAddresses = make([]flow.Address, l.accountsPerTransfer)
+					for i := 0; uint(i) < l.accountsPerTransfer; i++ {
+						l.destinationAddresses[i] = sc.FlowServiceAccount.Address
+					}
+					return
+				}
+
+				l.destinationAddresses = apply(
+					destinationSDKAddresses,
+					func(a flowsdk.Address) flow.Address {
+						return flow.ConvertAddress(a)
+					})
+			})
+
+			transferTx, err := scripts.TokenTransferMultiTransaction(
+				sc.FungibleToken.Address,
+				sc.FlowToken.Address,
+				l.destinationAddresses,
+				l.tokensPerTransfer)
+			if err != nil {
+				return nil, err
+			}
+
+			return transferTx, nil
+		},
+	)
+}
+
+func apply[I any, O any](input []I, transform func(I) O) []O {
+	output := make([]O, 0, len(input))
+	for _, i := range input {
+		output = append(output, transform(i))
+	}
+	return output
+}
diff --git a/integration/benchmark/mock/client.go b/integration/benchmark/mock/client.go
index c8b6e6797d8..718c0d70cd0 100644
--- a/integration/benchmark/mock/client.go
+++ b/integration/benchmark/mock/client.go
@@ -1,11 +1,12 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
 
 package mock
 
 import (
-	context "context"
-
 	cadence "github.com/onflow/cadence"
 
+	access "github.com/onflow/flow-go-sdk/access"
+
+	context "context"
 
 	flow "github.com/onflow/flow-go-sdk"
 
@@ -17,10 +18,14 @@ type Client struct {
 	mock.Mock
 }
 
-// Close provides a mock function with given fields:
+// Close provides a mock function with no fields
 func (_m *Client) Close() error {
 	ret := _m.Called()
 
+	if len(ret) == 0 {
+		panic("no return value specified for Close")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func() error); ok {
 		r0 = rf()
@@ -35,6 +40,10 @@ func (_m *Client) Close() error {
 func (_m *Client) ExecuteScriptAtBlockHeight(ctx context.Context, height uint64, script []byte, arguments []cadence.Value) (cadence.Value, error) {
 	ret := _m.Called(ctx, height, script, arguments)
 
+	if len(ret) == 0 {
+		panic("no return value specified for ExecuteScriptAtBlockHeight")
+	}
+
 	var r0 cadence.Value
 	var r1 error
 	if rf, ok := ret.Get(0).(func(context.Context, uint64, []byte, []cadence.Value) (cadence.Value, error)); ok {
@@ -61,6 +70,10 @@ func (_m *Client) ExecuteScriptAtBlockHeight(ctx context.Context, height uint64,
 func (_m *Client) ExecuteScriptAtBlockID(ctx context.Context, blockID flow.Identifier, script []byte, arguments []cadence.Value) (cadence.Value, error) {
 	ret := _m.Called(ctx, blockID, script, arguments)
 
+	if len(ret) == 0 {
+		panic("no return value specified for ExecuteScriptAtBlockID")
+	}
+
 	var r0 cadence.Value
 	var r1 error
 	if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, []byte, []cadence.Value) (cadence.Value, error)); ok {
@@ -87,6 +100,10 @@ func (_m *Client) ExecuteScriptAtBlockID(ctx context.Context, blockID flow.Ident
 func (_m *Client) ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, arguments []cadence.Value) (cadence.Value, error) {
 	ret := _m.Called(ctx, script, arguments)
 
+	if len(ret) == 0 {
+		panic("no return value specified for ExecuteScriptAtLatestBlock")
+	}
+
 	var r0 cadence.Value
 	var r1 error
 	if rf, ok := ret.Get(0).(func(context.Context, []byte, []cadence.Value) (cadence.Value, error)); ok {
@@
-113,6 +130,10 @@ func (_m *Client) ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, func (_m *Client) GetAccount(ctx context.Context, address flow.Address) (*flow.Account, error) { ret := _m.Called(ctx, address) + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (*flow.Account, error)); ok { @@ -139,6 +160,10 @@ func (_m *Client) GetAccount(ctx context.Context, address flow.Address) (*flow.A func (_m *Client) GetAccountAtBlockHeight(ctx context.Context, address flow.Address, blockHeight uint64) (*flow.Account, error) { ret := _m.Called(ctx, address, blockHeight) + if len(ret) == 0 { + panic("no return value specified for GetAccountAtBlockHeight") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) (*flow.Account, error)); ok { @@ -165,6 +190,10 @@ func (_m *Client) GetAccountAtBlockHeight(ctx context.Context, address flow.Addr func (_m *Client) GetAccountAtLatestBlock(ctx context.Context, address flow.Address) (*flow.Account, error) { ret := _m.Called(ctx, address) + if len(ret) == 0 { + panic("no return value specified for GetAccountAtLatestBlock") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (*flow.Account, error)); ok { @@ -187,10 +216,190 @@ func (_m *Client) GetAccountAtLatestBlock(ctx context.Context, address flow.Addr return r0, r1 } +// GetAccountBalanceAtBlockHeight provides a mock function with given fields: ctx, address, blockHeight +func (_m *Client) GetAccountBalanceAtBlockHeight(ctx context.Context, address flow.Address, blockHeight uint64) (uint64, error) { + ret := _m.Called(ctx, address, blockHeight) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtBlockHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) (uint64, error)); ok { + return rf(ctx, address, blockHeight) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) uint64); ok { + r0 = rf(ctx, address, blockHeight) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, blockHeight) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountBalanceAtLatestBlock provides a mock function with given fields: ctx, address +func (_m *Client) GetAccountBalanceAtLatestBlock(ctx context.Context, address flow.Address) (uint64, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalanceAtLatestBlock") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (uint64, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) uint64); ok { + r0 = rf(ctx, address) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeyAtBlockHeight provides a mock function with given fields: ctx, address, keyIndex, height +func (_m *Client) GetAccountKeyAtBlockHeight(ctx context.Context, address flow.Address, keyIndex uint32, height uint64) (*flow.AccountKey, error) { + ret := _m.Called(ctx, address, keyIndex, height) + + if len(ret) == 0 { + panic("no return value 
specified for GetAccountKeyAtBlockHeight") + } + + var r0 *flow.AccountKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, uint64) (*flow.AccountKey, error)); ok { + return rf(ctx, address, keyIndex, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, uint64) *flow.AccountKey); ok { + r0 = rf(ctx, address, keyIndex, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint32, uint64) error); ok { + r1 = rf(ctx, address, keyIndex, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeyAtLatestBlock provides a mock function with given fields: ctx, address, keyIndex +func (_m *Client) GetAccountKeyAtLatestBlock(ctx context.Context, address flow.Address, keyIndex uint32) (*flow.AccountKey, error) { + ret := _m.Called(ctx, address, keyIndex) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeyAtLatestBlock") + } + + var r0 *flow.AccountKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32) (*flow.AccountKey, error)); ok { + return rf(ctx, address, keyIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32) *flow.AccountKey); ok { + r0 = rf(ctx, address, keyIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint32) error); ok { + r1 = rf(ctx, address, keyIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeysAtBlockHeight provides a mock function with given fields: ctx, address, height +func (_m *Client) GetAccountKeysAtBlockHeight(ctx context.Context, address flow.Address, height uint64) ([]*flow.AccountKey, error) { + ret := _m.Called(ctx, address, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtBlockHeight") + } + + var r0 []*flow.AccountKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) ([]*flow.AccountKey, error)); ok { + return rf(ctx, address, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) []*flow.AccountKey); ok { + r0 = rf(ctx, address, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*flow.AccountKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeysAtLatestBlock provides a mock function with given fields: ctx, address +func (_m *Client) GetAccountKeysAtLatestBlock(ctx context.Context, address flow.Address) ([]*flow.AccountKey, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeysAtLatestBlock") + } + + var r0 []*flow.AccountKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) ([]*flow.AccountKey, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address) []*flow.AccountKey); ok { + r0 = rf(ctx, address) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*flow.AccountKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetBlockByHeight provides a mock function with given fields: ctx, height func (_m *Client) GetBlockByHeight(ctx 
context.Context, height uint64) (*flow.Block, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for GetBlockByHeight") + } + var r0 *flow.Block var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (*flow.Block, error)); ok { @@ -217,6 +426,10 @@ func (_m *Client) GetBlockByHeight(ctx context.Context, height uint64) (*flow.Bl func (_m *Client) GetBlockByID(ctx context.Context, blockID flow.Identifier) (*flow.Block, error) { ret := _m.Called(ctx, blockID) + if len(ret) == 0 { + panic("no return value specified for GetBlockByID") + } + var r0 *flow.Block var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.Block, error)); ok { @@ -243,6 +456,10 @@ func (_m *Client) GetBlockByID(ctx context.Context, blockID flow.Identifier) (*f func (_m *Client) GetBlockHeaderByHeight(ctx context.Context, height uint64) (*flow.BlockHeader, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByHeight") + } + var r0 *flow.BlockHeader var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (*flow.BlockHeader, error)); ok { @@ -269,6 +486,10 @@ func (_m *Client) GetBlockHeaderByHeight(ctx context.Context, height uint64) (*f func (_m *Client) GetBlockHeaderByID(ctx context.Context, blockID flow.Identifier) (*flow.BlockHeader, error) { ret := _m.Called(ctx, blockID) + if len(ret) == 0 { + panic("no return value specified for GetBlockHeaderByID") + } + var r0 *flow.BlockHeader var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.BlockHeader, error)); ok { @@ -295,6 +516,10 @@ func (_m *Client) GetBlockHeaderByID(ctx context.Context, blockID flow.Identifie func (_m *Client) GetCollection(ctx context.Context, colID flow.Identifier) (*flow.Collection, error) { ret := _m.Called(ctx, colID) + if len(ret) == 0 { + panic("no return value specified for GetCollection") + } + var r0 *flow.Collection var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.Collection, error)); ok { @@ -317,10 +542,44 @@ func (_m *Client) GetCollection(ctx context.Context, colID flow.Identifier) (*fl return r0, r1 } +// GetCollectionByID provides a mock function with given fields: ctx, id +func (_m *Client) GetCollectionByID(ctx context.Context, id flow.Identifier) (*flow.Collection, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetCollectionByID") + } + + var r0 *flow.Collection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.Collection, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *flow.Collection); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Collection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetEventsForBlockIDs provides a mock function with given fields: ctx, eventType, blockIDs func (_m *Client) GetEventsForBlockIDs(ctx context.Context, eventType string, blockIDs []flow.Identifier) ([]flow.BlockEvents, error) { ret := _m.Called(ctx, eventType, blockIDs) + if len(ret) == 0 { + panic("no return value specified for GetEventsForBlockIDs") + } + var r0 []flow.BlockEvents var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, []flow.Identifier) ([]flow.BlockEvents, error)); ok { @@ -347,6 +606,10 
@@ func (_m *Client) GetEventsForBlockIDs(ctx context.Context, eventType string, bl func (_m *Client) GetEventsForHeightRange(ctx context.Context, eventType string, startHeight uint64, endHeight uint64) ([]flow.BlockEvents, error) { ret := _m.Called(ctx, eventType, startHeight, endHeight) + if len(ret) == 0 { + panic("no return value specified for GetEventsForHeightRange") + } + var r0 []flow.BlockEvents var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64) ([]flow.BlockEvents, error)); ok { @@ -369,10 +632,74 @@ func (_m *Client) GetEventsForHeightRange(ctx context.Context, eventType string, return r0, r1 } +// GetExecutionDataByBlockID provides a mock function with given fields: ctx, blockID +func (_m *Client) GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*flow.ExecutionData, error) { + ret := _m.Called(ctx, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetExecutionDataByBlockID") + } + + var r0 *flow.ExecutionData + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.ExecutionData, error)); ok { + return rf(ctx, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *flow.ExecutionData); ok { + r0 = rf(ctx, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.ExecutionData) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetExecutionResultByID provides a mock function with given fields: ctx, id +func (_m *Client) GetExecutionResultByID(ctx context.Context, id flow.Identifier) (*flow.ExecutionResult, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetExecutionResultByID") + } + + var r0 *flow.ExecutionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.ExecutionResult, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *flow.ExecutionResult); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.ExecutionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetExecutionResultForBlockID provides a mock function with given fields: ctx, blockID func (_m *Client) GetExecutionResultForBlockID(ctx context.Context, blockID flow.Identifier) (*flow.ExecutionResult, error) { ret := _m.Called(ctx, blockID) + if len(ret) == 0 { + panic("no return value specified for GetExecutionResultForBlockID") + } + var r0 *flow.ExecutionResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.ExecutionResult, error)); ok { @@ -395,10 +722,44 @@ func (_m *Client) GetExecutionResultForBlockID(ctx context.Context, blockID flow return r0, r1 } +// GetFullCollectionByID provides a mock function with given fields: ctx, id +func (_m *Client) GetFullCollectionByID(ctx context.Context, id flow.Identifier) (*flow.FullCollection, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetFullCollectionByID") + } + + var r0 *flow.FullCollection + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.FullCollection, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *flow.FullCollection); 
ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.FullCollection) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetLatestBlock provides a mock function with given fields: ctx, isSealed func (_m *Client) GetLatestBlock(ctx context.Context, isSealed bool) (*flow.Block, error) { ret := _m.Called(ctx, isSealed) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlock") + } + var r0 *flow.Block var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (*flow.Block, error)); ok { @@ -425,6 +786,10 @@ func (_m *Client) GetLatestBlock(ctx context.Context, isSealed bool) (*flow.Bloc func (_m *Client) GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flow.BlockHeader, error) { ret := _m.Called(ctx, isSealed) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + var r0 *flow.BlockHeader var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (*flow.BlockHeader, error)); ok { @@ -451,6 +816,10 @@ func (_m *Client) GetLatestBlockHeader(ctx context.Context, isSealed bool) (*flo func (_m *Client) GetLatestProtocolStateSnapshot(ctx context.Context) ([]byte, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetLatestProtocolStateSnapshot") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func(context.Context) ([]byte, error)); ok { @@ -473,25 +842,269 @@ func (_m *Client) GetLatestProtocolStateSnapshot(ctx context.Context) ([]byte, e return r0, r1 } -// GetTransaction provides a mock function with given fields: ctx, txID -func (_m *Client) GetTransaction(ctx context.Context, txID flow.Identifier) (*flow.Transaction, error) { - ret := _m.Called(ctx, txID) +// GetNetworkParameters provides a mock function with given fields: ctx +func (_m *Client) GetNetworkParameters(ctx context.Context) (*flow.NetworkParameters, error) { + ret := _m.Called(ctx) - var r0 *flow.Transaction + if len(ret) == 0 { + panic("no return value specified for GetNetworkParameters") + } + + var r0 *flow.NetworkParameters var r1 error - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.Transaction, error)); ok { - return rf(ctx, txID) + if rf, ok := ret.Get(0).(func(context.Context) (*flow.NetworkParameters, error)); ok { + return rf(ctx) } - if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *flow.Transaction); ok { - r0 = rf(ctx, txID) + if rf, ok := ret.Get(0).(func(context.Context) *flow.NetworkParameters); ok { + r0 = rf(ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Transaction) + r0 = ret.Get(0).(*flow.NetworkParameters) } } - if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { - r1 = rf(ctx, txID) + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetNodeVersionInfo provides a mock function with given fields: ctx +func (_m *Client) GetNodeVersionInfo(ctx context.Context) (*flow.NodeVersionInfo, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetNodeVersionInfo") + } + + var r0 *flow.NodeVersionInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*flow.NodeVersionInfo, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *flow.NodeVersionInfo); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil 
{ + r0 = ret.Get(0).(*flow.NodeVersionInfo) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProtocolStateSnapshotByBlockID provides a mock function with given fields: ctx, blockID +func (_m *Client) GetProtocolStateSnapshotByBlockID(ctx context.Context, blockID flow.Identifier) ([]byte, error) { + ret := _m.Called(ctx, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByBlockID") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) ([]byte, error)); ok { + return rf(ctx, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) []byte); ok { + r0 = rf(ctx, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProtocolStateSnapshotByHeight provides a mock function with given fields: ctx, blockHeight +func (_m *Client) GetProtocolStateSnapshotByHeight(ctx context.Context, blockHeight uint64) ([]byte, error) { + ret := _m.Called(ctx, blockHeight) + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateSnapshotByHeight") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) ([]byte, error)); ok { + return rf(ctx, blockHeight) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) []byte); ok { + r0 = rf(ctx, blockHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, blockHeight) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransaction provides a mock function with given fields: ctx, blockID +func (_m *Client) GetSystemTransaction(ctx context.Context, blockID flow.Identifier) (*flow.Transaction, error) { + ret := _m.Called(ctx, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransaction") + } + + var r0 *flow.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.Transaction, error)); ok { + return rf(ctx, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *flow.Transaction); ok { + r0 = rf(ctx, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransactionResult provides a mock function with given fields: ctx, blockID +func (_m *Client) GetSystemTransactionResult(ctx context.Context, blockID flow.Identifier) (*flow.TransactionResult, error) { + ret := _m.Called(ctx, blockID) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransactionResult") + } + + var r0 *flow.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.TransactionResult, error)); ok { + return rf(ctx, blockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *flow.TransactionResult); ok { + r0 = rf(ctx, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, blockID) + } else { 
+ r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransactionResultWithID provides a mock function with given fields: ctx, blockID, systemTxID +func (_m *Client) GetSystemTransactionResultWithID(ctx context.Context, blockID flow.Identifier, systemTxID flow.Identifier) (*flow.TransactionResult, error) { + ret := _m.Called(ctx, blockID, systemTxID) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransactionResultWithID") + } + + var r0 *flow.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier) (*flow.TransactionResult, error)); ok { + return rf(ctx, blockID, systemTxID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier) *flow.TransactionResult); ok { + r0 = rf(ctx, blockID, systemTxID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.Identifier) error); ok { + r1 = rf(ctx, blockID, systemTxID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSystemTransactionWithID provides a mock function with given fields: ctx, blockID, systemTxID +func (_m *Client) GetSystemTransactionWithID(ctx context.Context, blockID flow.Identifier, systemTxID flow.Identifier) (*flow.Transaction, error) { + ret := _m.Called(ctx, blockID, systemTxID) + + if len(ret) == 0 { + panic("no return value specified for GetSystemTransactionWithID") + } + + var r0 *flow.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier) (*flow.Transaction, error)); ok { + return rf(ctx, blockID, systemTxID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.Identifier) *flow.Transaction); ok { + r0 = rf(ctx, blockID, systemTxID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.Identifier) error); ok { + r1 = rf(ctx, blockID, systemTxID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransaction provides a mock function with given fields: ctx, txID +func (_m *Client) GetTransaction(ctx context.Context, txID flow.Identifier) (*flow.Transaction, error) { + ret := _m.Called(ctx, txID) + + if len(ret) == 0 { + panic("no return value specified for GetTransaction") + } + + var r0 *flow.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.Transaction, error)); ok { + return rf(ctx, txID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *flow.Transaction); ok { + r0 = rf(ctx, txID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, txID) } else { r1 = ret.Error(1) } @@ -503,6 +1116,10 @@ func (_m *Client) GetTransaction(ctx context.Context, txID flow.Identifier) (*fl func (_m *Client) GetTransactionResult(ctx context.Context, txID flow.Identifier) (*flow.TransactionResult, error) { ret := _m.Called(ctx, txID) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResult") + } + var r0 *flow.TransactionResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.TransactionResult, error)); ok { @@ -525,10 +1142,44 @@ func (_m *Client) GetTransactionResult(ctx context.Context, txID flow.Identifier return r0, r1 } +// GetTransactionResultByIndex provides a 
mock function with given fields: ctx, blockID, index +func (_m *Client) GetTransactionResultByIndex(ctx context.Context, blockID flow.Identifier, index uint32) (*flow.TransactionResult, error) { + ret := _m.Called(ctx, blockID, index) + + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultByIndex") + } + + var r0 *flow.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint32) (*flow.TransactionResult, error)); ok { + return rf(ctx, blockID, index) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, uint32) *flow.TransactionResult); ok { + r0 = rf(ctx, blockID, index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, uint32) error); ok { + r1 = rf(ctx, blockID, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetTransactionResultsByBlockID provides a mock function with given fields: ctx, blockID func (_m *Client) GetTransactionResultsByBlockID(ctx context.Context, blockID flow.Identifier) ([]*flow.TransactionResult, error) { ret := _m.Called(ctx, blockID) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResultsByBlockID") + } + var r0 []*flow.TransactionResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) ([]*flow.TransactionResult, error)); ok { @@ -555,6 +1206,10 @@ func (_m *Client) GetTransactionResultsByBlockID(ctx context.Context, blockID fl func (_m *Client) GetTransactionsByBlockID(ctx context.Context, blockID flow.Identifier) ([]*flow.Transaction, error) { ret := _m.Called(ctx, blockID) + if len(ret) == 0 { + panic("no return value specified for GetTransactionsByBlockID") + } + var r0 []*flow.Transaction var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) ([]*flow.Transaction, error)); ok { @@ -581,6 +1236,10 @@ func (_m *Client) GetTransactionsByBlockID(ctx context.Context, blockID flow.Ide func (_m *Client) Ping(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Ping") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -591,10 +1250,53 @@ func (_m *Client) Ping(ctx context.Context) error { return r0 } +// SendAndSubscribeTransactionStatuses provides a mock function with given fields: ctx, tx +func (_m *Client) SendAndSubscribeTransactionStatuses(ctx context.Context, tx flow.Transaction) (<-chan *flow.TransactionResult, <-chan error, error) { + ret := _m.Called(ctx, tx) + + if len(ret) == 0 { + panic("no return value specified for SendAndSubscribeTransactionStatuses") + } + + var r0 <-chan *flow.TransactionResult + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Transaction) (<-chan *flow.TransactionResult, <-chan error, error)); ok { + return rf(ctx, tx) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Transaction) <-chan *flow.TransactionResult); ok { + r0 = rf(ctx, tx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Transaction) <-chan error); ok { + r1 = rf(ctx, tx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.Transaction) error); ok { + r2 = rf(ctx, tx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // SendTransaction provides a 
mock function with given fields: ctx, tx func (_m *Client) SendTransaction(ctx context.Context, tx flow.Transaction) error { ret := _m.Called(ctx, tx) + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, flow.Transaction) error); ok { r0 = rf(ctx, tx) @@ -605,13 +1307,650 @@ func (_m *Client) SendTransaction(ctx context.Context, tx flow.Transaction) erro return r0 } -type mockConstructorTestingTNewClient interface { - mock.TestingT - Cleanup(func()) +// SubscribeAccountStatusesFromLatestBlock provides a mock function with given fields: ctx, filter +func (_m *Client) SubscribeAccountStatusesFromLatestBlock(ctx context.Context, filter flow.AccountStatusFilter) (<-chan *flow.AccountStatus, <-chan error, error) { + ret := _m.Called(ctx, filter) + + if len(ret) == 0 { + panic("no return value specified for SubscribeAccountStatusesFromLatestBlock") + } + + var r0 <-chan *flow.AccountStatus + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.AccountStatusFilter) (<-chan *flow.AccountStatus, <-chan error, error)); ok { + return rf(ctx, filter) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.AccountStatusFilter) <-chan *flow.AccountStatus); ok { + r0 = rf(ctx, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.AccountStatus) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.AccountStatusFilter) <-chan error); ok { + r1 = rf(ctx, filter) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.AccountStatusFilter) error); ok { + r2 = rf(ctx, filter) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } -// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewClient(t mockConstructorTestingTNewClient) *Client { +// SubscribeAccountStatusesFromStartBlockID provides a mock function with given fields: ctx, startBlockID, filter +func (_m *Client) SubscribeAccountStatusesFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, filter flow.AccountStatusFilter) (<-chan *flow.AccountStatus, <-chan error, error) { + ret := _m.Called(ctx, startBlockID, filter) + + if len(ret) == 0 { + panic("no return value specified for SubscribeAccountStatusesFromStartBlockID") + } + + var r0 <-chan *flow.AccountStatus + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.AccountStatusFilter) (<-chan *flow.AccountStatus, <-chan error, error)); ok { + return rf(ctx, startBlockID, filter) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.AccountStatusFilter) <-chan *flow.AccountStatus); ok { + r0 = rf(ctx, startBlockID, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.AccountStatus) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.AccountStatusFilter) <-chan error); ok { + r1 = rf(ctx, startBlockID, filter) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.Identifier, flow.AccountStatusFilter) error); ok { + r2 = rf(ctx, startBlockID, filter) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeAccountStatusesFromStartHeight provides a mock function with given fields: ctx, startBlockHeight, filter +func (_m *Client) SubscribeAccountStatusesFromStartHeight(ctx context.Context, startBlockHeight uint64, filter flow.AccountStatusFilter) (<-chan *flow.AccountStatus, <-chan error, error) { + ret := _m.Called(ctx, startBlockHeight, filter) + + if len(ret) == 0 { + panic("no return value specified for SubscribeAccountStatusesFromStartHeight") + } + + var r0 <-chan *flow.AccountStatus + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.AccountStatusFilter) (<-chan *flow.AccountStatus, <-chan error, error)); ok { + return rf(ctx, startBlockHeight, filter) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.AccountStatusFilter) <-chan *flow.AccountStatus); ok { + r0 = rf(ctx, startBlockHeight, filter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.AccountStatus) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, flow.AccountStatusFilter) <-chan error); ok { + r1 = rf(ctx, startBlockHeight, filter) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64, flow.AccountStatusFilter) error); ok { + r2 = rf(ctx, startBlockHeight, filter) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeBlockDigestsFromLatest provides a mock function with given fields: ctx, blockStatus +func (_m *Client) SubscribeBlockDigestsFromLatest(ctx context.Context, blockStatus flow.BlockStatus) (<-chan *flow.BlockDigest, <-chan error, error) { + ret := _m.Called(ctx, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromLatest") + } + + var r0 <-chan *flow.BlockDigest + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.BlockStatus) (<-chan *flow.BlockDigest, <-chan error, error)); ok { + return rf(ctx, blockStatus) + } + if rf, ok := ret.Get(0).(func(context.Context, 
flow.BlockStatus) <-chan *flow.BlockDigest); ok { + r0 = rf(ctx, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.BlockDigest) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.BlockStatus) <-chan error); ok { + r1 = rf(ctx, blockStatus) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.BlockStatus) error); ok { + r2 = rf(ctx, blockStatus) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeBlockDigestsFromStartBlockID provides a mock function with given fields: ctx, startBlockID, blockStatus +func (_m *Client) SubscribeBlockDigestsFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) (<-chan *flow.BlockDigest, <-chan error, error) { + ret := _m.Called(ctx, startBlockID, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromStartBlockID") + } + + var r0 <-chan *flow.BlockDigest + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.BlockStatus) (<-chan *flow.BlockDigest, <-chan error, error)); ok { + return rf(ctx, startBlockID, blockStatus) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.BlockStatus) <-chan *flow.BlockDigest); ok { + r0 = rf(ctx, startBlockID, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.BlockDigest) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.BlockStatus) <-chan error); ok { + r1 = rf(ctx, startBlockID, blockStatus) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.Identifier, flow.BlockStatus) error); ok { + r2 = rf(ctx, startBlockID, blockStatus) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeBlockDigestsFromStartHeight provides a mock function with given fields: ctx, startHeight, blockStatus +func (_m *Client) SubscribeBlockDigestsFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) (<-chan *flow.BlockDigest, <-chan error, error) { + ret := _m.Called(ctx, startHeight, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockDigestsFromStartHeight") + } + + var r0 <-chan *flow.BlockDigest + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.BlockStatus) (<-chan *flow.BlockDigest, <-chan error, error)); ok { + return rf(ctx, startHeight, blockStatus) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.BlockStatus) <-chan *flow.BlockDigest); ok { + r0 = rf(ctx, startHeight, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.BlockDigest) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, flow.BlockStatus) <-chan error); ok { + r1 = rf(ctx, startHeight, blockStatus) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64, flow.BlockStatus) error); ok { + r2 = rf(ctx, startHeight, blockStatus) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeBlockHeadersFromLatest provides a mock function with given fields: ctx, blockStatus +func (_m *Client) SubscribeBlockHeadersFromLatest(ctx context.Context, blockStatus flow.BlockStatus) (<-chan *flow.BlockHeader, <-chan error, error) { + ret := _m.Called(ctx, 
blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromLatest") + } + + var r0 <-chan *flow.BlockHeader + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.BlockStatus) (<-chan *flow.BlockHeader, <-chan error, error)); ok { + return rf(ctx, blockStatus) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.BlockStatus) <-chan *flow.BlockHeader); ok { + r0 = rf(ctx, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.BlockStatus) <-chan error); ok { + r1 = rf(ctx, blockStatus) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.BlockStatus) error); ok { + r2 = rf(ctx, blockStatus) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeBlockHeadersFromStartBlockID provides a mock function with given fields: ctx, startBlockID, blockStatus +func (_m *Client) SubscribeBlockHeadersFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) (<-chan *flow.BlockHeader, <-chan error, error) { + ret := _m.Called(ctx, startBlockID, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromStartBlockID") + } + + var r0 <-chan *flow.BlockHeader + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.BlockStatus) (<-chan *flow.BlockHeader, <-chan error, error)); ok { + return rf(ctx, startBlockID, blockStatus) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.BlockStatus) <-chan *flow.BlockHeader); ok { + r0 = rf(ctx, startBlockID, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.BlockStatus) <-chan error); ok { + r1 = rf(ctx, startBlockID, blockStatus) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.Identifier, flow.BlockStatus) error); ok { + r2 = rf(ctx, startBlockID, blockStatus) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeBlockHeadersFromStartHeight provides a mock function with given fields: ctx, startHeight, blockStatus +func (_m *Client) SubscribeBlockHeadersFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) (<-chan *flow.BlockHeader, <-chan error, error) { + ret := _m.Called(ctx, startHeight, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlockHeadersFromStartHeight") + } + + var r0 <-chan *flow.BlockHeader + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.BlockStatus) (<-chan *flow.BlockHeader, <-chan error, error)); ok { + return rf(ctx, startHeight, blockStatus) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.BlockStatus) <-chan *flow.BlockHeader); ok { + r0 = rf(ctx, startHeight, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.BlockHeader) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, flow.BlockStatus) <-chan error); ok { + r1 = rf(ctx, startHeight, blockStatus) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64, flow.BlockStatus) error); 
ok { + r2 = rf(ctx, startHeight, blockStatus) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeBlocksFromLatest provides a mock function with given fields: ctx, blockStatus +func (_m *Client) SubscribeBlocksFromLatest(ctx context.Context, blockStatus flow.BlockStatus) (<-chan *flow.Block, <-chan error, error) { + ret := _m.Called(ctx, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromLatest") + } + + var r0 <-chan *flow.Block + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.BlockStatus) (<-chan *flow.Block, <-chan error, error)); ok { + return rf(ctx, blockStatus) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.BlockStatus) <-chan *flow.Block); ok { + r0 = rf(ctx, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.BlockStatus) <-chan error); ok { + r1 = rf(ctx, blockStatus) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.BlockStatus) error); ok { + r2 = rf(ctx, blockStatus) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeBlocksFromStartBlockID provides a mock function with given fields: ctx, startBlockID, blockStatus +func (_m *Client) SubscribeBlocksFromStartBlockID(ctx context.Context, startBlockID flow.Identifier, blockStatus flow.BlockStatus) (<-chan *flow.Block, <-chan error, error) { + ret := _m.Called(ctx, startBlockID, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromStartBlockID") + } + + var r0 <-chan *flow.Block + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.BlockStatus) (<-chan *flow.Block, <-chan error, error)); ok { + return rf(ctx, startBlockID, blockStatus) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.BlockStatus) <-chan *flow.Block); ok { + r0 = rf(ctx, startBlockID, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.BlockStatus) <-chan error); ok { + r1 = rf(ctx, startBlockID, blockStatus) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.Identifier, flow.BlockStatus) error); ok { + r2 = rf(ctx, startBlockID, blockStatus) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeBlocksFromStartHeight provides a mock function with given fields: ctx, startHeight, blockStatus +func (_m *Client) SubscribeBlocksFromStartHeight(ctx context.Context, startHeight uint64, blockStatus flow.BlockStatus) (<-chan *flow.Block, <-chan error, error) { + ret := _m.Called(ctx, startHeight, blockStatus) + + if len(ret) == 0 { + panic("no return value specified for SubscribeBlocksFromStartHeight") + } + + var r0 <-chan *flow.Block + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.BlockStatus) (<-chan *flow.Block, <-chan error, error)); ok { + return rf(ctx, startHeight, blockStatus) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.BlockStatus) <-chan *flow.Block); ok { + r0 = rf(ctx, startHeight, blockStatus) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, 
flow.BlockStatus) <-chan error); ok { + r1 = rf(ctx, startHeight, blockStatus) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64, flow.BlockStatus) error); ok { + r2 = rf(ctx, startHeight, blockStatus) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeEventsByBlockHeight provides a mock function with given fields: ctx, startHeight, filter, opts +func (_m *Client) SubscribeEventsByBlockHeight(ctx context.Context, startHeight uint64, filter flow.EventFilter, opts ...access.SubscribeOption) (<-chan flow.BlockEvents, <-chan error, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, startHeight, filter) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeEventsByBlockHeight") + } + + var r0 <-chan flow.BlockEvents + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.EventFilter, ...access.SubscribeOption) (<-chan flow.BlockEvents, <-chan error, error)); ok { + return rf(ctx, startHeight, filter, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, flow.EventFilter, ...access.SubscribeOption) <-chan flow.BlockEvents); ok { + r0 = rf(ctx, startHeight, filter, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan flow.BlockEvents) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, flow.EventFilter, ...access.SubscribeOption) <-chan error); ok { + r1 = rf(ctx, startHeight, filter, opts...) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64, flow.EventFilter, ...access.SubscribeOption) error); ok { + r2 = rf(ctx, startHeight, filter, opts...) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeEventsByBlockID provides a mock function with given fields: ctx, startBlockID, filter, opts +func (_m *Client) SubscribeEventsByBlockID(ctx context.Context, startBlockID flow.Identifier, filter flow.EventFilter, opts ...access.SubscribeOption) (<-chan flow.BlockEvents, <-chan error, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, startBlockID, filter) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for SubscribeEventsByBlockID") + } + + var r0 <-chan flow.BlockEvents + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.EventFilter, ...access.SubscribeOption) (<-chan flow.BlockEvents, <-chan error, error)); ok { + return rf(ctx, startBlockID, filter, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, flow.EventFilter, ...access.SubscribeOption) <-chan flow.BlockEvents); ok { + r0 = rf(ctx, startBlockID, filter, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan flow.BlockEvents) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, flow.EventFilter, ...access.SubscribeOption) <-chan error); ok { + r1 = rf(ctx, startBlockID, filter, opts...) 
+ } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.Identifier, flow.EventFilter, ...access.SubscribeOption) error); ok { + r2 = rf(ctx, startBlockID, filter, opts...) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeExecutionDataByBlockHeight provides a mock function with given fields: ctx, startHeight +func (_m *Client) SubscribeExecutionDataByBlockHeight(ctx context.Context, startHeight uint64) (<-chan *flow.ExecutionDataStreamResponse, <-chan error, error) { + ret := _m.Called(ctx, startHeight) + + if len(ret) == 0 { + panic("no return value specified for SubscribeExecutionDataByBlockHeight") + } + + var r0 <-chan *flow.ExecutionDataStreamResponse + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (<-chan *flow.ExecutionDataStreamResponse, <-chan error, error)); ok { + return rf(ctx, startHeight) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) <-chan *flow.ExecutionDataStreamResponse); ok { + r0 = rf(ctx, startHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.ExecutionDataStreamResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) <-chan error); ok { + r1 = rf(ctx, startHeight) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64) error); ok { + r2 = rf(ctx, startHeight) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// SubscribeExecutionDataByBlockID provides a mock function with given fields: ctx, startBlockID +func (_m *Client) SubscribeExecutionDataByBlockID(ctx context.Context, startBlockID flow.Identifier) (<-chan *flow.ExecutionDataStreamResponse, <-chan error, error) { + ret := _m.Called(ctx, startBlockID) + + if len(ret) == 0 { + panic("no return value specified for SubscribeExecutionDataByBlockID") + } + + var r0 <-chan *flow.ExecutionDataStreamResponse + var r1 <-chan error + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (<-chan *flow.ExecutionDataStreamResponse, <-chan error, error)); ok { + return rf(ctx, startBlockID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) <-chan *flow.ExecutionDataStreamResponse); ok { + r0 = rf(ctx, startBlockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan *flow.ExecutionDataStreamResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) <-chan error); ok { + r1 = rf(ctx, startBlockID) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(<-chan error) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, flow.Identifier) error); ok { + r2 = rf(ctx, startBlockID) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClient(t interface { + mock.TestingT + Cleanup(func()) +}) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/integration/benchmark/mocksiface/mocks.go b/integration/benchmark/mocksiface/mocks.go deleted file mode 100644 index 0068b5676c2..00000000000 --- a/integration/benchmark/mocksiface/mocks.go +++ /dev/null @@ -1,10 +0,0 @@ -package mocksiface_test - -import ( - "github.com/onflow/flow-go-sdk/access" -) - -// This is a proxy for the real access.Client for mockery to use. 
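For reference, a minimal sketch of how the regenerated mock above is typically consumed in a test. The mock package import path and the flow-go-sdk block-status constant are assumptions; the On/Return stubbing comes from testify, which mockery mocks embed. Note the explicit conversions to receive-only channel types, which the generated type assertions require.

package benchmark_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	flowsdk "github.com/onflow/flow-go-sdk"

	// Assumed import path of the regenerated mock package.
	mocks "github.com/onflow/flow-go/integration/benchmark/mock"
)

func TestClientMockSketch(t *testing.T) {
	// NewClient registers t on the mock and asserts expectations on cleanup.
	client := mocks.NewClient(t)

	// Stub a plain method: Ping succeeds for any context.
	client.On("Ping", mock.Anything).Return(nil)
	require.NoError(t, client.Ping(context.Background()))

	// Stub a subscription method: the mock's type assertions expect
	// receive-only channels, so convert before returning them.
	blocks := make(chan *flowsdk.Block, 1)
	errs := make(chan error, 1)
	client.
		On("SubscribeBlocksFromLatest", mock.Anything, flowsdk.BlockStatusSealed).
		Return((<-chan *flowsdk.Block)(blocks), (<-chan error)(errs), nil)

	out, _, err := client.SubscribeBlocksFromLatest(context.Background(), flowsdk.BlockStatusSealed)
	require.NoError(t, err)

	blocks <- &flowsdk.Block{}
	require.NotNil(t, <-out)
}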
-type Client interface { - access.Client -} diff --git a/integration/benchmark/prometheus.go b/integration/benchmark/prometheus.go index baa29e8ae4c..8fa7caca2cf 100644 --- a/integration/benchmark/prometheus.go +++ b/integration/benchmark/prometheus.go @@ -9,7 +9,7 @@ import ( "github.com/rs/zerolog" ) -type statsPusherImpl struct { +type StatsPusherImpl struct { pusher *push.Pusher cancel context.CancelFunc done chan struct{} @@ -22,10 +22,10 @@ func NewStatsPusher( pushgateway string, job string, gatherer prometheus.Gatherer, -) *statsPusherImpl { +) *StatsPusherImpl { localCtx, cancel := context.WithCancel(ctx) - sp := &statsPusherImpl{ + sp := &StatsPusherImpl{ pusher: push.New(pushgateway, job).Gatherer(gatherer), done: make(chan struct{}), cancel: cancel, @@ -60,7 +60,7 @@ func NewStatsPusher( } // Stop the stats pusher and waits for it to finish. -func (sp *statsPusherImpl) Stop() { +func (sp *StatsPusherImpl) Stop() { sp.cancel() <-sp.done } diff --git a/integration/benchmark/proto/generate.go b/integration/benchmark/proto/generate.go deleted file mode 100644 index b36797e4592..00000000000 --- a/integration/benchmark/proto/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative macro_benchmark.proto - -package proto diff --git a/integration/benchmark/proto/macro_benchmark.pb.go b/integration/benchmark/proto/macro_benchmark.pb.go deleted file mode 100644 index ada83bc8cc4..00000000000 --- a/integration/benchmark/proto/macro_benchmark.pb.go +++ /dev/null @@ -1,434 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 -// source: macro_benchmark.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type StartMacroBenchmarkRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StartMacroBenchmarkRequest) Reset() { - *x = StartMacroBenchmarkRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartMacroBenchmarkRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartMacroBenchmarkRequest) ProtoMessage() {} - -func (x *StartMacroBenchmarkRequest) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartMacroBenchmarkRequest.ProtoReflect.Descriptor instead. 
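Separately, the prometheus.go hunk above exports the stats pusher's concrete type. A minimal usage sketch, assuming the leading NewStatsPusher parameters elided from the hunk are a context and a zerolog logger (only the trailing pushgateway/job/gatherer parameters are visible in the diff), and with placeholder pushgateway address and job name:

package main

import (
	"context"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/integration/benchmark"
)

func main() {
	ctx := context.Background()
	log := zerolog.New(os.Stderr)

	// Exporting StatsPusherImpl lets callers outside the package hold the
	// concrete type directly instead of hiding it behind an interface.
	var sp *benchmark.StatsPusherImpl = benchmark.NewStatsPusher(
		ctx, log, // assumed leading parameters, elided from the hunk
		"http://localhost:9091", // placeholder pushgateway address
		"benchmark",             // placeholder job name
		prometheus.DefaultGatherer,
	)
	defer sp.Stop() // cancels the push loop and waits for it to finish
}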
-func (*StartMacroBenchmarkRequest) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{0} -} - -type StartMacroBenchmarkResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StartMacroBenchmarkResponse) Reset() { - *x = StartMacroBenchmarkResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartMacroBenchmarkResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartMacroBenchmarkResponse) ProtoMessage() {} - -func (x *StartMacroBenchmarkResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartMacroBenchmarkResponse.ProtoReflect.Descriptor instead. -func (*StartMacroBenchmarkResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{1} -} - -type GetMacroBenchmarkRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetMacroBenchmarkRequest) Reset() { - *x = GetMacroBenchmarkRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetMacroBenchmarkRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetMacroBenchmarkRequest) ProtoMessage() {} - -func (x *GetMacroBenchmarkRequest) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetMacroBenchmarkRequest.ProtoReflect.Descriptor instead. -func (*GetMacroBenchmarkRequest) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{2} -} - -type GetMacroBenchmarkResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetMacroBenchmarkResponse) Reset() { - *x = GetMacroBenchmarkResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetMacroBenchmarkResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetMacroBenchmarkResponse) ProtoMessage() {} - -func (x *GetMacroBenchmarkResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetMacroBenchmarkResponse.ProtoReflect.Descriptor instead. 
-func (*GetMacroBenchmarkResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{3} -} - -type ListMacroBenchmarksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ListMacroBenchmarksResponse) Reset() { - *x = ListMacroBenchmarksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListMacroBenchmarksResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListMacroBenchmarksResponse) ProtoMessage() {} - -func (x *ListMacroBenchmarksResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListMacroBenchmarksResponse.ProtoReflect.Descriptor instead. -func (*ListMacroBenchmarksResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{4} -} - -type StatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StatusResponse) Reset() { - *x = StatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse) ProtoMessage() {} - -func (x *StatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. 
-func (*StatusResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{5} -} - -var File_macro_benchmark_proto protoreflect.FileDescriptor - -var file_macro_benchmark_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, - 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, - 0x72, 0x6b, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x1c, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1d, 0x0a, - 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, - 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x0a, 0x18, - 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1b, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x4d, - 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, - 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xef, 0x02, 0x0a, 0x09, 0x42, 0x65, 0x6e, 0x63, 0x68, - 0x6d, 0x61, 0x72, 0x6b, 0x12, 0x68, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, - 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x12, 0x25, 0x2e, 0x62, 0x65, - 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, - 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, - 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x60, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, - 0x61, 0x72, 0x6b, 0x12, 0x23, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, - 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, - 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x57, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x26, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x06, 0x53, 
0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x62, 0x65, - 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x66, 0x6c, - 0x6f, 0x77, 0x2d, 0x67, 0x6f, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x62, 0x65, 0x63, 0x6e, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_macro_benchmark_proto_rawDescOnce sync.Once - file_macro_benchmark_proto_rawDescData = file_macro_benchmark_proto_rawDesc -) - -func file_macro_benchmark_proto_rawDescGZIP() []byte { - file_macro_benchmark_proto_rawDescOnce.Do(func() { - file_macro_benchmark_proto_rawDescData = protoimpl.X.CompressGZIP(file_macro_benchmark_proto_rawDescData) - }) - return file_macro_benchmark_proto_rawDescData -} - -var file_macro_benchmark_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_macro_benchmark_proto_goTypes = []interface{}{ - (*StartMacroBenchmarkRequest)(nil), // 0: benchmark.StartMacroBenchmarkRequest - (*StartMacroBenchmarkResponse)(nil), // 1: benchmark.StartMacroBenchmarkResponse - (*GetMacroBenchmarkRequest)(nil), // 2: benchmark.GetMacroBenchmarkRequest - (*GetMacroBenchmarkResponse)(nil), // 3: benchmark.GetMacroBenchmarkResponse - (*ListMacroBenchmarksResponse)(nil), // 4: benchmark.ListMacroBenchmarksResponse - (*StatusResponse)(nil), // 5: benchmark.StatusResponse - (*emptypb.Empty)(nil), // 6: google.protobuf.Empty -} -var file_macro_benchmark_proto_depIdxs = []int32{ - 0, // 0: benchmark.Benchmark.StartMacroBenchmark:input_type -> benchmark.StartMacroBenchmarkRequest - 2, // 1: benchmark.Benchmark.GetMacroBenchmark:input_type -> benchmark.GetMacroBenchmarkRequest - 6, // 2: benchmark.Benchmark.ListMacroBenchmarks:input_type -> google.protobuf.Empty - 6, // 3: benchmark.Benchmark.Status:input_type -> google.protobuf.Empty - 1, // 4: benchmark.Benchmark.StartMacroBenchmark:output_type -> benchmark.StartMacroBenchmarkResponse - 3, // 5: benchmark.Benchmark.GetMacroBenchmark:output_type -> benchmark.GetMacroBenchmarkResponse - 4, // 6: benchmark.Benchmark.ListMacroBenchmarks:output_type -> benchmark.ListMacroBenchmarksResponse - 5, // 7: benchmark.Benchmark.Status:output_type -> benchmark.StatusResponse - 4, // [4:8] is the sub-list for method output_type - 0, // [0:4] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_macro_benchmark_proto_init() } -func file_macro_benchmark_proto_init() { - if File_macro_benchmark_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_macro_benchmark_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartMacroBenchmarkRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartMacroBenchmarkResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: 
- return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMacroBenchmarkRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMacroBenchmarkResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListMacroBenchmarksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_macro_benchmark_proto_rawDesc, - NumEnums: 0, - NumMessages: 6, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_macro_benchmark_proto_goTypes, - DependencyIndexes: file_macro_benchmark_proto_depIdxs, - MessageInfos: file_macro_benchmark_proto_msgTypes, - }.Build() - File_macro_benchmark_proto = out.File - file_macro_benchmark_proto_rawDesc = nil - file_macro_benchmark_proto_goTypes = nil - file_macro_benchmark_proto_depIdxs = nil -} diff --git a/integration/benchmark/proto/macro_benchmark.proto b/integration/benchmark/proto/macro_benchmark.proto deleted file mode 100644 index e461ea81892..00000000000 --- a/integration/benchmark/proto/macro_benchmark.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package benchmark; -option go_package = "github.com/onflow/flow-go/integration/becnhmark/proto"; - -import "google/protobuf/empty.proto"; - -message StartMacroBenchmarkRequest {} -message StartMacroBenchmarkResponse {} - -message GetMacroBenchmarkRequest {} -message GetMacroBenchmarkResponse {} - -message ListMacroBenchmarksResponse {} - -message StatusResponse {} - -service Benchmark { - rpc StartMacroBenchmark(StartMacroBenchmarkRequest) - returns (stream StartMacroBenchmarkResponse) {} - rpc GetMacroBenchmark(GetMacroBenchmarkRequest) - returns (GetMacroBenchmarkResponse) {} - rpc ListMacroBenchmarks(google.protobuf.Empty) - returns (ListMacroBenchmarksResponse) {} - - rpc Status(google.protobuf.Empty) returns (StatusResponse) {} -} - diff --git a/integration/benchmark/proto/macro_benchmark_grpc.pb.go b/integration/benchmark/proto/macro_benchmark_grpc.pb.go deleted file mode 100644 index 3dd37ac58ca..00000000000 --- a/integration/benchmark/proto/macro_benchmark_grpc.pb.go +++ /dev/null @@ -1,242 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.9 -// source: macro_benchmark.proto - -package proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// BenchmarkClient is the client API for Benchmark service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type BenchmarkClient interface { - StartMacroBenchmark(ctx context.Context, in *StartMacroBenchmarkRequest, opts ...grpc.CallOption) (Benchmark_StartMacroBenchmarkClient, error) - GetMacroBenchmark(ctx context.Context, in *GetMacroBenchmarkRequest, opts ...grpc.CallOption) (*GetMacroBenchmarkResponse, error) - ListMacroBenchmarks(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListMacroBenchmarksResponse, error) - Status(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StatusResponse, error) -} - -type benchmarkClient struct { - cc grpc.ClientConnInterface -} - -func NewBenchmarkClient(cc grpc.ClientConnInterface) BenchmarkClient { - return &benchmarkClient{cc} -} - -func (c *benchmarkClient) StartMacroBenchmark(ctx context.Context, in *StartMacroBenchmarkRequest, opts ...grpc.CallOption) (Benchmark_StartMacroBenchmarkClient, error) { - stream, err := c.cc.NewStream(ctx, &Benchmark_ServiceDesc.Streams[0], "/benchmark.Benchmark/StartMacroBenchmark", opts...) - if err != nil { - return nil, err - } - x := &benchmarkStartMacroBenchmarkClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Benchmark_StartMacroBenchmarkClient interface { - Recv() (*StartMacroBenchmarkResponse, error) - grpc.ClientStream -} - -type benchmarkStartMacroBenchmarkClient struct { - grpc.ClientStream -} - -func (x *benchmarkStartMacroBenchmarkClient) Recv() (*StartMacroBenchmarkResponse, error) { - m := new(StartMacroBenchmarkResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *benchmarkClient) GetMacroBenchmark(ctx context.Context, in *GetMacroBenchmarkRequest, opts ...grpc.CallOption) (*GetMacroBenchmarkResponse, error) { - out := new(GetMacroBenchmarkResponse) - err := c.cc.Invoke(ctx, "/benchmark.Benchmark/GetMacroBenchmark", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *benchmarkClient) ListMacroBenchmarks(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListMacroBenchmarksResponse, error) { - out := new(ListMacroBenchmarksResponse) - err := c.cc.Invoke(ctx, "/benchmark.Benchmark/ListMacroBenchmarks", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *benchmarkClient) Status(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/benchmark.Benchmark/Status", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// BenchmarkServer is the server API for Benchmark service. 
-// All implementations must embed UnimplementedBenchmarkServer -// for forward compatibility -type BenchmarkServer interface { - StartMacroBenchmark(*StartMacroBenchmarkRequest, Benchmark_StartMacroBenchmarkServer) error - GetMacroBenchmark(context.Context, *GetMacroBenchmarkRequest) (*GetMacroBenchmarkResponse, error) - ListMacroBenchmarks(context.Context, *emptypb.Empty) (*ListMacroBenchmarksResponse, error) - Status(context.Context, *emptypb.Empty) (*StatusResponse, error) - mustEmbedUnimplementedBenchmarkServer() -} - -// UnimplementedBenchmarkServer must be embedded to have forward compatible implementations. -type UnimplementedBenchmarkServer struct { -} - -func (UnimplementedBenchmarkServer) StartMacroBenchmark(*StartMacroBenchmarkRequest, Benchmark_StartMacroBenchmarkServer) error { - return status.Errorf(codes.Unimplemented, "method StartMacroBenchmark not implemented") -} -func (UnimplementedBenchmarkServer) GetMacroBenchmark(context.Context, *GetMacroBenchmarkRequest) (*GetMacroBenchmarkResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMacroBenchmark not implemented") -} -func (UnimplementedBenchmarkServer) ListMacroBenchmarks(context.Context, *emptypb.Empty) (*ListMacroBenchmarksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListMacroBenchmarks not implemented") -} -func (UnimplementedBenchmarkServer) Status(context.Context, *emptypb.Empty) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (UnimplementedBenchmarkServer) mustEmbedUnimplementedBenchmarkServer() {} - -// UnsafeBenchmarkServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to BenchmarkServer will -// result in compilation errors. 
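For context, the forward-compatibility pattern these (now deleted) generated stubs enforced: a server implementation embeds UnimplementedBenchmarkServer so that RPCs added to the service later do not break compilation. A sketch using the deleted package's names, which no longer exist once this diff is applied:

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"

	// This package is removed by the diff; it appears here for illustration only.
	"github.com/onflow/flow-go/integration/benchmark/proto"
)

type benchmarkService struct {
	// Embedding satisfies mustEmbedUnimplementedBenchmarkServer and supplies
	// codes.Unimplemented stubs for any RPCs not overridden below.
	proto.UnimplementedBenchmarkServer
}

// Status overrides the generated stub with a real (if trivial) handler.
func (s *benchmarkService) Status(ctx context.Context, _ *emptypb.Empty) (*proto.StatusResponse, error) {
	return &proto.StatusResponse{}, nil
}

func main() {
	srv := grpc.NewServer()
	proto.RegisterBenchmarkServer(srv, &benchmarkService{})
	_ = srv // listener setup elided
}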
-type UnsafeBenchmarkServer interface { - mustEmbedUnimplementedBenchmarkServer() -} - -func RegisterBenchmarkServer(s grpc.ServiceRegistrar, srv BenchmarkServer) { - s.RegisterService(&Benchmark_ServiceDesc, srv) -} - -func _Benchmark_StartMacroBenchmark_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StartMacroBenchmarkRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(BenchmarkServer).StartMacroBenchmark(m, &benchmarkStartMacroBenchmarkServer{stream}) -} - -type Benchmark_StartMacroBenchmarkServer interface { - Send(*StartMacroBenchmarkResponse) error - grpc.ServerStream -} - -type benchmarkStartMacroBenchmarkServer struct { - grpc.ServerStream -} - -func (x *benchmarkStartMacroBenchmarkServer) Send(m *StartMacroBenchmarkResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Benchmark_GetMacroBenchmark_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetMacroBenchmarkRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BenchmarkServer).GetMacroBenchmark(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/benchmark.Benchmark/GetMacroBenchmark", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BenchmarkServer).GetMacroBenchmark(ctx, req.(*GetMacroBenchmarkRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Benchmark_ListMacroBenchmarks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BenchmarkServer).ListMacroBenchmarks(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/benchmark.Benchmark/ListMacroBenchmarks", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BenchmarkServer).ListMacroBenchmarks(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Benchmark_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BenchmarkServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/benchmark.Benchmark/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BenchmarkServer).Status(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// Benchmark_ServiceDesc is the grpc.ServiceDesc for Benchmark service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Benchmark_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "benchmark.Benchmark", - HandlerType: (*BenchmarkServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetMacroBenchmark", - Handler: _Benchmark_GetMacroBenchmark_Handler, - }, - { - MethodName: "ListMacroBenchmarks", - Handler: _Benchmark_ListMacroBenchmarks_Handler, - }, - { - MethodName: "Status", - Handler: _Benchmark_Status_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StartMacroBenchmark", - Handler: _Benchmark_StartMacroBenchmark_Handler, - ServerStreams: true, - }, - }, - Metadata: "macro_benchmark.proto", -} diff --git a/integration/benchmark/scripts.go b/integration/benchmark/scripts.go deleted file mode 100644 index 2ebce479b49..00000000000 --- a/integration/benchmark/scripts.go +++ /dev/null @@ -1,130 +0,0 @@ -package benchmark - -import ( - _ "embed" - "encoding/hex" - "fmt" - "math/rand" - "strings" - - "github.com/onflow/cadence" - - flowsdk "github.com/onflow/flow-go-sdk" -) - -//go:embed scripts/tokenTransferTransaction.cdc -var tokenTransferTransactionTemplate string - -// TokenTransferTransaction returns a transaction script for transferring `amount` flow tokens to `toAddr` address -func TokenTransferTransaction(ftAddr, flowToken, toAddr *flowsdk.Address, amount float64) (*flowsdk.Transaction, error) { - - withFTAddr := strings.Replace(tokenTransferTransactionTemplate, "0xFUNGIBLETOKENADDRESS", "0x"+ftAddr.Hex(), 1) - withFlowTokenAddr := strings.Replace(withFTAddr, "0xTOKENADDRESS", "0x"+flowToken.Hex(), 1) - - tx := flowsdk.NewTransaction(). - SetScript([]byte(withFlowTokenAddr)) - - cadAmount, err := cadence.NewUFix64(fmt.Sprintf("%f", amount)) - if err != nil { - return nil, err - } - - err = tx.AddArgument(cadAmount) - if err != nil { - return nil, err - } - err = tx.AddArgument(cadence.BytesToAddress(toAddr.Bytes())) - if err != nil { - return nil, err - } - - return tx, nil -} - -//go:embed scripts/addKeyToAccountTransaction.cdc -var addKeyToAccountTransactionTemplate string - -// AddKeyToAccountScript returns a transaction script to add keys to an account -func AddKeyToAccountScript() ([]byte, error) { - return []byte(addKeyToAccountTransactionTemplate), nil -} - -//go:embed scripts/createAccountsTransaction.cdc -var createAccountsScriptTemplate string - -// CreateAccountsScript returns a transaction script for creating an account -func CreateAccountsScript(fungibleToken, flowToken flowsdk.Address) []byte { - return []byte(fmt.Sprintf(createAccountsScriptTemplate, fungibleToken, flowToken)) -} - -//go:embed scripts/myFavContract.cdc -var myFavContract string - -//go:embed scripts/deployingMyFavContractTransaction.cdc -var deployingMyFavContractScriptTemplate string - -func DeployingMyFavContractScript() []byte { - return []byte(fmt.Sprintf(deployingMyFavContractScriptTemplate, "MyFavContract", hex.EncodeToString([]byte(myFavContract)))) - -} - -//go:embed scripts/eventHeavyTransaction.cdc -var eventHeavyScriptTemplate string - -func EventHeavyScript(favContractAddress flowsdk.Address) []byte { - return []byte(fmt.Sprintf(eventHeavyScriptTemplate, favContractAddress)) -} - -//go:embed scripts/compHeavyTransaction.cdc -var compHeavyScriptTemplate string - -func ComputationHeavyScript(favContractAddress flowsdk.Address) []byte { - return []byte(fmt.Sprintf(compHeavyScriptTemplate, favContractAddress)) -} - -//go:embed scripts/ledgerHeavyTransaction.cdc 
-var ledgerHeavyScriptTemplate string - -func LedgerHeavyScript(favContractAddress flowsdk.Address) []byte { - return []byte(fmt.Sprintf(ledgerHeavyScriptTemplate, favContractAddress)) -} - -//go:embed scripts/constExecCostTransaction.cdc -var constExecTransactionTemplate string - -func generateRandomStringWithLen(commentLen uint) string { - const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - bytes := make([]byte, commentLen) - for i := range bytes { - bytes[i] = letterBytes[rand.Intn(len(letterBytes))] - } - return string(bytes) -} - -func generateAuthAccountParamList(authAccountNum uint) string { - authAccountList := []string{} - for i := uint(0); i < authAccountNum; i++ { - authAccountList = append(authAccountList, fmt.Sprintf("acct%d: AuthAccount", i+1)) - } - return strings.Join(authAccountList, ", ") -} - -// ConstExecCostTransaction returns a transaction script for constant execution size (0) -func ConstExecCostTransaction(numOfAuthorizer, commentSizeInByte uint) []byte { - commentStr := generateRandomStringWithLen(commentSizeInByte) - authAccountListStr := generateAuthAccountParamList(numOfAuthorizer) - - // the transaction template has two `%s`: #1 is for comment; #2 is for AuthAccount param list - return []byte(fmt.Sprintf(constExecTransactionTemplate, commentStr, authAccountListStr)) -} - -func bytesToCadenceArray(l []byte) cadence.Array { - values := make([]cadence.Value, len(l)) - for i, b := range l { - values[i] = cadence.NewUInt8(b) - } - - return cadence.NewArray(values) -} - -// TODO add tx size heavy similar to add keys diff --git a/integration/benchmark/scripts/addKeyToAccountTransaction.cdc b/integration/benchmark/scripts/addKeyToAccountTransaction.cdc deleted file mode 100644 index c029729528d..00000000000 --- a/integration/benchmark/scripts/addKeyToAccountTransaction.cdc +++ /dev/null @@ -1,15 +0,0 @@ -transaction(keys: [[UInt8]]) { - prepare(signer: AuthAccount) { - for key in keys { - let publicKey = PublicKey( - publicKey: key, - signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 - ) - signer.keys.add( - publicKey: publicKey, - hashAlgorithm: HashAlgorithm.SHA2_256, - weight: 1000.0 - ) - } - } -} diff --git a/integration/benchmark/scripts/addKeysToAccountTransaction.cdc b/integration/benchmark/scripts/addKeysToAccountTransaction.cdc new file mode 100644 index 00000000000..500dc894c92 --- /dev/null +++ b/integration/benchmark/scripts/addKeysToAccountTransaction.cdc @@ -0,0 +1,15 @@ +transaction(keys: [[UInt8]]) { + prepare(signer: auth(AddContract, AddKey) &Account) { + for key in keys { + let publicKey = PublicKey( + publicKey: key, + signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 + ) + signer.keys.add( + publicKey: publicKey, + hashAlgorithm: HashAlgorithm.SHA2_256, + weight: 1000.0 + ) + } + } +} diff --git a/integration/benchmark/scripts/compHeavyContract.cdc b/integration/benchmark/scripts/compHeavyContract.cdc new file mode 100644 index 00000000000..a44a757228b --- /dev/null +++ b/integration/benchmark/scripts/compHeavyContract.cdc @@ -0,0 +1,16 @@ +access(all) contract ComputationHeavy { + access(all) fun ComputationHeavy(_ n: Int) { + var s: Int256 = 1024102410241024 + var i = 0 + var a = Int256(7) + var b = Int256(5) + var c = Int256(2) + while i < n { + s = s * a + s = s / b + s = s / c + i = i + 1 + } + log(i) + } +} diff --git a/integration/benchmark/scripts/compHeavyTransaction.cdc b/integration/benchmark/scripts/compHeavyTransaction.cdc index 00215f27b8f..4d269f3fb74 100644 --- 
a/integration/benchmark/scripts/compHeavyTransaction.cdc +++ b/integration/benchmark/scripts/compHeavyTransaction.cdc @@ -1,8 +1,8 @@ -import MyFavContract from 0x%s +import ComputationHeavy from 0x%s transaction { - prepare(acct: AuthAccount) {} + prepare(acct: &Account) {} execute { - MyFavContract.ComputationHeavy() + ComputationHeavy.ComputationHeavy(1500) } } diff --git a/integration/benchmark/scripts/createAccountsTransaction.cdc b/integration/benchmark/scripts/createAccountsTransaction.cdc index 8f925b996e2..5829377e002 100644 --- a/integration/benchmark/scripts/createAccountsTransaction.cdc +++ b/integration/benchmark/scripts/createAccountsTransaction.cdc @@ -2,13 +2,13 @@ import FungibleToken from 0x%s import FlowToken from 0x%s transaction(publicKey: [UInt8], count: Int, initialTokenAmount: UFix64) { - prepare(signer: AuthAccount) { - let vault = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + prepare(signer: auth(AddContract, BorrowValue) &Account) { + let vault = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner's Vault") var i = 0 while i < count { - let account = AuthAccount(payer: signer) + let account = Account(payer: signer) let publicKey2 = PublicKey( publicKey: publicKey, signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 @@ -19,8 +19,7 @@ transaction(publicKey: [UInt8], count: Int, initialTokenAmount: UFix64) { weight: 1000.0 ) - let receiver = account.getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() + let receiver = account.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? panic("Could not borrow receiver reference to the recipient's Vault") receiver.deposit(from: <-vault.withdraw(amount: initialTokenAmount)) diff --git a/integration/benchmark/scripts/dataHeavyContract.cdc b/integration/benchmark/scripts/dataHeavyContract.cdc new file mode 100644 index 00000000000..8b8e86589ab --- /dev/null +++ b/integration/benchmark/scripts/dataHeavyContract.cdc @@ -0,0 +1,63 @@ +access(all) contract DataHeavy { + + init() { + self.itemCounter = UInt32(0) + self.items = [] + } + + // items + access(all) event NewItemAddedEvent(id: UInt32, metadata: {String: String}) + + access(self) var itemCounter: UInt32 + + access(all) struct Item { + + access(all) let itemID: UInt32 + + access(all) let metadata: {String: String} + + init(_ metadata: {String: String}) { + self.itemID = DataHeavy.itemCounter + self.metadata = metadata + + // inc the counter + DataHeavy.itemCounter = DataHeavy.itemCounter + UInt32(1) + + // emit event + emit NewItemAddedEvent(id: self.itemID, metadata: self.metadata) + } + } + + access(self) var items: [Item] + + access(all) fun AddItem(_ metadata: {String: String}){ + let item = Item(metadata) + self.items.append(item) + } + + access(all) fun AddManyRandomItems(_ n: Int){ + var i = 0 + while i < n { + DataHeavy.AddItem({"data": "ABCDEFGHIJKLMNOP"}) + i = i + 1 + } + } + + + access(all) event LargeEvent(value: Int256, str: String, list: [UInt256], dic: {String: String}) + + access(all) fun EventHeavy(_ n: Int) { + var s: Int256 = 1024102410241024 + var i = 0 + + while i < n { + emit LargeEvent(value: s, str: s.toString(), list:[], dic:{s.toString():s.toString()}) + i = i + 1 + } + log(i) + } + + access(all) fun LedgerInteractionHeavy(_ n: Int) { + DataHeavy.AddManyRandomItems(n) + } +} diff --git a/integration/benchmark/scripts/dataHeavyTransaction.cdc 
b/integration/benchmark/scripts/dataHeavyTransaction.cdc new file mode 100644 index 00000000000..0638779a87c --- /dev/null +++ b/integration/benchmark/scripts/dataHeavyTransaction.cdc @@ -0,0 +1,9 @@ +import DataHeavy from 0x%s + +transaction { + prepare(acct: &Account) {} + execute { + DataHeavy.LedgerInteractionHeavy(100) + DataHeavy.EventHeavy(100) + } +} diff --git a/integration/benchmark/scripts/deployingMyFavContractTransaction.cdc b/integration/benchmark/scripts/deployingMyFavContractTransaction.cdc index 921ec76e26d..55acce84933 100644 --- a/integration/benchmark/scripts/deployingMyFavContractTransaction.cdc +++ b/integration/benchmark/scripts/deployingMyFavContractTransaction.cdc @@ -1,5 +1,5 @@ transaction { - prepare(signer: AuthAccount) { + prepare(signer: auth(AddContract) &Account) { signer.contracts.add(name: "%s", code: "%s".decodeHex()) } } diff --git a/integration/benchmark/scripts/eventHeavyContract.cdc b/integration/benchmark/scripts/eventHeavyContract.cdc new file mode 100644 index 00000000000..eca1475071c --- /dev/null +++ b/integration/benchmark/scripts/eventHeavyContract.cdc @@ -0,0 +1,14 @@ +access(all) contract EventHeavy { + access(all) event LargeEvent(value: Int256, str: String, list: [UInt256], dic: {String: String}) + + access(all) fun EventHeavy(_ n: Int) { + var s: Int256 = 1024102410241024 + var i = 0 + + while i < n { + emit LargeEvent(value: s, str: s.toString(), list:[], dic:{s.toString():s.toString()}) + i = i + 1 + } + log(i) + } +} diff --git a/integration/benchmark/scripts/eventHeavyTransaction.cdc b/integration/benchmark/scripts/eventHeavyTransaction.cdc index b9df85b2192..bea13fa814f 100644 --- a/integration/benchmark/scripts/eventHeavyTransaction.cdc +++ b/integration/benchmark/scripts/eventHeavyTransaction.cdc @@ -1,8 +1,8 @@ -import MyFavContract from 0x%s +import EventHeavy from 0x%s transaction { - prepare(acct: AuthAccount) {} + prepare(acct: &Account) {} execute { - MyFavContract.EventHeavy() + EventHeavy.EventHeavy(220) } } diff --git a/integration/benchmark/scripts/execDataHeavyTransaction.cdc b/integration/benchmark/scripts/execDataHeavyTransaction.cdc new file mode 100644 index 00000000000..a8db82199a3 --- /dev/null +++ b/integration/benchmark/scripts/execDataHeavyTransaction.cdc @@ -0,0 +1,9 @@ +import MyFavContract from 0x%s + +transaction { + prepare(acct: &Account) {} + execute { + MyFavContract.LedgerInteractionHeavy(100) + MyFavContract.EventHeavy(100) + } +} diff --git a/integration/benchmark/scripts/ledgerHeavyContract.cdc b/integration/benchmark/scripts/ledgerHeavyContract.cdc new file mode 100644 index 00000000000..1fa13888ec8 --- /dev/null +++ b/integration/benchmark/scripts/ledgerHeavyContract.cdc @@ -0,0 +1,48 @@ +access(all) contract LedgerHeavy { + access(all) fun LedgerInteractionHeavy(_ n: Int) { + LedgerHeavy.AddManyRandomItems(n) + } + + access(self) var items: [Item] + + // items + access(all) event NewItemAddedEvent(id: UInt32, metadata: {String: String}) + + access(self) var itemCounter: UInt32 + + access(all) struct Item { + + access(all) let itemID: UInt32 + + access(all) let metadata: {String: String} + + init(_ metadata: {String: String}) { + self.itemID = LedgerHeavy.itemCounter + self.metadata = metadata + + // inc the counter + LedgerHeavy.itemCounter = LedgerHeavy.itemCounter + UInt32(1) + + // emit event + emit NewItemAddedEvent(id: self.itemID, metadata: self.metadata) + } + } + + access(all) fun AddItem(_ metadata: {String: String}){ + let item = Item(metadata) + self.items.append(item) + } + + 
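+    // AddManyRandomItems appends n items with fixed metadata; every call stores a
+    // new Item in the contract's `items` array and bumps `itemCounter`, so the work
+    // grows linearly with n and is dominated by ledger writes.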
access(all) fun AddManyRandomItems(_ n: Int){ + var i = 0 + while i < n { + LedgerHeavy.AddItem({"data": "ABCDEFGHIJKLMNOP"}) + i = i + 1 + } + } + + init() { + self.itemCounter = UInt32(0) + self.items = [] + } +} diff --git a/integration/benchmark/scripts/ledgerHeavyTransaction.cdc b/integration/benchmark/scripts/ledgerHeavyTransaction.cdc index 0b07e590c8a..e8c2d2abc11 100644 --- a/integration/benchmark/scripts/ledgerHeavyTransaction.cdc +++ b/integration/benchmark/scripts/ledgerHeavyTransaction.cdc @@ -1,8 +1,8 @@ -import MyFavContract from 0x%s +import LedgerHeavy from 0x%s transaction { - prepare(acct: AuthAccount) {} + prepare(acct: &Account) {} execute { - MyFavContract.LedgerInteractionHeavy() + LedgerHeavy.LedgerInteractionHeavy(100) } } diff --git a/integration/benchmark/scripts/myFavContract.cdc b/integration/benchmark/scripts/myFavContract.cdc index 48182a11431..ded85b8dbfb 100644 --- a/integration/benchmark/scripts/myFavContract.cdc +++ b/integration/benchmark/scripts/myFavContract.cdc @@ -12,9 +12,9 @@ access(all) contract MyFavContract { access(all) struct Item { - pub let itemID: UInt32 + access(all) let itemID: UInt32 - pub let metadata: {String: String} + access(all) let metadata: {String: String} init(_ metadata: {String: String}) { self.itemID = MyFavContract.itemCounter @@ -45,13 +45,13 @@ access(all) contract MyFavContract { // heavy operations // computation heavy function - access(all) fun ComputationHeavy() { + access(all) fun ComputationHeavy(_ n: Int) { var s: Int256 = 1024102410241024 var i = 0 var a = Int256(7) var b = Int256(5) var c = Int256(2) - while i < 15000 { + while i < n { s = s * a s = s / b s = s / c @@ -63,18 +63,18 @@ access(all) contract MyFavContract { access(all) event LargeEvent(value: Int256, str: String, list: [UInt256], dic: {String: String}) // event heavy function - access(all) fun EventHeavy() { + access(all) fun EventHeavy(_ n: Int) { var s: Int256 = 1024102410241024 var i = 0 - while i < 220 { + while i < n { emit LargeEvent(value: s, str: s.toString(), list:[], dic:{s.toString():s.toString()}) i = i + 1 } log(i) } - access(all) fun LedgerInteractionHeavy() { - MyFavContract.AddManyRandomItems(700) + access(all) fun LedgerInteractionHeavy(_ n: Int) { + MyFavContract.AddManyRandomItems(n) } } diff --git a/integration/benchmark/scripts/scripts.go b/integration/benchmark/scripts/scripts.go new file mode 100644 index 00000000000..54ccf32bebe --- /dev/null +++ b/integration/benchmark/scripts/scripts.go @@ -0,0 +1,99 @@ +package scripts + +import ( + _ "embed" + "fmt" + "strings" + + "github.com/onflow/cadence" + + flowsdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/model/flow" +) + +//go:embed addKeysToAccountTransaction.cdc +var AddKeysToAccountTransaction []byte + +//go:embed createAccountsTransaction.cdc +var createAccountsTransactionTemplate string + +func CreateAccountsTransaction(fungibleToken, flowToken flowsdk.Address) []byte { + return []byte(fmt.Sprintf(createAccountsTransactionTemplate, fungibleToken, flowToken)) +} + +//go:embed compHeavyTransaction.cdc +var ComputationHeavyScriptTemplate string + +//go:embed compHeavyContract.cdc +var ComputationHeavyContractTemplate string + +//go:embed eventHeavyTransaction.cdc +var EventHeavyScriptTemplate string + +//go:embed eventHeavyContract.cdc +var EventHeavyContractTemplate string + +//go:embed ledgerHeavyTransaction.cdc +var LedgerHeavyScriptTemplate string + +//go:embed ledgerHeavyContract.cdc +var LedgerHeavyContractTemplate string + +//go:embed 
dataHeavyTransaction.cdc +var DataHeavyScriptTemplate string + +//go:embed dataHeavyContract.cdc +var DataHeavyContractTemplate string + +//go:embed tokenTransferTransaction.cdc +var tokenTransferTransactionTemplate string + +//go:embed tokenTransferMultiTransaction.cdc +var tokenTransferMultiTransactionTemplate string + +// TokenTransferTransaction returns a transaction script for transferring `amount` flow tokens to `toAddr` address +func TokenTransferTransaction(ftAddr, flowToken, toAddr flow.Address, amount cadence.UFix64) (*flowsdk.Transaction, error) { + + withFTAddr := strings.Replace(tokenTransferTransactionTemplate, "\"FungibleToken\"", "FungibleToken from 0x"+ftAddr.Hex(), 1) + withFlowTokenAddr := strings.Replace(withFTAddr, "\"FlowToken\"", "FlowToken from 0x"+flowToken.Hex(), 1) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(withFlowTokenAddr)) + + err := tx.AddArgument(amount) + if err != nil { + return nil, err + } + err = tx.AddArgument(cadence.BytesToAddress(toAddr.Bytes())) + if err != nil { + return nil, err + } + + return tx, nil +} + +// TokenTransferMultiTransaction returns a transaction script for transferring `amount` flow tokens to all `toAddrs` addresses +func TokenTransferMultiTransaction(ftAddr, flowToken flow.Address, toAddrs []flow.Address, amount cadence.UFix64) (*flowsdk.Transaction, error) { + withFTAddr := strings.Replace(tokenTransferMultiTransactionTemplate, "\"FungibleToken\"", "FungibleToken from 0x"+ftAddr.Hex(), 1) + withFlowTokenAddr := strings.Replace(withFTAddr, "\"FlowToken\"", "FlowToken from 0x"+flowToken.Hex(), 1) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(withFlowTokenAddr)) + + err := tx.AddArgument(amount) + if err != nil { + return nil, err + } + toAddrsArg := make([]cadence.Value, len(toAddrs)) + for i, addr := range toAddrs { + toAddrsArg[i] = cadence.NewAddress(addr) + } + + err = tx.AddArgument(cadence.NewArray(toAddrsArg)) + if err != nil { + return nil, err + } + + return tx, nil +} diff --git a/integration/benchmark/scripts/tokenTransferMultiTransaction.cdc b/integration/benchmark/scripts/tokenTransferMultiTransaction.cdc new file mode 100644 index 00000000000..0117c48e00b --- /dev/null +++ b/integration/benchmark/scripts/tokenTransferMultiTransaction.cdc @@ -0,0 +1,22 @@ +import "FungibleToken" +import "FlowToken" + +transaction(amount: UFix64, to: [Address]) { + let sentVault: @{FungibleToken.Vault} + + prepare(signer: auth(BorrowValue) &Account) { + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) + ?? panic("Could not borrow reference to the owner's Vault!") + self.sentVault <- vaultRef.withdraw(amount: amount*UFix64(to.length)) + } + + execute { + for recipient in to { + let receiverRef = getAccount(recipient) + .capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) + ?? 
panic("Could not borrow receiver reference to the recipient's Vault") + receiverRef.deposit(from: <-self.sentVault.withdraw(amount: amount)) + } + destroy self.sentVault + } +} diff --git a/integration/benchmark/scripts/tokenTransferTransaction.cdc b/integration/benchmark/scripts/tokenTransferTransaction.cdc index 31057f6bf25..dde8cb7e629 100644 --- a/integration/benchmark/scripts/tokenTransferTransaction.cdc +++ b/integration/benchmark/scripts/tokenTransferTransaction.cdc @@ -1,19 +1,18 @@ -import FungibleToken from 0xFUNGIBLETOKENADDRESS -import FlowToken from 0xTOKENADDRESS +import "FungibleToken" +import "FlowToken" transaction(amount: UFix64, to: Address) { - let sentVault: @FungibleToken.Vault + let sentVault: @{FungibleToken.Vault} - prepare(signer: AuthAccount) { - let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + prepare(signer: auth(BorrowValue) &Account) { + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner's Vault!") self.sentVault <- vaultRef.withdraw(amount: amount) } execute { let receiverRef = getAccount(to) - .getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() + .capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? panic("Could not borrow receiver reference to the recipient's Vault") receiverRef.deposit(from: <-self.sentVault) } diff --git a/integration/benchmark/server/bench.sh b/integration/benchmark/server/bench.sh new file mode 100755 index 00000000000..691748b6237 --- /dev/null +++ b/integration/benchmark/server/bench.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +set -x +set -o pipefail + +# this flow-go sub folder will be where all the TPS tests will be run +# this will keep the TPS automation code separate from the code that's being tested so we won't run into issues +# of having old versions of automation code just because we happen to be testing an older version flow-go +git clone https://github.com/onflow/flow-go.git +cd flow-go/integration/localnet || exit + +git fetch +git fetch --tags + +while read -r input; do + + remainder="$input" + branch="${remainder%%:*}"; remainder="${remainder#*:}" + hash="${remainder%%:*}"; remainder="${remainder#*:}" + load="${remainder%%:*}"; remainder="${remainder#*:}" + + git pull + git checkout master + git checkout "$branch" || continue + git reset --hard "$hash" || continue + + git log --oneline | head -1 + git describe + + # instead of running "make stop" which uses docker-compose for a lot of older versions, + # we explicitly run the command here with "docker compose" + DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.nodes.yml down -v --remove-orphans + + make clean-data + make -e COLLECTION=12 VERIFICATION=12 NCLUSTERS=12 LOGLEVEL=INFO bootstrap + + DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.nodes.yml build || continue + DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.nodes.yml up -d || continue + + # sleep is workaround for slow initialization of some node types, so that benchmark does not quit immediately with "connection refused" + sleep 30; + go run ../benchmark/cmd/ci -log-level info -git-repo-path ../../ -tps-initial 800 -tps-min 1 -tps-max 1200 -duration 30m -load-type "$load" -load-config "../benchmark/server/load-config.yml" + + # instead of running "make stop" which uses docker-compose for a lot of older versions, + # we explicitly run 
the command here with "docker compose" + DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.nodes.yml down -v --remove-orphans + + docker system prune -a -f + make clean-data +done </opt/commits.recent + diff --git a/integration/benchmark/server/control.sh b/integration/benchmark/server/control.sh new file mode 100755 index 00000000000..26ecad5a289 --- /dev/null +++ b/integration/benchmark/server/control.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# assumes flow-go was already cloned by user + +# need to add this, otherwise will get the following error when systemd executes git commands +# fatal: detected dubious ownership in repository at '/tmp/flow-go' +# git config --system --add safe.directory /opt/flow-go + +git fetch + +commits_file="/opt/commits.recent" +# the commits_file stores a list of git merge commit hashes of any branches that will each be tested via benchmarking +# and the load they will be tested with +# Sample: +# master:2735ae8dd46ea4d44131284747db849884126712:token-transfer + +# clear the file +: > $commits_file + +# the load_types array stores the different types of loads that will be run on the commits +load_types=("token-transfer" "create-account" "ledger-heavy" "evm-transfer") + +# get the merge commits from the last week from master ordered by author date +for commit in $(git log --merges --first-parent --format="%S:%H" origin/master --since '1 week' --author-date-order ) +do + for load in "${load_types[@]}" + do + echo "$commit:$load" | tee -a $commits_file + done +done diff --git a/integration/benchmark/server/load-config.yml b/integration/benchmark/server/load-config.yml new file mode 100644 index 00000000000..1d06fe392cb --- /dev/null +++ b/integration/benchmark/server/load-config.yml @@ -0,0 +1,30 @@ +token-transfer: + load_type: token-transfer + tps_initial: 800 + tps_min: 1 + tps_max: 1200 +token-transfer-multi: + load_type: token-transfer-multi + tps_initial: 20 + tps_min: 1 + tps_max: 1200 +create-account: + load_type: create-account + tps_initial: 600 + tps_min: 1 + tps_max: 1200 +ledger-heavy: + load_type: ledger-heavy + tps_initial: 3 + tps_min: 1 + tps_max: 1200 +evm-transfer: + load_type: evm-transfer + tps_initial: 500 + tps_min: 1 + tps_max: 1200 +evm-batch-transfer: + load_type: evm-batch-transfer + tps_initial: 15 + tps_min: 1 + tps_max: 1200 diff --git a/integration/benchmark/server/runs.sh b/integration/benchmark/server/runs.sh new file mode 100755 index 00000000000..ed591c1f215 --- /dev/null +++ b/integration/benchmark/server/runs.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +# keeps track of all the historical times TPS tests were run +date +"Current date and time is %a %b %d %T %Z %Y" | tee -a /opt/runs.txt diff --git a/integration/benchmark/server/systemd/flow-tps.service b/integration/benchmark/server/systemd/flow-tps.service new file mode 100644 index 00000000000..9cc8696b965 --- /dev/null +++ b/integration/benchmark/server/systemd/flow-tps.service @@ -0,0 +1,13 @@ +[Unit] +Description=Flow TPS tests - generate list of merge commit hashes and run TPS tests against each one +After=network.target + +[Service] +Type=simple +ExecStart=/var/flow/flow-go/integration/benchmark/server/tps.sh +WorkingDirectory=/var/flow/flow-go/integration/benchmark/server +Environment="GOPATH=/var/flow/go" "GOCACHE=/var/flow/gocache" +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/integration/benchmark/server/tps.sh b/integration/benchmark/server/tps.sh new file mode 100755 index 00000000000..da355f05fd1 --- /dev/null +++ 
b/integration/benchmark/server/tps.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +source runs.sh +source control.sh +source bench.sh diff --git a/integration/benchmark/worker.go b/integration/benchmark/worker.go index ad2a42ccc5b..09fe652a006 100644 --- a/integration/benchmark/worker.go +++ b/integration/benchmark/worker.go @@ -19,9 +19,13 @@ type Worker struct { wg sync.WaitGroup } -func NewWorker(workerID int, interval time.Duration, work workFunc) *Worker { - // TODO(rbtz): pass in real context - ctx, cancel := context.WithCancel(context.TODO()) +func NewWorker( + ctx context.Context, + workerID int, + interval time.Duration, + work workFunc, +) *Worker { + ctx, cancel := context.WithCancel(ctx) return &Worker{ workerID: workerID, diff --git a/integration/benchmark/worker_stats_tracker.go b/integration/benchmark/worker_stats_tracker.go index a568fd7d1b9..cd582a2c2bf 100644 --- a/integration/benchmark/worker_stats_tracker.go +++ b/integration/benchmark/worker_stats_tracker.go @@ -83,7 +83,7 @@ func (st *WorkerStatsTracker) Stop() { st.wg.Wait() } -func (st *WorkerStatsTracker) IncTxTimedout() { +func (st *WorkerStatsTracker) IncTxTimedOut() { st.mux.Lock() defer st.mux.Unlock() @@ -125,18 +125,27 @@ func (st *WorkerStatsTracker) GetStats() WorkerStats { return st.stats } -func NewPeriodicStatsLogger(st *WorkerStatsTracker, log zerolog.Logger) *Worker { - w := NewWorker(0, 1*time.Second, func(workerID int) { - stats := st.GetStats() - log.Info(). - Int("Workers", stats.Workers). - Int("TxsSent", stats.TxsSent). - Int("TxsTimedout", stats.TxsTimedout). - Int("TxsExecuted", stats.TxsExecuted). - Float64("TxsSentMovingAverage", stats.TxsSentMovingAverage). - Float64("TxsExecutedMovingAverage", stats.TxsExecutedMovingAverage). - Msg("worker stats") - }) +func NewPeriodicStatsLogger( + ctx context.Context, + st *WorkerStatsTracker, + log zerolog.Logger, +) *Worker { + w := NewWorker( + ctx, + 0, + 3*time.Second, + func(workerID int) { + stats := st.GetStats() + log.Info(). + Int("Workers", stats.Workers). + Int("TxsSent", stats.TxsSent). + Int("TxsTimedout", stats.TxsTimedout). + Int("TxsExecuted", stats.TxsExecuted). + Float64("TxsSentMovingAverage", stats.TxsSentMovingAverage). + Float64("TxsExecutedMovingAverage", stats.TxsExecutedMovingAverage). 
+ Msg("worker stats") + }, + ) return w } diff --git a/integration/benchmark/worker_test.go b/integration/benchmark/worker_test.go index 3fd6a31b844..3a51063970c 100644 --- a/integration/benchmark/worker_test.go +++ b/integration/benchmark/worker_test.go @@ -1,6 +1,7 @@ package benchmark import ( + "context" "testing" "time" @@ -15,7 +16,12 @@ func TestWorkerImmediate(t *testing.T) { t.Parallel() t.Run("immediate", func(t *testing.T) { done := make(chan struct{}) - w := NewWorker(0, time.Hour, func(workerID int) { close(done) }) + w := NewWorker( + context.Background(), + 0, + time.Hour, + func(workerID int) { close(done) }, + ) w.Start() unittest.AssertClosesBefore(t, done, 5*time.Second) @@ -30,6 +36,7 @@ func TestWorker(t *testing.T) { i := atomic.NewInt64(0) done := make(chan struct{}) w := NewWorker( + context.Background(), 0, time.Millisecond, func(workerID int) { @@ -49,11 +56,13 @@ func TestWorker(t *testing.T) { func TestWorkerStartStop(t *testing.T) { t.Parallel() t.Run("stop w/o start", func(t *testing.T) { - w := NewWorker(0, time.Second, func(workerID int) {}) + w := NewWorker( + context.Background(), 0, time.Second, func(workerID int) {}) w.Stop() }) t.Run("stop and start", func(t *testing.T) { - w := NewWorker(0, time.Second, func(workerID int) {}) + w := NewWorker( + context.Background(), 0, time.Second, func(workerID int) {}) w.Start() w.Stop() }) diff --git a/integration/benchnet2/Makefile b/integration/benchnet2/Makefile index 62859fbf74c..208a3ffd073 100644 --- a/integration/benchnet2/Makefile +++ b/integration/benchnet2/Makefile @@ -1,15 +1,27 @@ # default value of the Docker base registry URL which can be overriden when invoking the Makefile DOCKER_REGISTRY := us-west1-docker.pkg.dev/dl-flow-benchnet-automation/benchnet +CONFIGURATION_BUCKET := flow-benchnet-automation # default values that callers can override when calling target +# Node role counts ACCESS = 1 -COLLECTION = 6 -VALID_COLLECTION := $(shell test $(COLLECTION) -ge 6; echo $$?) +COLLECTION = 2 +VALID_COLLECTION := $(shell test $(COLLECTION) -ge 2; echo $$?) CONSENSUS = 2 VALID_CONSENSUS := $(shell test $(CONSENSUS) -ge 2; echo $$?) EXECUTION = 2 VALID_EXECUTION := $(shell test $(EXECUTION) -ge 2; echo $$?) VERIFICATION = 1 +# Epoch config +EPOCH_LEN = 5000 +EPOCH_STAKING_PHASE_LEN = 500 +DKG_PHASE_LEN = 1000 +EPOCH_EXTENSION_LEN = 600 +# Spork root config +ROOT_VIEW = 0 + +KVSTORE_VERSION=default +FINALIZATION_SAFETY_THRESHOLD=100 validate: ifeq ($(strip $(VALID_EXECUTION)), 1) @@ -18,7 +30,7 @@ ifeq ($(strip $(VALID_EXECUTION)), 1) else ifeq ($(strip $(VALID_CONSENSUS)), 1) $(error Number of Consensus nodes should be no less than 2) else ifeq ($(strip $(VALID_COLLECTION)), 1) - $(error Number of Collection nodes should be no less than 6) + $(error Number of Collection nodes should be no less than 2) else ifeq ($(strip $(NETWORK_ID)),) $(error NETWORK_ID cannot be empty) else ifeq ($(strip $(NAMESPACE)),) @@ -27,17 +39,59 @@ endif # assumes there is a checked out version of flow-go in a "flow-go" sub-folder at this level so that the bootstrap executable # for the checked out version will be run in the sub folder but the bootstrap folder will be created here (outside of the checked out flow-go in the sub folder) -gen-bootstrap: clone-flow - cd flow-go && make crypto_setup_gopath - cd flow-go/cmd/bootstrap && go run -tags relic . 
genconfig --address-format "%s%d-${NETWORK_ID}.${NAMESPACE}:3569" --access $(ACCESS) --collection $(COLLECTION) --consensus $(CONSENSUS) --execution $(EXECUTION) --verification $(VERIFICATION) --weight 100 -o ./ --config ../../../bootstrap/conf/node-config.json - cd flow-go/cmd/bootstrap && go run -tags relic . keygen --machine-account --config ../../../bootstrap/conf/node-config.json -o ../../../bootstrap/keys +gen-bootstrap: + cd flow-go-bootstrap/cmd/bootstrap && go run . genconfig \ + --address-format "%s%d-${NETWORK_ID}.${NAMESPACE}:3569" \ + --access $(ACCESS) \ + --collection $(COLLECTION) \ + --consensus $(CONSENSUS) \ + --execution $(EXECUTION) \ + --verification $(VERIFICATION) \ + --weight 100 \ + -o ./ \ + --config ../../../bootstrap/conf/node-config.json + cd flow-go-bootstrap/cmd/bootstrap && go run . keygen \ + --machine-account \ + --config ../../../bootstrap/conf/node-config.json \ + -o ../../../bootstrap/keys echo {} > ./bootstrap/conf/partner-stakes.json mkdir ./bootstrap/partner-nodes - cd flow-go/cmd/bootstrap && go run -tags relic . rootblock --root-chain bench --root-height 0 --root-parent 0000000000000000000000000000000000000000000000000000000000000000 --config ../../../bootstrap/conf/node-config.json -o ../../../bootstrap/ --fast-kg --partner-dir ../../../bootstrap/partner-nodes --partner-weights ../../../bootstrap/conf/partner-stakes.json --internal-priv-dir ../../../bootstrap/keys/private-root-information - cd flow-go/cmd/bootstrap && go run -tags relic . finalize --root-commit 0000000000000000000000000000000000000000000000000000000000000000 --service-account-public-key-json "{\"PublicKey\":\"R7MTEDdLclRLrj2MI1hcp4ucgRTpR15PCHAWLM5nks6Y3H7+PGkfZTP2di2jbITooWO4DD1yqaBSAVK8iQ6i0A==\",\"SignAlgo\":2,\"HashAlgo\":1,\"SeqNumber\":0,\"Weight\":1000}" --config ../../../bootstrap/conf/node-config.json -o ../../../bootstrap/ --partner-dir ../../../bootstrap/partner-nodes --partner-weights ../../../bootstrap/conf/partner-stakes.json --collection-clusters 1 --epoch-counter 0 --epoch-length 30000 --epoch-staking-phase-length 20000 --epoch-dkg-phase-length 2000 --genesis-token-supply="1000000000.0" --protocol-version=0 --internal-priv-dir ../../../bootstrap/keys/private-root-information --dkg-data ../../../bootstrap/private-root-information/root-dkg-data.priv.json --root-block ../../../bootstrap/public-root-information/root-block.json --root-block-votes-dir ../../../bootstrap/public-root-information/root-block-votes/ --epoch-commit-safety-threshold=1000 + cd flow-go-bootstrap/cmd/bootstrap && go run . rootblock \ + --root-chain bench \ + --root-height 0 \ + --root-parent 0000000000000000000000000000000000000000000000000000000000000000 \ + --root-view $(ROOT_VIEW) \ + --epoch-counter 0 \ + --epoch-length $(EPOCH_LEN) \ + --epoch-staking-phase-length $(EPOCH_STAKING_PHASE_LEN) \ + --epoch-dkg-phase-length $(DKG_PHASE_LEN) \ + --kvstore-version=$(KVSTORE_VERSION) \ + --kvstore-epoch-extension-view-count=$(EPOCH_EXTENSION_LEN) \ + --kvstore-finalization-safety-threshold=$(FINALIZATION_SAFETY_THRESHOLD)\ + --collection-clusters 1 \ + --protocol-version=0 \ + --use-default-epoch-timing \ + --config ../../../bootstrap/conf/node-config.json \ + -o ../../../bootstrap/ \ + --partner-dir ../../../bootstrap/partner-nodes \ + --partner-weights ../../../bootstrap/conf/partner-stakes.json \ + --internal-priv-dir ../../../bootstrap/keys/private-root-information + cd flow-go-bootstrap/cmd/bootstrap && go run . 
finalize \ + --config ../../../bootstrap/conf/node-config.json \ + -o ../../../bootstrap/ \ + --partner-dir ../../../bootstrap/partner-nodes \ + --partner-weights ../../../bootstrap/conf/partner-stakes.json \ + --internal-priv-dir ../../../bootstrap/keys/private-root-information \ + --dkg-data ../../../bootstrap/private-root-information/root-dkg-data.priv.json \ + --root-block ../../../bootstrap/public-root-information/root-block.json \ + --intermediary-bootstrapping-data ../../../bootstrap/public-root-information/intermediary-bootstrapping-data.json \ + --root-commit 0000000000000000000000000000000000000000000000000000000000000000 \ + --genesis-token-supply="1000000000.0" \ + --service-account-public-key-json "{\"PublicKey\":\"R7MTEDdLclRLrj2MI1hcp4ucgRTpR15PCHAWLM5nks6Y3H7+PGkfZTP2di2jbITooWO4DD1yqaBSAVK8iQ6i0A==\",\"SignAlgo\":2,\"HashAlgo\":1,\"SeqNumber\":0,\"Weight\":1000}" \ + --root-block-votes-dir ../../../bootstrap/public-root-information/root-block-votes/ gen-helm-l1: - go run automate/cmd/level1/bootstrap.go --data bootstrap/public-root-information/root-protocol-state-snapshot.json --dockerTag $(NETWORK_ID) --dockerRegistry $(DOCKER_REGISTRY) + go run automate/cmd/level1/bootstrap.go --data bootstrap/public-root-information/root-protocol-state-snapshot.json --dockerTag $(DOCKER_TAG) --dockerRegistry $(DOCKER_REGISTRY) gen-helm-l2: go run automate/cmd/level2/template.go --data template-data.json --template automate/templates/helm-values-all-nodes.yml --outPath="./values.yml" @@ -55,7 +109,7 @@ deploy-all: validate gen-helm-values k8s-secrets-create helm-deploy clean-all: validate k8s-delete k8s-delete-secrets clean-bootstrap clean-gen-helm clean-flow # target to be used in workflow as local clean up will not be needed -remote-clean-all: validate k8s-delete-secrets k8s-delete +remote-clean-all: validate delete-configuration k8s-delete clean-bootstrap: rm -rf ./bootstrap @@ -64,26 +118,27 @@ clean-gen-helm: rm -f values.yml rm -f template-data.json -k8s-secrets-create: - bash ./create-secrets.sh ${NETWORK_ID} ${NAMESPACE} +download-values-file: + gsutil cp gs://${CONFIGURATION_BUCKET}/${NETWORK_ID}/values.yml . + +upload-bootstrap: + tar -cvf ${NETWORK_ID}.tar -C ./bootstrap . 
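+	# publish the bootstrap tarball and the helm values file to the shared configuration bucket,
+	# keyed by NETWORK_ID, so that helm-deploy (via download-values-file) and
+	# delete-configuration can locate them later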
+ gsutil cp ${NETWORK_ID}.tar gs://${CONFIGURATION_BUCKET}/${NETWORK_ID}.tar + gsutil cp values.yml gs://${CONFIGURATION_BUCKET}/${NETWORK_ID}/values.yml -helm-deploy: - helm upgrade --install -f ./values.yml ${NETWORK_ID} ./flow --set ingress.enabled=true --set networkId="${NETWORK_ID}" --set owner="${OWNER}" --debug --namespace ${NAMESPACE} --wait +helm-deploy: download-values-file + helm upgrade --install -f ./values.yml ${NETWORK_ID} ./flow --set ingress.enabled=true --set networkId="${NETWORK_ID}" --set owner="${OWNER}" --set configurationBucket="${CONFIGURATION_BUCKET}" --debug --namespace ${NAMESPACE} --wait k8s-delete: helm delete ${NETWORK_ID} --namespace ${NAMESPACE} - kubectl delete pvc -l networkId=${NETWORK_ID} --namespace ${NAMESPACE} + kubectl delete pvc -l network=${NETWORK_ID} --namespace ${NAMESPACE} -k8s-delete-secrets: - kubectl delete secrets -l networkId=${NETWORK_ID} --namespace ${NAMESPACE} +delete-configuration: + gsutil rm gs://${CONFIGURATION_BUCKET}/${NETWORK_ID}.tar + gsutil rm gs://${CONFIGURATION_BUCKET}/${NETWORK_ID}/values.yml k8s-pod-health: validate kubectl get pods --namespace ${NAMESPACE} -clone-flow: clean-flow - # this cloned repo will be used for generating bootstrap info specific to that tag / version - git clone https://github.com/onflow/flow-go.git - cd flow-go && git checkout $(REF_FOR_BOOTSTRAP) - clean-flow: rm -rf flow-go diff --git a/integration/benchnet2/automate/level1/bootstrap.go b/integration/benchnet2/automate/level1/bootstrap.go index bfc5f4466bf..c79b26c3147 100644 --- a/integration/benchnet2/automate/level1/bootstrap.go +++ b/integration/benchnet2/automate/level1/bootstrap.go @@ -6,6 +6,8 @@ import ( "log" "os" "strings" + + "github.com/onflow/flow-go/state/protocol/inmem" ) type Bootstrap struct { @@ -37,27 +39,27 @@ func (b *Bootstrap) GenTemplateData(outputToFile bool, dockerTag string, dockerR - // map any json data map - we can't use arrays here because the bootstrap json data is not an array of objects - // this avoids the use of structs in case the json changes - // https://stackoverflow.com/a/38437140/5719544 - var dataMap map[string]interface{} - err = json.Unmarshal(dataBytes, &dataMap) + // decode the root snapshot into the typed in-memory model so the field accesses below are checked at compile time + var snapshot inmem.EncodableSnapshot + err = json.Unmarshal(dataBytes, &snapshot) if err != nil { log.Fatal(err) } - // examine "Identities" section for list of node data to extract and build out node data list - identities := dataMap["Identities"].([]interface{}) + // extract the node data list from the current epoch setup participants + epochData := snapshot.SealingSegment.LatestProtocolStateEntry().EpochEntry + identities := epochData.CurrentEpochSetup.Participants var nodeDataList []NodeData for _, identity := range identities { - identityMap := identity.(map[string]interface{}) - nodeID := identityMap["NodeID"].(string) - role := identityMap["Role"].(string) - address := identityMap["Address"].(string) + nodeID := identity.NodeID + role := identity.Role + address := identity.Address // address will be in format: "verification1.:3569" so we want to extract the name from before the '.' 
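+			// e.g. strings.Split("verification1.:3569", ".")[0] yields "verification1"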
name := strings.Split(address, ".")[0] nodeDataList = append(nodeDataList, NodeData{ - Id: nodeID, - Role: role, + Id: nodeID.String(), + Role: role.String(), Name: name, DockerTag: dockerTag, DockerRegistry: dockerRegistry, diff --git a/integration/benchnet2/automate/level1/bootstrap_test.go b/integration/benchnet2/automate/level1/bootstrap_test.go index 5354ded82d4..4987a350988 100644 --- a/integration/benchnet2/automate/level1/bootstrap_test.go +++ b/integration/benchnet2/automate/level1/bootstrap_test.go @@ -12,11 +12,19 @@ import ( const BootstrapPath = "../testdata/level1/data/" const ExpectedOutputPath = "../testdata/level1/expected" +// TestGenerateBootstrap_DataTable validates that a root snapshot fixture produces the +// expected output template. If this test fails, it is likely because the underlying +// Snapshot model has changed. In that case, you can generate a new fixture file with +// a script like: +// +// participants := unittest.IdentityListFixture(10, unittest.WithAllRoles()) +// snapshot := unittest.RootSnapshotFixture(participants) +// json.NewEncoder(os.Stdout).Encode(snapshot.Encodable()) func TestGenerateBootstrap_DataTable(t *testing.T) { testDataMap := map[string]testData{ - "2 AN, 6 LN, 3 CN, 2 EN, 1 VN": { - bootstrapPath: filepath.Join(BootstrapPath, "root-protocol-state-snapshot1.json"), - expectedOutput: filepath.Join(ExpectedOutputPath, "template-data-input1.json"), + "10 nodes": { + bootstrapPath: filepath.Join(BootstrapPath, "snapshot-fixture1.json"), + expectedOutput: filepath.Join(ExpectedOutputPath, "template-fixture1.json"), dockerTag: "v0.27.6", dockerRegistry: "gcr.io/flow-container-registry/", }, @@ -33,7 +41,7 @@ func TestGenerateBootstrap_DataTable(t *testing.T) { expectedDataBytes, err := os.ReadFile(testData.expectedOutput) require.NoError(t, err) err = json.Unmarshal(expectedDataBytes, &expectedNodeData) - require.Nil(t, err) + require.NoError(t, err) // check generated template data file is correct require.Equal(t, len(expectedNodeData), len(actualNodeData)) diff --git a/integration/benchnet2/automate/templates/helm-values-all-nodes.yml b/integration/benchnet2/automate/templates/helm-values-all-nodes.yml index 5c939ad2dd6..e4b9db2e55a 100644 --- a/integration/benchnet2/automate/templates/helm-values-all-nodes.yml +++ b/integration/benchnet2/automate/templates/helm-values-all-nodes.yml @@ -10,12 +10,13 @@ access: limits: cpu: "800m" memory: "10Gi" - storage: 1G + storage: 2G nodes: {{- range $val := .}}{{if eq ($val.role) ("access")}} {{$val.name}}: args:{{template "args" .}} - --loglevel=INFO + - --admin-addr=0.0.0.0:9002 - --rpc-addr=0.0.0.0:9000 - --secure-rpc-addr=0.0.0.0:9001 - --http-addr=0.0.0.0:8000 @@ -38,12 +39,13 @@ collection: limits: cpu: "800m" memory: "10Gi" - storage: 1G + storage: 2G nodes: {{- range $val := .}}{{if eq ($val.role) ("collection")}} {{$val.name}}: args:{{template "args" .}} - --loglevel=INFO + - --admin-addr=0.0.0.0:9002 - --block-rate-delay=950ms - --ingress-addr=0.0.0.0:9000 - --insecure-access-api=false @@ -67,7 +69,10 @@ consensus: {{$val.name}}: args:{{template "args" .}} - --loglevel=DEBUG - - --block-rate-delay=800ms + - --admin-addr=0.0.0.0:9002 + # Benchnet networks use default 1bps timing + - --cruise-ctl-max-view-duration=1500ms + - --hotstuff-min-timeout=2s - --chunk-alpha=1 - --emergency-sealing-active=false - --insecure-access-api=false @@ -85,16 +90,16 @@ execution: limits: cpu: "800m" memory: "10Gi" - storage: 10G + storage: 50G nodes: {{- range $val := .}}{{if eq ($val.role) ("execution")}} 
{{$val.name}}: args:{{template "args" .}} - --loglevel=INFO - - --triedir=/trie + - --admin-addr=0.0.0.0:9002 - --rpc-addr=0.0.0.0:9000 - - --cadence-tracing=false - --extensive-tracing=false + - --enable-storehouse=false env:{{template "env" .}} image: {{$val.docker_registry}}/execution:{{$val.docker_tag}} nodeId: {{$val.node_id}} @@ -114,6 +119,7 @@ verification: {{$val.name}}: args:{{template "args" .}} - --loglevel=INFO + - --admin-addr=0.0.0.0:9002 - --chunk-alpha=1 env:{{template "env" .}} image: {{$val.docker_registry}}/verification:{{$val.docker_tag}} @@ -121,7 +127,7 @@ verification: {{end}}{{end}} {{define "args"}} - - --bootstrapdir=/bootstrap + - --bootstrapdir=/data/bootstrap - --datadir=/data/protocol - --secretsdir=/data/secret - --bind=0.0.0.0:3569 diff --git a/integration/benchnet2/automate/testdata/level1/data/root-protocol-state-snapshot1.json b/integration/benchnet2/automate/testdata/level1/data/root-protocol-state-snapshot1.json deleted file mode 100644 index 4c3ab0db114..00000000000 --- a/integration/benchnet2/automate/testdata/level1/data/root-protocol-state-snapshot1.json +++ /dev/null @@ -1,824 +0,0 @@ -{ - "Head": { - "ChainID": "flow-benchnet", - "ParentID": "0000000000000000000000000000000000000000000000000000000000000000", - "Height": 0, - "PayloadHash": "7b3b313bd83e01d13c449d4dd4bac04137f5090bcb305ddd750298bd16e5339b", - "Timestamp": "2022-11-04T14:03:22Z", - "View": 0, - "ParentVoterIndices": null, - "ParentVoterSigData": null, - "ProposerID": "0000000000000000000000000000000000000000000000000000000000000000", - "ProposerSigData": null, - "ID": "452adae802d6a03d24e8369e5df4bc93617d407ebde2dfeee2554e4ef0c28d6a" - }, - "Identities": [ - { - "NodeID": "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "Address": "collection1.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "l3r+iJ55R81PjKLmapkBQEhr6PaWQKjXEFb4dBdmHhQ7JKcpndtgXqFoxPUbhC+sFtQKTAsr8XJuhDjbBetf3xEYqMkb6y4BD+Ji6Wm4AdtB74LgaiXqJLCPAehULLkI", - "NetworkPubKey": "LN8V7Msz37skANzyL8CwOxR+umk2tuOHEefBp5PwqEM5gyqkFFN24TlwpXMe0bxFAk0XCPfkphF2Fdua0KrxwQ==" - }, - { - "NodeID": "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "Address": "collection2.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oW36l4h/JSzNXufJS+09jP3cpQkfKfcyaqMVQMgMiVDGk9qXck3XGqHWRwHGliG9D8L0EWw0WeNPJVxop7iyJOGDgYHmKBVqo4anTDe2cHK7KjL/jWYmf8FM4/jI+9Z6", - "NetworkPubKey": "/nDR5ro0WUlVjXEEDGh0fb/wkxlg17DvrvCCWFuaoCukzETqM6VYV5o5Afsrt85I2MX/1aaBH4cB0yeYKT4h4g==" - }, - { - "NodeID": "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "Address": "collection3.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "qGb7lluGuJqg4hsDr6cOceyJbvYsZnmu9V1OduXsCsb1WXh7yiXwbnB4400r5/sWAWaI6vjIr/btVK694MvJllB0QC5WLVVVU4PIldHBE3pn9DDBnmk3mfUPL9ob265l", - "NetworkPubKey": "rnSLNQtIhBfPiN8nrr4Nsbx3AH2jwtWxY4S9lkr+cGoARAQ7D7K4GH0SObhBetnSj4igaHOsDMzeC2owQL0/7w==" - }, - { - "NodeID": "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - "Address": "collection4.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oMY2bsEMVt0IFE/kfUFBdCoFLYbeq9TEes9PUhViSQgMVBmyJ/wjfN9Q1FvlmGcCEtLM8P0EWbEiHvAgao6MUkLsqTFOhExlulqQska6za5hMMqCeLdjbZTl/Kk37Cey", - "NetworkPubKey": "hOzBuFgkkRv3K2mRr4L1H1FfcJxhdwfdjKLCTuzTQCvHw9AJNJ7tIypAjgBxcDMz4onqLhtzuShUjs8zTPeyZA==" - }, - { - "NodeID": "436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363", - "Address": "collection5.:3569", - "Role": "collection", - "Weight": 
1000, - "StakingPubKey": "ournHqKKQ0zacuQl1uNqLnOulZ2yV+OSq4kY9wKrvpfg0y2bsJhFjkmyle8Ffoi5Exd4TejDqv+Vhp3Dzm5X8RJM5jFvqzDIRy/fTR19LOXRNXNi88BrxajTJf9ZFZVt", - "NetworkPubKey": "xHhc4n1UNAwGx18lk1XI1vamt1D3yFT7grBgjFbAC3d3jrs7B0uyVkwQNKW1L5aS0CGaPYn/W85HnN+7Lgn9iA==" - }, - { - "NodeID": "44696574657220536869726c657900c100318341c6796198aa37e627949074bc", - "Address": "collection6.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "goWe7pNkhi44qHnRkKpsf4AiRGP3rn0nwvQoXESAVzL2gI+9P67Fjx6yzn/1WGCWClmYN0ZSPxjcLVzm84Lk6P5oakg5VMqW3XLOoh+qsMzDwdxj+8sAOS/Ru25BuB8e", - "NetworkPubKey": "oo8hfXYogKS0xDhoB1XjHKBzAmOPw6mUWPVRaTV20f9SYAs4IMCN+U62TEl4a7aXz3OWO0pvYYKFndNFLNLA8g==" - }, - { - "NodeID": "4a616d65732048756e74657200e9ffa4e085542cfa80d15f0e61e54606c6cdb3", - "Address": "consensus1.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "kTbHpoV2R0Ef5WgbL/iDidnqSDD833Xda1Djm9IEOQMChKg7k1dbyS8DITl6Zb5WGfbeHUN3G9cNU2BGSGG4mE41cD276M64+Vx+mHkCzSc+uG69BORCsF703cSXocq8", - "NetworkPubKey": "UswVkeEZpRexigHGj2Rsiq5sC/v8gmqxoEqTM4M5/0CSb2YzgXzAeQpYyrP0a2kVZAn5cYSThPBu1GBATbEo6A==" - }, - { - "NodeID": "4a65666665727920446f796c65005cf2fe1daafe62a66f59fd9afa12f0f78914", - "Address": "consensus2.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "uJFIXJzXtuq+dWyhQdi68MgzoOVMML++yo+coaDqNV4OAfTZiuVgzqBrt4G9R1EqCn7CJA79kW8c18IiJdtmAUDwYHg3xhwZjBjrCgMSTkhGrPevyoOnGQNfqQ4xr7bL", - "NetworkPubKey": "YTGQbfw0HGJOcBbx/VGYEqcjnWGpRedivdLB3Od1hpVfPBgYWOSS8qndV9PfUzuAEPr2YqRWbfcK7l8gPl8dyA==" - }, - { - "NodeID": "4a6f7264616e20536368616c6d0064527abdf7ba98fac951ab71ac6aba31cfa1", - "Address": "consensus3.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "pQTTHFSnX47P6p/F+5PK8G2YHHb2I86RDAK0XsDzc/7Bb+gP1+BmA/LnZH9KtCK0EGesjanfcNrV7auxk0vZZyNfg6Tfv1EpfWKv3yhxDVX3YxqrAr21TiZOftG9AXkd", - "NetworkPubKey": "fDW1pSPep/lCfdJfDUkj94PULq7//Qo2d8MI9sD824uke2fjuj/XRKxnDhEmvfOjVW1n3hGDpJ6fZZqB5mpehg==" - }, - { - "NodeID": "4a6f73682048616e6e616e00571da9984a91e31b5592e90d7be91703b2750235", - "Address": "execution1.:3569", - "Role": "execution", - "Weight": 1000, - "StakingPubKey": "g1uhAX+FAd621BY48Q+2FTy8jGxIKCtLCqYmj/tsbPt1j0QjryxDgxhlZ3fCznGfAH4exTXoYCUcK0uZ6zPHSO3HpNBqJ0p/VXQVsKxb+BkfrJ57jvSdFFjNr8De3JRO", - "NetworkPubKey": "PuPWlSXScP/pse2yyL/5qQcAtt5GUf7962RdUGy9hXsTjM+nrK371lpgqGsbngo0aHIUR2FqhzKr4O+sjGwqqQ==" - }, - { - "NodeID": "4b616e205a68616e670000937a7f84d6df0ca2acf95125c33c4b2637bae4e680", - "Address": "execution2.:3569", - "Role": "execution", - "Weight": 1000, - "StakingPubKey": "hpa0KDI1B/8lKK0W/tS7xlXUfmX8L8RnkROVLLFzPCNIFEFuk5j+ogO0ivziCxueEvKRmMvwV08JVL5/ete4E+LaGXXwrY6dda9EMx5PjWy+37HZPFuo7K4Ho0evp6/7", - "NetworkPubKey": "O3G0K0uUs8i2fBstnNscqo/lGwXirpVwA8igmVILKmvqoZIBgJHjrDKAgIjwzt2CVERC+74yMhKIBb5Acww5qg==" - }, - { - "NodeID": "4c61796e65204c616672616e636500ee3643453a3694301f3a232c2f5b9427e2", - "Address": "verification1.:3569", - "Role": "verification", - "Weight": 1000, - "StakingPubKey": "tkOdEz5neKr3QQ+uX7evxAopYKifXve+Q4Uihu4i9IG76r1l0qdf9PvDCjm5V9jQCJvcR467MkJe+wgI471O/ZFbY4BZ6ZnRw7Wy2SfO1BRS6DsUBkLetpzYzpyv/kak", - "NetworkPubKey": "khr2BQ5jLFtRUIfTUY64v6q1KWQcftYMRLOOqw7yRj8MIFgqL7Tku6NcjQNzmA9JG8humU1mz7ToGOf724WC6w==" - }, - { - "NodeID": "c8a31df973605a8ec8351810d38e70fc66d9871ef978194f246025a5f9f7bf6e", - "Address": "access1.:3569", - "Role": "access", - "Weight": 1000, - "StakingPubKey": "toI/M1jd37slSu3FdSHZpl+BAd87IMyjvIVxAAizQTkZdNWAXStfoc2RuiDXBWKgBjoCGVpngRRjFJs+LYcS48k1u6s67CPWzyl0wuQOsHrwy/I0bibYxf42JKvwnUOa", - 
"NetworkPubKey": "MKXI7iylu7A85AOyNTp5hStVh10StIs9EvtYJYlzXh0q5ehub5cspl9Tj6TpvSwUaI0MJZQ60hdkm4uFxzIIMQ==" - }, - { - "NodeID": "d3be7a089cc8a29a3ad8fcff5809c1ae27f35159ebcf585e3e4e91a1f3b87d89", - "Address": "access2.:3569", - "Role": "access", - "Weight": 1000, - "StakingPubKey": "pMWuhGCBzWssB/r2O1r7DWq8Kq5nmW6dzSKfPvosE0ip8xvXV7WBoAk1hrfpCqCkCAb8fZDNFVPeu9Xyw6gk0jL9PAfyZ7yvTEBnueZmnR9K13ZivyzubtqPYg0+f0kz", - "NetworkPubKey": "qhrQoE4oNHwp+XjA+6hN1Ye6pHmB4ctsV8zqwIfZRwh9vN+eljimmT4Da0Gx0ErFS0aqe1gG7m7VT64lHYSONQ==" - } - ], - "LatestSeal": { - "BlockID": "452adae802d6a03d24e8369e5df4bc93617d407ebde2dfeee2554e4ef0c28d6a", - "ResultID": "b0673867a09da5840bb3a80907a3e8b2191ebc40bc1c2fbe3d317000222c6974", - "FinalState": "3f6035c1324186316f86fbeb37248196e67ff946e47efafa94508e89639c046c", - "AggregatedApprovalSigs": null, - "ID": "51e7d6952a7d1f3982723835f6b06a530bd83dff755c17577e94456324fadd21" - }, - "LatestResult": { - "PreviousResultID": "0000000000000000000000000000000000000000000000000000000000000000", - "BlockID": "452adae802d6a03d24e8369e5df4bc93617d407ebde2dfeee2554e4ef0c28d6a", - "Chunks": [ - { - "CollectionIndex": 0, - "StartState": "0000000000000000000000000000000000000000000000000000000000000000", - "EventCollection": "0000000000000000000000000000000000000000000000000000000000000000", - "BlockID": "0000000000000000000000000000000000000000000000000000000000000000", - "TotalComputationUsed": 0, - "NumberOfTransactions": 0, - "Index": 0, - "EndState": "3f6035c1324186316f86fbeb37248196e67ff946e47efafa94508e89639c046c" - } - ], - "ServiceEvents": [ - { - "Type": "setup", - "Event": { - "Counter": 0, - "FirstView": 0, - "DKGPhase1FinalView": 549, - "DKGPhase2FinalView": 1049, - "DKGPhase3FinalView": 1549, - "FinalView": 1599, - "Participants": [ - { - "NodeID": "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "Address": "collection1.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "l3r+iJ55R81PjKLmapkBQEhr6PaWQKjXEFb4dBdmHhQ7JKcpndtgXqFoxPUbhC+sFtQKTAsr8XJuhDjbBetf3xEYqMkb6y4BD+Ji6Wm4AdtB74LgaiXqJLCPAehULLkI", - "NetworkPubKey": "LN8V7Msz37skANzyL8CwOxR+umk2tuOHEefBp5PwqEM5gyqkFFN24TlwpXMe0bxFAk0XCPfkphF2Fdua0KrxwQ==" - }, - { - "NodeID": "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "Address": "collection2.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oW36l4h/JSzNXufJS+09jP3cpQkfKfcyaqMVQMgMiVDGk9qXck3XGqHWRwHGliG9D8L0EWw0WeNPJVxop7iyJOGDgYHmKBVqo4anTDe2cHK7KjL/jWYmf8FM4/jI+9Z6", - "NetworkPubKey": "/nDR5ro0WUlVjXEEDGh0fb/wkxlg17DvrvCCWFuaoCukzETqM6VYV5o5Afsrt85I2MX/1aaBH4cB0yeYKT4h4g==" - }, - { - "NodeID": "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "Address": "collection3.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "qGb7lluGuJqg4hsDr6cOceyJbvYsZnmu9V1OduXsCsb1WXh7yiXwbnB4400r5/sWAWaI6vjIr/btVK694MvJllB0QC5WLVVVU4PIldHBE3pn9DDBnmk3mfUPL9ob265l", - "NetworkPubKey": "rnSLNQtIhBfPiN8nrr4Nsbx3AH2jwtWxY4S9lkr+cGoARAQ7D7K4GH0SObhBetnSj4igaHOsDMzeC2owQL0/7w==" - }, - { - "NodeID": "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - "Address": "collection4.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oMY2bsEMVt0IFE/kfUFBdCoFLYbeq9TEes9PUhViSQgMVBmyJ/wjfN9Q1FvlmGcCEtLM8P0EWbEiHvAgao6MUkLsqTFOhExlulqQska6za5hMMqCeLdjbZTl/Kk37Cey", - "NetworkPubKey": "hOzBuFgkkRv3K2mRr4L1H1FfcJxhdwfdjKLCTuzTQCvHw9AJNJ7tIypAjgBxcDMz4onqLhtzuShUjs8zTPeyZA==" - }, - { - "NodeID": 
"436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363", - "Address": "collection5.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "ournHqKKQ0zacuQl1uNqLnOulZ2yV+OSq4kY9wKrvpfg0y2bsJhFjkmyle8Ffoi5Exd4TejDqv+Vhp3Dzm5X8RJM5jFvqzDIRy/fTR19LOXRNXNi88BrxajTJf9ZFZVt", - "NetworkPubKey": "xHhc4n1UNAwGx18lk1XI1vamt1D3yFT7grBgjFbAC3d3jrs7B0uyVkwQNKW1L5aS0CGaPYn/W85HnN+7Lgn9iA==" - }, - { - "NodeID": "44696574657220536869726c657900c100318341c6796198aa37e627949074bc", - "Address": "collection6.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "goWe7pNkhi44qHnRkKpsf4AiRGP3rn0nwvQoXESAVzL2gI+9P67Fjx6yzn/1WGCWClmYN0ZSPxjcLVzm84Lk6P5oakg5VMqW3XLOoh+qsMzDwdxj+8sAOS/Ru25BuB8e", - "NetworkPubKey": "oo8hfXYogKS0xDhoB1XjHKBzAmOPw6mUWPVRaTV20f9SYAs4IMCN+U62TEl4a7aXz3OWO0pvYYKFndNFLNLA8g==" - }, - { - "NodeID": "4a616d65732048756e74657200e9ffa4e085542cfa80d15f0e61e54606c6cdb3", - "Address": "consensus1.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "kTbHpoV2R0Ef5WgbL/iDidnqSDD833Xda1Djm9IEOQMChKg7k1dbyS8DITl6Zb5WGfbeHUN3G9cNU2BGSGG4mE41cD276M64+Vx+mHkCzSc+uG69BORCsF703cSXocq8", - "NetworkPubKey": "UswVkeEZpRexigHGj2Rsiq5sC/v8gmqxoEqTM4M5/0CSb2YzgXzAeQpYyrP0a2kVZAn5cYSThPBu1GBATbEo6A==" - }, - { - "NodeID": "4a65666665727920446f796c65005cf2fe1daafe62a66f59fd9afa12f0f78914", - "Address": "consensus2.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "uJFIXJzXtuq+dWyhQdi68MgzoOVMML++yo+coaDqNV4OAfTZiuVgzqBrt4G9R1EqCn7CJA79kW8c18IiJdtmAUDwYHg3xhwZjBjrCgMSTkhGrPevyoOnGQNfqQ4xr7bL", - "NetworkPubKey": "YTGQbfw0HGJOcBbx/VGYEqcjnWGpRedivdLB3Od1hpVfPBgYWOSS8qndV9PfUzuAEPr2YqRWbfcK7l8gPl8dyA==" - }, - { - "NodeID": "4a6f7264616e20536368616c6d0064527abdf7ba98fac951ab71ac6aba31cfa1", - "Address": "consensus3.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "pQTTHFSnX47P6p/F+5PK8G2YHHb2I86RDAK0XsDzc/7Bb+gP1+BmA/LnZH9KtCK0EGesjanfcNrV7auxk0vZZyNfg6Tfv1EpfWKv3yhxDVX3YxqrAr21TiZOftG9AXkd", - "NetworkPubKey": "fDW1pSPep/lCfdJfDUkj94PULq7//Qo2d8MI9sD824uke2fjuj/XRKxnDhEmvfOjVW1n3hGDpJ6fZZqB5mpehg==" - }, - { - "NodeID": "4a6f73682048616e6e616e00571da9984a91e31b5592e90d7be91703b2750235", - "Address": "execution1.:3569", - "Role": "execution", - "Weight": 1000, - "StakingPubKey": "g1uhAX+FAd621BY48Q+2FTy8jGxIKCtLCqYmj/tsbPt1j0QjryxDgxhlZ3fCznGfAH4exTXoYCUcK0uZ6zPHSO3HpNBqJ0p/VXQVsKxb+BkfrJ57jvSdFFjNr8De3JRO", - "NetworkPubKey": "PuPWlSXScP/pse2yyL/5qQcAtt5GUf7962RdUGy9hXsTjM+nrK371lpgqGsbngo0aHIUR2FqhzKr4O+sjGwqqQ==" - }, - { - "NodeID": "4b616e205a68616e670000937a7f84d6df0ca2acf95125c33c4b2637bae4e680", - "Address": "execution2.:3569", - "Role": "execution", - "Weight": 1000, - "StakingPubKey": "hpa0KDI1B/8lKK0W/tS7xlXUfmX8L8RnkROVLLFzPCNIFEFuk5j+ogO0ivziCxueEvKRmMvwV08JVL5/ete4E+LaGXXwrY6dda9EMx5PjWy+37HZPFuo7K4Ho0evp6/7", - "NetworkPubKey": "O3G0K0uUs8i2fBstnNscqo/lGwXirpVwA8igmVILKmvqoZIBgJHjrDKAgIjwzt2CVERC+74yMhKIBb5Acww5qg==" - }, - { - "NodeID": "4c61796e65204c616672616e636500ee3643453a3694301f3a232c2f5b9427e2", - "Address": "verification1.:3569", - "Role": "verification", - "Weight": 1000, - "StakingPubKey": "tkOdEz5neKr3QQ+uX7evxAopYKifXve+Q4Uihu4i9IG76r1l0qdf9PvDCjm5V9jQCJvcR467MkJe+wgI471O/ZFbY4BZ6ZnRw7Wy2SfO1BRS6DsUBkLetpzYzpyv/kak", - "NetworkPubKey": "khr2BQ5jLFtRUIfTUY64v6q1KWQcftYMRLOOqw7yRj8MIFgqL7Tku6NcjQNzmA9JG8humU1mz7ToGOf724WC6w==" - }, - { - "NodeID": "c8a31df973605a8ec8351810d38e70fc66d9871ef978194f246025a5f9f7bf6e", - "Address": "access1.:3569", - "Role": "access", - "Weight": 1000, - 
"StakingPubKey": "toI/M1jd37slSu3FdSHZpl+BAd87IMyjvIVxAAizQTkZdNWAXStfoc2RuiDXBWKgBjoCGVpngRRjFJs+LYcS48k1u6s67CPWzyl0wuQOsHrwy/I0bibYxf42JKvwnUOa", - "NetworkPubKey": "MKXI7iylu7A85AOyNTp5hStVh10StIs9EvtYJYlzXh0q5ehub5cspl9Tj6TpvSwUaI0MJZQ60hdkm4uFxzIIMQ==" - }, - { - "NodeID": "d3be7a089cc8a29a3ad8fcff5809c1ae27f35159ebcf585e3e4e91a1f3b87d89", - "Address": "access2.:3569", - "Role": "access", - "Weight": 1000, - "StakingPubKey": "pMWuhGCBzWssB/r2O1r7DWq8Kq5nmW6dzSKfPvosE0ip8xvXV7WBoAk1hrfpCqCkCAb8fZDNFVPeu9Xyw6gk0jL9PAfyZ7yvTEBnueZmnR9K13ZivyzubtqPYg0+f0kz", - "NetworkPubKey": "qhrQoE4oNHwp+XjA+6hN1Ye6pHmB4ctsV8zqwIfZRwh9vN+eljimmT4Da0Gx0ErFS0aqe1gG7m7VT64lHYSONQ==" - } - ], - "Assignments": [ - [ - "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - "436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363", - "44696574657220536869726c657900c100318341c6796198aa37e627949074bc" - ] - ], - "RandomSource": "bdnS7hUdKnnXNjb9LBC/Xw==" - } - }, - { - "Type": "commit", - "Event": { - "Counter": 0, - "ClusterQCs": [ - { - "SigData": "sq4dWuSz8vQUIzF9lv9u80J8xfnP1Jv6PZcHhIkwYPkwQaCN3F/7y+dZFPprjLbK", - "VoterIDs": [ - "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - "436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363" - ] - } - ], - "DKGGroupKey": "8e3ea9a984bcff363f70c90d5faed0b73cb6b940060329cb2f508960627601f23479ceea40943d0278fe5e2391bd2095150204b04fb8404a94054298fb9ea864be9db6aa1fab3f95cd19e3b56a50e7952377828494df28f1ed6964f491699fd1", - "DKGParticipantKeys": [ - "b400ebae6d5072fe8d9f5969cef1da4bbea99212c76d64eb68e229f4292a4dc1bb7335e510f7a13926be20c37156d6e318d2bad9122936623d83227f03dab63541d2103aad8dbdf93b98ec35b8da504d4aa89b106d45cbb58ea52d20545613dc", - "a40d4fe6a7bda097737fc8533ad16cde22bffe073fdb58c756a5cde6d85bc3aa05d06a87475aa8c8cc7151c552fc4dd51628eaa144170bd511a67bd3936b89e7c1bcd9fcecfb3b2d8a61fba3faf8030d267fa76235e1fecd22cd9cdbd70a424c", - "82009cdd9dce04e924e4e1c60dfd704a3ae85ee9e60f31e86e0abff28c199658cf1f443d4064a844adf0a74b8cc44e5417075c8751f5b180578b61426141df857640a5b1ca6f69770e0b1f268d1a1308b0e657a58b65e60a16281ca939a2d2b4" - ] - } - } - ], - "ExecutionDataID": "0000000000000000000000000000000000000000000000000000000000000000", - "ID": "b0673867a09da5840bb3a80907a3e8b2191ebc40bc1c2fbe3d317000222c6974" - }, - "SealingSegment": { - "Blocks": [ - { - "Header": { - "ChainID": "flow-benchnet", - "ParentID": "0000000000000000000000000000000000000000000000000000000000000000", - "Height": 0, - "PayloadHash": "7b3b313bd83e01d13c449d4dd4bac04137f5090bcb305ddd750298bd16e5339b", - "Timestamp": "2022-11-04T14:03:22Z", - "View": 0, - "ParentVoterIndices": null, - "ParentVoterSigData": null, - "ProposerID": "0000000000000000000000000000000000000000000000000000000000000000", - "ProposerSigData": null, - "ID": "452adae802d6a03d24e8369e5df4bc93617d407ebde2dfeee2554e4ef0c28d6a" - }, - "Payload": { - "Guarantees": null, - "Seals": null, - "Receipts": null, - "Results": null - } - } - ], - "ExecutionResults": [ - { - "PreviousResultID": "0000000000000000000000000000000000000000000000000000000000000000", - "BlockID": 
"452adae802d6a03d24e8369e5df4bc93617d407ebde2dfeee2554e4ef0c28d6a", - "Chunks": [ - { - "CollectionIndex": 0, - "StartState": "0000000000000000000000000000000000000000000000000000000000000000", - "EventCollection": "0000000000000000000000000000000000000000000000000000000000000000", - "BlockID": "0000000000000000000000000000000000000000000000000000000000000000", - "TotalComputationUsed": 0, - "NumberOfTransactions": 0, - "Index": 0, - "EndState": "3f6035c1324186316f86fbeb37248196e67ff946e47efafa94508e89639c046c" - } - ], - "ServiceEvents": [ - { - "Type": "setup", - "Event": { - "Counter": 0, - "FirstView": 0, - "DKGPhase1FinalView": 549, - "DKGPhase2FinalView": 1049, - "DKGPhase3FinalView": 1549, - "FinalView": 1599, - "Participants": [ - { - "NodeID": "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "Address": "collection1.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "l3r+iJ55R81PjKLmapkBQEhr6PaWQKjXEFb4dBdmHhQ7JKcpndtgXqFoxPUbhC+sFtQKTAsr8XJuhDjbBetf3xEYqMkb6y4BD+Ji6Wm4AdtB74LgaiXqJLCPAehULLkI", - "NetworkPubKey": "LN8V7Msz37skANzyL8CwOxR+umk2tuOHEefBp5PwqEM5gyqkFFN24TlwpXMe0bxFAk0XCPfkphF2Fdua0KrxwQ==" - }, - { - "NodeID": "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "Address": "collection2.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oW36l4h/JSzNXufJS+09jP3cpQkfKfcyaqMVQMgMiVDGk9qXck3XGqHWRwHGliG9D8L0EWw0WeNPJVxop7iyJOGDgYHmKBVqo4anTDe2cHK7KjL/jWYmf8FM4/jI+9Z6", - "NetworkPubKey": "/nDR5ro0WUlVjXEEDGh0fb/wkxlg17DvrvCCWFuaoCukzETqM6VYV5o5Afsrt85I2MX/1aaBH4cB0yeYKT4h4g==" - }, - { - "NodeID": "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "Address": "collection3.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "qGb7lluGuJqg4hsDr6cOceyJbvYsZnmu9V1OduXsCsb1WXh7yiXwbnB4400r5/sWAWaI6vjIr/btVK694MvJllB0QC5WLVVVU4PIldHBE3pn9DDBnmk3mfUPL9ob265l", - "NetworkPubKey": "rnSLNQtIhBfPiN8nrr4Nsbx3AH2jwtWxY4S9lkr+cGoARAQ7D7K4GH0SObhBetnSj4igaHOsDMzeC2owQL0/7w==" - }, - { - "NodeID": "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - "Address": "collection4.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oMY2bsEMVt0IFE/kfUFBdCoFLYbeq9TEes9PUhViSQgMVBmyJ/wjfN9Q1FvlmGcCEtLM8P0EWbEiHvAgao6MUkLsqTFOhExlulqQska6za5hMMqCeLdjbZTl/Kk37Cey", - "NetworkPubKey": "hOzBuFgkkRv3K2mRr4L1H1FfcJxhdwfdjKLCTuzTQCvHw9AJNJ7tIypAjgBxcDMz4onqLhtzuShUjs8zTPeyZA==" - }, - { - "NodeID": "436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363", - "Address": "collection5.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "ournHqKKQ0zacuQl1uNqLnOulZ2yV+OSq4kY9wKrvpfg0y2bsJhFjkmyle8Ffoi5Exd4TejDqv+Vhp3Dzm5X8RJM5jFvqzDIRy/fTR19LOXRNXNi88BrxajTJf9ZFZVt", - "NetworkPubKey": "xHhc4n1UNAwGx18lk1XI1vamt1D3yFT7grBgjFbAC3d3jrs7B0uyVkwQNKW1L5aS0CGaPYn/W85HnN+7Lgn9iA==" - }, - { - "NodeID": "44696574657220536869726c657900c100318341c6796198aa37e627949074bc", - "Address": "collection6.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "goWe7pNkhi44qHnRkKpsf4AiRGP3rn0nwvQoXESAVzL2gI+9P67Fjx6yzn/1WGCWClmYN0ZSPxjcLVzm84Lk6P5oakg5VMqW3XLOoh+qsMzDwdxj+8sAOS/Ru25BuB8e", - "NetworkPubKey": "oo8hfXYogKS0xDhoB1XjHKBzAmOPw6mUWPVRaTV20f9SYAs4IMCN+U62TEl4a7aXz3OWO0pvYYKFndNFLNLA8g==" - }, - { - "NodeID": "4a616d65732048756e74657200e9ffa4e085542cfa80d15f0e61e54606c6cdb3", - "Address": "consensus1.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": 
"kTbHpoV2R0Ef5WgbL/iDidnqSDD833Xda1Djm9IEOQMChKg7k1dbyS8DITl6Zb5WGfbeHUN3G9cNU2BGSGG4mE41cD276M64+Vx+mHkCzSc+uG69BORCsF703cSXocq8", - "NetworkPubKey": "UswVkeEZpRexigHGj2Rsiq5sC/v8gmqxoEqTM4M5/0CSb2YzgXzAeQpYyrP0a2kVZAn5cYSThPBu1GBATbEo6A==" - }, - { - "NodeID": "4a65666665727920446f796c65005cf2fe1daafe62a66f59fd9afa12f0f78914", - "Address": "consensus2.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "uJFIXJzXtuq+dWyhQdi68MgzoOVMML++yo+coaDqNV4OAfTZiuVgzqBrt4G9R1EqCn7CJA79kW8c18IiJdtmAUDwYHg3xhwZjBjrCgMSTkhGrPevyoOnGQNfqQ4xr7bL", - "NetworkPubKey": "YTGQbfw0HGJOcBbx/VGYEqcjnWGpRedivdLB3Od1hpVfPBgYWOSS8qndV9PfUzuAEPr2YqRWbfcK7l8gPl8dyA==" - }, - { - "NodeID": "4a6f7264616e20536368616c6d0064527abdf7ba98fac951ab71ac6aba31cfa1", - "Address": "consensus3.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "pQTTHFSnX47P6p/F+5PK8G2YHHb2I86RDAK0XsDzc/7Bb+gP1+BmA/LnZH9KtCK0EGesjanfcNrV7auxk0vZZyNfg6Tfv1EpfWKv3yhxDVX3YxqrAr21TiZOftG9AXkd", - "NetworkPubKey": "fDW1pSPep/lCfdJfDUkj94PULq7//Qo2d8MI9sD824uke2fjuj/XRKxnDhEmvfOjVW1n3hGDpJ6fZZqB5mpehg==" - }, - { - "NodeID": "4a6f73682048616e6e616e00571da9984a91e31b5592e90d7be91703b2750235", - "Address": "execution1.:3569", - "Role": "execution", - "Weight": 1000, - "StakingPubKey": "g1uhAX+FAd621BY48Q+2FTy8jGxIKCtLCqYmj/tsbPt1j0QjryxDgxhlZ3fCznGfAH4exTXoYCUcK0uZ6zPHSO3HpNBqJ0p/VXQVsKxb+BkfrJ57jvSdFFjNr8De3JRO", - "NetworkPubKey": "PuPWlSXScP/pse2yyL/5qQcAtt5GUf7962RdUGy9hXsTjM+nrK371lpgqGsbngo0aHIUR2FqhzKr4O+sjGwqqQ==" - }, - { - "NodeID": "4b616e205a68616e670000937a7f84d6df0ca2acf95125c33c4b2637bae4e680", - "Address": "execution2.:3569", - "Role": "execution", - "Weight": 1000, - "StakingPubKey": "hpa0KDI1B/8lKK0W/tS7xlXUfmX8L8RnkROVLLFzPCNIFEFuk5j+ogO0ivziCxueEvKRmMvwV08JVL5/ete4E+LaGXXwrY6dda9EMx5PjWy+37HZPFuo7K4Ho0evp6/7", - "NetworkPubKey": "O3G0K0uUs8i2fBstnNscqo/lGwXirpVwA8igmVILKmvqoZIBgJHjrDKAgIjwzt2CVERC+74yMhKIBb5Acww5qg==" - }, - { - "NodeID": "4c61796e65204c616672616e636500ee3643453a3694301f3a232c2f5b9427e2", - "Address": "verification1.:3569", - "Role": "verification", - "Weight": 1000, - "StakingPubKey": "tkOdEz5neKr3QQ+uX7evxAopYKifXve+Q4Uihu4i9IG76r1l0qdf9PvDCjm5V9jQCJvcR467MkJe+wgI471O/ZFbY4BZ6ZnRw7Wy2SfO1BRS6DsUBkLetpzYzpyv/kak", - "NetworkPubKey": "khr2BQ5jLFtRUIfTUY64v6q1KWQcftYMRLOOqw7yRj8MIFgqL7Tku6NcjQNzmA9JG8humU1mz7ToGOf724WC6w==" - }, - { - "NodeID": "c8a31df973605a8ec8351810d38e70fc66d9871ef978194f246025a5f9f7bf6e", - "Address": "access1.:3569", - "Role": "access", - "Weight": 1000, - "StakingPubKey": "toI/M1jd37slSu3FdSHZpl+BAd87IMyjvIVxAAizQTkZdNWAXStfoc2RuiDXBWKgBjoCGVpngRRjFJs+LYcS48k1u6s67CPWzyl0wuQOsHrwy/I0bibYxf42JKvwnUOa", - "NetworkPubKey": "MKXI7iylu7A85AOyNTp5hStVh10StIs9EvtYJYlzXh0q5ehub5cspl9Tj6TpvSwUaI0MJZQ60hdkm4uFxzIIMQ==" - }, - { - "NodeID": "d3be7a089cc8a29a3ad8fcff5809c1ae27f35159ebcf585e3e4e91a1f3b87d89", - "Address": "access2.:3569", - "Role": "access", - "Weight": 1000, - "StakingPubKey": "pMWuhGCBzWssB/r2O1r7DWq8Kq5nmW6dzSKfPvosE0ip8xvXV7WBoAk1hrfpCqCkCAb8fZDNFVPeu9Xyw6gk0jL9PAfyZ7yvTEBnueZmnR9K13ZivyzubtqPYg0+f0kz", - "NetworkPubKey": "qhrQoE4oNHwp+XjA+6hN1Ye6pHmB4ctsV8zqwIfZRwh9vN+eljimmT4Da0Gx0ErFS0aqe1gG7m7VT64lHYSONQ==" - } - ], - "Assignments": [ - [ - "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - 
"436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363", - "44696574657220536869726c657900c100318341c6796198aa37e627949074bc" - ] - ], - "RandomSource": "bdnS7hUdKnnXNjb9LBC/Xw==" - } - }, - { - "Type": "commit", - "Event": { - "Counter": 0, - "ClusterQCs": [ - { - "SigData": "sq4dWuSz8vQUIzF9lv9u80J8xfnP1Jv6PZcHhIkwYPkwQaCN3F/7y+dZFPprjLbK", - "VoterIDs": [ - "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - "436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363" - ] - } - ], - "DKGGroupKey": "8e3ea9a984bcff363f70c90d5faed0b73cb6b940060329cb2f508960627601f23479ceea40943d0278fe5e2391bd2095150204b04fb8404a94054298fb9ea864be9db6aa1fab3f95cd19e3b56a50e7952377828494df28f1ed6964f491699fd1", - "DKGParticipantKeys": [ - "b400ebae6d5072fe8d9f5969cef1da4bbea99212c76d64eb68e229f4292a4dc1bb7335e510f7a13926be20c37156d6e318d2bad9122936623d83227f03dab63541d2103aad8dbdf93b98ec35b8da504d4aa89b106d45cbb58ea52d20545613dc", - "a40d4fe6a7bda097737fc8533ad16cde22bffe073fdb58c756a5cde6d85bc3aa05d06a87475aa8c8cc7151c552fc4dd51628eaa144170bd511a67bd3936b89e7c1bcd9fcecfb3b2d8a61fba3faf8030d267fa76235e1fecd22cd9cdbd70a424c", - "82009cdd9dce04e924e4e1c60dfd704a3ae85ee9e60f31e86e0abff28c199658cf1f443d4064a844adf0a74b8cc44e5417075c8751f5b180578b61426141df857640a5b1ca6f69770e0b1f268d1a1308b0e657a58b65e60a16281ca939a2d2b4" - ] - } - } - ], - "ExecutionDataID": "0000000000000000000000000000000000000000000000000000000000000000", - "ID": "b0673867a09da5840bb3a80907a3e8b2191ebc40bc1c2fbe3d317000222c6974" - } - ], - "LatestSeals": { - "452adae802d6a03d24e8369e5df4bc93617d407ebde2dfeee2554e4ef0c28d6a": "51e7d6952a7d1f3982723835f6b06a530bd83dff755c17577e94456324fadd21" - }, - "FirstSeal": { - "BlockID": "452adae802d6a03d24e8369e5df4bc93617d407ebde2dfeee2554e4ef0c28d6a", - "ResultID": "b0673867a09da5840bb3a80907a3e8b2191ebc40bc1c2fbe3d317000222c6974", - "FinalState": "3f6035c1324186316f86fbeb37248196e67ff946e47efafa94508e89639c046c", - "AggregatedApprovalSigs": null, - "ID": "51e7d6952a7d1f3982723835f6b06a530bd83dff755c17577e94456324fadd21" - } - }, - "QuorumCertificate": { - "View": 0, - "BlockID": "452adae802d6a03d24e8369e5df4bc93617d407ebde2dfeee2554e4ef0c28d6a", - "SignerIndices": "5DgtbuA=", - "SigData": "+GQAsKeYu+OM+UbjbUrj88WTao5ZBKx/6Ac/Om7KAHll/9QAg2MB0SuJeXmSxk/XtHaMzoCwju+rawNIcGhsWhkpgU3rRh0M26ezuZlw5iVKEsH5S/EgPYj4wwHk1tvjenmA81GD" - }, - "Phase": 1, - "Epochs": { - "Previous": null, - "Current": { - "Counter": 0, - "FirstView": 0, - "DKGPhase1FinalView": 549, - "DKGPhase2FinalView": 1049, - "DKGPhase3FinalView": 1549, - "FinalView": 1599, - "RandomSource": "bdnS7hUdKnnXNjb9LBC/Xw==", - "InitialIdentities": [ - { - "NodeID": "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "Address": "collection1.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "l3r+iJ55R81PjKLmapkBQEhr6PaWQKjXEFb4dBdmHhQ7JKcpndtgXqFoxPUbhC+sFtQKTAsr8XJuhDjbBetf3xEYqMkb6y4BD+Ji6Wm4AdtB74LgaiXqJLCPAehULLkI", - "NetworkPubKey": "LN8V7Msz37skANzyL8CwOxR+umk2tuOHEefBp5PwqEM5gyqkFFN24TlwpXMe0bxFAk0XCPfkphF2Fdua0KrxwQ==" - }, - { - "NodeID": "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "Address": "collection2.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": 
"oW36l4h/JSzNXufJS+09jP3cpQkfKfcyaqMVQMgMiVDGk9qXck3XGqHWRwHGliG9D8L0EWw0WeNPJVxop7iyJOGDgYHmKBVqo4anTDe2cHK7KjL/jWYmf8FM4/jI+9Z6", - "NetworkPubKey": "/nDR5ro0WUlVjXEEDGh0fb/wkxlg17DvrvCCWFuaoCukzETqM6VYV5o5Afsrt85I2MX/1aaBH4cB0yeYKT4h4g==" - }, - { - "NodeID": "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "Address": "collection3.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "qGb7lluGuJqg4hsDr6cOceyJbvYsZnmu9V1OduXsCsb1WXh7yiXwbnB4400r5/sWAWaI6vjIr/btVK694MvJllB0QC5WLVVVU4PIldHBE3pn9DDBnmk3mfUPL9ob265l", - "NetworkPubKey": "rnSLNQtIhBfPiN8nrr4Nsbx3AH2jwtWxY4S9lkr+cGoARAQ7D7K4GH0SObhBetnSj4igaHOsDMzeC2owQL0/7w==" - }, - { - "NodeID": "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - "Address": "collection4.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oMY2bsEMVt0IFE/kfUFBdCoFLYbeq9TEes9PUhViSQgMVBmyJ/wjfN9Q1FvlmGcCEtLM8P0EWbEiHvAgao6MUkLsqTFOhExlulqQska6za5hMMqCeLdjbZTl/Kk37Cey", - "NetworkPubKey": "hOzBuFgkkRv3K2mRr4L1H1FfcJxhdwfdjKLCTuzTQCvHw9AJNJ7tIypAjgBxcDMz4onqLhtzuShUjs8zTPeyZA==" - }, - { - "NodeID": "436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363", - "Address": "collection5.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "ournHqKKQ0zacuQl1uNqLnOulZ2yV+OSq4kY9wKrvpfg0y2bsJhFjkmyle8Ffoi5Exd4TejDqv+Vhp3Dzm5X8RJM5jFvqzDIRy/fTR19LOXRNXNi88BrxajTJf9ZFZVt", - "NetworkPubKey": "xHhc4n1UNAwGx18lk1XI1vamt1D3yFT7grBgjFbAC3d3jrs7B0uyVkwQNKW1L5aS0CGaPYn/W85HnN+7Lgn9iA==" - }, - { - "NodeID": "44696574657220536869726c657900c100318341c6796198aa37e627949074bc", - "Address": "collection6.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "goWe7pNkhi44qHnRkKpsf4AiRGP3rn0nwvQoXESAVzL2gI+9P67Fjx6yzn/1WGCWClmYN0ZSPxjcLVzm84Lk6P5oakg5VMqW3XLOoh+qsMzDwdxj+8sAOS/Ru25BuB8e", - "NetworkPubKey": "oo8hfXYogKS0xDhoB1XjHKBzAmOPw6mUWPVRaTV20f9SYAs4IMCN+U62TEl4a7aXz3OWO0pvYYKFndNFLNLA8g==" - }, - { - "NodeID": "4a616d65732048756e74657200e9ffa4e085542cfa80d15f0e61e54606c6cdb3", - "Address": "consensus1.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "kTbHpoV2R0Ef5WgbL/iDidnqSDD833Xda1Djm9IEOQMChKg7k1dbyS8DITl6Zb5WGfbeHUN3G9cNU2BGSGG4mE41cD276M64+Vx+mHkCzSc+uG69BORCsF703cSXocq8", - "NetworkPubKey": "UswVkeEZpRexigHGj2Rsiq5sC/v8gmqxoEqTM4M5/0CSb2YzgXzAeQpYyrP0a2kVZAn5cYSThPBu1GBATbEo6A==" - }, - { - "NodeID": "4a65666665727920446f796c65005cf2fe1daafe62a66f59fd9afa12f0f78914", - "Address": "consensus2.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "uJFIXJzXtuq+dWyhQdi68MgzoOVMML++yo+coaDqNV4OAfTZiuVgzqBrt4G9R1EqCn7CJA79kW8c18IiJdtmAUDwYHg3xhwZjBjrCgMSTkhGrPevyoOnGQNfqQ4xr7bL", - "NetworkPubKey": "YTGQbfw0HGJOcBbx/VGYEqcjnWGpRedivdLB3Od1hpVfPBgYWOSS8qndV9PfUzuAEPr2YqRWbfcK7l8gPl8dyA==" - }, - { - "NodeID": "4a6f7264616e20536368616c6d0064527abdf7ba98fac951ab71ac6aba31cfa1", - "Address": "consensus3.:3569", - "Role": "consensus", - "Weight": 1000, - "StakingPubKey": "pQTTHFSnX47P6p/F+5PK8G2YHHb2I86RDAK0XsDzc/7Bb+gP1+BmA/LnZH9KtCK0EGesjanfcNrV7auxk0vZZyNfg6Tfv1EpfWKv3yhxDVX3YxqrAr21TiZOftG9AXkd", - "NetworkPubKey": "fDW1pSPep/lCfdJfDUkj94PULq7//Qo2d8MI9sD824uke2fjuj/XRKxnDhEmvfOjVW1n3hGDpJ6fZZqB5mpehg==" - }, - { - "NodeID": "4a6f73682048616e6e616e00571da9984a91e31b5592e90d7be91703b2750235", - "Address": "execution1.:3569", - "Role": "execution", - "Weight": 1000, - "StakingPubKey": "g1uhAX+FAd621BY48Q+2FTy8jGxIKCtLCqYmj/tsbPt1j0QjryxDgxhlZ3fCznGfAH4exTXoYCUcK0uZ6zPHSO3HpNBqJ0p/VXQVsKxb+BkfrJ57jvSdFFjNr8De3JRO", - "NetworkPubKey": 
"PuPWlSXScP/pse2yyL/5qQcAtt5GUf7962RdUGy9hXsTjM+nrK371lpgqGsbngo0aHIUR2FqhzKr4O+sjGwqqQ==" - }, - { - "NodeID": "4b616e205a68616e670000937a7f84d6df0ca2acf95125c33c4b2637bae4e680", - "Address": "execution2.:3569", - "Role": "execution", - "Weight": 1000, - "StakingPubKey": "hpa0KDI1B/8lKK0W/tS7xlXUfmX8L8RnkROVLLFzPCNIFEFuk5j+ogO0ivziCxueEvKRmMvwV08JVL5/ete4E+LaGXXwrY6dda9EMx5PjWy+37HZPFuo7K4Ho0evp6/7", - "NetworkPubKey": "O3G0K0uUs8i2fBstnNscqo/lGwXirpVwA8igmVILKmvqoZIBgJHjrDKAgIjwzt2CVERC+74yMhKIBb5Acww5qg==" - }, - { - "NodeID": "4c61796e65204c616672616e636500ee3643453a3694301f3a232c2f5b9427e2", - "Address": "verification1.:3569", - "Role": "verification", - "Weight": 1000, - "StakingPubKey": "tkOdEz5neKr3QQ+uX7evxAopYKifXve+Q4Uihu4i9IG76r1l0qdf9PvDCjm5V9jQCJvcR467MkJe+wgI471O/ZFbY4BZ6ZnRw7Wy2SfO1BRS6DsUBkLetpzYzpyv/kak", - "NetworkPubKey": "khr2BQ5jLFtRUIfTUY64v6q1KWQcftYMRLOOqw7yRj8MIFgqL7Tku6NcjQNzmA9JG8humU1mz7ToGOf724WC6w==" - }, - { - "NodeID": "c8a31df973605a8ec8351810d38e70fc66d9871ef978194f246025a5f9f7bf6e", - "Address": "access1.:3569", - "Role": "access", - "Weight": 1000, - "StakingPubKey": "toI/M1jd37slSu3FdSHZpl+BAd87IMyjvIVxAAizQTkZdNWAXStfoc2RuiDXBWKgBjoCGVpngRRjFJs+LYcS48k1u6s67CPWzyl0wuQOsHrwy/I0bibYxf42JKvwnUOa", - "NetworkPubKey": "MKXI7iylu7A85AOyNTp5hStVh10StIs9EvtYJYlzXh0q5ehub5cspl9Tj6TpvSwUaI0MJZQ60hdkm4uFxzIIMQ==" - }, - { - "NodeID": "d3be7a089cc8a29a3ad8fcff5809c1ae27f35159ebcf585e3e4e91a1f3b87d89", - "Address": "access2.:3569", - "Role": "access", - "Weight": 1000, - "StakingPubKey": "pMWuhGCBzWssB/r2O1r7DWq8Kq5nmW6dzSKfPvosE0ip8xvXV7WBoAk1hrfpCqCkCAb8fZDNFVPeu9Xyw6gk0jL9PAfyZ7yvTEBnueZmnR9K13ZivyzubtqPYg0+f0kz", - "NetworkPubKey": "qhrQoE4oNHwp+XjA+6hN1Ye6pHmB4ctsV8zqwIfZRwh9vN+eljimmT4Da0Gx0ErFS0aqe1gG7m7VT64lHYSONQ==" - } - ], - "Clustering": [ - [ - { - "NodeID": "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "Address": "collection1.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "l3r+iJ55R81PjKLmapkBQEhr6PaWQKjXEFb4dBdmHhQ7JKcpndtgXqFoxPUbhC+sFtQKTAsr8XJuhDjbBetf3xEYqMkb6y4BD+Ji6Wm4AdtB74LgaiXqJLCPAehULLkI", - "NetworkPubKey": "LN8V7Msz37skANzyL8CwOxR+umk2tuOHEefBp5PwqEM5gyqkFFN24TlwpXMe0bxFAk0XCPfkphF2Fdua0KrxwQ==" - }, - { - "NodeID": "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "Address": "collection2.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oW36l4h/JSzNXufJS+09jP3cpQkfKfcyaqMVQMgMiVDGk9qXck3XGqHWRwHGliG9D8L0EWw0WeNPJVxop7iyJOGDgYHmKBVqo4anTDe2cHK7KjL/jWYmf8FM4/jI+9Z6", - "NetworkPubKey": "/nDR5ro0WUlVjXEEDGh0fb/wkxlg17DvrvCCWFuaoCukzETqM6VYV5o5Afsrt85I2MX/1aaBH4cB0yeYKT4h4g==" - }, - { - "NodeID": "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "Address": "collection3.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "qGb7lluGuJqg4hsDr6cOceyJbvYsZnmu9V1OduXsCsb1WXh7yiXwbnB4400r5/sWAWaI6vjIr/btVK694MvJllB0QC5WLVVVU4PIldHBE3pn9DDBnmk3mfUPL9ob265l", - "NetworkPubKey": "rnSLNQtIhBfPiN8nrr4Nsbx3AH2jwtWxY4S9lkr+cGoARAQ7D7K4GH0SObhBetnSj4igaHOsDMzeC2owQL0/7w==" - }, - { - "NodeID": "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - "Address": "collection4.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oMY2bsEMVt0IFE/kfUFBdCoFLYbeq9TEes9PUhViSQgMVBmyJ/wjfN9Q1FvlmGcCEtLM8P0EWbEiHvAgao6MUkLsqTFOhExlulqQska6za5hMMqCeLdjbZTl/Kk37Cey", - "NetworkPubKey": "hOzBuFgkkRv3K2mRr4L1H1FfcJxhdwfdjKLCTuzTQCvHw9AJNJ7tIypAjgBxcDMz4onqLhtzuShUjs8zTPeyZA==" - }, - { - "NodeID": 
"436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363", - "Address": "collection5.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "ournHqKKQ0zacuQl1uNqLnOulZ2yV+OSq4kY9wKrvpfg0y2bsJhFjkmyle8Ffoi5Exd4TejDqv+Vhp3Dzm5X8RJM5jFvqzDIRy/fTR19LOXRNXNi88BrxajTJf9ZFZVt", - "NetworkPubKey": "xHhc4n1UNAwGx18lk1XI1vamt1D3yFT7grBgjFbAC3d3jrs7B0uyVkwQNKW1L5aS0CGaPYn/W85HnN+7Lgn9iA==" - }, - { - "NodeID": "44696574657220536869726c657900c100318341c6796198aa37e627949074bc", - "Address": "collection6.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "goWe7pNkhi44qHnRkKpsf4AiRGP3rn0nwvQoXESAVzL2gI+9P67Fjx6yzn/1WGCWClmYN0ZSPxjcLVzm84Lk6P5oakg5VMqW3XLOoh+qsMzDwdxj+8sAOS/Ru25BuB8e", - "NetworkPubKey": "oo8hfXYogKS0xDhoB1XjHKBzAmOPw6mUWPVRaTV20f9SYAs4IMCN+U62TEl4a7aXz3OWO0pvYYKFndNFLNLA8g==" - } - ] - ], - "Clusters": [ - { - "Index": 0, - "Counter": 0, - "Members": [ - { - "NodeID": "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "Address": "collection1.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "l3r+iJ55R81PjKLmapkBQEhr6PaWQKjXEFb4dBdmHhQ7JKcpndtgXqFoxPUbhC+sFtQKTAsr8XJuhDjbBetf3xEYqMkb6y4BD+Ji6Wm4AdtB74LgaiXqJLCPAehULLkI", - "NetworkPubKey": "LN8V7Msz37skANzyL8CwOxR+umk2tuOHEefBp5PwqEM5gyqkFFN24TlwpXMe0bxFAk0XCPfkphF2Fdua0KrxwQ==" - }, - { - "NodeID": "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "Address": "collection2.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oW36l4h/JSzNXufJS+09jP3cpQkfKfcyaqMVQMgMiVDGk9qXck3XGqHWRwHGliG9D8L0EWw0WeNPJVxop7iyJOGDgYHmKBVqo4anTDe2cHK7KjL/jWYmf8FM4/jI+9Z6", - "NetworkPubKey": "/nDR5ro0WUlVjXEEDGh0fb/wkxlg17DvrvCCWFuaoCukzETqM6VYV5o5Afsrt85I2MX/1aaBH4cB0yeYKT4h4g==" - }, - { - "NodeID": "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "Address": "collection3.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "qGb7lluGuJqg4hsDr6cOceyJbvYsZnmu9V1OduXsCsb1WXh7yiXwbnB4400r5/sWAWaI6vjIr/btVK694MvJllB0QC5WLVVVU4PIldHBE3pn9DDBnmk3mfUPL9ob265l", - "NetworkPubKey": "rnSLNQtIhBfPiN8nrr4Nsbx3AH2jwtWxY4S9lkr+cGoARAQ7D7K4GH0SObhBetnSj4igaHOsDMzeC2owQL0/7w==" - }, - { - "NodeID": "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - "Address": "collection4.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "oMY2bsEMVt0IFE/kfUFBdCoFLYbeq9TEes9PUhViSQgMVBmyJ/wjfN9Q1FvlmGcCEtLM8P0EWbEiHvAgao6MUkLsqTFOhExlulqQska6za5hMMqCeLdjbZTl/Kk37Cey", - "NetworkPubKey": "hOzBuFgkkRv3K2mRr4L1H1FfcJxhdwfdjKLCTuzTQCvHw9AJNJ7tIypAjgBxcDMz4onqLhtzuShUjs8zTPeyZA==" - }, - { - "NodeID": "436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363", - "Address": "collection5.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "ournHqKKQ0zacuQl1uNqLnOulZ2yV+OSq4kY9wKrvpfg0y2bsJhFjkmyle8Ffoi5Exd4TejDqv+Vhp3Dzm5X8RJM5jFvqzDIRy/fTR19LOXRNXNi88BrxajTJf9ZFZVt", - "NetworkPubKey": "xHhc4n1UNAwGx18lk1XI1vamt1D3yFT7grBgjFbAC3d3jrs7B0uyVkwQNKW1L5aS0CGaPYn/W85HnN+7Lgn9iA==" - }, - { - "NodeID": "44696574657220536869726c657900c100318341c6796198aa37e627949074bc", - "Address": "collection6.:3569", - "Role": "collection", - "Weight": 1000, - "StakingPubKey": "goWe7pNkhi44qHnRkKpsf4AiRGP3rn0nwvQoXESAVzL2gI+9P67Fjx6yzn/1WGCWClmYN0ZSPxjcLVzm84Lk6P5oakg5VMqW3XLOoh+qsMzDwdxj+8sAOS/Ru25BuB8e", - "NetworkPubKey": "oo8hfXYogKS0xDhoB1XjHKBzAmOPw6mUWPVRaTV20f9SYAs4IMCN+U62TEl4a7aXz3OWO0pvYYKFndNFLNLA8g==" - } - ], - "RootBlock": { - "Header": { - "ChainID": 
"cluster-0-c7d91d0c823e302961fcceb2c28afb44bf54690ee93f265300d16516ac92e764", - "ParentID": "0000000000000000000000000000000000000000000000000000000000000000", - "Height": 0, - "PayloadHash": "0114797c3404a47fd9d3bffb926b8df8a611dcbd94854e870af5f3c22fe55ab2", - "Timestamp": "2018-12-19T22:32:30.000000042Z", - "View": 0, - "ParentVoterIndices": null, - "ParentVoterSigData": null, - "ProposerID": "0000000000000000000000000000000000000000000000000000000000000000", - "ProposerSigData": null, - "ID": "422642675b30df68facb104c48c0e7745d1e78695c0ed6420d3517c67ee747be" - }, - "Payload": { - "Collection": { - "Transactions": [] - }, - "ReferenceBlockID": "0000000000000000000000000000000000000000000000000000000000000000" - } - }, - "RootQC": { - "View": 0, - "BlockID": "422642675b30df68facb104c48c0e7745d1e78695c0ed6420d3517c67ee747be", - "SignerIndices": "XMPp+/g=", - "SigData": "sq4dWuSz8vQUIzF9lv9u80J8xfnP1Jv6PZcHhIkwYPkwQaCN3F/7y+dZFPprjLbK" - } - } - ], - "DKG": { - "GroupKey": "8e3ea9a984bcff363f70c90d5faed0b73cb6b940060329cb2f508960627601f23479ceea40943d0278fe5e2391bd2095150204b04fb8404a94054298fb9ea864be9db6aa1fab3f95cd19e3b56a50e7952377828494df28f1ed6964f491699fd1", - "Participants": { - "4a616d65732048756e74657200e9ffa4e085542cfa80d15f0e61e54606c6cdb3": { - "Index": 0, - "KeyShare": "b400ebae6d5072fe8d9f5969cef1da4bbea99212c76d64eb68e229f4292a4dc1bb7335e510f7a13926be20c37156d6e318d2bad9122936623d83227f03dab63541d2103aad8dbdf93b98ec35b8da504d4aa89b106d45cbb58ea52d20545613dc" - }, - "4a65666665727920446f796c65005cf2fe1daafe62a66f59fd9afa12f0f78914": { - "Index": 1, - "KeyShare": "a40d4fe6a7bda097737fc8533ad16cde22bffe073fdb58c756a5cde6d85bc3aa05d06a87475aa8c8cc7151c552fc4dd51628eaa144170bd511a67bd3936b89e7c1bcd9fcecfb3b2d8a61fba3faf8030d267fa76235e1fecd22cd9cdbd70a424c" - }, - "4a6f7264616e20536368616c6d0064527abdf7ba98fac951ab71ac6aba31cfa1": { - "Index": 2, - "KeyShare": "82009cdd9dce04e924e4e1c60dfd704a3ae85ee9e60f31e86e0abff28c199658cf1f443d4064a844adf0a74b8cc44e5417075c8751f5b180578b61426141df857640a5b1ca6f69770e0b1f268d1a1308b0e657a58b65e60a16281ca939a2d2b4" - } - } - } - }, - "Next": null - }, - "Params": { - "ChainID": "flow-benchnet", - "SporkID": "452adae802d6a03d24e8369e5df4bc93617d407ebde2dfeee2554e4ef0c28d6a", - "ProtocolVersion": 0 - } -} \ No newline at end of file diff --git a/integration/benchnet2/automate/testdata/level1/data/snapshot-fixture1.json b/integration/benchnet2/automate/testdata/level1/data/snapshot-fixture1.json new file mode 100644 index 00000000000..197c8f81738 --- /dev/null +++ b/integration/benchnet2/automate/testdata/level1/data/snapshot-fixture1.json @@ -0,0 +1 @@ 
+{"SealingSegment":{"Blocks":[{"Block":{"Header":{"ChainID":"flow-emulator","ParentID":"0000000000000000000000000000000000000000000000000000000000000000","Height":0,"PayloadHash":"4d0bdbeabeca734b7abc7908770d582fa48c5f443452ae0629d5fc4e2fa9886c","Timestamp":1545258750000,"View":0,"ParentView":0,"ParentVoterIndices":null,"ParentVoterSigData":null,"ProposerID":"0000000000000000000000000000000000000000000000000000000000000000","LastViewTC":null,"ID":"4bb784dbbb4428d1e21482f2cb14f4d3c0262b29976317f3c90a68e5c1e25013"},"Payload":{"Guarantees":null,"Seals":null,"Receipts":null,"Results":null,"ProtocolStateID":"873d2f32be044056768e94962f8a2ec07f60f40ae22d4396b3baf29ddffae820"}},"ProposerSigData":null}],"ExtraBlocks":[],"ExecutionResults":[{"PreviousResultID":"0000000000000000000000000000000000000000000000000000000000000000","BlockID":"4bb784dbbb4428d1e21482f2cb14f4d3c0262b29976317f3c90a68e5c1e25013","Chunks":[{"CollectionIndex":0,"StartState":"0000000000000000000000000000000000000000000000000000000000000000","EventCollection":"0000000000000000000000000000000000000000000000000000000000000000","ServiceEventCount":null,"BlockID":"0000000000000000000000000000000000000000000000000000000000000000","TotalComputationUsed":0,"NumberOfTransactions":0,"Index":0,"EndState":"dfba67948e4c8453f42e5cfa95b1bde1472df4661835bb512e6f7d9265ecc1c4"}],"ServiceEvents":[{"Type":"setup","Event":{"Counter":1,"FirstView":0,"DKGPhase1FinalView":100,"DKGPhase2FinalView":200,"DKGPhase3FinalView":300,"FinalView":100000,"Participants":[{"NodeID":"083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979","Address":"083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979@flow.com:1234","Role":"collection","InitialWeight":1000,"StakingPubKey":"q8kxnV/xM07xXen+kMT1D92KHF/VFs/DtQn+s/54jlUUcbHecsCPS2xDggweT1NMEcY/3W5bkSlmUVAvVxGycYNVmXphodHTLv6nPsi0ZsgzXHDkK5Zf7LTYxzMyU3AV","NetworkPubKey":null},{"NodeID":"101b061ec2c53e18617c8d4a7c2e1ecaff8574dbeab61588ea2a7205f085555f","Address":"101b061ec2c53e18617c8d4a7c2e1ecaff8574dbeab61588ea2a7205f085555f@flow.com:1234","Role":"execution","InitialWeight":1000,"StakingPubKey":"pLRpU+PpguxHdi74kyQoWa10PzitPUmXn534vIrooaMpBRNeW1TWZCNnHe1wy0ATB8BF7AeUxAobldam5b/Dn1/kB6yoWQy7wFLsWASgc/38Bh/AlyXUiUIOjMGE5HLx","NetworkPubKey":null},{"NodeID":"287a7299497e61de9c9e08300ddbc0c74f3a951b4a23710cd46a7097aefe984d","Address":"287a7299497e61de9c9e08300ddbc0c74f3a951b4a23710cd46a7097aefe984d@flow.com:1234","Role":"verification","InitialWeight":1000,"StakingPubKey":"jEDxK889vdalnfhE63P52WXxAxGVjHN7AtC70SGM9u1pez3cgzV6ZhwfFablj1ZxC9KUCMK8XvOi/MXXE0t2er26IVojTk8C54hlQ6KGTbXKTyz/bmKGlqpJs7B293Mx","NetworkPubKey":null},{"NodeID":"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0","Address":"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0@flow.com:1234","Role":"consensus","InitialWeight":1000,"StakingPubKey":"t9AfPySIzAW+nLJFBgbNW4sKgHDfctgUBkF+U03G0ql893IdGFUDJjCHuVAw03XjBydxMBRVDV5JWsp7PCuq6yJIiG52noX3gC3O2h7PisvcJLV9YT3tEbpK33ZbtgA3","NetworkPubKey":null},{"NodeID":"71080c5b7c40738f81e429a632e3e5a69a6c8e68c7dea9d7eaf4520495f2a133","Address":"71080c5b7c40738f81e429a632e3e5a69a6c8e68c7dea9d7eaf4520495f2a133@flow.com:1234","Role":"verification","InitialWeight":1000,"StakingPubKey":"tiNPNZ6RtFLDkjWwTp5TgwiTEkSPOBD1pbsqXX/ubInKa81sgXX3b2RUXho/FlxyGIdzRPETTHoObiBm0Icn/8Z+XFz5TjbrVK3nUfNsYL+iTgg3MQIeOzTf7ogUueo3","NetworkPubKey":null},{"NodeID":"8e962133021dfd4f7a82e93c917fe83b5fbffa790650b2064690f2381fc4215f","Address":"8e962133021dfd4f7a82e93c917fe83b5fbffa790650b2064690f2
381fc4215f@flow.com:1234","Role":"access","InitialWeight":1000,"StakingPubKey":"izqm3PiwR0APmkY3CjqyaH1lm/doAFO8EWX0hbEqDLpDR7TCsa3Yl+fQzPa3QEvxB6tKM15GgIMqx3wig5B/pT02qWwBFqfND0HyiTDhDcZXdFWWJsZkT2ONPw0l3+5t","NetworkPubKey":null},{"NodeID":"9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468","Address":"9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468@flow.com:1234","Role":"collection","InitialWeight":1000,"StakingPubKey":"mP2GNZNxPUCeeliBFlxYc8O6Ubek/oAEmDEZjBgwU5xtdOzag43iKR1D0j//HjuiB+SOqrcyyTg4EPPPVxsXZqwyF15Kp1CeYkd/BDX2I77NuaneoSJ6LjEWoUaHgPVy","NetworkPubKey":null},{"NodeID":"e46dcc74bb3598380ea2673020394f230525736543fa1912567e73e9365adc46","Address":"e46dcc74bb3598380ea2673020394f230525736543fa1912567e73e9365adc46@flow.com:1234","Role":"access","InitialWeight":1000,"StakingPubKey":"qsxVdXTbpg1MYKX9v3mWfb5/RudAVqMUo5xRFjrkzXU/cMhkYYH39QY5Cu8z3H1eGAymf9RJRMYI2N3pesz2btY+nD0mCWjwD3BX5r9cTY0t5GriI5ALMKi8rLYz1nG2","NetworkPubKey":null},{"NodeID":"e7c69bbb3e869d0edb728582ab8fe18fb0a0a382514aa3791cca4b5b1da465ae","Address":"e7c69bbb3e869d0edb728582ab8fe18fb0a0a382514aa3791cca4b5b1da465ae@flow.com:1234","Role":"execution","InitialWeight":1000,"StakingPubKey":"h6iYAaSnI+ZlY0LRVDyG1T9d6gIgRrduZwNz1QMA4IcMPIsZINfDqcCdvWr5V0Z3DewAQgvz848NCY0j0bGqK+Z7EYrVYypA7rWJHR2EVEladlLb/oVzyY5AVBrV2U52","NetworkPubKey":null},{"NodeID":"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2","Address":"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2@flow.com:1234","Role":"consensus","InitialWeight":1000,"StakingPubKey":"j04oGdIKLTGBwUHvusLYonl9X2htt5ezkFqIar6NEzQvFgtQK3E/oCkVknv6t1DkBGXrIdRNnMMfxRhATH3LQi9Kl6IhzhKUd7YwBQ2gnT01b8pyEl5t1xQ6Vbwb/YDR","NetworkPubKey":null}],"Assignments":[["083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979","9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468"]],"RandomSource":"EugPYRsHZ0BUzrqH0KB9gw==","TargetDuration":3600,"TargetEndTime":1746492607}},{"Type":"commit","Event":{"Counter":1,"ClusterQCs":[{"SigData":"+JmFAAEBAQCwEYo5xM6F0+auLSdIE33HtvQeP3ma7eWo5j38oCVffEF8axz8csdjGT+XOsTw2iCXsOGfhIMd4igMQXNeL2CD4fozU8ggU7XLu1Cv9iZC9v71JY64f+gU0b0Yf7FZzl8Bs7AUDhjK2FxiO02O5GxhjOy03h+ZhTpvl4pU53AW+z9YThvdvicthrVvaOuts4wpiaI=","VoterIDs":["083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979","9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468"]}],"DKGGroupKey":"ad6dc6f9e9e5b6d5c7efaef0b68fb11c74130044a810de2c63c75b569354582bbfbd6d65dddc50dde9abbdc150d6b3bd017b52cdebb22dbc163e443a25de8efda5c5345fbc21de32bff02376f4da68a511ed7a8056815f5aa2a95e75a0b255a6","DKGParticipantKeys":["98a78d9a64325136a2a76b8c01a147a8629e7752de24a5c490b2eb7a6e5775e5498db5e5f9ec65e88af1b38e96a3b80c0df0d4083e2588d06970fb342f8ba7a721b08e093d9966ade02fd64652459d517307b80a9381dd9640a1ac9184ce40c0","ae79cb3e40e74612af7e5f72a3d5a59d0c43f457cf3e8a5282d7acd886c81521c39211b38a682d2c37904187bc107bbb00fca433928df5ad55cd6810276fc75ce063a1d52869aa32e412c4d6c36ac13e71dd37c4773e36cc0e44df88308bc903"],"DKGIndexMap":{"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0":0,"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2":1}}}],"ExecutionDataID":"0000000000000000000000000000000000000000000000000000000000000000","ID":"77da394347683bc3c80a2abde5f1a3c405d8f6d1ba66b5fa301aade4fc6c0b95"}],"LatestSeals":{"4bb784dbbb4428d1e21482f2cb14f4d3c0262b29976317f3c90a68e5c1e25013":"4a910e144f5ed588152e477550d96829bec2bac8f5d072956d7d8d1d41884f2f"},"FirstSeal":{"BlockID":"4bb784dbbb4428d1e21482f2cb14f4d3c026
2b29976317f3c90a68e5c1e25013","ResultID":"77da394347683bc3c80a2abde5f1a3c405d8f6d1ba66b5fa301aade4fc6c0b95","FinalState":"dfba67948e4c8453f42e5cfa95b1bde1472df4661835bb512e6f7d9265ecc1c4","AggregatedApprovalSigs":[{"VerifierSignatures":["4mGMKE5AwF+QQbjHqdsVYYZazXhgw1ddnElpMdE42JEVLmBL0CKyftXYX0Z4qnwt","OZ640tKiGVsv1S2ClMKcRXlNxXFb8Sr7/HTQZX5ydYWOALnK8o23jgCgqcBAOinv","utJVtqSzaLutcXxo53Z6C292pcOzDEN6U7t36kg9luV0xgyLK4S/uND4I9PuUiKg","vdnab4atbXr62wUqPXq5XLkPw18z2FA4Cb5i+pcQz3S0NcgBzLUBsz/Gf4kZOvLF","nRTQpyRpMJBF9UB0gW8RH8dBucA9//D40X9XD7Rprfopkr4y/RAMp1/wveGCcjo1","+COT08niboytGoLbX+U6iY9LQcX53x/fC2lYf/ksZhDCK8ikWwzeSRADHVtWFeN4","S/HequcwuV4sEVHzTHfRfcJQPVgOD95ASiGogbKXDILMFARIbw8obgOWixqexJBd"],"SignerIDs":["5e510956c369fd5d063347ff331d7b60ad370065005e144b62eba71c21fa62ee","bb841d5407ccf101f8b315be9c176fbb9a24bf2e2005a86443f50572d47ef26b","295e986852acf4ffa847b50744c4e6561ed84112373f10266170e6bc914fa063","bd55c8520ef83e55ef3e8df864e2cc35142e948edc8089b15fd8cee99f7f3e71","773ad5ebf63df481a9fc70fb272ece87fad469ca175e4ee808ae11bfd6d5cb73","092ee0ffc52ac8e4aa5208be72546db99da46c12485a06c31e2e6f0b142c38ce","73adb53c2c27a2ca11214ce08ac0cfb8f1c24d04d046b634b17951927a1b8bee"]}]},"ProtocolStateEntries":{"873d2f32be044056768e94962f8a2ec07f60f40ae22d4396b3baf29ddffae820":{"KVStore":{"Version":1,"Data":"hK5WZXJzaW9uVXBncmFkZcCsRXBvY2hTdGF0ZUlExCA2h0umRz8fU2bJxgH3i4Ia/3kbZAOw4MOeUv7htO7zp7dFcG9jaEV4dGVuc2lvblZpZXdDb3VudM8AAAAAAAACWLtGaW5hbGl6YXRpb25TYWZldHlUaHJlc2hvbGTPAAAAAAAAAGQ="},"EpochEntry":{"PreviousEpoch":null,"CurrentEpoch":{"SetupID":"78cdf4cde701bfbffb61017537f324600ff3eaa0a568b8afbb02ae03c65a8ac9","CommitID":"6b3aa1496465ec8fd3c028c7462fe16763804e708d69a3d9b1530f574c4b410f","ActiveIdentities":[{"NodeID":"083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979","Ejected":false},{"NodeID":"101b061ec2c53e18617c8d4a7c2e1ecaff8574dbeab61588ea2a7205f085555f","Ejected":false},{"NodeID":"287a7299497e61de9c9e08300ddbc0c74f3a951b4a23710cd46a7097aefe984d","Ejected":false},{"NodeID":"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0","Ejected":false},{"NodeID":"71080c5b7c40738f81e429a632e3e5a69a6c8e68c7dea9d7eaf4520495f2a133","Ejected":false},{"NodeID":"8e962133021dfd4f7a82e93c917fe83b5fbffa790650b2064690f2381fc4215f","Ejected":false},{"NodeID":"9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468","Ejected":false},{"NodeID":"e46dcc74bb3598380ea2673020394f230525736543fa1912567e73e9365adc46","Ejected":false},{"NodeID":"e7c69bbb3e869d0edb728582ab8fe18fb0a0a382514aa3791cca4b5b1da465ae","Ejected":false},{"NodeID":"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2","Ejected":false}],"EpochExtensions":null},"NextEpoch":null,"EpochFallbackTriggered":false,"PreviousEpochSetup":null,"PreviousEpochCommit":null,"CurrentEpochSetup":{"Counter":1,"FirstView":0,"DKGPhase1FinalView":100,"DKGPhase2FinalView":200,"DKGPhase3FinalView":300,"FinalView":100000,"Participants":[{"NodeID":"083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979","Address":"083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979@flow.com:1234","Role":"collection","InitialWeight":1000,"StakingPubKey":"q8kxnV/xM07xXen+kMT1D92KHF/VFs/DtQn+s/54jlUUcbHecsCPS2xDggweT1NMEcY/3W5bkSlmUVAvVxGycYNVmXphodHTLv6nPsi0ZsgzXHDkK5Zf7LTYxzMyU3AV","NetworkPubKey":null},{"NodeID":"101b061ec2c53e18617c8d4a7c2e1ecaff8574dbeab61588ea2a7205f085555f","Address":"101b061ec2c53e18617c8d4a7c2e1ecaff8574dbeab61588ea2a7205f085555f@flow.com:1234","Role":"execution","InitialWeight":1000,"StakingPubKey":"pLRp
U+PpguxHdi74kyQoWa10PzitPUmXn534vIrooaMpBRNeW1TWZCNnHe1wy0ATB8BF7AeUxAobldam5b/Dn1/kB6yoWQy7wFLsWASgc/38Bh/AlyXUiUIOjMGE5HLx","NetworkPubKey":null},{"NodeID":"287a7299497e61de9c9e08300ddbc0c74f3a951b4a23710cd46a7097aefe984d","Address":"287a7299497e61de9c9e08300ddbc0c74f3a951b4a23710cd46a7097aefe984d@flow.com:1234","Role":"verification","InitialWeight":1000,"StakingPubKey":"jEDxK889vdalnfhE63P52WXxAxGVjHN7AtC70SGM9u1pez3cgzV6ZhwfFablj1ZxC9KUCMK8XvOi/MXXE0t2er26IVojTk8C54hlQ6KGTbXKTyz/bmKGlqpJs7B293Mx","NetworkPubKey":null},{"NodeID":"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0","Address":"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0@flow.com:1234","Role":"consensus","InitialWeight":1000,"StakingPubKey":"t9AfPySIzAW+nLJFBgbNW4sKgHDfctgUBkF+U03G0ql893IdGFUDJjCHuVAw03XjBydxMBRVDV5JWsp7PCuq6yJIiG52noX3gC3O2h7PisvcJLV9YT3tEbpK33ZbtgA3","NetworkPubKey":null},{"NodeID":"71080c5b7c40738f81e429a632e3e5a69a6c8e68c7dea9d7eaf4520495f2a133","Address":"71080c5b7c40738f81e429a632e3e5a69a6c8e68c7dea9d7eaf4520495f2a133@flow.com:1234","Role":"verification","InitialWeight":1000,"StakingPubKey":"tiNPNZ6RtFLDkjWwTp5TgwiTEkSPOBD1pbsqXX/ubInKa81sgXX3b2RUXho/FlxyGIdzRPETTHoObiBm0Icn/8Z+XFz5TjbrVK3nUfNsYL+iTgg3MQIeOzTf7ogUueo3","NetworkPubKey":null},{"NodeID":"8e962133021dfd4f7a82e93c917fe83b5fbffa790650b2064690f2381fc4215f","Address":"8e962133021dfd4f7a82e93c917fe83b5fbffa790650b2064690f2381fc4215f@flow.com:1234","Role":"access","InitialWeight":1000,"StakingPubKey":"izqm3PiwR0APmkY3CjqyaH1lm/doAFO8EWX0hbEqDLpDR7TCsa3Yl+fQzPa3QEvxB6tKM15GgIMqx3wig5B/pT02qWwBFqfND0HyiTDhDcZXdFWWJsZkT2ONPw0l3+5t","NetworkPubKey":null},{"NodeID":"9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468","Address":"9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468@flow.com:1234","Role":"collection","InitialWeight":1000,"StakingPubKey":"mP2GNZNxPUCeeliBFlxYc8O6Ubek/oAEmDEZjBgwU5xtdOzag43iKR1D0j//HjuiB+SOqrcyyTg4EPPPVxsXZqwyF15Kp1CeYkd/BDX2I77NuaneoSJ6LjEWoUaHgPVy","NetworkPubKey":null},{"NodeID":"e46dcc74bb3598380ea2673020394f230525736543fa1912567e73e9365adc46","Address":"e46dcc74bb3598380ea2673020394f230525736543fa1912567e73e9365adc46@flow.com:1234","Role":"access","InitialWeight":1000,"StakingPubKey":"qsxVdXTbpg1MYKX9v3mWfb5/RudAVqMUo5xRFjrkzXU/cMhkYYH39QY5Cu8z3H1eGAymf9RJRMYI2N3pesz2btY+nD0mCWjwD3BX5r9cTY0t5GriI5ALMKi8rLYz1nG2","NetworkPubKey":null},{"NodeID":"e7c69bbb3e869d0edb728582ab8fe18fb0a0a382514aa3791cca4b5b1da465ae","Address":"e7c69bbb3e869d0edb728582ab8fe18fb0a0a382514aa3791cca4b5b1da465ae@flow.com:1234","Role":"execution","InitialWeight":1000,"StakingPubKey":"h6iYAaSnI+ZlY0LRVDyG1T9d6gIgRrduZwNz1QMA4IcMPIsZINfDqcCdvWr5V0Z3DewAQgvz848NCY0j0bGqK+Z7EYrVYypA7rWJHR2EVEladlLb/oVzyY5AVBrV2U52","NetworkPubKey":null},{"NodeID":"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2","Address":"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2@flow.com:1234","Role":"consensus","InitialWeight":1000,"StakingPubKey":"j04oGdIKLTGBwUHvusLYonl9X2htt5ezkFqIar6NEzQvFgtQK3E/oCkVknv6t1DkBGXrIdRNnMMfxRhATH3LQi9Kl6IhzhKUd7YwBQ2gnT01b8pyEl5t1xQ6Vbwb/YDR","NetworkPubKey":null}],"Assignments":[["083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979","9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468"]],"RandomSource":"EugPYRsHZ0BUzrqH0KB9gw==","TargetDuration":3600,"TargetEndTime":1746492607},"CurrentEpochCommit":{"Counter":1,"ClusterQCs":[{"SigData":"+JmFAAEBAQCwEYo5xM6F0+auLSdIE33HtvQeP3ma7eWo5j38oCVffEF8axz8csdjGT+XOsTw2iCXsOGfhIMd4
igMQXNeL2CD4fozU8ggU7XLu1Cv9iZC9v71JY64f+gU0b0Yf7FZzl8Bs7AUDhjK2FxiO02O5GxhjOy03h+ZhTpvl4pU53AW+z9YThvdvicthrVvaOuts4wpiaI=","VoterIDs":["083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979","9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468"]}],"DKGGroupKey":"ad6dc6f9e9e5b6d5c7efaef0b68fb11c74130044a810de2c63c75b569354582bbfbd6d65dddc50dde9abbdc150d6b3bd017b52cdebb22dbc163e443a25de8efda5c5345fbc21de32bff02376f4da68a511ed7a8056815f5aa2a95e75a0b255a6","DKGParticipantKeys":["98a78d9a64325136a2a76b8c01a147a8629e7752de24a5c490b2eb7a6e5775e5498db5e5f9ec65e88af1b38e96a3b80c0df0d4083e2588d06970fb342f8ba7a721b08e093d9966ade02fd64652459d517307b80a9381dd9640a1ac9184ce40c0","ae79cb3e40e74612af7e5f72a3d5a59d0c43f457cf3e8a5282d7acd886c81521c39211b38a682d2c37904187bc107bbb00fca433928df5ad55cd6810276fc75ce063a1d52869aa32e412c4d6c36ac13e71dd37c4773e36cc0e44df88308bc903"],"DKGIndexMap":{"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0":0,"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2":1}},"NextEpochSetup":null,"NextEpochCommit":null,"CurrentEpochIdentityTable":[{"EncodableIdentitySkeleton":{"NodeID":"083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979","Address":"083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979@flow.com:1234","Role":"collection","InitialWeight":1000,"StakingPubKey":"q8kxnV/xM07xXen+kMT1D92KHF/VFs/DtQn+s/54jlUUcbHecsCPS2xDggweT1NMEcY/3W5bkSlmUVAvVxGycYNVmXphodHTLv6nPsi0ZsgzXHDkK5Zf7LTYxzMyU3AV","NetworkPubKey":null},"ParticipationStatus":"EpochParticipationStatusActive"},{"EncodableIdentitySkeleton":{"NodeID":"101b061ec2c53e18617c8d4a7c2e1ecaff8574dbeab61588ea2a7205f085555f","Address":"101b061ec2c53e18617c8d4a7c2e1ecaff8574dbeab61588ea2a7205f085555f@flow.com:1234","Role":"execution","InitialWeight":1000,"StakingPubKey":"pLRpU+PpguxHdi74kyQoWa10PzitPUmXn534vIrooaMpBRNeW1TWZCNnHe1wy0ATB8BF7AeUxAobldam5b/Dn1/kB6yoWQy7wFLsWASgc/38Bh/AlyXUiUIOjMGE5HLx","NetworkPubKey":null},"ParticipationStatus":"EpochParticipationStatusActive"},{"EncodableIdentitySkeleton":{"NodeID":"287a7299497e61de9c9e08300ddbc0c74f3a951b4a23710cd46a7097aefe984d","Address":"287a7299497e61de9c9e08300ddbc0c74f3a951b4a23710cd46a7097aefe984d@flow.com:1234","Role":"verification","InitialWeight":1000,"StakingPubKey":"jEDxK889vdalnfhE63P52WXxAxGVjHN7AtC70SGM9u1pez3cgzV6ZhwfFablj1ZxC9KUCMK8XvOi/MXXE0t2er26IVojTk8C54hlQ6KGTbXKTyz/bmKGlqpJs7B293Mx","NetworkPubKey":null},"ParticipationStatus":"EpochParticipationStatusActive"},{"EncodableIdentitySkeleton":{"NodeID":"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0","Address":"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0@flow.com:1234","Role":"consensus","InitialWeight":1000,"StakingPubKey":"t9AfPySIzAW+nLJFBgbNW4sKgHDfctgUBkF+U03G0ql893IdGFUDJjCHuVAw03XjBydxMBRVDV5JWsp7PCuq6yJIiG52noX3gC3O2h7PisvcJLV9YT3tEbpK33ZbtgA3","NetworkPubKey":null},"ParticipationStatus":"EpochParticipationStatusActive"},{"EncodableIdentitySkeleton":{"NodeID":"71080c5b7c40738f81e429a632e3e5a69a6c8e68c7dea9d7eaf4520495f2a133","Address":"71080c5b7c40738f81e429a632e3e5a69a6c8e68c7dea9d7eaf4520495f2a133@flow.com:1234","Role":"verification","InitialWeight":1000,"StakingPubKey":"tiNPNZ6RtFLDkjWwTp5TgwiTEkSPOBD1pbsqXX/ubInKa81sgXX3b2RUXho/FlxyGIdzRPETTHoObiBm0Icn/8Z+XFz5TjbrVK3nUfNsYL+iTgg3MQIeOzTf7ogUueo3","NetworkPubKey":null},"ParticipationStatus":"EpochParticipationStatusActive"},{"EncodableIdentitySkeleton":{"NodeID":"8e962133021dfd4f7a82e93c917fe83b5fbffa790650b2064690f2381fc4215f","Addres
s":"8e962133021dfd4f7a82e93c917fe83b5fbffa790650b2064690f2381fc4215f@flow.com:1234","Role":"access","InitialWeight":1000,"StakingPubKey":"izqm3PiwR0APmkY3CjqyaH1lm/doAFO8EWX0hbEqDLpDR7TCsa3Yl+fQzPa3QEvxB6tKM15GgIMqx3wig5B/pT02qWwBFqfND0HyiTDhDcZXdFWWJsZkT2ONPw0l3+5t","NetworkPubKey":null},"ParticipationStatus":"EpochParticipationStatusActive"},{"EncodableIdentitySkeleton":{"NodeID":"9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468","Address":"9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468@flow.com:1234","Role":"collection","InitialWeight":1000,"StakingPubKey":"mP2GNZNxPUCeeliBFlxYc8O6Ubek/oAEmDEZjBgwU5xtdOzag43iKR1D0j//HjuiB+SOqrcyyTg4EPPPVxsXZqwyF15Kp1CeYkd/BDX2I77NuaneoSJ6LjEWoUaHgPVy","NetworkPubKey":null},"ParticipationStatus":"EpochParticipationStatusActive"},{"EncodableIdentitySkeleton":{"NodeID":"e46dcc74bb3598380ea2673020394f230525736543fa1912567e73e9365adc46","Address":"e46dcc74bb3598380ea2673020394f230525736543fa1912567e73e9365adc46@flow.com:1234","Role":"access","InitialWeight":1000,"StakingPubKey":"qsxVdXTbpg1MYKX9v3mWfb5/RudAVqMUo5xRFjrkzXU/cMhkYYH39QY5Cu8z3H1eGAymf9RJRMYI2N3pesz2btY+nD0mCWjwD3BX5r9cTY0t5GriI5ALMKi8rLYz1nG2","NetworkPubKey":null},"ParticipationStatus":"EpochParticipationStatusActive"},{"EncodableIdentitySkeleton":{"NodeID":"e7c69bbb3e869d0edb728582ab8fe18fb0a0a382514aa3791cca4b5b1da465ae","Address":"e7c69bbb3e869d0edb728582ab8fe18fb0a0a382514aa3791cca4b5b1da465ae@flow.com:1234","Role":"execution","InitialWeight":1000,"StakingPubKey":"h6iYAaSnI+ZlY0LRVDyG1T9d6gIgRrduZwNz1QMA4IcMPIsZINfDqcCdvWr5V0Z3DewAQgvz848NCY0j0bGqK+Z7EYrVYypA7rWJHR2EVEladlLb/oVzyY5AVBrV2U52","NetworkPubKey":null},"ParticipationStatus":"EpochParticipationStatusActive"},{"EncodableIdentitySkeleton":{"NodeID":"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2","Address":"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2@flow.com:1234","Role":"consensus","InitialWeight":1000,"StakingPubKey":"j04oGdIKLTGBwUHvusLYonl9X2htt5ezkFqIar6NEzQvFgtQK3E/oCkVknv6t1DkBGXrIdRNnMMfxRhATH3LQi9Kl6IhzhKUd7YwBQ2gnT01b8pyEl5t1xQ6Vbwb/YDR","NetworkPubKey":null},"ParticipationStatus":"EpochParticipationStatusActive"}],"NextEpochIdentityTable":[]}}}},"QuorumCertificate":{"View":0,"BlockID":"4bb784dbbb4428d1e21482f2cb14f4d3c0262b29976317f3c90a68e5c1e25013","SignerIndices":"QAA=","SigData":"+JmFAQEAAAGw5FJlM62v7NKsi+0YgdBvbbc17iOh/TYiTttf9EtZQeYj5CBnAf/j0RYJrd3ltlIJsCO9LjuyCvUG8xcI13+HsbqBVAmcW3inTTOCTXu6MBR1EESvmv++/MOwnoSJEJtJJrBsuA8cMaXr747rPWz2pxcSqQ1uuC/ZuIG35tkAhfia+daoyE4OkXgJNdeEM1Zk22o="},"Params":{"ChainID":"flow-emulator","SporkID":"4bb784dbbb4428d1e21482f2cb14f4d3c0262b29976317f3c90a68e5c1e25013","SporkRootBlockHeight":0},"SealedVersionBeacon":null} diff --git a/integration/benchnet2/automate/testdata/level1/expected/template-data-input1.json b/integration/benchnet2/automate/testdata/level1/expected/template-data-input1.json deleted file mode 100644 index d392360ba5c..00000000000 --- a/integration/benchnet2/automate/testdata/level1/expected/template-data-input1.json +++ /dev/null @@ -1,100 +0,0 @@ -[ - { - "role": "access", - "name": "access1", - "node_id": "c8a31df973605a8ec8351810d38e70fc66d9871ef978194f246025a5f9f7bf6e", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "access", - "name": "access2", - "node_id": "d3be7a089cc8a29a3ad8fcff5809c1ae27f35159ebcf585e3e4e91a1f3b87d89", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": 
"collection", - "name": "collection1", - "node_id": "416c65782048656e74736368656c001844616b8e9b5680103f25545b2e535d72", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "collection", - "name": "collection2", - "node_id": "416e647265772042757269616e004bf4e37ab54b9ef5103294895fc58a1fe67b", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "collection", - "name": "collection3", - "node_id": "4261737469616e204d756c6c657200f26a128c4ef2b8752f3ad798cfa910d97f", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "collection", - "name": "collection4", - "node_id": "42656e6a616d696e2056616e204d6574657200bf596a51dee05642917a9c12c0", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "collection", - "name": "collection5", - "node_id": "436173657920417272696e67746f6e004cdbedda99daf9ff9a787c0618cee363", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "collection", - "name": "collection6", - "node_id": "44696574657220536869726c657900c100318341c6796198aa37e627949074bc", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "consensus", - "name": "consensus1", - "node_id": "4a616d65732048756e74657200e9ffa4e085542cfa80d15f0e61e54606c6cdb3", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "consensus", - "name": "consensus2", - "node_id": "4a65666665727920446f796c65005cf2fe1daafe62a66f59fd9afa12f0f78914", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "consensus", - "name": "consensus3", - "node_id": "4a6f7264616e20536368616c6d0064527abdf7ba98fac951ab71ac6aba31cfa1", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "execution", - "name": "execution1", - "node_id": "4a6f73682048616e6e616e00571da9984a91e31b5592e90d7be91703b2750235", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "execution", - "name": "execution2", - "node_id": "4b616e205a68616e670000937a7f84d6df0ca2acf95125c33c4b2637bae4e680", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - }, - { - "role": "verification", - "name": "verification1", - "node_id": "4c61796e65204c616672616e636500ee3643453a3694301f3a232c2f5b9427e2", - "docker_tag": "v0.27.6", - "docker_registry": "gcr.io/flow-container-registry/" - } -] diff --git a/integration/benchnet2/automate/testdata/level1/expected/template-fixture1.json b/integration/benchnet2/automate/testdata/level1/expected/template-fixture1.json new file mode 100644 index 00000000000..adf361e4e16 --- /dev/null +++ b/integration/benchnet2/automate/testdata/level1/expected/template-fixture1.json @@ -0,0 +1 @@ 
+[{"node_id":"083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979","name":"083fdd06d4a4a560001a3f6576689db323399775198bfd2b75d3345723f0d979@flow","role":"collection","docker_tag":"v0.27.6","docker_registry":"gcr.io/flow-container-registry/"},{"node_id":"101b061ec2c53e18617c8d4a7c2e1ecaff8574dbeab61588ea2a7205f085555f","name":"101b061ec2c53e18617c8d4a7c2e1ecaff8574dbeab61588ea2a7205f085555f@flow","role":"execution","docker_tag":"v0.27.6","docker_registry":"gcr.io/flow-container-registry/"},{"node_id":"287a7299497e61de9c9e08300ddbc0c74f3a951b4a23710cd46a7097aefe984d","name":"287a7299497e61de9c9e08300ddbc0c74f3a951b4a23710cd46a7097aefe984d@flow","role":"verification","docker_tag":"v0.27.6","docker_registry":"gcr.io/flow-container-registry/"},{"node_id":"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0","name":"6409849958aca562c695cceaff597f57ef3c0bd5b3d74fcb9fa7ae668d0b82c0@flow","role":"consensus","docker_tag":"v0.27.6","docker_registry":"gcr.io/flow-container-registry/"},{"node_id":"71080c5b7c40738f81e429a632e3e5a69a6c8e68c7dea9d7eaf4520495f2a133","name":"71080c5b7c40738f81e429a632e3e5a69a6c8e68c7dea9d7eaf4520495f2a133@flow","role":"verification","docker_tag":"v0.27.6","docker_registry":"gcr.io/flow-container-registry/"},{"node_id":"8e962133021dfd4f7a82e93c917fe83b5fbffa790650b2064690f2381fc4215f","name":"8e962133021dfd4f7a82e93c917fe83b5fbffa790650b2064690f2381fc4215f@flow","role":"access","docker_tag":"v0.27.6","docker_registry":"gcr.io/flow-container-registry/"},{"node_id":"9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468","name":"9432682b1ca21a812764fcbc1a1d5b4b8d5c79482c578141a79321d4727be468@flow","role":"collection","docker_tag":"v0.27.6","docker_registry":"gcr.io/flow-container-registry/"},{"node_id":"e46dcc74bb3598380ea2673020394f230525736543fa1912567e73e9365adc46","name":"e46dcc74bb3598380ea2673020394f230525736543fa1912567e73e9365adc46@flow","role":"access","docker_tag":"v0.27.6","docker_registry":"gcr.io/flow-container-registry/"},{"node_id":"e7c69bbb3e869d0edb728582ab8fe18fb0a0a382514aa3791cca4b5b1da465ae","name":"e7c69bbb3e869d0edb728582ab8fe18fb0a0a382514aa3791cca4b5b1da465ae@flow","role":"execution","docker_tag":"v0.27.6","docker_registry":"gcr.io/flow-container-registry/"},{"node_id":"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2","name":"eade711f79377b0f88cff8239e88aecfe5b36ff27c5da522d7112ce04d3c5df2@flow","role":"consensus","docker_tag":"v0.27.6","docker_registry":"gcr.io/flow-container-registry/"}] diff --git a/integration/benchnet2/automate/testdata/level2/expected/values1.yml b/integration/benchnet2/automate/testdata/level2/expected/values1.yml index 86cdaf50172..f3e2e954852 100644 --- a/integration/benchnet2/automate/testdata/level2/expected/values1.yml +++ b/integration/benchnet2/automate/testdata/level2/expected/values1.yml @@ -497,7 +497,6 @@ execution: - --loglevel=INFO - --triedir=/trie - --rpc-addr=0.0.0.0:9000 - - --cadence-tracing=false - --extensive-tracing=false env: - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT @@ -523,7 +522,6 @@ execution: - --loglevel=INFO - --triedir=/trie - --rpc-addr=0.0.0.0:9000 - - --cadence-tracing=false - --extensive-tracing=false env: - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT diff --git a/integration/benchnet2/automate/testdata/level2/templates/values11-nested-template-defaults.yml b/integration/benchnet2/automate/testdata/level2/templates/values11-nested-template-defaults.yml index 9427b2ab1c6..f210216b83a 100644 --- 
a/integration/benchnet2/automate/testdata/level2/templates/values11-nested-template-defaults.yml +++ b/integration/benchnet2/automate/testdata/level2/templates/values11-nested-template-defaults.yml @@ -97,7 +97,6 @@ execution: - --loglevel=INFO - --triedir=/trie - --rpc-addr=0.0.0.0:9000 - - --cadence-tracing=false - --extensive-tracing=false env:{{template "env" .}} image: {{$val.docker_registry}}/execution:{{$val.docker_tag}} diff --git a/integration/benchnet2/create-secrets.sh b/integration/benchnet2/create-secrets.sh deleted file mode 100644 index 63f1b54bcab..00000000000 --- a/integration/benchnet2/create-secrets.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash - -# Set Arguments -NETWORK_ID=$1 -NAMESPACE=$2 - -# Create execution-state secrets required to run network -# Note - As K8s secrets cannot contain forward slashes, we remove the path prefix -# Note - Since this is non-secret, this could be a configmap rather than a secret -for f in bootstrap/execution-state/*; do - # Remove the bootstrap/execution-state/ prefix - # Example start bootstrap/execution-state/00000000 - # Example result 00000000 - PREFIXREMOVED=${f//bootstrap\/execution-state\//}; - PREFIXREMOVED="$NETWORK_ID.$PREFIXREMOVED"; - - # Create the secret after string manipulation - kubectl create secret generic $PREFIXREMOVED --from-file=$f --namespace=$NAMESPACE; - kubectl label secret $PREFIXREMOVED "service=flow" --namespace=$NAMESPACE - kubectl label secret $PREFIXREMOVED "networkId=$NETWORK_ID" --namespace=$NAMESPACE -done - -# Create private-root-information secrets required to run network -# Note - As K8s secrets cannot contain forward slashes, the "${PREFIXREMOVED///\//.}" replaces forward slashes with periods -# Example filename bootstrap/private-root-information/private-node-info_416c65782048656e74736368656c00e4e3235298a4b91382ecd84f13b9c237e6/node-info.priv.json -# Example key name result after string manipulation 416c65782048656e74736368656c00e4e3235298a4b91382ecd84f13b9c237e6.node-info.priv.json -for f in bootstrap/private-root-information/*/*; do - # Remove the bootstrap/private-root-information/private-node-info_ prefix to ensure NodeId is retained - # Example result 416c65782048656e74736368656c00e4e3235298a4b91382ecd84f13b9c237e6/node-info.priv.json - PREFIXREMOVED=${f//bootstrap\/private-root-information\/private-node-info_/}; - PREFIXREMOVED="$NETWORK_ID.$PREFIXREMOVED"; - - # Substitute the forward slash "/" for a period "." 
- # Example $PREFIXREMOVED value 416c65782048656e74736368656c00e4e3235298a4b91382ecd84f13b9c237e6/node-info.priv.json - # Example result after string manipulation 416c65782048656e74736368656c00e4e3235298a4b91382ecd84f13b9c237e6.node-info.priv.json - KEYNAME=${PREFIXREMOVED//\//.} - - # Create the secret after string manipulation - kubectl create secret generic $KEYNAME --from-file=$f --namespace=$NAMESPACE; - kubectl label secret $KEYNAME "service=flow" --namespace=$NAMESPACE - kubectl label secret $KEYNAME "networkId=$NETWORK_ID" --namespace=$NAMESPACE -done - -# Create public-root-information secrets required to run network -# Note - As K8s secrets cannot contain forward slashes, we remove the path prefix -# Note - Since this is non-secret, this could be a configmap rather than a secret -for f in bootstrap/public-root-information/*.json; do - # Remove the bootstrap/public-root-informationn/private-node-info_ prefix - # Example start bootstrap/public-root-information/node-infos.pub.json - # Example result node-info.pub.json - PREFIXREMOVED=${f//bootstrap\/public-root-information\//}; - PREFIXREMOVED="$NETWORK_ID.$PREFIXREMOVED"; - - # Create the secret after string manipulation - kubectl create secret generic $PREFIXREMOVED --from-file=$f --namespace=$NAMESPACE ; - kubectl label secret $PREFIXREMOVED "service=flow" --namespace=$NAMESPACE - kubectl label secret $PREFIXREMOVED "networkId=$NETWORK_ID" --namespace=$NAMESPACE -done diff --git a/integration/benchnet2/flow/templates/access.yml b/integration/benchnet2/flow/templates/access.yml index b2b3e13b0d1..91a28035201 100644 --- a/integration/benchnet2/flow/templates/access.yml +++ b/integration/benchnet2/flow/templates/access.yml @@ -7,8 +7,8 @@ metadata: name: {{ $k }} labels: app: {{ $k }} - networkId: {{ $.Values.networkId }} - nodeType: access + network: {{ $.Values.networkId }} + role: access owner: {{ $.Values.owner }} service: flow @@ -18,9 +18,9 @@ spec: selector: matchLabels: app: {{ $k }} - nodeType: access + role: access service: flow - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} template: metadata: @@ -30,10 +30,26 @@ spec: prometheus.io/port: "8080" labels: app: {{ $k }} - nodeType: access + role: access service: flow - networkId: {{ $.Values.networkId }} - spec: + network: {{ $.Values.networkId }} + {{- if contains "access1-" $k }} + pyroscope.io/scrape: "true" + {{- end }} + spec: + nodeSelector: + iam.gke.io/gke-metadata-server-enabled: "true" + serviceAccountName: "benchnet-configuration-reader" + initContainers: + - name: bootstrap-download + image: gcr.io/google.com/cloudsdktool/google-cloud-cli:372.0.0 + command: + - 'sh' + - '-c' + - "mkdir -p /data/bootstrap; cd /data/bootstrap; gsutil cp gs://{{ $.Values.configurationBucket }}/{{ $.Values.networkId }}.tar - | tar -x" + volumeMounts: + - name: data + mountPath: /data containers: - name: {{ $k }} image: {{ $v.image }} @@ -61,63 +77,17 @@ spec: - name: data mountPath: /data - - name: node-info-priv-json - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/node-info.priv.json - subPath: node-info.priv.json - - - name: node-info-pub-json - readOnly: true - mountPath: /bootstrap/public-root-information/node-infos.pub.json - subPath: node-infos.pub.json - - - name: root-block-json - readOnly: true - mountPath: /bootstrap/public-root-information/root-block.json - subPath: root-block.json - - - name: root-protocol-state-snapshot-json - readOnly: true - mountPath: 
/bootstrap/public-root-information/root-protocol-state-snapshot.json - subPath: root-protocol-state-snapshot.json - - - name: secretsdb-key - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/secretsdb-key - subPath: secretsdb-key - {{ if $v.resources }} resources: {{ $v.resources | toYaml | nindent 12 }} {{ else}} resources: {{ $.Values.access.defaults.resources | toYaml | nindent 12 }} {{ end }} - volumes: - - name: node-info-priv-json - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.node-info.priv.json - - - name: node-info-pub-json - secret: - secretName: {{ $.Values.networkId }}.node-infos.pub.json - - - name: root-block-json - secret: - secretName: {{ $.Values.networkId }}.root-block.json - - - name: root-protocol-state-snapshot-json - secret: - secretName: {{ $.Values.networkId }}.root-protocol-state-snapshot.json - - - name: secretsdb-key - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.secretsdb-key - volumeClaimTemplates: - metadata: name: data labels: - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} spec: accessModes: ["ReadWriteOnce"] resources: @@ -138,7 +108,7 @@ metadata: name: {{ $k }} labels: app: {{ $k }} - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} spec: {{ if $v.servicePorts }} ports: {{ $v.servicePorts | toYaml | nindent 12 }} diff --git a/integration/benchnet2/flow/templates/collection.yml b/integration/benchnet2/flow/templates/collection.yml index 88d12d82296..b4f59a203e5 100644 --- a/integration/benchnet2/flow/templates/collection.yml +++ b/integration/benchnet2/flow/templates/collection.yml @@ -7,8 +7,8 @@ metadata: name: {{ $k }} labels: app: {{ $k }} - networkId: {{ $.Values.networkId }} - nodeType: collection + network: {{ $.Values.networkId }} + role: collection owner: {{ $.Values.owner }} service: flow @@ -18,7 +18,7 @@ spec: selector: matchLabels: app: {{ $k }} - nodeType: collection + role: collection service: flow template: @@ -29,10 +29,26 @@ spec: prometheus.io/port: "8080" labels: app: {{ $k }} - nodeType: collection + role: collection service: flow - networkId: {{ $.Values.networkId }} - spec: + network: {{ $.Values.networkId }} + {{- if contains "collection1-" $k }} + pyroscope.io/scrape: "true" + {{- end }} + spec: + nodeSelector: + iam.gke.io/gke-metadata-server-enabled: "true" + serviceAccountName: "benchnet-configuration-reader" + initContainers: + - name: bootstrap-download + image: gcr.io/google.com/cloudsdktool/google-cloud-cli:372.0.0 + command: + - 'sh' + - '-c' + - "mkdir -p /data/bootstrap; cd /data/bootstrap; gsutil cp gs://{{ $.Values.configurationBucket }}/{{ $.Values.networkId }}.tar - | tar -x" + volumeMounts: + - name: data + mountPath: /data containers: - name: {{ $k }} image: {{ $v.image }} @@ -61,72 +77,16 @@ spec: - name: data mountPath: /data - - name: node-info-pub-json - readOnly: true - mountPath: /bootstrap/public-root-information/node-infos.pub.json - subPath: node-infos.pub.json - - - name: node-info-priv-json - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/node-info.priv.json - subPath: node-info.priv.json - - - name: node-machine-account-info-priv-json - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/node-machine-account-info.priv.json - subPath: node-machine-account-info.priv.json - - - name: root-block-json - readOnly: true - mountPath: /bootstrap/public-root-information/root-block.json - 
subPath: root-block.json - - - name: root-protocol-state-snapshot-json - readOnly: true - mountPath: /bootstrap/public-root-information/root-protocol-state-snapshot.json - subPath: root-protocol-state-snapshot.json - - - name: secretsdb-key - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/secretsdb-key - subPath: secretsdb-key - {{ if $v.resources }} resources: {{ $v.resources | toYaml | nindent 12 }} {{ else}} resources: {{ $.Values.collection.defaults.resources | toYaml | nindent 12 }} {{ end }} - - volumes: - - name: node-info-priv-json - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.node-info.priv.json - - - name: node-info-pub-json - secret: - secretName: {{ $.Values.networkId }}.node-infos.pub.json - - - name: root-block-json - secret: - secretName: {{ $.Values.networkId }}.root-block.json - - - name: root-protocol-state-snapshot-json - secret: - secretName: {{ $.Values.networkId }}.root-protocol-state-snapshot.json - - - name: node-machine-account-info-priv-json - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.node-machine-account-info.priv.json - - - name: secretsdb-key - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.secretsdb-key - volumeClaimTemplates: - metadata: name: data labels: - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} spec: accessModes: ["ReadWriteOnce"] resources: @@ -146,7 +106,7 @@ metadata: name: {{ $k }} labels: app: {{ $k }} - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} owner: {{ $.Values.owner }} spec: {{ if $v.servicePorts }} @@ -157,4 +117,4 @@ spec: selector: app: {{ $k }} type: NodePort -{{- end }} \ No newline at end of file +{{- end }} diff --git a/integration/benchnet2/flow/templates/consensus.yml b/integration/benchnet2/flow/templates/consensus.yml index 68afeef202d..04e2126156b 100644 --- a/integration/benchnet2/flow/templates/consensus.yml +++ b/integration/benchnet2/flow/templates/consensus.yml @@ -7,8 +7,8 @@ metadata: name: {{ $k }} labels: app: {{ $k }} - networkId: {{ $.Values.networkId }} - nodeType: consensus + network: {{ $.Values.networkId }} + role: consensus owner: {{ $.Values.owner }} service: flow @@ -18,7 +18,7 @@ spec: selector: matchLabels: app: {{ $k }} - nodeType: consensus + role: consensus service: flow template: @@ -29,10 +29,26 @@ spec: prometheus.io/port: "8080" labels: app: {{ $k }} - nodeType: consensus + role: consensus service: flow - networkId: {{ $.Values.networkId }} - spec: + network: {{ $.Values.networkId }} + {{- if contains "consensus1-" $k }} + pyroscope.io/scrape: "true" + {{- end }} + spec: + nodeSelector: + iam.gke.io/gke-metadata-server-enabled: "true" + serviceAccountName: "benchnet-configuration-reader" + initContainers: + - name: bootstrap-download + image: gcr.io/google.com/cloudsdktool/google-cloud-cli:372.0.0 + command: + - 'sh' + - '-c' + - "mkdir -p /data/bootstrap; cd /data/bootstrap; gsutil cp gs://{{ $.Values.configurationBucket }}/{{ $.Values.networkId }}.tar - | tar -x" + volumeMounts: + - name: data + mountPath: /data containers: - name: {{ $k }} image: {{ $v.image }} @@ -60,81 +76,17 @@ spec: - name: data mountPath: /data - - name: node-info-pub-json - readOnly: true - mountPath: /bootstrap/public-root-information/node-infos.pub.json - subPath: node-infos.pub.json - - - name: node-info-priv-json - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/node-info.priv.json - subPath: node-info.priv.json - 
- - name: node-machine-account-info-priv-json - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/node-machine-account-info.priv.json - subPath: node-machine-account-info.priv.json - - - name: random-beacon-priv-json - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/random-beacon.priv.json - subPath: random-beacon.priv.json - - - name: root-block-json - readOnly: true - mountPath: /bootstrap/public-root-information/root-block.json - subPath: root-block.json - - - name: root-protocol-state-snapshot-json - readOnly: true - mountPath: /bootstrap/public-root-information/root-protocol-state-snapshot.json - subPath: root-protocol-state-snapshot.json - - - name: secretsdb-key - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/secretsdb-key - subPath: secretsdb-key - {{ if $v.resources }} resources: {{ $v.resources | toYaml | nindent 12 }} {{ else}} resources: {{ $.Values.consensus.defaults.resources | toYaml | nindent 12 }} {{ end }} - volumes: - - name: node-info-priv-json - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.node-info.priv.json - - - name: node-info-pub-json - secret: - secretName: {{ $.Values.networkId }}.node-infos.pub.json - - - name: root-block-json - secret: - secretName: {{ $.Values.networkId }}.root-block.json - - - name: root-protocol-state-snapshot-json - secret: - secretName: {{ $.Values.networkId }}.root-protocol-state-snapshot.json - - - name: node-machine-account-info-priv-json - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.node-machine-account-info.priv.json - - - name: random-beacon-priv-json - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.random-beacon.priv.json - - - name: secretsdb-key - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.secretsdb-key - volumeClaimTemplates: - metadata: name: data labels: - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} spec: accessModes: ["ReadWriteOnce"] resources: @@ -154,7 +106,7 @@ metadata: name: {{ $k }} labels: app: {{ $k }} - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} owner: {{ $.Values.owner }} spec: {{ if $v.servicePorts }} @@ -165,4 +117,4 @@ spec: selector: app: {{ $k }} type: NodePort -{{- end }} \ No newline at end of file +{{- end }} diff --git a/integration/benchnet2/flow/templates/execution.yml b/integration/benchnet2/flow/templates/execution.yml index 3fec330eed1..a6152d40035 100644 --- a/integration/benchnet2/flow/templates/execution.yml +++ b/integration/benchnet2/flow/templates/execution.yml @@ -7,8 +7,8 @@ metadata: name: {{ $k }} labels: app: {{ $k }} - networkId: {{ $.Values.networkId }} - nodeType: execution + network: {{ $.Values.networkId }} + role: execution owner: {{ $.Values.owner }} service: flow @@ -18,7 +18,7 @@ spec: selector: matchLabels: app: {{ $k }} - nodeType: execution + role: execution service: flow template: @@ -29,10 +29,26 @@ spec: prometheus.io/port: "8080" labels: app: {{ $k }} - nodeType: execution + role: execution service: flow - networkId: {{ $.Values.networkId }} - spec: + network: {{ $.Values.networkId }} + {{- if contains "execution1-" $k }} + pyroscope.io/scrape: "true" + {{- end }} + spec: + nodeSelector: + iam.gke.io/gke-metadata-server-enabled: "true" + serviceAccountName: "benchnet-configuration-reader" + initContainers: + - name: bootstrap-download + image: gcr.io/google.com/cloudsdktool/google-cloud-cli:372.0.0 + 
command: + - 'sh' + - '-c' + - "mkdir -p /data/bootstrap; cd /data/bootstrap; gsutil cp gs://{{ $.Values.configurationBucket }}/{{ $.Values.networkId }}.tar - | tar -x" + volumeMounts: + - name: data + mountPath: /data containers: - name: {{ $k }} image: {{ $v.image }} @@ -57,74 +73,20 @@ spec: {{ end }} volumeMounts: - - name: execution-state - readOnly: true - mountPath: /bootstrap/execution-state/ - - name: data mountPath: /data - - name: node-info-priv-json - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/node-info.priv.json - subPath: node-info.priv.json - - - name: node-info-pub-json - readOnly: true - mountPath: /bootstrap/public-root-information/node-infos.pub.json - subPath: node-infos.pub.json - - - name: root-block-json - readOnly: true - mountPath: /bootstrap/public-root-information/root-block.json - subPath: root-block.json - - - name: root-protocol-state-snapshot-json - readOnly: true - mountPath: /bootstrap/public-root-information/root-protocol-state-snapshot.json - subPath: root-protocol-state-snapshot.json - - - name: secretsdb-key - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/secretsdb-key - subPath: secretsdb-key - {{ if $v.resources }} resources: {{ $v.resources | toYaml | nindent 12 }} {{ else}} resources: {{ $.Values.execution.defaults.resources | toYaml | nindent 12 }} {{ end }} - volumes: - - name: execution-state - secret: - secretName: "{{ $.Values.networkId }}.00000000" - - - name: node-info-priv-json - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.node-info.priv.json - - - name: node-info-pub-json - secret: - secretName: {{ $.Values.networkId }}.node-infos.pub.json - - - name: root-block-json - secret: - secretName: {{ $.Values.networkId }}.root-block.json - - - name: root-protocol-state-snapshot-json - secret: - secretName: {{ $.Values.networkId }}.root-protocol-state-snapshot.json - - - name: secretsdb-key - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.secretsdb-key - volumeClaimTemplates: - metadata: name: data labels: - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} spec: accessModes: ["ReadWriteOnce"] resources: @@ -144,7 +106,7 @@ metadata: name: {{ $k }} labels: app: {{ $k }} - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} owner: {{ $.Values.owner }} spec: {{ if $v.servicePorts }} @@ -155,4 +117,4 @@ spec: selector: app: {{ $k }} type: NodePort -{{- end }} \ No newline at end of file +{{- end }} diff --git a/integration/benchnet2/flow/templates/verification.yml b/integration/benchnet2/flow/templates/verification.yml index f71e86634bb..51d2a4bab11 100644 --- a/integration/benchnet2/flow/templates/verification.yml +++ b/integration/benchnet2/flow/templates/verification.yml @@ -7,9 +7,9 @@ metadata: name: {{ $k }} labels: app: {{ $k }} - nodeType: verification + network: {{ $.Values.networkId }} + role: verification service: flow - networkId: {{ $.Values.networkId }} spec: serviceName: {{ $k }} @@ -17,9 +17,9 @@ spec: selector: matchLabels: app: {{ $k }} - nodeType: verification + role: verification service: flow - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} template: metadata: @@ -29,11 +29,27 @@ spec: prometheus.io/port: "8080" labels: app: {{ $k }} - networkId: {{ $.Values.networkId }} - nodeType: verification + network: {{ $.Values.networkId }} + role: verification owner: {{ $.Values.owner }} service: flow - spec: + {{- if contains 
"verification1-" $k }} + pyroscope.io/scrape: "true" + {{- end }} + spec: + nodeSelector: + iam.gke.io/gke-metadata-server-enabled: "true" + serviceAccountName: "benchnet-configuration-reader" + initContainers: + - name: bootstrap-download + image: gcr.io/google.com/cloudsdktool/google-cloud-cli:372.0.0 + command: + - 'sh' + - '-c' + - "mkdir -p /data/bootstrap; cd /data/bootstrap; gsutil cp gs://{{ $.Values.configurationBucket }}/{{ $.Values.networkId }}.tar - | tar -x" + volumeMounts: + - name: data + mountPath: /data containers: - name: {{ $k }} image: {{ $v.image }} @@ -61,64 +77,16 @@ spec: - name: data mountPath: /data - - name: node-info-priv-json - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/node-info.priv.json - subPath: node-info.priv.json - - - name: node-info-pub-json - readOnly: true - mountPath: /bootstrap/public-root-information/node-infos.pub.json - subPath: node-infos.pub.json - - - name: root-block-json - readOnly: true - mountPath: /bootstrap/public-root-information/root-block.json - subPath: root-block.json - - - name: root-protocol-state-snapshot-json - readOnly: true - mountPath: /bootstrap/public-root-information/root-protocol-state-snapshot.json - subPath: root-protocol-state-snapshot.json - - - name: secretsdb-key - readOnly: true - mountPath: /bootstrap/private-root-information/private-node-info_{{ $v.nodeId }}/secretsdb-key - subPath: secretsdb-key - {{ if $v.resources }} resources: {{ $v.resources | toYaml | nindent 12 }} {{ else}} resources: {{ $.Values.verification.defaults.resources | toYaml | nindent 12 }} {{ end }} - - volumes: - - - name: node-info-priv-json - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.node-info.priv.json - - - name: node-info-pub-json - secret: - secretName: {{ $.Values.networkId }}.node-infos.pub.json - - - name: root-block-json - secret: - secretName: {{ $.Values.networkId }}.root-block.json - - - name: root-protocol-state-snapshot-json - secret: - secretName: {{ $.Values.networkId }}.root-protocol-state-snapshot.json - - - name: secretsdb-key - secret: - secretName: {{ $.Values.networkId }}.{{ $v.nodeId }}.secretsdb-key - volumeClaimTemplates: - metadata: name: data labels: - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} spec: accessModes: ["ReadWriteOnce"] resources: @@ -138,7 +106,7 @@ metadata: name: {{ $k }} labels: app: {{ $k }} - networkId: {{ $.Values.networkId }} + network: {{ $.Values.networkId }} owner: {{ $.Values.owner }} spec: {{ if $v.servicePorts }} @@ -149,4 +117,4 @@ spec: selector: app: {{ $k }} type: NodePort -{{- end }} \ No newline at end of file +{{- end }} diff --git a/integration/convert/convert.go b/integration/convert/convert.go index 2ff5ed28707..6b5c3a9882c 100644 --- a/integration/convert/convert.go +++ b/integration/convert/convert.go @@ -23,14 +23,14 @@ func ToSDKTransactionSignature(sig flow.TransactionSignature) sdk.TransactionSig Address: sdk.Address(sig.Address), SignerIndex: sig.SignerIndex, Signature: sig.Signature, - KeyIndex: int(sig.KeyIndex), + KeyIndex: sig.KeyIndex, } } func ToSDKProposalKey(key flow.ProposalKey) sdk.ProposalKey { return sdk.ProposalKey{ Address: sdk.Address(key.Address), - KeyIndex: int(key.KeyIndex), + KeyIndex: key.KeyIndex, SequenceNumber: key.SequenceNumber, } } diff --git a/integration/dkg/dkg_client_test.go b/integration/dkg/dkg_client_test.go index cdfe3985993..40c99420696 100644 --- a/integration/dkg/dkg_client_test.go +++ b/integration/dkg/dkg_client_test.go @@ -11,8 
+11,7 @@ import ( "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" - emulator "github.com/onflow/flow-emulator" - + "github.com/onflow/crypto" "github.com/onflow/flow-core-contracts/lib/go/contracts" "github.com/onflow/flow-core-contracts/lib/go/templates" @@ -21,7 +20,7 @@ import ( sdktemplates "github.com/onflow/flow-go-sdk/templates" "github.com/onflow/flow-go-sdk/test" - "github.com/onflow/flow-go/crypto" + emulator "github.com/onflow/flow-go/integration/internal/emulator" "github.com/onflow/flow-go/integration/utils" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/dkg" @@ -33,13 +32,13 @@ type ClientSuite struct { contractClient *dkg.Client - env templates.Environment - blockchain *emulator.Blockchain - emulatorClient *utils.EmulatorClient - - dkgAddress sdk.Address - dkgAccountKey *sdk.AccountKey - dkgSigner sdkcrypto.Signer + env templates.Environment + blockchain emulator.Emulator + emulatorClient *utils.EmulatorClient + serviceAccountAddress sdk.Address + dkgAddress sdk.Address + dkgAccountKey *sdk.AccountKey + dkgSigner sdkcrypto.Signer } func TestDKGClient(t *testing.T) { @@ -49,12 +48,14 @@ func TestDKGClient(t *testing.T) { // Setup Test creates the blockchain client, the emulated blockchain and deploys // the DKG contract to the emulator func (s *ClientSuite) SetupTest() { - blockchain, err := emulator.NewBlockchain(emulator.WithStorageLimitEnabled(false)) + blockchain, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) require.NoError(s.T(), err) s.blockchain = blockchain s.emulatorClient = utils.NewEmulatorClient(blockchain) - + s.serviceAccountAddress = sdk.Address(s.blockchain.ServiceKey().Address) // deploy contract s.deployDKGContract() @@ -68,7 +69,7 @@ func (s *ClientSuite) deployDKGContract() { code := contracts.FlowDKG() // deploy the contract to the emulator - dkgAddress, err := s.blockchain.CreateAccount([]*sdk.AccountKey{accountKey}, []sdktemplates.Contract{ + dkgAddress, err := s.emulatorClient.CreateAccount([]*sdk.AccountKey{accountKey}, []sdktemplates.Contract{ { Name: "FlowDKG", Source: string(code), @@ -146,19 +147,9 @@ func (s *ClientSuite) TestNilDKGSubmission() { // prepare DKG clients := s.prepareDKG(participants) - // generate list of public keys - numberOfNodes := len(participants) - publicKeys := make([]crypto.PublicKey, 0, numberOfNodes+1) - for i := 0; i < numberOfNodes; i++ { - publicKeys = append(publicKeys, nil) - } - - // create a nil group public key - var groupPublicKey crypto.PublicKey - // submit empty nil keys for each participant for _, client := range clients { - err := client.SubmitResult(groupPublicKey, publicKeys) + err := client.SubmitEmptyResult() require.NoError(s.T(), err) } } @@ -175,14 +166,16 @@ func (s *ClientSuite) TestSubmitResult() { // generate list of public keys numberOfNodes := len(participants) publicKeys := make([]crypto.PublicKey, 0, numberOfNodes) + indexMap := make(flow.DKGIndexMap, numberOfNodes) for i := 0; i < numberOfNodes; i++ { privateKey := unittest.KeyFixture(crypto.BLSBLS12381) publicKeys = append(publicKeys, privateKey.PublicKey()) + indexMap[participants[i]] = i } // create a group public key groupPublicKey := unittest.KeyFixture(crypto.BLSBLS12381).PublicKey() - err := clients[0].SubmitResult(groupPublicKey, publicKeys) + err := clients[0].SubmitParametersAndResult(indexMap, groupPublicKey, publicKeys) require.NoError(s.T(), err) } @@ -202,7 +195,7 @@ func (s *ClientSuite) prepareDKG(participants []flow.Identifier) []*dkg.Client { // 
create account key, address and signer for participant accountKey, signer := test.AccountKeyGenerator().NewWithSigner() - address, err := s.blockchain.CreateAccount([]*sdk.AccountKey{accountKey}, nil) + address, err := s.emulatorClient.CreateAccount([]*sdk.AccountKey{accountKey}, nil) require.NoError(s.T(), err) accountKeys[index], addresses[index], signers[index] = accountKey, address, signer @@ -229,18 +222,18 @@ func (s *ClientSuite) setUpAdmin() { // set up admin resource setUpAdminTx := sdk.NewTransaction(). - SetScript(templates.GeneratePublishDKGParticipantScript(s.env)). - SetGasLimit(9999). - SetProposalKey(s.blockchain.ServiceKey().Address, s.blockchain.ServiceKey().Index, + SetScript(templates.GeneratePublishDKGAdminScript(s.env)). + SetComputeLimit(9999). + SetProposalKey(s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). AddAuthorizer(s.dkgAddress) signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) s.signAndSubmit(setUpAdminTx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.dkgAddress}, + []sdk.Address{s.serviceAccountAddress, s.dkgAddress}, []sdkcrypto.Signer{signer, s.dkgSigner}, ) } @@ -258,10 +251,10 @@ func (s *ClientSuite) startDKGWithParticipants(nodeIDs []flow.Identifier) { // start DKG using admin resource startDKGTx := sdk.NewTransaction(). SetScript(templates.GenerateStartDKGScript(s.env)). - SetGasLimit(9999). - SetProposalKey(s.blockchain.ServiceKey().Address, s.blockchain.ServiceKey().Index, + SetComputeLimit(9999). + SetProposalKey(s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). AddAuthorizer(s.dkgAddress) err := startDKGTx.AddArgument(cadence.NewArray(valueNodeIDs)) @@ -271,7 +264,7 @@ func (s *ClientSuite) startDKGWithParticipants(nodeIDs []flow.Identifier) { require.NoError(s.T(), err) s.signAndSubmit(startDKGTx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.dkgAddress}, + []sdk.Address{s.serviceAccountAddress, s.dkgAddress}, []sdkcrypto.Signer{signer, s.dkgSigner}, ) @@ -289,10 +282,10 @@ func (s *ClientSuite) createParticipant(nodeID flow.Identifier, authoriser sdk.A // create DKG participant createParticipantTx := sdk.NewTransaction(). SetScript(templates.GenerateCreateDKGParticipantScript(s.env)). - SetGasLimit(9999). - SetProposalKey(s.blockchain.ServiceKey().Address, s.blockchain.ServiceKey().Index, + SetComputeLimit(9999). + SetProposalKey(s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). 
AddAuthorizer(authoriser) err := createParticipantTx.AddArgument(cadence.NewAddress(s.dkgAddress)) @@ -307,7 +300,7 @@ func (s *ClientSuite) createParticipant(nodeID flow.Identifier, authoriser sdk.A require.NoError(s.T(), err) s.signAndSubmit(createParticipantTx, - []sdk.Address{s.blockchain.ServiceKey().Address, authoriser}, + []sdk.Address{s.serviceAccountAddress, authoriser}, []sdkcrypto.Signer{s2, signer}, ) diff --git a/integration/dkg/dkg_client_wrapper.go b/integration/dkg/dkg_client_wrapper.go index d23ded924d3..e46a5fb52c8 100644 --- a/integration/dkg/dkg_client_wrapper.go +++ b/integration/dkg/dkg_client_wrapper.go @@ -5,11 +5,13 @@ import ( "fmt" "time" + "github.com/onflow/flow-go/module" + + "github.com/onflow/crypto" "go.uber.org/atomic" sdk "github.com/onflow/flow-go-sdk" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" model "github.com/onflow/flow-go/model/messages" @@ -27,6 +29,8 @@ type DKGClientWrapper struct { enabled *atomic.Bool } +var _ module.DKGContractClient = (*DKGClientWrapper)(nil) + // NewDKGClientWrapper instantiates a new DKGClientWrapper func NewDKGClientWrapper(client *dkgmod.Client) *DKGClientWrapper { return &DKGClientWrapper{ @@ -76,10 +80,18 @@ func (c *DKGClientWrapper) ReadBroadcast(fromIndex uint, referenceBlock flow.Ide return c.client.ReadBroadcast(fromIndex, referenceBlock) } -// SubmitResult implements the DKGContractClient interface -func (c *DKGClientWrapper) SubmitResult(groupPubKey crypto.PublicKey, pubKeys []crypto.PublicKey) error { +// SubmitParametersAndResult implements the DKGContractClient interface +func (c *DKGClientWrapper) SubmitParametersAndResult(indexMap flow.DKGIndexMap, groupPubKey crypto.PublicKey, pubKeys []crypto.PublicKey) error { if !c.enabled.Load() { return fmt.Errorf("failed to submit DKG result: %w", errClientDisabled) } - return c.client.SubmitResult(groupPubKey, pubKeys) + return c.client.SubmitParametersAndResult(indexMap, groupPubKey, pubKeys) +} + +// SubmitEmptyResult implements the DKGContractClient interface +func (c *DKGClientWrapper) SubmitEmptyResult() error { + if !c.enabled.Load() { + return fmt.Errorf("failed to submit empty DKG result: %w", errClientDisabled) + } + return c.client.SubmitEmptyResult() } diff --git a/integration/dkg/dkg_emulator_suite.go b/integration/dkg/dkg_emulator_suite.go index c35faf22936..5a7310aa901 100644 --- a/integration/dkg/dkg_emulator_suite.go +++ b/integration/dkg/dkg_emulator_suite.go @@ -2,9 +2,13 @@ package dkg import ( "context" + "encoding/hex" "fmt" "os" + "github.com/onflow/crypto" + "golang.org/x/exp/slices" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -14,23 +18,23 @@ import ( jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/onflow/flow-core-contracts/lib/go/contracts" "github.com/onflow/flow-core-contracts/lib/go/templates" - emulator "github.com/onflow/flow-emulator" sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" sdktemplates "github.com/onflow/flow-go-sdk/templates" "github.com/onflow/flow-go-sdk/test" - "github.com/onflow/flow-go/module/metrics" dkgeng "github.com/onflow/flow-go/engine/consensus/dkg" "github.com/onflow/flow-go/engine/testutil" - "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/systemcontracts" + emulator "github.com/onflow/flow-go/integration/internal/emulator" "github.com/onflow/flow-go/integration/tests/lib" 
"github.com/onflow/flow-go/integration/utils" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/dkg" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/protocol/events/gadgets" "github.com/onflow/flow-go/storage/badger" @@ -46,17 +50,17 @@ type EmulatorSuite struct { chainID flow.ChainID hub *stub.Hub // in-mem test network env templates.Environment - blockchain *emulator.Blockchain + blockchain emulator.Emulator adminEmulatorClient *utils.EmulatorClient adminDKGContractClient *dkg.Client dkgAddress sdk.Address dkgAccountKey *sdk.AccountKey dkgSigner sdkcrypto.Signer checkDKGUnhappy bool // activate log hook for DKGBroker to check if the DKG core is flagging misbehaviours - - netIDs flow.IdentityList - nodeAccounts []*nodeAccount - nodes []*node + serviceAccountAddress sdk.Address + netIDs flow.IdentityList + nodeAccounts []*nodeAccount + nodes []*node } func (s *EmulatorSuite) SetupTest() { @@ -64,9 +68,13 @@ func (s *EmulatorSuite) SetupTest() { s.deployDKGContract() s.setupDKGAdmin() - s.netIDs = unittest.IdentityListFixture(numberOfNodes, unittest.WithRole(flow.RoleConsensus)) - for _, id := range s.netIDs { + boostrapNodesInfo := unittest.PrivateNodeInfosFixture(numberOfNodes, unittest.WithRole(flow.RoleConsensus)) + slices.SortFunc(boostrapNodesInfo, func(lhs, rhs bootstrap.NodeInfo) int { + return flow.IdentifierCanonical(lhs.NodeID, rhs.NodeID) + }) + for _, id := range boostrapNodesInfo { s.nodeAccounts = append(s.nodeAccounts, s.createAndFundAccount(id)) + s.netIDs = append(s.netIDs, id.Identity()) } for _, acc := range s.nodeAccounts { @@ -88,6 +96,7 @@ func (s *EmulatorSuite) BeforeTest(_, testName string) { } // We need to initialise the nodes with a list of identities that contain // all roles, otherwise there would be an error initialising the first epoch + identities := unittest.CompleteIdentitySet(s.netIDs...) for _, node := range s.nodes { s.initEngines(node, identities) @@ -109,14 +118,14 @@ func (s *EmulatorSuite) TearDownTest() { func (s *EmulatorSuite) initEmulator() { s.chainID = flow.Emulator - blockchain, err := emulator.NewBlockchain( + blockchain, err := emulator.New( emulator.WithTransactionExpiry(flow.DefaultTransactionExpiry), emulator.WithStorageLimitEnabled(false), ) - require.NoError(s.T(), err) + s.Require().NoError(err) s.blockchain = blockchain - + s.serviceAccountAddress = sdk.Address(s.blockchain.ServiceKey().Address) s.adminEmulatorClient = utils.NewEmulatorClient(blockchain) s.hub = stub.NewNetworkHub() @@ -129,7 +138,7 @@ func (s *EmulatorSuite) deployDKGContract() { dkgAccountKey, dkgAccountSigner := test.AccountKeyGenerator().NewWithSigner() // deploy the contract to the emulator - dkgAddress, err := s.blockchain.CreateAccount([]*sdk.AccountKey{dkgAccountKey}, []sdktemplates.Contract{ + dkgAddress, err := s.adminEmulatorClient.CreateAccount([]*sdk.AccountKey{dkgAccountKey}, []sdktemplates.Contract{ { Name: "FlowDKG", Source: string(contracts.FlowDKG()), @@ -157,25 +166,25 @@ func (s *EmulatorSuite) deployDKGContract() { func (s *EmulatorSuite) setupDKGAdmin() { setUpAdminTx := sdk.NewTransaction(). - SetScript(templates.GeneratePublishDKGParticipantScript(s.env)). - SetGasLimit(9999). + SetScript(templates.GeneratePublishDKGAdminScript(s.env)). + SetComputeLimit(9999). 
SetProposalKey( - s.blockchain.ServiceKey().Address, + s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). AddAuthorizer(s.dkgAddress) signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) _, err = s.prepareAndSubmit(setUpAdminTx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.dkgAddress}, + []sdk.Address{s.serviceAccountAddress, s.dkgAddress}, []sdkcrypto.Signer{signer, s.dkgSigner}, ) require.NoError(s.T(), err) } // createAndFundAccount creates a nodeAccount and funds it in the emulator -func (s *EmulatorSuite) createAndFundAccount(netID *flow.Identity) *nodeAccount { +func (s *EmulatorSuite) createAndFundAccount(netID bootstrap.NodeInfo) *nodeAccount { accountPrivateKey := lib.RandomPrivateKey() accountKey := sdk.NewAccountKey(). FromPrivateKey(accountPrivateKey). @@ -186,11 +195,13 @@ func (s *EmulatorSuite) createAndFundAccount(netID *flow.Identity) *nodeAccount accountSigner, err := sdkcrypto.NewInMemorySigner(accountPrivateKey, accountKey.HashAlgo) require.NoError(s.T(), err) + sc := systemcontracts.SystemContractsForChain(s.chainID) + /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ create Flow account ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ - newAccountAddress, err := s.blockchain.CreateAccount( + newAccountAddress, err := s.adminEmulatorClient.CreateAccount( []*sdk.AccountKey{accountKey}, []sdktemplates.Contract{}, ) @@ -208,30 +219,29 @@ func (s *EmulatorSuite) createAndFundAccount(netID *flow.Identity) *nodeAccount import FlowToken from 0x%s transaction(amount: UFix64, recipient: Address) { - let sentVault: @FungibleToken.Vault - prepare(signer: AuthAccount) { - let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + let sentVault: @{FungibleToken.Vault} + prepare(signer: auth(BorrowValue) &Account) { + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("failed to borrow reference to sender vault") self.sentVault <- vaultRef.withdraw(amount: amount) } execute { let receiverRef = getAccount(recipient) - .getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() + .capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? panic("failed to borrow reference to recipient vault") receiverRef.deposit(from: <-self.sentVault) } }`, - fvm.FungibleTokenAddress(s.chainID.Chain()).Hex(), - fvm.FlowTokenAddress(s.chainID.Chain()).Hex(), + sc.FungibleToken.Address.Hex(), + sc.FlowToken.Address.Hex(), ))). - AddAuthorizer(s.blockchain.ServiceKey().Address). + AddAuthorizer(s.serviceAccountAddress). SetProposalKey( - s.blockchain.ServiceKey().Address, + s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber, ). 
- SetPayer(s.blockchain.ServiceKey().Address) + SetPayer(s.serviceAccountAddress) err = fundAccountTx.AddArgument(cadence.UFix64(1_000_000)) require.NoError(s.T(), err) @@ -240,7 +250,7 @@ func (s *EmulatorSuite) createAndFundAccount(netID *flow.Identity) *nodeAccount signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) _, err = s.prepareAndSubmit(fundAccountTx, - []sdk.Address{s.blockchain.ServiceKey().Address}, + []sdk.Address{s.serviceAccountAddress}, []sdkcrypto.Signer{signer}, ) require.NoError(s.T(), err) @@ -301,12 +311,12 @@ func (s *EmulatorSuite) startDKGWithParticipants(accounts []*nodeAccount) { // start DKG using admin resource startDKGTx := sdk.NewTransaction(). SetScript(templates.GenerateStartDKGScript(s.env)). - SetGasLimit(9999). + SetComputeLimit(9999). SetProposalKey( - s.blockchain.ServiceKey().Address, + s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). - SetPayer(s.blockchain.ServiceKey().Address). + SetPayer(s.serviceAccountAddress). AddAuthorizer(s.dkgAddress) err := startDKGTx.AddArgument(cadence.NewArray(valueNodeIDs)) @@ -314,7 +324,7 @@ func (s *EmulatorSuite) startDKGWithParticipants(accounts []*nodeAccount) { signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) _, err = s.prepareAndSubmit(startDKGTx, - []sdk.Address{s.blockchain.ServiceKey().Address, s.dkgAddress}, + []sdk.Address{s.serviceAccountAddress, s.dkgAddress}, []sdkcrypto.Signer{signer, s.dkgSigner}, ) require.NoError(s.T(), err) @@ -328,9 +338,9 @@ func (s *EmulatorSuite) startDKGWithParticipants(accounts []*nodeAccount) { func (s *EmulatorSuite) claimDKGParticipant(node *node) { createParticipantTx := sdk.NewTransaction(). SetScript(templates.GenerateCreateDKGParticipantScript(s.env)). - SetGasLimit(9999). + SetComputeLimit(9999). SetProposalKey( - s.blockchain.ServiceKey().Address, + s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber, ). @@ -346,7 +356,7 @@ func (s *EmulatorSuite) claimDKGParticipant(node *node) { signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) _, err = s.prepareAndSubmit(createParticipantTx, - []sdk.Address{node.account.accountAddress, s.blockchain.ServiceKey().Address, s.dkgAddress}, + []sdk.Address{node.account.accountAddress, s.serviceAccountAddress, s.dkgAddress}, []sdkcrypto.Signer{node.account.accountSigner, signer, s.dkgSigner}, ) require.NoError(s.T(), err) @@ -367,21 +377,21 @@ func (s *EmulatorSuite) sendDummyTx() (*flow.Block, error) { createAccountTx, err := sdktemplates.CreateAccount( []*sdk.AccountKey{test.AccountKeyGenerator().New()}, []sdktemplates.Contract{}, - s.blockchain.ServiceKey().Address) + s.serviceAccountAddress) if err != nil { return nil, err } createAccountTx. SetProposalKey( - s.blockchain.ServiceKey().Address, + s.serviceAccountAddress, s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber). 
- SetPayer(s.blockchain.ServiceKey().Address) + SetPayer(s.serviceAccountAddress) signer, err := s.blockchain.ServiceKey().Signer() require.NoError(s.T(), err) block, err := s.prepareAndSubmit(createAccountTx, - []sdk.Address{s.blockchain.ServiceKey().Address}, + []sdk.Address{s.serviceAccountAddress}, []sdkcrypto.Signer{signer}, ) return block, err @@ -390,31 +400,41 @@ func (s *EmulatorSuite) sendDummyTx() (*flow.Block, error) { func (s *EmulatorSuite) isDKGCompleted() bool { template := templates.GenerateGetDKGCompletedScript(s.env) value := s.executeScript(template, nil) - return value.ToGoValue().(bool) + return bool(value.(cadence.Bool)) } -func (s *EmulatorSuite) getResult() []string { - script := fmt.Sprintf(` - import FlowDKG from 0x%s - - pub fun main(): [String?]? { - return FlowDKG.dkgCompleted() - } `, - s.env.DkgAddress, - ) - - res := s.executeScript([]byte(script), nil) - value := res.(cadence.Optional).ToGoValue() +// getParametersAndResult retrieves the DKG setup parameters (`flow.DKGIndexMap`) and the DKG result from the DKG white-board smart contract. +func (s *EmulatorSuite) getParametersAndResult() (flow.DKGIndexMap, crypto.PublicKey, []crypto.PublicKey) { + res := s.executeScript(templates.GenerateGetDKGCanonicalFinalSubmissionScript(s.env), nil) + value := res.(cadence.Optional).Value if value == nil { - return []string{} + s.Fail("DKG result is nil") } - dkgResult := []string{} - for _, item := range value.([]interface{}) { - s := item.(string) - dkgResult = append(dkgResult, s) + + decodePubkey := func(r string) crypto.PublicKey { + pkBytes, err := hex.DecodeString(r) + require.NoError(s.T(), err) + pk, err := crypto.DecodePublicKey(crypto.BLSBLS12381, pkBytes) + require.NoError(s.T(), err) + return pk } - return dkgResult + fields := value.(cadence.Struct).FieldsMappedByName() + groupKey := decodePubkey(string(UnwrapOptional[cadence.String](fields["groupPubKey"]))) + + dkgKeyShares := CadenceArrayTo(UnwrapOptional[cadence.Array](fields["pubKeys"]), func(value cadence.Value) crypto.PublicKey { + return decodePubkey(string(value.(cadence.String))) + }) + + cdcIndexMap := CDCToDKGIDMapping(UnwrapOptional[cadence.Dictionary](fields["idMapping"])) + indexMap := make(flow.DKGIndexMap, len(cdcIndexMap)) + for k, v := range cdcIndexMap { + nodeID, err := flow.HexStringToIdentifier(k) + require.NoError(s.T(), err) + indexMap[nodeID] = v + } + + return indexMap, groupKey, dkgKeyShares } func (s *EmulatorSuite) initEngines(node *node, ids flow.IdentityList) { @@ -428,7 +448,7 @@ func (s *EmulatorSuite) initEngines(node *node, ids flow.IdentityList) { // dkgState is used to store the private key resulting from the node's // participation in the DKG run - dkgState, err := badger.NewDKGState(core.Metrics, core.SecretsDB) + dkgState, err := badger.NewRecoverableRandomBeaconStateMachine(core.Metrics, core.SecretsDB, core.Me.NodeID()) s.Require().NoError(err) // brokerTunnel is used to communicate between the messaging engine and the @@ -459,12 +479,6 @@ func (s *EmulatorSuite) initEngines(node *node, ids flow.IdentityList) { controllerFactoryLogger = zerolog.New(os.Stdout).Hook(hook) } - // create a config with no delays for tests - config := dkg.ControllerConfig{ - BaseStartDelay: 0, - BaseHandleFirstBroadcastDelay: 0, - } - // the reactor engine reacts to new views being finalized and drives the // DKG protocol reactorEngine := dkgeng.NewReactorEngine( @@ -477,7 +491,6 @@ func (s *EmulatorSuite) initEngines(node *node, ids flow.IdentityList) { core.Me, 
[]module.DKGContractClient{node.dkgContractClient}, brokerTunnel, - config, ), viewsObserver, ) @@ -488,7 +501,6 @@ func (s *EmulatorSuite) initEngines(node *node, ids flow.IdentityList) { node.GenericNode = core node.messagingEngine = messagingEngine node.dkgState = dkgState - node.safeBeaconKeys = badger.NewSafeBeaconPrivateKeys(dkgState) node.reactorEngine = reactorEngine } @@ -529,3 +541,26 @@ func (s *EmulatorSuite) executeScript(script []byte, arguments [][]byte) cadence require.True(s.T(), result.Succeeded()) return result.Value } + +func UnwrapOptional[T cadence.Value](optional cadence.Value) T { + return optional.(cadence.Optional).Value.(T) +} + +func CadenceArrayTo[T any](arr cadence.Value, convert func(cadence.Value) T) []T { + out := make([]T, len(arr.(cadence.Array).Values)) + for i := range out { + out[i] = convert(arr.(cadence.Array).Values[i]) + } + return out +} + +func CDCToDKGIDMapping(cdc cadence.Value) map[string]int { + idMappingCDC := cdc.(cadence.Dictionary) + idMapping := make(map[string]int, len(idMappingCDC.Pairs)) + for _, pair := range idMappingCDC.Pairs { + nodeID := string(pair.Key.(cadence.String)) + index := pair.Value.(cadence.Int).Int() + idMapping[nodeID] = index + } + return idMapping +} diff --git a/integration/dkg/dkg_emulator_test.go b/integration/dkg/dkg_emulator_test.go index 8d349bd8899..4ad988f51a8 100644 --- a/integration/dkg/dkg_emulator_test.go +++ b/integration/dkg/dkg_emulator_test.go @@ -1,7 +1,6 @@ package dkg import ( - "encoding/hex" "fmt" "math/rand" "testing" @@ -11,7 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/flow" msig "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/utils/unittest" @@ -50,21 +50,25 @@ func (s *EmulatorSuite) runTest(goodNodes int, emulatorProblems bool) { DKGPhase2FinalView: 200, DKGPhase3FinalView: 250, FinalView: 300, - Participants: s.netIDs, - RandomSource: []byte("random bytes for seed"), + Participants: s.netIDs.ToSkeleton(), + RandomSource: unittest.EpochSetupRandomSourceFixture(), } // create the EpochSetup that will trigger the next DKG run with all the // desired parameters nextEpochSetup := flow.EpochSetup{ Counter: currentCounter + 1, - Participants: s.netIDs, - RandomSource: []byte("random bytes for seed"), + Participants: s.netIDs.ToSkeleton(), + RandomSource: unittest.EpochSetupRandomSourceFixture(), FirstView: 301, FinalView: 600, } - firstBlock := &flow.Header{View: 100} + firstBlock := &flow.Header{ + HeaderBody: flow.HeaderBody{ + View: 100, + }, + } for _, node := range nodes { node.setEpochs(s.T(), currentEpochSetup, nextEpochSetup, firstBlock) @@ -104,9 +108,9 @@ func (s *EmulatorSuite) runTest(goodNodes int, emulatorProblems bool) { if err == nil { for _, node := range nodes { - node.ProtocolEvents.BlockFinalized(block.Header) + node.ProtocolEvents.BlockFinalized(block.ToHeader()) } - view = int(block.Header.View) + view = int(block.View) } } @@ -127,22 +131,7 @@ func (s *EmulatorSuite) runTest(goodNodes int, emulatorProblems bool) { // the result is an array of public keys where the first item is the group // public key - res := s.getResult() - - assert.Equal(s.T(), len(s.nodes)+1, len(res)) - pubKeys := make([]crypto.PublicKey, 0, len(res)) - for _, r := range res { - pkBytes, err := hex.DecodeString(r) - assert.NoError(s.T(), err) - pk, err := crypto.DecodePublicKey(crypto.BLSBLS12381, pkBytes) - assert.NoError(s.T(), 
err) - pubKeys = append(pubKeys, pk) - } - - groupPubKeyBytes, err := hex.DecodeString(res[0]) - assert.NoError(s.T(), err) - groupPubKey, err := crypto.DecodePublicKey(crypto.BLSBLS12381, groupPubKeyBytes) - assert.NoError(s.T(), err) + _, groupPubKey, pubKeys := s.getParametersAndResult() tag := "some tag" hasher := msig.NewBLSHasher(tag) @@ -151,8 +140,7 @@ func (s *EmulatorSuite) runTest(goodNodes int, emulatorProblems bool) { signatures := []crypto.Signature{} indices := []int{} for i, n := range nodes { - // TODO: to replace with safeBeaconKeys - beaconKey, err := n.dkgState.RetrieveMyBeaconPrivateKey(nextEpochSetup.Counter) + beaconKey, err := n.dkgState.UnsafeRetrieveMyBeaconPrivateKey(nextEpochSetup.Counter) require.NoError(s.T(), err) signature, err := beaconKey.Sign(sigData, hasher) @@ -161,9 +149,9 @@ func (s *EmulatorSuite) runTest(goodNodes int, emulatorProblems bool) { signatures = append(signatures, signature) indices = append(indices, i) - ok, err := pubKeys[i+1].Verify(signature, sigData, hasher) + ok, err := pubKeys[i].Verify(signature, sigData, hasher) require.NoError(s.T(), err) - assert.True(s.T(), ok, fmt.Sprintf("signature %d share doesn't verify under the public key share", i+1)) + assert.True(s.T(), ok, fmt.Sprintf("signature %d share doesn't verify under the public key share", i)) } // shuffle the signatures and indices before constructing the group diff --git a/integration/dkg/dkg_whiteboard_client.go b/integration/dkg/dkg_whiteboard_client.go index 1dc4213029e..9928f1bfaf5 100644 --- a/integration/dkg/dkg_whiteboard_client.go +++ b/integration/dkg/dkg_whiteboard_client.go @@ -3,7 +3,10 @@ package dkg import ( "sync" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/module" + + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" ) @@ -16,6 +19,8 @@ type WhiteboardClient struct { whiteboard *whiteboard } +var _ module.DKGContractClient = (*WhiteboardClient)(nil) + // NewWhiteboardClient instantiates a new WhiteboardClient with a reference to // an existing whiteboard object. func NewWhiteboardClient(nodeID flow.Identifier, whiteboard *whiteboard) *WhiteboardClient { @@ -43,10 +48,17 @@ func (wc *WhiteboardClient) ReadBroadcast(fromIndex uint, referenceBlock flow.Id return msgs, nil } -// SubmitResult implements the DKGContractClient interface. It publishes the +// SubmitParametersAndResult implements the DKGContractClient interface. It publishes the // DKG results under the node's ID. -func (wc *WhiteboardClient) SubmitResult(groupKey crypto.PublicKey, pubKeys []crypto.PublicKey) error { - wc.whiteboard.submit(wc.nodeID, groupKey, pubKeys) +func (wc *WhiteboardClient) SubmitParametersAndResult(indexMap flow.DKGIndexMap, groupKey crypto.PublicKey, pubKeys []crypto.PublicKey) error { + wc.whiteboard.submit(wc.nodeID, groupKey, pubKeys, indexMap) + return nil +} + +// SubmitEmptyResult implements the DKGContractClient interface. It publishes the +// empty DKG result under the node's ID. 
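+// An empty submission is recorded on the whiteboard as a nil group key, nil key shares and a nil index map.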
+func (wc *WhiteboardClient) SubmitEmptyResult() error { + wc.whiteboard.submit(wc.nodeID, nil, nil, nil) return nil } @@ -67,6 +79,7 @@ type whiteboard struct { type result struct { groupKey crypto.PublicKey pubKeys []crypto.PublicKey + indexMap flow.DKGIndexMap } // Fingerprint implements the Fingerprinter interface used by MakeID @@ -98,11 +111,16 @@ func (w *whiteboard) read(fromIndex uint) []messages.BroadcastDKGMessage { return w.messages[fromIndex:] } -func (w *whiteboard) submit(nodeID flow.Identifier, groupKey crypto.PublicKey, pubKeys []crypto.PublicKey) { +func (w *whiteboard) submit( + nodeID flow.Identifier, + groupKey crypto.PublicKey, + pubKeys []crypto.PublicKey, + indexMap flow.DKGIndexMap, +) { w.Lock() defer w.Unlock() - result := result{groupKey: groupKey, pubKeys: pubKeys} + result := result{groupKey: groupKey, pubKeys: pubKeys, indexMap: indexMap} resultHash := flow.MakeID(result) w.results[resultHash] = result diff --git a/integration/dkg/dkg_whiteboard_test.go b/integration/dkg/dkg_whiteboard_test.go index a7b00fa1172..c2e6230bec8 100644 --- a/integration/dkg/dkg_whiteboard_test.go +++ b/integration/dkg/dkg_whiteboard_test.go @@ -6,18 +6,21 @@ import ( "testing" "time" + "golang.org/x/exp/slices" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/crypto" - "github.com/onflow/flow-go/crypto" dkgeng "github.com/onflow/flow-go/engine/consensus/dkg" "github.com/onflow/flow-go/engine/testutil" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/dkg" + "github.com/onflow/flow-go/module/metrics" msig "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/network/stub" "github.com/onflow/flow-go/state/protocol/events/gadgets" @@ -35,14 +38,19 @@ func createNodes( hub *stub.Hub, chainID flow.ChainID, whiteboard *whiteboard, - conIdentities flow.IdentityList, + conIdentities []bootstrap.NodeInfo, currentEpochSetup flow.EpochSetup, nextEpochSetup flow.EpochSetup, - firstBlock *flow.Header) ([]*node, flow.IdentityList) { + firstBlock *flow.Header) []*node { + + identities := make(flow.IdentityList, 0, len(conIdentities)) + for _, identity := range conIdentities { + identities = append(identities, identity.Identity()) + } // We need to initialise the nodes with a list of identities that contain // all roles, otherwise there would be an error initialising the first epoch - identities := unittest.CompleteIdentitySet(conIdentities...) + identities = unittest.CompleteIdentitySet(identities...) nodes := []*node{} for _, id := range conIdentities { @@ -57,14 +65,14 @@ func createNodes( firstBlock)) } - return nodes, conIdentities + return nodes } // createNode instantiates a node with a network hub, a whiteboard reference, // and a pre-set EpochSetup that will be used to trigger the next DKG run. 
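+// Note: the node is built from a private bootstrap.NodeInfo (see PrivateNodeInfosFixture), which, unlike the bare *flow.Identity it replaces, also carries the node's private staking and networking keys.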
func createNode( - id *flow.Identity, + id bootstrap.NodeInfo, ids []*flow.Identity, hub *stub.Hub, chainID flow.ChainID, @@ -83,30 +91,29 @@ func createNode( // dkgState is used to store the private key resulting from the node's // participation in the DKG run - dkgState, err := badger.NewDKGState(core.Metrics, core.SecretsDB) + dkgState, err := badger.NewRecoverableRandomBeaconStateMachine(core.Metrics, core.SecretsDB, core.Me.NodeID()) require.NoError(t, err) - // configure the state snapthost at firstBlock to return the desired + // configure the state snapshot at firstBlock to return the desired // Epochs - currentEpoch := new(protocolmock.Epoch) - currentEpoch.On("Counter").Return(currentSetup.Counter, nil) - currentEpoch.On("InitialIdentities").Return(currentSetup.Participants, nil) - currentEpoch.On("DKGPhase1FinalView").Return(currentSetup.DKGPhase1FinalView, nil) - currentEpoch.On("DKGPhase2FinalView").Return(currentSetup.DKGPhase2FinalView, nil) - currentEpoch.On("DKGPhase3FinalView").Return(currentSetup.DKGPhase3FinalView, nil) - currentEpoch.On("RandomSource").Return(nextSetup.RandomSource, nil) - - nextEpoch := new(protocolmock.Epoch) - nextEpoch.On("Counter").Return(nextSetup.Counter, nil) - nextEpoch.On("InitialIdentities").Return(nextSetup.Participants, nil) - nextEpoch.On("RandomSource").Return(nextSetup.RandomSource, nil) + currentEpoch := new(protocolmock.CommittedEpoch) + currentEpoch.On("Counter").Return(currentSetup.Counter) + currentEpoch.On("InitialIdentities").Return(currentSetup.Participants) + currentEpoch.On("DKGPhase1FinalView").Return(currentSetup.DKGPhase1FinalView) + currentEpoch.On("DKGPhase2FinalView").Return(currentSetup.DKGPhase2FinalView) + currentEpoch.On("DKGPhase3FinalView").Return(currentSetup.DKGPhase3FinalView) + currentEpoch.On("RandomSource").Return(nextSetup.RandomSource) + + nextEpoch := new(protocolmock.TentativeEpoch) + nextEpoch.On("Counter").Return(nextSetup.Counter) + nextEpoch.On("InitialIdentities").Return(nextSetup.Participants) epochQuery := mocks.NewEpochQuery(t, currentSetup.Counter) - epochQuery.Add(currentEpoch) - epochQuery.Add(nextEpoch) + epochQuery.AddCommitted(currentEpoch) + epochQuery.AddTentative(nextEpoch) snapshot := new(protocolmock.Snapshot) snapshot.On("Epochs").Return(epochQuery) - snapshot.On("Phase").Return(flow.EpochPhaseStaking, nil) + snapshot.On("EpochPhase").Return(flow.EpochPhaseStaking, nil) snapshot.On("Head").Return(firstBlock, nil) state := new(protocolmock.ParticipantState) state.On("AtBlockID", firstBlock.ID()).Return(snapshot) @@ -138,12 +145,6 @@ func createNode( }) controllerFactoryLogger := zerolog.New(os.Stdout).Hook(hook) - // create a config with no delays for tests - config := dkg.ControllerConfig{ - BaseStartDelay: 0, - BaseHandleFirstBroadcastDelay: 0, - } - // the reactor engine reacts to new views being finalized and drives the // DKG protocol reactorEngine := dkgeng.NewReactorEngine( @@ -156,7 +157,6 @@ func createNode( core.Me, []module.DKGContractClient{NewWhiteboardClient(id.NodeID, whiteboard)}, brokerTunnel, - config, ), viewsObserver, ) @@ -164,13 +164,10 @@ func createNode( // reactorEngine consumes the EpochSetupPhaseStarted event core.ProtocolEvents.AddConsumer(reactorEngine) - safeBeaconKeys := badger.NewSafeBeaconPrivateKeys(dkgState) - node := node{ t: t, GenericNode: core, dkgState: dkgState, - safeBeaconKeys: safeBeaconKeys, messagingEngine: messagingEngine, reactorEngine: reactorEngine, } @@ -194,7 +191,14 @@ func TestWithWhiteboard(t *testing.T) { // we run the
DKG protocol with N consensus nodes
 	N := 10
-	conIdentities := unittest.IdentityListFixture(N, unittest.WithRole(flow.RoleConsensus))
+	bootstrapNodesInfo := unittest.PrivateNodeInfosFixture(N, unittest.WithRole(flow.RoleConsensus))
+	slices.SortFunc(bootstrapNodesInfo, func(lhs, rhs bootstrap.NodeInfo) int {
+		return flow.IdentifierCanonical(lhs.NodeID, rhs.NodeID)
+	})
+	conIdentities := make(flow.IdentitySkeletonList, 0, len(bootstrapNodesInfo))
+	for _, identity := range bootstrapNodesInfo {
+		conIdentities = append(conIdentities, &identity.Identity().IdentitySkeleton)
+	}
 
 	// The EpochSetup event is received at view 100. The phase transitions are
 	// at views 150, 200, and 250. In between phase transitions, the controller
@@ -214,7 +218,11 @@ func TestWithWhiteboard(t *testing.T) {
 	blocks := make(map[uint64]*flow.Header)
 	var view uint64
 	for view = 100; view <= 250; view += dkgeng.DefaultPollStep {
-		blocks[view] = &flow.Header{View: view}
+		blocks[view] = &flow.Header{
+			HeaderBody: flow.HeaderBody{
+				View: view,
+			},
+		}
 	}
 
 	firstBlock := blocks[100]
@@ -227,27 +235,28 @@ func TestWithWhiteboard(t *testing.T) {
 		DKGPhase2FinalView: 200,
 		DKGPhase3FinalView: 250,
 		FinalView:          300,
-		Participants:       conIdentities,
-		RandomSource:       []byte("random bytes for seed"),
+		Participants:       conIdentities.ToSkeleton(),
+		RandomSource:       unittest.EpochSetupRandomSourceFixture(),
 	}
 
 	// create the EpochSetup that will trigger the next DKG run with all the
 	// desired parameters
 	nextEpochSetup := flow.EpochSetup{
 		Counter:      currentCounter + 1,
-		Participants: conIdentities,
-		RandomSource: []byte("random bytes for seed"),
+		Participants: conIdentities.ToSkeleton(),
+		RandomSource: unittest.EpochSetupRandomSourceFixture(),
 	}
 
-	nodes, _ := createNodes(
+	nodes := createNodes(
 		t,
 		hub,
 		chainID,
 		whiteboard,
-		conIdentities,
+		bootstrapNodesInfo,
 		currentEpochSetup,
 		nextEpochSetup,
-		firstBlock)
+		firstBlock,
+	)
 
 	for _, node := range nodes {
 		node.Start()
@@ -290,9 +299,7 @@ func TestWithWhiteboard(t *testing.T) {
 	signatures := []crypto.Signature{}
 	indices := []int{}
 	for i, n := range nodes {
-
-		// TODO: to replace with safeBeaconKeys
-		beaconKey, err := n.dkgState.RetrieveMyBeaconPrivateKey(nextEpochSetup.Counter)
+		beaconKey, err := n.dkgState.UnsafeRetrieveMyBeaconPrivateKey(nextEpochSetup.Counter)
 		require.NoError(t, err)
 
 		signature, err := beaconKey.Sign(sigData, hasher)
diff --git a/integration/dkg/node.go b/integration/dkg/node.go
index acd288e53dd..62197239685 100644
--- a/integration/dkg/node.go
+++ b/integration/dkg/node.go
@@ -4,10 +4,9 @@ import (
 	"crypto"
 	"testing"
 
-	"github.com/stretchr/testify/require"
-
 	sdk "github.com/onflow/flow-go-sdk"
 	sdkcrypto "github.com/onflow/flow-go-sdk/crypto"
+
 	"github.com/onflow/flow-go/engine/consensus/dkg"
 	testmock "github.com/onflow/flow-go/engine/testutil/mock"
 	"github.com/onflow/flow-go/model/bootstrap"
@@ -19,7 +18,7 @@ import (
 )
 
 type nodeAccount struct {
-	netID      *flow.Identity
+	netID      bootstrap.NodeInfo
 	privKey    crypto.PrivateKey
 	accountKey *sdk.AccountKey
 	accountID  string
@@ -36,7 +35,6 @@ type node struct {
 	account           *nodeAccount
 	dkgContractClient *DKGClientWrapper
 	dkgState          storage.DKGState
-	safeBeaconKeys    storage.SafeBeaconKeys
 	messagingEngine   *dkg.MessagingEngine
 	reactorEngine     *dkg.ReactorEngine
 }
@@ -54,39 +52,34 @@ func (n *node) Ready() <-chan struct{} {
 }
 
 func (n *node) Done() <-chan struct{} {
-	require.NoError(n.t, n.PublicDB.Close())
-	require.NoError(n.t, n.SecretsDB.Close())
 	return util.AllDone(n.messagingEngine, n.reactorEngine)
 }
 
-// setEpochs configures the mock state snapthost at firstBlock to return the
-// desired current and next epochs
+// setEpochs configures the mock state snapshot at firstBlock to return the
+// desired current and next epochs.
+// The next epoch is set up as tentative, since this helper is only used by the DKG emulator test
+// and DKG events occur during the `flow.EpochPhaseSetup` phase before the next epoch is committed.
 func (n *node) setEpochs(t *testing.T, currentSetup flow.EpochSetup, nextSetup flow.EpochSetup, firstBlock *flow.Header) {
+	currentEpoch := new(protocolmock.CommittedEpoch)
+	currentEpoch.On("Counter").Return(currentSetup.Counter)
+	currentEpoch.On("InitialIdentities").Return(currentSetup.Participants)
+	currentEpoch.On("DKGPhase1FinalView").Return(currentSetup.DKGPhase1FinalView)
+	currentEpoch.On("DKGPhase2FinalView").Return(currentSetup.DKGPhase2FinalView)
+	currentEpoch.On("DKGPhase3FinalView").Return(currentSetup.DKGPhase3FinalView)
+	currentEpoch.On("FinalView").Return(currentSetup.FinalView)
+	currentEpoch.On("FirstView").Return(currentSetup.FirstView)
+	currentEpoch.On("RandomSource").Return(nextSetup.RandomSource)
 
-	currentEpoch := new(protocolmock.Epoch)
-	currentEpoch.On("Counter").Return(currentSetup.Counter, nil)
-	currentEpoch.On("InitialIdentities").Return(currentSetup.Participants, nil)
-	currentEpoch.On("DKGPhase1FinalView").Return(currentSetup.DKGPhase1FinalView, nil)
-	currentEpoch.On("DKGPhase2FinalView").Return(currentSetup.DKGPhase2FinalView, nil)
-	currentEpoch.On("DKGPhase3FinalView").Return(currentSetup.DKGPhase3FinalView, nil)
-	currentEpoch.On("FinalView").Return(currentSetup.FinalView, nil)
-	currentEpoch.On("FirstView").Return(currentSetup.FirstView, nil)
-	currentEpoch.On("RandomSource").Return(nextSetup.RandomSource, nil)
-
-	nextEpoch := new(protocolmock.Epoch)
-	nextEpoch.On("Counter").Return(nextSetup.Counter, nil)
-	nextEpoch.On("InitialIdentities").Return(nextSetup.Participants, nil)
-	nextEpoch.On("RandomSource").Return(nextSetup.RandomSource, nil)
-	nextEpoch.On("DKG").Return(nil, nil) // no error means didn't run into EECC
-	nextEpoch.On("FirstView").Return(nextSetup.FirstView, nil)
-	nextEpoch.On("FinalView").Return(nextSetup.FinalView, nil)
+	nextEpoch := new(protocolmock.TentativeEpoch)
+	nextEpoch.On("Counter").Return(nextSetup.Counter)
+	nextEpoch.On("InitialIdentities").Return(nextSetup.Participants)
 
 	epochQuery := mocks.NewEpochQuery(t, currentSetup.Counter)
-	epochQuery.Add(currentEpoch)
-	epochQuery.Add(nextEpoch)
+	epochQuery.AddCommitted(currentEpoch)
+	epochQuery.AddTentative(nextEpoch)
 
 	snapshot := new(protocolmock.Snapshot)
 	snapshot.On("Epochs").Return(epochQuery)
-	snapshot.On("Phase").Return(flow.EpochPhaseStaking, nil)
+	snapshot.On("EpochPhase").Return(flow.EpochPhaseStaking, nil)
 	snapshot.On("Head").Return(firstBlock, nil)
 
 	state := new(protocolmock.ParticipantState)
 	state.On("AtBlockID", firstBlock.ID()).Return(snapshot)
diff --git a/integration/epochs/cluster_epoch_test.go b/integration/epochs/cluster_epoch_test.go
index fd2deb10606..44055500f40 100644
--- a/integration/epochs/cluster_epoch_test.go
+++ b/integration/epochs/cluster_epoch_test.go
@@ -3,20 +3,22 @@ package epochs
 import (
 	"encoding/hex"
 
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
 	"github.com/onflow/cadence"
 	jsoncdc "github.com/onflow/cadence/encoding/json"
+	"github.com/onflow/crypto"
 	"github.com/onflow/flow-core-contracts/lib/go/contracts"
 	"github.com/onflow/flow-core-contracts/lib/go/templates"
-	emulator "github.com/onflow/flow-emulator"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"github.com/stretchr/testify/suite"
 	sdk "github.com/onflow/flow-go-sdk"
 	sdkcrypto "github.com/onflow/flow-go-sdk/crypto"
 	sdktemplates "github.com/onflow/flow-go-sdk/templates"
 	"github.com/onflow/flow-go-sdk/test"
-	"github.com/onflow/flow-go/crypto"
+
+	emulator "github.com/onflow/flow-go/integration/internal/emulator"
 	"github.com/onflow/flow-go/integration/utils"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/model/flow/factory"
@@ -28,10 +30,10 @@ import (
 type Suite struct {
 	suite.Suite
 
-	env            templates.Environment
-	blockchain     *emulator.Blockchain
-	emulatorClient *utils.EmulatorClient
-
+	env                   templates.Environment
+	blockchain            *emulator.Blockchain
+	emulatorClient        *utils.EmulatorClient
+	serviceAccountAddress sdk.Address
 	// Quorum Certificate deployed account and address
 	qcAddress    sdk.Address
 	qcAccountKey *sdk.AccountKey
@@ -43,10 +45,12 @@ func (s *Suite) SetupTest() {
 
 	// create a new instance of the emulated blockchain
 	var err error
-	s.blockchain, err = emulator.NewBlockchain(emulator.WithStorageLimitEnabled(false))
+	s.blockchain, err = emulator.New(
+		emulator.WithStorageLimitEnabled(false),
+	)
 	s.Require().NoError(err)
 	s.emulatorClient = utils.NewEmulatorClient(s.blockchain)
-
+	s.serviceAccountAddress = sdk.Address(s.blockchain.ServiceKey().Address)
 	// deploy epoch qc contract
 	s.deployEpochQCContract()
 }
@@ -60,7 +64,7 @@ func (s *Suite) deployEpochQCContract() {
 	QCCode := contracts.FlowQC()
 
 	// deploy the contract to the emulator
-	QCAddress, err := s.blockchain.CreateAccount([]*sdk.AccountKey{QCAccountKey}, []sdktemplates.Contract{
+	QCAddress, err := s.emulatorClient.CreateAccount([]*sdk.AccountKey{QCAccountKey}, []sdktemplates.Contract{
 		{
 			Name:   "FlowClusterQC",
 			Source: string(QCCode),
@@ -78,10 +82,10 @@
 }
 
 // CreateClusterList creates a clustering with the nodes split evenly and returns the resulting `ClusterList`
-func (s *Suite) CreateClusterList(clusterCount, nodesPerCluster int) (flow.ClusterList, flow.IdentityList) {
+func (s *Suite) CreateClusterList(clusterCount, nodesPerCluster int) (flow.ClusterList, flow.IdentitySkeletonList) {
 	// create list of nodes to be used for the clustering
-	nodes := unittest.IdentityListFixture(clusterCount*nodesPerCluster, unittest.WithRole(flow.RoleCollection))
+	nodes := unittest.IdentityListFixture(clusterCount*nodesPerCluster, unittest.WithRole(flow.RoleCollection)).ToSkeleton()
 
 	// create cluster assignment
 	clusterAssignment := unittest.ClusterAssignment(uint(clusterCount), nodes)
@@ -98,17 +102,17 @@ func (s *Suite) PublishVoter() {
 
 	// sign and publish voter transaction
 	publishVoterTx := sdk.NewTransaction().
 		SetScript(templates.GeneratePublishVoterScript(s.env)).
-		SetGasLimit(9999).
-		SetProposalKey(s.blockchain.ServiceKey().Address,
+		SetComputeLimit(9999).
+		SetProposalKey(s.serviceAccountAddress,
 			s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber).
-		SetPayer(s.blockchain.ServiceKey().Address).
+		SetPayer(s.serviceAccountAddress).
 		AddAuthorizer(s.qcAddress)
 
 	signer, err := s.blockchain.ServiceKey().Signer()
 	require.NoError(s.T(), err)
 
 	s.SignAndSubmit(publishVoterTx,
-		[]sdk.Address{s.blockchain.ServiceKey().Address, s.qcAddress},
+		[]sdk.Address{s.serviceAccountAddress, s.qcAddress},
 		[]sdkcrypto.Signer{signer, s.qcSigner})
 }
 
@@ -118,10 +122,10 @@ func (s *Suite) StartVoting(clustering flow.ClusterList, clusterCount, nodesPerC
 	// submit admin transaction to start voting
 	startVotingTx := sdk.NewTransaction().
 		SetScript(templates.GenerateStartVotingScript(s.env)).
-		SetGasLimit(9999).
-		SetProposalKey(s.blockchain.ServiceKey().Address,
+		SetComputeLimit(9999).
+		SetProposalKey(s.serviceAccountAddress,
 			s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber).
-		SetPayer(s.blockchain.ServiceKey().Address).
+		SetPayer(s.serviceAccountAddress).
 		AddAuthorizer(s.qcAddress)
 
 	clusterIndices := make([]cadence.Value, 0, clusterCount)
@@ -142,14 +146,14 @@ func (s *Suite) StartVoting(clustering flow.ClusterList, clusterCount, nodesPerC
 			cdcNodeID, err := cadence.NewString(node.NodeID.String())
 			require.NoError(s.T(), err)
 			nodeIDs = append(nodeIDs, cdcNodeID)
-			nodeWeights = append(nodeWeights, cadence.NewUInt64(node.Weight))
+			nodeWeights = append(nodeWeights, cadence.NewUInt64(node.InitialWeight))
 		}
 
 		clusterNodeIDs[index] = cadence.NewArray(nodeIDs)
 		clusterNodeWeights[index] = cadence.NewArray(nodeWeights)
 	}
 
-	// add cluster indicies to tx argument
+	// add cluster indices to tx argument
 	err := startVotingTx.AddArgument(cadence.NewArray(clusterIndices))
 	require.NoError(s.T(), err)
 
@@ -165,7 +169,7 @@
 	require.NoError(s.T(), err)
 
 	s.SignAndSubmit(startVotingTx,
-		[]sdk.Address{s.blockchain.ServiceKey().Address, s.qcAddress},
+		[]sdk.Address{s.serviceAccountAddress, s.qcAddress},
 		[]sdkcrypto.Signer{signer, s.qcSigner})
 }
 
@@ -174,10 +178,10 @@
 	registerVoterTx := sdk.NewTransaction().
 		SetScript(templates.GenerateCreateVoterScript(s.env)).
-		SetGasLimit(9999).
-		SetProposalKey(s.blockchain.ServiceKey().Address,
+		SetComputeLimit(9999).
+		SetProposalKey(s.serviceAccountAddress,
 			s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber).
-		SetPayer(s.blockchain.ServiceKey().Address).
+		SetPayer(s.serviceAccountAddress).
 		AddAuthorizer(address)
 
 	err := registerVoterTx.AddArgument(cadence.NewAddress(s.qcAddress))
@@ -197,24 +201,24 @@ func (s *Suite) CreateVoterResource(address sdk.Address, nodeID flow.Identifier,
 	require.NoError(s.T(), err)
 
 	s.SignAndSubmit(registerVoterTx,
-		[]sdk.Address{s.blockchain.ServiceKey().Address, address},
+		[]sdk.Address{s.serviceAccountAddress, address},
 		[]sdkcrypto.Signer{signer, nodeSigner})
 }
 
 func (s *Suite) StopVoting() {
 	tx := sdk.NewTransaction().
 		SetScript(templates.GenerateStopVotingScript(s.env)).
-		SetGasLimit(9999).
-		SetProposalKey(s.blockchain.ServiceKey().Address,
+		SetComputeLimit(9999).
+		SetProposalKey(s.serviceAccountAddress,
 			s.blockchain.ServiceKey().Index, s.blockchain.ServiceKey().SequenceNumber).
-		SetPayer(s.blockchain.ServiceKey().Address).
+		SetPayer(s.serviceAccountAddress).
 		AddAuthorizer(s.qcAddress)
 
 	signer, err := s.blockchain.ServiceKey().Signer()
 	require.NoError(s.T(), err)
 
 	s.SignAndSubmit(tx,
-		[]sdk.Address{s.blockchain.ServiceKey().Address, s.qcAddress},
+		[]sdk.Address{s.serviceAccountAddress, s.qcAddress},
 		[]sdkcrypto.Signer{signer, s.qcSigner})
 }
 
@@ -227,7 +231,7 @@ func (s *Suite) NodeHasVoted(nodeID flow.Identifier) bool {
 		return false
 	}
 
-	return result.Value.ToGoValue().(bool)
+	return bool(result.Value.(cadence.Bool))
 }
 
 /**
diff --git a/integration/epochs/epoch_qc_test.go b/integration/epochs/epoch_qc_test.go
index b8912cdb644..afc1a34b9e0 100644
--- a/integration/epochs/epoch_qc_test.go
+++ b/integration/epochs/epoch_qc_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
 	sdk "github.com/onflow/flow-go-sdk"
@@ -46,7 +47,7 @@ func (s *Suite) TestEpochQuorumCertificate() {
 	clustering, nodes := s.CreateClusterList(clusterCount, nodesPerCluster)
 
 	// mock the epoch object to return counter 0 and clustering as our clusterList
-	epoch := &protomock.Epoch{}
+	epoch := &protomock.TentativeEpoch{}
 	epoch.On("Counter").Return(epochCounter, nil)
 	epoch.On("Clustering").Return(clustering, nil)
@@ -65,15 +66,25 @@
 		// find cluster and create root block
 		cluster, _, _ := clustering.ByNodeID(node.NodeID)
-		rootBlock := clusterstate.CanonicalRootBlock(uint64(epochCounter), cluster)
+		rootBlock, err := clusterstate.CanonicalRootBlock(uint64(epochCounter), cluster)
+		s.Require().NoError(err)
 
 		key, signer := test.AccountKeyGenerator().NewWithSigner()
 
 		// create account on emulated chain
-		address, err := s.blockchain.CreateAccount([]*sdk.AccountKey{key}, []sdktemplates.Contract{})
+		address, err := s.emulatorClient.CreateAccount([]*sdk.AccountKey{key}, []sdktemplates.Contract{})
 		s.Require().NoError(err)
 
-		client := epochs.NewQCContractClient(zerolog.Nop(), s.emulatorClient, flow.ZeroID, nodeID, address.String(), 0, s.qcAddress.String(), signer)
+		client := epochs.NewQCContractClient(
+			zerolog.Nop(),
+			s.emulatorClient,
+			flow.ZeroID,
+			nodeID,
+			address.String(),
+			0,
+			s.qcAddress.String(),
+			signer,
+		)
 		s.Require().NoError(err)
 
 		local := &modulemock.Local{}
@@ -84,12 +95,19 @@
 		signature, err := stakingPrivKey.Sign(voteMessage, hasher)
 		s.Require().NoError(err)
 
-		vote := hotstuffmodel.VoteFromFlow(nodeID, blockID, view, signature)
+		vote, err := hotstuffmodel.NewVote(hotstuffmodel.UntrustedVote{
+			View:     view,
+			BlockID:  blockID,
+			SignerID: nodeID,
+			SigData:  signature,
+		})
+		require.NoError(s.T(), err)
+
 		hotSigner := &hotstuff.Signer{}
 		hotSigner.On("CreateVote", mock.Anything).Return(vote, nil)
 
 		snapshot := &protomock.Snapshot{}
-		snapshot.On("Phase").Return(flow.EpochPhaseSetup, nil)
+		snapshot.On("EpochPhase").Return(flow.EpochPhaseSetup, nil)
 
 		state := &protomock.State{}
 		state.On("CanonicalRootBlock").Return(rootBlock)
diff --git a/integration/go.mod b/integration/go.mod
index 478283c6530..ea2c0737936 100644
--- a/integration/go.mod
+++ b/integration/go.mod
@@ -1,327 +1,394 @@
 module github.com/onflow/flow-go/integration
 
-go 1.19
+go 1.25.0
 
 require (
-	cloud.google.com/go/bigquery v1.48.0
+	cloud.google.com/go/bigquery v1.69.0
 	github.com/VividCortex/ewma v1.2.0
+	github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2
+	github.com/cockroachdb/pebble/v2 v2.0.6
 	github.com/coreos/go-semver v0.3.0
-	github.com/dapperlabs/testingdock v0.4.4
-	github.com/dgraph-io/badger/v2
v2.2007.4 - github.com/docker/docker v1.4.2-0.20190513124817-8c8457b0f2f8 + github.com/dapperlabs/testingdock v0.4.5-0.20231020233342-a2853fe18724 + github.com/docker/docker v24.0.6+incompatible github.com/docker/go-connections v0.4.0 - github.com/go-git/go-git/v5 v5.5.2 + github.com/ethereum/go-ethereum v1.16.3 + github.com/go-git/go-git/v5 v5.11.0 github.com/go-yaml/yaml v2.1.0+incompatible - github.com/ipfs/go-blockservice v0.4.0 - github.com/ipfs/go-cid v0.3.2 - github.com/ipfs/go-datastore v0.6.0 - github.com/ipfs/go-ds-badger2 v0.1.3 - github.com/ipfs/go-ipfs-blockstore v1.2.0 - github.com/onflow/cadence v0.38.1 - github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 - github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 - github.com/onflow/flow-emulator v0.48.1-0.20230502171545-1c91ebbf6870 - github.com/onflow/flow-go v0.30.1-0.20230501182206-6a911be58b92 - github.com/onflow/flow-go-sdk v0.40.0 - github.com/onflow/flow-go/crypto v0.24.7 + github.com/gorilla/websocket v1.5.3 + github.com/ipfs/boxo v0.27.2 + github.com/ipfs/go-cid v0.4.1 + github.com/ipfs/go-datastore v0.8.2 + github.com/ipfs/go-ds-pebble v0.5.0 + github.com/libp2p/go-libp2p v0.38.2 + github.com/onflow/cadence v1.7.1 + github.com/onflow/crypto v0.25.3 + github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.0 + github.com/onflow/flow-core-contracts/lib/go/templates v1.9.0 + github.com/onflow/flow-go v0.38.0-preview.0.0.20241021221952-af9cd6e99de1 + github.com/onflow/flow-go-sdk v1.8.4 github.com/onflow/flow-go/insecure v0.0.0-00010101000000-000000000000 - github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230428213521-89bcc9e8517e - github.com/plus3it/gorecurcopy v0.0.1 - github.com/prometheus/client_golang v1.14.0 + github.com/onflow/flow/protobuf/go/flow v0.4.16 + github.com/prometheus/client_golang v1.20.5 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/common v0.61.0 + github.com/psiemens/graceland v1.0.0 github.com/rs/zerolog v1.29.0 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.11.1 go.einride.tech/pid v0.1.0 - go.uber.org/atomic v1.10.0 - golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15 - golang.org/x/sync v0.1.0 - google.golang.org/grpc v1.53.0 - google.golang.org/protobuf v1.30.0 + go.uber.org/atomic v1.11.0 + go.uber.org/mock v0.5.0 + golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 + golang.org/x/sync v0.16.0 + google.golang.org/grpc v1.75.1 + google.golang.org/protobuf v1.36.9 + gopkg.in/yaml.v3 v3.0.1 ) require ( - cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.12.0 // indirect - cloud.google.com/go/storage v1.28.1 // indirect - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/Microsoft/hcsshim v0.8.7 // indirect - github.com/OneOfOne/xxhash v1.2.5 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 // indirect - github.com/acomagu/bufpipe v1.0.3 // indirect - github.com/andybalholm/brotli v1.0.4 // indirect - github.com/apache/arrow/go/v10 v10.0.1 // indirect - github.com/apache/thrift v0.16.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.17.7 // indirect - github.com/aws/aws-sdk-go-v2/config v1.18.19 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.18 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 // indirect + cel.dev/expr v0.24.0 // indirect + 
cloud.google.com/go v0.121.0 // indirect + cloud.google.com/go/auth v0.16.4 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.8.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + cloud.google.com/go/storage v1.53.0 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect + github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc // indirect + github.com/StackExchange/wmi v1.2.1 // indirect + github.com/VictoriaMetrics/fastcache v1.12.2 // indirect + github.com/apache/arrow/go/v15 v15.0.2 // indirect + github.com/aws/aws-sdk-go-v2 v1.39.1 // indirect + github.com/aws/aws-sdk-go-v2/config v1.31.9 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.13 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 // indirect github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 // indirect - github.com/aws/smithy-go v1.13.5 // indirect - github.com/benbjohnson/clock v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 // indirect + github.com/aws/smithy-go v1.23.0 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.3.0 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect - github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/bits-and-blooms/bitset v1.24.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect 
- github.com/cloudflare/circl v1.1.0 // indirect - github.com/containerd/cgroups v1.0.4 // indirect - github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41 // indirect - github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudflare/circl v1.3.3 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect + github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/consensys/gnark-crypto v0.18.0 // indirect + github.com/containerd/cgroups v1.1.0 // indirect + github.com/containerd/fifo v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/cskr/pubsub v1.0.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect - github.com/dgraph-io/ristretto v0.0.3 // indirect + github.com/deckarep/golang-set/v2 v2.6.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d // indirect - github.com/docker/distribution v2.6.0-rc.1.0.20171207180435-f4118485915a+incompatible // indirect - github.com/docker/docker-credential-helpers v0.6.3 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/cli v24.0.6+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/ef-ds/deque v1.0.4 // indirect - github.com/elastic/gosigar v0.14.2 // indirect + github.com/elastic/gosigar v0.14.3 // indirect + github.com/emicklei/dot v1.6.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/ethereum/go-ethereum v1.10.1 // indirect - github.com/flynn/noise v1.0.0 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.0 // indirect + github.com/ethereum/go-verkle v0.2.2 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/ferranbt/fastssz v0.1.4 // indirect + github.com/filecoin-project/go-clock v0.1.0 // indirect + github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.5.4 // 
indirect - github.com/fxamacker/cbor/v2 v2.4.1-0.20220515183430-ad2eae63303f // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829 // indirect github.com/fxamacker/circlehash v0.3.0 // indirect - github.com/gammazero/deque v0.1.0 // indirect - github.com/gammazero/workerpool v1.1.2 // indirect - github.com/ghodss/yaml v1.0.0 // indirect - github.com/glebarez/go-sqlite v1.21.1 // indirect - github.com/go-git/gcfg v1.5.0 // indirect - github.com/go-git/go-billy/v5 v5.4.0 // indirect + github.com/fxamacker/golang-lru/v2 v2.0.0-20250716153046-22c8d17dc4ee // indirect + github.com/gabriel-vasile/mimetype v1.4.6 // indirect + github.com/gammazero/deque v1.0.0 // indirect + github.com/gammazero/workerpool v1.1.3 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-redis/redis/v8 v8.11.5 // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect - github.com/go-test/deep v1.0.8 // indirect - github.com/goccy/go-json v0.9.11 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.19.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.0.0 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/flatbuffers v2.0.8+incompatible // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect + github.com/google/flatbuffers v23.5.26+incompatible // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20221219190121-3cb0bae90811 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.1 // indirect - github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2 v2.0.0-rc.2 // indirect - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbfea // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect + 
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/huin/goupnp v1.0.3 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/holiman/bloomfilter/v2 v2.0.3 // indirect + github.com/holiman/uint256 v1.3.2 // indirect + github.com/huandu/go-clone v1.7.2 // indirect + github.com/huandu/go-clone/generic v1.7.2 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/go-block-format v0.0.3 // indirect + github.com/ipfs/go-block-format v0.2.0 // indirect github.com/ipfs/go-cidutil v0.1.0 // indirect - github.com/ipfs/go-fetcher v1.5.0 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect - github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect - github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect - github.com/ipfs/go-ipfs-pq v0.0.2 // indirect - github.com/ipfs/go-ipfs-provider v0.7.0 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect - github.com/ipfs/go-ipld-format v0.3.0 // indirect - github.com/ipfs/go-ipns v0.2.0 // indirect + github.com/ipfs/go-ipfs-pq v0.0.3 // indirect + github.com/ipfs/go-ipfs-util v0.0.3 // indirect + github.com/ipfs/go-ipld-format v0.6.0 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect - github.com/ipfs/go-peertaskqueue v0.7.0 // indirect - github.com/ipfs/go-verifcid v0.0.1 // indirect - github.com/ipld/go-ipld-prime v0.14.1 // indirect + github.com/ipfs/go-peertaskqueue v0.8.2 // indirect + github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/kevinburke/go-bindata v3.23.0+incompatible // indirect + github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 // indirect + github.com/k0kubun/pp/v3 v3.5.0 // indirect + github.com/kevinburke/go-bindata v3.24.0+incompatible // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/asmfmt v1.3.2 // indirect - github.com/klauspost/compress v1.15.13 // indirect - github.com/klauspost/cpuid/v2 v2.2.3 // indirect - github.com/koron/go-ssdp v0.0.3 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/libp2p/go-addr-util v0.1.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect - github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p v0.24.2 // indirect - 
github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect - github.com/libp2p/go-libp2p-core v0.20.1 // indirect - github.com/libp2p/go-libp2p-kad-dht v0.19.0 // indirect - github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect - github.com/libp2p/go-libp2p-pubsub v0.8.2 // indirect + github.com/libp2p/go-flow-metrics v0.2.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect + github.com/libp2p/go-libp2p-kad-dht v0.28.2 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.4 // indirect + github.com/libp2p/go-libp2p-pubsub v0.13.0 // indirect github.com/libp2p/go-libp2p-record v0.2.0 // indirect - github.com/libp2p/go-msgio v0.2.0 // indirect - github.com/libp2p/go-nat v0.1.0 // indirect - github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-openssl v0.1.0 // indirect - github.com/libp2p/go-reuseport v0.2.0 // indirect - github.com/libp2p/go-yamux/v4 v4.0.0 // indirect - github.com/logrusorgru/aurora v2.0.3+incompatible // indirect - github.com/lucas-clemente/quic-go v0.31.1 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.4 // indirect + github.com/libp2p/go-msgio v0.3.0 // indirect + github.com/libp2p/go-nat v0.2.0 // indirect + github.com/libp2p/go-netroute v0.2.2 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect + github.com/libp2p/go-yamux/v4 v4.0.1 // indirect + github.com/logrusorgru/aurora/v4 v4.0.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/m4ksio/wal v1.0.1-0.20221209164835-154a17396e4c // indirect - github.com/magiconair/properties v1.8.6 // indirect - github.com/marten-seemann/qtls-go1-18 v0.1.3 // indirect - github.com/marten-seemann/qtls-go1-19 v0.1.1 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-pointer v0.0.1 // indirect - github.com/mattn/go-runewidth v0.0.13 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.50 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect - github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect - github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect - github.com/minio/sha256-simd v1.0.0 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr v0.8.0 // indirect - github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect + github.com/multiformats/go-multiaddr v0.14.0 // indirect + github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.7.0 // indirect - 
github.com/multiformats/go-multihash v0.2.1 // indirect - github.com/multiformats/go-multistream v0.3.3 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-multistream v0.6.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onflow/atree v0.5.0 // indirect - github.com/onflow/flow-ft/lib/go/contracts v0.7.0 // indirect - github.com/onflow/go-bitswap v0.0.0-20221017184039-808c5791a8a8 // indirect - github.com/onflow/sdks v0.5.0 // indirect - github.com/onsi/ginkgo/v2 v2.6.1 // indirect - github.com/opencontainers/go-digest v1.0.0-rc1 // indirect - github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/opencontainers/runc v0.1.1 // indirect - github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/onflow/atree v0.10.1 // indirect + github.com/onflow/fixed-point v0.1.1 // indirect + github.com/onflow/flow-evm-bridge v0.1.0 // indirect + github.com/onflow/flow-ft/lib/go/contracts v1.0.1 // indirect + github.com/onflow/flow-ft/lib/go/templates v1.0.1 // indirect + github.com/onflow/flow-nft/lib/go/contracts v1.3.0 // indirect + github.com/onflow/flow-nft/lib/go/templates v1.3.0 // indirect + github.com/onflow/go-ethereum v1.16.2 // indirect + github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 // indirect + github.com/onflow/sdks v0.6.0-preview.1 // indirect + github.com/onflow/wal v1.0.2 // indirect + github.com/onsi/ginkgo/v2 v2.22.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.2 // indirect - github.com/pierrec/lz4 v2.6.1+incompatible // indirect - github.com/pierrec/lz4/v4 v4.1.15 // indirect - github.com/pjbgf/sha1cd v0.2.3 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect + github.com/pion/datachannel v1.5.10 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/ice/v2 v2.3.37 // indirect + github.com/pion/interceptor v0.1.37 // indirect + github.com/pion/logging v0.2.2 // indirect + github.com/pion/mdns v0.0.12 // indirect + github.com/pion/randutil v0.1.0 // indirect + github.com/pion/rtcp v1.2.15 // indirect + github.com/pion/rtp v1.8.10 // indirect + github.com/pion/sctp v1.8.35 // indirect + github.com/pion/sdp/v3 v3.0.9 // indirect + github.com/pion/srtp/v2 v2.0.20 // indirect + github.com/pion/stun v0.6.1 // indirect + github.com/pion/stun/v2 v2.0.0 // indirect + github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect + github.com/pion/turn/v2 v2.1.6 // indirect + github.com/pion/webrtc/v3 v3.3.5 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/polydawn/refmt v0.89.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.39.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect - github.com/psiemens/graceland v1.0.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/psiemens/sconfig v0.1.0 // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.48.2 // indirect + github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/rivo/uniseg v0.2.1-0.20211004051800-57c86be7915a // indirect - github.com/schollz/progressbar/v3 v3.8.3 // indirect - github.com/sergi/go-diff v1.1.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/rootless-containers/rootlesskit v1.1.1 // indirect + github.com/schollz/progressbar/v3 v3.18.0 // indirect + github.com/sergi/go-diff v1.2.0 // indirect github.com/sethvargo/go-retry v0.2.3 // indirect - github.com/shirou/gopsutil/v3 v3.22.2 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect - github.com/skeema/knownhosts v1.1.0 // indirect - github.com/slok/go-http-metrics v0.10.0 // indirect - github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect + github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect + github.com/shirou/gopsutil/v3 v3.24.5 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/skeema/knownhosts v1.2.1 // indirect + github.com/slok/go-http-metrics v0.12.0 // indirect + github.com/sony/gobreaker v0.5.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.9.0 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.12.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.0 // indirect + github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/viper v1.15.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/supranational/blst v0.3.14 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c // indirect - github.com/tklauser/go-sysconf v0.3.9 // indirect - github.com/tklauser/numcpus v0.3.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d // indirect - github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect - github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect + 
github.com/wlynxg/anet v0.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect - github.com/yhassanzadeh13/go-libp2p-pubsub v0.6.2-0.20221208234712-b44d9133e4ee // indirect - github.com/yusufpapurcu/wmi v1.2.2 // indirect - github.com/zeebo/blake3 v0.2.3 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect + github.com/zeebo/errs v1.4.0 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0 // indirect - go.opentelemetry.io/otel/sdk v1.8.0 // indirect - go.opentelemetry.io/otel/trace v1.8.0 // indirect - go.opentelemetry.io/proto/otlp v0.18.0 // indirect - go.uber.org/dig v1.15.0 // indirect - go.uber.org/fx v1.18.2 // indirect - go.uber.org/multierr v1.9.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.4.0 // indirect - golang.org/x/mod v0.8.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect - golang.org/x/tools v0.6.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.114.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.23.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.36.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + gonum.org/v1/gonum v0.16.0 // indirect + google.golang.org/api v0.247.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 // 
indirect - gopkg.in/ini.v1 v1.66.6 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.1.7 // indirect - modernc.org/libc v1.22.3 // indirect - modernc.org/mathutil v1.5.0 // indirect - modernc.org/memory v1.5.0 // indirect - modernc.org/sqlite v1.21.1 // indirect + lukechampine.com/blake3 v1.4.1 // indirect ) replace github.com/onflow/flow-go => ../ replace github.com/onflow/flow-go/insecure => ../insecure + +// Using custom fork until https://github.com/onflow/flow-go/issues/5338 is resolved +replace github.com/ipfs/boxo => github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 + +// Using custom fork until https://github.com/ipfs/go-ds-pebble/issues/64 is merged +replace github.com/ipfs/go-ds-pebble => github.com/onflow/go-ds-pebble v0.0.0-20251003225212-131edca3a897 + +replace github.com/hashicorp/golang-lru/v2 => github.com/fxamacker/golang-lru/v2 v2.0.0-20250430153159-6f72f038a30f diff --git a/integration/go.sum b/integration/go.sum index 5aa4af7288b..7c2c8280e16 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -1,345 +1,259 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= 
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.48.0 h1:u+fhS1jJOkPO9vdM84M8HO5VznTfVUicBeoXNKD26ho= -cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= -cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datacatalog v1.12.0 h1:3uaYULZRLByPdbuUvacGeqneudztEM4xqKQsBcxbDnY= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go v0.121.0 h1:pgfwva8nGw7vivjZiRfrmglGWiCJBP+0OmDpenG/Fwg= +cloud.google.com/go v0.121.0/go.mod h1:rS7Kytwheu/y9buoDmu5EIpMMCI4Mb8ND4aeN4Vwj7Q= +cloud.google.com/go/auth v0.16.4 h1:fXOAIQmkApVvcIn7Pc2+5J8QTMVbUGLscnSVNl11su8= +cloud.google.com/go/auth v0.16.4/go.mod h1:j10ncYwjX/g3cdX7GpEzsdM+d+ZNsXAbb6qXA7p1Y5M= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/bigquery v1.69.0 h1:rZvHnjSUs5sHK3F9awiuFk2PeOaB8suqNuim21GbaTc= +cloud.google.com/go/bigquery v1.69.0/go.mod h1:TdGLquA3h/mGg+McX+GsqG9afAzTAcldMjqhdjHTLew= +cloud.google.com/go/compute/metadata v0.8.0 h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA= +cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw= +cloud.google.com/go/datacatalog v1.26.0 h1:eFgygb3DTufTWWUB8ARk+dSuXz+aefNJXTlkWlQcWwE= +cloud.google.com/go/datacatalog v1.26.0/go.mod h1:bLN2HLBAwB3kLTFT5ZKLHVPj/weNz6bR0c7nYp0LE14= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= +cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring 
v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
 cloud.google.com/go/profiler v0.3.0 h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
-cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI=
-cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
-collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
+cloud.google.com/go/profiler v0.3.0/go.mod h1:9wYk9eY4iZHsev8TQb61kh3wiOiSyz/xOYixWPzweCU=
+cloud.google.com/go/storage v1.53.0 h1:gg0ERZwL17pJ+Cz3cD2qS60w1WMDnwcm5YPAIQBHUAw=
+cloud.google.com/go/storage v1.53.0/go.mod h1:7/eO2a/srr9ImZW9k5uufcNahT2+fPb8w5it1i5boaA=
+cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
+cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
 dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
 git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
-github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
-github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
-github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
-github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU=
-github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE=
+github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
+github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU=
+github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
-github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
-github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 h1:ra2OtmuW0AE5csawV4YXMNGNQQXvLRps3z2Z59OPO+I=
-github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
+github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc h1:DCHzPQOcU/7gwDTWbFQZc5qHMPS1g0xTO56k8NXsv9M=
+github.com/SaveTheRbtz/mph v0.1.1-0.20240117162131-4166ec7869bc/go.mod h1:LJM5a3zcIJ/8TmZwlUczvROEJT8ntOdhdG9jjcR1B0I=
+github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
+github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
+github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
-github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
-github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
-github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
-github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
-github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
-github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f h1:JjxwchlOepwsUWcQwD2mLUAGE9aCp0/ehy6yCHFBOvo=
+github.com/aclements/go-perfevent v0.0.0-20240301234650-f7843625020f/go.mod h1:tMDTce/yLLN/SK8gMOxQfnyeMeCg8KGzp0D1cbECEeo=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
-github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
-github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
-github.com/apache/arrow/go/v10 v10.0.1 h1:n9dERvixoC/1JjDmBcs9FPaEryoANa2sCgVFo6ez9cI=
-github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY=
-github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
-github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/apache/arrow/go/v15 v15.0.2 h1:60IliRbiyTWCWjERBCkO1W4Qun9svcYoZrSLcyOsMLE=
+github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
-github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
-github.com/aws/aws-sdk-go-v2 v1.17.7 h1:CLSjnhJSTSogvqUGhIC6LqFKATMRexcxLZ0i/Nzk9Eg=
-github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
+github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
 github.com/aws/aws-sdk-go-v2/config v1.8.0/go.mod h1:w9+nMZ7soXCe5nT46Ri354SNhXDQ6v+V5wqDjnZE+GY=
-github.com/aws/aws-sdk-go-v2/config v1.18.19 h1:AqFK6zFNtq4i1EYu+eC7lcKHYnZagMn6SW171la0bGw=
-github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY=
+github.com/aws/aws-sdk-go-v2/config v1.31.9 h1:Q+9hVk8kmDGlC7XcDout/vs0FZhHnuPCPv+TRAYDans=
+github.com/aws/aws-sdk-go-v2/config v1.31.9/go.mod h1:OpMrPn6rRbHKU4dAVNCk/EQx8sEQJI7hl9GZZ5u/Y+U=
 github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.18 h1:EQMdtHwz0ILTW1hoP+EwuWhwCG1hD6l3+RWFQABET4c=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.13 h1:gkpEm65/ZfrGJ3wbFH++Ki7DyaWtsWbK9idX6OXCo2E=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.13/go.mod h1:eVTHz1yI2/WIlXTE8f70mcrSxNafXD5sJpTIM9f+kmo=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0/go.mod h1:CpNzHK9VEFUCknu50kkB8z58AH2B5DvPP7ea1LHve/Y=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 h1:gt57MN3liKiyGopcqgNzJb2+d9MJaKT/q1OksHNXVE4=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 h1:Is2tPmieqGS2edBnmOJIbdvOA6Op+rRpaYR60iBAwXM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7/go.mod h1:F1i5V5421EGci570yABvpIXgRIBPb5JM+lSkHF6Dq5w=
 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1 h1:VGkV9KmhGqOQWnHyi4gLG98kE6OecT42fdrCGFWxJsc=
 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.1/go.mod h1:PLlnMiki//sGnCJiW+aVpvP/C8Kcm8mEj/IVm9+9qk4=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 h1:sJLYcS+eZn5EeNINGHSCRAwUJMFVqklwkH36Vbyai7M=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 h1:1mnRASEKnkqsntcxHaysxwgVoUUp5dkiB+l3llKnqyg=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2/go.mod h1:BQV0agm+JEhqR+2RT5e1XTFIDcAAV0eW6z2trp+iduw=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 h1:p5luUImdIqywn6JpQsW3tq5GNOxKmOnEpybzPx+d1lk=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 h1:5LHn8JQ0qvjD9L9JhMtylnkcw7j05GDZqM9Oin6hpr0=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 h1:mLgc5QIgOy26qyh5bvW+nDoAppxgn3J2WV3m9ewq7+8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7/go.mod h1:wXb/eQnqt8mDQIQTTmcw58B5mYGxzLGZGK8PWNFZ0BA=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0 h1:HWsM0YQWX76V6MOp07YuTYacm8k7h69ObJuw7Nck+og=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.0/go.mod h1:LKb3cKNQIMh+itGnEpKGcnL/6OIjPZqrtYah1w5f+3o=
 github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0 h1:nPLfLPfglacc29Y949sDxpr3X/blaY40s3B85WT2yZU=
 github.com/aws/aws-sdk-go-v2/service/s3 v1.15.0/go.mod h1:Iv2aJVtVSm/D22rFoX99cLG4q4uB7tppuCsulGe98k4=
 github.com/aws/aws-sdk-go-v2/service/sso v1.4.0/go.mod h1:+1fpWnL96DL23aXPpMGbsmKe8jLTEfbjuQoA4WS1VaA=
-github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 h1:5V7DWLBd7wTELVz5bPpwzYy/sikk0gsgZfj40X+l5OI=
-github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 h1:B8cauxOH1W1v7rd8RdI/MWnoR4Ze0wIHWrb90qczxj4=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 h1:7PKX3VYsZ8LUWceVRuv0+PU+E7OtQb1lgmi5vmUE9CM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.3/go.mod h1:Ql6jE9kyyWI5JHn+61UT/Y5Z0oyVJGmgmJbZD5g4unY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5 h1:gBBZmSuIySGqDLtXdZiYpwyzbJKXQD2jjT0oDY6ywbo=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.5/go.mod h1:XclEty74bsGBCr1s0VSaA11hQ4ZidK4viWK7rRfO88I=
 github.com/aws/aws-sdk-go-v2/service/sts v1.7.0/go.mod h1:0qcSMCyASQPN2sk/1KQLQ2Fh6yq8wm0HSDAimPhzCoM=
-github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 h1:bWNgNdRko2x6gqa0blfATqAZKZokPIeM1vfmQt2pnvM=
-github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 h1:PR00NXRYgY4FWHqOGx3fC3lhVKjsp1GdloDv2ynMSd8=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.4/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8=
 github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
-github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
-github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
+github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
 github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bits-and-blooms/bitset v1.3.0 h1:h7mv5q31cthBTd7V4kLAZaIThj1e8vPGcSqpPue9KVI=
-github.com/bits-and-blooms/bitset v1.3.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
-github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bits-and-blooms/bitset v1.24.0 h1:H4x4TuulnokZKvHLfzVRTHJfFfnHEeSYJizujEZvmAM=
+github.com/bits-and-blooms/bitset v1.24.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
-github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
-github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
-github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
-github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
-github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
-github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
-github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94=
-github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E=
-github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8=
-github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
-github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
-github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
-github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
-github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
-github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
-github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
-github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
+github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
+github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 h1:KdUfX2zKommPRa+PD0sWZUyXe9w277ABlgELO7H04IM=
+github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
 github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
-github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
-github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
-github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
-github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM=
+github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY=
-github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
-github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U=
+github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/consensys/bavard v0.1.8-0.20210105233146-c16790d2aa8b/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
-github.com/consensys/goff v0.3.10/go.mod h1:xTldOBEHmFiYS0gPXd3NsaEqZWlnmeWcRLWgD3ba3xc=
-github.com/consensys/gurvy v0.3.8/go.mod h1:sN75xnsiD593XnhbhvG2PkOy194pZBzqShWF/kwuW/g=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94 h1:bvJv505UUfjzbaIPdNS4AEkHreDqQk6yuNpsdRHpwFA=
+github.com/cockroachdb/crlib v0.0.0-20241015224233-894974b3ad94/go.mod h1:Gq51ZeKaFCXk6QwuGM0w1dnaOqc/F5zKT2zA9D6Xeac=
+github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056 h1:slXychO2uDM6hYRu4c0pD0udNI8uObfeKN6UInWViS8=
+github.com/cockroachdb/datadriven v1.0.3-0.20240530155848-7682d40af056/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
+github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=
+github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
+github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4=
+github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M=
+github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
+github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
+github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895 h1:XANOgPYtvELQ/h4IrmPAohXqe2pWA8Bwhejr3VQoZsA=
+github.com/cockroachdb/metamorphic v0.0.0-20231108215700-4ba948b56895/go.mod h1:aPd7gM9ov9M8v32Yy5NJrDyOcD8z642dqs+F0CeNXfA=
+github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw=
+github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo=
+github.com/cockroachdb/pebble/v2 v2.0.6 h1:eL54kX2AKp1ePJ/8vq4IO3xIEPpvVjlSP12dlLYilyE=
+github.com/cockroachdb/pebble/v2 v2.0.6/go.mod h1:un1DXG73PKw3F7Ndd30YactyvsFviI9Fuhe0tENdnyA=
+github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
+github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
+github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961 h1:Nua446ru3juLHLZd4AwKNzClZgL1co3pUPGv3o8FlcA=
+github.com/cockroachdb/swiss v0.0.0-20250624142022-d6e517c1d961/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg=
+github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
+github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
+github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0=
+github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c=
 github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
-github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41 h1:kIFnQBO7rQ0XkMe6xEwbybYHBEaWmh/f++laI6Emt7M=
-github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00 h1:lsjC5ENBl+Zgf38+B0ymougXFp0BaubeIVETltYZTQw=
-github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
+github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/crate-crypto/go-eth-kzg v1.3.0 h1:05GrhASN9kDAidaFJOda6A4BEvgvuXbazXg/0E3OOdI=
+github.com/crate-crypto/go-eth-kzg v1.3.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
+github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg=
+github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=
 github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis=
-github.com/dapperlabs/testingdock v0.4.4 h1:nDpnEjhs2gNv7rcb70PTfHlL3yr4eQycqp0+oFuhyNg=
-github.com/dapperlabs/testingdock v0.4.4/go.mod h1:HeTbuHG1J4yt4n7NlZSyuk5c5fmyz6hECbyV+36Ku7Q=
-github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
-github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec=
+github.com/dapperlabs/testingdock v0.4.5-0.20231020233342-a2853fe18724 h1:zOOpPLu5VvH8ixyoDWHnQHWoEHtryT1ne31vwz0G7Fo=
+github.com/dapperlabs/testingdock v0.4.5-0.20231020233342-a2853fe18724/go.mod h1:U0cEcbf9hAwPSuuoPVqXKhcWV+IU4CStK75cJ52f2/A=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
 github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
-github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
-github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
-github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
-github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
-github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
-github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
-github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU=
-github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
+github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
+github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
+github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
+github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
 github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o=
 github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk=
-github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
 github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
-github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI=
-github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
-github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d h1:SknEFm9d070Wn2GeX8dyl7bMrX07cp3UMXuZ2Ct02Kw=
-github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/distribution v2.6.0-rc.1.0.20171207180435-f4118485915a+incompatible h1:2YJcZ66ScSWjLY7lifaPjEav51u0EThWBHpfveH6p0g=
-github.com/docker/distribution v2.6.0-rc.1.0.20171207180435-f4118485915a+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v1.4.2-0.20190513124817-8c8457b0f2f8 h1:vyqIlE9fpJ+cdE95qkW9ihHas6QT87AFLE72W5bGUEY=
-github.com/docker/docker v1.4.2-0.20190513124817-8c8457b0f2f8/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
+github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY=
+github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE=
+github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
+github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
@@ -347,168 +261,152 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
-github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
 github.com/ef-ds/deque v1.0.4 h1:iFAZNmveMT9WERAkqLJ+oaABF9AcVQ5AjXem/hroniI=
 github.com/ef-ds/deque v1.0.4/go.mod h1:gXDnTC3yqvBcHbq2lcExjtAcVrOnJCbMcZXmuj8Z4tg=
 github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
-github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
-github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
+github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
+github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A=
+github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
 github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
 github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
-github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
+github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM=
-github.com/ethereum/go-ethereum v1.10.1 h1:bGQezu+kqqRBczcSAruEoqVzTjtkeDnUGI2I4uroyUE=
-github.com/ethereum/go-ethereum v1.10.1/go.mod h1:E5e/zvdfUVr91JZ0AwjyuJM3x+no51zZJRz61orLLSk=
-github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
-github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w=
+github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E=
+github.com/ethereum/go-ethereum v1.16.3 h1:nDoBSrmsrPbrDIVLTkDQCy1U9KdHN+F2PzvMbDoS42Q=
+github.com/ethereum/go-ethereum v1.16.3/go.mod h1:Lrsc6bt9Gm9RyvhfFK53vboCia8kpF9nv+2Ukntnl+8=
+github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
+github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
+github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY=
+github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg=
+github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU=
+github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
-github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
-github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
-github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
+github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
 github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
 github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
-github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
-github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
-github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/fxamacker/cbor/v2 v2.4.1-0.20220515183430-ad2eae63303f h1:dxTR4AaxCwuQv9LAVTAC2r1szlS+epeuPT5ClLKT6ZY=
-github.com/fxamacker/cbor/v2 v2.4.1-0.20220515183430-ad2eae63303f/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829 h1:qOglMkJ5YBwog/GU/NXhP9gFqxUGMuqnmCkbj65JMhk=
+github.com/fxamacker/cbor/v2 v2.8.1-0.20250402194037-6f932b086829/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
 github.com/fxamacker/circlehash v0.3.0 h1:XKdvTtIJV9t7DDUtsf0RIpC1OcxZtPbmgIH7ekx28WA=
 github.com/fxamacker/circlehash v0.3.0/go.mod h1:3aq3OfVvsWtkWMb6A1owjOQFA+TLsD5FgJflnaQwtMM=
-github.com/gammazero/deque v0.1.0 h1:f9LnNmq66VDeuAlSAapemq/U7hJ2jpIWa4c09q8Dlik=
-github.com/gammazero/deque v0.1.0/go.mod h1:KQw7vFau1hHuM8xmI9RbgKFbAsQFWmBpqQ2KenFLk6M=
-github.com/gammazero/workerpool v1.1.2 h1:vuioDQbgrz4HoaCi2q1HLlOXdpbap5AET7xu5/qj87g=
-github.com/gammazero/workerpool v1.1.2/go.mod h1:UelbXcO0zCIGFcufcirHhq2/xtLXJdQ29qZNlXG9OjQ=
-github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
-github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/fxamacker/golang-lru/v2 v2.0.0-20250430153159-6f72f038a30f h1:/gqGg2NQVvwiLXs7ppw2uneC5AAd2Z9OTp0zgu42zNI=
+github.com/fxamacker/golang-lru/v2 v2.0.0-20250430153159-6f72f038a30f/go.mod h1:qnbgnNzfydwuHjSCApF4bdul+tZ8T3y1MkZG/OFczLA=
+github.com/fxamacker/golang-lru/v2 v2.0.0-20250716153046-22c8d17dc4ee h1:9RFHOj6xUdQRi1lz/BJXwi0IloXtv6Y2tp7rdSC7SQk=
+github.com/fxamacker/golang-lru/v2 v2.0.0-20250716153046-22c8d17dc4ee/go.mod h1:1FYBKLDzpfjjoWMTK1cIOxsTomg/n35DWNLu6FoYEb8=
+github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc=
+github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc=
+github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34=
+github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo=
+github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q=
+github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc=
+github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
+github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
+github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM=
+github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/glebarez/go-sqlite v1.21.1 h1:7MZyUPh2XTrHS7xNEHQbrhfMZuPSzhkm2A1qgg0y5NY=
-github.com/glebarez/go-sqlite v1.21.1/go.mod h1:ISs8MF6yk5cL4n/43rSOmVMGJJjHYr7L2MbZZ5Q4E2E=
 github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
 github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
-github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
-github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
-github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
-github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.4.0 h1:Vaw7LaSTRJOUric7pe4vnzBSgyuf2KrLsu2Y4ZpQBDE=
-github.com/go-git/go-billy/v5 v5.4.0/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
-github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ=
-github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
-github.com/go-git/go-git/v5 v5.5.2 h1:v8lgZa5k9ylUw+OR/roJHTxR4QItsNFI5nKtAXFuynw=
-github.com/go-git/go-git/v5 v5.5.2/go.mod h1:BE5hUJ5yaV2YMxhmaP4l6RBQ08kMxKSPD4BlxtH7OjI=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
+github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
+github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
+github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
+github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
+github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
 github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
 github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
 github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
+github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
-github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
-github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
+github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4=
+github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
-github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o=
 github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
-github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk=
-github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM=
+github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
+github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
-github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I=
+github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
 github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
 github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -518,688 +416,292 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc=
+github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
-github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM=
-github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg=
+github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/go-github
v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20221219190121-3cb0bae90811 h1:wORs2YN3R3ona/CXYuTvLM31QlgoNKHvlCNuArCDDCU= -github.com/google/pprof v0.0.0-20221219190121-3cb0bae90811/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= -github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2 v2.0.0-rc.2 h1:uxUHSMwWDJ/9jVPHNumRC8WZOi3hrBL22ObVOoLg4ww= -github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2 v2.0.0-rc.2/go.mod h1:BL7w7qd2l/j9jgY6WMhYutfOFQc0I8RTVwtjpnAMoTM= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbfea h1:1Tk1IbruXbunEnaIZEFb+Hpv9BIZti3OxKwKn5wWyKk= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbfea/go.mod h1:GugMBs30ZSAkckqXEAIEGyYdDH6EgqowG8ppA3Zt+AY= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= -github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= -github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= +github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp 
v1.0.1-0.20200620063722-49508fba0031/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo= -github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= -github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/go-clone v1.7.2 h1:3+Aq0Ed8XK+zKkLjE2dfHg0XrpIfcohBE1K+c8Usxoo= +github.com/huandu/go-clone v1.7.2/go.mod h1:ReGivhG6op3GYr+UY3lS6mxjKp7MIGTknuU5TbTVaXE= +github.com/huandu/go-clone/generic v1.7.2 h1:47pQphxs1Xc9cVADjOHN+Bm5D0hNagwH9UXErbxgVKA= +github.com/huandu/go-clone/generic v1.7.2/go.mod h1:xgd9ZebcMsBWWcBx5mVMCoqMX24gLWr5lQicr+nVXNs= +github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= -github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= -github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= -github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= -github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= -github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= -github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= -github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/insomniacslk/dhcp v0.0.0-20230516061539-49801966e6cb/go.mod h1:7474bZ1YNCvarT6WFKie4kEET6J0KYRDC4XJqqXzQW4= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap 
v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= -github.com/ipfs/go-bitswap v0.5.0/go.mod h1:WwyyYD33RHCpczgHjpx+xjWYIy8l41K+l5EMy4/ctSM= -github.com/ipfs/go-bitswap v0.9.0 h1:/dZi/XhUN/aIk78pI4kaZrilUglJ+7/SCmOHWIpiy8E= -github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= -github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= -github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= -github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= -github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.2.0/go.mod h1:Vzvj2fAnbbyly4+T7D5+p9n3+ZKVHA2bRMMo1QoILtQ= -github.com/ipfs/go-blockservice v0.4.0 h1:7MUijAW5SqdsqEW/EhnNFRJXVF8mGU5aGhZ3CQaCWbY= -github.com/ipfs/go-blockservice v0.4.0/go.mod h1:kRjO3wlGW9mS1aKuiCeGhx9K1DagQ10ACpVO59qgAx4= -github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= -github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= -github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= +github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= -github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= -github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= -github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= -github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= -github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= -github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= -github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U= 
+github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= -github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= -github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= -github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger2 v0.1.3 h1:Zo9JicXJ1DmXTN4KOw7oPXkspZ0AWHcAFCP1tQKnegg= -github.com/ipfs/go-ds-badger2 v0.1.3/go.mod h1:TPhhljfrgewjbtuL/tczP8dNrBYwwk+SdPYbms/NO9w= -github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= -github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-fetcher v1.5.0 h1:oreKTKBzja3S09rSmoZlA3KGVlRiUbJ1pQjtB4K6y3w= -github.com/ipfs/go-fetcher v1.5.0/go.mod h1:5pDZ0393oRF/fHiLmtFZtpMNBQfHOYNPtryWedVuSWE= -github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v0.2.0/go.mod h1:SNeEpz/ICnMYZQYr7KNZTjdn7tEPB/99xpe8xI1RW7o= -github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= -github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= -github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= -github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= -github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= -github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y= -github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= -github.com/ipfs/go-ipfs-exchange-offline v0.1.0/go.mod h1:YdJXa+yPF1na+gfYHYejtLwHFpuKv22eatApNiSfanM= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= -github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= -github.com/ipfs/go-ipfs-pq v0.0.2/go.mod 
h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-provider v0.7.0 h1:5GpHv46eIS8h2mbbKg1ckU5paajDYJtE4GA/SBepOQg= -github.com/ipfs/go-ipfs-provider v0.7.0/go.mod h1:mgjsWgDt9j19N1REPxRa31p+eRIQmjNt5McNdQQ5CsA= -github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= -github.com/ipfs/go-ipfs-routing v0.2.0/go.mod h1:384byD/LHKhAgKE3NmwOjXCpDzhczROMBzidoYV7tfM= -github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= -github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= -github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= -github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= -github.com/ipfs/go-ipld-format v0.3.0 h1:Mwm2oRLzIuUwEPewWAWyMuuBQUsn3awfFEYVb8akMOQ= -github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipns v0.2.0 h1:BgmNtQhqOw5XEZ8RAfWEpK4DhqaYiuP6h71MhIp7xXU= -github.com/ipfs/go-ipns v0.2.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= +github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= +github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= +github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= +github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= +github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= +github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= -github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= -github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= -github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= -github.com/ipfs/go-peertaskqueue v0.7.0 h1:VyO6G4sbzX80K58N60cCaHsSsypbUNs1GjO5seGNsQ0= -github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= -github.com/ipfs/go-verifcid 
v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= -github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= -github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.14.1 h1:n9obcUnuqPK34HlfbiB+o9GhXE/x59uue4z9YTsaoj4= -github.com/ipld/go-ipld-prime v0.14.1/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= -github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= -github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/ipfs/go-peertaskqueue v0.8.2 h1:PaHFRaVFdxQk1Qo3OKiHPYjmmusQy7gKQUaL8JDszAU= +github.com/ipfs/go-peertaskqueue v0.8.2/go.mod h1:L6QPvou0346c2qPJNiJa6BvOibxDfaiPlqHInmzg0FA= +github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= +github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= -github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= -github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= -github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956 h1:4iii8SOozVG1lpkdPELRsjPEBhU4DeFPz2r2Fjj3UDU= +github.com/jordanschalm/lockctx v0.0.0-20250412215529-226f85c10956/go.mod h1:qsnXMryYP9X7JbzskIn0+N40sE6XNXLr9kYRRP6rwXU= +github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= +github.com/jsimonetti/rtnetlink v0.0.0-20201110080708-d2c240429e6c/go.mod h1:huN4d1phzjhlOsNIjFsw2SVRbwIHj3fJDMEU2SDPTmg= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= -github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= -github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= -github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= -github.com/kevinburke/go-bindata v3.23.0+incompatible h1:rqNOXZlqrYhMVVAsQx8wuc+LaA73YcfbQ407wAykyS8= -github.com/kevinburke/go-bindata v3.23.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= +github.com/k0kubun/pp/v3 v3.5.0 h1:iYNlYA5HJAJvkD4ibuf9c8y6SHM0QFhaBuCqm1zHp0w= +github.com/k0kubun/pp/v3 v3.5.0/go.mod 
h1:5lzno5ZZeEeTV/Ky6vs3g6d1U3WarDrH8k240vMtGro= +github.com/kevinburke/go-bindata v3.24.0+incompatible h1:qajFA3D0pH94OTLU4zcCCKCDgR+Zr2cZK/RPJHDdFoY= +github.com/kevinburke/go-bindata v3.24.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kilic/bls12-381 v0.0.0-20201226121925-69dacb279461/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= -github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0= -github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= -github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= -github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= -github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leanovate/gopter v0.2.8/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= -github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= -github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/lib/pq v0.0.0-20170810061220-e42267488fe3/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= -github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.1.0 h1:acKsntI33w2bTU7tC9a0SaPimJGfSI0bFKC18ChxeVI= github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= -github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= -github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= -github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= 
-github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= -github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= -github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= -github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= -github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= -github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= -github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= -github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= -github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= -github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= -github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.24.2 h1:iMViPIcLY0D6zr/f+1Yq9EavCZu2i7eDstsr1nEwSAk= -github.com/libp2p/go-libp2p v0.24.2/go.mod h1:WuxtL2V8yGjam03D93ZBC19tvOUiPpewYv1xdFGWu1k= -github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= -github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= -github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= -github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= -github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= -github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= -github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= -github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= -github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= -github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= -github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= -github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= -github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= -github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= -github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= -github.com/libp2p/go-libp2p-core v0.0.4/go.mod 
h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= -github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= -github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= -github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= -github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= -github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= -github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.20.1 h1:fQz4BJyIFmSZAiTbKV8qoYhEH5Dtv/cVhZbG3Ib/+Cw= -github.com/libp2p/go-libp2p-core v0.20.1/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= -github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= -github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= -github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= -github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-kad-dht v0.19.0 h1:2HuiInHZTm9ZvQajaqdaPLHr0PCKKigWiflakimttE0= -github.com/libp2p/go-libp2p-kad-dht v0.19.0/go.mod h1:qPIXdiZsLczhV4/+4EO1jE8ae0YCW4ZOogc4WVIyTEU= -github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= -github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= -github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= -github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= -github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= -github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= -github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= -github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= 
-github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= -github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= -github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= -github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= -github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= -github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= -github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= -github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= -github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= -github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= -github.com/libp2p/go-libp2p-pubsub v0.8.2 h1:QLGUmkgKmwEVxVDYGsqc5t9CykOMY2Y21cXQHjR462I= -github.com/libp2p/go-libp2p-pubsub v0.8.2/go.mod h1:e4kT+DYjzPUYGZeWk4I+oxCSYTXizzXii5LDRRhjKSw= -github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw= +github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc= +github.com/libp2p/go-libp2p v0.38.2 h1:9SZQDOCi82A25An4kx30lEtr6kGTxrtoaDkbs5xrK5k= +github.com/libp2p/go-libp2p v0.38.2/go.mod h1:QWV4zGL3O9nXKdHirIC59DoRcZ446dfkjbOJ55NEWFo= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-kad-dht v0.28.2 h1:/VivUl/Ru0tVgkWNhDDBy8pK6q+gRdI+z8VfqmSUJWo= +github.com/libp2p/go-libp2p-kad-dht v0.28.2/go.mod h1:sUR/qh4p/5+YFXBtwOiCmIBeBA2YD94ttmL+Xk8+pTE= +github.com/libp2p/go-libp2p-kbucket v0.6.4 h1:OjfiYxU42TKQSB8t8WYd8MKhYhMJeO2If+NiuKfb6iQ= +github.com/libp2p/go-libp2p-kbucket v0.6.4/go.mod h1:jp6w82sczYaBsAypt5ayACcRJi0lgsba7o4TzJKEfWA= +github.com/libp2p/go-libp2p-pubsub v0.13.0 h1:RmFQ2XAy3zQtbt2iNPy7Tt0/3fwTnHpCQSSnmGnt1Ps= +github.com/libp2p/go-libp2p-pubsub v0.13.0/go.mod h1:m0gpUOyrXKXdE7c8FNQ9/HLfWbxaEw7xku45w+PaqZo= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= -github.com/libp2p/go-libp2p-secio v0.2.0/go.mod 
h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= -github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= -github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= -github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= -github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= -github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= -github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= -github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= -github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= -github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= -github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84= +github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= -github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= -github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= -github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= -github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= -github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= -github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= -github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= -github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= -github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= -github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= -github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod 
h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= -github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= -github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= -github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= -github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= -github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= -github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= -github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= -github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= -github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= -github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= -github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= -github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= -github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= -github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= -github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= -github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= -github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= -github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= -github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= -github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= -github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= -github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= -github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= -github.com/libp2p/go-reuseport-transport 
v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= -github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= -github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= -github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= -github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= -github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= -github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= -github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= -github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= -github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= -github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= -github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= -github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ= -github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= -github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.31.1 h1:O8Od7hfioqq0PMYHDyBkxU2aA7iZ2W9pjbrWuja2YR4= -github.com/lucas-clemente/quic-go v0.31.1/go.mod 
h1:0wFbizLgYzqHqtlyxyCaJKlE7bYgE6JQ+54TLd/Dq2g= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= +github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8= +github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= +github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/logrusorgru/aurora/v4 v4.0.0 h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA= +github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/m4ksio/wal v1.0.1-0.20221209164835-154a17396e4c h1:OqVcb1Dkheracn4fgCjxlfhuSnM8jmPbrWkJbRIC4fo= -github.com/m4ksio/wal v1.0.1-0.20221209164835-154a17396e4c/go.mod h1:5/Yq7mnb+VdE44ff+FL8LSOPEquOVqm/7Hz40U4VUZo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= -github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= -github.com/marten-seemann/qpack v0.3.0 h1:UiWstOgT8+znlkDPOg2+3rIuYXJ2CnGDkGUXN6ki6hE= -github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-18 v0.1.3 h1:R4H2Ks8P6pAtUagjFty2p7BVHn3XiwDAl7TTQf5h7TI= -github.com/marten-seemann/qtls-go1-18 v0.1.3/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= -github.com/marten-seemann/qtls-go1-19 v0.1.1 h1:mnbxeq3oEyQxQXwI4ReCgW9DPoPR94sNlqWoDZnjRIE= -github.com/marten-seemann/qtls-go1-19 v0.1.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod 
h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/marten-seemann/webtransport-go v0.4.3 h1:vkt5o/Ci+luknRteWdYGYH1KcB7ziup+J+1PzZJIvmg= -github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= -github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod 
h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= +github.com/mdlayher/packet v1.1.1/go.mod h1:DRvYY5mH4M4lUqAnMg04E60U4fjUKMZ/4g2cHElZkKo= +github.com/mdlayher/socket v0.4.0/go.mod h1:xxFqz5GRCUN3UEOm9CZqEJsAbe1C8OwSK46NlmWuVoc= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= -github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= -github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod 
h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= -github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/vpnkit v0.5.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -1207,297 +709,257 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= -github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= github.com/multiformats/go-base32 v0.1.0 
h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= -github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= -github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= -github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= -github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= -github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= -github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= -github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= -github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= +github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= -github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= -github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= 
-github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= -github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= -github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= -github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ= -github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= -github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= -github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= +github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= -github.com/multiformats/go-multihash v0.0.16/go.mod h1:zhfEIgVnB/rPMfxgFw15ZmGoNaKyNUIE4IWHG/kC+Ag= -github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= -github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= -github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= -github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= -github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= -github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= -github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= -github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= -github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-multistream v0.6.0 
h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA= +github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onflow/atree v0.5.0 h1:y3lh8hY2fUo8KVE2ALVcz0EiNTq0tXJ6YTXKYVDA+3E= -github.com/onflow/atree v0.5.0/go.mod h1:gBHU0M05qCbv9NN0kijLWMgC47gHVNBIp4KmsVFi0tc= -github.com/onflow/cadence v0.38.1 h1:8YpnE1ixAGB8hF3t+slkHGhjfIBJ95dqUS+sEHrM2kY= -github.com/onflow/cadence v0.38.1/go.mod h1:SpfjNhPsJxGIHbOthE9JD/e8JFaFY73joYLPsov+PY4= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 
h1:wV+gcgOY0oJK4HLZQYQoK+mm09rW1XSxf83yqJwj0n4= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3/go.mod h1:Osvy81E/+tscQM+d3kRFjktcIcZj2bmQ9ESqRQWDEx8= -github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 h1:X25A1dNajNUtE+KoV76wQ6BR6qI7G65vuuRXxDDqX7E= -github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3/go.mod h1:dqAUVWwg+NlOhsuBHex7bEWmsUjsiExzhe/+t4xNH6A= -github.com/onflow/flow-emulator v0.48.1-0.20230502171545-1c91ebbf6870 h1:sUFgXYvNGN5mFIONJxkf75A7W28JMKkGpFGDASr8i0k= -github.com/onflow/flow-emulator v0.48.1-0.20230502171545-1c91ebbf6870/go.mod h1:EJ1SQpXtjVrdtf2WoAfS2WE53RD6X4TuePk6cDZPBHk= -github.com/onflow/flow-ft/lib/go/contracts v0.7.0 h1:XEKE6qJUw3luhsYmIOteXP53gtxNxrwTohgxJXCYqBE= -github.com/onflow/flow-ft/lib/go/contracts v0.7.0/go.mod h1:kTMFIySzEJJeupk+7EmXs0EJ6CBWY/MV9fv9iYQk+RU= -github.com/onflow/flow-go-sdk v0.40.0 h1:s8uwoyTquN8tjdXpqGmNkXTjf79yUII8JExc5QEl4Xw= -github.com/onflow/flow-go-sdk v0.40.0/go.mod h1:34dxXk9Hp/bQw6Zy6+H44Xo0kQU+aJyQoqdDxq00rJM= -github.com/onflow/flow-go/crypto v0.24.7 h1:RCLuB83At4z5wkAyUCF7MYEnPoIIOHghJaODuJyEoW0= -github.com/onflow/flow-go/crypto v0.24.7/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0= -github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230428213521-89bcc9e8517e h1:QYEd3KWTt309YGBch4IGK6vJ6b7cOGx2NStEnd5NeHM= -github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230428213521-89bcc9e8517e/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= -github.com/onflow/go-bitswap v0.0.0-20221017184039-808c5791a8a8 h1:XcSR/n2aSVO7lOEsKScYALcpHlfowLwicZ9yVbL6bnA= -github.com/onflow/go-bitswap v0.0.0-20221017184039-808c5791a8a8/go.mod h1:73C8FlT4L/Qe4Cf5iXUNL8b2pvu4zs5dJMMJ5V2TjUI= -github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8= -github.com/onflow/sdks v0.5.0/go.mod h1:F0dj0EyHC55kknLkeD10js4mo14yTdMotnWMslPirrU= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onflow/atree v0.10.1 h1:8sixWP3l3LitcyuKkVepbIsLbfr7JN3cCB/iA1j2JD8= +github.com/onflow/atree v0.10.1/go.mod h1:+BuiL0XuIigHJqwkdIuDNzxXvyDx1jYUog/w+iZhcE8= +github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 h1:LpiQhTAfM9CAmNVEs0n//cBBgCg+vJSiIxTHYUklZ84= +github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80= +github.com/onflow/cadence v1.7.1 h1:VpPiC13e4qrdpyqaagd+dpHtQwfJ/djr97FgT3SovWA= +github.com/onflow/cadence v1.7.1/go.mod h1:1lKdLNVHIoO0jEjkRPMtOmBWYCG1An9TXSoiCuGIIpo= +github.com/onflow/crypto v0.25.3 h1:XQ3HtLsw8h1+pBN+NQ1JYM9mS2mVXTyg55OldaAIF7U= +github.com/onflow/crypto v0.25.3/go.mod h1:+1igaXiK6Tjm9wQOBD1EGwW7bYWMUGKtwKJ/2QL/OWs= +github.com/onflow/fixed-point v0.1.1 h1:j0jYZVO8VGyk1476alGudEg7XqCkeTVxb5ElRJRKS90= +github.com/onflow/fixed-point v0.1.1/go.mod h1:gJdoHqKtToKdOZbvryJvDZfcpzC7d2fyWuo3ZmLtcGY= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.0 h1:m6lHp0xDdmVWbpbTpFlq6XxVrB+2J8qwnzMV30zdZeM= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.9.0/go.mod h1:jBDqVep0ICzhXky56YlyO4aiV2Jl/5r7wnqUPpvi7zE= +github.com/onflow/flow-core-contracts/lib/go/templates v1.9.0 h1:8jn4Lxp/dpyWdgJ+5XEDUkYOf2aveObZtHtkdnYIEco= +github.com/onflow/flow-core-contracts/lib/go/templates v1.9.0/go.mod h1:twSVyUt3rNrgzAmxtBX+1Gw64QlPemy17cyvnXYy1Ug= +github.com/onflow/flow-evm-bridge v0.1.0 h1:7X2osvo4NnQgHj8aERUmbYtv9FateX8liotoLnPL9nM= +github.com/onflow/flow-evm-bridge 
v0.1.0/go.mod h1:5UYwsnu6WcBNrwitGFxphCl5yq7fbWYGYuiCSTVF6pk= +github.com/onflow/flow-ft/lib/go/contracts v1.0.1 h1:Ts5ob+CoCY2EjEd0W6vdLJ7hLL3SsEftzXG2JlmSe24= +github.com/onflow/flow-ft/lib/go/contracts v1.0.1/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= +github.com/onflow/flow-ft/lib/go/templates v1.0.1 h1:FDYKAiGowABtoMNusLuRCILIZDtVqJ/5tYI4VkF5zfM= +github.com/onflow/flow-ft/lib/go/templates v1.0.1/go.mod h1:uQ8XFqmMK2jxyBSVrmyuwdWjTEb+6zGjRYotfDJ5pAE= +github.com/onflow/flow-go-sdk v1.8.4 h1:WHtVjryOU6ZJx0jUSjBPOrWoGqGDr+eEejyIkfbiBCE= +github.com/onflow/flow-go-sdk v1.8.4/go.mod h1:Jli9sI78LAnoC3OVGeAs0ngOezoLTfE/GrKOAB9TbTw= +github.com/onflow/flow-nft/lib/go/contracts v1.3.0 h1:DmNop+O0EMyicZvhgdWboFG57xz5t9Qp81FKlfKyqJc= +github.com/onflow/flow-nft/lib/go/contracts v1.3.0/go.mod h1:eZ9VMMNfCq0ho6kV25xJn1kXeCfxnkhj3MwF3ed08gY= +github.com/onflow/flow-nft/lib/go/templates v1.3.0 h1:uGIBy4GEY6Z9hKP7sm5nA5kwvbvLWW4nWx5NN9Wg0II= +github.com/onflow/flow-nft/lib/go/templates v1.3.0/go.mod h1:gVbb5fElaOwKhV5UEUjM+JQTjlsguHg2jwRupfM/nng= +github.com/onflow/flow/protobuf/go/flow v0.4.16 h1:UADQeq/mpuqFk+EkwqDNoF70743raWQKmB/Dm/eKt2Q= +github.com/onflow/flow/protobuf/go/flow v0.4.16/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/go-ds-pebble v0.0.0-20251003225212-131edca3a897 h1:ZtFYJ3OSR00aiKMMxgm3fRYWqYzjvDXeoBGQm6yC8DE= +github.com/onflow/go-ds-pebble v0.0.0-20251003225212-131edca3a897/go.mod h1:aiCRVcj3K60sxc6k5C+HO9C6rouqiSkjR/WKnbTcMfQ= +github.com/onflow/go-ethereum v1.16.2 h1:yhC3DA5PTNmUmu7ziq8GmWyQ23KNjle4jCabxpKYyNk= +github.com/onflow/go-ethereum v1.16.2/go.mod h1:1vsrG/9APHPqt+mVFni60hIXkqkVdU9WQayNjYi/Ah4= +github.com/onflow/nft-storefront/lib/go/contracts v1.0.0 h1:sxyWLqGm/p4EKT6DUlQESDG1ZNMN9GjPCm1gTq7NGfc= +github.com/onflow/nft-storefront/lib/go/contracts v1.0.0/go.mod h1:kMeq9zUwCrgrSojEbTUTTJpZ4WwacVm2pA7LVFr+glk= +github.com/onflow/sdks v0.6.0-preview.1 h1:mb/cUezuqWEP1gFZNAgUI4boBltudv4nlfxke1KBp9k= +github.com/onflow/sdks v0.6.0-preview.1/go.mod h1:F0dj0EyHC55kknLkeD10js4mo14yTdMotnWMslPirrU= +github.com/onflow/wal v1.0.2 h1:5bgsJVf2O3cfMNK12fiiTyYZ8cOrUiELt3heBJfHOhc= +github.com/onflow/wal v1.0.2/go.mod h1:iMC8gkLqu4nkbkAla5HkSBb+FGyQOZiWz3DYm2wSXCk= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.6.1 h1:1xQPCjcqYw/J5LchOcp4/2q/jzJFjiAOc25chhnDw+Q= -github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= 
+github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= 
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= -github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= -github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= -github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pjbgf/sha1cd v0.2.3 h1:uKQP/7QOzNtKYH7UTohZLcjF5/55EnTw0jO/Ru4jZwI= -github.com/pjbgf/sha1cd v0.2.3/go.mod h1:HOK9QrgzdHpbc2Kzip0Q1yi3M2MFGPADtR6HjG65m5M= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= 
+github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= +github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v2 v2.3.37 h1:ObIdaNDu1rCo7hObhs34YSBcO7fjslJMZV0ux+uZWh0= +github.com/pion/ice/v2 v2.3.37/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= +github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI= +github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y= +github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= +github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= +github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/rtp v1.8.10 h1:puphjdbjPB+L+NFaVuZ5h6bt1g5q4kFIoI+r5q/g0CU= +github.com/pion/rtp v1.8.10/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4= +github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA= +github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg= +github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= +github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= +github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= +github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= +github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0= +github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= +github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= +github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/webrtc/v3 v3.3.5 
h1:ZsSzaMz/i9nblPdiAkZoP+E6Kmjw+jnyq3bEmU3EtRg= +github.com/pion/webrtc/v3 v3.3.5/go.mod h1:liNa+E1iwyzyXqNUwvoMRNQ10x8h8FOeJKL8RkIbamE= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= -github.com/plus3it/gorecurcopy v0.0.1 h1:H7AgvM0N/uIo7o1PQRlewEGQ92BNr7DqbPy5lnR3uJI= -github.com/plus3it/gorecurcopy v0.0.1/go.mod h1:NvVTm4RX68A1vQbHmHunDO4OtBLVroT6CrsiqAzNyJA= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= 
-github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod 
h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= github.com/psiemens/graceland v1.0.0 h1:L580AVV4Q2XLcPpmvxJRH9UpEAYr/eu2jBKmMglhvM8= github.com/psiemens/graceland v1.0.0/go.mod h1:1Tof+vt1LbmcZFE0lzgdwMN0QBymAChG3FRgDx8XisU= github.com/psiemens/sconfig v0.1.0 h1:xfWqW+TRpih7mXZIqKYTmpRhlZLQ1kbxV8EjllPv76s= github.com/psiemens/sconfig v0.1.0/go.mod h1:+MLKqdledP/8G3rOBpknbLh0IclCf4WneJUtS26JB2U= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= +github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= +github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.2.1-0.20211004051800-57c86be7915a h1:s7GrsqeorVkFR1vGmQ6WVL9nup0eyQCC+YVUeSQLH/Q= -github.com/rivo/uniseg v0.2.1-0.20211004051800-57c86be7915a/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod 
h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rootless-containers/rootlesskit v1.1.1 h1:F5psKWoWY9/VjZ3ifVcaosjvFZJOagX85U22M0/EQZE= +github.com/rootless-containers/rootlesskit v1.1.1/go.mod h1:UD5GoA3dqKCJrnvnhVgQQnweMF2qZnf9KLw8EewcMZI= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/schollz/progressbar/v3 v3.8.3 h1:FnLGl3ewlDUP+YdSwveXBaXs053Mem/du+wr7XSYKl8= -github.com/schollz/progressbar/v3 v3.8.3/go.mod h1:pWnVCjSBZsT2X3nx9HfRdnCDrpbevliMeoEVhStwHko= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= +github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-retry v0.2.3 h1:oYlgvIvsju3jNbottWABtbnoLC+GDtLdBHxKWxQm/iU= github.com/sethvargo/go-retry v0.2.3/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= 
-github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil/v3 v3.22.2 h1:wCrArWFkHYIdDxx/FSfF5RB4dpJYW6t7rcp3+zL8uks= -github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -1521,160 +983,131 @@ github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go. github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0= -github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag= -github.com/slok/go-http-metrics v0.10.0 h1:rh0LaYEKza5eaYRGDXujKrOln57nHBi4TtVhmNEpbgM= -github.com/slok/go-http-metrics v0.10.0/go.mod h1:lFqdaS4kWMfUKCSukjC47PdCeTk+hXDUVm8kLHRqJ38= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= 
+github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/slok/go-http-metrics v0.12.0 h1:mAb7hrX4gB4ItU6NkFoKYdBslafg3o60/HbGBRsKaG8= +github.com/slok/go-http-metrics v0.12.0/go.mod h1:Ee/mdT9BYvGrlGzlClkK05pP2hRHmVbRF9dtUVS8LNA= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= +github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= +github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.0 h1:sFSLUHgxdnN32Qy38hK3QkYBFXZj9DKjVjCUCtD7juY= -github.com/spf13/afero v1.9.0/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= 
-github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= -github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= -github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= -github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= -github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= -github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= -github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= -github.com/supranational/blst v0.3.10 h1:CMciDZ/h4pXDDXQASe8ZGTNKUiVNxVVA5hpci2Uuhuk= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo= +github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg= github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c/go.mod h1:JlzghshsemAMDGZLytTFY8C1JQxQPhnatWqNwUXjggo= -github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= -github.com/tmc/grpc-websocket-proxy 
v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d h1:5JInRQbk5UBX8JfUvKh2oYTLMVwj3p6n+wapDDm7hko= github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d/go.mod h1:Nlx5Y115XQvNcIdIy7dZXaNSUpzwBSge4/Ivk93/Yog= -github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/u-root/uio v0.0.0-20230220225925-ffce2a382923/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.25.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= -github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/warpfork/go-testmark v0.3.0 h1:Q81c4u7hT+BR5kNfNQhEF0VT2pmL7+Kk0wD+ORYl7iA= -github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= +github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod 
h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= -github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= -github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= -github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= -github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= -github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= -github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yhassanzadeh13/go-libp2p-pubsub v0.6.2-0.20221208234712-b44d9133e4ee h1:yFB2xjfswpuRh8FHagdBMKcBMltjr5u/XKzX6fkJO5E= -github.com/yhassanzadeh13/go-libp2p-pubsub v0.6.2-0.20221208234712-b44d9133e4ee/go.mod h1:Tylw4k1H86gbJx84i3r7qahN/mBaeMpUBvHY0Igshfw= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= -github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= -github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= -github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= @@ -1682,233 +1115,151 @@ github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaD go.einride.tech/pid v0.1.0 h1:7eO7+9gXBMb+G3HIn/68wOfLhCu1gtdt55Jkj754/gg= go.einride.tech/pid v0.1.0/go.mod h1:wWWiiuBM69aJ3o/KK3OCDYlkhMKB5F+sVkybR/wRJVk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg= -go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 h1:ao8CJIShCaIbaMsGxy+jp2YHSudketpDgDRcbirov78= 
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 h1:LrHL1A3KqIgAgi6mK7Q0aczmzU414AONAGT5xtnp+uo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0/go.mod h1:w8aZL87GMOvOBa2lU/JlVXE1q4chk/0FX+8ai4513bw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0 h1:00hCSGLIxdYK/Z7r8GkaX0QIlfvgU3tmnLlQvcnix6U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0/go.mod h1:twhIvtDQW2sWP1O2cT1N8nkSBgKCRZv2z6COTTBrf8Q= -go.opentelemetry.io/otel/sdk v1.8.0 h1:xwu69/fNuwbSHWe/0PGS888RmjWY181OmcXDQKu7ZQk= -go.opentelemetry.io/otel/sdk v1.8.0/go.mod h1:uPSfc+yfDH2StDM/Rm35WE8gXSNdvCg023J6HeGNO0c= -go.opentelemetry.io/otel/trace v1.8.0 h1:cSy0DF9eGI5WIfNwZ1q2iUyGj00tGzP24dE1lOlHrfY= -go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.18.0 h1:W5hyXNComRa23tGpKwG+FRAc4rfF6ZUg1JReK+QHS80= -go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX51SXyTSoOTqcDglmsk7nT6tkKPb/k= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= 
+go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.15.0 h1:vq3YWr8zRj1eFGC7Gvf907hE0eRjPTZ1d3xHadD6liE= -go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= -go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU= -go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= -go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg= +go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= 
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= -golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
-golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= -golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15 h1:5oN1Pz/eDhCpbMbLstvIPa0b/BEQo6g6nwV3pLjfM6w= -golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 
-golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1916,346 +1267,179 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210105210732-16f7687f5001/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025112917-711f33c9992c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= -gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api 
v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.247.0 h1:tSd/e0QrUlLsrwMKmkbQhYVa109qIintOls2Wh6bngc= +google.golang.org/api v0.247.0/go.mod h1:r1qZOPmxXffXg6xS5uhx16Fa/UFY8QU/K4bfKrnvovM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250804133106-a7a43d27e69b h1:YzmLjVBzUKrr0zPM1KkGPEicd3WHSccw1k9RivnvngU= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc 
v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2266,86 +1450,56 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= -gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 
v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= -lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -modernc.org/libc v1.22.3 h1:D/g6O5ftAfavceqlLOFwaZuA5KYafKwmr30A6iSqoyY= -modernc.org/libc v1.22.3/go.mod h1:MQrloYP209xa2zHome2a8HLiLm6k0UT8CoHpV74tOFw= -modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= -modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/sqlite v1.21.1 h1:GyDFqNnESLOhwwDRaHGdp2jKLDzpyT/rNLglX3ZkMSU= -modernc.org/sqlite v1.21.1/go.mod h1:XwQ0wZPIh1iKb5mkvCJ3szzbhk+tykC8ZWqTRTgYRwI= -pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes 
v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/integration/internal/emulator/blockTicker.go b/integration/internal/emulator/blockTicker.go
new file mode 100644
index 00000000000..40ef1ce7e34
--- /dev/null
+++ b/integration/internal/emulator/blockTicker.go
@@ -0,0 +1,61 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package emulator
+
+import (
+	"time"
+)
+
+// BlocksTicker executes and commits a block on the underlying emulator at a fixed interval.
+type BlocksTicker struct {
+	emulator Emulator
+	ticker   *time.Ticker
+	done     chan bool
+}
+
+// NewBlocksTicker returns a ticker that mines a block every blockTime.
+func NewBlocksTicker(
+	emulator Emulator,
+	blockTime time.Duration,
+) *BlocksTicker {
+	return &BlocksTicker{
+		emulator: emulator,
+		ticker:   time.NewTicker(blockTime),
+		done:     make(chan bool, 1),
+	}
+}
+
+// Start runs the ticker loop and blocks until Stop is called.
+func (t *BlocksTicker) Start() error {
+	for {
+		select {
+		case <-t.ticker.C:
+			// block production is best-effort: execution and commit errors are ignored
+			_, _ = t.emulator.ExecuteBlock()
+			_, _ = t.emulator.CommitBlock()
+		case <-t.done:
+			return nil
+		}
+	}
+}
+
+// Stop signals the ticker loop to exit; the buffered channel makes a single call non-blocking.
+func (t *BlocksTicker) Stop() {
+	t.done <- true
+}
diff --git a/integration/internal/emulator/blockchain.go b/integration/internal/emulator/blockchain.go
new file mode 100644
index 00000000000..c92288bda1a
--- /dev/null
+++ b/integration/internal/emulator/blockchain.go
@@ -0,0 +1,1081 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package emulator provides an emulated version of the Flow blockchain that can be used
+// for development purposes.
+//
+// This package can be used as a library or as a standalone application.
+//
+// When used as a library, this package provides tools to write programmatic tests for
+// Flow applications.
+//
+// When used as a standalone application, this package implements the Flow Access API
+// and is fully-compatible with Flow gRPC client libraries.
+package emulator
+
+import (
+	"context"
+	_ "embed"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/cadence"
+	"github.com/onflow/cadence/runtime"
+
+	"github.com/onflow/flow-core-contracts/lib/go/templates"
+
+	"github.com/onflow/flow-go/access/validator"
+	"github.com/onflow/flow-go/engine"
+	"github.com/onflow/flow-go/fvm"
+	"github.com/onflow/flow-go/fvm/environment"
+	fvmerrors "github.com/onflow/flow-go/fvm/errors"
+	reusableRuntime "github.com/onflow/flow-go/fvm/runtime"
+	accessmodel "github.com/onflow/flow-go/model/access"
+	flowgo "github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+)
+
+// systemChunkTransactionTemplate looks for the RandomBeaconHistory
+// heartbeat resource on the service account and calls it.
+//
+//go:embed templates/systemChunkTransactionTemplate.cdc
+var systemChunkTransactionTemplate string
+
+var _ Emulator = &Blockchain{}
+
+// New instantiates a new emulated blockchain with the provided options.
+func New(opts ...Option) (*Blockchain, error) {
+
+	// apply options to the default config
+	conf := defaultConfig
+	for _, opt := range opts {
+		opt(&conf)
+	}
+	b := &Blockchain{
+		storage:         conf.GetStore(),
+		broadcaster:     engine.NewBroadcaster(),
+		serviceKey:      conf.GetServiceKey(),
+		conf:            conf,
+		entropyProvider: &blockHashEntropyProvider{},
+	}
+	return b.ReloadBlockchain()
+}
+
+func (b *Blockchain) Now() time.Time {
+	if b.clockOverride != nil {
+		return b.clockOverride()
+	}
+	return time.Now().UTC()
+}
+
+// Blockchain emulates the functionality of the Flow blockchain.
+type Blockchain struct {
+	// committed chain state: blocks, transactions, registers, events
+	storage     EmulatorStorage
+	broadcaster *engine.Broadcaster
+
+	// mutex protecting pending block
+	mu sync.RWMutex
+
+	// pending block containing block info, register state, pending transactions
+	pendingBlock  *pendingBlock
+	clockOverride func() time.Time
+	// used to execute transactions and scripts
+	vm                   *fvm.VirtualMachine
+	vmCtx                fvm.Context
+	transactionValidator *validator.TransactionValidator
+	serviceKey           ServiceKey
+	conf                 config
+	entropyProvider      *blockHashEntropyProvider
+}
+
+func (b *Blockchain) Broadcaster() *engine.Broadcaster {
+	return b.broadcaster
+}
+
+func (b *Blockchain) ReloadBlockchain() (*Blockchain, error) {
+
+	b.vm = fvm.NewVirtualMachine()
+	b.vmCtx = fvm.NewContext(
+		fvm.WithLogger(b.conf.Logger),
+		fvm.WithCadenceLogging(true),
+		fvm.WithChain(b.conf.GetChainID().Chain()),
+		fvm.WithBlocks(b.storage),
+		fvm.WithContractDeploymentRestricted(false),
+		fvm.WithContractRemovalRestricted(!b.conf.ContractRemovalEnabled),
+		fvm.WithComputationLimit(b.conf.ScriptGasLimit),
+		fvm.WithAccountStorageLimit(b.conf.StorageLimitEnabled),
+		fvm.WithTransactionFeesEnabled(b.conf.TransactionFeesEnabled),
+		fvm.WithReusableCadenceRuntimePool(
+			reusableRuntime.NewReusableCadenceRuntimePool(
+				0,
+				runtime.Config{}),
+		),
+		fvm.WithEntropyProvider(b.entropyProvider),
+		fvm.WithEVMEnabled(true),
+		fvm.WithAuthorizationChecksEnabled(b.conf.TransactionValidationEnabled),
+		fvm.WithSequenceNumberCheckAndIncrementEnabled(b.conf.TransactionValidationEnabled),
+	)
+
+	latestBlock, latestLedger, err := configureLedger(
+		b.conf,
+		b.storage,
+		b.vm,
+		b.vmCtx)
+	if err != nil {
+		return nil, err
+	}
+
+	b.pendingBlock = newPendingBlock(latestBlock, latestLedger, b.conf.GetChainID(), b.Now())
+	err = b.configureTransactionValidator()
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
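To make the intended library usage concrete, here is a minimal, hypothetical test harness (not part of this diff) that constructs the emulator with `New` and drives block production with the `BlocksTicker` from blockTicker.go. It assumes the default options yield a usable store and that the test lives inside the `integration` module, since the package is internal:

```go
package emulator_test

import (
	"testing"
	"time"

	emulator "github.com/onflow/flow-go/integration/internal/emulator"
)

func TestBlockProduction(t *testing.T) {
	// New applies functional options over the default config;
	// passing no options means an all-default emulator.
	b, err := emulator.New()
	if err != nil {
		t.Fatal(err)
	}

	// *Blockchain satisfies the Emulator interface expected by the ticker,
	// per the `var _ Emulator = &Blockchain{}` assertion above.
	ticker := emulator.NewBlocksTicker(b, 100*time.Millisecond)
	go func() { _ = ticker.Start() }() // Start blocks until Stop is called
	defer ticker.Stop()

	time.Sleep(300 * time.Millisecond)

	latest, err := b.GetLatestBlock()
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("latest sealed height: %d", latest.Height)
}
```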
+func (b *Blockchain) EnableAutoMine() {
+	b.conf.AutoMine = true
+}
+
+func (b *Blockchain) DisableAutoMine() {
+	b.conf.AutoMine = false
+}
+
+func (b *Blockchain) Ping() error {
+	return nil
+}
+
+func (b *Blockchain) GetChain() flowgo.Chain {
+	return b.vmCtx.Chain
+}
+
+func (b *Blockchain) GetNetworkParameters() accessmodel.NetworkParameters {
+	return accessmodel.NetworkParameters{
+		ChainID: b.GetChain().ChainID(),
+	}
+}
+
+// blockHashEntropyProvider implements `environment.EntropyProvider`, which provides
+// a source of entropy to the FVM context (required for Cadence's randomness)
+// by using the latest block hash.
+type blockHashEntropyProvider struct {
+	LatestBlock flowgo.Identifier
+}
+
+func (gen *blockHashEntropyProvider) RandomSource() ([]byte, error) {
+	return gen.LatestBlock[:], nil
+}
+
+// make sure blockHashEntropyProvider implements `environment.EntropyProvider`
+var _ environment.EntropyProvider = &blockHashEntropyProvider{}
+
+func (b *Blockchain) configureTransactionValidator() error {
+	validator, err := validator.NewTransactionValidator(
+		b.storage,
+		b.conf.GetChainID().Chain(),
+		metrics.NewNoopCollector(),
+		validator.TransactionValidationOptions{
+			Expiry:                       b.conf.TransactionExpiry,
+			ExpiryBuffer:                 0,
+			AllowEmptyReferenceBlockID:   b.conf.TransactionExpiry == 0,
+			AllowUnknownReferenceBlockID: false,
+			MaxGasLimit:                  b.conf.TransactionMaxGasLimit,
+			CheckScriptsParse:            true,
+			MaxTransactionByteSize:       flowgo.DefaultMaxTransactionByteSize,
+			MaxCollectionByteSize:        flowgo.DefaultMaxCollectionByteSize,
+			CheckPayerBalanceMode:        validator.Disabled,
+		},
+		nil,
+	)
+	if err != nil {
+		return err
+	}
+	b.transactionValidator = validator
+	return nil
+}
+
+func (b *Blockchain) setFVMContextFromHeader(header *flowgo.Header) fvm.Context {
+	b.vmCtx = fvm.NewContextFromParent(
+		b.vmCtx,
+		fvm.WithBlockHeader(header),
+	)
+	return b.vmCtx
+}
+
+// ServiceKey returns the service private key for this emulator.
+func (b *Blockchain) ServiceKey() ServiceKey {
+	serviceAccount, err := b.getAccount(b.serviceKey.Address)
+	if err != nil {
+		return b.serviceKey
+	}
+
+	if len(serviceAccount.Keys) > 0 {
+		b.serviceKey.Index = 0
+		b.serviceKey.SequenceNumber = serviceAccount.Keys[0].SeqNumber
+		b.serviceKey.Weight = serviceAccount.Keys[0].Weight
+	}
+
+	return b.serviceKey
+}
+
+// PendingBlockID returns the ID of the pending block.
+func (b *Blockchain) PendingBlockID() flowgo.Identifier {
+	return b.pendingBlock.Block().ID()
+}
+
+// PendingBlockView returns the view of the pending block.
+func (b *Blockchain) PendingBlockView() uint64 {
+	return b.pendingBlock.view
+}
+
+// PendingBlockTimestamp returns the timestamp of the pending block (Unix time in milliseconds).
+func (b *Blockchain) PendingBlockTimestamp() uint64 {
+	return b.pendingBlock.Block().Timestamp
+}
+
+// GetLatestBlock gets the latest sealed block.
+func (b *Blockchain) GetLatestBlock() (*flowgo.Block, error) {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.getLatestBlock()
+}
+
+func (b *Blockchain) getLatestBlock() (*flowgo.Block, error) {
+	block, err := b.storage.LatestBlock(context.Background())
+	if err != nil {
+		return nil, err
+	}
+
+	return &block, nil
+}
+
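Because `environment.EntropyProvider` is a single-method interface, tests that need reproducible Cadence randomness can swap in a fixed source. A sketch under that assumption, using only the `fvm.WithEntropyProvider` option already shown in `ReloadBlockchain`; the `fixedEntropyProvider` type is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/onflow/flow-go/fvm"
	"github.com/onflow/flow-go/fvm/environment"
)

// fixedEntropyProvider always returns the same seed, making Cadence's
// randomness deterministic across runs (unlike blockHashEntropyProvider,
// which follows the latest block hash).
type fixedEntropyProvider struct {
	seed []byte
}

func (p *fixedEntropyProvider) RandomSource() ([]byte, error) {
	return p.seed, nil
}

var _ environment.EntropyProvider = &fixedEntropyProvider{}

func main() {
	provider := &fixedEntropyProvider{seed: []byte("deterministic-seed")}
	// Wired into an FVM context the same way ReloadBlockchain wires
	// its block-hash-based provider.
	ctx := fvm.NewContext(fvm.WithEntropyProvider(provider))
	_ = ctx
	fmt.Println("deterministic entropy provider installed")
}
```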
+func (b *Blockchain) GetBlockByID(id flowgo.Identifier) (*flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + return b.getBlockByID(id) +} + +func (b *Blockchain) getBlockByID(id flowgo.Identifier) (*flowgo.Block, error) { + block, err := b.storage.BlockByID(context.Background(), id) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, &BlockNotFoundByIDError{ID: id} + } + + return nil, err + } + + return block, nil +} + +// GetBlockByHeight gets a block by height. +func (b *Blockchain) GetBlockByHeight(height uint64) (*flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + block, err := b.getBlockByHeight(height) + if err != nil { + return nil, err + } + + return block, nil +} + +func (b *Blockchain) getBlockByHeight(height uint64) (*flowgo.Block, error) { + block, err := b.storage.BlockByHeight(context.Background(), height) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, &BlockNotFoundByHeightError{Height: height} + } + return nil, err + } + + return block, nil +} + +func (b *Blockchain) GetCollectionByID(colID flowgo.Identifier) (*flowgo.LightCollection, error) { + b.mu.RLock() + defer b.mu.RUnlock() + return b.getCollectionByID(colID) +} + +func (b *Blockchain) getCollectionByID(colID flowgo.Identifier) (*flowgo.LightCollection, error) { + col, err := b.storage.CollectionByID(context.Background(), colID) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, &CollectionNotFoundError{ID: colID} + } + return nil, err + } + + return &col, nil +} + +func (b *Blockchain) GetFullCollectionByID(colID flowgo.Identifier) (*flowgo.Collection, error) { + b.mu.RLock() + defer b.mu.RUnlock() + return b.getFullCollectionByID(colID) +} + +func (b *Blockchain) getFullCollectionByID(colID flowgo.Identifier) (*flowgo.Collection, error) { + col, err := b.storage.FullCollectionByID(context.Background(), colID) + if err != nil { + if errors.Is(err, ErrNotFound) { + return nil, &CollectionNotFoundError{ID: colID} + } + return nil, err + } + + return &col, nil +} + +// GetTransaction gets an existing transaction by ID. +// +// The function first looks in the pending block, then the current emulator state. 
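+// A TransactionNotFoundError is returned if it exists in neither.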
+func (b *Blockchain) GetTransaction(txID flowgo.Identifier) (*flowgo.TransactionBody, error) {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.getTransaction(txID)
+}
+
+func (b *Blockchain) getTransaction(txID flowgo.Identifier) (*flowgo.TransactionBody, error) {
+	pendingTx := b.pendingBlock.GetTransaction(txID)
+	if pendingTx != nil {
+		return pendingTx, nil
+	}
+
+	tx, err := b.storage.TransactionByID(context.Background(), txID)
+	if err != nil {
+		if errors.Is(err, ErrNotFound) {
+			return nil, &TransactionNotFoundError{ID: txID}
+		}
+		return nil, err
+	}
+
+	return &tx, nil
+}
+
+func (b *Blockchain) GetTransactionResult(txID flowgo.Identifier) (*accessmodel.TransactionResult, error) {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	return b.getTransactionResult(txID)
+}
+
+func (b *Blockchain) getTransactionResult(txID flowgo.Identifier) (*accessmodel.TransactionResult, error) {
+	if b.pendingBlock.ContainsTransaction(txID) {
+		return &accessmodel.TransactionResult{
+			Status: flowgo.TransactionStatusPending,
+		}, nil
+	}
+
+	storedResult, err := b.storage.TransactionResultByID(context.Background(), txID)
+	if err != nil {
+		if errors.Is(err, ErrNotFound) {
+			return &accessmodel.TransactionResult{
+				Status: flowgo.TransactionStatusUnknown,
+			}, nil
+		}
+		return nil, err
+	}
+
+	statusCode := 0
+	if storedResult.ErrorCode > 0 {
+		statusCode = 1
+	}
+	result := accessmodel.TransactionResult{
+		Status:        flowgo.TransactionStatusSealed,
+		StatusCode:    uint(statusCode),
+		ErrorMessage:  storedResult.ErrorMessage,
+		Events:        storedResult.Events,
+		TransactionID: txID,
+		BlockHeight:   storedResult.BlockHeight,
+		BlockID:       storedResult.BlockID,
+	}
+
+	return &result, nil
+}
+
+// GetAccountByIndex returns the account at the given address index.
+func (b *Blockchain) GetAccountByIndex(index uint) (*flowgo.Account, error) {
+
+	address, err := b.vmCtx.Chain.ChainID().Chain().AddressAtIndex(uint64(index))
+	if err != nil {
+		return nil, err
+	}
+
+	latestBlock, err := b.getLatestBlock()
+	if err != nil {
+		return nil, err
+	}
+	return b.getAccountAtBlock(address, latestBlock.Height)
+}
+
+// GetAccount returns the account for the given address.
+func (b *Blockchain) GetAccount(address flowgo.Address) (*flowgo.Account, error) {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.getAccount(address)
+}
+
+// getAccount returns the account for the given address.
+func (b *Blockchain) getAccount(address flowgo.Address) (*flowgo.Account, error) {
+	latestBlock, err := b.getLatestBlock()
+	if err != nil {
+		return nil, err
+	}
+	return b.getAccountAtBlock(address, latestBlock.Height)
+}
+
+// GetAccountAtBlockHeight returns the account for the given address at the specified block height.
+func (b *Blockchain) GetAccountAtBlockHeight(address flowgo.Address, blockHeight uint64) (*flowgo.Account, error) {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	account, err := b.getAccountAtBlock(address, blockHeight)
+	if err != nil {
+		return nil, err
+	}
+
+	return account, nil
+}
+
+// getAccountAtBlock returns the account for the given address at the specified block height.
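+// An AccountNotFoundError is returned if no account exists at that address and height.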
+func (b *Blockchain) getAccountAtBlock(address flowgo.Address, blockHeight uint64) (*flowgo.Account, error) {
+	ledger, err := b.storage.LedgerByHeight(context.Background(), blockHeight)
+	if err != nil {
+		return nil, err
+	}
+
+	account, err := fvm.GetAccount(b.vmCtx, address, ledger)
+	if err != nil {
+		if fvmerrors.IsAccountNotFoundError(err) {
+			return nil, &AccountNotFoundError{Address: address}
+		}
+		return nil, err
+	}
+
+	return account, nil
+}
+
+func (b *Blockchain) GetEventsForBlockIDs(eventType string, blockIDs []flowgo.Identifier) ([]flowgo.BlockEvents, error) {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	var result []flowgo.BlockEvents
+	for _, blockID := range blockIDs {
+		block, err := b.storage.BlockByID(context.Background(), blockID)
+		if err != nil {
+			return nil, err
+		}
+		events, err := b.storage.EventsByHeight(context.Background(), block.Height, eventType)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, flowgo.BlockEvents{
+			BlockID:        block.ID(),
+			BlockHeight:    block.Height,
+			BlockTimestamp: time.UnixMilli(int64(block.Timestamp)).UTC(),
+			Events:         events,
+		})
+	}
+
+	return result, nil
+}
+
+func (b *Blockchain) GetEventsForHeightRange(eventType string, startHeight, endHeight uint64) ([]flowgo.BlockEvents, error) {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	var result []flowgo.BlockEvents
+	for blockHeight := startHeight; blockHeight <= endHeight; blockHeight++ {
+		block, err := b.storage.BlockByHeight(context.Background(), blockHeight)
+		if err != nil {
+			return nil, err
+		}
+
+		events, err := b.storage.EventsByHeight(context.Background(), blockHeight, eventType)
+		if err != nil {
+			return nil, err
+		}
+
+		result = append(result, flowgo.BlockEvents{
+			BlockID:        block.ID(),
+			BlockHeight:    block.Height,
+			BlockTimestamp: time.UnixMilli(int64(block.Timestamp)).UTC(),
+			Events:         events,
+		})
+	}
+
+	return result, nil
+}
+
+// GetEventsByHeight returns the events in the block at the given height, optionally filtered by type.
+func (b *Blockchain) GetEventsByHeight(blockHeight uint64, eventType string) ([]flowgo.Event, error) {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	return b.storage.EventsByHeight(context.Background(), blockHeight, eventType)
+}
+
+// SendTransaction submits a transaction to the network.
+func (b *Blockchain) SendTransaction(flowTx *flowgo.TransactionBody) error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	err := b.addTransaction(*flowTx)
+	if err != nil {
+		return err
+	}
+
+	if b.conf.AutoMine {
+		_, _, err := b.executeAndCommitBlock()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// AddTransaction validates a transaction and adds it to the current pending block.
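+// A DuplicateTransactionError is returned for already-known transactions, and
+// access-layer validation failures are converted via ConvertAccessError.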
+func (b *Blockchain) AddTransaction(tx flowgo.TransactionBody) error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	return b.addTransaction(tx)
+}
+
+func (b *Blockchain) addTransaction(tx flowgo.TransactionBody) error {
+
+	// if the pending block has begun execution, no more transactions can be added
+	if b.pendingBlock.ExecutionStarted() {
+		return &PendingBlockMidExecutionError{BlockID: b.pendingBlock.Block().ID()}
+	}
+
+	if b.pendingBlock.ContainsTransaction(tx.ID()) {
+		return &DuplicateTransactionError{TxID: tx.ID()}
+	}
+
+	_, err := b.storage.TransactionByID(context.Background(), tx.ID())
+	if err == nil {
+		// Found the transaction, this is a duplicate
+		return &DuplicateTransactionError{TxID: tx.ID()}
+	} else if !errors.Is(err, ErrNotFound) {
+		// Error in the storage provider
+		return fmt.Errorf("failed to check storage for transaction: %w", err)
+	}
+
+	err = b.transactionValidator.Validate(context.Background(), &tx)
+	if err != nil {
+		return ConvertAccessError(err)
+	}
+
+	// add transaction to pending block
+	b.pendingBlock.AddTransaction(tx)
+
+	return nil
+}
+
+// ExecuteBlock executes the remaining transactions in pending block.
+func (b *Blockchain) ExecuteBlock() ([]*TransactionResult, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	return b.executeBlock()
+}
+
+func (b *Blockchain) executeBlock() ([]*TransactionResult, error) {
+	results := make([]*TransactionResult, 0)
+
+	// empty blocks do not require execution, treat as a no-op
+	if b.pendingBlock.Empty() {
+		return results, nil
+	}
+
+	header := b.pendingBlock.Block().ToHeader()
+	blockContext := b.setFVMContextFromHeader(header)
+
+	// cannot execute a block that has already been executed
+	if b.pendingBlock.ExecutionComplete() {
+		return results, &PendingBlockTransactionsExhaustedError{
+			BlockID: b.pendingBlock.Block().ID(),
+		}
+	}
+
+	// continue executing transactions until execution is complete
+	for !b.pendingBlock.ExecutionComplete() {
+		result, err := b.executeNextTransaction(blockContext)
+		if err != nil {
+			return results, err
+		}
+
+		results = append(results, result)
+	}
+
+	return results, nil
+}
+
+// ExecuteNextTransaction executes the next indexed transaction in pending block.
+func (b *Blockchain) ExecuteNextTransaction() (*TransactionResult, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	header := b.pendingBlock.Block().ToHeader()
+	blockContext := b.setFVMContextFromHeader(header)
+	return b.executeNextTransaction(blockContext)
+}
+
+// executeNextTransaction is a helper function for ExecuteBlock and ExecuteNextTransaction that
+// executes the next transaction in the pending block.
+func (b *Blockchain) executeNextTransaction(ctx fvm.Context) (*TransactionResult, error) {
+	// check if there are remaining txs to be executed
+	if b.pendingBlock.ExecutionComplete() {
+		return nil, &PendingBlockTransactionsExhaustedError{
+			BlockID: b.pendingBlock.Block().ID(),
+		}
+	}
+
+	txnBody := b.pendingBlock.NextTransaction()
+	txnId := txnBody.ID()
+
+	// use the computer to execute the next transaction
+	output, err := b.pendingBlock.ExecuteNextTransaction(b.vm, ctx)
+	if err != nil {
+		// fail fast if fatal error occurs
+		return nil, err
+	}
+
+	tr, err := VMTransactionResultToEmulator(txnId, output)
+	if err != nil {
+		// fail fast if fatal error occurs
+		return nil, err
+	}
+
+	return tr, nil
+}
+
+// CommitBlock seals the current pending block and saves it to storage.
+//
+// This function clears the pending transaction pool and resets the pending block.
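+// The system chunk transaction is executed as part of the commit, before the block is finalized.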
+func (b *Blockchain) CommitBlock() (*flowgo.Block, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	block, err := b.commitBlock()
+	if err != nil {
+		return nil, err
+	}
+
+	return block, nil
+}
+
+func (b *Blockchain) commitBlock() (*flowgo.Block, error) {
+	// pending block cannot be committed before execution starts (unless empty)
+	if !b.pendingBlock.ExecutionStarted() && !b.pendingBlock.Empty() {
+		return nil, &PendingBlockCommitBeforeExecutionError{BlockID: b.pendingBlock.Block().ID()}
+	}
+
+	// pending block cannot be committed before execution completes
+	if b.pendingBlock.ExecutionStarted() && !b.pendingBlock.ExecutionComplete() {
+		return nil, &PendingBlockMidExecutionError{BlockID: b.pendingBlock.Block().ID()}
+	}
+
+	block := b.pendingBlock.Block()
+	collections := b.pendingBlock.Collections()
+	transactions := b.pendingBlock.Transactions()
+	transactionResults, err := convertToSealedResults(b.pendingBlock.TransactionResults(), b.pendingBlock.Block().ID(), b.pendingBlock.height)
+	if err != nil {
+		return nil, err
+	}
+
+	// execute the system chunk transaction before finalizing the block
+	err = b.executeSystemChunkTransaction()
+	if err != nil {
+		return nil, err
+	}
+
+	executionSnapshot := b.pendingBlock.Finalize()
+	events := b.pendingBlock.Events()
+
+	// commit the pending block to storage
+	err = b.storage.CommitBlock(
+		context.Background(),
+		block,
+		collections,
+		transactions,
+		transactionResults,
+		executionSnapshot,
+		events)
+	if err != nil {
+		return nil, err
+	}
+
+	ledger, err := b.storage.LedgerByHeight(
+		context.Background(),
+		block.Height,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	// notify listeners on new block
+	b.broadcaster.Publish()
+
+	// reset pending block using current block and ledger state
+	b.pendingBlock = newPendingBlock(block, ledger, b.conf.GetChainID(), b.Now())
+	b.entropyProvider.LatestBlock = block.ID()
+
+	return block, nil
+}
+
+// ExecuteAndCommitBlock is a utility that combines ExecuteBlock with CommitBlock.
+func (b *Blockchain) ExecuteAndCommitBlock() (*flowgo.Block, []*TransactionResult, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	return b.executeAndCommitBlock()
+}
+
+// executeAndCommitBlock is the internal implementation of ExecuteAndCommitBlock.
+func (b *Blockchain) executeAndCommitBlock() (*flowgo.Block, []*TransactionResult, error) {
+
+	results, err := b.executeBlock()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	block, err := b.commitBlock()
+	if err != nil {
+		return nil, results, err
+	}
+
+	blockID := block.ID()
+	b.conf.ServerLogger.Debug().Fields(map[string]any{
+		"blockHeight": block.Height,
+		"blockID":     hex.EncodeToString(blockID[:]),
+	}).Msgf("📦 Block #%d committed", block.Height)
+
+	return block, results, nil
+}
+
+// ResetPendingBlock clears the transactions in pending block.
+func (b *Blockchain) ResetPendingBlock() error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	latestBlock, err := b.storage.LatestBlock(context.Background())
+	if err != nil {
+		return err
+	}
+
+	latestLedger, err := b.storage.LedgerByHeight(
+		context.Background(),
+		latestBlock.Height,
+	)
+	if err != nil {
+		return err
+	}
+
+	// reset pending block using latest committed block and ledger state
+	b.pendingBlock = newPendingBlock(&latestBlock, latestLedger, b.conf.GetChainID(), b.Now())
+
+	return nil
+}
+
+// ExecuteScript executes a read-only script against the world state and returns the result.
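+// The script runs against the latest sealed block; use ExecuteScriptAtBlockID or
+// ExecuteScriptAtBlockHeight to target an earlier state.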
+func (b *Blockchain) ExecuteScript( + script []byte, + arguments [][]byte, +) (*ScriptResult, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + latestBlock, err := b.getLatestBlock() + if err != nil { + return nil, err + } + + return b.executeScriptAtBlockID(script, arguments, latestBlock.ID()) +} + +func (b *Blockchain) ExecuteScriptAtBlockID(script []byte, arguments [][]byte, id flowgo.Identifier) (*ScriptResult, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.executeScriptAtBlockID(script, arguments, id) +} + +func (b *Blockchain) executeScriptAtBlockID(script []byte, arguments [][]byte, id flowgo.Identifier) (*ScriptResult, error) { + requestedBlock, err := b.storage.BlockByID(context.Background(), id) + if err != nil { + return nil, err + } + + requestedLedgerSnapshot, err := b.storage.LedgerByHeight( + context.Background(), + requestedBlock.Height, + ) + if err != nil { + return nil, err + } + + blockContext := fvm.NewContextFromParent( + b.vmCtx, + fvm.WithBlockHeader(requestedBlock.ToHeader()), + ) + + scriptProc := fvm.Script(script).WithArguments(arguments...) + + _, output, err := b.vm.Run( + blockContext, + scriptProc, + requestedLedgerSnapshot) + if err != nil { + return nil, err + } + + scriptID := flowgo.MakeIDFromFingerPrint(script) + + var scriptError error = nil + var convertedValue cadence.Value = nil + + if output.Err == nil { + convertedValue = output.Value + } else { + scriptError = VMErrorToEmulator(output.Err) + } + + scriptResult := &ScriptResult{ + ScriptID: scriptID, + Value: convertedValue, + Error: scriptError, + Logs: output.Logs, + Events: output.Events, + ComputationUsed: output.ComputationUsed, + MemoryEstimate: output.MemoryEstimate, + } + + return scriptResult, nil +} + +func (b *Blockchain) ExecuteScriptAtBlockHeight( + script []byte, + arguments [][]byte, + blockHeight uint64, +) (*ScriptResult, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + requestedBlock, err := b.getBlockByHeight(blockHeight) + if err != nil { + return nil, err + } + + return b.executeScriptAtBlockID(script, arguments, requestedBlock.ID()) +} + +func convertToSealedResults( + results map[flowgo.Identifier]IndexedTransactionResult, + blockID flowgo.Identifier, + blockHeight uint64, +) (map[flowgo.Identifier]*StorableTransactionResult, error) { + + output := make(map[flowgo.Identifier]*StorableTransactionResult) + + for id, result := range results { + temp, err := ToStorableResult(result.ProcedureOutput, blockID, blockHeight) + if err != nil { + return nil, err + } + output[id] = &temp + } + + return output, nil +} + +func (b *Blockchain) GetTransactionsByBlockID(blockID flowgo.Identifier) ([]*flowgo.TransactionBody, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + block, err := b.getBlockByID(blockID) + if err != nil { + return nil, fmt.Errorf("failed to get block %s: %w", blockID, err) + } + + var transactions []*flowgo.TransactionBody + for i, guarantee := range block.Payload.Guarantees { + c, err := b.getCollectionByID(guarantee.CollectionID) + if err != nil { + return nil, fmt.Errorf("failed to get collection [%d] %s: %w", i, guarantee.CollectionID, err) + } + + for j, txID := range c.Transactions { + tx, err := b.getTransaction(txID) + if err != nil { + return nil, fmt.Errorf("failed to get transaction [%d] %s: %w", j, txID, err) + } + transactions = append(transactions, tx) + } + } + return transactions, nil +} + +func (b *Blockchain) GetTransactionResultsByBlockID(blockID flowgo.Identifier) ([]*accessmodel.TransactionResult, error) { + b.mu.RLock() + defer 
b.mu.RUnlock()
+
+	block, err := b.getBlockByID(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get block %s: %w", blockID, err)
+	}
+
+	var results []*accessmodel.TransactionResult
+	for i, guarantee := range block.Payload.Guarantees {
+		c, err := b.getCollectionByID(guarantee.CollectionID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get collection [%d] %s: %w", i, guarantee.CollectionID, err)
+		}
+
+		for j, txID := range c.Transactions {
+			result, err := b.getTransactionResult(txID)
+			if err != nil {
+				return nil, fmt.Errorf("failed to get transaction result [%d] %s: %w", j, txID, err)
+			}
+			results = append(results, result)
+		}
+	}
+	return results, nil
+}
+
+func (b *Blockchain) GetLogs(identifier flowgo.Identifier) ([]string, error) {
+	txResult, err := b.storage.TransactionResultByID(context.Background(), identifier)
+	if err != nil {
+		return nil, err
+	}
+	return txResult.Logs, nil
+}
+
+// SetClock sets the given clock override and applies its current time to the pending block.
+func (b *Blockchain) SetClock(clock func() time.Time) {
+	b.clockOverride = clock
+	b.pendingBlock.SetTimestamp(clock())
+}
+
+// NewScriptEnvironment returns an environment.Environment that uses the
+// blockchain's pending ledger state as its storage snapshot.
+// Useful for tools that use the emulator's blockchain as a library.
+func (b *Blockchain) NewScriptEnvironment() environment.Environment {
+	return environment.NewScriptEnvironmentFromStorageSnapshot(
+		b.vmCtx.EnvironmentParams,
+		b.pendingBlock.ledgerState.NewChild(),
+	)
+}
+
+func (b *Blockchain) systemChunkTransaction() (*flowgo.TransactionBody, error) {
+	serviceAddress := b.GetChain().ServiceAddress()
+
+	script := templates.ReplaceAddresses(
+		systemChunkTransactionTemplate,
+		templates.Environment{
+			RandomBeaconHistoryAddress: serviceAddress.Hex(),
+			EVMAddress:                 serviceAddress.Hex(),
+		},
+	)
+
+	script = strings.ReplaceAll(
+		script,
+		`import Migration from "Migration"`,
+		fmt.Sprintf(
+			`import Migration from %s`,
+			serviceAddress.HexWithPrefix(),
+		),
+	)
+
+	tx, err := flowgo.NewTransactionBodyBuilder().
+		SetScript([]byte(script)).
+		SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+		AddAuthorizer(serviceAddress).
+		SetPayer(serviceAddress).
+		SetReferenceBlockID(b.pendingBlock.parentID).
+		Build()
+	if err != nil {
+		return nil, fmt.Errorf("failed to build transaction body: %w", err)
+	}
+
+	return tx, nil
+}
+
+func (b *Blockchain) executeSystemChunkTransaction() error {
+	txn, err := b.systemChunkTransaction()
+	if err != nil {
+		return err
+	}
+	ctx := fvm.NewContextFromParent(
+		b.vmCtx,
+		fvm.WithLogger(zerolog.Nop()),
+		fvm.WithAuthorizationChecksEnabled(false),
+		fvm.WithSequenceNumberCheckAndIncrementEnabled(false),
+		fvm.WithRandomSourceHistoryCallAllowed(true),
+		fvm.WithBlockHeader(b.pendingBlock.Block().ToHeader()),
+		fvm.WithAccountStorageLimit(false),
+	)
+
+	executionSnapshot, output, err := b.vm.Run(
+		ctx,
+		fvm.Transaction(txn, uint32(len(b.pendingBlock.Transactions()))),
+		b.pendingBlock.ledgerState,
+	)
+	if err != nil {
+		return err
+	}
+
+	if output.Err != nil {
+		return output.Err
+	}
+
+	b.pendingBlock.events = append(b.pendingBlock.events, output.Events...)
+
+	err = b.pendingBlock.ledgerState.Merge(executionSnapshot)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (b *Blockchain) GetRegisterValues(registerIDs flowgo.RegisterIDs, height uint64) (values []flowgo.RegisterValue, err error) {
+	ledger, err := b.storage.LedgerByHeight(context.Background(), height)
+	if err != nil {
+		return nil, err
+	}
+	for _, registerID := range registerIDs {
+		value, err := ledger.Get(registerID)
+		if err != nil {
+			return nil, err
+		}
+		values = append(values, value)
+	}
+	return values, nil
+}
diff --git a/integration/internal/emulator/config.go b/integration/internal/emulator/config.go
new file mode 100644
index 00000000000..d87be0a162c
--- /dev/null
+++ b/integration/internal/emulator/config.go
@@ -0,0 +1,274 @@
+package emulator
+
+import (
+	"fmt"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/cadence"
+	"github.com/onflow/crypto"
+	"github.com/onflow/crypto/hash"
+
+	"github.com/onflow/flow-go/fvm"
+	"github.com/onflow/flow-go/fvm/meter"
+	flowgo "github.com/onflow/flow-go/model/flow"
+)
+
+// config is a set of configuration options for an emulated blockchain.
+type config struct {
+	ServiceKey                   *ServiceKey
+	Store                        EmulatorStorage
+	SimpleAddresses              bool
+	GenesisTokenSupply           cadence.UFix64
+	TransactionMaxGasLimit       uint64
+	ScriptGasLimit               uint64
+	TransactionExpiry            uint
+	StorageLimitEnabled          bool
+	TransactionFeesEnabled       bool
+	ExecutionEffortWeights       meter.ExecutionEffortWeights
+	ContractRemovalEnabled       bool
+	MinimumStorageReservation    cadence.UFix64
+	StorageMBPerFLOW             cadence.UFix64
+	Logger                       zerolog.Logger
+	ServerLogger                 zerolog.Logger
+	TransactionValidationEnabled bool
+	ChainID                      flowgo.ChainID
+	AutoMine                     bool
+}
+
+const defaultGenesisTokenSupply = "1000000000.0"
+const defaultScriptGasLimit = 100000
+const defaultTransactionMaxGasLimit = flowgo.DefaultMaxTransactionGasLimit
+
+// defaultConfig is the default configuration for an emulated blockchain.
+var defaultConfig = func() config {
+	genesisTokenSupply, err := cadence.NewUFix64(defaultGenesisTokenSupply)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to parse default genesis token supply: %s", err.Error()))
+	}
+
+	return config{
+		ServiceKey:                   DefaultServiceKey(),
+		Store:                        nil,
+		SimpleAddresses:              false,
+		GenesisTokenSupply:           genesisTokenSupply,
+		ScriptGasLimit:               defaultScriptGasLimit,
+		TransactionMaxGasLimit:       defaultTransactionMaxGasLimit,
+		MinimumStorageReservation:    fvm.DefaultMinimumStorageReservation,
+		StorageMBPerFLOW:             fvm.DefaultStorageMBPerFLOW,
+		TransactionExpiry:            0, // TODO: replace with sensible default
+		StorageLimitEnabled:          true,
+		Logger:                       zerolog.Nop(),
+		ServerLogger:                 zerolog.Nop(),
+		TransactionValidationEnabled: true,
+		ChainID:                      flowgo.Emulator,
+		AutoMine:                     false,
+	}
+}()
+
+func (conf config) GetStore() EmulatorStorage {
+	if conf.Store == nil {
+		conf.Store = NewMemoryStore()
+	}
+	return conf.Store
+}
+
+func (conf config) GetChainID() flowgo.ChainID {
+	if conf.SimpleAddresses {
+		return flowgo.MonotonicEmulator
+	}
+	return conf.ChainID
+}
+
+func (conf config) GetServiceKey() ServiceKey {
+	// set up service key
+	serviceKey := conf.ServiceKey
+	if serviceKey == nil {
+		serviceKey = DefaultServiceKey()
+	}
+	serviceKey.Address = conf.GetChainID().Chain().ServiceAddress()
+	serviceKey.Weight = fvm.AccountKeyWeightThreshold
+	return *serviceKey
+}
+
+// Option is a function applying a change to the emulator config.
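+//
+// Options are applied in order by New, so when two options set the same field,
+// the last one wins. For example:
+//
+//	b, err := emulator.New(
+//		emulator.WithStorageLimitEnabled(false),
+//		emulator.WithTransactionValidationEnabled(false),
+//	)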
+type Option func(*config)
+
+// WithLogger sets the FVM logger.
+func WithLogger(
+	logger zerolog.Logger,
+) Option {
+	return func(c *config) {
+		c.Logger = logger
+	}
+}
+
+// WithServerLogger sets the server logger, used for emulator output such as block commit logs.
+func WithServerLogger(
+	logger zerolog.Logger,
+) Option {
+	return func(c *config) {
+		c.ServerLogger = logger
+	}
+}
+
+// WithServicePublicKey sets the service key from a public key.
+func WithServicePublicKey(
+	servicePublicKey crypto.PublicKey,
+	sigAlgo crypto.SigningAlgorithm,
+	hashAlgo hash.HashingAlgorithm,
+) Option {
+	return func(c *config) {
+		c.ServiceKey = &ServiceKey{
+			PublicKey: servicePublicKey,
+			SigAlgo:   sigAlgo,
+			HashAlgo:  hashAlgo,
+		}
+	}
+}
+
+// WithServicePrivateKey sets the service key from a private key.
+func WithServicePrivateKey(
+	privateKey crypto.PrivateKey,
+	sigAlgo crypto.SigningAlgorithm,
+	hashAlgo hash.HashingAlgorithm,
+) Option {
+	return func(c *config) {
+		c.ServiceKey = &ServiceKey{
+			PrivateKey: privateKey,
+			PublicKey:  privateKey.PublicKey(),
+			HashAlgo:   hashAlgo,
+			SigAlgo:    sigAlgo,
+		}
+	}
+}
+
+// WithStore sets the persistent storage provider.
+func WithStore(store EmulatorStorage) Option {
+	return func(c *config) {
+		c.Store = store
+	}
+}
+
+// WithSimpleAddresses enables simple addresses, which are sequential starting with 0x01.
+func WithSimpleAddresses() Option {
+	return func(c *config) {
+		c.SimpleAddresses = true
+	}
+}
+
+// WithGenesisTokenSupply sets the genesis token supply.
+func WithGenesisTokenSupply(supply cadence.UFix64) Option {
+	return func(c *config) {
+		c.GenesisTokenSupply = supply
+	}
+}
+
+// WithTransactionMaxGasLimit sets the maximum gas limit for transactions.
+//
+// Individual transactions will still be bounded by the limit they declare.
+// This function sets the maximum limit that any transaction can declare.
+//
+// This limit does not affect script executions. Use WithScriptGasLimit
+// to set the gas limit for script executions.
+func WithTransactionMaxGasLimit(maxLimit uint64) Option {
+	return func(c *config) {
+		c.TransactionMaxGasLimit = maxLimit
+	}
+}
+
+// WithScriptGasLimit sets the gas limit for scripts.
+//
+// This limit does not affect transactions, which declare their own limit.
+// Use WithTransactionMaxGasLimit to set the maximum gas limit for transactions.
+func WithScriptGasLimit(limit uint64) Option {
+	return func(c *config) {
+		c.ScriptGasLimit = limit
+	}
+}
+
+// WithTransactionExpiry sets the transaction expiry measured in blocks.
+//
+// If set to zero, transaction expiry is disabled and the reference block ID field
+// is not required.
+func WithTransactionExpiry(expiry uint) Option {
+	return func(c *config) {
+		c.TransactionExpiry = expiry
+	}
+}
+
+// WithStorageLimitEnabled enables/disables limiting accounts' storage use to their storage capacity.
+//
+// If set to false, accounts can store any amount of data;
+// otherwise they can only store as much as their storage capacity allows.
+// The default is true.
+func WithStorageLimitEnabled(enabled bool) Option {
+	return func(c *config) {
+		c.StorageLimitEnabled = enabled
+	}
+}
+
+// WithMinimumStorageReservation sets the minimum account balance.
+//
+// The cost of creating new accounts is also set to this value.
+// The default is taken from fvm.DefaultMinimumStorageReservation.
+func WithMinimumStorageReservation(minimumStorageReservation cadence.UFix64) Option {
+	return func(c *config) {
+		c.MinimumStorageReservation = minimumStorageReservation
+	}
+}
+
+// WithStorageMBPerFLOW sets the cost of a megabyte of storage in FLOW.
+//
+// The default is taken from fvm.DefaultStorageMBPerFLOW.
+func WithStorageMBPerFLOW(storageMBPerFLOW cadence.UFix64) Option {
+	return func(c *config) {
+		c.StorageMBPerFLOW = storageMBPerFLOW
+	}
+}
+
+// WithTransactionFeesEnabled enables/disables transaction fees.
+//
+// If set to false, transactions don't cost any FLOW.
+// The default is false.
+func WithTransactionFeesEnabled(enabled bool) Option {
+	return func(c *config) {
+		c.TransactionFeesEnabled = enabled
+	}
+}
+
+// WithExecutionEffortWeights sets the execution effort weights.
+// The default is the Mainnet values.
+func WithExecutionEffortWeights(weights meter.ExecutionEffortWeights) Option {
+	return func(c *config) {
+		c.ExecutionEffortWeights = weights
+	}
+}
+
+// WithContractRemovalEnabled restricts/allows removal of already deployed contracts.
+//
+// The default is provided by the on-chain value.
+func WithContractRemovalEnabled(enabled bool) Option {
+	return func(c *config) {
+		c.ContractRemovalEnabled = enabled
+	}
+}
+
+// WithTransactionValidationEnabled enables/disables transaction validation.
+//
+// If set to false, the emulator will not verify transaction signatures or validate sequence numbers.
+//
+// The default is true.
+func WithTransactionValidationEnabled(enabled bool) Option {
+	return func(c *config) {
+		c.TransactionValidationEnabled = enabled
+	}
+}
+
+// WithChainID sets the chain type used for address generation.
+// The default is Emulator.
+func WithChainID(chainID flowgo.ChainID) Option {
+	return func(c *config) {
+		c.ChainID = chainID
+	}
+}
diff --git a/integration/internal/emulator/convert.go b/integration/internal/emulator/convert.go
new file mode 100644
index 00000000000..2eb946cae43
--- /dev/null
+++ b/integration/internal/emulator/convert.go
@@ -0,0 +1,386 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package emulator + +import ( + "fmt" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" + + sdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/fvm" + fvmerrors "github.com/onflow/flow-go/fvm/errors" + accessmodel "github.com/onflow/flow-go/model/access" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func SDKIdentifierToFlow(sdkIdentifier sdk.Identifier) flowgo.Identifier { + return flowgo.Identifier(sdkIdentifier) +} + +func SDKIdentifiersToFlow(sdkIdentifiers []sdk.Identifier) []flowgo.Identifier { + ret := make([]flowgo.Identifier, len(sdkIdentifiers)) + for i, sdkIdentifier := range sdkIdentifiers { + ret[i] = SDKIdentifierToFlow(sdkIdentifier) + } + return ret +} + +func FlowIdentifierToSDK(flowIdentifier flowgo.Identifier) sdk.Identifier { + return sdk.Identifier(flowIdentifier) +} + +func FlowIdentifiersToSDK(flowIdentifiers []flowgo.Identifier) []sdk.Identifier { + ret := make([]sdk.Identifier, len(flowIdentifiers)) + for i, flowIdentifier := range flowIdentifiers { + ret[i] = FlowIdentifierToSDK(flowIdentifier) + } + return ret +} + +func SDKProposalKeyToFlow(sdkProposalKey sdk.ProposalKey) flowgo.ProposalKey { + return flowgo.ProposalKey{ + Address: SDKAddressToFlow(sdkProposalKey.Address), + KeyIndex: sdkProposalKey.KeyIndex, + SequenceNumber: sdkProposalKey.SequenceNumber, + } +} + +func FlowProposalKeyToSDK(flowProposalKey flowgo.ProposalKey) sdk.ProposalKey { + return sdk.ProposalKey{ + Address: FlowAddressToSDK(flowProposalKey.Address), + KeyIndex: flowProposalKey.KeyIndex, + SequenceNumber: flowProposalKey.SequenceNumber, + } +} + +func SDKAddressToFlow(sdkAddress sdk.Address) flowgo.Address { + return flowgo.Address(sdkAddress) +} + +func FlowAddressToSDK(flowAddress flowgo.Address) sdk.Address { + return sdk.Address(flowAddress) +} + +func SDKAddressesToFlow(sdkAddresses []sdk.Address) []flowgo.Address { + ret := make([]flowgo.Address, len(sdkAddresses)) + for i, sdkAddress := range sdkAddresses { + ret[i] = SDKAddressToFlow(sdkAddress) + } + return ret +} + +func FlowAddressesToSDK(flowAddresses []flowgo.Address) []sdk.Address { + ret := make([]sdk.Address, len(flowAddresses)) + for i, flowAddress := range flowAddresses { + ret[i] = FlowAddressToSDK(flowAddress) + } + return ret +} + +func SDKTransactionSignatureToFlow(sdkTransactionSignature sdk.TransactionSignature) flowgo.TransactionSignature { + return flowgo.TransactionSignature{ + Address: SDKAddressToFlow(sdkTransactionSignature.Address), + SignerIndex: sdkTransactionSignature.SignerIndex, + KeyIndex: sdkTransactionSignature.KeyIndex, + Signature: sdkTransactionSignature.Signature, + } +} + +func FlowTransactionSignatureToSDK(flowTransactionSignature flowgo.TransactionSignature) sdk.TransactionSignature { + return sdk.TransactionSignature{ + Address: FlowAddressToSDK(flowTransactionSignature.Address), + SignerIndex: flowTransactionSignature.SignerIndex, + KeyIndex: flowTransactionSignature.KeyIndex, + Signature: flowTransactionSignature.Signature, + } +} + +func SDKTransactionSignaturesToFlow(sdkTransactionSignatures []sdk.TransactionSignature) []flowgo.TransactionSignature { + ret := make([]flowgo.TransactionSignature, len(sdkTransactionSignatures)) + for i, sdkTransactionSignature := range sdkTransactionSignatures { + ret[i] = SDKTransactionSignatureToFlow(sdkTransactionSignature) + } + return ret +} + +func FlowTransactionSignaturesToSDK(flowTransactionSignatures []flowgo.TransactionSignature) []sdk.TransactionSignature { + ret := 
make([]sdk.TransactionSignature, len(flowTransactionSignatures)) + for i, flowTransactionSignature := range flowTransactionSignatures { + ret[i] = FlowTransactionSignatureToSDK(flowTransactionSignature) + } + return ret +} + +func SDKTransactionToFlow(sdkTx sdk.Transaction) *flowgo.TransactionBody { + return &flowgo.TransactionBody{ + ReferenceBlockID: SDKIdentifierToFlow(sdkTx.ReferenceBlockID), + Script: sdkTx.Script, + Arguments: sdkTx.Arguments, + GasLimit: sdkTx.GasLimit, + ProposalKey: SDKProposalKeyToFlow(sdkTx.ProposalKey), + Payer: SDKAddressToFlow(sdkTx.Payer), + Authorizers: SDKAddressesToFlow(sdkTx.Authorizers), + PayloadSignatures: SDKTransactionSignaturesToFlow(sdkTx.PayloadSignatures), + EnvelopeSignatures: SDKTransactionSignaturesToFlow(sdkTx.EnvelopeSignatures), + } +} + +func FlowTransactionToSDK(flowTx flowgo.TransactionBody) sdk.Transaction { + transaction := sdk.Transaction{ + ReferenceBlockID: FlowIdentifierToSDK(flowTx.ReferenceBlockID), + Script: flowTx.Script, + Arguments: flowTx.Arguments, + GasLimit: flowTx.GasLimit, + ProposalKey: FlowProposalKeyToSDK(flowTx.ProposalKey), + Payer: FlowAddressToSDK(flowTx.Payer), + Authorizers: FlowAddressesToSDK(flowTx.Authorizers), + PayloadSignatures: FlowTransactionSignaturesToSDK(flowTx.PayloadSignatures), + EnvelopeSignatures: FlowTransactionSignaturesToSDK(flowTx.EnvelopeSignatures), + } + return transaction +} + +func FlowTransactionResultToSDK(result *accessmodel.TransactionResult) (*sdk.TransactionResult, error) { + + events, err := FlowEventsToSDK(result.Events) + if err != nil { + return nil, err + } + + if result.ErrorMessage != "" { + err = &ExecutionError{Code: int(result.StatusCode), Message: result.ErrorMessage} + } + + sdkResult := &sdk.TransactionResult{ + Status: sdk.TransactionStatus(result.Status), + Error: err, + Events: events, + TransactionID: sdk.Identifier(result.TransactionID), + BlockHeight: result.BlockHeight, + BlockID: sdk.Identifier(result.BlockID), + } + + return sdkResult, nil +} + +// SDKEventToFlow converts an SDK event model type to the flow-go internal event model type. +// All errors indicate the input cannot be converted to a valid event. 
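+// The Cadence event value is encoded to a CCF payload via ccf.EventsEncMode.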
+func SDKEventToFlow(event sdk.Event) (*flowgo.Event, error) { + payload, err := ccf.EventsEncMode.Encode(event.Value) + if err != nil { + return nil, err + } + + e, err := flowgo.NewEvent( + flowgo.UntrustedEvent{ + Type: flowgo.EventType(event.Type), + TransactionID: SDKIdentifierToFlow(event.TransactionID), + TransactionIndex: uint32(event.TransactionIndex), + EventIndex: uint32(event.EventIndex), + Payload: payload, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct event: %w", err) + } + + return e, nil +} + +func FlowEventToSDK(flowEvent flowgo.Event) (sdk.Event, error) { + cadenceValue, err := ccf.EventsDecMode.Decode(nil, flowEvent.Payload) + if err != nil { + return sdk.Event{}, err + } + + cadenceEvent, ok := cadenceValue.(cadence.Event) + if !ok { + return sdk.Event{}, fmt.Errorf("cadence value not of type event: %s", cadenceValue) + } + + return sdk.Event{ + Type: string(flowEvent.Type), + TransactionID: FlowIdentifierToSDK(flowEvent.TransactionID), + TransactionIndex: int(flowEvent.TransactionIndex), + EventIndex: int(flowEvent.EventIndex), + Value: cadenceEvent, + }, nil +} + +func FlowEventsToSDK(flowEvents []flowgo.Event) ([]sdk.Event, error) { + ret := make([]sdk.Event, len(flowEvents)) + var err error + for i, flowEvent := range flowEvents { + ret[i], err = FlowEventToSDK(flowEvent) + if err != nil { + return nil, err + } + } + return ret, nil +} + +func FlowAccountPublicKeyToSDK(flowPublicKey flowgo.AccountPublicKey, index uint32) (sdk.AccountKey, error) { + + return sdk.AccountKey{ + Index: index, + PublicKey: flowPublicKey.PublicKey, + SigAlgo: flowPublicKey.SignAlgo, + HashAlgo: flowPublicKey.HashAlgo, + Weight: flowPublicKey.Weight, + SequenceNumber: flowPublicKey.SeqNumber, + Revoked: flowPublicKey.Revoked, + }, nil +} + +func SDKAccountKeyToFlow(key *sdk.AccountKey) (flowgo.AccountPublicKey, error) { + + return flowgo.AccountPublicKey{ + Index: key.Index, + PublicKey: key.PublicKey, + SignAlgo: key.SigAlgo, + HashAlgo: key.HashAlgo, + Weight: key.Weight, + SeqNumber: key.SequenceNumber, + Revoked: key.Revoked, + }, nil +} + +func SDKAccountKeysToFlow(keys []*sdk.AccountKey) ([]flowgo.AccountPublicKey, error) { + accountKeys := make([]flowgo.AccountPublicKey, len(keys)) + + for i, key := range keys { + accountKey, err := SDKAccountKeyToFlow(key) + if err != nil { + return nil, err + } + + accountKeys[i] = accountKey + } + + return accountKeys, nil +} + +func FlowAccountPublicKeysToSDK(flowPublicKeys []flowgo.AccountPublicKey) ([]*sdk.AccountKey, error) { + ret := make([]*sdk.AccountKey, len(flowPublicKeys)) + for i, flowPublicKey := range flowPublicKeys { + v, err := FlowAccountPublicKeyToSDK(flowPublicKey, uint32(i)) + if err != nil { + return nil, err + } + + ret[i] = &v + } + return ret, nil +} + +func FlowAccountToSDK(flowAccount flowgo.Account) (*sdk.Account, error) { + sdkPublicKeys, err := FlowAccountPublicKeysToSDK(flowAccount.Keys) + if err != nil { + return &sdk.Account{}, err + } + + return &sdk.Account{ + Address: FlowAddressToSDK(flowAccount.Address), + Balance: flowAccount.Balance, + Code: nil, + Keys: sdkPublicKeys, + Contracts: flowAccount.Contracts, + }, nil +} + +func SDKAccountToFlow(account *sdk.Account) (*flowgo.Account, error) { + keys, err := SDKAccountKeysToFlow(account.Keys) + if err != nil { + return nil, err + } + + return &flowgo.Account{ + Address: SDKAddressToFlow(account.Address), + Balance: account.Balance, + Keys: keys, + Contracts: account.Contracts, + }, nil +} + +func FlowLightCollectionToSDK(flowCollection 
flowgo.LightCollection) sdk.Collection { + return sdk.Collection{ + TransactionIDs: FlowIdentifiersToSDK(flowCollection.Transactions), + } +} + +func VMTransactionResultToEmulator( + txnId flowgo.Identifier, + output fvm.ProcedureOutput, +) ( + *TransactionResult, + error, +) { + txID := FlowIdentifierToSDK(txnId) + + sdkEvents, err := FlowEventsToSDK(output.Events) + if err != nil { + return nil, err + } + + return &TransactionResult{ + TransactionID: txID, + ComputationUsed: output.ComputationUsed, + MemoryEstimate: output.MemoryEstimate, + Error: VMErrorToEmulator(output.Err), + Logs: output.Logs, + Events: sdkEvents, + }, nil +} + +func VMErrorToEmulator(vmError fvmerrors.CodedError) error { + if vmError == nil { + return nil + } + + return &FVMError{FlowError: vmError} +} + +func ToStorableResult( + output fvm.ProcedureOutput, + blockID flowgo.Identifier, + blockHeight uint64, +) ( + StorableTransactionResult, + error, +) { + var errorCode int + var errorMessage string + + if output.Err != nil { + errorCode = int(output.Err.Code()) + errorMessage = output.Err.Error() + } + + return StorableTransactionResult{ + BlockID: blockID, + BlockHeight: blockHeight, + ErrorCode: errorCode, + ErrorMessage: errorMessage, + Logs: output.Logs, + Events: output.Events, + }, nil +} diff --git a/integration/internal/emulator/doc.go b/integration/internal/emulator/doc.go new file mode 100644 index 00000000000..416e7d84b3c --- /dev/null +++ b/integration/internal/emulator/doc.go @@ -0,0 +1,11 @@ +// Package emulator is a minimal version of the Flow Emulator (https://github.com/onflow/flow-emulator) +// for use within some integration tests for flow-go. +// Using an Emulator is desirable for test cases where: +// - we don't want to, or can't, run the test case against a local Docker network (package integration/testnet) +// - we want the test to include execution of smart contract code in a realistic environment +// +// Before using this package, flow-go's integration tests used the Flow Emulator directly. +// This created a repository-wise circular dependency and complicated version upgrades (see https://github.com/onflow/flow-go/issues/2863). +// The main purpose for this package is to replace that dependency with minimal ongoing +// maintenance overhead. +package emulator diff --git a/integration/internal/emulator/emulator.go b/integration/internal/emulator/emulator.go new file mode 100644 index 00000000000..32d212c7756 --- /dev/null +++ b/integration/internal/emulator/emulator.go @@ -0,0 +1,173 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package emulator + +import ( + "fmt" + + sdkcrypto "github.com/onflow/flow-go-sdk/crypto" + + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + + accessmodel "github.com/onflow/flow-go/model/access" + flowgo "github.com/onflow/flow-go/model/flow" +) + +// SignatureAlgorithm is an identifier for a signature algorithm (and parameters if applicable). 
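+// It is an alias of crypto.SigningAlgorithm, so emulator callers don't need to
+// import onflow/crypto directly.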
+type SignatureAlgorithm = crypto.SigningAlgorithm + +const ( + UnknownSignatureAlgorithm SignatureAlgorithm = crypto.UnknownSigningAlgorithm + // ECDSA_P256 is ECDSA on NIST P-256 curve + ECDSA_P256 = crypto.ECDSAP256 + // ECDSA_secp256k1 is ECDSA on secp256k1 curve + ECDSA_secp256k1 = crypto.ECDSASecp256k1 + // BLS_BLS12_381 is BLS on BLS12-381 curve + BLS_BLS12_381 = crypto.BLSBLS12381 +) + +// StringToSignatureAlgorithm converts a string to a SignatureAlgorithm. +func StringToSignatureAlgorithm(s string) SignatureAlgorithm { + switch s { + case ECDSA_P256.String(): + return ECDSA_P256 + case ECDSA_secp256k1.String(): + return ECDSA_secp256k1 + case BLS_BLS12_381.String(): + return BLS_BLS12_381 + default: + return UnknownSignatureAlgorithm + } +} + +type ServiceKey struct { + Index uint32 + Address flowgo.Address + SequenceNumber uint64 + PrivateKey crypto.PrivateKey + PublicKey crypto.PublicKey + HashAlgo hash.HashingAlgorithm + SigAlgo SignatureAlgorithm + Weight int +} + +const defaultServiceKeyPrivateKeySeed = "elephant ears space cowboy octopus rodeo potato cannon pineapple" +const DefaultServiceKeySigAlgo = sdkcrypto.ECDSA_P256 +const DefaultServiceKeyHashAlgo = sdkcrypto.SHA3_256 + +func DefaultServiceKey() *ServiceKey { + return GenerateDefaultServiceKey(DefaultServiceKeySigAlgo, DefaultServiceKeyHashAlgo) +} + +func GenerateDefaultServiceKey( + sigAlgo crypto.SigningAlgorithm, + hashAlgo hash.HashingAlgorithm, +) *ServiceKey { + privateKey, err := crypto.GeneratePrivateKey( + sigAlgo, + []byte(defaultServiceKeyPrivateKeySeed), + ) + if err != nil { + panic(fmt.Sprintf("Failed to generate default service key: %s", err.Error())) + } + + return &ServiceKey{ + PrivateKey: privateKey, + PublicKey: privateKey.PublicKey(), + SigAlgo: sigAlgo, + HashAlgo: hashAlgo, + } +} + +func (s ServiceKey) Signer() (sdkcrypto.Signer, error) { + return sdkcrypto.NewInMemorySigner(s.PrivateKey, s.HashAlgo) +} + +func (s ServiceKey) AccountKey() (crypto.PublicKey, crypto.PrivateKey) { + + var publicKey crypto.PublicKey + if s.PublicKey != nil { + publicKey = s.PublicKey + } + + if s.PrivateKey != nil { + publicKey = s.PrivateKey.PublicKey() + } + + return publicKey, s.PrivateKey + +} + +type AccessProvider interface { + Ping() error + GetNetworkParameters() accessmodel.NetworkParameters + + GetLatestBlock() (*flowgo.Block, error) + GetBlockByID(id flowgo.Identifier) (*flowgo.Block, error) + GetBlockByHeight(height uint64) (*flowgo.Block, error) + + GetCollectionByID(colID flowgo.Identifier) (*flowgo.LightCollection, error) + GetFullCollectionByID(colID flowgo.Identifier) (*flowgo.Collection, error) + + GetTransaction(txID flowgo.Identifier) (*flowgo.TransactionBody, error) + GetTransactionResult(txID flowgo.Identifier) (*accessmodel.TransactionResult, error) + GetTransactionsByBlockID(blockID flowgo.Identifier) ([]*flowgo.TransactionBody, error) + GetTransactionResultsByBlockID(blockID flowgo.Identifier) ([]*accessmodel.TransactionResult, error) + + GetAccount(address flowgo.Address) (*flowgo.Account, error) + GetAccountAtBlockHeight(address flowgo.Address, blockHeight uint64) (*flowgo.Account, error) + GetAccountByIndex(uint) (*flowgo.Account, error) + + GetEventsByHeight(blockHeight uint64, eventType string) ([]flowgo.Event, error) + GetEventsForBlockIDs(eventType string, blockIDs []flowgo.Identifier) ([]flowgo.BlockEvents, error) + GetEventsForHeightRange(eventType string, startHeight, endHeight uint64) ([]flowgo.BlockEvents, error) + + ExecuteScript(script []byte, arguments [][]byte) 
(*ScriptResult, error)
+	ExecuteScriptAtBlockHeight(script []byte, arguments [][]byte, blockHeight uint64) (*ScriptResult, error)
+	ExecuteScriptAtBlockID(script []byte, arguments [][]byte, id flowgo.Identifier) (*ScriptResult, error)
+
+	SendTransaction(tx *flowgo.TransactionBody) error
+	AddTransaction(tx flowgo.TransactionBody) error
+}
+
+type AutoMineCapable interface {
+	EnableAutoMine()
+	DisableAutoMine()
+}
+
+type ExecutionCapable interface {
+	ExecuteAndCommitBlock() (*flowgo.Block, []*TransactionResult, error)
+	ExecuteNextTransaction() (*TransactionResult, error)
+	ExecuteBlock() ([]*TransactionResult, error)
+	CommitBlock() (*flowgo.Block, error)
+}
+
+type Contract struct {
+	Name   string
+	Source string
+}
+
+// Emulator defines the method set of an emulated blockchain.
+type Emulator interface {
+	ServiceKey() ServiceKey
+	AccessProvider
+	AutoMineCapable
+	ExecutionCapable
+}
diff --git a/integration/internal/emulator/errors.go b/integration/internal/emulator/errors.go
new file mode 100644
index 00000000000..985e794d168
--- /dev/null
+++ b/integration/internal/emulator/errors.go
@@ -0,0 +1,274 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package emulator
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/onflow/flow-go-sdk/crypto"
+
+	"github.com/onflow/flow-go/access/validator"
+	fvmerrors "github.com/onflow/flow-go/fvm/errors"
+	flowgo "github.com/onflow/flow-go/model/flow"
+)
+
+var ErrNotFound = errors.New("could not find entity")
+
+type InvalidArgumentError struct {
+	msg string
+}
+
+func (e InvalidArgumentError) Error() string {
+	return fmt.Sprintf("Invalid argument error: %s", e.msg)
+}
+
+func NewInvalidArgumentError(msg string) *InvalidArgumentError {
+	return &InvalidArgumentError{msg: msg}
+}
+
+type InternalError struct {
+	msg string
+}
+
+func (e InternalError) Error() string {
+	return fmt.Sprintf("Internal error: %s", e.msg)
+}
+
+func NewInternalError(msg string) *InternalError {
+	return &InternalError{msg: msg}
+}
+
+// A NotFoundError indicates that an entity could not be found.
+type NotFoundError interface {
+	isNotFoundError()
+}
+
+// A BlockNotFoundError indicates that a block could not be found.
+type BlockNotFoundError interface {
+	isBlockNotFoundError()
+}
+
+// A BlockNotFoundByHeightError indicates that a block could not be found at the specified height.
+type BlockNotFoundByHeightError struct {
+	Height uint64
+}
+
+func (e *BlockNotFoundByHeightError) isNotFoundError()      {}
+func (e *BlockNotFoundByHeightError) isBlockNotFoundError() {}
+
+func (e *BlockNotFoundByHeightError) Error() string {
+	return fmt.Sprintf("could not find block at height %d", e.Height)
+}
+
+// A BlockNotFoundByIDError indicates that a block with the specified ID could not be found.
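+// It satisfies both the NotFoundError and BlockNotFoundError interfaces.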
+type BlockNotFoundByIDError struct { + ID flowgo.Identifier +} + +func (e *BlockNotFoundByIDError) isNotFoundError() {} +func (e *BlockNotFoundByIDError) isBlockNotFoundError() {} + +func (e *BlockNotFoundByIDError) Error() string { + return fmt.Sprintf("could not find block with ID %s", e.ID) +} + +// A CollectionNotFoundError indicates that a collection could not be found. +type CollectionNotFoundError struct { + ID flowgo.Identifier +} + +func (e *CollectionNotFoundError) isNotFoundError() {} + +func (e *CollectionNotFoundError) Error() string { + return fmt.Sprintf("could not find collection with ID %s", e.ID) +} + +// A TransactionNotFoundError indicates that a transaction could not be found. +type TransactionNotFoundError struct { + ID flowgo.Identifier +} + +func (e *TransactionNotFoundError) isNotFoundError() {} + +func (e *TransactionNotFoundError) Error() string { + return fmt.Sprintf("could not find transaction with ID %s", e.ID) +} + +// An AccountNotFoundError indicates that an account could not be found. +type AccountNotFoundError struct { + Address flowgo.Address +} + +func (e *AccountNotFoundError) isNotFoundError() {} + +func (e *AccountNotFoundError) Error() string { + return fmt.Sprintf("could not find account with address %s", e.Address) +} + +// A TransactionValidationError indicates that a submitted transaction is invalid. +type TransactionValidationError interface { + isTransactionValidationError() +} + +// A DuplicateTransactionError indicates that a transaction has already been submitted. +type DuplicateTransactionError struct { + TxID flowgo.Identifier +} + +func (e *DuplicateTransactionError) isTransactionValidationError() {} + +func (e *DuplicateTransactionError) Error() string { + return fmt.Sprintf("transaction with ID %s has already been submitted", e.TxID) +} + +// IncompleteTransactionError indicates that a transaction is missing one or more required fields. +type IncompleteTransactionError struct { + MissingFields []string +} + +func (e *IncompleteTransactionError) isTransactionValidationError() {} + +func (e *IncompleteTransactionError) Error() string { + return fmt.Sprintf("transaction is missing required fields: %s", e.MissingFields) +} + +// ExpiredTransactionError indicates that a transaction has expired. +type ExpiredTransactionError struct { + RefHeight, FinalHeight uint64 +} + +func (e *ExpiredTransactionError) isTransactionValidationError() {} + +func (e *ExpiredTransactionError) Error() string { + return fmt.Sprintf("transaction is expired: ref_height=%d final_height=%d", e.RefHeight, e.FinalHeight) +} + +// InvalidTransactionScriptError indicates that a transaction contains an invalid Cadence script. +type InvalidTransactionScriptError struct { + ParserErr error +} + +func (e *InvalidTransactionScriptError) isTransactionValidationError() {} + +func (e *InvalidTransactionScriptError) Error() string { + return fmt.Sprintf("failed to parse transaction Cadence script: %s", e.ParserErr) +} + +func (e *InvalidTransactionScriptError) Unwrap() error { + return e.ParserErr +} + +// InvalidTransactionGasLimitError indicates that a transaction specifies a gas limit that exceeds the maximum. 
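+// Maximum is the network's limit and Actual is the limit the transaction declared.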
+type InvalidTransactionGasLimitError struct { + Maximum uint64 + Actual uint64 +} + +func (e *InvalidTransactionGasLimitError) isTransactionValidationError() {} + +func (e *InvalidTransactionGasLimitError) Error() string { + return fmt.Sprintf("transaction gas limit (%d) exceeds the maximum gas limit (%d)", e.Actual, e.Maximum) +} + +// An InvalidStateVersionError indicates that a state version hash provided is invalid. +type InvalidStateVersionError struct { + Version crypto.Hash +} + +func (e *InvalidStateVersionError) Error() string { + return fmt.Sprintf("execution state with version hash %x is invalid", e.Version) +} + +// A PendingBlockCommitBeforeExecutionError indicates that the current pending block has not been executed (cannot commit). +type PendingBlockCommitBeforeExecutionError struct { + BlockID flowgo.Identifier +} + +func (e *PendingBlockCommitBeforeExecutionError) Error() string { + return fmt.Sprintf("pending block with ID %s cannot be committed before execution", e.BlockID) +} + +// A PendingBlockMidExecutionError indicates that the current pending block is mid-execution. +type PendingBlockMidExecutionError struct { + BlockID flowgo.Identifier +} + +func (e *PendingBlockMidExecutionError) Error() string { + return fmt.Sprintf("pending block with ID %s is currently being executed", e.BlockID) +} + +// A PendingBlockTransactionsExhaustedError indicates that the current pending block has finished executing (no more transactions to execute). +type PendingBlockTransactionsExhaustedError struct { + BlockID flowgo.Identifier +} + +func (e *PendingBlockTransactionsExhaustedError) Error() string { + return fmt.Sprintf("pending block with ID %s contains no more transactions to execute", e.BlockID) +} + +// A StorageError indicates that an error occurred in the storage provider. +type StorageError struct { + inner error +} + +func (e *StorageError) Error() string { + return fmt.Sprintf("storage failure: %v", e.inner) +} + +func (e *StorageError) Unwrap() error { + return e.inner +} + +// An ExecutionError occurs when a transaction fails to execute. 
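+// Code and Message are taken from the transaction result's status code and error message.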
+type ExecutionError struct { + Code int + Message string +} + +func (e *ExecutionError) Error() string { + return fmt.Sprintf("execution error code %d: %s", e.Code, e.Message) +} + +type FVMError struct { + FlowError fvmerrors.CodedError +} + +func (f *FVMError) Error() string { + return f.FlowError.Error() +} + +func (f *FVMError) Unwrap() error { + return f.FlowError +} + +func ConvertAccessError(err error) error { + switch typedErr := err.(type) { + case validator.IncompleteTransactionError: + return &IncompleteTransactionError{MissingFields: typedErr.MissingFields} + case validator.ExpiredTransactionError: + return &ExpiredTransactionError{RefHeight: typedErr.RefHeight, FinalHeight: typedErr.FinalHeight} + case validator.InvalidGasLimitError: + return &InvalidTransactionGasLimitError{Maximum: typedErr.Maximum, Actual: typedErr.Actual} + case validator.InvalidScriptError: + return &InvalidTransactionScriptError{ParserErr: typedErr.ParserErr} + } + + return err +} diff --git a/integration/internal/emulator/ledger.go b/integration/internal/emulator/ledger.go new file mode 100644 index 00000000000..915c0961d4a --- /dev/null +++ b/integration/internal/emulator/ledger.go @@ -0,0 +1,144 @@ +package emulator + +import ( + "context" + "errors" + "fmt" + "math" + + "github.com/onflow/cadence" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/meter" + "github.com/onflow/flow-go/fvm/storage/snapshot" + flowgo "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func configureLedger( + conf config, + store EmulatorStorage, + vm *fvm.VirtualMachine, + ctx fvm.Context, +) ( + *flowgo.Block, + snapshot.StorageSnapshot, + error, +) { + + latestBlock, err := store.LatestBlock(context.Background()) + if err != nil && !errors.Is(err, ErrNotFound) { + return nil, nil, err + } + + if errors.Is(err, ErrNotFound) { + // bootstrap the ledger with the genesis block + ledger, err := store.LedgerByHeight(context.Background(), 0) + if err != nil { + return nil, nil, err + } + + genesisExecutionSnapshot, err := bootstrapLedger(vm, ctx, ledger, conf) + if err != nil { + return nil, nil, fmt.Errorf("failed to bootstrap execution state: %w", err) + } + + // commit the genesis block to storage + genesis := unittest.Block.Genesis(conf.GetChainID()) + latestBlock = *genesis + + err = store.CommitBlock( + context.Background(), + genesis, + nil, + nil, + nil, + genesisExecutionSnapshot, + nil, + ) + if err != nil { + return nil, nil, err + } + } + + latestLedger, err := store.LedgerByHeight( + context.Background(), + latestBlock.Height, + ) + + if err != nil { + return nil, nil, err + } + + return &latestBlock, latestLedger, nil +} + +func bootstrapLedger( + vm *fvm.VirtualMachine, + ctx fvm.Context, + ledger snapshot.StorageSnapshot, + conf config, +) ( + *snapshot.ExecutionSnapshot, + error, +) { + serviceKey := conf.GetServiceKey() + + ctx = fvm.NewContextFromParent( + ctx, + fvm.WithAccountStorageLimit(false), + ) + + flowAccountKey := flowgo.AccountPublicKey{ + PublicKey: serviceKey.PublicKey, + SignAlgo: serviceKey.SigAlgo, + HashAlgo: serviceKey.HashAlgo, + Weight: fvm.AccountKeyWeightThreshold, + } + + bootstrap := configureBootstrapProcedure(conf, flowAccountKey, conf.GenesisTokenSupply) + + executionSnapshot, output, err := vm.Run(ctx, bootstrap, ledger) + if err != nil { + return nil, err + } + + if output.Err != nil { + return nil, output.Err + } + + return executionSnapshot, nil +} + +func 
configureBootstrapProcedure(conf config, flowAccountKey flowgo.AccountPublicKey, supply cadence.UFix64) *fvm.BootstrapProcedure {
+	options := make([]fvm.BootstrapProcedureOption, 0)
+	options = append(options,
+		fvm.WithInitialTokenSupply(supply),
+		fvm.WithRestrictedAccountCreationEnabled(false),
+		// This enables variable transaction fees and execution effort metering,
+		// as described in the Variable Transaction Fees: Execution Effort FLIP:
+		// https://github.com/onflow/flow/pull/753
+		fvm.WithTransactionFee(fvm.DefaultTransactionFees),
+		fvm.WithExecutionMemoryLimit(math.MaxUint32),
+		fvm.WithExecutionMemoryWeights(meter.DefaultMemoryWeights),
+		fvm.WithExecutionEffortWeights(environment.MainnetExecutionEffortWeights),
+	)
+
+	if conf.ExecutionEffortWeights != nil {
+		options = append(options,
+			fvm.WithExecutionEffortWeights(conf.ExecutionEffortWeights),
+		)
+	}
+	if conf.StorageLimitEnabled {
+		options = append(options,
+			fvm.WithAccountCreationFee(conf.MinimumStorageReservation),
+			fvm.WithMinimumStorageReservation(conf.MinimumStorageReservation),
+			fvm.WithStorageMBPerFLOW(conf.StorageMBPerFLOW),
+		)
+	}
+	return fvm.Bootstrap(
+		flowAccountKey,
+		options...,
+	)
+}
diff --git a/integration/internal/emulator/memstore.go b/integration/internal/emulator/memstore.go
new file mode 100644
index 00000000000..58c9c9c9aa3
--- /dev/null
+++ b/integration/internal/emulator/memstore.go
@@ -0,0 +1,395 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package emulator
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/onflow/flow-go/access/validator"
+	"github.com/onflow/flow-go/fvm/environment"
+	"github.com/onflow/flow-go/fvm/storage/snapshot"
+	flowgo "github.com/onflow/flow-go/model/flow"
+)
+
+// Store is an in-memory implementation of the EmulatorStorage interface.
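+// It also satisfies environment.Blocks and validator.Blocks, per the
+// interface assertions below.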
+type Store struct {
+	mu sync.RWMutex
+	// block ID to block height
+	blockIDToHeight map[flowgo.Identifier]uint64
+	// blocks by height
+	blocks map[uint64]flowgo.Block
+	// collections by ID
+	collections map[flowgo.Identifier]flowgo.LightCollection
+	// transactions by ID
+	transactions map[flowgo.Identifier]flowgo.TransactionBody
+	// transaction results by ID
+	transactionResults map[flowgo.Identifier]StorableTransactionResult
+	// ledger states by block height
+	ledger map[uint64]snapshot.SnapshotTree
+	// events by block height
+	eventsByBlockHeight map[uint64][]flowgo.Event
+	// highest block height
+	blockHeight uint64
+}
+
+var _ environment.Blocks = &Store{}
+var _ validator.Blocks = &Store{}
+var _ EmulatorStorage = &Store{}
+
+func (b *Store) HeaderByID(id flowgo.Identifier) (*flowgo.Header, error) {
+	block, err := b.BlockByID(context.Background(), id)
+	if err != nil {
+		if errors.Is(err, ErrNotFound) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	return block.ToHeader(), nil
+}
+
+func (b *Store) FinalizedHeader() (*flowgo.Header, error) {
+	block, err := b.LatestBlock(context.Background())
+	if err != nil {
+		return nil, err
+	}
+
+	return block.ToHeader(), nil
+}
+
+func (b *Store) SealedHeader() (*flowgo.Header, error) {
+	block, err := b.LatestBlock(context.Background())
+	if err != nil {
+		return nil, err
+	}
+
+	return block.ToHeader(), nil
+}
+
+func (b *Store) IndexedHeight() (uint64, error) {
+	block, err := b.LatestBlock(context.Background())
+	if err != nil {
+		return 0, err
+	}
+
+	return block.Height, nil
+}
+
+// ByHeightFrom returns the header at the given height. Nothing fork-aware is
+// needed here, since the emulator never forks the chain.
+func (b *Store) ByHeightFrom(height uint64, header *flowgo.Header) (*flowgo.Header, error) {
+	if height > header.Height {
+		return nil, ErrNotFound
+	}
+	block, err := b.BlockByHeight(context.Background(), height)
+	if err != nil {
+		return nil, err
+	}
+
+	return block.ToHeader(), nil
+}
+
+// NewMemoryStore returns a new in-memory Store implementation.
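+//
+// A minimal usage sketch (block is assumed to be an existing *flowgo.Block):
+//
+//	store := NewMemoryStore()
+//	if err := store.StoreBlock(context.Background(), block); err != nil {
+//		// handle error
+//	}
+//	latest, err := store.LatestBlock(context.Background())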
+func NewMemoryStore() *Store { + return &Store{ + mu: sync.RWMutex{}, + blockIDToHeight: make(map[flowgo.Identifier]uint64), + blocks: make(map[uint64]flowgo.Block), + collections: make(map[flowgo.Identifier]flowgo.LightCollection), + transactions: make(map[flowgo.Identifier]flowgo.TransactionBody), + transactionResults: make(map[flowgo.Identifier]StorableTransactionResult), + ledger: make(map[uint64]snapshot.SnapshotTree), + eventsByBlockHeight: make(map[uint64][]flowgo.Event), + } +} + +func (b *Store) Start() error { + return nil +} + +func (b *Store) Stop() { +} + +func (b *Store) LatestBlockHeight(ctx context.Context) (uint64, error) { + block, err := b.LatestBlock(ctx) + if err != nil { + return 0, err + } + + return block.Height, nil +} + +func (b *Store) LatestBlock(_ context.Context) (flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + latestBlock, ok := b.blocks[b.blockHeight] + if !ok { + return flowgo.Block{}, ErrNotFound + } + return latestBlock, nil +} + +func (b *Store) StoreBlock(_ context.Context, block *flowgo.Block) error { + b.mu.Lock() + defer b.mu.Unlock() + + return b.storeBlock(block) +} + +func (b *Store) storeBlock(block *flowgo.Block) error { + b.blocks[block.Height] = *block + b.blockIDToHeight[block.ID()] = block.Height + + if block.Height > b.blockHeight { + b.blockHeight = block.Height + } + + return nil +} + +func (b *Store) BlockByID(_ context.Context, blockID flowgo.Identifier) (*flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + blockHeight, ok := b.blockIDToHeight[blockID] + if !ok { + return nil, ErrNotFound + } + + block, ok := b.blocks[blockHeight] + if !ok { + return nil, ErrNotFound + } + + return &block, nil + +} + +func (b *Store) BlockByHeight(_ context.Context, height uint64) (*flowgo.Block, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + block, ok := b.blocks[height] + if !ok { + return nil, ErrNotFound + } + + return &block, nil +} + +func (b *Store) CommitBlock( + _ context.Context, + block *flowgo.Block, + collections []*flowgo.LightCollection, + transactions map[flowgo.Identifier]*flowgo.TransactionBody, + transactionResults map[flowgo.Identifier]*StorableTransactionResult, + executionSnapshot *snapshot.ExecutionSnapshot, + events []flowgo.Event, +) error { + b.mu.Lock() + defer b.mu.Unlock() + + if len(transactions) != len(transactionResults) { + return fmt.Errorf( + "transactions count (%d) does not match result count (%d)", + len(transactions), + len(transactionResults), + ) + } + + err := b.storeBlock(block) + if err != nil { + return err + } + + for _, col := range collections { + err := b.InsertCollection(col) + if err != nil { + return err + } + } + + for _, tx := range transactions { + err := b.InsertTransaction(tx.ID(), *tx) + if err != nil { + return err + } + } + + for txID, result := range transactionResults { + err := b.InsertTransactionResult(txID, *result) + if err != nil { + return err + } + } + + err = b.InsertExecutionSnapshot( + block.Height, + executionSnapshot) + if err != nil { + return err + } + + err = b.InsertEvents(block.Height, events) + if err != nil { + return err + } + + return nil +} + +func (b *Store) CollectionByID( + _ context.Context, + collectionID flowgo.Identifier, +) (flowgo.LightCollection, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + tx, ok := b.collections[collectionID] + if !ok { + return flowgo.LightCollection{}, ErrNotFound + } + return tx, nil +} + +func (b *Store) FullCollectionByID( + _ context.Context, + collectionID flowgo.Identifier, +) 
(flowgo.Collection, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + light, ok := b.collections[collectionID] + if !ok { + return flowgo.Collection{}, ErrNotFound + } + + txs := make([]*flowgo.TransactionBody, len(light.Transactions)) + for i, txID := range light.Transactions { + tx, ok := b.transactions[txID] + if !ok { + return flowgo.Collection{}, ErrNotFound + } + txs[i] = &tx + } + + return flowgo.Collection{ + Transactions: txs, + }, nil +} + +func (b *Store) TransactionByID( + _ context.Context, + transactionID flowgo.Identifier, +) (flowgo.TransactionBody, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + tx, ok := b.transactions[transactionID] + if !ok { + return flowgo.TransactionBody{}, ErrNotFound + } + return tx, nil + +} + +func (b *Store) TransactionResultByID( + _ context.Context, + transactionID flowgo.Identifier, +) (StorableTransactionResult, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + result, ok := b.transactionResults[transactionID] + if !ok { + return StorableTransactionResult{}, ErrNotFound + } + return result, nil + +} + +func (b *Store) LedgerByHeight( + _ context.Context, + blockHeight uint64, +) (snapshot.StorageSnapshot, error) { + return b.ledger[blockHeight], nil +} + +func (b *Store) EventsByHeight( + _ context.Context, + blockHeight uint64, + eventType string, +) ([]flowgo.Event, error) { + b.mu.RLock() + defer b.mu.RUnlock() + + allEvents := b.eventsByBlockHeight[blockHeight] + + events := make([]flowgo.Event, 0) + + for _, event := range allEvents { + if eventType == "" { + events = append(events, event) + } else { + if string(event.Type) == eventType { + events = append(events, event) + } + } + } + + return events, nil +} + +func (b *Store) InsertCollection(col *flowgo.LightCollection) error { + b.collections[col.ID()] = *col + return nil +} + +func (b *Store) InsertTransaction(txID flowgo.Identifier, tx flowgo.TransactionBody) error { + b.transactions[txID] = tx + return nil +} + +func (b *Store) InsertTransactionResult(txID flowgo.Identifier, result StorableTransactionResult) error { + b.transactionResults[txID] = result + return nil +} + +func (b *Store) InsertExecutionSnapshot( + blockHeight uint64, + executionSnapshot *snapshot.ExecutionSnapshot, +) error { + oldLedger := b.ledger[blockHeight-1] + + b.ledger[blockHeight] = oldLedger.Append(executionSnapshot) + + return nil +} + +func (b *Store) InsertEvents(blockHeight uint64, events []flowgo.Event) error { + if b.eventsByBlockHeight[blockHeight] == nil { + b.eventsByBlockHeight[blockHeight] = events + } else { + b.eventsByBlockHeight[blockHeight] = append(b.eventsByBlockHeight[blockHeight], events...) + } + + return nil +} diff --git a/integration/internal/emulator/mocks/emulator.go b/integration/internal/emulator/mocks/emulator.go new file mode 100644 index 00000000000..78b67d3a6c5 --- /dev/null +++ b/integration/internal/emulator/mocks/emulator.go @@ -0,0 +1,469 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/onflow/flow-go/integration/emulator (interfaces: Emulator) +// +// Generated by this command: +// +// mockgen -destination=emulator/mocks/emulator.go -package=mocks github.com/onflow/flow-go/integration/emulator Emulator +// + +// Package mocks is a generated GoMock package. 
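+//
+// An illustrative use with gomock (t is assumed to be a *testing.T):
+//
+//	ctrl := gomock.NewController(t)
+//	em := NewMockEmulator(ctrl)
+//	em.EXPECT().Ping().Return(nil)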
+package mocks + +import ( + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + + access "github.com/onflow/flow-go/model/access" + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flow "github.com/onflow/flow-go/model/flow" +) + +// MockEmulator is a mock of Emulator interface. +type MockEmulator struct { + ctrl *gomock.Controller + recorder *MockEmulatorMockRecorder + isgomock struct{} +} + +// MockEmulatorMockRecorder is the mock recorder for MockEmulator. +type MockEmulatorMockRecorder struct { + mock *MockEmulator +} + +// NewMockEmulator creates a new mock instance. +func NewMockEmulator(ctrl *gomock.Controller) *MockEmulator { + mock := &MockEmulator{ctrl: ctrl} + mock.recorder = &MockEmulatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEmulator) EXPECT() *MockEmulatorMockRecorder { + return m.recorder +} + +// AddTransaction mocks base method. +func (m *MockEmulator) AddTransaction(tx flow.TransactionBody) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddTransaction", tx) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddTransaction indicates an expected call of AddTransaction. +func (mr *MockEmulatorMockRecorder) AddTransaction(tx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTransaction", reflect.TypeOf((*MockEmulator)(nil).AddTransaction), tx) +} + +// CommitBlock mocks base method. +func (m *MockEmulator) CommitBlock() (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitBlock") + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CommitBlock indicates an expected call of CommitBlock. +func (mr *MockEmulatorMockRecorder) CommitBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitBlock", reflect.TypeOf((*MockEmulator)(nil).CommitBlock)) +} + +// DisableAutoMine mocks base method. +func (m *MockEmulator) DisableAutoMine() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DisableAutoMine") +} + +// DisableAutoMine indicates an expected call of DisableAutoMine. +func (mr *MockEmulatorMockRecorder) DisableAutoMine() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DisableAutoMine", reflect.TypeOf((*MockEmulator)(nil).DisableAutoMine)) +} + +// EnableAutoMine mocks base method. +func (m *MockEmulator) EnableAutoMine() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "EnableAutoMine") +} + +// EnableAutoMine indicates an expected call of EnableAutoMine. +func (mr *MockEmulatorMockRecorder) EnableAutoMine() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableAutoMine", reflect.TypeOf((*MockEmulator)(nil).EnableAutoMine)) +} + +// ExecuteAndCommitBlock mocks base method. +func (m *MockEmulator) ExecuteAndCommitBlock() (*flow.Block, []*emulator.TransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteAndCommitBlock") + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].([]*emulator.TransactionResult) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ExecuteAndCommitBlock indicates an expected call of ExecuteAndCommitBlock. 
+func (mr *MockEmulatorMockRecorder) ExecuteAndCommitBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteAndCommitBlock", reflect.TypeOf((*MockEmulator)(nil).ExecuteAndCommitBlock)) +} + +// ExecuteBlock mocks base method. +func (m *MockEmulator) ExecuteBlock() ([]*emulator.TransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteBlock") + ret0, _ := ret[0].([]*emulator.TransactionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteBlock indicates an expected call of ExecuteBlock. +func (mr *MockEmulatorMockRecorder) ExecuteBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteBlock", reflect.TypeOf((*MockEmulator)(nil).ExecuteBlock)) +} + +// ExecuteNextTransaction mocks base method. +func (m *MockEmulator) ExecuteNextTransaction() (*emulator.TransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteNextTransaction") + ret0, _ := ret[0].(*emulator.TransactionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteNextTransaction indicates an expected call of ExecuteNextTransaction. +func (mr *MockEmulatorMockRecorder) ExecuteNextTransaction() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteNextTransaction", reflect.TypeOf((*MockEmulator)(nil).ExecuteNextTransaction)) +} + +// ExecuteScript mocks base method. +func (m *MockEmulator) ExecuteScript(script []byte, arguments [][]byte) (*emulator.ScriptResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteScript", script, arguments) + ret0, _ := ret[0].(*emulator.ScriptResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteScript indicates an expected call of ExecuteScript. +func (mr *MockEmulatorMockRecorder) ExecuteScript(script, arguments any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteScript", reflect.TypeOf((*MockEmulator)(nil).ExecuteScript), script, arguments) +} + +// ExecuteScriptAtBlockHeight mocks base method. +func (m *MockEmulator) ExecuteScriptAtBlockHeight(script []byte, arguments [][]byte, blockHeight uint64) (*emulator.ScriptResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteScriptAtBlockHeight", script, arguments, blockHeight) + ret0, _ := ret[0].(*emulator.ScriptResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteScriptAtBlockHeight indicates an expected call of ExecuteScriptAtBlockHeight. +func (mr *MockEmulatorMockRecorder) ExecuteScriptAtBlockHeight(script, arguments, blockHeight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteScriptAtBlockHeight", reflect.TypeOf((*MockEmulator)(nil).ExecuteScriptAtBlockHeight), script, arguments, blockHeight) +} + +// ExecuteScriptAtBlockID mocks base method. +func (m *MockEmulator) ExecuteScriptAtBlockID(script []byte, arguments [][]byte, id flow.Identifier) (*emulator.ScriptResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteScriptAtBlockID", script, arguments, id) + ret0, _ := ret[0].(*emulator.ScriptResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteScriptAtBlockID indicates an expected call of ExecuteScriptAtBlockID. 
+func (mr *MockEmulatorMockRecorder) ExecuteScriptAtBlockID(script, arguments, id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteScriptAtBlockID", reflect.TypeOf((*MockEmulator)(nil).ExecuteScriptAtBlockID), script, arguments, id) +} + +// GetAccount mocks base method. +func (m *MockEmulator) GetAccount(address flow.Address) (*flow.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccount", address) + ret0, _ := ret[0].(*flow.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccount indicates an expected call of GetAccount. +func (mr *MockEmulatorMockRecorder) GetAccount(address any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccount", reflect.TypeOf((*MockEmulator)(nil).GetAccount), address) +} + +// GetAccountAtBlockHeight mocks base method. +func (m *MockEmulator) GetAccountAtBlockHeight(address flow.Address, blockHeight uint64) (*flow.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountAtBlockHeight", address, blockHeight) + ret0, _ := ret[0].(*flow.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountAtBlockHeight indicates an expected call of GetAccountAtBlockHeight. +func (mr *MockEmulatorMockRecorder) GetAccountAtBlockHeight(address, blockHeight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountAtBlockHeight", reflect.TypeOf((*MockEmulator)(nil).GetAccountAtBlockHeight), address, blockHeight) +} + +// GetAccountByIndex mocks base method. +func (m *MockEmulator) GetAccountByIndex(arg0 uint) (*flow.Account, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccountByIndex", arg0) + ret0, _ := ret[0].(*flow.Account) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccountByIndex indicates an expected call of GetAccountByIndex. +func (mr *MockEmulatorMockRecorder) GetAccountByIndex(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountByIndex", reflect.TypeOf((*MockEmulator)(nil).GetAccountByIndex), arg0) +} + +// GetBlockByHeight mocks base method. +func (m *MockEmulator) GetBlockByHeight(height uint64) (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockByHeight", height) + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockByHeight indicates an expected call of GetBlockByHeight. +func (mr *MockEmulatorMockRecorder) GetBlockByHeight(height any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByHeight", reflect.TypeOf((*MockEmulator)(nil).GetBlockByHeight), height) +} + +// GetBlockByID mocks base method. +func (m *MockEmulator) GetBlockByID(id flow.Identifier) (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockByID", id) + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockByID indicates an expected call of GetBlockByID. +func (mr *MockEmulatorMockRecorder) GetBlockByID(id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockByID", reflect.TypeOf((*MockEmulator)(nil).GetBlockByID), id) +} + +// GetCollectionByID mocks base method. 
+func (m *MockEmulator) GetCollectionByID(colID flow.Identifier) (*flow.LightCollection, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCollectionByID", colID) + ret0, _ := ret[0].(*flow.LightCollection) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCollectionByID indicates an expected call of GetCollectionByID. +func (mr *MockEmulatorMockRecorder) GetCollectionByID(colID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCollectionByID", reflect.TypeOf((*MockEmulator)(nil).GetCollectionByID), colID) +} + +// GetEventsByHeight mocks base method. +func (m *MockEmulator) GetEventsByHeight(blockHeight uint64, eventType string) ([]flow.Event, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEventsByHeight", blockHeight, eventType) + ret0, _ := ret[0].([]flow.Event) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEventsByHeight indicates an expected call of GetEventsByHeight. +func (mr *MockEmulatorMockRecorder) GetEventsByHeight(blockHeight, eventType any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventsByHeight", reflect.TypeOf((*MockEmulator)(nil).GetEventsByHeight), blockHeight, eventType) +} + +// GetEventsForBlockIDs mocks base method. +func (m *MockEmulator) GetEventsForBlockIDs(eventType string, blockIDs []flow.Identifier) ([]flow.BlockEvents, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEventsForBlockIDs", eventType, blockIDs) + ret0, _ := ret[0].([]flow.BlockEvents) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEventsForBlockIDs indicates an expected call of GetEventsForBlockIDs. +func (mr *MockEmulatorMockRecorder) GetEventsForBlockIDs(eventType, blockIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventsForBlockIDs", reflect.TypeOf((*MockEmulator)(nil).GetEventsForBlockIDs), eventType, blockIDs) +} + +// GetEventsForHeightRange mocks base method. +func (m *MockEmulator) GetEventsForHeightRange(eventType string, startHeight, endHeight uint64) ([]flow.BlockEvents, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEventsForHeightRange", eventType, startHeight, endHeight) + ret0, _ := ret[0].([]flow.BlockEvents) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEventsForHeightRange indicates an expected call of GetEventsForHeightRange. +func (mr *MockEmulatorMockRecorder) GetEventsForHeightRange(eventType, startHeight, endHeight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEventsForHeightRange", reflect.TypeOf((*MockEmulator)(nil).GetEventsForHeightRange), eventType, startHeight, endHeight) +} + +// GetFullCollectionByID mocks base method. +func (m *MockEmulator) GetFullCollectionByID(colID flow.Identifier) (*flow.Collection, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFullCollectionByID", colID) + ret0, _ := ret[0].(*flow.Collection) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFullCollectionByID indicates an expected call of GetFullCollectionByID. +func (mr *MockEmulatorMockRecorder) GetFullCollectionByID(colID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFullCollectionByID", reflect.TypeOf((*MockEmulator)(nil).GetFullCollectionByID), colID) +} + +// GetLatestBlock mocks base method. 
+func (m *MockEmulator) GetLatestBlock() (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLatestBlock") + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLatestBlock indicates an expected call of GetLatestBlock. +func (mr *MockEmulatorMockRecorder) GetLatestBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestBlock", reflect.TypeOf((*MockEmulator)(nil).GetLatestBlock)) +} + +// GetNetworkParameters mocks base method. +func (m *MockEmulator) GetNetworkParameters() access.NetworkParameters { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNetworkParameters") + ret0, _ := ret[0].(access.NetworkParameters) + return ret0 +} + +// GetNetworkParameters indicates an expected call of GetNetworkParameters. +func (mr *MockEmulatorMockRecorder) GetNetworkParameters() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkParameters", reflect.TypeOf((*MockEmulator)(nil).GetNetworkParameters)) +} + +// GetTransaction mocks base method. +func (m *MockEmulator) GetTransaction(txID flow.Identifier) (*flow.TransactionBody, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTransaction", txID) + ret0, _ := ret[0].(*flow.TransactionBody) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTransaction indicates an expected call of GetTransaction. +func (mr *MockEmulatorMockRecorder) GetTransaction(txID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTransaction", reflect.TypeOf((*MockEmulator)(nil).GetTransaction), txID) +} + +// GetTransactionResult mocks base method. +func (m *MockEmulator) GetTransactionResult(txID flow.Identifier) (*access.TransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTransactionResult", txID) + ret0, _ := ret[0].(*access.TransactionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTransactionResult indicates an expected call of GetTransactionResult. +func (mr *MockEmulatorMockRecorder) GetTransactionResult(txID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTransactionResult", reflect.TypeOf((*MockEmulator)(nil).GetTransactionResult), txID) +} + +// GetTransactionResultsByBlockID mocks base method. +func (m *MockEmulator) GetTransactionResultsByBlockID(blockID flow.Identifier) ([]*access.TransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTransactionResultsByBlockID", blockID) + ret0, _ := ret[0].([]*access.TransactionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTransactionResultsByBlockID indicates an expected call of GetTransactionResultsByBlockID. +func (mr *MockEmulatorMockRecorder) GetTransactionResultsByBlockID(blockID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTransactionResultsByBlockID", reflect.TypeOf((*MockEmulator)(nil).GetTransactionResultsByBlockID), blockID) +} + +// GetTransactionsByBlockID mocks base method. +func (m *MockEmulator) GetTransactionsByBlockID(blockID flow.Identifier) ([]*flow.TransactionBody, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTransactionsByBlockID", blockID) + ret0, _ := ret[0].([]*flow.TransactionBody) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTransactionsByBlockID indicates an expected call of GetTransactionsByBlockID. 
+func (mr *MockEmulatorMockRecorder) GetTransactionsByBlockID(blockID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTransactionsByBlockID", reflect.TypeOf((*MockEmulator)(nil).GetTransactionsByBlockID), blockID) +} + +// Ping mocks base method. +func (m *MockEmulator) Ping() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Ping") + ret0, _ := ret[0].(error) + return ret0 +} + +// Ping indicates an expected call of Ping. +func (mr *MockEmulatorMockRecorder) Ping() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockEmulator)(nil).Ping)) +} + +// SendTransaction mocks base method. +func (m *MockEmulator) SendTransaction(tx *flow.TransactionBody) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendTransaction", tx) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendTransaction indicates an expected call of SendTransaction. +func (mr *MockEmulatorMockRecorder) SendTransaction(tx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendTransaction", reflect.TypeOf((*MockEmulator)(nil).SendTransaction), tx) +} + +// ServiceKey mocks base method. +func (m *MockEmulator) ServiceKey() emulator.ServiceKey { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ServiceKey") + ret0, _ := ret[0].(emulator.ServiceKey) + return ret0 +} + +// ServiceKey indicates an expected call of ServiceKey. +func (mr *MockEmulatorMockRecorder) ServiceKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceKey", reflect.TypeOf((*MockEmulator)(nil).ServiceKey)) +} diff --git a/integration/internal/emulator/mocks/emulatorStorage.go b/integration/internal/emulator/mocks/emulatorStorage.go new file mode 100644 index 00000000000..32f9b4d6170 --- /dev/null +++ b/integration/internal/emulator/mocks/emulatorStorage.go @@ -0,0 +1,324 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/onflow/flow-emulator/emulator (interfaces: EmulatorStorage) +// +// Generated by this command: +// +// mockgen -destination=emulator/mocks/emulatorStorage.go -package=mocks github.com/onflow/flow-emulator/emulator EmulatorStorage +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" + + snapshot "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/integration/internal/emulator" + flow "github.com/onflow/flow-go/model/flow" +) + +// MockEmulatorStorage is a mock of EmulatorStorage interface. +type MockEmulatorStorage struct { + ctrl *gomock.Controller + recorder *MockEmulatorStorageMockRecorder + isgomock struct{} +} + +// MockEmulatorStorageMockRecorder is the mock recorder for MockEmulatorStorage. +type MockEmulatorStorageMockRecorder struct { + mock *MockEmulatorStorage +} + +// NewMockEmulatorStorage creates a new mock instance. +func NewMockEmulatorStorage(ctrl *gomock.Controller) *MockEmulatorStorage { + mock := &MockEmulatorStorage{ctrl: ctrl} + mock.recorder = &MockEmulatorStorageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEmulatorStorage) EXPECT() *MockEmulatorStorageMockRecorder { + return m.recorder +} + +// BlockByHeight mocks base method. 
+func (m *MockEmulatorStorage) BlockByHeight(ctx context.Context, height uint64) (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockByHeight", ctx, height) + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BlockByHeight indicates an expected call of BlockByHeight. +func (mr *MockEmulatorStorageMockRecorder) BlockByHeight(ctx, height any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByHeight", reflect.TypeOf((*MockEmulatorStorage)(nil).BlockByHeight), ctx, height) +} + +// BlockByID mocks base method. +func (m *MockEmulatorStorage) BlockByID(ctx context.Context, blockID flow.Identifier) (*flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BlockByID", ctx, blockID) + ret0, _ := ret[0].(*flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BlockByID indicates an expected call of BlockByID. +func (mr *MockEmulatorStorageMockRecorder) BlockByID(ctx, blockID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByID", reflect.TypeOf((*MockEmulatorStorage)(nil).BlockByID), ctx, blockID) +} + +// ByHeightFrom mocks base method. +func (m *MockEmulatorStorage) ByHeightFrom(height uint64, header *flow.Header) (*flow.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ByHeightFrom", height, header) + ret0, _ := ret[0].(*flow.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ByHeightFrom indicates an expected call of ByHeightFrom. +func (mr *MockEmulatorStorageMockRecorder) ByHeightFrom(height, header any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByHeightFrom", reflect.TypeOf((*MockEmulatorStorage)(nil).ByHeightFrom), height, header) +} + +// CollectionByID mocks base method. +func (m *MockEmulatorStorage) CollectionByID(ctx context.Context, collectionID flow.Identifier) (flow.LightCollection, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CollectionByID", ctx, collectionID) + ret0, _ := ret[0].(flow.LightCollection) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CollectionByID indicates an expected call of CollectionByID. +func (mr *MockEmulatorStorageMockRecorder) CollectionByID(ctx, collectionID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CollectionByID", reflect.TypeOf((*MockEmulatorStorage)(nil).CollectionByID), ctx, collectionID) +} + +// CommitBlock mocks base method. +func (m *MockEmulatorStorage) CommitBlock(ctx context.Context, block flow.Block, collections []*flow.LightCollection, transactions map[flow.Identifier]*flow.TransactionBody, transactionResults map[flow.Identifier]*emulator.StorableTransactionResult, executionSnapshot *snapshot.ExecutionSnapshot, events []flow.Event) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitBlock", ctx, block, collections, transactions, transactionResults, executionSnapshot, events) + ret0, _ := ret[0].(error) + return ret0 +} + +// CommitBlock indicates an expected call of CommitBlock. 
+func (mr *MockEmulatorStorageMockRecorder) CommitBlock(ctx, block, collections, transactions, transactionResults, executionSnapshot, events any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitBlock", reflect.TypeOf((*MockEmulatorStorage)(nil).CommitBlock), ctx, block, collections, transactions, transactionResults, executionSnapshot, events) +} + +// EventsByHeight mocks base method. +func (m *MockEmulatorStorage) EventsByHeight(ctx context.Context, blockHeight uint64, eventType string) ([]flow.Event, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EventsByHeight", ctx, blockHeight, eventType) + ret0, _ := ret[0].([]flow.Event) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EventsByHeight indicates an expected call of EventsByHeight. +func (mr *MockEmulatorStorageMockRecorder) EventsByHeight(ctx, blockHeight, eventType any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EventsByHeight", reflect.TypeOf((*MockEmulatorStorage)(nil).EventsByHeight), ctx, blockHeight, eventType) +} + +// FinalizedHeader mocks base method. +func (m *MockEmulatorStorage) FinalizedHeader() (*flow.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FinalizedHeader") + ret0, _ := ret[0].(*flow.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FinalizedHeader indicates an expected call of FinalizedHeader. +func (mr *MockEmulatorStorageMockRecorder) FinalizedHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FinalizedHeader", reflect.TypeOf((*MockEmulatorStorage)(nil).FinalizedHeader)) +} + +// FullCollectionByID mocks base method. +func (m *MockEmulatorStorage) FullCollectionByID(ctx context.Context, collectionID flow.Identifier) (flow.Collection, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FullCollectionByID", ctx, collectionID) + ret0, _ := ret[0].(flow.Collection) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FullCollectionByID indicates an expected call of FullCollectionByID. +func (mr *MockEmulatorStorageMockRecorder) FullCollectionByID(ctx, collectionID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FullCollectionByID", reflect.TypeOf((*MockEmulatorStorage)(nil).FullCollectionByID), ctx, collectionID) +} + +// HeaderByID mocks base method. +func (m *MockEmulatorStorage) HeaderByID(id flow.Identifier) (*flow.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HeaderByID", id) + ret0, _ := ret[0].(*flow.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HeaderByID indicates an expected call of HeaderByID. +func (mr *MockEmulatorStorageMockRecorder) HeaderByID(id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByID", reflect.TypeOf((*MockEmulatorStorage)(nil).HeaderByID), id) +} + +// IndexedHeight mocks base method. +func (m *MockEmulatorStorage) IndexedHeight() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IndexedHeight") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IndexedHeight indicates an expected call of IndexedHeight. +func (mr *MockEmulatorStorageMockRecorder) IndexedHeight() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexedHeight", reflect.TypeOf((*MockEmulatorStorage)(nil).IndexedHeight)) +} + +// LatestBlock mocks base method. 
+func (m *MockEmulatorStorage) LatestBlock(ctx context.Context) (flow.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LatestBlock", ctx) + ret0, _ := ret[0].(flow.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LatestBlock indicates an expected call of LatestBlock. +func (mr *MockEmulatorStorageMockRecorder) LatestBlock(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LatestBlock", reflect.TypeOf((*MockEmulatorStorage)(nil).LatestBlock), ctx) +} + +// LatestBlockHeight mocks base method. +func (m *MockEmulatorStorage) LatestBlockHeight(ctx context.Context) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LatestBlockHeight", ctx) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LatestBlockHeight indicates an expected call of LatestBlockHeight. +func (mr *MockEmulatorStorageMockRecorder) LatestBlockHeight(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LatestBlockHeight", reflect.TypeOf((*MockEmulatorStorage)(nil).LatestBlockHeight), ctx) +} + +// LedgerByHeight mocks base method. +func (m *MockEmulatorStorage) LedgerByHeight(ctx context.Context, blockHeight uint64) (snapshot.StorageSnapshot, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LedgerByHeight", ctx, blockHeight) + ret0, _ := ret[0].(snapshot.StorageSnapshot) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LedgerByHeight indicates an expected call of LedgerByHeight. +func (mr *MockEmulatorStorageMockRecorder) LedgerByHeight(ctx, blockHeight any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LedgerByHeight", reflect.TypeOf((*MockEmulatorStorage)(nil).LedgerByHeight), ctx, blockHeight) +} + +// SealedHeader mocks base method. +func (m *MockEmulatorStorage) SealedHeader() (*flow.Header, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SealedHeader") + ret0, _ := ret[0].(*flow.Header) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SealedHeader indicates an expected call of SealedHeader. +func (mr *MockEmulatorStorageMockRecorder) SealedHeader() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SealedHeader", reflect.TypeOf((*MockEmulatorStorage)(nil).SealedHeader)) +} + +// Start mocks base method. +func (m *MockEmulatorStorage) Start() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Start") + ret0, _ := ret[0].(error) + return ret0 +} + +// Start indicates an expected call of Start. +func (mr *MockEmulatorStorageMockRecorder) Start() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockEmulatorStorage)(nil).Start)) +} + +// Stop mocks base method. +func (m *MockEmulatorStorage) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. +func (mr *MockEmulatorStorageMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockEmulatorStorage)(nil).Stop)) +} + +// StoreBlock mocks base method. +func (m *MockEmulatorStorage) StoreBlock(ctx context.Context, block *flow.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StoreBlock", ctx, block) + ret0, _ := ret[0].(error) + return ret0 +} + +// StoreBlock indicates an expected call of StoreBlock. 
+func (mr *MockEmulatorStorageMockRecorder) StoreBlock(ctx, block any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreBlock", reflect.TypeOf((*MockEmulatorStorage)(nil).StoreBlock), ctx, block) +} + +// TransactionByID mocks base method. +func (m *MockEmulatorStorage) TransactionByID(ctx context.Context, transactionID flow.Identifier) (flow.TransactionBody, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TransactionByID", ctx, transactionID) + ret0, _ := ret[0].(flow.TransactionBody) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TransactionByID indicates an expected call of TransactionByID. +func (mr *MockEmulatorStorageMockRecorder) TransactionByID(ctx, transactionID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionByID", reflect.TypeOf((*MockEmulatorStorage)(nil).TransactionByID), ctx, transactionID) +} + +// TransactionResultByID mocks base method. +func (m *MockEmulatorStorage) TransactionResultByID(ctx context.Context, transactionID flow.Identifier) (emulator.StorableTransactionResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TransactionResultByID", ctx, transactionID) + ret0, _ := ret[0].(emulator.StorableTransactionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TransactionResultByID indicates an expected call of TransactionResultByID. +func (mr *MockEmulatorStorageMockRecorder) TransactionResultByID(ctx, transactionID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TransactionResultByID", reflect.TypeOf((*MockEmulatorStorage)(nil).TransactionResultByID), ctx, transactionID) +} diff --git a/integration/internal/emulator/pendingBlock.go b/integration/internal/emulator/pendingBlock.go new file mode 100644 index 00000000000..fda3e0d8d58 --- /dev/null +++ b/integration/internal/emulator/pendingBlock.go @@ -0,0 +1,238 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package emulator + +import ( + "math/rand" + "time" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/storage/state" + flowgo "github.com/onflow/flow-go/model/flow" +) + +type IndexedTransactionResult struct { + fvm.ProcedureOutput + Index uint32 +} + +// MaxViewIncrease represents the largest difference in view number between +// two consecutive blocks. The minimum view increment is 1. +const MaxViewIncrease = 3 + +// A pendingBlock contains the pending state required to form a new block. 
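+//
+// The intended lifecycle, as sketched from the methods below: AddTransaction
+// queues a transaction, ExecuteNextTransaction runs it against the working
+// ledger state, and Finalize produces the execution snapshot that is
+// committed alongside the block.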
+type pendingBlock struct { + height uint64 + view uint64 + chainID flowgo.ChainID + parentID flowgo.Identifier + timestamp time.Time + // mapping from transaction ID to transaction + transactions map[flowgo.Identifier]*flowgo.TransactionBody + // list of transaction IDs in the block + transactionIDs []flowgo.Identifier + // mapping from transaction ID to transaction result + transactionResults map[flowgo.Identifier]IndexedTransactionResult + // current working ledger, updated after each transaction execution + ledgerState *state.ExecutionState + // events emitted during execution + events []flowgo.Event + // index of transaction execution + index uint32 +} + +// newPendingBlock creates a new pending block sequentially after a specified block. +func newPendingBlock( + prevBlock *flowgo.Block, + ledgerSnapshot snapshot.StorageSnapshot, + chainID flowgo.ChainID, + timestamp time.Time, +) *pendingBlock { + pb := &pendingBlock{ + height: prevBlock.Height + 1, + // the view increments by between 1 and MaxViewIncrease to match + // behaviour on a real network, where views are not consecutive + view: prevBlock.View + uint64(rand.Intn(MaxViewIncrease)+1), + chainID: chainID, + parentID: prevBlock.ID(), + timestamp: timestamp, + transactions: make(map[flowgo.Identifier]*flowgo.TransactionBody), + transactionIDs: make([]flowgo.Identifier, 0), + transactionResults: make(map[flowgo.Identifier]IndexedTransactionResult), + ledgerState: state.NewExecutionState( + ledgerSnapshot, + state.DefaultParameters()), + events: make([]flowgo.Event, 0), + index: 0, + } + + return pb +} + +// Block returns the block information for the pending block. +func (b *pendingBlock) Block() *flowgo.Block { + collections := b.Collections() + + guarantees := make([]*flowgo.CollectionGuarantee, len(collections)) + for i, collection := range collections { + guarantees[i] = &flowgo.CollectionGuarantee{ + CollectionID: collection.ID(), + } + } + + //nolint:structwrite - safe because Emulator is strictly used for integration tests + return &flowgo.Block{ + HeaderBody: flowgo.HeaderBody{ + ChainID: b.chainID, + Height: b.height, + View: b.view, + ParentID: b.parentID, + Timestamp: uint64(b.timestamp.UnixMilli()), + }, + Payload: flowgo.Payload{ + Guarantees: guarantees, + }, + } +} + +func (b *pendingBlock) Collections() []*flowgo.LightCollection { + if len(b.transactionIDs) == 0 { + return []*flowgo.LightCollection{} + } + + transactionIDs := make([]flowgo.Identifier, len(b.transactionIDs)) + + // TODO: remove once SDK models are removed + copy(transactionIDs, b.transactionIDs) + + collection := flowgo.LightCollection{Transactions: transactionIDs} + + return []*flowgo.LightCollection{&collection} +} + +func (b *pendingBlock) Transactions() map[flowgo.Identifier]*flowgo.TransactionBody { + return b.transactions +} + +func (b *pendingBlock) TransactionResults() map[flowgo.Identifier]IndexedTransactionResult { + return b.transactionResults +} + +// Finalize returns the execution snapshot for the pending block. +func (b *pendingBlock) Finalize() *snapshot.ExecutionSnapshot { + return b.ledgerState.Finalize() +} + +// AddTransaction adds a transaction to the pending block. +func (b *pendingBlock) AddTransaction(tx flowgo.TransactionBody) { + b.transactionIDs = append(b.transactionIDs, tx.ID()) + b.transactions[tx.ID()] = &tx +} + +// ContainsTransaction checks if a transaction is included in the pending block. 
+func (b *pendingBlock) ContainsTransaction(txID flowgo.Identifier) bool {
+	_, exists := b.transactions[txID]
+	return exists
+}
+
+// GetTransaction retrieves a transaction in the pending block by ID.
+func (b *pendingBlock) GetTransaction(txID flowgo.Identifier) *flowgo.TransactionBody {
+	return b.transactions[txID]
+}
+
+// NextTransaction returns the next transaction to be executed, or nil if all
+// transactions have already been executed.
+func (b *pendingBlock) NextTransaction() *flowgo.TransactionBody {
+	if int(b.index) >= len(b.transactionIDs) {
+		return nil
+	}
+
+	txID := b.transactionIDs[b.index]
+	return b.GetTransaction(txID)
+}
+
+// ExecuteNextTransaction executes the next transaction in the pending block.
+//
+// The transaction is run on the provided VM with the provided context; the
+// pending block is then updated with the resulting output.
+func (b *pendingBlock) ExecuteNextTransaction(
+	vm *fvm.VirtualMachine,
+	ctx fvm.Context,
+) (
+	fvm.ProcedureOutput,
+	error,
+) {
+	txnBody := b.NextTransaction()
+	txnIndex := b.index
+
+	// increment transaction index even if transaction reverts
+	b.index++
+
+	executionSnapshot, output, err := vm.Run(
+		ctx,
+		fvm.Transaction(txnBody, txnIndex),
+		b.ledgerState)
+	if err != nil {
+		// fail fast if fatal error occurs
+		return fvm.ProcedureOutput{}, err
+	}
+
+	b.events = append(b.events, output.Events...)
+
+	err = b.ledgerState.Merge(executionSnapshot)
+	if err != nil {
+		// fail fast if fatal error occurs
+		return fvm.ProcedureOutput{}, err
+	}
+
+	b.transactionResults[txnBody.ID()] = IndexedTransactionResult{
+		ProcedureOutput: output,
+		Index:           txnIndex,
+	}
+
+	return output, nil
+}
+
+// Events returns all events captured during the execution of the pending block.
+func (b *pendingBlock) Events() []flowgo.Event {
+	return b.events
+}
+
+// ExecutionStarted returns true if the pending block has started executing.
+func (b *pendingBlock) ExecutionStarted() bool {
+	return b.index > 0
+}
+
+// ExecutionComplete returns true if the pending block is fully executed.
+func (b *pendingBlock) ExecutionComplete() bool {
+	return b.index >= uint32(b.Size())
+}
+
+// Size returns the number of transactions in the pending block.
+func (b *pendingBlock) Size() int {
+	return len(b.transactionIDs)
+}
+
+// Empty returns true if the pending block is empty.
+func (b *pendingBlock) Empty() bool {
+	return b.Size() == 0
+}
+
+func (b *pendingBlock) SetTimestamp(timestamp time.Time) {
+	b.timestamp = timestamp
+}
diff --git a/integration/internal/emulator/result.go b/integration/internal/emulator/result.go
new file mode 100644
index 00000000000..a726a91d011
--- /dev/null
+++ b/integration/internal/emulator/result.go
@@ -0,0 +1,104 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package emulator
+
+import (
+	"fmt"
+
+	"github.com/onflow/cadence"
+
+	flowsdk "github.com/onflow/flow-go-sdk"
+
+	flowgo "github.com/onflow/flow-go/model/flow"
+)
+
+type StorableTransactionResult struct {
+	ErrorCode    int
+	ErrorMessage string
+	Logs         []string
+	Events       []flowgo.Event
+	BlockID      flowgo.Identifier
+	BlockHeight  uint64
+}
+
+// A TransactionResult is the result of executing a transaction.
+type TransactionResult struct {
+	TransactionID   flowsdk.Identifier
+	ComputationUsed uint64
+	MemoryEstimate  uint64
+	Error           error
+	Logs            []string
+	Events          []flowsdk.Event
+	Debug           *TransactionResultDebug
+}
+
+// Succeeded returns true if the transaction executed without errors.
+func (r TransactionResult) Succeeded() bool {
+	return r.Error == nil
+}
+
+// Reverted returns true if the transaction executed with errors.
+func (r TransactionResult) Reverted() bool {
+	return !r.Succeeded()
+}
+
+// TransactionResultDebug provides details about unsuccessful transaction execution.
+type TransactionResultDebug struct {
+	Message string
+	Meta    map[string]any
+}
+
+// NewTransactionInvalidSignature creates debug details for transactions that
+// failed signature validation.
+func NewTransactionInvalidSignature(
+	tx *flowgo.TransactionBody,
+) *TransactionResultDebug {
+	return &TransactionResultDebug{
+		Message: "",
+		Meta: map[string]any{
+			"payer":            tx.Payer.String(),
+			"proposer":         tx.ProposalKey.Address.String(),
+			"proposerKeyIndex": fmt.Sprintf("%d", tx.ProposalKey.KeyIndex),
+			"authorizers":      fmt.Sprintf("%v", tx.Authorizers),
+			"gasLimit":         fmt.Sprintf("%d", tx.GasLimit),
+		},
+	}
+}
+
+// TODO - this type should be part of the SDK for consistency
+
+// A ScriptResult is the result of executing a script.
+type ScriptResult struct {
+	ScriptID        flowgo.Identifier
+	Value           cadence.Value
+	Error           error
+	Logs            []string
+	Events          []flowgo.Event
+	ComputationUsed uint64
+	MemoryEstimate  uint64
+}
+
+// Succeeded returns true if the script executed without errors.
+func (r ScriptResult) Succeeded() bool {
+	return r.Error == nil
+}
+
+// Reverted returns true if the script executed with errors.
+func (r ScriptResult) Reverted() bool {
+	return !r.Succeeded()
+}
diff --git a/integration/internal/emulator/sdk.go b/integration/internal/emulator/sdk.go
new file mode 100644
index 00000000000..2919d4539f9
--- /dev/null
+++ b/integration/internal/emulator/sdk.go
@@ -0,0 +1,552 @@
+package emulator
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/rs/zerolog"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"github.com/onflow/cadence"
+	jsoncdc "github.com/onflow/cadence/encoding/json"
+	"github.com/onflow/cadence/stdlib"
+	"github.com/onflow/flow/protobuf/go/flow/entities"
+
+	sdk "github.com/onflow/flow-go-sdk"
+	"github.com/onflow/flow-go-sdk/templates"
+
+	accessmodel "github.com/onflow/flow-go/model/access"
+	flowgo "github.com/onflow/flow-go/model/flow"
+)
+
+// SDKAdapter wraps an Emulator and implements the RPC handlers
+// required by the Access API.
+type SDKAdapter struct {
+	logger   *zerolog.Logger
+	emulator Emulator
+}
+
+func (b *SDKAdapter) EnableAutoMine() {
+	b.emulator.EnableAutoMine()
+}
+func (b *SDKAdapter) DisableAutoMine() {
+	b.emulator.DisableAutoMine()
+}
+
+func (b *SDKAdapter) Emulator() Emulator {
+	return b.emulator
+}
+
+// NewSDKAdapter returns a new SDKAdapter.
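+//
+// An illustrative construction (em is assumed to be an existing Emulator):
+//
+//	logger := zerolog.Nop()
+//	adapter := NewSDKAdapter(&logger, em)
+//	err := adapter.Ping(context.Background())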
+func NewSDKAdapter(logger *zerolog.Logger, emulator Emulator) *SDKAdapter { + return &SDKAdapter{ + logger: logger, + emulator: emulator, + } +} + +func (b *SDKAdapter) Ping(ctx context.Context) error { + return b.emulator.Ping() +} + +func (b *SDKAdapter) GetChainID(ctx context.Context) sdk.ChainID { + return sdk.ChainID(b.emulator.GetNetworkParameters().ChainID) +} + +// GetLatestBlockHeader gets the latest sealed block header. +func (b *SDKAdapter) GetLatestBlockHeader( + _ context.Context, + _ bool, +) ( + *sdk.BlockHeader, + sdk.BlockStatus, + error, +) { + block, err := b.emulator.GetLatestBlock() + if err != nil { + return nil, sdk.BlockStatusUnknown, status.Error(codes.Internal, err.Error()) + } + blockHeader := sdk.BlockHeader{ + ID: sdk.Identifier(block.ID()), + ParentID: sdk.Identifier(block.ParentID), + Height: block.Height, + Timestamp: time.UnixMilli(int64(block.Timestamp)).UTC(), + } + return &blockHeader, sdk.BlockStatusSealed, nil +} + +// GetBlockHeaderByHeight gets a block header by height. +func (b *SDKAdapter) GetBlockHeaderByHeight( + _ context.Context, + height uint64, +) ( + *sdk.BlockHeader, + sdk.BlockStatus, + error, +) { + block, err := b.emulator.GetBlockByHeight(height) + if err != nil { + return nil, sdk.BlockStatusUnknown, status.Error(codes.Internal, err.Error()) + } + blockHeader := sdk.BlockHeader{ + ID: sdk.Identifier(block.ID()), + ParentID: sdk.Identifier(block.ParentID), + Height: block.Height, + Timestamp: time.UnixMilli(int64(block.Timestamp)).UTC(), + } + return &blockHeader, sdk.BlockStatusSealed, nil +} + +// GetBlockHeaderByID gets a block header by ID. +func (b *SDKAdapter) GetBlockHeaderByID( + _ context.Context, + id sdk.Identifier, +) ( + *sdk.BlockHeader, + sdk.BlockStatus, + error, +) { + block, err := b.emulator.GetBlockByID(SDKIdentifierToFlow(id)) + if err != nil { + return nil, sdk.BlockStatusUnknown, err + } + blockHeader := sdk.BlockHeader{ + ID: sdk.Identifier(block.ID()), + ParentID: sdk.Identifier(block.ParentID), + Height: block.Height, + Timestamp: time.UnixMilli(int64(block.Timestamp)).UTC(), + } + return &blockHeader, sdk.BlockStatusSealed, nil +} + +// GetLatestBlock gets the latest sealed block. +func (b *SDKAdapter) GetLatestBlock( + _ context.Context, + _ bool, +) ( + *sdk.Block, + sdk.BlockStatus, + error, +) { + flowBlock, err := b.emulator.GetLatestBlock() + if err != nil { + return nil, sdk.BlockStatusUnknown, status.Error(codes.Internal, err.Error()) + } + block := sdk.Block{ + BlockHeader: sdk.BlockHeader{ + ID: sdk.Identifier(flowBlock.ID()), + ParentID: sdk.Identifier(flowBlock.ParentID), + Height: flowBlock.Height, + Timestamp: time.UnixMilli(int64(flowBlock.Timestamp)).UTC(), + }, + BlockPayload: convertBlockPayload(&flowBlock.Payload), + } + return &block, sdk.BlockStatusSealed, nil +} + +// GetBlockByHeight gets a block by height. +func (b *SDKAdapter) GetBlockByHeight( + ctx context.Context, + height uint64, +) ( + *sdk.Block, + sdk.BlockStatus, + error, +) { + flowBlock, err := b.emulator.GetBlockByHeight(height) + if err != nil { + return nil, sdk.BlockStatusUnknown, status.Error(codes.Internal, err.Error()) + } + block := sdk.Block{ + BlockHeader: sdk.BlockHeader{ + ID: sdk.Identifier(flowBlock.ID()), + ParentID: sdk.Identifier(flowBlock.ParentID), + Height: flowBlock.Height, + Timestamp: time.UnixMilli(int64(flowBlock.Timestamp)).UTC(), + }, + BlockPayload: convertBlockPayload(&flowBlock.Payload), + } + return &block, sdk.BlockStatusSealed, nil +} + +// GetBlockByID gets a block by ID. 
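+// Every block the emulator produces is treated as sealed, so the returned
+// status is always sdk.BlockStatusSealed.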
+func (b *SDKAdapter) GetBlockByID(
+ _ context.Context,
+ id sdk.Identifier,
+) (
+ *sdk.Block,
+ sdk.BlockStatus,
+ error,
+) {
+ flowBlock, err := b.emulator.GetBlockByID(SDKIdentifierToFlow(id))
+ if err != nil {
+ return nil, sdk.BlockStatusUnknown, status.Error(codes.Internal, err.Error())
+ }
+ block := sdk.Block{
+ BlockHeader: sdk.BlockHeader{
+ ID: sdk.Identifier(flowBlock.ID()),
+ ParentID: sdk.Identifier(flowBlock.ParentID),
+ Height: flowBlock.Height,
+ Timestamp: time.UnixMilli(int64(flowBlock.Timestamp)).UTC(),
+ },
+ BlockPayload: convertBlockPayload(&flowBlock.Payload),
+ }
+ return &block, sdk.BlockStatusSealed, nil
+}
+
+func convertBlockPayload(payload *flowgo.Payload) sdk.BlockPayload {
+ var seals []*sdk.BlockSeal
+ sealCount := len(payload.Seals)
+ if sealCount > 0 {
+ seals = make([]*sdk.BlockSeal, 0, sealCount)
+ for _, seal := range payload.Seals {
+ seals = append(seals, &sdk.BlockSeal{
+ BlockID: sdk.Identifier(seal.BlockID),
+ ExecutionReceiptID: sdk.Identifier(seal.ResultID),
+ })
+ }
+ }
+
+ var collectionGuarantees []*sdk.CollectionGuarantee
+ guaranteesCount := len(payload.Guarantees)
+ if guaranteesCount > 0 {
+ collectionGuarantees = make([]*sdk.CollectionGuarantee, 0, guaranteesCount)
+ for _, guarantee := range payload.Guarantees {
+ collectionGuarantees = append(collectionGuarantees, &sdk.CollectionGuarantee{
+ CollectionID: sdk.Identifier(guarantee.CollectionID),
+ })
+ }
+ }
+
+ return sdk.BlockPayload{
+ Seals: seals,
+ CollectionGuarantees: collectionGuarantees,
+ }
+}
+
+// GetCollectionByID gets a collection by ID.
+func (b *SDKAdapter) GetCollectionByID(
+ _ context.Context,
+ id sdk.Identifier,
+) (*sdk.Collection, error) {
+ flowCollection, err := b.emulator.GetCollectionByID(SDKIdentifierToFlow(id))
+ if err != nil {
+ return nil, err
+ }
+ collection := FlowLightCollectionToSDK(*flowCollection)
+ return &collection, nil
+}
+
+func (b *SDKAdapter) SendTransaction(ctx context.Context, tx sdk.Transaction) error {
+ flowTx := SDKTransactionToFlow(tx)
+ return b.emulator.SendTransaction(flowTx)
+}
+
+// GetTransaction gets a transaction by ID.
+func (b *SDKAdapter) GetTransaction(
+ ctx context.Context,
+ id sdk.Identifier,
+) (*sdk.Transaction, error) {
+ tx, err := b.emulator.GetTransaction(SDKIdentifierToFlow(id))
+ if err != nil {
+ return nil, err
+ }
+ sdkTx := FlowTransactionToSDK(*tx)
+ return &sdkTx, nil
+}
+
+// GetTransactionResult gets a transaction result by ID.
+func (b *SDKAdapter) GetTransactionResult(
+ ctx context.Context,
+ id sdk.Identifier,
+) (*sdk.TransactionResult, error) {
+ flowResult, err := b.emulator.GetTransactionResult(SDKIdentifierToFlow(id))
+ if err != nil {
+ return nil, err
+ }
+ return FlowTransactionResultToSDK(flowResult)
+}
+
+// GetAccount returns an account by address at the latest sealed block.
+func (b *SDKAdapter) GetAccount(
+ ctx context.Context,
+ address sdk.Address,
+) (*sdk.Account, error) {
+ account, err := b.getAccount(address)
+ if err != nil {
+ return nil, err
+ }
+ return account, nil
+}
+
+// GetAccountAtLatestBlock returns an account by address at the latest sealed block.
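+// It behaves identically to GetAccount; both delegate to the unexported
+// getAccount helper.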
+func (b *SDKAdapter) GetAccountAtLatestBlock(
+ ctx context.Context,
+ address sdk.Address,
+) (*sdk.Account, error) {
+ account, err := b.getAccount(address)
+ if err != nil {
+ return nil, err
+ }
+ return account, nil
+}
+
+func (b *SDKAdapter) getAccount(address sdk.Address) (*sdk.Account, error) {
+ account, err := b.emulator.GetAccount(SDKAddressToFlow(address))
+ if err != nil {
+ return nil, err
+ }
+ return FlowAccountToSDK(*account)
+}
+
+func (b *SDKAdapter) GetAccountAtBlockHeight(
+ ctx context.Context,
+ address sdk.Address,
+ height uint64,
+) (*sdk.Account, error) {
+ account, err := b.emulator.GetAccountAtBlockHeight(SDKAddressToFlow(address), height)
+ if err != nil {
+ return nil, err
+ }
+ return FlowAccountToSDK(*account)
+}
+
+// ExecuteScriptAtLatestBlock executes a script at the latest block
+func (b *SDKAdapter) ExecuteScriptAtLatestBlock(
+ ctx context.Context,
+ script []byte,
+ arguments [][]byte,
+) ([]byte, error) {
+ block, err := b.emulator.GetLatestBlock()
+ if err != nil {
+ return nil, err
+ }
+ return b.executeScriptAtBlock(script, arguments, block.Height)
+}
+
+// ExecuteScriptAtBlockHeight executes a script at a specific block height
+func (b *SDKAdapter) ExecuteScriptAtBlockHeight(
+ ctx context.Context,
+ blockHeight uint64,
+ script []byte,
+ arguments [][]byte,
+) ([]byte, error) {
+ return b.executeScriptAtBlock(script, arguments, blockHeight)
+}
+
+// ExecuteScriptAtBlockID executes a script at a specific block ID
+func (b *SDKAdapter) ExecuteScriptAtBlockID(
+ ctx context.Context,
+ blockID sdk.Identifier,
+ script []byte,
+ arguments [][]byte,
+) ([]byte, error) {
+ block, err := b.emulator.GetBlockByID(SDKIdentifierToFlow(blockID))
+ if err != nil {
+ return nil, err
+ }
+ return b.executeScriptAtBlock(script, arguments, block.Height)
+}
+
+// executeScriptAtBlock is a helper for executing a script at a specific block
+func (b *SDKAdapter) executeScriptAtBlock(script []byte, arguments [][]byte, blockHeight uint64) ([]byte, error) {
+ result, err := b.emulator.ExecuteScriptAtBlockHeight(script, arguments, blockHeight)
+ if err != nil {
+ return nil, err
+ }
+ if !result.Succeeded() {
+ return nil, result.Error
+ }
+ valueBytes, err := jsoncdc.Encode(result.Value)
+ if err != nil {
+ return nil, err
+ }
+ return valueBytes, nil
+}
+
+func (b *SDKAdapter) GetLatestProtocolStateSnapshot(_ context.Context) ([]byte, error) {
+ return nil, nil
+}
+
+func (b *SDKAdapter) GetProtocolStateSnapshotByBlockID(_ context.Context, _ flowgo.Identifier) ([]byte, error) {
+ return nil, nil
+}
+
+func (b *SDKAdapter) GetProtocolStateSnapshotByHeight(_ context.Context, _ uint64) ([]byte, error) {
+ return nil, nil
+}
+
+func (b *SDKAdapter) GetExecutionResultForBlockID(_ context.Context, _ sdk.Identifier) (*sdk.ExecutionResult, error) {
+ return nil, nil
+}
+
+func (b *SDKAdapter) GetSystemTransaction(_ context.Context, _ flowgo.Identifier) (*flowgo.TransactionBody, error) {
+ return nil, nil
+}
+
+func (b *SDKAdapter) GetSystemTransactionResult(_ context.Context, _ flowgo.Identifier, _ entities.EventEncodingVersion) (*accessmodel.TransactionResult, error) {
+ return nil, nil
+}
+
+func (b *SDKAdapter) GetTransactionsByBlockID(ctx context.Context, id sdk.Identifier) ([]*sdk.Transaction, error) {
+ result := []*sdk.Transaction{}
+ transactions, err := b.emulator.GetTransactionsByBlockID(SDKIdentifierToFlow(id))
+ if err != nil {
+ return nil, err
+ }
+ for _, transaction := range transactions {
+ sdkTransaction := FlowTransactionToSDK(*transaction)
+ result = append(result, &sdkTransaction)
+ }
+ return result, nil
+}
+
+func (b *SDKAdapter) GetTransactionResultsByBlockID(ctx context.Context, id sdk.Identifier) ([]*sdk.TransactionResult, error) {
+ result := []*sdk.TransactionResult{}
+ transactionResults, err := b.emulator.GetTransactionResultsByBlockID(SDKIdentifierToFlow(id))
+ if err != nil {
+ return nil, err
+ }
+ for _, transactionResult := range transactionResults {
+ sdkResult, err := FlowTransactionResultToSDK(transactionResult)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, sdkResult)
+ }
+ return result, nil
+}
+
+func (b *SDKAdapter) GetEventsForBlockIDs(ctx context.Context, eventType string, blockIDs []sdk.Identifier) ([]*sdk.BlockEvents, error) {
+ result := []*sdk.BlockEvents{}
+ flowBlockEvents, err := b.emulator.GetEventsForBlockIDs(eventType, SDKIdentifiersToFlow(blockIDs))
+ if err != nil {
+ return nil, err
+ }
+
+ for _, flowBlockEvent := range flowBlockEvents {
+ sdkEvents, err := FlowEventsToSDK(flowBlockEvent.Events)
+ if err != nil {
+ return nil, err
+ }
+
+ sdkBlockEvents := &sdk.BlockEvents{
+ BlockID: sdk.Identifier(flowBlockEvent.BlockID),
+ Height: flowBlockEvent.BlockHeight,
+ BlockTimestamp: flowBlockEvent.BlockTimestamp,
+ Events: sdkEvents,
+ }
+
+ result = append(result, sdkBlockEvents)
+ }
+
+ return result, nil
+}
+
+func (b *SDKAdapter) GetEventsForHeightRange(ctx context.Context, eventType string, startHeight, endHeight uint64) ([]*sdk.BlockEvents, error) {
+ result := []*sdk.BlockEvents{}
+
+ flowBlockEvents, err := b.emulator.GetEventsForHeightRange(eventType, startHeight, endHeight)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, flowBlockEvent := range flowBlockEvents {
+ sdkEvents, err := FlowEventsToSDK(flowBlockEvent.Events)
+ if err != nil {
+ return nil, err
+ }
+
+ sdkBlockEvents := &sdk.BlockEvents{
+ BlockID: sdk.Identifier(flowBlockEvent.BlockID),
+ Height: flowBlockEvent.BlockHeight,
+ BlockTimestamp: flowBlockEvent.BlockTimestamp,
+ Events: sdkEvents,
+ }
+
+ result = append(result, sdkBlockEvents)
+ }
+
+ return result, nil
+}
+
+// CreateAccount submits a transaction to create a new account with the given
+// account keys and contracts. The transaction is paid by the service account.
+func (b *SDKAdapter) CreateAccount(ctx context.Context, publicKeys []*sdk.AccountKey, contracts []templates.Contract) (sdk.Address, error) {
+
+ serviceKey := b.emulator.ServiceKey()
+ serviceAddress := FlowAddressToSDK(serviceKey.Address)
+
+ latestBlock, err := b.emulator.GetLatestBlock()
+ if err != nil {
+ return sdk.Address{}, err
+ }
+
+ if publicKeys == nil {
+ publicKeys = []*sdk.AccountKey{}
+ }
+ tx, err := templates.CreateAccount(publicKeys, contracts, serviceAddress)
+ if err != nil {
+ return sdk.Address{}, err
+ }
+
+ tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetReferenceBlockID(sdk.Identifier(latestBlock.ID())).
+ SetProposalKey(serviceAddress, serviceKey.Index, serviceKey.SequenceNumber).
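+ // the service account acts as proposer and payer, and signs the envelope
+ // below, making it the transaction's sole signer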
+ SetPayer(serviceAddress)
+
+ signer, err := serviceKey.Signer()
+ if err != nil {
+ return sdk.Address{}, err
+ }
+
+ err = tx.SignEnvelope(serviceAddress, serviceKey.Index, signer)
+ if err != nil {
+ return sdk.Address{}, err
+ }
+
+ err = b.SendTransaction(ctx, *tx)
+ if err != nil {
+ return sdk.Address{}, err
+ }
+
+ _, results, err := b.emulator.ExecuteAndCommitBlock()
+ if err != nil {
+ return sdk.Address{}, err
+ }
+ lastResult := results[len(results)-1]
+
+ _, err = b.emulator.CommitBlock()
+ if err != nil {
+ return sdk.Address{}, err
+ }
+
+ if !lastResult.Succeeded() {
+ return sdk.Address{}, lastResult.Error
+ }
+
+ var address sdk.Address
+
+ for _, event := range lastResult.Events {
+ if event.Type == sdk.EventAccountCreated {
+ addressFieldValue := cadence.SearchFieldByName(
+ event.Value,
+ stdlib.AccountEventAddressParameter.Identifier,
+ )
+ address = sdk.Address(addressFieldValue.(cadence.Address))
+ break
+ }
+ }
+
+ if address == (sdk.Address{}) {
+ return sdk.Address{}, fmt.Errorf("failed to find AccountCreated event")
+ }
+
+ return address, nil
+}
diff --git a/integration/internal/emulator/store.go b/integration/internal/emulator/store.go
new file mode 100644
index 00000000000..e9f673705cc
--- /dev/null
+++ b/integration/internal/emulator/store.go
@@ -0,0 +1,98 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file defines the interface for interacting with persistent chain state.
+package emulator
+
+import (
+ "context"
+
+ "github.com/psiemens/graceland"
+
+ "github.com/onflow/flow-go/access/validator"
+ "github.com/onflow/flow-go/fvm/environment"
+ "github.com/onflow/flow-go/fvm/storage/snapshot"
+ flowgo "github.com/onflow/flow-go/model/flow"
+)
+
+// EmulatorStorage defines the storage layer for persistent chain state.
+//
+// This includes finalized blocks and transactions, and the resultant register
+// states and emitted events. It does not include pending state, such as pending
+// transactions and register states.
+//
+// Implementations must distinguish between not found errors and errors with
+// the underlying storage by returning an instance of store.ErrNotFound if a
+// resource cannot be found.
+//
+// Implementations must be safe for use by multiple goroutines.
+type EmulatorStorage interface {
+ graceland.Routine
+ environment.Blocks
+ validator.Blocks
+ LatestBlockHeight(ctx context.Context) (uint64, error)
+
+ // LatestBlock returns the block with the highest block height.
+ LatestBlock(ctx context.Context) (flowgo.Block, error)
+
+ // StoreBlock stores the block in storage. If the exact same block is already
+ // in storage, it returns successfully.
+ StoreBlock(ctx context.Context, block *flowgo.Block) error
+
+ // BlockByID returns the block with the given ID. It is available for
+ // finalized and ambiguous blocks.
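+ // Per the interface contract above, implementations return store.ErrNotFound
+ // if no block with the given ID exists.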
+ BlockByID(ctx context.Context, blockID flowgo.Identifier) (*flowgo.Block, error) + + // BlockByHeight returns the block at the given height. It is only available + // for finalized blocks. + BlockByHeight(ctx context.Context, height uint64) (*flowgo.Block, error) + + // CommitBlock atomically saves the execution results for a block. + CommitBlock( + ctx context.Context, + block *flowgo.Block, + collections []*flowgo.LightCollection, + transactions map[flowgo.Identifier]*flowgo.TransactionBody, + transactionResults map[flowgo.Identifier]*StorableTransactionResult, + executionSnapshot *snapshot.ExecutionSnapshot, + events []flowgo.Event, + ) error + + // CollectionByID gets the collection (transaction IDs only) with the given ID. + CollectionByID(ctx context.Context, collectionID flowgo.Identifier) (flowgo.LightCollection, error) + + // FullCollectionByID gets the full collection (including transaction bodies) with the given ID. + FullCollectionByID(ctx context.Context, collectionID flowgo.Identifier) (flowgo.Collection, error) + + // TransactionByID gets the transaction with the given ID. + TransactionByID(ctx context.Context, transactionID flowgo.Identifier) (flowgo.TransactionBody, error) + + // TransactionResultByID gets the transaction result with the given ID. + TransactionResultByID(ctx context.Context, transactionID flowgo.Identifier) (StorableTransactionResult, error) + + // LedgerByHeight returns a storage snapshot into the ledger state + // at a given block. + LedgerByHeight( + ctx context.Context, + blockHeight uint64, + ) (snapshot.StorageSnapshot, error) + + // EventsByHeight returns the events in the block at the given height, optionally filtered by type. + EventsByHeight(ctx context.Context, blockHeight uint64, eventType string) ([]flowgo.Event, error) +} diff --git a/integration/internal/emulator/templates/systemChunkTransactionTemplate.cdc b/integration/internal/emulator/templates/systemChunkTransactionTemplate.cdc new file mode 100644 index 00000000000..81edd4fffbf --- /dev/null +++ b/integration/internal/emulator/templates/systemChunkTransactionTemplate.cdc @@ -0,0 +1,21 @@ +import "RandomBeaconHistory" +import "EVM" +import Migration from "Migration" + +transaction { + prepare(serviceAccount: auth(BorrowValue) &Account) { + let randomBeaconHistoryHeartbeat = serviceAccount.storage + .borrow<&RandomBeaconHistory.Heartbeat>(from: RandomBeaconHistory.HeartbeatStoragePath) + ?? panic("Couldn't borrow RandomBeaconHistory.Heartbeat Resource") + randomBeaconHistoryHeartbeat.heartbeat(randomSourceHistory: randomSourceHistory()) + + let evmHeartbeat = serviceAccount.storage + .borrow<&EVM.Heartbeat>(from: /storage/EVMHeartbeat) + ?? panic("Couldn't borrow EVM.Heartbeat Resource") + evmHeartbeat.heartbeat() + + let migrationAdmin = serviceAccount.storage + .borrow<&Migration.Admin>(from: Migration.adminStoragePath) + migrationAdmin?.migrate() + } +} diff --git a/integration/internal/emulator/tests/accounts_test.go b/integration/internal/emulator/tests/accounts_test.go new file mode 100644 index 00000000000..5102eb43f66 --- /dev/null +++ b/integration/internal/emulator/tests/accounts_test.go @@ -0,0 +1,1218 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tests
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/rs/zerolog"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ flowsdk "github.com/onflow/flow-go-sdk"
+ "github.com/onflow/flow-go-sdk/crypto"
+ "github.com/onflow/flow-go-sdk/templates"
+ "github.com/onflow/flow-go-sdk/test"
+
+ fvmerrors "github.com/onflow/flow-go/fvm/errors"
+ emulator "github.com/onflow/flow-go/integration/internal/emulator"
+ flowgo "github.com/onflow/flow-go/model/flow"
+)
+
+const testContract = "access(all) contract Test {}"
+
+func setupAccountTests(t *testing.T, opts ...emulator.Option) (
+ *emulator.Blockchain,
+ *emulator.SDKAdapter,
+) {
+ b, err := emulator.New(
+ opts...,
+ )
+ require.NoError(t, err)
+
+ logger := zerolog.Nop()
+ return b, emulator.NewSDKAdapter(&logger, b)
+}
+
+func TestGetAccount(t *testing.T) {
+
+ t.Parallel()
+
+ t.Run("Get account at latest block height", func(t *testing.T) {
+
+ t.Parallel()
+ b, adapter := setupAccountTests(t)
+ serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+ acc, err := adapter.GetAccount(context.Background(), serviceAccountAddress)
+ assert.NoError(t, err)
+
+ assert.Equal(t, uint64(0), acc.Keys[0].SequenceNumber)
+
+ })
+
+ t.Run("Get account at latest block by index", func(t *testing.T) {
+
+ t.Parallel()
+ b, adapter := setupAccountTests(t)
+ serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+ acc, err := adapter.GetAccount(context.Background(), serviceAccountAddress)
+ assert.NoError(t, err)
+
+ assert.Equal(t, uint64(0), acc.Keys[0].SequenceNumber)
+
+ flowAccount, err := b.GetAccountByIndex(1) //service account
+ assert.NoError(t, err)
+
+ assert.Equal(t, uint64(0), flowAccount.Keys[0].SeqNumber)
+ assert.Equal(t, acc.Address.String(), flowAccount.Address.String())
+
+ })
+
+ t.Run("Get account at specified block height", func(t *testing.T) {
+
+ t.Parallel()
+
+ b, adapter := setupAccountTests(t)
+ serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+ acc, err := adapter.GetAccount(context.Background(), serviceAccountAddress)
+ assert.NoError(t, err)
+
+ assert.Equal(t, uint64(0), acc.Keys[0].SequenceNumber)
+ contract := templates.Contract{
+ Name: "Test",
+ Source: testContract,
+ }
+
+ tx := templates.AddAccountContract(serviceAccountAddress, contract)
+
+ tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + bl, err := b.CommitBlock() + assert.NoError(t, err) + + accNow, err := adapter.GetAccountAtBlockHeight(context.Background(), serviceAccountAddress, bl.Height) + assert.NoError(t, err) + + accPrev, err := adapter.GetAccountAtBlockHeight(context.Background(), serviceAccountAddress, bl.Height-uint64(1)) + assert.NoError(t, err) + + assert.Equal(t, accNow.Keys[0].SequenceNumber, uint64(1)) + assert.Equal(t, accPrev.Keys[0].SequenceNumber, uint64(0)) + }) +} + +func TestCreateAccount(t *testing.T) { + + t.Parallel() + + accountKeys := test.AccountKeyGenerator() + + t.Run("Simple addresses", func(t *testing.T) { + b, adapter := setupAccountTests( + t, + emulator.WithSimpleAddresses(), + ) + + accountKey := accountKeys.New() + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + nil, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + assert.Equal(t, "0000000000000006", account.Address.Hex()) + assert.Equal(t, uint64(0x186a0), account.Balance) + require.Len(t, account.Keys, 1) + assert.Equal(t, accountKey.PublicKey.Encode(), account.Keys[0].PublicKey.Encode()) + assert.Empty(t, account.Contracts) + }) + + t.Run("Single public keys", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + accountKey := accountKeys.New() + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + nil, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + require.Len(t, account.Keys, 1) + assert.Equal(t, accountKey.PublicKey.Encode(), account.Keys[0].PublicKey.Encode()) + assert.Empty(t, account.Contracts) + }) + + t.Run("Multiple public keys", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + accountKeyA := accountKeys.New() + accountKeyB := accountKeys.New() + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKeyA, accountKeyB}, + nil, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + require.Len(t, account.Keys, 2) + assert.Equal(t, accountKeyA.PublicKey.Encode(), account.Keys[0].PublicKey.Encode()) + assert.Equal(t, accountKeyB.PublicKey.Encode(), account.Keys[1].PublicKey.Encode()) + assert.Empty(t, account.Contracts) + }) + + t.Run("Public keys and contract", func(t *testing.T) { + b, adapter := setupAccountTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + accountKeyA := accountKeys.New() + accountKeyB := accountKeys.New() + + contracts := []templates.Contract{ + { + Name: "Test", + Source: testContract, + }, + } + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKeyA, accountKeyB}, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + require.Len(t, account.Keys, 2) + assert.Equal(t, accountKeyA.PublicKey.Encode(), account.Keys[0].PublicKey.Encode()) + assert.Equal(t, accountKeyB.PublicKey.Encode(), account.Keys[1].PublicKey.Encode()) + assert.Equal(t, + map[string][]byte{ + "Test": []byte(testContract), + }, + account.Contracts, + ) + }) + + t.Run("Public keys and two contracts", func(t *testing.T) { + b, adapter := setupAccountTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + codeA := ` + access(all) contract Test1 { + access(all) fun a(): Int { + return 1 + } + } + ` + codeB := ` + access(all) contract Test2 { + access(all) fun b(): Int { + return 2 + } + } + ` + + accountKey := accountKeys.New() + + contracts := []templates.Contract{ + { + Name: "Test1", + Source: codeA, + }, + { + Name: "Test2", + Source: codeB, + }, + } + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + require.Len(t, account.Keys, 1) + assert.Equal(t, accountKey.PublicKey.Encode(), account.Keys[0].PublicKey.Encode()) + assert.Equal(t, + map[string][]byte{ + "Test1": []byte(codeA), + "Test2": []byte(codeB), + }, + account.Contracts, + ) + }) + + t.Run("Code and no keys", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + contracts := []templates.Contract{ + { + Name: "Test", + Source: testContract, + }, + } + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + nil, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err := LastCreatedAccount(b, result) + require.NoError(t, err) + + assert.Empty(t, account.Keys) + assert.Equal(t, + map[string][]byte{ + "Test": []byte(testContract), + }, + account.Contracts, + ) + }) + + t.Run("Event emitted", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + accountKey := accountKeys.New() + + contracts := []templates.Contract{ + { + Name: "Test", + Source: testContract, + }, + } + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + block, err := b.CommitBlock() + require.NoError(t, err) + + events, err := adapter.GetEventsForHeightRange(context.Background(), flowsdk.EventAccountCreated, block.Height, block.Height) + require.NoError(t, err) + require.Len(t, events, 1) + + accountEvent := flowsdk.AccountCreatedEvent(events[0].Events[0]) + + account, err := adapter.GetAccount(context.Background(), accountEvent.Address()) + assert.NoError(t, err) + + require.Len(t, account.Keys, 1) + assert.Equal(t, accountKey.PublicKey, account.Keys[0].PublicKey) + assert.Equal(t, + map[string][]byte{ + "Test": []byte(testContract), + }, + account.Contracts, + ) + }) + + t.Run("Invalid hash algorithm", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + accountKey := accountKeys.New() + accountKey.SetHashAlgo(crypto.SHA3_384) // SHA3_384 is invalid for ECDSA_P256 + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + nil, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + + assert.True(t, result.Reverted()) + }) + + t.Run("Invalid code", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + contracts := []templates.Contract{ + { + Name: "Test", + Source: "not a valid script", + }, + } + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + nil, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + + assert.True(t, result.Reverted()) + }) + + t.Run("Invalid contract name", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + contracts := []templates.Contract{ + { + Name: "Test2", + Source: testContract, + }, + } + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx, err := templates.CreateAccount( + nil, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + + assert.True(t, result.Reverted()) + }) +} + +func TestAddAccountKey(t *testing.T) { + + t.Parallel() + + accountKeys := test.AccountKeyGenerator() + + t.Run("Valid key", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + newAccountKey, newSigner := accountKeys.NewWithSigner() + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx1, err := templates.AddAccountKey(serviceAccountAddress, newAccountKey) + assert.NoError(t, err) + + tx1.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + script := []byte("transaction { execute {} }") + + var newKeyID = uint32(1) // new key will have ID 1 + var newKeySequenceNum uint64 = 0 + + tx2 := flowsdk.NewTransaction(). + SetScript(script). 
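+ // propose with the newly added key (index 1, sequence number 0) rather
+ // than the service key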
+ SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetProposalKey(serviceAccountAddress, newKeyID, newKeySequenceNum).
+ SetPayer(serviceAccountAddress)
+
+ err = tx2.SignEnvelope(serviceAccountAddress, newKeyID, newSigner)
+ assert.NoError(t, err)
+
+ err = adapter.SendTransaction(context.Background(), *tx2)
+ require.NoError(t, err)
+
+ result, err = b.ExecuteNextTransaction()
+ require.NoError(t, err)
+ AssertTransactionSucceeded(t, result)
+
+ _, err = b.CommitBlock()
+ assert.NoError(t, err)
+ })
+
+ t.Run("Invalid hash algorithm", func(t *testing.T) {
+ b, adapter := setupAccountTests(t)
+
+ accountKey := accountKeys.New()
+ accountKey.SetHashAlgo(crypto.SHA3_384) // SHA3_384 is invalid for ECDSA_P256
+ serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+ tx, err := templates.AddAccountKey(serviceAccountAddress, accountKey)
+ assert.NoError(t, err)
+
+ tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+ SetPayer(serviceAccountAddress)
+
+ signer, err := b.ServiceKey().Signer()
+ require.NoError(t, err)
+
+ err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+ require.NoError(t, err)
+
+ err = adapter.SendTransaction(context.Background(), *tx)
+ assert.NoError(t, err)
+
+ result, err := b.ExecuteNextTransaction()
+ assert.NoError(t, err)
+ assert.True(t, result.Reverted())
+ })
+}
+
+func TestRemoveAccountKey(t *testing.T) {
+
+ t.Parallel()
+
+ b, adapter := setupAccountTests(t)
+
+ accountKeys := test.AccountKeyGenerator()
+
+ newAccountKey, newSigner := accountKeys.NewWithSigner()
+ serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+ // create transaction that adds public key to account keys
+ tx1, err := templates.AddAccountKey(serviceAccountAddress, newAccountKey)
+ assert.NoError(t, err)
+
+ // set the compute limit, proposal key, and payer
+ tx1.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+ SetPayer(serviceAccountAddress)
+
+ // sign with service key
+ signer, err := b.ServiceKey().Signer()
+ require.NoError(t, err)
+
+ err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+ require.NoError(t, err)
+
+ // submit tx1 (should succeed)
+ err = adapter.SendTransaction(context.Background(), *tx1)
+ assert.NoError(t, err)
+
+ result, err := b.ExecuteNextTransaction()
+ assert.NoError(t, err)
+ AssertTransactionSucceeded(t, result)
+
+ _, err = b.CommitBlock()
+ assert.NoError(t, err)
+
+ account, err := adapter.GetAccount(context.Background(), serviceAccountAddress)
+ assert.NoError(t, err)
+
+ require.Len(t, account.Keys, 2)
+ assert.False(t, account.Keys[0].Revoked)
+ assert.False(t, account.Keys[1].Revoked)
+
+ // create transaction that removes service key
+ tx2 := templates.RemoveAccountKey(serviceAccountAddress, 0)
+
+ tx2.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+ SetPayer(serviceAccountAddress)
+
+ // sign with service key
+ signer, err = b.ServiceKey().Signer()
+ assert.NoError(t, err)
+ err = tx2.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+ assert.NoError(t, err)
+
+ // submit tx2 (should succeed)
+ err = adapter.SendTransaction(context.Background(), *tx2)
+ assert.NoError(t, err)
+
+ result, err = b.ExecuteNextTransaction()
+ assert.NoError(t, err)
+ AssertTransactionSucceeded(t, result)
+
+ _, err = b.CommitBlock()
+ assert.NoError(t, err)
+
+ account, err = adapter.GetAccount(context.Background(), serviceAccountAddress)
+ assert.NoError(t, err)
+
+ // key at index 0 should be revoked
+ require.Len(t, account.Keys, 2)
+ assert.True(t, account.Keys[0].Revoked)
+ assert.False(t, account.Keys[1].Revoked)
+
+ // create transaction that attempts to remove the key at index 0 again,
+ // proposing with the already-revoked key
+ tx3 := templates.RemoveAccountKey(serviceAccountAddress, 0)
+
+ tx3.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+ SetPayer(serviceAccountAddress)
+
+ // sign with service key (which has been revoked)
+ signer, err = b.ServiceKey().Signer()
+ assert.NoError(t, err)
+ err = tx3.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+ assert.NoError(t, err)
+
+ // submit tx3 (should fail)
+ err = adapter.SendTransaction(context.Background(), *tx3)
+ assert.NoError(t, err)
+
+ result, err = b.ExecuteNextTransaction()
+ assert.NoError(t, err)
+
+ var sigErr fvmerrors.CodedError
+ assert.ErrorAs(t, result.Error, &sigErr)
+ assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeInvalidProposalSignatureError))
+
+ _, err = b.CommitBlock()
+ assert.NoError(t, err)
+
+ account, err = adapter.GetAccount(context.Background(), serviceAccountAddress)
+ assert.NoError(t, err)
+
+ // key at index 1 should not be revoked
+ require.Len(t, account.Keys, 2)
+ assert.True(t, account.Keys[0].Revoked)
+ assert.False(t, account.Keys[1].Revoked)
+
+ // create transaction that removes remaining account key
+ tx4 := templates.RemoveAccountKey(serviceAccountAddress, 1)
+
+ tx4.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetProposalKey(serviceAccountAddress, account.Keys[1].Index, account.Keys[1].SequenceNumber).
+ SetPayer(serviceAccountAddress) + + // sign with remaining account key + err = tx4.SignEnvelope(serviceAccountAddress, account.Keys[1].Index, newSigner) + assert.NoError(t, err) + + // submit tx4 (should succeed) + err = adapter.SendTransaction(context.Background(), *tx4) + assert.NoError(t, err) + + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err = adapter.GetAccount(context.Background(), serviceAccountAddress) + assert.NoError(t, err) + + // all keys should be revoked + for _, key := range account.Keys { + assert.True(t, key.Revoked) + } +} + +func TestUpdateAccountCode(t *testing.T) { + + t.Parallel() + + const codeA = ` + access(all) contract Test { + access(all) fun a(): Int { + return 1 + } + } + ` + + const codeB = ` + access(all) contract Test { + access(all) fun b(): Int { + return 2 + } + } + ` + + accountKeys := test.AccountKeyGenerator() + + accountKeyB, signerB := accountKeys.NewWithSigner() + + t.Run("Valid signature", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + contracts := []templates.Contract{ + { + Name: "Test", + Source: codeA, + }, + } + + accountAddressB, err := adapter.CreateAccount( + context.Background(), + []*flowsdk.AccountKey{accountKeyB}, + contracts, + ) + require.NoError(t, err) + + account, err := adapter.GetAccount(context.Background(), accountAddressB) + require.NoError(t, err) + + assert.Equal(t, + map[string][]byte{ + "Test": []byte(codeA), + }, + account.Contracts, + ) + + tx := templates.UpdateAccountContract( + accountAddressB, + templates.Contract{ + Name: "Test", + Source: codeB, + }, + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + err = tx.SignPayload(accountAddressB, 0, signerB) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + account, err = adapter.GetAccount(context.Background(), accountAddressB) + assert.NoError(t, err) + + assert.Equal(t, codeB, string(account.Contracts["Test"])) + }) + + t.Run("Invalid signature", func(t *testing.T) { + b, adapter := setupAccountTests(t) + + contracts := []templates.Contract{ + { + Name: "Test", + Source: codeA, + }, + } + + accountAddressB, err := adapter.CreateAccount( + context.Background(), + []*flowsdk.AccountKey{accountKeyB}, + contracts, + ) + require.NoError(t, err) + + account, err := adapter.GetAccount(context.Background(), accountAddressB) + require.NoError(t, err) + + assert.Equal(t, codeA, string(account.Contracts["Test"])) + + tx := templates.UpdateAccountContract( + accountAddressB, + templates.Contract{ + Name: "Test", + Source: codeB, + }, + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress)
+
+ signer, err := b.ServiceKey().Signer()
+ require.NoError(t, err)
+
+ err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+ require.NoError(t, err)
+
+ err = adapter.SendTransaction(context.Background(), *tx)
+ assert.NoError(t, err)
+
+ result, err := b.ExecuteNextTransaction()
+ assert.NoError(t, err)
+
+ assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeAccountAuthorizationError))
+
+ _, err = b.CommitBlock()
+ assert.NoError(t, err)
+
+ account, err = adapter.GetAccount(context.Background(), accountAddressB)
+ assert.NoError(t, err)
+
+ // code should not be updated
+ assert.Equal(t, codeA, string(account.Contracts["Test"]))
+ })
+}
+
+func TestImportAccountCode(t *testing.T) {
+
+ t.Parallel()
+
+ b, adapter := setupAccountTests(t)
+
+ accountContracts := []templates.Contract{
+ {
+ Name: "Computer",
+ Source: `
+ access(all) contract Computer {
+ access(all) fun answer(): Int {
+ return 42
+ }
+ }
+ `,
+ },
+ }
+
+ address, err := adapter.CreateAccount(context.Background(), nil, accountContracts)
+ assert.NoError(t, err)
+
+ script := []byte(fmt.Sprintf(`
+ // address imports can omit leading zeros
+ import 0x%s
+
+ transaction {
+ execute {
+ let answer = Computer.answer()
+ if answer != 42 {
+ panic("?!")
+ }
+ }
+ }
+ `, address))
+ serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+ tx := flowsdk.NewTransaction().
+ SetScript(script).
+ SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+ SetPayer(serviceAccountAddress)
+
+ signer, err := b.ServiceKey().Signer()
+ require.NoError(t, err)
+
+ err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+ require.NoError(t, err)
+
+ err = adapter.SendTransaction(context.Background(), *tx)
+ assert.NoError(t, err)
+
+ result, err := b.ExecuteNextTransaction()
+ assert.NoError(t, err)
+ AssertTransactionSucceeded(t, result)
+}
+
+func TestAccountAccess(t *testing.T) {
+
+ t.Parallel()
+
+ b, adapter := setupAccountTests(t)
+
+ // Create first account and deploy a contract A
+ // which has a field
+ // which only other code in the same account should be allowed to access
+
+ accountContracts := []templates.Contract{
+ {
+ Name: "A",
+ Source: `
+ access(all) contract A {
+ access(account) let a: Int
+
+ init() {
+ self.a = 1
+ }
+ }
+ `,
+ },
+ }
+
+ accountKeys := test.AccountKeyGenerator()
+
+ accountKey1, signer1 := accountKeys.NewWithSigner()
+
+ address1, err := adapter.CreateAccount(
+ context.Background(),
+ []*flowsdk.AccountKey{accountKey1},
+ accountContracts,
+ )
+ assert.NoError(t, err)
+
+ // Deploy another contract B to the same account
+ // which accesses the field in contract A
+ // which allows access to code in the same account
+
+ tx := templates.AddAccountContract(
+ address1,
+ templates.Contract{
+ Name: "B",
+ Source: fmt.Sprintf(`
+ import A from 0x%s
+
+ access(all) contract B {
+ access(all) fun use() {
+ let b = A.a
+ }
+ }
+ `,
+ address1.Hex(),
+ ),
+ },
+ )
+ serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+ tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+ SetPayer(serviceAccountAddress)
+
+ err = tx.SignPayload(address1, 0, signer1)
+ assert.NoError(t, err)
+
+ signer, err := b.ServiceKey().Signer()
+ require.NoError(t, err)
+
+ err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+ require.NoError(t, err)
+
+ err = adapter.SendTransaction(context.Background(), *tx)
+ require.NoError(t, err)
+
+ result, err := b.ExecuteNextTransaction()
+ require.NoError(t, err)
+ AssertTransactionSucceeded(t, result)
+
+ _, err = b.CommitBlock()
+ require.NoError(t, err)
+
+ // Create a second account
+
+ accountKey2, signer2 := accountKeys.NewWithSigner()
+
+ address2, err := adapter.CreateAccount(
+ context.Background(),
+ []*flowsdk.AccountKey{accountKey2},
+ nil,
+ )
+ assert.NoError(t, err)
+
+ // Deploy a contract C to the second account
+ // which accesses the field in contract A of the first account.
+ // This must fail, because only code in the same account as A may access the field
+
+ tx = templates.AddAccountContract(
+ address2,
+ templates.Contract{
+ Name: "C",
+ Source: fmt.Sprintf(`
+ import A from 0x%s
+
+ access(all) contract C {
+ access(all) fun use() {
+ let b = A.a
+ }
+ }
+ `,
+ address1.Hex(),
+ ),
+ },
+ )
+
+ tx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+ SetPayer(serviceAccountAddress)
+
+ err = tx.SignPayload(address2, 0, signer2)
+ require.NoError(t, err)
+
+ signer, err = b.ServiceKey().Signer()
+ assert.NoError(t, err)
+ err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+ require.NoError(t, err)
+
+ err = adapter.SendTransaction(context.Background(), *tx)
+ require.NoError(t, err)
+
+ result, err = b.ExecuteNextTransaction()
+ require.NoError(t, err)
+
+ require.False(t, result.Succeeded())
+ require.Error(t, result.Error)
+
+ require.Contains(
+ t,
+ result.Error.Error(),
+ "error: access denied: cannot access `a` because field requires `account` authorization",
+ )
+}
diff --git a/integration/internal/emulator/tests/attachments_test.go b/integration/internal/emulator/tests/attachments_test.go
new file mode 100644
index 00000000000..7d8f72dcdc5
--- /dev/null
+++ b/integration/internal/emulator/tests/attachments_test.go
@@ -0,0 +1,50 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package tests + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/integration/internal/emulator" +) + +func TestAttachments(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + script := ` + access(all) resource R {} + + access(all) attachment A for R {} + + access(all) fun main() { + let r <- create R() + r[A] + destroy r + } + ` + + _, err = b.ExecuteScript([]byte(script), nil) + require.NoError(t, err) +} diff --git a/integration/internal/emulator/tests/block_info_test.go b/integration/internal/emulator/tests/block_info_test.go new file mode 100644 index 00000000000..c9785c09d1c --- /dev/null +++ b/integration/internal/emulator/tests/block_info_test.go @@ -0,0 +1,113 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestBlockInfo(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + block1, err := b.CommitBlock() + require.NoError(t, err) + + block2, err := b.CommitBlock() + require.NoError(t, err) + + t.Run("works as transaction", func(t *testing.T) { + tx := flowsdk.NewTransaction(). + SetScript([]byte(` + transaction { + execute { + let block = getCurrentBlock() + log(block) + + let lastBlock = getBlock(at: block.height - 1) + log(lastBlock) + } + } + `)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + require.Len(t, result.Logs, 2) + assert.Equal(t, fmt.Sprintf("Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", block2.Height+1, + b.PendingBlockView(), b.PendingBlockID(), float64(b.PendingBlockTimestamp()/1000)), result.Logs[0]) + assert.Equal(t, fmt.Sprintf("Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", block2.Height, + block2.View, block2.ID(), float64(block2.Timestamp/1000)), result.Logs[1]) + }) + + t.Run("works as script", func(t *testing.T) { + script := []byte(` + access(all) fun main() { + let block = getCurrentBlock() + log(block) + + let lastBlock = getBlock(at: block.height - 1) + log(lastBlock) + } + `) + + result, err := b.ExecuteScript(script, nil) + assert.NoError(t, err) + + assert.True(t, result.Succeeded()) + + require.Len(t, result.Logs, 2) + assert.Equal(t, fmt.Sprintf("Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", block2.Height, + block2.View, block2.ID(), float64(block2.Timestamp/1000)), result.Logs[0]) + assert.Equal(t, fmt.Sprintf("Block(height: %v, view: %v, id: 0x%x, timestamp: %.8f)", block1.Height, + block1.View, block1.ID(), float64(block1.Timestamp/1000)), result.Logs[1]) + }) +} diff --git a/integration/internal/emulator/tests/block_test.go b/integration/internal/emulator/tests/block_test.go new file mode 100644 index 00000000000..83c59b8d645 --- /dev/null +++ b/integration/internal/emulator/tests/block_test.go @@ -0,0 +1,178 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestCommitBlock(t *testing.T) { + + t.Parallel() + + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + + require.NoError(t, err) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
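+ // the service account also authorizes the transaction, since addTwoScript
+ // stores the counter in the signer's account storage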
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Add tx1 to pending block + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx1.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, tx1Result.Status) + + tx2 := flowsdk.NewTransaction(). + SetScript([]byte(`transaction { execute { panic("revert!") } }`)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err = b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx2.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Add tx2 to pending block + err = adapter.SendTransaction(context.Background(), *tx2) + require.NoError(t, err) + + tx2Result, err := adapter.GetTransactionResult(context.Background(), tx2.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, tx2Result.Status) + + // Execute tx1 + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Succeeded()) + + // Execute tx2 + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Reverted()) + + // Commit tx1 and tx2 into new block + _, err = b.CommitBlock() + assert.NoError(t, err) + + // tx1 status becomes TransactionStatusSealed + tx1Result, err = adapter.GetTransactionResult(context.Background(), tx1.ID()) + require.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) + + // tx2 status also becomes TransactionStatusSealed, even though it is reverted + tx2Result, err = adapter.GetTransactionResult(context.Background(), tx2.ID()) + require.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx2Result.Status) + assert.Error(t, tx2Result.Error) +} + +func TestBlockView(t *testing.T) { + + t.Parallel() + + const nBlocks = 3 + + b, err := emulator.New() + require.NoError(t, err) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + t.Run("genesis should have 0 view", func(t *testing.T) { + block, err := b.GetBlockByHeight(0) + require.NoError(t, err) + assert.Equal(t, uint64(0), block.Height) + assert.Equal(t, uint64(0), block.View) + }) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + // create a few blocks, each with one transaction + for i := 0; i < nBlocks; i++ { + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+			AddAuthorizer(serviceAccountAddress)
+
+		signer, err := b.ServiceKey().Signer()
+		require.NoError(t, err)
+
+		err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+		require.NoError(t, err)
+
+		// Add tx to pending block
+		err = adapter.SendTransaction(context.Background(), *tx)
+		assert.NoError(t, err)
+
+		// execute and commit the block
+		_, _, err = b.ExecuteAndCommitBlock()
+		require.NoError(t, err)
+	}
+	const MaxViewIncrease = 3
+
+	for height := uint64(1); height <= nBlocks+1; height++ {
+		block, err := b.GetBlockByHeight(height)
+		require.NoError(t, err)
+
+		maxView := height * MaxViewIncrease
+		t.Run(fmt.Sprintf("block %d should have view <%d", height, maxView), func(t *testing.T) {
+			assert.Equal(t, height, block.Height)
+			assert.LessOrEqual(t, block.View, maxView)
+		})
+	}
+}
diff --git a/integration/internal/emulator/tests/blockchain_test.go b/integration/internal/emulator/tests/blockchain_test.go
new file mode 100644
index 00000000000..c42f81fa940
--- /dev/null
+++ b/integration/internal/emulator/tests/blockchain_test.go
@@ -0,0 +1,153 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tests
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/cadence"
+	"github.com/onflow/cadence/stdlib"
+
+	flowsdk "github.com/onflow/flow-go-sdk"
+	"github.com/onflow/flow-go-sdk/templates"
+
+	emulator "github.com/onflow/flow-go/integration/internal/emulator"
+)
+
+const counterScript = `
+
+  access(all) contract Counting {
+
+      access(all) event CountIncremented(count: Int)
+
+      access(all) resource Counter {
+          access(all) var count: Int
+
+          init() {
+              self.count = 0
+          }
+
+          access(all) fun add(_ count: Int) {
+              self.count = self.count + count
+              emit CountIncremented(count: self.count)
+          }
+      }
+
+      access(all) fun createCounter(): @Counter {
+          return <-create Counter()
+      }
+  }
+`
+
+// GenerateAddTwoToCounterScript generates a script that increments a counter.
+// If no counter exists, it is created.
+func GenerateAddTwoToCounterScript(counterAddress flowsdk.Address) string {
+	return fmt.Sprintf(
+		`
+            import 0x%s
+
+            transaction {
+                prepare(signer: auth(Storage, Capabilities) &Account) {
+                    var counter = signer.storage.borrow<&Counting.Counter>(from: /storage/counter)
+                    if counter == nil {
+                        signer.storage.save(<-Counting.createCounter(), to: /storage/counter)
+                        counter = signer.storage.borrow<&Counting.Counter>(from: /storage/counter)
+
+                        // Also publish this for others to borrow.
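+                        // Publishing the issued storage capability under /public/counter is
+                        // what lets other accounts read the count, e.g. via the script
+                        // produced by GenerateGetCounterCountScript below.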
+ let cap = signer.capabilities.storage.issue<&Counting.Counter>(/storage/counter) + signer.capabilities.publish(cap, at: /public/counter) + } + counter?.add(2) + } + } + `, + counterAddress, + ) +} + +func DeployAndGenerateAddTwoScript(t *testing.T, adapter *emulator.SDKAdapter) (string, flowsdk.Address) { + + contracts := []templates.Contract{ + { + Name: "Counting", + Source: counterScript, + }, + } + + counterAddress, err := adapter.CreateAccount( + context.Background(), + nil, + contracts, + ) + require.NoError(t, err) + + return GenerateAddTwoToCounterScript(counterAddress), counterAddress +} + +func GenerateGetCounterCountScript(counterAddress flowsdk.Address, accountAddress flowsdk.Address) string { + return fmt.Sprintf( + ` + import 0x%s + + access(all) fun main(): Int { + return getAccount(0x%s).capabilities.borrow<&Counting.Counter>(/public/counter)?.count ?? 0 + } + `, + counterAddress, + accountAddress, + ) +} + +func AssertTransactionSucceeded(t *testing.T, result *emulator.TransactionResult) { + if !assert.True(t, result.Succeeded()) { + t.Error(result.Error) + } +} + +func LastCreatedAccount(b *emulator.Blockchain, result *emulator.TransactionResult) (*flowsdk.Account, error) { + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + address, err := LastCreatedAccountAddress(result) + if err != nil { + return nil, err + } + + return adapter.GetAccount(context.Background(), address) +} + +func LastCreatedAccountAddress(result *emulator.TransactionResult) (flowsdk.Address, error) { + for _, event := range result.Events { + if event.Type == flowsdk.EventAccountCreated { + addressFieldValue := cadence.SearchFieldByName( + event.Value, + stdlib.AccountEventAddressParameter.Identifier, + ) + return flowsdk.Address(addressFieldValue.(cadence.Address)), nil + } + } + + return flowsdk.Address{}, fmt.Errorf("no account created in this result") +} diff --git a/integration/internal/emulator/tests/capcons_test.go b/integration/internal/emulator/tests/capcons_test.go new file mode 100644 index 00000000000..7fe37ecbd12 --- /dev/null +++ b/integration/internal/emulator/tests/capcons_test.go @@ -0,0 +1,44 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tests + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/integration/internal/emulator" +) + +func TestCapabilityControllers(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + script := ` + access(all) fun main() { + getAccount(0x1).capabilities.get + } + ` + + _, err = b.ExecuteScript([]byte(script), nil) + require.NoError(t, err) +} diff --git a/integration/internal/emulator/tests/collection_test.go b/integration/internal/emulator/tests/collection_test.go new file mode 100644 index 00000000000..c798d239433 --- /dev/null +++ b/integration/internal/emulator/tests/collection_test.go @@ -0,0 +1,117 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package tests + +import ( + "context" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestCollections(t *testing.T) { + + t.Parallel() + + t.Run("Empty block", func(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + block, err := b.CommitBlock() + require.NoError(t, err) + + // block should not contain any collections + assert.Empty(t, block.Payload.Guarantees) + }) + + t.Run("Non-empty block", func(t *testing.T) { + + t.Parallel() + + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + require.NoError(t, err) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + tx2 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + err = tx2.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // generate a list of transactions + transactions := []*flowsdk.Transaction{tx1, tx2} + + // add all transactions to block + for _, tx := range transactions { + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + } + + block, _, err := b.ExecuteAndCommitBlock() + require.NoError(t, err) + + // block should contain at least one collection + assert.NotEmpty(t, block.Payload.Guarantees) + + i := 0 + for _, guarantee := range block.Payload.Guarantees { + collection, err := adapter.GetCollectionByID(context.Background(), emulator.FlowIdentifierToSDK(guarantee.CollectionID)) + require.NoError(t, err) + + for _, txID := range collection.TransactionIDs { + assert.Equal(t, transactions[i].ID(), txID) + i++ + } + } + }) +} diff --git a/integration/internal/emulator/tests/events_test.go b/integration/internal/emulator/tests/events_test.go new file mode 100644 index 00000000000..0be7628a894 --- /dev/null +++ b/integration/internal/emulator/tests/events_test.go @@ -0,0 +1,202 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tests + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + + flowsdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/templates" + + "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestEventEmitted(t *testing.T) { + + t.Parallel() + + t.Run("EmittedFromScript", func(t *testing.T) { + + t.Parallel() + + // Emitting events in scripts is not supported + + b, err := emulator.New() + require.NoError(t, err) + + script := []byte(` + access(all) event MyEvent(x: Int, y: Int) + + access(all) fun main() { + emit MyEvent(x: 1, y: 2) + } + `) + + result, err := b.ExecuteScript(script, nil) + assert.NoError(t, err) + require.NoError(t, result.Error) + require.Empty(t, result.Events) + }) + + t.Run("EmittedFromAccount", func(t *testing.T) { + + t.Parallel() + + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + require.NoError(t, err) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + accountContracts := []templates.Contract{ + { + Name: "Test", + Source: ` + access(all) contract Test { + access(all) event MyEvent(x: Int, y: Int) + + access(all) fun emitMyEvent(x: Int, y: Int) { + emit MyEvent(x: x, y: y) + } + } + `, + }, + } + + publicKey := b.ServiceKey() + accountKey := &flowsdk.AccountKey{ + Index: publicKey.Index, + PublicKey: publicKey.PublicKey, + SigAlgo: publicKey.SigAlgo, + HashAlgo: publicKey.HashAlgo, + Weight: publicKey.Weight, + SequenceNumber: publicKey.SequenceNumber, + } + + address, err := adapter.CreateAccount( + context.Background(), + []*flowsdk.AccountKey{accountKey}, + accountContracts, + ) + assert.NoError(t, err) + + script := []byte(fmt.Sprintf(` + import 0x%s + + transaction { + execute { + Test.emitMyEvent(x: 1, y: 2) + } + } + `, address.Hex())) + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Succeeded()) + + block, err := b.CommitBlock() + require.NoError(t, err) + + addr, _ := common.BytesToAddress(address.Bytes()) + location := common.AddressLocation{ + Address: addr, + Name: "Test", + } + expectedType := location.TypeID(nil, "Test.MyEvent") + + events, err := adapter.GetEventsForHeightRange(context.Background(), string(expectedType), block.Height, block.Height) + require.NoError(t, err) + require.Len(t, events, 1) + + actualEvent := events[0].Events[0] + decodedEvent := actualEvent.Value + decodedEventType := decodedEvent.Type().(*cadence.EventType) + expectedID := flowsdk.Event{TransactionID: tx.ID(), EventIndex: 0}.ID() + + assert.Equal(t, string(expectedType), actualEvent.Type) + assert.Equal(t, expectedID, actualEvent.ID()) + + fields := decodedEventType.FieldsMappedByName() + + assert.Contains(t, fields, "x") + assert.Contains(t, fields, "y") + + fieldValues := decodedEvent.FieldsMappedByName() + + assert.Equal(t, cadence.NewInt(1), fieldValues["x"]) + assert.Equal(t, cadence.NewInt(2), fieldValues["y"]) + + events, err = adapter.GetEventsForBlockIDs( + context.Background(), + string(expectedType), + []flowsdk.Identifier{ + flowsdk.Identifier(block.ID()), + }, + ) + require.NoError(t, err) + require.Len(t, events, 1) + + actualEvent = events[0].Events[0] + decodedEvent = actualEvent.Value + decodedEventType = decodedEvent.Type().(*cadence.EventType) + expectedID = flowsdk.Event{TransactionID: tx.ID(), EventIndex: 0}.ID() + + assert.Equal(t, string(expectedType), actualEvent.Type) + assert.Equal(t, expectedID, actualEvent.ID()) + + fields = decodedEventType.FieldsMappedByName() + + assert.Contains(t, fields, "x") + assert.Contains(t, fields, "y") + + fieldValues = decodedEvent.FieldsMappedByName() + + assert.Equal(t, cadence.NewInt(1), fieldValues["x"]) + assert.Equal(t, cadence.NewInt(2), fieldValues["y"]) + + }) +} diff --git a/integration/internal/emulator/tests/logs_test.go b/integration/internal/emulator/tests/logs_test.go new file mode 100644 index 00000000000..af65ef14b89 --- /dev/null +++ b/integration/internal/emulator/tests/logs_test.go @@ -0,0 +1,46 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/integration/internal/emulator" +) + +func TestRuntimeLogs(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + script := []byte(` + access(all) fun main() { + log("elephant ears") + } + `) + + result, err := b.ExecuteScript(script, nil) + assert.NoError(t, err) + assert.Equal(t, []string{`"elephant ears"`}, result.Logs) +} diff --git a/integration/internal/emulator/tests/memstore_test.go b/integration/internal/emulator/tests/memstore_test.go new file mode 100644 index 00000000000..a28696d14be --- /dev/null +++ b/integration/internal/emulator/tests/memstore_test.go @@ -0,0 +1,118 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "context" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/integration/internal/emulator" + "github.com/onflow/flow-go/model/flow" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestMemstore(t *testing.T) { + + t.Parallel() + + const blockHeight = 0 + key := flow.NewRegisterID(flowgo.EmptyAddress, "foo") + value := []byte("bar") + store := emulator.NewMemoryStore() + + err := store.InsertExecutionSnapshot( + blockHeight, + &snapshot.ExecutionSnapshot{ + WriteSet: map[flowgo.RegisterID]flowgo.RegisterValue{ + key: value, + }, + }, + ) + require.NoError(t, err) + + var wg sync.WaitGroup + + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + snapshot, err := store.LedgerByHeight( + context.Background(), + blockHeight) + require.NoError(t, err) + actualValue, err := snapshot.Get(key) + + require.NoError(t, err) + assert.Equal(t, value, actualValue) + }() + } + + wg.Wait() +} + +func TestMemstoreSetValueToNil(t *testing.T) { + + t.Parallel() + + store := emulator.NewMemoryStore() + key := flow.NewRegisterID(flowgo.EmptyAddress, "foo") + value := []byte("bar") + var nilByte []byte + nilValue := nilByte + + // set initial value + err := store.InsertExecutionSnapshot( + 0, + &snapshot.ExecutionSnapshot{ + WriteSet: map[flowgo.RegisterID]flowgo.RegisterValue{ + key: value, + }, + }) + require.NoError(t, err) + + // check initial value + ledger, err := store.LedgerByHeight(context.Background(), 0) + require.NoError(t, err) + register, err := ledger.Get(key) + require.NoError(t, err) + require.Equal(t, string(value), string(register)) + + // set value to nil + err = store.InsertExecutionSnapshot( + 1, + &snapshot.ExecutionSnapshot{ + WriteSet: map[flowgo.RegisterID]flowgo.RegisterValue{ + key: nilValue, + }, + }) + require.NoError(t, err) + + // check value is nil + ledger, err = store.LedgerByHeight(context.Background(), 1) + require.NoError(t, err) + register, err = ledger.Get(key) + require.NoError(t, err) + 
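// the register read back at height 1 should reflect the nil write
+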
require.Equal(t, string(nilValue), string(register)) +} diff --git a/integration/internal/emulator/tests/pendingBlock_test.go b/integration/internal/emulator/tests/pendingBlock_test.go new file mode 100644 index 00000000000..be210742839 --- /dev/null +++ b/integration/internal/emulator/tests/pendingBlock_test.go @@ -0,0 +1,459 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package tests + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func setupPendingBlockTests(t *testing.T) ( + *emulator.Blockchain, + *emulator.SDKAdapter, + *flowsdk.Transaction, + *flowsdk.Transaction, + *flowsdk.Transaction, +) { + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + require.NoError(t, err) + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + tx2 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber+1). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err = b.ServiceKey().Signer() + assert.NoError(t, err) + err = tx2.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + invalid := flowsdk.NewTransaction(). + SetScript([]byte(`transaction { execute { panic("revert!") } }`)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err = b.ServiceKey().Signer() + assert.NoError(t, err) + err = invalid.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + return b, adapter, tx1, tx2, invalid +} + +func TestPendingBlockBeforeExecution(t *testing.T) { + + t.Parallel() + + t.Run("EmptyPendingBlock", func(t *testing.T) { + + t.Parallel() + + b, _, _, _, _ := setupPendingBlockTests(t) + + // Execute empty pending block + _, err := b.ExecuteBlock() + assert.NoError(t, err) + + // Commit empty pending block + _, err = b.CommitBlock() + assert.NoError(t, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("AddDuplicateTransaction", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx, _, _ := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Add tx1 again + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, &emulator.DuplicateTransactionError{}, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("CommitBeforeExecution", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx, _, _ := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Attempt to commit block before execution begins + _, err = b.CommitBlock() + assert.IsType(t, &emulator.PendingBlockCommitBeforeExecutionError{}, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) +} + +func TestPendingBlockDuringExecution(t *testing.T) { + + t.Parallel() + + t.Run("ExecuteNextTransaction", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, _, invalid := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + require.NoError(t, err) + + // Add invalid script tx to pending block + err = adapter.SendTransaction(context.Background(), *invalid) + require.NoError(t, err) + + // Execute tx1 (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Execute invalid script tx (reverts) + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Reverted()) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("ExecuteBlock", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, _, invalid := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + require.NoError(t, err) + + // Add invalid script tx to pending block + err = adapter.SendTransaction(context.Background(), *invalid) + require.NoError(t, err) + + // Execute all tx in pending block (tx1, invalid) + results, err := b.ExecuteBlock() + assert.NoError(t, err) + + // tx1 result + assert.True(t, results[0].Succeeded()) + // invalid script tx result + assert.True(t, results[1].Reverted()) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("ExecuteNextThenBlock", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, tx2, invalid := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Add tx2 to pending block + err = adapter.SendTransaction(context.Background(), *tx2) + assert.NoError(t, err) + + // Add invalid script tx to pending block + err = 
adapter.SendTransaction(context.Background(), *invalid) + assert.NoError(t, err) + + // Execute tx1 first (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Execute rest of tx in pending block (tx2, invalid) + results, err := b.ExecuteBlock() + assert.NoError(t, err) + // tx2 result + assert.True(t, results[0].Succeeded()) + // invalid script tx result + assert.True(t, results[1].Reverted()) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("AddTransactionMidExecution", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, tx2, invalid := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Add invalid to pending block + err = adapter.SendTransaction(context.Background(), *invalid) + assert.NoError(t, err) + + // Execute tx1 first (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Attempt to add tx2 to pending block after execution begins + err = adapter.SendTransaction(context.Background(), *tx2) + assert.IsType(t, &emulator.PendingBlockMidExecutionError{}, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("CommitMidExecution", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, _, invalid := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Add invalid to pending block + err = adapter.SendTransaction(context.Background(), *invalid) + assert.NoError(t, err) + + // Execute tx1 first (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Attempt to commit block before execution finishes + _, err = b.CommitBlock() + assert.IsType(t, &emulator.PendingBlockMidExecutionError{}, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) + + t.Run("TransactionsExhaustedDuringExecution", func(t *testing.T) { + + t.Parallel() + + b, adapter, tx1, _, _ := setupPendingBlockTests(t) + + // Add tx1 to pending block + err := adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Execute tx1 (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Attempt to execute nonexistent next tx (fails) + _, err = b.ExecuteNextTransaction() + assert.IsType(t, &emulator.PendingBlockTransactionsExhaustedError{}, err) + + // Attempt to execute rest of block tx (fails) + _, err = b.ExecuteBlock() + assert.IsType(t, &emulator.PendingBlockTransactionsExhaustedError{}, err) + + err = b.ResetPendingBlock() + assert.NoError(t, err) + }) +} + +func TestPendingBlockCommit(t *testing.T) { + + t.Parallel() + + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + require.NoError(t, err) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + t.Run("CommitBlock", func(t *testing.T) { + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Add tx1 to pending block + err = adapter.SendTransaction(context.Background(), *tx1) + require.NoError(t, err) + + // Enter execution mode (block hash should not change after this point) + blockID := b.PendingBlockID() + + // Execute tx1 (succeeds) + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + // Commit pending block + block, err := b.CommitBlock() + assert.NoError(t, err) + assert.Equal(t, blockID, block.ID()) + }) + + t.Run("ExecuteAndCommitBlock", func(t *testing.T) { + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Add tx1 to pending block + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Enter execution mode (block hash should not change after this point) + blockID := b.PendingBlockID() + + // Execute and commit pending block + block, results, err := b.ExecuteAndCommitBlock() + assert.NoError(t, err) + assert.Equal(t, blockID, block.ID()) + assert.Len(t, results, 1) + }) +} + +type testClock struct { + Time time.Time +} + +func (tc testClock) Now() time.Time { + return tc.Time.UTC() +} + +func TestPendingBlockSetTimestamp(t *testing.T) { + + t.Parallel() + + b, adapter, _, _, _ := setupPendingBlockTests(t) + clock := testClock{ + Time: time.Now().UTC(), + } + b.SetClock(clock.Now) + _, _ = b.CommitBlock() + + script := []byte(` + access(all) fun main(): UFix64 { + return getCurrentBlock().timestamp + } + `) + scriptResult, err := adapter.ExecuteScriptAtLatestBlock( + context.Background(), + script, + [][]byte{}, + ) + require.NoError(t, err) + + expected := fmt.Sprintf( + "{\"value\":\"%d.00000000\",\"type\":\"UFix64\"}\n", + clock.Time.Unix(), + ) + assert.Equal(t, expected, string(scriptResult)) + + clock = testClock{ + Time: time.Now().Add(time.Hour * 24 * 7).UTC(), + } + b.SetClock(clock.Now) + _, _ = b.CommitBlock() + + _, err = adapter.ExecuteScriptAtLatestBlock( + context.Background(), + script, + [][]byte{}, + ) + require.NoError(t, err) + + /*expected = fmt.Sprintf( + "{\"value\":\"%d.00000000\",\"type\":\"UFix64\"}\n", + clock.Time.Unix(), + )*/ + //assert.Equal(t, expected, string(scriptResult)) +} diff --git a/integration/internal/emulator/tests/result_test.go b/integration/internal/emulator/tests/result_test.go new file mode 100644 index 00000000000..c5679a509b3 --- /dev/null +++ b/integration/internal/emulator/tests/result_test.go @@ -0,0 +1,87 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests + +import ( + "errors" + "testing" + + "github.com/onflow/cadence" + "github.com/stretchr/testify/assert" + + flowsdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go-sdk/test" + + emulator "github.com/onflow/flow-go/integration/internal/emulator" + "github.com/onflow/flow-go/model/flow" +) + +func TestResult(t *testing.T) { + + t.Parallel() + + t.Run("should return correct boolean", func(t *testing.T) { + + t.Parallel() + + idGenerator := test.IdentifierGenerator() + + trSucceed := &emulator.TransactionResult{ + TransactionID: idGenerator.New(), + ComputationUsed: 20, + MemoryEstimate: 2048, + Error: nil, + Logs: []string{}, + Events: []flowsdk.Event{}, + } + assert.True(t, trSucceed.Succeeded()) + assert.False(t, trSucceed.Reverted()) + + trReverted := &emulator.TransactionResult{ + TransactionID: idGenerator.New(), + ComputationUsed: 20, + MemoryEstimate: 2048, + Error: errors.New("transaction execution error"), + Logs: []string{}, + Events: []flowsdk.Event{}, + } + assert.True(t, trReverted.Reverted()) + assert.False(t, trReverted.Succeeded()) + + srSucceed := &emulator.ScriptResult{ + ScriptID: emulator.SDKIdentifierToFlow(idGenerator.New()), + Value: cadence.Value(cadence.NewInt(1)), + Error: nil, + Logs: []string{}, + Events: []flow.Event{}, + } + assert.True(t, srSucceed.Succeeded()) + assert.False(t, srSucceed.Reverted()) + + srReverted := &emulator.ScriptResult{ + ScriptID: emulator.SDKIdentifierToFlow(idGenerator.New()), + Value: cadence.Value(cadence.NewInt(1)), + Error: errors.New("transaction execution error"), + Logs: []string{}, + Events: []flow.Event{}, + } + assert.True(t, srReverted.Reverted()) + assert.False(t, srReverted.Succeeded()) + }) +} diff --git a/integration/internal/emulator/tests/script_test.go b/integration/internal/emulator/tests/script_test.go new file mode 100644 index 00000000000..ad7e4782512 --- /dev/null +++ b/integration/internal/emulator/tests/script_test.go @@ -0,0 +1,316 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tests + +import ( + "context" + "fmt" + "testing" + + "github.com/onflow/cadence" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flowsdk "github.com/onflow/flow-go-sdk" + + fvmerrors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/evm/stdlib" + emulator "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestExecuteScript(t *testing.T) { + + t.Parallel() + + b, err := emulator.New( + emulator.WithStorageLimitEnabled(false), + ) + require.NoError(t, err) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + logger := zerolog.Nop() + adapter := emulator.NewSDKAdapter(&logger, b) + + addTwoScript, counterAddress := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + callScript := GenerateGetCounterCountScript(counterAddress, serviceAccountAddress) + + // Sample call (value is 0) + scriptResult, err := b.ExecuteScript([]byte(callScript), nil) + require.NoError(t, err) + assert.Equal(t, cadence.NewInt(0), scriptResult.Value) + + // Submit tx (script adds 2) + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + txResult, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, txResult) + + t.Run("BeforeCommit", func(t *testing.T) { + t.Skip("TODO: fix stored ledger") + + // Sample call (value is still 0) + result, err := b.ExecuteScript([]byte(callScript), nil) + require.NoError(t, err) + assert.Equal(t, cadence.NewInt(0), result.Value) + }) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + t.Run("AfterCommit", func(t *testing.T) { + // Sample call (value is 2) + result, err := b.ExecuteScript([]byte(callScript), nil) + require.NoError(t, err) + assert.Equal(t, cadence.NewInt(2), result.Value) + }) +} + +func TestExecuteScript_WithArguments(t *testing.T) { + + t.Parallel() + + t.Run("Int", func(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + scriptWithArgs := ` + access(all) fun main(n: Int): Int { + return n + } + ` + + arg, err := jsoncdc.Encode(cadence.NewInt(10)) + require.NoError(t, err) + + scriptResult, err := b.ExecuteScript([]byte(scriptWithArgs), [][]byte{arg}) + require.NoError(t, err) + + assert.Equal(t, cadence.NewInt(10), scriptResult.Value) + }) + + t.Run("String", func(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + scriptWithArgs := ` + access(all) fun main(n: String): Int { + log(n) + return 0 + } + ` + + arg, err := jsoncdc.Encode(cadence.String("Hello, World")) + require.NoError(t, err) + scriptResult, err := b.ExecuteScript([]byte(scriptWithArgs), [][]byte{arg}) + require.NoError(t, err) + assert.Contains(t, scriptResult.Logs, "\"Hello, World\"") + }) +} + +func TestExecuteScript_FlowServiceAccountBalance(t *testing.T) { + + t.Parallel() + + b, err := emulator.New() + require.NoError(t, err) + + code := fmt.Sprintf( + ` + 
import FlowServiceAccount from %[1]s + + access(all) + fun main(): UFix64 { + let acct = getAccount(%[1]s) + return FlowServiceAccount.defaultTokenBalance(acct) + } + `, + b.GetChain().ServiceAddress().HexWithPrefix(), + ) + + res, err := b.ExecuteScript([]byte(code), nil) + require.NoError(t, err) + require.NoError(t, res.Error) + + require.Positive(t, res.Value) +} + +func TestInfiniteScript(t *testing.T) { + + t.Parallel() + + const limit = 18 + b, err := emulator.New( + emulator.WithScriptGasLimit(limit), + ) + require.NoError(t, err) + + const code = ` + access(all) fun main() { + main() + } + ` + result, err := b.ExecuteScript([]byte(code), nil) + require.NoError(t, err) + + require.True(t, fvmerrors.IsComputationLimitExceededError(result.Error)) +} + +func TestScriptExecutionLimit(t *testing.T) { + + t.Parallel() + + const code = ` + access(all) fun main() { + var s: Int256 = 1024102410241024 + var i: Int256 = 0 + var a: Int256 = 7 + var b: Int256 = 5 + var c: Int256 = 2 + + while i < 150000 { + s = s * a + s = s / b + s = s / c + i = i + 1 + } + } + ` + + t.Run("ExceedingLimit", func(t *testing.T) { + + t.Parallel() + + const limit = 2000 + b, err := emulator.New( + emulator.WithScriptGasLimit(limit), + ) + require.NoError(t, err) + + result, err := b.ExecuteScript([]byte(code), nil) + require.NoError(t, err) + + require.True(t, fvmerrors.IsComputationLimitExceededError(result.Error)) + }) + + t.Run("SufficientLimit", func(t *testing.T) { + + t.Parallel() + + const limit = 19000 + b, err := emulator.New( + emulator.WithScriptGasLimit(limit), + ) + require.NoError(t, err) + + result, err := b.ExecuteScript([]byte(code), nil) + require.NoError(t, err) + require.NoError(t, result.Error) + }) +} + +// TestScriptWithCadenceRandom checks Cadence's random function works +// within a script +func TestScriptWithCadenceRandom(t *testing.T) { + + //language=cadence + code := ` + access(all) + fun main() { + assert(revertibleRandom<UInt64>() >= 0) + } + ` + + const limit = 200 + b, err := emulator.New( + emulator.WithScriptGasLimit(limit), + ) + require.NoError(t, err) + + result, err := b.ExecuteScript([]byte(code), nil) + require.NoError(t, err) + require.NoError(t, result.Error) +} + +// TestEVM checks evm functionality +func TestEVM(t *testing.T) { + serviceAddr := flowgo.Emulator.Chain().ServiceAddress() + code := []byte(fmt.Sprintf( + ` + import EVM from 0x%s + + access(all) + fun main(bytes: [UInt8; 20]) { + log(EVM.EVMAddress(bytes: bytes)) + } + `, + serviceAddr, + )) + + gasLimit := uint64(100_000) + + b, err := emulator.New( + emulator.WithScriptGasLimit(gasLimit), + ) + require.NoError(t, err) + + addressBytesArray := cadence.NewArray([]cadence.Value{ + cadence.UInt8(1), cadence.UInt8(1), + cadence.UInt8(2), cadence.UInt8(2), + cadence.UInt8(3), cadence.UInt8(3), + cadence.UInt8(4), cadence.UInt8(4), + cadence.UInt8(5), cadence.UInt8(5), + cadence.UInt8(6), cadence.UInt8(6), + cadence.UInt8(7), cadence.UInt8(7), + cadence.UInt8(8), cadence.UInt8(8), + cadence.UInt8(9), cadence.UInt8(9), + cadence.UInt8(10), cadence.UInt8(10), + }).WithType(stdlib.EVMAddressBytesCadenceType) + + result, err := b.ExecuteScript(code, [][]byte{jsoncdc.MustEncode(addressBytesArray)}) + require.NoError(t, err) + require.NoError(t, result.Error) + require.Len(t, result.Logs, 1) + require.Equal(t, result.Logs[0], fmt.Sprintf("A.%s.EVM.EVMAddress(bytes: %s)", serviceAddr, addressBytesArray.String())) + +} diff --git a/integration/internal/emulator/tests/store_test.go 
b/integration/internal/emulator/tests/store_test.go new file mode 100644 index 00000000000..623bd92c738 --- /dev/null +++ b/integration/internal/emulator/tests/store_test.go @@ -0,0 +1,482 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tests_test + +import ( + "context" + "fmt" + "testing" + + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go-sdk/test" + + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/integration/internal/emulator" + "github.com/onflow/flow-go/integration/internal/emulator/utils/unittest" + "github.com/onflow/flow-go/model/flow" + flowgo "github.com/onflow/flow-go/model/flow" + commonunittest "github.com/onflow/flow-go/utils/unittest" +) + +func TestBlocks(t *testing.T) { + + t.Parallel() + + store := setupStore(t) + + block1 := commonunittest.BlockFixture( + commonunittest.Block.WithHeight(1), + ) + + block2 := commonunittest.BlockFixture( + commonunittest.Block.WithHeight(2), + ) + + t.Run("should return error for not found", func(t *testing.T) { + t.Run("BlockByID", func(t *testing.T) { + freshId := test.IdentifierGenerator().New() + _, err := store.BlockByID(context.Background(), flowgo.Identifier(freshId)) + if assert.Error(t, err) { + assert.Equal(t, emulator.ErrNotFound, err) + } + }) + + t.Run("BlockByHeight", func(t *testing.T) { + _, err := store.BlockByHeight(context.Background(), block1.Height) + if assert.Error(t, err) { + assert.Equal(t, emulator.ErrNotFound, err) + } + }) + + t.Run("LatestBlock", func(t *testing.T) { + _, err := store.LatestBlock(context.Background()) + if assert.Error(t, err) { + assert.Equal(t, emulator.ErrNotFound, err) + } + }) + }) + + t.Run("should be able to insert block", func(t *testing.T) { + err := store.StoreBlock(context.Background(), block1) + assert.NoError(t, err) + }) + + // insert block 1 + err := store.StoreBlock(context.Background(), block1) + assert.NoError(t, err) + + t.Run("should be able to get inserted block", func(t *testing.T) { + t.Run("BlockByHeight", func(t *testing.T) { + block, err := store.BlockByHeight(context.Background(), block1.Height) + assert.NoError(t, err) + assert.Equal(t, block1, block) + }) + + t.Run("BlockByID", func(t *testing.T) { + block, err := store.BlockByID(context.Background(), block1.ID()) + assert.NoError(t, err) + assert.Equal(t, block1, block) + }) + + t.Run("LatestBlock", func(t *testing.T) { + block, err := store.LatestBlock(context.Background()) + assert.NoError(t, err) + assert.Equal(t, *block1, block) + }) + }) + + // insert block 2 + err = store.StoreBlock(context.Background(), block2) + assert.NoError(t, err) + + t.Run("Latest block should update", func(t *testing.T) { + block, err := store.LatestBlock(context.Background()) + assert.NoError(t, err) + assert.Equal(t, *block2, block) + }) +} + +func TestCollections(t *testing.T) { + + t.Parallel() + + store 
:= setupStore(t)
+
+	// collection with 3 transactions
+	col := unittest.FullCollectionFixture(3)
+
+	t.Run("should return error for not found", func(t *testing.T) {
+		_, err := store.CollectionByID(context.Background(), col.ID())
+		if assert.Error(t, err) {
+			assert.Equal(t, emulator.ErrNotFound, err)
+		}
+	})
+
+	t.Run("should be able to insert collection", func(t *testing.T) {
+		err := store.InsertCollection(col.Light())
+		assert.NoError(t, err)
+
+		t.Run("should be able to get inserted collection", func(t *testing.T) {
+			storedCol, err := store.CollectionByID(context.Background(), col.ID())
+			require.NoError(t, err)
+			assert.Equal(t, *col.Light(), storedCol)
+		})
+	})
+}
+
+func TestTransactions(t *testing.T) {
+
+	t.Parallel()
+
+	store := setupStore(t)
+
+	tx := unittest.TransactionFixture()
+
+	t.Run("should return error for not found", func(t *testing.T) {
+		_, err := store.TransactionByID(context.Background(), tx.ID())
+		if assert.Error(t, err) {
+			assert.Equal(t, emulator.ErrNotFound, err)
+		}
+	})
+
+	t.Run("should be able to insert tx", func(t *testing.T) {
+		err := store.InsertTransaction(tx.ID(), tx)
+		assert.NoError(t, err)
+
+		t.Run("should be able to get inserted tx", func(t *testing.T) {
+			storedTx, err := store.TransactionByID(context.Background(), tx.ID())
+			require.NoError(t, err)
+			assert.Equal(t, tx.ID(), storedTx.ID())
+		})
+	})
+}
+
+func TestFullCollection(t *testing.T) {
+	t.Parallel()
+	store := setupStore(t)
+
+	col := unittest.FullCollectionFixture(3)
+
+	t.Run("should be able to insert full collection", func(t *testing.T) {
+		_, err := store.CollectionByID(context.Background(), col.ID())
+		require.ErrorIs(t, err, emulator.ErrNotFound)
+
+		_, err = store.FullCollectionByID(context.Background(), col.ID())
+		require.ErrorIs(t, err, emulator.ErrNotFound)
+
+		err = store.InsertCollection(col.Light())
+		require.NoError(t, err)
+
+		for _, tx := range col.Transactions {
+			err = store.InsertTransaction(tx.ID(), *tx)
+			require.NoError(t, err)
+		}
+
+		c, err := store.FullCollectionByID(context.Background(), col.ID())
+		require.NoError(t, err)
+		require.Equal(t, col, c)
+	})
+
+}
+
+func TestTransactionResults(t *testing.T) {
+
+	t.Parallel()
+
+	test := func(eventEncodingVersion entities.EventEncodingVersion) {
+
+		t.Run(eventEncodingVersion.String(), func(t *testing.T) {
+			t.Parallel()
+
+			store := setupStore(t)
+
+			ids := test.IdentifierGenerator()
+
+			result := unittest.StorableTransactionResultFixture(eventEncodingVersion)
+
+			t.Run("should return error for not found", func(t *testing.T) {
+				txID := flowgo.Identifier(ids.New())
+
+				_, err := store.TransactionResultByID(context.Background(), txID)
+				if assert.Error(t, err) {
+					assert.Equal(t, emulator.ErrNotFound, err)
+				}
+			})
+
+			t.Run("should be able to insert result", func(t *testing.T) {
+				txID := flowgo.Identifier(ids.New())
+
+				err := store.InsertTransactionResult(txID, result)
+				assert.NoError(t, err)
+
+				t.Run("should be able to get inserted result", func(t *testing.T) {
+					storedResult, err := store.TransactionResultByID(context.Background(), txID)
+					require.NoError(t, err)
+					assert.Equal(t, result, storedResult)
+				})
+			})
+		})
+	}
+
+	test(entities.EventEncodingVersion_CCF_V0)
+	test(entities.EventEncodingVersion_JSON_CDC_V0)
+}
+
+func TestLedger(t *testing.T) {
+
+	t.Parallel()
+
+	t.Run("get/set", func(t *testing.T) {
+
+		t.Parallel()
+
+		store := setupStore(t)
+
+		var blockHeight uint64 = 1
+
+		owner := flow.HexToAddress("0x01")
+		const key = "foo"
+		expected := []byte("bar")
+
+		executionSnapshot := &snapshot.ExecutionSnapshot{
+			WriteSet: map[flow.RegisterID]flow.RegisterValue{
+				flow.NewRegisterID(owner, key): expected,
+			},
+		}
+
+		t.Run("should be able to set ledger", func(t *testing.T) {
+			err := store.InsertExecutionSnapshot(
+				blockHeight,
+				executionSnapshot)
+			assert.NoError(t, err)
+		})
+
+		t.Run("should be able to get set ledger", func(t *testing.T) {
+			gotLedger, err := store.LedgerByHeight(context.Background(), blockHeight)
+			assert.NoError(t, err)
+			actual, err := gotLedger.Get(flow.NewRegisterID(owner, key))
+			assert.NoError(t, err)
+			assert.Equal(t, expected, actual)
+		})
+	})
+
+	t.Run("versioning", func(t *testing.T) {
+
+		t.Parallel()
+		store := setupStore(t)
+
+		owner := flow.HexToAddress("0x01")
+
+		// Create a list of execution snapshots, where the snapshot inserted at
+		// block height b writes keys b, b+1, and b+2, each set to value b.
+		totalBlocks := 10
+		var snapshots []*snapshot.ExecutionSnapshot
+		for i := 2; i < totalBlocks+2; i++ {
+			writeSet := map[flow.RegisterID]flow.RegisterValue{}
+			for j := i - 1; j <= i+1; j++ {
+				key := fmt.Sprintf("%d", j)
+				writeSet[flow.NewRegisterID(owner, key)] = []byte{byte(i - 1)}
+			}
+			snapshots = append(
+				snapshots,
+				&snapshot.ExecutionSnapshot{WriteSet: writeSet})
+		}
+		require.Equal(t, totalBlocks, len(snapshots))
+
+		// Insert all the ledgers, starting with block 1.
+		// This will result in a ledger state that looks like this:
+		// Block 1: {1: 1, 2: 1, 3: 1}
+		// Block 2: {2: 2, 3: 2, 4: 2}
+		// ...
+		// The combined state at block N looks like:
+		// {1: 1, 2: 2, 3: 3, ..., N+1: N, N+2: N}
+		for i, snapshot := range snapshots {
+			err := store.InsertExecutionSnapshot(
+				uint64(i+1),
+				snapshot)
+			require.NoError(t, err)
+		}
+
+		// View at block 1 should have keys 1, 2, 3
+		t.Run("should version the first written block", func(t *testing.T) {
+			gotLedger, err := store.LedgerByHeight(context.Background(), 1)
+			assert.NoError(t, err)
+			for i := 1; i <= 3; i++ {
+				val, err := gotLedger.Get(flow.NewRegisterID(owner, fmt.Sprintf("%d", i)))
+				assert.NoError(t, err)
+				assert.Equal(t, []byte{byte(1)}, val)
+			}
+		})
+
+		// View at block N should have values 1->N+2
+		t.Run("should version all blocks", func(t *testing.T) {
+			for block := 2; block < totalBlocks; block++ {
+				gotLedger, err := store.LedgerByHeight(context.Background(), uint64(block))
+				assert.NoError(t, err)
+				// The keys 1->N-1 are defined in previous blocks
+				for i := 1; i < block; i++ {
+					val, err := gotLedger.Get(flow.NewRegisterID(owner, fmt.Sprintf("%d", i)))
+					assert.NoError(t, err)
+					assert.Equal(t, []byte{byte(i)}, val)
+				}
+				// The keys N->N+2 are defined in the queried block
+				for i := block; i <= block+2; i++ {
+					val, err := gotLedger.Get(flow.NewRegisterID(owner, fmt.Sprintf("%d", i)))
+					assert.NoError(t, err)
+					assert.Equal(t, []byte{byte(block)}, val)
+				}
+			}
+		})
+	})
+}
+
+func TestInsertEvents(t *testing.T) {
+
+	t.Parallel()
+
+	test := func(eventEncodingVersion entities.EventEncodingVersion) {
+
+		t.Run(eventEncodingVersion.String(), func(t *testing.T) {
+			t.Parallel()
+
+			store := setupStore(t)
+
+			events := test.EventGenerator(eventEncodingVersion)
+
+			t.Run("should be able to insert events", func(t *testing.T) {
+				event, err := emulator.SDKEventToFlow(events.New())
+				assert.NoError(t, err)
+
+				events := []flowgo.Event{*event}
+
+				var blockHeight uint64 = 1
+
+				err = store.InsertEvents(blockHeight, events)
+				assert.NoError(t, err)
+
+				t.Run("should be able to get inserted events", func(t *testing.T) {
+					gotEvents, err := store.EventsByHeight(context.Background(), blockHeight, "")
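+					// an empty event type filter should return all events recorded at this height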
+					assert.NoError(t, err)
+					assert.Equal(t, events, gotEvents)
+				})
+			})
+		})
+	}
+
+	test(entities.EventEncodingVersion_CCF_V0)
+	test(entities.EventEncodingVersion_JSON_CDC_V0)
+}
+
+func TestEventsByHeight(t *testing.T) {
+
+	t.Parallel()
+	test := func(eventEncodingVersion entities.EventEncodingVersion) {
+
+		t.Run(eventEncodingVersion.String(), func(t *testing.T) {
+			t.Parallel()
+
+			store := setupStore(t)
+
+			events := test.EventGenerator(eventEncodingVersion)
+
+			var (
+				nonEmptyBlockHeight    uint64 = 1
+				emptyBlockHeight       uint64 = 2
+				nonExistentBlockHeight uint64 = 3
+
+				allEvents = make([]flowgo.Event, 10)
+				eventsA   = make([]flowgo.Event, 0, 5)
+				eventsB   = make([]flowgo.Event, 0, 5)
+			)
+
+			for i := range allEvents {
+				event, _ := emulator.SDKEventToFlow(events.New())
+
+				event.TransactionIndex = uint32(i)
+				event.EventIndex = uint32(i * 2)
+
+				// interleave events of both types
+				if i%2 == 0 {
+					event.Type = "A"
+					eventsA = append(eventsA, *event)
+				} else {
+					event.Type = "B"
+					eventsB = append(eventsB, *event)
+				}
+
+				allEvents[i] = *event
+			}
+
+			err := store.InsertEvents(nonEmptyBlockHeight, allEvents)
+			assert.NoError(t, err)
+
+			err = store.InsertEvents(emptyBlockHeight, nil)
+			assert.NoError(t, err)
+
+			t.Run("should be able to query by block", func(t *testing.T) {
+				t.Run("non-empty block", func(t *testing.T) {
+					events, err := store.EventsByHeight(context.Background(), nonEmptyBlockHeight, "")
+					assert.NoError(t, err)
+					assert.Equal(t, allEvents, events)
+				})
+
+				t.Run("empty block", func(t *testing.T) {
+					events, err := store.EventsByHeight(context.Background(), emptyBlockHeight, "")
+					assert.NoError(t, err)
+					assert.Empty(t, events)
+				})
+
+				t.Run("non-existent block", func(t *testing.T) {
+					events, err := store.EventsByHeight(context.Background(), nonExistentBlockHeight, "")
+					assert.NoError(t, err)
+					assert.Empty(t, events)
+				})
+			})
+
+			t.Run("should be able to query by event type", func(t *testing.T) {
+				t.Run("type=A, block=1", func(t *testing.T) {
+					// only the type=A events should be returned for block 1
+					events, err := store.EventsByHeight(context.Background(), nonEmptyBlockHeight, "A")
+					assert.NoError(t, err)
+					assert.Equal(t, eventsA, events)
+				})
+
+				t.Run("type=B, block=1", func(t *testing.T) {
+					// only the type=B events should be returned for block 1
+					events, err := store.EventsByHeight(context.Background(), nonEmptyBlockHeight, "B")
+					assert.NoError(t, err)
+					assert.Equal(t, eventsB, events)
+				})
+			})
+		})
+	}
+
+	test(entities.EventEncodingVersion_CCF_V0)
+	test(entities.EventEncodingVersion_JSON_CDC_V0)
+}
+
+// setupStore creates an in-memory store instance for use in tests.
+func setupStore(t *testing.T) *emulator.Store {
+	return emulator.NewMemoryStore()
+}
diff --git a/integration/internal/emulator/tests/temp_dep_test.go b/integration/internal/emulator/tests/temp_dep_test.go
new file mode 100644
index 00000000000..6bd6219f1b6
--- /dev/null
+++ b/integration/internal/emulator/tests/temp_dep_test.go
@@ -0,0 +1,25 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package tests
+
+import "github.com/btcsuite/btcd/chaincfg/chainhash"
+
+// This is added to resolve the ambiguous import issue with chainhash.
+// The code is not used, but it is needed to force go.mod to specify and retain the chainhash version.
+// Workaround for https://github.com/golang/go/issues/27899.
+var _ = chainhash.Hash{}
diff --git a/integration/internal/emulator/tests/transaction_test.go b/integration/internal/emulator/tests/transaction_test.go
new file mode 100644
index 00000000000..074f3d50851
--- /dev/null
+++ b/integration/internal/emulator/tests/transaction_test.go
@@ -0,0 +1,2145 @@
+/*
+ * Flow Emulator
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tests
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/cadence"
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/interpreter"
+
+	"github.com/onflow/flow-go-sdk"
+	flowsdk "github.com/onflow/flow-go-sdk"
+	"github.com/onflow/flow-go-sdk/crypto"
+	"github.com/onflow/flow-go-sdk/templates"
+	"github.com/onflow/flow-go-sdk/test"
+
+	fvmerrors "github.com/onflow/flow-go/fvm/errors"
+	"github.com/onflow/flow-go/fvm/evm/stdlib"
+	emulator "github.com/onflow/flow-go/integration/internal/emulator"
+	flowgo "github.com/onflow/flow-go/model/flow"
+)
+
+func setupTransactionTests(t *testing.T, opts ...emulator.Option) (
+	*emulator.Blockchain,
+	*emulator.SDKAdapter,
+) {
+	b, err := emulator.New(opts...)
+	require.NoError(t, err)
+
+	logger := zerolog.Nop()
+	return b, emulator.NewSDKAdapter(&logger, b)
+}
+
+func TestSubmitTransaction(t *testing.T) {
+
+	t.Parallel()
+
+	b, adapter := setupTransactionTests(t)
+	serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+	addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter)
+
+	tx1 := flowsdk.NewTransaction().
+		SetScript([]byte(addTwoScript)).
+		SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+		SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+		SetPayer(serviceAccountAddress).
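+		// the service account acts as proposer and payer; it is also added as the sole authorizer below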
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx1 + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Execute tx1 + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // tx1 status becomes TransactionStatusSealed + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx1.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) +} + +// TODO: Add test case for missing ReferenceBlockID +// TODO: Add test case for missing ProposalKey +func TestSubmitTransaction_Invalid(t *testing.T) { + + t.Parallel() + + t.Run("Empty transaction", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + // Create empty transaction (no required fields) + tx := flowsdk.NewTransaction() + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, err, &emulator.IncompleteTransactionError{}) + }) + + t.Run("Missing script", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + // Create transaction with no Script field + tx := flowsdk.NewTransaction(). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, err, &emulator.IncompleteTransactionError{}) + }) + + t.Run("Invalid script", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + // Create transaction with invalid Script field + tx := flowsdk.NewTransaction(). + SetScript([]byte("this script cannot be parsed")). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, &emulator.InvalidTransactionScriptError{}, err) + }) + + t.Run("Missing gas limit", func(t *testing.T) { + + t.Parallel() + + t.Skip("TODO: transaction validation") + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + // Create transaction with no GasLimit field + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). 
+			SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+			SetPayer(serviceAccountAddress)
+
+		signer, err := b.ServiceKey().Signer()
+		require.NoError(t, err)
+
+		err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+		require.NoError(t, err)
+
+		// Submit tx
+		err = adapter.SendTransaction(context.Background(), *tx)
+		assert.IsType(t, &emulator.IncompleteTransactionError{}, err)
+	})
+
+	t.Run("Missing payer account", func(t *testing.T) {
+
+		t.Parallel()
+
+		b, adapter := setupTransactionTests(
+			t,
+			emulator.WithStorageLimitEnabled(false),
+		)
+		serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+		addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter)
+
+		// Create transaction with no PayerAccount field
+		tx := flowsdk.NewTransaction().
+			SetScript([]byte(addTwoScript)).
+			SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber).
+			SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit)
+
+		signer, err := b.ServiceKey().Signer()
+		require.NoError(t, err)
+
+		err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+		require.NoError(t, err)
+
+		// Submit tx
+		err = adapter.SendTransaction(context.Background(), *tx)
+		assert.IsType(t, &emulator.IncompleteTransactionError{}, err)
+	})
+
+	t.Run("Missing proposal key", func(t *testing.T) {
+
+		t.Parallel()
+
+		b, adapter := setupTransactionTests(
+			t,
+			emulator.WithStorageLimitEnabled(false),
+		)
+		serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+		addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter)
+
+		// Create transaction with no ProposalKey field
+		tx := flowsdk.NewTransaction().
+			SetScript([]byte(addTwoScript)).
+			SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit)
+
+		tx.ProposalKey = flowsdk.ProposalKey{}
+
+		signer, err := b.ServiceKey().Signer()
+		require.NoError(t, err)
+
+		err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer)
+		require.NoError(t, err)
+
+		// Submit tx
+		err = adapter.SendTransaction(context.Background(), *tx)
+		assert.IsType(t, &emulator.IncompleteTransactionError{}, err)
+	})
+
+	t.Run("Invalid sequence number", func(t *testing.T) {
+
+		t.Parallel()
+
+		b, adapter := setupTransactionTests(
+			t,
+			emulator.WithStorageLimitEnabled(false),
+		)
+		serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address)
+
+		addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter)
+
+		invalidSequenceNumber := b.ServiceKey().SequenceNumber + 2137
+		tx := flowsdk.NewTransaction().
+			SetScript([]byte(addTwoScript)).
+			SetPayer(serviceAccountAddress).
+			SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, invalidSequenceNumber).
+			SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit).
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + + require.Error(t, result.Error) + + assert.IsType(t, &emulator.FVMError{}, result.Error) + seqErr := fvmerrors.InvalidProposalSeqNumberError{} + ok := errors.As(result.Error, &seqErr) + assert.True(t, ok) + assert.Equal(t, invalidSequenceNumber, seqErr.ProvidedSeqNumber()) + }) + + const expiry = 10 + + t.Run("Missing reference block ID", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithTransactionExpiry(expiry), + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, &emulator.IncompleteTransactionError{}, err) + }) + + t.Run("Expired transaction", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithTransactionExpiry(expiry), + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + expiredBlock, err := b.GetLatestBlock() + require.NoError(t, err) + + // commit blocks until expiry window is exceeded + for i := 0; i < expiry+1; i++ { + _, _, err := b.ExecuteAndCommitBlock() + require.NoError(t, err) + } + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetReferenceBlockID(flowsdk.Identifier(expiredBlock.ID())). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, &emulator.ExpiredTransactionError{}, err) + }) + + t.Run("Invalid signature for provided data", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + tx.SetComputeLimit(100) // change data after signing + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + debug := emulator.NewTransactionInvalidSignature(&flowgo.TransactionBody{ + ReferenceBlockID: flowgo.Identifier{}, + Script: nil, + Arguments: nil, + GasLimit: flowgo.DefaultMaxTransactionGasLimit, + ProposalKey: flowgo.ProposalKey{ + Address: emulator.SDKAddressToFlow(serviceAccountAddress), + KeyIndex: b.ServiceKey().Index, + SequenceNumber: b.ServiceKey().SequenceNumber, + }, + Payer: emulator.SDKAddressToFlow(serviceAccountAddress), + Authorizers: emulator.SDKAddressesToFlow([]flowsdk.Address{serviceAccountAddress}), + PayloadSignatures: nil, + EnvelopeSignatures: nil, + }) + + assert.NotNil(t, result.Error) + assert.IsType(t, result.Debug, debug) + }) +} + +func TestSubmitTransaction_Duplicate(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // Submit same tx again (errors) + err = adapter.SendTransaction(context.Background(), *tx) + assert.IsType(t, err, &emulator.DuplicateTransactionError{}) +} + +func TestSubmitTransaction_Reverted(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(`transaction { execute { panic("revert!") } }`)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
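+		// the script panics unconditionally, so execution of this transaction is expected to revert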
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit invalid tx1 + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Reverted()) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // tx1 status becomes TransactionStatusSealed + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) + assert.Error(t, tx1Result.Error) +} + +func TestSubmitTransaction_Authorizers(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + accountKeys := test.AccountKeyGenerator() + + accountKeyB, signerB := accountKeys.NewWithSigner() + accountKeyB.SetWeight(flowsdk.AccountKeyWeightThreshold) + + accountAddressB, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKeyB}, nil) + assert.NoError(t, err) + + t.Run("Extra authorizers", func(t *testing.T) { + // script only supports one account + script := []byte(` + transaction { + prepare(signer: &Account) {} + } + `) + + // create transaction with two authorizing accounts + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress). + AddAuthorizer(accountAddressB) + + err = tx.SignPayload(accountAddressB, 0, signerB) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + assert.True(t, result.Reverted()) + + _, err = b.CommitBlock() + assert.NoError(t, err) + }) + + t.Run("Insufficient authorizers", func(t *testing.T) { + // script requires two accounts + script := []byte(` + transaction { + prepare(signerA: &Account, signerB: &Account) {} + } + `) + + // create transaction with two accounts + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.True(t, result.Reverted()) + + _, err = b.CommitBlock() + assert.NoError(t, err) + }) +} + +func TestSubmitTransaction_EnvelopeSignature(t *testing.T) { + + t.Parallel() + + t.Run("Missing envelope signature", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignPayload(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeAccountAuthorizationError)) + }) + + t.Run("Invalid account", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addresses := flowsdk.NewAddressGenerator(flowsdk.Emulator) + for { + _, err := adapter.GetAccount(context.Background(), addresses.NextAddress()) + if err != nil { + break + } + } + + nonExistentAccountAddress := addresses.Address() + + script := []byte(` + transaction { + prepare(signer: &Account) {} + } + `) + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(nonExistentAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignPayload(nonExistentAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.Error(t, result.Error) + assert.True(t, fvmerrors.IsAccountPublicKeyNotFoundError(result.Error)) + }) + + t.Run("Mismatched authorizer count", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithTransactionValidationEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addresses := flowsdk.NewAddressGenerator(flowsdk.Emulator) + for { + _, err := adapter.GetAccount(context.Background(), addresses.NextAddress()) + if err != nil { + break + } + } + + nonExistentAccountAddress := addresses.Address() + + script := []byte(` + transaction { + prepare() {} + } + `) + + tx := flowsdk.NewTransaction(). + SetScript(script). 
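+			// the script's prepare block declares no signers, yet an authorizer is attached below, causing a count mismatch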
+ SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(nonExistentAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignPayload(nonExistentAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.ErrorContains(t, result.Error, "authorizer count mismatch") + }) + + t.Run("Invalid key", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + // use key that does not exist on service account + invalidKey, _ := crypto.GeneratePrivateKey(crypto.ECDSA_P256, + []byte("invalid key invalid key invalid key invalid key invalid key invalid key")) + invalidSigner, err := crypto.NewNaiveSigner(invalidKey, crypto.SHA3_256) + require.NoError(t, err) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, invalidSigner) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeInvalidProposalSignatureError)) + }) + + t.Run("Key weights", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + + accountKeys := test.AccountKeyGenerator() + + accountKeyA, signerA := accountKeys.NewWithSigner() + accountKeyA.SetWeight(flowsdk.AccountKeyWeightThreshold / 2) + + accountKeyB, signerB := accountKeys.NewWithSigner() + accountKeyB.SetWeight(flowsdk.AccountKeyWeightThreshold / 2) + + accountAddressA, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKeyA, accountKeyB}, nil) + assert.NoError(t, err) + + script := []byte(` + transaction { + prepare(signer: &Account) {} + } + `) + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(accountAddressA, 1, 0). + SetPayer(accountAddressA). 
+ AddAuthorizer(accountAddressA) + + // Insufficient keys + err = tx.SignEnvelope(accountAddressA, 1, signerB) + assert.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Add key so we have sufficient keys + err = tx.SignEnvelope(accountAddressA, 0, signerA) + assert.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + t.Run("Insufficient key weight", func(t *testing.T) { + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeAccountAuthorizationError)) + }) + + t.Run("Sufficient key weight", func(t *testing.T) { + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + AssertTransactionSucceeded(t, result) + }) + }) +} + +func TestSubmitTransaction_PayloadSignatures(t *testing.T) { + + t.Parallel() + + t.Run("Missing payload signature", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + // create a new account, + // authorizer must be different from payer + + accountKeys := test.AccountKeyGenerator() + + accountKeyB, _ := accountKeys.NewWithSigner() + accountKeyB.SetWeight(flowsdk.AccountKeyWeightThreshold) + + accountAddressB, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKeyB}, nil) + assert.NoError(t, err) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(accountAddressB) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + assert.True(t, fvmerrors.HasErrorCode(result.Error, fvmerrors.ErrCodeAccountAuthorizationError)) + }) + + t.Run("Multiple payload signers", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + accountKeys := test.AccountKeyGenerator() + + accountKeyB, signerB := accountKeys.NewWithSigner() + accountKeyB.SetWeight(flowsdk.AccountKeyWeightThreshold) + + accountAddressB, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKeyB}, nil) + assert.NoError(t, err) + + multipleAccountScript := []byte(` + transaction { + prepare(signerA: &Account, signerB: &Account) { + log(signerA.address) + log(signerB.address) + } + } + `) + + tx := flowsdk.NewTransaction(). + SetScript(multipleAccountScript). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress). 
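+			// accountAddressB is added as a second authorizer; it signs the payload below while the payer signs the envelope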
+ AddAuthorizer(accountAddressB) + + err = tx.SignPayload(accountAddressB, 0, signerB) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + require.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + assert.Contains(t, + result.Logs, + interpreter.NewUnmeteredAddressValueFromBytes(serviceAccountAddress.Bytes()).String(), + ) + + assert.Contains(t, + result.Logs, + interpreter.NewUnmeteredAddressValueFromBytes(accountAddressB.Bytes()).String(), + ) + }) +} + +func TestSubmitTransaction_Arguments(t *testing.T) { + + t.Parallel() + + addresses := test.AddressGenerator() + + fix64Value, _ := cadence.NewFix64("123456.00000") + uFix64Value, _ := cadence.NewUFix64("123456.00000") + + var tests = []struct { + argType cadence.Type + arg cadence.Value + }{ + { + cadence.BoolType, + cadence.NewBool(true), + }, + { + cadence.StringType, + cadence.String("foo"), + }, + { + cadence.AddressType, + cadence.NewAddress(addresses.New()), + }, + { + cadence.IntType, + cadence.NewInt(42), + }, + { + cadence.Int8Type, + cadence.NewInt8(42), + }, + { + cadence.Int16Type, + cadence.NewInt16(42), + }, + { + cadence.Int32Type, + cadence.NewInt32(42), + }, + { + cadence.Int64Type, + cadence.NewInt64(42), + }, + { + cadence.Int128Type, + cadence.NewInt128(42), + }, + { + cadence.Int256Type, + cadence.NewInt256(42), + }, + { + cadence.UIntType, + cadence.NewUInt(42), + }, + { + cadence.UInt8Type, + cadence.NewUInt8(42), + }, + { + cadence.UInt16Type, + cadence.NewUInt16(42), + }, + { + cadence.UInt32Type, + cadence.NewUInt32(42), + }, + { + cadence.UInt64Type, + cadence.NewUInt64(42), + }, + { + cadence.UInt128Type, + cadence.NewUInt128(42), + }, + { + cadence.UInt256Type, + cadence.NewUInt256(42), + }, + { + cadence.Word8Type, + cadence.NewWord8(42), + }, + { + cadence.Word16Type, + cadence.NewWord16(42), + }, + { + cadence.Word32Type, + cadence.NewWord32(42), + }, + { + cadence.Word64Type, + cadence.NewWord64(42), + }, + { + cadence.Fix64Type, + fix64Value, + }, + { + cadence.UFix64Type, + uFix64Value, + }, + { + &cadence.ConstantSizedArrayType{ + Size: 3, + ElementType: cadence.IntType, + }, + cadence.NewArray([]cadence.Value{ + cadence.NewInt(1), + cadence.NewInt(2), + cadence.NewInt(3), + }), + }, + { + &cadence.DictionaryType{ + KeyType: cadence.StringType, + ElementType: cadence.IntType, + }, + cadence.NewDictionary([]cadence.KeyValuePair{ + { + Key: cadence.String("a"), + Value: cadence.NewInt(1), + }, + { + Key: cadence.String("b"), + Value: cadence.NewInt(2), + }, + { + Key: cadence.String("c"), + Value: cadence.NewInt(3), + }, + }), + }, + } + + var script = func(argType cadence.Type) []byte { + return []byte(fmt.Sprintf(` + transaction(x: %s) { + execute { + log(x) + } + } + `, argType.ID())) + } + + for _, tt := range tests { + t.Run(tt.argType.ID(), func(t *testing.T) { + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx := flowsdk.NewTransaction(). + SetScript(script(tt.argType)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + err := tx.AddArgument(tt.arg) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + assert.Len(t, result.Logs, 1) + }) + } + + t.Run("Log", func(t *testing.T) { + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + script := []byte(` + transaction(x: Int) { + execute { + log(x * 6) + } + } + `) + + x := 7 + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + err := tx.AddArgument(cadence.NewInt(x)) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + require.Len(t, result.Logs, 1) + assert.Equal(t, "42", result.Logs[0]) + }) +} + +func TestSubmitTransaction_ProposerSequence(t *testing.T) { + + t.Parallel() + + t.Run("Valid transaction increases sequence number", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + script := []byte(` + transaction { + prepare(signer: &Account) {} + } + `) + prevSeq := b.ServiceKey().SequenceNumber + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) + + assert.Equal(t, prevSeq+1, b.ServiceKey().SequenceNumber) + }) + + t.Run("Reverted transaction increases sequence number", func(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + prevSeq := b.ServiceKey().SequenceNumber + script := []byte(` + transaction { + prepare(signer: &Account) {} + execute { panic("revert!") } + } + `) + + tx := flowsdk.NewTransaction(). + SetScript(script). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
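+			// the proposal key's sequence number should advance even though the script reverts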
+ SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + _, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, prevSeq+1, b.ServiceKey().SequenceNumber) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) + assert.Len(t, tx1Result.Events, 0) + assert.IsType(t, &emulator.ExecutionError{}, tx1Result.Error) + }) +} + +func TestGetTransaction(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + t.Run("Nonexistent", func(t *testing.T) { + _, err := adapter.GetTransaction(context.Background(), flowsdk.EmptyID) + if assert.Error(t, err) { + assert.IsType(t, &emulator.TransactionNotFoundError{}, err) + } + }) + + t.Run("Existent", func(t *testing.T) { + tx2, err := adapter.GetTransaction(context.Background(), tx1.ID()) + require.NoError(t, err) + + assert.Equal(t, tx1.ID(), tx2.ID()) + }) +} + +func TestGetTransactionResult(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, counterAddress := DeployAndGenerateAddTwoScript(t, adapter) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + result, err := adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusUnknown, result.Status) + require.Empty(t, result.Events) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, result.Status) + require.Empty(t, result.Events) + + _, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, result.Status) + require.Empty(t, result.Events) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, result.Status) + + require.Len(t, result.Events, 3) + + event1 := result.Events[0] + assert.Equal(t, tx.ID(), event1.TransactionID) + assert.Equal(t, "flow.StorageCapabilityControllerIssued", event1.Type) + assert.Equal(t, 0, event1.EventIndex) + + event2 := result.Events[1] + assert.Equal(t, tx.ID(), event2.TransactionID) + assert.Equal(t, "flow.CapabilityPublished", event2.Type) + assert.Equal(t, 1, event2.EventIndex) + + event3 := result.Events[2] + addr, _ := common.BytesToAddress(counterAddress.Bytes()) + location := common.AddressLocation{ + Address: addr, + Name: "Counting", + } + assert.Equal(t, tx.ID(), event3.TransactionID) + assert.Equal(t, + string(location.TypeID(nil, "Counting.CountIncremented")), + event3.Type, + ) + assert.Equal(t, 2, event3.EventIndex) + fields := cadence.FieldsMappedByName(event3.Value) + assert.Len(t, fields, 1) + assert.Equal(t, + cadence.NewInt(2), + fields["count"], + ) +} + +// TestGetTxByBlockIDMethods tests the GetTransactionByBlockID and GetTransactionResultByBlockID +// methods return the correct transaction and transaction result for a given block ID. +func TestGetTxByBlockIDMethods(t *testing.T) { + + t.Parallel() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + + const code = ` + transaction { + execute { + log("Hello, World!") + } + } + ` + + serviceKey := b.ServiceKey() + serviceAccountAddress := flowsdk.Address(serviceKey.Address) + + signer, err := serviceKey.Signer() + require.NoError(t, err) + + submittedTx := make([]*flowsdk.Transaction, 0) + + // submit 5 tx to be executed in a single block + for i := uint64(0); i < 5; i++ { + tx := flowsdk.NewTransaction(). + SetScript([]byte(code)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, serviceKey.Index, serviceKey.SequenceNumber). + SetPayer(serviceAccountAddress). 
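+			// all five transactions reuse the service key, so the sequence number is advanced manually after each send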
+ AddAuthorizer(serviceAccountAddress) + + err = tx.SignEnvelope(serviceAccountAddress, serviceKey.Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // added to fix tx matching (nil vs empty slice) + tx.PayloadSignatures = []flow.TransactionSignature{} + + submittedTx = append(submittedTx, tx) + + // tx will be executed in the order they were submitted + serviceKey.SequenceNumber++ + } + + // execute the batch of transactions + block, expectedResults, err := b.ExecuteAndCommitBlock() + assert.NoError(t, err) + assert.Len(t, expectedResults, len(submittedTx)) + + results, err := adapter.GetTransactionResultsByBlockID(context.Background(), flowsdk.Identifier(block.ID())) + require.NoError(t, err) + assert.Len(t, results, len(submittedTx)) + + transactions, err := adapter.GetTransactionsByBlockID(context.Background(), flowsdk.Identifier(block.ID())) + require.NoError(t, err) + assert.Len(t, transactions, len(submittedTx)) + + // make sure the results and transactions returned match the transactions submitted, and are in + // the same order + for i, tx := range submittedTx { + assert.Equal(t, tx.ID(), transactions[i].ID()) + assert.Equal(t, submittedTx[i], transactions[i]) + + assert.Equal(t, tx.ID(), results[i].TransactionID) + assert.Equal(t, tx.ID(), expectedResults[i].TransactionID) + // note: expectedResults from ExecuteAndCommitBlock and results from GetTransactionResultsByBlockID + // use different representations. results is missing some data included in the flow.TransactionResult + // struct, so we can't compare them directly. + } +} + +const helloWorldContract = ` + access(all) contract HelloWorld { + + access(all) fun hello(): String { + return "Hello, World!" + } + } +` + +const callHelloTxTemplate = ` + import HelloWorld from 0x%s + transaction { + prepare() { + assert(HelloWorld.hello() == "Hello, World!") + } + } +` + +func TestHelloWorld_NewAccount(t *testing.T) { + + t.Parallel() + + accountKeys := test.AccountKeyGenerator() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + accountKey, accountSigner := accountKeys.NewWithSigner() + + contracts := []templates.Contract{ + { + Name: "HelloWorld", + Source: helloWorldContract, + }, + } + + createAccountTx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + contracts, + serviceAccountAddress, + ) + require.NoError(t, err) + + createAccountTx.SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = createAccountTx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *createAccountTx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // createAccountTx status becomes TransactionStatusSealed + createAccountTxResult, err := adapter.GetTransactionResult(context.Background(), createAccountTx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, createAccountTxResult.Status) + + var newAccountAddress flowsdk.Address + for _, event := range createAccountTxResult.Events { + if event.Type != flowsdk.EventAccountCreated { + continue + } + accountCreatedEvent := flowsdk.AccountCreatedEvent(event) + newAccountAddress = accountCreatedEvent.Address() + break + } + + if newAccountAddress == flowsdk.EmptyAddress { + assert.Fail(t, "missing account created event") + } + + t.Logf("new account address: 0x%s", newAccountAddress.Hex()) + + account, err := adapter.GetAccount(context.Background(), newAccountAddress) + assert.NoError(t, err) + + assert.Equal(t, newAccountAddress, account.Address) + + // call hello world code + + accountKey = account.Keys[0] + + callHelloCode := []byte(fmt.Sprintf(callHelloTxTemplate, newAccountAddress.Hex())) + callHelloTx := flowsdk.NewTransaction(). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetScript(callHelloCode). + SetProposalKey(newAccountAddress, accountKey.Index, accountKey.SequenceNumber). + SetPayer(newAccountAddress) + + err = callHelloTx.SignEnvelope(newAccountAddress, accountKey.Index, accountSigner) + assert.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *callHelloTx) + assert.NoError(t, err) + + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) +} + +func TestHelloWorld_UpdateAccount(t *testing.T) { + + t.Parallel() + + accountKeys := test.AccountKeyGenerator() + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + accountKey, accountSigner := accountKeys.NewWithSigner() + _ = accountSigner + + contracts := []templates.Contract{ + { + Name: "HelloWorld", + Source: `access(all) contract HelloWorld {}`, + }, + } + + createAccountTx, err := templates.CreateAccount( + []*flowsdk.AccountKey{accountKey}, + contracts, + serviceAccountAddress, + ) + assert.NoError(t, err) + + createAccountTx. + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = createAccountTx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *createAccountTx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // createAccountTx status becomes TransactionStatusSealed + createAccountTxResult, err := adapter.GetTransactionResult(context.Background(), createAccountTx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, createAccountTxResult.Status) + + var newAccountAddress flowsdk.Address + for _, event := range createAccountTxResult.Events { + if event.Type != flowsdk.EventAccountCreated { + continue + } + accountCreatedEvent := flowsdk.AccountCreatedEvent(event) + newAccountAddress = accountCreatedEvent.Address() + break + } + + if newAccountAddress == flowsdk.EmptyAddress { + assert.Fail(t, "missing account created event") + } + + t.Logf("new account address: 0x%s", newAccountAddress.Hex()) + + account, err := adapter.GetAccount(context.Background(), newAccountAddress) + assert.NoError(t, err) + + accountKey = account.Keys[0] + + updateAccountCodeTx := templates.UpdateAccountContract( + newAccountAddress, + templates.Contract{ + Name: "HelloWorld", + Source: helloWorldContract, + }, + ) + updateAccountCodeTx. + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(newAccountAddress, accountKey.Index, accountKey.SequenceNumber). + SetPayer(newAccountAddress) + + err = updateAccountCodeTx.SignEnvelope(newAccountAddress, accountKey.Index, accountSigner) + assert.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *updateAccountCodeTx) + assert.NoError(t, err) + + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // call hello world code + + accountKey.SequenceNumber++ + + callHelloCode := []byte(fmt.Sprintf(callHelloTxTemplate, newAccountAddress.Hex())) + callHelloTx := flowsdk.NewTransaction(). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetScript(callHelloCode). + SetProposalKey(newAccountAddress, accountKey.Index, accountKey.SequenceNumber). + SetPayer(newAccountAddress) + + err = callHelloTx.SignEnvelope(newAccountAddress, accountKey.Index, accountSigner) + assert.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *callHelloTx) + assert.NoError(t, err) + + result, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) +} + +func TestInfiniteTransaction(t *testing.T) { + + t.Parallel() + + const limit = 18 + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + emulator.WithTransactionMaxGasLimit(limit), + ) + + const code = ` + access(all) fun test() { + test() + } + + transaction { + execute { + test() + } + } + ` + + // Create a new account + + accountKeys := test.AccountKeyGenerator() + accountKey, signer := accountKeys.NewWithSigner() + accountAddress, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKey}, nil) + assert.NoError(t, err) + + // Sign the transaction using the new account. 
+ // Do not test using the service account, + // as the computation limit is disabled for it + + tx := flowsdk.NewTransaction(). + SetScript([]byte(code)). + SetComputeLimit(limit). + SetProposalKey(accountAddress, 0, 0). + SetPayer(accountAddress) + + err = tx.SignEnvelope(accountAddress, 0, signer) + assert.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Execute tx + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + require.True(t, fvmerrors.IsComputationLimitExceededError(result.Error)) +} + +func TestTransactionExecutionLimit(t *testing.T) { + + t.Parallel() + + const code = ` + transaction { + execute { + var s: Int256 = 1024102410241024 + var i: Int256 = 0 + var a: Int256 = 7 + var b: Int256 = 5 + var c: Int256 = 2 + + while i < 150000 { + s = s * a + s = s / b + s = s / c + i = i + 1 + } + } + } + ` + + t.Run("ExceedingLimit", func(t *testing.T) { + + t.Parallel() + + const limit = 2000 + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + emulator.WithTransactionMaxGasLimit(limit), + ) + + // Create a new account + + accountKeys := test.AccountKeyGenerator() + accountKey, signer := accountKeys.NewWithSigner() + accountAddress, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKey}, nil) + assert.NoError(t, err) + + // Sign the transaction using the new account. + // Do not test using the service account, + // as the computation limit is disabled for it + + tx := flowsdk.NewTransaction(). + SetScript([]byte(code)). + SetComputeLimit(limit). + SetProposalKey(accountAddress, 0, 0). + SetPayer(accountAddress) + + err = tx.SignEnvelope(accountAddress, 0, signer) + assert.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Execute tx + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + + require.True(t, fvmerrors.IsComputationLimitExceededError(result.Error)) + }) + + t.Run("SufficientLimit", func(t *testing.T) { + + t.Parallel() + + const limit = 19000 + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + emulator.WithTransactionMaxGasLimit(limit), + ) + + // Create a new account + + accountKeys := test.AccountKeyGenerator() + accountKey, signer := accountKeys.NewWithSigner() + accountAddress, err := adapter.CreateAccount(context.Background(), []*flowsdk.AccountKey{accountKey}, nil) + assert.NoError(t, err) + + // Sign the transaction using the new account. + // Do not test using the service account, + // as the computation limit is disabled for it + + tx := flowsdk.NewTransaction(). + SetScript([]byte(code)). + SetComputeLimit(limit). + SetProposalKey(accountAddress, 0, 0). 
+ SetPayer(accountAddress) + + err = tx.SignEnvelope(accountAddress, 0, signer) + assert.NoError(t, err) + + // Submit tx + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + // Execute tx + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + assert.NoError(t, result.Error) + }) +} + +func TestSubmitTransactionWithCustomLogger(t *testing.T) { + + t.Parallel() + + var memlog bytes.Buffer + memlogWrite := io.Writer(&memlog) + logger := zerolog.New(memlogWrite).Level(zerolog.DebugLevel) + + b, adapter := setupTransactionTests( + t, + emulator.WithStorageLimitEnabled(false), + emulator.WithLogger(logger), + emulator.WithTransactionFeesEnabled(true), + ) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + addTwoScript, _ := DeployAndGenerateAddTwoScript(t, adapter) + + tx1 := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). + AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx1.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + // Submit tx1 + err = adapter.SendTransaction(context.Background(), *tx1) + assert.NoError(t, err) + + // Execute tx1 + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + // tx1 status becomes TransactionStatusSealed + tx1Result, err := adapter.GetTransactionResult(context.Background(), tx1.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, tx1Result.Status) + + var meter Meter + scanner := bufio.NewScanner(&memlog) + for scanner.Scan() { + txt := scanner.Text() + if strings.Contains(txt, "transaction execution data") { + err = json.Unmarshal([]byte(txt), &meter) + } + } + + assert.NoError(t, err) + assert.Greater(t, meter.LedgerInteractionUsed, 0) + assert.Greater(t, meter.ComputationUsed, 0) + assert.Greater(t, meter.MemoryEstimate, 0) + assert.Greater(t, len(meter.ComputationIntensities), 0) + assert.Greater(t, len(meter.MemoryAmounts), 0) + +} + +type Meter struct { + LedgerInteractionUsed int `json:"ledgerInteractionUsed"` + ComputationUsed int `json:"computationUsed"` + MemoryEstimate int `json:"memoryEstimate"` + ComputationIntensities MeteredComputationIntensities `json:"computationIntensities"` + MemoryAmounts MeteredMemoryAmounts `json:"memoryAmounts"` +} + +type MeteredComputationIntensities map[common.ComputationKind]uint64 + +type MeteredMemoryAmounts map[common.MemoryKind]uint64 + +func IncrementHelper( + t *testing.T, + b emulator.Emulator, + adapter *emulator.SDKAdapter, + counterAddress flowsdk.Address, + addTwoScript string, + expected int, + expectSetup bool, +) { + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + tx := flowsdk.NewTransaction(). + SetScript([]byte(addTwoScript)). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress). 
+ AddAuthorizer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + result, err := adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusUnknown, result.Status) + require.Empty(t, result.Events) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, result.Status) + require.Empty(t, result.Events) + + _, err = b.ExecuteNextTransaction() + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusPending, result.Status) + require.Empty(t, result.Events) + + _, err = b.CommitBlock() + assert.NoError(t, err) + + result, err = adapter.GetTransactionResult(context.Background(), tx.ID()) + assert.NoError(t, err) + assert.Equal(t, flowsdk.TransactionStatusSealed, result.Status) + + var expectedEventIndex int + if expectSetup { + require.Len(t, result.Events, 3) + + event1 := result.Events[0] + assert.Equal(t, tx.ID(), event1.TransactionID) + assert.Equal(t, "flow.StorageCapabilityControllerIssued", event1.Type) + assert.Equal(t, 0, event1.EventIndex) + + event2 := result.Events[1] + assert.Equal(t, tx.ID(), event2.TransactionID) + assert.Equal(t, "flow.CapabilityPublished", event2.Type) + assert.Equal(t, 1, event2.EventIndex) + + expectedEventIndex = 2 + } else { + require.Len(t, result.Events, 1) + expectedEventIndex = 0 + } + incrementedEvent := result.Events[expectedEventIndex] + + addr, _ := common.BytesToAddress(counterAddress.Bytes()) + location := common.AddressLocation{ + Address: addr, + Name: "Counting", + } + assert.Equal(t, tx.ID(), incrementedEvent.TransactionID) + assert.Equal(t, + string(location.TypeID(nil, "Counting.CountIncremented")), + incrementedEvent.Type, + ) + assert.Equal(t, expectedEventIndex, incrementedEvent.EventIndex) + fields := cadence.FieldsMappedByName(incrementedEvent.Value) + assert.Len(t, fields, 1) + assert.Equal(t, + cadence.NewInt(expected), + fields["count"], + ) +} + +// TestTransactionWithCadenceRandom checks Cadence's random function works +// within a transaction +func TestTransactionWithCadenceRandom(t *testing.T) { + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + code := ` + transaction { + prepare() { + assert(revertibleRandom<UInt64>() >= 0) + } + } + ` + callRandomTx := flowsdk.NewTransaction(). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetScript([]byte(code)). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). 
+ SetPayer(serviceAccountAddress) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = callRandomTx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *callRandomTx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + assert.NoError(t, err) + AssertTransactionSucceeded(t, result) + + _, err = b.CommitBlock() + assert.NoError(t, err) +} + +func TestEVMTransaction(t *testing.T) { + serviceAddr := flowgo.Emulator.Chain().ServiceAddress() + code := []byte(fmt.Sprintf( + ` + import EVM from %s + + transaction(bytes: [UInt8; 20]) { + execute { + let addr = EVM.EVMAddress(bytes: bytes) + log(addr) + } + } + `, + serviceAddr.HexWithPrefix(), + )) + + b, adapter := setupTransactionTests(t) + serviceAccountAddress := flowsdk.Address(b.ServiceKey().Address) + + // generate random address + genArr := make([]cadence.Value, 20) + for i := range genArr { + genArr[i] = cadence.UInt8(i) + } + addressBytesArray := cadence.NewArray(genArr).WithType(stdlib.EVMAddressBytesCadenceType) + + tx := flowsdk.NewTransaction(). + SetScript(code). + SetComputeLimit(flowgo.DefaultMaxTransactionGasLimit). + SetProposalKey(serviceAccountAddress, b.ServiceKey().Index, b.ServiceKey().SequenceNumber). + SetPayer(serviceAccountAddress) + + err := tx.AddArgument(addressBytesArray) + assert.NoError(t, err) + + signer, err := b.ServiceKey().Signer() + require.NoError(t, err) + + err = tx.SignEnvelope(serviceAccountAddress, b.ServiceKey().Index, signer) + require.NoError(t, err) + + err = adapter.SendTransaction(context.Background(), *tx) + assert.NoError(t, err) + + result, err := b.ExecuteNextTransaction() + require.NoError(t, err) + AssertTransactionSucceeded(t, result) + + require.Len(t, result.Logs, 1) + require.Equal(t, result.Logs[0], fmt.Sprintf("A.%s.EVM.EVMAddress(bytes: %s)", serviceAddr, addressBytesArray.String())) +} diff --git a/integration/internal/emulator/tests/vm_test.go b/integration/internal/emulator/tests/vm_test.go new file mode 100644 index 00000000000..39725f276b9 --- /dev/null +++ b/integration/internal/emulator/tests/vm_test.go @@ -0,0 +1,82 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tests_test + +import ( + "testing" + + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go-sdk/test" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TestVm(t *testing.T) { + + t.Parallel() + + test := func(eventEncodingVersion entities.EventEncodingVersion) { + t.Run(eventEncodingVersion.String(), func(t *testing.T) { + t.Parallel() + t.Run("should be able to convert", func(t *testing.T) { + + t.Parallel() + + idGenerator := test.IdentifierGenerator() + + eventGenerator := test.EventGenerator(eventEncodingVersion) + event1, err := emulator.SDKEventToFlow(eventGenerator.New()) + assert.NoError(t, err) + + event2, err := emulator.SDKEventToFlow(eventGenerator.New()) + assert.NoError(t, err) + + txnId := flowgo.Identifier(idGenerator.New()) + output := fvm.ProcedureOutput{ + Logs: []string{"TestLog1", "TestLog2"}, + Events: []flowgo.Event{*event1, *event2}, + ComputationUsed: 5, + MemoryEstimate: 1211, + Err: nil, + } + + tr, err := emulator.VMTransactionResultToEmulator(txnId, output) + assert.NoError(t, err) + + assert.Equal(t, txnId, flowgo.Identifier(tr.TransactionID)) + assert.Equal(t, output.Logs, tr.Logs) + + flowEvents, err := emulator.FlowEventsToSDK(output.Events) + assert.NoError(t, err) + assert.Equal(t, flowEvents, tr.Events) + + assert.Equal(t, output.ComputationUsed, tr.ComputationUsed) + assert.Equal(t, output.MemoryEstimate, tr.MemoryEstimate) + assert.Equal(t, output.Err, tr.Error) + }) + }) + } + + test(entities.EventEncodingVersion_JSON_CDC_V0) + test(entities.EventEncodingVersion_CCF_V0) +} diff --git a/integration/internal/emulator/utils/unittest/fixtures.go b/integration/internal/emulator/utils/unittest/fixtures.go new file mode 100644 index 00000000000..c4a8397897e --- /dev/null +++ b/integration/internal/emulator/utils/unittest/fixtures.go @@ -0,0 +1,61 @@ +/* + * Flow Emulator + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package unittest + +import ( + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go-sdk/test" + + "github.com/onflow/flow-go/integration/internal/emulator" + flowgo "github.com/onflow/flow-go/model/flow" +) + +func TransactionFixture() flowgo.TransactionBody { + return *emulator.SDKTransactionToFlow(*test.TransactionGenerator().New()) +} + +func StorableTransactionResultFixture(eventEncodingVersion entities.EventEncodingVersion) emulator.StorableTransactionResult { + events := test.EventGenerator(eventEncodingVersion) + + eventA, _ := emulator.SDKEventToFlow(events.New()) + eventB, _ := emulator.SDKEventToFlow(events.New()) + + return emulator.StorableTransactionResult{ + ErrorCode: 42, + ErrorMessage: "foo", + Logs: []string{"a", "b", "c"}, + Events: []flowgo.Event{ + *eventA, + *eventB, + }, + } +} + +func FullCollectionFixture(n int) flowgo.Collection { + transactions := make([]*flowgo.TransactionBody, n) + for i := 0; i < n; i++ { + tx := TransactionFixture() + transactions[i] = &tx + } + + return flowgo.Collection{ + Transactions: transactions, + } +} diff --git a/integration/localnet/Makefile b/integration/localnet/Makefile index f35cb0643e0..c1185858109 100644 --- a/integration/localnet/Makefile +++ b/integration/localnet/Makefile @@ -3,13 +3,17 @@ CONSENSUS = 2 VALID_CONSENSUS := $(shell test $(CONSENSUS) -ge 2; echo $$?) EXECUTION = 2 VALID_EXECUTION := $(shell test $(EXECUTION) -ge 2; echo $$?) +TEST_EXECUTION = 0 VERIFICATION = 1 ACCESS = 1 OBSERVER = 0 NCLUSTERS=1 -EPOCHLEN=10000 # 0 means use default -STAKINGLEN=2000 # 0 means use default -DKGLEN=2000 # 0 means use default +EPOCHLEN=10000 # 0 means use default +STAKINGLEN=2000 # 0 means use default +DKGLEN=2000 # 0 means use default +KVSTORE_VERSION=default +EPOCH_EXTENSION_LEN=600 +FINALIZATION_SAFETY_THRESHOLD=100 # 0 means use default CONSENSUS_DELAY=800ms COLLECTION_DELAY=950ms @@ -17,12 +21,15 @@ PROFILER=false PROFILE_UPLOADER=false TRACING=true EXTENSIVE_TRACING=false -CADENCE_TRACING=false LOGLEVEL=DEBUG # The Git commit hash COMMIT=$(shell git rev-parse HEAD) -VERSION=localnetbuild + +# The version to include in container builds. 
Must be semver compliant
+ifeq ($(VERSION),)
+	VERSION := $(shell git describe --tags --abbrev=2 --match "v*" 2>/dev/null)-localnetbuild
+endif
 
 CURRENT_DIRECTORY=$(shell pwd)
 
@@ -43,7 +50,7 @@ ifeq ($(strip $(VALID_EXECUTION)), 1)
 else ifeq ($(strip $(VALID_CONSENSUS)), 1)
 	$(error Number of Consensus nodes should be no less than 2)
 else
-	go run -tags relic \
+	go run \
 	-ldflags="-X 'github.com/onflow/flow-go/cmd/build.commit=${COMMIT}' \
 	-X 'github.com/onflow/flow-go/cmd/build.semver=${VERSION}'" \
 	builder/*.go \
@@ -54,14 +61,17 @@ else
 	-verification=$(VERIFICATION) \
 	-access=$(ACCESS) \
 	-observer=$(OBSERVER) \
+	-test-execution=$(TEST_EXECUTION) \
 	-nclusters=$(NCLUSTERS) \
 	-epoch-length=$(EPOCHLEN) \
 	-epoch-staking-phase-length=$(STAKINGLEN) \
 	-epoch-dkg-phase-length=$(DKGLEN) \
+	-kvstore-version=$(KVSTORE_VERSION) \
+	-kvstore-epoch-extension-view-count=$(EPOCH_EXTENSION_LEN) \
+	-kvstore-finalization-safety-threshold=$(FINALIZATION_SAFETY_THRESHOLD) \
 	-profiler=$(PROFILER) \
 	-profile-uploader=$(PROFILE_UPLOADER) \
 	-tracing=$(TRACING) \
-	-cadence-tracing=$(CADENCE_TRACING) \
 	-extensive-tracing=$(EXTENSIVE_TRACING) \
 	-consensus-delay=$(CONSENSUS_DELAY) \
 	-collection-delay=$(COLLECTION_DELAY)
@@ -73,6 +83,9 @@ endif
 bootstrap-light:
 	$(MAKE) -e COLLECTION=1 CONSENSUS=2 EXECUTION=2 VERIFICATION=1 ACCESS=1 NCLUSTERS=1 bootstrap
 
+bootstrap-test-en:
+	$(MAKE) -e COLLECTION=1 CONSENSUS=2 EXECUTION=2 VERIFICATION=1 ACCESS=1 NCLUSTERS=1 TEST_EXECUTION=1 bootstrap
+
 # CI tests have a larger number of nodes
 .PHONY: bootstrap-ci
 bootstrap-ci:
@@ -81,7 +94,7 @@ bootstrap-ci:
 # Creates a version of localnet configured with short epochs
 .PHONY: bootstrap-short-epochs
 bootstrap-short-epochs:
-	$(MAKE) -e EPOCHLEN=200 STAKINGLEN=10 DKGLEN=50 bootstrap
+	$(MAKE) -e EPOCHLEN=200 STAKINGLEN=5 DKGLEN=50 FINALIZATION_SAFETY_THRESHOLD=20 bootstrap
 
 # Starts the network - must have been bootstrapped first. Builds fresh images.
 .PHONY: start
@@ -95,44 +108,44 @@ start-cached: start-metrics start-flow-cached
 # Starts metrics services
 .PHONY: start-metrics
 start-metrics:
-	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker-compose -f docker-compose.metrics.yml up -d
+	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.metrics.yml up -d
 
 # Starts a version of localnet with just flow nodes and without metrics services.
 # This prevents port collision and consumption when these services are not needed.
 # All images are re-built prior to being started.
 .PHONY: start-flow
 start-flow:
-	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker-compose -f docker-compose.nodes.yml up -d --build
+	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.nodes.yml up -d --build
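 
# Typical localnet workflow, as an illustrative sketch (all targets referenced
# here are defined in this Makefile):
#
#   make bootstrap    # generate bootstrap data and docker-compose.nodes.yml
#   make start        # build images, then start the metrics and flow services
#   make stop         # stop everything and remove volumes and orphans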
# Same as start-flow, but most recently built images are used.
 .PHONY: start-flow-cached
 start-flow-cached:
-	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker-compose -f docker-compose.nodes.yml up -d
+	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.nodes.yml up -d
 
 .PHONY: build-flow
 build-flow:
-	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker-compose -f docker-compose.nodes.yml build
+	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.nodes.yml build
 
 .PHONY: stop
 stop:
-	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker-compose -f docker-compose.metrics.yml -f docker-compose.nodes.yml down -v --remove-orphans
+	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker compose -f docker-compose.metrics.yml -f docker-compose.nodes.yml down -v --remove-orphans
 
 .PHONY: load
 load:
-	go run --tags relic ../benchmark/cmd/manual -log-level info -tps 1,10,100 -tps-durations 30s,30s
+	go run ../benchmark/cmd/manual -log-level info -tps 1,1,1 -tps-durations 30s,30s
 
 .PHONY: tps-ci-smoke
 tps-ci-smoke:
-	go run --tags relic ../benchmark/cmd/ci -log-level info -tps-initial 1 -tps-min 1 -tps-max 10 -duration 20s -tps-adjust-interval 1s -stat-interval 1s -bigquery-upload=false
+	go run ../benchmark/cmd/ci -log-level info -tps-initial 1 -tps-min 1 -tps-max 10 -duration 20s -tps-adjust-interval 1s -stat-interval 1s -bigquery-upload=false
 
 .PHONY: tps-ci
 tps-ci: bootstrap-ci build-flow start-flow
-	go run --tags relic ../benchmark/cmd/ci -log-level info -tps-initial $(TPS_INIT) -tps-min $(TPS_MIN) -tps-max $(TPS_MAX) -duration $(DURATION)
+	go run ../benchmark/cmd/ci -log-level info -tps-initial $(TPS_INIT) -tps-min $(TPS_MIN) -tps-max $(TPS_MAX) -duration $(DURATION)
 
 .PHONY: clean-data
 clean-data:
 	DOCKER_BUILDKIT=1 docker build -t environment-clean ../../cmd
-	docker run --mount=type=bind,source="$(CURRENT_DIRECTORY)"/data,target=/data environment-clean chmod -R 777 /data
+	docker run --rm --mount=type=bind,source="$(CURRENT_DIRECTORY)"/data,target=/data environment-clean chmod -R 777 /data
 
 	# deletes all generated files and folders from bootstrap and test running
 	rm -rf ./data
@@ -141,9 +154,10 @@ clean-data:
 	rm -rf ./profiler
 	rm -f ./targets.nodes.json
 	rm -f ./docker-compose.nodes.yml
+	rm -f ./ports.nodes.json
 
 # deletes the stopped environment-clean container(s) - running this command inside another target doesn't delete the containers so it's isolated to run in a separate target
-# Note: running this target shows an error on the command line "make: *** [clean-data2] Error 1" but the container is still deletes
+# Note: running this target shows an error on the command line "make: *** [clean-data2] Error 1" but the container is still deleted
 .PHONY: clean-data2
 clean-data2:
 	docker rm $(shell docker ps -aq --filter ancestor=environment-clean)
diff --git a/integration/localnet/README.md b/integration/localnet/README.md
index 7dafa747969..c5846dabbf5 100644
--- a/integration/localnet/README.md
+++ b/integration/localnet/README.md
@@ -313,9 +313,10 @@ Create a file (for example `my_script.cdc`) containing following cadence code:
 import FungibleToken from 0xee82856bf20e2aa6
 import FlowToken from 0x0ae53cb6e3f42a79
 
-pub fun main(address: Address): UFix64 {
+access(all)
+fun main(address: Address): UFix64 {
     let acct = getAccount(address)
-    let vaultRef = acct.getCapability(/public/flowTokenBalance)!.borrow<&FlowToken.Vault{FungibleToken.Balance}>()
+    let vaultRef = acct.capabilities.borrow<&FlowToken.Vault>(/public/flowTokenBalance) ??
panic("Could not borrow Balance reference to the Vault") return vaultRef.balance } @@ -331,7 +332,8 @@ The script should output the account balance of the specified account. You can also execute simple script without creating files, by providing the script in the command, for example: ``` # flow scripts execute -n localnet <(echo """ -pub fun main(address: Address): UFix64 { +access(all) +fun main(address: Address): UFix64 { return getAccount(address).balance } """) "<ACCOUNT_ADDRESS>" @@ -341,8 +343,8 @@ pub fun main(address: Address): UFix64 { Create new cadence contract file from [this template.](https://github.com/onflow/flow-core-contracts/blob/master/transactions/flowToken/transfer_tokens.cdc) Make sure that contract imports have values that match your cli config, following the CLI configuration chapter above it should look like: ``` -import FungibleToken from "cadence/contracts/FungibleToken.cdc" -import FlowToken from "cadence/contracts/FlowToken.cdc" +import "FungibleToken" +import "FlowToken" ``` Send the transaction with this contract to localnet: ``` diff --git a/integration/localnet/builder/bootstrap.go b/integration/localnet/builder/bootstrap.go index 201aaaade58..58349db161c 100644 --- a/integration/localnet/builder/bootstrap.go +++ b/integration/localnet/builder/bootstrap.go @@ -1,6 +1,7 @@ package main import ( + crand "crypto/rand" "encoding/json" "errors" "flag" @@ -10,65 +11,72 @@ import ( "os" "path/filepath" "runtime" + "strconv" "time" "github.com/go-yaml/yaml" - "github.com/plus3it/gorecurcopy" "github.com/onflow/flow-go/cmd/build" - "github.com/onflow/flow-go/integration/testnet" - "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/protocol_state" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" + + "github.com/onflow/flow-go/integration/testnet" ) const ( - BootstrapDir = "./bootstrap" - ProfilerDir = "./profiler" - DataDir = "./data" - TrieDir = "./trie" - DockerComposeFile = "./docker-compose.nodes.yml" - DockerComposeFileVersion = "3.7" - PrometheusTargetsFile = "./targets.nodes.json" - PortMapFile = "./ports.nodes.json" - DefaultObserverRole = "observer" - DefaultLogLevel = "DEBUG" - DefaultGOMAXPROCS = 8 - DefaultMaxObservers = 100 - DefaultCollectionCount = 3 - DefaultConsensusCount = 3 - DefaultExecutionCount = 1 - DefaultVerificationCount = 1 - DefaultAccessCount = 1 - DefaultObserverCount = 0 - DefaultNClusters = 1 - DefaultProfiler = false - DefaultProfileUploader = false - DefaultTracing = true - DefaultCadenceTracing = false - DefaultExtensiveTracing = false - DefaultConsensusDelay = 800 * time.Millisecond - DefaultCollectionDelay = 950 * time.Millisecond + BootstrapDir = "./bootstrap" + ProfilerDir = "./profiler" + DataDir = "./data" + TrieDir = "./trie" + DockerComposeFile = "./docker-compose.nodes.yml" + DockerComposeFileVersion = "3.7" + PrometheusTargetsFile = "./targets.nodes.json" + PortMapFile = "./ports.nodes.json" + DefaultObserverRole = "observer" + DefaultLogLevel = "DEBUG" + DefaultGOMAXPROCS = 8 + DefaultMaxObservers = 100 + DefaultCollectionCount = 3 + DefaultConsensusCount = 3 + DefaultExecutionCount = 1 + DefaultVerificationCount = 1 + DefaultAccessCount = 1 + DefaultObserverCount = 0 + DefaultTestExecutionCount = 0 + DefaultNClusters = 1 + DefaultProfiler = false + DefaultProfileUploader = false + DefaultTracing = true + DefaultExtensiveTracing = false + DefaultConsensusDelay = 800 * 
time.Millisecond
+	DefaultCollectionDelay    = 950 * time.Millisecond
 )
 
 var (
-	collectionCount        int
-	consensusCount         int
-	executionCount         int
-	verificationCount      int
-	accessCount            int
-	observerCount          int
-	nClusters              uint
-	numViewsInStakingPhase uint64
-	numViewsInDKGPhase     uint64
-	numViewsEpoch          uint64
-	profiler               bool
-	profileUploader        bool
-	tracing                bool
-	cadenceTracing         bool
-	extesiveTracing        bool
-	consensusDelay         time.Duration
-	collectionDelay        time.Duration
-	logLevel               string
+	collectionCount             int
+	consensusCount              int
+	executionCount              int
+	verificationCount           int
+	accessCount                 int
+	observerCount               int
+	testExecutionCount          int
+	nClusters                   uint
+	numViewsInStakingPhase      uint64
+	numViewsInDKGPhase          uint64
+	numViewsEpoch               uint64
+	kvStoreVersion              string
+	epochExtensionViewCount     uint64
+	numViewsPerSecond           uint64
+	finalizationSafetyThreshold uint64
+	profiler                    bool
+	profileUploader             bool
+	tracing                     bool
+	extensiveTracing            bool
+	consensusDelay              time.Duration
+	collectionDelay             time.Duration
+	logLevel                    string
 
 	ports *PortAllocator
 )
@@ -80,15 +88,19 @@ func init() {
 	flag.IntVar(&verificationCount, "verification", DefaultVerificationCount, "number of verification nodes")
 	flag.IntVar(&accessCount, "access", DefaultAccessCount, "number of staked access nodes")
 	flag.IntVar(&observerCount, "observer", DefaultObserverCount, "number of observers")
+	flag.IntVar(&testExecutionCount, "test-execution", DefaultTestExecutionCount, "number of test execution nodes")
 	flag.UintVar(&nClusters, "nclusters", DefaultNClusters, "number of collector clusters")
 	flag.Uint64Var(&numViewsEpoch, "epoch-length", 10000, "number of views in epoch")
 	flag.Uint64Var(&numViewsInStakingPhase, "epoch-staking-phase-length", 2000, "number of views in epoch staking phase")
 	flag.Uint64Var(&numViewsInDKGPhase, "epoch-dkg-phase-length", 2000, "number of views in epoch dkg phase")
+	flag.StringVar(&kvStoreVersion, "kvstore-version", "default", "protocol state KVStore version to initialize ('default' or an integer equal to a supported protocol version: '0', '1', '2', ...)")
+	flag.Uint64Var(&epochExtensionViewCount, "kvstore-epoch-extension-view-count", 0, "length of epoch extension in views (0 means use the protocol default of 100_000, approximately 1 day)")
+	flag.Uint64Var(&finalizationSafetyThreshold, "kvstore-finalization-safety-threshold", 0, "number of views for safety threshold T (assume: one finalization occurs within T blocks)")
+	flag.Uint64Var(&numViewsPerSecond, "target-view-rate", 1, "target number of views per second")
 	flag.BoolVar(&profiler, "profiler", DefaultProfiler, "whether to enable the auto-profiler")
 	flag.BoolVar(&profileUploader, "profile-uploader", DefaultProfileUploader, "whether to upload profiles to the cloud")
 	flag.BoolVar(&tracing, "tracing", DefaultTracing, "whether to enable low-overhead tracing in flow")
-	flag.BoolVar(&cadenceTracing, "cadence-tracing", DefaultCadenceTracing, "whether to enable the tracing in cadance")
-	flag.BoolVar(&extesiveTracing, "extensive-tracing", DefaultExtensiveTracing, "enables high-overhead tracing in fvm")
+	flag.BoolVar(&extensiveTracing, "extensive-tracing", DefaultExtensiveTracing, "enables high-overhead tracing in fvm")
 	flag.DurationVar(&consensusDelay, "consensus-delay", DefaultConsensusDelay, "delay on consensus node block proposals")
 	flag.DurationVar(&collectionDelay, "collection-delay", DefaultCollectionDelay, "delay on collection node block proposals")
 	flag.StringVar(&logLevel, "loglevel", DefaultLogLevel, "log level for all nodes")
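+
+	// Illustrative invocation (a sketch; in practice these flags are driven by
+	// the integration/localnet Makefile variables of the same names):
+	//
+	//   go run builder/*.go -test-execution=1 -kvstore-version=2 \
+	//       -kvstore-epoch-extension-view-count=600 \
+	//       -kvstore-finalization-safety-threshold=100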
@@ -118,17 +130,49 @@ func main() {
 	// Prepare test node configurations of each type: access, execution, verification, etc.
 	flowNodes := prepareFlowNodes()
 
+	defaultEpochSafetyParams, err := protocol.DefaultEpochSafetyParams(flow.Localnet)
+	if err != nil {
+		panic(fmt.Sprintf("could not get default epoch commit safety parameters: %s", err))
+	}
+
 	// Generate a Flow network config for localnet
 	flowNetworkOpts := []testnet.NetworkConfigOpt{testnet.WithClusters(nClusters)}
 	if numViewsEpoch != 0 {
 		flowNetworkOpts = append(flowNetworkOpts, testnet.WithViewsInEpoch(numViewsEpoch))
 	}
+	if numViewsPerSecond != 0 {
+		flowNetworkOpts = append(flowNetworkOpts, testnet.WithViewsPerSecond(numViewsPerSecond))
+	}
 	if numViewsInStakingPhase != 0 {
 		flowNetworkOpts = append(flowNetworkOpts, testnet.WithViewsInStakingAuction(numViewsInStakingPhase))
 	}
 	if numViewsInDKGPhase != 0 {
 		flowNetworkOpts = append(flowNetworkOpts, testnet.WithViewsInDKGPhase(numViewsInDKGPhase))
 	}
+
+	// Set default finalizationSafetyThreshold if not explicitly set
+	if finalizationSafetyThreshold == 0 {
+		finalizationSafetyThreshold = defaultEpochSafetyParams.FinalizationSafetyThreshold
+	}
+	// Set default epochExtensionViewCount if not explicitly set
+	if epochExtensionViewCount == 0 {
+		epochExtensionViewCount = defaultEpochSafetyParams.EpochExtensionViewCount
+	}
+
+	kvStoreFactory := func(epochStateID flow.Identifier) (protocol_state.KVStoreAPI, error) {
+		if kvStoreVersion != "default" {
+			version, err := strconv.ParseUint(kvStoreVersion, 10, 64)
+			if err != nil {
+				panic(fmt.Sprintf("--kvstore-version must be 'default' or a supported integer version number (e.g. '0', '1', '2'); got %s", kvStoreVersion))
+			}
+			return kvstore.NewKVStore(version, finalizationSafetyThreshold, epochExtensionViewCount, epochStateID)
+		} else {
+			return kvstore.NewDefaultKVStore(finalizationSafetyThreshold, epochExtensionViewCount, epochStateID)
+		}
+	}
+	flowNetworkOpts = append(flowNetworkOpts, testnet.WithKVStoreFactory(kvStoreFactory))
+
 	flowNetworkConf := testnet.NewNetworkConfig("localnet", flowNodes, flowNetworkOpts...)
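+
+	// Note on the kvStoreFactory above (a sketch of its behavior): the version
+	// string is parsed lazily, inside the factory, so a malformed
+	// --kvstore-version only panics once bootstrapping actually constructs the
+	// root protocol state, e.g.:
+	//
+	//   store, err := kvStoreFactory(epochStateID) // panics on a malformed version
+	//
+	// (epochStateID stands in for the root epoch state commitment supplied by the
+	// bootstrap code.)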
displayFlowNetworkConf(flowNetworkConf) @@ -139,12 +183,13 @@ func main() { dockerServices := make(Services) dockerServices = prepareFlowServices(dockerServices, flowNodeContainerConfigs) serviceDisc := prepareServiceDiscovery(flowNodeContainerConfigs) - err := writePrometheusConfig(serviceDisc) + err = writePrometheusConfig(serviceDisc) if err != nil { panic(err) } dockerServices = prepareObserverServices(dockerServices, flowNodeContainerConfigs) + dockerServices = prepareTestExecutionService(dockerServices, flowNodeContainerConfigs) err = writeDockerComposeConfig(dockerServices) if err != nil { @@ -336,8 +381,9 @@ func prepareConsensusService(container testnet.ContainerConfig, i int, n int) Se timeout := 1200*time.Millisecond + consensusDelay service.Command = append(service.Command, - fmt.Sprintf("--block-rate-delay=%s", consensusDelay), + fmt.Sprintf("--cruise-ctl-fallback-proposal-duration=%s", consensusDelay), fmt.Sprintf("--hotstuff-min-timeout=%s", timeout), + "--cruise-ctl-max-view-duration=2s", "--chunk-alpha=1", "--emergency-sealing-active=false", "--insecure-access-api=false", @@ -352,6 +398,7 @@ func prepareVerificationService(container testnet.ContainerConfig, i int, n int) service.Command = append(service.Command, "--chunk-alpha=1", + "--scheduled-callbacks-enabled=true", ) return service @@ -363,7 +410,6 @@ func prepareCollectionService(container testnet.ContainerConfig, i int, n int) S timeout := 1200*time.Millisecond + collectionDelay service.Command = append(service.Command, - fmt.Sprintf("--block-rate-delay=%s", collectionDelay), fmt.Sprintf("--hotstuff-min-timeout=%s", timeout), fmt.Sprintf("--ingress-addr=%s:%s", container.ContainerName, testnet.GRPCPort), "--insecure-access-api=false", @@ -383,19 +429,15 @@ func prepareExecutionService(container testnet.ContainerConfig, i int, n int) Se panic(err) } - // we need to actually copy the execution state into the directory for bootstrapping - sourceDir := "./" + filepath.Join(BootstrapDir, bootstrap.DirnameExecutionState) - err = gorecurcopy.CopyDirectory(sourceDir, trieDir) - if err != nil { - panic(err) - } - service.Command = append(service.Command, "--triedir=/trie", fmt.Sprintf("--rpc-addr=%s:%s", container.ContainerName, testnet.GRPCPort), - fmt.Sprintf("--cadence-tracing=%t", cadenceTracing), - fmt.Sprintf("--extensive-tracing=%t", extesiveTracing), + fmt.Sprintf("--extensive-tracing=%t", extensiveTracing), "--execution-data-dir=/data/execution-data", + "--chunk-data-pack-dir=/data/chunk-data-pack", + "--pruning-config-threshold=20", + "--pruning-config-sleep-after-iteration=1m", + "--scheduled-callbacks-enabled=true", ) service.Volumes = append(service.Volumes, @@ -415,7 +457,7 @@ func prepareAccessService(container testnet.ContainerConfig, i int, n int) Servi fmt.Sprintf("--secure-rpc-addr=%s:%s", container.ContainerName, testnet.GRPCSecurePort), fmt.Sprintf("--http-addr=%s:%s", container.ContainerName, testnet.GRPCWebPort), fmt.Sprintf("--rest-addr=%s:%s", container.ContainerName, testnet.RESTPort), - fmt.Sprintf("--state-stream-addr=%s:%s", container.ContainerName, testnet.ExecutionStatePort), + fmt.Sprintf("--state-stream-addr=%s:%s", container.ContainerName, testnet.GRPCPort), fmt.Sprintf("--collection-ingress-port=%s", testnet.GRPCPort), "--supports-observer=true", fmt.Sprintf("--public-network-address=%s:%s", container.ContainerName, testnet.PublicNetworkPort), @@ -424,7 +466,13 @@ func prepareAccessService(container testnet.ContainerConfig, i int, n int) Servi "--log-tx-time-to-finalized-executed", 
"--execution-data-sync-enabled=true", "--execution-data-dir=/data/execution-data", - fmt.Sprintf("--state-stream-addr=%s:%s", container.ContainerName, testnet.ExecutionStatePort), + "--public-network-execution-data-sync-enabled=true", + "--execution-data-indexing-enabled=true", + "--execution-state-dir=/data/execution-state", + "--script-execution-mode=execution-nodes-only", + "--event-query-mode=execution-nodes-only", + "--tx-result-query-mode=execution-nodes-only", + "--scheduled-callbacks-enabled=true", ) service.AddExposedPorts( @@ -432,7 +480,6 @@ func prepareAccessService(container testnet.ContainerConfig, i int, n int) Servi testnet.GRPCSecurePort, testnet.GRPCWebPort, testnet.RESTPort, - testnet.ExecutionStatePort, testnet.PublicNetworkPort, ) @@ -445,21 +492,29 @@ func prepareObserverService(i int, observerName string, agPublicKey string) Serv service := defaultService(observerName, DefaultObserverRole, dataDir, profilerDir, i) service.Command = append(service.Command, - fmt.Sprintf("--bootstrap-node-addresses=%s:%s", testnet.PrimaryAN, testnet.PublicNetworkPort), - fmt.Sprintf("--bootstrap-node-public-keys=%s", agPublicKey), + fmt.Sprintf("--observer-mode-bootstrap-node-addresses=%s:%s", testnet.PrimaryAN, testnet.PublicNetworkPort), + fmt.Sprintf("--observer-mode-bootstrap-node-public-keys=%s", agPublicKey), fmt.Sprintf("--upstream-node-addresses=%s:%s", testnet.PrimaryAN, testnet.GRPCSecurePort), fmt.Sprintf("--upstream-node-public-keys=%s", agPublicKey), fmt.Sprintf("--observer-networking-key-path=/bootstrap/private-root-information/%s_key", observerName), - "--bind=0.0.0.0:0", + "--bind=0.0.0.0:3569", fmt.Sprintf("--rpc-addr=%s:%s", observerName, testnet.GRPCPort), fmt.Sprintf("--secure-rpc-addr=%s:%s", observerName, testnet.GRPCSecurePort), fmt.Sprintf("--http-addr=%s:%s", observerName, testnet.GRPCWebPort), + fmt.Sprintf("--rest-addr=%s:%s", observerName, testnet.RESTPort), + fmt.Sprintf("--state-stream-addr=%s:%s", observerName, testnet.GRPCPort), + "--execution-data-dir=/data/execution-data", + "--execution-data-sync-enabled=true", + "--execution-data-indexing-enabled=true", + "--execution-state-dir=/data/execution-state", + "--event-query-mode=execution-nodes-only", ) service.AddExposedPorts( testnet.GRPCPort, testnet.GRPCSecurePort, testnet.GRPCWebPort, + testnet.RESTPort, ) // observer services rely on the access gateway @@ -517,7 +572,7 @@ func defaultService(name, role, dataDir, profilerDir string, i int) Service { Dockerfile: "cmd/Dockerfile", Args: map[string]string{ "TARGET": fmt.Sprintf("./cmd/%s", role), - "VERSION": build.Semver(), + "VERSION": build.Version(), "COMMIT": build.Commit(), "GOARCH": runtime.GOARCH, }, @@ -552,6 +607,18 @@ func writeDockerComposeConfig(services Services) error { return err } + // add networks section + _, err = f.WriteString(` +networks: + default: + name: localnet_network + driver: bridge + attachable: true +`) + if err != nil { + return err + } + return nil } @@ -637,6 +704,24 @@ func getAccessGatewayPublicKey(flowNodeContainerConfigs []testnet.ContainerConfi return "", fmt.Errorf("Unable to find public key for Access Gateway expected in container '%s'", testnet.PrimaryAN) } +func getAccessID(flowNodeContainerConfigs []testnet.ContainerConfig) (string, error) { + for _, container := range flowNodeContainerConfigs { + if container.Role == flow.RoleAccess { + return container.NodeID.String(), nil + } + } + return "", fmt.Errorf("Unable to find Access node") +} + +func getExecutionNodeConfig(flowNodeContainerConfigs 
[]testnet.ContainerConfig) (testnet.ContainerConfig, error) {
+	for _, container := range flowNodeContainerConfigs {
+		if container.Role == flow.RoleExecution {
+			return container, nil
+		}
+	}
+	return testnet.ContainerConfig{}, fmt.Errorf("Unable to find execution node")
+}
+
 func prepareObserverServices(dockerServices Services, flowNodeContainerConfigs []testnet.ContainerConfig) Services {
 	if observerCount == 0 {
 		return dockerServices
@@ -664,14 +749,68 @@ func prepareObserverServices(dockerServices Services, flowNodeContainerConfigs [
 		dockerServices[observerName] = observerService
 
 		// Generate observer private key (localnet only, not for production)
-		err := testnet.WriteObserverPrivateKey(observerName, BootstrapDir)
+		_, err := testnet.WriteObserverPrivateKey(observerName, BootstrapDir)
 		if err != nil {
 			panic(err)
 		}
 	}
 
+	fmt.Println()
 	fmt.Println("Observer services bootstrapping data generated...")
 	fmt.Printf("Access Gateway (%s) public network libp2p key: %s\n\n", testnet.PrimaryAN, agPublicKey)
 
 	return dockerServices
 }
+
+func prepareTestExecutionService(dockerServices Services, flowNodeContainerConfigs []testnet.ContainerConfig) Services {
+	if testExecutionCount == 0 {
+		return dockerServices
+	}
+
+	agPublicKey, err := getAccessGatewayPublicKey(flowNodeContainerConfigs)
+	if err != nil {
+		panic(err)
+	}
+
+	publicAccessID, err := getAccessID(flowNodeContainerConfigs)
+	if err != nil {
+		panic(err)
+	}
+
+	containerConfig, err := getExecutionNodeConfig(flowNodeContainerConfigs)
+	if err != nil {
+		panic(err)
+	}
+
+	var nodeid flow.Identifier
+	_, _ = crand.Read(nodeid[:])
+	address := "test_execution_1:2137"
+
+	observerName := fmt.Sprintf("%s_%d", "test_execution", 1)
+	// Generate test execution node bootstrap data (localnet only, not for production)
+	nodeinfo, err := testnet.WriteTestExecutionService(nodeid, address, observerName, BootstrapDir)
+	if err != nil {
+		panic(err)
+	}
+
+	containerConfig.NodeInfo = nodeinfo
+	containerConfig.ContainerName = observerName
+	fmt.Println("Test execution node container config: ", containerConfig)
+
+	observerService := prepareExecutionService(containerConfig, 1, 1)
+	observerService.Command = append(observerService.Command,
+		"--observer-mode=true",
+		fmt.Sprintf("--observer-mode-bootstrap-node-addresses=%s:%s", testnet.PrimaryAN, testnet.PublicNetworkPort),
+		fmt.Sprintf("--observer-mode-bootstrap-node-public-keys=%s", agPublicKey),
+		fmt.Sprintf("--public-access-id=%s", publicAccessID),
+	)
+
+	// Add a docker container for this test execution node
+	dockerServices[observerName] = observerService
+
+	fmt.Println()
+	fmt.Println("Test execution services bootstrapping data generated...")
+	fmt.Printf("Access Gateway (%s) public network libp2p key: %s\n\n", testnet.PrimaryAN, agPublicKey)
+
+	return dockerServices
+}
diff --git a/integration/localnet/builder/ports.go b/integration/localnet/builder/ports.go
index 2bea33701fb..f3b8b581004 100644
--- a/integration/localnet/builder/ports.go
+++ b/integration/localnet/builder/ports.go
@@ -29,7 +29,7 @@ var config = map[string]*portConfig{
 		portCount: 10,
 	},
 	"observer": {
-		start:     5000, // 5000-6000 => 100 nodes
+		start:     5001, // 5001-6000 => 100 nodes
 		end:       6000,
 		portCount: 10,
 	},
@@ -144,7 +144,6 @@ func (a *PortAllocator) Print() {
 		testnet.GRPCSecurePort,
 		testnet.GRPCWebPort,
 		testnet.RESTPort,
-		testnet.ExecutionStatePort,
 		testnet.PublicNetworkPort,
 	} {
 		if hostPort, ok := a.exposedPorts[node][containerPort]; ok {
@@ -165,8 +164,6 @@ func portName(containerPort string) string {
 		return "GRPC-Web"
 	case testnet.RESTPort:
 		return "REST"
-	case
testnet.ExecutionStatePort: - return "Execution Data" case testnet.AdminPort: return "Admin" case testnet.PublicNetworkPort: diff --git a/integration/localnet/client/Dockerfile b/integration/localnet/client/Dockerfile index ac1fbb8d8e7..f80ab6c59d4 100644 --- a/integration/localnet/client/Dockerfile +++ b/integration/localnet/client/Dockerfile @@ -1,13 +1,13 @@ -FROM golang:1.17 +FROM golang:1.25 COPY flow-localnet.json /go WORKDIR /go -RUN curl -L https://github.com/onflow/flow-cli/archive/refs/tags/v0.36.2.tar.gz | tar -xzv -RUN cd flow-cli-0.36.2 && go mod download -RUN cd flow-cli-0.36.2 && make -RUN /go/flow-cli-0.36.2/cmd/flow/flow version -RUN cp /go/flow-cli-0.36.2/cmd/flow/flow /go/flow +RUN curl -L https://github.com/onflow/flow-cli/archive/refs/tags/v1.3.3.tar.gz | tar -xzv +RUN cd flow-cli-1.3.3 && go mod download +RUN cd flow-cli-1.3.3 && make +RUN /go/flow-cli-1.3.3/cmd/flow/flow version +RUN cp /go/flow-cli-1.3.3/cmd/flow/flow /go/flow CMD /go/flow -f /go/flow-localnet.json -n observer blocks get latest diff --git a/integration/localnet/conf/grafana-loader.json b/integration/localnet/conf/grafana-loader.json new file mode 100644 index 00000000000..b8f2d4eb718 --- /dev/null +++ b/integration/localnet/conf/grafana-loader.json @@ -0,0 +1,690 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 3, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 18, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "rate(flow_execution_effort_estimation_transactions_sent[$__rate_interval])", + "instant": false, + "legendFormat": "sent", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "rate(flow_execution_effort_estimation_transactions_received[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "received", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": 
"flow_execution_effort_estimation_transactions_in_flight", + "hide": false, + "instant": false, + "legendFormat": "in flight", + "range": true, + "refId": "C" + } + ], + "title": "Loader total transactions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 18, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "rate(flow_execution_effort_estimation_loader_transaction_sent[$__rate_interval])", + "instant": false, + "legendFormat": "sent", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "rate(flow_execution_effort_estimation_loader_transaction_received[$__rate_interval])", + "hide": false, + "instant": false, + "legendFormat": "received", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "flow_execution_effort_estimation_loader_desired_tps", + "hide": false, + "instant": false, + "legendFormat": "desired", + "range": true, + "refId": "C" + } + ], + "title": "Loader TPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 17, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": 
"builder", + "expr": "rate(flow_execution_effort_estimation_accounts_created[$__rate_interval])", + "instant": false, + "legendFormat": "accounts created", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "flow_execution_effort_estimation_account_demand", + "hide": false, + "instant": false, + "legendFormat": "accounts demanded", + "range": true, + "refId": "B" + } + ], + "title": "Accounts", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 17, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "flow_execution_effort_estimation_transaction_flight_time_seconds", + "instant": false, + "legendFormat": "quantile {{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Transaction flight time", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 16, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "flow_execution_effort_estimation_average_flight_duration", + "instant": false, + "range": true, + "refId": "A" + } + ], + "title": "Panel Title", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "error" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 16, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "flow_execution_effort_estimation_loader_controller_parameters", + "instant": false, + "legendFormat": "{{parameter}}", + "range": true, + "refId": "A" + } + ], + "title": "TPS controller parameters", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Loader", + "uid": "c08ec574-5504-4601-8246-b9ea7f6d2c60", + "version": 8, + "weekStart": "" +} diff --git a/integration/localnet/conf/prometheus.yaml b/integration/localnet/conf/prometheus.yaml index 83932524a5e..3dd48eff60f 100755 --- a/integration/localnet/conf/prometheus.yaml +++ b/integration/localnet/conf/prometheus.yaml @@ -1,10 +1,10 @@ global: - scrape_interval: 15s + scrape_interval: 15s evaluation_interval: 15s scrape_configs: - job_name: node static_configs: - - targets: ["exporter:9100"] + - targets: [ "exporter:9100" ] labels: network: localnet - job_name: flow @@ -15,4 +15,10 @@ scrape_configs: - job_name: pushgateway honor_labels: true static_configs: - - targets: ["pushgateway:9091"] + - targets: [ "pushgateway:9091" ] + + # this is used if the loader is added to the network later + - job_name: loader + scrape_interval: 1s + static_configs: + - targets: [ "loader:8443" ] diff --git a/integration/localnet/conf/tempo-local.yaml b/integration/localnet/conf/tempo-local.yaml index d2f4089bbf8..fd453459942 100644 --- a/integration/localnet/conf/tempo-local.yaml +++ b/integration/localnet/conf/tempo-local.yaml @@ -41,7 +41,7 @@ storage: index_downsample_bytes: 1000 # number of bytes per index record encoding: zstd # block encoding/compression. options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd, s2 wal: - path: /tmp/tempo/wal # where to store the the wal locally + path: /tmp/tempo/wal # where to store the wal locally encoding: snappy # wal encoding/compression. 
options: none, gzip, lz4-64k, lz4-256k, lz4-1M, lz4, snappy, zstd, s2 local: path: /tmp/tempo/blocks @@ -50,4 +50,4 @@ storage: queue_depth: 10000 overrides: - metrics_generator_processors: [service-graphs, span-metrics] \ No newline at end of file + metrics_generator_processors: [service-graphs, span-metrics] diff --git a/integration/localnet/docker-compose.metrics.yml b/integration/localnet/docker-compose.metrics.yml index ed19edf70c4..6e0fbfffe96 100644 --- a/integration/localnet/docker-compose.metrics.yml +++ b/integration/localnet/docker-compose.metrics.yml @@ -1,7 +1,7 @@ version: '3.7' services: tempo: - image: grafana/tempo:main-4d7e191 + image: grafana/tempo:main-afb0389 user: root command: [ "-config.file=/etc/tempo.yaml" ] volumes: @@ -11,7 +11,7 @@ services: - "14268" loki: - image: grafana/loki:main-9218e46 + image: grafana/loki:main-f1bbdc5 user: root command: [ "-config.file=/etc/loki/local-config.yaml" ] volumes: @@ -20,14 +20,14 @@ services: - "3100:3100" promtail: - image: grafana/promtail:main-9218e46 + image: grafana/promtail:main-f1bbdc5 command: -config.file=/etc/promtail/promtail-config.yaml volumes: - ./conf/promtail-config.yaml:/etc/promtail/promtail-config.yaml:z - /var/run/docker.sock:/var/run/docker.sock:z prometheus: - image: prom/prometheus:v2.36.0 + image: prom/prometheus:v2.46.0 user: root volumes: - ./conf/prometheus.yaml:/etc/prometheus/prometheus.yaml:z @@ -38,22 +38,23 @@ services: - 9090:9090 pushgateway: - image: prom/pushgateway:v1.4.3 + image: prom/pushgateway:v1.6.0 ports: - 9091:9091 exporter: - image: prom/node-exporter:v1.3.1 + image: prom/node-exporter:v1.6.1 ports: - "9100:9100" grafana: - image: grafana/grafana:9.0.0-beta3 + image: grafana/grafana:10.0.4 volumes: - ./conf/grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml:z - ./conf/grafana-dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:z - ./conf/grafana-localnet.json:/etc/grafana/provisioning/dashboards/localnet.json:z - ./conf/grafana-exec-sync.json:/etc/grafana/provisioning/dashboards/exec-sync.json:z + - ./conf/grafana-loader.json:/etc/grafana/provisioning/dashboards/loader.json:z - ./conf/grafana.ini:/etc/grafana/grafana.ini environment: - GF_AUTH_ANONYMOUS_ENABLED=true @@ -61,3 +62,9 @@ services: - GF_AUTH_DISABLE_LOGIN_FORM=true ports: - "3000:3000" + +networks: + default: + name: localnet_network + driver: bridge + attachable: true diff --git a/integration/testnet/client.go b/integration/testnet/client.go index ab2eb0b751e..f12330dfc6a 100644 --- a/integration/testnet/client.go +++ b/integration/testnet/client.go @@ -2,22 +2,28 @@ package testnet import ( "context" + "encoding/base64" + "encoding/json" "fmt" + "slices" "time" - "github.com/onflow/flow-go-sdk/templates" - "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" "github.com/onflow/cadence" - + "github.com/onflow/crypto/hash" sdk "github.com/onflow/flow-go-sdk" client "github.com/onflow/flow-go-sdk/access/grpc" - "github.com/onflow/flow-go-sdk/crypto" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" + "github.com/onflow/flow-go-sdk/templates" + accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/fvm/crypto" + "github.com/onflow/flow-go/model/encoding/rlp" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/utils/dsl" @@ -32,7 +38,6 @@ type 
Client struct { accountKey *sdk.AccountKey accountKeyPriv sdkcrypto.PrivateKey signer sdkcrypto.InMemorySigner - seqNo uint64 Chain flow.Chain account *sdk.Account } @@ -40,19 +45,27 @@ type Client struct { // NewClientWithKey returns a new client to an Access API listening at the given // address, using the given account key for signing transactions. func NewClientWithKey(accessAddr string, accountAddr sdk.Address, key sdkcrypto.PrivateKey, chain flow.Chain) (*Client, error) { - - flowClient, err := client.NewClient(accessAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + flowClient, err := client.NewClient( + accessAddr, + client.WithGRPCDialOptions( + grpc.WithTransportCredentials(insecure.NewCredentials()), + ), + ) if err != nil { return nil, fmt.Errorf("could not create new flow client: %w", err) } - acc, err := flowClient.GetAccount(context.Background(), accountAddr) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + acc, err := getAccount(ctx, flowClient, accountAddr) if err != nil { - return nil, fmt.Errorf("could not get the account %x: %w", accountAddr, err) + return nil, fmt.Errorf("could not get the account %v: %w", accountAddr, err) } + accountKey := acc.Keys[0] - mySigner, err := crypto.NewInMemorySigner(key, accountKey.HashAlgo) + mySigner, err := sdkcrypto.NewInMemorySigner(key, accountKey.HashAlgo) if err != nil { return nil, fmt.Errorf("could not create a signer: %w", err) } @@ -63,7 +76,6 @@ func NewClientWithKey(accessAddr string, accountAddr sdk.Address, key sdkcrypto. accountKeyPriv: key, signer: mySigner, Chain: chain, - seqNo: accountKey.SequenceNumber, account: acc, } return tc, nil @@ -79,18 +91,18 @@ func NewClient(addr string, chain flow.Chain) (*Client, error) { } // Uncomment for debugging keys - //json, err := key.MarshalJSON() - //if err != nil { + // json, err := key.MarshalJSON() + // if err != nil { // return nil, fmt.Errorf("cannot marshal key json: %w", err) - //} - //public := key.PublicKey(1000) - //publicJson, err := public.MarshalJSON() - //if err != nil { + // } + // public := key.PublicKey(1000) + // publicJson, err := public.MarshalJSON() + // if err != nil { // return nil, fmt.Errorf("cannot marshal key json: %w", err) - //} - - //fmt.Printf("New client with private key: \n%s\n", json) - //fmt.Printf("and public key: \n%s\n", publicJson) + // } + // + // fmt.Printf("New client with private key: \n%s\n", json) + // fmt.Printf("and public key: \n%s\n", publicJson) return NewClientWithKey(addr, sdk.Address(chain.ServiceAddress()), privateKey, chain) } @@ -100,9 +112,9 @@ func (c *Client) AccountKeyPriv() sdkcrypto.PrivateKey { return c.accountKeyPriv } -func (c *Client) GetSeqNumber() uint64 { - n := c.seqNo - c.seqNo++ +func (c *Client) GetAndIncrementSeqNumber() uint64 { + n := c.accountKey.SequenceNumber + c.accountKey.SequenceNumber++ return n } @@ -112,23 +124,45 @@ func (c *Client) Events(ctx context.Context, typ string) ([]sdk.BlockEvents, err // DeployContract submits a transaction to deploy a contract with the given // code to the root account. 
-func (c *Client) DeployContract(ctx context.Context, refID sdk.Identifier, contract dsl.Contract) error { +func (c *Client) DeployContract(ctx context.Context, refID sdk.Identifier, contract dsl.Contract) (*sdk.Transaction, error) { + return c.deployContract(ctx, refID, dsl.Transaction{ + Content: dsl.Prepare{ + Content: dsl.SetAccountCode{ + Code: contract.ToCadence(), + Name: contract.Name, + }, + }, + }) +} - code := dsl.Transaction{ - Import: dsl.Import{}, +// UpdateContract submits a transaction to deploy a contract update with the given +// code to the root account. +func (c *Client) UpdateContract(ctx context.Context, refID sdk.Identifier, contract dsl.Contract) (*sdk.Transaction, error) { + return c.deployContract(ctx, refID, dsl.Transaction{ Content: dsl.Prepare{ - Content: dsl.UpdateAccountCode{Code: contract.ToCadence(), Name: contract.Name}, + Content: dsl.SetAccountCode{ + Code: contract.ToCadence(), + Name: contract.Name, + Update: true, + }, }, - } + }) +} +func (c *Client) deployContract(ctx context.Context, refID sdk.Identifier, code dsl.Transaction) (*sdk.Transaction, error) { tx := sdk.NewTransaction(). SetScript([]byte(code.ToCadence())). SetReferenceBlockID(refID). - SetProposalKey(c.SDKServiceAddress(), 0, c.GetSeqNumber()). + SetProposalKey(c.SDKServiceAddress(), 0, c.GetAndIncrementSeqNumber()). SetPayer(c.SDKServiceAddress()). AddAuthorizer(c.SDKServiceAddress()) - return c.SignAndSendTransaction(ctx, tx) + err := c.SignAndSendTransaction(ctx, tx) + if err != nil { + return nil, fmt.Errorf("could not deploy contract: %w", err) + } + + return tx, nil } // SignTransaction signs the transaction using the proposer's key @@ -142,6 +176,60 @@ func (c *Client) SignTransaction(tx *sdk.Transaction) (*sdk.Transaction, error) return tx, err } +func (c *Client) SignTransactionWebAuthN(tx *sdk.Transaction) (*sdk.Transaction, error) { + transactionMessage := tx.EnvelopeMessage() + + extensionData, messageToSign, err := c.validWebAuthnExtensionData(transactionMessage) + if err != nil { + return nil, err + } + sig, err := c.signer.Sign(messageToSign) + if err != nil { + return nil, err + } + tx.AddEnvelopeSignature(tx.Payer, tx.ProposalKey.KeyIndex, sig) + tx.EnvelopeSignatures[0].ExtensionData = slices.Concat([]byte{byte(flow.WebAuthnScheme)}, extensionData) + return tx, nil +} + +func (c *Client) validWebAuthnExtensionData(transactionMessage []byte) ([]byte, []byte, error) { + hasher, err := crypto.NewPrefixedHashing(hash.SHA2_256, flow.TransactionTagString) + if err != nil { + return nil, nil, err + } + authNChallenge := hasher.ComputeHash(transactionMessage) + authNChallengeBase64Url := base64.RawURLEncoding.EncodeToString(authNChallenge) + validUserFlag := byte(0x01) + validClientDataOrigin := "https://testing.com" + rpIDHash := unittest.RandomBytes(32) + sigCounter := unittest.RandomBytes(4) + + // For use in cases where you're testing the other value + validAuthenticatorData := slices.Concat(rpIDHash, []byte{validUserFlag}, sigCounter) + validClientDataJSON := map[string]string{ + "type": flow.WebAuthnTypeGet, + "challenge": authNChallengeBase64Url, + "origin": validClientDataOrigin, + } + + clientDataJsonBytes, err := json.Marshal(validClientDataJSON) + if err != nil { + return nil, nil, err + } + + extensionData := flow.WebAuthnExtensionData{ + AuthenticatorData: validAuthenticatorData, + ClientDataJson: clientDataJsonBytes, + } + extensionDataRLPBytes := rlp.NewMarshaler().MustMarshal(extensionData) + + var clientDataHash [hash.HashLenSHA2_256]byte + 
hash.ComputeSHA2_256(&clientDataHash, clientDataJsonBytes) + messageToSign := slices.Concat(validAuthenticatorData, clientDataHash[:]) + + return extensionDataRLPBytes, messageToSign, nil +} + // SendTransaction submits the transaction to the Access API. The caller must // set up the transaction, including signing it. func (c *Client) SendTransaction(ctx context.Context, tx *sdk.Transaction) error { @@ -159,6 +247,10 @@ func (c *Client) SignAndSendTransaction(ctx context.Context, tx *sdk.Transaction return c.SendTransaction(ctx, tx) } +func (c *Client) GetTransactionResult(ctx context.Context, txID sdk.Identifier) (*sdk.TransactionResult, error) { + return c.client.GetTransactionResult(ctx, txID) +} + func (c *Client) ExecuteScript(ctx context.Context, script dsl.Main) (cadence.Value, error) { code := script.ToCadence() @@ -171,6 +263,15 @@ func (c *Client) ExecuteScript(ctx context.Context, script dsl.Main) (cadence.Va return res, nil } +func (c *Client) ExecuteScriptAtBlock(ctx context.Context, script dsl.Main, blockID sdk.Identifier) (cadence.Value, error) { + res, err := c.client.ExecuteScriptAtBlockID(ctx, blockID, []byte(script.ToCadence()), nil) + if err != nil { + return nil, fmt.Errorf("could not execute script: %w", err) + } + + return res, nil +} + func (c *Client) ExecuteScriptBytes(ctx context.Context, script []byte, args []cadence.Value) (cadence.Value, error) { res, err := c.client.ExecuteScriptAtLatestBlock(ctx, script, args) if err != nil { @@ -193,15 +294,49 @@ func (c *Client) Account() *sdk.Account { return c.account } +// WaitForFinalized waits for the transaction to be finalized, then returns the result. +func (c *Client) WaitForFinalized(ctx context.Context, id sdk.Identifier) (*sdk.TransactionResult, error) { + return c.waitForStatus(ctx, id, sdk.TransactionStatusFinalized) +} + // WaitForSealed waits for the transaction to be sealed, then returns the result. func (c *Client) WaitForSealed(ctx context.Context, id sdk.Identifier) (*sdk.TransactionResult, error) { + return c.waitForStatus(ctx, id, sdk.TransactionStatusSealed) +} + +// WaitForExecuted waits for the transaction to be executed, then returns the result. +func (c *Client) WaitForExecuted(ctx context.Context, id sdk.Identifier) (*sdk.TransactionResult, error) { + return c.waitForStatus(ctx, id, sdk.TransactionStatusExecuted) +} + +// WaitUntilIndexed blocks until the node has indexed the given height. +func (c *Client) WaitUntilIndexed(ctx context.Context, height uint64) error { + for { + resp, err := c.client.RPCClient().GetLatestBlockHeader(ctx, &accessproto.GetLatestBlockHeaderRequest{ + IsSealed: true, + }) + if err != nil { + return fmt.Errorf("could not get metadata response: %w", err) + } + if resp.GetMetadata().GetHighestIndexedHeight() >= height { + return nil + } + time.Sleep(250 * time.Millisecond) + } +} - fmt.Printf("Waiting for transaction %s to be sealed...\n", id) +// waitForStatus waits for the transaction to be in a certain status, then returns the result. 
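A brief aside on the WebAuthn signing path added above, since the layout is easy to misread: the device does not sign the transaction envelope directly. A condensed sketch of the composition, using the same onflow/crypto/hash helpers as the diff:

// webAuthnMessage mirrors validWebAuthnExtensionData above. The challenge inside
// clientDataJSON is the tagged hash of the envelope (crypto.NewPrefixedHashing with
// flow.TransactionTagString), and the message actually signed is the authenticator
// data followed by the SHA2-256 digest of the client data JSON.
func webAuthnMessage(authenticatorData, clientDataJSON []byte) []byte {
	var clientDataHash [hash.HashLenSHA2_256]byte
	hash.ComputeSHA2_256(&clientDataHash, clientDataJSON)
	return slices.Concat(authenticatorData, clientDataHash[:])
}

The extension data itself (authenticator data plus client data JSON) is RLP-encoded and attached to the envelope signature behind a flow.WebAuthnScheme prefix byte, which is exactly what a verifier needs to reconstruct this message. The waitForStatus helper that follows is unrelated to signing; it generalizes the old WaitForSealed loop to any target status.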
+func (c *Client) waitForStatus( + ctx context.Context, + id sdk.Identifier, + targetStatus sdk.TransactionStatus, +) (*sdk.TransactionResult, error) { + fmt.Printf("Waiting for transaction %s to be %v...\n", id, targetStatus) errCount := 0 var result *sdk.TransactionResult var err error - for result == nil || (result.Status != sdk.TransactionStatusSealed) { - childCtx, cancel := context.WithTimeout(ctx, time.Second*5) + for result == nil || (result.Status != targetStatus) { + childCtx, cancel := context.WithTimeout(ctx, time.Second*30) result, err = c.client.GetTransactionResult(childCtx, id) cancel() if err != nil { @@ -219,7 +354,7 @@ func (c *Client) WaitForSealed(ctx context.Context, id sdk.Identifier) (*sdk.Tra } fmt.Println() - fmt.Printf("(Wait for Seal) Transaction %s sealed\n", id) + fmt.Printf("(Wait for Status) Transaction %s %s\n", id, targetStatus) return result, err } @@ -340,22 +475,27 @@ func (c *Client) GetAccount(accountAddress sdk.Address) (*sdk.Account, error) { return account, nil } +func (c *Client) GetAccountAtBlockHeight(ctx context.Context, accountAddress sdk.Address, blockHeight uint64) (*sdk.Account, error) { + account, err := c.client.GetAccountAtBlockHeight(ctx, accountAddress, blockHeight) + if err != nil { + return nil, fmt.Errorf("could not get account at block height: %w", err) + } + return account, nil +} + func (c *Client) CreateAccount( ctx context.Context, accountKey *sdk.AccountKey, - payerAccount *sdk.Account, - payer sdk.Address, latestBlockID sdk.Identifier, ) (sdk.Address, error) { - - payerKey := payerAccount.Keys[0] + payer := c.SDKServiceAddress() tx, err := templates.CreateAccount([]*sdk.AccountKey{accountKey}, nil, payer) if err != nil { return sdk.Address{}, fmt.Errorf("failed to construct create account transaction: %w", err) } - tx.SetGasLimit(1000). + tx.SetComputeLimit(1000). SetReferenceBlockID(latestBlockID). - SetProposalKey(payer, 0, payerKey.SequenceNumber). + SetProposalKey(payer, 0, c.GetAndIncrementSeqNumber()).
SetPayer(payer) err = c.SignAndSendTransaction(ctx, tx) @@ -368,9 +508,63 @@ func (c *Client) CreateAccount( return sdk.Address{}, fmt.Errorf("failed to wait for create account transaction to seal %w", err) } + if result.Error != nil { + return sdk.Address{}, fmt.Errorf("failed to create new account %w", result.Error) + } + if address, ok := c.UserAddress(result); ok { return address, nil } return sdk.Address{}, fmt.Errorf("failed to get account address of the created flow account") } + +func (c *Client) GetEventsForBlockIDs( + ctx context.Context, + eventType string, + blockIDs []sdk.Identifier, +) ([]sdk.BlockEvents, error) { + events, err := c.client.GetEventsForBlockIDs(ctx, eventType, blockIDs) + if err != nil { + return nil, fmt.Errorf("could not get events for block ids: %w", err) + } + + return events, nil +} + +func (c *Client) GetEventsForHeightRange( + ctx context.Context, + eventType string, + startHeight uint64, + endHeight uint64, +) ([]sdk.BlockEvents, error) { + events, err := c.client.GetEventsForHeightRange(ctx, eventType, startHeight, endHeight) + if err != nil { + return nil, fmt.Errorf("could not get events for height range: %w", err) + } + return events, nil +} + +func getAccount(ctx context.Context, client *client.Client, address sdk.Address) (*sdk.Account, error) { + header, err := client.GetLatestBlockHeader(ctx, true) + if err != nil { + return nil, fmt.Errorf("could not get latest block header: %w", err) + } + + // when this is run against an Access node with indexing enabled, occasionally the indexed height + // lags behind the sealed height, especially after first starting up (like in a test). + // Retry using the same block until we get the account. + for { + acc, err := client.GetAccountAtBlockHeight(ctx, address, header.Height) + if err == nil { + return acc, nil + } + + switch status.Code(err) { + case codes.OutOfRange, codes.ResourceExhausted: + time.Sleep(100 * time.Millisecond) + continue + } + return nil, fmt.Errorf("could not get the account %v: %w", address, err) + } +} diff --git a/integration/testnet/container.go b/integration/testnet/container.go index 2ee74894ac1..b32af817506 100644 --- a/integration/testnet/container.go +++ b/integration/testnet/container.go @@ -8,10 +8,12 @@ import ( "strings" "time" + "github.com/cockroachdb/pebble/v2" "github.com/dapperlabs/testingdock" - "github.com/dgraph-io/badger/v2" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/go-connections/nat" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/require" "google.golang.org/grpc" @@ -21,7 +23,6 @@ import ( sdkclient "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go/cmd/bootstrap/utils" - "github.com/onflow/flow-go/crypto" ghostclient "github.com/onflow/flow-go/engine/ghost/client" "github.com/onflow/flow-go/integration/client" "github.com/onflow/flow-go/model/bootstrap" @@ -30,7 +31,11 @@ import ( "github.com/onflow/flow-go/module/metrics" state "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/inmem" - storage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + storagepebble "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" ) var ( @@ -95,8 +100,9 @@ func GetPrivateNodeInfoAddress(nodeName string) string { return fmt.Sprintf("%s:%d", nodeName, 
DefaultFlowPort) } -func NewContainerConfig(nodeName string, conf NodeConfig, networkKey, stakingKey crypto.PrivateKey) ContainerConfig { - info := bootstrap.NewPrivateNodeInfo( +func NewContainerConfig(nodeName string, conf NodeConfig, networkKey, stakingKey crypto.PrivateKey, +) (ContainerConfig, error) { + info, err := bootstrap.NewPrivateNodeInfo( conf.Identifier, conf.Role, GetPrivateNodeInfoAddress(nodeName), @@ -104,6 +110,9 @@ func NewContainerConfig(nodeName string, conf NodeConfig, networkKey, stakingKey networkKey, stakingKey, ) + if err != nil { + return ContainerConfig{}, err + } containerConf := ContainerConfig{ NodeInfo: info, @@ -116,7 +125,7 @@ func NewContainerConfig(nodeName string, conf NodeConfig, networkKey, stakingKey Corrupted: conf.Corrupted, } - return containerConf + return containerConf, nil } // ImageName returns the Docker image name for the given config. @@ -231,25 +240,17 @@ func (c *Container) Name() string { } // DB returns the node's database. -func (c *Container) DB() (*badger.DB, error) { - opts := badger. - DefaultOptions(c.DBPath()). - WithKeepL0InMemory(true). - WithLogger(nil) - - db, err := badger.Open(opts) - return db, err +func (c *Container) DB() (storage.DB, error) { + pdb, err := storagepebble.SafeOpen(unittest.Logger(), c.DBPath()) + if err != nil { + return nil, err + } + return pebbleimpl.ToDB(pdb), nil } // DB returns the node's execution data database. -func (c *Container) ExecutionDataDB() (*badger.DB, error) { - opts := badger. - DefaultOptions(c.ExecutionDataDBPath()). - WithKeepL0InMemory(true). - WithLogger(nil) - - db, err := badger.Open(opts) - return db, err +func (c *Container) ExecutionDataDB() (*pebble.DB, error) { + return storagepebble.SafeOpen(unittest.Logger(), c.ExecutionDataDBPath()) } func (c *Container) DBPath() string { @@ -287,7 +288,11 @@ func (c *Container) Pause() error { ctx, cancel := context.WithTimeout(context.Background(), checkContainerTimeout) defer cancel() - err := c.net.cli.ContainerStop(ctx, c.ID, &checkContainerTimeout) + timeout := int(checkContainerTimeout.Seconds()) + err := c.net.cli.ContainerStop(ctx, c.ID, + container.StopOptions{ + Timeout: &timeout, + }) if err != nil { return fmt.Errorf("could not stop container with ID (%s): %w", c.ID, err) } @@ -373,29 +378,33 @@ func (c *Container) Connect() error { } func (c *Container) OpenState() (*state.State, error) { + lockManager := storage.NewTestingLockManager() db, err := c.DB() if err != nil { return nil, err } - metrics := metrics.NewNoopCollector() - index := storage.NewIndex(metrics, db) - headers := storage.NewHeaders(metrics, db) - seals := storage.NewSeals(metrics, db) - results := storage.NewExecutionResults(metrics, db) - receipts := storage.NewExecutionReceipts(metrics, db, results, storage.DefaultCacheSize) - guarantees := storage.NewGuarantees(metrics, db, storage.DefaultCacheSize) - payloads := storage.NewPayloads(db, index, guarantees, seals, receipts, results) - blocks := storage.NewBlocks(db, headers, payloads) - qcs := storage.NewQuorumCertificates(metrics, db, storage.DefaultCacheSize) - setups := storage.NewEpochSetups(metrics, db) - commits := storage.NewEpochCommits(metrics, db) - statuses := storage.NewEpochStatuses(metrics, db) - versionBeacons := storage.NewVersionBeacons(db) + index := store.NewIndex(metrics, db) + headers := store.NewHeaders(metrics, db) + seals := store.NewSeals(metrics, db) + results := store.NewExecutionResults(metrics, db) + receipts := store.NewExecutionReceipts(metrics, db, results, 
store.DefaultCacheSize) + guarantees := store.NewGuarantees(metrics, db, store.DefaultCacheSize, store.DefaultCacheSize) + payloads := store.NewPayloads(db, index, guarantees, seals, receipts, results) + blocks := store.NewBlocks(db, headers, payloads) + qcs := store.NewQuorumCertificates(metrics, db, store.DefaultCacheSize) + setups := store.NewEpochSetups(metrics, db) + commits := store.NewEpochCommits(metrics, db) + protocolState := store.NewEpochProtocolStateEntries(metrics, setups, commits, db, + store.DefaultEpochProtocolStateCacheSize, store.DefaultProtocolStateIndexCacheSize) + protocolKVStates := store.NewProtocolKVStore(metrics, db, + store.DefaultProtocolKVStoreCacheSize, store.DefaultProtocolKVStoreByBlockIDCacheSize) + versionBeacons := store.NewVersionBeacons(db) return state.OpenState( metrics, db, + lockManager, headers, seals, results, @@ -403,7 +412,8 @@ func (c *Container) OpenState() (*state.State, error) { qcs, setups, commits, - statuses, + protocolState, + protocolKVStates, versionBeacons, ) } @@ -460,7 +470,7 @@ func (c *Container) TestnetClient() (*Client, error) { return nil, fmt.Errorf("container does not implement flow.access.AccessAPI") } - chain := c.net.Root().Header.ChainID.Chain() + chain := c.net.Root().ChainID.Chain() return NewClient(c.Addr(GRPCPort), chain) } @@ -470,7 +480,12 @@ func (c *Container) SDKClient() (*sdkclient.Client, error) { return nil, fmt.Errorf("container does not implement flow.access.AccessAPI") } - return sdkclient.NewClient(c.Addr(GRPCPort), grpc.WithTransportCredentials(insecure.NewCredentials())) + return sdkclient.NewClient( + c.Addr(GRPCPort), + sdkclient.WithGRPCDialOptions( + grpc.WithTransportCredentials(insecure.NewCredentials()), + ), + ) } // GhostClient returns a ghostnode client that connects to this node. 
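Since container.go now hands out Pebble-backed storage instead of Badger, tests that inspect a stopped node's state go through storage.DB and the store constructors above. A hypothetical post-patch test fragment, mirroring the pattern used by the signer-indices test that this patch later removes:

// Stop the node first: Pebble, like Badger, allows only one process to hold the DB.
require.NoError(t, net.StopContainerByName(ctx, testnet.PrimaryAN))
container := net.ContainerByName(testnet.PrimaryAN)
require.NoError(t, container.WaitForContainerStopped(5*time.Second))

// OpenState opens the Pebble database via SafeOpen/pebbleimpl.ToDB and
// assembles the protocol state from the store.New* constructors above.
state, err := container.OpenState()
require.NoError(t, err)

head, err := state.Final().Head()
require.NoError(t, err)
t.Logf("stopped %s at finalized height %d", testnet.PrimaryAN, head.Height)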
diff --git a/integration/testnet/network.go b/integration/testnet/network.go index 1520725b335..5d59aae34a3 100644 --- a/integration/testnet/network.go +++ b/integration/testnet/network.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "fmt" gonet "net" + "net/http" "os" "path/filepath" "sort" @@ -14,15 +15,24 @@ import ( "testing" "time" + "github.com/onflow/flow-go/follower/database" + "github.com/onflow/flow-go/state/protocol/datastore" + "github.com/onflow/flow-go/state/protocol/protocol_state" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/dapperlabs/testingdock" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" + dockercontainer "github.com/docker/docker/api/types/container" dockerclient "github.com/docker/docker/client" - "github.com/onflow/cadence" + io_prometheus_client "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + "github.com/onflow/flow-go-sdk/crypto" "github.com/onflow/flow-go/cmd/bootstrap/dkg" @@ -38,14 +48,14 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/epochs" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/p2p/translator" clusterstate "github.com/onflow/flow-go/state/cluster" - "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" + "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/io" "github.com/onflow/flow-go/utils/unittest" ) @@ -70,12 +80,19 @@ const ( DefaultFlowSecretsDBDir = "/data/secrets" // DefaultExecutionRootDir is the default directory for the execution node state database. DefaultExecutionRootDir = "/data/exedb" + // DefaultRegisterDir is the default directory for the register store database. + DefaultRegisterDir = "/data/register" // DefaultExecutionDataServiceDir for the execution data service blobstore. DefaultExecutionDataServiceDir = "/data/execution_data" + // DefaultExecutionStateDir is the default directory for the execution state database. + DefaultExecutionStateDir = "/data/execution_state" + // DefaultChunkDataPackDir for the chunk data packs + DefaultChunkDataPackDir = "/data/chunk_data_pack" // DefaultProfilerDir is the default directory for the profiler DefaultProfilerDir = "/data/profiler" // GRPCPort is the GRPC API port. + // Use this same port for the ExecutionDataAPI GRPCPort = "9000" // GRPCSecurePort is the secure GRPC API port.
GRPCSecurePort = "9001" @@ -87,8 +104,6 @@ const ( MetricsPort = "8080" // AdminPort is the admin server port AdminPort = "9002" - // ExecutionStatePort is the execution state server port - ExecutionStatePort = "9003" // PublicNetworkPort is the access node network port accessible from outside any docker container PublicNetworkPort = "9876" // DebuggerPort is the go debugger port @@ -100,9 +115,15 @@ const ( // PrimaryAN is the container name for the primary access node to use for API requests PrimaryAN = "access_1" - DefaultViewsInStakingAuction uint64 = 5 - DefaultViewsInDKGPhase uint64 = 50 - DefaultViewsInEpoch uint64 = 180 + // PrimaryON is the container name for the primary observer node to use for API requests + PrimaryON = "observer_1" + + DefaultViewsInStakingAuction uint64 = 5 + DefaultViewsInDKGPhase uint64 = 50 + DefaultViewsInEpoch uint64 = 200 + DefaultViewsPerSecond uint64 = 1 + DefaultFinalizationSafetyThreshold uint64 = 20 + DefaultEpochExtensionViewCount uint64 = 50 // DefaultMinimumNumOfAccessNodeIDS at-least 1 AN ID must be configured for LN & SN DefaultMinimumNumOfAccessNodeIDS = 1 @@ -170,10 +191,10 @@ func (net *FlowNetwork) Identities() flow.IdentityList { } // ContainersByRole returns all the containers in the network with the specified role -func (net *FlowNetwork) ContainersByRole(role flow.Role) []*Container { +func (net *FlowNetwork) ContainersByRole(role flow.Role, ghost bool) []*Container { cl := make([]*Container, 0, len(net.Containers)) for _, c := range net.Containers { - if c.Config.Role == role { + if c.Config.Role == role && c.Config.Ghost == ghost { cl = append(cl, c) } } @@ -261,7 +282,7 @@ func (net *FlowNetwork) RemoveContainers() { // DropDBs resets the protocol state database for all containers in the network // matching the given filter. -func (net *FlowNetwork) DropDBs(filter flow.IdentityFilter) { +func (net *FlowNetwork) DropDBs(filter flow.IdentityFilter[flow.Identity]) { if net == nil || net.suite == nil { return } @@ -317,9 +338,9 @@ func (net *FlowNetwork) ContainerByName(name string) *Container { func (net *FlowNetwork) PrintPorts() { var builder strings.Builder builder.WriteString("endpoints by container name:\n") - for containerName, container := range net.Containers { - builder.WriteString(fmt.Sprintf("\t%s\n", containerName)) - for portName, port := range container.Ports { + for cName, c := range net.Containers { + builder.WriteString(fmt.Sprintf("\t%s\n", cName)) + for portName, port := range c.Ports { switch portName { case MetricsPort: builder.WriteString(fmt.Sprintf("\t\t%s: localhost:%s/metrics\n", portName, port)) @@ -331,6 +352,57 @@ func (net *FlowNetwork) PrintPorts() { fmt.Print(builder.String()) } +// PortsByContainerName returns the specified port for each container in the network. +// Args: +// - portName: name of the port. +// - withGhost: when set to true will include urls's for ghost containers, otherwise ghost containers will be filtered. +// +// Returns: +// - map[string]string: a map of container name to the specified port on the host machine. +func (net *FlowNetwork) PortsByContainerName(portName string, withGhost bool) map[string]string { + portsByContainer := make(map[string]string) + for cName, c := range net.Containers { + if !withGhost && c.Config.Ghost { + continue + } + portsByContainer[cName] = c.Ports[portName] + } + return portsByContainer +} + +// GetMetricFromContainers returns the specified metric for all containers. 
+// Args: +// +// t: testing pointer +// metricName: name of the metric +// metricsURLs: map of container name to metrics url +// +// Returns: +// +// map[string][]*io_prometheus_client.Metric map of container name to metric result. +func (net *FlowNetwork) GetMetricFromContainers(t *testing.T, metricName string, metricsURLs map[string]string) map[string][]*io_prometheus_client.Metric { + allMetrics := make(map[string][]*io_prometheus_client.Metric, len(metricsURLs)) + for containerName, metricsURL := range metricsURLs { + allMetrics[containerName] = net.GetMetricFromContainer(t, containerName, metricsURL, metricName) + } + return allMetrics +} + +// GetMetricFromContainer makes an HTTP GET request to the container's metrics URL and returns the samples for the named metric. +func (net *FlowNetwork) GetMetricFromContainer(t *testing.T, containerName, metricsURL, metricName string) []*io_prometheus_client.Metric { + // scrape the container's metrics endpoint at the provided URL + res, err := http.Get(metricsURL) + require.NoError(t, err, fmt.Sprintf("failed to get metrics for container %s at url %s: %s", containerName, metricsURL, err)) + defer res.Body.Close() + + var parser expfmt.TextParser + mf, err := parser.TextToMetricFamilies(res.Body) + require.NoError(t, err, fmt.Sprintf("failed to parse metrics for container %s at url %s: %s", containerName, metricsURL, err)) + m, ok := mf[metricName] + require.True(t, ok, "failed to get metric %s for container %s at url %s: metric does not exist", metricName, containerName, metricsURL) + return m.GetMetric() +} + type ConsensusFollowerConfig struct { NodeID flow.Identifier NetworkingPrivKey crypto.PrivateKey @@ -353,15 +425,16 @@ func NewConsensusFollowerConfig(t *testing.T, networkingPrivKey crypto.PrivateKe // NetworkConfig is the config for the network.
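A hypothetical snippet tying the two metrics helpers above together; the metric name is illustrative (go_goroutines comes from the standard Prometheus Go collector), and the URL shape follows the localhost:port/metrics convention that PrintPorts reports:

urls := make(map[string]string)
for name, port := range net.PortsByContainerName(testnet.MetricsPort, false) {
	// Ports holds the host-mapped port for each non-ghost container
	urls[name] = fmt.Sprintf("http://localhost:%s/metrics", port)
}
byContainer := net.GetMetricFromContainers(t, "go_goroutines", urls)
for name, samples := range byContainer {
	t.Logf("%s: %d sample(s)", name, len(samples))
}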
type NetworkConfig struct { - Nodes NodeConfigs - ConsensusFollowers []ConsensusFollowerConfig - Observers []ObserverConfig - Name string - NClusters uint - ViewsInDKGPhase uint64 - ViewsInStakingAuction uint64 - ViewsInEpoch uint64 - EpochCommitSafetyThreshold uint64 + Nodes NodeConfigs + ConsensusFollowers []ConsensusFollowerConfig + Observers []ObserverConfig + Name string + NClusters uint + ViewsInDKGPhase uint64 + ViewsInStakingAuction uint64 + ViewsInEpoch uint64 + ViewsPerSecond uint64 + KVStoreFactory func(epochStateID flow.Identifier) (protocol_state.KVStoreAPI, error) } type NetworkConfigOpt func(*NetworkConfig) @@ -374,6 +447,10 @@ func NewNetworkConfig(name string, nodes NodeConfigs, opts ...NetworkConfigOpt) ViewsInStakingAuction: DefaultViewsInStakingAuction, ViewsInDKGPhase: DefaultViewsInDKGPhase, ViewsInEpoch: DefaultViewsInEpoch, + ViewsPerSecond: DefaultViewsPerSecond, + KVStoreFactory: func(epochStateID flow.Identifier) (protocol_state.KVStoreAPI, error) { + return kvstore.NewDefaultKVStore(DefaultFinalizationSafetyThreshold, DefaultEpochExtensionViewCount, epochStateID) + }, } for _, apply := range opts { @@ -383,16 +460,11 @@ func NewNetworkConfig(name string, nodes NodeConfigs, opts ...NetworkConfigOpt) return c } -func NewNetworkConfigWithEpochConfig(name string, nodes NodeConfigs, viewsInStakingAuction, viewsInDKGPhase, viewsInEpoch, safetyThreshold uint64, opts ...NetworkConfigOpt) NetworkConfig { - c := NetworkConfig{ - Nodes: nodes, - Name: name, - NClusters: 1, // default to 1 cluster - ViewsInStakingAuction: viewsInStakingAuction, - ViewsInDKGPhase: viewsInDKGPhase, - ViewsInEpoch: viewsInEpoch, - EpochCommitSafetyThreshold: safetyThreshold, - } +func NewNetworkConfigWithEpochConfig(name string, nodes NodeConfigs, viewsInStakingAuction, viewsInDKGPhase, viewsInEpoch uint64, opts ...NetworkConfigOpt) NetworkConfig { + c := NewNetworkConfig(name, nodes, + WithViewsInStakingAuction(viewsInStakingAuction), + WithViewsInDKGPhase(viewsInDKGPhase), + WithViewsInEpoch(viewsInEpoch)) for _, apply := range opts { apply(&c) @@ -413,15 +485,21 @@ func WithViewsInEpoch(views uint64) func(*NetworkConfig) { } } +func WithViewsPerSecond(views uint64) func(*NetworkConfig) { + return func(config *NetworkConfig) { + config.ViewsPerSecond = views + } +} + func WithViewsInDKGPhase(views uint64) func(*NetworkConfig) { return func(config *NetworkConfig) { config.ViewsInDKGPhase = views } } -func WithEpochCommitSafetyThreshold(threshold uint64) func(*NetworkConfig) { +func WithKVStoreFactory(factory func(flow.Identifier) (protocol_state.KVStoreAPI, error)) func(*NetworkConfig) { return func(config *NetworkConfig) { - config.EpochCommitSafetyThreshold = threshold + config.KVStoreFactory = factory } } @@ -496,7 +574,7 @@ func PrepareFlowNetwork(t *testing.T, networkConf NetworkConfig, chainID flow.Ch t.Logf("BootstrapDir: %s \n", bootstrapDir) bootstrapData, err := BootstrapNetwork(networkConf, bootstrapDir, chainID) - require.Nil(t, err) + require.NoError(t, err) root := bootstrapData.Root result := bootstrapData.Result @@ -561,6 +639,11 @@ func PrepareFlowNetwork(t *testing.T, networkConf NetworkConfig, chainID flow.Ch nodeContainer.AddFlag("insecure-access-api", "false") nodeContainer.AddFlag("access-node-ids", strings.Join(accessNodeIDS, ",")) } + // Increase the maximum view duration to accommodate the default Localnet block rate of 1bps + if nodeConf.Role == flow.RoleConsensus { + nodeContainer := flowNetwork.Containers[nodeConf.ContainerName] + 
nodeContainer.AddFlag("cruise-ctl-max-view-duration", "2s") + } } for i, observerConf := range networkConf.Observers { @@ -568,7 +651,7 @@ func PrepareFlowNetwork(t *testing.T, networkConf NetworkConfig, chainID flow.Ch observerConf.ContainerName = fmt.Sprintf("observer_%d", i+1) } t.Logf("add observer %v", observerConf.ContainerName) - flowNetwork.addObserver(t, observerConf) + flowNetwork.AddObserver(t, observerConf) } rootProtocolSnapshotPath := filepath.Join(bootstrapDir, bootstrap.PathRootProtocolStateSnapshot) @@ -584,26 +667,35 @@ func PrepareFlowNetwork(t *testing.T, networkConf NetworkConfig, chainID flow.Ch return flowNetwork } -func (net *FlowNetwork) addConsensusFollower(t *testing.T, rootProtocolSnapshotPath string, followerConf ConsensusFollowerConfig, containers []ContainerConfig) { +func (net *FlowNetwork) addConsensusFollower(t *testing.T, rootProtocolSnapshotPath string, followerConf ConsensusFollowerConfig, _ []ContainerConfig) { tmpdir := makeTempSubDir(t, net.baseTempdir, "flow-consensus-follower") // create a directory for the follower database dataDir := makeDir(t, tmpdir, DefaultFlowDBDir) + pebbleDB, _, err := database.InitPebbleDB(net.log, dataDir) + require.NoError(t, err) + // create a follower-specific directory for the bootstrap files followerBootstrapDir := makeDir(t, tmpdir, DefaultBootstrapDir) + makeDir(t, followerBootstrapDir, bootstrap.DirnamePublicBootstrap) - // strip out the node addresses from root-protocol-state-snapshot.json and copy it to the follower-specific + // copy root protocol snapshot to the follower-specific folder // bootstrap/public-root-information directory - err := rootProtocolJsonWithoutAddresses(rootProtocolSnapshotPath, filepath.Join(followerBootstrapDir, bootstrap.PathRootProtocolStateSnapshot)) + err = io.Copy(rootProtocolSnapshotPath, filepath.Join(followerBootstrapDir, bootstrap.PathRootProtocolStateSnapshot)) require.NoError(t, err) // consensus follower + lockManager := storage.NewTestingLockManager() + bindAddr := gonet.JoinHostPort("localhost", testingdock.RandomPort(t)) + protocolDB := pebbleimpl.ToDB(pebbleDB) opts := append( followerConf.Opts, - consensus_follower.WithDataDir(dataDir), + consensus_follower.WithProtocolDB(protocolDB), consensus_follower.WithBootstrapDir(followerBootstrapDir), + // each consenesus follower will have a different lock manager singleton + consensus_follower.WithLockManager(lockManager), ) stakedANContainer := net.ContainerByID(followerConf.StakedNodeID) @@ -634,7 +726,7 @@ func (net *FlowNetwork) StopContainerByName(ctx context.Context, containerName s if container == nil { return fmt.Errorf("%s container not found", containerName) } - return net.cli.ContainerStop(ctx, container.ID, nil) + return net.cli.ContainerStop(ctx, container.ID, dockercontainer.StopOptions{}) } type ObserverConfig struct { @@ -644,7 +736,7 @@ type ObserverConfig struct { BootstrapAccessName string } -func (net *FlowNetwork) addObserver(t *testing.T, conf ObserverConfig) { +func (net *FlowNetwork) AddObserver(t *testing.T, conf ObserverConfig) *Container { if conf.BootstrapAccessName == "" { conf.BootstrapAccessName = PrimaryAN } @@ -664,7 +756,7 @@ func (net *FlowNetwork) addObserver(t *testing.T, conf ObserverConfig) { accessPublicKey := hex.EncodeToString(accessNode.Config.NetworkPubKey().Encode()) require.NotEmptyf(t, accessPublicKey, "failed to find the staked conf for access node with container name '%s'", conf.BootstrapAccessName) - err = WriteObserverPrivateKey(conf.ContainerName, nodeBootstrapDir) + _, err = 
WriteObserverPrivateKey(conf.ContainerName, nodeBootstrapDir) require.NoError(t, err) containerOpts := testingdock.ContainerOpts{ @@ -674,14 +766,14 @@ func (net *FlowNetwork) addObserver(t *testing.T, conf ObserverConfig) { Image: "gcr.io/flow-container-registry/observer:latest", User: currentUser(), Cmd: append([]string{ - "--bind=0.0.0.0:0", + "--bind=0.0.0.0:3569", fmt.Sprintf("--bootstrapdir=%s", DefaultBootstrapDir), fmt.Sprintf("--datadir=%s", DefaultFlowDBDir), fmt.Sprintf("--secretsdir=%s", DefaultFlowSecretsDBDir), fmt.Sprintf("--profiler-dir=%s", DefaultProfilerDir), fmt.Sprintf("--loglevel=%s", conf.LogLevel.String()), - fmt.Sprintf("--bootstrap-node-addresses=%s", accessNode.ContainerAddr(PublicNetworkPort)), - fmt.Sprintf("--bootstrap-node-public-keys=%s", accessPublicKey), + fmt.Sprintf("--observer-mode-bootstrap-node-addresses=%s", accessNode.ContainerAddr(PublicNetworkPort)), + fmt.Sprintf("--observer-mode-bootstrap-node-public-keys=%s", accessPublicKey), fmt.Sprintf("--upstream-node-addresses=%s", accessNode.ContainerAddr(GRPCSecurePort)), fmt.Sprintf("--upstream-node-public-keys=%s", accessPublicKey), fmt.Sprintf("--observer-networking-key-path=%s/private-root-information/%s_key", DefaultBootstrapDir, conf.ContainerName), @@ -696,6 +788,13 @@ func (net *FlowNetwork) addObserver(t *testing.T, conf ObserverConfig) { } nodeContainer := &Container{ + Config: ContainerConfig{ + NodeInfo: bootstrap.NodeInfo{ + Role: flow.RoleAccess, + }, + ContainerName: conf.ContainerName, + LogLevel: conf.LogLevel, + }, Ports: make(map[string]string), datadir: tmpdir, net: net, @@ -704,6 +803,7 @@ func (net *FlowNetwork) addObserver(t *testing.T, conf ObserverConfig) { nodeContainer.exposePort(GRPCPort, testingdock.RandomPort(t)) nodeContainer.AddFlag("rpc-addr", nodeContainer.ContainerAddr(GRPCPort)) + nodeContainer.AddFlag("state-stream-addr", nodeContainer.ContainerAddr(GRPCPort)) nodeContainer.exposePort(GRPCSecurePort, testingdock.RandomPort(t)) nodeContainer.AddFlag("secure-rpc-addr", nodeContainer.ContainerAddr(GRPCSecurePort)) @@ -714,6 +814,9 @@ func (net *FlowNetwork) addObserver(t *testing.T, conf ObserverConfig) { nodeContainer.exposePort(AdminPort, testingdock.RandomPort(t)) nodeContainer.AddFlag("admin-addr", nodeContainer.ContainerAddr(AdminPort)) + nodeContainer.exposePort(RESTPort, testingdock.RandomPort(t)) + nodeContainer.AddFlag("rest-addr", nodeContainer.ContainerAddr(RESTPort)) + nodeContainer.opts.HealthCheck = testingdock.HealthCheckCustom(nodeContainer.HealthcheckCallback()) suiteContainer := net.suite.Container(containerOpts) @@ -722,6 +825,7 @@ func (net *FlowNetwork) addObserver(t *testing.T, conf ObserverConfig) { // start after the bootstrap access node accessNode.After(suiteContainer) + return nodeContainer } // AddNode creates a node container with the given config and adds it to the @@ -804,10 +908,13 @@ func (net *FlowNetwork) AddNode(t *testing.T, bootstrapDir string, nodeConf Cont nodeContainer.AddFlag("triedir", DefaultExecutionRootDir) nodeContainer.AddFlag("execution-data-dir", DefaultExecutionDataServiceDir) + nodeContainer.AddFlag("chunk-data-pack-dir", DefaultChunkDataPackDir) + nodeContainer.AddFlag("register-dir", DefaultRegisterDir) case flow.RoleAccess: nodeContainer.exposePort(GRPCPort, testingdock.RandomPort(t)) nodeContainer.AddFlag("rpc-addr", nodeContainer.ContainerAddr(GRPCPort)) + nodeContainer.AddFlag("state-stream-addr", nodeContainer.ContainerAddr(GRPCPort)) nodeContainer.exposePort(GRPCSecurePort, testingdock.RandomPort(t)) 
nodeContainer.AddFlag("secure-rpc-addr", nodeContainer.ContainerAddr(GRPCSecurePort)) @@ -818,9 +925,6 @@ func (net *FlowNetwork) AddNode(t *testing.T, bootstrapDir string, nodeConf Cont nodeContainer.exposePort(RESTPort, testingdock.RandomPort(t)) nodeContainer.AddFlag("rest-addr", nodeContainer.ContainerAddr(RESTPort)) - nodeContainer.exposePort(ExecutionStatePort, testingdock.RandomPort(t)) - nodeContainer.AddFlag("state-stream-addr", nodeContainer.ContainerAddr(ExecutionStatePort)) - // uncomment line below to point the access node exclusively to a single collection node // nodeContainer.AddFlag("static-collection-ingress-addr", "collection_1:9000") nodeContainer.AddFlag("collection-ingress-port", GRPCPort) @@ -839,8 +943,13 @@ func (net *FlowNetwork) AddNode(t *testing.T, bootstrapDir string, nodeConf Cont // tests only start 1 verification node nodeContainer.AddFlag("chunk-alpha", "1") } - t.Logf("%v hotstuff startup time will be in 8 seconds: %v", time.Now().UTC(), hotstuffStartupTime) - nodeContainer.AddFlag("hotstuff-startup-time", hotstuffStartupTime) + // We generally don't need cruise control for integration tests, so disable it by default + if !nodeContainer.IsFlagSet("cruise-ctl-enabled") { + nodeContainer.AddFlag("cruise-ctl-enabled", "false") + } + if !nodeContainer.IsFlagSet("cruise-ctl-fallback-proposal-duration") { + nodeContainer.AddFlag("cruise-ctl-fallback-proposal-duration", "250ms") + } case flow.RoleVerification: if !nodeContainer.IsFlagSet("chunk-alpha") { @@ -926,7 +1035,7 @@ func followerNodeInfos(confs []ConsensusFollowerConfig) ([]bootstrap.NodeInfo, e dummyStakingKey := unittest.StakingPrivKeyFixture() for _, conf := range confs { - info := bootstrap.NewPrivateNodeInfo( + info, err := bootstrap.NewPrivateNodeInfo( conf.NodeID, flow.RoleAccess, // use Access role "", // no address @@ -934,6 +1043,9 @@ func followerNodeInfos(confs []ConsensusFollowerConfig) ([]bootstrap.NodeInfo, e conf.NetworkingPrivKey, dummyStakingKey, ) + if err != nil { + return nil, err + } nodeInfos = append(nodeInfos, info) } @@ -961,7 +1073,6 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string, chainID fl // Sort so that access nodes start up last sort.Sort(&networkConf) - // generate staking and networking keys for each configured node stakedConfs, err := setupKeys(networkConf) if err != nil { @@ -976,11 +1087,11 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string, chainID fl allNodeInfos := append(toNodeInfos(stakedConfs), followerInfos...) 
- // IMPORTANT: we must use this ordering when writing the DKG keys as - // this ordering defines the DKG participant's indices - stakedNodeInfos := bootstrap.Sort(toNodeInfos(stakedConfs), order.Canonical) + // IMPORTANT: we must use this ordering when writing the Random Beacon keys as + // this ordering defines the DKG participants' indices + stakedNodeInfos := bootstrap.Sort(toNodeInfos(stakedConfs), flow.Canonical[flow.Identity]) - dkg, err := runBeaconKG(stakedConfs) + dkg, dkgIndexMap, err := runBeaconKG(stakedConfs) if err != nil { return nil, fmt.Errorf("failed to run DKG: %w", err) } @@ -1019,34 +1130,25 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string, chainID fl return nil, fmt.Errorf("failed to write machine account files: %w", err) } + err = utils.WriteNodeInternalPubInfos(allNodeInfos, writeJSONFile) + if err != nil { + return nil, fmt.Errorf("failed to write node pub info file: %w", err) + } + // define root block parameters parentID := flow.ZeroID height := uint64(0) + view := uint64(0) timestamp := time.Now().UTC() epochCounter := uint64(0) participants := bootstrap.ToIdentityList(stakedNodeInfos) // generate root block - root := run.GenerateRootBlock(chainID, parentID, height, timestamp) - - // generate QC - signerData, err := run.GenerateQCParticipantData(consensusNodes, consensusNodes, dkg) - if err != nil { - return nil, err - } - votes, err := run.GenerateRootBlockVotes(root, signerData) - if err != nil { - return nil, err - } - qc, invalidVotesErr, err := run.GenerateRootQC(root, votes, signerData, signerData.Identities()) + rootHeaderBody, err := run.GenerateRootHeaderBody(chainID, parentID, height, view, timestamp) if err != nil { return nil, err } - if len(invalidVotesErr) > 0 { - return nil, fmt.Errorf("has invalid votes: %v", invalidVotesErr) - } - // generate root blocks for each collector cluster clusterRootBlocks, clusterAssignments, clusterQCs, err := setupClusterGenesisBlockQCs(networkConf.NClusters, epochCounter, stakedConfs) if err != nil { return nil, err } @@ -1075,26 +1177,60 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string, chainID fl return nil, err } - dkgOffsetView := root.Header.View + networkConf.ViewsInStakingAuction - 1 + dkgOffsetView := rootHeaderBody.View + networkConf.ViewsInStakingAuction - 1 + + // target number of seconds in epoch + targetDuration := networkConf.ViewsInEpoch / networkConf.ViewsPerSecond // generate epoch service events - epochSetup := &flow.EpochSetup{ - Counter: epochCounter, - FirstView: root.Header.View, - DKGPhase1FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase, - DKGPhase2FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase*2, - DKGPhase3FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase*3, - FinalView: root.Header.View + networkConf.ViewsInEpoch - 1, - Participants: participants, - Assignments: clusterAssignments, - RandomSource: randomSource, + epochSetup, err := flow.NewEpochSetup( + flow.UntrustedEpochSetup{ + Counter: epochCounter, + FirstView: rootHeaderBody.View, + DKGPhase1FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase, + DKGPhase2FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase*2, + DKGPhase3FinalView: dkgOffsetView + networkConf.ViewsInDKGPhase*3, + FinalView: rootHeaderBody.View + networkConf.ViewsInEpoch - 1, + Participants: participants.ToSkeleton(), + Assignments: clusterAssignments, + RandomSource: randomSource, + TargetDuration: targetDuration, + TargetEndTime: uint64(time.Now().Unix()) + targetDuration, + }, + ) + if err != nil { + return nil,
fmt.Errorf("could not construct epoch setup: %w", err) + } + + epochCommit, err := flow.NewEpochCommit( + flow.UntrustedEpochCommit{ + Counter: epochCounter, + ClusterQCs: flow.ClusterQCVoteDatasFromQCs(qcsWithSignerIDs), + DKGGroupKey: dkg.PubGroupKey, + DKGParticipantKeys: dkg.PubKeyShares, + DKGIndexMap: dkgIndexMap, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct epoch commit: %w", err) } - epochCommit := &flow.EpochCommit{ - Counter: epochCounter, - ClusterQCs: flow.ClusterQCVoteDatasFromQCs(qcsWithSignerIDs), - DKGGroupKey: dkg.PubGroupKey, - DKGParticipantKeys: dkg.PubKeyShares, + minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents(epochSetup, epochCommit) + if err != nil { + return nil, fmt.Errorf("could not construct epoch protocol state: %w", err) + } + rootProtocolState, err := networkConf.KVStoreFactory(minEpochStateEntry.ID()) + if err != nil { + return nil, err + } + root, err := flow.NewRootBlock( + flow.UntrustedBlock{ + HeaderBody: *rootHeaderBody, + Payload: unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolState.ID())), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct root block: %w", err) } cdcRandomSource, err := cadence.NewString(hex.EncodeToString(randomSource)) @@ -1112,7 +1248,7 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string, chainID fl RandomSource: cdcRandomSource, CollectorClusters: clusterAssignments, ClusterQCs: clusterQCs, - DKGPubKeys: dkg.PubKeyShares, + DKGPubKeys: encodable.WrapRandomBeaconPubKeys(dkg.PubKeyShares), } // generate the initial execution state @@ -1125,27 +1261,54 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string, chainID fl fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), - fvm.WithRootBlock(root.Header), + fvm.WithRootBlock(root.ToHeader()), fvm.WithEpochConfig(epochConfig), - fvm.WithIdentities(participants), + fvm.WithNodes(stakedNodeInfos), ) if err != nil { return nil, err } // generate execution result and block seal - result := run.GenerateRootResult(root, commit, epochSetup, epochCommit) + result, err := run.GenerateRootResult(root, commit, epochSetup, epochCommit) + if err != nil { + return nil, fmt.Errorf("generating root result failed: %w", err) + } seal, err := run.GenerateRootSeal(result) if err != nil { return nil, fmt.Errorf("generating root seal failed: %w", err) } - snapshot, err := inmem.SnapshotFromBootstrapStateWithParams(root, result, seal, qc, flow.DefaultProtocolVersion, networkConf.EpochCommitSafetyThreshold) + // generate QC + signerData, err := run.GenerateQCParticipantData(consensusNodes, consensusNodes, dkg) + if err != nil { + return nil, err + } + votes, err := run.GenerateRootBlockVotes(root, signerData) + if err != nil { + return nil, err + } + qc, invalidVotesErr, err := run.GenerateRootQC(root, votes, signerData, signerData.Identities()) + if err != nil { + return nil, err + } + + if len(invalidVotesErr) > 0 { + return nil, fmt.Errorf("has invalid votes: %v", invalidVotesErr) + } + + snapshot, err := inmem.SnapshotFromBootstrapStateWithParams( + root, + result, + seal, + qc, + networkConf.KVStoreFactory, + ) if err != nil { return nil, fmt.Errorf("could not create bootstrap state snapshot: %w", err) } - err = badger.IsValidRootSnapshotQCs(snapshot) + err = datastore.IsValidRootSnapshotQCs(snapshot) if err != nil { return nil, fmt.Errorf("invalid 
root snapshot qcs: %w", err) } @@ -1184,14 +1347,13 @@ func setupKeys(networkConf NetworkConfig) ([]ContainerConfig, error) { // create node container configs and corresponding public identities confs := make([]ContainerConfig, 0, nNodes) for i, conf := range networkConf.Nodes { - // define the node's name <role>_<n> and address <name>:<port> name := fmt.Sprintf("%s_%d", conf.Role.String(), roleCounter[conf.Role]+1) addr := fmt.Sprintf("%s:%d", name, DefaultFlowPort) roleCounter[conf.Role]++ - info := bootstrap.NewPrivateNodeInfo( + info, err := bootstrap.NewPrivateNodeInfo( conf.Identifier, conf.Role, addr, @@ -1199,15 +1361,19 @@ func setupKeys(networkConf NetworkConfig) ([]ContainerConfig, error) { networkKeys[i], stakingKeys[i], ) + if err != nil { + return nil, err + } containerConf := ContainerConfig{ - NodeInfo: info, - ContainerName: name, - LogLevel: conf.LogLevel, - Ghost: conf.Ghost, - AdditionalFlags: conf.AdditionalFlags, - Debug: conf.Debug, - Corrupted: conf.Corrupted, + NodeInfo: info, + ContainerName: name, + LogLevel: conf.LogLevel, + Ghost: conf.Ghost, + AdditionalFlags: conf.AdditionalFlags, + Debug: conf.Debug, + Corrupted: conf.Corrupted, + EnableMetricsServer: conf.EnableMetricsServer, } confs = append(confs, containerConf) @@ -1220,23 +1386,28 @@ func setupKeys(networkConf NetworkConfig) ([]ContainerConfig, error) { // and returns all DKG data. This includes the group private key, node indices, // and per-node public and private key-shares. // Only consensus nodes participate in the DKG. -func runBeaconKG(confs []ContainerConfig) (dkgmod.DKGData, error) { +func runBeaconKG(confs []ContainerConfig) (dkgmod.ThresholdKeySet, flow.DKGIndexMap, error) { // filter by consensus nodes - consensusNodes := bootstrap.FilterByRole(toNodeInfos(confs), flow.RoleConsensus) + consensusNodes := bootstrap.Sort(bootstrap.FilterByRole(toNodeInfos(confs), flow.RoleConsensus), flow.Canonical[flow.Identity]) nConsensusNodes := len(consensusNodes) dkgSeed, err := getSeed() if err != nil { - return dkgmod.DKGData{}, err + return dkgmod.ThresholdKeySet{}, nil, err } - dkg, err := dkg.RandomBeaconKG(nConsensusNodes, dkgSeed) + randomBeaconData, err := dkg.RandomBeaconKG(nConsensusNodes, dkgSeed) if err != nil { - return dkgmod.DKGData{}, err + return dkgmod.ThresholdKeySet{}, nil, err + } + + indexMap := make(flow.DKGIndexMap, nConsensusNodes) + for i, node := range consensusNodes { + indexMap[node.NodeID] = i } - return dkg, nil + return randomBeaconData, indexMap, nil } // setupClusterGenesisBlockQCs generates bootstrapping resources necessary for each collector cluster: @@ -1245,8 +1416,8 @@ func runBeaconKG(confs []ContainerConfig) (dkgmod.DKGData, error) { func setupClusterGenesisBlockQCs(nClusters uint, epochCounter uint64, confs []ContainerConfig) ([]*cluster.Block, flow.AssignmentList, []*flow.QuorumCertificate, error) { participantsUnsorted := toParticipants(confs) - participants := participantsUnsorted.Sort(order.Canonical) - collectors := participants.Filter(filter.HasRole(flow.RoleCollection)) + participants := participantsUnsorted.Sort(flow.Canonical[flow.Identity]) + collectors := participants.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() assignments := unittest.ClusterAssignment(nClusters, collectors) clusters, err := factory.NewClusterList(assignments, collectors) if err != nil { @@ -1258,7 +1429,10 @@ func setupClusterGenesisBlockQCs(nClusters uint, epochCounter uint64, confs []Co for _, cluster := range clusters { // generate root cluster block - 
block := clusterstate.CanonicalRootBlock(epochCounter, cluster) + block, err := clusterstate.CanonicalRootBlock(epochCounter, cluster) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to generate canonical root block: %w", err) + } lookup := make(map[flow.Identifier]struct{}) for _, node := range cluster { @@ -1278,7 +1452,7 @@ func setupClusterGenesisBlockQCs(nClusters uint, epochCounter uint64, confs []Co } // must order in canonical ordering otherwise decoding signer indices from cluster QC would fail - clusterCommittee := bootstrap.ToIdentityList(clusterNodeInfos).Sort(order.Canonical) + clusterCommittee := bootstrap.ToIdentityList(clusterNodeInfos).Sort(flow.Canonical[flow.Identity]).ToSkeleton() qc, err := run.GenerateClusterRootQC(clusterNodeInfos, clusterCommittee, block) if err != nil { return nil, nil, nil, fmt.Errorf("fail to generate cluster root QC with clusterNodeInfos %v, %w", diff --git a/integration/testnet/node_config.go b/integration/testnet/node_config.go index e8b28fded58..011d280ac2a 100644 --- a/integration/testnet/node_config.go +++ b/integration/testnet/node_config.go @@ -1,6 +1,7 @@ package testnet import ( + "fmt" "math/rand" "strconv" "strings" @@ -32,12 +33,15 @@ type NodeConfig struct { func (n NodeConfigs) Filter(filters ...NodeConfigFilter) NodeConfigs { nodeConfigs := make(NodeConfigs, 0) for _, config := range n { - filter := false + passedAllFilters := true for _, f := range filters { - filter = f(config) + if !f(config) { + passedAllFilters = false + break + } } - if filter { + if passedAllFilters { nodeConfigs = append(nodeConfigs, config) } } @@ -140,3 +144,15 @@ func WithAdditionalFlag(flag string) func(config *NodeConfig) { config.AdditionalFlags = append(config.AdditionalFlags, flag) } } + +// WithAdditionalFlagf adds additional flags to the command using a formatted string +func WithAdditionalFlagf(format string, a ...any) func(config *NodeConfig) { + return WithAdditionalFlag(fmt.Sprintf(format, a...)) +} + +// WithMetricsServer exposes the metrics server +func WithMetricsServer() func(config *NodeConfig) { + return func(config *NodeConfig) { + config.EnableMetricsServer = true + } +} diff --git a/integration/testnet/node_config_test.go b/integration/testnet/node_config_test.go new file mode 100644 index 00000000000..9566c9c0b98 --- /dev/null +++ b/integration/testnet/node_config_test.go @@ -0,0 +1,49 @@ +package testnet_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/model/flow" +) + +func TestFilter(t *testing.T) { + t.Run("filters by role", func(t *testing.T) { + configs := testnet.NewNodeConfigSet(5, flow.RoleAccess) + + // add another role to the set to ensure it is filtered out + configs = append(configs, testnet.NewNodeConfig(flow.RoleExecution)) + filters := configs.Filter(func(n testnet.NodeConfig) bool { return n.Role == flow.RoleAccess }) + assert.Len(t, filters, 5) // should exclude execution node + for _, config := range filters { + assert.Equal(t, flow.RoleAccess, config.Role) + } + }) + + t.Run("filters by multiple conditions", func(t *testing.T) { + configs := testnet.NodeConfigs{ + testnet.NewNodeConfig(flow.RoleAccess, testnet.WithDebugImage(true)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithDebugImage(true)), + } + + filters := configs.Filter( + func(n testnet.NodeConfig) bool { return n.Role == flow.RoleAccess }, + func(n testnet.NodeConfig) bool { + return n.Debug + }, + ) + + assert.Len(t, filters, 
1) // should exclude execution node + assert.True(t, filters[0].Debug) + }) + + t.Run("no matching filters", func(t *testing.T) { + configs := testnet.NewNodeConfigSet(5, flow.RoleConsensus) + + filters := configs.Filter(func(n testnet.NodeConfig) bool { return n.Role == flow.RoleAccess }) + + assert.Len(t, filters, 0) + }) +} diff --git a/integration/testnet/util.go b/integration/testnet/util.go index ad45be97c82..f8e619e1bec 100644 --- a/integration/testnet/util.go +++ b/integration/testnet/util.go @@ -11,15 +11,19 @@ import ( "path/filepath" "testing" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/onflow/crypto" + "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/cmd/bootstrap/cmd" "github.com/onflow/flow-go/cmd/bootstrap/utils" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/network/p2p/keyutils" + "github.com/onflow/flow-go/network/p2p/translator" "github.com/onflow/flow-go/utils/io" + "github.com/onflow/flow-go/utils/unittest" ) func makeDir(t *testing.T, base string, subdir string) string { @@ -71,7 +75,7 @@ func toNodeInfos(confs []ContainerConfig) []bootstrap.NodeInfo { } func getSeed() ([]byte, error) { - seedLen := int(math.Max(crypto.SeedMinLenDKG, crypto.KeyGenSeedMinLen)) + seedLen := int(math.Max(crypto.KeyGenSeedMinLen, crypto.KeyGenSeedMinLen)) seed := make([]byte, seedLen) n, err := rand.Read(seed) if err != nil || n != seedLen { @@ -99,33 +103,37 @@ func WriteFile(path string, data []byte) error { return err } -// rootProtocolJsonWithoutAddresses strips out all node addresses from the root protocol json file specified as srcFile -// and creates the dstFile with the modified contents -func rootProtocolJsonWithoutAddresses(srcfile string, dstFile string) error { - - data, err := io.ReadFile(filepath.Join(srcfile)) +func WriteObserverPrivateKey(observerName, bootstrapDir string) (crypto.PrivateKey, error) { + // make the observer private key for named observer + // only used for localnet, not for use with production + networkSeed := cmd.GenerateRandomSeed(crypto.KeyGenSeedMinLen) + networkKey, err := utils.GeneratePublicNetworkingKey(networkSeed) if err != nil { - return err + return nil, fmt.Errorf("could not generate networking key: %w", err) } - var rootSnapshot inmem.EncodableSnapshot - err = json.Unmarshal(data, &rootSnapshot) + // hex encode + keyBytes := networkKey.Encode() + output := make([]byte, hex.EncodedLen(len(keyBytes))) + hex.Encode(output, keyBytes) + + // write to file + outputFile := fmt.Sprintf("%s/private-root-information/%s_key", bootstrapDir, observerName) + err = os.WriteFile(outputFile, output, 0600) if err != nil { - return err + return nil, fmt.Errorf("could not write private key to file: %w", err) } - strippedSnapshot := inmem.StrippedInmemSnapshot(rootSnapshot) - - return WriteJSON(dstFile, strippedSnapshot) + return networkKey, nil } -func WriteObserverPrivateKey(observerName, bootstrapDir string) error { +func WriteTestExecutionService(_ flow.Identifier, address, observerName, bootstrapDir string) (bootstrap.NodeInfo, error) { // make the observer private key for named observer // only used for localnet, not for use with production networkSeed := cmd.GenerateRandomSeed(crypto.KeyGenSeedMinLen) networkKey, err := utils.GeneratePublicNetworkingKey(networkSeed) if err != nil { - return fmt.Errorf("could not generate networking key: %w", err) + 
return bootstrap.NodeInfo{}, fmt.Errorf("could not generate networking key: %w", err) } // hex encode @@ -133,12 +141,74 @@ func WriteObserverPrivateKey(observerName, bootstrapDir string) error { output := make([]byte, hex.EncodedLen(len(keyBytes))) hex.Encode(output, keyBytes) - // write to file - outputFile := fmt.Sprintf("%s/private-root-information/%s_key", bootstrapDir, observerName) + encryptionKey, err := utils.GenerateSecretsDBEncryptionKey() + if err != nil { + return bootstrap.NodeInfo{}, err + } + + pubKey, err := keyutils.LibP2PPublicKeyFromFlow(networkKey.PublicKey()) + if err != nil { + return bootstrap.NodeInfo{}, fmt.Errorf("could not get libp2p public key from flow public key: %w", err) + } + + peerID, err := peer.IDFromPublicKey(pubKey) + if err != nil { + return bootstrap.NodeInfo{}, fmt.Errorf("could not get peer ID from public key: %w", err) + } + + nodeID, err := translator.NewPublicNetworkIDTranslator().GetFlowID(peerID) + if err != nil { + return bootstrap.NodeInfo{}, fmt.Errorf("could not get flow node ID: %w", err) + } + + k, err := pubKey.Raw() + if err != nil { + return bootstrap.NodeInfo{}, err + } + + ks := unittest.StakingKeys(1) + stakingKey := ks[0] + + log.Info().Msgf("test execution node private key: %v, public key: %x, peerID: %v, nodeID: %v", networkKey, k, peerID, nodeID) + + nodeInfo, err := bootstrap.NewPrivateNodeInfo( + nodeID, + flow.RoleExecution, + address, + 0, + networkKey, + stakingKey, + ) + if err != nil { + return bootstrap.NodeInfo{}, fmt.Errorf("failed to create node info: %w", err) + } + + path := fmt.Sprintf("%s/private-root-information/private-node-info_%v/%vjson", + bootstrapDir, nodeID, bootstrap.PathPrivNodeInfoPrefix) + + private, err := nodeInfo.Private() + if err != nil { + return bootstrap.NodeInfo{}, err + } + + err = io.WriteJSON(path, private) + if err != nil { + return bootstrap.NodeInfo{}, err + } + + path = fmt.Sprintf("%s/private-root-information/private-node-info_%v/%v", + bootstrapDir, nodeID, bootstrap.FilenameSecretsEncryptionKey) + err = os.WriteFile(path, encryptionKey, 0644) + if err != nil { + return bootstrap.NodeInfo{}, err + } + + // write network private key + outputFile := fmt.Sprintf("%s/private-root-information/private-node-info_%v/network_private_key", bootstrapDir, nodeID) err = os.WriteFile(outputFile, output, 0600) if err != nil { - return fmt.Errorf("could not write private key to file: %w", err) + return bootstrap.NodeInfo{}, fmt.Errorf("could not write private key to file: %w", err) } - return nil + return nodeInfo, nil } diff --git a/integration/tests/access/access_test.go b/integration/tests/access/access_test.go deleted file mode 100644 index 82d268d9a65..00000000000 --- a/integration/tests/access/access_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package access - -import ( - "context" - "net" - "testing" - "time" - - "github.com/onflow/flow-go/consensus/hotstuff/committees" - "github.com/onflow/flow-go/consensus/hotstuff/signature" - "github.com/onflow/flow-go/engine/common/rpc/convert" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - accessproto "github.com/onflow/flow/protobuf/go/flow/access" - - "github.com/onflow/flow-go/integration/testnet" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestAccess(t *testing.T) { - suite.Run(t, new(AccessSuite)) -} - -type AccessSuite 
struct { - suite.Suite - - log zerolog.Logger - - // root context for the current test - ctx context.Context - cancel context.CancelFunc - - net *testnet.FlowNetwork -} - -func (s *AccessSuite) TearDownTest() { - s.log.Info().Msg("================> Start TearDownTest") - s.net.Remove() - s.cancel() - s.log.Info().Msg("================> Finish TearDownTest") -} - -func (s *AccessSuite) SetupTest() { - s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) - s.log.Info().Msg("================> SetupTest") - defer func() { - s.log.Info().Msg("================> Finish SetupTest") - }() - - nodeConfigs := []testnet.NodeConfig{ - testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel)), - } - - // need one dummy execution node (unused ghost) - exeConfig := testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()) - nodeConfigs = append(nodeConfigs, exeConfig) - - // need one dummy verification node (unused ghost) - verConfig := testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()) - nodeConfigs = append(nodeConfigs, verConfig) - - // need one controllable collection node (unused ghost) - collConfig := testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()) - nodeConfigs = append(nodeConfigs, collConfig) - - // need three consensus nodes (unused ghost) - for n := 0; n < 3; n++ { - conID := unittest.IdentifierFixture() - nodeConfig := testnet.NewNodeConfig(flow.RoleConsensus, - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.WithID(conID), - testnet.AsGhost()) - nodeConfigs = append(nodeConfigs, nodeConfig) - } - - conf := testnet.NewNetworkConfig("access_api_test", nodeConfigs) - s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) - - // start the network - s.T().Logf("starting flow network with docker containers") - s.ctx, s.cancel = context.WithCancel(context.Background()) - - s.net.Start(s.ctx) -} - -func (s *AccessSuite) TestAPIsAvailable() { - - s.T().Run("TestHTTPProxyPortOpen", func(t *testing.T) { - httpProxyAddress := s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCWebPort) - - conn, err := net.DialTimeout("tcp", httpProxyAddress, 1*time.Second) - require.NoError(s.T(), err, "http proxy port not open on the access node") - - conn.Close() - }) - - s.T().Run("TestAccessConnection", func(t *testing.T) { - ctx, cancel := context.WithTimeout(s.ctx, 1*time.Second) - defer cancel() - - grpcAddress := s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort) - conn, err := grpc.DialContext(ctx, grpcAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.NoError(t, err, "failed to connect to access node") - defer conn.Close() - - client := accessproto.NewAccessAPIClient(conn) - - _, err = client.Ping(s.ctx, &accessproto.PingRequest{}) - assert.NoError(t, err, "failed to ping access node") - }) -} - -// TestSignerIndicesDecoding tests that access node uses signer indices' decoder to correctly parse encoded data in blocks. -// This test receives blocks from consensus follower and then requests same blocks from access API and checks if returned data -// matches. 
-func (s *AccessSuite) TestSignerIndicesDecoding() { - - container := s.net.ContainerByName(testnet.PrimaryAN) - - ctx, cancel := context.WithCancel(s.ctx) - defer cancel() - - // create access API - grpcAddress := container.Addr(testnet.GRPCPort) - conn, err := grpc.DialContext(ctx, grpcAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) - require.NoError(s.T(), err, "failed to connect to access node") - defer conn.Close() - - client := accessproto.NewAccessAPIClient(conn) - - // query latest finalized block - latestFinalizedBlock, err := makeApiRequest(client.GetLatestBlockHeader, ctx, &accessproto.GetLatestBlockHeaderRequest{ - IsSealed: false, - }) - require.NoError(s.T(), err) - - blockByID, err := makeApiRequest(client.GetBlockHeaderByID, ctx, &accessproto.GetBlockHeaderByIDRequest{Id: latestFinalizedBlock.Block.Id}) - require.NoError(s.T(), err) - - require.Equal(s.T(), latestFinalizedBlock, blockByID, "expect to receive same block by ID") - - blockByHeight, err := makeApiRequest(client.GetBlockHeaderByHeight, ctx, - &accessproto.GetBlockHeaderByHeightRequest{Height: latestFinalizedBlock.Block.Height}) - require.NoError(s.T(), err) - - require.Equal(s.T(), blockByID, blockByHeight, "expect to receive same block by height") - - // stop container, so we can access it's state and perform assertions - err = s.net.StopContainerByName(ctx, testnet.PrimaryAN) - require.NoError(s.T(), err) - - err = container.WaitForContainerStopped(5 * time.Second) - require.NoError(s.T(), err) - - // open state to build a block singer decoder - state, err := container.OpenState() - require.NoError(s.T(), err) - - // create committee so we can create decoder to assert validity of data - committee, err := committees.NewConsensusCommittee(state, container.Config.NodeID) - require.NoError(s.T(), err) - blockSignerDecoder := signature.NewBlockSignerDecoder(committee) - - expectedFinalizedBlock, err := state.AtBlockID(flow.HashToID(latestFinalizedBlock.Block.Id)).Head() - require.NoError(s.T(), err) - - // since all blocks should be equal we will execute just check on one of them - require.Equal(s.T(), latestFinalizedBlock.Block.ParentVoterIndices, expectedFinalizedBlock.ParentVoterIndices) - - // check if the response contains valid encoded signer IDs. - msg := latestFinalizedBlock.Block - block, err := convert.MessageToBlockHeader(msg) - require.NoError(s.T(), err) - decodedIdentities, err := blockSignerDecoder.DecodeSignerIDs(block) - require.NoError(s.T(), err) - // transform to assert - var transformed [][]byte - for _, identity := range decodedIdentities { - identity := identity - transformed = append(transformed, identity[:]) - } - assert.ElementsMatch(s.T(), transformed, msg.ParentVoterIds, "response must contain correctly encoded signer IDs") -} - -// makeApiRequest is a helper function that encapsulates context creation for grpc client call, used to avoid repeated creation -// of new context for each call. 
-func makeApiRequest[Func func(context.Context, *Req, ...grpc.CallOption) (*Resp, error), Req any, Resp any](apiCall Func, ctx context.Context, req *Req) (*Resp, error) { - clientCtx, cancel := context.WithTimeout(ctx, 1*time.Second) - resp, err := apiCall(clientCtx, req) - cancel() - return resp, err -} diff --git a/integration/tests/access/cohort1/access_api_test.go b/integration/tests/access/cohort1/access_api_test.go new file mode 100644 index 00000000000..933c4bb4dd4 --- /dev/null +++ b/integration/tests/access/cohort1/access_api_test.go @@ -0,0 +1,1159 @@ +package cohort1 + +import ( + "context" + "fmt" + "io" + "testing" + "time" + + "github.com/onflow/flow-go-sdk/templates" + "github.com/onflow/flow-go-sdk/test" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/integration/tests/mvp" + "github.com/onflow/flow-go/utils/dsl" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/cadence" + + sdk "github.com/onflow/flow-go-sdk" + client "github.com/onflow/flow-go-sdk/access/grpc" + + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/integration/utils" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// This is a collection of tests that validate various Access API endpoints work as expected. + +var ( + simpleScript = `access(all) fun main(): Int { return 42; }` + simpleScriptResult = cadence.NewInt(42) + + OriginalContract = dsl.Contract{ + Name: "TestingContract", + Members: []dsl.CadenceCode{ + dsl.Code(` + access(all) fun message(): String { + return "Initial Contract" + }`, + ), + }, + } + + UpdatedContract = dsl.Contract{ + Name: "TestingContract", + Members: []dsl.CadenceCode{ + dsl.Code(` + access(all) fun message(): String { + return "Updated Contract" + }`, + ), + }, + } +) + +const ( + GetMessageScript = ` +import TestingContract from 0x%s + +access(all) +fun main(): String { + return TestingContract.message() +}` +) + +func TestAccessAPI(t *testing.T) { + suite.Run(t, new(AccessAPISuite)) +} + +type AccessAPISuite struct { + suite.Suite + + log zerolog.Logger + + // root context for the current test + ctx context.Context + cancel context.CancelFunc + + net *testnet.FlowNetwork + + accessNode2 *testnet.Container + an1Client *client.Client + an2Client *client.Client + serviceClient *testnet.Client +} + +func (s *AccessAPISuite) TearDownTest() { + s.log.Info().Msg("================> Start TearDownTest") + s.net.Remove() + s.cancel() + s.log.Info().Msg("================> Finish TearDownTest") +} + +func (s *AccessAPISuite) SetupTest() { + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + // access node + defaultAccessConfig := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.FatalLevel), + // make sure test continues to test as expected if the default config changes + testnet.WithAdditionalFlagf("--script-execution-mode=%s", query_mode.IndexQueryModeExecutionNodesOnly), + testnet.WithAdditionalFlagf("--tx-result-query-mode=%s", 
query_mode.IndexQueryModeExecutionNodesOnly),
+	)
+
+	indexingAccessConfig := testnet.NewNodeConfig(
+		flow.RoleAccess,
+		testnet.WithLogLevel(zerolog.InfoLevel),
+		testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"),
+		testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir),
+		testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"),
+		testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"),
+		testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir),
+		testnet.WithAdditionalFlagf("--script-execution-mode=%s", query_mode.IndexQueryModeLocalOnly),
+	)
+
+	consensusConfigs := []func(config *testnet.NodeConfig){
+		// `cruise-ctl-fallback-proposal-duration` is set to 250ms instead of 100ms
+		// to purposely slow down the block rate. This is needed since the crypto module
+		// update provides faster BLS operations.
+		// TODO: fix the access integration test logic to function without slowing down
+		// the block rate
+		testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=250ms"),
+		testnet.WithAdditionalFlagf("--required-verification-seal-approvals=%d", 1),
+		testnet.WithAdditionalFlagf("--required-construction-seal-approvals=%d", 1),
+		testnet.WithLogLevel(zerolog.FatalLevel),
+	}
+
+	nodeConfigs := []testnet.NodeConfig{
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)),
+
+		// AN1 should be a vanilla node to allow other nodes to bootstrap successfully
+		defaultAccessConfig,
+
+		// Tests will focus on AN2
+		indexingAccessConfig,
+	}
+
+	conf := testnet.NewNetworkConfig("access_api_test", nodeConfigs)
+	s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet)
+
+	// start the network
+	s.T().Logf("starting flow network with docker containers")
+	s.ctx, s.cancel = context.WithCancel(context.Background())
+
+	s.net.Start(s.ctx)
+
+	var err error
+	s.accessNode2 = s.net.ContainerByName("access_2")
+
+	s.an2Client, err = s.accessNode2.SDKClient()
+	s.Require().NoError(err)
+
+	s.an1Client, err = s.net.ContainerByName(testnet.PrimaryAN).SDKClient()
+	s.Require().NoError(err)
+
+	// pause until the network is progressing
+	var header *sdk.BlockHeader
+	s.Require().Eventually(func() bool {
+		header, err = s.an2Client.GetLatestBlockHeader(s.ctx, true)
+		s.Require().NoError(err)
+
+		return header.Height > 0
+	}, 30*time.Second, 1*time.Second)
+
+	// the service client uses GetAccount and requires the first block to be indexed
+	s.Require().Eventually(func() bool {
+		s.serviceClient, err = s.accessNode2.TestnetClient()
+		return err == nil
+	}, 30*time.Second, 1*time.Second)
+}
+
+// TestScriptExecutionAndGetAccountsAN1 tests the Access API endpoints for executing scripts and getting
+// accounts using execution nodes.
+//
+// Note: not combining AN1, AN2 tests together because that causes a drastic increase in test run times. Test cases are read-only
+// and should not interfere with each other.
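+// Each test first deploys the Counter contract and waits until the block that
+// deployed it is indexed (via the deployContract, deployCounter and
+// waitUntilIndexed helpers below) before issuing its read-only queries.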
+func (s *AccessAPISuite) TestScriptExecutionAndGetAccountsAN1() {
+	// deploy the test contract
+	_ = s.deployContract(lib.CounterContract, false)
+	txResult := s.deployCounter()
+	targetHeight := txResult.BlockHeight + 1
+	s.waitUntilIndexed(targetHeight)
+
+	// Run tests against Access 1, which uses the execution node
+	s.testGetAccount(s.an1Client)
+	s.testExecuteScriptWithSimpleScript(s.an1Client)
+	s.testExecuteScriptWithSimpleContract(s.an1Client, targetHeight)
+}
+
+// TestScriptExecutionAndGetAccountsAN2 tests the Access API endpoints for executing scripts and getting
+// accounts using local storage.
+//
+// Note: not combining AN1, AN2 tests together because that causes a drastic increase in test run times. Test cases are read-only
+// and should not interfere with each other.
+func (s *AccessAPISuite) TestScriptExecutionAndGetAccountsAN2() {
+	// deploy the test contract
+	_ = s.deployContract(lib.CounterContract, false)
+	txResult := s.deployCounter()
+	targetHeight := txResult.BlockHeight + 1
+	s.waitUntilIndexed(targetHeight)
+
+	// Run tests against Access 2, which uses local storage
+	s.testGetAccount(s.an2Client)
+	s.testExecuteScriptWithSimpleScript(s.an2Client)
+	s.testExecuteScriptWithSimpleContract(s.an2Client, targetHeight)
+}
+
+func (s *AccessAPISuite) TestMVPScriptExecutionLocalStorage() {
+	// This is a specialized test that creates accounts, deposits funds, deploys contracts, etc., and
+	// uses the provided access node to handle the Access API calls. There is an existing test that
+	// covers the default config, so we only need to test with local storage.
+	mvp.RunMVPTest(s.T(), s.ctx, s.net, s.accessNode2)
+}
+
+// TestSendAndSubscribeTransactionStatuses tests the functionality of sending and subscribing to transaction statuses.
+//
+// This test verifies that a transaction can be created, signed, sent to the access API, and then the status of the transaction
+// can be subscribed to. It performs the following steps:
+// 1. Establishes a connection to the access API.
+// 2. Creates a new account key and prepares a transaction for account creation.
+// 3. Signs the transaction.
+// 4. Sends and subscribes to the transaction status using the access API.
+// 5. Verifies the received transaction statuses, ensuring they are received in order and the final status is "SEALED".
+func (s *AccessAPISuite) TestSendAndSubscribeTransactionStatuses() {
+	accessNodeContainer := s.net.ContainerByName(testnet.PrimaryAN)
+
+	// Establish a gRPC connection to the access API
+	conn, err := grpc.Dial(accessNodeContainer.Addr(testnet.GRPCPort), grpc.WithTransportCredentials(insecure.NewCredentials()))
+	s.Require().NoError(err)
+	s.Require().NotNil(conn)
+
+	// Create a client for the access API
+	accessClient := accessproto.NewAccessAPIClient(conn)
+	serviceClient, err := accessNodeContainer.TestnetClient()
+	s.Require().NoError(err)
+	s.Require().NotNil(serviceClient)
+
+	// Get the latest block ID
+	latestBlockID, err := serviceClient.GetLatestBlockID(s.ctx)
+	s.Require().NoError(err)
+
+	// Generate a new account transaction
+	accountKey := test.AccountKeyGenerator().New()
+	payer := serviceClient.SDKServiceAddress()
+
+	tx, err := templates.CreateAccount([]*sdk.AccountKey{accountKey}, nil, payer)
+	s.Require().NoError(err)
+	tx.SetComputeLimit(1000).
+		SetReferenceBlockID(sdk.HexToID(latestBlockID.String())).
+		SetProposalKey(payer, 0, serviceClient.GetAndIncrementSeqNumber()).
+		SetPayer(payer)
+
+	tx, err = serviceClient.SignTransaction(tx)
+	s.Require().NoError(err)
+
+	// Convert the transaction to a message format expected by the access API
+	authorizers := make([][]byte, len(tx.Authorizers))
+	for i, auth := range tx.Authorizers {
+		authorizers[i] = auth.Bytes()
+	}
+
+	convertToMessageSig := func(sigs []sdk.TransactionSignature) []*entities.Transaction_Signature {
+		msgSigs := make([]*entities.Transaction_Signature, len(sigs))
+		for i, sig := range sigs {
+			msgSigs[i] = &entities.Transaction_Signature{
+				Address:   sig.Address.Bytes(),
+				KeyId:     uint32(sig.KeyIndex),
+				Signature: sig.Signature,
+			}
+		}
+
+		return msgSigs
+	}
+
+	transactionMsg := &entities.Transaction{
+		Script:           tx.Script,
+		Arguments:        tx.Arguments,
+		ReferenceBlockId: tx.ReferenceBlockID.Bytes(),
+		GasLimit:         tx.GasLimit,
+		ProposalKey: &entities.Transaction_ProposalKey{
+			Address:        tx.ProposalKey.Address.Bytes(),
+			KeyId:          uint32(tx.ProposalKey.KeyIndex),
+			SequenceNumber: tx.ProposalKey.SequenceNumber,
+		},
+		Payer:              tx.Payer.Bytes(),
+		Authorizers:        authorizers,
+		PayloadSignatures:  convertToMessageSig(tx.PayloadSignatures),
+		EnvelopeSignatures: convertToMessageSig(tx.EnvelopeSignatures),
+	}
+
+	// Send and subscribe to the transaction status using the access API
+	subClient, err := accessClient.SendAndSubscribeTransactionStatuses(s.ctx, &accessproto.SendAndSubscribeTransactionStatusesRequest{
+		Transaction:          transactionMsg,
+		EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+	})
+	s.Require().NoError(err)
+
+	expectedCounter := uint64(0)
+	lastReportedTxStatus := entities.TransactionStatus_UNKNOWN
+	var txID sdk.Identifier
+
+	for {
+		resp, err := subClient.Recv()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+
+			s.Require().NoError(err)
+		}
+
+		if txID == sdk.EmptyID {
+			txID = sdk.Identifier(resp.TransactionResults.TransactionId)
+		}
+
+		s.Assert().Equal(expectedCounter, resp.GetMessageIndex())
+		s.Assert().Equal(txID, sdk.Identifier(resp.TransactionResults.TransactionId))
+		// Check that statuses are received one by one. The subscription should send a response for each status,
+		// and the messages should arrive in the order of transaction statuses.
+		// Expected order: pending(1) -> finalized(2) -> executed(3) -> sealed(4)
+		s.Assert().Equal(lastReportedTxStatus, resp.TransactionResults.Status-1)
+
+		expectedCounter++
+		lastReportedTxStatus = resp.TransactionResults.Status
+	}
+
+	// Check that the final transaction status is sealed.
+	s.Assert().Equal(entities.TransactionStatus_SEALED, lastReportedTxStatus)
+}
+
+// TestContractUpdate tests that the Access API can index contract updates, and that the program cache
+// is invalidated when a contract is updated.
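+// The test deploys OriginalContract, executes a script that calls its message()
+// function, then upgrades the account to UpdatedContract and expects the same
+// script to return the new message, which only happens if the cached program
+// for the old contract version was invalidated.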
+func (s *AccessAPISuite) TestContractUpdate() { + txResult := s.deployContract(OriginalContract, false) + targetHeight := txResult.BlockHeight + 1 + s.waitUntilIndexed(targetHeight) + + script := fmt.Sprintf(GetMessageScript, s.serviceClient.SDKServiceAddress().Hex()) + + // execute script and verify we get the original message + result, err := s.an2Client.ExecuteScriptAtBlockHeight(s.ctx, targetHeight, []byte(script), nil) + s.Require().NoError(err) + s.Require().Equal("Initial Contract", string(result.(cadence.String))) + + txResult = s.deployContract(UpdatedContract, true) + targetHeight = txResult.BlockHeight + 1 + s.waitUntilIndexed(targetHeight) + + // execute script and verify we get the updated message + result, err = s.an2Client.ExecuteScriptAtBlockHeight(s.ctx, targetHeight, []byte(script), nil) + s.Require().NoError(err) + s.Require().Equal("Updated Contract", string(result.(cadence.String))) +} + +func (s *AccessAPISuite) testGetAccount(client *client.Client) { + header, err := client.GetLatestBlockHeader(s.ctx, true) + s.Require().NoError(err) + + serviceAddress := s.serviceClient.SDKServiceAddress() + + s.Run("get account at latest block", func() { + account, err := s.waitAccountsUntilIndexed(func() (*sdk.Account, error) { + return client.GetAccount(s.ctx, serviceAddress) + }) + s.Require().NoError(err) + s.Assert().Equal(serviceAddress, account.Address) + s.Assert().NotZero(account.Balance) + }) + + s.Run("get account block ID", func() { + account, err := s.waitAccountsUntilIndexed(func() (*sdk.Account, error) { + return client.GetAccountAtLatestBlock(s.ctx, serviceAddress) + }) + s.Require().NoError(err) + s.Assert().Equal(serviceAddress, account.Address) + s.Assert().NotZero(account.Balance) + }) + + s.Run("get account block height", func() { + account, err := s.waitAccountsUntilIndexed(func() (*sdk.Account, error) { + return client.GetAccountAtBlockHeight(s.ctx, serviceAddress, header.Height) + }) + s.Require().NoError(err) + s.Assert().Equal(serviceAddress, account.Address) + s.Assert().NotZero(account.Balance) + }) + + s.Run("get newly created account", func() { + addr, err := utils.CreateFlowAccount(s.ctx, s.serviceClient) + s.Require().NoError(err) + acc, err := client.GetAccount(s.ctx, addr) + s.Require().NoError(err) + s.Assert().Equal(addr, acc.Address) + }) +} + +func (s *AccessAPISuite) testExecuteScriptWithSimpleScript(client *client.Client) { + header, err := client.GetLatestBlockHeader(s.ctx, true) + s.Require().NoError(err) + + s.Run("execute at latest block", func() { + result, err := s.waitScriptExecutionUntilIndexed(func() (cadence.Value, error) { + return client.ExecuteScriptAtLatestBlock(s.ctx, []byte(simpleScript), nil) + }) + s.Require().NoError(err) + s.Assert().Equal(simpleScriptResult, result) + }) + + s.Run("execute at block height", func() { + result, err := s.waitScriptExecutionUntilIndexed(func() (cadence.Value, error) { + return client.ExecuteScriptAtBlockHeight(s.ctx, header.Height, []byte(simpleScript), nil) + }) + s.Require().NoError(err) + s.Assert().Equal(simpleScriptResult, result) + }) + + s.Run("execute at block ID", func() { + result, err := s.waitScriptExecutionUntilIndexed(func() (cadence.Value, error) { + return client.ExecuteScriptAtBlockID(s.ctx, header.ID, []byte(simpleScript), nil) + }) + s.Require().NoError(err) + s.Assert().Equal(simpleScriptResult, result) + }) +} + +func (s *AccessAPISuite) testExecuteScriptWithSimpleContract(client *client.Client, targetHeight uint64) { + header, err := client.GetBlockHeaderByHeight(s.ctx, 
targetHeight) + s.Require().NoError(err) + + // Check that the initialized value is set + serviceAccount := s.serviceClient.Account() + script := lib.ReadCounterScript(serviceAccount.Address, serviceAccount.Address).ToCadence() + + s.Run("execute at latest block", func() { + result, err := s.waitScriptExecutionUntilIndexed(func() (cadence.Value, error) { + return client.ExecuteScriptAtLatestBlock(s.ctx, []byte(script), nil) + }) + s.Require().NoError(err) + s.Assert().Equal(lib.CounterInitializedValue, result.(cadence.Int).Int()) + }) + + s.Run("execute at block height", func() { + result, err := s.waitScriptExecutionUntilIndexed(func() (cadence.Value, error) { + return client.ExecuteScriptAtBlockHeight(s.ctx, header.Height, []byte(script), nil) + }) + s.Require().NoError(err) + s.Assert().Equal(lib.CounterInitializedValue, result.(cadence.Int).Int()) + }) + + s.Run("execute at block ID", func() { + result, err := s.waitScriptExecutionUntilIndexed(func() (cadence.Value, error) { + return client.ExecuteScriptAtBlockID(s.ctx, header.ID, []byte(script), nil) + }) + s.Require().NoError(err) + s.Assert().Equal(lib.CounterInitializedValue, result.(cadence.Int).Int()) + }) + + s.Run("execute at past block height", func() { + // targetHeight is when the counter was deployed, use a height before that to check that + // the contract was deployed, but the value was not yet set + pastHeight := targetHeight - 2 + + result, err := client.ExecuteScriptAtBlockHeight(s.ctx, pastHeight, []byte(script), nil) + s.Require().NoError(err) + + s.Assert().Equal(lib.CounterDefaultValue, result.(cadence.Int).Int()) + }) +} + +func (s *AccessAPISuite) deployContract(contract dsl.Contract, isUpdate bool) *sdk.TransactionResult { + header, err := s.serviceClient.GetLatestSealedBlockHeader(s.ctx) + s.Require().NoError(err) + + // Deploy the contract + var tx *sdk.Transaction + if isUpdate { + tx, err = s.serviceClient.UpdateContract(s.ctx, header.ID, contract) + } else { + tx, err = s.serviceClient.DeployContract(s.ctx, header.ID, contract) + } + s.Require().NoError(err) + + result, err := s.serviceClient.WaitForExecuted(s.ctx, tx.ID()) + s.Require().NoError(err) + s.Require().Empty(result.Error, "deploy tx should be accepted but got: %s", result.Error) + + return result +} + +func (s *AccessAPISuite) deployCounter() *sdk.TransactionResult { + header, err := s.serviceClient.GetLatestSealedBlockHeader(s.ctx) + s.Require().NoError(err) + + // Add counter to service account + serviceAddress := s.serviceClient.SDKServiceAddress() + tx := sdk.NewTransaction(). + SetScript([]byte(lib.CreateCounterTx(serviceAddress).ToCadence())). + SetReferenceBlockID(sdk.Identifier(header.ID)). + SetProposalKey(serviceAddress, 0, s.serviceClient.GetAndIncrementSeqNumber()). + SetPayer(serviceAddress). + AddAuthorizer(serviceAddress). 
+		SetComputeLimit(9999)
+
+	err = s.serviceClient.SignAndSendTransaction(s.ctx, tx)
+	s.Require().NoError(err)
+
+	result, err := s.serviceClient.WaitForSealed(s.ctx, tx.ID())
+	s.Require().NoError(err)
+	s.Require().Empty(result.Error, "create counter tx should be accepted but got: %s", result.Error)
+
+	return result
+}
+
+type getAccount func() (*sdk.Account, error)
+type executeScript func() (cadence.Value, error)
+
+var indexDelay = 10 * time.Second
+var indexRetry = 100 * time.Millisecond
+
+// wait for the sealed block to get indexed, as there is a delay in syncing blocks between nodes
+func (s *AccessAPISuite) waitAccountsUntilIndexed(get getAccount) (*sdk.Account, error) {
+	var account *sdk.Account
+	var err error
+	s.Require().Eventually(func() bool {
+		account, err = get()
+		return notOutOfRangeError(err)
+	}, indexDelay, indexRetry)
+
+	return account, err
+}
+
+func (s *AccessAPISuite) waitScriptExecutionUntilIndexed(execute executeScript) (cadence.Value, error) {
+	var val cadence.Value
+	var err error
+	s.Require().Eventually(func() bool {
+		val, err = execute()
+		return notOutOfRangeError(err)
+	}, indexDelay, indexRetry)
+
+	return val, err
+}
+
+func (s *AccessAPISuite) waitUntilIndexed(height uint64) {
+	// wait until the block is indexed
+	// This relies on the fact that the API is configured to only use the local db, and will return
+	// an error if the height is not indexed yet.
+	//
+	// TODO: once the indexed height is included in the Access API's metadata response, we can get
+	// rid of this
+	s.Require().Eventually(func() bool {
+		_, err := s.an2Client.ExecuteScriptAtBlockHeight(s.ctx, height, []byte(simpleScript), nil)
+		return err == nil
+	}, 30*time.Second, 1*time.Second)
+}
+
+// notOutOfRangeError returns true if there is no error, or if the error is not an OutOfRange error;
+// in the OutOfRange case we have to wait a bit longer for the index to get synced
+func notOutOfRangeError(err error) bool {
+	statusErr, ok := status.FromError(err)
+	if !ok || err == nil {
+		return true
+	}
+	return statusErr.Code() != codes.OutOfRange
+}
+
+// Helper function to convert signatures with ExtensionData:
+//   - if the value pointed to by `extensionData` is non-nil, it is used as the extension data for every converted signature.
+//   - if it is nil, the first signature's own extension data is copied into `extensionData` and then used.
+func convertToMessageSigWithExtensionData(sigs []sdk.TransactionSignature, extensionData *[]byte) []*entities.Transaction_Signature {
+	msgSigs := make([]*entities.Transaction_Signature, len(sigs))
+
+	for i, sig := range sigs {
+		if *extensionData == nil {
+			// replace extension data by sig.ExtensionData
+			newExtensionData := make([]byte, len(sig.ExtensionData))
+			copy(newExtensionData, sig.ExtensionData)
+			*extensionData = newExtensionData
+		}
+
+		msgSigs[i] = &entities.Transaction_Signature{
+			Address:       sig.Address.Bytes(),
+			KeyId:         uint32(sig.KeyIndex),
+			Signature:     sig.Signature,
+			ExtensionData: *extensionData,
+		}
+	}
+	return msgSigs
+}
+
+// TestTransactionSignaturePlainExtensionData tests that the Access API properly handles the ExtensionData field
+// in transaction signatures for different authentication schemes.
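+// In the fixtures below, the first byte of ExtensionData identifies the
+// authentication scheme (0x00 for the plain scheme, 0x01 for WebAuthn); any
+// other leading byte is expected to be rejected by the access API.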
+func (s *AccessAPISuite) TestTransactionSignaturePlainExtensionData() { + accessNodeContainer := s.net.ContainerByName(testnet.PrimaryAN) + + // Establish a gRPC connection to the access API + conn, err := grpc.Dial(accessNodeContainer.Addr(testnet.GRPCPort), grpc.WithTransportCredentials(insecure.NewCredentials())) + s.Require().NoError(err) + s.Require().NotNil(conn) + defer conn.Close() + + // Create a client for the access API + accessClient := accessproto.NewAccessAPIClient(conn) + serviceClient, err := accessNodeContainer.TestnetClient() + s.Require().NoError(err) + s.Require().NotNil(serviceClient) + + // Get the latest block ID + latestBlockID, err := serviceClient.GetLatestBlockID(s.ctx) + s.Require().NoError(err) + + // Generate a new account transaction + accountKey := test.AccountKeyGenerator().New() + payer := serviceClient.SDKServiceAddress() + + tx, err := templates.CreateAccount([]*sdk.AccountKey{accountKey}, nil, payer) + s.Require().NoError(err) + tx.SetComputeLimit(1000). + SetReferenceBlockID(sdk.HexToID(latestBlockID.String())). + SetProposalKey(payer, 0, serviceClient.GetAndIncrementSeqNumber()). + SetPayer(payer) + + tx, err = serviceClient.SignTransaction(tx) + s.Require().NoError(err) + + // Convert the transaction to a message format expected by the access API + authorizers := make([][]byte, len(tx.Authorizers)) + for i, auth := range tx.Authorizers { + authorizers[i] = auth.Bytes() + } + + // Test cases for different ExtensionData values + testCases := []struct { + name string + extensionData []byte + description string + expectSuccess bool + }{ + { + name: "plain_scheme_nil", + extensionData: nil, + description: "Plain authentication scheme with nil ExtensionData", + expectSuccess: true, + }, + { + name: "plain_scheme_empty", + extensionData: []byte{}, + description: "Plain authentication scheme with empty ExtensionData", + expectSuccess: true, + }, + { + name: "plain_scheme_explicit", + extensionData: []byte{0x0}, + description: "Plain authentication scheme with explicit ExtensionData", + expectSuccess: true, + }, + { + name: "custom_extension_data", + extensionData: []byte{0x02, 0xAA, 0xBB, 0xCC}, // Invalid scheme with custom data + description: "Custom ExtensionData with invalid scheme", + expectSuccess: false, // Expect failure at the access API level due to invalid scheme + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.T().Logf("Testing: %s", tc.description) + + transactionMsg := &entities.Transaction{ + Script: tx.Script, + Arguments: tx.Arguments, + ReferenceBlockId: tx.ReferenceBlockID.Bytes(), + GasLimit: tx.GasLimit, + ProposalKey: &entities.Transaction_ProposalKey{ + Address: tx.ProposalKey.Address.Bytes(), + KeyId: uint32(tx.ProposalKey.KeyIndex), + SequenceNumber: tx.ProposalKey.SequenceNumber, + }, + Payer: tx.Payer.Bytes(), + Authorizers: authorizers, + PayloadSignatures: convertToMessageSigWithExtensionData(tx.PayloadSignatures, &tc.extensionData), + EnvelopeSignatures: convertToMessageSigWithExtensionData(tx.EnvelopeSignatures, &tc.extensionData), + } + + // Send and subscribe to the transaction status using the access API + subClient, err := accessClient.SendAndSubscribeTransactionStatuses(s.ctx, &accessproto.SendAndSubscribeTransactionStatusesRequest{ + Transaction: transactionMsg, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, + }) + s.Require().NoError(err) + + expectedCounter := uint64(0) + lastReportedTxStatus := entities.TransactionStatus_UNKNOWN + var txID sdk.Identifier + var statusCode uint32 
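+
+			// Drain the status subscription: for valid extension data the stream should
+			// deliver statuses in order until SEALED, while for invalid extension data
+			// the access API is expected to reject the transaction with an error.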
+
+			for {
+				resp, err := subClient.Recv()
+				if err != nil {
+					if err == io.EOF {
+						break
+					}
+					// For valid cases no stream error is expected; for invalid cases the
+					// access API rejects the transaction with an extension data error.
+					if tc.expectSuccess {
+						s.Require().NoError(err)
+					} else {
+						s.Require().Error(err)
+						s.Require().ErrorContains(err, "has invalid extension data")
+						break
+					}
+				}
+
+				if txID == sdk.EmptyID {
+					txID = sdk.Identifier(resp.TransactionResults.TransactionId)
+				}
+
+				s.Assert().Equal(expectedCounter, resp.GetMessageIndex())
+				s.Assert().Equal(txID, sdk.Identifier(resp.TransactionResults.TransactionId))
+
+				// Check that statuses are received one by one. The subscription should send a response for each status,
+				// and the messages should arrive in the order of transaction statuses.
+				// Expected order: pending(1) -> finalized(2) -> executed(3) -> sealed(4)
+				s.Assert().Equal(lastReportedTxStatus, resp.TransactionResults.Status-1)
+
+				expectedCounter++
+				lastReportedTxStatus = resp.TransactionResults.Status
+				statusCode = resp.TransactionResults.GetStatusCode()
+			}
+
+			if tc.expectSuccess {
+				// Check that the final transaction status is sealed
+				s.Assert().Equal(entities.TransactionStatus_SEALED, lastReportedTxStatus)
+				s.Assert().Equal(uint32(codes.OK), statusCode, "Expected transaction to be successful, but got status code: %d", statusCode)
+			}
+		})
+	}
+}
+
+// TestTransactionSignatureWebAuthnExtensionData tests the WebAuthn authentication scheme with properly constructed extension data.
+func (s *AccessAPISuite) TestTransactionSignatureWebAuthnExtensionData() {
+	accessNodeContainer := s.net.ContainerByName(testnet.PrimaryAN)
+
+	// Establish a gRPC connection to the access API
+	conn, err := grpc.Dial(accessNodeContainer.Addr(testnet.GRPCPort), grpc.WithTransportCredentials(insecure.NewCredentials()))
+	s.Require().NoError(err)
+	s.Require().NotNil(conn)
+	defer conn.Close()
+
+	// Create a client for the access API
+	accessClient := accessproto.NewAccessAPIClient(conn)
+	serviceClient, err := accessNodeContainer.TestnetClient()
+	s.Require().NoError(err)
+	s.Require().NotNil(serviceClient)
+
+	// Get the latest block ID
+	latestBlockID, err := serviceClient.GetLatestBlockID(s.ctx)
+	s.Require().NoError(err)
+
+	// Generate a new account transaction
+	accountKey := test.AccountKeyGenerator().New()
+	payer := serviceClient.SDKServiceAddress()
+
+	tx, err := templates.CreateAccount([]*sdk.AccountKey{accountKey}, nil, payer)
+	s.Require().NoError(err)
+	tx.SetComputeLimit(1000).
+		SetReferenceBlockID(sdk.HexToID(latestBlockID.String())).
+		SetProposalKey(payer, 0, serviceClient.GetAndIncrementSeqNumber()).
+		SetPayer(payer)
+
+	tx, err = serviceClient.SignTransactionWebAuthN(tx)
+	s.Require().NoError(err)
+
+	// Convert the transaction to a message format expected by the access API
+	authorizers := make([][]byte, len(tx.Authorizers))
+	for i, auth := range tx.Authorizers {
+		authorizers[i] = auth.Bytes()
+	}
+	s.Require().NoError(err)
+
+	// Test WebAuthn extension data with different scenarios
+	testCases := []struct {
+		name                     string
+		extensionDataReplacement []byte // If nil, use the original extension data from the signed transaction
+		description              string
+		expectSuccess            bool
+	}{
+		{
+			name:                     "webauthn_valid",
+			extensionDataReplacement: nil, // Use the original extension data from the signed transaction, which should be valid
+			description:              "WebAuthn scheme with valid extension data",
+			expectSuccess:            true,
+		},
+		{
+			name:                     "webauthn_invalid_minimal",
+			extensionDataReplacement: []byte{0x1}, // WebAuthn scheme identifier only
+			description:              "WebAuthn scheme with minimal extension data",
+			expectSuccess:            false, // Should fail validation due to incomplete WebAuthn data
+		},
+		{
+			name:                     "webauthn_invalid_scheme",
+			extensionDataReplacement: []byte{0x3, 0x01, 0x02, 0x03}, // Invalid scheme identifier
+			description:              "Invalid authentication scheme",
+			expectSuccess:            false,
+		},
+		{
+			name:                     "webauthn_malformed_data",
+			extensionDataReplacement: []byte{0x1, 0x01, 0x02}, // WebAuthn scheme with malformed data
+			description:              "WebAuthn scheme with malformed extension data",
+			expectSuccess:            false,
+		},
+	}
+
+	for _, tc := range testCases {
+		s.Run(tc.name, func() {
+			s.T().Logf("Testing: %s", tc.description)
+
+			transactionMsg := &entities.Transaction{
+				Script:           tx.Script,
+				Arguments:        tx.Arguments,
+				ReferenceBlockId: tx.ReferenceBlockID.Bytes(),
+				GasLimit:         tx.GasLimit,
+				ProposalKey: &entities.Transaction_ProposalKey{
+					Address:        tx.ProposalKey.Address.Bytes(),
+					KeyId:          uint32(tx.ProposalKey.KeyIndex),
+					SequenceNumber: tx.ProposalKey.SequenceNumber,
+				},
+				Payer:              tx.Payer.Bytes(),
+				Authorizers:        authorizers,
+				PayloadSignatures:  convertToMessageSigWithExtensionData(tx.PayloadSignatures, nil),
+				EnvelopeSignatures: convertToMessageSigWithExtensionData(tx.EnvelopeSignatures, &tc.extensionDataReplacement),
+			}
+
+			// Validate that the ExtensionData is set correctly before sending
+			for _, sig := range transactionMsg.EnvelopeSignatures {
+				// For these test cases specifically, we expect ExtensionData to be set
+				s.Assert().GreaterOrEqual(len(sig.ExtensionData), 1, "ExtensionData should have at least 1 byte for scheme identifier")
+			}
+
+			// Send and subscribe to the transaction status using the access API
+			subClient, err := accessClient.SendAndSubscribeTransactionStatuses(s.ctx, &accessproto.SendAndSubscribeTransactionStatusesRequest{
+				Transaction:          transactionMsg,
+				EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+			})
+
+			s.Require().NoError(err)
+
+			expectedCounter := uint64(0)
+			lastReportedTxStatus := entities.TransactionStatus_UNKNOWN
+			var txID sdk.Identifier
+			var statusCode uint32
+			var errorMessage string
+
+			for {
+				resp, err := subClient.Recv()
+				if err != nil {
+					if err == io.EOF {
+						break
+					}
+					// For valid cases no stream error is expected; for invalid cases the
+					// access API rejects the transaction with an extension data error.
+					if tc.expectSuccess {
+						s.Require().NoError(err)
+					} else {
+						s.Require().Error(err)
+						s.Require().ErrorContains(err, "has invalid extension data")
+						break
+					}
+				}
+
+				if txID == sdk.EmptyID {
+					txID = sdk.Identifier(resp.TransactionResults.TransactionId)
+				}
+
+				s.Assert().Equal(expectedCounter, resp.GetMessageIndex())
+
s.Assert().Equal(txID, sdk.Identifier(resp.TransactionResults.TransactionId))
+
+				// Check that statuses are received one by one. The subscription should send a response for each status,
+				// and the messages should arrive in the order of transaction statuses.
+				// Expected order: pending(1) -> finalized(2) -> executed(3) -> sealed(4)
+				s.Assert().Equal(lastReportedTxStatus, resp.TransactionResults.Status-1)
+
+				expectedCounter++
+				lastReportedTxStatus = resp.TransactionResults.Status
+				statusCode = resp.TransactionResults.GetStatusCode()
+				errorMessage = resp.TransactionResults.GetErrorMessage()
+			}
+
+			if tc.expectSuccess {
+				// Check that the final transaction status is sealed and the transaction succeeded
+				s.Assert().Equal(entities.TransactionStatus_SEALED, lastReportedTxStatus)
+				s.Assert().Equal(uint32(codes.OK), statusCode, "Expected transaction to be successful, but got status code: %d with message: %s", statusCode, errorMessage)
+			}
+		})
+	}
+}
+
+// TestExtensionDataPreservation tests that the ExtensionData field is properly preserved
+// when transactions are submitted and retrieved through the Access API.
+func (s *AccessAPISuite) TestExtensionDataPreservation() {
+	accessNodeContainer := s.net.ContainerByName(testnet.PrimaryAN)
+
+	// Establish a gRPC connection to the access API
+	conn, err := grpc.Dial(accessNodeContainer.Addr(testnet.GRPCPort), grpc.WithTransportCredentials(insecure.NewCredentials()))
+	s.Require().NoError(err)
+	s.Require().NotNil(conn)
+	defer conn.Close()
+
+	// Create a client for the access API
+	accessClient := accessproto.NewAccessAPIClient(conn)
+	serviceClient, err := accessNodeContainer.TestnetClient()
+	s.Require().NoError(err)
+	s.Require().NotNil(serviceClient)
+
+	// Get the latest block ID
+	latestBlockID, err := serviceClient.GetLatestBlockID(s.ctx)
+	s.Require().NoError(err)
+
+	// Generate a new account transaction
+	accountKey := test.AccountKeyGenerator().New()
+	payer := serviceClient.SDKServiceAddress()
+
+	// Test with different ExtensionData values to ensure they are preserved
+	testCases := []struct {
+		name          string
+		extensionData []byte
+		description   string
+	}{
+		{
+			name:          "plain_scheme_preservation",
+			extensionData: []byte{0x0},
+			description:   "Plain authentication scheme ExtensionData preservation",
+		},
+		{
+			name:          "webauthn_scheme_preservation",
+			extensionData: nil, // valid extension data is populated below in the test
+			description:   "WebAuthn authentication scheme ExtensionData preservation",
+		},
+	}
+
+	for _, tc := range testCases {
+		s.Run(tc.name, func() {
+			s.T().Logf("Testing: %s", tc.description)
+
+			tx, err := templates.CreateAccount([]*sdk.AccountKey{accountKey}, nil, payer)
+			s.Require().NoError(err)
+			tx.SetComputeLimit(1000).
+				SetReferenceBlockID(sdk.HexToID(latestBlockID.String())).
+				SetProposalKey(payer, 0, serviceClient.GetAndIncrementSeqNumber()).
+				SetPayer(payer)
+
+			switch tc.name {
+			case "plain_scheme_preservation":
+				tx, err = serviceClient.SignTransaction(tx)
+			case "webauthn_scheme_preservation":
+				tx, err = serviceClient.SignTransactionWebAuthN(tx)
+			default:
+				err = fmt.Errorf("test must be signed for plain or webauthn schemes")
+			}
+			s.Require().NoError(err)
+
+			// Convert the transaction to a message format expected by the access API
+			authorizers := make([][]byte, len(tx.Authorizers))
+			for i, auth := range tx.Authorizers {
+				authorizers[i] = auth.Bytes()
+			}
+
+			transactionMsg := &entities.Transaction{
+				Script:           tx.Script,
+				Arguments:        tx.Arguments,
+				ReferenceBlockId: tx.ReferenceBlockID.Bytes(),
+				GasLimit:         tx.GasLimit,
+				ProposalKey: &entities.Transaction_ProposalKey{
+					Address:        tx.ProposalKey.Address.Bytes(),
+					KeyId:          uint32(tx.ProposalKey.KeyIndex),
+					SequenceNumber: tx.ProposalKey.SequenceNumber,
+				},
+				Payer:              tx.Payer.Bytes(),
+				Authorizers:        authorizers,
+				PayloadSignatures:  convertToMessageSigWithExtensionData(tx.PayloadSignatures, &tc.extensionData),
+				EnvelopeSignatures: convertToMessageSigWithExtensionData(tx.EnvelopeSignatures, &tc.extensionData),
+			}
+
+			// Validate that the ExtensionData is set correctly before sending in the webauthn case
+			if tc.name == "webauthn_scheme_preservation" {
+				for _, sig := range transactionMsg.EnvelopeSignatures {
+					// For these test cases specifically, we expect ExtensionData to be at least 2 bytes
+					s.Assert().GreaterOrEqual(len(sig.ExtensionData), 2, "ExtensionData should have at least 2 bytes for the webauthn scheme")
+				}
+			}
+
+			// Send and subscribe to the transaction status using the access API
+			subClient, err := accessClient.SendAndSubscribeTransactionStatuses(s.ctx, &accessproto.SendAndSubscribeTransactionStatusesRequest{
+				Transaction:          transactionMsg,
+				EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+			})
+			s.Require().NoError(err)
+
+			var txID sdk.Identifier
+			lastReportedTxStatus := entities.TransactionStatus_UNKNOWN
+
+			// Wait for the transaction to be sealed
+			for {
+				resp, err := subClient.Recv()
+				if err != nil {
+					if err == io.EOF {
+						break
+					}
+					s.Require().NoError(err)
+				}
+
+				if txID == sdk.EmptyID {
+					txID = sdk.Identifier(resp.TransactionResults.TransactionId)
+				}
+
+				lastReportedTxStatus = resp.TransactionResults.Status
+
+				if lastReportedTxStatus == entities.TransactionStatus_SEALED {
+					break
+				}
+			}
+
+			// Verify the transaction was sealed
+			s.Assert().Equal(entities.TransactionStatus_SEALED, lastReportedTxStatus)
+
+			// Now retrieve the transaction and verify ExtensionData is preserved
+			s.T().Logf("Transaction %s was successfully processed with ExtensionData: %v", txID, tc.extensionData)
+
+			txFromAccess, err := accessClient.GetTransaction(s.ctx, &accessproto.GetTransactionRequest{
+				Id: txID.Bytes(),
+			})
+			s.Require().NoError(err)
+
+			// Verify the retrieved transaction matches the original
+			envelopeSigs := txFromAccess.GetTransaction().EnvelopeSignatures
+			s.Assert().Equal(tc.extensionData, envelopeSigs[0].ExtensionData, "ExtensionData should be preserved in the envelope signature")
+		})
+	}
+}
+
+// TestRejectedInvalidSignatureFormat tests that the access API performs sanity checks
+// on the transaction signature format and rejects invalid formats.
+func (s *AccessAPISuite) TestRejectedInvalidSignatureFormat() {
+	accessNodeContainer := s.net.ContainerByName(testnet.PrimaryAN)
+
+	// Establish a gRPC connection to the access API
+	conn, err := grpc.Dial(accessNodeContainer.Addr(testnet.GRPCPort),
grpc.WithTransportCredentials(insecure.NewCredentials())) + s.Require().NoError(err) + s.Require().NotNil(conn) + defer conn.Close() + + // Create a client for the access API + accessClient := accessproto.NewAccessAPIClient(conn) + serviceClient, err := accessNodeContainer.TestnetClient() + s.Require().NoError(err) + s.Require().NotNil(serviceClient) + + // Get the latest block ID + latestBlockID, err := serviceClient.GetLatestBlockID(s.ctx) + s.Require().NoError(err) + + // Generate a new account transaction + accountKey := test.AccountKeyGenerator().New() + payer := serviceClient.SDKServiceAddress() + + tx, err := templates.CreateAccount([]*sdk.AccountKey{accountKey}, nil, payer) + s.Require().NoError(err) + tx.SetComputeLimit(1000). + SetReferenceBlockID(sdk.HexToID(latestBlockID.String())). + SetProposalKey(payer, 0, serviceClient.GetAndIncrementSeqNumber()). + SetPayer(payer) + + tx, err = serviceClient.SignTransaction(tx) + s.Require().NoError(err) + + // Convert the transaction to a message format expected by the access API + authorizers := make([][]byte, len(tx.Authorizers)) + for i, auth := range tx.Authorizers { + authorizers[i] = auth.Bytes() + } + + // Test with different ExtensionData values to ensure they are preserved + testCases := []struct { + name string + extensionData []byte + }{ + { + name: "invalid_plain_scheme", + extensionData: []byte{0x0, 0x1}, + }, + { + name: "invalid_webauthn_scheme", + extensionData: []byte{0x1, 0x2, 0x3, 0x4, 0x5}, + }, + { + name: "invalid_authentication_scheme", + extensionData: []byte{0x02, 0x11, 0x22, 0x33, 0x44, 0x55}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.T().Logf("Testing: %s", tc.name) + + transactionMsg := &entities.Transaction{ + Script: tx.Script, + Arguments: tx.Arguments, + ReferenceBlockId: tx.ReferenceBlockID.Bytes(), + GasLimit: tx.GasLimit, + ProposalKey: &entities.Transaction_ProposalKey{ + Address: tx.ProposalKey.Address.Bytes(), + KeyId: uint32(tx.ProposalKey.KeyIndex), + SequenceNumber: tx.ProposalKey.SequenceNumber, + }, + Payer: tx.Payer.Bytes(), + Authorizers: authorizers, + PayloadSignatures: convertToMessageSigWithExtensionData(tx.PayloadSignatures, &tc.extensionData), + EnvelopeSignatures: convertToMessageSigWithExtensionData(tx.EnvelopeSignatures, &tc.extensionData), + } + + // Send and subscribe to the transaction status using the access API + subClient, err := accessClient.SendAndSubscribeTransactionStatuses(s.ctx, &accessproto.SendAndSubscribeTransactionStatusesRequest{ + Transaction: transactionMsg, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, + }) + s.Require().NoError(err) + + // check that the tx submission errors at the access API level + _, err = subClient.Recv() + s.Require().Error(err) + s.Require().ErrorContains(err, "has invalid extension data") + }) + } +} diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_test.go new file mode 100644 index 00000000000..1149ad65acf --- /dev/null +++ b/integration/tests/access/cohort2/observer_indexer_enabled_test.go @@ -0,0 +1,744 @@ +package cohort2 + +import ( + "bytes" + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + sdk "github.com/onflow/flow-go-sdk" + sdkcrypto "github.com/onflow/flow-go-sdk/crypto" + "github.com/onflow/flow-go-sdk/templates" + + 
"github.com/onflow/flow-go/engine/access/rpc/backend/query_mode" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" +) + +var ( + simpleScript = `access(all) fun main(): Int { return 42; }` +) + +func TestObserverIndexerEnabled(t *testing.T) { + unittest.SkipUnless(t, unittest.TEST_FLAKY, "flaky") + suite.Run(t, new(ObserverIndexerEnabledSuite)) +} + +// ObserverIndexerEnabledSuite tests the observer with the indexer enabled. +// It uses ObserverSuite as a base to reuse the test cases that need to be run for any observer variation. +type ObserverIndexerEnabledSuite struct { + ObserverSuite +} + +// SetupTest sets up the test suite by starting the network and preparing the observers client. +// By overriding this function, we can ensure that the observers are started with correct parameters and select +// the RPCs and REST endpoints that are tested. +func (s *ObserverIndexerEnabledSuite) SetupTest() { + s.localRpc = map[string]struct{}{ + "Ping": {}, + "GetLatestBlockHeader": {}, + "GetBlockHeaderByID": {}, + "GetBlockHeaderByHeight": {}, + "GetLatestBlock": {}, + "GetBlockByID": {}, + "GetBlockByHeight": {}, + "GetLatestProtocolStateSnapshot": {}, + "GetNetworkParameters": {}, + "GetTransactionsByBlockID": {}, + "GetTransaction": {}, + "GetCollectionByID": {}, + "ExecuteScriptAtBlockID": {}, + "ExecuteScriptAtLatestBlock": {}, + "ExecuteScriptAtBlockHeight": {}, + "GetAccount": {}, + "GetAccountAtLatestBlock": {}, + "GetAccountAtBlockHeight": {}, + } + + s.localRest = map[string]struct{}{ + "getBlocksByIDs": {}, + "getBlocksByHeight": {}, + "getBlockPayloadByID": {}, + "getNetworkParameters": {}, + "getNodeVersionInfo": {}, + } + + s.testedRPCs = s.getRPCs + s.testedRestEndpoints = s.getRestEndpoints + + consensusConfigs := []func(config *testnet.NodeConfig){ + // `cruise-ctl-fallback-proposal-duration` is set to 250ms instead to of 100ms + // to purposely slow down the block rate. This is needed since the crypto module + // update providing faster BLS operations. 
+ // TODO: fix the access integration test logic to function without slowing down + // the block rate + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=250ms"), + testnet.WithAdditionalFlagf("--required-verification-seal-approvals=%d", 1), + testnet.WithAdditionalFlagf("--required-construction-seal-approvals=%d", 1), + testnet.WithLogLevel(zerolog.FatalLevel), + } + + nodeConfigs := []testnet.NodeConfig{ + // access node with unstaked nodes supported + testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--supports-observer=true"), + testnet.WithAdditionalFlagf("--public-network-execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--script-execution-mode=%s", query_mode.IndexQueryModeExecutionNodesOnly), + testnet.WithAdditionalFlagf("--tx-result-query-mode=%s", query_mode.IndexQueryModeExecutionNodesOnly), + testnet.WithAdditionalFlag("--event-query-mode=execution-nodes-only"), + ), + + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), + } + + observers := []testnet.ObserverConfig{ + { + LogLevel: zerolog.InfoLevel, + AdditionalFlags: []string{ + fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + "--execution-data-sync-enabled=true", + "--execution-data-indexing-enabled=true", + "--local-service-api-enabled=true", + "--event-query-mode=execution-nodes-only", + }, + }, + { + ContainerName: "observer_2", + LogLevel: zerolog.InfoLevel, + }, + } + + // prepare the network + conf := testnet.NewNetworkConfig("observer_indexing_enabled_test", nodeConfigs, testnet.WithObservers(observers...)) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) + + // start the network + ctx, cancel := context.WithCancel(context.Background()) + s.cancel = cancel + + s.net.Start(ctx) +} + +// TestObserverIndexedRPCsHappyPath tests RPCs that are handled by the observer by using a dedicated indexer for the events. +// To ensure that the observer is handling these RPCs, we stop the upstream access node and verify that the observer client +// returns success for valid requests and errors for invalid ones. +func (s *ObserverIndexerEnabledSuite) TestObserverIndexedRPCsHappyPath() { + unittest.SkipUnless(s.T(), unittest.TEST_FLAKY, "flaky") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + t := s.T() + + // prepare environment to create a new account + serviceAccountClient, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() + require.NoError(t, err) + + latestBlockID, err := serviceAccountClient.GetLatestBlockID(ctx) + require.NoError(t, err) + + // create new account to deploy Counter to + accountPrivateKey := lib.RandomPrivateKey() + + accountKey := sdk.NewAccountKey(). + FromPrivateKey(accountPrivateKey). + SetHashAlgo(sdkcrypto.SHA3_256). 
+		SetWeight(sdk.AccountKeyWeightThreshold)
+
+	serviceAddress := sdk.Address(serviceAccountClient.Chain.ServiceAddress())
+
+	// Generate the account creation transaction
+	createAccountTx, err := templates.CreateAccount(
+		[]*sdk.AccountKey{accountKey},
+		[]templates.Contract{
+			{
+				Name:   lib.CounterContract.Name,
+				Source: lib.CounterContract.ToCadence(),
+			},
+		}, serviceAddress)
+	require.NoError(t, err)
+	createAccountTx.
+		SetReferenceBlockID(sdk.Identifier(latestBlockID)).
+		SetProposalKey(serviceAddress, 0, serviceAccountClient.GetAndIncrementSeqNumber()).
+		SetPayer(serviceAddress).
+		SetComputeLimit(9999)
+
+	// send the create account tx
+	childCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+	err = serviceAccountClient.SignAndSendTransaction(childCtx, createAccountTx)
+	require.NoError(t, err)
+
+	cancel()
+
+	// wait for account to be created
+	var accountCreationTxRes *sdk.TransactionResult
+	unittest.RequireReturnsBefore(t, func() {
+		accountCreationTxRes, err = serviceAccountClient.WaitForSealed(context.Background(), createAccountTx.ID())
+		require.NoError(t, err)
+	}, 20*time.Second, "has to seal before timeout")
+
+	// obtain the account address
+	var accountCreatedPayload []byte
+	var newAccountAddress sdk.Address
+	for _, event := range accountCreationTxRes.Events {
+		if event.Type == sdk.EventAccountCreated {
+			accountCreatedEvent := sdk.AccountCreatedEvent(event)
+			accountCreatedPayload = accountCreatedEvent.Payload
+			newAccountAddress = accountCreatedEvent.Address()
+			break
+		}
+	}
+	require.NotEqual(t, sdk.EmptyAddress, newAccountAddress)
+
+	// now we can use the observer to query data that has to be locally indexed
+
+	// get an observer client
+	observer, err := s.getObserverClient()
+	require.NoError(t, err)
+
+	// wait for data to be synced by observer
+	require.Eventually(t, func() bool {
+		_, err := observer.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{
+			Address:     newAccountAddress.Bytes(),
+			BlockHeight: accountCreationTxRes.BlockHeight,
+		})
+		statusErr, ok := status.FromError(err)
+		if !ok || err == nil {
+			return true
+		}
+		return statusErr.Code() != codes.OutOfRange
+	}, 30*time.Second, 1*time.Second)
+
+	blockWithAccount, err := observer.GetBlockHeaderByID(ctx, &accessproto.GetBlockHeaderByIDRequest{
+		Id: accountCreationTxRes.BlockID[:],
+	})
+	require.NoError(t, err)
+
+	// stop the upstream access container
+	err = s.net.StopContainerByName(ctx, testnet.PrimaryAN)
+	require.NoError(t, err)
+
+	eventsByBlockID, err := observer.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{
+		Type:                 sdk.EventAccountCreated,
+		BlockIds:             [][]byte{blockWithAccount.Block.Id},
+		EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+	})
+	require.NoError(t, err)
+
+	eventsByHeight, err := observer.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{
+		Type:                 sdk.EventAccountCreated,
+		StartHeight:          blockWithAccount.Block.Height,
+		EndHeight:            blockWithAccount.Block.Height,
+		EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+	})
+	require.NoError(t, err)
+
+	// validate that the event we are looking for is present
+	require.Equal(t, eventsByHeight.Results, eventsByBlockID.Results)
+	found := false
+	for _, eventsInBlock := range eventsByHeight.Results {
+		for _, event := range eventsInBlock.Events {
+			if event.Type == sdk.EventAccountCreated {
+				if !bytes.Equal(event.Payload, accountCreatedPayload) {
+					t.Fatalf("event payloads don't match")
+				}
+				found = true
+			}
+		}
+	}
+
require.True(t, found)
+}
+
+// TestAllObserverIndexedRPCsHappyPath tests the observer with the indexer enabled, the
+// observer configured to proxy requests to an access node, and the access node itself. All
+// responses are compared to ensure all of the endpoints are working as expected.
+// For now the observer only supports the following RPCs:
+// - GetAccountAtBlockHeight
+// - GetEventsForHeightRange
+// - GetEventsForBlockIDs
+// - GetSystemTransaction
+// - GetTransactionsByBlockID
+// - GetTransactionResultsByBlockID
+// - ExecuteScriptAtBlockID
+// - ExecuteScriptAtBlockHeight
+// - GetExecutionResultByID
+// - GetCollectionByID
+// - GetTransaction
+// - GetTransactionResult
+// - GetTransactionResultByIndex
+func (s *ObserverIndexerEnabledSuite) TestAllObserverIndexedRPCsHappyPath() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	t := s.T()
+
+	// prepare environment to create a new account
+	serviceAccountClient, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient()
+	require.NoError(t, err)
+
+	latestBlockID, err := serviceAccountClient.GetLatestBlockID(ctx)
+	require.NoError(t, err)
+
+	// create new account to deploy Counter to
+	accountPrivateKey := lib.RandomPrivateKey()
+
+	accountKey := sdk.NewAccountKey().
+		FromPrivateKey(accountPrivateKey).
+		SetHashAlgo(sdkcrypto.SHA3_256).
+		SetWeight(sdk.AccountKeyWeightThreshold)
+
+	serviceAddress := sdk.Address(serviceAccountClient.Chain.ServiceAddress())
+
+	// Generate the account creation transaction
+	createAccountTx, err := templates.CreateAccount(
+		[]*sdk.AccountKey{accountKey},
+		[]templates.Contract{
+			{
+				Name:   lib.CounterContract.Name,
+				Source: lib.CounterContract.ToCadence(),
+			},
+		}, serviceAddress)
+	require.NoError(t, err)
+
+	createAccountTx.
+		SetReferenceBlockID(sdk.Identifier(latestBlockID)).
+		SetProposalKey(serviceAddress, 0, serviceAccountClient.GetAndIncrementSeqNumber()).
+		SetPayer(serviceAddress).
+		SetComputeLimit(9999)
+
+	// send the create account tx
+	childCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+	err = serviceAccountClient.SignAndSendTransaction(childCtx, createAccountTx)
+	require.NoError(t, err)
+
+	cancel()
+
+	// wait for account to be created
+	var accountCreationTxRes *sdk.TransactionResult
+	unittest.RequireReturnsBefore(t, func() {
+		accountCreationTxRes, err = serviceAccountClient.WaitForSealed(context.Background(), createAccountTx.ID())
+		require.NoError(t, err)
+	}, 20*time.Second, "has to seal before timeout")
+
+	// obtain the account address
+	var accountCreatedPayload []byte
+	var newAccountAddress sdk.Address
+	for _, event := range accountCreationTxRes.Events {
+		if event.Type == sdk.EventAccountCreated {
+			accountCreatedEvent := sdk.AccountCreatedEvent(event)
+			accountCreatedPayload = accountCreatedEvent.Payload
+			newAccountAddress = accountCreatedEvent.Address()
+			break
+		}
+	}
+	require.NotEqual(t, sdk.EmptyAddress, newAccountAddress)
+
+	// now we can use observerLocal to query events from data that must be indexed locally
+
+	// get an access node client
+	accessNode, err := s.getClient(s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort))
+	require.NoError(t, err)
+
+	// get a client for the observer with the indexer enabled
+	observerLocal, err := s.getObserverClient()
+	require.NoError(t, err)
+
+	// get an upstream observer client
+	observerUpstream, err := s.getClient(s.net.ContainerByName("observer_2").Addr(testnet.GRPCPort))
+	require.NoError(t, err)
+
+	// wait for data to be synced by observerLocal
+	require.Eventually(t, func() bool {
+		_, err := observerLocal.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{
+			Address:     newAccountAddress.Bytes(),
+			BlockHeight: accountCreationTxRes.BlockHeight,
+		})
+		statusErr, ok := status.FromError(err)
+		if !ok || err == nil {
+			return true
+		}
+		return statusErr.Code() != codes.OutOfRange
+	}, 30*time.Second, 1*time.Second)
+
+	blockWithAccount, err := observerLocal.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{
+		Id:                accountCreationTxRes.BlockID[:],
+		FullBlockResponse: true,
+	})
+	require.NoError(t, err)
+
+	checkRPC := func(rpcCall func(client accessproto.AccessAPIClient) (any, error)) {
+		observerRes, err := rpcCall(observerLocal)
+		require.NoError(s.T(), err)
+		observerUpstreamRes, err := rpcCall(observerUpstream)
+		require.NoError(s.T(), err)
+		accessRes, err := rpcCall(accessNode)
+		require.NoError(s.T(), err)
+
+		require.Equal(s.T(), observerRes, observerUpstreamRes)
+		require.Equal(s.T(), observerRes, accessRes)
+	}
+
+	// GetEventsForBlockIDs
+	checkRPC(func(client accessproto.AccessAPIClient) (any, error) {
+		res, err := client.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{
+			Type:                 sdk.EventAccountCreated,
+			BlockIds:             [][]byte{blockWithAccount.Block.Id},
+			EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0,
+		})
+		return res.Results, err
+	})
+
+	var txIndex uint32
+	found := false
+
+	// GetEventsForHeightRange
+	checkRPC(func(client accessproto.AccessAPIClient) (any, error) {
+		res, err := client.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{
+			Type:                 sdk.EventAccountCreated,
+			StartHeight:          blockWithAccount.Block.Height,
+			EndHeight:            blockWithAccount.Block.Height,
+			EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0,
+		})
+
+		// iterate through the response Results to find the txIndex of the event
+		for _, eventsInBlock := range res.Results {
+			for _, event := range eventsInBlock.Events {
+ if event.Type == sdk.EventAccountCreated { + if bytes.Equal(event.Payload, accountCreatedPayload) { + found = true + txIndex = event.TransactionIndex + } + } + } + } + require.True(t, found) + return res.Results, err + }) + + // GetSystemTransaction + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockWithAccount.Block.Id, + }) + return res.Transaction, err + }) + + // GetExecutionResultByID + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + converted, err := convert.MessageToBlock(blockWithAccount.Block) + require.NoError(t, err) + + resultId := converted.Payload.Results[0].ID() + res, err := client.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: convert.IdentifierToMessage(resultId), + }) + return res.ExecutionResult, err + }) + + // GetTransaction + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransaction(ctx, &accessproto.GetTransactionRequest{ + Id: accountCreationTxRes.TransactionID.Bytes(), + BlockId: blockWithAccount.Block.Id, + CollectionId: nil, + }) + return res.Transaction, err + }) + + // GetTransactionResult + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ + Id: accountCreationTxRes.TransactionID.Bytes(), + BlockId: blockWithAccount.Block.Id, + CollectionId: accountCreationTxRes.CollectionID.Bytes(), + }) + return res.Events, err + }) + + // GetTransactionResultByIndex + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ + BlockId: blockWithAccount.Block.Id, + Index: txIndex, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + return res.Events, err + }) + + // GetTransactionResultsByBlockID + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockWithAccount.Block.Id, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + return res.TransactionResults, err + }) + + // GetTransactionsByBlockID + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockWithAccount.Block.Id, + }) + return res.Transactions, err + }) + + // GetCollectionByID + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ + Id: accountCreationTxRes.CollectionID.Bytes(), + }) + return res.Collection, err + }) + + // ExecuteScriptAtBlockHeight + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: blockWithAccount.Block.Height, + Script: []byte(simpleScript), + Arguments: make([][]byte, 0), + }) + return res.Value, err + }) + + // ExecuteScriptAtBlockID + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockWithAccount.Block.Id, + Script: []byte(simpleScript), + Arguments: make([][]byte, 0), + }) + return res.Value, err + }) + + // GetAccountAtBlockHeight + 
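+	// (queried at the account creation height, which the local index has already
+	// reached per the Eventually check above)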
checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: newAccountAddress.Bytes(), + BlockHeight: accountCreationTxRes.BlockHeight, + }) + return res.Account, err + }) +} + +func (s *ObserverIndexerEnabledSuite) getRPCs() []RPCTest { + return []RPCTest{ + {name: "Ping", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.Ping(ctx, &accessproto.PingRequest{}) + return err + }}, + {name: "GetLatestBlockHeader", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetLatestBlockHeader(ctx, &accessproto.GetLatestBlockHeaderRequest{}) + return err + }}, + {name: "GetBlockHeaderByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockHeaderByID(ctx, &accessproto.GetBlockHeaderByIDRequest{ + Id: make([]byte, 32), + }) + return err + }}, + {name: "GetBlockHeaderByHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockHeaderByHeight(ctx, &accessproto.GetBlockHeaderByHeightRequest{}) + return err + }}, + {name: "GetLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetLatestBlock(ctx, &accessproto.GetLatestBlockRequest{}) + return err + }}, + {name: "GetBlockByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{Id: make([]byte, 32)}) + return err + }}, + {name: "GetBlockByHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockByHeight(ctx, &accessproto.GetBlockByHeightRequest{}) + return err + }}, + {name: "GetCollectionByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{Id: make([]byte, 32)}) + return err + }}, + {name: "SendTransaction", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.SendTransaction(ctx, &accessproto.SendTransactionRequest{}) + return err + }}, + {name: "GetTransaction", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransaction(ctx, &accessproto.GetTransactionRequest{Id: make([]byte, 32)}) + return err + }}, + {name: "GetTransactionResult", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{}) + return err + }}, + {name: "GetTransactionResultByIndex", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{}) + return err + }}, + {name: "GetTransactionResultsByBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{}) + return err + }}, + {name: "GetTransactionsByBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{BlockId: make([]byte, 32)}) + return err + }}, + {name: "GetAccount", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetAccount(ctx, 
&accessproto.GetAccountRequest{ + Address: flow.Localnet.Chain().ServiceAddress().Bytes(), + }) + return err + }}, + {name: "GetAccountAtLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{ + Address: flow.Localnet.Chain().ServiceAddress().Bytes(), + }) + return err + }}, + {name: "GetAccountAtBlockHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: flow.Localnet.Chain().ServiceAddress().Bytes(), + BlockHeight: 0, + }) + return err + }}, + {name: "ExecuteScriptAtLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.ExecuteScriptAtLatestBlock(ctx, &accessproto.ExecuteScriptAtLatestBlockRequest{ + Script: []byte(simpleScript), + Arguments: make([][]byte, 0), + }) + return err + }}, + {name: "ExecuteScriptAtBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: make([]byte, 32), + Script: []byte("dummy script"), + Arguments: make([][]byte, 0), + }) + return err + }}, + {name: "ExecuteScriptAtBlockHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: 0, + Script: []byte(simpleScript), + Arguments: make([][]byte, 0), + }) + return err + }}, + {name: "GetNetworkParameters", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetNetworkParameters(ctx, &accessproto.GetNetworkParametersRequest{}) + return err + }}, + {name: "GetLatestProtocolStateSnapshot", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetLatestProtocolStateSnapshot(ctx, &accessproto.GetLatestProtocolStateSnapshotRequest{}) + return err + }}, + {name: "GetExecutionResultForBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetExecutionResultForBlockID(ctx, &accessproto.GetExecutionResultForBlockIDRequest{}) + return err + }}, + } +} + +func (s *ObserverIndexerEnabledSuite) getRestEndpoints() []RestEndpointTest { + transactionId := unittest.IdentifierFixture().String() + account := flow.Localnet.Chain().ServiceAddress().String() + block := unittest.BlockFixture() + executionResult := unittest.ExecutionResultFixture() + collection := unittest.CollectionFixture(2) + eventType := unittest.EventTypeFixture(flow.Localnet) + + return []RestEndpointTest{ + { + name: "getTransactionByID", + method: http.MethodGet, + path: "/transactions/" + transactionId, + }, + { + name: "createTransaction", + method: http.MethodPost, + path: "/transactions", + body: createTx(s.T(), s.net), + }, + { + name: "getTransactionResultByID", + method: http.MethodGet, + path: fmt.Sprintf("/transaction_results/%s?block_id=%s&collection_id=%s", transactionId, block.ID().String(), collection.ID().String()), + }, + { + name: "getBlocksByIDs", + method: http.MethodGet, + path: "/blocks/" + block.ID().String(), + }, + { + name: "getBlocksByHeight", + method: http.MethodGet, + path: "/blocks?height=1", + }, + { + name: "getBlockPayloadByID", + method: http.MethodGet, + path: "/blocks/" + block.ID().String() + "/payload", + }, + { + name: 
"getExecutionResultByID", + method: http.MethodGet, + path: "/execution_results/" + executionResult.ID().String(), + }, + { + name: "getExecutionResultByBlockID", + method: http.MethodGet, + path: "/execution_results?block_id=" + block.ID().String(), + }, + { + name: "getCollectionByID", + method: http.MethodGet, + path: "/collections/" + collection.ID().String(), + }, + { + name: "executeScript", + method: http.MethodPost, + path: "/scripts", + body: createScript(), + }, + { + name: "getAccount", + method: http.MethodGet, + path: "/accounts/" + account + "?block_height=1", + }, + { + name: "getEvents", + method: http.MethodGet, + path: fmt.Sprintf("/events?type=%s&start_height=%d&end_height=%d", eventType, 0, 3), + }, + { + name: "getNetworkParameters", + method: http.MethodGet, + path: "/network/parameters", + }, + { + name: "getNodeVersionInfo", + method: http.MethodGet, + path: "/node_version_info", + }, + } +} diff --git a/integration/tests/access/cohort2/observer_test.go b/integration/tests/access/cohort2/observer_test.go new file mode 100644 index 00000000000..797ed6b8045 --- /dev/null +++ b/integration/tests/access/cohort2/observer_test.go @@ -0,0 +1,525 @@ +package cohort2 + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" +) + +func TestObserver(t *testing.T) { + suite.Run(t, new(ObserverSuite)) +} + +// ObserverSuite is a general test suite for observer nodes APIs. +// It is used to test the observer node's RPC and REST APIs. +// It verified that the observer's API behaves similarly to the access node's API. 
+type ObserverSuite struct { + suite.Suite + net *testnet.FlowNetwork + localRpc map[string]struct{} // RPC methods handled locally by observer + localRest map[string]struct{} // REST endpoints handled locally by observer + + // we use functors to allow reusing the same test suite for different sets of RPCs and REST endpoints + testedRPCs func() []RPCTest // RPC methods to test + testedRestEndpoints func() []RestEndpointTest // REST endpoints to test + + cancel context.CancelFunc +} + +func (s *ObserverSuite) TearDownTest() { + if s.net != nil { + s.net.Remove() + s.net = nil + } + if s.cancel != nil { + s.cancel() + s.cancel = nil + } +} + +func (s *ObserverSuite) SetupTest() { + s.localRpc = map[string]struct{}{ + "Ping": {}, + "GetLatestBlockHeader": {}, + "GetBlockHeaderByID": {}, + "GetBlockHeaderByHeight": {}, + "GetLatestBlock": {}, + "GetBlockByID": {}, + "GetBlockByHeight": {}, + "GetLatestProtocolStateSnapshot": {}, + "GetNetworkParameters": {}, + } + + s.localRest = map[string]struct{}{ + "getBlocksByIDs": {}, + "getBlocksByHeight": {}, + "getBlockPayloadByID": {}, + "getNetworkParameters": {}, + "getNodeVersionInfo": {}, + } + + s.testedRPCs = s.getRPCs + s.testedRestEndpoints = s.getRestEndpoints + + nodeConfigs := []testnet.NodeConfig{ + // access node with unstaked nodes supported + testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--supports-observer=true"), + ), + + // need one dummy execution node + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + + // need one dummy verification node (unused ghost) + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), + + // need one controllable collection node + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + + // need three consensus nodes (unused ghost) + testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), + testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), + testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), + } + + observers := []testnet.ObserverConfig{{ + LogLevel: zerolog.InfoLevel, + }} + + // prepare the network + conf := testnet.NewNetworkConfig("observer_api_test", nodeConfigs, testnet.WithObservers(observers...)) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) + + // start the network + ctx, cancel := context.WithCancel(context.Background()) + s.cancel = cancel + + s.net.Start(ctx) +} + +// TestObserverRPC runs the following tests: +// 1. CompareRPCs: verifies that the observer client returns the same errors as the access client for rpcs proxied to the upstream AN +// 2. HandledByUpstream: stops the upstream AN and verifies that the observer client returns errors for all rpcs handled by the upstream +// 3. 
HandledByObserver: stops the upstream AN and verifies that the observer client handles all other queries +func (s *ObserverSuite) TestObserverRPC() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + t := s.T() + + // get an observer client + observer, err := s.getObserverClient() + require.NoError(t, err) + + access, err := s.getAccessClient() + require.NoError(t, err) + + t.Run("CompareRPCs", func(t *testing.T) { + // verify that both clients return the same errors for proxied rpcs + for _, rpc := range s.testedRPCs() { + // skip rpcs handled locally by observer + if _, local := s.localRpc[rpc.name]; local { + continue + } + t.Run(rpc.name, func(t *testing.T) { + accessErr := rpc.call(ctx, access) + observerErr := rpc.call(ctx, observer) + assert.Equal(t, accessErr, observerErr) + }) + } + }) + + // stop the upstream access container + err = s.net.StopContainerByName(ctx, testnet.PrimaryAN) + require.NoError(t, err) + + t.Run("HandledByUpstream", func(t *testing.T) { + // verify that we receive Unavailable errors from all rpcs handled upstream + for _, rpc := range s.testedRPCs() { + if _, local := s.localRpc[rpc.name]; local { + continue + } + t.Run(rpc.name, func(t *testing.T) { + err := rpc.call(ctx, observer) + assert.Equal(t, codes.Unavailable, status.Code(err)) + }) + } + }) + + t.Run("HandledByObserver", func(t *testing.T) { + // verify that we receive NotFound or no error from all rpcs handled locally + for _, rpc := range s.testedRPCs() { + if _, local := s.localRpc[rpc.name]; !local { + continue + } + t.Run(rpc.name, func(t *testing.T) { + err := rpc.call(ctx, observer) + if err == nil { + return + } + assert.Equal(t, codes.NotFound, status.Code(err)) + }) + } + }) +} + +// TestObserverRest runs the following tests: +// 1. CompareEndpoints: verifies that the observer client returns the same errors as the access client for rests proxied to the upstream AN +// 2. HandledByUpstream: stops the upstream AN and verifies that the observer client returns errors for all rests handled by the upstream +// 3. 
HandledByObserver: stops the upstream AN and verifies that the observer client handles all other queries +func (s *ObserverSuite) TestObserverRest() { + t := s.T() + + accessAddr := s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.RESTPort) + observerAddr := s.net.ContainerByName("observer_1").Addr(testnet.RESTPort) + + httpClient := http.DefaultClient + makeHttpCall := func(method string, url string, body interface{}) (*http.Response, error) { + switch method { + case http.MethodGet: + return httpClient.Get(url) + case http.MethodPost: + jsonBody, _ := json.Marshal(body) + return httpClient.Post(url, "application/json", bytes.NewBuffer(jsonBody)) + } + panic("not supported") + } + makeObserverCall := func(method string, path string, body interface{}) (*http.Response, error) { + return makeHttpCall(method, "http://"+observerAddr+"/v1"+path, body) + } + makeAccessCall := func(method string, path string, body interface{}) (*http.Response, error) { + return makeHttpCall(method, "http://"+accessAddr+"/v1"+path, body) + } + + t.Run("CompareEndpoints", func(t *testing.T) { + // verify that both clients return the same errors for proxied rests + for _, endpoint := range s.testedRestEndpoints() { + // skip rest handled locally by observer + if _, local := s.localRest[endpoint.name]; local { + continue + } + t.Run(endpoint.name, func(t *testing.T) { + accessResp, accessErr := makeAccessCall(endpoint.method, endpoint.path, endpoint.body) + observerResp, observerErr := makeObserverCall(endpoint.method, endpoint.path, endpoint.body) + assert.NoError(t, accessErr) + assert.NoError(t, observerErr) + assert.Equal(t, accessResp.Status, observerResp.Status) + assert.Equal(t, accessResp.StatusCode, observerResp.StatusCode) + assert.Contains(t, [...]int{ + http.StatusNotFound, + http.StatusOK, + }, observerResp.StatusCode) + }) + } + }) + + // stop the upstream access container + err := s.net.StopContainerByName(context.Background(), testnet.PrimaryAN) + require.NoError(t, err) + + t.Run("HandledByUpstream", func(t *testing.T) { + // verify that we receive StatusServiceUnavailable errors from all rests handled upstream + for _, endpoint := range s.testedRestEndpoints() { + if _, local := s.localRest[endpoint.name]; local { + continue + } + t.Run(endpoint.name, func(t *testing.T) { + observerResp, observerErr := makeObserverCall(endpoint.method, endpoint.path, endpoint.body) + require.NoError(t, observerErr) + assert.Contains(t, [...]int{ + http.StatusServiceUnavailable}, observerResp.StatusCode) + }) + } + }) + + t.Run("HandledByObserver", func(t *testing.T) { + // verify that we receive NotFound or no error from all rests handled locally + for _, endpoint := range s.testedRestEndpoints() { + if _, local := s.localRest[endpoint.name]; !local { + continue + } + t.Run(endpoint.name, func(t *testing.T) { + observerResp, observerErr := makeObserverCall(endpoint.method, endpoint.path, endpoint.body) + require.NoError(t, observerErr) + assert.Contains(t, [...]int{http.StatusNotFound, http.StatusOK}, observerResp.StatusCode) + }) + } + }) +} + +func (s *ObserverSuite) getAccessClient() (accessproto.AccessAPIClient, error) { + return s.getClient(s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort)) +} + +func (s *ObserverSuite) getObserverClient() (accessproto.AccessAPIClient, error) { + return s.getClient(s.net.ContainerByName("observer_1").Addr(testnet.GRPCPort)) +} + +func (s *ObserverSuite) getClient(address string) (accessproto.AccessAPIClient, error) { + // helper func to create an access 
client + conn, err := grpc.Dial(address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err + } + + client := accessproto.NewAccessAPIClient(conn) + return client, nil +} + +type RPCTest struct { + name string + call func(ctx context.Context, client accessproto.AccessAPIClient) error +} + +func (s *ObserverSuite) getRPCs() []RPCTest { + return []RPCTest{ + {name: "Ping", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.Ping(ctx, &accessproto.PingRequest{}) + return err + }}, + {name: "GetLatestBlockHeader", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetLatestBlockHeader(ctx, &accessproto.GetLatestBlockHeaderRequest{}) + return err + }}, + {name: "GetBlockHeaderByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockHeaderByID(ctx, &accessproto.GetBlockHeaderByIDRequest{ + Id: make([]byte, 32), + }) + return err + }}, + {name: "GetBlockHeaderByHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockHeaderByHeight(ctx, &accessproto.GetBlockHeaderByHeightRequest{}) + return err + }}, + {name: "GetLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetLatestBlock(ctx, &accessproto.GetLatestBlockRequest{}) + return err + }}, + {name: "GetBlockByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{Id: make([]byte, 32)}) + return err + }}, + {name: "GetBlockByHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetBlockByHeight(ctx, &accessproto.GetBlockByHeightRequest{}) + return err + }}, + {name: "GetCollectionByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{Id: make([]byte, 32)}) + return err + }}, + {name: "SendTransaction", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.SendTransaction(ctx, &accessproto.SendTransactionRequest{}) + return err + }}, + {name: "GetTransaction", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransaction(ctx, &accessproto.GetTransactionRequest{}) + return err + }}, + {name: "GetTransactionResult", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{}) + return err + }}, + {name: "GetTransactionResultByIndex", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{}) + return err + }}, + {name: "GetTransactionResultsByBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{}) + return err + }}, + {name: "GetTransactionsByBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{}) + return err + }}, + {name: "GetAccount", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetAccount(ctx, 
&accessproto.GetAccountRequest{}) + return err + }}, + {name: "GetAccountAtLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{}) + return err + }}, + {name: "GetAccountAtBlockHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{}) + return err + }}, + {name: "ExecuteScriptAtLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.ExecuteScriptAtLatestBlock(ctx, &accessproto.ExecuteScriptAtLatestBlockRequest{}) + return err + }}, + {name: "ExecuteScriptAtBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{}) + return err + }}, + {name: "ExecuteScriptAtBlockHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{}) + return err + }}, + {name: "GetEventsForHeightRange", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{}) + return err + }}, + {name: "GetEventsForBlockIDs", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{}) + return err + }}, + {name: "GetNetworkParameters", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetNetworkParameters(ctx, &accessproto.GetNetworkParametersRequest{}) + return err + }}, + {name: "GetLatestProtocolStateSnapshot", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetLatestProtocolStateSnapshot(ctx, &accessproto.GetLatestProtocolStateSnapshotRequest{}) + return err + }}, + {name: "GetExecutionResultForBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { + _, err := client.GetExecutionResultForBlockID(ctx, &accessproto.GetExecutionResultForBlockIDRequest{}) + return err + }}, + } +} + +type RestEndpointTest struct { + name string + method string + path string + body interface{} +} + +func (s *ObserverSuite) getRestEndpoints() []RestEndpointTest { + transactionId := unittest.IdentifierFixture().String() + account := flow.Localnet.Chain().ServiceAddress().String() + block := unittest.BlockFixture() + executionResult := unittest.ExecutionResultFixture() + collection := unittest.CollectionFixture(2) + eventType := unittest.EventTypeFixture(flow.Localnet) + + return []RestEndpointTest{ + { + name: "getTransactionByID", + method: http.MethodGet, + path: "/transactions/" + transactionId, + }, + { + name: "createTransaction", + method: http.MethodPost, + path: "/transactions", + body: createTx(s.T(), s.net), + }, + { + name: "getTransactionResultByID", + method: http.MethodGet, + path: fmt.Sprintf("/transaction_results/%s?block_id=%s&collection_id=%s", transactionId, block.ID().String(), collection.ID().String()), + }, + { + name: "getBlocksByIDs", + method: http.MethodGet, + path: "/blocks/" + block.ID().String(), + }, + { + name: "getBlocksByHeight", + method: http.MethodGet, + path: "/blocks?height=1", + }, + { + name: "getBlockPayloadByID", + method: 
http.MethodGet, + path: "/blocks/" + block.ID().String() + "/payload", + }, + { + name: "getExecutionResultByID", + method: http.MethodGet, + path: "/execution_results/" + executionResult.ID().String(), + }, + { + name: "getExecutionResultByBlockID", + method: http.MethodGet, + path: "/execution_results?block_id=" + block.ID().String(), + }, + { + name: "getCollectionByID", + method: http.MethodGet, + path: "/collections/" + collection.ID().String(), + }, + { + name: "executeScript", + method: http.MethodPost, + path: "/scripts", + body: createScript(), + }, + { + name: "getAccount", + method: http.MethodGet, + path: "/accounts/" + account + "?block_height=1", + }, + { + name: "getEvents", + method: http.MethodGet, + path: fmt.Sprintf("/events?type=%s&start_height=%d&end_height=%d", eventType, 0, 3), + }, + { + name: "getNetworkParameters", + method: http.MethodGet, + path: "/network/parameters", + }, + { + name: "getNodeVersionInfo", + method: http.MethodGet, + path: "/node_version_info", + }, + } +} + +func createTx(t *testing.T, net *testnet.FlowNetwork) interface{} { + flowAddr := flow.Localnet.Chain().ServiceAddress() + payloadSignature := unittest.TransactionSignatureFixture() + envelopeSignature := unittest.TransactionSignatureFixture() + + payloadSignature.Address = flowAddr + + envelopeSignature.Address = flowAddr + envelopeSignature.KeyIndex = 2 + + tx, err := flow.NewTransactionBodyBuilder(). + AddAuthorizer(flowAddr). + SetPayer(flowAddr). + SetScript(unittest.NoopTxScript()). + SetReferenceBlockID(net.Root().ID()). + SetProposalKey(flowAddr, 1, 0). + Build() + require.NoError(t, err) + + tx.PayloadSignatures = []flow.TransactionSignature{payloadSignature} + tx.EnvelopeSignatures = []flow.TransactionSignature{envelopeSignature} + + return unittest.CreateSendTxHttpPayload(*tx) +} + +func createScript() interface{} { + validCode := []byte(`access(all) fun main(foo: String): String { return foo }`) + validArgs := []byte(`{ "type": "String", "value": "hello world" }`) + body := map[string]interface{}{ + "script": util.ToBase64(validCode), + "arguments": []string{util.ToBase64(validArgs)}, + } + return body +} diff --git a/integration/tests/access/cohort3/access_circuit_breaker_test.go b/integration/tests/access/cohort3/access_circuit_breaker_test.go new file mode 100644 index 00000000000..e172be9515d --- /dev/null +++ b/integration/tests/access/cohort3/access_circuit_breaker_test.go @@ -0,0 +1,185 @@ +package cohort3 + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + sdk "github.com/onflow/flow-go-sdk" + sdkcrypto "github.com/onflow/flow-go-sdk/crypto" + "github.com/onflow/flow-go-sdk/templates" + + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestAccessCircuitBreaker(t *testing.T) { + suite.Run(t, new(AccessCircuitBreakerSuite)) +} + +type AccessCircuitBreakerSuite struct { + suite.Suite + + log zerolog.Logger + + // root context for the current test + ctx context.Context + cancel context.CancelFunc + + net *testnet.FlowNetwork +} + +var requestTimeout = 1500 * time.Millisecond +var cbRestoreTimeout = 6 * time.Second + +func (s *AccessCircuitBreakerSuite) TearDownTest() { + s.log.Info().Msg("================> Start 
TearDownTest") + s.net.Remove() + s.cancel() + s.log.Info().Msg("================> Finish TearDownTest") +} + +func (s *AccessCircuitBreakerSuite) SetupTest() { + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + // need one access node with enabled circuit breaker + nodeConfigs := []testnet.NodeConfig{ + testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--circuit-breaker-enabled=true"), + testnet.WithAdditionalFlag(fmt.Sprintf("--circuit-breaker-restore-timeout=%s", cbRestoreTimeout.String())), + testnet.WithAdditionalFlag("--circuit-breaker-max-requests=1"), + testnet.WithAdditionalFlag("--circuit-breaker-max-failures=1"), + testnet.WithAdditionalFlag(fmt.Sprintf("--collection-client-timeout=%s", requestTimeout.String())), + ), + } + // need one execution node + exeConfig := testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)) + nodeConfigs = append(nodeConfigs, exeConfig) + + // need one dummy verification node (unused ghost) + verConfig := testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()) + nodeConfigs = append(nodeConfigs, verConfig) + + // need one controllable collection node + collConfig := testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), testnet.WithAdditionalFlag("--hotstuff-proposal-duration=100ms")) + nodeConfigs = append(nodeConfigs, collConfig) + + // need three consensus nodes (unused ghost) + for n := 0; n < 3; n++ { + conID := unittest.IdentifierFixture() + nodeConfig := testnet.NewNodeConfig(flow.RoleConsensus, + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.WithID(conID), + testnet.AsGhost()) + nodeConfigs = append(nodeConfigs, nodeConfig) + } + + conf := testnet.NewNetworkConfig("access_api_test", nodeConfigs) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) + + // start the network + s.T().Logf("starting flow network with docker containers") + s.ctx, s.cancel = context.WithCancel(context.Background()) + + s.net.Start(s.ctx) +} + +// TestCircuitBreaker tests the behavior of the circuit breaker. It verifies the circuit breaker's ability to open, +// prevent further requests, and restore after a timeout. It is done in a few steps: +// 1. Get the collection node and disconnect it from the network. +// 2. Try to send a transaction multiple times to observe the decrease in waiting time for a failed response. +// 3. Connect the collection node to the network and wait for the circuit breaker restore time. +// 4. Successfully send a transaction. +func (s *AccessCircuitBreakerSuite) TestCircuitBreaker() { + // 1. Get the collection node + collectionContainer := s.net.ContainerByName("collection_1") + + // 2. 
Get the Access Node container and client
+	accessContainer := s.net.ContainerByName(testnet.PrimaryAN)
+
+	// Check if access node was created with circuit breaker flags
+	require.True(s.T(), accessContainer.IsFlagSet("circuit-breaker-enabled"))
+	require.True(s.T(), accessContainer.IsFlagSet("circuit-breaker-restore-timeout"))
+	require.True(s.T(), accessContainer.IsFlagSet("circuit-breaker-max-requests"))
+	require.True(s.T(), accessContainer.IsFlagSet("circuit-breaker-max-failures"))
+
+	accessClient, err := accessContainer.TestnetClient()
+	require.NoError(s.T(), err, "failed to get access node client")
+	require.NotNil(s.T(), accessClient, "failed to get access node client")
+
+	latestBlockID, err := accessClient.GetLatestBlockID(s.ctx)
+	require.NoError(s.T(), err)
+
+	// Create a new account to deploy Counter to
+	accountPrivateKey := lib.RandomPrivateKey()
+
+	accountKey := sdk.NewAccountKey().
+		FromPrivateKey(accountPrivateKey).
+		SetHashAlgo(sdkcrypto.SHA3_256).
+		SetWeight(sdk.AccountKeyWeightThreshold)
+
+	serviceAddress := sdk.Address(accessClient.Chain.ServiceAddress())
+
+	// Generate the account creation transaction
+	createAccountTx, err := templates.CreateAccount(
+		[]*sdk.AccountKey{accountKey},
+		[]templates.Contract{
+			{
+				Name:   lib.CounterContract.Name,
+				Source: lib.CounterContract.ToCadence(),
+			},
+		}, serviceAddress)
+	require.NoError(s.T(), err)
+
+	createAccountTx.
+		SetReferenceBlockID(sdk.Identifier(latestBlockID)).
+		SetProposalKey(serviceAddress, 0, accessClient.GetAndIncrementSeqNumber()).
+		SetPayer(serviceAddress).
+		SetComputeLimit(9999)
+
+	// Sign the transaction
+	signedTx, err := accessClient.SignTransaction(createAccountTx)
+	require.NoError(s.T(), err)
+
+	// 3. Disconnect the collection node from the network to activate the Circuit Breaker
+	err = collectionContainer.Disconnect()
+	require.NoError(s.T(), err, "failed to disconnect the collection node")
+
+	// 4. Send a couple of transactions to test if the circuit breaker opens correctly
+	// Try to send the transaction for the first time. It should wait at least the timeout time and return an Unavailable error
+	err = accessClient.SendTransaction(s.ctx, signedTx)
+	assert.Equal(s.T(), codes.Unavailable, status.Code(err))
+
+	// Try to send the transaction for the second time. It should wait less than a second because the circuit breaker
+	// is configured to break after the first failure
+	err = accessClient.SendTransaction(s.ctx, signedTx)
+	// Here we catch the codes.Unknown error, as this is the one that comes from the Circuit Breaker when the state is Open.
+	assert.Equal(s.T(), codes.Unknown, status.Code(err))
+
+	// Reconnect the collection node
+	err = collectionContainer.Connect()
+	require.NoError(s.T(), err, "failed to reconnect the collection node")
+
+	// Wait for the circuit breaker to restore
+	time.Sleep(cbRestoreTimeout)
+
+	// Try to send the transaction for the third time.
The transaction should be sent successfully + err = accessClient.SendTransaction(s.ctx, signedTx) + require.NoError(s.T(), err, "transaction should be sent") +} diff --git a/integration/tests/access/cohort3/collection_indexing_test.go b/integration/tests/access/cohort3/collection_indexing_test.go new file mode 100644 index 00000000000..97eee746be5 --- /dev/null +++ b/integration/tests/access/cohort3/collection_indexing_test.go @@ -0,0 +1,120 @@ +package cohort3 + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/model/flow" +) + +// This suite tests collection syncing using the ingestion engine and the indexer. + +const lastFullBlockMetric = "access_ingestion_last_full_finalized_block_height" + +func TestCollectionIndexing(t *testing.T) { + suite.Run(t, new(CollectionIndexingSuite)) +} + +type CollectionIndexingSuite struct { + suite.Suite + net *testnet.FlowNetwork + + cancel context.CancelFunc +} + +func (s *CollectionIndexingSuite) SetupTest() { + // access_1 is not running the indexer, so all collections are indexed using the ingestion engine + defaultAccessOpts := []func(config *testnet.NodeConfig){ + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + testnet.WithMetricsServer(), + } + // access_2 is running the indexer, so all collections are indexed using the indexer + testANOpts := append(defaultAccessOpts, + testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"), + ) + + nodeConfigs := []testnet.NodeConfig{ + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleAccess, defaultAccessOpts...), + testnet.NewNodeConfig(flow.RoleAccess, testANOpts...), + } + + // prepare the network + conf := testnet.NewNetworkConfig("access_collection_indexing_test", nodeConfigs) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) + + // start the network + ctx, cancel := context.WithCancel(context.Background()) + s.cancel = cancel + + s.net.Start(ctx) +} + +func (s *CollectionIndexingSuite) TearDownTest() { + if s.net != nil { + s.net.Remove() + s.net = nil + } + if s.cancel != nil { + s.cancel() + s.cancel = nil + } +} + +func (s *CollectionIndexingSuite) Test() { + // start the network with access_2 disconnected. 
+ // this simulates it falling behind on syncing collections + access2 := s.net.ContainerByName("access_2") + s.Require().NoError(access2.Disconnect()) + + // wait for access_1 to sync collections + targetBlockCount := uint64(50) + s.Eventually(func() bool { + value, err := s.getLastFullHeight("access_1") + s.T().Logf("access_1 last full height: %d", value) + return err == nil && value > targetBlockCount + }, 60*time.Second, 1*time.Second) + + // stop the collection nodes + // this will prevent access_2 from syncing collections from the network + s.Require().NoError(s.net.ContainerByName("collection_1").Pause()) + s.Require().NoError(s.net.ContainerByName("collection_2").Pause()) + + // now start access_2, and wait for it to catch up with collections + s.Require().NoError(access2.Connect()) + + s.Eventually(func() bool { + value, err := s.getLastFullHeight("access_2") + s.T().Logf("access_2 last full height: %d", value) + return err == nil && value > targetBlockCount + }, 60*time.Second, 1*time.Second) +} + +func (s *CollectionIndexingSuite) getLastFullHeight(containerName string) (uint64, error) { + node := s.net.ContainerByName(containerName) + metricsURL := fmt.Sprintf("http://0.0.0.0:%s/metrics", node.Port(testnet.MetricsPort)) + values := s.net.GetMetricFromContainer(s.T(), containerName, metricsURL, lastFullBlockMetric) + + if len(values) == 0 { + return 0, fmt.Errorf("no values found") + } + + return uint64(values[0].GetGauge().GetValue()), nil +} diff --git a/integration/tests/access/consensus_follower_test.go b/integration/tests/access/cohort3/consensus_follower_test.go similarity index 98% rename from integration/tests/access/consensus_follower_test.go rename to integration/tests/access/cohort3/consensus_follower_test.go index 2eed7e46445..26817eeef69 100644 --- a/integration/tests/access/consensus_follower_test.go +++ b/integration/tests/access/cohort3/consensus_follower_test.go @@ -1,4 +1,4 @@ -package access +package cohort3 import ( "context" @@ -10,9 +10,10 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/cmd/bootstrap/utils" "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" consensus_follower "github.com/onflow/flow-go/follower" "github.com/onflow/flow-go/integration/testnet" "github.com/onflow/flow-go/model/flow" @@ -117,7 +118,6 @@ func (s *ConsensusFollowerSuite) TestReceiveBlocks() { func (s *ConsensusFollowerSuite) buildNetworkConfig() { // staked access node - unittest.IdentityFixture() s.stakedID = unittest.IdentifierFixture() stakedConfig := testnet.NewNodeConfig( flow.RoleAccess, @@ -132,7 +132,7 @@ func (s *ConsensusFollowerSuite) buildNetworkConfig() { } consensusConfigs := []func(config *testnet.NodeConfig){ - testnet.WithAdditionalFlag("--block-rate-delay=100ms"), + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=100ms"), testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), testnet.WithLogLevel(zerolog.FatalLevel), diff --git a/integration/tests/access/cohort3/execution_state_sync_test.go b/integration/tests/access/cohort3/execution_state_sync_test.go new file mode 100644 index 00000000000..08cb0d6c8dc --- /dev/null +++ b/integration/tests/access/cohort3/execution_state_sync_test.go @@ -0,0 +1,238 @@ +package cohort3 + +import ( + "context" + "fmt" + "path/filepath" + "testing" + "time" 
+ + "github.com/ipfs/go-datastore" + pebbleds "github.com/ipfs/go-ds-pebble" + sdk "github.com/onflow/flow-go-sdk" + sdkclient "github.com/onflow/flow-go-sdk/access/grpc" + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/onflow/flow/protobuf/go/flow/executiondata" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/engine/ghost/client" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestExecutionStateSync(t *testing.T) { + suite.Run(t, new(ExecutionStateSyncSuite)) +} + +type ExecutionStateSyncSuite struct { + suite.Suite + lib.TestnetStateTracker + + log zerolog.Logger + + bridgeID flow.Identifier + ghostID flow.Identifier + observerName string + + // root context for the current test + ctx context.Context + cancel context.CancelFunc + + net *testnet.FlowNetwork +} + +func (s *ExecutionStateSyncSuite) SetupTest() { + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + s.ctx, s.cancel = context.WithCancel(context.Background()) + + s.buildNetworkConfig() + + // start the network + s.net.Start(s.ctx) + + s.Track(s.T(), s.ctx, s.Ghost()) +} + +func (s *ExecutionStateSyncSuite) TearDownTest() { + s.log.Info().Msg("================> Start TearDownTest") + s.net.Remove() + s.cancel() + s.log.Info().Msgf("================> Finish TearDownTest") +} + +func (s *ExecutionStateSyncSuite) Ghost() *client.GhostClient { + client, err := s.net.ContainerByID(s.ghostID).GhostClient() + require.NoError(s.T(), err, "could not get ghost client") + return client +} + +func (s *ExecutionStateSyncSuite) buildNetworkConfig() { + // access node + s.bridgeID = unittest.IdentifierFixture() + bridgeANConfig := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithID(s.bridgeID), + testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--supports-observer=true"), + testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), + testnet.WithAdditionalFlag(fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir)), + testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), + testnet.WithAdditionalFlagf("--public-network-execution-data-sync-enabled=true"), + ) + + // add the ghost (access) node config + s.ghostID = unittest.IdentifierFixture() + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithID(s.ghostID), + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.AsGhost()) + + consensusConfigs := []func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=100ms"), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), + testnet.WithLogLevel(zerolog.FatalLevel), + } + + net := []testnet.NodeConfig{ + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + 
testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), + bridgeANConfig, + ghostNode, + } + + // add the observer node config + s.observerName = testnet.PrimaryON + observers := []testnet.ObserverConfig{{ + ContainerName: s.observerName, + LogLevel: zerolog.InfoLevel, + AdditionalFlags: []string{ + fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + "--execution-data-sync-enabled=true", + "--event-query-mode=execution-nodes-only", + }, + }} + + conf := testnet.NewNetworkConfig("execution state sync test", net, testnet.WithObservers(observers...)) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) +} + +// TestBadgerDBHappyPath tests that Execution Nodes generate execution data, and Access Nodes are able to +// successfully sync the data to badger DB +func (s *ExecutionStateSyncSuite) TestBadgerDBHappyPath() { + s.executionStateSyncTest() +} + +func (s *ExecutionStateSyncSuite) executionStateSyncTest() { + // Let the network run for this many blocks + runBlocks := uint64(60) + + // We will check that execution data was downloaded for this many blocks + // It has to be less than runBlocks since it's not possible to see which height the AN stopped + // downloading execution data for + checkBlocks := runBlocks / 2 + + // get the first block height + currentFinalized := s.BlockState.HighestFinalizedHeight() + blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized) + s.T().Logf("got block height %v ID %v", blockA.Height, blockA.ID()) + + // Loop through checkBlocks and verify the execution data was downloaded correctly + an := s.net.ContainerByName(testnet.PrimaryAN) + anClient, err := an.SDKClient() + require.NoError(s.T(), err, "could not get access node testnet client") + + on := s.net.ContainerByName(s.observerName) + onClient, err := on.SDKClient() + require.NoError(s.T(), err, "could not get observer testnet client") + + ctx, cancel := context.WithTimeout(s.ctx, 5*time.Minute) + defer cancel() + + for i := blockA.Height; i <= blockA.Height+checkBlocks; i++ { + anBED, err := s.executionDataForHeight(ctx, anClient, i) + require.NoError(s.T(), err, "could not get execution data from AN for height %v", i) + + onBED, err := s.executionDataForHeight(ctx, onClient, i) + require.NoError(s.T(), err, "could not get execution data from ON for height %v", i) + + assert.Equal(s.T(), anBED.BlockID, onBED.BlockID) + } +} + +// executionDataForHeight returns the execution data for the given height from the given node +// It retries the request until the data is available or the context is canceled +func (s *ExecutionStateSyncSuite) executionDataForHeight(ctx context.Context, nodeClient *sdkclient.Client, height uint64) (*execution_data.BlockExecutionData, error) { + execDataClient := nodeClient.ExecutionDataRPCClient() + + var header *sdk.BlockHeader + s.Require().NoError(retryNotFound(ctx, 200*time.Millisecond, func() error { + var err error + header, err = nodeClient.GetBlockHeaderByHeight(s.ctx, height) + return err + }), "could not get block header for block %d", height) + + var blockED *execution_data.BlockExecutionData + 
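+	// the execution data API may return NotFound until the node has downloaded
+	// and indexed this block's execution data, so we poll with retryNotFound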
+	s.Require().NoError(retryNotFound(ctx, 200*time.Millisecond, func() error {
+		ed, err := execDataClient.GetExecutionDataByBlockID(s.ctx, &executiondata.GetExecutionDataByBlockIDRequest{
+			BlockId:              header.ID[:],
+			EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+		})
+		if err != nil {
+			return err
+		}
+
+		blockED, err = convert.MessageToBlockExecutionData(ed.GetBlockExecutionData(), flow.Localnet.Chain())
+		s.Require().NoError(err, "could not convert execution data")
+
+		return err
+	}), "could not get execution data for block %d", height)
+
+	return blockED, nil
+}
+
+// retryNotFound retries the given function until it returns an error that is not NotFound or the context is canceled
+func retryNotFound(ctx context.Context, delay time.Duration, f func() error) error {
+	for ctx.Err() == nil {
+		err := f()
+		if status.Code(err) == codes.NotFound {
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			case <-time.After(delay):
+			}
+			continue
+		}
+		return err
+	}
+	return ctx.Err()
+}
+
+func (s *ExecutionStateSyncSuite) nodeExecutionDataStore(node *testnet.Container) execution_data.ExecutionDataStore {
+	var ds datastore.Batching
+	var err error
+	dsPath := filepath.Join(node.ExecutionDataDBPath(), "blobstore")
+
+	ds, err = pebbleds.NewDatastore(dsPath, nil)
+	require.NoError(s.T(), err, "could not get execution datastore")
+
+	return execution_data.NewExecutionDataStore(blobs.NewBlobstore(ds), execution_data.DefaultSerializer)
+}
diff --git a/integration/tests/access/cohort3/pebble_execution_state_sync_test.go b/integration/tests/access/cohort3/pebble_execution_state_sync_test.go
new file mode 100644
index 00000000000..f10f0bba552
--- /dev/null
+++ b/integration/tests/access/cohort3/pebble_execution_state_sync_test.go
@@ -0,0 +1,21 @@
+package cohort3
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/suite"
+)
+
+func TestPebbleExecutionStateSync(t *testing.T) {
+	suite.Run(t, new(PebbleExecutionStateSync))
+}
+
+type PebbleExecutionStateSync struct {
+	ExecutionStateSyncSuite
+}
+
+// TestPebbleDBHappyPath tests that Execution Nodes generate execution data, and Access Nodes are able to
+// successfully sync the data to pebble DB
+func (s *PebbleExecutionStateSync) TestPebbleDBHappyPath() {
+	s.executionStateSyncTest()
+}
diff --git a/integration/tests/access/cohort4/access_store_tx_error_messages_test.go b/integration/tests/access/cohort4/access_store_tx_error_messages_test.go
new file mode 100644
index 00000000000..e5278418382
--- /dev/null
+++ b/integration/tests/access/cohort4/access_store_tx_error_messages_test.go
@@ -0,0 +1,239 @@
+package cohort4
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	sdk "github.com/onflow/flow-go-sdk"
+	sdkcrypto "github.com/onflow/flow-go-sdk/crypto"
+	"github.com/onflow/flow-go-sdk/templates"
+
+	"github.com/onflow/flow-go/integration/client"
+	"github.com/onflow/flow-go/integration/convert"
+	"github.com/onflow/flow-go/integration/testnet"
+	"github.com/onflow/flow-go/integration/tests/lib"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage/store"
+)
+
+const maxReceiptHeightMetric = "access_ingestion_max_receipt_height"
+
+func TestAccessStoreTxErrorMessages(t *testing.T) {
+	suite.Run(t, new(AccessStoreTxErrorMessagesSuite))
+}
+
+// AccessStoreTxErrorMessagesSuite tests the access node's ability to store transaction error messages.
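+// Two paths are covered: storing error messages while the network is running
+// (TestAccessStoreTxErrorMessages) and backfilling them afterwards through the
+// admin command (TestBackfillTxErrorMessages).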
+type AccessStoreTxErrorMessagesSuite struct {
+	suite.Suite
+
+	log zerolog.Logger
+
+	// root context for the current test
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	net *testnet.FlowNetwork
+
+	accessContainerName string
+}
+
+func (s *AccessStoreTxErrorMessagesSuite) TearDownTest() {
+	s.log.Info().Msg("================> Start TearDownTest")
+	s.net.Remove()
+	s.cancel()
+	s.log.Info().Msg("================> Finish TearDownTest")
+}
+
+// SetupTest sets up the test suite by starting the network.
+// The access nodes are started with the parameters required to store transaction error messages.
+func (s *AccessStoreTxErrorMessagesSuite) SetupTest() {
+	defaultAccess := testnet.NewNodeConfig(
+		flow.RoleAccess,
+		testnet.WithLogLevel(zerolog.FatalLevel),
+	)
+
+	s.accessContainerName = "access_2"
+	storeTxAccess := testnet.NewNodeConfig(
+		flow.RoleAccess,
+		testnet.WithLogLevel(zerolog.InfoLevel),
+		testnet.WithAdditionalFlag("--store-tx-result-error-messages=true"),
+		testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"),
+		testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir),
+		testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir),
+		testnet.WithMetricsServer(),
+	)
+
+	consensusConfigs := []func(config *testnet.NodeConfig){
+		// `cruise-ctl-fallback-proposal-duration` is set to 250ms instead of 100ms
+		// to purposely slow down the block rate. This is needed since the crypto module
+		// update provides faster BLS operations.
+		testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=250ms"),
+		testnet.WithAdditionalFlagf("--required-verification-seal-approvals=%d", 1),
+		testnet.WithAdditionalFlagf("--required-construction-seal-approvals=%d", 1),
+		testnet.WithLogLevel(zerolog.FatalLevel),
+	}
+
+	nodeConfigs := []testnet.NodeConfig{
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)),
+
+		defaultAccess,
+		storeTxAccess,
+	}
+
+	// prepare the network
+	conf := testnet.NewNetworkConfig("access_store_tx_error_messages_test", nodeConfigs)
+	s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet)
+
+	// start the network
+	s.ctx, s.cancel = context.WithCancel(context.Background())
+
+	s.net.Start(s.ctx)
+}
+
+// TestAccessStoreTxErrorMessages verifies that transaction result error messages
+// are stored correctly in the database by sending a transaction, generating an error,
+// and checking if the error message is properly stored and retrieved from the database.
+func (s *AccessStoreTxErrorMessagesSuite) TestAccessStoreTxErrorMessages() {
+	ctx, cancel := context.WithTimeout(s.ctx, 60*time.Second)
+	defer cancel()
+
+	// Create and send a transaction that will result in an error.
+	txResult := s.createAndSendTxWithTxError()
+
+	client, err := s.net.ContainerByName(s.accessContainerName).TestnetClient()
+	s.Require().NoError(err)
+
+	// wait until the node has indexed a few blocks past the transaction block height
+	err = client.WaitUntilIndexed(ctx, txResult.BlockHeight+10)
+	s.Require().NoError(err)
+
+	// Stop the network containers before checking the results.
+	s.net.StopContainers()
+
+	txResults := []*sdk.TransactionResult{txResult}
+	txErrorMessages := s.fetchTxErrorMessages(txResults, s.accessContainerName)
+	s.verifyTxErrorMessage(txResults, txErrorMessages)
+}
+
+// TestBackfillTxErrorMessages verifies that transaction error messages are backfilled correctly
+// by creating a transaction that results in an error, running the backfill command, and checking
+// if the error message is stored and retrieved from the database.
+func (s *AccessStoreTxErrorMessagesSuite) TestBackfillTxErrorMessages() {
+	// Create and send transactions that will result in an error.
+	transactionCount := 5
+	txResults := make([]*sdk.TransactionResult, transactionCount)
+	for i := 0; i < transactionCount; i++ {
+		txResults[i] = s.createAndSendTxWithTxError()
+	}
+
+	serverAddr := fmt.Sprintf("localhost:%s", s.net.ContainerByName(s.accessContainerName).Port(testnet.AdminPort))
+	adminClient := client.NewAdminClient(serverAddr)
+
+	startHeight := 1
+	endHeight := txResults[len(txResults)-1].BlockHeight // last tx result block height
+	data := map[string]interface{}{"start-height": startHeight, "end-height": endHeight}
+	// executes the backfill command for transaction error messages
+	_, err := adminClient.RunCommand(context.Background(), "backfill-tx-error-messages", data)
+	require.NoError(s.T(), err)
+
+	// Stop the network containers before checking the results.
+	s.net.StopContainers()
+
+	txErrorMessages := s.fetchTxErrorMessages(txResults, s.accessContainerName)
+	s.verifyTxErrorMessage(txResults, txErrorMessages)
+}
+
+// createAndSendTxWithTxError creates and sends a transaction that will result in an error.
+// This function creates a new account, causing an error during execution.
+func (s *AccessStoreTxErrorMessagesSuite) createAndSendTxWithTxError() *sdk.TransactionResult {
+	// prepare environment to create a new account
+	serviceAccountClient, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient()
+	s.Require().NoError(err)
+
+	latestBlockID, err := serviceAccountClient.GetLatestBlockID(s.ctx)
+	s.Require().NoError(err)
+
+	// generate a key for the new account
+	accountPrivateKey := lib.RandomPrivateKey()
+
+	accountKey := sdk.NewAccountKey().
+		FromPrivateKey(accountPrivateKey).
+		SetHashAlgo(sdkcrypto.SHA3_256).
+		SetWeight(sdk.AccountKeyWeightThreshold)
+
+	serviceAddress := sdk.Address(serviceAccountClient.Chain.ServiceAddress())
+
+	// Generate the account creation transaction
+	createAccountTx, err := templates.CreateAccount(
+		[]*sdk.AccountKey{accountKey},
+		[]templates.Contract{}, serviceAddress)
+	s.Require().NoError(err)
+
+	// Set the transaction's reference block, proposal key, payer, and compute limit
+	createAccountTx.
+		SetReferenceBlockID(sdk.Identifier(latestBlockID)).
+		SetProposalKey(serviceAddress, 1, serviceAccountClient.GetAndIncrementSeqNumber()).
+		SetPayer(serviceAddress).
+		SetComputeLimit(9999)
+
+	// Sign and send the transaction.
+	err = serviceAccountClient.SignAndSendTransaction(s.ctx, createAccountTx)
+	s.Require().NoError(err)
+
+	// Wait for the transaction to be sealed and return the transaction result.
+	accountCreationTxRes, err := serviceAccountClient.WaitForSealed(s.ctx, createAccountTx.ID())
+	s.Require().NoError(err)
+
+	return accountCreationTxRes
+}
+
+// fetchTxErrorMessages retrieves the stored transaction error messages for the given transaction results.
+func (s *AccessStoreTxErrorMessagesSuite) fetchTxErrorMessages(txResults []*sdk.TransactionResult, containerName string) []*flow.TransactionResultErrorMessage {
+	accessNode := s.net.ContainerByName(containerName)
+	anDB, err := accessNode.DB()
+	require.NoError(s.T(), err, "could not open db")
+
+	metrics := metrics.NewNoopCollector()
+	anTxErrorMessages := store.NewTransactionResultErrorMessages(metrics, anDB, store.DefaultCacheSize)
+
+	txResultErrorMessages := make([]*flow.TransactionResultErrorMessage, len(txResults))
+	for i, txResult := range txResults {
+		txBlockID := convert.IDFromSDK(txResult.BlockID)
+		txID := convert.IDFromSDK(txResult.TransactionID)
+
+		errMsgResult, err := anTxErrorMessages.ByBlockIDTransactionID(txBlockID, txID)
+		s.Require().NoError(err)
+
+		txResultErrorMessages[i] = errMsgResult
+	}
+
+	return txResultErrorMessages
+}
+
+// verifyTxErrorMessage compares the expected and retrieved error messages to verify accuracy.
+func (s *AccessStoreTxErrorMessagesSuite) verifyTxErrorMessage(txResults []*sdk.TransactionResult, errMsgResults []*flow.TransactionResultErrorMessage) {
+	s.Require().Equal(len(txResults), len(errMsgResults))
+
+	for i, txResult := range txResults {
+		expectedTxResultErrorMessage := txResult.Error.Error()
+		expectedTxID := convert.IDFromSDK(txResult.TransactionID)
+
+		s.Require().Equal(expectedTxID, errMsgResults[i].TransactionID)
+		s.Require().Equal(expectedTxResultErrorMessage, errMsgResults[i].ErrorMessage)
+	}
+}
diff --git a/integration/tests/access/cohort4/access_test.go b/integration/tests/access/cohort4/access_test.go
new file mode 100644
index 00000000000..5f059783f2c
--- /dev/null
+++ b/integration/tests/access/cohort4/access_test.go
@@ -0,0 +1,194 @@
+package cohort4
+
+import (
+	"context"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+
+	accessproto "github.com/onflow/flow/protobuf/go/flow/access"
+
+	"github.com/onflow/flow-go/consensus/hotstuff/committees"
+	"github.com/onflow/flow-go/consensus/hotstuff/signature"
+	"github.com/onflow/flow-go/engine/common/rpc/convert"
+	"github.com/onflow/flow-go/integration/testnet"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestAccess(t *testing.T) {
+	suite.Run(t, new(AccessSuite))
+}
+
+type AccessSuite struct {
+	suite.Suite
+
+	log zerolog.Logger
+
+	// root context for the current test
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	net *testnet.FlowNetwork
+}
+
+func (s *AccessSuite) TearDownTest() {
+	s.log.Info().Msg("================> Start TearDownTest")
+	s.net.Remove()
+	s.cancel()
+	s.log.Info().Msg("================> Finish TearDownTest")
+}
+
+func (s *AccessSuite) SetupTest() {
+	s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel)
+	s.log.Info().Msg("================> SetupTest")
+	defer func() {
+		s.log.Info().Msg("================> Finish SetupTest")
+	}()
+
+	nodeConfigs := []testnet.NodeConfig{
+		testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel)),
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)),
+	}
+
+	conf := testnet.NewNetworkConfig("access_api_test", nodeConfigs)
+	s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet)
+
+	// start the network
+	s.T().Logf("starting flow network with docker containers")
+	s.ctx, s.cancel = context.WithCancel(context.Background())
+
+	s.net.Start(s.ctx)
+}
+
+func (s *AccessSuite) TestAllTheThings() {
+	s.runTestAPIsAvailable()
+
+	// run this test last because it stops the container
+	s.runTestSignerIndicesDecoding()
+}
+
+func (s *AccessSuite) runTestAPIsAvailable() {
+	s.T().Run("TestHTTPProxyPortOpen", func(t *testing.T) {
+		httpProxyAddress := s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCWebPort)
+
+		conn, err := net.DialTimeout("tcp", httpProxyAddress, 1*time.Second)
+		require.NoError(s.T(), err, "http proxy port not open on the access node")
+
+		conn.Close()
+	})
+
+	s.T().Run("TestAccessConnection", func(t *testing.T) {
+		ctx, cancel := context.WithTimeout(s.ctx, 1*time.Second)
+		defer cancel()
+
+		grpcAddress := s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort)
+		conn, err := grpc.NewClient(grpcAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
+		require.NoError(t, err, "failed to connect to access node")
+		defer conn.Close()
+
+		client := accessproto.NewAccessAPIClient(conn)
+
+		_, err = client.Ping(ctx, &accessproto.PingRequest{})
+		assert.NoError(t, err, "failed to ping access node")
+	})
+}
+
+// runTestSignerIndicesDecoding tests that the access node uses the signer indices decoder to correctly parse encoded data in blocks.
+// This test receives blocks from the consensus follower, then requests the same blocks from the access API and checks that the
+// returned data matches.
+// CAUTION: must be run last if running multiple tests using the same network since it stops the containers.
+func (s *AccessSuite) runTestSignerIndicesDecoding() {
+	container := s.net.ContainerByName(testnet.PrimaryAN)
+
+	ctx, cancel := context.WithCancel(s.ctx)
+	defer cancel()
+
+	// create access API
+	grpcAddress := container.Addr(testnet.GRPCPort)
+	conn, err := grpc.NewClient(grpcAddress, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	require.NoError(s.T(), err, "failed to connect to access node")
+	defer conn.Close()
+
+	client := accessproto.NewAccessAPIClient(conn)
+
+	// query the latest finalized block. wait until at least two blocks have been finalized.
+	// otherwise, we may get the root block, which does not have any voter indices, or its
+	// immediate child, whose parent voter indices are empty.
+	var latestFinalizedBlock *accessproto.BlockHeaderResponse
+	require.Eventually(s.T(), func() bool {
+		latestFinalizedBlock, err = MakeApiRequest(client.GetLatestBlockHeader, ctx, &accessproto.GetLatestBlockHeaderRequest{
+			IsSealed: false,
+		})
+		require.NoError(s.T(), err)
+		return latestFinalizedBlock.GetBlock().Height > 1
+	}, 30*time.Second, 100*time.Millisecond)
+
+	// verify we get the same block when querying by ID and height
+	blockByID, err := MakeApiRequest(client.GetBlockHeaderByID, ctx, &accessproto.GetBlockHeaderByIDRequest{Id: latestFinalizedBlock.Block.Id})
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), latestFinalizedBlock, blockByID, "expect to receive same block by ID")
+
+	blockByHeight, err := MakeApiRequest(client.GetBlockHeaderByHeight, ctx, &accessproto.GetBlockHeaderByHeightRequest{Height: latestFinalizedBlock.Block.Height})
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), latestFinalizedBlock, blockByHeight, "expect to receive same block by height")
+
+	// stop the container, so we can access its state and perform assertions
+	err = s.net.StopContainerByName(ctx, testnet.PrimaryAN)
+	require.NoError(s.T(), err)
+
+	err = container.WaitForContainerStopped(5 * time.Second)
+	require.NoError(s.T(), err)
+
+	// open state to build a block signer decoder
+	state, err := container.OpenState()
+	require.NoError(s.T(), err)
+
+	// create the committee so we can create a decoder to assert the validity of the data
+	committee, err := committees.NewConsensusCommittee(state, container.Config.NodeID)
+	require.NoError(s.T(), err)
+	blockSignerDecoder := signature.NewBlockSignerDecoder(committee)
+
+	expectedFinalizedBlock, err := state.AtBlockID(flow.HashToID(latestFinalizedBlock.Block.Id)).Head()
+	require.NoError(s.T(), err)
+
+	// since all blocks should be equal, we only check one of them
+	require.Equal(s.T(), latestFinalizedBlock.Block.ParentVoterIndices, expectedFinalizedBlock.ParentVoterIndices)
+
+	// check if the response contains valid encoded signer IDs.
+	msg := latestFinalizedBlock.Block
+	block, err := convert.MessageToBlockHeader(msg)
+	require.NoError(s.T(), err)
+	decodedIdentities, err := blockSignerDecoder.DecodeSignerIDs(block)
+	require.NoError(s.T(), err)
+	// transform to assert
+	var transformed [][]byte
+	for _, identity := range decodedIdentities {
+		identity := identity
+		transformed = append(transformed, identity[:])
+	}
+	assert.ElementsMatch(s.T(), transformed, msg.ParentVoterIds, "response must contain correctly encoded signer IDs")
+}
+
+// MakeApiRequest is a helper function that encapsulates context creation for a gRPC client call, used to avoid
+// repeatedly creating a new context for each call.
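+// Each call derives a fresh 1-second-timeout context from the provided parent context and cancels
+// it once the call returns, e.g.:
+//
+//	header, err := MakeApiRequest(client.GetLatestBlockHeader, ctx, &accessproto.GetLatestBlockHeaderRequest{IsSealed: false})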
+func MakeApiRequest[Func func(context.Context, *Req, ...grpc.CallOption) (*Resp, error), Req any, Resp any](apiCall Func, ctx context.Context, req *Req) (*Resp, error) { + clientCtx, cancel := context.WithTimeout(ctx, 1*time.Second) + resp, err := apiCall(clientCtx, req) + cancel() + return resp, err +} diff --git a/integration/tests/access/cohort4/execution_data_pruning_test.go b/integration/tests/access/cohort4/execution_data_pruning_test.go new file mode 100644 index 00000000000..fe850cec917 --- /dev/null +++ b/integration/tests/access/cohort4/execution_data_pruning_test.go @@ -0,0 +1,276 @@ +package cohort4 + +import ( + "context" + "fmt" + "path/filepath" + "testing" + "time" + + pebbleds "github.com/ipfs/go-ds-pebble" + sdk "github.com/onflow/flow-go-sdk" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/onflow/flow/protobuf/go/flow/executiondata" +) + +func TestExecutionDataPruning(t *testing.T) { + suite.Run(t, new(ExecutionDataPruningSuite)) +} + +type ExecutionDataPruningSuite struct { + suite.Suite + + log zerolog.Logger + + accessNodeName string + observerNodeName string + // threshold defines the maximum height range and how frequently pruning is performed. + threshold uint64 + heightRangeTarget uint64 + pruningInterval string + + // root context for the current test + ctx context.Context + cancel context.CancelFunc + + net *testnet.FlowNetwork +} + +func (s *ExecutionDataPruningSuite) TearDownTest() { + s.log.Info().Msg("================> Start TearDownTest") + s.net.Remove() + s.cancel() + s.log.Info().Msg("================> Finish TearDownTest") +} + +func (s *ExecutionDataPruningSuite) SetupTest() { + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + s.threshold = 50 + s.heightRangeTarget = 100 + s.pruningInterval = "10s" + + // access node + s.accessNodeName = testnet.PrimaryAN + accessNodeConfig := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.WithAdditionalFlag("--supports-observer=true"), + testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), + testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"), + testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + testnet.WithAdditionalFlagf("--public-network-execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--event-query-mode=local-only"), + testnet.WithAdditionalFlagf("--execution-data-height-range-target=%d", s.heightRangeTarget), + testnet.WithAdditionalFlagf("--execution-data-height-range-threshold=%d", s.threshold), + testnet.WithAdditionalFlagf("--execution-data-pruning-interval=%s", s.pruningInterval), + ) + + consensusConfigs := 
[]func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=400ms"), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), + testnet.WithLogLevel(zerolog.FatalLevel), + } + + nodeConfigs := []testnet.NodeConfig{ + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), + accessNodeConfig, // access_1 + } + + // add the observer node config + s.observerNodeName = testnet.PrimaryON + + observers := []testnet.ObserverConfig{{ + ContainerName: s.observerNodeName, + LogLevel: zerolog.InfoLevel, + AdditionalFlags: []string{ + fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + "--execution-data-sync-enabled=true", + "--execution-data-indexing-enabled=true", + "--execution-data-retry-delay=1s", + "--event-query-mode=local-only", + "--local-service-api-enabled=true", + fmt.Sprintf("--execution-data-height-range-target=%d", s.heightRangeTarget), + fmt.Sprintf("--execution-data-height-range-threshold=%d", s.threshold), + fmt.Sprintf("--execution-data-pruning-interval=%s", s.pruningInterval), + }, + }} + + conf := testnet.NewNetworkConfig("execution_data_pruning", nodeConfigs, testnet.WithObservers(observers...)) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) + + // start the network + s.T().Logf("starting flow network with docker containers") + s.ctx, s.cancel = context.WithCancel(context.Background()) + + s.net.Start(s.ctx) +} + +// TestHappyPath tests the execution data pruning process in a happy path scenario. +// The test follows these steps: +// +// 1. Define a target block height (waitingBlockHeight) for which execution data will be indexed. +// 2. Wait until the execution data for the specified block height is indexed by the observer node. +// - Set up a gRPC client to communicate with the observer node. +// - Ensure the observer node has indexed the execution data up to the target block height by +// subscribing to execution data events and monitoring the event stream. +// +// 3. Stop all Flow network containers to simulate a network shutdown and ensure the indexing process is complete. +// 4. Verify the results of execution data pruning: +// - Check that the Access and Observer Nodes execution data up to the pruning threshold height has been correctly pruned. 
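+//
+// SetupTest configures a pruning threshold of 50 blocks and a 10s pruning interval, so by the time
+// height 200 has been indexed the pruner should have run at least once.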
+func (s *ExecutionDataPruningSuite) TestHappyPath() {
+	accessNode := s.net.ContainerByName(s.accessNodeName)
+	observerNode := s.net.ContainerByName(s.observerNodeName)
+
+	waitingBlockHeight := uint64(200)
+	s.waitUntilExecutionDataForBlockIndexed(waitingBlockHeight)
+	s.net.StopContainers()
+
+	metrics := metrics.NewNoopCollector()
+
+	// start an execution data service using the Access Node's execution data db
+	anEds := s.nodeExecutionDataStore(accessNode)
+
+	// setup storage objects needed to get the execution data id
+	db, err := accessNode.DB()
+	require.NoError(s.T(), err, "could not open db")
+	anHeaders := store.NewHeaders(metrics, db)
+	anResults := store.NewExecutionResults(metrics, db)
+
+	// start an execution data service using the Observer Node's execution data db
+	onEds := s.nodeExecutionDataStore(observerNode)
+	// setup storage objects needed to get the execution data id
+	onDB, err := observerNode.DB()
+	require.NoError(s.T(), err, "could not open db")
+
+	onResults := store.NewExecutionResults(metrics, onDB)
+
+	s.checkResults(anHeaders, anResults, onResults, anEds, onEds)
+}
+
+// waitUntilExecutionDataForBlockIndexed waits until the execution data for the specified block height is indexed.
+// It subscribes to events from the start height and waits until the execution data for the specified block height is indexed.
+func (s *ExecutionDataPruningSuite) waitUntilExecutionDataForBlockIndexed(waitingBlockHeight uint64) {
+	observerNode := s.net.ContainerByName(s.observerNodeName)
+
+	sdkClient, err := observerNode.SDKClient()
+	s.Require().NoError(err)
+
+	// creating execution data api client
+	accessClient := sdkClient.RPCClient()
+	execClient := sdkClient.ExecutionDataRPCClient()
+
+	// wait until the observer node starts indexing blocks:
+	// request events for block 1 to make sure the first block has already been indexed, so we can start subscribing
+	s.Require().Eventually(func() bool {
+		_, err := accessClient.GetEventsForHeightRange(s.ctx, &accessproto.GetEventsForHeightRangeRequest{
+			Type:                 sdk.EventAccountCreated,
+			StartHeight:          1,
+			EndHeight:            1,
+			EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+		})
+
+		return err == nil
+	}, 2*time.Minute, 10*time.Second)
+
+	// subscribe to events up to waitingBlockHeight to make sure that execution data has been indexed up to
+	// waitingBlockHeight and the pruner has pruned execution data at least once.
+	// SubscribeEventsFromStartHeight is used here because we need to make sure the execution data is already indexed
+	stream, err := execClient.SubscribeEventsFromStartHeight(s.ctx, &executiondata.SubscribeEventsFromStartHeightRequest{
+		EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+		Filter:               &executiondata.EventFilter{},
+		HeartbeatInterval:    1,
+		StartBlockHeight:     0,
+	})
+	s.Require().NoError(err)
+	eventsChan, errChan, err := SubscribeHandler(s.ctx, stream.Recv, eventsResponseHandler)
+	s.Require().NoError(err)
+
+	duration := 3 * time.Minute
+	for {
+		select {
+		case err := <-errChan:
+			s.Require().NoErrorf(err, "unexpected %s error", s.observerNodeName)
+		case event := <-eventsChan:
+			if event.Height >= waitingBlockHeight {
+				return
+			}
+		case <-time.After(duration):
+			s.T().Fatalf("failed to index up to block %d within %s", waitingBlockHeight, duration.String())
+		}
+	}
+}
+
+// checkResults checks the results of execution data pruning to ensure correctness.
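+// For every height up to threshold+1 it expects the Access and Observer node blobstores to
+// return a BlobNotFoundError, which proves the corresponding execution data was pruned.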
+func (s *ExecutionDataPruningSuite) checkResults(
+	headers storage.Headers,
+	anResults storage.ExecutionResults,
+	onResults storage.ExecutionResults,
+	anEds execution_data.ExecutionDataStore,
+	onEds execution_data.ExecutionDataStore,
+) {
+	// Loop through blocks and verify the execution data was pruned correctly:
+	// execution data up to height threshold+1 should be pruned
+
+	// check execution results starting from block 1
+	startBlockHeight := uint64(1)
+	for i := startBlockHeight; i <= s.threshold+1; i++ {
+		header, err := headers.ByHeight(i)
+		require.NoError(s.T(), err, "%s: could not get header", s.accessNodeName)
+
+		result, err := anResults.ByBlockID(header.ID())
+		require.NoError(s.T(), err, "%s: could not get sealed result", s.accessNodeName)
+
+		var blobNotFoundError *execution_data.BlobNotFoundError
+
+		// verify AN execution data
+		_, err = anEds.Get(s.ctx, result.ExecutionDataID)
+		require.Error(s.T(), err, "%s: execution data for height %v should have been pruned", s.accessNodeName, i)
+		require.ErrorAs(s.T(), err, &blobNotFoundError)
+
+		result, err = onResults.ByID(result.ID())
+		require.NoError(s.T(), err, "%s: could not get sealed result from ON's storage", s.observerNodeName)
+
+		// verify ON execution data
+		_, err = onEds.Get(s.ctx, result.ExecutionDataID)
+		require.Error(s.T(), err, "%s: execution data for height %v should have been pruned", s.observerNodeName, i)
+		require.ErrorAs(s.T(), err, &blobNotFoundError)
+	}
+}
+
+func (s *ExecutionDataPruningSuite) nodeExecutionDataStore(node *testnet.Container) execution_data.ExecutionDataStore {
+	ds, err := pebbleds.NewDatastore(filepath.Join(node.ExecutionDataDBPath(), "blobstore"), nil)
+	require.NoError(s.T(), err, "could not get execution datastore")
+
+	return execution_data.NewExecutionDataStore(blobs.NewBlobstore(ds), execution_data.DefaultSerializer)
+}
diff --git a/integration/tests/access/cohort4/grpc_compression_test.go b/integration/tests/access/cohort4/grpc_compression_test.go
new file mode 100644
index 00000000000..807262350dd
--- /dev/null
+++ b/integration/tests/access/cohort4/grpc_compression_test.go
@@ -0,0 +1,103 @@
+package cohort4
+
+import (
+	"context"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	sdk "github.com/onflow/flow-go-sdk"
+
+	"github.com/onflow/flow-go/integration/testnet"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestAccessGRPCCompression(t *testing.T) {
+	suite.Run(t, new(AccessGRPCSuite))
+}
+
+type AccessGRPCSuite struct {
+	suite.Suite
+
+	log zerolog.Logger
+
+	// root context for the current test
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	net *testnet.FlowNetwork
+}
+
+func (s *AccessGRPCSuite) TearDownTest() {
+	s.log.Info().Msg("================> Start TearDownTest")
+	s.net.Remove()
+	s.cancel()
+	s.log.Info().Msg("================> Finish TearDownTest")
+}
+
+func (s *AccessGRPCSuite) SetupTest() {
+	s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel)
+	s.log.Info().Msg("================> SetupTest")
+	defer func() {
+		s.log.Info().Msg("================> Finish SetupTest")
+	}()
+
+	nodeConfigs := []testnet.NodeConfig{
+		testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel),
+			testnet.WithAdditionalFlag("--grpc-compressor=gzip")),
+	}
+
+	// need one execution node
+	exeConfig := testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel))
+	nodeConfigs = append(nodeConfigs, exeConfig)
+ + // need one dummy verification node (unused ghost) + verConfig := testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()) + nodeConfigs = append(nodeConfigs, verConfig) + + // need one controllable collection node + collConfig := testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), testnet.WithAdditionalFlag("--hotstuff-proposal-duration=100ms")) + nodeConfigs = append(nodeConfigs, collConfig) + + // need three consensus nodes (unused ghost) + for n := 0; n < 3; n++ { + conID := unittest.IdentifierFixture() + nodeConfig := testnet.NewNodeConfig(flow.RoleConsensus, + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.WithID(conID), + testnet.AsGhost()) + nodeConfigs = append(nodeConfigs, nodeConfig) + } + + conf := testnet.NewNetworkConfig("access_api_test", nodeConfigs) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) + + // start the network + s.T().Logf("starting flow network with docker containers") + s.ctx, s.cancel = context.WithCancel(context.Background()) + + s.net.Start(s.ctx) +} + +// TestGRPCCompression is a test suite function that tests GRPC compression in access nodes by providing a flag for compressor name. +func (s *AccessGRPCSuite) TestGRPCCompression() { + // Get the access node container and client + accessContainer := s.net.ContainerByName(testnet.PrimaryAN) + accessClient, err := accessContainer.TestnetClient() + require.NoError(s.T(), err, "failed to get access node client") + require.NotNil(s.T(), accessClient, "failed to get access node client") + + lastBlockID, err := accessClient.GetLatestBlockID(s.ctx) + require.NoError(s.T(), err) + + blockIds := []sdk.Identifier{sdk.Identifier(lastBlockID)} + eventType := string(flow.EventAccountCreated) + + events, err := accessClient.GetEventsForBlockIDs(s.ctx, eventType, blockIds) + require.NoError(s.T(), err) + + require.GreaterOrEqual(s.T(), len(events), 1, "expect received event") +} diff --git a/integration/tests/access/cohort4/grpc_state_stream_test.go b/integration/tests/access/cohort4/grpc_state_stream_test.go new file mode 100644 index 00000000000..6c414b4d5e1 --- /dev/null +++ b/integration/tests/access/cohort4/grpc_state_stream_test.go @@ -0,0 +1,522 @@ +package cohort4 + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "sync" + "testing" + + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go-sdk/test" + + "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/engine/ghost/client" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/utils/unittest" + + sdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow/protobuf/go/flow/entities" + "github.com/onflow/flow/protobuf/go/flow/executiondata" +) + +var ( + jsonOptions = []jsoncdc.Option{jsoncdc.WithAllowUnstructuredStaticTypes(true)} +) + +// SubscribeEventsResponse represents the subscription response containing events for a specific block and messageIndex +type SubscribeEventsResponse struct { + backend.EventsResponse + MessageIndex uint64 +} + +func TestGrpcStateStream(t *testing.T) { + suite.Run(t, new(GrpcStateStreamSuite)) 
+} + +type GrpcStateStreamSuite struct { + suite.Suite + lib.TestnetStateTracker + + log zerolog.Logger + + // root context for the current test + ctx context.Context + cancel context.CancelFunc + + net *testnet.FlowNetwork + + // RPC methods to test + testedRPCs func() []subscribeEventsRPCTest + + ghostID flow.Identifier +} + +func (s *GrpcStateStreamSuite) TearDownTest() { + s.log.Info().Msg("================> Start TearDownTest") + s.net.Remove() + s.cancel() + s.log.Info().Msg("================> Finish TearDownTest") +} + +func (s *GrpcStateStreamSuite) SetupTest() { + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + // access node + testANConfig := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), + testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"), + testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + testnet.WithAdditionalFlag("--event-query-mode=local-only"), + testnet.WithAdditionalFlag("--supports-observer=true"), + testnet.WithAdditionalFlagf("--public-network-execution-data-sync-enabled=true"), + ) + controlANConfig := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), + testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"), + testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + testnet.WithAdditionalFlag("--event-query-mode=execution-nodes-only"), + ) + + // add the ghost (access) node config + s.ghostID = unittest.IdentifierFixture() + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithID(s.ghostID), + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.AsGhost()) + + consensusConfigs := []func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), + testnet.WithLogLevel(zerolog.FatalLevel), + } + + nodeConfigs := []testnet.NodeConfig{ + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), + testANConfig, // access_1 + controlANConfig, // access_2 + ghostNode, // access ghost + } + + // add the observer node config + observers := []testnet.ObserverConfig{{ + ContainerName: testnet.PrimaryON, + LogLevel: zerolog.InfoLevel, + AdditionalFlags: []string{ + 
fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + "--execution-data-sync-enabled=true", + "--event-query-mode=execution-nodes-only", + "--execution-data-indexing-enabled=true", + }, + }} + + conf := testnet.NewNetworkConfig("access_event_streaming_test", nodeConfigs, testnet.WithObservers(observers...)) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) + + // start the network + s.T().Logf("starting flow network with docker containers") + s.ctx, s.cancel = context.WithCancel(context.Background()) + + s.testedRPCs = s.getRPCs + + s.net.Start(s.ctx) + s.Track(s.T(), s.ctx, s.Ghost()) +} + +func (s *GrpcStateStreamSuite) Ghost() *client.GhostClient { + client, err := s.net.ContainerByID(s.ghostID).GhostClient() + require.NoError(s.T(), err, "could not get ghost client") + return client +} + +// TestRestEventStreaming tests gRPC event streaming +func (s *GrpcStateStreamSuite) TestHappyPath() { + unittest.SkipUnless(s.T(), unittest.TEST_FLAKY, "flaky tests: https://github.com/onflow/flow-go/issues/5825") + testAN := s.net.ContainerByName(testnet.PrimaryAN) + sdkClientTestAN := getClient(s.T(), testAN) + + controlAN := s.net.ContainerByName("access_2") + sdkClientControlAN := getClient(s.T(), controlAN) + + testON := s.net.ContainerByName(testnet.PrimaryON) + sdkClientTestON := getClient(s.T(), testON) + + // get the first block height + currentFinalized := s.BlockState.HighestFinalizedHeight() + blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized) + + // Let the network run for this many blocks + blockCount := uint64(5) + // wait for the requested number of sealed blocks + s.BlockState.WaitForSealedHeight(s.T(), blockA.Height+blockCount) + + txGenerator, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() + s.Require().NoError(err) + + var startValue interface{} + txCount := 10 + + for _, rpc := range s.testedRPCs() { + s.T().Run(rpc.name, func(t *testing.T) { + if rpc.name == "SubscribeEventsFromStartBlockID" { + startValue = convert.IdentifierToMessage(blockA.ID()) + } else { + startValue = blockA.Height + } + + testANRecv := rpc.call(s.ctx, sdkClientTestAN, startValue, &executiondata.EventFilter{}) + testANEvents, testANErrs, err := SubscribeHandler(s.ctx, testANRecv, eventsResponseHandler) + s.Require().NoError(err) + + controlANRecv := rpc.call(s.ctx, sdkClientControlAN, startValue, &executiondata.EventFilter{}) + controlANEvents, controlANErrs, err := SubscribeHandler(s.ctx, controlANRecv, eventsResponseHandler) + s.Require().NoError(err) + + testONRecv := rpc.call(s.ctx, sdkClientTestON, startValue, &executiondata.EventFilter{}) + testONEvents, testONErrs, err := SubscribeHandler(s.ctx, testONRecv, eventsResponseHandler) + s.Require().NoError(err) + + if rpc.generateEvents { + // generate events + go func() { + s.generateEvents(txGenerator, txCount) + }() + } + + has := func(events []flow.Event, eventType flow.EventType) bool { + for _, event := range events { + if event.Type == eventType { + return true + } + } + return false + } + + targetEvent := flow.EventType("flow.AccountCreated") + + foundANTxTestCount := 0 + foundANTxControlCount := 0 + foundONTxCount := 0 + + messageIndex := counters.NewMonotonicCounter(0) + + r := NewResponseTracker(compareEventsResponse, 3) + + for { + select { + case err := <-testANErrs: + s.Require().NoErrorf(err, "unexpected test AN error") + case err := <-controlANErrs: + 
+					s.Require().NoErrorf(err, "unexpected control AN error")
+				case err := <-testONErrs:
+					s.Require().NoErrorf(err, "unexpected test ON error")
+				case event := <-testANEvents:
+					if has(event.Events, targetEvent) {
+						s.T().Logf("adding access test events: %d %d %v", event.Height, len(event.Events), event.Events)
+						r.Add(s.T(), event.Height, "access_test", event)
+						foundANTxTestCount++
+					}
+				case event := <-controlANEvents:
+					if has(event.Events, targetEvent) {
+						if ok := messageIndex.Set(event.MessageIndex); !ok {
+							s.Require().Failf("non-sequential message index", "messageIndex %d isn't sequential", event.MessageIndex)
+						}
+						s.T().Logf("adding control events: %d %d %v", event.Height, len(event.Events), event.Events)
+						r.Add(s.T(), event.Height, "access_control", event)
+						foundANTxControlCount++
+					}
+				case event := <-testONEvents:
+					if has(event.Events, targetEvent) {
+						s.T().Logf("adding observer test events: %d %d %v", event.Height, len(event.Events), event.Events)
+						r.Add(s.T(), event.Height, "observer_test", event)
+						foundONTxCount++
+					}
+				}
+
+				if foundANTxTestCount >= txCount &&
+					foundONTxCount >= txCount &&
+					foundANTxControlCount >= txCount {
+					break
+				}
+			}
+
+			r.AssertAllResponsesHandled(t, txCount)
+		})
+	}
+}
+
+// generateEvents is a helper function for generating AccountCreated events
+func (s *GrpcStateStreamSuite) generateEvents(client *testnet.Client, txCount int) {
+	refBlockID, err := client.GetLatestBlockID(s.ctx)
+	s.Require().NoError(err)
+
+	for i := 0; i < txCount; i++ {
+		accountKey := test.AccountKeyGenerator().New()
+		address, err := client.CreateAccount(s.ctx, accountKey, sdk.HexToID(refBlockID.String()))
+		if err != nil {
+			i--
+			continue
+		}
+		s.T().Logf("created account: %s", address)
+	}
+}
+
+type subscribeEventsRPCTest struct {
+	name           string
+	call           func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error)
+	generateEvents bool // whether the subtest should generate new events, or reuse events from earlier subtests to reduce the test's running time
+}
+
+func (s *GrpcStateStreamSuite) getRPCs() []subscribeEventsRPCTest {
+	return []subscribeEventsRPCTest{
+		{
+			name: "SubscribeEventsFromLatest",
+			call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, _ interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) {
+				stream, err := client.SubscribeEventsFromLatest(ctx, &executiondata.SubscribeEventsFromLatestRequest{
+					EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+					Filter:               filter,
+					HeartbeatInterval:    1,
+				})
+				s.Require().NoError(err)
+				return stream.Recv
+			},
+			generateEvents: true,
+		},
+		{
+			name: "SubscribeEvents",
+			call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, _ interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) {
+				// Ignore deprecation warning. Keeping these tests until the endpoint is removed
+				//nolint:staticcheck
+				stream, err := client.SubscribeEvents(ctx, &executiondata.SubscribeEventsRequest{
+					StartBlockId:         convert.IdentifierToMessage(flow.ZeroID),
+					StartBlockHeight:     0,
+					EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+					Filter:               filter,
+					HeartbeatInterval:    1,
+				})
+				s.Require().NoError(err)
+				return stream.Recv
+			},
+			generateEvents: true,
+		},
+		{
+			name: "SubscribeEventsFromStartBlockID",
+			call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) {
+				stream, err := client.SubscribeEventsFromStartBlockID(ctx, &executiondata.SubscribeEventsFromStartBlockIDRequest{
+					StartBlockId:         startValue.([]byte),
+					EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+					Filter:               filter,
+					HeartbeatInterval:    1,
+				})
+				s.Require().NoError(err)
+				return stream.Recv
+			},
+			generateEvents: false, // use previous events
+		},
+		{
+			name: "SubscribeEventsFromStartHeight",
+			call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) {
+				stream, err := client.SubscribeEventsFromStartHeight(ctx, &executiondata.SubscribeEventsFromStartHeightRequest{
+					StartBlockHeight:     startValue.(uint64),
+					EventEncodingVersion: entities.EventEncodingVersion_CCF_V0,
+					Filter:               filter,
+					HeartbeatInterval:    1,
+				})
+				s.Require().NoError(err)
+				return stream.Recv
+			},
+			generateEvents: false, // use previous events
+		},
+	}
+}
+
+// ResponseTracker is a generic tracker for responses.
+type ResponseTracker[T any] struct {
+	r                       map[uint64]map[string]T
+	mu                      sync.RWMutex
+	compare                 func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error
+	checkCount              int // number of comparisons performed so far
+	responsesCountToCompare int // number of responses that must be collected for a height before they are compared
+}
+
+// NewResponseTracker creates a new ResponseTracker.
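+// The compare callback runs once responsesCountToCompare responses have been collected for a
+// block height; compared heights are then evicted from the tracker. For example, the event
+// streaming test above compares three sources per height:
+//
+//	r := NewResponseTracker(compareEventsResponse, 3)
+//	r.Add(t, height, "access_test", event)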
+func NewResponseTracker[T any](
+	compare func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error,
+	responsesCountToCompare int,
+) *ResponseTracker[T] {
+	return &ResponseTracker[T]{
+		r:                       make(map[uint64]map[string]T),
+		compare:                 compare,
+		responsesCountToCompare: responsesCountToCompare,
+	}
+}
+
+func (r *ResponseTracker[T]) AssertAllResponsesHandled(t *testing.T, expectedCheckCount int) {
+	assert.Equal(t, expectedCheckCount, r.checkCount)
+
+	// make sure the tracker holds no heights that collected enough responses but were never compared
+	hasNotComparedResponses := false
+	for _, valueMap := range r.r {
+		if len(valueMap) == r.responsesCountToCompare {
+			hasNotComparedResponses = true
+			break
+		}
+	}
+	assert.False(t, hasNotComparedResponses)
+}
+
+func (r *ResponseTracker[T]) Add(t *testing.T, blockHeight uint64, name string, response T) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if _, ok := r.r[blockHeight]; !ok {
+		r.r[blockHeight] = make(map[string]T)
+	}
+	r.r[blockHeight][name] = response
+
+	if len(r.r[blockHeight]) != r.responsesCountToCompare {
+		return
+	}
+
+	r.checkCount += 1
+	err := r.compare(t, r.r, blockHeight)
+	if err != nil {
+		log.Fatalf("comparison error at block height %d: %v", blockHeight, err)
+	}
+
+	delete(r.r, blockHeight)
+}
+
+func eventsResponseHandler(msg *executiondata.SubscribeEventsResponse) (*SubscribeEventsResponse, error) {
+	events, err := convert.MessagesToEvents(msg.GetEvents())
+	if err != nil {
+		return nil, err
+	}
+
+	return &SubscribeEventsResponse{
+		EventsResponse: backend.EventsResponse{
+			Height:         msg.GetBlockHeight(),
+			BlockID:        convert.MessageToIdentifier(msg.GetBlockId()),
+			Events:         events,
+			BlockTimestamp: msg.GetBlockTimestamp().AsTime(),
+		},
+		MessageIndex: msg.MessageIndex,
+	}, nil
+}
+
+func compareEventsResponse(t *testing.T, responses map[uint64]map[string]*SubscribeEventsResponse, blockHeight uint64) error {
+	accessControlData := responses[blockHeight]["access_control"]
+	accessTestData := responses[blockHeight]["access_test"]
+	observerTestData := responses[blockHeight]["observer_test"]
+
+	// Compare access_control with access_test
+	compareEvents(t, accessControlData, accessTestData)
+
+	// Compare access_control with observer_test
+	compareEvents(t, accessControlData, observerTestData)
+
+	return nil
+}
+
+func compareEvents(t *testing.T, controlData, testData *SubscribeEventsResponse) {
+	require.Equal(t, controlData.BlockID, testData.BlockID)
+	require.Equal(t, controlData.Height, testData.Height)
+	require.Equal(t, controlData.BlockTimestamp, testData.BlockTimestamp)
+	require.Equal(t, controlData.MessageIndex, testData.MessageIndex)
+	require.Equal(t, len(controlData.Events), len(testData.Events))
+
+	for i := range controlData.Events {
+		require.Equal(t, controlData.Events[i].Type, testData.Events[i].Type)
+		require.Equal(t, controlData.Events[i].TransactionID, testData.Events[i].TransactionID)
+		require.Equal(t, controlData.Events[i].TransactionIndex, testData.Events[i].TransactionIndex)
+		require.Equal(t, controlData.Events[i].EventIndex, testData.Events[i].EventIndex)
+		require.True(t, bytes.Equal(controlData.Events[i].Payload, testData.Events[i].Payload))
+	}
+}
+
+func getClient(t *testing.T, node *testnet.Container) executiondata.ExecutionDataAPIClient {
+	accessClient, err := node.SDKClient()
+	require.NoError(t, err, "could not get access client")
+	return accessClient.ExecutionDataRPCClient()
+}
+
+func SubscribeHandler[T any, V any](
+	ctx context.Context,
+	recv func() (T,
error), + responseHandler func(T) (V, error), +) (<-chan V, <-chan error, error) { + sub := make(chan V) + errChan := make(chan error) + + sendErr := func(err error) { + select { + case <-ctx.Done(): + case errChan <- err: + } + } + + go func() { + defer close(sub) + defer close(errChan) + + for { + resp, err := recv() + if err != nil { + if err == io.EOF { + return + } + + sendErr(fmt.Errorf("error receiving response: %w", err)) + return + } + + response, err := responseHandler(resp) + if err != nil { + sendErr(fmt.Errorf("error converting response: %w", err)) + return + } + + select { + case <-ctx.Done(): + return + case sub <- response: + } + } + }() + + return sub, errChan, nil +} diff --git a/integration/tests/access/cohort4/grpc_streaming_blocks_test.go b/integration/tests/access/cohort4/grpc_streaming_blocks_test.go new file mode 100644 index 00000000000..769df5fccfd --- /dev/null +++ b/integration/tests/access/cohort4/grpc_streaming_blocks_test.go @@ -0,0 +1,287 @@ +package cohort4 + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/engine/ghost/client" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/access/common" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" +) + +func TestGrpcBlocksStream(t *testing.T) { + suite.Run(t, new(GrpcBlocksStreamSuite)) +} + +type GrpcBlocksStreamSuite struct { + suite.Suite + lib.TestnetStateTracker + + log zerolog.Logger + + // root context for the current test + ctx context.Context + cancel context.CancelFunc + + net *testnet.FlowNetwork + + // RPC methods to test + testedRPCs func() []subscribeBlocksRPCTest + + ghostID flow.Identifier +} + +func (s *GrpcBlocksStreamSuite) TearDownTest() { + s.log.Info().Msg("================> Start TearDownTest") + s.net.Remove() + s.cancel() + s.log.Info().Msg("================> Finish TearDownTest") +} + +func (s *GrpcBlocksStreamSuite) SetupTest() { + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + // access node + accessConfig := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), + testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"), + testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + testnet.WithAdditionalFlag("--event-query-mode=local-only"), + testnet.WithAdditionalFlag("--supports-observer=true"), + testnet.WithAdditionalFlagf("--public-network-execution-data-sync-enabled=true"), + ) + + consensusConfigs := []func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), + testnet.WithLogLevel(zerolog.FatalLevel), + } + + // add the ghost (access) node 
config
+	s.ghostID = unittest.IdentifierFixture()
+	ghostNode := testnet.NewNodeConfig(
+		flow.RoleAccess,
+		testnet.WithID(s.ghostID),
+		testnet.WithLogLevel(zerolog.FatalLevel),
+		testnet.AsGhost())
+
+	nodeConfigs := []testnet.NodeConfig{
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)),
+		accessConfig,
+		ghostNode, // access ghost
+	}
+
+	// add the observer node config
+	observers := []testnet.ObserverConfig{{
+		ContainerName: testnet.PrimaryON,
+		LogLevel:      zerolog.InfoLevel,
+		AdditionalFlags: []string{
+			fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir),
+			fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir),
+			"--execution-data-sync-enabled=true",
+			"--event-query-mode=execution-nodes-only",
+			"--execution-data-indexing-enabled=true",
+		},
+	}}
+
+	conf := testnet.NewNetworkConfig("access_blocks_streaming_test", nodeConfigs, testnet.WithObservers(observers...))
+	s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet)
+
+	// start the network
+	s.T().Logf("starting flow network with docker containers")
+	s.ctx, s.cancel = context.WithCancel(context.Background())
+
+	s.testedRPCs = s.getRPCs
+
+	s.net.Start(s.ctx)
+	s.Track(s.T(), s.ctx, s.Ghost())
+}
+
+func (s *GrpcBlocksStreamSuite) Ghost() *client.GhostClient {
+	client, err := s.net.ContainerByID(s.ghostID).GhostClient()
+	require.NoError(s.T(), err, "could not get ghost client")
+	return client
+}
+
+// TestHappyPath tests gRPC block streaming
+func (s *GrpcBlocksStreamSuite) TestHappyPath() {
+	accessUrl := fmt.Sprintf("localhost:%s", s.net.ContainerByName(testnet.PrimaryAN).Port(testnet.GRPCPort))
+	accessClient, err := common.GetAccessAPIClient(accessUrl)
+	s.Require().NoError(err)
+
+	observerURL := fmt.Sprintf("localhost:%s", s.net.ContainerByName(testnet.PrimaryON).Port(testnet.GRPCPort))
+	observerClient, err := common.GetAccessAPIClient(observerURL)
+	s.Require().NoError(err)
+
+	// get the first block height
+	currentFinalized := s.BlockState.HighestFinalizedHeight()
+	blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized)
+
+	var startValue interface{}
+	txCount := 10
+
+	for _, rpc := range s.testedRPCs() {
+		s.T().Run(rpc.name, func(t *testing.T) {
+			if rpc.name == "SubscribeBlocksFromStartBlockID" {
+				startValue = convert.IdentifierToMessage(blockA.ID())
+			} else {
+				startValue = blockA.Height
+			}
+
+			accessRecv := rpc.call(s.ctx, accessClient, startValue)
+			accessBlocks, accessBlockErrs, err := SubscribeHandler(s.ctx, accessRecv, blockResponseHandler)
+			s.Require().NoError(err)
+
+			observerRecv := rpc.call(s.ctx, observerClient, startValue)
+			observerBlocks, observerBlockErrs, err := SubscribeHandler(s.ctx, observerRecv, blockResponseHandler)
+			s.Require().NoError(err)
+
+			foundANTxCount := 0
+			foundONTxCount := 0
+
+			r := NewResponseTracker(compareBlocksResponse, 2)
+
+			for {
+				select {
+				case err := <-accessBlockErrs:
+					s.Require().NoErrorf(err, "unexpected AN error")
"unexpected AN error") + case err := <-observerBlockErrs: + s.Require().NoErrorf(err, "unexpected ON error") + case block := <-accessBlocks: + s.T().Logf("AN block received: height: %d", block.Height) + r.Add(s.T(), block.Height, "access", block) + foundANTxCount++ + case block := <-observerBlocks: + s.T().Logf("ON block received: height: %d", block.Height) + s.addObserverBlock(block, r, rpc.name, &foundONTxCount) + } + + if foundANTxCount >= txCount && foundONTxCount >= txCount { + break + } + } + + r.AssertAllResponsesHandled(t, txCount) + }) + } +} + +// addObserverBlock adds a block received from the observer node to the response tracker +// and increments the transaction count for that node. +// +// Parameters: +// - block: The block received from the node. +// - responseTracker: The response tracker to which the block should be added. +// - rpcCallName: The name of the rpc subscription call which is testing. +// - txCount: A pointer to an integer that tracks the number of transactions received from the node. +func (s *GrpcBlocksStreamSuite) addObserverBlock( + block *flow.Block, + responseTracker *ResponseTracker[*flow.Block], + rpcCallName string, + txCount *int, +) { + // the response tracker expects to receive data for the same heights from each node. + // when subscribing to the latest block, the specific start height depends on the node's + // local sealed height, so it may vary. + // check only the responses for ON that are also tracked by AN and compare them + isANResponseExist := len(responseTracker.r[block.Height]) > 0 + if rpcCallName == "SubscribeBlocksFromLatest" && !isANResponseExist { + return + } + + responseTracker.Add(s.T(), block.Height, "observer", block) + *txCount++ +} + +func blockResponseHandler(msg *accessproto.SubscribeBlocksResponse) (*flow.Block, error) { + return convert.MessageToBlock(msg.GetBlock()) +} + +func compareBlocksResponse(t *testing.T, responses map[uint64]map[string]*flow.Block, blockHeight uint64) error { + accessData := responses[blockHeight]["access"] + observerData := responses[blockHeight]["observer"] + + // Compare access with observer + compareBlocks(t, accessData, observerData) + + return nil +} + +func compareBlocks(t *testing.T, accessBlock *flow.Block, observerBlock *flow.Block) { + require.Equal(t, accessBlock.ID(), observerBlock.ID()) + require.Equal(t, accessBlock.Height, observerBlock.Height) + require.Equal(t, accessBlock.Timestamp, observerBlock.Timestamp) + require.Equal(t, accessBlock.Payload.Hash(), observerBlock.Payload.Hash()) +} + +type subscribeBlocksRPCTest struct { + name string + call func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) func() (*accessproto.SubscribeBlocksResponse, error) +} + +func (s *GrpcBlocksStreamSuite) getRPCs() []subscribeBlocksRPCTest { + return []subscribeBlocksRPCTest{ + { + name: "SubscribeBlocksFromLatest", + call: func(ctx context.Context, client accessproto.AccessAPIClient, _ interface{}) func() (*accessproto.SubscribeBlocksResponse, error) { + stream, err := client.SubscribeBlocksFromLatest(ctx, &accessproto.SubscribeBlocksFromLatestRequest{ + BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, + FullBlockResponse: true, + }) + s.Require().NoError(err) + return stream.Recv + }, + }, + { + name: "SubscribeBlocksFromStartBlockID", + call: func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) func() (*accessproto.SubscribeBlocksResponse, error) { + stream, err := client.SubscribeBlocksFromStartBlockID(ctx, 
&accessproto.SubscribeBlocksFromStartBlockIDRequest{ + StartBlockId: startValue.([]byte), + BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, + FullBlockResponse: true, + }) + s.Require().NoError(err) + return stream.Recv + }, + }, + { + name: "SubscribeBlocksFromStartHeight", + call: func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) func() (*accessproto.SubscribeBlocksResponse, error) { + stream, err := client.SubscribeBlocksFromStartHeight(ctx, &accessproto.SubscribeBlocksFromStartHeightRequest{ + StartBlockHeight: startValue.(uint64), + BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, + FullBlockResponse: true, + }) + s.Require().NoError(err) + return stream.Recv + }, + }, + } +} diff --git a/integration/tests/access/cohort4/rest_state_stream_test.go b/integration/tests/access/cohort4/rest_state_stream_test.go new file mode 100644 index 00000000000..a268d815948 --- /dev/null +++ b/integration/tests/access/cohort4/rest_state_stream_test.go @@ -0,0 +1,237 @@ +package cohort4 + +import ( + "context" + "fmt" + "net/url" + "strings" + "testing" + "time" + + "github.com/gorilla/websocket" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/state_stream/backend" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/access/common" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" +) + +func TestRestStateStream(t *testing.T) { + suite.Run(t, new(RestStateStreamSuite)) +} + +type RestStateStreamSuite struct { + suite.Suite + + log zerolog.Logger + + // root context for the current test + ctx context.Context + cancel context.CancelFunc + + net *testnet.FlowNetwork +} + +func (s *RestStateStreamSuite) TearDownTest() { + s.log.Info().Msg("================> Start TearDownTest") + s.net.Remove() + s.cancel() + s.log.Info().Msg("================> Finish TearDownTest") +} + +func (s *RestStateStreamSuite) SetupTest() { + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + // access node + bridgeANConfig := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), + testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"), + testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + ) + + // add the ghost (access) node config + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.AsGhost()) + + consensusConfigs := []func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=100ms"), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), + 
testnet.WithLogLevel(zerolog.FatalLevel),
+	}
+
+	nodeConfigs := []testnet.NodeConfig{
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...),
+		testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)),
+		bridgeANConfig,
+		ghostNode,
+	}
+
+	conf := testnet.NewNetworkConfig("access_api_test", nodeConfigs)
+	s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet)
+
+	// start the network
+	s.T().Logf("starting flow network with docker containers")
+	s.ctx, s.cancel = context.WithCancel(context.Background())
+
+	s.net.Start(s.ctx)
+}
+
+// TestRestEventStreaming tests the event streaming route of the REST API
+func (s *RestStateStreamSuite) TestRestEventStreaming() {
+	restAddr := s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.RESTPort)
+
+	s.T().Run("subscribe events", func(t *testing.T) {
+		startBlockId := flow.ZeroID
+		startHeight := uint64(0)
+		url := getSubscribeEventsRequest(restAddr, startBlockId, startHeight, nil, nil, nil)
+
+		client, err := common.GetWSClient(s.ctx, url)
+		require.NoError(t, err)
+
+		var receivedEventsResponse []*backend.EventsResponse
+
+		go func() {
+			time.Sleep(10 * time.Second)
+			// close the connection after 10 seconds
+			client.Close()
+		}()
+
+		eventChan := make(chan *backend.EventsResponse)
+		go func() {
+			for {
+				resp := &backend.EventsResponse{}
+				err := client.ReadJSON(resp)
+				if err != nil {
+					if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {
+						s.T().Logf("unexpected close error: %v", err)
+						require.NoError(s.T(), err)
+					}
+					close(eventChan) // Close the event channel when the client connection is closed
+					return
+				}
+				eventChan <- resp
+			}
+		}()
+
+		// collect received events for 10 seconds
+		for eventResponse := range eventChan {
+			receivedEventsResponse = append(receivedEventsResponse, eventResponse)
+		}
+
+		// check events
+		s.requireEvents(receivedEventsResponse)
+	})
+}
+
+// requireEvents is a helper function that compares events received from REST state
+// streaming against events received from the gRPC API
+func (s *RestStateStreamSuite) requireEvents(receivedEventsResponse []*backend.EventsResponse) {
+	// make sure there are received events
+	require.GreaterOrEqual(s.T(), len(receivedEventsResponse), 1, "expect received events")
+
+	grpcCtx, grpcCancel := context.WithCancel(s.ctx)
+	defer grpcCancel()
+
+	grpcAddr := s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort)
+
+	grpcConn, err := grpc.DialContext(grpcCtx, grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	require.NoError(s.T(), err, "failed to connect to access node")
+	defer grpcConn.Close()
+
+	grpcClient := accessproto.NewAccessAPIClient(grpcConn)
+
+	// Keep track of the number of responses that carried non-empty events
+	nonEmptyResponseCount := 0
+	for _, receivedEventResponse := range receivedEventsResponse {
+		// Group the received events by their event type
+		receivedEventMap := make(map[flow.EventType][]flow.Event)
+		for _, event := range receivedEventResponse.Events {
+			eventType := event.Type
+			receivedEventMap[eventType] = append(receivedEventMap[eventType], event)
+		}
+
+		for eventType, receivedEventList := range receivedEventMap {
+			// get events by block id and event type
+			response, err := MakeApiRequest(
+				grpcClient.GetEventsForBlockIDs,
+				grpcCtx,
+				&accessproto.GetEventsForBlockIDsRequest{
+					BlockIds: [][]byte{convert.IdentifierToMessage(receivedEventResponse.BlockID)},
+					Type:     string(eventType),
+				},
+			)
+			require.NoError(s.T(), err)
+			require.Equal(s.T(), 1, len(response.Results), "expect to get 1 result")
+
+			expectedEventsResult := response.Results[0]
+			require.Equal(s.T(), expectedEventsResult.BlockHeight, receivedEventResponse.Height, "expect the same block height")
+			require.Equal(s.T(), len(expectedEventsResult.Events), len(receivedEventList), "expect the same count of events: want: %+v, got: %+v", expectedEventsResult.Events, receivedEventList)
+
+			for i, event := range receivedEventList {
+				require.Equal(s.T(), expectedEventsResult.Events[i].EventIndex, event.EventIndex, "expect the same event index")
+				require.Equal(s.T(), convert.MessageToIdentifier(expectedEventsResult.Events[i].TransactionId), event.TransactionID, "expect the same transaction id")
+			}
+		}
+
+		// Count each response with non-empty events exactly once,
+		// independent of how many event types it contains
+		if len(receivedEventResponse.Events) > 0 {
+			nonEmptyResponseCount++
+		}
+	}
+	// Ensure that at least one response had non-empty events
+	require.GreaterOrEqual(s.T(), nonEmptyResponseCount, 1, "expect at least one response with non-empty events")
+}
+
+// getSubscribeEventsRequest is a helper function that builds the subscribe_events request URL
+func getSubscribeEventsRequest(accessAddr string, startBlockId flow.Identifier, startHeight uint64, eventTypes []string, addresses []string, contracts []string) string {
+	u, _ := url.Parse("http://" + accessAddr + "/v1/subscribe_events")
+	q := u.Query()
+
+	if startBlockId != flow.ZeroID {
+		q.Add("start_block_id", startBlockId.String())
+	}
+
+	if startHeight != request.EmptyHeight {
+		q.Add("start_height", fmt.Sprintf("%d", startHeight))
+	}
+
+	if len(eventTypes) > 0 {
+		q.Add("event_types", strings.Join(eventTypes, ","))
+	}
+	if len(addresses) > 0 {
+		q.Add("addresses", strings.Join(addresses, ","))
+	}
+	if len(contracts) > 0 {
+		q.Add("contracts", strings.Join(contracts, ","))
+	}
+
+	u.RawQuery = q.Encode()
+	return u.String()
+}
diff --git a/integration/tests/access/cohort4/websocket_subscriptions_test.go b/integration/tests/access/cohort4/websocket_subscriptions_test.go
new file mode 100644
index 00000000000..7ba34508ec5
--- /dev/null
+++ b/integration/tests/access/cohort4/websocket_subscriptions_test.go
@@ -0,0 +1,1267 @@
+package cohort4
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/gorilla/websocket"
+	sdk "github.com/onflow/flow-go-sdk"
+	sdkcrypto "github.com/onflow/flow-go-sdk/crypto"
+	"github.com/onflow/flow-go-sdk/templates"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/suite"
+
+	restcommon "github.com/onflow/flow-go/engine/access/rest/common"
+	commonmodels "github.com/onflow/flow-go/engine/access/rest/common/models"
+	"github.com/onflow/flow-go/engine/access/rest/common/parser"
+	"github.com/onflow/flow-go/engine/access/rest/util"
+	"github.com/onflow/flow-go/engine/access/rest/websockets"
+	"github.com/onflow/flow-go/engine/access/rest/websockets/data_providers"
+	dpmodels
"github.com/onflow/flow-go/engine/access/rest/websockets/data_providers/models" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/access/common" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" +) + +const InactivityTimeout = 20 +const MaxSubscriptionsPerConnection = 5 + +func TestWebsocketSubscription(t *testing.T) { + suite.Run(t, new(WebsocketSubscriptionSuite)) +} + +type WebsocketSubscriptionSuite struct { + suite.Suite + + log zerolog.Logger + + // root context for the current test + ctx context.Context + cancel context.CancelFunc + + net *testnet.FlowNetwork + + grpcClient accessproto.AccessAPIClient + serviceClient *testnet.Client + restAccessAddress string +} + +func (s *WebsocketSubscriptionSuite) TearDownTest() { + s.log.Info().Msg("================> Start TearDownTest") + s.net.Remove() + s.cancel() + s.log.Info().Msg("================> Finish TearDownTest") +} + +func (s *WebsocketSubscriptionSuite) SetupTest() { + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + // access node + bridgeANConfig := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.ErrorLevel), + testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), + testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"), + testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + testnet.WithAdditionalFlagf("--websocket-inactivity-timeout=%ds", InactivityTimeout), + testnet.WithAdditionalFlagf("--websocket-max-subscriptions-per-connection=%d", MaxSubscriptionsPerConnection), + testnet.WithMetricsServer(), + ) + + // add the ghost (access) node config + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.AsGhost()) + + consensusConfigs := []func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=100ms"), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), + testnet.WithLogLevel(zerolog.FatalLevel), + } + + nodeConfigs := []testnet.NodeConfig{ + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), + bridgeANConfig, + ghostNode, + } + + conf := testnet.NewNetworkConfig("websockets_subscriptions_test", nodeConfigs) + s.net 
= testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet)
+
+	// start the network
+	s.T().Logf("starting flow network with docker containers")
+	s.ctx, s.cancel = context.WithCancel(context.Background())
+
+	s.net.Start(s.ctx)
+
+	sdkClient, err := s.net.ContainerByName(testnet.PrimaryAN).SDKClient()
+	s.Require().NoError(err)
+
+	s.grpcClient = sdkClient.RPCClient()
+
+	s.serviceClient, err = s.net.ContainerByName(testnet.PrimaryAN).TestnetClient()
+	s.Require().NoError(err)
+
+	s.restAccessAddress = s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.RESTPort)
+
+	// wait until the network is progressing
+	var header *sdk.BlockHeader
+	s.Require().Eventually(func() bool {
+		header, err = s.serviceClient.GetLatestSealedBlockHeader(s.ctx)
+		s.Require().NoError(err)
+
+		return header.Height > 0
+	}, 30*time.Second, 1*time.Second)
+}
+
+// TestWebsocketSubscriptions initializes a WebSocket client and runs a suite of WebSocket-related tests.
+//
+// This function ensures that all WebSocket tests run within a single setup to minimize system initialization time.
+// New WebSocket-related tests should be added here to maintain efficiency.
+//
+// It executes the following test cases:
+// - Inactivity tracking
+// - Maximum subscriptions per connection
+// - Subscription error handling
+// - Unsubscription error handling
+// - Listing active subscriptions
+// - Valid subscription scenarios (happy cases)
+// - Subscription multiplexing
+//
+// The WebSocket client is properly closed after each sub-test execution to avoid resource leaks.
+func (s *WebsocketSubscriptionSuite) TestWebsocketSubscriptions() {
+	// NOTE: To minimize the system setup time for WebSocket tests,
+	// the setup is performed once, and all tests run as sub-functions.
+	// When adding a new WebSocket test, please include it here.
+	s.testInactivityTracker()
+	s.testMaxSubscriptionsPerConnection()
+	s.testSubscriptionErrorCases()
+	s.testUnsubscriptionErrorCases()
+	s.testListOfSubscriptions()
+	s.testHappyCases()
+	s.testSubscriptionMultiplexing()
+}
+
+// testInactivityTracker tests that the WebSocket connection closes due to inactivity
+// after the specified timeout duration.
+func (s *WebsocketSubscriptionSuite) testInactivityTracker() {
+	// Steps:
+	// 1. Establish a WebSocket connection to the server.
+	// 2. Start a goroutine to listen for messages from the server.
+	// 3. Wait for the server to close the connection due to inactivity.
+	// 4. Validate that the actual inactivity duration is within the expected range.
+
+	inactivityTickerPeriod := InactivityTimeout / 10 // determines the interval at which the inactivity ticker checks for inactivity
+	expectedMinInactivityDuration := time.Duration(InactivityTimeout+inactivityTickerPeriod) * time.Second
+
+	s.T().Run("no active subscription after connection creation", func(t *testing.T) {
+		wsClient, err := common.GetWSClient(s.ctx, getWebsocketsUrl(s.restAccessAddress))
+		s.Require().NoError(err)
+		defer func() { s.Require().NoError(wsClient.Close()) }()
+
+		actualInactivityDuration := monitorInactivity(t, wsClient, expectedMinInactivityDuration)
+		// Verify that the connection does not close before the InactivityTimeout + inactivity ticker period.
+		s.GreaterOrEqual(actualInactivityDuration, expectedMinInactivityDuration)
+	})
+
+	// Steps:
+	// 1. Establish a WebSocket connection to the server.
+	// 2. Subscribe to a topic and validate the subscription response.
+	// 3. Unsubscribe from the topic and validate the unsubscription response.
+	// 4.
Wait for the server to close the connection due to inactivity. + s.T().Run("all active subscriptions unsubscribed", func(t *testing.T) { + // Step 1: Establish WebSocket connection + wsClient, err := common.GetWSClient(s.ctx, getWebsocketsUrl(s.restAccessAddress)) + s.Require().NoError(err) + defer func() { s.Require().NoError(wsClient.Close()) }() + + // Step 2: Subscribe to a topic + subscriptionRequest := models.SubscribeMessageRequest{ + BaseMessageRequest: models.BaseMessageRequest{ + Action: models.SubscribeAction, + SubscriptionID: "events_id", + }, + Topic: data_providers.EventsTopic, + } + + s.Require().NoError(wsClient.WriteJSON(subscriptionRequest)) + + _, baseResponses, _ := s.listenWebSocketResponses( + wsClient, + 5*time.Second, + subscriptionRequest.SubscriptionID, + ) + s.Require().Equal(1, len(baseResponses)) + s.Require().Nil(baseResponses[0].Error) + + // Step 3: Unsubscribe from the topic + unsubscribeRequest := models.UnsubscribeMessageRequest{ + BaseMessageRequest: models.BaseMessageRequest{ + Action: models.UnsubscribeAction, + SubscriptionID: subscriptionRequest.SubscriptionID, + }, + } + + s.Require().NoError(wsClient.WriteJSON(unsubscribeRequest)) + + var response models.BaseMessageResponse + err = wsClient.ReadJSON(&response) + s.Require().NoError(err, "failed to read unsubscribe response") + s.Require().Nil(response.Error) + + // Step 4: Monitor inactivity after unsubscription + actualInactivityDuration := monitorInactivity(s.T(), wsClient, expectedMinInactivityDuration) + // Verify that the connection does not close before the InactivityTimeout + inactivity ticker period. + s.GreaterOrEqual(actualInactivityDuration, expectedMinInactivityDuration) + }) +} + +// testMaxSubscriptionsPerConnection validates the behavior of the WebSocket server +// when the number of subscriptions exceeds the configured maximum limit. +// +// Expected behavior: +// - For the first `MaxSubscriptionsPerConnection` requests, the server should respond with successful subscription messages. +// - On exceeding the subscription limit, the server should return an error response with a message. +func (s *WebsocketSubscriptionSuite) testMaxSubscriptionsPerConnection() { + websocketsUrl := getWebsocketsUrl(s.restAccessAddress) + wsClient, err := common.GetWSClient(s.ctx, websocketsUrl) + s.Require().NoError(err) + + defer func() { s.Require().NoError(wsClient.Close()) }() + + blocksSubscriptionArguments := models.Arguments{"block_status": parser.Finalized} + // Expected error message when exceeding the maximum subscription limit. + expectedErrorMessage := fmt.Sprintf("error creating new subscription: %s", websockets.ErrMaxSubscriptionsReached.Error()) + + // Loop to send subscription requests, including one request exceeding the limit. + for i := 1; i <= MaxSubscriptionsPerConnection+1; i++ { + // Create a subscription message request with a unique ID. 
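+		// With MaxSubscriptionsPerConnection set to 5 above, the generated IDs are
+		// "1" through "6"; only the final request should exceed the limit and be rejected.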
+ subscriptionToBlocksRequest := s.subscribeMessageRequest( + strconv.Itoa(i), + data_providers.BlocksTopic, + blocksSubscriptionArguments, + ) + + // send blocks subscription message + err := wsClient.WriteJSON(subscriptionToBlocksRequest) + s.Require().NoError(err, "failed to send subscription message") + + // Receive response + _, baseResponses, _ := s.listenWebSocketResponses(wsClient, 2*time.Second, subscriptionToBlocksRequest.SubscriptionID) + s.Require().Equal(1, len(baseResponses)) + subscribeResponse := baseResponses[0] + + if i <= MaxSubscriptionsPerConnection { + s.Require().Nil(subscribeResponse.Error) + } else { + // Validate error response for exceeding the subscription limit. + s.Require().Equal(expectedErrorMessage, subscribeResponse.Error.Message) + s.Require().Equal(http.StatusTooManyRequests, subscribeResponse.Error.Code) + } + } +} + +// monitorInactivity monitors the WebSocket connection for inactivity. +func monitorInactivity(t *testing.T, client *websocket.Conn, timeout time.Duration) time.Duration { + start := time.Now() + errChan := make(chan error, 1) + + go func() { + for { + if _, _, err := client.ReadMessage(); err != nil { + errChan <- err + return + } + } + }() + + select { + case <-time.After(timeout * 2): + t.Fatal("Test timed out waiting for WebSocket closure due to inactivity") + return 0 + case <-errChan: + return time.Since(start) + } +} + +// testSubscriptionErrorCases tests error cases for subscriptions. +func (s *WebsocketSubscriptionSuite) testSubscriptionErrorCases() { + tests := []struct { + name string + message models.SubscribeMessageRequest + expectedErrMsg string + expectedErrCode int + }{ + { + name: "Invalid Subscription ID", + message: s.subscribeMessageRequest("invalid_subscription_id", data_providers.BlocksTopic, models.Arguments{}), // id length > 20 symbols + expectedErrMsg: "error parsing subscription id: subscription ID provided by the client must not exceed 20 characters", + expectedErrCode: http.StatusBadRequest, + }, + { + name: "Invalid Topic", + message: s.subscribeMessageRequest("", "invalid_topic", models.Arguments{}), + expectedErrMsg: "error creating data provider", // Update based on expected error message + expectedErrCode: http.StatusBadRequest, + }, + { + name: "Invalid Arguments", + message: s.subscribeMessageRequest("", data_providers.BlocksTopic, models.Arguments{"invalid_arg": 42}), + expectedErrMsg: "error creating data provider", + expectedErrCode: http.StatusBadRequest, + }, + { + name: "Empty Topic", + message: s.subscribeMessageRequest("", "", models.Arguments{}), + expectedErrMsg: "error creating data provider", + expectedErrCode: http.StatusBadRequest, + }, + } + + wsClient, err := common.GetWSClient(s.ctx, getWebsocketsUrl(s.restAccessAddress)) + s.Require().NoError(err) + defer func() { s.Require().NoError(wsClient.Close()) }() + + for _, tt := range tests { + s.Run(tt.name, func() { + // Send subscription message + err := wsClient.WriteJSON(tt.message) + s.Require().NoError(err, "failed to send subscription message") + + // Receive response + var response models.BaseMessageResponse + err = wsClient.ReadJSON(&response) + s.Require().NoError(err, "failed to read subscription response") + + // Validate response + s.Require().Equal(models.SubscribeAction, response.Action) + s.Require().NotNil(response.Error) + s.Contains(response.Error.Message, tt.expectedErrMsg) + s.Require().Equal(tt.expectedErrCode, response.Error.Code) + }) + } +} + +// testUnsubscriptionErrorCases tests error cases for unsubscriptions. 
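+// The covered cases are an over-long subscription ID, an ID that has no active
+// subscription, and an empty ID; each is expected to produce an error response
+// with the matching message and status code.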
+func (s *WebsocketSubscriptionSuite) testUnsubscriptionErrorCases() { + tests := []struct { + name string + message models.UnsubscribeMessageRequest + expectedErrMsg string + expectedErrCode int + }{ + { + name: "Invalid Subscription ID", + message: s.unsubscribeMessageRequest("invalid_subscription_id"), + expectedErrMsg: "error parsing subscription id: subscription ID provided by the client must not exceed 20 characters", // id length > 20 symbols + expectedErrCode: http.StatusBadRequest, + }, + { + name: "Non-Existent Subscription ID", + message: s.unsubscribeMessageRequest("non_existent_id"), + expectedErrMsg: "subscription not found", // not associated with an active subscription + expectedErrCode: http.StatusNotFound, + }, + { + name: "Empty Subscription ID", + message: s.unsubscribeMessageRequest(""), + expectedErrMsg: "error parsing subscription id: subscription ID provided by the client must not be empty", + expectedErrCode: http.StatusBadRequest, + }, + } + + wsClient, err := common.GetWSClient(s.ctx, getWebsocketsUrl(s.restAccessAddress)) + s.Require().NoError(err) + defer func() { s.Require().NoError(wsClient.Close()) }() + + for _, tt := range tests { + s.Run(tt.name, func() { + // Send unsubscription message + err := wsClient.WriteJSON(tt.message) + s.Require().NoError(err, "failed to send unsubscription message") + + // Receive response + var response models.BaseMessageResponse + err = wsClient.ReadJSON(&response) + s.Require().NoError(err, "failed to read unsubscription response") + + // Validate response + s.Require().Equal(models.UnsubscribeAction, response.Action) + s.Require().NotNil(response.Error) + s.Contains(response.Error.Message, tt.expectedErrMsg) + s.Require().Equal(tt.expectedErrCode, response.Error.Code) + }) + } +} + +// testListOfSubscriptions tests the websocket request for the list of active subscription and its response. +func (s *WebsocketSubscriptionSuite) testListOfSubscriptions() { + wsClient, err := common.GetWSClient(s.ctx, getWebsocketsUrl(s.restAccessAddress)) + s.Require().NoError(err) + defer func() { s.Require().NoError(wsClient.Close()) }() + + // 1. Create blocks subscription request message + blocksSubscriptionID := "blocks_id" + blocksSubscriptionArguments := models.Arguments{"block_status": parser.Finalized} + subscriptionToBlocksRequest := s.subscribeMessageRequest( + blocksSubscriptionID, + data_providers.BlocksTopic, + blocksSubscriptionArguments, + ) + // send blocks subscription message + s.Require().NoError(wsClient.WriteJSON(subscriptionToBlocksRequest)) + + // verify success subscribe response + _, baseResponses, _ := s.listenWebSocketResponses(wsClient, 1*time.Second, blocksSubscriptionID) + s.Require().Equal(1, len(baseResponses)) + s.Require().Nil(baseResponses[0].Error) + + // 2. Create block headers subscription request message + blockHeadersSubscriptionID := "block_headers_id" + blockHeadersSubscriptionArguments := models.Arguments{"block_status": parser.Finalized} + subscriptionToBlockHeadersRequest := s.subscribeMessageRequest( + blockHeadersSubscriptionID, + data_providers.BlockHeadersTopic, + blockHeadersSubscriptionArguments, + ) + // send block headers subscription message + s.Require().NoError(wsClient.WriteJSON(subscriptionToBlockHeadersRequest)) + + // verify success subscribe response + _, baseResponses, _ = s.listenWebSocketResponses(wsClient, 1*time.Second, blockHeadersSubscriptionID) + s.Require().Equal(1, len(baseResponses)) + s.Require().Nil(baseResponses[0].Error) + + // 3. 
Create list of subscription request message + listOfSubscriptionRequest := models.ListSubscriptionsMessageRequest{ + BaseMessageRequest: models.BaseMessageRequest{ + Action: models.ListSubscriptionsAction, + }, + } + // send list of subscription message + s.Require().NoError(wsClient.WriteJSON(listOfSubscriptionRequest)) + + _, _, responses := s.listenWebSocketResponses(wsClient, 1*time.Second, "") + + // validate list of active subscriptions response + s.Require().Equal(1, len(responses)) + listOfSubscriptionResponse := responses[0] + expectedSubscriptions := []*models.SubscriptionEntry{ + { + SubscriptionID: blocksSubscriptionID, + Topic: data_providers.BlocksTopic, + Arguments: blocksSubscriptionArguments, + }, + { + SubscriptionID: blockHeadersSubscriptionID, + Topic: data_providers.BlockHeadersTopic, + Arguments: blockHeadersSubscriptionArguments, + }, + } + s.Require().Len(listOfSubscriptionResponse.Subscriptions, len(expectedSubscriptions)) + + for i, expected := range expectedSubscriptions { + actual := listOfSubscriptionResponse.Subscriptions[i] + s.Require().Equal(expected.SubscriptionID, actual.SubscriptionID) + for key, value := range expected.Arguments { + s.Require().Equal(value, actual.Arguments[key]) + } + } +} + +// testHappyCases tests various scenarios for websocket subscriptions including +// streaming blocks, block headers, block digests, events, account statuses, +// and transaction statuses. +func (s *WebsocketSubscriptionSuite) testHappyCases() { + tests := []struct { + name string + topic string + prepareArguments func() models.Arguments + listenSubscriptionResponseDuration time.Duration + testUnsubscribe bool + }{ + { + name: "Blocks streaming", + topic: data_providers.BlocksTopic, + prepareArguments: func() models.Arguments { + return models.Arguments{"block_status": parser.Finalized} + }, + listenSubscriptionResponseDuration: 5 * time.Second, + testUnsubscribe: true, + }, + { + name: "Block headers streaming", + topic: data_providers.BlockHeadersTopic, + prepareArguments: func() models.Arguments { + return models.Arguments{"block_status": parser.Finalized} + }, + listenSubscriptionResponseDuration: 5 * time.Second, + testUnsubscribe: true, + }, + { + name: "Block digests streaming", + topic: data_providers.BlockDigestsTopic, + prepareArguments: func() models.Arguments { + return models.Arguments{"block_status": parser.Finalized} + }, + listenSubscriptionResponseDuration: 5 * time.Second, + testUnsubscribe: true, + }, + { + name: "Events streaming", + topic: data_providers.EventsTopic, + prepareArguments: func() models.Arguments { + return models.Arguments{} + }, + listenSubscriptionResponseDuration: 5 * time.Second, + testUnsubscribe: true, + }, + { + name: "Account statuses streaming", + topic: data_providers.AccountStatusesTopic, + prepareArguments: func() models.Arguments { + tx := s.createAccountTx() + err := s.serviceClient.SendTransaction(s.ctx, tx) + s.Require().NoError(err) + s.T().Logf("txId %v", flow.Identifier(tx.ID())) + + return models.Arguments{ + "event_types": []string{"flow.AccountCreated", "flow.AccountKeyAdded"}, + } + }, + listenSubscriptionResponseDuration: 10 * time.Second, + testUnsubscribe: true, + }, + { + name: "Transaction statuses streaming", + topic: data_providers.TransactionStatusesTopic, + prepareArguments: func() models.Arguments { + tx := s.createAccountTx() + + // Send the transaction + err := s.serviceClient.SendTransaction(s.ctx, tx) + s.Require().NoError(err) + s.T().Logf("txId %v", flow.Identifier(tx.ID())) + + return 
models.Arguments{ + "tx_id": tx.ID().String(), + } + }, + listenSubscriptionResponseDuration: 10 * time.Second, + testUnsubscribe: false, + }, + { + name: "Send and subscribe to transaction statuses", + topic: data_providers.SendAndGetTransactionStatusesTopic, + prepareArguments: func() models.Arguments { + tx := s.createAccountTx() + + convertToProposalKey := func(key sdk.ProposalKey) commonmodels.ProposalKey { + return commonmodels.ProposalKey{ + Address: flow.Address(key.Address).String(), + KeyIndex: strconv.FormatUint(uint64(key.KeyIndex), 10), + SequenceNumber: strconv.FormatUint(key.SequenceNumber, 10), + } + } + + convertToArguments := func(arguments [][]byte) []string { + wsArguments := make([]string, len(arguments)) + for i, arg := range arguments { + wsArguments[i] = util.ToBase64(arg) + } + + return wsArguments + } + + convertToAuthorizers := func(authorizers []sdk.Address) []string { + wsAuthorizers := make([]string, len(authorizers)) + for i, authorizer := range authorizers { + wsAuthorizers[i] = authorizer.String() + } + + return wsAuthorizers + } + + convertToSig := func(sigs []sdk.TransactionSignature) []commonmodels.TransactionSignature { + wsSigs := make([]commonmodels.TransactionSignature, len(sigs)) + for i, sig := range sigs { + wsSigs[i] = commonmodels.TransactionSignature{ + Address: sig.Address.String(), + KeyIndex: strconv.FormatUint(uint64(sig.KeyIndex), 10), + Signature: util.ToBase64(sig.Signature), + } + } + + return wsSigs + } + return models.Arguments{ + "script": util.ToBase64(tx.Script), + "arguments": convertToArguments(tx.Arguments), + "reference_block_id": tx.ReferenceBlockID.String(), + "gas_limit": strconv.FormatUint(tx.GasLimit, 10), + "payer": tx.Payer.String(), + "proposal_key": convertToProposalKey(tx.ProposalKey), + "authorizers": convertToAuthorizers(tx.Authorizers), + "payload_signatures": convertToSig(tx.PayloadSignatures), + "envelope_signatures": convertToSig(tx.EnvelopeSignatures), + } + }, + listenSubscriptionResponseDuration: 10 * time.Second, + testUnsubscribe: false, + }, + } + + for _, tt := range tests { + // This test cases handles the lifecycle of a websocket connection for a specific subscription, + // including sending a subscription and unsubscription requests, listening for incoming responses, and validating + // them using a provided validation function. 
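+		// The numbered steps inside the closure walk through that lifecycle:
+		// connect, subscribe, listen, validate, and (optionally) unsubscribe.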
+ s.Run(tt.name, func() { + // Step 1: Establish a WebSocket connection + wsClient, err := common.GetWSClient(s.ctx, getWebsocketsUrl(s.restAccessAddress)) + s.Require().NoError(err) + defer func() { s.Require().NoError(wsClient.Close()) }() + + // Step 2: Create and send the subscription request + subscriptionRequest := s.subscribeMessageRequest( + "dummy_id", + tt.topic, + tt.prepareArguments(), + ) + s.Require().NoError(wsClient.WriteJSON(subscriptionRequest)) + + // Step 3: Listen for WebSocket responses for the specified duration + responses, baseMessageResponses, _ := s.listenWebSocketResponses( + wsClient, + tt.listenSubscriptionResponseDuration, + subscriptionRequest.SubscriptionID, + ) + + // Step 4: Validate the subscription response + s.Require().Equal(1, len(baseMessageResponses), "expected one subscription response") + s.Require().Nil(baseMessageResponses[0].Error) + + // Step 5: Use the provided validation function to check received responses + s.validate( + subscriptionRequest.SubscriptionID, + subscriptionRequest.Topic, + responses, + ) + + // Step 6: Optionally unsubscribe from the topic + if tt.testUnsubscribe { + // Create an unsubscription request + unsubscriptionRequest := s.unsubscribeMessageRequest(subscriptionRequest.SubscriptionID) + + // Send the unsubscription request to the WebSocket server + s.Require().NoError(wsClient.WriteJSON(unsubscriptionRequest)) + + // Step 6.1: Read and validate the unsubscription response + var response models.BaseMessageResponse + err := wsClient.ReadJSON(&response) + s.Require().NoError(err, "failed to read unsubscription response") + s.Require().Nil(response.Error) + } + }) + } +} + +// testSubscriptionMultiplexing verifies that when subscribing to multiple channels simultaneously, +// all expected messages are received correctly, ensuring subscription multiplexing works as expected. +func (s *WebsocketSubscriptionSuite) testSubscriptionMultiplexing() { + // Define the list of subscriptions with topic names and arguments required for each topic. + subscriptions := []struct { + name string + topic string + prepareArguments func() models.Arguments + }{ + { + name: "Blocks streaming", + topic: data_providers.BlocksTopic, + prepareArguments: func() models.Arguments { + return models.Arguments{"block_status": parser.Finalized} + }, + }, + { + name: "Block headers streaming", + topic: data_providers.BlockHeadersTopic, + prepareArguments: func() models.Arguments { + return models.Arguments{"block_status": parser.Finalized} + }, + }, + { + name: "Block digests streaming", + topic: data_providers.BlockDigestsTopic, + prepareArguments: func() models.Arguments { + return models.Arguments{"block_status": parser.Finalized} + }, + }, + } + + // Step 1: Establish a WebSocket connection to the server. 
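+	// All of the subscriptions below share this single connection. For illustration,
+	// a subscribe request serializes to JSON roughly as
+	//   {"action": "subscribe", "subscription_id": "sub_1", "topic": <topic>, "arguments": {...}}
+	// (the exact wire field names are defined by the models package); the router
+	// goroutine started in a later step demultiplexes the interleaved responses
+	// by subscription ID.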
+	wsClient, err := common.GetWSClient(s.ctx, getWebsocketsUrl(s.restAccessAddress))
+	s.Require().NoError(err)
+	defer func() { s.Require().NoError(wsClient.Close()) }()
+
+	// Step 2: Prepare collections for the expected responses and a parser for incoming messages
+	subscriptionRequests := make(map[string]models.SubscribeMessageRequest)
+	subscribeResponses := make([]models.SubscribeMessageResponse, 0)
+	unsubscribeResponses := make([]models.UnsubscribeMessageResponse, 0)
+	messageBuckets := make(map[string][]dpmodels.BaseDataProvidersResponse)
+
+	parseResponse := func(t *testing.T, msg json.RawMessage) (string, interface{}) {
+		var message models.BaseMessageResponse
+		err := json.Unmarshal(msg, &message)
+		s.Require().NoError(err, "failed to unmarshal message")
+
+		switch message.Action {
+		case models.SubscribeAction:
+			var m models.SubscribeMessageResponse
+			err = json.Unmarshal(msg, &m)
+			s.Require().NoError(err, "failed to unmarshal subscribe message")
+			return message.SubscriptionID, m
+
+		case models.UnsubscribeAction:
+			var m models.UnsubscribeMessageResponse
+			err = json.Unmarshal(msg, &m)
+			s.Require().NoError(err, "failed to unmarshal unsubscribe message")
+			return message.SubscriptionID, m
+
+		default:
+			var m dpmodels.BaseDataProvidersResponse
+			err = json.Unmarshal(msg, &m)
+			s.Require().NoError(err, "failed to unmarshal data provider message")
+			return message.SubscriptionID, m
+		}
+	}
+
+	// Step 3: Launch a router to collect messages from the WebSocket server into per-subscription buckets.
+	// It will shut down once all subscriptions have been unsubscribed.
+	routerStopped := make(chan struct{})
+	go func() {
+		defer close(routerStopped)
+
+		for {
+			var rawMessage json.RawMessage
+			err := wsClient.ReadJSON(&rawMessage)
+			s.Require().NoError(err)
+
+			subID, message := parseResponse(s.T(), rawMessage)
+
+			switch v := message.(type) {
+			case models.SubscribeMessageResponse:
+				subscribeResponses = append(subscribeResponses, v)
+			case models.UnsubscribeMessageResponse:
+				unsubscribeResponses = append(unsubscribeResponses, v)
+			case dpmodels.BaseDataProvidersResponse:
+				messageBuckets[subID] = append(messageBuckets[subID], v)
+			default:
+				s.Failf("unexpected message type", "got type: %T: %+v", message, message)
+			}
+
+			// break out of the router once the last expected unsubscribe message is received
+			if len(unsubscribeResponses) == len(subscriptions) {
+				return
+			}
+		}
+	}()
+
+	// Step 4: Subscribe to all topics and handle the responses
+	for i, sub := range subscriptions {
+		subId := fmt.Sprintf("sub_%d", i+1) // Generate a unique subscription ID for each subscription.
+		subscriptionRequest := s.subscribeMessageRequest(subId, sub.topic, sub.prepareArguments())
+
+		// Send the subscription request.
+		s.Require().NoError(wsClient.WriteJSON(subscriptionRequest))
+		subscriptionRequests[subId] = subscriptionRequest
+	}
+
+	// Step 5: Unsubscribe from all topics after a short delay.
+	time.Sleep(time.Second)
+	for _, sub := range subscriptionRequests {
+		// Send the unsubscription request.
+		unsubscriptionRequest := s.unsubscribeMessageRequest(sub.SubscriptionID)
+		s.Require().NoError(wsClient.WriteJSON(unsubscriptionRequest))
+	}
+
+	unittest.RequireCloseBefore(s.T(), routerStopped, 5*time.Second, "timed out waiting for router to stop")
+
+	// Step 6: Validate the collected messages to ensure they are received for all active subscriptions.
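+	// Blocks, block headers, and block digests all follow finalized blocks, so every
+	// reported height is expected to show up once per topic, i.e. len(subscriptions)
+	// times in total; this is asserted at the end of this function.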
+ + s.Require().Len(subscribeResponses, len(subscriptions), "Missing subscribe messages: have: %+v", subscribeResponses) + s.Require().Len(unsubscribeResponses, len(subscriptions), "Missing unsubscribe messages: have: %+v", unsubscribeResponses) + + blockResponses := make(map[string]int) + + for subID, responses := range messageBuckets { + s.Require().NotEmpty(responses, "Expected at least 1 messages for subscription ID: %s", subID) + s.validate(subID, subscriptionRequests[subID].Topic, responses) + + for _, response := range responses { + payloadRaw := s.validateBaseDataProvidersResponse(response.SubscriptionID, response.Topic, response) + + switch response.Topic { + case data_providers.BlockDigestsTopic: + var payload dpmodels.BlockDigest + err := restcommon.ParseBody(bytes.NewReader(payloadRaw), &payload) + s.Require().NoError(err) + blockResponses[payload.Height]++ + case data_providers.BlockHeadersTopic: + var payload commonmodels.BlockHeader + err := restcommon.ParseBody(bytes.NewReader(payloadRaw), &payload) + s.Require().NoError(err) + blockResponses[payload.Height]++ + case data_providers.BlocksTopic: + var payload commonmodels.Block + err := restcommon.ParseBody(bytes.NewReader(payloadRaw), &payload) + s.Require().NoError(err) + blockResponses[payload.Header.Height]++ + default: + s.Failf("unexpected message topic", "got: %s", response.Topic) + } + } + } + + for height, count := range blockResponses { + s.Assert().Equalf(len(subscriptions), count, "Expected %d responses for block height %s, but got: %d", len(subscriptions), height, count) + } +} + +// validate checks if the received responses for a given subscription ID and topic +// match the expected data format and correctness. +// +// It dispatches validation to specific topic handlers based on the topic type. +// +// Parameters: +// - subscriptionId: The unique identifier of the WebSocket subscription. +// - topic: The topic associated with the subscription (e.g., blocks, events, transactions). +// - responses: A slice of BaseDataProvidersResponse containing the received data. +// +// If the topic is invalid or unsupported, it logs a warning instead of failing the test. +func (s *WebsocketSubscriptionSuite) validate(subscriptionId string, topic string, responses []dpmodels.BaseDataProvidersResponse) { + switch topic { + case data_providers.BlocksTopic: + s.validateBlocks(subscriptionId, topic, responses) + case data_providers.BlockHeadersTopic: + s.validateBlockHeaders(subscriptionId, topic, responses) + case data_providers.BlockDigestsTopic: + s.validateBlockDigests(subscriptionId, topic, responses) + case data_providers.EventsTopic: + s.validateEvents(subscriptionId, topic, responses) + case data_providers.AccountStatusesTopic: + s.validateAccountStatuses(subscriptionId, topic, responses) + case data_providers.TransactionStatusesTopic, data_providers.SendAndGetTransactionStatusesTopic: + s.validateTransactionStatuses(subscriptionId, topic, responses) + default: + s.T().Logf("invalid topic to validate %s", topic) + } +} + +// validateBlocks validates the received block responses against gRPC responses. 
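+// For each received response it re-fetches the block by ID via the gRPC
+// GetBlockByID call and compares the header fields (ID, height, timestamp, and
+// parent ID).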
+func (s *WebsocketSubscriptionSuite) validateBlocks( + expectedSubscriptionID string, + expectedTopic string, + receivedResponses []dpmodels.BaseDataProvidersResponse, +) { + s.Require().NotEmpty(receivedResponses, "expected received block headers") + + for _, response := range receivedResponses { + payloadRaw := s.validateBaseDataProvidersResponse(expectedSubscriptionID, expectedTopic, response) + + var payload commonmodels.Block + err := restcommon.ParseBody(bytes.NewReader(payloadRaw), &payload) + s.Require().NoError(err) + + id, err := flow.HexStringToIdentifier(payload.Header.Id) + s.Require().NoError(err) + + grpcResponse, err := s.grpcClient.GetBlockByID(s.ctx, &accessproto.GetBlockByIDRequest{ + Id: convert.IdentifierToMessage(id), + }) + s.Require().NoError(err) + + grpcExpected := grpcResponse.Block + s.Require().Equal(convert.MessageToIdentifier(grpcExpected.Id).String(), payload.Header.Id) + s.Require().Equal(util.FromUint(grpcExpected.Height), payload.Header.Height) + s.Require().Equal(grpcExpected.Timestamp.AsTime(), payload.Header.Timestamp) + s.Require().Equal(convert.MessageToIdentifier(grpcExpected.ParentId).String(), payload.Header.ParentId) + } +} + +// validateBlockHeaders validates the received block header responses against gRPC responses. +func (s *WebsocketSubscriptionSuite) validateBlockHeaders( + expectedSubscriptionID string, + expectedTopic string, + receivedResponses []dpmodels.BaseDataProvidersResponse, +) { + s.Require().NotEmpty(receivedResponses, "expected received block headers") + + for _, response := range receivedResponses { + payloadRaw := s.validateBaseDataProvidersResponse(expectedSubscriptionID, expectedTopic, response) + + var payload commonmodels.BlockHeader + err := restcommon.ParseBody(bytes.NewReader(payloadRaw), &payload) + s.Require().NoError(err) + + id, err := flow.HexStringToIdentifier(payload.Id) + s.Require().NoError(err) + + grpcResponse, err := s.grpcClient.GetBlockHeaderByID(s.ctx, &accessproto.GetBlockHeaderByIDRequest{ + Id: convert.IdentifierToMessage(id), + }) + s.Require().NoError(err) + + grpcExpected := grpcResponse.Block + + s.Require().Equal(convert.MessageToIdentifier(grpcExpected.Id).String(), payload.Id) + s.Require().Equal(util.FromUint(grpcExpected.Height), payload.Height) + s.Require().Equal(grpcExpected.Timestamp.AsTime(), payload.Timestamp) + s.Require().Equal(convert.MessageToIdentifier(grpcExpected.ParentId).String(), payload.ParentId) + } +} + +// validateBlockDigests validates the received block digest responses against gRPC responses. 
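+// A digest carries only the block ID, height, and timestamp, so those fields are
+// checked against the corresponding GetBlockHeaderByID gRPC response.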
+func (s *WebsocketSubscriptionSuite) validateBlockDigests( + expectedSubscriptionID string, + expectedTopic string, + receivedResponses []dpmodels.BaseDataProvidersResponse, +) { + s.Require().NotEmpty(receivedResponses, "expected received block digests") + + for _, response := range receivedResponses { + payloadRaw := s.validateBaseDataProvidersResponse(expectedSubscriptionID, expectedTopic, response) + + var payload dpmodels.BlockDigest + err := restcommon.ParseBody(bytes.NewReader(payloadRaw), &payload) + s.Require().NoError(err) + + id, err := flow.HexStringToIdentifier(payload.BlockId) + s.Require().NoError(err) + + grpcResponse, err := s.grpcClient.GetBlockHeaderByID(s.ctx, &accessproto.GetBlockHeaderByIDRequest{ + Id: convert.IdentifierToMessage(id), + }) + s.Require().NoError(err) + + grpcExpected := grpcResponse.Block + + s.Require().Equal(convert.MessageToIdentifier(grpcExpected.Id).String(), payload.BlockId) + s.Require().Equal(util.FromUint(grpcExpected.Height), payload.Height) + s.Require().Equal(grpcExpected.Timestamp.AsTime(), payload.Timestamp) + } +} + +// validateEvents is a helper function that encapsulates logic for comparing received events from rest state streaming and +// events which received from grpc api. +func (s *WebsocketSubscriptionSuite) validateEvents( + expectedSubscriptionID string, + expectedTopic string, + receivedResponses []dpmodels.BaseDataProvidersResponse, +) { + // make sure there are received events + s.Require().NotEmpty(receivedResponses, "expect received events") + + expectedCounter := uint64(0) + for _, response := range receivedResponses { + payloadRaw := s.validateBaseDataProvidersResponse(expectedSubscriptionID, expectedTopic, response) + + var payload dpmodels.EventResponse + err := restcommon.ParseBody(bytes.NewReader(payloadRaw), &payload) + s.Require().NoError(err) + + s.Require().Equal(expectedCounter, payload.MessageIndex) + expectedCounter++ + + blockId, err := flow.HexStringToIdentifier(payload.BlockId) + s.Require().NoError(err) + + s.validateEventsForBlock( + payload.BlockHeight, + payload.Events, + blockId, + ) + } +} + +// validateAccountStatuses is a helper function that encapsulates logic for comparing received account statuses. +func (s *WebsocketSubscriptionSuite) validateAccountStatuses( + expectedSubscriptionID string, + expectedTopic string, + receivedResponses []dpmodels.BaseDataProvidersResponse, +) { + s.Require().NotEmpty(receivedResponses, "expected received block digests") + + expectedCounter := uint64(0) + for _, response := range receivedResponses { + payloadRaw := s.validateBaseDataProvidersResponse(expectedSubscriptionID, expectedTopic, response) + + var payload dpmodels.AccountStatusesResponse + err := restcommon.ParseBody(bytes.NewReader(payloadRaw), &payload) + s.Require().NoError(err) + + s.Require().Equal(expectedCounter, payload.MessageIndex) + expectedCounter++ + + blockId, err := flow.HexStringToIdentifier(payload.BlockID) + s.Require().NoError(err) + + for _, events := range payload.AccountEvents { + s.validateEventsForBlock(payload.Height, events, blockId) + } + } +} + +// groupEventsByType groups events by their type. +func groupEventsByType(events commonmodels.Events) map[string]commonmodels.Events { + eventMap := make(map[string]commonmodels.Events) + for _, event := range events { + eventType := event.Type_ + eventMap[eventType] = append(eventMap[eventType], event) + } + + return eventMap +} + +// validateEventsForBlock validates events against the gRPC response for a specific block. 
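+// Events are grouped by type, and each group is cross-checked against the
+// GetEventsForBlockIDs gRPC response for the same block: the block height, the
+// number of events per type, and each event's index, transaction ID, and
+// transaction index must all match.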
+func (s *WebsocketSubscriptionSuite) validateEventsForBlock(blockHeight string, events []commonmodels.Event, blockID flow.Identifier) { + receivedEventMap := groupEventsByType(events) + + for eventType, receivedEventList := range receivedEventMap { + // Get events by block ID and event type + response, err := s.grpcClient.GetEventsForBlockIDs( + s.ctx, + &accessproto.GetEventsForBlockIDsRequest{ + BlockIds: [][]byte{convert.IdentifierToMessage(blockID)}, + Type: eventType, + }, + ) + s.Require().NoError(err) + s.Require().Equal(1, len(response.Results), "expect to get 1 result") + + expectedEventsResult := response.Results[0] + s.Require().Equal(util.FromUint(expectedEventsResult.BlockHeight), blockHeight, "expect the same block height") + s.Require().Equal(len(expectedEventsResult.Events), len(receivedEventList), "expect the same count of events: want: %+v, got: %+v", expectedEventsResult.Events, receivedEventList) + + for i, event := range receivedEventList { + expectedEvent := expectedEventsResult.Events[i] + + s.Require().Equal(util.FromUint(expectedEvent.EventIndex), event.EventIndex, "expect the same event index") + s.Require().Equal(convert.MessageToIdentifier(expectedEvent.TransactionId).String(), event.TransactionId, "expect the same transaction id") + s.Require().Equal(util.FromUint(expectedEvent.TransactionIndex), event.TransactionIndex, "expect the same transaction index") + } + } +} + +// validateTransactionStatuses is a helper function that encapsulates logic for comparing received transaction statuses. +func (s *WebsocketSubscriptionSuite) validateTransactionStatuses( + expectedSubscriptionID string, + expectedTopic string, + receivedResponses []dpmodels.BaseDataProvidersResponse, +) { + expectedCount := 4 // pending, finalized, executed, sealed + s.Require().Equal(expectedCount, len(receivedResponses), fmt.Sprintf("expected %d transaction statuses", expectedCount)) + + expectedCounter := uint64(0) + lastReportedTxStatus := commonmodels.PENDING_TransactionStatus + + // Define the expected sequence of statuses + // Expected order: pending(0) -> finalized(1) -> executed(2) -> sealed(3) + expectedStatuses := []commonmodels.TransactionStatus{ + commonmodels.PENDING_TransactionStatus, + commonmodels.FINALIZED_TransactionStatus, + commonmodels.EXECUTED_TransactionStatus, + commonmodels.SEALED_TransactionStatus, + } + + for _, response := range receivedResponses { + payloadRaw := s.validateBaseDataProvidersResponse(expectedSubscriptionID, expectedTopic, response) + + var payload dpmodels.TransactionStatusesResponse + err := restcommon.ParseBody(bytes.NewReader(payloadRaw), &payload) + s.Require().NoError(err) + + s.Require().Equal(expectedCounter, payload.MessageIndex) + + payloadStatus := *payload.TransactionResult.Status + + // Check if all statuses received one by one. The subscription should send responses for each of the statuses, + // and the message should be sent in the order of transaction statuses. + s.Require().Equal(expectedStatuses[expectedCounter], payloadStatus) + + expectedCounter++ + lastReportedTxStatus = payloadStatus + } + // Check, if the last transaction status is sealed. + s.Require().Equal(commonmodels.SEALED_TransactionStatus, lastReportedTxStatus) +} + +// validateBaseDataProvidersResponse validates the subscription ID, topic, and converts the payload to JSON. 
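+// It returns the payload re-marshalled as raw JSON so that callers can decode it
+// into the topic-specific model type.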
+func (s *WebsocketSubscriptionSuite) validateBaseDataProvidersResponse(
+	expectedSubscriptionID string,
+	expectedTopic string,
+	response dpmodels.BaseDataProvidersResponse,
+) []byte {
+	// Step 1: Validate Subscription ID and Topic
+	s.Require().Equal(expectedSubscriptionID, response.SubscriptionID)
+	s.Require().Equal(expectedTopic, response.Topic)
+
+	// Step 2: Convert the payload map to JSON
+	payloadRaw, err := json.Marshal(response.Payload)
+	s.Require().NoError(err, "failed to marshal payload")
+
+	return payloadRaw
+}
+
+// subscribeMessageRequest creates a subscription message request.
+func (s *WebsocketSubscriptionSuite) subscribeMessageRequest(
+	subscriptionID string,
+	topic string,
+	arguments models.Arguments,
+) models.SubscribeMessageRequest {
+	return models.SubscribeMessageRequest{
+		BaseMessageRequest: models.BaseMessageRequest{
+			Action:         models.SubscribeAction,
+			SubscriptionID: subscriptionID,
+		},
+		Topic:     topic,
+		Arguments: arguments,
+	}
+}
+
+// unsubscribeMessageRequest creates an unsubscribe message request.
+func (s *WebsocketSubscriptionSuite) unsubscribeMessageRequest(subscriptionID string) models.UnsubscribeMessageRequest {
+	return models.UnsubscribeMessageRequest{
+		BaseMessageRequest: models.BaseMessageRequest{
+			Action:         models.UnsubscribeAction,
+			SubscriptionID: subscriptionID,
+		},
+	}
+}
+
+// getWebsocketsUrl is a helper function that builds the websocket URL.
+func getWebsocketsUrl(accessAddr string) string {
+	u, _ := url.Parse("http://" + accessAddr + "/v1/ws")
+	return u.String()
+}
+
+// listenWebSocketResponses listens for websocket responses for a specified duration
+// and unmarshals them into the expected types.
+//
+// Parameters:
+// - client: The websocket connection to read messages from.
+// - duration: The maximum time to listen for messages before stopping.
+// - subscriptionID: The subscription ID used to filter relevant responses.
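+//
+// It returns three slices, populated as matching messages arrive: base data
+// provider responses and base message responses filtered by subscriptionID, plus
+// any list-of-subscriptions responses. Messages that match none of the expected
+// shapes are skipped, and a connection close error ends the listener early.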
+func (s *WebsocketSubscriptionSuite) listenWebSocketResponses(
+	client *websocket.Conn,
+	duration time.Duration,
+	subscriptionID string,
+) (
+	baseDataProvidersResponses []dpmodels.BaseDataProvidersResponse,
+	baseMessageResponses []models.BaseMessageResponse,
+	listSubscriptionsMessageResponses []models.ListSubscriptionsMessageResponse,
+) {
+	ctx, cancel := context.WithTimeout(context.Background(), duration)
+	defer cancel()
+
+	for {
+		select {
+		case <-ctx.Done():
+			s.T().Logf("stopping websocket response listener after %s", duration)
+			return baseDataProvidersResponses, baseMessageResponses, listSubscriptionsMessageResponses
+		default:
+			_, messageBytes, err := client.ReadMessage()
+			if err != nil {
+				s.T().Logf("websocket error: %v", err)
+
+				var closeErr *websocket.CloseError
+				if errors.As(err, &closeErr) {
+					return baseDataProvidersResponses, baseMessageResponses, listSubscriptionsMessageResponses
+				}
+
+				s.Require().FailNow(fmt.Sprintf("unexpected websocket error, %v", err))
+			}
+
+			var baseResp models.BaseMessageResponse
+			err = restcommon.ParseBody(bytes.NewReader(messageBytes), &baseResp)
+			if err == nil && baseResp.SubscriptionID == subscriptionID {
+				baseMessageResponses = append(baseMessageResponses, baseResp)
+				continue
+			}
+
+			var listResp models.ListSubscriptionsMessageResponse
+			err = restcommon.ParseBody(bytes.NewReader(messageBytes), &listResp)
+			if err == nil && listResp.Action == models.ListSubscriptionsAction {
+				listSubscriptionsMessageResponses = append(listSubscriptionsMessageResponses, listResp)
+				continue
+			}
+
+			var baseDataProvidersResponse dpmodels.BaseDataProvidersResponse
+			err = restcommon.ParseBody(bytes.NewReader(messageBytes), &baseDataProvidersResponse)
+			if err == nil && baseDataProvidersResponse.SubscriptionID == subscriptionID {
+				baseDataProvidersResponses = append(baseDataProvidersResponses, baseDataProvidersResponse)
+			}
+		}
+	}
+}
+
+// createAccountTx builds and signs an account creation transaction using the
+// suite's service client.
+func (s *WebsocketSubscriptionSuite) createAccountTx() *sdk.Transaction {
+	latestBlockID, err := s.serviceClient.GetLatestBlockID(s.ctx)
+	s.Require().NoError(err)
+
+	// generate a key for the new account
+	accountPrivateKey := lib.RandomPrivateKey()
+
+	accountKey := sdk.NewAccountKey().
+		FromPrivateKey(accountPrivateKey).
+		SetHashAlgo(sdkcrypto.SHA3_256).
+		SetWeight(sdk.AccountKeyWeightThreshold)
+
+	serviceAddress := sdk.Address(s.serviceClient.Chain.ServiceAddress())
+
+	// Generate the account creation transaction
+	createAccountTx, err := templates.CreateAccount(
+		[]*sdk.AccountKey{accountKey},
+		nil, serviceAddress)
+	s.Require().NoError(err)
+
+	// Set the reference block, proposal key, payer, and compute limit
+	createAccountTx.
+		SetReferenceBlockID(sdk.Identifier(latestBlockID)).
+		SetProposalKey(serviceAddress, 0, s.serviceClient.GetAndIncrementSeqNumber()).
+		SetPayer(serviceAddress).
+		SetComputeLimit(flow.DefaultMaxTransactionGasLimit)
+
+	createAccountTx, err = s.serviceClient.SignTransaction(createAccountTx)
+	s.Require().NoError(err)
+
+	return createAccountTx
+}
diff --git a/integration/tests/access/common/utils.go b/integration/tests/access/common/utils.go
new file mode 100644
index 00000000000..6e24579398c
--- /dev/null
+++ b/integration/tests/access/common/utils.go
@@ -0,0 +1,32 @@
+package common
+
+import (
+	"context"
+	"strings"
+
+	"github.com/gorilla/websocket"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+
+	accessproto "github.com/onflow/flow/protobuf/go/flow/access"
+)
+
+// GetAccessAPIClient is a helper function that creates a client for the AccessAPI service.
+func GetAccessAPIClient(address string) (accessproto.AccessAPIClient, error) {
+	conn, err := grpc.NewClient(address, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		return nil, err
+	}
+
+	return accessproto.NewAccessAPIClient(conn), nil
+}
+
+// GetWSClient is a helper function that creates a websocket client.
+func GetWSClient(ctx context.Context, address string) (*websocket.Conn, error) {
+	// swap the http scheme for ws and dial the endpoint
+	client, _, err := websocket.DefaultDialer.DialContext(ctx, strings.Replace(address, "http", "ws", 1), nil)
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
diff --git a/integration/tests/access/execution_state_sync_test.go b/integration/tests/access/execution_state_sync_test.go
deleted file mode 100644
index b75b45704f9..00000000000
--- a/integration/tests/access/execution_state_sync_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package access
-
-import (
-	"context"
-	"fmt"
-	"path/filepath"
-	"testing"
-
-	badgerds "github.com/ipfs/go-ds-badger2"
-	"github.com/rs/zerolog"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"github.com/stretchr/testify/suite"
-
-	"github.com/onflow/flow-go/engine/ghost/client"
-	"github.com/onflow/flow-go/integration/testnet"
-	"github.com/onflow/flow-go/integration/tests/lib"
-	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/module/blobs"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
-	"github.com/onflow/flow-go/module/metrics"
-	storage "github.com/onflow/flow-go/storage/badger"
-	"github.com/onflow/flow-go/utils/unittest"
-)
-
-func TestExecutionStateSync(t *testing.T) {
-	unittest.SkipUnless(t, unittest.TEST_FLAKY, "flaky as it constantly runs into badger errors or blob not found errors")
-	suite.Run(t, new(ExecutionStateSyncSuite))
-}
-
-type ExecutionStateSyncSuite struct {
-	suite.Suite
-	lib.TestnetStateTracker
-
-	log zerolog.Logger
-
-	bridgeID flow.Identifier
-	ghostID  flow.Identifier
-
-	// root context for the current test
-	ctx    context.Context
-	cancel context.CancelFunc
-
-	net *testnet.FlowNetwork
-}
-
-func (s *ExecutionStateSyncSuite) SetupTest() {
-	s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel)
-	s.log.Info().Msg("================> SetupTest")
-	s.ctx, s.cancel = context.WithCancel(context.Background())
-
-	s.buildNetworkConfig()
-
-	// start the network
-	s.net.Start(s.ctx)
-
-	s.Track(s.T(), s.ctx, s.Ghost())
-}
-
-func (s *ExecutionStateSyncSuite) TearDownTest() {
-	s.log.Info().Msg("================> Start TearDownTest")
-	s.net.Remove()
-	s.cancel()
-	s.log.Info().Msgf("================> Finish TearDownTest")
-}
-
-func (s *ExecutionStateSyncSuite) Ghost() *client.GhostClient {
-	client, err := s.net.ContainerByID(s.ghostID).GhostClient()
-
require.NoError(s.T(), err, "could not get ghost client") - return client -} - -func (s *ExecutionStateSyncSuite) buildNetworkConfig() { - // access node - s.bridgeID = unittest.IdentifierFixture() - bridgeANConfig := testnet.NewNodeConfig( - flow.RoleAccess, - testnet.WithID(s.bridgeID), - testnet.WithLogLevel(zerolog.DebugLevel), - testnet.WithAdditionalFlag("--supports-observer=true"), - testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), - testnet.WithAdditionalFlag(fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir)), - testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), - ) - - // add the ghost (access) node config - s.ghostID = unittest.IdentifierFixture() - ghostNode := testnet.NewNodeConfig( - flow.RoleAccess, - testnet.WithID(s.ghostID), - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.AsGhost()) - - consensusConfigs := []func(config *testnet.NodeConfig){ - testnet.WithAdditionalFlag("--block-rate-delay=100ms"), - testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), - testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), - testnet.WithLogLevel(zerolog.FatalLevel), - } - - net := []testnet.NodeConfig{ - testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), - testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), - bridgeANConfig, - ghostNode, - // TODO: add observer - } - - conf := testnet.NewNetworkConfig("execution state sync test", net) - s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) -} - -// TestHappyPath tests that Execution Nodes generate execution data, and Access Nodes are able to -// successfully sync the data -func (s *ExecutionStateSyncSuite) TestHappyPath() { - // Let the network run for this many blocks - runBlocks := uint64(20) - - // We will check that execution data was downloaded for this many blocks - // It has to be less than runBlocks since it's not possible to see which height the AN stopped - // downloading execution data for - checkBlocks := runBlocks / 2 - - // get the first block height - currentFinalized := s.BlockState.HighestFinalizedHeight() - blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized) - s.T().Logf("got block height %v ID %v", blockA.Header.Height, blockA.Header.ID()) - - // wait for the requested number of sealed blocks, then pause the network so we can inspect the dbs - s.BlockState.WaitForSealed(s.T(), blockA.Header.Height+runBlocks) - s.net.StopContainers() - - // start an execution data service using the Access Node's execution data db - an := s.net.ContainerByID(s.bridgeID) - eds := s.nodeExecutionDataStore(an) - - // setup storage objects needed to get the execution data id - db, err := an.DB() - require.NoError(s.T(), err, "could not open db") - - metrics := metrics.NewNoopCollector() - headers := storage.NewHeaders(metrics, db) - results := storage.NewExecutionResults(metrics, db) - - // Loop through checkBlocks and verify the execution data was 
downloaded correctly - for i := blockA.Header.Height; i <= blockA.Header.Height+checkBlocks; i++ { - header, err := headers.ByHeight(i) - require.NoError(s.T(), err, "could not get header") - - result, err := results.ByBlockID(header.ID()) - require.NoError(s.T(), err, "could not get sealed result") - - s.T().Logf("getting execution data for height %d, block %s, execution_data %s", header.Height, header.ID(), result.ExecutionDataID) - - ed, err := eds.GetExecutionData(s.ctx, result.ExecutionDataID) - if assert.NoError(s.T(), err, "could not get execution data for height %v", i) { - s.T().Logf("got execution data for height %d", i) - assert.Equal(s.T(), header.ID(), ed.BlockID) - } - } -} - -func (s *ExecutionStateSyncSuite) nodeExecutionDataStore(node *testnet.Container) execution_data.ExecutionDataStore { - ds, err := badgerds.NewDatastore(filepath.Join(node.ExecutionDataDBPath(), "blobstore"), &badgerds.DefaultOptions) - require.NoError(s.T(), err, "could not get execution datastore") - - go func() { - <-s.ctx.Done() - if err := ds.Close(); err != nil { - s.T().Logf("could not close execution data datastore: %v", err) - } - }() - - return execution_data.NewExecutionDataStore(blobs.NewBlobstore(ds), execution_data.DefaultSerializer) -} diff --git a/integration/tests/access/observer_test.go b/integration/tests/access/observer_test.go deleted file mode 100644 index 29b96da49e6..00000000000 --- a/integration/tests/access/observer_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package access - -import ( - "context" - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/status" - - accessproto "github.com/onflow/flow/protobuf/go/flow/access" - - "github.com/onflow/flow-go/integration/testnet" - "github.com/onflow/flow-go/model/flow" -) - -func TestObserver(t *testing.T) { - suite.Run(t, new(ObserverSuite)) -} - -type ObserverSuite struct { - suite.Suite - net *testnet.FlowNetwork - teardown func() - local map[string]struct{} - - cancel context.CancelFunc -} - -func (s *ObserverSuite) TearDownTest() { - if s.net != nil { - s.net.Remove() - s.net = nil - } - if s.cancel != nil { - s.cancel() - s.cancel = nil - } -} - -func (s *ObserverSuite) SetupTest() { - s.local = map[string]struct{}{ - "Ping": {}, - "GetLatestBlockHeader": {}, - "GetBlockHeaderByID": {}, - "GetBlockHeaderByHeight": {}, - "GetLatestBlock": {}, - "GetBlockByID": {}, - "GetBlockByHeight": {}, - "GetLatestProtocolStateSnapshot": {}, - "GetNetworkParameters": {}, - } - - nodeConfigs := []testnet.NodeConfig{ - // access node with unstaked nodes supported - testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel), testnet.WithAdditionalFlag("--supports-observer=true")), - - // need one dummy execution node (unused ghost) - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), - - // need one dummy verification node (unused ghost) - testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), - - // need one controllable collection node (unused ghost) - testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), - - // need three consensus nodes (unused ghost) - testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel), 
testnet.AsGhost()), - testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), - testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), - } - - observers := []testnet.ObserverConfig{{ - LogLevel: zerolog.InfoLevel, - }} - - // prepare the network - conf := testnet.NewNetworkConfig("observer_api_test", nodeConfigs, testnet.WithObservers(observers...)) - s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) - - // start the network - ctx, cancel := context.WithCancel(context.Background()) - s.cancel = cancel - - s.net.Start(ctx) -} - -// TestObserver runs the following tests: -// 1. CompareRPCs: verifies that the observer client returns the same errors as the access client for rpcs proxied to the upstream AN -// 2. HandledByUpstream: stops the upstream AN and verifies that the observer client returns errors for all rpcs handled by the upstream -// 3. HandledByObserver: stops the upstream AN and verifies that the observer client handles all other queries -func (s *ObserverSuite) TestObserver() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - t := s.T() - - // get an observer client - observer, err := s.getObserverClient() - require.NoError(t, err) - - access, err := s.getAccessClient() - require.NoError(t, err) - - t.Run("CompareRPCs", func(t *testing.T) { - // verify that both clients return the same errors for proxied rpcs - for _, rpc := range s.getRPCs() { - // skip rpcs handled locally by observer - if _, local := s.local[rpc.name]; local { - continue - } - t.Run(rpc.name, func(t *testing.T) { - accessErr := rpc.call(ctx, access) - observerErr := rpc.call(ctx, observer) - assert.Equal(t, accessErr, observerErr) - }) - } - }) - - // stop the upstream access container - err = s.net.StopContainerByName(ctx, testnet.PrimaryAN) - require.NoError(t, err) - - t.Run("HandledByUpstream", func(t *testing.T) { - // verify that we receive Unavailable errors from all rpcs handled upstream - for _, rpc := range s.getRPCs() { - if _, local := s.local[rpc.name]; local { - continue - } - t.Run(rpc.name, func(t *testing.T) { - err := rpc.call(ctx, observer) - assert.Equal(t, codes.Unavailable, status.Code(err)) - }) - } - }) - - t.Run("HandledByObserver", func(t *testing.T) { - // verify that we receive NotFound or no error from all rpcs handled locally - for _, rpc := range s.getRPCs() { - if _, local := s.local[rpc.name]; !local { - continue - } - t.Run(rpc.name, func(t *testing.T) { - err := rpc.call(ctx, observer) - if err == nil { - return - } - assert.Equal(t, codes.NotFound, status.Code(err)) - }) - } - }) - -} - -func (s *ObserverSuite) getAccessClient() (accessproto.AccessAPIClient, error) { - return s.getClient(s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort)) -} - -func (s *ObserverSuite) getObserverClient() (accessproto.AccessAPIClient, error) { - return s.getClient(s.net.ContainerByName("observer_1").Addr(testnet.GRPCPort)) -} - -func (s *ObserverSuite) getClient(address string) (accessproto.AccessAPIClient, error) { - // helper func to create an access client - conn, err := grpc.Dial(address, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - return nil, err - } - - client := accessproto.NewAccessAPIClient(conn) - return client, nil -} - -type RPCTest struct { - name string - call func(ctx context.Context, client accessproto.AccessAPIClient) error -} - -func (s *ObserverSuite) getRPCs() []RPCTest { - return 
[]RPCTest{ - {name: "Ping", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.Ping(ctx, &accessproto.PingRequest{}) - return err - }}, - {name: "GetLatestBlockHeader", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetLatestBlockHeader(ctx, &accessproto.GetLatestBlockHeaderRequest{}) - return err - }}, - {name: "GetBlockHeaderByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetBlockHeaderByID(ctx, &accessproto.GetBlockHeaderByIDRequest{ - Id: make([]byte, 32), - }) - return err - }}, - {name: "GetBlockHeaderByHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetBlockHeaderByHeight(ctx, &accessproto.GetBlockHeaderByHeightRequest{}) - return err - }}, - {name: "GetLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetLatestBlock(ctx, &accessproto.GetLatestBlockRequest{}) - return err - }}, - {name: "GetBlockByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{Id: make([]byte, 32)}) - return err - }}, - {name: "GetBlockByHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetBlockByHeight(ctx, &accessproto.GetBlockByHeightRequest{}) - return err - }}, - {name: "GetCollectionByID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{Id: make([]byte, 32)}) - return err - }}, - {name: "SendTransaction", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.SendTransaction(ctx, &accessproto.SendTransactionRequest{}) - return err - }}, - {name: "GetTransaction", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetTransaction(ctx, &accessproto.GetTransactionRequest{}) - return err - }}, - {name: "GetTransactionResult", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{}) - return err - }}, - {name: "GetTransactionResultByIndex", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{}) - return err - }}, - {name: "GetTransactionResultsByBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{}) - return err - }}, - {name: "GetTransactionsByBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{}) - return err - }}, - {name: "GetAccount", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetAccount(ctx, &accessproto.GetAccountRequest{}) - return err - }}, - {name: "GetAccountAtLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetAccountAtLatestBlock(ctx, &accessproto.GetAccountAtLatestBlockRequest{}) - return err - }}, - {name: "GetAccountAtBlockHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := 
client.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{}) - return err - }}, - {name: "ExecuteScriptAtLatestBlock", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.ExecuteScriptAtLatestBlock(ctx, &accessproto.ExecuteScriptAtLatestBlockRequest{}) - return err - }}, - {name: "ExecuteScriptAtBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{}) - return err - }}, - {name: "ExecuteScriptAtBlockHeight", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{}) - return err - }}, - {name: "GetEventsForHeightRange", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{}) - return err - }}, - {name: "GetEventsForBlockIDs", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{}) - return err - }}, - {name: "GetNetworkParameters", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetNetworkParameters(ctx, &accessproto.GetNetworkParametersRequest{}) - return err - }}, - {name: "GetLatestProtocolStateSnapshot", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetLatestProtocolStateSnapshot(ctx, &accessproto.GetLatestProtocolStateSnapshotRequest{}) - return err - }}, - {name: "GetExecutionResultForBlockID", call: func(ctx context.Context, client accessproto.AccessAPIClient) error { - _, err := client.GetExecutionResultForBlockID(ctx, &accessproto.GetExecutionResultForBlockIDRequest{}) - return err - }}, - } -} diff --git a/integration/tests/admin/command_runner_test.go b/integration/tests/admin/command_runner_test.go index bc85f048efc..8166bcd01cf 100644 --- a/integration/tests/admin/command_runner_test.go +++ b/integration/tests/admin/command_runner_test.go @@ -31,9 +31,9 @@ import ( "github.com/onflow/flow-go/admin" pb "github.com/onflow/flow-go/admin/admin" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/integration/client" "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/utils/grpcutils" "github.com/onflow/flow-go/utils/unittest" ) @@ -80,7 +80,7 @@ func (suite *CommandRunnerSuite) SetupCommandRunner(opts ...admin.CommandRunnerO signalerCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx) suite.grpcAddressSock = fmt.Sprintf("%s/%s-flow-node-admin.sock", os.TempDir(), unittest.GenerateRandomStringWithLen(16)) - opts = append(opts, admin.WithGRPCAddress(suite.grpcAddressSock), admin.WithMaxMsgSize(grpcutils.DefaultMaxMsgSize)) + opts = append(opts, admin.WithGRPCAddress(suite.grpcAddressSock), admin.WithMaxMsgSize(commonrpc.DefaultMaxResponseMsgSize)) logger := zerolog.New(zerolog.NewConsoleWriter()) suite.runner = suite.bootstrapper.Bootstrap(logger, suite.httpAddress, opts...) 
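For orientation, here is a minimal sketch (not part of the patch itself) of how integration tests elsewhere in this change set drive the admin server configured above; NewAdminClient and RunCommand come from integration/client, while the command name and payload are illustrative placeholders only:

	adminClient := client.NewAdminClient(serverAddr)
	resp, err := adminClient.RunCommand(context.Background(), "set-config", map[string]interface{}{
		"example-config-key": "example-value", // hypothetical key/value, for illustration only
	})
	require.NoError(t, err)
	_ = resp.Output // the output shape depends on the command that was run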
diff --git a/integration/tests/bft/admin/blocklist/admin_command_blocklist_test.go b/integration/tests/bft/admin/blocklist/admin_command_blocklist_test.go deleted file mode 100644 index d0245e40e19..00000000000 --- a/integration/tests/bft/admin/blocklist/admin_command_blocklist_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package blocklist - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/utils/unittest" -) - -type AdminCommandBlockListTestSuite struct { - Suite -} - -func TestAdminCommandBlockList(t *testing.T) { - suite.Run(t, new(AdminCommandBlockListTestSuite)) -} - -// TestAdminCommandBlockList ensures that the blocklist admin command works as expected. When a node is blocked via the admin blocklist command -// the libp2p connection to that node should be pruned immediately and the connection gater should start to block incoming connection requests. This test -// sets up 2 corrupt nodes a sender and receiver, the sender will send messages before and after being blocked by the receiver node via -// the blocklist admin command. The receiver node is expected to receive messages like normal before blocking the sender, after blocking the sender -// it should not receive any messages. The reason this test is conducted via two corrupt nodes is to empower the test logic to command one (corrupt) node to send a message and to examine the other (corrupt) node to check whether it has received the message. -func (a *AdminCommandBlockListTestSuite) TestAdminCommandBlockList() { - // send some authorized messages indicating the network is working as expected - a.Orchestrator.sendAuthorizedMsgs(a.T()) - unittest.RequireReturnsBefore(a.T(), a.Orchestrator.authorizedEventsReceivedWg.Wait, 5*time.Second, "could not receive authorized messages on time") - // messages with correct message signatures are expected to always pass libp2p signature verification and be delivered to the victim EN. - require.Equal(a.T(), int64(numOfAuthorizedEvents), a.Orchestrator.authorizedEventsReceived.Load(), fmt.Sprintf("expected to receive %d authorized events got: %d", numOfAuthorizedEvents, a.Orchestrator.expectedBlockedEventsReceived.Load())) - - // after blocking node a.senderVN we should not receive any messages from that node. - // This is an asynchronous process with a number of sub processes involved including not limited to ; - // - submitting request to admin server for node to be blocked - // - node block list must update - // - peer manager needs to prune the connection - // - connection gater will start blocking incoming connections - //We wait for 500 milliseconds to reduce the small chance of a race condition between the time a node is blocked - // and the time the blocked node sends the first unauthorized message. - a.blockNode(a.senderVN) - time.Sleep(500 * time.Millisecond) - - // send unauthorized messages and sleep for 3 seconds to allow all requests to be processed - // in normal situations if the node is not block listed, these messages would be considered - // legit and hence would be delivered to the recipients. 
- a.Orchestrator.sendExpectedBlockedMsgs(a.T()) - // The sleep is unavoidable for the following reasons these messages are sent between 2 running libp2p nodes we don't have any hooks in between - // These are messages sent after the node is blocked meaning that these messages are not expected to be delivered to the receiver node, - // so we sleep this approximate amount of time to ensure all messages were attempted, processed and dropped. - time.Sleep(3 * time.Second) - - // messages sent after the node is block listed are considered unauthorized, we don't expect to receive any of them. - require.Equal(a.T(), int64(0), a.Orchestrator.expectedBlockedEventsReceived.Load(), fmt.Sprintf("expected to not receive any unauthorized messages instead got: %d", a.Orchestrator.expectedBlockedEventsReceived.Load())) -} diff --git a/integration/tests/bft/admin/blocklist/orchestrator.go b/integration/tests/bft/admin/blocklist/orchestrator.go deleted file mode 100644 index fe4039960ec..00000000000 --- a/integration/tests/bft/admin/blocklist/orchestrator.go +++ /dev/null @@ -1,107 +0,0 @@ -package blocklist - -import ( - "sync" - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" - - "github.com/onflow/flow-go/insecure" - "github.com/onflow/flow-go/integration/tests/bft" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/utils/unittest" -) - -const ( - // numOfAuthorizedEvents number of events to send before blocking the sender node via the block list command. - numOfAuthorizedEvents = 10 - - // numOfUnauthorizedEvents number of events to send after blocking the sender node via the block list command. - numOfUnauthorizedEvents = 10 -) - -// Orchestrator represents a simple `insecure.AttackOrchestrator` that tracks messages received before and after the senderVN is blocked by the receiverEN via the admin blocklist command. -type Orchestrator struct { - *bft.BaseOrchestrator - sync.Mutex - codec network.Codec - expectedBlockedEventsReceived *atomic.Int64 - authorizedEventsReceived *atomic.Int64 - expectedBlockedEvents map[flow.Identifier]*insecure.EgressEvent - authorizedEvents map[flow.Identifier]*insecure.EgressEvent - authorizedEventsReceivedWg sync.WaitGroup - senderVN flow.Identifier - receiverEN flow.Identifier -} - -var _ insecure.AttackOrchestrator = &Orchestrator{} - -func NewOrchestrator(t *testing.T, logger zerolog.Logger, senderVN, receiverEN flow.Identifier) *Orchestrator { - orchestrator := &Orchestrator{ - BaseOrchestrator: &bft.BaseOrchestrator{ - T: t, - Logger: logger, - }, - codec: unittest.NetworkCodec(), - expectedBlockedEventsReceived: atomic.NewInt64(0), - authorizedEventsReceived: atomic.NewInt64(0), - expectedBlockedEvents: make(map[flow.Identifier]*insecure.EgressEvent), - authorizedEvents: make(map[flow.Identifier]*insecure.EgressEvent), - authorizedEventsReceivedWg: sync.WaitGroup{}, - senderVN: senderVN, - receiverEN: receiverEN, - } - - orchestrator.OnIngressEvent = append(orchestrator.OnIngressEvent, orchestrator.trackIngressEvents) - - return orchestrator -} - -// trackIngressEvents callback that will track authorized messages that are expected to be received by the receiverEN before we block the sender. -// It also tracks unauthorized messages received if any that are expected to be blocked after the senderVN is blocked via the admin blocklist command. 
-func (a *Orchestrator) trackIngressEvents(event *insecure.IngressEvent) error { - // Track any unauthorized events that are received, these events are sent after the admin blocklist command - // is used to block the sender node. - if _, ok := a.expectedBlockedEvents[event.FlowProtocolEventID]; ok { - if event.OriginID == a.senderVN { - a.expectedBlockedEventsReceived.Inc() - a.Logger.Warn().Str("event_id", event.FlowProtocolEventID.String()).Msg("unauthorized ingress event received") - } - } - - // track all authorized events sent before the sender node is blocked. - if _, ok := a.authorizedEvents[event.FlowProtocolEventID]; ok { - // ensure event received intact no changes have been made to the underlying message - //a.assertEventsEqual(expectedEvent, event) - a.authorizedEventsReceived.Inc() - a.authorizedEventsReceivedWg.Done() - } - - return nil -} - -// sendAuthorizedMsgs publishes a number of authorized messages from the senderVN. Authorized messages are messages -// that are sent before the senderVN is blocked. -func (a *Orchestrator) sendAuthorizedMsgs(t *testing.T) { - for i := 0; i < numOfAuthorizedEvents; i++ { - event := bft.RequestChunkDataPackEgressFixture(a.T, a.senderVN, a.receiverEN, insecure.Protocol_PUBLISH) - err := a.OrchestratorNetwork.SendEgress(event) - require.NoError(t, err) - a.authorizedEvents[event.FlowProtocolEventID] = event - a.authorizedEventsReceivedWg.Add(1) - } -} - -// sendExpectedBlockedMsgs publishes a number of unauthorized messages. Unauthorized messages are messages that are sent -// after the senderVN is blocked via the admin blocklist command. These messages are not expected to be received. -func (a *Orchestrator) sendExpectedBlockedMsgs(t *testing.T) { - for i := 0; i < numOfUnauthorizedEvents; i++ { - event := bft.RequestChunkDataPackEgressFixture(a.T, a.senderVN, a.receiverEN, insecure.Protocol_PUBLISH) - err := a.OrchestratorNetwork.SendEgress(event) - require.NoError(t, err) - a.expectedBlockedEvents[event.FlowProtocolEventID] = event - } -} diff --git a/integration/tests/bft/admin/blocklist/suite.go b/integration/tests/bft/admin/blocklist/suite.go deleted file mode 100644 index 48c3547f8b4..00000000000 --- a/integration/tests/bft/admin/blocklist/suite.go +++ /dev/null @@ -1,72 +0,0 @@ -package blocklist - -import ( - "context" - "fmt" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/insecure" - "github.com/onflow/flow-go/integration/client" - "github.com/onflow/flow-go/integration/testnet" - "github.com/onflow/flow-go/integration/tests/bft" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -// Suite represents a test suite ensures the admin block list command works as expected. -type Suite struct { - bft.BaseSuite - senderVN flow.Identifier // node ID of corrupted node that will send messages in the test. The sender node will be blocked. - receiverEN flow.Identifier // node ID of corrupted node that will receive messages in the test - Orchestrator *Orchestrator -} - -// SetupSuite runs a bare minimum Flow network to function correctly along with 2 attacker nodes and 1 victim node. -// - Corrupt VN that will be used to send messages, this node will be the node that is blocked by the receiver corrupt EN. -// - Corrupt EN that will receive messages from the corrupt VN, we will execute the admin command on this node. 
-func (s *Suite) SetupSuite() { - s.BaseSuite.SetupSuite() - - // generate 1 corrupt verification node - s.senderVN = unittest.IdentifierFixture() - s.NodeConfigs = append(s.NodeConfigs, testnet.NewNodeConfig(flow.RoleVerification, - testnet.WithID(s.senderVN), - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.AsCorrupted())) - - // generate 1 corrupt execution node - s.receiverEN = unittest.IdentifierFixture() - s.NodeConfigs = append(s.NodeConfigs, testnet.NewNodeConfig(flow.RoleExecution, - testnet.WithID(s.receiverEN), - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.AsCorrupted())) - - s.BaseSuite.StartCorruptedNetwork( - "bft_signature_validation_test", - 10_000, - 100_000, - func() insecure.AttackOrchestrator { - s.Orchestrator = NewOrchestrator(s.T(), s.Log, s.senderVN, s.receiverEN) - return s.Orchestrator - }, - ) -} - -// blockNode submit request to our EN admin server to block sender VN. -func (s *Suite) blockNode(nodeID flow.Identifier) { - serverAddr := fmt.Sprintf("localhost:%s", s.Net.ContainerByID(s.receiverEN).Port(testnet.AdminPort)) - adminClient := client.NewAdminClient(serverAddr) - - data := map[string]interface{}{"network-id-provider-blocklist": []string{nodeID.String()}} - resp, err := adminClient.RunCommand(context.Background(), "set-config", data) - require.NoError(s.T(), err) - - output, ok := resp.Output.(map[string]interface{}) - require.True(s.T(), ok) - - newList, ok := output["newValue"].([]interface{}) - require.True(s.T(), ok) - require.Contains(s.T(), newList, nodeID.String()) -} diff --git a/integration/tests/bft/base_suite.go b/integration/tests/bft/base_suite.go index a1942f05b7d..2e6e74de881 100644 --- a/integration/tests/bft/base_suite.go +++ b/integration/tests/bft/base_suite.go @@ -1,3 +1,4 @@ +// Package bft provides testing facilities for Flow BFT protocols. package bft import ( @@ -18,6 +19,19 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) +// BaseSuite serves as a base test suite offering various utility functions +// and default setup/teardown steps. It facilitates the creation of Flow networks +// with pre-configured nodes and allows for easier interaction with the network, +// reducing boilerplate code in individual tests. +// +// BaseSuite comes with a lot of functionality out-of-the-box, including the ability to: +// - Create a bare-minimum Flow network. +// - Start and stop the network. +// - Track messages over testnet using TestnetStateTracker. +// - Tear down the testnet environment. +// - Handle Ghost nodes and Orchestrator network. +// +// BaseSuite embeds testify's Suite to leverage setup, teardown, and assertion capabilities. type BaseSuite struct { suite.Suite Log zerolog.Logger @@ -28,50 +42,54 @@ type BaseSuite struct { GhostID flow.Identifier // represents id of ghost node NodeConfigs testnet.NodeConfigs // used to keep configuration of nodes in testnet OrchestratorNetwork *orchestrator.Network - BlockRateFlag string } -// Ghost returns a client to interact with the Ghost node on testnet. +// Ghost returns a client to interact with the Ghost node on the testnet. +// It is essential for observing the messages exchanged in the network. func (b *BaseSuite) Ghost() *client.GhostClient { - client, err := b.Net.ContainerByID(b.GhostID).GhostClient() + c, err := b.Net.ContainerByID(b.GhostID).GhostClient() require.NoError(b.T(), err, "could not get ghost client") - return client + return c } // AccessClient returns a client to interact with the access node api on testnet. 
 func (b *BaseSuite) AccessClient() *testnet.Client {
-	client, err := b.Net.ContainerByName(testnet.PrimaryAN).TestnetClient()
+	c, err := b.Net.ContainerByName(testnet.PrimaryAN).TestnetClient()
 	require.NoError(b.T(), err, "could not get access client")
-	return client
+	return c
 }
 
-// SetupSuite sets up node configs to run a bare minimum Flow network to function correctly.
+// SetupSuite initializes the BaseSuite, setting up a bare-minimum Flow network.
+// It configures nodes with roles such as access, consensus, verification, execution,
+// and collection. It also sets up a Ghost node for observing messages exchanged on the network.
 func (b *BaseSuite) SetupSuite() {
 	b.Log = unittest.LoggerForTest(b.Suite.T(), zerolog.InfoLevel)
-	b.BlockRateFlag = "--block-rate-delay=1ms"
 
-	// setup access nodes
+	// setup single access node
 	b.NodeConfigs = append(b.NodeConfigs,
 		testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.FatalLevel)),
-		testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.FatalLevel)),
 	)
 
 	// setup consensus nodes
-	for _, nodeID := range unittest.IdentifierListFixture(4) {
+	for _, nodeID := range unittest.IdentifierListFixture(3) {
 		nodeConfig := testnet.NewNodeConfig(flow.RoleConsensus,
 			testnet.WithID(nodeID),
 			testnet.WithLogLevel(zerolog.FatalLevel),
 			testnet.WithAdditionalFlag("--required-verification-seal-approvals=1"),
 			testnet.WithAdditionalFlag("--required-construction-seal-approvals=1"),
-			testnet.WithAdditionalFlag(b.BlockRateFlag),
+			// `cruise-ctl-fallback-proposal-duration` is set to 250ms instead of 1ms
+			// to purposely slow down the block rate. This is needed since the crypto module
+			// update provides faster BLS operations.
+			// TODO: fix the access integration test logic to function without slowing down
+			// the block rate
+			testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=250ms"),
 		)
 		b.NodeConfigs = append(b.NodeConfigs, nodeConfig)
 	}
 
-	// setup verification nodes
+	// setup single verification node
 	b.NodeConfigs = append(b.NodeConfigs,
 		testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)),
-		testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)),
 	)
 
 	// setup execution nodes
@@ -82,8 +100,8 @@ func (b *BaseSuite) SetupSuite() {
 
 	// setup collection nodes
 	b.NodeConfigs = append(b.NodeConfigs,
-		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), testnet.WithAdditionalFlag(b.BlockRateFlag)),
-		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), testnet.WithAdditionalFlag(b.BlockRateFlag)),
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), testnet.WithAdditionalFlag("--hotstuff-proposal-duration=1ms")),
+		testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), testnet.WithAdditionalFlag("--hotstuff-proposal-duration=1ms")),
 	)
 
 	// Ghost Node
@@ -98,14 +116,21 @@ func (b *BaseSuite) SetupSuite() {
 	b.NodeConfigs = append(b.NodeConfigs, ghostConfig)
 }
 
-// TearDownSuite tears down the test network of Flow as well as the BFT testing orchestrator network.
+// TearDownSuite cleans up the resources, stopping both the Flow network and the
+// orchestrator network if they have been initialized.
 func (b *BaseSuite) TearDownSuite() {
 	b.Net.Remove()
 	b.Cancel()
-	unittest.RequireCloseBefore(b.T(), b.OrchestratorNetwork.Done(), 1*time.Second, "could not stop orchestrator network on time")
+	// check if the orchestrator network is set on the base suite; not all tests use the corrupted network.
+	if b.OrchestratorNetwork != nil {
+		unittest.RequireCloseBefore(b.T(), b.OrchestratorNetwork.Done(), 1*time.Second, "could not stop orchestrator network on time")
+	}
 }
 
-// StartCorruptedNetwork starts the corrupted network with the configured node configs, this func should be used after test suite is setup.
+// StartCorruptedNetwork initializes and starts a corrupted Flow network.
+// This should be called after the test suite is set up. The function accepts
+// configurations such as the name of the network, the number of views in the staking auction,
+// the number of views in an epoch, and a factory function that creates the attack orchestrator.
 func (b *BaseSuite) StartCorruptedNetwork(name string, viewsInStakingAuction, viewsInEpoch uint64, attackOrchestrator func() insecure.AttackOrchestrator) {
 	// generates, initializes, and starts the Flow network
 	netConfig := testnet.NewNetworkConfig(
diff --git a/integration/tests/bft/framework/README.md b/integration/tests/bft/framework/README.md
new file mode 100644
index 00000000000..55423be21c0
--- /dev/null
+++ b/integration/tests/bft/framework/README.md
@@ -0,0 +1,10 @@
+# Framework BFT Tests
+The framework BFT tests are designed to assess the health of the BFT testing framework itself.
+
+## Passthrough Sealing and Verification Test
+The `PassThroughTestSuite` in the `framework` package includes the test method `TestSealingAndVerificationPassThrough`.
+This test evaluates, end to end, the health of the framework for Byzantine Fault Tolerance (BFT) testing.
+1. **Simulates a Scenario**: It sets up a scenario with two corrupt execution nodes and one corrupt verification node, controlled by a dummy orchestrator that lets all incoming events pass through.
+2. **Deploys Transaction and Verifies Chunks**: Deploys a transaction leading to an execution result with multiple chunks, assigns them to a verification node, and verifies the generation of result approvals for all chunks.
+3. **Sealing and Verification**: Enables sealing based on result approvals and verifies the sealing of a block with a specific multi-chunk execution result.
+4. **Evaluates Events**: The test also assesses whether critical sealing-and-verification-related events from corrupt nodes are passed through the orchestrator, by checking both egress and ingress events.
diff --git a/integration/tests/bft/passthrough/dummy_orchestrator.go b/integration/tests/bft/framework/dummy_orchestrator.go
similarity index 89%
rename from integration/tests/bft/passthrough/dummy_orchestrator.go
rename to integration/tests/bft/framework/dummy_orchestrator.go
index 36143988c7d..421434aa451 100644
--- a/integration/tests/bft/passthrough/dummy_orchestrator.go
+++ b/integration/tests/bft/framework/dummy_orchestrator.go
@@ -1,4 +1,4 @@
-package passthrough
+package framework
 
 import (
 	"errors"
@@ -60,14 +60,24 @@
 // trackEgressEvents tracks egress events by event type; this func is used as a callback in the BaseOrchestrator OnEgressEvent callback list.
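+// For events that now arrive as wire-level message types (messages.ExecutionReceipt,
+// messages.ResultApproval), the event is first converted back to its internal
+// representation before its ID is recorded. A minimal sketch of that conversion
+// pattern, mirroring the hunk below:
+//
+//	internal, err := e.ToInternal() // yields the internal flow.* type
+//	receiptID := internal.(*flow.ExecutionReceipt).ID()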
 func (o *orchestrator) trackEgressEvents(event *insecure.EgressEvent) error {
 	switch e := event.FlowProtocolEvent.(type) {
-	case *flow.ExecutionReceipt:
-		o.egressEventTracker[typeExecutionReceipt] = append(o.egressEventTracker[typeExecutionReceipt], e.ID())
+	case *messages.ExecutionReceipt:
+		internalEvent, err := e.ToInternal()
+		if err != nil {
+			o.Logger.Err(err).Msgf("failed to convert event %T to internal", e)
+			return nil
+		}
+		o.egressEventTracker[typeExecutionReceipt] = append(o.egressEventTracker[typeExecutionReceipt], internalEvent.(*flow.ExecutionReceipt).ID())
 	case *messages.ChunkDataRequest:
 		o.egressEventTracker[typeChunkDataRequest] = append(o.egressEventTracker[typeChunkDataRequest], e.ChunkID)
 	case *messages.ChunkDataResponse:
 		o.egressEventTracker[typeChunkDataResponse] = append(o.egressEventTracker[typeChunkDataResponse], e.ChunkDataPack.ChunkID)
-	case *flow.ResultApproval:
-		o.egressEventTracker[typeResultApproval] = append(o.egressEventTracker[typeResultApproval], e.ID())
+	case *messages.ResultApproval:
+		internalResultApproval, err := e.ToInternal()
+		if err != nil {
+			o.Logger.Err(err).Msgf("failed to convert event %T to internal", e)
+			return nil
+		}
+		o.egressEventTracker[typeResultApproval] = append(o.egressEventTracker[typeResultApproval], internalResultApproval.(*flow.ResultApproval).ID())
 	}
 	return nil
 }
diff --git a/integration/tests/bft/passthrough/passthrough_test.go b/integration/tests/bft/framework/passthrough_test.go
similarity index 95%
rename from integration/tests/bft/passthrough/passthrough_test.go
rename to integration/tests/bft/framework/passthrough_test.go
index eaf04b3923c..255f5a4069b 100644
--- a/integration/tests/bft/passthrough/passthrough_test.go
+++ b/integration/tests/bft/framework/passthrough_test.go
@@ -1,8 +1,10 @@
-package passthrough
+package framework
 
 import (
 	"testing"
 
+	"github.com/onflow/flow-go/utils/unittest"
+
 	"github.com/stretchr/testify/suite"
 
 	"github.com/onflow/flow-go/integration/tests/common"
@@ -27,6 +29,7 @@ func TestPassThrough(t *testing.T) {
 // affected by the emitted result approvals.
 // Finally, it evaluates whether critical sealing-and-verification-related events from corrupted nodes are passed through the orchestrator.
 func (p *PassThroughTestSuite) TestSealingAndVerificationPassThrough() {
+	unittest.SkipUnless(p.T(), unittest.TEST_TODO, "flaky")
 	receipts, approvals := common.SealingAndVerificationHappyPathTest(
 		p.T(),
 		p.BlockState,
diff --git a/integration/tests/bft/framework/suite.go b/integration/tests/bft/framework/suite.go
new file mode 100644
index 00000000000..330099f4d17
--- /dev/null
+++ b/integration/tests/bft/framework/suite.go
@@ -0,0 +1,69 @@
+package framework
+
+import (
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/insecure"
+	"github.com/onflow/flow-go/integration/testnet"
+	"github.com/onflow/flow-go/integration/tests/bft"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// Suite represents a test suite evaluating the integration of the testnet against the
+// happy path of the Corrupted Conduit Framework (CCF) for BFT testing.
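+//
+// The suite is driven through testify in the usual way (see passthrough_test.go):
+//
+//	func TestPassThrough(t *testing.T) {
+//		suite.Run(t, new(PassThroughTestSuite))
+//	}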
+type Suite struct {
+	bft.BaseSuite
+	exe1ID       flow.Identifier // corrupted execution node 1
+	exe2ID       flow.Identifier // corrupted execution node 2
+	verID        flow.Identifier // corrupted verification node
+	Orchestrator *orchestrator
+}
+
+// SetupSuite runs a bare-minimum Flow network along with the following corrupted nodes:
+// - two corrupted execution nodes
+// - one corrupted verification node
+func (s *Suite) SetupSuite() {
+	s.BaseSuite.SetupSuite()
+
+	// filter out base suite execution and verification nodes
+	s.NodeConfigs = s.NodeConfigs.Filter(func(n testnet.NodeConfig) bool {
+		if n.Ghost {
+			return true
+		}
+		return n.Role != flow.RoleExecution && n.Role != flow.RoleVerification
+	})
+
+	// generates one corrupted verification node
+	s.verID = unittest.IdentifierFixture()
+	verConfig := testnet.NewNodeConfig(flow.RoleVerification,
+		testnet.WithID(s.verID),
+		testnet.WithLogLevel(zerolog.ErrorLevel),
+		testnet.AsCorrupted())
+	s.NodeConfigs = append(s.NodeConfigs, verConfig)
+
+	// generates two corrupted execution nodes
+	s.exe1ID = unittest.IdentifierFixture()
+	exe1Config := testnet.NewNodeConfig(flow.RoleExecution,
+		testnet.WithID(s.exe1ID),
+		testnet.WithLogLevel(zerolog.ErrorLevel),
+		testnet.AsCorrupted())
+	s.NodeConfigs = append(s.NodeConfigs, exe1Config)
+
+	s.exe2ID = unittest.IdentifierFixture()
+	exe2Config := testnet.NewNodeConfig(flow.RoleExecution,
+		testnet.WithID(s.exe2ID),
+		testnet.WithLogLevel(zerolog.ErrorLevel),
+		testnet.AsCorrupted())
+	s.NodeConfigs = append(s.NodeConfigs, exe2Config)
+
+	s.BaseSuite.StartCorruptedNetwork(
+		"bft_passthrough_test",
+		10_000,
+		100_000,
+		func() insecure.AttackOrchestrator {
+			s.Orchestrator = NewDummyOrchestrator(s.T(), s.Log)
+			return s.Orchestrator
+		},
+	)
+}
diff --git a/integration/tests/bft/gossipsub/README.md b/integration/tests/bft/gossipsub/README.md
new file mode 100644
index 00000000000..29de8880900
--- /dev/null
+++ b/integration/tests/bft/gossipsub/README.md
@@ -0,0 +1,32 @@
+# GossipSub BFT Tests
+GossipSub BFT tests are designed to test the behavior of the GossipSub protocol in a network environment with Byzantine nodes.
+
+## Topic Validator Test
+The `TopicValidatorTestSuite` in the `topicvalidator` package is specifically designed to test the functionality of the
+libp2p topic validator within a network scenario.
+This suite includes an end-to-end test to verify the topic validator's behavior in different situations,
+focusing on both unauthorized and authorized message handling.
+The method `TestTopicValidatorE2E` is a comprehensive test that mimics an environment with a corrupt Byzantine attacker node
+attempting to send unauthorized messages to a victim node.
+These messages should be dropped by the topic validator, as they fail the message authorization validation.
+The test simultaneously sends authorized messages to the victim node, ensuring that they are processed correctly,
+demonstrating the validator's correct operation.
+The test confirms two main aspects:
+1. Unauthorized messages must be dropped, and the victim node should not receive any of them.
+2. Authorized messages should be correctly delivered and processed by the victim node.
+
+## Signature Requirement Test
+The `TestGossipSubSignatureRequirement` test sets up a test environment consisting of three corrupt nodes: two attackers and one victim.
+One (malicious) attacker is configured without message signing, intending to send unsigned messages that should be rejected by the victim.
+The other (benign) attacker sends valid signed messages that should be received by the victim.
+The test is broken down into the following main parts:
+1. **Unauthorized Messages Testing**: The victim node should not receive any messages sent without correct signatures from the unauthorized attacker. The test checks for zero unauthorized messages received by the victim.
+2. **Authorized Messages Testing**: Messages sent by the authorized attacker, with the correct signature, must pass the libp2p signature verification process and be delivered to the victim. The test checks for all authorized messages received by the victim within a certain time frame.
+
+## RPC Inspector False Positive Test
+The `GossipsubRPCInspectorFalsePositiveNotificationsTestSuite` in the `rpc_inspector` package aims to ensure that the underlying libp2p libraries related to GossipSub RPC control message inspection do not trigger false positives during their validation processes.
+Here's a breakdown of the `TestGossipsubRPCInspectorFalsePositiveNotifications` method:
+1. **Configuration and Context Setup**: A specific duration for loading and intervals is defined, and a context with a timeout is created for the test scenario.
+2. **Simulating Network Activity**: The method triggers a "loader loop" with a specific number of test accounts and intervals, intending to create artificial network activity. It does this by submitting transactions to create Flow accounts, waiting for them to be sealed.
+3. **State Commitments**: The method waits for three state commitment changes, ensuring that the simulated network load behaves as expected.
+4. **Verification of Control Messages**: After simulating network activity, the method checks to ensure that no node in the network has disseminated an invalid control message notification. This is done by collecting metrics from the network containers and verifying that no false notifications are detected.
diff --git a/integration/tests/bft/gossipsub/rpc_inspector/false_positive_test.go b/integration/tests/bft/gossipsub/rpc_inspector/false_positive_test.go
new file mode 100644
index 00000000000..c948b9f99e1
--- /dev/null
+++ b/integration/tests/bft/gossipsub/rpc_inspector/false_positive_test.go
@@ -0,0 +1,41 @@
+package rpc_inspector
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/suite"
+)
+
+const numOfTestAccounts = 1000
+
+type GossipsubRPCInspectorFalsePositiveNotificationsTestSuite struct {
+	Suite
+}
+
+func TestGossipSubRpcInspectorFalsePositiveNotifications(t *testing.T) {
+	suite.Run(t, new(GossipsubRPCInspectorFalsePositiveNotificationsTestSuite))
+}
+
+// TestGossipsubRPCInspectorFalsePositiveNotifications ensures that changes or updates to any of the underlying libp2p libraries
+// do not result in any of the GossipSub RPC control message inspector validation rules being broken. Any time a validation rule is broken, an invalid
+// control message notification is disseminated. Using this fact, this test sets up a full Flow network and submits some transactions to generate network
+// activity. After some time we ensure that no invalid control message notifications are disseminated.
+func (s *GossipsubRPCInspectorFalsePositiveNotificationsTestSuite) TestGossipsubRPCInspectorFalsePositiveNotifications() {
+	loaderLoopDuration := 5 * time.Second
+	loaderLoopInterval := 500 * time.Millisecond
+	ctx, cancel := context.WithTimeout(s.Ctx, loaderLoopDuration)
+	defer cancel()
+	// The network has started; submit some transactions to create Flow accounts.
+	// We wait for each of these transactions to be sealed, ensuring we generate
+	// some artificial network activity.
+	go s.loaderLoop(ctx, numOfTestAccounts, loaderLoopInterval)
+	// wait for 3 state commitment changes; this ensures we simulated load on the network as expected.
+	s.waitForStateCommitments(s.Ctx, 3, time.Minute, 500*time.Millisecond)
+
+	// ensure no node in the network has disseminated an invalid control message notification
+	metricName := s.inspectorNotificationQSizeMetricName()
+	metricsByContainer := s.Net.GetMetricFromContainers(s.T(), metricName, s.metricsUrls())
+	s.ensureNoNotificationsDisseminated(metricsByContainer)
+}
diff --git a/integration/tests/bft/gossipsub/rpc_inspector/suite.go b/integration/tests/bft/gossipsub/rpc_inspector/suite.go
new file mode 100644
index 00000000000..815d5416b96
--- /dev/null
+++ b/integration/tests/bft/gossipsub/rpc_inspector/suite.go
@@ -0,0 +1,125 @@
+package rpc_inspector
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	io_prometheus_client "github.com/prometheus/client_model/go"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/integration/testnet"
+	"github.com/onflow/flow-go/integration/tests/bft"
+	"github.com/onflow/flow-go/integration/utils"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+)
+
+// Suite represents a test suite that sets up a full flow network.
+type Suite struct {
+	bft.BaseSuite
+	client *testnet.Client
+}
+
+// SetupSuite generates, initializes, and starts the Flow network.
+func (s *Suite) SetupSuite() {
+	s.BaseSuite.SetupSuite()
+
+	// enable metrics server for all nodes
+	for i := range s.NodeConfigs {
+		s.NodeConfigs[i].EnableMetricsServer = true
+	}
+
+	name := "bft_control_message_validation_false_positive_test"
+	// short epoch lengths ensure faster state commitments
+	stakingAuctionLen := uint64(10)
+	dkgPhaseLen := uint64(50)
+	epochLen := uint64(300)
+	netConfig := testnet.NewNetworkConfigWithEpochConfig(name, s.NodeConfigs, stakingAuctionLen, dkgPhaseLen, epochLen)
+	s.Net = testnet.PrepareFlowNetwork(s.T(), netConfig, flow.BftTestnet)
+
+	s.Ctx, s.Cancel = context.WithCancel(context.Background())
+	s.Net.Start(s.Ctx)
+
+	// starts tracking blocks by the ghost node
+	s.Track(s.T(), s.Ctx, s.Ghost())
+
+	client, err := s.Net.ContainerByName(testnet.PrimaryAN).TestnetClient()
+	require.NoError(s.T(), err)
+	s.client = client
+}
+
+// submitSmokeTestTransaction submits a create-account transaction to smoke test the network.
+// This ensures a single transaction can be sealed by the network.
+func (s *Suite) submitSmokeTestTransaction(ctx context.Context) {
+	_, err := utils.CreateFlowAccount(ctx, s.client)
+	require.NoError(s.T(), err)
+}
+
+// ensureNoNotificationsDisseminated ensures that the RPC inspector notification queue cache size metric for each container is 0,
+// indicating no notifications have been disseminated.
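+// Each container is expected to report a single gauge sample for this metric;
+// the assertion reads metric[0].GetGauge().GetValue() and requires it to be zero.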
+func (s *Suite) ensureNoNotificationsDisseminated(metricEndpoints map[string][]*io_prometheus_client.Metric) {
+	for containerName, metric := range metricEndpoints {
+		val := metric[0].GetGauge().GetValue()
+		require.Zerof(s.T(), val, "expected inspector notification queue cache size for container %s to be 0 got %v", containerName, val)
+	}
+}
+
+// inspectorNotificationQSizeMetricName returns the metric name for the rpc inspector notification queue cache size.
+func (s *Suite) inspectorNotificationQSizeMetricName() string {
+	return fmt.Sprintf("network_hero_cache_%s_successful_write_count_total", metrics.ResourceNetworkingRpcInspectorNotificationQueue)
+}
+
+// metricsUrls returns the metrics URL for each node configured on the test suite.
+func (s *Suite) metricsUrls() map[string]string {
+	urls := make(map[string]string)
+	for containerName, port := range s.Net.PortsByContainerName(testnet.MetricsPort, false) {
+		urls[containerName] = fmt.Sprintf("http://0.0.0.0:%s/metrics", port)
+	}
+	return urls
+}
+
+// loaderLoop submits load to the network in the form of account creation on the provided interval, simulating some network traffic.
+// It blocks until ctx is cancelled, so callers typically invoke it in a goroutine.
+func (s *Suite) loaderLoop(ctx context.Context, numOfTestAccounts int, interval time.Duration) {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	// Note: the loop runs directly in this goroutine; wrapping it in a nested
+	// goroutine would let the deferred ticker.Stop() fire immediately.
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			for i := 0; i < numOfTestAccounts; i++ {
+				s.submitSmokeTestTransaction(s.Ctx)
+			}
+		}
+	}
+}
+
+// waitForStateCommitments waits for n state commitment changes.
+func (s *Suite) waitForStateCommitments(ctx context.Context, n int, waitFor, tick time.Duration) {
+	prevStateComm := s.getCurrentFinalExecutionStateCommitment(ctx)
+	numOfStateCommChanges := 0
+	require.Eventually(s.T(), func() bool {
+		currStateComm := s.getCurrentFinalExecutionStateCommitment(ctx)
+		if prevStateComm != currStateComm {
+			numOfStateCommChanges++
+			prevStateComm = currStateComm
+		}
+		return numOfStateCommChanges >= n
+	}, waitFor, tick)
+}
+
+// getCurrentFinalExecutionStateCommitment returns the final state commitment of the latest sealed execution result, encoded as JSON.
+func (s *Suite) getCurrentFinalExecutionStateCommitment(ctx context.Context) string {
+	snapshot, err := s.client.GetLatestProtocolSnapshot(ctx)
+	require.NoError(s.T(), err)
+	executionResult, _, err := snapshot.SealedResult()
+	require.NoError(s.T(), err)
+	sc, err := executionResult.FinalStateCommitment()
+	require.NoError(s.T(), err)
+	bz, err := sc.MarshalJSON()
+	require.NoError(s.T(), err)
+	return string(bz)
+}
diff --git a/integration/tests/bft/gossipsub/signature/requirement/orchestrator.go b/integration/tests/bft/gossipsub/signature/requirement/orchestrator.go
index 6a7faab3dae..4a0c5e268ce 100644
--- a/integration/tests/bft/gossipsub/signature/requirement/orchestrator.go
+++ b/integration/tests/bft/gossipsub/signature/requirement/orchestrator.go
@@ -21,10 +21,16 @@ const (
 	// The numOfAuthorizedEvents allows us to wait for a certain number of authorized messages to be received; this should
 	// give the network enough time to process the unauthorized messages. This assures us that the unauthorized messages
 	// were indeed dropped and not left unprocessed.
-	numOfAuthorizedEvents = 50
+	// This threshold must be set to a low value to make the test conclude faster
+	// by waiting for fewer events, which is beneficial when running the test
+	// on an asynchronous network where event delivery can be unpredictable.
diff --git a/integration/tests/bft/gossipsub/signature/requirement/orchestrator.go b/integration/tests/bft/gossipsub/signature/requirement/orchestrator.go
index 6a7faab3dae..4a0c5e268ce 100644
--- a/integration/tests/bft/gossipsub/signature/requirement/orchestrator.go
+++ b/integration/tests/bft/gossipsub/signature/requirement/orchestrator.go
@@ -21,10 +21,16 @@ const (
 	// The numOfAuthorizedEvents allows us to wait for a certain number of authorized messages to be received, this should
 	// give the network enough time to process the unauthorized messages. This ensures us that the unauthorized messages
 	// were indeed dropped and not unprocessed.
-	numOfAuthorizedEvents = 50
+	// This threshold must be set to a low value to make the test conclude faster
+	// by waiting for fewer events, which is beneficial when running the test
+	// on an asynchronous network where event delivery can be unpredictable.
+	numOfAuthorizedEvents = 5
 
 	// numOfUnauthorizedEvents the number of unauthorized events to send by the test orchestrator.
-	numOfUnauthorizedEvents = 10
+	// This threshold must be set to a low value to make the test conclude faster
+	// by waiting for fewer events, which is beneficial when running the test
+	// on an asynchronous network where event delivery can be unpredictable.
+	numOfUnauthorizedEvents = 5
 )
 
 // Orchestrator represents a simple `insecure.AttackOrchestrator` that tracks any unsigned messages received by victim nodes as well as the typically expected messages.
@@ -74,7 +80,6 @@ func (s *Orchestrator) trackIngressEvents(event *insecure.IngressEvent) error {
 		s.unauthorizedEventsReceived.Inc()
 		s.Logger.Warn().Str("event_id", event.FlowProtocolEventID.String()).Msg("unauthorized ingress event received")
 	}
-
 	// track all authorized events sent during test
 	if expectedEvent, ok := s.authorizedEvents[event.FlowProtocolEventID]; ok {
 		// ensure event received intact no changes have been made to the underlying message
diff --git a/integration/tests/bft/gossipsub/signature/requirement/signature_requirement_test.go b/integration/tests/bft/gossipsub/signature/requirement/signature_requirement_test.go
index ee5a417446c..0ef58021b06 100644
--- a/integration/tests/bft/gossipsub/signature/requirement/signature_requirement_test.go
+++ b/integration/tests/bft/gossipsub/signature/requirement/signature_requirement_test.go
@@ -35,5 +35,7 @@ func (s *GossipSubSignatureRequirementTestSuite) TestGossipSubSignatureRequireme
 	require.Equal(s.T(), int64(0), s.Orchestrator.unauthorizedEventsReceived.Load(), fmt.Sprintf("expected to not receive any unauthorized messages instead got: %d", s.Orchestrator.unauthorizedEventsReceived.Load()))
 
 	// messages with correct message signatures are expected to always pass libp2p signature verification and be delivered to the victim EN.
-	require.Equal(s.T(), int64(numOfAuthorizedEvents), s.Orchestrator.authorizedEventsReceived.Load(), fmt.Sprintf("expected to receive %d authorized events got: %d", numOfAuthorizedEvents, s.Orchestrator.unauthorizedEventsReceived.Load()))
+	require.Eventually(s.T(), func() bool {
+		return s.Orchestrator.authorizedEventsReceived.Load() == int64(numOfAuthorizedEvents)
+	}, 5*time.Second, 500*time.Millisecond, fmt.Sprintf("expected to receive %d authorized events got: %d", numOfAuthorizedEvents, s.Orchestrator.authorizedEventsReceived.Load()))
 }
diff --git a/integration/tests/bft/gossipsub/topicvalidator/orchestrator.go b/integration/tests/bft/gossipsub/topicvalidator/orchestrator.go
new file mode 100644
index 00000000000..916ab92e6fb
--- /dev/null
+++ b/integration/tests/bft/gossipsub/topicvalidator/orchestrator.go
@@ -0,0 +1,233 @@
+package topicvalidator
+
+import (
+	"sync"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/exp/rand"
+
+	"github.com/onflow/flow-go/insecure"
+	"github.com/onflow/flow-go/integration/tests/bft"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/messages"
+	"github.com/onflow/flow-go/network/channels"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+const (
+	// numOfAuthorizedEvents the number of authorized events that will be created when the test orchestrator is initialized.
+	// The numOfAuthorizedEvents allows us to wait for a certain number of authorized messages to be received, which should
+	// give the network enough time to process the unauthorized messages. This ensures that the unauthorized
+	// messages were indeed dropped rather than left unprocessed.
+	// This threshold must be set to a low value to make the test conclude faster
+	// by waiting for fewer events, which is beneficial when running the test
+	// on an asynchronous network where event delivery can be unpredictable.
+	numOfAuthorizedEvents = 5
+
+	// numOfUnauthorizedEvents the number of unauthorized events per type to send by the test orchestrator.
+	// This threshold must be set to a low value to make the test conclude faster
+	// by waiting for fewer events, which is beneficial when running the test
+	// on an asynchronous network where event delivery can be unpredictable.
+	numOfUnauthorizedEvents = 5
+)
+
+// Orchestrator represents an insecure.AttackOrchestrator that tracks incoming unauthorized and authorized messages received by victim nodes.
+type Orchestrator struct {
+	*bft.BaseOrchestrator
+	unauthorizedEventsReceived []flow.Identifier
+	authorizedEventsReceived   []flow.Identifier
+	unauthorizedEvents         map[flow.Identifier]*insecure.EgressEvent
+	authorizedEvents           map[flow.Identifier]*insecure.EgressEvent
+	authorizedEventReceivedWg  sync.WaitGroup
+	attackerAN                 flow.Identifier
+	attackerEN                 flow.Identifier
+	victimEN                   flow.Identifier
+	victimVN                   flow.Identifier
+}
+
+var _ insecure.AttackOrchestrator = &Orchestrator{}
+
+func NewOrchestrator(t *testing.T, logger zerolog.Logger, attackerAN, attackerEN, victimEN, victimVN flow.Identifier) *Orchestrator {
+	orchestrator := &Orchestrator{
+		BaseOrchestrator: &bft.BaseOrchestrator{
+			T:      t,
+			Logger: logger.With().Str("component", "bft-test-orchestrator").Logger(),
+		},
+		unauthorizedEventsReceived: make([]flow.Identifier, 0),
+		authorizedEventsReceived:   make([]flow.Identifier, 0),
+		unauthorizedEvents:         make(map[flow.Identifier]*insecure.EgressEvent),
+		authorizedEvents:           make(map[flow.Identifier]*insecure.EgressEvent),
+		authorizedEventReceivedWg:  sync.WaitGroup{},
+		attackerAN:                 attackerAN,
+		attackerEN:                 attackerEN,
+		victimEN:                   victimEN,
+		victimVN:                   victimVN,
+	}
+
+	orchestrator.initUnauthorizedEvents()
+	orchestrator.initAuthorizedEvents()
+	orchestrator.OnIngressEvent = append(orchestrator.OnIngressEvent, orchestrator.trackIngressEvents)
+	return orchestrator
+}
+
+// trackIngressEvents is a callback that tracks any unauthorized messages that are expected to be blocked at the topic validator.
+// It also tracks all the authorized messages that are expected to be delivered to the node.
+func (o *Orchestrator) trackIngressEvents(event *insecure.IngressEvent) error {
+	// Track any unauthorized events that are received by corrupted nodes.
+	// These events are unauthorized combinations of messages & channels and should be
+	// dropped at the topic validator level.
+	if _, ok := o.unauthorizedEvents[event.FlowProtocolEventID]; ok {
+		o.unauthorizedEventsReceived = append(o.unauthorizedEventsReceived, event.FlowProtocolEventID)
+		o.Logger.Warn().Str("event_id", event.FlowProtocolEventID.String()).Msg("unauthorized ingress event received")
+	}
+
+	// track all authorized events sent during test
+	if _, ok := o.authorizedEvents[event.FlowProtocolEventID]; ok {
+		o.authorizedEventsReceived = append(o.authorizedEventsReceived, event.FlowProtocolEventID)
+		o.authorizedEventReceivedWg.Done()
+	}
+	return nil
+}
+
+// sendUnauthorizedMsgs publishes a few combinations of unauthorized messages from the corrupted AN to the corrupted EN.
+func (o *Orchestrator) sendUnauthorizedMsgs(t *testing.T) {
+	for _, event := range o.unauthorizedEvents {
+		err := o.OrchestratorNetwork.SendEgress(event)
+		require.NoError(t, err)
+	}
+}
+
+// sendAuthorizedMsgs sends a number of authorized messages.
+func (o *Orchestrator) sendAuthorizedMsgs(t *testing.T) {
+	for _, event := range o.authorizedEvents {
+		err := o.OrchestratorNetwork.SendEgress(event)
+		require.NoError(t, err)
+	}
+}
+
+// initUnauthorizedEvents initializes combinations of unauthorized messages and channels.
+func (o *Orchestrator) initUnauthorizedEvents() {
+	// message sent by unauthorized sender, AN is not authorized to publish block proposals
+	o.initUnauthorizedMsgByRoleEvents(numOfUnauthorizedEvents)
+
+	// message sent on unauthorized channel, AN is not authorized to send a sync request on the consensus committee channel
+	o.initUnauthorizedMsgOnChannelEvents(numOfUnauthorizedEvents)
+
+	// message is not authorized to be sent via insecure.Protocol_UNICAST
+	// unicast stream handler is expected to drop this message
+	o.initUnauthorizedUnicastOnChannelEvents(numOfUnauthorizedEvents)
+
+	// message is not authorized to be sent via insecure.Protocol_PUBLISH
+	o.initUnauthorizedPublishOnChannelEvents(numOfUnauthorizedEvents)
+}
+
+// initAuthorizedEvents initializes authorized combinations of messages and channels.
+func (o *Orchestrator) initAuthorizedEvents() {
+	channel := channels.RequestChunks
+	for i := uint64(0); i < numOfAuthorizedEvents; i++ {
+		chunkDataReq := &messages.ChunkDataRequest{
+			ChunkID: unittest.IdentifierFixture(),
+			Nonce:   rand.Uint64(),
+		}
+		eventID := unittest.GetFlowProtocolEventID(o.T, channel, chunkDataReq)
+		event := &insecure.EgressEvent{
+			CorruptOriginId:     o.victimVN,
+			Channel:             channel,
+			Protocol:            insecure.Protocol_PUBLISH,
+			TargetNum:           0,
+			TargetIds:           flow.IdentifierList{o.attackerEN},
+			FlowProtocolEvent:   chunkDataReq,
+			FlowProtocolEventID: eventID,
+		}
+		o.authorizedEvents[eventID] = event
+		o.authorizedEventReceivedWg.Add(1)
+	}
+}
+
+// initUnauthorizedMsgByRoleEvents creates n events where the sender is unauthorized to
+// send the FlowProtocolEvent. In this case ANs are not authorized to send block proposals.
+func (o *Orchestrator) initUnauthorizedMsgByRoleEvents(n int) {
+	channel := channels.SyncCommittee
+	for i := 0; i < n; i++ {
+		unauthorizedProposal := (*messages.Proposal)(unittest.ProposalFixture())
+		eventID := unittest.GetFlowProtocolEventID(o.T, channel, unauthorizedProposal)
+		unauthorizedMsgByRole := &insecure.EgressEvent{
+			CorruptOriginId:     o.attackerAN,
+			Channel:             channel,
+			Protocol:            insecure.Protocol_PUBLISH,
+			TargetNum:           0,
+			TargetIds:           flow.IdentifierList{o.victimEN},
+			FlowProtocolEvent:   unauthorizedProposal,
+			FlowProtocolEventID: eventID,
+		}
+		o.unauthorizedEvents[eventID] = unauthorizedMsgByRole
+	}
+}
+
+// initUnauthorizedMsgOnChannelEvents creates n events where the message is not
+// authorized to be sent on the event channel.
+func (o *Orchestrator) initUnauthorizedMsgOnChannelEvents(n int) {
+	channel := channels.PushReceipts
+	for i := 0; i < n; i++ {
+		syncReq := &messages.SyncRequest{
+			Nonce:  rand.Uint64(),
+			Height: rand.Uint64(),
+		}
+		eventID := unittest.GetFlowProtocolEventID(o.T, channel, syncReq)
+		unauthorizedMsgOnChannel := &insecure.EgressEvent{
+			CorruptOriginId:     o.attackerAN,
+			Channel:             channel,
+			Protocol:            insecure.Protocol_PUBLISH,
+			TargetNum:           0,
+			TargetIds:           flow.IdentifierList{o.victimEN},
+			FlowProtocolEvent:   syncReq,
+			FlowProtocolEventID: eventID,
+		}
+		o.unauthorizedEvents[eventID] = unauthorizedMsgOnChannel
+	}
+}
+
+// initUnauthorizedUnicastOnChannelEvents creates n events where the message is not
+// authorized to be sent via insecure.Protocol_UNICAST on the event channel.
+func (o *Orchestrator) initUnauthorizedUnicastOnChannelEvents(n int) {
+	channel := channels.SyncCommittee
+	for i := 0; i < n; i++ {
+		syncReq := &messages.SyncRequest{
+			Nonce:  rand.Uint64(),
+			Height: rand.Uint64(),
+		}
+		eventID := unittest.GetFlowProtocolEventID(o.T, channel, syncReq)
+		unauthorizedUnicastOnChannel := &insecure.EgressEvent{
+			CorruptOriginId:     o.attackerAN,
+			Channel:             channel,
+			Protocol:            insecure.Protocol_UNICAST,
+			TargetNum:           0,
+			TargetIds:           flow.IdentifierList{o.victimEN},
+			FlowProtocolEvent:   syncReq,
+			FlowProtocolEventID: eventID,
+		}
+		o.unauthorizedEvents[eventID] = unauthorizedUnicastOnChannel
+	}
+}
+
+// initUnauthorizedPublishOnChannelEvents creates n events where the message is not
+// authorized to be sent via insecure.Protocol_PUBLISH on the event channel.
+func (o *Orchestrator) initUnauthorizedPublishOnChannelEvents(n int) {
+	channel := channels.ProvideChunks
+	for i := 0; i < n; i++ {
+		chunkDataResponse := unittest.ChunkDataResponseMsgFixture(unittest.IdentifierFixture())
+		eventID := unittest.GetFlowProtocolEventID(o.T, channel, chunkDataResponse)
+		unauthorizedPublishOnChannel := &insecure.EgressEvent{
+			CorruptOriginId:     o.attackerEN,
+			Channel:             channel,
+			Protocol:            insecure.Protocol_PUBLISH,
+			TargetNum:           0,
+			TargetIds:           flow.IdentifierList{o.victimVN},
+			FlowProtocolEvent:   chunkDataResponse,
+			FlowProtocolEventID: eventID,
+		}
+		o.unauthorizedEvents[eventID] = unauthorizedPublishOnChannel
+	}
+}
diff --git a/integration/tests/bft/gossipsub/topicvalidator/suite.go b/integration/tests/bft/gossipsub/topicvalidator/suite.go
new file mode 100644
index 00000000000..b48d4c57bc1
--- /dev/null
+++ b/integration/tests/bft/gossipsub/topicvalidator/suite.go
@@ -0,0 +1,85 @@
+package topicvalidator
+
+import (
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/insecure"
+	"github.com/onflow/flow-go/integration/testnet"
+	"github.com/onflow/flow-go/integration/tests/bft"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// Suite represents a test suite evaluating the correctness of different p2p topic validator
+// validation conditions.
+type Suite struct {
+	bft.BaseSuite
+	attackerANID flow.Identifier // corrupt attacker AN id
+	attackerENID flow.Identifier // corrupt attacker EN id
+	victimENID   flow.Identifier // corrupt victim EN id
+	victimVNID   flow.Identifier // corrupt victim VN id
+	Orchestrator *Orchestrator
+}
+
+// SetupSuite runs a bare-minimum Flow network along with 2 attacker nodes
+// and 2 victim nodes:
+// - Corrupt AN that will serve as an attacker and send unauthorized messages to a victim EN.
+// - Corrupt EN that will serve as an attacker and send unauthorized messages to a victim VN.
+// - Corrupt EN with the topic validator enabled that will serve as a victim.
+// - Corrupt VN with the topic validator enabled that will serve as a victim.
+func (s *Suite) SetupSuite() {
+	s.BaseSuite.SetupSuite()
+
+	// filter out base suite execution, verification and access nodes
+	s.NodeConfigs = s.NodeConfigs.Filter(func(n testnet.NodeConfig) bool {
+		if n.Ghost {
+			return true
+		}
+		return n.Role != flow.RoleExecution && n.Role != flow.RoleVerification && n.Role != flow.RoleAccess
+	})
+
+	// create corrupt access node
+	s.attackerANID = unittest.IdentifierFixture()
+	s.NodeConfigs = append(s.NodeConfigs, testnet.NewNodeConfig(flow.RoleAccess,
+		testnet.WithID(s.attackerANID),
+		testnet.WithLogLevel(zerolog.FatalLevel),
+		testnet.AsCorrupted()))
+
+	// create corrupt verification node with the topic validator enabled. This is the victim
+	// node that will receive unauthorized messages published by the attacker execution node.
+	s.victimVNID = unittest.IdentifierFixture()
+	verConfig := testnet.NewNodeConfig(flow.RoleVerification,
+		testnet.WithID(s.victimVNID),
+		testnet.WithAdditionalFlag("--topic-validator-disabled=false"),
+		testnet.WithLogLevel(zerolog.FatalLevel),
+		testnet.AsCorrupted())
+	s.NodeConfigs = append(s.NodeConfigs, verConfig)
+
+	// generates two execution nodes, 1 of them will be corrupt
+	s.attackerENID = unittest.IdentifierFixture()
+	exe1Config := testnet.NewNodeConfig(flow.RoleExecution,
+		testnet.WithID(s.attackerENID),
+		testnet.WithLogLevel(zerolog.FatalLevel),
+		testnet.AsCorrupted())
+	s.NodeConfigs = append(s.NodeConfigs, exe1Config)
+
+	// create corrupt execution node with the topic validator enabled. This is the victim
+	// node that will receive unauthorized messages published by the attacker access node.
+	s.victimENID = unittest.IdentifierFixture()
+	exe2Config := testnet.NewNodeConfig(flow.RoleExecution,
+		testnet.WithID(s.victimENID),
+		testnet.WithLogLevel(zerolog.FatalLevel),
+		testnet.WithAdditionalFlag("--topic-validator-disabled=false"),
+		testnet.AsCorrupted())
+	s.NodeConfigs = append(s.NodeConfigs, exe2Config)
+
+	s.BaseSuite.StartCorruptedNetwork(
+		"bft_topic_validator_test",
+		10_000,
+		100_000,
+		func() insecure.AttackOrchestrator {
+			s.Orchestrator = NewOrchestrator(s.T(), s.Log, s.attackerANID, s.attackerENID, s.victimENID, s.victimVNID)
+			return s.Orchestrator
+		},
+	)
+}
diff --git a/integration/tests/bft/gossipsub/topicvalidator/topic_validator_test.go b/integration/tests/bft/gossipsub/topicvalidator/topic_validator_test.go
new file mode 100644
index 00000000000..aff4fc6b6a0
--- /dev/null
+++ b/integration/tests/bft/gossipsub/topicvalidator/topic_validator_test.go
@@ -0,0 +1,50 @@
+package topicvalidator
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+type TopicValidatorTestSuite struct {
+	Suite
+}
+
+func TestTopicValidator(t *testing.T) {
+	suite.Run(t, new(TopicValidatorTestSuite))
+}
+
+// TestTopicValidatorE2E ensures that the libp2p topic validator is working as expected.
+// This test will attempt to send multiple combinations of unauthorized message + channel from
+// a corrupted byzantine attacker node. The victim node should not receive any of these messages, as they should
+// be dropped due to failing message authorization validation at the topic validator.
+// This test also sends a number of authorized messages that will be delivered and processed by the victim node,
+// ensuring that the topic validator behaves as expected in the happy path.
+func (s *TopicValidatorTestSuite) TestTopicValidatorE2E() {
+	s.Orchestrator.sendUnauthorizedMsgs(s.T())
+	s.Orchestrator.sendAuthorizedMsgs(s.T())
+	unittest.RequireReturnsBefore(
+		s.T(),
+		s.Orchestrator.authorizedEventReceivedWg.Wait,
+		5*time.Second,
+		"could not receive authorized messages on time")
+
+	// Victim nodes are configured with the topic validator enabled, therefore they should not have
+	// received any of the unauthorized messages.
+	require.Equal(
+		s.T(),
+		0,
+		len(s.Orchestrator.unauthorizedEventsReceived),
+		fmt.Sprintf("expected to not receive any unauthorized messages instead got: %d", len(s.Orchestrator.unauthorizedEventsReceived)))
+
+	// Victim nodes should receive all the authorized events sent.
+	require.Eventually(s.T(), func() bool {
+		return len(s.Orchestrator.authorizedEventsReceived) == numOfAuthorizedEvents
+	}, 5*time.Second, 500*time.Millisecond,
+		fmt.Sprintf("expected to receive %d authorized events got: %d", numOfAuthorizedEvents, len(s.Orchestrator.authorizedEventsReceived)))
+}
diff --git a/integration/tests/bft/passthrough/suite.go b/integration/tests/bft/passthrough/suite.go
deleted file mode 100644
index d735489de8b..00000000000
--- a/integration/tests/bft/passthrough/suite.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package passthrough
-
-import (
-	"github.com/rs/zerolog"
-
-	"github.com/onflow/flow-go/insecure"
-	"github.com/onflow/flow-go/integration/testnet"
-	"github.com/onflow/flow-go/integration/tests/bft"
-	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/utils/unittest"
-)
-
-// Suite represents a test suite evaluating the integration of the testnet against
-// happy path of Corrupted Conduit Framework (CCF) for BFT testing.
-type Suite struct {
-	bft.BaseSuite
-	exe1ID flow.Identifier // corrupted execution node 1
-	exe2ID flow.Identifier // corrupted execution node 2
-	verID  flow.Identifier // corrupted verification node
-	Orchestrator *orchestrator
-}
-
-// SetupSuite runs a bare minimum Flow network to function correctly with the following corrupted nodes:
-// - two corrupted execution node
-// - One corrupted verification node
-func (s *Suite) SetupSuite() {
-	s.BaseSuite.SetupSuite()
-
-	// filter out base suite execution and verification nodes
-	s.NodeConfigs = s.NodeConfigs.Filter(func(n testnet.NodeConfig) bool {
-		if n.Ghost {
-			return true
-		}
-		return n.Role != flow.RoleExecution && n.Role != flow.RoleVerification
-	})
-
-	// generates one corrupted verification node
-	s.verID = unittest.IdentifierFixture()
-	verConfig := testnet.NewNodeConfig(flow.RoleVerification,
-		testnet.WithID(s.verID),
-		testnet.WithLogLevel(zerolog.ErrorLevel),
-		testnet.AsCorrupted())
-	s.NodeConfigs = append(s.NodeConfigs, verConfig)
-
-	// generates two corrupted execution nodes
-	s.exe1ID = unittest.IdentifierFixture()
-	exe1Config := testnet.NewNodeConfig(flow.RoleExecution,
-		testnet.WithID(s.exe1ID),
-		testnet.WithLogLevel(zerolog.ErrorLevel),
-		testnet.AsCorrupted())
-	s.NodeConfigs = append(s.NodeConfigs, exe1Config)
-
-	s.exe2ID = unittest.IdentifierFixture()
-	exe2Config := testnet.NewNodeConfig(flow.RoleExecution,
-		testnet.WithID(s.exe2ID),
-		testnet.WithLogLevel(zerolog.ErrorLevel),
-		testnet.AsCorrupted())
-	s.NodeConfigs = append(s.NodeConfigs, exe2Config)
-
-	s.BaseSuite.StartCorruptedNetwork(
-		"bft_passthrough_test",
-		10_000,
-		100_000,
-		func() insecure.AttackOrchestrator {
-			s.Orchestrator = NewDummyOrchestrator(s.T(), s.Log)
-			return s.Orchestrator
-		},
-	)
-}
diff --git a/integration/tests/bft/protocol/README.md b/integration/tests/bft/protocol/README.md
new file mode 100644
index 00000000000..7411bf8f25a
--- /dev/null
+++ b/integration/tests/bft/protocol/README.md
@@ -0,0 +1,22 @@
+# Protocol BFT Tests
+This package contains BFT tests concerning the core Flow protocol. These tests are run as part of the integration test suite.
+
+
+## Admin Command Disallow List Test
+The `AdminCommandDisallowListTestSuite` in the `disallowlisting` package is designed to test the functionality of the disallow-list admin command within a network context.
+It ensures that connections to a blocked node are immediately pruned and incoming connection requests are blocked.
+The test simulates the setup of two corrupt nodes (a sender and a receiver) and examines the behavior of the network before and after the sender node is disallowed.
+It includes steps to send authorized messages to verify normal behavior, apply disallow-listing, send unauthorized messages, and validate the expectation that no unauthorized messages are received.
+Various timing controls are put in place to handle asynchronous processes and potential race conditions.
+The entire suite ensures that the disallow-listing command behaves as intended, safeguarding network integrity.
+
+## Wintermute Attack Test
+The `WintermuteTestSuite` in the `wintermute` package is focused on validating a specific attack scenario within the network, termed the "wintermute attack."
+This attack involves an Attack Orchestrator corrupting an execution result and then leveraging corrupt verification nodes to verify it.
+The suite includes a constant timeout to define the attack window and a detailed test sequence.
+The `TestWintermuteAttack` method carries out the attack process.
+It first waits for an execution result to be corrupted and identifies the corresponding victim block.
+It ensures that the corrupt execution nodes generate the correct result for the victim block and then waits for a specific number of approvals from corrupt verification nodes for each chunk of the corrupted result.
+Further, the test waits for a block height equal to the victim block height to be sealed and verifies that the original victim block is correctly identified.
+Additional methods and logging assist in detailing and controlling the flow of the attack.
+The suite evaluates the system's behavior under this specific attack condition and ensures that the expected actions and responses are observed.
\ No newline at end of file
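The disallow-list test described above ultimately drives the node's admin server. As a rough illustration of the request involved, here is a sketch in Go of posting a set-config command to an admin endpoint; the endpoint path, payload shape, and `disallowList` helper follow flow-go admin-server conventions but should be treated as illustrative rather than authoritative.

```go
package adminexample

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// disallowList posts a set-config command to a node's admin server, roughly
// what the suite's disallowListNode helper does via the admin client.
func disallowList(adminAddr, nodeIDHex string) error {
	body, err := json.Marshal(map[string]interface{}{
		"commandName": "set-config",
		"data": map[string]interface{}{
			"network-id-provider-blocklist": []string{nodeIDHex},
		},
	})
	if err != nil {
		return err
	}
	resp, err := http.Post(fmt.Sprintf("http://%s/admin/run_command", adminAddr), "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("admin command failed: %s", resp.Status)
	}
	return nil
}
```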
diff --git a/integration/tests/bft/protocol/disallowlisting/admin_command_disallowlisting_test.go b/integration/tests/bft/protocol/disallowlisting/admin_command_disallowlisting_test.go
new file mode 100644
index 00000000000..145d10a2c04
--- /dev/null
+++ b/integration/tests/bft/protocol/disallowlisting/admin_command_disallowlisting_test.go
@@ -0,0 +1,66 @@
+package disallowlisting
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+type AdminCommandDisallowListTestSuite struct {
+	Suite
+}
+
+func TestAdminCommandDisallowList(t *testing.T) {
+	suite.Run(t, new(AdminCommandDisallowListTestSuite))
+}
+
+// TestAdminCommandDisallowList ensures that the disallow-list admin command works as expected. When a node is blocked via the admin disallow-list command,
+// the libp2p connection to that node should be pruned immediately and the connection gater should start to block incoming connection requests. This test
+// sets up two corrupt nodes, a sender and a receiver; the sender sends messages before and after being blocked by the receiver node via
+// the disallow-list admin command. The receiver node is expected to receive messages as normal before disallow-listing the sender; after disallow-listing the sender,
+// it should not receive any messages. The test uses two corrupt nodes so that the test logic can command one (corrupt) node to send a message
+// and examine the other (corrupt) node to check whether it has received the message.
+func (a *AdminCommandDisallowListTestSuite) TestAdminCommandDisallowList() {
+	// send some authorized messages indicating the network is working as expected
+	a.Orchestrator.sendAuthorizedMsgs(a.T())
+	unittest.RequireReturnsBefore(a.T(), a.Orchestrator.authorizedEventsReceivedWg.Wait, 5*time.Second, "could not receive authorized messages on time")
+	// messages with correct message signatures are expected to always pass libp2p signature verification and be delivered to the victim EN.
+	require.Equal(
+		a.T(),
+		int64(numOfAuthorizedEvents),
+		a.Orchestrator.authorizedEventsReceived.Load(),
+		fmt.Sprintf("expected to receive %d authorized events got: %d", numOfAuthorizedEvents, a.Orchestrator.authorizedEventsReceived.Load()))
+
+	// after disallow-listing a node (a.senderVN) we should not receive any messages from that node.
+	// This is an asynchronous process with a number of sub-processes involved, including but not limited to:
+	// - submitting a request to the admin server for the node to be disallow-listed.
+	// - the node's disallow list must update.
+	// - the peer manager needs to prune the connection (takes at least 1 second).
+	// - the connection gater will start blocking incoming connections (takes at least 1 second).
+	// We wait for 5 seconds to reduce the small chance of a race condition between the time a node is blocked
+	// and the time the blocked node sends the first unauthorized message.
+	a.disallowListNode(a.senderVN)
+	time.Sleep(5 * time.Second)
+
+	// send unauthorized messages; in normal situations, if the node were not disallow-listed, these messages
+	// would be considered legitimate and hence would be delivered to the recipients.
+	a.Orchestrator.sendExpectedBlockedMsgs(a.T())
+
+	// The sleep is unavoidable: these messages are sent between 2 running libp2p nodes and we don't have any hooks in between.
+	// They are sent after the node is blocked, meaning they are not expected to be delivered to the receiver node,
+	// so we sleep this approximate amount of time to ensure all messages were attempted, processed, and dropped.
+	time.Sleep(3 * time.Second)
+
+	// messages sent after the node is disallow-listed are considered unauthorized, we don't expect to receive any of them.
+	require.Equal(
+		a.T(),
+		int64(0),
+		a.Orchestrator.expectedBlockedEventsReceived.Load(),
+		fmt.Sprintf("expected to not receive any unauthorized messages instead got: %d", a.Orchestrator.expectedBlockedEventsReceived.Load()))
+}
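The fixed sleeps above are pragmatic: absence of delivery can only be proven by waiting out a window. A polling variant can at least fail fast when an unexpected message does arrive, instead of only checking after the full window. The sketch below shows that pattern with an illustrative helper name; the counter field is assumed to mirror the orchestrator's atomic counter, and this is an alternative shape, not the suite's actual method.

```go
package disallowlistexample

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"
)

// requireNoneReceived polls a counter of blocked-but-received events for the
// given window and fails immediately if any arrives. It still consumes the
// full window when nothing arrives, which is inherent to proving absence.
func requireNoneReceived(t *testing.T, received *atomic.Int64, window, tick time.Duration) {
	deadline := time.Now().Add(window)
	for time.Now().Before(deadline) {
		require.Zero(t, received.Load(), "unexpectedly received %d blocked events", received.Load())
		time.Sleep(tick)
	}
}
```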
diff --git a/integration/tests/bft/protocol/disallowlisting/orchestrator.go b/integration/tests/bft/protocol/disallowlisting/orchestrator.go
new file mode 100644
index 00000000000..41cedc4883b
--- /dev/null
+++ b/integration/tests/bft/protocol/disallowlisting/orchestrator.go
@@ -0,0 +1,113 @@
+package disallowlisting
+
+import (
+	"sync"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/insecure"
+	"github.com/onflow/flow-go/integration/tests/bft"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/network"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+const (
+	// numOfAuthorizedEvents number of events to send before disallow-listing the sender node via the disallow-list command.
+	// This threshold must be set to a low value to make the test conclude faster
+	// by waiting for fewer events, which is beneficial when running the test
+	// on an asynchronous network where event delivery can be unpredictable.
+	numOfAuthorizedEvents = 5
+
+	// numOfUnauthorizedEvents number of events to send after disallow-listing the sender node via the disallow-list command.
+	// This threshold must be set to a low value to make the test conclude faster
+	// by waiting for fewer events, which is beneficial when running the test
+	// on an asynchronous network where event delivery can be unpredictable.
+	numOfUnauthorizedEvents = 5
+)
+
+// Orchestrator represents a simple `insecure.AttackOrchestrator` that tracks messages received before and after the
+// senderVN is disallow-listed by the receiverEN via the admin disallow-list command.
+type Orchestrator struct {
+	*bft.BaseOrchestrator
+	sync.Mutex
+	codec                         network.Codec
+	expectedBlockedEventsReceived *atomic.Int64
+	authorizedEventsReceived      *atomic.Int64
+	expectedBlockedEvents         map[flow.Identifier]*insecure.EgressEvent
+	authorizedEvents              map[flow.Identifier]*insecure.EgressEvent
+	authorizedEventsReceivedWg    sync.WaitGroup
+	senderVN                      flow.Identifier
+	receiverEN                    flow.Identifier
+}
+
+var _ insecure.AttackOrchestrator = &Orchestrator{}
+
+func NewOrchestrator(t *testing.T, logger zerolog.Logger, senderVN, receiverEN flow.Identifier) *Orchestrator {
+	orchestrator := &Orchestrator{
+		BaseOrchestrator: &bft.BaseOrchestrator{
+			T:      t,
+			Logger: logger,
+		},
+		codec:                         unittest.NetworkCodec(),
+		expectedBlockedEventsReceived: atomic.NewInt64(0),
+		authorizedEventsReceived:      atomic.NewInt64(0),
+		expectedBlockedEvents:         make(map[flow.Identifier]*insecure.EgressEvent),
+		authorizedEvents:              make(map[flow.Identifier]*insecure.EgressEvent),
+		authorizedEventsReceivedWg:    sync.WaitGroup{},
+		senderVN:                      senderVN,
+		receiverEN:                    receiverEN,
+	}
+
+	orchestrator.OnIngressEvent = append(orchestrator.OnIngressEvent, orchestrator.trackIngressEvents)
+
+	return orchestrator
+}
+
+// trackIngressEvents is a callback that tracks authorized messages that are expected to be received by the receiverEN before we disallow-list the sender.
+// It also tracks any unauthorized messages received that are expected to be blocked after the senderVN is disallow-listed via the admin disallow-list command.
+func (a *Orchestrator) trackIngressEvents(event *insecure.IngressEvent) error {
+	// Track any unauthorized events that are received, these events are sent after the admin disallow-list command
+	// is used to disallow-list the sender node.
+	if _, ok := a.expectedBlockedEvents[event.FlowProtocolEventID]; ok {
+		if event.OriginID == a.senderVN {
+			a.expectedBlockedEventsReceived.Inc()
+			a.Logger.Warn().Str("event_id", event.FlowProtocolEventID.String()).Msg("unauthorized ingress event received")
+		}
+	}
+
+	// track all authorized events sent before the sender node is disallow-listed.
+	if _, ok := a.authorizedEvents[event.FlowProtocolEventID]; ok {
+		// the check that the event was received intact, with no changes to the underlying message, is currently disabled:
+		// a.assertEventsEqual(expectedEvent, event)
+		a.authorizedEventsReceived.Inc()
+		a.authorizedEventsReceivedWg.Done()
+	}
+
+	return nil
+}
+
+// sendAuthorizedMsgs publishes a number of authorized messages from the senderVN. Authorized messages are messages
+// that are sent before the senderVN is disallow-listed.
+func (a *Orchestrator) sendAuthorizedMsgs(t *testing.T) {
+	for i := 0; i < numOfAuthorizedEvents; i++ {
+		event := bft.RequestChunkDataPackEgressFixture(a.T, a.senderVN, a.receiverEN, insecure.Protocol_PUBLISH)
+		err := a.OrchestratorNetwork.SendEgress(event)
+		require.NoError(t, err)
+		a.authorizedEvents[event.FlowProtocolEventID] = event
+		a.authorizedEventsReceivedWg.Add(1)
+	}
+}
+
+// sendExpectedBlockedMsgs publishes a number of unauthorized messages. Unauthorized messages are messages that are sent
+// after the senderVN is blocked via the admin disallow-list command. These messages are not expected to be received.
+func (a *Orchestrator) sendExpectedBlockedMsgs(t *testing.T) {
+	for i := 0; i < numOfUnauthorizedEvents; i++ {
+		event := bft.RequestChunkDataPackEgressFixture(a.T, a.senderVN, a.receiverEN, insecure.Protocol_PUBLISH)
+		err := a.OrchestratorNetwork.SendEgress(event)
+		require.NoError(t, err)
+		a.expectedBlockedEvents[event.FlowProtocolEventID] = event
+	}
+}
diff --git a/integration/tests/bft/protocol/disallowlisting/suite.go b/integration/tests/bft/protocol/disallowlisting/suite.go
new file mode 100644
index 00000000000..94a7c747e94
--- /dev/null
+++ b/integration/tests/bft/protocol/disallowlisting/suite.go
@@ -0,0 +1,72 @@
+package disallowlisting
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/insecure"
+	"github.com/onflow/flow-go/integration/client"
+	"github.com/onflow/flow-go/integration/testnet"
+	"github.com/onflow/flow-go/integration/tests/bft"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// Suite represents a test suite that ensures the admin disallow-list command works as expected.
+type Suite struct {
+	bft.BaseSuite
+	senderVN     flow.Identifier // node ID of the corrupted node that will send messages in the test. The sender node will be blocked.
+	receiverEN   flow.Identifier // node ID of the corrupted node that will receive messages in the test
+	Orchestrator *Orchestrator
+}
+
+// SetupSuite runs a bare minimum Flow network to function correctly along with 2 corrupt nodes: a sender and a receiver.
+// - Corrupt VN that will be used to send messages; this node will be blocked by the receiving corrupt EN.
+// - Corrupt EN that will receive messages from the corrupt VN; we will execute the admin command on this node.
+func (s *Suite) SetupSuite() {
+	s.BaseSuite.SetupSuite()
+
+	// generate 1 corrupt verification node
+	s.senderVN = unittest.IdentifierFixture()
+	s.NodeConfigs = append(s.NodeConfigs, testnet.NewNodeConfig(flow.RoleVerification,
+		testnet.WithID(s.senderVN),
+		testnet.WithLogLevel(zerolog.FatalLevel),
+		testnet.AsCorrupted()))
+
+	// generate 1 corrupt execution node
+	s.receiverEN = unittest.IdentifierFixture()
+	s.NodeConfigs = append(s.NodeConfigs, testnet.NewNodeConfig(flow.RoleExecution,
+		testnet.WithID(s.receiverEN),
+		testnet.WithLogLevel(zerolog.FatalLevel),
+		testnet.AsCorrupted()))
+
+	s.BaseSuite.StartCorruptedNetwork(
+		"bft_admin_disallow_list_test",
+		10_000,
+		100_000,
+		func() insecure.AttackOrchestrator {
+			s.Orchestrator = NewOrchestrator(s.T(), s.Log, s.senderVN, s.receiverEN)
+			return s.Orchestrator
+		},
+	)
+}
+
+// disallowListNode submits a request to the receiver EN's admin server to disallow-list the sender VN.
+func (s *Suite) disallowListNode(nodeID flow.Identifier) { + serverAddr := fmt.Sprintf("localhost:%s", s.Net.ContainerByID(s.receiverEN).Port(testnet.AdminPort)) + adminClient := client.NewAdminClient(serverAddr) + + data := map[string]interface{}{"network-id-provider-blocklist": []string{nodeID.String()}} + resp, err := adminClient.RunCommand(context.Background(), "set-config", data) + require.NoError(s.T(), err) + + output, ok := resp.Output.(map[string]interface{}) + require.True(s.T(), ok) + + newList, ok := output["newValue"].([]interface{}) + require.True(s.T(), ok) + require.Contains(s.T(), newList, nodeID.String()) +} diff --git a/integration/tests/bft/wintermute/suite.go b/integration/tests/bft/protocol/wintermute/suite.go similarity index 100% rename from integration/tests/bft/wintermute/suite.go rename to integration/tests/bft/protocol/wintermute/suite.go diff --git a/integration/tests/bft/wintermute/wintermute_test.go b/integration/tests/bft/protocol/wintermute/wintermute_test.go similarity index 93% rename from integration/tests/bft/wintermute/wintermute_test.go rename to integration/tests/bft/protocol/wintermute/wintermute_test.go index 2cb721751a8..1530a8eab6f 100644 --- a/integration/tests/bft/wintermute/wintermute_test.go +++ b/integration/tests/bft/protocol/wintermute/wintermute_test.go @@ -31,9 +31,9 @@ func (w *WintermuteTestSuite) TestWintermuteAttack() { victimBlock := w.BlockState.WaitForBlockById(w.T(), corruptedResult.BlockID) // waits for the execution receipt of victim block from both CORRUPTED execution nodes. - receiptB1 := w.ReceiptState.WaitForReceiptFrom(w.T(), victimBlock.Header.ID(), w.corruptedEN1Id) + receiptB1 := w.ReceiptState.WaitForReceiptFrom(w.T(), victimBlock.ID(), w.corruptedEN1Id) w.T().Logf("receipt for victim block generated by execution node-1: %x result ID: %x\n", w.corruptedEN1Id, receiptB1.ExecutionResult.ID()) - receiptB2 := w.ReceiptState.WaitForReceiptFrom(w.T(), victimBlock.Header.ID(), w.corruptedEN2Id) + receiptB2 := w.ReceiptState.WaitForReceiptFrom(w.T(), victimBlock.ID(), w.corruptedEN2Id) w.T().Logf("receipt for victim block generated by execution node-2: %x result ID: %x\n", w.corruptedEN2Id, receiptB2.ExecutionResult.ID()) // makes sure corrupted execution nodes generated the corrupted result for this victim block. @@ -50,11 +50,11 @@ func (w *WintermuteTestSuite) TestWintermuteAttack() { } // waits until we seal a height equal to the victim block height - w.BlockState.WaitForSealed(w.T(), victimBlock.Header.Height) + w.BlockState.WaitForSealedHeight(w.T(), victimBlock.Height) // then checks querying victim block by height returns the original victim block. - blockByHeight, ok := w.BlockState.FinalizedHeight(victimBlock.Header.Height) + blockByHeight, ok := w.BlockState.FinalizedHeight(victimBlock.Height) require.True(w.T(), ok) - require.Equal(w.T(), blockByHeight.Header.ID(), victimBlock.Header.ID()) + require.Equal(w.T(), blockByHeight.ID(), victimBlock.ID()) } // waitForExecutionResultCorruption waits within a timeout till wintermute orchestrator corrupts an execution result. 
diff --git a/integration/tests/bft/topicvalidator/orchestrator.go b/integration/tests/bft/topicvalidator/orchestrator.go deleted file mode 100644 index fac5f2a672a..00000000000 --- a/integration/tests/bft/topicvalidator/orchestrator.go +++ /dev/null @@ -1,227 +0,0 @@ -package topicvalidator - -import ( - "sync" - "testing" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/require" - "golang.org/x/exp/rand" - - "github.com/onflow/flow-go/insecure" - "github.com/onflow/flow-go/integration/tests/bft" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/utils/unittest" -) - -const ( - // numOfAuthorizedEvents the number of authorized events that will be created when the test orchestrator is initialized. - // The numOfAuthorizedEvents allows us to wait for a certain number of authorized messages to be received, this should - // give the network enough time to process the unauthorized messages. This ensures us that the unauthorized messages - // were indeed dropped and not unprocessed. - numOfAuthorizedEvents = 100 - - // numOfUnauthorizedEvents the number of unauthorized events per type to send by the test orchestrator. - numOfUnauthorizedEvents = 10 -) - -// Orchestrator represents an insecure.AttackOrchestrator track incoming unauthorized messages and authorized messages received by victim nodes. -type Orchestrator struct { - *bft.BaseOrchestrator - unauthorizedEventsReceived []flow.Identifier - authorizedEventsReceived []flow.Identifier - unauthorizedEvents map[flow.Identifier]*insecure.EgressEvent - authorizedEvents map[flow.Identifier]*insecure.EgressEvent - authorizedEventReceivedWg sync.WaitGroup - attackerAN flow.Identifier - attackerEN flow.Identifier - victimEN flow.Identifier - victimVN flow.Identifier -} - -var _ insecure.AttackOrchestrator = &Orchestrator{} - -func NewOrchestrator(t *testing.T, logger zerolog.Logger, attackerAN, attackerEN, victimEN, victimVN flow.Identifier) *Orchestrator { - orchestrator := &Orchestrator{ - BaseOrchestrator: &bft.BaseOrchestrator{ - T: t, - Logger: logger.With().Str("component", "bft-test-orchestrator").Logger(), - }, - unauthorizedEventsReceived: make([]flow.Identifier, 0), - authorizedEventsReceived: make([]flow.Identifier, 0), - unauthorizedEvents: make(map[flow.Identifier]*insecure.EgressEvent), - authorizedEvents: make(map[flow.Identifier]*insecure.EgressEvent), - authorizedEventReceivedWg: sync.WaitGroup{}, - attackerAN: attackerAN, - attackerEN: attackerEN, - victimEN: victimEN, - victimVN: victimVN, - } - - orchestrator.initUnauthorizedEvents() - orchestrator.initAuthorizedEvents() - orchestrator.OnIngressEvent = append(orchestrator.OnIngressEvent, orchestrator.trackIngressEvents) - return orchestrator -} - -// trackIngressEvents callback that will track any unauthorized messages that are expected to be blocked at the topic validator. -// It also tracks all the authorized messages that are expected to be delivered to the node. -func (o *Orchestrator) trackIngressEvents(event *insecure.IngressEvent) error { - // Track any unauthorized events that are received by corrupted nodes. - // These events are unauthorized combinations of messages & channels and should be - // dropped at the topic validator level. 
- if _, ok := o.unauthorizedEvents[event.FlowProtocolEventID]; ok { - o.unauthorizedEventsReceived = append(o.unauthorizedEventsReceived, event.FlowProtocolEventID) - o.Logger.Warn().Str("event_id", event.FlowProtocolEventID.String()).Msg("unauthorized ingress event received") - } - - // track all authorized events sent during test - if _, ok := o.authorizedEvents[event.FlowProtocolEventID]; ok { - o.authorizedEventsReceived = append(o.authorizedEventsReceived, event.FlowProtocolEventID) - o.authorizedEventReceivedWg.Done() - } - return nil -} - -// sendUnauthorizedMsgs publishes a few combinations of unauthorized messages from the corrupted AN to the corrupted EN. -func (o *Orchestrator) sendUnauthorizedMsgs(t *testing.T) { - for _, event := range o.unauthorizedEvents { - err := o.OrchestratorNetwork.SendEgress(event) - require.NoError(t, err) - } -} - -// sendAuthorizedMsgs sends a number of authorized messages. -func (o *Orchestrator) sendAuthorizedMsgs(t *testing.T) { - for _, event := range o.authorizedEvents { - err := o.OrchestratorNetwork.SendEgress(event) - require.NoError(t, err) - } -} - -// initUnauthorizedEvents returns combinations of unauthorized messages and channels. -func (o *Orchestrator) initUnauthorizedEvents() { - // message sent by unauthorized sender, AN is not authorized to publish block proposals - o.initUnauthorizedMsgByRoleEvents(numOfUnauthorizedEvents) - - // message sent on unauthorized channel, AN is not authorized send sync request on consensus committee channel - o.initUnauthorizedMsgOnChannelEvents(numOfUnauthorizedEvents) - - // message is not authorized to be sent via insecure.Protocol_UNICAST - // unicast stream handler is expected to drop this message - o.initUnauthorizedUnicastOnChannelEvents(numOfUnauthorizedEvents) - - // message is not authorized to be sent via insecure.Protocol_PUBLISH - o.initUnauthorizedPublishOnChannelEvents(numOfUnauthorizedEvents) -} - -// initAuthorizedEvents returns combinations of unauthorized messages and channels. -func (o *Orchestrator) initAuthorizedEvents() { - channel := channels.RequestChunks - for i := uint64(0); i < numOfAuthorizedEvents; i++ { - chunkDataReq := &messages.ChunkDataRequest{ - ChunkID: unittest.IdentifierFixture(), - Nonce: rand.Uint64(), - } - eventID := unittest.GetFlowProtocolEventID(o.T, channel, chunkDataReq) - event := &insecure.EgressEvent{ - CorruptOriginId: o.victimVN, - Channel: channel, - Protocol: insecure.Protocol_PUBLISH, - TargetNum: 0, - TargetIds: flow.IdentifierList{o.attackerEN}, - FlowProtocolEvent: chunkDataReq, - FlowProtocolEventID: eventID, - } - o.authorizedEvents[eventID] = event - o.authorizedEventReceivedWg.Add(1) - } -} - -// initUnauthorizedMsgByRoleEvents sets n number of events where the sender is unauthorized to -// send the FlowProtocolEvent. In this case AN's are not authorized to send block proposals. 
-func (o *Orchestrator) initUnauthorizedMsgByRoleEvents(n int) { - channel := channels.SyncCommittee - for i := 0; i < n; i++ { - unauthorizedProposal := unittest.ProposalFixture() - eventID := unittest.GetFlowProtocolEventID(o.T, channel, unauthorizedProposal) - unauthorizedMsgByRole := &insecure.EgressEvent{ - CorruptOriginId: o.attackerAN, - Channel: channel, - Protocol: insecure.Protocol_PUBLISH, - TargetNum: 0, - TargetIds: flow.IdentifierList{o.victimEN}, - FlowProtocolEvent: unauthorizedProposal, - FlowProtocolEventID: eventID, - } - o.unauthorizedEvents[eventID] = unauthorizedMsgByRole - } -} - -// initUnauthorizedMsgOnChannelEvents sets n number of events where the message is not -// authorized to be sent on the event channel. -func (o *Orchestrator) initUnauthorizedMsgOnChannelEvents(n int) { - channel := channels.PushReceipts - for i := 0; i < n; i++ { - syncReq := &messages.SyncRequest{ - Nonce: rand.Uint64(), - Height: rand.Uint64(), - } - eventID := unittest.GetFlowProtocolEventID(o.T, channel, syncReq) - unauthorizedMsgOnChannel := &insecure.EgressEvent{ - CorruptOriginId: o.attackerAN, - Channel: channel, - Protocol: insecure.Protocol_PUBLISH, - TargetNum: 0, - TargetIds: flow.IdentifierList{o.victimEN}, - FlowProtocolEvent: syncReq, - FlowProtocolEventID: eventID, - } - o.unauthorizedEvents[eventID] = unauthorizedMsgOnChannel - } -} - -// initUnauthorizedUnicastOnChannelEvents sets n number of events where the message is not -// authorized to be sent via insecure.Protocol_UNICAST on the event channel. -func (o *Orchestrator) initUnauthorizedUnicastOnChannelEvents(n int) { - channel := channels.SyncCommittee - for i := 0; i < n; i++ { - syncReq := &messages.SyncRequest{ - Nonce: rand.Uint64(), - Height: rand.Uint64(), - } - eventID := unittest.GetFlowProtocolEventID(o.T, channel, syncReq) - unauthorizedUnicastOnChannel := &insecure.EgressEvent{ - CorruptOriginId: o.attackerAN, - Channel: channel, - Protocol: insecure.Protocol_UNICAST, - TargetNum: 0, - TargetIds: flow.IdentifierList{o.victimEN}, - FlowProtocolEvent: syncReq, - FlowProtocolEventID: eventID, - } - o.unauthorizedEvents[eventID] = unauthorizedUnicastOnChannel - } -} - -// initUnauthorizedPublishOnChannelEvents sets n number of events where the message is not -// authorized to be sent via insecure.Protocol_PUBLISH on the event channel. 
-func (o *Orchestrator) initUnauthorizedPublishOnChannelEvents(n int) { - channel := channels.ProvideChunks - for i := 0; i < n; i++ { - chunkDataResponse := unittest.ChunkDataResponseMsgFixture(unittest.IdentifierFixture()) - eventID := unittest.GetFlowProtocolEventID(o.T, channel, chunkDataResponse) - unauthorizedPublishOnChannel := &insecure.EgressEvent{ - CorruptOriginId: o.attackerEN, - Channel: channel, - Protocol: insecure.Protocol_PUBLISH, - TargetNum: 0, - TargetIds: flow.IdentifierList{o.victimVN}, - FlowProtocolEvent: chunkDataResponse, - FlowProtocolEventID: eventID, - } - o.unauthorizedEvents[eventID] = unauthorizedPublishOnChannel - } -} diff --git a/integration/tests/bft/topicvalidator/suite.go b/integration/tests/bft/topicvalidator/suite.go deleted file mode 100644 index 3493d299611..00000000000 --- a/integration/tests/bft/topicvalidator/suite.go +++ /dev/null @@ -1,87 +0,0 @@ -package topicvalidator - -import ( - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/insecure" - "github.com/onflow/flow-go/integration/testnet" - "github.com/onflow/flow-go/integration/tests/bft" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -// Suite represents a test suite evaluating the correctness of different p2p topic validator -// validation conditions. -type Suite struct { - bft.BaseSuite - attackerANID flow.Identifier // corrupt attacker AN id - attackerENID flow.Identifier // corrupt attacker EN id - victimENID flow.Identifier // corrupt victim EN id - victimVNID flow.Identifier // corrupt victim VN id - Orchestrator *Orchestrator -} - -// SetupSuite runs a bare minimum Flow network to function correctly along with 2 attacker nodes -// and 2 victim nodes. -// - Corrupt AN that will serve as an attacker and send unauthorized messages to a victim EN. -// - Corrupt EN that will serve as an attacker and send unauthorized messages to a victim VN. -// - Corrupt EN with the topic validator enabled that will serve as a victim. -// - Corrupt VN with the topic validator enabled that will serve as a victim. -func (s *Suite) SetupSuite() { - s.BaseSuite.SetupSuite() - - // filter out base suite execution, verification and access nodes - s.NodeConfigs = s.NodeConfigs.Filter(func(n testnet.NodeConfig) bool { - if n.Ghost { - return true - } - return n.Role != flow.RoleExecution && n.Role != flow.RoleVerification && n.Role != flow.RoleAccess - }) - - s.NodeConfigs = append(s.NodeConfigs, testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.FatalLevel))) - - // create corrupt access node - s.attackerANID = unittest.IdentifierFixture() - s.NodeConfigs = append(s.NodeConfigs, testnet.NewNodeConfig(flow.RoleAccess, - testnet.WithID(s.attackerANID), - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.AsCorrupted())) - - // create corrupt verification node with the topic validator enabled. This is the victim - // node that will be published unauthorized messages from the attacker execution node. 
- s.victimVNID = unittest.IdentifierFixture() - verConfig := testnet.NewNodeConfig(flow.RoleVerification, - testnet.WithID(s.victimVNID), - testnet.WithAdditionalFlag("--topic-validator-disabled=false"), - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.AsCorrupted()) - s.NodeConfigs = append(s.NodeConfigs, verConfig) - - // generates two execution nodes, 1 of them will be corrupt - s.attackerENID = unittest.IdentifierFixture() - exe1Config := testnet.NewNodeConfig(flow.RoleExecution, - testnet.WithID(s.attackerENID), - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.AsCorrupted()) - s.NodeConfigs = append(s.NodeConfigs, exe1Config) - - // create corrupt execution node with the topic validator enabled. This is the victim - // node that will be published unauthorized messages from the attacker execution node. - s.victimENID = unittest.IdentifierFixture() - exe2Config := testnet.NewNodeConfig(flow.RoleExecution, - testnet.WithID(s.victimENID), - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.WithAdditionalFlag("--topic-validator-disabled=false"), - testnet.AsCorrupted()) - s.NodeConfigs = append(s.NodeConfigs, exe2Config) - - s.BaseSuite.StartCorruptedNetwork( - "bft_topic_validator_test", - 10_000, - 100_000, - func() insecure.AttackOrchestrator { - s.Orchestrator = NewOrchestrator(s.T(), s.Log, s.attackerANID, s.attackerENID, s.victimENID, s.victimVNID) - return s.Orchestrator - }, - ) -} diff --git a/integration/tests/bft/topicvalidator/topic_validator_test.go b/integration/tests/bft/topicvalidator/topic_validator_test.go deleted file mode 100644 index b0fab269309..00000000000 --- a/integration/tests/bft/topicvalidator/topic_validator_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package topicvalidator - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/utils/unittest" -) - -type TopicValidatorTestSuite struct { - Suite -} - -func TestTopicValidator(t *testing.T) { - suite.Run(t, new(TopicValidatorTestSuite)) -} - -// TestTopicValidatorE2E ensures that the libp2p topic validator is working as expected. -// This test will attempt to send multiple combinations of unauthorized messages + channel from -// a corrupted byzantine attacker node. The victim node should not receive any of these messages as they should -// be dropped due to failing message authorization validation at the topic validator. This test will also send -// a number of authorized messages that will be delivered and processed by the victim node, ensuring that the topic -// validator behaves as expected in the happy path. -func (s *TopicValidatorTestSuite) TestTopicValidatorE2E() { - s.Orchestrator.sendUnauthorizedMsgs(s.T()) - s.Orchestrator.sendAuthorizedMsgs(s.T()) - unittest.RequireReturnsBefore(s.T(), s.Orchestrator.authorizedEventReceivedWg.Wait, 5*time.Second, "could not send authorized messages on time") - - // Victim nodes are configured with the topic validator enabled, therefore they should not have - // received any of the unauthorized messages. - require.Equal(s.T(), 0, len(s.Orchestrator.unauthorizedEventsReceived), fmt.Sprintf("expected to not receive any unauthorized messages instead got: %d", len(s.Orchestrator.unauthorizedEventsReceived))) - - // Victim nodes should receive all the authorized events sent. 
- require.Equal(s.T(), numOfAuthorizedEvents, len(s.Orchestrator.authorizedEventsReceived), fmt.Sprintf("expected to receive %d authorized events got: %d", numOfAuthorizedEvents, len(s.Orchestrator.unauthorizedEventsReceived))) -} diff --git a/integration/tests/collection/ingress_test.go b/integration/tests/collection/ingress_test.go index bf6e5ec2535..7b0e0442642 100644 --- a/integration/tests/collection/ingress_test.go +++ b/integration/tests/collection/ingress_test.go @@ -11,7 +11,7 @@ import ( sdk "github.com/onflow/flow-go-sdk" - "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/access/validator" "github.com/onflow/flow-go/integration/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -47,7 +47,7 @@ func (suite *IngressSuite) TestTransactionIngress_InvalidTransaction() { col1 := suite.Collector(0, 0) client, err := col1.SDKClient() - require.Nil(t, err) + require.NoError(t, err) t.Run("missing reference block id", logStartFinish(func(t *testing.T) { malformed := suite.NextTransaction(func(tx *sdk.Transaction) { @@ -65,7 +65,7 @@ func (suite *IngressSuite) TestTransactionIngress_InvalidTransaction() { tx.SetScript(nil) }) - expected := access.IncompleteTransactionError{ + expected := validator.IncompleteTransactionError{ MissingFields: []string{flow.TransactionFieldScript.String()}, } @@ -112,17 +112,16 @@ func (suite *IngressSuite) TestTxIngress_SingleCluster() { col1 := suite.Collector(0, 0) client, err := col1.SDKClient() - require.Nil(t, err) + require.NoError(t, err) tx := suite.NextTransaction() - require.Nil(t, err) t.Log("sending transaction: ", tx.ID()) ctx, cancel := context.WithTimeout(suite.ctx, defaultTimeout) err = client.SendTransaction(ctx, *tx) cancel() - assert.Nil(t, err) + assert.NoError(t, err) t.Log("sent transaction: ", tx.ID()) // wait for the transaction to be included in a collection @@ -170,7 +169,7 @@ func (suite *IngressSuite) TestTxIngressMultiCluster_CorrectCluster() { // get a client pointing to the cluster member client, err := targetNode.SDKClient() - require.Nil(t, err) + require.NoError(t, err) tx := suite.TxForCluster(targetCluster) @@ -246,7 +245,7 @@ func (suite *IngressSuite) TestTxIngressMultiCluster_OtherCluster() { // create clients pointing to each other node client, err := otherNode.SDKClient() - require.Nil(t, err) + require.NoError(t, err) // create a transaction that will be routed to the target cluster tx := suite.TxForCluster(targetCluster) diff --git a/integration/tests/collection/proposal_test.go b/integration/tests/collection/proposal_test.go index 778e0af1800..68ec0e67e3f 100644 --- a/integration/tests/collection/proposal_test.go +++ b/integration/tests/collection/proposal_test.go @@ -13,11 +13,9 @@ import ( "github.com/onflow/flow-go/integration/convert" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" ) func TestMultiCluster(t *testing.T) { - unittest.SkipUnless(t, unittest.TEST_FLAKY, "flaky as it often hits port already allocated, since too many containers are created") suite.Run(t, new(MultiClusterSuite)) } diff --git a/integration/tests/collection/recovery_test.go b/integration/tests/collection/recovery_test.go index 6d1309df18c..182d5c9b274 100644 --- a/integration/tests/collection/recovery_test.go +++ b/integration/tests/collection/recovery_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/suite" client "github.com/onflow/flow-go-sdk/access/grpc" + "github.com/onflow/flow-go/integration/convert" 
"github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -118,9 +119,9 @@ func (suite *RecoverySuite) TestProposal_Recovery() { // ensure no progress was made (3/5 nodes cannot make progress) proposals := suite.AwaitProposals(10) - height := proposals[0].Header.Height + height := proposals[0].Height for _, prop := range proposals { - suite.Assert().LessOrEqual(prop.Header.Height, height+2) + suite.Assert().LessOrEqual(prop.Height, height+2) } // restart the paused collectors diff --git a/integration/tests/collection/suite.go b/integration/tests/collection/suite.go index edf06a1730a..218a9bca097 100644 --- a/integration/tests/collection/suite.go +++ b/integration/tests/collection/suite.go @@ -19,7 +19,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/messages" clusterstate "github.com/onflow/flow-go/state/cluster" clusterstateimpl "github.com/onflow/flow-go/state/cluster/badger" "github.com/onflow/flow-go/utils/unittest" @@ -82,7 +81,7 @@ func (suite *CollectorSuite) SetupTest(name string, nNodes, nClusters uint) { } colNodes := testnet.NewNodeConfigSet(nNodes, flow.RoleCollection, testnet.WithLogLevel(zerolog.InfoLevel), - testnet.WithAdditionalFlag("--block-rate-delay=1ms"), + testnet.WithAdditionalFlag("--hotstuff-proposal-duration=1ms"), ) suite.nClusters = nClusters @@ -102,7 +101,7 @@ func (suite *CollectorSuite) SetupTest(name string, nNodes, nClusters uint) { // create an account to use for sending transactions var err error - suite.acct.addr, suite.acct.key, suite.acct.signer, err = lib.GetAccount(suite.net.Root().Header.ChainID.Chain()) + suite.acct.addr, suite.acct.key, suite.acct.signer, err = lib.GetAccount(suite.net.Root().ChainID.Chain()) require.NoError(suite.T(), err) suite.serviceAccountIdx = 2 @@ -142,7 +141,7 @@ func (suite *CollectorSuite) Clusters() flow.ClusterList { setup, ok := result.ServiceEvents[0].Event.(*flow.EpochSetup) suite.Require().True(ok) - collectors := suite.net.Identities().Filter(filter.HasRole(flow.RoleCollection)) + collectors := suite.net.Identities().Filter(filter.HasRole[flow.Identity](flow.RoleCollection)).ToSkeleton() clusters, err := factory.NewClusterList(setup.Assignments, collectors) suite.Require().Nil(err) return clusters @@ -163,14 +162,14 @@ func (suite *CollectorSuite) NextTransaction(opts ...func(*sdk.Transaction)) *sd } err := tx.SignEnvelope(acct.addr, acct.key.Index, acct.signer) - require.Nil(suite.T(), err) + require.NoError(suite.T(), err) suite.acct.key.SequenceNumber++ return tx } -func (suite *CollectorSuite) TxForCluster(target flow.IdentityList) *sdk.Transaction { +func (suite *CollectorSuite) TxForCluster(target flow.IdentitySkeletonList) *sdk.Transaction { acct := suite.acct tx := suite.NextTransaction() @@ -179,12 +178,12 @@ func (suite *CollectorSuite) TxForCluster(target flow.IdentityList) *sdk.Transac // hash-grind the script until the transaction will be routed to target cluster for { - serviceAccountAddr, err := suite.net.Root().Header.ChainID.Chain().AddressAtIndex(suite.serviceAccountIdx) + serviceAccountAddr, err := suite.net.Root().ChainID.Chain().AddressAtIndex(suite.serviceAccountIdx) suite.Require().NoError(err) suite.serviceAccountIdx++ tx.SetScript(append(tx.Script, '/', '/')) err = tx.SignEnvelope(sdk.Address(serviceAccountAddr), acct.key.Index, acct.signer) - require.Nil(suite.T(), err) + require.NoError(suite.T(), err) routed, ok := 
clusters.ByTxID(convert.IDFromSDK(tx.ID())) require.True(suite.T(), ok) if routed.ID() == target.ID() { @@ -211,9 +210,8 @@ func (suite *CollectorSuite) AwaitProposals(n uint) []cluster.Block { suite.T().Logf("ghost recv: %T", msg) switch val := msg.(type) { - case *messages.ClusterBlockProposal: - block := val.Block.ToInternal() - blocks = append(blocks, *block) + case *cluster.Proposal: + blocks = append(blocks, val.Block) if len(blocks) == int(n) { return blocks } @@ -260,11 +258,10 @@ func (suite *CollectorSuite) AwaitTransactionsIncluded(txIDs ...flow.Identifier) require.Nil(suite.T(), err, "could not read next message") switch val := msg.(type) { - case *messages.ClusterBlockProposal: - block := val.Block.ToInternal() - header := block.Header + case *cluster.Proposal: + block := val.Block collection := block.Payload.Collection - suite.T().Logf("got collection from %v height=%d col_id=%x size=%d", originID, header.Height, collection.ID(), collection.Len()) + suite.T().Logf("got collection from %v height=%d col_id=%x size=%d", originID, block.Height, collection.ID(), collection.Len()) if guarantees[collection.ID()] { for _, txID := range collection.Light().Transactions { delete(lookup, txID) @@ -331,7 +328,7 @@ func (suite *CollectorSuite) Collector(clusterIdx, nodeIdx uint) *testnet.Contai node, ok := cluster.ByIndex(nodeIdx) require.True(suite.T(), ok, "invalid node index") - return suite.net.ContainerByID(node.ID()) + return suite.net.ContainerByID(node.NodeID) } // ClusterStateFor returns a cluster state instance for the collector node with the given ID. @@ -342,7 +339,8 @@ func (suite *CollectorSuite) ClusterStateFor(id flow.Identifier) *clusterstateim setup, ok := suite.net.Result().ServiceEvents[0].Event.(*flow.EpochSetup) suite.Require().True(ok, "could not get root seal setup") - rootBlock := clusterstate.CanonicalRootBlock(setup.Counter, myCluster) + rootBlock, err := clusterstate.CanonicalRootBlock(setup.Counter, myCluster) + suite.Require().NoError(err) node := suite.net.ContainerByID(id) db, err := node.DB() diff --git a/integration/tests/common/sealing_and_verification.go b/integration/tests/common/sealing_and_verification.go index e8833c5a07a..585cb3777be 100644 --- a/integration/tests/common/sealing_and_verification.go +++ b/integration/tests/common/sealing_and_verification.go @@ -35,21 +35,22 @@ func SealingAndVerificationHappyPathTest( // wait for next height finalized (potentially first height), called blockA, just to make sure consensus progresses. currentFinalized := blockState.HighestFinalizedHeight() blockA := blockState.WaitForHighestFinalizedProgress(t, currentFinalized) - t.Logf("blockA generated, height: %v ID: %v\n", blockA.Header.Height, blockA.Header.ID()) + t.Logf("blockA generated, height: %v ID: %v\n", blockA.Height, blockA.ID()) // sends a transaction - err := accessClient.DeployContract(context.Background(), sdk.Identifier(rootBlockId), lib.CounterContract) + tx, err := accessClient.DeployContract(context.Background(), sdk.Identifier(rootBlockId), lib.CounterContract) require.NoError(t, err, "could not deploy counter") - // waits until for a different state commitment for a finalized block, call that block blockB, - // which has more than one chunk on its execution result. 
- blockB, _ := lib.WaitUntilFinalizedStateCommitmentChanged(t, blockState, receiptState, lib.WithMinimumChunks(2)) - t.Logf("got blockB height %v ID %v\n", blockB.Header.Height, blockB.Header.ID()) + txRes, err := accessClient.WaitForExecuted(context.Background(), tx.ID()) + require.NoError(t, err, "could not wait for tx to be executed") + + blockBID := flow.Identifier(txRes.BlockID) + blockBHeight := txRes.BlockHeight // waits for the execution receipt of blockB from both execution nodes, and makes sure that there is no execution fork. - receiptB1 := receiptState.WaitForReceiptFrom(t, blockB.Header.ID(), exe1Id) + receiptB1 := receiptState.WaitForReceiptFrom(t, blockBID, exe1Id) t.Logf("receipt for blockB generated by execution node-1: %x result ID: %x\n", exe1Id, receiptB1.ExecutionResult.ID()) - receiptB2 := receiptState.WaitForReceiptFrom(t, blockB.Header.ID(), exe2Id) + receiptB2 := receiptState.WaitForReceiptFrom(t, blockBID, exe2Id) t.Logf("receipt for blockB generated by execution node-2: %x result ID: %x\n", exe2Id, receiptB2.ExecutionResult.ID()) require.Equal(t, receiptB1.ExecutionResult.ID(), receiptB2.ExecutionResult.ID(), "execution fork happened at blockB") @@ -68,11 +69,11 @@ func SealingAndVerificationHappyPathTest( // waits until blockB is sealed by consensus nodes after result approvals for all of its chunks are emitted. // waits until we seal a height equal to the victim block height - blockState.WaitForSealed(t, blockB.Header.Height) + blockState.WaitForSealedHeight(t, blockBHeight) // then checks that querying the victim block by height returns the victim block itself. - blockByHeight, ok := blockState.FinalizedHeight(blockB.Header.Height) + blockByHeight, ok := blockState.FinalizedHeight(blockBHeight) require.True(t, ok) - require.Equal(t, blockByHeight.Header.ID(), blockB.Header.ID()) + require.Equal(t, blockByHeight.ID(), blockBID) return []*flow.ExecutionReceipt{receiptB1, receiptB2}, approvals } diff --git a/integration/tests/consensus/inclusion_test.go b/integration/tests/consensus/inclusion_test.go index e36ef7dae8e..0716d716009 100644 --- a/integration/tests/consensus/inclusion_test.go +++ b/integration/tests/consensus/inclusion_test.go @@ -43,8 +43,6 @@ func (is *InclusionSuite) SetupTest() { is.log = unittest.LoggerForTest(is.Suite.T(), zerolog.InfoLevel) is.log.Info().Msgf("================> SetupTest") - // seed random generator - // to collect node configs... 
var nodeConfigs []testnet.NodeConfig @@ -119,7 +117,7 @@ func (is *InclusionSuite) TestCollectionGuaranteeIncluded() { require.NoError(t, err) sentinel.SignerIndices = signerIndices sentinel.ReferenceBlockID = is.net.Root().ID() - sentinel.ChainID = is.net.BootstrapData.ClusterRootBlocks[0].Header.ChainID + sentinel.ClusterChainID = is.net.BootstrapData.ClusterRootBlocks[0].ChainID colID := sentinel.CollectionID is.waitUntilSeenProposal(deadline) @@ -127,11 +125,11 @@ func (is *InclusionSuite) TestCollectionGuaranteeIncluded() { is.T().Logf("seen a proposal") // send collection to one consensus node - is.sendCollectionToConsensus(deadline, sentinel, is.conIDs[0]) + is.sendCollectionToConsensus(deadline, (*messages.CollectionGuarantee)(sentinel), is.conIDs[0]) proposal := is.waitUntilCollectionIncludeInProposal(deadline, sentinel) - is.T().Logf("collection guarantee %x included in a proposal %x\n", colID, proposal.Header.ID()) + is.T().Logf("collection guarantee %x included in a proposal %x\n", colID, proposal.ToHeader().ID()) is.waitUntilProposalConfirmed(deadline, sentinel, proposal) @@ -149,22 +147,22 @@ func (is *InclusionSuite) waitUntilSeenProposal(deadline time.Time) { } // we only care about block proposals at the moment - proposal, ok := msg.(*messages.BlockProposal) + proposal, ok := msg.(*flow.Proposal) if !ok { continue } - block := proposal.Block.ToInternal() + block := proposal.Block - is.T().Logf("receive block proposal from %v, height %v", originID, block.Header.Height) + is.T().Logf("receive block proposal from %v, height %v", originID, block.Height) // wait until proposal finalized - if block.Header.Height >= 1 { + if block.Height >= 1 { return } } is.T().Fatalf("%s timeout (deadline %s) waiting to see proposal", time.Now(), deadline) } -func (is *InclusionSuite) sendCollectionToConsensus(deadline time.Time, sentinel *flow.CollectionGuarantee, conID flow.Identifier) { +func (is *InclusionSuite) sendCollectionToConsensus(deadline time.Time, sentinel *messages.CollectionGuarantee, conID flow.Identifier) { colID := sentinel.CollectionID // keep trying to send collection guarantee to at least one consensus node @@ -198,22 +196,22 @@ func (is *InclusionSuite) waitUntilCollectionIncludeInProposal(deadline time.Tim } // we only care about block proposals at the moment - proposal, ok := msg.(*messages.BlockProposal) + proposal, ok := msg.(*flow.Proposal) if !ok { continue } - block := proposal.Block.ToInternal() + block := proposal.Block guarantees := block.Payload.Guarantees - height := block.Header.Height + height := block.Height is.T().Logf("receive block proposal height %v from %v, %v guarantees included in the payload!", height, originID, len(guarantees)) // check if the collection guarantee is included for _, guarantee := range guarantees { if guarantee.CollectionID == sentinel.CollectionID { - proposalID := block.Header.ID() + proposalID := block.ID() is.T().Logf("%x: collection guarantee %x included!\n", proposalID, colID) - return block + return &block } } } @@ -229,7 +227,7 @@ func (is *InclusionSuite) waitUntilProposalConfirmed(deadline time.Time, sentine // we try to find a block with the guarantee included and three confirmations confirmations := make(map[flow.Identifier]uint) // add the proposal that includes the guarantee - confirmations[block.Header.ID()] = 0 + confirmations[block.ID()] = 0 for time.Now().Before(deadline) { @@ -241,14 +239,14 @@ func (is *InclusionSuite) waitUntilProposalConfirmed(deadline time.Time, sentine } // we only care about block proposals 
at the moment - proposal, ok := msg.(*messages.BlockProposal) + proposal, ok := msg.(*flow.Proposal) if !ok { continue } - nextBlock := proposal.Block.ToInternal() + nextBlock := proposal.Block // check if the proposal was already processed - proposalID := nextBlock.Header.ID() + proposalID := nextBlock.ID() is.T().Logf("proposal %v received from %v", proposalID, originID) _, processed := confirmations[proposalID] @@ -259,7 +257,7 @@ func (is *InclusionSuite) waitUntilProposalConfirmed(deadline time.Time, sentine // if the parent is in the map, it is on a chain that included the // guarantee; take parent confirmations plus one as the confirmations // for the follow-up block - n, ok := confirmations[nextBlock.Header.ParentID] + n, ok := confirmations[nextBlock.ParentID] if ok { confirmations[proposalID] = n + 1 is.T().Logf("%x: collection guarantee %x confirmed! (count: %d)\n", proposalID, colID, n+1) diff --git a/integration/tests/consensus/sealing_test.go b/integration/tests/consensus/sealing_test.go index 4ef4aa57c88..b287c98764c 100644 --- a/integration/tests/consensus/sealing_test.go +++ b/integration/tests/consensus/sealing_test.go @@ -5,11 +5,11 @@ import ( "testing" "time" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/crypto" exeUtils "github.com/onflow/flow-go/engine/execution/utils" "github.com/onflow/flow-go/engine/ghost/client" verUtils "github.com/onflow/flow-go/engine/verification/utils" @@ -36,6 +36,7 @@ type SealingSuite struct { exe2SK crypto.PrivateKey verID flow.Identifier verSK crypto.PrivateKey + spocks []crypto.Signature reader *client.FlowMessageStreamReader } @@ -94,6 +95,8 @@ func (ss *SealingSuite) SetupTest() { nodeConfigs = append(nodeConfigs, verConfig) ss.log.Info().Msgf("verification ID: %v\n", ss.verID) + ss.spocks = unittest.SignaturesFixture(1) + nodeConfigs = append(nodeConfigs, testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.FatalLevel)), ) @@ -164,14 +167,14 @@ SearchLoop: } // we only care about block proposals at the moment - proposal, ok := msg.(*messages.BlockProposal) + proposal, ok := msg.(*flow.Proposal) if !ok { continue } - block := proposal.Block.ToInternal() + block := proposal.Block // make sure we skip duplicates - proposalID := block.Header.ID() + proposalID := block.ID() _, processed := confirmations[proposalID] if processed { continue @@ -179,12 +182,12 @@ confirmations[proposalID] = 0 // we map the proposal to its parent for later - parentID := block.Header.ParentID + parentID := block.ParentID parents[proposalID] = parentID ss.T().Logf("received block proposal height %v, view %v, id %v", - block.Header.Height, - block.Header.View, + block.Height, + block.View, proposalID) // we add one confirmation for each ancestor @@ -237,43 +240,46 @@ SearchLoop: // create the execution result for the target block result := flow.ExecutionResult{ - PreviousResultID: resultID, // need genesis result - BlockID: targetID, // refer the target block - Chunks: flow.ChunkList{&chunk}, // include only chunk + PreviousResultID: resultID, // need genesis result + BlockID: targetID, // refer the target block + Chunks: flow.ChunkList{&chunk}, // include only chunk + ExecutionDataID: unittest.IdentifierFixture(), // our fake execution data ID } ss.T().Logf("execution result generated (result: %x)\n", result.ID()) // create the execution receipt for the only execution node - receipt := flow.ExecutionReceipt{ - ExecutorID: 
ss.exeID, // our fake execution node - ExecutionResult: result, // result for target block - Spocks: nil, // ignored - ExecutorSignature: crypto.Signature{}, + receiptBody := flow.UnsignedExecutionReceipt{ + ExecutorID: ss.exeID, // our fake execution node + ExecutionResult: result, // result for target block + Spocks: ss.spocks, // our fake spocks } - // generates a signature over the execution result - id := receipt.ID() - sig, err := ss.exeSK.Sign(id[:], exeUtils.NewExecutionReceiptHasher()) + // create Full Execution Receipt by signing the previously-created receipt's body + unsignedReceiptID := receiptBody.ID() + sig, err := ss.exeSK.Sign(unsignedReceiptID[:], exeUtils.NewExecutionReceiptHasher()) require.NoError(ss.T(), err) - - receipt.ExecutorSignature = sig + receipt := messages.ExecutionReceipt{ + UnsignedExecutionReceipt: receiptBody, + ExecutorSignature: sig, + } // keep trying to send 2 matching execution receipt to the first consensus node - receipt2 := flow.ExecutionReceipt{ - ExecutorID: ss.exe2ID, // our fake execution node - ExecutionResult: result, // result for target block - Spocks: nil, // ignored - ExecutorSignature: crypto.Signature{}, + receiptBody2 := flow.UnsignedExecutionReceipt{ + ExecutorID: ss.exe2ID, // our fake execution node + ExecutionResult: result, // result for target block + Spocks: ss.spocks, // our fake spocks } - id = receipt2.ID() - sig2, err := ss.exe2SK.Sign(id[:], exeUtils.NewExecutionReceiptHasher()) + unsignedReceiptID2 := receiptBody2.ID() + sig2, err := ss.exe2SK.Sign(unsignedReceiptID2[:], exeUtils.NewExecutionReceiptHasher()) require.NoError(ss.T(), err) + receipt2 := messages.ExecutionReceipt{ + UnsignedExecutionReceipt: receiptBody2, + ExecutorSignature: sig2, + } - receipt2.ExecutorSignature = sig2 - - valid, err := ss.exe2SK.PublicKey().Verify(receipt2.ExecutorSignature, id[:], exeUtils.NewExecutionReceiptHasher()) + valid, err := ss.exe2SK.PublicKey().Verify(receipt2.ExecutorSignature, unsignedReceiptID2[:], exeUtils.NewExecutionReceiptHasher()) require.NoError(ss.T(), err) require.True(ss.T(), valid) @@ -310,7 +316,7 @@ ReceiptLoop: Attestation: atst, ApproverID: ss.verID, AttestationSignature: atstSign, - Spock: nil, + Spock: unittest.SignatureFixture(), } // generates a signature over result approval body @@ -318,7 +324,7 @@ ReceiptLoop: bodySign, err := ss.verSK.Sign(bodyID[:], verUtils.NewResultApprovalHasher()) require.NoError(ss.T(), err) - approval := flow.ResultApproval{ + approval := messages.ResultApproval{ Body: body, VerifierSignature: bodySign, } @@ -336,8 +342,13 @@ ApprovalLoop: } break ApprovalLoop } + internal, err := approval.ToInternal() + require.NoError(ss.T(), err) + + internalApproval, ok := internal.(*flow.ResultApproval) + require.True(ss.T(), ok) - ss.T().Logf("result approval submitted (approval: %x, result: %x)\n", approval.ID(), approval.Body.ExecutionResultID) + ss.T().Logf("result approval submitted (approval: %x, result: %x)\n", internalApproval.ID(), approval.Body.ExecutionResultID) // we try to find a block with the guarantee included and three confirmations found := false @@ -352,14 +363,14 @@ SealingLoop: } // we only care about block proposals at the moment - proposal, ok := msg.(*messages.BlockProposal) + proposal, ok := msg.(*flow.Proposal) if !ok { continue } - block := proposal.Block.ToInternal() + block := proposal.Block // log the proposal details - proposalID := block.Header.ID() + proposalID := block.ID() seals := block.Payload.Seals // if the block seal is included, we add the block to 
those we diff --git a/integration/tests/epochs/base_suite.go b/integration/tests/epochs/base_suite.go new file mode 100644 index 00000000000..7fcae7a2a7b --- /dev/null +++ b/integration/tests/epochs/base_suite.go @@ -0,0 +1,231 @@ +// Package epochs contains common functionality for the epoch integration test suite. +// Individual tests exist in sub-directories of this: cohort1, cohort2... +// Each cohort is run as a separate, sequential CI job. Since the epoch tests are long +// and resource-heavy, we split them into several cohorts, which can be run in parallel. +// +// If a new cohort is added in the future, it must be added to: +// - ci.yml, flaky-test-monitor.yml, bors.toml (ensure new cohort of tests is run) +// - Makefile (include new cohort in integration-test directive, etc.) +package epochs + +import ( + "context" + "fmt" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/ghost/client" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/utils/unittest" +) + +// BaseSuite encapsulates common functionality for epoch integration tests. +type BaseSuite struct { + suite.Suite + lib.TestnetStateTracker + cancel context.CancelFunc + Log zerolog.Logger + Net *testnet.FlowNetwork + ghostID flow.Identifier + + Client *testnet.Client + Ctx context.Context + + // these are used for any helper goroutines started for the test. + // We need to shut them down before stopping the network; however, canceling the network's + // context before stopping causes the testdock shutdown to fail. + HelperCtx context.Context + stopHelpers context.CancelFunc + + // Epoch config (lengths in views) + StakingAuctionLen uint64 + DKGPhaseLen uint64 + EpochLen uint64 + FinalizationSafetyThreshold uint64 + NumOfCollectionClusters int + // Whether approvals are required for sealing (we only enable for VN tests because + // requiring approvals requires a longer DKG period to avoid flakiness) + RequiredSealApprovals uint // defaults to 0 (no approvals required) + // Consensus Node proposal duration + ConsensusProposalDuration time.Duration + // NumOfConsensusNodes is the number of consensus nodes in the network + NumOfConsensusNodes uint +} + +// SetupTest is run automatically by the testing framework before each test case. 
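+// As a sanity check, SetupTest requires EpochLen > (StakingAuctionLen + 3*DKGPhaseLen + 20) + FinalizationSafetyThreshold and fails fast with "epoch too short" otherwise; for example, the dynamic-transition defaults (StakingAuctionLen=50, DKGPhaseLen=50, EpochLen=250, FinalizationSafetyThreshold=20) satisfy 250 > 220 + 20.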
+func (s *BaseSuite) SetupTest() { + if s.ConsensusProposalDuration == 0 { + s.ConsensusProposalDuration = time.Millisecond * 250 + } + if s.NumOfConsensusNodes == 0 { + s.NumOfConsensusNodes = 2 + } + + minEpochLength := s.StakingAuctionLen + s.DKGPhaseLen*3 + 20 + // ensure epoch lengths are set correctly + require.Greater(s.T(), s.EpochLen, minEpochLength+s.FinalizationSafetyThreshold, "epoch too short") + + s.Ctx, s.cancel = context.WithCancel(context.Background()) + s.HelperCtx, s.stopHelpers = context.WithCancel(s.Ctx) + s.Log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.Log.Info().Msg("================> SetupTest") + defer func() { + s.Log.Info().Msg("================> Finish SetupTest") + }() + + accessConfig := []func(*testnet.NodeConfig){ + testnet.WithLogLevel(zerolog.WarnLevel), + testnet.WithAdditionalFlag("--supports-observer=true"), + } + + collectionConfigs := []func(*testnet.NodeConfig){ + testnet.WithAdditionalFlag("--hotstuff-proposal-duration=100ms"), + testnet.WithLogLevel(zerolog.WarnLevel)} + + consensusConfigs := []func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag(fmt.Sprintf("--cruise-ctl-fallback-proposal-duration=%s", s.ConsensusProposalDuration)), + testnet.WithAdditionalFlag("--cruise-ctl-enabled=false"), // disable cruise control for integration tests + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", s.RequiredSealApprovals)), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", s.RequiredSealApprovals)), + testnet.WithLogLevel(zerolog.WarnLevel)} + + // a ghost node masquerading as an access node + s.ghostID = unittest.IdentifierFixture() + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.WithID(s.ghostID), + testnet.AsGhost()) + + confs := []testnet.NodeConfig{ + testnet.NewNodeConfig(flow.RoleAccess, accessConfig...), + testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), + testnet.NewNodeConfig(flow.RoleCollection, collectionConfigs...), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel), testnet.WithAdditionalFlag("--extensive-logging=true")), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel)), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.WarnLevel)), + ghostNode, + } + + for i := uint(0); i < s.NumOfConsensusNodes; i++ { + confs = append(confs, testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...)) + } + + netConf := testnet.NewNetworkConfigWithEpochConfig("epochs-tests", confs, s.StakingAuctionLen, s.DKGPhaseLen, s.EpochLen) + + // initialize the network + s.Net = testnet.PrepareFlowNetwork(s.T(), netConf, flow.Localnet) + + // start the network + s.Net.Start(s.Ctx) + + // start tracking blocks + s.Track(s.T(), s.HelperCtx, s.Ghost()) + + // use AN1 for test-related queries - the AN join/leave test will replace AN2 + client, err := s.Net.ContainerByName(testnet.PrimaryAN).TestnetClient() + require.NoError(s.T(), err) + + s.Client = client + + // log network info periodically to aid in debugging future flaky tests + go lib.LogStatusPeriodically(s.T(), s.HelperCtx, s.Log, s.Client, 5*time.Second) +} + +func (s *BaseSuite) TearDownTest() { + s.Log.Info().Msg("================> Start TearDownTest") + s.stopHelpers() // cancel before stopping network to ensure helper goroutines are stopped + s.Net.Remove() + s.cancel() + s.Log.Info().Msg("================> Finish 
TearDownTest") +} + +func (s *BaseSuite) Ghost() *client.GhostClient { + client, err := s.Net.ContainerByID(s.ghostID).GhostClient() + require.NoError(s.T(), err, "could not get ghost Client") + return client +} + +// TimedLogf logs the message using t.Log and the suite logger, but prefixes the current time. +// This enables viewing logs inline with Docker logs as well as other test logs. +func (s *BaseSuite) TimedLogf(msg string, args ...interface{}) { + s.Log.Info().Msgf(msg, args...) + args = append([]interface{}{time.Now().String()}, args...) + s.T().Logf("%s - "+msg, args...) +} + +// AwaitEpochPhase waits for the given phase, in the given epoch. +func (s *BaseSuite) AwaitEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase, waitFor, tick time.Duration) { + var actualEpoch uint64 + var actualPhase flow.EpochPhase + condition := func() bool { + snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) + require.NoError(s.T(), err) + epoch, err := snapshot.Epochs().Current() + require.NoError(s.T(), err) + actualEpoch = epoch.Counter() + actualPhase, err = snapshot.EpochPhase() + require.NoError(s.T(), err) + + return actualEpoch == expectedEpoch && actualPhase == expectedPhase + } + require.Eventuallyf(s.T(), condition, waitFor, tick, "did not reach expectedEpoch %d phase %s within %s. Last saw epoch=%d and phase=%s", expectedEpoch, expectedPhase, waitFor, actualEpoch, actualPhase) +} + +// GetContainersByRole returns all containers from the network for the specified role, making sure the containers are not ghost nodes. +// Since go maps have random iteration order the list of containers returned will be in random order. +func (s *BaseSuite) GetContainersByRole(role flow.Role) []*testnet.Container { + nodes := s.Net.ContainersByRole(role, false) + require.True(s.T(), len(nodes) > 0) + return nodes +} + +// AwaitFinalizedView polls until it observes that the latest finalized block has a view +// greater than or equal to the input view. This is used to wait until when an epoch +// transition must have happened. +func (s *BaseSuite) AwaitFinalizedView(ctx context.Context, view uint64, waitFor, tick time.Duration) { + require.Eventually(s.T(), func() bool { + finalized := s.GetLatestFinalizedHeader(ctx) + return finalized.View >= view + }, waitFor, tick) +} + +// GetLatestFinalizedHeader retrieves the latest finalized block, as reported in LatestSnapshot. +func (s *BaseSuite) GetLatestFinalizedHeader(ctx context.Context) *flow.Header { + snapshot := s.GetLatestProtocolSnapshot(ctx) + finalized, err := snapshot.Head() + require.NoError(s.T(), err) + return finalized +} + +// AssertInEpoch requires that the current epoch's counter (as of the latest finalized block) is equal to the counter value provided. +func (s *BaseSuite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) { + actualEpoch := s.CurrentEpoch(ctx) + require.Equalf(s.T(), expectedEpoch, actualEpoch, "expected to be in epoch %d got %d", expectedEpoch, actualEpoch) +} + +// CurrentEpoch returns the current epoch counter (as of the latest finalized block). +func (s *BaseSuite) CurrentEpoch(ctx context.Context) uint64 { + snapshot := s.GetLatestProtocolSnapshot(ctx) + epoch, err := snapshot.Epochs().Current() + require.NoError(s.T(), err) + return epoch.Counter() +} + +// GetLatestProtocolSnapshot returns the protocol snapshot as of the latest finalized block. 
+func (s *BaseSuite) GetLatestProtocolSnapshot(ctx context.Context) *inmem.Snapshot { + snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) + require.NoError(s.T(), err) + return snapshot +} + +// GetDKGEndView returns the end view of the dkg. +func (s *BaseSuite) GetDKGEndView() uint64 { + return s.StakingAuctionLen + (s.DKGPhaseLen * 3) +} diff --git a/integration/tests/epochs/cohort1/epoch_dynamic_bootstrap_in_efm_test.go b/integration/tests/epochs/cohort1/epoch_dynamic_bootstrap_in_efm_test.go new file mode 100644 index 00000000000..5f9927cd5ea --- /dev/null +++ b/integration/tests/epochs/cohort1/epoch_dynamic_bootstrap_in_efm_test.go @@ -0,0 +1,73 @@ +package cohort1 + +import ( + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/epochs" + "github.com/onflow/flow-go/model/flow" +) + +func TestDynamicBootstrapInEFM(t *testing.T) { + suite.Run(t, new(DynamicBootstrapInEFMSuite)) +} + +type DynamicBootstrapInEFMSuite struct { + epochs.DynamicEpochTransitionSuite +} + +// TestDynamicBootstrapInEFM tests the dynamic bootstrap in EFM. First, the test pauses the collection node to trigger EFM. +// After triggering EFM, the test waits for the EpochPhaseFallback phase of the first epoch to begin and then starts an observer. +// We specifically start an observer node since it can join the network anytime. +// Finally, we ensure that the node makes progress and finalizes blocks after dynamic bootstrap in EFM. +func (s *DynamicBootstrapInEFMSuite) TestDynamicBootstrapInEFM() { + // pause collection node to trigger EFM because of failed DKG + ln := s.GetContainersByRole(flow.RoleCollection)[0] + _ = ln.Pause() + + s.TimedLogf("waiting for EpochPhaseFallback phase of first epoch to begin") + s.AwaitEpochPhase(s.Ctx, 0, flow.EpochPhaseFallback, 2*time.Minute, time.Second) + s.TimedLogf("successfully reached EpochPhaseFallback phase of first epoch") + + snapshot, err := s.Client.GetLatestProtocolSnapshot(s.Ctx) + require.NoError(s.T(), err) + epochProtocolState, err := snapshot.EpochProtocolState() + require.NoError(s.T(), err) + require.True(s.T(), epochProtocolState.EpochFallbackTriggered()) + + header, err := snapshot.Head() + require.NoError(s.T(), err) + segment, err := snapshot.SealingSegment() + require.NoError(s.T(), err) + s.TimedLogf("retrieved header after entering EpochPhaseFallback phase: root_height=%d, root_view=%d, segment_heights=[%d-%d], segment_views=[%d-%d]", + header.Height, header.View, + segment.Sealed().Height, segment.Highest().Height, + segment.Sealed().View, segment.Highest().View) + + observerConf := testnet.ObserverConfig{ + ContainerName: "observer_1", + LogLevel: zerolog.WarnLevel, + } + testContainer := s.Net.AddObserver(s.T(), observerConf) + testContainer.WriteRootSnapshot(snapshot) + testContainer.Container.Start(s.Ctx) + s.TimedLogf("successfully started observer") + + observerClient, err := testContainer.TestnetClient() + require.NoError(s.T(), err) + + // ensure node makes progress and finalizes blocks after dynamic bootstrap in EFM + targetFinalizedView := header.View + 10 + require.Eventually(s.T(), func() bool { + observerSnapshot, err := observerClient.GetLatestProtocolSnapshot(s.Ctx) + require.NoError(s.T(), err) + finalized, err := observerSnapshot.Head() + require.NoError(s.T(), err) + return finalized.View >= targetFinalizedView + }, 30*time.Second, time.Second) +} diff --git 
a/integration/tests/epochs/cohort1/epoch_join_and_leave_an_test.go b/integration/tests/epochs/cohort1/epoch_join_and_leave_an_test.go new file mode 100644 index 00000000000..2341e2d31ac --- /dev/null +++ b/integration/tests/epochs/cohort1/epoch_join_and_leave_an_test.go @@ -0,0 +1,24 @@ +package cohort1 + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/integration/tests/epochs" + "github.com/onflow/flow-go/model/flow" +) + +func TestEpochJoinAndLeaveAN(t *testing.T) { + suite.Run(t, new(EpochJoinAndLeaveANSuite)) +} + +type EpochJoinAndLeaveANSuite struct { + epochs.DynamicEpochTransitionSuite +} + +// TestEpochJoinAndLeaveAN should update access nodes and assert healthy network conditions +// after the epoch transition completes. See health check function for details. +func (s *EpochJoinAndLeaveANSuite) TestEpochJoinAndLeaveAN() { + s.RunTestEpochJoinAndLeave(flow.RoleAccess, s.AssertNetworkHealthyAfterANChange) +} diff --git a/integration/tests/epochs/cohort1/epoch_join_and_leave_ln_test.go b/integration/tests/epochs/cohort1/epoch_join_and_leave_ln_test.go new file mode 100644 index 00000000000..26c40b102d1 --- /dev/null +++ b/integration/tests/epochs/cohort1/epoch_join_and_leave_ln_test.go @@ -0,0 +1,24 @@ +package cohort1 + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/integration/tests/epochs" + "github.com/onflow/flow-go/model/flow" +) + +func TestEpochJoinAndLeaveLN(t *testing.T) { + suite.Run(t, new(EpochJoinAndLeaveLNSuite)) +} + +type EpochJoinAndLeaveLNSuite struct { + epochs.DynamicEpochTransitionSuite +} + +// TestEpochJoinAndLeaveLN should update collection nodes and assert healthy network conditions +// after the epoch transition completes. See health check function for details. +func (s *EpochJoinAndLeaveLNSuite) TestEpochJoinAndLeaveLN() { + s.RunTestEpochJoinAndLeave(flow.RoleCollection, s.AssertNetworkHealthyAfterLNChange) +} diff --git a/integration/tests/epochs/epoch_static_transition_test.go b/integration/tests/epochs/cohort1/epoch_static_transition_test.go similarity index 79% rename from integration/tests/epochs/epoch_static_transition_test.go rename to integration/tests/epochs/cohort1/epoch_static_transition_test.go index d8ede87166f..13775697794 100644 --- a/integration/tests/epochs/epoch_static_transition_test.go +++ b/integration/tests/epochs/cohort1/epoch_static_transition_test.go @@ -1,4 +1,4 @@ -package epochs +package cohort1 import ( "testing" @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/integration/tests/epochs" "github.com/onflow/flow-go/model/flow" ) @@ -17,7 +18,7 @@ func TestEpochStaticTransition(t *testing.T) { // StaticEpochTransitionSuite is the suite used for epoch transition tests // with a static identity table. 
type StaticEpochTransitionSuite struct { - Suite + epochs.DynamicEpochTransitionSuite } func (s *StaticEpochTransitionSuite) SetupTest() { @@ -26,10 +27,10 @@ func (s *StaticEpochTransitionSuite) SetupTest() { s.StakingAuctionLen = 10 s.DKGPhaseLen = 50 s.EpochLen = 300 - s.EpochCommitSafetyThreshold = 50 + s.FinalizationSafetyThreshold = 50 // run the generic setup, which starts up the network - s.Suite.SetupTest() + s.BaseSuite.SetupTest() } // TestStaticEpochTransition asserts epoch state transitions over full epoch @@ -40,32 +41,32 @@ func (s *StaticEpochTransitionSuite) SetupTest() { func (s *StaticEpochTransitionSuite) TestStaticEpochTransition() { s.TimedLogf("waiting for EpochSetup phase of first epoch to begin") - s.AwaitEpochPhase(s.ctx, 0, flow.EpochPhaseSetup, time.Minute, 500*time.Millisecond) + s.AwaitEpochPhase(s.Ctx, 0, flow.EpochPhaseSetup, time.Minute, 500*time.Millisecond) s.TimedLogf("successfully reached EpochSetup phase of first epoch") - snapshot, err := s.client.GetLatestProtocolSnapshot(s.ctx) + snapshot, err := s.Client.GetLatestProtocolSnapshot(s.Ctx) require.NoError(s.T(), err) header, err := snapshot.Head() require.NoError(s.T(), err) s.TimedLogf("retrieved header after entering EpochSetup phase: height=%d, view=%d", header.Height, header.View) - epoch1FinalView, err := snapshot.Epochs().Current().FinalView() - require.NoError(s.T(), err) - epoch1Counter, err := snapshot.Epochs().Current().Counter() + epoch1, err := snapshot.Epochs().Current() require.NoError(s.T(), err) + epoch1FinalView := epoch1.FinalView() + epoch1Counter := epoch1.Counter() // wait for the first view of the second epoch s.TimedLogf("waiting for the first view (%d) of second epoch %d", epoch1FinalView+1, epoch1Counter+1) - s.AwaitFinalizedView(s.ctx, epoch1FinalView+1, 4*time.Minute, 500*time.Millisecond) + s.AwaitFinalizedView(s.Ctx, epoch1FinalView+1, 4*time.Minute, 500*time.Millisecond) s.TimedLogf("finalized first view (%d) of second epoch %d", epoch1FinalView+1, epoch1Counter+1) // assert transition to second epoch happened as expected // if counter is still 0, epoch emergency fallback was triggered and we can fail early - s.AssertInEpoch(s.ctx, epoch1Counter+1) + s.AssertInEpoch(s.Ctx, epoch1Counter+1) // submit a smoke test transaction to verify the network can seal a transaction s.TimedLogf("sending smoke test transaction in second epoch") - s.submitSmokeTestTransaction(s.ctx) + s.SubmitSmokeTestTransaction(s.Ctx) s.TimedLogf("successfully submitted and observed sealing of smoke test transaction") } diff --git a/integration/tests/epochs/cohort2/epoch_join_and_leave_sn_test.go b/integration/tests/epochs/cohort2/epoch_join_and_leave_sn_test.go new file mode 100644 index 00000000000..1dfd01fec0a --- /dev/null +++ b/integration/tests/epochs/cohort2/epoch_join_and_leave_sn_test.go @@ -0,0 +1,28 @@ +package cohort2 + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/integration/tests/epochs" + "github.com/onflow/flow-go/model/flow" +) + +func TestEpochJoinAndLeaveSN(t *testing.T) { + suite.Run(t, new(EpochJoinAndLeaveSNSuite)) +} + +type EpochJoinAndLeaveSNSuite struct { + epochs.DynamicEpochTransitionSuite +} + +func (s *EpochJoinAndLeaveSNSuite) SetupTest() { + s.DynamicEpochTransitionSuite.SetupTest() +} + +// TestEpochJoinAndLeaveSN should update consensus nodes and assert healthy network conditions +// after the epoch transition completes. See health check function for details. 
+func (s *EpochJoinAndLeaveSNSuite) TestEpochJoinAndLeaveSN() { + s.RunTestEpochJoinAndLeave(flow.RoleConsensus, s.AssertNetworkHealthyAfterSNChange) +} diff --git a/integration/tests/epochs/epoch_join_and_leave_vn_test.go b/integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go similarity index 81% rename from integration/tests/epochs/epoch_join_and_leave_vn_test.go rename to integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go index f5ea2b09de0..8bf79e52d7b 100644 --- a/integration/tests/epochs/epoch_join_and_leave_vn_test.go +++ b/integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go @@ -1,10 +1,11 @@ -package epochs +package cohort2 import ( "testing" "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/integration/tests/epochs" "github.com/onflow/flow-go/model/flow" ) @@ -13,7 +14,7 @@ func TestEpochJoinAndLeaveVN(t *testing.T) { } type EpochJoinAndLeaveVNSuite struct { - DynamicEpochTransitionSuite + epochs.DynamicEpochTransitionSuite } func (s *EpochJoinAndLeaveVNSuite) SetupTest() { @@ -26,12 +27,12 @@ func (s *EpochJoinAndLeaveVNSuite) SetupTest() { s.StakingAuctionLen = 100 s.DKGPhaseLen = 100 s.EpochLen = 450 - s.EpochCommitSafetyThreshold = 20 - s.DynamicEpochTransitionSuite.SetupTest() + s.FinalizationSafetyThreshold = 20 + s.BaseSuite.SetupTest() } // TestEpochJoinAndLeaveVN should update verification nodes and assert healthy network conditions // after the epoch transition completes. See health check function for details. func (s *EpochJoinAndLeaveVNSuite) TestEpochJoinAndLeaveVN() { - s.runTestEpochJoinAndLeave(flow.RoleVerification, s.assertNetworkHealthyAfterVNChange) + s.RunTestEpochJoinAndLeave(flow.RoleVerification, s.AssertNetworkHealthyAfterVNChange) } diff --git a/integration/tests/epochs/cohort2/epoch_recover_from_efm_test.go b/integration/tests/epochs/cohort2/epoch_recover_from_efm_test.go new file mode 100644 index 00000000000..5e652c5140c --- /dev/null +++ b/integration/tests/epochs/cohort2/epoch_recover_from_efm_test.go @@ -0,0 +1,431 @@ +package cohort2 + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/onflow/cadence" + "github.com/onflow/flow-core-contracts/lib/go/templates" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + sdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/integration/tests/epochs" + "github.com/onflow/flow-go/integration/utils" + "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestRecoverEpoch(t *testing.T) { + suite.Run(t, new(RecoverEpochSuite)) +} + +// Suite encapsulates common functionality for epoch integration tests. +type RecoverEpochSuite struct { + epochs.BaseSuite +} + +func (s *RecoverEpochSuite) SetupTest() { + // use a shorter staking auction because we don't have staking operations in this case + s.StakingAuctionLen = 2 + // to manually trigger EFM we assign very short dkg phase len ensuring the dkg will fail + s.DKGPhaseLen = 30 + s.EpochLen = 150 + s.FinalizationSafetyThreshold = 20 + s.NumOfCollectionClusters = 1 + // we need to use 4 consensus nodes to be able to eject a single node and still have a super-majority and + // have a Random Beacon committee which meets the protocol.RandomBeaconSafetyThreshold. 
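+ // Illustrative arithmetic (assuming the threshold formula t = floor((n-1)/2) from `RandomBeaconThreshold` in package module/signature): with n = 4 the threshold is t = 1, and it remains t = 1 for a committee of 3, so t+1 = 2 key shares still suffice to reconstruct the group signature after one node is ejected.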
+ s.NumOfConsensusNodes = 4 + + // run the generic setup, which starts up the network + s.BaseSuite.SetupTest() +} + +// getNodeInfoDirs returns the internal node private info dir and the node config dir from a container with the specified role. +func (s *RecoverEpochSuite) getNodeInfoDirs(role flow.Role) (string, string) { + bootstrapPath := s.GetContainersByRole(role)[0].BootstrapPath() + internalNodePrivInfoDir := fmt.Sprintf("%s/%s", bootstrapPath, bootstrap.DirPrivateRoot) + nodeConfigJson := fmt.Sprintf("%s/%s", bootstrapPath, bootstrap.PathNodeInfosPub) + return internalNodePrivInfoDir, nodeConfigJson +} + +// executeEFMRecoverTXArgsCMD executes the efm-recover-tx-args CLI command to generate EpochRecover transaction arguments. +// Args: +// +// collectionClusters: the number of collector clusters. +// numViewsInEpoch: the number of views in the recovery epoch. +// numViewsInStakingAuction: the number of views in the staking auction of the recovery epoch. +// recoveryEpochCounter: the counter of the recovery epoch. +// recoveryEpochTargetDuration: the target duration for the recovery epoch. +// unsafeAllowOverWrite: when false, initialize a new epoch rather than overwriting the current epoch data. +// +// Returns: +// +// []cadence.Value: the transaction arguments. +func (s *RecoverEpochSuite) executeEFMRecoverTXArgsCMD( + collectionClusters int, + numViewsInEpoch, + numViewsInStakingAuction, + recoveryEpochCounter, + recoveryEpochTargetDuration uint64, + unsafeAllowOverWrite bool, +) []cadence.Value { + // read internal node info from one of the consensus nodes + internalNodePrivInfoDir, nodeConfigJson := s.getNodeInfoDirs(flow.RoleConsensus) + snapshot := s.GetLatestProtocolSnapshot(s.Ctx) + txArgs, err := run.GenerateRecoverEpochTxArgs( + s.Log, + internalNodePrivInfoDir, + nodeConfigJson, + collectionClusters, + recoveryEpochCounter, + flow.Localnet, + numViewsInStakingAuction, + numViewsInEpoch, + recoveryEpochTargetDuration, + unsafeAllowOverWrite, + nil, + nil, + snapshot, + ) + require.NoError(s.T(), err) + return txArgs +} + +// recoverEpoch submits the recover epoch transaction to the network. +func (s *RecoverEpochSuite) recoverEpoch(env templates.Environment, args []cadence.Value) *sdk.TransactionResult { + latestBlockID, err := s.Client.GetLatestBlockID(s.Ctx) + require.NoError(s.T(), err) + + tx, err := utils.MakeRecoverEpochTx( + env, + s.Client.Account(), + 0, + sdk.Identifier(latestBlockID), + args, + ) + require.NoError(s.T(), err) + + err = s.Client.SignAndSendTransaction(s.Ctx, tx) + require.NoError(s.T(), err) + result, err := s.Client.WaitForSealed(s.Ctx, tx.ID()) + require.NoError(s.T(), err) + s.Client.Account().Keys[0].SequenceNumber++ + require.NoError(s.T(), result.Error) + + return result +} + +// TestRecoverEpoch ensures that the recover epoch governance transaction flow works as expected, i.e. +// a network that entered Epoch Fallback Mode successfully recovers. +// Here, we are testing a scenario where the consensus committee is equal to the Random Beacon committee, i.e., +// no changes to the identity table between epoch start and submitting the recover epoch transaction were made. +// This test will do the following: +// 1. Triggers EFM by turning off the sole collection node before the end of the DKG forcing the DKG to fail. +// 2. Generates epoch recover transaction args using the efm-recover-tx-args CLI command. +// 3. Submit recover epoch transaction. +// 4. Ensure expected EpochRecover event is emitted. +// 5. 
Ensure the network transitions into the recovery epoch and finalizes the first view of the recovery epoch. +func (s *RecoverEpochSuite) TestRecoverEpoch() { + // 1. Manually trigger EFM + // pause the collection node to trigger EFM by failing DKG + ln := s.GetContainersByRole(flow.RoleCollection)[0] + require.NoError(s.T(), ln.Pause()) + s.AwaitFinalizedView(s.Ctx, s.GetDKGEndView(), 2*time.Minute, 500*time.Millisecond) + // start the paused collection node now that we are in EFM + require.NoError(s.T(), ln.Start()) + + // get final view from the latest snapshot + epoch1, err := s.Net.BootstrapSnapshot.Epochs().Current() + require.NoError(s.T(), err) + epoch1FinalView := epoch1.FinalView() + + // Wait for at least the first view past the current epoch's original FinalView to be finalized. + s.TimedLogf("waiting for epoch transition (finalized view %d)", epoch1FinalView+1) + s.AwaitFinalizedView(s.Ctx, epoch1FinalView+1, 2*time.Minute, 500*time.Millisecond) + s.TimedLogf("observed finalized view %d", epoch1FinalView+1) + + // assert that we are in EFM + snapshot, err := s.Client.GetLatestProtocolSnapshot(s.Ctx) + require.NoError(s.T(), err) + epochPhase, err := snapshot.EpochPhase() + require.NoError(s.T(), err) + require.Equal(s.T(), flow.EpochPhaseFallback, epochPhase, "network must enter EFM by this point") + + // 2. Generate transaction arguments for epoch recover transaction. + collectionClusters := s.NumOfCollectionClusters + numViewsInRecoveryEpoch := s.EpochLen + numViewsInStakingAuction := s.StakingAuctionLen + epochCounter := uint64(1) + + txArgs := s.executeEFMRecoverTXArgsCMD( + collectionClusters, + numViewsInRecoveryEpoch, + numViewsInStakingAuction, + epochCounter, + // cruise control is disabled for integration tests + // targetDuration and targetEndTime will be ignored + 3000, + // unsafeAllowOverWrite set to false, initialize new epoch + false, + ) + + // 3. Submit recover epoch transaction to the network. + env := utils.LocalnetEnv() + result := s.recoverEpoch(env, txArgs) + require.NoError(s.T(), result.Error) + require.Equal(s.T(), result.Status, sdk.TransactionStatusSealed) + + // 4. Ensure expected EpochRecover event is emitted. + eventType := "" + for _, evt := range result.Events { + if strings.Contains(evt.Type, "FlowEpoch.EpochRecover") { + eventType = evt.Type + break + } + } + require.NotEmpty(s.T(), eventType, "expected FlowEpoch.EpochRecover event type") + events, err := s.Client.GetEventsForBlockIDs(s.Ctx, eventType, []sdk.Identifier{result.BlockID}) + require.NoError(s.T(), err) + require.Equal(s.T(), events[0].Events[0].Type, eventType) + + // 5. Ensure the network transitions into the recovery epoch and finalizes the first view of the recovery epoch. + startViewOfNextEpoch := uint64(txArgs[1].(cadence.UInt64)) + s.TimedLogf("waiting to transition into recovery epoch (finalized view %d)", startViewOfNextEpoch) + s.AwaitFinalizedView(s.Ctx, startViewOfNextEpoch, 2*time.Minute, 500*time.Millisecond) + s.TimedLogf("observed finalized first view of recovery epoch %d", startViewOfNextEpoch) + + s.AssertInEpoch(s.Ctx, 1) +} + +// TestRecoverEpochNodeEjected ensures that the recover epoch governance transaction flow works as expected, and a network that +// enters Epoch Fallback Mode can successfully recover. +// For this specific scenario, we are testing a scenario where the consensus committee is a subset of the Random Beacon committee, i.e., +// a node was ejected between epoch start and submitting the recover epoch transaction. 
+// This test will do the following: +// 1. Triggers EFM by turning off the sole collection node before the end of the DKG forcing the DKG to fail. +// 2. Generates epoch recover transaction args using the epoch efm-recover-tx-args. +// 3. Eject consensus node by modifying the snapshot before generating the recover epoch transaction args. +// 4. Submit recover epoch transaction. +// 5. Ensure expected EpochRecover event is emitted. +// 6. Ensure the network transitions into the recovery epoch and finalizes the first view of the recovery epoch. +func (s *RecoverEpochSuite) TestRecoverEpochNodeEjected() { + // 1. Manually trigger EFM + + // pause the collection node to trigger EFM by failing DKG + ln := s.GetContainersByRole(flow.RoleCollection)[0] + require.NoError(s.T(), ln.Pause()) + s.AwaitFinalizedView(s.Ctx, s.GetDKGEndView(), 2*time.Minute, 500*time.Millisecond) + // start the paused collection node now that we are in EFM + require.NoError(s.T(), ln.Start()) + + // get final view from the latest snapshot + epoch1, err := s.Net.BootstrapSnapshot.Epochs().Current() + require.NoError(s.T(), err) + epoch1FinalView := epoch1.FinalView() + + // Wait for at least the first view past the current epoch's original FinalView to be finalized. + s.TimedLogf("waiting for epoch transition (finalized view %d)", epoch1FinalView+1) + s.AwaitFinalizedView(s.Ctx, epoch1FinalView+1, 2*time.Minute, 500*time.Millisecond) + s.TimedLogf("observed finalized view %d", epoch1FinalView+1) + + // assert that we are in EFM + snapshot, err := s.Client.GetLatestProtocolSnapshot(s.Ctx) + require.NoError(s.T(), err) + epochPhase, err := snapshot.EpochPhase() + require.NoError(s.T(), err) + require.Equal(s.T(), flow.EpochPhaseFallback, epochPhase, "network must enter EFM by this point") + + // 2. Generate transaction arguments for epoch recover transaction. + collectionClusters := s.NumOfCollectionClusters + recoveryEpochCounter := uint64(1) + + // read internal node info from one of the consensus nodes + internalNodePrivInfoDir, nodeConfigJson := s.getNodeInfoDirs(flow.RoleConsensus) + // 3. Eject consensus node by modifying the snapshot before generating the recover epoch transaction args. + snapshot.Encodable().SealingSegment.LatestProtocolStateEntry().EpochEntry.CurrentEpochIdentityTable. + Filter(filter.HasRole[flow.Identity](flow.RoleConsensus))[0].EpochParticipationStatus = flow.EpochParticipationStatusEjected + + txArgs, err := run.GenerateRecoverEpochTxArgs( + s.Log, + internalNodePrivInfoDir, + nodeConfigJson, + collectionClusters, + recoveryEpochCounter, + flow.Localnet, + s.StakingAuctionLen, + s.EpochLen, + 3000, + false, + nil, + nil, + snapshot, + ) + require.NoError(s.T(), err) + + // 4. Submit recover epoch transaction to the network. + env := utils.LocalnetEnv() + result := s.recoverEpoch(env, txArgs) + require.NoError(s.T(), result.Error) + require.Equal(s.T(), result.Status, sdk.TransactionStatusSealed) + + // 5. Ensure expected EpochRecover event is emitted. + eventType := "" + for _, evt := range result.Events { + if strings.Contains(evt.Type, "FlowEpoch.EpochRecover") { + eventType = evt.Type + break + } + } + require.NotEmpty(s.T(), eventType, "expected FlowEpoch.EpochRecover event type") + events, err := s.Client.GetEventsForBlockIDs(s.Ctx, eventType, []sdk.Identifier{result.BlockID}) + require.NoError(s.T(), err) + require.Equal(s.T(), events[0].Events[0].Type, eventType) + + // 6. Ensure the network transitions into the recovery epoch and finalizes the first view of the recovery epoch. 
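+ // the recover-epoch transaction args are positional: txArgs[1] carries the first view of the recovery epoch as a cadence.UInt64, which is the view we wait to see finalized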
+ startViewOfNextEpoch := uint64(txArgs[1].(cadence.UInt64)) + s.TimedLogf("waiting to transition into recovery epoch (finalized view %d)", startViewOfNextEpoch) + s.AwaitFinalizedView(s.Ctx, startViewOfNextEpoch, 2*time.Minute, 500*time.Millisecond) + s.TimedLogf("observed finalized first view of recovery epoch %d", startViewOfNextEpoch) + + s.AssertInEpoch(s.Ctx, 1) +} + +// TestRecoverEpochEjectNodeDifferentDKG ensures that the recover epoch governance transaction flow works as expected, and a network that +// enters Epoch Fallback Mode can successfully recover. +// Here, we are testing a scenario where the consensus committee 𝒞 and Random Beacon committee 𝒟 form a symmetric difference with +// cardinality 1. Formally, |𝒞 ∖ 𝒟| = 1 and |𝒟 \ 𝒞| = 1. In other words, there is a node which is part of the consensus committee but not +// part of the Random Beacon committee and another node which is part of the Random Beacon committee but not part of the consensus committee. +// We remove the first consensus node from the Consensus Committee, and the last consensus node from the Random Beacon Committee. For example, +// if the original consensus set is {A, B, C, D} then: +// - the post-recovery consensus committee is {B, C, D} +// - the post-recovery random beacon committee is {A, B, C} +// +// This test will do the following: +// 1. Triggers EFM by turning off the sole collection node before the end of the DKG forcing the DKG to fail. +// 2. Eject the first consensus node by modifying the epoch snapshot. +// 3. Drop the last consensus node from the Random Beacon committee. This hack works only for threshold systems with an even number of participants, +// without changing the threshold - hence we need to start this test with 4 consensus nodes. +// 4. Generates epoch recover transaction args using the tooling [run.GenerateRecoverTxArgsWithDKG] provided for the governance committee. +// 5. Submit recover epoch transaction. +// 6. Ensure expected EpochRecover event is emitted. +// 7. Ensure the network transitions into the recovery epoch and finalizes the first view of the recovery epoch. +func (s *RecoverEpochSuite) TestRecoverEpochEjectNodeDifferentDKG() { + // 1. Triggers EFM by turning off the sole collection node before the end of the DKG forcing the DKG to fail. + + // pause the collection node to trigger EFM by failing DKG + ln := s.GetContainersByRole(flow.RoleCollection)[0] + require.NoError(s.T(), ln.Pause()) + s.AwaitFinalizedView(s.Ctx, s.GetDKGEndView(), 2*time.Minute, 500*time.Millisecond) + // start the paused collection node now that we are in EFM + require.NoError(s.T(), ln.Start()) + + // get final view from the latest snapshot + epoch1, err := s.Net.BootstrapSnapshot.Epochs().Current() + require.NoError(s.T(), err) + epoch1FinalView := epoch1.FinalView() + + // Wait for at least the first view past the current epoch's original FinalView to be finalized. + s.TimedLogf("waiting for epoch transition (finalized view %d)", epoch1FinalView+1) + s.AwaitFinalizedView(s.Ctx, epoch1FinalView+1, 2*time.Minute, 500*time.Millisecond) + s.TimedLogf("observed finalized view %d", epoch1FinalView+1) + + // assert that we are in EFM + snapshot, err := s.Client.GetLatestProtocolSnapshot(s.Ctx) + require.NoError(s.T(), err) + epochPhase, err := snapshot.EpochPhase() + require.NoError(s.T(), err) + require.Equal(s.T(), flow.EpochPhaseFallback, epochPhase, "network must enter EFM by this point") + + // 2. 
Eject the FIRST consensus node by modifying the snapshot before generating the recover epoch transaction args. + // By ejecting a node from the consensus committee but keeping it in the Random Beacon committee, we ensure that there is a node + // which is not part of the consensus committee but is part of the Random Beacon committee. + currentIdentityTable := snapshot.Encodable().SealingSegment.LatestProtocolStateEntry().EpochEntry.CurrentEpochIdentityTable + ejectedIdentity := currentIdentityTable.Filter(filter.HasRole[flow.Identity](flow.RoleConsensus))[0] + ejectedIdentity.EpochParticipationStatus = flow.EpochParticipationStatusEjected // writes through to `currentIdentityTable` + + // 3. Modify DKG data by removing the last node of the consensus committee from the DKG committee. This way we ensure that the consensus + // committee has a node which is not part of the Random Beacon committee. For threshold committees of *even size*, we can remove a + // single node without changing the threshold (see [ref. 1] for details). In other words, we can just pretend that there was originally + // one node less in the DKG, while the same number of signatures (threshold +1) are sufficient to construct a group signature. + // + // [ref. 1] function `RandomBeaconThreshold` for computing the threshold in package module/signature; note + // that for reconstructing the group sig, _strictly more_ than `threshold` sig shares are required. + randomBeaconParticipants := currentIdentityTable.Filter(filter.HasRole[flow.Identity](flow.RoleConsensus)) + nConsensusNodes := len(randomBeaconParticipants) - 1 + + // 4. Generates epoch recover transaction args using the tooling [run.GenerateRecoverTxArgsWithDKG] provided for the governance committee. + recoveryDkgIndexMap := make(flow.DKGIndexMap, nConsensusNodes) + for i, participant := range randomBeaconParticipants[:nConsensusNodes] { + recoveryDkgIndexMap[participant.NodeID] = i + } + + epochProtocolState, err := snapshot.EpochProtocolState() + require.NoError(s.T(), err) + dkg, err := epochProtocolState.DKG() + require.NoError(s.T(), err) + recoveryThresholdKeyShares := dkg.KeyShares()[:nConsensusNodes] + recoveryThresholdGroupKey := dkg.GroupKey() + + // read internal node info from one of the consensus nodes + internalNodePrivInfoDir, nodeConfigJson := s.getNodeInfoDirs(flow.RoleConsensus) + internalNodes, err := common.ReadFullInternalNodeInfos(unittest.Logger(), internalNodePrivInfoDir, nodeConfigJson) + require.NoError(s.T(), err) + + // At this point we have a node which is part of the consensus committee but not part of the Random Beacon committee and + // another node which is part of the Random Beacon committee but not part of the consensus committee. + collectionClusters := s.NumOfCollectionClusters + recoveryEpochCounter := uint64(1) + txArgs, err := run.GenerateRecoverTxArgsWithDKG( + s.Log, + internalNodes, + collectionClusters, + recoveryEpochCounter, + flow.Localnet, + s.StakingAuctionLen, + s.EpochLen, + 3000, + false, + recoveryDkgIndexMap, + recoveryThresholdKeyShares, + recoveryThresholdGroupKey, + nil, + nil, + snapshot, + ) + require.NoError(s.T(), err) + + // 5. Submit recover epoch transaction to the network. + env := utils.LocalnetEnv() + result := s.recoverEpoch(env, txArgs) + require.NoError(s.T(), result.Error) + require.Equal(s.T(), result.Status, sdk.TransactionStatusSealed) + + // 6. Ensure expected EpochRecover event is emitted. 
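+ // Flow event types are chain-qualified (e.g. A.<address>.FlowEpoch.EpochRecover), hence the substring match below rather than an exact comparison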
+ eventType := "" + for _, evt := range result.Events { + if strings.Contains(evt.Type, "FlowEpoch.EpochRecover") { + eventType = evt.Type + break + } + } + require.NotEmpty(s.T(), eventType, "expected FlowEpoch.EpochRecover event type") + events, err := s.Client.GetEventsForBlockIDs(s.Ctx, eventType, []sdk.Identifier{result.BlockID}) + require.NoError(s.T(), err) + require.Equal(s.T(), events[0].Events[0].Type, eventType) + + // 7. Ensure the network transitions into the recovery epoch and finalizes the first view of the recovery epoch. + startViewOfNextEpoch := uint64(txArgs[1].(cadence.UInt64)) + s.TimedLogf("waiting to transition into recovery epoch (finalized view %d)", startViewOfNextEpoch) + s.AwaitFinalizedView(s.Ctx, startViewOfNextEpoch, 2*time.Minute, 500*time.Millisecond) + s.TimedLogf("observed finalized first view of recovery epoch %d", startViewOfNextEpoch) + + s.AssertInEpoch(s.Ctx, 1) +} diff --git a/integration/tests/epochs/dynamic_epoch_transition_suite.go b/integration/tests/epochs/dynamic_epoch_transition_suite.go new file mode 100644 index 00000000000..4791ca51bd8 --- /dev/null +++ b/integration/tests/epochs/dynamic_epoch_transition_suite.go @@ -0,0 +1,564 @@ +// Package epochs contains common functionality for the epoch integration test suite. +// Individual tests exist in sub-directories of this: cohort1, cohort2... +// Each cohort is run as a separate, sequential CI job. Since the epoch tests are long +// and resource-heavy, we split them into several cohorts, which can be run in parallel. +// +// If a new cohort is added in the future, it must be added to: +// - ci.yml, flaky-test-monitor.yml, bors.toml (ensure new cohort of tests is run) +// - Makefile (include new cohort in integration-test directive, etc.) +package epochs + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/onflow/cadence" + "github.com/onflow/crypto" + "github.com/onflow/flow-core-contracts/lib/go/templates" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + sdk "github.com/onflow/flow-go-sdk" + sdkcrypto "github.com/onflow/flow-go-sdk/crypto" + + "github.com/onflow/flow-go/fvm/blueprints" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/utils" + "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/encodable" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/utils/unittest" +) + +// nodeUpdateValidation func that will be used to validate the health of the network +// after an identity table change during an epoch transition. This is used in +// tandem with RunTestEpochJoinAndLeave. +// NOTE: The snapshot must reference a block within the second epoch. +type nodeUpdateValidation func(ctx context.Context, env templates.Environment, snapshot *inmem.Snapshot, info *StakedNodeOperationInfo) + +// DynamicEpochTransitionSuite is the suite used for epoch transitions tests +// with a dynamic identity table. +type DynamicEpochTransitionSuite struct { + BaseSuite +} + +func (s *DynamicEpochTransitionSuite) SetupTest() { + // use a longer staking auction length to accommodate staking operations for joining/leaving nodes + // NOTE: this value is set fairly aggressively to ensure shorter test times. + // If flakiness due to failure to complete staking operations in time is observed, + // try increasing (by 10-20 views). 
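+// With the values below, the BaseSuite length check passes with margin: minEpochLength = 50 + 3*50 + 20 = 220, and 220 + 20 = 240 < 250.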
+ s.StakingAuctionLen = 50 + s.DKGPhaseLen = 50 + s.EpochLen = 250 + s.FinalizationSafetyThreshold = 20 + + // run the generic setup, which starts up the network + s.BaseSuite.SetupTest() +} + +// StakedNodeOperationInfo struct contains all the node information needed to +// start a node after it is onboarded (staked and registered). +type StakedNodeOperationInfo struct { + NodeID flow.Identifier + Role flow.Role + StakingAccountAddress sdk.Address + FullAccountKey *sdk.AccountKey + StakingAccountKey sdkcrypto.PrivateKey + NetworkingKey sdkcrypto.PrivateKey + StakingKey sdkcrypto.PrivateKey + // machine account info defined only for consensus/collection nodes + MachineAccountAddress sdk.Address + MachineAccountKey sdkcrypto.PrivateKey + MachineAccountPublicKey *sdk.AccountKey + ContainerName string +} + +// StakeNode will generate initial keys needed for a SN/LN node and onboard this node using the following steps: +// 1. Generate keys (networking, staking, machine) +// 2. Create a new account, this will be the staking account +// 3. Transfer token amount for the given role to the staking account +// 4. Add additional funds to staking account for storage +// 5. Create Staking collection for node +// 6. Register node using staking collection object +// 7. Add the node to the approved list +// +// NOTE: assumes staking occurs in first epoch (counter 0) +// NOTE 2: This function performs steps 1-6 in one custom transaction, to reduce +// the time taken by each test case. Individual transactions for each step can be +// found in Git history, for example: 9867056a8b7246655047bc457f9000398f6687c0. +func (s *DynamicEpochTransitionSuite) StakeNode(ctx context.Context, env templates.Environment, role flow.Role) *StakedNodeOperationInfo { + + stakingAccountKey, networkingKey, stakingKey, machineAccountKey, machineAccountPubKey := s.generateAccountKeys(role) + nodeID := flow.MakeID(stakingKey.PublicKey().Encode()) + fullStakingAcctKey := sdk.NewAccountKey(). + SetPublicKey(stakingAccountKey.PublicKey()). + SetHashAlgo(sdkcrypto.SHA2_256). + SetWeight(sdk.AccountKeyWeightThreshold) + + _, stakeAmount, err := s.Client.TokenAmountByRole(role) + require.NoError(s.T(), err) + containerName := s.getTestContainerName(role) + + latestBlockID, err := s.Client.GetLatestBlockID(ctx) + require.NoError(s.T(), err) + + stakingKeyPoP, err := crypto.BLSGeneratePOP(stakingKey) + require.NoError(s.T(), err) + + // create and register node + tx, err := utils.MakeCreateAndSetupNodeTx( + env, + s.Client.Account(), + sdk.Identifier(latestBlockID), + fullStakingAcctKey, + fmt.Sprintf("%f", stakeAmount+10.0), + nodeID, + role, + testnet.GetPrivateNodeInfoAddress(containerName), + strings.TrimPrefix(networkingKey.PublicKey().String(), "0x"), + strings.TrimPrefix(stakingKey.PublicKey().String(), "0x"), + strings.TrimPrefix(stakingKeyPoP.String(), "0x"), + machineAccountPubKey, + ) + require.NoError(s.T(), err) + + err = s.Client.SignAndSendTransaction(ctx, tx) + require.NoError(s.T(), err) + result, err := s.Client.WaitForSealed(ctx, tx.ID()) + require.NoError(s.T(), err) + s.Client.Account().Keys[0].SequenceNumber++ + require.NoError(s.T(), result.Error) + + accounts := s.Client.CreatedAccounts(result) + stakingAccountAddress := accounts[0] + var machineAccountAddr sdk.Address + if role == flow.RoleCollection || role == flow.RoleConsensus { + machineAccountAddr = accounts[1] + } + + result = s.SubmitSetApprovedListTx(ctx, env, append(s.Net.Identities().NodeIDs(), nodeID)...) 
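
(Editorial aside; StakeNode continues below.) The view counts assigned in SetupTest above have to leave room for the full epoch preparation protocol. The removed epochs.Suite.SetupTest later in this diff asserted EpochLen > StakingAuctionLen + 3*DKGPhaseLen + 20 plus the safety threshold; assuming BaseSuite enforces an analogous check, the chosen values satisfy it with room to spare:

package main

import "fmt"

func main() {
	// Values from DynamicEpochTransitionSuite.SetupTest above (in views).
	stakingAuctionLen, dkgPhaseLen, epochLen, safety := uint64(50), uint64(50), uint64(250), uint64(20)
	minEpochLen := stakingAuctionLen + 3*dkgPhaseLen + 20 // 50 + 3*50 + 20 = 220
	fmt.Println(epochLen > minEpochLen+safety)            // 250 > 240 -> true
}
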
+ require.NoError(s.T(), result.Error) + + // ensure we are still in staking auction + s.AssertInEpochPhase(ctx, 0, flow.EpochPhaseStaking) + + return &StakedNodeOperationInfo{ + NodeID: nodeID, + Role: role, + StakingAccountAddress: stakingAccountAddress, + FullAccountKey: fullStakingAcctKey, + StakingAccountKey: stakingAccountKey, + StakingKey: stakingKey, + NetworkingKey: networkingKey, + MachineAccountKey: machineAccountKey, + MachineAccountPublicKey: machineAccountPubKey, + MachineAccountAddress: machineAccountAddr, + ContainerName: containerName, + } +} + +// generateAccountKeys generates the initial keys needed to bootstrap the account +func (s *DynamicEpochTransitionSuite) generateAccountKeys(role flow.Role) ( + operatorAccountKey, + networkingKey, + stakingKey, + machineAccountKey crypto.PrivateKey, + machineAccountPubKey *sdk.AccountKey, +) { + operatorAccountKey = unittest.PrivateKeyFixture(crypto.ECDSAP256) + networkingKey = unittest.NetworkingPrivKeyFixture() + stakingKey = unittest.StakingPrivKeyFixture() + + // create a machine account + if role == flow.RoleConsensus || role == flow.RoleCollection { + machineAccountKey = unittest.PrivateKeyFixture(crypto.ECDSAP256) + + machineAccountPubKey = &sdk.AccountKey{ + PublicKey: machineAccountKey.PublicKey(), + SigAlgo: machineAccountKey.PublicKey().Algorithm(), + HashAlgo: bootstrap.DefaultMachineAccountHashAlgo, + Weight: 1000, + } + } + + return +} + +// removeNodeFromProtocol removes the given node from the protocol. +// NOTE: assumes staking occurs in first epoch (counter 0) +func (s *DynamicEpochTransitionSuite) removeNodeFromProtocol(ctx context.Context, env templates.Environment, nodeID flow.Identifier) { + result, err := s.submitAdminRemoveNodeTx(ctx, env, nodeID) + require.NoError(s.T(), err) + require.NoError(s.T(), result.Error) + + // ensure we submit transaction while in staking phase + s.AssertInEpochPhase(ctx, 0, flow.EpochPhaseStaking) +} + +// submitAdminRemoveNodeTx will submit the admin remove node transaction +func (s *DynamicEpochTransitionSuite) submitAdminRemoveNodeTx(ctx context.Context, + env templates.Environment, + nodeID flow.Identifier, +) (*sdk.TransactionResult, error) { + latestBlockID, err := s.Client.GetLatestBlockID(ctx) + require.NoError(s.T(), err) + + closeStakeTx, err := utils.MakeAdminRemoveNodeTx( + env, + s.Client.Account(), + 0, + sdk.Identifier(latestBlockID), + nodeID, + ) + require.NoError(s.T(), err) + + err = s.Client.SignAndSendTransaction(ctx, closeStakeTx) + require.NoError(s.T(), err) + + result, err := s.Client.WaitForSealed(ctx, closeStakeTx.ID()) + require.NoError(s.T(), err) + s.Client.Account().Keys[0].SequenceNumber++ + return result, nil +} + +// ExecuteGetProposedTableScript executes a script to read the proposed identity table. +func (s *DynamicEpochTransitionSuite) ExecuteGetProposedTableScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { + v, err := s.Client.ExecuteScriptBytes(ctx, templates.GenerateReturnProposedTableScript(env), []cadence.Value{}) + require.NoError(s.T(), err) + return v +} + +// ExecuteGetNodeInfoScript executes a script to get staking info about the given node.
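
(Editorial aside; the suite continues below.) StakeNode above registers the staking key together with a BLS proof of possession produced by crypto.BLSGeneratePOP. A hedged, self-contained sketch of generating and verifying such a PoP, assuming the github.com/onflow/crypto API (GeneratePrivateKey, BLSGeneratePOP, BLSVerifyPOP, KeyGenSeedMinLen) matches its flow-go/crypto predecessor:

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/onflow/crypto"
)

func main() {
	// BLS key generation requires an explicit seed of at least KeyGenSeedMinLen bytes.
	seed := make([]byte, crypto.KeyGenSeedMinLen)
	if _, err := rand.Read(seed); err != nil {
		panic(err)
	}
	sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed)
	if err != nil {
		panic(err)
	}
	// The PoP proves the registrant holds the private staking key,
	// guarding aggregated BLS signatures against rogue-key attacks.
	pop, err := crypto.BLSGeneratePOP(sk)
	if err != nil {
		panic(err)
	}
	ok, err := crypto.BLSVerifyPOP(sk.PublicKey(), pop)
	fmt.Println(ok, err) // true <nil>
}
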
+func (s *DynamicEpochTransitionSuite) ExecuteGetNodeInfoScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { + cdcNodeID, err := cadence.NewString(nodeID.String()) + require.NoError(s.T(), err) + v, err := s.Client.ExecuteScriptBytes(ctx, templates.GenerateGetNodeInfoScript(env), []cadence.Value{cdcNodeID}) + require.NoError(s.T(), err) + return v +} + +// SubmitSetApprovedListTx adds a node to the approved node list; this must be done when a node joins the protocol during the epoch staking phase +func (s *DynamicEpochTransitionSuite) SubmitSetApprovedListTx(ctx context.Context, env templates.Environment, identities ...flow.Identifier) *sdk.TransactionResult { + latestBlockID, err := s.Client.GetLatestBlockID(ctx) + require.NoError(s.T(), err) + + idTableAddress := sdk.HexToAddress(env.IDTableAddress) + tx := sdk.NewTransaction(). + SetScript(templates.GenerateSetApprovedNodesScript(env)). + SetComputeLimit(9999). + SetReferenceBlockID(sdk.Identifier(latestBlockID)). + SetProposalKey(s.Client.SDKServiceAddress(), 0, s.Client.Account().Keys[0].SequenceNumber). + SetPayer(s.Client.SDKServiceAddress()). + AddAuthorizer(idTableAddress) + err = tx.AddArgument(blueprints.SetStakingAllowlistTxArg(identities)) + require.NoError(s.T(), err) + + err = s.Client.SignAndSendTransaction(ctx, tx) + require.NoError(s.T(), err) + + result, err := s.Client.WaitForSealed(ctx, tx.ID()) + require.NoError(s.T(), err) + s.Client.Account().Keys[0].SequenceNumber++ + + return result +} + +// ExecuteReadApprovedNodesScript executes the get approved nodes script and returns the list of approved nodes +func (s *DynamicEpochTransitionSuite) ExecuteReadApprovedNodesScript(ctx context.Context, env templates.Environment) cadence.Value { + v, err := s.Client.ExecuteScriptBytes(ctx, templates.GenerateGetApprovedNodesScript(env), []cadence.Value{}) + require.NoError(s.T(), err) + + return v +} + +// getTestContainerName returns a name for a test container in the form ${role}_test_${n} +func (s *DynamicEpochTransitionSuite) getTestContainerName(role flow.Role) string { + i := len(s.Net.ContainersByRole(role, false)) + 1 + return fmt.Sprintf("%s_test_%d", role, i) +} + +// assertNodeApprovedAndProposed executes the read approved nodes list and get proposed table scripts +// and checks that info.NodeID is in both lists +func (s *DynamicEpochTransitionSuite) assertNodeApprovedAndProposed(ctx context.Context, env templates.Environment, info *StakedNodeOperationInfo) { + // ensure node ID in approved list + //approvedNodes := s.ExecuteReadApprovedNodesScript(ctx, env) + //require.Containsf(s.T(), approvedNodes.(cadence.Array).Values, cadence.String(info.NodeID.String()), "expected new node to be in approved nodes list: %x", info.NodeID) + + // Access Nodes go through a separate selection process, so they do not immediately + // appear on the proposed table -- skip checking for them here.
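
(Editorial aside; assertNodeApprovedAndProposed continues below.) SubmitSetApprovedListTx above uses the flow-go-sdk fluent transaction builder. A self-contained sketch of the same chain; the script, addresses, and sequence number here are placeholders rather than values from this test:

package main

import (
	"fmt"

	sdk "github.com/onflow/flow-go-sdk"
)

// buildTx mirrors the builder chain used above: each Set* call mutates
// and returns the same *sdk.Transaction, allowing the fluent style.
func buildTx(refBlockID sdk.Identifier, proposer, authorizer sdk.Address, seqNum uint64) *sdk.Transaction {
	return sdk.NewTransaction().
		SetScript([]byte("transaction { execute {} }")).
		SetComputeLimit(9999).
		SetReferenceBlockID(refBlockID).
		SetProposalKey(proposer, 0, seqNum). // key index 0 on the proposer account
		SetPayer(proposer).
		AddAuthorizer(authorizer)
}

func main() {
	tx := buildTx(sdk.Identifier{}, sdk.Address{}, sdk.Address{}, 0)
	fmt.Println(tx.ID()) // hash over the assembled transaction payload
}

Note that after each sealed transaction the suite manually increments the proposer key's SequenceNumber, since the SDK does not track it across transactions.
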
+ if info.Role == flow.RoleAccess { + s.T().Logf("skipping checking proposed table for joining Access Node") + return + } + + // check if node is in proposed table + proposedTable := s.ExecuteGetProposedTableScript(ctx, env, info.NodeID) + require.Containsf(s.T(), proposedTable.(cadence.Array).Values, cadence.String(info.NodeID.String()), "expected new node to be in proposed table: %x", info.NodeID) +} + +// NewTestContainerOnNetwork configures a new container on the suite's network +func (s *DynamicEpochTransitionSuite) NewTestContainerOnNetwork(role flow.Role, info *StakedNodeOperationInfo) *testnet.Container { + containerConfigs := []func(config *testnet.NodeConfig){ + testnet.WithLogLevel(zerolog.WarnLevel), + testnet.WithID(info.NodeID), + } + + nodeConfig := testnet.NewNodeConfig(role, containerConfigs...) + testContainerConfig, err := testnet.NewContainerConfig(info.ContainerName, nodeConfig, info.NetworkingKey, info.StakingKey) + require.NoError(s.T(), err) + err = testContainerConfig.WriteKeyFiles(s.Net.BootstrapDir, info.MachineAccountAddress, encodable.MachineAccountPrivKey{PrivateKey: info.MachineAccountKey}, role) + require.NoError(s.T(), err) + + // add our container to the network + err = s.Net.AddNode(s.T(), s.Net.BootstrapDir, testContainerConfig) + require.NoError(s.T(), err, "failed to add container to network") + + // if the node is of LN/SN role type, add additional flags to the node container for a secure GRPC connection + if role == flow.RoleConsensus || role == flow.RoleCollection { + // ghost containers don't participate in the network; skip any SN/LN ghost containers + nodeContainer := s.Net.ContainerByID(testContainerConfig.NodeID) + nodeContainer.AddFlag("insecure-access-api", "false") + + accessNodeIDS := make([]string, 0) + for _, c := range s.Net.ContainersByRole(flow.RoleAccess, false) { + accessNodeIDS = append(accessNodeIDS, c.Config.NodeID.String()) + } + nodeContainer.AddFlag("access-node-ids", strings.Join(accessNodeIDS, ",")) + } + + return s.Net.ContainerByID(info.NodeID) +} + +// StakeNewNode will stake a new node, and create the corresponding docker container for that node +func (s *DynamicEpochTransitionSuite) StakeNewNode(ctx context.Context, env templates.Environment, role flow.Role) (*StakedNodeOperationInfo, *testnet.Container) { + // stake our new node + info := s.StakeNode(ctx, env, role) + + // make sure our node is in the approved nodes list and the proposed nodes table + s.assertNodeApprovedAndProposed(ctx, env, info) + + // add a new container to the network with the info used to stake our node + testContainer := s.NewTestContainerOnNetwork(role, info) + + return info, testContainer +} + +// AssertInEpochPhase checks that we are in the given phase of the given epoch.
+func (s *DynamicEpochTransitionSuite) AssertInEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase) { + snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) + require.NoError(s.T(), err) + epoch, err := snapshot.Epochs().Current() + require.NoError(s.T(), err) + actualEpoch := epoch.Counter() + actualPhase, err := snapshot.EpochPhase() + require.NoError(s.T(), err) + require.Equal(s.T(), expectedPhase, actualPhase, "not in correct phase") + require.Equal(s.T(), expectedEpoch, actualEpoch, "not in correct epoch") + + head, err := snapshot.Head() + require.NoError(s.T(), err) + s.TimedLogf("asserted in epoch %d, phase %s, finalized height/view: %d/%d", expectedEpoch, expectedPhase, head.Height, head.View) +} + +// AssertNodeNotParticipantInEpoch asserts that the given node ID does not exist +// in the epoch's identity table. +func (s *DynamicEpochTransitionSuite) AssertNodeNotParticipantInEpoch(epoch protocol.TentativeEpoch, nodeID flow.Identifier) { + identities := epoch.InitialIdentities() + require.NotContains(s.T(), identities.NodeIDs(), nodeID) +} + +// AwaitSealedHeightExceedsSnapshotByBuffer polls until it observes that the latest +// sealed block height has exceeded the snapshot height by `buffer` blocks. +func (s *DynamicEpochTransitionSuite) AwaitSealedHeightExceedsSnapshotByBuffer(ctx context.Context, snapshot *inmem.Snapshot, buffer uint64, waitFor, tick time.Duration) { + header, err := snapshot.Head() + require.NoError(s.T(), err) + snapshotHeight := header.Height + thresholdHeight := snapshotHeight + buffer + + s.AwaitSealedHeight(ctx, thresholdHeight, waitFor, tick) +} + +// AwaitSealedHeight polls until it observes that the latest sealed block has a height +// greater than or equal to the input height. +func (s *DynamicEpochTransitionSuite) AwaitSealedHeight(ctx context.Context, thresholdHeight uint64, waitFor, tick time.Duration) { + require.Eventually(s.T(), func() bool { + latestSealed := s.LatestSealedBlockHeader(ctx) + s.TimedLogf("waiting for sealed height: %d < %d", latestSealed.Height, thresholdHeight) + return latestSealed.Height >= thresholdHeight + }, waitFor, tick) +} + +// AwaitSealedView polls until it observes that the latest sealed block has a view +// greater than or equal to the input view. This is used to wait until an epoch +// transition must have happened. +func (s *DynamicEpochTransitionSuite) AwaitSealedView(ctx context.Context, thresholdView uint64, waitFor, tick time.Duration) { + require.Eventually(s.T(), func() bool { + latestSealed := s.LatestSealedBlockHeader(ctx) + s.TimedLogf("waiting for sealed view: %d < %d", latestSealed.View, thresholdView) + return latestSealed.View >= thresholdView + }, waitFor, tick) +} + +// LatestSealedBlockHeader retrieves the latest sealed block, as reported in LatestSnapshot. +func (s *DynamicEpochTransitionSuite) LatestSealedBlockHeader(ctx context.Context) *flow.Header { + snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) + require.NoError(s.T(), err) + segment, err := snapshot.SealingSegment() + require.NoError(s.T(), err) + sealed := segment.Sealed() + return sealed.ToHeader() +} + +// SubmitSmokeTestTransaction will submit a create account transaction to smoke test the network. +// This ensures a single transaction can be sealed by the network.
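
(Editorial aside; SubmitSmokeTestTransaction continues below.) The Await* helpers above all share one testify idiom: require.Eventually re-evaluates a condition every tick until it returns true or waitFor elapses, failing the test on timeout. A minimal standalone illustration, with a timer standing in for "sealed height reached":

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestAwaitPattern(t *testing.T) {
	deadline := time.Now().Add(200 * time.Millisecond)
	// Polls every 10ms for up to 1s; fails the test if the condition
	// never becomes true within the waitFor window.
	require.Eventually(t, func() bool {
		return time.Now().After(deadline) // stand-in for a chain-progress check
	}, time.Second, 10*time.Millisecond)
}
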
+func (s *DynamicEpochTransitionSuite) SubmitSmokeTestTransaction(ctx context.Context) { + _, err := utils.CreateFlowAccount(ctx, s.Client) + require.NoError(s.T(), err) +} + +// AssertNetworkHealthyAfterANChange performs a basic network health check after replacing an access node. +// 1. Check that there is no problem connecting directly to the AN provided and retrieve a protocol snapshot +// 2. Check that the chain moved at least 10 blocks from when the node was bootstrapped by comparing +// head of the rootSnapshot with the head of the snapshot we retrieved directly from the AN +// 3. Check that we can execute a script on the AN +// +// TODO test sending and observing result of a transaction via the new AN (blocked by https://github.com/onflow/flow-go/issues/3642) +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterANChange(ctx context.Context, env templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, info *StakedNodeOperationInfo) { + + // get snapshot directly from new AN and compare head with head from the + // snapshot that was used to bootstrap the node + client, err := s.Net.ContainerByName(info.ContainerName).TestnetClient() + require.NoError(s.T(), err) + + // overwrite Client to point to the new AN (since we have stopped the initial AN at this point) + s.Client = client + // assert at least 10 blocks have been sealed since the node replacement + s.AwaitSealedHeightExceedsSnapshotByBuffer(ctx, snapshotInSecondEpoch, 10, 30*time.Second, time.Millisecond*100) + + // execute script directly on new AN to ensure it's functional + proposedTable, err := client.ExecuteScriptBytes(ctx, templates.GenerateReturnProposedTableScript(env), []cadence.Value{}) + require.NoError(s.T(), err) + require.Contains(s.T(), proposedTable.(cadence.Array).Values, cadence.String(info.NodeID.String()), "expected node ID to be present in proposed table returned by new AN.") +} + +// AssertNetworkHealthyAfterVNChange performs a basic network health check after replacing a verification node. +// 1. Ensure sealing continues into the second epoch (post-replacement) by observing +// at least 10 blocks of sealing progress within the epoch +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterVNChange(ctx context.Context, _ templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, _ *StakedNodeOperationInfo) { + s.AwaitSealedHeightExceedsSnapshotByBuffer(ctx, snapshotInSecondEpoch, 10, 30*time.Second, time.Millisecond*100) +} + +// AssertNetworkHealthyAfterLNChange performs a basic network health check after replacing a collection node. +// 1. Submit transaction to network that will target the newly staked LN by making +// sure the reference block ID is after the first epoch. +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterLNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { + // At this point we have reached the second epoch and our new LN is the only LN in the network. + // To validate the LN joined the network successfully and is processing transactions we create + // an account, which submits a transaction and verifies it is sealed. + s.SubmitSmokeTestTransaction(ctx) +} + +// AssertNetworkHealthyAfterSNChange performs a basic network health check after replacing a consensus node. +// The RunTestEpochJoinAndLeave function running prior to this health check already asserts that we successfully: +// 1. enter the second epoch (DKG succeeds; epoch fallback is not triggered) +// 2.
seal at least the first block within the second epoch (consensus progresses into second epoch). +// +// The test is configured so that one offline committee member is enough to prevent progress, +// therefore the newly joined consensus node must be participating in consensus. +// +// In addition, here we submit a transaction and verify that it is sealed. +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterSNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { + s.SubmitSmokeTestTransaction(ctx) +} + +// RunTestEpochJoinAndLeave coordinates adding and removing one node with the given +// role during the first epoch, then running the network health validation function +// once the network has successfully transitioned into the second epoch. +// +// This tests: +// * that nodes can stake and join the network at an epoch boundary +// * that nodes can unstake and leave the network at an epoch boundary +// * role-specific network health validation after the swap has completed +func (s *DynamicEpochTransitionSuite) RunTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth nodeUpdateValidation) { + + env := utils.LocalnetEnv() + + var containerToReplace *testnet.Container + + // replace access_2; avoid replacing access_1, the container used for Client connections + if role == flow.RoleAccess { + containerToReplace = s.Net.ContainerByName("access_2") + require.NotNil(s.T(), containerToReplace) + } else { + // grab the first container of this node role type; this is the container we will replace + containerToReplace = s.GetContainersByRole(role)[0] + require.NotNil(s.T(), containerToReplace) + } + + // stake our new node and get the corresponding container for that node + s.TimedLogf("staking joining node with role %s", role.String()) + info, testContainer := s.StakeNewNode(s.Ctx, env, role) + s.TimedLogf("successfully staked joining node: %s", info.NodeID) + + // use an admin transaction to remove the node; this simulates a node leaving the network + s.TimedLogf("removing node %s with role %s", containerToReplace.Config.NodeID, role.String()) + s.removeNodeFromProtocol(s.Ctx, env, containerToReplace.Config.NodeID) + s.TimedLogf("successfully removed node: %s", containerToReplace.Config.NodeID) + + // wait for epoch setup phase before we start our container and pause the old container + s.TimedLogf("waiting for EpochSetup phase of first epoch to begin") + s.AwaitEpochPhase(s.Ctx, 0, flow.EpochPhaseSetup, time.Minute, 500*time.Millisecond) + s.TimedLogf("successfully reached EpochSetup phase of first epoch") + + // get the latest snapshot and start new container with it + rootSnapshot, err := s.Client.GetLatestProtocolSnapshot(s.Ctx) + require.NoError(s.T(), err) + + header, err := rootSnapshot.Head() + require.NoError(s.T(), err) + segment, err := rootSnapshot.SealingSegment() + require.NoError(s.T(), err) + + s.TimedLogf("retrieved header after entering EpochSetup phase: root_height=%d, root_view=%d, segment_heights=[%d-%d], segment_views=[%d-%d]", + header.Height, header.View, + segment.Sealed().Height, segment.Highest().Height, + segment.Sealed().View, segment.Highest().View) + + testContainer.WriteRootSnapshot(rootSnapshot) + testContainer.Container.Start(s.Ctx) + + epoch1, err := rootSnapshot.Epochs().Current() + require.NoError(s.T(), err) + epoch1FinalView := epoch1.FinalView() + + // wait for at least the first block of the next epoch to be sealed before we pause our container to replace + s.TimedLogf("waiting for epoch transition 
(finalized view %d) before pausing container", epoch1FinalView+1) + s.AwaitFinalizedView(s.Ctx, epoch1FinalView+1, 4*time.Minute, 500*time.Millisecond) + s.TimedLogf("observed finalized view %d -> pausing container", epoch1FinalView+1) + + // make sure container to replace is not a member of epoch 2 + nextEpoch, err := rootSnapshot.Epochs().NextUnsafe() + require.NoError(s.T(), err) + s.AssertNodeNotParticipantInEpoch(nextEpoch, containerToReplace.Config.NodeID) + + // assert transition to second epoch happened as expected + // if counter is still 0, epoch emergency fallback was triggered and we can fail early + s.AssertInEpoch(s.Ctx, 1) + + // wait until we have sealed all blocks in epoch 1 before stopping the replaced container + // in particular, this avoids an edge case where sealing halts if we stop the single VN + // assigned in epoch 1 between finalizing and sealing the transition into epoch 2 + s.AwaitSealedView(s.Ctx, epoch1FinalView+1, time.Minute, 500*time.Millisecond) + err = containerToReplace.Pause() + require.NoError(s.T(), err) + + // retrieve a snapshot after observing that we have entered the second epoch + secondEpochSnapshot, err := s.Client.GetLatestProtocolSnapshot(s.Ctx) + require.NoError(s.T(), err) + + // make sure the network is healthy after adding new node + checkNetworkHealth(s.Ctx, env, secondEpochSnapshot, info) +} diff --git a/integration/tests/epochs/epoch_join_and_leave_an_test.go b/integration/tests/epochs/epoch_join_and_leave_an_test.go deleted file mode 100644 index 25b96bf425a..00000000000 --- a/integration/tests/epochs/epoch_join_and_leave_an_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package epochs - -import ( - "testing" - - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/model/flow" -) - -func TestEpochJoinAndLeaveAN(t *testing.T) { - suite.Run(t, new(EpochJoinAndLeaveANSuite)) -} - -type EpochJoinAndLeaveANSuite struct { - DynamicEpochTransitionSuite -} - -// TestEpochJoinAndLeaveAN should update access nodes and assert healthy network conditions -// after the epoch transition completes. See health check function for details. -func (s *EpochJoinAndLeaveANSuite) TestEpochJoinAndLeaveAN() { - s.runTestEpochJoinAndLeave(flow.RoleAccess, s.assertNetworkHealthyAfterANChange) -} diff --git a/integration/tests/epochs/epoch_join_and_leave_ln_test.go b/integration/tests/epochs/epoch_join_and_leave_ln_test.go deleted file mode 100644 index 5ca72eee9f4..00000000000 --- a/integration/tests/epochs/epoch_join_and_leave_ln_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package epochs - -import ( - "testing" - - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/model/flow" -) - -func TestEpochJoinAndLeaveLN(t *testing.T) { - suite.Run(t, new(EpochJoinAndLeaveLNSuite)) -} - -type EpochJoinAndLeaveLNSuite struct { - DynamicEpochTransitionSuite -} - -// TestEpochJoinAndLeaveLN should update collection nodes and assert healthy network conditions -// after the epoch transition completes. See health check function for details. 
-func (s *EpochJoinAndLeaveLNSuite) TestEpochJoinAndLeaveLN() { - s.runTestEpochJoinAndLeave(flow.RoleCollection, s.assertNetworkHealthyAfterLNChange) -} diff --git a/integration/tests/epochs/epoch_join_and_leave_sn_test.go b/integration/tests/epochs/epoch_join_and_leave_sn_test.go deleted file mode 100644 index a3763420cdc..00000000000 --- a/integration/tests/epochs/epoch_join_and_leave_sn_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package epochs - -import ( - "testing" - - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/model/flow" -) - -func TestEpochJoinAndLeaveSN(t *testing.T) { - suite.Run(t, new(EpochJoinAndLeaveSNSuite)) -} - -type EpochJoinAndLeaveSNSuite struct { - DynamicEpochTransitionSuite -} - -// TestEpochJoinAndLeaveSN should update consensus nodes and assert healthy network conditions -// after the epoch transition completes. See health check function for details. -func (s *EpochJoinAndLeaveSNSuite) TestEpochJoinAndLeaveSN() { - s.runTestEpochJoinAndLeave(flow.RoleConsensus, s.assertNetworkHealthyAfterSNChange) -} diff --git a/integration/tests/epochs/suite.go b/integration/tests/epochs/suite.go deleted file mode 100644 index d3d0e169781..00000000000 --- a/integration/tests/epochs/suite.go +++ /dev/null @@ -1,726 +0,0 @@ -package epochs - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/onflow/cadence" - "github.com/onflow/flow-core-contracts/lib/go/templates" - "github.com/rs/zerolog" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - sdk "github.com/onflow/flow-go-sdk" - sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - - "github.com/onflow/flow-go/fvm/blueprints" - "github.com/onflow/flow-go/state/protocol" - - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/engine/ghost/client" - "github.com/onflow/flow-go/integration/testnet" - "github.com/onflow/flow-go/integration/tests/lib" - "github.com/onflow/flow-go/integration/utils" - "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/encodable" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol/inmem" - "github.com/onflow/flow-go/utils/unittest" -) - -// nodeUpdateValidation func that will be used to validate the health of the network -// after an identity table change during an epoch transition. This is used in -// tandem with runTestEpochJoinAndLeave. -// NOTE: The snapshot must reference a block within the second epoch. -type nodeUpdateValidation func(ctx context.Context, env templates.Environment, snapshot *inmem.Snapshot, info *StakedNodeOperationInfo) - -// Suite encapsulates common functionality for epoch integration tests. -type Suite struct { - suite.Suite - lib.TestnetStateTracker - ctx context.Context - cancel context.CancelFunc - log zerolog.Logger - net *testnet.FlowNetwork - ghostID flow.Identifier - client *testnet.Client - - // Epoch config (lengths in views) - StakingAuctionLen uint64 - DKGPhaseLen uint64 - EpochLen uint64 - EpochCommitSafetyThreshold uint64 - // Whether approvals are required for sealing (we only enable for VN tests because - // requiring approvals requires a longer DKG period to avoid flakiness) - RequiredSealApprovals uint // defaults to 0 (no approvals required) -} - -// SetupTest is run automatically by the testing framework before each test case. 
-func (s *Suite) SetupTest() { - - minEpochLength := s.StakingAuctionLen + s.DKGPhaseLen*3 + 20 - // ensure epoch lengths are set correctly - require.Greater(s.T(), s.EpochLen, minEpochLength+s.EpochCommitSafetyThreshold, "epoch too short") - - s.ctx, s.cancel = context.WithCancel(context.Background()) - s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) - s.log.Info().Msg("================> SetupTest") - defer func() { - s.log.Info().Msg("================> Finish SetupTest") - }() - - collectionConfigs := []func(*testnet.NodeConfig){ - testnet.WithAdditionalFlag("--block-rate-delay=100ms"), - testnet.WithLogLevel(zerolog.WarnLevel)} - - consensusConfigs := []func(config *testnet.NodeConfig){ - testnet.WithAdditionalFlag("--block-rate-delay=100ms"), - testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", s.RequiredSealApprovals)), - testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", s.RequiredSealApprovals)), - testnet.WithLogLevel(zerolog.WarnLevel)} - - // a ghost node masquerading as an access node - s.ghostID = unittest.IdentifierFixture() - ghostNode := testnet.NewNodeConfig( - flow.RoleAccess, - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.WithID(s.ghostID), - testnet.AsGhost()) - - confs := []testnet.NodeConfig{ - testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), - testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), - testnet.NewNodeConfig(flow.RoleCollection, collectionConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel), testnet.WithAdditionalFlag("--extensive-logging=true")), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel)), - testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.WarnLevel)), - ghostNode, - } - - netConf := testnet.NewNetworkConfigWithEpochConfig("epochs-tests", confs, s.StakingAuctionLen, s.DKGPhaseLen, s.EpochLen, s.EpochCommitSafetyThreshold) - - // initialize the network - s.net = testnet.PrepareFlowNetwork(s.T(), netConf, flow.Localnet) - - // start the network - s.net.Start(s.ctx) - - // start tracking blocks - s.Track(s.T(), s.ctx, s.Ghost()) - - // use AN1 for test-related queries - the AN join/leave test will replace AN2 - client, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() - require.NoError(s.T(), err) - - s.client = client - - // log network info periodically to aid in debugging future flaky tests - go lib.LogStatusPeriodically(s.T(), s.ctx, s.log, s.client, 5*time.Second) -} - -func (s *Suite) Ghost() *client.GhostClient { - client, err := s.net.ContainerByID(s.ghostID).GhostClient() - require.NoError(s.T(), err, "could not get ghost client") - return client -} - -// TimedLogf logs the message using t.Log and the suite logger, but prefixes the current time. -// This enables viewing logs inline with Docker logs as well as other test logs. -func (s *Suite) TimedLogf(msg string, args ...interface{}) { - s.log.Info().Msgf(msg, args...) - args = append([]interface{}{time.Now().String()}, args...) - s.T().Logf("%s - "+msg, args...) 
-} - -func (s *Suite) TearDownTest() { - s.log.Info().Msg("================> Start TearDownTest") - s.net.Remove() - s.cancel() - s.log.Info().Msg("================> Finish TearDownTest") -} - -// StakedNodeOperationInfo struct contains all the node information needed to -// start a node after it is onboarded (staked and registered). -type StakedNodeOperationInfo struct { - NodeID flow.Identifier - Role flow.Role - StakingAccountAddress sdk.Address - FullAccountKey *sdk.AccountKey - StakingAccountKey sdkcrypto.PrivateKey - NetworkingKey sdkcrypto.PrivateKey - StakingKey sdkcrypto.PrivateKey - // machine account info defined only for consensus/collection nodes - MachineAccountAddress sdk.Address - MachineAccountKey sdkcrypto.PrivateKey - MachineAccountPublicKey *sdk.AccountKey - ContainerName string -} - -// StakeNode will generate initial keys needed for a SN/LN node and onboard this node using the following steps: -// 1. Generate keys (networking, staking, machine) -// 2. Create a new account, this will be the staking account -// 3. Transfer token amount for the given role to the staking account -// 4. Add additional funds to staking account for storage -// 5. Create Staking collection for node -// 6. Register node using staking collection object -// 7. Add the node to the approved list -// -// NOTE: assumes staking occurs in first epoch (counter 0) -// NOTE 2: This function performs steps 1-6 in one custom transaction, to reduce -// the time taken by each test case. Individual transactions for each step can be -// found in Git history, for example: 9867056a8b7246655047bc457f9000398f6687c0. -func (s *Suite) StakeNode(ctx context.Context, env templates.Environment, role flow.Role) *StakedNodeOperationInfo { - - stakingAccountKey, networkingKey, stakingKey, machineAccountKey, machineAccountPubKey := s.generateAccountKeys(role) - nodeID := flow.MakeID(stakingKey.PublicKey().Encode()) - fullStakingAcctKey := sdk.NewAccountKey(). - SetPublicKey(stakingAccountKey.PublicKey()). - SetHashAlgo(sdkcrypto.SHA2_256). - SetWeight(sdk.AccountKeyWeightThreshold) - - _, stakeAmount, err := s.client.TokenAmountByRole(role) - require.NoError(s.T(), err) - - containerName := s.getTestContainerName(role) - - latestBlockID, err := s.client.GetLatestBlockID(ctx) - require.NoError(s.T(), err) - - // create and register node - tx, err := utils.MakeCreateAndSetupNodeTx( - env, - s.client.Account(), - sdk.Identifier(latestBlockID), - fullStakingAcctKey, - fmt.Sprintf("%f", stakeAmount+10.0), - nodeID, - role, - testnet.GetPrivateNodeInfoAddress(containerName), - strings.TrimPrefix(networkingKey.PublicKey().String(), "0x"), - strings.TrimPrefix(stakingKey.PublicKey().String(), "0x"), - machineAccountPubKey, - ) - require.NoError(s.T(), err) - - err = s.client.SignAndSendTransaction(ctx, tx) - require.NoError(s.T(), err) - result, err := s.client.WaitForSealed(ctx, tx.ID()) - require.NoError(s.T(), err) - s.client.Account().Keys[0].SequenceNumber++ - require.NoError(s.T(), result.Error) - - accounts := s.client.CreatedAccounts(result) - stakingAccountAddress := accounts[0] - var machineAccountAddr sdk.Address - if role == flow.RoleCollection || role == flow.RoleConsensus { - machineAccountAddr = accounts[1] - } - - result = s.SubmitSetApprovedListTx(ctx, env, append(s.net.Identities().NodeIDs(), nodeID)...) 
- require.NoError(s.T(), result.Error) - - // ensure we are still in staking auction - s.AssertInEpochPhase(ctx, 0, flow.EpochPhaseStaking) - - return &StakedNodeOperationInfo{ - NodeID: nodeID, - Role: role, - StakingAccountAddress: stakingAccountAddress, - FullAccountKey: fullStakingAcctKey, - StakingAccountKey: stakingAccountKey, - StakingKey: stakingKey, - NetworkingKey: networkingKey, - MachineAccountKey: machineAccountKey, - MachineAccountPublicKey: machineAccountPubKey, - MachineAccountAddress: machineAccountAddr, - ContainerName: containerName, - } -} - -// generates initial keys needed to bootstrap account -func (s *Suite) generateAccountKeys(role flow.Role) ( - operatorAccountKey, - networkingKey, - stakingKey, - machineAccountKey crypto.PrivateKey, - machineAccountPubKey *sdk.AccountKey, -) { - operatorAccountKey = unittest.PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen) - networkingKey = unittest.NetworkingPrivKeyFixture() - stakingKey = unittest.StakingPrivKeyFixture() - - // create a machine account - if role == flow.RoleConsensus || role == flow.RoleCollection { - machineAccountKey = unittest.PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen) - - machineAccountPubKey = &sdk.AccountKey{ - PublicKey: machineAccountKey.PublicKey(), - SigAlgo: machineAccountKey.PublicKey().Algorithm(), - HashAlgo: bootstrap.DefaultMachineAccountHashAlgo, - Weight: 1000, - } - } - - return -} - -// createAccount creates a new flow account, can be used to test staking -func (s *Suite) createAccount(ctx context.Context, - accountKey *sdk.AccountKey, - payerAccount *sdk.Account, - payer sdk.Address, -) (sdk.Address, error) { - latestBlockID, err := s.client.GetLatestBlockID(ctx) - require.NoError(s.T(), err) - - addr, err := s.client.CreateAccount(ctx, accountKey, payerAccount, payer, sdk.Identifier(latestBlockID)) - require.NoError(s.T(), err) - - payerAccount.Keys[0].SequenceNumber++ - return addr, nil -} - -// removeNodeFromProtocol removes the given node from the protocol. 
-// NOTE: assumes staking occurs in first epoch (counter 0) -func (s *Suite) removeNodeFromProtocol(ctx context.Context, env templates.Environment, nodeID flow.Identifier) { - result, err := s.submitAdminRemoveNodeTx(ctx, env, nodeID) - require.NoError(s.T(), err) - require.NoError(s.T(), result.Error) - - // ensure we submit transaction while in staking phase - s.AssertInEpochPhase(ctx, 0, flow.EpochPhaseStaking) -} - -// submitAdminRemoveNodeTx will submit the admin remove node transaction -func (s *Suite) submitAdminRemoveNodeTx(ctx context.Context, - env templates.Environment, - nodeID flow.Identifier, -) (*sdk.TransactionResult, error) { - latestBlockID, err := s.client.GetLatestBlockID(ctx) - require.NoError(s.T(), err) - - closeStakeTx, err := utils.MakeAdminRemoveNodeTx( - env, - s.client.Account(), - 0, - sdk.Identifier(latestBlockID), - nodeID, - ) - require.NoError(s.T(), err) - - err = s.client.SignAndSendTransaction(ctx, closeStakeTx) - require.NoError(s.T(), err) - - result, err := s.client.WaitForSealed(ctx, closeStakeTx.ID()) - require.NoError(s.T(), err) - s.client.Account().Keys[0].SequenceNumber++ - return result, nil -} - -func (s *Suite) ExecuteGetProposedTableScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { - v, err := s.client.ExecuteScriptBytes(ctx, templates.GenerateReturnProposedTableScript(env), []cadence.Value{}) - require.NoError(s.T(), err) - return v -} - -// ExecuteGetNodeInfoScript executes a script to get staking info about the given node. -func (s *Suite) ExecuteGetNodeInfoScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { - cdcNodeID, err := cadence.NewString(nodeID.String()) - require.NoError(s.T(), err) - v, err := s.client.ExecuteScriptBytes(ctx, templates.GenerateGetNodeInfoScript(env), []cadence.Value{cdcNodeID}) - require.NoError(s.T(), err) - return v -} - -// SubmitSetApprovedListTx adds a node to the approved node list, this must be done when a node joins the protocol during the epoch staking phase -func (s *Suite) SubmitSetApprovedListTx(ctx context.Context, env templates.Environment, identities ...flow.Identifier) *sdk.TransactionResult { - latestBlockID, err := s.client.GetLatestBlockID(ctx) - require.NoError(s.T(), err) - - idTableAddress := sdk.HexToAddress(env.IDTableAddress) - tx := sdk.NewTransaction(). - SetScript(templates.GenerateSetApprovedNodesScript(env)). - SetGasLimit(9999). - SetReferenceBlockID(sdk.Identifier(latestBlockID)). - SetProposalKey(s.client.SDKServiceAddress(), 0, s.client.Account().Keys[0].SequenceNumber). - SetPayer(s.client.SDKServiceAddress()). 
- AddAuthorizer(idTableAddress) - err = tx.AddArgument(blueprints.SetStakingAllowlistTxArg(identities)) - require.NoError(s.T(), err) - - err = s.client.SignAndSendTransaction(ctx, tx) - require.NoError(s.T(), err) - - result, err := s.client.WaitForSealed(ctx, tx.ID()) - require.NoError(s.T(), err) - s.client.Account().Keys[0].SequenceNumber++ - - return result -} - -// ExecuteReadApprovedNodesScript executes the return proposal table script and returns a list of approved nodes -func (s *Suite) ExecuteReadApprovedNodesScript(ctx context.Context, env templates.Environment) cadence.Value { - v, err := s.client.ExecuteScriptBytes(ctx, templates.GenerateGetApprovedNodesScript(env), []cadence.Value{}) - require.NoError(s.T(), err) - - return v -} - -// getTestContainerName returns a name for a test container in the form of ${role}_${nodeID}_test -func (s *Suite) getTestContainerName(role flow.Role) string { - i := len(s.net.ContainersByRole(role)) + 1 - return fmt.Sprintf("%s_test_%d", role, i) -} - -// assertNodeApprovedAndProposed executes the read approved nodes list and get proposed table scripts -// and checks that the info.NodeID is in both list -func (s *Suite) assertNodeApprovedAndProposed(ctx context.Context, env templates.Environment, info *StakedNodeOperationInfo) { - // ensure node ID in approved list - //approvedNodes := s.ExecuteReadApprovedNodesScript(ctx, env) - //require.Containsf(s.T(), approvedNodes.(cadence.Array).Values, cadence.String(info.NodeID.String()), "expected new node to be in approved nodes list: %x", info.NodeID) - - // Access Nodes go through a separate selection process, so they do not immediately - // appear on the proposed table -- skip checking for them here. - if info.Role == flow.RoleAccess { - s.T().Logf("skipping checking proposed table for joining Access Node") - return - } - - // check if node is in proposed table - proposedTable := s.ExecuteGetProposedTableScript(ctx, env, info.NodeID) - require.Containsf(s.T(), proposedTable.(cadence.Array).Values, cadence.String(info.NodeID.String()), "expected new node to be in proposed table: %x", info.NodeID) -} - -// newTestContainerOnNetwork configures a new container on the suites network -func (s *Suite) newTestContainerOnNetwork(role flow.Role, info *StakedNodeOperationInfo) *testnet.Container { - containerConfigs := []func(config *testnet.NodeConfig){ - testnet.WithLogLevel(zerolog.WarnLevel), - testnet.WithID(info.NodeID), - } - - nodeConfig := testnet.NewNodeConfig(role, containerConfigs...) 
- testContainerConfig := testnet.NewContainerConfig(info.ContainerName, nodeConfig, info.NetworkingKey, info.StakingKey) - err := testContainerConfig.WriteKeyFiles(s.net.BootstrapDir, info.MachineAccountAddress, encodable.MachineAccountPrivKey{PrivateKey: info.MachineAccountKey}, role) - require.NoError(s.T(), err) - - //add our container to the network - err = s.net.AddNode(s.T(), s.net.BootstrapDir, testContainerConfig) - require.NoError(s.T(), err, "failed to add container to network") - - // if node is of LN/SN role type add additional flags to node container for secure GRPC connection - if role == flow.RoleConsensus || role == flow.RoleCollection { - // ghost containers don't participate in the network skip any SN/LN ghost containers - nodeContainer := s.net.ContainerByID(testContainerConfig.NodeID) - nodeContainer.AddFlag("insecure-access-api", "false") - - accessNodeIDS := make([]string, 0) - for _, c := range s.net.ContainersByRole(flow.RoleAccess) { - if c.Config.Role == flow.RoleAccess && !c.Config.Ghost { - accessNodeIDS = append(accessNodeIDS, c.Config.NodeID.String()) - } - } - nodeContainer.AddFlag("access-node-ids", strings.Join(accessNodeIDS, ",")) - } - - return s.net.ContainerByID(info.NodeID) -} - -// StakeNewNode will stake a new node, and create the corresponding docker container for that node -func (s *Suite) StakeNewNode(ctx context.Context, env templates.Environment, role flow.Role) (*StakedNodeOperationInfo, *testnet.Container) { - // stake our new node - info := s.StakeNode(ctx, env, role) - - // make sure our node is in the approved nodes list and the proposed nodes table - s.assertNodeApprovedAndProposed(ctx, env, info) - - // add a new container to the network with the info used to stake our node - testContainer := s.newTestContainerOnNetwork(role, info) - - return info, testContainer -} - -// getContainerToReplace return a container from the network, make sure the container is not a ghost -func (s *Suite) getContainerToReplace(role flow.Role) *testnet.Container { - nodes := s.net.ContainersByRole(role) - require.True(s.T(), len(nodes) > 0) - - for _, c := range nodes { - if !c.Config.Ghost { - return c - } - } - - return nil -} - -// AwaitEpochPhase waits for the given phase, in the given epoch. -func (s *Suite) AwaitEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase, waitFor, tick time.Duration) { - condition := func() bool { - snapshot, err := s.client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - - actualEpoch, err := snapshot.Epochs().Current().Counter() - require.NoError(s.T(), err) - actualPhase, err := snapshot.Phase() - require.NoError(s.T(), err) - - return actualEpoch == expectedEpoch && actualPhase == expectedPhase - } - require.Eventuallyf(s.T(), condition, waitFor, tick, "did not reach expectedEpoch %d phase %s within %s", expectedEpoch, expectedPhase, waitFor) -} - -// AssertInEpochPhase checks if we are in the phase of the given epoch. 
-func (s *Suite) AssertInEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase) { - snapshot, err := s.client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - actualEpoch, err := snapshot.Epochs().Current().Counter() - require.NoError(s.T(), err) - actualPhase, err := snapshot.Phase() - require.NoError(s.T(), err) - require.Equal(s.T(), expectedPhase, actualPhase, "not in correct phase") - require.Equal(s.T(), expectedEpoch, actualEpoch, "not in correct epoch") - - head, err := snapshot.Head() - require.NoError(s.T(), err) - s.TimedLogf("asserted in epoch %d, phase %s, finalized height/view: %d/%d", expectedEpoch, expectedPhase, head.Height, head.View) -} - -// AssertInEpoch requires actual epoch counter is equal to counter provided. -func (s *Suite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) { - snapshot, err := s.client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - actualEpoch, err := snapshot.Epochs().Current().Counter() - require.NoError(s.T(), err) - require.Equalf(s.T(), expectedEpoch, actualEpoch, "expected to be in epoch %d got %d", expectedEpoch, actualEpoch) -} - -// AssertNodeNotParticipantInEpoch asserts that the given node ID does not exist -// in the epoch's identity table. -func (s *Suite) AssertNodeNotParticipantInEpoch(epoch protocol.Epoch, nodeID flow.Identifier) { - identities, err := epoch.InitialIdentities() - require.NoError(s.T(), err) - require.NotContains(s.T(), identities.NodeIDs(), nodeID) -} - -// AwaitSealedBlockHeightExceedsSnapshot polls until it observes that the latest -// sealed block height has exceeded the snapshot height by numOfBlocks -// the snapshot height and latest finalized height is greater than numOfBlocks. -func (s *Suite) AwaitSealedBlockHeightExceedsSnapshot(ctx context.Context, snapshot *inmem.Snapshot, threshold uint64, waitFor, tick time.Duration) { - header, err := snapshot.Head() - require.NoError(s.T(), err) - snapshotHeight := header.Height - - require.Eventually(s.T(), func() bool { - latestSealed := s.getLatestSealedHeader(ctx) - s.TimedLogf("waiting for sealed block height: %d+%d < %d", snapshotHeight, threshold, latestSealed.Height) - return snapshotHeight+threshold < latestSealed.Height - }, waitFor, tick) -} - -// AwaitFinalizedView polls until it observes that the latest finalized block has a view -// greater than or equal to the input view. This is used to wait until when an epoch -// transition must have happened. -func (s *Suite) AwaitFinalizedView(ctx context.Context, view uint64, waitFor, tick time.Duration) { - require.Eventually(s.T(), func() bool { - sealed := s.getLatestFinalizedHeader(ctx) - return sealed.View >= view - }, waitFor, tick) -} - -// getLatestSealedHeader retrieves the latest sealed block, as reported in LatestSnapshot. -func (s *Suite) getLatestSealedHeader(ctx context.Context) *flow.Header { - snapshot, err := s.client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - segment, err := snapshot.SealingSegment() - require.NoError(s.T(), err) - sealed := segment.Sealed() - return sealed.Header -} - -// getLatestFinalizedHeader retrieves the latest finalized block, as reported in LatestSnapshot. 
-func (s *Suite) getLatestFinalizedHeader(ctx context.Context) *flow.Header { - snapshot, err := s.client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - finalized, err := snapshot.Head() - require.NoError(s.T(), err) - return finalized -} - -// submitSmokeTestTransaction will submit a create account transaction to smoke test network -// This ensures a single transaction can be sealed by the network. -func (s *Suite) submitSmokeTestTransaction(ctx context.Context) { - fullAccountKey := sdk.NewAccountKey(). - SetPublicKey(unittest.PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen).PublicKey()). - SetHashAlgo(sdkcrypto.SHA2_256). - SetWeight(sdk.AccountKeyWeightThreshold) - - // createAccount will submit a create account transaction and wait for it to be sealed - _, err := s.createAccount( - ctx, - fullAccountKey, - s.client.Account(), - s.client.SDKServiceAddress(), - ) - require.NoError(s.T(), err) -} - -// assertNetworkHealthyAfterANChange performs a basic network health check after replacing an access node. -// 1. Check that there is no problem connecting directly to the AN provided and retrieve a protocol snapshot -// 2. Check that the chain moved at least 20 blocks from when the node was bootstrapped by comparing -// head of the rootSnapshot with the head of the snapshot we retrieved directly from the AN -// 3. Check that we can execute a script on the AN -// -// TODO test sending and observing result of a transaction via the new AN (blocked by https://github.com/onflow/flow-go/issues/3642) -func (s *Suite) assertNetworkHealthyAfterANChange(ctx context.Context, env templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, info *StakedNodeOperationInfo) { - - // get snapshot directly from new AN and compare head with head from the - // snapshot that was used to bootstrap the node - client, err := s.net.ContainerByName(info.ContainerName).TestnetClient() - require.NoError(s.T(), err) - - // overwrite client to point to the new AN (since we have stopped the initial AN at this point) - s.client = client - // assert atleast 20 blocks have been finalized since the node replacement - s.AwaitSealedBlockHeightExceedsSnapshot(ctx, snapshotInSecondEpoch, 10, 30*time.Second, time.Millisecond*100) - - // execute script directly on new AN to ensure it's functional - proposedTable, err := client.ExecuteScriptBytes(ctx, templates.GenerateReturnProposedTableScript(env), []cadence.Value{}) - require.NoError(s.T(), err) - require.Contains(s.T(), proposedTable.(cadence.Array).Values, cadence.String(info.NodeID.String()), "expected node ID to be present in proposed table returned by new AN.") -} - -// assertNetworkHealthyAfterVNChange performs a basic network health check after replacing a verification node. -// 1. Ensure sealing continues into the second epoch (post-replacement) by observing -// at least 10 blocks of sealing progress within the epoch -func (s *Suite) assertNetworkHealthyAfterVNChange(ctx context.Context, _ templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, _ *StakedNodeOperationInfo) { - s.AwaitSealedBlockHeightExceedsSnapshot(ctx, snapshotInSecondEpoch, 10, 30*time.Second, time.Millisecond*100) -} - -// assertNetworkHealthyAfterLNChange performs a basic network health check after replacing a collection node. -// 1. Submit transaction to network that will target the newly staked LN by making -// sure the reference block ID is after the first epoch. 
-func (s *Suite) assertNetworkHealthyAfterLNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { - // At this point we have reached the second epoch and our new LN is the only LN in the network. - // To validate the LN joined the network successfully and is processing transactions we create - // an account, which submits a transaction and verifies it is sealed. - s.submitSmokeTestTransaction(ctx) -} - -// assertNetworkHealthyAfterSNChange performs a basic network health check after replacing a consensus node. -// The runTestEpochJoinAndLeave function running prior to this health check already asserts that we successfully: -// 1. enter the second epoch (DKG succeeds; epoch fallback is not triggered) -// 2. seal at least the first block within the second epoch (consensus progresses into second epoch). -// -// The test is configured so that one offline committee member is enough to prevent progress, -// therefore the newly joined consensus node must be participating in consensus. -// -// In addition, here, we submit a transaction and verify that it is sealed. -func (s *Suite) assertNetworkHealthyAfterSNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { - s.submitSmokeTestTransaction(ctx) -} - -// runTestEpochJoinAndLeave coordinates adding and removing one node with the given -// role during the first epoch, then running the network health validation function -// once the network has successfully transitioned into the second epoch. -// -// This tests: -// * that nodes can stake and join the network at an epoch boundary -// * that nodes can unstake and leave the network at an epoch boundary -// * role-specific network health validation after the swap has completed -func (s *Suite) runTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth nodeUpdateValidation) { - - env := utils.LocalnetEnv() - - var containerToReplace *testnet.Container - - // replace access_2, avoid replacing access_1 the container used for client connections - if role == flow.RoleAccess { - containerToReplace = s.net.ContainerByName("access_2") - require.NotNil(s.T(), containerToReplace) - } else { - // grab the first container of this node role type, this is the container we will replace - containerToReplace = s.getContainerToReplace(role) - require.NotNil(s.T(), containerToReplace) - } - - // staking our new node and add get the corresponding container for that node - s.TimedLogf("staking joining node with role %s", role.String()) - info, testContainer := s.StakeNewNode(s.ctx, env, role) - s.TimedLogf("successfully staked joining node: %s", info.NodeID) - - // use admin transaction to remove node, this simulates a node leaving the network - s.TimedLogf("removing node %s with role %s", containerToReplace.Config.NodeID, role.String()) - s.removeNodeFromProtocol(s.ctx, env, containerToReplace.Config.NodeID) - s.TimedLogf("successfully removed node: %s", containerToReplace.Config.NodeID) - - // wait for epoch setup phase before we start our container and pause the old container - s.TimedLogf("waiting for EpochSetup phase of first epoch to begin") - s.AwaitEpochPhase(s.ctx, 0, flow.EpochPhaseSetup, 3*time.Minute, 500*time.Millisecond) - s.TimedLogf("successfully reached EpochSetup phase of first epoch") - - // get the latest snapshot and start new container with it - rootSnapshot, err := s.client.GetLatestProtocolSnapshot(s.ctx) - require.NoError(s.T(), err) - - header, err := rootSnapshot.Head() - require.NoError(s.T(), err) - 
segment, err := rootSnapshot.SealingSegment() - require.NoError(s.T(), err) - - s.TimedLogf("retrieved header after entering EpochSetup phase: root_height=%d, root_view=%d, segment_heights=[%d-%d], segment_views=[%d-%d]", - header.Height, header.View, - segment.Sealed().Header.Height, segment.Highest().Header.Height, - segment.Sealed().Header.View, segment.Highest().Header.View) - - testContainer.WriteRootSnapshot(rootSnapshot) - testContainer.Container.Start(s.ctx) - - epoch1FinalView, err := rootSnapshot.Epochs().Current().FinalView() - require.NoError(s.T(), err) - - // wait for at least the first block of the next epoch to be sealed before we pause our container to replace - s.TimedLogf("waiting for epoch transition (finalized view %d) before pausing container", epoch1FinalView+1) - s.AwaitFinalizedView(s.ctx, epoch1FinalView+1, 4*time.Minute, 500*time.Millisecond) - s.TimedLogf("observed finalized view %d -> pausing container", epoch1FinalView+1) - - // make sure container to replace is not a member of epoch 2 - s.AssertNodeNotParticipantInEpoch(rootSnapshot.Epochs().Next(), containerToReplace.Config.NodeID) - - // assert transition to second epoch happened as expected - // if counter is still 0, epoch emergency fallback was triggered and we can fail early - s.AssertInEpoch(s.ctx, 1) - - err = containerToReplace.Pause() - require.NoError(s.T(), err) - - // retrieve a snapshot after observing that we have entered the second epoch - secondEpochSnapshot, err := s.client.GetLatestProtocolSnapshot(s.ctx) - require.NoError(s.T(), err) - - // make sure the network is healthy after adding new node - checkNetworkHealth(s.ctx, env, secondEpochSnapshot, info) -} - -// DynamicEpochTransitionSuite is the suite used for epoch transitions tests -// with a dynamic identity table. -type DynamicEpochTransitionSuite struct { - Suite -} - -func (s *DynamicEpochTransitionSuite) SetupTest() { - // use a longer staking auction length to accommodate staking operations for joining/leaving nodes - // NOTE: this value is set fairly aggressively to ensure shorter test times. - // If flakiness due to failure to complete staking operations in time is observed, - // try increasing (by 10-20 views). 
- s.StakingAuctionLen = 50 - s.DKGPhaseLen = 50 - s.EpochLen = 250 - s.EpochCommitSafetyThreshold = 20 - - // run the generic setup, which starts up the network - s.Suite.SetupTest() -} diff --git a/integration/tests/execution/chunk_data_pack_test.go b/integration/tests/execution/chunk_data_pack_test.go index 566e45c57d1..2ad5843b973 100644 --- a/integration/tests/execution/chunk_data_pack_test.go +++ b/integration/tests/execution/chunk_data_pack_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/suite" sdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/proof" @@ -30,10 +31,10 @@ func (gs *ChunkDataPacksSuite) TestVerificationNodesRequestChunkDataPacks() { // wait for next height finalized (potentially first height), called blockA currentFinalized := gs.BlockState.HighestFinalizedHeight() blockA := gs.BlockState.WaitForHighestFinalizedProgress(gs.T(), currentFinalized) - gs.T().Logf("got blockA height %v ID %v", blockA.Header.Height, blockA.Header.ID()) + gs.T().Logf("got blockA height %v ID %v", blockA.Height, blockA.ID()) // wait for execution receipt for blockA from execution node 1 - erExe1BlockA := gs.ReceiptState.WaitForReceiptFrom(gs.T(), blockA.Header.ID(), gs.exe1ID) + erExe1BlockA := gs.ReceiptState.WaitForReceiptFrom(gs.T(), blockA.ID(), gs.exe1ID) finalStateErExec1BlockA, err := erExe1BlockA.ExecutionResult.FinalStateCommitment() require.NoError(gs.T(), err) gs.T().Logf("got erExe1BlockA with SC %x", finalStateErExec1BlockA) @@ -43,15 +44,14 @@ func (gs *ChunkDataPacksSuite) TestVerificationNodesRequestChunkDataPacks() { "expected no ChunkDataRequest to be sent before a transaction existed") // send transaction - err = gs.AccessClient().DeployContract(context.Background(), sdk.Identifier(gs.net.Root().ID()), lib.CounterContract) + tx, err := gs.AccessClient().DeployContract(context.Background(), sdk.Identifier(gs.net.Root().ID()), lib.CounterContract) require.NoError(gs.T(), err, "could not deploy counter") - // wait until we see a different state commitment for a finalized block, call that block blockB - blockB, _ := lib.WaitUntilFinalizedStateCommitmentChanged(gs.T(), gs.BlockState, gs.ReceiptState) - gs.T().Logf("got blockB height %v ID %v", blockB.Header.Height, blockB.Header.ID()) + txRes, err := gs.AccessClient().WaitForExecuted(context.Background(), tx.ID()) + require.NoError(gs.T(), err, "could not wait for tx to be executed") // wait for execution receipt for blockB from execution node 1 - erExe1BlockB := gs.ReceiptState.WaitForReceiptFrom(gs.T(), blockB.Header.ID(), gs.exe1ID) + erExe1BlockB := gs.ReceiptState.WaitForReceiptFrom(gs.T(), flow.Identifier(txRes.BlockID), gs.exe1ID) finalStateErExec1BlockB, err := erExe1BlockB.ExecutionResult.FinalStateCommitment() require.NoError(gs.T(), err) gs.T().Logf("got erExe1BlockB with SC %x", finalStateErExec1BlockB) @@ -76,7 +76,7 @@ func (gs *ChunkDataPacksSuite) TestVerificationNodesRequestChunkDataPacks() { // wait for ChunkDataResponse msg2 := gs.MsgState.WaitForMsgFrom(gs.T(), lib.MsgIsChunkDataPackResponse, gs.exe1ID, "chunk data response from execution node") - pack2 := msg2.(*messages.ChunkDataResponse) + pack2 := msg2.(*flow.ChunkDataResponse) require.Equal(gs.T(), chunkID, pack2.ChunkDataPack.ChunkID) require.Equal(gs.T(), erExe1BlockB.ExecutionResult.Chunks[0].StartState, pack2.ChunkDataPack.StartState) diff --git a/integration/tests/execution/failing_tx_reverted_test.go 
b/integration/tests/execution/failing_tx_reverted_test.go
index e5de67022d3..1d3818c2b00 100644
--- a/integration/tests/execution/failing_tx_reverted_test.go
+++ b/integration/tests/execution/failing_tx_reverted_test.go
@@ -10,6 +10,7 @@ import (
 	sdk "github.com/onflow/flow-go-sdk"
 
 	"github.com/onflow/flow-go/integration/tests/lib"
+	"github.com/onflow/flow-go/model/flow"
 )
 
 func TestExecutionFailingTxReverted(t *testing.T) {
@@ -22,61 +23,65 @@ type FailingTxRevertedSuite struct {
 
 func (s *FailingTxRevertedSuite) TestExecutionFailingTxReverted() {
 
-	chainID := s.net.Root().Header.ChainID
+	chainID := s.net.Root().ChainID
 	chain := chainID.Chain()
 	serviceAddress := chain.ServiceAddress()
 
 	// wait for next height finalized (potentially first height), called blockA
 	currentFinalized := s.BlockState.HighestFinalizedHeight()
 	blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized)
-	s.T().Logf("got blockA height %v ID %v\n", blockA.Header.Height, blockA.Header.ID())
+	s.T().Logf("got blockA height %v ID %v\n", blockA.Height, blockA.ID())
 
 	// send transaction
-	err := s.AccessClient().DeployContract(context.Background(), sdk.Identifier(s.net.Root().ID()), lib.CounterContract)
+	tx, err := s.AccessClient().DeployContract(context.Background(), sdk.Identifier(s.net.Root().ID()), lib.CounterContract)
 	require.NoError(s.T(), err, "could not deploy counter")
 
-	// wait until we see a different state commitment for a finalized block, call that block blockB
-	blockB, erBlockB := lib.WaitUntilFinalizedStateCommitmentChanged(s.T(), s.BlockState, s.ReceiptState)
-	s.T().Logf("got blockB height %v ID %v\n", blockB.Header.Height, blockB.Header.ID())
-
-	// final states
-	finalStateBlockB, err := erBlockB.ExecutionResult.FinalStateCommitment()
-	require.NoError(s.T(), err)
+	_, err = s.AccessClient().WaitForExecuted(context.Background(), tx.ID())
+	require.NoError(s.T(), err, "could not wait for tx to be executed")
 
 	// send transaction that panics and should revert
-	tx := lib.SDKTransactionFixture(
+	failingTx := lib.SDKTransactionFixture(
 		lib.WithTransactionDSL(lib.CreateCounterPanicTx(chain)),
 		lib.WithReferenceBlock(sdk.Identifier(s.net.Root().ID())),
 		lib.WithChainID(chainID),
 	)
 
-	err = s.AccessClient().SendTransaction(context.Background(), &tx)
+	err = s.AccessClient().SendTransaction(context.Background(), &failingTx)
 	require.NoError(s.T(), err, "could not send tx to create counter that should panic")
 
+	txResult, err := s.AccessClient().WaitForExecuted(context.Background(), failingTx.ID())
+	require.NoError(s.T(), err, "could not wait for tx to be executed")
+
+	erBlock := s.ReceiptState.WaitForReceiptFrom(s.T(), flow.Identifier(txResult.BlockID), s.exe1ID)
+	s.T().Logf("got execution result for block ID %v\n", erBlock.ExecutionResult.BlockID)
+
+	// expected two chunks (one for the transaction, one for the system chunk)
+	require.Len(s.T(), erBlock.Chunks, 2)
+
+	// assert that state did not change in the first chunk
+	require.Equal(s.T(), erBlock.Chunks[0].StartState, erBlock.Chunks[0].EndState)
+
 	// send transaction that has no sigs and should not execute
-	tx = lib.SDKTransactionFixture(
+	failingTx = lib.SDKTransactionFixture(
 		lib.WithTransactionDSL(lib.CreateCounterTx(sdk.Address(serviceAddress))),
 		lib.WithReferenceBlock(sdk.Identifier(s.net.Root().ID())),
 		lib.WithChainID(chainID),
 	)
-	tx.PayloadSignatures = nil
-	tx.EnvelopeSignatures = nil
+	failingTx.PayloadSignatures = nil
+	failingTx.EnvelopeSignatures = nil
 
-	currentProposed := s.BlockState.HighestProposedHeight()
-	err = s.AccessClient().SendTransaction(context.Background(), &tx)
+	err = s.AccessClient().SendTransaction(context.Background(), &failingTx)
 	require.NoError(s.T(), err, "could not send tx to create counter with wrong sig")
 
-	// wait until the next proposed block is finalized, called blockC
-	blockC := s.BlockState.WaitUntilNextHeightFinalized(s.T(), currentProposed)
-	s.T().Logf("got blockC height %v ID %v\n", blockC.Header.Height, blockC.Header.ID())
+	txResult, err = s.AccessClient().WaitForExecuted(context.Background(), failingTx.ID())
+	require.NoError(s.T(), err, "could not wait for tx to be executed")
 
-	// wait for execution receipt for blockC from execution node 1
-	erBlockC := s.ReceiptState.WaitForReceiptFrom(s.T(), blockC.Header.ID(), s.exe1ID)
-	finalStateBlockC, err := erBlockC.ExecutionResult.FinalStateCommitment()
-	require.NoError(s.T(), err)
+	erBlock = s.ReceiptState.WaitForReceiptFrom(s.T(), flow.Identifier(txResult.BlockID), s.exe1ID)
+	s.T().Logf("got execution result for block ID %v\n", erBlock.ExecutionResult.BlockID)
 
-	s.T().Logf("got erBlockC with SC %x\n", finalStateBlockC)
+	// expected two chunks (one for the transaction, one for the system chunk)
+	require.Len(s.T(), erBlock.Chunks, 2)
 
-	// assert that state did not change between blockB and blockC
-	require.Equal(s.T(), finalStateBlockB, finalStateBlockC)
+	// assert that state did not change in the first chunk
+	require.Equal(s.T(), erBlock.Chunks[0].StartState, erBlock.Chunks[0].EndState)
 }
diff --git a/integration/tests/execution/scheduled_callbacks_test.go b/integration/tests/execution/scheduled_callbacks_test.go
new file mode 100644
index 00000000000..adddf92040d
--- /dev/null
+++ b/integration/tests/execution/scheduled_callbacks_test.go
@@ -0,0 +1,249 @@
+package execution
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/onflow/cadence"
+	sdk "github.com/onflow/flow-go-sdk"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+	"github.com/onflow/flow-go/integration/tests/lib"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/dsl"
+)
+
+func TestScheduledCallbacks(t *testing.T) {
+	suite.Run(t, new(ScheduledCallbacksSuite))
+}
+
+type ScheduledCallbacksSuite struct {
+	Suite
+}
+
+func (s *ScheduledCallbacksSuite) TestScheduleCallback_DeployAndGetStatus() {
+	// wait for next height finalized (potentially first height)
+	currentFinalized := s.BlockState.HighestFinalizedHeight()
+	blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized)
+	s.T().Logf("got blockA height %v ID %v", blockA.HeaderBody.Height, blockA.ID())
+
+	// Execute script to call getStatus(id: 10) on the contract
+	result, ok := s.getCallbackStatus(10)
+	s.T().Logf("result: %v, ok: %v", result, ok)
+	require.False(s.T(), ok, "getStatus(10) should return false for non-existent callback")
+
+	// Wait for a block to be executed to ensure everything is processed
+	blockB := s.BlockState.WaitForHighestFinalizedProgress(s.T(), blockA.HeaderBody.Height)
+	erBlock := s.ReceiptState.WaitForReceiptFrom(s.T(), flow.Identifier(blockB.ID()), s.exe1ID)
+	s.T().Logf("got block result ID %v", erBlock.ExecutionResult.BlockID)
+}
+
+func (s *ScheduledCallbacksSuite) TestScheduleCallback_ScheduledAndExecuted() {
+	sc := systemcontracts.SystemContractsForChain(s.net.Root().HeaderBody.ChainID)
+
+	// Wait for next height finalized (potentially first height)
+	currentFinalized := s.BlockState.HighestFinalizedHeight()
+	blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized)
+	s.T().Logf("got blockA height %v ID %v", blockA.HeaderBody.Height, blockA.ID())
+
+	// Deploy the test contract first
+	err := lib.DeployScheduledCallbackTestContract(
+		s.AccessClient(),
+		sdk.Address(sc.FlowCallbackScheduler.Address),
+		sdk.Address(sc.FlowToken.Address),
+		sdk.Address(sc.FungibleToken.Address),
+		sdk.Identifier(s.net.Root().ID()),
+	)
+	require.NoError(s.T(), err, "could not deploy test contract")
+
+	// Wait for next height finalized before scheduling callback
+	s.BlockState.WaitForHighestFinalizedProgress(s.T(), s.BlockState.HighestFinalizedHeight())
+
+	// Schedule a callback for 10 seconds in the future
+	scheduleDelta := int64(10)
+	futureTimestamp := time.Now().Unix() + scheduleDelta
+
+	s.T().Logf("scheduling callback at timestamp: %v, current timestamp: %v", futureTimestamp, time.Now().Unix())
+	callbackID, err := lib.ScheduleCallbackAtTimestamp(
+		futureTimestamp,
+		s.AccessClient(),
+		sdk.Address(sc.FlowCallbackScheduler.Address),
+		sdk.Address(sc.FlowToken.Address),
+		sdk.Address(sc.FungibleToken.Address),
+	)
+	require.NoError(s.T(), err, "could not schedule callback transaction")
+	s.T().Logf("scheduled callback with ID: %d", callbackID)
+
+	const scheduledStatus = 1
+	const executedStatus = 2
+
+	// Check the status of the callback right after scheduling
+	status, ok := s.getCallbackStatus(callbackID)
+	require.True(s.T(), ok, "callback status should not be nil after scheduling")
+	require.Equal(s.T(), scheduledStatus, status, "status should be equal to scheduled")
+	s.T().Logf("callback status after scheduling: %v", status)
+
+	// Verify the callback is scheduled (not executed yet)
+	executedCallbacks := s.getExecutedCallbacks()
+	require.NotContains(s.T(), executedCallbacks, callbackID, "callback should not be executed immediately")
+
+	// Wait to ensure the callback has time to be executed (schedule delta plus a small buffer)
+	s.T().Log("waiting for callback execution...")
+	time.Sleep(time.Duration(scheduleDelta)*time.Second + 2*time.Second)
+
+	// Check the status again - it should still exist but be marked as executed
+	statusAfter, ok := s.getCallbackStatus(callbackID)
+	require.True(s.T(), ok, "callback status should not be nil after execution")
+	require.Equal(s.T(), executedStatus, statusAfter, "status should be equal to executed")
+
+	// Verify the callback was executed by checking our test contract
+	executedCallbacksAfter := s.getExecutedCallbacks()
+	s.T().Logf("executed callbacks: %v", executedCallbacksAfter)
+	require.Len(s.T(), executedCallbacksAfter, 1, "should have exactly one executed callback")
+	require.Contains(s.T(), executedCallbacksAfter, callbackID, "callback should have been executed")
+}
+
+func (s *ScheduledCallbacksSuite) TestScheduleCallback_ScheduleAndCancelCallback() {
+	sc := systemcontracts.SystemContractsForChain(s.net.Root().HeaderBody.ChainID)
+
+	// Wait for next height finalized (potentially first height)
+	currentFinalized := s.BlockState.HighestFinalizedHeight()
+	blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized)
+	s.T().Logf("got blockA height %v ID %v", blockA.HeaderBody.Height, blockA.ID())
+
+	// Deploy the test contract first
+	err := lib.DeployScheduledCallbackTestContract(
+		s.AccessClient(),
+		sdk.Address(sc.FlowCallbackScheduler.Address),
+		sdk.Address(sc.FlowToken.Address),
+		sdk.Address(sc.FungibleToken.Address),
+		sdk.Identifier(s.net.Root().ID()),
+	)
+	require.NoError(s.T(), err, "could not deploy test contract")
+
+	// Wait for next height finalized before scheduling callback
+	s.BlockState.WaitForHighestFinalizedProgress(s.T(), s.BlockState.HighestFinalizedHeight())
+
+	// Schedule a callback for 10 seconds in the future
+	scheduleDelta := int64(10)
+	futureTimestamp := time.Now().Unix() + scheduleDelta
+
+	s.T().Logf("scheduling callback at timestamp: %v, current timestamp: %v", futureTimestamp, time.Now().Unix())
+	callbackID, err := lib.ScheduleCallbackAtTimestamp(
+		futureTimestamp,
+		s.AccessClient(),
+		sdk.Address(sc.FlowCallbackScheduler.Address),
+		sdk.Address(sc.FlowToken.Address),
+		sdk.Address(sc.FungibleToken.Address),
+	)
+	require.NoError(s.T(), err, "could not schedule callback transaction")
+	s.T().Logf("scheduled callback with ID: %d", callbackID)
+
+	const scheduledStatus = 1
+	const canceledStatus = 3
+
+	// Wait a fraction of the scheduled delta before checking the status
+	s.T().Log("waiting before canceling the callback...")
+	time.Sleep(2 * time.Second)
+
+	// Check the status of the callback
+	status, ok := s.getCallbackStatus(callbackID)
+	require.True(s.T(), ok, "callback status should not be nil after scheduling")
+	require.Equal(s.T(), scheduledStatus, status, "status should be equal to scheduled")
+	s.T().Logf("callback status after scheduling: %v", status)
+
+	// Verify the callback is scheduled (not executed yet)
+	executedCallbacks := s.getExecutedCallbacks()
+	require.NotContains(s.T(), executedCallbacks, callbackID, "callback should not be executed immediately")
+
+	// Cancel the callback
+	canceledID, err := lib.CancelCallbackByID(
+		callbackID,
+		s.AccessClient(),
+		sdk.Address(sc.FlowCallbackScheduler.Address),
+		sdk.Address(sc.FlowToken.Address),
+		sdk.Address(sc.FungibleToken.Address),
+	)
+	require.NoError(s.T(), err, "could not cancel callback transaction")
+	require.Equal(s.T(), callbackID, canceledID, "canceled callback ID should be the same as scheduled")
+
+	// Wait for the callback's scheduled time to pass to make sure it was not executed
+	time.Sleep(time.Duration(scheduleDelta) * time.Second)
+
+	// Check the status of the callback
+	status, ok = s.getCallbackStatus(callbackID)
+	require.True(s.T(), ok, "callback status should not be nil after cancellation")
+	require.Equal(s.T(), canceledStatus, status, "status should be equal to canceled")
+}
+
+func (s *ScheduledCallbacksSuite) getCallbackStatus(callbackID uint64) (int, bool) {
+	getStatusScript := dsl.Main{
+		Import: dsl.Import{
+			Address: s.AccessClient().SDKServiceAddress(),
+			Names:   []string{"FlowTransactionScheduler"},
+		},
+		ReturnType: "FlowTransactionScheduler.Status?",
+		Code:       fmt.Sprintf("return FlowTransactionScheduler.getStatus(id: %d)", callbackID),
+	}
+
+	latest, err := s.AccessClient().GetLatestFinalizedBlockHeader(context.Background())
+	require.NoError(s.T(), err, "could not get latest finalized block header")
+
+	result, err := s.AccessClient().ExecuteScriptAtBlock(context.Background(), getStatusScript, latest.ID)
+	require.NoError(s.T(), err, "could not execute getStatus script")
+
+	optionalResult, ok := result.(cadence.Optional)
+	require.True(s.T(), ok, "result should be a cadence.Optional")
+
+	if optionalResult.Value == nil {
+		return 0, false
+	}
+
+	enumValue, ok := optionalResult.Value.(cadence.Enum)
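+	// Descriptive note: the script returns `FlowTransactionScheduler.Status?`; a non-nil
+	// value decodes as a cadence.Enum whose case is exposed through its `rawValue` field.
+	// This assumes the contract backs the Status enum with UInt8, matching the assertion below.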
+	require.True(s.T(), ok, "status should be a cadence.Enum")
+
+	raw := enumValue.FieldsMappedByName()["rawValue"]
+	val, ok := raw.(cadence.UInt8)
+	require.True(s.T(), ok, "status should be a cadence.UInt8")
+
+	return int(val), true
+}
+
+func (s *ScheduledCallbacksSuite) getExecutedCallbacks() []uint64 {
+	getExecutedScript := dsl.Main{
+		Import: dsl.Import{
+			Address: s.AccessClient().SDKServiceAddress(),
+			Names:   []string{"TestFlowCallbackHandler"},
+		},
+		ReturnType: "[UInt64]",
+		Code:       "return TestFlowCallbackHandler.getExecutedCallbacks()",
+	}
+
+	latest, err := s.AccessClient().GetLatestFinalizedBlockHeader(context.Background())
+	require.NoError(s.T(), err, "could not get latest finalized block header")
+
+	result, err := s.AccessClient().ExecuteScriptAtBlock(context.Background(), getExecutedScript, latest.ID)
+	require.NoError(s.T(), err, "could not execute getExecutedCallbacks script")
+
+	// Convert cadence array to Go slice
+	cadenceArray, ok := result.(cadence.Array)
+	require.True(s.T(), ok, "result should be a cadence array")
+
+	var executedIDs []uint64
+	for _, value := range cadenceArray.Values {
+		if id, ok := value.(cadence.UInt64); ok {
+			executedIDs = append(executedIDs, uint64(id))
+		}
+	}
+
+	return executedIDs
+}
diff --git a/integration/tests/execution/suite.go b/integration/tests/execution/suite.go
index 09666c24aa2..5d69a79b82b 100644
--- a/integration/tests/execution/suite.go
+++ b/integration/tests/execution/suite.go
@@ -22,13 +22,14 @@ type Suite struct {
 	suite.Suite
 	log zerolog.Logger
 	lib.TestnetStateTracker
-	cancel      context.CancelFunc
-	net         *testnet.FlowNetwork
-	nodeConfigs []testnet.NodeConfig
-	nodeIDs     []flow.Identifier
-	ghostID     flow.Identifier
-	exe1ID      flow.Identifier
-	verID       flow.Identifier
+	cancel       context.CancelFunc
+	net          *testnet.FlowNetwork
+	nodeConfigs  []testnet.NodeConfig
+	nodeIDs      []flow.Identifier
+	ghostID      flow.Identifier
+	exe1ID       flow.Identifier
+	verID        flow.Identifier
+	accessClient *testnet.Client
 }
 
 func (s *Suite) Ghost() *client.GhostClient {
@@ -38,9 +39,13 @@ func (s *Suite) Ghost() *client.GhostClient {
 }
 
 func (s *Suite) AccessClient() *testnet.Client {
-	client, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient()
-	require.NoError(s.T(), err, "could not get access client")
-	return client
+	if s.accessClient == nil { // cache access client
+		client, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient()
+		require.NoError(s.T(), err, "could not get access client")
+		s.accessClient = client
+	}
+
+	return s.accessClient
}
 
 type AdminCommandRequest struct {
@@ -105,7 +110,9 @@ func (s *Suite) SetupTest() {
 	s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel)
 	s.log.Info().Msg("================> SetupTest")
 
-	blockRateFlag := "--block-rate-delay=1ms"
+	// Reset suite state to ensure a clean slate between tests
+	s.nodeConfigs = nil
+	s.accessClient = nil
 
 	s.nodeConfigs = append(s.nodeConfigs, testnet.NewNodeConfig(flow.RoleAccess))
 
@@ -114,7 +121,7 @@ func (s *Suite) SetupTest() {
 	for _, nodeID := range s.nodeIDs {
 		nodeConfig := testnet.NewNodeConfig(flow.RoleConsensus,
 			testnet.WithID(nodeID),
 			testnet.WithLogLevel(zerolog.FatalLevel),
-			testnet.WithAdditionalFlag(blockRateFlag),
+			testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=1ms"),
 		)
 		s.nodeConfigs = append(s.nodeConfigs, nodeConfig)
 	}
 
@@ -122,17 +129,18 @@ func (s *Suite) SetupTest() {
 	// need one execution nodes
 	s.exe1ID = unittest.IdentifierFixture()
 	exe1Config := testnet.NewNodeConfig(flow.RoleExecution,
 		testnet.WithID(s.exe1ID),
-		testnet.WithLogLevel(zerolog.InfoLevel))
+		testnet.WithLogLevel(zerolog.InfoLevel),
+		testnet.WithAdditionalFlag("--scheduled-callbacks-enabled=true"))
 	s.nodeConfigs = append(s.nodeConfigs, exe1Config)
 
 	// need two collection node
 	coll1Config := testnet.NewNodeConfig(flow.RoleCollection,
 		testnet.WithLogLevel(zerolog.FatalLevel),
-		testnet.WithAdditionalFlag(blockRateFlag),
+		testnet.WithAdditionalFlag("--hotstuff-proposal-duration=1ms"),
 	)
 	coll2Config := testnet.NewNodeConfig(flow.RoleCollection,
 		testnet.WithLogLevel(zerolog.FatalLevel),
-		testnet.WithAdditionalFlag(blockRateFlag),
+		testnet.WithAdditionalFlag("--hotstuff-proposal-duration=1ms"),
 	)
 	s.nodeConfigs = append(s.nodeConfigs, coll1Config, coll2Config)
diff --git a/integration/tests/execution/transaction_metrics_test.go b/integration/tests/execution/transaction_metrics_test.go
new file mode 100644
index 00000000000..0fa4460cd27
--- /dev/null
+++ b/integration/tests/execution/transaction_metrics_test.go
@@ -0,0 +1,116 @@
+package execution
+
+import (
+	"bytes"
+	"context"
+	"testing"
+
+	"github.com/onflow/flow/protobuf/go/flow/execution"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+
+	"github.com/onflow/flow-go/integration/testnet"
+
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	sdk "github.com/onflow/flow-go-sdk"
+
+	"github.com/onflow/flow-go/integration/tests/lib"
+)
+
+func TestTransactionMetrics(t *testing.T) {
+	suite.Run(t, new(TransactionMetricsSuite))
+}
+
+type TransactionMetricsSuite struct {
+	Suite
+}
+
+func (s *TransactionMetricsSuite) TestTransactionMetrics() {
+	accessClient := s.AccessClient()
+
+	// wait for next height finalized (potentially first height), called blockA
+	currentFinalized := s.BlockState.HighestFinalizedHeight()
+	blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized)
+	s.T().Logf("got blockA height %v ID %v\n", blockA.Height, blockA.ID())
+
+	// send transaction
+	tx, err := accessClient.DeployContract(context.Background(), sdk.Identifier(s.net.Root().ID()), lib.CounterContract)
+	require.NoError(s.T(), err, "could not deploy counter")
+
+	txres, err := accessClient.WaitForExecuted(context.Background(), tx.ID())
+	require.NoError(s.T(), err, "could not wait for tx to be executed")
+	require.NoError(s.T(), txres.Error)
+
+	client, closeClient := s.getClient()
+	defer func() {
+		_ = closeClient()
+	}()
+
+	result, err := client.GetTransactionExecutionMetricsAfter(
+		context.Background(),
+		&execution.GetTransactionExecutionMetricsAfterRequest{
+			BlockHeight: 0,
+		},
+	)
+
+	require.NoError(s.T(), err, "could not get transaction execution metrics")
+	require.NotNil(s.T(), result.Results)
+	// there should be at least some results, due to each block having at least 1 transaction
+	require.Greater(s.T(), len(result.Results), 10)
+
+	latestBlockResult := uint64(0)
+	for _, result := range result.Results {
+		if result.BlockHeight > latestBlockResult {
+			latestBlockResult = result.BlockHeight
+		}
+	}
+
+	// send another transaction
+	tx, err = accessClient.UpdateContract(context.Background(), sdk.Identifier(s.net.Root().ID()), lib.CounterContract)
+	require.NoError(s.T(), err, "could not update counter")
+
+	txres, err = accessClient.WaitForExecuted(context.Background(), tx.ID())
+	require.NoError(s.T(), err, "could not wait for tx to be executed")
+	require.NoError(s.T(), txres.Error)
+
+	result, err = client.GetTransactionExecutionMetricsAfter(
+		context.Background(),
+		&execution.GetTransactionExecutionMetricsAfterRequest{
+			BlockHeight: latestBlockResult,
+		},
+	)
+
+	require.NoError(s.T(), err, "could not get transaction execution metrics")
+	// there may have been only one new block since the last check
+	require.Greater(s.T(), len(result.Results), 0)
+
+	transactionExists := false
+	for _, result := range result.Results {
+		for _, transaction := range result.Transactions {
+			if bytes.Equal(transaction.TransactionId, tx.ID().Bytes()) {
+				transactionExists = true
+
+				// check that the transaction metrics are not 0
+				require.Greater(s.T(), transaction.ExecutionTime, uint64(0))
+				require.Greater(s.T(), len(transaction.ExecutionEffortWeights), 0)
+			}
+		}
+		require.Less(s.T(), latestBlockResult, result.BlockHeight)
+	}
+	require.True(s.T(), transactionExists)
+}
+
+func (s *TransactionMetricsSuite) getClient() (execution.ExecutionAPIClient, func() error) {
+	exeContainer := s.net.ContainerByID(s.exe1ID)
+	addr := exeContainer.Addr(testnet.GRPCPort)
+
+	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	require.NoError(s.T(), err, "could not create execution client")
+
+	grpcClient := execution.NewExecutionAPIClient(conn)
+	return grpcClient, conn.Close
+}
diff --git a/integration/tests/ghost/ghost_node_example_test.go b/integration/tests/ghost/ghost_node_example_test.go
index a8ad9da0b3f..0836c11e5a2 100644
--- a/integration/tests/ghost/ghost_node_example_test.go
+++ b/integration/tests/ghost/ghost_node_example_test.go
@@ -25,10 +25,10 @@ func TestGhostNodeExample_Send(t *testing.T) {
 	var (
 		// one real collection node
-		realCollNode = testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.DebugLevel), testnet.WithIDInt(1))
+		realCollNode = testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.InfoLevel), testnet.WithIDInt(1))
 
 		// a ghost node masquerading as a collection node
-		ghostCollNode = testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.DebugLevel), testnet.WithIDInt(2),
+		ghostCollNode = testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.InfoLevel), testnet.WithIDInt(2),
 			testnet.AsGhost())
 
 		// three consensus nodes
@@ -60,7 +60,7 @@ func TestGhostNodeExample_Send(t *testing.T) {
 	assert.NoError(t, err)
 
 	// generate a test transaction
-	tx := unittest.TransactionBodyFixture()
+	tx := (messages.TransactionBody)(unittest.TransactionBodyFixture())
 
 	// send the transaction as an event to a real collection node
 	err = ghostClient.Send(ctx, channels.PushTransactions, &tx, realCollNode.Identifier)
@@ -86,7 +86,7 @@ func TestGhostNodeExample_Subscribe(t *testing.T) {
 		realExeNode = testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel), testnet.WithIDInt(2))
 
 		// a ghost node masquerading as an execution node
-		ghostExeNode = testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.DebugLevel), testnet.WithIDInt(3),
+		ghostExeNode = testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.InfoLevel), testnet.WithIDInt(3),
 			testnet.AsGhost())
 
 		// a verification node
@@ -124,8 +124,8 @@ func TestGhostNodeExample_Subscribe(t *testing.T) {
 
 		// the following switch should be similar to the one defined in the actual node that is being emulated
 		switch v := event.(type) {
-		case *messages.BlockProposal:
-			fmt.Printf("Received block proposal: %s from %s\n", v.Block.Header.ID().String(), from.String())
+		case *flow.Proposal:
+			fmt.Printf("Received block proposal: %s from %s\n", v.Block.ID().String(), from.String())
 			i++
 		default:
t.Logf(" ignoring event: :%T: %v", v, v) diff --git a/integration/tests/lib/block_state.go b/integration/tests/lib/block_state.go index a0cb14ee5cc..341ca0b04e6 100644 --- a/integration/tests/lib/block_state.go +++ b/integration/tests/lib/block_state.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" ) const blockStateTimeout = 120 * time.Second @@ -20,7 +19,7 @@ type BlockState struct { blocksByHeight map[uint64][]*flow.Block finalizedByHeight map[uint64]*flow.Block highestFinalized uint64 - highestProposed uint64 + highestProposed *flow.Block highestSealed *flow.Block } @@ -33,27 +32,71 @@ func NewBlockState() *BlockState { } } -func (bs *BlockState) Add(t *testing.T, msg *messages.BlockProposal) { - b := msg.Block.ToInternal() +// ByBlockID returns the block by ID if it is known by BlockState. +func (bs *BlockState) ByBlockID(id flow.Identifier) (*flow.Block, bool) { + block, ok := bs.blocksByID[id] + return block, ok +} + +// WaitForHalt attempts to detect when consensus has halted by observing a certain duration +// pass without progress (proposals with newer views). +func (bs *BlockState) WaitForHalt(t *testing.T, requiredDurationWithoutProgress, tick, timeout time.Duration) { + timeSinceLastProgress := time.Duration(0) + lastView := bs.HighestProposedView() + start := time.Now() + lastProgress := start + + t.Logf("waiting for halt: lastView=%d", lastView) + + ticker := time.NewTicker(tick) + timer := time.NewTimer(timeout) + defer ticker.Stop() + defer timer.Stop() + for timeSinceLastProgress < requiredDurationWithoutProgress { + select { + case <-timer.C: + t.Fatalf("failed to observe progress halt after %s, requiring %s without progress to succeed", timeout, requiredDurationWithoutProgress) + case <-ticker.C: + } + + latestView := bs.HighestProposedView() + if latestView > lastView { + lastView = latestView + lastProgress = time.Now() + } + timeSinceLastProgress = time.Since(lastProgress) + } + t.Logf("successfully observed progress halt for %s after %s of waiting", requiredDurationWithoutProgress, time.Since(start)) +} + +// Add inserts a new proposal message into BlockState. +// It validates and tracks the proposal and updating finalized and sealed blocks. +// +// All errors indicate that the input message could not be converted to a valid proposal. +func (bs *BlockState) Add(t *testing.T, proposal *flow.Proposal) error { bs.Lock() defer bs.Unlock() - bs.blocksByID[b.Header.ID()] = b - bs.blocksByHeight[b.Header.Height] = append(bs.blocksByHeight[b.Header.Height], b) - if b.Header.Height > bs.highestProposed { - bs.highestProposed = b.Header.Height + b := &proposal.Block + bs.blocksByID[b.ID()] = b + bs.blocksByHeight[b.Height] = append(bs.blocksByHeight[b.Height], b) + if bs.highestProposed == nil { + bs.highestProposed = b + } else if b.View > bs.highestProposed.View { + bs.highestProposed = b } - if b.Header.Height < 3 { - return + if b.Height < 3 { + return nil } - confirmsHeight := b.Header.Height - uint64(3) + confirmsHeight := b.Height - uint64(3) if confirmsHeight < bs.highestFinalized { - return + return nil } bs.processAncestors(t, b, confirmsHeight) + return nil } func (bs *BlockState) WaitForBlockById(t *testing.T, blockId flow.Identifier) *flow.Block { @@ -104,13 +147,13 @@ func (bs *BlockState) WaitForBlocksByHeight(t *testing.T, height uint64) []*flow // It also processes the seals of blocks being finalized. 
func (bs *BlockState) processAncestors(t *testing.T, b *flow.Block, confirmsHeight uint64) { // puts this block proposal and all ancestors into `finalizedByHeight` - t.Logf("%v new height arrived: %d\n", time.Now().UTC(), b.Header.Height) + t.Logf("%v new height arrived: %d\n", time.Now().UTC(), b.Height) ancestor := b - for ancestor.Header.Height > bs.highestFinalized { - heightDistance := b.Header.Height - ancestor.Header.Height - viewDistance := b.Header.View - ancestor.Header.View - if ancestor.Header.Height <= confirmsHeight { - // Since we are running on a trusted setup on localnet, when we receive block height b.Header.Height, + for ancestor.Height > bs.highestFinalized { + heightDistance := b.Height - ancestor.Height + viewDistance := b.View - ancestor.View + if ancestor.Height <= confirmsHeight { + // Since we are running on a trusted setup on localnet, when we receive block height b.Height, // it can finalize all ancestor blocks at height < confirmsHeight given the following conditions both satisfied: // (1) we already received ancestor block. // (2) there is no fork: the view distance between received block and ancestor block is the same as their height distance. @@ -119,14 +162,14 @@ func (bs *BlockState) processAncestors(t *testing.T, b *flow.Block, confirmsHeig if viewDistance == heightDistance { finalized := ancestor - bs.finalizedByHeight[finalized.Header.Height] = finalized - if finalized.Header.Height > bs.highestFinalized { // updates highestFinalized height - bs.highestFinalized = finalized.Header.Height + bs.finalizedByHeight[finalized.Height] = finalized + if finalized.Height > bs.highestFinalized { // updates highestFinalized height + bs.highestFinalized = finalized.Height } t.Logf("%v height %d finalized %d, highest finalized %d \n", time.Now().UTC(), - b.Header.Height, - finalized.Header.Height, + b.Height, + finalized.Height, bs.highestFinalized) // update last sealed height for _, seal := range finalized.Payload.Seals { @@ -136,7 +179,7 @@ func (bs *BlockState) processAncestors(t *testing.T, b *flow.Block, confirmsHeig } if bs.highestSealed == nil || - sealed.Header.Height > bs.highestSealed.Header.Height { + sealed.Height > bs.highestSealed.Height { bs.highestSealed = sealed } } @@ -149,7 +192,7 @@ func (bs *BlockState) processAncestors(t *testing.T, b *flow.Block, confirmsHeig // find parent var ok bool - ancestor, ok = bs.blocksByID[ancestor.Header.ParentID] + ancestor, ok = bs.blocksByID[ancestor.ParentID] // stop if parent not found if !ok { @@ -166,7 +209,7 @@ func (bs *BlockState) WaitForHighestFinalizedProgress(t *testing.T, currentFinal bs.RLock() defer bs.RUnlock() - t.Logf("%v checking highest finalized: %d, highest proposed: %d\n", time.Now().UTC(), bs.highestFinalized, bs.highestProposed) + t.Logf("%v checking highest finalized: %d, highest proposed: %v\n", time.Now().UTC(), bs.highestFinalized, bs.highestProposed) return bs.highestFinalized > currentFinalized }, blockStateTimeout, 100*time.Millisecond, fmt.Sprintf("did not receive progress on highest finalized height (%v) from (%v) within %v seconds", @@ -204,15 +247,15 @@ func (bs *BlockState) WaitForFinalizedChild(t *testing.T, parent *flow.Block) *f bs.RLock() defer bs.RUnlock() - _, ok := bs.finalizedByHeight[parent.Header.Height+1] + _, ok := bs.finalizedByHeight[parent.Height+1] return ok }, blockStateTimeout, 100*time.Millisecond, fmt.Sprintf("did not receive finalized child block for parent block height %v within %v seconds", - parent.Header.Height, blockStateTimeout)) + parent.Height, 
		blockStateTimeout))
 
 	bs.RLock()
 	defer bs.RUnlock()
-	return bs.finalizedByHeight[parent.Header.Height+1]
+	return bs.finalizedByHeight[parent.Height+1]
 }
 
 // HighestFinalized returns the highest finalized block after genesis and a boolean indicating whether a highest
@@ -232,7 +275,14 @@ func (bs *BlockState) HighestProposedHeight() uint64 {
 	bs.RLock()
 	defer bs.RUnlock()
-	return bs.highestProposed
+	if bs.highestProposed == nil { // no proposal received yet
+		return 0
+	}
+	return bs.highestProposed.Height
+}
+
+// HighestProposedView returns the view of the highest proposed block.
+func (bs *BlockState) HighestProposedView() uint64 {
+	bs.RLock()
+	defer bs.RUnlock()
+	if bs.highestProposed == nil { // no proposal received yet
+		return 0
+	}
+	return bs.highestProposed.View
 }
 
 // HighestFinalizedHeight returns the height of the highest finalized block.
@@ -242,25 +292,48 @@ func (bs *BlockState) HighestFinalizedHeight() uint64 {
 	return bs.highestFinalized
 }
 
-// WaitForSealed returns the sealed block after a certain height has been sealed.
-func (bs *BlockState) WaitForSealed(t *testing.T, height uint64) *flow.Block {
-	require.Eventually(t,
-		func() bool {
-			bs.RLock()
-			defer bs.RUnlock()
-
-			if bs.highestSealed != nil {
-				t.Logf("%v waiting for sealed height (%d/%d), last finalized %d", time.Now().UTC(), bs.highestSealed.Header.Height, height, bs.highestFinalized)
-			}
-			return bs.highestSealed != nil && bs.highestSealed.Header.Height >= height
-		},
-		blockStateTimeout,
-		100*time.Millisecond,
-		fmt.Sprintf("did not receive sealed block for height (%v) within %v seconds", height, blockStateTimeout))
-
-	bs.RLock()
-	defer bs.RUnlock()
-	return bs.highestSealed
+// WaitForSealedHeight returns the highest sealed block once the sealed height reaches
+// or exceeds `height`. The returned block may have height equal to or greater than `height`.
+func (bs *BlockState) WaitForSealedHeight(t *testing.T, height uint64) *flow.Block {
+	require.Eventuallyf(t,
+		func() bool {
+			highestSealed, ok := bs.HighestSealed()
+			if !ok {
+				return false
+			}
+
+			t.Logf("%v waiting for sealed height (%d/%d), last finalized %d", time.Now().UTC(), highestSealed.Height, height, bs.HighestFinalizedHeight())
+			return highestSealed.Height >= height
+		},
+		blockStateTimeout,
+		100*time.Millisecond,
+		"did not receive sealed block for height (%v) within %v seconds", height, blockStateTimeout)
+
+	highestSealed, ok := bs.HighestSealed()
+	require.True(t, ok)
+	return highestSealed
+}
+
+// WaitForSealedView returns the highest sealed block once the sealed view reaches
+// or exceeds `view`. The returned block may have view equal to or greater than `view`.
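+//
+// Usage sketch (the target view is an illustrative assumption):
+//
+//	sealed := bs.WaitForSealedView(t, 1000) // blocks until a block with view >= 1000 is sealed
+//	t.Logf("sealed block: height=%d view=%d", sealed.Height, sealed.View)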
+func (bs *BlockState) WaitForSealedView(t *testing.T, view uint64) *flow.Block {
+	require.Eventuallyf(t,
+		func() bool {
+			highestSealed, ok := bs.HighestSealed()
+			if !ok {
+				return false
+			}
+
+			t.Logf("%v waiting for sealed view (%d/%d), last finalized %d", time.Now().UTC(), highestSealed.View, view, bs.HighestFinalizedHeight())
+			return highestSealed.View >= view
+		},
+		blockStateTimeout,
+		100*time.Millisecond,
+		"did not receive sealed block for view (%v) within %v seconds", view, blockStateTimeout)
+
+	highestSealed, ok := bs.HighestSealed()
+	require.True(t, ok)
+	return highestSealed
 }
 
 func (bs *BlockState) HighestSealed() (*flow.Block, bool) {
diff --git a/integration/tests/lib/msg_state.go b/integration/tests/lib/msg_state.go
index 455aff12cef..928f63058ed 100644
--- a/integration/tests/lib/msg_state.go
+++ b/integration/tests/lib/msg_state.go
@@ -9,7 +9,6 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/model/messages"
 )
 
 const msgStateTimeout = 20 * time.Second
@@ -77,17 +76,7 @@ func (ms *MsgState) WaitForMsgFrom(t *testing.T, predicate func(msg interface{})
 	return m
 }
 
-func MsgIsChunkDataRequest(msg interface{}) bool {
-	_, ok := msg.(*messages.ChunkDataRequest)
-	return ok
-}
-
 func MsgIsChunkDataPackResponse(msg interface{}) bool {
-	_, ok := msg.(*messages.ChunkDataResponse)
-	return ok
-}
-
-func MsgIsResultApproval(msg interface{}) bool {
-	_, ok := msg.(*flow.ResultApproval)
+	_, ok := msg.(*flow.ChunkDataResponse)
 	return ok
 }
diff --git a/integration/tests/lib/receipt_state.go b/integration/tests/lib/receipt_state.go
index 35ba3a4db82..8a24ab78148 100644
--- a/integration/tests/lib/receipt_state.go
+++ b/integration/tests/lib/receipt_state.go
@@ -12,7 +12,6 @@ import (
 )
 
 const receiptTimeout = 120 * time.Second
-const receiptStateTimeout = 120 * time.Second
 
 type ReceiptState struct {
 	sync.RWMutex
diff --git a/integration/tests/lib/testnet_state_tracker.go b/integration/tests/lib/testnet_state_tracker.go
index 6ff6820baf4..87ea775f2ce 100644
--- a/integration/tests/lib/testnet_state_tracker.go
+++ b/integration/tests/lib/testnet_state_tracker.go
@@ -2,7 +2,6 @@ package lib
 
 import (
 	"context"
-	"fmt"
 	"strings"
 	"testing"
 	"time"
@@ -11,8 +10,6 @@ import (
 	"github.com/onflow/flow-go/engine/ghost/client"
 
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/model/messages"
-	"github.com/onflow/flow-go/utils/unittest"
 )
 
 type TestnetStateTracker struct {
@@ -83,14 +80,17 @@ func (tst *TestnetStateTracker) Track(t *testing.T, ctx context.Context, ghost *
 			tst.MsgState.Add(sender, msg)
 
 			switch m := msg.(type) {
-			case *messages.BlockProposal:
-				tst.BlockState.Add(t, m)
+			case *flow.Proposal:
+				err = tst.BlockState.Add(t, m)
+				require.NoError(t, err)
+
+				block := m.Block
 				t.Logf("%v block proposal received from %s at height %v, view %v: %x\n",
 					time.Now().UTC(),
 					sender,
-					m.Block.Header.Height,
-					m.Block.Header.View,
-					m.Block.Header.ID())
+					block.Height,
+					block.View,
+					block.ID())
 			case *flow.ResultApproval:
 				tst.ApprovalState.Add(sender, m)
 				t.Logf("%v result approval received from %s for execution result ID %x and chunk index %v\n",
@@ -111,73 +111,16 @@ func (tst *TestnetStateTracker) Track(t *testing.T, ctx context.Context, ghost *
 					finalState,
 					m.ExecutionResult.ID(),
					len(m.ExecutionResult.Chunks))
+			case *flow.ChunkDataResponse:
+				// consume this explicitly to avoid logging the full msg, which is usually very large because of the proof
+				t.Logf("%x chunk data pack received from %x\n",
+					m.ChunkDataPack.ChunkID,
+					sender)
 			default:
-				t.Logf("%v other msg received from %s: %#v\n", time.Now().UTC(), sender, msg)
+				t.Logf("%v other msg received from %s: %T\n", time.Now().UTC(), sender, msg)
 				continue
 			}
 		}
 	}()
 }
-
-// WaitUntilFinalizedStateCommitmentChanged waits until a different state commitment for a finalized block is received
-// compared to the latest one from any execution node and returns the corresponding block and execution receipt
-func WaitUntilFinalizedStateCommitmentChanged(t *testing.T, bs *BlockState, rs *ReceiptState,
-	qualifiers ...func(receipt flow.ExecutionReceipt) bool) (*flow.Block, *flow.ExecutionReceipt) {
-
-	// get the state commitment for the highest finalized block
-	initialFinalizedSC := unittest.GenesisStateCommitment
-	var err error
-	b1, ok := bs.HighestFinalized()
-	if ok {
-		r1 := rs.WaitForReceiptFromAny(t, b1.Header.ID())
-		initialFinalizedSC, err = r1.ExecutionResult.FinalStateCommitment()
-		require.NoError(t, err)
-	}
-
-	initFinalizedheight := b1.Header.Height
-	currentHeight := initFinalizedheight + 1
-
-	currentID := b1.Header.ID()
-	var b2 *flow.Block
-	var r2 *flow.ExecutionReceipt
-	require.Eventually(t, func() bool {
-		var ok bool
-		b2, ok = bs.FinalizedHeight(currentHeight)
-		if !ok {
-			return false
-		}
-		currentID = b2.Header.ID()
-		r2 = rs.WaitForReceiptFromAny(t, b2.Header.ID())
-		r2finalState, err := r2.ExecutionResult.FinalStateCommitment()
-		require.NoError(t, err)
-		if initialFinalizedSC == r2finalState {
-			// received a new execution result for the next finalized block, but it has the same final state commitment
-			// check the next finalized block
-			currentHeight++
-			return false
-		}
-
-		for _, qualifier := range qualifiers {
-			if !qualifier(*r2) {
-				return false
-			}
-		}
-
-		return true
-	}, receiptStateTimeout, 100*time.Millisecond,
-		fmt.Sprintf("did not receive an execution receipt with a different state commitment from %x within %v seconds,"+
-			" initial finalized height: %v "+
-			" last block checked height %v, last block checked ID %x", initialFinalizedSC, receiptTimeout,
-			initFinalizedheight,
-			currentHeight, currentID))
-
-	return b2, r2
-}
-
-// WithMinimumChunks creates a qualifier that returns true if receipt has the specified minimum number of chunks.
-func WithMinimumChunks(chunkNum int) func(flow.ExecutionReceipt) bool { - return func(receipt flow.ExecutionReceipt) bool { - return len(receipt.ExecutionResult.Chunks) >= chunkNum - } -} diff --git a/integration/tests/lib/util.go b/integration/tests/lib/util.go index 0fb11fbb4b2..1e8c521771d 100644 --- a/integration/tests/lib/util.go +++ b/integration/tests/lib/util.go @@ -4,6 +4,7 @@ import ( "context" "crypto/rand" "fmt" + "strings" "testing" "time" @@ -14,11 +15,17 @@ import ( sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go/integration/convert" - "github.com/onflow/flow-go/integration/testnet" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/dsl" "github.com/onflow/flow-go/utils/unittest" + + "github.com/onflow/flow-go/integration/convert" + "github.com/onflow/flow-go/integration/testnet" +) + +const ( + CounterDefaultValue = -3 + CounterInitializedValue = 2 ) var ( @@ -29,17 +36,17 @@ var ( dsl.Resource{ Name: "Counter", Code: ` - pub var count: Int + access(all) var count: Int init() { self.count = 0 } - pub fun add(_ count: Int) { + access(all) fun add(_ count: Int) { self.count = self.count + count }`, }, dsl.Code(` - pub fun createCounter(): @Counter { + access(all) fun createCounter(): @Counter { return <-create Counter() }`, ), @@ -47,24 +54,96 @@ var ( } ) +// TestFlowCallbackHandlerContract creates a test contract DSL for testing FlowTransactionScheduler +func TestFlowCallbackHandlerContract(callbackScheduler sdk.Address, flowToken sdk.Address, fungibleToken sdk.Address) dsl.Contract { + return dsl.Contract{ + Name: "TestFlowCallbackHandler", + Imports: []dsl.Import{ + { + Names: []string{"FlowTransactionScheduler"}, + Address: callbackScheduler, + }, + { + Names: []string{"FlowToken"}, + Address: flowToken, + }, + { + Names: []string{"FungibleToken"}, + Address: fungibleToken, + }, + }, + Members: []dsl.CadenceCode{ + dsl.Code(` + access(all) var scheduledCallbacks: @{UInt64: FlowTransactionScheduler.ScheduledTransaction} + access(all) var executedCallbacks: [UInt64] + + access(all) let HandlerStoragePath: StoragePath + access(all) let HandlerPublicPath: PublicPath + + access(all) resource Handler: FlowTransactionScheduler.TransactionHandler { + + access(FlowTransactionScheduler.Execute) + fun executeTransaction(id: UInt64, data: AnyStruct?) { + TestFlowCallbackHandler.executedCallbacks.append(id) + } + } + + access(all) fun createHandler(): @Handler { + return <- create Handler() + } + + access(all) fun addScheduledCallback(callback: @FlowTransactionScheduler.ScheduledTransaction) { + self.scheduledCallbacks[callback.id] <-! callback + } + + access(all) fun cancelCallback(id: UInt64): @FlowToken.Vault { + let callback <- self.scheduledCallbacks.remove(key: id) + ?? 
panic("Invalid ID: \(id) callback not found") + return <-FlowTransactionScheduler.cancel(scheduledTx: <-callback) + } + + access(all) fun getExecutedCallbacks(): [UInt64] { + return self.executedCallbacks + } + + access(all) init() { + self.scheduledCallbacks <- {} + self.executedCallbacks = [] + + self.HandlerStoragePath = /storage/testCallbackHandler + self.HandlerPublicPath = /public/testCallbackHandler + } + `), + }, + } +} + // CreateCounterTx is a transaction script for creating an instance of the counter in the account storage of the // authorizing account NOTE: the counter contract must be deployed first func CreateCounterTx(counterAddress sdk.Address) dsl.Transaction { return dsl.Transaction{ - Import: dsl.Import{Address: counterAddress}, + Imports: dsl.Imports{ + dsl.Import{ + Address: counterAddress, + }, + }, Content: dsl.Prepare{ - Content: dsl.Code(` - var maybeCounter <- signer.load<@Testing.Counter>(from: /storage/counter) - - if maybeCounter == nil { - maybeCounter <-! Testing.createCounter() - } - - maybeCounter?.add(2) - signer.save(<-maybeCounter!, to: /storage/counter) - - signer.link<&Testing.Counter>(/public/counter, target: /storage/counter) - `), + Content: dsl.Code(fmt.Sprintf( + ` + var maybeCounter <- signer.storage.load<@Testing.Counter>(from: /storage/counter) + + if maybeCounter == nil { + maybeCounter <-! Testing.createCounter() + } + + maybeCounter?.add(%d) + signer.storage.save(<-maybeCounter!, to: /storage/counter) + + let counterCap = signer.capabilities.storage.issue<&Testing.Counter>(/storage/counter) + signer.capabilities.publish(counterCap, at: /public/counter) + `, + CounterInitializedValue, + )), }, } } @@ -80,10 +159,11 @@ func ReadCounterScript(contractAddress sdk.Address, accountAddress sdk.Address) Code: fmt.Sprintf( ` let account = getAccount(0x%s) - let cap = account.getCapability(/public/counter) - return cap.borrow<&Testing.Counter>()?.count ?? -3 - `, + let counter = account.capabilities.borrow<&Testing.Counter>(/public/counter) + return counter?.count ?? %d + `, accountAddress.Hex(), + CounterDefaultValue, ), } } @@ -93,19 +173,24 @@ func ReadCounterScript(contractAddress sdk.Address, accountAddress sdk.Address) // contract must be deployed first func CreateCounterPanicTx(chain flow.Chain) dsl.Transaction { return dsl.Transaction{ - Import: dsl.Import{Address: sdk.Address(chain.ServiceAddress())}, + Imports: dsl.Imports{ + dsl.Import{ + Address: sdk.Address(chain.ServiceAddress()), + }, + }, Content: dsl.Prepare{ Content: dsl.Code(` - var maybeCounter <- signer.load<@Testing.Counter>(from: /storage/counter) + var maybeCounter <- signer.storage.load<@Testing.Counter>(from: /storage/counter) if maybeCounter == nil { maybeCounter <-! 
Testing.createCounter() } maybeCounter?.add(2) - signer.save(<-maybeCounter!, to: /storage/counter) + signer.storage.save(<-maybeCounter!, to: /storage/counter) - signer.link<&Testing.Counter>(/public/counter, target: /storage/counter) + let counterCap = signer.capabilities.storage.issue<&Testing.Counter>(/storage/counter) + signer.capabilities.publish(counterCap, at: /public/counter) panic("fail for testing purposes") `), @@ -163,7 +248,7 @@ func RandomPrivateKey() sdkcrypto.PrivateKey { func SDKTransactionFixture(opts ...func(*sdk.Transaction)) sdk.Transaction { tx := sdk.Transaction{ - Script: []byte("pub fun main() {}"), + Script: []byte("access(all) fun main() {}"), ReferenceBlockID: sdk.Identifier(unittest.IdentifierFixture()), GasLimit: 10, ProposalKey: convert.ToSDKProposalKey(unittest.ProposalKeyFixture()), @@ -214,26 +299,26 @@ func WithChainID(chainID flow.ChainID) func(tx *sdk.Transaction) { // LogStatus logs current information about the test network state. func LogStatus(t *testing.T, ctx context.Context, log zerolog.Logger, client *testnet.Client) { + // retrieves latest FINALIZED snapshot snapshot, err := client.GetLatestProtocolSnapshot(ctx) if err != nil { - log.Err(err).Msg("failed to get sealed snapshot") - return - } - finalized, err := client.GetLatestFinalizedBlockHeader(ctx) - if err != nil { - log.Err(err).Msg("failed to get finalized header") + log.Err(err).Msg("failed to get finalized snapshot") return } - sealed, err := snapshot.Head() + sealingSegment, err := snapshot.SealingSegment() require.NoError(t, err) - phase, err := snapshot.Phase() + sealed := sealingSegment.Sealed() + finalized := sealingSegment.Finalized() + + phase, err := snapshot.EpochPhase() require.NoError(t, err) - epoch := snapshot.Epochs().Current() - counter, err := epoch.Counter() + epoch, err := snapshot.Epochs().Current() require.NoError(t, err) + counter := epoch.Counter() log.Info().Uint64("final_height", finalized.Height). + Uint64("final_view", finalized.View). Uint64("sealed_height", sealed.Height). Uint64("sealed_view", sealed.View). Str("cur_epoch_phase", phase.String()). @@ -257,3 +342,205 @@ func LogStatusPeriodically(t *testing.T, parent context.Context, log zerolog.Log cancel() } } + +// ScheduleCallbackAtTimestamp sends a test transaction to schedule a callback on FlowTransactionScheduler +// at a given timestamp and returns the scheduled callback ID. 
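+//
+// Usage sketch, mirroring the call sites in scheduled_callbacks_test.go (the client,
+// chainID, and 10s delta are illustrative assumptions):
+//
+//	sc := systemcontracts.SystemContractsForChain(chainID)
+//	id, err := ScheduleCallbackAtTimestamp(
+//		time.Now().Unix()+10, // fire ~10 seconds from now
+//		client,
+//		sdk.Address(sc.FlowCallbackScheduler.Address),
+//		sdk.Address(sc.FlowToken.Address),
+//		sdk.Address(sc.FungibleToken.Address),
+//	)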
+func ScheduleCallbackAtTimestamp(
+	timestamp int64,
+	client *testnet.Client,
+	flowCallbackScheduler sdk.Address,
+	flowToken sdk.Address,
+	fungibleToken sdk.Address,
+) (uint64, error) {
+	header, err := client.GetLatestFinalizedBlockHeader(context.Background())
+	if err != nil {
+		return 0, fmt.Errorf("could not get latest finalized block header: %w", err)
+	}
+
+	serviceAccount, err := client.GetAccountAtBlockHeight(context.Background(), client.SDKServiceAddress(), header.Height)
+	if err != nil {
+		return 0, fmt.Errorf("could not get account: %w", err)
+	}
+
+	script := []byte(fmt.Sprintf(`
+		import FlowTransactionScheduler from 0x%s
+		import TestFlowCallbackHandler from 0x%s
+		import FlowToken from 0x%s
+		import FungibleToken from 0x%s
+
+		transaction(timestamp: UFix64) {
+
+			prepare(account: auth(BorrowValue, SaveValue, IssueStorageCapabilityController, PublishCapability, GetStorageCapabilityController) &Account) {
+				if !account.storage.check<@TestFlowCallbackHandler.Handler>(from: TestFlowCallbackHandler.HandlerStoragePath) {
+					let handler <- TestFlowCallbackHandler.createHandler()
+
+					account.storage.save(<-handler, to: TestFlowCallbackHandler.HandlerStoragePath)
+					account.capabilities.storage.issue<auth(FlowTransactionScheduler.Execute) &{FlowTransactionScheduler.TransactionHandler}>(TestFlowCallbackHandler.HandlerStoragePath)
+				}
+
+				let callbackCap = account.capabilities.storage
+					.getControllers(forPath: TestFlowCallbackHandler.HandlerStoragePath)[0]
+					.capability as! Capability<auth(FlowTransactionScheduler.Execute) &{FlowTransactionScheduler.TransactionHandler}>
+
+				let vault = account.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault)
+					?? panic("Could not borrow FlowToken vault")
+
+				let testData = "test data"
+				let feeAmount = 1.0
+				let effort = UInt64(1000)
+				let priority = FlowTransactionScheduler.Priority.High
+
+				let fees <- vault.withdraw(amount: feeAmount) as! @FlowToken.Vault
+
+				let scheduledCallback <- FlowTransactionScheduler.schedule(
+					handlerCap: callbackCap,
+					data: testData,
+					timestamp: timestamp,
+					priority: priority,
+					executionEffort: effort,
+					fees: <-fees
+				)
+
+				TestFlowCallbackHandler.addScheduledCallback(callback: <-scheduledCallback)
+			}
+		}
+	`, serviceAccount.Address.Hex(), flowCallbackScheduler.Hex(), flowToken.Hex(), fungibleToken.Hex()))
+
+	timeArg, err := cadence.NewUFix64(fmt.Sprintf("%d.0", timestamp))
+	if err != nil {
+		return 0, fmt.Errorf("could not create time argument: %w", err)
+	}
+
+	tx := sdk.NewTransaction().
+		SetScript(script).
+		SetReferenceBlockID(header.ID).
+		SetProposalKey(serviceAccount.Address, serviceAccount.Keys[0].Index, serviceAccount.Keys[0].SequenceNumber).
+		SetPayer(serviceAccount.Address).
+		AddAuthorizer(serviceAccount.Address)
+
+	err = tx.AddArgument(timeArg)
+	if err != nil {
+		return 0, fmt.Errorf("could not add argument to transaction: %w", err)
+	}
+
+	return sendCallbackTx(client, tx)
+}
+
+// CancelCallbackByID sends a test transaction for canceling a callback on FlowTransactionScheduler by ID.
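+//
+// Usage sketch (callbackID comes from a prior ScheduleCallbackAtTimestamp call; the
+// remaining arguments are the same addresses used when scheduling):
+//
+//	canceledID, err := CancelCallbackByID(callbackID, client,
+//		sdk.Address(sc.FlowCallbackScheduler.Address),
+//		sdk.Address(sc.FlowToken.Address),
+//		sdk.Address(sc.FungibleToken.Address))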
+func CancelCallbackByID(
+	callbackID uint64,
+	client *testnet.Client,
+	flowCallbackScheduler sdk.Address,
+	flowToken sdk.Address,
+	fungibleToken sdk.Address,
+) (uint64, error) {
+
+	header, err := client.GetLatestFinalizedBlockHeader(context.Background())
+	if err != nil {
+		return 0, fmt.Errorf("could not get latest finalized block header: %w", err)
+	}
+
+	serviceAccount, err := client.GetAccountAtBlockHeight(context.Background(), client.SDKServiceAddress(), header.Height)
+	if err != nil {
+		return 0, fmt.Errorf("could not get account: %w", err)
+	}
+
+	cancelTx := fmt.Sprintf(`
+		import FlowTransactionScheduler from 0x%s
+		import TestFlowCallbackHandler from 0x%s
+		import FlowToken from 0x%s
+		import FungibleToken from 0x%s
+
+		transaction(id: UInt64) {
+
+			prepare(account: auth(BorrowValue, SaveValue, IssueStorageCapabilityController, PublishCapability, GetStorageCapabilityController) &Account) {
+
+				let vault = account.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault)
+					?? panic("Could not borrow FlowToken vault")
+
+				vault.deposit(from: <-TestFlowCallbackHandler.cancelCallback(id: id))
+			}
+		}
+	`, serviceAccount.Address.Hex(), flowCallbackScheduler.Hex(), flowToken.Hex(), fungibleToken.Hex())
+
+	tx := sdk.NewTransaction().
+		SetScript([]byte(cancelTx)).
+		SetReferenceBlockID(header.ID).
+		SetProposalKey(serviceAccount.Address, serviceAccount.Keys[0].Index, serviceAccount.Keys[0].SequenceNumber).
+		SetPayer(serviceAccount.Address).
+		AddAuthorizer(serviceAccount.Address)
+
+	err = tx.AddArgument(cadence.UInt64(callbackID))
+	if err != nil {
+		return 0, fmt.Errorf("could not add argument to transaction: %w", err)
+	}
+
+	return sendCallbackTx(client, tx)
+}
+
+// ExtractCallbackIDFromEvents extracts the callback ID from the events of a transaction result.
+func ExtractCallbackIDFromEvents(result *sdk.TransactionResult) uint64 {
+	for _, event := range result.Events {
+		if strings.Contains(string(event.Type), "FlowTransactionScheduler.Scheduled") ||
+			strings.Contains(string(event.Type), "FlowTransactionScheduler.Canceled") ||
+			strings.Contains(string(event.Type), "FlowTransactionScheduler.Executed") ||
+			strings.Contains(string(event.Type), "FlowTransactionScheduler.PendingExecution") {
+
+			if id := event.Value.SearchFieldByName("id"); id != nil {
+				return uint64(id.(cadence.UInt64))
+			}
+		}
+	}
+
+	return 0
+}
+
+// DeployScheduledCallbackTestContract deploys the test contract for scheduled callbacks.
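+//
+// Usage sketch (addresses and reference block ID are illustrative; the contract must
+// be deployed before callbacks can be scheduled against TestFlowCallbackHandler):
+//
+//	err := DeployScheduledCallbackTestContract(client,
+//		sdk.Address(sc.FlowCallbackScheduler.Address),
+//		sdk.Address(sc.FlowToken.Address),
+//		sdk.Address(sc.FungibleToken.Address),
+//		sdk.Identifier(net.Root().ID()))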
+func DeployScheduledCallbackTestContract(
+	client *testnet.Client,
+	callbackScheduler sdk.Address,
+	flowToken sdk.Address,
+	fungibleToken sdk.Address,
+	refID sdk.Identifier,
+) error {
+	testContract := TestFlowCallbackHandlerContract(callbackScheduler, flowToken, fungibleToken)
+	tx, err := client.DeployContract(context.Background(), refID, testContract)
+	if err != nil {
+		return fmt.Errorf("could not deploy test contract: %w", err)
+	}
+
+	res, err := client.WaitForExecuted(context.Background(), tx.ID())
+	if err != nil {
+		return fmt.Errorf("could not wait for deploy transaction to be executed: %w", err)
+	}
+
+	if res.Error != nil {
+		return fmt.Errorf("deploy transaction should not have error: %w", res.Error)
+	}
+
+	return nil
+}
+
+// sendCallbackTx signs and sends the given callback transaction (schedule or cancel),
+// waits for it to be executed, and returns the callback ID extracted from the emitted events.
+func sendCallbackTx(client *testnet.Client, tx *sdk.Transaction) (uint64, error) {
+	err := client.SignAndSendTransaction(context.Background(), tx)
+	if err != nil {
+		return 0, fmt.Errorf("could not send callback transaction: %w", err)
+	}
+
+	// Wait for the transaction to be executed
+	executedResult, err := client.WaitForExecuted(context.Background(), tx.ID())
+	if err != nil {
+		return 0, fmt.Errorf("could not wait for callback transaction to be executed: %w", err)
+	}
+
+	if executedResult.Error != nil {
+		return 0, fmt.Errorf("callback transaction should not have error: %w", executedResult.Error)
+	}
+
+	callbackID := ExtractCallbackIDFromEvents(executedResult)
+	if callbackID == 0 {
+		return 0, fmt.Errorf("callback ID should not be 0")
+	}
+
+	return callbackID, nil
+}
diff --git a/integration/tests/mvp/common.go b/integration/tests/mvp/common.go
new file mode 100644
index 00000000000..99341792f62
--- /dev/null
+++ b/integration/tests/mvp/common.go
@@ -0,0 +1,179 @@
+package mvp
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/onflow/cadence"
+	"github.com/stretchr/testify/require"
+
+	sdk "github.com/onflow/flow-go-sdk"
+	sdkcrypto "github.com/onflow/flow-go-sdk/crypto"
+	"github.com/onflow/flow-go-sdk/templates"
+
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+
+	"github.com/onflow/flow-go/integration/testnet"
+	"github.com/onflow/flow-go/integration/tests/lib"
+)
+
+// timeout for individual actions
+const defaultTimeout = time.Second * 10
+
+func RunMVPTest(t *testing.T, ctx context.Context, net *testnet.FlowNetwork, accessNode *testnet.Container) {
+
+	chain := net.Root().ChainID.Chain()
+
+	serviceAccountClient, err := accessNode.TestnetClient()
+	require.NoError(t, err)
+
+	latestBlockID, err := serviceAccountClient.GetLatestBlockID(ctx)
+	require.NoError(t, err)
+
+	// create new account to deploy Counter to
+	accountPrivateKey := lib.RandomPrivateKey()
+
+	accountKey := sdk.NewAccountKey().
+		FromPrivateKey(accountPrivateKey).
+		SetHashAlgo(sdkcrypto.SHA3_256).
+		SetWeight(sdk.AccountKeyWeightThreshold)
+
+	serviceAddress := sdk.Address(serviceAccountClient.Chain.ServiceAddress())
+
+	// Generate the account creation transaction
+	createAccountTx, err := templates.CreateAccount(
+		[]*sdk.AccountKey{accountKey},
+		[]templates.Contract{
+			{
+				Name:   lib.CounterContract.Name,
+				Source: lib.CounterContract.ToCadence(),
+			},
+		}, serviceAddress)
+	require.NoError(t, err)
+	createAccountTx.
+		SetReferenceBlockID(sdk.Identifier(latestBlockID)).
+		SetProposalKey(serviceAddress, 0, serviceAccountClient.GetAndIncrementSeqNumber()).
+		SetPayer(serviceAddress).
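+		// generous compute limit so the test transaction is not constrained by execution effort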
+ SetComputeLimit(9999) + + childCtx, cancel := context.WithTimeout(ctx, defaultTimeout) + err = serviceAccountClient.SignAndSendTransaction(childCtx, createAccountTx) + require.NoError(t, err) + + cancel() + + // wait for account to be created + accountCreationTxRes, err := serviceAccountClient.WaitForSealed(context.Background(), createAccountTx.ID()) + require.NoError(t, err) + t.Log(accountCreationTxRes) + + var newAccountAddress sdk.Address + for _, event := range accountCreationTxRes.Events { + if event.Type == sdk.EventAccountCreated { + accountCreatedEvent := sdk.AccountCreatedEvent(event) + newAccountAddress = accountCreatedEvent.Address() + } + } + require.NotEqual(t, sdk.EmptyAddress, newAccountAddress) + + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + t.Log(">> new account address: ", newAccountAddress) + + // Generate the fund account transaction (so account can be used as a payer) + fundAccountTx := sdk.NewTransaction(). + SetScript([]byte(fmt.Sprintf(` + import FungibleToken from 0x%s + import FlowToken from 0x%s + + transaction(amount: UFix64, recipient: Address) { + let sentVault: @{FungibleToken.Vault} + prepare(signer: auth(BorrowValue) &Account) { + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) + ?? panic("failed to borrow reference to sender vault") + self.sentVault <- vaultRef.withdraw(amount: amount) + } + execute { + let receiverRef = getAccount(recipient) + .capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) + ?? panic("failed to borrow reference to recipient vault") + receiverRef.deposit(from: <-self.sentVault) + } + }`, + sc.FungibleToken.Address.Hex(), + sc.FlowToken.Address.Hex(), + ))). + AddAuthorizer(serviceAddress). + SetReferenceBlockID(sdk.Identifier(latestBlockID)). + SetProposalKey(serviceAddress, 0, serviceAccountClient.GetAndIncrementSeqNumber()). + SetPayer(serviceAddress). + SetComputeLimit(9999) + + err = fundAccountTx.AddArgument(cadence.UFix64(1_0000_0000)) + require.NoError(t, err) + err = fundAccountTx.AddArgument(cadence.NewAddress(newAccountAddress)) + require.NoError(t, err) + + t.Log(">> funding new account...") + + childCtx, cancel = context.WithTimeout(ctx, defaultTimeout) + err = serviceAccountClient.SignAndSendTransaction(childCtx, fundAccountTx) + require.NoError(t, err) + + cancel() + + fundCreationTxRes, err := serviceAccountClient.WaitForSealed(context.Background(), fundAccountTx.ID()) + require.NoError(t, err) + t.Log(fundCreationTxRes) + + accountClient, err := testnet.NewClientWithKey( + accessNode.Addr(testnet.GRPCPort), + newAccountAddress, + accountPrivateKey, + chain, + ) + require.NoError(t, err) + + // contract is deployed, but no instance is created yet + childCtx, cancel = context.WithTimeout(ctx, defaultTimeout) + counter, err := lib.ReadCounter(childCtx, accountClient, newAccountAddress) + cancel() + require.NoError(t, err) + require.Equal(t, lib.CounterDefaultValue, counter) + + // create counter instance + createCounterTx := sdk.NewTransaction(). + SetScript([]byte(lib.CreateCounterTx(newAccountAddress).ToCadence())). + SetReferenceBlockID(sdk.Identifier(latestBlockID)). + SetProposalKey(newAccountAddress, 0, 0). + SetPayer(newAccountAddress). + AddAuthorizer(newAccountAddress). 
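+		// the new account is proposer, payer, and authorizer of its own transaction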
+		SetComputeLimit(9999)
+
+	t.Log(">> creating counter...")
+
+	childCtx, cancel = context.WithTimeout(ctx, defaultTimeout)
+	err = accountClient.SignAndSendTransaction(childCtx, createCounterTx)
+	cancel()
+
+	require.NoError(t, err)
+
+	resp, err := accountClient.WaitForSealed(context.Background(), createCounterTx.ID())
+	require.NoError(t, err)
+
+	require.NoError(t, resp.Error)
+	t.Log(resp)
+
+	t.Log(">> awaiting counter incrementing...")
+
+	// counter is created and incremented eventually
+	require.Eventually(t, func() bool {
+		childCtx, cancel = context.WithTimeout(ctx, defaultTimeout)
+		counter, err = lib.ReadCounter(childCtx, serviceAccountClient, newAccountAddress)
+		cancel()
+
+		return err == nil && counter == lib.CounterInitializedValue
+	}, 30*time.Second, time.Second)
+}
diff --git a/integration/tests/mvp/mvp_test.go b/integration/tests/mvp/mvp_test.go
index c06a018c4b6..68eceae79d4 100644
--- a/integration/tests/mvp/mvp_test.go
+++ b/integration/tests/mvp/mvp_test.go
@@ -1,33 +1,22 @@
-package common
+package mvp
 
 import (
 	"context"
 	"fmt"
 	"testing"
-	"time"
 
 	"github.com/dapperlabs/testingdock"
-	"github.com/onflow/cadence"
 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	sdk "github.com/onflow/flow-go-sdk"
-	sdkcrypto "github.com/onflow/flow-go-sdk/crypto"
-	"github.com/onflow/flow-go-sdk/templates"
-
-	"github.com/onflow/flow-go/fvm"
 	"github.com/onflow/flow-go/integration/testnet"
-	"github.com/onflow/flow-go/integration/tests/lib"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/model/flow/filter"
 	"github.com/onflow/flow-go/utils/rand"
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
-// timeout for individual actions
-const defaultTimeout = time.Second * 10
-
 func TestMVP_Network(t *testing.T) {
 	logger := unittest.LoggerForTest(t, zerolog.InfoLevel)
 	logger.Info().Msgf("================> START TESTING")
@@ -43,7 +32,7 @@ func TestMVP_Network(t *testing.T) {
 		logger.Info().Msg("================> Finish TearDownTest")
 	}()
 
-	runMVPTest(t, ctx, flowNetwork)
+	RunMVPTest(t, ctx, flowNetwork, flowNetwork.ContainerByName(testnet.PrimaryAN))
 }
 
 func TestMVP_Bootstrap(t *testing.T) {
@@ -65,13 +54,15 @@ func TestMVP_Bootstrap(t *testing.T) {
 
 	flowNetwork.Start(ctx)
 
-	client, err := flowNetwork.ContainerByName(testnet.PrimaryAN).TestnetClient()
+	accessNode := flowNetwork.ContainerByName(testnet.PrimaryAN)
+
+	client, err := accessNode.TestnetClient()
 	require.NoError(t, err)
 
 	t.Log("@@ running mvp test 1")
 
 	// run mvp test to build a few blocks
-	runMVPTest(t, ctx, flowNetwork)
+	RunMVPTest(t, ctx, flowNetwork, accessNode)
 
 	t.Log("@@ finished running mvp test 1")
 
@@ -82,7 +73,7 @@ func TestMVP_Bootstrap(t *testing.T) {
 	// verify that the downloaded snapshot is not for the root block
 	header, err := snapshot.Head()
 	require.NoError(t, err)
-	assert.True(t, header.ID() != flowNetwork.Root().Header.ID())
+	assert.True(t, header.ID() != flowNetwork.Root().ID())
 
 	t.Log("@@ restarting network with new root snapshot")
 
@@ -90,14 +81,14 @@ func TestMVP_Bootstrap(t *testing.T) {
 	flowNetwork.RemoveContainers()
 
 	// pick 1 consensus node to restart with empty database and downloaded snapshot
-	cons := flowNetwork.Identities().Filter(filter.HasRole(flow.RoleConsensus))
+	cons := flowNetwork.Identities().Filter(filter.HasRole[flow.Identity](flow.RoleConsensus))
 	random, err := rand.Uintn(uint(len(cons)))
 	require.NoError(t, err)
 	con1 := cons[random]
 
 	t.Log("@@ booting from non-root state on consensus node ", con1.NodeID)
 
-
flowNetwork.DropDBs(filter.HasNodeID(con1.NodeID)) + flowNetwork.DropDBs(filter.HasNodeID[flow.Identity](con1.NodeID)) con1Container := flowNetwork.ContainerByID(con1.NodeID) con1Container.DropDB() con1Container.WriteRootSnapshot(snapshot) @@ -107,19 +98,19 @@ func TestMVP_Bootstrap(t *testing.T) { flowNetwork.Start(ctx) // Run MVP tests - runMVPTest(t, ctx, flowNetwork) + RunMVPTest(t, ctx, flowNetwork, accessNode) t.Log("@@ finished running mvp test 2") } func buildMVPNetConfig() testnet.NetworkConfig { collectionConfigs := []func(*testnet.NodeConfig){ - testnet.WithAdditionalFlag("--block-rate-delay=100ms"), + testnet.WithAdditionalFlag("--hotstuff-proposal-duration=100ms"), testnet.WithLogLevel(zerolog.FatalLevel), } consensusConfigs := []func(config *testnet.NodeConfig){ - testnet.WithAdditionalFlag("--block-rate-delay=100ms"), + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=100ms"), testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), testnet.WithLogLevel(zerolog.FatalLevel), @@ -139,157 +130,3 @@ func buildMVPNetConfig() testnet.NetworkConfig { return testnet.NewNetworkConfig("mvp", net) } - -func runMVPTest(t *testing.T, ctx context.Context, net *testnet.FlowNetwork) { - - chain := net.Root().Header.ChainID.Chain() - - serviceAccountClient, err := net.ContainerByName(testnet.PrimaryAN).TestnetClient() - require.NoError(t, err) - - latestBlockID, err := serviceAccountClient.GetLatestBlockID(ctx) - require.NoError(t, err) - - // create new account to deploy Counter to - accountPrivateKey := lib.RandomPrivateKey() - - accountKey := sdk.NewAccountKey(). - FromPrivateKey(accountPrivateKey). - SetHashAlgo(sdkcrypto.SHA3_256). - SetWeight(sdk.AccountKeyWeightThreshold) - - serviceAddress := sdk.Address(serviceAccountClient.Chain.ServiceAddress()) - - // Generate the account creation transaction - createAccountTx, err := templates.CreateAccount( - []*sdk.AccountKey{accountKey}, - []templates.Contract{ - { - Name: lib.CounterContract.Name, - Source: lib.CounterContract.ToCadence(), - }, - }, serviceAddress) - require.NoError(t, err) - createAccountTx. - SetReferenceBlockID(sdk.Identifier(latestBlockID)). - SetProposalKey(serviceAddress, 0, serviceAccountClient.GetSeqNumber()). - SetPayer(serviceAddress). - SetGasLimit(9999) - - childCtx, cancel := context.WithTimeout(ctx, defaultTimeout) - err = serviceAccountClient.SignAndSendTransaction(childCtx, createAccountTx) - require.NoError(t, err) - - cancel() - - // wait for account to be created - accountCreationTxRes, err := serviceAccountClient.WaitForSealed(context.Background(), createAccountTx.ID()) - require.NoError(t, err) - t.Log(accountCreationTxRes) - - var newAccountAddress sdk.Address - for _, event := range accountCreationTxRes.Events { - if event.Type == sdk.EventAccountCreated { - accountCreatedEvent := sdk.AccountCreatedEvent(event) - newAccountAddress = accountCreatedEvent.Address() - } - } - require.NotEqual(t, sdk.EmptyAddress, newAccountAddress) - - t.Log(">> new account address: ", newAccountAddress) - - // Generate the fund account transaction (so account can be used as a payer) - fundAccountTx := sdk.NewTransaction(). 
- SetScript([]byte(fmt.Sprintf(` - import FungibleToken from 0x%s - import FlowToken from 0x%s - - transaction(amount: UFix64, recipient: Address) { - let sentVault: @FungibleToken.Vault - prepare(signer: AuthAccount) { - let vaultRef = signer.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) - ?? panic("failed to borrow reference to sender vault") - self.sentVault <- vaultRef.withdraw(amount: amount) - } - execute { - let receiverRef = getAccount(recipient) - .getCapability(/public/flowTokenReceiver) - .borrow<&{FungibleToken.Receiver}>() - ?? panic("failed to borrow reference to recipient vault") - receiverRef.deposit(from: <-self.sentVault) - } - }`, - fvm.FungibleTokenAddress(chain).Hex(), - fvm.FlowTokenAddress(chain).Hex()))). - AddAuthorizer(serviceAddress). - SetReferenceBlockID(sdk.Identifier(latestBlockID)). - SetProposalKey(serviceAddress, 0, serviceAccountClient.GetSeqNumber()). - SetPayer(serviceAddress). - SetGasLimit(9999) - - err = fundAccountTx.AddArgument(cadence.UFix64(1_0000_0000)) - require.NoError(t, err) - err = fundAccountTx.AddArgument(cadence.NewAddress(newAccountAddress)) - require.NoError(t, err) - - t.Log(">> funding new account...") - - childCtx, cancel = context.WithTimeout(ctx, defaultTimeout) - err = serviceAccountClient.SignAndSendTransaction(childCtx, fundAccountTx) - require.NoError(t, err) - - cancel() - - fundCreationTxRes, err := serviceAccountClient.WaitForSealed(context.Background(), fundAccountTx.ID()) - require.NoError(t, err) - t.Log(fundCreationTxRes) - - accountClient, err := testnet.NewClientWithKey( - net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort), - newAccountAddress, - accountPrivateKey, - chain, - ) - require.NoError(t, err) - - // contract is deployed, but no instance is created yet - childCtx, cancel = context.WithTimeout(ctx, defaultTimeout) - counter, err := lib.ReadCounter(childCtx, accountClient, newAccountAddress) - cancel() - require.NoError(t, err) - require.Equal(t, -3, counter) - - // create counter instance - createCounterTx := sdk.NewTransaction(). - SetScript([]byte(lib.CreateCounterTx(newAccountAddress).ToCadence())). - SetReferenceBlockID(sdk.Identifier(latestBlockID)). - SetProposalKey(newAccountAddress, 0, 0). - SetPayer(newAccountAddress). - AddAuthorizer(newAccountAddress). 
- SetGasLimit(9999) - - t.Log(">> creating counter...") - - childCtx, cancel = context.WithTimeout(ctx, defaultTimeout) - err = accountClient.SignAndSendTransaction(ctx, createCounterTx) - cancel() - - require.NoError(t, err) - - resp, err := accountClient.WaitForSealed(context.Background(), createCounterTx.ID()) - require.NoError(t, err) - - require.NoError(t, resp.Error) - t.Log(resp) - - t.Log(">> awaiting counter incrementing...") - - // counter is created and incremented eventually - require.Eventually(t, func() bool { - childCtx, cancel = context.WithTimeout(ctx, defaultTimeout) - counter, err = lib.ReadCounter(ctx, serviceAccountClient, newAccountAddress) - cancel() - - return err == nil && counter == 2 - }, 30*time.Second, time.Second) -} diff --git a/integration/tests/network/network_test.go b/integration/tests/network/network_test.go index 50cd1cb3a27..286d0bac960 100644 --- a/integration/tests/network/network_test.go +++ b/integration/tests/network/network_test.go @@ -121,7 +121,7 @@ func launchReadLoop( } switch v := event.(type) { - case *message.TestMessage: + case *flow.TestMessage: t.Logf("%s: %s: %s\n", id.String(), actualOriginID.String(), v.Text) assert.Equal(t, expectedOrigin, actualOriginID) assert.Equal(t, expectedMsg, v.Text) diff --git a/integration/tests/upgrades/protocol_version_upgrade_test.go b/integration/tests/upgrades/protocol_version_upgrade_test.go new file mode 100644 index 00000000000..88986fefee5 --- /dev/null +++ b/integration/tests/upgrades/protocol_version_upgrade_test.go @@ -0,0 +1,146 @@ +package upgrades + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-core-contracts/lib/go/templates" + + sdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/integration/utils" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol/protocol_state" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" +) + +type ProtocolVersionUpgradeSuite struct { + Suite +} + +func TestProtocolVersionUpgrade(t *testing.T) { + suite.Run(t, new(ProtocolVersionUpgradeSuite)) +} + +func (s *ProtocolVersionUpgradeSuite) SetupTest() { + // Begin the test with a v0 kvstore, rather than the default v1. + // This lets us test upgrading v0->v1 + s.KVStoreFactory = func(epochStateID flow.Identifier) (protocol_state.KVStoreAPI, error) { + return kvstore.NewKVStoreV0(10, 50, epochStateID) + } + s.Suite.SetupTest() +} + +// TestProtocolStateVersionUpgradeServiceEvent tests the process of upgrading the protocol +// state version using a service event. +// 1. Validate that an invalid upgrade event is ignored. +// 2. Validate that a valid upgrade event is accepted and results in an upgrade. +// 3. Validate that a valid upgrade event to an unknown version is accepted and results in a halt. 
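+//
+// In each step, the active kvstore version is read from the latest protocol state
+// entry of the sealing segment, e.g.:
+//
+//	version := snapshot.Encodable().SealingSegment.LatestProtocolStateEntry().KVStore.Version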
+func (s *ProtocolVersionUpgradeSuite) TestProtocolStateVersionUpgradeServiceEvent() { + ctx := context.Background() + + serviceAddress := sdk.Address(s.net.Root().ChainID.Chain().ServiceAddress()) + env := templates.Environment{ + NodeVersionBeaconAddress: serviceAddress.String(), + } + + const ACTIVE_VIEW_DIFF = 20 // active view is 20 above execution block view + const INITIAL_PROTOCOL_VERSION = uint64(0) + const NEXT_PROTOCOL_VERSION = uint64(1) // valid version to upgrade to + const UNKNOWN_PROTOCOL_VERSION = uint64(3) // invalid version to upgrade to + + // sanity check: we should start with a v0 kvstore + snapshot := s.LatestProtocolStateSnapshot() + actualProtocolVersion := snapshot.Encodable().SealingSegment.LatestProtocolStateEntry().KVStore.Version + assert.Equal(s.T(), INITIAL_PROTOCOL_VERSION, actualProtocolVersion, "should have v0 initially") + + // 1. Invalid upgrade event should be ignored + newProtocolVersion := uint64(1) // version is valid + txResult := s.sendUpgradeProtocolVersionTx(ctx, env, newProtocolVersion, 1) // invalid activeView + s.Require().NoError(txResult.Error) + + // ensure the service event was included in a block + sealed := s.ReceiptState.WaitForReceiptFromAny(s.T(), flow.Identifier(txResult.BlockID)) + s.Require().Len(sealed.ExecutionResult.ServiceEvents, 1) + s.Require().IsType(&flow.ProtocolStateVersionUpgrade{}, sealed.ExecutionResult.ServiceEvents[0].Event) + + executedInBlock, ok := s.BlockState.ByBlockID(flow.Identifier(txResult.BlockID)) + require.True(s.T(), ok) + invalidUpgradeActiveView := executedInBlock.View + 1 // because we use a too-short activeViewDiff of 1 + + // after an invalid protocol version upgrade event, we should still have a v0 kvstore + snapshot = s.AwaitSnapshotAtView(invalidUpgradeActiveView, time.Minute, 500*time.Millisecond) + actualProtocolVersion = snapshot.Encodable().SealingSegment.LatestProtocolStateEntry().KVStore.Version + require.Equal(s.T(), INITIAL_PROTOCOL_VERSION, actualProtocolVersion, "should have v0 still after invalid upgrade") + + // 2. Valid service event should cause a version upgrade + txResult = s.sendUpgradeProtocolVersionTx(ctx, env, NEXT_PROTOCOL_VERSION, ACTIVE_VIEW_DIFF) + s.Require().NoError(txResult.Error) + + _ = s.ReceiptState.WaitForReceiptFromAny(s.T(), flow.Identifier(txResult.BlockID)) + + executedInBlock, ok = s.BlockState.ByBlockID(flow.Identifier(txResult.BlockID)) + require.True(s.T(), ok) + v1ActiveView := executedInBlock.View + ACTIVE_VIEW_DIFF + + // wait for the version to become active, then validate our kvstore has upgraded to v1 + snapshot = s.AwaitSnapshotAtView(v1ActiveView, time.Minute, 500*time.Millisecond) + actualProtocolVersion = snapshot.Encodable().SealingSegment.LatestProtocolStateEntry().KVStore.Version + require.Equal(s.T(), NEXT_PROTOCOL_VERSION, actualProtocolVersion, "should have v1 after upgrade") + + // 3. 
Upgrade to unknown version should halt progress + // For now, we just upgrade through all versions until we reach latest+1 (unknown) + for upgradeToVersion := actualProtocolVersion + 1; upgradeToVersion < UNKNOWN_PROTOCOL_VERSION; upgradeToVersion++ { + txResult = s.sendUpgradeProtocolVersionTx(ctx, env, upgradeToVersion, ACTIVE_VIEW_DIFF) + s.Require().NoError(txResult.Error) + s.AwaitProtocolVersion(upgradeToVersion, 30*time.Second, 500*time.Millisecond) + } + txResult = s.sendUpgradeProtocolVersionTx(ctx, env, UNKNOWN_PROTOCOL_VERSION, ACTIVE_VIEW_DIFF) + s.Require().NoError(txResult.Error) + _ = s.ReceiptState.WaitForReceiptFromAny(s.T(), flow.Identifier(txResult.BlockID)) + + executedInBlock, ok = s.BlockState.ByBlockID(flow.Identifier(txResult.BlockID)) + require.True(s.T(), ok) + unknownVersionActiveView := executedInBlock.View + ACTIVE_VIEW_DIFF + + // once consensus reaches unknownVersionActiveView, progress should halt + s.BlockState.WaitForHalt(s.T(), 10*time.Second, 100*time.Millisecond, time.Minute) + require.LessOrEqual(s.T(), s.BlockState.HighestProposedView(), unknownVersionActiveView) +} + +// sendUpgradeProtocolVersionTx sends a governance transaction to upgrade the protocol state version. +// This causes a corresponding flow.ProtocolStateVersionUpgrade service event to be emitted. +// For these tests we use a special transaction which chooses the activation view for the +// new version relative to the execution view, to remove a potential source of flakiness. +func (s *ProtocolVersionUpgradeSuite) sendUpgradeProtocolVersionTx( + ctx context.Context, + env templates.Environment, + newProtocolVersion, activeViewDiff uint64, +) *sdk.TransactionResult { + latestBlockID, err := s.AccessClient().GetLatestBlockID(ctx) + require.NoError(s.T(), err) + + tx, err := utils.MakeSetProtocolStateVersionTx( + env, + s.AccessClient().Account(), + 0, + sdk.Identifier(latestBlockID), + newProtocolVersion, + activeViewDiff, + ) + require.NoError(s.T(), err) + + err = s.AccessClient().SignAndSendTransaction(ctx, tx) + require.NoError(s.T(), err) + + result, err := s.AccessClient().WaitForSealed(ctx, tx.ID()) + require.NoError(s.T(), err) + s.AccessClient().Account().Keys[0].SequenceNumber++ + + return result +} diff --git a/integration/tests/upgrades/stop_at_height_test.go b/integration/tests/upgrades/stop_at_height_test.go index 35598b84e70..1b21e169488 100644 --- a/integration/tests/upgrades/stop_at_height_test.go +++ b/integration/tests/upgrades/stop_at_height_test.go @@ -27,6 +27,7 @@ type StopAtHeightRequest struct { } func (s *TestStopAtHeightSuite) TestStopAtHeight() { + enContainer := s.net.ContainerByID(s.exe1ID) serverAddr := fmt.Sprintf("localhost:%s", enContainer.Port(testnet.AdminPort)) @@ -65,11 +66,11 @@ func (s *TestStopAtHeightSuite) TestStopAtHeight() { shouldExecute := s.BlockState.WaitForBlocksByHeight(s.T(), stopHeight-1) shouldNotExecute := s.BlockState.WaitForBlocksByHeight(s.T(), stopHeight) - s.ReceiptState.WaitForReceiptFrom(s.T(), shouldExecute[0].Header.ID(), s.exe1ID) + s.ReceiptState.WaitForReceiptFrom(s.T(), shouldExecute[0].ID(), s.exe1ID) s.ReceiptState.WaitForNoReceiptFrom( s.T(), 5*time.Second, - shouldNotExecute[0].Header.ID(), + shouldNotExecute[0].ID(), s.exe1ID, ) diff --git a/integration/tests/upgrades/suite.go b/integration/tests/upgrades/suite.go index ea01ea1d7e1..806379cd568 100644 --- a/integration/tests/upgrades/suite.go +++ b/integration/tests/upgrades/suite.go @@ -2,7 +2,10 @@ package upgrades import ( "context" - "fmt" + "time" + + 
"github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/protocol_state" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -23,6 +26,9 @@ type Suite struct { net *testnet.FlowNetwork ghostID flow.Identifier exe1ID flow.Identifier + + // Determines which kvstore version is used for root state + KVStoreFactory func(flow.Identifier) (protocol_state.KVStoreAPI, error) } func (s *Suite) Ghost() *client.GhostClient { @@ -45,25 +51,15 @@ func (s *Suite) SetupTest() { }() collectionConfigs := []func(*testnet.NodeConfig){ - testnet.WithAdditionalFlag("--block-rate-delay=10ms"), + testnet.WithAdditionalFlag("--hotstuff-proposal-duration=100ms"), testnet.WithLogLevel(zerolog.WarnLevel), } consensusConfigs := []func(config *testnet.NodeConfig){ - testnet.WithAdditionalFlag("--block-rate-delay=10ms"), - testnet.WithAdditionalFlag( - fmt.Sprintf( - "--required-verification-seal-approvals=%d", - 1, - ), - ), - testnet.WithAdditionalFlag( - fmt.Sprintf( - "--required-construction-seal-approvals=%d", - 1, - ), - ), - testnet.WithLogLevel(zerolog.WarnLevel), + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=500ms"), + testnet.WithAdditionalFlag("--required-verification-seal-approvals=0"), + testnet.WithAdditionalFlag("--required-construction-seal-approvals=0"), + testnet.WithLogLevel(zerolog.InfoLevel), } // a ghost node masquerading as an access node @@ -83,10 +79,12 @@ func (s *Suite) SetupTest() { testnet.WithLogLevel(zerolog.WarnLevel), testnet.WithID(s.exe1ID), testnet.WithAdditionalFlag("--extensive-logging=true"), + testnet.WithAdditionalFlag("--max-graceful-stop-duration=1s"), ), testnet.NewNodeConfig( flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel), + testnet.WithAdditionalFlag("--max-graceful-stop-duration=1s"), ), testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), @@ -98,13 +96,15 @@ func (s *Suite) SetupTest() { ghostNode, } - netConfig := testnet.NewNetworkConfig( - "upgrade_tests", - confs, + netConfigOpts := []testnet.NetworkConfigOpt{ // set long staking phase to avoid QC/DKG transactions during test run testnet.WithViewsInStakingAuction(10_000), testnet.WithViewsInEpoch(100_000), - ) + } + if s.KVStoreFactory != nil { + netConfigOpts = append(netConfigOpts, testnet.WithKVStoreFactory(s.KVStoreFactory)) + } + netConfig := testnet.NewNetworkConfig("upgrade_tests", confs, netConfigOpts...) // initialize the network s.net = testnet.PrepareFlowNetwork(s.T(), netConfig, flow.Localnet) @@ -117,6 +117,33 @@ func (s *Suite) SetupTest() { s.Track(s.T(), ctx, s.Ghost()) } +func (s *Suite) LatestProtocolStateSnapshot() *inmem.Snapshot { + snap, err := s.AccessClient().GetLatestProtocolSnapshot(context.Background()) + require.NoError(s.T(), err) + return snap +} + +// AwaitSnapshotAtView polls until it observes a finalized snapshot with a reference +// block greater than or equal to the input target view. 
+func (s *Suite) AwaitSnapshotAtView(view uint64, waitFor, tick time.Duration) (snapshot *inmem.Snapshot) {
+	require.Eventually(s.T(), func() bool {
+		snapshot = s.LatestProtocolStateSnapshot()
+		head, err := snapshot.Head()
+		require.NoError(s.T(), err)
+		return head.View >= view
+	}, waitFor, tick)
+	return
+}
+
+func (s *Suite) AwaitProtocolVersion(v uint64, waitFor, tick time.Duration) {
+	require.Eventually(s.T(), func() bool {
+		snapshot := s.LatestProtocolStateSnapshot()
+		kvstore, err := snapshot.ProtocolState()
+		s.Require().NoError(err)
+		return kvstore.GetProtocolStateVersion() == v
+	}, waitFor, tick)
+}
+
 func (s *Suite) TearDownTest() {
 	s.log.Info().Msg("================> Start TearDownTest")
 	s.net.Remove()
diff --git a/integration/tests/upgrades/version_beacon_service_event_test.go b/integration/tests/upgrades/version_beacon_service_event_test.go
index 9422ba6abc8..ec5f1d0b5f1 100644
--- a/integration/tests/upgrades/version_beacon_service_event_test.go
+++ b/integration/tests/upgrades/version_beacon_service_event_test.go
@@ -2,14 +2,19 @@ package upgrades
 
 import (
 	"context"
+	"math"
 	"testing"
+	"time"
 
 	"github.com/coreos/go-semver/semver"
 	"github.com/onflow/cadence"
 	"github.com/onflow/flow-core-contracts/lib/go/templates"
+	"github.com/stretchr/testify/require"
 
 	sdk "github.com/onflow/flow-go-sdk"
 
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
 	"github.com/stretchr/testify/suite"
 )
 
@@ -19,42 +24,71 @@ type TestServiceEventVersionControl struct {
 }
 
 func (s *TestServiceEventVersionControl) TestEmittingVersionBeaconServiceEvent() {
-	// version 0.3.7
-	major := uint8(0)
-	minor := uint8(3)
-	patch := uint8(7)
-	preRelease := ""
-
-	serviceAddress := s.net.Root().Header.ChainID.Chain().ServiceAddress()
+	unittest.SkipUnless(s.T(), unittest.TEST_FLAKY,
+		"flaky in CI but works 100% of the time locally")
+
+	// freezePeriodForTheseTests controls the version beacon freeze period. The longer the
+	// freeze period, the more blocks we need to wait for the version beacon to take effect,
+	// making the test slower. But if the freeze period is too short,
+	// we might execute too many blocks before the version beacon takes effect.
+	//
+	// - If the test is flaky, try increasing this value.
+	// - If the test is too slow, try decreasing this value.
+	freezePeriodForTheseTests := uint64(100)
 
 	ctx := context.Background()
 
+	serviceAddress := sdk.Address(s.net.Root().ChainID.Chain().ServiceAddress())
 	env := templates.Environment{
 		NodeVersionBeaconAddress: serviceAddress.String(),
 	}
 
-	freezePeriodScript := templates.GenerateGetVersionBoundaryFreezePeriodScript(env)
-
-	// Contract should be deployed at bootstrap,
-	// so we expect this script to succeed, but ignore the return value
-	freezePeriodRaw, err := s.AccessClient().
-		ExecuteScriptBytes(ctx, freezePeriodScript, nil)
-	s.Require().NoError(err)
+	freezePeriod := s.getFreezePeriod(ctx, env)
 
+	s.Run("set freeze period script should work", func() {
+		// we also want to do this for the next test to conclude faster
+		newFreezePeriod := freezePeriodForTheseTests
+
+		s.Require().NotEqual(
+			newFreezePeriod,
+			freezePeriod,
+			"the test is pointless, "+
+				"please change the freeze period in the test")
+
+		setFreezePeriodScript := templates.GenerateChangeVersionFreezePeriodScript(env)
+		latestBlockID, err := s.AccessClient().GetLatestBlockID(ctx)
+		require.NoError(s.T(), err)
+
+		tx := sdk.NewTransaction().
+			SetScript(setFreezePeriodScript).
+			SetReferenceBlockID(sdk.Identifier(latestBlockID)).
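+			// the service account is proposer, authorizer, and payer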
+ SetProposalKey(serviceAddress, + 0, s.AccessClient().GetAndIncrementSeqNumber()). // todo track sequence number + AddAuthorizer(serviceAddress). + SetPayer(serviceAddress) + + err = tx.AddArgument(cadence.NewUInt64(newFreezePeriod)) + s.Require().NoError(err) - freezePeriod := uint64(0) + err = s.AccessClient().SignAndSendTransaction(ctx, tx) + s.Require().NoError(err) - if cadenceBuffer, is := freezePeriodRaw.(cadence.UInt64); is { - freezePeriod = cadenceBuffer.ToGoValue().(uint64) - } else { - s.Require().Failf( - "version freezePeriod script returned unknown type", - "%t", - freezePeriodRaw, - ) - } + result, err := s.AccessClient().WaitForSealed(ctx, tx.ID()) + require.NoError(s.T(), err) + + s.Require().NoError(result.Error) + + freezePeriod = s.getFreezePeriod(ctx, env) + s.Require().Equal(newFreezePeriod, freezePeriod) + }) s.Run("should fail adding version boundary inside the freeze period", func() { + latestFinalized, err := s.AccessClient().GetLatestFinalizedBlockHeader(ctx) + require.NoError(s.T(), err) - height := freezePeriod / 2 + height := latestFinalized.Height + freezePeriod - 5 + major := uint8(0) + minor := uint8(0) + patch := uint8(1) txResult := s.sendSetVersionBoundaryTransaction( ctx, @@ -63,7 +97,7 @@ func (s *TestServiceEventVersionControl) TestEmittingVersionBeaconServiceEvent() Major: major, Minor: minor, Patch: patch, - PreRelease: preRelease, + PreRelease: "", BlockHeight: height, }) s.Require().Error(txResult.Error) @@ -75,11 +109,19 @@ func (s *TestServiceEventVersionControl) TestEmittingVersionBeaconServiceEvent() }) s.Run("should add version boundary after the freeze period", func() { + latestFinalized, err := s.AccessClient().GetLatestFinalizedBlockHeader(ctx) + require.NoError(s.T(), err) // make sure target height is correct // the height at which the version change will take effect should be after // the current height + the freeze period - height := freezePeriod + 200 + height := latestFinalized.Height + freezePeriod + 100 + + // version 0.0.1 + // low version to not interfere with other tests + major := uint8(0) + minor := uint8(0) + patch := uint8(1) txResult := s.sendSetVersionBoundaryTransaction( ctx, @@ -88,7 +130,7 @@ func (s *TestServiceEventVersionControl) TestEmittingVersionBeaconServiceEvent() Major: major, Minor: minor, Patch: patch, - PreRelease: preRelease, + PreRelease: "", BlockHeight: height, }) s.Require().NoError(txResult.Error) @@ -126,6 +168,83 @@ func (s *TestServiceEventVersionControl) TestEmittingVersionBeaconServiceEvent() s.Require().Equal(patch, uint8(version.Patch)) }) + s.Run("stop with version beacon", func() { + latestFinalized, err := s.AccessClient().GetLatestFinalizedBlockHeader(ctx) + require.NoError(s.T(), err) + + // make sure target height is correct + // the height at which the version change will take effect should be after + // the current height + the freeze period + height := latestFinalized.Height + freezePeriod + 100 + + // max version to be sure that the node version is lower so we force a stop + major := uint8(math.MaxUint8) + minor := uint8(math.MaxUint8) + patch := uint8(math.MaxUint8) + + txResult := s.sendSetVersionBoundaryTransaction( + ctx, + env, + versionBoundary{ + Major: major, + Minor: minor, + Patch: patch, + PreRelease: "", + BlockHeight: height, + }) + s.Require().NoError(txResult.Error) + + sealed := s.ReceiptState.WaitForReceiptFromAny( + s.T(), + flow.Identifier(txResult.BlockID)) + + s.Require().Len(sealed.ExecutionResult.ServiceEvents, 1) + s.Require().IsType( + &flow.VersionBeacon{}, 
+ sealed.ExecutionResult.ServiceEvents[0].Event) + + versionTable := sealed.ExecutionResult.ServiceEvents[0].Event.(*flow.VersionBeacon) + + s.Require().Equal(height, versionTable.VersionBoundaries[len(versionTable.VersionBoundaries)-1].BlockHeight) + version, err := semver.NewVersion(versionTable.VersionBoundaries[len(versionTable.VersionBoundaries)-1].Version) + s.Require().NoError(err) + s.Require().Equal(major, uint8(version.Major)) + s.Require().Equal(minor, uint8(version.Minor)) + s.Require().Equal(patch, uint8(version.Patch)) + + shouldExecute := s.BlockState.WaitForBlocksByHeight(s.T(), height-1) + shouldNotExecute := s.BlockState.WaitForBlocksByHeight(s.T(), height) + + s.ReceiptState.WaitForReceiptFrom(s.T(), shouldExecute[0].ID(), s.exe1ID) + s.ReceiptState.WaitForNoReceiptFrom( + s.T(), + 5*time.Second, + shouldNotExecute[0].ID(), + s.exe1ID, + ) + + enContainer := s.net.ContainerByID(s.exe1ID) + err = enContainer.WaitForContainerStopped(30 * time.Second) + s.NoError(err) + }) +} + +func (s *TestServiceEventVersionControl) getFreezePeriod( + ctx context.Context, + env templates.Environment, +) uint64 { + + freezePeriodScript := templates.GenerateGetVersionBoundaryFreezePeriodScript(env) + + freezePeriodRaw, err := s.AccessClient(). + ExecuteScriptBytes(ctx, freezePeriodScript, nil) + s.Require().NoError(err) + + cadenceBuffer, is := freezePeriodRaw.(cadence.UInt64) + + s.Require().True(is, "version freezePeriod script returned unknown type") + + return uint64(cadenceBuffer) } type versionBoundary struct { @@ -141,13 +260,13 @@ func (s *TestServiceEventVersionControl) sendSetVersionBoundaryTransaction( env templates.Environment, boundary versionBoundary, ) *sdk.TransactionResult { - serviceAddress := s.net.Root().Header.ChainID.Chain().ServiceAddress() + serviceAddress := s.net.Root().ChainID.Chain().ServiceAddress() versionTableChangeScript := templates.GenerateSetVersionBoundaryScript(env) latestBlockId, err := s.AccessClient().GetLatestBlockID(ctx) s.Require().NoError(err) - seq := s.AccessClient().GetSeqNumber() + seq := s.AccessClient().GetAndIncrementSeqNumber() tx := sdk.NewTransaction(). SetScript(versionTableChangeScript). @@ -156,27 +275,14 @@ func (s *TestServiceEventVersionControl) sendSetVersionBoundaryTransaction( SetPayer(sdk.Address(serviceAddress)). 
AddAuthorizer(sdk.Address(serviceAddress))
 
-	// args
-	// newMajor: UInt8,
-	// newMinor: UInt8,
-	// newPatch: UInt8,
-	// newPreRelease: String?,
-	// targetBlockHeight: UInt64
-
 	err = tx.AddArgument(cadence.NewUInt8(boundary.Major))
 	s.Require().NoError(err)
-
 	err = tx.AddArgument(cadence.NewUInt8(boundary.Minor))
 	s.Require().NoError(err)
-
 	err = tx.AddArgument(cadence.NewUInt8(boundary.Patch))
 	s.Require().NoError(err)
-
-	preReleaseCadenceString, err := cadence.NewString(boundary.PreRelease)
-	s.Require().NoError(err)
-	err = tx.AddArgument(preReleaseCadenceString)
+	err = tx.AddArgument(cadence.String(boundary.PreRelease))
 	s.Require().NoError(err)
-
 	err = tx.AddArgument(cadence.NewUInt64(boundary.BlockHeight))
 	s.Require().NoError(err)
 
diff --git a/integration/tests/verification/suite.go b/integration/tests/verification/suite.go
index 0bef62132f4..f4a8b7d101c 100644
--- a/integration/tests/verification/suite.go
+++ b/integration/tests/verification/suite.go
@@ -65,8 +65,6 @@ func (s *Suite) SetupSuite() {
 	s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel)
 	s.log.Info().Msg("================> SetupTest")
 
-	blockRateFlag := "--block-rate-delay=1ms"
-
 	s.nodeConfigs = append(s.nodeConfigs, testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.FatalLevel)))
 
 	// generate the four consensus identities
@@ -77,7 +75,7 @@ func (s *Suite) SetupSuite() {
 			testnet.WithLogLevel(zerolog.FatalLevel),
 			testnet.WithAdditionalFlag("--required-verification-seal-approvals=1"),
 			testnet.WithAdditionalFlag("--required-construction-seal-approvals=1"),
-			testnet.WithAdditionalFlag(blockRateFlag),
+			testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=1ms"),
 		)
 		s.nodeConfigs = append(s.nodeConfigs, nodeConfig)
 	}
@@ -88,7 +86,9 @@ func (s *Suite) SetupSuite() {
 		testnet.WithID(s.verID),
 		testnet.WithLogLevel(zerolog.WarnLevel),
 		// only verification and execution nodes run with preferred unicast protocols
-		testnet.WithAdditionalFlag(fmt.Sprintf("--preferred-unicast-protocols=%s", s.PreferredUnicasts)))
+		testnet.WithAdditionalFlag(fmt.Sprintf("--preferred-unicast-protocols=%s", s.PreferredUnicasts)),
+		testnet.WithAdditionalFlag("--scheduled-callbacks-enabled=true"),
+	)
 	s.nodeConfigs = append(s.nodeConfigs, verConfig)
 
 	// generates two execution nodes
@@ -97,7 +97,9 @@ func (s *Suite) SetupSuite() {
 		testnet.WithID(s.exe1ID),
 		testnet.WithLogLevel(zerolog.InfoLevel),
 		// only verification and execution nodes run with preferred unicast protocols
-		testnet.WithAdditionalFlag(fmt.Sprintf("--preferred-unicast-protocols=%s", s.PreferredUnicasts)))
+		testnet.WithAdditionalFlag(fmt.Sprintf("--preferred-unicast-protocols=%s", s.PreferredUnicasts)),
+		testnet.WithAdditionalFlag("--scheduled-callbacks-enabled=true"),
+	)
 	s.nodeConfigs = append(s.nodeConfigs, exe1Config)
 
 	s.exe2ID = unittest.IdentifierFixture()
@@ -105,17 +107,19 @@ func (s *Suite) SetupSuite() {
 		testnet.WithID(s.exe2ID),
 		testnet.WithLogLevel(zerolog.InfoLevel),
 		// only verification and execution nodes run with preferred unicast protocols
-		testnet.WithAdditionalFlag(fmt.Sprintf("--preferred-unicast-protocols=%s", s.PreferredUnicasts)))
+		testnet.WithAdditionalFlag(fmt.Sprintf("--preferred-unicast-protocols=%s", s.PreferredUnicasts)),
+		testnet.WithAdditionalFlag("--scheduled-callbacks-enabled=true"),
+	)
 	s.nodeConfigs = append(s.nodeConfigs, exe2Config)
 
 	// generates two collection node
 	coll1Config := testnet.NewNodeConfig(flow.RoleCollection,
 		testnet.WithLogLevel(zerolog.FatalLevel),
-		testnet.WithAdditionalFlag(blockRateFlag),
+
testnet.WithAdditionalFlag("--hotstuff-proposal-duration=1ms"), ) coll2Config := testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), - testnet.WithAdditionalFlag(blockRateFlag), + testnet.WithAdditionalFlag("--hotstuff-proposal-duration=1ms"), ) s.nodeConfigs = append(s.nodeConfigs, coll1Config, coll2Config) @@ -127,7 +131,8 @@ func (s *Suite) SetupSuite() { ghostConfig := testnet.NewNodeConfig(flow.RoleExecution, testnet.WithID(s.ghostID), testnet.AsGhost(), - testnet.WithLogLevel(zerolog.FatalLevel)) + testnet.WithLogLevel(zerolog.FatalLevel), + ) s.nodeConfigs = append(s.nodeConfigs, ghostConfig) // generates, initializes, and starts the Flow network diff --git a/integration/tests/verification/verification_stream_negotiation_test.go b/integration/tests/verification/verification_stream_negotiation_test.go index e2c8a940072..c87c6270ed3 100644 --- a/integration/tests/verification/verification_stream_negotiation_test.go +++ b/integration/tests/verification/verification_stream_negotiation_test.go @@ -40,10 +40,10 @@ func testVerificationNodeHappyPath(t *testing.T, // wait for next height finalized (potentially first height), called blockA currentFinalized := blocks.HighestFinalizedHeight() blockA := blocks.WaitForHighestFinalizedProgress(t, currentFinalized) - t.Logf("blockA generated, height: %v ID: %v\n", blockA.Header.Height, blockA.Header.ID()) + t.Logf("blockA generated, height: %v ID: %v\n", blockA.Height, blockA.ID()) // waits for execution receipt for blockA from execution node, called receiptA - receiptA := receipts.WaitForReceiptFrom(t, blockA.Header.ID(), exeID) + receiptA := receipts.WaitForReceiptFrom(t, blockA.ID(), exeID) resultID := receiptA.ExecutionResult.ID() t.Logf("receipt for blockA generated: result ID: %x\n", resultID) diff --git a/integration/tests/verification/verify_scheduled_callback_test.go b/integration/tests/verification/verify_scheduled_callback_test.go new file mode 100644 index 00000000000..3030948d894 --- /dev/null +++ b/integration/tests/verification/verify_scheduled_callback_test.go @@ -0,0 +1,86 @@ +package verification + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + sdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" +) + +func TestVerifyScheduledCallback(t *testing.T) { + suite.Run(t, new(VerifyScheduledCallbackSuite)) +} + +type VerifyScheduledCallbackSuite struct { + Suite +} + +func (s *VerifyScheduledCallbackSuite) TestVerifyScheduledCallback() { + sc := systemcontracts.SystemContractsForChain(s.net.Root().HeaderBody.ChainID) + + // Wait for next height finalized (potentially first height) + currentFinalized := s.BlockState.HighestFinalizedHeight() + blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized) + s.T().Logf("got blockA height %v ID %v", blockA.HeaderBody.Height, blockA.ID()) + + // Deploy the test contract first + err := lib.DeployScheduledCallbackTestContract( + s.AccessClient(), + sdk.Address(sc.FlowCallbackScheduler.Address), + sdk.Address(sc.FlowToken.Address), + sdk.Address(sc.FungibleToken.Address), + sdk.Identifier(s.net.Root().ID()), + ) + require.NoError(s.T(), err, "could not deploy test contract") + + // Wait for next height finalized before scheduling callback + s.BlockState.WaitForHighestFinalizedProgress(s.T(), s.BlockState.HighestFinalizedHeight()) + + 
// Schedule a callback for 10 seconds in the future
+	scheduleDelta := int64(10)
+	futureTimestamp := time.Now().Unix() + scheduleDelta
+
+	s.T().Logf("scheduling callback at timestamp: %v, current timestamp: %v", futureTimestamp, time.Now().Unix())
+	callbackID, err := lib.ScheduleCallbackAtTimestamp(
+		futureTimestamp,
+		s.AccessClient(),
+		sdk.Address(sc.FlowCallbackScheduler.Address),
+		sdk.Address(sc.FlowToken.Address),
+		sdk.Address(sc.FungibleToken.Address),
+	)
+	require.NoError(s.T(), err, "could not schedule callback transaction")
+	s.T().Logf("scheduled callback with ID: %d", callbackID)
+
+	// wait for block that executed the scheduled callbacks to be sealed (plus some buffer)
+	var sealedBlock *flow.Block
+	require.Eventually(s.T(), func() bool {
+		sealed, ok := s.BlockState.HighestSealed()
+		require.True(s.T(), ok)
+		sealedBlock = sealed
+		// the sealed timestamp is in milliseconds: /1000 drops the ms, and +5 adds some buffer
+		return uint64(sealed.Timestamp/1000) > uint64(futureTimestamp+5)
+	}, 30*time.Second, 1000*time.Millisecond)
+
+	// make sure callback executed event was emitted
+	eventTypeString := fmt.Sprintf("A.%v.FlowTransactionScheduler.Executed", sc.FlowCallbackScheduler.Address)
+	events, err := s.AccessClient().GetEventsForHeightRange(context.Background(), eventTypeString, blockA.HeaderBody.Height, sealedBlock.Height)
+	require.NoError(s.T(), err)
+
+	eventCount := 0
+	for _, event := range events {
+		for range event.Events {
+			eventCount++
+		}
+	}
+
+	require.Equal(s.T(), 1, eventCount, "expected 1 callback executed event")
+}
diff --git a/integration/tests/verification/verify_system_chunk_test.go b/integration/tests/verification/verify_system_chunk_test.go
index f972adaa23d..1c41e5bf091 100644
--- a/integration/tests/verification/verify_system_chunk_test.go
+++ b/integration/tests/verification/verify_system_chunk_test.go
@@ -21,30 +21,22 @@ func (suite *VerifySystemChunkSuite) TestSystemChunkIDsShouldBeDifferent() {
 	//
 	// wait for next height finalized (potentially first height), called blockA
 	currentFinalized := suite.BlockState.HighestFinalizedHeight()
 	blockA := suite.BlockState.WaitForHighestFinalizedProgress(suite.T(), currentFinalized)
-	suite.T().Logf("blockA generated, height: %v ID: %v\n", blockA.Header.Height, blockA.Header.ID())
+	suite.T().Logf("blockA generated, height: %v ID: %v\n", blockA.Height, blockA.ID())
 
 	// waits for the next finalized block after blockA, called blockB.
 	blockB := suite.BlockState.WaitForFinalizedChild(suite.T(), blockA)
-	suite.T().Logf("blockB generated, height: %v ID: %v\n", blockB.Header.Height, blockB.Header.ID())
+	suite.T().Logf("blockB generated, height: %v ID: %v\n", blockB.Height, blockB.ID())
 
 	// waits for execution receipt for blockA from execution node, called receiptA.
-	receiptA := suite.ReceiptState.WaitForReceiptFrom(suite.T(), blockA.Header.ID(), suite.exe1ID)
+	receiptA := suite.ReceiptState.WaitForReceiptFrom(suite.T(), blockA.ID(), suite.exe1ID)
 	resultAId := receiptA.ExecutionResult.ID()
 	suite.T().Logf("receipt for blockA generated: result ID: %x\n", resultAId)
 
 	// waits for execution receipt for blockB from execution node, called receiptB.
-	receiptB := suite.ReceiptState.WaitForReceiptFrom(suite.T(), blockB.Header.ID(), suite.exe1ID)
+	receiptB := suite.ReceiptState.WaitForReceiptFrom(suite.T(), blockB.ID(), suite.exe1ID)
 	resultBId := receiptB.ExecutionResult.ID()
 	suite.T().Logf("receipt for blockB generated: result ID: %x\n", resultBId)
 
-	// Todo: drop this part once system chunk changes the state
-	// requires that execution state is not changed between block A and B
-	stateA, err := receiptA.ExecutionResult.FinalStateCommitment()
-	require.NoError(suite.T(), err)
-	stateB, err := receiptB.ExecutionResult.FinalStateCommitment()
-	require.NoError(suite.T(), err)
-	require.Equal(suite.T(), stateA, stateB)
-
 	// computes ids of system chunk for result A and B
 	systemChunkA := receiptA.ExecutionResult.Chunks[0]
 	systemChunkAId := systemChunkA.ID()
diff --git a/integration/utils/arguments.go b/integration/utils/arguments.go
new file mode 100644
index 00000000000..d14d730265f
--- /dev/null
+++ b/integration/utils/arguments.go
@@ -0,0 +1,42 @@
+package utils
+
+import (
+	"encoding/json"
+
+	"github.com/onflow/cadence"
+	jsoncdc "github.com/onflow/cadence/encoding/json"
+)
+
+type cadenceArgument struct {
+	Value cadence.Value
+}
+
+func (v *cadenceArgument) MarshalJSON() ([]byte, error) {
+	return jsoncdc.Encode(v.Value)
+}
+
+func (v *cadenceArgument) UnmarshalJSON(b []byte) (err error) {
+	v.Value, err = jsoncdc.Decode(nil, b)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ParseJSON parses a string representing a JSON array of Cadence arguments.
+//
+// Cadence arguments must be defined in the JSON-Cadence format https://developers.flow.com/cadence/json-cadence-spec
+func ParseJSON(args []byte) ([]cadence.Value, error) {
+	var arg []cadenceArgument
+	err := json.Unmarshal(args, &arg)
+	if err != nil {
+		return nil, err
+	}
+
+	cadenceArgs := make([]cadence.Value, len(arg))
+	for i, arg := range arg {
+		cadenceArgs[i] = arg.Value
+	}
+	return cadenceArgs, nil
+}
diff --git a/integration/utils/blob_service.go b/integration/utils/blob_service.go
index ba18f68ae8c..d394446b04f 100644
--- a/integration/utils/blob_service.go
+++ b/integration/utils/blob_service.go
@@ -4,10 +4,10 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/ipfs/go-blockservice"
+	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/boxo/blockstore"
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-datastore"
-	blockstore "github.com/ipfs/go-ipfs-blockstore"
 
 	"github.com/onflow/flow-go/module/blobs"
 	"github.com/onflow/flow-go/module/component"
diff --git a/integration/utils/dependency_test.go b/integration/utils/dependency_test.go
new file mode 100644
index 00000000000..8ac0fac8cc2
--- /dev/null
+++ b/integration/utils/dependency_test.go
@@ -0,0 +1,8 @@
+package utils
+
+import "github.com/btcsuite/btcd/chaincfg/chainhash"
+
+// this is added to resolve the issue with chainhash ambiguous import,
+// the code is not used, but it's needed to force go.mod to specify and retain the chainhash version
+// workaround for issue: https://github.com/golang/go/issues/27899
+var _ = chainhash.Hash{}
diff --git a/integration/utils/emulator_client.go b/integration/utils/emulator_client.go
index 18ddee8cfde..6d89ccf45ac 100644
--- a/integration/utils/emulator_client.go
+++ b/integration/utils/emulator_client.go
@@ -6,31 +6,37 @@ import (
 	"github.com/onflow/cadence"
 	jsoncdc "github.com/onflow/cadence/encoding/json"
-	emulator "github.com/onflow/flow-emulator"
+	"github.com/rs/zerolog"
 
 	sdk "github.com/onflow/flow-go-sdk"
+	"github.com/onflow/flow-go-sdk/templates"
+
+	emulator 
"github.com/onflow/flow-go/integration/internal/emulator" "github.com/onflow/flow-go/model/flow" ) // EmulatorClient is a wrapper around the emulator to implement the same interface // used by the SDK client. Used for testing against the emulator. type EmulatorClient struct { - blockchain *emulator.Blockchain + adapter *emulator.SDKAdapter } -func NewEmulatorClient(blockchain *emulator.Blockchain) *EmulatorClient { +func NewEmulatorClient(blockchain emulator.Emulator) *EmulatorClient { + logger := zerolog.Nop() + + adapter := emulator.NewSDKAdapter(&logger, blockchain) client := &EmulatorClient{ - blockchain: blockchain, + adapter: adapter, } return client } func (c *EmulatorClient) GetAccount(ctx context.Context, address sdk.Address) (*sdk.Account, error) { - return c.blockchain.GetAccount(address) + return c.adapter.GetAccount(ctx, address) } func (c *EmulatorClient) GetAccountAtLatestBlock(ctx context.Context, address sdk.Address) (*sdk.Account, error) { - return c.blockchain.GetAccount(address) + return c.adapter.GetAccount(ctx, address) } func (c *EmulatorClient) SendTransaction(ctx context.Context, tx sdk.Transaction) error { @@ -39,24 +45,19 @@ func (c *EmulatorClient) SendTransaction(ctx context.Context, tx sdk.Transaction } func (c *EmulatorClient) GetLatestBlock(ctx context.Context, isSealed bool) (*sdk.Block, error) { - block, err := c.blockchain.GetLatestBlock() + block, _, err := c.adapter.GetLatestBlock(ctx, true) if err != nil { return nil, err } - blockID := block.ID() - - var id sdk.Identifier - copy(id[:], blockID[:]) - sdkBlock := &sdk.Block{ - BlockHeader: sdk.BlockHeader{ID: id}, + BlockHeader: sdk.BlockHeader{ID: block.ID}, } return sdkBlock, nil } func (c *EmulatorClient) GetTransactionResult(ctx context.Context, txID sdk.Identifier) (*sdk.TransactionResult, error) { - return c.blockchain.GetTransactionResult(txID) + return c.adapter.GetTransactionResult(ctx, txID) } func (c *EmulatorClient) ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, args []cadence.Value) (cadence.Value, error) { @@ -70,12 +71,17 @@ func (c *EmulatorClient) ExecuteScriptAtLatestBlock(ctx context.Context, script arguments = append(arguments, val) } - scriptResult, err := c.blockchain.ExecuteScript(script, arguments) + scriptResult, err := c.adapter.ExecuteScriptAtLatestBlock(ctx, script, arguments) + if err != nil { + return nil, err + } + + value, err := jsoncdc.Decode(nil, scriptResult) if err != nil { return nil, err } - return scriptResult.Value, nil + return value, nil } func (c *EmulatorClient) ExecuteScriptAtBlockID(ctx context.Context, blockID sdk.Identifier, script []byte, args []cadence.Value) (cadence.Value, error) { @@ -90,31 +96,38 @@ func (c *EmulatorClient) ExecuteScriptAtBlockID(ctx context.Context, blockID sdk } // get block by ID - block, err := c.blockchain.GetBlockByID(blockID) + block, _, err := c.adapter.GetBlockByID(ctx, blockID) if err != nil { return nil, err } - scriptResult, err := c.blockchain.ExecuteScriptAtBlock(script, arguments, block.Header.Height) + scriptResult, err := c.adapter.ExecuteScriptAtBlockHeight(ctx, block.BlockHeader.Height, script, arguments) + if err != nil { - return nil, err + return nil, fmt.Errorf("error in script: %w", err) } - if scriptResult.Error != nil { - return nil, fmt.Errorf("error in script: %w", scriptResult.Error) + value, err := jsoncdc.Decode(nil, scriptResult) + if err != nil { + return nil, err } - return scriptResult.Value, nil + return value, nil +} + +func (c *EmulatorClient) CreateAccount(keys 
[]*sdk.AccountKey, contracts []templates.Contract) (sdk.Address, error) { + return c.adapter.CreateAccount(context.Background(), keys, contracts) + } func (c *EmulatorClient) Submit(tx *sdk.Transaction) (*flow.Block, error) { // submit the signed transaction - err := c.blockchain.AddTransaction(*tx) + err := c.adapter.SendTransaction(context.Background(), *tx) if err != nil { return nil, err } - block, _, err := c.blockchain.ExecuteAndCommitBlock() + block, _, err := c.adapter.Emulator().ExecuteAndCommitBlock() if err != nil { return nil, err } diff --git a/integration/utils/temp_dep_test.go b/integration/utils/temp_dep_test.go new file mode 100644 index 00000000000..05fc5b87e1f --- /dev/null +++ b/integration/utils/temp_dep_test.go @@ -0,0 +1,8 @@ +package utils + +import "github.com/btcsuite/btcd/chaincfg/chainhash" + +// this is added to resolve the issue with chainhash ambiguous import, +// the code is not used, but it's needed to force go.mod to specify and retain chainhash version +// workaround for issue: https://github.com/golang/go/issues/27899 +var _ = chainhash.Hash{} diff --git a/integration/utils/templates/create-and-setup-node.cdc b/integration/utils/templates/create-and-setup-node.cdc index 0a631e9276d..8f22579a00a 100644 --- a/integration/utils/templates/create-and-setup-node.cdc +++ b/integration/utils/templates/create-and-setup-node.cdc @@ -1,8 +1,8 @@ import Crypto -import FungibleToken from 0xFUNGIBLETOKENADDRESS -import FlowToken from 0xFLOWTOKENADDRESS -import FlowIDTableStaking from 0xIDENTITYTABLEADDRESS -import FlowStakingCollection from 0xSTAKINGCOLLECTIONADDRESS +import "FungibleToken" +import "FlowToken" +import "FlowIDTableStaking" +import "FlowStakingCollection" transaction( stakingAcctKey: Crypto.KeyListEntry, @@ -12,35 +12,47 @@ transaction( networkingAddress: String, networkingKey: String, stakingKey: String, + stakingKeyPoP: String, machineAcctKey: Crypto.KeyListEntry?) { - prepare(service: AuthAccount) { + prepare(service: auth(BorrowValue) &Account) { // 1 - create the staking account for the new node. // - let stakingAccount = AuthAccount(payer: service) + let stakingAccount = Account(payer: service) stakingAccount.keys.add(publicKey: stakingAcctKey.publicKey, hashAlgorithm: stakingAcctKey.hashAlgorithm, weight: stakingAcctKey.weight) // 2 - fund the new staking account // - let stakeDst = stakingAccount.getCapability(/public/flowTokenReceiver).borrow<&{FungibleToken.Receiver}>() + let stakeDst = stakingAccount.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? panic("Could not borrow receiver reference to the recipient's Vault") // withdraw stake from service account - let stakeSrc = service.borrow<&FlowToken.Vault>(from: /storage/flowTokenVault) + let stakeSrc = service.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner's Vault!") stakeDst.deposit(from: <-stakeSrc.withdraw(amount: stake)) // 3 - set up the staking collection // - let flowToken = stakingAccount.link<&FlowToken.Vault>(/private/flowTokenVault, target: /storage/flowTokenVault)! 
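+		// Cadence 1.0: private capability links are replaced by issued storage capabilities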
+ let vaultCap = stakingAccount.capabilities.storage.issue<auth(FungibleToken.Withdraw) &FlowToken.Vault>(/storage/flowTokenVault) + // Create a new Staking Collection and put it in storage - let stakingCollection <-FlowStakingCollection.createStakingCollection(unlockedVault: flowToken, tokenHolder: nil) - let stakingCollectionRef = &stakingCollection as &FlowStakingCollection.StakingCollection - stakingAccount.save(<-stakingCollection, to: FlowStakingCollection.StakingCollectionStoragePath) + let stakingCollection <-FlowStakingCollection.createStakingCollection(unlockedVault: vaultCap, tokenHolder: nil) + stakingAccount.storage.save(<-stakingCollection, to: FlowStakingCollection.StakingCollectionStoragePath) + + // Reference must be taken after storing in the storage. + // Otherwise the reference gets invalidated upon move. + let stakingCollectionRef = stakingAccount.storage + .borrow<auth(FlowStakingCollection.CollectionOwner) &FlowStakingCollection.StakingCollection>( + from: FlowStakingCollection.StakingCollectionStoragePath, + ) + ?? panic("Could not borrow reference to the staking collection") // Create a public link to the staking collection - stakingAccount.link <&FlowStakingCollection.StakingCollection{FlowStakingCollection.StakingCollectionPublic}> ( - FlowStakingCollection.StakingCollectionPublicPath, - target: FlowStakingCollection.StakingCollectionStoragePath + let stakingCollectionCap = stakingAccount.capabilities.storage + .issue<&FlowStakingCollection.StakingCollection>(FlowStakingCollection.StakingCollectionStoragePath) + + stakingAccount.capabilities.publish( + stakingCollectionCap, + at: FlowStakingCollection.StakingCollectionPublicPath, ) // 4 - register the node @@ -51,6 +63,7 @@ transaction( networkingAddress: networkingAddress, networkingKey: networkingKey, stakingKey: stakingKey, + stakingKeyPoP: stakingKeyPoP, amount: stake, payer: service, ) { diff --git a/integration/utils/templates/remove-node.cdc b/integration/utils/templates/remove-node.cdc index 3cc185b87fe..db8fef92f0a 100644 --- a/integration/utils/templates/remove-node.cdc +++ b/integration/utils/templates/remove-node.cdc @@ -1,4 +1,4 @@ -import FlowIDTableStaking from 0xIDENTITYTABLEADDRESS +import "FlowIDTableStaking" // This transaction removes an existing node from the identity table // by unstaking it and removing it from the approved list @@ -7,9 +7,9 @@ transaction(id: String) { // Local variable for a reference to the ID Table Admin object let adminRef: &FlowIDTableStaking.Admin - prepare(acct: AuthAccount) { + prepare(acct: auth(BorrowValue) &Account) { // borrow a reference to the admin object - self.adminRef = acct.borrow<&FlowIDTableStaking.Admin>(from: FlowIDTableStaking.StakingAdminStoragePath) + self.adminRef = acct.storage.borrow<&FlowIDTableStaking.Admin>(from: FlowIDTableStaking.StakingAdminStoragePath) ?? panic("Could not borrow reference to staking admin") } diff --git a/integration/utils/templates/set-protocol-state-version.cdc b/integration/utils/templates/set-protocol-state-version.cdc new file mode 100644 index 00000000000..0dd86c4eb61 --- /dev/null +++ b/integration/utils/templates/set-protocol-state-version.cdc @@ -0,0 +1,29 @@ +import NodeVersionBeacon from "NodeVersionBeacon" + +/// Transaction that allows NodeVersionAdmin to specify a new protocol state version. 
+/// The new version will become active at view `activeView` if the service event
+/// is processed and applied to the protocol state within a block `B` such that
+/// `B.view + ∆ < activeView`, for a protocol-defined safety threshold ∆.
+/// Service events not meeting this threshold are discarded.
+///
+/// This is a special version of the admin transaction for use in integration tests.
+/// We allow the sender to pass in a value to add to the current view, to reduce
+/// the likelihood that a test spuriously fails due to timing.
+transaction(newProtocolVersion: UInt64, activeViewDiff: UInt64) {
+
+    let adminRef: &NodeVersionBeacon.Admin
+
+    prepare(acct: auth(BorrowValue) &Account) {
+        // Borrow a reference to the NodeVersionAdmin implementing resource
+        self.adminRef = acct.storage.borrow<&NodeVersionBeacon.Admin>(from: NodeVersionBeacon.AdminStoragePath)
+            ?? panic("Couldn't borrow NodeVersionBeacon.Admin Resource")
+    }
+
+    execute {
+        let block = getCurrentBlock()
+        self.adminRef.emitProtocolStateVersionUpgrade(
+            newProtocolVersion: newProtocolVersion,
+            activeView: block.view + activeViewDiff
+        )
+    }
+}
diff --git a/integration/utils/transactions.go b/integration/utils/transactions.go
index 26e1eb2012a..ec83807e3e5 100644
--- a/integration/utils/transactions.go
+++ b/integration/utils/transactions.go
@@ -1,14 +1,21 @@
 package utils

 import (
+	"context"
 	_ "embed"
+	"fmt"

 	"github.com/onflow/cadence"
+	"github.com/onflow/crypto"
 	"github.com/onflow/flow-core-contracts/lib/go/templates"
 	sdk "github.com/onflow/flow-go-sdk"
+	sdkcrypto "github.com/onflow/flow-go-sdk/crypto"
 	sdktemplates "github.com/onflow/flow-go-sdk/templates"
+
+	"github.com/onflow/flow-go/integration/testnet"
 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
 )

 //go:embed templates/create-and-setup-node.cdc
@@ -17,8 +24,12 @@ var createAndSetupNodeTxScript string
 //go:embed templates/remove-node.cdc
 var removeNodeTxScript string

+//go:embed "templates/set-protocol-state-version.cdc"
+var setProtocolStateVersionScript string
+
 func LocalnetEnv() templates.Environment {
 	return templates.Environment{
+		EpochAddress:              "f8d6e0586b0a20c7",
 		IDTableAddress:            "f8d6e0586b0a20c7",
 		FungibleTokenAddress:      "ee82856bf20e2aa6",
 		FlowTokenAddress:          "0ae53cb6e3f42a79",
@@ -43,6 +54,7 @@ func MakeCreateAndSetupNodeTx(
 	networkingAddress string,
 	networkingKey string,
 	stakingKey string,
+	stakingKeyPoP string,
 	machineKey *sdk.AccountKey,
 ) (
 	*sdk.Transaction,
@@ -52,7 +64,7 @@ func MakeCreateAndSetupNodeTx(
 	script := []byte(templates.ReplaceAddresses(createAndSetupNodeTxScript, env))
 	tx := sdk.NewTransaction().
 		SetScript(script).
-		SetGasLimit(9999).
+		SetComputeLimit(9999).
 		SetReferenceBlockID(latestBlockID).
 		SetProposalKey(service.Address, 0, service.Keys[0].SequenceNumber).
 		AddAuthorizer(service.Address).
@@ -125,6 +137,16 @@ func MakeCreateAndSetupNodeTx(
 		return nil, err
 	}

+	// 7 - staking key PoP
+	stakingKeyPoPCDC, err := cadence.NewString(stakingKeyPoP)
+	if err != nil {
+		return nil, err
+	}
+	err = tx.AddArgument(stakingKeyPoPCDC)
+	if err != nil {
+		return nil, err
+	}
+
 	if machineKey != nil {
 		// for collection/consensus nodes, register the machine account key
 		cdcMachineAcctKey, err := sdktemplates.AccountKeyToCadenceCryptoKey(machineKey)
@@ -151,14 +173,14 @@ func MakeCreateAndSetupNodeTx(
 func MakeAdminRemoveNodeTx(
 	env templates.Environment,
 	adminAccount *sdk.Account,
-	adminAccountKeyID int,
+	adminAccountKeyID uint32,
 	latestBlockID sdk.Identifier,
 	nodeID flow.Identifier,
 ) (*sdk.Transaction, error) {
 	accountKey := adminAccount.Keys[adminAccountKeyID]
 	tx := sdk.NewTransaction().
 		SetScript([]byte(templates.ReplaceAddresses(removeNodeTxScript, env))).
-		SetGasLimit(9999).
+		SetComputeLimit(9999).
 		SetReferenceBlockID(latestBlockID).
 		SetProposalKey(adminAccount.Address, adminAccountKeyID, accountKey.SequenceNumber).
 		SetPayer(adminAccount.Address).
@@ -172,3 +194,84 @@ func MakeAdminRemoveNodeTx(

 	return tx, nil
 }
+
+// MakeSetProtocolStateVersionTx makes an admin transaction to set the protocol state version.
+// See the Cadence transaction file for detailed documentation.
+func MakeSetProtocolStateVersionTx(
+	env templates.Environment,
+	adminAccount *sdk.Account,
+	adminAccountKeyID uint32,
+	latestBlockID sdk.Identifier,
+	newProtocolVersion uint64,
+	activeViewDiff uint64,
+) (*sdk.Transaction, error) {
+	accountKey := adminAccount.Keys[adminAccountKeyID]
+
+	tx := sdk.NewTransaction().
+		SetScript([]byte(templates.ReplaceAddresses(setProtocolStateVersionScript, env))).
+		SetComputeLimit(9999).
+		SetReferenceBlockID(latestBlockID).
+		SetProposalKey(adminAccount.Address, adminAccountKeyID, accountKey.SequenceNumber).
+		SetPayer(adminAccount.Address).
+		AddAuthorizer(adminAccount.Address)
+
+	err := tx.AddArgument(cadence.NewUInt64(newProtocolVersion))
+	if err != nil {
+		return nil, err
+	}
+	err = tx.AddArgument(cadence.NewUInt64(activeViewDiff))
+	if err != nil {
+		return nil, err
+	}
+
+	return tx, nil
+}
+
+// CreateFlowAccount will submit a create account transaction to smoke test the network.
+// This ensures a single transaction can be sealed by the network.
+func CreateFlowAccount(ctx context.Context, client *testnet.Client) (sdk.Address, error) {
+	fullAccountKey := sdk.NewAccountKey().
+		SetPublicKey(unittest.PrivateKeyFixture(crypto.ECDSAP256).PublicKey()).
+		SetHashAlgo(sdkcrypto.SHA2_256).
+		SetWeight(sdk.AccountKeyWeightThreshold)
+
+	latestBlockID, err := client.GetLatestBlockID(ctx)
+	if err != nil {
+		return sdk.EmptyAddress, fmt.Errorf("failed to get latest block id: %w", err)
+	}
+
+	// createAccount will submit a create account transaction and wait for it to be sealed
+	addr, err := client.CreateAccount(ctx, fullAccountKey, sdk.Identifier(latestBlockID))
+	if err != nil {
+		return sdk.EmptyAddress, fmt.Errorf("failed to create account: %w", err)
+	}
+
+	return addr, nil
+}
+
+// MakeRecoverEpochTx makes an admin transaction to recover the network when it is in EFM mode.
+func MakeRecoverEpochTx(
+	env templates.Environment,
+	adminAccount *sdk.Account,
+	adminAccountKeyID uint32,
+	latestBlockID sdk.Identifier,
+	args []cadence.Value,
+) (*sdk.Transaction, error) {
+	accountKey := adminAccount.Keys[adminAccountKeyID]
+	tx := sdk.NewTransaction().
+		SetScript(templates.GenerateRecoverEpochScript(env)).
+		SetComputeLimit(9999).
+		SetReferenceBlockID(latestBlockID).
+ SetProposalKey(adminAccount.Address, adminAccountKeyID, accountKey.SequenceNumber). + SetPayer(adminAccount.Address). + AddAuthorizer(adminAccount.Address) + + for _, arg := range args { + err := tx.AddArgument(arg) + if err != nil { + return nil, err + } + } + + return tx, nil +} diff --git a/ledger/common/bitutils/utils_test.go b/ledger/common/bitutils/utils_test.go index d8f23dfd1a4..f168c058ffa 100644 --- a/ledger/common/bitutils/utils_test.go +++ b/ledger/common/bitutils/utils_test.go @@ -5,9 +5,8 @@ import ( "math/big" "math/bits" "math/rand" - "time" - "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/ledger/common/convert/convert.go b/ledger/common/convert/convert.go new file mode 100644 index 00000000000..d81912f0cc3 --- /dev/null +++ b/ledger/common/convert/convert.go @@ -0,0 +1,57 @@ +package convert + +import ( + "fmt" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" +) + +// UnexpectedLedgerKeyFormat is returned when a ledger key is not in the expected format +var UnexpectedLedgerKeyFormat = fmt.Errorf("unexpected ledger key format") + +// LedgerKeyToRegisterID converts a ledger key to a register id +// returns an UnexpectedLedgerKeyFormat error if the key is not in the expected format +func LedgerKeyToRegisterID(key ledger.Key) (flow.RegisterID, error) { + parts := key.KeyParts + if len(parts) != 2 || + parts[0].Type != ledger.KeyPartOwner || + parts[1].Type != ledger.KeyPartKey { + return flow.RegisterID{}, fmt.Errorf("ledger key %s: %w", key.String(), UnexpectedLedgerKeyFormat) + } + + return flow.NewRegisterID( + flow.BytesToAddress(parts[0].Value), + string(parts[1].Value), + ), nil +} + +// RegisterIDToLedgerKey converts a register id to a ledger key +func RegisterIDToLedgerKey(registerID flow.RegisterID) ledger.Key { + return ledger.Key{ + KeyParts: []ledger.KeyPart{ + { + Type: ledger.KeyPartOwner, + Value: []byte(registerID.Owner), + }, + { + Type: ledger.KeyPartKey, + Value: []byte(registerID.Key), + }, + }, + } +} + +// PayloadToRegister converts a payload to a register id and value +func PayloadToRegister(payload *ledger.Payload) (flow.RegisterID, flow.RegisterValue, error) { + key, err := payload.Key() + if err != nil { + return flow.RegisterID{}, flow.RegisterValue{}, fmt.Errorf("could not parse register key from payload: %w", err) + } + regID, err := LedgerKeyToRegisterID(key) + if err != nil { + return flow.RegisterID{}, flow.RegisterValue{}, fmt.Errorf("could not convert register key into register id: %w", err) + } + + return regID, payload.Value(), nil +} diff --git a/ledger/common/convert/convert_test.go b/ledger/common/convert/convert_test.go new file mode 100644 index 00000000000..dca50f1f19d --- /dev/null +++ b/ledger/common/convert/convert_test.go @@ -0,0 +1,174 @@ +package convert_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestLedgerKeyToRegisterID(t *testing.T) { + expectedRegisterID := unittest.RegisterIDFixture() + + key := ledger.Key{ + KeyParts: []ledger.KeyPart{ + { + Type: ledger.KeyPartOwner, + Value: []byte(expectedRegisterID.Owner), + }, + { + Type: ledger.KeyPartKey, + Value: []byte("key"), + }, + }, + } + + registerID, err := convert.LedgerKeyToRegisterID(key) + require.NoError(t, err) + require.Equal(t, expectedRegisterID, registerID) + + p 
:= ledger.NewPayload(key, ledger.Value("value")) + + address, err := p.Address() + + require.NoError(t, err) + require.Equal(t, registerID.Owner, flow.AddressToRegisterOwner(address)) + require.Equal(t, registerID.Owner, string(address[:])) +} + +func TestLedgerKeyToRegisterID_Global(t *testing.T) { + key := ledger.Key{ + KeyParts: []ledger.KeyPart{ + { + Type: ledger.KeyPartOwner, + Value: []byte(""), + }, + { + Type: ledger.KeyPartKey, + Value: []byte("uuid"), + }, + }, + } + + expectedRegisterID := flow.UUIDRegisterID(0) + registerID, err := convert.LedgerKeyToRegisterID(key) + require.NoError(t, err) + require.Equal(t, expectedRegisterID, registerID) + + p := ledger.NewPayload(key, ledger.Value("value")) + + address, err := p.Address() + + require.NoError(t, err) + require.Equal(t, registerID.Owner, flow.AddressToRegisterOwner(address)) + require.NotEqual(t, registerID.Owner, string(address[:])) +} + +func TestLedgerKeyToRegisterID_Error(t *testing.T) { + key := ledger.Key{ + KeyParts: []ledger.KeyPart{ + { + Type: 999, // Invalid type + Value: []byte("owner"), + }, + { + Type: ledger.KeyPartKey, + Value: []byte("key"), + }, + }, + } + + _, err := convert.LedgerKeyToRegisterID(key) + require.Error(t, err) + require.ErrorIs(t, err, convert.UnexpectedLedgerKeyFormat) +} + +func TestRegisterIDToLedgerKey(t *testing.T) { + registerID := unittest.RegisterIDFixture() + expectedKey := ledger.Key{ + KeyParts: []ledger.KeyPart{ + { + Type: ledger.KeyPartOwner, + // Note: the owner field is extended to address length during NewRegisterID + // so we have to do the same here + Value: []byte(registerID.Owner), + }, + { + Type: ledger.KeyPartKey, + Value: []byte("key"), + }, + }, + } + + key := convert.RegisterIDToLedgerKey(registerID) + require.Equal(t, expectedKey, key) +} + +func TestRegisterIDToLedgerKey_Global(t *testing.T) { + registerID := flow.UUIDRegisterID(0) + expectedKey := ledger.Key{ + KeyParts: []ledger.KeyPart{ + { + Type: ledger.KeyPartOwner, + Value: []byte(""), + }, + { + Type: ledger.KeyPartKey, + Value: []byte("uuid"), + }, + }, + } + + key := convert.RegisterIDToLedgerKey(registerID) + require.Equal(t, expectedKey, key) +} + +func TestPayloadToRegister(t *testing.T) { + expected := unittest.RegisterIDFixture() + t.Run("can convert", func(t *testing.T) { + value := []byte("value") + p := ledger.NewPayload( + ledger.NewKey( + []ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte(expected.Owner)), + ledger.NewKeyPart(ledger.KeyPartKey, []byte(expected.Key)), + }, + ), + value, + ) + regID, regValue, err := convert.PayloadToRegister(p) + require.NoError(t, err) + require.Equal(t, expected, regID) + require.Equal(t, value, regValue) + }) + + t.Run("global key", func(t *testing.T) { + value := []byte("1") + p := ledger.NewPayload( + ledger.NewKey( + []ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, []byte("")), + ledger.NewKeyPart(ledger.KeyPartKey, []byte("uuid")), + }, + ), + value, + ) + regID, regValue, err := convert.PayloadToRegister(p) + require.NoError(t, err) + require.Equal(t, flow.NewRegisterID(flow.EmptyAddress, "uuid"), regID) + require.Equal(t, "", regID.Owner) + require.Equal(t, "uuid", regID.Key) + require.True(t, regID.IsInternalState()) + require.Equal(t, value, regValue) + }) + + t.Run("empty payload", func(t *testing.T) { + p := ledger.EmptyPayload() + _, _, err := convert.PayloadToRegister(p) + require.Error(t, err) + }) +} diff --git a/ledger/common/hash/hash_test.go b/ledger/common/hash/hash_test.go index 69a1102e358..1b49293761c 100644 --- 
a/ledger/common/hash/hash_test.go +++ b/ledger/common/hash/hash_test.go @@ -4,12 +4,12 @@ import ( "crypto/rand" "testing" - "golang.org/x/crypto/sha3" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/crypto/sha3" + + cryhash "github.com/onflow/crypto/hash" - cryhash "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/hash" ) diff --git a/ledger/common/pathfinder/pathfinder.go b/ledger/common/pathfinder/pathfinder.go index 4d91f451fd1..7849cf28256 100644 --- a/ledger/common/pathfinder/pathfinder.go +++ b/ledger/common/pathfinder/pathfinder.go @@ -5,7 +5,8 @@ import ( "crypto/sha256" "fmt" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/ledger" ) @@ -97,7 +98,7 @@ func PayloadsToValues(payloads []*ledger.Payload) ([]ledger.Value, error) { } // PathsFromPayloads constructs paths from an slice of payload -func PathsFromPayloads(payloads []ledger.Payload, version uint8) ([]ledger.Path, error) { +func PathsFromPayloads(payloads []*ledger.Payload, version uint8) ([]ledger.Path, error) { paths := make([]ledger.Path, len(payloads)) for i, pay := range payloads { k, err := pay.Key() diff --git a/ledger/common/pathfinder/pathfinder_test.go b/ledger/common/pathfinder/pathfinder_test.go index 44eb963735c..321c03fa64c 100644 --- a/ledger/common/pathfinder/pathfinder_test.go +++ b/ledger/common/pathfinder/pathfinder_test.go @@ -4,9 +4,9 @@ import ( "crypto/sha256" "testing" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/pathfinder" "github.com/onflow/flow-go/ledger/common/testutils" diff --git a/ledger/common/testutils/testutils.go b/ledger/common/testutils/testutils.go index ab30000c47c..e0e100ee46c 100644 --- a/ledger/common/testutils/testutils.go +++ b/ledger/common/testutils/testutils.go @@ -206,7 +206,7 @@ func RandomValues(n int, minByteSize, maxByteSize int) []l.Value { byteSize = minByteSize + rand.Intn(maxByteSize-minByteSize) } value := make([]byte, byteSize) - _, err := rand.Read(value) + _, err := crand.Read(value) if err != nil { panic("random generation failed") } diff --git a/ledger/complete/checkpoint_benchmark_test.go b/ledger/complete/checkpoint_benchmark_test.go index 177804be5a7..5aba09403cd 100644 --- a/ledger/complete/checkpoint_benchmark_test.go +++ b/ledger/complete/checkpoint_benchmark_test.go @@ -58,7 +58,7 @@ func benchmarkStoreCheckpoint(b *testing.B, version int, concurrent bool) { }() // Load checkpoint - tries, err := wal.LoadCheckpoint(*checkpointFile, &log) + tries, err := wal.LoadCheckpoint(*checkpointFile, log) if err != nil { b.Fatalf("cannot load checkpoint: %s", err) } @@ -69,12 +69,12 @@ func benchmarkStoreCheckpoint(b *testing.B, version int, concurrent bool) { // Serialize checkpoint V5. switch version { case 5: - err = wal.StoreCheckpointV5(outputDir, fileName, &log, tries...) + err = wal.StoreCheckpointV5(outputDir, fileName, log, tries...) 
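+		// V6 checkpoints are split across a header file and 17 part files (see readCheckpointV6)
+		// and can be stored either concurrently or on a single thread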
case 6: if concurrent { - err = wal.StoreCheckpointV6Concurrently(tries, outputDir, fileName, &log) + err = wal.StoreCheckpointV6Concurrently(tries, outputDir, fileName, log) } else { - err = wal.StoreCheckpointV6SingleThread(tries, outputDir, fileName, &log) + err = wal.StoreCheckpointV6SingleThread(tries, outputDir, fileName, log) } } @@ -102,7 +102,7 @@ func BenchmarkLoadCheckpoint(b *testing.B) { b.ResetTimer() // Load checkpoint - _, err = wal.LoadCheckpoint(*checkpointFile, &log) + _, err = wal.LoadCheckpoint(*checkpointFile, log) b.StopTimer() elapsed := time.Since(start) diff --git a/ledger/complete/compactor.go b/ledger/complete/compactor.go index cafc9ed2a48..a08a36d2232 100644 --- a/ledger/complete/compactor.go +++ b/ledger/complete/compactor.go @@ -13,6 +13,7 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/complete/mtrie/trie" realWAL "github.com/onflow/flow-go/ledger/complete/wal" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/lifecycle" "github.com/onflow/flow-go/module/observable" ) @@ -57,6 +58,7 @@ type Compactor struct { stopCh chan chan struct{} trieUpdateCh <-chan *WALTrieUpdate triggerCheckpointOnNextSegmentFinish *atomic.Bool // to trigger checkpoint manually + metrics module.WALMetrics } // NewCompactor creates new Compactor which writes WAL record and triggers @@ -76,6 +78,7 @@ func NewCompactor( checkpointDistance uint, checkpointsToKeep uint, triggerCheckpointOnNextSegmentFinish *atomic.Bool, + metrics module.WALMetrics, ) (*Compactor, error) { if checkpointDistance < 1 { checkpointDistance = 1 @@ -114,6 +117,7 @@ func NewCompactor( checkpointDistance: checkpointDistance, checkpointsToKeep: checkpointsToKeep, triggerCheckpointOnNextSegmentFinish: triggerCheckpointOnNextSegmentFinish, + metrics: metrics, }, nil } @@ -288,7 +292,7 @@ Loop: // Since this function is only for checkpointing, Compactor isn't affected by returned error. func (c *Compactor) checkpoint(ctx context.Context, tries []*trie.MTrie, checkpointNum int) error { - err := createCheckpoint(c.checkpointer, c.logger, tries, checkpointNum) + err := createCheckpoint(c.checkpointer, c.logger, tries, checkpointNum, c.metrics) if err != nil { return &createCheckpointError{num: checkpointNum, err: err} } @@ -325,18 +329,25 @@ func (c *Compactor) checkpoint(ctx context.Context, tries []*trie.MTrie, checkpo // createCheckpoint creates checkpoint with given checkpointNum and tries. // Errors indicate that checkpoint file can't be created. // Caller should handle returned errors by retrying checkpointing when appropriate. 
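+// The provided metrics collector records the size of the newly written checkpoint file.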
-func createCheckpoint(checkpointer *realWAL.Checkpointer, logger zerolog.Logger, tries []*trie.MTrie, checkpointNum int) error { +func createCheckpoint(checkpointer *realWAL.Checkpointer, logger zerolog.Logger, tries []*trie.MTrie, checkpointNum int, metrics module.WALMetrics) error { logger.Info().Msgf("serializing checkpoint %d with %v tries", checkpointNum, len(tries)) startTime := time.Now() fileName := realWAL.NumberToFilename(checkpointNum) - err := realWAL.StoreCheckpointV6SingleThread(tries, checkpointer.Dir(), fileName, &logger) + err := realWAL.StoreCheckpointV6SingleThread(tries, checkpointer.Dir(), fileName, logger) if err != nil { return fmt.Errorf("error serializing checkpoint (%d): %w", checkpointNum, err) } + size, err := realWAL.ReadCheckpointFileSize(checkpointer.Dir(), fileName) + if err != nil { + return fmt.Errorf("error reading checkpoint file size (%d): %w", checkpointNum, err) + } + + metrics.ExecutionCheckpointSize(size) + duration := time.Since(startTime) logger.Info().Float64("total_time_s", duration.Seconds()).Msgf("created checkpoint %d", checkpointNum) diff --git a/ledger/complete/compactor_test.go b/ledger/complete/compactor_test.go index 7617c7bb9b2..15cf89a446f 100644 --- a/ledger/complete/compactor_test.go +++ b/ledger/complete/compactor_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - prometheusWAL "github.com/m4ksio/wal/wal" + prometheusWAL "github.com/onflow/wal/wal" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" @@ -90,7 +90,7 @@ func TestCompactorCreation(t *testing.T) { // WAL segments are 32kB, so here we generate 2 keys 64kB each, times `size` // so we should get at least `size` segments - compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(t, err) co := CompactorObserver{fromBound: 8, done: make(chan struct{})} @@ -316,7 +316,7 @@ func TestCompactorSkipCheckpointing(t *testing.T) { // WAL segments are 32kB, so here we generate 2 keys 64kB each, times `size` // so we should get at least `size` segments - compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(t, err) co := CompactorObserver{fromBound: 8, done: make(chan struct{})} @@ -442,7 +442,7 @@ func TestCompactorAccuracy(t *testing.T) { l, err := NewLedger(wal, forestCapacity, metricsCollector, zerolog.Logger{}, DefaultPathFinderVersion) require.NoError(t, err) - compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(t, err) fromBound := lastCheckpointNum + (size / 2) @@ -552,7 +552,7 @@ func TestCompactorTriggeredByAdminTool(t *testing.T) { l, err := NewLedger(wal, forestCapacity, metricsCollector, unittest.LoggerWithName("ledger"), DefaultPathFinderVersion) require.NoError(t, err) - compactor, err := NewCompactor(l, wal, unittest.LoggerWithName("compactor"), 
forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(true)) + compactor, err := NewCompactor(l, wal, unittest.LoggerWithName("compactor"), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(true), metrics.NewNoopCollector()) require.NoError(t, err) fmt.Println("should stop as soon as segment 5 is generated, which should trigger checkpoint 5 to be created") @@ -656,7 +656,7 @@ func TestCompactorConcurrency(t *testing.T) { l, err := NewLedger(wal, forestCapacity, metricsCollector, zerolog.Logger{}, DefaultPathFinderVersion) require.NoError(t, err) - compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := NewCompactor(l, wal, unittest.Logger(), forestCapacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(t, err) fromBound := lastCheckpointNum + (size / 2 * numGoroutine) @@ -816,7 +816,7 @@ func replaySegments( updateFn func(update *ledger.TrieUpdate) error, deleteFn func(rootHash ledger.RootHash) error, ) error { - sr, err := prometheusWAL.NewSegmentsRangeReader(prometheusWAL.SegmentRange{ + sr, err := prometheusWAL.NewSegmentsRangeReader(unittest.Logger(), prometheusWAL.SegmentRange{ Dir: dir, First: 0, Last: to, diff --git a/ledger/complete/ledger.go b/ledger/complete/ledger.go index 1a2b6fd1e35..82ff8e7f477 100644 --- a/ledger/complete/ledger.go +++ b/ledger/complete/ledger.go @@ -1,10 +1,8 @@ package complete import ( - "encoding/json" "fmt" "io" - "os" "time" "github.com/rs/zerolog" @@ -15,12 +13,15 @@ import ( "github.com/onflow/flow-go/ledger/complete/mtrie" "github.com/onflow/flow-go/ledger/complete/mtrie/trie" realWAL "github.com/onflow/flow-go/ledger/complete/wal" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" ) -const DefaultCacheSize = 1000 -const DefaultPathFinderVersion = 1 -const defaultTrieUpdateChanSize = 500 +const ( + DefaultCacheSize = 1000 + DefaultPathFinderVersion = 1 + defaultTrieUpdateChanSize = 500 +) // Ledger (complete) is a fast memory-efficient fork-aware thread-safe trie-based key/value storage. // Ledger holds an array of registers (key-value pairs) and keeps tracks of changes over a limited time. @@ -197,14 +198,18 @@ func (l *Ledger) Get(query *ledger.Query) (values []ledger.Value, err error) { // Set updates the ledger given an update. 
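+// An empty update is a no-op: the current state is returned unchanged, together with an empty TrieUpdate.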
 // It returns the state after update and errors (if any)
 func (l *Ledger) Set(update *ledger.Update) (newState ledger.State, trieUpdate *ledger.TrieUpdate, err error) {
-	start := time.Now()
-
-	// TODO: add test case
 	if update.Size() == 0 {
-		// return current state root unchanged
-		return update.State(), nil, nil
+		return update.State(),
+			&ledger.TrieUpdate{
+				RootHash: ledger.RootHash(update.State()),
+				Paths:    []ledger.Path{},
+				Payloads: []*ledger.Payload{},
+			},
+			nil
 	}

+	start := time.Now()
+
 	trieUpdate, err = pathfinder.UpdateToTrieUpdate(update, l.pathFinderVersion)
 	if err != nil {
 		return ledger.State(hash.DummyHash), nil, err
@@ -319,6 +324,11 @@ func (l *Ledger) Tries() ([]*trie.MTrie, error) {
 	return l.forest.GetTries()
 }

+// Trie returns the trie with the given root hash, as stored in the forest
+func (l *Ledger) Trie(rootHash ledger.RootHash) (*trie.MTrie, error) {
+	return l.forest.GetTrie(rootHash)
+}
+
 // Checkpointer returns a checkpointer instance
 func (l *Ledger) Checkpointer() (*realWAL.Checkpointer, error) {
 	checkpointer, err := l.wal.NewCheckpointer()
@@ -328,20 +338,14 @@ func (l *Ledger) Checkpointer() (*realWAL.Checkpointer, error) {
 	return checkpointer, nil
 }

-// ExportCheckpointAt exports a checkpoint at specific state commitment after applying migrations and returns the new state (after migration) and any errors
-func (l *Ledger) ExportCheckpointAt(
+// MigrateAt applies the given migration (if any) to the trie at the given state and returns the migrated trie.
+func (l *Ledger) MigrateAt(
 	state ledger.State,
-	migrations []ledger.Migration,
-	preCheckpointReporters []ledger.Reporter,
-	postCheckpointReporters []ledger.Reporter,
+	migration ledger.Migration,
 	targetPathFinderVersion uint8,
-	outputDir, outputFile string,
-) (ledger.State, error) {
-
+) (*trie.MTrie, error) {
 	l.logger.Info().Msgf(
-		"Ledger is loaded, checkpoint export has started for state %s, and %d migrations have been planed",
+		"Ledger is loaded, checkpoint export has started for state %s",
 		state.String(),
-		len(migrations),
 	)

 	// get trie
@@ -351,57 +355,29 @@ func (l *Ledger) ExportCheckpointAt(
 		l.logger.Info().
 			Str("hash", rh.String()).
 			Msgf("Most recently touched root hash.")
-		return ledger.State(hash.DummyHash),
+		return nil,
 			fmt.Errorf("cannot get trie at the given state commitment: %w", err)
 	}

 	// clean up tries to release memory
 	err = l.keepOnlyOneTrie(state)
 	if err != nil {
-		return ledger.State(hash.DummyHash),
+		return nil,
 			fmt.Errorf("failed to clean up tries to reduce memory usage: %w", err)
 	}

-	var payloads []ledger.Payload
+	var payloads []*ledger.Payload
 	var newTrie *trie.MTrie

-	noMigration := len(migrations) == 0
-
-	if noMigration {
+	if migration == nil {
 		// when there is no migration, reuse the trie without rebuilding it
 		newTrie = t
-		// when there is no migration, we don't generate the payloads here until later running the
-		// postCheckpointReporters, because the ExportReporter is currently the only
-		// preCheckpointReporters, which doesn't use the payloads.
 	} else {
 		// get all payloads
 		payloads = t.AllPayloads()
-		payloadSize := len(payloads)
-
-		// migrate payloads
-		for i, migrate := range migrations {
-			l.logger.Info().Msgf("migration %d/%d is underway", i, len(migrations))
-
-			start := time.Now()
-			payloads, err = migrate(payloads)
-			elapsed := time.Since(start)
-
-			if err != nil {
-				return ledger.State(hash.DummyHash), fmt.Errorf("error applying migration (%d): %w", i, err)
-			}
-
-			newPayloadSize := len(payloads)
-
-			if payloadSize != newPayloadSize {
-				l.logger.Warn().
-					Int("migration_step", i).
-					Int("expected_size", payloadSize).
-					Int("outcome_size", newPayloadSize).
- Msg("payload counts has changed during migration, make sure this is expected.") - } - l.logger.Info().Str("timeTaken", elapsed.String()).Msgf("migration %d is done", i) - - payloadSize = newPayloadSize + payloads, err = migration(payloads) + if err != nil { + return nil, fmt.Errorf("error applying migration: %w", err) } l.logger.Info().Msgf("creating paths for %v payloads", len(payloads)) @@ -409,79 +385,31 @@ func (l *Ledger) ExportCheckpointAt( // get paths paths, err := pathfinder.PathsFromPayloads(payloads, targetPathFinderVersion) if err != nil { - return ledger.State(hash.DummyHash), fmt.Errorf("cannot export checkpoint, can't construct paths: %w", err) + return nil, fmt.Errorf("cannot export checkpoint, can't construct paths: %w", err) } l.logger.Info().Msgf("constructing a new trie with migrated payloads (count: %d)...", len(payloads)) emptyTrie := trie.NewEmptyMTrie() - // no need to prune the data since it has already been prunned through migrations - applyPruning := false - newTrie, _, err = trie.NewTrieWithUpdatedRegisters(emptyTrie, paths, payloads, applyPruning) - if err != nil { - return ledger.State(hash.DummyHash), fmt.Errorf("constructing updated trie failed: %w", err) + derefPayloads := make([]ledger.Payload, len(payloads)) + for i, p := range payloads { + derefPayloads[i] = *p } - } - statecommitment := ledger.State(newTrie.RootHash()) - - l.logger.Info().Msgf("successfully built new trie. NEW ROOT STATECOMMIEMENT: %v", statecommitment.String()) - - l.logger.Info().Msgf("running pre-checkpoint reporters") - // run post migration reporters - for i, reporter := range preCheckpointReporters { - l.logger.Info().Msgf("running a pre-checkpoint generation reporter: %s, (%v/%v)", reporter.Name(), i, len(preCheckpointReporters)) - err := runReport(reporter, payloads, statecommitment, l.logger) + // no need to prune the data since it has already been prunned through migrations + const applyPruning = false + newTrie, _, err = trie.NewTrieWithUpdatedRegisters(emptyTrie, paths, derefPayloads, applyPruning) if err != nil { - return ledger.State(hash.DummyHash), err + return nil, fmt.Errorf("constructing updated trie failed: %w", err) } } - l.logger.Info().Msgf("finished running pre-checkpoint reporters") + stateCommitment := ledger.State(newTrie.RootHash()) - l.logger.Info().Msg("creating a checkpoint for the new trie, storing the checkpoint to the file") - - err = os.MkdirAll(outputDir, os.ModePerm) - if err != nil { - return ledger.State(hash.DummyHash), fmt.Errorf("could not create output dir %s: %w", outputDir, err) - } + l.logger.Info().Msgf("successfully built new trie. NEW ROOT STATECOMMIEMENT: %v", stateCommitment.String()) - err = realWAL.StoreCheckpointV6Concurrently([]*trie.MTrie{newTrie}, outputDir, outputFile, &l.logger) - - // Writing the checkpoint takes time to write and copy. - // Without relying on an exit code or stdout, we need to know when the copy is complete. 
-	writeStatusFileErr := writeStatusFile("checkpoint_status.json", err)
-	if writeStatusFileErr != nil {
-		return ledger.State(hash.DummyHash), fmt.Errorf("failed to write checkpoint status file: %w", writeStatusFileErr)
-	}
-
-	if err != nil {
-		return ledger.State(hash.DummyHash), fmt.Errorf("failed to store the checkpoint: %w", err)
-	}
-
-	l.logger.Info().Msgf("checkpoint file successfully stored at: %v %v", outputDir, outputFile)
-
-	l.logger.Info().Msgf("start running post-checkpoint reporters")
-
-	if noMigration {
-		// when there is no mgiration, we generate the payloads now before
-		// running the postCheckpointReporters
-		payloads = newTrie.AllPayloads()
-	}
-
-	// running post checkpoint reporters
-	for i, reporter := range postCheckpointReporters {
-		l.logger.Info().Msgf("running a post-checkpoint generation reporter: %s, (%v/%v)", reporter.Name(), i, len(postCheckpointReporters))
-		err := runReport(reporter, payloads, statecommitment, l.logger)
-		if err != nil {
-			return ledger.State(hash.DummyHash), err
-		}
-	}
-
-	l.logger.Info().Msgf("ran all post-checkpoint reporters")
-
-	return statecommitment, nil
+	return newTrie, nil
 }

 // MostRecentTouchedState returns a state which is most recently touched.
@@ -513,28 +441,19 @@ func (l *Ledger) keepOnlyOneTrie(state ledger.State) error {
 	return l.forest.PurgeCacheExcept(ledger.RootHash(state))
 }

-func runReport(r ledger.Reporter, p []ledger.Payload, commit ledger.State, l zerolog.Logger) error {
-	l.Info().
-		Str("name", r.Name()).
-		Msg("starting reporter")
-
-	start := time.Now()
-	err := r.Report(p, commit)
-	elapsed := time.Since(start)
-
-	l.Info().
-		Str("timeTaken", elapsed.String()).
-		Str("name", r.Name()).
-		Msg("reporter done")
+// FindTrieByStateCommit iterates over the ledger tries and compares each root hash to the state commitment.
+// If a match is found, the matching trie is returned; otherwise a nil value is returned, indicating no match was found.
+func (l *Ledger) FindTrieByStateCommit(commitment flow.StateCommitment) (*trie.MTrie, error) {
+	tries, err := l.Tries()
 	if err != nil {
-		return fmt.Errorf("error running reporter (%s): %w", r.Name(), err)
+		return nil, err
+	}
+
+	for _, t := range tries {
+		if t.RootHash().Equals(ledger.RootHash(commitment)) {
+			return t, nil
+		}
 	}

-	return nil
-}
-func writeStatusFile(fileName string, e error) error {
-	checkpointStatus := map[string]bool{"succeeded": e == nil}
-	checkpointStatusJson, _ := json.MarshalIndent(checkpointStatus, "", " ")
-	err := os.WriteFile(fileName, checkpointStatusJson, 0644)
-	return err
+	return nil, nil
 }
diff --git a/ledger/complete/ledger_benchmark_test.go b/ledger/complete/ledger_benchmark_test.go
index 6c0855be914..a97257ac2a6 100644
--- a/ledger/complete/ledger_benchmark_test.go
+++ b/ledger/complete/ledger_benchmark_test.go
@@ -47,7 +47,7 @@ func benchmarkStorage(steps int, b *testing.B) {
 	led, err := complete.NewLedger(diskWal, steps+1, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion)
 	require.NoError(b, err)

-	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(steps+1), checkpointDistance, checkpointsToKeep, atomic.NewBool(false))
+	compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(steps+1), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector())
 	require.NoError(b, err)

 	<-compactor.Ready()
@@ -160,7 +160,7 @@ func BenchmarkTrieUpdate(b *testing.B) {
 	led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{},
complete.DefaultPathFinderVersion) require.NoError(b, err) - compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(b, err) <-compactor.Ready() @@ -212,7 +212,7 @@ func BenchmarkTrieRead(b *testing.B) { led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) require.NoError(b, err) - compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(b, err) <-compactor.Ready() @@ -273,7 +273,7 @@ func BenchmarkLedgerGetOneValue(b *testing.B) { led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) require.NoError(b, err) - compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(b, err) <-compactor.Ready() @@ -351,7 +351,7 @@ func BenchmarkTrieProve(b *testing.B) { led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) require.NoError(b, err) - compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(b, err) <-compactor.Ready() diff --git a/ledger/complete/ledger_stats.go b/ledger/complete/ledger_stats.go index 74062d5718a..c706004fc6c 100644 --- a/ledger/complete/ledger_stats.go +++ b/ledger/complete/ledger_stats.go @@ -6,6 +6,7 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" "github.com/onflow/flow-go/ledger/complete/mtrie/node" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" ) type LedgerStats struct { @@ -16,14 +17,18 @@ type LedgerStats struct { } func (l *Ledger) CollectStats(payloadCallBack func(payload *ledger.Payload)) (*LedgerStats, error) { - visitedNodes := make(map[*node.Node]uint64) - var interimNodeCounter, leafNodeCounter, totalNodeCounter uint64 - tries, err := l.Tries() if err != nil { return nil, err } + return CollectStats(tries, payloadCallBack) +} + +func CollectStats(tries []*trie.MTrie, payloadCallBack func(payload *ledger.Payload)) (*LedgerStats, error) { + visitedNodes := make(map[*node.Node]uint64) + var interimNodeCounter, leafNodeCounter, totalNodeCounter uint64 + bar := progressbar.Default(int64(len(tries)), "collecting ledger stats") for _, trie := range tries { for itr := flattener.NewUniqueNodeIterator(trie.RootNode(), visitedNodes); itr.Next(); { @@ -38,7 +43,7 @@ func (l *Ledger) CollectStats(payloadCallBack func(payload *ledger.Payload)) (*L visitedNodes[n] = totalNodeCounter totalNodeCounter++ } - if err = 
bar.Add(1); err != nil { + if err := bar.Add(1); err != nil { return nil, err } } diff --git a/ledger/complete/ledger_test.go b/ledger/complete/ledger_test.go index a723d2a58f1..d7021516440 100644 --- a/ledger/complete/ledger_test.go +++ b/ledger/complete/ledger_test.go @@ -2,7 +2,6 @@ package complete_test import ( "bytes" - "errors" "fmt" "math" "math/rand" @@ -53,8 +52,9 @@ func TestLedger_Update(t *testing.T) { up, err := ledger.NewEmptyUpdate(currentState) require.NoError(t, err) - newState, _, err := l.Set(up) + newState, trieUpdate, err := l.Set(up) require.NoError(t, err) + require.True(t, trieUpdate.IsEmpty()) // state shouldn't change assert.Equal(t, currentState, newState) @@ -513,7 +513,7 @@ func Test_WAL(t *testing.T) { led, err := complete.NewLedger(diskWal, size, metricsCollector, logger, complete.DefaultPathFinderVersion) require.NoError(t, err) - compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), size, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), size, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(t, err) <-compactor.Ready() @@ -550,7 +550,7 @@ func Test_WAL(t *testing.T) { led2, err := complete.NewLedger(diskWal2, size+10, metricsCollector, logger, complete.DefaultPathFinderVersion) require.NoError(t, err) - compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), uint(size), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(t, err) <-compactor2.Ready() @@ -612,7 +612,7 @@ func TestLedgerFunctionality(t *testing.T) { require.NoError(t, err) led, err := complete.NewLedger(diskWal, activeTries, metricsCollector, logger, complete.DefaultPathFinderVersion) assert.NoError(t, err) - compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(activeTries), checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), uint(activeTries), checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(t, err) <-compactor.Ready() @@ -703,180 +703,6 @@ func TestLedgerFunctionality(t *testing.T) { } } -func Test_ExportCheckpointAt(t *testing.T) { - t.Run("noop migration", func(t *testing.T) { - // the exported state has two key/value pairs - // (/1/1/22/2, "A") and (/1/3/22/4, "B") - // this tests the migration at the specific state - // without any special migration so we expect both - // register to show up in the new trie and with the same values - unittest.RunWithTempDir(t, func(dbDir string) { - unittest.RunWithTempDir(t, func(dir2 string) { - - const ( - capacity = 100 - checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation. 
- checkpointsToKeep = 1 - ) - - diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dbDir, capacity, pathfinder.PathByteSize, wal.SegmentSize) - require.NoError(t, err) - led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) - require.NoError(t, err) - compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) - require.NoError(t, err) - <-compactor.Ready() - - state := led.InitialState() - u := testutils.UpdateFixture() - u.SetState(state) - - state, _, err = led.Set(u) - require.NoError(t, err) - - newState, err := led.ExportCheckpointAt(state, []ledger.Migration{noOpMigration}, []ledger.Reporter{}, []ledger.Reporter{}, complete.DefaultPathFinderVersion, dir2, "root.checkpoint") - require.NoError(t, err) - assert.Equal(t, newState, state) - - diskWal2, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir2, capacity, pathfinder.PathByteSize, wal.SegmentSize) - require.NoError(t, err) - led2, err := complete.NewLedger(diskWal2, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) - require.NoError(t, err) - compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) - require.NoError(t, err) - <-compactor2.Ready() - - q, err := ledger.NewQuery(state, u.Keys()) - require.NoError(t, err) - - retValues, err := led2.Get(q) - require.NoError(t, err) - - for i, v := range u.Values() { - assert.Equal(t, v, retValues[i]) - } - - <-led.Done() - <-compactor.Done() - <-led2.Done() - <-compactor2.Done() - }) - }) - }) - t.Run("migration by value", func(t *testing.T) { - // the exported state has two key/value pairs - // ("/1/1/22/2", "A") and ("/1/3/22/4", "B") - // during the migration we change all keys with value "A" to "C" - // so in this case the resulting exported trie is ("/1/1/22/2", "C"), ("/1/3/22/4", "B") - unittest.RunWithTempDir(t, func(dbDir string) { - unittest.RunWithTempDir(t, func(dir2 string) { - - const ( - capacity = 100 - checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation. 
- checkpointsToKeep = 1 - ) - - diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dbDir, capacity, pathfinder.PathByteSize, wal.SegmentSize) - require.NoError(t, err) - led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) - require.NoError(t, err) - compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) - require.NoError(t, err) - <-compactor.Ready() - - state := led.InitialState() - u := testutils.UpdateFixture() - u.SetState(state) - - state, _, err = led.Set(u) - require.NoError(t, err) - - newState, err := led.ExportCheckpointAt(state, []ledger.Migration{migrationByValue}, []ledger.Reporter{}, []ledger.Reporter{}, complete.DefaultPathFinderVersion, dir2, "root.checkpoint") - require.NoError(t, err) - - diskWal2, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir2, capacity, pathfinder.PathByteSize, wal.SegmentSize) - require.NoError(t, err) - led2, err := complete.NewLedger(diskWal2, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) - require.NoError(t, err) - compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) - require.NoError(t, err) - <-compactor2.Ready() - - q, err := ledger.NewQuery(newState, u.Keys()) - require.NoError(t, err) - - retValues, err := led2.Get(q) - require.NoError(t, err) - - assert.Equal(t, retValues[0], ledger.Value([]byte{'C'})) - assert.Equal(t, retValues[1], ledger.Value([]byte{'B'})) - - <-led.Done() - <-compactor.Done() - <-led2.Done() - <-compactor2.Done() - }) - }) - }) - t.Run("migration by key", func(t *testing.T) { - // the exported state has two key/value pairs - // ("/1/1/22/2", "A") and ("/1/3/22/4", "B") - // during the migration we change the value to "D" for key "zero" - // so in this case the resulting exported trie is ("/1/1/22/2", "D"), ("/1/3/22/4", "B") - unittest.RunWithTempDir(t, func(dbDir string) { - unittest.RunWithTempDir(t, func(dir2 string) { - - const ( - capacity = 100 - checkpointDistance = math.MaxInt // A large number to prevent checkpoint creation. 
- checkpointsToKeep = 1 - ) - - diskWal, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dbDir, capacity, pathfinder.PathByteSize, wal.SegmentSize) - require.NoError(t, err) - led, err := complete.NewLedger(diskWal, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) - require.NoError(t, err) - compactor, err := complete.NewCompactor(led, diskWal, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) - require.NoError(t, err) - <-compactor.Ready() - - state := led.InitialState() - u := testutils.UpdateFixture() - u.SetState(state) - - state, _, err = led.Set(u) - require.NoError(t, err) - - newState, err := led.ExportCheckpointAt(state, []ledger.Migration{migrationByKey}, []ledger.Reporter{}, []ledger.Reporter{}, complete.DefaultPathFinderVersion, dir2, "root.checkpoint") - require.NoError(t, err) - - diskWal2, err := wal.NewDiskWAL(zerolog.Nop(), nil, metrics.NewNoopCollector(), dir2, capacity, pathfinder.PathByteSize, wal.SegmentSize) - require.NoError(t, err) - led2, err := complete.NewLedger(diskWal2, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) - require.NoError(t, err) - compactor2, err := complete.NewCompactor(led2, diskWal2, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) - require.NoError(t, err) - <-compactor2.Ready() - - q, err := ledger.NewQuery(newState, u.Keys()) - require.NoError(t, err) - - retValues, err := led2.Get(q) - require.NoError(t, err) - - assert.Equal(t, retValues[0], ledger.Value([]byte{'D'})) - assert.Equal(t, retValues[1], ledger.Value([]byte{'B'})) - - <-led.Done() - <-compactor.Done() - <-led2.Done() - <-compactor2.Done() - }) - }) - }) -} - func TestWALUpdateFailuresBubbleUp(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { @@ -903,7 +729,7 @@ func TestWALUpdateFailuresBubbleUp(t *testing.T) { led, err := complete.NewLedger(w, capacity, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) require.NoError(t, err) - compactor, err := complete.NewCompactor(led, w, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(led, w, zerolog.Nop(), capacity, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(t, err) <-compactor.Ready() @@ -920,8 +746,7 @@ func TestWALUpdateFailuresBubbleUp(t *testing.T) { require.NoError(t, err) _, _, err = led.Set(update) - require.Error(t, err) - require.True(t, errors.Is(err, theError)) + require.ErrorIs(t, err, theError) }) } diff --git a/ledger/complete/mtrie/flattener/encoding.go b/ledger/complete/mtrie/flattener/encoding.go index f82c921850e..4fedb0c14c1 100644 --- a/ledger/complete/mtrie/flattener/encoding.go +++ b/ledger/complete/mtrie/flattener/encoding.go @@ -29,6 +29,7 @@ const ( encPayloadLengthSize = 4 encodedTrieSize = encNodeIndexSize + encRegCountSize + encRegSizeSize + encHashSize + EncodedTrieSize = encodedTrieSize ) const payloadEncodingVersion = 1 @@ -268,6 +269,13 @@ func ReadNode(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) ( return n, nil } +type EncodedTrie struct { + RootIndex uint64 + RegCount uint64 + RegSize uint64 + RootHash hash.Hash +} + // EncodeTrie encodes trie in the following format: // - root node index (8 byte) // - allocated reg count (8 byte) @@ -305,9 +313,7 @@ func EncodeTrie(trie *trie.MTrie, rootIndex uint64, scratch []byte) []byte { 
return buf[:pos] } -// ReadTrie reconstructs a trie from data read from reader. -func ReadTrie(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) (*node.Node, error)) (*trie.MTrie, error) { - +func ReadEncodedTrie(reader io.Reader, scratch []byte) (EncodedTrie, error) { if len(scratch) < encodedTrieSize { scratch = make([]byte, encodedTrieSize) } @@ -315,7 +321,7 @@ func ReadTrie(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) ( // Read encoded trie _, err := io.ReadFull(reader, scratch[:encodedTrieSize]) if err != nil { - return nil, fmt.Errorf("failed to read serialized trie: %w", err) + return EncodedTrie{}, fmt.Errorf("failed to read serialized trie: %w", err) } pos := 0 @@ -335,21 +341,36 @@ func ReadTrie(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) ( // Decode root node hash readRootHash, err := hash.ToHash(scratch[pos : pos+encHashSize]) if err != nil { - return nil, fmt.Errorf("failed to decode hash of serialized trie: %w", err) + return EncodedTrie{}, fmt.Errorf("failed to decode hash of serialized trie: %w", err) + } + + return EncodedTrie{ + RootIndex: rootIndex, + RegCount: regCount, + RegSize: regSize, + RootHash: readRootHash, + }, nil +} + +// ReadTrie reconstructs a trie from data read from reader. +func ReadTrie(reader io.Reader, scratch []byte, getNode func(nodeIndex uint64) (*node.Node, error)) (*trie.MTrie, error) { + encodedTrie, err := ReadEncodedTrie(reader, scratch) + if err != nil { + return nil, err } - rootNode, err := getNode(rootIndex) + rootNode, err := getNode(encodedTrie.RootIndex) if err != nil { return nil, fmt.Errorf("failed to find root node of serialized trie: %w", err) } - mtrie, err := trie.NewMTrie(rootNode, regCount, regSize) + mtrie, err := trie.NewMTrie(rootNode, encodedTrie.RegCount, encodedTrie.RegSize) if err != nil { return nil, fmt.Errorf("failed to restore serialized trie: %w", err) } rootHash := mtrie.RootHash() - if !rootHash.Equals(ledger.RootHash(readRootHash)) { + if !rootHash.Equals(ledger.RootHash(encodedTrie.RootHash)) { return nil, fmt.Errorf("failed to restore serialized trie: roothash doesn't match") } diff --git a/ledger/complete/mtrie/node/node.go b/ledger/complete/mtrie/node/node.go index 94d713cc0d3..bd1d6b08140 100644 --- a/ledger/complete/mtrie/node/node.go +++ b/ledger/complete/mtrie/node/node.go @@ -211,18 +211,18 @@ func (n *Node) Path() *ledger.Path { return nil } -// Payload returns the the Node's payload. +// Payload returns the Node's payload. // Do NOT MODIFY returned slices! func (n *Node) Payload() *ledger.Payload { return n.payload } -// LeftChild returns the the Node's left child. +// LeftChild returns the Node's left child. // Only INTERIM nodes have children. // Do NOT MODIFY returned Node! func (n *Node) LeftChild() *Node { return n.lChild } -// RightChild returns the the Node's right child. +// RightChild returns the Node's right child. // Only INTERIM nodes have children. // Do NOT MODIFY returned Node! func (n *Node) RightChild() *Node { return n.rChild } @@ -253,18 +253,18 @@ func (n *Node) FmtStr(prefix string, subpath string) string { } // AllPayloads returns the payload of this node and all payloads of the subtrie -func (n *Node) AllPayloads() []ledger.Payload { - return n.appendSubtreePayloads([]ledger.Payload{}) +func (n *Node) AllPayloads() []*ledger.Payload { + return n.appendSubtreePayloads([]*ledger.Payload{}) } // appendSubtreePayloads appends the payloads of the subtree with this node as root // to the provided Payload slice. 
Follows same pattern as Go's native append method. -func (n *Node) appendSubtreePayloads(result []ledger.Payload) []ledger.Payload { +func (n *Node) appendSubtreePayloads(result []*ledger.Payload) []*ledger.Payload { if n == nil { return result } if n.IsLeaf() { - return append(result, *n.Payload()) + return append(result, n.Payload()) } result = n.lChild.appendSubtreePayloads(result) result = n.rChild.appendSubtreePayloads(result) diff --git a/ledger/complete/mtrie/trie/trie.go b/ledger/complete/mtrie/trie/trie.go index b2ec3106f5d..064e7f157e3 100644 --- a/ledger/complete/mtrie/trie/trie.go +++ b/ledger/complete/mtrie/trie/trie.go @@ -78,7 +78,7 @@ func (mt *MTrie) AllocatedRegCount() uint64 { return mt.regCount } -// AllocatedRegSize returns the size of allocated registers in the trie. +// AllocatedRegSize returns the size (number of bytes) of allocated registers in the trie. // Concurrency safe (as Tries are immutable structures by convention) func (mt *MTrie) AllocatedRegSize() uint64 { return mt.regSize @@ -748,7 +748,7 @@ func EmptyTrieRootHash() ledger.RootHash { } // AllPayloads returns all payloads -func (mt *MTrie) AllPayloads() []ledger.Payload { +func (mt *MTrie) AllPayloads() []*ledger.Payload { return mt.root.AllPayloads() } @@ -831,3 +831,31 @@ func minInt(a, b int) int { } return b } + +// TraverseNodes traverses all nodes of the trie in DFS order +func TraverseNodes(trie *MTrie, processNode func(*node.Node) error) error { + return traverseRecursive(trie.root, processNode) +} + +func traverseRecursive(n *node.Node, processNode func(*node.Node) error) error { + if n == nil { + return nil + } + + err := processNode(n) + if err != nil { + return err + } + + err = traverseRecursive(n.LeftChild(), processNode) + if err != nil { + return err + } + + err = traverseRecursive(n.RightChild(), processNode) + if err != nil { + return err + } + + return nil +} diff --git a/ledger/complete/wal/checkpoint_v5_test.go b/ledger/complete/wal/checkpoint_v5_test.go index 9721a50d04e..4422d3376c0 100644 --- a/ledger/complete/wal/checkpoint_v5_test.go +++ b/ledger/complete/wal/checkpoint_v5_test.go @@ -15,12 +15,12 @@ func TestCopyCheckpointFileV5(t *testing.T) { tries := createSimpleTrie(t) fileName := "checkpoint" logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV5(dir, fileName, &logger, tries...), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV5(dir, fileName, logger, tries...), "fail to store checkpoint") to := filepath.Join(dir, "newfolder") newPaths, err := CopyCheckpointFile(fileName, dir, to) require.NoError(t, err) log.Info().Msgf("copied to :%v", newPaths) - decoded, err := LoadCheckpoint(filepath.Join(to, fileName), &logger) + decoded, err := LoadCheckpoint(filepath.Join(to, fileName), logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) requireTriesEqual(t, tries, decoded) }) diff --git a/ledger/complete/wal/checkpoint_v6_leaf_reader.go b/ledger/complete/wal/checkpoint_v6_leaf_reader.go index 77dbc0716b5..8505b37ef39 100644 --- a/ledger/complete/wal/checkpoint_v6_leaf_reader.go +++ b/ledger/complete/wal/checkpoint_v6_leaf_reader.go @@ -29,12 +29,24 @@ func nodeToLeaf(leaf *node.Node) *LeafNode { // OpenAndReadLeafNodesFromCheckpointV6 takes a channel for pushing the leaf nodes that are read from // the given checkpoint file specified by dir and fileName. // It returns when finish reading the checkpoint file and the input channel can be closed. 
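+// It returns an error if the trie's root hash does not match expectedRootHash.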
-func OpenAndReadLeafNodesFromCheckpointV6(allLeafNodesCh chan<- *LeafNode, dir string, fileName string, logger *zerolog.Logger) (errToReturn error) { +// It requires that the checkpoint file contains only one trie. +func OpenAndReadLeafNodesFromCheckpointV6( + allLeafNodesCh chan<- *LeafNode, + dir string, + fileName string, + expectedRootHash ledger.RootHash, + logger zerolog.Logger) ( + errToReturn error) { // we are the only sender of the channel, closing it after done defer func() { close(allLeafNodesCh) }() + err := checkpointHasSingleRootHash(logger, dir, fileName, expectedRootHash) + if err != nil { + return fmt.Errorf("fail to check checkpoint has single root hash: %w", err) + } + filepath := filePathCheckpointHeader(dir, fileName) f, err := os.Open(filepath) @@ -68,7 +80,7 @@ func OpenAndReadLeafNodesFromCheckpointV6(allLeafNodesCh chan<- *LeafNode, dir s return nil } -func readCheckpointSubTrieLeafNodes(leafNodesCh chan<- *LeafNode, dir string, fileName string, index int, checksum uint32, logger *zerolog.Logger) error { +func readCheckpointSubTrieLeafNodes(leafNodesCh chan<- *LeafNode, dir string, fileName string, index int, checksum uint32, logger zerolog.Logger) error { return processCheckpointSubTrie(dir, fileName, index, checksum, logger, func(reader *Crc32Reader, nodesCount uint64) error { scratch := make([]byte, 1024*4) // must not be less than 1024 diff --git a/ledger/complete/wal/checkpoint_v6_reader.go b/ledger/complete/wal/checkpoint_v6_reader.go index 98a9b2f4b77..88b8df09c18 100644 --- a/ledger/complete/wal/checkpoint_v6_reader.go +++ b/ledger/complete/wal/checkpoint_v6_reader.go @@ -11,6 +11,7 @@ import ( "github.com/rs/zerolog" + "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/complete/mtrie/flattener" "github.com/onflow/flow-go/ledger/complete/mtrie/node" "github.com/onflow/flow-go/ledger/complete/mtrie/trie" @@ -19,6 +20,20 @@ import ( // ErrEOFNotReached for indicating end of file not reached error var ErrEOFNotReached = errors.New("expect to reach EOF, but actually didn't") +func ReadTriesRootHash(logger zerolog.Logger, dir string, fileName string) ( + []ledger.RootHash, + error, +) { + err := validateCheckpointFile(logger, dir, fileName) + if err != nil { + return nil, err + } + return readTriesRootHash(logger, dir, fileName) +} + +var CheckpointHasRootHash = checkpointHasRootHash +var CheckpointHasSingleRootHash = checkpointHasSingleRootHash + // readCheckpointV6 reads checkpoint file from a main file and 17 file parts. // the main file stores: // - version @@ -31,7 +46,7 @@ var ErrEOFNotReached = errors.New("expect to reach EOF, but actually didn't") // it returns (nil, os.ErrNotExist) if a certain file is missing, use (os.IsNotExist to check) // it returns (nil, ErrEOFNotReached) if a certain part file is malformed // it returns (nil, err) if running into any exception -func readCheckpointV6(headerFile *os.File, logger *zerolog.Logger) ([]*trie.MTrie, error) { +func readCheckpointV6(headerFile *os.File, logger zerolog.Logger) ([]*trie.MTrie, error) { // the full path of header file headerPath := headerFile.Name() dir, fileName := filepath.Split(headerPath) @@ -53,7 +68,7 @@ func readCheckpointV6(headerFile *os.File, logger *zerolog.Logger) ([]*trie.MTri // TODO making number of goroutine configurable for reading subtries, which can help us // test the code on machines that don't have as much RAM as EN by using fewer goroutines.
- subtrieNodes, err := readSubTriesConcurrently(dir, fileName, subtrieChecksums, &lg) + subtrieNodes, err := readSubTriesConcurrently(dir, fileName, subtrieChecksums, lg) if err != nil { return nil, fmt.Errorf("could not read subtrie from dir: %w", err) } @@ -61,7 +76,7 @@ func readCheckpointV6(headerFile *os.File, logger *zerolog.Logger) ([]*trie.MTri lg.Info().Uint32("topsum", topTrieChecksum). Msg("finish reading all v6 subtrie files, start reading top level tries") - tries, err := readTopLevelTries(dir, fileName, subtrieNodes, topTrieChecksum, &lg) + tries, err := readTopLevelTries(dir, fileName, subtrieNodes, topTrieChecksum, lg) if err != nil { return nil, fmt.Errorf("could not read top level nodes or tries: %w", err) } @@ -83,21 +98,50 @@ func readCheckpointV6(headerFile *os.File, logger *zerolog.Logger) ([]*trie.MTri } // OpenAndReadCheckpointV6 open the checkpoint file and read it with readCheckpointV6 -func OpenAndReadCheckpointV6(dir string, fileName string, logger *zerolog.Logger) ( - tries []*trie.MTrie, +func OpenAndReadCheckpointV6(dir string, fileName string, logger zerolog.Logger) ( + triesToReturn []*trie.MTrie, errToReturn error, ) { + filepath := filePathCheckpointHeader(dir, fileName) + errToReturn = withFile(logger, filepath, func(file *os.File) error { + tries, err := readCheckpointV6(file, logger) + if err != nil { + return err + } + triesToReturn = tries + return nil + }) - f, err := os.Open(filepath) - if err != nil { - return nil, fmt.Errorf("could not open file %v: %w", filepath, err) + return triesToReturn, errToReturn +} + +// ReadCheckpointFileSize returns the total size of the checkpoint file +func ReadCheckpointFileSize(dir string, fileName string) (uint64, error) { + paths := allFilePaths(dir, fileName) + totalSize := uint64(0) + for _, path := range paths { + fileInfo, err := os.Stat(path) + if err != nil { + return 0, fmt.Errorf("could not get file info for %v: %w", path, err) + } + + totalSize += uint64(fileInfo.Size()) } - defer func(file *os.File) { - errToReturn = closeAndMergeError(file, errToReturn) - }(f) - return readCheckpointV6(f, logger) + return totalSize, nil +} + +func allFilePaths(dir string, fileName string) []string { + paths := make([]string, 0, 1+subtrieCount+1) + paths = append(paths, filePathCheckpointHeader(dir, fileName)) + for i := 0; i < subtrieCount; i++ { + subTriePath, _, _ := filePathSubTries(dir, fileName, i) + paths = append(paths, subTriePath) + } + topTriePath, _ := filePathTopTries(dir, fileName) + paths = append(paths, topTriePath) + return paths } func filePathCheckpointHeader(dir string, fileName string) string { @@ -127,7 +171,7 @@ func filePathPattern(dir string, fileName string) string { // readCheckpointHeader takes a file path and returns subtrieChecksums and topTrieChecksum // any error returned are exceptions -func readCheckpointHeader(filepath string, logger *zerolog.Logger) ( +func readCheckpointHeader(filepath string, logger zerolog.Logger) ( checksumsOfSubtries []uint32, checksumOfTopTrie uint32, errToReturn error, @@ -278,7 +322,7 @@ type resultReadSubTrie struct { Err error } -func readSubTriesConcurrently(dir string, fileName string, subtrieChecksums []uint32, logger *zerolog.Logger) ([][]*node.Node, error) { +func readSubTriesConcurrently(dir string, fileName string, subtrieChecksums []uint32, logger zerolog.Logger) ([][]*node.Node, error) { numOfSubTries := len(subtrieChecksums) jobs := make(chan jobReadSubtrie, numOfSubTries) @@ -325,7 +369,7 @@ func readSubTriesConcurrently(dir string, fileName 
string, subtrieChecksums []ui return nodesGroups, nil } -func readCheckpointSubTrie(dir string, fileName string, index int, checksum uint32, logger *zerolog.Logger) ( +func readCheckpointSubTrie(dir string, fileName string, index int, checksum uint32, logger zerolog.Logger) ( []*node.Node, error, ) { @@ -372,93 +416,81 @@ func processCheckpointSubTrie( fileName string, index int, checksum uint32, - logger *zerolog.Logger, + logger zerolog.Logger, processNode func(*Crc32Reader, uint64) error, -) ( - errToReturn error, -) { +) error { + filepath, _, err := filePathSubTries(dir, fileName, index) if err != nil { return err } - f, err := os.Open(filepath) - if err != nil { - return fmt.Errorf("could not open file %v: %w", filepath, err) - } - defer func(file *os.File) { - evictErr := evictFileFromLinuxPageCache(file, false, logger) - if evictErr != nil { - logger.Warn().Msgf("failed to evict subtrie file %s from Linux page cache: %s", filepath, evictErr) - // No need to return this error because it's possible to continue normal operations. + return withFile(logger, filepath, func(f *os.File) error { + // validate the magic bytes and version + err := validateFileHeader(MagicBytesCheckpointSubtrie, VersionV6, f) + if err != nil { + return err } - errToReturn = closeAndMergeError(file, errToReturn) - }(f) - // valite the magic bytes and version - err = validateFileHeader(MagicBytesCheckpointSubtrie, VersionV6, f) - if err != nil { - return err - } - - nodesCount, expectedSum, err := readSubTriesFooter(f) - if err != nil { - return fmt.Errorf("cannot read sub trie node count: %w", err) - } + nodesCount, expectedSum, err := readSubTriesFooter(f) + if err != nil { + return fmt.Errorf("cannot read sub trie node count: %w", err) + } - if checksum != expectedSum { - return fmt.Errorf("mismatch checksum in subtrie file. 
checksum from checkpoint header %v does not "+ + "match with the checksum in subtrie file %v", checksum, expectedSum) + } - // restart from the beginning of the file, make sure Crc32Reader has seen all the bytes - // in order to compute the correct checksum - _, err = f.Seek(0, io.SeekStart) - if err != nil { - return fmt.Errorf("cannot seek to start of file: %w", err) - } + // restart from the beginning of the file, make sure Crc32Reader has seen all the bytes + // in order to compute the correct checksum + _, err = f.Seek(0, io.SeekStart) + if err != nil { + return fmt.Errorf("cannot seek to start of file: %w", err) + } - reader := NewCRC32Reader(bufio.NewReaderSize(f, defaultBufioReadSize)) + reader := NewCRC32Reader(bufio.NewReaderSize(f, defaultBufioReadSize)) - // read version again for calculating checksum - _, _, err = readFileHeader(reader) - if err != nil { - return fmt.Errorf("could not read version again for subtrie: %w", err) - } + // read version again for calculating checksum + _, _, err = readFileHeader(reader) + if err != nil { + return fmt.Errorf("could not read version again for subtrie: %w", err) + } - // read file part index and verify + // read file part index and verify - err = processNode(reader, nodesCount) - if err != nil { - return err - } + err = processNode(reader, nodesCount) + if err != nil { + return err + } - scratch := make([]byte, 1024) - // read footer and discard, since we only care about checksum - _, err = io.ReadFull(reader, scratch[:encNodeCountSize]) - if err != nil { - return fmt.Errorf("cannot read footer: %w", err) - } + scratch := make([]byte, 1024) + // read footer and discard, since we only care about checksum + _, err = io.ReadFull(reader, scratch[:encNodeCountSize]) + if err != nil { + return fmt.Errorf("cannot read footer: %w", err) + } - // calculate the actual checksum - actualSum := reader.Crc32() + // calculate the actual checksum + actualSum := reader.Crc32() - if actualSum != expectedSum { - return fmt.Errorf("invalid checksum in subtrie checkpoint, expected %v, actual %v", - expectedSum, actualSum) - } + if actualSum != expectedSum { + return fmt.Errorf("invalid checksum in subtrie checkpoint, expected %v, actual %v", + expectedSum, actualSum) + } - // read the checksum and discard, since we only care about whether ensureReachedEOF - _, err = io.ReadFull(reader, scratch[:crc32SumSize]) - if err != nil { - return fmt.Errorf("could not read subtrie file's checksum: %w", err) - } + // read the checksum and discard, since we only care about whether ensureReachedEOF + _, err = io.ReadFull(reader, scratch[:crc32SumSize]) + if err != nil { + return fmt.Errorf("could not read subtrie file's checksum: %w", err) + } - err = ensureReachedEOF(reader) - if err != nil { - return fmt.Errorf("fail to read %v-th sutrie file: %w", index, err) - } + err = ensureReachedEOF(reader) + if err != nil { + return fmt.Errorf("fail to read %v-th subtrie file: %w", index, err) + } - return nil + return nil + }) } func readSubTriesFooter(f *os.File) (uint64, uint32, error) { @@ -498,137 +530,216 @@ func readSubTriesFooter(f *os.File) (uint64, uint32, error) { // 5. node count // 6. trie count // 7.
checksum -func readTopLevelTries(dir string, fileName string, subtrieNodes [][]*node.Node, topTrieChecksum uint32, logger *zerolog.Logger) ( - rootTries []*trie.MTrie, +func readTopLevelTries(dir string, fileName string, subtrieNodes [][]*node.Node, topTrieChecksum uint32, logger zerolog.Logger) ( + rootTriesToReturn []*trie.MTrie, errToReturn error, ) { + filepath, _ := filePathTopTries(dir, fileName) - file, err := os.Open(filepath) - if err != nil { - return nil, fmt.Errorf("could not open file %v: %w", filepath, err) - } - defer func(file *os.File) { - evictErr := evictFileFromLinuxPageCache(file, false, logger) - if evictErr != nil { - logger.Warn().Msgf("failed to evict top trie file %s from Linux page cache: %s", filepath, evictErr) - // No need to return this error because it's possible to continue normal operations. + errToReturn = withFile(logger, filepath, func(file *os.File) error { + // read and validate magic bytes and version + err := validateFileHeader(MagicBytesCheckpointToptrie, VersionV6, file) + if err != nil { + return err } - errToReturn = closeAndMergeError(file, errToReturn) - }(file) - // read and validate magic bytes and version - err = validateFileHeader(MagicBytesCheckpointToptrie, VersionV6, file) - if err != nil { - return nil, err - } + // read subtrie Node count and validate + topLevelNodesCount, triesCount, expectedSum, err := readTopTriesFooter(file) + if err != nil { + return fmt.Errorf("could not read top tries footer: %w", err) + } - // read subtrie Node count and validate - topLevelNodesCount, triesCount, expectedSum, err := readTopTriesFooter(file) - if err != nil { - return nil, fmt.Errorf("could not read top tries footer: %w", err) - } + if topTrieChecksum != expectedSum { + return fmt.Errorf("mismatch top trie checksum, header file has %v, toptrie file has %v", + topTrieChecksum, expectedSum) + } - if topTrieChecksum != expectedSum { - return nil, fmt.Errorf("mismatch top trie checksum, header file has %v, toptrie file has %v", - topTrieChecksum, expectedSum) - } + // restart from the beginning of the file, make sure CRC32Reader has seen all the bytes + // in order to compute the correct checksum + _, err = file.Seek(0, io.SeekStart) + if err != nil { + return fmt.Errorf("could not seek to 0: %w", err) + } - // restart from the beginning of the file, make sure CRC32Reader has seen all the bytes - // in order to compute the correct checksum - _, err = file.Seek(0, io.SeekStart) - if err != nil { - return nil, fmt.Errorf("could not seek to 0: %w", err) - } + reader := NewCRC32Reader(bufio.NewReaderSize(file, defaultBufioReadSize)) - reader := NewCRC32Reader(bufio.NewReaderSize(file, defaultBufioReadSize)) + // read version again for calculating checksum + _, _, err = readFileHeader(reader) + if err != nil { + return fmt.Errorf("could not read version for top trie: %w", err) + } - // read version again for calculating checksum - _, _, err = readFileHeader(reader) - if err != nil { - return nil, fmt.Errorf("could not read version for top trie: %w", err) - } + // read subtrie count and validate + buf := make([]byte, encNodeCountSize) + _, err = io.ReadFull(reader, buf) + if err != nil { + return fmt.Errorf("could not read subtrie node count: %w", err) + } + readSubtrieNodeCount, err := decodeNodeCount(buf) + if err != nil { + return fmt.Errorf("could not decode node count: %w", err) + } - // read subtrie count and validate - buf := make([]byte, encNodeCountSize) - _, err = io.ReadFull(reader, buf) - if err != nil { - return nil, fmt.Errorf("could not 
read subtrie node count: %w", err) - } - readSubtrieNodeCount, err := decodeNodeCount(buf) - if err != nil { - return nil, fmt.Errorf("could not decode node count: %w", err) - } + totalSubTrieNodeCount := computeTotalSubTrieNodeCount(subtrieNodes) - totalSubTrieNodeCount := computeTotalSubTrieNodeCount(subtrieNodes) + if readSubtrieNodeCount != totalSubTrieNodeCount { + return fmt.Errorf("mismatch subtrie node count, read from disk (%v), but got actual node count (%v)", + readSubtrieNodeCount, totalSubTrieNodeCount) + } - if readSubtrieNodeCount != totalSubTrieNodeCount { - return nil, fmt.Errorf("mismatch subtrie node count, read from disk (%v), but got actual node count (%v)", - readSubtrieNodeCount, totalSubTrieNodeCount) - } + topLevelNodes := make([]*node.Node, topLevelNodesCount+1) //+1 for 0 index meaning nil + tries := make([]*trie.MTrie, triesCount) + + // Scratch buffer is used as temporary buffer that reader can read into. + // Raw data in scratch buffer should be copied or converted into desired + // objects before next Read operation. If the scratch buffer isn't large + // enough, a new buffer will be allocated. However, 4096 bytes will + // be large enough to handle almost all payloads and 100% of interim nodes. + scratch := make([]byte, 1024*4) // must not be less than 1024 + + // read the nodes from subtrie level to the root level + for i := uint64(1); i <= topLevelNodesCount; i++ { + node, err := flattener.ReadNode(reader, scratch, func(nodeIndex uint64) (*node.Node, error) { + if nodeIndex >= i+uint64(totalSubTrieNodeCount) { + return nil, fmt.Errorf("sequence of serialized nodes does not satisfy Descendents-First-Relationship") + } - topLevelNodes := make([]*node.Node, topLevelNodesCount+1) //+1 for 0 index meaning nil - tries := make([]*trie.MTrie, triesCount) + return getNodeByIndex(subtrieNodes, totalSubTrieNodeCount, topLevelNodes, nodeIndex) + }) + if err != nil { + return fmt.Errorf("cannot read node at index %d: %w", i, err) + } + + topLevelNodes[i] = node + } - // Scratch buffer is used as temporary buffer that reader can read into. - // Raw data in scratch buffer should be copied or converted into desired - // objects before next Read operation. If the scratch buffer isn't large - // enough, a new buffer will be allocated. However, 4096 bytes will - // be large enough to handle almost all payloads and 100% of interim nodes. 
- scratch := make([]byte, 1024*4) // must not be less than 1024 + // read the trie root nodes + for i := uint16(0); i < triesCount; i++ { + trie, err := flattener.ReadTrie(reader, scratch, func(nodeIndex uint64) (*node.Node, error) { + return getNodeByIndex(subtrieNodes, totalSubTrieNodeCount, topLevelNodes, nodeIndex) + }) - // read the nodes from subtrie level to the root level - for i := uint64(1); i <= topLevelNodesCount; i++ { - node, err := flattener.ReadNode(reader, scratch, func(nodeIndex uint64) (*node.Node, error) { - if nodeIndex >= i+uint64(totalSubTrieNodeCount) { - return nil, fmt.Errorf("sequence of serialized nodes does not satisfy Descendents-First-Relationship") + if err != nil { + return fmt.Errorf("cannot read root trie at index %d: %w", i, err) } + tries[i] = trie + } - return getNodeByIndex(subtrieNodes, totalSubTrieNodeCount, topLevelNodes, nodeIndex) - }) + // read footer and discard, since we only care about checksum + _, err = io.ReadFull(reader, scratch[:encNodeCountSize+encTrieCountSize]) if err != nil { - return nil, fmt.Errorf("cannot read node at index %d: %w", i, err) + return fmt.Errorf("cannot read footer: %w", err) } - topLevelNodes[i] = node - } + actualSum := reader.Crc32() - // read the trie root nodes - for i := uint16(0); i < triesCount; i++ { - trie, err := flattener.ReadTrie(reader, scratch, func(nodeIndex uint64) (*node.Node, error) { - return getNodeByIndex(subtrieNodes, totalSubTrieNodeCount, topLevelNodes, nodeIndex) - }) + if actualSum != expectedSum { + return fmt.Errorf("invalid checksum in top level trie, expected %v, actual %v", + expectedSum, actualSum) + } + // read the checksum and discard, since we only care about whether ensureReachedEOF + _, err = io.ReadFull(reader, scratch[:crc32SumSize]) if err != nil { - return nil, fmt.Errorf("cannot read root trie at index %d: %w", i, err) + return fmt.Errorf("could not read checksum from top trie file: %w", err) + } + + err = ensureReachedEOF(reader) + if err != nil { + return fmt.Errorf("fail to read top trie file: %w", err) + } + + rootTriesToReturn = tries + return nil + }) + return rootTriesToReturn, errToReturn +} + +func readTriesRootHash(logger zerolog.Logger, dir string, fileName string) ( + trieRootsToReturn []ledger.RootHash, + errToReturn error, +) { + + filepath, _ := filePathTopTries(dir, fileName) + errToReturn = withFile(logger, filepath, func(file *os.File) error { + var err error + + // read and validate magic bytes and version + err = validateFileHeader(MagicBytesCheckpointToptrie, VersionV6, file) + if err != nil { + return err + } + + // read subtrie Node count and validate + _, triesCount, _, err := readTopTriesFooter(file) + if err != nil { + return fmt.Errorf("could not read top tries footer: %w", err) + } + + footerOffset := encNodeCountSize + encTrieCountSize + crc32SumSize + trieRootOffset := footerOffset + flattener.EncodedTrieSize*int(triesCount) + + _, err = file.Seek(int64(-trieRootOffset), io.SeekEnd) + if err != nil { + return fmt.Errorf("could not seek to 0: %w", err) + } + + reader := bufio.NewReaderSize(file, defaultBufioReadSize) + trieRoots := make([]ledger.RootHash, 0, triesCount) + scratch := make([]byte, 1024*4) // must not be less than 1024 + for i := 0; i < int(triesCount); i++ { + trieRootNode, err := flattener.ReadEncodedTrie(reader, scratch) + if err != nil { + return fmt.Errorf("could not read trie root node: %w", err) + } + + trieRoots = append(trieRoots, ledger.RootHash(trieRootNode.RootHash)) } - tries[i] = trie - } - // read footer and discard, 
since we only care about checksum - _, err = io.ReadFull(reader, scratch[:encNodeCountSize+encTrieCountSize]) if err != nil { - return nil, fmt.Errorf("cannot read footer: %w", err) + return fmt.Errorf("cannot read footer: %w", err) } - actualSum := reader.Crc32() + actualSum := reader.Crc32() - if actualSum != expectedSum { - return nil, fmt.Errorf("invalid checksum in top level trie, expected %v, actual %v", - expectedSum, actualSum) + if actualSum != expectedSum { + return fmt.Errorf("invalid checksum in top level trie, expected %v, actual %v", + expectedSum, actualSum) } - // read the checksum and discard, since we only care about whether ensureReachedEOF - _, err = io.ReadFull(reader, scratch[:crc32SumSize]) + // read the checksum and discard, since we only care about whether ensureReachedEOF + _, err = io.ReadFull(reader, scratch[:crc32SumSize]) if err != nil { - return nil, fmt.Errorf("could not read checksum from top trie file: %w", err) + return fmt.Errorf("could not read checksum from top trie file: %w", err) + } + + err = ensureReachedEOF(reader) + if err != nil { + return fmt.Errorf("fail to read top trie file: %w", err) + } + + rootTriesToReturn = tries + return nil + }) + return rootTriesToReturn, errToReturn +} + +func readTriesRootHash(logger zerolog.Logger, dir string, fileName string) ( + trieRootsToReturn []ledger.RootHash, + errToReturn error, +) { + + filepath, _ := filePathTopTries(dir, fileName) + errToReturn = withFile(logger, filepath, func(file *os.File) error { + var err error + + // read and validate magic bytes and version + err = validateFileHeader(MagicBytesCheckpointToptrie, VersionV6, file) + if err != nil { + return err + } + + // read the trie count from the top tries footer + _, triesCount, _, err := readTopTriesFooter(file) + if err != nil { + return fmt.Errorf("could not read top tries footer: %w", err) + } + + footerOffset := encNodeCountSize + encTrieCountSize + crc32SumSize + trieRootOffset := footerOffset + flattener.EncodedTrieSize*int(triesCount) + + _, err = file.Seek(int64(-trieRootOffset), io.SeekEnd) + if err != nil { + return fmt.Errorf("could not seek to the trie roots: %w", err) + } + + reader := bufio.NewReaderSize(file, defaultBufioReadSize) + trieRoots := make([]ledger.RootHash, 0, triesCount) + scratch := make([]byte, 1024*4) // must not be less than 1024 + for i := 0; i < int(triesCount); i++ { + trieRootNode, err := flattener.ReadEncodedTrie(reader, scratch) + if err != nil { + return fmt.Errorf("could not read trie root node: %w", err) + } + + trieRoots = append(trieRoots, ledger.RootHash(trieRootNode.RootHash)) } - tries[i] = trie - } - // read footer and discard, since we only care about checksum - _, err = io.ReadFull(reader, scratch[:encNodeCountSize+encTrieCountSize]) + trieRootsToReturn = trieRoots + return nil + }) + return trieRootsToReturn, errToReturn +} + +// checkpointHasRootHash checks if the given checkpoint file contains the expected root hash +func checkpointHasRootHash(logger zerolog.Logger, bootstrapDir, filename string, expectedRootHash ledger.RootHash) error { + roots, err := ReadTriesRootHash(logger, bootstrapDir, filename) if err != nil { - return nil, fmt.Errorf("cannot read footer: %w", err) + return fmt.Errorf("could not read checkpoint root hash: %w", err) } - actualSum := reader.Crc32() + if len(roots) == 0 { + return fmt.Errorf("no root hash found in checkpoint file") + } - if actualSum != expectedSum { - return nil, fmt.Errorf("invalid checksum in top level trie, expected %v, actual %v", - expectedSum, actualSum) + for i, root := range roots { + if root == expectedRootHash { + logger.Info().Msgf("found matching checkpoint root hash at index: %v, checkpoint total trie roots: %v", + i, len(roots)) + // found the expected commit + return nil + } } - // read the checksum and discard, since we only care about whether ensureReachedEOF - _, err = io.ReadFull(reader, scratch[:crc32SumSize]) + return fmt.Errorf("could not find expected root hash %v in checkpoint file which contains: %v", expectedRootHash, roots) +} + +func checkpointHasSingleRootHash(logger zerolog.Logger, bootstrapDir, filename string, expectedRootHash ledger.RootHash) error { + roots, err := ReadTriesRootHash(logger, bootstrapDir, filename) if err != nil { - return nil, fmt.Errorf("could not read checksum from top trie file: %w", err) + return fmt.Errorf("could not read checkpoint root hash: %w", err) } - err = ensureReachedEOF(reader) - if err != nil { - return nil, fmt.Errorf("fail to read top trie file: %w", err) + if len(roots) != 1 { + return fmt.Errorf("expected 1 root hash in checkpoint file, but got %v", len(roots)) } - return tries, nil + if roots[0] != expectedRootHash { + return fmt.Errorf("expected root hash %v, but got %v", expectedRootHash, roots[0]) + } + + return nil } func readFileHeader(reader io.Reader) (uint16, uint16, error) { @@ -647,7 +758,7 @@ func validateFileHeader(expectedMagic uint16, expectedVersion uint16, reader io. } if magic != expectedMagic { - return fmt.Errorf("wrong magic bytes, expect %v, bot got: %v", expectedMagic, magic) + return fmt.Errorf("wrong magic bytes, expect %#x, but got: %#x", expectedMagic, magic) } if version != expectedVersion { @@ -765,3 +876,58 @@ func ensureReachedEOF(reader io.Reader) error { return fmt.Errorf("fail to check if reached EOF: %w", err) } + +func validateCheckpointFile(logger zerolog.Logger, dir, fileName string) error { + headerPath := filePathCheckpointHeader(dir, fileName) + // validate header file + subtrieChecksums, topTrieChecksum, err := readCheckpointHeader(headerPath, logger) + if err != nil { + return err + } + + // validate subtrie files + for index, expectedSum := range subtrieChecksums { + filepath, _, err := filePathSubTries(dir, fileName, index) + if err != nil { + return err + } + err = withFile(logger, filepath, func(f *os.File) error { + _, checksum, err := readSubTriesFooter(f) + if err != nil { + return fmt.Errorf("cannot read sub trie node count: %w", err) + } + + if checksum != expectedSum { + return fmt.Errorf("mismatch checksum in subtrie file. 
checksum from checkpoint header %v does not "+ + "match with the checksum in subtrie file %v", checksum, expectedSum) + } + return nil + }) + + if err != nil { + return err + } + } + + // validate top trie file + filepath, _ := filePathTopTries(dir, fileName) + err = withFile(logger, filepath, func(file *os.File) error { + // read the top tries footer and validate the checksum + _, _, checkSum, err := readTopTriesFooter(file) + if err != nil { + return err + } + + if topTrieChecksum != checkSum { + return fmt.Errorf("mismatch top trie checksum, header file has %v, toptrie file has %v", + topTrieChecksum, checkSum) + } + + return nil + }) + if err != nil { + return err + } + + return nil +} diff --git a/ledger/complete/wal/checkpoint_v6_test.go b/ledger/complete/wal/checkpoint_v6_test.go index fb98777e0ec..83bbcb2a4c7 100644 --- a/ledger/complete/wal/checkpoint_v6_test.go +++ b/ledger/complete/wal/checkpoint_v6_test.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "crypto/rand" + "encoding/hex" "errors" "fmt" "io" @@ -81,7 +82,7 @@ func createSimpleTrie(t *testing.T) []*trie.MTrie { updatedTrie, _, err := trie.NewTrieWithUpdatedRegisters(emptyTrie, paths, payloads, true) require.NoError(t, err) - tries := []*trie.MTrie{emptyTrie, updatedTrie} + tries := []*trie.MTrie{updatedTrie} return tries } @@ -130,10 +131,29 @@ func createMultipleRandomTries(t *testing.T) []*trie.MTrie { require.NoError(t, err, "update registers") tries = append(tries, activeTrie) + // trie must be deep enough to test the subtrie + if !isTrieDeepEnough(activeTrie) { + // if not deep enough, keep re-trying + return createMultipleRandomTries(t) + } + return tries } -func createMultipleRandomTriesMini(t *testing.T) []*trie.MTrie { +func isTrieDeepEnough(trie *trie.MTrie) bool { + nodes := getNodesAtLevel(trie.RootNode(), subtrieLevel) + for _, n := range nodes { + if n == nil || n.IsLeaf() { + return false + } + } + + return true +} + +// createMultipleRandomTriesMini creates a set of tries with some shared paths. +// The second return value is the last trie in the set, which is guaranteed to be deep enough +func createMultipleRandomTriesMini(t *testing.T) ([]*trie.MTrie, *trie.MTrie) { tries := make([]*trie.MTrie, 0) activeTrie := trie.NewEmptyMTrie() @@ -157,7 +177,13 @@ func createMultipleRandomTriesMini(t *testing.T) []*trie.MTrie { require.NoError(t, err, "update registers") tries = append(tries, activeTrie) - return tries + // trie must be deep enough to test the subtrie + if !isTrieDeepEnough(activeTrie) { + // if not deep enough, keep re-trying + return createMultipleRandomTriesMini(t) + } + + return tries, activeTrie } func TestEncodeSubTrie(t *testing.T) { @@ -170,7 +196,7 @@ func TestEncodeSubTrie(t *testing.T) { for index, roots := range subtrieRoots { unittest.RunWithTempDir(t, func(dir string) { uniqueIndices, nodeCount, checksum, err := storeCheckpointSubTrie( - index, roots, estimatedSubtrieNodeCount, dir, file, &logger) + index, roots, estimatedSubtrieNodeCount, dir, file, logger) require.NoError(t, err) // subtrie roots might have duplicates, that's why we group them, @@ -205,7 +231,7 @@ uniqueIndices, nodeCount, checksum) // all the nodes - nodes, err := readCheckpointSubTrie(dir, file, index, checksum, &logger) + nodes, err := readCheckpointSubTrie(dir, file, index, checksum, logger) require.NoError(t, err) for _, root := range roots { @@ -243,10 +269,10 @@ func TestGetNodesByIndex(t *testing.T) { ns[i] = randomNode() } subtrieNodes := [][]*node.Node{ - []*node.Node{ns[0], 
ns[1]}, - []*node.Node{ns[2]}, - []*node.Node{}, - []*node.Node{}, + {ns[0], ns[1]}, + {ns[2]}, + {}, + {}, } topLevelNodes := []*node.Node{nil, ns[3]} totalSubTrieNodeCount := computeTotalSubTrieNodeCount(subtrieNodes) @@ -262,8 +288,8 @@ func TestWriteAndReadCheckpointV6EmptyTrie(t *testing.T) { tries := []*trie.MTrie{trie.NewEmptyMTrie()} fileName := "checkpoint-empty-trie" logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger), "fail to store checkpoint") - decoded, err := OpenAndReadCheckpointV6(dir, fileName, &logger) + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") + decoded, err := OpenAndReadCheckpointV6(dir, fileName, logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) requireTriesEqual(t, tries, decoded) }) @@ -274,8 +300,8 @@ func TestWriteAndReadCheckpointV6SimpleTrie(t *testing.T) { tries := createSimpleTrie(t) fileName := "checkpoint" logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger), "fail to store checkpoint") - decoded, err := OpenAndReadCheckpointV6(dir, fileName, &logger) + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") + decoded, err := OpenAndReadCheckpointV6(dir, fileName, logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) requireTriesEqual(t, tries, decoded) }) @@ -286,8 +312,8 @@ func TestWriteAndReadCheckpointV6MultipleTries(t *testing.T) { tries := createMultipleRandomTries(t) fileName := "checkpoint-multi-file" logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger), "fail to store checkpoint") - decoded, err := OpenAndReadCheckpointV6(dir, fileName, &logger) + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") + decoded, err := OpenAndReadCheckpointV6(dir, fileName, logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) requireTriesEqual(t, tries, decoded) }) @@ -298,8 +324,8 @@ func TestCheckpointV6IsDeterminstic(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createMultipleRandomTries(t) logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint1", &logger), "fail to store checkpoint") - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint2", &logger), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint1", logger), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint2", logger), "fail to store checkpoint") partFiles1 := filePaths(dir, "checkpoint1", subtrieLevel) partFiles2 := filePaths(dir, "checkpoint2", subtrieLevel) for i, partFile1 := range partFiles1 { @@ -317,12 +343,12 @@ func TestWriteAndReadCheckpointV6LeafEmptyTrie(t *testing.T) { tries := []*trie.MTrie{trie.NewEmptyMTrie()} fileName := "checkpoint-empty-trie" logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") bufSize := 10 leafNodesCh := make(chan *LeafNode, bufSize) go func() { - err := OpenAndReadLeafNodesFromCheckpointV6(leafNodesCh, dir, fileName, &logger) + err := 
OpenAndReadLeafNodesFromCheckpointV6(leafNodesCh, dir, fileName, tries[0].RootHash(), logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) }() for range leafNodesCh { @@ -336,41 +362,76 @@ func TestWriteAndReadCheckpointV6LeafSimpleTrie(t *testing.T) { tries := createSimpleTrie(t) fileName := "checkpoint" logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") bufSize := 1 leafNodesCh := make(chan *LeafNode, bufSize) + go func() { - err := OpenAndReadLeafNodesFromCheckpointV6(leafNodesCh, dir, fileName, &logger) + err := OpenAndReadLeafNodesFromCheckpointV6(leafNodesCh, dir, fileName, tries[0].RootHash(), logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) }() - resultPayloads := make([]ledger.Payload, 0) + resultPayloads := make([]*ledger.Payload, 0) for leafNode := range leafNodesCh { // avoid dummy payload from empty trie if leafNode.Payload != nil { - resultPayloads = append(resultPayloads, *leafNode.Payload) + resultPayloads = append(resultPayloads, leafNode.Payload) } } - require.EqualValues(t, tries[1].AllPayloads(), resultPayloads) + require.EqualValues(t, tries[0].AllPayloads(), resultPayloads) + }) +} + +func TestWriteAndReadCheckpointV6LeafMultipleTriesFail(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + fileName := "checkpoint-multi-leaf-file" + tries, _ := createMultipleRandomTriesMini(t) + logger := unittest.Logger() + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") + bufSize := 5 + leafNodesCh := make(chan *LeafNode, bufSize) + + // verify it should fail because the checkpoint has multiple tries + require.Error(t, OpenAndReadLeafNodesFromCheckpointV6(leafNodesCh, dir, fileName, tries[0].RootHash(), logger)) }) } -func TestWriteAndReadCheckpointV6LeafMultipleTries(t *testing.T) { +func TestWriteAndReadCheckpointV6LeafMultipleTriesOK(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { fileName := "checkpoint-multi-leaf-file" - tries := createMultipleRandomTriesMini(t) + _, last := createMultipleRandomTriesMini(t) + + tries := []*trie.MTrie{last} + logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") bufSize := 5 leafNodesCh := make(chan *LeafNode, bufSize) + go func() { - err := OpenAndReadLeafNodesFromCheckpointV6(leafNodesCh, dir, fileName, &logger) + err := OpenAndReadLeafNodesFromCheckpointV6(leafNodesCh, dir, fileName, tries[0].RootHash(), logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) }() - resultPayloads := make([]ledger.Payload, 0) + + allPayloads := tries[0].AllPayloads() + payloadMap := make(map[string]ledger.Payload, len(allPayloads)) + for _, payload := range allPayloads { + key := payload.EncodedKey() + + payloadMap[hex.EncodeToString(key)] = *payload + } + for leafNode := range leafNodesCh { - resultPayloads = append(resultPayloads, *leafNode.Payload) + // avoid dummy payload from empty trie + if leafNode.Payload != nil { + key := hex.EncodeToString(leafNode.Payload.EncodedKey()) + expected, ok := payloadMap[key] + require.True(t, ok, "payload not found") + require.Equal(t, expected, *leafNode.Payload, 
"payload not equal") + delete(payloadMap, key) + } } - require.NotEmpty(t, resultPayloads) + + require.Empty(t, payloadMap, fmt.Sprintf("not all payloads are read: %v", len(payloadMap))) }) } @@ -422,7 +483,7 @@ func compareFiles(file1, file2 string) error { return nil } -func storeCheckpointV5(tries []*trie.MTrie, dir string, fileName string, logger *zerolog.Logger) error { +func storeCheckpointV5(tries []*trie.MTrie, dir string, fileName string, logger zerolog.Logger) error { return StoreCheckpointV5(dir, fileName, logger, tries...) } @@ -432,8 +493,8 @@ func TestWriteAndReadCheckpointV5(t *testing.T) { fileName := "checkpoint1" logger := unittest.Logger() - require.NoErrorf(t, storeCheckpointV5(tries, dir, fileName, &logger), "fail to store checkpoint") - decoded, err := LoadCheckpoint(filepath.Join(dir, fileName), &logger) + require.NoErrorf(t, storeCheckpointV5(tries, dir, fileName, logger), "fail to store checkpoint") + decoded, err := LoadCheckpoint(filepath.Join(dir, fileName), logger) require.NoErrorf(t, err, "fail to load checkpoint") requireTriesEqual(t, tries, decoded) }) @@ -447,13 +508,13 @@ func TestWriteAndReadCheckpointV6ThenBackToV5(t *testing.T) { logger := unittest.Logger() // store tries into v6 then read back, then store into v5 - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint-v6", &logger), "fail to store checkpoint") - decoded, err := OpenAndReadCheckpointV6(dir, "checkpoint-v6", &logger) + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint-v6", logger), "fail to store checkpoint") + decoded, err := OpenAndReadCheckpointV6(dir, "checkpoint-v6", logger) require.NoErrorf(t, err, "fail to read checkpoint %v/checkpoint-v6", dir) - require.NoErrorf(t, storeCheckpointV5(decoded, dir, "checkpoint-v6-v5", &logger), "fail to store checkpoint") + require.NoErrorf(t, storeCheckpointV5(decoded, dir, "checkpoint-v6-v5", logger), "fail to store checkpoint") // store tries directly into v5 checkpoint - require.NoErrorf(t, storeCheckpointV5(tries, dir, "checkpoint-v5", &logger), "fail to store checkpoint") + require.NoErrorf(t, storeCheckpointV5(tries, dir, "checkpoint-v5", logger), "fail to store checkpoint") // compare the two v5 checkpoint files should be identical require.NoError(t, compareFiles( @@ -476,7 +537,7 @@ func TestCleanupOnErrorIfNotExist(t *testing.T) { logger := unittest.Logger() // store tries into v6 then read back, then store into v5 - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint-v6", &logger), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, "checkpoint-v6", logger), "fail to store checkpoint") require.NoError(t, deleteCheckpointFiles(dir, "checkpoint-v6")) // verify all files are removed @@ -505,13 +566,13 @@ func TestAllPartFileExist(t *testing.T) { require.NoErrorf(t, err, "fail to find sub trie file path") logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") // delete i-th part file, then the error should mention i-th file missing err = os.Remove(fileToDelete) require.NoError(t, err, "fail to remove part file") - _, err = OpenAndReadCheckpointV6(dir, fileName, &logger) + _, err = OpenAndReadCheckpointV6(dir, fileName, logger) require.ErrorIs(t, err, os.ErrNotExist, "wrong error type returned") } }) @@ -533,7 +594,7 @@ func 
TestAllPartFileExistLeafReader(t *testing.T) { require.NoErrorf(t, err, "fail to find sub trie file path") logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") // delete i-th part file, then the error should mention i-th file missing err = os.Remove(fileToDelete) @@ -541,7 +602,7 @@ func TestAllPartFileExistLeafReader(t *testing.T) { bufSize := 10 leafNodesCh := make(chan *LeafNode, bufSize) - err = OpenAndReadLeafNodesFromCheckpointV6(leafNodesCh, dir, fileName, &logger) + err = OpenAndReadLeafNodesFromCheckpointV6(leafNodesCh, dir, fileName, tries[0].RootHash(), logger) require.ErrorIs(t, err, os.ErrNotExist, "wrong error type returned") } }) @@ -553,9 +614,9 @@ func TestCannotStoreTwice(t *testing.T) { tries := createSimpleTrie(t) fileName := "checkpoint" logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") // checkpoint already exist, can't store again - require.Error(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger)) + require.Error(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger)) }) } @@ -580,13 +641,92 @@ func TestCopyCheckpointFileV6(t *testing.T) { tries := createSimpleTrie(t) fileName := "checkpoint" logger := unittest.Logger() - require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, &logger), "fail to store checkpoint") + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") to := filepath.Join(dir, "newfolder") newPaths, err := CopyCheckpointFile(fileName, dir, to) require.NoError(t, err) log.Info().Msgf("copied to :%v", newPaths) - decoded, err := OpenAndReadCheckpointV6(to, fileName, &logger) + decoded, err := OpenAndReadCheckpointV6(to, fileName, logger) require.NoErrorf(t, err, "fail to read checkpoint %v/%v", dir, fileName) requireTriesEqual(t, tries, decoded) }) } + +func TestReadCheckpointRootHash(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + tries := createSimpleTrie(t) + fileName := "checkpoint" + logger := unittest.Logger() + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") + + trieRoots, err := ReadTriesRootHash(logger, dir, fileName) + require.NoError(t, err) + for i, root := range trieRoots { + expectedHash := tries[i].RootHash() + require.Equal(t, expectedHash, root) + } + require.Equal(t, len(tries), len(trieRoots)) + }) +} + +func TestReadCheckpointRootHashValidateChecksum(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + tries := createSimpleTrie(t) + fileName := "checkpoint" + logger := unittest.Logger() + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") + + // add a wrong checksum to top trie file + topTrieFilePath, _ := filePathTopTries(dir, fileName) + file, err := os.OpenFile(topTrieFilePath, os.O_RDWR, 0644) + require.NoError(t, err) + + fileInfo, err := file.Stat() + require.NoError(t, err) + fileSize := fileInfo.Size() + + invalidSum := encodeCRC32Sum(10) + _, err = file.WriteAt(invalidSum, fileSize-crc32SumSize) + require.NoError(t, err) + require.NoError(t, file.Close()) + + // ReadTriesRootHash will first 
validate the checksum and detect the error + _, err = ReadTriesRootHash(logger, dir, fileName) + require.Error(t, err) + }) +} + +func TestReadCheckpointRootHashMulti(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + tries := createMultipleRandomTries(t) + fileName := "checkpoint" + logger := unittest.Logger() + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") + + trieRoots, err := ReadTriesRootHash(logger, dir, fileName) + require.NoError(t, err) + for i, root := range trieRoots { + expectedHash := tries[i].RootHash() + require.Equal(t, expectedHash, root) + } + require.Equal(t, len(tries), len(trieRoots)) + }) +} + +func TestCheckpointHasRootHash(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + tries := createMultipleRandomTries(t) + fileName := "checkpoint" + logger := unittest.Logger() + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "fail to store checkpoint") + + trieRoots, err := ReadTriesRootHash(logger, dir, fileName) + require.NoError(t, err) + for _, root := range trieRoots { + require.NoError(t, CheckpointHasRootHash(logger, dir, fileName, root)) + } + + nonExist := ledger.RootHash(unittest.StateCommitmentFixture()) + require.Error(t, CheckpointHasRootHash(logger, dir, fileName, nonExist)) + }) +} diff --git a/ledger/complete/wal/checkpoint_v6_writer.go b/ledger/complete/wal/checkpoint_v6_writer.go index 7b138a61085..b72eff4392e 100644 --- a/ledger/complete/wal/checkpoint_v6_writer.go +++ b/ledger/complete/wal/checkpoint_v6_writer.go @@ -10,6 +10,7 @@ import ( "path" "path/filepath" + "github.com/docker/go-units" "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" @@ -18,6 +19,7 @@ import ( "github.com/onflow/flow-go/ledger/complete/mtrie/node" "github.com/onflow/flow-go/ledger/complete/mtrie/trie" utilsio "github.com/onflow/flow-go/utils/io" + "github.com/onflow/flow-go/utils/merr" ) const subtrieLevel = 4 @@ -29,13 +31,13 @@ func subtrieCountByLevel(level uint16) int { // StoreCheckpointV6SingleThread stores checkpoint file in v6 in a single threaded manner, // useful when EN is executing block. 
-func StoreCheckpointV6SingleThread(tries []*trie.MTrie, outputDir string, outputFile string, logger *zerolog.Logger) error { +func StoreCheckpointV6SingleThread(tries []*trie.MTrie, outputDir string, outputFile string, logger zerolog.Logger) error { return StoreCheckpointV6(tries, outputDir, outputFile, logger, 1) } // StoreCheckpointV6Concurrently stores checkpoint file in v6 in max workers, // useful during state extraction -func StoreCheckpointV6Concurrently(tries []*trie.MTrie, outputDir string, outputFile string, logger *zerolog.Logger) error { +func StoreCheckpointV6Concurrently(tries []*trie.MTrie, outputDir string, outputFile string, logger zerolog.Logger) error { return StoreCheckpointV6(tries, outputDir, outputFile, logger, 16) } @@ -49,7 +51,7 @@ func StoreCheckpointV6Concurrently(tries []*trie.MTrie, outputDir string, output // // nWorker specifies how many workers to encode subtrie concurrently, valid range [1,16] func StoreCheckpointV6( - tries []*trie.MTrie, outputDir string, outputFile string, logger *zerolog.Logger, nWorker uint) error { + tries []*trie.MTrie, outputDir string, outputFile string, logger zerolog.Logger, nWorker uint) error { err := storeCheckpointV6(tries, outputDir, outputFile, logger, nWorker) if err != nil { cleanupErr := deleteCheckpointFiles(outputDir, outputFile) @@ -63,7 +65,7 @@ func StoreCheckpointV6( } func storeCheckpointV6( - tries []*trie.MTrie, outputDir string, outputFile string, logger *zerolog.Logger, nWorker uint) error { + tries []*trie.MTrie, outputDir string, outputFile string, logger zerolog.Logger, nWorker uint) error { if len(tries) == 0 { logger.Info().Msg("no tries to be checkpointed") return nil @@ -79,8 +81,10 @@ func storeCheckpointV6( lg.Info(). Str("first_hash", first.RootHash().String()). Uint64("first_reg_count", first.AllocatedRegCount()). + Str("first_reg_size", units.BytesSize(float64(first.AllocatedRegSize()))). Str("last_hash", last.RootHash().String()). Uint64("last_reg_count", last.AllocatedRegCount()). + Str("last_reg_size", units.BytesSize(float64(last.AllocatedRegSize()))). Msg("storing checkpoint") // make sure a checkpoint file with same name doesn't exist @@ -103,7 +107,7 @@ func storeCheckpointV6( subTrieRootAndTopLevelTrieCount(tries), outputDir, outputFile, - &lg, + lg, nWorker, ) if err != nil { @@ -113,12 +117,12 @@ func storeCheckpointV6( lg.Info().Msgf("subtrie have been stored. 
sub trie node count: %v", subTriesNodeCount) topTrieChecksum, err := storeTopLevelNodesAndTrieRoots( - tries, subTrieRootIndices, subTriesNodeCount, outputDir, outputFile, &lg) + tries, subTrieRootIndices, subTriesNodeCount, outputDir, outputFile, lg) if err != nil { return fmt.Errorf("could not store top level tries: %w", err) } - err = storeCheckpointHeader(subTrieChecksums, topTrieChecksum, outputDir, outputFile, &lg) + err = storeCheckpointHeader(subTrieChecksums, topTrieChecksum, outputDir, outputFile, lg) if err != nil { return fmt.Errorf("could not store checkpoint header: %w", err) } @@ -136,7 +140,7 @@ func storeCheckpointHeader( topTrieChecksum uint32, outputDir string, outputFile string, - logger *zerolog.Logger, + logger zerolog.Logger, ) ( errToReturn error, ) { @@ -207,7 +211,7 @@ func storeTopLevelNodesAndTrieRoots( subTriesNodeCount uint64, outputDir string, outputFile string, - logger *zerolog.Logger, + logger zerolog.Logger, ) ( checksumOfTopTriePartFile uint32, errToReturn error, @@ -319,7 +323,7 @@ func storeSubTrieConcurrently( subAndTopNodeCount int, // useful for preallocating memory for the node indices map to be returned outputDir string, outputFile string, - logger *zerolog.Logger, + logger zerolog.Logger, nWorker uint, ) ( map[*node.Node]uint64, // node indices @@ -399,13 +403,13 @@ func storeSubTrieConcurrently( return results, nodeCounter, checksums, nil } -func createWriterForTopTries(dir string, file string, logger *zerolog.Logger) (io.WriteCloser, error) { +func createWriterForTopTries(dir string, file string, logger zerolog.Logger) (io.WriteCloser, error) { _, topTriesFileName := filePathTopTries(dir, file) return createClosableWriter(dir, topTriesFileName, logger) } -func createWriterForSubtrie(dir string, file string, logger *zerolog.Logger, index int) (io.WriteCloser, error) { +func createWriterForSubtrie(dir string, file string, logger zerolog.Logger, index int) (io.WriteCloser, error) { _, subTriesFileName, err := filePathSubTries(dir, file, index) if err != nil { return nil, err @@ -414,7 +418,7 @@ func createWriterForSubtrie(dir string, file string, logger *zerolog.Logger, ind return createClosableWriter(dir, subTriesFileName, logger) } -func createClosableWriter(dir string, fileName string, logger *zerolog.Logger) (io.WriteCloser, error) { +func createClosableWriter(dir string, fileName string, logger zerolog.Logger) (io.WriteCloser, error) { fullPath := path.Join(dir, fileName) if utilsio.FileExists(fullPath) { return nil, fmt.Errorf("checkpoint part file %v already exists", fullPath) @@ -447,7 +451,7 @@ func storeCheckpointSubTrie( estimatedSubtrieNodeCount int, // for estimate the amount of memory to be preallocated outputDir string, outputFile string, - logger *zerolog.Logger, + logger zerolog.Logger, ) ( rootNodesOfAllSubtries map[*node.Node]uint64, // the stored position of each unique root node totalSubtrieNodeCount uint64, @@ -705,36 +709,26 @@ func decodeSubtrieCount(encoded []byte) (uint16, error) { return binary.BigEndian.Uint16(encoded), nil } -// closeAndMergeError close the closable and merge the closeErr with the given err into a multierror -// Note: when using this function in a defer function, don't use as below: -// func XXX() ( -// -// err error, -// ) { -// def func() { -// // bad, because the definition of err might get overwritten -// err = closeAndMergeError(closable, err) -// }() -// -// Better to use as below: -// func XXX() ( -// -// errToReturn error, -// ) { -// def func() { -// // good, because the error to returned is 
only updated here, and guaranteed to be returned -// errToReturn = closeAndMergeError(closable, errToReturn) -// }() -func closeAndMergeError(closable io.Closer, err error) error { - var merr *multierror.Error - if err != nil { - merr = multierror.Append(merr, err) - } +var closeAndMergeError = merr.CloseAndMergeError - closeError := closable.Close() - if closeError != nil { - merr = multierror.Append(merr, closeError) +// withFile opens the file at the given path, and calls the given function with the opened file. +// It handles closing the file and evicting it from the Linux page cache. +func withFile(logger zerolog.Logger, filepath string, f func(file *os.File) error) ( + errToReturn error, +) { + + file, err := os.Open(filepath) + if err != nil { + return fmt.Errorf("could not open file %v: %w", filepath, err) } + defer func(file *os.File) { + evictErr := evictFileFromLinuxPageCache(file, false, logger) + if evictErr != nil { + logger.Warn().Msgf("failed to evict file %s from Linux page cache: %s", filepath, evictErr) + // No need to return this error because it's possible to continue normal operations. + } + errToReturn = closeAndMergeError(file, errToReturn) + }(file) - return merr.ErrorOrNil() + return f(file) } diff --git a/ledger/complete/wal/checkpointer.go b/ledger/complete/wal/checkpointer.go index 6b9239f1c22..b67f2385440 100644 --- a/ledger/complete/wal/checkpointer.go +++ b/ledger/complete/wal/checkpointer.go @@ -13,6 +13,7 @@ import ( "strconv" "strings" + "github.com/docker/go-units" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "golang.org/x/sync/errgroup" @@ -30,9 +31,12 @@ import ( const checkpointFilenamePrefix = "checkpoint." -const MagicBytesCheckpointHeader uint16 = 0x2137 -const MagicBytesCheckpointSubtrie uint16 = 0x2136 -const MagicBytesCheckpointToptrie uint16 = 0x2135 +const ( + MagicBytesCheckpointHeader uint16 = 0x2137 + MagicBytesCheckpointSubtrie uint16 = 0x2136 + MagicBytesCheckpointToptrie uint16 = 0x2135 + MagicBytesPayloadHeader uint16 = 0x2138 +) const VersionV1 uint16 = 0x01 @@ -246,13 +250,20 @@ func (c *Checkpointer) Checkpoint(to int) (err error) { fileName := NumberToFilename(to) - err = StoreCheckpointV6SingleThread(tries, c.wal.dir, fileName, &c.wal.log) + err = StoreCheckpointV6SingleThread(tries, c.wal.dir, fileName, c.wal.log) if err != nil { return fmt.Errorf("could not create checkpoint for %v: %w", to, err) } - c.wal.log.Info().Msgf("created checkpoint %d with %d tries", to, len(tries)) + checkpointFileSize, err := ReadCheckpointFileSize(c.wal.dir, fileName) + if err != nil { + return fmt.Errorf("could not read checkpoint file size: %w", err) + } + + c.wal.log.Info(). + Str("checkpoint_file_size", units.BytesSize(float64(checkpointFileSize))). + Msgf("created checkpoint %d with %d tries", to, len(tries)) return nil } @@ -267,7 +278,7 @@ func NumberToFilename(n int) string { } func (c *Checkpointer) CheckpointWriter(to int) (io.WriteCloser, error) { - return CreateCheckpointWriterForFile(c.dir, NumberToFilename(to), &c.wal.log) + return CreateCheckpointWriterForFile(c.dir, NumberToFilename(to), c.wal.log) } func (c *Checkpointer) Dir() string { @@ -275,7 +286,7 @@ func (c *Checkpointer) Dir() string { } // CreateCheckpointWriterForFile returns a file writer that will write to a temporary file and then move it to the checkpoint folder by renaming it. 
-func CreateCheckpointWriterForFile(dir, filename string, logger *zerolog.Logger) (io.WriteCloser, error) { +func CreateCheckpointWriterForFile(dir, filename string, logger zerolog.Logger) (io.WriteCloser, error) { fullname := path.Join(dir, filename) @@ -312,7 +323,7 @@ func CreateCheckpointWriterForFile(dir, filename string, logger *zerolog.Logger) // as for each node, the children have been previously encountered. // TODO: evaluate alternatives to CRC32 since checkpoint file is many GB in size. // TODO: add concurrency if the performance gains are enough to offset complexity. -func StoreCheckpointV5(dir string, fileName string, logger *zerolog.Logger, tries ...*trie.MTrie) ( +func StoreCheckpointV5(dir string, fileName string, logger zerolog.Logger, tries ...*trie.MTrie) ( // error // Note, the above code, which didn't define the name "err" for the returned error, would be wrong, // because err needs to be defined in order to be updated by the defer function @@ -428,7 +439,7 @@ func StoreCheckpointV5(dir string, fileName string, logger *zerolog.Logger, trie // Index 0 is a special case with nil node. traversedSubtrieNodes[nil] = 0 - logging := logProgress(fmt.Sprintf("storing %v-th sub trie roots", i), estimatedSubtrieNodeCount, &log.Logger) + logging := logProgress(fmt.Sprintf("storing %v-th sub trie roots", i), estimatedSubtrieNodeCount, log.Logger) for _, root := range subTrieRoot { // Empty trie is always added to forest as starting point and // empty trie's root is nil. It remains in the forest until evicted @@ -516,10 +527,16 @@ func StoreCheckpointV5(dir string, fileName string, logger *zerolog.Logger, trie return nil } -func logProgress(msg string, estimatedSubtrieNodeCount int, logger *zerolog.Logger) func(nodeCounter uint64) { - lg := util.LogProgress(msg, estimatedSubtrieNodeCount, logger) +func logProgress(msg string, estimatedSubtrieNodeCount int, logger zerolog.Logger) func(nodeCounter uint64) { + lg := util.LogProgress( + logger, + util.DefaultLogProgressConfig( + msg, + estimatedSubtrieNodeCount, + ), + ) return func(index uint64) { - lg(int(index)) + lg(1) } } @@ -601,12 +618,12 @@ func getNodesAtLevel(root *node.Node, level uint) []*node.Node { func (c *Checkpointer) LoadCheckpoint(checkpoint int) ([]*trie.MTrie, error) { filepath := path.Join(c.dir, NumberToFilename(checkpoint)) - return LoadCheckpoint(filepath, &c.wal.log) + return LoadCheckpoint(filepath, c.wal.log) } func (c *Checkpointer) LoadRootCheckpoint() ([]*trie.MTrie, error) { filepath := path.Join(c.dir, bootstrap.FilenameWALRootCheckpoint) - return LoadCheckpoint(filepath, &c.wal.log) + return LoadCheckpoint(filepath, c.wal.log) } func (c *Checkpointer) HasRootCheckpoint() (bool, error) { @@ -628,7 +645,7 @@ func (c *Checkpointer) RemoveCheckpoint(checkpoint int) error { return deleteCheckpointFiles(c.dir, name) } -func LoadCheckpoint(filepath string, logger *zerolog.Logger) ( +func LoadCheckpoint(filepath string, logger zerolog.Logger) ( tries []*trie.MTrie, errToReturn error) { file, err := os.Open(filepath) @@ -648,7 +665,7 @@ func LoadCheckpoint(filepath string, logger *zerolog.Logger) ( return readCheckpoint(file, logger) } -func readCheckpoint(f *os.File, logger *zerolog.Logger) ([]*trie.MTrie, error) { +func readCheckpoint(f *os.File, logger zerolog.Logger) ([]*trie.MTrie, error) { // Read header: magic (2 bytes) + version (2 bytes) header := make([]byte, headerSize) @@ -888,7 +905,7 @@ func readCheckpointV4(f *os.File) ([]*trie.MTrie, error) { // readCheckpointV5 decodes checkpoint file (version 5) 
and returns a list of tries. // Checkpoint file header (magic and version) are verified by the caller. -func readCheckpointV5(f *os.File, logger *zerolog.Logger) ([]*trie.MTrie, error) { +func readCheckpointV5(f *os.File, logger zerolog.Logger) ([]*trie.MTrie, error) { logger.Info().Msgf("reading v5 checkpoint file") // Scratch buffer is used as temporary buffer that reader can read into. @@ -1006,7 +1023,7 @@ func readCheckpointV5(f *os.File, logger *zerolog.Logger) ([]*trie.MTrie, error) // causes two checkpoint files to be cached for each checkpointing, eventually // caching hundreds of GB. // CAUTION: no-op when GOOS != linux. -func evictFileFromLinuxPageCache(f *os.File, fsync bool, logger *zerolog.Logger) error { +func evictFileFromLinuxPageCache(f *os.File, fsync bool, logger zerolog.Logger) error { err := fadviseNoLinuxPageCache(f.Fd(), fsync) if err != nil { return err @@ -1030,7 +1047,7 @@ func CopyCheckpointFile(filename string, from string, to string) ( []string, error, ) { - // It's possible that the trie dir does not yet exist. If not this will create the the required path + // It's possible that the trie dir does not yet exist. If not this will create the required path err := os.MkdirAll(to, 0700) if err != nil { return nil, err @@ -1069,3 +1086,35 @@ func CopyCheckpointFile(filename string, from string, to string) ( return newPaths, nil } + +// SoftlinkCheckpointFile creates soft links of the checkpoint file including the part files from the given `from` to +// the `to` directory +func SoftlinkCheckpointFile(filename string, from string, to string) ([]string, error) { + + // It's possible that the trie dir does not yet exist. If not this will create the required path + err := os.MkdirAll(to, 0700) + if err != nil { + return nil, err + } + + // checkpoint V6 produces multiple checkpoint part files that need to be copied over + pattern := filePathPattern(from, filename) + matched, err := filepath.Glob(pattern) + if err != nil { + return nil, fmt.Errorf("could not glob checkpoint file with pattern %v: %w", pattern, err) + } + + newPaths := make([]string, len(matched)) + for i, match := range matched { + _, partfile := filepath.Split(match) + newPath := filepath.Join(to, partfile) + newPaths[i] = newPath + + err := os.Symlink(match, newPath) + if err != nil { + return nil, fmt.Errorf("cannot link file from %v to %v: %w", match, newPath, err) + } + } + + return newPaths, nil +} diff --git a/ledger/complete/wal/checkpointer_test.go b/ledger/complete/wal/checkpointer_test.go index 40ec3ff5925..dd46ffdb85e 100644 --- a/ledger/complete/wal/checkpointer_test.go +++ b/ledger/complete/wal/checkpointer_test.go @@ -59,7 +59,7 @@ func Test_WAL(t *testing.T) { led, err := complete.NewLedger(diskWal, size*10, metricsCollector, logger, complete.DefaultPathFinderVersion) require.NoError(t, err) - compactor, err := complete.NewCompactor(led, diskWal, unittest.Logger(), size, checkpointDistance, checkpointsToKeep, atomic.NewBool(false)) + compactor, err := complete.NewCompactor(led, diskWal, unittest.Logger(), size, checkpointDistance, checkpointsToKeep, atomic.NewBool(false), metrics.NewNoopCollector()) require.NoError(t, err) <-compactor.Ready() @@ -531,12 +531,12 @@ func Test_StoringLoadingCheckpoints(t *testing.T) { fullpath := path.Join(dir, "temp-checkpoint") - err = realWAL.StoreCheckpointV5(dir, "temp-checkpoint", &logger, updatedTrie) + err = realWAL.StoreCheckpointV5(dir, "temp-checkpoint", logger, updatedTrie) require.NoError(t, err) t.Run("works without data modification", func(t 
*testing.T) { logger := unittest.Logger() - tries, err := realWAL.LoadCheckpoint(fullpath, &logger) + tries, err := realWAL.LoadCheckpoint(fullpath, logger) require.NoError(t, err) require.Equal(t, 1, len(tries)) require.Equal(t, updatedTrie, tries[0]) @@ -554,7 +554,7 @@ func Test_StoringLoadingCheckpoints(t *testing.T) { require.NoError(t, err) logger := unittest.Logger() - tries, err := realWAL.LoadCheckpoint(fullpath, &logger) + tries, err := realWAL.LoadCheckpoint(fullpath, logger) require.Error(t, err) require.Nil(t, tries) require.Contains(t, err.Error(), "checksum") diff --git a/ledger/complete/wal/checkpointer_versioning_test.go b/ledger/complete/wal/checkpointer_versioning_test.go index 58c85a3d2dc..af2d6ab4acd 100644 --- a/ledger/complete/wal/checkpointer_versioning_test.go +++ b/ledger/complete/wal/checkpointer_versioning_test.go @@ -20,7 +20,7 @@ func TestLoadCheckpointV1(t *testing.T) { } logger := zerolog.Nop() - tries, err := LoadCheckpoint("test_data/checkpoint.v1", &logger) + tries, err := LoadCheckpoint("test_data/checkpoint.v1", logger) require.NoError(t, err) require.Equal(t, len(expectedRootHash), len(tries)) @@ -40,7 +40,7 @@ func TestLoadCheckpointV3(t *testing.T) { } logger := zerolog.Nop() - tries, err := LoadCheckpoint("test_data/checkpoint.v3", &logger) + tries, err := LoadCheckpoint("test_data/checkpoint.v3", logger) require.NoError(t, err) require.Equal(t, len(expectedRootHash), len(tries)) @@ -60,7 +60,7 @@ func TestLoadCheckpointV4(t *testing.T) { } logger := zerolog.Nop() - tries, err := LoadCheckpoint("test_data/checkpoint.v4", &logger) + tries, err := LoadCheckpoint("test_data/checkpoint.v4", logger) require.NoError(t, err) require.Equal(t, len(expectedRootHash), len(tries)) diff --git a/ledger/complete/wal/encoding.go b/ledger/complete/wal/encoding.go index 8bc5f8d6d13..dce4f084f93 100644 --- a/ledger/complete/wal/encoding.go +++ b/ledger/complete/wal/encoding.go @@ -51,6 +51,10 @@ func EncodeDelete(rootHash ledger.RootHash) []byte { return buf } +// Decode decodes the given data into a WAL operation, root hash and trie update. +// It returns (WALDelete, rootHash, nil, nil) if the operation is WALDelete. +// It returns (WALUpdate, hash.DummyHash, update, nil) if the operation is WALUpdate. +// To read the root hash of the trie update, use update.RootHash. func Decode(data []byte) (operation WALOperation, rootHash ledger.RootHash, update *ledger.TrieUpdate, err error) { if len(data) < 4 { // 1 byte op + 2 size + actual data = 4 minimum err = fmt.Errorf("data corrupted, too short to represent operation - hexencoded data: %x", data) diff --git a/ledger/complete/wal/syncrename.go b/ledger/complete/wal/syncrename.go index 140d4534006..28a0e47cfea 100644 --- a/ledger/complete/wal/syncrename.go +++ b/ledger/complete/wal/syncrename.go @@ -21,7 +21,7 @@ type WriterSeekerCloser interface { // to target one as the last step. This helps avoid the situation where writing is // interrupted, leaving an unusable file under the target name. type SyncOnCloseRenameFile struct { - logger *zerolog.Logger + logger zerolog.Logger file *os.File targetName string savedError error // savedError is the first error returned from Write. Close() renames temp file to target file only if savedError is nil.
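The SyncOnCloseRenameFile type above implements a standard atomic-write pattern: write everything to a temporary file, flush and sync it on Close, and only rename it to the target name as the final step, so an interrupted write can never leave a partial file under the target name. Below is a minimal, self-contained Go sketch of that pattern for reference; the function name and structure are illustrative only and are not flow-go's actual implementation.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeAtomically writes data to a temporary file in the target's directory,
// syncs it to stable storage, and only then renames it over the target name.
// Readers therefore observe either the old file or the complete new file,
// never a partially written one.
func writeAtomically(target string, data []byte) error {
	tmp, err := os.CreateTemp(filepath.Dir(target), "tmp-*")
	if err != nil {
		return fmt.Errorf("cannot create temp file: %w", err)
	}
	// Best-effort cleanup; after a successful rename the temp name no longer
	// exists and this remove is a harmless no-op.
	defer os.Remove(tmp.Name())

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return fmt.Errorf("cannot write temp file: %w", err)
	}
	// Sync before rename so the data is durable by the time the file becomes
	// visible under the target name. (A fully durable variant would also
	// fsync the containing directory after the rename.)
	if err := tmp.Sync(); err != nil {
		tmp.Close()
		return fmt.Errorf("cannot sync temp file: %w", err)
	}
	if err := tmp.Close(); err != nil {
		return fmt.Errorf("cannot close temp file: %w", err)
	}
	// Rename is atomic on POSIX filesystems when source and target are in
	// the same directory.
	return os.Rename(tmp.Name(), target)
}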
diff --git a/ledger/complete/wal/syncrename_test.go b/ledger/complete/wal/syncrename_test.go index 406905a631b..c8ee860f487 100644 --- a/ledger/complete/wal/syncrename_test.go +++ b/ledger/complete/wal/syncrename_test.go @@ -34,7 +34,7 @@ func Test_RenameHappensAfterClosing(t *testing.T) { file: file, targetName: fullFileName, Writer: writer, - logger: &logger, + logger: logger, } sampleBytes := []byte{2, 1, 3, 7} diff --git a/ledger/complete/wal/wal.go b/ledger/complete/wal/wal.go index 6a9b38d1b3f..4f8d04082c2 100644 --- a/ledger/complete/wal/wal.go +++ b/ledger/complete/wal/wal.go @@ -4,7 +4,7 @@ import ( "fmt" "sort" - prometheusWAL "github.com/m4ksio/wal/wal" + prometheusWAL "github.com/onflow/wal/wal" "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" @@ -29,7 +29,7 @@ type DiskWAL struct { func NewDiskWAL(logger zerolog.Logger, reg prometheus.Registerer, metrics module.WALMetrics, dir string, forestCapacity int, pathByteSize int, segmentSize int) (*DiskWAL, error) { w, err := prometheusWAL.NewSize(logger, reg, dir, segmentSize, false) if err != nil { - return nil, err + return nil, fmt.Errorf("could not create disk wal from dir %v, segmentSize %v: %w", dir, segmentSize, err) } return &DiskWAL{ wal: w, @@ -149,7 +149,7 @@ func (w *DiskWAL) replay( useCheckpoints bool, ) error { - w.log.Info().Msgf("loading checkpoint with WAL from %d to %d", from, to) + w.log.Info().Msgf("loading checkpoint with WAL from %d to %d, useCheckpoints %v", from, to, useCheckpoints) if to < from { return fmt.Errorf("end of range cannot be smaller than beginning") @@ -178,6 +178,8 @@ func (w *DiskWAL) replay( availableCheckpoints = getPossibleCheckpoints(allCheckpoints, from-1, to) } + w.log.Info().Ints("checkpoints", availableCheckpoints).Msg("available checkpoints") + for len(availableCheckpoints) > 0 { // as long as there are checkpoints to try, we always try with the last checkpoint file, since // it allows us to load less segments. @@ -194,7 +196,16 @@ func (w *DiskWAL) replay( continue } - w.log.Info().Int("checkpoint", latestCheckpoint).Msg("checkpoint loaded") + if len(forestSequencing) == 0 { + return fmt.Errorf("checkpoint loaded but has no trie") + } + + firstTrie := forestSequencing[0].RootHash() + lastTrie := forestSequencing[len(forestSequencing)-1].RootHash() + w.log.Info().Int("checkpoint", latestCheckpoint). + Hex("first_trie", firstTrie[:]). + Hex("last_trie", lastTrie[:]). + Msg("checkpoint loaded") err = checkpointFn(forestSequencing) if err != nil { @@ -206,12 +217,17 @@ func (w *DiskWAL) replay( } if loadedCheckpoint != -1 && loadedCheckpoint == to { + w.log.Info().Msgf("no checkpoint to load") return nil } if loadedCheckpoint >= 0 { startSegment = loadedCheckpoint + 1 } + + w.log.Info(). + Int("start_segment", startSegment). + Msg("starting replay from checkpoint segment") } if loadedCheckpoint == -1 && startSegment == 0 { @@ -231,8 +247,13 @@ func (w *DiskWAL) replay( return fmt.Errorf("error while handling root checkpoint: %w", err) } - w.log.Info().Msgf("root checkpoint loaded") + rootHash := flattenedForest[len(flattenedForest)-1].RootHash() + w.log.Info(). + Hex("root_hash", rootHash[:]). + Msg("root checkpoint loaded") checkpointLoaded = true + } else { + w.log.Info().Msgf("no root checkpoint was found") } } @@ -241,7 +262,7 @@ func (w *DiskWAL) replay( Int("loaded_checkpoint", loadedCheckpoint). 
Msgf("replaying segments from %d to %d", startSegment, to) - sr, err := prometheusWAL.NewSegmentsRangeReader(prometheusWAL.SegmentRange{ + sr, err := prometheusWAL.NewSegmentsRangeReader(w.log, prometheusWAL.SegmentRange{ Dir: w.wal.Dir(), First: startSegment, Last: to, diff --git a/ledger/ledger.go b/ledger/ledger.go index 55689767c32..3e5b8c2a906 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -387,7 +387,7 @@ func (v *Value) UnmarshalJSON(b []byte) error { } // Migration defines how to convert the given slice of input payloads into an slice of output payloads -type Migration func(payloads []Payload) ([]Payload, error) +type Migration func(payloads []*Payload) ([]*Payload, error) // Reporter reports on data from the state type Reporter interface { diff --git a/ledger/mock/ledger.go b/ledger/mock/ledger.go index 552dd9b7719..82a9de94e63 100644 --- a/ledger/mock/ledger.go +++ b/ledger/mock/ledger.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -12,10 +12,14 @@ type Ledger struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *Ledger) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -32,6 +36,10 @@ func (_m *Ledger) Done() <-chan struct{} { func (_m *Ledger) Get(query *ledger.Query) ([]ledger.Value, error) { ret := _m.Called(query) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 []ledger.Value var r1 error if rf, ok := ret.Get(0).(func(*ledger.Query) ([]ledger.Value, error)); ok { @@ -58,6 +66,10 @@ func (_m *Ledger) Get(query *ledger.Query) ([]ledger.Value, error) { func (_m *Ledger) GetSingleValue(query *ledger.QuerySingleValue) (ledger.Value, error) { ret := _m.Called(query) + if len(ret) == 0 { + panic("no return value specified for GetSingleValue") + } + var r0 ledger.Value var r1 error if rf, ok := ret.Get(0).(func(*ledger.QuerySingleValue) (ledger.Value, error)); ok { @@ -84,6 +96,10 @@ func (_m *Ledger) GetSingleValue(query *ledger.QuerySingleValue) (ledger.Value, func (_m *Ledger) HasState(state ledger.State) bool { ret := _m.Called(state) + if len(ret) == 0 { + panic("no return value specified for HasState") + } + var r0 bool if rf, ok := ret.Get(0).(func(ledger.State) bool); ok { r0 = rf(state) @@ -94,10 +110,14 @@ func (_m *Ledger) HasState(state ledger.State) bool { return r0 } -// InitialState provides a mock function with given fields: +// InitialState provides a mock function with no fields func (_m *Ledger) InitialState() ledger.State { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for InitialState") + } + var r0 ledger.State if rf, ok := ret.Get(0).(func() ledger.State); ok { r0 = rf() @@ -114,6 +134,10 @@ func (_m *Ledger) InitialState() ledger.State { func (_m *Ledger) Prove(query *ledger.Query) (ledger.Proof, error) { ret := _m.Called(query) + if len(ret) == 0 { + panic("no return value specified for Prove") + } + var r0 ledger.Proof var r1 error if rf, ok := ret.Get(0).(func(*ledger.Query) (ledger.Proof, error)); ok { @@ -136,10 +160,14 @@ func (_m *Ledger) Prove(query *ledger.Query) (ledger.Proof, error) { return r0, r1 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *Ledger) Ready() <-chan struct{} { ret := _m.Called() + 
if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -156,6 +184,10 @@ func (_m *Ledger) Ready() <-chan struct{} { func (_m *Ledger) Set(update *ledger.Update) (ledger.State, *ledger.TrieUpdate, error) { ret := _m.Called(update) + if len(ret) == 0 { + panic("no return value specified for Set") + } + var r0 ledger.State var r1 *ledger.TrieUpdate var r2 error @@ -187,13 +219,12 @@ func (_m *Ledger) Set(update *ledger.Update) (ledger.State, *ledger.TrieUpdate, return r0, r1, r2 } -type mockConstructorTestingTNewLedger interface { +// NewLedger creates a new instance of Ledger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLedger(t interface { mock.TestingT Cleanup(func()) -} - -// NewLedger creates a new instance of Ledger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewLedger(t mockConstructorTestingTNewLedger) *Ledger { +}) *Ledger { mock := &Ledger{} mock.Mock.Test(t) diff --git a/ledger/mock/migration.go b/ledger/mock/migration.go deleted file mode 100644 index 3ae65acd657..00000000000 --- a/ledger/mock/migration.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mock - -import ( - ledger "github.com/onflow/flow-go/ledger" - mock "github.com/stretchr/testify/mock" -) - -// Migration is an autogenerated mock type for the Migration type -type Migration struct { - mock.Mock -} - -// Execute provides a mock function with given fields: payloads -func (_m *Migration) Execute(payloads []ledger.Payload) ([]ledger.Payload, error) { - ret := _m.Called(payloads) - - var r0 []ledger.Payload - var r1 error - if rf, ok := ret.Get(0).(func([]ledger.Payload) ([]ledger.Payload, error)); ok { - return rf(payloads) - } - if rf, ok := ret.Get(0).(func([]ledger.Payload) []ledger.Payload); ok { - r0 = rf(payloads) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]ledger.Payload) - } - } - - if rf, ok := ret.Get(1).(func([]ledger.Payload) error); ok { - r1 = rf(payloads) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewMigration interface { - mock.TestingT - Cleanup(func()) -} - -// NewMigration creates a new instance of Migration. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMigration(t mockConstructorTestingTNewMigration) *Migration { - mock := &Migration{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/ledger/mock/reporter.go b/ledger/mock/reporter.go index 5d5e05c4bed..fdd5e3e8a0b 100644 --- a/ledger/mock/reporter.go +++ b/ledger/mock/reporter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -12,10 +12,14 @@ type Reporter struct { mock.Mock } -// Name provides a mock function with given fields: +// Name provides a mock function with no fields func (_m *Reporter) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -30,6 +34,10 @@ func (_m *Reporter) Name() string { func (_m *Reporter) Report(payloads []ledger.Payload, statecommitment ledger.State) error { ret := _m.Called(payloads, statecommitment) + if len(ret) == 0 { + panic("no return value specified for Report") + } + var r0 error if rf, ok := ret.Get(0).(func([]ledger.Payload, ledger.State) error); ok { r0 = rf(payloads, statecommitment) @@ -40,13 +48,12 @@ func (_m *Reporter) Report(payloads []ledger.Payload, statecommitment ledger.Sta return r0 } -type mockConstructorTestingTNewReporter interface { +// NewReporter creates a new instance of Reporter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReporter(t interface { mock.TestingT Cleanup(func()) -} - -// NewReporter creates a new instance of Reporter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewReporter(t mockConstructorTestingTNewReporter) *Reporter { +}) *Reporter { mock := &Reporter{} mock.Mock.Test(t) diff --git a/ledger/partial/ledger.go b/ledger/partial/ledger.go index 91eb9e5fbe7..33b3d141935 100644 --- a/ledger/partial/ledger.go +++ b/ledger/partial/ledger.go @@ -120,10 +120,14 @@ func (l *Ledger) Get(query *ledger.Query) (values []ledger.Value, err error) { // Set updates the ledger given an update // it returns the state after update and errors (if any) func (l *Ledger) Set(update *ledger.Update) (newState ledger.State, trieUpdate *ledger.TrieUpdate, err error) { - // TODO: add test case if update.Size() == 0 { - // return current state root unchanged - return update.State(), nil, nil + return update.State(), + &ledger.TrieUpdate{ + RootHash: ledger.RootHash(update.State()), + Paths: []ledger.Path{}, + Payloads: []*ledger.Payload{}, + }, + nil } trieUpdate, err = pathfinder.UpdateToTrieUpdate(update, l.pathFinderVersion) diff --git a/ledger/partial/ledger_test.go b/ledger/partial/ledger_test.go index f7fa9b77eb4..209bf707ed0 100644 --- a/ledger/partial/ledger_test.go +++ b/ledger/partial/ledger_test.go @@ -9,12 +9,14 @@ import ( executionState "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" "github.com/onflow/flow-go/ledger/common/testutils" "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/wal/fixtures" "github.com/onflow/flow-go/ledger/partial" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/utils/unittest" ) func TestFunctionalityWithCompleteTrie(t *testing.T) { @@ -125,9 +127,9 @@ func TestProofsForEmptyRegisters(t *testing.T) { require.NoError(t, err) // Read one register during execution. 
- registerID := flow.NewRegisterID("b", "nk") + registerID := flow.NewRegisterID(unittest.RandomAddressFixture(), "nk") allKeys := []ledger.Key{ - executionState.RegisterIDToKey(registerID), + convert.RegisterIDToLedgerKey(registerID), } newState := updated.State() @@ -142,7 +144,7 @@ func TestProofsForEmptyRegisters(t *testing.T) { assert.NoError(t, err) assert.Equal(t, pled.InitialState(), emptyState) - query, err := ledger.NewQuery(newState, []ledger.Key{executionState.RegisterIDToKey(registerID)}) + query, err := ledger.NewQuery(newState, []ledger.Key{convert.RegisterIDToLedgerKey(registerID)}) require.NoError(t, err) results, err := pled.Get(query) @@ -150,5 +152,20 @@ func TestProofsForEmptyRegisters(t *testing.T) { require.Len(t, results, 1) require.Empty(t, results[0]) +} + +func TestEmptyLedger(t *testing.T) { + l, err := complete.NewLedger(&fixtures.NoopWAL{}, 100, &metrics.NoopCollector{}, zerolog.Logger{}, complete.DefaultPathFinderVersion) + require.NoError(t, err) + u, err := ledger.NewUpdate( + ledger.State(unittest.StateCommitmentFixture()), + []ledger.Key{}, + []ledger.Value{}, + ) + require.NoError(t, err) + newState, trieUpdate, err := l.Set(u) + require.NoError(t, err) + require.True(t, trieUpdate.IsEmpty()) + require.Equal(t, u.State(), newState) } diff --git a/ledger/trie.go b/ledger/trie.go index 17f2ba1a232..94224dc7185 100644 --- a/ledger/trie.go +++ b/ledger/trie.go @@ -9,9 +9,11 @@ import ( "github.com/fxamacker/cbor/v2" - cryptoHash "github.com/onflow/flow-go/crypto/hash" + cryptoHash "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/ledger/common/bitutils" "github.com/onflow/flow-go/ledger/common/hash" + "github.com/onflow/flow-go/model/flow" ) // Path captures storage path of a payload; @@ -236,6 +238,14 @@ func (k encKey) DeepCopy() encKey { return newK } +const ( + KeyPartOwner = uint16(0) + // Deprecated: KeyPartController was only used by the very first + // version of Cadence for access control, which was later retired + _ = uint16(1) // DO NOT REUSE + KeyPartKey = uint16(2) +) + // Payload is the smallest immutable storable unit in ledger type Payload struct { // encKey is key encoded using PayloadVersion. @@ -319,6 +329,38 @@ func (p *Payload) Key() (Key, error) { return *k, nil } +// EncodedKey returns payload key. +// CAUTION: do not modify returned encoded key +// because it shares underlying data with payload key. +func (p *Payload) EncodedKey() []byte { + if p == nil { + return nil + } + return p.encKey +} + +// Address returns: +// - (address, nil) if the payload is for an account, the account address is returned +// - (flow.EmptyAddress, nil) if the payload is not for an account (global register) +// - (flow.EmptyAddress, err) if running into any exception +// The zero address is used for global Payloads and is not an actual account +func (p *Payload) Address() (flow.Address, error) { + if p == nil { + return flow.EmptyAddress, fmt.Errorf("failed to get payload address: payload is nil") + } + if len(p.encKey) == 0 { + return flow.EmptyAddress, fmt.Errorf("failed to get payload address: encoded key is empty") + } + b, found, err := decodeKeyPartValueByType(p.encKey, KeyPartOwner, true, PayloadVersion) + if err != nil { + return flow.EmptyAddress, err + } + if !found { + return flow.EmptyAddress, fmt.Errorf("failed to find address by type %d", KeyPartOwner) + } + return flow.BytesToAddress(b), nil +} + // Value returns payload value. // CAUTION: do not modify returned value because it shares underlying data with payload value. 
func (p *Payload) Value() Value { diff --git a/ledger/trie_encoder.go b/ledger/trie_encoder.go index 27bbf9e9a84..442bb46e28a 100644 --- a/ledger/trie_encoder.go +++ b/ledger/trie_encoder.go @@ -148,28 +148,28 @@ func DecodeKeyPart(encodedKeyPart []byte) (*KeyPart, error) { } // decode the key part content (zerocopy) - key, err := decodeKeyPart(rest, true, version) + kpt, kpv, err := decodeKeyPart(rest, true, version) if err != nil { return nil, fmt.Errorf("error decoding key part: %w", err) } - return key, nil + return &KeyPart{Type: kpt, Value: kpv}, nil } // decodeKeyPart decodes inp into KeyPart. If zeroCopy is true, KeyPart // references data in inp. Otherwise, it is copied. -func decodeKeyPart(inp []byte, zeroCopy bool, _ uint16) (*KeyPart, error) { +func decodeKeyPart(inp []byte, zeroCopy bool, _ uint16) (uint16, []byte, error) { // read key part type and the rest is the key item part kpt, kpv, err := utils.ReadUint16(inp) if err != nil { - return nil, fmt.Errorf("error decoding key part (content): %w", err) + return 0, nil, fmt.Errorf("error decoding key part (content): %w", err) } if zeroCopy { - return &KeyPart{Type: kpt, Value: kpv}, nil + return kpt, kpv, nil } v := make([]byte, len(kpv)) copy(v, kpv) - return &KeyPart{Type: kpt, Value: v}, nil + return kpt, v, nil } // EncodeKey encodes a key into a byte slice @@ -210,6 +210,9 @@ func encodeAndAppendKey(buffer []byte, k *Key, version uint16) []byte { } func encodedKeyLength(k *Key, version uint16) int { + // NOTE: RegisterSize() in fvm/environment/accounts.go needs to be in sync with encodedKeyLength(). + // Please update RegisterSize() when this function is updated. + // Key is encoded as: number of key parts (2 bytes) and for each key part, // the key part size (4 bytes) + encoded key part (n bytes). size := 2 + 4*len(k.KeyParts) @@ -240,6 +243,63 @@ func DecodeKey(encodedKey []byte) (*Key, error) { return key, nil } +func decodeKeyPartValueByType(inp []byte, typ uint16, zeroCopy bool, version uint16) ([]byte, bool, error) { + // Read number of key parts + numOfParts, rest, err := utils.ReadUint16(inp) + if err != nil { + return nil, false, fmt.Errorf("error decoding number of key parts: %w", err) + } + + for i := 0; i < int(numOfParts); i++ { + var kpt uint16 + var kpv []byte + + kpt, kpv, rest, err = decodeKeyPartWithEncodedSizeInfo(rest, zeroCopy, version) + if err != nil { + return nil, false, err + } + if kpt == typ { + return kpv, true, nil + } + } + + return nil, false, nil +} + +func decodeKeyPartWithEncodedSizeInfo( + inp []byte, + zeroCopy bool, + version uint16, +) ( + // kp KeyPart, + kpt uint16, + kpv []byte, + rest []byte, + err error, +) { + + // Read encoded key part size + kpEncSize, rest, err := utils.ReadUint32(inp) + if err != nil { + return 0, nil, nil, fmt.Errorf("error decoding key part: %w", err) + } + + // Read encoded key part + var kpEnc []byte + kpEnc, rest, err = utils.ReadSlice(rest, int(kpEncSize)) + if err != nil { + return 0, nil, nil, fmt.Errorf("error decoding key part: %w", err) + } + + // Decode encoded key part + kpType, kpValue, err := decodeKeyPart(kpEnc, zeroCopy, version) + if err != nil { + return 0, nil, nil, fmt.Errorf("error decoding key part: %w", err) + } + + return kpType, kpValue, rest, nil +} + // decodeKey decodes inp into Key. If zeroCopy is true, returned key // references data in inp. Otherwise, it is copied. 
func decodeKey(inp []byte, zeroCopy bool, version uint16) (*Key, error) { @@ -257,27 +317,15 @@ func decodeKey(inp []byte, zeroCopy bool, version uint16) (*Key, error) { key.KeyParts = make([]KeyPart, numOfParts) for i := 0; i < int(numOfParts); i++ { - var kpEncSize uint32 - var kpEnc []byte - // read encoded key part size - kpEncSize, rest, err = utils.ReadUint32(rest) - if err != nil { - return nil, fmt.Errorf("error decoding key (content): %w", err) - } - - // read encoded key part - kpEnc, rest, err = utils.ReadSlice(rest, int(kpEncSize)) - if err != nil { - return nil, fmt.Errorf("error decoding key (content): %w", err) - } + var kpt uint16 + var kpv []byte - // decode encoded key part - kp, err := decodeKeyPart(kpEnc, zeroCopy, version) + kpt, kpv, rest, err = decodeKeyPartWithEncodedSizeInfo(rest, zeroCopy, version) if err != nil { - return nil, fmt.Errorf("error decoding key (content): %w", err) + return nil, err } - key.KeyParts[i] = *kp + key.KeyParts[i] = KeyPart{kpt, kpv} } return key, nil } diff --git a/ledger/trie_test.go b/ledger/trie_test.go index 8edc97b32a7..3b5e6031da4 100644 --- a/ledger/trie_test.go +++ b/ledger/trie_test.go @@ -6,6 +6,8 @@ import ( "github.com/fxamacker/cbor/v2" "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" ) // TestPayloadEquals tests equality of payloads. It tests: @@ -308,26 +310,52 @@ func TestPayloadKey(t *testing.T) { k, err := p.Key() require.NoError(t, err) require.Equal(t, Key{}, k) + + _, err = p.Address() + require.Error(t, err) }) t.Run("empty payload", func(t *testing.T) { p := Payload{} k, err := p.Key() require.NoError(t, err) require.Equal(t, Key{}, k) + + _, err = p.Address() + require.Error(t, err) }) t.Run("empty key", func(t *testing.T) { p := NewPayload(Key{}, Value{}) k, err := p.Key() require.NoError(t, err) require.Equal(t, Key{}, k) + + _, err = p.Address() + require.Error(t, err) + }) + t.Run("global key", func(t *testing.T) { + key := Key{KeyParts: []KeyPart{{Type: 0, Value: []byte{}}, {Type: 1, Value: []byte("def")}}} + value := Value([]byte{0, 1, 2}) + p := NewPayload(key, value) + k, err := p.Key() + require.NoError(t, err) + require.Equal(t, key, k) + + addr, err := p.Address() + require.NoError(t, err) + require.Equal(t, flow.EmptyAddress, addr) }) t.Run("key", func(t *testing.T) { - key := Key{KeyParts: []KeyPart{{Type: 0, Value: []byte("abc")}, {Type: 1, Value: []byte("def")}}} + address := []byte{1, 2, 3, 4, 5, 6, 7, 8} + key := Key{KeyParts: []KeyPart{{Type: 0, Value: address}, {Type: 1, Value: []byte("def")}}} value := Value([]byte{0, 1, 2}) p := NewPayload(key, value) k, err := p.Key() require.NoError(t, err) require.Equal(t, key, k) + + addr, err := p.Address() + require.NoError(t, err) + require.Equal(t, flow.Address(address), addr) }) } diff --git a/model/access/compatible_range.go b/model/access/compatible_range.go new file mode 100644 index 00000000000..6ae74f7cddc --- /dev/null +++ b/model/access/compatible_range.go @@ -0,0 +1,10 @@ +package access + +// CompatibleRange contains the first and the last height that the node's version supports. +type CompatibleRange struct { + // StartHeight is the first block that the version supports. + StartHeight uint64 + + // EndHeight is the last block that the version supports. 
+ EndHeight uint64 +} diff --git a/model/access/network.go b/model/access/network.go new file mode 100644 index 00000000000..bd983fd5b48 --- /dev/null +++ b/model/access/network.go @@ -0,0 +1,10 @@ +package access + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// NetworkParameters contains the network-wide parameters for the Flow blockchain. +type NetworkParameters struct { + ChainID flow.ChainID +} diff --git a/model/access/node_version_info.go b/model/access/node_version_info.go new file mode 100644 index 00000000000..a6938acb72a --- /dev/null +++ b/model/access/node_version_info.go @@ -0,0 +1,23 @@ +package access + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// NodeVersionInfo contains information about node, such as semver, commit, sporkID, protocolVersion, etc +type NodeVersionInfo struct { + Semver string + Commit string + SporkId flow.Identifier + // ProtocolVersion is the deprecated protocol version number. + // Deprecated: Previously this referred to the major software version as of the most recent spork. + // Replaced by protocol_state_version. + ProtocolVersion uint64 + // ProtocolStateVersion is the Protocol State version as of the latest finalized block. + // This tracks the schema version of the Protocol State and is used to coordinate breaking changes in the Protocol. + // Version numbers are monotonically increasing. + ProtocolStateVersion uint64 + SporkRootBlockHeight uint64 + NodeRootBlockHeight uint64 + CompatibleRange *CompatibleRange +} diff --git a/model/access/transaction_result.go b/model/access/transaction_result.go new file mode 100644 index 00000000000..ef6f53707bf --- /dev/null +++ b/model/access/transaction_result.go @@ -0,0 +1,25 @@ +package access + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// TransactionResult represents a flow.TransactionResult with additional fields required for the Access API +type TransactionResult struct { + Status flow.TransactionStatus + StatusCode uint + Events []flow.Event + ErrorMessage string + BlockID flow.Identifier + TransactionID flow.Identifier + CollectionID flow.Identifier + BlockHeight uint64 +} + +func (r *TransactionResult) IsExecuted() bool { + return r.Status == flow.TransactionStatusExecuted || r.Status == flow.TransactionStatusSealed +} + +func (r *TransactionResult) IsFinal() bool { + return r.Status == flow.TransactionStatusSealed || r.Status == flow.TransactionStatusExpired +} diff --git a/model/bootstrap/filenames.go b/model/bootstrap/filenames.go index 8933aa31563..8da9f564fd4 100644 --- a/model/bootstrap/filenames.go +++ b/model/bootstrap/filenames.go @@ -23,8 +23,9 @@ var ( DirnameRootBlockVotes = filepath.Join(DirnamePublicBootstrap, "root-block-votes") FileNamePartnerWeights = "partner-weights.json" - PathRootBlockData = filepath.Join(DirnamePublicBootstrap, "root-block.json") - PathRootProtocolStateSnapshot = filepath.Join(DirnamePublicBootstrap, "root-protocol-state-snapshot.json") + PathRootBlockData = filepath.Join(DirnamePublicBootstrap, "root-block.json") + PathIntermediaryBootstrappingData = filepath.Join(DirnamePublicBootstrap, "intermediary-bootstrapping-data.json") + PathRootProtocolStateSnapshot = filepath.Join(DirnamePublicBootstrap, "root-protocol-state-snapshot.json") FilenameWALRootCheckpoint = "root.checkpoint" PathRootCheckpoint = filepath.Join(DirnameExecutionState, FilenameWALRootCheckpoint) // only available on an execution node diff --git a/model/bootstrap/node_info.go b/model/bootstrap/node_info.go index cdc6f855c4a..24da2e19b20 100644 --- 
a/model/bootstrap/node_info.go +++ b/model/bootstrap/node_info.go @@ -4,13 +4,14 @@ package bootstrap import ( "encoding/json" "fmt" - "sort" "strings" + "github.com/onflow/crypto" + "golang.org/x/exp/slices" + sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" ) @@ -25,9 +26,9 @@ const ( ) const ( - DefaultMachineAccountSignAlgo = sdkcrypto.ECDSA_P256 - DefaultMachineAccountHashAlgo = sdkcrypto.SHA3_256 - DefaultMachineAccountKeyIndex uint = 0 + DefaultMachineAccountSignAlgo = sdkcrypto.ECDSA_P256 + DefaultMachineAccountHashAlgo = sdkcrypto.SHA3_256 + DefaultMachineAccountKeyIndex uint32 = 0 ) // ErrMissingPrivateInfo is returned when a method is called on NodeInfo @@ -55,7 +56,7 @@ type NodeMachineAccountInfo struct { EncodedPrivateKey []byte // KeyIndex is the index of the key in the associated machine account - KeyIndex uint + KeyIndex uint32 // SigningAlgorithm is the algorithm used by the machine account along with // the above private key to create cryptographic signatures @@ -158,6 +159,7 @@ type NodeInfoPub struct { Weight uint64 NetworkPubKey encodable.NetworkPubKey StakingPubKey encodable.StakingPubKey + StakingPoP encodable.StakingKeyPoP } // decodableNodeInfoPub provides backward-compatible decoding of old models @@ -169,11 +171,24 @@ type decodableNodeInfoPub struct { Weight uint64 NetworkPubKey encodable.NetworkPubKey StakingPubKey encodable.StakingPubKey + StakingPoP encodable.StakingKeyPoP // Stake previously was used in place of the Weight field. // Deprecated: supported in decoding for backward-compatibility Stake uint64 } +func (info *NodeInfoPub) Equals(other *NodeInfoPub) bool { + if other == nil { + return false + } + return info.Address == other.Address && + info.NodeID == other.NodeID && + info.Role == other.Role && + info.Weight == other.Weight && + info.NetworkPubKey.PublicKey.Equals(other.NetworkPubKey.PublicKey) && + info.StakingPubKey.PublicKey.Equals(other.StakingPubKey.PublicKey) +} + func (info *NodeInfoPub) UnmarshalJSON(b []byte) error { var decodable decodableNodeInfoPub err := json.Unmarshal(b, &decodable) @@ -193,6 +208,7 @@ func (info *NodeInfoPub) UnmarshalJSON(b []byte) error { info.Weight = decodable.Weight info.NetworkPubKey = decodable.NetworkPubKey info.StakingPubKey = decodable.StakingPubKey + info.StakingPoP = decodable.StakingPoP return nil } @@ -210,6 +226,16 @@ type NodePrivateKeys struct { // A NodeInfo instance can contain EITHER public keys OR private keys, not both. // This can be ensured by only using the provided constructors and NOT // manually constructing an instance. +// +// Deprecated: There is a concern about the current usage pattern of `NodeInfo`. +// There are no build-time enforcements of using `NodeInfo` for either the private or public usage. The struct +// can mistakenly be used for both cases. Other than introducing a confusing design, developers can accidentally +// confuse the private usage as a public one, for instance by writing the private info (including the private +// keys) into a file that is publicly shared. +// There is an ongoing attempt to replace `NodeInfo` by the explicit structures `NodeInfoPriv` and `NodeInfoPub` +// in https://github.com/onflow/flow-go/pull/7476. +// It is recommended not to use `NodeInfo` in new code development in order to limit the structure usage, and to +// use `NodeInfoPriv` and `NodeInfoPub` instead.
type NodeInfo struct { // NodeID is the unique identifier of the node in the network @@ -226,11 +252,17 @@ type NodeInfo struct { // Weight is the weight of the node Weight uint64 - // key information is private - networkPubKey crypto.PublicKey + // PRIVATE Variant: networkPrivKey crypto.PrivateKey - stakingPubKey crypto.PublicKey stakingPrivKey crypto.PrivateKey + + // By convention, `NodeInfo` must either include the public fields and exclude the private fields, or + // vice versa. Mixtures are not allowed. Please check function [NodeInfoType] for the precise convention. + // + // PUBLIC Variant: + networkPubKey crypto.PublicKey + stakingPubKey crypto.PublicKey + stakingPoP crypto.Signature } func NewPublicNodeInfo( @@ -240,6 +272,7 @@ func NewPublicNodeInfo( weight uint64, networkKey crypto.PublicKey, stakingKey crypto.PublicKey, + stakingPoP crypto.Signature, ) NodeInfo { return NodeInfo{ NodeID: nodeID, @@ -248,6 +281,7 @@ func NewPublicNodeInfo( Weight: weight, networkPubKey: networkKey, stakingPubKey: stakingKey, + stakingPoP: stakingPoP, } } @@ -258,7 +292,12 @@ func NewPrivateNodeInfo( weight uint64, networkKey crypto.PrivateKey, stakingKey crypto.PrivateKey, -) NodeInfo { +) (NodeInfo, error) { + pop, err := crypto.BLSGeneratePOP(stakingKey) + if err != nil { + return NodeInfo{}, fmt.Errorf("failed to generate PoP: %w", err) + } + return NodeInfo{ NodeID: nodeID, Role: role, @@ -268,7 +307,8 @@ func NewPrivateNodeInfo( stakingPrivKey: stakingKey, networkPubKey: networkKey.PublicKey(), stakingPubKey: stakingKey.PublicKey(), - } + stakingPoP: pop, + }, nil } // Type returns the type of the node info instance. @@ -276,7 +316,7 @@ func (node NodeInfo) Type() NodeInfoType { if node.networkPrivKey != nil && node.stakingPrivKey != nil { return NodeInfoTypePrivate } - if node.networkPubKey != nil && node.stakingPubKey != nil { + if node.networkPubKey != nil && node.stakingPubKey != nil && node.stakingPoP != nil { return NodeInfoTypePublic } return NodeInfoTypeInvalid @@ -296,6 +336,17 @@ func (node NodeInfo) StakingPubKey() crypto.PublicKey { return node.stakingPrivKey.PublicKey() } +func (node NodeInfo) StakingPoP() (crypto.Signature, error) { + if node.stakingPoP != nil { + return node.stakingPoP, nil + } + pop, err := crypto.BLSGeneratePOP(node.stakingPrivKey) + if err != nil { + return nil, fmt.Errorf("staking PoP generation failed: %w", err) + } + return pop, nil +} + func (node NodeInfo) PrivateKeys() (*NodePrivateKeys, error) { if node.Type() != NodeInfoTypePrivate { return nil, ErrMissingPrivateInfo @@ -321,8 +372,17 @@ func (node NodeInfo) Private() (NodeInfoPriv, error) { }, nil } -// Public returns the canonical public encodable structure -func (node NodeInfo) Public() NodeInfoPub { +// Public returns the canonical encodable structure holding the node's public information. +// It derives the networking and staking public keys, as well as the Proof of Possession (PoP) of the staking private key +// if they are not already provided in the NodeInfo. +// +// It errors, if there is a problem generating the staking key PoP. 
+func (node NodeInfo) Public() (NodeInfoPub, error) { + stakingPoP, err := node.StakingPoP() + if err != nil { + return NodeInfoPub{}, fmt.Errorf("failed to generate staking PoP: %w", err) + } + return NodeInfoPub{ Role: node.Role, Address: node.Address, @@ -330,50 +390,53 @@ func (node NodeInfo) Public() NodeInfoPub { Weight: node.Weight, NetworkPubKey: encodable.NetworkPubKey{PublicKey: node.NetworkPubKey()}, StakingPubKey: encodable.StakingPubKey{PublicKey: node.StakingPubKey()}, - } + StakingPoP: encodable.StakingKeyPoP{Signature: stakingPoP}, + }, nil } // PartnerPublic returns the public data for a partner node. -func (node NodeInfo) PartnerPublic() PartnerNodeInfoPub { +func (node NodeInfo) PartnerPublic() (PartnerNodeInfoPub, error) { + + stakingPoP, err := node.StakingPoP() + if err != nil { + return PartnerNodeInfoPub{}, fmt.Errorf("failed to generate staking PoP: %w", err) + } return PartnerNodeInfoPub{ Role: node.Role, Address: node.Address, NodeID: node.NodeID, NetworkPubKey: encodable.NetworkPubKey{PublicKey: node.NetworkPubKey()}, StakingPubKey: encodable.StakingPubKey{PublicKey: node.StakingPubKey()}, - } + StakingPoP: stakingPoP, + }, nil } // Identity returns the node info as a public Flow identity. func (node NodeInfo) Identity() *flow.Identity { identity := &flow.Identity{ - NodeID: node.NodeID, - Address: node.Address, - Role: node.Role, - Weight: node.Weight, - StakingPubKey: node.StakingPubKey(), - NetworkPubKey: node.NetworkPubKey(), + IdentitySkeleton: flow.IdentitySkeleton{ + NodeID: node.NodeID, + Address: node.Address, + Role: node.Role, + InitialWeight: node.Weight, + StakingPubKey: node.stakingPubKey, + NetworkPubKey: node.networkPubKey, + }, + DynamicIdentity: flow.DynamicIdentity{ + EpochParticipationStatus: flow.EpochParticipationStatusActive, + }, } return identity } -// NodeInfoFromIdentity converts an identity to a public NodeInfo -func NodeInfoFromIdentity(identity *flow.Identity) NodeInfo { - return NewPublicNodeInfo( - identity.NodeID, - identity.Role, - identity.Address, - identity.Weight, - identity.NetworkPubKey, - identity.StakingPubKey) -} - -func PrivateNodeInfoFromIdentity(identity *flow.Identity, networkKey, stakingKey crypto.PrivateKey) NodeInfo { +// PrivateNodeInfoFromIdentity builds a NodeInfo from a flow Identity. +// WARNING: Nothing enforces that the output NodeInfo's keys are corresponding to the input Identity. +func PrivateNodeInfoFromIdentity(identity *flow.Identity, networkKey, stakingKey crypto.PrivateKey) (NodeInfo, error) { return NewPrivateNodeInfo( identity.NodeID, identity.Role, identity.Address, - identity.Weight, + identity.InitialWeight, networkKey, stakingKey, ) @@ -391,11 +454,13 @@ func FilterByRole(nodes []NodeInfo, role flow.Role) []NodeInfo { } // Sort sorts the NodeInfo list using the given ordering. -func Sort(nodes []NodeInfo, order flow.IdentityOrder) []NodeInfo { +// +// The sorted list is returned and the original list is untouched. 
+func Sort(nodes []NodeInfo, order flow.IdentityOrder[flow.Identity]) []NodeInfo { dup := make([]NodeInfo, len(nodes)) copy(dup, nodes) - sort.Slice(dup, func(i, j int) bool { - return order(dup[i].Identity(), dup[j].Identity()) + slices.SortFunc(dup, func(i, j NodeInfo) int { + return order(i.Identity(), j.Identity()) }) return dup } @@ -408,10 +473,14 @@ func ToIdentityList(nodes []NodeInfo) flow.IdentityList { return il } -func ToPublicNodeInfoList(nodes []NodeInfo) []NodeInfoPub { +func ToPublicNodeInfoList(nodes []NodeInfo) ([]NodeInfoPub, error) { pub := make([]NodeInfoPub, 0, len(nodes)) for _, node := range nodes { - pub = append(pub, node.Public()) + info, err := node.Public() + if err != nil { + return nil, fmt.Errorf("could not read public info: %w", err) + } + pub = append(pub, info) } - return pub + return pub, nil } diff --git a/model/bootstrap/node_info_test.go b/model/bootstrap/node_info_test.go index 536c0c808f9..4c67b4bf81c 100644 --- a/model/bootstrap/node_info_test.go +++ b/model/bootstrap/node_info_test.go @@ -9,14 +9,36 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/flow/order" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) -func TestSort(t *testing.T) { +func TestIdentityListCanonical(t *testing.T) { nodes := unittest.NodeInfosFixture(20) - nodes = bootstrap.Sort(nodes, order.Canonical) - require.True(t, bootstrap.ToIdentityList(nodes).Sorted(order.Canonical)) + // make sure the list is not sorted + nodes[0].NodeID[0], nodes[1].NodeID[0] = 2, 1 + require.False(t, flow.IsIdentifierCanonical(nodes[0].NodeID, nodes[1].NodeID)) + ids := bootstrap.ToIdentityList(nodes) + assert.False(t, flow.IsIdentityListCanonical(ids)) + + // make a copy of the original list of nodes + nodesCopy := make([]bootstrap.NodeInfo, len(nodes)) + copy(nodesCopy, nodes) + + sortedNodes := bootstrap.Sort(nodes, flow.Canonical[flow.Identity]) + sortedIds := bootstrap.ToIdentityList(sortedNodes) + require.True(t, flow.IsIdentityListCanonical(sortedIds)) + // make sure original list didn't change + assert.Equal(t, nodesCopy, nodes) + + // check `IsIdentityListCanonical` detects order equality in a sorted list + nodes[1] = nodes[10] // add a duplication + copy(nodesCopy, nodes) + sortedNodes = bootstrap.Sort(nodes, flow.Canonical[flow.Identity]) + sortedIds = bootstrap.ToIdentityList(sortedNodes) + assert.False(t, flow.IsIdentityListCanonical(sortedIds)) + // make sure original list didn't change + assert.Equal(t, nodesCopy, nodes) } func TestNodeConfigEncodingJSON(t *testing.T) { @@ -44,16 +66,18 @@ func TestNodeConfigEncodingJSON(t *testing.T) { func TestNodeInfoPubEncodingJSON(t *testing.T) { t.Run("normal node info", func(t *testing.T) { - conf := unittest.NodeInfoFixture().Public() + conf, err := unittest.NodeInfoFixture().Public() + require.NoError(t, err) enc, err := json.Marshal(conf) require.NoError(t, err) var dec bootstrap.NodeInfoPub err = json.Unmarshal(enc, &dec) require.NoError(t, err) - assert.Equal(t, conf, dec) + assert.True(t, dec.Equals(&conf)) }) t.Run("compat: should accept old files using Stake field", func(t *testing.T) { - conf := unittest.NodeInfoFixture().Public() + conf, err := unittest.NodeInfoFixture().Public() + require.NoError(t, err) enc, err := json.Marshal(conf) require.NoError(t, err) // emulate the old encoding by replacing the new field with old field name @@ -61,6 +85,6 @@ func TestNodeInfoPubEncodingJSON(t *testing.T) { var dec 
bootstrap.NodeInfoPub err = json.Unmarshal(enc, &dec) require.NoError(t, err) - assert.Equal(t, conf, dec) + assert.True(t, dec.Equals(&conf)) }) } diff --git a/model/bootstrap/partner_nodes.go.go b/model/bootstrap/partner_nodes.go.go index a65f09d2e18..36e5c9cc41a 100644 --- a/model/bootstrap/partner_nodes.go.go +++ b/model/bootstrap/partner_nodes.go.go @@ -14,4 +14,5 @@ type PartnerNodeInfoPub struct { NodeID flow.Identifier NetworkPubKey encodable.NetworkPubKey StakingPubKey encodable.StakingPubKey + StakingPoP []byte } diff --git a/model/chainsync/range.go b/model/chainsync/range.go index 98d970641e1..0294593a485 100644 --- a/model/chainsync/range.go +++ b/model/chainsync/range.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package chainsync import "github.com/onflow/flow-go/model/flow" diff --git a/model/chunks/chunkFaults.go b/model/chunks/chunkFaults.go index 2c50194d6a9..eaab5edec99 100644 --- a/model/chunks/chunkFaults.go +++ b/model/chunks/chunkFaults.go @@ -2,19 +2,26 @@ package chunks import ( "encoding/hex" + "errors" "fmt" "github.com/onflow/flow-go/model/flow" ) -// ChunkFault holds information about a fault that is found while +// ChunkFaultError holds information about a fault that is found while // verifying a chunk -type ChunkFault interface { +type ChunkFaultError interface { + error ChunkIndex() uint64 ExecutionResultID() flow.Identifier String() string } +func IsChunkFaultError(err error) bool { + var cfErr ChunkFaultError + return errors.As(err, &cfErr) +} + // CFMissingRegisterTouch is returned when a register touch is missing (read or update) type CFMissingRegisterTouch struct { regsterIDs []string @@ -23,6 +30,8 @@ type CFMissingRegisterTouch struct { txID flow.Identifier // very first transaction inside the chunk that required this register } +var _ ChunkFaultError = (*CFMissingRegisterTouch)(nil) + func (cf CFMissingRegisterTouch) String() string { hexStrings := make([]string, len(cf.regsterIDs)) for i, s := range cf.regsterIDs { @@ -32,6 +41,10 @@ func (cf CFMissingRegisterTouch) String() string { return fmt.Sprintf("at least one register touch was missing inside the chunk data package that was needed while running transactions of chunk %d of result %s (tx hash of one of them: %s), hex-encoded register ids: %s", cf.chunkIndex, cf.execResID.String(), cf.txID.String(), hexStrings) } +func (cf CFMissingRegisterTouch) Error() string { + return cf.String() +} + // ChunkIndex returns chunk index of the faulty chunk func (cf CFMissingRegisterTouch) ChunkIndex() uint64 { return cf.chunkIndex @@ -59,10 +72,16 @@ type CFNonMatchingFinalState struct { execResID flow.Identifier } +var _ ChunkFaultError = (*CFNonMatchingFinalState)(nil) + func (cf CFNonMatchingFinalState) String() string { return fmt.Sprintf("final state commitment doesn't match, expected [%x] but computed [%x]", cf.expected, cf.computed) } +func (cf CFNonMatchingFinalState) Error() string { + return cf.String() +} + // ChunkIndex returns chunk index of the faulty chunk func (cf CFNonMatchingFinalState) ChunkIndex() uint64 { return cf.chunkIndex @@ -90,6 +109,8 @@ type CFInvalidEventsCollection struct { eventIDs flow.IdentifierList } +var _ ChunkFaultError = (*CFInvalidEventsCollection)(nil) + func NewCFInvalidEventsCollection(expected flow.Identifier, computed flow.Identifier, chInx uint64, execResID flow.Identifier, events flow.EventsList) *CFInvalidEventsCollection { return &CFInvalidEventsCollection{ expected: expected, @@ -113,6 +134,10 @@ func (c *CFInvalidEventsCollection) 
String() string { c.chunkIndex, c.resultID, c.eventIDs) } +func (cf CFInvalidEventsCollection) Error() string { + return cf.String() +} + // CFInvalidServiceEventsEmitted is returned when service events are different from the chunk's one type CFInvalidServiceEventsEmitted struct { expected flow.ServiceEventList @@ -121,6 +146,8 @@ type CFInvalidServiceEventsEmitted struct { resultID flow.Identifier } +var _ ChunkFaultError = (*CFInvalidServiceEventsEmitted)(nil) + func CFInvalidServiceSystemEventsEmitted(expected flow.ServiceEventList, computed flow.ServiceEventList, chInx uint64, execResID flow.Identifier) *CFInvalidServiceEventsEmitted { return &CFInvalidServiceEventsEmitted{ expected: expected, @@ -142,6 +169,10 @@ func (c *CFInvalidServiceEventsEmitted) String() string { return fmt.Sprintf("service events differs, got [%s] expected [%s] for chunk %d with result ID %s", c.computed, c.expected, c.chunkIndex, c.resultID) } +func (cf CFInvalidServiceEventsEmitted) Error() string { + return cf.String() +} + // CFInvalidVerifiableChunk is returned when a verifiable chunk is invalid // this includes cases that code fails to construct a partial trie, // collection hashes doesn't match @@ -153,10 +184,16 @@ type CFInvalidVerifiableChunk struct { execResID flow.Identifier } +var _ ChunkFaultError = (*CFInvalidVerifiableChunk)(nil) + func (cf CFInvalidVerifiableChunk) String() string { return fmt.Sprint("invalid verifiable chunk due to ", cf.reason, cf.details.Error()) } +func (cf CFInvalidVerifiableChunk) Error() string { + return cf.String() +} + // ChunkIndex returns chunk index of the faulty chunk func (cf CFInvalidVerifiableChunk) ChunkIndex() uint64 { return cf.chunkIndex @@ -174,3 +211,37 @@ func NewCFInvalidVerifiableChunk(reason string, err error, chInx uint64, execRes chunkIndex: chInx, execResID: execResID} } + +// CFSystemChunkIncludedCollection is returned when a system chunk includes a collection +type CFSystemChunkIncludedCollection struct { + chunkIndex uint64 + execResID flow.Identifier +} + +var _ ChunkFaultError = (*CFSystemChunkIncludedCollection)(nil) + +func (cf CFSystemChunkIncludedCollection) String() string { + return fmt.Sprintf("system chunk data pack must not include a collection but did for chunk %d with result ID %s", cf.chunkIndex, cf.execResID) +} + +func (cf CFSystemChunkIncludedCollection) Error() string { + return cf.String() +} + +// ChunkIndex returns chunk index of the faulty chunk +func (cf CFSystemChunkIncludedCollection) ChunkIndex() uint64 { + return cf.chunkIndex +} + +// ExecutionResultID returns the execution result identifier including the faulty chunk +func (cf CFSystemChunkIncludedCollection) ExecutionResultID() flow.Identifier { + return cf.execResID +} + +// NewCFSystemChunkIncludedCollection creates a new instance of Chunk Fault (SystemChunkIncludedCollection) +func NewCFSystemChunkIncludedCollection(chInx uint64, execResID flow.Identifier) *CFSystemChunkIncludedCollection { + return &CFSystemChunkIncludedCollection{ + chunkIndex: chInx, + execResID: execResID, + } +} diff --git a/model/chunks/chunkFaults_test.go b/model/chunks/chunkFaults_test.go new file mode 100644 index 00000000000..39a82a3e732 --- /dev/null +++ b/model/chunks/chunkFaults_test.go @@ -0,0 +1,102 @@ +package chunks_test + +import ( + "fmt" + "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/chunks" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestIsChunkFaultError tests the IsChunkFaultError function 
returns true for known chunk fault errors +// and false for any other error. +func TestIsChunkFaultError(t *testing.T) { + t.Run("CFMissingRegisterTouch", func(t *testing.T) { + cf := chunks.NewCFMissingRegisterTouch(nil, 0, unittest.IdentifierFixture(), unittest.IdentifierFixture()) + assert.Error(t, cf) + assert.True(t, chunks.IsChunkFaultError(cf)) + + var errType *chunks.CFMissingRegisterTouch + assert.ErrorAs(t, cf, &errType) + }) + + t.Run("CFNonMatchingFinalState", func(t *testing.T) { + cf := chunks.NewCFNonMatchingFinalState(unittest.StateCommitmentFixture(), unittest.StateCommitmentFixture(), 0, unittest.IdentifierFixture()) + assert.Error(t, cf) + assert.True(t, chunks.IsChunkFaultError(cf)) + + var errType *chunks.CFNonMatchingFinalState + assert.ErrorAs(t, cf, &errType) + }) + + t.Run("CFInvalidEventsCollection", func(t *testing.T) { + cf := chunks.NewCFInvalidEventsCollection(unittest.IdentifierFixture(), unittest.IdentifierFixture(), 0, unittest.IdentifierFixture(), nil) + assert.Error(t, cf) + assert.True(t, chunks.IsChunkFaultError(cf)) + + var errType *chunks.CFInvalidEventsCollection + assert.ErrorAs(t, cf, &errType) + }) + + t.Run("CFInvalidVerifiableChunk", func(t *testing.T) { + cf := chunks.NewCFInvalidVerifiableChunk("", nil, 0, unittest.IdentifierFixture()) + assert.Error(t, cf) + assert.True(t, chunks.IsChunkFaultError(cf)) + + var errType *chunks.CFInvalidVerifiableChunk + assert.ErrorAs(t, cf, &errType) + }) + + t.Run("CFSystemChunkIncludedCollection", func(t *testing.T) { + cf := chunks.NewCFSystemChunkIncludedCollection(0, unittest.IdentifierFixture()) + assert.Error(t, cf) + assert.True(t, chunks.IsChunkFaultError(cf)) + + var errType *chunks.CFSystemChunkIncludedCollection + assert.ErrorAs(t, cf, &errType) + }) + + t.Run("CFExecutionDataBlockIDMismatch", func(t *testing.T) { + cf := chunks.NewCFExecutionDataBlockIDMismatch(unittest.IdentifierFixture(), unittest.IdentifierFixture(), 0, unittest.IdentifierFixture()) + assert.Error(t, cf) + assert.True(t, chunks.IsChunkFaultError(cf)) + + var errType *chunks.CFExecutionDataBlockIDMismatch + assert.ErrorAs(t, cf, &errType) + }) + + t.Run("CFExecutionDataChunksLengthMismatch", func(t *testing.T) { + cf := chunks.NewCFExecutionDataChunksLengthMismatch(0, 0, 0, unittest.IdentifierFixture()) + assert.Error(t, cf) + assert.True(t, chunks.IsChunkFaultError(cf)) + + var errType *chunks.CFExecutionDataChunksLengthMismatch + assert.ErrorAs(t, cf, &errType) + }) + + t.Run("CFExecutionDataInvalidChunkCID", func(t *testing.T) { + cf := chunks.NewCFExecutionDataInvalidChunkCID(cid.Cid{}, cid.Cid{}, 0, unittest.IdentifierFixture()) + assert.Error(t, cf) + assert.True(t, chunks.IsChunkFaultError(cf)) + + var errType *chunks.CFExecutionDataInvalidChunkCID + assert.ErrorAs(t, cf, &errType) + }) + + t.Run("CFInvalidExecutionDataID", func(t *testing.T) { + cf := chunks.NewCFInvalidExecutionDataID(unittest.IdentifierFixture(), unittest.IdentifierFixture(), 0, unittest.IdentifierFixture()) + assert.Error(t, cf) + assert.True(t, chunks.IsChunkFaultError(cf)) + + var errType *chunks.CFInvalidExecutionDataID + assert.ErrorAs(t, cf, &errType) + }) + + t.Run("Non ChunkFaultError", func(t *testing.T) { + err := fmt.Errorf("some error") + assert.False(t, chunks.IsChunkFaultError(err)) + }) +} diff --git a/model/chunks/chunkLocator.go b/model/chunks/chunkLocator.go index f8791f668d7..4cd1c61e6ab 100644 --- a/model/chunks/chunkLocator.go +++ b/model/chunks/chunkLocator.go @@ -1,33 +1,62 @@ package chunks import ( + "fmt" + 
"github.com/onflow/flow-go/model/flow" ) // Locator is used to locate a chunk by providing the execution result the chunk belongs to as well as the chunk index within that execution result. // Since a chunk is unique by the result ID and its index in the result's chunk list. +// +//structwrite:immutable - mutations allowed only within the constructor type Locator struct { ResultID flow.Identifier // execution result id that chunk belongs to Index uint64 // index of chunk in the execution result } +// UntrustedLocator is an untrusted input-only representation of a Locator, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedLocator should be validated and converted into +// a trusted Locator using NewLocator constructor. +type UntrustedLocator Locator + +// NewLocator creates a new instance of Locator. +// Construction Locator allowed only within the constructor. +// +// All errors indicate a valid Locator cannot be constructed from the input. +func NewLocator(untrusted UntrustedLocator) (*Locator, error) { + if untrusted.ResultID == flow.ZeroID { + return nil, fmt.Errorf("ResultID must not be zero") + } + return &Locator{ + ResultID: untrusted.ResultID, + Index: untrusted.Index, + }, nil +} + // ID returns a unique id for chunk locator. func (c Locator) ID() flow.Identifier { return flow.MakeID(c) } -// Checksum provides a cryptographic commitment for a chunk locator content. -func (c Locator) Checksum() flow.Identifier { - return flow.MakeID(c) -} +// EqualTo returns true if the two Locator are equivalent. +func (c *Locator) EqualTo(other *Locator) bool { + // Shortcut if `t` and `other` point to the same object; covers case where both are nil. + if c == other { + return true + } + if c == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above) + return false + } -// ChunkLocatorID is a util function that returns identifier of corresponding chunk locator to -// the specified result and chunk index. -func ChunkLocatorID(resultID flow.Identifier, chunkIndex uint64) flow.Identifier { - return Locator{ - ResultID: resultID, - Index: chunkIndex, - }.ID() + return c.ResultID == other.ResultID && + c.Index == other.Index } // LocatorMap maps keeps chunk locators based on their locator id. diff --git a/model/chunks/chunkLocator_test.go b/model/chunks/chunkLocator_test.go index 46df3d7462f..fba4bcf16fd 100644 --- a/model/chunks/chunkLocator_test.go +++ b/model/chunks/chunkLocator_test.go @@ -1,10 +1,13 @@ package chunks_test import ( + "math/rand" "testing" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/chunks" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -22,3 +25,96 @@ func TestChunkLocatorConvert(t *testing.T) { convertedList := locatorMap.ToList() require.ElementsMatch(t, originalList, convertedList) } + +// TestChunkLocatorMalleability verifies that the chunk locator which implements the [flow.IDEntity] interface is not malleable. +func TestChunkLocatorMalleability(t *testing.T) { + unittest.RequireEntityNonMalleable(t, unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), rand.Uint64())) +} + +// TestNewLocator tests the NewLocator constructor with valid and invalid inputs. +// +// Valid Case: +// +// 1. 
Valid input with non-zero ResultID and any index: +// - Should successfully construct a Locator. +// +// Invalid Case: +// +// 2. Invalid input with zero ResultID: +// - Should return an error indicating ResultID must not be zero. +func TestNewLocator(t *testing.T) { + t.Run("valid input with non-zero ResultID", func(t *testing.T) { + locator, err := chunks.NewLocator( + chunks.UntrustedLocator{ + ResultID: unittest.IdentifierFixture(), + Index: 1, + }, + ) + require.NoError(t, err) + require.NotNil(t, locator) + }) + + t.Run("invalid input with zero ResultID", func(t *testing.T) { + locator, err := chunks.NewLocator( + chunks.UntrustedLocator{ + ResultID: flow.ZeroID, + Index: 1, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "ResultID must not be zero") + require.Nil(t, locator) + }) +} + +// TestLocator_EqualTo verifies the correctness of the EqualTo method on Locator. +// It checks that Locators are considered equal if and only if all fields match. +func TestLocator_EqualTo(t *testing.T) { + loc1 := unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 0) + loc2 := unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 1) + + require.False(t, loc1.EqualTo(loc2), "Initially, all fields differ; EqualTo should return false") + + // List of mutations to gradually make loc1 equal to loc2 + mutations := []func(){ + func() { + loc1.ResultID = loc2.ResultID + }, + func() { + loc1.Index = loc2.Index + }, + } + + // Shuffle mutation order + rand.Shuffle(len(mutations), func(i, j int) { + mutations[i], mutations[j] = mutations[j], mutations[i] + }) + + // Apply each mutation one at a time, except the last + for _, mutation := range mutations[:len(mutations)-1] { + mutation() + require.False(t, loc1.EqualTo(loc2)) + } + + // Final mutation: should now be equal + mutations[len(mutations)-1]() + require.True(t, loc1.EqualTo(loc2)) +} + +// TestLocator_EqualTo_Nil verifies the behavior of EqualTo when one or both inputs are nil. +func TestLocator_EqualTo_Nil(t *testing.T) { + var nilLoc *chunks.Locator + nonNil := unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 0) + + t.Run("nil receiver", func(t *testing.T) { + require.False(t, nilLoc.EqualTo(nonNil)) + }) + + t.Run("nil input", func(t *testing.T) { + require.False(t, nonNil.EqualTo(nilLoc)) + }) + + t.Run("both nil", func(t *testing.T) { + require.True(t, nilLoc.EqualTo(nil)) + }) +} diff --git a/model/chunks/chunkassignment.go b/model/chunks/chunkassignment.go index 9642eb8b9ab..52cf03c8944 100644 --- a/model/chunks/chunkassignment.go +++ b/model/chunks/chunkassignment.go @@ -1,52 +1,89 @@ package chunks import ( + "errors" + "fmt" + + "golang.org/x/exp/maps" + "github.com/onflow/flow-go/model/flow" ) -// Assignment is assignment map of the chunks to the list of the verifier nodes +var ErrUnknownChunkIndex = errors.New("verifier assignment for invalid chunk requested") + +// Assignment is an immutable list that, for each chunk (in order of chunk.Index), +// records the set of verifier nodes that are assigned to verify that chunk. +// Assignments are only constructed using AssignmentBuilder, and cannot be modified after construction. 
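A minimal usage sketch of the assignment builder introduced in the hunks below (an editorial illustration, not part of the patch itself; it assumes the NewAssignmentBuilder, Add, Build, and Verifiers signatures shown in this diff, and fabricates verifier IDs with the unittest fixtures already used throughout these tests):

package main

import (
	"fmt"

	"github.com/onflow/flow-go/model/chunks"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	v1, v2 := unittest.IdentifierFixture(), unittest.IdentifierFixture()

	builder := chunks.NewAssignmentBuilder()
	// Chunks must be added in order of their index, starting at 0.
	if err := builder.Add(0, flow.IdentifierList{v1, v2}); err != nil {
		panic(err)
	}
	if err := builder.Add(1, flow.IdentifierList{v2}); err != nil {
		panic(err)
	}
	assignment := builder.Build() // the builder must be discarded after Build()

	verifiers, err := assignment.Verifiers(0) // copy of the verifier set for chunk 0
	if err != nil {
		panic(err)
	}
	fmt.Println(len(verifiers)) // prints 2
}

The builder pattern here is what lets Assignment drop its internal locking and hand out copies safely: once Build() revokes the builder's reference, no code path can mutate the published assignment.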
type Assignment struct { - // TODO: use a slice here instead of a map, which will be more performant - verifiersForChunk map[uint64]map[flow.Identifier]struct{} + verifiersForChunk []map[flow.Identifier]struct{} } -func NewAssignment() *Assignment { - return &Assignment{ - verifiersForChunk: make(map[uint64]map[flow.Identifier]struct{}), +// AssignmentBuilder is a helper for constructing a single new Assignment, +// and should be discarded after calling `Build()`. +// AssignmentBuilder is not safe for concurrent use by multiple goroutines. +type AssignmentBuilder struct { + verifiersForChunk []map[flow.Identifier]struct{} +} + +func NewAssignmentBuilder() *AssignmentBuilder { + return &AssignmentBuilder{ + verifiersForChunk: make([]map[flow.Identifier]struct{}, 0, 1), } } -// Verifiers returns the list of verifier nodes assigned to a chunk -func (a *Assignment) Verifiers(chunk *flow.Chunk) flow.IdentifierList { - v := make([]flow.Identifier, 0) - for id := range a.verifiersForChunk[chunk.Index] { - v = append(v, id) +// Build constructs and returns the immutable assignment. The AssignmentBuilder +// should be discarded after this call, and further method calls will panic. +func (a *AssignmentBuilder) Build() *Assignment { + if a.verifiersForChunk == nil { + panic("method `AssignmentBuilder.Build` has previously been called - do not reuse AssignmentBuilder") } - return v + assignment := &Assignment{verifiersForChunk: a.verifiersForChunk} + a.verifiersForChunk = nil // revoke builder's reference, to prevent modification of assignment + return assignment +} + +// Verifiers returns the list of verifier nodes assigned to a chunk. The protocol mandates +// that for each chunk in a block, a verifier assignment exists (though it may be empty) and +// that each block must have at least one chunk. +// Errors: ErrUnknownChunkIndex if the chunk index is not present in the assignment +func (a *Assignment) Verifiers(chunkIdx uint64) (map[flow.Identifier]struct{}, error) { + if chunkIdx >= uint64(len(a.verifiersForChunk)) { + return nil, ErrUnknownChunkIndex + } + assignedVerifiers := a.verifiersForChunk[chunkIdx] + // return a copy to prevent modification + return maps.Clone(assignedVerifiers), nil } // HasVerifier checks if a chunk is assigned to the given verifier -// TODO: method should probably error if chunk has unknown index -func (a *Assignment) HasVerifier(chunk *flow.Chunk, identifier flow.Identifier) bool { - assignedVerifiers, found := a.verifiersForChunk[chunk.Index] - if !found { - // is verifier assigned to this chunk? - // No, because we only assign verifiers to existing chunks - return false +// Errors: ErrUnknownChunkIndex if the chunk index is not present in the assignment +func (a *Assignment) HasVerifier(chunkIdx uint64, identifier flow.Identifier) (bool, error) { + if chunkIdx >= uint64(len(a.verifiersForChunk)) { + return false, ErrUnknownChunkIndex } + assignedVerifiers := a.verifiersForChunk[chunkIdx] _, isAssigned := assignedVerifiers[identifier] - return isAssigned + return isAssigned, nil } -// Add records the list of verifier nodes as the assigned verifiers of the chunk -// it returns an error if the list of verifiers is empty or contains duplicate ids -func (a *Assignment) Add(chunk *flow.Chunk, verifiers flow.IdentifierList) { - // sorts verifiers list based on their identifier - v := make(map[flow.Identifier]struct{}) - for _, id := range verifiers { - v[id] = struct{}{} +// Add records the list of verifier nodes as the assigned verifiers of the chunk. 
+// Requires chunks to be added in order of their Index (starting at 0 and increasing +// by 1 with each addition); otherwise an exception is returned. +func (a *AssignmentBuilder) Add(chunkIdx uint64, verifiers flow.IdentifierList) error { + if a.verifiersForChunk == nil { + panic("method `AssignmentBuilder.Build` has previously been called - do not reuse AssignmentBuilder") + } + if chunkIdx != uint64(len(a.verifiersForChunk)) { + return fmt.Errorf("chunk added out of order, got index %v but expecting %v", chunkIdx, len(a.verifiersForChunk)) } - a.verifiersForChunk[chunk.Index] = v + // Formally, the flow protocol mandates that the same verifier is not assigned + // repeatedly to the same chunk (as this would weaken the protocol's security). + vs := verifiers.Lookup() + if len(vs) != len(verifiers) { + return fmt.Errorf("repeated assignment of the same verifier to the same chunk is a violation of protocol rules") + } + a.verifiersForChunk = append(a.verifiersForChunk, vs) + return nil } // ByNodeID returns the indices of all chunks assigned to the given verifierID @@ -57,7 +94,7 @@ func (a *Assignment) ByNodeID(verifierID flow.Identifier) []uint64 { for chunkIdx, assignedVerifiers := range a.verifiersForChunk { _, isAssigned := assignedVerifiers[verifierID] if isAssigned { - chunks = append(chunks, chunkIdx) + chunks = append(chunks, uint64(chunkIdx)) } } return chunks @@ -67,35 +104,3 @@ func (a *Assignment) ByNodeID(verifierID flow.Identifier) []uint64 { func (a *Assignment) Len() int { return len(a.verifiersForChunk) } - -// AssignmentDataPack -// -// AssignmentDataPack provides a storable representation of chunk assignments on -// mempool -type AssignmentDataPack struct { - assignment *Assignment - fingerprint flow.Identifier -} - -// NewAssignmentDataPack casts an assignment and its fingerprint into an assignment data pack -func NewAssignmentDataPack(fingerprint flow.Identifier, assignment *Assignment) *AssignmentDataPack { - return &AssignmentDataPack{ - assignment: assignment, - fingerprint: fingerprint, - } -} - -// ID returns the unique identifier for assignment data pack -func (a *AssignmentDataPack) ID() flow.Identifier { - return a.fingerprint -} - -// Checksum returns the checksum of the assignment data pack -func (a *AssignmentDataPack) Checksum() flow.Identifier { - return flow.MakeID(a) -} - -// Assignment returns the assignment part of the assignment data pack -func (a *AssignmentDataPack) Assignment() *Assignment { - return a.assignment -} diff --git a/model/chunks/chunks.go b/model/chunks/chunks.go index d792c389953..7a208799a73 100644 --- a/model/chunks/chunks.go +++ b/model/chunks/chunks.go @@ -4,14 +4,11 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// ChunkListFromCommit creates a chunklist with one chunk whos final state is +// ChunkListFromCommit creates a chunklist with one chunk whose final state is // the commit func ChunkListFromCommit(commit flow.StateCommitment) flow.ChunkList { chunks := flow.ChunkList{} - chunk := &flow.Chunk{ - Index: 0, - EndState: commit, - } + chunk := flow.NewRootChunk(commit) chunks.Insert(chunk) return chunks diff --git a/model/chunks/executionDataFaults.go b/model/chunks/executionDataFaults.go new file mode 100644 index 00000000000..9148d75ed81 --- /dev/null +++ b/model/chunks/executionDataFaults.go @@ -0,0 +1,188 @@ +package chunks + +// This file contains the ChunkFaultErrors returned during chunk verification of ExecutionData. 
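For context, a sketch (not part of the patch) of how a consumer might handle the fault types defined in this file, assuming IsChunkFaultError behaves as exercised in chunkFaults_test.go above; all constructors, accessors, and fixtures used here appear elsewhere in this diff:

package main

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/model/chunks"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	// Fabricate the fault a verifier would raise when the execution data root's
	// block ID does not match the chunk's block ID.
	var err error = chunks.NewCFExecutionDataBlockIDMismatch(
		unittest.IdentifierFixture(), // executionDataRootBlockID
		unittest.IdentifierFixture(), // chunkBlockID
		0,                            // chunk index
		unittest.IdentifierFixture(), // execution result ID
	)

	// All fault types can be detected uniformly ...
	if chunks.IsChunkFaultError(err) {
		// ... and the concrete type recovered for structured handling.
		var mismatch *chunks.CFExecutionDataBlockIDMismatch
		if errors.As(err, &mismatch) {
			fmt.Printf("chunk %d of result %s is faulty\n",
				mismatch.ChunkIndex(), mismatch.ExecutionResultID())
		}
	}
}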
+ +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/onflow/flow-go/model/flow" +) + +// CFExecutionDataBlockIDMismatch is returned when the block ID contained in the execution data +// root is different from chunk's block ID +type CFExecutionDataBlockIDMismatch struct { + chunkIndex uint64 // chunk's index + execResID flow.Identifier // chunk's ExecutionResult identifier + executionDataRootBlockID flow.Identifier // blockID from chunk's ExecutionDataRoot + chunkBlockID flow.Identifier // chunk's blockID +} + +var _ ChunkFaultError = (*CFExecutionDataBlockIDMismatch)(nil) + +func (cf CFExecutionDataBlockIDMismatch) String() string { + return fmt.Sprintf("execution data root's block ID (%s) is different than chunk's block ID (%s) for chunk %d with result ID %s", + cf.executionDataRootBlockID, cf.chunkBlockID, cf.chunkIndex, cf.execResID.String()) +} + +func (cf CFExecutionDataBlockIDMismatch) Error() string { + return cf.String() +} + +// ChunkIndex returns chunk index of the faulty chunk +func (cf CFExecutionDataBlockIDMismatch) ChunkIndex() uint64 { + return cf.chunkIndex +} + +// ExecutionResultID returns the execution result identifier including the faulty chunk +func (cf CFExecutionDataBlockIDMismatch) ExecutionResultID() flow.Identifier { + return cf.execResID +} + +// NewCFExecutionDataBlockIDMismatch creates a new instance of Chunk Fault (ExecutionDataBlockIDMismatch) +func NewCFExecutionDataBlockIDMismatch( + executionDataRootBlockID flow.Identifier, + chunkBlockID flow.Identifier, + chInx uint64, + execResID flow.Identifier, +) *CFExecutionDataBlockIDMismatch { + return &CFExecutionDataBlockIDMismatch{ + chunkIndex: chInx, + execResID: execResID, + executionDataRootBlockID: executionDataRootBlockID, + chunkBlockID: chunkBlockID, + } +} + +// CFExecutionDataChunksLengthMismatch is returned when execution data chunks list has different length than number of chunks for a block +type CFExecutionDataChunksLengthMismatch struct { + chunkIndex uint64 // chunk's index + execResID flow.Identifier // chunk's ExecutionResult identifier + executionDataRootChunkLength int // number of ChunkExecutionDataIDs in ExecutionDataRoot + executionResultChunkListLength int // number of chunks in ExecutionResult +} + +var _ ChunkFaultError = (*CFExecutionDataChunksLengthMismatch)(nil) + +func (cf CFExecutionDataChunksLengthMismatch) String() string { + return fmt.Sprintf("execution data root chunk length (%d) is different than execution result chunk list length (%d) for chunk %d with result ID %s", + cf.executionDataRootChunkLength, cf.executionResultChunkListLength, cf.chunkIndex, cf.execResID.String()) +} + +func (cf CFExecutionDataChunksLengthMismatch) Error() string { + return cf.String() +} + +// ChunkIndex returns chunk index of the faulty chunk +func (cf CFExecutionDataChunksLengthMismatch) ChunkIndex() uint64 { + return cf.chunkIndex +} + +// ExecutionResultID returns the execution result identifier including the faulty chunk +func (cf CFExecutionDataChunksLengthMismatch) ExecutionResultID() flow.Identifier { + return cf.execResID +} + +// NewCFExecutionDataChunksLengthMismatch creates a new instance of Chunk Fault (ExecutionDataChunksLengthMismatch) +func NewCFExecutionDataChunksLengthMismatch( + executionDataRootChunkLength int, + executionResultChunkListLength int, + chInx uint64, + execResID flow.Identifier, +) *CFExecutionDataChunksLengthMismatch { + return &CFExecutionDataChunksLengthMismatch{ + chunkIndex: chInx, + execResID: execResID, + executionDataRootChunkLength: 
executionDataRootChunkLength, + executionResultChunkListLength: executionResultChunkListLength, + } +} + +// CFExecutionDataInvalidChunkCID is returned when execution data chunk's CID is different from computed +type CFExecutionDataInvalidChunkCID struct { + chunkIndex uint64 // chunk's index + execResID flow.Identifier // chunk's ExecutionResult identifier + executionDataRootChunkCID cid.Cid // ExecutionDataRoot's CID for the chunk + computedChunkCID cid.Cid // computed CID for the chunk +} + +var _ ChunkFaultError = (*CFExecutionDataInvalidChunkCID)(nil) + +func (cf CFExecutionDataInvalidChunkCID) String() string { + return fmt.Sprintf("execution data chunk CID (%s) is different than computed (%s) for chunk %d with result ID %s", + cf.executionDataRootChunkCID, cf.computedChunkCID, cf.chunkIndex, cf.execResID.String()) +} + +func (cf CFExecutionDataInvalidChunkCID) Error() string { + return cf.String() +} + +// ChunkIndex returns chunk index of the faulty chunk +func (cf CFExecutionDataInvalidChunkCID) ChunkIndex() uint64 { + return cf.chunkIndex +} + +// ExecutionResultID returns the execution result identifier including the faulty chunk +func (cf CFExecutionDataInvalidChunkCID) ExecutionResultID() flow.Identifier { + return cf.execResID +} + +// NewCFExecutionDataInvalidChunkCID creates a new instance of Chunk Fault (ExecutionDataInvalidChunkCID) +func NewCFExecutionDataInvalidChunkCID( + executionDataRootChunkCID cid.Cid, + computedChunkCID cid.Cid, + chInx uint64, + execResID flow.Identifier, +) *CFExecutionDataInvalidChunkCID { + return &CFExecutionDataInvalidChunkCID{ + chunkIndex: chInx, + execResID: execResID, + executionDataRootChunkCID: executionDataRootChunkCID, + computedChunkCID: computedChunkCID, + } +} + +// CFInvalidExecutionDataID is returned when ExecutionResult's ExecutionDataID is different from computed +type CFInvalidExecutionDataID struct { + chunkIndex uint64 // chunk's index + execResID flow.Identifier // chunk's ExecutionResult identifier + erExecutionDataID flow.Identifier // ExecutionResult's ExecutionDataID + computedExecutionDataID flow.Identifier // computed ExecutionDataID +} + +var _ ChunkFaultError = (*CFInvalidExecutionDataID)(nil) + +func (cf CFInvalidExecutionDataID) String() string { + return fmt.Sprintf("execution data ID (%s) is different than computed (%s) for chunk %d with result ID %s", + cf.erExecutionDataID, cf.computedExecutionDataID, cf.chunkIndex, cf.execResID.String()) +} + +func (cf CFInvalidExecutionDataID) Error() string { + return cf.String() +} + +// ChunkIndex returns chunk index of the faulty chunk +func (cf CFInvalidExecutionDataID) ChunkIndex() uint64 { + return cf.chunkIndex +} + +// ExecutionResultID returns the execution result identifier including the faulty chunk +func (cf CFInvalidExecutionDataID) ExecutionResultID() flow.Identifier { + return cf.execResID +} + +// NewCFInvalidExecutionDataID creates a new instance of Chunk Fault (CFInvalidExecutionDataID) +func NewCFInvalidExecutionDataID( + erExecutionDataID flow.Identifier, + computedExecutionDataID flow.Identifier, + chInx uint64, + execResID flow.Identifier, +) *CFInvalidExecutionDataID { + return &CFInvalidExecutionDataID{ + chunkIndex: chInx, + execResID: execResID, + erExecutionDataID: erExecutionDataID, + computedExecutionDataID: computedExecutionDataID, + } +} diff --git a/model/cluster/block.go b/model/cluster/block.go index 30155d391db..148eef6c603 100644 --- a/model/cluster/block.go +++ b/model/cluster/block.go @@ -3,41 +3,147 @@ package cluster import ( + 
"fmt" + "github.com/onflow/flow-go/model/flow" ) -func Genesis() *Block { - header := &flow.Header{ - View: 0, - ChainID: "cluster", - Timestamp: flow.GenesisTime, - ParentID: flow.ZeroID, +// Block represents a block in collection node cluster consensus. It contains +// a standard block header with a payload containing only a single collection. +// +// Zero values for certain HeaderBody fields are allowed only for root blocks, which must be constructed +// using the NewRootBlock constructor. All non-root blocks must be constructed +// using NewBlock to ensure validation of the block fields. +// +//structwrite:immutable - mutations allowed only within the constructor +type Block = flow.GenericBlock[Payload] + +// UntrustedBlock is an untrusted input-only representation of a cluster Block, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedBlock should be validated and converted into +// a trusted cluster Block using the NewBlock constructor (or NewRootBlock +// for the root block). +type UntrustedBlock Block + +// NewBlock creates a new block in collection node cluster consensus. +// This constructor enforces validation rules to ensure the block is well-formed. +// It must be used to construct all non-root blocks. +// +// All errors indicate that a valid Block cannot be constructed from the input. +func NewBlock(untrusted UntrustedBlock) (*Block, error) { + // validate header body + headerBody, err := flow.NewHeaderBody(flow.UntrustedHeaderBody(untrusted.HeaderBody)) + if err != nil { + return nil, fmt.Errorf("invalid header body: %w", err) } - payload := EmptyPayload(flow.ZeroID) + // validate payload + payload, err := NewPayload(UntrustedPayload(untrusted.Payload)) + if err != nil { + return nil, fmt.Errorf("invalid cluster payload: %w", err) + } - block := &Block{ - Header: header, + return &Block{ + HeaderBody: *headerBody, + Payload: *payload, + }, nil +} + +// NewRootBlock creates a root block in collection node cluster consensus. +// +// This constructor must be used **only** for constructing the root block, +// which is the only case where zero values are allowed. +func NewRootBlock(untrusted UntrustedBlock) (*Block, error) { + rootHeaderBody, err := flow.NewRootHeaderBody(flow.UntrustedHeaderBody(untrusted.HeaderBody)) + if err != nil { + return nil, fmt.Errorf("invalid root header body: %w", err) + } + + if rootHeaderBody.ParentID != flow.ZeroID { + return nil, fmt.Errorf("ParentID must be zero") } - block.SetPayload(payload) - return block + rootPayload, err := NewRootPayload(UntrustedPayload(untrusted.Payload)) + if err != nil { + return nil, fmt.Errorf("invalid root cluster payload: %w", err) + } + + return &Block{ + HeaderBody: *rootHeaderBody, + Payload: *rootPayload, + }, nil } -// Block represents a block in collection node cluster consensus. It contains -// a standard block header with a payload containing only a single collection. -type Block struct { - Header *flow.Header - Payload *Payload +// Proposal represents a signed proposed block in collection node cluster consensus. +// +//structwrite:immutable - mutations allowed only within the constructor. +type Proposal struct { + Block Block + ProposerSigData []byte +} + +// UntrustedProposal is an untrusted input-only representation of a cluster.Proposal, +// used for construction. 
+// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedProposal should be validated and converted into +// a trusted cluster Proposal using the NewProposal constructor (or NewRootProposal +// for the root proposal). +type UntrustedProposal Proposal + +// NewProposal creates a new cluster Proposal. +// This constructor enforces validation rules to ensure the Proposal is well-formed. +// +// All errors indicate that a valid cluster.Proposal cannot be constructed from the input. +func NewProposal(untrusted UntrustedProposal) (*Proposal, error) { + block, err := NewBlock(UntrustedBlock(untrusted.Block)) + if err != nil { + return nil, fmt.Errorf("invalid block: %w", err) + } + if len(untrusted.ProposerSigData) == 0 { + return nil, fmt.Errorf("proposer signature must not be empty") + } + + return &Proposal{ + Block: *block, + ProposerSigData: untrusted.ProposerSigData, + }, nil } -// ID returns the ID of the underlying block header. -func (b Block) ID() flow.Identifier { - return b.Header.ID() +// NewRootProposal creates a root cluster proposal. +// This constructor must be used **only** for constructing the root proposal, +// which is the only case where zero values are allowed. +func NewRootProposal(untrusted UntrustedProposal) (*Proposal, error) { + block, err := NewRootBlock(UntrustedBlock(untrusted.Block)) + if err != nil { + return nil, fmt.Errorf("invalid root block: %w", err) + } + if len(untrusted.ProposerSigData) > 0 { + return nil, fmt.Errorf("proposer signature must be empty") + } + + return &Proposal{ + Block: *block, + ProposerSigData: untrusted.ProposerSigData, + }, nil + } -// SetPayload sets the payload and payload hash. -func (b *Block) SetPayload(payload Payload) { - b.Payload = &payload - b.Header.PayloadHash = payload.Hash() +// ProposalHeader converts the proposal into a compact [ProposalHeader] representation, +// where the payload is compressed to a hash reference. +func (p *Proposal) ProposalHeader() *flow.ProposalHeader { + return &flow.ProposalHeader{Header: p.Block.ToHeader(), ProposerSigData: p.ProposerSigData} } + +// BlockResponse is the same as flow.BlockResponse, but for cluster +// consensus. It contains a list of structurally validated cluster block proposals +// that should correspond to the request. +type BlockResponse flow.GenericBlockResponse[Proposal] diff --git a/model/cluster/block_test.go b/model/cluster/block_test.go new file mode 100644 index 00000000000..7449c5c6b43 --- /dev/null +++ b/model/cluster/block_test.go @@ -0,0 +1,298 @@ +package cluster_test + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/cluster" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestClusterBlockMalleability checks that cluster.Block is not malleable: any change in its data +// should result in a different ID. +// Because our NewHeaderBody constructor enforces ParentView < View we use +// WithFieldGenerator to safely pass it. 
+func TestClusterBlockMalleability(t *testing.T) { + clusterBlock := unittest.ClusterBlockFixture() + unittest.RequireEntityNonMalleable( + t, + clusterBlock, + unittest.WithFieldGenerator("HeaderBody.ParentView", func() uint64 { + return clusterBlock.View - 1 // ParentView must stay below View, so set it to View-1 + }), + unittest.WithFieldGenerator("Payload.Collection", func() flow.Collection { + return unittest.CollectionFixture(3) + }), + ) +} + +// TestNewBlock verifies the behavior of the NewBlock constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedBlock results in a valid Block. +// +// 2. Invalid input with invalid HeaderBody: +// - Ensures an error is returned when the HeaderBody.ParentID is flow.ZeroID. +// +// 3. Invalid input with invalid Payload: +// - Ensures an error is returned when the Payload contains a Collection with invalid transaction IDs. +func TestNewBlock(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + block := unittest.ClusterBlockFixture() + + res, err := cluster.NewBlock(cluster.UntrustedBlock(*block)) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with invalid header body", func(t *testing.T) { + block := unittest.ClusterBlockFixture() + block.ParentID = flow.ZeroID + + res, err := cluster.NewBlock(cluster.UntrustedBlock(*block)) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid header body") + }) + + t.Run("invalid input with invalid payload", func(t *testing.T) { + block := unittest.ClusterBlockFixture() + collection := unittest.CollectionFixture(5) + collection.Transactions[2] = nil + block.Payload.Collection = collection + + res, err := cluster.NewBlock(cluster.UntrustedBlock(*block)) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid cluster payload") + }) +} + +// TestNewRootBlock verifies the behavior of the NewRootBlock constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedBlock results in a valid root Block. +// +// 2. Invalid input with invalid HeaderBody: +// - Ensures an error is returned when the HeaderBody.ParentView is not zero. +// +// 3. Invalid input with invalid ParentID: +// - Ensures an error is returned when the HeaderBody.ParentID is not zero. +// +// 4. Invalid input with invalid Payload: +// - Ensures an error is returned when the Payload.ReferenceBlockID is not flow.ZeroID. +func TestNewRootBlock(t *testing.T) { + // validRootBlockFixture returns a new valid root cluster.UntrustedBlock for use in tests. 
+ validRootBlockFixture := func() cluster.UntrustedBlock { + return cluster.UntrustedBlock{ + HeaderBody: flow.HeaderBody{ + ChainID: flow.Emulator, + ParentID: flow.ZeroID, + Height: 10, + Timestamp: uint64(time.Now().UnixMilli()), + View: 0, + ParentView: 0, + ParentVoterIndices: []byte{}, + ParentVoterSigData: []byte{}, + ProposerID: flow.ZeroID, + LastViewTC: nil, + }, + Payload: *cluster.NewEmptyPayload(flow.ZeroID), + } + } + + t.Run("valid input", func(t *testing.T) { + res, err := cluster.NewRootBlock(validRootBlockFixture()) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with invalid header body", func(t *testing.T) { + block := validRootBlockFixture() + block.ParentView = 1 + + res, err := cluster.NewRootBlock(block) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid root header body") + }) + + t.Run("invalid input with invalid ParentID", func(t *testing.T) { + block := validRootBlockFixture() + block.ParentID = unittest.IdentifierFixture() + + res, err := cluster.NewRootBlock(block) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "ParentID must be zero") + }) + + t.Run("invalid input with invalid payload", func(t *testing.T) { + block := validRootBlockFixture() + block.Payload.ReferenceBlockID = unittest.IdentifierFixture() + + res, err := cluster.NewRootBlock(block) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid root cluster payload") + }) +} + +// TestNewProposal verifies the behavior of the NewProposal constructor. +// It ensures proper handling of both valid and invalid input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedProposal results in a valid Proposal. +// +// 2. Invalid input with invalid Block: +// - Ensures an error is returned when the Block.ParentID is flow.ZeroID. +// +// 3. Invalid input with nil ProposerSigData: +// - Ensures an error is returned when the ProposerSigData is nil. +// +// 4. Invalid input with empty ProposerSigData: +// - Ensures an error is returned when the ProposerSigData is an empty byte slice. 
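As context for the constructor tests that follow, a brief sketch (editorial, not part of the patch) of the construction flow they exercise: an UntrustedProposal is populated with named fields and only becomes a trusted Proposal through the constructor. The fixture helpers are the same ones used throughout this file:

package main

import (
	"github.com/onflow/flow-go/model/cluster"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	// Populate the untrusted representation with named fields ...
	untrusted := cluster.UntrustedProposal{
		Block:           *unittest.ClusterBlockFixture(),
		ProposerSigData: unittest.SignatureFixture(),
	}
	// ... and promote it to a trusted Proposal only via the constructor.
	proposal, err := cluster.NewProposal(untrusted)
	if err != nil {
		panic(err) // any error means no valid Proposal can be built from the input
	}
	_ = proposal.ProposalHeader() // compact form: header + signature, payload reduced to its hash
}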
+func TestNewProposal(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + res, err := cluster.NewProposal(cluster.UntrustedProposal(*unittest.ClusterProposalFixture())) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with invalid block", func(t *testing.T) { + untrustedProposal := cluster.UntrustedProposal(*unittest.ClusterProposalFixture()) + untrustedProposal.Block.ParentID = flow.ZeroID + + res, err := cluster.NewProposal(untrustedProposal) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid block") + }) + + t.Run("invalid input with nil ProposerSigData", func(t *testing.T) { + untrustedProposal := cluster.UntrustedProposal(*unittest.ClusterProposalFixture()) + untrustedProposal.ProposerSigData = nil + + res, err := cluster.NewProposal(untrustedProposal) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "proposer signature must not be empty") + }) + + t.Run("invalid input with empty ProposerSigData", func(t *testing.T) { + untrustedProposal := cluster.UntrustedProposal(*unittest.ClusterProposalFixture()) + untrustedProposal.ProposerSigData = []byte{} + + res, err := cluster.NewProposal(untrustedProposal) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "proposer signature must not be empty") + }) +} + +// TestNewRootProposal verifies the behavior of the NewRootProposal constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input with nil ProposerSigData: +// - Verifies that a root proposal with nil ProposerSigData is accepted. +// +// 2. Valid input with empty ProposerSigData: +// - Verifies that an empty (but non-nil) ProposerSigData is also accepted, +// since root proposals must not include a signature. +// +// 3. Invalid input with invalid Block: +// - Ensures an error is returned if the Block.ParentView is non-zero, which is disallowed for root blocks. +// +// 4. Invalid input with non-empty ProposerSigData: +// - Ensures an error is returned when a ProposerSigData is included, as this is not permitted for root proposals. +func TestNewRootProposal(t *testing.T) { + // validRootProposalFixture returns a new valid cluster.UntrustedProposal for use in tests. 
+ validRootProposalFixture := func() cluster.UntrustedProposal { + block, err := cluster.NewRootBlock(cluster.UntrustedBlock{ + HeaderBody: flow.HeaderBody{ + ChainID: flow.Emulator, + ParentID: flow.ZeroID, + Height: 10, + Timestamp: uint64(time.Now().UnixMilli()), + View: 0, + ParentView: 0, + ParentVoterIndices: []byte{}, + ParentVoterSigData: []byte{}, + ProposerID: flow.ZeroID, + LastViewTC: nil, + }, + Payload: *cluster.NewEmptyPayload(flow.ZeroID), + }) + if err != nil { + panic(err) + } + return cluster.UntrustedProposal{ + Block: *block, + ProposerSigData: nil, + } + } + + t.Run("valid input with nil ProposerSigData", func(t *testing.T) { + res, err := cluster.NewRootProposal(validRootProposalFixture()) + + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("valid input with empty ProposerSigData", func(t *testing.T) { + untrustedProposal := validRootProposalFixture() + untrustedProposal.ProposerSigData = []byte{} + + res, err := cluster.NewRootProposal(untrustedProposal) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with invalid block", func(t *testing.T) { + untrustedProposal := validRootProposalFixture() + untrustedProposal.Block.ParentView = 1 + + res, err := cluster.NewRootProposal(untrustedProposal) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid root block") + }) + + t.Run("invalid input with non-empty proposer signature", func(t *testing.T) { + untrustedProposal := validRootProposalFixture() + untrustedProposal.ProposerSigData = unittest.SignatureFixture() + + res, err := cluster.NewRootProposal(untrustedProposal) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "proposer signature must be empty") + }) +} + +// TestBlockEncodingJSON_IDField ensures that the explicit ID field added to the +// block when encoded as JSON is present and accurate. +func TestBlockEncodingJSON_IDField(t *testing.T) { + block := unittest.ClusterBlockFixture() + blockID := block.ID() + data, err := json.Marshal(block) + require.NoError(t, err) + var decodedIDField struct{ ID flow.Identifier } + err = json.Unmarshal(data, &decodedIDField) + require.NoError(t, err) + assert.Equal(t, blockID, decodedIDField.ID) +} diff --git a/model/cluster/payload.go b/model/cluster/payload.go index 959eb20575c..8647db73c11 100644 --- a/model/cluster/payload.go +++ b/model/cluster/payload.go @@ -1,12 +1,15 @@ package cluster import ( - "github.com/onflow/flow-go/model/fingerprint" + "fmt" + "github.com/onflow/flow-go/model/flow" ) // Payload is the payload for blocks in collection node cluster consensus. // It contains only a single collection. +// +//structwrite:immutable - mutations allowed only within the constructor type Payload struct { // Collection is the collection being created. @@ -33,38 +36,62 @@ type Payload struct { ReferenceBlockID flow.Identifier } -// EmptyPayload returns a payload with an empty collection and the given +// NewEmptyPayload returns a payload with an empty collection and the given // reference block ID. -func EmptyPayload(refID flow.Identifier) Payload { - return PayloadFromTransactions(refID) +func NewEmptyPayload(refID flow.Identifier) *Payload { + return &Payload{ + Collection: *flow.NewEmptyCollection(), + ReferenceBlockID: refID, + } } -// PayloadFromTransactions creates a payload given a reference block ID and a +// UntrustedPayload is an untrusted input-only representation of a cluster Payload, +// used for construction. 
+// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedPayload should be validated and converted into +// a trusted cluster Payload using the NewPayload constructor. +type UntrustedPayload Payload + +// NewPayload creates a payload given a reference block ID and a // list of transaction hashes. -func PayloadFromTransactions(refID flow.Identifier, transactions ...*flow.TransactionBody) Payload { - // avoid a nil transaction list - if len(transactions) == 0 { - transactions = []*flow.TransactionBody{} +// Constructing a cluster Payload is allowed only within the constructor. +// +// All errors indicate a valid Payload cannot be constructed from the input. +func NewPayload(untrusted UntrustedPayload) (*Payload, error) { + collection, err := flow.NewCollection(flow.UntrustedCollection(untrusted.Collection)) + if err != nil { + return nil, fmt.Errorf("could not construct collection: %w", err) } - return Payload{ - Collection: flow.Collection{ - Transactions: transactions, - }, - ReferenceBlockID: refID, + return &Payload{ + Collection: *collection, + ReferenceBlockID: untrusted.ReferenceBlockID, + }, nil +} + +// NewRootPayload creates a root payload for a root cluster block. +// +// This constructor must be used **only** for constructing the root payload, +// which is the only case where zero values are allowed. +func NewRootPayload(untrusted UntrustedPayload) (*Payload, error) { + if untrusted.ReferenceBlockID != flow.ZeroID { + return nil, fmt.Errorf("ReferenceBlockID must be empty") + } + + if len(untrusted.Collection.Transactions) != 0 { + return nil, fmt.Errorf("Collection must be empty") + } + + return &Payload{ + Collection: untrusted.Collection, + ReferenceBlockID: untrusted.ReferenceBlockID, + }, nil } // Hash returns the hash of the payload. func (p Payload) Hash() flow.Identifier { return flow.MakeID(p) } - -func (p Payload) Fingerprint() []byte { - return fingerprint.Fingerprint(struct { - Collection []byte - ReferenceBlockID flow.Identifier - }{ - Collection: p.Collection.Fingerprint(), - ReferenceBlockID: p.ReferenceBlockID, - }) -} diff --git a/model/cluster/payload_test.go b/model/cluster/payload_test.go new file mode 100644 index 00000000000..bbfe1f70f09 --- /dev/null +++ b/model/cluster/payload_test.go @@ -0,0 +1,106 @@ +package cluster_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/cluster" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewPayload verifies the behavior of the NewPayload constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedPayload results in a valid Payload. +// +// 2. Valid input with zero reference block ID: +// - Ensures that Payload is still constructed when reference block ID is zero, +// since it's allowed only for root blocks (validation should happen elsewhere). +// +// 3. Invalid input with malformed collection: +// - Ensures an error is returned when the Collection contains invalid transaction IDs. 
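Before the test body, a minimal sketch (editorial, not part of the patch) contrasting the two payload constructors defined above; it assumes the fixture helpers already imported in this test file:

package main

import (
	"github.com/onflow/flow-go/model/cluster"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	// Non-root payload: the collection is validated via flow.NewCollection inside the constructor.
	payload, err := cluster.NewPayload(cluster.UntrustedPayload{
		Collection:       unittest.CollectionFixture(3),
		ReferenceBlockID: unittest.IdentifierFixture(),
	})
	if err != nil {
		panic(err)
	}
	_ = payload.Hash()

	// Root payload: must carry an empty collection and a zero reference block ID.
	if _, err := cluster.NewRootPayload(cluster.UntrustedPayload{
		Collection:       *flow.NewEmptyCollection(),
		ReferenceBlockID: flow.ZeroID,
	}); err != nil {
		panic(err)
	}
}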
+func TestNewPayload(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + payload := unittest.ClusterPayloadFixture(5) + + res, err := cluster.NewPayload(cluster.UntrustedPayload(*payload)) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("valid input with zero ReferenceBlockID (root block)", func(t *testing.T) { + payload := unittest.ClusterPayloadFixture(5) + payload.ReferenceBlockID = flow.ZeroID + + res, err := cluster.NewPayload(cluster.UntrustedPayload(*payload)) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, flow.ZeroID, res.ReferenceBlockID) + }) + + t.Run("invalid input with malformed collection", func(t *testing.T) { + payload := unittest.ClusterPayloadFixture(5) + collection := unittest.CollectionFixture(5) + collection.Transactions[2] = nil + payload.Collection = collection + + res, err := cluster.NewPayload(cluster.UntrustedPayload(*payload)) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "could not construct collection") + }) +} + +// TestNewRootPayload verifies the behavior of the NewRootPayload constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedPayload results in a valid root Payload. +// +// 2. Invalid input with non-zero reference block ID: +// - Ensures an error is returned when reference block ID is not flow.ZeroID. +// +// 3. Invalid input with non-empty collection: +// - Ensures an error is returned when the Collection contains transaction IDs. +func TestNewRootPayload(t *testing.T) { + // validRootPayloadFixture returns a new valid root cluster.UntrustedPayload for use in tests. + validRootPayloadFixture := func() cluster.UntrustedPayload { + return cluster.UntrustedPayload{ + ReferenceBlockID: flow.ZeroID, + Collection: *flow.NewEmptyCollection(), + } + } + + t.Run("valid input", func(t *testing.T) { + res, err := cluster.NewRootPayload(validRootPayloadFixture()) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with non-zero ReferenceBlockID", func(t *testing.T) { + payload := validRootPayloadFixture() + payload.ReferenceBlockID = unittest.IdentifierFixture() + + res, err := cluster.NewRootPayload(payload) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "ReferenceBlockID must be empty") + }) + + t.Run("invalid input with collection", func(t *testing.T) { + payload := validRootPayloadFixture() + payload.Collection = unittest.CollectionFixture(5) + + res, err := cluster.NewRootPayload(payload) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "Collection must be empty") + }) +} diff --git a/model/convert/fixtures_test.go b/model/convert/fixtures_test.go deleted file mode 100644 index 5c99d8709ee..00000000000 --- a/model/convert/fixtures_test.go +++ /dev/null @@ -1,1229 +0,0 @@ -package convert_test - -import ( - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/fvm/systemcontracts" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -// This file contains service event fixtures for testing purposes. -// The Cadence form is represented by JSON-CDC-encoded string variables. - -// EpochSetupFixture returns an EpochSetup service event as a Cadence event -// representation and as a protocol model representation. 
-func EpochSetupFixture(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { - events, err := systemcontracts.ServiceEventsForChain(chain) - if err != nil { - panic(err) - } - - event := unittest.EventFixture(events.EpochSetup.EventType(), 1, 1, unittest.IdentifierFixture(), 0) - event.Payload = []byte(epochSetupFixtureJSON) - - // randomSource is [0,0,...,1,2,3,4] - randomSource := make([]uint8, flow.EpochSetupRandomSourceLength) - for i := 0; i < 4; i++ { - randomSource[flow.EpochSetupRandomSourceLength-1-i] = uint8(4 - i) - } - - expected := &flow.EpochSetup{ - Counter: 1, - FirstView: 100, - FinalView: 200, - DKGPhase1FinalView: 150, - DKGPhase2FinalView: 160, - DKGPhase3FinalView: 170, - RandomSource: randomSource, - Assignments: flow.AssignmentList{ - { - flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), - flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), - }, - { - flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), - flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), - }, - }, - Participants: flow.IdentityList{ - { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), - Address: "1.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, - }, - { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), - Address: "2.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, - }, - { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), - Address: "3.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, - }, - { - Role: flow.RoleCollection, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), - Address: "4.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, 
"378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, - }, - { - Role: flow.RoleConsensus, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000011"), - Address: "11.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f"), - Weight: 100, - }, - { - Role: flow.RoleExecution, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000021"), - Address: "21.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83"), - Weight: 100, - }, - { - Role: flow.RoleVerification, - NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000031"), - Address: "31.flow.com", - NetworkPubKey: unittest.MustDecodePublicKeyHex(crypto.ECDSAP256, "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae"), - StakingPubKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7"), - Weight: 100, - }, - }, - } - - return event, expected -} - -// EpochCommitFixture returns an EpochCommit service event as a Cadence event -// representation and as a protocol model representation. 
-func EpochCommitFixture(chain flow.ChainID) (flow.Event, *flow.EpochCommit) { - - events, err := systemcontracts.ServiceEventsForChain(chain) - if err != nil { - panic(err) - } - - event := unittest.EventFixture(events.EpochCommit.EventType(), 1, 1, unittest.IdentifierFixture(), 0) - event.Payload = []byte(epochCommitFixtureJSON) - - expected := &flow.EpochCommit{ - Counter: 1, - ClusterQCs: []flow.ClusterQCVoteData{ - { - VoterIDs: []flow.Identifier{ - flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), - flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), - }, - SigData: unittest.MustDecodeSignatureHex("b072ed22ed305acd44818a6c836e09b4e844eebde6a4fdbf5cec983e2872b86c8b0f6c34c0777bf52e385ab7c45dc55d"), - }, - { - VoterIDs: []flow.Identifier{ - flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), - flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), - }, - SigData: unittest.MustDecodeSignatureHex("899e266a543e1b3a564f68b22f7be571f2e944ec30fadc4b39e2d5f526ba044c0f3cb2648f8334fc216fa3360a0418b2"), - }, - }, - DKGGroupKey: unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "8c588266db5f5cda629e83f8aa04ae9413593fac19e4865d06d291c9d14fbdd9bdb86a7a12f9ef8590c79cb635e3163315d193087e9336092987150d0cd2b14ac6365f7dc93eec573752108b8c12368abb65f0652d9f644e5aed611c37926950"), - DKGParticipantKeys: []crypto.PublicKey{ - unittest.MustDecodePublicKeyHex(crypto.BLSBLS12381, "87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488"), - }, - } - - return event, expected -} - -var epochSetupFixtureJSON = ` -{ - "type": "Event", - "value": { - "id": "A.01cf0e2f2f715450.FlowEpoch.EpochSetup", - "fields": [ - { - "name": "counter", - "value": { - "type": "UInt64", - "value": "1" - } - }, - { - "name": "nodeInfo", - "value": { - "type": "Array", - "value": [ - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000001" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "1" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "1.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - 
"type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000002" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "1" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "2.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000003" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "1" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "3.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } 
- }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000004" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "1" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "4.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000011" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "2" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "11.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { 
- "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000021" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "3" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "21.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000031" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "4" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "31.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } 
- }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - } - ] - } - }, - { - "name": "firstView", - "value": { - "type": "UInt64", - "value": "100" - } - }, - { - "name": "finalView", - "value": { - "type": "UInt64", - "value": "200" - } - }, - { - "name": "collectorClusters", - "value": { - "type": "Array", - "value": [ - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowClusterQC.Cluster", - "fields": [ - { - "name": "index", - "value": { - "type": "UInt16", - "value": "0" - } - }, - { - "name": "nodeWeights", - "value": { - "type": "Dictionary", - "value": [ - { - "key": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000001" - }, - "value": { - "type": "UInt64", - "value": "100" - } - }, - { - "key": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000002" - }, - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "name": "totalWeight", - "value": { - "type": "UInt64", - "value": "100" - } - }, - { - "name": "votes", - "value": { - "type": "Array", - "value": [] - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowClusterQC.Cluster", - "fields": [ - { - "name": "index", - "value": { - "type": "UInt16", - "value": "1" - } - }, - { - "name": "nodeWeights", - "value": { - "type": "Dictionary", - "value": [ - { - "key": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000003" - }, - "value": { - "type": "UInt64", - "value": "100" - } - }, - { - "key": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000004" - }, - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "name": "totalWeight", - "value": { - "type": "UInt64", - "value": "0" - } - }, - { - "name": "votes", - "value": { - "type": "Array", - "value": [] - } - } - ] - } - } - ] - } - }, - { - "name": "randomSource", - "value": { - "type": "String", - "value": "01020304" - } - }, - { - "name": "DKGPhase1FinalView", - "value": { - "type": "UInt64", - "value": "150" - } - }, - { - "name": "DKGPhase2FinalView", - "value": { - "type": "UInt64", - "value": "160" - } - }, - { - "name": "DKGPhase3FinalView", - "value": { - "type": "UInt64", - "value": "170" - } - } - ] - } -} -` - -var epochCommitFixtureJSON = ` -{ - "type": "Event", - "value": { - "id": "A.01cf0e2f2f715450.FlowEpoch.EpochCommitted", - "fields": [ - { - "name": "counter", - "value": { - "type": "UInt64", - "value": "1" - } - }, - { - "name": "clusterQCs", - "value": { - "type": "Array", - "value": [ - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowClusterQC.ClusterQC", - "fields": [ - { - "name": "index", - "value": { - "type": "UInt16", - "value": "0" - } - }, - { - "name": "voteSignatures", - "value": { - "type": "Array", - "value": [ - { - "type": "String", - "value": "a39cd1e1bf7e2fb0609b7388ce5215a6a4c01eef2aee86e1a007faa28a6b2a3dc876e11bb97cdb26c3846231d2d01e4d" - }, - { - "type": "String", - "value": 
"91673ad9c717d396c9a0953617733c128049ac1a639653d4002ab245b121df1939430e313bcbfd06948f6a281f6bf853" - } - ] - } - }, - { - "name": "voteMessage", - "value": { - "type": "String", - "value": "irrelevant_for_these_purposes" - } - }, - { - "name": "voterIDs", - "value": { - "type": "Array", - "value": [ - { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000001" - }, - { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000002" - } - ] - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowClusterQC.ClusterQC", - "fields": [ - { - "name": "index", - "value": { - "type": "UInt16", - "value": "1" - } - }, - { - "name": "voteSignatures", - "value": { - "type": "Array", - "value": [ - { - "type": "String", - "value": "b2bff159971852ed63e72c37991e62c94822e52d4fdcd7bf29aaf9fb178b1c5b4ce20dd9594e029f3574cb29533b857a" - }, - { - "type": "String", - "value": "9931562f0248c9195758da3de4fb92f24fa734cbc20c0cb80280163560e0e0348f843ac89ecbd3732e335940c1e8dccb" - } - ] - } - }, - { - "name": "voteMessage", - "value": { - "type": "String", - "value": "irrelevant_for_these_purposes" - } - }, - { - "name": "voterIDs", - "value": { - "type": "Array", - "value": [ - { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000003" - }, - { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000004" - } - ] - } - } - ] - } - } - ] - } - }, - { - "name": "dkgPubKeys", - "value": { - "type": "Array", - "value": [ - { - "type": "String", - "value": "8c588266db5f5cda629e83f8aa04ae9413593fac19e4865d06d291c9d14fbdd9bdb86a7a12f9ef8590c79cb635e3163315d193087e9336092987150d0cd2b14ac6365f7dc93eec573752108b8c12368abb65f0652d9f644e5aed611c37926950" - }, - { - "type": "String", - "value": "87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488" - } - ] - } - } - ] - } -}` diff --git a/model/convert/service_event.go b/model/convert/service_event.go index 30d40eee33c..07e249d3c01 100644 --- a/model/convert/service_event.go +++ b/model/convert/service_event.go @@ -5,26 +5,22 @@ import ( "fmt" "github.com/coreos/go-semver/semver" - "github.com/onflow/cadence" - "github.com/onflow/cadence/encoding/json" + "github.com/onflow/cadence/encoding/ccf" + "github.com/onflow/crypto" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/assignment" - "github.com/onflow/flow-go/model/flow/order" ) // ServiceEvent converts a service event encoded as the generic flow.Event // type to a flow.ServiceEvent type for use within protocol software and protocol // state. This acts as the conversion from the Cadence type to the flow-go type. +// CAUTION: This function must only be used for input events computed locally, by an +// Execution or Verification Node; it is not resilient to malicious inputs. +// No errors are expected during normal operation. 
func ServiceEvent(chainID flow.ChainID, event flow.Event) (*flow.ServiceEvent, error) { - - events, err := systemcontracts.ServiceEventsForChain(chainID) - if err != nil { - return nil, fmt.Errorf("could not get service event info: %w", err) - } + events := systemcontracts.ServiceEventsForChain(chainID) // depending on type of service event construct Go type switch event.Type { @@ -32,146 +28,170 @@ func ServiceEvent(chainID flow.ChainID, event flow.Event) (*flow.ServiceEvent, e return convertServiceEventEpochSetup(event) case events.EpochCommit.EventType(): return convertServiceEventEpochCommit(event) + case events.EpochRecover.EventType(): + return convertServiceEventEpochRecover(event) case events.VersionBeacon.EventType(): return convertServiceEventVersionBeacon(event) + case events.ProtocolStateVersionUpgrade.EventType(): + return convertServiceEventProtocolStateVersionUpgrade(event) default: return nil, fmt.Errorf("invalid event type: %s", event.Type) } } +func getField[T cadence.Value](fields map[string]cadence.Value, fieldName string) (T, error) { + field, ok := fields[fieldName] + if !ok || field == nil { + var zero T + return zero, fmt.Errorf( + "required field not found: %s", + fieldName, + ) + } + + value, ok := field.(T) + if !ok { + var zero T + return zero, invalidCadenceTypeError(fieldName, field, zero) + } + + return value, nil +} + // convertServiceEventEpochSetup converts a service event encoded as the generic // flow.Event type to a ServiceEvent type for an EpochSetup event +// CONVENTION: in the returned `EpochSetup` event, +// - Node identities listed in `EpochSetup.Participants` are in CANONICAL ORDER +// - for each cluster assignment (i.e. element in `EpochSetup.Assignments`), the nodeIDs are listed in CANONICAL ORDER +// +// CAUTION: This function must only be used for input events computed locally, by an +// Execution or Verification Node; it is not resilient to malicious inputs. +// No errors are expected during normal operation. 
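+//
+// The field-by-name access pattern used throughout this file is sketched below
+// (a minimal illustration; `cdcEvent` is assumed to be an already decoded cadence.Event):
+//
+//	fields := cadence.FieldsMappedByName(cdcEvent)
+//	counter, err := getField[cadence.UInt64](fields, "counter")
+//	if err != nil {
+//		return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err)
+//	}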
func convertServiceEventEpochSetup(event flow.Event) (*flow.ServiceEvent, error) { - - // decode bytes using jsoncdc - payload, err := json.Decode(nil, event.Payload) + // decode bytes using ccf + payload, err := ccf.Decode(nil, event.Payload) if err != nil { return nil, fmt.Errorf("could not unmarshal event payload: %w", err) } - // parse cadence types to required fields - setup := new(flow.EpochSetup) - // NOTE: variable names prefixed with cdc represent cadence types cdcEvent, ok := payload.(cadence.Event) if !ok { return nil, invalidCadenceTypeError("payload", payload, cadence.Event{}) } - if len(cdcEvent.Fields) < 9 { + if cdcEvent.Type() == nil { + return nil, fmt.Errorf("EpochSetup event doesn't have type") + } + + fields := cadence.FieldsMappedByName(cdcEvent) + + const expectedFieldCount = 11 + if len(fields) < expectedFieldCount { return nil, fmt.Errorf( - "insufficient fields in EpochSetup event (%d < 9)", - len(cdcEvent.Fields), + "insufficient fields in EpochSetup event (%d < %d)", + len(fields), + expectedFieldCount, ) } - // extract simple fields - counter, ok := cdcEvent.Fields[0].(cadence.UInt64) - if !ok { - return nil, invalidCadenceTypeError( - "counter", - cdcEvent.Fields[0], - cadence.UInt64(0), - ) + // parse EpochSetup event + + counter, err := getField[cadence.UInt64](fields, "counter") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - setup.Counter = uint64(counter) - firstView, ok := cdcEvent.Fields[2].(cadence.UInt64) - if !ok { - return nil, invalidCadenceTypeError( - "firstView", - cdcEvent.Fields[2], - cadence.UInt64(0), - ) + + cdcParticipants, err := getField[cadence.Array](fields, "nodeInfo") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - setup.FirstView = uint64(firstView) - finalView, ok := cdcEvent.Fields[3].(cadence.UInt64) - if !ok { - return nil, invalidCadenceTypeError( - "finalView", - cdcEvent.Fields[3], - cadence.UInt64(0), - ) + + firstView, err := getField[cadence.UInt64](fields, "firstView") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - setup.FinalView = uint64(finalView) - randomSrcHex, ok := cdcEvent.Fields[5].(cadence.String) - if !ok { - return nil, invalidCadenceTypeError( - "randomSource", - cdcEvent.Fields[5], - cadence.String(""), - ) + + finalView, err := getField[cadence.UInt64](fields, "finalView") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - // Cadence's unsafeRandom().toString() produces a string of variable length. - // Here we pad it with enough 0s to meet the required length. 
- paddedRandomSrcHex := fmt.Sprintf( - "%0*s", - 2*flow.EpochSetupRandomSourceLength, - string(randomSrcHex), - ) - setup.RandomSource, err = hex.DecodeString(paddedRandomSrcHex) + + cdcClusters, err := getField[cadence.Array](fields, "collectorClusters") if err != nil { - return nil, fmt.Errorf( - "could not decode random source hex (%v): %w", - paddedRandomSrcHex, - err, - ) + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - dkgPhase1FinalView, ok := cdcEvent.Fields[6].(cadence.UInt64) - if !ok { - return nil, invalidCadenceTypeError( - "dkgPhase1FinalView", - cdcEvent.Fields[6], - cadence.UInt64(0), - ) + randomSrcHex, err := getField[cadence.String](fields, "randomSource") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - setup.DKGPhase1FinalView = uint64(dkgPhase1FinalView) - dkgPhase2FinalView, ok := cdcEvent.Fields[7].(cadence.UInt64) - if !ok { - return nil, invalidCadenceTypeError( - "dkgPhase2FinalView", - cdcEvent.Fields[7], - cadence.UInt64(0), - ) + + targetDuration, err := getField[cadence.UInt64](fields, "targetDuration") // Epoch duration [seconds] + if err != nil { + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - setup.DKGPhase2FinalView = uint64(dkgPhase2FinalView) - dkgPhase3FinalView, ok := cdcEvent.Fields[8].(cadence.UInt64) - if !ok { - return nil, invalidCadenceTypeError( - "dkgPhase3FinalView", - cdcEvent.Fields[8], - cadence.UInt64(0), - ) + + targetEndTimeUnix, err := getField[cadence.UInt64](fields, "targetEndTime") // Unix time [seconds] + if err != nil { + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - setup.DKGPhase3FinalView = uint64(dkgPhase3FinalView) - // parse cluster assignments - cdcClusters, ok := cdcEvent.Fields[4].(cadence.Array) - if !ok { - return nil, invalidCadenceTypeError( - "clusters", - cdcEvent.Fields[4], - cadence.Array{}, - ) + dkgPhase1FinalView, err := getField[cadence.UInt64](fields, "DKGPhase1FinalView") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - setup.Assignments, err = convertClusterAssignments(cdcClusters.Values) + + dkgPhase2FinalView, err := getField[cadence.UInt64](fields, "DKGPhase2FinalView") if err != nil { - return nil, fmt.Errorf("could not convert cluster assignments: %w", err) + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) } - // parse epoch participants - cdcParticipants, ok := cdcEvent.Fields[1].(cadence.Array) - if !ok { - return nil, invalidCadenceTypeError( - "participants", - cdcEvent.Fields[1], - cadence.Array{}, + dkgPhase3FinalView, err := getField[cadence.UInt64](fields, "DKGPhase3FinalView") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochSetup event: %w", err) + } + + // random source from the event must be a hex string + // containing exactly 128 bits (equivalent to 16 bytes or 32 hex characters) + randomSource, err := hex.DecodeString(string(randomSrcHex)) + if err != nil { + return nil, fmt.Errorf( + "could not decode random source hex (%v): %w", + randomSrcHex, + err, ) } - setup.Participants, err = convertParticipants(cdcParticipants.Values) + + // parse cluster assignments; returned assignments are in canonical order + assignments, err := convertClusterAssignments(cdcClusters.Values) + if err != nil { + return nil, fmt.Errorf("could not convert cluster assignments: %w", err) + } + + // parse epoch participants; returned node identities are in canonical order + participants, err := 
convertParticipants(cdcParticipants.Values) if err != nil { return nil, fmt.Errorf("could not convert participants: %w", err) } + setup, err := flow.NewEpochSetup( + flow.UntrustedEpochSetup{ + Counter: uint64(counter), + FirstView: uint64(firstView), + DKGPhase1FinalView: uint64(dkgPhase1FinalView), + DKGPhase2FinalView: uint64(dkgPhase2FinalView), + DKGPhase3FinalView: uint64(dkgPhase3FinalView), + FinalView: uint64(finalView), + Participants: participants, + Assignments: assignments, + RandomSource: randomSource, + TargetDuration: uint64(targetDuration), + TargetEndTime: uint64(targetEndTimeUnix), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct epoch setup: %w", err) + } // construct the service event serviceEvent := &flow.ServiceEvent{ @@ -183,37 +203,126 @@ func convertServiceEventEpochSetup(event flow.Event) (*flow.ServiceEvent, error) } // convertServiceEventEpochCommit converts a service event encoded as the generic -// flow.Event type to a ServiceEvent type for an EpochCommit event +// flow.Event type to a ServiceEvent type for an EpochCommit event. +// CAUTION: This function must only be used for input events computed locally, by an +// Execution or Verification Node; it is not resilient to malicious inputs. +// No errors are expected during normal operation. func convertServiceEventEpochCommit(event flow.Event) (*flow.ServiceEvent, error) { - - // decode bytes using jsoncdc - payload, err := json.Decode(nil, event.Payload) + // decode bytes using ccf + payload, err := ccf.Decode(nil, event.Payload) if err != nil { return nil, fmt.Errorf("could not unmarshal event payload: %w", err) } - // parse cadence types to Go types - commit := new(flow.EpochCommit) - commit.Counter = uint64(payload.(cadence.Event).Fields[0].(cadence.UInt64)) + cdcEvent, ok := payload.(cadence.Event) + if !ok { + return nil, invalidCadenceTypeError("payload", payload, cadence.Event{}) + } + + if cdcEvent.Type() == nil { + return nil, fmt.Errorf("EpochCommit event doesn't have type") + } + + fields := cadence.FieldsMappedByName(cdcEvent) + + const expectedFieldCount = 5 + if len(fields) < expectedFieldCount { + return nil, fmt.Errorf( + "insufficient fields in EpochCommit event (%d < %d)", + len(fields), + expectedFieldCount, + ) + } + + // Extract EpochCommit event fields + + counter, err := getField[cadence.UInt64](fields, "counter") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochCommit event: %w", err) + } + + cdcClusterQCVotes, err := getField[cadence.Array](fields, "clusterQCs") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochCommit event: %w", err) + } + + cdcDKGKeys, err := getField[cadence.Array](fields, "dkgPubKeys") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochCommit event: %w", err) + } + + cdcDKGGroupKey, err := getField[cadence.String](fields, "dkgGroupKey") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochCommit event: %w", err) + } + + cdcDKGIndexMap, err := getField[cadence.Dictionary](fields, "dkgIdMapping") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochCommit event: %w", err) + } // parse cluster qc votes - cdcClusterQCVotes := payload.(cadence.Event).Fields[1].(cadence.Array).Values - commit.ClusterQCs, err = convertClusterQCVotes(cdcClusterQCVotes) + clusterQCs, err := convertClusterQCVotes(cdcClusterQCVotes.Values) if err != nil { return nil, fmt.Errorf("could not convert cluster qc votes: %w", err) } - // parse DKG group key and participants - // Note: this is read in 
the same order as `DKGClient.SubmitResult` ie. with the group public key first followed by individual keys - // https://github.com/onflow/flow-go/blob/feature/dkg/module/dkg/client.go#L182-L183 - cdcDKGKeys := payload.(cadence.Event).Fields[2].(cadence.Array).Values - dkgGroupKey, dkgParticipantKeys, err := convertDKGKeys(cdcDKGKeys) + // parse DKG participants + dKGParticipantKeys, err := convertDKGKeys(cdcDKGKeys.Values) if err != nil { - return nil, fmt.Errorf("could not convert DKG keys: %w", err) + return nil, fmt.Errorf("could not convert Random Beacon keys: %w", err) } - commit.DKGGroupKey = dkgGroupKey - commit.DKGParticipantKeys = dkgParticipantKeys + // parse DKG group key + dKGGroupKey, err := convertDKGKey(cdcDKGGroupKey) + if err != nil { + return nil, fmt.Errorf("could not convert Random Beacon group key: %w", err) + } + + // parse DKG Index Map + // + // CAUTION: When the Execution or Verification Node serializes the EpochCommit to compute its ID, the DKGIndexMap + // is converted from a map to a slice. This is necessary because maps don't have a deterministic order. For *valid* + // EpochCommit events, the following convention holds (see DKGIndexMap type declaration for details): + // - For the DKG committee 𝒟, its size is n = |𝒟| = len(DKGIndexMap). + // - The values in DKGIndexMap must form the set {0, 1, …, n-1}, as required by the low level cryptography + // module (convention simplifying the implementation). + // Therefore, a valid `DKGIndexMap` can always be represented as an `IdentifierList` slice `s` such that + // nodeID := s[i] for i ∈ {0, …, n-1} corresponds to a key value pair (nodeID, i) in DKGIndexMap. The + // `EpochCommit.EncodeRLP` method performs this conversion (and panics when the convention is violated). + // Generally, execution should be permissive and forward all system events to the Protocol State, which then + // performs comprehensive validity checks and decides whether events are accepted or rejected. However, we can + // only compute the ID of an EpochCommit whose DKGIndexMap satisfies the convention above. Furthermore, we do + // not want to depend on the System Smart Contracts to _always_ emit valid DKGIndexMap - especially for Epoch + // Recovery, where humans provide some of the parameters in the EpochCommit event. + // Therefore, we check here that DKGIndexMap satisfies the convention required by `EncodeRLP` and error + // otherwise. When we error here, the corresponding event will just be omitted from `ExecutionResult.ServiceEvents`. + // (In contrast, erroring during the ID computation will result in an irrecoverable execution halt, because the + // ExecutionResult has already been fully constructed, but can't be broadcast). + // We will only drop service events whose DKGIndexMap is invalid. As the Protocol State will anyway discard + // such events, it is fine to not relay them in the first place. 
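+	//
+	// Worked example (identifiers assumed for illustration only): for a DKG committee
+	// of size n = 3, a valid DKGIndexMap such as {idA: 0, idB: 1, idC: 2} is equivalent
+	// to the IdentifierList s = [idA, idB, idC], because s[i] = nodeID for every pair
+	// (nodeID, i). A mapping like {idA: 0, idB: 2} violates the convention, since its
+	// values {0, 2} do not form the set {0, 1}.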
+	dKGIndexMap := make(flow.DKGIndexMap, len(cdcDKGIndexMap.Pairs))
+	for _, pair := range cdcDKGIndexMap.Pairs {
+		nodeID, err := flow.HexStringToIdentifier(string(pair.Key.(cadence.String)))
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode flow.Identifier in DKGIndexMap entry from EpochCommit event: %w", err)
+		}
+		index := pair.Value.(cadence.Int).Int()
+		dKGIndexMap[nodeID] = index
+	}
+
+	commit, err := flow.NewEpochCommit(
+		flow.UntrustedEpochCommit{
+			Counter:            uint64(counter),
+			ClusterQCs:         clusterQCs,
+			DKGGroupKey:        dKGGroupKey,
+			DKGParticipantKeys: dKGParticipantKeys,
+			DKGIndexMap:        dKGIndexMap,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct epoch commit: %w", err)
+	}

 	// create the service event
@@ -224,41 +333,322 @@ func convertServiceEventEpochCommit(event flow.Event) (*flow.ServiceEvent, error
 	return serviceEvent, nil
 }

+// convertServiceEventEpochRecover converts a service event encoded as the generic
+// flow.Event type to a ServiceEvent type for an EpochRecover event.
+// CAUTION: This function must only be used for input events computed locally, by an
+// Execution or Verification Node; it is not resilient to malicious inputs.
+// No errors are expected during normal operation.
+func convertServiceEventEpochRecover(event flow.Event) (*flow.ServiceEvent, error) {
+	// decode bytes using ccf
+	payload, err := ccf.Decode(nil, event.Payload)
+	if err != nil {
+		return nil, fmt.Errorf("could not unmarshal event payload: %w", err)
+	}
+
+	// NOTE: variable names prefixed with cdc represent cadence types
+	cdcEvent, ok := payload.(cadence.Event)
+	if !ok {
+		return nil, invalidCadenceTypeError("payload", payload, cadence.Event{})
+	}
+
+	if cdcEvent.Type() == nil {
+		return nil, fmt.Errorf("EpochRecover event doesn't have type")
+	}
+
+	fields := cadence.FieldsMappedByName(cdcEvent)
+
+	const expectedFieldCount = 15
+	if len(fields) < expectedFieldCount {
+		return nil, fmt.Errorf(
+			"insufficient fields in EpochRecover event (%d < %d)",
+			len(fields),
+			expectedFieldCount,
+		)
+	}
+
+	// parse EpochRecover event
+
+	counter, err := getField[cadence.UInt64](fields, "counter")
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err)
+	}
+
+	cdcParticipants, err := getField[cadence.Array](fields, "nodeInfo")
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err)
+	}
+
+	firstView, err := getField[cadence.UInt64](fields, "firstView")
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err)
+	}
+
+	finalView, err := getField[cadence.UInt64](fields, "finalView")
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err)
+	}
+
+	cdcClusters, err := getField[cadence.Array](fields, "clusterAssignments")
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err)
+	}
+
+	randomSrcHex, err := getField[cadence.String](fields, "randomSource")
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err)
+	}
+
+	targetDuration, err := getField[cadence.UInt64](fields, "targetDuration") // Epoch duration [seconds]
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err)
+	}
+
+	targetEndTimeUnix, err := getField[cadence.UInt64](fields, "targetEndTime") // Unix time [seconds]
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err)
+	}
+
+	dkgPhase1FinalView, err
:= getField[cadence.UInt64](fields, "DKGPhase1FinalView") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err) + } + + dkgPhase2FinalView, err := getField[cadence.UInt64](fields, "DKGPhase2FinalView") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err) + } + + dkgPhase3FinalView, err := getField[cadence.UInt64](fields, "DKGPhase3FinalView") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err) + } + + cdcClusterQCVoteData, err := getField[cadence.Array](fields, "clusterQCVoteData") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err) + } + + cdcDKGKeys, err := getField[cadence.Array](fields, "dkgPubKeys") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err) + } + + cdcDKGGroupKey, err := getField[cadence.String](fields, "dkgGroupKey") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err) + } + + cdcDKGIndexMap, err := getField[cadence.Dictionary](fields, "dkgIdMapping") + if err != nil { + return nil, fmt.Errorf("failed to decode EpochRecover event: %w", err) + } + + // random source from the event must be a hex string + // containing exactly 128 bits (equivalent to 16 bytes or 32 hex characters) + randomSource, err := hex.DecodeString(string(randomSrcHex)) + if err != nil { + return nil, fmt.Errorf( + "failed to decode random source hex (%v) from EpochRecover event: %w", + randomSrcHex, + err, + ) + } + + // parse cluster assignments; returned assignments are in canonical order + assignments, err := convertEpochRecoverCollectorClusterAssignments(cdcClusters.Values) + if err != nil { + return nil, fmt.Errorf("failed to convert cluster assignments from EpochRecover event: %w", err) + } + + // parse epoch participants; returned node identities are in canonical order + participants, err := convertParticipants(cdcParticipants.Values) + if err != nil { + return nil, fmt.Errorf("failed to convert participants from EpochRecover event: %w", err) + } + + setup, err := flow.NewEpochSetup( + flow.UntrustedEpochSetup{ + Counter: uint64(counter), + FirstView: uint64(firstView), + DKGPhase1FinalView: uint64(dkgPhase1FinalView), + DKGPhase2FinalView: uint64(dkgPhase2FinalView), + DKGPhase3FinalView: uint64(dkgPhase3FinalView), + FinalView: uint64(finalView), + Participants: participants, + Assignments: assignments, + RandomSource: randomSource, + TargetDuration: uint64(targetDuration), + TargetEndTime: uint64(targetEndTimeUnix), + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct epoch setup: %w", err) + } + + // parse cluster qc votes + clusterQCs, err := convertClusterQCVoteData(cdcClusterQCVoteData.Values) + if err != nil { + return nil, fmt.Errorf("failed to decode clusterQCVoteData from EpochRecover event: %w", err) + } + + // parse DKG participants + dKGParticipantKeys, err := convertDKGKeys(cdcDKGKeys.Values) + if err != nil { + return nil, fmt.Errorf("failed to decode Random Beacon key shares from EpochRecover event: %w", err) + } + + // parse DKG group key + dKGGroupKey, err := convertDKGKey(cdcDKGGroupKey) + if err != nil { + return nil, fmt.Errorf("failed to decode Random Beacon group key from EpochRecover event: %w", err) + } + + // parse DKG Index Map + // + // CAUTION: When the Execution or Verification Node serializes the EpochCommit to compute its ID, the DKGIndexMap + // is converted from a map to a slice. 
This is necessary because maps don't have a deterministic order. For *valid*
+	// EpochCommit events, the following convention holds (see DKGIndexMap type declaration for details):
+	//   - For the DKG committee 𝒟, its size is n = |𝒟| = len(DKGIndexMap).
+	//   - The values in DKGIndexMap must form the set {0, 1, …, n-1}, as required by the low level cryptography
+	//     module (convention simplifying the implementation).
+	// Therefore, a valid `DKGIndexMap` can always be represented as an `IdentifierList` slice `s` such that
+	// nodeID := s[i] for i ∈ {0, …, n-1} corresponds to a key value pair (nodeID, i) in DKGIndexMap. The
+	// `EpochCommit.EncodeRLP` method performs this conversion (and panics when the convention is violated).
+	// Generally, execution should be permissive and forward all system events to the Protocol State, which then
+	// performs comprehensive validity checks and decides whether events are accepted or rejected. However, we can
+	// only compute the ID of an EpochCommit whose DKGIndexMap satisfies the convention above. Furthermore, we do
+	// not want to depend on the System Smart Contracts to _always_ emit valid DKGIndexMap - especially for Epoch
+	// Recovery, where humans provide some of the parameters in the EpochCommit event.
+	// Therefore, we check here that DKGIndexMap satisfies the convention required by `EncodeRLP` and error
+	// otherwise. When we error here, the corresponding event will just be omitted from `ExecutionResult.ServiceEvents`.
+	// (In contrast, erroring during the ID computation will result in an irrecoverable execution halt, because the
+	// ExecutionResult has already been fully constructed, but can't be broadcast).
+	// We will only drop service events whose DKGIndexMap is invalid. As the Protocol State will anyway discard
+	// such events, it is fine to not relay them in the first place.
+	dKGIndexMap := make(flow.DKGIndexMap, len(cdcDKGIndexMap.Pairs))
+	for _, pair := range cdcDKGIndexMap.Pairs {
+		nodeID, err := flow.HexStringToIdentifier(string(pair.Key.(cadence.String)))
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode flow.Identifier in DKGIndexMap entry from EpochRecover event: %w", err)
+		}
+		index := pair.Value.(cadence.Int).Int()
+		dKGIndexMap[nodeID] = index
+	}
+
+	commit, err := flow.NewEpochCommit(
+		flow.UntrustedEpochCommit{
+			Counter:            uint64(counter),
+			ClusterQCs:         clusterQCs,
+			DKGGroupKey:        dKGGroupKey,
+			DKGParticipantKeys: dKGParticipantKeys,
+			DKGIndexMap:        dKGIndexMap,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct epoch commit: %w", err)
+	}
+
+	// create the service event
+	epochRecover, err := flow.NewEpochRecover(
+		flow.UntrustedEpochRecover{
+			EpochSetup:  *setup,
+			EpochCommit: *commit,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct epoch recover: %w", err)
+	}
+
+	serviceEvent := &flow.ServiceEvent{
+		Type:  flow.ServiceEventRecover,
+		Event: epochRecover,
+	}
+
+	return serviceEvent, nil
+}
+
+// convertEpochRecoverCollectorClusterAssignments converts collector cluster assignments for the EpochRecover event.
+// This is a simplified version compared to the `convertClusterAssignments` function since we are dealing with
+// a list of participants that don't need to be ordered by index or node weights.
+// No errors are expected during normal operation.
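+//
+// Sketch of the expected input shape (hex node IDs shortened for illustration):
+//
+//	[["aa…", "bb…"], ["cc…", "dd…"]]
+//
+// i.e. a nested Cadence array in which each inner array lists the node IDs of one
+// cluster; each returned cluster is sorted in canonical order.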
+func convertEpochRecoverCollectorClusterAssignments(cdcClusters []cadence.Value) (flow.AssignmentList, error) {
+	// parse cluster assignments to Go types
+	clusterAssignments := make([]flow.IdentifierList, 0, len(cdcClusters))
+	// cluster assignments are represented as a nested array, where each element
+	// is the list of node IDs forming one cluster.
+	for _, value := range cdcClusters {
+		cdcCluster, ok := value.(cadence.Array)
+		if !ok {
+			return nil, invalidCadenceTypeError("collectorClusters[i]", cdcCluster, cadence.Array{})
+		}
+
+		clusterMembers := make(flow.IdentifierList, 0, len(cdcCluster.Values))
+		for _, cdcClusterParticipant := range cdcCluster.Values {
+			nodeIDString, ok := cdcClusterParticipant.(cadence.String)
+			if !ok {
+				return nil, invalidCadenceTypeError(
+					"collectorClusters[i][j]",
+					cdcClusterParticipant,
+					cadence.String(""),
+				)
+			}
+
+			nodeID, err := flow.HexStringToIdentifier(string(nodeIDString))
+			if err != nil {
+				return nil, fmt.Errorf(
+					"could not convert hex string to identifier: %w",
+					err,
+				)
+			}
+			clusterMembers = append(clusterMembers, nodeID)
+		}
+
+		// IMPORTANT: for each cluster, node IDs must be in *canonical order*
+		clusterAssignments = append(clusterAssignments, clusterMembers.Sort(flow.IdentifierCanonical))
+	}
+
+	return clusterAssignments, nil
+}
+
 // convertClusterAssignments converts the Cadence representation of cluster
 // assignments included in the EpochSetup into the protocol AssignmentList
 // representation.
+// CONVENTION: for each cluster assignment (i.e. element in `AssignmentList`), the nodeIDs are listed in CANONICAL ORDER
+// No errors are expected during normal operation.
 func convertClusterAssignments(cdcClusters []cadence.Value) (flow.AssignmentList, error) {
-
 	// ensure we don't have duplicate cluster indices
 	indices := make(map[uint]struct{})
 
 	// parse cluster assignments to Go types
-	identifierLists := make([]flow.IdentifierList, len(cdcClusters))
+	clusterAssignments := make([]flow.IdentifierList, len(cdcClusters))
 	for _, value := range cdcClusters {
-
 		cdcCluster, ok := value.(cadence.Struct)
 		if !ok {
 			return nil, invalidCadenceTypeError("cluster", cdcCluster, cadence.Struct{})
 		}
 
-		expectedFields := 2
-		if len(cdcCluster.Fields) < expectedFields {
+		if cdcCluster.Type() == nil {
+			return nil, fmt.Errorf("cluster struct doesn't have type")
+		}
+
+		fields := cadence.FieldsMappedByName(cdcCluster)
+
+		const expectedFieldCount = 2
+		if len(fields) < expectedFieldCount {
 			return nil, fmt.Errorf(
 				"insufficient fields (%d < %d)",
-				len(cdcCluster.Fields),
-				expectedFields,
+				len(fields),
+				expectedFieldCount,
 			)
 		}
 
-		// ensure cluster index is valid
-		clusterIndex, ok := cdcCluster.Fields[0].(cadence.UInt16)
-		if !ok {
-			return nil, invalidCadenceTypeError(
-				"clusterIndex",
-				cdcCluster.Fields[0],
-				cadence.UInt16(0),
-			)
+		// Extract cluster fields
+
+		clusterIndex, err := getField[cadence.UInt16](fields, "index")
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode cluster struct: %w", err)
 		}
+		// ensure cluster index is valid
 		if int(clusterIndex) >= len(cdcClusters) {
 			return nil, fmt.Errorf(
 				"invalid cdcCluster index (%d) outside range [0,%d]",
@@ -271,18 +661,14 @@ func convertClusterAssignments(cdcClusters []cadence.Value) (flow.AssignmentList
 			return nil, fmt.Errorf("duplicate cdcCluster index (%d)", clusterIndex)
 		}
 
-		// read weights to retrieve node IDs of cdcCluster members
-		weightsByNodeID, ok := cdcCluster.Fields[1].(cadence.Dictionary)
-		if !ok {
-			return nil, invalidCadenceTypeError(
-				"clusterWeights",
- cdcCluster.Fields[1], - cadence.Dictionary{}, - ) + weightsByNodeID, err := getField[cadence.Dictionary](fields, "nodeWeights") + if err != nil { + return nil, fmt.Errorf("failed to decode cluster struct: %w", err) } + // read weights to retrieve node IDs of cdcCluster members + clusterMembers := make(flow.IdentifierList, 0, len(weightsByNodeID.Pairs)) for _, pair := range weightsByNodeID.Pairs { - nodeIDString, ok := pair.Key.(cadence.String) if !ok { return nil, invalidCadenceTypeError( @@ -298,26 +684,24 @@ func convertClusterAssignments(cdcClusters []cadence.Value) (flow.AssignmentList err, ) } - - identifierLists[clusterIndex] = append(identifierLists[clusterIndex], nodeID) + clusterMembers = append(clusterMembers, nodeID) } - } - // sort identifier lists in Canonical order - assignments := assignment.FromIdentifierLists(identifierLists) + // IMPORTANT: for each cluster, node IDs must be in *canonical order* + clusterAssignments[clusterIndex] = clusterMembers.Sort(flow.IdentifierCanonical) + } - return assignments, nil + return clusterAssignments, nil } // convertParticipants converts the network participants specified in the // EpochSetup event into an IdentityList. -func convertParticipants(cdcParticipants []cadence.Value) (flow.IdentityList, error) { - - participants := make(flow.IdentityList, 0, len(cdcParticipants)) - var err error +// CONVENTION: returned IdentityList is in CANONICAL ORDER +func convertParticipants(cdcParticipants []cadence.Value) (flow.IdentitySkeletonList, error) { + participants := make(flow.IdentitySkeletonList, 0, len(cdcParticipants)) for _, value := range cdcParticipants { - + // checking compliance with expected format cdcNodeInfoStruct, ok := value.(cadence.Struct) if !ok { return nil, invalidCadenceTypeError( @@ -326,75 +710,68 @@ func convertParticipants(cdcParticipants []cadence.Value) (flow.IdentityList, er cadence.Struct{}, ) } - cdcNodeInfoFields := cdcNodeInfoStruct.Fields - expectedFields := 14 - if len(cdcNodeInfoFields) < expectedFields { + if cdcNodeInfoStruct.Type() == nil { + return nil, fmt.Errorf("nodeInfo struct doesn't have type") + } + + fields := cadence.FieldsMappedByName(cdcNodeInfoStruct) + + const expectedFieldCount = 14 + if len(fields) < expectedFieldCount { return nil, fmt.Errorf( "insufficient fields (%d < %d)", - len(cdcNodeInfoFields), - expectedFields, + len(fields), + expectedFieldCount, ) } - // create and assign fields to identity from cadence Struct - identity := new(flow.Identity) - role, ok := cdcNodeInfoFields[1].(cadence.UInt8) - if !ok { - return nil, invalidCadenceTypeError( - "nodeInfo.role", - cdcNodeInfoFields[1], - cadence.UInt8(0), - ) + nodeIDHex, err := getField[cadence.String](fields, "id") + if err != nil { + return nil, fmt.Errorf("failed to decode nodeInfo struct: %w", err) + } + + role, err := getField[cadence.UInt8](fields, "role") + if err != nil { + return nil, fmt.Errorf("failed to decode nodeInfo struct: %w", err) + } + if !flow.Role(role).Valid() { + return nil, fmt.Errorf("invalid role %d", role) + } + + address, err := getField[cadence.String](fields, "networkingAddress") + if err != nil { + return nil, fmt.Errorf("failed to decode nodeInfo struct: %w", err) + } + + networkKeyHex, err := getField[cadence.String](fields, "networkingKey") + if err != nil { + return nil, fmt.Errorf("failed to decode nodeInfo struct: %w", err) } - identity.Role = flow.Role(role) - if !identity.Role.Valid() { - return nil, fmt.Errorf("invalid role %d", role) + + stakingKeyHex, err := 
getField[cadence.String](fields, "stakingKey") + if err != nil { + return nil, fmt.Errorf("failed to decode nodeInfo struct: %w", err) } - address, ok := cdcNodeInfoFields[2].(cadence.String) - if !ok { - return nil, invalidCadenceTypeError( - "nodeInfo.address", - cdcNodeInfoFields[2], - cadence.String(""), - ) + initialWeight, err := getField[cadence.UInt64](fields, "initialWeight") + if err != nil { + return nil, fmt.Errorf("failed to decode nodeInfo struct: %w", err) } - identity.Address = string(address) - initialWeight, ok := cdcNodeInfoFields[13].(cadence.UInt64) - if !ok { - return nil, invalidCadenceTypeError( - "nodeInfo.initialWeight", - cdcNodeInfoFields[13], - cadence.UInt64(0), - ) + identity := &flow.IdentitySkeleton{ + InitialWeight: uint64(initialWeight), + Address: string(address), + Role: flow.Role(role), } - identity.Weight = uint64(initialWeight) // convert nodeID string into identifier - nodeIDHex, ok := cdcNodeInfoFields[0].(cadence.String) - if !ok { - return nil, invalidCadenceTypeError( - "nodeInfo.id", - cdcNodeInfoFields[0], - cadence.String(""), - ) - } identity.NodeID, err = flow.HexStringToIdentifier(string(nodeIDHex)) if err != nil { return nil, fmt.Errorf("could not convert hex string to identifer: %w", err) } // parse to PublicKey the networking key hex string - networkKeyHex, ok := cdcNodeInfoFields[3].(cadence.String) - if !ok { - return nil, invalidCadenceTypeError( - "nodeInfo.networkKey", - cdcNodeInfoFields[3], - cadence.String(""), - ) - } networkKeyBytes, err := hex.DecodeString(string(networkKeyHex)) if err != nil { return nil, fmt.Errorf( @@ -411,14 +788,6 @@ func convertParticipants(cdcParticipants []cadence.Value) (flow.IdentityList, er } // parse to PublicKey the staking key hex string - stakingKeyHex, ok := cdcNodeInfoFields[4].(cadence.String) - if !ok { - return nil, invalidCadenceTypeError( - "nodeInfo.stakingKey", - cdcNodeInfoFields[4], - cadence.String(""), - ) - } stakingKeyBytes, err := hex.DecodeString(string(stakingKeyHex)) if err != nil { return nil, fmt.Errorf( @@ -437,10 +806,109 @@ func convertParticipants(cdcParticipants []cadence.Value) (flow.IdentityList, er participants = append(participants, identity) } - participants = participants.Sort(order.Canonical) + // IMPORTANT: returned identities must be in *canonical order* + participants = participants.Sort(flow.Canonical[flow.IdentitySkeleton]) return participants, nil } +// convertClusterQCVoteData converts cluster QC vote data from the EpochRecover event +// to a representation suitable for inclusion in the protocol state. Votes are +// aggregated as part of this conversion. +// TODO(efm-recovery): update this function for new QCVoteData structure (see https://github.com/onflow/flow-go/pull/5943#discussion_r1605267444) +func convertClusterQCVoteData(cdcClusterQCVoteData []cadence.Value) ([]flow.ClusterQCVoteData, error) { + qcVoteDatas := make([]flow.ClusterQCVoteData, 0, len(cdcClusterQCVoteData)) + + // CAUTION: Votes are not validated prior to aggregation. This means a single + // invalid vote submission will result in a fully invalid QC for that cluster. + // Votes must be validated by the ClusterQC smart contract. 
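+	//
+	// Illustrative per-element shape (the field names match those read below; the
+	// concrete values are assumptions): a Cadence struct carrying
+	//	voterIDs: [String]            // hex node IDs of the contributing collectors
+	//	aggregatedSignature: String   // hex-encoded aggregated BLS signature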
+
+	for _, cdcClusterQC := range cdcClusterQCVoteData {
+		cdcClusterQCStruct, ok := cdcClusterQC.(cadence.Struct)
+		if !ok {
+			return nil, invalidCadenceTypeError(
+				"clusterQC",
+				cdcClusterQC,
+				cadence.Struct{},
+			)
+		}
+
+		if cdcClusterQCStruct.Type() == nil {
+			return nil, fmt.Errorf("clusterQCVoteData struct doesn't have type")
+		}
+
+		fields := cadence.FieldsMappedByName(cdcClusterQCStruct)
+
+		const expectedFieldCount = 2
+		if len(fields) < expectedFieldCount {
+			return nil, fmt.Errorf(
+				"insufficient fields (%d < %d)",
+				len(fields),
+				expectedFieldCount,
+			)
+		}
+
+		cdcVoterIDs, err := getField[cadence.Array](fields, "voterIDs")
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode clusterQCVoteData struct: %w", err)
+		}
+
+		voterIDs := make([]flow.Identifier, 0, len(cdcVoterIDs.Values))
+		for _, cdcVoterID := range cdcVoterIDs.Values {
+			voterIDHex, ok := cdcVoterID.(cadence.String)
+			if !ok {
+				return nil, invalidCadenceTypeError(
+					"clusterQC[i].voterID",
+					cdcVoterID,
+					cadence.String(""),
+				)
+			}
+			voterID, err := flow.HexStringToIdentifier(string(voterIDHex))
+			if err != nil {
+				return nil, fmt.Errorf("could not convert voter ID from hex: %w", err)
+			}
+			voterIDs = append(voterIDs, voterID)
+		}
+
+		cdcAggSignature, err := getField[cadence.String](fields, "aggregatedSignature")
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode clusterQCVoteData struct: %w", err)
+		}
+
+		aggregatedSignature, err := hex.DecodeString(string(cdcAggSignature))
+		if err != nil {
+			return nil, fmt.Errorf("could not convert raw vote from hex: %w", err)
+		}
+
+		// check that aggregated signature is not identity, because an identity signature
+		// is invalid if verified under an identity public key. This can happen in two cases:
+		//  - If the quorum has at least one honest signer, and given all staking key proofs of possession
+		//    are valid, it's extremely unlikely for the aggregated public key (and the corresponding
+		//    aggregated signature) to be identity.
+		//  - If the entire quorum is malicious and intentionally forges an identity aggregate. Per the previous
+		//    point, this is only possible if no honest collector is involved in constructing the cluster QC.
+		//    Hence, the cluster would need to contain a supermajority of malicious collectors.
+		//    As we are assuming that the fraction of malicious collectors overall does not exceed 1/3 (measured
+		//    by stake), the probability for randomly assigning 2/3 or more byzantine collectors to a single cluster
+		//    vanishes (provided a sufficiently high collector count in total).
+		//
+		// Note that at this level, all individual signatures are guaranteed to be valid
+		// w.r.t their corresponding staking public key. It is therefore enough to check
+		// the aggregated signature to conclude whether the aggregated public key is identity.
+		// This check is therefore a sanity check to catch a potential issue early.
+		if crypto.IsBLSSignatureIdentity(aggregatedSignature) {
+			return nil, fmt.Errorf("cluster qc vote aggregation failed because resulting BLS signature is identity")
+		}
+
+		// set the fields on the QC vote data object
+		qcVoteDatas = append(qcVoteDatas, flow.ClusterQCVoteData{
+			SigData:  aggregatedSignature,
+			VoterIDs: voterIDs,
+		})
+	}
+
+	return qcVoteDatas, nil
+}
+
 // convertClusterQCVotes converts raw cluster QC votes from the EpochCommit event
 // to a representation suitable for inclusion in the protocol state. Votes are
 // aggregated as part of this conversion.
@@ -466,25 +934,37 @@ func convertClusterQCVotes(cdcClusterQCs []cadence.Value) ( cadence.Struct{}, ) } - cdcClusterQCFields := cdcClusterQCStruct.Fields - expectedFields := 4 - if len(cdcClusterQCFields) < expectedFields { + if cdcClusterQCStruct.Type() == nil { + return nil, fmt.Errorf("clusterQC struct doesn't have type") + } + + fields := cadence.FieldsMappedByName(cdcClusterQCStruct) + + const expectedFieldCount = 4 + if len(fields) < expectedFieldCount { return nil, fmt.Errorf( "insufficient fields (%d < %d)", - len(cdcClusterQCFields), - expectedFields, + len(fields), + expectedFieldCount, ) } - index, ok := cdcClusterQCFields[0].(cadence.UInt16) - if !ok { - return nil, invalidCadenceTypeError( - "clusterQC.index", - cdcClusterQCFields[0], - cadence.UInt16(0), - ) + index, err := getField[cadence.UInt16](fields, "index") + if err != nil { + return nil, fmt.Errorf("failed to decode clusterQC struct: %w", err) + } + + cdcRawVotes, err := getField[cadence.Array](fields, "voteSignatures") + if err != nil { + return nil, fmt.Errorf("failed to decode clusterQC struct: %w", err) + } + + cdcVoterIDs, err := getField[cadence.Array](fields, "voterIDs") + if err != nil { + return nil, fmt.Errorf("failed to decode clusterQC struct: %w", err) } + if int(index) >= len(cdcClusterQCs) { return nil, fmt.Errorf( "invalid index (%d) not in range [0,%d]", @@ -497,15 +977,6 @@ func convertClusterQCVotes(cdcClusterQCs []cadence.Value) ( return nil, fmt.Errorf("duplicate cluster QC index (%d)", index) } - cdcVoterIDs, ok := cdcClusterQCFields[3].(cadence.Array) - if !ok { - return nil, invalidCadenceTypeError( - "clusterQC.voterIDs", - cdcClusterQCFields[2], - cadence.Array{}, - ) - } - voterIDs := make([]flow.Identifier, 0, len(cdcVoterIDs.Values)) for _, cdcVoterID := range cdcVoterIDs.Values { voterIDHex, ok := cdcVoterID.(cadence.String) @@ -524,7 +995,6 @@ func convertClusterQCVotes(cdcClusterQCs []cadence.Value) ( } // gather all the vote signatures - cdcRawVotes := cdcClusterQCFields[1].(cadence.Array) signatures := make([]crypto.Signature, 0, len(cdcRawVotes.Values)) for _, cdcRawVote := range cdcRawVotes.Values { rawVoteHex, ok := cdcRawVote.(cadence.String) @@ -582,101 +1052,176 @@ func convertClusterQCVotes(cdcClusterQCs []cadence.Value) ( return qcVoteDatas, nil } -// convertDKGKeys converts hex-encoded DKG public keys as received by the DKG +// convertDKGKeys converts hex-encoded public beacon keys as received by the DKG // smart contract into crypto.PublicKey representations suitable for inclusion // in the protocol state. 
-func convertDKGKeys(cdcDKGKeys []cadence.Value) (
-	groupKey crypto.PublicKey,
-	participantKeys []crypto.PublicKey,
-	err error,
-) {
-
-	hexDKGKeys := make([]string, 0, len(cdcDKGKeys))
+func convertDKGKeys(cdcDKGKeys []cadence.Value) ([]crypto.PublicKey, error) {
+	convertedKeys := make([]crypto.PublicKey, 0, len(cdcDKGKeys))
 	for _, value := range cdcDKGKeys {
-		keyHex, ok := value.(cadence.String)
-		if !ok {
-			return nil, nil, invalidCadenceTypeError("dkgKey", value, cadence.String(""))
+		pubKey, err := convertDKGKey(value)
+		if err != nil {
+			return nil, fmt.Errorf("could not decode public beacon key share: %w", err)
 		}
-		hexDKGKeys = append(hexDKGKeys, string(keyHex))
+		convertedKeys = append(convertedKeys, pubKey)
 	}
+	return convertedKeys, nil
+}
 
-	// pop first element - group public key hex string
-	groupPubKeyHex := hexDKGKeys[0]
-	hexDKGKeys = hexDKGKeys[1:]
+// convertDKGKey converts a single hex-encoded public beacon key as received by the DKG
+// smart contract into crypto.PublicKey representations suitable for inclusion
+// in the protocol state.
+func convertDKGKey(cdcDKGKeys cadence.Value) (crypto.PublicKey, error) {
+	// extract string representation from Cadence Value
+	keyHex, ok := cdcDKGKeys.(cadence.String)
+	if !ok {
+		return nil, invalidCadenceTypeError("dkgKey", cdcDKGKeys, cadence.String(""))
+	}
 
-	// decode group public key
-	groupKeyBytes, err := hex.DecodeString(groupPubKeyHex)
+	// decode the public key
+	pubKeyBytes, err := hex.DecodeString(string(keyHex))
 	if err != nil {
-		return nil, nil, fmt.Errorf(
-			"could not decode group public key into bytes: %w",
-			err,
-		)
+		return nil, fmt.Errorf("converting hex to bytes failed: %w", err)
 	}
-	groupKey, err = crypto.DecodePublicKey(crypto.BLSBLS12381, groupKeyBytes)
+	pubKey, err := crypto.DecodePublicKey(crypto.BLSBLS12381, pubKeyBytes)
 	if err != nil {
-		return nil, nil, fmt.Errorf("could not decode group public key: %w", err)
-	}
-
-	// decode individual public keys
-	dkgParticipantKeys := make([]crypto.PublicKey, 0, len(hexDKGKeys))
-	for _, pubKeyString := range hexDKGKeys {
-
-		pubKeyBytes, err := hex.DecodeString(pubKeyString)
-		if err != nil {
-			return nil, nil, fmt.Errorf(
-				"could not decode individual public key into bytes: %w",
-				err,
-			)
-		}
-		pubKey, err := crypto.DecodePublicKey(crypto.BLSBLS12381, pubKeyBytes)
-		if err != nil {
-			return nil, nil, fmt.Errorf("could not decode dkg public key: %w", err)
-		}
-		dkgParticipantKeys = append(dkgParticipantKeys, pubKey)
+		return nil, fmt.Errorf("could not decode bytes into a public key: %w", err)
 	}
-
-	return groupKey, dkgParticipantKeys, nil
+	return pubKey, nil
 }
 
 func invalidCadenceTypeError(
 	fieldName string,
 	actualType, expectedType cadence.Value,
 ) error {
+	// NOTE: This error is reported if the Go-types are different (not if Cadence types are different).
+	// Therefore, print the Go-type instead of cadence type.
+	// Cadence type can be `nil`, since the `expectedType` is always the zero-value of the Go type.
 	return fmt.Errorf(
-		"invalid Cadence type for field %s (got=%s, expected=%s)",
+		"invalid Cadence type for field %s (got=%T, expected=%T)",
 		fieldName,
-		actualType.Type().ID(),
-		expectedType.Type().ID(),
+		actualType,
+		expectedType,
 	)
 }
 
+// convertServiceEventProtocolStateVersionUpgrade converts a Cadence instance of the ProtocolStateVersionUpgrade
+// CAUTION: This function must only be used for input events computed locally, by an
+// Execution or Verification Node; it is not resilient to malicious inputs.
+// No errors are expected during normal operation.
+func convertServiceEventProtocolStateVersionUpgrade(event flow.Event) (*flow.ServiceEvent, error) {
+	payload, err := ccf.Decode(nil, event.Payload)
+	if err != nil {
+		return nil, fmt.Errorf("could not unmarshal event payload: %w", err)
+	}
+
+	versionUpgrade, err := DecodeCadenceValue("ProtocolStateVersionUpgrade payload", payload,
+		func(cdcEvent cadence.Event) (*flow.ProtocolStateVersionUpgrade, error) {
+
+			if cdcEvent.Type() == nil {
+				return nil, fmt.Errorf("ProtocolStateVersionUpgrade event doesn't have type")
+			}
+
+			fields := cadence.FieldsMappedByName(cdcEvent)
+
+			const expectedFieldCount = 2
+			if len(fields) < expectedFieldCount {
+				return nil, fmt.Errorf("unexpected number of fields in ProtocolStateVersionUpgrade (%d < %d)",
+					len(fields), expectedFieldCount)
+			}
+
+			newProtocolVersionValue, err := getField[cadence.Value](fields, "newProtocolVersion")
+			if err != nil {
+				return nil, fmt.Errorf("failed to decode ProtocolStateVersionUpgrade event: %w", err)
+			}
+
+			activeViewValue, err := getField[cadence.Value](fields, "activeView")
+			if err != nil {
+				return nil, fmt.Errorf("failed to decode ProtocolStateVersionUpgrade event: %w", err)
+			}
+
+			newProtocolVersion, err := DecodeCadenceValue(
+				".newProtocolVersion", newProtocolVersionValue, func(cadenceVal cadence.UInt64) (uint64, error) {
+					return uint64(cadenceVal), nil
+				},
+			)
+			if err != nil {
+				return nil, err
+			}
+			activeView, err := DecodeCadenceValue(
+				".activeView", activeViewValue, func(cadenceVal cadence.UInt64) (uint64, error) {
+					return uint64(cadenceVal), nil
+				},
+			)
+			if err != nil {
+				return nil, err
+			}
+
+			return &flow.ProtocolStateVersionUpgrade{
+				NewProtocolStateVersion: newProtocolVersion,
+				ActiveView:              activeView,
+			}, nil
+		})
+	if err != nil {
+		return nil, fmt.Errorf("could not decode cadence value: %w", err)
+	}
+
+	// create the service event
+	serviceEvent := &flow.ServiceEvent{
+		Type:  flow.ServiceEventProtocolStateVersionUpgrade,
+		Event: versionUpgrade,
+	}
+	return serviceEvent, nil
+}
+
+// convertServiceEventVersionBeacon converts a Cadence instance of the VersionBeacon
+// service event to the protocol-internal representation.
+// CAUTION: This function must only be used for input events computed locally, by an
+// Execution or Verification Node; it is not resilient to malicious inputs.
+// No errors are expected during normal operation.
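+// For orientation, a successfully decoded payload corresponds to a value like the
+// following (hypothetical numbers, mirroring the fixtures in the tests below):
+//
+//	flow.VersionBeacon{
+//		VersionBoundaries: []flow.VersionBoundary{{BlockHeight: 44, Version: "2.13.7"}},
+//		Sequence:          5,
+//	}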
func convertServiceEventVersionBeacon(event flow.Event) (*flow.ServiceEvent, error) { - payload, err := json.Decode(nil, event.Payload) + payload, err := ccf.Decode(nil, event.Payload) if err != nil { return nil, fmt.Errorf("could not unmarshal event payload: %w", err) } versionBeacon, err := DecodeCadenceValue( - "VersionBeacon payload", payload, func(event cadence.Event) ( - flow.VersionBeacon, - error, - ) { - if len(event.Fields) != 2 { - return flow.VersionBeacon{}, fmt.Errorf( - "incorrect number of fields (%d != 2)", - len(event.Fields), + "VersionBeacon payload", payload, func(cdcEvent cadence.Event) (*flow.VersionBeacon, error) { + + if cdcEvent.Type() == nil { + return nil, fmt.Errorf("VersionBeacon event doesn't have type") + } + + fields := cadence.FieldsMappedByName(cdcEvent) + + const expectedFieldCount = 2 + if len(fields) != expectedFieldCount { + return nil, fmt.Errorf( + "unexpected number of fields in VersionBeacon event (%d != %d)", + len(fields), + expectedFieldCount, ) } + versionBoundariesValue, err := getField[cadence.Value](fields, "versionBoundaries") + if err != nil { + return nil, fmt.Errorf("failed to decode VersionBeacon event: %w", err) + } + + sequenceValue, err := getField[cadence.Value](fields, "sequence") + if err != nil { + return nil, fmt.Errorf("failed to decode VersionBeacon event: %w", err) + } + versionBoundaries, err := DecodeCadenceValue( - ".Fields[0]", event.Fields[0], convertVersionBoundaries, + ".versionBoundaries", versionBoundariesValue, convertVersionBoundaries, ) if err != nil { - return flow.VersionBeacon{}, err + return nil, err } sequence, err := DecodeCadenceValue( - ".Fields[1]", event.Fields[1], func(cadenceVal cadence.UInt64) ( + ".sequence", sequenceValue, func(cadenceVal cadence.UInt64) ( uint64, error, ) { @@ -684,10 +1229,10 @@ func convertServiceEventVersionBeacon(event flow.Event) (*flow.ServiceEvent, err }, ) if err != nil { - return flow.VersionBeacon{}, err + return nil, err } - return flow.VersionBeacon{ + return &flow.VersionBeacon{ VersionBoundaries: versionBoundaries, Sequence: sequence, }, err @@ -697,10 +1242,15 @@ func convertServiceEventVersionBeacon(event flow.Event) (*flow.ServiceEvent, err return nil, err } + // a converted version beacon event should also be valid + if err := versionBeacon.Validate(); err != nil { + return nil, fmt.Errorf("invalid VersionBeacon event: %w", err) + } + // create the service event serviceEvent := &flow.ServiceEvent{ Type: flow.ServiceEventVersionBeacon, - Event: &versionBeacon, + Event: versionBeacon, } return serviceEvent, nil @@ -713,51 +1263,12 @@ func convertVersionBoundaries(array cadence.Array) ( boundaries := make([]flow.VersionBoundary, len(array.Values)) for i, cadenceVal := range array.Values { - boundary, err := DecodeCadenceValue( - fmt.Sprintf(".Values[%d]", i), - cadenceVal, - func(structVal cadence.Struct) ( - flow.VersionBoundary, - error, - ) { - if len(structVal.Fields) < 2 { - return flow.VersionBoundary{}, fmt.Errorf( - "incorrect number of fields (%d != 2)", - len(structVal.Fields), - ) - } - - height, err := DecodeCadenceValue( - ".Fields[0]", - structVal.Fields[0], - func(cadenceVal cadence.UInt64) ( - uint64, - error, - ) { - return uint64(cadenceVal), nil - }, - ) - if err != nil { - return flow.VersionBoundary{}, err - } - - version, err := DecodeCadenceValue( - ".Fields[1]", - structVal.Fields[1], - convertSemverVersion, - ) - if err != nil { - return flow.VersionBoundary{}, err - } - - return flow.VersionBoundary{ - BlockHeight: height, - Version: 
version,
-			}, nil
-			},
-		)
+		boundary, err := VersionBoundary(cadenceVal)
 		if err != nil {
-			return nil, err
+			return nil, decodeError{
+				location: fmt.Sprintf(".Values[%d]", i),
+				err:      err,
+			}
 		}
 		boundaries[i] = boundary
 	}
@@ -765,20 +1276,117 @@
 	return boundaries, nil
 }
 
+// VersionBoundary decodes a single version boundary from the given Cadence value.
+func VersionBoundary(value cadence.Value) (
+	flow.VersionBoundary,
+	error,
+) {
+	boundary, err := DecodeCadenceValue(
+		"VersionBoundary",
+		value,
+		func(structVal cadence.Struct) (
+			flow.VersionBoundary,
+			error,
+		) {
+			if structVal.Type() == nil {
+				return flow.VersionBoundary{}, fmt.Errorf("VersionBoundary struct doesn't have type")
+			}
+
+			fields := cadence.FieldsMappedByName(structVal)
+
+			const expectedFieldCount = 2
+			if len(fields) < expectedFieldCount {
+				return flow.VersionBoundary{}, fmt.Errorf(
+					"incorrect number of fields (%d < %d)",
+					len(fields),
+					expectedFieldCount,
+				)
+			}
+
+			blockHeightValue, err := getField[cadence.Value](fields, "blockHeight")
+			if err != nil {
+				return flow.VersionBoundary{}, fmt.Errorf("failed to decode VersionBoundary struct: %w", err)
+			}
+
+			versionValue, err := getField[cadence.Value](fields, "version")
+			if err != nil {
+				return flow.VersionBoundary{}, fmt.Errorf("failed to decode VersionBoundary struct: %w", err)
+			}
+
+			height, err := DecodeCadenceValue(
+				".blockHeight",
+				blockHeightValue,
+				func(cadenceVal cadence.UInt64) (
+					uint64,
+					error,
+				) {
+					return uint64(cadenceVal), nil
+				},
+			)
+			if err != nil {
+				return flow.VersionBoundary{}, err
+			}
+
+			version, err := DecodeCadenceValue(
+				".version",
+				versionValue,
+				convertSemverVersion,
+			)
+			if err != nil {
+				return flow.VersionBoundary{}, err
+			}
+
+			return flow.VersionBoundary{
+				BlockHeight: height,
+				Version:     version,
+			}, nil
+		},
+	)
+	return boundary, err
+}
+
 func convertSemverVersion(structVal cadence.Struct) (
 	string,
 	error,
 ) {
-	if len(structVal.Fields) < 4 {
+	if structVal.Type() == nil {
+		return "", fmt.Errorf("Semver struct doesn't have type")
+	}
+
+	fields := cadence.FieldsMappedByName(structVal)
+
+	const expectedFieldCount = 4
+	if len(fields) < expectedFieldCount {
 		return "", fmt.Errorf(
-			"incorrect number of fields (%d != 4)",
-			len(structVal.Fields),
+			"incorrect number of fields (%d < %d)",
+			len(fields),
+			expectedFieldCount,
 		)
 	}
 
+	majorValue, err := getField[cadence.Value](fields, "major")
+	if err != nil {
+		return "", fmt.Errorf("failed to decode SemVer struct: %w", err)
+	}
+
+	minorValue, err := getField[cadence.Value](fields, "minor")
+	if err != nil {
+		return "", fmt.Errorf("failed to decode SemVer struct: %w", err)
+	}
+
+	patchValue, err := getField[cadence.Value](fields, "patch")
+	if err != nil {
+		return "", fmt.Errorf("failed to decode SemVer struct: %w", err)
+	}
+
+	preReleaseValue, err := getField[cadence.Value](fields, "preRelease")
+	if err != nil {
+		return "", fmt.Errorf("failed to decode SemVer struct: %w", err)
+	}
+
 	major, err := DecodeCadenceValue(
-		".Fields[0]",
-		structVal.Fields[0],
+		".major",
+		majorValue,
 		func(cadenceVal cadence.UInt8) (
 			uint64,
 			error,
@@ -789,9 +1397,10 @@ func convertSemverVersion(structVal cadence.Struct) (
 	if err != nil {
 		return "", err
 	}
+
 	minor, err := DecodeCadenceValue(
-		".Fields[1]",
-		structVal.Fields[1],
+		".minor",
+		minorValue,
 		func(cadenceVal cadence.UInt8) (
 			uint64,
 			error,
@@ -802,9 +1411,10 @@ func convertSemverVersion(structVal cadence.Struct) (
 	if err != nil {
 		return "", err
 	}
+
patch, err := DecodeCadenceValue( - ".Fields[2]", - structVal.Fields[2], + ".patch", + patchValue, func(cadenceVal cadence.UInt8) ( uint64, error, @@ -815,9 +1425,10 @@ func convertSemverVersion(structVal cadence.Struct) ( if err != nil { return "", err } + preRelease, err := DecodeCadenceValue( - ".Fields[3]", - structVal.Fields[3], + ".preRelease", + preReleaseValue, func(cadenceVal cadence.Optional) ( string, error, @@ -842,22 +1453,14 @@ func convertSemverVersion(structVal cadence.Struct) ( return "", err } - version := fmt.Sprintf( - "%d.%d.%d%s", - major, - minor, - patch, - preRelease, - ) - _, err = semver.NewVersion(version) - if err != nil { - return "", fmt.Errorf( - "invalid semver %s: %w", - version, - err, - ) + version := semver.Version{ + Major: int64(major), + Minor: int64(minor), + Patch: int64(patch), + PreRelease: semver.PreRelease(preRelease), } - return version, nil + + return version.String(), nil } diff --git a/model/convert/service_event_test.go b/model/convert/service_event_test.go index 6652f3e3b8e..9f307609a73 100644 --- a/model/convert/service_event_test.go +++ b/model/convert/service_event_test.go @@ -4,11 +4,12 @@ import ( "fmt" "testing" + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/cadence" - + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -18,60 +19,132 @@ func TestEventConversion(t *testing.T) { chainID := flow.Emulator - t.Run( - "epoch setup", func(t *testing.T) { + t.Run("epoch setup", func(t *testing.T) { + fixture, expected := unittest.EpochSetupFixtureByChainID(chainID) - fixture, expected := unittest.EpochSetupFixtureByChainID(chainID) + // convert Cadence types to Go types + event, err := convert.ServiceEvent(chainID, fixture) + require.NoError(t, err) + require.NotNil(t, event) - // convert Cadence types to Go types - event, err := convert.ServiceEvent(chainID, fixture) - require.NoError(t, err) - require.NotNil(t, event) + // cast event type to epoch setup + actual, ok := event.Event.(*flow.EpochSetup) + require.True(t, ok) - // cast event type to epoch setup - actual, ok := event.Event.(*flow.EpochSetup) - require.True(t, ok) + assert.Equal(t, expected, actual) - assert.Equal(t, expected, actual) + }, + ) - }, + t.Run("epoch setup with random source with leading zeroes", func(t *testing.T) { + + fixture, _ := unittest.EpochSetupFixtureByChainID(chainID) + // all zero source to cover all cases of endiannesses + randomSource := make([]byte, flow.EpochSetupRandomSourceLength) + // update the random source in event fixture + fixture.Payload = unittest.EpochSetupFixtureCCF(randomSource) + + // convert Cadence types to Go types + event, err := convert.ServiceEvent(chainID, fixture) + require.NoError(t, err) + require.NotNil(t, event) + + // cast event type to epoch setup + _, ok := event.Event.(*flow.EpochSetup) + require.True(t, ok) + }, ) - t.Run( - "epoch commit", func(t *testing.T) { + t.Run("epoch setup with short random source", func(t *testing.T) { - fixture, expected := unittest.EpochCommitFixtureByChainID(chainID) + fixture, _ := unittest.EpochSetupFixtureByChainID(chainID) + // update the random source in event fixture + randomSource := unittest.EpochSetupRandomSourceFixture() + fixture.Payload = unittest.EpochSetupFixtureCCF(randomSource[:flow.EpochSetupRandomSourceLength-1]) - // convert 
Cadence types to Go types - event, err := convert.ServiceEvent(chainID, fixture) - require.NoError(t, err) - require.NotNil(t, event) + // convert Cadence types to Go types + event, err := convert.ServiceEvent(chainID, fixture) + require.Error(t, err) + require.Nil(t, event) + }, + ) - // cast event type to epoch commit - actual, ok := event.Event.(*flow.EpochCommit) - require.True(t, ok) + t.Run("epoch setup with non-hex random source", func(t *testing.T) { - assert.Equal(t, expected, actual) - }, + fixture, _ := unittest.EpochSetupFixtureByChainID(chainID) + // update the random source in event fixture + fixture.Payload = unittest.EpochSetupCCFWithNonHexRandomSource() + + // convert Cadence types to Go types + event, err := convert.ServiceEvent(chainID, fixture) + require.Error(t, err) + require.Nil(t, event) + }, ) - t.Run( - "version beacon", func(t *testing.T) { + t.Run("epoch commit", func(t *testing.T) { - fixture, expected := unittest.VersionBeaconFixtureByChainID(chainID) + fixture, expected := unittest.EpochCommitFixtureByChainID(chainID) - // convert Cadence types to Go types - event, err := convert.ServiceEvent(chainID, fixture) - require.NoError(t, err) - require.NotNil(t, event) + // convert Cadence types to Go types + event, err := convert.ServiceEvent(chainID, fixture) + require.NoError(t, err) + require.NotNil(t, event) - // cast event type to version beacon - actual, ok := event.Event.(*flow.VersionBeacon) - require.True(t, ok) + // cast event type to epoch commit + actual, ok := event.Event.(*flow.EpochCommit) + require.True(t, ok) - assert.Equal(t, expected, actual) - }, + assert.Equal(t, expected, actual) + }, + ) + + t.Run("epoch recover", func(t *testing.T) { + fixture, expected := unittest.EpochRecoverFixtureByChainID(chainID) + + // convert Cadence types to Go types + event, err := convert.ServiceEvent(chainID, fixture) + require.NoError(t, err) + require.NotNil(t, event) + + // cast event type to epoch recover + actual, ok := event.Event.(*flow.EpochRecover) + require.True(t, ok) + + assert.Equal(t, expected, actual) + }) + + t.Run("version beacon", func(t *testing.T) { + + fixture, expected := unittest.VersionBeaconFixtureByChainID(chainID) + + // convert Cadence types to Go types + event, err := convert.ServiceEvent(chainID, fixture) + require.NoError(t, err) + require.NotNil(t, event) + + // cast event type to version beacon + actual, ok := event.Event.(*flow.VersionBeacon) + require.True(t, ok) + + assert.Equal(t, expected, actual) + }, ) + + t.Run("protocol state version upgrade", func(t *testing.T) { + fixture, expected := unittest.ProtocolStateVersionUpgradeFixtureByChainID(chainID) + + // convert Cadence types to Go types + event, err := convert.ServiceEvent(chainID, fixture) + require.NoError(t, err) + require.NotNil(t, event) + + // cast event type to version upgrade + actual, ok := event.Event.(*flow.ProtocolStateVersionUpgrade) + require.True(t, ok) + + assert.Equal(t, expected, actual) + }) } func TestDecodeCadenceValue(t *testing.T) { @@ -143,24 +216,196 @@ func TestDecodeCadenceValue(t *testing.T) { } for _, tt := range tests { - t.Run( - tt.name, func(t *testing.T) { - result, err := convert.DecodeCadenceValue( - tt.location, - tt.value, - tt.decodeInner, - ) + t.Run(tt.name, func(t *testing.T) { + result, err := convert.DecodeCadenceValue( + tt.location, + tt.value, + tt.decodeInner, + ) - if tt.expectError { - assert.Error(t, err) - if tt.expectedLocation != "" { - assert.Contains(t, err.Error(), tt.expectedLocation) - } - } else { - 
assert.NoError(t, err) - assert.Equal(t, tt.expected, result) + if tt.expectError { + assert.Error(t, err) + if tt.expectedLocation != "" { + assert.Contains(t, err.Error(), tt.expectedLocation) } - }, + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expected, result) + } + }, ) } } + +func TestVersionBeaconEventConversion(t *testing.T) { + versionBoundaryType := unittest.NewNodeVersionBeaconVersionBoundaryStructType() + semverType := unittest.NewNodeVersionBeaconSemverStructType() + eventType := unittest.NewNodeVersionBeaconVersionBeaconEventType() + + type vbTestCase struct { + name string + event cadence.Event + converted *flow.VersionBeacon + expectAndHandleError func(t *testing.T, err error) + } + + runVersionBeaconTestCase := func(t *testing.T, test vbTestCase) { + chainID := flow.Emulator + t.Run(test.name, func(t *testing.T) { + events := systemcontracts.ServiceEventsForChain(chainID) + + payload, err := ccf.Encode(test.event) + require.NoError(t, err) + event := unittest.EventFixture( + unittest.Event.WithEventType(events.VersionBeacon.EventType()), + unittest.Event.WithPayload(payload), + ) + + // convert Cadence types to Go types + serviceEvent, err := convert.ServiceEvent(chainID, event) + + if test.expectAndHandleError != nil { + require.Error(t, err) + test.expectAndHandleError(t, err) + return + } + + require.NoError(t, err) + require.NotNil(t, event) + + // cast event type to version beacon + actual, ok := serviceEvent.Event.(*flow.VersionBeacon) + require.True(t, ok) + + require.Equal(t, test.converted, actual) + }) + } + + runVersionBeaconTestCase(t, + vbTestCase{ + name: "with pre-release", + event: cadence.NewEvent( + []cadence.Value{ + // versionBoundaries + cadence.NewArray( + []cadence.Value{ + cadence.NewStruct( + []cadence.Value{ + // blockHeight + cadence.UInt64(44), + // version + cadence.NewStruct( + []cadence.Value{ + // major + cadence.UInt8(2), + // minor + cadence.UInt8(13), + // patch + cadence.UInt8(7), + // preRelease + cadence.NewOptional(cadence.String("test")), + }, + ).WithType(semverType), + }, + ).WithType(versionBoundaryType), + }, + ).WithType(cadence.NewVariableSizedArrayType(versionBoundaryType)), + // sequence + cadence.UInt64(5), + }, + ).WithType(eventType), + converted: &flow.VersionBeacon{ + VersionBoundaries: []flow.VersionBoundary{ + { + BlockHeight: 44, + Version: "2.13.7-test", + }, + }, + Sequence: 5, + }, + }, + ) + + runVersionBeaconTestCase(t, + vbTestCase{ + name: "without pre-release", + event: cadence.NewEvent( + []cadence.Value{ + // versionBoundaries + cadence.NewArray( + []cadence.Value{ + cadence.NewStruct( + []cadence.Value{ + // blockHeight + cadence.UInt64(44), + // version + cadence.NewStruct( + []cadence.Value{ + // major + cadence.UInt8(2), + // minor + cadence.UInt8(13), + // patch + cadence.UInt8(7), + // preRelease + cadence.NewOptional(nil), + }, + ).WithType(semverType), + }, + ).WithType(versionBoundaryType), + }, + ).WithType(cadence.NewVariableSizedArrayType(versionBoundaryType)), + // sequence + cadence.UInt64(5), + }, + ).WithType(eventType), + converted: &flow.VersionBeacon{ + VersionBoundaries: []flow.VersionBoundary{ + { + BlockHeight: 44, + Version: "2.13.7", + }, + }, + Sequence: 5, + }, + }, + ) + runVersionBeaconTestCase(t, + vbTestCase{ + name: "invalid pre-release", + event: cadence.NewEvent( + []cadence.Value{ + // versionBoundaries + cadence.NewArray( + []cadence.Value{ + cadence.NewStruct( + []cadence.Value{ + // blockHeight + cadence.UInt64(44), + // version + cadence.NewStruct( + 
[]cadence.Value{
+								// major
+								cadence.UInt8(2),
+								// minor
+								cadence.UInt8(13),
+								// patch
+								cadence.UInt8(7),
+								// preRelease
+								cadence.NewOptional(cadence.String("/slashes.not.allowed")),
+							},
+						).WithType(semverType),
+					},
+				).WithType(versionBoundaryType),
+				},
+			).WithType(cadence.NewVariableSizedArrayType(versionBoundaryType)),
+			// sequence
+			cadence.UInt64(5),
+		},
+		).WithType(eventType),
+			expectAndHandleError: func(t *testing.T, err error) {
+				require.ErrorContains(t, err, "failed to validate pre-release")
+			},
+		},
+	)
+}
diff --git a/model/dkg/dkg.go b/model/dkg/dkg.go
index 255815710dd..30bfb8cedfa 100644
--- a/model/dkg/dkg.go
+++ b/model/dkg/dkg.go
@@ -1,12 +1,13 @@
 package dkg
 
 import (
-	"github.com/onflow/flow-go/crypto"
+	"github.com/onflow/crypto"
 )
 
-// DKGData represents all the output data from the DKG process, including private information.
-// It is used while running the DKG during bootstrapping.
-type DKGData struct {
+// ThresholdKeySet represents all the output data from the DKG process needed for a threshold signature scheme that
+// is used in the random beacon protocol, including private information.
+// Typically, the ThresholdKeySet is used with a trusted setup during bootstrapping.
+type ThresholdKeySet struct {
 	PrivKeyShares []crypto.PrivateKey
 	PubGroupKey   crypto.PublicKey
 	PubKeyShares  []crypto.PublicKey
diff --git a/model/encodable/keys.go b/model/encodable/keys.go
index 0049d4c24eb..0176abfbf9c 100644
--- a/model/encodable/keys.go
+++ b/model/encodable/keys.go
@@ -8,9 +8,9 @@ import (
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/fxamacker/cbor/v2"
-	"github.com/vmihailenco/msgpack"
+	"github.com/vmihailenco/msgpack/v4"
 
-	"github.com/onflow/flow-go/crypto"
+	"github.com/onflow/crypto"
 )
 
 // ConsensusVoteSigLen is the length of a consensus vote as well as aggregated consensus votes.
@@ -26,7 +26,7 @@ func toHex(bs []byte) string {
 func fromJSONHex(b []byte) ([]byte, error) {
 	var x string
 	if err := json.Unmarshal(b, &x); err != nil {
-		return nil, fmt.Errorf("could not unmarshal the key: %w", err)
+		return nil, fmt.Errorf("could not unmarshal the value: %w", err)
 	}
 	return hex.DecodeString(x)
 }
@@ -34,7 +34,7 @@ func fromJSONHex(b []byte) ([]byte, error) {
 func fromMsgPackHex(b []byte) ([]byte, error) {
 	var x string
 	if err := msgpack.Unmarshal(b, &x); err != nil {
-		return nil, fmt.Errorf("could not unmarshal the key: %w", err)
+		return nil, fmt.Errorf("could not unmarshal the value: %w", err)
 	}
 	return hex.DecodeString(x)
 }
@@ -42,7 +42,7 @@ func fromMsgPackHex(b []byte) ([]byte, error) {
 func fromCBORPackHex(b []byte) ([]byte, error) {
 	var x string
 	if err := cbor.Unmarshal(b, &x); err != nil {
-		return nil, fmt.Errorf("could not unmarshal the key: %w", err)
+		return nil, fmt.Errorf("could not unmarshal the value: %w", err)
 	}
 	return hex.DecodeString(x)
 }
@@ -162,6 +162,14 @@ type RandomBeaconPubKey struct {
 	crypto.PublicKey
 }
 
+func WrapRandomBeaconPubKeys(keys []crypto.PublicKey) []RandomBeaconPubKey {
+	encodables := make([]RandomBeaconPubKey, len(keys))
+	for i := range keys {
+		encodables[i] = RandomBeaconPubKey{PublicKey: keys[i]}
+	}
+	return encodables
+}
+
 func (pub RandomBeaconPubKey) MarshalJSON() ([]byte, error) {
 	if pub.PublicKey == nil {
 		return json.Marshal(nil)
@@ -295,3 +303,21 @@ func (priv *MachineAccountPrivKey) UnmarshalJSON(b []byte) error {
 	priv.PrivateKey, err = crypto.DecodePrivateKey(crypto.ECDSAP256, bz)
 	return err
 }
+
+// StakingKeyPoP wraps a crypto signature and allows it to be JSON encoded and decoded.
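+//
+// A minimal round-trip sketch (the signature value `sig` is hypothetical):
+//
+//	pop := StakingKeyPoP{Signature: sig}
+//	enc, _ := json.Marshal(pop)   // serialized as a hex string
+//	var dec StakingKeyPoP
+//	_ = json.Unmarshal(enc, &dec) // dec.Signature equals sig again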
+type StakingKeyPoP struct { + crypto.Signature +} + +func (pub StakingKeyPoP) MarshalJSON() ([]byte, error) { + if pub.Signature == nil { + return json.Marshal(nil) + } + return json.Marshal(toHex(pub.Signature)) +} + +func (pub *StakingKeyPoP) UnmarshalJSON(b []byte) error { + var err error + pub.Signature, err = fromJSONHex(b) + return err +} diff --git a/model/encodable/keys_test.go b/model/encodable/keys_test.go index ccdf63cd044..48f816ad37a 100644 --- a/model/encodable/keys_test.go +++ b/model/encodable/keys_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" ) func isHexString(enc []byte) error { @@ -247,13 +247,31 @@ func TestEncodableRandomBeaconPrivKeyMsgPack(t *testing.T) { err = key.UnmarshalMsgpack(b) require.NoError(t, err) - require.Equal(t, oldPubKey, key.PublicKey) + require.True(t, oldPubKey.Equals(key.PublicKey)) } func generateRandomSeed(t *testing.T) []byte { seed := make([]byte, 48) n, err := rand.Read(seed) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, n, 48) return seed } + +func TestEncodableStakingKeyPoP(t *testing.T) { + sig := crypto.Signature(make([]byte, crypto.SignatureLenBLSBLS12381)) + _, err := rand.Read(sig) + require.NoError(t, err) + pop := StakingKeyPoP{sig} + + enc, err := json.Marshal(pop) + require.NoError(t, err) + require.NotEmpty(t, enc) + require.NoError(t, isHexString(enc)) + + var dec StakingKeyPoP + err = json.Unmarshal(enc, &dec) + require.NoError(t, err) + + require.Equal(t, sig, dec.Signature, "encoded/decoded signature equality check failed") +} diff --git a/model/encoding/cbor/codec.go b/model/encoding/cbor/codec.go index 65b889c4a2d..20737549c15 100644 --- a/model/encoding/cbor/codec.go +++ b/model/encoding/cbor/codec.go @@ -32,8 +32,19 @@ var EncMode = func() cbor.EncMode { return encMode }() -// DecMode is the default DecMode to use when creating a new cbor Decoder -var DecMode, _ = cbor.DecOptions{}.DecMode() +// UnsafeDecMode is a permissive mode for creating a new cbor Decoder. +// +// CAUTION: this encoding should only be used for encoding/decoding data within a node. +// If used for decoding data that is shared between nodes, it makes the recipient VULNERABLE +// to RESOURCE EXHAUSTION ATTACKS, where a byzantine sender could include garbage data in the +// encoding, which would not be noticed by the recipient because the garbage data is dropped +// at the decoding step - yet, it consumes the recipient's networking bandwidth. +var UnsafeDecMode, _ = cbor.DecOptions{}.DecMode() + +// DefaultDecMode is the DecMode used for decoding messages over the network. +// It returns an error if the message contains any extra field not present in the +// target (struct we are unmarshalling into), which prevents some classes of resource exhaustion attacks. 
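+//
+// For example, CBOR bytes encoding the map {"a": 1, "b": 2}, decoded into a struct
+// that only declares field A, decode successfully under UnsafeDecMode (the unknown
+// field "b" is silently dropped) but yield an error under DefaultDecMode.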
+var DefaultDecMode, _ = cbor.DecOptions{ExtraReturnErrors: cbor.ExtraDecErrorUnknownField}.DecMode() func (m *Marshaler) Marshal(val interface{}) ([]byte, error) { return EncMode.Marshal(val) @@ -87,7 +98,7 @@ type Codec struct { func NewCodec(opts ...Option) *Codec { c := &Codec{ encMode: EncMode, - decMode: DecMode, + decMode: UnsafeDecMode, } for _, opt := range opts { diff --git a/model/encoding/rlp/rlp_test.go b/model/encoding/rlp/rlp_test.go new file mode 100644 index 00000000000..2982e5d4cf4 --- /dev/null +++ b/model/encoding/rlp/rlp_test.go @@ -0,0 +1,29 @@ +package rlp_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestRLPStructFieldOrder tests the field ordering property of RLP encoding. +// It provides evidence that RLP encoding depends on struct field ordering. +func TestRLPStructFieldOrder(t *testing.T) { + a := struct { + A uint32 // A first + B uint32 + }{A: 2, B: 3} + + b := struct { + B uint32 // B first + A uint32 + }{A: 2, B: 3} + + abin, err := rlp.EncodeToBytes(a) + require.NoError(t, err) + bbin, err := rlp.EncodeToBytes(b) + require.NoError(t, err) + assert.NotEqual(t, abin, bbin) +} diff --git a/model/events/parse.go b/model/events/parse.go new file mode 100644 index 00000000000..db29faa0e38 --- /dev/null +++ b/model/events/parse.go @@ -0,0 +1,80 @@ +package events + +import ( + "fmt" + "strings" + + "github.com/onflow/flow-go/model/flow" +) + +type ParsedEventType int + +const ( + ProtocolEventType ParsedEventType = iota + 1 + AccountEventType +) + +type ParsedEvent struct { + Type ParsedEventType + EventType flow.EventType + Address string + Contract string + ContractName string + Name string +} + +// ParseEvent parses an event type into its parts. There are 2 valid EventType formats: +// - flow.[EventName] +// - A.[Address].[Contract].[EventName] +// Any other format results in an error. +func ParseEvent(eventType flow.EventType) (*ParsedEvent, error) { + parts := strings.Split(string(eventType), ".") + + switch parts[0] { + case "flow": + if len(parts) == 2 { + return &ParsedEvent{ + Type: ProtocolEventType, + EventType: eventType, + Contract: parts[0], + ContractName: parts[0], + Name: parts[1], + }, nil + } + + case "A": + if len(parts) >= 4 { + return &ParsedEvent{ + Type: AccountEventType, + EventType: eventType, + Address: parts[1], + Contract: fmt.Sprintf("A.%s.%s", parts[1], parts[2]), + ContractName: parts[2], + Name: parts[len(parts)-1], + }, nil + } + } + + return nil, fmt.Errorf("invalid event type: %s", eventType) +} + +// ValidateEvent validates an event type is properly formed and for the correct network, and returns +// a parsed event. If the event type is invalid, an error is returned. 
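+//
+// For example (illustrative inputs, mirroring the tests below):
+//
+//	ValidateEvent("flow.AccountCreated", chain)                 // protocol event; no address check
+//	ValidateEvent("A.0000000000000001.Contract1.EventA", chain) // account event; address checked against chain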
+func ValidateEvent(eventType flow.EventType, chain flow.Chain) (*ParsedEvent, error) { + parsed, err := ParseEvent(eventType) + if err != nil { + return nil, err + } + + // only account type events have an address field + if parsed.Type != AccountEventType { + return parsed, nil + } + + contractAddress := flow.HexToAddress(parsed.Address) + if !chain.IsValid(contractAddress) { + return nil, fmt.Errorf("invalid event contract address") + } + + return parsed, nil +} diff --git a/model/events/parse_test.go b/model/events/parse_test.go new file mode 100644 index 00000000000..56543fe7fd4 --- /dev/null +++ b/model/events/parse_test.go @@ -0,0 +1,159 @@ +package events_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/events" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestParseEvent(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + eventType flow.EventType + expected events.ParsedEvent + }{ + { + name: "flow event", + eventType: "flow.AccountCreated", + expected: events.ParsedEvent{ + Type: events.ProtocolEventType, + EventType: "flow.AccountCreated", + Contract: "flow", + ContractName: "flow", + Name: "AccountCreated", + }, + }, + { + name: "account event", + eventType: "A.0000000000000001.Contract1.EventA", + expected: events.ParsedEvent{ + Type: events.AccountEventType, + EventType: "A.0000000000000001.Contract1.EventA", + Address: "0000000000000001", + Contract: "A.0000000000000001.Contract1", + ContractName: "Contract1", + Name: "EventA", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + event, err := events.ParseEvent(test.eventType) + require.NoError(t, err) + + assert.Equal(t, test.expected.Type, event.Type) + assert.Equal(t, test.expected.EventType, event.EventType) + assert.Equal(t, test.expected.Address, event.Address) + assert.Equal(t, test.expected.Contract, event.Contract) + assert.Equal(t, test.expected.Name, event.Name) + }) + } +} + +func TestParseEvent_Invalid(t *testing.T) { + t.Parallel() + + eventTypes := []flow.EventType{ + "", // not enough parts + "invalid", // not enough parts + "invalid.event", // invalid first part + "B.0000000000000001.invalid.event", // invalid first part + "flow", // incorrect number of parts for protocol event + "flow.invalid.event", // incorrect number of parts for protocol event + "A.0000000000000001.invalid", // incorrect number of parts for account event + + } + + for _, eventType := range eventTypes { + _, err := events.ParseEvent(eventType) + assert.Error(t, err, "expected error for event type: %s", eventType) + } +} + +func TestValidateEvent(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + eventType flow.EventType + expected events.ParsedEvent + }{ + { + name: "flow event", + eventType: "flow.AccountCreated", + expected: events.ParsedEvent{ + Type: events.ProtocolEventType, + EventType: "flow.AccountCreated", + Contract: "flow", + ContractName: "flow", + Name: "AccountCreated", + }, + }, + { + name: "account event", + eventType: "A.0000000000000001.Contract1.EventA", + expected: events.ParsedEvent{ + Type: events.AccountEventType, + EventType: "A.0000000000000001.Contract1.EventA", + Address: "0000000000000001", + Contract: "A.0000000000000001.Contract1", + ContractName: "Contract1", + Name: "EventA", + }, + }, + { + name: "resource destroyed event", + eventType: 
"A.0000000000000001.Contract1.ResourceA.ResourceB.ResourceC.ResourceDestroyed", + expected: events.ParsedEvent{ + Type: events.AccountEventType, + EventType: "A.0000000000000001.Contract1.ResourceA.ResourceB.ResourceC.ResourceDestroyed", + Address: "0000000000000001", + Contract: "A.0000000000000001.Contract1", + ContractName: "Contract1", + Name: "ResourceDestroyed", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + event, err := events.ValidateEvent(test.eventType, flow.MonotonicEmulator.Chain()) + require.NoError(t, err) + + assert.Equal(t, test.expected.Type, event.Type) + assert.Equal(t, test.expected.EventType, event.EventType) + assert.Equal(t, test.expected.Address, event.Address) + assert.Equal(t, test.expected.Contract, event.Contract) + assert.Equal(t, test.expected.Name, event.Name) + }) + } +} + +func TestValidateEvent_Invalid(t *testing.T) { + t.Parallel() + + eventTypes := []flow.EventType{ + "", // not enough parts + "invalid", // not enough parts + "invalid.event", // invalid first part + "B.0000000000000001.invalid.event", // invalid first part + "flow", // incorrect number of parts for protocol event + "flow.invalid.event", // incorrect number of parts for protocol event + "A.0000000000000001.invalid", // incorrect number of parts for account event + flow.EventType(fmt.Sprintf("A.%s.Contract1.EventA", unittest.RandomAddressFixture())), // address from wrong chain + } + + for _, eventType := range eventTypes { + _, err := events.ValidateEvent(eventType, flow.MonotonicEmulator.Chain()) + assert.Error(t, err, "expected error for event type: %s", eventType) + } +} diff --git a/model/events/synchronization.go b/model/events/synchronization.go deleted file mode 100644 index c2a6140489a..00000000000 --- a/model/events/synchronization.go +++ /dev/null @@ -1,16 +0,0 @@ -package events - -import ( - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/messages" -) - -type SyncedBlock struct { - OriginID flow.Identifier - Block messages.UntrustedBlock -} - -type SyncedClusterBlock struct { - OriginID flow.Identifier - Block messages.UntrustedClusterBlock -} diff --git a/model/fingerprint/mock/fingerprinter.go b/model/fingerprint/mock/fingerprinter.go index d4ddc59ab9d..3b55c5a3048 100644 --- a/model/fingerprint/mock/fingerprinter.go +++ b/model/fingerprint/mock/fingerprinter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -9,10 +9,14 @@ type Fingerprinter struct { mock.Mock } -// Fingerprint provides a mock function with given fields: +// Fingerprint provides a mock function with no fields func (_m *Fingerprinter) Fingerprint() []byte { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Fingerprint") + } + var r0 []byte if rf, ok := ret.Get(0).(func() []byte); ok { r0 = rf() @@ -25,13 +29,12 @@ func (_m *Fingerprinter) Fingerprint() []byte { return r0 } -type mockConstructorTestingTNewFingerprinter interface { +// NewFingerprinter creates a new instance of Fingerprinter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFingerprinter(t interface { mock.TestingT Cleanup(func()) -} - -// NewFingerprinter creates a new instance of Fingerprinter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewFingerprinter(t mockConstructorTestingTNewFingerprinter) *Fingerprinter { +}) *Fingerprinter { mock := &Fingerprinter{} mock.Mock.Test(t) diff --git a/model/flow/account.go b/model/flow/account.go index 35606897abd..1aa2ab5ef6f 100644 --- a/model/flow/account.go +++ b/model/flow/account.go @@ -1,13 +1,11 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package flow import ( "encoding/json" "fmt" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" ) // Account represents an account on the Flow network. @@ -24,7 +22,7 @@ type Account struct { // // An account public key contains the public key, signing and hashing algorithms, and a key weight. type AccountPublicKey struct { - Index int + Index uint32 PublicKey crypto.PublicKey SignAlgo crypto.SigningAlgorithm HashAlgo hash.HashingAlgorithm @@ -131,3 +129,14 @@ func CompatibleAlgorithms(sigAlgo crypto.SigningAlgorithm, hashAlgo hash.Hashing } return false } + +// RuntimeAccountPublicKey is a public key associated with an account for Cadence runtime that doesn't need sequence number. +// A runtime account public key contains the public key, signing and hashing algorithms, a key weight, and revoked status. +type RuntimeAccountPublicKey struct { + PublicKey crypto.PublicKey + SignAlgo crypto.SigningAlgorithm + HashAlgo hash.HashingAlgorithm + Weight int + Index uint32 + Revoked bool +} diff --git a/model/flow/account_encoder.go b/model/flow/account_encoder.go index 9b4bf1ad5ae..56ce4be9154 100644 --- a/model/flow/account_encoder.go +++ b/model/flow/account_encoder.go @@ -7,8 +7,8 @@ import ( "github.com/onflow/cadence" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" ) // accountPublicKeyWrapper is used for encoding and decoding. @@ -40,6 +40,14 @@ type runtimeAccountPublicKeyWrapper struct { Weight uint } +// StoredPublicKey represents public key stored on chain in batch public key register. +// Weight is stored separately to reduce duplicate public keys. +type StoredPublicKey struct { + PublicKey crypto.PublicKey + SignAlgo crypto.SigningAlgorithm + HashAlgo hash.HashingAlgorithm +} + // accountPrivateKeyWrapper is used for encoding and decoding. 
type accountPrivateKeyWrapper struct { PrivateKey []byte @@ -91,6 +99,19 @@ func EncodeRuntimeAccountPublicKey(a AccountPublicKey) ([]byte, error) { return rlp.EncodeToBytes(&w) } +func EncodeStoredPublicKey(a StoredPublicKey) ([]byte, error) { + w := struct { + PublicKey []byte + SignAlgo uint + HashAlgo uint + }{ + PublicKey: a.PublicKey.Encode(), + SignAlgo: uint(a.SignAlgo), + HashAlgo: uint(a.HashAlgo), + } + return rlp.EncodeToBytes(&w) +} + func decodeAccountPublicKeyWrapper(b []byte) (accountPublicKeyWrapper, error) { var wrapper accountPublicKeyWrapper @@ -117,7 +138,7 @@ func decodeAccountPublicKeyWrapper(b []byte) (accountPublicKeyWrapper, error) { return wrapper, nil } -func DecodeAccountPublicKey(b []byte, index uint64) (AccountPublicKey, error) { +func DecodeAccountPublicKey(b []byte, index uint32) (AccountPublicKey, error) { w, err := decodeAccountPublicKeyWrapper(b) if err != nil { return AccountPublicKey{}, err @@ -132,7 +153,7 @@ func DecodeAccountPublicKey(b []byte, index uint64) (AccountPublicKey, error) { } return AccountPublicKey{ - Index: int(index), + Index: index, PublicKey: publicKey, SignAlgo: signAlgo, HashAlgo: hashAlgo, @@ -171,6 +192,32 @@ func DecodeRuntimeAccountPublicKey(b []byte, seqNumber uint64) (AccountPublicKey }, nil } +func DecodeStoredPublicKey(b []byte) (StoredPublicKey, error) { + var w struct { + PublicKey []byte + SignAlgo uint + HashAlgo uint + } + err := rlp.DecodeBytes(b, &w) + if err != nil { + return StoredPublicKey{}, err + } + + signAlgo := crypto.SigningAlgorithm(w.SignAlgo) + hashAlgo := hash.HashingAlgorithm(w.HashAlgo) + + publicKey, err := crypto.DecodePublicKey(signAlgo, w.PublicKey) + if err != nil { + return StoredPublicKey{}, err + } + + return StoredPublicKey{ + PublicKey: publicKey, + SignAlgo: signAlgo, + HashAlgo: hashAlgo, + }, nil +} + func EncodeAccountPrivateKey(a AccountPrivateKey) ([]byte, error) { privateKey := a.PrivateKey.Encode() @@ -205,3 +252,18 @@ func DecodeAccountPrivateKey(b []byte) (AccountPrivateKey, error) { HashAlgo: hashAlgo, }, nil } + +// Sequence Number + +func EncodeSequenceNumber(num uint64) ([]byte, error) { + return rlp.EncodeToBytes(num) +} + +func DecodeSequenceNumber(b []byte) (uint64, error) { + var num uint64 + err := rlp.DecodeBytes(b, &num) + if err != nil { + return 0, err + } + return num, nil +} diff --git a/model/flow/account_encoder_test.go b/model/flow/account_encoder_test.go index 761511c7c1d..f57ccea1416 100644 --- a/model/flow/account_encoder_test.go +++ b/model/flow/account_encoder_test.go @@ -5,12 +5,13 @@ import ( "testing" "github.com/ethereum/go-ethereum/rlp" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) type legacyAccountPublicKeyWrapper struct { @@ -49,7 +50,7 @@ func TestDecodeAccountPublicKey_Legacy(t *testing.T) { accountKey, err := flow.DecodeAccountPublicKey(b, 1) require.NoError(t, err) - assert.Equal(t, 1, accountKey.Index) + assert.Equal(t, uint32(1), accountKey.Index) assert.Equal(t, publicKey, accountKey.PublicKey) assert.Equal(t, sigAlgo, accountKey.SignAlgo) assert.Equal(t, hashAlgo, accountKey.HashAlgo) @@ -59,3 +60,34 @@ func TestDecodeAccountPublicKey_Legacy(t *testing.T) { // legacy account key should not be revoked assert.False(t, accountKey.Revoked) } + +func TestStoredPublicKey(t *testing.T) { + sk, err 
:= unittest.AccountKeyDefaultFixture()
+	assert.NoError(t, err)
+
+	pk := flow.StoredPublicKey{
+		PublicKey: sk.PrivateKey.PublicKey(),
+		SignAlgo:  sk.SignAlgo,
+		HashAlgo:  sk.HashAlgo,
+	}
+
+	b, err := flow.EncodeStoredPublicKey(pk)
+	assert.NoError(t, err)
+
+	decodedPk, err := flow.DecodeStoredPublicKey(b)
+	assert.NoError(t, err)
+
+	assert.Equal(t, pk, decodedPk)
+}
+
+func TestSequenceNumber(t *testing.T) {
+	seqNumber := uint64(42)
+
+	b, err := flow.EncodeSequenceNumber(seqNumber)
+	assert.NoError(t, err)
+
+	decodedSeqNumber, err := flow.DecodeSequenceNumber(b)
+	assert.NoError(t, err)
+
+	assert.Equal(t, seqNumber, decodedSeqNumber)
+}
diff --git a/model/flow/address.go b/model/flow/address.go
index a0b054f28fb..941531cd850 100644
--- a/model/flow/address.go
+++ b/model/flow/address.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package flow
 
 import (
@@ -25,12 +23,21 @@ func ConvertAddress(b [AddressLength]byte) Address {
 
 // HexToAddress converts a hex string to an Address.
 func HexToAddress(h string) Address {
+	addr, _ := StringToAddress(h)
+	return addr
+}
+
+// StringToAddress converts a string to an Address and returns an error if the string is malformed.
+func StringToAddress(h string) (Address, error) {
 	trimmed := strings.TrimPrefix(h, "0x")
 	if len(trimmed)%2 == 1 {
 		trimmed = "0" + trimmed
 	}
-	b, _ := hex.DecodeString(trimmed)
-	return BytesToAddress(b)
+	b, err := hex.DecodeString(trimmed)
+	if err != nil {
+		return EmptyAddress, fmt.Errorf("cannot decode hex string (%v) to address: %w", h, err)
+	}
+	return BytesToAddress(b), nil
 }
 
 // BytesToAddress returns Address with value b.
@@ -252,8 +259,15 @@ const (
 	maxIndex = (1 << linearCodeK) - 1
 )
 
-// The following are invalid code-words in the [64,45] code.
-// These constants are used to generate non-Flow-Mainnet addresses
+// The following constants are invalid code-words in the [64,45] code, generated randomly.
+// These constants are used to generate non-Flow-Mainnet addresses.
+//
+// The Flow-Mainnet address space uses the original [64,45] code, while each network
+// uses an orthogonal space obtained by adding a specific invalid code word to the
+// original [64,45] code. The linearity of the code guarantees that all the obtained
+// spaces are disjoint, as long as all invalid code words are distinct.
+//
+// The disjointness of these address spaces is validated in `testAddressesIntersection`.
 
 // invalidCodeTestNetwork is the invalid codeword used for long-lived test networks.
 const invalidCodeTestNetwork = uint64(0x6834ba37b3980209)
@@ -264,6 +278,9 @@ const invalidCodeTransientNetwork = uint64(0x1cb159857af02018)
 
 // invalidCodeSandboxNetwork is the invalid codeword used for Sandbox network.
 const invalidCodeSandboxNetwork = uint64(0x1035ce4eff92ae01)
+// invalidCodePreviewNetwork is the invalid codeword used for Preview networks.
+const invalidCodePreviewNetwork = uint64(0x5211829E88528817)
+
 // encodeWord encodes a word into a code word.
 // In Flow, the word is the account index while the code word
 // is the corresponding address.
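+// For illustration, each network derives its address space by XOR-ing Mainnet
+// code words with the network's invalid code word; a minimal sketch on raw
+// uint64 words (assuming encodeWord returns the 64-bit code word for an index):
+//
+//	mainnetWord := encodeWord(index)                    // valid Mainnet address word
+//	testnetWord := mainnetWord ^ invalidCodeTestNetwork // valid on Testnet, invalid on Mainnet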
diff --git a/model/flow/address_test.go b/model/flow/address_test.go index b71a0d567ed..527dce77bef 100644 --- a/model/flow/address_test.go +++ b/model/flow/address_test.go @@ -5,11 +5,9 @@ import ( "math/bits" "math/rand" "testing" - "time" "github.com/onflow/cadence" - "github.com/onflow/cadence/runtime/common" - + "github.com/onflow/cadence/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -72,13 +70,13 @@ func TestHexToAddress(t *testing.T) { func TestAddressJSON(t *testing.T) { addr := Mainnet.Chain().ServiceAddress() data, err := json.Marshal(addressWrapper{Address: addr}) - require.Nil(t, err) + require.NoError(t, err) t.Log(string(data)) var out addressWrapper err = json.Unmarshal(data, &out) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, addr, out.Address) } @@ -130,19 +128,19 @@ func testAddressConstants(t *testing.T) { Testnet, Emulator, Sandboxnet, + Previewnet, } for _, chainID := range chainIDs { chain := chainID.Chain() - if chainID != Emulator { - // check the Zero and Root constants - expected := uint64ToAddress(uint64(chainID.getChainCodeWord())) - assert.Equal(t, chain.zeroAddress(), expected) - expected = uint64ToAddress(generatorMatrixRows[0] ^ uint64(chainID.getChainCodeWord())) - assert.Equal(t, chain.ServiceAddress(), expected) - } + // check the Zero and Root constants + expected := uint64ToAddress(uint64(chainID.getChainCodeWord())) + + assert.Equal(t, chain.zeroAddress(), expected) + expected = uint64ToAddress(generatorMatrixRows[0] ^ uint64(chainID.getChainCodeWord())) + assert.Equal(t, chain.ServiceAddress(), expected) // check the transition from account zero to root state := chain.NewAddressGenerator() @@ -151,7 +149,7 @@ func testAddressConstants(t *testing.T) { assert.Equal(t, address, chain.ServiceAddress()) // check high state values: generation should fail for high value states - state = chain.newAddressGeneratorAtIndex(maxIndex - 1) + state = chain.NewAddressGeneratorAtIndex(maxIndex - 1) _, err = state.NextAddress() assert.NoError(t, err) _, err = state.NextAddress() @@ -164,12 +162,10 @@ func testAddressConstants(t *testing.T) { } } +// invalid code word for all networks const invalidCodeWord = uint64(0xab2ae42382900010) func testAddressGeneration(t *testing.T) { - // seed random generator - rand.Seed(time.Now().UnixNano()) - // loops in each test const loop = 50 @@ -179,6 +175,7 @@ func testAddressGeneration(t *testing.T) { Testnet, Emulator, Sandboxnet, + Previewnet, } for _, chainID := range chainIDs { @@ -199,11 +196,11 @@ func testAddressGeneration(t *testing.T) { } // sanity check of addresses weights in Flow. - // All addresses hamming weights must be less than d. + // All addresses hamming weights must be larger than d. // this is only a sanity check of the implementation and not an exhaustive proof if chainID == Mainnet { r := uint64(rand.Intn(maxIndex - loop)) - state = chain.newAddressGeneratorAtIndex(r) + state = chain.NewAddressGeneratorAtIndex(r) for i := 0; i < loop; i++ { address, err := state.NextAddress() require.NoError(t, err) @@ -212,28 +209,24 @@ func testAddressGeneration(t *testing.T) { } } - if chainID == Mainnet { - - // sanity check of address distances. - // All distances between any two addresses must be less than d. - // this is only a sanity check of the implementation and not an exhaustive proof - r := uint64(rand.Intn(maxIndex - loop - 1)) - state = chain.newAddressGeneratorAtIndex(r) - refAddress, err := state.NextAddress() + // sanity check of address distances. 
+ // All distances between any two addresses must be larger than d. + // this is only a sanity check of the implementation and not an exhaustive proof + r := uint64(rand.Intn(maxIndex - loop - 1)) + state = chain.NewAddressGeneratorAtIndex(r) + refAddress, err := state.NextAddress() + require.NoError(t, err) + for i := 0; i < loop; i++ { + address, err := state.NextAddress() require.NoError(t, err) - for i := 0; i < loop; i++ { - address, err := state.NextAddress() - require.NoError(t, err) - distance := bits.OnesCount64(address.uint64() ^ refAddress.uint64()) - assert.LessOrEqual(t, linearCodeD, distance) - } - + distance := bits.OnesCount64(address.uint64() ^ refAddress.uint64()) + assert.LessOrEqual(t, linearCodeD, distance) } // sanity check of valid account addresses. // All valid addresses must pass IsValid. - r := uint64(rand.Intn(maxIndex - loop)) - state = chain.newAddressGeneratorAtIndex(r) + r = uint64(rand.Intn(maxIndex - loop)) + state = chain.NewAddressGeneratorAtIndex(r) for i := 0; i < loop; i++ { address, err := state.NextAddress() require.NoError(t, err) @@ -248,7 +241,7 @@ func testAddressGeneration(t *testing.T) { assert.False(t, check, "account address format should be invalid") r = uint64(rand.Intn(maxIndex - loop)) - state = chain.newAddressGeneratorAtIndex(r) + state = chain.NewAddressGeneratorAtIndex(r) for i := 0; i < loop; i++ { address, err := state.NextAddress() require.NoError(t, err) @@ -260,9 +253,6 @@ func testAddressGeneration(t *testing.T) { } func testAddressesIntersection(t *testing.T) { - // seed random generator - rand.Seed(time.Now().UnixNano()) - // loops in each test const loop = 25 @@ -272,22 +262,24 @@ func testAddressesIntersection(t *testing.T) { Testnet, Emulator, Sandboxnet, + Previewnet, } for _, chainID := range chainIDs { chain := chainID.Chain() - // All valid test addresses must fail Flow Mainnet check + // a valid address in one network must be invalid in all other networks r := uint64(rand.Intn(maxIndex - loop)) - state := chain.newAddressGeneratorAtIndex(r) + state := chain.NewAddressGeneratorAtIndex(r) for k := 0; k < loop; k++ { address, err := state.NextAddress() require.NoError(t, err) for _, otherChain := range chainIDs { if chainID != otherChain { check := otherChain.Chain().IsValid(address) - assert.False(t, check, "test account address format should be invalid in Flow") + assert.False(t, check, "address %s belongs to %s and should be invalid in %s", + address, chainID, otherChain) } else { sameChainCheck := chain.IsValid(address) require.True(t, sameChainCheck) @@ -295,33 +287,21 @@ func testAddressesIntersection(t *testing.T) { } } - // sanity check: mainnet addresses must fail the test check - r = uint64(rand.Intn(maxIndex - loop)) - for k := 0; k < loop; k++ { - for _, otherChain := range chainIDs { - if chainID != otherChain { - invalidAddress, err := otherChain.Chain().newAddressGeneratorAtIndex(r).NextAddress() - require.NoError(t, err) - check := chain.IsValid(invalidAddress) - assert.False(t, check, "account address format should be invalid") - } - } - } - - // sanity check of invalid account addresses in all networks + // `invalidCodeWord` must be invalid in all networks + // for the remaining section of the test require.NotEqual(t, invalidCodeWord, uint64(0)) invalidAddress := uint64ToAddress(invalidCodeWord) check := chain.IsValid(invalidAddress) - assert.False(t, check, "account address format should be invalid") - r = uint64(rand.Intn(maxIndex - loop)) + require.False(t, check, "account address format should be 
invalid") - state = chain.newAddressGeneratorAtIndex(r) + // build invalid addresses using `invalidCodeWord` and make sure they all + // fail the check for all networks + r = uint64(rand.Intn(maxIndex - loop)) + state = chain.NewAddressGeneratorAtIndex(r) for k := 0; k < loop; k++ { address, err := state.NextAddress() require.NoError(t, err) invalidAddress = uint64ToAddress(address.uint64() ^ invalidCodeWord) - - // must fail test network check check = chain.IsValid(invalidAddress) assert.False(t, check, "account address format should be invalid") } @@ -329,9 +309,6 @@ func testAddressesIntersection(t *testing.T) { } func testIndexFromAddress(t *testing.T) { - // seed random generator - rand.Seed(time.Now().UnixNano()) - // loops in each test const loop = 50 @@ -341,6 +318,7 @@ func testIndexFromAddress(t *testing.T) { testnet, emulator, sandboxnet, + previewnet, } for _, chain := range chains { @@ -350,7 +328,7 @@ func testIndexFromAddress(t *testing.T) { // random valid index r := uint64(rand.Intn(maxIndex)) + 1 // generate the address - address := chain.newAddressGeneratorAtIndex(r).CurrentAddress() + address := chain.NewAddressGeneratorAtIndex(r).CurrentAddress() // extract the index and compare index, err := chain.IndexFromAddress(address) assert.NoError(t, err) // address should be valid @@ -370,9 +348,6 @@ func testIndexFromAddress(t *testing.T) { } func TestUint48(t *testing.T) { - // seed random generator - rand.Seed(time.Now().UnixNano()) - const loop = 50 // test consistensy of putUint48 and uint48 for i := 0; i < loop; i++ { diff --git a/model/flow/aggregated_signature.go b/model/flow/aggregated_signature.go index a3a5c9076f5..fe53c57a5a4 100644 --- a/model/flow/aggregated_signature.go +++ b/model/flow/aggregated_signature.go @@ -1,10 +1,10 @@ package flow import ( - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" ) -// AggregatedSignature contains a set of of signatures from verifiers attesting +// AggregatedSignature contains a set of signatures from verifiers attesting // to the validity of an execution result chunk. // TODO: this will be replaced with BLS aggregation type AggregatedSignature struct { diff --git a/model/flow/assignment/sort.go b/model/flow/assignment/sort.go index 3b135d91152..90864ea6500 100644 --- a/model/flow/assignment/sort.go +++ b/model/flow/assignment/sort.go @@ -2,16 +2,14 @@ package assignment import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" ) // FromIdentifierLists creates a `flow.AssignmentList` with canonical ordering from // the given `identifierLists`. 
func FromIdentifierLists(identifierLists []flow.IdentifierList) flow.AssignmentList { assignments := make(flow.AssignmentList, 0, len(identifierLists)) - // in place sort to order the assignment in canonical order for _, identities := range identifierLists { - assignment := flow.IdentifierList(identities).Sort(order.IdentifierCanonical) + assignment := identities.Sort(flow.IdentifierCanonical) // sort each cluster in canonical order (already creates copy) assignments = append(assignments, assignment) } return assignments diff --git a/model/flow/block.go b/model/flow/block.go index abd62ff8595..392e59d25c2 100644 --- a/model/flow/block.go +++ b/model/flow/block.go @@ -1,59 +1,149 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package flow -import "fmt" +import ( + "encoding/json" + "fmt" + "time" +) -func Genesis(chainID ChainID) *Block { +// HashablePayload is a temporary interface used to generalize the payload type of GenericBlock. +// It defines the minimal interface required for a payload to participate in block hashing. +// +// TODO(malleability, #7164): remove this interface after renaming IDEntity's method `ID` to `Hash`, +// and replace all usages of HashablePayload with IDEntity. +type HashablePayload interface { + Hash() Identifier +} - // create the raw content for the genesis block - payload := Payload{} +// GenericBlock represents a generic Flow block structure parameterized by a payload type. +// It includes both the block header metadata and the block payload. +// +// Zero values for certain HeaderBody fields are allowed only for root blocks, which must be constructed +// using the NewRootBlock constructor. All non-root blocks must be constructed +// using NewBlock to ensure validation of the block fields. +// +//structwrite:immutable - mutations allowed only within the constructor +type GenericBlock[T HashablePayload] struct { + // HeaderBody is a container encapsulating most of the header fields - *excluding* the payload hash + // and the proposer signature. Generally, the type [HeaderBody] should not be used on its own. + // CAUTION regarding security: + // * HeaderBody does not contain the hash of the block payload. Therefore, it is not a cryptographic digest + // of the block and should not be confused with a "proper" header, which commits to the _entire_ content + // of a block. + // * With a byzantine HeaderBody alone, an honest node cannot prove who created that faulty data structure, + // because HeaderBody does not include the proposer's signature. + HeaderBody + Payload T +} - // create the header - header := Header{ - ChainID: chainID, - ParentID: ZeroID, - Height: 0, - PayloadHash: payload.Hash(), - Timestamp: GenesisTime, - View: 0, - } +// ID returns a collision-resistant hash of the Block struct. +func (b *GenericBlock[T]) ID() Identifier { + return b.ToHeader().ID() +} - // combine to block - genesis := Block{ - Header: &header, - Payload: &payload, +// ToHeader converts the block into a compact [flow.Header] representation, +// where the payload is compressed to a hash reference. +// The receiver Block must be well-formed (enforced by mutation protection on the type). +// This function may panic if invoked on a malformed Block. 
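+// Note that, by construction, b.ID() == b.ToHeader().ID(): the block and its compact
+// header representation commit to the same content.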
+func (b *GenericBlock[T]) ToHeader() *Header { + if !b.ContainsParentQC() { + rootHeader, err := NewRootHeader(UntrustedHeader{ + HeaderBody: b.HeaderBody, + PayloadHash: b.Payload.Hash(), + }) + if err != nil { + panic(fmt.Errorf("could not build root header from block: %w", err)) + } + return rootHeader } - return &genesis + header, err := NewHeader(UntrustedHeader{ + HeaderBody: b.HeaderBody, + PayloadHash: b.Payload.Hash(), + }) + if err != nil { + panic(fmt.Errorf("could not build header from block: %w", err)) + } + return header } -// Block (currently) includes the header, the payload hashes as well as the -// payload contents. -type Block struct { - Header *Header - Payload *Payload +// MarshalJSON implements JSON encoding logic for blocks. +// We include a top-level ID field equal to the hash of the block, for visibility in automations. +// The ID field is ignored when unmarshaling a JSON structure back into a block. +func (b *GenericBlock[T]) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + GenericBlock[T] + ID Identifier + }{ + GenericBlock: *b, + ID: b.ID(), + }) } -// SetPayload sets the payload and updates the payload hash. -func (b *Block) SetPayload(payload Payload) { - b.Payload = &payload - b.Header.PayloadHash = b.Payload.Hash() -} +// Block is the canonical instantiation of GenericBlock using flow.Payload as the payload type. +// +// Zero values for certain HeaderBody fields are allowed only for root blocks, which must be constructed +// using the NewRootBlock constructor. All non-root blocks must be constructed +// using NewBlock to ensure validation of the block fields. +// +//structwrite:immutable - mutations allowed only within the constructor +type Block = GenericBlock[Payload] -// Valid will check whether the block is valid bottom-up. -func (b Block) Valid() bool { - return b.Header.PayloadHash == b.Payload.Hash() -} +// UntrustedBlock is an untrusted input-only representation of a Block, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedBlock should be validated and converted into +// a trusted Block using the NewBlock constructor (or NewRootBlock +// for the root block). +type UntrustedBlock Block -// ID returns the ID of the header. -func (b Block) ID() Identifier { - return b.Header.ID() +// NewBlock creates a new block. +// This constructor enforces validation rules to ensure the block is well-formed. +// It must be used to construct all non-root blocks. +// +// All errors indicate that a valid Block cannot be constructed from the input. +func NewBlock(untrusted UntrustedBlock) (*Block, error) { + // validate header body + headerBody, err := NewHeaderBody(UntrustedHeaderBody(untrusted.HeaderBody)) + if err != nil { + return nil, fmt.Errorf("invalid header body: %w", err) + } + + // validate payload + payload, err := NewPayload(UntrustedPayload(untrusted.Payload)) + if err != nil { + return nil, fmt.Errorf("invalid payload: %w", err) + } + + return &Block{ + HeaderBody: *headerBody, + Payload: *payload, + }, nil } -// Checksum returns the checksum of the header. -func (b Block) Checksum() Identifier { - return b.Header.Checksum() +// NewRootBlock creates a root block. +// This constructor must be used **only** for constructing the root block, +// which is the only case where zero values are allowed. 
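Both `NewBlock` above and `NewRootBlock` below follow the same untrusted-to-trusted conversion pattern. For illustration, a minimal sketch of how a caller might stage raw input in an `UntrustedBlock` and only obtain a `*flow.Block` once validation passes; `buildBlock` is a hypothetical helper, while the flow types and constructor come from this diff:

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

// buildBlock converts untrusted wire input into a trusted flow.Block.
// The named-field UntrustedBlock makes incorrect field ordering impossible,
// and the constructor rejects structurally invalid header bodies or payloads.
func buildBlock(headerBody flow.HeaderBody, payload flow.Payload) (*flow.Block, error) {
	block, err := flow.NewBlock(flow.UntrustedBlock{
		HeaderBody: headerBody,
		Payload:    payload,
	})
	if err != nil {
		return nil, fmt.Errorf("rejecting malformed block: %w", err)
	}
	return block, nil
}
```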
+func NewRootBlock(untrusted UntrustedBlock) (*Block, error) { + rootHeaderBody, err := NewRootHeaderBody(UntrustedHeaderBody(untrusted.HeaderBody)) + if err != nil { + return nil, fmt.Errorf("invalid root header body: %w", err) + } + + // validate payload + payload, err := NewPayload(UntrustedPayload(untrusted.Payload)) + if err != nil { + return nil, fmt.Errorf("invalid payload: %w", err) + } + + return &Block{ + HeaderBody: *rootHeaderBody, + Payload: *payload, + }, nil } // BlockStatus represents the status of a block. @@ -73,12 +163,99 @@ func (s BlockStatus) String() string { return [...]string{"BLOCK_UNKNOWN", "BLOCK_FINALIZED", "BLOCK_SEALED"}[s] } -// CertifiedBlock holds a certified block, which is a block and a QC that is pointing to -// the block. A QC is the aggregated form of votes from a supermajority of HotStuff and -// therefore proves validity of the block. A certified block satisfies: +// Proposal is a signed proposal that includes the block payload, in addition to the required header and signature. +// +//structwrite:immutable - mutations allowed only within the constructor +type Proposal struct { + Block Block + ProposerSigData []byte +} + +// UntrustedProposal is an untrusted input-only representation of a Proposal, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedProposal should be validated and converted into +// a trusted Proposal using the NewProposal constructor (or NewRootProposal +// for the root proposal). +type UntrustedProposal Proposal + +// NewProposal creates a new Proposal. +// This constructor enforces validation rules to ensure the Proposal is well-formed. +// It must be used to construct all non-root proposal. +// +// All errors indicate that a valid Proposal cannot be constructed from the input. +func NewProposal(untrusted UntrustedProposal) (*Proposal, error) { + block, err := NewBlock(UntrustedBlock(untrusted.Block)) + if err != nil { + return nil, fmt.Errorf("invalid block: %w", err) + } + if len(untrusted.ProposerSigData) == 0 { + return nil, fmt.Errorf("proposer signature must not be empty") + } + + return &Proposal{ + Block: *block, + ProposerSigData: untrusted.ProposerSigData, + }, nil +} + +// NewRootProposal creates a root proposal. +// This constructor must be used **only** for constructing the root proposal, +// which is the only case where zero values are allowed. +func NewRootProposal(untrusted UntrustedProposal) (*Proposal, error) { + block, err := NewRootBlock(UntrustedBlock(untrusted.Block)) + if err != nil { + return nil, fmt.Errorf("invalid root block: %w", err) + } + if len(untrusted.ProposerSigData) > 0 { + return nil, fmt.Errorf("proposer signature must be empty") + } + + return &Proposal{ + Block: *block, + ProposerSigData: untrusted.ProposerSigData, + }, nil +} + +// ProposalHeader converts the proposal into a compact [ProposalHeader] representation, +// where the payload is compressed to a hash reference. +func (b *Proposal) ProposalHeader() *ProposalHeader { + return &ProposalHeader{Header: b.Block.ToHeader(), ProposerSigData: b.ProposerSigData} +} + +// GenericBlockResponse is part of the synchronization protocol and represents the +// reply to any active synchronization attempts. 
It is a generic container that +// holds a list of structurally validated blocks of type T, which should +// correspond to the synchronization request. +type GenericBlockResponse[T any] struct { + Nonce uint64 + Blocks []T +} + +// BlockResponse is a specialization of GenericBlockResponse for Proposal blocks. +// It is used as the concrete response type for block synchronization requests. +type BlockResponse GenericBlockResponse[Proposal] + +// CertifiedBlock holds a certified block, which is a block and a Quorum Certificate [QC] pointing +// to the block. A QC is the aggregated form of votes from a supermajority of HotStuff and therefore +// proves validity of the block. A certified block satisfies: // Block.View == QC.View and Block.BlockID == QC.BlockID +// +// Conceptually, blocks must always be signed by the proposer. Once a block is certified, the +// proposer's signature is included in the QC and does not need to be provided individually anymore. +// Therefore, from the protocol perspective, the canonical data structures are either a block proposal +// (including the proposer's signature) or a certified block (including a QC for the block). +// Though, for simplicity, we just extend the Proposal structure to represent a certified block, +// including proof that the proposer has signed their block twice. Thereby it is easy to convert +// a [CertifiedBlock] into a [Proposal], which otherwise would not be possible because the QC only +// contains an aggregated signature (including the proposer's signature), which cannot be separated +// into individual signatures. type CertifiedBlock struct { - Block *Block + Proposal *Proposal CertifyingQC *QuorumCertificate } @@ -86,19 +263,20 @@ type CertifiedBlock struct { // requirements and errors otherwise: // // Block.View == QC.View and Block.BlockID == QC.BlockID -func NewCertifiedBlock(block *Block, qc *QuorumCertificate) (CertifiedBlock, error) { - if block.Header.View != qc.View { - return CertifiedBlock{}, fmt.Errorf("block's view (%d) should equal the qc's view (%d)", block.Header.View, qc.View) +func NewCertifiedBlock(proposal *Proposal, qc *QuorumCertificate) (CertifiedBlock, error) { + if proposal.Block.View != qc.View { + return CertifiedBlock{}, fmt.Errorf("block's view (%d) should equal the qc's view (%d)", proposal.Block.View, qc.View) } - if block.ID() != qc.BlockID { - return CertifiedBlock{}, fmt.Errorf("block's ID (%v) should equal the block referenced by the qc (%d)", block.ID(), qc.BlockID) + if proposal.Block.ID() != qc.BlockID { + return CertifiedBlock{}, fmt.Errorf("block's ID (%v) should equal the block referenced by the qc (%d)", proposal.Block.ID(), qc.BlockID) } - return CertifiedBlock{Block: block, CertifyingQC: qc}, nil + return CertifiedBlock{Proposal: proposal, CertifyingQC: qc}, nil } -// ID returns unique identifier for the block. +// BlockID returns a unique identifier for the block (the ID signed to produce a block vote). // To avoid repeated computation, we use value from the QC. -func (b *CertifiedBlock) ID() Identifier { +// CAUTION: This is not a cryptographic commitment for the CertifiedBlock model. +func (b *CertifiedBlock) BlockID() Identifier { return b.CertifyingQC.BlockID } @@ -109,5 +287,54 @@ func (b *CertifiedBlock) View() uint64 { // Height returns height of the block. 
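As a sketch of the consistency checks just described (hypothetical `certify` helper, real constructor from this diff): `NewCertifiedBlock` enforces `QC.View == Block.View` and `QC.BlockID == Block.ID()`, so a successfully constructed `CertifiedBlock` always satisfies both invariants.

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

// certify pairs a signed proposal with the QC that certifies it. The error
// path fires exactly when the QC does not reference this proposal's block.
func certify(proposal *flow.Proposal, qc *flow.QuorumCertificate) (flow.CertifiedBlock, error) {
	certified, err := flow.NewCertifiedBlock(proposal, qc)
	if err != nil {
		return flow.CertifiedBlock{}, fmt.Errorf("QC does not certify this proposal: %w", err)
	}
	return certified, nil
}
```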
func (b *CertifiedBlock) Height() uint64 { - return b.Block.Header.Height + return b.Proposal.Block.Height +} + +// BlockDigest holds lightweight block information which includes only the block's ID, height and timestamp +type BlockDigest struct { + BlockID Identifier + Height uint64 + Timestamp time.Time +} + +// NewBlockDigest constructs a new block digest. +func NewBlockDigest( + blockID Identifier, + height uint64, + timestamp time.Time, +) *BlockDigest { + return &BlockDigest{ + BlockID: blockID, + Height: height, + Timestamp: timestamp, + } +} + +// BlockVote is part of the consensus protocol and represents a consensus node +// voting on the proposal of the leader of a given round. +type BlockVote struct { + BlockID Identifier + View uint64 + SigData []byte +} + +// NewBlockVote constructs a new block vote. It checks the consistency +// requirements and errors otherwise: +// +// BlockID != ZeroID and len(SigData) != 0 +func NewBlockVote(blockID Identifier, view uint64, sigData []byte) (*BlockVote, error) { + + if blockID == ZeroID { + return nil, fmt.Errorf("BlockID must not be empty") + } + + if len(sigData) == 0 { + return nil, fmt.Errorf("SigData must not be empty") + } + + return &BlockVote{ + BlockID: blockID, + View: view, + SigData: sigData, + }, nil } diff --git a/model/flow/block_test.go b/model/flow/block_test.go index e30720bad7e..14cb137faa2 100644 --- a/model/flow/block_test.go +++ b/model/flow/block_test.go @@ -3,6 +3,7 @@ package flow_test import ( "encoding/json" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ import ( ) func TestGenesisEncodingJSON(t *testing.T) { - genesis := flow.Genesis(flow.Mainnet) + genesis := unittest.Block.Genesis(flow.Mainnet) genesisID := genesis.ID() data, err := json.Marshal(genesis) require.NoError(t, err) @@ -26,7 +27,7 @@ func TestGenesisEncodingJSON(t *testing.T) { } func TestGenesisDecodingMsgpack(t *testing.T) { - genesis := flow.Genesis(flow.Mainnet) + genesis := unittest.Block.Genesis(flow.Mainnet) genesisID := genesis.ID() data, err := msgpack.Marshal(genesis) require.NoError(t, err) @@ -48,7 +49,20 @@ func TestBlockEncodingJSON(t *testing.T) { require.NoError(t, err) decodedID := decoded.ID() assert.Equal(t, blockID, decodedID) - assert.Equal(t, block, decoded) + assert.Equal(t, block, &decoded) +} + +// TestBlockEncodingJSON_IDField ensures that the explicit ID field added to the +// block when encoded as JSON is present and accurate. 
+func TestBlockEncodingJSON_IDField(t *testing.T) { + block := unittest.BlockFixture() + blockID := block.ID() + data, err := json.Marshal(block) + require.NoError(t, err) + var decodedIDField struct{ ID flow.Identifier } + err = json.Unmarshal(data, &decodedIDField) + require.NoError(t, err) + assert.Equal(t, blockID, decodedIDField.ID) } func TestBlockEncodingMsgpack(t *testing.T) { @@ -61,11 +75,10 @@ func TestBlockEncodingMsgpack(t *testing.T) { require.NoError(t, err) decodedID := decoded.ID() assert.Equal(t, blockID, decodedID) - assert.Equal(t, block, decoded) + assert.Equal(t, block, &decoded) } func TestNilProducesSameHashAsEmptySlice(t *testing.T) { - nilPayload := flow.Payload{ Guarantees: nil, Seals: nil, @@ -80,7 +93,6 @@ func TestNilProducesSameHashAsEmptySlice(t *testing.T) { } func TestOrderingChangesHash(t *testing.T) { - seals := unittest.Seal.Fixtures(5) payload1 := flow.Payload{ @@ -105,3 +117,259 @@ func TestBlock_Status(t *testing.T) { assert.Equal(t, status.String(), value) } } + +// TestBlockMalleability checks that flow.Block is not malleable: any change in its data +// should result in a different ID. +// Because our NewHeaderBody constructor enforces ParentView < View we use +// WithFieldGenerator to safely pass it. +func TestBlockMalleability(t *testing.T) { + block := unittest.FullBlockFixture() + unittest.RequireEntityNonMalleable( + t, + unittest.FullBlockFixture(), + unittest.WithFieldGenerator("HeaderBody.ParentView", func() uint64 { + return block.View - 1 // ParentView must stay below View, so set it to View-1 + }), + unittest.WithFieldGenerator("Payload.Results", func() flow.ExecutionResultList { + return flow.ExecutionResultList{unittest.ExecutionResultFixture()} + }), + ) +} + +// TestNewBlock verifies the behavior of the NewBlock constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedBlock results in a valid Block. +// +// 2. Invalid input with invalid HeaderBody: +// - Ensures an error is returned when the HeaderBody.ParentID is flow.ZeroID. +// +// 3. Invalid input with invalid Payload: +// - Ensures an error is returned when the Payload.ProtocolStateID is flow.ZeroID. +func TestNewBlock(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + block := unittest.BlockFixture() + + res, err := flow.NewBlock(flow.UntrustedBlock(*block)) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with invalid header body", func(t *testing.T) { + block := unittest.BlockFixture() + block.ParentID = flow.ZeroID + + res, err := flow.NewBlock(flow.UntrustedBlock(*block)) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid header body") + }) + + t.Run("invalid input with invalid payload", func(t *testing.T) { + block := unittest.BlockFixture() + block.Payload.ProtocolStateID = flow.ZeroID + + res, err := flow.NewBlock(flow.UntrustedBlock(*block)) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid payload") + }) +} + +// TestNewRootBlock verifies the behavior of the NewRootBlock constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedBlock results in a valid root Block. +// +// 2. Invalid input with invalid HeaderBody: +// - Ensures an error is returned when the HeaderBody.ParentView is not zero. 
+// +// 3. Invalid input with invalid Payload: +// - Ensures an error is returned when the Payload.ProtocolStateID is flow.ZeroID. +func TestNewRootBlock(t *testing.T) { + // validRootBlockFixture returns a new valid root flow.UntrustedBlock for use in tests. + validRootBlockFixture := func() flow.UntrustedBlock { + return flow.UntrustedBlock{ + HeaderBody: flow.HeaderBody{ + ChainID: flow.Emulator, + ParentID: unittest.IdentifierFixture(), + Height: 10, + Timestamp: uint64(time.Now().UnixMilli()), + View: 0, + ParentView: 0, + ParentVoterIndices: []byte{}, + ParentVoterSigData: []byte{}, + ProposerID: flow.ZeroID, + LastViewTC: nil, + }, + Payload: unittest.PayloadFixture(), + } + } + + t.Run("valid input", func(t *testing.T) { + res, err := flow.NewRootBlock(validRootBlockFixture()) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with invalid header body", func(t *testing.T) { + block := validRootBlockFixture() + block.ParentView = 1 + + res, err := flow.NewRootBlock(block) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid root header body") + }) + + t.Run("invalid input with invalid payload", func(t *testing.T) { + block := validRootBlockFixture() + block.Payload.ProtocolStateID = flow.ZeroID + + res, err := flow.NewRootBlock(block) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid payload") + }) +} + +// TestNewProposal verifies the behavior of the NewProposal constructor. +// It ensures proper handling of both valid and invalid input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedProposal results in a valid Proposal. +// +// 2. Invalid input with invalid Block: +// - Ensures an error is returned when the Block.ParentID is flow.ZeroID. +// +// 3. Invalid input with nil ProposerSigData: +// - Ensures an error is returned when the ProposerSigData is nil. +// +// 4. Invalid input with empty ProposerSigData: +// - Ensures an error is returned when the ProposerSigData is an empty byte slice. +func TestNewProposal(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + res, err := flow.NewProposal(flow.UntrustedProposal(*unittest.ProposalFixture())) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with invalid block", func(t *testing.T) { + untrustedProposal := flow.UntrustedProposal(*unittest.ProposalFixture()) + untrustedProposal.Block.ParentID = flow.ZeroID + + res, err := flow.NewProposal(untrustedProposal) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid block") + }) + + t.Run("invalid input with nil ProposerSigData", func(t *testing.T) { + untrustedProposal := flow.UntrustedProposal(*unittest.ProposalFixture()) + untrustedProposal.ProposerSigData = nil + + res, err := flow.NewProposal(untrustedProposal) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "proposer signature must not be empty") + }) + + t.Run("invalid input with empty ProposerSigData", func(t *testing.T) { + untrustedProposal := flow.UntrustedProposal(*unittest.ProposalFixture()) + untrustedProposal.ProposerSigData = []byte{} + + res, err := flow.NewProposal(untrustedProposal) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "proposer signature must not be empty") + }) +} + +// TestNewRootProposal verifies the behavior of the NewRootProposal constructor. 
+// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input with nil ProposerSigData: +// - Verifies that a root proposal with nil ProposerSigData is accepted. +// +// 2. Valid input with empty ProposerSigData: +// - Verifies that an empty (but non-nil) ProposerSigData is also accepted, +// since root proposals must not include a signature. +// +// 3. Invalid input with invalid Block: +// - Ensures an error is returned if the Block.ParentView is non-zero, which is disallowed for root blocks. +// +// 4. Invalid input with non-empty ProposerSigData: +// - Ensures an error is returned when a ProposerSigData is included, as this is not permitted for root proposals. +func TestNewRootProposal(t *testing.T) { + // validRootProposalFixture returns a new valid root flow.UntrustedProposal for use in tests. + validRootProposalFixture := func() flow.UntrustedProposal { + block, err := flow.NewRootBlock(flow.UntrustedBlock{ + HeaderBody: flow.HeaderBody{ + ChainID: flow.Emulator, + ParentID: unittest.IdentifierFixture(), + Height: 10, + Timestamp: uint64(time.Now().UnixMilli()), + View: 0, + ParentView: 0, + ParentVoterIndices: []byte{}, + ParentVoterSigData: []byte{}, + ProposerID: flow.ZeroID, + LastViewTC: nil, + }, + Payload: unittest.PayloadFixture(), + }) + if err != nil { + panic(err) + } + return flow.UntrustedProposal{ + Block: *block, + ProposerSigData: nil, + } + } + + t.Run("valid input with nil ProposerSigData", func(t *testing.T) { + res, err := flow.NewRootProposal(validRootProposalFixture()) + + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("valid input with empty ProposerSigData", func(t *testing.T) { + untrustedProposal := validRootProposalFixture() + untrustedProposal.ProposerSigData = []byte{} + + res, err := flow.NewRootProposal(untrustedProposal) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with invalid block", func(t *testing.T) { + untrustedProposal := validRootProposalFixture() + untrustedProposal.Block.ParentView = 1 + + res, err := flow.NewRootProposal(untrustedProposal) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "invalid root block") + }) + + t.Run("invalid input with non-empty proposer signature", func(t *testing.T) { + untrustedProposal := validRootProposalFixture() + untrustedProposal.ProposerSigData = unittest.SignatureFixture() + + res, err := flow.NewRootProposal(untrustedProposal) + require.Error(t, err) + require.Nil(t, res) + require.Contains(t, err.Error(), "proposer signature must be empty") + }) +} diff --git a/model/flow/chain.go b/model/flow/chain.go index 32ceb62467d..1aeed8313e2 100644 --- a/model/flow/chain.go +++ b/model/flow/chain.go @@ -12,6 +12,7 @@ import ( // // Chain IDs are used to prevent replay attacks and to support network-specific address generation. type ChainID string +type ChainIDList []ChainID const ( // Mainnet is the chain ID for the mainnet chain. @@ -23,6 +24,8 @@ const ( Testnet ChainID = "flow-testnet" // Sandboxnet is the chain ID for internal sandboxnet chain. Sandboxnet ChainID = "flow-sandboxnet" + // Previewnet is the chain ID for an external preview chain. + Previewnet ChainID = "flow-previewnet" // Transient test networks @@ -39,9 +42,24 @@ const ( MonotonicEmulator ChainID = "flow-emulator-monotonic" ) +// AllChainIDs returns a list of all supported chain IDs. 
+func AllChainIDs() ChainIDList { + return ChainIDList{ + Mainnet, + Testnet, + Sandboxnet, + Previewnet, + Benchnet, + Localnet, + Emulator, + BftTestnet, + MonotonicEmulator, + } +} + // Transient returns whether the chain ID is for a transient network. func (c ChainID) Transient() bool { - return c == Emulator || c == Localnet || c == Benchnet || c == BftTestnet + return c == Emulator || c == Localnet || c == Benchnet || c == BftTestnet || c == Previewnet } // getChainCodeWord derives the network type used for address generation from the globally @@ -54,6 +72,8 @@ func (c ChainID) getChainCodeWord() uint64 { return invalidCodeTestNetwork case Sandboxnet: return invalidCodeSandboxNetwork + case Previewnet: + return invalidCodePreviewNetwork case Emulator, Localnet, Benchnet, BftTestnet: return invalidCodeTransientNetwork default: @@ -62,7 +82,7 @@ func (c ChainID) getChainCodeWord() uint64 { } type chainImpl interface { - newAddressGeneratorAtIndex(index uint64) AddressGenerator + NewAddressGeneratorAtIndex(index uint64) AddressGenerator // IsValid returns true if a given address is a valid account address on a given chain, // and false otherwise. // @@ -82,7 +102,7 @@ type chainImpl interface { // where addresses are simply the index of the account. type monotonicImpl struct{} -func (m *monotonicImpl) newAddressGeneratorAtIndex(index uint64) AddressGenerator { +func (m *monotonicImpl) NewAddressGeneratorAtIndex(index uint64) AddressGenerator { return &MonotonicAddressGenerator{ index: index, } @@ -111,7 +131,7 @@ type linearCodeImpl struct { chainID ChainID } -func (l *linearCodeImpl) newAddressGeneratorAtIndex(index uint64) AddressGenerator { +func (l *linearCodeImpl) NewAddressGeneratorAtIndex(index uint64) AddressGenerator { return &linearCodeAddressGenerator{ index: index, chainCodeWord: l.chainID.getChainCodeWord(), @@ -181,6 +201,12 @@ var sandboxnet = &addressedChain{ }, } +var previewnet = &addressedChain{ + chainImpl: &linearCodeImpl{ + chainID: Previewnet, + }, +} + var benchnet = &addressedChain{ chainImpl: &linearCodeImpl{ chainID: Benchnet, @@ -212,6 +238,8 @@ func (c ChainID) Chain() Chain { return testnet case Sandboxnet: return sandboxnet + case Previewnet: + return previewnet case Benchnet: return benchnet case Localnet: @@ -234,6 +262,7 @@ func (c ChainID) String() string { // Chain is the interface for address generation implementations. type Chain interface { NewAddressGenerator() AddressGenerator + NewAddressGeneratorAtIndex(index uint64) AddressGenerator AddressAtIndex(index uint64) (Address, error) ServiceAddress() Address BytesToAddressGenerator(b []byte) AddressGenerator @@ -243,13 +272,12 @@ type Chain interface { ChainID() ChainID // required for tests zeroAddress() Address - newAddressGeneratorAtIndex(index uint64) AddressGenerator } // NewAddressGenerator returns a new AddressGenerator with an // initialized index. func (id *addressedChain) NewAddressGenerator() AddressGenerator { - return id.newAddressGeneratorAtIndex(0) + return id.NewAddressGeneratorAtIndex(0) } // AddressAtIndex returns the index-th generated account address. @@ -257,7 +285,7 @@ func (id *addressedChain) AddressAtIndex(index uint64) (Address, error) { if index > maxIndex { return EmptyAddress, fmt.Errorf("index must be less or equal to %x", maxIndex) } - return id.newAddressGeneratorAtIndex(index).CurrentAddress(), nil + return id.NewAddressGeneratorAtIndex(index).CurrentAddress(), nil } // ServiceAddress returns the root (first) generated account address. 
@@ -279,7 +307,7 @@ func (id *addressedChain) BytesToAddressGenerator(b []byte) AddressGenerator { bytes := slices.EnsureByteSliceSize(b, addressIndexLength) index := uint48(bytes[:]) - return id.newAddressGeneratorAtIndex(index) + return id.NewAddressGeneratorAtIndex(index) } // ChainID returns the chain ID of the chain. diff --git a/model/flow/chunk.go b/model/flow/chunk.go index 03ea37ca105..f54caf754e5 100644 --- a/model/flow/chunk.go +++ b/model/flow/chunk.go @@ -1,32 +1,53 @@ package flow -type ChunkBody struct { - // Block id of the execution result this chunk belongs to - BlockID Identifier - - CollectionIndex uint +import ( + "fmt" + "io" + "log" - // start state when starting executing this chunk - StartState StateCommitment + "github.com/ipfs/go-cid" + "github.com/onflow/go-ethereum/rlp" + "github.com/vmihailenco/msgpack/v4" +) - // - // execution info - // +var EmptyEventCollectionID Identifier - // number of transactions inside the collection - NumberOfTransactions uint64 +func init() { + // Convert hexadecimal string to a byte slice. + var err error + emptyEventCollectionHex := "0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8" + EmptyEventCollectionID, err = HexStringToIdentifier(emptyEventCollectionHex) + if err != nil { + log.Fatalf("Failed to decode hex: %v", err) + } +} - // Events generated by executing results - EventCollection Identifier +type ChunkBody struct { + CollectionIndex uint + // execution info + StartState StateCommitment // start state when starting executing this chunk + EventCollection Identifier // Events generated by executing results + // ServiceEventCount defines how many service events were emitted in this chunk. + // By reading these fields from the prior chunks in the same ExecutionResult, we can + // compute exactly what service events were emitted in this chunk. // - // Computation consumption info + // Let C be this chunk, K be the set of chunks in the ExecutionResult containing C. + // Then the service event indices for C are given by: + // StartIndex = ∑Ci.ServiceEventCount : Ci ∈ K, Ci.Index < C.Index + // EndIndex = StartIndex + C.ServiceEventCount + // The service events for C are given by: + // ExecutionResult.ServiceEvents[StartIndex:EndIndex] // + ServiceEventCount uint16 + BlockID Identifier // Block id of the execution result this chunk belongs to - // total amount of computation used by running all txs in this chunk - TotalComputationUsed uint64 + // Computation consumption info + TotalComputationUsed uint64 // total amount of computation used by running all txs in this chunk + NumberOfTransactions uint64 // number of transactions inside the collection } +//structwrite:immutable - mutations allowed only within the constructor type Chunk struct { ChunkBody @@ -35,74 +56,155 @@ type Chunk struct { EndState StateCommitment } -func NewChunk( - blockID Identifier, - collectionIndex int, - startState StateCommitment, - numberOfTransactions int, - eventCollection Identifier, - endState StateCommitment, +// UntrustedChunk is an untrusted input-only representation of an Chunk, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedChunk should be validated and converted into +// a trusted Chunk using NewChunk constructor. 
+type UntrustedChunk Chunk + +// NewChunk returns a Chunk compliant with Protocol Version 2 and later. +// Construction of a Chunk is allowed only within the constructor. +// +// All errors indicate that a valid Chunk cannot be constructed from the input. +func NewChunk(untrusted UntrustedChunk) (*Chunk, error) { + if untrusted.BlockID == ZeroID { + return nil, fmt.Errorf("BlockID must not be empty") + } + + if untrusted.StartState == (StateCommitment{}) { + return nil, fmt.Errorf("StartState must not be zero-value") + } + + if untrusted.EventCollection == ZeroID { + return nil, fmt.Errorf("EventCollection must not be empty") + } + + if untrusted.EndState == (StateCommitment{}) { + return nil, fmt.Errorf("EndState must not be zero-value") + } + + return &Chunk{ + ChunkBody: ChunkBody{ + BlockID: untrusted.BlockID, + CollectionIndex: untrusted.CollectionIndex, + StartState: untrusted.StartState, + NumberOfTransactions: untrusted.NumberOfTransactions, + EventCollection: untrusted.EventCollection, + ServiceEventCount: untrusted.ServiceEventCount, + TotalComputationUsed: untrusted.TotalComputationUsed, + }, + Index: untrusted.Index, + EndState: untrusted.EndState, + }, nil +} + +// NewRootChunk creates a chunk whose final state is the given commit, with all other fields set to zero. +// This is a special kind of chunk used only as the sole chunk of a root execution result, which forms +// a part of the root protocol state snapshot used as the trusted root for a spork. +func NewRootChunk( + commit StateCommitment, ) *Chunk { return &Chunk{ ChunkBody: ChunkBody{ - BlockID: blockID, - CollectionIndex: uint(collectionIndex), - StartState: startState, - NumberOfTransactions: uint64(numberOfTransactions), - EventCollection: eventCollection, - TotalComputationUsed: 0, // TODO: record gas used + BlockID: Identifier{}, + CollectionIndex: 0, + StartState: StateCommitment{}, + EventCollection: Identifier{}, + ServiceEventCount: 0, + TotalComputationUsed: 0, + NumberOfTransactions: 0, }, - Index: uint64(collectionIndex), - EndState: endState, + Index: 0, + EndState: commit, } } -// ID returns a unique id for this entity +// ID returns the unique identifier of the Chunk func (ch *Chunk) ID() Identifier { - return MakeID(ch.ChunkBody) -} - -// Checksum provides a cryptographic commitment for a chunk content -func (ch *Chunk) Checksum() Identifier { return MakeID(ch) } // ChunkDataPack holds all register touches (any read, or write). // -// Note that we have to capture a read proof for each write before updating the registers. -// `Proof` includes proofs for all registers read to execute the chunck. +// Note that we have to include merkle paths as storage proof for all registers touched (read or written) for +// the _starting_ state of the chunk (i.e. before the chunk computation updates the registers). +// For instance, if an execution state contains three registers: { A: 1, B: 2, C: 3}, and a certain +// chunk has a tx that assigns A = A + B, then its chunk data pack should include the merkle +// paths for { A: 1, B: 2 } as storage proof. +// C is not included because it's neither read nor written by the chunk. +// B is included because it's read by the chunk. +// A is included because it's updated by the chunk, and its value 1 is included because it's +// the value before the chunk computation. 
+// This is necessary for Verification Nodes to (i) check that the read register values are +// consistent with the starting state's root hash and (ii) verify the correctness of the resulting +// state after the chunk computation. `Proof` includes merkle proofs for all touched registers +// during the execution of the chunk. // Register proofs order must not be correlated to the order of register reads during // the chunk execution in order to enforce the SPoCK secret high entropy. type ChunkDataPack struct { - ChunkID Identifier - StartState StateCommitment - Proof StorageProof - Collection *Collection + ChunkID Identifier // ID of the chunk this data pack is for + StartState StateCommitment // commitment for starting state + Proof StorageProof // proof for all registers touched (read or written) during the chunk execution + Collection *Collection // collection executed in this chunk + + // ExecutionDataRoot is the root data structure of an execution_data.BlockExecutionData. + // It contains the necessary information for a verification node to validate that the + // BlockExecutionData produced is valid. + ExecutionDataRoot BlockExecutionDataRoot } +// UntrustedChunkDataPack is an untrusted input-only representation of a ChunkDataPack, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedChunkDataPack should be validated and converted into +// a trusted ChunkDataPack using the NewChunkDataPack constructor. +type UntrustedChunkDataPack ChunkDataPack + // NewChunkDataPack returns an initialized chunk data pack. -func NewChunkDataPack( - chunkID Identifier, - startState StateCommitment, - proof StorageProof, - collection *Collection, -) *ChunkDataPack { - return &ChunkDataPack{ - ChunkID: chunkID, - StartState: startState, - Proof: proof, - Collection: collection, +// Construction of a ChunkDataPack is allowed only within the constructor. +// +// All errors indicate that a valid ChunkDataPack cannot be constructed from the input. +func NewChunkDataPack(untrusted UntrustedChunkDataPack) (*ChunkDataPack, error) { + if untrusted.ChunkID == ZeroID { + return nil, fmt.Errorf("ChunkID must not be empty") } -} -// ID returns the unique identifier for the concrete view, which is the ID of -// the chunk the view is for. -func (c *ChunkDataPack) ID() Identifier { - return c.ChunkID + if untrusted.StartState == (StateCommitment{}) { + return nil, fmt.Errorf("StartState must not be zero-value") + } + + if len(untrusted.Proof) == 0 { + return nil, fmt.Errorf("Proof must not be empty") + } + + if untrusted.ExecutionDataRoot.BlockID == ZeroID { + return nil, fmt.Errorf("ExecutionDataRoot.BlockID must not be empty") + } + + if len(untrusted.ExecutionDataRoot.ChunkExecutionDataIDs) == 0 { + return nil, fmt.Errorf("ExecutionDataRoot.ChunkExecutionDataIDs must not be empty") + } + + return &ChunkDataPack{ + ChunkID: untrusted.ChunkID, + StartState: untrusted.StartState, + Proof: untrusted.Proof, + Collection: untrusted.Collection, + ExecutionDataRoot: untrusted.ExecutionDataRoot, + }, nil } -// Checksum returns the checksum of the chunk data pack. -func (c *ChunkDataPack) Checksum() Identifier { +// ID returns a collision-resistant hash of the ChunkDataPack struct. 
+func (c *ChunkDataPack) ID() Identifier { return MakeID(c) } @@ -135,16 +237,6 @@ func (cl ChunkList) Indices() []uint64 { return indices } -// ByChecksum returns an entity from the list by entity fingerprint -func (cl ChunkList) ByChecksum(cs Identifier) (*Chunk, bool) { - for _, ch := range cl { - if ch.Checksum() == cs { - return ch, true - } - } - return nil, false -} - // ByIndex returns an entity from the list by index // if requested chunk is within range of list, it returns chunk and true // if requested chunk is out of the range, it returns nil and false @@ -162,3 +254,131 @@ func (cl ChunkList) ByIndex(i uint64) (*Chunk, bool) { func (cl ChunkList) Len() int { return len(cl) } + +// BlockExecutionDataRoot represents the root of a serialized execution_data.BlockExecutionData. +// The hash of the serialized BlockExecutionDataRoot is the ExecutionDataID used within a +// flow.ExecutionResult. +// Context: +// - The trie updates in BlockExecutionDataRoot contain the _mutated_ registers only, which is +// helpful for clients to trustlessly replicate the state. +// - In comparison, the chunk data packs contain all the register values at the chunk's starting +// state that were _touched_ (written and/or read). This is necessary for Verification Nodes to +// re-run the chunk computation. +type BlockExecutionDataRoot struct { + // BlockID is the ID of the block whose result this execution data is for. + BlockID Identifier + + // ChunkExecutionDataIDs is a list of the root CIDs for each serialized execution_data.ChunkExecutionData + // associated with this block. + ChunkExecutionDataIDs []cid.Cid +} + +// EncodeRLP defines an RLP encoding for BlockExecutionDataRoot. We need to define a custom RLP encoding since [cid.Cid] doesn't have one. Without it we can't produce a collision-resistant hash. +// No errors are expected during normal operations. +func (b BlockExecutionDataRoot) EncodeRLP(w io.Writer) error { + encodingCanonicalForm := struct { + BlockID Identifier + ChunkExecutionDataIDs []string + }{ + BlockID: b.BlockID, + ChunkExecutionDataIDs: cidsToStrings(b.ChunkExecutionDataIDs), + } + + return rlp.Encode(w, encodingCanonicalForm) +} + +// MarshalMsgpack implements the msgpack.Marshaler interface +func (b BlockExecutionDataRoot) MarshalMsgpack() ([]byte, error) { + return msgpack.Marshal(struct { + BlockID Identifier + ChunkExecutionDataIDs []string + }{ + BlockID: b.BlockID, + ChunkExecutionDataIDs: cidsToStrings(b.ChunkExecutionDataIDs), + }) } + +// UnmarshalMsgpack implements the msgpack.Unmarshaler interface +func (b *BlockExecutionDataRoot) UnmarshalMsgpack(data []byte) error { + var temp struct { + BlockID Identifier + ChunkExecutionDataIDs []string + } + + if err := msgpack.Unmarshal(data, &temp); err != nil { + return err + } + + b.BlockID = temp.BlockID + cids, err := stringsToCids(temp.ChunkExecutionDataIDs) + + if err != nil { + return fmt.Errorf("failed to decode chunk execution data ids: %w", err) + } + + b.ChunkExecutionDataIDs = cids + + return nil +} + +// ChunkDataRequest represents a request for the chunk data pack +// which is specified by a chunk ID. +type ChunkDataRequest struct { + ChunkID Identifier + Nonce uint64 +} + +// ChunkDataResponse is the structurally validated response to a chunk data pack request. +// It contains the chunk data pack of interest. 
+type ChunkDataResponse struct { + ChunkDataPack ChunkDataPack + Nonce uint64 +} + +// Helper function to convert a slice of cid.Cid to a slice of strings +func cidsToStrings(cids []cid.Cid) []string { + if cids == nil { + return nil + } + strs := make([]string, len(cids)) + for i, c := range cids { + strs[i] = c.String() + } + return strs +} + +// Helper function to convert a slice of strings to a slice of cid.Cid +func stringsToCids(strs []string) ([]cid.Cid, error) { + if strs == nil { + return nil, nil + } + cids := make([]cid.Cid, len(strs)) + for i, s := range strs { + c, err := cid.Decode(s) + if err != nil { + return nil, fmt.Errorf("failed to decode cid %v: %w", s, err) + } + cids[i] = c + } + return cids, nil +} + +// Equals returns true if and only if receiver BlockExecutionDataRoot is equal to the `other`. +func (b BlockExecutionDataRoot) Equals(other BlockExecutionDataRoot) bool { + // Compare BlockID fields + if b.BlockID != other.BlockID { + return false + } + + // Compare ChunkExecutionDataIDs slices + if len(b.ChunkExecutionDataIDs) != len(other.ChunkExecutionDataIDs) { + return false + } + for i, cid := range b.ChunkExecutionDataIDs { + if !cid.Equals(other.ChunkExecutionDataIDs[i]) { + return false + } + } + + return true +} diff --git a/model/flow/chunk_test.go b/model/flow/chunk_test.go index 1fe256b4ed0..2b472fd96e0 100644 --- a/model/flow/chunk_test.go +++ b/model/flow/chunk_test.go @@ -3,9 +3,12 @@ package flow_test import ( "testing" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/rand" "github.com/onflow/flow-go/utils/unittest" ) @@ -24,61 +27,9 @@ func TestChunkList_ByIndex(t *testing.T) { require.True(t, ok) } -// TestDistinctChunkIDs_EmptyChunks evaluates that two empty chunks -// with the distinct block ids would have distinct chunk ids. -func TestDistinctChunkIDs_EmptyChunks(t *testing.T) { - // generates two random block ids and requires them - // being distinct - blockIdA := unittest.IdentifierFixture() - blockIdB := unittest.IdentifierFixture() - require.NotEqual(t, blockIdA, blockIdB) - - // generates a chunk associated with each block id - chunkA := &flow.Chunk{ - ChunkBody: flow.ChunkBody{ - BlockID: blockIdA, - }, - } - - chunkB := &flow.Chunk{ - ChunkBody: flow.ChunkBody{ - BlockID: blockIdB, - }, - } - - require.NotEqual(t, chunkA.ID(), chunkB.ID()) -} - -// TestDistinctChunkIDs_FullChunks evaluates that two full chunks -// with completely identical fields but distinct block ids have -// distinct chunk ids. -func TestDistinctChunkIDs_FullChunks(t *testing.T) { - // generates two random block ids and requires them - // being distinct - blockIdA := unittest.IdentifierFixture() - blockIdB := unittest.IdentifierFixture() - require.NotEqual(t, blockIdA, blockIdB) - - // generates a chunk associated with blockA - chunkA := unittest.ChunkFixture(blockIdA, 42) - - // generates a deep copy of chunkA in chunkB - chunkB := *chunkA - - // since chunkB is a deep copy of chunkA their - // chunk ids should be the same - require.Equal(t, chunkA.ID(), chunkB.ID()) - - // changes block id in chunkB - chunkB.BlockID = blockIdB - - // chunks with distinct block ids should have distinct chunk ids - require.NotEqual(t, chunkA.ID(), chunkB.ID()) -} - // TestChunkList_Indices evaluates the Indices method of ChunkList on lists of different sizes. 
func TestChunkList_Indices(t *testing.T) { - cl := unittest.ChunkListFixture(5, unittest.IdentifierFixture()) + cl := unittest.ChunkListFixture(5, unittest.IdentifierFixture(), unittest.StateCommitmentFixture()) t.Run("empty chunk subset indices", func(t *testing.T) { // subset of chunk list that is empty should return an empty list subset := flow.ChunkList{} @@ -104,3 +55,305 @@ func TestChunkList_Indices(t *testing.T) { require.Contains(t, indices, uint64(0), uint64(2), uint64(4)) }) } + +func TestChunkIndexIsSet(t *testing.T) { + i, err := rand.Uint() + require.NoError(t, err) + + chunk, err := flow.NewChunk(flow.UntrustedChunk{ + ChunkBody: flow.ChunkBody{ + CollectionIndex: i, + StartState: unittest.StateCommitmentFixture(), + EventCollection: unittest.IdentifierFixture(), + ServiceEventCount: 0, + BlockID: unittest.IdentifierFixture(), + TotalComputationUsed: 17995, + NumberOfTransactions: uint64(21), + }, + Index: uint64(i), + EndState: unittest.StateCommitmentFixture(), + }) + + require.NoError(t, err) + assert.Equal(t, i, uint(chunk.Index)) + assert.Equal(t, i, uint(chunk.CollectionIndex)) +} + +func TestChunkNumberOfTxsIsSet(t *testing.T) { + i, err := rand.Uint32() + require.NoError(t, err) + + chunk, err := flow.NewChunk(flow.UntrustedChunk{ + ChunkBody: flow.ChunkBody{ + CollectionIndex: 3, + StartState: unittest.StateCommitmentFixture(), + EventCollection: unittest.IdentifierFixture(), + ServiceEventCount: 0, + BlockID: unittest.IdentifierFixture(), + TotalComputationUsed: 17995, + NumberOfTransactions: uint64(i), + }, + Index: 3, + EndState: unittest.StateCommitmentFixture(), + }) + + require.NoError(t, err) + assert.Equal(t, i, uint32(chunk.NumberOfTransactions)) +} + +func TestChunkTotalComputationUsedIsSet(t *testing.T) { + i, err := rand.Uint64() + require.NoError(t, err) + + chunk, err := flow.NewChunk(flow.UntrustedChunk{ + ChunkBody: flow.ChunkBody{ + CollectionIndex: 3, + StartState: unittest.StateCommitmentFixture(), + EventCollection: unittest.IdentifierFixture(), + ServiceEventCount: 0, + BlockID: unittest.IdentifierFixture(), + TotalComputationUsed: i, + NumberOfTransactions: uint64(21), + }, + Index: 3, + EndState: unittest.StateCommitmentFixture(), + }) + + require.NoError(t, err) + assert.Equal(t, i, chunk.TotalComputationUsed) +} + +// TestChunkMalleability performs sanity checks to ensure that chunk is not malleable. +func TestChunkMalleability(t *testing.T) { + t.Run("Chunk with non-nil ServiceEventCount", func(t *testing.T) { + unittest.RequireEntityNonMalleable(t, unittest.ChunkFixture(unittest.IdentifierFixture(), 0, unittest.StateCommitmentFixture())) + }) +} + +// TestChunkDataPackMalleability performs sanity checks to ensure that ChunkDataPack is not malleable. +func TestChunkDataPackMalleability(t *testing.T) { + unittest.RequireEntityNonMalleable( + t, + unittest.ChunkDataPackFixture(unittest.IdentifierFixture()), + unittest.WithTypeGenerator[cid.Cid](func() cid.Cid { + return flow.IdToCid(unittest.IdentifierFixture()) + }), + ) +} + +// TestNewChunkDataPack verifies the behavior of the NewChunkDataPack constructor. +// It ensures that a fully‐populated UntrustedChunkDataPack yields a valid ChunkDataPack, +// and that missing or invalid required fields produce an error. +// +// Test Cases: +// +// 1. Valid input: +// - Ensures a ChunkDataPack is returned when all fields are populated. +// +// 2. Missing ChunkID: +// - Ensures an error is returned when ChunkID is ZeroID. +// +// 3. 
Zero StartState: +// - Ensures an error is returned when StartState is zero-value. +// +// 4. Nil Proof: +// - Ensures an error is returned when Proof is nil. +// +// 5. Empty Proof: +// - Ensures an error is returned when Proof is empty. +// +// 6. Missing ExecutionDataRoot.BlockID: +// - Ensures an error is returned when ExecutionDataRoot.BlockID is ZeroID. +// +// 7. Nil ExecutionDataRoot.ChunkExecutionDataIDs: +// - Ensures an error is returned when ChunkExecutionDataIDs is nil. +// +// 8. Empty ExecutionDataRoot.ChunkExecutionDataIDs: +// - Ensures an error is returned when ChunkExecutionDataIDs is empty. +func TestNewChunkDataPack(t *testing.T) { + chunkID := unittest.IdentifierFixture() + startState := unittest.StateCommitmentFixture() + proof := []byte{0x1, 0x2} + collection := unittest.CollectionFixture(1) + root := flow.BlockExecutionDataRoot{ + BlockID: unittest.IdentifierFixture(), + ChunkExecutionDataIDs: []cid.Cid{flow.IdToCid(unittest.IdentifierFixture())}, + } + + baseChunkDataPack := flow.UntrustedChunkDataPack{ + ChunkID: chunkID, + StartState: startState, + Proof: proof, + Collection: &collection, + ExecutionDataRoot: root, + } + + t.Run("valid chunk data pack", func(t *testing.T) { + pack, err := flow.NewChunkDataPack(baseChunkDataPack) + assert.NoError(t, err) + assert.NotNil(t, pack) + assert.Equal(t, *pack, flow.ChunkDataPack(baseChunkDataPack)) + }) + + t.Run("missing ChunkID", func(t *testing.T) { + untrusted := baseChunkDataPack + untrusted.ChunkID = flow.ZeroID + + pack, err := flow.NewChunkDataPack(untrusted) + assert.Error(t, err) + assert.Nil(t, pack) + assert.Contains(t, err.Error(), "ChunkID") + }) + + t.Run("zero StartState", func(t *testing.T) { + untrusted := baseChunkDataPack + untrusted.StartState = flow.StateCommitment{} + + pack, err := flow.NewChunkDataPack(untrusted) + assert.Error(t, err) + assert.Nil(t, pack) + assert.Contains(t, err.Error(), "StartState") + }) + + t.Run("nil Proof", func(t *testing.T) { + untrusted := baseChunkDataPack + untrusted.Proof = nil + + pack, err := flow.NewChunkDataPack(untrusted) + assert.Error(t, err) + assert.Nil(t, pack) + assert.Contains(t, err.Error(), "Proof") + }) + + t.Run("empty Proof", func(t *testing.T) { + untrusted := baseChunkDataPack + untrusted.Proof = []byte{} + + pack, err := flow.NewChunkDataPack(untrusted) + assert.Error(t, err) + assert.Nil(t, pack) + assert.Contains(t, err.Error(), "Proof") + }) + + t.Run("missing ExecutionDataRoot.BlockID", func(t *testing.T) { + untrusted := baseChunkDataPack + untrusted.ExecutionDataRoot.BlockID = flow.ZeroID + + pack, err := flow.NewChunkDataPack(untrusted) + assert.Error(t, err) + assert.Nil(t, pack) + assert.Contains(t, err.Error(), "ExecutionDataRoot.BlockID") + }) + + t.Run("nil ExecutionDataRoot.ChunkExecutionDataIDs", func(t *testing.T) { + untrusted := baseChunkDataPack + untrusted.ExecutionDataRoot.ChunkExecutionDataIDs = nil + + pack, err := flow.NewChunkDataPack(untrusted) + assert.Error(t, err) + assert.Nil(t, pack) + assert.Contains(t, err.Error(), "ExecutionDataRoot.ChunkExecutionDataIDs") + }) + + t.Run("empty ExecutionDataRoot.ChunkExecutionDataIDs", func(t *testing.T) { + untrusted := baseChunkDataPack + untrusted.ExecutionDataRoot.ChunkExecutionDataIDs = []cid.Cid{} + + pack, err := flow.NewChunkDataPack(untrusted) + assert.Error(t, err) + assert.Nil(t, pack) + assert.Contains(t, err.Error(), "ExecutionDataRoot.ChunkExecutionDataIDs") + }) +} + +// 
TestNewChunk verifies that NewChunk constructs a valid Chunk when given +// complete, nonzero fields, and returns an error if any required field is +// missing or zero. +// +// Test Cases: +// +// 1. Valid input: +// - Ensures a Chunk is returned when all fields are populated. +// +// 2. Missing BlockID: +// - Ensures an error is returned when BlockID is ZeroID. +// +// 3. Zero StartState: +// - Ensures an error is returned when StartState is zero-value. +// +// 4. Missing EventCollection: +// - Ensures an error is returned when EventCollection is ZeroID. +// +// 5. Zero EndState: +// - Ensures an error is returned when EndState is zero-value. +func TestNewChunk(t *testing.T) { + validID := unittest.IdentifierFixture() + validState := unittest.StateCommitmentFixture() + + base := flow.UntrustedChunk{ + ChunkBody: flow.ChunkBody{ + BlockID: validID, + CollectionIndex: 3, + StartState: validState, + EventCollection: validID, + ServiceEventCount: 2, + TotalComputationUsed: 10, + NumberOfTransactions: 5, + }, + Index: 1, + EndState: validState, + } + + t.Run("valid chunk", func(t *testing.T) { + ch, err := flow.NewChunk(base) + assert.NoError(t, err) + assert.NotNil(t, ch) + assert.Equal(t, *ch, flow.Chunk(base)) + }) + + t.Run("missing BlockID", func(t *testing.T) { + u := base + u.ChunkBody.BlockID = flow.ZeroID + + ch, err := flow.NewChunk(u) + assert.Error(t, err) + assert.Nil(t, ch) + assert.Contains(t, err.Error(), "BlockID") + }) + + t.Run("zero StartState", func(t *testing.T) { + u := base + u.ChunkBody.StartState = flow.StateCommitment{} + + ch, err := flow.NewChunk(u) + assert.Error(t, err) + assert.Nil(t, ch) + assert.Contains(t, err.Error(), "StartState") + }) + + t.Run("missing EventCollection", func(t *testing.T) { + u := base + u.ChunkBody.EventCollection = flow.ZeroID + + ch, err := flow.NewChunk(u) + assert.Error(t, err) + assert.Nil(t, ch) + assert.Contains(t, err.Error(), "EventCollection") + }) + + t.Run("zero EndState", func(t *testing.T) { + u := base + u.EndState = flow.StateCommitment{} + + ch, err := flow.NewChunk(u) + assert.Error(t, err) + assert.Nil(t, ch) + assert.Contains(t, err.Error(), "EndState") + }) +} diff --git a/model/flow/cluster.go b/model/flow/cluster.go index 9e4eb289ff6..7696d834184 100644 --- a/model/flow/cluster.go +++ b/model/flow/cluster.go @@ -11,7 +11,7 @@ type AssignmentList []IdentifierList // ClusterList is a list of identity lists. Each `IdentityList` represents the // nodes assigned to a specific cluster. -type ClusterList []IdentityList +type ClusterList []IdentitySkeletonList func (al AssignmentList) EqualTo(other AssignmentList) bool { if len(al) != len(other) { @@ -45,10 +45,10 @@ func (cl ClusterList) Assignments() AssignmentList { // NewClusterList creates a new cluster list based on the given cluster assignment // and the provided list of identities. 
-func NewClusterList(assignments AssignmentList, collectors IdentityList) (ClusterList, error) { +func NewClusterList(assignments AssignmentList, collectors IdentitySkeletonList) (ClusterList, error) { // build a lookup for all the identities by node identifier - lookup := make(map[Identifier]*Identity) + lookup := make(map[Identifier]*IdentitySkeleton) for _, collector := range collectors { _, ok := lookup[collector.NodeID] if ok { @@ -60,7 +60,7 @@ func NewClusterList(assignments AssignmentList, collectors IdentityList) (Cluste // replicate the identifier list but use identities instead clusters := make(ClusterList, 0, len(assignments)) for _, participants := range assignments { - cluster := make(IdentityList, 0, len(participants)) + cluster := make(IdentitySkeletonList, 0, len(participants)) for _, participantID := range participants { participant, found := lookup[participantID] if !found { @@ -81,7 +81,7 @@ func NewClusterList(assignments AssignmentList, collectors IdentityList) (Cluste } // ByIndex retrieves the list of identities that are part of the given cluster. -func (cl ClusterList) ByIndex(index uint) (IdentityList, bool) { +func (cl ClusterList) ByIndex(index uint) (IdentitySkeletonList, bool) { if index >= uint(len(cl)) { return nil, false } @@ -93,7 +93,7 @@ func (cl ClusterList) ByIndex(index uint) (IdentityList, bool) { // // For evenly distributed transaction IDs, this will evenly distribute // transactions between clusters. -func (cl ClusterList) ByTxID(txID Identifier) (IdentityList, bool) { +func (cl ClusterList) ByTxID(txID Identifier) (IdentitySkeletonList, bool) { bigTxID := new(big.Int).SetBytes(txID[:]) bigIndex := new(big.Int).Mod(bigTxID, big.NewInt(int64(len(cl)))) return cl.ByIndex(uint(bigIndex.Uint64())) @@ -103,7 +103,7 @@ func (cl ClusterList) ByTxID(txID Identifier) (IdentityList, bool) { // // Nodes will be divided into equally sized clusters as far as possible. // The last return value will indicate if the look up was successful -func (cl ClusterList) ByNodeID(nodeID Identifier) (IdentityList, uint, bool) { +func (cl ClusterList) ByNodeID(nodeID Identifier) (IdentitySkeletonList, uint, bool) { for index, cluster := range cl { for _, participant := range cluster { if participant.NodeID == nodeID { @@ -115,7 +115,7 @@ func (cl ClusterList) ByNodeID(nodeID Identifier) (IdentityList, uint, bool) { } // IndexOf returns the index of the given cluster. 
-func (cl ClusterList) IndexOf(cluster IdentityList) (uint, bool) { +func (cl ClusterList) IndexOf(cluster IdentitySkeletonList) (uint, bool) { clusterFingerprint := cluster.ID() for index, other := range cl { if other.ID() == clusterFingerprint { diff --git a/model/flow/cluster_test.go b/model/flow/cluster_test.go index 52d8f39e72c..9bd245cafb9 100644 --- a/model/flow/cluster_test.go +++ b/model/flow/cluster_test.go @@ -15,10 +15,10 @@ import ( func TestClusterAssignments(t *testing.T) { identities := unittest.IdentityListFixture(100, unittest.WithRole(flow.RoleCollection)) - assignments := unittest.ClusterAssignment(10, identities) + assignments := unittest.ClusterAssignment(10, identities.ToSkeleton()) assert.Len(t, assignments, 10) - clusters, err := factory.NewClusterList(assignments, identities) + clusters, err := factory.NewClusterList(assignments, identities.ToSkeleton()) require.NoError(t, err) assert.Equal(t, assignments, clusters.Assignments()) } diff --git a/model/flow/collection.go b/model/flow/collection.go index 5e365678eef..788a7dfbe3d 100644 --- a/model/flow/collection.go +++ b/model/flow/collection.go @@ -1,119 +1,105 @@ package flow -import "github.com/onflow/flow-go/model/fingerprint" +import "fmt" -// Collection is set of transactions. +// Collection is an ordered list of transactions. +// Collections form a part of the payload of cluster blocks, produced by Collection Nodes. +// Every Collection maps 1-1 to a Chunk, which is used for transaction execution. +// +//structwrite:immutable - mutations allowed only within the constructor type Collection struct { Transactions []*TransactionBody } -// CollectionFromTransactions creates a new collection from the list of -// transactions. -func CollectionFromTransactions(transactions []*Transaction) Collection { - coll := Collection{Transactions: make([]*TransactionBody, 0, len(transactions))} - for _, tx := range transactions { - coll.Transactions = append(coll.Transactions, &tx.TransactionBody) +// UntrustedCollection is an untrusted input-only representation of a Collection, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedCollection should be validated and converted into +// a trusted Collection using the NewCollection constructor. +type UntrustedCollection Collection + +// NewCollection creates a new instance of Collection. +// Construction of a Collection is allowed only within the constructor. +// +// All errors indicate that a valid Collection cannot be constructed from the input. +func NewCollection(untrusted UntrustedCollection) (*Collection, error) { + for i, tx := range untrusted.Transactions { + if tx == nil { + return nil, fmt.Errorf("transaction at index %d is nil", i) + } } - return coll + + return &Collection{ + Transactions: untrusted.Transactions, + }, nil } -// Light returns the light, reference-only version of the collection. -func (c Collection) Light() LightCollection { - lc := LightCollection{Transactions: make([]Identifier, 0, len(c.Transactions))} - for _, tx := range c.Transactions { - lc.Transactions = append(lc.Transactions, tx.ID()) +// NewEmptyCollection creates a new empty instance of Collection. +func NewEmptyCollection() *Collection { + return &Collection{ + Transactions: []*TransactionBody{}, } - return lc } -// Guarantee returns a collection guarantee for this collection. 
-func (c *Collection) Guarantee() CollectionGuarantee { - return CollectionGuarantee{ - CollectionID: c.ID(), +// Light returns a LightCollection, which contains only the list of transaction IDs from the Collection. +func (c Collection) Light() *LightCollection { + txIDs := make([]Identifier, 0, len(c.Transactions)) + for _, tx := range c.Transactions { + txIDs = append(txIDs, tx.ID()) } + return NewLightCollection(UntrustedLightCollection{ + Transactions: txIDs, + }) } +// ID returns a cryptographic commitment to the Collection. +// The ID of a Collection is equivalent to the ID of its corresponding LightCollection. func (c Collection) ID() Identifier { return c.Light().ID() } +// Len returns the number of transactions in the collection. func (c Collection) Len() int { return len(c.Transactions) } -func (c Collection) Checksum() Identifier { - return c.Light().Checksum() -} - -func (c Collection) Fingerprint() []byte { - var txs []byte - for _, tx := range c.Transactions { - txs = append(txs, tx.Fingerprint()...) - } - - return fingerprint.Fingerprint(struct { - Transactions []byte - }{ - Transactions: txs, - }) -} - -// LightCollection is a collection containing references to the constituent -// transactions rather than full transaction bodies. It is used for indexing -// transactions by collection and for computing the collection fingerprint. +// LightCollection contains cryptographic commitments to the constituent transactions instead of transaction bodies. +// It is used for indexing transactions by collection and for computing the collection fingerprint. +// +//structwrite:immutable - mutations allowed only within the constructor type LightCollection struct { Transactions []Identifier } -func (lc LightCollection) ID() Identifier { - return MakeID(lc) +// UntrustedLightCollection is an untrusted input-only representation of a LightCollection, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedLightCollection should be validated and converted into +// a trusted LightCollection using NewLightCollection constructor. +type UntrustedLightCollection LightCollection + +// NewLightCollection constructs a new LightCollection instance. +func NewLightCollection(untrusted UntrustedLightCollection) *LightCollection { + return &LightCollection{ + Transactions: untrusted.Transactions, + } } -func (lc LightCollection) Checksum() Identifier { +// ID returns a cryptographic commitment to the LightCollection. +// The ID of a LightCollection is equivalent to the ID for its corresponding Collection. +func (lc LightCollection) ID() Identifier { return MakeID(lc) } +// Len returns the number of transactions in the collection. func (lc LightCollection) Len() int { return len(lc.Transactions) } - -func (lc LightCollection) Has(txID Identifier) bool { - for _, id := range lc.Transactions { - if txID == id { - return true - } - } - return false -} - -// Note that this is the basic version of the List, we need to substitute it with something like Merkle tree at some point -type CollectionList struct { - collections []*Collection -} - -func (cl *CollectionList) Fingerprint() Identifier { - return MerkleRoot(GetIDs(cl.collections)...) 
-} - -func (cl *CollectionList) Insert(ch *Collection) { - cl.collections = append(cl.collections, ch) -} - -func (cl *CollectionList) Items() []*Collection { - return cl.collections -} - -// ByChecksum returns an entity from the list by entity fingerprint -func (cl *CollectionList) ByChecksum(cs Identifier) (*Collection, bool) { - for _, coll := range cl.collections { - if coll.Checksum() == cs { - return coll, true - } - } - return nil, false -} - -// ByIndex returns an entity from the list by index -func (cl *CollectionList) ByIndex(i uint64) *Collection { - return cl.collections[i] -} diff --git a/model/flow/collectionGuarantee.go b/model/flow/collectionGuarantee.go index c05307c11a7..10948baf550 100644 --- a/model/flow/collectionGuarantee.go +++ b/model/flow/collectionGuarantee.go @@ -1,28 +1,71 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package flow import ( - "github.com/onflow/flow-go/crypto" + "fmt" + + "github.com/onflow/crypto" ) // CollectionGuarantee is a signed hash for a collection, which is used // to announce collections to consensus nodes. +// +//structwrite:immutable - mutations allowed only within the constructor type CollectionGuarantee struct { CollectionID Identifier // ID of the collection being guaranteed ReferenceBlockID Identifier // defines expiry of the collection - ChainID ChainID // the chainID of the cluster in order to determine which cluster this guarantee belongs to + ClusterChainID ChainID // the chainID of the cluster in order to determine which cluster this guarantee belongs to SignerIndices []byte // encoded indices of the signers Signature crypto.Signature // guarantor signatures } -// ID returns the fingerprint of the collection guarantee. -func (cg *CollectionGuarantee) ID() Identifier { - return cg.CollectionID +// UntrustedCollectionGuarantee is an untrusted input-only representation of a CollectionGuarantee, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedCollectionGuarantee should be validated and converted into +// a trusted CollectionGuarantee using the NewCollectionGuarantee constructor. +type UntrustedCollectionGuarantee CollectionGuarantee + +// NewCollectionGuarantee creates a new instance of CollectionGuarantee. +// Construction of a CollectionGuarantee is allowed only within the constructor. +// +// This constructor enforces basic structural validity, ensuring critical fields like +// CollectionID and ReferenceBlockID are non-zero. +// The Signature field is not validated here for the following reasons: +// +// - Signature is currently unused and set to nil when generating a CollectionGuarantee, +// as the consensus nodes are currently unable to easily verify it.
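A hypothetical caller-side sketch of the untrusted-to-trusted construction pattern documented above (the constructor itself follows below); all parameter names are assumptions of this sketch, not part of the diff:

package example

import (
	"github.com/onflow/flow-go/model/flow"
)

// buildGuarantee shows the intended call pattern: populate the Untrusted*
// type with named fields, then validate via the constructor. Signature is
// left nil, matching the note above that it is currently unused.
func buildGuarantee(
	collectionID, refBlockID flow.Identifier, // assumed non-zero
	clusterChainID flow.ChainID, // assumed non-empty
	signerIndices []byte, // assumed non-empty
) (*flow.CollectionGuarantee, error) {
	return flow.NewCollectionGuarantee(flow.UntrustedCollectionGuarantee{
		CollectionID:     collectionID,
		ReferenceBlockID: refBlockID,
		ClusterChainID:   clusterChainID,
		SignerIndices:    signerIndices,
		Signature:        nil,
	})
}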
+func NewCollectionGuarantee(untrusted UntrustedCollectionGuarantee) (*CollectionGuarantee, error) { + if untrusted.CollectionID == ZeroID { + return nil, fmt.Errorf("CollectionID must not be empty") + } + + if untrusted.ReferenceBlockID == ZeroID { + return nil, fmt.Errorf("ReferenceBlockID must not be empty") + } + + if len(untrusted.SignerIndices) == 0 { + return nil, fmt.Errorf("SignerIndices must not be empty") + } + + if len(untrusted.ClusterChainID) == 0 { + return nil, fmt.Errorf("ClusterChainID must not be empty") + } + + return &CollectionGuarantee{ + CollectionID: untrusted.CollectionID, + ReferenceBlockID: untrusted.ReferenceBlockID, + ClusterChainID: untrusted.ClusterChainID, + SignerIndices: untrusted.SignerIndices, + Signature: untrusted.Signature, + }, nil } -// Checksum returns a checksum of the collection guarantee including the -// signatures. -func (cg *CollectionGuarantee) Checksum() Identifier { +// ID returns a collision-resistant hash of the CollectionGuarantee struct. +// This is distinct from the ID of the corresponding collection. +func (cg *CollectionGuarantee) ID() Identifier { return MakeID(cg) } diff --git a/model/flow/collectionGuarantee_test.go b/model/flow/collectionGuarantee_test.go new file mode 100644 index 00000000000..1f2b258cecf --- /dev/null +++ b/model/flow/collectionGuarantee_test.go @@ -0,0 +1,71 @@ +package flow_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestCollectionGuaranteeID_Malleability confirms that the CollectionGuarantee struct, which implements +// the [flow.IDEntity] interface, is resistant to tampering. +func TestCollectionGuaranteeID_Malleability(t *testing.T) { + unittest.RequireEntityNonMalleable(t, unittest.CollectionGuaranteeFixture()) +} + +func TestNewCollectionGuarantee(t *testing.T) { + t.Run("valid guarantee", func(t *testing.T) { + ug := flow.UntrustedCollectionGuarantee{ + CollectionID: unittest.IdentifierFixture(), + ReferenceBlockID: unittest.IdentifierFixture(), + ClusterChainID: flow.Testnet, + SignerIndices: []byte{0, 1, 2}, + Signature: unittest.SignatureFixture(), + } + + guarantee, err := flow.NewCollectionGuarantee(ug) + assert.NoError(t, err) + assert.NotNil(t, guarantee) + }) + + t.Run("missing collection ID", func(t *testing.T) { + ug := flow.UntrustedCollectionGuarantee{ + CollectionID: flow.ZeroID, + ReferenceBlockID: unittest.IdentifierFixture(), + SignerIndices: []byte{1}, + } + + guarantee, err := flow.NewCollectionGuarantee(ug) + assert.Error(t, err) + assert.Nil(t, guarantee) + assert.Contains(t, err.Error(), "CollectionID") + }) + + t.Run("missing reference block ID", func(t *testing.T) { + ug := flow.UntrustedCollectionGuarantee{ + CollectionID: unittest.IdentifierFixture(), + ReferenceBlockID: flow.ZeroID, + SignerIndices: []byte{1}, + } + + guarantee, err := flow.NewCollectionGuarantee(ug) + assert.Error(t, err) + assert.Nil(t, guarantee) + assert.Contains(t, err.Error(), "ReferenceBlockID") + }) + + t.Run("missing signer indices", func(t *testing.T) { + ug := flow.UntrustedCollectionGuarantee{ + CollectionID: unittest.IdentifierFixture(), + ReferenceBlockID: unittest.IdentifierFixture(), + SignerIndices: []byte{}, + } + + guarantee, err := flow.NewCollectionGuarantee(ug) + assert.Error(t, err) + assert.Nil(t, guarantee) + assert.Contains(t, err.Error(), "SignerIndices") + }) +} diff --git a/model/flow/collection_test.go b/model/flow/collection_test.go index 
3f6e096a72f..d53ddd669db 100644 --- a/model/flow/collection_test.go +++ b/model/flow/collection_test.go @@ -5,19 +5,100 @@ import ( "github.com/stretchr/testify/assert" - "github.com/onflow/flow-go/model/encoding/rlp" - "github.com/onflow/flow-go/model/fingerprint" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) -func TestLightCollectionFingerprint(t *testing.T) { - col := unittest.CollectionFixture(2) - colID := col.ID() - data := fingerprint.Fingerprint(col.Light()) - var decoded flow.LightCollection - rlp.NewMarshaler().MustUnmarshal(data, &decoded) - decodedID := decoded.ID() - assert.Equal(t, colID, decodedID) - assert.Equal(t, col.Light(), decoded) +// TestLightCollectionID_Malleability confirms that the LightCollection struct, which implements +// the [flow.IDEntity] interface, is resistant to tampering. +func TestLightCollectionID_Malleability(t *testing.T) { + unittest.RequireEntityNonMalleable(t, &flow.LightCollection{ + Transactions: unittest.IdentifierListFixture(5), + }) +} + +// TestCollectionID_Malleability confirms that the Collection struct, which implements +// the [flow.IDEntity] interface, is resistant to tampering. +func TestCollectionID_Malleability(t *testing.T) { + collection := unittest.CollectionFixture(5) + unittest.RequireEntityNonMalleable(t, &collection, unittest.WithTypeGenerator[flow.TransactionBody](func() flow.TransactionBody { + return unittest.TransactionBodyFixture() + })) +} + +func TestNewCollection(t *testing.T) { + t.Run("valid untrusted collection", func(t *testing.T) { + tx := unittest.TransactionBodyFixture() + ub := flow.UntrustedCollection{ + Transactions: []*flow.TransactionBody{&tx}, + } + + col, err := flow.NewCollection(ub) + assert.NoError(t, err) + assert.NotNil(t, col) + assert.Len(t, col.Transactions, 1) + + t.Run("convert to LightCollection", func(t *testing.T) { + light := col.Light() + assert.Equal(t, light.Len(), col.Len()) + for i := range light.Len() { + assert.Equal(t, col.Transactions[i].ID(), light.Transactions[i]) + } + }) + }) + + t.Run("empty transaction list", func(t *testing.T) { + ub := flow.UntrustedCollection{ + Transactions: []*flow.TransactionBody{}, + } + + col, err := flow.NewCollection(ub) + assert.NoError(t, err) + assert.NotNil(t, col) + assert.Empty(t, col.Transactions) + }) + + t.Run("nil transaction list", func(t *testing.T) { + ub := flow.UntrustedCollection{ + Transactions: nil, + } + + col, err := flow.NewCollection(ub) + assert.NoError(t, err) + assert.NotNil(t, col) + assert.Empty(t, col.Transactions) + }) + + t.Run("nil transaction in list", func(t *testing.T) { + ub := flow.UntrustedCollection{ + Transactions: []*flow.TransactionBody{nil}, + } + + col, err := flow.NewCollection(ub) + assert.Error(t, err) + assert.Nil(t, col) + assert.Contains(t, err.Error(), "transaction at index") + }) +} + +// TestNewLightCollection tests creating a new LightCollection. +// All possible inputs should produce a valid LightCollection, including nil/empty transaction lists. 
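The tests above exercise the Collection/LightCollection relationship introduced in collection.go; a minimal sketch of the documented ID equivalence, which holds by construction since a Collection's ID is defined via its LightCollection:

package example

import (
	"github.com/onflow/flow-go/model/flow"
)

// collectionIDsAgree sketches the documented invariant: a Collection's ID
// is the ID of its LightCollection (the ordered list of transaction IDs).
func collectionIDsAgree(col *flow.Collection) bool {
	light := col.Light() // transaction IDs only, no bodies
	return col.ID() == light.ID()
}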
+func TestNewLightCollection(t *testing.T) { + t.Run("valid untrusted light collection", func(t *testing.T) { + untrusted := flow.UntrustedLightCollection{ + Transactions: []flow.Identifier{unittest.IdentifierFixture()}, + } + + col := flow.NewLightCollection(untrusted) + assert.NotNil(t, col) + assert.Equal(t, untrusted.Transactions, col.Transactions) + }) + + t.Run("valid empty untrusted light collection", func(t *testing.T) { + untrusted := flow.UntrustedLightCollection{} + + col := flow.NewLightCollection(untrusted) + assert.NotNil(t, col) + assert.Len(t, col.Transactions, 0) + }) } diff --git a/model/flow/constants.go b/model/flow/constants.go index 4f172c36528..a66bddb7b4e 100644 --- a/model/flow/constants.go +++ b/model/flow/constants.go @@ -99,3 +99,21 @@ func paddedDomainTag(s string) [DomainTagLength]byte { return tag } + +// EstimatedComputationPerMillisecond is the approximate number of computation units that can be performed in a millisecond. +// this was calibrated during the Variable Transaction Fees: Execution Effort FLIP https://github.com/onflow/flow/pull/753. +// Updated after the FLIP: +// https://github.com/onflow/flips/blob/14c5ec4/governance/20240508-computation-limit-hike.md#flip-267-increasing-the-transaction-computation-limit +const EstimatedComputationPerMillisecond = 9999.0 / 1000.0 + +// NormalizedExecutionTimePerComputationUnit returns the normalized time per computation unit +// If the computation estimation is correct (as per the FLIP https://github.com/onflow/flow/pull/753) the value should be 1. +// If the value is greater than 1, the computation estimation is too low; we are underestimating transaction complexity (and thus undercharging). +// If the value is less than 1, the computation estimation is too high; we are overestimating transaction complexity (and thus overcharging). +func NormalizedExecutionTimePerComputationUnit(execTime time.Duration, computationUsed uint64) float64 { + if computationUsed == 0 { + return 0 + } + + return (float64(execTime.Milliseconds()) / float64(computationUsed)) * EstimatedComputationPerMillisecond +} diff --git a/model/flow/dkg.go b/model/flow/dkg.go index 3d4fede20e4..11a75825ae0 100644 --- a/model/flow/dkg.go +++ b/model/flow/dkg.go @@ -1,32 +1,119 @@ package flow -// DKGEndState captures the final state of a completed DKG. -type DKGEndState uint32 +// DKGState captures all possible states of the Recoverable Random Beacon State Machine. +type DKGState uint32 const ( - // DKGEndStateUnknown - zero value for this enum, indicates unset value - DKGEndStateUnknown DKGEndState = iota - // DKGEndStateSuccess - the DKG completed, this node has a valid beacon key. - DKGEndStateSuccess - // DKGEndStateInconsistentKey - the DKG completed, this node has an invalid beacon key. - DKGEndStateInconsistentKey - // DKGEndStateNoKey - this node did not store a key, typically caused by a crash mid-DKG. - DKGEndStateNoKey - // DKGEndStateDKGFailure - the underlying DKG library reported an error. - DKGEndStateDKGFailure + // DKGStateUninitialized - zero value for this enum, indicates that there is no initialized state. + // Conceptually, this is the 'initial' state of a finite state machine before any transitions. + DKGStateUninitialized DKGState = iota + // DKGStateStarted - the DKG process has been started. This state is set when the node enters the [flow.EpochPhaseSetup] + // phase and starts the DKG process, which will on the happy path result in generating a Random Beacon key. 
+ DKGStateStarted + // DKGStateCompleted - the DKG process has been locally completed by this node. This state is set when the node successfully + // completes the DKG process and has generated a Random Beacon key. + // ATTENTION: This state does not imply that there is a safe Random Beacon key available for the next epoch. Only after + // the node enters [flow.EpochPhaseCommitted] and the [flow.EpochCommit] service event has been finalized, we can be sure + // that our beacon key share is part of the Random Beacon Committee for the next epoch, in this case the state will be [flow.RandomBeaconKeyCommitted]. + DKGStateCompleted + // RandomBeaconKeyCommitted - the Random Beacon key has been committed. This state is set when the node has observed an [flow.EpochCommit] + // which contains the public key share that matches the private key share that the node has obtained. + // A node can obtain a key share by successfully completing the DKG process or by manually injecting a key share obtained + // by other means (e.g. key recovery). + // Regardless of the key origin, this is a terminal state which defines a safe Random Beacon key for the next epoch and allows the node + // to participate in the Random Beacon protocol. + RandomBeaconKeyCommitted + // DKGStateFailure - DKG process has failed, this state indicates that we have left the happy path. + DKGStateFailure ) -func (state DKGEndState) String() string { +func (state DKGState) String() string { switch state { - case DKGEndStateSuccess: - return "DKGEndStateSuccess" - case DKGEndStateInconsistentKey: - return "DKGEndStateInconsistentKey" - case DKGEndStateNoKey: - return "DKGEndStateNoKey" - case DKGEndStateDKGFailure: - return "DKGEndStateDKGFailure" + case DKGStateStarted: + return "DKGStateStarted" + case DKGStateCompleted: + return "DKGStateCompleted" + case RandomBeaconKeyCommitted: + return "RandomBeaconKeyCommitted" + case DKGStateFailure: + return "DKGStateFailure" default: - return "DKGEndStateUnknown" + return "DKGStateUninitialized" } } + +// DKGIndexMap completely describes the DKG committee 𝒟 of size |𝒟| = n. +// +// Formal specification: +// - If n parties are authorized to participate in the DKG, DKGIndexMap must contain exactly n +// elements, i.e. n = len(DKGIndexMap) +// - The values in DKGIndexMap must form the set {0, 1, …, n-1}, as required by the low level cryptography +// module (convention simplifying the implementation). +// +// Flow's Random Beacon utilizes a threshold signature scheme run by the committee 𝒟. +// In the formal cryptographic protocol for a threshold signature with n parties, the +// individual participants are identified by n public distinct non-negative integers, or simply indices. +// These public indices are agreed upon by all participants and are used by the low-level +// Shamir Secret Sharing [SSS]. +// In Flow, the threshold signature keys are generated by a Distributed Key Generation [DKG]. The DKG +// therefore requires the same SSS indices as an input to generate the private key shares of each participant. +// Accordingly, the lower-level cryptographic implementation of the threshold signature and DKG +// works with these indices. The lower-level cryptographic interface requires that the indices are exactly +// the set {0, 1, ..., n-1}. +// +// On the protocol level, only consensus nodes (identified by their nodeIDs) are allowed to contribute +// Random Beacon signature shares. 
Hence, the protocol level needs to map nodeIDs to the indices when +// calling into the lower-level cryptographic primitives. +// +// CAUTION: It is important to cleanly differentiate between the consensus committee 𝒞, the DKG committee 𝒟 +// and the committee ℛ: +// - For an epoch, the consensus committee 𝒞 contains all nodes that are authorized to vote for blocks. Authority +// to vote (i.e. membership in the consensus committee) is irrevocably granted for an epoch (though, honest nodes +// will reject votes and proposals from ejected nodes; nevertheless, ejected nodes formally remain members of +// the consensus committee). +// - The DKG committee 𝒟 is the set of parties that were authorized to participate in the DKG (happy path; or +// eligible to receive a private key share from an alternative source on the fallback path). Mathematically, +// the DKGIndexMap is a bijective function DKGIndexMap: 𝒟 ↦ {0,1,…,n-1}. +// - Only consensus nodes are allowed to contribute to the Random Beacon. Informally, we define ℛ as the +// subset of the consensus committee (ℛ ⊆ 𝒞), which _successfully_ completed the DKG (hence ℛ ⊆ 𝒟). +// Specifically, r ∈ ℛ if and only if r has a private Random Beacon key share matching the respective public +// key share in the `EpochCommit` event. In other words, consensus nodes are in ℛ if and only if they are able +// to submit valid Random Beacon votes. Based on this definition we note that ℛ ⊆ (𝒟 ∩ 𝒞). +// +// The protocol explicitly ALLOWS additional parties outside the current epoch's consensus committee to participate. +// In particular, there can be a key-value pair (d,i) ∈ DKGIndexMap, such that the nodeID d is *not* a consensus +// committee member, i.e. d ∉ 𝒞. This may be the case when a DKG is run off-protocol to bootstrap the network. +// In terms of sets, this implies we must consistently work with the relatively general +// assumption that 𝒟 \ 𝒞 ≠ ∅ and 𝒞 \ 𝒟 ≠ ∅. +// Nevertheless, in the vast majority of cases (happy path, roughly 98% of epochs) it will be the case that 𝒟 = 𝒞. +// Therefore, we can optimize for the case 𝒟 = 𝒞, as long as we still support the more general case 𝒟 ≠ 𝒞. +// Broadly, this makes the protocol more robust against temporary disruptions and sudden, large fluctuations in node +// participation. +// +// Nevertheless, there is an important liveness constraint: the committee ℛ should be a large number of nodes. +// Specifically, an honest supermajority of consensus nodes must contain enough successful DKG participants +// (about |𝒟|/2 + 1) to produce a valid group signature for the Random Beacon at each block [1, 3]. +// Therefore, we have the approximate lower bound |ℛ| ≳ n/2 + 1 = |𝒟|/2 + 1 = len(DKGIndexMap)/2 + 1. +// Operating close to this lower bound would require that every Random Beacon key-holder ϱ ∈ ℛ remaining in the consensus committee is honest +// (incl. quickly responsive) *all the time*. Such a reliability assumption is unsuited for decentralized production networks. +// To reject configurations that are vulnerable to liveness failures, the protocol uses the threshold `t_safety` +// (heuristic, see [2]), which is implemented on the smart contract level. +// Ideally, |ℛ| and therefore |𝒟 ∩ 𝒞| (given that |ℛ| <= |𝒟 ∩ 𝒞|) should be well above 70% of |𝒟|. +// Values in the range 70%-62% of |𝒟| should be considered for short-term recovery cases. +// Values of 62% * |𝒟| or lower (i.e. |ℛ| ≤ 0.62·|𝒟|) are not recommended for any +// production network, as single-node crashes may already be enough to halt consensus.
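The bijection invariants above lend themselves to a mechanical check; a sketch of such a check, assuming only the invariants stated here (NewEpochCommit, later in this diff, enforces the same conditions):

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

// validIndexMap checks the two DKGIndexMap invariants stated above: with
// n = len(indexMap), the values must form exactly the set {0, ..., n-1}.
func validIndexMap(indexMap flow.DKGIndexMap) error {
	n := len(indexMap)
	seen := make([]bool, n)
	for nodeID, index := range indexMap {
		if index < 0 || index >= n {
			return fmt.Errorf("node %v has index %d outside [0, %d)", nodeID, index, n)
		}
		if seen[index] {
			return fmt.Errorf("index %d assigned more than once", index)
		}
		seen[index] = true
	}
	return nil
}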
+// +// For further details, see +// - [1] https://www.notion.so/flowfoundation/Threshold-Signatures-7e26c6dd46ae40f7a83689ba75a785e3?pvs=4 +// - [2] https://www.notion.so/flowfoundation/DKG-contract-success-threshold-86c6bf2b92034855b3c185d7616eb6f1?pvs=4 +// - [3] https://www.notion.so/flowfoundation/Architecture-for-Concurrent-Vote-Processing-41704666bc414a03869b70ba1043605f?pvs=4 +type DKGIndexMap map[Identifier]int + +// DKGMessage is the type of message exchanged between DKG nodes. +type DKGMessage struct { + // Data holds the DKG message data, as provided by the DKG crypto layer. + // The contents are opaque to the DKG engine layer. + Data []byte + // DKGInstanceID is a unique identifier for the DKG instance this message is part of. + DKGInstanceID string +} diff --git a/model/flow/entity.go b/model/flow/entity.go index d91a9fa6b34..33b8c5a0107 100644 --- a/model/flow/entity.go +++ b/model/flow/entity.go @@ -1,5 +1,11 @@ package flow +type IDEntity interface { + // ID returns a unique id for this entity using a hash of the immutable + // fields of the entity. + ID() Identifier +} + // Entity defines how flow entities should be defined // Entities are flat data structures holding multiple data fields. // Entities don't include nested entities, they only include pointers to @@ -7,12 +13,55 @@ package flow // of keeping a slice of entity object itself. This simplifies storage, signature and validation // of entities. type Entity interface { + IDEntity +} - // ID returns a unique id for this entity using a hash of the immutable - // fields of the entity. - ID() Identifier +func EntitiesToIDs[T Entity](entities []T) []Identifier { + ids := make([]Identifier, 0, len(entities)) + for _, entity := range entities { + ids = append(ids, entity.ID()) + } + return ids +} + +// Deduplicate entities in a slice by the ID method +// The original order of the entities is preserved. +func Deduplicate[T IDEntity](entities []T) []T { + if entities == nil { + return nil + } + + seen := make(map[Identifier]struct{}, len(entities)) + result := make([]T, 0, len(entities)) + + for _, entity := range entities { + id := entity.ID() + if _, ok := seen[id]; ok { + continue + } + + seen[id] = struct{}{} + result = append(result, entity) + } + + return result +} + +// EntityRequest is a request for a set of entities, each keyed by an +// identifier. The relationship between the identifiers and the entity is not +// specified here. In the typical case, the identifier is simply the ID of the +// entity being requested, but more complex identifier-entity relationships can +// be used as well. +type EntityRequest struct { + Nonce uint64 + EntityIDs []Identifier +} - // Checksum returns a unique checksum for the entity, including the mutable - // data such as signatures. - Checksum() Identifier +// EntityResponse is a response to an entity request, containing a set of +// serialized entities and the identifiers used to request them. The returned +// entity set may be empty or incomplete. 
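A short usage sketch of the generic Deduplicate helper added above: later duplicates (by ID) are dropped and first-occurrence order is preserved, mirroring the test that follows in this diff:

package example

import (
	"github.com/onflow/flow-go/model/flow"
)

// dedupExample doubles the input slice and deduplicates it; the result
// equals the original slice, since the second copy of each element has an
// ID that was already seen.
func dedupExample(cols []*flow.Collection) []*flow.Collection {
	return flow.Deduplicate(append(cols, cols...))
}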
+type EntityResponse struct { + Nonce uint64 + EntityIDs []Identifier + Blobs [][]byte } diff --git a/model/flow/entity_test.go b/model/flow/entity_test.go new file mode 100644 index 00000000000..bb926159675 --- /dev/null +++ b/model/flow/entity_test.go @@ -0,0 +1,24 @@ +package flow_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestDeduplicate(t *testing.T) { + require.Nil(t, flow.Deduplicate[*flow.Collection](nil)) + + cols := unittest.CollectionListFixture(5) + require.Equal(t, cols, flow.Deduplicate(cols)) + + // create duplicates, and validate + require.Equal(t, cols, flow.Deduplicate[*flow.Collection](append(cols, cols...))) + + // verify the original order should be preserved + require.Equal(t, cols, flow.Deduplicate[*flow.Collection]( + append(cols, cols[3], cols[1], cols[4], cols[2], cols[0]))) +} diff --git a/model/flow/epoch.go b/model/flow/epoch.go index 3f27586f2a2..41e12df481a 100644 --- a/model/flow/epoch.go +++ b/model/flow/epoch.go @@ -8,19 +8,81 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/fxamacker/cbor/v2" + "github.com/onflow/crypto" "github.com/vmihailenco/msgpack/v4" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encodable" ) -// EpochPhase represents a phase of the Epoch Preparation Protocol. The phase -// of an epoch is resolved based on a block reference and is fork-dependent. -// An epoch begins in the staking phase, then transitions to the setup phase in -// the block containing the EpochSetup service event, then to the committed -// phase in the block containing the EpochCommit service event. -// |<-- EpochPhaseStaking -->|<-- EpochPhaseSetup -->|<-- EpochPhaseCommitted -->|<-- EpochPhaseStaking -->... -// |<------------------------------- Epoch N ------------------------------------>|<-- Epoch N + 1 --... +// EpochPhase represents a phase of the Epoch Preparation Protocol. +// The phase of an epoch is resolved based on a block reference and is fork-dependent. +// During normal operations, each Epoch transitions through the phases: +// +// ║ Epoch N +// ║ ╭─────────────────────────────────┴─────────────────────────────────╮ +// ║ finalize view EpochSetup EpochCommit +// ║ in epoch N service event service event +// ║ ⇣ ⇣ ⇣ +// ║ ┌─────────────────┐ ┌───────────────┐ ┌───────────────────┐ +// ║ │EpochPhaseStaking├─────►EpochPhaseSetup├─────►EpochPhaseCommitted├ ┄> +// ║ └─────────────────┘ └───────────────┘ └───────────────────┘ +// ║ ⇣ ⇣ ⇣ +// ║ EpochTransition EpochSetupPhaseStarted EpochCommittedPhaseStarted +// ║ Notification Notification Notification +// +// However, if the Protocol State encounters any unexpected epoch service events, or the subsequent epoch +// fails to be committed by the `FinalizationSafetyThreshold`, then we enter Epoch Fallback Mode [EFM]. +// Depending on whether the subsequent epoch has already been committed, the EFM progress differs slightly. +// In a nutshell, we always enter the _latest_ epoch already committed on the happy path (if there is any) +// and then follow the fallback protocol. 
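Before the scenario diagrams below, a small sketch of how the phase enum round-trips through its string form; it assumes only the EpochPhase changes introduced later in this diff (including the new EpochPhaseFallback value):

package example

import (
	"github.com/onflow/flow-go/model/flow"
)

// phaseRoundTrip sketches the String/GetEpochPhase pairing shown below:
// every defined phase, including the new EpochPhaseFallback, maps to a
// unique string and back.
func phaseRoundTrip() bool {
	for _, p := range []flow.EpochPhase{
		flow.EpochPhaseStaking,
		flow.EpochPhaseSetup,
		flow.EpochPhaseCommitted,
		flow.EpochPhaseFallback,
	} {
		if flow.GetEpochPhase(p.String()) != p {
			return false
		}
	}
	return true
}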
+// +// SCENARIO A: the future Epoch N is already committed, when we enter EFM +// +// ║ Epoch N-1 Epoch N +// ║ ···──┴─────────────────────────╮ ╭─────────────┴───────────────────────────────────────────────╮ +// ║ invalid service finalize view EpochRecover +// ║ event in epoch N service event +// ║ ⇣ ⇣ ┊ ⇣ +// ║ ┌──────────────────────────┐ ┌────────────────────┊────┐ ┌───────────────────────────┐ +// ║ │ EpochPhaseCommitted ├────► EpochPhaseFallback ├─────► EpochPhaseCommitted ├ ┄> +// ║ └──────────────────────────┘ └────────────────────┊────┘ └───────────────────────────┘ +// ║ ⇣ ⇣ ┊ ⇣ +// ║ EpochFallbackModeTriggered EpochTransition EpochExtended* EpochFallbackModeExited +// ║ Notification Notification Notification + EpochCommittedPhaseStarted Notifications +// ║ ┆ ┆ +// ║ ╰┄┄┄┄┄┄┄┄┄┄ EpochFallbackTriggered is true ┄┄┄┄┄┄┄┄┄┄┄┄╯ +// +// With 'EpochExtended*' we denote that there can be zero, one, or more Epoch Extensions (depending on when +// we receive a valid EpochRecover service event). +// +// SCENARIO B: we are in Epoch N without any subsequent epoch being committed when entering EFM +// +// ║ Epoch N +// ║ ···────────────────────────┴───────────────────────────────────────────────────────────────╮ +// ║ invalid service event or EpochRecover +// ║ FinalizationSafetyThreshold reached service event +// ║ ⇣ ┊ ⇣ +// ║ ┌────────────────────┐ ┌──────────────────────┊──────┐ ┌───────────────────────────┐ +// ║ │ EpochPhaseStaking │ │ EpochPhaseFallback │ │ EpochPhaseCommitted │ +// ║ │ or EpochPhaseSetup ├───► ┊ ├─────► ├ ┄> +// ║ └────────────────────┘ └──────────────────────┊──────┘ └───────────────────────────┘ +// ║ ⇣ ┊ ⇣ +// ║ EpochFallbackModeTriggered EpochExtended* EpochFallbackModeExited +// ║ Notification Notification + EpochCommittedPhaseStarted Notifications +// ║ ┆ ┆ +// ║ ╰┄┄ EpochFallbackTriggered true ┄┄┄┄╯ +// +// A state machine diagram containing all possible phase transitions is below: +// +// ┌──────────────────────────────────────────────────────────┐ +// ┌────────▼────────┐ ┌───────────────┐ ┌───────────────────┐ │ +// │EpochPhaseStaking├─────►EpochPhaseSetup├─────►EpochPhaseCommitted├─┘ +// └────────┬────────┘ └───────────┬───┘ └───┬──────────▲────┘ +// │ ┌─▼─────────────▼──┐ │ +// └────────────────────────►EpochPhaseFallback├───────┘ +// └──────────────────┘ type EpochPhase int const ( @@ -28,6 +90,7 @@ const ( EpochPhaseStaking EpochPhaseSetup EpochPhaseCommitted + EpochPhaseFallback ) func (p EpochPhase) String() string { @@ -36,6 +99,7 @@ func (p EpochPhase) String() string { "EpochPhaseStaking", "EpochPhaseSetup", "EpochPhaseCommitted", + "EpochPhaseFallback", }[p] } @@ -45,6 +109,7 @@ func GetEpochPhase(phase string) EpochPhase { EpochPhaseStaking, EpochPhaseSetup, EpochPhaseCommitted, + EpochPhaseFallback, } for _, p := range phases { if p.String() == phase { @@ -62,16 +127,93 @@ const EpochSetupRandomSourceLength = 16 // EpochSetup is a service event emitted when the network is ready to set up // for the upcoming epoch. It contains the participants in the epoch, the // length, the cluster assignment, and the seed for leader selection. +// EpochSetup is a service event emitted when the preparation process for the next epoch begins.
+// EpochSetup events must: +// - be emitted exactly once per epoch before the corresponding EpochCommit event +// - be emitted prior to the epoch commitment deadline (defined by FinalizationSafetyThreshold) +// +// If either of the above constraints is not met, the service event will be rejected and Epoch Fallback Mode [EFM] will be triggered. +// +// When an EpochSetup event is accepted and incorporated into the Protocol State, this triggers the +// Distributed Key Generation [DKG] and cluster QC voting process for the next epoch. +// It also causes the current epoch to enter the EpochPhaseSetup phase. +// +//structwrite:immutable - mutations allowed only within the constructor type EpochSetup struct { - Counter uint64 // the number of the epoch - FirstView uint64 // the first view of the epoch - DKGPhase1FinalView uint64 // the final view of DKG phase 1 - DKGPhase2FinalView uint64 // the final view of DKG phase 2 - DKGPhase3FinalView uint64 // the final view of DKG phase 3 - FinalView uint64 // the final view of the epoch - Participants IdentityList // all participants of the epoch - Assignments AssignmentList // cluster assignment for the epoch - RandomSource []byte // source of randomness for epoch-specific setup tasks + Counter uint64 // the number of the epoch being set up (current+1) + FirstView uint64 // the first view of the epoch being set up + DKGPhase1FinalView uint64 // the final view of DKG phase 1 + DKGPhase2FinalView uint64 // the final view of DKG phase 2 + DKGPhase3FinalView uint64 // the final view of DKG phase 3 + FinalView uint64 // the final view of the epoch + Participants IdentitySkeletonList // all participants of the epoch in canonical order + Assignments AssignmentList // cluster assignment for the epoch + RandomSource []byte // source of randomness for epoch-specific setup tasks + TargetDuration uint64 // desired real-world duration for the epoch [seconds] + TargetEndTime uint64 // desired real-world end time for the epoch in UNIX time [seconds] +} + +// UntrustedEpochSetup is an untrusted input-only representation of an EpochSetup, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedEpochSetup should be validated and converted into +// a trusted EpochSetup using the NewEpochSetup constructor. +type UntrustedEpochSetup EpochSetup + +// NewEpochSetup creates a new instance of EpochSetup. +// Construction of an EpochSetup is allowed only within the constructor. +// +// All errors indicate a valid EpochSetup cannot be constructed from the input.
+func NewEpochSetup(untrusted UntrustedEpochSetup) (*EpochSetup, error) { + if untrusted.FirstView >= untrusted.FinalView { + return nil, fmt.Errorf("invalid timing - first view (%d) ends after the final view (%d)", untrusted.FirstView, untrusted.FinalView) + } + if untrusted.FirstView >= untrusted.DKGPhase1FinalView { + return nil, fmt.Errorf("invalid timing - first view (%d) ends after dkg phase 1 (%d)", untrusted.FirstView, untrusted.DKGPhase1FinalView) + } + if untrusted.DKGPhase1FinalView >= untrusted.DKGPhase2FinalView { + return nil, fmt.Errorf("invalid dkg timing - phase 1 (%d) ends after phase 2 (%d)", untrusted.DKGPhase1FinalView, untrusted.DKGPhase2FinalView) + } + if untrusted.DKGPhase2FinalView >= untrusted.DKGPhase3FinalView { + return nil, fmt.Errorf("invalid dkg timing - phase 2 (%d) ends after phase 3 (%d)", untrusted.DKGPhase2FinalView, untrusted.DKGPhase3FinalView) + } + if untrusted.DKGPhase3FinalView >= untrusted.FinalView { + return nil, fmt.Errorf("invalid timing - dkg phase 3 (%d) ends after final view (%d)", untrusted.DKGPhase3FinalView, untrusted.FinalView) + } + if untrusted.Participants == nil { + return nil, fmt.Errorf("participants must not be nil") + } + if untrusted.Assignments == nil { + return nil, fmt.Errorf("assignments must not be nil") + } + if len(untrusted.RandomSource) != EpochSetupRandomSourceLength { + return nil, fmt.Errorf( + "random source must be of (%d) bytes, got (%d)", + EpochSetupRandomSourceLength, + len(untrusted.RandomSource), + ) + } + if untrusted.TargetDuration == 0 { + return nil, fmt.Errorf("target duration must be greater than 0") + } + + return &EpochSetup{ + Counter: untrusted.Counter, + FirstView: untrusted.FirstView, + DKGPhase1FinalView: untrusted.DKGPhase1FinalView, + DKGPhase2FinalView: untrusted.DKGPhase2FinalView, + DKGPhase3FinalView: untrusted.DKGPhase3FinalView, + FinalView: untrusted.FinalView, + Participants: untrusted.Participants, + Assignments: untrusted.Assignments, + RandomSource: untrusted.RandomSource, + TargetDuration: untrusted.TargetDuration, + TargetEndTime: untrusted.TargetEndTime, + }, nil } func (setup *EpochSetup) ServiceEvent() ServiceEvent { @@ -105,7 +247,13 @@ func (setup *EpochSetup) EqualTo(other *EpochSetup) bool { if setup.FinalView != other.FinalView { return false } - if !setup.Participants.EqualTo(other.Participants) { + if setup.TargetDuration != other.TargetDuration { + return false + } + if setup.TargetEndTime != other.TargetEndTime { + return false + } + if !IdentitySkeletonListEqualTo(setup.Participants, other.Participants) { return false } if !setup.Assignments.EqualTo(other.Assignments) { @@ -114,14 +262,169 @@ func (setup *EpochSetup) EqualTo(other *EpochSetup) bool { return bytes.Equal(setup.RandomSource, other.RandomSource) } -// EpochCommit is a service event emitted when epoch setup has been completed. -// When an EpochCommit event is emitted, the network is ready to transition to -// the epoch. +// EpochRecover service event is emitted when the network is in Epoch Fallback Mode (EFM) in an attempt to return to the happy path. +// It contains data from the EpochSetup and EpochCommit events so that replicas can create a committed epoch from which they +// can continue operating on the happy path. +// +//structwrite:immutable - mutations allowed only within the constructor +type EpochRecover struct { + EpochSetup EpochSetup + EpochCommit EpochCommit +} + +// UntrustedEpochRecover is an untrusted input-only representation of an EpochRecover, +// used for construction.
+// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedEpochRecover should be validated and converted into +// a trusted EpochRecover using the NewEpochRecover constructor. +type UntrustedEpochRecover EpochRecover + +// NewEpochRecover creates a new instance of EpochRecover. +// Construction of an EpochRecover is allowed only within the constructor. +// +// All errors indicate a valid EpochRecover cannot be constructed from the input. +func NewEpochRecover(untrusted UntrustedEpochRecover) (*EpochRecover, error) { + // EpochSetup must be non-empty and is intended to be constructed solely through its constructor. + if untrusted.EpochSetup.EqualTo(new(EpochSetup)) { + return nil, fmt.Errorf("EpochSetup is empty") + } + // EpochCommit must be non-empty and is intended to be constructed solely through its constructor. + if untrusted.EpochCommit.EqualTo(new(EpochCommit)) { + return nil, fmt.Errorf("EpochCommit is empty") + } + + if untrusted.EpochCommit.Counter != untrusted.EpochSetup.Counter { + return nil, fmt.Errorf("inconsistent epoch counter between commit (%d) and setup (%d) events in same epoch", untrusted.EpochCommit.Counter, untrusted.EpochSetup.Counter) + } + if len(untrusted.EpochSetup.Assignments) != len(untrusted.EpochCommit.ClusterQCs) { + return nil, fmt.Errorf("number of clusters (%d) does not match number of QCs (%d)", len(untrusted.EpochSetup.Assignments), len(untrusted.EpochCommit.ClusterQCs)) + } + + return &EpochRecover{ + EpochSetup: untrusted.EpochSetup, + EpochCommit: untrusted.EpochCommit, + }, nil +} + +func (er *EpochRecover) ServiceEvent() ServiceEvent { + return ServiceEvent{ + Type: ServiceEventRecover, + Event: er, + } +} + +// ID returns the hash of the event contents. +func (er *EpochRecover) ID() Identifier { + return MakeID(er) +} + +func (er *EpochRecover) EqualTo(other *EpochRecover) bool { + if !er.EpochSetup.EqualTo(&other.EpochSetup) { + return false + } + if !er.EpochCommit.EqualTo(&other.EpochCommit) { + return false + } + return true +} + +// EpochCommit is a service event emitted when the preparation process for the next epoch is complete. +// EpochCommit events must: +// - be emitted exactly once per epoch after the corresponding EpochSetup event +// - be emitted prior to the epoch commitment deadline (defined by FinalizationSafetyThreshold) +// +// If either of the above constraints is not met, the service event will be rejected and Epoch Fallback Mode [EFM] will be triggered. +// +// When an EpochCommit event is accepted and incorporated into the Protocol State, this guarantees that +// the network will proceed through that epoch's defined view range with its defined committee. It also +// causes the current epoch to enter the EpochPhaseCommitted phase. +// +// TERMINOLOGY NOTE: In the context of the Epoch Preparation Protocol and the EpochCommit event, +// artifacts produced by the DKG are referred to with the "DKG" prefix (for example, DKGGroupKey). +// These artifacts are *produced by* the DKG, but used for the Random Beacon. As such, other +// components refer to these same artifacts with the "RandomBeacon" prefix.
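A hypothetical lookup sketch tying together the EpochCommit fields described above: DKGIndexMap resolves a nodeID to a Random Beacon index, which positions that node's public key share in DKGParticipantKeys. The helper name is an assumption of this sketch:

package example

import (
	"fmt"

	"github.com/onflow/crypto"

	"github.com/onflow/flow-go/model/flow"
)

// beaconKeyShare returns the Random Beacon public key share for nodeID.
// CAUTION (as noted above): the node may be absent from the DKG committee,
// so the lookup can fail even for consensus committee members.
func beaconKeyShare(commit *flow.EpochCommit, nodeID flow.Identifier) (crypto.PublicKey, error) {
	index, ok := commit.DKGIndexMap[nodeID]
	if !ok {
		return nil, fmt.Errorf("node %v is not part of the DKG committee", nodeID)
	}
	return commit.DKGParticipantKeys[index], nil
}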
+// +//structwrite:immutable - mutations allowed only within the constructor type EpochCommit struct { - Counter uint64 // the number of the epoch - ClusterQCs []ClusterQCVoteData // quorum certificates for each cluster - DKGGroupKey crypto.PublicKey // group key from DKG - DKGParticipantKeys []crypto.PublicKey // public keys for DKG participants + // Counter is the epoch counter of the epoch being committed + Counter uint64 + // ClusterQCs is an ordered list of root quorum certificates, one per cluster. + // EpochCommit.ClusterQCs[i] is the QC for EpochSetup.Assignments[i] + ClusterQCs []ClusterQCVoteData + // DKGGroupKey is the group public key produced by the DKG associated with this epoch. + // It is used to verify Random Beacon signatures for the epoch with the given Counter. + DKGGroupKey crypto.PublicKey + // DKGParticipantKeys is a list of public keys, one per DKG participant, ordered by Random Beacon index. + // This list is the output of the DKG associated with this epoch. + // It is used to verify Random Beacon signatures for the epoch with the given Counter. + // CAUTION: This list may include keys for nodes which do not exist in the consensus committee + // and may NOT include keys for all nodes in the consensus committee. + DKGParticipantKeys []crypto.PublicKey + + // DKGIndexMap is a mapping from node identifier to Random Beacon index. + // It has the following invariants: + // - len(DKGParticipantKeys) == len(DKGIndexMap) + // - DKGIndexMap values form the set {0, 1, ..., n-1} where n=len(DKGParticipantKeys) + // CAUTION: This mapping may include identifiers for nodes which do not exist in the consensus committee + // and may NOT include identifiers for all nodes in the consensus committee. + // + DKGIndexMap DKGIndexMap +} + +// UntrustedEpochCommit is an untrusted input-only representation of an EpochCommit, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedEpochCommit should be validated and converted into +// a trusted EpochCommit using the NewEpochCommit constructor. +type UntrustedEpochCommit EpochCommit + +// NewEpochCommit creates a new instance of EpochCommit. +// Construction of an EpochCommit is allowed only within the constructor. +// +// All errors indicate a valid EpochCommit cannot be constructed from the input.
+func NewEpochCommit(untrusted UntrustedEpochCommit) (*EpochCommit, error) { + if untrusted.DKGGroupKey == nil { + return nil, fmt.Errorf("DKG group key must not be nil") + } + if len(untrusted.ClusterQCs) == 0 { + return nil, fmt.Errorf("cluster QCs list must not be empty") + } + // TODO(mainnet27): remove this conditional: https://github.com/onflow/flow-go/issues/6772 + if untrusted.DKGIndexMap != nil { + // enforce invariant: len(DKGParticipantKeys) == len(DKGIndexMap) + n := len(untrusted.DKGIndexMap) // size of the DKG committee + if len(untrusted.DKGParticipantKeys) != n { + return nil, fmt.Errorf("number of Random Beacon key shares (%d) is inconsistent with the number of DKG participants (%d)", len(untrusted.DKGParticipantKeys), len(untrusted.DKGIndexMap)) + } + + // enforce invariant: DKGIndexMap values form the set {0, 1, ..., n-1} where n=len(DKGParticipantKeys) + encounteredIndex := make([]bool, n) + for _, index := range untrusted.DKGIndexMap { + if index < 0 || index >= n { + return nil, fmt.Errorf("index %d is outside allowed range [0,n-1] for a DKG committee of size n=%d", index, n) + } + if encounteredIndex[index] { + return nil, fmt.Errorf("duplicated DKG index %d", index) + } + encounteredIndex[index] = true + } + } + + return &EpochCommit{ + Counter: untrusted.Counter, + ClusterQCs: untrusted.ClusterQCs, + DKGGroupKey: untrusted.DKGGroupKey, + DKGParticipantKeys: untrusted.DKGParticipantKeys, + DKGIndexMap: untrusted.DKGIndexMap, + }, nil } // ClusterQCVoteData represents the votes for a cluster quorum certificate, as @@ -177,6 +480,7 @@ type encodableCommit struct { ClusterQCs []ClusterQCVoteData DKGGroupKey encodable.RandomBeaconPubKey DKGParticipantKeys []encodable.RandomBeaconPubKey + DKGIndexMap DKGIndexMap } func encodableFromCommit(commit *EpochCommit) encodableCommit { @@ -189,24 +493,28 @@ func encodableFromCommit(commit *EpochCommit) encodableCommit { ClusterQCs: commit.ClusterQCs, DKGGroupKey: encodable.RandomBeaconPubKey{PublicKey: commit.DKGGroupKey}, DKGParticipantKeys: encKeys, + DKGIndexMap: commit.DKGIndexMap, } } -func commitFromEncodable(enc encodableCommit) EpochCommit { +func commitFromEncodable(enc encodableCommit) (*EpochCommit, error) { dkgKeys := make([]crypto.PublicKey, 0, len(enc.DKGParticipantKeys)) for _, key := range enc.DKGParticipantKeys { dkgKeys = append(dkgKeys, key.PublicKey) } - return EpochCommit{ - Counter: enc.Counter, - ClusterQCs: enc.ClusterQCs, - DKGGroupKey: enc.DKGGroupKey.PublicKey, - DKGParticipantKeys: dkgKeys, - } + return NewEpochCommit( + UntrustedEpochCommit{ + Counter: enc.Counter, + ClusterQCs: enc.ClusterQCs, + DKGGroupKey: enc.DKGGroupKey.PublicKey, + DKGParticipantKeys: dkgKeys, + DKGIndexMap: enc.DKGIndexMap, + }, + ) } -func (commit EpochCommit) MarshalJSON() ([]byte, error) { - return json.Marshal(encodableFromCommit(&commit)) +func (commit *EpochCommit) MarshalJSON() ([]byte, error) { + return json.Marshal(encodableFromCommit(commit)) } func (commit *EpochCommit) UnmarshalJSON(b []byte) error { @@ -216,7 +524,12 @@ func (commit *EpochCommit) UnmarshalJSON(b []byte) error { return err } - *commit = commitFromEncodable(enc) + newCommit, err := commitFromEncodable(enc) + if err != nil { + return err + } + *commit = *newCommit + return nil } @@ -231,7 +544,12 @@ func (commit *EpochCommit) UnmarshalCBOR(b []byte) error { return err } - *commit = commitFromEncodable(enc) + newCommit, err := commitFromEncodable(enc) + if err != nil { + return err + } + *commit = *newCommit + return nil } @@ -245,7 +563,12 @@ func
(commit *EpochCommit) UnmarshalMsgpack(b []byte) error { if err != nil { return err } - *commit = commitFromEncodable(enc) + newCommit, err := commitFromEncodable(enc) + if err != nil { + return err + } + *commit = *newCommit + return nil } @@ -259,15 +582,21 @@ func (commit *EpochCommit) EncodeRLP(w io.Writer) error { ClusterQCs []ClusterQCVoteData DKGGroupKey []byte DKGParticipantKeys [][]byte + DKGIndexMap IdentifierList }{ Counter: commit.Counter, ClusterQCs: commit.ClusterQCs, DKGGroupKey: commit.DKGGroupKey.Encode(), DKGParticipantKeys: make([][]byte, 0, len(commit.DKGParticipantKeys)), + DKGIndexMap: make(IdentifierList, len(commit.DKGIndexMap)), } for _, key := range commit.DKGParticipantKeys { rlpEncodable.DKGParticipantKeys = append(rlpEncodable.DKGParticipantKeys, key.Encode()) } + // ensure index map is serialized in a consistent ordered manner + for id, index := range commit.DKGIndexMap { + rlpEncodable.DKGIndexMap[index] = id + } return rlp.Encode(w, rlpEncodable) } @@ -281,14 +610,13 @@ func (commit *EpochCommit) EqualTo(other *EpochCommit) bool { if commit.Counter != other.Counter { return false } - if len(commit.ClusterQCs) != len(other.ClusterQCs) { + + if !slices.EqualFunc(commit.ClusterQCs, other.ClusterQCs, func(qc1 ClusterQCVoteData, qc2 ClusterQCVoteData) bool { + return qc1.EqualTo(&qc2) + }) { return false } - for i, qc := range commit.ClusterQCs { - if !qc.EqualTo(&other.ClusterQCs[i]) { - return false - } - } + if (commit.DKGGroupKey == nil && other.DKGGroupKey != nil) || (commit.DKGGroupKey != nil && other.DKGGroupKey == nil) { return false @@ -296,37 +624,38 @@ func (commit *EpochCommit) EqualTo(other *EpochCommit) bool { if commit.DKGGroupKey != nil && other.DKGGroupKey != nil && !commit.DKGGroupKey.Equals(other.DKGGroupKey) { return false } - if len(commit.DKGParticipantKeys) != len(other.DKGParticipantKeys) { + + if !slices.EqualFunc(commit.DKGParticipantKeys, other.DKGParticipantKeys, func(k1 crypto.PublicKey, k2 crypto.PublicKey) bool { + return k1.Equals(k2) + }) { return false } - for i, key := range commit.DKGParticipantKeys { - if !key.Equals(other.DKGParticipantKeys[i]) { - return false - } + if !maps.Equal(commit.DKGIndexMap, other.DKGIndexMap) { + return false } return true } -// ToDKGParticipantLookup constructs a DKG participant lookup from an identity -// list and a key list. The identity list must be EXACTLY the same (order and -// contents) as that used when initializing the corresponding DKG instance. -func ToDKGParticipantLookup(participants IdentityList, keys []crypto.PublicKey) (map[Identifier]DKGParticipant, error) { - if len(participants) != len(keys) { - return nil, fmt.Errorf("participant list (len=%d) does not match key list (len=%d)", len(participants), len(keys)) - } +// EjectNode is a service event emitted when a node has to be ejected from the network. +// The Dynamic Protocol State observes these events and updates the identity table accordingly. +// It contains a single field which is the identifier of the node being ejected. +type EjectNode struct { + NodeID Identifier +} - lookup := make(map[Identifier]DKGParticipant, len(participants)) - for i := 0; i < len(participants); i++ { - part := participants[i] - key := keys[i] - lookup[part.NodeID] = DKGParticipant{ - Index: uint(i), - KeyShare: key, - } +// EqualTo returns true if the two events are equivalent. +func (e *EjectNode) EqualTo(other *EjectNode) bool { + return e.NodeID == other.NodeID +} + +// ServiceEvent returns the event as a generic ServiceEvent type. 
+func (e *EjectNode) ServiceEvent() ServiceEvent { + return ServiceEvent{ + Type: ServiceEventEjectNode, + Event: e, } - return lookup, nil } type DKGParticipant struct { @@ -403,29 +732,6 @@ func (part DKGParticipant) EncodeRLP(w io.Writer) error { return rlp.Encode(w, encodableFromDKGParticipant(part)) } -// EpochStatus represents the status of the current and next epoch with respect -// to a reference block. Concretely, it contains the IDs for all relevant -// service events emitted as of the reference block. Events not yet emitted are -// represented by ZeroID. -type EpochStatus struct { - PreviousEpoch EventIDs // EpochSetup and EpochCommit events for the previous epoch - CurrentEpoch EventIDs // EpochSetup and EpochCommit events for the current epoch - NextEpoch EventIDs // EpochSetup and EpochCommit events for the next epoch - // InvalidServiceEventIncorporated encodes whether an invalid service event is - // incorporated in this fork. When this happens, epoch fallback is triggered - // AFTER the fork is finalized. - InvalidServiceEventIncorporated bool -} - -// Copy returns a copy of the epoch status. -func (es *EpochStatus) Copy() *EpochStatus { - return &EpochStatus{ - PreviousEpoch: es.PreviousEpoch, - CurrentEpoch: es.CurrentEpoch, - NextEpoch: es.NextEpoch, - } -} - // EventIDs is a container for IDs of epoch service events. type EventIDs struct { // SetupID is the ID of the EpochSetup event for the respective Epoch @@ -433,69 +739,3 @@ type EventIDs struct { // CommitID is the ID of the EpochCommit event for the respective Epoch CommitID Identifier } - -func NewEpochStatus(previousSetup, previousCommit, currentSetup, currentCommit, nextSetup, nextCommit Identifier) (*EpochStatus, error) { - status := &EpochStatus{ - PreviousEpoch: EventIDs{ - SetupID: previousSetup, - CommitID: previousCommit, - }, - CurrentEpoch: EventIDs{ - SetupID: currentSetup, - CommitID: currentCommit, - }, - NextEpoch: EventIDs{ - SetupID: nextSetup, - CommitID: nextCommit, - }, - } - - err := status.Check() - if err != nil { - return nil, err - } - return status, nil -} - -// Check checks that the status is well-formed, returning an error if it is not. -// All errors indicate a malformed EpochStatus. -func (es *EpochStatus) Check() error { - - if es == nil { - return fmt.Errorf("nil epoch status") - } - // must reference either both or neither event IDs for previous epoch - if (es.PreviousEpoch.SetupID == ZeroID) != (es.PreviousEpoch.CommitID == ZeroID) { - return fmt.Errorf("epoch status with only setup or only commit service event") - } - // must reference event IDs for current epoch - if es.CurrentEpoch.SetupID == ZeroID || es.CurrentEpoch.CommitID == ZeroID { - return fmt.Errorf("epoch status with empty current epoch service events") - } - // must not reference a commit without a setup - if es.NextEpoch.SetupID == ZeroID && es.NextEpoch.CommitID != ZeroID { - return fmt.Errorf("epoch status with commit but no setup service event") - } - return nil -} - -// Phase returns the phase for the CURRENT epoch, given this epoch status. -// All errors indicate a malformed EpochStatus. 
-func (es *EpochStatus) Phase() (EpochPhase, error) { - - err := es.Check() - if err != nil { - return EpochPhaseUndefined, err - } - if es.NextEpoch.SetupID == ZeroID { - return EpochPhaseStaking, nil - } - if es.NextEpoch.CommitID == ZeroID { - return EpochPhaseSetup, nil - } - return EpochPhaseCommitted, nil -} - -func (es *EpochStatus) HasPrevious() bool { - return es.PreviousEpoch.SetupID != ZeroID && es.PreviousEpoch.CommitID != ZeroID -} diff --git a/model/flow/epoch_test.go b/model/flow/epoch_test.go index 9c9a542540d..815a8c5de6f 100644 --- a/model/flow/epoch_test.go +++ b/model/flow/epoch_test.go @@ -2,18 +2,56 @@ package flow_test import ( "testing" + "time" + + "github.com/onflow/crypto" + "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" - - "github.com/stretchr/testify/require" ) -func TestClusterQCVoteData_Equality(t *testing.T) { +// TestMalleability performs sanity checks to ensure that epoch related entities are not malleable. +func TestMalleability(t *testing.T) { + t.Run("EpochSetup", func(t *testing.T) { + unittest.RequireEntityNonMalleable(t, unittest.EpochSetupFixture()) + }) + t.Run("EpochCommit with nil DKGIndexMap", func(t *testing.T) { + require.Nil(t, unittest.EpochCommitFixture().DKGIndexMap) // sanity check that the fixture has left `DKGIndexMap` nil + unittest.RequireEntityNonMalleable(t, unittest.EpochCommitFixture(), + // We pin the `DKGIndexMap` to the current value (nil), so `MalleabilityChecker` will not mutate this field: + unittest.WithPinnedField("DKGIndexMap"), + ) + }) - pks := unittest.PublicKeysFixture(2, crypto.BLSBLS12381) + t.Run("EpochCommit with proper DKGIndexMap", func(t *testing.T) { + checker := unittest.NewMalleabilityChecker(unittest.WithFieldGenerator("DKGIndexMap", func() flow.DKGIndexMap { + return flow.DKGIndexMap{unittest.IdentifierFixture(): 0, unittest.IdentifierFixture(): 1} + })) + err := checker.CheckEntity(unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.DKGIndexMap = flow.DKGIndexMap{unittest.IdentifierFixture(): 0, unittest.IdentifierFixture(): 1} + })) + require.NoError(t, err) + }) + t.Run("EpochRecover", func(t *testing.T) { + checker := unittest.NewMalleabilityChecker(unittest.WithFieldGenerator("EpochCommit.DKGIndexMap", func() flow.DKGIndexMap { + return flow.DKGIndexMap{unittest.IdentifierFixture(): 0, unittest.IdentifierFixture(): 1} + })) + err := checker.CheckEntity(unittest.EpochRecoverFixture()) + require.NoError(t, err) + }) + + t.Run("EpochStateContainer", func(t *testing.T) { + unittest.RequireEntityNonMalleable(t, unittest.EpochStateContainerFixture()) + }) + + t.Run("MinEpochStateEntry", func(t *testing.T) { + unittest.RequireEntityNonMalleable(t, unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()).MinEpochStateEntry) + }) +} +func TestClusterQCVoteData_Equality(t *testing.T) { + pks := unittest.PublicKeysFixture(2, crypto.BLSBLS12381) _ = len(pks) t.Run("empty structures are equal", func(t *testing.T) { @@ -77,7 +115,6 @@ func TestClusterQCVoteData_Equality(t *testing.T) { } func TestEpochCommit_EqualTo(t *testing.T) { - qcA := flow.ClusterQCVoteData{ SigData: []byte{3, 3, 3}, VoterIDs: []flow.Identifier{flow.HashToID([]byte{1, 2, 3}), flow.HashToID([]byte{3, 2, 1})}, @@ -175,12 +212,47 @@ func TestEpochCommit_EqualTo(t *testing.T) { require.True(t, a.EqualTo(b)) require.True(t, b.EqualTo(a)) }) + + t.Run("DKGData different length", func(t *testing.T) { + + a := 
&flow.EpochCommit{DKGIndexMap: flow.DKGIndexMap{flow.HashToID([]byte{1}): 1}} + b := &flow.EpochCommit{DKGIndexMap: flow.DKGIndexMap{}} + + require.False(t, a.EqualTo(b)) + require.False(t, b.EqualTo(a)) + }) + + t.Run("DKGData different data", func(t *testing.T) { + + a := &flow.EpochCommit{DKGIndexMap: flow.DKGIndexMap{flow.HashToID([]byte{1}): 1}} + b := &flow.EpochCommit{DKGIndexMap: flow.DKGIndexMap{flow.HashToID([]byte{1}): 2}} + + require.False(t, a.EqualTo(b)) + require.False(t, b.EqualTo(a)) + }) + + t.Run("DKGData different data - zero value", func(t *testing.T) { + + a := &flow.EpochCommit{DKGIndexMap: flow.DKGIndexMap{flow.HashToID([]byte{1}): 0}} + b := &flow.EpochCommit{DKGIndexMap: flow.DKGIndexMap{flow.HashToID([]byte{2}): 1}} + + require.False(t, a.EqualTo(b)) + require.False(t, b.EqualTo(a)) + }) + + t.Run("DKGData same data", func(t *testing.T) { + + a := &flow.EpochCommit{DKGIndexMap: flow.DKGIndexMap{flow.HashToID([]byte{1, 2, 3}): 1}} + b := &flow.EpochCommit{DKGIndexMap: flow.DKGIndexMap{flow.HashToID([]byte{1, 2, 3}): 1}} + + require.True(t, a.EqualTo(b)) + require.True(t, b.EqualTo(a)) + }) } func TestEpochSetup_EqualTo(t *testing.T) { - - identityA := unittest.IdentityFixture() - identityB := unittest.IdentityFixture() + identityA := &unittest.IdentityFixture().IdentitySkeleton + identityB := &unittest.IdentityFixture().IdentitySkeleton assignmentA := flow.AssignmentList{[]flow.Identifier{[32]byte{1, 2, 3}, [32]byte{2, 2, 2}}} assignmentB := flow.AssignmentList{[]flow.Identifier{[32]byte{1, 2, 3}, [32]byte{}}} @@ -243,8 +315,8 @@ func TestEpochSetup_EqualTo(t *testing.T) { t.Run("Participants length differ", func(t *testing.T) { - a := &flow.EpochSetup{Participants: flow.IdentityList{identityA}} - b := &flow.EpochSetup{Participants: flow.IdentityList{}} + a := &flow.EpochSetup{Participants: flow.IdentitySkeletonList{identityA}} + b := &flow.EpochSetup{Participants: flow.IdentitySkeletonList{}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) @@ -252,8 +324,8 @@ func TestEpochSetup_EqualTo(t *testing.T) { t.Run("Participants length same but different data", func(t *testing.T) { - a := &flow.EpochSetup{Participants: flow.IdentityList{identityA}} - b := &flow.EpochSetup{Participants: flow.IdentityList{identityB}} + a := &flow.EpochSetup{Participants: flow.IdentitySkeletonList{identityA}} + b := &flow.EpochSetup{Participants: flow.IdentitySkeletonList{identityB}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) @@ -261,8 +333,8 @@ func TestEpochSetup_EqualTo(t *testing.T) { t.Run("Participants length same with same data", func(t *testing.T) { - a := &flow.EpochSetup{Participants: flow.IdentityList{identityA}} - b := &flow.EpochSetup{Participants: flow.IdentityList{identityA}} + a := &flow.EpochSetup{Participants: flow.IdentitySkeletonList{identityA}} + b := &flow.EpochSetup{Participants: flow.IdentitySkeletonList{identityA}} require.True(t, a.EqualTo(b)) require.True(t, b.EqualTo(a)) @@ -304,3 +376,469 @@ func TestEpochSetup_EqualTo(t *testing.T) { require.False(t, b.EqualTo(a)) }) } + +// TestNewEpochSetup verifies the behavior of the NewEpochSetup constructor function. +// It checks for correct handling of both valid and invalid inputs. +// +// Test Cases: +// +// 1. Valid input returns setup: +// - Ensures that providing all required and correctly formatted fields results in a successful creation of an EpochSetup instance. +// +// 2. 
Invalid FirstView and FinalView: +// - Verifies that an error is returned when FirstView is not less than FinalView, as this violates the expected chronological order. +// +// 3. Invalid FirstView >= DKGPhase1FinalView: +// - Ensures that FirstView must end before DKG Phase 1 ends. +// +// 4. Invalid DKGPhase1FinalView >= DKGPhase2FinalView: +// - Ensures DKG Phase 1 must end before DKG Phase 2 ends. +// +// 5. Invalid DKGPhase2FinalView >= DKGPhase3FinalView: +// - Ensures DKG Phase 2 must end before DKG Phase 3 ends. +// +// 6. Invalid DKGPhase3FinalView >= FinalView: +// - Ensures DKG Phase 3 must end before FinalView. +// +// 7. Invalid participants: +// - Checks that an error is returned when the Participants field is nil. +// +// 8. Invalid assignments: +// - Ensures that an error is returned when the Assignments field is nil. +// +// 9. Invalid RandomSource: +// - Validates that an error is returned when the RandomSource does not meet the required length. +// +// 10. Invalid TargetDuration: +// - Confirms that an error is returned when TargetDuration is zero. +func TestNewEpochSetup(t *testing.T) { + participants := unittest.IdentityListFixture(5, unittest.WithAllRoles()) + validParticipants := participants.Sort(flow.Canonical[flow.Identity]).ToSkeleton() + validRandomSource := unittest.SeedFixture(flow.EpochSetupRandomSourceLength) + validAssignments := unittest.ClusterAssignment(1, validParticipants) + + t.Run("valid input", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + Counter: 1, + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 60 * 60, + TargetEndTime: uint64(time.Now().Unix()) + 1000, + } + + setup, err := flow.NewEpochSetup(untrusted) + require.NoError(t, err) + require.NotNil(t, setup) + }) + + t.Run("invalid FirstView and FinalView", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 100, + FinalView: 90, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "invalid timing - first view (100) ends after the final view (90)") + }) + + t.Run("invalid FirstView >= DKGPhase1FinalView", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 20, + DKGPhase1FinalView: 10, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 60, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "invalid timing - first view (20) ends after dkg phase 1 (10)") + }) + + t.Run("invalid DKGPhase1FinalView >= DKGPhase2FinalView", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 30, + DKGPhase2FinalView: 20, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 60, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "invalid dkg timing - phase 1 (30) ends 
after phase 2 (20)") + }) + + t.Run("invalid DKGPhase2FinalView >= DKGPhase3FinalView", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 40, + DKGPhase3FinalView: 30, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 60, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "invalid dkg timing - phase 2 (40) ends after phase 3 (30)") + }) + + t.Run("invalid DKGPhase3FinalView >= FinalView", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 60, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 60, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "invalid timing - dkg phase 3 (60) ends after final view (50)") + }) + + t.Run("invalid participants", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: nil, + Assignments: validAssignments, + RandomSource: validRandomSource, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "participants must not be nil") + }) + + t.Run("invalid assignments", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: nil, + RandomSource: validRandomSource, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "assignments must not be nil") + }) + + t.Run("invalid RandomSource", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: make([]byte, flow.EpochSetupRandomSourceLength-1), // too short + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "random source must be of") + }) + t.Run("invalid TargetDuration", func(t *testing.T) { + untrusted := flow.UntrustedEpochSetup{ + FirstView: 10, + DKGPhase1FinalView: 20, + DKGPhase2FinalView: 30, + DKGPhase3FinalView: 40, + FinalView: 50, + Participants: validParticipants, + Assignments: validAssignments, + RandomSource: validRandomSource, + TargetDuration: 0, + } + setup, err := flow.NewEpochSetup(untrusted) + require.Error(t, err) + require.Nil(t, setup) + require.Contains(t, err.Error(), "target duration must be greater than 0") + }) +} + +// TestNewEpochCommit validates the behavior of the NewEpochCommit constructor function. +// It checks for correct handling of both valid and invalid inputs. +// +// Test Cases: +// +// 1. Valid input returns commit: +// - Ensures that providing all required and correctly formatted fields results in a successful creation of an EpochCommit instance. +// +// 2. 
Nil DKGGroupKey: +// - Verifies that an error is returned when DKGGroupKey is nil. +// +// 3. Empty cluster QCs list: +// - Verifies that an error is returned when cluster QCs list is empty. +// +// 4. Mismatched DKGParticipantKeys and DKGIndexMap lengths: +// - Checks that an error is returned when the number of DKGParticipantKeys does not match the length of DKGIndexMap. +// +// 5. DKGIndexMap with out-of-range index: +// - Ensures that an error is returned when DKGIndexMap contains an index outside the valid range. +// +// 6. DKGIndexMap with duplicate indices: +// - Validates that an error is returned when DKGIndexMap contains duplicate indices. +func TestNewEpochCommit(t *testing.T) { + // Setup common valid data + validParticipantKeys := unittest.PublicKeysFixture(2, crypto.BLSBLS12381) + validDKGGroupKey := unittest.KeyFixture(crypto.BLSBLS12381).PublicKey() + validIndexMap := flow.DKGIndexMap{ + unittest.IdentifierFixture(): 0, + unittest.IdentifierFixture(): 1, + } + validClusterQCs := []flow.ClusterQCVoteData{ + { + VoterIDs: []flow.Identifier{ + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + }, + SigData: []byte{1, 1, 1}, + }, + { + VoterIDs: []flow.Identifier{ + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + }, + SigData: []byte{2, 2, 2}, + }, + } + + t.Run("valid input", func(t *testing.T) { + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: validClusterQCs, + DKGGroupKey: validDKGGroupKey, + DKGParticipantKeys: validParticipantKeys, + DKGIndexMap: validIndexMap, + } + + commit, err := flow.NewEpochCommit(untrusted) + require.NoError(t, err) + require.NotNil(t, commit) + }) + + t.Run("nil DKGGroupKey", func(t *testing.T) { + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: validClusterQCs, + DKGGroupKey: nil, + DKGParticipantKeys: validParticipantKeys, + DKGIndexMap: validIndexMap, + } + + commit, err := flow.NewEpochCommit(untrusted) + require.Error(t, err) + require.Nil(t, commit) + require.Contains(t, err.Error(), "DKG group key must not be nil") + }) + + t.Run("empty list of cluster QCs", func(t *testing.T) { + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: []flow.ClusterQCVoteData{}, + DKGGroupKey: validDKGGroupKey, + DKGParticipantKeys: validParticipantKeys, + DKGIndexMap: validIndexMap, + } + + commit, err := flow.NewEpochCommit(untrusted) + require.Error(t, err) + require.Nil(t, commit) + require.Contains(t, err.Error(), "cluster QCs list must not be empty") + }) + + t.Run("mismatched DKGParticipantKeys and DKGIndexMap lengths", func(t *testing.T) { + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: validClusterQCs, + DKGGroupKey: validDKGGroupKey, + DKGParticipantKeys: unittest.PublicKeysFixture(1, crypto.BLSBLS12381), // Only one key + DKGIndexMap: validIndexMap, // Two entries + } + + commit, err := flow.NewEpochCommit(untrusted) + require.Error(t, err) + require.Nil(t, commit) + require.Contains(t, err.Error(), "number of 1 Random Beacon key shares is inconsistent with number of DKG participants (len=2)") + }) + + t.Run("DKGIndexMap with out-of-range index", func(t *testing.T) { + invalidIndexMap := flow.DKGIndexMap{ + unittest.IdentifierFixture(): 0, + unittest.IdentifierFixture(): 2, // Index out of range for 2 participants + } + + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: validClusterQCs, + DKGGroupKey: validDKGGroupKey, + DKGParticipantKeys: validParticipantKeys, + DKGIndexMap: invalidIndexMap, + } + + commit, err := 
flow.NewEpochCommit(untrusted) + require.Error(t, err) + require.Nil(t, commit) + require.Contains(t, err.Error(), "index 2 is outside allowed range [0,n-1] for a DKG committee of size n=2") + }) + + t.Run("DKGIndexMap with duplicate indices", func(t *testing.T) { + duplicateIndexMap := flow.DKGIndexMap{ + unittest.IdentifierFixture(): 0, + unittest.IdentifierFixture(): 0, // Duplicate index + } + + untrusted := flow.UntrustedEpochCommit{ + Counter: 1, + ClusterQCs: validClusterQCs, + DKGGroupKey: validDKGGroupKey, + DKGParticipantKeys: validParticipantKeys, + DKGIndexMap: duplicateIndexMap, + } + + commit, err := flow.NewEpochCommit(untrusted) + require.Error(t, err) + require.Nil(t, commit) + require.Contains(t, err.Error(), "duplicated DKG index 0") + }) +} + +// TestNewEpochRecover validates the behavior of the NewEpochRecover constructor function. +// It checks for correct handling of both valid and invalid inputs. +// +// Test Cases: +// +// 1. Valid input returns recover: +// - Ensures that providing non-empty EpochSetup and EpochCommit results in a successful creation of an EpochRecover instance. +// +// 2. Empty EpochSetup: +// - Verifies that an error is returned when EpochSetup is empty. +// +// 3. Empty EpochCommit: +// - Checks that an error is returned when EpochCommit is empty. +// +// 4. Mismatched cluster counts: +// - Validates that an error is returned when the number of Assignments in EpochSetup does not match the number of ClusterQCs in EpochCommit. +// +// 5. Mismatched epoch counters: +// - Ensures that an error is returned when the Counter values in EpochSetup and EpochCommit do not match. + +func TestNewEpochRecover(t *testing.T) { + // Setup common valid data + setupParticipants := unittest.IdentityListFixture(5, unittest.WithAllRoles()).Sort(flow.Canonical[flow.Identity]) + + validSetup := unittest.EpochSetupFixture( + unittest.SetupWithCounter(1), + unittest.WithParticipants(setupParticipants.ToSkeleton()), + ) + validCommit := unittest.EpochCommitFixture( + unittest.CommitWithCounter(1), + unittest.WithDKGFromParticipants(validSetup.Participants), + ) + + t.Run("valid input", func(t *testing.T) { + untrusted := flow.UntrustedEpochRecover{ + EpochSetup: *validSetup, + EpochCommit: *validCommit, + } + + recoverEpoch, err := flow.NewEpochRecover(untrusted) + require.NoError(t, err) + require.NotNil(t, recoverEpoch) + }) + + t.Run("empty EpochSetup", func(t *testing.T) { + untrusted := flow.UntrustedEpochRecover{ + EpochSetup: *new(flow.EpochSetup), // Empty setup + EpochCommit: *validCommit, + } + + recoverEpoch, err := flow.NewEpochRecover(untrusted) + require.Error(t, err) + require.Nil(t, recoverEpoch) + require.Contains(t, err.Error(), "EpochSetup is empty") + }) + + t.Run("empty EpochCommit", func(t *testing.T) { + untrusted := flow.UntrustedEpochRecover{ + EpochSetup: *validSetup, + EpochCommit: *new(flow.EpochCommit), // Empty commit + } + + recoverEpoch, err := flow.NewEpochRecover(untrusted) + require.Error(t, err) + require.Nil(t, recoverEpoch) + require.Contains(t, err.Error(), "EpochCommit is empty") + }) + + t.Run("mismatched cluster counts", func(t *testing.T) { + // Create a copy of validSetup with an extra assignment + mismatchedSetup := *validSetup + mismatchedSetup.Assignments = unittest.ClusterAssignment(2, setupParticipants.ToSkeleton()) + + untrusted := flow.UntrustedEpochRecover{ + EpochSetup: mismatchedSetup, + EpochCommit: *validCommit, + } + + recoverEpoch, err := flow.NewEpochRecover(untrusted) + require.Error(t, err) + require.Nil(t, 
recoverEpoch) + require.Contains(t, err.Error(), "does not match number of QCs") + }) + + t.Run("mismatched epoch counters", func(t *testing.T) { + // Create a copy of validCommit with a different counter + mismatchedCommit := *validCommit + mismatchedCommit.Counter = validSetup.Counter + 1 + + untrusted := flow.UntrustedEpochRecover{ + EpochSetup: *validSetup, + EpochCommit: mismatchedCommit, + } + + recoverEpoch, err := flow.NewEpochRecover(untrusted) + require.Error(t, err) + require.Nil(t, recoverEpoch) + require.Contains(t, err.Error(), "inconsistent epoch counter") + }) +} diff --git a/model/flow/event.go b/model/flow/event.go index c645bf22603..7e9cf0652f2 100644 --- a/model/flow/event.go +++ b/model/flow/event.go @@ -1,12 +1,9 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package flow import ( "fmt" "time" - "github.com/onflow/flow-go/model/encoding/json" "github.com/onflow/flow-go/model/fingerprint" "github.com/onflow/flow-go/storage/merkle" ) @@ -19,6 +16,10 @@ const ( type EventType string +// Event represents an event emitted during the execution of a transaction. +// Events are generated by smart contracts and can be used to observe and react to changes in the blockchain state. +// +//structwrite:immutable - mutations allowed only within the constructor type Event struct { // Type is the qualified event type. Type EventType @@ -34,6 +35,40 @@ type Event struct { Payload []byte } +// UntrustedEvent is an untrusted input-only representation of an Event, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedEvent should be validated and converted into +// a trusted Event using the NewEvent constructor. +type UntrustedEvent Event + +// NewEvent creates a new instance of Event. +// Construction of an Event is allowed only within the constructor. +// +// All errors indicate a valid Event cannot be constructed from the input. +func NewEvent(untrusted UntrustedEvent) (*Event, error) { + if len(untrusted.Type) == 0 { + return nil, fmt.Errorf("event type must not be empty") + } + if untrusted.TransactionID == ZeroID { + return nil, fmt.Errorf("transaction ID must not be zero") + } + if len(untrusted.Payload) == 0 { + return nil, fmt.Errorf("payload must not be empty") + } + return &Event{ + Type: untrusted.Type, + TransactionID: untrusted.TransactionID, + TransactionIndex: untrusted.TransactionIndex, + EventIndex: untrusted.EventIndex, + Payload: untrusted.Payload, + }, nil +} + // String returns the string representation of this event. func (e Event) String() string { return fmt.Sprintf("%s: %s", e.Type, e.ID()) @@ -41,44 +76,9 @@ func (e Event) String() string { // ID returns a canonical identifier that is guaranteed to be unique. func (e Event) ID() Identifier { - return MakeID(wrapEventID(e)) -} - -func (e Event) Checksum() Identifier { return MakeID(e) } -// Encode returns the canonical encoding of this event, containing only the fields necessary to uniquely identify it. -func (e Event) Encode() []byte { - w := wrapEventID(e) - return json.NewMarshaler().MustMarshal(w) -} - -func (e Event) Fingerprint() []byte { - return fingerprint.Fingerprint(wrapEvent(e)) -} - -// Defines only the fields needed to uniquely identify an event. 
-type eventIDWrapper struct { - TxID []byte - Index uint32 -} - -type eventWrapper struct { - TxID []byte - Index uint32 - Type string - TransactionIndex uint32 - Payload []byte -} - -func wrapEventID(e Event) eventIDWrapper { - return eventIDWrapper{ - TxID: e.TransactionID[:], - Index: e.EventIndex, - } -} - // byteSize returns the number of bytes needed to store the wrapped version of the event. // returned int is an approximate measure, ignoring the number of bytes needed as headers. func (e Event) byteSize() int { @@ -89,16 +89,6 @@ func (e Event) byteSize() int { len(e.Payload) // Payload } -func wrapEvent(e Event) eventWrapper { - return eventWrapper{ - TxID: e.TransactionID[:], - Index: e.EventIndex, - Type: string(e.Type), - TransactionIndex: e.TransactionIndex, - Payload: e.Payload[:], - } -} - // BlockEvents contains events emitted in a single block. type BlockEvents struct { BlockID Identifier @@ -109,7 +99,7 @@ type BlockEvents struct { type EventsList []Event -// byteSize returns an approximate number of bytes needed to store the wrapped version of the event. +// ByteSize returns an approximate number of bytes needed to store the wrapped version of the event. func (el EventsList) ByteSize() int { size := 0 for _, event := range el { @@ -127,10 +117,10 @@ func EventsMerkleRootHash(el EventsList) (Identifier, error) { } for _, event := range el { - // event fingerprint is the rlp encoding of the wrapperevent + // event fingerprint is the rlp encoding of the event // eventID is the standard sha3 hash of the event fingerprint - fingerPrint := event.Fingerprint() - // computing enityID from the fingerprint + fingerPrint := fingerprint.Fingerprint(event) + // computing entityID from the fingerprint (this is equivalent to `event.ID()`) eventID := MakeIDFromFingerPrint(fingerPrint) _, err = tree.Put(eventID[:], fingerPrint) if err != nil { diff --git a/model/flow/event_test.go b/model/flow/event_test.go index 37ab7cf78e6..ef8cc742e29 100644 --- a/model/flow/event_test.go +++ b/model/flow/event_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/encoding/rlp" "github.com/onflow/flow-go/model/fingerprint" @@ -11,58 +12,30 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -type eventWrapper struct { - TxID []byte - Index uint32 - Type string - TransactionIndex uint32 - Payload []byte -} - -func wrapEvent(e flow.Event) eventWrapper { - return eventWrapper{ - TxID: e.TransactionID[:], - Index: e.EventIndex, - Type: string(e.Type), - TransactionIndex: e.TransactionIndex, - Payload: e.Payload, - } -} - +// TestEventFingerprint verifies that the Fingerprint function produces +// a consistent RLP-encoded representation of an Event. It ensures that +// decoding the fingerprint results in a correctly ordered structure. 
func TestEventFingerprint(t *testing.T) { - evt := unittest.EventFixture(flow.EventAccountCreated, 13, 12, unittest.IdentifierFixture(), 32) + evt := unittest.EventFixture() data := fingerprint.Fingerprint(evt) - var decoded eventWrapper + var decoded flow.Event rlp.NewMarshaler().MustUnmarshal(data, &decoded) - assert.Equal(t, wrapEvent(evt), decoded) + assert.Equal(t, evt, decoded) } -func TestEventID(t *testing.T) { - - // EventID was historically calculated from just TxID and eventIndex which are enough to uniquely identify it in a system - // This test ensures we don't break this promise while introducing proper fingerprinting (which accounts for all the fields) +// TestEventMalleability checks that Event is not malleable: any change in its data +// should result in a different ID. +func TestEventMalleability(t *testing.T) { + event := unittest.EventFixture() - txID := unittest.IdentifierFixture() - evtA := unittest.EventFixture(flow.EventAccountUpdated, 21, 37, txID, 2) - evtB := unittest.EventFixture(flow.EventAccountCreated, 0, 37, txID, 22) - - evtC := unittest.EventFixture(evtA.Type, evtA.TransactionIndex, evtA.EventIndex+1, txID, 2) - evtC.Payload = evtA.Payload - - a := evtA.ID() - b := evtB.ID() - c := evtC.ID() - - assert.Equal(t, a, b) - assert.NotEqual(t, a, c) + unittest.RequireEntityNonMalleable(t, &event) } func TestEventsList(t *testing.T) { - - eventA := unittest.EventFixture(flow.EventAccountUpdated, 21, 37, unittest.IdentifierFixture(), 2) - eventB := unittest.EventFixture(flow.EventAccountCreated, 0, 37, unittest.IdentifierFixture(), 22) - eventC := unittest.EventFixture(flow.EventAccountCreated, 0, 37, unittest.IdentifierFixture(), 22) + eventA := unittest.EventFixture() + eventB := unittest.EventFixture() + eventC := unittest.EventFixture() listAB := flow.EventsList{ eventA, @@ -112,9 +85,110 @@ func TestEventsMerkleRootHash(t *testing.T) { TransactionID: [flow.IdentifierLen]byte{1, 2, 3}, } - expectedRootHashHex := "355446d7b2b9653403abe28ccc405f46c059d2059cb7863f4964c401ee1aa83b" + expectedRootHashHex := "c53a6592de573a24547b616172abd9131651d6b7d829e5694a25fa183db7ae01" ABHash, err := flow.EventsMerkleRootHash([]flow.Event{eventA, eventB}) assert.NoError(t, err) assert.Equal(t, expectedRootHashHex, ABHash.String()) } + +func TestEmptyEventsMerkleRootHash(t *testing.T) { + actualHash, err := flow.EventsMerkleRootHash([]flow.Event{}) + require.NoError(t, err) + require.Equal(t, flow.EmptyEventCollectionID, actualHash) +} + +// TestNewEvent verifies the behavior of the NewEvent constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedEvent results in a valid Event. +// +// 2. Invalid input with empty event type: +// - Ensures an error is returned when the Type field is an empty string. +// +// 3. Invalid input with zero transaction ID: +// - Ensures an error is returned when the TransactionID is zero. +// +// 4. Invalid input with nil Payload: +// - Ensures an error is returned when the Payload field is nil. +// +// 5. Invalid input with empty Payload: +// - Ensures an error is returned when the Payload field is an empty byte slice. 
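+//
+// For orientation, a minimal sketch of the constructor call these cases exercise
+// (txID and payload are hypothetical placeholders, not part of this change):
+//
+//	evt, err := flow.NewEvent(flow.UntrustedEvent{
+//		Type:          flow.EventAccountCreated,
+//		TransactionID: txID,
+//		Payload:       payload,
+//	})
+//	if err != nil {
+//		// the input could not be promoted to a trusted Event
+//	}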
+func TestNewEvent(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + event, err := flow.NewEvent( + flow.UntrustedEvent{ + Type: flow.EventAccountCreated, + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 1, + EventIndex: 1, + Payload: []byte("cadence-json encoded data"), + }, + ) + require.NoError(t, err) + require.NotNil(t, event) + }) + + t.Run("invalid input, type is empty", func(t *testing.T) { + event, err := flow.NewEvent( + flow.UntrustedEvent{ + Type: "", + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 1, + EventIndex: 1, + Payload: []byte("cadence-json encoded data"), + }, + ) + require.Error(t, err) + require.Nil(t, event) + assert.Contains(t, err.Error(), "event type must not be empty") + }) + + t.Run("invalid input, transaction ID is zero", func(t *testing.T) { + event, err := flow.NewEvent( + flow.UntrustedEvent{ + Type: flow.EventAccountCreated, + TransactionID: flow.ZeroID, + TransactionIndex: 1, + EventIndex: 1, + Payload: []byte("cadence-json encoded data"), + }, + ) + require.Error(t, err) + require.Nil(t, event) + assert.Contains(t, err.Error(), "transaction ID must not be zero") + }) + + t.Run("invalid input with nil payload", func(t *testing.T) { + event, err := flow.NewEvent( + flow.UntrustedEvent{ + Type: flow.EventAccountCreated, + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 1, + EventIndex: 1, + Payload: nil, + }, + ) + require.Error(t, err) + require.Nil(t, event) + assert.Contains(t, err.Error(), "payload must not be empty") + }) + + t.Run("invalid input with empty payload", func(t *testing.T) { + event, err := flow.NewEvent( + flow.UntrustedEvent{ + Type: flow.EventAccountCreated, + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 1, + EventIndex: 1, + Payload: []byte{}, + }, + ) + require.Error(t, err) + require.Nil(t, event) + assert.Contains(t, err.Error(), "payload must not be empty") + }) +} diff --git a/model/flow/executed_transaction.go b/model/flow/executed_transaction.go deleted file mode 100644 index cb7954a8445..00000000000 --- a/model/flow/executed_transaction.go +++ /dev/null @@ -1,10 +0,0 @@ -package flow - -// ExecutedTransaction are generated by computer and pass to chunker -type ExecutedTransaction struct { - Tx *Transaction - GasSpent uint64 - MaxGas uint64 - StartState StateCommitment - EndState StateCommitment -} diff --git a/model/flow/execution_receipt.go b/model/flow/execution_receipt.go index 7c272df64f2..d11ecd413d6 100644 --- a/model/flow/execution_receipt.go +++ b/model/flow/execution_receipt.go @@ -2,79 +2,177 @@ package flow import ( "encoding/json" + "fmt" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" ) type Spock []byte // ExecutionReceipt is the full execution receipt, as sent by the Execution Node. -// Specifically, it contains the detailed execution result. +// Specifically, it contains the detailed execution result. The `ExecutorSignature` +// signs the `UnsignedExecutionReceipt`. +// +//structwrite:immutable - mutations allowed only within the constructor type ExecutionReceipt struct { - ExecutorID Identifier - ExecutionResult - Spocks []crypto.Signature + UnsignedExecutionReceipt ExecutorSignature crypto.Signature } +// UntrustedExecutionReceipt is an untrusted input-only representation of an ExecutionReceipt, +// used for construction. 
+// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedExecutionReceipt should be validated and converted into +// a trusted ExecutionReceipt using the NewExecutionReceipt constructor. +type UntrustedExecutionReceipt ExecutionReceipt + +// NewExecutionReceipt creates a new instance of ExecutionReceipt. +// Construction of an ExecutionReceipt is allowed only within the constructor. +// +// All errors indicate a valid ExecutionReceipt cannot be constructed from the input. +func NewExecutionReceipt(untrusted UntrustedExecutionReceipt) (*ExecutionReceipt, error) { + unsignedExecutionReceipt, err := NewUnsignedExecutionReceipt(UntrustedUnsignedExecutionReceipt(untrusted.UnsignedExecutionReceipt)) + if err != nil { + return nil, fmt.Errorf("invalid unsigned execution receipt: %w", err) + } + if len(untrusted.ExecutorSignature) == 0 { + return nil, fmt.Errorf("executor signature must not be empty") + } + return &ExecutionReceipt{ + UnsignedExecutionReceipt: *unsignedExecutionReceipt, + ExecutorSignature: untrusted.ExecutorSignature, + }, nil +} + // ID returns the canonical ID of the execution receipt. func (er *ExecutionReceipt) ID() Identifier { - return er.Meta().ID() + return er.Stub().ID() } -// Checksum returns a checksum for the execution receipt including the signatures. -func (er *ExecutionReceipt) Checksum() Identifier { - return MakeID(er) +// Stub returns a stub of the full ExecutionReceipt, where the ExecutionResult is replaced by its cryptographic hash. +func (er *ExecutionReceipt) Stub() *ExecutionReceiptStub { + // Constructor is skipped since we're using an already-valid ExecutionReceipt object. + //nolint:structwrite + return &ExecutionReceiptStub{ + UnsignedExecutionReceiptStub: *er.UnsignedExecutionReceipt.Stub(), + ExecutorSignature: er.ExecutorSignature, + } +} + +// UnsignedExecutionReceipt represents the unsigned execution receipt, whose contents the +// Execution Node testifies to be correct by its signature. +// +//structwrite:immutable - mutations allowed only within the constructor +type UnsignedExecutionReceipt struct { + ExecutorID Identifier + ExecutionResult + Spocks []crypto.Signature } -// Meta returns the receipt metadata for the receipt. -func (er *ExecutionReceipt) Meta() *ExecutionReceiptMeta { - return &ExecutionReceiptMeta{ - ExecutorID: er.ExecutorID, - ResultID: er.ExecutionResult.ID(), - Spocks: er.Spocks, - ExecutorSignature: er.ExecutorSignature, +// UntrustedUnsignedExecutionReceipt is an untrusted input-only representation of an UnsignedExecutionReceipt, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedUnsignedExecutionReceipt should be validated and converted into +// a trusted UnsignedExecutionReceipt using the NewUnsignedExecutionReceipt constructor. +type UntrustedUnsignedExecutionReceipt UnsignedExecutionReceipt + +// NewUnsignedExecutionReceipt creates a new instance of UnsignedExecutionReceipt. +// Construction of an UnsignedExecutionReceipt is allowed only within the constructor. +// +// All errors indicate a valid UnsignedExecutionReceipt cannot be constructed from the input. 
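+//
+// Illustrative usage sketch (variable names are placeholders, not part of this change):
+//
+//	unsigned, err := NewUnsignedExecutionReceipt(UntrustedUnsignedExecutionReceipt{
+//		ExecutorID:      executorID, // must be non-zero
+//		ExecutionResult: result,     // must itself pass NewExecutionResult validation
+//		Spocks:          spocks,     // must be non-empty
+//	})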
+func NewUnsignedExecutionReceipt(untrusted UntrustedUnsignedExecutionReceipt) (*UnsignedExecutionReceipt, error) { + if untrusted.ExecutorID == ZeroID { + return nil, fmt.Errorf("executor ID must not be zero") + } + executionResult, err := NewExecutionResult(UntrustedExecutionResult(untrusted.ExecutionResult)) + if err != nil { + return nil, fmt.Errorf("invalid execution result: %w", err) } + if len(untrusted.Spocks) == 0 { + return nil, fmt.Errorf("spocks must not be empty") + } + return &UnsignedExecutionReceipt{ + ExecutorID: untrusted.ExecutorID, + ExecutionResult: *executionResult, + Spocks: untrusted.Spocks, + }, nil +} + +// ID returns a hash over the data of the execution receipt. +// This is what is signed by the executor and verified by recipients. +// Necessary to override ExecutionResult.ID(). +func (erb UnsignedExecutionReceipt) ID() Identifier { + return erb.Stub().ID() } -// ExecutionReceiptMeta contains the fields from the Execution Receipts +// Stub returns a stub of the UnsignedExecutionReceipt, where the ExecutionResult is replaced by its cryptographic hash. +func (erb UnsignedExecutionReceipt) Stub() *UnsignedExecutionReceiptStub { + // Constructor is skipped since we're using an already-valid UnsignedExecutionReceipt object. + //nolint:structwrite + return &UnsignedExecutionReceiptStub{ + ExecutorID: erb.ExecutorID, + ResultID: erb.ExecutionResult.ID(), + Spocks: erb.Spocks, + } +} + +// ExecutionReceiptStub contains the fields from the Execution Receipts // that vary from one executor to another (assuming they commit to the same // result). It only contains the ID (cryptographic hash) of the execution -// result the receipt commits to. The ExecutionReceiptMeta is useful for +// result the receipt commits to. The ExecutionReceiptStub is useful for // storing results and receipts separately in a composable way. -type ExecutionReceiptMeta struct { - ExecutorID Identifier - ResultID Identifier - Spocks []crypto.Signature +// +//structwrite:immutable - mutations allowed only within the constructor +type ExecutionReceiptStub struct { + UnsignedExecutionReceiptStub ExecutorSignature crypto.Signature } -func ExecutionReceiptFromMeta(meta ExecutionReceiptMeta, result ExecutionResult) *ExecutionReceipt { - return &ExecutionReceipt{ - ExecutorID: meta.ExecutorID, - ExecutionResult: result, - Spocks: meta.Spocks, - ExecutorSignature: meta.ExecutorSignature, +// UntrustedExecutionReceiptStub is an untrusted input-only representation of an ExecutionReceiptStub, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedExecutionReceiptStub should be validated and converted into +// a trusted ExecutionReceiptStub using the NewExecutionReceiptStub constructor. +type UntrustedExecutionReceiptStub ExecutionReceiptStub + +// NewExecutionReceiptStub creates a new instance of ExecutionReceiptStub. +// Construction of an ExecutionReceiptStub is allowed only within the constructor. +// +// All errors indicate a valid ExecutionReceiptStub cannot be constructed from the input. 
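+//
+// Sketch of the intended flow (inputs are placeholders): a stub received from the
+// network is validated here first and may later be recombined with the matching
+// full result via ExecutionReceiptFromStub, defined below:
+//
+//	stub, err := NewExecutionReceiptStub(untrustedStub)
+//	if err != nil {
+//		return err // reject the malformed input
+//	}
+//	receipt, err := ExecutionReceiptFromStub(*stub, result)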
+func NewExecutionReceiptStub(untrusted UntrustedExecutionReceiptStub) (*ExecutionReceiptStub, error) { + unsignedExecutionReceiptStub, err := NewUnsignedExecutionReceiptStub(UntrustedUnsignedExecutionReceiptStub(untrusted.UnsignedExecutionReceiptStub)) + if err != nil { + return nil, fmt.Errorf("invalid unsigned execution receipt stub: %w", err) + } + if len(untrusted.ExecutorSignature) == 0 { + return nil, fmt.Errorf("executor signature must not be empty") } + return &ExecutionReceiptStub{ + UnsignedExecutionReceiptStub: *unsignedExecutionReceiptStub, + ExecutorSignature: untrusted.ExecutorSignature, + }, nil } // ID returns the canonical ID of the execution receipt. // It is identical to the ID of the full receipt. -func (er *ExecutionReceiptMeta) ID() Identifier { - body := struct { - ExecutorID Identifier - ResultID Identifier - Spocks []crypto.Signature - }{ - ExecutorID: er.ExecutorID, - ResultID: er.ResultID, - Spocks: er.Spocks, - } - return MakeID(body) +func (er *ExecutionReceiptStub) ID() Identifier { + return MakeID(er) } -func (er ExecutionReceiptMeta) MarshalJSON() ([]byte, error) { - type Alias ExecutionReceiptMeta +func (er ExecutionReceiptStub) MarshalJSON() ([]byte, error) { + type Alias ExecutionReceiptStub return json.Marshal(struct { Alias ID string @@ -84,9 +182,79 @@ func (er ExecutionReceiptMeta) MarshalJSON() ([]byte, error) { }) } -// Checksum returns a checksum for the execution receipt including the signatures. -func (er *ExecutionReceiptMeta) Checksum() Identifier { - return MakeID(er) +// ExecutionReceiptFromStub creates an ExecutionReceipt from an execution result and an ExecutionReceiptStub. +// No errors are expected during normal operation. +func ExecutionReceiptFromStub(stub ExecutionReceiptStub, result ExecutionResult) (*ExecutionReceipt, error) { + unsignedExecutionReceipt, err := NewUnsignedExecutionReceipt( + UntrustedUnsignedExecutionReceipt{ + ExecutorID: stub.ExecutorID, + ExecutionResult: result, + Spocks: stub.Spocks, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct unsigned execution receipt: %w", err) + } + + executionReceipt, err := NewExecutionReceipt( + UntrustedExecutionReceipt{ + UnsignedExecutionReceipt: *unsignedExecutionReceipt, + ExecutorSignature: stub.ExecutorSignature, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct execution receipt: %w", err) + } + + return executionReceipt, nil +} + +// UnsignedExecutionReceiptStub contains the fields of ExecutionReceiptStub that are signed by the executor. +// +//structwrite:immutable - mutations allowed only within the constructor +type UnsignedExecutionReceiptStub struct { + ExecutorID Identifier + ResultID Identifier + Spocks []crypto.Signature +} + +// UntrustedUnsignedExecutionReceiptStub is an untrusted input-only representation of an UnsignedExecutionReceiptStub, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedUnsignedExecutionReceiptStub should be validated and converted into +// a trusted UnsignedExecutionReceiptStub using the NewUnsignedExecutionReceiptStub constructor. +type UntrustedUnsignedExecutionReceiptStub UnsignedExecutionReceiptStub + +// NewUnsignedExecutionReceiptStub creates a new instance of UnsignedExecutionReceiptStub. +// Construction of an UnsignedExecutionReceiptStub is allowed only within the constructor. 
+// +// All errors indicate a valid UnsignedExecutionReceiptStub cannot be constructed from the input. +func NewUnsignedExecutionReceiptStub(untrusted UntrustedUnsignedExecutionReceiptStub) (*UnsignedExecutionReceiptStub, error) { + if untrusted.ExecutorID == ZeroID { + return nil, fmt.Errorf("executor ID must not be zero") + } + if untrusted.ResultID == ZeroID { + return nil, fmt.Errorf("result ID must not be zero") + } + if len(untrusted.Spocks) == 0 { + return nil, fmt.Errorf("spocks must not be empty") + } + return &UnsignedExecutionReceiptStub{ + ExecutorID: untrusted.ExecutorID, + ResultID: untrusted.ResultID, + Spocks: untrusted.Spocks, + }, nil +} + +// ID returns cryptographic hash of unsigned execution receipt. +// This is what is signed by the executor and verified by recipients. +// It is identical to the ID of the full UnsignedExecutionReceipt. +func (erb UnsignedExecutionReceiptStub) ID() Identifier { + return MakeID(erb) } /******************************************************************************* @@ -135,6 +303,15 @@ func (l ExecutionReceiptList) Size() int { return len(l) } +// Stubs converts the ExecutionReceiptList to an ExecutionReceiptStubList +func (l ExecutionReceiptList) Stubs() ExecutionReceiptStubList { + stubs := make(ExecutionReceiptStubList, len(l)) + for i, receipt := range l { + stubs[i] = receipt.Stub() + } + return stubs +} + // GetGroup returns the receipts that were mapped to the same identifier by the // grouping function. Returns an empty (nil) ExecutionReceiptList if groupID does not exist. func (g ExecutionReceiptGroupedList) GetGroup(groupID Identifier) ExecutionReceiptList { @@ -147,25 +324,25 @@ func (g ExecutionReceiptGroupedList) NumberGroups() int { } /******************************************************************************* -GROUPING for ExecutionReceiptMeta information: -allows to split a list of receipt meta information by some property +GROUPING for ExecutionReceiptStub information: +allows to split a list of receipt stub information by some property *******************************************************************************/ -// ExecutionReceiptMetaList is a slice of ExecutionResultMetas with the additional +// ExecutionReceiptStubList is a slice of ExecutionResultStubs with the additional // functionality to group them by various properties -type ExecutionReceiptMetaList []*ExecutionReceiptMeta +type ExecutionReceiptStubList []*ExecutionReceiptStub -// ExecutionReceiptMetaGroupedList is a partition of an ExecutionReceiptMetaList -type ExecutionReceiptMetaGroupedList map[Identifier]ExecutionReceiptMetaList +// ExecutionReceiptStubGroupedList is a partition of an ExecutionReceiptStubList +type ExecutionReceiptStubGroupedList map[Identifier]ExecutionReceiptStubList -// ExecutionReceiptMetaGroupingFunction is a function that assigns an identifier to each receipt meta -type ExecutionReceiptMetaGroupingFunction func(*ExecutionReceiptMeta) Identifier +// ExecutionReceiptStubGroupingFunction is a function that assigns an identifier to each receipt stub +type ExecutionReceiptStubGroupingFunction func(*ExecutionReceiptStub) Identifier -// GroupBy partitions the ExecutionReceiptMetaList. All receipts that are mapped +// GroupBy partitions the ExecutionReceiptStubList. All receipts that are mapped // by the grouping function to the same identifier are placed in the same group. // Within each group, the order and multiplicity of the receipts is preserved. 
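+//
+// Example (illustrative; stubs is a placeholder ExecutionReceiptStubList):
+//
+//	byResult := stubs.GroupBy(func(stub *ExecutionReceiptStub) Identifier { return stub.ResultID })
+//	group := byResult.GetGroup(someResultID) // empty (nil) list if the ID is unknown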
-func (l ExecutionReceiptMetaList) GroupBy(grouper ExecutionReceiptMetaGroupingFunction) ExecutionReceiptMetaGroupedList { - groups := make(map[Identifier]ExecutionReceiptMetaList) +func (l ExecutionReceiptStubList) GroupBy(grouper ExecutionReceiptStubGroupingFunction) ExecutionReceiptStubGroupedList { + groups := make(map[Identifier]ExecutionReceiptStubList) for _, rcpt := range l { groupID := grouper(rcpt) groups[groupID] = append(groups[groupID], rcpt) @@ -173,39 +350,39 @@ func (l ExecutionReceiptMetaList) GroupBy(grouper ExecutionReceiptMetaGroupingFu return groups } -// GroupByExecutorID partitions the ExecutionReceiptMetaList by the receipts' ExecutorIDs. +// GroupByExecutorID partitions the ExecutionReceiptStubList by the receipts' ExecutorIDs. // Within each group, the order and multiplicity of the receipts is preserved. -func (l ExecutionReceiptMetaList) GroupByExecutorID() ExecutionReceiptMetaGroupedList { - grouper := func(receipt *ExecutionReceiptMeta) Identifier { return receipt.ExecutorID } +func (l ExecutionReceiptStubList) GroupByExecutorID() ExecutionReceiptStubGroupedList { + grouper := func(receipt *ExecutionReceiptStub) Identifier { return receipt.ExecutorID } return l.GroupBy(grouper) } -// GroupByResultID partitions the ExecutionReceiptMetaList by the receipts' Result IDs. +// GroupByResultID partitions the ExecutionReceiptStubList by the receipts' Result IDs. // Within each group, the order and multiplicity of the receipts is preserved. -func (l ExecutionReceiptMetaList) GroupByResultID() ExecutionReceiptMetaGroupedList { - grouper := func(receipt *ExecutionReceiptMeta) Identifier { return receipt.ResultID } +func (l ExecutionReceiptStubList) GroupByResultID() ExecutionReceiptStubGroupedList { + grouper := func(receipt *ExecutionReceiptStub) Identifier { return receipt.ResultID } return l.GroupBy(grouper) } // Size returns the number of receipts in the list -func (l ExecutionReceiptMetaList) Size() int { +func (l ExecutionReceiptStubList) Size() int { return len(l) } // GetGroup returns the receipts that were mapped to the same identifier by the -// grouping function. Returns an empty (nil) ExecutionReceiptMetaList if groupID does not exist. -func (g ExecutionReceiptMetaGroupedList) GetGroup(groupID Identifier) ExecutionReceiptMetaList { +// grouping function. Returns an empty (nil) ExecutionReceiptStubList if groupID does not exist. 
+func (g ExecutionReceiptStubGroupedList) GetGroup(groupID Identifier) ExecutionReceiptStubList { return g[groupID] } // NumberGroups returns the number of groups -func (g ExecutionReceiptMetaGroupedList) NumberGroups() int { +func (g ExecutionReceiptStubGroupedList) NumberGroups() int { return len(g) } -// Lookup generates a map from ExecutionReceipt ID to ExecutionReceiptMeta -func (l ExecutionReceiptMetaList) Lookup() map[Identifier]*ExecutionReceiptMeta { - receiptsByID := make(map[Identifier]*ExecutionReceiptMeta, len(l)) +// Lookup generates a map from ExecutionReceipt ID to ExecutionReceiptStub +func (l ExecutionReceiptStubList) Lookup() map[Identifier]*ExecutionReceiptStub { + receiptsByID := make(map[Identifier]*ExecutionReceiptStub, len(l)) for _, receipt := range l { receiptsByID[receipt.ID()] = receipt } diff --git a/model/flow/execution_receipt_test.go b/model/flow/execution_receipt_test.go index ee196dfb869..16fc5554661 100644 --- a/model/flow/execution_receipt_test.go +++ b/model/flow/execution_receipt_test.go @@ -3,17 +3,36 @@ package flow_test import ( "testing" + "github.com/onflow/crypto" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) +// TestExecutionReceiptID_Malleability confirms that ExecutionReceipt and ExecutionReceiptStub, which implement +// the [flow.IDEntity] interface, are resistant to tampering. +func TestExecutionReceiptID_Malleability(t *testing.T) { + receipt := unittest.ExecutionReceiptFixture( + unittest.WithResult(unittest.ExecutionResultFixture(unittest.WithServiceEvents(3))), + unittest.WithSpocks(unittest.SignaturesFixture(3)), + ) + receiptMeta := receipt.Stub() + // check ID of body used for signature + unittest.RequireEntityNonMalleable(t, &receiptMeta.UnsignedExecutionReceiptStub) + // check full ID used for indexing + unittest.RequireEntityNonMalleable(t, receiptMeta) + unittest.RequireEntityNonMalleable(t, receipt, + unittest.WithFieldGenerator("UnsignedExecutionReceipt.ExecutionResult.ServiceEvents", func() []flow.ServiceEvent { + return unittest.ServiceEventsFixture(3) + })) +} + // TestExecutionReceiptGroupBy tests the GroupBy method of ExecutionReceiptList: // * grouping should preserve order and multiplicity of elements // * group for unknown identifier should be empty func TestExecutionReceiptGroupBy(t *testing.T) { - er1 := unittest.ExecutionReceiptFixture() er2 := unittest.ExecutionReceiptFixture() er3 := unittest.ExecutionReceiptFixture() @@ -42,18 +61,17 @@ func TestExecutionReceiptGroupBy(t *testing.T) { assert.Equal(t, 0, unknown.Size()) } -// TestExecutionReceiptMetaGroupBy tests the GroupBy method of ExecutionReceiptMetaList: +// TestExecutionReceiptStubGroupBy tests the GroupBy method of ExecutionReceiptStubList: // * grouping should preserve order and multiplicity of elements // * group for unknown identifier should be empty -func TestExecutionReceiptMetaGroupBy(t *testing.T) { - - er1 := unittest.ExecutionReceiptFixture().Meta() - er2 := unittest.ExecutionReceiptFixture().Meta() - er3 := unittest.ExecutionReceiptFixture().Meta() +func TestExecutionReceiptStubGroupBy(t *testing.T) { + er1 := unittest.ExecutionReceiptFixture().Stub() + er2 := unittest.ExecutionReceiptFixture().Stub() + er3 := unittest.ExecutionReceiptFixture().Stub() idA := unittest.IdentifierFixture() idB := unittest.IdentifierFixture() - grouperFunc := func(er *flow.ExecutionReceiptMeta) flow.Identifier { + grouperFunc := func(er 
*flow.ExecutionReceiptStub) flow.Identifier { switch er.ID() { case er1.ID(): return idA @@ -66,11 +84,258 @@ func TestExecutionReceiptMetaGroupBy(t *testing.T) { } } - groups := flow.ExecutionReceiptMetaList{er1, er2, er3, er1}.GroupBy(grouperFunc) + groups := flow.ExecutionReceiptStubList{er1, er2, er3, er1}.GroupBy(grouperFunc) assert.Equal(t, 2, groups.NumberGroups()) - assert.Equal(t, flow.ExecutionReceiptMetaList{er1, er3, er1}, groups.GetGroup(idA)) - assert.Equal(t, flow.ExecutionReceiptMetaList{er2}, groups.GetGroup(idB)) + assert.Equal(t, flow.ExecutionReceiptStubList{er1, er3, er1}, groups.GetGroup(idA)) + assert.Equal(t, flow.ExecutionReceiptStubList{er2}, groups.GetGroup(idB)) unknown := groups.GetGroup(unittest.IdentifierFixture()) assert.Equal(t, 0, unknown.Size()) } + +// TestNewExecutionReceipt verifies the behavior of the NewExecutionReceipt constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedExecutionReceipt results in a valid ExecutionReceipt. +// +// 2. Invalid input with invalid UnsignedExecutionReceipt: +// - Ensures an error is returned when the UnsignedExecutionReceipt.ExecutorID is flow.ZeroID. +// +// 3. Invalid input with nil ExecutorSignature: +// - Ensures an error is returned when the ExecutorSignature is nil. +// +// 4. Invalid input with empty ExecutorSignature: +// - Ensures an error is returned when the ExecutorSignature is an empty byte slice. +func TestNewExecutionReceipt(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + receipt := unittest.ExecutionReceiptFixture() + res, err := flow.NewExecutionReceipt(flow.UntrustedExecutionReceipt(*receipt)) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with invalid unsigned execution receipt", func(t *testing.T) { + receipt := unittest.ExecutionReceiptFixture() + receipt.UnsignedExecutionReceipt.ExecutorID = flow.ZeroID + + res, err := flow.NewExecutionReceipt(flow.UntrustedExecutionReceipt(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "invalid unsigned execution receipt: executor ID must not be zero") + }) + + t.Run("invalid input with nil ExecutorSignature", func(t *testing.T) { + receipt := unittest.ExecutionReceiptFixture() + receipt.ExecutorSignature = nil + + res, err := flow.NewExecutionReceipt(flow.UntrustedExecutionReceipt(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "executor signature must not be empty") + }) + + t.Run("invalid input with empty ExecutorSignature", func(t *testing.T) { + receipt := unittest.ExecutionReceiptFixture() + receipt.ExecutorSignature = []byte{} + + res, err := flow.NewExecutionReceipt(flow.UntrustedExecutionReceipt(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "executor signature must not be empty") + }) +} + +// TestNewUnsignedExecutionReceipt verifies the behavior of the NewUnsignedExecutionReceipt constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedUnsignedExecutionReceipt results in a valid UnsignedExecutionReceipt. +// +// 2. Invalid input with zero ExecutorID: +// - Ensures an error is returned when the ExecutorID is flow.ZeroID. +// +// 3. 
Invalid input with invalid ExecutionResult: +// - Ensures an error is returned when the ExecutionResult.BlockID is flow.ZeroID. +// +// 4. Invalid input with nil Spocks: +// - Ensures an error is returned when the Spocks field is nil. +// +// 5. Invalid input with empty Spocks: +// - Ensures an error is returned when the Spocks field is an empty slice. +func TestNewUnsignedExecutionReceipt(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + receipt := unittest.UnsignedExecutionReceiptFixture() + res, err := flow.NewUnsignedExecutionReceipt(flow.UntrustedUnsignedExecutionReceipt(*receipt)) + + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with zero ExecutorID", func(t *testing.T) { + receipt := unittest.UnsignedExecutionReceiptFixture() + receipt.ExecutorID = flow.ZeroID + + res, err := flow.NewUnsignedExecutionReceipt(flow.UntrustedUnsignedExecutionReceipt(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "executor ID must not be zero") + }) + + t.Run("invalid input with invalid execution result", func(t *testing.T) { + receipt := unittest.UnsignedExecutionReceiptFixture() + receipt.ExecutionResult.BlockID = flow.ZeroID + + res, err := flow.NewUnsignedExecutionReceipt(flow.UntrustedUnsignedExecutionReceipt(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "invalid execution result: BlockID must not be empty") + }) + + t.Run("invalid input with nil Spocks", func(t *testing.T) { + receipt := unittest.UnsignedExecutionReceiptFixture() + receipt.Spocks = nil + + res, err := flow.NewUnsignedExecutionReceipt(flow.UntrustedUnsignedExecutionReceipt(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "spocks must not be empty") + }) + + t.Run("invalid input with empty Spocks", func(t *testing.T) { + receipt := unittest.UnsignedExecutionReceiptFixture() + receipt.Spocks = []crypto.Signature{} + + res, err := flow.NewUnsignedExecutionReceipt(flow.UntrustedUnsignedExecutionReceipt(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "spocks must not be empty") + }) +} + +// TestNewExecutionReceiptStub verifies the behavior of the NewExecutionReceiptStub constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedExecutionReceiptStub results in a valid ExecutionReceiptStub. +// +// 2. Invalid input with invalid UnsignedExecutionReceiptStub: +// - Ensures an error is returned when the UnsignedExecutionReceiptStub.ExecutorID is flow.ZeroID. +// +// 3. Invalid input with nil ExecutorSignature: +// - Ensures an error is returned when the ExecutorSignature is nil. +// +// 4. Invalid input with empty ExecutorSignature: +// - Ensures an error is returned when the ExecutorSignature is an empty byte slice. 
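+//
+// Each failure case below follows the same pattern: start from a valid fixture,
+// corrupt exactly one field, and assert that the constructor rejects it, e.g. (sketch):
+//
+//	stub := unittest.ExecutionReceiptFixture().Stub()
+//	stub.ExecutorSignature = nil // the single invalid field under test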
+func TestNewExecutionReceiptStub(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + receipt := unittest.ExecutionReceiptFixture().Stub() + res, err := flow.NewExecutionReceiptStub(flow.UntrustedExecutionReceiptStub(*receipt)) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with invalid unsigned execution receipt stub", func(t *testing.T) { + receipt := unittest.ExecutionReceiptFixture().Stub() + receipt.UnsignedExecutionReceiptStub.ExecutorID = flow.ZeroID + + res, err := flow.NewExecutionReceiptStub(flow.UntrustedExecutionReceiptStub(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "invalid unsigned execution receipt stub: executor ID must not be zero") + }) + + t.Run("invalid input with nil ExecutorSignature", func(t *testing.T) { + receipt := unittest.ExecutionReceiptFixture().Stub() + receipt.ExecutorSignature = nil + + res, err := flow.NewExecutionReceiptStub(flow.UntrustedExecutionReceiptStub(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "executor signature must not be empty") + }) + + t.Run("invalid input with empty ExecutorSignature", func(t *testing.T) { + receipt := unittest.ExecutionReceiptFixture().Stub() + receipt.ExecutorSignature = []byte{} + + res, err := flow.NewExecutionReceiptStub(flow.UntrustedExecutionReceiptStub(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "executor signature must not be empty") + }) +} + +// TestNewUnsignedExecutionReceiptStub verifies the behavior of the NewUnsignedExecutionReceiptStub constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedUnsignedExecutionReceiptStub results in a valid UnsignedExecutionReceiptStub. +// +// 2. Invalid input with zero executor ID: +// - Ensures an error is returned when the ExecutorID is flow.ZeroID. +// +// 3. Invalid input with zero result ID: +// - Ensures an error is returned when the ResultID is flow.ZeroID. +// +// 4. Invalid input with nil spocks: +// - Ensures an error is returned when the spocks field is nil. +// +// 5. Invalid input with empty spocks: +// - Ensures an error is returned when the spocks field is an empty slice. 
+func TestNewUnsignedExecutionReceiptStub(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + receipt := unittest.UnsignedExecutionReceiptFixture().Stub() + res, err := flow.NewUnsignedExecutionReceiptStub(flow.UntrustedUnsignedExecutionReceiptStub(*receipt)) + require.NoError(t, err) + require.NotNil(t, res) + }) + + t.Run("invalid input with zero executor ID", func(t *testing.T) { + receipt := unittest.UnsignedExecutionReceiptFixture().Stub() + receipt.ExecutorID = flow.ZeroID + + res, err := flow.NewUnsignedExecutionReceiptStub(flow.UntrustedUnsignedExecutionReceiptStub(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "executor ID must not be zero") + }) + + t.Run("invalid input with zero result ID", func(t *testing.T) { + receipt := unittest.UnsignedExecutionReceiptFixture().Stub() + receipt.ResultID = flow.ZeroID + + res, err := flow.NewUnsignedExecutionReceiptStub(flow.UntrustedUnsignedExecutionReceiptStub(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "result ID must not be zero") + }) + + t.Run("invalid input with nil spocks", func(t *testing.T) { + receipt := unittest.UnsignedExecutionReceiptFixture().Stub() + receipt.Spocks = nil + + res, err := flow.NewUnsignedExecutionReceiptStub(flow.UntrustedUnsignedExecutionReceiptStub(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "spocks must not be empty") + }) + + t.Run("invalid input with empty spocks", func(t *testing.T) { + receipt := unittest.UnsignedExecutionReceiptFixture().Stub() + receipt.Spocks = []crypto.Signature{} + + res, err := flow.NewUnsignedExecutionReceiptStub(flow.UntrustedUnsignedExecutionReceiptStub(*receipt)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "spocks must not be empty") + }) +} diff --git a/model/flow/execution_result.go b/model/flow/execution_result.go index 9661ff3d3c2..fbcf00e6e22 100644 --- a/model/flow/execution_result.go +++ b/model/flow/execution_result.go @@ -3,34 +3,85 @@ package flow import ( "encoding/json" "errors" + "fmt" ) var ErrNoChunks = errors.New("execution result has no chunks") // ExecutionResult is cryptographic commitment to the computation // result(s) from executing a block +// +//structwrite:immutable - mutations allowed only within the constructor type ExecutionResult struct { PreviousResultID Identifier // commit of the previous ER BlockID Identifier // commit of the current block Chunks ChunkList ServiceEvents ServiceEventList - ExecutionDataID Identifier + ExecutionDataID Identifier // hash commitment to flow.BlockExecutionDataRoot } -func NewExecutionResult( - previousResultID Identifier, - blockID Identifier, - chunks ChunkList, - serviceEvents ServiceEventList, - executionDataID Identifier, -) *ExecutionResult { +// UntrustedExecutionResult is an untrusted input-only representation of an ExecutionResult, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedExecutionResult should be validated and converted into +// a trusted ExecutionResult using the NewExecutionResult constructor. +type UntrustedExecutionResult ExecutionResult + +// NewExecutionResult creates a new instance of ExecutionResult. +// Construction of an ExecutionResult is allowed only within the constructor.
+// +// All errors indicate a valid ExecutionResult cannot be constructed from the input. +func NewExecutionResult(untrusted UntrustedExecutionResult) (*ExecutionResult, error) { + if untrusted.PreviousResultID == ZeroID { + return nil, fmt.Errorf("PreviousResultID must not be empty") + } + + if untrusted.BlockID == ZeroID { + return nil, fmt.Errorf("BlockID must not be empty") + } + + if len(untrusted.Chunks) == 0 { + return nil, fmt.Errorf("Chunks must not be empty") + } + + if untrusted.ExecutionDataID == ZeroID { + return nil, fmt.Errorf("ExecutionDataID must not be empty") + } + return &ExecutionResult{ - PreviousResultID: previousResultID, - BlockID: blockID, - Chunks: chunks, - ServiceEvents: serviceEvents, - ExecutionDataID: executionDataID, + PreviousResultID: untrusted.PreviousResultID, + BlockID: untrusted.BlockID, + Chunks: untrusted.Chunks, + ServiceEvents: untrusted.ServiceEvents, + ExecutionDataID: untrusted.ExecutionDataID, + }, nil +} + +// NewRootExecutionResult creates a new instance of root ExecutionResult +// with empty PreviousResultID and ExecutionDataID fields. +// Construction of an ExecutionResult is allowed only within the constructor. +// +// All errors indicate a valid root ExecutionResult cannot be constructed from the input. +func NewRootExecutionResult(untrusted UntrustedExecutionResult) (*ExecutionResult, error) { + if untrusted.BlockID == ZeroID { + return nil, fmt.Errorf("BlockID must not be empty") } + + if len(untrusted.Chunks) == 0 { + return nil, fmt.Errorf("Chunks must not be empty") + } + + return &ExecutionResult{ + PreviousResultID: untrusted.PreviousResultID, + BlockID: untrusted.BlockID, + Chunks: untrusted.Chunks, + ServiceEvents: untrusted.ServiceEvents, + ExecutionDataID: untrusted.ExecutionDataID, + }, nil } // ID returns the hash of the execution result body @@ -38,12 +89,7 @@ func (er ExecutionResult) ID() Identifier { return MakeID(er) } -// Checksum ... -func (er ExecutionResult) Checksum() Identifier { - return MakeID(er) -} - -// ValidateChunksLength checks whether the number of chuncks is zero. +// ValidateChunksLength checks whether the number of chunks is zero. // // It returns false if the number of chunks is zero (invalid). // By protocol definition, each ExecutionReceipt must contain at least one @@ -54,7 +100,8 @@ func (er ExecutionResult) ValidateChunksLength() bool { // FinalStateCommitment returns the Execution Result's commitment to the final // execution state of the block, i.e. the last chunk's output state. -// Error returns: +// +// This function is side-effect free. The only possible error it returns is of type: // - ErrNoChunks: if there are no chunks (ExecutionResult is malformed) func (er ExecutionResult) FinalStateCommitment() (StateCommitment, error) { if !er.ValidateChunksLength() { @@ -65,7 +112,8 @@ func (er ExecutionResult) FinalStateCommitment() (StateCommitment, error) { // InitialStateCommit returns a commitment to the execution state used as input // for computing the block, i.e. the leading chunk's input state. -// Error returns: +// +// This function is side-effect free. The only possible error it returns is of type: // - ErrNoChunks: if there are no chunks (ExecutionResult is malformed) func (er ExecutionResult) InitialStateCommit() (StateCommitment, error) { if !er.ValidateChunksLength() { @@ -74,6 +122,26 @@ func (er ExecutionResult) InitialStateCommit() (StateCommitment, error) { return er.Chunks[0].StartState, nil }
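Usage sketch for the constructors above (identifiers such as prevResultID are placeholders, not APIs introduced by this diff): callers assemble the untrusted value with named fields and only obtain a *ExecutionResult once validation passes.

    result, err := flow.NewExecutionResult(flow.UntrustedExecutionResult{
    	PreviousResultID: prevResultID,  // placeholder; must be non-zero
    	BlockID:          blockID,       // placeholder; must be non-zero
    	Chunks:           chunks,        // placeholder; at least one chunk required
    	ServiceEvents:    serviceEvents, // may be nil
    	ExecutionDataID:  execDataID,    // placeholder; must be non-zero
    })
    if err != nil {
    	return fmt.Errorf("rejecting invalid execution result: %w", err)
    }
    _ = result.ID() // safe to use: the value passed all structural checks

+// SystemChunk is a system-generated chunk added to every block.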
+// It is always the final chunk in an execution result. +func (er ExecutionResult) SystemChunk() *Chunk { + return er.Chunks[len(er.Chunks)-1] +} + +// ServiceEventsByChunk returns the list of service events emitted during the given chunk. +func (er ExecutionResult) ServiceEventsByChunk(chunkIndex uint64) ServiceEventList { + serviceEventCount := er.Chunks[chunkIndex].ServiceEventCount + if serviceEventCount == 0 { + return nil + } + + startIndex := 0 + for i := uint64(0); i < chunkIndex; i++ { + startIndex += int(er.Chunks[i].ServiceEventCount) + } + return er.ServiceEvents[startIndex : startIndex+int(serviceEventCount)] +} + func (er ExecutionResult) MarshalJSON() ([]byte, error) { type Alias ExecutionResult return json.Marshal(struct { diff --git a/model/flow/execution_result_test.go b/model/flow/execution_result_test.go index b697488b86d..c7b4169a6d5 100644 --- a/model/flow/execution_result_test.go +++ b/model/flow/execution_result_test.go @@ -1,6 +1,7 @@ package flow_test import ( + "math/rand" "testing" "github.com/stretchr/testify/assert" @@ -9,6 +10,16 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) +// TestExecutionResultID_Malleability confirms that the ExecutionResult struct, which implements +// the [flow.IDEntity] interface, is resistant to tampering. +func TestExecutionResultID_Malleability(t *testing.T) { + unittest.RequireEntityNonMalleable(t, + unittest.ExecutionResultFixture(), + unittest.WithFieldGenerator("ServiceEvents", func() []flow.ServiceEvent { + return unittest.ServiceEventsFixture(3) + })) +} + // TestExecutionResultGroupBy tests the GroupBy method of ExecutionResultList: // * grouping should preserve order and multiplicity of elements // * group for unknown identifier should be empty @@ -41,3 +52,276 @@ func TestExecutionResultGroupBy(t *testing.T) { unknown := groups.GetGroup(unittest.IdentifierFixture()) assert.Equal(t, 0, unknown.Size()) } + +// Tests that [ExecutionResult.ServiceEventsByChunk] method works in a variety of circumstances. 
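A worked example of the indexing performed by ServiceEventsByChunk above, before the tests that exercise it: the start offset for chunk k is the sum of ServiceEventCount over chunks 0..k-1. With hypothetical per-chunk counts [2, 0, 1] and three service events, chunk 0 yields ServiceEvents[0:2], chunk 1 yields nil, and chunk 2 yields ServiceEvents[2:3]. The same prefix-sum in isolation:

    // serviceEventRange mirrors the offset arithmetic of ServiceEventsByChunk;
    // counts[i] stands in for Chunks[i].ServiceEventCount. Illustration only.
    func serviceEventRange(counts []uint16, chunkIndex int) (start, end int) {
    	for i := 0; i < chunkIndex; i++ {
    		start += int(counts[i])
    	}
    	return start, start + int(counts[chunkIndex])
    }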
+func TestExecutionResult_ServiceEventsByChunk(t *testing.T) { + t.Run("no service events", func(t *testing.T) { + result := unittest.ExecutionResultFixture() + for _, chunk := range result.Chunks { + chunk.ServiceEventCount = 0 + } + // should return empty list for all chunks + for chunkIndex := 0; chunkIndex < result.Chunks.Len(); chunkIndex++ { + serviceEvents := result.ServiceEventsByChunk(uint64(chunkIndex)) + assert.Len(t, serviceEvents, 0) + } + }) + + t.Run("service events only in system chunk", func(t *testing.T) { + nServiceEvents := rand.Intn(10) + 1 + result := unittest.ExecutionResultFixture(unittest.WithServiceEvents(nServiceEvents)) + for _, chunk := range result.Chunks[:result.Chunks.Len()-1] { + chunk.ServiceEventCount = 0 + } + result.SystemChunk().ServiceEventCount = uint16(nServiceEvents) + + // should return empty list for all non-system chunks + for chunkIndex := 0; chunkIndex < result.Chunks.Len()-1; chunkIndex++ { + serviceEvents := result.ServiceEventsByChunk(uint64(chunkIndex)) + assert.Len(t, serviceEvents, 0) + } + // should return list of service events for system chunk + assert.Equal(t, result.ServiceEvents, result.ServiceEventsByChunk(result.SystemChunk().Index)) + }) + + t.Run("service events only in non-system chunks", func(t *testing.T) { + result := unittest.ExecutionResultFixture() + unittest.WithServiceEvents(result.Chunks.Len() - 1)(result) // one service event per non-system chunk + + for _, chunk := range result.Chunks { + chunk.ServiceEventCount = 1 + } + result.SystemChunk().ServiceEventCount = 0 + + // should return one service event per non-system chunk + for chunkIndex := 0; chunkIndex < result.Chunks.Len()-1; chunkIndex++ { + serviceEvents := result.ServiceEventsByChunk(uint64(chunkIndex)) + assert.Equal(t, result.ServiceEvents[chunkIndex:chunkIndex+1], serviceEvents) + } + // should return empty list for system chunk + assert.Len(t, result.ServiceEventsByChunk(result.SystemChunk().Index), 0) + }) + + t.Run("service events in all chunks", func(t *testing.T) { + result := unittest.ExecutionResultFixture() + unittest.WithServiceEvents(result.Chunks.Len())(result) // one service event per chunk + + for _, chunk := range result.Chunks { + chunk.ServiceEventCount = 1 + } + + // should return one service event per chunk + for chunkIndex := 0; chunkIndex < result.Chunks.Len(); chunkIndex++ { + serviceEvents := result.ServiceEventsByChunk(uint64(chunkIndex)) + assert.Equal(t, result.ServiceEvents[chunkIndex:chunkIndex+1], serviceEvents) + } + }) +} + +// TestNewExecutionResult verifies the behavior of the NewExecutionResult constructor. +// It ensures that a fully populated UntrustedExecutionResult yields a valid ExecutionResult, +// and that missing or invalid required fields produce an error. +// +// Test Cases: +// +// 1. Valid input with non‐nil Chunks and non‐nil ServiceEvents: +// - PreviousResultID, BlockID, Chunks, ServiceEvents, and ExecutionDataID are all set. +// - Expect no error and a properly populated ExecutionResult. +// +// 2. Valid input with non‐nil Chunks and nil ServiceEvents: +// - ServiceEvents omitted (nil) but all other fields valid. +// - Expect no error and ExecutionResult.ServiceEvents == nil. +// +// 3. Invalid input: PreviousResultID is ZeroID: +// - Ensures error when the previous result ID is missing. +// +// 4. Invalid input: BlockID is ZeroID: +// - Ensures error when the block ID is missing. +// +// 5. Invalid input: Chunks is nil: +// - Ensures error when the chunk list is nil. +// +// 6.
Invalid input: Chunks are empty: +// - Ensures error when the chunk list is empty. +// +// 7. Invalid input: ExecutionDataID is ZeroID: +// - Ensures error when the execution data ID is missing. +func TestNewExecutionResult(t *testing.T) { + validPrevID := unittest.IdentifierFixture() + validBlockID := unittest.IdentifierFixture() + validExecDataID := unittest.IdentifierFixture() + chunks := unittest.ChunkListFixture(5, unittest.IdentifierFixture(), unittest.StateCommitmentFixture()) + + t.Run("valid result with non-nil slices", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: validPrevID, + BlockID: validBlockID, + Chunks: chunks, + ServiceEvents: flow.ServiceEventList{}, + ExecutionDataID: validExecDataID, + } + res, err := flow.NewExecutionResult(u) + assert.NoError(t, err) + assert.NotNil(t, res) + assert.Equal(t, *res, flow.ExecutionResult(u)) + }) + + t.Run("valid result with nil ServiceEvents", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: validPrevID, + BlockID: validBlockID, + Chunks: chunks, + // ServiceEvents left nil + ExecutionDataID: validExecDataID, + } + res, err := flow.NewExecutionResult(u) + assert.NoError(t, err) + assert.NotNil(t, res) + assert.Nil(t, res.ServiceEvents) + }) + + t.Run("missing PreviousResultID", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: flow.ZeroID, + BlockID: validBlockID, + Chunks: chunks, + ExecutionDataID: validExecDataID, + } + res, err := flow.NewExecutionResult(u) + assert.Error(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "PreviousResultID") + }) + + t.Run("missing BlockID", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: validPrevID, + BlockID: flow.ZeroID, + Chunks: chunks, + ExecutionDataID: validExecDataID, + } + res, err := flow.NewExecutionResult(u) + assert.Error(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "BlockID") + }) + + t.Run("nil Chunks", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: validPrevID, + BlockID: validBlockID, + Chunks: nil, + ExecutionDataID: validExecDataID, + } + res, err := flow.NewExecutionResult(u) + assert.Error(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "Chunks") + }) + + t.Run("empty Chunks", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: validPrevID, + BlockID: validBlockID, + Chunks: flow.ChunkList{}, + ExecutionDataID: validExecDataID, + } + res, err := flow.NewExecutionResult(u) + assert.Error(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "Chunks") + }) + + t.Run("missing ExecutionDataID", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: validPrevID, + BlockID: validBlockID, + Chunks: chunks, + ExecutionDataID: flow.ZeroID, + } + res, err := flow.NewExecutionResult(u) + assert.Error(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "ExecutionDataID") + }) +} + +// TestNewRootExecutionResult verifies the behavior of the NewRootExecutionResult constructor. +// It ensures that a “root” ExecutionResult can be created with an empty PreviousResultID +// and ExecutionDataID, given a valid BlockID and non‐empty Chunks, and that missing +// required fields produce an error. +// +// Test Cases: +// +// 1. Valid root input with non‐empty Chunks: +// - BlockID set, Chunks non‐empty, PreviousResultID and ExecutionDataID left at ZeroID. 
+// - Expect no error and ExecutionResult with zero PreviousResultID/ExecutionDataID. +// +// 2. Invalid input: BlockID is ZeroID: +// - Ensures error when the block ID is missing. +// +// 3. Invalid input: Chunks are empty: +// - Ensures error when the chunk list is empty. +// +// 4. Invalid input: Chunks is nil: +// - Ensures error when the chunk list is nil. +func TestNewRootExecutionResult(t *testing.T) { + validPrevID := unittest.IdentifierFixture() + validBlockID := unittest.IdentifierFixture() + validExecDataID := unittest.IdentifierFixture() + chunks := unittest.ChunkListFixture(5, unittest.IdentifierFixture(), unittest.StateCommitmentFixture()) + + t.Run("valid root result with non-nil slices", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: flow.ZeroID, + BlockID: validBlockID, + Chunks: chunks, + ServiceEvents: flow.ServiceEventList{}, + ExecutionDataID: flow.ZeroID, + } + res, err := flow.NewRootExecutionResult(u) + assert.NoError(t, err) + assert.NotNil(t, res) + assert.Equal(t, *res, flow.ExecutionResult(u)) + }) + + t.Run("missing BlockID", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: validPrevID, + BlockID: flow.ZeroID, + Chunks: chunks, + ServiceEvents: flow.ServiceEventList{}, + ExecutionDataID: validExecDataID, + } + res, err := flow.NewRootExecutionResult(u) + assert.Error(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "BlockID") + }) + + t.Run("nil Chunks", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: validPrevID, + BlockID: validBlockID, + Chunks: nil, + ExecutionDataID: validExecDataID, + } + res, err := flow.NewRootExecutionResult(u) + assert.Error(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "Chunks") + }) + + t.Run("empty Chunks", func(t *testing.T) { + u := flow.UntrustedExecutionResult{ + PreviousResultID: validPrevID, + BlockID: validBlockID, + Chunks: flow.ChunkList{}, + ExecutionDataID: validExecDataID, + } + res, err := flow.NewRootExecutionResult(u) + assert.Error(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "Chunks") + }) +} diff --git a/model/flow/factory/cluster_list.go b/model/flow/factory/cluster_list.go index 29bf374ac23..2ecb16e8d72 100644 --- a/model/flow/factory/cluster_list.go +++ b/model/flow/factory/cluster_list.go @@ -4,46 +4,60 @@ import ( "fmt" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" ) -// NewClusterList creates a new cluster list based on the given cluster assignment -// and the provided list of identities. +// NewClusterList creates a new cluster list based on the given cluster assignment and the provided list of identities. +// The implementation enforces the following protocol rules and errors in case they are violated: +// +// (a) input `collectors` only contains collector nodes with positive weight +// (b) collectors have unique node IDs +// (c) each collector is assigned exactly to one cluster and is only listed once within that cluster +// +// Furthermore, for each cluster (i.e. element in `assignments`) we enforce: +// +// (d) cluster contains at least one collector (i.e. is not empty) +// (e) cluster is composed of known nodes, i.e. 
for each nodeID in `assignments` an IdentitySkeleton is given in `collectors` +// (f) cluster assignment lists the nodes in canonical ordering +// // The caller must ensure each assignment contains identities ordered in canonical order, so that // each cluster in the returned cluster list is ordered in canonical order as well. If not, // an error will be returned. -func NewClusterList(assignments flow.AssignmentList, collectors flow.IdentityList) (flow.ClusterList, error) { - +// This is a side-effect-free function. Any error return indicates that the inputs violate protocol rules. +func NewClusterList(assignments flow.AssignmentList, collectors flow.IdentitySkeletonList) (flow.ClusterList, error) { // build a lookup for all the identities by node identifier - lookup := make(map[flow.Identifier]*flow.Identity) - for _, collector := range collectors { + lookup := collectors.Lookup() + for _, collector := range collectors { // enforce (a): `collectors` only contains collector nodes with positive weight + if collector.Role != flow.RoleCollection { + return nil, fmt.Errorf("node %v is not a collector", collector.NodeID) + } + if collector.InitialWeight == 0 { + return nil, fmt.Errorf("node %v has zero weight", collector.NodeID) + } lookup[collector.NodeID] = collector } - if len(lookup) != len(collectors) { + if len(lookup) != len(collectors) { // enforce (b): collectors have unique node IDs return nil, fmt.Errorf("duplicate collector in list") } - // replicate the identifier list but use identities instead + // assignments only contains the NodeIDs for each cluster. In the following, we substitute them with the respective IdentitySkeletons. clusters := make(flow.ClusterList, 0, len(assignments)) for i, participants := range assignments { - cluster := make(flow.IdentityList, 0, len(participants)) - if len(participants) == 0 { - return nil, fmt.Errorf("particpants in assignment list is empty, cluster index %v", i) + cluster := make(flow.IdentitySkeletonList, 0, len(participants)) + if len(participants) == 0 { // enforce (d): each cluster contains at least one collector (i.e.
is not empty) + return nil, fmt.Errorf("participants in assignment list is empty, cluster index %v", i) } - // Check assignments is sorted in canonical order - prev := participants[0] - + prev := participants[0] // for checking that cluster participants are listed in canonical order for i, participantID := range participants { - participant, found := lookup[participantID] + participant, found := lookup[participantID] // enforce (e): for each nodeID in assignments an IdentitySkeleton is given in `collectors` if !found { return nil, fmt.Errorf("could not find collector identity (%x)", participantID) } cluster = append(cluster, participant) - delete(lookup, participantID) + delete(lookup, participantID) // enforce (c) part 1: reject repeated assignment of the same node - if i > 0 { - if !order.IdentifierCanonical(prev, participantID) { + if i > 0 { // enforce (f): canonical ordering + if !flow.IsIdentifierCanonical(prev, participantID) { return nil, fmt.Errorf("the assignments is not sorted in canonical order in cluster index %v, prev %v, next %v", i, prev, participantID) } @@ -54,8 +68,7 @@ func NewClusterList(assignments flow.AssignmentList, collectors flow.IdentityLis clusters = append(clusters, cluster) } - // check that every collector was assigned - if len(lookup) != 0 { + if len(lookup) != 0 { // enforce (c) part 2: every collector was assigned return nil, fmt.Errorf("missing collector assignments (%s)", lookup) } diff --git a/model/flow/factory/cluster_list_test.go b/model/flow/factory/cluster_list_test.go index 0c938d5e8da..894c416d456 100644 --- a/model/flow/factory/cluster_list_test.go +++ b/model/flow/factory/cluster_list_test.go @@ -10,16 +10,67 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -// NewClusterList assumes the input assignments are sorted, and fail if not. -// This tests verifies that NewClusterList has implemented the check on the assumption. -func TestNewClusterListFail(t *testing.T) { +// TestNewClusterList ensures that implementation enforces the following protocol rules in case they are violated: +// +// (a) input `collectors` only contains collector nodes with positive weight +// (b) collectors have unique node IDs +// (c) each collector is assigned exactly to one cluster and is only listed once within that cluster +// (d) cluster contains at least one collector (i.e. is not empty) +// (e) cluster is composed of known nodes, i.e. 
for each nodeID in `assignments` an IdentitySkeleton is given in `collectors` +// (f) cluster assignment lists the nodes in canonical ordering +func TestNewClusterList(t *testing.T) { identities := unittest.IdentityListFixture(100, unittest.WithRole(flow.RoleCollection)) - assignments := unittest.ClusterAssignment(10, identities) - tmp := assignments[1][0] - assignments[1][0] = assignments[1][1] - assignments[1][1] = tmp - - _, err := factory.NewClusterList(assignments, identities) - require.Error(t, err) + t.Run("valid inputs", func(t *testing.T) { + assignments := unittest.ClusterAssignment(10, identities.ToSkeleton()) + _, err := factory.NewClusterList(assignments, identities.ToSkeleton()) + require.NoError(t, err) + }) + t.Run("(a) input `collectors` only contains collector nodes with positive weight", func(t *testing.T) { + identities := identities.Copy() + identities[0].InitialWeight = 0 + assignments := unittest.ClusterAssignment(10, identities.ToSkeleton()) + _, err := factory.NewClusterList(assignments, identities.ToSkeleton()) + require.Error(t, err) + }) + t.Run("(b) collectors have unique node IDs", func(t *testing.T) { + identities := identities.Copy() + identities[0].NodeID = identities[1].NodeID + assignments := unittest.ClusterAssignment(10, identities.ToSkeleton()) + _, err := factory.NewClusterList(assignments, identities.ToSkeleton()) + require.Error(t, err) + }) + t.Run("(c) each collector is assigned exactly to one cluster", func(t *testing.T) { + assignments := unittest.ClusterAssignment(10, identities.ToSkeleton()) + assignments[1][0] = assignments[0][0] + _, err := factory.NewClusterList(assignments, identities.ToSkeleton()) + require.Error(t, err) + }) + t.Run("(c) each collector is only listed once within that cluster", func(t *testing.T) { + assignments := unittest.ClusterAssignment(10, identities.ToSkeleton()) + assignments[0][0] = assignments[0][1] + _, err := factory.NewClusterList(assignments, identities.ToSkeleton()) + require.Error(t, err) + }) + t.Run("(d) cluster contains at least one collector (i.e. is not empty)", func(t *testing.T) { + assignments := unittest.ClusterAssignment(10, identities.ToSkeleton()) + assignments[0] = flow.IdentifierList{} + _, err := factory.NewClusterList(assignments, identities.ToSkeleton()) + require.Error(t, err) + }) + t.Run("(e) cluster is composed of known nodes, i.e. 
for each nodeID in `assignments` an IdentitySkeleton is given in `collectors` ", func(t *testing.T) { + assignments := unittest.ClusterAssignment(10, identities.ToSkeleton()) + assignments[0][0] = unittest.IdentifierFixture() + _, err := factory.NewClusterList(assignments, identities.ToSkeleton()) + require.Error(t, err) + }) + t.Run("(f) cluster assignment lists the nodes in canonical ordering", func(t *testing.T) { + assignments := unittest.ClusterAssignment(10, identities.ToSkeleton()) + // sort in non-canonical order + assignments[0] = assignments[0].Sort(func(lhs flow.Identifier, rhs flow.Identifier) int { + return -flow.IdentifierCanonical(lhs, rhs) + }) + _, err := factory.NewClusterList(assignments, identities.ToSkeleton()) + require.Error(t, err) + }) } diff --git a/model/flow/filter/id/identifier.go b/model/flow/filter/id/identifier.go index 63b7f61e6b9..749edbe575d 100644 --- a/model/flow/filter/id/identifier.go +++ b/model/flow/filter/id/identifier.go @@ -1,4 +1,3 @@ -// (c) 2021 Dapper Labs - ALL RIGHTS RESERVED package id import "github.com/onflow/flow-go/model/flow" diff --git a/model/flow/filter/identity.go b/model/flow/filter/identity.go index 2c312c05028..21b1813f8b3 100644 --- a/model/flow/filter/identity.go +++ b/model/flow/filter/identity.go @@ -1,20 +1,28 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package filter import ( - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/flow" ) +// Adapt takes an IdentityFilter on the domain of IdentitySkeletons +// and adapts the filter to the domain of full Identities. In other words, it converts +// flow.IdentityFilter[flow.IdentitySkeleton] to flow.IdentityFilter[flow.Identity]. +func Adapt(f flow.IdentityFilter[flow.IdentitySkeleton]) flow.IdentityFilter[flow.Identity] { + return func(i *flow.Identity) bool { + return f(&i.IdentitySkeleton) + } +} + // Any will always be true. func Any(*flow.Identity) bool { return true } // And combines two or more filters that all need to be true. -func And(filters ...flow.IdentityFilter) flow.IdentityFilter { - return func(identity *flow.Identity) bool { +func And[T flow.GenericIdentity](filters ...flow.IdentityFilter[T]) flow.IdentityFilter[T] { + return func(identity *T) bool { for _, filter := range filters { if !filter(identity) { return false @@ -25,8 +33,8 @@ func And(filters ...flow.IdentityFilter) flow.IdentityFilter { } // Or combines two or more filters and only needs one of them to be true. -func Or(filters ...flow.IdentityFilter) flow.IdentityFilter { - return func(identity *flow.Identity) bool { +func Or[T flow.GenericIdentity](filters ...flow.IdentityFilter[T]) flow.IdentityFilter[T] { + return func(identity *T) bool { for _, filter := range filters { if filter(identity) { return true @@ -37,34 +45,37 @@ func Or(filters ...flow.IdentityFilter) flow.IdentityFilter { } // Not returns a filter equivalent to the inverse of the input filter. -func Not(filter flow.IdentityFilter) flow.IdentityFilter { - return func(identity *flow.Identity) bool { +func Not[T flow.GenericIdentity](filter flow.IdentityFilter[T]) flow.IdentityFilter[T] { + return func(identity *T) bool { return !filter(identity) } } -// In returns a filter for identities within the input list. This is equivalent -// to HasNodeID, but for list-typed inputs. -func In(list flow.IdentityList) flow.IdentityFilter { - return HasNodeID(list.NodeIDs()...) +// In returns a filter for identities within the input list. 
For an input identity i, +// the filter returns true if and only if i ∈ list. +// Caution: The filter solely operates on NodeIDs. Other identity fields are not compared. +// This function is just a compact representation of `HasNodeID[T](list.NodeIDs()...)` +// which behaves algorithmically the same way. +func In[T flow.GenericIdentity](list flow.GenericIdentityList[T]) flow.IdentityFilter[T] { + return HasNodeID[T](list.NodeIDs()...) } // HasNodeID returns a filter that returns true for any identity with an ID // matching any of the inputs. -func HasNodeID(nodeIDs ...flow.Identifier) flow.IdentityFilter { +func HasNodeID[T flow.GenericIdentity](nodeIDs ...flow.Identifier) flow.IdentityFilter[T] { lookup := make(map[flow.Identifier]struct{}) for _, nodeID := range nodeIDs { lookup[nodeID] = struct{}{} } - return func(identity *flow.Identity) bool { - _, ok := lookup[identity.NodeID] + return func(identity *T) bool { + _, ok := lookup[(*identity).GetNodeID()] return ok } } // HasNetworkingKey returns a filter that returns true for any identity with a // networking public key matching any of the inputs. -func HasNetworkingKey(keys ...crypto.PublicKey) flow.IdentityFilter { +func HasNetworkingKey(keys ...crypto.PublicKey) flow.IdentityFilter[flow.Identity] { return func(identity *flow.Identity) bool { for _, key := range keys { if key.Equals(identity.NetworkPubKey) { @@ -75,45 +86,74 @@ func HasNetworkingKey(keys ...crypto.PublicKey) flow.IdentityFilter { } } -// HasWeight returns a filter for nodes with non-zero weight. -func HasWeight(hasWeight bool) flow.IdentityFilter { - return func(identity *flow.Identity) bool { - return (identity.Weight > 0) == hasWeight +// HasInitialWeight returns a filter for nodes with non-zero initial weight. +func HasInitialWeight[T flow.GenericIdentity](hasWeight bool) flow.IdentityFilter[T] { + return func(identity *T) bool { + return ((*identity).GetInitialWeight() > 0) == hasWeight } } -// Ejected is a filter that returns true if the node is ejected. -func Ejected(identity *flow.Identity) bool { - return identity.Ejected +// HasParticipationStatus is a filter that returns true if the node epoch participation status matches the input. +func HasParticipationStatus(status flow.EpochParticipationStatus) flow.IdentityFilter[flow.Identity] { + return func(identity *flow.Identity) bool { + return identity.EpochParticipationStatus == status + } } // HasRole returns a filter for nodes with one of the input roles. -func HasRole(roles ...flow.Role) flow.IdentityFilter { +func HasRole[T flow.GenericIdentity](roles ...flow.Role) flow.IdentityFilter[T] { lookup := make(map[flow.Role]struct{}) for _, role := range roles { lookup[role] = struct{}{} } - return func(identity *flow.Identity) bool { - _, ok := lookup[identity.Role] + return func(identity *T) bool { + _, ok := lookup[(*identity).GetRole()] return ok } } // IsValidCurrentEpochParticipant is an identity filter for members of the // current epoch in good standing. -var IsValidCurrentEpochParticipant = And( - HasWeight(true), - Not(Ejected), // ejection will change signer index +// Effectively, this means the node is an active identity in the current epoch and has not been ejected. +var IsValidCurrentEpochParticipant = HasParticipationStatus(flow.EpochParticipationStatusActive) +
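A composition sketch for the generic combinators above (identities is assumed to be a flow.IdentityList with the Filter method used elsewhere in flow-go): Adapt lifts skeleton-domain filters into the full-identity domain so they can be combined with epoch-status filters.

    // Sketch: active consensus nodes with positive initial weight, mirroring
    // the spirit of IsVotingConsensusCommitteeMember defined below.
    activeConsensus := filter.And[flow.Identity](
    	filter.HasParticipationStatus(flow.EpochParticipationStatusActive),
    	filter.Adapt(filter.And[flow.IdentitySkeleton](
    		filter.HasRole[flow.IdentitySkeleton](flow.RoleConsensus),
    		filter.HasInitialWeight[flow.IdentitySkeleton](true),
    	)),
    )
    voters := identities.Filter(activeConsensus)
    _ = voters

+// IsValidCurrentEpochParticipantOrJoining is an identity filter for members of the current epoch or nodes that will join in the next epoch.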
+var IsValidCurrentEpochParticipantOrJoining = Or(IsValidCurrentEpochParticipant, HasParticipationStatus(flow.EpochParticipationStatusJoining)) + +// IsConsensusCommitteeMember is an identity filter for all members of the consensus committee. +// Formally, a Node X is a Consensus Committee Member if and only if X is a consensus node with +// positive initial weight. This is specified by the EpochSetup Event and remains static +// throughout the epoch. +var IsConsensusCommitteeMember = And( + HasRole[flow.IdentitySkeleton](flow.RoleConsensus), + HasInitialWeight[flow.IdentitySkeleton](true), ) -// IsVotingConsensusCommitteeMember is a identity filter for all members of +// IsVotingConsensusCommitteeMember is an identity filter for all members of // the consensus committee allowed to vote. -var IsVotingConsensusCommitteeMember = And( - HasRole(flow.RoleConsensus), - IsValidCurrentEpochParticipant, +// Formally, a Node X has authority to vote in the consensus process, if and only if +// 1. Node X is an active member of the current epoch AND +// 2. X is a consensus node with positive initial weight in the current Epoch. This +// is specified by the EpochSetup Event for the current epoch and remains static +// throughout the epoch. +var IsVotingConsensusCommitteeMember = And[flow.Identity]( + IsValidCurrentEpochParticipant, // enforces 1. + Adapt(IsConsensusCommitteeMember), // enforces 2. ) -// IsValidDKGParticipant is an identity filter for all DKG participants. It is -// equivalent to the filter for consensus committee members, as these are -// the same group for now. -var IsValidDKGParticipant = IsVotingConsensusCommitteeMember +// NotEjectedFilter is an identity filter for peers that are not ejected. +var NotEjectedFilter = Not(HasParticipationStatus(flow.EpochParticipationStatusEjected)) + +// HasWeightGreaterThanZero returns a filter for nodes with a weight greater than zero. +func HasWeightGreaterThanZero[T flow.GenericIdentity](identity *T) bool { + return (*identity).GetInitialWeight() > 0 +} + +// IsValidProtocolParticipant is an identity filter for all valid protocol participants. +// A protocol participant is considered valid if and only if the following are both true. +// 1. The node is not ejected. +// 2. The node has a weight greater than 0. +var IsValidProtocolParticipant = And[flow.Identity]( + NotEjectedFilter, // enforces 1 + HasWeightGreaterThanZero[flow.Identity], // enforces 2 +) diff --git a/model/flow/header.go b/model/flow/header.go index a4ee2efbc78..927855537c0 100644 --- a/model/flow/header.go +++ b/model/flow/header.go @@ -2,7 +2,7 @@ package flow import ( "encoding/json" - "time" + "fmt" "github.com/fxamacker/cbor/v2" "github.com/vmihailenco/msgpack/v4" @@ -11,21 +11,36 @@ import ( "github.com/onflow/flow-go/model/fingerprint" ) -// Header contains all meta-data for a block, as well as a hash representing -// the combined payload of the entire block. It is what consensus nodes agree -// on after validating the contents against the payload hash. -type Header struct { +// ProposalHeader is a block header and the proposer's signature for the block. +type ProposalHeader struct { + Header *Header + // ProposerSigData is a signature of the proposer over the new block. Not a single cryptographic + // signature since the data represents cryptographic signatures serialized in some way (concatenation or other) + ProposerSigData []byte +} + +// HeaderBody contains all block header metadata, except for the payload hash. +// HeaderBody generally should not be used on its own. 
It is merely a container used by other +// data structures in the code base. For example, it is embedded within [Block], [Header], and the +// respective collector cluster structs - those types should be used in almost all circumstances. +// CAUTION regarding security: +// - HeaderBody does not contain the hash of the block payload. Therefore, it is not a cryptographic digest +// of the block and should not be confused with a "proper" header, which commits to the _entire_ content +// of a block. +// - With a byzantine HeaderBody alone, an honest node cannot prove who created that faulty data structure, +// because HeaderBody does not include the proposer's signature. +// +//structwrite:immutable - mutations allowed only within the constructor +type HeaderBody struct { // ChainID is a chain-specific value to prevent replay attacks. ChainID ChainID // ParentID is the ID of this block's parent. ParentID Identifier // Height is the height of the parent + 1 Height uint64 - // PayloadHash is a hash of the payload of this block. - PayloadHash Identifier - // Timestamp is the time at which this block was proposed. + // Timestamp is the time at which this block was proposed, in Unix milliseconds. // The proposer can choose any time, so this should not be trusted as accurate. - Timestamp time.Time + Timestamp uint64 // View number at which this block was proposed. View uint64 // ParentView number at which parent block was proposed. @@ -39,17 +54,194 @@ type Header struct { ParentVoterSigData []byte // ProposerID is a proposer identifier for the block ProposerID Identifier - // ProposerSigData is a signature of the proposer over the new block. Not a single cryptographic - // signature since the data represents cryptographic signatures serialized in some way (concatenation or other) - ProposerSigData []byte // LastViewTC is a timeout certificate for previous view, it can be nil // it has to be present if previous round ended with timeout. LastViewTC *TimeoutCertificate } -// Body returns the immutable part of the block header. -func (h Header) Body() interface{} { - return struct { +// UntrustedHeaderBody is an untrusted input-only representation of a HeaderBody, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedHeaderBody should be validated and converted into +// a trusted HeaderBody using the NewHeaderBody constructor. +type UntrustedHeaderBody HeaderBody +
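Since Timestamp is now a plain uint64 holding Unix milliseconds rather than a time.Time, the UTC-normalization logic removed from the marshalling methods later in this diff becomes unnecessary. A standard-library conversion sketch:

    ts := uint64(time.Now().UnixMilli()) // encoding: capture the proposal time in Unix milliseconds

    proposedAt := time.UnixMilli(int64(ts)).UTC() // decoding: recover a time.Time, unambiguously UTC
    _ = proposedAt

+// NewHeaderBody creates a new instance of HeaderBody. +// Construction of a HeaderBody is allowed only within the constructor. +// +// All errors indicate a valid HeaderBody cannot be constructed from the input.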
+func NewHeaderBody(untrusted UntrustedHeaderBody) (*HeaderBody, error) { + if untrusted.ChainID == "" { + return nil, fmt.Errorf("ChainID must not be empty") + } + + // Require each of the four parent-QC fields explicitly, so we get + // precise errors instead of a generic “missing parent QC.” + if untrusted.ParentID == ZeroID { + return nil, fmt.Errorf("ParentID must not be empty") + } + if len(untrusted.ParentVoterIndices) == 0 { + return nil, fmt.Errorf("ParentVoterIndices must not be empty") + } + if len(untrusted.ParentVoterSigData) == 0 { + return nil, fmt.Errorf("ParentVoterSigData must not be empty") + } + if untrusted.ProposerID == ZeroID { + return nil, fmt.Errorf("ProposerID must not be empty") + } + + // Now enforce non-root semantics: + if untrusted.Height == 0 { + return nil, fmt.Errorf("Height must be > 0 for non-root header") + } + if untrusted.View == 0 { + return nil, fmt.Errorf("View must be > 0 for non-root header") + } + if untrusted.ParentView >= untrusted.View { + return nil, fmt.Errorf( + "ParentView (%d) must be less than View (%d)", + untrusted.ParentView, untrusted.View, + ) + } + if untrusted.Timestamp == 0 { + return nil, fmt.Errorf("Timestamp must not be zero-value") + } + + hb := HeaderBody(untrusted) + return &hb, nil +} + +// NewRootHeaderBody creates a new instance of root HeaderBody. +// This constructor must be used **only** for constructing the root header body, +// which is the only case where zero values are allowed. +func NewRootHeaderBody(untrusted UntrustedHeaderBody) (*HeaderBody, error) { + if untrusted.ChainID == "" { + return nil, fmt.Errorf("ChainID of root header body must not be empty") + } + + if len(untrusted.ParentVoterIndices) != 0 { + return nil, fmt.Errorf("root header body must not set ParentVoterIndices") + } + if len(untrusted.ParentVoterSigData) != 0 { + return nil, fmt.Errorf("root header body must not set ParentVoterSigData") + } + if untrusted.ProposerID != ZeroID { + return nil, fmt.Errorf("root header body must not set ProposerID") + } + if untrusted.ParentView != 0 { + return nil, fmt.Errorf("ParentView of root header body must be zero") + } + if untrusted.Timestamp == 0 { + return nil, fmt.Errorf("Timestamp of root header body must not be zero") + } + + hb := HeaderBody(untrusted) + return &hb, nil +} + +// ParentQC returns the quorum certificate that is incorporated in the block header. +// Callers *must* first verify that a parent QC is present (e.g. via ContainsParentQC) +// before calling ParentQC. If no valid parent QC data exists (such as on a spork‐root +// header), ParentQC will panic. +func (h HeaderBody) ParentQC() *QuorumCertificate { + qc, err := NewQuorumCertificate(UntrustedQuorumCertificate{ + BlockID: h.ParentID, + View: h.ParentView, + SignerIndices: h.ParentVoterIndices, + SigData: h.ParentVoterSigData, + }) + if err != nil { + panic(fmt.Errorf("could not build parent quorum certificate: %w", err)) + } + + return qc +} + +// ContainsParentQC reports whether this header carries a valid parent QC. +// It returns true only if all of the fields required to build a QC are non-zero/non-nil, +// indicating that ParentQC() can be safely called without panicking. +// Only spork root blocks or network genesis blocks do not contain a parent QC. +func (h HeaderBody) ContainsParentQC() bool { + return h.ParentID != ZeroID && + h.ParentVoterIndices != nil && + h.ParentVoterSigData != nil && + h.ProposerID != ZeroID +} +
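A guard sketch for the pair of methods above (hb and processQC are hypothetical): callers check ContainsParentQC before calling ParentQC, which panics when the parent-QC fields are absent; by design only spork-root and genesis headers lack them.

    if hb.ContainsParentQC() {
    	qc := hb.ParentQC() // guaranteed not to panic after the check
    	processQC(qc)
    } // else: spork root / genesis header, which carries no parent QC by design

+// Header contains all meta-data for a block, as well as a hash of the block payload.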
+// Headers are used when the metadata about a block is needed, but the payload is not. +// Because [Header] includes the payload hash for the block, and the block ID is Merkle-ized +// with the Payload field as a Merkle tree node, the block ID can be computed from the [Header]. +// CAUTION regarding security: +// - With a byzantine Header alone, an honest node cannot prove who created that faulty data structure, +// because the Header does not include the proposer's signature. +// +//structwrite:immutable - mutations allowed only within the constructor +type Header struct { + HeaderBody + // PayloadHash is a hash of the payload of this block. + PayloadHash Identifier +} + +// UntrustedHeader is an untrusted input-only representation of a Header, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedHeader should be validated and converted into +// a trusted Header using NewHeader constructor. +type UntrustedHeader Header + +// NewHeader creates a new instance of Header. +// Construction of a Header is allowed only within the constructor. +// +// All errors indicate a valid Header cannot be constructed from the input. +func NewHeader(untrusted UntrustedHeader) (*Header, error) { + headerBody, err := NewHeaderBody(UntrustedHeaderBody(untrusted.HeaderBody)) + if err != nil { + return nil, fmt.Errorf("invalid header body: %w", err) + } + + if untrusted.PayloadHash == ZeroID { + return nil, fmt.Errorf("PayloadHash must not be empty") + } + + return &Header{ + HeaderBody: *headerBody, + PayloadHash: untrusted.PayloadHash, + }, nil +} + +// NewRootHeader creates a root header. +// +// This constructor must be used **only** for constructing the root header, +// which is the only case where zero values are allowed. +func NewRootHeader(untrusted UntrustedHeader) (*Header, error) { + rootHeaderBody, err := NewRootHeaderBody(UntrustedHeaderBody(untrusted.HeaderBody)) + if err != nil { + return nil, fmt.Errorf("invalid root header body: %w", err) + } + + if untrusted.PayloadHash == ZeroID { + return nil, fmt.Errorf("PayloadHash must not be empty") + } + + return &Header{ + HeaderBody: *rootHeaderBody, + PayloadHash: untrusted.PayloadHash, + }, nil +} + +// Fingerprint defines custom encoding for the header to calculate its ID. +// The hash of the LastViewTC is used instead of directly encoding the timeout certificate. +func (h Header) Fingerprint() []byte { + return fingerprint.Fingerprint(struct { ChainID ChainID ParentID Identifier Height uint64 @@ -66,28 +258,14 @@ func (h Header) Body() interface{} { ParentID: h.ParentID, Height: h.Height, PayloadHash: h.PayloadHash, - Timestamp: uint64(h.Timestamp.UnixNano()), + Timestamp: h.Timestamp, View: h.View, ParentView: h.ParentView, ParentVoterIndices: h.ParentVoterIndices, ParentVoterSigData: h.ParentVoterSigData, ProposerID: h.ProposerID, LastViewTCID: h.LastViewTC.ID(), - } -} - -// QuorumCertificate returns quorum certificate that is incorporated in the block header.
-func (h Header) QuorumCertificate() *QuorumCertificate { - return &QuorumCertificate{ - BlockID: h.ParentID, - View: h.ParentView, - SignerIndices: h.ParentVoterIndices, - SigData: h.ParentVoterSigData, - } -} - -func (h Header) Fingerprint() []byte { - return fingerprint.Fingerprint(h.Body()) + }) } // ID returns a unique ID to singularly identify the header and its block @@ -96,20 +274,11 @@ func (h Header) ID() Identifier { return MakeID(h) } -// Checksum returns the checksum of the header. -func (h Header) Checksum() Identifier { - return MakeID(h) -} - // MarshalJSON makes sure the timestamp is encoded in UTC. +// +//nolint:structwrite func (h Header) MarshalJSON() ([]byte, error) { - // NOTE: this is just a sanity check to make sure that we don't get - // different encodings if someone forgets to use UTC timestamps - if h.Timestamp.Location() != time.UTC { - h.Timestamp = h.Timestamp.UTC() - } - // we use an alias to avoid endless recursion; the alias will not have the // marshal function and encode like a raw header type Encodable Header @@ -123,6 +292,8 @@ func (h Header) MarshalJSON() ([]byte, error) { } // UnmarshalJSON makes sure the timestamp is decoded in UTC. +// +//nolint:structwrite func (h *Header) UnmarshalJSON(data []byte) error { // we use an alias to avoid endless recursion; the alias will not have the @@ -130,25 +301,14 @@ func (h *Header) UnmarshalJSON(data []byte) error { type Decodable *Header err := json.Unmarshal(data, Decodable(h)) - // NOTE: the timezone check is not required for JSON, as it already encodes - // timezones, but it doesn't hurt to add it in case someone messes with the - // raw encoded format - if h.Timestamp.Location() != time.UTC { - h.Timestamp = h.Timestamp.UTC() - } - return err } // MarshalCBOR makes sure the timestamp is encoded in UTC. +// +//nolint:structwrite func (h Header) MarshalCBOR() ([]byte, error) { - // NOTE: this is just a sanity check to make sure that we don't get - // different encodings if someone forgets to use UTC timestamps - if h.Timestamp.Location() != time.UTC { - h.Timestamp = h.Timestamp.UTC() - } - // we use an alias to avoid endless recursion; the alias will not have the // marshal function and encode like a raw header type Encodable Header @@ -156,6 +316,8 @@ func (h Header) MarshalCBOR() ([]byte, error) { } // UnmarshalCBOR makes sure the timestamp is decoded in UTC. +// +//nolint:structwrite func (h *Header) UnmarshalCBOR(data []byte) error { // we use an alias to avoid endless recursion; the alias will not have the @@ -167,25 +329,14 @@ func (h *Header) UnmarshalCBOR(data []byte) error { err := cbor.Unmarshal(data, &decodable) *h = Header(decodable) - // NOTE: the timezone check is not required for CBOR, as it already encodes - // timezones, but it doesn't hurt to add it in case someone messes with the - // raw encoded format - if h.Timestamp.Location() != time.UTC { - h.Timestamp = h.Timestamp.UTC() - } - return err } // MarshalMsgpack makes sure the timestamp is encoded in UTC. 
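The marshalling methods in this file all rely on the same device, worth spelling out once: a local alias type drops the custom marshaller method set, so encoding the alias cannot recurse back into the custom method. A self-contained illustration (Thing is hypothetical, not part of this diff; assumes encoding/json is imported):

    type Thing struct{ A, B int }

    func (t Thing) MarshalJSON() ([]byte, error) {
    	type Encodable Thing // alias: identical fields, but no MarshalJSON method
    	return json.Marshal(Encodable(t)) // plain struct encoding; no infinite recursion
    }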
+// +//nolint:structwrite func (h Header) MarshalMsgpack() ([]byte, error) { - // NOTE: this is just a sanity check to make sure that we don't get - // different encodings if someone forgets to use UTC timestamps - if h.Timestamp.Location() != time.UTC { - h.Timestamp = h.Timestamp.UTC() - } - // we use an alias to avoid endless recursion; the alias will not have the // marshal function and encode like a raw header type Encodable Header @@ -193,6 +344,8 @@ func (h Header) MarshalMsgpack() ([]byte, error) { } // UnmarshalMsgpack makes sure the timestamp is decoded in UTC. +// +//nolint:structwrite func (h *Header) UnmarshalMsgpack(data []byte) error { // we use an alias to avoid endless recursion; the alias will not have the @@ -204,12 +357,5 @@ func (h *Header) UnmarshalMsgpack(data []byte) error { err := msgpack.Unmarshal(data, &decodable) *h = Header(decodable) - // NOTE: Msgpack unmarshals timestamps with the local timezone, which means - // that a block ID would suddenly be different after encoding and decoding - // on a machine with non-UTC local time - if h.Timestamp.Location() != time.UTC { - h.Timestamp = h.Timestamp.UTC() - } - return err } diff --git a/model/flow/header_body_builder.go b/model/flow/header_body_builder.go new file mode 100644 index 00000000000..1b01297e68e --- /dev/null +++ b/model/flow/header_body_builder.go @@ -0,0 +1,128 @@ +package flow + +import ( + "fmt" + + "github.com/onflow/flow-go/ledger/common/bitutils" +) + +// headerBodyFieldBitIndex enumerates required fields in HeaderBody so that HeaderBodyBuilder +// can enforce that all required fields are explicitly set (including to zero values) prior to building. +type headerBodyFieldBitIndex int + +const ( + chainIDFieldBitIndex headerBodyFieldBitIndex = iota + parentIDFieldBitIndex + heightFieldBitIndex + timestampFieldBitIndex + viewFieldBitIndex + parentViewFieldBitIndex + parentVoterIndicesFieldBitIndex + parentVoterSigDataFieldBitIndex + proposerIDFieldBitIndex + numHeaderBodyFields // always keep this last +) + +// String returns the name of the field corresponding to this bit index. +func (f headerBodyFieldBitIndex) String() string { + switch f { + case chainIDFieldBitIndex: + return "ChainID" + case parentIDFieldBitIndex: + return "ParentID" + case heightFieldBitIndex: + return "Height" + case timestampFieldBitIndex: + return "Timestamp" + case viewFieldBitIndex: + return "View" + case parentViewFieldBitIndex: + return "ParentView" + case parentVoterIndicesFieldBitIndex: + return "ParentVoterIndices" + case parentVoterSigDataFieldBitIndex: + return "ParentVoterSigData" + case proposerIDFieldBitIndex: + return "ProposerID" + default: + return fmt.Sprintf("UnknownField(%d)", int(f)) + } +} + +// HeaderBodyBuilder constructs a validated, immutable HeaderBody in two phases: +// first by setting individual fields using fluent WithX methods, then by calling Build() +// to perform minimal validity and sanity checks and return the final [HeaderBody]. +type HeaderBodyBuilder struct { + u UntrustedHeaderBody + present []byte +} + +// NewHeaderBodyBuilder helps to build a new HeaderBody. +func NewHeaderBodyBuilder() *HeaderBodyBuilder { + return &HeaderBodyBuilder{ + present: bitutils.MakeBitVector(int(numHeaderBodyFields)), + } +} + +// Build validates and returns an immutable HeaderBody. All required fields must be explicitly set (even if they are zero). +// All errors indicate that a valid HeaderBody cannot be created from the current builder state.
+func (b *HeaderBodyBuilder) Build() (*HeaderBody, error) { + // make sure every required field was initialized + for bit := 0; bit < int(numHeaderBodyFields); bit++ { + if bitutils.ReadBit(b.present, bit) == 0 { + return nil, fmt.Errorf("HeaderBodyBuilder: missing field %s", headerBodyFieldBitIndex(bit)) + } + } + + return NewHeaderBody(b.u) +} + +func (h *HeaderBodyBuilder) WithChainID(id ChainID) *HeaderBodyBuilder { + h.u.ChainID = id + bitutils.WriteBit(h.present, int(chainIDFieldBitIndex), 1) + return h +} +func (h *HeaderBodyBuilder) WithParentID(pid Identifier) *HeaderBodyBuilder { + h.u.ParentID = pid + bitutils.WriteBit(h.present, int(parentIDFieldBitIndex), 1) + return h +} +func (h *HeaderBodyBuilder) WithHeight(height uint64) *HeaderBodyBuilder { + h.u.Height = height + bitutils.WriteBit(h.present, int(heightFieldBitIndex), 1) + return h +} +func (h *HeaderBodyBuilder) WithTimestamp(t uint64) *HeaderBodyBuilder { + h.u.Timestamp = t + bitutils.WriteBit(h.present, int(timestampFieldBitIndex), 1) + return h +} +func (h *HeaderBodyBuilder) WithView(v uint64) *HeaderBodyBuilder { + h.u.View = v + bitutils.WriteBit(h.present, int(viewFieldBitIndex), 1) + return h +} +func (h *HeaderBodyBuilder) WithParentView(pv uint64) *HeaderBodyBuilder { + h.u.ParentView = pv + bitutils.WriteBit(h.present, int(parentViewFieldBitIndex), 1) + return h +} +func (h *HeaderBodyBuilder) WithParentVoterIndices(idx []byte) *HeaderBodyBuilder { + h.u.ParentVoterIndices = idx + bitutils.WriteBit(h.present, int(parentVoterIndicesFieldBitIndex), 1) + return h +} +func (h *HeaderBodyBuilder) WithParentVoterSigData(sig []byte) *HeaderBodyBuilder { + h.u.ParentVoterSigData = sig + bitutils.WriteBit(h.present, int(parentVoterSigDataFieldBitIndex), 1) + return h +} +func (h *HeaderBodyBuilder) WithProposerID(id Identifier) *HeaderBodyBuilder { + h.u.ProposerID = id + bitutils.WriteBit(h.present, int(proposerIDFieldBitIndex), 1) + return h +} +func (h *HeaderBodyBuilder) WithLastViewTC(tc *TimeoutCertificate) *HeaderBodyBuilder { + h.u.LastViewTC = tc + return h +} diff --git a/model/flow/header_test.go b/model/flow/header_test.go index cbc6b2fc272..a5bea15d9ba 100644 --- a/model/flow/header_test.go +++ b/model/flow/header_test.go @@ -2,6 +2,7 @@ package flow_test import ( "encoding/json" + "fmt" "testing" "time" @@ -11,8 +12,6 @@ import ( "github.com/vmihailenco/msgpack/v4" "github.com/onflow/flow-go/consensus/hotstuff/helper" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/model/encoding/rlp" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -30,44 +29,6 @@ func TestHeaderEncodingJSON(t *testing.T) { assert.Equal(t, *header, decoded) } -func TestHeaderFingerprint(t *testing.T) { - header := unittest.BlockHeaderFixture() - header.LastViewTC = helper.MakeTC() - headerID := header.ID() - data := header.Fingerprint() - var decoded struct { - ChainID flow.ChainID - ParentID flow.Identifier - Height uint64 - PayloadHash flow.Identifier - Timestamp uint64 - View uint64 - ParentView uint64 - ParentVoterIndices []byte - ParentVoterSigData crypto.Signature - ProposerID flow.Identifier - LastViewTC interface{} - } - rlp.NewMarshaler().MustUnmarshal(data, &decoded) - decHeader := &flow.Header{ - ChainID: decoded.ChainID, - ParentID: decoded.ParentID, - Height: decoded.Height, - PayloadHash: decoded.PayloadHash, - Timestamp: time.Unix(0, int64(decoded.Timestamp)).UTC(), - View: decoded.View, - ParentView: decoded.ParentView, - ParentVoterIndices: 
decoded.ParentVoterIndices, - ParentVoterSigData: decoded.ParentVoterSigData, - ProposerID: decoded.ProposerID, - ProposerSigData: header.ProposerSigData, // since this field is not encoded/decoded, just set it to the original value to pass test - LastViewTC: header.LastViewTC, - } - decodedID := decHeader.ID() - assert.Equal(t, headerID, decodedID) - assert.Equal(t, *header, *decHeader) -} - func TestHeaderEncodingMsgpack(t *testing.T) { header := unittest.BlockHeaderFixture() headerID := header.ID() @@ -94,11 +55,642 @@ func TestHeaderEncodingCBOR(t *testing.T) { assert.Equal(t, *header, decoded) } -func TestNonUTCTimestampSameHashAsUTC(t *testing.T) { +func TestHeaderMalleability(t *testing.T) { header := unittest.BlockHeaderFixture() - headerID := header.ID() - loc := time.FixedZone("UTC-8", -8*60*60) - header.Timestamp = header.Timestamp.In(loc) - checkedID := header.ID() - assert.Equal(t, headerID, checkedID) + // Require that LastViewTC (TimeoutCertificate) is not malleable, since its ID is incorporated in Header's ID + unittest.RequireEntityNonMalleable(t, helper.MakeTC()) + unittest.RequireEntityNonMalleable(t, header) +} + +// TestNewRootHeaderBody verifies that NewRootHeaderBody enforces root‐only constraints, +// using a fixture helper to supply only the field overrides needed per case. +// +// Test Cases: +// +// 1. Valid root input with non‐zero ParentID: +// - Ensures a HeaderBody is returned when only ChainID and Timestamp are set, +// ParentView is zero, QC fields empty, and ParentID != ZeroID. +// +// 2. Valid root input with zero ParentID: +// - Same as above, but ParentID==ZeroID. Should also succeed. +// +// 3. Missing ChainID: +// - Ensures an error is returned when ChainID is empty. +// +// 4. Non‐empty ParentVoterIndices: +// - Ensures an error is returned when ParentVoterIndices is non‐empty. +// +// 5. Non‐empty ParentVoterSigData: +// - Ensures an error is returned when ParentVoterSigData is non‐empty. +// +// 6. Non‐zero ProposerID: +// - Ensures an error is returned when ProposerID is non‐zero. +// +// 7. Non‐zero ParentView: +// - Ensures an error is returned when ParentView is non‐zero. +// +// 8. Zero Timestamp: +// - Ensures an error is returned when Timestamp is the zero value. 
+func TestNewRootHeaderBody(t *testing.T) { + t.Run("valid root input with non-zero ParentID", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(WithRootDefaults()) + hb, err := flow.NewRootHeaderBody(u) + assert.NoError(t, err) + assert.NotNil(t, hb) + }) + + t.Run("valid root input with zero ParentID", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ParentID = flow.ZeroID + }) + hb, err := flow.NewRootHeaderBody(u) + assert.NoError(t, err) + assert.NotNil(t, hb) + }) + + t.Run("missing ChainID", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ChainID = "" + }) + hb, err := flow.NewRootHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "ChainID of root header body must not be empty") + }) + + t.Run("non-empty ParentVoterIndices", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ParentVoterIndices = unittest.SignerIndicesFixture(4) + }) + hb, err := flow.NewRootHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "must not set ParentVoterIndices") + }) + + t.Run("non-empty ParentVoterSigData", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ParentVoterSigData = unittest.QCSigDataFixture() + }) + hb, err := flow.NewRootHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "must not set ParentVoterSigData") + }) + + t.Run("non-zero ProposerID", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ProposerID = unittest.IdentifierFixture() + }) + hb, err := flow.NewRootHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "must not set ProposerID") + }) + + t.Run("non-zero ParentView", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ParentView = 7 + }) + hb, err := flow.NewRootHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "ParentView of root header body must be zero") + }) + + t.Run("zero Timestamp", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.Timestamp = 0 + }) + hb, err := flow.NewRootHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "Timestamp of root header body must not be zero") + }) +} + +// TestNewHeaderBody verifies the behavior of the NewHeaderBody constructor after +// grouping of the parent‐QC checks and addition of height/view ordering checks. +// +// Test Cases: +// +// 1. Valid input: +// - All required fields are set (ChainID, parent‐QC, Height>0, View>0, ParentView<View, non-zero Timestamp). +// - Expect no error. +// +// 2. Missing ChainID: +// - ChainID is empty. +// - Ensures an error mentioning "ChainID". +// +// 3. Missing parent‐QC fields: +// - ParentID, ParentVoterIndices, ParentVoterSigData or ProposerID is missing (nil/zero). +// - Ensures an error mentioning "missing parent QC". +// +// 4. Zero Height: +// - Height set to 0. +// - Ensures an error mentioning "Height must be > 0". +// +// 5. Zero View: +// - View set to 0. +// - Ensures an error mentioning "View must be > 0". +// +// 6. 
ParentView ≥ View: +// - ParentView is equal to or greater than View. +// - Ensures an error mentioning "ParentView". +// +// 7. Zero Timestamp: +// - Timestamp is the zero value. +// - Ensures an error mentioning "Timestamp must not be zero-value". +func TestNewHeaderBody(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + u := UntrustedHeaderBodyFixture() + hb, err := flow.NewHeaderBody(u) + assert.NoError(t, err) + assert.NotNil(t, hb) + assert.Equal(t, flow.HeaderBody(u), *hb) + }) + + t.Run("missing ChainID", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.ChainID = "" + }) + hb, err := flow.NewHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "ChainID must not be empty") + }) + + t.Run("missing parent QC: ParentID", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.ParentID = flow.ZeroID + }) + hb, err := flow.NewHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "ParentID must not be empty") + }) + + t.Run("missing parent QC: ParentVoterIndices", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.ParentVoterIndices = nil + }) + hb, err := flow.NewHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "ParentVoterIndices must not be empty") + }) + + t.Run("missing parent QC: ParentVoterSigData", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.ParentVoterSigData = nil + }) + hb, err := flow.NewHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "ParentVoterSigData must not be empty") + }) + + t.Run("missing parent QC: ProposerID", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.ProposerID = flow.ZeroID + }) + hb, err := flow.NewHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "ProposerID must not be empty") + }) + + t.Run("zero Height", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.Height = 0 + }) + hb, err := flow.NewHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "Height must be > 0") + }) + + t.Run("zero View", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.View = 0 + }) + hb, err := flow.NewHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "View must be > 0") + }) + + t.Run("ParentView ≥ View", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + // set ParentView equal to the generated View + u.ParentView = u.View + }) + hb, err := flow.NewHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "ParentView") + }) + + t.Run("zero Timestamp", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.Timestamp = 0 + }) + hb, err := flow.NewHeaderBody(u) + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), "Timestamp must not be zero-value") + }) +} + +// TestHeaderBodyBuilder_PresenceChecks verifies that HeaderBodyBuilder.Build +// returns an error when any required setter was not called (tracked via bits in `present`). 
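+//
+// Illustrative sketch of a fully specified builder chain (parentID, ts,
+// indices, sig and proposerID stand for valid fixture values):
+//
+//	hb, err := flow.NewHeaderBodyBuilder().
+//		WithChainID("chain").
+//		WithParentID(parentID).
+//		WithHeight(42).
+//		WithTimestamp(ts).
+//		WithView(7).
+//		WithParentView(6).
+//		WithParentVoterIndices(indices).
+//		WithParentVoterSigData(sig).
+//		WithProposerID(proposerID).
+//		Build()
+//
+// Omitting any one of these setters leaves its presence bit unset, so Build
+// is expected to fail with an error naming the missing field.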
+func TestHeaderBodyBuilder_PresenceChecks(t *testing.T) { + validID := unittest.IdentifierFixture() + ts := uint64(time.Unix(1_600_000_000, 0).UnixMilli()) + + // Each entry names the field and provides the setter to call. + setters := []struct { + field string + fn func(*flow.HeaderBodyBuilder) + }{ + {"ChainID", func(b *flow.HeaderBodyBuilder) { + b.WithChainID("chain") + }}, + {"ParentID", func(b *flow.HeaderBodyBuilder) { + b.WithParentID(validID) + }}, + {"Height", func(b *flow.HeaderBodyBuilder) { + b.WithHeight(42) + }}, + {"Timestamp", func(b *flow.HeaderBodyBuilder) { + b.WithTimestamp(ts) + }}, + {"View", func(b *flow.HeaderBodyBuilder) { + b.WithView(7) + }}, + {"ParentView", func(b *flow.HeaderBodyBuilder) { + b.WithParentView(6) + }}, + {"ParentVoterIndices", func(b *flow.HeaderBodyBuilder) { + b.WithParentVoterIndices(unittest.SignerIndicesFixture(4)) + }}, + {"ParentVoterSigData", func(b *flow.HeaderBodyBuilder) { + b.WithParentVoterSigData(unittest.QCSigDataFixture()) + }}, + {"ProposerID", func(b *flow.HeaderBodyBuilder) { + b.WithProposerID(validID) + }}, + } + + // When all setters are called, Build should succeed. + t.Run("all setters present", func(t *testing.T) { + b := flow.NewHeaderBodyBuilder() + for _, s := range setters { + s.fn(b) + } + hb, err := b.Build() + assert.NoError(t, err) + assert.NotNil(t, hb) + }) + + // Omitting each setter in turn should produce an error mentioning that field. + for _, s := range setters { + t.Run(fmt.Sprintf("missing %s", s.field), func(t *testing.T) { + b := flow.NewHeaderBodyBuilder() + // call every setter except the one we're omitting + for _, other := range setters { + if other.field == s.field { + continue + } + other.fn(b) + } + hb, err := b.Build() + assert.Error(t, err) + assert.Nil(t, hb) + assert.Contains(t, err.Error(), s.field) + }) + } +} + +// TestNewRootHeader verifies the behavior of the NewRootHeader constructor. +// +// Test Cases: +// +// 1. Valid root input: +// - Embedded HeaderBody is a valid root body and PayloadHash is non-zero. +// - Ensures no error and returned Header has the correct fields. +// +// 2. Valid root input with ParentID == ZeroID and PayloadHash non-zero: +// - Same as above but ParentID is zero. Should still succeed. +// +// 3. Invalid root body: +// - Embedded HeaderBody fails root-only constraints (e.g. missing ChainID). +// - Ensures an error wrapping "invalid root header body". +// +// 4. Empty PayloadHash: +// - PayloadHash is ZeroID. +// - Ensures an error mentioning "PayloadHash must not be empty". +// +// 5. Body with ParentVoterIndices set: +// - Ensures an error wrapping "invalid root header body" is returned when HeaderBody.ParentVoterIndices is non-empty. +// +// 6. Body with ParentVoterSigData set: +// - Ensures an error wrapping "invalid root header body" is returned when HeaderBody.ParentVoterSigData is non-empty. +// +// 7. Body with ProposerID set: +// - Ensures an error wrapping "invalid root header body" is returned when HeaderBody.ProposerID is non-zero. 
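+//
+// Illustrative sketch of the construction pattern exercised below (payloadHash
+// stands for any non-zero identifier fixture):
+//
+//	u := UntrustedHeaderBodyFixture(WithRootDefaults())
+//	h, err := flow.NewRootHeader(flow.UntrustedHeader{
+//		HeaderBody:  flow.HeaderBody(u),
+//		PayloadHash: payloadHash,
+//	})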
+func TestNewRootHeader(t *testing.T) { + nonZeroHash := unittest.IdentifierFixture() + validID := unittest.IdentifierFixture() + + t.Run("valid root input", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + ) + h, err := flow.NewRootHeader(flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(u), + PayloadHash: nonZeroHash, + }) + assert.NoError(t, err) + assert.NotNil(t, h) + assert.Equal(t, nonZeroHash, h.PayloadHash) + }) + + t.Run("valid root input with zero ParentID", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ParentID = flow.ZeroID + }, + ) + h, err := flow.NewRootHeader(flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(u), + PayloadHash: nonZeroHash, + }) + assert.NoError(t, err) + assert.Zero(t, h.ParentID) + }) + + t.Run("invalid root body (missing ChainID)", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ChainID = "" + }, + ) + h, err := flow.NewRootHeader(flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(u), + PayloadHash: nonZeroHash, + }) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "invalid root header body") + }) + + t.Run("empty PayloadHash", func(t *testing.T) { + u := UntrustedHeaderBodyFixture(WithRootDefaults()) + h, err := flow.NewRootHeader(flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(u), + PayloadHash: flow.ZeroID, + }) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "PayloadHash must not be empty") + }) + + t.Run("body with ParentVoterIndices set", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ParentVoterIndices = unittest.SignerIndicesFixture(3) + }, + ) + h, err := flow.NewRootHeader(flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(u), + PayloadHash: nonZeroHash, + }) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "invalid root header body") + }) + + t.Run("body with ParentVoterSigData set", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ParentVoterSigData = unittest.QCSigDataFixture() + }, + ) + h, err := flow.NewRootHeader(flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(u), + PayloadHash: nonZeroHash, + }) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "invalid root header body") + }) + + t.Run("body with ProposerID set", func(t *testing.T) { + u := UntrustedHeaderBodyFixture( + WithRootDefaults(), + func(u *flow.UntrustedHeaderBody) { + u.ProposerID = validID + }, + ) + h, err := flow.NewRootHeader(flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(u), + PayloadHash: nonZeroHash, + }) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "invalid root header body") + }) +} + +// TestNewHeader verifies the behavior of the NewHeader constructor. +// +// Test Cases: +// +// 1. Valid input: +// - Ensures a Header is returned when HeaderBody is valid and PayloadHash is non-zero. +// +// 2. Empty ChainID: +// - Ensures an error is returned when the embedded HeaderBody.ChainID is empty. +// +// 3. Zero ParentID: +// - Ensures an error is returned when the embedded HeaderBody.ParentID is ZeroID. +// +// 4. Zero Timestamp: +// - Ensures an error is returned when the embedded HeaderBody.Timestamp is zero. +// +// 5. 
Empty ParentVoterIndices: +// - Ensures an error is returned when the embedded HeaderBody.ParentVoterIndices is empty. +// +// 6. Empty ParentVoterSigData: +// - Ensures an error is returned when the embedded HeaderBody.ParentVoterSigData is empty. +// +// 7. Zero ProposerID: +// - Ensures an error is returned when the embedded HeaderBody.ProposerID is ZeroID. +// +// 8. Missing PayloadHash: +// - Ensures an error is returned when PayloadHash is ZeroID. +func TestNewHeader(t *testing.T) { + validHash := unittest.IdentifierFixture() + + t.Run("valid input", func(t *testing.T) { + // start from a fully-populated, valid untrusted header body + uBody := UntrustedHeaderBodyFixture() + u := flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(uBody), + PayloadHash: validHash, + } + h, err := flow.NewHeader(u) + assert.NoError(t, err) + assert.NotNil(t, h) + assert.Equal(t, flow.HeaderBody(uBody), h.HeaderBody) + assert.Equal(t, validHash, h.PayloadHash) + }) + + t.Run("empty ChainID", func(t *testing.T) { + uBody := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.ChainID = "" + }) + u := flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(uBody), + PayloadHash: validHash, + } + h, err := flow.NewHeader(u) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "invalid header body") + }) + + t.Run("zero ParentID", func(t *testing.T) { + uBody := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.ParentID = flow.ZeroID + }) + u := flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(uBody), + PayloadHash: validHash, + } + h, err := flow.NewHeader(u) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "invalid header body") + }) + + t.Run("zero Timestamp", func(t *testing.T) { + uBody := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.Timestamp = 0 + }) + u := flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(uBody), + PayloadHash: validHash, + } + h, err := flow.NewHeader(u) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "invalid header body") + }) + + t.Run("empty ParentVoterIndices", func(t *testing.T) { + uBody := UntrustedHeaderBodyFixture( + func(u *flow.UntrustedHeaderBody) { + u.ParentVoterIndices = []byte{} + }, + func(u *flow.UntrustedHeaderBody) { + u.ParentVoterSigData = []byte{} + }, + ) + u := flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(uBody), + PayloadHash: validHash, + } + h, err := flow.NewHeader(u) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "invalid header body") + }) + + t.Run("empty ParentVoterSigData", func(t *testing.T) { + uBody := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.ParentVoterSigData = []byte{} + }) + u := flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(uBody), + PayloadHash: validHash, + } + h, err := flow.NewHeader(u) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "invalid header body") + }) + + t.Run("zero ProposerID", func(t *testing.T) { + uBody := UntrustedHeaderBodyFixture(func(u *flow.UntrustedHeaderBody) { + u.ProposerID = flow.ZeroID + }) + u := flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody(uBody), + PayloadHash: validHash, + } + h, err := flow.NewHeader(u) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "invalid header body") + }) + + t.Run("missing PayloadHash", func(t *testing.T) { + // use a valid body but an empty payload hash + uBody := UntrustedHeaderBodyFixture() + u := flow.UntrustedHeader{ + 
HeaderBody: flow.HeaderBody(uBody), + PayloadHash: flow.ZeroID, + } + h, err := flow.NewHeader(u) + assert.Error(t, err) + assert.Nil(t, h) + assert.Contains(t, err.Error(), "PayloadHash must not be empty") + }) +} + +// UntrustedHeaderBodyFixture returns an UntrustedHeaderBody +// pre‐populated with sane defaults. Any opts override those defaults. +func UntrustedHeaderBodyFixture(opts ...func(*flow.UntrustedHeaderBody)) flow.UntrustedHeaderBody { + u := flow.UntrustedHeaderBody(unittest.HeaderBodyFixture()) + for _, opt := range opts { + opt(&u) + } + return u +} + +// WithRootDefaults zeroes out all parent‐QC fields and enforces root constraints. +func WithRootDefaults() func(*flow.UntrustedHeaderBody) { + ts := uint64(time.Unix(1_600_000_000, 0).UnixMilli()) + return func(u *flow.UntrustedHeaderBody) { + u.ChainID = flow.Emulator // still must be non‐empty + u.ParentID = flow.ZeroID // allowed to be zero + u.Height = 0 + u.View = 0 + u.Timestamp = ts // non‐zero + u.ParentView = 0 + u.ParentVoterIndices = []byte{} + u.ParentVoterSigData = []byte{} + u.ProposerID = flow.ZeroID + u.LastViewTC = nil + } } diff --git a/model/flow/identifier.go b/model/flow/identifier.go index 62ad2a64735..e9c7861bc94 100644 --- a/model/flow/identifier.go +++ b/model/flow/identifier.go @@ -1,21 +1,20 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package flow import ( "encoding/binary" "encoding/hex" "fmt" - "math/rand" "reflect" "github.com/ipfs/go-cid" mh "github.com/multiformats/go-multihash" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/model/fingerprint" "github.com/onflow/flow-go/storage/merkle" + "github.com/onflow/flow-go/utils/rand" ) const IdentifierLen = 32 @@ -26,8 +25,16 @@ type Identifier [IdentifierLen]byte // IdentifierFilter is a filter on identifiers. type IdentifierFilter func(Identifier) bool -// IdentifierOrder is a sort for identifier -type IdentifierOrder func(Identifier, Identifier) bool +// IdentifierOrder is an order function for identifiers. +// +// It defines a strict weak ordering between identifiers. +// It returns a negative number if the first identifier is "strictly less" than the second, +// a positive number if the second identifier is "strictly less" than the first, +// and zero if the two identifiers are equal. +// +// `IdentifierOrder` can be used to sort identifiers with +// https://pkg.go.dev/golang.org/x/exp/slices#SortFunc. +type IdentifierOrder func(Identifier, Identifier) int var ( // ZeroID is the lowest value in the 32-byte ID space. @@ -37,6 +44,12 @@ var ( // HexStringToIdentifier converts a hex string to an identifier. The input // must be 64 characters long and contain only valid hex characters. func HexStringToIdentifier(hexString string) (Identifier, error) { + // hex.Decode does not check that there is enough room to decode the input into the destination + // slice, so we do it explicitly here + if len(hexString) != 64 { + return ZeroID, fmt.Errorf("malformed input, expected 64 characters, got %d", len(hexString)) + } + var identifier Identifier i, err := hex.Decode(identifier[:], []byte(hexString)) if err != nil { @@ -179,21 +192,24 @@ func CheckConcatSum(sum Identifier, fps ...Identifier) bool { return sum == computed } -// Sample returns random sample of length 'size' of the ids -// [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher-Yates_shuffle). 
-func Sample(size uint, ids ...Identifier) []Identifier { +// Sample returns non-deterministic random sample of length 'size' of the ids +func Sample(size uint, ids ...Identifier) ([]Identifier, error) { n := uint(len(ids)) dup := make([]Identifier, 0, n) dup = append(dup, ids...) // if sample size is greater than total size, return all the elements if n <= size { - return dup + return dup, nil } - for i := uint(0); i < size; i++ { - j := uint(rand.Intn(int(n - i))) - dup[i], dup[j+i] = dup[j+i], dup[i] + swap := func(i, j uint) { + dup[i], dup[j] = dup[j], dup[i] + } + + err := rand.Samples(n, size, swap) + if err != nil { + return nil, fmt.Errorf("generating randoms failed: %w", err) } - return dup[:size] + return dup[:size], nil } func CidToId(c cid.Cid) (Identifier, error) { diff --git a/model/flow/identifierList.go b/model/flow/identifierList.go index ec77a04a98f..afbeadc7a09 100644 --- a/model/flow/identifierList.go +++ b/model/flow/identifierList.go @@ -1,17 +1,15 @@ package flow import ( - "bytes" - "math/rand" - "sort" + "fmt" - "github.com/rs/zerolog/log" + "golang.org/x/exp/slices" ) // IdentifierList defines a sortable list of identifiers type IdentifierList []Identifier -// Len returns length of the IdentiferList in the number of stored identifiers. +// Len returns length of the IdentifierList in the number of stored identifiers. // It satisfies the sort.Interface making the IdentifierList sortable. func (il IdentifierList) Len() int { return len(il) @@ -30,16 +28,7 @@ func (il IdentifierList) Lookup() map[Identifier]struct{} { // Otherwise it returns true. // It satisfies the sort.Interface making the IdentifierList sortable. func (il IdentifierList) Less(i, j int) bool { - // bytes package already implements Comparable for []byte. - switch bytes.Compare(il[i][:], il[j][:]) { - case -1: - return true - case 0, 1: - return false - default: - log.Error().Msg("not fail-able with `bytes.Comparable` bounded [-1, 1].") - return false - } + return IsIdentifierCanonical(il[i], il[j]) } // Swap swaps the element i and j in the IdentifierList. @@ -60,11 +49,6 @@ func (il IdentifierList) Strings() []string { // ID returns a cryptographic commitment to the list of identifiers. // Since an IdentifierList has no mutable fields, it is equal to the checksum. func (il IdentifierList) ID() Identifier { - return il.Checksum() -} - -// Checksum returns a cryptographic commitment to the list of identifiers. -func (il IdentifierList) Checksum() Identifier { return MakeID(il) } @@ -103,15 +87,8 @@ func (il IdentifierList) Union(other IdentifierList) IdentifierList { return union } -// DeterministicSample returns deterministic random sample from the `IdentifierList` using the given seed -func (il IdentifierList) DeterministicSample(size uint, seed int64) IdentifierList { - rand.Seed(seed) - return il.Sample(size) -} - // Sample returns random sample of length 'size' of the ids -// [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher–Yates_shuffle). -func (il IdentifierList) Sample(size uint) IdentifierList { +func (il IdentifierList) Sample(size uint) (IdentifierList, error) { return Sample(size, il...) } @@ -128,22 +105,23 @@ IDLoop: return dup } +// Sort returns a sorted _copy_ of the IdentifierList, leaving the original invariant. 
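+//
+// Example (illustrative): sorting a list into canonical order with the
+// comparison function defined in identifier_order.go:
+//
+//	sorted := ids.Sort(IdentifierCanonical)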
 func (il IdentifierList) Sort(less IdentifierOrder) IdentifierList {
 	dup := il.Copy()
-	sort.Slice(dup, func(i int, j int) bool {
-		return less(dup[i], dup[j])
-	})
+	slices.SortFunc(dup, less)
 	return dup
 }
 
-// Sorted returns whether the list is sorted by the input ordering.
-func (il IdentifierList) Sorted(less IdentifierOrder) bool {
-	for i := 0; i < len(il)-1; i++ {
-		a := il[i]
-		b := il[i+1]
-		if !less(a, b) {
-			return false
+// IdentifierListFromHex builds an [IdentifierList] by parsing a list of hex strings.
+// It returns an error upon encountering the first string that is not a valid hex representation of an identifier.
+func IdentifierListFromHex(ids []string) (IdentifierList, error) {
+	idList := make(IdentifierList, len(ids))
+	for i, idStr := range ids {
+		id, err := HexStringToIdentifier(idStr)
+		if err != nil {
+			return nil, fmt.Errorf("failed to convert string representation %s to Flow Identifier: %w", idStr, err)
 		}
+		idList[i] = id
 	}
-	return true
+	return idList, nil
 }
diff --git a/model/flow/identifierList_test.go b/model/flow/identifierList_test.go
index b878938a5e3..af920be132d 100644
--- a/model/flow/identifierList_test.go
+++ b/model/flow/identifierList_test.go
@@ -5,7 +5,6 @@ import (
 	"math/rand"
 	"sort"
 	"testing"
-	"time"
 
 	"github.com/stretchr/testify/require"
 
@@ -13,6 +12,14 @@ import (
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
+// TestCanonicalOrderingMatch checks that the canonical orderings of identities and identifiers match.
+func TestCanonicalOrderingMatch(t *testing.T) {
+	identities := unittest.IdentityListFixture(100)
+	require.Equal(t,
+		identities.Sort(flow.Canonical[flow.Identity]).NodeIDs(),
+		identities.NodeIDs().Sort(flow.IdentifierCanonical))
+}
+
 // TestIdentifierListSort tests the IdentityList against its implemented sort interface
 // it generates and sorts a list of ids, and then evaluates sorting in ascending order
 func TestIdentifierListSort(t *testing.T) {
@@ -21,7 +28,7 @@ func TestIdentifierListSort(t *testing.T) {
 	var ids flow.IdentifierList = unittest.IdentifierListFixture(count)
 
 	// shuffles array before sorting to enforce some pseudo-randomness
-	rand.Seed(time.Now().UnixNano())
+	rand.Shuffle(ids.Len(), ids.Swap)
 
 	sort.Sort(ids)
 
@@ -54,3 +61,9 @@ func TestIdentifierListContains(t *testing.T) {
 	nonExistent := unittest.IdentifierFixture()
 	require.False(t, ids.Contains(nonExistent))
 }
+
+// TestIdentifierListMalleability verifies that the IdentifierList, which implements the [flow.IDEntity] interface, is not malleable.
+func TestIdentifierListMalleability(t *testing.T) {
+	identifierList := unittest.IdentifierListFixture(5)
+	unittest.RequireEntityNonMalleable(t, &identifierList)
+}
diff --git a/model/flow/identifier_order.go b/model/flow/identifier_order.go
new file mode 100644
index 00000000000..74ad37a314d
--- /dev/null
+++ b/model/flow/identifier_order.go
@@ -0,0 +1,53 @@
+package flow
+
+import (
+	"bytes"
+)
+
+// IdentifierCanonical is a function that defines a strict weak ordering "<" for identifiers.
+// It returns:
+//   - a strict negative number if id1 < id2
+//   - a strict positive number if id2 < id1
+//   - zero if id1 and id2 are equal
+//
+// By definition, two Identifiers (id1, id2) are in canonical order if id1 is lexicographically
+// _strictly_ smaller than id2. The strictness is important, meaning that duplicates do not
+// satisfy canonical ordering (order is irreflexive). Hence, only a returned strictly negative
+// value means the pair is in canonical order.
+// Use `IsIdentifierCanonical` for canonical order checks.
+//
+// The current implementation is based on the lexicographic comparison of the identifiers' bytes.
+// Example:
+//
+//	IdentifierCanonical(Identifier{1}, Identifier{2}) // -1
+//	IdentifierCanonical(Identifier{2}, Identifier{1}) // 1
+//	IdentifierCanonical(Identifier{1}, Identifier{1}) // 0
+//	IdentifierCanonical(Identifier{0, 1}, Identifier{0, 2}) // -1
+func IdentifierCanonical(id1 Identifier, id2 Identifier) int {
+	return bytes.Compare(id1[:], id2[:])
+}
+
+// IsIdentifierCanonical returns true if and only if the given identifiers are in canonical order.
+//
+// By convention, two identifiers (i1, i2) are in canonical order if i1's bytes
+// are lexicographically _strictly_ smaller than i2's bytes.
+//
+// The strictness is important, meaning that the canonical order
+// is irreflexive ((i,i) isn't in canonical order).
+func IsIdentifierCanonical(i1, i2 Identifier) bool {
+	return IdentifierCanonical(i1, i2) < 0
+}
+
+// IsIdentifierListCanonical returns true if and only if the given list is
+// _strictly_ sorted with regards to the canonical order.
+//
+// The strictness is important here, meaning that a list with 2 equal identifiers
+// isn't considered well sorted.
+func IsIdentifierListCanonical(il IdentifierList) bool {
+	for i := 0; i < len(il)-1; i++ {
+		if !IsIdentifierCanonical(il[i], il[i+1]) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/model/flow/identifier_test.go b/model/flow/identifier_test.go
index a4362e95f37..2e75b27b421 100644
--- a/model/flow/identifier_test.go
+++ b/model/flow/identifier_test.go
@@ -1,10 +1,10 @@
 package flow_test
 
 import (
+	"crypto/rand"
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
-	"math/rand"
 	"testing"
 
 	blocks "github.com/ipfs/go-block-format"
@@ -66,20 +66,23 @@ func TestIdentifierSample(t *testing.T) {
 
 	t.Run("Sample creates a random sample", func(t *testing.T) {
 		sampleSize := uint(5)
-		sample := flow.Sample(sampleSize, ids...)
+		sample, err := flow.Sample(sampleSize, ids...)
+		require.NoError(t, err)
 		require.Len(t, sample, int(sampleSize))
 		require.NotEqual(t, sample, ids[:sampleSize])
 	})
 
 	t.Run("sample size greater than total size results in the original list", func(t *testing.T) {
 		sampleSize := uint(len(ids) + 1)
-		sample := flow.Sample(sampleSize, ids...)
+		sample, err := flow.Sample(sampleSize, ids...)
+		require.NoError(t, err)
 		require.Equal(t, sample, ids)
 	})
 
 	t.Run("sample size of zero results in an empty list", func(t *testing.T) {
 		sampleSize := uint(0)
-		sample := flow.Sample(sampleSize, ids...)
+		sample, err := flow.Sample(sampleSize, ids...)
+ require.NoError(t, err) require.Empty(t, sample) }) } @@ -131,7 +134,8 @@ func TestCIDConversion(t *testing.T) { // generate random CID data := make([]byte, 4) - rand.Read(data) + _, err = rand.Read(data) + require.NoError(t, err) cid = blocks.NewBlock(data).Cid() id, err = flow.CidToId(cid) @@ -151,3 +155,38 @@ func TestByteConversionRoundTrip(t *testing.T) { require.Equal(t, len(ids), len(converted)) require.ElementsMatch(t, ids, converted) } + +func TestHexStringToIdentifier(t *testing.T) { + type testcase struct { + hex string + expectError string + } + + cases := []testcase{{ + // non-hex characters + hex: "123456789012345678901234567890123456789012345678901234567890123z", + expectError: "encoding/hex: invalid byte: U+007A 'z'", + }, { + // too short + hex: "1234", + expectError: "malformed input, expected 64 characters, got 4", + }, { + // too long + hex: "123456789012345678901234567890123456789012345678901234567890123456", + expectError: "malformed input, expected 64 characters, got 66", + }, { + // just right + hex: "1234567890123456789012345678901234567890123456789012345678901234", + expectError: "", + }} + + for _, tcase := range cases { + id, err := flow.HexStringToIdentifier(tcase.hex) + if tcase.expectError != "" { + assert.Equal(t, tcase.expectError, err.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, tcase.hex, id.String()) + } + } +} diff --git a/model/flow/identity.go b/model/flow/identity.go index f05188988e6..eb248ac97f6 100644 --- a/model/flow/identity.go +++ b/model/flow/identity.go @@ -1,34 +1,22 @@ package flow import ( - "bytes" "encoding/json" "fmt" "io" - "math" - "math/rand" - "regexp" - "strconv" - - "golang.org/x/exp/slices" "github.com/ethereum/go-ethereum/rlp" "github.com/fxamacker/cbor/v2" - "github.com/pkg/errors" - "github.com/vmihailenco/msgpack" - - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/vmihailenco/msgpack/v4" ) // DefaultInitialWeight is the default initial weight for a node identity. // It is equal to the default initial weight in the FlowIDTableStaking smart contract. const DefaultInitialWeight = 100 -// rxid is the regex for parsing node identity entries. -var rxid = regexp.MustCompile(`^(collection|consensus|execution|verification|access)-([0-9a-fA-F]{64})@([\w\d]+|[\w\d][\w\d\-]*[\w\d](?:\.*[\w\d][\w\d\-]*[\w\d])*|[\w\d][\w\d\-]*[\w\d])(:[\d]+)?=(\d{1,20})$`) - -// Identity represents the public identity of one network participant (node). -type Identity struct { +// IdentitySkeleton represents the static part of a network participant's (i.e. node's) public identity. +type IdentitySkeleton struct { // NodeID uniquely identifies a particular node. A node's ID is fixed for // the duration of that node's participation in the network. NodeID Identifier @@ -37,111 +25,182 @@ type Identity struct { // Role is the node's role in the network and defines its abilities and // responsibilities. Role Role - // Weight represents the node's authority to perform certain tasks relative - // to other nodes. For example, in the consensus committee, the node's weight - // represents the weight assigned to its votes. - // - // A node's weight is distinct from its stake. Stake represents the quantity - // of FLOW tokens held by the network in escrow during the course of the node's - // participation in the network. The stake is strictly managed by the service - // account smart contracts. 
- // - // Nodes which are registered to join at the next epoch will appear in the - // identity table but are considered to have zero weight up until their first - // epoch begins. Likewise nodes which were registered in the previous epoch - // but have left at the most recent epoch boundary will appear in the identity - // table with zero weight. - Weight uint64 - // Ejected represents whether a node has been permanently removed from the - // network. A node may be ejected for either: - // * committing one protocol felony - // * committing a series of protocol misdemeanours - Ejected bool + // InitialWeight is a 'trust score' initially assigned by EpochSetup event after + // the staking phase. The initial weights define the supermajority thresholds for + // the cluster and security node consensus throughout the Epoch. + InitialWeight uint64 StakingPubKey crypto.PublicKey NetworkPubKey crypto.PublicKey } -// ParseIdentity parses a string representation of an identity. -func ParseIdentity(identity string) (*Identity, error) { - - // use the regex to match the four parts of an identity - matches := rxid.FindStringSubmatch(identity) - if len(matches) != 6 { - return nil, errors.New("invalid identity string format") - } +// EpochParticipationStatus represents the status of a node's participation. Depending on what +// changes were applied to the protocol state, a node may be in one of four states: +// / - joining - the node is not active in the current epoch and will be active in the next epoch. +// / - active - the node was included in the EpochSetup event for the current epoch and is actively participating in the current epoch. +// / - leaving - the node was active in the previous epoch but is not active in the current epoch. +// / - ejected - the node has been permanently removed from the network. +// +// / EpochSetup +// / ┌────────────⬤ unregistered ◯◄───────────┐ +// / ┌─────▼─────┐ ┌───────────┐ ┌─────┴─────┐ +// / │ JOINING ├───────►│ ACTIVE ├───────►│ LEAVING │ +// / └─────┬─────┘ └─────┬─────┘ └─────┬─────┘ +// / │ ┌─────▼─────┐ │ +// / └─────────────►│ EJECTED │◄─────────────┘ +// / └───────────┘ +// +// Only active nodes are allowed to perform certain tasks relative to other nodes. +// Nodes which are registered to join at the next epoch will appear in the +// identity table but aren't considered active until their first +// epoch begins. Likewise, nodes which were registered in the previous epoch +// but have left at the most recent epoch boundary will appear in the identity +// table with leaving participation status. +// A node may be ejected by either: +// - requesting self-ejection to protect its stake in case the node operator suspects +// the node's keys to be compromised +// - committing a serious protocol violation or multiple smaller misdemeanours. +type EpochParticipationStatus int + +const ( + EpochParticipationStatusJoining EpochParticipationStatus = iota + EpochParticipationStatusActive + EpochParticipationStatusLeaving + EpochParticipationStatusEjected +) - // none of these will error as they are checked by the regex - var nodeID Identifier - nodeID, err := HexStringToIdentifier(matches[2]) +// String returns string representation of enum value. 
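+//
+// Illustrative sketch: String and ParseEpochParticipationStatus (below) are
+// expected to round-trip:
+//
+//	s, err := ParseEpochParticipationStatus(EpochParticipationStatusActive.String())
+//	// err == nil, s == EpochParticipationStatusActive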
+func (s EpochParticipationStatus) String() string {
+	return [...]string{
+		"EpochParticipationStatusJoining",
+		"EpochParticipationStatusActive",
+		"EpochParticipationStatusLeaving",
+		"EpochParticipationStatusEjected",
+	}[s]
+}
+
+// ParseEpochParticipationStatus converts the string representation of an EpochParticipationStatus into a typed value.
+// An error is returned if the conversion fails.
+func ParseEpochParticipationStatus(s string) (EpochParticipationStatus, error) {
+	switch s {
+	case EpochParticipationStatusJoining.String():
+		return EpochParticipationStatusJoining, nil
+	case EpochParticipationStatusActive.String():
+		return EpochParticipationStatusActive, nil
+	case EpochParticipationStatusLeaving.String():
+		return EpochParticipationStatusLeaving, nil
+	case EpochParticipationStatusEjected.String():
+		return EpochParticipationStatusEjected, nil
+	default:
+		return 0, fmt.Errorf("invalid epoch participation status")
+	}
+}
+
+// EncodeRLP performs RLP encoding of the custom type; it is needed to hash structures that include an EpochParticipationStatus.
+// No errors are expected during normal operations.
+func (s EpochParticipationStatus) EncodeRLP(w io.Writer) error {
+	encodable := s.String()
+	err := rlp.Encode(w, encodable)
 	if err != nil {
-		return nil, err
+		return fmt.Errorf("could not encode rlp: %w", err)
 	}
-	address := matches[3] + matches[4]
-	role, _ := ParseRole(matches[1])
-	weight, _ := strconv.ParseUint(matches[5], 10, 64)
+	return nil
+}
 
-	// create the identity
-	iy := Identity{
-		NodeID:  nodeID,
-		Address: address,
-		Role:    role,
-		Weight:  weight,
-	}
+// DynamicIdentity represents the dynamic part of the public identity of one network participant (node).
+type DynamicIdentity struct {
+	EpochParticipationStatus
+}
+
+// Identity combines the static and dynamic parts and represents the full public identity of one network participant (node).
+type Identity struct {
+	IdentitySkeleton
+	DynamicIdentity
+}
 
-	return &iy, nil
+// IsEjected returns true if the node is ejected from the network.
+func (iy *DynamicIdentity) IsEjected() bool {
+	return iy.EpochParticipationStatus == EpochParticipationStatusEjected
 }
 
 // String returns a string representation of the identity.
 func (iy Identity) String() string {
-	return fmt.Sprintf("%s-%s@%s=%d", iy.Role, iy.NodeID.String(), iy.Address, iy.Weight)
+	return fmt.Sprintf("%s-%s@%s=%s", iy.Role, iy.NodeID.String(), iy.Address, iy.EpochParticipationStatus.String())
+}
+
+// String returns a string representation of the identity.
+func (iy IdentitySkeleton) String() string {
+	return fmt.Sprintf("%s-%s@%s", iy.Role, iy.NodeID.String(), iy.Address)
 }
 
-// ID returns a unique, persistent identifier for the identity.
-// CAUTION: the ID may be chosen by a node operator, so long as it is unique.
-func (iy Identity) ID() Identifier {
+// GetNodeID returns node ID for the identity. It is needed to satisfy GenericIdentity constraint.
+func (iy IdentitySkeleton) GetNodeID() Identifier {
 	return iy.NodeID
 }
 
-// Checksum returns a checksum for the identity including mutable attributes.
-func (iy Identity) Checksum() Identifier {
-	return MakeID(iy)
+// GetRole returns a node role for the identity. It is needed to satisfy GenericIdentity constraint.
+func (iy IdentitySkeleton) GetRole() Role {
+	return iy.Role
 }
 
-type encodableIdentity struct {
+// GetStakingPubKey returns staking public key for the identity. It is needed to satisfy GenericIdentity constraint.
+func (iy IdentitySkeleton) GetStakingPubKey() crypto.PublicKey { + return iy.StakingPubKey +} + +// GetNetworkPubKey returns network public key for the identity. It is needed to satisfy GenericIdentity constraint. +func (iy IdentitySkeleton) GetNetworkPubKey() crypto.PublicKey { + return iy.NetworkPubKey +} + +// GetInitialWeight returns initial weight for the identity. It is needed to satisfy GenericIdentity constraint. +func (iy IdentitySkeleton) GetInitialWeight() uint64 { + return iy.InitialWeight +} + +// GetSkeleton returns the skeleton part for the identity. It is needed to satisfy GenericIdentity constraint. +func (iy IdentitySkeleton) GetSkeleton() IdentitySkeleton { + return iy +} + +type encodableIdentitySkeleton struct { NodeID Identifier Address string `json:",omitempty"` Role Role - Weight uint64 + InitialWeight uint64 StakingPubKey []byte NetworkPubKey []byte } -// decodableIdentity provides backward-compatible decoding of old models -// which use the Stake field in place of Weight. -type decodableIdentity struct { - encodableIdentity - // Stake previously was used in place of the Weight field. - // Deprecated: supported in decoding for backward-compatibility - Stake uint64 +type encodableIdentity struct { + EncodableIdentitySkeleton encodableIdentitySkeleton + ParticipationStatus string } -func encodableFromIdentity(iy Identity) (encodableIdentity, error) { - ie := encodableIdentity{iy.NodeID, iy.Address, iy.Role, iy.Weight, nil, nil} +func encodableSkeletonFromIdentity(iy IdentitySkeleton) encodableIdentitySkeleton { + ie := encodableIdentitySkeleton{ + NodeID: iy.NodeID, + Address: iy.Address, + Role: iy.Role, + InitialWeight: iy.InitialWeight, + } if iy.StakingPubKey != nil { ie.StakingPubKey = iy.StakingPubKey.Encode() } if iy.NetworkPubKey != nil { ie.NetworkPubKey = iy.NetworkPubKey.Encode() } - return ie, nil + return ie } -func (iy Identity) MarshalJSON() ([]byte, error) { - encodable, err := encodableFromIdentity(iy) - if err != nil { - return nil, fmt.Errorf("could not convert identity to encodable: %w", err) +func encodableFromIdentity(iy Identity) encodableIdentity { + return encodableIdentity{ + EncodableIdentitySkeleton: encodableSkeletonFromIdentity(iy.IdentitySkeleton), + ParticipationStatus: iy.EpochParticipationStatus.String(), } +} +func (iy IdentitySkeleton) MarshalJSON() ([]byte, error) { + encodable := encodableSkeletonFromIdentity(iy) data, err := json.Marshal(encodable) if err != nil { return nil, fmt.Errorf("could not encode json: %w", err) @@ -149,11 +208,44 @@ func (iy Identity) MarshalJSON() ([]byte, error) { return data, nil } -func (iy Identity) MarshalCBOR() ([]byte, error) { - encodable, err := encodableFromIdentity(iy) +func (iy IdentitySkeleton) MarshalCBOR() ([]byte, error) { + encodable := encodableSkeletonFromIdentity(iy) + data, err := cbor.Marshal(encodable) if err != nil { - return nil, fmt.Errorf("could not convert identity to encodable: %w", err) + return nil, fmt.Errorf("could not encode cbor: %w", err) + } + return data, nil +} + +func (iy IdentitySkeleton) MarshalMsgpack() ([]byte, error) { + encodable := encodableSkeletonFromIdentity(iy) + data, err := msgpack.Marshal(encodable) + if err != nil { + return nil, fmt.Errorf("could not encode msgpack: %w", err) + } + return data, nil +} + +func (iy IdentitySkeleton) EncodeRLP(w io.Writer) error { + encodable := encodableSkeletonFromIdentity(iy) + err := rlp.Encode(w, encodable) + if err != nil { + return fmt.Errorf("could not encode rlp: %w", err) } + return nil +} + +func (iy 
Identity) MarshalJSON() ([]byte, error) {
+	encodable := encodableFromIdentity(iy)
+	data, err := json.Marshal(encodable)
+	if err != nil {
+		return nil, fmt.Errorf("could not encode json: %w", err)
+	}
+	return data, nil
+}
+
+func (iy Identity) MarshalCBOR() ([]byte, error) {
+	encodable := encodableFromIdentity(iy)
 	data, err := cbor.Marshal(encodable)
 	if err != nil {
 		return nil, fmt.Errorf("could not encode cbor: %w", err)
@@ -162,10 +254,7 @@ func (iy Identity) MarshalCBOR() ([]byte, error) {
 }
 
 func (iy Identity) MarshalMsgpack() ([]byte, error) {
-	encodable, err := encodableFromIdentity(iy)
-	if err != nil {
-		return nil, fmt.Errorf("could not convert to encodable: %w", err)
-	}
+	encodable := encodableFromIdentity(iy)
 	data, err := msgpack.Marshal(encodable)
 	if err != nil {
 		return nil, fmt.Errorf("could not encode msgpack: %w", err)
@@ -174,22 +263,19 @@ func (iy Identity) MarshalMsgpack() ([]byte, error) {
 }
 
 func (iy Identity) EncodeRLP(w io.Writer) error {
-	encodable, err := encodableFromIdentity(iy)
-	if err != nil {
-		return fmt.Errorf("could not convert to encodable: %w", err)
-	}
-	err = rlp.Encode(w, encodable)
+	encodable := encodableFromIdentity(iy)
+	err := rlp.Encode(w, encodable)
 	if err != nil {
 		return fmt.Errorf("could not encode rlp: %w", err)
 	}
 	return nil
 }
 
-func identityFromEncodable(ie encodableIdentity, identity *Identity) error {
+func identitySkeletonFromEncodable(ie encodableIdentitySkeleton, identity *IdentitySkeleton) error {
 	identity.NodeID = ie.NodeID
 	identity.Address = ie.Address
 	identity.Role = ie.Role
-	identity.Weight = ie.Weight
+	identity.InitialWeight = ie.InitialWeight
 	var err error
 	if ie.StakingPubKey != nil {
 		if identity.StakingPubKey, err = crypto.DecodePublicKey(crypto.BLSBLS12381, ie.StakingPubKey); err != nil {
@@ -204,20 +290,65 @@ func identityFromEncodable(ie encodableIdentity, identity *Identity) error {
 	return nil
 }
 
-func (iy *Identity) UnmarshalJSON(b []byte) error {
-	var decodable decodableIdentity
+func identityFromEncodable(ie encodableIdentity, identity *Identity) error {
+	err := identitySkeletonFromEncodable(ie.EncodableIdentitySkeleton, &identity.IdentitySkeleton)
+	if err != nil {
+		return fmt.Errorf("could not decode identity skeleton: %w", err)
+	}
+	participationStatus, err := ParseEpochParticipationStatus(ie.ParticipationStatus)
+	if err != nil {
+		return fmt.Errorf("could not decode epoch participation status: %w", err)
+	}
+	identity.EpochParticipationStatus = participationStatus
+	return nil
+}
+
+func (iy *IdentitySkeleton) UnmarshalJSON(b []byte) error {
+	var decodable encodableIdentitySkeleton
 	err := json.Unmarshal(b, &decodable)
 	if err != nil {
 		return fmt.Errorf("could not decode json: %w", err)
 	}
-	// compat: translate Stake fields to Weight
-	if decodable.Stake != 0 {
-		if decodable.Weight != 0 {
-			return fmt.Errorf("invalid identity with both Stake and Weight fields")
-		}
-		decodable.Weight = decodable.Stake
+	err = identitySkeletonFromEncodable(decodable, iy)
+	if err != nil {
+		return fmt.Errorf("could not convert from encodable json: %w", err)
+	}
+	return nil
+}
+
+func (iy *IdentitySkeleton) UnmarshalCBOR(b []byte) error {
+	var encodable encodableIdentitySkeleton
+	err := cbor.Unmarshal(b, &encodable)
+	if err != nil {
+		return fmt.Errorf("could not decode cbor: %w", err)
+	}
+	err = identitySkeletonFromEncodable(encodable, iy)
+	if err != nil {
+		return fmt.Errorf("could not convert from encodable cbor: %w", err)
+	}
+	return nil
+}
+
+func (iy *IdentitySkeleton) UnmarshalMsgpack(b []byte) error {
+	var 
encodable encodableIdentitySkeleton
+	err := msgpack.Unmarshal(b, &encodable)
+	if err != nil {
+		return fmt.Errorf("could not decode msgpack: %w", err)
+	}
+	err = identitySkeletonFromEncodable(encodable, iy)
+	if err != nil {
+		return fmt.Errorf("could not convert from encodable msgpack: %w", err)
 	}
-	err = identityFromEncodable(decodable.encodableIdentity, iy)
+	return nil
+}
+
+func (iy *Identity) UnmarshalJSON(b []byte) error {
+	var decodable encodableIdentity
+	err := json.Unmarshal(b, &decodable)
+	if err != nil {
+		return fmt.Errorf("could not decode json: %w", err)
+	}
+	err = identityFromEncodable(decodable, iy)
 	if err != nil {
 		return fmt.Errorf("could not convert from encodable json: %w", err)
 	}
@@ -250,7 +381,7 @@ func (iy *Identity) UnmarshalMsgpack(b []byte) error {
 	return nil
 }
 
-func (iy *Identity) EqualTo(other *Identity) bool {
+func (iy *IdentitySkeleton) EqualTo(other *IdentitySkeleton) bool {
 	if iy.NodeID != other.NodeID {
 		return false
 	}
@@ -260,10 +391,7 @@ func (iy *Identity) EqualTo(other *Identity) bool {
 	if iy.Role != other.Role {
 		return false
 	}
-	if iy.Weight != other.Weight {
-		return false
-	}
-	if iy.Ejected != other.Ejected {
+	if iy.InitialWeight != other.InitialWeight {
 		return false
 	}
 	if (iy.StakingPubKey != nil && other.StakingPubKey == nil) ||
@@ -285,298 +413,16 @@ func (iy *Identity) EqualTo(other *Identity) bool {
 	return true
 }
 
-// IdentityFilter is a filter on identities.
-type IdentityFilter func(*Identity) bool
-
-// IdentityOrder is a sort for identities.
-type IdentityOrder func(*Identity, *Identity) bool
-
-// IdentityMapFunc is a modifier function for map operations for identities.
-// Identities are COPIED from the source slice.
-type IdentityMapFunc func(Identity) Identity
-
-// IdentityList is a list of nodes.
-type IdentityList []*Identity
-
-// Filter will apply a filter to the identity list.
-func (il IdentityList) Filter(filter IdentityFilter) IdentityList {
-	var dup IdentityList
-IDLoop:
-	for _, identity := range il {
-		if !filter(identity) {
-			continue IDLoop
-		}
-		dup = append(dup, identity)
-	}
-	return dup
-}
-
-// Map returns a new identity list with the map function f applied to a copy of
-// each identity.
-//
-// CAUTION: this relies on structure copy semantics. Map functions that modify
-// an object referenced by the input Identity structure will modify identities
-// in the source slice as well.
-func (il IdentityList) Map(f IdentityMapFunc) IdentityList {
-	dup := make(IdentityList, 0, len(il))
-	for _, identity := range il {
-		next := f(*identity)
-		dup = append(dup, &next)
-	}
-	return dup
-}
-
-// Copy returns a copy of the receiver. The resulting slice uses a different
-// backing array, meaning appends and insert operations on either slice are
-// guaranteed to only affect that slice.
-//
-// Copy should be used when modifying an existing identity list by either
-// appending new elements, re-ordering, or inserting new elements in an
-// existing index.
-func (il IdentityList) Copy() IdentityList {
-	dup := make(IdentityList, 0, len(il))
-
-	lenList := len(il)
-
-	// performance tests show this is faster than 'range'
-	for i := 0; i < lenList; i++ {
-		// copy the object
-		next := *(il[i])
-		dup = append(dup, &next)
-	}
-	return dup
-}
-
-// Selector returns an identity filter function that selects only identities
-// within this identity list.
-func (il IdentityList) Selector() IdentityFilter { - - lookup := il.Lookup() - return func(identity *Identity) bool { - _, exists := lookup[identity.NodeID] - return exists - } -} - -func (il IdentityList) Lookup() map[Identifier]*Identity { - lookup := make(map[Identifier]*Identity, len(il)) - for _, identity := range il { - lookup[identity.NodeID] = identity - } - return lookup -} - -// Sort will sort the list using the given ordering. This is -// not recommended for performance. Expand the 'less' function -// in place for best performance, and don't use this function. -func (il IdentityList) Sort(less IdentityOrder) IdentityList { - dup := il.Copy() - slices.SortFunc(dup, less) - return dup -} - -// Sorted returns whether the list is sorted by the input ordering. -func (il IdentityList) Sorted(less IdentityOrder) bool { - return slices.IsSortedFunc(il, less) +func (iy *DynamicIdentity) EqualTo(other *DynamicIdentity) bool { + return iy.EpochParticipationStatus == other.EpochParticipationStatus } -// NodeIDs returns the NodeIDs of the nodes in the list. -func (il IdentityList) NodeIDs() IdentifierList { - nodeIDs := make([]Identifier, 0, len(il)) - for _, id := range il { - nodeIDs = append(nodeIDs, id.NodeID) - } - return nodeIDs -} - -// PublicStakingKeys returns a list with the public staking keys (order preserving). -func (il IdentityList) PublicStakingKeys() []crypto.PublicKey { - pks := make([]crypto.PublicKey, 0, len(il)) - for _, id := range il { - pks = append(pks, id.StakingPubKey) - } - return pks -} - -// ID uniquely identifies a list of identities, by node ID. This can be used -// to perpetually identify a group of nodes, even if mutable fields of some nodes -// are changed, as node IDs are immutable. -// CAUTION: -// - An IdentityList's ID is a cryptographic commitment to only node IDs. A node operator -// can freely choose the ID for their node. There is no relationship whatsoever between -// a node's ID and keys. -// - To generate a cryptographic commitment for the full IdentityList, use method `Checksum()`. -// - The outputs of `IdentityList.ID()` and `IdentityList.Checksum()` are both order-sensitive. -// Therefore, the `IdentityList` must be in canonical order, unless explicitly specified -// otherwise by the protocol. -func (il IdentityList) ID() Identifier { - return il.NodeIDs().ID() -} - -// Checksum generates a cryptographic commitment to the full IdentityList, including mutable fields. -// The checksum for the same group of identities (by NodeID) may change from block to block. -func (il IdentityList) Checksum() Identifier { - return MakeID(il) -} - -// TotalWeight returns the total weight of all given identities. -func (il IdentityList) TotalWeight() uint64 { - var total uint64 - for _, identity := range il { - total += identity.Weight - } - return total -} - -// Count returns the count of identities. -func (il IdentityList) Count() uint { - return uint(len(il)) -} - -// ByIndex returns the node at the given index. -func (il IdentityList) ByIndex(index uint) (*Identity, bool) { - if index >= uint(len(il)) { - return nil, false - } - return il[int(index)], true -} - -// ByNodeID gets a node from the list by node ID. -func (il IdentityList) ByNodeID(nodeID Identifier) (*Identity, bool) { - for _, identity := range il { - if identity.NodeID == nodeID { - return identity, true - } - } - return nil, false -} - -// ByNetworkingKey gets a node from the list by network public key. 
-func (il IdentityList) ByNetworkingKey(key crypto.PublicKey) (*Identity, bool) { - for _, identity := range il { - if identity.NetworkPubKey.Equals(key) { - return identity, true - } - } - return nil, false -} - -// Sample returns simple random sample from the `IdentityList` -func (il IdentityList) Sample(size uint) IdentityList { - return il.sample(size, rand.Intn) -} - -// DeterministicSample returns deterministic random sample from the `IdentityList` using the given seed -func (il IdentityList) DeterministicSample(size uint, seed int64) IdentityList { - rng := rand.New(rand.NewSource(seed)) - return il.sample(size, rng.Intn) -} - -func (il IdentityList) sample(size uint, intn func(int) int) IdentityList { - n := uint(len(il)) - if size > n { - size = n - } - - dup := il.Copy() - for i := uint(0); i < size; i++ { - j := uint(intn(int(n - i))) - dup[i], dup[j+i] = dup[j+i], dup[i] - } - return dup[:size] -} - -// DeterministicShuffle randomly and deterministically shuffles the identity -// list, returning the shuffled list without modifying the receiver. -func (il IdentityList) DeterministicShuffle(seed int64) IdentityList { - dup := il.Copy() - rng := rand.New(rand.NewSource(seed)) - rng.Shuffle(len(il), func(i, j int) { - dup[i], dup[j] = dup[j], dup[i] - }) - return dup -} - -// SamplePct returns a random sample from the receiver identity list. The -// sample contains `pct` percentage of the list. The sample is rounded up -// if `pct>0`, so this will always select at least one identity. -// -// NOTE: The input must be between 0-1. -func (il IdentityList) SamplePct(pct float64) IdentityList { - if pct <= 0 { - return IdentityList{} - } - - count := float64(il.Count()) * pct - size := uint(math.Round(count)) - // ensure we always select at least 1, for non-zero input - if size == 0 { - size = 1 - } - - return il.Sample(size) -} - -// Union returns a new identity list containing every identity that occurs in -// either `il`, or `other`, or both. There are no duplicates in the output, -// where duplicates are identities with the same node ID. 
-// The returned IdentityList is sorted -func (il IdentityList) Union(other IdentityList) IdentityList { - maxLen := len(il) + len(other) - - union := make(IdentityList, 0, maxLen) - set := make(map[Identifier]struct{}, maxLen) - - for _, list := range []IdentityList{il, other} { - for _, id := range list { - if _, isDuplicate := set[id.NodeID]; !isDuplicate { - set[id.NodeID] = struct{}{} - union = append(union, id) - } - } +func (iy *Identity) EqualTo(other *Identity) bool { + if !iy.IdentitySkeleton.EqualTo(&other.IdentitySkeleton) { + return false } - - slices.SortFunc(union, func(a, b *Identity) bool { - return bytes.Compare(a.NodeID[:], b.NodeID[:]) < 0 - }) - - return union -} - -// EqualTo checks if the other list if the same, that it contains the same elements -// in the same order -func (il IdentityList) EqualTo(other IdentityList) bool { - return slices.EqualFunc(il, other, func(a, b *Identity) bool { - return a.EqualTo(b) - }) -} - -// Exists takes a previously sorted Identity list and searches it for the target value -// This code is optimized, so the coding style will be different -// target: value to search for -// CAUTION: The identity list MUST be sorted prior to calling this method -func (il IdentityList) Exists(target *Identity) bool { - return il.IdentifierExists(target.NodeID) -} - -// IdentifierExists takes a previously sorted Identity list and searches it for the target value -// target: value to search for -// CAUTION: The identity list MUST be sorted prior to calling this method -func (il IdentityList) IdentifierExists(target Identifier) bool { - _, ok := slices.BinarySearchFunc(il, &Identity{NodeID: target}, func(a, b *Identity) int { - return bytes.Compare(a.NodeID[:], b.NodeID[:]) - }) - return ok -} - -// GetIndex returns the index of the identifier in the IdentityList and true -// if the identifier is found. -func (il IdentityList) GetIndex(target Identifier) (uint, bool) { - i := slices.IndexFunc(il, func(a *Identity) bool { - return a.NodeID == target - }) - if i == -1 { - return 0, false + if !iy.DynamicIdentity.EqualTo(&other.DynamicIdentity) { + return false } - return uint(i), true + return true } diff --git a/model/flow/identity_list.go b/model/flow/identity_list.go new file mode 100644 index 00000000000..efe042f2f53 --- /dev/null +++ b/model/flow/identity_list.go @@ -0,0 +1,352 @@ +package flow + +import ( + "bytes" + "fmt" + "math" + + "github.com/onflow/crypto" + "golang.org/x/exp/slices" + + "github.com/onflow/flow-go/utils/rand" +) + +// Notes on runtime EFFICIENCY of GENERIC TYPES: +// DO NOT pass an interface to a generic function (100x runtime cost as of go 1.20). +// For example, consider the function +// +// func f[T GenericIdentity]() +// +// The call `f(identity)` is completely ok and doesn't introduce overhead when `identity` is a struct type, +// such as `var identity *flow.Identity`. +// In contrast `f(identity)` where identity is declared as an interface `var identity GenericIdentity` is drastically slower, +// since golang involves a global hash table lookup for every method call to dispatch the underlying type behind the interface. + +// GenericIdentity defines a constraint for generic identities. +// Golang doesn't support constraint with fields(for time being) so we have to define this interface +// with getter methods. +// Details here: https://github.com/golang/go/issues/51259. 
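+//
+// Illustrative sketch (the helper name is hypothetical): a function constrained
+// by GenericIdentity works for both Identity and IdentitySkeleton lists:
+//
+//	func totalInitialWeight[T GenericIdentity](list GenericIdentityList[T]) uint64 {
+//		var total uint64
+//		for _, id := range list {
+//			total += (*id).GetInitialWeight()
+//		}
+//		return total
+//	}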
+// GenericIdentity defines a constraint for generic identities.
+// Go doesn't support constraints with fields (for the time being), so we have to define this interface
+// with getter methods.
+// Details here: https://github.com/golang/go/issues/51259.
+type GenericIdentity interface {
+	Identity | IdentitySkeleton
+	GetNodeID() Identifier
+	GetRole() Role
+	GetStakingPubKey() crypto.PublicKey
+	GetNetworkPubKey() crypto.PublicKey
+	GetInitialWeight() uint64
+	GetSkeleton() IdentitySkeleton
+}
+
+// IdentityFilter is a filter on identities. Mathematically, an IdentityFilter F
+// can be described as a function F: 𝓘 → 𝐼, where 𝓘 denotes the set of all identities
+// and 𝐼 ⊆ 𝓘. For an input identity i, F(i) returns true if and only if i passed the
+// filter, i.e. i ∈ 𝐼. Returning false means that some necessary criterion was violated
+// and identity i should be dropped, i.e. i ∉ 𝐼.
+type IdentityFilter[T GenericIdentity] func(*T) bool
+
+// IdentityOrder is an order function for identities.
+//
+// It defines a strict weak ordering between identities.
+// It returns a negative number if the first identity is "strictly less" than the second,
+// a positive number if the second identity is "strictly less" than the first,
+// and zero if the two identities are equal.
+//
+// `IdentityOrder` can be used to sort identities with
+// https://pkg.go.dev/golang.org/x/exp/slices#SortFunc.
+type IdentityOrder[T GenericIdentity] func(*T, *T) int
+
+// IdentityMapFunc is a modifier function for map operations for identities.
+// Identities are COPIED from the source slice.
+type IdentityMapFunc[T GenericIdentity] func(T) T
+
+// IdentitySkeletonList is a list of node skeletons. We use a type alias instead of defining a new type
+// since Go generics don't support implicit conversion between types.
+type IdentitySkeletonList = GenericIdentityList[IdentitySkeleton]
+
+// IdentityList is a list of nodes. We use a type alias instead of defining a new type
+// since Go generics don't support implicit conversion between types.
+type IdentityList = GenericIdentityList[Identity]
+
+type GenericIdentityList[T GenericIdentity] []*T
+
+// Filter will apply a filter to the identity list.
+// The resulting list will only contain entries that match the filtering criteria.
+func (il GenericIdentityList[T]) Filter(filter IdentityFilter[T]) GenericIdentityList[T] {
+	var dup GenericIdentityList[T]
+	for _, identity := range il {
+		if filter(identity) {
+			dup = append(dup, identity)
+		}
+	}
+	return dup
+}
+
+// Map returns a new identity list with the map function f applied to a copy of
+// each identity.
+//
+// CAUTION: this relies on structure copy semantics. Map functions that modify
+// an object referenced by the input Identity structure will modify identities
+// in the source slice as well.
+func (il GenericIdentityList[T]) Map(f IdentityMapFunc[T]) GenericIdentityList[T] {
+	dup := make(GenericIdentityList[T], 0, len(il))
+	for _, identity := range il {
+		next := f(*identity)
+		dup = append(dup, &next)
+	}
+	return dup
+}
+
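The CAUTION on `Map` is easy to trip over, so a small illustration may help. The sketch below is not flow-go code; `item` and `inner` are hypothetical types. It shows why reassigning a value field inside the map function is safe, while writing through a pointer field mutates the source slice as well.

package main

import "fmt"

type inner struct{ value int }

type item struct {
	weight uint64
	ref    *inner // reference field: only the pointer is copied
}

// mapItems mirrors the shape of GenericIdentityList.Map: it applies f to a
// *copy* of each element.
func mapItems(items []*item, f func(item) item) []*item {
	out := make([]*item, 0, len(items))
	for _, it := range items {
		next := f(*it) // struct copy: value fields independent, pointers shared
		out = append(out, &next)
	}
	return out
}

func main() {
	src := []*item{{weight: 1, ref: &inner{value: 7}}}

	// Safe: reassigning a value field only changes the copy.
	_ = mapItems(src, func(it item) item { it.weight = 2; return it })
	fmt.Println(src[0].weight) // 1 — source unchanged

	// Unsafe: writing through the shared pointer mutates the source too,
	// exactly the hazard the CAUTION above describes.
	_ = mapItems(src, func(it item) item { it.ref.value = 99; return it })
	fmt.Println(src[0].ref.value) // 99 — source mutated
}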
+// Copy returns a copy of IdentityList. The resulting slice uses a different
+// backing array, meaning appends and insert operations on either slice are
+// guaranteed to only affect that slice.
+//
+// Copy should be used when modifying an existing identity list by either
+// appending new elements, re-ordering, or inserting new elements at an
+// existing index.
+//
+// CAUTION:
+// All Identity fields are deep-copied, _except_ for their keys, which
+// are copied by reference as they are treated as immutable by convention.
+func (il GenericIdentityList[T]) Copy() GenericIdentityList[T] {
+	dup := make(GenericIdentityList[T], 0, len(il))
+	lenList := len(il)
+	for i := 0; i < lenList; i++ { // performance tests show this is faster than 'range'
+		next := *(il[i]) // copy the object
+		dup = append(dup, &next)
+	}
+	return dup
+}
+
+// Selector returns an identity filter function that selects only identities
+// within this identity list.
+func (il GenericIdentityList[T]) Selector() IdentityFilter[T] {
+	lookup := il.Lookup()
+	return func(identity *T) bool {
+		_, exists := lookup[(*identity).GetNodeID()]
+		return exists
+	}
+}
+
+// Lookup converts the identity slice to a map using the NodeIDs as keys. This
+// is useful when _repeatedly_ querying identities by their NodeIDs. The
+// conversion from slice to map incurs cost O(n), for `n` the slice length.
+// For a _single_ lookup, use method `ByNodeID(Identifier)` (avoiding conversion).
+func (il GenericIdentityList[T]) Lookup() map[Identifier]*T {
+	lookup := make(map[Identifier]*T, len(il))
+	for _, identity := range il {
+		lookup[(*identity).GetNodeID()] = identity
+	}
+	return lookup
+}
+
+// Sort will sort the list using the given ordering. This is not recommended
+// in performance-critical code. For best performance, expand the 'less'
+// function in place instead of calling this function.
+func (il GenericIdentityList[T]) Sort(less IdentityOrder[T]) GenericIdentityList[T] {
+	dup := il.Copy()
+	slices.SortFunc(dup, less)
+	return dup
+}
+
+// Sorted returns whether the list is sorted by the input ordering.
+func (il GenericIdentityList[T]) Sorted(less IdentityOrder[T]) bool {
+	return slices.IsSortedFunc(il, less)
+}
+
+// NodeIDs returns the NodeIDs of the nodes in the list (order preserving).
+func (il GenericIdentityList[T]) NodeIDs() IdentifierList {
+	nodeIDs := make([]Identifier, 0, len(il))
+	for _, id := range il {
+		nodeIDs = append(nodeIDs, (*id).GetNodeID())
+	}
+	return nodeIDs
+}
+
+// PublicStakingKeys returns a list with the public staking keys (order preserving).
+func (il GenericIdentityList[T]) PublicStakingKeys() []crypto.PublicKey {
+	pks := make([]crypto.PublicKey, 0, len(il))
+	for _, id := range il {
+		pks = append(pks, (*id).GetStakingPubKey())
+	}
+	return pks
+}
+
+// ID generates a cryptographic commitment to the full IdentityList, including mutable fields.
+// CAUTION:
+//   - The outputs of `IdentityList.ID()` are order-sensitive.
+//     Therefore, the `IdentityList` must be in canonical order, unless explicitly specified
+//     otherwise by the protocol.
+func (il GenericIdentityList[T]) ID() Identifier {
+	return MakeID(il)
+}
+
+// TotalWeight returns the total weight of all given identities.
+func (il GenericIdentityList[T]) TotalWeight() uint64 {
+	var total uint64
+	for _, identity := range il {
+		total += (*identity).GetInitialWeight()
+	}
+	return total
+}
+
+// Count returns the count of identities.
+func (il GenericIdentityList[T]) Count() uint {
+	return uint(len(il))
+}
+
+// ByIndex returns the node at the given index.
+func (il GenericIdentityList[T]) ByIndex(index uint) (*T, bool) {
+	if index >= uint(len(il)) {
+		return nil, false
+	}
+	return il[int(index)], true
+}
+
+// ByNodeID gets a node from the list by node ID.
+func (il GenericIdentityList[T]) ByNodeID(nodeID Identifier) (*T, bool) {
+	for _, identity := range il {
+		if (*identity).GetNodeID() == nodeID {
+			return identity, true
+		}
+	}
+	return nil, false
+}
+
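As a usage sketch of the trade-off described in `Lookup`'s doc comment (leaning on the `unittest` fixture package used elsewhere in this diff): build the map once when many NodeID queries follow, and fall back to the linear-scan `ByNodeID` for a one-off query.

package main

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	identities := unittest.IdentityListFixture(100)
	queries := identities.NodeIDs()

	// One-off query: a single O(n) scan, no map allocation.
	if identity, found := identities.ByNodeID(queries[0]); found {
		fmt.Println(identity.NodeID)
	}

	// Repeated queries: pay O(n) once to build the map, then O(1) per query.
	lookup := identities.Lookup()
	hits := 0
	for _, nodeID := range queries {
		if _, found := lookup[nodeID]; found {
			hits++
		}
	}
	fmt.Println(hits) // 100
}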
+// ByNetworkingKey gets a node from the list by network public key.
+func (il GenericIdentityList[T]) ByNetworkingKey(key crypto.PublicKey) (*T, bool) {
+	for _, identity := range il {
+		if (*identity).GetNetworkPubKey().Equals(key) {
+			return identity, true
+		}
+	}
+	return nil, false
+}
+
+// Sample returns a non-deterministic random sample from the `IdentityList`.
+func (il GenericIdentityList[T]) Sample(size uint) (GenericIdentityList[T], error) {
+	n := uint(len(il))
+	dup := make(GenericIdentityList[T], 0, n)
+	dup = append(dup, il...)
+	if n < size {
+		size = n
+	}
+	swap := func(i, j uint) {
+		dup[i], dup[j] = dup[j], dup[i]
+	}
+	err := rand.Samples(n, size, swap)
+	if err != nil {
+		return nil, fmt.Errorf("failed to sample identity list: %w", err)
+	}
+	return dup[:size], nil
+}
+
+// Shuffle randomly shuffles the identity list (non-deterministic),
+// and returns the shuffled list without modifying the receiver.
+func (il GenericIdentityList[T]) Shuffle() (GenericIdentityList[T], error) {
+	return il.Sample(uint(len(il)))
+}
+
+// SamplePct returns a random sample from the receiver identity list. The
+// sample contains `pct` percentage of the list. The sample is rounded up
+// if `pct>0`, so this will always select at least one identity.
+//
+// NOTE: The input must be in the interval [0, 1].
+func (il GenericIdentityList[T]) SamplePct(pct float64) (GenericIdentityList[T], error) {
+	if pct <= 0 {
+		return GenericIdentityList[T]{}, nil
+	}
+
+	count := float64(il.Count()) * pct
+	size := uint(math.Round(count))
+	// ensure we always select at least 1, for non-zero input
+	if size == 0 {
+		size = 1
+	}
+
+	return il.Sample(size)
+}
+
+// Union returns a new identity list containing every identity that occurs in
+// either `il`, or `other`, or both. There are no duplicates in the output,
+// where duplicates are identities with the same node ID. In case an entry
+// with the same NodeID exists in the receiver `il` as well as in `other`,
+// the identity from `il` is included in the output.
+// Receiver `il` and/or method input `other` can be nil or empty.
+// The returned IdentityList is sorted in canonical order.
+func (il GenericIdentityList[T]) Union(other GenericIdentityList[T]) GenericIdentityList[T] {
+	maxLen := len(il) + len(other)
+
+	union := make(GenericIdentityList[T], 0, maxLen)
+	set := make(map[Identifier]struct{}, maxLen)
+
+	for _, list := range []GenericIdentityList[T]{il, other} {
+		for _, id := range list {
+			if _, isDuplicate := set[(*id).GetNodeID()]; !isDuplicate {
+				set[(*id).GetNodeID()] = struct{}{}
+				union = append(union, id)
+			}
+		}
+	}
+
+	slices.SortFunc(union, Canonical[T])
+	return union
+}
+
+// IdentityListEqualTo checks whether the two lists are the same, i.e. they
+// contain the same elements in the same order.
+// NOTE: currently a generic comparison is not possible, so we have to use a specific function.
+func IdentityListEqualTo(lhs, rhs IdentityList) bool {
+	return slices.EqualFunc(lhs, rhs, func(a, b *Identity) bool {
+		return a.EqualTo(b)
+	})
+}
+
+// IdentitySkeletonListEqualTo checks whether the two lists are the same, i.e. they
+// contain the same elements in the same order.
+// NOTE: currently a generic comparison is not possible, so we have to use a specific function.
+func IdentitySkeletonListEqualTo(lhs, rhs IdentitySkeletonList) bool {
+	return slices.EqualFunc(lhs, rhs, func(a, b *IdentitySkeleton) bool {
+		return a.EqualTo(b)
+	})
+}
+
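The swap-callback shape of `Sample` may be worth a standalone illustration. The exact implementation of `utils/rand.Samples` is not shown in this patch; the sketch below assumes it performs a partial Fisher–Yates shuffle that moves `size` randomly chosen elements into the slice's prefix, and `math/rand/v2` stands in for flow-go's crypto-grade randomness.

package main

import (
	"fmt"
	"math/rand/v2"
)

// partialShuffle mimics the assumed shape of utils/rand.Samples: it selects
// `size` elements into the prefix of the sequence via the swap callback.
func partialShuffle(n, size uint, swap func(i, j uint)) {
	for i := uint(0); i < size; i++ {
		j := i + uint(rand.IntN(int(n-i))) // pick from the not-yet-selected suffix
		swap(i, j)
	}
}

func main() {
	ids := []string{"a", "b", "c", "d", "e"}
	n, size := uint(len(ids)), uint(3)

	dup := append([]string(nil), ids...) // copy first, as Sample does
	partialShuffle(n, size, func(i, j uint) {
		dup[i], dup[j] = dup[j], dup[i]
	})
	fmt.Println(dup[:size]) // the sample; dup[size:] holds the remainder
}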
+// Exists takes a previously sorted Identity list and searches it for the target
+// identity by its NodeID.
+// CAUTION:
+//   - Other identity fields are not compared.
+//   - The identity list MUST be sorted prior to calling this method.
+func (il GenericIdentityList[T]) Exists(target *T) bool {
+	return il.IdentifierExists((*target).GetNodeID())
+}
+
+// IdentifierExists takes a previously sorted Identity list and searches it for the target value
+// target: value to search for
+// CAUTION: The identity list MUST be sorted prior to calling this method
+func (il GenericIdentityList[T]) IdentifierExists(target Identifier) bool {
+	_, ok := slices.BinarySearchFunc(il, target, func(a *T, b Identifier) int {
+		lhs := (*a).GetNodeID()
+		return bytes.Compare(lhs[:], b[:])
+	})
+	return ok
+}
+
+// GetIndex returns the index of the identifier in the IdentityList and true
+// if the identifier is found.
+func (il GenericIdentityList[T]) GetIndex(target Identifier) (uint, bool) {
+	i := slices.IndexFunc(il, func(a *T) bool {
+		return (*a).GetNodeID() == target
+	})
+	if i == -1 {
+		return 0, false
+	}
+	return uint(i), true
+}
+
+// ToSkeleton converts the identity list to a list of identity skeletons.
+func (il GenericIdentityList[T]) ToSkeleton() IdentitySkeletonList {
+	skeletons := make(IdentitySkeletonList, len(il))
+	for i, id := range il {
+		v := (*id).GetSkeleton()
+		skeletons[i] = &v
+	}
+	return skeletons
+}
diff --git a/model/flow/identity_order.go b/model/flow/identity_order.go
new file mode 100644
index 00000000000..d46653d1dbf
--- /dev/null
+++ b/model/flow/identity_order.go
@@ -0,0 +1,60 @@
+package flow
+
+// Canonical is a function that defines a strict weak ordering "<" for identities.
+// It returns:
+//   - a strictly negative number if id1 < id2
+//   - a strictly positive number if id2 < id1
+//   - zero if id1 and id2 are equal
+//
+// By definition, two identities (id1, id2) are in canonical order if id1's NodeID is lexicographically
+// _strictly_ smaller than id2's NodeID. The strictness is important, meaning that identities
+// with equal NodeIDs do not satisfy canonical ordering (order is irreflexive).
+// Hence, only a returned strictly negative value means the pair is in canonical order.
+// Use `IsCanonical` for canonical order checks.
+//
+// This function is based on lexicographic comparison of the identifiers' bytes.
+func Canonical[T GenericIdentity](identity1 *T, identity2 *T) int {
+	return IdentifierCanonical((*identity1).GetNodeID(), (*identity2).GetNodeID())
+}
+
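A small usage sketch of `Canonical` (again leaning on the `unittest` fixtures used in this diff): it slots directly into `slices.SortFunc`, and the strictness rules fall out of the returned comparison value.

package main

import (
	"fmt"

	"golang.org/x/exp/slices"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	il := unittest.IdentityListFixture(5)

	// Canonical is a ready-made comparison function for slices.SortFunc.
	slices.SortFunc(il, flow.Canonical[flow.Identity])

	// Only a strictly negative result means "in canonical order".
	fmt.Println(flow.Canonical(il[0], il[1]) < 0) // true after sorting distinct IDs

	// Irreflexivity: an identity is never in canonical order with itself,
	// since the comparison returns zero.
	fmt.Println(flow.Canonical(il[0], il[0]) < 0) // false
}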
+// IsCanonical returns true if and only if the given Identities are in canonical order.
+//
+// By convention, two Identities (i1, i2) are in canonical order if i1's NodeID bytes
+// are lexicographically _strictly_ smaller than i2's NodeID bytes.
+//
+// The strictness is important, meaning that two identities with the same
+// NodeID do not satisfy the canonical order.
+// This also implies that the canonical order is irreflexive ((i,i) isn't in canonical order).
+func IsCanonical[T GenericIdentity](i1, i2 *T) bool {
+	return Canonical(i1, i2) < 0
+}
+
+// ByReferenceOrder returns a function for sorting identities based on the order
+// of the given nodeIDs.
+func ByReferenceOrder(nodeIDs []Identifier) func(*Identity, *Identity) int {
+	indices := make(map[Identifier]int)
+	for index, nodeID := range nodeIDs {
+		_, ok := indices[nodeID]
+		if ok {
+			panic("should never order by reference order with duplicate node IDs")
+		}
+		indices[nodeID] = index
+	}
+	return func(identity1 *Identity, identity2 *Identity) int {
+		return indices[identity1.NodeID] - indices[identity2.NodeID]
+	}
+}
+
+// IsIdentityListCanonical returns true if and only if the given IdentityList is
+// _strictly_ sorted with regard to the canonical order.
+//
+// The strictness is important here, meaning that a list with two successive entries
+// with equal NodeID isn't considered well sorted.
+func IsIdentityListCanonical[T GenericIdentity](il GenericIdentityList[T]) bool {
+	for i := 0; i < len(il)-1; i++ {
+		if !IsCanonical(il[i], il[i+1]) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/model/flow/identity_test.go b/model/flow/identity_test.go
index 9c1a137d8ab..338b30a47ec 100644
--- a/model/flow/identity_test.go
+++ b/model/flow/identity_test.go
@@ -2,55 +2,20 @@ package flow_test

 import (
 	"encoding/json"
-	"math/rand"
+	"math/rand/v2"
 	"strings"
 	"testing"
-	"time"

+	"github.com/onflow/crypto"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/vmihailenco/msgpack/v4"

-	"github.com/onflow/flow-go/crypto"
 	"github.com/onflow/flow-go/model/encodable"
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/model/flow/order"
 	"github.com/onflow/flow-go/utils/unittest"
 )

-func TestHexStringToIdentifier(t *testing.T) {
-	type testcase struct {
-		hex         string
-		expectError bool
-	}
-
-	cases := []testcase{{
-		// non-hex characters
-		hex:         "123456789012345678901234567890123456789012345678901234567890123z",
-		expectError: true,
-	}, {
-		// too short
-		hex:         "1234",
-		expectError: true,
-	}, {
-		// just right
-		hex:         "1234567890123456789012345678901234567890123456789012345678901234",
-		expectError: false,
-	}}
-
-	for _, tcase := range cases {
-		id, err := flow.HexStringToIdentifier(tcase.hex)
-		if tcase.expectError {
-			assert.Error(t, err)
-			continue
-		} else {
-			assert.NoError(t, err)
-		}
-
-		assert.Equal(t, tcase.hex, id.String())
-	}
-}
-
 func TestIdentityEncodingJSON(t *testing.T) {

 	t.Run("normal identity", func(t *testing.T) {
@@ -60,7 +25,7 @@ func TestIdentityEncodingJSON(t *testing.T) {
 		var dec flow.Identity
 		err = json.Unmarshal(enc, &dec)
 		require.NoError(t, err)
-		require.Equal(t, identity, &dec)
+		require.True(t, identity.EqualTo(&dec))
 	})

 	t.Run("empty address should be omitted", func(t *testing.T) {
@@ -73,19 +38,7 @@ func TestIdentityEncodingJSON(t *testing.T) {
 		var dec flow.Identity
 		err = json.Unmarshal(enc, &dec)
 		require.NoError(t, err)
-		require.Equal(t, identity, &dec)
-	})
-
-	t.Run("compat: should accept old files using Stake field", func(t *testing.T) {
-		identity := unittest.IdentityFixture(unittest.WithRandomPublicKeys())
-		enc, err := json.Marshal(identity)
-		require.NoError(t, err)
-		// emulate the old encoding by replacing the new field with old field name
-		enc = []byte(strings.Replace(string(enc), "Weight", "Stake", 1))
-		var dec flow.Identity
-		err = json.Unmarshal(enc, &dec)
-		require.NoError(t, err)
-		require.Equal(t, identity, &dec)
+		require.True(t, identity.EqualTo(&dec))
 	})
 }

@@ -96,7 +49,7 @@ func 
TestIdentityEncodingMsgpack(t *testing.T) { var dec flow.Identity err = msgpack.Unmarshal(enc, &dec) require.NoError(t, err) - require.Equal(t, identity, &dec) + require.True(t, identity.EqualTo(&dec)) } func TestIdentityList_Exists(t *testing.T) { @@ -105,7 +58,7 @@ func TestIdentityList_Exists(t *testing.T) { il2 := unittest.IdentityListFixture(1) // sort the first list - il1 = il1.Sort(order.Canonical) + il1 = il1.Sort(flow.Canonical[flow.Identity]) for i := 0; i < 10; i++ { assert.True(t, il1.Exists(il1[i])) @@ -120,7 +73,7 @@ func TestIdentityList_IdentifierExists(t *testing.T) { il2 := unittest.IdentityListFixture(1) // sort the first list - il1 = il1.Sort(order.Canonical) + il1 = il1.Sort(flow.Canonical[flow.Identity]) for i := 0; i < 10; i++ { assert.True(t, il1.IdentifierExists(il1[i].NodeID)) @@ -195,31 +148,54 @@ func TestIdentityList_Union(t *testing.T) { }) } +// TestGenericIdentityList_Malleability verifies that the GenericIdentityList which implements the [flow.IDEntity] +// interface is not malleable. +func TestGenericIdentityList_Malleability(t *testing.T) { + t.Run("IdentityList", func(t *testing.T) { + identityList := unittest.IdentityListFixture(10) + unittest.RequireEntityNonMalleable(t, &identityList, unittest.WithTypeGenerator(func() flow.EpochParticipationStatus { + return flow.EpochParticipationStatus(rand.IntN(int(flow.EpochParticipationStatusEjected) + 1)) + })) + }) + + t.Run("IdentitySkeletonList", func(t *testing.T) { + identitySkeletonList := unittest.IdentityListFixture(10).ToSkeleton() + unittest.RequireEntityNonMalleable(t, &identitySkeletonList) + }) +} + func TestSample(t *testing.T) { t.Run("Sample max", func(t *testing.T) { il := unittest.IdentityListFixture(10) - require.Equal(t, uint(10), il.Sample(10).Count()) + sam, err := il.Sample(10) + require.NoError(t, err) + require.Equal(t, uint(10), sam.Count()) }) t.Run("Sample oversized", func(t *testing.T) { il := unittest.IdentityListFixture(10) - require.Equal(t, uint(10), il.Sample(11).Count()) + sam, err := il.Sample(11) + require.NoError(t, err) + require.Equal(t, uint(10), sam.Count()) }) } func TestShuffle(t *testing.T) { t.Run("should be shuffled", func(t *testing.T) { il := unittest.IdentityListFixture(15) // ~1/billion chance of shuffling to input state - shuffled := il.DeterministicShuffle(rand.Int63()) + shuffled, err := il.Shuffle() + require.NoError(t, err) assert.Equal(t, len(il), len(shuffled)) assert.ElementsMatch(t, il, shuffled) }) - t.Run("should be deterministic", func(t *testing.T) { + t.Run("should not be deterministic", func(t *testing.T) { il := unittest.IdentityListFixture(10) - seed := rand.Int63() - shuffled1 := il.DeterministicShuffle(seed) - shuffled2 := il.DeterministicShuffle(seed) - assert.Equal(t, shuffled1, shuffled2) + shuffled1, err := il.Shuffle() + require.NoError(t, err) + shuffled2, err := il.Shuffle() + require.NoError(t, err) + assert.NotEqual(t, shuffled1, shuffled2) + assert.ElementsMatch(t, shuffled1, shuffled2) }) } @@ -238,11 +214,18 @@ func TestIdentity_ID(t *testing.T) { func TestIdentity_Sort(t *testing.T) { il := unittest.IdentityListFixture(20) - random := il.DeterministicShuffle(time.Now().UnixNano()) - assert.False(t, random.Sorted(order.Canonical)) - - canonical := il.Sort(order.Canonical) - assert.True(t, canonical.Sorted(order.Canonical)) + // make sure the list is not sorted + il[0].NodeID[0], il[1].NodeID[0] = 2, 1 + require.False(t, flow.IsCanonical(il[0], il[1])) + assert.False(t, flow.IsIdentityListCanonical(il)) + + canonical := 
il.Sort(flow.Canonical[flow.Identity]) + assert.True(t, flow.IsIdentityListCanonical(canonical)) + + // check `IsIdentityListCanonical` detects order equality in a sorted list + il[1] = il[10] // add a duplication + canonical = il.Sort(flow.Canonical[flow.Identity]) + assert.False(t, flow.IsIdentityListCanonical(canonical)) } func TestIdentity_EqualTo(t *testing.T) { @@ -258,56 +241,56 @@ func TestIdentity_EqualTo(t *testing.T) { }) t.Run("NodeID diff", func(t *testing.T) { - a := &flow.Identity{NodeID: [32]byte{1, 2, 3}} - b := &flow.Identity{NodeID: [32]byte{2, 2, 2}} + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{NodeID: [32]byte{1, 2, 3}}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{NodeID: [32]byte{2, 2, 2}}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) t.Run("Address diff", func(t *testing.T) { - a := &flow.Identity{Address: "b"} - b := &flow.Identity{Address: "c"} + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{Address: "b"}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{Address: "c"}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) t.Run("Role diff", func(t *testing.T) { - a := &flow.Identity{Role: flow.RoleCollection} - b := &flow.Identity{Role: flow.RoleExecution} + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{Role: flow.RoleCollection}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{Role: flow.RoleExecution}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) - t.Run("Weight diff", func(t *testing.T) { - a := &flow.Identity{Weight: 1} - b := &flow.Identity{Weight: 2} + t.Run("Initial weight diff", func(t *testing.T) { + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{InitialWeight: 1}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{InitialWeight: 2}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) - t.Run("Ejected diff", func(t *testing.T) { - a := &flow.Identity{Ejected: true} - b := &flow.Identity{Ejected: false} + t.Run("status diff", func(t *testing.T) { + a := &flow.Identity{DynamicIdentity: flow.DynamicIdentity{EpochParticipationStatus: flow.EpochParticipationStatusActive}} + b := &flow.Identity{DynamicIdentity: flow.DynamicIdentity{EpochParticipationStatus: flow.EpochParticipationStatusLeaving}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) t.Run("StakingPubKey diff", func(t *testing.T) { - a := &flow.Identity{StakingPubKey: pks[0]} - b := &flow.Identity{StakingPubKey: pks[1]} + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{StakingPubKey: pks[0]}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{StakingPubKey: pks[1]}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) }) t.Run("NetworkPubKey diff", func(t *testing.T) { - a := &flow.Identity{NetworkPubKey: pks[0]} - b := &flow.Identity{NetworkPubKey: pks[1]} + a := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{NetworkPubKey: pks[0]}} + b := &flow.Identity{IdentitySkeleton: flow.IdentitySkeleton{NetworkPubKey: pks[1]}} require.False(t, a.EqualTo(b)) require.False(t, b.EqualTo(a)) @@ -315,22 +298,30 @@ func TestIdentity_EqualTo(t *testing.T) { t.Run("Same data equals", func(t *testing.T) { a := &flow.Identity{ - NodeID: flow.Identifier{1, 2, 3}, - Address: "address", - Role: flow.RoleCollection, - Weight: 23, - Ejected: false, - StakingPubKey: pks[0], - NetworkPubKey: pks[1], + IdentitySkeleton: flow.IdentitySkeleton{ + NodeID: flow.Identifier{1, 2, 3}, + Address: "address", 
+ Role: flow.RoleCollection, + InitialWeight: 23, + StakingPubKey: pks[0], + NetworkPubKey: pks[1], + }, + DynamicIdentity: flow.DynamicIdentity{ + EpochParticipationStatus: flow.EpochParticipationStatusActive, + }, } b := &flow.Identity{ - NodeID: flow.Identifier{1, 2, 3}, - Address: "address", - Role: flow.RoleCollection, - Weight: 23, - Ejected: false, - StakingPubKey: pks[0], - NetworkPubKey: pks[1], + IdentitySkeleton: flow.IdentitySkeleton{ + NodeID: flow.Identifier{1, 2, 3}, + Address: "address", + Role: flow.RoleCollection, + InitialWeight: 23, + StakingPubKey: pks[0], + NetworkPubKey: pks[1], + }, + DynamicIdentity: flow.DynamicIdentity{ + EpochParticipationStatus: flow.EpochParticipationStatusActive, + }, } require.True(t, a.EqualTo(b)) @@ -344,8 +335,8 @@ func TestIdentityList_EqualTo(t *testing.T) { a := flow.IdentityList{} b := flow.IdentityList{} - require.True(t, a.EqualTo(b)) - require.True(t, b.EqualTo(a)) + require.True(t, flow.IdentityListEqualTo(a, b)) + require.True(t, flow.IdentityListEqualTo(b, a)) }) t.Run("different len arent equal", func(t *testing.T) { @@ -354,8 +345,8 @@ func TestIdentityList_EqualTo(t *testing.T) { a := flow.IdentityList{identityA} b := flow.IdentityList{} - require.False(t, a.EqualTo(b)) - require.False(t, b.EqualTo(a)) + require.False(t, flow.IdentityListEqualTo(a, b)) + require.False(t, flow.IdentityListEqualTo(b, a)) }) t.Run("different data means not equal", func(t *testing.T) { @@ -365,8 +356,8 @@ func TestIdentityList_EqualTo(t *testing.T) { a := flow.IdentityList{identityA} b := flow.IdentityList{identityB} - require.False(t, a.EqualTo(b)) - require.False(t, b.EqualTo(a)) + require.False(t, flow.IdentityListEqualTo(a, b)) + require.False(t, flow.IdentityListEqualTo(b, a)) }) t.Run("same data means equal", func(t *testing.T) { @@ -375,8 +366,8 @@ func TestIdentityList_EqualTo(t *testing.T) { a := flow.IdentityList{identityA, identityA} b := flow.IdentityList{identityA, identityA} - require.True(t, a.EqualTo(b)) - require.True(t, b.EqualTo(a)) + require.True(t, flow.IdentityListEqualTo(a, b)) + require.True(t, flow.IdentityListEqualTo(b, a)) }) } diff --git a/model/flow/incorporated_result.go b/model/flow/incorporated_result.go index 7d9c29611b8..0bb6bd16d80 100644 --- a/model/flow/incorporated_result.go +++ b/model/flow/incorporated_result.go @@ -1,7 +1,11 @@ package flow +import "fmt" + // IncorporatedResult is a wrapper around an ExecutionResult which contains the // ID of the first block on its fork in which it was incorporated. +// +//structwrite:immutable - mutations allowed only within the constructor type IncorporatedResult struct { // IncorporatedBlockID is the ID of the first block on its fork where a // receipt for this result was incorporated. Within a fork, multiple blocks @@ -14,22 +18,38 @@ type IncorporatedResult struct { Result *ExecutionResult } -func NewIncorporatedResult(incorporatedBlockID Identifier, result *ExecutionResult) *IncorporatedResult { - return &IncorporatedResult{ - IncorporatedBlockID: incorporatedBlockID, - Result: result, +// UntrustedIncorporatedResult is an untrusted input-only representation of an IncorporatedResult, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. 
+//
+// An instance of UntrustedIncorporatedResult should be validated and converted into
+// a trusted IncorporatedResult using the NewIncorporatedResult constructor.
+type UntrustedIncorporatedResult IncorporatedResult
+
+// NewIncorporatedResult creates a new instance of IncorporatedResult.
+// Construction of IncorporatedResult is allowed only within this constructor.
+//
+// All errors indicate a valid IncorporatedResult cannot be constructed from the input.
+func NewIncorporatedResult(untrusted UntrustedIncorporatedResult) (*IncorporatedResult, error) {
+	if untrusted.IncorporatedBlockID == ZeroID {
+		return nil, fmt.Errorf("IncorporatedBlockID must not be empty")
 	}
-}

-// ID implements flow.Entity.ID for IncorporatedResult to make it capable of
-// being stored directly in mempools and storage.
-func (ir *IncorporatedResult) ID() Identifier {
-	return MakeID([2]Identifier{ir.IncorporatedBlockID, ir.Result.ID()})
+	if untrusted.Result == nil {
+		return nil, fmt.Errorf("Result must not be empty")
+	}
+
+	return &IncorporatedResult{
+		IncorporatedBlockID: untrusted.IncorporatedBlockID,
+		Result:              untrusted.Result,
+	}, nil
 }

-// CheckSum implements flow.Entity.CheckSum for IncorporatedResult to make it
-// capable of being stored directly in mempools and storage.
-func (ir *IncorporatedResult) Checksum() Identifier {
+// ID returns a collision-resistant hash for the [IncorporatedResult] structure.
+func (ir *IncorporatedResult) ID() Identifier {
 	return MakeID(ir)
 }
diff --git a/model/flow/incorporated_result_seal.go b/model/flow/incorporated_result_seal.go
index 4c1e0b0011c..298233c021d 100644
--- a/model/flow/incorporated_result_seal.go
+++ b/model/flow/incorporated_result_seal.go
@@ -17,14 +17,8 @@ type IncorporatedResultSeal struct {
 	Header *Header
 }

-// ID implements flow.Entity.ID for IncorporatedResultSeal to make it capable of
-// being stored directly in mempools and storage.
-func (s *IncorporatedResultSeal) ID() Identifier {
+// IncorporatedResultID returns the identifier of the IncorporatedResult
+// associated with the IncorporatedResultSeal.
+func (s *IncorporatedResultSeal) IncorporatedResultID() Identifier {
 	return s.IncorporatedResult.ID()
 }
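For context on how call sites adapt to this pattern, here is a hypothetical construction helper (a sketch, not code from this patch): all fields are named explicitly in the untrusted wrapper, and the constructor remains the single point where the invariants are enforced.

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

// buildIncorporatedResult illustrates the untrusted→trusted flow: callers
// name every field explicitly, and validation happens in one place.
func buildIncorporatedResult(blockID flow.Identifier, result *flow.ExecutionResult) (*flow.IncorporatedResult, error) {
	ir, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{
		IncorporatedBlockID: blockID, // rejected if flow.ZeroID
		Result:              result,  // rejected if nil
	})
	if err != nil {
		return nil, fmt.Errorf("could not construct incorporated result: %w", err)
	}
	return ir, nil // ir satisfies the type's invariants by construction
}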
-
-// CheckSum implements flow.Entity.CheckSum for IncorporatedResultSeal to make
-// it capable of being stored directly in mempools and storage.
-func (s *IncorporatedResultSeal) Checksum() Identifier {
-	return MakeID(s)
-}
diff --git a/model/flow/incorporated_result_test.go b/model/flow/incorporated_result_test.go
index 0f01f5913e7..60a3d5403a2 100644
--- a/model/flow/incorporated_result_test.go
+++ b/model/flow/incorporated_result_test.go
@@ -4,6 +4,7 @@ import (
 	"testing"

 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"

 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/utils/unittest"
@@ -13,10 +14,23 @@ import (
 // * grouping should preserve order and multiplicity of elements
 // * group for unknown identifier should be empty
 func TestIncorporatedResultGroupBy(t *testing.T) {
+	ir1, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{
+		IncorporatedBlockID: unittest.IdentifierFixture(),
+		Result:              unittest.ExecutionResultFixture(),
+	})
+	require.NoError(t, err)

-	ir1 := flow.NewIncorporatedResult(unittest.IdentifierFixture(), unittest.ExecutionResultFixture())
-	ir2 := flow.NewIncorporatedResult(unittest.IdentifierFixture(), unittest.ExecutionResultFixture())
-	ir3 := flow.NewIncorporatedResult(unittest.IdentifierFixture(), unittest.ExecutionResultFixture())
+	ir2, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{
+		IncorporatedBlockID: unittest.IdentifierFixture(),
+		Result:              unittest.ExecutionResultFixture(),
+	})
+	require.NoError(t, err)
+
+	ir3, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{
+		IncorporatedBlockID: unittest.IdentifierFixture(),
+		Result:              unittest.ExecutionResultFixture(),
+	})
+	require.NoError(t, err)

 	idA := unittest.IdentifierFixture()
 	idB := unittest.IdentifierFixture()
@@ -41,3 +55,69 @@ func TestIncorporatedResultGroupBy(t *testing.T) {
 	unknown := groups.GetGroup(unittest.IdentifierFixture())
 	assert.Equal(t, 0, unknown.Size())
 }
+
+// TestIncorporatedResultID_Malleability confirms that the IncorporatedResult struct, which implements
+// the [flow.IDEntity] interface, is resistant to tampering.
+func TestIncorporatedResultID_Malleability(t *testing.T) {
+	incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{
+		IncorporatedBlockID: unittest.IdentifierFixture(),
+		Result:              unittest.ExecutionResultFixture(),
+	})
+	require.NoError(t, err)
+	unittest.RequireEntityNonMalleable(t,
+		incorporatedResult,
+		unittest.WithFieldGenerator("Result.ServiceEvents", func() []flow.ServiceEvent {
+			return unittest.ServiceEventsFixture(3)
+		}))
+}
+
+// TestNewIncorporatedResult verifies that NewIncorporatedResult constructs a valid
+// IncorporatedResult when given complete, non-zero fields, and returns an error
+// if any required field is missing.
+// It covers: +// - valid incorporated result creation +// - missing IncorporatedBlockID +// - nil Result +func TestNewIncorporatedResult(t *testing.T) { + t.Run("valid untrusted incorporated result", func(t *testing.T) { + id := unittest.IdentifierFixture() + // Use a real ExecutionResult fixture and take its address + er := unittest.ExecutionResultFixture() + uc := flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: id, + Result: er, + } + + ir, err := flow.NewIncorporatedResult(uc) + assert.NoError(t, err) + assert.NotNil(t, ir) + assert.Equal(t, id, ir.IncorporatedBlockID) + assert.Equal(t, er, ir.Result) + }) + + t.Run("missing IncorporatedBlockID", func(t *testing.T) { + er := unittest.ExecutionResultFixture() + uc := flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: flow.ZeroID, + Result: er, + } + + ir, err := flow.NewIncorporatedResult(uc) + assert.Error(t, err) + assert.Nil(t, ir) + assert.Contains(t, err.Error(), "IncorporatedBlockID") + }) + + t.Run("nil Result", func(t *testing.T) { + id := unittest.IdentifierFixture() + uc := flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: id, + Result: nil, + } + + ir, err := flow.NewIncorporatedResult(uc) + assert.Error(t, err) + assert.Nil(t, ir) + assert.Contains(t, err.Error(), "Result") + }) +} diff --git a/model/flow/index.go b/model/flow/index.go index 6f71575aa51..66562a0d76d 100644 --- a/model/flow/index.go +++ b/model/flow/index.go @@ -1,8 +1,9 @@ package flow type Index struct { - CollectionIDs []Identifier - SealIDs []Identifier - ReceiptIDs []Identifier - ResultIDs []Identifier + GuaranteeIDs []Identifier + SealIDs []Identifier + ReceiptIDs []Identifier + ResultIDs []Identifier + ProtocolStateID Identifier } diff --git a/model/flow/kvstore.go b/model/flow/kvstore.go new file mode 100644 index 00000000000..bd9804ed508 --- /dev/null +++ b/model/flow/kvstore.go @@ -0,0 +1,22 @@ +package flow + +// SetEpochExtensionViewCount is a service event emitted by the FlowServiceAccount for updating +// the `EpochExtensionViewCount` parameter in the protocol state's key-value store. +// NOTE: A SetEpochExtensionViewCount event `E` is accepted while processing block `B` +// which seals `E` if and only if E.Value > 2*FinalizationSafetyThreshold. +type SetEpochExtensionViewCount struct { + Value uint64 +} + +// EqualTo returns true if the two events are equivalent. +func (s *SetEpochExtensionViewCount) EqualTo(other *SetEpochExtensionViewCount) bool { + return s.Value == other.Value +} + +// ServiceEvent returns the event as a generic ServiceEvent type. +func (s *SetEpochExtensionViewCount) ServiceEvent() ServiceEvent { + return ServiceEvent{ + Type: ServiceEventSetEpochExtensionViewCount, + Event: s, + } +} diff --git a/model/flow/ledger.go b/model/flow/ledger.go index 78c1f128c06..792168d1109 100644 --- a/model/flow/ledger.go +++ b/model/flow/ledger.go @@ -13,15 +13,19 @@ import ( const ( // Service level keys (owner is empty): - UUIDKey = "uuid" + UUIDKeyPrefix = "uuid" AddressStateKey = "account_address_state" // Account level keys - AccountKeyPrefix = "a." - AccountStatusKey = AccountKeyPrefix + "s" - CodeKeyPrefix = "code." - ContractNamesKey = "contract_names" - PublicKeyKeyPrefix = "public_key_" + AccountKeyPrefix = "a." + AccountStatusKey = AccountKeyPrefix + "s" + CodeKeyPrefix = "code." 
+	ContractNamesKey                 = "contract_names"
+	AccountPublicKey0RegisterKey     = "apk_0"
+	SequenceNumberRegisterKeyPattern = "sn_%d"
+	SequenceNumberRegisterKeyPrefix  = "sn_"
+	BatchPublicKeyRegisterKeyPrefix  = "pk_b"
+	BatchPublicKeyRegisterKeyPattern = "pk_b%d"
 )

 func addressToOwner(address Address) string {
@@ -38,9 +42,17 @@ var AddressStateRegisterID = RegisterID{
 	Key:   AddressStateKey,
 }

-var UUIDRegisterID = RegisterID{
-	Owner: "",
-	Key:   UUIDKey,
+func UUIDRegisterID(partition byte) RegisterID {
+	// NOTE: partition 0 uses "uuid" as key to maintain backwards compatibility.
+	key := UUIDKeyPrefix
+	if partition != 0 {
+		key = fmt.Sprintf("%s_%d", UUIDKeyPrefix, partition)
+	}
+
+	return RegisterID{
+		Owner: "",
+		Key:   key,
+	}
 }

 func AccountStatusRegisterID(address Address) RegisterID {
@@ -50,10 +62,24 @@ func AccountStatusRegisterID(address Address) RegisterID {
 	}
 }

-func PublicKeyRegisterID(address Address, index uint64) RegisterID {
+func AccountPublicKey0RegisterID(address Address) RegisterID {
+	return RegisterID{
+		Owner: addressToOwner(address),
+		Key:   AccountPublicKey0RegisterKey,
+	}
+}
+
+func AccountBatchPublicKeyRegisterID(address Address, batchIndex uint32) RegisterID {
 	return RegisterID{
 		Owner: addressToOwner(address),
-		Key:   fmt.Sprintf("public_key_%d", index),
+		Key:   fmt.Sprintf(BatchPublicKeyRegisterKeyPattern, batchIndex),
+	}
+}
+
+func AccountPublicKeySequenceNumberRegisterID(address Address, keyIndex uint32) RegisterID {
+	return RegisterID{
+		Owner: addressToOwner(address),
+		Key:   fmt.Sprintf(SequenceNumberRegisterKeyPattern, keyIndex),
 	}
 }

@@ -67,20 +93,51 @@ func ContractNamesRegisterID(address Address) RegisterID {
 func ContractRegisterID(address Address, contractName string) RegisterID {
 	return RegisterID{
 		Owner: addressToOwner(address),
-		Key:   CodeKeyPrefix + contractName,
+		Key:   ContractKey(contractName),
 	}
 }

+func ContractKey(contractName string) string {
+	return CodeKeyPrefix + contractName
+}
+
+func IsContractKey(key string) bool {
+	return strings.HasPrefix(key, CodeKeyPrefix)
+}
+
+func KeyContractName(key string) string {
+	if !IsContractKey(key) {
+		return ""
+	}
+	return key[len(CodeKeyPrefix):]
+}
+
+func IsContractNamesRegisterID(registerID RegisterID) bool {
+	return registerID.Key == ContractNamesKey
+}
+
 func CadenceRegisterID(owner []byte, key []byte) RegisterID {
 	return RegisterID{
-		Owner: string(BytesToAddress(owner).Bytes()),
+		Owner: addressToOwner(BytesToAddress(owner)),
 		Key:   string(key),
 	}
 }

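A quick sketch of what the partitioned UUID registers look like in practice (illustrative only; the printed values follow from the code above):

package main

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

func main() {
	// Partition 0 keeps the legacy "uuid" key for backwards compatibility;
	// every other partition gets a suffixed key.
	fmt.Println(flow.UUIDRegisterID(0).Key) // "uuid"
	fmt.Println(flow.UUIDRegisterID(7).Key) // "uuid_7"

	// All partitions are global (service-level) registers: the owner is
	// empty and they are classified as internal state.
	fmt.Println(flow.UUIDRegisterID(7).Owner == "")       // true
	fmt.Println(flow.UUIDRegisterID(7).IsInternalState()) // true
}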
-func NewRegisterID(owner, key string) RegisterID {
+// AddressToRegisterOwner converts an 8-byte address to a register owner.
+// If the given address is ZeroAddress, the register owner is "" (global register).
+func AddressToRegisterOwner(address Address) string {
+	// Global registers have address zero and an empty owner field
+	if address == EmptyAddress {
+		return ""
+	}
+
+	// All other registers have the account's address
+	return addressToOwner(address)
+}
+
+func NewRegisterID(owner Address, key string) RegisterID {
 	return RegisterID{
-		Owner: addressToOwner(BytesToAddress([]byte(owner))),
+		Owner: AddressToRegisterOwner(owner),
 		Key:   key,
 	}
 }
@@ -90,24 +147,32 @@ func (id RegisterID) IsInternalState() bool {
 	// check if is a service level key (owner is empty)
 	// cases:
-	// - "", "uuid"
+	// - "", "uuid" (for shard index 0)
+	// - "", "uuid_%d" (for shard index > 0)
 	// - "", "account_address_state"
-	if len(id.Owner) == 0 && (id.Key == UUIDKey || id.Key == AddressStateKey) {
-		return true
+	if len(id.Owner) == 0 {
+		return strings.HasPrefix(id.Key, UUIDKeyPrefix) ||
+			id.Key == AddressStateKey
 	}

 	// check account level keys
 	// cases:
 	// - address, "contract_names"
 	// - address, "code.%s" (contract name)
-	// - address, "public_key_%d" (index)
+	// - address, "apk_0"
+	// - address, "pk_b%d" (batch index)
+	// - address, "sn_%d" (key index)
 	// - address, "a.s" (account status)
-	return strings.HasPrefix(id.Key, PublicKeyKeyPrefix) ||
-		id.Key == ContractNamesKey ||
+	return id.Key == ContractNamesKey ||
 		strings.HasPrefix(id.Key, CodeKeyPrefix) ||
+		id.Key == AccountPublicKey0RegisterKey ||
+		strings.HasPrefix(id.Key, BatchPublicKeyRegisterKeyPrefix) ||
+		strings.HasPrefix(id.Key, SequenceNumberRegisterKeyPrefix) ||
 		id.Key == AccountStatusKey
 }

+const SlabIndexPrefix = '$'
+
 // IsSlabIndex returns true if the key is a slab index for an account's ordered fields
 // map.
 //
@@ -115,15 +180,19 @@ func (id RegisterID) IsInternalState() bool {
 // only to cadence. Cadence encodes this map into bytes and split the bytes
 // into slab chunks before storing the slabs into the ledger.
 func (id RegisterID) IsSlabIndex() bool {
-	return len(id.Key) == 9 && id.Key[0] == '$'
+	return IsSlabIndexKey(id.Key)
+}
+
+func IsSlabIndexKey(key string) bool {
+	return len(key) == 9 && key[0] == SlabIndexPrefix
 }

 // String returns formatted string representation of the RegisterID.
 func (id RegisterID) String() string {
 	formattedKey := ""
 	if id.IsSlabIndex() {
-		i := uint64(binary.BigEndian.Uint64([]byte(id.Key[1:])))
-		formattedKey = fmt.Sprintf("$%d", i)
+		i := binary.BigEndian.Uint64([]byte(id.Key[1:]))
+		formattedKey = fmt.Sprintf("%c%d", SlabIndexPrefix, i)
 	} else {
 		formattedKey = fmt.Sprintf("#%x", []byte(id.Key))
 	}
@@ -206,6 +275,9 @@ type StorageProof = []byte
 // TODO: solve the circular dependency and define StateCommitment as ledger.State
 type StateCommitment hash.Hash

+// EmptyStateCommitment is the zero-value state commitment.
+var EmptyStateCommitment = StateCommitment{}
+
 // DummyStateCommitment is an arbitrary value used in function failure cases,
 // although it can represent a valid state commitment.
var DummyStateCommitment = StateCommitment(hash.DummyHash) @@ -224,6 +296,11 @@ func ToStateCommitment(stateBytes []byte) (StateCommitment, error) { return state, nil } +func (s StateCommitment) String() string { + // Just use the string function of the parent type + return hash.Hash(s).String() +} + func (s StateCommitment) MarshalJSON() ([]byte, error) { return json.Marshal(hex.EncodeToString(s[:])) } diff --git a/model/flow/ledger_test.go b/model/flow/ledger_test.go index 13d18e2977d..9fa8b1f4e62 100644 --- a/model/flow/ledger_test.go +++ b/model/flow/ledger_test.go @@ -1,4 +1,4 @@ -package flow +package flow_test import ( "encoding/hex" @@ -6,8 +6,13 @@ import ( "testing" "unicode/utf8" - "github.com/onflow/atree" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/onflow/atree" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) // this benchmark can run with this command: @@ -18,7 +23,7 @@ var length int func BenchmarkString(b *testing.B) { - r := NewRegisterID("theowner", "123412341234") + r := flow.NewRegisterID(unittest.RandomAddressFixture(), "123412341234") ownerLen := len(r.Owner) @@ -38,7 +43,7 @@ func BenchmarkString(b *testing.B) { func BenchmarkOriginalString(b *testing.B) { - r := NewRegisterID("theowner", "123412341234") + r := flow.NewRegisterID(unittest.RandomAddressFixture(), "123412341234") ret := fmt.Sprintf("%x/%x", r.Owner, r.Key) @@ -46,46 +51,72 @@ func BenchmarkOriginalString(b *testing.B) { } func TestRegisterID_IsInternalState(t *testing.T) { - requireTrue := func(owner string, key string) { - id := NewRegisterID(owner, key) + requireTrue := func(owner flow.Address, key string) { + id := flow.NewRegisterID(owner, key) require.True(t, id.IsInternalState()) } - requireFalse := func(owner string, key string) { - id := NewRegisterID(owner, key) + requireFalse := func(owner flow.Address, key string) { + id := flow.NewRegisterID(owner, key) require.False(t, id.IsInternalState()) } - require.True(t, UUIDRegisterID.IsInternalState()) - requireFalse("", UUIDKey) - require.True(t, AddressStateRegisterID.IsInternalState()) - requireFalse("", AddressStateKey) - requireFalse("", "other") - requireFalse("Address", UUIDKey) - requireFalse("Address", AddressStateKey) - requireTrue("Address", "public_key_12") - requireTrue("Address", ContractNamesKey) - requireTrue("Address", "code.MYCODE") - requireTrue("Address", AccountStatusKey) - requireFalse("Address", "anything else") + + for i := 0; i < 256; i++ { + uuid := flow.UUIDRegisterID(byte(i)) + if i == 0 { + require.Equal(t, uuid.Key, flow.UUIDKeyPrefix) + requireTrue(flow.EmptyAddress, flow.UUIDKeyPrefix) + } else { + require.Equal(t, uuid.Key, fmt.Sprintf("%s_%d", flow.UUIDKeyPrefix, i)) + requireTrue(flow.EmptyAddress, fmt.Sprintf("%s_%d", flow.UUIDKeyPrefix, i)) + } + require.True(t, uuid.IsInternalState()) + } + require.True(t, flow.AddressStateRegisterID.IsInternalState()) + requireTrue(flow.EmptyAddress, flow.AddressStateKey) + requireFalse(flow.EmptyAddress, "other") + requireFalse(unittest.RandomAddressFixture(), flow.UUIDKeyPrefix) + requireFalse(unittest.RandomAddressFixture(), flow.AddressStateKey) + requireFalse(unittest.RandomAddressFixture(), "public_key_12") + requireTrue(unittest.RandomAddressFixture(), "apk_0") + requireTrue(unittest.RandomAddressFixture(), "pk_b0") + requireTrue(unittest.RandomAddressFixture(), "sn_0") + requireTrue(unittest.RandomAddressFixture(), flow.ContractNamesKey) + 
requireTrue(unittest.RandomAddressFixture(), "code.MYCODE") + requireTrue(unittest.RandomAddressFixture(), flow.AccountStatusKey) + requireFalse(unittest.RandomAddressFixture(), "anything else") } func TestRegisterID_String(t *testing.T) { - // slab with 189 should result in \\xbd - slabIndex := atree.StorageIndex([8]byte{0, 0, 0, 0, 0, 0, 0, 189}) - - id := NewRegisterID( - string([]byte{1, 2, 3, 10}), - string(atree.SlabIndexToLedgerKey(slabIndex))) - require.False(t, utf8.ValidString(id.Key)) - printable := id.String() - require.True(t, utf8.ValidString(printable)) - require.Equal(t, "000000000102030a/$189", printable) - - // non slab invalid utf-8 - id = NewRegisterID("b\xc5y", "a\xc5z") - require.False(t, utf8.ValidString(id.Owner)) - require.False(t, utf8.ValidString(id.Key)) - printable = id.String() - require.True(t, utf8.ValidString(printable)) - require.Equal(t, "000000000062c579/#61c57a", printable) + t.Run("atree slab", func(t *testing.T) { + // slab with 189 should result in \\xbd + slabIndex := atree.SlabIndex([8]byte{0, 0, 0, 0, 0, 0, 0, 189}) + + id := flow.NewRegisterID( + flow.BytesToAddress([]byte{1, 2, 3, 10}), + string(atree.SlabIndexToLedgerKey(slabIndex))) + require.False(t, utf8.ValidString(id.Key)) + printable := id.String() + require.True(t, utf8.ValidString(printable)) + require.Equal(t, "000000000102030a/$189", printable) + }) + + t.Run("non slab invalid utf-8", func(t *testing.T) { + id := flow.NewRegisterID(flow.BytesToAddress([]byte("b\xc5y")), "a\xc5z") + require.False(t, utf8.ValidString(id.Owner)) + require.False(t, utf8.ValidString(id.Key)) + printable := id.String() + require.True(t, utf8.ValidString(printable)) + require.Equal(t, "000000000062c579/#61c57a", printable) + }) + + t.Run("global register", func(t *testing.T) { + uuidRegisterID := flow.UUIDRegisterID(0) + id := flow.NewRegisterID(flow.EmptyAddress, uuidRegisterID.Key) + require.Equal(t, uuidRegisterID.Owner, id.Owner) + require.Equal(t, uuidRegisterID.Key, id.Key) + printable := id.String() + assert.True(t, utf8.ValidString(printable)) + assert.Equal(t, "/#75756964", printable) + }) } diff --git a/model/flow/mapfunc/identity.go b/model/flow/mapfunc/identity.go index 89fc568b039..3e21793b43a 100644 --- a/model/flow/mapfunc/identity.go +++ b/model/flow/mapfunc/identity.go @@ -4,9 +4,22 @@ import ( "github.com/onflow/flow-go/model/flow" ) -func WithWeight(weight uint64) flow.IdentityMapFunc { +// WithInitialWeight returns an anonymous function that assigns the given weight value +// to `Identity.InitialWeight`. This function is primarily intended for testing, as +// Identity structs should be immutable by convention. +func WithInitialWeight(weight uint64) flow.IdentityMapFunc[flow.Identity] { return func(identity flow.Identity) flow.Identity { - identity.Weight = weight + identity.InitialWeight = weight + return identity + } +} + +// WithEpochParticipationStatus returns an anonymous function that assigns the given epoch participation status value +// to `Identity.EpochParticipationStatus`. This function is primarily intended for testing, as +// Identity structs should be immutable by convention. 
+func WithEpochParticipationStatus(status flow.EpochParticipationStatus) flow.IdentityMapFunc[flow.Identity] { + return func(identity flow.Identity) flow.Identity { + identity.EpochParticipationStatus = status return identity } } diff --git a/model/flow/order/identifier.go b/model/flow/order/identifier.go deleted file mode 100644 index 0102005b1b8..00000000000 --- a/model/flow/order/identifier.go +++ /dev/null @@ -1,13 +0,0 @@ -package order - -import ( - "bytes" - - "github.com/onflow/flow-go/model/flow" -) - -// IdentifierCanonical is a function for sorting IdentifierList into -// canonical order -func IdentifierCanonical(id1 flow.Identifier, id2 flow.Identifier) bool { - return bytes.Compare(id1[:], id2[:]) < 0 -} diff --git a/model/flow/order/identity.go b/model/flow/order/identity.go deleted file mode 100644 index 5b78c7a3dd4..00000000000 --- a/model/flow/order/identity.go +++ /dev/null @@ -1,47 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package order - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// Canonical represents the canonical ordering for identity lists. -func Canonical(identity1 *flow.Identity, identity2 *flow.Identity) bool { - return IdentifierCanonical(identity1.NodeID, identity2.NodeID) -} - -// ByReferenceOrder return a function for sorting identities based on the order -// of the given nodeIDs -func ByReferenceOrder(nodeIDs []flow.Identifier) func(*flow.Identity, *flow.Identity) bool { - indices := make(map[flow.Identifier]uint) - for index, nodeID := range nodeIDs { - _, ok := indices[nodeID] - if ok { - panic("should never order by reference order with duplicate node IDs") - } - indices[nodeID] = uint(index) - } - return func(identity1 *flow.Identity, identity2 *flow.Identity) bool { - return indices[identity1.NodeID] < indices[identity2.NodeID] - } -} - -// IdentityListCanonical takes a list of identities and -// check if it's ordered in canonical order. -func IdentityListCanonical(identities flow.IdentityList) bool { - if len(identities) == 0 { - return true - } - - prev := identities[0].ID() - for i := 1; i < len(identities); i++ { - id := identities[i].ID() - if !IdentifierCanonical(prev, id) { - return false - } - prev = id - } - - return true -} diff --git a/model/flow/order/identity_test.go b/model/flow/order/identity_test.go deleted file mode 100644 index 2c79b61ab4a..00000000000 --- a/model/flow/order/identity_test.go +++ /dev/null @@ -1,18 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package order_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow/order" - "github.com/onflow/flow-go/utils/unittest" -) - -// Test the canonical ordering of identity and identifier match -func TestCanonicalOrderingMatch(t *testing.T) { - identities := unittest.IdentityListFixture(100) - require.Equal(t, identities.Sort(order.Canonical).NodeIDs(), identities.NodeIDs().Sort(order.IdentifierCanonical)) -} diff --git a/model/flow/payload.go b/model/flow/payload.go index a6af04000a3..d2c97dc5698 100644 --- a/model/flow/payload.go +++ b/model/flow/payload.go @@ -2,9 +2,12 @@ package flow import ( "encoding/json" + "fmt" ) // Payload is the actual content of each block. +// +//structwrite:immutable - mutations allowed only within the constructor type Payload struct { // Guarantees are ordered in execution order. May be empty, in which case // only the system chunk is executed for this block. 
@@ -14,28 +17,70 @@ type Payload struct {
 	// Seals must be internally connected, containing no seals with duplicate block IDs or heights.
 	// Seals may be empty. It presents a set, i.e. there is no protocol-defined ordering.
 	Seals    []*Seal
-	Receipts ExecutionReceiptMetaList
+	Receipts ExecutionReceiptStubList
 	Results  ExecutionResultList
+	// ProtocolStateID is the root hash of the protocol state. Per convention, this is the resulting
+	// state after applying all identity-changing operations potentially contained in the block.
+	// The block payload itself is validated with respect to the protocol state committed to by its parent.
+	// Thereby, we are only accepting protocol states that have been certified by a valid QC.
+	ProtocolStateID Identifier
 }

-// EmptyPayload returns an empty block payload.
-func EmptyPayload() Payload {
-	return Payload{}
+// UntrustedPayload is an untrusted input-only representation of the main consensus Payload,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
+//
+// An instance of UntrustedPayload should be validated and converted into
+// a trusted main consensus Payload using the NewPayload constructor.
+type UntrustedPayload Payload
+
+// NewPayload creates a new payload.
+// Construction of Payload is allowed only within the constructor.
+//
+// All errors indicate a valid Payload cannot be constructed from the input.
+func NewPayload(untrusted UntrustedPayload) (*Payload, error) {
+	if untrusted.ProtocolStateID == ZeroID {
+		return nil, fmt.Errorf("ProtocolStateID must not be zero")
+	}
+
+	return &Payload{
+		Guarantees:      untrusted.Guarantees,
+		Seals:           untrusted.Seals,
+		Receipts:        untrusted.Receipts,
+		Results:         untrusted.Results,
+		ProtocolStateID: untrusted.ProtocolStateID,
+	}, nil
+}
+
+// NewEmptyPayload returns an empty block payload.
+func NewEmptyPayload() *Payload {
+	return &Payload{}
 }

 // MarshalJSON defines the JSON marshalling for block payloads. Enforce a
 // consistent representation for empty slices.
 func (p Payload) MarshalJSON() ([]byte, error) {
 	if len(p.Guarantees) == 0 {
+		// It is safe to mutate here because this is part of custom JSON marshaling logic.
+		//nolint:structwrite
 		p.Guarantees = nil
 	}
 	if len(p.Receipts) == 0 {
+		// It is safe to mutate here because this is part of custom JSON marshaling logic.
+		//nolint:structwrite
 		p.Receipts = nil
 	}
 	if len(p.Seals) == 0 {
+		// It is safe to mutate here because this is part of custom JSON marshaling logic.
+		//nolint:structwrite
 		p.Seals = nil
 	}
 	if len(p.Results) == 0 {
+		// It is safe to mutate here because this is part of custom JSON marshaling logic.
+		//nolint:structwrite
 		p.Results = nil
 	}

@@ -47,20 +92,21 @@ func (p Payload) MarshalJSON() ([]byte, error) {

 // Hash returns the root hash of the payload.
 func (p Payload) Hash() Identifier {
-	collHash := MerkleRoot(GetIDs(p.Guarantees)...)
+	guaranteesHash := MerkleRoot(GetIDs(p.Guarantees)...)
 	sealHash := MerkleRoot(GetIDs(p.Seals)...)
 	recHash := MerkleRoot(GetIDs(p.Receipts)...)
 	resHash := MerkleRoot(GetIDs(p.Results)...)
-	return ConcatSum(collHash, sealHash, recHash, resHash)
+	return ConcatSum(guaranteesHash, sealHash, recHash, resHash, p.ProtocolStateID)
 }

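As with the other constructors introduced in this diff, a hypothetical call-site sketch: the only field `NewPayload` insists on is a non-zero `ProtocolStateID`, and the resulting hash commits to it alongside the four Merkle roots.

package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

// buildPayload is a hypothetical helper showing the constructor-only flow.
func buildPayload(protocolStateID flow.Identifier) (*flow.Payload, error) {
	payload, err := flow.NewPayload(flow.UntrustedPayload{
		// Guarantees, Seals, Receipts and Results may all be empty;
		// ProtocolStateID is the one field validated by the constructor.
		ProtocolStateID: protocolStateID,
	})
	if err != nil {
		return nil, fmt.Errorf("could not construct payload: %w", err)
	}
	// Two payloads differing only in ProtocolStateID hash differently,
	// since Hash() folds it in next to the four Merkle roots.
	fmt.Println(payload.Hash())
	return payload, nil
}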
 // Index returns the index for the payload.
 func (p Payload) Index() *Index {
 	idx := &Index{
-		CollectionIDs: GetIDs(p.Guarantees),
-		SealIDs:       GetIDs(p.Seals),
-		ReceiptIDs:    GetIDs(p.Receipts),
-		ResultIDs:     GetIDs(p.Results),
+		GuaranteeIDs:    GetIDs(p.Guarantees),
+		SealIDs:         GetIDs(p.Seals),
+		ReceiptIDs:      GetIDs(p.Receipts),
+		ResultIDs:       GetIDs(p.Results),
+		ProtocolStateID: p.ProtocolStateID,
 	}
 	return idx
 }
diff --git a/model/flow/payload_test.go b/model/flow/payload_test.go
index 52bf8369b86..c4a1f267648 100644
--- a/model/flow/payload_test.go
+++ b/model/flow/payload_test.go
@@ -14,7 +14,7 @@ import (

 func TestPayloadEncodeEmptyJSON(t *testing.T) {
 	// nil slices
-	payload := unittest.PayloadFixture()
+	payload := *flow.NewEmptyPayload()
 	payloadHash1 := payload.Hash()
 	encoded1, err := json.Marshal(payload)
 	require.NoError(t, err)
@@ -29,7 +29,7 @@ func TestPayloadEncodeEmptyJSON(t *testing.T) {
 	payloadHash2 := payload.Hash()
 	assert.Equal(t, payloadHash2, payloadHash1)
 	encoded2, err := json.Marshal(payload)
-	assert.Equal(t, `{"Guarantees":null,"Seals":null,"Receipts":null,"Results":null}`, string(encoded2))
+	assert.Equal(t, `{"Guarantees":null,"Seals":null,"Receipts":null,"Results":null,"ProtocolStateID":"0000000000000000000000000000000000000000000000000000000000000000"}`, string(encoded2))
 	assert.Equal(t, string(encoded1), string(encoded2))
 	require.NoError(t, err)
 	err = json.Unmarshal(encoded2, &decoded)
@@ -63,3 +63,35 @@ func TestPayloadEncodingMsgpack(t *testing.T) {
 	assert.Equal(t, payloadHash, decodedHash)
 	assert.Equal(t, payload, decoded)
 }
+
+// TestNewPayload verifies the behavior of the NewPayload constructor.
+// It ensures proper handling of both valid and invalid untrusted input fields.
+//
+// Test Cases:
+//
+// 1. Valid input:
+//   - Verifies that a properly populated UntrustedPayload results in a valid Payload.
+//
+// 2. Valid input with zero ProtocolStateID:
+//   - Ensures that an error is returned when ProtocolStateID is flow.ZeroID.
+func TestNewPayload(t *testing.T) {
+	t.Run("valid input", func(t *testing.T) {
+		payload := unittest.PayloadFixture(
+			unittest.WithProtocolStateID(unittest.IdentifierFixture()),
+		)
+
+		res, err := flow.NewPayload(flow.UntrustedPayload(payload))
+		require.NoError(t, err)
+		require.NotNil(t, res)
+	})
+
+	t.Run("valid input with zero ProtocolStateID", func(t *testing.T) {
+		payload := unittest.PayloadFixture()
+		payload.ProtocolStateID = flow.ZeroID
+
+		res, err := flow.NewPayload(flow.UntrustedPayload(payload))
+		require.Error(t, err)
+		require.Nil(t, res)
+		require.Contains(t, err.Error(), "ProtocolStateID must not be zero")
+	})
+}
diff --git a/model/flow/protocol_state.go b/model/flow/protocol_state.go
new file mode 100644
index 00000000000..420d77f543b
--- /dev/null
+++ b/model/flow/protocol_state.go
@@ -0,0 +1,721 @@
+package flow
+
+import (
+	"fmt"
+
+	"golang.org/x/exp/slices"
+)
+
+// DynamicIdentityEntry encapsulates a nodeID and the dynamic portion of an identity.
+type DynamicIdentityEntry struct {
+	NodeID  Identifier
+	Ejected bool
+}
+
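The `EqualTo` methods in this new file all share one nil-safe shape, restated generically below (a sketch for exposition, not shared library code): pointer equality first, which also covers the both-nil case, then a one-sided nil check, then the field comparison.

package example

// equalPtr captures the nil-safe comparison idiom: if the pointers are
// identical (including both nil), the values are trivially equal; if exactly
// one is nil, they are not; otherwise compare field by field.
func equalPtr[T any](a, b *T, fieldsEqual func(a, b *T) bool) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return fieldsEqual(a, b)
}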
+	if d == other {
+		return true
+	}
+	if d == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above)
+		return false
+	}
+
+	return d.NodeID == other.NodeID &&
+		d.Ejected == other.Ejected
+}
+
+type DynamicIdentityEntryList []*DynamicIdentityEntry
+
+// MinEpochStateEntry is the most compact snapshot of the epoch state and identity table (set of all nodes authorized to
+// be part of the network) at some specific block. This struct is optimized for persisting the identity table
+// in the database, in that it only includes data that is variable during the course of an epoch to avoid
+// storage of redundant data. The Epoch Setup and Commit events, which carry the portion of the identity
+// table that is constant throughout an epoch, are only referenced by their hash commitment.
+// Note that a MinEpochStateEntry does not hold the entire data for the identity table directly. It
+// allows reconstructing the identity table with the referenced epoch setup events and dynamic identities.
+//
+//structwrite:immutable - mutations allowed only within the constructor
+type MinEpochStateEntry struct {
+	PreviousEpoch *EpochStateContainer // minimal dynamic properties for previous epoch [optional, nil for first epoch after spork, genesis]
+	CurrentEpoch  EpochStateContainer  // minimal dynamic properties for current epoch
+	NextEpoch     *EpochStateContainer // minimal dynamic properties for next epoch [optional, nil iff we are in staking phase]
+
+	// EpochFallbackTriggered encodes whether an invalid epoch transition
+	// has been detected in this fork. Under normal operations, this value is false.
+	// Node-internally, the EpochFallback notification is emitted when a block is
+	// finalized that changes this flag from false to true.
+	// A state transition from true -> false is possible only when the protocol undergoes epoch recovery.
+	EpochFallbackTriggered bool
+}
+
+// UntrustedMinEpochStateEntry is an untrusted input-only representation of a MinEpochStateEntry,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
+//
+// An instance of UntrustedMinEpochStateEntry should be validated and converted into
+// a trusted MinEpochStateEntry using the NewMinEpochStateEntry constructor.
+type UntrustedMinEpochStateEntry MinEpochStateEntry
+
+// NewMinEpochStateEntry creates a new instance of MinEpochStateEntry.
+// Construction of MinEpochStateEntry is allowed only within the constructor.
+//
+// All errors indicate a valid MinEpochStateEntry cannot be constructed from the input.
+func NewMinEpochStateEntry(untrusted UntrustedMinEpochStateEntry) (*MinEpochStateEntry, error) {
+	if untrusted.CurrentEpoch.EqualTo(new(EpochStateContainer)) {
+		return nil, fmt.Errorf("current epoch must not be empty")
+	}
+	return &MinEpochStateEntry{
+		PreviousEpoch:          untrusted.PreviousEpoch,
+		CurrentEpoch:           untrusted.CurrentEpoch,
+		NextEpoch:              untrusted.NextEpoch,
+		EpochFallbackTriggered: untrusted.EpochFallbackTriggered,
+	}, nil
+}
+
+// EpochStateContainer holds the data pertaining to a _single_ epoch but no information about
+// any adjacent epochs. To perform a transition from epoch N to N+1, EpochStateContainers for
+// both epochs are necessary.
+//
+//structwrite:immutable - mutations allowed only within the constructor
+type EpochStateContainer struct {
+	// ID of the setup event for this epoch, never ZeroID.
+	SetupID Identifier
+	// ID of the commit event for this epoch. Could be ZeroID if the epoch was not committed.
+	CommitID Identifier
+	// ActiveIdentities contains the dynamic identity properties for the nodes that
+	// are active in this epoch. Active means that these nodes are authorized to contribute to
+	// extending the chain. Nodes are listed in `ActiveIdentities` if and only if
+	// they are part of the EpochSetup event for the respective epoch.
+	// The dynamic identity properties can change from block to block. Each non-deferred
+	// identity-mutating operation is applied independently to the `ActiveIdentities`
+	// of the relevant epoch's EpochStateContainer separately.
+	// Identities are always sorted in canonical order.
+	//
+	// Context: In comparison, nodes that are joining in the next epoch or left as of this
+	// epoch are only allowed to listen to the network but not actively contribute. Such
+	// nodes are _not_ part of `Identities`.
+	ActiveIdentities DynamicIdentityEntryList
+
+	// EpochExtensions contains potential EFM-extensions of this epoch. In the happy path
+	// it is nil or empty. An epoch in which Epoch-Fallback-Mode [EFM] is triggered will
+	// have at least one extension. By convention, the initial extension must satisfy
+	//   EpochSetup.FinalView + 1 = EpochExtensions[0].FirstView
+	// and each consecutive pair of slice elements must obey
+	//   EpochExtensions[i].FinalView + 1 = EpochExtensions[i+1].FirstView
+	EpochExtensions []EpochExtension
+}
+
+// UntrustedEpochStateContainer is an untrusted input-only representation of an EpochStateContainer,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
+//
+// An instance of UntrustedEpochStateContainer should be validated and converted into
+// a trusted EpochStateContainer using the NewEpochStateContainer constructor.
+type UntrustedEpochStateContainer EpochStateContainer
+
+// NewEpochStateContainer creates a new instance of EpochStateContainer.
+// Construction of EpochStateContainer is allowed only within the constructor.
+//
+// All errors indicate a valid EpochStateContainer cannot be constructed from the input.
+func NewEpochStateContainer(untrusted UntrustedEpochStateContainer) (*EpochStateContainer, error) {
+	if untrusted.SetupID == ZeroID {
+		return nil, fmt.Errorf("SetupID must not be zero")
+	}
+	if untrusted.ActiveIdentities == nil {
+		return nil, fmt.Errorf("ActiveIdentities must not be nil")
+	}
+	if !untrusted.ActiveIdentities.Sorted(IdentifierCanonical) {
+		return nil, fmt.Errorf("ActiveIdentities are not sorted")
+	}
+
+	return &EpochStateContainer{
+		SetupID:          untrusted.SetupID,
+		CommitID:         untrusted.CommitID,
+		ActiveIdentities: untrusted.ActiveIdentities,
+		EpochExtensions:  untrusted.EpochExtensions,
+	}, nil
+}
+
+// EpochExtension represents a range of views, which contiguously extends this epoch.
+type EpochExtension struct {
+	FirstView uint64
+	FinalView uint64
+}
+
+// EqualTo returns true if the two EpochExtension are equivalent.
+func (e *EpochExtension) EqualTo(other *EpochExtension) bool {
+	// Shortcut if `e` and `other` point to the same object; covers case where both are nil.
+	if e == other {
+		return true
+	}
+	if e == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above)
+		return false
+	}
+
+	return e.FirstView == other.FirstView &&
+		e.FinalView == other.FinalView
+}
+
+// ID returns an identifier for this EpochStateContainer by hashing internal fields.
+// Per convention, the ID of a `nil` EpochStateContainer is `flow.ZeroID`.
+func (c *EpochStateContainer) ID() Identifier {
+	if c == nil {
+		return ZeroID
+	}
+	return MakeID(c)
+}
+
+// EventIDs returns the `flow.EventIDs` with the hashes of the EpochSetup and EpochCommit events.
+// Per convention, for a `nil` EpochStateContainer, we return `flow.ZeroID` for both events.
+func (c *EpochStateContainer) EventIDs() EventIDs {
+	if c == nil {
+		return EventIDs{ZeroID, ZeroID}
+	}
+	return EventIDs{c.SetupID, c.CommitID}
+}
+
+// Copy returns a full copy of the entry.
+// Embedded Identities are deep-copied, _except_ for their keys, which are copied by reference.
+// Per convention, the copy of a `nil` EpochStateContainer is nil.
+func (c *EpochStateContainer) Copy() *EpochStateContainer {
+	if c == nil {
+		return nil
+	}
+	var ext []EpochExtension
+	if c.EpochExtensions != nil {
+		ext = make([]EpochExtension, len(c.EpochExtensions))
+		copy(ext, c.EpochExtensions)
+	}
+
+	// Constructor is skipped since we're copying an already-valid object.
+	//nolint:structwrite
+	return &EpochStateContainer{
+		SetupID:          c.SetupID,
+		CommitID:         c.CommitID,
+		ActiveIdentities: c.ActiveIdentities.Copy(),
+		EpochExtensions:  ext,
+	}
+}
+
+// EqualTo returns true if the two EpochStateContainer are equivalent.
+func (c *EpochStateContainer) EqualTo(other *EpochStateContainer) bool {
+	// Shortcut if `c` and `other` point to the same object; covers case where both are nil.
+	if c == other {
+		return true
+	}
+	if c == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above)
+		return false
+	}
+	// both are not nil, so we can compare the fields
+	if c.SetupID != other.SetupID {
+		return false
+	}
+	if c.CommitID != other.CommitID {
+		return false
+	}
+	if !slices.EqualFunc(c.ActiveIdentities, other.ActiveIdentities, func(e1 *DynamicIdentityEntry, e2 *DynamicIdentityEntry) bool {
+		return e1.EqualTo(e2)
+	}) {
+		return false
+	}
+
+	if !slices.EqualFunc(c.EpochExtensions, other.EpochExtensions, func(e1 EpochExtension, e2 EpochExtension) bool {
+		return e1.EqualTo(&e2)
+	}) {
+		return false
+	}
+
+	return true
+}
+
+// EpochStateEntry is a MinEpochStateEntry that has additional fields that are cached from the
+// storage layer for convenience. It holds all the information needed to construct a snapshot of
+// the identity table (set of all nodes authorized to be part of the network) at some specific
+// block without any database queries. Specifically, `MinEpochStateEntry` is a snapshot of the
+// variable portion of the identity table. The portion of the identity table that is constant
+// throughout an epoch is contained in the Epoch Setup and Epoch Commit events.
+// Convention:
+//   - CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil.
+//   - PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Can be nil.
+//   - NextEpochSetup and NextEpochCommit are for the same epoch. Can be nil.
+//
+//structwrite:immutable - mutations allowed only within the constructor.
+type EpochStateEntry struct {
+	*MinEpochStateEntry
+
+	// by convention, all epoch service events are immutable
+	PreviousEpochSetup  *EpochSetup
+	PreviousEpochCommit *EpochCommit
+	CurrentEpochSetup   *EpochSetup
+	CurrentEpochCommit  *EpochCommit
+	NextEpochSetup      *EpochSetup
+	NextEpochCommit     *EpochCommit
+}
+
+// UntrustedEpochStateEntry is an untrusted input-only representation of an EpochStateEntry,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
+//
+// An instance of UntrustedEpochStateEntry should be validated and converted into
+// a trusted EpochStateEntry using the NewEpochStateEntry constructor.
+type UntrustedEpochStateEntry EpochStateEntry
+
+// NewEpochStateEntry constructs an EpochStateEntry from a MinEpochStateEntry and additional data.
+//
+// All errors indicate a valid EpochStateEntry cannot be constructed from the input.
+func NewEpochStateEntry(untrusted UntrustedEpochStateEntry) (*EpochStateEntry, error) {
+	// If previous epoch is specified: ensure respective epoch service events are not nil and consistent with commitments in `MinEpochStateEntry.PreviousEpoch`
+	if untrusted.PreviousEpoch != nil {
+		if untrusted.PreviousEpoch.SetupID != untrusted.PreviousEpochSetup.ID() { // calling ID() will panic if the EpochSetup event is nil
+			return nil, fmt.Errorf("supplied previous epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", untrusted.PreviousEpochSetup.ID(), untrusted.PreviousEpoch.SetupID)
+		}
+		if untrusted.PreviousEpoch.CommitID != untrusted.PreviousEpochCommit.ID() { // calling ID() will panic if the EpochCommit event is nil
+			return nil, fmt.Errorf("supplied previous epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", untrusted.PreviousEpochCommit.ID(), untrusted.PreviousEpoch.CommitID)
+		}
+	} else {
+		if untrusted.PreviousEpochSetup != nil {
+			return nil, fmt.Errorf("no previous epoch but gotten non-nil EpochSetup event")
+		}
+		if untrusted.PreviousEpochCommit != nil {
+			return nil, fmt.Errorf("no previous epoch but gotten non-nil EpochCommit event")
+		}
+	}
+
+	// For current epoch: ensure respective epoch service events are not nil and consistent with commitments in `MinEpochStateEntry.CurrentEpoch`
+	if untrusted.CurrentEpoch.SetupID != untrusted.CurrentEpochSetup.ID() { // calling ID() will panic if the EpochSetup event is nil
+		return nil, fmt.Errorf("supplied current epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", untrusted.CurrentEpochSetup.ID(), untrusted.CurrentEpoch.SetupID)
+	}
+	if untrusted.CurrentEpoch.CommitID != untrusted.CurrentEpochCommit.ID() { // calling ID() will panic if the EpochCommit event is nil
+		return nil, fmt.Errorf("supplied current epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", untrusted.CurrentEpochCommit.ID(), untrusted.CurrentEpoch.CommitID)
+	}
+
+	// If we are in staking phase (i.e. epochState.NextEpoch == nil):
+	//   (1) Full identity table contains active identities from current epoch.
+	//       If previous epoch exists, we add nodes from previous epoch that are leaving in the current epoch with `EpochParticipationStatusLeaving` status.
+	// Otherwise, we are in epoch setup or epoch commit phase (i.e. epochState.NextEpoch ≠ nil):
+	//   (2a) Full identity table contains active identities from current epoch + nodes joining in next epoch with `EpochParticipationStatusJoining` status.
+	//   (2b) Furthermore, we also build the full identity table for the next epoch's staking phase:
+	//        active identities from next epoch + nodes from current epoch that are leaving at the end of the current epoch with `flow.EpochParticipationStatusLeaving` status.
+	nextEpoch := untrusted.NextEpoch
+	if nextEpoch == nil { // in staking phase: build full identity table for current epoch according to (1)
+		if untrusted.NextEpochSetup != nil {
+			return nil, fmt.Errorf("no next epoch but gotten non-nil EpochSetup event")
+		}
+		if untrusted.NextEpochCommit != nil {
+			return nil, fmt.Errorf("no next epoch but gotten non-nil EpochCommit event")
+		}
+	} else { // epochState.NextEpoch ≠ nil, i.e. we are in epoch setup or epoch commit phase
+		// ensure respective epoch service events are not nil and consistent with commitments in `MinEpochStateEntry.NextEpoch`
+		if nextEpoch.SetupID != untrusted.NextEpochSetup.ID() {
+			return nil, fmt.Errorf("supplied next epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", nextEpoch.SetupID, untrusted.NextEpochSetup.ID())
+		}
+		if nextEpoch.CommitID != ZeroID {
+			if nextEpoch.CommitID != untrusted.NextEpochCommit.ID() {
+				return nil, fmt.Errorf("supplied next epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", nextEpoch.CommitID, untrusted.NextEpochCommit.ID())
+			}
+		} else {
+			if untrusted.NextEpochCommit != nil {
+				return nil, fmt.Errorf("next epoch not yet committed but got EpochCommit event")
+			}
+		}
+	}
+	return &EpochStateEntry{
+		MinEpochStateEntry:  untrusted.MinEpochStateEntry,
+		PreviousEpochSetup:  untrusted.PreviousEpochSetup,
+		PreviousEpochCommit: untrusted.PreviousEpochCommit,
+		CurrentEpochSetup:   untrusted.CurrentEpochSetup,
+		CurrentEpochCommit:  untrusted.CurrentEpochCommit,
+		NextEpochSetup:      untrusted.NextEpochSetup,
+		NextEpochCommit:     untrusted.NextEpochCommit,
+	}, nil
+}
+
+// RichEpochStateEntry is an EpochStateEntry that additionally holds the canonical representation of the
+// identity table (set of all nodes authorized to be part of the network) at some specific block.
+// This data structure is optimized for frequent reads of the same identity table, which is
+// the prevalent case during normal operations (node ejections and epoch fallback are rare).
+// Conventions:
+//   - Invariants inherited from EpochStateEntry.
+//   - CurrentEpochIdentityTable is the full (dynamic) identity table for the current epoch.
+//     Identities are sorted in canonical order. Without duplicates. Never nil.
+//   - NextEpochIdentityTable is the full (dynamic) identity table for the next epoch. Can be nil.
+//
+// NOTE regarding `CurrentEpochIdentityTable` and `NextEpochIdentityTable`:
+// The Identity Table is generally a super-set of the identities listed in the Epoch
+// Service Events for the respective epoch. This is because the service events only list
+// nodes that are authorized to _actively_ contribute to extending the chain. In contrast,
+// the Identity Table additionally contains nodes (with weight zero) from the previous or
+// upcoming epoch, which are transitioning into / out of the network and are only allowed
+// to listen but not to actively contribute.
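+//
+// Informal sketch (see NewRichEpochStateEntry for the authoritative logic): during the
+// setup or commit phase for epoch N+1, the tables are approximately
+//
+//	CurrentEpochIdentityTable ≈ active(N)   ∪ joining(N+1)  // joining nodes carry status Joining
+//	NextEpochIdentityTable    ≈ active(N+1) ∪ leaving(N)    // leaving nodes carry status Leaving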
+//
+//structwrite:immutable - mutations allowed only within the constructor
+type RichEpochStateEntry struct {
+	*EpochStateEntry
+
+	CurrentEpochIdentityTable IdentityList
+	NextEpochIdentityTable    IdentityList
+}
+
+// NewRichEpochStateEntry constructs a RichEpochStateEntry from an EpochStateEntry.
+// Construction of RichEpochStateEntry is allowed only within the constructor.
+//
+// All errors indicate a valid RichEpochStateEntry cannot be constructed from the input.
+func NewRichEpochStateEntry(epochState *EpochStateEntry) (*RichEpochStateEntry, error) {
+	if epochState == nil {
+		return nil, fmt.Errorf("epoch state must not be nil")
+	}
+	var currentEpochIdentityTable IdentityList
+	nextEpochIdentityTable := IdentityList{}
+	// If we are in staking phase (i.e. epochState.NextEpoch == nil):
+	//   (1) Full identity table contains active identities from current epoch.
+	//       If previous epoch exists, we add nodes from previous epoch that are leaving in the current epoch with status `EpochParticipationStatusLeaving`.
+	// Otherwise, we are in epoch setup or epoch commit phase (i.e. epochState.NextEpoch ≠ nil):
+	//   (2a) Full identity table contains active identities from current epoch + nodes joining in next epoch with status `EpochParticipationStatusJoining`.
+	//   (2b) Furthermore, we also build the full identity table for the next epoch's staking phase:
+	//        active identities from next epoch + nodes from current epoch that are leaving at the end of the current epoch with `flow.EpochParticipationStatusLeaving` status.
+	var err error
+	nextEpoch := epochState.NextEpoch
+	if nextEpoch == nil { // in staking phase: build full identity table for current epoch according to (1)
+		var previousEpochIdentitySkeletons IdentitySkeletonList
+		var previousEpochDynamicIdentities DynamicIdentityEntryList
+		if previousEpochSetup := epochState.PreviousEpochSetup; previousEpochSetup != nil {
+			previousEpochIdentitySkeletons = previousEpochSetup.Participants
+			previousEpochDynamicIdentities = epochState.PreviousEpoch.ActiveIdentities
+		}
+		currentEpochIdentityTable, err = BuildIdentityTable(
+			epochState.CurrentEpochSetup.Participants,
+			epochState.CurrentEpoch.ActiveIdentities,
+			previousEpochIdentitySkeletons,
+			previousEpochDynamicIdentities,
+			EpochParticipationStatusLeaving,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("could not build identity table for staking phase: %w", err)
+		}
+	} else { // epochState.NextEpoch ≠ nil, i.e. we are in epoch setup or epoch commit phase
+		currentEpochIdentityTable, err = BuildIdentityTable(
+			epochState.CurrentEpochSetup.Participants,
+			epochState.CurrentEpoch.ActiveIdentities,
+			epochState.NextEpochSetup.Participants,
+			nextEpoch.ActiveIdentities,
+			EpochParticipationStatusJoining,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("could not build identity table for setup/commit phase: %w", err)
+		}
+
+		nextEpochIdentityTable, err = BuildIdentityTable(
+			epochState.NextEpochSetup.Participants,
+			nextEpoch.ActiveIdentities,
+			epochState.CurrentEpochSetup.Participants,
+			epochState.CurrentEpoch.ActiveIdentities,
+			EpochParticipationStatusLeaving,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("could not build next epoch identity table: %w", err)
+		}
+	}
+
+	return &RichEpochStateEntry{
+		EpochStateEntry:           epochState,
+		CurrentEpochIdentityTable: currentEpochIdentityTable,
+		NextEpochIdentityTable:    nextEpochIdentityTable,
+	}, nil
+}
+
+// ID returns the hash of the entry, computed over all fields.
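+// Per convention, the ID of a `nil` MinEpochStateEntry is `flow.ZeroID`.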
+func (e *MinEpochStateEntry) ID() Identifier {
+	if e == nil {
+		return ZeroID
+	}
+	return MakeID(e)
+}
+
+// Copy returns a full copy of the entry.
+// Embedded Identities are deep-copied, _except_ for their keys, which are copied by reference.
+func (e *MinEpochStateEntry) Copy() *MinEpochStateEntry {
+	if e == nil {
+		return nil
+	}
+	// Constructor is skipped since we're copying an already-valid object.
+	//nolint:structwrite
+	return &MinEpochStateEntry{
+		PreviousEpoch:          e.PreviousEpoch.Copy(),
+		CurrentEpoch:           *e.CurrentEpoch.Copy(),
+		NextEpoch:              e.NextEpoch.Copy(),
+		EpochFallbackTriggered: e.EpochFallbackTriggered,
+	}
+}
+
+// Copy returns a full copy of the EpochStateEntry.
+//   - Embedded service events are copied by reference (not deep-copied).
+func (e *EpochStateEntry) Copy() *EpochStateEntry {
+	if e == nil {
+		return nil
+	}
+
+	// Constructor is skipped since we're copying an already-valid object.
+	//nolint:structwrite
+	return &EpochStateEntry{
+		MinEpochStateEntry:  e.MinEpochStateEntry.Copy(),
+		PreviousEpochSetup:  e.PreviousEpochSetup,
+		PreviousEpochCommit: e.PreviousEpochCommit,
+		CurrentEpochSetup:   e.CurrentEpochSetup,
+		CurrentEpochCommit:  e.CurrentEpochCommit,
+		NextEpochSetup:      e.NextEpochSetup,
+		NextEpochCommit:     e.NextEpochCommit,
+	}
+}
+
+// Copy returns a full copy of the RichEpochStateEntry.
+//   - Embedded service events are copied by reference (not deep-copied).
+//   - CurrentEpochIdentityTable and NextEpochIdentityTable are deep-copied, _except_ for their keys, which are copied by reference.
+func (e *RichEpochStateEntry) Copy() *RichEpochStateEntry {
+	if e == nil {
+		return nil
+	}
+	// Constructor is skipped since we're copying an already-valid object.
+	//nolint:structwrite
+	return &RichEpochStateEntry{
+		EpochStateEntry:           e.EpochStateEntry.Copy(),
+		CurrentEpochIdentityTable: e.CurrentEpochIdentityTable.Copy(),
+		NextEpochIdentityTable:    e.NextEpochIdentityTable.Copy(),
+	}
+}
+
+// CurrentEpochFinalView returns the final view of the current epoch, taking into account possible epoch extensions.
+// If there are no epoch extensions, the final view is the final view of the current epoch setup;
+// otherwise, it is the final view of the last epoch extension.
+func (e *EpochStateEntry) CurrentEpochFinalView() uint64 {
+	l := len(e.CurrentEpoch.EpochExtensions)
+	if l > 0 {
+		return e.CurrentEpoch.EpochExtensions[l-1].FinalView
+	}
+	return e.CurrentEpochSetup.FinalView
+}
+
+// EpochPhase returns the current epoch phase.
+// The receiver MinEpochStateEntry must be properly constructed.
+// See flow.EpochPhase for detailed documentation.
+func (e *MinEpochStateEntry) EpochPhase() EpochPhase {
+	// CAUTION: the logic below that deduces the EpochPhase must be consistent with `epochs.FallbackStateMachine`,
+	// which sets the fields we are using here. Specifically, we require that the FallbackStateMachine clears out
+	// any tentative values for a subsequent epoch _unless_ that epoch is already committed.
+	if e.EpochFallbackTriggered {
+		// If the next epoch has been committed, we are in EpochPhaseCommitted regardless of EFM status.
+		// We will enter EpochPhaseFallback after completing the transition into the committed next epoch.
+		if e.NextEpoch != nil && e.NextEpoch.CommitID != ZeroID {
+			return EpochPhaseCommitted
+		}
+		// If the next epoch has not been committed and EFM is triggered, we immediately enter EpochPhaseFallback.
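+		// In summary, the mapping implemented by this branch is:
+		//   EFM triggered, next epoch committed     → EpochPhaseCommitted (returned above)
+		//   EFM triggered, next epoch not committed → EpochPhaseFallback (returned below)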
+		return EpochPhaseFallback
+	}
+
+	// The epoch phase is determined by how much information we have about the next epoch
+	if e.NextEpoch == nil {
+		return EpochPhaseStaking // if no information about the next epoch is known, we are in the Staking Phase
+	}
+	// Per convention, NextEpoch ≠ nil if and only if NextEpoch.SetupID is specified.
+	if e.NextEpoch.CommitID == ZeroID {
+		return EpochPhaseSetup // if only the Setup event is known for the next epoch but not the Commit event, we are in the Setup Phase
+	}
+	return EpochPhaseCommitted // if the Setup and Commit events are known for the next epoch, we are in the Committed Phase
+}
+
+// EpochCounter returns the current epoch counter.
+// The receiver EpochStateEntry must be properly constructed.
+func (e *EpochStateEntry) EpochCounter() uint64 {
+	return e.CurrentEpochSetup.Counter
+}
+
+// Lookup builds a map from NodeID to the corresponding DynamicIdentityEntry.
+func (ll DynamicIdentityEntryList) Lookup() map[Identifier]*DynamicIdentityEntry {
+	result := make(map[Identifier]*DynamicIdentityEntry, len(ll))
+	for _, entry := range ll {
+		result[entry.NodeID] = entry
+	}
+	return result
+}
+
+// Sorted returns whether the list is sorted by the input ordering.
+func (ll DynamicIdentityEntryList) Sorted(less IdentifierOrder) bool {
+	return slices.IsSortedFunc(ll, func(lhs, rhs *DynamicIdentityEntry) int {
+		return less(lhs.NodeID, rhs.NodeID)
+	})
+}
+
+// ByNodeID gets a node from the list by node ID.
+func (ll DynamicIdentityEntryList) ByNodeID(nodeID Identifier) (*DynamicIdentityEntry, bool) {
+	for _, identity := range ll {
+		if identity.NodeID == nodeID {
+			return identity, true
+		}
+	}
+	return nil, false
+}
+
+// Copy returns a copy of the DynamicIdentityEntryList. The resulting slice uses
+// a different backing array, meaning appends and insert operations on either slice
+// are guaranteed to only affect that slice.
+//
+// Copy should be used when modifying an existing identity list by either
+// appending new elements, re-ordering, or inserting new elements at an
+// existing index.
+//
+// CAUTION:
+// All Identity fields are deep-copied, _except_ for their keys, which
+// are copied by reference.
+func (ll DynamicIdentityEntryList) Copy() DynamicIdentityEntryList {
+	lenList := len(ll)
+	dup := make(DynamicIdentityEntryList, 0, lenList)
+	for i := 0; i < lenList; i++ {
+		// copy the object
+		next := *(ll[i])
+		dup = append(dup, &next)
+	}
+	return dup
+}
+
+// Sort sorts the list by the input ordering. Returns a new, sorted list without modifying the input.
+// CAUTION:
+// All Identity fields are deep-copied, _except_ for their keys, which are copied by reference.
+func (ll DynamicIdentityEntryList) Sort(less IdentifierOrder) DynamicIdentityEntryList {
+	dup := ll.Copy()
+	slices.SortFunc(dup, func(lhs, rhs *DynamicIdentityEntry) int {
+		return less(lhs.NodeID, rhs.NodeID)
+	})
+	return dup
+}
+
+// BuildIdentityTable constructs the full identity table for the target epoch by combining data from:
+//  1. The IdentitySkeletons for the nodes that are _active_ in the target epoch
+//     (recorded in EpochSetup event and immutable throughout the epoch).
+//  2. The Dynamic Identities for the nodes that are _active_ in the target epoch (i.e. the dynamic identity
+//     fields for the IdentitySkeletons contained in the EpochSetup event for the respective epoch).
+//
+// Optionally, identity information for an adjacent epoch is given if and only if an adjacent epoch exists. For
+// a target epoch N, the epochs N-1 and N+1 are defined to be adjacent. Adjacent epochs do not necessarily exist
+// (e.g. consider a spork comprising only a single epoch), in which case the respective inputs are nil or empty.
+//  3. [optional] An adjacent epoch's IdentitySkeletons as recorded in the adjacent epoch's setup event.
+//  4. [optional] An adjacent epoch's Dynamic Identities.
+//  5. The participation status to assign to the adjacent epoch's identities; this is either joining
+//     or leaving, depending on the epoch phase.
+//
+// The function enforces that the input slices pertaining to the same epoch contain the same identities
+// (compared by nodeID) in the same order. Otherwise, an exception is returned.
+// No errors are expected during normal operation. All errors indicate inconsistent or invalid inputs.
+func BuildIdentityTable(
+	targetEpochIdentitySkeletons IdentitySkeletonList,
+	targetEpochDynamicIdentities DynamicIdentityEntryList,
+	adjacentEpochIdentitySkeletons IdentitySkeletonList,
+	adjacentEpochDynamicIdentities DynamicIdentityEntryList,
+	adjacentIdentitiesStatus EpochParticipationStatus,
+) (IdentityList, error) {
+	if adjacentIdentitiesStatus != EpochParticipationStatusLeaving &&
+		adjacentIdentitiesStatus != EpochParticipationStatusJoining {
+		return nil, fmt.Errorf("invalid adjacent identity status, expect %s or %s, got %s",
+			EpochParticipationStatusLeaving.String(),
+			EpochParticipationStatusJoining.String(),
+			adjacentIdentitiesStatus)
+	}
+	targetEpochParticipants, err := ComposeFullIdentities(targetEpochIdentitySkeletons, targetEpochDynamicIdentities, EpochParticipationStatusActive)
+	if err != nil {
+		return nil, fmt.Errorf("could not reconstruct participants for target epoch: %w", err)
+	}
+	adjacentEpochParticipants, err := ComposeFullIdentities(adjacentEpochIdentitySkeletons, adjacentEpochDynamicIdentities, adjacentIdentitiesStatus)
+	if err != nil {
+		return nil, fmt.Errorf("could not reconstruct participants for adjacent epoch: %w", err)
+	}
+
+	// Combine the participants of the current and adjacent epoch. The method `GenericIdentityList.Union`
+	// already implements the following required conventions:
+	//  1. Preference for IdentitySkeleton of the target epoch:
+	//     In case an IdentitySkeleton with the same NodeID exists in the target epoch as well as
+	//     in the adjacent epoch, we use the IdentitySkeleton for the target epoch (for example,
+	//     to account for changes of keys, address, initial weight, etc).
+	//  2. Canonical ordering
+	return targetEpochParticipants.Union(adjacentEpochParticipants), nil
+}
+
+// DynamicIdentityEntryListFromIdentities converts an IdentityList to a DynamicIdentityEntryList.
+func DynamicIdentityEntryListFromIdentities(identities IdentityList) DynamicIdentityEntryList {
+	dynamicIdentities := make(DynamicIdentityEntryList, 0, len(identities))
+	for _, identity := range identities {
+		dynamicIdentities = append(dynamicIdentities, &DynamicIdentityEntry{
+			NodeID:  identity.NodeID,
+			Ejected: identity.IsEjected(),
+		})
+	}
+	return dynamicIdentities
+}
+
+// ComposeFullIdentities combines identity skeletons and dynamic identities to produce a flow.IdentityList.
+// It enforces that the input slices `skeletons` and `dynamics` list the same identities (compared by nodeID)
+// in the same order. Otherwise, an exception is returned. For each identity i, we set
+// `i.EpochParticipationStatus` to the `defaultEpochParticipationStatus` _unless_ i is ejected.
+// No errors are expected during normal operations.
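+//
+// Illustrative usage (a sketch; assumes `skeletons` and `dynamics` list the same
+// nodes in the same canonical order):
+//
+//	identities, err := ComposeFullIdentities(skeletons, dynamics, EpochParticipationStatusActive)
+//	// on success, identities[i].EpochParticipationStatus is Active,
+//	// unless dynamics[i].Ejected is set, in which case it is Ejected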
+func ComposeFullIdentities(
+	skeletons IdentitySkeletonList,
+	dynamics DynamicIdentityEntryList,
+	defaultEpochParticipationStatus EpochParticipationStatus,
+) (IdentityList, error) {
+	// sanity check: the lists of skeletons and dynamic identities must have the same length
+	if len(skeletons) != len(dynamics) {
+		return nil, fmt.Errorf("invalid number of identities to reconstruct: expected %d, got %d", len(skeletons), len(dynamics))
+	}
+
+	// reconstruct identities from skeleton and dynamic parts
+	var result IdentityList
+	for i := range dynamics {
+		// sanity check: identities should be sorted in the same order
+		if dynamics[i].NodeID != skeletons[i].NodeID {
+			return nil, fmt.Errorf("identities in protocol state are not consistently ordered: expected %s, got %s", skeletons[i].NodeID, dynamics[i].NodeID)
+		}
+		status := defaultEpochParticipationStatus
+		if dynamics[i].Ejected {
+			status = EpochParticipationStatusEjected
+		}
+		result = append(result, &Identity{
+			IdentitySkeleton: *skeletons[i],
+			DynamicIdentity: DynamicIdentity{
+				EpochParticipationStatus: status,
+			},
+		})
+	}
+	return result, nil
+}
+
+// PSKeyValueStoreData is a binary blob with a version attached, specifying the format
+// of the marshaled data. In a nutshell, it serves as a binary snapshot of a ProtocolKVStore.
+// This structure is useful for version-agnostic storage, where snapshots with different versions
+// can co-exist. The PSKeyValueStoreData is a generic format that can be later decoded to
+// potentially different strongly typed structures based on version. When reading from the store,
+// callers must know how to deal with the binary representation.
+type PSKeyValueStoreData struct {
+	Version uint64
+	Data    []byte
+}
+
+// Equal returns true if the two PSKeyValueStoreData are equivalent.
+func (d PSKeyValueStoreData) Equal(d2 *PSKeyValueStoreData) bool {
+	return d.Version == d2.Version &&
+		slices.Equal(d.Data, d2.Data)
+}
+
+// VersionedInstanceParams is a binary instance params blob with a version attached, specifying the format
+// of the marshaled data. In a nutshell, it serves as a binary snapshot of an InstanceParams.
+// The VersionedInstanceParams is a generic format that can be later decoded to
+// potentially different strongly typed structures based on version. When reading from the store,
+// callers must know how to deal with the binary representation.
+type VersionedInstanceParams struct {
+	Version uint64
+	Data    []byte
+}
diff --git a/model/flow/protocol_state_test.go b/model/flow/protocol_state_test.go
new file mode 100644
index 00000000000..a40f5b24f1d
--- /dev/null
+++ b/model/flow/protocol_state_test.go
@@ -0,0 +1,1289 @@
+package flow_test
+
+import (
+	"fmt"
+	"math/rand"
+	"testing"
+
+	clone "github.com/huandu/go-clone/generic"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestEpochProtocolStateEntry_EpochPhase tests that all possible instances of a MinEpochStateEntry
+// correctly compute the current epoch phase, taking into account EFM status and incorporated service events.
+func TestEpochProtocolStateEntry_EpochPhase(t *testing.T) {
+
+	t.Run("EFM triggered", func(t *testing.T) {
+		t.Run("tentatively in staking phase", func(t *testing.T) {
+			entry := unittest.EpochProtocolStateEntryFixture(flow.EpochPhaseStaking, true)
+			assert.Equal(t, flow.EpochPhaseFallback, entry.EpochPhase())
+		})
+		t.Run("tentatively in setup phase", func(t *testing.T) {
+			// Caution, the following is a degenerate edge case that should _never_ be generated by the
+			// FallbackStateMachine. Specifically, the FallbackStateMachine should clear out
+			// any tentative values for a subsequent epoch _unless_ that epoch is already committed.
+			entry := unittest.EpochProtocolStateEntryFixture(flow.EpochPhaseSetup, true)
+			assert.Equal(t, flow.EpochPhaseFallback, entry.EpochPhase())
+		})
+		t.Run("tentatively in committed phase", func(t *testing.T) {
+			entry := unittest.EpochProtocolStateEntryFixture(flow.EpochPhaseCommitted, true)
+			assert.Equal(t, flow.EpochPhaseCommitted, entry.EpochPhase())
+		})
+	})
+
+	t.Run("EFM not triggered", func(t *testing.T) {
+		t.Run("tentatively in staking phase", func(t *testing.T) {
+			entry := unittest.EpochProtocolStateEntryFixture(flow.EpochPhaseStaking, false)
+			assert.Equal(t, flow.EpochPhaseStaking, entry.EpochPhase())
+		})
+		t.Run("tentatively in setup phase", func(t *testing.T) {
+			entry := unittest.EpochProtocolStateEntryFixture(flow.EpochPhaseSetup, false)
+			assert.Equal(t, flow.EpochPhaseSetup, entry.EpochPhase())
+		})
+		t.Run("tentatively in committed phase", func(t *testing.T) {
+			entry := unittest.EpochProtocolStateEntryFixture(flow.EpochPhaseCommitted, false)
+			assert.Equal(t, flow.EpochPhaseCommitted, entry.EpochPhase())
+		})
+	})
+}
+
+// TestNewRichProtocolStateEntry checks that NewRichEpochStateEntry creates valid identity tables depending on
+// the state of the epoch, which is derived from the protocol state entry.
+// It checks for correct handling of both valid and invalid inputs, ensuring that the function
+// correctly validates epoch service event consistency and presence.
+//
+// Valid Cases:
+//
+//  1. staking-root-protocol-state:
+//     - No previous epoch; current epoch is in staking phase.
+//
+//  2. staking-phase:
+//     - Previous and current epochs exist; no next epoch.
+//
+//  3. setup-phase:
+//     - Next epoch setup is present; next epoch commit is nil.
+//
+//  4. setup-after-spork:
+//     - First epoch after spork; no previous epoch; next epoch setup is present.
+//
+//  5. commit-phase:
+//     - Previous, current, and next epochs are fully populated.
+//
+//  6. commit-after-spork:
+//     - First epoch after spork; current and next epochs are committed.
+//
+// Invalid Cases:
+//
+//  7. invalid - epoch state is nil:
+//     - Verifies that the constructor returns an error if EpochStateEntry is nil.
+func TestNewRichProtocolStateEntry(t *testing.T) {
+	// 1.
Conditions right after a spork: + // * no previous epoch exists from the perspective of the freshly-sporked protocol state + // * network is currently in the staking phase for the next epoch, hence no service events for the next epoch exist + t.Run("staking-root-protocol-state", func(t *testing.T) { + setup := unittest.EpochSetupFixture() + currentEpochCommit := unittest.EpochCommitFixture() + identities := make(flow.DynamicIdentityEntryList, 0, len(setup.Participants)) + for _, identity := range setup.Participants { + identities = append(identities, &flow.DynamicIdentityEntry{ + NodeID: identity.NodeID, + Ejected: false, + }) + } + minStateEntry := &flow.MinEpochStateEntry{ + PreviousEpoch: nil, + CurrentEpoch: flow.EpochStateContainer{ + SetupID: setup.ID(), + CommitID: currentEpochCommit.ID(), + ActiveIdentities: identities, + }, + EpochFallbackTriggered: false, + } + stateEntry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: minStateEntry, + PreviousEpochSetup: nil, + PreviousEpochCommit: nil, + CurrentEpochSetup: setup, + CurrentEpochCommit: currentEpochCommit, + NextEpochSetup: nil, + NextEpochCommit: nil, + }, + ) + assert.NoError(t, err) + assert.Equal(t, flow.EpochPhaseStaking, stateEntry.EpochPhase()) + + richStateEntry, err := flow.NewRichEpochStateEntry(stateEntry) + assert.NoError(t, err) + + expectedIdentities, err := flow.BuildIdentityTable( + setup.Participants, + identities, + nil, + nil, + flow.EpochParticipationStatusLeaving, + ) + assert.NoError(t, err) + assert.Equal(t, expectedIdentities, richStateEntry.CurrentEpochIdentityTable, "should be equal to current epoch setup participants") + }) + + // 2. Common situation during the staking phase for epoch N+1 + // * we are currently in Epoch N + // * previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) + // * network is currently in the staking phase for the next epoch, hence no service events for the next epoch exist + t.Run("staking-phase", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture() + epochStateEntry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: nil, + NextEpochCommit: nil, + }, + ) + assert.NoError(t, err) + assert.Equal(t, flow.EpochPhaseStaking, epochStateEntry.EpochPhase()) + + epochRichStateEntry, err := flow.NewRichEpochStateEntry(epochStateEntry) + assert.NoError(t, err) + expectedIdentities, err := flow.BuildIdentityTable( + stateEntryFixture.CurrentEpochSetup.Participants, + stateEntryFixture.CurrentEpoch.ActiveIdentities, + stateEntryFixture.PreviousEpochSetup.Participants, + stateEntryFixture.PreviousEpoch.ActiveIdentities, + flow.EpochParticipationStatusLeaving, + ) + assert.NoError(t, err) + assert.Equal(t, expectedIdentities, epochRichStateEntry.CurrentEpochIdentityTable, "should be equal to current epoch setup participants + previous epoch setup participants") + assert.Nil(t, epochRichStateEntry.NextEpoch) + }) + + // 3. Common situation during the epoch setup phase for epoch N+1 + // * we are currently in Epoch N + // * previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) + // * network is currently in the setup phase for the next epoch, i.e. 
EpochSetup event (starting setup phase) has already been observed + t.Run("setup-phase", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpochCommit = nil + entry.NextEpoch.CommitID = flow.ZeroID + }) + + stateEntry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: nil, + }, + ) + assert.NoError(t, err) + assert.Equal(t, flow.EpochPhaseSetup, stateEntry.EpochPhase()) + + richStateEntry, err := flow.NewRichEpochStateEntry(stateEntry) + assert.NoError(t, err) + expectedIdentities, err := flow.BuildIdentityTable( + stateEntryFixture.CurrentEpochSetup.Participants, + stateEntryFixture.CurrentEpoch.ActiveIdentities, + stateEntryFixture.NextEpochSetup.Participants, + stateEntryFixture.NextEpoch.ActiveIdentities, + flow.EpochParticipationStatusJoining, + ) + assert.NoError(t, err) + assert.Equal(t, expectedIdentities, richStateEntry.CurrentEpochIdentityTable, "should be equal to current epoch setup participants + next epoch setup participants") + assert.Nil(t, richStateEntry.NextEpochCommit) + expectedIdentities, err = flow.BuildIdentityTable( + stateEntryFixture.NextEpochSetup.Participants, + stateEntryFixture.NextEpoch.ActiveIdentities, + stateEntryFixture.CurrentEpochSetup.Participants, + stateEntryFixture.CurrentEpoch.ActiveIdentities, + flow.EpochParticipationStatusLeaving, + ) + assert.NoError(t, err) + assert.Equal(t, expectedIdentities, richStateEntry.NextEpochIdentityTable, "should be equal to next epoch setup participants + current epoch setup participants") + }) + + // 4. Common situation during the epoch setup phase for first epoch after the spork + // * we are currently in Epoch N + // * there is no previous epoch as we are in the first epoch after the spork + // * network is currently in the setup phase for the next epoch, i.e. 
EpochSetup event (starting setup phase) has already been observed + t.Run("setup-after-spork", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + // no previous epoch since we are in the first epoch + entry.PreviousEpochSetup = nil + entry.PreviousEpochCommit = nil + entry.PreviousEpoch = nil + + // next epoch is setup but not committed + entry.NextEpochCommit = nil + entry.NextEpoch.CommitID = flow.ZeroID + }) + // sanity check that previous epoch is not populated in `stateEntry` + assert.Nil(t, stateEntryFixture.PreviousEpoch) + assert.Nil(t, stateEntryFixture.PreviousEpochSetup) + assert.Nil(t, stateEntryFixture.PreviousEpochCommit) + + stateEntry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: nil, + }, + ) + assert.NoError(t, err) + assert.Equal(t, flow.EpochPhaseSetup, stateEntry.EpochPhase()) + + richStateEntry, err := flow.NewRichEpochStateEntry(stateEntry) + assert.NoError(t, err) + expectedIdentities, err := flow.BuildIdentityTable( + stateEntry.CurrentEpochSetup.Participants, + stateEntry.CurrentEpoch.ActiveIdentities, + stateEntry.NextEpochSetup.Participants, + stateEntry.NextEpoch.ActiveIdentities, + flow.EpochParticipationStatusJoining, + ) + assert.NoError(t, err) + assert.Equal(t, expectedIdentities, richStateEntry.CurrentEpochIdentityTable, "should be equal to current epoch setup participants + next epoch setup participants") + assert.Nil(t, richStateEntry.NextEpochCommit) + expectedIdentities, err = flow.BuildIdentityTable( + stateEntry.NextEpochSetup.Participants, + stateEntry.NextEpoch.ActiveIdentities, + stateEntry.CurrentEpochSetup.Participants, + stateEntry.CurrentEpoch.ActiveIdentities, + flow.EpochParticipationStatusLeaving, + ) + assert.NoError(t, err) + assert.Equal(t, expectedIdentities, richStateEntry.NextEpochIdentityTable, "should be equal to next epoch setup participants + current epoch setup participants") + }) + + // 5. Common situation during the epoch commit phase for epoch N+1 + // * we are currently in Epoch N + // * previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) + // * The network has completed the epoch commit phase, i.e. published the EpochSetup and EpochCommit events for epoch N+1. 
+ t.Run("commit-phase", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) + + stateEntry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + assert.NoError(t, err) + assert.Equal(t, flow.EpochPhaseCommitted, stateEntry.EpochPhase()) + + richStateEntry, err := flow.NewRichEpochStateEntry(stateEntry) + assert.NoError(t, err) + expectedIdentities, err := flow.BuildIdentityTable( + stateEntry.CurrentEpochSetup.Participants, + stateEntry.CurrentEpoch.ActiveIdentities, + stateEntry.NextEpochSetup.Participants, + stateEntry.NextEpoch.ActiveIdentities, + flow.EpochParticipationStatusJoining, + ) + assert.NoError(t, err) + assert.Equal(t, expectedIdentities, richStateEntry.CurrentEpochIdentityTable, "should be equal to current epoch setup participants + next epoch setup participants") + expectedIdentities, err = flow.BuildIdentityTable( + stateEntry.NextEpochSetup.Participants, + stateEntry.NextEpoch.ActiveIdentities, + stateEntry.CurrentEpochSetup.Participants, + stateEntry.CurrentEpoch.ActiveIdentities, + flow.EpochParticipationStatusLeaving, + ) + assert.NoError(t, err) + assert.Equal(t, expectedIdentities, richStateEntry.NextEpochIdentityTable, "should be equal to next epoch setup participants + current epoch setup participants") + }) + + // 6. Common situation during the epoch commit phase for first epoch after the spork + // * we are currently in Epoch N + // * there is no previous epoch as we are in the first epoch after the spork + // * The network has completed the epoch commit phase, i.e. published the EpochSetup and EpochCommit events for epoch N+1. 
+ t.Run("commit-after-spork", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + // no previous epoch since we are in the first epoch + entry.PreviousEpochSetup = nil + entry.PreviousEpochCommit = nil + entry.PreviousEpoch = nil + }) + // sanity check that previous epoch is not populated in `stateEntryFixture` + assert.Nil(t, stateEntryFixture.PreviousEpoch) + assert.Nil(t, stateEntryFixture.PreviousEpochSetup) + assert.Nil(t, stateEntryFixture.PreviousEpochCommit) + + stateEntry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + assert.NoError(t, err) + assert.Equal(t, flow.EpochPhaseCommitted, stateEntry.EpochPhase()) + + richStateEntry, err := flow.NewRichEpochStateEntry(stateEntry) + assert.NoError(t, err) + expectedIdentities, err := flow.BuildIdentityTable( + stateEntryFixture.CurrentEpochSetup.Participants, + stateEntryFixture.CurrentEpoch.ActiveIdentities, + stateEntryFixture.NextEpochSetup.Participants, + stateEntryFixture.NextEpoch.ActiveIdentities, + flow.EpochParticipationStatusJoining, + ) + assert.NoError(t, err) + assert.Equal(t, expectedIdentities, richStateEntry.CurrentEpochIdentityTable, "should be equal to current epoch setup participants + next epoch setup participants") + expectedIdentities, err = flow.BuildIdentityTable( + stateEntryFixture.NextEpochSetup.Participants, + stateEntryFixture.NextEpoch.ActiveIdentities, + stateEntryFixture.CurrentEpochSetup.Participants, + stateEntryFixture.CurrentEpoch.ActiveIdentities, + flow.EpochParticipationStatusLeaving, + ) + assert.NoError(t, err) + assert.Equal(t, expectedIdentities, richStateEntry.NextEpochIdentityTable, "should be equal to next epoch setup participants + current epoch setup participants") + }) + + // 7. Invalid: epochState is nil + t.Run("invalid - epoch state is nil", func(t *testing.T) { + _, err := flow.NewRichEpochStateEntry(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "epoch state must not be nil") + }) +} + +// TestNewEpochStateEntry tests the NewEpochStateEntry constructor with various valid and invalid inputs. +// +// Valid Cases: +// +// 1. Valid input with all fields populated: +// - Should successfully create an EpochStateEntry without error. +// +// 2. Valid input without NextEpochProtocolState: +// - Should successfully create an EpochStateEntry even if next epoch protocol state is not set. +// +// Invalid Cases: +// +// 3. PreviousEpoch.SetupID mismatch with PreviousEpochSetup.ID: +// - Should return an error for mismatched setup commitment. +// +// 4. PreviousEpoch.CommitID mismatch with PreviousEpochCommit.ID: +// - Should return an error for mismatched commit commitment. +// +// 5. PreviousEpoch is nil but PreviousEpochSetup is non-nil: +// - Should return an error for unexpected previous epoch's setup event. +// +// 6. PreviousEpoch is nil but PreviousEpochCommit is non-nil: +// - Should return an error for unexpected previous epoch's commit event. +// +// 7. 
CurrentEpoch.SetupID mismatch with CurrentEpochSetup.ID: +// - Should return an error for mismatched current epoch's setup event. +// +// 8. CurrentEpoch.CommitID mismatch with CurrentEpochCommit.ID: +// - Should return an error for mismatched current epoch's commit event. +// +// 9. NextEpoch is nil but NextEpochSetup is non-nil: +// - Should return an error for unexpected next epoch's setup event. +// +// 10. NextEpoch is nil but NextEpochCommit is non-nil: +// - Should return an error for unexpected next epoch's commit event. +// +// 11. NextEpoch.SetupID is non-zero but mismatches NextEpochSetup.ID: +// - Should return an error for next epoch's mismatched setup event. +// +// 12. NextEpoch.CommitID is non-zero but mismatches NextEpochCommit.ID: +// - Should return an error for mismatched next epoch's commit event. +// +// 13. NextEpoch.CommitID is zero but NextEpochCommit is non-nil: +// - Should return an error for unexpected commit event. +func TestNewEpochStateEntry(t *testing.T) { + // 1. Valid input with all fields + t.Run("valid input with all fields", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) + entry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.NoError(t, err) + require.NotNil(t, entry) + }) + + // 2. Valid input without NextEpochProtocolState + t.Run("valid input without NextEpochProtocolState", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture() + entry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.NoError(t, err) + require.NotNil(t, entry) + }) + + // 3. 
Invalid: PreviousEpoch is set, but PreviousEpochSetup is nil + t.Run("invalid - previous epoch set but no setup event", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.PreviousEpochSetup = nil + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied previous epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.PreviousEpochSetup.ID(), + stateEntryFixture.MinEpochStateEntry.PreviousEpoch.SetupID, + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 4. Invalid: PreviousEpoch.CommitID doesn't match PreviousEpochCommit.ID() + t.Run("invalid - previous commit ID mismatch", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.PreviousEpoch.CommitID = flow.ZeroID // incorrect + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied previous epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.PreviousEpochCommit.ID(), + stateEntryFixture.PreviousEpoch.CommitID, + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 5. Invalid: PreviousEpoch is nil, but PreviousEpochSetup is non-nil + t.Run("invalid - nil previous epoch but has setup event", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.PreviousEpoch = nil + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "no previous epoch but gotten non-nil EpochSetup event") + }) + + // 6. 
Invalid: PreviousEpoch is nil, but PreviousEpochCommit is non-nil + t.Run("invalid - nil previous epoch but has commit event", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.PreviousEpoch = nil + entry.PreviousEpochSetup = nil + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "no previous epoch but gotten non-nil EpochCommit event") + }) + + // 7. Invalid: CurrentEpoch.SetupID doesn't match CurrentEpochSetup.ID() + t.Run("invalid - current setup ID mismatch", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.CurrentEpoch.SetupID = unittest.IdentifierFixture() // incorrect + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied current epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.CurrentEpochSetup.ID(), + stateEntryFixture.CurrentEpoch.SetupID, + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 8. Invalid: CurrentEpoch.CommitID doesn't match CurrentEpochCommit.ID() + t.Run("invalid - current commit ID mismatch", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.CurrentEpoch.CommitID = unittest.IdentifierFixture() // incorrect + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied current epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.CurrentEpochCommit.ID(), + stateEntryFixture.CurrentEpoch.CommitID, + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 9. 
Invalid: NextEpoch is nil, but NextEpochSetup is non-nil
+	t.Run("invalid - nil next epoch but has setup event", func(t *testing.T) {
+		stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) {
+			entry.NextEpoch = nil
+		})
+		_, err := flow.NewEpochStateEntry(
+			flow.UntrustedEpochStateEntry{
+				MinEpochStateEntry:  stateEntryFixture.MinEpochStateEntry,
+				PreviousEpochSetup:  stateEntryFixture.PreviousEpochSetup,
+				PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit,
+				CurrentEpochSetup:   stateEntryFixture.CurrentEpochSetup,
+				CurrentEpochCommit:  stateEntryFixture.CurrentEpochCommit,
+				NextEpochSetup:      stateEntryFixture.NextEpochSetup,
+				NextEpochCommit:     stateEntryFixture.NextEpochCommit,
+			},
+		)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "no next epoch but gotten non-nil EpochSetup event")
+	})
+
+	// 10. Invalid: NextEpoch is nil, but NextEpochCommit is non-nil
+	t.Run("invalid - nil next epoch but has commit event", func(t *testing.T) {
+		stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) {
+			entry.NextEpoch = nil
+			entry.NextEpochSetup = nil
+		})
+		_, err := flow.NewEpochStateEntry(
+			flow.UntrustedEpochStateEntry{
+				MinEpochStateEntry:  stateEntryFixture.MinEpochStateEntry,
+				PreviousEpochSetup:  stateEntryFixture.PreviousEpochSetup,
+				PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit,
+				CurrentEpochSetup:   stateEntryFixture.CurrentEpochSetup,
+				CurrentEpochCommit:  stateEntryFixture.CurrentEpochCommit,
+				NextEpochSetup:      stateEntryFixture.NextEpochSetup,
+				NextEpochCommit:     stateEntryFixture.NextEpochCommit,
+			},
+		)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "no next epoch but gotten non-nil EpochCommit")
+	})
+
+	// 11. Invalid: NextEpoch.SetupID doesn't match NextEpochSetup.ID()
+	t.Run("invalid - next setup ID mismatch", func(t *testing.T) {
+		stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) {
+			entry.NextEpoch.SetupID = unittest.IdentifierFixture() // incorrect
+		})
+		_, err := flow.NewEpochStateEntry(
+			flow.UntrustedEpochStateEntry{
+				MinEpochStateEntry:  stateEntryFixture.MinEpochStateEntry,
+				PreviousEpochSetup:  stateEntryFixture.PreviousEpochSetup,
+				PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit,
+				CurrentEpochSetup:   stateEntryFixture.CurrentEpochSetup,
+				CurrentEpochCommit:  stateEntryFixture.CurrentEpochCommit,
+				NextEpochSetup:      stateEntryFixture.NextEpochSetup,
+				NextEpochCommit:     stateEntryFixture.NextEpochCommit,
+			},
+		)
+		require.Error(t, err)
+		expectedMsg := fmt.Sprintf(
+			"supplied next epoch's setup event (%x) does not match commitment (%x) in MinEpochStateEntry",
+			stateEntryFixture.NextEpoch.SetupID,
+			stateEntryFixture.NextEpochSetup.ID(),
+		)
+		require.Contains(t, err.Error(), expectedMsg)
+	})
+
+	// 12.
Invalid: NextEpoch.CommitID ≠ ZeroID, but NextEpochCommit.ID doesn't match + t.Run("invalid - next commit ID mismatch", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpoch.CommitID = unittest.IdentifierFixture() // incorrect + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := fmt.Sprintf( + "supplied next epoch's commit event (%x) does not match commitment (%x) in MinEpochStateEntry", + stateEntryFixture.NextEpoch.CommitID, + stateEntryFixture.NextEpochCommit.ID(), + ) + require.Contains(t, err.Error(), expectedMsg) + }) + + // 13. Invalid: NextEpoch.CommitID == ZeroID, but NextEpochCommit is non-nil + t.Run("invalid - uncommitted next epoch but has commit event", func(t *testing.T) { + stateEntryFixture := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpoch.CommitID = flow.ZeroID + }) + _, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: stateEntryFixture.MinEpochStateEntry, + PreviousEpochSetup: stateEntryFixture.PreviousEpochSetup, + PreviousEpochCommit: stateEntryFixture.PreviousEpochCommit, + CurrentEpochSetup: stateEntryFixture.CurrentEpochSetup, + CurrentEpochCommit: stateEntryFixture.CurrentEpochCommit, + NextEpochSetup: stateEntryFixture.NextEpochSetup, + NextEpochCommit: stateEntryFixture.NextEpochCommit, + }, + ) + require.Error(t, err) + expectedMsg := "next epoch not yet committed but got EpochCommit event" + require.Contains(t, err.Error(), expectedMsg) + }) +} + +// TestProtocolStateEntry_Copy tests if the copy method returns a deep copy of the entry. +// All changes to copy shouldn't affect the original entry -- except for key changes. 
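+//
+// For illustration, a minimal sketch of the deep-copy semantics under test
+// (hypothetical usage, not part of the test itself):
+//
+//	cpy := entry.Copy()
+//	cpy.EpochFallbackTriggered = !cpy.EpochFallbackTriggered // mutate only the copy
+//	// entry.EpochFallbackTriggered retains its original value, because Copy is a deep copy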
+func TestProtocolStateEntry_Copy(t *testing.T) { + entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()).MinEpochStateEntry + cpy := entry.Copy() + assert.Equal(t, entry, cpy) + assert.NotSame(t, entry.NextEpoch, cpy.NextEpoch) + assert.NotSame(t, entry.PreviousEpoch, cpy.PreviousEpoch) + + cpy.EpochFallbackTriggered = !entry.EpochFallbackTriggered + assert.NotEqual(t, entry, cpy) + + assertEpochContainer := func(entry, cpy *flow.EpochStateContainer) { + assert.Equal(t, entry.ActiveIdentities[0], cpy.ActiveIdentities[0]) + cpy.ActiveIdentities[0].Ejected = true + assert.NotEqual(t, entry.ActiveIdentities[0], cpy.ActiveIdentities[0]) + + cpy.ActiveIdentities = append(cpy.ActiveIdentities, &flow.DynamicIdentityEntry{ + NodeID: unittest.IdentifierFixture(), + Ejected: false, + }) + assert.NotEqual(t, entry.ActiveIdentities, cpy.ActiveIdentities) + + cpy.EpochExtensions = append(cpy.EpochExtensions, flow.EpochExtension{ + FirstView: 13, + }) + assert.NotEqual(t, entry.EpochExtensions, cpy.EpochExtensions) + } + assertEpochContainer(entry.PreviousEpoch, cpy.PreviousEpoch) + assertEpochContainer(&entry.CurrentEpoch, &cpy.CurrentEpoch) + assertEpochContainer(entry.NextEpoch, cpy.NextEpoch) +} + +// TestEpochStateEntry_EpochCounter tests if the epoch counter is correctly computed for the entry. +// The epoch counter should be equal to the counter of the current epoch setup and commit regardless of the previous or next epoch. +func TestEpochStateEntry_EpochCounter(t *testing.T) { + t.Run("with-previous-epoch", func(t *testing.T) { + entry := unittest.EpochStateFixture() + assert.Equal(t, entry.EpochCounter(), entry.CurrentEpochSetup.Counter) + assert.Equal(t, entry.EpochCounter(), entry.CurrentEpochCommit.Counter) + }) + t.Run("root-epoch", func(t *testing.T) { + entry := unittest.EpochStateFixture(func(entry *flow.RichEpochStateEntry) { + entry.PreviousEpoch = nil + entry.PreviousEpochSetup = nil + entry.PreviousEpochCommit = nil + }) + assert.Equal(t, entry.EpochCounter(), entry.CurrentEpochSetup.Counter) + assert.Equal(t, entry.EpochCounter(), entry.CurrentEpochCommit.Counter) + }) + t.Run("with-next-epoch", func(t *testing.T) { + entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) + assert.Equal(t, entry.EpochCounter(), entry.CurrentEpochSetup.Counter) + assert.Equal(t, entry.EpochCounter(), entry.CurrentEpochCommit.Counter) + }) +} + +// TestEpochStateEntry_CurrentEpochFinalView tests if the final view of the current epoch is correctly computed, +// it has to be equal: +// - to the final view of the current epoch setup if there are no extensions +// - to the final view of the last extension if there are multiple extensions +func TestEpochStateEntry_CurrentEpochFinalView(t *testing.T) { + t.Run("no-extension", func(t *testing.T) { + entry := unittest.EpochStateFixture() + assert.Equal(t, entry.CurrentEpochSetup.FinalView, entry.CurrentEpochFinalView()) + }) + t.Run("multiple-extension", func(t *testing.T) { + entry := unittest.EpochStateFixture() + extraViews := uint64(1000) + entry.CurrentEpoch.EpochExtensions = []flow.EpochExtension{ + { + FirstView: entry.CurrentEpochSetup.FinalView + 1, + FinalView: entry.CurrentEpochSetup.FinalView + extraViews, + }, + } + assert.Equal(t, entry.CurrentEpochSetup.FinalView+extraViews, entry.CurrentEpochFinalView()) + entry.CurrentEpoch.EpochExtensions = append(entry.CurrentEpoch.EpochExtensions, flow.EpochExtension{ + FirstView: entry.CurrentEpoch.EpochExtensions[0].FinalView + 1, + FinalView: 
entry.CurrentEpoch.EpochExtensions[0].FinalView + extraViews,
+		})
+		assert.Equal(t, entry.CurrentEpochSetup.FinalView+2*extraViews, entry.CurrentEpochFinalView())
+	})
+}
+
+// TestBuildIdentityTable tests that BuildIdentityTable returns a correct identity table, whether or not the
+// input identity lists overlap. It also tests that the function returns an error when the inputs are not in canonical order.
+func TestBuildIdentityTable(t *testing.T) {
+	t.Run("invalid-adjacent-identity-status", func(t *testing.T) {
+		targetEpochIdentities := unittest.IdentityListFixture(10).Sort(flow.Canonical[flow.Identity])
+		adjacentEpochIdentities := unittest.IdentityListFixture(10).Sort(flow.Canonical[flow.Identity])
+
+		// Per convention, BuildIdentityTable only accepts EpochParticipationStatusLeaving or EpochParticipationStatusJoining
+		// for the *adjacent* epoch, because these are the only sensible values.
+		for _, status := range []flow.EpochParticipationStatus{flow.EpochParticipationStatusActive, flow.EpochParticipationStatusEjected} {
+			identityList, err := flow.BuildIdentityTable(
+				targetEpochIdentities.ToSkeleton(),
+				flow.DynamicIdentityEntryListFromIdentities(targetEpochIdentities),
+				adjacentEpochIdentities.ToSkeleton(),
+				flow.DynamicIdentityEntryListFromIdentities(adjacentEpochIdentities),
+				status,
+			)
+			assert.Error(t, err)
+			assert.Empty(t, identityList)
+		}
+	})
+	t.Run("happy-path-no-identities-overlap", func(t *testing.T) {
+		targetEpochIdentities := unittest.IdentityListFixture(10).Sort(flow.Canonical[flow.Identity])
+		adjacentEpochIdentities := unittest.IdentityListFixture(10).Sort(flow.Canonical[flow.Identity])
+
+		identityList, err := flow.BuildIdentityTable(
+			targetEpochIdentities.ToSkeleton(),
+			flow.DynamicIdentityEntryListFromIdentities(targetEpochIdentities),
+			adjacentEpochIdentities.ToSkeleton(),
+			flow.DynamicIdentityEntryListFromIdentities(adjacentEpochIdentities),
+			flow.EpochParticipationStatusLeaving,
+		)
+		assert.NoError(t, err)
+
+		expectedIdentities := targetEpochIdentities.Union(adjacentEpochIdentities.Map(func(identity flow.Identity) flow.Identity {
+			identity.EpochParticipationStatus = flow.EpochParticipationStatusLeaving
+			return identity
+		}))
+		assert.Equal(t, expectedIdentities, identityList)
+	})
+	t.Run("happy-path-identities-overlap", func(t *testing.T) {
+		targetEpochIdentities := unittest.IdentityListFixture(10).Sort(flow.Canonical[flow.Identity])
+		adjacentEpochIdentities := unittest.IdentityListFixture(10)
+		sampledIdentities, err := targetEpochIdentities.Sample(2)
+		assert.NoError(t, err)
+		// change the address so we can assert that we take identities from the target epoch and not the adjacent epoch
+		for i, identity := range sampledIdentities.Copy() {
+			identity.Address = fmt.Sprintf("%d", i)
+			adjacentEpochIdentities = append(adjacentEpochIdentities, identity)
+		}
+		adjacentEpochIdentities = adjacentEpochIdentities.Sort(flow.Canonical[flow.Identity])
+
+		identityList, err := flow.BuildIdentityTable(
+			targetEpochIdentities.ToSkeleton(),
+			flow.DynamicIdentityEntryListFromIdentities(targetEpochIdentities),
+			adjacentEpochIdentities.ToSkeleton(),
+			flow.DynamicIdentityEntryListFromIdentities(adjacentEpochIdentities),
+			flow.EpochParticipationStatusJoining,
+		)
+		assert.NoError(t, err)
+
+		expectedIdentities := targetEpochIdentities.Union(adjacentEpochIdentities.Map(func(identity flow.Identity) flow.Identity {
+			identity.EpochParticipationStatus = flow.EpochParticipationStatusJoining
+			return identity
+		}))
+		assert.Equal(t, expectedIdentities, identityList)
+	})
+	t.Run("target-epoch-identities-not-ordered", func(t *testing.T) {
+		targetEpochIdentities := unittest.IdentityListFixture(10).Sort(flow.Canonical[flow.Identity])
+		targetEpochIdentitySkeletons, err := targetEpochIdentities.ToSkeleton().Shuffle()
+		assert.NoError(t, err)
+		targetEpochDynamicIdentities := flow.DynamicIdentityEntryListFromIdentities(targetEpochIdentities)
+
+		adjacentEpochIdentities := unittest.IdentityListFixture(10).Sort(flow.Canonical[flow.Identity])
+		identityList, err := flow.BuildIdentityTable(
+			targetEpochIdentitySkeletons,
+			targetEpochDynamicIdentities,
+			adjacentEpochIdentities.ToSkeleton(),
+			flow.DynamicIdentityEntryListFromIdentities(adjacentEpochIdentities),
+			flow.EpochParticipationStatusLeaving,
+		)
+		assert.Error(t, err)
+		assert.Empty(t, identityList)
+	})
+	t.Run("adjacent-epoch-identities-not-ordered", func(t *testing.T) {
+		adjacentEpochIdentities := unittest.IdentityListFixture(10).Sort(flow.Canonical[flow.Identity])
+		adjacentEpochIdentitySkeletons, err := adjacentEpochIdentities.ToSkeleton().Shuffle()
+		assert.NoError(t, err)
+		adjacentEpochDynamicIdentities := flow.DynamicIdentityEntryListFromIdentities(adjacentEpochIdentities)
+
+		targetEpochIdentities := unittest.IdentityListFixture(10).Sort(flow.Canonical[flow.Identity])
+		identityList, err := flow.BuildIdentityTable(
+			targetEpochIdentities.ToSkeleton(),
+			flow.DynamicIdentityEntryListFromIdentities(targetEpochIdentities),
+			adjacentEpochIdentitySkeletons,
+			adjacentEpochDynamicIdentities,
+			flow.EpochParticipationStatusLeaving,
+		)
+		assert.Error(t, err)
+		assert.Empty(t, identityList)
+	})
+}
+
+// TestNewEpochStateContainer tests the NewEpochStateContainer constructor with valid and invalid inputs.
+//
+// Valid Cases:
+//
+// 1. Valid input with all fields:
+// - Should successfully construct an EpochStateContainer.
+//
+// 2. Valid input with zero CommitID and nil EpochExtensions:
+// - Should successfully construct an EpochStateContainer.
+//
+// Invalid Cases:
+//
+// 3. Invalid input with zero SetupID:
+// - Should return an error indicating SetupID must not be zero.
+//
+// 4. Invalid input with nil ActiveIdentities:
+// - Should return an error indicating ActiveIdentities must not be nil.
+//
+// 5. Invalid input with unsorted ActiveIdentities:
+// - Should return an error indicating ActiveIdentities are not sorted.
+func TestNewEpochStateContainer(t *testing.T) {
+	identities := unittest.DynamicIdentityEntryListFixture(3)
+	sortedIdentities := identities.Sort(flow.IdentifierCanonical)
+
+	// Copy the sorted list, then swap two entries to guarantee it is unsorted
+	unsortedIdentities := sortedIdentities.Copy()
+	unsortedIdentities[0], unsortedIdentities[1] = unsortedIdentities[1], unsortedIdentities[0]
+
+	// 1. Valid input with all fields
+	t.Run("valid input with all fields", func(t *testing.T) {
+		container, err := flow.NewEpochStateContainer(
+			flow.UntrustedEpochStateContainer{
+				SetupID:          unittest.IdentifierFixture(),
+				CommitID:         unittest.IdentifierFixture(),
+				ActiveIdentities: sortedIdentities,
+				EpochExtensions: []flow.EpochExtension{
+					{FirstView: 100, FinalView: 200},
+				},
+			},
+		)
+
+		require.NoError(t, err)
+		require.NotNil(t, container)
+	})
+
+	// 2.
Valid input with zero CommitID and nil EpochExtensions + t.Run("valid input with zero CommitID and nil EpochExtensions", func(t *testing.T) { + container, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: flow.ZeroID, + ActiveIdentities: sortedIdentities, + EpochExtensions: nil, + }, + ) + + require.NoError(t, err) + require.NotNil(t, container) + }) + + // 3. Invalid input with zero SetupID + t.Run("invalid - zero SetupID", func(t *testing.T) { + _, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: flow.ZeroID, + ActiveIdentities: sortedIdentities, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "SetupID must not be zero") + }) + + // 4. Invalid input with nil ActiveIdentities + t.Run("invalid - nil ActiveIdentities", func(t *testing.T) { + _, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "ActiveIdentities must not be nil") + }) + + // 5. Invalid input with unsorted ActiveIdentities + t.Run("invalid - unsorted ActiveIdentities", func(t *testing.T) { + _, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + ActiveIdentities: unsortedIdentities, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "ActiveIdentities are not sorted") + }) +} + +// TestNewMinEpochStateEntry validates the behavior of the NewMinEpochStateEntry constructor function. +// It checks for correct handling of both valid and invalid inputs. +// +// Test Cases: +// +// 1. Valid input with all fields: +// - Ensures that providing a valid current epoch and optional previous/next epochs creates a MinEpochStateEntry. +// +// 2. Valid input with nil PreviousEpoch and NextEpoch: +// - Ensures that entry construction still succeeds with only CurrentEpoch. +// +// 3. Invalid input: empty CurrentEpoch: +// - Verifies that constructor returns an error if CurrentEpoch is not populated. +func TestNewMinEpochStateEntry(t *testing.T) { + identities := unittest.DynamicIdentityEntryListFixture(3) + + currentEpoch := flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: identities, + } + + previousEpoch := &flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: identities, + } + + nextEpoch := &flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: identities, + } + + // 1. Valid input with all fields + t.Run("valid input with all fields", func(t *testing.T) { + untrusted := flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: previousEpoch, + CurrentEpoch: currentEpoch, + NextEpoch: nextEpoch, + EpochFallbackTriggered: true, + } + + entry, err := flow.NewMinEpochStateEntry(untrusted) + require.NoError(t, err) + require.NotNil(t, entry) + }) + + // 2. Valid input with nil PreviousEpoch and NextEpoch + t.Run("valid input with nil PreviousEpoch and NextEpoch", func(t *testing.T) { + untrusted := flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: nil, + CurrentEpoch: currentEpoch, + NextEpoch: nil, + EpochFallbackTriggered: false, + } + + entry, err := flow.NewMinEpochStateEntry(untrusted) + require.NoError(t, err) + require.NotNil(t, entry) + }) + + // 3. 
Invalid input: empty CurrentEpoch + t.Run("empty CurrentEpoch", func(t *testing.T) { + untrusted := flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: nil, + CurrentEpoch: flow.EpochStateContainer{}, // Empty + NextEpoch: nil, + EpochFallbackTriggered: false, + } + + entry, err := flow.NewMinEpochStateEntry(untrusted) + require.Error(t, err) + require.Nil(t, entry) + require.Contains(t, err.Error(), "current epoch must not be empty") + }) +} + +// TestEpochStateContainer_EqualTo verifies the correctness of the EqualTo method on EpochStateContainer. +// It checks that containers are considered equal if and only if all fields match. +func TestEpochStateContainer_EqualTo(t *testing.T) { + // Create two containers with different values + identities1 := unittest.DynamicIdentityEntryListFixture(3) + identities2 := unittest.DynamicIdentityEntryListFixture(3) + + c1 := &flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: identities1, + EpochExtensions: []flow.EpochExtension{ + { + FirstView: 201, + FinalView: 300, + }, + }, + } + + c2 := &flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: identities2, + EpochExtensions: []flow.EpochExtension{ + { + FirstView: 301, + FinalView: 400, + }, + }, + } + + require.False(t, c1.EqualTo(c2), "Initially, all fields differ; EqualTo should return false") + + // List of mutations to apply to c1 to gradually make it equal to c2 + mutations := []func(){ + func() { + c1.SetupID = c2.SetupID + }, + func() { + c1.CommitID = c2.CommitID + }, + func() { + c1.ActiveIdentities = clone.Clone(c2.ActiveIdentities) + }, + func() { + c1.EpochExtensions = clone.Clone(c2.EpochExtensions) + }, + } + + // Shuffle the order of mutations + rand.Shuffle(len(mutations), func(i, j int) { + mutations[i], mutations[j] = mutations[j], mutations[i] + }) + + // Apply each mutation one at a time, except the last. + // After each step, the containers should still not be equal. + for _, mutation := range mutations[:len(mutations)-1] { + mutation() + require.False(t, c1.EqualTo(c2)) + } + + // Final mutation should make the containers fully equal. + mutations[len(mutations)-1]() + require.True(t, c1.EqualTo(c2)) +} + +// TestEpochStateContainer_EqualTo_Nil verifies the behavior of the EqualTo method on EpochStateContainer when either +// or both the receiver and the function input are nil. +func TestEpochStateContainer_EqualTo_Nil(t *testing.T) { + var nilContainer *flow.EpochStateContainer + nonNil := &flow.EpochStateContainer{ + SetupID: unittest.IdentifierFixture(), + CommitID: unittest.IdentifierFixture(), + ActiveIdentities: unittest.DynamicIdentityEntryListFixture(3), + EpochExtensions: []flow.EpochExtension{ + { + FirstView: 201, + FinalView: 300, + }, + }, + } + + t.Run("nil receiver", func(t *testing.T) { + require.False(t, nilContainer.EqualTo(nonNil)) + }) + + t.Run("nil input", func(t *testing.T) { + require.False(t, nonNil.EqualTo(nilContainer)) + }) + + t.Run("both nil", func(t *testing.T) { + require.True(t, nilContainer.EqualTo(nil)) + }) +} + +// TestEpochExtension_EqualTo verifies the correctness of the EqualTo method on EpochExtension. +// It checks that EpochExtensions are considered equal if and only if all fields match. 
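+//
+// As a sketch of the semantics under test (values are illustrative only):
+//
+//	a := flow.EpochExtension{FirstView: 100, FinalView: 200}
+//	b := flow.EpochExtension{FirstView: 100, FinalView: 200}
+//	a.EqualTo(&b) // true: all fields match
+//	b.FinalView = 201
+//	a.EqualTo(&b) // false: FinalView differs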
+func TestEpochExtension_EqualTo(t *testing.T) { + // Create two extensions with different values + ext1 := &flow.EpochExtension{ + FirstView: 100, + FinalView: 200, + } + ext2 := &flow.EpochExtension{ + FirstView: 300, + FinalView: 400, + } + + require.False(t, ext1.EqualTo(ext2), "Initially, all fields differ; EqualTo should return false") + + // List of mutations to apply to ext1 to gradually make it equal to ext2 + mutations := []func(){ + func() { + ext1.FirstView = ext2.FirstView + }, + func() { + ext1.FinalView = ext2.FinalView + }, + } + + // Shuffle the order of mutations + rand.Shuffle(len(mutations), func(i, j int) { + mutations[i], mutations[j] = mutations[j], mutations[i] + }) + + // Apply each mutation one at a time, except the last. + // After each step, the extensions should still not be equal. + for _, mutation := range mutations[:len(mutations)-1] { + mutation() + require.False(t, ext1.EqualTo(ext2)) + } + + // Final mutation should make the extensions fully equal. + mutations[len(mutations)-1]() + require.True(t, ext1.EqualTo(ext2)) +} + +// TestEpochExtension_EqualTo_Nil verifies the behavior of the EqualTo method on EpochExtension when either +// or both the receiver and the function input are nil. +func TestEpochExtension_EqualTo_Nil(t *testing.T) { + var nilExt *flow.EpochExtension + nonNil := &flow.EpochExtension{ + FirstView: 1, + FinalView: 2, + } + + t.Run("nil receiver", func(t *testing.T) { + require.False(t, nilExt.EqualTo(nonNil)) + }) + + t.Run("nil input", func(t *testing.T) { + require.False(t, nonNil.EqualTo(nilExt)) + }) + + t.Run("both nil", func(t *testing.T) { + require.True(t, nilExt.EqualTo(nil)) + }) +} + +// TestDynamicIdentityEntry_EqualTo verifies the correctness of the EqualTo method on DynamicIdentityEntry. +// It checks that DynamicIdentityEntries are considered equal if and only if all fields match. +func TestDynamicIdentityEntry_EqualTo(t *testing.T) { + entry1 := &flow.DynamicIdentityEntry{ + NodeID: unittest.IdentifierFixture(), + Ejected: false, + } + entry2 := &flow.DynamicIdentityEntry{ + NodeID: unittest.IdentifierFixture(), + Ejected: true, + } + + require.False(t, entry1.EqualTo(entry2), "Initially, all fields differ; EqualTo should return false") + + // List of mutations to gradually make entry1 equal to entry2 + mutations := []func(){ + func() { + entry1.NodeID = entry2.NodeID + }, + func() { + entry1.Ejected = entry2.Ejected + }, + } + + // Shuffle mutation order + rand.Shuffle(len(mutations), func(i, j int) { + mutations[i], mutations[j] = mutations[j], mutations[i] + }) + + // Apply each mutation one at a time, except the last. + for _, mutation := range mutations[:len(mutations)-1] { + mutation() + require.False(t, entry1.EqualTo(entry2)) + } + + // Final mutation: should now be equal + mutations[len(mutations)-1]() + require.True(t, entry1.EqualTo(entry2)) +} + +// TestDynamicIdentityEntry_EqualTo_Nil verifies the behavior of EqualTo on DynamicIdentityEntry when one or both inputs are nil. 
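+//
+// The expected convention, sketched (illustrative only): two nil entries are equal,
+// while a nil and a non-nil entry are never equal:
+//
+//	var a, b *flow.DynamicIdentityEntry
+//	a.EqualTo(b)                            // true: both are nil
+//	a.EqualTo(&flow.DynamicIdentityEntry{}) // false: only the receiver is nil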
+func TestDynamicIdentityEntry_EqualTo_Nil(t *testing.T) { + var nilEntry *flow.DynamicIdentityEntry + nonNil := &flow.DynamicIdentityEntry{ + NodeID: unittest.IdentifierFixture(), + Ejected: false, + } + + t.Run("nil receiver", func(t *testing.T) { + require.False(t, nilEntry.EqualTo(nonNil)) + }) + + t.Run("nil input", func(t *testing.T) { + require.False(t, nonNil.EqualTo(nilEntry)) + }) + + t.Run("both nil", func(t *testing.T) { + require.True(t, nilEntry.EqualTo(nil)) + }) +} diff --git a/model/flow/quorum_certificate.go b/model/flow/quorum_certificate.go index 3fac30f4cf1..2570492656b 100644 --- a/model/flow/quorum_certificate.go +++ b/model/flow/quorum_certificate.go @@ -1,8 +1,15 @@ package flow +import ( + "bytes" + "fmt" +) + // QuorumCertificate represents a quorum certificate for a block proposal as defined in the HotStuff algorithm. // A quorum certificate is a collection of votes for a particular block proposal. Valid quorum certificates contain // signatures from a super-majority of consensus committee members. +// +//structwrite:immutable - mutations allowed only within the constructor type QuorumCertificate struct { View uint64 BlockID Identifier @@ -22,6 +29,42 @@ type QuorumCertificate struct { SigData []byte } +// UntrustedQuorumCertificate is an untrusted input-only representation of a QuorumCertificate, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedQuorumCertificate should be validated and converted into +// a trusted QuorumCertificate using NewQuorumCertificate constructor. +type UntrustedQuorumCertificate QuorumCertificate + +// NewQuorumCertificate creates a new instance of QuorumCertificate. +// Construction of QuorumCertificate is allowed only within the constructor +// +// All errors indicate a valid QuorumCertificate cannot be constructed from the input. +func NewQuorumCertificate(untrusted UntrustedQuorumCertificate) (*QuorumCertificate, error) { + if untrusted.BlockID == ZeroID { + return nil, fmt.Errorf("BlockID must not be empty") + } + + if len(untrusted.SignerIndices) == 0 { + return nil, fmt.Errorf("SignerIndices must not be empty") + } + + if len(untrusted.SigData) == 0 { + return nil, fmt.Errorf("SigData must not be empty") + } + + return &QuorumCertificate{ + View: untrusted.View, + BlockID: untrusted.BlockID, + SignerIndices: untrusted.SignerIndices, + SigData: untrusted.SigData, + }, nil +} + // ID returns the QuorumCertificate's identifier func (qc *QuorumCertificate) ID() Identifier { if qc == nil { @@ -30,6 +73,22 @@ func (qc *QuorumCertificate) ID() Identifier { return MakeID(qc) } +// Equals returns true if and only if receiver QuorumCertificate is equal to the `other`. Nil values are supported. +func (qc *QuorumCertificate) Equals(other *QuorumCertificate) bool { + // Shortcut if `qc` and `other` point to the same object; covers case where both are nil. 
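+	// Note: the pointer-identity shortcut below also makes a QC trivially equal to itself.
+	// All remaining checks are structural; SignerIndices and SigData are compared byte-wise.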
+ if qc == other { + return true + } + if qc == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above) + return false + } + // both are not nil, so we can compare the fields + return (qc.View == other.View) && + (qc.BlockID == other.BlockID) && + bytes.Equal(qc.SignerIndices, other.SignerIndices) && + bytes.Equal(qc.SigData, other.SigData) +} + // QuorumCertificateWithSignerIDs is a QuorumCertificate, where the signing nodes are // identified via their `flow.Identifier`s instead of indices. Working with IDs as opposed to // indices is less efficient, but simpler, because we don't require a canonical node order. diff --git a/model/flow/quorum_certificate_test.go b/model/flow/quorum_certificate_test.go new file mode 100644 index 00000000000..cb84468dafa --- /dev/null +++ b/model/flow/quorum_certificate_test.go @@ -0,0 +1,168 @@ +package flow_test + +import ( + "math/rand" + "testing" + + clone "github.com/huandu/go-clone/generic" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestQuorumCertificateID_Malleability confirms that the QuorumCertificate struct, which implements +// the [flow.IDEntity] interface, is resistant to tampering. +func TestQuorumCertificateID_Malleability(t *testing.T) { + unittest.RequireEntityNonMalleable(t, unittest.QuorumCertificateFixture()) +} + +// TestQuorumCertificate_Equals verifies the correctness of the Equals method on QuorumCertificates. +// It checks that QuorumCertificates are considered equal if and only if all fields match. +func TestQuorumCertificate_Equals(t *testing.T) { + // Create two QuorumCertificates with random but different values. Note: random selection for `SignerIndices` has limited variability and + // yields sometimes the same value for both qc1 and qc2. Therefore, we explicitly set different values for `SignerIndices`. + qc1 := unittest.QuorumCertificateFixture(unittest.QCWithSignerIndices([]byte{85, 0})) + qc2 := unittest.QuorumCertificateFixture(unittest.QCWithSignerIndices([]byte{90, 0})) + require.False(t, qc1.Equals(qc2), "Initially, all fields are different, so the objects should not be equal") + + // List of mutations to apply on qc1 to gradually make it equal to qc2 + mutations := []func(){ + func() { + qc1.View = qc2.View + }, func() { + qc1.BlockID = qc2.BlockID + }, func() { + qc1.SignerIndices = clone.Clone(qc2.SignerIndices) // deep copy + }, func() { + qc1.SigData = clone.Clone(qc2.SigData) // deep copy + }, + } + + // Shuffle the order of mutations + rand.Shuffle(len(mutations), func(i, j int) { + mutations[i], mutations[j] = mutations[j], mutations[i] + }) + + // Apply each mutation one at a time, except the last. + // After each step, the objects should still not be equal. + for _, mutation := range mutations[:len(mutations)-1] { + mutation() + require.False(t, qc1.Equals(qc2)) + } + + // Apply the final mutation; now all relevant fields should match, so the objects must be equal. 
+ mutations[len(mutations)-1]() + require.True(t, qc1.Equals(qc2)) +} + +// TestQuorumCertificate_Equals_Nil verifies the behavior of the Equals method when either +// or both the receiver and the function input are nil +func TestQuorumCertificate_Equals_Nil(t *testing.T) { + var nilQC *flow.QuorumCertificate + qc := unittest.QuorumCertificateFixture() + t.Run("nil receiver", func(t *testing.T) { + require.False(t, nilQC.Equals(qc)) + }) + t.Run("nil input", func(t *testing.T) { + require.False(t, qc.Equals(nilQC)) + }) + t.Run("both nil", func(t *testing.T) { + require.True(t, nilQC.Equals(nil)) + }) +} + +// TestNewQuorumCertificate verifies the behavior of the NewQuorumCertificate constructor. +// Test Cases: +// +// 1. Valid input: +// - Ensures a QuorumCertificate is returned when all fields are populated. +// +// 2. Missing BlockID: +// - Ensures an error is returned when BlockID is ZeroID. +// +// 3. Nil SignerIndices: +// - Ensures an error is returned when SignerIndices is nil. +// +// 4. Empty SignerIndices slice: +// - Ensures an error is returned when SignerIndices is empty. +// +// 5. Nil SigData: +// - Ensures an error is returned when SigData is nil. +// +// 6. Empty SigData slice: +// - Ensures an error is returned when SigData is empty. +func TestNewQuorumCertificate(t *testing.T) { + view := uint64(10) + blockID := unittest.IdentifierFixture() + signerIndices := []byte{0x01, 0x02} + sigData := []byte{0x03, 0x04} + + base := flow.UntrustedQuorumCertificate{ + View: view, + BlockID: blockID, + SignerIndices: signerIndices, + SigData: sigData, + } + + t.Run("valid input", func(t *testing.T) { + qc, err := flow.NewQuorumCertificate(base) + assert.NoError(t, err) + assert.NotNil(t, qc) + assert.Equal(t, view, qc.View) + assert.Equal(t, blockID, qc.BlockID) + assert.Equal(t, signerIndices, qc.SignerIndices) + assert.Equal(t, sigData, qc.SigData) + }) + + t.Run("missing BlockID", func(t *testing.T) { + u := base + u.BlockID = flow.ZeroID + + qc, err := flow.NewQuorumCertificate(u) + assert.Error(t, err) + assert.Nil(t, qc) + assert.Contains(t, err.Error(), "BlockID") + }) + + t.Run("nil SignerIndices", func(t *testing.T) { + u := base + u.SignerIndices = nil + + qc, err := flow.NewQuorumCertificate(u) + assert.Error(t, err) + assert.Nil(t, qc) + assert.Contains(t, err.Error(), "SignerIndices") + }) + + t.Run("empty SignerIndices slice", func(t *testing.T) { + u := base + u.SignerIndices = []byte{} + + qc, err := flow.NewQuorumCertificate(u) + assert.Error(t, err) + assert.Nil(t, qc) + assert.Contains(t, err.Error(), "SignerIndices") + }) + + t.Run("nil SigData", func(t *testing.T) { + u := base + u.SigData = nil + + qc, err := flow.NewQuorumCertificate(u) + assert.Error(t, err) + assert.Nil(t, qc) + assert.Contains(t, err.Error(), "SigData") + }) + + t.Run("empty SigData slice", func(t *testing.T) { + u := base + u.SigData = []byte{} + + qc, err := flow.NewQuorumCertificate(u) + assert.Error(t, err) + assert.Nil(t, qc) + assert.Contains(t, err.Error(), "SigData") + }) +} diff --git a/model/flow/resultApproval.go b/model/flow/resultApproval.go index 3f1b3f5701e..c42708549ef 100644 --- a/model/flow/resultApproval.go +++ b/model/flow/resultApproval.go @@ -1,22 +1,60 @@ package flow import ( - "github.com/onflow/flow-go/crypto" + "fmt" + + "github.com/onflow/crypto" ) // Attestation confirms correctness of a chunk of an exec result +// +//structwrite:immutable - mutations allowed only within the constructor type Attestation struct { BlockID Identifier // ID of the block included the 
collection
	ExecutionResultID Identifier // ID of the execution result
	ChunkIndex        uint64     // index of the approved chunk
}

+// UntrustedAttestation is an untrusted input-only representation of an Attestation,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
+//
+// An instance of UntrustedAttestation should be validated and converted into
+// a trusted Attestation using NewAttestation constructor.
+type UntrustedAttestation Attestation
+
+// NewAttestation creates a new instance of Attestation.
+// Construction of Attestation is allowed only within the constructor.
+//
+// All errors indicate a valid Attestation cannot be constructed from the input.
+// ChunkIndex can be zero in principle, so we don’t check it.
+func NewAttestation(untrusted UntrustedAttestation) (*Attestation, error) {
+	if untrusted.BlockID == ZeroID {
+		return nil, fmt.Errorf("BlockID must not be empty")
+	}
+
+	if untrusted.ExecutionResultID == ZeroID {
+		return nil, fmt.Errorf("ExecutionResultID must not be empty")
+	}
+
+	return &Attestation{
+		BlockID:           untrusted.BlockID,
+		ExecutionResultID: untrusted.ExecutionResultID,
+		ChunkIndex:        untrusted.ChunkIndex,
+	}, nil
+}
+
// ID generates a unique identifier using attestation
func (a Attestation) ID() Identifier {
	return MakeID(a)
}

// ResultApprovalBody holds body part of a result approval
+//
+//structwrite:immutable - mutations allowed only within the constructor
type ResultApprovalBody struct {
	Attestation
	ApproverID Identifier // node id generating this result approval
@@ -24,6 +62,47 @@ type ResultApprovalBody struct {
	Spock crypto.Signature // proof of re-computation, one per each chunk
}

+// UntrustedResultApprovalBody is an untrusted input-only representation of a ResultApprovalBody,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
+//
+// An instance of UntrustedResultApprovalBody should be validated and converted into
+// a trusted ResultApprovalBody using NewResultApprovalBody constructor.
+type UntrustedResultApprovalBody ResultApprovalBody
+
+// NewResultApprovalBody creates a new instance of ResultApprovalBody.
+// Construction of ResultApprovalBody is allowed only within the constructor.
+//
+// All errors indicate a valid ResultApprovalBody cannot be constructed from the input.
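+//
+// Example usage (a sketch; the input values are hypothetical placeholders):
+//
+//	rab, err := NewResultApprovalBody(UntrustedResultApprovalBody{
+//		Attestation:          attestation,
+//		ApproverID:           approverID,
+//		AttestationSignature: attestationSig,
+//		Spock:                spockProof,
+//	})
+//	if err != nil {
+//		return fmt.Errorf("could not build result approval body: %w", err)
+//	}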
+func NewResultApprovalBody(untrusted UntrustedResultApprovalBody) (*ResultApprovalBody, error) {
+	att, err := NewAttestation(UntrustedAttestation(untrusted.Attestation))
+	if err != nil {
+		return nil, fmt.Errorf("invalid attestation: %w", err)
+	}
+
+	if untrusted.ApproverID == ZeroID {
+		return nil, fmt.Errorf("ApproverID must not be empty")
+	}
+
+	if len(untrusted.AttestationSignature) == 0 {
+		return nil, fmt.Errorf("AttestationSignature must not be empty")
+	}
+
+	if len(untrusted.Spock) == 0 {
+		return nil, fmt.Errorf("Spock proof must not be empty")
+	}
+
+	return &ResultApprovalBody{
+		Attestation:          *att,
+		ApproverID:           untrusted.ApproverID,
+		AttestationSignature: untrusted.AttestationSignature,
+		Spock:                untrusted.Spock,
+	}, nil
+}
+
// PartialID generates a unique identifier using Attestation + ApproverID
func (rab ResultApprovalBody) PartialID() Identifier {
	data := struct {
@@ -43,17 +122,76 @@ func (rab ResultApprovalBody) ID() Identifier {
}

// ResultApproval includes an approval for a chunk, verified by a verification node
+//
+//structwrite:immutable - mutations allowed only within the constructor
type ResultApproval struct {
-	Body              ResultApprovalBody
-	VerifierSignature crypto.Signature // signature over all above fields
+	Body ResultApprovalBody
+	// VerifierSignature is a signature over all fields in the Body.
+	// This is a temporary measure intended to provide message attribution.
+	// In the long term, all messages should be attributable as a property of the networking layer.
+	// Currently, the networking layer only provides authentication, which we already check for this type in
+	// the Sealing Engine: https://github.com/onflow/flow-go/blob/6efcbb7e8fa1578a3dd8f3f7f9857eb920e510e0/engine/consensus/sealing/engine.go#L417-L421
+	// Although the attribution property is not currently used, the current structure makes it available in
+	// the absence of support in the networking layer.
+	VerifierSignature crypto.Signature
}

+var _ Entity = (*ResultApproval)(nil)
+
+// UntrustedResultApproval is an untrusted input-only representation of a ResultApproval,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
+//
+// An instance of UntrustedResultApproval should be validated and converted into
+// a trusted ResultApproval using NewResultApproval constructor.
+type UntrustedResultApproval ResultApproval
+
+// NewResultApproval creates a new instance of ResultApproval.
+// Construction of ResultApproval is allowed only within the constructor.
+//
+// All errors indicate a valid ResultApproval cannot be constructed from the input.
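+//
+// Example usage (a sketch; `body` and `verifierSig` are hypothetical placeholders):
+//
+//	ra, err := NewResultApproval(UntrustedResultApproval{
+//		Body:              body,
+//		VerifierSignature: verifierSig,
+//	})
+//	if err != nil {
+//		return fmt.Errorf("could not build result approval: %w", err)
+//	}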
+func NewResultApproval(untrusted UntrustedResultApproval) (*ResultApproval, error) { + rab, err := NewResultApprovalBody(UntrustedResultApprovalBody(untrusted.Body)) + if err != nil { + return nil, fmt.Errorf("invalid result approval body: %w", err) + } + + if len(untrusted.VerifierSignature) == 0 { + return nil, fmt.Errorf("VerifierSignature must not be empty") + } + + return &ResultApproval{ + Body: *rab, + VerifierSignature: untrusted.VerifierSignature, + }, nil } -// Checksum generates checksum using the result approval full content -func (ra ResultApproval) Checksum() Identifier { +// ID generates a unique identifier using result approval full content +func (ra ResultApproval) ID() Identifier { return MakeID(ra) } + +// ApprovalRequest represents a request for a ResultApproval corresponding to +// a specific chunk. +// +// It is created from the corresponding untrusted network message +// (messages.ApprovalRequest) during decoding. Construction is only allowed +// through validation logic, ensuring the fields have passed structural checks. +type ApprovalRequest struct { + Nonce uint64 + ResultID Identifier + ChunkIndex uint64 +} + +// ApprovalResponse contains a response to an approval request. +// +// It is created from the corresponding untrusted network message +// (messages.ApprovalResponse) during decoding and validation. The embedded +// ResultApproval is guaranteed to have passed all structural validity checks. +type ApprovalResponse struct { + Nonce uint64 + Approval ResultApproval +} diff --git a/model/flow/resultApproval_test.go b/model/flow/resultApproval_test.go index ee6b52ad922..5f8f35e1235 100644 --- a/model/flow/resultApproval_test.go +++ b/model/flow/resultApproval_test.go @@ -3,14 +3,309 @@ package flow_test import ( "testing" + "github.com/onflow/crypto" "github.com/stretchr/testify/assert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) +const chunkIdx = uint64(7) + +// TestNewAttestation verifies that NewAttestation constructs a valid Attestation +// when given complete, non-zero fields, and returns an error when any required +// field is missing. 
+// It covers: +// - valid attestation creation +// - missing BlockID +// - missing ExecutionResultID +func TestNewAttestation(t *testing.T) { + + t.Run("valid attestation", func(t *testing.T) { + blockID := unittest.IdentifierFixture() + resultID := unittest.IdentifierFixture() + + ua := flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + } + + at, err := flow.NewAttestation(ua) + assert.NoError(t, err) + assert.NotNil(t, at) + assert.Equal(t, blockID, at.BlockID) + assert.Equal(t, resultID, at.ExecutionResultID) + assert.Equal(t, chunkIdx, at.ChunkIndex) + }) + + t.Run("missing BlockID", func(t *testing.T) { + resultID := unittest.IdentifierFixture() + + ua := flow.UntrustedAttestation{ + BlockID: flow.ZeroID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + } + + at, err := flow.NewAttestation(ua) + assert.Error(t, err) + assert.Nil(t, at) + assert.Contains(t, err.Error(), "BlockID") + }) + + t.Run("missing ExecutionResultID", func(t *testing.T) { + blockID := unittest.IdentifierFixture() + + ua := flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: flow.ZeroID, + ChunkIndex: chunkIdx, + } + + at, err := flow.NewAttestation(ua) + assert.Error(t, err) + assert.Nil(t, at) + assert.Contains(t, err.Error(), "ExecutionResultID") + }) +} + +// TestNewResultApprovalBody checks that NewResultApprovalBody builds a valid +// ResultApprovalBody when given a correct Attestation and non-empty +// fields, and returns errors for invalid nested Attestation or missing fields. +// It covers: +// - valid result approval body creation +// - invalid nested Attestation +// - missing ApproverID +// - missing AttestationSignature +// - missing Spock proof +func TestNewResultApprovalBody(t *testing.T) { + blockID := unittest.IdentifierFixture() + resultID := unittest.IdentifierFixture() + approver := unittest.IdentifierFixture() + attestSig := unittest.SignatureFixture() + spockSig := unittest.SignatureFixture() + + t.Run("valid result approval body", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + }) + assert.NoError(t, err) + + uc := flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: spockSig, + } + + rab, err := flow.NewResultApprovalBody(uc) + assert.NoError(t, err) + assert.NotNil(t, rab) + assert.Equal(t, *att, rab.Attestation) + assert.Equal(t, approver, rab.ApproverID) + assert.Equal(t, attestSig, rab.AttestationSignature) + assert.Equal(t, spockSig, rab.Spock) + }) + + t.Run("invalid attestation", func(t *testing.T) { + uc := flow.UntrustedResultApprovalBody{ + Attestation: flow.Attestation{ + BlockID: flow.ZeroID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + }, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: spockSig, + } + + rab, err := flow.NewResultApprovalBody(uc) + assert.Error(t, err) + assert.Nil(t, rab) + assert.Contains(t, err.Error(), "attestation") + }) + + t.Run("empty ApproverID", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + }) + assert.NoError(t, err) + + uc := flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: flow.ZeroID, + AttestationSignature: attestSig, + Spock: spockSig, + } + + rab, err := flow.NewResultApprovalBody(uc) + assert.Error(t, err) + assert.Nil(t, rab) + 
assert.Contains(t, err.Error(), "ApproverID") + }) + + t.Run("empty AttestationSignature", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + }) + assert.NoError(t, err) + + uc := flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: approver, + AttestationSignature: crypto.Signature{}, + Spock: spockSig, + } + + rab, err := flow.NewResultApprovalBody(uc) + assert.Error(t, err) + assert.Nil(t, rab) + assert.Contains(t, err.Error(), "AttestationSignature") + }) + + t.Run("empty Spock proof", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: resultID, + ChunkIndex: chunkIdx, + }) + assert.NoError(t, err) + + uc := flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: crypto.Signature{}, + } + + rab, err := flow.NewResultApprovalBody(uc) + assert.Error(t, err) + assert.Nil(t, rab) + assert.Contains(t, err.Error(), "Spock") + }) +} + +// TestNewResultApproval ensures NewResultApproval combines a valid +// ResultApprovalBody and VerifierSignature into a ResultApproval, and returns +// errors for invalid ResultApprovalBody or missing VerifierSignature. +// It covers: +// - valid result approval creation +// - invalid ResultApprovalBody +// - missing verifier signature +func TestNewResultApproval(t *testing.T) { + blockID := unittest.IdentifierFixture() + execResID := unittest.IdentifierFixture() + approver := unittest.IdentifierFixture() + attestSig := unittest.SignatureFixture() + spockSig := unittest.SignatureFixture() + verifierSig := unittest.SignatureFixture() + + t.Run("valid result approval", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: execResID, + ChunkIndex: chunkIdx, + }) + assert.NoError(t, err) + + rab, err := flow.NewResultApprovalBody(flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: spockSig, + }) + assert.NoError(t, err) + + uv := flow.UntrustedResultApproval{ + Body: *rab, + VerifierSignature: verifierSig, + } + + ra, err := flow.NewResultApproval(uv) + assert.NoError(t, err) + assert.NotNil(t, ra) + assert.Equal(t, *rab, ra.Body) + assert.Equal(t, verifierSig, ra.VerifierSignature) + }) + + // An invalid ResultApprovalBody must cause NewResultApproval to error + t.Run("invalid body", func(t *testing.T) { + uv := flow.UntrustedResultApproval{ + Body: flow.ResultApprovalBody{ + Attestation: flow.Attestation{ + BlockID: flow.ZeroID, + ExecutionResultID: execResID, + ChunkIndex: chunkIdx, + }, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: spockSig, + }, + VerifierSignature: verifierSig, + } + + ra, err := flow.NewResultApproval(uv) + assert.Error(t, err) + assert.Nil(t, ra) + assert.Contains(t, err.Error(), "invalid result approval body") + }) + + // Missing VerifierSignature must cause NewResultApproval to error + t.Run("empty verifier signature", func(t *testing.T) { + att, err := flow.NewAttestation(flow.UntrustedAttestation{ + BlockID: blockID, + ExecutionResultID: execResID, + ChunkIndex: 3, + }) + assert.NoError(t, err) + + rab, err := flow.NewResultApprovalBody(flow.UntrustedResultApprovalBody{ + Attestation: *att, + ApproverID: approver, + AttestationSignature: attestSig, + Spock: spockSig, + }) + assert.NoError(t, err) + + uv := 
flow.UntrustedResultApproval{ + Body: *rab, + VerifierSignature: crypto.Signature{}, + } + + ra, err := flow.NewResultApproval(uv) + assert.Error(t, err) + assert.Nil(t, ra) + assert.Contains(t, err.Error(), "VerifierSignature") + }) +} + func TestResultApprovalEncode(t *testing.T) { ra := unittest.ResultApprovalFixture() id := ra.ID() assert.NotEqual(t, flow.ZeroID, id) } + +func TestResultApprovalBodyNonMalleable(t *testing.T) { + ra := unittest.ResultApprovalFixture() + unittest.RequireEntityNonMalleable(t, &ra.Body) +} + +// TestResultApprovalNonMalleable confirms that the ResultApproval struct, which implements +// the [flow.IDEntity] interface, is resistant to tampering. +func TestResultApprovalNonMalleable(t *testing.T) { + ra := unittest.ResultApprovalFixture() + unittest.RequireEntityNonMalleable(t, ra) +} + +// TestAttestationID_Malleability confirms that the Attestation struct, which implements +// the [flow.IDEntity] interface, is resistant to tampering. +func TestAttestationID_Malleability(t *testing.T) { + unittest.RequireEntityNonMalleable(t, unittest.AttestationFixture()) +} diff --git a/model/flow/role.go b/model/flow/role.go index f138a185d75..7ea3d26cda8 100644 --- a/model/flow/role.go +++ b/model/flow/role.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package flow import ( diff --git a/model/flow/role_test.go b/model/flow/role_test.go index 7f4e39dd9eb..f678612dc7e 100644 --- a/model/flow/role_test.go +++ b/model/flow/role_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) func TestRoleJSON(t *testing.T) { @@ -77,3 +78,8 @@ func TestRoleList_ID(t *testing.T) { otherID := other.ID() assert.NotEqual(t, thisID, otherID) } + +// TestRoleListMalleability verifies that the RoleList which implements the [flow.IDEntity] interface is not malleable. +func TestRoleListMalleability(t *testing.T) { + unittest.RequireEntityNonMalleable(t, &flow.RoleList{flow.RoleConsensus, flow.RoleVerification}) +} diff --git a/model/flow/schemes.go b/model/flow/schemes.go new file mode 100644 index 00000000000..4778fd08c09 --- /dev/null +++ b/model/flow/schemes.go @@ -0,0 +1,27 @@ +package flow + +type AuthenticationScheme byte + +const ( + PlainScheme AuthenticationScheme = iota // 0x0 + WebAuthnScheme // 0x01 + InvalidScheme // 0x02 +) + +func AuthenticationSchemeFromByte(b byte) AuthenticationScheme { + if b < byte(InvalidScheme) { + return AuthenticationScheme(b) + } + return InvalidScheme +} + +func (s AuthenticationScheme) String() string { + switch s { + case PlainScheme: + return "PlainScheme" + case WebAuthnScheme: + return "WebAuthnScheme" + default: + return "InvalidScheme" + } +} diff --git a/model/flow/seal.go b/model/flow/seal.go index 0828fb10662..9e5c4a31033 100644 --- a/model/flow/seal.go +++ b/model/flow/seal.go @@ -1,8 +1,8 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package flow -import "encoding/json" +import ( + "fmt" +) // A Seal is produced when an Execution Result (referenced by `ResultID`) for // particular block (referenced by `BlockID`) is committed into the chain. @@ -36,42 +36,49 @@ import "encoding/json" // Therefore, to retrieve valid blocks from storage, it is required that // the Seal.ID includes all fields with independent degrees of freedom // (such as AggregatedApprovalSigs). 
+//
+//structwrite:immutable - mutations allowed only within the constructor
type Seal struct {
-	BlockID    Identifier
-	ResultID   Identifier
-	FinalState StateCommitment
+	BlockID    Identifier
+	ResultID   Identifier
+	FinalState StateCommitment
+	// AggregatedApprovalSigs can be nil/empty when verification is disabled or for the root seal.
	AggregatedApprovalSigs []AggregatedSignature // one AggregatedSignature per chunk
}

-func (s Seal) Body() interface{} {
-	return struct {
-		BlockID                Identifier
-		ResultID               Identifier
-		FinalState             StateCommitment
-		AggregatedApprovalSigs []AggregatedSignature
-	}{
-		BlockID:                s.BlockID,
-		ResultID:               s.ResultID,
-		FinalState:             s.FinalState,
-		AggregatedApprovalSigs: s.AggregatedApprovalSigs,
+// UntrustedSeal is an untrusted input-only representation of a Seal,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
+//
+// An instance of UntrustedSeal should be validated and converted into
+// a trusted Seal using NewSeal constructor.
+type UntrustedSeal Seal
+
+// NewSeal creates a new instance of Seal.
+// Construction of Seal is allowed only within the constructor.
+//
+// All errors indicate a valid Seal cannot be constructed from the input.
+func NewSeal(untrusted UntrustedSeal) (*Seal, error) {
+	if untrusted.BlockID == ZeroID {
+		return nil, fmt.Errorf("block ID must not be zero")
	}
+	if untrusted.ResultID == ZeroID {
+		return nil, fmt.Errorf("result ID must not be zero")
+	}
+	if untrusted.FinalState == EmptyStateCommitment {
+		return nil, fmt.Errorf("final state must not be empty")
+	}
+	return &Seal{
+		BlockID:                untrusted.BlockID,
+		ResultID:               untrusted.ResultID,
+		FinalState:             untrusted.FinalState,
+		AggregatedApprovalSigs: untrusted.AggregatedApprovalSigs,
+	}, nil
}

func (s Seal) ID() Identifier {
-	return MakeID(s.Body())
-}
-
-func (s Seal) Checksum() Identifier {
	return MakeID(s)
}
-
-func (s Seal) MarshalJSON() ([]byte, error) {
-	type Alias Seal
-	return json.Marshal(struct {
-		Alias
-		ID string
-	}{
-		Alias: Alias(s),
-		ID:    s.ID().String(),
-	})
-}
diff --git a/model/flow/seal_test.go b/model/flow/seal_test.go
index a980192b323..7cbd163421f 100644
--- a/model/flow/seal_test.go
+++ b/model/flow/seal_test.go
@@ -4,23 +4,89 @@ import (
	"testing"

	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

-// Test_SealID checks that two seals that only differ in their approval
-// signatures have different IDs. This is REQUIRED FOR the STORAGE layer
+// TestSealMalleability checks that Seal is not malleable: any change in its data
+// should result in a different ID. This is REQUIRED FOR the STORAGE layer
// to correctly retrieve the block payload!
-func Test_SealID(t *testing.T) {
-
+func TestSealMalleability(t *testing.T) {
	seal := unittest.Seal.Fixture()
-	id := seal.ID()
-	cs := seal.Checksum()
+	unittest.RequireEntityNonMalleable(t, seal)
+}
+
+// TestNewSeal verifies the behavior of the NewSeal constructor.
+// It ensures proper handling of both valid and invalid untrusted input fields.
+//
+// Test Cases:
+//
+// 1. Valid input:
+// - Verifies that a properly populated UntrustedSeal results in a valid Seal.
+//
+// 2. Invalid input with zero block ID:
+// - Ensures an error is returned when the BlockID is zero.
+//
+// 3.
Invalid input with zero result ID: +// - Ensures an error is returned when the ResultID is zero. +// +// 4. Invalid input with empty final state: +// - Ensures an error is returned when the FinalState is empty. +func TestNewSeal(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: unittest.IdentifierFixture(), + ResultID: unittest.IdentifierFixture(), + FinalState: unittest.StateCommitmentFixture(), + AggregatedApprovalSigs: unittest.Seal.AggregatedSignatureFixtures(3), + }, + ) + require.NoError(t, err) + require.NotNil(t, seal) + }) + + t.Run("invalid input, block ID is zero", func(t *testing.T) { + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: flow.ZeroID, + ResultID: unittest.IdentifierFixture(), + FinalState: unittest.StateCommitmentFixture(), + AggregatedApprovalSigs: unittest.Seal.AggregatedSignatureFixtures(3), + }, + ) + require.Error(t, err) + require.Nil(t, seal) + assert.Contains(t, err.Error(), "block ID must not be zero") + }) - // Change signatures of first chunk - seal.AggregatedApprovalSigs[0] = unittest.Seal.AggregatedSignatureFixture() + t.Run("invalid input, result ID is zero", func(t *testing.T) { + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: unittest.IdentifierFixture(), + ResultID: flow.ZeroID, + FinalState: unittest.StateCommitmentFixture(), + AggregatedApprovalSigs: unittest.Seal.AggregatedSignatureFixtures(3), + }, + ) + require.Error(t, err) + require.Nil(t, seal) + assert.Contains(t, err.Error(), "result ID must not be zero") + }) - // They should not have changed - assert.NotEqual(t, id, seal.ID()) - assert.NotEqual(t, cs, seal.Checksum()) + t.Run("invalid input, final state is empty", func(t *testing.T) { + seal, err := flow.NewSeal( + flow.UntrustedSeal{ + BlockID: unittest.IdentifierFixture(), + ResultID: unittest.IdentifierFixture(), + FinalState: flow.EmptyStateCommitment, + AggregatedApprovalSigs: unittest.Seal.AggregatedSignatureFixtures(3), + }, + ) + require.Error(t, err) + require.Nil(t, seal) + assert.Contains(t, err.Error(), "final state must not be empty") + }) } diff --git a/model/flow/sealing_segment.go b/model/flow/sealing_segment.go index e0a04cb9eec..553cde28b0a 100644 --- a/model/flow/sealing_segment.go +++ b/model/flow/sealing_segment.go @@ -1,6 +1,8 @@ package flow import ( + "bytes" + "errors" "fmt" "golang.org/x/exp/slices" @@ -18,22 +20,25 @@ import ( // Lets denote the highest block in the sealing segment as `head`. Per convention, `head` must be a // finalized block. Consider the chain of blocks leading up to `head` (included). The highest block // in chain leading up to `head` that is sealed, we denote as B. +// In other words, head is the last finalized block, and B is the last sealed block, +// block at height (B.Height + 1) is not sealed. type SealingSegment struct { // Blocks contain the chain `B <- ... <- Head` in ascending height order. // Formally, Blocks contains exactly (not more!) the history to satisfy condition // (see sealing_segment.md for details): // (i) The highest sealed block as of `head` needs to be included in the sealing segment. // This is relevant if `head` does not contain any seals. - Blocks []*Block + Blocks []*Proposal // ExtraBlocks [optional] holds ancestors of `Blocks` in ascending height order. // Formally, ExtraBlocks contains at least the additional history to satisfy conditions // (see sealing_segment.md for details): // (ii) All blocks that are sealed by `head`. 
This is relevant if `head` contains _multiple_ seals. // (iii) The sealing segment holds the history of all non-expired collection guarantees, i.e. - // limitHeight := max(head.Height - flow.DefaultTransactionExpiry, SporkRootBlockHeight) + // limitHeight := max(blockSealedAtHead.Height - flow.DefaultTransactionExpiry, SporkRootBlockHeight) + // where blockSealedAtHead is the block sealed by the `head` block. // (Potentially longer history is permitted) - ExtraBlocks []*Block + ExtraBlocks []*Proposal // ExecutionResults contain any results which are referenced by receipts // or seals in the sealing segment, but not included in any segment block @@ -50,31 +55,68 @@ type SealingSegment struct { // the lowest one, are contained in the blocks of the sealing segment. LatestSeals map[Identifier]Identifier - // FirstSeal contains the latest seal as of the first block in the segment. - // Per convention, this field holds a seal that was included _prior_ to the - // first block of the sealing segment. If the first block in the segment - // contains a seal, then this field is `nil`. + // FirstSeal contains the latest seal in the fork as of the first block in the + // segment, i.e. `Blocks[0]`. Per convention, this field is `nil` if and only if + // the first block in the segment contains a seal. In other words, `FirstSeal` + // holding a non-nil value indicates that `Blocks[0]` did not seal any blocks, + // i.e. the latest sealed block as of `Blocks[0]` is carried over from an ancestor. // This information is needed for the `Commit` method of protocol snapshot // to return the sealed state, when the first block contains no seal. FirstSeal *Seal + + // ProtocolStateEntries contains every protocol state entry committed to + // by any block in the SealingSegment (including ExtraBlocks). + ProtocolStateEntries map[Identifier]*ProtocolStateEntryWrapper + // SporkRootBlock is the root block in the current spork. + SporkRootBlock *Block +} + +// ProtocolStateEntryWrapper is a wrapper coupling two data sources. +// Conceptually, the SealingSegment stores one Protocol State Entry (aka `KVStoreEntry`) +// per unique ProtocolStateID field within the segment's blocks. +// Currently, although epoch data is conceptually a part of the protocol data entry associated +// with each block, it is stored separately as a matter of technical debt (only a hash commitment +// `RichEpochStateEntry.ID()` is stored within the `KVStoreEntry`). +// +// Deprecated: avoid using this in new code; this is a temporary measure until epoch data is moved into the protocol KV store. +// TODO: move epoch data into the KVStore as part of a future upgrade type ProtocolStateEntryWrapper struct { + KVStore PSKeyValueStoreData + EpochEntry *RichEpochStateEntry } // Highest is the highest block in the sealing segment and the reference block from snapshot that was // used to produce this sealing segment. func (segment *SealingSegment) Highest() *Block { - return segment.Blocks[len(segment.Blocks)-1] + return &segment.Blocks[len(segment.Blocks)-1].Block +} + +// Finalized returns the last finalized block, which is an alias of Highest(). +func (segment *SealingSegment) Finalized() *Block { + return segment.Highest() } // Sealed returns the most recently sealed block based on head of sealing segment (highest block). func (segment *SealingSegment) Sealed() *Block { - return segment.Blocks[0] + return &segment.Blocks[0].Block } // AllBlocks returns all blocks within the sealing segment, including extra blocks, in ascending height order.
-func (segment *SealingSegment) AllBlocks() []*Block { +func (segment *SealingSegment) AllBlocks() []*Proposal { return append(segment.ExtraBlocks, segment.Blocks...) } +// IsSporkRoot returns true if this SealingSegment represents a spork root snapshot. +// The Flow protocol explicitly defines a spork root block (including the network's +// genesis block) to be finalized and sealed and to have a specific execution state +// commitment attached. Mathematically, this is a protocol axiom, as no block exists +// that contains an execution result or seal for the spork root block (nor any children +// at the time of the spork that could possibly finalize the root block). +// Therefore, the sealing segment of a spork root block is degenerate, with a length of 1. +func (segment *SealingSegment) IsSporkRoot() bool { + return len(segment.Blocks) == 1 +} + // FinalizedSeal returns the seal that seals the lowest block. // Per specification, this seal must be included in a SealingSegment. // The SealingSegment must be validated. @@ -97,13 +139,22 @@ func (segment *SealingSegment) FinalizedSeal() (*Seal, error) { return seal, nil } +// LatestProtocolStateEntry returns the Protocol State entry corresponding to +// the highest block in the sealing segment. This represents the Dynamic Protocol State +// after applying all state changes sealed in `SealingSegment.Highest().Payload`. +// Caution: `segment` must be a valid SealingSegment. +func (segment *SealingSegment) LatestProtocolStateEntry() *ProtocolStateEntryWrapper { + highest := segment.Highest() + return segment.ProtocolStateEntries[highest.Payload.ProtocolStateID] +} + // Validate validates the sealing segment structure and returns an error if // the segment isn't valid. This is done by re-building the segment from scratch, // re-using the validation logic already present in the SealingSegmentBuilder. // The node logic requires a valid sealing segment to bootstrap. -// No errors are expected during normal operation. +// Errors expected during normal operation: +// - InvalidSealingSegmentError if `segment` is an invalid SealingSegment.
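As an aside before the implementation of `Validate` below: the following is a hedged sketch of how a bootstrapping caller might use the contract just documented, together with the accessors defined above. The wrapper function and all names are invented for illustration, and the snippet assumes the `fmt` package and this `flow` package are imported.

```go
// bootstrapFromSegment is a hypothetical consumer of a SealingSegment,
// illustrating the documented contract of Validate and the accessors above.
func bootstrapFromSegment(segment *flow.SealingSegment) error {
	if err := segment.Validate(); err != nil {
		// per the contract above, an invalid segment yields InvalidSealingSegmentError
		return fmt.Errorf("refusing to bootstrap from invalid segment: %w", err)
	}
	sealedRoot := segment.Sealed()       // lowest block in Blocks: latest sealed block as of head
	finalizedRoot := segment.Finalized() // alias of Highest(): the snapshot's reference block
	if segment.IsSporkRoot() {
		// degenerate segment of length 1: sealed root and finalized root coincide
		_ = sealedRoot
	}
	// AllBlocks yields ExtraBlocks followed by Blocks, in ascending height order
	for _, proposal := range segment.AllBlocks() {
		// every ProtocolStateID is resolvable via segment.ProtocolStateEntries
		_ = proposal.Block.Payload.ProtocolStateID
	}
	_ = finalizedRoot
	return nil
}
```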
func (segment *SealingSegment) Validate() error { - // populate lookup of seals and results in the segment to satisfy builder seals := make(map[Identifier]*Seal) results := segment.ExecutionResults.Lookup() @@ -111,11 +162,11 @@ func (segment *SealingSegment) Validate() error { if segment.FirstSeal != nil { seals[segment.FirstSeal.ID()] = segment.FirstSeal } - for _, block := range segment.Blocks { - for _, result := range block.Payload.Results { + for _, proposal := range segment.Blocks { + for _, result := range proposal.Block.Payload.Results { results[result.ID()] = result } - for _, seal := range block.Payload.Seals { + for _, seal := range proposal.Block.Payload.Seals { seals[seal.ID()] = seal } } @@ -138,10 +189,17 @@ func (segment *SealingSegment) Validate() error { } return seal, nil } + getProtocolStateEntry := func(protocolStateID Identifier) (*ProtocolStateEntryWrapper, error) { + entry, ok := segment.ProtocolStateEntries[protocolStateID] + if !ok { + return nil, fmt.Errorf("protocol state (id=%x) not found in segment", protocolStateID) + } + return entry, nil + } - builder := NewSealingSegmentBuilder(getResult, getSeal) - for _, block := range segment.Blocks { - err := builder.AddBlock(block) + builder := NewSealingSegmentBuilder(getResult, getSeal, getProtocolStateEntry, segment.SporkRootBlock) + for _, proposal := range segment.Blocks { + err := builder.AddBlock(proposal) if err != nil { return fmt.Errorf("invalid segment: %w", err) } @@ -162,13 +220,32 @@ func (segment *SealingSegment) Validate() error { return nil } -var ( - ErrSegmentMissingSeal = fmt.Errorf("sealing segment failed sanity check: missing seal referenced by segment") - ErrSegmentBlocksWrongLen = fmt.Errorf("sealing segment failed sanity check: non-root sealing segment must have at least 2 blocks") - ErrSegmentInvalidBlockHeight = fmt.Errorf("sealing segment failed sanity check: blocks must be in ascending order") - ErrSegmentResultLookup = fmt.Errorf("failed to lookup execution result") - ErrSegmentSealLookup = fmt.Errorf("failed to lookup seal") -) +// InvalidSealingSegmentError is returned when building or validating a SealingSegment, +// either when the segment is found to be invalid, or when adding an entity to a segment +// under construction would cause the resulting SealingSegment to become invalid. +type InvalidSealingSegmentError struct { + err error } + +func NewInvalidSealingSegmentError(msg string, args ...any) InvalidSealingSegmentError { + return InvalidSealingSegmentError{ + err: fmt.Errorf(msg, args...), + } +} + +func (err InvalidSealingSegmentError) Error() string { + return err.err.Error() +} + +func (err InvalidSealingSegmentError) Unwrap() error { + return err.err +} + +// IsInvalidSealingSegmentError returns true if err is or wraps an instance of InvalidSealingSegmentError. +func IsInvalidSealingSegmentError(err error) bool { + var invalidSealingSegmentError InvalidSealingSegmentError + return errors.As(err, &invalidSealingSegmentError) +} // GetResultFunc is a getter function for results by ID. // No errors are expected during normal operation. @@ -179,111 +256,121 @@ type GetResultFunc func(resultID Identifier) (*ExecutionResult, error) // No errors are expected during normal operation. type GetSealByBlockIDFunc func(blockID Identifier) (*Seal, error) +// GetProtocolStateEntryFunc is a getter function for protocol state entries. +// No errors are expected during normal operation.
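For illustration, all three getter callbacks (the two above plus `GetProtocolStateEntryFunc`, declared next) can be backed by plain maps, which is essentially what the test suite later in this diff does. A hedged sketch with invented names, assuming `fmt` and the `flow` package are imported:

```go
// newMapLookups builds map-backed implementations of the three lookup
// callbacks the SealingSegmentBuilder consumes. Missing entries surface as
// errors, which the builder wraps and returns to the caller.
func newMapLookups(
	results map[flow.Identifier]*flow.ExecutionResult,
	seals map[flow.Identifier]*flow.Seal, // latest seal as of each block, keyed by block ID
	entries map[flow.Identifier]*flow.ProtocolStateEntryWrapper,
) (flow.GetResultFunc, flow.GetSealByBlockIDFunc, flow.GetProtocolStateEntryFunc) {
	getResult := func(resultID flow.Identifier) (*flow.ExecutionResult, error) {
		if result, ok := results[resultID]; ok {
			return result, nil
		}
		return nil, fmt.Errorf("result %x not found", resultID)
	}
	getSeal := func(blockID flow.Identifier) (*flow.Seal, error) {
		if seal, ok := seals[blockID]; ok {
			return seal, nil
		}
		return nil, fmt.Errorf("seal for block %x not found", blockID)
	}
	getEntry := func(protocolStateID flow.Identifier) (*flow.ProtocolStateEntryWrapper, error) {
		if entry, ok := entries[protocolStateID]; ok {
			return entry, nil
		}
		return nil, fmt.Errorf("protocol state entry %x not found", protocolStateID)
	}
	return getResult, getSeal, getEntry
}
```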
+type GetProtocolStateEntryFunc func(protocolStateID Identifier) (*ProtocolStateEntryWrapper, error) + // SealingSegmentBuilder is a utility for incrementally building a sealing segment. type SealingSegmentBuilder struct { // access to storage to read referenced by not included resources resultLookup GetResultFunc sealByBlockIDLookup GetSealByBlockIDFunc + protocolStateLookup GetProtocolStateEntryFunc // keep track of resources included in payloads includedResults map[Identifier]struct{} // resources to include in the sealing segment - blocks []*Block - results []*ExecutionResult - latestSeals map[Identifier]Identifier - firstSeal *Seal + blocks []*Proposal + results []*ExecutionResult + latestSeals map[Identifier]Identifier + protocolStateEntries map[Identifier]*ProtocolStateEntryWrapper + firstSeal *Seal // extraBlocks included in sealing segment, must connect to the lowest block of segment // stored in descending order for simpler population logic - extraBlocks []*Block + extraBlocks []*Proposal + // sporkRootBlock is the root block in the current spork + sporkRootBlock *Block } // AddBlock appends a block to the sealing segment under construction. -// No errors are expected during normal operation. -func (builder *SealingSegmentBuilder) AddBlock(block *Block) error { +// Errors expected during normal operation: +// - InvalidSealingSegmentError if the added block would cause an invalid resulting segment +func (builder *SealingSegmentBuilder) AddBlock(block *Proposal) error { // sanity check: all blocks have to be added before adding extra blocks if len(builder.extraBlocks) > 0 { return fmt.Errorf("cannot add sealing segment block after extra block is added") } // sanity check: block should be 1 height higher than current highest - if !builder.isValidHeight(block) { - return fmt.Errorf("invalid block height (%d): %w", block.Header.Height, ErrSegmentInvalidBlockHeight) + if !builder.isValidHeight(&block.Block) { + return NewInvalidSealingSegmentError("invalid block height (%d)", block.Block.Height) } - blockID := block.ID() - - // a block might contain receipts or seals that refer to results that are included in blocks - // whose height is below the first block of the segment. 
- // In order to include those missing results into the segment, we construct a list of those - missing result IDs referenced by this block - missingResultIDs := make(map[Identifier]struct{}) + blockID := block.Block.ID() // for the first (lowest) block, if it contains no seal, store the latest // seal incorporated prior to the first block if len(builder.blocks) == 0 { - if len(block.Payload.Seals) == 0 { + if len(block.Block.Payload.Seals) == 0 { seal, err := builder.sealByBlockIDLookup(blockID) if err != nil { - return fmt.Errorf("%w: %v", ErrSegmentSealLookup, err) + return fmt.Errorf("could not look up seal: %w", err) } builder.firstSeal = seal - // add first seal result ID here, since it isn't in payload - missingResultIDs[seal.ResultID] = struct{}{} } } // index the latest seal for this block latestSeal, err := builder.sealByBlockIDLookup(blockID) if err != nil { - return fmt.Errorf("%w: %v", ErrSegmentSealLookup, err) + return fmt.Errorf("could not look up seal: %w", err) } builder.latestSeals[blockID] = latestSeal.ID() // cache included results and seals // they could be referenced in a future block in the segment - for _, result := range block.Payload.Results { + for _, result := range block.Block.Payload.Results { builder.includedResults[result.ID()] = struct{}{} } - for _, receipt := range block.Payload.Receipts { - if _, ok := builder.includedResults[receipt.ResultID]; !ok { - missingResultIDs[receipt.ResultID] = struct{}{} - } - } - for _, seal := range block.Payload.Seals { - if _, ok := builder.includedResults[seal.ResultID]; !ok { - missingResultIDs[seal.ResultID] = struct{}{} - } + // if the block commits to an unseen ProtocolStateID, add the corresponding data entry + err = builder.addProtocolStateEntryIfUnseen(block.Block.Payload.ProtocolStateID) + if err != nil { + return fmt.Errorf("could not check or add protocol state entry: %w", err) } - // add the missing results - for resultID := range missingResultIDs { - result, err := builder.resultLookup(resultID) + builder.blocks = append(builder.blocks, block) + return nil +} - if err != nil { - return fmt.Errorf("%w: (%x) %v", ErrSegmentResultLookup, resultID, err) - } - builder.addExecutionResult(result) - builder.includedResults[resultID] = struct{}{} +// addProtocolStateEntryIfUnseen checks whether the given protocolStateID corresponds +// to a previously unseen protocol state entry. If it does, it retrieves the state entry +// and persists it for inclusion in the resulting SealingSegment. +// No errors expected during normal operation. +func (builder *SealingSegmentBuilder) addProtocolStateEntryIfUnseen(protocolStateID Identifier) error { + _, exists := builder.protocolStateEntries[protocolStateID] + if exists { + return nil } - builder.blocks = append(builder.blocks, block) + protocolStateEntry, err := builder.protocolStateLookup(protocolStateID) + if err != nil { + return fmt.Errorf("could not look up protocol state entry with id=%x: %w", protocolStateID, err) + } + builder.protocolStateEntries[protocolStateID] = protocolStateEntry return nil } // AddExtraBlock appends an extra block to sealing segment under construction. // Extra blocks need to be added in descending order and the first block must connect to the lowest block // of sealing segment, this way they form a continuous chain. -// No errors are expected during normal operation.
-func (builder *SealingSegmentBuilder) AddExtraBlock(block *Block) error { +// Errors expected during normal operation: +// - InvalidSealingSegmentError if the added block would cause an invalid resulting segment +func (builder *SealingSegmentBuilder) AddExtraBlock(block *Proposal) error { if len(builder.extraBlocks) == 0 { if len(builder.blocks) == 0 { return fmt.Errorf("cannot add extra blocks before adding lowest sealing segment block") } // first extra block has to match the lowest block of sealing segment - if (block.Header.Height + 1) != builder.lowest().Header.Height { - return fmt.Errorf("invalid extra block height (%d), doesn't connect to sealing segment: %w", block.Header.Height, ErrSegmentInvalidBlockHeight) + if (block.Block.Height + 1) != builder.lowest().Height { + return NewInvalidSealingSegmentError("invalid extra block height (%d), doesn't connect to sealing segment", block.Block.Height) } - } else if (block.Header.Height + 1) != builder.extraBlocks[len(builder.extraBlocks)-1].Header.Height { - return fmt.Errorf("invalid extra block height (%d), doesn't connect to last extra block: %w", block.Header.Height, ErrSegmentInvalidBlockHeight) + } else if (block.Block.Height + 1) != builder.extraBlocks[len(builder.extraBlocks)-1].Block.Height { + return NewInvalidSealingSegmentError("invalid extra block height (%d), doesn't connect to last extra block", block.Block.Height) + } + + // if the block commits to an unseen ProtocolStateID, add the corresponding data entry + err := builder.addProtocolStateEntryIfUnseen(block.Block.Payload.ProtocolStateID) + if err != nil { + return fmt.Errorf("could not check or add protocol state entry: %w", err) } builder.extraBlocks = append(builder.extraBlocks, block) @@ -293,31 +380,80 @@ func (builder *SealingSegmentBuilder) AddExtraBlock(block *Block) error { // AddExecutionResult adds result to executionResults func (builder *SealingSegmentBuilder) addExecutionResult(result *ExecutionResult) { builder.results = append(builder.results, result) + builder.includedResults[result.ID()] = struct{}{} } // SealingSegment completes building the sealing segment, validating the segment // constructed so far, and returning it as a SealingSegment if it is valid. // -// All errors indicate the SealingSegmentBuilder internal state does not represent -// a valid sealing segment. -// No errors are expected during normal operation. +// Errors expected during normal operation: +// - InvalidSealingSegmentError if the segment constructed so far is invalid func (builder *SealingSegmentBuilder) SealingSegment() (*SealingSegment, error) { + + // at this point, go through all blocks and store any results which are referenced + // by blocks in the segment, but not contained within any blocks in the segment + missingExecutionResultMap := make(map[Identifier]struct{}) + + if builder.firstSeal != nil { + _, ok := builder.includedResults[builder.firstSeal.ResultID] + if !ok { + missingExecutionResultMap[builder.firstSeal.ResultID] = struct{}{} + } + } + + for _, block := range append(builder.extraBlocks, builder.blocks...)
{ + for _, receipt := range block.Block.Payload.Receipts { + _, included := builder.includedResults[receipt.ResultID] + if included { + continue + } + missingExecutionResultMap[receipt.ResultID] = struct{}{} + } + for _, seal := range block.Block.Payload.Seals { + _, included := builder.includedResults[seal.ResultID] + if included { + continue + } + missingExecutionResultMap[seal.ResultID] = struct{}{} + } + } + + // sort execution results to canonical order for consistent serialization + missingExecutionResults := make([]Identifier, 0, len(missingExecutionResultMap)) + for resultID := range missingExecutionResultMap { + missingExecutionResults = append(missingExecutionResults, resultID) + } + slices.SortFunc(missingExecutionResults, func(a, b Identifier) int { + return bytes.Compare(a[:], b[:]) + }) + + // retrieve and store all missing execution results + for _, resultID := range missingExecutionResults { + result, err := builder.resultLookup(resultID) + if err != nil { + return nil, fmt.Errorf("could not retrieve missing result (id=%x): %w", resultID, err) + } + builder.addExecutionResult(result) + } + if err := builder.validateSegment(); err != nil { return nil, fmt.Errorf("failed to validate sealing segment: %w", err) } // SealingSegment must store extra blocks in ascending order, builder stores them in descending. // Apply a sort to reverse the slice and use correct ordering. - slices.SortFunc(builder.extraBlocks, func(lhs, rhs *Block) bool { - return lhs.Header.Height < rhs.Header.Height + slices.SortFunc(builder.extraBlocks, func(lhs, rhs *Proposal) int { + return int(lhs.Block.Height) - int(rhs.Block.Height) }) return &SealingSegment{ - Blocks: builder.blocks, - ExtraBlocks: builder.extraBlocks, - ExecutionResults: builder.results, - LatestSeals: builder.latestSeals, - FirstSeal: builder.firstSeal, + Blocks: builder.blocks, + ExtraBlocks: builder.extraBlocks, + ExecutionResults: builder.results, + ProtocolStateEntries: builder.protocolStateEntries, + LatestSeals: builder.latestSeals, + FirstSeal: builder.firstSeal, + SporkRootBlock: builder.sporkRootBlock, }, nil } @@ -327,62 +463,62 @@ func (builder *SealingSegmentBuilder) isValidHeight(block *Block) bool { return true } - return block.Header.Height == builder.highest().Header.Height+1 + return block.Height == builder.highest().Height+1 } // validateRootSegment will check that the current builder state represents a valid // root sealing segment. In particular: -// * the root block must be the first block (least height) in the segment -// * no blocks in the segment may contain any seals (by the minimality requirement) +// - the root block must be the first block (least height) in the segment +// - no blocks in the segment may contain any seals (by the minimality requirement) // -// All errors indicate an invalid root segment, and either a bug in SealingSegmentBuilder -// or a corrupted underlying protocol state. -// No errors are expected during normal operation. 
+// Errors expected during normal operation: +// - InvalidSealingSegmentError if the current builder state does not represent a valid root segment func (builder *SealingSegmentBuilder) validateRootSegment() error { if len(builder.blocks) == 0 { - return fmt.Errorf("root segment must have at least 1 block") + return NewInvalidSealingSegmentError("root segment must have at least 1 block") } if len(builder.extraBlocks) > 0 { - return fmt.Errorf("root segment cannot have extra blocks") + return NewInvalidSealingSegmentError("root segment cannot have extra blocks") } - if builder.lowest().Header.View != 0 { - return fmt.Errorf("root block has unexpected view (%d != 0)", builder.lowest().Header.View) + if builder.lowest().View != builder.sporkRootBlock.View { + return NewInvalidSealingSegmentError("root block has unexpected view (%d != %d)", builder.lowest().View, builder.sporkRootBlock.View) } if len(builder.results) != 1 { - return fmt.Errorf("expected %d results, got %d", 1, len(builder.results)) + return NewInvalidSealingSegmentError("expected %d results, got %d", 1, len(builder.results)) } if builder.firstSeal == nil { - return fmt.Errorf("firstSeal must not be nil for root segment") + return NewInvalidSealingSegmentError("firstSeal must not be nil for root segment") } if builder.results[0].BlockID != builder.lowest().ID() { - return fmt.Errorf("result (block_id=%x) is not for root block (id=%x)", builder.results[0].BlockID, builder.lowest().ID()) + return NewInvalidSealingSegmentError("result (block_id=%x) is not for root block (id=%x)", builder.results[0].BlockID, builder.lowest().ID()) } if builder.results[0].ID() != builder.firstSeal.ResultID { - return fmt.Errorf("firstSeal (result_id=%x) is not for root result (id=%x)", builder.firstSeal.ResultID, builder.results[0].ID()) + return NewInvalidSealingSegmentError("firstSeal (result_id=%x) is not for root result (id=%x)", builder.firstSeal.ResultID, builder.results[0].ID()) } if builder.results[0].BlockID != builder.firstSeal.BlockID { - return fmt.Errorf("root seal (block_id=%x) references different block than root result (block_id=%x)", builder.firstSeal.BlockID, builder.results[0].BlockID) + return NewInvalidSealingSegmentError("root seal (block_id=%x) references different block than root result (block_id=%x)", builder.firstSeal.BlockID, builder.results[0].BlockID) } for _, block := range builder.blocks { - if len(block.Payload.Seals) > 0 { - return fmt.Errorf("root segment cannot contain blocks with seals (minimality requirement) - block (height=%d,id=%x) has %d seals", - block.Header.Height, block.ID(), len(block.Payload.Seals)) + if len(block.Block.Payload.Seals) > 0 { + return NewInvalidSealingSegmentError("root segment cannot contain blocks with seals (minimality requirement) - block (height=%d,id=%x) has %d seals", + block.Block.Height, block.Block.ID(), len(block.Block.Payload.Seals)) } } return nil } // validateSegment will validate if builder satisfies conditions for a valid sealing segment. -// No errors are expected during normal operation.
+// Errors expected during normal operation: +// - InvalidSealingSegmentError if the current builder state does not represent a valid sealing segment func (builder *SealingSegmentBuilder) validateSegment() error { // sealing cannot be empty if len(builder.blocks) == 0 { - return fmt.Errorf("expect at least 2 blocks in a sealing segment or 1 block in the case of root segments, but got an empty sealing segment: %w", ErrSegmentBlocksWrongLen) + return NewInvalidSealingSegmentError("expect at least 2 blocks in a sealing segment or 1 block in the case of root segments, but got an empty sealing segment") } if len(builder.extraBlocks) > 0 { - if builder.extraBlocks[0].Header.Height+1 != builder.lowest().Header.Height { - return fmt.Errorf("extra blocks don't connect to lowest block in segment") + if builder.extraBlocks[0].Block.Height+1 != builder.lowest().Height { + return NewInvalidSealingSegmentError("extra blocks don't connect to lowest block in segment") } } @@ -398,7 +534,7 @@ func (builder *SealingSegmentBuilder) validateSegment() error { // validate the latest seal is for the lowest block _, err := findLatestSealForLowestBlock(builder.blocks, builder.latestSeals) if err != nil { - return fmt.Errorf("sealing segment missing seal (lowest block id: %x) (highest block id: %x) %v: %w", builder.lowest().ID(), builder.highest().ID(), err, ErrSegmentMissingSeal) + return NewInvalidSealingSegmentError("sealing segment missing seal (lowest block id: %x) (highest block id: %x): %w", builder.lowest().ID(), builder.highest().ID(), err) } return nil @@ -410,24 +546,32 @@ func (builder *SealingSegmentBuilder) highest() *Block { return nil } - return builder.blocks[len(builder.blocks)-1] + return &builder.blocks[len(builder.blocks)-1].Block } // lowest returns the lowest block in segment. func (builder *SealingSegmentBuilder) lowest() *Block { - return builder.blocks[0] + return &builder.blocks[0].Block } // NewSealingSegmentBuilder returns *SealingSegmentBuilder -func NewSealingSegmentBuilder(resultLookup GetResultFunc, sealLookup GetSealByBlockIDFunc) *SealingSegmentBuilder { +func NewSealingSegmentBuilder( + resultLookup GetResultFunc, + sealLookup GetSealByBlockIDFunc, + protocolStateLookup GetProtocolStateEntryFunc, + sporkRootBlock *Block, +) *SealingSegmentBuilder { return &SealingSegmentBuilder{ - resultLookup: resultLookup, - sealByBlockIDLookup: sealLookup, - includedResults: make(map[Identifier]struct{}), - latestSeals: make(map[Identifier]Identifier), - blocks: make([]*Block, 0), - extraBlocks: make([]*Block, 0), - results: make(ExecutionResultList, 0), + resultLookup: resultLookup, + sealByBlockIDLookup: sealLookup, + protocolStateLookup: protocolStateLookup, + includedResults: make(map[Identifier]struct{}), + latestSeals: make(map[Identifier]Identifier), + protocolStateEntries: make(map[Identifier]*ProtocolStateEntryWrapper), + blocks: make([]*Proposal, 0, 10), + extraBlocks: make([]*Proposal, 0, DefaultTransactionExpiry), + results: make(ExecutionResultList, 0, 3), + sporkRootBlock: sporkRootBlock, } } @@ -449,18 +593,18 @@ func NewSealingSegmentBuilder(resultLookup GetResultFunc, sealLookup GetSealByBl // A <- B <- C <- D(seal_A) <- E(seal_B) ==> invalid, because latest seal is B, but lowest block is A // A(seal_A) ==> invalid, because this is impossible for non-root sealing segments // -// The node logic requires a valid sealing segment to bootstrap. There are no -// errors expected during normal operations.
-func findLatestSealForLowestBlock(blocks []*Block, latestSeals map[Identifier]Identifier) (*Seal, error) { - lowestBlockID := blocks[0].ID() - highestBlockID := blocks[len(blocks)-1].ID() +// The node logic requires a valid sealing segment to bootstrap. +// No errors are expected during normal operations. +func findLatestSealForLowestBlock(blocks []*Proposal, latestSeals map[Identifier]Identifier) (*Seal, error) { + lowestBlockID := blocks[0].Block.ID() + highestBlockID := blocks[len(blocks)-1].Block.ID() // get the ID of the latest seal for highest block latestSealID := latestSeals[highestBlockID] // find the seal within the block payloads for i := len(blocks) - 1; i >= 0; i-- { - block := blocks[i] + block := blocks[i].Block // look for latestSealID in the payload for _, seal := range block.Payload.Seals { // if we found the latest seal, confirm it seals lowest diff --git a/model/flow/sealing_segment.md b/model/flow/sealing_segment.md index 1cc6544ec37..475ea178789 100644 --- a/model/flow/sealing_segment.md +++ b/model/flow/sealing_segment.md @@ -2,31 +2,31 @@ The `SealingSegment` is a section of the finalized chain. It is part of the data needed to initialize a new node to join the network. Informally, the `SealingSegment` is continuous section -of recently finalized blocks that is long enough for the new node to execute its business logic. +of recently finalized blocks that is long enough for the new node to execute its business logic. -## History length covered by the Sealing Segment +## History length covered by the Sealing Segment The `SealingSegment` is created from a `protocol.Snapshot` via the method `SealingSegment`. -Lets denote the block that the `protocol.Snapshot` refers to as `head`. Per convention, -`head` must be a finalized block. +Let's denote the block that the `protocol.Snapshot` refers to as `head`. Per convention, +`head` must be a finalized block. -### Part 1: from `head` back to the latest sealed block +### Part 1: from `head` back to the latest sealed block The SealingSegment is a chain segment such that the last block (greatest height) is this snapshot's reference block (i.e. `head`) and the first (least height) is the most recently sealed block as of this snapshot. In other words, the most recently incorporated seal as of the highest block references the lowest block. The highest block does not need to contain this seal. -* Example 1: block `E` seals `A` +* Example 1: block `E` seals `B` ``` - A <- B <- C <- D <- E(SA) + B <- C <- D <- E(SB) ``` - Here, `SA` denotes the seal for block `A`. - In the sealing segment's last block (`E`) has a seal for block `A`, which is the first block of the sealing segment. + Here, `SB` denotes the seal for block `B`. + The sealing segment's last block (`E`) has a seal for block `B`, which is the first block of the sealing segment. -* Example 2: `E` contains no seals, but latest seal prior to `E` seals `A` +* Example 2: `F` contains no seals, but latest seal prior to `F` seals `B` ``` - A <- B <- C <- D(SA) <- E + B <- C <- D <- E(SB) <- F ``` * Example 3: `E` contains multiple seals ``` @@ -40,16 +40,24 @@ type SealingSegment struct { Blocks []*Block ⋮ -} +} ``` -**Minimality Requirement for `SealingSegment.Blocks`**: +**Minimality Requirement for `SealingSegment.Blocks`**: In example 3, note that block `B` is the highest sealed block as of `E`. Therefore, the lowest block in `SealingSegment.Blocks` must be `B`. Essentially, this is a minimality requirement for the history: it shouldn't be longer than necessary.
So extending the chain segment above to `A <- B <- C <- D <- E(SA, SB)` would be an invalid value for field `SealingSegment.Blocks`. + +**Nomenclature**: +* After bootstrapping, we refer to the latest sealed block in the sealing segment as the `SealedRoot` block. This is + `SealingSegment.Blocks[0]` (block `B` in example 3). +* The highest finalized block in the sealing segment is called `FinalizedRoot`. + This is always the last block in the `SealingSegment.Blocks` slice (block `E` in example 3). + + ### Part 2: `ExtraBlocks` In addition, the `SealingSegment` contains the field `ExtraBlocks`: @@ -68,10 +76,10 @@ ExtraBlocks []*Block ``` **In case `head` contains multiple seals, we need _all_ the sealed blocks**, for the following reason: -* All nodes locally maintain a copy of the protocol state. A service event may change the state of the protocol state. +* All nodes locally maintain a copy of the protocol state. A service event may change the state of the protocol state. * For Byzantine resilience, we don't want protocol-state changes to take effect immediately. Therefore, we process system events only after receiving a QC for the block. - + Now let us consider the situation where a newly initialized node comes online and processes the first child of `head`. Let's reuse the example from above, where our head was block `E` and we are now processing the child `X` ``` @@ -80,14 +88,14 @@ ExtraBlocks []*Block ExtraBlocks Blocks block ``` `X` carries the QC for `E`, hence the protocol-state changes in `E` take effect for `X`. Therefore, when processing `X`, - we go through the seals in `E` and look through the sealed execution results for service events. + we go through the seals in `E` and look through the sealed execution results for service events. * As the service events are order-sensitive, we need to process the seals in the correct order, which is by increasing height - of the sealed block. The seals don't contain the block's height information, hence we need to resolve the block. + of the sealed block. The seals don't contain the block's height information, hence we need to resolve the block. **Extended history to check for duplicated collection guarantees in blocks** is required by nodes that _validate_ block payloads (e.g. consensus nodes). Also Access Nodes require these blocks. Collections expire after `flow.DefaultTransactionExpiry` blocks. Hence, we desire a history of `flow.DefaultTransactionExpiry` blocks. However, there is the edge case of a recent spork (or genesis), -where the history is simply less that `flow.DefaultTransactionExpiry`. +where the history is simply less than `flow.DefaultTransactionExpiry`. ### Formal definition @@ -98,10 +106,11 @@ The descriptions from the previous section can be formalized as follows * (ii) All blocks that are sealed by `head`. This is relevant if `head` contains _multiple_ seals. * (iii) The sealing segment should contain the history back to (including): ``` - limitHeight := max(head.Height - flow.DefaultTransactionExpiry, SporkRootBlockHeight) + limitHeight := max(blockSealedAtHead.Height - flow.DefaultTransactionExpiry, SporkRootBlockHeight) ``` + where `blockSealedAtHead` is the block sealed by the `head` block. Note that all three conditions have to be satisfied by a sealing segment. Therefore, it must contain the longest history -required by any of the three conditions. The 'Spork Root Block' is the cutoff. +required by any of the three conditions. The 'Spork Root Block' is the cutoff.
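To make the formula concrete, here is a small worked example (numbers chosen purely for illustration; `flow.DefaultTransactionExpiry` is taken to be 600 blocks here):

```
blockSealedAtHead.Height = 10000, SporkRootBlockHeight = 100:
    limitHeight = max(10000 - 600, 100) = 9400   (normal operation)

blockSealedAtHead.Height = 350,   SporkRootBlockHeight = 100:
    limitHeight = max(350 - 600, 100)   = 100    (young spork: clamped to the spork root)
```

The `max` clamp is exactly what keeps the required history from reaching below the spork root block in the young-spork edge case (and, for unsigned heights, prevents underflow).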
Per convention, we include the blocks for (i) in the `SealingSegment.Blocks`, while the additional blocks for (ii) and optionally (iii) are contained in `SealingSegment.ExtraBlocks`. @@ -143,9 +152,7 @@ the segment. In particular: ## Outlook -In its current state, the sealing segment has been evolving driven by different needs. Most likely, there is some room for simplifications -and other improvements. However, an important aspect of the sealing segment is to allow newly-joining nodes to build an internal representation -of the protocol state, in particular the identity table. There are large changes coming around when we move to the dynamic identity table. -Therefore, we accept that the Sealing Segment currently has some technical debt and unnecessary complexity. Once we have implemented the -dynamic identity table, we will have a much more solidified understanding of the data in the sealing segment. +An important aspect of the sealing segment is to allow newly-joining nodes to build an internal representation +of the protocol state, in particular the identity table. In its current state, the sealing segment has been evolving, +driven by different needs. Most likely, there is some room for simplifications and other improvements. diff --git a/model/flow/sealing_segment_test.go b/model/flow/sealing_segment_test.go index 94034d25b41..138301d9940 100644 --- a/model/flow/sealing_segment_test.go +++ b/model/flow/sealing_segment_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -21,12 +22,14 @@ import ( type SealingSegmentSuite struct { suite.Suite - results map[flow.Identifier]*flow.ExecutionResult - sealsByBlockID map[flow.Identifier]*flow.Seal + results map[flow.Identifier]*flow.ExecutionResult + sealsByBlockID map[flow.Identifier]*flow.Seal + protocolStateEntries map[flow.Identifier]*flow.ProtocolStateEntryWrapper // bootstrap each test case with a block which is before, and receipt+seal for the block - priorBlock *flow.Block - priorReceipt *flow.ExecutionReceipt - priorSeal *flow.Seal + priorBlock *flow.Block + priorReceipt *flow.ExecutionReceipt + priorSeal *flow.Seal + defaultProtocolStateID flow.Identifier builder *flow.SealingSegmentBuilder } @@ -45,6 +48,10 @@ func (suite *SealingSegmentSuite) addSeal(blockID flow.Identifier, seal *flow.Seal) { suite.sealsByBlockID[blockID] = seal } +func (suite *SealingSegmentSuite) addProtocolStateEntry(protocolStateID flow.Identifier, entry *flow.ProtocolStateEntryWrapper) { + suite.protocolStateEntries[protocolStateID] = entry +} + // GetResult gets a result by ID from the map in the suite. func (suite *SealingSegmentSuite) GetResult(resultID flow.Identifier) (*flow.ExecutionResult, error) { result, ok := suite.results[resultID] @@ -63,30 +70,69 @@ func (suite *SealingSegmentSuite) GetSealByBlockID(blockID flow.Identifier) (*fl seal, ok := suite.sealsByBlockID[blockID] return seal, nil } +// GetProtocolStateEntry gets a protocol state entry from the map in the suite. +func (suite *SealingSegmentSuite) GetProtocolStateEntry(protocolStateID flow.Identifier) (*flow.ProtocolStateEntryWrapper, error) { + entry, ok := suite.protocolStateEntries[protocolStateID] + if !ok { + return nil, fmt.Errorf("not found") + } + return entry, nil +} + // SetupTest resets maps and creates a new builder for a new test case.
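As an aside before the suite's setup code: condensed into one place, the builder workflow these tests exercise looks roughly as follows. This is a hedged sketch with an invented helper; all parameters are placeholders supplied by the caller, and `fmt` plus the `flow` package are assumed to be imported.

```go
// buildSegment condenses the builder workflow: segment blocks are added in
// ascending height order, extra blocks afterwards in descending order, and
// SealingSegment() validates and assembles the final result.
func buildSegment(
	getResult flow.GetResultFunc,
	getSeal flow.GetSealByBlockIDFunc,
	getEntry flow.GetProtocolStateEntryFunc,
	sporkRoot *flow.Block,
	blocksAscending []*flow.Proposal,
	extraBlocksDescending []*flow.Proposal,
) (*flow.SealingSegment, error) {
	builder := flow.NewSealingSegmentBuilder(getResult, getSeal, getEntry, sporkRoot)
	for _, proposal := range blocksAscending {
		if err := builder.AddBlock(proposal); err != nil {
			return nil, fmt.Errorf("could not add block: %w", err)
		}
	}
	for _, proposal := range extraBlocksDescending {
		if err := builder.AddExtraBlock(proposal); err != nil {
			return nil, fmt.Errorf("could not add extra block: %w", err)
		}
	}
	return builder.SealingSegment()
}
```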
func (suite *SealingSegmentSuite) SetupTest() { suite.results = make(map[flow.Identifier]*flow.ExecutionResult) suite.sealsByBlockID = make(map[flow.Identifier]*flow.Seal) - suite.builder = flow.NewSealingSegmentBuilder(suite.GetResult, suite.GetSealByBlockID) + suite.protocolStateEntries = make(map[flow.Identifier]*flow.ProtocolStateEntryWrapper) + root, _, _ := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) + suite.builder = flow.NewSealingSegmentBuilder(suite.GetResult, suite.GetSealByBlockID, suite.GetProtocolStateEntry, root) + + suite.defaultProtocolStateID = unittest.IdentifierFixture() + suite.protocolStateEntries[suite.defaultProtocolStateID] = suite.ProtocolStateEntryWrapperFixture() - priorBlock := unittest.BlockFixture() - priorReceipt, priorSeal := unittest.ReceiptAndSealForBlock(&priorBlock) + priorBlock := suite.BlockFixture() + priorReceipt, priorSeal := unittest.ReceiptAndSealForBlock(priorBlock) suite.results[priorReceipt.ExecutionResult.ID()] = &priorReceipt.ExecutionResult - suite.priorBlock = &priorBlock + suite.priorBlock = priorBlock suite.priorReceipt = priorReceipt suite.priorSeal = priorSeal } +// BlockFixture returns a Block fixture with the default protocol state ID. +func (suite *SealingSegmentSuite) BlockFixture() *flow.Block { + return unittest.BlockFixture( + unittest.Block.WithPayload(suite.PayloadFixture()), + ) +} + +// PayloadFixture returns a Payload fixture with the default protocol state ID. +func (suite *SealingSegmentSuite) PayloadFixture(opts ...func(payload *flow.Payload)) flow.Payload { + opts = append(opts, unittest.WithProtocolStateID(suite.defaultProtocolStateID)) + return unittest.PayloadFixture(opts...) +} + +// BlockWithParentFixture returns a Block fixture with the default protocol state. +func (suite *SealingSegmentSuite) BlockWithParentFixture(parent *flow.Header) *flow.Block { + return unittest.BlockWithParentAndPayload(parent, suite.PayloadFixture()) +} + +// ProtocolStateEntryWrapperFixture returns a ProtocolStateEntryWrapper. +// For these tests, only the ID matters, so we can just return an empty struct. +func (suite *SealingSegmentSuite) ProtocolStateEntryWrapperFixture() *flow.ProtocolStateEntryWrapper { + return &flow.ProtocolStateEntryWrapper{} +} + // FirstBlock returns a first block which contains a seal and receipt referencing // priorBlock (this is the simplest case for a sealing segment). func (suite *SealingSegmentSuite) FirstBlock() *flow.Block { - block := unittest.BlockFixture() - block.SetPayload(unittest.PayloadFixture( - unittest.WithSeals(suite.priorSeal), - unittest.WithReceipts(suite.priorReceipt), - )) + block := unittest.BlockFixture( + unittest.Block.WithPayload(suite.PayloadFixture( + unittest.WithSeals(suite.priorSeal), + unittest.WithReceipts(suite.priorReceipt), + )), + ) suite.addSeal(block.ID(), suite.priorSeal) - return &block + return block } // AddBlocks is a short-hand for adding a sequence of blocks, in order.
@@ -100,7 +146,7 @@ func (suite *SealingSegmentSuite) AddBlocks(blocks ...*flow.Block) { latestSeal = seal } suite.addSeal(block.ID(), latestSeal) - err := suite.builder.AddBlock(block) + err := suite.builder.AddBlock(unittest.ProposalFromBlock(block)) require.NoError(suite.T(), err) } } @@ -110,25 +156,32 @@ func (suite *SealingSegmentSuite) AddBlocks(blocks ...*flow.Block) { // // B1(R*,S*) <- B2(R1) <- B4(S1) func (suite *SealingSegmentSuite) TestBuild_MissingResultFromReceipt() { - // B1 contains a receipt (but no result) and seal for a prior block - block1 := unittest.BlockFixture() - block1.SetPayload(unittest.PayloadFixture(unittest.WithReceiptsAndNoResults(suite.priorReceipt), unittest.WithSeals(suite.priorSeal))) + block1 := unittest.BlockFixture( + unittest.Block.WithPayload(suite.PayloadFixture( + unittest.WithReceiptsAndNoResults(suite.priorReceipt), + unittest.WithSeals(suite.priorSeal), + )), + ) - block2 := unittest.BlockWithParentFixture(block1.Header) - receipt1, seal1 := unittest.ReceiptAndSealForBlock(&block1) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt1)), + ) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1))) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(seal1)), + ) - suite.AddBlocks(&block1, block2, block3) + suite.AddBlocks(block1, block2, block3) segment, err := suite.builder.SealingSegment() require.NoError(suite.T(), err) require.NoError(suite.T(), segment.Validate()) - unittest.AssertEqualBlocksLenAndOrder(suite.T(), []*flow.Block{&block1, block2, block3}, segment.Blocks) + unittest.AssertEqualBlockSequences(suite.T(), []*flow.Block{block1, block2, block3}, segment.Blocks) require.Equal(suite.T(), 1, segment.ExecutionResults.Size()) require.Equal(suite.T(), suite.priorReceipt.ExecutionResult.ID(), segment.ExecutionResults[0].ID()) } @@ -140,24 +193,28 @@ func (suite *SealingSegmentSuite) TestBuild_MissingResultFromReceipt() { func (suite *SealingSegmentSuite) TestBuild_MissingFirstBlockSeal() { // B1 contains an empty payload - block1 := unittest.BlockFixture() + block1 := suite.BlockFixture() // latest seal as of B1 is priorSeal suite.sealsByBlockID[block1.ID()] = suite.priorSeal - receipt1, seal1 := unittest.ReceiptAndSealForBlock(&block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt1)), + ) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1))) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(seal1)), + ) - suite.AddBlocks(&block1, block2, block3) + suite.AddBlocks(block1, block2, block3) segment, err := suite.builder.SealingSegment() require.NoError(suite.T(), err) require.NoError(suite.T(), segment.Validate()) - unittest.AssertEqualBlocksLenAndOrder(suite.T(), []*flow.Block{&block1, block2, block3}, segment.Blocks) + unittest.AssertEqualBlockSequences(suite.T(), []*flow.Block{block1, block2, 
block3}, segment.Blocks) // should contain priorSeal as first seal require.Equal(suite.T(), suite.priorSeal, segment.FirstSeal) // should contain result referenced by first seal @@ -180,11 +237,15 @@ func (suite *SealingSegmentSuite) TestBuild_MissingResultFromPayloadSeal() { pastSeal.ResultID = pastResult.ID() receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1), unittest.WithSeals(pastSeal))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt1), unittest.WithSeals(pastSeal)), + ) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1))) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(seal1)), + ) suite.AddBlocks(block1, block2, block3) @@ -192,7 +253,7 @@ func (suite *SealingSegmentSuite) TestBuild_MissingResultFromPayloadSeal() { require.NoError(suite.T(), err) require.NoError(suite.T(), segment.Validate()) - unittest.AssertEqualBlocksLenAndOrder(suite.T(), []*flow.Block{block1, block2, block3}, segment.Blocks) + unittest.AssertEqualBlockSequences(suite.T(), []*flow.Block{block1, block2, block3}, segment.Blocks) require.Equal(suite.T(), 1, segment.ExecutionResults.Size()) require.Equal(suite.T(), pastResult.ID(), segment.ExecutionResults[0].ID()) } @@ -205,21 +266,25 @@ func (suite *SealingSegmentSuite) TestBuild_MissingResultFromPayloadSeal() { func (suite *SealingSegmentSuite) TestBuild_WrongLatestSeal() { block1 := suite.FirstBlock() - block2 := unittest.BlockWithParentFixture(block1.Header) + block2 := suite.BlockWithParentFixture(block1.ToHeader()) receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1, receipt2))) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt1, receipt2)), + ) - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1, seal2))) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(seal1, seal2)), + ) suite.AddBlocks(block1, block2, block3, block4) _, err := suite.builder.SealingSegment() - require.ErrorIs(suite.T(), err, flow.ErrSegmentMissingSeal) + require.True(suite.T(), flow.IsInvalidSealingSegmentError(err)) } // Tests the case where the final block in the segment seals multiple @@ -238,18 +303,22 @@ func (suite *SealingSegmentSuite) TestBuild_MultipleFinalBlockSeals() { pastSeal := unittest.Seal.Fixture() pastSeal.ResultID = pastResult.ID() - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt1)), + ) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithSeals(pastSeal, seal1))) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(pastSeal, seal1)), + ) suite.AddBlocks(block1, block2, block3) segment, err 
:= suite.builder.SealingSegment() require.NoError(suite.T(), err) - unittest.AssertEqualBlocksLenAndOrder(suite.T(), []*flow.Block{block1, block2, block3}, segment.Blocks) + unittest.AssertEqualBlockSequences(suite.T(), []*flow.Block{block1, block2, block3}, segment.Blocks) require.Equal(suite.T(), 1, segment.ExecutionResults.Size()) require.Equal(suite.T(), pastResult.ID(), segment.ExecutionResults[0].ID()) require.NoError(suite.T(), segment.Validate()) @@ -257,18 +326,18 @@ func (suite *SealingSegmentSuite) TestBuild_MultipleFinalBlockSeals() { // TestBuild_RootSegment tests we can build a valid root sealing segment. func (suite *SealingSegmentSuite) TestBuild_RootSegment() { - root, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) suite.sealsByBlockID[root.ID()] = seal + suite.addProtocolStateEntry(root.Payload.ProtocolStateID, suite.ProtocolStateEntryWrapperFixture()) suite.addResult(result) - err := suite.builder.AddBlock(root) + err := suite.builder.AddBlock(unittest.ProposalFromBlock(root)) require.NoError(suite.T(), err) segment, err := suite.builder.SealingSegment() require.NoError(suite.T(), err) require.NoError(suite.T(), segment.Validate()) - unittest.AssertEqualBlocksLenAndOrder(suite.T(), []*flow.Block{root}, segment.Blocks) + unittest.AssertEqualBlockSequences(suite.T(), []*flow.Block{root}, segment.Blocks) require.Equal(suite.T(), segment.Highest().ID(), root.ID()) require.Equal(suite.T(), segment.Sealed().ID(), root.ID()) } @@ -276,12 +345,15 @@ func (suite *SealingSegmentSuite) TestBuild_RootSegment() { // TestBuild_RootSegmentWrongView tests that we return ErrSegmentInvalidRootView for // a single-block sealing segment with a block view not equal to 0. func (suite *SealingSegmentSuite) TestBuild_RootSegmentWrongView() { - - root, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) - root.Header.View = 10 // invalid root block view + root, result, seal := unittest.BootstrapFixture( + unittest.IdentityListFixture(5, unittest.WithAllRoles()), + func(block *flow.Block) { + block.View = 10 // invalid root block view + }) suite.sealsByBlockID[root.ID()] = seal + suite.addProtocolStateEntry(root.Payload.ProtocolStateID, suite.ProtocolStateEntryWrapperFixture()) suite.addResult(result) - err := suite.builder.AddBlock(root) + err := suite.builder.AddBlock(unittest.ProposalFromBlock(root)) require.NoError(suite.T(), err) _, err = suite.builder.SealingSegment() @@ -297,13 +369,17 @@ func (suite *SealingSegmentSuite) TestBuild_HighestContainsNoSeals() { block1 := suite.FirstBlock() receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt1)), + ) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1))) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(seal1)), + ) - block4 := unittest.BlockWithParentFixture(block3.Header) + block4 := suite.BlockWithParentFixture(block3.ToHeader()) suite.AddBlocks(block1, block2, block3, block4) @@ -311,10 +387,10 @@ func (suite *SealingSegmentSuite) TestBuild_HighestContainsNoSeals() { require.NoError(suite.T(), err) require.NoError(suite.T(), segment.Validate()) - 
unittest.AssertEqualBlocksLenAndOrder(suite.T(), []*flow.Block{block1, block2, block3, block4}, segment.Blocks) + unittest.AssertEqualBlockSequences(suite.T(), []*flow.Block{block1, block2, block3, block4}, segment.Blocks) } -// Test that we should return ErrSegmentMissingSeal if highest block contains +// Test that we should return InvalidSealingSegmentError if highest block contains // seals but does not contain seal for lowest, when sealing segment is built. // // B1(S*) <- B2(R1) <- B3(S1,R2) <- B4(S2) @@ -322,24 +398,30 @@ func (suite *SealingSegmentSuite) TestBuild_HighestContainsWrongSeal() { block1 := suite.FirstBlock() receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt1)), + ) receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt2), unittest.WithSeals(seal1))) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt2), unittest.WithSeals(seal1)), + ) // highest block contains wrong seal - invalid - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal2))) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(seal2)), + ) suite.AddBlocks(block1, block2, block3, block4) _, err := suite.builder.SealingSegment() - require.ErrorIs(suite.T(), err, flow.ErrSegmentMissingSeal) + require.True(suite.T(), flow.IsInvalidSealingSegmentError(err)) } -// Test that we should return ErrSegmentMissingSeal if highest block contains +// Test that we should return InvalidSealingSegmentError if highest block contains // no seals and first ancestor with seals does not seal lowest, when sealing // segment is built // @@ -348,122 +430,259 @@ func (suite *SealingSegmentSuite) TestBuild_HighestAncestorContainsWrongSeal() { block1 := suite.FirstBlock() receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt1)), + ) receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt2), unittest.WithSeals(seal1))) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt2), unittest.WithSeals(seal1)), + ) // ancestor of highest block contains wrong seal - invalid - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal2))) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(seal2)), + ) - block5 := unittest.BlockWithParentFixture(block4.Header) + block5 := suite.BlockWithParentFixture(block4.ToHeader()) suite.AddBlocks(block1, block2, block3, block4, block5) _, err := suite.builder.SealingSegment() - require.ErrorIs(suite.T(), err, 
flow.ErrSegmentMissingSeal) + require.True(suite.T(), flow.IsInvalidSealingSegmentError(err)) } -// Test that we should return ErrSegmentBlocksWrongLen if sealing segment is +// TestBuild_ChangingProtocolStateID_Blocks tests constructing a sealing segment where +// the primary segment (`Blocks`) contains blocks with different protocol state entries. +// In this test, B1 commits to the default protocol state ID, and blocks B2 and B3 +// commit to a different protocol state ID (PS2). +// B1(R*,S*) <- B2(R1,PS2) <- B3(S1,PS2) +func (suite *SealingSegmentSuite) TestBuild_ChangingProtocolStateID_Blocks() { + block1 := unittest.BlockFixture( + unittest.Block.WithPayload(suite.PayloadFixture( + unittest.WithReceipts(suite.priorReceipt), + unittest.WithSeals(suite.priorSeal), + )), + ) + + protocolStateID2 := unittest.IdentifierFixture() + suite.addProtocolStateEntry(protocolStateID2, suite.ProtocolStateEntryWrapperFixture()) + + receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(receipt1), unittest.WithProtocolStateID(protocolStateID2)), + ) + + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(seal1), unittest.WithProtocolStateID(protocolStateID2)), + ) + + suite.AddBlocks(block1, block2, block3) + + segment, err := suite.builder.SealingSegment() + require.NoError(suite.T(), err) + require.NoError(suite.T(), segment.Validate()) + + unittest.AssertEqualBlockSequences(suite.T(), []*flow.Block{block1, block2, block3}, segment.Blocks) + // resulting segment must contain both protocol state IDs + assert.Len(suite.T(), segment.ProtocolStateEntries, 2) + _, ok := segment.ProtocolStateEntries[suite.defaultProtocolStateID] + assert.True(suite.T(), ok) + _, ok = segment.ProtocolStateEntries[protocolStateID2] + assert.True(suite.T(), ok) +} + +// TestBuild_ChangingProtocolStateID_ExtraBlocks tests constructing a sealing segment where +// `ExtraBlocks` contains blocks with different protocol state entries. +// In this test, blocks B1, B2, and B3 commit to the default protocol state ID. +// Extra blocks EB1 and EB2 commit to a different protocol state ID (PS2).
+// EB2(PS2) <- EB1 <- B1(R*,S*) <- B2(R1) <- B3(S1) +func (suite *SealingSegmentSuite) TestBuild_ChangingProtocolStateID_ExtraBlocks() { + block1 := unittest.BlockFixture( + unittest.Block.WithPayload(suite.PayloadFixture( + unittest.WithReceipts(suite.priorReceipt), + unittest.WithSeals(suite.priorSeal), + )), + ) + + receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt1)), + ) + + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(seal1)), + ) + + suite.AddBlocks(block1, block2, block3) + + // construct two extra blocks that connect to the lowest block and add them to builder + protocolStateID2 := unittest.IdentifierFixture() + suite.addProtocolStateEntry(protocolStateID2, suite.ProtocolStateEntryWrapperFixture()) + + extraBlock1 := unittest.BlockFixture( + unittest.Block.WithHeight(block1.Height-1), + unittest.Block.WithPayload(unittest.PayloadFixture( + unittest.WithProtocolStateID(protocolStateID2), + )), + ) + err := suite.builder.AddExtraBlock(unittest.ProposalFromBlock(extraBlock1)) + require.NoError(suite.T(), err) + + extraBlock2 := suite.BlockFixture() + extraBlock2.Height = extraBlock1.Height - 1 + err = suite.builder.AddExtraBlock(unittest.ProposalFromBlock(extraBlock2)) + require.NoError(suite.T(), err) + + segment, err := suite.builder.SealingSegment() + require.NoError(suite.T(), err) + err = segment.Validate() + require.NoError(suite.T(), err) + + unittest.AssertEqualBlockSequences(suite.T(), []*flow.Block{block1, block2, block3}, segment.Blocks) + unittest.AssertEqualBlockSequences(suite.T(), []*flow.Block{extraBlock2, extraBlock1}, segment.ExtraBlocks) + // resulting segment must contain both protocol state IDs + assert.Len(suite.T(), segment.ProtocolStateEntries, 2) + _, ok := segment.ProtocolStateEntries[suite.defaultProtocolStateID] + assert.True(suite.T(), ok) + _, ok = segment.ProtocolStateEntries[protocolStateID2] + assert.True(suite.T(), ok) +} + +// Test that we should return InvalidSealingSegmentError if sealing segment is // built with no blocks. func (suite *SealingSegmentSuite) TestBuild_NoBlocks() { - builder := flow.NewSealingSegmentBuilder(nil, nil) + root, _, _ := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) + builder := flow.NewSealingSegmentBuilder(nil, nil, nil, root) _, err := builder.SealingSegment() - require.ErrorIs(suite.T(), err, flow.ErrSegmentBlocksWrongLen) + require.True(suite.T(), flow.IsInvalidSealingSegmentError(err)) } -// should return ErrSegmentInvalidBlockHeight if block has invalid height +// should return InvalidSealingSegmentError if block has invalid height func (suite *SealingSegmentSuite) TestAddBlock_InvalidHeight() { block1 := suite.FirstBlock() // block 2 has an invalid height - block2 := unittest.BlockFixture() - block2.Header.Height = block1.Header.Height + 2 + block2 := suite.BlockFixture() + block2.Height = block1.Height + 2 - err := suite.builder.AddBlock(block1) + err := suite.builder.AddBlock(unittest.ProposalFromBlock(block1)) require.NoError(suite.T(), err) - err = suite.builder.AddBlock(&block2) - require.ErrorIs(suite.T(), err, flow.ErrSegmentInvalidBlockHeight) + err = suite.builder.AddBlock(unittest.ProposalFromBlock(block2)) + require.True(suite.T(), flow.IsInvalidSealingSegmentError(err)) } // TestAddBlock_StorageError tests that errors in the resource getters bubble up. 
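For orientation, the happy path these refactored tests exercise now looks roughly as follows. This is a minimal sketch, not part of the diff: the lookup functions, root block, and block list are assumed to be supplied by the caller, and `buildSegmentSketch` is a hypothetical helper name.

```go
package flow_test

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

// buildSegmentSketch is a hypothetical helper illustrating the extended
// builder signature; blocks are assumed to be passed lowest height first.
func buildSegmentSketch(
	resultLookup func(flow.Identifier) (*flow.ExecutionResult, error),
	sealLookup func(flow.Identifier) (*flow.Seal, error),
	protocolStateEntryLookup func(flow.Identifier) (*flow.ProtocolStateEntryWrapper, error),
	rootBlock *flow.Block,
	blocks ...*flow.Block,
) (*flow.SealingSegment, error) {
	builder := flow.NewSealingSegmentBuilder(resultLookup, sealLookup, protocolStateEntryLookup, rootBlock)
	for _, b := range blocks {
		// blocks are now added as proposals rather than bare blocks
		if err := builder.AddBlock(unittest.ProposalFromBlock(b)); err != nil {
			return nil, err // structurally invalid input yields InvalidSealingSegmentError
		}
	}
	return builder.SealingSegment()
}
```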
func TestAddBlock_StorageError(t *testing.T) { - + root, _, _ := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) t.Run("missing result", func(t *testing.T) { // create a receipt to include in the first block, whose result is not in storage missingReceipt := unittest.ExecutionReceiptFixture() - block1 := unittest.BlockFixture() - sealLookup := func(flow.Identifier) (*flow.Seal, error) { return unittest.Seal.Fixture(), nil } - resultLookup := func(flow.Identifier) (*flow.ExecutionResult, error) { return nil, fmt.Errorf("not found") } - builder := flow.NewSealingSegmentBuilder(resultLookup, sealLookup) - block1.SetPayload(unittest.PayloadFixture( - unittest.WithReceiptsAndNoResults(missingReceipt), - unittest.WithSeals(unittest.Seal.Fixture(unittest.Seal.WithResult(&missingReceipt.ExecutionResult))), - )) + exception := fmt.Errorf("") + sealLookup := func(flow.Identifier) (*flow.Seal, error) { return unittest.Seal.Fixture(), nil } + resultLookup := func(flow.Identifier) (*flow.ExecutionResult, error) { return nil, exception } + protocolStateEntryLookup := func(flow.Identifier) (*flow.ProtocolStateEntryWrapper, error) { + return &flow.ProtocolStateEntryWrapper{}, nil + } + builder := flow.NewSealingSegmentBuilder(resultLookup, sealLookup, protocolStateEntryLookup, root) + block1 := unittest.BlockFixture( + unittest.Block.WithPayload(unittest.PayloadFixture( + unittest.WithReceiptsAndNoResults(missingReceipt), + unittest.WithSeals(unittest.Seal.Fixture(unittest.Seal.WithResult(&missingReceipt.ExecutionResult))), + )), + ) + + err := builder.AddBlock(unittest.ProposalFromBlock(block1)) + require.NoError(t, err) - err := builder.AddBlock(&block1) - require.ErrorIs(t, err, flow.ErrSegmentResultLookup) + _, err = builder.SealingSegment() + require.ErrorIs(t, err, exception) }) // create a first block which contains no seal, and the seal isn't in storage t.Run("missing seal", func(t *testing.T) { + exception := fmt.Errorf("") resultLookup := func(flow.Identifier) (*flow.ExecutionResult, error) { return unittest.ExecutionResultFixture(), nil } - sealLookup := func(flow.Identifier) (*flow.Seal, error) { return nil, fmt.Errorf("not found") } - block1 := unittest.BlockFixture() - block1.SetPayload(flow.EmptyPayload()) - builder := flow.NewSealingSegmentBuilder(resultLookup, sealLookup) + sealLookup := func(flow.Identifier) (*flow.Seal, error) { return nil, exception } + protocolStateEntryLookup := func(flow.Identifier) (*flow.ProtocolStateEntryWrapper, error) { + return &flow.ProtocolStateEntryWrapper{}, nil + } + block1 := unittest.BlockFixture( + unittest.Block.WithPayload(*flow.NewEmptyPayload()), + ) + builder := flow.NewSealingSegmentBuilder(resultLookup, sealLookup, protocolStateEntryLookup, root) - err := builder.AddBlock(&block1) - require.ErrorIs(t, err, flow.ErrSegmentSealLookup) + err := builder.AddBlock(unittest.ProposalFromBlock(block1)) + require.ErrorIs(t, err, exception) + }) + + t.Run("missing protocol state entry", func(t *testing.T) { + exception := fmt.Errorf("") + resultLookup := func(flow.Identifier) (*flow.ExecutionResult, error) { return unittest.ExecutionResultFixture(), nil } + sealLookup := func(flow.Identifier) (*flow.Seal, error) { return unittest.Seal.Fixture(), nil } + protocolStateEntryLookup := func(flow.Identifier) (*flow.ProtocolStateEntryWrapper, error) { return nil, exception } + block1 := unittest.BlockFixture( + unittest.Block.WithPayload(*flow.NewEmptyPayload()), + ) + builder := flow.NewSealingSegmentBuilder(resultLookup, 
sealLookup, protocolStateEntryLookup, root) + + err := builder.AddBlock(unittest.ProposalFromBlock(block1)) + require.ErrorIs(t, err, exception) }) } // TestAddExtraBlock tests different scenarios for adding extra blocks, covers happy and unhappy path scenarios. func (suite *SealingSegmentSuite) TestAddExtraBlock() { + root, _, _ := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) // populate sealing segment with one block firstBlock := suite.FirstBlock() - firstBlock.Header.Height += 100 + firstBlock.Height += 100 + firstProposal := unittest.ProposalFromBlock(firstBlock) suite.AddBlocks(firstBlock) suite.T().Run("empty-segment", func(t *testing.T) { - builder := flow.NewSealingSegmentBuilder(nil, nil) - block := unittest.BlockFixture() - err := builder.AddExtraBlock(&block) + builder := flow.NewSealingSegmentBuilder(nil, nil, nil, root) + block := suite.BlockFixture() + err := builder.AddExtraBlock(unittest.ProposalFromBlock(block)) require.Error(t, err) }) suite.T().Run("extra-block-does-not-connect", func(t *testing.T) { // adding extra block that doesn't connect to the lowest is an error - extraBlock := unittest.BlockFixture() - extraBlock.Header.Height = firstBlock.Header.Height + 10 // make sure it doesn't connect by height - err := suite.builder.AddExtraBlock(&extraBlock) - require.ErrorIs(t, err, flow.ErrSegmentInvalidBlockHeight) + extraBlock := suite.BlockFixture() + extraBlock.Height = firstBlock.Height + 10 // make sure it doesn't connect by height + err := suite.builder.AddExtraBlock(unittest.ProposalFromBlock(extraBlock)) + require.True(suite.T(), flow.IsInvalidSealingSegmentError(err)) }) suite.T().Run("extra-block-not-continuous", func(t *testing.T) { - builder := flow.NewSealingSegmentBuilder(suite.GetResult, suite.GetSealByBlockID) - err := builder.AddBlock(firstBlock) + builder := flow.NewSealingSegmentBuilder(suite.GetResult, suite.GetSealByBlockID, suite.GetProtocolStateEntry, root) + err := builder.AddBlock(firstProposal) require.NoError(t, err) - extraBlock := unittest.BlockFixture() - extraBlock.Header.Height = firstBlock.Header.Height - 1 // make it connect - err = builder.AddExtraBlock(&extraBlock) + extraBlock := suite.BlockFixture() + extraBlock.Height = firstBlock.Height - 1 // make it connect + err = builder.AddExtraBlock(unittest.ProposalFromBlock(extraBlock)) require.NoError(t, err) - extraBlockWithSkip := unittest.BlockFixture() - extraBlockWithSkip.Header.Height = extraBlock.Header.Height - 2 // skip one height - err = builder.AddExtraBlock(&extraBlockWithSkip) - require.ErrorIs(t, err, flow.ErrSegmentInvalidBlockHeight) + extraBlockWithSkip := suite.BlockFixture() + extraBlockWithSkip.Height = extraBlock.Height - 2 // skip one height + err = builder.AddExtraBlock(unittest.ProposalFromBlock(extraBlockWithSkip)) + require.True(suite.T(), flow.IsInvalidSealingSegmentError(err)) }) suite.T().Run("root-segment-extra-blocks", func(t *testing.T) { - builder := flow.NewSealingSegmentBuilder(suite.GetResult, suite.GetSealByBlockID) - err := builder.AddBlock(firstBlock) + builder := flow.NewSealingSegmentBuilder(suite.GetResult, suite.GetSealByBlockID, suite.GetProtocolStateEntry, root) + err := builder.AddBlock(firstProposal) require.NoError(t, err) - extraBlock := unittest.BlockFixture() - extraBlock.Header.Height = firstBlock.Header.Height - 1 - err = builder.AddExtraBlock(&extraBlock) + extraBlock := suite.BlockFixture() + extraBlock.Height = firstBlock.Height - 1 + err = 
builder.AddExtraBlock(unittest.ProposalFromBlock(extraBlock)) require.NoError(t, err) _, err = builder.SealingSegment() // root segment cannot have extra blocks @@ -474,24 +693,28 @@ func (suite *SealingSegmentSuite) TestAddExtraBlock() { // B1(S*) <- B2(R1) <- B3(S1) receipt, seal := unittest.ReceiptAndSealForBlock(firstBlock) - blockWithER := unittest.BlockWithParentFixture(firstBlock.Header) - blockWithER.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt))) + blockWithER := unittest.BlockWithParentAndPayload( + firstBlock.ToHeader(), + suite.PayloadFixture(unittest.WithReceipts(receipt)), + ) // add one more block, with seal to the ER - highestBlock := unittest.BlockWithParentFixture(blockWithER.Header) - highestBlock.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal))) + highestBlock := unittest.BlockWithParentAndPayload( + blockWithER.ToHeader(), + suite.PayloadFixture(unittest.WithSeals(seal)), + ) suite.AddBlocks(blockWithER, highestBlock) // construct two extra blocks that connect to the lowest block and add them to builder // EB2 <- EB1 <- B1(S*) <- B2(R1) <- B3(S1) - extraBlock := unittest.BlockFixture() - extraBlock.Header.Height = firstBlock.Header.Height - 1 - err := suite.builder.AddExtraBlock(&extraBlock) + extraBlock := suite.BlockFixture() + extraBlock.Height = firstBlock.Height - 1 + err := suite.builder.AddExtraBlock(unittest.ProposalFromBlock(extraBlock)) require.NoError(t, err) - secondExtraBlock := unittest.BlockFixture() - secondExtraBlock.Header.Height = extraBlock.Header.Height - 1 - err = suite.builder.AddExtraBlock(&secondExtraBlock) + secondExtraBlock := suite.BlockFixture() + secondExtraBlock.Height = extraBlock.Height - 1 + err = suite.builder.AddExtraBlock(unittest.ProposalFromBlock(secondExtraBlock)) require.NoError(t, err) segment, err := suite.builder.SealingSegment() require.NoError(t, err) diff --git a/model/flow/service_event.go b/model/flow/service_event.go index 7467a9e8f2f..325665c7cd7 100644 --- a/model/flow/service_event.go +++ b/model/flow/service_event.go @@ -19,9 +19,13 @@ func (set ServiceEventType) String() string { } const ( - ServiceEventSetup ServiceEventType = "setup" - ServiceEventCommit ServiceEventType = "commit" - ServiceEventVersionBeacon ServiceEventType = "version-beacon" + ServiceEventSetup ServiceEventType = "setup" + ServiceEventCommit ServiceEventType = "commit" + ServiceEventRecover ServiceEventType = "recover" + ServiceEventVersionBeacon ServiceEventType = "version-beacon" // VersionBeacon only controls the version of ENs, describing software compatibility via semantic versioning + ServiceEventProtocolStateVersionUpgrade ServiceEventType = "protocol-state-version-upgrade" // Protocol State version applies to all nodes and uses an _integer version_ of the _protocol_ + ServiceEventSetEpochExtensionViewCount ServiceEventType = "set-epoch-extension-view-count" // Sets value for parameter `EpochExtensionViewCount` in the protocol state's KV store. + ServiceEventEjectNode ServiceEventType = "eject-node" // Marks node with specified NodeID as 'ejected' in the protocol state ) // ServiceEvent represents a service event, which is a special event that when @@ -61,82 +65,124 @@ func (sel ServiceEventList) EqualTo(other ServiceEventList) (bool, error) { return true, nil } +// ServiceEventMarshaller marshals and unmarshals all types of service events.
type ServiceEventMarshaller interface { - Unmarshal(b []byte) (ServiceEvent, error) - UnmarshalWithType( - b []byte, - eventType ServiceEventType, - ) ( - ServiceEvent, - error, - ) + // UnmarshalWrapped unmarshals the service event and returns it as a wrapped ServiceEvent type. + // The input bytes must be encoded as a generic wrapped ServiceEvent type. + // Forwards errors from the underlying marshaller (treat errors as you would from eg. json.Unmarshal) + UnmarshalWrapped(b []byte) (ServiceEvent, error) + // UnmarshalWithType unmarshals the service event and returns it as a wrapped ServiceEvent type. + // The input bytes must be encoded as a specific event type (for example, EpochSetup). + // Forwards errors from the underlying marshaller (treat errors as you would from eg. json.Unmarshal) + UnmarshalWithType(b []byte, eventType ServiceEventType) (ServiceEvent, error) } type marshallerImpl struct { - MarshalFunc func(v interface{}) ([]byte, error) - UnmarshalFunc func(data []byte, v interface{}) error + marshalFunc func(v interface{}) ([]byte, error) + unmarshalFunc func(data []byte, v interface{}) error } +var _ ServiceEventMarshaller = (*marshallerImpl)(nil) + var ( + // CAUTION: Json and MsgPack are to be used only for trusted data sources ServiceEventJSONMarshaller = marshallerImpl{ - MarshalFunc: json.Marshal, - UnmarshalFunc: json.Unmarshal, + marshalFunc: json.Marshal, + unmarshalFunc: json.Unmarshal, } + // CAUTION: Json and MsgPack are to be used only for trusted data sources ServiceEventMSGPACKMarshaller = marshallerImpl{ - MarshalFunc: msgpack.Marshal, - UnmarshalFunc: msgpack.Unmarshal, + marshalFunc: msgpack.Marshal, + unmarshalFunc: msgpack.Unmarshal, } ServiceEventCBORMarshaller = marshallerImpl{ - MarshalFunc: cborcodec.EncMode.Marshal, - UnmarshalFunc: cbor.Unmarshal, + marshalFunc: cborcodec.EncMode.Marshal, + unmarshalFunc: cbor.Unmarshal, } ) -func (marshaller marshallerImpl) Unmarshal(b []byte) ( - ServiceEvent, - error, -) { - var enc map[string]interface{} - err := marshaller.UnmarshalFunc(b, &enc) +// UnmarshalWrapped unmarshals the service event `b` and returns it as a wrapped ServiceEvent type. +// The input bytes must be encoded as a generic wrapped ServiceEvent type. +// Forwards errors from the underlying marshaller (treat errors as you would from eg. 
json.Unmarshal) +func (marshaller marshallerImpl) UnmarshalWrapped(b []byte) (ServiceEvent, error) { + var eventTypeWrapper struct { + Type ServiceEventType + } + err := marshaller.unmarshalFunc(b, &eventTypeWrapper) if err != nil { return ServiceEvent{}, err } + eventType := eventTypeWrapper.Type - tp, ok := enc["Type"].(string) - if !ok { - return ServiceEvent{}, fmt.Errorf("missing type key") + var event any + switch eventType { + case ServiceEventSetup: + event, err = unmarshalWrapped[EpochSetup](b, marshaller) + case ServiceEventCommit: + event, err = unmarshalWrapped[EpochCommit](b, marshaller) + case ServiceEventRecover: + event, err = unmarshalWrapped[EpochRecover](b, marshaller) + case ServiceEventVersionBeacon: + event, err = unmarshalWrapped[VersionBeacon](b, marshaller) + case ServiceEventProtocolStateVersionUpgrade: + event, err = unmarshalWrapped[ProtocolStateVersionUpgrade](b, marshaller) + case ServiceEventSetEpochExtensionViewCount: + event, err = unmarshalWrapped[SetEpochExtensionViewCount](b, marshaller) + case ServiceEventEjectNode: + event, err = unmarshalWrapped[EjectNode](b, marshaller) + default: + return ServiceEvent{}, fmt.Errorf("invalid type: %s", eventType) } - ev, ok := enc["Event"] - if !ok { - return ServiceEvent{}, fmt.Errorf("missing event key") + if err != nil { + return ServiceEvent{}, fmt.Errorf("failed to unmarshal service event of type %s: %w", eventType, err) } + return ServiceEvent{ + Type: eventType, + Event: event, + }, nil +} - // re-marshal the event, we'll unmarshal it into the appropriate type - evb, err := marshaller.MarshalFunc(ev) +// unmarshalWrapped is a helper function for UnmarshalWrapped which unmarshals the +// Event portion of a ServiceEvent into a specific typed structure. +// Forwards errors from the underlying marshaller (treat errors as you would from eg. json.Unmarshal) +func unmarshalWrapped[E any](b []byte, marshaller marshallerImpl) (*E, error) { + wrapper := struct { + Type ServiceEventType + Event E + }{} + err := marshaller.unmarshalFunc(b, &wrapper) if err != nil { - return ServiceEvent{}, err + return nil, err } - return marshaller.UnmarshalWithType(evb, ServiceEventType(tp)) + return &wrapper.Event, nil } -func (marshaller marshallerImpl) UnmarshalWithType( - b []byte, - eventType ServiceEventType, -) (ServiceEvent, error) { +// UnmarshalWithType unmarshals the service event and returns it as a wrapped ServiceEvent type. +// The input bytes must be encoded as a specific event type (for example, EpochSetup). +// Forwards errors from the underlying marshaller (treat errors as you would from eg.
json.Unmarshal) +func (marshaller marshallerImpl) UnmarshalWithType(b []byte, eventType ServiceEventType) (ServiceEvent, error) { var event interface{} switch eventType { case ServiceEventSetup: event = new(EpochSetup) case ServiceEventCommit: event = new(EpochCommit) + case ServiceEventRecover: + event = new(EpochRecover) case ServiceEventVersionBeacon: event = new(VersionBeacon) + case ServiceEventProtocolStateVersionUpgrade: + event = new(ProtocolStateVersionUpgrade) + case ServiceEventSetEpochExtensionViewCount: + event = new(SetEpochExtensionViewCount) + case ServiceEventEjectNode: + event = new(EjectNode) default: return ServiceEvent{}, fmt.Errorf("invalid type: %s", eventType) } - err := marshaller.UnmarshalFunc(b, event) + err := marshaller.unmarshalFunc(b, event) if err != nil { return ServiceEvent{}, fmt.Errorf( @@ -153,7 +199,7 @@ func (marshaller marshallerImpl) UnmarshalWithType( } func (se *ServiceEvent) UnmarshalJSON(b []byte) error { - e, err := ServiceEventJSONMarshaller.Unmarshal(b) + e, err := ServiceEventJSONMarshaller.UnmarshalWrapped(b) if err != nil { return err } @@ -162,7 +208,7 @@ func (se *ServiceEvent) UnmarshalJSON(b []byte) error { } func (se *ServiceEvent) UnmarshalMsgpack(b []byte) error { - e, err := ServiceEventMSGPACKMarshaller.Unmarshal(b) + e, err := ServiceEventMSGPACKMarshaller.UnmarshalWrapped(b) if err != nil { return err } @@ -171,7 +217,7 @@ func (se *ServiceEvent) UnmarshalMsgpack(b []byte) error { } func (se *ServiceEvent) UnmarshalCBOR(b []byte) error { - e, err := ServiceEventCBORMarshaller.Unmarshal(b) + e, err := ServiceEventCBORMarshaller.UnmarshalWrapped(b) if err != nil { return err } @@ -179,6 +225,9 @@ func (se *ServiceEvent) UnmarshalCBOR(b []byte) error { return nil } +// EqualTo checks whether two service events are equal, as defined by the underlying Event type. +// Inputs must have already been independently validated and well-formed. +// No errors are expected during normal operation. 
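The dispatch above replaces the old decode-to-map-and-re-marshal approach with a single typed pass: the wrapper's Type tag is read first, then the Event is decoded directly into the matching concrete type. A minimal round-trip sketch (a hypothetical test, using only fixtures and names that appear in this diff):

```go
package flow_test

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

// TestUnmarshalWrappedSketch (hypothetical) round-trips a wrapped service
// event: json.Marshal encodes the {Type, Event} wrapper, and UnmarshalWrapped
// decodes the Event directly into the concrete type named by the Type tag.
func TestUnmarshalWrappedSketch(t *testing.T) {
	setup := unittest.EpochSetupFixture()

	b, err := json.Marshal(setup.ServiceEvent())
	require.NoError(t, err)

	event, err := flow.ServiceEventJSONMarshaller.UnmarshalWrapped(b)
	require.NoError(t, err)
	require.Equal(t, flow.ServiceEventSetup, event.Type)

	_, ok := event.Event.(*flow.EpochSetup)
	require.True(t, ok)
}
```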
func (se *ServiceEvent) EqualTo(other *ServiceEvent) (bool, error) { if se.Type != other.Type { return false, nil @@ -218,6 +267,23 @@ func (se *ServiceEvent) EqualTo(other *ServiceEvent) (bool, error) { } return commit.EqualTo(otherCommit), nil + case ServiceEventRecover: + ev, ok := se.Event.(*EpochRecover) + if !ok { + return false, fmt.Errorf( + "internal invalid type for ServiceEventRecover: %T", + se.Event, + ) + } + otherEv, ok := other.Event.(*EpochRecover) + if !ok { + return false, fmt.Errorf( + "internal invalid type for ServiceEventRecover: %T", + other.Event, + ) + } + return ev.EqualTo(otherEv), nil + case ServiceEventVersionBeacon: version, ok := se.Event.(*VersionBeacon) if !ok { @@ -235,6 +301,57 @@ func (se *ServiceEvent) EqualTo(other *ServiceEvent) (bool, error) { ) } return version.EqualTo(otherVersion), nil + case ServiceEventProtocolStateVersionUpgrade: + version, ok := se.Event.(*ProtocolStateVersionUpgrade) + if !ok { + return false, fmt.Errorf( + "internal invalid type for ProtocolStateVersionUpgrade: %T", + se.Event, + ) + } + otherVersion, ok := other.Event.(*ProtocolStateVersionUpgrade) + if !ok { + return false, + fmt.Errorf( + "internal invalid type for ProtocolStateVersionUpgrade: %T", + other.Event, + ) + } + return version.EqualTo(otherVersion), nil + case ServiceEventSetEpochExtensionViewCount: + typedEvent, ok := se.Event.(*SetEpochExtensionViewCount) + if !ok { + return false, fmt.Errorf( + "internal invalid type for SetEpochExtensionViewCount: %T", + se.Event, + ) + } + otherTypedEvent, ok := other.Event.(*SetEpochExtensionViewCount) + if !ok { + return false, + fmt.Errorf( + "internal invalid type for SetEpochExtensionViewCount: %T", + other.Event, + ) + } + return typedEvent.EqualTo(otherTypedEvent), nil + case ServiceEventEjectNode: + typedEvent, ok := se.Event.(*EjectNode) + if !ok { + return false, fmt.Errorf( + "internal invalid type for EjectNode: %T", + se.Event, + ) + } + otherTypedEvent, ok := other.Event.(*EjectNode) + if !ok { + return false, + fmt.Errorf( + "internal invalid type for EjectNode: %T", + other.Event, + ) + } + return typedEvent.EqualTo(otherTypedEvent), nil default: return false, fmt.Errorf("unknown serice event type: %s", se.Type) diff --git a/model/flow/service_event_test.go b/model/flow/service_event_test.go index 90c571fc4ba..02877d068a7 100644 --- a/model/flow/service_event_test.go +++ b/model/flow/service_event_test.go @@ -2,25 +2,29 @@ package flow_test import ( "encoding/json" + "math/rand" "testing" "github.com/fxamacker/cbor/v2" "github.com/google/go-cmp/cmp" + gocmp "github.com/google/go-cmp/cmp" + "github.com/onflow/crypto" "github.com/stretchr/testify/require" - "github.com/vmihailenco/msgpack" + "github.com/vmihailenco/msgpack/v4" "gotest.tools/assert" - "github.com/onflow/flow-go/crypto" - cborcodec "github.com/onflow/flow-go/model/encoding/cbor" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) func TestEncodeDecode(t *testing.T) { - setup := unittest.EpochSetupFixture() commit := unittest.EpochCommitFixture() + epochRecover := unittest.EpochRecoverFixture() versionBeacon := unittest.VersionBeaconFixture() + protocolVersionUpgrade := unittest.ProtocolStateVersionUpgradeFixture() + setEpochExtensionViewCount := &flow.SetEpochExtensionViewCount{Value: uint64(rand.Uint32())} + ejectionEvent := &flow.EjectNode{NodeID: unittest.IdentifierFixture()} comparePubKey := cmp.FilterValues(func(a, b crypto.PublicKey) bool { return true @@ -34,214 +38,221 @@ func TestEncodeDecode(t 
*testing.T) { t.Run("json", func(t *testing.T) { t.Run("specific event types", func(t *testing.T) { // EpochSetup - b, err := json.Marshal(setup) - require.NoError(t, err) - - gotSetup := new(flow.EpochSetup) - err = json.Unmarshal(b, gotSetup) - require.NoError(t, err) - assert.DeepEqual(t, setup, gotSetup, comparePubKey) + assertJsonConvert(t, setup, comparePubKey) // EpochCommit - b, err = json.Marshal(commit) - require.NoError(t, err) + assertJsonConvert(t, commit, comparePubKey) - gotCommit := new(flow.EpochCommit) - err = json.Unmarshal(b, gotCommit) - require.NoError(t, err) - assert.DeepEqual(t, commit, gotCommit, comparePubKey) + // EpochRecover + assertJsonConvert(t, epochRecover, comparePubKey) // VersionBeacon - b, err = json.Marshal(versionBeacon) - require.NoError(t, err) + assertJsonConvert(t, versionBeacon) + + // ProtocolStateVersionUpgrade + assertJsonConvert(t, protocolVersionUpgrade) - gotVersionBeacon := new(flow.VersionBeacon) - err = json.Unmarshal(b, gotVersionBeacon) - require.NoError(t, err) - assert.DeepEqual(t, versionBeacon, gotVersionBeacon) + // SetEpochExtensionViewCount + assertJsonConvert(t, setEpochExtensionViewCount) + + // EjectNode + assertJsonConvert(t, ejectionEvent) }) t.Run("generic type", func(t *testing.T) { // EpochSetup - b, err := json.Marshal(setup.ServiceEvent()) - require.NoError(t, err) - - outer := new(flow.ServiceEvent) - err = json.Unmarshal(b, outer) - require.NoError(t, err) - gotSetup, ok := outer.Event.(*flow.EpochSetup) - require.True(t, ok) - assert.DeepEqual(t, setup, gotSetup, comparePubKey) + assertJsonGenericConvert(t, setup, comparePubKey) // EpochCommit - t.Logf("- debug: setup.ServiceEvent()=%+v\n", setup.ServiceEvent()) - b, err = json.Marshal(commit.ServiceEvent()) - require.NoError(t, err) - - outer = new(flow.ServiceEvent) - t.Logf("- debug: outer=%+v <-- before .Unmarshal()\n", outer) - err = json.Unmarshal(b, outer) - t.Logf("- debug: outer=%+v <-- after .Unmarshal()\n", outer) - require.NoError(t, err) - gotCommit, ok := outer.Event.(*flow.EpochCommit) - require.True(t, ok) - assert.DeepEqual(t, commit, gotCommit, comparePubKey) + assertJsonGenericConvert(t, commit, comparePubKey) + + // EpochRecover + assertJsonGenericConvert(t, epochRecover, comparePubKey) // VersionBeacon - t.Logf("- debug: versionBeacon.ServiceEvent()=%+v\n", versionBeacon.ServiceEvent()) - b, err = json.Marshal(versionBeacon.ServiceEvent()) - require.NoError(t, err) - - outer = new(flow.ServiceEvent) - t.Logf("- debug: outer=%+v <-- before .Unmarshal()\n", outer) - err = json.Unmarshal(b, outer) - t.Logf("- debug: outer=%+v <-- after .Unmarshal()\n", outer) - require.NoError(t, err) - gotVersionTable, ok := outer.Event.(*flow.VersionBeacon) - require.True(t, ok) - assert.DeepEqual(t, versionBeacon, gotVersionTable) + assertJsonGenericConvert(t, versionBeacon) + + // ProtocolStateVersionUpgrade + assertJsonGenericConvert(t, protocolVersionUpgrade) + + // SetEpochExtensionViewCount + assertJsonGenericConvert(t, setEpochExtensionViewCount) + + // EjectNode + assertJsonGenericConvert(t, ejectionEvent) }) }) t.Run("msgpack", func(t *testing.T) { t.Run("specific event types", func(t *testing.T) { // EpochSetup - b, err := msgpack.Marshal(setup) - require.NoError(t, err) - - gotSetup := new(flow.EpochSetup) - err = msgpack.Unmarshal(b, gotSetup) - require.NoError(t, err) - assert.DeepEqual(t, setup, gotSetup, comparePubKey) + assertMsgPackConvert(t, setup, comparePubKey) // EpochCommit - b, err = msgpack.Marshal(commit) - require.NoError(t, err) + 
assertMsgPackConvert(t, commit, comparePubKey) - gotCommit := new(flow.EpochCommit) - err = msgpack.Unmarshal(b, gotCommit) - require.NoError(t, err) - assert.DeepEqual(t, commit, gotCommit, comparePubKey) + // EpochRecover + assertMsgPackConvert(t, epochRecover, comparePubKey) // VersionBeacon - b, err = msgpack.Marshal(versionBeacon) - require.NoError(t, err) + assertMsgPackConvert(t, versionBeacon) - gotVersionTable := new(flow.VersionBeacon) - err = msgpack.Unmarshal(b, gotVersionTable) - require.NoError(t, err) - assert.DeepEqual(t, versionBeacon, gotVersionTable) + // ProtocolStateVersionUpgrade + assertMsgPackConvert(t, protocolVersionUpgrade) + + // SetEpochExtensionViewCount + assertMsgPackConvert(t, setEpochExtensionViewCount) + + // EjectNode + assertMsgPackConvert(t, ejectionEvent) }) t.Run("generic type", func(t *testing.T) { - b, err := msgpack.Marshal(setup.ServiceEvent()) - require.NoError(t, err) - - outer := new(flow.ServiceEvent) - err = msgpack.Unmarshal(b, outer) - require.NoError(t, err) - gotSetup, ok := outer.Event.(*flow.EpochSetup) - require.True(t, ok) - assert.DeepEqual(t, setup, gotSetup, comparePubKey) + // EpochSetup + assertMsgPackGenericConvert(t, setup, comparePubKey) // EpochCommit - t.Logf("- debug: setup.ServiceEvent()=%+v\n", setup.ServiceEvent()) - b, err = msgpack.Marshal(commit.ServiceEvent()) - require.NoError(t, err) - - outer = new(flow.ServiceEvent) - t.Logf("- debug: outer=%+v <-- before .Unmarshal()\n", outer) - err = msgpack.Unmarshal(b, outer) - t.Logf("- debug: outer=%+v <-- after .Unmarshal()\n", outer) - require.NoError(t, err) - gotCommit, ok := outer.Event.(*flow.EpochCommit) - require.True(t, ok) - assert.DeepEqual(t, commit, gotCommit, comparePubKey) + assertMsgPackGenericConvert(t, commit, comparePubKey) + + // EpochRecover + assertMsgPackGenericConvert(t, epochRecover, comparePubKey) // VersionBeacon - t.Logf("- debug: versionTable.ServiceEvent()=%+v\n", versionBeacon.ServiceEvent()) - b, err = msgpack.Marshal(versionBeacon.ServiceEvent()) - require.NoError(t, err) - - outer = new(flow.ServiceEvent) - t.Logf("- debug: outer=%+v <-- before .Unmarshal()\n", outer) - err = msgpack.Unmarshal(b, outer) - t.Logf("- debug: outer=%+v <-- after .Unmarshal()\n", outer) - require.NoError(t, err) - gotVersionTable, ok := outer.Event.(*flow.VersionBeacon) - require.True(t, ok) - assert.DeepEqual(t, versionBeacon, gotVersionTable, comparePubKey) + assertMsgPackGenericConvert(t, versionBeacon, comparePubKey) + + // ProtocolStateVersionUpgrade + assertMsgPackGenericConvert(t, protocolVersionUpgrade) + + // SetEpochExtensionViewCount + assertMsgPackGenericConvert(t, setEpochExtensionViewCount) + + // EjectNode + assertMsgPackGenericConvert(t, ejectionEvent) }) }) t.Run("cbor", func(t *testing.T) { t.Run("specific event types", func(t *testing.T) { // EpochSetup - b, err := cborcodec.EncMode.Marshal(setup) - require.NoError(t, err) - - gotSetup := new(flow.EpochSetup) - err = cbor.Unmarshal(b, gotSetup) - require.NoError(t, err) - assert.DeepEqual(t, setup, gotSetup, comparePubKey) + assertCborConvert(t, setup, comparePubKey) // EpochCommit - b, err = cborcodec.EncMode.Marshal(commit) - require.NoError(t, err) + assertCborConvert(t, commit, comparePubKey) - gotCommit := new(flow.EpochCommit) - err = cbor.Unmarshal(b, gotCommit) - require.NoError(t, err) - assert.DeepEqual(t, commit, gotCommit, comparePubKey) + // EpochRecover + assertCborConvert(t, epochRecover, comparePubKey) // VersionBeacon - b, err = cborcodec.EncMode.Marshal(versionBeacon) - 
require.NoError(t, err) + assertCborConvert(t, versionBeacon) + + // ProtocolStateVersionUpgrade + assertCborConvert(t, protocolVersionUpgrade) - gotVersionTable := new(flow.VersionBeacon) - err = cbor.Unmarshal(b, gotVersionTable) - require.NoError(t, err) - assert.DeepEqual(t, versionBeacon, gotVersionTable) + // SetEpochExtensionViewCount + assertCborConvert(t, setEpochExtensionViewCount) + // EjectNode + assertCborConvert(t, ejectionEvent) }) t.Run("generic type", func(t *testing.T) { // EpochSetup - t.Logf("- debug: setup.ServiceEvent()=%+v\n", setup.ServiceEvent()) - b, err := cborcodec.EncMode.Marshal(setup.ServiceEvent()) - require.NoError(t, err) - - outer := new(flow.ServiceEvent) - t.Logf("- debug: outer=%+v <-- before .Unmarshal()\n", outer) - err = cbor.Unmarshal(b, outer) - t.Logf("- debug: outer=%+v <-- after .Unmarshal()\n", outer) - require.NoError(t, err) - gotSetup, ok := outer.Event.(*flow.EpochSetup) - require.True(t, ok) - assert.DeepEqual(t, setup, gotSetup, comparePubKey) + assertCborGenericConvert(t, setup, comparePubKey) // EpochCommit - b, err = cborcodec.EncMode.Marshal(commit.ServiceEvent()) - require.NoError(t, err) + assertCborGenericConvert(t, commit, comparePubKey) - outer = new(flow.ServiceEvent) - err = cbor.Unmarshal(b, outer) - require.NoError(t, err) - gotCommit, ok := outer.Event.(*flow.EpochCommit) - require.True(t, ok) - assert.DeepEqual(t, commit, gotCommit, comparePubKey) + // EpochRecover + assertCborGenericConvert(t, epochRecover, comparePubKey) // VersionBeacon - t.Logf("- debug: setup.ServiceEvent()=%+v\n", versionBeacon.ServiceEvent()) - b, err = cborcodec.EncMode.Marshal(versionBeacon.ServiceEvent()) - require.NoError(t, err) - - outer = new(flow.ServiceEvent) - err = cbor.Unmarshal(b, outer) - require.NoError(t, err) - gotVersionTable, ok := outer.Event.(*flow.VersionBeacon) - require.True(t, ok) - assert.DeepEqual(t, versionBeacon, gotVersionTable) + assertCborGenericConvert(t, versionBeacon) + + // ProtocolStateVersionUpgrade + assertCborGenericConvert(t, protocolVersionUpgrade) + + // SetEpochExtensionViewCount + assertCborGenericConvert(t, setEpochExtensionViewCount) + + // EjectNode + assertCborGenericConvert(t, ejectionEvent) }) }) } + +// ServiceEventCapable is an interface to convert a specific event type to a generic ServiceEvent type. +type ServiceEventCapable interface { + ServiceEvent() flow.ServiceEvent +} + +// assertJsonConvert asserts that value `v` can be marshaled and unmarshaled to/from JSON. +func assertJsonConvert[T any](t *testing.T, v *T, opts ...gocmp.Option) { + b, err := json.Marshal(v) + require.NoError(t, err) + + got := new(T) + err = json.Unmarshal(b, got) + require.NoError(t, err) + assert.DeepEqual(t, v, got, opts...) +} + +// assertJsonGenericConvert asserts that value `v` can be marshaled and unmarshaled to/from JSON as a generic ServiceEvent. +func assertJsonGenericConvert[T ServiceEventCapable](t *testing.T, v T, opts ...gocmp.Option) { + b, err := json.Marshal(v.ServiceEvent()) + require.NoError(t, err) + + outer := new(flow.ServiceEvent) + err = json.Unmarshal(b, outer) + require.NoError(t, err) + got, ok := outer.Event.(T) + require.True(t, ok) + assert.DeepEqual(t, v, got, opts...) +} + +// assertMsgPackConvert asserts that value `v` can be marshaled and unmarshaled to/from MessagePack. 
+func assertMsgPackConvert[T any](t *testing.T, v *T, opts ...gocmp.Option) { + b, err := msgpack.Marshal(v) + require.NoError(t, err) + + got := new(T) + err = msgpack.Unmarshal(b, got) + require.NoError(t, err) + assert.DeepEqual(t, v, got, opts...) +} + +// assertMsgPackGenericConvert asserts that value `v` can be marshaled and unmarshaled to/from MessagePack as a generic ServiceEvent. +func assertMsgPackGenericConvert[T ServiceEventCapable](t *testing.T, v T, opts ...gocmp.Option) { + b, err := msgpack.Marshal(v.ServiceEvent()) + require.NoError(t, err) + + outer := new(flow.ServiceEvent) + err = msgpack.Unmarshal(b, outer) + require.NoError(t, err) + got, ok := outer.Event.(T) + require.True(t, ok) + assert.DeepEqual(t, v, got, opts...) +} + +// assertCborConvert asserts that value `v` can be marshaled and unmarshaled to/from CBOR. +func assertCborConvert[T any](t *testing.T, v *T, opts ...gocmp.Option) { + b, err := cbor.Marshal(v) + require.NoError(t, err) + + got := new(T) + err = cbor.Unmarshal(b, got) + require.NoError(t, err) + assert.DeepEqual(t, v, got, opts...) +} + +// assertCborGenericConvert asserts that value `v` can be marshaled and unmarshaled to/from CBOR as a generic ServiceEvent. +func assertCborGenericConvert[T ServiceEventCapable](t *testing.T, v T, opts ...gocmp.Option) { + b, err := cbor.Marshal(v.ServiceEvent()) + require.NoError(t, err) + + outer := new(flow.ServiceEvent) + err = cbor.Unmarshal(b, outer) + require.NoError(t, err) + got, ok := outer.Event.(T) + require.True(t, ok) + assert.DeepEqual(t, v, got, opts...) +} diff --git a/model/flow/synchronization.go b/model/flow/synchronization.go new file mode 100644 index 00000000000..ff98d2937a5 --- /dev/null +++ b/model/flow/synchronization.go @@ -0,0 +1,35 @@ +package flow + +// SyncRequest is part of the synchronization protocol and represents a node on +// the network sharing the height of its latest finalized block and requesting +// the same information from the recipient. +type SyncRequest struct { + Nonce uint64 + Height uint64 +} + +// SyncResponse is part of the synchronization protocol and represents the reply +// to a synchronization request that contains the latest finalized block height +// of the responding node. +type SyncResponse struct { + Nonce uint64 + Height uint64 +} + +// BatchRequest is part of the synchronization protocol and represents an active +// (pulling) attempt to synchronize with the consensus state of the network. It +// requests finalized or unfinalized blocks by a list of block IDs. +type BatchRequest struct { + Nonce uint64 + BlockIDs []Identifier +} + +// RangeRequest is part of the synchronization protocol and represents an active +// (pulling) attempt to synchronize with the consensus state of the network. It +// requests finalized blocks by a range of block heights, including from and to +// heights. +type RangeRequest struct { + Nonce uint64 + FromHeight uint64 + ToHeight uint64 +} diff --git a/model/flow/testmessage.go b/model/flow/testmessage.go new file mode 100644 index 00000000000..276bd7f5088 --- /dev/null +++ b/model/flow/testmessage.go @@ -0,0 +1,6 @@ +package flow + +// TestMessage is the trusted internal representation of a test message. 
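For the synchronization messages introduced above, both bounds of a RangeRequest are inclusive. A small sketch, with an illustrative nonce source and a hypothetical helper name:

```go
package flow_test

import (
	"math/rand"

	"github.com/onflow/flow-go/model/flow"
)

// rangeRequestSketch (hypothetical) builds a request for finalized blocks in
// the inclusive height window [from, to]; the nonce source is illustrative.
func rangeRequestSketch(from, to uint64) flow.RangeRequest {
	return flow.RangeRequest{
		Nonce:      rand.Uint64(),
		FromHeight: from,
		ToHeight:   to,
	}
}
```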
+type TestMessage struct { + Text string +} diff --git a/model/flow/timeout_certificate.go b/model/flow/timeout_certificate.go index 481d86d90b6..0a3997e6bc0 100644 --- a/model/flow/timeout_certificate.go +++ b/model/flow/timeout_certificate.go @@ -1,10 +1,17 @@ package flow -import "github.com/onflow/flow-go/crypto" +import ( + "bytes" + "fmt" + + "github.com/onflow/crypto" +) // TimeoutCertificate proves that a super-majority of consensus participants want to abandon the specified View. // At its core, a timeout certificate is an aggregation of TimeoutObjects, which individual nodes send to signal // their intent to leave the active view. +// +//structwrite:immutable - mutations allowed only within the constructor type TimeoutCertificate struct { View uint64 // NewestQCViews lists for each signer (in the same order) the view of the newest QC they supplied @@ -21,24 +28,93 @@ type TimeoutCertificate struct { SigData crypto.Signature } +// UntrustedTimeoutCertificate is an untrusted input-only representation of a TimeoutCertificate, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedTimeoutCertificate should be validated and converted into +// a trusted TimeoutCertificate using the NewTimeoutCertificate constructor. +type UntrustedTimeoutCertificate TimeoutCertificate + +// NewTimeoutCertificate creates a new instance of TimeoutCertificate. +// Construction of a TimeoutCertificate is allowed only within the constructor. +// +// All errors indicate a valid TimeoutCertificate cannot be constructed from the input. +func NewTimeoutCertificate(untrusted UntrustedTimeoutCertificate) (*TimeoutCertificate, error) { + if untrusted.NewestQC == nil { + return nil, fmt.Errorf("newest QC must not be nil") + } + if len(untrusted.SignerIndices) == 0 { + return nil, fmt.Errorf("signer indices must not be empty") + } + if len(untrusted.SigData) == 0 { + return nil, fmt.Errorf("signature must not be empty") + } + + // The TC's view cannot be smaller than the view of the QC it contains. + // Note: we specifically allow for the TC to have the same view as the highest QC. + // This is useful as a fallback, because it allows replicas other than the designated + // leader to also collect votes and generate a QC. + if untrusted.View < untrusted.NewestQC.View { + return nil, fmt.Errorf("TC's QC view (%d) cannot be newer than the TC's view (%d)", untrusted.NewestQC.View, untrusted.View) + } + + // Verify that tc.NewestQC is the QC with the highest view. + // Note: A byzantine TC could include `nil` for tc.NewestQCViews + if len(untrusted.NewestQCViews) == 0 { + return nil, fmt.Errorf("newest QC views must not be empty") + } + + newestQCView := untrusted.NewestQCViews[0] + for _, view := range untrusted.NewestQCViews { + if newestQCView < view { + newestQCView = view + } + } + if newestQCView > untrusted.NewestQC.View { + return nil, fmt.Errorf("included QC (view=%d) should be equal or higher to highest contributed view: %d", untrusted.NewestQC.View, newestQCView) + } + + return &TimeoutCertificate{ + View: untrusted.View, + NewestQCViews: untrusted.NewestQCViews, + NewestQC: untrusted.NewestQC, + SignerIndices: untrusted.SignerIndices, + SigData: untrusted.SigData, + }, nil +} + +// Equals returns true if and only if the receiver TimeoutCertificate is equal to `other`. Nil values are supported.
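The intended construction path for the new constructor above is to populate the untrusted wrapper with named fields and let NewTimeoutCertificate validate. A minimal sketch, with a hypothetical helper name and assuming the QC, signer indices, and aggregated signature come from the caller:

```go
package flow_test

import "github.com/onflow/flow-go/model/flow"

// newTCSketch (hypothetical) shows the named-field construction path; the QC,
// signer indices, and aggregated signature are assumed to come from the caller.
func newTCSketch(qc *flow.QuorumCertificate, signerIndices, sigData []byte) (*flow.TimeoutCertificate, error) {
	return flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate{
		View:          qc.View + 1,       // must be >= the view of the newest QC
		NewestQCViews: []uint64{qc.View}, // no entry may exceed NewestQC.View
		NewestQC:      qc,
		SignerIndices: signerIndices,
		SigData:       sigData,
	})
}
```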
+func (t *TimeoutCertificate) Equals(other *TimeoutCertificate) bool { + // Shortcut if `t` and `other` point to the same object; covers case where both are nil. + if t == other { + return true + } + if t == nil || other == nil { // only one is nil, the other not (otherwise we would have returned above) + return false + } + // both are not nil, so we can compare the fields + if len(t.NewestQCViews) != len(other.NewestQCViews) { + return false + } + for idx, v := range t.NewestQCViews { + if v != other.NewestQCViews[idx] { + return false + } + } + return (t.View == other.View) && + t.NewestQC.Equals(other.NewestQC) && + bytes.Equal(t.SignerIndices, other.SignerIndices) && + bytes.Equal(t.SigData, other.SigData) +} + // ID returns the TimeoutCertificate's identifier func (t *TimeoutCertificate) ID() Identifier { if t == nil { return ZeroID } - - body := struct { - View uint64 - NewestQCViews []uint64 - NewestQCID Identifier - SignerIndices []byte - SigData crypto.Signature - }{ - View: t.View, - NewestQCViews: t.NewestQCViews, - NewestQCID: t.NewestQC.ID(), - SignerIndices: t.SignerIndices, - SigData: t.SigData, - } - return MakeID(body) + return MakeID(t) } diff --git a/model/flow/timeout_certificate_test.go b/model/flow/timeout_certificate_test.go new file mode 100644 index 00000000000..e71abd4b6db --- /dev/null +++ b/model/flow/timeout_certificate_test.go @@ -0,0 +1,261 @@ +package flow_test + +import ( + "math/rand" + "testing" + + clone "github.com/huandu/go-clone/generic" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/consensus/hotstuff/helper" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestTimeoutCertificateID_Malleability confirms that the TimeoutCertificate struct, which implements +// the [flow.IDEntity] interface, is resistant to tampering. +func TestTimeoutCertificateID_Malleability(t *testing.T) { + unittest.RequireEntityNonMalleable(t, helper.MakeTC()) +} + +// TestTimeoutCertificate_Equals verifies the correctness of the Equals method on TimeoutCertificates. +// It checks that TimeoutCertificates are considered equal if and only if all fields match. +func TestTimeoutCertificate_Equals(t *testing.T) { + // Create two TimeoutCertificates with random but different values. Note: random selection for `SignerIndices` has limited variability + // and yields sometimes the same value for both tc1 and tc2. Therefore, we explicitly set different values for `SignerIndices`. + tc1, tc2 := helper.MakeTC(helper.WithTCSigners([]byte{74, 0})), helper.MakeTC(helper.WithTCSigners([]byte{37, 0})) + require.False(t, tc1.Equals(tc2), "Initially, all fields are different, so the objects should not be equal") + + // List of mutations to apply on tc1 to gradually make it equal to tc2 + mutations := []func(){ + func() { + tc1.View = tc2.View + }, func() { + tc1.NewestQCViews = clone.Clone(tc2.NewestQCViews) // deep copy + }, func() { + tc1.NewestQC = clone.Clone(tc2.NewestQC) // deep copy + }, func() { + tc1.SignerIndices = clone.Clone(tc2.SignerIndices) // deep copy + }, func() { + tc1.SigData = clone.Clone(tc2.SigData) // deep copy + }, + } + + // Shuffle the order of mutations + rand.Shuffle(len(mutations), func(i, j int) { + mutations[i], mutations[j] = mutations[j], mutations[i] + }) + + // Apply each mutation one at a time, except the last. + // After each step, the objects should still not be equal. 
+ for _, mutation := range mutations[:len(mutations)-1] { + mutation() + require.False(t, tc1.Equals(tc2)) + } + + // Apply the final mutation; now all relevant fields should match, so the objects must be equal. + mutations[len(mutations)-1]() + require.True(t, tc1.Equals(tc2)) +} + +// TestTimeoutCertificate_Equals_EmptyNewestQCViews verifies the behavior of the Equals method when either +// or both `NewestQCViews` are nil in the receiver and/or the function input. +func TestTimeoutCertificate_Equals_EmptyNewestQCViews(t *testing.T) { + // Create two identical TimeoutCertificates + tc1 := helper.MakeTC() + tc2 := clone.Clone(tc1) + require.True(t, tc1.Equals(tc2), "Initially, all fields are identical, so the objects should be equal") + require.True(t, len(tc1.NewestQCViews) > 0, "sanity check that NewestQCViews is not empty") + + t.Run("NewestQCViews is nil in tc2 only", func(t *testing.T) { + tc2.NewestQCViews = nil + require.False(t, tc1.Equals(tc2)) + require.False(t, tc2.Equals(tc1)) + }) + t.Run("NewestQCViews is empty slice in tc2 only", func(t *testing.T) { + tc2.NewestQCViews = []uint64{} + require.False(t, tc1.Equals(tc2)) + require.False(t, tc2.Equals(tc1)) + }) + t.Run("NewestQCViews is nil in tc1 and tc2", func(t *testing.T) { + tc1.NewestQCViews = nil + tc2.NewestQCViews = nil + require.True(t, tc1.Equals(tc2)) + require.True(t, tc2.Equals(tc1)) + }) + t.Run("NewestQCViews is empty slice in tc1 and tc2", func(t *testing.T) { + tc1.NewestQCViews = []uint64{} + tc2.NewestQCViews = []uint64{} + require.True(t, tc1.Equals(tc2)) + require.True(t, tc2.Equals(tc1)) + }) + t.Run("NewestQCViews is empty slice in tc1 and nil tc2", func(t *testing.T) { + tc1.NewestQCViews = []uint64{} + tc2.NewestQCViews = nil + require.True(t, tc1.Equals(tc2)) + require.True(t, tc2.Equals(tc1)) + }) +} + +// TestTimeoutCertificate_Equals_Nil verifies the behavior of the Equals method when either +// or both the receiver and the function input are nil +func TestTimeoutCertificate_Equals_Nil(t *testing.T) { + var nilTC *flow.TimeoutCertificate + tc := helper.MakeTC() + t.Run("nil receiver", func(t *testing.T) { + require.False(t, nilTC.Equals(tc)) + }) + t.Run("nil input", func(t *testing.T) { + require.False(t, tc.Equals(nilTC)) + }) + t.Run("both nil", func(t *testing.T) { + require.True(t, nilTC.Equals(nil)) + }) +} + +// TestNewTimeoutCertificate verifies the behavior of the NewTimeoutCertificate constructor. +// It ensures proper handling of both valid and invalid untrusted input fields. +// +// Test Cases: +// +// 1. Valid input: +// - Verifies that a properly populated UntrustedTimeoutCertificate results in a valid TimeoutCertificate. +// +// 2. Invalid input with nil NewestQC: +// - Ensures an error is returned when the NewestQC field is nil. +// +// 3. Invalid input with nil SignerIndices: +// - Ensures an error is returned when the SignerIndices field is nil. +// +// 4. Invalid input with empty SignerIndices: +// - Ensures an error is returned when the SignerIndices field is an empty slice. +// +// 5. Invalid input with nil SigData: +// - Ensures an error is returned when the SigData field is nil. +// +// 6. Invalid input with empty SigData: +// - Ensures an error is returned when the SigData field is an empty byte slice. +// +// 7. Invalid input with nil NewestQCViews: +// - Ensures an error is returned when the NewestQCViews field is nil. +// +// 8. Invalid input with empty NewestQCViews: +// - Ensures an error is returned when the NewestQCViews field is an empty slice. +// +// 9. 
Invalid input when the View is lower than NewestQC's View: +// - Ensures an error is returned when the TimeoutCertificate's View is less than the included QuorumCertificate's View. +// +// 10. Invalid input when NewestQCViews contains view higher than NewestQC.View: +// - Ensures an error is returned if NewestQCViews includes a view that exceeds the view of the NewestQC. +func TestNewTimeoutCertificate(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + validTC := helper.MakeTC() + tc, err := flow.NewTimeoutCertificate( + flow.UntrustedTimeoutCertificate(*validTC), + ) + require.NoError(t, err) + require.NotNil(t, tc) + }) + + t.Run("invalid input with nil NewestQC", func(t *testing.T) { + tc := helper.MakeTC() + tc.NewestQC = nil + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "newest QC must not be nil") + }) + + t.Run("invalid input with nil SignerIndices", func(t *testing.T) { + tc := helper.MakeTC() + tc.SignerIndices = nil + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signer indices must not be empty") + }) + + t.Run("invalid input with empty SignerIndices", func(t *testing.T) { + tc := helper.MakeTC() + tc.SignerIndices = []byte{} + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signer indices must not be empty") + }) + + t.Run("invalid input with nil SigData", func(t *testing.T) { + tc := helper.MakeTC() + tc.SigData = nil + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signature must not be empty") + }) + + t.Run("invalid input with empty SigData", func(t *testing.T) { + tc := helper.MakeTC() + tc.SigData = []byte{} + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "signature must not be empty") + }) + + t.Run("invalid input with nil NewestQCViews", func(t *testing.T) { + tc := helper.MakeTC() + tc.NewestQCViews = nil + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "newest QC views must not be empty") + }) + + t.Run("invalid input with empty NewestQCViews", func(t *testing.T) { + tc := helper.MakeTC() + tc.NewestQCViews = []uint64{} + + res, err := flow.NewTimeoutCertificate(flow.UntrustedTimeoutCertificate(*tc)) + require.Error(t, err) + require.Nil(t, res) + assert.Contains(t, err.Error(), "newest QC views must not be empty") + }) + + t.Run("invalid input with TC.View < QC.View", func(t *testing.T) { + qc := helper.MakeQC(helper.WithQCView(100)) + tc, err := flow.NewTimeoutCertificate( + flow.UntrustedTimeoutCertificate( + *helper.MakeTC( + helper.WithTCView(99), + helper.WithTCNewestQC(qc), + )), + ) + require.Error(t, err) + require.Nil(t, tc) + assert.Contains(t, err.Error(), "TC's QC view (100) cannot be newer than the TC's view (99)") + }) + + t.Run("invalid input when NewestQCViews has view higher than NewestQC.View", func(t *testing.T) { + qc := helper.MakeQC(helper.WithQCView(50)) + tc, err := flow.NewTimeoutCertificate( + flow.UntrustedTimeoutCertificate( + *helper.MakeTC( + 
helper.WithTCView(51), + helper.WithTCNewestQC(qc), + helper.WithTCHighQCViews([]uint64{40, 50, 60}), // highest = 60 > QC.View = 50 + ), + ), + ) + require.Error(t, err) + require.Nil(t, tc) + assert.Contains(t, err.Error(), "included QC (view=50) should be equal or higher to highest contributed view: 60") + }) +} diff --git a/model/flow/transaction.go b/model/flow/transaction.go index 95556ff4c05..b87e3154d6a 100644 --- a/model/flow/transaction.go +++ b/model/flow/transaction.go @@ -2,14 +2,18 @@ package flow import ( "fmt" - "sort" + "io" + "slices" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/ethereum/go-ethereum/rlp" + + flowrlp "github.com/onflow/flow-go/model/encoding/rlp" "github.com/onflow/flow-go/model/fingerprint" ) // TransactionBody includes the main contents of a transaction + +//structwrite:immutable - mutations allowed only within the constructor type TransactionBody struct { // A reference to a previous block @@ -34,7 +38,7 @@ type TransactionBody struct { // Account that pays for this transaction fees Payer Address - // A ordered (ascending) list of addresses that scripts will touch their assets (including payer address) + // An ordered (ascending) list of addresses whose assets the scripts will touch (including the payer address) // Accounts listed here all have to provide signatures // Each account might provide multiple signatures (sum of weight should be at least 1) // If code touches accounts that is not listed here, tx fails @@ -47,21 +51,60 @@ type TransactionBody struct { EnvelopeSignatures []TransactionSignature } -// NewTransactionBody initializes and returns an empty transaction body -func NewTransactionBody() *TransactionBody { - return &TransactionBody{} -} +// UntrustedTransactionBody is an untrusted input-only representation of a TransactionBody, +// used for construction. +// +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. +// +// An instance of UntrustedTransactionBody should be validated and converted into +// a trusted TransactionBody using the NewTransactionBody constructor. +type UntrustedTransactionBody TransactionBody + +// NewTransactionBody creates a new instance of TransactionBody. +// Construction of TransactionBody is allowed only within the constructor. +// +// All errors indicate a valid TransactionBody cannot be constructed from the input. +func NewTransactionBody(untrusted UntrustedTransactionBody) (*TransactionBody, error) { + if len(untrusted.Script) == 0 { + return nil, fmt.Errorf("Script must not be empty") + } + + return &TransactionBody{ + ReferenceBlockID: untrusted.ReferenceBlockID, + Script: untrusted.Script, + Arguments: untrusted.Arguments, + GasLimit: untrusted.GasLimit, + ProposalKey: untrusted.ProposalKey, + Payer: untrusted.Payer, + Authorizers: untrusted.Authorizers, + PayloadSignatures: untrusted.PayloadSignatures, + EnvelopeSignatures: untrusted.EnvelopeSignatures, + }, nil +} + +// Fingerprint returns the canonical, unique byte representation for the TransactionBody. +// As RLP encoding logic for TransactionBody is overridden by EncodeRLP below, this is +// equivalent to directly RLP encoding the TransactionBody. +// This public function is retained primarily for backward compatibility.
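The same untrusted-wrapper pattern applies here. A minimal sketch of constructing a TransactionBody, with a hypothetical helper name, an illustrative script, and assuming the reference block ID and payer come from the caller:

```go
package flow_test

import "github.com/onflow/flow-go/model/flow"

// newTxBodySketch (hypothetical) builds a TransactionBody through the new
// constructor; Script is the only field the constructor validates, so the
// remaining fields are taken as-is from the caller.
func newTxBodySketch(refBlockID flow.Identifier, payer flow.Address) (*flow.TransactionBody, error) {
	return flow.NewTransactionBody(flow.UntrustedTransactionBody{
		ReferenceBlockID: refBlockID,
		Script:           []byte(`transaction { execute {} }`),
		GasLimit:         100,
		Payer:            payer,
	})
}
```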
func (tb TransactionBody) Fingerprint() []byte { - return fingerprint.Fingerprint(struct { + return flowrlp.NewMarshaler().MustMarshal(tb) +} + +// EncodeRLP defines RLP encoding behaviour for TransactionBody. +func (tb TransactionBody) EncodeRLP(w io.Writer) error { + encodingCanonicalForm := struct { Payload interface{} PayloadSignatures interface{} EnvelopeSignatures interface{} }{ - Payload: tb.payloadCanonicalForm(), + Payload: tb.PayloadCanonicalForm(), PayloadSignatures: signaturesList(tb.PayloadSignatures).canonicalForm(), EnvelopeSignatures: signaturesList(tb.EnvelopeSignatures).canonicalForm(), - }) + } + return rlp.Encode(w, encodingCanonicalForm) } func (tb TransactionBody) ByteSize() uint { @@ -96,76 +139,6 @@ func (tb TransactionBody) ID() Identifier { return MakeID(tb) } -func (tb TransactionBody) Checksum() Identifier { - return MakeID(tb) -} - -// SetScript sets the Cadence script for this transaction. -func (tb *TransactionBody) SetScript(script []byte) *TransactionBody { - tb.Script = script - return tb -} - -// SetArguments sets the Cadence arguments list for this transaction. -func (tb *TransactionBody) SetArguments(args [][]byte) *TransactionBody { - tb.Arguments = args - return tb -} - -// AddArgument adds an argument to the Cadence arguments list for this transaction. -func (tb *TransactionBody) AddArgument(arg []byte) *TransactionBody { - tb.Arguments = append(tb.Arguments, arg) - return tb -} - -// SetReferenceBlockID sets the reference block ID for this transaction. -func (tb *TransactionBody) SetReferenceBlockID(blockID Identifier) *TransactionBody { - tb.ReferenceBlockID = blockID - return tb -} - -// SetGasLimit sets the gas limit for this transaction. -func (tb *TransactionBody) SetGasLimit(limit uint64) *TransactionBody { - tb.GasLimit = limit - return tb -} - -// SetProposalKey sets the proposal key and sequence number for this transaction. -// -// The first two arguments specify the account key to be used, and the last argument is the sequence -// number being declared. -func (tb *TransactionBody) SetProposalKey(address Address, keyID uint64, sequenceNum uint64) *TransactionBody { - proposalKey := ProposalKey{ - Address: address, - KeyIndex: keyID, - SequenceNumber: sequenceNum, - } - tb.ProposalKey = proposalKey - return tb -} - -// SetPayer sets the payer account for this transaction. -func (tb *TransactionBody) SetPayer(address Address) *TransactionBody { - tb.Payer = address - return tb -} - -// AddAuthorizer adds an authorizer account to this transaction. -func (tb *TransactionBody) AddAuthorizer(address Address) *TransactionBody { - tb.Authorizers = append(tb.Authorizers, address) - return tb -} - -// Transaction is the smallest unit of task. -type Transaction struct { - TransactionBody - Status TransactionStatus - Events []Event - ComputationSpent uint64 - StartState StateCommitment - EndState StateCommitment -} - // MissingFields checks if a transaction is missing any required fields and returns those that are missing. func (tb *TransactionBody) MissingFields() []string { // Required fields are Script, ReferenceBlockID, Payer @@ -186,160 +159,11 @@ func (tb *TransactionBody) MissingFields() []string { return missingFields } -// signerList returns a list of unique accounts required to sign this transaction. -// -// The list is returned in the following order: -// 1. PROPOSER -// 2. PAYER -// 2. 
AUTHORIZERS (in insertion order) -// -// The only exception to the above ordering is for deduplication; if the same account -// is used in multiple signing roles, only the first occurrence is included in the list. -func (tb *TransactionBody) signerList() []Address { - signers := make([]Address, 0) - seen := make(map[Address]struct{}) - - var addSigner = func(address Address) { - _, ok := seen[address] - if ok { - return - } - - signers = append(signers, address) - seen[address] = struct{}{} - } - - if tb.ProposalKey.Address != EmptyAddress { - addSigner(tb.ProposalKey.Address) - } - - if tb.Payer != EmptyAddress { - addSigner(tb.Payer) - } - - for _, authorizer := range tb.Authorizers { - addSigner(authorizer) - } - - return signers -} - -// signerMap returns a mapping from address to signer index. -func (tb *TransactionBody) signerMap() map[Address]int { - signers := make(map[Address]int) - - for i, signer := range tb.signerList() { - signers[signer] = i - } - - return signers -} - -// SignPayload signs the transaction payload (TransactionDomainTag + payload) with the specified account key using the default transaction domain tag. -// -// The resulting signature is combined with the account address and key ID before -// being added to the transaction. -// -// This function returns an error if the signature cannot be generated. -func (tb *TransactionBody) SignPayload( - address Address, - keyID uint64, - privateKey crypto.PrivateKey, - hasher hash.Hasher, -) error { - sig, err := tb.Sign(tb.PayloadMessage(), privateKey, hasher) - - if err != nil { - return fmt.Errorf("failed to sign transaction payload with given key: %w", err) - } - - tb.AddPayloadSignature(address, keyID, sig) - - return nil -} - -// SignEnvelope signs the full transaction (TransactionDomainTag + payload + payload signatures) with the specified account key using the default transaction domain tag. -// -// The resulting signature is combined with the account address and key ID before -// being added to the transaction. -// -// This function returns an error if the signature cannot be generated. -func (tb *TransactionBody) SignEnvelope( - address Address, - keyID uint64, - privateKey crypto.PrivateKey, - hasher hash.Hasher, -) error { - sig, err := tb.Sign(tb.EnvelopeMessage(), privateKey, hasher) - - if err != nil { - return fmt.Errorf("failed to sign transaction envelope with given key: %w", err) - } - - tb.AddEnvelopeSignature(address, keyID, sig) - - return nil -} - -// Sign signs the data (transaction_tag + message) with the specified private key -// and hasher. -// -// This function returns an error if: -// - crypto.InvalidInputsError if the private key cannot sign with the given hasher -// - other error if an unexpected error occurs -func (tb *TransactionBody) Sign( - message []byte, - privateKey crypto.PrivateKey, - hasher hash.Hasher, -) ([]byte, error) { - message = append(TransactionDomainTag[:], message...) - sig, err := privateKey.Sign(message, hasher) - if err != nil { - return nil, fmt.Errorf("failed to sign message with given key: %w", err) - } - - return sig, nil -} - -// AddPayloadSignature adds a payload signature to the transaction for the given address and key ID. 
-func (tb *TransactionBody) AddPayloadSignature(address Address, keyID uint64, sig []byte) *TransactionBody { - s := tb.createSignature(address, keyID, sig) - - tb.PayloadSignatures = append(tb.PayloadSignatures, s) - sort.Slice(tb.PayloadSignatures, compareSignatures(tb.PayloadSignatures)) - - return tb -} - -// AddEnvelopeSignature adds an envelope signature to the transaction for the given address and key ID. -func (tb *TransactionBody) AddEnvelopeSignature(address Address, keyID uint64, sig []byte) *TransactionBody { - s := tb.createSignature(address, keyID, sig) - - tb.EnvelopeSignatures = append(tb.EnvelopeSignatures, s) - sort.Slice(tb.EnvelopeSignatures, compareSignatures(tb.EnvelopeSignatures)) - - return tb -} - -func (tb *TransactionBody) createSignature(address Address, keyID uint64, sig []byte) TransactionSignature { - signerIndex, signerExists := tb.signerMap()[address] - if !signerExists { - signerIndex = -1 - } - - return TransactionSignature{ - Address: address, - SignerIndex: signerIndex, - KeyIndex: keyID, - Signature: sig, - } -} - func (tb *TransactionBody) PayloadMessage() []byte { - return fingerprint.Fingerprint(tb.payloadCanonicalForm()) + return fingerprint.Fingerprint(tb.PayloadCanonicalForm()) } -func (tb *TransactionBody) payloadCanonicalForm() interface{} { +func (tb *TransactionBody) PayloadCanonicalForm() interface{} { authorizers := make([][]byte, len(tb.Authorizers)) for i, auth := range tb.Authorizers { authorizers[i] = auth.Bytes() @@ -351,7 +175,7 @@ func (tb *TransactionBody) payloadCanonicalForm() interface{} { ReferenceBlockID []byte GasLimit uint64 ProposalKeyAddress []byte - ProposalKeyID uint64 + ProposalKeyID uint32 ProposalKeySequenceNumber uint64 Payer []byte Authorizers [][]byte @@ -380,25 +204,11 @@ func (tb *TransactionBody) envelopeCanonicalForm() interface{} { Payload interface{} PayloadSignatures interface{} }{ - tb.payloadCanonicalForm(), + tb.PayloadCanonicalForm(), signaturesList(tb.PayloadSignatures).canonicalForm(), } } -func (tx *Transaction) PayloadMessage() []byte { - return fingerprint.Fingerprint(tx.TransactionBody.payloadCanonicalForm()) -} - -// Checksum provides a cryptographic commitment for a chunk content -func (tx *Transaction) Checksum() Identifier { - return MakeID(tx) -} - -func (tx *Transaction) String() string { - return fmt.Sprintf("Transaction %v submitted by %v (block %v)", - tx.ID(), tx.Payer.Hex(), tx.ReferenceBlockID) -} - // TransactionStatus represents the status of a transaction. type TransactionStatus int @@ -440,7 +250,7 @@ func (f TransactionField) String() string { // A ProposalKey is the key that specifies the proposal key and sequence number for a transaction. type ProposalKey struct { Address Address - KeyIndex uint64 + KeyIndex uint32 SequenceNumber uint64 } @@ -453,52 +263,109 @@ func (p ProposalKey) ByteSize() int { // A TransactionSignature is a signature associated with a specific account key. type TransactionSignature struct { - Address Address - SignerIndex int - KeyIndex uint64 - Signature []byte + Address Address + SignerIndex int + KeyIndex uint32 + Signature []byte + ExtensionData []byte } // String returns the string representation of a transaction signature. func (s TransactionSignature) String() string { - return fmt.Sprintf("Address: %s. SignerIndex: %d. KeyID: %d. Signature: %s", - s.Address, s.SignerIndex, s.KeyIndex, s.Signature) + return fmt.Sprintf("Address: %s. SignerIndex: %d. KeyID: %d. Signature: %x. 
Extension Data: %x", + s.Address, s.SignerIndex, s.KeyIndex, s.Signature, s.ExtensionData) } // ByteSize returns the byte size of the transaction signature func (s TransactionSignature) ByteSize() int { signerIndexLen := 8 keyIDLen := 8 - return len(s.Address) + signerIndexLen + keyIDLen + len(s.Signature) + return len(s.Address) + signerIndexLen + keyIDLen + len(s.Signature) + len(s.ExtensionData) } func (s TransactionSignature) Fingerprint() []byte { return fingerprint.Fingerprint(s.canonicalForm()) } -func (s TransactionSignature) canonicalForm() interface{} { - return struct { - SignerIndex uint - KeyID uint - Signature []byte - }{ - SignerIndex: uint(s.SignerIndex), // int is not RLP-serializable - KeyID: uint(s.KeyIndex), // int is not RLP-serializable - Signature: s.Signature, +// ValidateExtensionDataAndReconstructMessage checks the format validity of the extension data in the given TransactionSignature +// and reconstructs the verification message based on payload, authentication scheme and extension data. +// +// The output message is constructed by adapting the input payload to the intended authentication scheme of the signature. +// +// The output message is the message that will be cryptographically +// checked against the account public key and signature. +// +// returns +// - (false, nil) if the extension data is not formed correctly +// - (true, message) if the extension data is valid +// +// The current implementation simply returns false if the extension data is invalid, could consider adding more visibility +// into reason of validation failure +func (s TransactionSignature) ValidateExtensionDataAndReconstructMessage(payload []byte) (bool, []byte) { + // Default to Plain scheme if extension data is nil or empty + scheme := PlainScheme + if len(s.ExtensionData) > 0 { + scheme = AuthenticationSchemeFromByte(s.ExtensionData[0]) + } + + switch scheme { + case PlainScheme: + if len(s.ExtensionData) > 1 { + return false, nil + } + return true, slices.Concat(TransactionDomainTag[:], payload) + case WebAuthnScheme: // See FLIP 264 for more details + return validateWebAuthNExtensionData(s.ExtensionData, payload) + default: + // authentication scheme not found + return false, nil } } -func compareSignatures(signatures []TransactionSignature) func(i, j int) bool { - return func(i, j int) bool { - sigA := signatures[i] - sigB := signatures[j] +// Checks if the scheme is plain authentication scheme, and indicate that it +// is required to use the legacy canonical form. +// We check for a valid scheme identifier, as this should be the only case +// where the extension data can be left out of the cannonical form. 
+// shouldUseLegacyCanonicalForm checks whether the signature uses the plain
+// authentication scheme, which requires the legacy canonical form.
+// We check for a valid scheme identifier, as this should be the only case
+// where the extension data can be left out of the canonical form.
+// All other cases that resemble the plain scheme but are not valid
+// should be included in the canonical form, as they are not valid signatures.
+func (s TransactionSignature) shouldUseLegacyCanonicalForm() bool {
+	// len check covers nil case
+	return len(s.ExtensionData) == 0 || (len(s.ExtensionData) == 1 && s.ExtensionData[0] == byte(PlainScheme))
+}
 
+func (s TransactionSignature) canonicalForm() interface{} {
+	// int is not RLP-serializable, therefore s.SignerIndex and s.KeyIndex are converted to uint
+	if s.shouldUseLegacyCanonicalForm() {
+		// This is the legacy canonical form, retained mainly for backward compatibility
+		return struct {
+			SignerIndex uint
+			KeyID       uint
+			Signature   []byte
+		}{
+			SignerIndex: uint(s.SignerIndex),
+			KeyID:       uint(s.KeyIndex),
+			Signature:   s.Signature,
 		}
+	}
+	return struct {
+		SignerIndex   uint
+		KeyID         uint
+		Signature     []byte
+		ExtensionData []byte
+	}{
+		SignerIndex:   uint(s.SignerIndex),
+		KeyID:         uint(s.KeyIndex),
+		Signature:     s.Signature,
+		ExtensionData: s.ExtensionData,
+	}
+}
 
-		return sigA.SignerIndex < sigB.SignerIndex
+func compareSignatures(sigA, sigB TransactionSignature) int {
+	if sigA.SignerIndex == sigB.SignerIndex {
+		return int(sigA.KeyIndex) - int(sigB.KeyIndex)
 	}
+
+	return sigA.SignerIndex - sigB.SignerIndex
 }
 
 type signaturesList []TransactionSignature
diff --git a/model/flow/transaction_body_builder.go b/model/flow/transaction_body_builder.go
new file mode 100644
index 00000000000..6ae7f177739
--- /dev/null
+++ b/model/flow/transaction_body_builder.go
@@ -0,0 +1,295 @@
+package flow
+
+import (
+	"fmt"
+	"slices"
+
+	"github.com/onflow/crypto"
+	"github.com/onflow/crypto/hash"
+
+	"github.com/onflow/flow-go/model/fingerprint"
+)
+
+// TransactionBodyBuilder constructs a validated, immutable [TransactionBody] in two phases:
+// first by setting individual fields using fluent SetX methods, then by calling Build()
+// to perform minimal validity and sanity checks and return the final [TransactionBody].
+// Caution: TransactionBodyBuilder is not safe for concurrent use by multiple goroutines.
+type TransactionBodyBuilder struct {
+	u UntrustedTransactionBody
+}
+
+// NewTransactionBodyBuilder constructs an empty transaction builder.
+func NewTransactionBodyBuilder() *TransactionBodyBuilder {
+	return &TransactionBodyBuilder{}
+}
+
+// Build validates and returns an immutable TransactionBody.
+// All errors indicate that a valid TransactionBody cannot be created from the current builder state.
+func (tb *TransactionBodyBuilder) Build() (*TransactionBody, error) {
+	slices.SortFunc(tb.u.PayloadSignatures, compareSignatures)
+	slices.SortFunc(tb.u.EnvelopeSignatures, compareSignatures)
+	return NewTransactionBody(tb.u)
+}
+
+// SetReferenceBlockID sets the reference block ID for this transaction.
+func (tb *TransactionBodyBuilder) SetReferenceBlockID(blockID Identifier) *TransactionBodyBuilder {
+	tb.u.ReferenceBlockID = blockID
+	return tb
+}
+
+// SetScript sets the Cadence script for this transaction.
+func (tb *TransactionBodyBuilder) SetScript(script []byte) *TransactionBodyBuilder {
+	tb.u.Script = script
+	return tb
+}
+
+// SetArguments sets the Cadence arguments list for this transaction.
+func (tb *TransactionBodyBuilder) SetArguments(args [][]byte) *TransactionBodyBuilder {
+	tb.u.Arguments = args
+	return tb
+}
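+
+// Example (hedged sketch; `refBlockID`, `payer`, and the script are
+// placeholders): typical fluent construction ends with Build(), which sorts the
+// accumulated signatures and delegates validation to NewTransactionBody:
+//
+//	tx, err := NewTransactionBodyBuilder().
+//		SetScript([]byte(`transaction { execute {} }`)).
+//		SetReferenceBlockID(refBlockID).
+//		SetPayer(payer).
+//		Build()
+//	if err != nil {
+//		// the builder state cannot produce a valid TransactionBody
+//	}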
+// AddArgument adds an argument to the Cadence arguments list for this transaction.
+func (tb *TransactionBodyBuilder) AddArgument(arg []byte) *TransactionBodyBuilder {
+	tb.u.Arguments = append(tb.u.Arguments, arg)
+	return tb
+}
+
+// SetComputeLimit sets the gas limit for this transaction.
+func (tb *TransactionBodyBuilder) SetComputeLimit(gasLimit uint64) *TransactionBodyBuilder {
+	tb.u.GasLimit = gasLimit
+	return tb
+}
+
+// SetProposalKey sets the proposal key and sequence number for this transaction.
+//
+// The first two arguments specify the account key to be used, and the last argument is the sequence
+// number being declared.
+func (tb *TransactionBodyBuilder) SetProposalKey(address Address, keyID uint32, sequenceNum uint64) *TransactionBodyBuilder {
+	tb.u.ProposalKey = ProposalKey{
+		Address:        address,
+		KeyIndex:       keyID,
+		SequenceNumber: sequenceNum,
+	}
+	return tb
+}
+
+// SetPayer sets the payer account for this transaction.
+func (tb *TransactionBodyBuilder) SetPayer(payer Address) *TransactionBodyBuilder {
+	tb.u.Payer = payer
+	return tb
+}
+
+// AddAuthorizer adds an authorizer account to this transaction.
+func (tb *TransactionBodyBuilder) AddAuthorizer(authorizer Address) *TransactionBodyBuilder {
+	tb.u.Authorizers = append(tb.u.Authorizers, authorizer)
+	return tb
+}
+
+// AddPayloadSignature adds a payload signature to the transaction for the given address and key ID.
+func (tb *TransactionBodyBuilder) AddPayloadSignature(address Address, keyID uint32, sig []byte) *TransactionBodyBuilder {
+	s := tb.createSignature(address, keyID, sig, nil)
+	tb.u.PayloadSignatures = append(tb.u.PayloadSignatures, s)
+	return tb
+}
+
+// AddEnvelopeSignature adds an envelope signature to the transaction for the given address and key ID.
+func (tb *TransactionBodyBuilder) AddEnvelopeSignature(address Address, keyID uint32, sig []byte) *TransactionBodyBuilder {
+	s := tb.createSignature(address, keyID, sig, nil)
+	tb.u.EnvelopeSignatures = append(tb.u.EnvelopeSignatures, s)
+	return tb
+}
+
+// AddPayloadSignatureWithExtensionData adds a payload signature with extension data to the transaction for the given address and key ID.
+func (tb *TransactionBodyBuilder) AddPayloadSignatureWithExtensionData(address Address, keyID uint32, sig []byte, extensionData []byte) *TransactionBodyBuilder {
+	s := tb.createSignature(address, keyID, sig, extensionData)
+	tb.u.PayloadSignatures = append(tb.u.PayloadSignatures, s)
+	return tb
+}
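+
+// Assumed extension-data layout (per FLIP 264, as exercised in
+// transaction_test.go; `builder`, `addr`, `keyID`, and `sig` are placeholders):
+// the first byte selects the authentication scheme and any remaining bytes are
+// scheme-specific:
+//
+//	plain := []byte{0x0}                                     // PlainScheme, no extra bytes
+//	webauthn := slices.Concat([]byte{0x1}, rlpWebAuthnBytes) // WebAuthnScheme + RLP-encoded WebAuthn data
+//	builder.AddEnvelopeSignatureWithExtensionData(addr, keyID, sig, webauthn)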
+// AddEnvelopeSignatureWithExtensionData adds an envelope signature with extension data to the transaction for the given address and key ID.
+func (tb *TransactionBodyBuilder) AddEnvelopeSignatureWithExtensionData(address Address, keyID uint32, sig []byte, extensionData []byte) *TransactionBodyBuilder {
+	s := tb.createSignature(address, keyID, sig, extensionData)
+	tb.u.EnvelopeSignatures = append(tb.u.EnvelopeSignatures, s)
+	return tb
+}
+
+func (tb *TransactionBodyBuilder) payloadMessage() []byte {
+	return fingerprint.Fingerprint(tb.payloadCanonicalForm())
+}
+
+func (tb *TransactionBodyBuilder) payloadCanonicalForm() interface{} {
+	authorizers := make([][]byte, len(tb.u.Authorizers))
+	for i, auth := range tb.u.Authorizers {
+		authorizers[i] = auth.Bytes()
+	}
+
+	return struct {
+		Script                    []byte
+		Arguments                 [][]byte
+		ReferenceBlockID          []byte
+		GasLimit                  uint64
+		ProposalKeyAddress        []byte
+		ProposalKeyID             uint32
+		ProposalKeySequenceNumber uint64
+		Payer                     []byte
+		Authorizers               [][]byte
+	}{
+		Script:                    tb.u.Script,
+		Arguments:                 tb.u.Arguments,
+		ReferenceBlockID:          tb.u.ReferenceBlockID[:],
+		GasLimit:                  tb.u.GasLimit,
+		ProposalKeyAddress:        tb.u.ProposalKey.Address.Bytes(),
+		ProposalKeyID:             tb.u.ProposalKey.KeyIndex,
+		ProposalKeySequenceNumber: tb.u.ProposalKey.SequenceNumber,
+		Payer:                     tb.u.Payer.Bytes(),
+		Authorizers:               authorizers,
+	}
+}
+
+// EnvelopeMessage returns the signable message for the transaction envelope.
+//
+// This message is only signed by the payer account.
+func (tb *TransactionBodyBuilder) EnvelopeMessage() []byte {
+	return fingerprint.Fingerprint(tb.envelopeCanonicalForm())
+}
+
+func (tb *TransactionBodyBuilder) envelopeCanonicalForm() interface{} {
+	return struct {
+		Payload           interface{}
+		PayloadSignatures interface{}
+	}{
+		tb.payloadCanonicalForm(),
+		signaturesList(tb.u.PayloadSignatures).canonicalForm(),
+	}
+}
+
+func (tb *TransactionBodyBuilder) createSignature(address Address, keyID uint32, sig []byte, extensionData []byte) TransactionSignature {
+	signerIndex, signerExists := tb.signerMap()[address]
+	if !signerExists {
+		signerIndex = -1
+	}
+
+	return TransactionSignature{
+		Address:       address,
+		SignerIndex:   signerIndex,
+		KeyIndex:      keyID,
+		Signature:     sig,
+		ExtensionData: extensionData,
+	}
+}
+
+// signerMap returns a mapping from address to signer index.
+func (tb *TransactionBodyBuilder) signerMap() map[Address]int {
+	signers := make(map[Address]int)
+
+	for i, signer := range tb.signerList() {
+		signers[signer] = i
+	}
+
+	return signers
+}
+
+// signerList returns a list of unique accounts required to sign this transaction.
+//
+// The list is returned in the following order:
+//  1. PROPOSER
+//  2. PAYER
+//  3. AUTHORIZERS (in insertion order)
+//
+// The only exception to the above ordering is for deduplication; if the same account
+// is used in multiple signing roles, only the first occurrence is included in the list.
+func (tb *TransactionBodyBuilder) signerList() []Address {
+	signers := make([]Address, 0)
+	seen := make(map[Address]struct{})
+
+	var addSigner = func(address Address) {
+		_, ok := seen[address]
+		if ok {
+			return
+		}
+
+		signers = append(signers, address)
+		seen[address] = struct{}{}
+	}
+
+	if tb.u.ProposalKey.Address != EmptyAddress {
+		addSigner(tb.u.ProposalKey.Address)
+	}
+
+	if tb.u.Payer != EmptyAddress {
+		addSigner(tb.u.Payer)
+	}
+
+	for _, authorizer := range tb.u.Authorizers {
+		addSigner(authorizer)
+	}
+
+	return signers
+}
+
+// SignPayload signs the transaction payload (TransactionDomainTag + payload) with the specified account key using the default transaction domain tag.
+// +// The resulting signature is combined with the account address and key ID before +// being added to the transaction. +// +// This function returns an error if the signature cannot be generated. +func (tb *TransactionBodyBuilder) SignPayload( + address Address, + keyID uint32, + privateKey crypto.PrivateKey, + hasher hash.Hasher, +) error { + sig, err := tb.Sign(tb.payloadMessage(), privateKey, hasher) + + if err != nil { + return fmt.Errorf("failed to sign transaction payload with given key: %w", err) + } + + tb.AddPayloadSignature(address, keyID, sig) + + return nil +} + +// SignEnvelope signs the full transaction (TransactionDomainTag + payload + payload signatures) with the specified account key using the default transaction domain tag. +// +// The resulting signature is combined with the account address and key ID before +// being added to the transaction. +// +// This function returns an error if the signature cannot be generated. +func (tb *TransactionBodyBuilder) SignEnvelope( + address Address, + keyID uint32, + privateKey crypto.PrivateKey, + hasher hash.Hasher, +) error { + sig, err := tb.Sign(tb.EnvelopeMessage(), privateKey, hasher) + + if err != nil { + return fmt.Errorf("failed to sign transaction envelope with given key: %w", err) + } + + tb.AddEnvelopeSignature(address, keyID, sig) + + return nil +} + +// Sign signs the data (transaction_tag + message) with the specified private key +// and hasher. +// +// This function returns an error if: +// - crypto.InvalidInputsError if the private key cannot sign with the given hasher +// - other error if an unexpected error occurs +func (tb *TransactionBodyBuilder) Sign( + message []byte, + privateKey crypto.PrivateKey, + hasher hash.Hasher, +) ([]byte, error) { + message = append(TransactionDomainTag[:], message...) + sig, err := privateKey.Sign(message, hasher) + if err != nil { + return nil, fmt.Errorf("failed to sign message with given key: %w", err) + } + + return sig, nil +} diff --git a/model/flow/transaction_result.go b/model/flow/transaction_result.go index 1514fe9486f..3c11d5e54d6 100644 --- a/model/flow/transaction_result.go +++ b/model/flow/transaction_result.go @@ -1,4 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED package flow import ( @@ -22,14 +21,39 @@ func (t TransactionResult) String() string { return fmt.Sprintf("Transaction ID: %s, Error Message: %s", t.TransactionID.String(), t.ErrorMessage) } -// ID returns a canonical identifier that is guaranteed to be unique. -func (t TransactionResult) ID() Identifier { - return t.TransactionID -} +// TODO(ramtin): add canonical encoding and ID +type TransactionResults []TransactionResult -func (te *TransactionResult) Checksum() Identifier { - return te.ID() +// LightTransactionResult represents a TransactionResult, omitting any fields that are prone to +// non-determinism; i.e. the error message and memory used estimate. +// +// While the net causes of a transaction failing are deterministic, the specific error and message +// propagated back to FVM are prone to bugs resulting in slight variations. Rather than including +// the error and risking execution forks if an undetected bug is introduced, we simplify it to just +// a boolean value. This will likely change in the future to include some additional information +// about the error. +// +// Additionally, MemoryUsed is omitted because it is an estimate from the specific execution node, +// and will vary across nodes. 
+type LightTransactionResult struct {
+	// TransactionID is the ID of the transaction this result was emitted from.
+	TransactionID Identifier
+	// Failed is true if the transaction's execution failed resulting in an error, false otherwise.
+	Failed bool
+	// ComputationUsed is the amount of computation used while executing the transaction.
+	ComputationUsed uint64
 }
 
-// TODO(ramtin): add canonical encoding and ID
-type TransactionResults []TransactionResult
+// TransactionResultErrorMessage represents an error message resulting from a transaction's execution.
+// This struct holds the transaction's ID, its index, any error message generated during execution,
+// and the identifier of the execution node that provided the error message.
+type TransactionResultErrorMessage struct {
+	// TransactionID is the ID of the transaction this result error was emitted from.
+	TransactionID Identifier
+	// Index is the index of the transaction this result error was emitted from.
+	Index uint32
+	// ErrorMessage contains the error message of any error that may have occurred when the transaction was executed.
+	ErrorMessage string
+	// ExecutorID is the node ID of the execution node that the message was received from.
+	ExecutorID Identifier
+}
diff --git a/model/flow/transaction_test.go b/model/flow/transaction_test.go
index 34ba942570c..3bac17ed9ab 100644
--- a/model/flow/transaction_test.go
+++ b/model/flow/transaction_test.go
@@ -1,39 +1,53 @@ package flow_test
 
 import (
+	"crypto/rand"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	mrand "math/rand"
+	"slices"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
+	"github.com/onflow/crypto"
+	"github.com/onflow/crypto/hash"
+	"github.com/onflow/go-ethereum/rlp"
+
+	fvmCrypto "github.com/onflow/flow-go/fvm/crypto"
+	modelrlp "github.com/onflow/flow-go/model/encoding/rlp"
+	"github.com/onflow/flow-go/model/fingerprint"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
 func TestTransaction_SignatureOrdering(t *testing.T) {
-	tx := flow.NewTransactionBody()
 	proposerAddress := unittest.RandomAddressFixture()
-	proposerKeyIndex := uint64(1)
+	proposerKeyIndex := uint32(1)
 	proposerSequenceNumber := uint64(42)
 	proposerSignature := []byte{1, 2, 3}
 
 	authorizerAddress := unittest.RandomAddressFixture()
-	authorizerKeyIndex := uint64(0)
+	authorizerKeyIndex := uint32(0)
 	authorizerSignature := []byte{4, 5, 6}
 
 	payerAddress := unittest.RandomAddressFixture()
-	payerKeyIndex := uint64(0)
+	payerKeyIndex := uint32(0)
 	payerSignature := []byte{7, 8, 9}
 
-	tx.SetProposalKey(proposerAddress, proposerKeyIndex, proposerSequenceNumber)
-	tx.AddPayloadSignature(proposerAddress, proposerKeyIndex, proposerSignature)
-
-	tx.SetPayer(payerAddress)
-	tx.AddEnvelopeSignature(payerAddress, payerKeyIndex, payerSignature)
-
-	tx.AddAuthorizer(authorizerAddress)
-	tx.AddPayloadSignature(authorizerAddress, authorizerKeyIndex, authorizerSignature)
+	tx, err := flow.NewTransactionBodyBuilder().
+		SetScript([]byte(`transaction(){}`)).
+		SetProposalKey(proposerAddress, proposerKeyIndex, proposerSequenceNumber).
+		AddPayloadSignature(proposerAddress, proposerKeyIndex, proposerSignature).
+		SetPayer(payerAddress).
+		AddEnvelopeSignature(payerAddress, payerKeyIndex, payerSignature).
+		AddAuthorizer(authorizerAddress).
+		AddPayloadSignature(authorizerAddress, authorizerKeyIndex, authorizerSignature).
+ Build() + require.NoError(t, err) require.Len(t, tx.PayloadSignatures, 2) @@ -58,3 +72,619 @@ func TestTransaction_Status(t *testing.T) { assert.Equal(t, status.String(), value) } } + +// TestTransactionAuthenticationSchemes tests transaction signature verifications +// with a focus on authentication schemes. +func TestTransactionAuthenticationSchemes(t *testing.T) { + seedLength := 32 + h := hash.SHA2_256 + s := crypto.ECDSAP256 + payerAddress := unittest.AddressFixture() + authorizerAddress := flow.EmptyAddress + require.NotEqual(t, payerAddress, authorizerAddress) + + transactionBody := flow.TransactionBody{ + Script: []byte("some script"), + Arguments: [][]byte{ + []byte("arg1"), + }, + ReferenceBlockID: flow.HashToID([]byte("some block id")), + GasLimit: 1000, + Payer: payerAddress, + ProposalKey: flow.ProposalKey{ + Address: authorizerAddress, + KeyIndex: 0, + SequenceNumber: 0, + }, + Authorizers: []flow.Address{ + authorizerAddress, + }, + PayloadSignatures: []flow.TransactionSignature{ + { + Address: authorizerAddress, + KeyIndex: 0, + Signature: []byte("signature"), // Mock signature, not validated + SignerIndex: 0, + ExtensionData: unittest.RandomBytes(3), + }, + }, + EnvelopeSignatures: []flow.TransactionSignature{ + { + Address: payerAddress, + KeyIndex: 0, + Signature: []byte("placeholder"), + SignerIndex: 0, + ExtensionData: unittest.RandomBytes(3), + }, + }, + } + + // test transaction envelope canonical form constructions + t.Run("Transaction envelope canonical form", func(t *testing.T) { + + legacyEnvelopeSignatureCanonicalForm := func(tb flow.TransactionBody) []byte { + return fingerprint.Fingerprint(struct { + Payload interface{} + PayloadSignatures interface{} + }{ + tb.PayloadCanonicalForm(), + []interface{}{ + // Expected canonical form of payload signature + struct { + SignerIndex uint + KeyID uint + Signature []byte + }{ + SignerIndex: uint(tb.PayloadSignatures[0].SignerIndex), + KeyID: uint(tb.PayloadSignatures[0].KeyIndex), + Signature: tb.PayloadSignatures[0].Signature, + }, + }, + }) + } + randomExtensionData := unittest.RandomBytes(20) + + cases := []struct { + payloadExtensionData []byte + expectedEnvelopeSignatureCanonicalForm func(tb flow.TransactionBody) []byte + }{ + { + // nil extension data + payloadExtensionData: nil, + expectedEnvelopeSignatureCanonicalForm: legacyEnvelopeSignatureCanonicalForm, + }, { + // empty extension data + payloadExtensionData: []byte{}, + expectedEnvelopeSignatureCanonicalForm: legacyEnvelopeSignatureCanonicalForm, + }, { + // zero scheme identifier + payloadExtensionData: []byte{0x0}, + expectedEnvelopeSignatureCanonicalForm: legacyEnvelopeSignatureCanonicalForm, + }, { + // zero scheme identifier but invalid extension (should be taken into account in the canonical form) + payloadExtensionData: []byte{0x0, 1, 2, 3}, + expectedEnvelopeSignatureCanonicalForm: func(tb flow.TransactionBody) []byte { + return fingerprint.Fingerprint(struct { + Payload interface{} + PayloadSignatures interface{} + }{ + tb.PayloadCanonicalForm(), + []interface{}{ + // Expected canonical form of payload signature + struct { + SignerIndex uint + KeyID uint + Signature []byte + ExtensionData []byte + }{ + SignerIndex: uint(tb.PayloadSignatures[0].SignerIndex), + KeyID: uint(tb.PayloadSignatures[0].KeyIndex), + Signature: tb.PayloadSignatures[0].Signature, + ExtensionData: []byte{0x0, 1, 2, 3}, + }, + }, + }) + }, + }, { + // non-plain authentication scheme + payloadExtensionData: slices.Concat([]byte{0x5}, randomExtensionData[:]), + 
expectedEnvelopeSignatureCanonicalForm: func(tb flow.TransactionBody) []byte { + return fingerprint.Fingerprint(struct { + Payload interface{} + PayloadSignatures interface{} + }{ + tb.PayloadCanonicalForm(), + []interface{}{ + // Expected canonical form of payload signature + struct { + SignerIndex uint + KeyID uint + Signature []byte + ExtensionData []byte + }{ + SignerIndex: uint(tb.PayloadSignatures[0].SignerIndex), + KeyID: uint(tb.PayloadSignatures[0].KeyIndex), + Signature: tb.PayloadSignatures[0].Signature, + ExtensionData: slices.Concat([]byte{0x5}, randomExtensionData[:]), + }, + }, + }) + }, + }, { + // webauthn scheme + payloadExtensionData: slices.Concat([]byte{0x1}, randomExtensionData[:]), + expectedEnvelopeSignatureCanonicalForm: func(tb flow.TransactionBody) []byte { + return fingerprint.Fingerprint(struct { + Payload interface{} + PayloadSignatures interface{} + }{ + tb.PayloadCanonicalForm(), + []interface{}{ + // Expected canonical form of payload signature + struct { + SignerIndex uint + KeyID uint + Signature []byte + ExtensionData []byte + }{ + SignerIndex: uint(tb.PayloadSignatures[0].SignerIndex), + KeyID: uint(tb.PayloadSignatures[0].KeyIndex), + Signature: tb.PayloadSignatures[0].Signature, + ExtensionData: slices.Concat([]byte{0x1}, randomExtensionData[:]), + }, + }, + }) + }, + }, + } + // test all cases + for _, c := range cases { + t.Run(fmt.Sprintf("auth scheme, payloadExtensionData: %v", c.payloadExtensionData), func(t *testing.T) { + transactionBody.PayloadSignatures[0].ExtensionData = c.payloadExtensionData + transactionMessage := transactionBody.EnvelopeMessage() + + // generate expected envelope data + expectedEnvelopeMessage := c.expectedEnvelopeSignatureCanonicalForm(transactionBody) + // compare canonical forms + require.Equal(t, transactionMessage, expectedEnvelopeMessage) + }) + } + }) + + // test transaction canonical form constructions (ID computation) + t.Run("Transaction canonical form", func(t *testing.T) { + + legacyTransactionCanonicalForm := func(tb flow.TransactionBody) []byte { + return fingerprint.Fingerprint(struct { + Payload interface{} + PayloadSignatures interface{} + EnvelopeSignatures interface{} + }{ + tb.PayloadCanonicalForm(), + []interface{}{ + // Expected canonical form of payload signature + struct { + SignerIndex uint + KeyID uint + Signature []byte + }{ + SignerIndex: uint(tb.PayloadSignatures[0].SignerIndex), + KeyID: uint(tb.PayloadSignatures[0].KeyIndex), + Signature: tb.PayloadSignatures[0].Signature, + }, + }, + []interface{}{ + // Expected canonical form of payload signature + struct { + SignerIndex uint + KeyID uint + Signature []byte + }{ + SignerIndex: uint(tb.EnvelopeSignatures[0].SignerIndex), + KeyID: uint(tb.EnvelopeSignatures[0].KeyIndex), + Signature: tb.EnvelopeSignatures[0].Signature, + }, + }, + }) + } + + randomExtensionData := unittest.RandomBytes(20) + + cases := []struct { + payloadExtensionData []byte + expectedTransactionCanonicalForm func(tb flow.TransactionBody) []byte + }{ + // nil extension data + { + payloadExtensionData: nil, + expectedTransactionCanonicalForm: legacyTransactionCanonicalForm, + }, + // empty extension data + { + payloadExtensionData: []byte{}, + expectedTransactionCanonicalForm: legacyTransactionCanonicalForm, + }, { + // zero extension data + payloadExtensionData: []byte{0x0}, + expectedTransactionCanonicalForm: legacyTransactionCanonicalForm, + }, { + // webauthn scheme + payloadExtensionData: slices.Concat([]byte{0x1}, randomExtensionData[:]), + 
expectedTransactionCanonicalForm: func(tb flow.TransactionBody) []byte { + return fingerprint.Fingerprint(struct { + Payload interface{} + PayloadSignatures interface{} + EnvelopeSignatures interface{} + }{ + tb.PayloadCanonicalForm(), + []interface{}{ + // Expected canonical form of payload signature + struct { + SignerIndex uint + KeyID uint + Signature []byte + ExtensionData []byte + }{ + SignerIndex: uint(tb.PayloadSignatures[0].SignerIndex), + KeyID: uint(tb.PayloadSignatures[0].KeyIndex), + Signature: tb.PayloadSignatures[0].Signature, + ExtensionData: slices.Concat([]byte{0x1}, randomExtensionData[:]), + }, + }, + []interface{}{ + // Expected canonical form of payload signature + struct { + SignerIndex uint + KeyID uint + Signature []byte + ExtensionData []byte + }{ + SignerIndex: uint(tb.EnvelopeSignatures[0].SignerIndex), + KeyID: uint(tb.EnvelopeSignatures[0].KeyIndex), + Signature: tb.EnvelopeSignatures[0].Signature, + ExtensionData: slices.Concat([]byte{0x1}, randomExtensionData[:]), + }, + }, + }) + }, + }, + } + // test all cases + for _, c := range cases { + t.Run(fmt.Sprintf("auth scheme (payloadExtensionData): %v", c.payloadExtensionData), func(t *testing.T) { + transactionBody.PayloadSignatures[0].ExtensionData = c.payloadExtensionData + transactionBody.EnvelopeSignatures[0].ExtensionData = c.payloadExtensionData + transactionID := transactionBody.ID() + + // generate expected envelope data + sha3 := hash.NewSHA3_256() + expectedID := flow.Identifier(sha3.ComputeHash(c.expectedTransactionCanonicalForm(transactionBody))) + // compare canonical forms + require.Equal(t, transactionID, expectedID) + }) + } + }) + + // test `VerifySignatureFromTransaction` in the plain authentication scheme + t.Run("plain authentication scheme", func(t *testing.T) { + cases := []struct { + payloadExtensionData []byte + extensionOk bool + signatureOk bool + }{ + { + // nil extension data + payloadExtensionData: nil, + extensionOk: true, + signatureOk: true, + }, + { + // empty extension data + payloadExtensionData: []byte{}, + extensionOk: true, + signatureOk: true, + }, { + // correct extension data + payloadExtensionData: []byte{0x0}, + extensionOk: true, + signatureOk: true, + }, { + // incorrect extension data + payloadExtensionData: []byte{0x1}, + extensionOk: false, + signatureOk: false, + }, { + // incorrect extension data: correct identifier but with extra bytes + payloadExtensionData: []byte{0, 1, 2, 3}, + extensionOk: false, + signatureOk: false, + }, + } + // test all cases + for _, c := range cases { + // payload data (the transaction envelope to sign/verify) + payload := unittest.RandomBytes(20) + t.Run(fmt.Sprintf("auth scheme (payloadExtensionData): %v", c.payloadExtensionData), func(t *testing.T) { + seed := make([]byte, seedLength) + _, err := rand.Read(seed) + require.NoError(t, err) + sk, err := crypto.GeneratePrivateKey(s, seed) + require.NoError(t, err) + hasher, err := fvmCrypto.NewPrefixedHashing(h, flow.TransactionTagString) + require.NoError(t, err) + signature, err := sk.Sign(payload, hasher) + require.NoError(t, err) + + transactionBody.PayloadSignatures[0].ExtensionData = c.payloadExtensionData + transactionBody.PayloadSignatures[0].Signature = signature + + extensionDataValid, message := transactionBody.PayloadSignatures[0].ValidateExtensionDataAndReconstructMessage(payload) + signatureValid, err := fvmCrypto.VerifySignatureFromTransaction(signature, message, sk.PublicKey(), h) + + require.NoError(t, err) + require.Equal(t, c.extensionOk, extensionDataValid) + if 
c.extensionOk { + require.Equal(t, c.signatureOk, signatureValid) + } + }) + } + }) + + // test `VerifySignatureFromTransaction` in the WebAuthn authentication scheme + t.Run("webauthn authentication scheme", func(t *testing.T) { + hasher, err := fvmCrypto.NewPrefixedHashing(hash.SHA2_256, flow.TransactionTagString) + require.NoError(t, err) + + transactionMessage := transactionBody.EnvelopeMessage() + authNChallenge := hasher.ComputeHash(transactionMessage) + authNChallengeBase64Url := base64.RawURLEncoding.EncodeToString(authNChallenge) + validUserFlag := byte(0x01) + validClientDataOrigin := "https://testing.com" + rpIDHash := unittest.RandomBytes(32) + sigCounter := unittest.RandomBytes(4) + + // For use in cases where you're testing the other value + validAuthenticatorData := slices.Concat(rpIDHash, []byte{validUserFlag}, sigCounter) + validClientDataJSON := map[string]string{ + "type": flow.WebAuthnTypeGet, + "challenge": authNChallengeBase64Url, + "origin": validClientDataOrigin, + } + + cases := []struct { + description string + authenticatorData []byte + clientDataJSON map[string]string + extensionOk bool + signatureOk bool + }{ + { + description: "Cannot be just the scheme, not enough extension data", + authenticatorData: []byte{}, + clientDataJSON: map[string]string{}, + extensionOk: false, + signatureOk: false, + }, { + description: "invalid user flag, UP not set", + authenticatorData: slices.Concat(rpIDHash, []byte{0x0}, sigCounter), + clientDataJSON: validClientDataJSON, + extensionOk: false, + signatureOk: false, + }, { + description: "invalid user flag, extensions exist but flag AT and ED are not set", + authenticatorData: slices.Concat(rpIDHash, []byte{validUserFlag}, sigCounter, unittest.RandomBytes(1+mrand.Intn(20))), + clientDataJSON: validClientDataJSON, + extensionOk: false, + signatureOk: false, + }, { + description: "invalid user flag, extensions do not exist but flag AT is set", + authenticatorData: slices.Concat(rpIDHash, []byte{validUserFlag | 0x40}, sigCounter), + clientDataJSON: map[string]string{ + "type": flow.WebAuthnTypeGet, + "challenge": authNChallengeBase64Url, + "origin": validClientDataOrigin, + }, + extensionOk: false, + signatureOk: false, + }, { + description: "invalid user flag, extensions do not exist but flag ED is set", + authenticatorData: slices.Concat(rpIDHash, []byte{validUserFlag | 0x80}, sigCounter), + clientDataJSON: map[string]string{ + "type": flow.WebAuthnTypeGet, + "challenge": authNChallengeBase64Url, + "origin": validClientDataOrigin, + }, + extensionOk: false, + signatureOk: false, + }, { + description: "invalid client data type", + authenticatorData: validAuthenticatorData, + clientDataJSON: map[string]string{ + "type": "invalid_type", + "challenge": authNChallengeBase64Url, + "origin": validClientDataOrigin, + }, + extensionOk: false, + signatureOk: false, + }, { + description: "empty origin (valid)", + authenticatorData: validAuthenticatorData, + clientDataJSON: map[string]string{ + "type": flow.WebAuthnTypeGet, + "challenge": authNChallengeBase64Url, + "origin": "", + }, + extensionOk: true, + signatureOk: true, + }, { + description: "valid authn scheme signature", + authenticatorData: validAuthenticatorData, + clientDataJSON: validClientDataJSON, + extensionOk: true, + signatureOk: true, + }, + { + description: "more client data fields", + authenticatorData: validAuthenticatorData, + clientDataJSON: map[string]string{ + "type": flow.WebAuthnTypeGet, + "challenge": authNChallengeBase64Url, + "origin": validClientDataOrigin, + 
"other1": "random", + "other2": "random", + }, + extensionOk: true, + signatureOk: true, + }, + } + + // run all cases above + for _, c := range cases { + t.Run(fmt.Sprintf("auth scheme - %s (authenticatorData)", c.description), func(t *testing.T) { + // This will be the equivalent of possible client side actions, while mocking out the majority of + // the webauthn process. + // Could eventually consider using flow-go-sdk here if it makes sense + seed := make([]byte, seedLength) + _, err := rand.Read(seed) + require.NoError(t, err) + sk, err := crypto.GeneratePrivateKey(s, seed) + require.NoError(t, err) + + // generate the extension data, based on the client data and authenticator data indicated by the test case + clientDataJsonBytes, err := json.Marshal(c.clientDataJSON) + require.NoError(t, err) + + extensionData := flow.WebAuthnExtensionData{ + AuthenticatorData: c.authenticatorData, + ClientDataJson: clientDataJsonBytes, + } + + // RLP Encode the extension data + // This is the equivalent of the client side encoding + extensionDataRLPBytes := modelrlp.NewMarshaler().MustMarshal(extensionData) + + // Construct the message to sign in the same way a client would, as per + // https://github.com/onflow/flips/blob/tarak/webauthn/protocol/20250203-webauthn-credential-support.md#fvm-transaction-validation-changes + var clientDataHash [hash.HashLenSHA2_256]byte + hash.ComputeSHA2_256(&clientDataHash, clientDataJsonBytes) + messageToSign := slices.Concat(c.authenticatorData, clientDataHash[:]) + + // Sign as "client" + accountHasher, err := fvmCrypto.NewPrefixedHashing(h, "") + require.NoError(t, err) + signature, err := sk.Sign(messageToSign, accountHasher) + require.NoError(t, err) + + // Verify as "server" + transactionBody.PayloadSignatures[0].ExtensionData = slices.Concat([]byte{byte(flow.WebAuthnScheme)}, extensionDataRLPBytes[:]) + transactionBody.PayloadSignatures[0].Signature = signature + + extensionDataValid, message := transactionBody.PayloadSignatures[0].ValidateExtensionDataAndReconstructMessage(transactionMessage) + signatureValid, err := fvmCrypto.VerifySignatureFromTransaction(signature, message, sk.PublicKey(), h) + require.NoError(t, err) + require.Equal(t, c.extensionOk, extensionDataValid) + if c.extensionOk { + require.Equal(t, c.signatureOk, signatureValid) + } + }) + } + }) + + t.Run("invalid authentication schemes", func(t *testing.T) { + cases := []struct { + description string + scheme flow.AuthenticationScheme + extensionOk bool + }{ + { + description: "invalid scheme (0x02)", + scheme: flow.InvalidScheme, + extensionOk: false, + }, { + description: "invalid scheme, parsed using AuthenticationSchemeFromByte (0xFF)", + scheme: flow.AuthenticationSchemeFromByte(0xFF), + extensionOk: false, + }, + } + + for _, c := range cases { + + t.Run(fmt.Sprintf("%s - auth scheme - %v", c.description, c.scheme), func(t *testing.T) { + // apply the extention + transactionBody.PayloadSignatures[0].ExtensionData = []byte{byte(c.scheme)} + + // Validate the transaction signature extension + transactionMessage := unittest.RandomBytes(20) + extensionDataValid, message := transactionBody.PayloadSignatures[0].ValidateExtensionDataAndReconstructMessage(transactionMessage) + require.Nil(t, message) + require.Equal(t, c.extensionOk, extensionDataValid) + }) + + } + }) +} + +// TestTransactionBodyID_Malleability provides basic validation that [flow.TransactionBody] is not malleable. 
+func TestTransactionBodyID_Malleability(t *testing.T) { + txbody := unittest.TransactionBodyFixture() + unittest.RequireEntityNonMalleable(t, &txbody, unittest.WithTypeGenerator[flow.TransactionSignature](func() flow.TransactionSignature { + return unittest.TransactionSignatureFixture() + })) +} + +// TestTransactionBody_Fingerprint provides basic validation that the [TransactionBody] fingerprint +// is equivalent to its canonical RLP encoding. +func TestTransactionBody_Fingerprint(t *testing.T) { + txbody := unittest.TransactionBodyFixture() + fp1 := txbody.Fingerprint() + fp2 := fingerprint.Fingerprint(txbody) + fp3, err := rlp.EncodeToBytes(txbody) + require.NoError(t, err) + assert.Equal(t, fp1, fp2) + assert.Equal(t, fp2, fp3) +} + +// TestNewTransactionBody verifies that NewTransactionBody constructs a valid TransactionBody +// when given all required fields, and returns an error if any mandatory field is missing. +// +// Test Cases: +// +// 1. Valid input: +// - Payer is non-empty and Script is non-empty. +// - Ensures a TransactionBody is returned with all fields populated correctly. +// +// 2. Empty Script: +// - Script slice is empty. +// - Ensures an error is returned mentioning "Script must not be empty". +func TestNewTransactionBody(t *testing.T) { + t.Run("valid input", func(t *testing.T) { + utb := UntrustedTransactionBodyFixture() + + tb, err := flow.NewTransactionBody(utb) + assert.NoError(t, err) + assert.NotNil(t, tb) + + assert.Equal(t, flow.TransactionBody(utb), *tb) + }) + + t.Run("empty Script", func(t *testing.T) { + utb := UntrustedTransactionBodyFixture(func(u *flow.UntrustedTransactionBody) { + u.Script = []byte{} + }) + + tb, err := flow.NewTransactionBody(utb) + assert.Error(t, err) + assert.Nil(t, tb) + assert.Contains(t, err.Error(), "Script must not be empty") + }) +} + +// UntrustedTransactionBodyFixture returns an UntrustedTransactionBody +// pre‐populated with sane defaults. Any opts override those defaults. +func UntrustedTransactionBodyFixture(opts ...func(*flow.UntrustedTransactionBody)) flow.UntrustedTransactionBody { + u := flow.UntrustedTransactionBody(unittest.TransactionBodyFixture()) + for _, opt := range opts { + opt(&u) + } + return u +} diff --git a/model/flow/transaction_timing.go b/model/flow/transaction_timing.go index 5f2c58812de..3b2b760bf28 100644 --- a/model/flow/transaction_timing.go +++ b/model/flow/transaction_timing.go @@ -10,12 +10,5 @@ type TransactionTiming struct { Received time.Time Finalized time.Time Executed time.Time -} - -func (t TransactionTiming) ID() Identifier { - return t.TransactionID -} - -func (t TransactionTiming) Checksum() Identifier { - return t.TransactionID + Sealed time.Time } diff --git a/model/flow/version_beacon.go b/model/flow/version_beacon.go index 98a2090dbc0..adbedd41c91 100644 --- a/model/flow/version_beacon.go +++ b/model/flow/version_beacon.go @@ -1,6 +1,7 @@ package flow import ( + "bytes" "fmt" "github.com/coreos/go-semver/semver" @@ -19,7 +20,8 @@ func (v VersionBoundary) Semver() (*semver.Version, error) { } // VersionBeacon represents a service event specifying the required software versions -// for upcoming blocks. +// for executing upcoming blocks. It ensures that Execution and Verification Nodes are +// using consistent versions of Cadence when executing the same blocks. // // It contains a VersionBoundaries field, which is an ordered list of VersionBoundary // (sorted by VersionBoundary.BlockHeight). 
While heights are strictly @@ -39,7 +41,8 @@ type VersionBeacon struct { } // SealedVersionBeacon is a VersionBeacon with a SealHeight field. -// Version beacons are effective only after they are sealed. +// Version beacons are effective only after the results containing the version beacon +// are sealed. type SealedVersionBeacon struct { *VersionBeacon SealHeight uint64 @@ -145,3 +148,52 @@ func (v *VersionBeacon) Validate() error { return nil } + +func (v *VersionBeacon) String() string { + var buffer bytes.Buffer + for _, boundary := range v.VersionBoundaries { + buffer.WriteString(fmt.Sprintf("%d:%s ", boundary.BlockHeight, boundary.Version)) + } + return buffer.String() +} + +// ProtocolStateVersionUpgrade is a service event emitted by the FlowServiceAccount +// to signal an upgrade to the Protocol State version. `NewProtocolStateVersion` +// must be strictly greater than the currently active Protocol State Version, +// otherwise the service event is ignored. +// If the node software supports `NewProtocolStateVersion`, then it uses the +// specified Protocol State Version, beginning with the first block `X` where BOTH: +// 1. The `ProtocolStateVersionUpgrade` service event has been sealed in X's ancestry +// 2. X.view >= `ActiveView` +// +// NOTE: A ProtocolStateVersionUpgrade event `E` is accepted while processing block `B` +// which seals `E` if and only if E.ActiveView > B.View + SafetyThreshold. +// SafetyThreshold is a protocol parameter set so that it is overwhelmingly likely that +// block `B` is finalized (ergo the protocol version switch at the specified view `E.ActiveView`) +// within any stretch of SafetyThreshold-many views. +// TODO: This concept mirrors `FinalizationSafetyThreshold` and `versionBoundaryFreezePeriod` +// These parameters should be consolidated. +// +// Otherwise, the node software stops processing blocks, until it is manually updated +// to a compatible software version. +// The Protocol State version must be incremented when: +// - a change is made to the Protocol State Machine +// - a new key is added or removed from the Protocol State Key-Value Store +type ProtocolStateVersionUpgrade struct { + NewProtocolStateVersion uint64 + ActiveView uint64 +} + +// EqualTo returns true if the two events are equivalent. +func (u *ProtocolStateVersionUpgrade) EqualTo(other *ProtocolStateVersionUpgrade) bool { + return u.NewProtocolStateVersion == other.NewProtocolStateVersion && + u.ActiveView == other.ActiveView +} + +// ServiceEvent returns the event as a generic ServiceEvent type. 
+func (u *ProtocolStateVersionUpgrade) ServiceEvent() ServiceEvent { + return ServiceEvent{ + Type: ServiceEventProtocolStateVersionUpgrade, + Event: u, + } +} diff --git a/model/flow/version_beacon_test.go b/model/flow/version_beacon_test.go index 83f4542e827..5296a63c65f 100644 --- a/model/flow/version_beacon_test.go +++ b/model/flow/version_beacon_test.go @@ -213,3 +213,14 @@ func TestValidate(t *testing.T) { }) } } + +func TestVersionBeaconString(t *testing.T) { + vb := &flow.VersionBeacon{ + VersionBoundaries: []flow.VersionBoundary{ + {BlockHeight: 1, Version: "0.21.37"}, + {BlockHeight: 200, Version: "0.21.37-patch.1"}, + }, + Sequence: 1, + } + require.Equal(t, "1:0.21.37 200:0.21.37-patch.1 ", vb.String()) +} diff --git a/model/flow/webauthn.go b/model/flow/webauthn.go new file mode 100644 index 00000000000..5e6de31ddcc --- /dev/null +++ b/model/flow/webauthn.go @@ -0,0 +1,137 @@ +package flow + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "slices" + "strings" + + "github.com/ethereum/go-ethereum/rlp" + + "github.com/onflow/crypto/hash" +) + +// Consolidation of WebAuthn related constants, types and functions +// All WebAuthn related constants, types and functions should be defined here + +const webAuthnChallengeLength = 32 +const webAuthnExtensionDataMinimumLength = 37 +const WebAuthnTypeGet = "webauthn.get" + +type WebAuthnExtensionData struct { + // The WebAuthn extension data + AuthenticatorData []byte + ClientDataJson []byte +} + +type CollectedClientData struct { + Type string `json:"type"` + Challenge string `json:"challenge"` + Origin string `json:"origin"` +} + +func (w *WebAuthnExtensionData) GetUnmarshalledCollectedClientData() (*CollectedClientData, error) { + clientData := new(CollectedClientData) + err := json.Unmarshal(w.ClientDataJson, clientData) + if err != nil { + return nil, err + } + return clientData, err +} + +// Helper functions for WebAuthn related operations + +// As per https://github.com/onflow/flips/blob/tarak/webauthn/protocol/20250203-webauthn-credential-support.md#fvm-transaction-validation-changes +// check UP is set, BS is not set if BE is not set, AT is only set if attested data is included, ED is set only if extension data is included. If any of the checks fail, return "invalid". +func validateFlags(flags byte, extensions []byte) error { + // Parse flags + if userPresent := (flags & 0x01) != 0; !userPresent { // Bit 0: User Present (UP). + return errors.New("invalid flags: user presence (UP) not set") + } + + backupEligibility := (flags & 0x08) != 0 // Bit 3: Backup Eligibility (BE). + backupState := (flags & 0x10) != 0 // Bit 4: Backup State (BS). + + if backupState && !backupEligibility { + return errors.New("invalid flags: backup state (BS) set without backup eligibility (BE)") + } + + attestationCredentialData := (flags & 0x40) != 0 // Bit 6: Attestation Credential Data (AT). + extensionData := (flags & 0x80) != 0 // Bit 7: Extension Data (ED). + + // For now, just check if there is a mismatch in expected state, + // i.e. no extension data but flags are set. 
+	if (len(extensions) > 0) != (attestationCredentialData || extensionData) {
+		return errors.New("invalid flags: Attestation Credential Data (AT) or Extension Data (ED) flags do not match the presence of extension data")
+	}
+
+	// If all checks pass, return nil
+	return nil
+}
+
+func validateWebAuthNExtensionData(extensionData []byte, payload []byte) (bool, []byte) {
+	// See FLIP 264 for more details
+	if len(extensionData) == 0 {
+		return false, nil
+	}
+	rlpEncodedWebAuthnData := extensionData[1:]
+	decodedWebAuthnData := &WebAuthnExtensionData{}
+	if err := rlp.DecodeBytes(rlpEncodedWebAuthnData, decodedWebAuthnData); err != nil {
+		return false, nil
+	}
+
+	clientData, err := decodedWebAuthnData.GetUnmarshalledCollectedClientData()
+	if err != nil {
+		return false, nil
+	}
+
+	// base64url decode the challenge, as that's the encoding used client side according to https://www.w3.org/TR/webauthn-3/#dictionary-client-data
+	clientDataChallenge, err := base64.RawURLEncoding.DecodeString(clientData.Challenge)
+	if err != nil {
+		return false, nil
+	}
+
+	if strings.Compare(clientData.Type, WebAuthnTypeGet) != 0 || len(clientDataChallenge) != webAuthnChallengeLength {
+		// invalid client data
+		return false, nil
+	}
+
+	// make sure the challenge is the hash of the transaction payload
+	hasher := hash.NewSHA2_256()
+	_, err = hasher.Write(TransactionDomainTag[:])
+	if err != nil {
+		return false, nil
+	}
+	_, err = hasher.Write(payload)
+	if err != nil {
+		return false, nil
+	}
+	computedChallenge := hasher.SumHash()
+	if !computedChallenge.Equal(clientDataChallenge) {
+		return false, nil
+	}
+
+	// Validate authenticatorData
+	if len(decodedWebAuthnData.AuthenticatorData) < webAuthnExtensionDataMinimumLength {
+		return false, nil
+	}
+
+	// extract rpIdHash and userFlags; any bytes beyond the minimum length are extensions
+	rpIdHash := decodedWebAuthnData.AuthenticatorData[:webAuthnChallengeLength]
+	userFlags := decodedWebAuthnData.AuthenticatorData[webAuthnChallengeLength]
+	extensions := decodedWebAuthnData.AuthenticatorData[webAuthnExtensionDataMinimumLength:]
+	// the rpIdHash must not collide with the Flow transaction domain tag
+	if bytes.Equal(TransactionDomainTag[:], rpIdHash) {
+		return false, nil
+	}
+
+	// validate user flags according to FLIP 264
+	if err := validateFlags(userFlags, extensions); err != nil {
+		return false, nil
+	}
+
+	clientDataHash := hash.NewSHA2_256().ComputeHash(decodedWebAuthnData.ClientDataJson)
+
+	return true, slices.Concat(decodedWebAuthnData.AuthenticatorData, clientDataHash)
+}
diff --git a/model/hash/hash.go b/model/hash/hash.go
index e4568851e26..ec494525b49 100644
--- a/model/hash/hash.go
+++ b/model/hash/hash.go
@@ -1,28 +1,15 @@ package hash
 
 import (
-	"sync"
-
-	"github.com/onflow/flow-go/crypto/hash"
+	"github.com/onflow/crypto/hash"
 )
 
-// DefaultHasher is the default hasher used by Flow.
-var DefaultHasher hash.Hasher
-
-type defaultHasher struct {
-	hash.Hasher
-	sync.Mutex
-}
-
-func (h *defaultHasher) ComputeHash(b []byte) hash.Hash {
-	h.Lock()
-	defer h.Unlock()
-	return h.Hasher.ComputeHash(b)
-}
-
-func init() {
-	DefaultHasher = &defaultHasher{
-		hash.NewSHA3_256(),
-		sync.Mutex{},
-	}
+// DefaultComputeHash is the default hasher used by Flow.
+// +// `ComputeSHA3_256` can be used directly +// to minimize heap allocations +func DefaultComputeHash(data []byte) hash.Hash { + var res [hash.HashLenSHA3_256]byte + hash.ComputeSHA3_256(&res, data) + return hash.Hash(res[:]) } diff --git a/model/libp2p/message/testmessage.go b/model/libp2p/message/testmessage.go index af740d95289..b93cf5a0f05 100644 --- a/model/libp2p/message/testmessage.go +++ b/model/libp2p/message/testmessage.go @@ -1,6 +1,26 @@ package message -// TestMessage is used for testing the network layer. -type TestMessage struct { - Text string +import "github.com/onflow/flow-go/model/flow" + +// TestMessage is the untrusted network-level representation of a test message. +// +// This type exists purely for exercising the network layer in tests and carries +// no guarantees about structural validity. It implements messages.UntrustedMessage interface so +// it can be transmitted and decoded like any other message type. +// +// Use ToInternal to convert this to the internal flow.TestMessage before +// consuming it inside the node. +type TestMessage flow.TestMessage + +// ToInternal converts the untrusted TestMessage into its trusted internal +// representation. +// +// Since TestMessage is only used for testing, the conversion is effectively +// a passthrough that wraps the network-level data in a flow.TestMessage. +// In production message types, this method would enforce structural validity +// through flow constructors. +func (t *TestMessage) ToInternal() (any, error) { + return &flow.TestMessage{ + Text: t.Text, + }, nil } diff --git a/model/libp2p/peer/filters.go b/model/libp2p/peer/filters.go index dc42ac44d99..eaa2d954299 100644 --- a/model/libp2p/peer/filters.go +++ b/model/libp2p/peer/filters.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package peer import ( diff --git a/model/messages/collection.go b/model/messages/collection.go index 3ef3251698b..79e86cca7d8 100644 --- a/model/messages/collection.go +++ b/model/messages/collection.go @@ -1,14 +1,24 @@ package messages import ( + "fmt" + "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" ) -// SubmitCollectionGuarantee is a request to submit the given collection -// guarantee to consensus nodes. Only valid as a node-local message. -type SubmitCollectionGuarantee struct { - Guarantee flow.CollectionGuarantee +// ClusterProposal is a signed cluster block proposal in collection node cluster consensus. +type ClusterProposal cluster.UntrustedProposal + +// ToInternal returns the internal type representation for ClusterProposal. +// +// All errors indicate that the decode target contains a structurally invalid representation of the internal cluster.Proposal. +func (p *ClusterProposal) ToInternal() (any, error) { + internal, err := cluster.NewProposal(cluster.UntrustedProposal(*p)) + if err != nil { + return nil, fmt.Errorf("could not convert %T to internal type: %w", p, err) + } + return internal, nil } // CollectionRequest request all transactions from a collection with the given @@ -24,76 +34,59 @@ type CollectionResponse struct { Nonce uint64 // so that we aren't deduplicated by the network layer } -// UntrustedClusterBlockPayload is a duplicate of cluster.Payload used within -// untrusted messages. It exists only to provide a memory-safe structure for -// decoding messages and should be replaced in the future by updating the core -// cluster.Payload type. 
-// Deprecated: Please update cluster.Payload.Collection to use []flow.TransactionBody,
-// then replace instances of this type with cluster.Payload
-type UntrustedClusterBlockPayload struct {
-	Collection       []flow.TransactionBody
-	ReferenceBlockID flow.Identifier
-}
-
-// UntrustedClusterBlock is a duplicate of cluster.Block used within
-// untrusted messages. It exists only to provide a memory-safe structure for
-// decoding messages and should be replaced in the future by updating the core
-// cluster.Block type.
-// Deprecated: Please update cluster.Payload.Collection to use []flow.TransactionBody,
-// then replace instances of this type with cluster.Block
-type UntrustedClusterBlock struct {
-	Header  flow.Header
-	Payload UntrustedClusterBlockPayload
-}
+// ClusterBlockVote is a vote for a proposed block in collection node cluster
+// consensus; effectively a vote for a particular collection.
+type ClusterBlockVote flow.BlockVote
 
-// ToInternal returns the internal representation of the type.
-func (ub *UntrustedClusterBlock) ToInternal() *cluster.Block {
-	block := &cluster.Block{
-		Header: &ub.Header,
-		Payload: &cluster.Payload{
-			ReferenceBlockID: ub.Payload.ReferenceBlockID,
-		},
-	}
-	for _, tx := range ub.Payload.Collection {
-		tx := tx
-		block.Payload.Collection.Transactions = append(block.Payload.Collection.Transactions, &tx)
+// ToInternal converts the untrusted ClusterBlockVote into its trusted internal
+// representation.
+func (c *ClusterBlockVote) ToInternal() (any, error) {
+	internal, err := flow.NewBlockVote(c.BlockID, c.View, c.SigData)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct cluster block vote: %w", err)
 	}
-	return block
+	return internal, nil
 }
 
-// UntrustedClusterBlockFromInternal converts the internal cluster.Block representation
-// to the representation used in untrusted messages.
-func UntrustedClusterBlockFromInternal(clusterBlock *cluster.Block) UntrustedClusterBlock {
-	block := UntrustedClusterBlock{
-		Header: *clusterBlock.Header,
-		Payload: UntrustedClusterBlockPayload{
-			ReferenceBlockID: clusterBlock.Payload.ReferenceBlockID,
-			Collection:       make([]flow.TransactionBody, 0, clusterBlock.Payload.Collection.Len()),
-		},
-	}
-	for _, tx := range clusterBlock.Payload.Collection.Transactions {
-		block.Payload.Collection = append(block.Payload.Collection, *tx)
+// ClusterTimeoutObject is part of the collection cluster protocol and represents a collection node
+// timing out in a given round. Contains a sequential number for deduplication purposes.
+type ClusterTimeoutObject TimeoutObject
+
+// ToInternal returns the internal type representation for ClusterTimeoutObject.
+//
+// All errors indicate that the decode target contains a structurally invalid representation of the internal model.TimeoutObject.
+func (c *ClusterTimeoutObject) ToInternal() (any, error) {
+	internal, err := (*TimeoutObject)(c).ToInternal()
+	if err != nil {
+		return nil, fmt.Errorf("could not convert %T to internal type: %w", c, err)
 	}
-	return block
+	return internal, nil
 }
 
-// ClusterBlockProposal is a proposal for a block in collection node cluster
-// consensus. The header contains information about consensus state and the
-// payload contains the proposed collection (may be empty).
-type ClusterBlockProposal struct {
-	Block UntrustedClusterBlock
-}
+// CollectionGuarantee is a message representation of a CollectionGuarantee, which is used
+// to announce collections to consensus nodes.
+type CollectionGuarantee flow.UntrustedCollectionGuarantee -func NewClusterBlockProposal(internal *cluster.Block) *ClusterBlockProposal { - return &ClusterBlockProposal{ - Block: UntrustedClusterBlockFromInternal(internal), +// ToInternal returns the internal type representation for CollectionGuarantee. +// +// All errors indicate that the decode target contains a structurally invalid representation of the internal flow.CollectionGuarantee. +func (c *CollectionGuarantee) ToInternal() (any, error) { + internal, err := flow.NewCollectionGuarantee(flow.UntrustedCollectionGuarantee(*c)) + if err != nil { + return nil, fmt.Errorf("could not construct guarantee: %w", err) } + return internal, nil } -// ClusterBlockVote is a vote for a proposed block in collection node cluster -// consensus; effectively a vote for a particular collection. -type ClusterBlockVote BlockVote +// TransactionBody is a message representation of a TransactionBody, which includes the main contents of a transaction +type TransactionBody flow.UntrustedTransactionBody -// ClusterTimeoutObject is part of the collection cluster protocol and represents a collection node -// timing out in given round. Contains a sequential number for deduplication purposes. -type ClusterTimeoutObject TimeoutObject +// ToInternal converts the untrusted TransactionBody into its trusted internal +// representation. +func (tb *TransactionBody) ToInternal() (any, error) { + internal, err := flow.NewTransactionBody(flow.UntrustedTransactionBody(*tb)) + if err != nil { + return nil, fmt.Errorf("could not construct transaction body: %w", err) + } + return internal, nil +} diff --git a/model/messages/consensus.go b/model/messages/consensus.go index 85c2730d1fc..3274bcf77cd 100644 --- a/model/messages/consensus.go +++ b/model/messages/consensus.go @@ -1,151 +1,51 @@ package messages import ( + "fmt" + + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" ) -// UntrustedExecutionResult is a duplicate of flow.ExecutionResult used within -// untrusted messages. It exists only to provide a memory-safe structure for -// decoding messages and should be replaced in the future by updating the core -// flow.ExecutionResult type. -// Deprecated: Please update flow.ExecutionResult to use []flow.Chunk, then -// replace instances of this type with flow.ExecutionResult -type UntrustedExecutionResult struct { - PreviousResultID flow.Identifier - BlockID flow.Identifier - Chunks []flow.Chunk - ServiceEvents flow.ServiceEventList - ExecutionDataID flow.Identifier -} +// Proposal is part of the consensus protocol and represents a signed proposal from a consensus node. +type Proposal flow.UntrustedProposal -// ToInternal returns the internal representation of the type. -func (ur *UntrustedExecutionResult) ToInternal() *flow.ExecutionResult { - result := flow.ExecutionResult{ - PreviousResultID: ur.PreviousResultID, - BlockID: ur.BlockID, - Chunks: make(flow.ChunkList, 0, len(ur.Chunks)), - ServiceEvents: ur.ServiceEvents, - ExecutionDataID: ur.ExecutionDataID, - } - for _, chunk := range ur.Chunks { - chunk := chunk - result.Chunks = append(result.Chunks, &chunk) - } - return &result -} - -// UntrustedExecutionResultFromInternal converts the internal flow.ExecutionResult representation -// to the representation used in untrusted messages. 
-func UntrustedExecutionResultFromInternal(internal *flow.ExecutionResult) UntrustedExecutionResult { - result := UntrustedExecutionResult{ - PreviousResultID: internal.PreviousResultID, - BlockID: internal.BlockID, - ServiceEvents: internal.ServiceEvents, - ExecutionDataID: internal.ExecutionDataID, - } - for _, chunk := range internal.Chunks { - result.Chunks = append(result.Chunks, *chunk) - } - return result -} - -// UntrustedBlockPayload is a duplicate of flow.Payload used within -// untrusted messages. It exists only to provide a memory-safe structure for -// decoding messages and should be replaced in the future by updating the core -// flow.Payload type. -// Deprecated: Please update flow.Payload to use []flow.Guarantee etc., then -// replace instances of this type with flow.Payload -type UntrustedBlockPayload struct { - Guarantees []flow.CollectionGuarantee - Seals []flow.Seal - Receipts []flow.ExecutionReceiptMeta - Results []UntrustedExecutionResult -} - -// UntrustedBlock is a duplicate of flow.Block used within -// untrusted messages. It exists only to provide a memory-safe structure for -// decoding messages and should be replaced in the future by updating the core -// flow.Block type. -// Deprecated: Please update flow.Payload to use []flow.Guarantee etc., then -// replace instances of this type with flow.Block -type UntrustedBlock struct { - Header flow.Header - Payload UntrustedBlockPayload -} - -// ToInternal returns the internal representation of the type. -func (ub *UntrustedBlock) ToInternal() *flow.Block { - block := flow.Block{ - Header: &ub.Header, - Payload: &flow.Payload{}, - } - for _, guarantee := range ub.Payload.Guarantees { - guarantee := guarantee - block.Payload.Guarantees = append(block.Payload.Guarantees, &guarantee) - } - for _, seal := range ub.Payload.Seals { - seal := seal - block.Payload.Seals = append(block.Payload.Seals, &seal) - } - for _, receipt := range ub.Payload.Receipts { - receipt := receipt - block.Payload.Receipts = append(block.Payload.Receipts, &receipt) - } - for _, result := range ub.Payload.Results { - result := result - block.Payload.Results = append(block.Payload.Results, result.ToInternal()) - } - - return &block -} - -// UntrustedBlockFromInternal converts the internal flow.Block representation -// to the representation used in untrusted messages. -func UntrustedBlockFromInternal(flowBlock *flow.Block) UntrustedBlock { - block := UntrustedBlock{ - Header: *flowBlock.Header, - } - for _, guarantee := range flowBlock.Payload.Guarantees { - block.Payload.Guarantees = append(block.Payload.Guarantees, *guarantee) - } - for _, seal := range flowBlock.Payload.Seals { - block.Payload.Seals = append(block.Payload.Seals, *seal) - } - for _, receipt := range flowBlock.Payload.Receipts { - block.Payload.Receipts = append(block.Payload.Receipts, *receipt) - } - for _, result := range flowBlock.Payload.Results { - block.Payload.Results = append(block.Payload.Results, UntrustedExecutionResultFromInternal(result)) - } - return block -} - -// BlockProposal is part of the consensus protocol and represents the leader -// of a consensus round pushing a new proposal to the network. -type BlockProposal struct { - Block UntrustedBlock -} - -func NewBlockProposal(internal *flow.Block) *BlockProposal { - return &BlockProposal{ - Block: UntrustedBlockFromInternal(internal), +// ToInternal returns the internal type representation for Proposal. 
+//
+// All errors indicate that the decode target contains a structurally invalid representation of the internal flow.Proposal.
+func (p *Proposal) ToInternal() (any, error) {
+	internal, err := flow.NewProposal(flow.UntrustedProposal(*p))
+	if err != nil {
+		return nil, fmt.Errorf("could not convert %T to internal type: %w", p, err)
+	}
+	return internal, nil
 }
 
 // BlockVote is part of the consensus protocol and represents a consensus node
 // voting on the proposal of the leader of a given round.
-type BlockVote struct {
-	BlockID flow.Identifier
-	View    uint64
-	SigData []byte
+type BlockVote flow.BlockVote
+
+// ToInternal converts the untrusted BlockVote into its trusted internal
+// representation.
+func (b *BlockVote) ToInternal() (any, error) {
+	internal, err := flow.NewBlockVote(b.BlockID, b.View, b.SigData)
+	if err != nil {
+		return nil, fmt.Errorf("could not construct block vote: %w", err)
+	}
+	return internal, nil
 }
 
 // TimeoutObject is part of the consensus protocol and represents a consensus node
 // timing out in given round. Contains a sequential number for deduplication purposes.
-type TimeoutObject struct {
-	TimeoutTick uint64
-	View        uint64
-	NewestQC    *flow.QuorumCertificate
-	LastViewTC  *flow.TimeoutCertificate
-	SigData     []byte
+type TimeoutObject model.UntrustedTimeoutObject
+
+// ToInternal returns the internal type representation for TimeoutObject.
+//
+// All errors indicate that the decode target contains a structurally invalid representation of the internal model.TimeoutObject.
+func (t *TimeoutObject) ToInternal() (any, error) {
+	internal, err := model.NewTimeoutObject(model.UntrustedTimeoutObject(*t))
+	if err != nil {
+		return nil, fmt.Errorf("could not convert %T to internal type: %w", t, err)
+	}
+	return internal, nil
 }
diff --git a/model/messages/convert_test.go b/model/messages/convert_test.go
index d7554020abe..bbcb98bae48 100644
--- a/model/messages/convert_test.go
+++ b/model/messages/convert_test.go
@@ -4,48 +4,42 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 
 	"github.com/onflow/flow-go/model/cluster"
+	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/model/messages"
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
-func TestBlockProposal(t *testing.T) {
-	block := unittest.FullBlockFixture()
-	proposal := messages.NewBlockProposal(&block)
-	converted := proposal.Block.ToInternal()
-	assert.Equal(t, &block, converted)
-}
-
-func TestClusterBlockProposal(t *testing.T) {
-	block := unittest.ClusterBlockFixture()
-	proposal := messages.NewClusterBlockProposal(&block)
-	converted := proposal.Block.ToInternal()
-	assert.Equal(t, &block, converted)
-}
-
 func TestBlockResponse(t *testing.T) {
-	expected := unittest.BlockFixtures(2)
+	expected := []flow.Proposal{*unittest.ProposalFixture(), *unittest.ProposalFixture()}
 	res := messages.BlockResponse{
-		Blocks: []messages.UntrustedBlock{
-			messages.UntrustedBlockFromInternal(expected[0]),
-			messages.UntrustedBlockFromInternal(expected[1]),
+		Blocks: []flow.UntrustedProposal{
+			flow.UntrustedProposal(expected[0]),
+			flow.UntrustedProposal(expected[1]),
 		},
 	}
-	converted := res.BlocksInternal()
-	assert.Equal(t, expected, converted)
+	internal, err := res.ToInternal()
+	require.NoError(t, err)
+	converted, ok := internal.(*flow.BlockResponse)
+	require.True(t, ok)
+	assert.Equal(t, expected, converted.Blocks)
 }
 
 func TestClusterBlockResponse(t *testing.T) {
 	b1 := unittest.ClusterBlockFixture()
 	b2 := unittest.ClusterBlockFixture()
-	expected := []*cluster.Block{&b1, &b2}
+	expected := []cluster.Proposal{*unittest.ClusterProposalFromBlock(b1), *unittest.ClusterProposalFromBlock(b2)}
 	res := messages.ClusterBlockResponse{
-		Blocks: []messages.UntrustedClusterBlock{
-			messages.UntrustedClusterBlockFromInternal(expected[0]),
-			messages.UntrustedClusterBlockFromInternal(expected[1]),
+		Blocks: []cluster.UntrustedProposal{
+			cluster.UntrustedProposal(expected[0]),
+			cluster.UntrustedProposal(expected[1]),
 		},
 	}
-	converted := res.BlocksInternal()
-	assert.Equal(t, expected, converted)
+	internal, err := res.ToInternal()
+	require.NoError(t, err)
+	converted, ok := internal.(*cluster.BlockResponse)
+	require.True(t, ok)
+	assert.Equal(t, expected, converted.Blocks)
 }
diff --git a/model/messages/dkg.go b/model/messages/dkg.go
index 9555f5cd7ba..bf58fe521f3 100644
--- a/model/messages/dkg.go
+++ b/model/messages/dkg.go
@@ -1,22 +1,19 @@
 package messages
 
 import (
-	"github.com/onflow/flow-go/crypto"
+	"github.com/onflow/crypto"
+
 	"github.com/onflow/flow-go/model/flow"
 )
 
 // DKGMessage is the type of message exchanged between DKG nodes.
-type DKGMessage struct {
-	Data          []byte
-	DKGInstanceID string
-}
+type DKGMessage flow.DKGMessage
 
-// NewDKGMessage creates a new DKGMessage.
-func NewDKGMessage(data []byte, dkgInstanceID string) DKGMessage {
-	return DKGMessage{
-		Data:          data,
-		DKGInstanceID: dkgInstanceID,
-	}
+// ToInternal returns the internal type representation for DKGMessage.
+//
+// No errors are expected during normal operations.
+func (d *DKGMessage) ToInternal() (any, error) {
+	return (*flow.DKGMessage)(d), nil
 }
 
 // PrivDKGMessageIn is a wrapper around a DKGMessage containing the network ID
diff --git a/model/messages/exchange.go b/model/messages/exchange.go
index 62c48e9c650..31c0bae738d 100644
--- a/model/messages/exchange.go
+++ b/model/messages/exchange.go
@@ -9,16 +9,23 @@ import (
 // specified here. In the typical case, the identifier is simply the ID of the
 // entity being requested, but more complex identifier-entity relationships can
 // be used as well.
-type EntityRequest struct {
-	Nonce     uint64
-	EntityIDs []flow.Identifier
+type EntityRequest flow.EntityRequest
+
+// ToInternal converts the untrusted EntityRequest into its trusted internal
+// representation.
+// No errors are expected during normal operations.
+func (e *EntityRequest) ToInternal() (any, error) {
+	return (*flow.EntityRequest)(e), nil
 }
 
 // EntityResponse is a response to an entity request, containing a set of
 // serialized entities and the identifiers used to request them. The returned
 // entity set may be empty or incomplete.
-type EntityResponse struct {
-	Nonce     uint64
-	EntityIDs []flow.Identifier
-	Blobs     [][]byte
+type EntityResponse flow.EntityResponse
+
+// ToInternal converts the untrusted EntityResponse into its trusted internal
+// representation.
+// No errors are expected during normal operations.
+func (e *EntityResponse) ToInternal() (any, error) {
+	return (*flow.EntityResponse)(e), nil
 }
diff --git a/model/messages/execution.go b/model/messages/execution.go
index e306a2d0d89..a9ccf0d9967 100644
--- a/model/messages/execution.go
+++ b/model/messages/execution.go
@@ -1,19 +1,54 @@
 package messages
 
 import (
+	"fmt"
+
 	"github.com/onflow/flow-go/model/flow"
 )
 
-// ChunkDataRequest represents a request for the a chunk data pack
+// ChunkDataRequest represents a request for the chunk data pack
 // which is specified by a chunk ID.
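An editorial aside on the conversion pattern above: dkg.go and exchange.go define each wire type as a Go type definition of its internal counterpart, which makes ToInternal a zero-copy pointer cast. A minimal self-contained sketch of that shape, using hypothetical stand-in types rather than flow-go APIs:

```go
package example

// Internal stands in for a flow.* model that carries no structural invariants.
type Internal struct {
	Data []byte
	ID   string
}

// Wire is the untrusted network-level representation. As a type definition
// of Internal, it shares Internal's underlying type exactly.
type Wire Internal

// ToInternal converts with a pointer cast: no fields are copied, and no
// validation is required because Internal enforces no invariants.
func (w *Wire) ToInternal() (any, error) {
	return (*Internal)(w), nil
}
```

The `(*Internal)(w)` conversion compiles only because the two named types have identical underlying types, so any field drift between the wire and internal definitions becomes a compile error rather than a silent decoding bug.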
-type ChunkDataRequest struct { - ChunkID flow.Identifier - Nonce uint64 // so that we aren't deduplicated by the network layer +type ChunkDataRequest flow.ChunkDataRequest + +// ToInternal returns the internal type representation for ChunkDataRequest. +// +// All errors indicate that the decode target contains a structurally invalid representation of the internal flow.ChunkDataRequest. +func (c *ChunkDataRequest) ToInternal() (any, error) { + return (*flow.ChunkDataRequest)(c), nil } // ChunkDataResponse is the response to a chunk data pack request. // It contains the chunk data pack of the interest. type ChunkDataResponse struct { - ChunkDataPack flow.ChunkDataPack + ChunkDataPack flow.UntrustedChunkDataPack Nonce uint64 // so that we aren't deduplicated by the network layer } + +// ToInternal returns the internal type representation for ChunkDataResponse. +// +// All errors indicate that the decode target contains a structurally invalid representation of the internal flow.ChunkDataResponse. +func (c *ChunkDataResponse) ToInternal() (any, error) { + chunkDataPack, err := flow.NewChunkDataPack(c.ChunkDataPack) + if err != nil { + return nil, fmt.Errorf("could not convert %T to internal type: %w", c.ChunkDataPack, err) + } + return &flow.ChunkDataResponse{ + Nonce: c.Nonce, + ChunkDataPack: *chunkDataPack, + }, nil +} + +// ExecutionReceipt is the full execution receipt, as sent by the Execution Node. +// Specifically, it contains the detailed execution result. +type ExecutionReceipt flow.UntrustedExecutionReceipt + +// ToInternal returns the internal type representation for ExecutionReceipt. +// +// All errors indicate that the decode target contains a structurally invalid representation of the internal flow.ExecutionReceipt. +func (er *ExecutionReceipt) ToInternal() (any, error) { + internal, err := flow.NewExecutionReceipt(flow.UntrustedExecutionReceipt(*er)) + if err != nil { + return nil, fmt.Errorf("could not convert %T to internal type: %w", er, err) + } + return internal, nil +} diff --git a/model/messages/synchronization.go b/model/messages/synchronization.go index 2b0a310c5e7..b579d2501d8 100644 --- a/model/messages/synchronization.go +++ b/model/messages/synchronization.go @@ -1,6 +1,8 @@ package messages import ( + "fmt" + "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" ) @@ -8,35 +10,56 @@ import ( // SyncRequest is part of the synchronization protocol and represents a node on // the network sharing the height of its latest finalized block and requesting // the same information from the recipient. -type SyncRequest struct { - Nonce uint64 - Height uint64 +// All SyncRequest messages are validated before being processed. If validation fails, then a misbehavior report is created. +// See synchronization.validateSyncRequestForALSP for more details. +type SyncRequest flow.SyncRequest + +// ToInternal returns the internal type representation for SyncRequest. +// +// All errors indicate that the decode target contains a structurally invalid representation of the internal flow.SyncRequest. +func (s *SyncRequest) ToInternal() (any, error) { + return (*flow.SyncRequest)(s), nil } // SyncResponse is part of the synchronization protocol and represents the reply // to a synchronization request that contains the latest finalized block height // of the responding node. -type SyncResponse struct { - Nonce uint64 - Height uint64 +type SyncResponse flow.SyncResponse + +// ToInternal returns the internal type representation for SyncResponse. 
+// +// All errors indicate that the decode target contains a structurally invalid representation of the internal flow.SyncResponse. +func (s *SyncResponse) ToInternal() (any, error) { + return (*flow.SyncResponse)(s), nil } // RangeRequest is part of the synchronization protocol and represents an active // (pulling) attempt to synchronize with the consensus state of the network. It // requests finalized blocks by a range of block heights, including from and to // heights. -type RangeRequest struct { - Nonce uint64 - FromHeight uint64 - ToHeight uint64 +// All RangeRequest messages are validated before being processed. If validation fails, then a misbehavior report is created. +// See synchronization.validateRangeRequestForALSP for more details. +type RangeRequest flow.RangeRequest + +// ToInternal returns the internal type representation for RangeRequest. +// +// All errors indicate that the decode target contains a structurally invalid representation of the internal flow.RangeRequest. +func (r *RangeRequest) ToInternal() (any, error) { + return (*flow.RangeRequest)(r), nil } -// BatchRequest is part of the sychronization protocol and represents an active +// BatchRequest is part of the synchronization protocol and represents an active // (pulling) attempt to synchronize with the consensus state of the network. It // requests finalized or unfinalized blocks by a list of block IDs. -type BatchRequest struct { - Nonce uint64 - BlockIDs []flow.Identifier +// All BatchRequest messages are validated before being processed. If validation fails, then a misbehavior report is created. +// See synchronization.validateBatchRequestForALSP for more details. +type BatchRequest flow.BatchRequest + +// ToInternal returns the internal type representation for BatchRequest. +// +// All errors indicate that the decode target contains a structurally invalid representation of the internal flow.BatchRequest. +func (b *BatchRequest) ToInternal() (any, error) { + return (*flow.BatchRequest)(b), nil } // BlockResponse is part of the synchronization protocol and represents the @@ -44,30 +67,48 @@ type BatchRequest struct { // that should correspond to the request. type BlockResponse struct { Nonce uint64 - Blocks []UntrustedBlock + Blocks []flow.UntrustedProposal } -func (br *BlockResponse) BlocksInternal() []*flow.Block { - internal := make([]*flow.Block, len(br.Blocks)) - for i, block := range br.Blocks { - block := block - internal[i] = block.ToInternal() +// ToInternal returns the internal type representation for BlockResponse. +// +// All errors indicate that the decode target contains a structurally invalid representation of the internal flow.BlockResponse. +func (br *BlockResponse) ToInternal() (any, error) { + internal := make([]flow.Proposal, len(br.Blocks)) + for i, untrusted := range br.Blocks { + proposal, err := flow.NewProposal(untrusted) + if err != nil { + return nil, fmt.Errorf("could not build proposal: %w", err) + } + internal[i] = *proposal } - return internal + return &flow.BlockResponse{ + Nonce: br.Nonce, + Blocks: internal, + }, nil } // ClusterBlockResponse is the same thing as BlockResponse, but for cluster // consensus. 
 type ClusterBlockResponse struct {
 	Nonce  uint64
-	Blocks []UntrustedClusterBlock
+	Blocks []cluster.UntrustedProposal
 }
 
-func (br *ClusterBlockResponse) BlocksInternal() []*cluster.Block {
-	internal := make([]*cluster.Block, len(br.Blocks))
-	for i, block := range br.Blocks {
-		block := block
-		internal[i] = block.ToInternal()
+// ToInternal returns the internal type representation for ClusterBlockResponse.
+//
+// All errors indicate that the decode target contains a structurally invalid representation of the internal cluster.BlockResponse.
+func (br *ClusterBlockResponse) ToInternal() (any, error) {
+	internal := make([]cluster.Proposal, len(br.Blocks))
+	for i, untrusted := range br.Blocks {
+		proposal, err := cluster.NewProposal(untrusted)
+		if err != nil {
+			return nil, fmt.Errorf("could not build proposal: %w", err)
+		}
+		internal[i] = *proposal
 	}
-	return internal
+	return &cluster.BlockResponse{
+		Nonce:  br.Nonce,
+		Blocks: internal,
+	}, nil
 }
diff --git a/model/messages/untrusted_message.go b/model/messages/untrusted_message.go
new file mode 100644
index 00000000000..334ff6aeff9
--- /dev/null
+++ b/model/messages/untrusted_message.go
@@ -0,0 +1,76 @@
+package messages
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/cluster"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/libp2p/message"
+)
+
+// UntrustedMessage represents the set of allowed decode target types for messages received over the network.
+// Conceptually, an UntrustedMessage implementation makes no guarantees whatsoever about its contents.
+// UntrustedMessage implementations must provide a ToInternal method, which converts the network message
+// to a corresponding internal type (typically in the model/flow package).
+// This conversion provides an opportunity to:
+//   - perform basic structural validity checks (required fields are non-nil, fields reference one another in a consistent way)
+//   - attach auxiliary information (for example, caching the hash of a model)
+//
+// Internal models abide by basic structural validity requirements, but are not trusted.
+// They may still represent invalid or Byzantine inputs in the context of the broader application state.
+// It is the responsibility of engines operating on these models to fully validate them.
+type UntrustedMessage interface {
+
+	// ToInternal returns the internal type (from flow.* constructors) representation.
+	// All errors indicate that the decode target contains a structurally invalid representation of the internal model.
+	ToInternal() (any, error)
+}
+
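To make the contract concrete, here is a hedged sketch of how a decode site is presumably expected to use it; the handler name and wiring are illustrative, not taken from flow-go:

```go
package example

import "fmt"

// UntrustedMessage mirrors the interface defined above.
type UntrustedMessage interface {
	ToInternal() (any, error)
}

// handleIncoming is an assumed engine-side entry point: it converts the
// decoded network message and rejects it if structural validation fails.
func handleIncoming(decoded UntrustedMessage) (any, error) {
	internal, err := decoded.ToInternal()
	if err != nil {
		return nil, fmt.Errorf("rejecting structurally invalid message: %w", err)
	}
	// The internal model is structurally valid but still untrusted: engines
	// must perform full protocol-level validation downstream.
	return internal, nil
}
```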
+// InternalToMessage converts an internal type into the
+// corresponding UntrustedMessage for the network.
+//
+// This is the inverse of ToInternal: instead of decoding a network
+// message into an internal model, it wraps or casts internal objects
+// so they can be encoded and sent over the network. Encoding always
+// requires an UntrustedMessage.
+//
+// No errors are expected during normal operation.
+// TODO: investigate how to eliminate this workaround in both ghost/rpc.go and corruptnet/message_processor.go
+func InternalToMessage(event interface{}) (UntrustedMessage, error) {
+	switch internal := event.(type) {
+	case *flow.Proposal:
+		return (*Proposal)(internal), nil
+	case *cluster.Proposal:
+		return (*ClusterProposal)(internal), nil
+	case *flow.EntityRequest:
+		return (*EntityRequest)(internal), nil
+	case *flow.EntityResponse:
+		return (*EntityResponse)(internal), nil
+	case *flow.TransactionBody:
+		return (*TransactionBody)(internal), nil
+	case *flow.CollectionGuarantee:
+		return (*CollectionGuarantee)(internal), nil
+	case *flow.SyncRequest:
+		return (*SyncRequest)(internal), nil
+	case *flow.SyncResponse:
+		return (*SyncResponse)(internal), nil
+	case *flow.BatchRequest:
+		return (*BatchRequest)(internal), nil
+	case *flow.ChunkDataRequest:
+		return (*ChunkDataRequest)(internal), nil
+	case *flow.ChunkDataResponse:
+		return &ChunkDataResponse{
+			ChunkDataPack: flow.UntrustedChunkDataPack(internal.ChunkDataPack),
+			Nonce:         internal.Nonce,
+		}, nil
+	case *flow.RangeRequest:
+		return (*RangeRequest)(internal), nil
+	case *flow.ExecutionReceipt:
+		return (*ExecutionReceipt)(internal), nil
+	case *flow.ResultApproval:
+		return (*ResultApproval)(internal), nil
+	case *flow.TestMessage:
+		return (*message.TestMessage)(internal), nil
+	default:
+		return nil, fmt.Errorf("cannot convert unsupported type %T", event)
+	}
+}
diff --git a/model/messages/verification.go b/model/messages/verification.go
index 40389c98512..786c66e87ea 100644
--- a/model/messages/verification.go
+++ b/model/messages/verification.go
@@ -1,6 +1,10 @@
 package messages
 
-import "github.com/onflow/flow-go/model/flow"
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+)
 
 // ApprovalRequest represents a request for a ResultApproval corresponding to
 // a specific chunk.
@@ -10,8 +14,49 @@ type ApprovalRequest struct {
 	ChunkIndex uint64
 }
 
+// ToInternal converts the untrusted ApprovalRequest into its trusted internal
+// representation.
+func (a *ApprovalRequest) ToInternal() (any, error) {
+	if a.ResultID == flow.ZeroID {
+		return nil, fmt.Errorf("ResultID of approval request must not be zero")
+	}
+
+	return &flow.ApprovalRequest{
+		Nonce:      a.Nonce,
+		ResultID:   a.ResultID,
+		ChunkIndex: a.ChunkIndex,
+	}, nil
+}
+
 // ApprovalResponse contains a response to an approval request.
 type ApprovalResponse struct {
 	Nonce    uint64
-	Approval flow.ResultApproval
+	Approval flow.UntrustedResultApproval
+}
+
+// ToInternal converts the untrusted ApprovalResponse into its trusted internal
+// representation.
+func (a *ApprovalResponse) ToInternal() (any, error) {
+	approval, err := flow.NewResultApproval(a.Approval)
+	if err != nil {
+		return nil, fmt.Errorf("invalid result approval: %w", err)
+	}
+
+	return &flow.ApprovalResponse{
+		Nonce:    a.Nonce,
+		Approval: *approval,
+	}, nil
+}
+
+// ResultApproval is a message representation of a ResultApproval, which includes an approval for a chunk, verified by a verification node.
+type ResultApproval flow.UntrustedResultApproval
+
+// ToInternal converts the untrusted ResultApproval into its trusted internal
+// representation.
+func (a *ResultApproval) ToInternal() (any, error) {
+	internal, err := flow.NewResultApproval(flow.UntrustedResultApproval(*a))
+	if err != nil {
+		return nil, fmt.Errorf("could not convert %T to internal type: %w", a, err)
+	}
+	return internal, nil
 }
diff --git a/model/verification/chunkDataPackRequest.go b/model/verification/chunkDataPackRequest.go
index 0c0cd4cd92a..52a257e982b 100644
--- a/model/verification/chunkDataPackRequest.go
+++ b/model/verification/chunkDataPackRequest.go
@@ -1,6 +1,8 @@
 package verification
 
 import (
+	"fmt"
+
 	"github.com/onflow/flow-go/model/chunks"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/model/flow/filter"
@@ -8,11 +10,51 @@ import (
 
 // ChunkDataPackRequest is an internal data structure in fetcher engine that is passed between the engine
 // and requester module. It conveys required information for requesting a chunk data pack.
+//
+//structwrite:immutable - mutations allowed only within the constructor
 type ChunkDataPackRequest struct {
 	chunks.Locator // uniquely identifies chunk
 	ChunkDataPackRequestInfo
 }
 
+// UntrustedChunkDataPackRequest is an untrusted input-only representation of a ChunkDataPackRequest,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
+//
+// An instance of UntrustedChunkDataPackRequest should be validated and converted into
+// a trusted ChunkDataPackRequest using the NewChunkDataPackRequest constructor.
+type UntrustedChunkDataPackRequest ChunkDataPackRequest
+
+// NewChunkDataPackRequest creates a new instance of ChunkDataPackRequest.
+// Construction of a ChunkDataPackRequest is allowed only within the constructor.
+//
+// All errors indicate a valid ChunkDataPackRequest cannot be constructed from the input.
+func NewChunkDataPackRequest(untrusted UntrustedChunkDataPackRequest) (*ChunkDataPackRequest, error) {
+	if untrusted.Locator.EqualTo(new(chunks.Locator)) {
+		return nil, fmt.Errorf("locator is empty")
+	}
+	if untrusted.ChunkID == flow.ZeroID {
+		return nil, fmt.Errorf("chunk ID must not be zero")
+	}
+	if len(untrusted.Agrees) == 0 {
+		return nil, fmt.Errorf("agrees list must not be empty")
+	}
+	if len(untrusted.Targets) == 0 {
+		return nil, fmt.Errorf("targets list must not be empty")
+	}
+	filteredTargets := untrusted.Targets.Filter(filter.HasRole[flow.Identity](flow.RoleExecution))
+	if len(filteredTargets) < len(untrusted.Targets) {
+		return nil, fmt.Errorf("only execution nodes identities must be provided in target list: %v", untrusted.Targets)
+	}
+	return &ChunkDataPackRequest{
+		Locator:                  untrusted.Locator,
+		ChunkDataPackRequestInfo: untrusted.ChunkDataPackRequestInfo,
+	}, nil
+}
+
 type ChunkDataPackRequestInfo struct {
 	ChunkID   flow.Identifier
 	Height    uint64 // block height of execution result of the chunk, used to drop chunk requests of sealed heights.
@@ -23,10 +65,14 @@ type ChunkDataPackRequestInfo struct {
 
 // SampleTargets returns identifier of execution nodes that can be asked for the chunk data pack, based on
 // the agreeing and disagreeing execution nodes of the chunk data pack request.
-func (c ChunkDataPackRequestInfo) SampleTargets(count int) flow.IdentifierList {
+func (c ChunkDataPackRequestInfo) SampleTargets(count int) (flow.IdentifierList, error) {
 	// if there are enough receipts produced the same result (agrees), we sample from them.
if len(c.Agrees) >= count { - return c.Targets.Filter(filter.HasNodeID(c.Agrees...)).Sample(uint(count)).NodeIDs() + sample, err := c.Targets.Filter(filter.HasNodeID[flow.Identity](c.Agrees...)).Sample(uint(count)) + if err != nil { + return nil, fmt.Errorf("sampling target failed: %w", err) + } + return sample.NodeIDs(), nil } // since there is at least one agree, then usually, we just need `count - 1` extra nodes as backup. @@ -35,8 +81,11 @@ func (c ChunkDataPackRequestInfo) SampleTargets(count int) flow.IdentifierList { // fetch from the one produced the same result (the only agree) need := uint(count - len(c.Agrees)) - nonResponders := c.Targets.Filter(filter.Not(filter.HasNodeID(c.Disagrees...))).Sample(need).NodeIDs() - return append(c.Agrees, nonResponders...) + nonResponders, err := c.Targets.Filter(filter.Not(filter.HasNodeID[flow.Identity](c.Disagrees...))).Sample(need) + if err != nil { + return nil, fmt.Errorf("sampling target failed: %w", err) + } + return append(c.Agrees, nonResponders.NodeIDs()...), nil } type ChunkDataPackRequestInfoList []*ChunkDataPackRequestInfo diff --git a/model/verification/chunkDataPackRequest_test.go b/model/verification/chunkDataPackRequest_test.go index cb64b7ec502..f681f03383c 100644 --- a/model/verification/chunkDataPackRequest_test.go +++ b/model/verification/chunkDataPackRequest_test.go @@ -7,8 +7,8 @@ import ( "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/model/verification" "github.com/onflow/flow-go/utils/unittest" ) @@ -56,7 +56,7 @@ func TestChunkDataPackRequestList_UniqueRequestInfo(t *testing.T) { return bytes.Compare(thisChunkIDReqInfo.Disagrees[p][:], thisChunkIDReqInfo.Disagrees[q][:]) < 0 }) - thisChunkIDReqInfo.Targets = thisChunkIDReqInfo.Targets.Sort(order.Canonical) + thisChunkIDReqInfo.Targets = thisChunkIDReqInfo.Targets.Sort(flow.Canonical[flow.Identity]) require.Equal(t, thisChunkIDReqInfo.Agrees, thisReq1.Agrees.Union(thisReq2.Agrees)) require.Equal(t, thisChunkIDReqInfo.Disagrees, thisReq1.Disagrees.Union(thisReq2.Disagrees)) @@ -67,3 +67,144 @@ func TestChunkDataPackRequestList_UniqueRequestInfo(t *testing.T) { otherChunkIDReqInfo := reqInfoMap[otherChunkID] require.Equal(t, *otherChunkIDReqInfo, otherReq.ChunkDataPackRequestInfo) } + +// TestNewChunkDataPackRequest tests the NewChunkDataPackRequest constructor with valid and invalid inputs. +// +// Valid Case: +// +// 1. Valid input with non-empty locator, non-zero ChunkID, and all required lists: +// - Should successfully construct a ChunkDataPackRequest. +// +// Invalid Cases: +// +// 2. Invalid input with empty locator: +// - Should return an error indicating the locator is empty. +// +// 3. Invalid input with zero ChunkID: +// - Should return an error indicating chunk ID must not be zero. +// +// 4. Invalid input with empty and nil Agrees list: +// - Should return an error indicating agrees list must not be empty. +// +// 5. Invalid input with empty and nil Targets list: +// - Should return an error indicating targets list must not be empty. +// +// 6. Invalid input with non-execution node in Targets list: +// - Should return an error indicating only execution node identities are allowed in the Targets list. 
+func TestNewChunkDataPackRequest(t *testing.T) { + chunkDataPackRequestInfo := unittest.ChunkDataPackRequestInfoFixture() + locator := *unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 0) + + t.Run("valid input with all required fields", func(t *testing.T) { + request, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: *chunkDataPackRequestInfo, + }, + ) + + require.NoError(t, err) + require.NotNil(t, request) + }) + + t.Run("invalid input with empty locator", func(t *testing.T) { + _, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: chunks.Locator{}, + ChunkDataPackRequestInfo: *chunkDataPackRequestInfo, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "locator is empty") + }) + + t.Run("invalid input with zero chunk ID", func(t *testing.T) { + info := *chunkDataPackRequestInfo + info.ChunkID = flow.ZeroID + + _, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "chunk ID must not be zero") + }) + + t.Run("input with invalid agrees", func(t *testing.T) { + info := *chunkDataPackRequestInfo + + // with nil agrees + info.Agrees = nil + _, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "agrees list must not be empty") + + // with empty agrees + info.Agrees = flow.IdentifierList{} + _, err = verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "agrees list must not be empty") + }) + + t.Run("input with invalid targets", func(t *testing.T) { + info := *chunkDataPackRequestInfo + + // with nil targets + info.Targets = nil + _, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "targets list must not be empty") + + // with empty targets + info.Targets = flow.IdentityList{} + _, err = verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "targets list must not be empty") + }) + + t.Run("invalid input with non-execution node in targets list", func(t *testing.T) { + info := *chunkDataPackRequestInfo + + // Append a non-execution identity + info.Targets = append(info.Targets, unittest.IdentityFixture( + unittest.WithNodeID(unittest.IdentifierFixture()), + unittest.WithRole(flow.RoleAccess), + )) + + _, err := verification.NewChunkDataPackRequest( + verification.UntrustedChunkDataPackRequest{ + Locator: locator, + ChunkDataPackRequestInfo: info, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "only execution nodes identities must be provided in target list") + }) +} diff --git a/model/verification/chunkDataPackResponse.go b/model/verification/chunkDataPackResponse.go index 1aa6412ac8b..f5bb639e77e 100644 --- a/model/verification/chunkDataPackResponse.go +++ b/model/verification/chunkDataPackResponse.go @@ -1,6 +1,8 @@ 
 package verification
 
 import (
+	"fmt"
+
 	"github.com/onflow/flow-go/model/chunks"
 	"github.com/onflow/flow-go/model/flow"
 )
@@ -8,7 +10,37 @@ import (
 // ChunkDataPackResponse is an internal data structure in fetcher engine that is passed between the fetcher
 // and requester engine. It conveys requested chunk data pack as well as meta-data for fetcher engine to
 // process the chunk data pack.
+//
+//structwrite:immutable - mutations allowed only within the constructor
 type ChunkDataPackResponse struct {
 	chunks.Locator
 	Cdp *flow.ChunkDataPack
 }
+
+// UntrustedChunkDataPackResponse is an untrusted input-only representation of a ChunkDataPackResponse,
+// used for construction.
+//
+// This type exists to ensure that constructor functions are invoked explicitly
+// with named fields, which improves clarity and reduces the risk of incorrect field
+// ordering during construction.
+//
+// An instance of UntrustedChunkDataPackResponse should be validated and converted into
+// a trusted ChunkDataPackResponse using the NewChunkDataPackResponse constructor.
+type UntrustedChunkDataPackResponse ChunkDataPackResponse
+
+// NewChunkDataPackResponse creates a new instance of ChunkDataPackResponse.
+// Construction of a ChunkDataPackResponse is allowed only within the constructor.
+//
+// All errors indicate a valid ChunkDataPackResponse cannot be constructed from the input.
+func NewChunkDataPackResponse(untrusted UntrustedChunkDataPackResponse) (*ChunkDataPackResponse, error) {
+	if untrusted.Locator.EqualTo(new(chunks.Locator)) {
+		return nil, fmt.Errorf("locator is empty")
+	}
+	if untrusted.Cdp == nil {
+		return nil, fmt.Errorf("chunk data pack must not be nil")
+	}
+	return &ChunkDataPackResponse{
+		Locator: untrusted.Locator,
+		Cdp:     untrusted.Cdp,
+	}, nil
+}
diff --git a/model/verification/chunkDataPackResponse_test.go b/model/verification/chunkDataPackResponse_test.go
new file mode 100644
index 00000000000..9ebc35e1d05
--- /dev/null
+++ b/model/verification/chunkDataPackResponse_test.go
@@ -0,0 +1,62 @@
+package verification_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/chunks"
+	"github.com/onflow/flow-go/model/verification"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestNewChunkDataPackResponse tests the NewChunkDataPackResponse constructor with valid and invalid inputs.
+//
+// Valid Case:
+//
+// 1. Valid input with non-empty locator and non-nil ChunkDataPack:
+//   - Should successfully construct a ChunkDataPackResponse.
+//
+// Invalid Cases:
+//
+// 2. Invalid input with empty locator:
+//   - Should return an error indicating the locator is empty.
+//
+// 3. Invalid input with nil ChunkDataPack:
+//   - Should return an error indicating the chunk data pack must not be nil.
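The //structwrite:immutable markers on ChunkDataPackRequest and ChunkDataPackResponse read as lint annotations that confine field writes to constructors. A minimal sketch of that convention, assuming a structwrite-style linter that flags assignments outside the constructor (the type and rule below are illustrative):

```go
package example

import "fmt"

//structwrite:immutable - mutations allowed only within the constructor
type Sealed struct {
	Value uint64
}

// NewSealed is the only place Sealed's fields may be written; it validates
// input before construction, so every Sealed in circulation is well-formed.
func NewSealed(value uint64) (*Sealed, error) {
	if value == 0 {
		return nil, fmt.Errorf("value must not be zero")
	}
	return &Sealed{Value: value}, nil
}

// Anywhere else, an assignment like `s.Value = 7` would be flagged by the
// linter instead of relying on code review to catch it.
```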
+func TestNewChunkDataPackResponse(t *testing.T) { + t.Run("valid input with locator and chunk data pack", func(t *testing.T) { + response, err := verification.NewChunkDataPackResponse( + verification.UntrustedChunkDataPackResponse{ + Locator: *unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 0), + Cdp: unittest.ChunkDataPackFixture(unittest.IdentifierFixture()), + }) + + require.NoError(t, err) + require.NotNil(t, response) + }) + + t.Run("invalid input with empty locator", func(t *testing.T) { + _, err := verification.NewChunkDataPackResponse( + verification.UntrustedChunkDataPackResponse{ + Locator: chunks.Locator{}, // empty locator + Cdp: unittest.ChunkDataPackFixture(unittest.IdentifierFixture()), + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "locator is empty") + }) + + t.Run("invalid input with nil chunk data pack", func(t *testing.T) { + _, err := verification.NewChunkDataPackResponse( + verification.UntrustedChunkDataPackResponse{ + Locator: *unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), 0), + Cdp: nil, + }, + ) + + require.Error(t, err) + require.Contains(t, err.Error(), "chunk data pack must not be nil") + }) +} diff --git a/model/verification/chunkStatus.go b/model/verification/chunkStatus.go index 675011d0990..fbe2208fc28 100644 --- a/model/verification/chunkStatus.go +++ b/model/verification/chunkStatus.go @@ -1,7 +1,6 @@ package verification import ( - "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" ) @@ -16,10 +15,6 @@ func (s ChunkStatus) Chunk() *flow.Chunk { return s.ExecutionResult.Chunks[s.ChunkIndex] } -func (s ChunkStatus) ChunkLocatorID() flow.Identifier { - return chunks.ChunkLocatorID(s.ExecutionResult.ID(), s.ChunkIndex) -} - type ChunkStatusList []*ChunkStatus func (l ChunkStatusList) Chunks() flow.ChunkList { diff --git a/model/verification/convert/convert.go b/model/verification/convert/convert.go new file mode 100644 index 00000000000..3ffbcd3a8d8 --- /dev/null +++ b/model/verification/convert/convert.go @@ -0,0 +1,81 @@ +package convert + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/state/protocol" +) + +func FromChunkDataPack( + chunk *flow.Chunk, + chunkDataPack *flow.ChunkDataPack, + header *flow.Header, + snapshot protocol.SnapshotExecutionSubset, + result *flow.ExecutionResult, +) (*verification.VerifiableChunkData, error) { + + // system chunk is the last chunk + isSystemChunk := IsSystemChunk(chunk.Index, result) + + endState, err := EndStateCommitment(result, chunk.Index, isSystemChunk) + if err != nil { + return nil, fmt.Errorf("could not compute end state of chunk: %w", err) + } + + transactionOffset, err := TransactionOffsetForChunk(result.Chunks, chunk.Index) + if err != nil { + return nil, fmt.Errorf("cannot compute transaction offset for chunk: %w", err) + } + + return &verification.VerifiableChunkData{ + IsSystemChunk: isSystemChunk, + Chunk: chunk, + Header: header, + Snapshot: snapshot, + Result: result, + ChunkDataPack: chunkDataPack, + EndState: endState, + TransactionOffset: transactionOffset, + }, nil +} + +// EndStateCommitment computes the end state of the given chunk. 
+func EndStateCommitment(result *flow.ExecutionResult, chunkIndex uint64, systemChunk bool) (flow.StateCommitment, error) {
+	var endState flow.StateCommitment
+	if systemChunk {
+		var err error
+		// last chunk in a result is the system chunk and takes final state commitment
+		endState, err = result.FinalStateCommitment()
+		if err != nil {
+			return flow.DummyStateCommitment, fmt.Errorf("cannot read final state commitment, likely a bug: %w", err)
+		}
+	} else {
+		// any chunk except the last takes the subsequent chunk's start state
+		endState = result.Chunks[chunkIndex+1].StartState
+	}
+
+	return endState, nil
+}
+
+// TransactionOffsetForChunk calculates the transaction offset for a given chunk, which is the index of the first
+// transaction of this chunk within the whole block.
+func TransactionOffsetForChunk(chunks flow.ChunkList, chunkIndex uint64) (uint32, error) {
+	if int(chunkIndex) > len(chunks)-1 {
+		return 0, fmt.Errorf("chunk list out of bounds, len %d asked for chunk %d", len(chunks), chunkIndex)
+	}
+	var offset uint32 = 0
+	for i := 0; i < int(chunkIndex); i++ {
+		offset += uint32(chunks[i].NumberOfTransactions)
+	}
+	return offset, nil
+}
+
+// IsSystemChunk returns true if `chunkIndex` points to a system chunk in `result`.
+// Otherwise, it returns false.
+// In the current version, a chunk is a system chunk if it is the last chunk of the
+// execution result.
+func IsSystemChunk(chunkIndex uint64, result *flow.ExecutionResult) bool {
+	return chunkIndex == uint64(len(result.Chunks)-1)
+}
diff --git a/model/verification/receiptDataPack.go b/model/verification/receiptDataPack.go
deleted file mode 100644
index 69fa436c0bf..00000000000
--- a/model/verification/receiptDataPack.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package verification
-
-import (
-	"context"
-
-	"github.com/onflow/flow-go/model/flow"
-)
-
-// ReceiptDataPack represents an execution receipt with some metadata.
-// This is an internal entity for verification node.
-type ReceiptDataPack struct {
-	Receipt  *flow.ExecutionReceipt
-	OriginID flow.Identifier
-	Ctx      context.Context // used for span tracing
-}
-
-// ID returns the unique identifier for the ReceiptDataPack which is the
-// id of its execution receipt.
-func (r *ReceiptDataPack) ID() flow.Identifier {
-	return r.Receipt.ID()
-}
-
-// Checksum returns the checksum of the ReceiptDataPack.
-func (r *ReceiptDataPack) Checksum() flow.Identifier {
-	return flow.MakeID(r)
-}
diff --git a/model/verification/resultDataPack.go b/model/verification/resultDataPack.go
deleted file mode 100644
index 85d0983014e..00000000000
--- a/model/verification/resultDataPack.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package verification
-
-import (
-	"github.com/onflow/flow-go/model/flow"
-)
-
-// ResultDataPack represents an execution result with some metadata.
-// This is an internal entity for verification node.
-type ResultDataPack struct {
-	ExecutorID      flow.Identifier
-	ExecutionResult *flow.ExecutionResult
-}
-
-// ID returns the unique identifier for the ResultDataPack which is the
-// id of its execution result.
-func (r *ResultDataPack) ID() flow.Identifier {
-	return r.ExecutionResult.ID()
-}
-
-// Checksum returns the checksum of the ResultDataPack.
-func (r *ResultDataPack) Checksum() flow.Identifier {
-	return flow.MakeID(r)
-}
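A small usage sketch for the helpers above (illustrative only): TransactionOffsetForChunk sums NumberOfTransactions over all preceding chunks, so for chunks holding 3, 5, and 2 transactions the offsets are 0, 3, and 8, and IsSystemChunk is true only for the last index.

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/model/verification/convert"
)

// printChunkLayout reports where each chunk's transactions start within the
// block, using the offset rule implemented by TransactionOffsetForChunk.
func printChunkLayout(result *flow.ExecutionResult) error {
	for i := range result.Chunks {
		offset, err := convert.TransactionOffsetForChunk(result.Chunks, uint64(i))
		if err != nil {
			return fmt.Errorf("could not compute offset for chunk %d: %w", i, err)
		}
		fmt.Printf("chunk %d: first transaction at block index %d, system chunk: %v\n",
			i, offset, convert.IsSystemChunk(uint64(i), result))
	}
	return nil
}
```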
diff --git a/model/verification/verifiableChunkData.go b/model/verification/verifiableChunkData.go
index 298beece37f..ec2ec448351 100644
--- a/model/verification/verifiableChunkData.go
+++ b/model/verification/verifiableChunkData.go
@@ -2,16 +2,18 @@ package verification
 
 import (
 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
 )
 
 // VerifiableChunkData represents a ready-to-verify chunk
 // It contains the execution result as well as all resources needed to verify it
 type VerifiableChunkData struct {
-	IsSystemChunk     bool                  // indicates whether this is a system chunk
-	Chunk             *flow.Chunk           // the chunk to be verified
-	Header            *flow.Header          // BlockHeader that contains this chunk
-	Result            *flow.ExecutionResult // execution result of this block
-	ChunkDataPack     *flow.ChunkDataPack   // chunk data package needed to verify this chunk
-	EndState          flow.StateCommitment  // state commitment at the end of this chunk
-	TransactionOffset uint32                // index of the first transaction in a chunk within a block
+	IsSystemChunk     bool                             // indicates whether this is a system chunk
+	Chunk             *flow.Chunk                      // the chunk to be verified
+	Header            *flow.Header                     // BlockHeader that contains this chunk
+	Snapshot          protocol.SnapshotExecutionSubset // state snapshot at the chunk's block
+	Result            *flow.ExecutionResult            // execution result of this block
+	ChunkDataPack     *flow.ChunkDataPack              // chunk data package needed to verify this chunk
+	EndState          flow.StateCommitment             // state commitment at the end of this chunk
+	TransactionOffset uint32                           // index of the first transaction in a chunk within a block
 }
diff --git a/module/blobs/blob_io.go b/module/blobs/blob_io.go
index b9ff0482173..366d97eab3e 100644
--- a/module/blobs/blob_io.go
+++ b/module/blobs/blob_io.go
@@ -64,7 +64,9 @@ func (bw *BlobChannelWriter) Write(data []byte) (int, error) {
 // sendNewBlob sends the currently buffered data to the blob channel and resets the buffer.
 func (bw *BlobChannelWriter) sendNewBlob() {
 	blob := NewBlob(bw.buf.Bytes())
-	bw.blobs <- blob
+	if bw.blobs != nil {
+		bw.blobs <- blob
+	}
 	bw.cids = append(bw.cids, blob.Cid())
 	bw.bytesSent += uint64(bw.buf.Len())
diff --git a/module/blobs/blob_store.go b/module/blobs/blob_store.go
index 903d3358ae9..f298a465cd4 100644
--- a/module/blobs/blob_store.go
+++ b/module/blobs/blob_store.go
@@ -4,9 +4,9 @@ import (
 	"context"
 	"errors"
 
+	"github.com/ipfs/boxo/blockstore"
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-datastore"
-	blockstore "github.com/ipfs/go-ipfs-blockstore"
 	ipld "github.com/ipfs/go-ipld-format"
 )
 
@@ -86,6 +86,10 @@ func (bs *blobstoreImpl) HashOnRead(enabled bool) {
 // BlockExecutionData IDs without storing the data.
 type NoopBlobstore struct{}
 
+func NewNoopBlobstore() *NoopBlobstore {
+	return &NoopBlobstore{}
+}
+
 func (n *NoopBlobstore) DeleteBlob(context.Context, cid.Cid) error {
 	return nil
 }
diff --git a/module/block_iterator.go b/module/block_iterator.go
new file mode 100644
index 00000000000..ca16572ca2b
--- /dev/null
+++ b/module/block_iterator.go
@@ -0,0 +1,76 @@
+package module
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// IteratorRange defines the range of blocks to iterate over.
+// The range can be either view-based or height-based.
+// When specifying the range, the start and end are inclusive, and the end must be greater than or
+// equal to the start.
+type IteratorRange struct {
+	Start uint64 // the start of the range
+	End   uint64 // the end of the range
+}
+
+// IteratorState is an interface for reading and writing the progress of the iterator.
+type IteratorState interface {
+	IteratorStateReader
+	IteratorStateWriter
+}
+
+// IteratorStateReader reads the progress of the iterator, useful for resuming the iteration
+// after a restart.
+type IteratorStateReader interface {
+	// LoadState reads the next block to iterate.
+	// The caller must ensure the state is initialized, otherwise LoadState returns an exception.
+	LoadState() (progress uint64, exception error)
+}
+
+// IteratorStateWriter saves the progress of the iterator.
+type IteratorStateWriter interface {
+	// SaveState persists the next block to be iterated.
+	SaveState(uint64) (exception error)
+}
+
+// BlockIterator is an interface for iterating over blocks.
+type BlockIterator interface {
+	// Next returns the next block in the iterator.
+	// Note: this method is not concurrent-safe.
+	// Note: a block will only be iterated once in a single iteration; however,
+	// if the iteration is interrupted (e.g. by a restart), the iterator can be
+	// resumed from the last checkpoint, which might result in the same block being
+	// iterated again.
+	// TODO: once upgraded to go 1.23, consider using the Range iterator
+	//   Range() iter.Seq2[flow.Identifier, error]
+	// so that the iterator can be used in a for loop:
+	//   for blockID, err := range heightIterator.Range()
+	Next() (blockID flow.Identifier, hasNext bool, exception error)
+
+	// Checkpoint saves the current state of the iterator so that it can be resumed later.
+	// When Checkpoint is called, the next block to iterate is persisted via SaveState;
+	// if that block is A, then after a restart the iterator will resume from A.
+	// Make sure to call this only after processing of all the block IDs returned by
+	// Next() has completed.
+	// It returns the saved index (the next index to iterate).
+	// Any errors returned are exceptions.
+	Checkpoint() (savedIndex uint64, exception error)
+
+	// Progress returns the progress of the iterator.
+	Progress() (start, end, next uint64)
+}
+
+// IteratorCreator creates block iterators.
+// A block iterator iterates from a saved index to the latest block.
+// After iterating through all the blocks in the range, the iterator can be discarded.
+// A new block iterator can be created to iterate through the next range.
+// If there is no block to iterate, hasNext is false.
+// Any errors returned are exceptions.
+type IteratorCreator interface {
+	// Create returns the next block iterator.
+	Create() (fromSavedIndexToLatest BlockIterator, hasNext bool, exception error)
+
+	// IteratorState returns the iterator state, useful to know the progress of the iterator
+	// after each round of iteration.
+	IteratorState() IteratorStateReader
+}
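As an illustration of the Next/Checkpoint contract defined above, an assumed consumer loop (not part of this PR): drain the iterator until hasNext is false, process each block, and checkpoint only once processing has completed, so a restart resumes from the first unprocessed index.

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
)

// drain processes every block the iterator yields, then persists progress.
func drain(iter module.BlockIterator, process func(flow.Identifier) error) error {
	for {
		blockID, hasNext, err := iter.Next()
		if err != nil {
			return fmt.Errorf("iterator failure: %w", err)
		}
		if !hasNext {
			break
		}
		if err := process(blockID); err != nil {
			return fmt.Errorf("processing block %v failed: %w", blockID, err)
		}
	}
	// Checkpoint only after all returned block IDs are fully processed, as the
	// Checkpoint documentation above requires; savedIndex is the next index.
	savedIndex, err := iter.Checkpoint()
	if err != nil {
		return fmt.Errorf("failed to checkpoint: %w", err)
	}
	fmt.Printf("iteration complete, next index to iterate: %d\n", savedIndex)
	return nil
}
```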
diff --git a/module/block_iterator/creator.go b/module/block_iterator/creator.go
new file mode 100644
index 00000000000..746c95ffd4d
--- /dev/null
+++ b/module/block_iterator/creator.go
@@ -0,0 +1,121 @@
+package block_iterator
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/storage"
+)
+
+// Creator creates block iterators.
+// A block iterator iterates from a saved index to the latest block.
+// After iterating through all the blocks in the range, the iterator can be discarded.
+// A new block iterator can be created to iterate through the next range.
+type Creator struct {
+	getBlockIDByIndex func(uint64) (flow.Identifier, bool, error)
+	progress          *PersistentIteratorState
+}
+
+var _ module.IteratorCreator = (*Creator)(nil)
+
+// NewCreator creates a block iterator that iterates through blocks by index.
+// The root is the block index to start iterating from (it could be either the root height or the root view).
+// The latest is a function that returns the latest block index.
+// Since latest is a function, the caller can reuse the creator to create block iterators one
+// after another, iterating from the root to the latest, and from the last iterated to the new latest.
+func NewCreator(
+	getBlockIDByIndex func(uint64) (blockID flow.Identifier, indexed bool, exception error),
+	progressStorage storage.ConsumerProgressInitializer,
+	root uint64,
+	latest func() (uint64, error),
+) (*Creator, error) {
+	// initialize the progress in storage, saving the root block index in storage
+	progress, err := NewPersistentIteratorState(progressStorage, root, latest)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize progress: %w", err)
+	}
+
+	return &Creator{
+		getBlockIDByIndex: getBlockIDByIndex,
+		progress:          progress,
+	}, nil
+}
+
+func (c *Creator) Create() (iter module.BlockIterator, hasNext bool, exception error) {
+	// create an iteration range from the first un-iterated to the latest block
+	iterRange, hasNext, err := c.progress.NextRange()
+	if err != nil {
+		return nil, false, fmt.Errorf("failed to create range for block iteration: %w", err)
+	}
+
+	if !hasNext {
+		// no block to iterate
+		return nil, false, nil
+	}
+
+	// create a block iterator with
+	// the function to get block ID by index,
+	// the progress writer to update the progress in storage,
+	// and the iteration range
+	return NewIndexedBlockIterator(c.getBlockIDByIndex, c.progress, iterRange), true, nil
+}
+
+func (c *Creator) IteratorState() module.IteratorStateReader {
+	return c.progress
+}
+
+// NewHeightBasedCreator creates a block iterator that iterates through blocks
+// from root to the latest (either finalized or sealed) by height.
+func NewHeightBasedCreator(
+	getBlockIDByHeight func(height uint64) (flow.Identifier, error),
+	progress storage.ConsumerProgressInitializer,
+	root *flow.Header,
+	latest func() (*flow.Header, error),
+) (*Creator, error) {
+
+	return NewCreator(
+		func(height uint64) (flow.Identifier, bool, error) {
+			blockID, err := getBlockIDByHeight(height)
+			if err != nil {
+				return flow.Identifier{}, false, fmt.Errorf("failed to get block ID by height: %w", err)
+			}
+			// each height between root and latest (either finalized or sealed) must be indexed,
+			// so it's always true
+			alwaysIndexed := true
+			return blockID, alwaysIndexed, nil
+		},
+		progress,
+		root.Height,
+		func() (uint64, error) {
+			latestBlock, err := latest()
+			if err != nil {
+				return 0, fmt.Errorf("failed to get latest block: %w", err)
+			}
+			return latestBlock.Height, nil
+		},
+	)
+}
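Because a view without a block is expected rather than exceptional, a lookup passed to NewViewBasedCreator (defined next) should report gaps through the boolean instead of an error. A hedged sketch of such an adapter; the wrapped lookup and its sentinel error are stand-ins:

```go
package example

import (
	"errors"

	"github.com/onflow/flow-go/model/flow"
)

// errNotFound stands in for the storage-layer "not found" sentinel.
var errNotFound = errors.New("no block for view")

// viewLookupAdapter adapts a plain storage lookup (assumed shape) to the
// getBlockIDByView signature that NewViewBasedCreator expects: a missing
// view is reported as (zero ID, false, nil) so the iterator skips the gap
// instead of failing.
func viewLookupAdapter(lookup func(view uint64) (flow.Identifier, error)) func(uint64) (flow.Identifier, bool, error) {
	return func(view uint64) (flow.Identifier, bool, error) {
		id, err := lookup(view)
		if errors.Is(err, errNotFound) {
			return flow.Identifier{}, false, nil // view has no block: skip, not an error
		}
		if err != nil {
			return flow.Identifier{}, false, err // genuine exception
		}
		return id, true, nil
	}
}
```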
+func NewViewBasedCreator(
+	getBlockIDByView func(view uint64) (blockID flow.Identifier, viewIndexed bool, exception error),
+	progress storage.ConsumerProgressInitializer,
+	root *flow.Header,
+	latest func() (*flow.Header, error),
+) (*Creator, error) {
+	return NewCreator(
+		getBlockIDByView,
+		progress,
+		root.View,
+		func() (uint64, error) {
+			latestBlock, err := latest()
+			if err != nil {
+				return 0, fmt.Errorf("failed to get latest block: %w", err)
+			}
+			return latestBlock.View, nil
+		},
+	)
+}
diff --git a/module/block_iterator/creator_test.go b/module/block_iterator/creator_test.go
new file mode 100644
index 00000000000..9d5cb24822b
--- /dev/null
+++ b/module/block_iterator/creator_test.go
@@ -0,0 +1,383 @@
+package block_iterator
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// TestCanIterate: iterate through all heights from root to latest,
+// and after finishing the iteration, iterate again with an updated latest height,
+// verify it can iterate from latest + 1 to the new latest
+// TestCanResume: stop at a height, take a checkpoint, create a new iterator,
+// verify it will resume from the next height to the latest
+// TestCanSkipViewsIfNotIndexed: iterate through all views, and skip views that are not indexed
+// TestCanSkipIfThereIsNoBlockToIterate: skip iterating if there is no block to iterate
+
+func TestCanIterate(t *testing.T) {
+	root := &flow.Header{HeaderBody: flow.HeaderBody{Height: 0}}
+	// Create mock blocks
+	blocks := []*flow.Header{
+		{HeaderBody: flow.HeaderBody{Height: 1}},
+		{HeaderBody: flow.HeaderBody{Height: 2}},
+		{HeaderBody: flow.HeaderBody{Height: 3}},
+		{HeaderBody: flow.HeaderBody{Height: 4}},
+		{HeaderBody: flow.HeaderBody{Height: 5}},
+	}
+
+	// Mock getBlockIDByHeight function
+	getBlockIDByHeight := func(height uint64) (flow.Identifier, error) {
+		for _, block := range blocks {
+			if block.Height == height {
+				return block.ID(), nil
+			}
+		}
+		return flow.Identifier{}, fmt.Errorf("block not found at height %d", height)
+	}
+
+	// Mock progress tracker
+	progress := &mockProgress{}
+
+	// Mock latest function
+	latest := func() (*flow.Header, error) {
+		return blocks[len(blocks)-1], nil
+	}
+
+	// Create iterator
+	creator, err := NewHeightBasedCreator(
+		getBlockIDByHeight,
+		progress,
+		root,
+		latest,
+	)
+	require.NoError(t, err)
+
+	iterator, hasNext, err := creator.Create()
+	require.NoError(t, err)
+	require.True(t, hasNext)
+
+	// Iterate through blocks
+	visitedBlocks := make([]flow.Identifier, 0, len(blocks))
+	for {
+		blockID, ok, err := iterator.Next()
+		require.NoError(t, err)
+		if !ok {
+			break
+		}
+
+		visitedBlocks = append(visitedBlocks, blockID)
+	}
+
+	// Verify all blocks were visited exactly once
+	for i, block := range blocks {
+		require.Equal(t, block.ID(), visitedBlocks[i], "Block %v was not visited", block.Height)
+	}
+
+	// Verify no extra blocks were visited
+	require.Equal(t, len(blocks), len(visitedBlocks), "Unexpected number of blocks visited")
+
+	// Verify the final checkpoint
+	next, err := iterator.Checkpoint()
+	require.NoError(t, err)
+	require.Equal(t, uint64(6), next, "Expected next height to be 6 (last height + 1)")
+	savedNextHeight, err := progress.ProcessedIndex()
+	require.NoError(t, err)
+	require.Equal(t, uint64(6), savedNextHeight, "Expected next height to be 6 (last height + 1)")
+
+	// Additional blocks to be added later
+	additionalBlocks := []*flow.Header{
+		{HeaderBody: flow.HeaderBody{Height: 6}},
+		{HeaderBody: flow.HeaderBody{Height: 7}},
+		{HeaderBody: flow.HeaderBody{Height: 8}},
+	}
+	visitedBlocks = make([]flow.Identifier, 0, len(additionalBlocks))
+
+	// Update blocks so that the latest block is updated, and getBlockIDByHeight
+	// will return the new blocks
+	blocks = append(blocks, additionalBlocks...)
+
+	// Create another iterator
+	iterator, hasNext, err = creator.Create()
+	require.NoError(t, err)
+	require.True(t, hasNext)
+
+	// Iterate through the additional blocks
+	for i := 0; i < len(additionalBlocks); i++ {
+		blockID, ok, err := iterator.Next()
+		require.NoError(t, err)
+		require.True(t, ok)
+		visitedBlocks = append(visitedBlocks, blockID)
+	}
+
+	// No more blocks to iterate
+	_, ok, err := iterator.Next()
+	require.NoError(t, err)
+	require.False(t, ok)
+
+	// Verify all additional blocks were visited exactly once
+	for i, block := range additionalBlocks {
+		require.Equal(t, block.ID(), visitedBlocks[i], "Block %v was not visited", block.Height)
+	}
+
+	// Verify no extra blocks were visited
+	require.Equal(t, len(additionalBlocks), len(visitedBlocks), "Unexpected number of blocks visited")
+
+	// Verify the final checkpoint
+	next, err = iterator.Checkpoint()
+	require.NoError(t, err)
+	require.Equal(t, uint64(9), next, "Expected next height to be 9 (last height + 1)")
+	savedHeight, err := progress.ProcessedIndex()
+	require.NoError(t, err)
+	require.Equal(t, uint64(9), savedHeight, "Expected next height to be 9 (last height + 1)")
+}
+
+func TestCanResume(t *testing.T) {
+
+	root := &flow.Header{HeaderBody: flow.HeaderBody{Height: 0}}
+	// Create mock blocks
+	blocks := []*flow.Header{
+		{HeaderBody: flow.HeaderBody{Height: 1}},
+		{HeaderBody: flow.HeaderBody{Height: 2}},
+		{HeaderBody: flow.HeaderBody{Height: 3}},
+		{HeaderBody: flow.HeaderBody{Height: 4}},
+		{HeaderBody: flow.HeaderBody{Height: 5}},
+	}
+
+	// Mock getBlockIDByHeight function
+	getBlockIDByHeight := func(height uint64) (flow.Identifier, error) {
+		for _, block := range blocks {
+			if block.Height == height {
+				return block.ID(), nil
+			}
+		}
+		return flow.Identifier{}, fmt.Errorf("block not found at height %d", height)
+	}
+
+	// Mock progress tracker
+	progress := &mockProgress{}
+
+	// Mock latest function
+	latest := func() (*flow.Header, error) {
+		return blocks[len(blocks)-1], nil
+	}
+
+	// Create iterator
+	creator, err := NewHeightBasedCreator(
+		getBlockIDByHeight,
+		progress,
+		root,
+		latest,
+	)
+	require.NoError(t, err)
+
+	iterator, hasNext, err := creator.Create()
+	require.NoError(t, err)
+	require.True(t, hasNext)
+
+	// Iterate through blocks
+	visitedBlocks := make([]flow.Identifier, 0, len(blocks))
+	for i := 0; i < 3; i++ { // iterate up to Height 3
+		blockID, ok, err := iterator.Next()
+		require.NoError(t, err)
+		if !ok {
+			break
+		}
+		visitedBlocks = append(visitedBlocks, blockID)
+	}
+
+	// save the progress
+	_, err = iterator.Checkpoint()
+	require.NoError(t, err)
+
+	// Additional blocks to be added later
+	additionalBlocks := []*flow.Header{
+		{HeaderBody: flow.HeaderBody{Height: 6}},
+		{HeaderBody: flow.HeaderBody{Height: 7}},
+		{HeaderBody: flow.HeaderBody{Height: 8}},
+	}
+
+	// Update blocks so that the latest block is updated, and getBlockIDByHeight
+	// will return the new blocks
+	blocks = append(blocks, additionalBlocks...)
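+	// Note: Checkpoint above persisted next=4 (heights 1-3 were iterated), so the
+	// resumed iterator below starts at height 4 and runs through the new latest height 8.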
+
+	// simulate a restart by creating a new creator with a different latest
+	newCreator, err := NewHeightBasedCreator(
+		getBlockIDByHeight,
+		progress,
+		root,
+		latest,
+	)
+	require.NoError(t, err)
+
+	newIterator, hasNext, err := newCreator.Create()
+	require.NoError(t, err)
+	require.True(t, hasNext)
+
+	// iterate until the end
+	for {
+		blockID, ok, err := newIterator.Next()
+		require.NoError(t, err)
+		if !ok {
+			break
+		}
+
+		visitedBlocks = append(visitedBlocks, blockID)
+	}
+
+	// verify all blocks are visited
+	for i, block := range blocks {
+		require.Equal(t, block.ID(), visitedBlocks[i], "Block %v was not visited", block.Height)
+	}
+
+	// Verify no extra blocks were visited
+	require.Equal(t, len(blocks), len(visitedBlocks), "Unexpected number of blocks visited")
+}
+
+func TestCanSkipViewsIfNotIndexed(t *testing.T) {
+	// Create mock blocks with some indexed and some not
+	blocks := []*flow.Header{
+		{HeaderBody: flow.HeaderBody{View: 1}},
+		{HeaderBody: flow.HeaderBody{View: 2}},
+		{HeaderBody: flow.HeaderBody{View: 3}},
+		{HeaderBody: flow.HeaderBody{View: 5}},
+		{HeaderBody: flow.HeaderBody{View: 7}},
+	}
+
+	// Mock getBlockIDByView function
+	getBlockIDByView := func(view uint64) (blockID flow.Identifier, viewIndexed bool, exception error) {
+		for _, block := range blocks {
+			if block.View == view {
+				return block.ID(), true, nil
+			}
+		}
+
+		return flow.Identifier{}, false, nil
+	}
+
+	// Mock progress tracker
+	progress := &mockProgress{}
+
+	// Mock root and latest function
+	root := &flow.Header{HeaderBody: flow.HeaderBody{View: 0}}
+	latest := func() (*flow.Header, error) {
+		return blocks[len(blocks)-1], nil
+	}
+
+	// Create iterator
+	creator, err := NewViewBasedCreator(
+		getBlockIDByView,
+		progress,
+		root,
+		latest,
+	)
+	require.NoError(t, err)
+
+	iterator, hasNext, err := creator.Create()
+	require.NoError(t, err)
+	require.True(t, hasNext)
+
+	// Iterate through blocks
+	visitedBlocks := make(map[flow.Identifier]struct{})
+	for {
+		blockID, ok, err := iterator.Next()
+		require.NoError(t, err)
+		if !ok {
+			break
+		}
+		visitedBlocks[blockID] = struct{}{}
+	}
+
+	// Verify all blocks were visited exactly once
+	for _, block := range blocks {
+		_, ok := visitedBlocks[block.ID()]
+		require.True(t, ok, "Block %v was not visited", block.View)
+		delete(visitedBlocks, block.ID())
+	}
+
+	// Verify no extra blocks were visited
+	require.Empty(t, visitedBlocks, "Unexpected number of blocks visited")
+
+	// Verify the final checkpoint
+	next, err := iterator.Checkpoint()
+	require.NoError(t, err)
+	require.Equal(t, uint64(8), next, "Expected next view to be 8 (last view + 1)")
+	savedView, err := progress.ProcessedIndex()
+	require.NoError(t, err)
+	require.Equal(t, uint64(8), savedView, "Expected next view to be 8 (last view + 1)")
+}
+
+func TestCanSkipIfThereIsNoBlockToIterate(t *testing.T) {
+	// Set up root block
+	root := &flow.Header{HeaderBody: flow.HeaderBody{Height: 10}}
+
+	// Mock getBlockIDByHeight function
+	getBlockIDByHeight := func(height uint64) (flow.Identifier, error) {
+		return flow.Identifier{}, fmt.Errorf("block not found at height %d", height)
+	}
+
+	// Mock progress tracker
+	progress := &mockProgress{}
+
+	// Mock latest function that returns the same height as root
+	latest := func() (*flow.Header, error) {
+		return root, nil
+	}
+
+	// Create iterator
+	creator, err := NewHeightBasedCreator(
+		getBlockIDByHeight,
+		progress,
+		root,
+		latest,
+	)
+	require.NoError(t, err)
+
+	// Create the iterator
+	_, hasNext, err := creator.Create()
+	require.NoError(t, err)
+	require.False(t, hasNext, "Expected no blocks to iterate")
+
+	savedHeight, err := progress.ProcessedIndex()
+	require.NoError(t, err)
+	require.Equal(t, root.Height+1, savedHeight, "Expected saved height to be root height + 1")
+}
+
+type mockProgress struct {
+	index       uint64
+	initialized bool
+}
+
+var _ storage.ConsumerProgress = (*mockProgress)(nil)
+var _ storage.ConsumerProgressInitializer = (*mockProgress)(nil)
+
+func (m *mockProgress) ProcessedIndex() (uint64, error) {
+	if !m.initialized {
+		return 0, fmt.Errorf("processed index not initialized: %w", storage.ErrNotFound)
+	}
+	return m.index, nil
+}
+
+func (m *mockProgress) Initialize(defaultIndex uint64) (storage.ConsumerProgress, error) {
+	if m.initialized {
+		return m, nil
+	}
+	m.index = defaultIndex
+	m.initialized = true
+	return m, nil
+}
+
+func (m *mockProgress) SetProcessedIndex(processed uint64) error {
+	if !m.initialized {
+		return fmt.Errorf("processed index not initialized")
+	}
+	m.index = processed
+	return nil
+}
+
+func (m *mockProgress) BatchSetProcessedIndex(uint64, storage.ReaderBatchWriter) error {
+	return fmt.Errorf("batch not supported")
+}
diff --git a/module/block_iterator/executor/aggregator.go b/module/block_iterator/executor/aggregator.go
new file mode 100644
index 00000000000..6892b67106b
--- /dev/null
+++ b/module/block_iterator/executor/aggregator.go
@@ -0,0 +1,30 @@
+package executor
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// AggregatedExecutor aggregates multiple IterationExecutor instances into one,
+// so that they can be executed in a single call within the same batch.
+type AggregatedExecutor struct {
+	executors []IterationExecutor
+}
+
+var _ IterationExecutor = (*AggregatedExecutor)(nil)
+
+func NewAggregatedExecutor(executors []IterationExecutor) *AggregatedExecutor {
+	return &AggregatedExecutor{
+		executors: executors,
+	}
+}
+
+func (a *AggregatedExecutor) ExecuteByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) (exception error) {
+	for _, executor := range a.executors {
+		exception = executor.ExecuteByBlockID(blockID, batch)
+		if exception != nil {
+			return exception
+		}
+	}
+	return nil
+}
diff --git a/module/block_iterator/executor/aggregator_test.go b/module/block_iterator/executor/aggregator_test.go
new file mode 100644
index 00000000000..b30c7d87768
--- /dev/null
+++ b/module/block_iterator/executor/aggregator_test.go
@@ -0,0 +1,68 @@
+package executor
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestAggregator(t *testing.T) {
+	// Create mock executors
+	mockExecutor1 := &MockExecutor{}
+	mockExecutor2 := &MockExecutor{}
+	mockExecutor3 := &MockExecutor{}
+
+	// Create AggregatedExecutor
+	aggregator := NewAggregatedExecutor([]IterationExecutor{mockExecutor1, mockExecutor2, mockExecutor3})
+
+	// Test case 1: All executors succeed
+	blockID := unittest.IdentifierFixture()
+	batch := mock.NewReaderBatchWriter(t)
+
+	err := aggregator.ExecuteByBlockID(blockID, batch)
+
+	require.NoError(t, err)
+	require.Equal(t, 1, mockExecutor1.CallCount)
+	require.Equal(t, 1, mockExecutor2.CallCount)
+	require.Equal(t, 1, mockExecutor3.CallCount)
+
+	// Test case 2: Second executor fails
+	mockExecutor1.Reset()
+	mockExecutor2.Reset()
+	mockExecutor3.Reset()
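+	// The aggregator runs executors in order and stops at the first error (fail-fast),
+	// so when executor 2 fails, executor 3 must not be called.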
+	mockExecutor2.ShouldFail = true
+
+	err = aggregator.ExecuteByBlockID(blockID, batch)
+
+	require.Error(t, err)
+	require.Equal(t, 1, mockExecutor1.CallCount)
+	require.Equal(t, 1, mockExecutor2.CallCount)
+	require.Equal(t, 0, mockExecutor3.CallCount)
+}
+
+// MockExecutor is a mock implementation of IterationExecutor
+type MockExecutor struct {
+	CallCount  int
+	ShouldFail bool
+}
+
+var _ IterationExecutor = (*MockExecutor)(nil)
+
+func (m *MockExecutor) ExecuteByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error {
+	m.CallCount++
+	if m.ShouldFail {
+		return fmt.Errorf("mock error")
+	}
+	return nil
+}
+
+func (m *MockExecutor) Reset() {
+	m.CallCount = 0
+	m.ShouldFail = false
+}
diff --git a/module/block_iterator/executor/executor.go b/module/block_iterator/executor/executor.go
new file mode 100644
index 00000000000..bfefdba8de2
--- /dev/null
+++ b/module/block_iterator/executor/executor.go
@@ -0,0 +1,141 @@
+package executor
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/storage"
+)
+
+// IterationExecutor allows the caller to customize the task to be executed for each block.
+// for instance, the executor can prune data indexed by the block, or build another index for
+// each iterated block.
+type IterationExecutor interface {
+	// ExecuteByBlockID executes the task for the block indexed by the blockID
+	ExecuteByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) (exception error)
+}
+
+// IterateExecuteAndCommitInBatch iterates over blocks and executes tasks on the data indexed by each block.
+// updates to the storage database are made in batches, and each batch is committed when it's full.
+// the iteration progress is saved after each batch is committed, so that the iteration
+// can be resumed after a restart.
+// it sleeps after each batch is committed in order to minimize the impact on the system.
+func IterateExecuteAndCommitInBatch(
+	// ctx is used for cancelling the iteration when the context is done
+	ctx context.Context,
+	log zerolog.Logger,
+	metrics module.ExecutionMetrics,
+	// iterator decides how to iterate over blocks
+	iter module.BlockIterator,
+	// executor decides what data in the storage will be updated for a certain block
+	executor IterationExecutor,
+	// db is used to create a batch for each round of iteration; the batch is passed to
+	// the executor for adding updates, and committed when it's full
+	db storage.DB,
+	// batchSize decides the batch size for each commit.
+	batchSize uint,
+	// sleepAfterEachBatchCommit allows the caller to slow down the iteration after each batch is committed
+	// in order to minimize the impact on the system
+	sleepAfterEachBatchCommit time.Duration,
+) error {
+	noMoreBlocks := false
+
+	for !noMoreBlocks {
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+		}
+
+		start := time.Now()
+		iteratedCountInCurrentBatch := uint(0)
+
+		err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			for {
+				// if the batch is full, commit it and enter the outer loop to
+				// start a new batch
+				if iteratedCountInCurrentBatch >= batchSize {
+					return nil
+				}
+
+				blockID, hasNext, err := iter.Next()
+				if err != nil {
+					return err
+				}
+				if !hasNext {
+					// no more blocks to iterate, we are done.
+					// update the flag and prepare to exit the loop after committing the last batch
+					noMoreBlocks = true
+					return nil
+				}
+
+				err = executor.ExecuteByBlockID(blockID, rw)
+				if err != nil {
+					return err
+				}
+
+				iteratedCountInCurrentBatch++
+			}
+		})
+		if err != nil {
+			return err
+		}
+
+		// save the progress of the iteration, so that it can be resumed later
+		_, err = iter.Checkpoint()
+		if err != nil {
+			return fmt.Errorf("failed to checkpoint iterator: %w", err)
+		}
+
+		// report the progress of the iteration
+		startIndex, endIndex, nextIndex := iter.Progress()
+		progress := CalculateProgress(startIndex, endIndex, nextIndex)
+
+		log.Info().
+			Str("commit-dur", time.Since(start).String()).
+			Uint64("start-index", startIndex).
+			Uint64("end-index", endIndex).
+			Uint64("next-index", nextIndex).
+			Str("progress", fmt.Sprintf("%.2f%%", progress)).
+			Msg("batch committed")
+
+		metrics.ExecutionLastChunkDataPackPrunedHeight(nextIndex - 1)
+
+		// sleep after each batch commit to minimize the impact on the system
+		select {
+		case <-ctx.Done():
+			return nil
+		case <-time.After(sleepAfterEachBatchCommit):
+			// continue to next iteration
+		}
+	}
+
+	return nil
+}
+
+// CalculateProgress calculates the progress of the iteration as a percentage in [0, 100].
+// start and end are both inclusive.
+func CalculateProgress(start, end, current uint64) float64 {
+	if end < start {
+		return 100.0
+	}
+	if current < start {
+		return 0.0 // If current is below start, assume 0%
+	}
+	if current > end {
+		return 100.0 // If current is above end, assume 100%
+	}
+	// if start == end, the range holds exactly one block; since current is within
+	// [start, end] here, that block has not been processed yet, so report 0%
+	// (this also avoids a division by zero below)
+	if end == start {
+		return 0.0
+	}
+	progress := float64(current-start) / float64(end-start) * 100.0
+	if progress > 100.0 {
+		return 100.0 // Cap at 100%
+	}
+	return progress
+}
diff --git a/module/block_iterator/executor/executor_test.go b/module/block_iterator/executor/executor_test.go
new file mode 100644
index 00000000000..92bc9f86684
--- /dev/null
+++ b/module/block_iterator/executor/executor_test.go
@@ -0,0 +1,234 @@
+package executor_test
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/block_iterator/executor"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/pebbleimpl"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// verify the executor is able to iterate through all blocks from the iterator.
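+// It stores a set of chunk data packs, removes them in batches of 3 via
+// IterateExecuteAndCommitInBatch, and then asserts that every stored entry is gone.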
+func TestExecute(t *testing.T) {
+	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
+		lockManager := storage.NewTestingLockManager()
+		blockCount := 10
+
+		// prepare data
+		cdps := make([]*flow.ChunkDataPack, 0, blockCount)
+		bs := make([]flow.Identifier, 0, blockCount)
+		for i := 0; i < blockCount; i++ {
+			cdp := unittest.ChunkDataPackFixture(unittest.IdentifierFixture())
+			cdps = append(cdps, cdp)
+			bs = append(bs, cdp.ChunkID)
+		}
+
+		pdb := pebbleimpl.ToDB(db)
+
+		// store the chunk data packs to be pruned later
+		for _, cdp := range cdps {
+			sc := storage.ToStoredChunkDataPack(cdp)
+			require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error {
+				return pdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return operation.InsertChunkDataPack(lctx, rw, sc)
+				})
+			}))
+		}
+
+		// it's OK that the chunk IDs are used as block IDs, because the iterator
+		// basically just iterates over identifiers
+		iter := &iterator{blocks: bs}
+		pr := &testExecutor{
+			executeByBlockID: func(id flow.Identifier, batch storage.ReaderBatchWriter) error {
+				return operation.RemoveChunkDataPack(batch.Writer(), id)
+			},
+		}
+
+		// prune blocks
+		batchSize := uint(3)
+		nosleep := time.Duration(0)
+		require.NoError(t, executor.IterateExecuteAndCommitInBatch(
+			context.Background(), unittest.Logger(), metrics.NewNoopCollector(), iter, pr, pdb, batchSize, nosleep))
+
+		// expect all blocks are pruned
+		for _, b := range bs {
+			// verify they are pruned
+			var c storage.StoredChunkDataPack
+			err := operation.RetrieveChunkDataPack(pdb.Reader(), b, &c)
+			require.True(t, errors.Is(err, storage.ErrNotFound), "expected ErrNotFound but got %v", err)
+		}
+	})
+}
+
+// verify the pruning can be interrupted and resumed
+func TestExecuteCanBeResumed(t *testing.T) {
+	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
+		lockManager := storage.NewTestingLockManager()
+		blockCount := 10
+
+		cdps := make([]*flow.ChunkDataPack, 0, blockCount)
+		bs := make([]flow.Identifier, 0, blockCount)
+		for i := 0; i < blockCount; i++ {
+			cdp := unittest.ChunkDataPackFixture(unittest.IdentifierFixture())
+			cdps = append(cdps, cdp)
+			bs = append(bs, cdp.ChunkID)
+		}
+
+		pdb := pebbleimpl.ToDB(db)
+
+		// store the chunk data packs to be pruned later
+		for _, cdp := range cdps {
+			sc := storage.ToStoredChunkDataPack(cdp)
+			require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error {
+				return pdb.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return operation.InsertChunkDataPack(lctx, rw, sc)
+				})
+			}))
+		}
+
+		// it's OK that the chunk IDs are used as block IDs, because the iterator
+		// basically just iterates over identifiers
+		iter := &iterator{blocks: bs}
+		interrupted := fmt.Errorf("interrupted")
+		pruneUntilInterrupted := &testExecutor{
+			executeByBlockID: func(id flow.Identifier, batch storage.ReaderBatchWriter) error {
+				// the 5th block will interrupt the pruning
+				// since the 5th block belongs to the 2nd batch,
+				// only the first batch is actually pruned,
+				// which means blocks from 0 to 2 are pruned
+				if id == bs[5] {
+					return interrupted // return sentinel error to interrupt the pruning
+				}
+				return operation.RemoveChunkDataPack(batch.Writer(), id)
+			},
+		}
+
+		// prune blocks until interrupted at block 5
+		batchSize := uint(3)
+		nosleep := time.Duration(0)
+		err := executor.IterateExecuteAndCommitInBatch(
+			context.Background(), unittest.Logger(), metrics.NewNoopCollector(), iter, pruneUntilInterrupted, pdb, batchSize, nosleep)
+		require.True(t, errors.Is(err, interrupted), "expected %v but got %v", interrupted, err)
+
+		// verify that only the first batch was pruned
+		for i, b := range bs {
+
+			var c storage.StoredChunkDataPack
+
+			if i < 3 {
+				// the first 3 blocks in the first batch are pruned
+				err = operation.RetrieveChunkDataPack(pdb.Reader(), b, &c)
+				require.True(t, errors.Is(err, storage.ErrNotFound), "expected ErrNotFound for block %v but got %v", i, err)
+				continue
+			}
+
+			// verify the remaining blocks are not pruned yet
+			require.NoError(t, operation.RetrieveChunkDataPack(pdb.Reader(), b, &c))
+		}
+
+		// now resume the pruning
+		iterToAll := restoreBlockIterator(iter.blocks, iter.stored)
+
+		pr := &testExecutor{
+			executeByBlockID: func(id flow.Identifier, batch storage.ReaderBatchWriter) error {
+				return operation.RemoveChunkDataPack(batch.Writer(), id)
+			},
+		}
+
+		require.NoError(t, executor.IterateExecuteAndCommitInBatch(
+			context.Background(), unittest.Logger(), metrics.NewNoopCollector(), iterToAll, pr, pdb, batchSize, nosleep))
+
+		// verify all blocks are pruned by now
+		for _, b := range bs {
+			var c storage.StoredChunkDataPack
+			err = operation.RetrieveChunkDataPack(pdb.Reader(), b, &c)
+			require.True(t, errors.Is(err, storage.ErrNotFound), "expected ErrNotFound but got %v", err)
+		}
+	})
+}
+
+type iterator struct {
+	blocks []flow.Identifier
+	cur    int
+	stored int
+}
+
+var _ module.BlockIterator = (*iterator)(nil)
+
+func (b *iterator) Next() (flow.Identifier, bool, error) {
+	if b.cur >= len(b.blocks) {
+		return flow.Identifier{}, false, nil
+	}
+
+	id := b.blocks[b.cur]
+	b.cur++
+	return id, true, nil
+}
+
+func (b *iterator) Checkpoint() (uint64, error) {
+	b.stored = b.cur
+	return uint64(b.cur), nil
+}
+
+func (b *iterator) Progress() (uint64, uint64, uint64) {
+	return 0, uint64(len(b.blocks) - 1), uint64(b.cur)
+}
+
+func restoreBlockIterator(blocks []flow.Identifier, stored int) *iterator {
+	return &iterator{
+		blocks: blocks,
+		cur:    stored,
+		stored: stored,
+	}
+}
+
+type testExecutor struct {
+	executeByBlockID func(id flow.Identifier, batchWriter storage.ReaderBatchWriter) error
+}
+
+var _ executor.IterationExecutor = (*testExecutor)(nil)
+
+func (p *testExecutor) ExecuteByBlockID(id flow.Identifier, batchWriter storage.ReaderBatchWriter) error {
+	return p.executeByBlockID(id, batchWriter)
+}
+
+func TestCalculateProgress(t *testing.T) {
+	tests := []struct {
+		start   uint64
+		end     uint64
+		current uint64
+		want    float64
+	}{
+		{1, 100, 1, 0.0},     // Just started
+		{1, 100, 50, 49.49},  // Midway
+		{1, 100, 100, 100.0}, // Completed
+		{1, 100, 150, 100.0}, // Exceeds end
+		{1, 100, 0, 0.0},     // Below start
+		{1, 1, 1, 0.0},       // Start = End
+		{1, 1, 0, 0.0},       // Start = End, but below
+		{1, 100, 10, 9.09},   // Early progress
+		{1, 100, 99, 98.99},  // Near completion
+	}
+
+	for _, tt := range tests {
+		got := executor.CalculateProgress(tt.start, tt.end, tt.current)
+		if (got-tt.want) > 0.01 || (tt.want-got) > 0.01 { // Allow small floating-point errors
+			t.Errorf("calculateProgress(%d, %d, %d) = %.2f; want %.2f", tt.start, tt.end, tt.current, got, tt.want)
+		}
+	}
+}
diff --git a/module/block_iterator/iterator.go b/module/block_iterator/iterator.go
new file mode 100644
index 00000000000..7afca3add8c
--- /dev/null
+++ b/module/block_iterator/iterator.go
@@ -0,0 +1,81 @@
+package block_iterator
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module" +) + +// IndexedBlockIterator is a block iterator that iterates over blocks by height or view +// when index is height, it iterates from lower height to higher height +// when index is view, it iterates from lower view to higher view +// caller must ensure that the range is finalized, otherwise the iteration might miss some blocks +// it's not concurrent safe, so don't use it in multiple goroutines +type IndexedBlockIterator struct { + // dependencies + getBlockIDByIndex func(uint64) (blockID flow.Identifier, indexed bool, exception error) + progress module.IteratorStateWriter // for saving the next index to be iterated for resuming the iteration + startIndex uint64 // the start index to iterate, this never change + endIndex uint64 // the end index to iterate, this never change + nextIndex uint64 // the start index to iterate, this will be updated after each iteration +} + +var _ module.BlockIterator = (*IndexedBlockIterator)(nil) + +// caller must ensure that both iterRange.Start and iterRange.End are finalized +func NewIndexedBlockIterator( + getBlockIDByIndex func(uint64) (blockID flow.Identifier, indexed bool, exception error), + progress module.IteratorStateWriter, + iterRange module.IteratorRange, +) module.BlockIterator { + return &IndexedBlockIterator{ + getBlockIDByIndex: getBlockIDByIndex, + progress: progress, + startIndex: iterRange.Start, + endIndex: iterRange.End, + nextIndex: iterRange.Start, + } +} + +// Next returns the next block ID in the iteration +// it iterates from lower index to higher index. +// Note: this method is not concurrent-safe +func (b *IndexedBlockIterator) Next() (flow.Identifier, bool, error) { + if b.nextIndex > b.endIndex { + return flow.ZeroID, false, nil + } + + next, indexed, err := b.getBlockIDByIndex(b.nextIndex) + if err != nil { + return flow.ZeroID, false, fmt.Errorf("failed to fetch block at index (height or view) %v: %w", b.nextIndex, err) + } + + // if the block is not indexed, skip it. This is only possible when we are iterating by view. + // when iterating by height, all blocks should be indexed, so `indexed` should always be true. + if !indexed { + // if the view is not indexed, then iterate next view + b.nextIndex++ + return b.Next() + } + + b.nextIndex++ + + return next, true, nil +} + +// Checkpoint saves the iteration progress to storage +// make sure to call this after all the blocks for processing the block IDs returned by +// Next() are completed. 
+func (b *IndexedBlockIterator) Checkpoint() (uint64, error) {
+	err := b.progress.SaveState(b.nextIndex)
+	if err != nil {
+		return 0, fmt.Errorf("failed to save progress at index %v: %w", b.nextIndex, err)
+	}
+	return b.nextIndex, nil
+}
+
+// Progress returns the current progress of the iterator
+func (b *IndexedBlockIterator) Progress() (uint64, uint64, uint64) {
+	return b.startIndex, b.endIndex, b.nextIndex
+}
diff --git a/module/block_iterator/iterator_test.go b/module/block_iterator/iterator_test.go
new file mode 100644
index 00000000000..ee5a6115f03
--- /dev/null
+++ b/module/block_iterator/iterator_test.go
@@ -0,0 +1,102 @@
+package block_iterator
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestIterateHeight(t *testing.T) {
+	lockManager := storage.NewTestingLockManager()
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		// create a chain of sequential blocks
+		b1 := &flow.Header{HeaderBody: flow.HeaderBody{Height: 1}}
+		b2 := &flow.Header{HeaderBody: flow.HeaderBody{Height: 2}}
+		b3 := &flow.Header{HeaderBody: flow.HeaderBody{Height: 3}}
+		bs := []*flow.Header{b1, b2, b3}
+
+		// index height
+		for _, b := range bs {
+			err := unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error {
+				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return operation.IndexFinalizedBlockByHeight(lctx, rw, b.Height, b.ID())
+				})
+			})
+			require.NoError(t, err)
+		}
+
+		progress := &saveNextHeight{}
+
+		// create iterator
+		// b0 is the root block, iterate from b1 to b3
+		iterRange := module.IteratorRange{Start: b1.Height, End: b3.Height}
+		headers := store.NewHeaders(&metrics.NoopCollector{}, db)
+		getBlockIDByIndex := func(height uint64) (flow.Identifier, bool, error) {
+			blockID, err := headers.BlockIDByHeight(height)
+			if err != nil {
+				return flow.ZeroID, false, err
+			}
+
+			return blockID, true, nil
+		}
+		iter := NewIndexedBlockIterator(getBlockIDByIndex, progress, iterRange)
+
+		// iterate through all blocks
+		visited := make([]flow.Identifier, 0, len(bs))
+		for {
+			id, ok, err := iter.Next()
+			require.NoError(t, err)
+			if !ok {
+				break
+			}
+
+			visited = append(visited, id)
+		}
+
+		// verify all blocks are visited in the same order
+		for i, b := range bs {
+			require.Equal(t, b.ID(), visited[i])
+		}
+
+		require.Equal(t, len(bs), len(visited))
+
+		// save the next height to iterate and verify
+		next, err := iter.Checkpoint()
+		require.NoError(t, err)
+		require.Equal(t, b3.Height+1, next)
+
+		savedNextHeight, err := progress.LoadState()
+		require.NoError(t, err)
+
+		require.Equal(t, b3.Height+1, savedNextHeight,
+			fmt.Sprintf("saved next height should be %v, but got %v", b3.Height+1, savedNextHeight))
+
+	})
+}
+
+type saveNextHeight struct {
+	savedNextHeight uint64
+}
+
+var _ module.IteratorStateWriter = (*saveNextHeight)(nil)
+var _ module.IteratorStateReader = (*saveNextHeight)(nil)
+
+func (s *saveNextHeight) SaveState(height uint64) error {
+	s.savedNextHeight = height
+	return nil
+}
+
+func (s *saveNextHeight) LoadState() (uint64, error) {
+	return s.savedNextHeight, nil
+}
diff --git a/module/block_iterator/latest/sealed_and_executed.go b/module/block_iterator/latest/sealed_and_executed.go
new file mode 100644
index 00000000000..0cc60a4b5e4
--- /dev/null
+++ b/module/block_iterator/latest/sealed_and_executed.go
@@ -0,0 +1,85 @@
+package latest
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+type LatestSealedAndExecuted struct {
+	root            *flow.Header
+	state           protocol.State
+	executedBlockDB storage.DB
+}
+
+func NewLatestSealedAndExecuted(
+	root *flow.Header,
+	state protocol.State,
+	executedBlockDB storage.DB,
+) *LatestSealedAndExecuted {
+	return &LatestSealedAndExecuted{
+		root:            root,
+		state:           state,
+		executedBlockDB: executedBlockDB,
+	}
+}
+
+// BelowLatest returns the header at the given threshold below the latest sealed and executed block.
+func (l *LatestSealedAndExecuted) BelowLatest(threshold uint64) (*flow.Header, error) {
+
+	minHeight := l.root.Height + threshold
+	latest, err := l.Latest()
+	if err != nil {
+		return nil, err
+	}
+
+	if minHeight > latest.Height {
+		return l.root, nil
+	}
+
+	height := latest.Height - threshold
+	return l.state.AtHeight(height).Head()
+}
+
+// Latest returns the latest sealed and executed block.
+func (l *LatestSealedAndExecuted) Latest() (*flow.Header, error) {
+	height, err := LatestSealedAndExecutedHeight(l.state, l.executedBlockDB)
+	if err != nil {
+		return nil, err
+	}
+
+	header, err := l.state.AtHeight(height).Head()
+	if err != nil {
+		return nil, err
+	}
+
+	return header, nil
+}
+
+// LatestSealedAndExecutedHeight returns the height of the latest sealed and executed block.
+func LatestSealedAndExecutedHeight(state protocol.State, db storage.DB) (uint64, error) {
+	lastSealed, err := state.Sealed().Head()
+	if err != nil {
+		return 0, err
+	}
+
+	var blockID flow.Identifier
+	err = operation.RetrieveExecutedBlock(db.Reader(), &blockID)
+	if err != nil {
+		return 0, err
+	}
+
+	lastExecuted, err := state.AtBlockID(blockID).Head()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get executed block: %w", err)
+	}
+
+	// the latest sealed-and-executed height is min(last_sealed, last_executed)
+	if lastExecuted.Height < lastSealed.Height {
+		return lastExecuted.Height, nil
+	}
+	return lastSealed.Height, nil
+}
diff --git a/module/block_iterator/state.go b/module/block_iterator/state.go
new file mode 100644
index 00000000000..f0cee322e93
--- /dev/null
+++ b/module/block_iterator/state.go
@@ -0,0 +1,73 @@
+package block_iterator
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/storage"
+)
+
+// PersistentIteratorState stores the state of the iterator in persistent storage
+type PersistentIteratorState struct {
+	store  storage.ConsumerProgress
+	latest func() (uint64, error)
+}
+
+var _ module.IteratorState = (*PersistentIteratorState)(nil)
+
+func NewPersistentIteratorState(initializer storage.ConsumerProgressInitializer, root uint64, latest func() (uint64, error)) (*PersistentIteratorState, error) {
+	store, err := initializer.Initialize(root + 1)
+	if err != nil {
+		return nil, fmt.Errorf("failed to init processed index: %w", err)
+	}
+
+	return &PersistentIteratorState{
+		store:  store,
+		latest: latest,
+	}, nil
+}
+
+func (n *PersistentIteratorState) LoadState() (uint64, error) {
+	// TODO: add caching
+	return n.store.ProcessedIndex()
+}
+
+func (n *PersistentIteratorState) SaveState(next uint64) error {
+	return n.store.SetProcessedIndex(next)
+}
+
+// NextRange returns the next range of blocks to iterate over.
+// the range is inclusive, and the end is the latest block.
+// if there is no block to iterate, hasNext is false.
+func (n *PersistentIteratorState) NextRange() (rg module.IteratorRange, hasNext bool, exception error) {
+	next, err := n.LoadState()
+	if err != nil {
+		return module.IteratorRange{}, false, fmt.Errorf("failed to read next height: %w", err)
+	}
+
+	latest, err := n.latest()
+	if err != nil {
+		return module.IteratorRange{}, false, fmt.Errorf("failed to get latest block: %w", err)
+	}
+
+	// if next is one past latest, then there is no block to iterate
+	if latest+1 == next {
+		return module.IteratorRange{}, false, nil
+	}
+
+	// latest might go backwards if the user updates parameters that control it.
+	// for instance, if the user uses the block iterator to prune data by height, and then
+	// raises the threshold to retain more data, the latest block goes backwards.
+	//
+	// in that case, we need to respect the latest block and return an empty iteration range,
+	// ensuring that block iteration always moves forward.
+	if latest < next {
+		return module.IteratorRange{}, false, nil
+	}
+
+	// iterate from next to latest (inclusive)
+	return module.IteratorRange{
+		Start: next,
+		End:   latest,
+	}, true, nil
+}
diff --git a/module/block_iterator/state_test.go b/module/block_iterator/state_test.go
new file mode 100644
index 00000000000..7617f80fe62
--- /dev/null
+++ b/module/block_iterator/state_test.go
@@ -0,0 +1,80 @@
+package block_iterator
+
+import (
+	"testing"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/storage/operation/pebbleimpl"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestProgress(t *testing.T) {
+	unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
+		root := uint64(10)
+
+		latest := uint64(20)
+		getLatest := func() (uint64, error) {
+			return latest, nil
+		}
+
+		initializer := store.NewConsumerProgress(pebbleimpl.ToDB(db), "test")
+
+		progress, err := NewPersistentIteratorState(initializer, root, getLatest)
+		require.NoError(t, err)
+
+		// 1. verify the initial state should be the next of root
+		next, err := progress.LoadState()
+		require.NoError(t, err)
+		require.Equal(t, root+1, next)
+
+		rg, hasNext, err := progress.NextRange()
+		require.NoError(t, err)
+		require.True(t, hasNext)
+		require.Equal(t, root+1, rg.Start)
+		require.Equal(t, latest, rg.End)
+
+		// save the state
+		err = progress.SaveState(latest + 1)
+		require.NoError(t, err)
+
+		// 2. verify the saved state
+		next, err = progress.LoadState()
+		require.NoError(t, err)
+		require.Equal(t, latest+1, next)
+
+		// 3. verify when latest is updated to a higher height
+		// the end height of the next range should be updated
+		oldLatest := latest
+		latest = latest + 20
+		rg, hasNext, err = progress.NextRange()
+		require.NoError(t, err)
+		require.True(t, hasNext)
+
+		// verify the new range
+		require.Equal(t, oldLatest+1, rg.Start)
+		require.Equal(t, latest, rg.End)
+
+		// 4. verify when the state is up to date, and latest
+		// does not change, the next range should include no block
+		err = progress.SaveState(latest + 1)
+		require.NoError(t, err)
+
+		// verify that NextRange reports that there is no block to iterate
+		rg, hasNext, err = progress.NextRange()
+		require.NoError(t, err)
+		require.False(t, hasNext)
+
+		// now initialize again; the saved next index (latest + 1) is above latest,
+		// so there will be no block to iterate
+		initializer = store.NewConsumerProgress(pebbleimpl.ToDB(db), "test")
+		progress, err = NewPersistentIteratorState(initializer, root, getLatest)
+		require.NoError(t, err)
+		_, hasNext, err = progress.NextRange()
+		require.NoError(t, err)
+		require.False(t, hasNext) // has no block to iterate
+	})
+}
diff --git a/module/buffer.go b/module/buffer.go
index 30bd3df6f5f..0937a49ad54 100644
--- a/module/buffer.go
+++ b/module/buffer.go
@@ -11,11 +11,11 @@ import (
 // children once the parent is received.
 // Safe for concurrent use.
 type PendingBlockBuffer interface {
-	Add(originID flow.Identifier, block *flow.Block) bool
+	Add(block flow.Slashable[*flow.Proposal]) bool
 
-	ByID(blockID flow.Identifier) (flow.Slashable[*flow.Block], bool)
+	ByID(blockID flow.Identifier) (flow.Slashable[*flow.Proposal], bool)
 
-	ByParentID(parentID flow.Identifier) ([]flow.Slashable[*flow.Block], bool)
+	ByParentID(parentID flow.Identifier) ([]flow.Slashable[*flow.Proposal], bool)
 
 	DropForParent(parentID flow.Identifier)
 
@@ -29,11 +29,11 @@ type PendingBlockBuffer interface {
 // collection node cluster consensus.
 // Safe for concurrent use.
 type PendingClusterBlockBuffer interface {
-	Add(originID flow.Identifier, block *cluster.Block) bool
+	Add(block flow.Slashable[*cluster.Proposal]) bool
 
-	ByID(blockID flow.Identifier) (flow.Slashable[*cluster.Block], bool)
+	ByID(blockID flow.Identifier) (flow.Slashable[*cluster.Proposal], bool)
 
-	ByParentID(parentID flow.Identifier) ([]flow.Slashable[*cluster.Block], bool)
+	ByParentID(parentID flow.Identifier) ([]flow.Slashable[*cluster.Proposal], bool)
 
 	DropForParent(parentID flow.Identifier)
 
diff --git a/module/buffer/backend.go b/module/buffer/backend.go
index 328cdc3b534..52d09e33ed6 100644
--- a/module/buffer/backend.go
+++ b/module/buffer/backend.go
@@ -6,60 +6,65 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 )
 
-// item represents an item in the cache: a block header, payload, and the ID
-// of the node that sent it to us. The payload is generic.
-type item struct {
-	originID flow.Identifier
-	header   *flow.Header
-	payload  interface{}
+// item represents an item in the cache: the main block and auxiliary data that is
+// needed to implement indexed lookups by parent ID and pruning by view.
+type item[T any] struct {
+	view     uint64
+	parentID flow.Identifier
+	block    flow.Slashable[T]
 }
 
-// backend implements a simple cache of pending blocks, indexed by parent ID.
-type backend struct {
+// extractProposalHeader is a type constraint for the generic type which allows extracting
+// flow.ProposalHeader from the underlying type.
+type extractProposalHeader interface {
+	ProposalHeader() *flow.ProposalHeader
+}
+
+// backend implements a simple cache of pending blocks, indexed by parent ID and pruned by view.
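+// The type parameter T is the cached block type (e.g. *flow.Proposal or *cluster.Proposal);
+// it must expose its header via ProposalHeader so the cache can index by parent ID and
+// prune by view.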
+type backend[T extractProposalHeader] struct { mu sync.RWMutex - // map of pending block IDs, keyed by parent ID for ByParentID lookups + // map of pending header IDs, keyed by parent ID for ByParentID lookups blocksByParent map[flow.Identifier][]flow.Identifier // set of pending blocks, keyed by ID to avoid duplication - blocksByID map[flow.Identifier]*item + blocksByID map[flow.Identifier]*item[T] } -// newBackend returns a new pending block cache. -func newBackend() *backend { - cache := &backend{ +// newBackend returns a new pending header cache. +func newBackend[T extractProposalHeader]() *backend[T] { + cache := &backend[T]{ blocksByParent: make(map[flow.Identifier][]flow.Identifier), - blocksByID: make(map[flow.Identifier]*item), + blocksByID: make(map[flow.Identifier]*item[T]), } return cache } // add adds the item to the cache, returning false if it already exists and // true otherwise. -func (b *backend) add(originID flow.Identifier, header *flow.Header, payload interface{}) bool { +func (b *backend[T]) add(block flow.Slashable[T]) bool { + header := block.Message.ProposalHeader().Header + blockID := header.ID() b.mu.Lock() defer b.mu.Unlock() - blockID := header.ID() - _, exists := b.blocksByID[blockID] if exists { return false } - item := &item{ - header: header, - originID: originID, - payload: payload, + item := &item[T]{ + view: header.View, + parentID: header.ParentID, + block: block, } b.blocksByID[blockID] = item - b.blocksByParent[header.ParentID] = append(b.blocksByParent[header.ParentID], blockID) + b.blocksByParent[item.parentID] = append(b.blocksByParent[item.parentID], blockID) return true } -func (b *backend) byID(id flow.Identifier) (*item, bool) { - +func (b *backend[T]) byID(id flow.Identifier) (*item[T], bool) { b.mu.RLock() defer b.mu.RUnlock() @@ -73,8 +78,7 @@ func (b *backend) byID(id flow.Identifier) (*item, bool) { // byParentID returns a list of cached blocks with the given parent. If no such // blocks exist, returns false. -func (b *backend) byParentID(parentID flow.Identifier) ([]*item, bool) { - +func (b *backend[T]) byParentID(parentID flow.Identifier) ([]*item[T], bool) { b.mu.RLock() defer b.mu.RUnlock() @@ -83,7 +87,7 @@ func (b *backend) byParentID(parentID flow.Identifier) ([]*item, bool) { return nil, false } - items := make([]*item, 0, len(forParent)) + items := make([]*item[T], 0, len(forParent)) for _, blockID := range forParent { items = append(items, b.blocksByID[blockID]) } @@ -92,8 +96,7 @@ func (b *backend) byParentID(parentID flow.Identifier) ([]*item, bool) { } // dropForParent removes all cached blocks with the given parent (non-recursively). -func (b *backend) dropForParent(parentID flow.Identifier) { - +func (b *backend[T]) dropForParent(parentID flow.Identifier) { b.mu.Lock() defer b.mu.Unlock() @@ -110,21 +113,20 @@ func (b *backend) dropForParent(parentID flow.Identifier) { // pruneByView prunes any items in the cache that have view less than or // equal to the given view. The pruning view should be the finalized view. 
-func (b *backend) pruneByView(view uint64) { - +func (b *backend[T]) pruneByView(view uint64) { b.mu.Lock() defer b.mu.Unlock() for id, item := range b.blocksByID { - if item.header.View <= view { + if item.view <= view { delete(b.blocksByID, id) - delete(b.blocksByParent, item.header.ParentID) + delete(b.blocksByParent, item.parentID) } } } // size returns the number of elements stored in teh backend -func (b *backend) size() uint { +func (b *backend[T]) size() uint { b.mu.RLock() defer b.mu.RUnlock() return uint(len(b.blocksByID)) diff --git a/module/buffer/backend_test.go b/module/buffer/backend_test.go index fa0fd3165b4..588ed674297 100644 --- a/module/buffer/backend_test.go +++ b/module/buffer/backend_test.go @@ -12,7 +12,7 @@ import ( type BackendSuite struct { suite.Suite - backend *backend + backend *backend[*flow.Proposal] } func TestBackendSuite(t *testing.T) { @@ -20,36 +20,39 @@ func TestBackendSuite(t *testing.T) { } func (suite *BackendSuite) SetupTest() { - suite.backend = newBackend() + suite.backend = newBackend[*flow.Proposal]() } -func (suite *BackendSuite) Item() *item { +func (suite *BackendSuite) item() *item[*flow.Proposal] { parent := unittest.BlockHeaderFixture() - return suite.ItemWithParent(parent) + return suite.itemWithParent(parent) } -func (suite *BackendSuite) ItemWithParent(parent *flow.Header) *item { - header := unittest.BlockHeaderWithParentFixture(parent) - return &item{ - header: header, - payload: unittest.IdentifierFixture(), - originID: unittest.IdentifierFixture(), +func (suite *BackendSuite) itemWithParent(parent *flow.Header) *item[*flow.Proposal] { + block := unittest.BlockWithParentFixture(parent) + return &item[*flow.Proposal]{ + view: block.View, + parentID: block.ParentID, + block: flow.Slashable[*flow.Proposal]{ + OriginID: unittest.IdentifierFixture(), + Message: unittest.ProposalFromBlock(block), + }, } } -func (suite *BackendSuite) Add(item *item) { - suite.backend.add(item.originID, item.header, item.payload) +func (suite *BackendSuite) Add(item *item[*flow.Proposal]) { + suite.backend.add(item.block) } func (suite *BackendSuite) TestAdd() { - expected := suite.Item() - suite.backend.add(expected.originID, expected.header, expected.payload) + expected := suite.item() + suite.backend.add(expected.block) - actual, ok := suite.backend.byID(expected.header.ID()) + actual, ok := suite.backend.byID(expected.block.Message.Block.ID()) suite.Assert().True(ok) suite.Assert().Equal(expected, actual) - byParent, ok := suite.backend.byParentID(expected.header.ParentID) + byParent, ok := suite.backend.byParentID(expected.parentID) suite.Assert().True(ok) suite.Assert().Len(byParent, 1) suite.Assert().Equal(expected, byParent[0]) @@ -57,11 +60,11 @@ func (suite *BackendSuite) TestAdd() { func (suite *BackendSuite) TestChildIndexing() { - parent := suite.Item() - child1 := suite.ItemWithParent(parent.header) - child2 := suite.ItemWithParent(parent.header) - grandchild := suite.ItemWithParent(child1.header) - unrelated := suite.Item() + parent := suite.item() + child1 := suite.itemWithParent(parent.block.Message.Block.ToHeader()) + child2 := suite.itemWithParent(parent.block.Message.Block.ToHeader()) + grandchild := suite.itemWithParent(child1.block.Message.Block.ToHeader()) + unrelated := suite.item() suite.Add(child1) suite.Add(child2) @@ -69,7 +72,7 @@ func (suite *BackendSuite) TestChildIndexing() { suite.Add(unrelated) suite.Run("retrieve by parent ID", func() { - byParent, ok := suite.backend.byParentID(parent.header.ID()) + byParent, ok := 
suite.backend.byParentID(parent.block.Message.Block.ID()) suite.Assert().True(ok) // should only include direct children suite.Assert().Len(byParent, 2) @@ -78,22 +81,22 @@ func (suite *BackendSuite) TestChildIndexing() { }) suite.Run("drop for parent ID", func() { - suite.backend.dropForParent(parent.header.ID()) + suite.backend.dropForParent(parent.block.Message.Block.ID()) // should only drop direct children - _, exists := suite.backend.byID(child1.header.ID()) + _, exists := suite.backend.byID(child1.block.Message.Block.ID()) suite.Assert().False(exists) - _, exists = suite.backend.byID(child2.header.ID()) + _, exists = suite.backend.byID(child2.block.Message.Block.ID()) suite.Assert().False(exists) // grandchildren should be unaffected - _, exists = suite.backend.byParentID(child1.header.ID()) + _, exists = suite.backend.byParentID(child1.block.Message.Block.ID()) suite.Assert().True(exists) - _, exists = suite.backend.byID(grandchild.header.ID()) + _, exists = suite.backend.byID(grandchild.block.Message.Block.ID()) suite.Assert().True(exists) // nothing else should be affected - _, exists = suite.backend.byID(unrelated.header.ID()) + _, exists = suite.backend.byID(unrelated.block.Message.Block.ID()) suite.Assert().True(exists) }) } @@ -101,36 +104,36 @@ func (suite *BackendSuite) TestChildIndexing() { func (suite *BackendSuite) TestPruneByView() { const N = 100 // number of items we're testing with - items := make([]*item, 0, N) + items := make([]*item[*flow.Proposal], 0, N) // build a pending buffer for i := 0; i < N; i++ { - // 10% of the time, add a new unrelated pending block + // 10% of the time, add a new unrelated pending header if i%10 == 0 { - item := suite.Item() + item := suite.item() suite.Add(item) items = append(items, item) continue } - // 90% of the time, build on an existing block + // 90% of the time, build on an existing header if i%2 == 1 { parent := items[rand.Intn(len(items))] - item := suite.ItemWithParent(parent.header) + item := suite.itemWithParent(parent.block.Message.Block.ToHeader()) suite.Add(item) items = append(items, item) } } // pick a height to prune that's guaranteed to prune at least one item - pruneAt := items[rand.Intn(len(items))].header.View + pruneAt := items[rand.Intn(len(items))].view suite.backend.pruneByView(pruneAt) for _, item := range items { - view := item.header.View - id := item.header.ID() - parentID := item.header.ParentID + view := item.view + id := item.block.Message.Block.ID() + parentID := item.parentID // check that items below the prune view were removed if view <= pruneAt { @@ -141,7 +144,7 @@ func (suite *BackendSuite) TestPruneByView() { } // check that other items were not removed - if view > item.header.View { + if view > item.view { _, exists := suite.backend.byID(id) suite.Assert().True(exists) _, exists = suite.backend.byParentID(parentID) diff --git a/module/buffer/pending_blocks.go b/module/buffer/pending_blocks.go index ca4e54b4924..ea9c43b1e0a 100644 --- a/module/buffer/pending_blocks.go +++ b/module/buffer/pending_blocks.go @@ -5,57 +5,43 @@ import ( "github.com/onflow/flow-go/module" ) +// PendingBlocks is a mempool for holding blocks. Furthermore, given a block ID, we can +// query all children that are currently stored in the mempool. The mempool's backend +// is intended to work generically for consensus blocks as well as cluster blocks. 
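+//
+// Illustrative usage sketch (hypothetical originID, proposal and parentID values,
+// error handling abridged):
+//
+//	buffer := NewPendingBlocks()
+//	buffer.Add(flow.Slashable[*flow.Proposal]{OriginID: originID, Message: proposal})
+//	if children, ok := buffer.ByParentID(parentID); ok {
+//		for _, child := range children {
+//			// process child.Message (a *flow.Proposal) ...
+//		}
+//		buffer.DropForParent(parentID)
+//	}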
type PendingBlocks struct { - backend *backend + backend *backend[*flow.Proposal] } var _ module.PendingBlockBuffer = (*PendingBlocks)(nil) func NewPendingBlocks() *PendingBlocks { - b := &PendingBlocks{backend: newBackend()} + b := &PendingBlocks{backend: newBackend[*flow.Proposal]()} return b } -func (b *PendingBlocks) Add(originID flow.Identifier, block *flow.Block) bool { - return b.backend.add(originID, block.Header, block.Payload) +func (b *PendingBlocks) Add(block flow.Slashable[*flow.Proposal]) bool { + return b.backend.add(block) } -func (b *PendingBlocks) ByID(blockID flow.Identifier) (flow.Slashable[*flow.Block], bool) { +func (b *PendingBlocks) ByID(blockID flow.Identifier) (flow.Slashable[*flow.Proposal], bool) { item, ok := b.backend.byID(blockID) if !ok { - return flow.Slashable[*flow.Block]{}, false + return flow.Slashable[*flow.Proposal]{}, false } - - block := flow.Slashable[*flow.Block]{ - OriginID: item.originID, - Message: &flow.Block{ - Header: item.header, - Payload: item.payload.(*flow.Payload), - }, - } - - return block, true + return item.block, true } -func (b *PendingBlocks) ByParentID(parentID flow.Identifier) ([]flow.Slashable[*flow.Block], bool) { +func (b *PendingBlocks) ByParentID(parentID flow.Identifier) ([]flow.Slashable[*flow.Proposal], bool) { items, ok := b.backend.byParentID(parentID) if !ok { return nil, false } - blocks := make([]flow.Slashable[*flow.Block], 0, len(items)) + proposals := make([]flow.Slashable[*flow.Proposal], 0, len(items)) for _, item := range items { - block := flow.Slashable[*flow.Block]{ - OriginID: item.originID, - Message: &flow.Block{ - Header: item.header, - Payload: item.payload.(*flow.Payload), - }, - } - blocks = append(blocks, block) + proposals = append(proposals, item.block) } - - return blocks, true + return proposals, true } func (b *PendingBlocks) DropForParent(parentID flow.Identifier) { diff --git a/module/buffer/pending_cluster_blocks.go b/module/buffer/pending_cluster_blocks.go index df4a3324770..1e2bf5e2b9a 100644 --- a/module/buffer/pending_cluster_blocks.go +++ b/module/buffer/pending_cluster_blocks.go @@ -6,54 +6,37 @@ import ( ) type PendingClusterBlocks struct { - backend *backend + backend *backend[*cluster.Proposal] } func NewPendingClusterBlocks() *PendingClusterBlocks { - b := &PendingClusterBlocks{backend: newBackend()} + b := &PendingClusterBlocks{backend: newBackend[*cluster.Proposal]()} return b } -func (b *PendingClusterBlocks) Add(originID flow.Identifier, block *cluster.Block) bool { - return b.backend.add(originID, block.Header, block.Payload) +func (b *PendingClusterBlocks) Add(block flow.Slashable[*cluster.Proposal]) bool { + return b.backend.add(block) } -func (b *PendingClusterBlocks) ByID(blockID flow.Identifier) (flow.Slashable[*cluster.Block], bool) { +func (b *PendingClusterBlocks) ByID(blockID flow.Identifier) (flow.Slashable[*cluster.Proposal], bool) { item, ok := b.backend.byID(blockID) if !ok { - return flow.Slashable[*cluster.Block]{}, false + return flow.Slashable[*cluster.Proposal]{}, false } - - block := flow.Slashable[*cluster.Block]{ - OriginID: item.originID, - Message: &cluster.Block{ - Header: item.header, - Payload: item.payload.(*cluster.Payload), - }, - } - - return block, true + return item.block, true } -func (b *PendingClusterBlocks) ByParentID(parentID flow.Identifier) ([]flow.Slashable[*cluster.Block], bool) { +func (b *PendingClusterBlocks) ByParentID(parentID flow.Identifier) ([]flow.Slashable[*cluster.Proposal], bool) { items, ok := 
b.backend.byParentID(parentID) if !ok { return nil, false } - blocks := make([]flow.Slashable[*cluster.Block], 0, len(items)) + proposals := make([]flow.Slashable[*cluster.Proposal], 0, len(items)) for _, item := range items { - block := flow.Slashable[*cluster.Block]{ - OriginID: item.originID, - Message: &cluster.Block{ - Header: item.header, - Payload: item.payload.(*cluster.Payload), - }, - } - blocks = append(blocks, block) + proposals = append(proposals, item.block) } - - return blocks, true + return proposals, true } func (b *PendingClusterBlocks) DropForParent(parentID flow.Identifier) { diff --git a/module/builder.go b/module/builder.go index 2138134ec89..b42d66b09a8 100644 --- a/module/builder.go +++ b/module/builder.go @@ -1,10 +1,6 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package module -import ( - "github.com/onflow/flow-go/model/flow" -) +import "github.com/onflow/flow-go/model/flow" // Builder represents an abstracted block construction module that can be used // in more than one consensus algorithm. The resulting block is consistent @@ -15,9 +11,15 @@ type Builder interface { // BuildOn generates a new payload that is valid with respect to the parent // being built upon, with the view being provided by the consensus algorithm. // The builder stores the block and validates it against the protocol state - // before returning it. + // before returning it. The specified parent block must exist in the protocol state. // // NOTE: Since the block is stored within Builder, HotStuff MUST propose the - // block once BuildOn succcessfully returns. - BuildOn(parentID flow.Identifier, setter func(*flow.Header) error) (*flow.Header, error) + // block once BuildOn successfully returns. + // + // # Errors + // This function does not produce any expected errors. + // However, it will pass through all errors returned by `setter` and `sign`. + // Callers must be aware of possible error returns from the `setter` and `sign` arguments they provide, + // and handle them accordingly when handling errors returned from BuildOn. + BuildOn(parentID flow.Identifier, setter func(*flow.HeaderBodyBuilder) error, sign func(*flow.Header) ([]byte, error)) (*flow.ProposalHeader, error) } diff --git a/module/builder/collection/builder.go b/module/builder/collection/builder.go index 91f7fe93e37..8fe93e81a8d 100644 --- a/module/builder/collection/builder.go +++ b/module/builder/collection/builder.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/onflow/flow-go/model/cluster" @@ -19,8 +19,7 @@ import ( "github.com/onflow/flow-go/state/fork" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage/operation" "github.com/onflow/flow-go/utils/logging" ) @@ -31,26 +30,30 @@ import ( // HotStuff event loop is the only consumer of this interface and is single // threaded, this is OK. 
 type Builder struct {
-	db             *badger.DB
-	mainHeaders    storage.Headers
-	clusterHeaders storage.Headers
-	protoState     protocol.State
-	clusterState   clusterstate.State
-	payloads       storage.ClusterPayloads
-	transactions   mempool.Transactions
-	tracer         module.Tracer
-	config         Config
-	log            zerolog.Logger
-	clusterEpoch   uint64 // the operating epoch for this cluster
+	db                         storage.DB
+	lockManager                lockctx.Manager
+	mainHeaders                storage.Headers
+	metrics                    module.CollectionMetrics
+	clusterHeaders             storage.Headers
+	protoState                 protocol.State
+	clusterState               clusterstate.State
+	payloads                   storage.ClusterPayloads
+	transactions               mempool.Transactions
+	tracer                     module.Tracer
+	config                     Config
+	bySealingRateLimiterConfig module.ReadonlySealingLagRateLimiterConfig
+	log                        zerolog.Logger
+	clusterEpoch               uint64 // the operating epoch for this cluster
 	// cache of values about the operating epoch which never change
-	refEpochFirstHeight uint64           // first height of this cluster's operating epoch
-	epochFinalHeight    *uint64          // last height of this cluster's operating epoch (nil if epoch not ended)
-	epochFinalID        *flow.Identifier // ID of last block in this cluster's operating epoch (nil if epoch not ended)
+	epochFinalHeight *uint64          // last height of this cluster's operating epoch (nil if epoch not ended)
+	epochFinalID     *flow.Identifier // ID of last block in this cluster's operating epoch (nil if epoch not ended)
 }
 
 func NewBuilder(
-	db *badger.DB,
+	db storage.DB,
 	tracer module.Tracer,
+	lockManager lockctx.Manager,
+	metrics module.CollectionMetrics,
 	protoState protocol.State,
 	clusterState clusterstate.State,
 	mainHeaders storage.Headers,
@@ -59,25 +62,24 @@ func NewBuilder(
 	transactions mempool.Transactions,
 	log zerolog.Logger,
 	epochCounter uint64,
+	bySealingRateLimiterConfig module.ReadonlySealingLagRateLimiterConfig,
 	opts ...Opt,
 ) (*Builder, error) {
 	b := Builder{
-		db:             db,
-		tracer:         tracer,
-		protoState:     protoState,
-		clusterState:   clusterState,
-		mainHeaders:    mainHeaders,
-		clusterHeaders: clusterHeaders,
-		payloads:       payloads,
-		transactions:   transactions,
-		config:         DefaultConfig(),
-		log:            log.With().Str("component", "cluster_builder").Logger(),
-		clusterEpoch:   epochCounter,
-	}
-
-	err := db.View(operation.RetrieveEpochFirstHeight(epochCounter, &b.refEpochFirstHeight))
-	if err != nil {
-		return nil, fmt.Errorf("could not get epoch first height: %w", err)
+		db:                         db,
+		tracer:                     tracer,
+		lockManager:                lockManager,
+		metrics:                    metrics,
+		protoState:                 protoState,
+		clusterState:               clusterState,
+		mainHeaders:                mainHeaders,
+		clusterHeaders:             clusterHeaders,
+		payloads:                   payloads,
+		transactions:               transactions,
+		config:                     DefaultConfig(),
+		bySealingRateLimiterConfig: bySealingRateLimiterConfig,
+		log:                        log.With().Str("component", "cluster_builder").Logger(),
+		clusterEpoch:               epochCounter,
 	}
 
 	for _, apply := range opts {
@@ -92,9 +94,20 @@ func NewBuilder(
 	return &b, nil
 }
 
-// BuildOn creates a new block built on the given parent. It produces a payload
-// that is valid with respect to the un-finalized chain it extends.
-func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) error) (*flow.Header, error) {
+// BuildOn generates a new payload that is valid with respect to the parent
+// being built upon, with the view being provided by the consensus algorithm.
+// The builder stores the block and validates it against the cluster state
+// before returning it. The specified parent block must exist in the cluster state.
+//
+// NOTE: Since the block is stored within Builder, HotStuff MUST propose the
+// block once BuildOn successfully returns.
+//
+// # Errors
+// This function does not produce any expected errors.
+// However, it will pass through all errors returned by `setter` and `sign`.
+// Callers must be aware of possible error returns from the `setter` and `sign` arguments they provide,
+// and handle them accordingly when handling errors returned from BuildOn.
+func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.HeaderBodyBuilder) error, sign func(*flow.Header) ([]byte, error)) (*flow.ProposalHeader, error) {
 	parentSpan, ctx := b.tracer.StartSpanFromContext(context.Background(), trace.COLBuildOn)
 	defer parentSpan.End()
@@ -158,11 +171,18 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er
 		return nil, fmt.Errorf("could not populate un-finalized ancestry lookup (parent_id=%x): %w", parentID, err)
 	}
 
+	lctx := b.lockManager.NewContext()
+	defer lctx.Release()
+	err = lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock)
+	if err != nil {
+		return nil, err
+	}
+
 	// STEP 1b: create a lookup of all transactions previously included in
 	// the finalized collections. Any transactions already included in finalized
 	// collections can be removed from the mempool.
 	span, _ = b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnFinalizedLookup)
-	err = b.populateFinalizedAncestryLookup(buildCtx)
+	err = b.populateFinalizedAncestryLookup(lctx, buildCtx)
 	span.End()
 	if err != nil {
 		return nil, fmt.Errorf("could not populate finalized ancestry lookup: %w", err)
@@ -180,26 +200,44 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er
 	// STEP 3: we have a set of transactions that are valid to include on this fork.
 	// Now we create the header for the cluster block.
 	span, _ = b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnCreateHeader)
-	header, err := b.buildHeader(buildCtx, payload, setter)
+	proposal, err := b.buildHeader(buildCtx, payload, setter, sign)
 	span.End()
 	if err != nil {
 		return nil, fmt.Errorf("could not build header: %w", err)
 	}
 
-	proposal := cluster.Block{
-		Header:  header,
-		Payload: payload,
+	block, err := cluster.NewBlock(
+		cluster.UntrustedBlock{
+			HeaderBody: proposal.Header.HeaderBody,
+			Payload:    *payload,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not build cluster block: %w", err)
+	}
+
+	blockProposal, err := cluster.NewProposal(
+		cluster.UntrustedProposal{
+			Block:           *block,
+			ProposerSigData: proposal.ProposerSigData,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not build cluster proposal: %w", err)
 	}
 
 	// STEP 4: insert the cluster block to the database.
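 	// The write below must happen while the lock acquired in STEP 1b
 	// (LockInsertOrFinalizeClusterBlock) is still held, with the read and
 	// write bundled into one atomic batch. In simplified form, the pattern is:
 	//
 	//	err = b.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
 	//		return operation.InsertClusterBlock(lctx, rw, blockProposal)
 	//	})
 	//
 	// so concurrent builders cannot interleave conflicting cluster-block writes.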
 	span, _ = b.tracer.StartSpanFromContext(ctx, trace.COLBuildOnDBInsert)
-	err = operation.RetryOnConflict(b.db.Update, procedure.InsertClusterBlock(&proposal))
+
+	err = b.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		return operation.InsertClusterBlock(lctx, rw, blockProposal)
+	})
 	span.End()
 	if err != nil {
 		return nil, fmt.Errorf("could not insert built block: %w", err)
 	}
 
-	return proposal.Header, nil
+	return proposal, nil
 }
 
 // getBlockBuildContext retrieves the required contextual information from the database
@@ -210,13 +248,25 @@ func (b *Builder) getBlockBuildContext(parentID flow.Identifier) (*blockBuildCon
 	ctx.config = b.config
 	ctx.parentID = parentID
 	ctx.lookup = newTransactionLookup()
 	var err error
+	ctx.config.MaxCollectionSize, err = GetMaxCollectionSizeForSealingLag(
+		b.protoState,
+		b.bySealingRateLimiterConfig.MinSealingLag(),
+		b.bySealingRateLimiterConfig.MaxSealingLag(),
+		b.bySealingRateLimiterConfig.HalvingInterval(),
+		b.bySealingRateLimiterConfig.MinCollectionSize(),
+		b.config.MaxCollectionSize,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not get max collection size for sealing lag: %w", err)
+	}
+	b.metrics.CollectionMaxSize(ctx.config.MaxCollectionSize)
+
 	ctx.parent, err = b.clusterHeaders.ByBlockID(parentID)
 	if err != nil {
 		return nil, fmt.Errorf("could not get parent: %w", err)
 	}
-	ctx.limiter = newRateLimiter(b.config, ctx.parent.Height+1)
+	ctx.limiter = newRateLimiter(ctx.config, ctx.parent.Height+1)
 
 	// retrieve the finalized boundary ON THE CLUSTER CHAIN
 	ctx.clusterChainFinalizedBlock, err = b.clusterState.Final().Head()
@@ -240,37 +290,34 @@ func (b *Builder) getBlockBuildContext(parentID flow.Identifier) (*blockBuildCon
 		return ctx, nil
 	}
 
-	// otherwise, attempt to read them from storage
-	err = b.db.View(func(btx *badger.Txn) error {
-		var refEpochFinalHeight uint64
-		var refEpochFinalID flow.Identifier
+	r := b.db.Reader()
 
-		err = operation.RetrieveEpochLastHeight(b.clusterEpoch, &refEpochFinalHeight)(btx)
-		if err != nil {
-			if errors.Is(err, storage.ErrNotFound) {
-				return nil
-			}
-			return fmt.Errorf("unexpected failure to retrieve final height of operating epoch: %w", err)
-		}
-		err = operation.LookupBlockHeight(refEpochFinalHeight, &refEpochFinalID)(btx)
-		if err != nil {
-			// if we are able to retrieve the epoch's final height, the block must be finalized
-			// therefore failing to look up its height here is an unexpected error
-			return irrecoverable.NewExceptionf("could not retrieve ID of finalized final block of operating epoch: %w", err)
-		}
+	var refEpochFinalHeight uint64
+	var refEpochFinalID flow.Identifier
 
-		// cache the values
-		b.epochFinalHeight = &refEpochFinalHeight
-		b.epochFinalID = &refEpochFinalID
-		// store the values in the build context
-		ctx.refEpochFinalID = b.epochFinalID
-		ctx.refEpochFinalHeight = b.epochFinalHeight
+	err = operation.RetrieveEpochLastHeight(r, b.clusterEpoch, &refEpochFinalHeight)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return ctx, nil
+		}
+		return nil, fmt.Errorf("unexpected failure to retrieve final height of operating epoch: %w", err)
+	}
 
-		return nil
-	})
+	// this does not require a lock, as the block ID at a given height does not change once finalized
+	err = operation.LookupBlockHeight(r, refEpochFinalHeight, &refEpochFinalID)
 	if err != nil {
-		return nil, fmt.Errorf("could not get block build context: %w", err)
+		// if we are able to retrieve the epoch's final height, the block must be finalized
+		// therefore failing to look up its height here is an unexpected error
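+		// Contrast with the RetrieveEpochLastHeight step above, where
+		// storage.ErrNotFound is benign and handled with the usual sentinel check:
+		//
+		//	if errors.Is(err, storage.ErrNotFound) {
+		//		return ctx, nil // the epoch simply has not ended yet
+		//	}
+		//
+		// Here, by contrast, any failure is escalated as an irrecoverable exception.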
+		return nil, irrecoverable.NewExceptionf("could not retrieve ID of finalized final block of operating epoch: %w", err)
 	}
+
+	// cache the values
+	b.epochFinalHeight = &refEpochFinalHeight
+	b.epochFinalID = &refEpochFinalID
+	// store the values in the build context
+	ctx.refEpochFinalID = b.epochFinalID
+	ctx.refEpochFinalHeight = b.epochFinalHeight
+
 	return ctx, nil
 }
 
@@ -303,7 +350,7 @@ func (b *Builder) populateUnfinalizedAncestryLookup(ctx *blockBuildContext) erro
 // The traversal is structured so that we check every collection whose reference
 // block height translates to a possible constituent transaction which could also
 // appear in the collection we are building.
-func (b *Builder) populateFinalizedAncestryLookup(ctx *blockBuildContext) error {
+func (b *Builder) populateFinalizedAncestryLookup(lctx lockctx.Proof, ctx *blockBuildContext) error {
 	minRefHeight := ctx.lowestPossibleReferenceBlockHeight()
 	maxRefHeight := ctx.highestPossibleReferenceBlockHeight()
 	lookup := ctx.lookup
@@ -331,7 +378,7 @@ func (b *Builder) populateFinalizedAncestryLookup(ctx *blockBuildContext) error
 	// the finalized cluster blocks which could possibly contain any conflicting transactions
 	var clusterBlockIDs []flow.Identifier
 	start, end := findRefHeightSearchRangeForConflictingClusterBlocks(minRefHeight, maxRefHeight)
-	err := b.db.View(operation.LookupClusterBlocksByReferenceHeightRange(start, end, &clusterBlockIDs))
+	err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, b.db.Reader(), start, end, &clusterBlockIDs)
 	if err != nil {
 		return fmt.Errorf("could not lookup finalized cluster blocks by reference height range [%d,%d]: %w", start, end, err)
 	}
@@ -360,6 +407,7 @@ func (b *Builder) populateFinalizedAncestryLookup(ctx *blockBuildContext) error
 func (b *Builder) buildPayload(buildCtx *blockBuildContext) (*cluster.Payload, error) {
 	lookup := buildCtx.lookup
 	limiter := buildCtx.limiter
+	config := buildCtx.config
 	maxRefHeight := buildCtx.highestPossibleReferenceBlockHeight()
 	// keep track of the actual smallest reference height of all included transactions
 	minRefHeight := maxRefHeight
@@ -368,36 +416,50 @@ func (b *Builder) buildPayload(buildCtx *blockBuildContext) (*cluster.Payload, e
 	var transactions []*flow.TransactionBody
 	var totalByteSize uint64
 	var totalGas uint64
-	for _, tx := range b.transactions.All() {
+
+	// ATTENTION: this is a temporary measure to ensure that we give some prioritization to the service account
+	// transactions. This is an experimental approach to increase the likelihood of service account transactions being included in the collection.
+	var priorityTransactions []*flow.TransactionBody
+	for payer := range config.PriorityPayers {
+		priorityTransactions = append(priorityTransactions, b.transactions.ByPayer(payer)...)
+	}
+	// txDedup is a map to deduplicate transactions by their ID. Since we are merging the priority transactions
+	// with all transactions from the mempool, we need to ensure that we don't include the same transaction twice.
+	txDedup := make(map[flow.Identifier]struct{}, config.MaxCollectionSize)
+	for _, tx := range append(priorityTransactions, b.transactions.Values()...) {
 		// if we have reached the maximum number of transactions, stop
-		if uint(len(transactions)) >= b.config.MaxCollectionSize {
+		if uint(len(transactions)) >= config.MaxCollectionSize {
 			break
 		}
 
+		txID := tx.ID()
+		if _, exists := txDedup[txID]; exists {
+			continue
+		}
+
 		txByteSize := uint64(tx.ByteSize())
 		// ignore transactions with tx byte size bigger than the max amount per collection
 		// this case shouldn't happen ever since we keep a limit on tx byte size but in case
 		// we keep this condition
-		if txByteSize > b.config.MaxCollectionByteSize {
+		if txByteSize > config.MaxCollectionByteSize {
 			continue
 		}
 
 		// because the max byte size per tx is way smaller than the max collection byte size, we can stop here and not continue.
 		// to make it more effective in the future we can continue adding smaller ones
-		if totalByteSize+txByteSize > b.config.MaxCollectionByteSize {
+		if totalByteSize+txByteSize > config.MaxCollectionByteSize {
 			break
 		}
 
 		// ignore transactions with max gas bigger than the max total gas per collection
 		// this case shouldn't happen ever but in case we keep this condition
-		if tx.GasLimit > b.config.MaxCollectionTotalGas {
+		if tx.GasLimit > config.MaxCollectionTotalGas {
 			continue
 		}
 
 		// because the max gas limit per tx is way smaller than the total max gas per collection, we can stop here and not continue.
 		// to make it more effective in the future we can continue adding smaller ones
-		if totalGas+tx.GasLimit > b.config.MaxCollectionTotalGas {
+		if totalGas+tx.GasLimit > config.MaxCollectionTotalGas {
 			break
 		}
 
@@ -415,7 +477,6 @@ func (b *Builder) buildPayload(buildCtx *blockBuildContext) (*cluster.Payload, e
 			continue
 		}
 
-		txID := tx.ID()
 		// make sure the reference block is finalized and not orphaned
 		blockIDFinalizedAtRefHeight, err := b.mainHeaders.BlockIDByHeight(refHeader.Height)
 		if err != nil {
@@ -448,18 +509,18 @@ func (b *Builder) buildPayload(buildCtx *blockBuildContext) (*cluster.Payload, e
 		// enforce rate limiting rules
 		if limiter.shouldRateLimit(tx) {
-			if b.config.DryRunRateLimit {
+			if config.DryRunRateLimit {
 				// log that this transaction would have been rate-limited, but we will still include it in the collection
 				b.log.Info().
 					Hex("tx_id", logging.ID(txID)).
 					Str("payer_addr", tx.Payer.String()).
-					Float64("rate_limit", b.config.MaxPayerTransactionRate).
+					Float64("rate_limit", config.MaxPayerTransactionRate).
 					Msg("dry-run: observed transaction that would have been rate limited")
 			} else {
 				b.log.Debug().
 					Hex("tx_id", logging.ID(txID)).
 					Str("payer_addr", tx.Payer.String()).
-					Float64("rate_limit", b.config.MaxPayerTransactionRate).
+					Float64("rate_limit", config.MaxPayerTransactionRate).
 					Msg("transaction is rate-limited")
 				continue
 			}
 		}
@@ -475,37 +536,70 @@ func (b *Builder) buildPayload(buildCtx *blockBuildContext) (*cluster.Payload, e
 		limiter.transactionIncluded(tx)
 		transactions = append(transactions, tx)
+		txDedup[txID] = struct{}{}
 		totalByteSize += txByteSize
 		totalGas += tx.GasLimit
 	}
 
 	// build the payload from the transactions
-	payload := cluster.PayloadFromTransactions(minRefID, transactions...)
-	return &payload, nil
+	collection, err := flow.NewCollection(flow.UntrustedCollection{Transactions: transactions})
+	if err != nil {
+		return nil, fmt.Errorf("could not build the collection from the transactions: %w", err)
+	}
+
+	payload, err := cluster.NewPayload(
+		cluster.UntrustedPayload{
+			ReferenceBlockID: minRefID,
+			Collection:       *collection,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not build a payload: %w", err)
+	}
+	return payload, nil
 }
 
 // buildHeader constructs the header for the cluster block being built.
 // It invokes the HotStuff setter to set fields related to HotStuff (QC, etc.).
 // No errors are expected during normal operation.
-func (b *Builder) buildHeader(ctx *blockBuildContext, payload *cluster.Payload, setter func(header *flow.Header) error) (*flow.Header, error) {
-
-	header := &flow.Header{
-		ChainID:     ctx.parent.ChainID,
-		ParentID:    ctx.parentID,
-		Height:      ctx.parent.Height + 1,
-		PayloadHash: payload.Hash(),
-		Timestamp:   time.Now().UTC(),
-
-		// NOTE: we rely on the HotStuff-provided setter to set the other
-		// fields, which are related to signatures and HotStuff internals
-	}
+func (b *Builder) buildHeader(
+	ctx *blockBuildContext,
+	payload *cluster.Payload,
+	setter func(header *flow.HeaderBodyBuilder) error,
+	sign func(header *flow.Header) ([]byte, error),
+) (*flow.ProposalHeader, error) {
+	// NOTE: we rely on the HotStuff-provided setter to set the other
+	// fields, which are related to signatures and HotStuff internals
+	headerBodyBuilder := flow.NewHeaderBodyBuilder().
+		WithChainID(ctx.parent.ChainID).
+		WithParentID(ctx.parentID).
+		WithHeight(ctx.parent.Height + 1).
+		WithTimestamp(uint64(time.Now().UnixMilli()))
 
 	// set fields specific to the consensus algorithm
-	err := setter(header)
+	err := setter(headerBodyBuilder)
 	if err != nil {
 		return nil, fmt.Errorf("could not set fields to header: %w", err)
 	}
 
-	return header, nil
+	headerBody, err := headerBodyBuilder.Build()
+	if err != nil {
+		return nil, irrecoverable.NewExceptionf("unexpected error when building header body: %w", err)
+	}
+	header, err := flow.NewHeader(flow.UntrustedHeader{
+		HeaderBody:  *headerBody,
+		PayloadHash: payload.Hash(),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not build header: %w", err)
+	}
+	sig, err := sign(header)
+	if err != nil {
+		return nil, fmt.Errorf("could not sign proposal: %w", err)
+	}
+	return &flow.ProposalHeader{
+		Header:          header,
+		ProposerSigData: sig,
+	}, nil
 }
 
 // findRefHeightSearchRangeForConflictingClusterBlocks computes the range of reference
diff --git a/module/builder/collection/builder_test.go b/module/builder/collection/builder_test.go
index 699046a5bb1..a63e3583e81 100644
--- a/module/builder/collection/builder_test.go
+++ b/module/builder/collection/builder_test.go
@@ -2,17 +2,19 @@ package collection_test
 
 import (
 	"context"
+	"errors"
 	"math/rand"
 	"os"
 	"testing"
 
-	"github.com/dgraph-io/badger/v2"
+	"github.com/jordanschalm/lockctx"
 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
+	hotstuffmodel "github.com/onflow/flow-go/consensus/hotstuff/model"
 	model "github.com/onflow/flow-go/model/cluster"
 	"github.com/onflow/flow-go/model/flow"
 	builder "github.com/onflow/flow-go/module/builder/collection"
@@ -20,27 +22,41 @@ import (
 	"github.com/onflow/flow-go/module/mempool/herocache"
 	"github.com/onflow/flow-go/module/metrics"
 	"github.com/onflow/flow-go/module/trace"
"github.com/onflow/flow-go/module/updatable_configs" "github.com/onflow/flow-go/state/cluster" clusterkv "github.com/onflow/flow-go/state/cluster/badger" "github.com/onflow/flow-go/state/protocol" pbadger "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" "github.com/onflow/flow-go/state/protocol/util" "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - sutil "github.com/onflow/flow-go/storage/util" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) -var noopSetter = func(*flow.Header) error { return nil } +var signer = func(*flow.Header) ([]byte, error) { return unittest.SignatureFixture(), nil } +var setter = func(h *flow.HeaderBodyBuilder) error { + h.WithHeight(42). + WithChainID(flow.Emulator). + WithParentID(unittest.IdentifierFixture()). + WithView(1337). + WithParentView(1336). + WithParentVoterIndices(unittest.SignerIndicesFixture(4)). + WithParentVoterSigData(unittest.QCSigDataFixture()). + WithProposerID(unittest.IdentifierFixture()) + + return nil +} type BuilderSuite struct { suite.Suite - db *badger.DB - dbdir string + db storage.DB + dbdir string + lockManager lockctx.Manager genesis *model.Block chainID flow.ChainID @@ -61,58 +77,77 @@ type BuilderSuite struct { // runs before each test runs func (suite *BuilderSuite) SetupTest() { + fmt.Println("SetupTest>>>>") + suite.lockManager = storage.NewTestingLockManager() var err error - // seed the RNG - rand.Seed(time.Now().UnixNano()) - - suite.genesis = model.Genesis() - suite.chainID = suite.genesis.Header.ChainID + suite.genesis, err = unittest.ClusterBlock.Genesis() + require.NoError(suite.T(), err) + suite.chainID = suite.genesis.ChainID suite.pool = herocache.NewTransactions(1000, unittest.Logger(), metrics.NewNoopCollector()) suite.dbdir = unittest.TempDir(suite.T()) - suite.db = unittest.BadgerDB(suite.T(), suite.dbdir) + pdb := unittest.PebbleDB(suite.T(), suite.dbdir) + suite.db = pebbleimpl.ToDB(pdb) metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := sutil.StorageLayer(suite.T(), suite.db) + + all := store.InitAll(metrics, suite.db) consumer := events.NewNoop() suite.headers = all.Headers suite.blocks = all.Blocks - suite.payloads = bstorage.NewClusterPayloads(metrics, suite.db) + suite.payloads = store.NewClusterPayloads(metrics, suite.db) // just bootstrap with a genesis block, we'll use this as reference root, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) // ensure we don't enter a new epoch for tests that build many blocks - result.ServiceEvents[0].Event.(*flow.EpochSetup).FinalView = root.Header.View + 100000 + result.ServiceEvents[0].Event.(*flow.EpochSetup).FinalView = root.View + 100000 seal.ResultID = result.ID() - rootSnapshot, err := inmem.SnapshotFromBootstrapState(root, result, seal, unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(root.ID()))) + safetyParams, err := protocol.DefaultEpochSafetyParams(root.ChainID) + require.NoError(suite.T(), err) + minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents( + 
+		result.ServiceEvents[0].Event.(*flow.EpochSetup),
+		result.ServiceEvents[1].Event.(*flow.EpochCommit),
+	)
 	require.NoError(suite.T(), err)
-	suite.epochCounter = rootSnapshot.Encodable().Epochs.Current.Counter
+	rootProtocolState, err := kvstore.NewDefaultKVStore(
+		safetyParams.FinalizationSafetyThreshold,
+		safetyParams.EpochExtensionViewCount,
+		minEpochStateEntry.ID(),
+	)
+	require.NoError(suite.T(), err)
+	root.Payload.ProtocolStateID = rootProtocolState.ID()
+	rootSnapshot, err := unittest.SnapshotFromBootstrapState(root, result, seal, unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(root.ID())))
+	require.NoError(suite.T(), err)
+	suite.epochCounter = rootSnapshot.Encodable().SealingSegment.LatestProtocolStateEntry().EpochEntry.EpochCounter()
 
 	clusterQC := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(suite.genesis.ID()))
 	clusterStateRoot, err := clusterkv.NewStateRoot(suite.genesis, clusterQC, suite.epochCounter)
 	suite.Require().NoError(err)
-	clusterState, err := clusterkv.Bootstrap(suite.db, clusterStateRoot)
+	clusterState, err := clusterkv.Bootstrap(suite.db, suite.lockManager, clusterStateRoot)
 	suite.Require().NoError(err)
 
-	suite.state, err = clusterkv.NewMutableState(clusterState, tracer, suite.headers, suite.payloads)
+	suite.state, err = clusterkv.NewMutableState(clusterState, suite.lockManager, tracer, suite.headers, suite.payloads)
 	suite.Require().NoError(err)
 
 	state, err := pbadger.Bootstrap(
 		metrics,
 		suite.db,
+		suite.lockManager,
 		all.Headers,
 		all.Seals,
 		all.Results,
 		all.Blocks,
 		all.QuorumCertificates,
-		all.Setups,
+		all.EpochSetups,
 		all.EpochCommits,
-		all.Statuses,
+		all.EpochProtocolStateEntries,
+		all.ProtocolKVStore,
 		all.VersionBeacons,
 		rootSnapshot,
 	)
@@ -136,11 +171,25 @@ func (suite *BuilderSuite) SetupTest() {
 		tx.ProposalKey.SequenceNumber = uint64(i)
 		tx.GasLimit = uint64(9999)
 	})
-		added := suite.pool.Add(&transaction)
+		added := suite.pool.Add(transaction.ID(), &transaction)
 		suite.Assert().True(added)
 	}
 
-	suite.builder, _ = builder.NewBuilder(suite.db, tracer, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter)
+	suite.builder, _ = builder.NewBuilder(
+		suite.db,
+		tracer,
+		suite.lockManager,
+		metrics,
+		suite.protoState,
+		suite.state,
+		suite.headers,
+		suite.headers,
+		suite.payloads,
+		suite.pool,
+		unittest.Logger(),
+		suite.epochCounter,
+		updatable_configs.DefaultBySealingLagRateLimiterConfigs(),
+	)
 }
 
 // runs after each test finishes
@@ -151,26 +200,31 @@ func (suite *BuilderSuite) TearDownTest() {
 	suite.Assert().NoError(err)
 }
 
-func (suite *BuilderSuite) InsertBlock(block model.Block) {
-	err := suite.db.Update(procedure.InsertClusterBlock(&block))
-	suite.Assert().NoError(err)
+func (suite *BuilderSuite) InsertBlock(block *model.Block) {
+	err := unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error {
+		return suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(block))
+		})
+	})
+	suite.Require().NoError(err)
 }
 
 func (suite *BuilderSuite) FinalizeBlock(block model.Block) {
-	err := suite.db.Update(func(tx *badger.Txn) error {
-		var refBlock flow.Header
-		err := operation.RetrieveHeader(block.Payload.ReferenceBlockID, &refBlock)(tx)
-		if err != nil {
-			return err
-		}
-		err = procedure.FinalizeClusterBlock(block.ID())(tx)
-		if err != nil {
-			return err
-		}
-		err = operation.IndexClusterBlockByReferenceHeight(refBlock.Height, block.ID())(tx)
-		return err
+	err := unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error {
+		return suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			var refBlock flow.Header
+			err := operation.RetrieveHeader(rw.GlobalReader(), block.Payload.ReferenceBlockID, &refBlock)
+			if err != nil {
+				return err
+			}
+			err = operation.FinalizeClusterBlock(lctx, rw, block.ID())
+			if err != nil {
+				return err
+			}
+			return operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), refBlock.Height, block.ID())
+		})
 	})
-	suite.Assert().NoError(err)
+	suite.Require().NoError(err)
 }
 
 // Payload returns a payload containing the given transactions, with a valid
@@ -178,29 +232,33 @@
 func (suite *BuilderSuite) Payload(transactions ...*flow.TransactionBody) model.Payload {
 	final, err := suite.protoState.Final().Head()
 	suite.Require().NoError(err)
-	return model.PayloadFromTransactions(final.ID(), transactions...)
+
+	payload, err := model.NewPayload(
+		model.UntrustedPayload{
+			ReferenceBlockID: final.ID(),
+			Collection:       flow.Collection{Transactions: transactions},
+		},
+	)
+	suite.Require().NoError(err)
+
+	return *payload
 }
 
 // ProtoStateRoot returns the root block of the protocol state.
 func (suite *BuilderSuite) ProtoStateRoot() *flow.Header {
-	root, err := suite.protoState.Params().Root()
-	suite.Require().NoError(err)
-	return root
+	return suite.protoState.Params().FinalizedRoot()
 }
 
 // ClearPool removes all items from the pool
 func (suite *BuilderSuite) ClearPool() {
-	// TODO use Clear()
-	for _, tx := range suite.pool.All() {
-		suite.pool.Remove(tx.ID())
-	}
+	suite.pool.Clear()
 }
 
 // FillPool adds n transactions to the pool, using the given generator function.
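// For example, with a hypothetical generator (any *flow.TransactionBody
// factory works; the tests below use the same shape):
//
//	suite.FillPool(3, func() *flow.TransactionBody {
//		tx := unittest.TransactionBodyFixture()
//		tx.ReferenceBlockID = suite.ProtoStateRoot().ID()
//		return &tx
//	})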
 func (suite *BuilderSuite) FillPool(n int, create func() *flow.TransactionBody) {
 	for i := 0; i < n; i++ {
 		tx := create()
-		suite.pool.Add(tx)
+		suite.pool.Add(tx.ID(), tx)
 	}
 }
 
@@ -212,27 +270,22 @@
 func (suite *BuilderSuite) TestBuildOn_NonExistentParent() {
 	// use a non-existent parent ID
 	parentID := unittest.IdentifierFixture()
 
-	_, err := suite.builder.BuildOn(parentID, noopSetter)
+	_, err := suite.builder.BuildOn(parentID, setter, signer)
 	suite.Assert().Error(err)
 }
 
 func (suite *BuilderSuite) TestBuildOn_Success() {
-	var expectedHeight uint64 = 42
-	setter := func(h *flow.Header) error {
-		h.Height = expectedHeight
-		return nil
-	}
-	header, err := suite.builder.BuildOn(suite.genesis.ID(), setter)
+	proposal, err := suite.builder.BuildOn(suite.genesis.ID(), setter, signer)
 	suite.Require().NoError(err)
 
 	// setter should have been run
-	suite.Assert().Equal(expectedHeight, header.Height)
+	// the package-level setter sets the height to 42
+	suite.Assert().Equal(uint64(42), proposal.Header.Height)
 
 	// should be able to retrieve built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), proposal.Header.ID(), &built)
 	suite.Assert().NoError(err)
 	builtCollection := built.Payload.Collection
 
@@ -243,28 +296,59 @@ func (suite *BuilderSuite) TestBuildOn_Success() {
 	suite.Assert().Equal(mainGenesis.ID(), built.Payload.ReferenceBlockID)
 
 	// payload should include only items from mempool
-	mempoolTransactions := suite.pool.All()
+	mempoolTransactions := suite.pool.Values()
 	suite.Assert().Len(builtCollection.Transactions, 3)
 	suite.Assert().True(collectionContains(builtCollection, flow.GetIDs(mempoolTransactions)...))
 }
 
+// TestBuildOn_SetterErrorPassthrough validates that errors from the setter function are passed through to the caller.
+func (suite *BuilderSuite) TestBuildOn_SetterErrorPassthrough() {
+	sentinel := errors.New("sentinel")
+	setter := func(h *flow.HeaderBodyBuilder) error {
+		return sentinel
+	}
+	_, err := suite.builder.BuildOn(suite.genesis.ID(), setter, signer)
+	suite.Assert().ErrorIs(err, sentinel)
+}
+
+// TestBuildOn_SignerErrorPassthrough validates that errors from the sign function are passed through to the caller.
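+//
+// Preserving the concrete error value matters here: the HotStuff EventHandler
+// keys off NoVoteError to decide not to propose, so BuildOn must not swallow
+// or re-wrap the sentinel. The sub-test below pins down exactly that property:
+//
+//	sentinel := hotstuffmodel.NewNoVoteErrorf("not voting")
+//	// ... BuildOn fails, returning an error that wraps sentinel ...
+//	suite.Assert().ErrorIs(err, sentinel) // must still match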
+func (suite *BuilderSuite) TestBuildOn_SignerErrorPassthrough() {
+	suite.T().Run("unexpected Exception", func(t *testing.T) {
+		exception := errors.New("exception")
+		sign := func(h *flow.Header) ([]byte, error) {
+			return nil, exception
+		}
+		_, err := suite.builder.BuildOn(suite.genesis.ID(), setter, sign)
+		suite.Assert().ErrorIs(err, exception)
+	})
+	suite.T().Run("NoVoteError", func(t *testing.T) {
+		// the EventHandler relies on this sentinel in particular to be passed through
+		sentinel := hotstuffmodel.NewNoVoteErrorf("not voting")
+		sign := func(h *flow.Header) ([]byte, error) {
+			return nil, sentinel
+		}
+		_, err := suite.builder.BuildOn(suite.genesis.ID(), setter, sign)
+		suite.Assert().ErrorIs(err, sentinel)
+	})
+}
+
 // when there are transactions with an unknown reference block in the pool, we should not include them in collections
 func (suite *BuilderSuite) TestBuildOn_WithUnknownReferenceBlock() {
 
 	// before modifying the mempool, note the valid transactions already in the pool
-	validMempoolTransactions := suite.pool.All()
+	validMempoolTransactions := suite.pool.Values()
 
 	// add a transaction with an unknown reference block to the pool
 	unknownReferenceTx := unittest.TransactionBodyFixture()
 	unknownReferenceTx.ReferenceBlockID = unittest.IdentifierFixture()
-	suite.pool.Add(&unknownReferenceTx)
+	suite.pool.Add(unknownReferenceTx.ID(), &unknownReferenceTx)
 
-	header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(suite.genesis.ID(), setter, signer)
 	suite.Require().NoError(err)
 
 	// should be able to retrieve built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	suite.Assert().NoError(err)
 	builtCollection := built.Payload.Collection
 
@@ -279,28 +363,33 @@
 func (suite *BuilderSuite) TestBuildOn_WithUnfinalizedReferenceBlock() {
 
 	// before modifying the mempool, note the valid transactions already in the pool
-	validMempoolTransactions := suite.pool.All()
+	validMempoolTransactions := suite.pool.Values()
 
 	// add an unfinalized block to the protocol state
 	genesis, err := suite.protoState.Final().Head()
 	suite.Require().NoError(err)
-	unfinalizedReferenceBlock := unittest.BlockWithParentFixture(genesis)
-	unfinalizedReferenceBlock.SetPayload(flow.EmptyPayload())
-	err = suite.protoState.ExtendCertified(context.Background(), unfinalizedReferenceBlock,
-		unittest.CertifyBlock(unfinalizedReferenceBlock.Header))
+	protocolState, err := suite.protoState.Final().ProtocolState()
+	suite.Require().NoError(err)
+	protocolStateID := protocolState.ID()
+
+	unfinalizedReferenceBlock := unittest.BlockWithParentAndPayload(
+		genesis,
+		unittest.PayloadFixture(unittest.WithProtocolStateID(protocolStateID)),
+	)
+	err = suite.protoState.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(unfinalizedReferenceBlock))
 	suite.Require().NoError(err)
 
 	// add a transaction with unfinalized reference block to the pool
 	unfinalizedReferenceTx := unittest.TransactionBodyFixture()
 	unfinalizedReferenceTx.ReferenceBlockID = unfinalizedReferenceBlock.ID()
-	suite.pool.Add(&unfinalizedReferenceTx)
+	suite.pool.Add(unfinalizedReferenceTx.ID(), &unfinalizedReferenceTx)
 
-	header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(suite.genesis.ID(), setter, signer)
 	suite.Require().NoError(err)
 
 	// should be able to retrieve built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	suite.Assert().NoError(err)
 	builtCollection := built.Payload.Collection
 
@@ -315,20 +404,28 @@
 func (suite *BuilderSuite) TestBuildOn_WithOrphanedReferenceBlock() {
 
 	// before modifying the mempool, note the valid transactions already in the pool
-	validMempoolTransactions := suite.pool.All()
+	validMempoolTransactions := suite.pool.Values()
 
 	// add an orphaned block to the protocol state
 	genesis, err := suite.protoState.Final().Head()
 	suite.Require().NoError(err)
+	protocolState, err := suite.protoState.Final().ProtocolState()
+	suite.Require().NoError(err)
+	protocolStateID := protocolState.ID()
+
 	// create a block extending genesis which will be orphaned
-	orphan := unittest.BlockWithParentFixture(genesis)
-	orphan.SetPayload(flow.EmptyPayload())
-	err = suite.protoState.ExtendCertified(context.Background(), orphan, unittest.CertifyBlock(orphan.Header))
+	orphan := unittest.BlockWithParentAndPayload(
+		genesis,
+		unittest.PayloadFixture(unittest.WithProtocolStateID(protocolStateID)),
+	)
+	err = suite.protoState.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(orphan))
 	suite.Require().NoError(err)
 	// create and finalize a block on top of genesis, orphaning `orphan`
-	block1 := unittest.BlockWithParentFixture(genesis)
-	block1.SetPayload(flow.EmptyPayload())
-	err = suite.protoState.ExtendCertified(context.Background(), block1, unittest.CertifyBlock(block1.Header))
+	block1 := unittest.BlockWithParentAndPayload(
+		genesis,
+		unittest.PayloadFixture(unittest.WithProtocolStateID(protocolStateID)),
+	)
+	err = suite.protoState.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block1))
 	suite.Require().NoError(err)
 	err = suite.protoState.Finalize(context.Background(), block1.ID())
 	suite.Require().NoError(err)
@@ -336,14 +433,14 @@ func (suite *BuilderSuite) TestBuildOn_WithOrphanedReferenceBlock() {
 	// add a transaction with orphaned reference block to the pool
 	orphanedReferenceTx := unittest.TransactionBodyFixture()
 	orphanedReferenceTx.ReferenceBlockID = orphan.ID()
-	suite.pool.Add(&orphanedReferenceTx)
+	suite.pool.Add(orphanedReferenceTx.ID(), &orphanedReferenceTx)
 
-	header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(suite.genesis.ID(), setter, signer)
 	suite.Require().NoError(err)
 
 	// should be able to retrieve built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	suite.Assert().NoError(err)
 	builtCollection := built.Payload.Collection
 
@@ -359,34 +456,34 @@
 func (suite *BuilderSuite) TestBuildOn_WithForks() {
 	t := suite.T()
 
-	mempoolTransactions := suite.pool.All()
+	mempoolTransactions := suite.pool.Values()
 	tx1 := mempoolTransactions[0] // in fork 1
 	tx2 := mempoolTransactions[1] // in fork 2
 	tx3 := mempoolTransactions[2] // in no block
 
 	// build first fork on top of genesis
-	payload1 := suite.Payload(tx1)
-	block1 := unittest.ClusterBlockWithParent(suite.genesis)
-	block1.SetPayload(payload1)
-
+	block1 := unittest.ClusterBlockFixture(
+		unittest.ClusterBlock.WithParent(suite.genesis),
+		unittest.ClusterBlock.WithPayload(suite.Payload(tx1)),
+	)
 	// insert block on fork 1
 	suite.InsertBlock(block1)
 
 	// build second fork on top of genesis
-	payload2 := suite.Payload(tx2)
-	block2 := unittest.ClusterBlockWithParent(suite.genesis)
-	block2.SetPayload(payload2)
-
+	block2 := unittest.ClusterBlockFixture(
+		unittest.ClusterBlock.WithParent(suite.genesis),
+		unittest.ClusterBlock.WithPayload(suite.Payload(tx2)),
+	)
 	// insert block on fork 2
 	suite.InsertBlock(block2)
 
 	// build on top of fork 1
-	header, err := suite.builder.BuildOn(block1.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(block1.ID(), setter, signer)
 	require.NoError(t, err)
 
 	// should be able to retrieve built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	assert.NoError(t, err)
 	builtCollection := built.Payload.Collection
 
@@ -399,7 +496,7 @@ func (suite *BuilderSuite) TestBuildOn_WithForks() {
 
 func (suite *BuilderSuite) TestBuildOn_ConflictingFinalizedBlock() {
 	t := suite.T()
 
-	mempoolTransactions := suite.pool.All()
+	mempoolTransactions := suite.pool.Values()
 	tx1 := mempoolTransactions[0] // in a finalized block
 	tx2 := mempoolTransactions[1] // in an un-finalized block
 	tx3 := mempoolTransactions[2] // in no blocks
 
@@ -408,28 +505,32 @@
 	// build a block containing tx1 on genesis
 	finalizedPayload := suite.Payload(tx1)
-	finalizedBlock := unittest.ClusterBlockWithParent(suite.genesis)
-	finalizedBlock.SetPayload(finalizedPayload)
+	finalizedBlock := unittest.ClusterBlockFixture(
+		unittest.ClusterBlock.WithParent(suite.genesis),
+		unittest.ClusterBlock.WithPayload(finalizedPayload),
+	)
 	suite.InsertBlock(finalizedBlock)
-	t.Logf("finalized: height=%d id=%s txs=%s parent_id=%s\t\n", finalizedBlock.Header.Height, finalizedBlock.ID(), finalizedPayload.Collection.Light(), finalizedBlock.Header.ParentID)
+	t.Logf("finalized: height=%d id=%s txs=%s parent_id=%s\t\n", finalizedBlock.Height, finalizedBlock.ID(), finalizedPayload.Collection.Light(), finalizedBlock.ParentID)
 
 	// build a block containing tx2 on the first block
 	unFinalizedPayload := suite.Payload(tx2)
-	unFinalizedBlock := unittest.ClusterBlockWithParent(&finalizedBlock)
-	unFinalizedBlock.SetPayload(unFinalizedPayload)
+	unFinalizedBlock := unittest.ClusterBlockFixture(
+		unittest.ClusterBlock.WithParent(finalizedBlock),
+		unittest.ClusterBlock.WithPayload(unFinalizedPayload),
+	)
 	suite.InsertBlock(unFinalizedBlock)
-	t.Logf("finalized: height=%d id=%s txs=%s parent_id=%s\t\n", unFinalizedBlock.Header.Height, unFinalizedBlock.ID(), unFinalizedPayload.Collection.Light(), unFinalizedBlock.Header.ParentID)
+	t.Logf("unfinalized: height=%d id=%s txs=%s parent_id=%s\t\n", unFinalizedBlock.Height, unFinalizedBlock.ID(), unFinalizedPayload.Collection.Light(), unFinalizedBlock.ParentID)
 
 	// finalize first block
-	suite.FinalizeBlock(finalizedBlock)
+	suite.FinalizeBlock(*finalizedBlock)
 
 	// build on the un-finalized block
-	header, err := suite.builder.BuildOn(unFinalizedBlock.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(unFinalizedBlock.ID(), setter, signer)
 	require.NoError(t, err)
 
 	// retrieve the built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	assert.NoError(t, err)
 	builtCollection := built.Payload.Collection
 
@@ -447,7 +548,7 @@
 func (suite *BuilderSuite) TestBuildOn_ConflictingInvalidatedForks() {
 	t := suite.T()
 
-	mempoolTransactions := suite.pool.All()
+	mempoolTransactions := suite.pool.Values()
 	tx1 := mempoolTransactions[0] // in a finalized block
 	tx2 := mempoolTransactions[1] // in an invalidated block
 	tx3 := mempoolTransactions[2] // in no blocks
 
@@ -455,30 +556,31 @@
 	t.Logf("tx1: %s\ntx2: %s\ntx3: %s", tx1.ID(), tx2.ID(), tx3.ID())
 
 	// build a block containing tx1 on genesis - will be finalized
-	finalizedPayload := suite.Payload(tx1)
-	finalizedBlock := unittest.ClusterBlockWithParent(suite.genesis)
-	finalizedBlock.SetPayload(finalizedPayload)
-
+	finalizedBlock := unittest.ClusterBlockFixture(
+		unittest.ClusterBlock.WithParent(suite.genesis),
+		unittest.ClusterBlock.WithPayload(suite.Payload(tx1)),
+	)
 	suite.InsertBlock(finalizedBlock)
-	t.Logf("finalized: id=%s\tparent_id=%s\theight=%d\n", finalizedBlock.ID(), finalizedBlock.Header.ParentID, finalizedBlock.Header.Height)
+	t.Logf("finalized: id=%s\tparent_id=%s\theight=%d\n", finalizedBlock.ID(), finalizedBlock.ParentID, finalizedBlock.Height)
 
 	// build a block containing tx2 ALSO on genesis - will be invalidated
-	invalidatedPayload := suite.Payload(tx2)
-	invalidatedBlock := unittest.ClusterBlockWithParent(suite.genesis)
-	invalidatedBlock.SetPayload(invalidatedPayload)
+	invalidatedBlock := unittest.ClusterBlockFixture(
+		unittest.ClusterBlock.WithParent(suite.genesis),
+		unittest.ClusterBlock.WithPayload(suite.Payload(tx2)),
+	)
 	suite.InsertBlock(invalidatedBlock)
-	t.Logf("invalidated: id=%s\tparent_id=%s\theight=%d\n", invalidatedBlock.ID(), invalidatedBlock.Header.ParentID, invalidatedBlock.Header.Height)
+	t.Logf("invalidated: id=%s\tparent_id=%s\theight=%d\n", invalidatedBlock.ID(), invalidatedBlock.ParentID, invalidatedBlock.Height)
 
 	// finalize first block - this indirectly invalidates the second block
-	suite.FinalizeBlock(finalizedBlock)
+	suite.FinalizeBlock(*finalizedBlock)
 
 	// build on the finalized block
-	header, err := suite.builder.BuildOn(finalizedBlock.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(finalizedBlock.ID(), setter, signer)
 	require.NoError(t, err)
 
 	// retrieve the built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	assert.NoError(t, err)
 	builtCollection := built.Payload.Collection
 
@@ -493,7 +595,22 @@
 func (suite *BuilderSuite) TestBuildOn_LargeHistory() {
 	// use a mempool with 2000 transactions, one per block
 	suite.pool = herocache.NewTransactions(2000, unittest.Logger(), metrics.NewNoopCollector())
-	suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(10000))
+	suite.builder, _ = builder.NewBuilder(
+		suite.db,
+		trace.NewNoopTracer(),
+		suite.lockManager,
+		metrics.NewNoopCollector(),
+		suite.protoState,
+		suite.state,
+		suite.headers,
+		suite.headers,
+		suite.payloads,
+		suite.pool,
+		unittest.Logger(),
+		suite.epochCounter,
+		updatable_configs.DefaultBySealingLagRateLimiterConfigs(),
+		builder.WithMaxCollectionSize(10000),
+	)
 
 	// get a valid reference block ID
 	final, err := suite.protoState.Final().Head()
@@ -501,7 +618,7 @@
 	refID := final.ID()
 
 	// keep track of the head of the chain
-	head := *suite.genesis
+	head := suite.genesis
 
 	// keep track of invalidated transaction IDs
 	var invalidatedTxIds []flow.Identifier
@@ -515,7 +632,7 @@
 		tx.ReferenceBlockID = refID
 		tx.ProposalKey.SequenceNumber = uint64(i)
 	})
-		added := suite.pool.Add(&tx)
+		added := suite.pool.Add(tx.ID(), &tx)
 		assert.True(t, added)
 
 		// 1/3 of the time create a conflicting fork that will be invalidated
@@ -525,31 +642,32 @@
 		// by default, build on the head - if we are building a
 		// conflicting fork, build on the parent of the head
-		parent := head
+		parent := *head
 		if conflicting {
-			err = suite.db.View(procedure.RetrieveClusterBlock(parent.Header.ParentID, &parent))
+			err = operation.RetrieveClusterBlock(suite.db.Reader(), parent.ParentID, &parent)
 			assert.NoError(t, err)
 			// add the transaction to the invalidated list
 			invalidatedTxIds = append(invalidatedTxIds, tx.ID())
 		}
 
 		// create a block containing the transaction
-		block := unittest.ClusterBlockWithParent(&head)
-		payload := suite.Payload(&tx)
-		block.SetPayload(payload)
+		block := unittest.ClusterBlockFixture(
+			unittest.ClusterBlock.WithParent(head),
+			unittest.ClusterBlock.WithPayload(suite.Payload(&tx)),
+		)
 		suite.InsertBlock(block)
 
 		// reset the valid head if we aren't building a conflicting fork
 		if !conflicting {
 			head = block
-			suite.FinalizeBlock(block)
+			suite.FinalizeBlock(*block)
 			assert.NoError(t, err)
 		}
 
 		// stop building blocks once we've built a history which exceeds the transaction
 		// expiry length - this tests that deduplication works properly against old blocks
 		// which nevertheless have a potentially conflicting reference block
-		if head.Header.Height > flow.DefaultTransactionExpiry+100 {
+		if head.Height > flow.DefaultTransactionExpiry+100 {
 			break
 		}
 	}
 
@@ -557,12 +675,12 @@
 	t.Log("conflicting: ", len(invalidatedTxIds))
 
 	// build on the head block
-	header, err := suite.builder.BuildOn(head.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(head.ID(), setter, signer)
 	require.NoError(t, err)
 
 	// retrieve the built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	require.NoError(t, err)
 	builtCollection := built.Payload.Collection
 
@@ -573,15 +691,30 @@
 func (suite *BuilderSuite) TestBuildOn_MaxCollectionSize() {
 	// set the max collection size to 1
-	suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionSize(1))
+	suite.builder, _ = builder.NewBuilder(
+		suite.db,
+		trace.NewNoopTracer(),
+		suite.lockManager,
+		metrics.NewNoopCollector(),
+		suite.protoState,
+		suite.state,
+		suite.headers,
+		suite.headers,
+		suite.payloads,
+		suite.pool,
+		unittest.Logger(),
+		suite.epochCounter,
+		updatable_configs.DefaultBySealingLagRateLimiterConfigs(),
+		builder.WithMaxCollectionSize(1),
+	)
 
 	// build a block
-	header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(suite.genesis.ID(), setter, signer)
 	suite.Require().NoError(err)
 
 	// retrieve the built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	suite.Require().NoError(err)
 	builtCollection := built.Payload.Collection
 
@@ -591,15 +724,30 @@
 func (suite *BuilderSuite) TestBuildOn_MaxCollectionByteSize() {
 	// set the max collection byte size to 400 (each tx is about 150 bytes)
-	suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionByteSize(400))
+	suite.builder, _ = builder.NewBuilder(
+		suite.db,
+		trace.NewNoopTracer(),
+		suite.lockManager,
+		metrics.NewNoopCollector(),
+		suite.protoState,
+		suite.state,
+		suite.headers,
+		suite.headers,
+		suite.payloads,
+		suite.pool,
+		unittest.Logger(),
+		suite.epochCounter,
+		updatable_configs.DefaultBySealingLagRateLimiterConfigs(),
+		builder.WithMaxCollectionByteSize(400),
+	)
 
 	// build a block
-	header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(suite.genesis.ID(), setter, signer)
 	suite.Require().NoError(err)
 
 	// retrieve the built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	suite.Require().NoError(err)
 	builtCollection := built.Payload.Collection
 
@@ -609,15 +757,30 @@
 func (suite *BuilderSuite) TestBuildOn_MaxCollectionTotalGas() {
 	// set the max gas to 20,000
-	suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, builder.WithMaxCollectionTotalGas(20000))
+	suite.builder, _ = builder.NewBuilder(
+		suite.db,
+		trace.NewNoopTracer(),
+		suite.lockManager,
+		metrics.NewNoopCollector(),
+		suite.protoState,
+		suite.state,
+		suite.headers,
+		suite.headers,
+		suite.payloads,
+		suite.pool,
+		unittest.Logger(),
+		suite.epochCounter,
+		updatable_configs.DefaultBySealingLagRateLimiterConfigs(),
+		builder.WithMaxCollectionTotalGas(20000),
+	)
 
 	// build a block
-	header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(suite.genesis.ID(), setter, signer)
 	suite.Require().NoError(err)
 
 	// retrieve the built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	suite.Require().NoError(err)
 	builtCollection := built.Payload.Collection
 
@@ -630,30 +793,51 @@
 func (suite *BuilderSuite) TestBuildOn_ExpiredTransaction() {
 	// create enough main-chain blocks that an expired transaction is possible
 	genesis, err := suite.protoState.Final().Head()
 	suite.Require().NoError(err)
+	protocolState, err := suite.protoState.Final().ProtocolState()
+	suite.Require().NoError(err)
+	protocolStateID := protocolState.ID()
 
 	head := genesis
 	for i := 0; i < flow.DefaultTransactionExpiry+1; i++ {
-		block := unittest.BlockWithParentFixture(head)
-		block.Payload.Guarantees = nil
-		block.Payload.Seals = nil
-		block.Header.PayloadHash = block.Payload.Hash()
-		err = suite.protoState.ExtendCertified(context.Background(), block, unittest.CertifyBlock(block.Header))
+		block := unittest.BlockWithParentAndPayload(
+			head,
+			unittest.PayloadFixture(unittest.WithProtocolStateID(protocolStateID)),
+		)
+		err = suite.protoState.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block))
 		suite.Require().NoError(err)
 		err = suite.protoState.Finalize(context.Background(), block.ID())
 		suite.Require().NoError(err)
-		head = block.Header
+		head = block.ToHeader()
 	}
 
+	config := updatable_configs.DefaultBySealingLagRateLimiterConfigs()
+	require.NoError(suite.T(), config.SetMaxSealingLag(flow.DefaultTransactionExpiry*2))
+	require.NoError(suite.T(), config.SetHalvingInterval(flow.DefaultTransactionExpiry*2))
+
 	// reset the pool and builder
 	suite.pool = herocache.NewTransactions(10, unittest.Logger(), metrics.NewNoopCollector())
-	suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter)
+	suite.builder, _ = builder.NewBuilder(
+		suite.db,
+		trace.NewNoopTracer(),
+		suite.lockManager,
+		metrics.NewNoopCollector(),
+		suite.protoState,
+		suite.state,
+		suite.headers,
+		suite.headers,
+		suite.payloads,
+		suite.pool,
+		unittest.Logger(),
+		suite.epochCounter,
+		config,
+	)
 
 	// insert a transaction referring to genesis (now expired)
 	tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) {
 		tx.ReferenceBlockID = genesis.ID()
 		tx.ProposalKey.SequenceNumber = 0
 	})
-	added := suite.pool.Add(&tx1)
+	added := suite.pool.Add(tx1.ID(), &tx1)
 	suite.Assert().True(added)
 
 	// insert a transaction referencing the head (valid)
@@ -661,19 +845,19 @@
 		tx.ReferenceBlockID = head.ID()
 		tx.ProposalKey.SequenceNumber = 1
 	})
-	added = suite.pool.Add(&tx2)
+	added = suite.pool.Add(tx2.ID(), &tx2)
 	suite.Assert().True(added)
 
 	suite.T().Log("tx1: ", tx1.ID())
 	suite.T().Log("tx2: ", tx2.ID())
 
 	// build a block
-	header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(suite.genesis.ID(), setter, signer)
 	suite.Require().NoError(err)
 
 	// retrieve the built block from storage
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	suite.Require().NoError(err)
 	builtCollection := built.Payload.Collection
 
@@ -688,13 +872,27 @@
 func (suite *BuilderSuite) TestBuildOn_EmptyMempool() {
 	// start with an empty mempool
 	suite.pool = herocache.NewTransactions(1000, unittest.Logger(), metrics.NewNoopCollector())
-	suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter)
+	suite.builder, _ = builder.NewBuilder(
+		suite.db,
+		trace.NewNoopTracer(),
+		suite.lockManager,
+		metrics.NewNoopCollector(),
+		suite.protoState,
+		suite.state,
+		suite.headers,
+		suite.headers,
+		suite.payloads,
+		suite.pool,
+		unittest.Logger(),
+		suite.epochCounter,
+		updatable_configs.DefaultBySealingLagRateLimiterConfigs(),
+	)
 
-	header, err := suite.builder.BuildOn(suite.genesis.ID(), noopSetter)
+	header, err := suite.builder.BuildOn(suite.genesis.ID(), setter, signer)
 	suite.Require().NoError(err)
 
 	var built model.Block
-	err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+	err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 	suite.Require().NoError(err)
 
 	// should reference a valid reference block
@@ -715,7 +913,20 @@
 func (suite *BuilderSuite) TestBuildOn_NoRateLimiting() {
 	suite.ClearPool()
 
 	// create builder with no rate limit and max 10 tx/collection
-	suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter,
+	suite.builder, _ = builder.NewBuilder(
+		suite.db,
+		trace.NewNoopTracer(),
+		suite.lockManager,
+		metrics.NewNoopCollector(),
+		suite.protoState,
+		suite.state,
+		suite.headers,
+		suite.headers,
+		suite.payloads,
+		suite.pool,
+		unittest.Logger(),
+		suite.epochCounter,
+		updatable_configs.DefaultBySealingLagRateLimiterConfigs(),
 		builder.WithMaxCollectionSize(10),
 		builder.WithMaxPayerTransactionRate(0),
 	)
@@ -732,14 +943,27 @@
 	// since we have no rate limiting we should fill all collections in 10 blocks
 	parentID := suite.genesis.ID()
+	setter := func(h *flow.HeaderBodyBuilder) error {
+		h.WithHeight(1).
+			WithChainID(flow.Emulator).
+			WithParentID(parentID).
+			WithView(1337).
+			WithParentView(1336).
+			WithParentVoterIndices(unittest.SignerIndicesFixture(4)).
+			WithParentVoterSigData(unittest.QCSigDataFixture()).
+			WithProposerID(unittest.IdentifierFixture())
+
+		return nil
+	}
+
 	for i := 0; i < 10; i++ {
-		header, err := suite.builder.BuildOn(parentID, noopSetter)
+		header, err := suite.builder.BuildOn(parentID, setter, signer)
 		suite.Require().NoError(err)
-		parentID = header.ID()
+		parentID = header.Header.ID()
 
 		// each collection should be full with 10 transactions
 		var built model.Block
-		err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built))
+		err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built)
 		suite.Assert().NoError(err)
 		suite.Assert().Len(built.Payload.Collection.Transactions, 10)
 	}
 
@@ -756,7 +980,20 @@
 func (suite *BuilderSuite) TestBuildOn_RateLimitNonPayer() {
 	suite.ClearPool()
 
 	// create builder with 5 tx/payer and max 10 tx/collection
-	suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter,
+	suite.builder, _ = builder.NewBuilder(
+		suite.db,
+		trace.NewNoopTracer(),
+		suite.lockManager,
+		metrics.NewNoopCollector(),
+		suite.protoState,
+		suite.state,
+		suite.headers,
+		suite.headers,
+		suite.payloads,
+		suite.pool,
+		unittest.Logger(),
+		suite.epochCounter,
+		updatable_configs.DefaultBySealingLagRateLimiterConfigs(),
 		builder.WithMaxCollectionSize(10),
 		builder.WithMaxPayerTransactionRate(5),
 	)
@@ -770,7 +1007,7 @@
 		tx.Payer = unittest.RandomAddressFixture()
 		tx.ProposalKey = flow.ProposalKey{
 			Address:        proposer,
-			KeyIndex:       rand.Uint64(),
+			KeyIndex:       rand.Uint32(),
 			SequenceNumber: rand.Uint64(),
 		}
 		return &tx
@@ -779,14 +1016,25 @@
 	// since rate limiting does not apply to non-payer keys, we should fill all collections in 10 blocks
 	parentID := suite.genesis.ID()
+	setter := func(h *flow.HeaderBodyBuilder) error {
+		h.WithChainID(flow.Emulator).
+			WithParentID(parentID).
+			WithView(1337).
+			WithParentView(1336).
+ WithParentVoterIndices(unittest.SignerIndicesFixture(4)). + WithParentVoterSigData(unittest.QCSigDataFixture()). + WithProposerID(unittest.IdentifierFixture()) + + return nil + } for i := 0; i < 10; i++ { - header, err := suite.builder.BuildOn(parentID, noopSetter) + header, err := suite.builder.BuildOn(parentID, setter, signer) suite.Require().NoError(err) - parentID = header.ID() + parentID = header.Header.ID() // each collection should be full with 10 transactions var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built) suite.Assert().NoError(err) suite.Assert().Len(built.Payload.Collection.Transactions, 10) } @@ -800,7 +1048,20 @@ func (suite *BuilderSuite) TestBuildOn_HighRateLimit() { suite.ClearPool() // create builder with 5 tx/payer and max 10 tx/collection - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, + suite.builder, _ = builder.NewBuilder( + suite.db, + trace.NewNoopTracer(), + suite.lockManager, + metrics.NewNoopCollector(), + suite.protoState, + suite.state, + suite.headers, + suite.headers, + suite.payloads, + suite.pool, + unittest.Logger(), + suite.epochCounter, + updatable_configs.DefaultBySealingLagRateLimiterConfigs(), builder.WithMaxCollectionSize(10), builder.WithMaxPayerTransactionRate(5), ) @@ -817,19 +1078,113 @@ // rate-limiting should be applied, resulting in half-full collections (5/10) parentID := suite.genesis.ID() + setter := func(h *flow.HeaderBodyBuilder) error { + h.WithChainID(flow.Emulator). + WithParentID(parentID). + WithView(1337). + WithParentView(1336). + WithParentVoterIndices(unittest.SignerIndicesFixture(4)). + WithParentVoterSigData(unittest.QCSigDataFixture()). + WithProposerID(unittest.IdentifierFixture()) + + return nil + } for i := 0; i < 10; i++ { - header, err := suite.builder.BuildOn(parentID, noopSetter) + header, err := suite.builder.BuildOn(parentID, setter, signer) suite.Require().NoError(err) - parentID = header.ID() + parentID = header.Header.ID() // each collection should be half-full with 5 transactions var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built) suite.Assert().NoError(err) suite.Assert().Len(built.Payload.Collection.Transactions, 5) } } +// TestBuildOn_MaxCollectionSizeRateLimiting tests that when the sealing lag exceeds the maximum allowed value, +// the builder applies rate-limiting to the collection size, resulting in the minimal collection size.
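The expected outcome of the test below can be read off the halving rule implemented in rate_limiter.go further down in this diff: with both sealing-lag bounds pinned at 50, a lag of 100 finalized-but-unsealed blocks sits beyond the upper bound, so the builder clamps every collection to the configured minimum size. A minimal sketch of that arithmetic (the collection-size bounds here are illustrative; the test reads its minimum from the default config):

	// Once the sealing lag reaches maxSealingLag, StepHalving returns the
	// lower collection-size bound regardless of the halving interval.
	lag := uint(100) // 100 finalized but unsealed blocks
	size := StepHalving(
		[2]uint{50, 50},  // minSealingLag == maxSealingLag == 50
		[2]uint{1, 100},  // illustrative [minCollectionSize, maxCollectionSize]
		lag,
		10, // halving interval; irrelevant once lag >= maxSealingLag
	)
	// size == 1: collections are clamped to the minimum size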
+func (suite *BuilderSuite) TestBuildOn_MaxCollectionSizeRateLimiting() { + + // start with an empty mempool + suite.ClearPool() + + cfg := updatable_configs.DefaultBySealingLagRateLimiterConfigs() + suite.Require().NoError(cfg.SetMinSealingLag(50)) // set min sealing lag to 50 blocks so we can hit rate limiting + suite.Require().NoError(cfg.SetMaxSealingLag(50)) // set max sealing lag to 50 blocks so we can hit rate limiting + suite.builder, _ = builder.NewBuilder( + suite.db, + trace.NewNoopTracer(), + suite.lockManager, + metrics.NewNoopCollector(), + suite.protoState, + suite.state, + suite.headers, + suite.headers, + suite.payloads, + suite.pool, + unittest.Logger(), + suite.epochCounter, + cfg, + builder.WithMaxCollectionSize(100), + ) + + // fill the pool with 50 transactions from the same payer + payer := unittest.RandomAddressFixture() + create := func() *flow.TransactionBody { + tx := unittest.TransactionBodyFixture() + tx.ReferenceBlockID = suite.ProtoStateRoot().ID() + tx.Payer = payer + return &tx + } + suite.FillPool(50, create) + + // look up the finalized head and protocol state ID, which the chain built below extends + genesis, err := suite.protoState.Final().Head() + suite.Require().NoError(err) + protocolState, err := suite.protoState.Final().ProtocolState() + suite.Require().NoError(err) + protocolStateID := protocolState.ID() + + head := genesis + // build a long chain of blocks that were finalized but not sealed + // this will lead to a big sealing lag. + for i := 0; i < 100; i++ { + block := unittest.BlockWithParentAndPayload(head, unittest.PayloadFixture(unittest.WithProtocolStateID(protocolStateID))) + err = suite.protoState.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block)) + suite.Require().NoError(err) + err = suite.protoState.Finalize(context.Background(), block.ID()) + suite.Require().NoError(err) + head = block.ToHeader() + } + + rateLimiterCfg := updatable_configs.DefaultBySealingLagRateLimiterConfigs() + + // rate-limiting should be applied, resulting in minimum collection size. + parentID := suite.genesis.ID() + setter := func(h *flow.HeaderBodyBuilder) error { + h.WithChainID(flow.Emulator). + WithParentID(parentID). + WithView(1337). + WithParentView(1336). + WithParentVoterIndices(unittest.SignerIndicesFixture(4)). + WithParentVoterSigData(unittest.QCSigDataFixture()).
+ WithProposerID(unittest.IdentifierFixture()) + return nil + } + for i := 0; i < 10; i++ { + header, err := suite.builder.BuildOn(parentID, setter, signer) + suite.Require().NoError(err) + parentID = header.Header.ID() + + // each collection should be equal to the minimum collection size + var built model.Block + err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built) + suite.Assert().NoError(err) + suite.Assert().Len(built.Payload.Collection.Transactions, int(rateLimiterCfg.MinCollectionSize())) + } +} + // When configured with a rate limit of k<1, we should be able to include 1 // transactions with a given payer every ceil(1/k) collections func (suite *BuilderSuite) TestBuildOn_LowRateLimit() { @@ -838,7 +1193,20 @@ func (suite *BuilderSuite) TestBuildOn_LowRateLimit() { suite.ClearPool() // create builder with .5 tx/payer and max 10 tx/collection - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, + suite.builder, _ = builder.NewBuilder( + suite.db, + trace.NewNoopTracer(), + suite.lockManager, + metrics.NewNoopCollector(), + suite.protoState, + suite.state, + suite.headers, + suite.headers, + suite.payloads, + suite.pool, + unittest.Logger(), + suite.epochCounter, + updatable_configs.DefaultBySealingLagRateLimiterConfigs(), builder.WithMaxCollectionSize(10), builder.WithMaxPayerTransactionRate(.5), ) @@ -856,14 +1224,25 @@ func (suite *BuilderSuite) TestBuildOn_LowRateLimit() { // rate-limiting should be applied, resulting in every ceil(1/k) collections // having one transaction and empty collections otherwise parentID := suite.genesis.ID() + setter := func(h *flow.HeaderBodyBuilder) error { + h.WithChainID(flow.Emulator). + WithParentID(parentID). + WithView(1337). + WithParentView(1336). + WithParentVoterIndices(unittest.SignerIndicesFixture(4)). + WithParentVoterSigData(unittest.QCSigDataFixture()). 
+ WithProposerID(unittest.IdentifierFixture()) + + return nil + } for i := 0; i < 10; i++ { - header, err := suite.builder.BuildOn(parentID, noopSetter) + header, err := suite.builder.BuildOn(parentID, setter, signer) suite.Require().NoError(err) - parentID = header.ID() + parentID = header.Header.ID() // collections should either be empty or have 1 transaction var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built) suite.Assert().NoError(err) if i%2 == 0 { suite.Assert().Len(built.Payload.Collection.Transactions, 1) @@ -880,7 +1259,20 @@ func (suite *BuilderSuite) TestBuildOn_UnlimitedPayer() { // create builder with 5 tx/payer and max 10 tx/collection // configure an unlimited payer payer := unittest.RandomAddressFixture() - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, + suite.builder, _ = builder.NewBuilder( + suite.db, + trace.NewNoopTracer(), + suite.lockManager, + metrics.NewNoopCollector(), + suite.protoState, + suite.state, + suite.headers, + suite.headers, + suite.payloads, + suite.pool, + unittest.Logger(), + suite.epochCounter, + updatable_configs.DefaultBySealingLagRateLimiterConfigs(), builder.WithMaxCollectionSize(10), builder.WithMaxPayerTransactionRate(5), builder.WithUnlimitedPayers(payer), @@ -897,14 +1289,25 @@ func (suite *BuilderSuite) TestBuildOn_UnlimitedPayer() { // rate-limiting should not be applied, since the payer is marked as unlimited parentID := suite.genesis.ID() + setter := func(h *flow.HeaderBodyBuilder) error { + h.WithChainID(flow.Emulator). + WithParentID(parentID). + WithView(1337). + WithParentView(1336). + WithParentVoterIndices(unittest.SignerIndicesFixture(4)). + WithParentVoterSigData(unittest.QCSigDataFixture()). 
+ WithProposerID(unittest.IdentifierFixture()) + + return nil + } for i := 0; i < 10; i++ { - header, err := suite.builder.BuildOn(parentID, noopSetter) + header, err := suite.builder.BuildOn(parentID, setter, signer) suite.Require().NoError(err) - parentID = header.ID() + parentID = header.Header.ID() // each collection should be full with 10 transactions var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built) suite.Assert().NoError(err) suite.Assert().Len(built.Payload.Collection.Transactions, 10) @@ -921,7 +1324,20 @@ func (suite *BuilderSuite) TestBuildOn_RateLimitDryRun() { // create builder with 5 tx/payer and max 10 tx/collection // configure an unlimited payer payer := unittest.RandomAddressFixture() - suite.builder, _ = builder.NewBuilder(suite.db, trace.NewNoopTracer(), suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter, + suite.builder, _ = builder.NewBuilder( + suite.db, + trace.NewNoopTracer(), + suite.lockManager, + metrics.NewNoopCollector(), + suite.protoState, + suite.state, + suite.headers, + suite.headers, + suite.payloads, + suite.pool, + unittest.Logger(), + suite.epochCounter, + updatable_configs.DefaultBySealingLagRateLimiterConfigs(), builder.WithMaxCollectionSize(10), builder.WithMaxPayerTransactionRate(5), builder.WithRateLimitDryRun(true), @@ -938,19 +1354,84 @@ // rate-limiting should not be applied, since dry-run setting is enabled parentID := suite.genesis.ID() + setter := func(h *flow.HeaderBodyBuilder) error { + h.WithChainID(flow.Emulator). + WithParentID(parentID). + WithView(1337). + WithParentView(1336). + WithParentVoterIndices(unittest.SignerIndicesFixture(4)). + WithParentVoterSigData(unittest.QCSigDataFixture()). + WithProposerID(unittest.IdentifierFixture()) + + return nil + } for i := 0; i < 10; i++ { - header, err := suite.builder.BuildOn(parentID, noopSetter) + header, err := suite.builder.BuildOn(parentID, setter, signer) suite.Require().NoError(err) - parentID = header.ID() + parentID = header.Header.ID() // each collection should be full with 10 transactions var built model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(header.ID(), &built)) + err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built) suite.Assert().NoError(err) suite.Assert().Len(built.Payload.Collection.Transactions, 10) } } +// TestBuildOn_SystemTxAlwaysIncluded tests that transactions made by a priority payer are always included +// in the collection, even if the mempool has more transactions than the maximum collection size.
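The property asserted by the test below can be pictured as a two-pass selection: transactions from priority payers are taken first, and only the remaining capacity is filled from the rest of the mempool. A minimal sketch of that idea, where selectByPriority is a hypothetical helper illustrating the selection property rather than the builder's actual internal loop:

	// selectByPriority sketches priority-first selection up to maxSize.
	func selectByPriority(txs []*flow.TransactionBody, priority map[flow.Address]struct{}, maxSize int) []*flow.TransactionBody {
		selected := make([]*flow.TransactionBody, 0, maxSize)
		for _, tx := range txs { // first pass: priority payers only
			if _, ok := priority[tx.Payer]; ok && len(selected) < maxSize {
				selected = append(selected, tx)
			}
		}
		for _, tx := range txs { // second pass: fill remaining capacity
			if _, ok := priority[tx.Payer]; !ok && len(selected) < maxSize {
				selected = append(selected, tx)
			}
		}
		return selected
	}

With maxSize of 2 and exactly two priority-payer transactions in the pool, the result contains only those two, which is what the test asserts.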
+func (suite *BuilderSuite) TestBuildOn_SystemTxAlwaysIncluded() { + // start with an empty mempool + suite.ClearPool() + + // create builder with max 2 tx/collection + // configure a priority payer whose transactions must always be included + serviceAccountAddress := unittest.AddressFixture() // priority address + suite.builder, _ = builder.NewBuilder( + suite.db, + trace.NewNoopTracer(), + suite.lockManager, + metrics.NewNoopCollector(), + suite.protoState, + suite.state, + suite.headers, + suite.headers, + suite.payloads, + suite.pool, + unittest.Logger(), + suite.epochCounter, + updatable_configs.DefaultBySealingLagRateLimiterConfigs(), + builder.WithMaxCollectionSize(2), + builder.WithPriorityPayers(serviceAccountAddress), + ) + + // fill the pool with 100 regular transactions, plus 2 from the priority payer + suite.FillPool(100, func() *flow.TransactionBody { + tx := unittest.TransactionBodyFixture() + tx.ReferenceBlockID = suite.ProtoStateRoot().ID() + return &tx + }) + suite.FillPool(2, func() *flow.TransactionBody { + tx := unittest.TransactionBodyFixture() + tx.ReferenceBlockID = suite.ProtoStateRoot().ID() + tx.Payer = serviceAccountAddress + return &tx + }) + + // the priority payer's transactions should fill the entire (size-2) collection + parentID := suite.genesis.ID() + header, err := suite.builder.BuildOn(parentID, setter, signer) + suite.Require().NoError(err) + + var built model.Block + err = operation.RetrieveClusterBlock(suite.db.Reader(), header.Header.ID(), &built) + suite.Assert().NoError(err) + suite.Assert().Len(built.Payload.Collection.Transactions, 2) + for _, tx := range built.Payload.Collection.Transactions { + suite.Assert().Equal(serviceAccountAddress, tx.Payer) + } +} + // helper to check whether a collection contains each of the given transactions. func collectionContains(collection flow.Collection, txIDs ...flow.Identifier) bool { @@ -988,13 +1469,16 @@ func benchmarkBuildOn(b *testing.B, size int) { { var err error - suite.genesis = model.Genesis() - suite.chainID = suite.genesis.Header.ChainID + suite.genesis, err = unittest.ClusterBlock.Genesis() + require.NoError(suite.T(), err) + suite.chainID = suite.genesis.ChainID + suite.lockManager = storage.NewTestingLockManager() suite.pool = herocache.NewTransactions(1000, unittest.Logger(), metrics.NewNoopCollector()) suite.dbdir = unittest.TempDir(b) - suite.db = unittest.BadgerDB(b, suite.dbdir) + pdb := unittest.PebbleDB(suite.T(), suite.dbdir) + suite.db = pebbleimpl.ToDB(pdb) defer func() { err = suite.db.Close() assert.NoError(b, err) @@ -1004,49 +1488,73 @@ metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() - all := sutil.StorageLayer(suite.T(), suite.db) + all := store.InitAll(metrics, suite.db) suite.headers = all.Headers suite.blocks = all.Blocks - suite.payloads = bstorage.NewClusterPayloads(metrics, suite.db) + suite.payloads = store.NewClusterPayloads(metrics, suite.db) qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(suite.genesis.ID())) stateRoot, err := clusterkv.NewStateRoot(suite.genesis, qc, suite.epochCounter) - state, err := clusterkv.Bootstrap(suite.db, stateRoot) + state, err := clusterkv.Bootstrap(suite.db, suite.lockManager, stateRoot) assert.NoError(b, err) - suite.state, err = clusterkv.NewMutableState(state, tracer, suite.headers, suite.payloads) + suite.state, err = clusterkv.NewMutableState(state, suite.lockManager, tracer, suite.headers, suite.payloads) assert.NoError(b, err) // add some transactions to transaction pool for i := 0; i < 3; i++ { tx :=
unittest.TransactionBodyFixture() - added := suite.pool.Add(&tx) + added := suite.pool.Add(tx.ID(), &tx) assert.True(b, added) } // create the builder - suite.builder, _ = builder.NewBuilder(suite.db, tracer, suite.protoState, suite.state, suite.headers, suite.headers, suite.payloads, suite.pool, unittest.Logger(), suite.epochCounter) + suite.builder, _ = builder.NewBuilder( + suite.db, + tracer, + suite.lockManager, + metrics, + suite.protoState, + suite.state, + suite.headers, + suite.headers, + suite.payloads, + suite.pool, + unittest.Logger(), + suite.epochCounter, + updatable_configs.DefaultBySealingLagRateLimiterConfigs(), + ) } // create a block history to test performance against final := suite.genesis for i := 0; i < size; i++ { - block := unittest.ClusterBlockWithParent(final) - err := suite.db.Update(procedure.InsertClusterBlock(&block)) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(final), + ) + err := unittest.WithLock(b, suite.lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(block)) + }) + }) require.NoError(b, err) // finalize the block 80% of the time, resulting in a fork-rate of 20% if rand.Intn(100) < 80 { - err = suite.db.Update(procedure.FinalizeClusterBlock(block.ID())) + err = unittest.WithLock(b, suite.lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.FinalizeClusterBlock(lctx, rw, block.ID()) + }) + }) require.NoError(b, err) - final = &block + final = block } } b.StartTimer() for n := 0; n < b.N; n++ { - _, err := suite.builder.BuildOn(final.ID(), noopSetter) + _, err := suite.builder.BuildOn(final.ID(), setter, signer) assert.NoError(b, err) } } diff --git a/module/builder/collection/config.go b/module/builder/collection/config.go index d3bd9281ac6..1c7e9b457da 100644 --- a/module/builder/collection/config.go +++ b/module/builder/collection/config.go @@ -39,6 +39,10 @@ type Config struct { // rate limiting. UnlimitedPayers map[flow.Address]struct{} + // PriorityPayers is a set of addresses which are prioritized for inclusion + // in the transaction selection algorithm. + PriorityPayers map[flow.Address]struct{} + // MaxCollectionByteSize is the maximum byte size of a collection. 
MaxCollectionByteSize uint64 @@ -88,7 +92,7 @@ func WithMaxPayerTransactionRate(rate float64) Opt { } func WithUnlimitedPayers(payers ...flow.Address) Opt { - lookup := make(map[flow.Address]struct{}) + lookup := make(map[flow.Address]struct{}, len(payers)) for _, payer := range payers { lookup[payer] = struct{}{} } @@ -97,6 +101,16 @@ func WithUnlimitedPayers(payers ...flow.Address) Opt { } } +func WithPriorityPayers(payers ...flow.Address) Opt { + lookup := make(map[flow.Address]struct{}, len(payers)) + for _, payer := range payers { + lookup[payer] = struct{}{} + } + return func(c *Config) { + c.PriorityPayers = lookup + } +} + func WithMaxCollectionByteSize(limit uint64) Opt { return func(c *Config) { c.MaxCollectionByteSize = limit diff --git a/module/builder/collection/rate_limiter.go b/module/builder/collection/rate_limiter.go index 3643beae89b..9f773c3acde 100644 --- a/module/builder/collection/rate_limiter.go +++ b/module/builder/collection/rate_limiter.go @@ -1,9 +1,11 @@ package collection import ( + "fmt" "math" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" ) // rateLimiter implements payer-based rate limiting. See Config for details. @@ -112,3 +114,50 @@ func (limiter *rateLimiter) shouldRateLimit(tx *flow.TransactionBody) bool { return false } + +// GetMaxCollectionSizeForSealingLag computes the maximum collection size based on the sealing lag using a step halving algorithm. +// The function takes the current protocol state, minimum and maximum sealing lag, halving interval, minimum and maximum collection size as input parameters +// and outputs a single uint representing the maximum collection size allowed for the current sealing lag. +// No errors are expected during normal operations. +func GetMaxCollectionSizeForSealingLag( + state protocol.State, + minSealingLag, maxSealingLag, halvingInterval, minCollectionSize, maxCollectionSize uint, +) (uint, error) { + lastFinalized, err := state.Final().Head() + if err != nil { + return 0, fmt.Errorf("could not retrieve finalized block: %w", err) + } + lastSealed, err := state.Sealed().Head() + if err != nil { + return 0, fmt.Errorf("could not retrieve sealed block: %w", err) + } + sealingLag := uint(lastFinalized.Height - lastSealed.Height) + collectionSize := StepHalving( + [2]uint{minSealingLag, maxSealingLag}, // [minSealingLag, maxSealingLag] is the range of input values where the halving is applied + [2]uint{minCollectionSize, maxCollectionSize}, // [minCollectionSize, maxCollectionSize] is the range of collection sizes that halving function outputs + sealingLag, // the current sealing lag + halvingInterval, // interval in blocks in which the halving is applied + ) + return collectionSize, nil +} + +// StepHalving applies a step halving algorithm to determine the maximum collection size based on the sealing lag. 
+// sealingLagBounds is the [min, max] sealing lag range over which halving is applied; collectionMaxSizeBounds is the [min, max] range of collection sizes that the function returns. +func StepHalving(sealingLagBounds, collectionMaxSizeBounds [2]uint, sealingLag, interval uint) uint { + if sealingLag <= sealingLagBounds[0] { + return collectionMaxSizeBounds[1] + } + if sealingLag >= sealingLagBounds[1] { + return collectionMaxSizeBounds[0] + } + sealingLag = sealingLag - sealingLagBounds[0] // normalize sealingLag to start from 0 + if sealingLag <= 0 { + return collectionMaxSizeBounds[1] + } + halvings := sealingLag / interval + result := uint(float64(collectionMaxSizeBounds[1]) / math.Pow(2, float64(halvings))) + if result < collectionMaxSizeBounds[0] { + return collectionMaxSizeBounds[0] + } + return result +} diff --git a/module/builder/collection/rate_limiter_test.go b/module/builder/collection/rate_limiter_test.go new file mode 100644 index 00000000000..05938d1e459 --- /dev/null +++ b/module/builder/collection/rate_limiter_test.go @@ -0,0 +1,86 @@ +package collection + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + statepkg "github.com/onflow/flow-go/state" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestGetMaxCollectionSizeForSealingLag tests that different sealing lag values map to the expected maximum collection size. +func TestGetMaxCollectionSizeForSealingLag(t *testing.T) { + testCases := []struct { + name string + minSealingLag uint + maxSealingLag uint + halvingInterval uint + minCollectionSize uint + maxCollectionSize uint + sealingLag uint + expectedCollectionSize uint + }{ + {"no-halving", 0, 10, 5, 0, 10, 2, 10}, + {"one-halving", 0, 10, 5, 0, 10, 6, 5}, + {"two-halving", 0, 11, 5, 0, 10, 10, 2}, + {"max-reached", 0, 10, 5, 0, 10, 11, 0}, + {"almost-binary", 300, 600, 299, 0, 10, 599, 5}, + {"binary", 300, 600, 300, 0, 10, 599, 10}, + } + + state := protocol.NewState(t) + sealedBlock := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(100)) + state.On("Sealed").Return(unittest.StateSnapshotForKnownBlock(sealedBlock, nil)) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + finalBlock := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(sealedBlock.Height + uint64(tc.sealingLag))) + state.On("Final").Return(unittest.StateSnapshotForKnownBlock(finalBlock, nil)).Once() + result, err := GetMaxCollectionSizeForSealingLag( + state, + tc.minSealingLag, + tc.maxSealingLag, + tc.halvingInterval, + tc.minCollectionSize, + tc.maxCollectionSize, + ) + require.NoError(t, err) + assert.Equal(t, tc.expectedCollectionSize, result) + }) + } +} + +// TestGetMaxCollectionSizeForSealingLag_Errors tests error scenarios where retrieving the finalized or sealed snapshot fails and the +// error is propagated to the caller.
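The table-driven cases above can be checked by hand against StepHalving: normalize the lag by the lower bound, integer-divide by the interval to count halvings, divide the maximum size by 2^halvings, and clamp to the bounds. For example, the one-halving and two-halving rows work out as follows (values taken from the test table):

	// "one-halving": lag bounds [0,10], size bounds [0,10], lag 6, interval 5
	// normalized lag = 6; halvings = 6/5 = 1; 10 / 2^1 = 5
	fmt.Println(StepHalving([2]uint{0, 10}, [2]uint{0, 10}, 6, 5)) // prints 5

	// "two-halving": lag bounds [0,11], size bounds [0,10], lag 10, interval 5
	// normalized lag = 10; halvings = 10/5 = 2; uint(10 / 2^2) = 2
	fmt.Println(StepHalving([2]uint{0, 11}, [2]uint{0, 10}, 10, 5)) // prints 2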
+func TestGetMaxCollectionSizeForSealingLag_Errors(t *testing.T) { + t.Run("finalized-err", func(t *testing.T) { + state := protocol.NewState(t) + state.On("Final").Return(unittest.StateSnapshotForUnknownBlock()).Once() + _, err := GetMaxCollectionSizeForSealingLag( + state, + 0, + 10, + 5, + 0, + 10, + ) + assert.ErrorIs(t, err, statepkg.ErrUnknownSnapshotReference) + }) + t.Run("sealed-err", func(t *testing.T) { + state := protocol.NewState(t) + state.On("Final").Return(unittest.StateSnapshotForKnownBlock(unittest.BlockHeaderFixture(), nil)).Once() + state.On("Sealed").Return(unittest.StateSnapshotForUnknownBlock()).Once() + _, err := GetMaxCollectionSizeForSealingLag( + state, + 0, + 10, + 5, + 0, + 10, + ) + assert.ErrorIs(t, err, statepkg.ErrUnknownSnapshotReference) + }) +} diff --git a/module/builder/consensus/builder.go b/module/builder/consensus/builder.go index b9a279a0dcc..22bdcca907d 100644 --- a/module/builder/consensus/builder.go +++ b/module/builder/consensus/builder.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package consensus import ( @@ -7,7 +5,6 @@ import ( "fmt" "time" - "github.com/dgraph-io/badger/v2" otelTrace "go.opentelemetry.io/otel/trace" "github.com/onflow/flow-go/model/flow" @@ -19,32 +16,31 @@ import ( "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/blocktimer" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/deferred" ) // Builder is the builder for consensus block payloads. Upon providing a payload // hash, it also memorizes which entities were included into the payload. type Builder struct { - metrics module.MempoolMetrics - tracer module.Tracer - db *badger.DB - state protocol.ParticipantState - seals storage.Seals - headers storage.Headers - index storage.Index - blocks storage.Blocks - resultsDB storage.ExecutionResults - receiptsDB storage.ExecutionReceipts - guarPool mempool.Guarantees - sealPool mempool.IncorporatedResultSeals - recPool mempool.ExecutionTree - cfg Config + metrics module.MempoolMetrics + tracer module.Tracer + state protocol.ParticipantState + seals storage.Seals + headers storage.Headers + index storage.Index + blocks storage.Blocks + resultsDB storage.ExecutionResults + receiptsDB storage.ExecutionReceipts + guarPool mempool.Guarantees + sealPool mempool.IncorporatedResultSeals + recPool mempool.ExecutionTree + mutableProtocolState protocol.MutableProtocolState + cfg Config } // NewBuilder creates a new block builder.
func NewBuilder( metrics module.MempoolMetrics, - db *badger.DB, state protocol.ParticipantState, headers storage.Headers, seals storage.Seals, @@ -52,6 +48,7 @@ func NewBuilder( blocks storage.Blocks, resultsDB storage.ExecutionResults, receiptsDB storage.ExecutionReceipts, + mutableProtocolState protocol.MutableProtocolState, guarPool mempool.Guarantees, sealPool mempool.IncorporatedResultSeals, recPool mempool.ExecutionTree, @@ -59,7 +56,7 @@ func NewBuilder( options ...func(*Config), ) (*Builder, error) { - blockTimer, err := blocktimer.NewBlockTimer(500*time.Millisecond, 10*time.Second) + blockTimer, err := blocktimer.NewBlockTimer(500, 10_000) if err != nil { return nil, fmt.Errorf("could not create default block timer: %w", err) } @@ -79,20 +76,20 @@ func NewBuilder( } b := &Builder{ - metrics: metrics, - db: db, - tracer: tracer, - state: state, - headers: headers, - seals: seals, - index: index, - blocks: blocks, - resultsDB: resultsDB, - receiptsDB: receiptsDB, - guarPool: guarPool, - sealPool: sealPool, - recPool: recPool, - cfg: cfg, + metrics: metrics, + tracer: tracer, + state: state, + headers: headers, + seals: seals, + index: index, + blocks: blocks, + resultsDB: resultsDB, + receiptsDB: receiptsDB, + guarPool: guarPool, + sealPool: sealPool, + recPool: recPool, + mutableProtocolState: mutableProtocolState, + cfg: cfg, } err = b.repopulateExecutionTree() @@ -103,10 +100,20 @@ func NewBuilder( return b, nil } -// BuildOn creates a new block header on top of the provided parent, using the -// given view and applying the custom setter function to allow the caller to -// make changes to the header before storing it. -func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) error) (*flow.Header, error) { +// BuildOn generates a new payload that is valid with respect to the parent +// being built upon, with the view being provided by the consensus algorithm. +// The builder stores the block and validates it against the protocol state +// before returning it. The specified parent block must exist in the protocol state. +// +// NOTE: Since the block is stored within Builder, HotStuff MUST propose the +// block once BuildOn successfully returns. +// +// # Errors +// This function does not produce any expected errors. +// However, it will pass through all errors returned by `setter` and `sign`. +// Callers must be aware of possible error returns from the `setter` and `sign` arguments they provide, +// and handle them accordingly when handling errors returned from BuildOn. 
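Since the reworked BuildOn below passes setter and sign errors through unwrapped, a caller can branch on sentinel errors raised by its own callbacks. A minimal caller-side sketch, assuming the hotstuffmodel alias from builder_test.go further below and a hypothetical broadcast step; whether a NoVoteError should simply skip the view is the caller's policy, not something this diff prescribes:

	proposal, err := b.BuildOn(parentID, setter, sign)
	if err != nil {
		if hotstuffmodel.IsNoVoteError(err) {
			// our signer declined to sign this proposal; skip this view
			return nil
		}
		return fmt.Errorf("unexpected failure building proposal: %w", err)
	}
	// the proposal is already stored, so HotStuff MUST now propose it
	broadcast(proposal)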
+func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.HeaderBodyBuilder) error, sign func(*flow.Header) ([]byte, error)) (*flow.ProposalHeader, error) { // since we don't know the blockID when building the block we track the // time indirectly and insert the span directly at the end @@ -132,24 +139,25 @@ func (b *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) er } // assemble the block proposal - proposal, err := b.createProposal(parentID, + blockProposal, err := b.createProposal(parentID, insertableGuarantees, insertableSeals, insertableReceipts, - setter) + setter, + sign) if err != nil { return nil, fmt.Errorf("could not assemble proposal: %w", err) } - span, ctx := b.tracer.StartBlockSpan(context.Background(), proposal.ID(), trace.CONBuilderBuildOn, otelTrace.WithTimestamp(startTime)) + span, ctx := b.tracer.StartBlockSpan(context.Background(), blockProposal.Block.ID(), trace.CONBuilderBuildOn, otelTrace.WithTimestamp(startTime)) defer span.End() - err = b.state.Extend(ctx, proposal) + err = b.state.Extend(ctx, blockProposal) if err != nil { return nil, fmt.Errorf("could not extend state with built proposal: %w", err) } - return proposal.Header, nil + return blockProposal.ProposalHeader(), nil } // repopulateExecutionTree restores latest state of execution tree mempool based on local chain state information. @@ -267,13 +275,9 @@ func (b *Builder) getInsertableGuarantees(parentID flow.Identifier) ([]*flow.Col limit = 0 } - // look up the root height so we don't look too far back - // initially this is the genesis block height (aka 0). - var rootHeight uint64 - err = b.db.View(operation.RetrieveRootHeight(&rootHeight)) - if err != nil { - return nil, fmt.Errorf("could not retrieve root block height: %w", err) - } + // the finalized root height is the height where we bootstrapped from. + // we should not include guarantees that are older than the finalized root + rootHeight := b.state.Params().FinalizedRoot().Height if limit < rootHeight { limit = rootHeight } @@ -296,8 +300,8 @@ func (b *Builder) getInsertableGuarantees(parentID flow.Identifier) ([]*flow.Col return fmt.Errorf("could not get ancestor payload (%x): %w", ancestorID, err) } - for _, collID := range index.CollectionIDs { - receiptLookup[collID] = struct{}{} + for _, guaranteeID := range index.GuaranteeIDs { + receiptLookup[guaranteeID] = struct{}{} } return nil @@ -317,10 +321,8 @@ func (b *Builder) getInsertableGuarantees(parentID flow.Identifier) ([]*flow.Col break } - collID := guarantee.ID() - // skip collections that are already included in a block on the fork - _, duplicated := receiptLookup[collID] + _, duplicated := receiptLookup[guarantee.ID()] if duplicated { continue } @@ -411,15 +413,18 @@ func (b *Builder) getInsertableSeals(parentID flow.Identifier) ([]*flow.Seal, er // re-assemble the IncorporatedResult because we need its ID to // check if it is in the seal mempool. - incorporatedResult := flow.NewIncorporatedResult( - blockID, - result, - ) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: blockID, + Result: result, + }) + if err != nil { + return fmt.Errorf("could not create incorporated result for block %x: %w", blockID, err) + } // enforce condition (0): candidate seals are only constructed once sufficient // approvals have been collected. 
Hence, any incorporated result for which we // find a candidate seal satisfies condition (0) - irSeal, ok := b.sealPool.ByID(incorporatedResult.ID()) + irSeal, ok := b.sealPool.Get(incorporatedResult.ID()) if !ok { continue } @@ -483,12 +488,12 @@ func connectingSeal(sealsForNextBlock []*flow.IncorporatedResultSeal, lastSealed } type InsertableReceipts struct { - receipts []*flow.ExecutionReceiptMeta + receipts []*flow.ExecutionReceiptStub results []*flow.ExecutionResult } // getInsertableReceipts constructs: -// - (i) the meta information of the ExecutionReceipts (i.e. ExecutionReceiptMeta) +// - (i) the meta information of the ExecutionReceipts (i.e. ExecutionReceiptStub) // that should be inserted in the next payload // - (ii) the ExecutionResults the receipts from step (i) commit to // (deduplicated w.r.t. the block under construction as well as ancestor blocks) @@ -561,14 +566,14 @@ func (b *Builder) getInsertableReceipts(parentID flow.Identifier) (*InsertableRe // TODO: we should probably remove this edge case by _synchronously_ populating // the Execution Tree in the Fork's finalizationCallback if err != nil && !mempool.IsUnknownExecutionResultError(err) { - return nil, fmt.Errorf("failed to retrieve reachable receipts from memool: %w", err) + return nil, fmt.Errorf("failed to retrieve reachable receipts from mempool: %w", err) } insertables := toInsertables(receipts, includedResults, b.cfg.maxReceiptCount) return insertables, nil } -// toInsertables separates the provided receipts into ExecutionReceiptMeta and +// toInsertables separates the provided receipts into ExecutionReceiptStub and // ExecutionResult. Results that are in includedResults are skipped. // We also limit the number of receipts to maxReceiptCount. func toInsertables(receipts []*flow.ExecutionReceipt, includedResults map[flow.Identifier]struct{}, maxReceiptCount uint) *InsertableReceipts { @@ -580,11 +585,11 @@ func toInsertables(receipts []*flow.ExecutionReceipt, includedResults map[flow.I count = maxReceiptCount } - filteredReceipts := make([]*flow.ExecutionReceiptMeta, 0, count) + filteredReceipts := make([]*flow.ExecutionReceiptStub, 0, count) for i := uint(0); i < count; i++ { receipt := receipts[i] - meta := receipt.Meta() + meta := receipt.Stub() resultID := meta.ResultID if _, inserted := includedResults[resultID]; !inserted { results = append(results, &receipt.ExecutionResult) @@ -606,16 +611,9 @@ func (b *Builder) createProposal(parentID flow.Identifier, guarantees []*flow.CollectionGuarantee, seals []*flow.Seal, insertableReceipts *InsertableReceipts, - setter func(*flow.Header) error) (*flow.Block, error) { - - // build the payload so we can get the hash - payload := &flow.Payload{ - Guarantees: guarantees, - Seals: seals, - Receipts: insertableReceipts.receipts, - Results: insertableReceipts.results, - } - + setter func(*flow.HeaderBodyBuilder) error, + sign func(*flow.Header) ([]byte, error), +) (*flow.Proposal, error) { parent, err := b.headers.ByBlockID(parentID) if err != nil { return nil, fmt.Errorf("could not retrieve parent: %w", err) @@ -624,23 +622,67 @@ func (b *Builder) createProposal(parentID flow.Identifier, timestamp := b.cfg.blockTimer.Build(parent.Timestamp) // construct default block on top of the provided parent - header := &flow.Header{ - ChainID: parent.ChainID, - ParentID: parentID, - Height: parent.Height + 1, - Timestamp: timestamp, - PayloadHash: payload.Hash(), + headerBodyBuilder := flow.NewHeaderBodyBuilder(). + WithChainID(parent.ChainID). + WithParentID(parentID). 
+ WithHeight(parent.Height + 1). + WithTimestamp(timestamp) + + // apply the custom fields setter of the consensus algorithm, we must do this before applying service events + // since we need to know the correct view of the block. + err = setter(headerBodyBuilder) + if err != nil { + return nil, fmt.Errorf("could not apply setter: %w", err) + } + headerBody, err := headerBodyBuilder.Build() + if err != nil { + return nil, fmt.Errorf("could not build header: %w", err) } - // apply the custom fields setter of the consensus algorithm - err = setter(header) + // Evolve the Protocol State starting from the parent block's state. Information that may change the state is: + // the candidate block's view and Service Events from execution results sealed in the candidate block. + deferredBlockPersist := deferred.NewDeferredBlockPersist() + protocolStateID, err := b.mutableProtocolState.EvolveState(deferredBlockPersist, headerBody.ParentID, headerBody.View, seals) if err != nil { - return nil, fmt.Errorf("could not apply setter: %w", err) + return nil, fmt.Errorf("evolving protocol state failed: %w", err) + } + + payload, err := flow.NewPayload( + flow.UntrustedPayload{ + Guarantees: guarantees, + Seals: seals, + Receipts: insertableReceipts.receipts, + Results: insertableReceipts.results, + ProtocolStateID: protocolStateID, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not build the payload: %w", err) } - proposal := &flow.Block{ - Header: header, - Payload: payload, + block, err := flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: *headerBody, + Payload: *payload, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not build the block: %w", err) + } + + // sign the proposal + sig, err := sign(block.ToHeader()) + if err != nil { + return nil, fmt.Errorf("could not sign the block: %w", err) + } + proposal, err := flow.NewProposal( + flow.UntrustedProposal{ + Block: *block, + ProposerSigData: sig, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct proposal: %w", err) } return proposal, nil diff --git a/module/builder/consensus/builder_test.go b/module/builder/consensus/builder_test.go index d8f82c8eda8..71671b4222b 100644 --- a/module/builder/consensus/builder_test.go +++ b/module/builder/consensus/builder_test.go @@ -1,16 +1,18 @@ package consensus import ( + "errors" "math/rand" "os" "testing" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + hotstuffmodel "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" mempoolAPIs "github.com/onflow/flow-go/module/mempool" mempoolImpl "github.com/onflow/flow-go/module/mempool/consensus" @@ -18,10 +20,12 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" realproto "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/datastore" protocol "github.com/onflow/flow-go/state/protocol/mock" - storerr "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - storage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" "github.com/onflow/flow-go/utils/unittest" ) @@ -63,20 +67,22 @@ type BuilderSuite struct { // real 
dependencies dir string - db *badger.DB + db storage.DB sentinel uint64 - setter func(*flow.Header) error + setter func(*flow.HeaderBodyBuilder) error + sign func(*flow.Header) ([]byte, error) // mocked dependencies - state *protocol.ParticipantState - headerDB *storage.Headers - sealDB *storage.Seals - indexDB *storage.Index - blockDB *storage.Blocks - resultDB *storage.ExecutionResults - receiptsDB *storage.ExecutionReceipts - - guarPool *mempool.Guarantees + state *protocol.ParticipantState + headerDB *storagemock.Headers + sealDB *storagemock.Seals + indexDB *storagemock.Index + blockDB *storagemock.Blocks + resultDB *storagemock.ExecutionResults + receiptsDB *storagemock.ExecutionReceipts + stateMutator *protocol.MutableProtocolState + + guarPool *mempool.Mempool[flow.Identifier, *flow.CollectionGuarantee] sealPool *mempool.IncorporatedResultSeals recPool *mempool.ExecutionTree @@ -88,10 +94,10 @@ type BuilderSuite struct { } func (bs *BuilderSuite) storeBlock(block *flow.Block) { - bs.headers[block.ID()] = block.Header + bs.headers[block.ID()] = block.ToHeader() bs.blocks[block.ID()] = block bs.index[block.ID()] = block.Payload.Index() - bs.blockChildren[block.Header.ParentID] = append(bs.blockChildren[block.Header.ParentID], block.ID()) + bs.blockChildren[block.ParentID] = append(bs.blockChildren[block.ParentID], block.ID()) for _, result := range block.Payload.Results { bs.resultByID[result.ID()] = result } @@ -103,7 +109,7 @@ func (bs *BuilderSuite) storeBlock(block *flow.Block) { // and the result are combined in an IncorporatedResultSeal which is a candidate // for the seals mempool. func (bs *BuilderSuite) createAndRecordBlock(parentBlock *flow.Block, candidateSealForParent bool) *flow.Block { - block := unittest.BlockWithParentFixture(parentBlock.Header) + block := unittest.BlockWithParentFixture(parentBlock.ToHeader()) // Create a receipt for a result of the parentBlock block, // and add it to the payload. The corresponding IncorporatedResult will be used to @@ -114,7 +120,7 @@ func (bs *BuilderSuite) createAndRecordBlock(parentBlock *flow.Block, candidateS panic("missing execution result for parent") } receipt := unittest.ExecutionReceiptFixture(unittest.WithResult(previousResult)) - block.Payload.Receipts = append(block.Payload.Receipts, receipt.Meta()) + block.Payload.Receipts = append(block.Payload.Receipts, receipt.Stub()) block.Payload.Results = append(block.Payload.Results, &receipt.ExecutionResult) incorporatedResultForPrevBlock = unittest.IncorporatedResult.Fixture( @@ -153,7 +159,7 @@ func (bs *BuilderSuite) chainSeal(incorporatedResult *flow.IncorporatedResult) { ) bs.chain = append(bs.chain, incorporatedResultSeal.Seal) - bs.irsMap[incorporatedResultSeal.ID()] = incorporatedResultSeal + bs.irsMap[incorporatedResultSeal.IncorporatedResultID()] = incorporatedResultSeal bs.irsList = append(bs.irsList, incorporatedResultSeal) } @@ -173,7 +179,6 @@ func (bs *BuilderSuite) chainSeal(incorporatedResult *flow.IncorporatedResult) { // For the verifiers to start checking a result R, they need a source of randomness for the block _incorporating_ // result R. The result for block [A3] is incorporated in [parent], which does _not_ have a child yet. 
func (bs *BuilderSuite) SetupTest() { - // set up no-op dependencies noopMetrics := metrics.NewNoopCollector() noopTracer := trace.NewNoopTracer() @@ -202,7 +207,6 @@ func (bs *BuilderSuite) SetupTest() { // initialise the dbs bs.lastSeal = nil bs.headers = make(map[flow.Identifier]*flow.Header) - //bs.heights = make(map[uint64]*flow.Header) bs.index = make(map[flow.Identifier]*flow.Index) bs.blocks = make(map[flow.Identifier]*flow.Block) bs.blockChildren = make(map[flow.Identifier][]flow.Identifier) @@ -212,15 +216,15 @@ func (bs *BuilderSuite) SetupTest() { // Construct the [first] block: first := unittest.BlockFixture() - bs.storeBlock(&first) + bs.storeBlock(first) bs.firstID = first.ID() - firstResult := unittest.ExecutionResultFixture(unittest.WithBlock(&first)) + firstResult := unittest.ExecutionResultFixture(unittest.WithBlock(first)) bs.lastSeal = unittest.Seal.Fixture(unittest.Seal.WithResult(firstResult)) bs.resultForBlock[firstResult.BlockID] = firstResult bs.resultByID[firstResult.ID()] = firstResult // Construct finalized blocks [F0] ... [F4] - previous := &first + previous := first for n := 0; n < numFinalizedBlocks; n++ { finalized := bs.createAndRecordBlock(previous, n > 0) // Do not construct candidate seal for [first], as it is already sealed bs.finalizedBlockIDs = append(bs.finalizedBlockIDs, finalized.ID()) @@ -244,48 +248,73 @@ func (bs *BuilderSuite) SetupTest() { bs.parentID = parent.ID() // set up temporary database for tests - bs.db, bs.dir = unittest.TempBadgerDB(bs.T()) - - err := bs.db.Update(operation.InsertFinalizedHeight(final.Header.Height)) - bs.Require().NoError(err) - err = bs.db.Update(operation.IndexBlockHeight(final.Header.Height, bs.finalID)) - bs.Require().NoError(err) - - err = bs.db.Update(operation.InsertRootHeight(13)) - bs.Require().NoError(err) - - err = bs.db.Update(operation.InsertSealedHeight(first.Header.Height)) - bs.Require().NoError(err) - err = bs.db.Update(operation.IndexBlockHeight(first.Header.Height, first.ID())) - bs.Require().NoError(err) + pdb, dir := unittest.TempPebbleDB(bs.T()) + bs.db = pebbleimpl.ToDB(pdb) + bs.dir = dir + + lockManager := storage.NewTestingLockManager() + + // insert finalized height and root height + db := bs.db + err := unittest.WithLocks(bs.T(), lockManager, []string{storage.LockFinalizeBlock, storage.LockBootstrapping}, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + enc, err := datastore.NewVersionedInstanceParams( + datastore.DefaultInstanceParamsVersion, + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + ) + require.NoError(bs.T(), err) + require.NoError(bs.T(), operation.InsertInstanceParams(lctx, rw, *enc)) + require.NoError(bs.T(), operation.UpsertFinalizedHeight(lctx, rw.Writer(), final.Height)) + require.NoError(bs.T(), operation.IndexFinalizedBlockByHeight(lctx, rw, final.Height, bs.finalID)) + require.NoError(bs.T(), operation.UpsertSealedHeight(lctx, rw.Writer(), first.Height)) + require.NoError(bs.T(), operation.IndexFinalizedBlockByHeight(lctx, rw, first.Height, first.ID())) + return nil + }) + }) + require.NoError(bs.T(), err) bs.sentinel = 1337 + bs.setter = func(h *flow.HeaderBodyBuilder) error { + h.WithHeight(42). + WithChainID(flow.Emulator). + WithView(1337). + WithParentView(1300). + WithParentID(unittest.IdentifierFixture()). + WithParentVoterIndices(unittest.SignerIndicesFixture(4)). + WithParentVoterSigData(unittest.QCSigDataFixture()). 
+ WithProposerID(unittest.IdentifierFixture()) - bs.setter = func(header *flow.Header) error { - header.View = 1337 return nil } + bs.sign = func(_ *flow.Header) ([]byte, error) { + return unittest.SignatureFixture(), nil + } bs.state = &protocol.ParticipantState{} bs.state.On("Extend", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - block := args.Get(1).(*flow.Block) - bs.Assert().Equal(bs.sentinel, block.Header.View) - bs.assembled = block.Payload + proposal := args.Get(1).(*flow.Proposal) + bs.Assert().Equal(bs.sentinel, proposal.Block.View) + bs.assembled = &proposal.Block.Payload }).Return(nil) bs.state.On("Final").Return(func() realproto.Snapshot { if block, ok := bs.blocks[bs.finalID]; ok { - snapshot := unittest.StateSnapshotForKnownBlock(block.Header, nil) + snapshot := unittest.StateSnapshotForKnownBlock(block.ToHeader(), nil) snapshot.On("Descendants").Return(bs.blockChildren[bs.finalID], nil) return snapshot } return unittest.StateSnapshotForUnknownBlock() }) + params := new(protocol.Params) + params.On("FinalizedRoot").Return(first.ToHeader()) + bs.state.On("Params").Return(params) // set up storage mocks for tests - bs.sealDB = &storage.Seals{} + bs.sealDB = &storagemock.Seals{} bs.sealDB.On("HighestInFork", mock.Anything).Return(bs.lastSeal, nil) - bs.headerDB = &storage.Headers{} + bs.headerDB = &storagemock.Headers{} bs.headerDB.On("ByBlockID", mock.Anything).Return( func(blockID flow.Identifier) *flow.Header { return bs.headers[blockID] @@ -293,13 +322,13 @@ func (bs *BuilderSuite) SetupTest() { func(blockID flow.Identifier) error { _, exists := bs.headers[blockID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, ) - bs.indexDB = &storage.Index{} + bs.indexDB = &storagemock.Index{} bs.indexDB.On("ByBlockID", mock.Anything).Return( func(blockID flow.Identifier) *flow.Index { return bs.index[blockID] @@ -307,13 +336,13 @@ func (bs *BuilderSuite) SetupTest() { func(blockID flow.Identifier) error { _, exists := bs.index[blockID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, ) - bs.blockDB = &storage.Blocks{} + bs.blockDB = &storagemock.Blocks{} bs.blockDB.On("ByID", mock.Anything).Return( func(blockID flow.Identifier) *flow.Block { return bs.blocks[blockID] @@ -321,13 +350,13 @@ func (bs *BuilderSuite) SetupTest() { func(blockID flow.Identifier) error { _, exists := bs.blocks[blockID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, ) - bs.resultDB = &storage.ExecutionResults{} + bs.resultDB = &storagemock.ExecutionResults{} bs.resultDB.On("ByID", mock.Anything).Return( func(resultID flow.Identifier) *flow.ExecutionResult { return bs.resultByID[resultID] @@ -335,13 +364,13 @@ func (bs *BuilderSuite) SetupTest() { func(resultID flow.Identifier) error { _, exists := bs.resultByID[resultID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, ) - bs.receiptsDB = &storage.ExecutionReceipts{} + bs.receiptsDB = &storagemock.ExecutionReceipts{} bs.receiptsDB.On("ByID", mock.Anything).Return( func(receiptID flow.Identifier) *flow.ExecutionReceipt { return bs.receiptsByID[receiptID] @@ -349,7 +378,7 @@ func (bs *BuilderSuite) SetupTest() { func(receiptID flow.Identifier) error { _, exists := bs.receiptsByID[receiptID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, @@ -361,18 +390,22 @@ func (bs *BuilderSuite) SetupTest() { func(blockID flow.Identifier) error { _, exists := 
bs.receiptsByBlockID[blockID] if !exists { - return storerr.ErrNotFound + return storage.ErrNotFound } return nil }, ) // set up memory pool mocks for tests - bs.guarPool = &mempool.Guarantees{} + bs.guarPool = &mempool.Mempool[flow.Identifier, *flow.CollectionGuarantee]{} bs.guarPool.On("Size").Return(uint(0)) // only used by metrics bs.guarPool.On("All").Return( - func() []*flow.CollectionGuarantee { - return bs.pendingGuarantees + func() map[flow.Identifier]*flow.CollectionGuarantee { + guaranteeMap := make(map[flow.Identifier]*flow.CollectionGuarantee, len(bs.pendingGuarantees)) + for _, guarantee := range bs.pendingGuarantees { + guaranteeMap[guarantee.CollectionID] = guarantee + } + return guaranteeMap }, ) @@ -387,7 +420,7 @@ func (bs *BuilderSuite) SetupTest() { return res }, ) - bs.sealPool.On("ByID", mock.Anything).Return( + bs.sealPool.On("Get", mock.Anything).Return( func(id flow.Identifier) *flow.IncorporatedResultSeal { return bs.pendingSeals[id] }, @@ -409,10 +442,13 @@ func (bs *BuilderSuite) SetupTest() { nil, ) + // set up a mock state mutator; we don't need a real one since we are using a mocked participant state. + bs.stateMutator = protocol.NewMutableProtocolState(bs.T()) + bs.stateMutator.On("EvolveState", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(unittest.IdentifierFixture(), nil).Maybe() + // initialize the builder bs.build, err = NewBuilder( noopMetrics, - bs.db, bs.state, bs.headerDB, bs.sealDB, @@ -420,6 +456,7 @@ bs.blockDB, bs.resultDB, bs.receiptsDB, + bs.stateMutator, bs.guarPool, bs.sealPool, bs.recPool, @@ -440,17 +477,48 @@ func (bs *BuilderSuite) TearDownTest() { func (bs *BuilderSuite) TestPayloadEmptyValid() { // we should build an empty block with default setup - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().Empty(bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool") bs.Assert().Empty(bs.assembled.Seals, "should have no seals in payload with empty mempool") } +// TestSetterErrorPassthrough validates that errors from the setter function are passed through to the caller. +func (bs *BuilderSuite) TestSetterErrorPassthrough() { + sentinel := errors.New("sentinel") + setter := func(header *flow.HeaderBodyBuilder) error { + return sentinel + } + _, err := bs.build.BuildOn(bs.parentID, setter, bs.sign) + bs.Assert().ErrorIs(err, sentinel) +} + +// TestSignErrorPassthrough validates that errors from the sign function are passed through to the caller.
+func (bs *BuilderSuite) TestSignErrorPassthrough() { + bs.T().Run("unexpected Exception", func(t *testing.T) { + exception := errors.New("exception") + sign := func(header *flow.Header) ([]byte, error) { + return nil, exception + } + _, err := bs.build.BuildOn(bs.parentID, bs.setter, sign) + bs.Assert().ErrorIs(err, exception) + }) + bs.T().Run("NoVoteError", func(t *testing.T) { + // the EventHandler relies on this sentinel in particular to be passed through + sentinel := hotstuffmodel.NewNoVoteErrorf("not voting") + sign := func(header *flow.Header) ([]byte, error) { + return nil, sentinel + } + _, err := bs.build.BuildOn(bs.parentID, bs.setter, sign) + bs.Assert().ErrorIs(err, sentinel) + }) +} + func (bs *BuilderSuite) TestPayloadGuaranteeValid() { // add sixteen guarantees to the pool bs.pendingGuarantees = unittest.CollectionGuaranteesFixture(16, unittest.WithCollRef(bs.finalID)) - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().ElementsMatch(bs.pendingGuarantees, bs.assembled.Guarantees, "should have guarantees from mempool in payload") } @@ -467,13 +535,13 @@ func (bs *BuilderSuite) TestPayloadGuaranteeDuplicate() { for _, guarantee := range duplicated { blockID := forkBlocks[rand.Intn(len(forkBlocks))] index := bs.index[blockID] - index.CollectionIDs = append(index.CollectionIDs, guarantee.ID()) + index.GuaranteeIDs = append(index.GuaranteeIDs, guarantee.ID()) bs.index[blockID] = index } // add sixteen guarantees to the pool bs.pendingGuarantees = append(valid, duplicated...) - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().ElementsMatch(valid, bs.assembled.Guarantees, "should have valid guarantees from mempool in payload") } @@ -488,7 +556,7 @@ func (bs *BuilderSuite) TestPayloadGuaranteeReferenceUnknown() { // add all guarantees to the pool bs.pendingGuarantees = append(valid, unknown...) - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().ElementsMatch(valid, bs.assembled.Guarantees, "should have valid from mempool in payload") } @@ -506,7 +574,7 @@ func (bs *BuilderSuite) TestPayloadGuaranteeReferenceExpired() { // add all guarantees to the pool bs.pendingGuarantees = append(valid, expired...) 
- _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().ElementsMatch(valid, bs.assembled.Guarantees, "should have valid from mempool in payload") } @@ -532,7 +600,7 @@ func (bs *BuilderSuite) TestPayloadSeals_AllValid() { // Populate seals mempool with valid chain of seals for blocks [F0], ..., [A2] bs.pendingSeals = bs.irsMap - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().Empty(bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool") bs.Assert().ElementsMatch(bs.chain, bs.assembled.Seals, "should have included valid chain of seals") @@ -547,7 +615,7 @@ func (bs *BuilderSuite) TestPayloadSeals_Limit() { limit := uint(2) bs.build.cfg.maxSealCount = limit - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().Empty(bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool") bs.Assert().Equal(bs.chain[:limit], bs.assembled.Seals, "should have excluded seals above maxSealCount") @@ -573,7 +641,7 @@ func (bs *BuilderSuite) TestPayloadSeals_OnlyFork() { } bs.pendingSeals = bs.irsMap - _, err := bs.build.BuildOn(forkHead.ID(), bs.setter) + _, err := bs.build.BuildOn(forkHead.ID(), bs.setter, bs.sign) bs.Require().NoError(err) // expected seals: [F0] <- ... <- [final] <- [B0] <- ... <- [B5] @@ -625,20 +693,22 @@ func (bs *BuilderSuite) TestPayloadSeals_EnforceGap() { // create blocks B1 to B4: b1 := bs.createAndRecordBlock(bs.blocks[bs.parentID], true) - bchain := unittest.ChainFixtureFrom(3, b1.Header) // creates blocks b2, b3, b4 - b4 := bchain[2] + bchain := unittest.ChainFixtureFrom(2, b1.ToHeader()) // creates blocks b2, b3 - // Incorporate result for block B1 into payload of block B4 + // create block B4: includes result for block B1 in its payload resultB1 := bs.resultForBlock[b1.ID()] receiptB1 := unittest.ExecutionReceiptFixture(unittest.WithResult(resultB1)) - b4.SetPayload( + b4 := unittest.BlockWithParentAndPayload( + bchain[1].ToHeader(), flow.Payload{ Results: []*flow.ExecutionResult{&receiptB1.ExecutionResult}, - Receipts: []*flow.ExecutionReceiptMeta{receiptB1.Meta()}, - }) + Receipts: []*flow.ExecutionReceiptStub{receiptB1.Stub()}, + }, + ) + bchain = append(bchain, b4) // add blocks B2, B3, B4, A5 to the mocked storage layer (block b0 and b1 are already added): - a5 := unittest.BlockWithParentFixture(b4.Header) + a5 := unittest.BlockWithParentFixture(b4.ToHeader()) for _, b := range append(bchain, a5) { bs.storeBlock(b) } @@ -648,24 +718,24 @@ func (bs *BuilderSuite) TestPayloadSeals_EnforceGap() { b1seal := storeSealForIncorporatedResult(resultB1, b4.ID(), bs.pendingSeals) // mock for seals storage layer: - bs.sealDB = &storage.Seals{} + bs.sealDB = &storagemock.Seals{} bs.build.seals = bs.sealDB bs.T().Run("Build on top of B4 and check that no seals are included", func(t *testing.T) { bs.sealDB.On("HighestInFork", b4.ID()).Return(b0seal, nil) - _, err := bs.build.BuildOn(b4.ID(), bs.setter) + _, err := bs.build.BuildOn(b4.ID(), bs.setter, bs.sign) require.NoError(t, err) bs.recPool.AssertExpectations(t) require.Empty(t, bs.assembled.Seals, "should not include any seals") }) bs.T().Run("Build on top of B5 and check that seals for B1 is included", func(t *testing.T) { - b5 := unittest.BlockWithParentFixture(b4.Header) // creating 
block b5 + b5 := unittest.BlockWithParentFixture(b4.ToHeader()) // creating block b5 bs.storeBlock(b5) bs.sealDB.On("HighestInFork", b5.ID()).Return(b0seal, nil) - _, err := bs.build.BuildOn(b5.ID(), bs.setter) + _, err := bs.build.BuildOn(b5.ID(), bs.setter, bs.sign) require.NoError(t, err) bs.recPool.AssertExpectations(t) require.Equal(t, 1, len(bs.assembled.Seals), "only seal for B1 expected") @@ -687,14 +757,14 @@ func (bs *BuilderSuite) TestPayloadSeals_Duplicate() { // Pretend that the first n blocks are already sealed n := 4 lastSeal := bs.chain[n-1] - mockSealDB := &storage.Seals{} + mockSealDB := &storagemock.Seals{} mockSealDB.On("HighestInFork", mock.Anything).Return(lastSeal, nil) bs.build.seals = mockSealDB // seals for all blocks [F0], ..., [A3] are still in the mempool: bs.pendingSeals = bs.irsMap - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().Equal(bs.chain[n:], bs.assembled.Seals, "should have rejected duplicate seals") } @@ -714,10 +784,10 @@ func (bs *BuilderSuite) TestPayloadSeals_Duplicate() { func (bs *BuilderSuite) TestPayloadSeals_MissingNextSeal() { // remove the seal for block [F0] firstSeal := bs.irsList[0] - delete(bs.irsMap, firstSeal.ID()) + delete(bs.irsMap, firstSeal.IncorporatedResultID()) bs.pendingSeals = bs.irsMap - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().Empty(bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool") bs.Assert().Empty(bs.assembled.Seals, "should not have included any seals from cutoff chain") @@ -738,10 +808,10 @@ func (bs *BuilderSuite) TestPayloadSeals_MissingNextSeal() { func (bs *BuilderSuite) TestPayloadSeals_MissingInterimSeal() { // remove a seal for block [F4] seal := bs.irsList[3] - delete(bs.irsMap, seal.ID()) + delete(bs.irsMap, seal.IncorporatedResultID()) bs.pendingSeals = bs.irsMap - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().Empty(bs.assembled.Guarantees, "should have no guarantees in payload with empty mempool") bs.Assert().ElementsMatch(bs.chain[:3], bs.assembled.Seals, "should have included only beginning of broken chain") @@ -779,24 +849,40 @@ func (bs *BuilderSuite) TestValidatePayloadSeals_ExecutionForks() { blockF := bs.blocks[bs.finalID] blocks := []*flow.Block{blockF} - blocks = append(blocks, unittest.ChainFixtureFrom(4, blockF.Header)...) // elements [F, A, B, C, D] + blocks = append(blocks, unittest.ChainFixtureFrom(4, blockF.ToHeader())...) // elements [F, A, B, C, D] receiptChain1 := unittest.ReceiptChainFor(blocks, unittest.ExecutionResultFixture()) // elements [Result[F]_1, Result[A]_1, Result[B]_1, ...] receiptChain2 := unittest.ReceiptChainFor(blocks, unittest.ExecutionResultFixture()) // elements [Result[F]_2, Result[A]_2, Result[B]_2, ...] 
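The deletions above key the pending-seals mempool by `seal.IncorporatedResultID()` instead of `seal.ID()`. A toy sketch of the idea — hypothetical `ID`/`IncorporatedResultSeal` types and a stand-in hash, not the flow-go implementation — showing why the key should commit to the (result, incorporating block) pair:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

type ID [32]byte

// IncorporatedResultSeal stands in for the flow type: a seal for an execution
// result, in the context of the block that incorporated that result.
type IncorporatedResultSeal struct {
	ResultID            ID
	IncorporatedBlockID ID
}

// IncorporatedResultID is assumed to commit to both fields, so the same result
// incorporated on two different forks yields two distinct mempool keys.
func (s IncorporatedResultSeal) IncorporatedResultID() ID {
	return ID(sha256.Sum256(append(s.ResultID[:], s.IncorporatedBlockID[:]...)))
}

func main() {
	pool := map[ID]IncorporatedResultSeal{}
	res, blkA, blkB := ID{1}, ID{2}, ID{3}
	a := IncorporatedResultSeal{ResultID: res, IncorporatedBlockID: blkA}
	b := IncorporatedResultSeal{ResultID: res, IncorporatedBlockID: blkB}
	pool[a.IncorporatedResultID()] = a // re-adding the identical pair is a no-op
	pool[b.IncorporatedResultID()] = b
	fmt.Println(len(pool)) // 2: one entry per (result, incorporating block) pair
}
```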
- for i := 1; i <= 3; i++ { // set payload for blocks A, B, C - blocks[i].SetPayload(flow.Payload{ - Results: []*flow.ExecutionResult{&receiptChain1[i-1].ExecutionResult, &receiptChain2[i-1].ExecutionResult}, - Receipts: []*flow.ExecutionReceiptMeta{receiptChain1[i-1].Meta(), receiptChain2[i-1].Meta()}, - }) + // set payload for blocks A, B, C + for i := 1; i <= 3; i++ { + var err error + blocks[i], err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: blocks[i].HeaderBody, + Payload: unittest.PayloadFixture( + unittest.WithReceipts(receiptChain1[i-1], receiptChain2[i-1]), + ), + }, + ) + require.NoError(bs.T(), err) } sealedResult := receiptChain1[0].ExecutionResult sealF := unittest.Seal.Fixture(unittest.Seal.WithResult(&sealedResult)) - blocks[4].SetPayload(flow.Payload{ // set payload for block D - Seals: []*flow.Seal{sealF}, - }) + var err error + // set payload for block D + blocks[4], err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: blocks[4].HeaderBody, + Payload: unittest.PayloadFixture( + unittest.WithSeals(sealF), + ), + }, + ) + require.NoError(bs.T(), err) + for i := 0; i <= 4; i++ { // we need to run this several times, as in each iteration as we have _multiple_ execution chains. - // In each iteration, we only mange to reconnect one additional height + // In each iteration, we only manage to reconnect one additional height unittest.ReconnectBlocksAndReceipts(blocks, receiptChain1) unittest.ReconnectBlocksAndReceipts(blocks, receiptChain2) } @@ -804,7 +890,7 @@ func (bs *BuilderSuite) TestValidatePayloadSeals_ExecutionForks() { for _, b := range blocks { bs.storeBlock(b) } - bs.sealDB = &storage.Seals{} + bs.sealDB = &storagemock.Seals{} bs.build.seals = bs.sealDB bs.sealDB.On("HighestInFork", mock.Anything).Return(sealF, nil) bs.resultByID[sealedResult.ID()] = &sealedResult @@ -813,7 +899,7 @@ func (bs *BuilderSuite) TestValidatePayloadSeals_ExecutionForks() { bs.pendingSeals = make(map[flow.Identifier]*flow.IncorporatedResultSeal) storeSealForIncorporatedResult(&receiptChain2[1].ExecutionResult, blocks[2].ID(), bs.pendingSeals) - _, err := bs.build.BuildOn(blocks[4].ID(), bs.setter) + _, err := bs.build.BuildOn(blocks[4].ID(), bs.setter, bs.sign) require.NoError(t, err) require.Empty(t, bs.assembled.Seals, "should not have included seal for conflicting execution fork") }) @@ -825,7 +911,7 @@ func (bs *BuilderSuite) TestValidatePayloadSeals_ExecutionForks() { storeSealForIncorporatedResult(&receiptChain2[1].ExecutionResult, blocks[2].ID(), bs.pendingSeals) storeSealForIncorporatedResult(&receiptChain2[2].ExecutionResult, blocks[3].ID(), bs.pendingSeals) - _, err := bs.build.BuildOn(blocks[4].ID(), bs.setter) + _, err := bs.build.BuildOn(blocks[4].ID(), bs.setter, bs.sign) require.NoError(t, err) require.ElementsMatch(t, []*flow.Seal{sealResultA_1.Seal, sealResultB_1.Seal}, bs.assembled.Seals, "valid fork should have been sealed") }) @@ -853,7 +939,7 @@ func (bs *BuilderSuite) TestPayloadReceipts_TraverseExecutionTreeFromLastSealedR f2 := bs.blocks[bs.finalizedBlockIDs[2]] f2eal := unittest.Seal.Fixture(unittest.Seal.WithResult(bs.resultForBlock[f2.ID()])) f4Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(bs.resultForBlock[bs.finalID])) - bs.sealDB = &storage.Seals{} + bs.sealDB = &storagemock.Seals{} bs.build.seals = bs.sealDB // reset receipts mempool to verify calls made by Builder @@ -864,21 +950,21 @@ func (bs *BuilderSuite) TestPayloadReceipts_TraverseExecutionTreeFromLastSealedR // building on top of X0: latest finalized block in fork is [lastSeal]; 
expect search to start with sealed result bs.sealDB.On("HighestInFork", x0.ID()).Return(bs.lastSeal, nil) bs.recPool.On("ReachableReceipts", bs.lastSeal.ResultID, mock.Anything, mock.Anything).Return([]*flow.ExecutionReceipt{}, nil).Once() - _, err := bs.build.BuildOn(x0.ID(), bs.setter) + _, err := bs.build.BuildOn(x0.ID(), bs.setter, bs.sign) bs.Require().NoError(err) bs.recPool.AssertExpectations(bs.T()) // building on top of X1: latest finalized block in fork is [F4]; expect search to start with sealed result bs.sealDB.On("HighestInFork", x1.ID()).Return(f4Seal, nil) bs.recPool.On("ReachableReceipts", f4Seal.ResultID, mock.Anything, mock.Anything).Return([]*flow.ExecutionReceipt{}, nil).Once() - _, err = bs.build.BuildOn(x1.ID(), bs.setter) + _, err = bs.build.BuildOn(x1.ID(), bs.setter, bs.sign) bs.Require().NoError(err) bs.recPool.AssertExpectations(bs.T()) // building on top of A3 (with ID bs.parentID): latest finalized block in fork is [F4]; expect search to start with sealed result bs.sealDB.On("HighestInFork", bs.parentID).Return(f2eal, nil) bs.recPool.On("ReachableReceipts", f2eal.ResultID, mock.Anything, mock.Anything).Return([]*flow.ExecutionReceipt{}, nil).Once() - _, err = bs.build.BuildOn(bs.parentID, bs.setter) + _, err = bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.recPool.AssertExpectations(bs.T()) } @@ -917,7 +1003,7 @@ func (bs *BuilderSuite) TestPayloadReceipts_IncludeOnlyReceiptsForCurrentFork() // set last sealed blocks: b1Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(bs.resultForBlock[b1.ID()])) - bs.sealDB = &storage.Seals{} + bs.sealDB = &storagemock.Seals{} bs.sealDB.On("HighestInFork", b5.ID()).Return(b1Seal, nil) bs.build.seals = bs.sealDB @@ -927,16 +1013,16 @@ func (bs *BuilderSuite) TestPayloadReceipts_IncludeOnlyReceiptsForCurrentFork() bs.recPool.On("ReachableReceipts", b1Seal.ResultID, mock.Anything, mock.Anything).Run( func(args mock.Arguments) { blockFilter := args[1].(mempoolAPIs.BlockFilter) - for _, h := range []*flow.Header{b1.Header, b2.Header, b3.Header, b4.Header, b5.Header} { + for _, h := range []*flow.Header{b1.ToHeader(), b2.ToHeader(), b3.ToHeader(), b4.ToHeader(), b5.ToHeader()} { assert.True(bs.T(), blockFilter(h)) } - for _, h := range []*flow.Header{bs.blocks[bs.finalID].Header, x1.Header, y2.Header, a6.Header, c3.Header, c4.Header, d4.Header} { + for _, h := range []*flow.Header{bs.blocks[bs.finalID].ToHeader(), x1.ToHeader(), y2.ToHeader(), a6.ToHeader(), c3.ToHeader(), c4.ToHeader(), d4.ToHeader()} { assert.False(bs.T(), blockFilter(h)) } }).Return([]*flow.ExecutionReceipt{}, nil).Once() bs.build.recPool = bs.recPool - _, err := bs.build.BuildOn(b5.ID(), bs.setter) + _, err := bs.build.BuildOn(b5.ID(), bs.setter, bs.sign) bs.Require().NoError(err) bs.recPool.AssertExpectations(bs.T()) } @@ -959,7 +1045,8 @@ func (bs *BuilderSuite) TestPayloadReceipts_SkipDuplicatedReceipts() { resultByID := block.Payload.Results.Lookup() for _, meta := range block.Payload.Receipts { result := resultByID[meta.ResultID] - rcpt := flow.ExecutionReceiptFromMeta(*meta, *result) + rcpt, err := flow.ExecutionReceiptFromStub(*meta, *result) + bs.NoError(err) assert.False(bs.T(), receiptFilter(rcpt)) } } @@ -973,7 +1060,7 @@ func (bs *BuilderSuite) TestPayloadReceipts_SkipDuplicatedReceipts() { }).Return([]*flow.ExecutionReceipt{}, nil).Once() bs.build.recPool = bs.recPool - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) 
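The assertions above expect the `BlockFilter` handed to `ReachableReceipts` to accept exactly the headers on the fork being extended and reject everything else. A sketch of such an ancestry-based filter under hypothetical `Header`/lookup types (the real filter also bounds the walk by the last sealed block, as hinted in the comments):

```go
package main

import "fmt"

type Header struct {
	ID       string
	ParentID string
	Height   uint64
}

// forkFilter collects the ancestry of head down to sealedHeight once, then
// answers membership queries in O(1) per header.
func forkFilter(head Header, byID map[string]Header, sealedHeight uint64) func(Header) bool {
	onFork := map[string]bool{}
	for cur, ok := head, true; ok; cur, ok = byID[cur.ParentID] {
		onFork[cur.ID] = true
		if cur.Height <= sealedHeight {
			break
		}
	}
	return func(h Header) bool { return onFork[h.ID] }
}

func main() {
	b1 := Header{ID: "b1", Height: 1}
	b2 := Header{ID: "b2", ParentID: "b1", Height: 2}
	x1 := Header{ID: "x1", ParentID: "b1", Height: 2} // sibling fork
	byID := map[string]Header{"b1": b1, "b2": b2, "x1": x1}
	filter := forkFilter(b2, byID, 1)
	fmt.Println(filter(b2), filter(b1), filter(x1)) // true true false
}
```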
bs.recPool.AssertExpectations(bs.T()) } @@ -1003,7 +1090,7 @@ func (bs *BuilderSuite) TestPayloadReceipts_SkipReceiptsForSealedBlock() { }).Return([]*flow.ExecutionReceipt{}, nil).Once() bs.build.recPool = bs.recPool - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.recPool.AssertExpectations(bs.T()) } @@ -1014,14 +1101,14 @@ func (bs *BuilderSuite) TestPayloadReceipts_BlockLimit() { // Populate the mempool with 5 valid receipts receipts := []*flow.ExecutionReceipt{} - metas := []*flow.ExecutionReceiptMeta{} + metas := []*flow.ExecutionReceiptStub{} expectedResults := []*flow.ExecutionResult{} var i uint64 for i = 0; i < 5; i++ { blockOnFork := bs.blocks[bs.irsList[i].Seal.BlockID] pendingReceipt := unittest.ReceiptForBlockFixture(blockOnFork) receipts = append(receipts, pendingReceipt) - metas = append(metas, pendingReceipt.Meta()) + metas = append(metas, pendingReceipt.Stub()) expectedResults = append(expectedResults, &pendingReceipt.ExecutionResult) } bs.pendingReceipts = receipts @@ -1031,7 +1118,7 @@ func (bs *BuilderSuite) TestPayloadReceipts_BlockLimit() { bs.build.cfg.maxReceiptCount = limit // ensure that only 3 of the 5 receipts were included - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().ElementsMatch(metas[:limit], bs.assembled.Receipts, "should have excluded receipts above maxReceiptCount") bs.Assert().ElementsMatch(expectedResults[:limit], bs.assembled.Results, "should have excluded results above maxReceiptCount") @@ -1041,11 +1128,11 @@ func (bs *BuilderSuite) TestPayloadReceipts_BlockLimit() { // Expectation: Builder should embed the Receipts as provided by the ExecutionTree func (bs *BuilderSuite) TestPayloadReceipts_AsProvidedByReceiptForest() { var expectedReceipts []*flow.ExecutionReceipt - var expectedMetas []*flow.ExecutionReceiptMeta + var expectedMetas []*flow.ExecutionReceiptStub var expectedResults []*flow.ExecutionResult for i := 0; i < 10; i++ { expectedReceipts = append(expectedReceipts, unittest.ExecutionReceiptFixture()) - expectedMetas = append(expectedMetas, expectedReceipts[i].Meta()) + expectedMetas = append(expectedMetas, expectedReceipts[i].Stub()) expectedResults = append(expectedResults, &expectedReceipts[i].ExecutionResult) } bs.recPool = &mempool.ExecutionTree{} @@ -1054,7 +1141,7 @@ func (bs *BuilderSuite) TestPayloadReceipts_AsProvidedByReceiptForest() { bs.recPool.On("ReachableReceipts", mock.Anything, mock.Anything, mock.Anything).Return(expectedReceipts, nil).Once() bs.build.recPool = bs.recPool - _, err := bs.build.BuildOn(bs.parentID, bs.setter) + _, err := bs.build.BuildOn(bs.parentID, bs.setter, bs.sign) bs.Require().NoError(err) bs.Assert().ElementsMatch(expectedMetas, bs.assembled.Receipts, "should include receipts as returned by ExecutionTree") bs.Assert().ElementsMatch(expectedResults, bs.assembled.Results, "should include results as returned by ExecutionTree") @@ -1079,15 +1166,18 @@ func (bs *BuilderSuite) TestPayloadReceipts_AsProvidedByReceiptForest() { func (bs *BuilderSuite) TestIntegration_PayloadReceiptNoParentResult() { // make blocks S, A, B, C parentReceipt := unittest.ExecutionReceiptFixture(unittest.WithResult(bs.resultForBlock[bs.parentID])) - blockSABC := unittest.ChainFixtureFrom(4, bs.blocks[bs.parentID].Header) + blockSABC := unittest.ChainFixtureFrom(4, bs.blocks[bs.parentID].ToHeader()) + + // fully create blockSABC[0] + 
blockSABC[0].Payload.Receipts = []*flow.ExecutionReceiptStub{parentReceipt.Stub()} + blockSABC[0].Payload.Results = []*flow.ExecutionResult{&parentReceipt.ExecutionResult} + resultS := unittest.ExecutionResultFixture(unittest.WithBlock(blockSABC[0]), unittest.WithPreviousResult(*bs.resultForBlock[bs.parentID])) receiptSABC := unittest.ReceiptChainFor(blockSABC, resultS) - blockSABC[0].Payload.Receipts = []*flow.ExecutionReceiptMeta{parentReceipt.Meta()} - blockSABC[0].Payload.Results = []*flow.ExecutionResult{&parentReceipt.ExecutionResult} - blockSABC[1].Payload.Receipts = []*flow.ExecutionReceiptMeta{receiptSABC[0].Meta()} + blockSABC[1].Payload.Receipts = []*flow.ExecutionReceiptStub{receiptSABC[0].Stub()} blockSABC[1].Payload.Results = []*flow.ExecutionResult{&receiptSABC[0].ExecutionResult} - blockSABC[2].Payload.Receipts = []*flow.ExecutionReceiptMeta{} - blockSABC[3].Payload.Receipts = []*flow.ExecutionReceiptMeta{} + blockSABC[2].Payload.Receipts = []*flow.ExecutionReceiptStub{} + blockSABC[3].Payload.Receipts = []*flow.ExecutionReceiptStub{} unittest.ReconnectBlocksAndReceipts(blockSABC, receiptSABC) // update block header so that blocks are chained together bs.storeBlock(blockSABC[0]) @@ -1101,18 +1191,19 @@ func (bs *BuilderSuite) TestIntegration_PayloadReceiptNoParentResult() { resultByID := block.Payload.Results.Lookup() for _, meta := range block.Payload.Receipts { result := resultByID[meta.ResultID] - rcpt := flow.ExecutionReceiptFromMeta(*meta, *result) - _, err := bs.build.recPool.AddReceipt(rcpt, bs.blocks[rcpt.ExecutionResult.BlockID].Header) + rcpt, err := flow.ExecutionReceiptFromStub(*meta, *result) + bs.NoError(err) + _, err = bs.build.recPool.AddReceipt(rcpt, bs.blocks[rcpt.ExecutionResult.BlockID].ToHeader()) bs.NoError(err) } } // for receipts _not_ included in blocks, add only receipt for A and C but NOT B - _, _ = bs.build.recPool.AddReceipt(receiptSABC[1], blockSABC[1].Header) - _, _ = bs.build.recPool.AddReceipt(receiptSABC[3], blockSABC[3].Header) + _, _ = bs.build.recPool.AddReceipt(receiptSABC[1], blockSABC[1].ToHeader()) + _, _ = bs.build.recPool.AddReceipt(receiptSABC[3], blockSABC[3].ToHeader()) - _, err := bs.build.BuildOn(blockSABC[3].ID(), bs.setter) + _, err := bs.build.BuildOn(blockSABC[3].ID(), bs.setter, bs.sign) bs.Require().NoError(err) - expectedReceipts := flow.ExecutionReceiptMetaList{receiptSABC[1].Meta()} + expectedReceipts := flow.ExecutionReceiptStubList{receiptSABC[1].Stub()} expectedResults := flow.ExecutionResultList{&receiptSABC[1].ExecutionResult} bs.Assert().Equal(expectedReceipts, bs.assembled.Receipts, "payload should contain only receipt for block a") bs.Assert().ElementsMatch(expectedResults, bs.assembled.Results, "payload should contain only result for block a") @@ -1128,11 +1219,13 @@ func (bs *BuilderSuite) TestIntegration_ExtendDifferentExecutionPathsOnSameFork( // A is a block containing a valid receipt for block P recP := unittest.ExecutionReceiptFixture(unittest.WithResult(bs.resultForBlock[bs.parentID])) - A := unittest.BlockWithParentFixture(bs.headers[bs.parentID]) - A.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{recP.Meta()}, - Results: []*flow.ExecutionResult{&recP.ExecutionResult}, - }) + A := unittest.BlockWithParentAndPayload( + bs.headers[bs.parentID], + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{recP.Stub()}, + Results: []*flow.ExecutionResult{&recP.ExecutionResult}, + }, + ) // B is a block containing two valid receipts, with different results, for // block A @@ -1140,11 
+1233,13 @@ func (bs *BuilderSuite) TestIntegration_ExtendDifferentExecutionPathsOnSameFork( recA1 := unittest.ExecutionReceiptFixture(unittest.WithResult(resA1)) resA2 := unittest.ExecutionResultFixture(unittest.WithBlock(A), unittest.WithPreviousResult(recP.ExecutionResult)) recA2 := unittest.ExecutionReceiptFixture(unittest.WithResult(resA2)) - B := unittest.BlockWithParentFixture(A.Header) - B.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{recA1.Meta(), recA2.Meta()}, - Results: []*flow.ExecutionResult{&recA1.ExecutionResult, &recA2.ExecutionResult}, - }) + B := unittest.BlockWithParentAndPayload( + A.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{recA1.Stub(), recA2.Stub()}, + Results: []*flow.ExecutionResult{&recA1.ExecutionResult, &recA2.ExecutionResult}, + }, + ) bs.storeBlock(A) bs.storeBlock(B) @@ -1155,8 +1250,9 @@ func (bs *BuilderSuite) TestIntegration_ExtendDifferentExecutionPathsOnSameFork( resultByID := block.Payload.Results.Lookup() for _, meta := range block.Payload.Receipts { result := resultByID[meta.ResultID] - rcpt := flow.ExecutionReceiptFromMeta(*meta, *result) - _, err := bs.build.recPool.AddReceipt(rcpt, bs.blocks[rcpt.ExecutionResult.BlockID].Header) + rcpt, err := flow.ExecutionReceiptFromStub(*meta, *result) + bs.NoError(err) + _, err = bs.build.recPool.AddReceipt(rcpt, bs.blocks[rcpt.ExecutionResult.BlockID].ToHeader()) bs.NoError(err) } } @@ -1170,12 +1266,12 @@ func (bs *BuilderSuite) TestIntegration_ExtendDifferentExecutionPathsOnSameFork( recB2 := unittest.ExecutionReceiptFixture(unittest.WithResult(resB2)) // Add recB1 and recB2 to the mempool for inclusion in the next candidate - _, _ = bs.build.recPool.AddReceipt(recB1, B.Header) - _, _ = bs.build.recPool.AddReceipt(recB2, B.Header) + _, _ = bs.build.recPool.AddReceipt(recB1, B.ToHeader()) + _, _ = bs.build.recPool.AddReceipt(recB2, B.ToHeader()) - _, err := bs.build.BuildOn(B.ID(), bs.setter) + _, err := bs.build.BuildOn(B.ID(), bs.setter, bs.sign) bs.Require().NoError(err) - expectedReceipts := flow.ExecutionReceiptMetaList{recB1.Meta(), recB2.Meta()} + expectedReceipts := flow.ExecutionReceiptStubList{recB1.Stub(), recB2.Stub()} expectedResults := flow.ExecutionResultList{&recB1.ExecutionResult, &recB2.ExecutionResult} bs.Assert().Equal(expectedReceipts, bs.assembled.Receipts, "payload should contain receipts from valid execution forks") bs.Assert().ElementsMatch(expectedResults, bs.assembled.Results, "payload should contain results from valid execution forks") @@ -1204,30 +1300,36 @@ func (bs *BuilderSuite) TestIntegration_ExtendDifferentExecutionPathsOnSameFork( func (bs *BuilderSuite) TestIntegration_ExtendDifferentExecutionPathsOnDifferentForks() { // A is a block containing a valid receipt for block P recP := unittest.ExecutionReceiptFixture(unittest.WithResult(bs.resultForBlock[bs.parentID])) - A := unittest.BlockWithParentFixture(bs.headers[bs.parentID]) - A.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{recP.Meta()}, - Results: []*flow.ExecutionResult{&recP.ExecutionResult}, - }) + A := unittest.BlockWithParentAndPayload( + bs.headers[bs.parentID], + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{recP.Stub()}, + Results: []*flow.ExecutionResult{&recP.ExecutionResult}, + }, + ) // B is a block that builds on A containing a valid receipt for A resA1 := unittest.ExecutionResultFixture(unittest.WithBlock(A), unittest.WithPreviousResult(recP.ExecutionResult)) recA1 := unittest.ExecutionReceiptFixture(unittest.WithResult(resA1)) - 
B := unittest.BlockWithParentFixture(A.Header) - B.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{recA1.Meta()}, - Results: []*flow.ExecutionResult{&recA1.ExecutionResult}, - }) + B := unittest.BlockWithParentAndPayload( + A.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{recA1.Stub()}, + Results: []*flow.ExecutionResult{&recA1.ExecutionResult}, + }, + ) // C is another block that builds on A containing a valid receipt for A but // different from the receipt contained in B resA2 := unittest.ExecutionResultFixture(unittest.WithBlock(A), unittest.WithPreviousResult(recP.ExecutionResult)) recA2 := unittest.ExecutionReceiptFixture(unittest.WithResult(resA2)) - C := unittest.BlockWithParentFixture(A.Header) - C.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{recA2.Meta()}, - Results: []*flow.ExecutionResult{&recA2.ExecutionResult}, - }) + C := unittest.BlockWithParentAndPayload( + A.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{recA2.Stub()}, + Results: []*flow.ExecutionResult{&recA2.ExecutionResult}, + }, + ) bs.storeBlock(A) bs.storeBlock(B) @@ -1239,8 +1341,9 @@ func (bs *BuilderSuite) TestIntegration_ExtendDifferentExecutionPathsOnDifferent resultByID := block.Payload.Results.Lookup() for _, meta := range block.Payload.Receipts { result := resultByID[meta.ResultID] - rcpt := flow.ExecutionReceiptFromMeta(*meta, *result) - _, err := bs.build.recPool.AddReceipt(rcpt, bs.blocks[rcpt.ExecutionResult.BlockID].Header) + rcpt, err := flow.ExecutionReceiptFromStub(*meta, *result) + bs.NoError(err) + _, err = bs.build.recPool.AddReceipt(rcpt, bs.blocks[rcpt.ExecutionResult.BlockID].ToHeader()) bs.NoError(err) } } @@ -1252,14 +1355,14 @@ func (bs *BuilderSuite) TestIntegration_ExtendDifferentExecutionPathsOnDifferent resB2 := unittest.ExecutionResultFixture(unittest.WithBlock(B), unittest.WithPreviousResult(recA2.ExecutionResult)) recB2 := unittest.ExecutionReceiptFixture(unittest.WithResult(resB2)) - _, err := bs.build.recPool.AddReceipt(recB1, B.Header) + _, err := bs.build.recPool.AddReceipt(recB1, B.ToHeader()) bs.Require().NoError(err) - _, err = bs.build.recPool.AddReceipt(recB2, B.Header) + _, err = bs.build.recPool.AddReceipt(recB2, B.ToHeader()) bs.Require().NoError(err) - _, err = bs.build.BuildOn(B.ID(), bs.setter) + _, err = bs.build.BuildOn(B.ID(), bs.setter, bs.sign) bs.Require().NoError(err) - expectedReceipts := []*flow.ExecutionReceiptMeta{recA2.Meta(), recB1.Meta(), recB2.Meta()} + expectedReceipts := []*flow.ExecutionReceiptStub{recA2.Stub(), recB1.Stub(), recB2.Stub()} expectedResults := []*flow.ExecutionResult{&recA2.ExecutionResult, &recB1.ExecutionResult, &recB2.ExecutionResult} bs.Assert().ElementsMatch(expectedReceipts, bs.assembled.Receipts, "builder should extend different execution paths") bs.Assert().ElementsMatch(expectedResults, bs.assembled.Results, "builder should extend different execution paths") @@ -1272,20 +1375,23 @@ func (bs *BuilderSuite) TestIntegration_ExtendDifferentExecutionPathsOnDifferent func (bs *BuilderSuite) TestIntegration_DuplicateReceipts() { // A is a block containing a valid receipt for block P recP := unittest.ExecutionReceiptFixture(unittest.WithResult(bs.resultForBlock[bs.parentID])) - A := unittest.BlockWithParentFixture(bs.headers[bs.parentID]) - A.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{recP.Meta()}, - Results: []*flow.ExecutionResult{&recP.ExecutionResult}, - }) + A := unittest.BlockWithParentAndPayload( + bs.headers[bs.parentID], + 
flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{recP.Stub()}, + Results: []*flow.ExecutionResult{&recP.ExecutionResult}, + }, + ) // B is a block that builds on A containing a valid receipt for A resA1 := unittest.ExecutionResultFixture(unittest.WithBlock(A), unittest.WithPreviousResult(recP.ExecutionResult)) recA1 := unittest.ExecutionReceiptFixture(unittest.WithResult(resA1)) - B := unittest.BlockWithParentFixture(A.Header) - B.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{recA1.Meta()}, - Results: []*flow.ExecutionResult{&recA1.ExecutionResult}, - }) + B := unittest.BlockWithParentAndPayload( + A.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{recA1.Stub()}, + Results: []*flow.ExecutionResult{&recA1.ExecutionResult}, + }) bs.storeBlock(A) bs.storeBlock(B) @@ -1296,15 +1402,16 @@ func (bs *BuilderSuite) TestIntegration_DuplicateReceipts() { resultByID := block.Payload.Results.Lookup() for _, meta := range block.Payload.Receipts { result := resultByID[meta.ResultID] - rcpt := flow.ExecutionReceiptFromMeta(*meta, *result) - _, err := bs.build.recPool.AddReceipt(rcpt, bs.blocks[rcpt.ExecutionResult.BlockID].Header) + rcpt, err := flow.ExecutionReceiptFromStub(*meta, *result) + bs.NoError(err) + _, err = bs.build.recPool.AddReceipt(rcpt, bs.blocks[rcpt.ExecutionResult.BlockID].ToHeader()) bs.NoError(err) } } - _, err := bs.build.BuildOn(B.ID(), bs.setter) + _, err := bs.build.BuildOn(B.ID(), bs.setter, bs.sign) bs.Require().NoError(err) - expectedReceipts := []*flow.ExecutionReceiptMeta{} + expectedReceipts := []*flow.ExecutionReceiptStub{} expectedResults := []*flow.ExecutionResult{} bs.Assert().ElementsMatch(expectedReceipts, bs.assembled.Receipts, "builder should not include receipts that are already incorporated in the current fork") bs.Assert().ElementsMatch(expectedResults, bs.assembled.Results, "builder should not include results that were already incorporated") @@ -1317,11 +1424,13 @@ func (bs *BuilderSuite) TestIntegration_DuplicateReceipts() { func (bs *BuilderSuite) TestIntegration_ResultAlreadyIncorporated() { // A is a block containing a valid receipt for block P recP := unittest.ExecutionReceiptFixture(unittest.WithResult(bs.resultForBlock[bs.parentID])) - A := unittest.BlockWithParentFixture(bs.headers[bs.parentID]) - A.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{recP.Meta()}, - Results: []*flow.ExecutionResult{&recP.ExecutionResult}, - }) + A := unittest.BlockWithParentAndPayload( + bs.headers[bs.parentID], + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{recP.Stub()}, + Results: []*flow.ExecutionResult{&recP.ExecutionResult}, + }, + ) recP_B := unittest.ExecutionReceiptFixture(unittest.WithResult(&recP.ExecutionResult)) @@ -1333,18 +1442,19 @@ func (bs *BuilderSuite) TestIntegration_ResultAlreadyIncorporated() { resultByID := block.Payload.Results.Lookup() for _, meta := range block.Payload.Receipts { result := resultByID[meta.ResultID] - rcpt := flow.ExecutionReceiptFromMeta(*meta, *result) - _, err := bs.build.recPool.AddReceipt(rcpt, bs.blocks[rcpt.ExecutionResult.BlockID].Header) + rcpt, err := flow.ExecutionReceiptFromStub(*meta, *result) + bs.NoError(err) + _, err = bs.build.recPool.AddReceipt(rcpt, bs.blocks[rcpt.ExecutionResult.BlockID].ToHeader()) bs.NoError(err) } } - _, err := bs.build.recPool.AddReceipt(recP_B, bs.blocks[recP_B.ExecutionResult.BlockID].Header) + _, err := bs.build.recPool.AddReceipt(recP_B, bs.blocks[recP_B.ExecutionResult.BlockID].ToHeader()) bs.NoError(err) - _, err = 
bs.build.BuildOn(A.ID(), bs.setter) + _, err = bs.build.BuildOn(A.ID(), bs.setter, bs.sign) bs.Require().NoError(err) - expectedReceipts := []*flow.ExecutionReceiptMeta{recP_B.Meta()} + expectedReceipts := []*flow.ExecutionReceiptStub{recP_B.Stub()} expectedResults := []*flow.ExecutionResult{} bs.Assert().ElementsMatch(expectedReceipts, bs.assembled.Receipts, "builder should include receipt metas for results that were already incorporated") bs.Assert().ElementsMatch(expectedResults, bs.assembled.Results, "builder should not include results that were already incorporated") @@ -1355,7 +1465,7 @@ func storeSealForIncorporatedResult(result *flow.ExecutionResult, incorporatingB unittest.IncorporatedResultSeal.WithResult(result), unittest.IncorporatedResultSeal.WithIncorporatedBlockID(incorporatingBlockID), ) - pendingSeals[incorporatedResultSeal.ID()] = incorporatedResultSeal + pendingSeals[incorporatedResultSeal.IncorporatedResultID()] = incorporatedResultSeal return incorporatedResultSeal } @@ -1370,11 +1480,13 @@ func (bs *BuilderSuite) TestIntegration_RepopulateExecutionTreeAtStartup() { // setup initial state // A is a block containing a valid receipt for block P recP := unittest.ExecutionReceiptFixture(unittest.WithResult(bs.resultForBlock[bs.parentID])) - A := unittest.BlockWithParentFixture(bs.headers[bs.parentID]) - A.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{recP.Meta()}, - Results: []*flow.ExecutionResult{&recP.ExecutionResult}, - }) + A := unittest.BlockWithParentAndPayload( + bs.headers[bs.parentID], + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{recP.Stub()}, + Results: []*flow.ExecutionResult{&recP.ExecutionResult}, + }, + ) // B is a block containing two valid receipts, with different results, for // block A @@ -1382,13 +1494,15 @@ func (bs *BuilderSuite) TestIntegration_RepopulateExecutionTreeAtStartup() { recA1 := unittest.ExecutionReceiptFixture(unittest.WithResult(resA1)) resA2 := unittest.ExecutionResultFixture(unittest.WithBlock(A), unittest.WithPreviousResult(recP.ExecutionResult)) recA2 := unittest.ExecutionReceiptFixture(unittest.WithResult(resA2)) - B := unittest.BlockWithParentFixture(A.Header) - B.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{recA1.Meta(), recA2.Meta()}, - Results: []*flow.ExecutionResult{&recA1.ExecutionResult, &recA2.ExecutionResult}, - }) + B := unittest.BlockWithParentAndPayload( + A.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{recA1.Stub(), recA2.Stub()}, + Results: []*flow.ExecutionResult{&recA1.ExecutionResult, &recA2.ExecutionResult}, + }, + ) - C := unittest.BlockWithParentFixture(B.Header) + C := unittest.BlockWithParentFixture(B.ToHeader()) bs.storeBlock(A) bs.storeBlock(B) @@ -1403,7 +1517,8 @@ func (bs *BuilderSuite) TestIntegration_RepopulateExecutionTreeAtStartup() { bs.resultByID[result.ID()] = result } for _, meta := range block.Payload.Receipts { - receipt := flow.ExecutionReceiptFromMeta(*meta, *bs.resultByID[meta.ResultID]) + receipt, err := flow.ExecutionReceiptFromStub(*meta, *bs.resultByID[meta.ResultID]) + bs.NoError(err) bs.receiptsByID[meta.ID()] = receipt bs.receiptsByBlockID[receipt.ExecutionResult.BlockID] = append(bs.receiptsByBlockID[receipt.ExecutionResult.BlockID], receipt) } @@ -1423,7 +1538,6 @@ func (bs *BuilderSuite) TestIntegration_RepopulateExecutionTreeAtStartup() { var err error bs.build, err = NewBuilder( noopMetrics, - bs.db, bs.state, bs.headerDB, bs.sealDB, @@ -1431,6 +1545,7 @@ func (bs *BuilderSuite) 
TestIntegration_RepopulateExecutionTreeAtStartup() { bs.blockDB, bs.resultDB, bs.receiptsDB, + bs.stateMutator, bs.guarPool, bs.sealPool, recPool, @@ -1450,13 +1565,13 @@ func (bs *BuilderSuite) TestIntegration_RepopulateExecutionTreeAtStartup() { recC := unittest.ExecutionReceiptFixture(unittest.WithResult(resC)) // Add recB1 and recB2 to the mempool for inclusion in the next candidate - _, _ = bs.build.recPool.AddReceipt(recB1, B.Header) - _, _ = bs.build.recPool.AddReceipt(recB2, B.Header) - _, _ = bs.build.recPool.AddReceipt(recC, C.Header) + _, _ = bs.build.recPool.AddReceipt(recB1, B.ToHeader()) + _, _ = bs.build.recPool.AddReceipt(recB2, B.ToHeader()) + _, _ = bs.build.recPool.AddReceipt(recC, C.ToHeader()) - _, err = bs.build.BuildOn(C.ID(), bs.setter) + _, err = bs.build.BuildOn(C.ID(), bs.setter, bs.sign) bs.Require().NoError(err) - expectedReceipts := flow.ExecutionReceiptMetaList{recB1.Meta(), recB2.Meta(), recC.Meta()} + expectedReceipts := flow.ExecutionReceiptStubList{recB1.Stub(), recB2.Stub(), recC.Stub()} expectedResults := flow.ExecutionResultList{&recB1.ExecutionResult, &recB2.ExecutionResult, &recC.ExecutionResult} bs.Assert().ElementsMatch(expectedReceipts, bs.assembled.Receipts, "payload should contain receipts from valid execution forks") bs.Assert().ElementsMatch(expectedResults, bs.assembled.Results, "payload should contain results from valid execution forks") diff --git a/module/builder/consensus/config.go b/module/builder/consensus/config.go index 8c1df13b213..f97350cbdf9 100644 --- a/module/builder/consensus/config.go +++ b/module/builder/consensus/config.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package consensus import ( diff --git a/module/chainsync/core.go b/module/chainsync/core.go index 08514b1b513..c493df79cb9 100644 --- a/module/chainsync/core.go +++ b/module/chainsync/core.go @@ -112,7 +112,7 @@ func (c *Core) HandleBlock(header *flow.Header) bool { } // HandleHeight handles receiving a new highest finalized height from another node. -// If the height difference between local and the reported height, we do nothing. +// If the height difference between local and the reported height is within tolerance, we do nothing. // Otherwise, we queue each missing height. func (c *Core) HandleHeight(final *flow.Header, height uint64) { log := c.log.With().Uint64("final_height", final.Height).Uint64("recv_height", height).Logger() diff --git a/module/chainsync/core_rapid_test.go b/module/chainsync/core_rapid_test.go index 649fce871d8..2554577caa3 100644 --- a/module/chainsync/core_rapid_test.go +++ b/module/chainsync/core_rapid_test.go @@ -25,8 +25,8 @@ func populatedBlockStore(t *rapid.T) []*flow.Header { store := []*flow.Header{unittest.BlockHeaderFixture()} for i := 1; i < NUM_BLOCKS; i++ { // we sample from the store 2/3 times to get deeper trees - b := rapid.OneOf(rapid.Just(unittest.BlockHeaderFixture()), rapid.SampledFrom(store), rapid.SampledFrom(store)).Draw(t, "parent").(flow.Header) - store = append(store, unittest.BlockHeaderWithParentFixture(&b)) + b := rapid.OneOf(rapid.Just(unittest.BlockHeaderFixture()), rapid.SampledFrom(store), rapid.SampledFrom(store)).Draw(t, "parent") + store = append(store, unittest.BlockHeaderWithParentFixture(b)) } return store } @@ -38,8 +38,8 @@ type rapidSync struct { heightRequests map[uint64]bool // depth 1 pushdown automaton to track height requests } -// Init is an action for initializing a rapidSync instance.
-func (r *rapidSync) Init(t *rapid.T) { +// init is an action for initializing a rapidSync instance. +func (r *rapidSync) init(t *rapid.T) { var err error r.core, err = New(zerolog.New(io.Discard), DefaultConfig(), metrics.NewNoopCollector(), flow.Localnet) @@ -52,7 +52,7 @@ func (r *rapidSync) Init(t *rapid.T) { // RequestByID is an action that requests a block by its ID. func (r *rapidSync) RequestByID(t *rapid.T) { - b := rapid.SampledFrom(r.store).Draw(t, "id_request").(*flow.Header) + b := rapid.SampledFrom(r.store).Draw(t, "id_request") r.core.RequestBlock(b.ID(), b.Height) // Re-queueing by ID should always succeed r.idRequests[b.ID()] = true @@ -62,7 +62,7 @@ func (r *rapidSync) RequestByID(t *rapid.T) { // RequestByHeight is an action that requests a specific height func (r *rapidSync) RequestByHeight(t *rapid.T) { - b := rapid.SampledFrom(r.store).Draw(t, "height_request").(*flow.Header) + b := rapid.SampledFrom(r.store).Draw(t, "height_request") r.core.RequestHeight(b.Height) // Re-queueing by height should always succeed r.heightRequests[b.Height] = true @@ -71,8 +71,8 @@ func (r *rapidSync) RequestByHeight(t *rapid.T) { // HandleHeight is an action that requests a heights // upon receiving an argument beyond a certain tolerance func (r *rapidSync) HandleHeight(t *rapid.T) { - b := rapid.SampledFrom(r.store).Draw(t, "height_hint_request").(*flow.Header) - incr := rapid.IntRange(0, (int)(DefaultConfig().Tolerance)+1).Draw(t, "height increment").(int) + b := rapid.SampledFrom(r.store).Draw(t, "height_hint_request") + incr := rapid.IntRange(0, (int)(DefaultConfig().Tolerance)+1).Draw(t, "height increment") requestHeight := b.Height + (uint64)(incr) r.core.HandleHeight(b, requestHeight) // Re-queueing by height should always succeed if beyond tolerance @@ -85,7 +85,7 @@ func (r *rapidSync) HandleHeight(t *rapid.T) { // HandleByID is an action that provides a block header to the sync engine func (r *rapidSync) HandleByID(t *rapid.T) { - b := rapid.SampledFrom(r.store).Draw(t, "id_handling").(*flow.Header) + b := rapid.SampledFrom(r.store).Draw(t, "id_handling") success := r.core.HandleBlock(b) assert.True(t, success || r.idRequests[b.ID()] == false) @@ -174,7 +174,11 @@ func (r *rapidSync) Check(t *rapid.T) { func TestRapidSync(t *testing.T) { unittest.SkipUnless(t, unittest.TEST_FLAKY, "flaky test") - rapid.Check(t, rapid.Run(&rapidSync{})) + rapid.Check(t, func(t *rapid.T) { + sm := new(rapidSync) + sm.init(t) + t.Repeat(rapid.StateMachineActions(sm)) + }) } // utility functions diff --git a/module/chainsync/core_test.go b/module/chainsync/core_test.go index d8feca7e2fd..0fbd306e553 100644 --- a/module/chainsync/core_test.go +++ b/module/chainsync/core_test.go @@ -394,38 +394,42 @@ func (ss *SyncSuite) TestPrune() { final.Height = 100 var ( - prunableHeights []flow.Block - prunableBlockIDs []flow.Block - unprunable []flow.Block + prunableHeights []*flow.Block + prunableBlockIDs []*flow.Block + unprunable []*flow.Block ) // add some finalized blocks by height for i := 0; i < 3; i++ { - block := unittest.BlockFixture() - block.Header.Height = uint64(i + 1) - ss.core.heights[block.Header.Height] = ss.QueuedStatus() + block := unittest.BlockFixture( + unittest.Block.WithHeight(uint64(i + 1)), + ) + ss.core.heights[block.Height] = ss.QueuedStatus() prunableHeights = append(prunableHeights, block) } // add some un-finalized blocks by height for i := 0; i < 3; i++ { - block := unittest.BlockFixture() - block.Header.Height = final.Height + uint64(i+1) - 
ss.core.heights[block.Header.Height] = ss.QueuedStatus() + block := unittest.BlockFixture( + unittest.Block.WithHeight(final.Height + uint64(i+1)), + ) + ss.core.heights[block.Height] = ss.QueuedStatus() unprunable = append(unprunable, block) } // add some finalized blocks by block ID for i := 0; i < 3; i++ { - block := unittest.BlockFixture() - block.Header.Height = uint64(i + 1) - ss.core.blockIDs[block.ID()] = ss.ReceivedStatus(block.Header) + block := unittest.BlockFixture( + unittest.Block.WithHeight(uint64(i + 1)), + ) + ss.core.blockIDs[block.ID()] = ss.ReceivedStatus(block.ToHeader()) prunableBlockIDs = append(prunableBlockIDs, block) } // add some un-finalized, received blocks by block ID for i := 0; i < 3; i++ { - block := unittest.BlockFixture() - block.Header.Height = 100 + uint64(i+1) - ss.core.blockIDs[block.ID()] = ss.ReceivedStatus(block.Header) + block := unittest.BlockFixture( + unittest.Block.WithHeight(100 + uint64(i+1)), + ) + ss.core.blockIDs[block.ID()] = ss.ReceivedStatus(block.ToHeader()) unprunable = append(unprunable, block) } @@ -444,11 +448,11 @@ func (ss *SyncSuite) TestPrune() { assert.False(ss.T(), exists) } for _, block := range prunableHeights { - _, exists := ss.core.heights[block.Header.Height] + _, exists := ss.core.heights[block.Height] assert.False(ss.T(), exists) } for _, block := range unprunable { - _, heightExists := ss.core.heights[block.Header.Height] + _, heightExists := ss.core.heights[block.Height] _, blockIDExists := ss.core.blockIDs[block.ID()] assert.True(ss.T(), heightExists || blockIDExists) } diff --git a/module/chunks.go b/module/chunks.go index 696689f1db2..e71427aa5a8 100644 --- a/module/chunks.go +++ b/module/chunks.go @@ -22,11 +22,5 @@ type ChunkVerifier interface { // the final state commitment. - // It returns a Spock Secret as a byte array, verification fault of the - // chunk, and an error. + // It returns a Spock Secret as a byte array and an error; chunk + // verification faults are reported via the returned error as well.
- Verify( - ch *verification.VerifiableChunkData, - ) ( - []byte, - chmodels.ChunkFault, - error, - ) + Verify(ch *verification.VerifiableChunkData) ([]byte, error) } diff --git a/module/chunks/chunkVerifier.go b/module/chunks/chunkVerifier.go index 11b3a2d6c2b..7b2b18c53db 100644 --- a/module/chunks/chunkVerifier.go +++ b/module/chunks/chunkVerifier.go @@ -6,12 +6,10 @@ import ( "github.com/rs/zerolog" - "github.com/onflow/flow-go/fvm/blueprints" - "github.com/onflow/flow-go/model/verification" - "github.com/onflow/flow-go/engine/execution/computation/computer" executionState "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/fvm/storage/derived" "github.com/onflow/flow-go/fvm/storage/logical" "github.com/onflow/flow-go/fvm/storage/snapshot" @@ -19,7 +17,12 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/partial" chmodels "github.com/onflow/flow-go/model/chunks" + "github.com/onflow/flow-go/model/fingerprint" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/provider" + "github.com/onflow/flow-go/module/metrics" ) // ChunkVerifier is a verifier based on the current definitions of the flow network @@ -27,6 +30,7 @@ type ChunkVerifier struct { vm fvm.VM vmCtx fvm.Context systemChunkCtx fvm.Context + callbackCtx fvm.Context logger zerolog.Logger } @@ -35,7 +39,8 @@ func NewChunkVerifier(vm fvm.VM, vmCtx fvm.Context, logger zerolog.Logger) *Chun return &ChunkVerifier{ vm: vm, vmCtx: vmCtx, - systemChunkCtx: computer.SystemChunkContext(vmCtx, vmCtx.Logger), + systemChunkCtx: computer.SystemChunkContext(vmCtx, metrics.NewNoopCollector()), + callbackCtx: computer.CallbackContext(vmCtx, metrics.NewNoopCollector()), logger: logger.With().Str("component", "chunk_verifier").Logger(), } } @@ -48,29 +53,20 @@ func (fcv *ChunkVerifier) Verify( vc *verification.VerifiableChunkData, ) ( []byte, - chmodels.ChunkFault, error, ) { var ctx fvm.Context + var callbackCtx fvm.Context var transactions []*fvm.TransactionProcedure - if vc.IsSystemChunk { - ctx = fvm.NewContextFromParent( - fcv.systemChunkCtx, - fvm.WithBlockHeader(vc.Header)) - - txBody, err := blueprints.SystemChunkTransaction(fcv.vmCtx.Chain) - if err != nil { - return nil, nil, fmt.Errorf("could not get system chunk transaction: %w", err) - } + derivedBlockData := derived.NewEmptyDerivedBlockData(logical.Time(vc.TransactionOffset)) - transactions = []*fvm.TransactionProcedure{ - fvm.Transaction(txBody, vc.TransactionOffset+uint32(0)), - } + if vc.IsSystemChunk { + ctx = contextFromVerifiableChunk(fcv.systemChunkCtx, vc, derivedBlockData) + callbackCtx = contextFromVerifiableChunk(fcv.callbackCtx, vc, derivedBlockData) + // transactions will be dynamically created for system chunk } else { - ctx = fvm.NewContextFromParent( - fcv.vmCtx, - fvm.WithBlockHeader(vc.Header)) + ctx = contextFromVerifiableChunk(fcv.vmCtx, vc, derivedBlockData) transactions = make( []*fvm.TransactionProcedure, @@ -82,8 +78,9 @@ func (fcv *ChunkVerifier) Verify( } } - return fcv.verifyTransactionsInContext( + res, err := fcv.verifyTransactionsInContext( ctx, + callbackCtx, vc.TransactionOffset, vc.Chunk, vc.ChunkDataPack, @@ -91,6 +88,21 @@ func (fcv *ChunkVerifier) Verify( transactions, vc.EndState, vc.IsSystemChunk) + + return res, err +} + +func contextFromVerifiableChunk( + 
parentCtx fvm.Context, + vc *verification.VerifiableChunkData, + derivedBlockData *derived.DerivedBlockData, +) fvm.Context { + return fvm.NewContextFromParent( + parentCtx, + fvm.WithBlockHeader(vc.Header), + fvm.WithProtocolStateSnapshot(vc.Snapshot), + fvm.WithDerivedBlockData(derivedBlockData), + ) } type partialLedgerStorageSnapshot struct { @@ -123,6 +135,7 @@ func (storage *partialLedgerStorageSnapshot) Get( func (fcv *ChunkVerifier) verifyTransactionsInContext( context fvm.Context, + callbackCtx fvm.Context, transactionOffset uint32, chunk *flow.Chunk, chunkDataPack *flow.ChunkDataPack, @@ -132,7 +145,6 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext( systemChunk bool, ) ( []byte, - chmodels.ChunkFault, error, ) { @@ -144,30 +156,41 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext( execResID := result.ID() if chunkDataPack == nil { - return nil, nil, fmt.Errorf("missing chunk data pack") + return nil, fmt.Errorf("missing chunk data pack") + } + + // Execution nodes must not include a collection for system chunks. + if systemChunk && chunkDataPack.Collection != nil { + return nil, chmodels.NewCFSystemChunkIncludedCollection(chIndex, execResID) + } + + // Consensus nodes already enforce some fundamental properties of ExecutionResults: + // 1. The result contains the correct number of chunks (compared to the block it pertains to). + // 2. The result contains chunks with strictly monotonically increasing `Chunk.Index` starting with index 0 + // 3. for each chunk, the consistency requirement `Chunk.Index == Chunk.CollectionIndex` holds + // See `module/validation/receiptValidator` for implementation, which is used by the consensus nodes. + // And issue https://github.com/dapperlabs/flow-go/issues/6864 for implementing 3. + // Hence, the following is a consistency check. Failing it means we have either encountered a critical bug, + // or a super majority of byzantine nodes. In their case, continuing operations is impossible. + if int(chIndex) >= len(result.Chunks) { + return nil, chmodels.NewCFInvalidVerifiableChunk("chunk index out of bounds: ", + fmt.Errorf("chunk index out of bounds of ExecutionResult's chunk list"), chIndex, execResID) + } - events := make(flow.EventsList, 0) + var events flow.EventsList = nil serviceEvents := make(flow.ServiceEventList, 0) // constructing a partial trie given chunk data package psmt, err := partial.NewLedger(chunkDataPack.Proof, ledger.State(chunkDataPack.StartState), partial.DefaultPathFinderVersion) - if err != nil { // TODO provide more details based on the error type return nil, chmodels.NewCFInvalidVerifiableChunk( - "error constructing partial trie: ", - err, - chIndex, - execResID), - nil + "error constructing partial trie: ", + err, + chIndex, + execResID) } - context = fvm.NewContextFromParent( - context, - fvm.WithDerivedBlockData( - derived.NewEmptyDerivedBlockData(logical.Time(transactionOffset)))) - // chunk view construction // unknown register tracks access to parts of the partial trie which // are not expanded and values are unknown.
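With `chmodels.ChunkFault` dropped from the return signature, faults such as `NewCFInvalidVerifiableChunk` above now travel inside the single `error` value. A minimal sketch — hypothetical `chunkFault` type, not the `chmodels` API — of how a caller can still separate a protocol-level chunk fault from an unexpected exception via `errors.As`:

```go
package main

import (
	"errors"
	"fmt"
)

// chunkFault is a stand-in for the chmodels fault types, which now implement error.
type chunkFault struct{ reason string }

func (f *chunkFault) Error() string { return "chunk fault: " + f.reason }

// verify mimics the new Verify shape: ([]byte, error), faults folded into error.
func verify(faulty bool) ([]byte, error) {
	if faulty {
		return nil, &chunkFault{reason: "non-matching final state"}
	}
	return []byte("spock-secret"), nil
}

func main() {
	_, err := verify(true)
	var cf *chunkFault
	switch {
	case errors.As(err, &cf):
		fmt.Println("faulty chunk:", cf.reason) // expected protocol-level outcome
	case err != nil:
		fmt.Println("exception:", err) // unexpected; escalate
	}
}
```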
@@ -182,16 +205,61 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext( chunkState := fvmState.NewExecutionState(nil, fvmState.DefaultParameters()) var problematicTx flow.Identifier - // executes all transactions in this chunk - for i, tx := range transactions { + // bookkeeping for the system chunk: main-loop start index and the already-executed process-callback result + var txStartIndex int + var processResult *flow.LightTransactionResult + + if systemChunk { + transactions, processResult, err = fcv.createSystemChunk( + callbackCtx, + &snapshotTree, + chunkState, + transactionOffset, + &events, + &serviceEvents, + unknownRegTouch, + execResID, + chIndex, + ) + if err != nil { + return nil, fmt.Errorf("failed to create system chunk transactions: %w", err) + } + } + + // collect execution data formatted transaction results + var txResults []flow.LightTransactionResult + if len(transactions) > 0 { + txResults = make([]flow.LightTransactionResult, len(transactions)) + } + + // If system chunk, we already executed the process callback transaction so skip it + // by setting the start index to 1 and assigning existing process result to tx results + if processResult != nil { + // if process was executed, transaction length should always be at least 2 (process + system) + txResults[0] = *processResult + txStartIndex = 1 + } + + // Executes all transactions in this chunk (or remaining transactions for callbacks) + for i := txStartIndex; i < len(transactions); i++ { + tx := transactions[i] + ctx := context + + // For system chunks with callbacks: + // - Process callback transaction and callback executions use callbackCtx + // - System transaction (last one) uses the original system chunk context + if systemChunk && context.ScheduleCallbacksEnabled && i < len(transactions)-1 { + ctx = callbackCtx + } + executionSnapshot, output, err := fcv.vm.Run( - context, + ctx, tx, snapshotTree) if err != nil { // this covers unexpected and very rare cases (e.g. system memory issues...), // so we shouldn't be here even if transaction naturally fails (e.g. permission, runtime ... ) - return nil, nil, fmt.Errorf("failed to execute transaction: %d (%w)", i, err) + return nil, fmt.Errorf("failed to execute transaction: %d (%w)", i, err) } if len(unknownRegTouch) > 0 { @@ -204,7 +272,13 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext( snapshotTree = snapshotTree.Append(executionSnapshot) err = chunkState.Merge(executionSnapshot) if err != nil { - return nil, nil, fmt.Errorf("failed to merge: %d (%w)", i, err) + return nil, fmt.Errorf("failed to merge: %d (%w)", i, err) + } + + txResults[i] = flow.LightTransactionResult{ + TransactionID: tx.ID, + ComputationUsed: output.ComputationUsed, + Failed: output.Err != nil, } } @@ -214,12 +288,12 @@ for id := range unknownRegTouch { missingRegs = append(missingRegs, id.String()) } - return nil, chmodels.NewCFMissingRegisterTouch(missingRegs, chIndex, execResID, problematicTx), nil + return nil, chmodels.NewCFMissingRegisterTouch(missingRegs, chIndex, execResID, problematicTx) } eventsHash, err := flow.EventsMerkleRootHash(events) if err != nil { - return nil, nil, fmt.Errorf("cannot calculate events collection hash: %w", err) + return nil, fmt.Errorf("cannot calculate events collection hash: %w", err) } if chunk.EventCollection != eventsHash { collectionID := "" @@ -229,7 +303,7 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext( for i, event := range events { fcv.logger.Warn().Int("list_index", i). Str("event_id", event.ID().String()).
- Hex("event_fingerptint", event.Fingerprint()). + Hex("event_fingerprint", fingerprint.Fingerprint(event)). Str("event_type", string(event.Type)). Str("event_tx_id", event.TransactionID.String()). Uint32("event_tx_index", event.TransactionIndex). @@ -242,17 +316,16 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext( Msg("not matching events debug") } - return nil, chmodels.NewCFInvalidEventsCollection(chunk.EventCollection, eventsHash, chIndex, execResID, events), nil + return nil, chmodels.NewCFInvalidEventsCollection(chunk.EventCollection, eventsHash, chIndex, execResID, events) } - if systemChunk { - equal, err := result.ServiceEvents.EqualTo(serviceEvents) - if err != nil { - return nil, nil, fmt.Errorf("error while comparing service events: %w", err) - } - if !equal { - return nil, chmodels.CFInvalidServiceSystemEventsEmitted(result.ServiceEvents, serviceEvents, chIndex, execResID), nil - } + serviceEventsInChunk := result.ServiceEventsByChunk(chunk.Index) + equal, err := serviceEventsInChunk.EqualTo(serviceEvents) + if err != nil { + return nil, fmt.Errorf("error while comparing service events: %w", err) + } + if !equal { + return nil, chmodels.CFInvalidServiceSystemEventsEmitted(serviceEventsInChunk, serviceEvents, chIndex, execResID) } // Applying chunk updates to the partial trie. This returns the expected @@ -267,11 +340,10 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext( keys, values) if err != nil { - return nil, nil, fmt.Errorf("cannot create ledger update: %w", err) + return nil, fmt.Errorf("cannot create ledger update: %w", err) } - expEndStateComm, _, err := psmt.Set(update) - + expEndStateComm, trieUpdate, err := psmt.Set(update) if err != nil { if errors.Is(err, ledger.ErrMissingKeys{}) { keys := err.(*ledger.ErrMissingKeys).Keys @@ -279,16 +351,188 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext( for i, key := range keys { stringKeys[i] = key.String() } - return nil, chmodels.NewCFMissingRegisterTouch(stringKeys, chIndex, execResID, problematicTx), nil + return nil, chmodels.NewCFMissingRegisterTouch(stringKeys, chIndex, execResID, problematicTx) } - return nil, chmodels.NewCFMissingRegisterTouch(nil, chIndex, execResID, problematicTx), nil + return nil, chmodels.NewCFMissingRegisterTouch(nil, chIndex, execResID, problematicTx) } // TODO check if exec node provided register touches that was not used (no read and no update) // check if the end state commitment mentioned in the chunk matches // what the partial trie is providing. if flow.StateCommitment(expEndStateComm) != endState { - return nil, chmodels.NewCFNonMatchingFinalState(flow.StateCommitment(expEndStateComm), endState, chIndex, execResID), nil + return nil, chmodels.NewCFNonMatchingFinalState(flow.StateCommitment(expEndStateComm), endState, chIndex, execResID) + } + + // verify the execution data ID included in the ExecutionResult + // 1. check basic execution data root fields + if chunk.BlockID != chunkDataPack.ExecutionDataRoot.BlockID { + return nil, chmodels.NewCFExecutionDataBlockIDMismatch(chunkDataPack.ExecutionDataRoot.BlockID, chunk.BlockID, chIndex, execResID) + } + + if len(chunkDataPack.ExecutionDataRoot.ChunkExecutionDataIDs) != len(result.Chunks) { + return nil, chmodels.NewCFExecutionDataChunksLengthMismatch(len(chunkDataPack.ExecutionDataRoot.ChunkExecutionDataIDs), len(result.Chunks), chIndex, execResID) + } + + cedCollection := chunkDataPack.Collection + // the system chunk collection is not included in the chunkDataPack, but is included in the + // ChunkExecutionData. 
Create the collection here using the transaction bodies from the + // transactions list (includes process callback + callback executions + system transaction) + if systemChunk { + systemTxBodies := make([]*flow.TransactionBody, len(transactions)) + for i, tx := range transactions { + systemTxBodies[i] = tx.Transaction + } + + cedCollection, err = flow.NewCollection(flow.UntrustedCollection{ + Transactions: systemTxBodies, + }) + + if err != nil { + return nil, fmt.Errorf("could not construct system collection: %w", err) + } + } + + // 2. build our chunk's chunk execution data using the locally calculated values, and calculate + // its CID + chunkExecutionData := execution_data.ChunkExecutionData{ + Collection: cedCollection, + Events: events, + TrieUpdate: trieUpdate, + TransactionResults: txResults, + } + + cidProvider := provider.NewExecutionDataCIDProvider(execution_data.DefaultSerializer) + cedCID, err := cidProvider.CalculateChunkExecutionDataID(chunkExecutionData) + if err != nil { + return nil, fmt.Errorf("failed to calculate CID of ChunkExecutionData: %w", err) + } + + // 3. check that with the chunk execution results that we created locally, + // we can reproduce the ChunkExecutionData's ID, which the execution node is stating in its ChunkDataPack + if cedCID != chunkDataPack.ExecutionDataRoot.ChunkExecutionDataIDs[chIndex] { + return nil, chmodels.NewCFExecutionDataInvalidChunkCID( + chunkDataPack.ExecutionDataRoot.ChunkExecutionDataIDs[chIndex], + cedCID, + chIndex, + execResID, + ) + } + + // 4. check the execution data root ID by calculating it using the provided execution data root + executionDataID, err := cidProvider.CalculateExecutionDataRootID(chunkDataPack.ExecutionDataRoot) + if err != nil { + return nil, fmt.Errorf("failed to calculate ID of ExecutionDataRoot: %w", err) + } + if executionDataID != result.ExecutionDataID { + return nil, chmodels.NewCFInvalidExecutionDataID(result.ExecutionDataID, executionDataID, chIndex, execResID) } - return chunkExecutionSnapshot.SpockSecret, nil, nil + + return chunkExecutionSnapshot.SpockSecret, nil +} + +// createSystemChunk recreates the system chunk transactions and executes the +// process callback transaction if scheduled callbacks are enabled. +// +// Returns the system transaction list, which always contains the system chunk transaction. +// If scheduled callbacks are enabled, the list additionally includes the process and +// execute callback transactions, and the result of the already-executed process +// callback transaction is returned as well. No errors are expected during normal operation. +// +// If scheduled callbacks are disabled it will only contain the system transaction. +// If scheduled callbacks are enabled we need to do the following actions: +// 1. add and execute the process callback transaction that returns events for execute callbacks +// 2. add one transaction for each callback event +// 3.
add the system transaction as the last transaction +func (fcv *ChunkVerifier) createSystemChunk( + callbackCtx fvm.Context, + snapshotTree *snapshot.SnapshotTree, + chunkState *fvmState.ExecutionState, + transactionOffset uint32, + events *flow.EventsList, + serviceEvents *flow.ServiceEventList, + unknownRegTouch map[flow.RegisterID]struct{}, + execResID flow.Identifier, + chIndex uint64, +) ([]*fvm.TransactionProcedure, *flow.LightTransactionResult, error) { + txIndex := transactionOffset + + // If scheduled callbacks are disabled, we only have the system transaction in the chunk + if !fcv.vmCtx.ScheduleCallbacksEnabled { + txBody, err := blueprints.SystemChunkTransaction(fcv.vmCtx.Chain) + if err != nil { + return nil, nil, fmt.Errorf("could not get system chunk transaction: %w", err) + } + + // Need to return a placeholder result that will be filled by the caller + // when the transaction is actually executed + return []*fvm.TransactionProcedure{ + fvm.Transaction(txBody, txIndex), + }, nil, nil + } + + processBody, err := blueprints.ProcessCallbacksTransaction(fcv.vmCtx.Chain) + if err != nil { + return nil, nil, fmt.Errorf("could not get process callback transaction: %w", err) + } + processTx := fvm.Transaction(processBody, txIndex) + + // Execute process callback transaction + executionSnapshot, processOutput, err := fcv.vm.Run(callbackCtx, processTx, *snapshotTree) + if err != nil { + return nil, nil, fmt.Errorf("failed to execute process callback transaction: %w", err) + } + if processOutput.Err != nil { + return nil, nil, fmt.Errorf("process callback transaction failed: %w", processOutput.Err) + } + + processResult := &flow.LightTransactionResult{ + TransactionID: processTx.ID, + ComputationUsed: processOutput.ComputationUsed, + Failed: false, + } + + if len(unknownRegTouch) > 0 { + var missingRegs []string + for id := range unknownRegTouch { + missingRegs = append(missingRegs, id.String()) + } + return nil, nil, chmodels.NewCFMissingRegisterTouch(missingRegs, chIndex, execResID, processTx.ID) + } + + // Generate callback execution transactions from the events + callbackTxs, err := blueprints.ExecuteCallbacksTransactions(fcv.vmCtx.Chain, processOutput.Events) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate callback execution transactions: %w", err) + } + + // Build the final transaction list: [processCallback, ...callbackExecutions, systemTx] + transactions := make([]*fvm.TransactionProcedure, 0, len(callbackTxs)+2) + transactions = append(transactions, processTx) + + // Add callback execution transactions + for _, c := range callbackTxs { + txIndex++ + transactions = append(transactions, fvm.Transaction(c, txIndex)) + } + + // Add the system transaction as the last transaction in the collection + systemTx, err := blueprints.SystemChunkTransaction(fcv.vmCtx.Chain) + if err != nil { + return nil, nil, fmt.Errorf("could not get system chunk transaction: %w", err) + } + + txIndex++ + transactions = append(transactions, fvm.Transaction(systemTx, txIndex)) + + // Add events with pointers to reflect the change to the caller + *events = append(*events, processOutput.Events...) + *serviceEvents = append(*serviceEvents, processOutput.ConvertedServiceEvents...)
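+ // Note: events and serviceEvents are out-parameters. Appending to them here makes the
+ // process callback's outputs visible in the caller's chunk-level event lists, which are
+ // later checked against the chunk's event collection hash.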
+ + *snapshotTree = snapshotTree.Append(executionSnapshot) + err = chunkState.Merge(executionSnapshot) + if err != nil { + return nil, nil, fmt.Errorf("failed to merge process callback: %w", err) + } + + return transactions, processResult, nil } diff --git a/module/chunks/chunkVerifier_test.go b/module/chunks/chunkVerifier_test.go index a794d66c184..ca6190f5467 100644 --- a/module/chunks/chunkVerifier_test.go +++ b/module/chunks/chunkVerifier_test.go @@ -2,20 +2,25 @@ package chunks_test import ( "fmt" - "math/rand" "testing" - "time" + "github.com/ipfs/go-cid" + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" "github.com/onflow/cadence/runtime" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" executionState "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/blueprints" fvmErrors "github.com/onflow/flow-go/fvm/errors" + fvmmock "github.com/onflow/flow-go/fvm/mock" "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/ledger" completeLedger "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/ledger/complete/wal/fixtures" @@ -24,10 +29,14 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" "github.com/onflow/flow-go/module/chunks" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/provider" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/unittest" ) +// eventsList is the set of events emitted by each transaction, by default var eventsList = flow.EventsList{ { Type: "event.someType", @@ -45,41 +54,107 @@ var eventsList = flow.EventsList{ }, } +const computationUsed = uint64(100) + +var id0 = flow.NewRegisterID(unittest.RandomAddressFixture(), "") +var id5 = flow.NewRegisterID(unittest.RandomAddressFixture(), "") + // the chain we use for this test suite var testChain = flow.Emulator var epochSetupEvent, _ = unittest.EpochSetupFixtureByChainID(testChain) var epochCommitEvent, _ = unittest.EpochCommitFixtureByChainID(testChain) -var epochSetupServiceEvent, _ = convert.ServiceEvent(testChain, epochSetupEvent) -var epochCommitServiceEvent, _ = convert.ServiceEvent(testChain, epochCommitEvent) - -var serviceEventsList = []flow.ServiceEvent{ - *epochSetupServiceEvent, +// serviceEventsList is the list of service events emitted by default. +var serviceEventsList = []flow.Event{ + epochSetupEvent, } +var executionDataCIDProvider = provider.NewExecutionDataCIDProvider(execution_data.DefaultSerializer) + +var serviceTxBody *flow.TransactionBody +var processTxBody *flow.TransactionBody + type ChunkVerifierTestSuite struct { suite.Suite - verifier *chunks.ChunkVerifier - systemOkVerifier *chunks.ChunkVerifier - systemBadVerifier *chunks.ChunkVerifier + + verifier *chunks.ChunkVerifier + ledger *completeLedger.Ledger + + // Below, snapshots and outputs map transaction scripts to execution artifacts + // Test cases can inject a script when constructing a chunk, then associate + // it with the desired execution artifacts by adding entries to these maps. + // If no entry exists, then the default snapshot/output is used. 
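+ // For example, a test can register (illustrative sketch only; the script name is hypothetical):
+ //   s.snapshots["myScript"] = &snapshot.ExecutionSnapshot{}
+ //   s.outputs["myScript"] = fvm.ProcedureOutput{ComputationUsed: computationUsed}
+ // Any transaction whose script equals "myScript" then yields these artifacts from the mocked VM.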
+ snapshots map[string]*snapshot.ExecutionSnapshot + outputs map[string]fvm.ProcedureOutput } // Make sure variables are set properly // SetupTest is executed prior to each individual test in this test suite func (s *ChunkVerifierTestSuite) SetupSuite() { - // seed the RNG - rand.Seed(time.Now().UnixNano()) + vmCtx := fvm.NewContext( + fvm.WithChain(testChain.Chain()), + fvm.WithScheduleCallbacksEnabled(true), + ) + vmMock := fvmmock.NewVM(s.T()) + + vmMock. + On("Run", + mock.AnythingOfType("fvm.Context"), + mock.AnythingOfType("*fvm.TransactionProcedure"), + mock.AnythingOfType("snapshot.SnapshotTree")). + Return( + func(ctx fvm.Context, proc fvm.Procedure, storage snapshot.StorageSnapshot) *snapshot.ExecutionSnapshot { + tx, ok := proc.(*fvm.TransactionProcedure) + if !ok { + s.Fail("unexpected procedure type") + return nil + } + + if snapshot, ok := s.snapshots[string(tx.Transaction.Script)]; ok { + return snapshot + } + return generateDefaultSnapshot() + }, + func(ctx fvm.Context, proc fvm.Procedure, storage snapshot.StorageSnapshot) fvm.ProcedureOutput { + tx, ok := proc.(*fvm.TransactionProcedure) + if !ok { + s.Fail("unexpected procedure type") + return fvm.ProcedureOutput{} + } + + if output, ok := s.outputs[string(tx.Transaction.Script)]; ok { + return output + } + return generateDefaultOutput() + }, + func(ctx fvm.Context, proc fvm.Procedure, storage snapshot.StorageSnapshot) error { + return nil + }, + ). + Maybe() // don't require for all tests since some never call FVM + + s.verifier = chunks.NewChunkVerifier(vmMock, vmCtx, zerolog.Nop()) + + txBody, err := blueprints.SystemChunkTransaction(testChain.Chain()) + require.NoError(s.T(), err) + serviceTxBody = txBody + + processTxBody, err = blueprints.ProcessCallbacksTransaction(testChain.Chain()) + require.NoError(s.T(), err) +} - vm := new(vmMock) - systemOkVm := new(vmSystemOkMock) - systemBadVm := new(vmSystemBadMock) - vmCtx := fvm.NewContext(fvm.WithChain(testChain.Chain())) +func (s *ChunkVerifierTestSuite) SetupTest() { + s.ledger = newLedger(s.T()) - // system chunk runs predefined system transaction, hence we can't distinguish - // based on its content and we need separate VMs - s.verifier = chunks.NewChunkVerifier(vm, vmCtx, zerolog.Nop()) - s.systemOkVerifier = chunks.NewChunkVerifier(systemOkVm, vmCtx, zerolog.Nop()) - s.systemBadVerifier = chunks.NewChunkVerifier(systemBadVm, vmCtx, zerolog.Nop()) + s.snapshots = make(map[string]*snapshot.ExecutionSnapshot) + s.outputs = make(map[string]fvm.ProcedureOutput) + + // Add default snapshot for process callback transaction with no events + // subject to overwrite by test cases + s.snapshots[string(processTxBody.Script)] = &snapshot.ExecutionSnapshot{} + s.outputs[string(processTxBody.Script)] = fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + } } // TestChunkVerifier invokes all the tests in this test suite @@ -89,11 +164,11 @@ func TestChunkVerifier(t *testing.T) { // TestHappyPath tests verification of the baseline verifiable chunk func (s *ChunkVerifierTestSuite) TestHappyPath() { - vch := GetBaselineVerifiableChunk(s.T(), "", false) - assert.NotNil(s.T(), vch) - spockSecret, chFaults, err := s.verifier.Verify(vch) - assert.Nil(s.T(), err) - assert.Nil(s.T(), chFaults) + meta := s.GetTestSetup(s.T(), "", false, false) + vch := meta.RefreshChunkData(s.T()) + + spockSecret, err := s.verifier.Verify(vch) + assert.NoError(s.T(), err) assert.NotNil(s.T(), spockSecret) } @@ -101,172 +176,454 @@ func (s *ChunkVerifierTestSuite) TestHappyPath() { func (s 
*ChunkVerifierTestSuite) TestMissingRegisterTouchForUpdate() { unittest.SkipUnless(s.T(), unittest.TEST_DEPRECATED, "Check new partial ledger for missing keys") - vch := GetBaselineVerifiableChunk(s.T(), "", false) - assert.NotNil(s.T(), vch) + meta := s.GetTestSetup(s.T(), "", false, false) + vch := meta.RefreshChunkData(s.T()) + // remove the second register touch // vch.ChunkDataPack.RegisterTouches = vch.ChunkDataPack.RegisterTouches[:1] - spockSecret, chFaults, err := s.verifier.Verify(vch) - assert.Nil(s.T(), err) - assert.NotNil(s.T(), chFaults) + spockSecret, err := s.verifier.Verify(vch) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFMissingRegisterTouch{}, err) assert.Nil(s.T(), spockSecret) - _, ok := chFaults.(*chunksmodels.CFMissingRegisterTouch) - assert.True(s.T(), ok) } // TestMissingRegisterTouchForRead tests verification given a chunkdatapack missing a register touch (read) func (s *ChunkVerifierTestSuite) TestMissingRegisterTouchForRead() { unittest.SkipUnless(s.T(), unittest.TEST_DEPRECATED, "Check new partial ledger for missing keys") - vch := GetBaselineVerifiableChunk(s.T(), "", false) - assert.NotNil(s.T(), vch) + meta := s.GetTestSetup(s.T(), "", false, false) + vch := meta.RefreshChunkData(s.T()) + // remove the second register touch // vch.ChunkDataPack.RegisterTouches = vch.ChunkDataPack.RegisterTouches[1:] - spockSecret, chFaults, err := s.verifier.Verify(vch) - assert.Nil(s.T(), err) - assert.NotNil(s.T(), chFaults) + spockSecret, err := s.verifier.Verify(vch) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFMissingRegisterTouch{}, err) assert.Nil(s.T(), spockSecret) - _, ok := chFaults.(*chunksmodels.CFMissingRegisterTouch) - assert.True(s.T(), ok) } // TestWrongEndState tests verification covering the case // the state commitment computed after updating the partial trie // doesn't match the one provided by the chunks func (s *ChunkVerifierTestSuite) TestWrongEndState() { - vch := GetBaselineVerifiableChunk(s.T(), "wrongEndState", false) - assert.NotNil(s.T(), vch) - spockSecret, chFaults, err := s.verifier.Verify(vch) - assert.Nil(s.T(), err) - assert.NotNil(s.T(), chFaults) + meta := s.GetTestSetup(s.T(), "wrongEndState", false, false) + vch := meta.RefreshChunkData(s.T()) + + // modify calculated end state, which is different from the one provided by the vch + s.snapshots["wrongEndState"] = &snapshot.ExecutionSnapshot{ + WriteSet: map[flow.RegisterID]flow.RegisterValue{ + id0: []byte{'F'}, + }, + } + + spockSecret, err := s.verifier.Verify(vch) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFNonMatchingFinalState{}, err) assert.Nil(s.T(), spockSecret) - _, ok := chFaults.(*chunksmodels.CFNonMatchingFinalState) - assert.True(s.T(), ok) } // TestFailedTx tests verification behavior in case // of failed transaction. if a transaction fails, it should // still change the state commitment. func (s *ChunkVerifierTestSuite) TestFailedTx() { - vch := GetBaselineVerifiableChunk(s.T(), "failedTx", false) - assert.NotNil(s.T(), vch) - spockSecret, chFaults, err := s.verifier.Verify(vch) - assert.Nil(s.T(), err) - assert.Nil(s.T(), chFaults) + meta := s.GetTestSetup(s.T(), "failedTx", false, false) + vch := meta.RefreshChunkData(s.T()) + + // modify the FVM output to include a failing tx. 
the input already has a failing tx, but we need to register a matching snapshot and output so the mocked FVM reports the failure + s.snapshots["failedTx"] = &snapshot.ExecutionSnapshot{ + WriteSet: map[flow.RegisterID]flow.RegisterValue{ + id5: []byte{'B'}, + }, + } + s.outputs["failedTx"] = fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + Err: fvmErrors.NewCadenceRuntimeError(runtime.Error{}), // inside the runtime (e.g. div by zero, access account) + } + + spockSecret, err := s.verifier.Verify(vch) + assert.NoError(s.T(), err) assert.NotNil(s.T(), spockSecret) } // TestEventsMismatch tests verification behavior in case // of emitted events not matching chunks func (s *ChunkVerifierTestSuite) TestEventsMismatch() { - vch := GetBaselineVerifiableChunk(s.T(), "eventsMismatch", false) - assert.NotNil(s.T(), vch) - _, chFault, err := s.verifier.Verify(vch) - assert.Nil(s.T(), err) - assert.NotNil(s.T(), chFault) - assert.IsType(s.T(), &chunksmodels.CFInvalidEventsCollection{}, chFault) + meta := s.GetTestSetup(s.T(), "eventsMismatch", false, false) + vch := meta.RefreshChunkData(s.T()) + + // add an additional event to the list of events produced by FVM + output := generateDefaultOutput() + output.Events = append(eventsList, flow.Event{ + Type: "event.Extra", + TransactionID: flow.Identifier{2, 3}, + TransactionIndex: 0, + EventIndex: 0, + Payload: []byte{88}, + }) + s.outputs["eventsMismatch"] = output + + _, err := s.verifier.Verify(vch) + assert.Error(s.T(), err) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFInvalidEventsCollection{}, err) } // TestServiceEventsMismatch tests verification behavior in case // of emitted service events not matching chunks' -func (s *ChunkVerifierTestSuite) TestServiceEventsMismatch() { - vch := GetBaselineVerifiableChunk(s.T(), "doesn't matter", true) - assert.NotNil(s.T(), vch) - _, chFault, err := s.systemBadVerifier.Verify(vch) - assert.Nil(s.T(), err) - assert.NotNil(s.T(), chFault) - assert.IsType(s.T(), &chunksmodels.CFInvalidServiceEventsEmitted{}, chFault) +func (s *ChunkVerifierTestSuite) TestServiceEventsMismatch_SystemChunk() { + meta := s.GetTestSetup(s.T(), "doesn't matter", true, true) + vch := meta.RefreshChunkData(s.T()) + + // modify the list of service events produced by FVM + // EpochSetup event is expected, but we emit EpochCommit here resulting in a chunk fault + epochCommitServiceEvent, err := convert.ServiceEvent(testChain, epochCommitEvent) + require.NoError(s.T(), err) + + s.snapshots[string(serviceTxBody.Script)] = &snapshot.ExecutionSnapshot{} + s.outputs[string(serviceTxBody.Script)] = fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + ServiceEvents: unittest.EventsFixture(1), + ConvertedServiceEvents: flow.ServiceEventList{*epochCommitServiceEvent}, + Events: meta.ChunkEvents, + } + + processTxBody, err := blueprints.ProcessCallbacksTransaction(testChain.Chain()) + require.NoError(s.T(), err) + + s.snapshots[string(processTxBody.Script)] = &snapshot.ExecutionSnapshot{} + s.outputs[string(processTxBody.Script)] = fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + } + + _, err = s.verifier.Verify(vch) + assert.Error(s.T(), err) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFInvalidServiceEventsEmitted{}, err) } // TestServiceEventsAreChecked ensures that service events are in fact checked -func (s *ChunkVerifierTestSuite) TestServiceEventsAreChecked() { - vch := GetBaselineVerifiableChunk(s.T(), "doesn't matter", true) - assert.NotNil(s.T(), vch) - _, chFault, err := 
s.systemOkVerifier.Verify(vch) - assert.Nil(s.T(), err) - assert.Nil(s.T(), chFault) +func (s *ChunkVerifierTestSuite) TestServiceEventsAreChecked_SystemChunk() { + meta := s.GetTestSetup(s.T(), "doesn't matter", true, true) + vch := meta.RefreshChunkData(s.T()) + + // set up the verifier output to include the correct data for the service events + output := generateDefaultOutput() + output.ConvertedServiceEvents = meta.ServiceEvents + output.Events = meta.ChunkEvents + s.outputs[string(serviceTxBody.Script)] = output + + _, err := s.verifier.Verify(vch) + assert.NoError(s.T(), err) +} + +// Tests the case where a service event is emitted outside the system chunk +// and the event computed by the VN does not match the Result. +// NOTE: this test case relies on the ordering of transactions in generateCollection. +func (s *ChunkVerifierTestSuite) TestServiceEventsMismatch_NonSystemChunk() { + script := "service event mismatch in non-system chunk" + meta := s.GetTestSetup(s.T(), script, false, true) + vch := meta.RefreshChunkData(s.T()) + + // modify the list of service events produced by FVM + // EpochSetup event is expected, but we emit EpochCommit here resulting in a chunk fault + epochCommitServiceEvent, err := convert.ServiceEvent(testChain, epochCommitEvent) + require.NoError(s.T(), err) + + s.snapshots[script] = &snapshot.ExecutionSnapshot{} + // overwrite the expected output for our custom transaction, passing + // in the non-matching EpochCommit event (should cause validation failure) + s.outputs[script] = fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + ConvertedServiceEvents: flow.ServiceEventList{*epochCommitServiceEvent}, + Events: meta.ChunkEvents[:3], // 2 default events + EpochSetup + } + + _, err = s.verifier.Verify(vch) + + assert.Error(s.T(), err) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFInvalidServiceEventsEmitted{}, err) +} + +// Tests that service events are checked when they appear outside the system chunk. +// NOTE: this test case relies on the ordering of transactions in generateCollection. +func (s *ChunkVerifierTestSuite) TestServiceEventsAreChecked_NonSystemChunk() { + script := "service event in non-system chunk" + meta := s.GetTestSetup(s.T(), script, false, true) + vch := meta.RefreshChunkData(s.T()) + + // set up the verifier output to include the correct data for the service events + output := generateDefaultOutput() + output.ConvertedServiceEvents = meta.ServiceEvents + output.Events = meta.ChunkEvents[:3] // 2 default events + 1 service event + s.outputs[script] = output + + spockSecret, err := s.verifier.Verify(vch) + assert.NoError(s.T(), err) + assert.NotNil(s.T(), spockSecret) +} + +// TestSystemChunkWithCollectionFails ensures verification fails for system chunks with collections +func (s *ChunkVerifierTestSuite) TestSystemChunkWithCollectionFails() { + meta := s.GetTestSetup(s.T(), "doesn't matter", true, true) + + // add a collection to the system chunk + col := unittest.CollectionFixture(1) + meta.Collection = &col + + vch := meta.RefreshChunkData(s.T()) + + _, err := s.verifier.Verify(vch) + assert.Error(s.T(), err) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFSystemChunkIncludedCollection{}, err) } // TestEmptyCollection tests verification behaviour if a // collection doesn't have any transaction. 
func (s *ChunkVerifierTestSuite) TestEmptyCollection() { - vch := GetBaselineVerifiableChunk(s.T(), "", false) - assert.NotNil(s.T(), vch) - col := unittest.CollectionFixture(0) - vch.ChunkDataPack.Collection = &col - vch.EndState = vch.ChunkDataPack.StartState - emptyListHash, err := flow.EventsMerkleRootHash(flow.EventsList{}) + meta := s.GetTestSetup(s.T(), "", false, false) + + // reset test to use an empty collection + collection := unittest.CollectionFixture(0) + meta.Collection = &collection + meta.ChunkEvents = nil + meta.TxResults = nil + + // update the Update to not change the state + update, err := ledger.NewEmptyUpdate(meta.StartState) + require.NoError(s.T(), err) + + meta.Update = update + + vch := meta.RefreshChunkData(s.T()) + + spockSecret, err := s.verifier.Verify(vch) assert.NoError(s.T(), err) - vch.Chunk.EventCollection = emptyListHash // empty collection emits no events - spockSecret, chFaults, err := s.verifier.Verify(vch) - assert.Nil(s.T(), err) - assert.Nil(s.T(), chFaults) assert.NotNil(s.T(), spockSecret) } -// GetBaselineVerifiableChunk returns a verifiable chunk and sets the script -// of a transaction in the middle of the collection to some value to signal the -// mocked vm on what to return as tx exec outcome. -func GetBaselineVerifiableChunk(t *testing.T, script string, system bool) *verification.VerifiableChunkData { +func (s *ChunkVerifierTestSuite) TestExecutionDataBlockMismatch() { + meta := s.GetTestSetup(s.T(), "", false, false) - // Collection setup + // modify Block in the ExecutionDataRoot + meta.ExecDataBlockID = unittest.IdentifierFixture() - collectionSize := 5 - magicTxIndex := 3 - coll := unittest.CollectionFixture(collectionSize) - coll.Transactions[magicTxIndex] = &flow.TransactionBody{Script: []byte(script)} + vch := meta.RefreshChunkData(s.T()) - guarantee := coll.Guarantee() + _, err := s.verifier.Verify(vch) + assert.Error(s.T(), err) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFExecutionDataBlockIDMismatch{}, err) +} - // Block setup - payload := flow.Payload{ - Guarantees: []*flow.CollectionGuarantee{&guarantee}, - } - header := unittest.BlockHeaderFixture() - header.PayloadHash = payload.Hash() - block := flow.Block{ - Header: header, - Payload: &payload, - } - blockID := block.ID() +func (s *ChunkVerifierTestSuite) TestExecutionDataChunkIdsLengthDiffers() { + meta := s.GetTestSetup(s.T(), "", false, false) + vch := meta.RefreshChunkData(s.T()) - // registerTouch and State setup - id1 := flow.NewRegisterID("00", "") - value1 := []byte{'a'} + // add an additional ChunkExecutionDataID into the ExecutionDataRoot passed into Verify + vch.ChunkDataPack.ExecutionDataRoot.ChunkExecutionDataIDs = append(vch.ChunkDataPack.ExecutionDataRoot.ChunkExecutionDataIDs, cid.Undef) - id2Bytes := make([]byte, 32) - id2Bytes[0] = byte(5) - id2 := flow.NewRegisterID("05", "") - value2 := []byte{'b'} - UpdatedValue2 := []byte{'B'} + _, err := s.verifier.Verify(vch) + assert.Error(s.T(), err) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFExecutionDataChunksLengthMismatch{}, err) +} - entries := flow.RegisterEntries{ - { - Key: id1, - Value: value1, +func (s *ChunkVerifierTestSuite) TestExecutionDataChunkIdMismatch() { + meta := s.GetTestSetup(s.T(), "", false, false) + vch := meta.RefreshChunkData(s.T()) + + // modify one of the ChunkExecutionDataIDs passed into Verify + vch.ChunkDataPack.ExecutionDataRoot.ChunkExecutionDataIDs[0] = cid.Undef // 
substitute invalid CID + + _, err := s.verifier.Verify(vch) + assert.Error(s.T(), err) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFExecutionDataInvalidChunkCID{}, err) +} + +func (s *ChunkVerifierTestSuite) TestExecutionDataIdMismatch() { + meta := s.GetTestSetup(s.T(), "", false, false) + vch := meta.RefreshChunkData(s.T()) + + // modify ExecutionDataID passed into Verify + vch.Result.ExecutionDataID[5]++ + + _, err := s.verifier.Verify(vch) + assert.Error(s.T(), err) + assert.True(s.T(), chunksmodels.IsChunkFaultError(err)) + assert.IsType(s.T(), &chunksmodels.CFInvalidExecutionDataID{}, err) +} + +func (s *ChunkVerifierTestSuite) TestSystemChunkWithScheduledCallbackReturningEvent() { + systemContracts := systemcontracts.SystemContractsForChain(testChain) + + // create the event returned for processed callback + processedEventName := fmt.Sprintf( + "A.%s.FlowTransactionScheduler.PendingExecution", + systemContracts.FlowCallbackScheduler.Address, + ) + callbackEventPayload, err := ccf.Encode(cadence.NewEvent( + []cadence.Value{ + cadence.NewUInt64(1), // id + cadence.NewUInt64(computationUsed), // executionEffort }, - { - Key: id2, - Value: value2, + ).WithType(cadence.NewEventType( + nil, + processedEventName, + []cadence.Field{ + {Identifier: "id", Type: cadence.UInt64Type}, + {Identifier: "executionEffort", Type: cadence.UInt64Type}, }, + nil, + ))) + require.NoError(s.T(), err) + + processEvent := flow.Event{ + Type: flow.EventType(processedEventName), + TransactionID: processTxBody.ID(), + TransactionIndex: 0, + EventIndex: 0, + Payload: callbackEventPayload, + } + + // Setup mock outputs for process callback transaction + s.outputs[string(processTxBody.Script)] = fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + Events: flow.EventsList{processEvent}, + } + + // Create execute callback transaction body + executeCallbackTxs, err := blueprints.ExecuteCallbacksTransactions(testChain.Chain(), flow.EventsList{processEvent}) + require.NoError(s.T(), err) + require.Len(s.T(), executeCallbackTxs, 1) + + executeCallbackTx := executeCallbackTxs[0] + + // Setup mock output for execute callback transaction + s.outputs[string(executeCallbackTx.Script)] = fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + } + + // Setup system transaction output + epochSetupServiceEvent, err := convert.ServiceEvent(testChain, epochSetupEvent) + require.NoError(s.T(), err) + + s.outputs[string(serviceTxBody.Script)] = fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + ConvertedServiceEvents: flow.ServiceEventList{*epochSetupServiceEvent}, + Events: flow.EventsList{epochSetupEvent}, + } + + // Create custom test metadata + meta := s.GetTestSetup(s.T(), "", true, true) + + // Override the collection to include callback execution transaction + meta.customCollection = &flow.Collection{ + Transactions: []*flow.TransactionBody{processTxBody, executeCallbackTx, serviceTxBody}, + } + + // Update expected events to include all events + meta.ChunkEvents = flow.EventsList{} + meta.ChunkEvents = append(meta.ChunkEvents, processEvent) // Process callback event + meta.ChunkEvents = append(meta.ChunkEvents, epochSetupEvent) // System tx event + + // Update transaction results to match the 3 transactions + meta.TxResults = []flow.LightTransactionResult{ + {TransactionID: processTxBody.ID(), ComputationUsed: computationUsed, Failed: false}, + {TransactionID: executeCallbackTx.ID(), ComputationUsed: computationUsed, Failed: false}, + {TransactionID: 
serviceTxBody.ID(), ComputationUsed: computationUsed, Failed: false}, + } + + // Update service events + meta.ServiceEvents = []flow.ServiceEvent{*epochSetupServiceEvent} + + vch := meta.RefreshChunkData(s.T()) + + _, err = s.verifier.Verify(vch) + assert.NoError(s.T(), err) +} + +// TestSystemChunkWithNoScheduledCallbacks tests verification of system chunks +// when scheduled callbacks are enabled but no callbacks are actually scheduled +func (s *ChunkVerifierTestSuite) TestSystemChunkWithNoScheduledCallbacks() { + // Setup mock outputs for process callback transaction with no events + s.outputs[string(processTxBody.Script)] = fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + Events: flow.EventsList{}, // No callback events + } + + // Setup system transaction output + epochSetupServiceEvent, err := convert.ServiceEvent(testChain, epochSetupEvent) + require.NoError(s.T(), err) + + s.outputs[string(serviceTxBody.Script)] = fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + ConvertedServiceEvents: flow.ServiceEventList{*epochSetupServiceEvent}, + Events: flow.EventsList{epochSetupEvent}, } - var verifiableChunkData verification.VerifiableChunkData + // Create test metadata - no custom collection needed since no callbacks + meta := s.GetTestSetup(s.T(), "", true, true) + + // Expected events: only system tx service event (no callback events) + meta.ChunkEvents = flow.EventsList{epochSetupEvent} + + // Update transaction results for 2 transactions (process callback + system) + meta.TxResults = []flow.LightTransactionResult{ + {TransactionID: processTxBody.ID(), ComputationUsed: computationUsed, Failed: false}, + {TransactionID: serviceTxBody.ID(), ComputationUsed: computationUsed, Failed: false}, + } - metricsCollector := &metrics.NoopCollector{} + // Update service events + meta.ServiceEvents = []flow.ServiceEvent{*epochSetupServiceEvent} + + vch := meta.RefreshChunkData(s.T()) + + _, err = s.verifier.Verify(vch) + assert.NoError(s.T(), err) +} - f, _ := completeLedger.NewLedger(&fixtures.NoopWAL{}, 1000, metricsCollector, zerolog.Nop(), completeLedger.DefaultPathFinderVersion) +func newLedger(t *testing.T) *completeLedger.Ledger { + f, err := completeLedger.NewLedger(&fixtures.NoopWAL{}, 1000, metrics.NewNoopCollector(), zerolog.Nop(), completeLedger.DefaultPathFinderVersion) + require.NoError(t, err) compactor := fixtures.NewNoopCompactor(f) <-compactor.Ready() - defer func() { + t.Cleanup(func() { <-f.Done() <-compactor.Done() - }() + }) + + return f +} + +func blockFixture(collection *flow.Collection) *flow.Block { + guarantee := &flow.CollectionGuarantee{CollectionID: collection.ID()} + block := unittest.BlockFixture( + unittest.Block.WithPayload( + flow.Payload{Guarantees: []*flow.CollectionGuarantee{guarantee}}, + ), + ) + return block +} + +func generateStateUpdates(t *testing.T, f *completeLedger.Ledger) (ledger.State, ledger.Proof, *ledger.Update) { + entries := flow.RegisterEntries{ + { + Key: id0, + Value: []byte{'a'}, + }, + { + Key: id5, + Value: []byte{'b'}, + }, + } keys, values := executionState.RegisterEntriesToKeysValues(entries) update, err := ledger.NewUpdate(f.InitialState(), keys, values) - require.NoError(t, err) startState, _, err := f.Set(update) @@ -280,8 +637,8 @@ func GetBaselineVerifiableChunk(t *testing.T, script string, system bool) *verif entries = flow.RegisterEntries{ { - Key: id2, - Value: UpdatedValue2, + Key: id5, + Value: []byte{'B'}, }, } @@ -289,219 +646,259 @@ func GetBaselineVerifiableChunk(t *testing.T, script string, system bool) 
*verif update, err = ledger.NewUpdate(startState, keys, values) require.NoError(t, err) - endState, _, err := f.Set(update) - require.NoError(t, err) - - // events - chunkEvents := make(flow.EventsList, 0) + return startState, proof, update +} - erServiceEvents := make([]flow.ServiceEvent, 0) +func generateExecutionData(t *testing.T, blockID flow.Identifier, ced *execution_data.ChunkExecutionData) (flow.Identifier, flow.BlockExecutionDataRoot) { + chunkCid, err := executionDataCIDProvider.CalculateChunkExecutionDataID(*ced) + require.NoError(t, err) - if system { - chunkEvents = flow.EventsList{} - erServiceEvents = serviceEventsList - } else { - for i := 0; i < collectionSize; i++ { - if i == magicTxIndex { - switch script { - case "failedTx": - continue - } - } - chunkEvents = append(chunkEvents, eventsList...) - } + executionDataRoot := flow.BlockExecutionDataRoot{ + BlockID: blockID, + ChunkExecutionDataIDs: []cid.Cid{chunkCid}, } - EventsMerkleRootHash, err := flow.EventsMerkleRootHash(chunkEvents) + executionDataID, err := executionDataCIDProvider.CalculateExecutionDataRootID(executionDataRoot) require.NoError(t, err) - // Chunk setup - chunk := flow.Chunk{ - ChunkBody: flow.ChunkBody{ - CollectionIndex: 0, - StartState: flow.StateCommitment(startState), - BlockID: blockID, - EventCollection: EventsMerkleRootHash, - }, - Index: 0, - } + return executionDataID, executionDataRoot +} - chunkDataPack := flow.ChunkDataPack{ - ChunkID: chunk.ID(), - StartState: flow.StateCommitment(startState), - Proof: proof, - Collection: &coll, - } +func generateEvents(t *testing.T, collection *flow.Collection, includeServiceEvent bool) (flow.EventsList, []flow.ServiceEvent) { + var chunkEvents flow.EventsList + serviceEvents := make([]flow.ServiceEvent, 0) - // ExecutionResult setup - result := flow.ExecutionResult{ - BlockID: blockID, - Chunks: flow.ChunkList{&chunk}, - ServiceEvents: erServiceEvents, - } + // service events are also included as regular events + if includeServiceEvent { + for _, e := range serviceEventsList { + e := e + event, err := convert.ServiceEvent(testChain, e) + require.NoError(t, err) - verifiableChunkData = verification.VerifiableChunkData{ - IsSystemChunk: system, - Chunk: &chunk, - Header: header, - Result: &result, - ChunkDataPack: &chunkDataPack, - EndState: flow.StateCommitment(endState), + serviceEvents = append(serviceEvents, *event) + chunkEvents = append(chunkEvents, e) + } } - return &verifiableChunkData -} - -type vmMock struct{} - -func (vm *vmMock) Run( - ctx fvm.Context, - proc fvm.Procedure, - storage snapshot.StorageSnapshot, -) ( - *snapshot.ExecutionSnapshot, - fvm.ProcedureOutput, - error, -) { - tx, ok := proc.(*fvm.TransactionProcedure) - if !ok { - return nil, fvm.ProcedureOutput{}, fmt.Errorf( - "invokable is not a transaction") + for _, coll := range collection.Transactions { + switch string(coll.Script) { + case "failedTx": + continue + } + chunkEvents = append(chunkEvents, eventsList...) 
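+ // note: every transaction other than the "failedTx" case above contributes the same
+ // default eventsList, matching the events in generateDefaultOutput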
} - snapshot := &snapshot.ExecutionSnapshot{} - output := fvm.ProcedureOutput{} - - id0 := flow.NewRegisterID("00", "") - id5 := flow.NewRegisterID("05", "") + return chunkEvents, serviceEvents +} - switch string(tx.Transaction.Script) { - case "wrongEndState": - snapshot.WriteSet = map[flow.RegisterID]flow.RegisterValue{ - id0: []byte{'F'}, - } - output.Logs = []string{"log1", "log2"} - output.Events = eventsList - case "failedTx": - snapshot.WriteSet = map[flow.RegisterID]flow.RegisterValue{ - id5: []byte{'B'}, - } - output.Err = fvmErrors.NewCadenceRuntimeError(runtime.Error{}) // inside the runtime (e.g. div by zero, access account) - case "eventsMismatch": - output.Events = append(eventsList, flow.Event{ - Type: "event.Extra", - TransactionID: flow.Identifier{2, 3}, - TransactionIndex: 0, - EventIndex: 0, - Payload: []byte{88}, - }) - default: - snapshot.ReadSet = map[flow.RegisterID]struct{}{ - id0: struct{}{}, - id5: struct{}{}, +func generateTransactionResults(t *testing.T, collection *flow.Collection) []flow.LightTransactionResult { + txResults := make([]flow.LightTransactionResult, len(collection.Transactions)) + for i, tx := range collection.Transactions { + txResults[i] = flow.LightTransactionResult{ + TransactionID: tx.ID(), + ComputationUsed: computationUsed, + Failed: false, } - snapshot.WriteSet = map[flow.RegisterID]flow.RegisterValue{ - id5: []byte{'B'}, + + if string(tx.Script) == "failedTx" { + txResults[i].Failed = true } - output.Logs = []string{"log1", "log2"} - output.Events = eventsList } - return snapshot, output, nil + return txResults } -func (vmMock) GetAccount( - _ fvm.Context, - _ flow.Address, - _ snapshot.StorageSnapshot, -) ( - *flow.Account, - error) { - panic("not expected") -} +// generateCollection generates a collection fixture that is predictable based on inputs. +// Test cases in this file rely on the predictable pattern of collections generated here. +// If this is a system chunk, we return a collection containing only the service transaction. +// Otherwise, we return a collection with 5 transactions. Only the first of these 5 uses the input script. +// The transaction script is the lookup key for determining the result of transaction execution, +// so test cases can inject a desired transaction output associated with the input script. +func generateCollection(t *testing.T, isSystemChunk bool, script string) *flow.Collection { + if isSystemChunk { + // the system chunk's data pack does not include the collection, but the execution data does. + // we must include the correct collection in the execution data, otherwise verification will fail. 
+ return &flow.Collection{ + Transactions: []*flow.TransactionBody{processTxBody, serviceTxBody}, + } + } -type vmSystemOkMock struct{} + collectionSize := 5 + // add the user-specified transaction first + userSpecifiedTxIndex := 0 -func (vm *vmSystemOkMock) Run( - ctx fvm.Context, - proc fvm.Procedure, - storage snapshot.StorageSnapshot, -) ( - *snapshot.ExecutionSnapshot, - fvm.ProcedureOutput, - error, -) { - _, ok := proc.(*fvm.TransactionProcedure) - if !ok { - return nil, fvm.ProcedureOutput{}, fmt.Errorf( - "invokable is not a transaction") + coll := unittest.CollectionFixture(collectionSize) + if script != "" { + coll.Transactions[userSpecifiedTxIndex] = &flow.TransactionBody{Script: []byte(script)} } - id0 := flow.NewRegisterID("00", "") - id5 := flow.NewRegisterID("05", "") + return &coll +} - // add "default" interaction expected in tests - snapshot := &snapshot.ExecutionSnapshot{ +func generateDefaultSnapshot() *snapshot.ExecutionSnapshot { + return &snapshot.ExecutionSnapshot{ ReadSet: map[flow.RegisterID]struct{}{ - id0: struct{}{}, - id5: struct{}{}, + id0: {}, + id5: {}, }, WriteSet: map[flow.RegisterID]flow.RegisterValue{ id5: []byte{'B'}, }, } - output := fvm.ProcedureOutput{ - ConvertedServiceEvents: flow.ServiceEventList{*epochSetupServiceEvent}, - Logs: []string{"log1", "log2"}, +} + +func generateDefaultOutput() fvm.ProcedureOutput { + return fvm.ProcedureOutput{ + ComputationUsed: computationUsed, + Logs: []string{"log1", "log2"}, + Events: eventsList, } +} - return snapshot, output, nil +func (s *ChunkVerifierTestSuite) GetTestSetup(t *testing.T, script string, system bool, includeServiceEvents bool) *testMetadata { + collection := generateCollection(t, system, script) + block := blockFixture(collection) + + // transaction results + txResults := generateTransactionResults(t, collection) + // make sure this includes results even for the service tx + if system { + require.Len(t, txResults, 2) // todo should be dynamic + } else { + require.Len(t, txResults, len(collection.Transactions)) + } + + // events + chunkEvents, serviceEvents := generateEvents(t, collection, includeServiceEvents) + // make sure this includes events even for the service tx + require.NotEmpty(t, chunkEvents) + + // registerTouch and State setup + startState, proof, update := generateStateUpdates(t, s.ledger) + + if system { + collection = nil + } + + meta := &testMetadata{ + IsSystemChunk: system, + Header: block.ToHeader(), + Collection: collection, + TxResults: txResults, + ChunkEvents: chunkEvents, + ServiceEvents: serviceEvents, + StartState: startState, + Update: update, + Proof: proof, + + ExecDataBlockID: block.ID(), + + ledger: s.ledger, + } + + return meta } -func (vmSystemOkMock) GetAccount( - _ fvm.Context, - _ flow.Address, - _ snapshot.StorageSnapshot, -) ( - *flow.Account, - error, -) { - panic("not expected") +type testMetadata struct { + IsSystemChunk bool + Header *flow.Header + Collection *flow.Collection + TxResults []flow.LightTransactionResult + ChunkEvents flow.EventsList + ServiceEvents []flow.ServiceEvent + StartState ledger.State + Update *ledger.Update + Proof ledger.Proof + + // separated to allow overriding + ExecDataBlockID flow.Identifier + + // custom collection for system chunks with callbacks + customCollection *flow.Collection + + ledger *completeLedger.Ledger } -type vmSystemBadMock struct{} +func (m *testMetadata) RefreshChunkData(t *testing.T) *verification.VerifiableChunkData { + cedCollection := m.Collection + + if m.IsSystemChunk { + // the system chunk's 
data pack does not include the collection, but the execution data does. + // we must include the correct collection in the execution data, otherwise verification will fail. + if m.customCollection != nil { + cedCollection = m.customCollection + } else { + cedCollection = &flow.Collection{ + Transactions: []*flow.TransactionBody{processTxBody, serviceTxBody}, + } + } + } + + endState, trieUpdate, err := m.ledger.Set(m.Update) + require.NoError(t, err) + + eventsMerkleRootHash, err := flow.EventsMerkleRootHash(m.ChunkEvents) + require.NoError(t, err) -func (vm *vmSystemBadMock) Run( - ctx fvm.Context, - proc fvm.Procedure, - storage snapshot.StorageSnapshot, -) ( - *snapshot.ExecutionSnapshot, - fvm.ProcedureOutput, - error, -) { - _, ok := proc.(*fvm.TransactionProcedure) - if !ok { - return nil, fvm.ProcedureOutput{}, fmt.Errorf( - "invokable is not a transaction") + chunkExecutionData := &execution_data.ChunkExecutionData{ + Collection: cedCollection, + Events: m.ChunkEvents, + TrieUpdate: trieUpdate, + TransactionResults: m.TxResults, } - // EpochSetup event is expected, but we emit EpochCommit here resulting in - // a chunk fault - output := fvm.ProcedureOutput{ - ConvertedServiceEvents: flow.ServiceEventList{*epochCommitServiceEvent}, + executionDataID, executionDataRoot := generateExecutionData(t, m.ExecDataBlockID, chunkExecutionData) + + // Chunk setup + chunk := &flow.Chunk{ + ChunkBody: flow.ChunkBody{ + CollectionIndex: 0, + StartState: flow.StateCommitment(m.StartState), + BlockID: m.Header.ID(), + // in these test cases, all defined service events correspond to the current chunk + ServiceEventCount: uint16(len(m.ServiceEvents)), + EventCollection: eventsMerkleRootHash, + }, + Index: 0, + } + + chunkDataPack := &flow.ChunkDataPack{ + ChunkID: chunk.ID(), + StartState: flow.StateCommitment(m.StartState), + Proof: m.Proof, + Collection: m.Collection, + ExecutionDataRoot: executionDataRoot, + } + + // ExecutionResult setup + result := &flow.ExecutionResult{ + BlockID: m.Header.ID(), + Chunks: flow.ChunkList{chunk}, + ServiceEvents: m.ServiceEvents, + ExecutionDataID: executionDataID, } - return &snapshot.ExecutionSnapshot{}, output, nil + return &verification.VerifiableChunkData{ + IsSystemChunk: m.IsSystemChunk, + Header: m.Header, + Chunk: chunk, + Result: result, + ChunkDataPack: chunkDataPack, + EndState: flow.StateCommitment(endState), + Snapshot: mockSnapshotSubset{}, + } +} + +type mockSnapshotSubset struct{} + +func (m mockSnapshotSubset) RandomSource() ([]byte, error) { + //TODO implement me + panic("implement me") } -func (vmSystemBadMock) GetAccount( - _ fvm.Context, - _ flow.Address, - _ snapshot.StorageSnapshot, -) ( - *flow.Account, - error, -) { - panic("not expected") +func (m mockSnapshotSubset) VersionBeacon() (*flow.SealedVersionBeacon, error) { + //TODO implement me + panic("implement me") } + +func (m mockSnapshotSubset) ProtocolState() (protocol.KVStoreReader, error) { panic("implement me") } diff --git a/module/chunks/chunk_assigner.go b/module/chunks/chunk_assigner.go index 7ac2247c997..87cdd0a84c2 100644 --- a/module/chunks/chunk_assigner.go +++ b/module/chunks/chunk_assigner.go @@ -3,8 +3,9 @@ package chunks import ( "fmt" - "github.com/onflow/flow-go/crypto/hash" - "github.com/onflow/flow-go/crypto/random" + "github.com/onflow/crypto/hash" + "github.com/onflow/crypto/random" + chunkmodels "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/encoding/json" "github.com/onflow/flow-go/model/flow" @@ -12,7 +13,7 @@ import ( 
"github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/seed" + "github.com/onflow/flow-go/state/protocol/prg" ) // ChunkAssigner implements an instance of the Public Chunk Assignment @@ -30,13 +31,9 @@ type ChunkAssigner struct { // be assigned to each chunk. func NewChunkAssigner(alpha uint, protocolState protocol.State) (*ChunkAssigner, error) { // TODO to have limit of assignment mempool as a parameter (2703) - assignment, err := stdmap.NewAssignments(1000) - if err != nil { - return nil, fmt.Errorf("could not create an assignment mempool: %w", err) - } return &ChunkAssigner{ alpha: int(alpha), - assignments: assignment, + assignments: stdmap.NewAssignments(1000), protocolState: protocolState, }, nil } @@ -60,15 +57,18 @@ func (p *ChunkAssigner) Assign(result *flow.ExecutionResult, blockID flow.Identi // checks cache against this assignment assignmentFingerprint := flow.HashToID(hash) - a, exists := p.assignments.ByID(assignmentFingerprint) + a, exists := p.assignments.Get(assignmentFingerprint) if exists { return a, nil } // Get a list of verifiers at block that is being sealed - verifiers, err := p.protocolState.AtBlockID(result.BlockID).Identities(filter.And(filter.HasRole(flow.RoleVerification), - filter.HasWeight(true), - filter.Not(filter.Ejected))) + verifiers, err := p.protocolState.AtBlockID(result.BlockID).Identities( + filter.And( + filter.HasInitialWeight[flow.Identity](true), + filter.HasRole[flow.Identity](flow.RoleVerification), + filter.IsValidCurrentEpochParticipant, + )) if err != nil { return nil, fmt.Errorf("could not get verifiers: %w", err) } @@ -98,7 +98,7 @@ func (p *ChunkAssigner) rngByBlockID(stateSnapshot protocol.Snapshot) (random.Ra return nil, fmt.Errorf("failed to retrieve source of randomness: %w", err) } - rng, err := seed.PRGFromRandomSource(randomSource, seed.ProtocolVerificationChunkAssignment) + rng, err := prg.New(randomSource, prg.VerificationChunkAssignment, nil) if err != nil { return nil, fmt.Errorf("failed to instantiate random number generator: %w", err) } @@ -114,7 +114,7 @@ func chunkAssignment(ids flow.IdentifierList, chunks flow.ChunkList, rng random. } // creates an assignment - assignment := chunkmodels.NewAssignment() + assignmentBuilder := chunkmodels.NewAssignmentBuilder() // permutes the entire slice err := rng.Shuffle(len(ids), ids.Swap) @@ -155,9 +155,12 @@ func chunkAssignment(ids flow.IdentifierList, chunks flow.ChunkList, rng random. 
if !ok { return nil, fmt.Errorf("chunk out of range requested: %v", i) } - assignment.Add(chunk, assignees) + err := assignmentBuilder.Add(chunk.Index, assignees) + if err != nil { + return nil, fmt.Errorf("adding chunk %d failed: %w", i, err) + } } - return assignment, nil + return assignmentBuilder.Build(), nil } func fingerPrint(blockID flow.Identifier, resultID flow.Identifier, alpha int) (hash.Hash, error) { diff --git a/module/chunks/chunk_assigner_test.go b/module/chunks/chunk_assigner_test.go index 1c65c91d817..80bf01a5a5d 100644 --- a/module/chunks/chunk_assigner_test.go +++ b/module/chunks/chunk_assigner_test.go @@ -1,7 +1,7 @@ package chunks import ( - "math/rand" + "crypto/rand" "testing" "github.com/stretchr/testify/mock" @@ -10,8 +10,8 @@ import ( chmodels "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" - protocolMock "github.com/onflow/flow-go/state/protocol/mock" - "github.com/onflow/flow-go/state/protocol/seed" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/state/protocol/prg" "github.com/onflow/flow-go/utils/unittest" ) @@ -21,7 +21,7 @@ type PublicAssignmentTestSuite struct { } // Setup test with n verification nodes -func (a *PublicAssignmentTestSuite) SetupTest(n int) (*flow.Header, *protocolMock.Snapshot, *protocolMock.State) { +func (a *PublicAssignmentTestSuite) SetupTest(n int) (*flow.Header, *protocol.Snapshot, *protocol.State) { nodes := make([]flow.Role, 0) for i := 1; i < n; i++ { nodes = append(nodes, flow.RoleVerification) @@ -30,7 +30,7 @@ func (a *PublicAssignmentTestSuite) SetupTest(n int) (*flow.Header, *protocolMoc // setup protocol state block, snapshot, state, _ := unittest.FinalizedProtocolStateWithParticipants(participants) - head := block.Header + head := block.ToHeader() return head, snapshot, state } @@ -46,62 +46,47 @@ func (a *PublicAssignmentTestSuite) TestByNodeID() { // creates ids and twice chunks of the ids ids := unittest.IdentityListFixture(size) chunks := a.CreateChunks(2*size, a.T()) - assignment := chmodels.NewAssignment() + assignmentBuilder := chmodels.NewAssignmentBuilder() - // assigns two chunks to each verifier node - // j keeps track of chunks - j := 0 - for i := 0; i < size; i++ { - c, ok := chunks.ByIndex(uint64(j)) - require.True(a.T(), ok, "chunk out of range requested") - assignment.Add(c, append(assignment.Verifiers(c), ids[i].NodeID)) - j++ - c, ok = chunks.ByIndex(uint64(j)) - require.True(a.T(), ok, "chunk out of range requested") - assignment.Add(c, append(assignment.Verifiers(c), ids[i].NodeID)) + // assign each chunk to exactly one verifier, in round-robin order + // Since there are 2x as many chunks as verifiers, each verifier will have 2 chunks + for j, chunk := range chunks { + v := ids[j%size].NodeID + require.NoError(a.T(), assignmentBuilder.Add(chunk.Index, flow.IdentifierList{v})) } + assignment := assignmentBuilder.Build() // evaluating the chunk assignment // each verifier should have two certain chunks based on the assignment - // j keeps track of chunks - j = 0 for i := 0; i < size; i++ { assignedChunks := assignment.ByNodeID(ids[i].NodeID) require.Len(a.T(), assignedChunks, 2) - c, ok := chunks.ByIndex(uint64(j)) + c, ok := chunks.ByIndex(uint64(i)) require.True(a.T(), ok, "chunk out of range requested") require.Contains(a.T(), assignedChunks, c.Index) - j++ - c, ok = chunks.ByIndex(uint64(j)) + c2, ok := chunks.ByIndex(uint64(i + size)) require.True(a.T(), ok, "chunk out of range requested") - require.Contains(a.T(), assignedChunks, 
c.Index) + require.Contains(a.T(), assignedChunks, c2.Index) } } -// TestAssignDuplicate tests assign Add duplicate verifiers +// TestAssignDuplicate tests that duplicate verifiers for a chunk are not allowed +// since it would weaken the protocol's security func (a *PublicAssignmentTestSuite) TestAssignDuplicate() { size := 5 - // creates ids and twice chunks of the ids - var ids flow.IdentityList = unittest.IdentityListFixture(size) - chunks := a.CreateChunks(2, a.T()) - assignment := chmodels.NewAssignment() + // creates verifier ids + var ids = unittest.IdentityListFixture(size) + assignmentBuilder := chmodels.NewAssignmentBuilder() // assigns first chunk to non-duplicate list of verifiers - c, ok := chunks.ByIndex(uint64(0)) - require.True(a.T(), ok, "chunk out of range requested") - assignment.Add(c, ids.NodeIDs()) - require.Len(a.T(), assignment.Verifiers(c), size) + require.NoError(a.T(), assignmentBuilder.Add(0, ids.NodeIDs())) // duplicates first verifier, hence size increases by 1 ids = append(ids, ids[0]) require.Len(a.T(), ids, size+1) // assigns second chunk to a duplicate list of verifiers - c, ok = chunks.ByIndex(uint64(1)) - require.True(a.T(), ok, "chunk out of range requested") - assignment.Add(c, ids.NodeIDs()) - // should be size not size + 1 - require.Len(a.T(), assignment.Verifiers(c), size) + require.Error(a.T(), assignmentBuilder.Add(1, ids.NodeIDs())) } // TestPermuteEntirely tests permuting an entire IdentityList against @@ -280,7 +265,9 @@ func (a *PublicAssignmentTestSuite) ChunkAssignmentScenario(chunkNum, verNum, al for _, chunk := range result.Chunks { // each chunk should be assigned to alpha verifiers - require.Equal(a.T(), p1.Verifiers(chunk).Len(), alpha) + verifiers, err := p1.Verifiers(chunk.Index) + require.NoError(a.T(), err) + require.Equal(a.T(), len(verifiers), alpha) } } @@ -361,7 +348,7 @@ func (a *PublicAssignmentTestSuite) CreateResult(head *flow.Header, num int, t * } func (a *PublicAssignmentTestSuite) GetSeed(t *testing.T) []byte { - seed := make([]byte, seed.RandomSourceLength) + seed := make([]byte, prg.RandomSourceLength) _, err := rand.Read(seed) require.NoError(t, err) return seed diff --git a/module/compliance/config.go b/module/compliance/config.go index 7409b0acd4a..97707cfaac8 100644 --- a/module/compliance/config.go +++ b/module/compliance/config.go @@ -18,6 +18,15 @@ func DefaultConfig() Config { } } +// GetSkipNewProposalsThreshold returns the stored config value, possibly applying a lower bound. +func (c *Config) GetSkipNewProposalsThreshold() uint64 { + if c.SkipNewProposalsThreshold < MinSkipNewProposalsThreshold { + return MinSkipNewProposalsThreshold + } + + return c.SkipNewProposalsThreshold +} + type Opt func(*Config) // WithSkipNewProposalsThreshold returns an option to set the skip new proposals diff --git a/module/component/component.go b/module/component/component.go index 34f8f61cf14..11b543f239f 100644 --- a/module/component/component.go +++ b/module/component/component.go @@ -19,9 +19,25 @@ var ErrComponentShutdown = fmt.Errorf("component has already shut down") // channels that close when startup and shutdown have completed. // Once Start has been called, the channel returned by Done must close eventually, // whether that be because of a graceful shutdown or an irrecoverable error. +// See also ComponentManager below. type Component interface { module.Startable - module.ReadyDoneAware + // Ready returns a ready channel that is closed once startup has completed. 
+ // Unlike the previous [module.ReadyDoneAware] interface, Ready does not start the component, + // but only exposes information about whether the component has completed startup. + // To start the component, instead use the Start() method. + // Note that the ready channel may never close if errors are encountered during startup, + // or if shutdown has already commenced before startup is complete. + // This should be an idempotent method. + Ready() <-chan struct{} + + // Done returns a done channel that is closed once shutdown has completed. + // Unlike the previous [module.ReadyDoneAware] interface, Done does not shut down the component, + // but only exposes information about whether the component has shut down yet. + // To shut down the component, instead cancel the context that was passed to Start(). + // Implementations must close the done channel even if errors are encountered during shutdown. + // This should be an idempotent method. + Done() <-chan struct{} } type ComponentFactory func() (Component, error) diff --git a/module/component/component_manager_test.go b/module/component/component_manager_test.go index fc99ca92af3..1431abdd4c0 100644 --- a/module/component/component_manager_test.go +++ b/module/component/component_manager_test.go @@ -345,7 +345,7 @@ func StartStateTransition() (func(t func()), func(*rapid.T)) { executeTransitions := func(t *rapid.T) { for i := 0; i < len(transitions); i++ { - j := rapid.IntRange(0, len(transitions)-i-1).Draw(t, "").(int) + j := rapid.IntRange(0, len(transitions)-i-1).Draw(t, "") transitions[i], transitions[j+i] = transitions[j+i], transitions[i] transitions[i]() } @@ -390,35 +390,34 @@ type ComponentManagerMachine struct { assertErrorThrownMatches func(t *rapid.T, err error, msgAndArgs ...interface{}) assertErrorNotThrown func(t *rapid.T) - cancelGenerator *rapid.Generator + cancelGenerator *rapid.Generator[bool] drawStateTransition func(t *rapid.T) *StateTransition } -func (c *ComponentManagerMachine) Init(t *rapid.T) { - numWorkers := rapid.IntRange(0, 5).Draw(t, "num_workers").(int) - pCancel := rapid.Float64Range(0, 100).Draw(t, "p_cancel").(float64) +func (c *ComponentManagerMachine) init(t *rapid.T) { + numWorkers := rapid.IntRange(0, 5).Draw(t, "num_workers") + pCancel := rapid.Float64Range(0, 100).Draw(t, "p_cancel") - c.cancelGenerator = rapid.Float64Range(0, 100). 
- Map(func(n float64) bool { - return pCancel == 100 || n < pCancel - }) + c.cancelGenerator = rapid.Map(rapid.Float64Range(0, 100), func(n float64) bool { + return pCancel == 100 || n < pCancel + }) c.drawStateTransition = func(t *rapid.T) *StateTransition { st := &StateTransition{} if !c.canceled { - st.cancel = c.cancelGenerator.Draw(t, "cancel").(bool) + st.cancel = c.cancelGenerator.Draw(t, "cancel") } for workerId, state := range c.workerStates { if allowedTransitions, ok := WorkerStateTransitions[state]; ok { label := fmt.Sprintf("worker_transition_%v", workerId) st.workerIDs = append(st.workerIDs, workerId) - st.workerTransitions = append(st.workerTransitions, rapid.SampledFrom(allowedTransitions).Draw(t, label).(WorkerStateTransition)) + st.workerTransitions = append(st.workerTransitions, rapid.SampledFrom(allowedTransitions).Draw(t, label)) } } - return rapid.Just(st).Draw(t, "state_transition").(*StateTransition) + return rapid.Just(st).Draw(t, "state_transition") } ctx, cancel := context.WithCancel(context.Background()) @@ -625,7 +624,11 @@ func (c *ComponentManagerMachine) Check(t *rapid.T) { func TestComponentManager(t *testing.T) { unittest.SkipUnless(t, unittest.TEST_LONG_RUNNING, "skip because this test takes too long") - rapid.Check(t, rapid.Run(&ComponentManagerMachine{})) + rapid.Check(t, func(t *rapid.T) { + sm := new(ComponentManagerMachine) + sm.init(t) + t.Repeat(rapid.StateMachineActions(sm)) + }) } func TestComponentManagerShutdown(t *testing.T) { @@ -636,7 +639,7 @@ func TestComponentManagerShutdown(t *testing.T) { }).Build() parent, cancel := context.WithCancel(context.Background()) - ctx, _ := irrecoverable.WithSignaler(parent) + ctx := irrecoverable.NewMockSignalerContext(t, parent) mgr.Start(ctx) unittest.AssertClosesBefore(t, mgr.Ready(), 10*time.Millisecond) diff --git a/module/component/mock/component.go b/module/component/mock/component.go index f93cc95799d..55ba98226d6 100644 --- a/module/component/mock/component.go +++ b/module/component/mock/component.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package component +package mock import ( irrecoverable "github.com/onflow/flow-go/module/irrecoverable" @@ -12,10 +12,14 @@ type Component struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *Component) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -28,10 +32,14 @@ func (_m *Component) Done() <-chan struct{} { return r0 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *Component) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -49,13 +57,12 @@ func (_m *Component) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewComponent interface { +// NewComponent creates a new instance of Component. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewComponent(t interface { mock.TestingT Cleanup(func()) -} - -// NewComponent creates a new instance of Component. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewComponent(t mockConstructorTestingTNewComponent) *Component { +}) *Component { mock := &Component{} mock.Mock.Test(t) diff --git a/module/component/mock/component_factory.go b/module/component/mock/component_factory.go deleted file mode 100644 index 2bba231ddb1..00000000000 --- a/module/component/mock/component_factory.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package component - -import ( - component "github.com/onflow/flow-go/module/component" - mock "github.com/stretchr/testify/mock" -) - -// ComponentFactory is an autogenerated mock type for the ComponentFactory type -type ComponentFactory struct { - mock.Mock -} - -// Execute provides a mock function with given fields: -func (_m *ComponentFactory) Execute() (component.Component, error) { - ret := _m.Called() - - var r0 component.Component - var r1 error - if rf, ok := ret.Get(0).(func() (component.Component, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() component.Component); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(component.Component) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewComponentFactory interface { - mock.TestingT - Cleanup(func()) -} - -// NewComponentFactory creates a new instance of ComponentFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewComponentFactory(t mockConstructorTestingTNewComponentFactory) *ComponentFactory { - mock := &ComponentFactory{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/component/mock/component_manager_builder.go b/module/component/mock/component_manager_builder.go index c414ddc6663..1d0e0f14615 100644 --- a/module/component/mock/component_manager_builder.go +++ b/module/component/mock/component_manager_builder.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package component +package mock import ( component "github.com/onflow/flow-go/module/component" @@ -16,6 +16,10 @@ type ComponentManagerBuilder struct { func (_m *ComponentManagerBuilder) AddWorker(_a0 component.ComponentWorker) component.ComponentManagerBuilder { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for AddWorker") + } + var r0 component.ComponentManagerBuilder if rf, ok := ret.Get(0).(func(component.ComponentWorker) component.ComponentManagerBuilder); ok { r0 = rf(_a0) @@ -28,10 +32,14 @@ func (_m *ComponentManagerBuilder) AddWorker(_a0 component.ComponentWorker) comp return r0 } -// Build provides a mock function with given fields: +// Build provides a mock function with no fields func (_m *ComponentManagerBuilder) Build() *component.ComponentManager { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Build") + } + var r0 *component.ComponentManager if rf, ok := ret.Get(0).(func() *component.ComponentManager); ok { r0 = rf() @@ -44,13 +52,12 @@ func (_m *ComponentManagerBuilder) Build() *component.ComponentManager { return r0 } -type mockConstructorTestingTNewComponentManagerBuilder interface { +// NewComponentManagerBuilder creates a new instance of ComponentManagerBuilder. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewComponentManagerBuilder(t interface { mock.TestingT Cleanup(func()) -} - -// NewComponentManagerBuilder creates a new instance of ComponentManagerBuilder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewComponentManagerBuilder(t mockConstructorTestingTNewComponentManagerBuilder) *ComponentManagerBuilder { +}) *ComponentManagerBuilder { mock := &ComponentManagerBuilder{} mock.Mock.Test(t) diff --git a/module/component/mock/component_worker.go b/module/component/mock/component_worker.go deleted file mode 100644 index acdf93a3908..00000000000 --- a/module/component/mock/component_worker.go +++ /dev/null @@ -1,35 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package component - -import ( - component "github.com/onflow/flow-go/module/component" - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - - mock "github.com/stretchr/testify/mock" -) - -// ComponentWorker is an autogenerated mock type for the ComponentWorker type -type ComponentWorker struct { - mock.Mock -} - -// Execute provides a mock function with given fields: ctx, ready -func (_m *ComponentWorker) Execute(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - _m.Called(ctx, ready) -} - -type mockConstructorTestingTNewComponentWorker interface { - mock.TestingT - Cleanup(func()) -} - -// NewComponentWorker creates a new instance of ComponentWorker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewComponentWorker(t mockConstructorTestingTNewComponentWorker) *ComponentWorker { - mock := &ComponentWorker{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/component/mock/ready_func.go b/module/component/mock/ready_func.go deleted file mode 100644 index 57e61098bba..00000000000 --- a/module/component/mock/ready_func.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package component - -import mock "github.com/stretchr/testify/mock" - -// ReadyFunc is an autogenerated mock type for the ReadyFunc type -type ReadyFunc struct { - mock.Mock -} - -// Execute provides a mock function with given fields: -func (_m *ReadyFunc) Execute() { - _m.Called() -} - -type mockConstructorTestingTNewReadyFunc interface { - mock.TestingT - Cleanup(func()) -} - -// NewReadyFunc creates a new instance of ReadyFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewReadyFunc(t mockConstructorTestingTNewReadyFunc) *ReadyFunc { - mock := &ReadyFunc{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/counters/monotonous_counter.go b/module/counters/monotonous_counter.go new file mode 100644 index 00000000000..a96498d8327 --- /dev/null +++ b/module/counters/monotonous_counter.go @@ -0,0 +1,48 @@ +package counters + +import "sync/atomic" + +// StrictMonotonicCounter is a helper struct which implements a strict monotonic counter. +// StrictMonotonicCounter is implemented using atomic operations and doesn't allow to set a value +// which is lower or equal to the already stored one. The counter is implemented +// solely with non-blocking atomic operations for concurrency safety. 
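Aside: the doc comment above describes a lock-free compare-and-swap (CAS) retry loop. A minimal, self-contained sketch of that same pattern, using only the standard library (an illustration of the technique, not part of this diff):

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// raiseTo lifts *v to n and reports whether it did; it never lowers *v.
	func raiseTo(v *uint64, n uint64) bool {
		for {
			old := atomic.LoadUint64(v)
			if n <= old {
				return false // strictly monotonic: equal or lower values are rejected
			}
			if atomic.CompareAndSwapUint64(v, old, n) {
				return true // no concurrent writer interleaved between load and swap
			}
			// a concurrent writer won the race; reload and retry
		}
	}

	func main() {
		var v uint64
		var wg sync.WaitGroup
		for i := 1; i <= 100; i++ {
			wg.Add(1)
			go func(n uint64) { defer wg.Done(); raiseTo(&v, n) }(uint64(i))
		}
		wg.Wait()
		fmt.Println(atomic.LoadUint64(&v)) // always prints 100
	}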
+type StrictMonotonicCounter struct { + atomicCounter uint64 +} + +// NewMonotonicCounter creates new counter with initial value +func NewMonotonicCounter(initialValue uint64) StrictMonotonicCounter { + return StrictMonotonicCounter{ + atomicCounter: initialValue, + } +} + +// Set updates value of counter if and only if it's strictly larger than value which is already stored. +// Returns true if update was successful or false if stored value is larger. +func (c *StrictMonotonicCounter) Set(newValue uint64) bool { + for { + oldValue := c.Value() + if newValue <= oldValue { + return false + } + if atomic.CompareAndSwapUint64(&c.atomicCounter, oldValue, newValue) { + return true + } + } +} + +// Value returns value which is stored in atomic variable +func (c *StrictMonotonicCounter) Value() uint64 { + return atomic.LoadUint64(&c.atomicCounter) +} + +// Increment atomically increments counter and returns updated value +func (c *StrictMonotonicCounter) Increment() uint64 { + for { + oldValue := c.Value() + newValue := oldValue + 1 + if atomic.CompareAndSwapUint64(&c.atomicCounter, oldValue, newValue) { + return newValue + } + } +} diff --git a/module/counters/monotonous_counter_test.go b/module/counters/monotonous_counter_test.go new file mode 100644 index 00000000000..cfda762fb34 --- /dev/null +++ b/module/counters/monotonous_counter_test.go @@ -0,0 +1,64 @@ +package counters + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/utils/unittest" +) + +func TestSet(t *testing.T) { + counter := NewMonotonicCounter(3) + require.True(t, counter.Set(4)) + require.Equal(t, uint64(4), counter.Value()) + require.False(t, counter.Set(2)) + require.Equal(t, uint64(4), counter.Value()) +} + +func TestIncrement(t *testing.T) { + counter := NewMonotonicCounter(1) + require.Equal(t, uint64(2), counter.Increment()) + require.Equal(t, uint64(3), counter.Increment()) +} + +// TestIncrementConcurrently tests that the MonotonicCounter's Increment method +// works correctly when called concurrently from multiple goroutines +func TestIncrementConcurrently(t *testing.T) { + counter := NewMonotonicCounter(0) + + unittest.Concurrently(100, func(i int) { + counter.Increment() + }) + + require.Equal(t, uint64(100), counter.Value()) +} + +func TestFuzzy(t *testing.T) { + counter := NewMonotonicCounter(3) + require.True(t, counter.Set(4)) + require.False(t, counter.Set(2)) + require.True(t, counter.Set(7)) + require.True(t, counter.Set(9)) + require.True(t, counter.Set(12)) + require.False(t, counter.Set(10)) + require.True(t, counter.Set(18)) + + for i := 20; i < 100; i++ { + require.True(t, counter.Set(uint64(i))) + } + + for i := 20; i < 100; i++ { + require.False(t, counter.Set(uint64(i))) + } +} + +func TestConcurrent(t *testing.T) { + counter := NewMonotonicCounter(3) + + unittest.Concurrently(100, func(i int) { + counter.Set(uint64(i)) + }) + + require.Equal(t, uint64(99), counter.Value()) +} diff --git a/module/counters/persistent_strict_monotonic_counter.go b/module/counters/persistent_strict_monotonic_counter.go new file mode 100644 index 00000000000..95c2e3e8aae --- /dev/null +++ b/module/counters/persistent_strict_monotonic_counter.go @@ -0,0 +1,62 @@ +package counters + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/storage" +) + +// ErrIncorrectValue indicates that a processed value is lower or equal than current value. 
+var (
+	ErrIncorrectValue = errors.New("incorrect value")
+)
+
+// PersistentStrictMonotonicCounter represents the consumer progress with a strict monotonic counter.
+type PersistentStrictMonotonicCounter struct {
+	consumerProgress storage.ConsumerProgress
+
+	// used to skip heights that are lower than the current height
+	counter StrictMonotonicCounter
+}
+
+// NewPersistentStrictMonotonicCounter creates a new PersistentStrictMonotonicCounter.
+// The consumer progress and associated db entry must not be accessed outside of calls to the returned object,
+// otherwise the state may become inconsistent.
+//
+// No errors are expected during normal operation.
+func NewPersistentStrictMonotonicCounter(consumerProgress storage.ConsumerProgress) (*PersistentStrictMonotonicCounter, error) {
+	m := &PersistentStrictMonotonicCounter{
+		consumerProgress: consumerProgress,
+	}
+
+	// sync with storage for the processed index to ensure consistency
+	value, err := m.consumerProgress.ProcessedIndex()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get processed index: %w", err)
+	}
+
+	m.counter = NewMonotonicCounter(value)
+
+	return m, nil
+}
+
+// Set sets the processed index, ensuring it is strictly monotonically increasing.
+//
+// Expected errors during normal operation:
+//   - ErrIncorrectValue - if the stored value is >= processed (the requirement of strict monotonic increase is violated).
+//   - generic error in case of unexpected failure from the database layer or
+//     encoding failure.
+func (m *PersistentStrictMonotonicCounter) Set(processed uint64) error {
+	if !m.counter.Set(processed) {
+		return ErrIncorrectValue
+	}
+	return m.consumerProgress.SetProcessedIndex(processed)
+}
+
+// Value loads the current stored index.
+//
+// No errors are expected during normal operation.
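Aside: a consumer of this API would typically treat ErrIncorrectValue as a benign "already processed" signal rather than a failure. A hedged sketch of that call pattern (`progress` is assumed to be a storage.ConsumerProgress obtained elsewhere, e.g. as in the test further below; `height` is a hypothetical input):

	c, err := counters.NewPersistentStrictMonotonicCounter(progress)
	if err != nil {
		return fmt.Errorf("could not initialize counter: %w", err)
	}
	if err := c.Set(height); err != nil {
		if errors.Is(err, counters.ErrIncorrectValue) {
			return nil // height <= stored value: already processed, safe to skip
		}
		return err // unexpected storage failure
	}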
+func (m *PersistentStrictMonotonicCounter) Value() uint64 { + return m.counter.Value() +} diff --git a/module/counters/persistent_strict_monotonic_counter_test.go b/module/counters/persistent_strict_monotonic_counter_test.go new file mode 100644 index 00000000000..e0188307577 --- /dev/null +++ b/module/counters/persistent_strict_monotonic_counter_test.go @@ -0,0 +1,53 @@ +package counters_test + +import ( + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestMonotonicConsumer(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + var height1 = uint64(1234) + progress, err := store.NewConsumerProgress(pebbleimpl.ToDB(pdb), module.ConsumeProgressLastFullBlockHeight).Initialize(height1) + require.NoError(t, err) + persistentStrictMonotonicCounter, err := counters.NewPersistentStrictMonotonicCounter(progress) + require.NoError(t, err) + + // check value can be retrieved + actual := persistentStrictMonotonicCounter.Value() + require.Equal(t, height1, actual) + + // try to update value with less than current + var lessHeight = uint64(1233) + err = persistentStrictMonotonicCounter.Set(lessHeight) + require.Error(t, err) + require.ErrorIs(t, err, counters.ErrIncorrectValue) + + // update the value with bigger height + var height2 = uint64(1235) + err = persistentStrictMonotonicCounter.Set(height2) + require.NoError(t, err) + + // check that the new value can be retrieved + actual = persistentStrictMonotonicCounter.Value() + require.Equal(t, height2, actual) + + progress2, err := store.NewConsumerProgress(pebbleimpl.ToDB(pdb), module.ConsumeProgressLastFullBlockHeight).Initialize(height1) + require.NoError(t, err) + // check that new persistent strict monotonic counter has the same value + persistentStrictMonotonicCounter2, err := counters.NewPersistentStrictMonotonicCounter(progress2) + require.NoError(t, err) + + // check that the value still the same + actual = persistentStrictMonotonicCounter2.Value() + require.Equal(t, height2, actual) + }) +} diff --git a/module/dkg.go b/module/dkg.go index 7952fbbdc8d..db23a27ec50 100644 --- a/module/dkg.go +++ b/module/dkg.go @@ -1,7 +1,8 @@ package module import ( - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" ) @@ -31,12 +32,30 @@ type DKGContractClient interface { // messages for that phase. ReadBroadcast(fromIndex uint, referenceBlock flow.Identifier) ([]messages.BroadcastDKGMessage, error) - // SubmitResult submits the final public result of the DKG protocol. This - // represents the group public key and the node's local computation of the - // public keys for each DKG participant. - // - // SubmitResult must be called strictly after the final phase has ended. - SubmitResult(crypto.PublicKey, []crypto.PublicKey) error + // SubmitParametersAndResult posts the DKG setup parameters (`flow.DKGIndexMap`) and the node's locally-computed DKG result to + // the DKG white-board smart contract. The DKG results are the node's local computation of the group public key and the public + // key shares. Serialized public keys are encoded as lower-case hex strings. + // Conceptually the flow.DKGIndexMap is not an output of the DKG protocol. 
Rather, it is part of the configuration/initialization + // information of the DKG. Before an epoch transition on the happy path (using the data in the EpochSetup event), each consensus + // participant locally fixes the DKG committee 𝒟 including the respective nodes' order to be identical to the consensus + // committee 𝒞. However, in case of a failed epoch transition, we desire the ability to manually provide the result of a successful + // DKG for the immediately next epoch (so-called recovery epoch). The DKG committee 𝒟 must have a sufficiently large overlap with + // the recovery epoch's consensus committee 𝒞 -- though for flexibility, we do *not* want to require that both committees are identical. + // Therefore, we need to explicitly specify the DKG committee 𝒟 on the fallback path. For uniformity of implementation, we do the + // same also on the happy path. + SubmitParametersAndResult(indexMap flow.DKGIndexMap, groupPublicKey crypto.PublicKey, publicKeys []crypto.PublicKey) error + + // SubmitEmptyResult submits an empty result of the DKG protocol. + // The empty result is obtained by a node when it realizes locally that its DKG participation + // was unsuccessful (possible reasons include: node received too many byzantine inputs; + // node has networking issues; locally computed key is invalid…). However, a node obtaining an + // empty result can happen in both cases of the DKG succeeding or failing globally. + // For further details, please see: + // https://flowfoundation.notion.site/Random-Beacon-2d61f3b3ad6e40ee9f29a1a38a93c99c + // Honest nodes would call `SubmitEmptyResult` strictly after the final phase has ended if DKG has ended. + // However, `SubmitEmptyResult` also supports implementing byzantine participants for testing that submit an + // empty result too early (intentional protocol violation), *before* the final DKG phase concluded. + SubmitEmptyResult() error } // DKGController controls the execution of a Joint Feldman DKG instance. @@ -81,5 +100,5 @@ type DKGController interface { type DKGControllerFactory interface { // Create instantiates a new DKGController. - Create(dkgInstanceID string, participants flow.IdentityList, seed []byte) (DKGController, error) + Create(dkgInstanceID string, participants flow.IdentitySkeletonList, seed []byte) (DKGController, error) } diff --git a/module/dkg/broker.go b/module/dkg/broker.go index f94fbc981fe..881c8958548 100644 --- a/module/dkg/broker.go +++ b/module/dkg/broker.go @@ -6,10 +6,10 @@ import ( "sync" "time" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/sethvargo/go-retry" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/fingerprint" "github.com/onflow/flow-go/model/flow" @@ -59,7 +59,7 @@ type Broker struct { log zerolog.Logger unit *engine.Unit dkgInstanceID string // unique identifier of the current dkg run (prevent replay attacks) - committee flow.IdentityList // identities of DKG members + committee flow.IdentitySkeletonList // identities of DKG members in canonical order me module.Local // used for signing broadcast messages myIndex int // index of this instance in the committee dkgContractClients []module.DKGContractClient // array of clients to communicate with the DKG smart contract in priority order for fallbacks during retries @@ -81,22 +81,27 @@ var _ module.DKGBroker = (*Broker)(nil) // NewBroker instantiates a new epoch-specific broker capable of communicating // with other nodes via a network engine and dkg smart-contract. 
+// No errors are expected during normal operations. func NewBroker( log zerolog.Logger, dkgInstanceID string, - committee flow.IdentityList, + committee flow.IdentitySkeletonList, me module.Local, myIndex int, dkgContractClients []module.DKGContractClient, tunnel *BrokerTunnel, opts ...BrokerOpt, -) *Broker { +) (*Broker, error) { config := DefaultBrokerConfig() for _, apply := range opts { apply(&config) } + if !committee.Sorted(flow.Canonical[flow.IdentitySkeleton]) { + return nil, fmt.Errorf("DKG broker expects that participants are sorted in canonical order") + } + b := &Broker{ config: config, log: log.With().Str("component", "dkg_broker").Str("dkg_instance_id", dkgInstanceID).Logger(), @@ -114,7 +119,7 @@ func NewBroker( go b.listen() - return b + return b, nil } /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -134,8 +139,11 @@ func (b *Broker) PrivateSend(dest int, data []byte) { return } dkgMessageOut := messages.PrivDKGMessageOut{ - DKGMessage: messages.NewDKGMessage(data, b.dkgInstanceID), - DestID: b.committee[dest].NodeID, + DKGMessage: messages.DKGMessage{ + Data: data, + DKGInstanceID: b.dkgInstanceID, + }, + DestID: b.committee[dest].NodeID, } b.tunnel.SendOut(dkgMessageOut) } @@ -204,18 +212,38 @@ func (b *Broker) Broadcast(data []byte) { } // SubmitResult publishes the result of the DKG protocol to the smart contract. +// This function should be passed the beacon keys (group key, participant keys) resulting from the DKG process. +// If the DKG process failed, no beacon keys will exist. In that case, we pass in nil here for both arguments. +// +// If non-nil arguments are provided, we submit a non-empty ResultSubmission to the DKG smart contract, +// indicating that we completed the DKG successfully and essentially "voting for" our result. +// If nil arguments are provided, we submit an empty ResultSubmission to the DKG smart contract, +// indicating that we completed the DKG unsuccessfully. func (b *Broker) SubmitResult(groupKey crypto.PublicKey, pubKeys []crypto.PublicKey) error { - // If the DKG failed locally, we will get a nil key vector here. We need to convert - // the nil slice to a slice of nil keys before submission. + // If the DKG failed locally, we will get a nil group key and nil participant key vector here. + // There are two different transaction templates for submitting either a happy-path and failure-path result. + // We use SubmitResult to submit a successful result and SubmitEmptyResult to communicate that we completed the DKG without a result. // // In general, if pubKeys does not have one key per participant, we cannot submit // a valid result - therefore we submit a nil vector (indicating that we have // completed the process, but we know that we don't have a valid result). - if len(pubKeys) != len(b.committee) { - b.log.Warn().Msgf("submitting dkg result with incomplete key vector (len=%d, expected=%d)", len(pubKeys), len(b.committee)) - // create a key vector with one nil entry for each committee member - pubKeys = make([]crypto.PublicKey, len(b.committee)) + var submitResult func(client module.DKGContractClient) error + if len(pubKeys) == len(b.committee) && groupKey != nil { + indexMap := make(flow.DKGIndexMap, len(pubKeys)) + // build a map of node IDs to indices in the key vector, + // this logic expects that committee is sorted in canonical order! 
+ for i, participant := range b.committee { + indexMap[participant.NodeID] = i + } + submitResult = func(client module.DKGContractClient) error { + return client.SubmitParametersAndResult(indexMap, groupKey, pubKeys) + } + } else { + b.log.Warn().Msgf("submitting empty dkg result because I completed the DKG unsuccessfully") + submitResult = func(client module.DKGContractClient) error { + return client.SubmitEmptyResult() + } } backoff := retry.NewExponential(b.config.RetryInitialWait) @@ -228,10 +256,9 @@ func (b *Broker) SubmitResult(groupKey crypto.PublicKey, pubKeys []crypto.Public b.log.Warn().Msgf("submit result: retrying on attempt (%d) with fallback access node at index (%d)", totalAttempts, clientIndex) } backoff = retrymiddleware.AfterConsecutiveFailures(b.config.RetryMaxConsecutiveFailures, backoff, onMaxConsecutiveRetries) - attempts := 1 err := retry.Do(b.unit.Ctx(), backoff, func(ctx context.Context) error { - err := dkgContractClient.SubmitResult(groupKey, pubKeys) + err := submitResult(dkgContractClient) if err != nil { b.log.Error().Err(err).Msgf("error submitting DKG result, retrying (attempt %d)", attempts) attempts++ @@ -345,7 +372,7 @@ func (b *Broker) Poll(referenceBlock flow.Identifier) error { continue } if !ok { - b.log.Error().Msg("invalid signature on broadcast dkg message") + b.log.Error().Err(err).Msg("invalid signature on broadcast dkg message") continue } b.log.Debug().Msgf("forwarding broadcast message to controller") @@ -442,10 +469,10 @@ func (b *Broker) hasValidDKGInstanceID(msg messages.DKGMessage) error { // prepareBroadcastMessage creates BroadcastDKGMessage with a signature from the // node's staking key. func (b *Broker) prepareBroadcastMessage(data []byte) (messages.BroadcastDKGMessage, error) { - dkgMessage := messages.NewDKGMessage( - data, - b.dkgInstanceID, - ) + dkgMessage := messages.DKGMessage{ + Data: data, + DKGInstanceID: b.dkgInstanceID, + } sigData := fingerprint.Fingerprint(dkgMessage) signature, err := b.me.Sign(sigData[:], NewDKGMessageHasher()) if err != nil { @@ -470,7 +497,7 @@ func (b *Broker) prepareBroadcastMessage(data []byte) (messages.BroadcastDKGMess func (b *Broker) verifyBroadcastMessage(bcastMsg messages.BroadcastDKGMessage) (bool, error) { err := b.hasValidDKGInstanceID(bcastMsg.DKGMessage) if err != nil { - return false, err + return false, fmt.Errorf("invalid dkg instance: %w", err) } origin := b.committee[bcastMsg.CommitteeMemberIndex] signData := fingerprint.Fingerprint(bcastMsg.DKGMessage) diff --git a/module/dkg/broker_test.go b/module/dkg/broker_test.go index 85b744a913d..566fcb5ca23 100644 --- a/module/dkg/broker_test.go +++ b/module/dkg/broker_test.go @@ -27,12 +27,13 @@ var ( dkgInstanceID = "flow-testnet-42" // dkg instance identifier ) -func initCommittee(n int) (identities flow.IdentityList, locals []module.Local) { - privateStakingKeys := unittest.StakingKeys(n) - for i, key := range privateStakingKeys { - id := unittest.IdentityFixture(unittest.WithStakingPubKey(key.PublicKey())) - identities = append(identities, id) - local, _ := local.New(id, privateStakingKeys[i]) +func initCommittee(n int) (identities flow.IdentitySkeletonList, locals []module.Local) { + for _, identity := range unittest.IdentityListFixture(n).ToSkeleton().Sort(flow.Canonical[flow.IdentitySkeleton]) { + stakingPriv := unittest.StakingPrivKeyFixture() + identity.StakingPubKey = stakingPriv.PublicKey() + + identities = append(identities, identity) + local, _ := local.New(*identity, stakingPriv) locals = append(locals, local) } return 
identities, locals @@ -62,6 +63,23 @@ func TestDefaultConfig(t *testing.T) { }) } +// TestNewBroker_OrderNotCanonical checks that broker creation fails if the identities aren't sorted in canonical order. +func TestNewBroker_OrderNotCanonical(t *testing.T) { + identities, locals := initCommittee(2) + identities[0], identities[1] = identities[1], identities[0] // break canonical order + + _, err := NewBroker( + zerolog.Logger{}, + dkgInstanceID, + identities, + locals[orig], + orig, + []module.DKGContractClient{&mock.DKGContractClient{}}, + NewBrokerTunnel(), + ) + require.Error(t, err) +} + // TestPrivateSend_Valid checks that the broker correctly converts the message // destination parameter (index in committee list) to the corresponding // public Identifier, and successfully sends a DKG message to the intended @@ -70,7 +88,7 @@ func TestPrivateSend_Valid(t *testing.T) { committee, locals := initCommittee(2) // sender broker - sender := NewBroker( + sender, err := NewBroker( zerolog.Logger{}, dkgInstanceID, committee, @@ -79,13 +97,14 @@ func TestPrivateSend_Valid(t *testing.T) { []module.DKGContractClient{&mock.DKGContractClient{}}, NewBrokerTunnel(), ) + require.NoError(t, err) // expected DKGMessageOut expectedMsg := msg.PrivDKGMessageOut{ - DKGMessage: msg.NewDKGMessage( - msgb, - dkgInstanceID, - ), + DKGMessage: msg.DKGMessage{ + Data: msgb, + DKGInstanceID: dkgInstanceID, + }, DestID: committee[dest].NodeID, } @@ -111,7 +130,7 @@ func TestPrivateSend_IndexOutOfRange(t *testing.T) { committee, locals := initCommittee(2) // sender broker - sender := NewBroker( + sender, err := NewBroker( zerolog.Logger{}, dkgInstanceID, committee, @@ -120,6 +139,7 @@ func TestPrivateSend_IndexOutOfRange(t *testing.T) { []module.DKGContractClient{&mock.DKGContractClient{}}, NewBrokerTunnel(), ) + require.NoError(t, err) // Launch a background routine to capture messages sent through the tunnel. // No messages should be received because we are only sending invalid ones. 
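Aside: after this change NewBroker returns an error for committees that are not in canonical order (exercised by TestNewBroker_OrderNotCanonical above), so callers must sort first and derive their own index from the sorted list. A minimal sketch under that assumption, using identifiers that appear in this diff (the int conversion of the index is assumed):

	sorted := identities.Sort(flow.Canonical[flow.IdentitySkeleton])
	myIndex, ok := sorted.GetIndex(me.NodeID())
	if !ok {
		return fmt.Errorf("node %s is not a DKG participant", me.NodeID())
	}
	broker, err := NewBroker(log, dkgInstanceID, sorted, me, int(myIndex), clients, tunnel)
	if err != nil {
		return err // e.g. participants not sorted in canonical order
	}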
@@ -145,7 +165,7 @@ func TestReceivePrivateMessage_Valid(t *testing.T) { committee, locals := initCommittee(2) // receiving broker - receiver := NewBroker( + receiver, err := NewBroker( zerolog.Logger{}, dkgInstanceID, committee, @@ -154,8 +174,9 @@ func TestReceivePrivateMessage_Valid(t *testing.T) { []module.DKGContractClient{&mock.DKGContractClient{}}, NewBrokerTunnel(), ) + require.NoError(t, err) - dkgMessage := msg.NewDKGMessage(msgb, dkgInstanceID) + dkgMessage := msg.DKGMessage{Data: msgb, DKGInstanceID: dkgInstanceID} expectedMsg := msg.PrivDKGMessageIn{ OriginID: committee[0].NodeID, DKGMessage: dkgMessage, @@ -192,7 +213,7 @@ func TestBroadcastMessage(t *testing.T) { committee, locals := initCommittee(2) // sender - sender := NewBroker( + sender, err := NewBroker( unittest.Logger(), dkgInstanceID, committee, @@ -202,6 +223,7 @@ func TestBroadcastMessage(t *testing.T) { NewBrokerTunnel(), func(config *BrokerConfig) { config.RetryInitialWait = 1 }, // disable waiting between retries for tests ) + require.NoError(t, err) expectedMsg, err := sender.prepareBroadcastMessage(msgb) require.NoError(t, err) @@ -236,7 +258,7 @@ func TestBroadcastMessage(t *testing.T) { func TestPoll(t *testing.T) { committee, locals := initCommittee(2) - sender := NewBroker( + sender, err := NewBroker( zerolog.Logger{}, dkgInstanceID, committee, @@ -245,8 +267,9 @@ func TestPoll(t *testing.T) { []module.DKGContractClient{&mock.DKGContractClient{}}, NewBrokerTunnel(), ) + require.NoError(t, err) - recipient := NewBroker( + recipient, err := NewBroker( zerolog.Logger{}, dkgInstanceID, committee, @@ -255,6 +278,7 @@ func TestPoll(t *testing.T) { []module.DKGContractClient{&mock.DKGContractClient{}}, NewBrokerTunnel(), ) + require.NoError(t, err) blockID := unittest.IdentifierFixture() bcastMsgs := []msg.BroadcastDKGMessage{} @@ -286,7 +310,7 @@ func TestPoll(t *testing.T) { } }() - err := sender.Poll(blockID) + err = sender.Poll(blockID) require.NoError(t, err) // check that the contract has been correctly called @@ -315,7 +339,7 @@ func TestLogHook(t *testing.T) { logger := zerolog.New(os.Stdout).Level(zerolog.WarnLevel).Hook(hook) // sender - sender := NewBroker( + sender, err := NewBroker( logger, dkgInstanceID, committee, @@ -324,6 +348,7 @@ func TestLogHook(t *testing.T) { []module.DKGContractClient{&mock.DKGContractClient{}}, NewBrokerTunnel(), ) + require.NoError(t, err) sender.Disqualify(1, "testing") sender.FlagMisbehavior(1, "test") @@ -336,7 +361,7 @@ func TestProcessPrivateMessage_InvalidOrigin(t *testing.T) { committee, locals := initCommittee(2) // receiving broker - receiver := NewBroker( + receiver, err := NewBroker( zerolog.Logger{}, dkgInstanceID, committee, @@ -345,6 +370,7 @@ func TestProcessPrivateMessage_InvalidOrigin(t *testing.T) { []module.DKGContractClient{&mock.DKGContractClient{}}, NewBrokerTunnel(), ) + require.NoError(t, err) // Launch a background routine to capture messages forwared to the private // message channel. 
No messages should be received because we are only @@ -358,10 +384,10 @@ func TestProcessPrivateMessage_InvalidOrigin(t *testing.T) { } }() - dkgMsg := msg.NewDKGMessage( - msgb, - dkgInstanceID, - ) + dkgMsg := msg.DKGMessage{ + Data: msgb, + DKGInstanceID: dkgInstanceID, + } // simulate receiving an incoming message with an OriginID of a non-committee member receiver.tunnel.SendIn( msg.PrivDKGMessageIn{ diff --git a/module/dkg/client.go b/module/dkg/client.go index e8401f23736..6ff4df210da 100644 --- a/module/dkg/client.go +++ b/module/dkg/client.go @@ -2,20 +2,22 @@ package dkg import ( "context" + _ "embed" "encoding/json" "fmt" "strconv" "time" + "github.com/rs/zerolog" + "github.com/onflow/cadence" + "github.com/onflow/crypto" "github.com/onflow/flow-core-contracts/lib/go/templates" - "github.com/rs/zerolog" sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/model/flow" model "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/epochs" @@ -29,6 +31,8 @@ type Client struct { env templates.Environment } +var _ module.DKGContractClient = (*Client)(nil) + // NewClient initializes a new client to the Flow DKG contract func NewClient( log zerolog.Logger, @@ -37,14 +41,14 @@ func NewClient( signer sdkcrypto.Signer, dkgContractAddress, accountAddress string, - accountKeyIndex uint, + accountKeyIndex uint32, ) *Client { log = log.With(). Str("component", "dkg_contract_client"). Str("flow_client_an_id", flowClientANID.String()). Logger() - base := epochs.NewBaseClient(log, flowClient, accountAddress, accountKeyIndex, signer, dkgContractAddress) + base := epochs.NewBaseClient(log, flowClient, accountAddress, accountKeyIndex, signer) env := templates.Environment{DkgAddress: dkgContractAddress} @@ -63,8 +67,14 @@ func (c *Client) ReadBroadcast(fromIndex uint, referenceBlock flow.Identifier) ( // construct read latest broadcast messages transaction template := templates.GenerateGetDKGLatestWhiteBoardMessagesScript(c.env) - value, err := c.FlowClient.ExecuteScriptAtBlockID(ctx, - sdk.Identifier(referenceBlock), template, []cadence.Value{cadence.NewInt(int(fromIndex))}) + value, err := c.FlowClient.ExecuteScriptAtBlockID( + ctx, + sdk.Identifier(referenceBlock), + template, + []cadence.Value{ + cadence.NewInt(int(fromIndex)), + }, + ) if err != nil { return nil, fmt.Errorf("could not execute read broadcast script: %w", err) } @@ -73,7 +83,9 @@ func (c *Client) ReadBroadcast(fromIndex uint, referenceBlock flow.Identifier) ( // unpack return from contract to `model.DKGMessage` messages := make([]model.BroadcastDKGMessage, 0, len(values)) for _, val := range values { - id, err := strconv.Unquote(val.(cadence.Struct).Fields[0].String()) + fields := cadence.FieldsMappedByName(val.(cadence.Struct)) + + id, err := strconv.Unquote(fields["nodeID"].String()) if err != nil { return nil, fmt.Errorf("could not unquote nodeID cadence string (%s): %w", id, err) } @@ -83,7 +95,7 @@ func (c *Client) ReadBroadcast(fromIndex uint, referenceBlock flow.Identifier) ( return nil, fmt.Errorf("could not parse nodeID (%v): %w", val, err) } - content := val.(cadence.Struct).Fields[1] + content := fields["content"] jsonString, err := strconv.Unquote(content.String()) if err != nil { return nil, fmt.Errorf("could not unquote json string: %w", err) @@ -126,9 +138,9 @@ func (c *Client) Broadcast(msg model.BroadcastDKGMessage) error 
{ // construct transaction to send dkg whiteboard message to contract tx := sdk.NewTransaction(). SetScript(templates.GenerateSendDKGWhiteboardMessageScript(c.env)). - SetGasLimit(9999). + SetComputeLimit(9999). SetReferenceBlockID(latestBlock.ID). - SetProposalKey(account.Address, int(c.AccountKeyIndex), account.Keys[int(c.AccountKeyIndex)].SequenceNumber). + SetProposalKey(account.Address, c.AccountKeyIndex, account.Keys[int(c.AccountKeyIndex)].SequenceNumber). SetPayer(account.Address). AddAuthorizer(account.Address) @@ -149,7 +161,7 @@ func (c *Client) Broadcast(msg model.BroadcastDKGMessage) error { } // sign envelope using account signer - err = tx.SignEnvelope(account.Address, int(c.AccountKeyIndex), c.Signer) + err = tx.SignEnvelope(account.Address, c.AccountKeyIndex, c.Signer) if err != nil { return fmt.Errorf("could not sign transaction: %w", err) } @@ -169,11 +181,18 @@ func (c *Client) Broadcast(msg model.BroadcastDKGMessage) error { return nil } -// SubmitResult submits the final public result of the DKG protocol. This -// represents the group public key and the node's local computation of the -// public keys for each DKG participant. Serialized pub keys are encoded as hex. -func (c *Client) SubmitResult(groupPublicKey crypto.PublicKey, publicKeys []crypto.PublicKey) error { - +// SubmitParametersAndResult posts the DKG setup parameters (`flow.DKGIndexMap`) and the node's locally-computed DKG result to +// the DKG white-board smart contract. The DKG results are the node's local computation of the group public key and the public +// key shares. Serialized public keys are encoded as lower-case hex strings. +// Conceptually the flow.DKGIndexMap is not an output of the DKG protocol. Rather, it is part of the configuration/initialization +// information of the DKG. Before an epoch transition on the happy path (using the data in the EpochSetup event), each consensus +// participant locally fixes the DKG committee 𝒟 including the respective nodes' order to be identical to the consensus +// committee 𝒞. However, in case of a failed epoch transition, we desire the ability to manually provide the result of a successful +// DKG for the immediately next epoch (so-called recovery epoch). The DKG committee 𝒟 must have a sufficiently large overlap with +// the recovery epoch's consensus committee 𝒞 -- though for flexibility, we do *not* want to require that both committees are identical. +// Therefore, we need to explicitly specify the DKG committee 𝒟 on the fallback path. For uniformity of implementation, we do the +// same also on the happy path. +func (c *Client) SubmitParametersAndResult(indexMap flow.DKGIndexMap, groupPublicKey crypto.PublicKey, publicKeys []crypto.PublicKey) error { started := time.Now() ctx, cancel := context.WithTimeout(context.Background(), epochs.TransactionSubmissionTimeout) defer cancel() @@ -192,55 +211,116 @@ func (c *Client) SubmitResult(groupPublicKey crypto.PublicKey, publicKeys []cryp tx := sdk.NewTransaction(). SetScript(templates.GenerateSendDKGFinalSubmissionScript(c.env)). - SetGasLimit(9999). + SetComputeLimit(9999). SetReferenceBlockID(latestBlock.ID). - SetProposalKey(account.Address, int(c.AccountKeyIndex), account.Keys[int(c.AccountKeyIndex)].SequenceNumber). + SetProposalKey(account.Address, c.AccountKeyIndex, account.Keys[int(c.AccountKeyIndex)].SequenceNumber). SetPayer(account.Address). AddAuthorizer(account.Address) - // Note: We need to make sure that we pull the keys out in the same order that - // we have done here. 
Group Public key first followed by the individual public keys - finalSubmission := make([]cadence.Value, 0, len(publicKeys)) + trimmedGroupHexString := trim0x(groupPublicKey.String()) + cdcGroupString, err := cadence.NewString(trimmedGroupHexString) + if err != nil { + return fmt.Errorf("could not convert group key to cadence: %w", err) + } - // first append group public key - if groupPublicKey != nil { - trimmedGroupHexString := trim0x(groupPublicKey.String()) - cdcGroupString, err := cadence.NewString(trimmedGroupHexString) - if err != nil { - return fmt.Errorf("could not convert group key to cadence: %w", err) - } - finalSubmission = append(finalSubmission, cadence.NewOptional(cdcGroupString)) - } else { - finalSubmission = append(finalSubmission, cadence.NewOptional(nil)) + // setup first arg - group key + err = tx.AddArgument(cdcGroupString) + if err != nil { + return fmt.Errorf("could not add argument to transaction: %w", err) } + cdcPublicKeys := make([]cadence.Value, 0, len(publicKeys)) for _, publicKey := range publicKeys { - // append individual public keys - if publicKey != nil { - trimmedHexString := trim0x(publicKey.String()) - cdcPubKey, err := cadence.NewString(trimmedHexString) - if err != nil { - return fmt.Errorf("could not convert pub keyshare to cadence: %w", err) - } - finalSubmission = append(finalSubmission, cadence.NewOptional(cdcPubKey)) - } else { - finalSubmission = append(finalSubmission, cadence.NewOptional(nil)) + trimmedHexString := trim0x(publicKey.String()) + cdcPubKey, err := cadence.NewString(trimmedHexString) + if err != nil { + return fmt.Errorf("could not convert pub keyshare to cadence: %w", err) } + cdcPublicKeys = append(cdcPublicKeys, cdcPubKey) } - err = tx.AddArgument(cadence.NewArray(finalSubmission)) + // setup second arg - array of public keys + err = tx.AddArgument(cadence.NewArray(cdcPublicKeys)) if err != nil { return fmt.Errorf("could not add argument to transaction: %w", err) } + cdcIndexMap := make([]cadence.KeyValuePair, 0, len(indexMap)) + for nodeID, dkgIndex := range indexMap { + cdcIndexMap = append(cdcIndexMap, cadence.KeyValuePair{ + Key: cadence.String(nodeID.String()), + Value: cadence.NewInt(dkgIndex), + }) + } + + // setup third arg - IndexMap + err = tx.AddArgument(cadence.NewDictionary(cdcIndexMap)) + if err != nil { + return fmt.Errorf("could not add argument to transaction: %w", err) + } + + // sign envelope using account signer + err = tx.SignEnvelope(account.Address, c.AccountKeyIndex, c.Signer) + if err != nil { + return fmt.Errorf("could not sign transaction: %w", err) + } + + c.Log.Info().Str("tx_id", tx.ID().Hex()).Msg("sending SubmitParametersAndResult transaction") + txID, err := c.SendTransaction(ctx, tx) + if err != nil { + return fmt.Errorf("failed to submit transaction: %w", err) + } + + err = c.WaitForSealed(ctx, txID, started) + if err != nil { + return fmt.Errorf("failed to wait for transaction seal: %w", err) + } + + return nil +} + +// SubmitEmptyResult submits an empty result of the DKG protocol. The empty result is obtained by a node when +// it realizes locally that its DKG participation was unsuccessful (either because the DKG failed as a whole, +// or because the node received too many byzantine inputs). However, a node obtaining an empty result can +// happen in both cases of the DKG succeeding or failing. 
For further details, please see: +// https://flowfoundation.notion.site/Random-Beacon-2d61f3b3ad6e40ee9f29a1a38a93c99c +// Honest nodes would call `SubmitEmptyResult` strictly after the final phase has ended if DKG has ended. +// Though, `SubmitEmptyResult` also supports implementing byzantine participants for testing that submit an +// empty result too early (intentional protocol violation), *before* the final DKG phase concluded. +// SubmitEmptyResult must be called strictly after the final phase has ended if DKG has failed. +func (c *Client) SubmitEmptyResult() error { + started := time.Now() + ctx, cancel := context.WithTimeout(context.Background(), epochs.TransactionSubmissionTimeout) + defer cancel() + + // get account for given address + account, err := c.GetAccount(ctx) + if err != nil { + return fmt.Errorf("could not get account details: %w", err) + } + + // get latest finalized block to execute transaction + latestBlock, err := c.FlowClient.GetLatestBlock(ctx, false) + if err != nil { + return fmt.Errorf("could not get latest block from node: %w", err) + } + + tx := sdk.NewTransaction(). + SetScript(templates.GenerateSendEmptyDKGFinalSubmissionScript(c.env)). + SetComputeLimit(9999). + SetReferenceBlockID(latestBlock.ID). + SetProposalKey(account.Address, c.AccountKeyIndex, account.Keys[int(c.AccountKeyIndex)].SequenceNumber). + SetPayer(account.Address). + AddAuthorizer(account.Address) + // sign envelope using account signer - err = tx.SignEnvelope(account.Address, int(c.AccountKeyIndex), c.Signer) + err = tx.SignEnvelope(account.Address, c.AccountKeyIndex, c.Signer) if err != nil { return fmt.Errorf("could not sign transaction: %w", err) } - c.Log.Info().Str("tx_id", tx.ID().Hex()).Msg("sending SubmitResult transaction") + c.Log.Info().Str("tx_id", tx.ID().Hex()).Msg("sending SubmitEmptyResult transaction") txID, err := c.SendTransaction(ctx, tx) if err != nil { return fmt.Errorf("failed to submit transaction: %w", err) diff --git a/module/dkg/controller.go b/module/dkg/controller.go index 5c9adf4994a..34f91b49a72 100644 --- a/module/dkg/controller.go +++ b/module/dkg/controller.go @@ -2,68 +2,15 @@ package dkg import ( "fmt" - "math" - "math/rand" "sync" - "time" + "github.com/onflow/crypto" "github.com/rs/zerolog" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" ) -const ( - - // DefaultBaseStartDelay is the default base delay to use when introducing - // random delay to the DKG start process. See preStartDelay for details. - DefaultBaseStartDelay = 500 * time.Microsecond - - // DefaultBaseHandleFirstBroadcastDelay is the default base to use when - // introducing random delay to processing the first DKG broadcast message. - // See preHandleFirstBroadcastDelay for details. - // - // For a 150-node DKG, we observe a cost of ~2.5s per message to process - // broadcast messages during phase 1, for a total of ~6m of total CPU time. - // We would like to target spreading this cost over a 30 minute period. - // With the default value for DefaultHandleSubsequentBroadcastDelay, this - // results in processing all phase 1 messages in 6m+6m=12m, so for a maximum - // total processing time of 30m, we sample the initial delay from [0,18m]. - // We use 50ms as the default because 50ms*150^2 = 18.75m - // - DefaultBaseHandleFirstBroadcastDelay = 50 * time.Millisecond - - // DefaultHandleSubsequentBroadcastDelay is the default delay to use before - // processing all DKG broadcasts after the first. 
- DefaultHandleSubsequentBroadcastDelay = 2500 * time.Millisecond -) - -// ControllerConfig defines configuration for the DKG Controller. These define -// how the DKG controller introduces delays to expensive DKG computations. -// -// We introduce delays for two reasons: -// 1. Avoid running long-running expensive DKG computations consecutively. -// 2. Avoid synchronizing expensive DKG computations across the DKG committee. -// -// Delays introduced prior to DKG start and prior to processing the FIRST broadcast -// message are sampled uniformly from [0,m), where m=b*n^2 -// -// b = base delay (from config) -// n = size of DKG committee -// -// Delays introduced prior to processing subsequent broadcast messages are constant. -type ControllerConfig struct { - // BaseStartDelay determines the maximum delay before starting the DKG. - BaseStartDelay time.Duration - // BaseHandleFirstBroadcastDelay determines the maximum delay before handling - // the first broadcast message. - BaseHandleFirstBroadcastDelay time.Duration - // HandleSubsequentBroadcastDelay determines the constant delay before handling - // all broadcast messages following the first. - HandleSubsequentBroadcastDelay time.Duration -} - // Controller implements the DKGController interface. It controls the execution // of a Joint Feldman DKG instance. A new Controller must be instantiated for // every epoch. @@ -101,10 +48,11 @@ type Controller struct { // artifactsLock protects access to artifacts artifactsLock sync.Mutex - config ControllerConfig - once *sync.Once + once *sync.Once } +var _ module.DKGController = (*Controller)(nil) + // NewController instantiates a new Joint Feldman DKG controller. func NewController( log zerolog.Logger, @@ -112,7 +60,6 @@ func NewController( dkg crypto.DKGState, seed []byte, broker module.DKGBroker, - config ControllerConfig, ) *Controller { logger := log.With(). 
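Aside: checking the arithmetic in the deleted comment block above: with base delay b = 50ms and committee size n = 150, the maximum jitter is m = b·n² = 50ms × 150² = 50ms × 22,500 = 1,125s = 18.75 minutes, which matches both the "[0,18m]" sampling window and the "50ms*150^2 = 18.75m" figure cited there. The hunks below remove this start/broadcast jitter mechanism entirely rather than retuning it.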
@@ -130,7 +77,6 @@ func NewController( endCh: make(chan struct{}), shutdownCh: make(chan struct{}), once: new(sync.Once), - config: config, } } @@ -293,29 +239,6 @@ func (c *Controller) doBackgroundWork() { case msg := <-broadcastMsgCh: - // before processing a broadcast message during phase 1, sleep for a - // random delay to avoid synchronizing this expensive operation across - // all consensus nodes - state := c.GetState() - if state == Phase1 { - - // introduce a large, uniformly sampled delay prior to processing - // the first message - isFirstMessage := false - c.once.Do(func() { - isFirstMessage = true - delay := c.preHandleFirstBroadcastDelay() - c.log.Info().Msgf("sleeping for %s before processing first phase 1 broadcast message", delay) - time.Sleep(delay) - }) - - if !isFirstMessage { - // introduce a constant delay for all subsequent messages - c.log.Debug().Msgf("sleeping for %s before processing subsequent phase 1 broadcast message", c.config.HandleSubsequentBroadcastDelay) - time.Sleep(c.config.HandleSubsequentBroadcastDelay) - } - } - c.dkgLock.Lock() err := c.dkg.HandleBroadcastMsg(int(msg.CommitteeMemberIndex), msg.Data) c.dkgLock.Unlock() @@ -335,12 +258,6 @@ func (c *Controller) start() error { return fmt.Errorf("cannot execute start routine in state %s", state) } - // before starting the DKG, sleep for a random delay to avoid synchronizing - // this expensive operation across all consensus nodes - delay := c.preStartDelay() - c.log.Debug().Msgf("sleeping for %s before starting DKG", delay) - time.Sleep(delay) - c.dkgLock.Lock() err := c.dkg.Start(c.seed) c.dkgLock.Unlock() @@ -417,55 +334,3 @@ func (c *Controller) phase3() error { } } } - -// preStartDelay returns a duration to delay prior to starting the DKG process. -// This prevents synchronization of the DKG starting (an expensive operation) -// across the network, which can impact finalization. -func (c *Controller) preStartDelay() time.Duration { - delay := computePreprocessingDelay(c.config.BaseStartDelay, c.dkg.Size()) - return delay -} - -// preHandleFirstBroadcastDelay returns a duration to delay prior to handling -// the first broadcast message. This delay is used only during phase 1 of the DKG. -// This prevents synchronization of processing verification vectors (an -// expensive operation) across the network, which can impact finalization. -func (c *Controller) preHandleFirstBroadcastDelay() time.Duration { - delay := computePreprocessingDelay(c.config.BaseHandleFirstBroadcastDelay, c.dkg.Size()) - return delay -} - -// computePreprocessingDelay computes a random delay to introduce before an -// expensive operation. -// -// The maximum delay is m=b*n^2 where: -// * b is a configurable base delay -// * n is the size of the DKG committee -func computePreprocessingDelay(baseDelay time.Duration, dkgSize int) time.Duration { - - maxDelay := computePreprocessingDelayMax(baseDelay, dkgSize) - if maxDelay <= 0 { - return 0 - } - // select delay from [0,m) - delay := time.Duration(rand.Int63n(maxDelay.Nanoseconds())) - return delay -} - -// computePreprocessingDelayMax computes the maximum dely for computePreprocessingDelay. 
-func computePreprocessingDelayMax(baseDelay time.Duration, dkgSize int) time.Duration { - // sanity checks - if baseDelay < 0 { - baseDelay = 0 - } - if dkgSize < 0 { - dkgSize = 0 - } - - // m=b*n^2 - maxDelay := time.Duration(math.Pow(float64(dkgSize), 2)) * baseDelay - if maxDelay <= 0 { - return 0 - } - return maxDelay -} diff --git a/module/dkg/controller_factory.go b/module/dkg/controller_factory.go index ae12219706e..13587835e43 100644 --- a/module/dkg/controller_factory.go +++ b/module/dkg/controller_factory.go @@ -3,9 +3,9 @@ package dkg import ( "fmt" + "github.com/onflow/crypto" "github.com/rs/zerolog" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/signature" @@ -21,7 +21,6 @@ type ControllerFactory struct { me module.Local dkgContractClients []module.DKGContractClient tunnel *BrokerTunnel - config ControllerConfig } // NewControllerFactory creates a new factory that generates Controllers with @@ -31,30 +30,36 @@ func NewControllerFactory( me module.Local, dkgContractClients []module.DKGContractClient, tunnel *BrokerTunnel, - config ControllerConfig) *ControllerFactory { +) *ControllerFactory { return &ControllerFactory{ log: log, me: me, dkgContractClients: dkgContractClients, tunnel: tunnel, - config: config, } } // Create creates a new epoch-specific Controller equipped with a broker which // is capable of communicating with other nodes. +// Participants list must be sorted in canonical order. +// No errors are expected during normal operations. func (f *ControllerFactory) Create( dkgInstanceID string, - participants flow.IdentityList, - seed []byte) (module.DKGController, error) { + participants flow.IdentitySkeletonList, + seed []byte, +) (module.DKGController, error) { + // ensure participants are sorted in canonical order + if !participants.Sorted(flow.Canonical[flow.IdentitySkeleton]) { + return nil, fmt.Errorf("participants are not sorted in canonical order") + } myIndex, ok := participants.GetIndex(f.me.NodeID()) if !ok { return nil, fmt.Errorf("failed to create controller factory, node %s is not part of DKG committee", f.me.NodeID().String()) } - broker := NewBroker( + broker, err := NewBroker( f.log, dkgInstanceID, participants, @@ -63,6 +68,9 @@ func (f *ControllerFactory) Create( f.dkgContractClients, f.tunnel, ) + if err != nil { + return nil, fmt.Errorf("could not create DKG broker: %w", err) + } n := len(participants) threshold := signature.RandomBeaconThreshold(n) @@ -77,7 +85,6 @@ func (f *ControllerFactory) Create( dkg, seed, broker, - f.config, ) return controller, nil diff --git a/module/dkg/controller_test.go b/module/dkg/controller_test.go index 03f10adf1c1..22f8ec6a100 100644 --- a/module/dkg/controller_test.go +++ b/module/dkg/controller_test.go @@ -6,11 +6,10 @@ import ( "testing" "time" + "github.com/onflow/crypto" "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" msg "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/signature" @@ -99,7 +98,7 @@ type broker struct { // PrivateSend implements the crypto.DKGProcessor interface. 
func (b *broker) PrivateSend(dest int, data []byte) { b.privateChannels[dest] <- msg.PrivDKGMessageIn{ - DKGMessage: msg.NewDKGMessage(data, b.dkgInstanceID), + DKGMessage: msg.DKGMessage{Data: data, DKGInstanceID: b.dkgInstanceID}, CommitteeMemberIndex: uint64(b.id), } } @@ -118,7 +117,7 @@ func (b *broker) Broadcast(data []byte) { } // epoch and phase are not relevant at the controller level b.broadcastChannels[i] <- msg.BroadcastDKGMessage{ - DKGMessage: msg.NewDKGMessage(data, b.dkgInstanceID), + DKGMessage: msg.DKGMessage{Data: data, DKGInstanceID: b.dkgInstanceID}, CommitteeMemberIndex: uint64(b.id), } } @@ -248,25 +247,17 @@ func initNodes(t *testing.T, n int, phase1Duration, phase2Duration, phase3Durati logger: logger, } - seed := unittest.SeedFixture(20) + seed := unittest.SeedFixture(crypto.KeyGenSeedMinLen) dkg, err := crypto.NewJointFeldman(n, signature.RandomBeaconThreshold(n), i, broker) require.NoError(t, err) - // create a config with no delays for tests - config := ControllerConfig{ - BaseStartDelay: 0, - BaseHandleFirstBroadcastDelay: 0, - HandleSubsequentBroadcastDelay: 0, - } - controller := NewController( logger, "dkg_test", dkg, seed, broker, - config, ) require.NoError(t, err) @@ -329,54 +320,3 @@ func checkArtifacts(t *testing.T, nodes []*node, totalNodes int) { } } } - -func TestDelay(t *testing.T) { - - t.Run("should return 0 delay for <=0 inputs", func(t *testing.T) { - delay := computePreprocessingDelay(0, 100) - assert.Equal(t, delay, time.Duration(0)) - delay = computePreprocessingDelay(time.Hour, 0) - assert.Equal(t, delay, time.Duration(0)) - delay = computePreprocessingDelay(time.Millisecond, -1) - assert.Equal(t, delay, time.Duration(0)) - delay = computePreprocessingDelay(-time.Millisecond, 100) - assert.Equal(t, delay, time.Duration(0)) - }) - - // NOTE: this is a probabilistic test. It will (extremely infrequently) fail. 
- t.Run("should return different values for same inputs", func(t *testing.T) { - d1 := computePreprocessingDelay(time.Hour, 100) - d2 := computePreprocessingDelay(time.Hour, 100) - assert.NotEqual(t, d1, d2) - }) - - t.Run("should return values in expected range", func(t *testing.T) { - baseDelay := time.Second - dkgSize := 100 - minDelay := time.Duration(0) - // m=b*n^2 - expectedMaxDelay := time.Duration(int64(baseDelay) * int64(dkgSize) * int64(dkgSize)) - - maxDelay := computePreprocessingDelayMax(baseDelay, dkgSize) - assert.Equal(t, expectedMaxDelay, maxDelay) - - delay := computePreprocessingDelay(baseDelay, dkgSize) - assert.LessOrEqual(t, minDelay, delay) - assert.GreaterOrEqual(t, expectedMaxDelay, delay) - }) - - t.Run("should return values in expected range for defaults", func(t *testing.T) { - baseDelay := DefaultBaseHandleFirstBroadcastDelay - dkgSize := 150 - minDelay := time.Duration(0) - // m=b*n^2 - expectedMaxDelay := time.Duration(int64(baseDelay) * int64(dkgSize) * int64(dkgSize)) - - maxDelay := computePreprocessingDelayMax(baseDelay, dkgSize) - assert.Equal(t, expectedMaxDelay, maxDelay) - - delay := computePreprocessingDelay(baseDelay, dkgSize) - assert.LessOrEqual(t, minDelay, delay) - assert.GreaterOrEqual(t, expectedMaxDelay, delay) - }) -} diff --git a/module/dkg/hasher.go b/module/dkg/hasher.go index 8f6cee27f57..f7fbc826282 100644 --- a/module/dkg/hasher.go +++ b/module/dkg/hasher.go @@ -1,7 +1,8 @@ package dkg import ( - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/module/signature" ) diff --git a/module/dkg/mock_client.go b/module/dkg/mock_client.go deleted file mode 100644 index 0bb3e2f85fb..00000000000 --- a/module/dkg/mock_client.go +++ /dev/null @@ -1,44 +0,0 @@ -package dkg - -import ( - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/model/flow" - model "github.com/onflow/flow-go/model/messages" -) - -// TEMPORARY: The functionality to allow starting up a node without a properly configured -// machine account is very much intended to be temporary. - -// This is required to support the very first mainnet spork with the epoch smart contract. -// At the beginning of the spork, operators will not have been able to generate their machine account -// because the smart contracts to do so have not been deployed yet. Therefore, for the duration of the spork, -// we allow this config to be omitted. For all subsequent sporks, it will be required. 
-// Implemented by: https://github.com/dapperlabs/flow-go/issues/5585 -// Will be reverted by: https://github.com/dapperlabs/flow-go/issues/5619 - -type MockClient struct { - log zerolog.Logger -} - -func NewMockClient(log zerolog.Logger) *MockClient { - - log = log.With().Str("component", "mock_dkg_contract_client").Logger() - return &MockClient{log: log} -} - -func (c *MockClient) Broadcast(msg model.BroadcastDKGMessage) error { - c.log.Fatal().Msg("caution: missing machine account configuration, but machine account used (Broadcast)") - return nil -} - -func (c *MockClient) ReadBroadcast(fromIndex uint, referenceBlock flow.Identifier) ([]model.BroadcastDKGMessage, error) { - c.log.Fatal().Msg("caution: missing machine account configuration, but machine account used (ReadBroadcast)") - return nil, nil -} - -func (c *MockClient) SubmitResult(groupPublicKey crypto.PublicKey, publicKeys []crypto.PublicKey) error { - c.log.Fatal().Msg("caution: missing machine account configuration, but machine account used (SubmitResult)") - return nil -} diff --git a/module/dkg/recovery.go b/module/dkg/recovery.go new file mode 100644 index 00000000000..837734dbe9b --- /dev/null +++ b/module/dkg/recovery.go @@ -0,0 +1,176 @@ +package dkg + +import ( + "context" + "errors" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/events" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/logging" +) + +// the nextEpochNotYetCommitted is an entirely internal sentinel error, which indicates that no +// private Random Beacon Key for the next epoch could be recovered, as the next epoch is not +// yet committed. +var nextEpochNotYetCommitted = errors.New("next Epoch not yet committed") + +// BeaconKeyRecovery is a specific module that attempts automatic recovery of the random beacon private key +// when exiting Epoch Fallback Mode [EFM]. +// In the happy path of the protocol, each node that takes part in the DKG obtains a random beacon +// private key, which is stored in storage.DKGState. If the network enters EFM, the network can be recovered +// via the flow.EpochRecover service event, which exits EFM by specifying the subsequent epoch ("recovery epoch"). +// This recovery epoch must have a Random Beacon committee with valid keys, but no successful DKG occurred. +// To solve this, by convention, we require the recovery epoch to re-use the Random Beacon public keys from +// the most recent successful DKG. +// Upon observing that EFM was exited, this component: +// - looks up its Random Beacon private key from the last DKG +// - looks up the Random Beacon public keys specified for the recovery epoch +// - validates that its private key is compatible (matches the public key specified) +// - if valid, persists that private key as the safe beacon key for the recovery epoch +type BeaconKeyRecovery struct { + events.Noop + log zerolog.Logger + local module.Local + state protocol.State + localDKGState storage.EpochRecoveryMyBeaconKey +} + +var _ protocol.Consumer = &BeaconKeyRecovery{} + +// NewBeaconKeyRecovery creates a new BeaconKeyRecovery instance and tries to recover the random beacon private key. 
+// This method ensures that we try to recover the random beacon private key even if we have missed the `EpochFallbackModeExited` +// protocol event (this could happen if the node has crashed between emitting and delivering the event). +// No errors are expected during normal operations. +func NewBeaconKeyRecovery( + log zerolog.Logger, + local module.Local, + state protocol.State, + localDKGState storage.EpochRecoveryMyBeaconKey, +) (*BeaconKeyRecovery, error) { + recovery := &BeaconKeyRecovery{ + Noop: events.Noop{}, + log: log.With().Str("module", "my_beacon_key_recovery").Logger(), + local: local, + state: state, + localDKGState: localDKGState, + } + + err := recovery.recoverMyBeaconPrivateKey(state.Final()) + if err != nil && !errors.Is(err, nextEpochNotYetCommitted) { + return nil, fmt.Errorf("could not recover my beacon private key when initializing: %w", err) + } + + return recovery, nil +} + +// EpochFallbackModeExited implements handler from protocol.Consumer to perform recovery of the beacon private key when +// this node has exited the epoch fallback mode. +func (b *BeaconKeyRecovery) EpochFallbackModeExited(epochCounter uint64, refBlock *flow.Header) { + b.log.Info().Msgf("epoch fallback mode exited for epoch %d", epochCounter) + err := b.recoverMyBeaconPrivateKey(b.state.AtHeight(refBlock.Height)) // refBlock must be finalized + if err != nil { + irrecoverable.Throw(context.TODO(), fmt.Errorf("failed to recover my beacon private key: %w", err)) + } +} + +// recoverMyBeaconPrivateKey performs the recovery of the random beacon private key for the next epoch by trying to use +// a safe 'my beacon key' from the current epoch (it is expected that this method will be called before entering the recovered epoch). +// If a safe 'my beacon key' is found, it will be stored in the storage.EpochRecoveryMyBeaconKey for the next epoch, +// concluding the 'my beacon key' recovery. +// If there is a safe 'my beacon key' for the next epoch, or we are not in the committed phase (DKG for next epoch is not available), +// then calling this method is a no-op. +// Expected Errors under normal operations: +// - `nextEpochNotYetCommitted` if the next epoch is not yet committed, hence we can't confirm whether we have a usable +// Random Beacon key. +func (b *BeaconKeyRecovery) recoverMyBeaconPrivateKey(final protocol.Snapshot) error { + head, err := final.Head() + if err != nil { + return fmt.Errorf("could not get head of snapshot: %w", err) + } + epochProtocolState, err := final.EpochProtocolState() + if err != nil { + return fmt.Errorf("could not get epoch protocol state: %w", err) + } + currentEpochCounter := epochProtocolState.Epoch() + + log := b.log.With(). + Uint64("height", head.Height). + Uint64("view", head.View). + Uint64("epochCounter", currentEpochCounter). + Logger() + // Only when the next epoch is committed, Random Beacon keys for that epoch's consensus participants are finally persisted + // in `localDKGState`. It is important to wait until this point, so each node can locally check whether its private key + // is consistent with its respective entry in the public key vector `EpochCommit.DKGParticipantKeys` (assuming the node + // concluded the DKG). This mechanic is identical for the happy path (`EpochCommit` event emitted by the Epoch System Smart + // Contract) and the recovery epoch (`EpochCommit` event part of `EpochRecover` event, whose content is effectively + // provided by the human governance committee).
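+ // As a concrete illustration with hypothetical counters: suppose the network entered EFM during epoch 10 and the + // governance committee submits an EpochRecover event whose recovery epoch 11 re-uses the epoch-10 DKG public keys. + // A node still holding its epoch-10 beacon private key compares that key's public image against its entry in epoch + // 11's `EpochCommit.DKGParticipantKeys`, and only on a match persists the key as safe for epoch 11.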
+ // The next epoch *not yet* being committed is only possible on the happy path, when the node boots up and checks + // that it hasn't lost a `FallbackModeExited` notification. However, when observing the `EpochFallbackModeExited` + // notification, the Epoch Phase *must* be `flow.EpochPhaseCommitted`, otherwise the state is corrupted. + // Diligently verifying this case is important to avoid a situation where the node runs into the next epoch and fails + // to access its Random Beacon key, just because this recovery here has failed before. + if epochProtocolState.EpochPhase() != flow.EpochPhaseCommitted { + log.Info(). + Str("EpochPhase", epochProtocolState.EpochPhase().String()). + Msgf("Cannot (yet) determine DKG key for next epoch, as next epoch is not yet committed") + return nextEpochNotYetCommitted + } + + nextEpoch, err := final.Epochs().NextCommitted() // guaranteed to be committed + if err != nil { + return fmt.Errorf("could not get the next committed epoch: %w", err) + } + nextEpochCounter := nextEpoch.Counter() + _, safe, err := b.localDKGState.RetrieveMyBeaconPrivateKey(nextEpochCounter) + if err != nil && !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("could not retrieve my beacon private key for the next epoch: %w", err) + } + if safe { + log.Info().Msg("my beacon private key for the next epoch is safe, nothing to do") + return nil + } + + myBeaconPrivateKey, safe, err := b.localDKGState.RetrieveMyBeaconPrivateKey(currentEpochCounter) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + log.Warn().Str(logging.KeyPotentialConfigurationProblem, "true").Msgf("no my beacon key for the current epoch has been found") + return nil + } + return fmt.Errorf("could not retrieve my beacon private key for the current epoch: %w", err) + } + if !safe { + log.Warn().Str(logging.KeyPotentialConfigurationProblem, "true").Msgf("my beacon key for the current epoch is not safe") + return nil + } + + nextEpochDKG, err := nextEpoch.DKG() + if err != nil { + return fmt.Errorf("could not get DKG for next epoch %d: %w", nextEpochCounter, err) + } + beaconPubKey, err := nextEpochDKG.KeyShare(b.local.NodeID()) + if err != nil { + if protocol.IsIdentityNotFound(err) { + log.Warn().Str(logging.KeyPotentialConfigurationProblem, "true").Msgf("current node is not part of the next epoch DKG") + return nil + } + return fmt.Errorf("could not get beacon key share for my node (%x): %w", b.local.NodeID(), err) + } + if beaconPubKey.Equals(myBeaconPrivateKey.PublicKey()) { + err := b.localDKGState.UpsertMyBeaconPrivateKey(nextEpochCounter, myBeaconPrivateKey, epochProtocolState.Entry().NextEpochCommit) + if err != nil { + return fmt.Errorf("could not overwrite my beacon private key for the next epoch: %w", err) + } + log.Warn().Msgf("successfully recovered my beacon private key for the next epoch") + } else { + log.Debug().Msgf("my beacon key is not part of the next epoch DKG") + } + + return nil +} diff --git a/module/dkg/recovery_test.go b/module/dkg/recovery_test.go new file mode 100644 index 00000000000..aadb6d975e1 --- /dev/null +++ b/module/dkg/recovery_test.go @@ -0,0 +1,378 @@ +package dkg + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/crypto" + + "github.com/onflow/flow-go/model/flow" + mockmodule "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/state/protocol" + mockprotocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + mockstorage
"github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestBeaconKeyRecovery(t *testing.T) { + suite.Run(t, new(BeaconKeyRecoverySuite)) +} + +// BeaconKeyRecoverySuite is a suite of tests for the BeaconKeyRecovery module. It contains a mocked state that can be +// used to simplify the creation of the module in multiple test cases. The suite itself is not creating the BeaconKeyRecovery +// since it contains logic that runs on the module creation, so each test case should create the module itself but using the +// mocked state. +type BeaconKeyRecoverySuite struct { + suite.Suite + head *flow.Header + local *mockmodule.Local + state *mockprotocol.State + epochProtocolState *mockprotocol.EpochProtocolState + dkgState *mockstorage.EpochRecoveryMyBeaconKey + finalSnapshot *mockprotocol.Snapshot + epochs *mockprotocol.EpochQuery + nextEpoch *mockprotocol.CommittedEpoch + + currentEpochCounter uint64 + nextEpochCounter uint64 + currentEpochPhase flow.EpochPhase + nextEpochCommit *flow.EpochCommit +} + +func (s *BeaconKeyRecoverySuite) SetupTest() { + s.local = mockmodule.NewLocal(s.T()) + s.state = mockprotocol.NewState(s.T()) + s.dkgState = mockstorage.NewEpochRecoveryMyBeaconKey(s.T()) + s.epochProtocolState = mockprotocol.NewEpochProtocolState(s.T()) + s.finalSnapshot = mockprotocol.NewSnapshot(s.T()) + s.nextEpoch = mockprotocol.NewCommittedEpoch(s.T()) + + s.head = unittest.BlockHeaderFixture() + s.currentEpochPhase = flow.EpochPhaseCommitted + s.currentEpochCounter = uint64(0) + s.nextEpochCounter = uint64(1) + entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpochCommit.Counter = s.nextEpochCounter + entry.NextEpoch.CommitID = entry.NextEpochCommit.ID() + }) + s.nextEpochCommit = entry.NextEpochCommit + + s.local.On("NodeID").Return(unittest.IdentifierFixture()).Maybe() + s.epochProtocolState.On("Epoch").Return(s.currentEpochCounter).Maybe() + s.epochProtocolState.On("EpochPhase").Return(func() flow.EpochPhase { return s.currentEpochPhase }).Maybe() + s.epochProtocolState.On("Entry").Return(entry, nil).Maybe() + s.nextEpoch.On("Counter").Return(s.nextEpochCounter, nil).Maybe() + + s.epochs = mockprotocol.NewEpochQuery(s.T()) + s.epochs.On("NextCommitted").Return(s.nextEpoch, nil).Maybe() + + s.finalSnapshot.On("Head").Return(s.head, nil) + s.finalSnapshot.On("EpochProtocolState").Return(s.epochProtocolState, nil).Maybe() + s.finalSnapshot.On("Epochs").Return(s.epochs).Maybe() + + s.state.On("Final").Return(s.finalSnapshot) +} + +// TestNewBeaconKeyRecovery_EpochIsNotCommitted tests a scenario: +// - node is not in epoch committed phase +// In a case like this there is no need to proceed since we don't have the next epoch available. +func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_EpochIsNotCommitted() { + for _, phase := range []flow.EpochPhase{ + flow.EpochPhaseFallback, + flow.EpochPhaseStaking, + flow.EpochPhaseSetup, + } { + s.currentEpochPhase = phase + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, s.dkgState) + require.NoError(s.T(), err) + require.NotNil(s.T(), recovery) + } + s.dkgState.AssertNumberOfCalls(s.T(), "UpsertMyBeaconPrivateKey", 0) +} + +// TestNewBeaconKeyRecovery_HeadException tests a scenario: +// - exception is thrown when trying to get the head of the final snapshot +// This is an unexpected error and should be propagated to the caller. 
+func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_HeadException() { + exception := errors.New("exception") + s.finalSnapshot.On("Head").Unset() + s.finalSnapshot.On("Head").Return(nil, exception).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, s.dkgState) + require.ErrorIs(s.T(), err, exception) + require.Nil(s.T(), recovery) +} + +// TestNewBeaconKeyRecovery_EpochProtocolStateException tests a scenario: +// - exception is thrown when trying to get the epoch protocol state of the final snapshot +// This is an unexpected error and should be propagated to the caller. +func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_EpochProtocolStateException() { + exception := errors.New("exception") + s.finalSnapshot.On("EpochProtocolState").Unset() + s.finalSnapshot.On("EpochProtocolState").Return(nil, exception).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, s.dkgState) + require.ErrorIs(s.T(), err, exception) + require.Nil(s.T(), recovery) +} + +// TestNewBeaconKeyRecovery_NextEpochException tests a scenario: +// - node is in epoch committed phase +// - exception is thrown when trying to retrieve the next epoch +// This is an unexpected error and should be propagated to the caller. +func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NextEpochException() { + exception := errors.New("exception") + s.epochs.On("NextCommitted").Unset() + s.epochs.On("NextCommitted").Return(nil, exception).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, s.dkgState) + require.ErrorIs(s.T(), err, exception) + require.Nil(s.T(), recovery) +} + +// TestNewBeaconKeyRecovery_NextEpochRetrieveMyBeaconPrivateKeyException tests a scenario: +// - node is in epoch committed phase +// - exception is thrown when trying to check if there is a safe beacon key for the next epoch +// This is an unexpected error and should be propagated to the caller. +func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NextEpochRetrieveMyBeaconPrivateKeyException() { + exception := errors.New("exception") + s.dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, exception).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, s.dkgState) + require.ErrorIs(s.T(), err, exception) + require.Nil(s.T(), recovery) +} + +// TestNewBeaconKeyRecovery_KeyAlreadyRecovered tests a scenario: +// - node is in epoch committed phase +// - node has a safe beacon key for the next epoch +// In a case like this there is no need for recovery and we should exit early. +func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_KeyAlreadyRecovered() { + s.dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return( + unittest.PrivateKeyFixture(crypto.BLSBLS12381), true, nil).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, s.dkgState) + require.NoError(s.T(), err) + require.NotNil(s.T(), recovery) + + s.dkgState.AssertNumberOfCalls(s.T(), "UpsertMyBeaconPrivateKey", 0) +} + +// TestNewBeaconKeyRecovery_NoSafeMyBeaconPrivateKey tests a scenario: +// - node is in epoch committed phase +// - node doesn't have a safe beacon key for the next epoch +// - node doesn't have a safe beacon key for the current epoch +// We can't do much in this case since there is no key to recover.
+func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NoSafeMyBeaconPrivateKey() { + s.Run("no-safe-key", func() { + dkgState := mockstorage.NewEpochRecoveryMyBeaconKey(s.T()) + dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return( + nil, false, nil).Once() + dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return( + nil, false, nil).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, dkgState) + require.NoError(s.T(), err) + require.NotNil(s.T(), recovery) + + dkgState.AssertNumberOfCalls(s.T(), "UpsertMyBeaconPrivateKey", 0) + }) + s.Run("err-not-found", func() { + dkgState := mockstorage.NewEpochRecoveryMyBeaconKey(s.T()) + dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return( + nil, false, nil).Once() + dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return( + nil, false, storage.ErrNotFound).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, dkgState) + require.NoError(s.T(), err) + require.NotNil(s.T(), recovery) + + dkgState.AssertNumberOfCalls(s.T(), "UpsertMyBeaconPrivateKey", 0) + }) + s.Run("exception", func() { + exception := errors.New("exception") + dkgState := mockstorage.NewEpochRecoveryMyBeaconKey(s.T()) + dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return( + nil, false, nil).Once() + dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return( + nil, false, exception).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, dkgState) + require.ErrorIs(s.T(), err, exception) + require.Nil(s.T(), recovery) + + dkgState.AssertNumberOfCalls(s.T(), "UpsertMyBeaconPrivateKey", 0) + }) +} + +// TestNewBeaconKeyRecovery_NextEpochDKGException tests a scenario: +// - node is in epoch committed phase +// - node doesn't have a safe beacon key for the next epoch +// - node has a safe beacon key for the current epoch +// - exception is thrown when trying to get DKG for next epoch +// This is an unexpected error and should be propagated to the caller. +func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NextEpochDKGException() { + s.dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() + // have a safe key for the current epoch + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) + s.dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() + + exception := errors.New("exception") + s.nextEpoch.On("DKG").Unset() + s.nextEpoch.On("DKG").Return(nil, exception).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, s.dkgState) + require.ErrorIs(s.T(), err, exception) + require.Nil(s.T(), recovery) +} + +// TestNewBeaconKeyRecovery_NextEpochKeyShareException tests a scenario: +// - node is in epoch committed phase +// - node doesn't have a safe beacon key for the next epoch +// - node has a safe beacon key for the current epoch +// - exception is thrown when trying to get beacon public key share for this node. +// This is an unexpected error and should be propagated to the caller. 
+func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NextEpochKeyShareException() { + s.dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() + // have a safe key for the current epoch + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) + s.dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() + + exception := errors.New("exception") + dkg := mockprotocol.NewDKG(s.T()) + dkg.On("KeyShare", s.local.NodeID()).Return(nil, exception).Once() + s.nextEpoch.On("DKG").Return(dkg, nil).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, s.dkgState) + require.ErrorIs(s.T(), err, exception) + require.Nil(s.T(), recovery) +} + +// TestNewBeaconKeyRecovery_NodeIsNotPartOfNextEpochDKG tests a scenario: +// - node is in epoch committed phase +// - node doesn't have a safe beacon key for the next epoch +// - node has a safe beacon key for the current epoch +// - node is not part of the DKG for the next epoch (no pub key or priv/pub key mismatch) +// In a case like this we can't recover the key, since we don't have the necessary data or are not authorized to participate. +func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_NodeIsNotPartOfNextEpochDKG() { + s.Run("no-pub-key", func() { + dkgState := mockstorage.NewEpochRecoveryMyBeaconKey(s.T()) + dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() + + // have a safe key for the current epoch + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) + dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() + // node is not part of the DKG for the next epoch + dkg := mockprotocol.NewDKG(s.T()) + dkg.On("KeyShare", s.local.NodeID()).Return(nil, protocol.IdentityNotFoundError{}).Once() + s.nextEpoch.On("DKG").Return(dkg, nil).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, dkgState) + require.NoError(s.T(), err) + require.NotNil(s.T(), recovery) + + dkgState.AssertNumberOfCalls(s.T(), "UpsertMyBeaconPrivateKey", 0) + }) + s.Run("pub-key-mismatch", func() { + dkgState := mockstorage.NewEpochRecoveryMyBeaconKey(s.T()) + dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() + + // have a safe key for the current epoch + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) + dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() + // DKG doesn't contain a public key for our private key. + dkg := mockprotocol.NewDKG(s.T()) + randomPubKey := unittest.PublicKeysFixture(1, crypto.BLSBLS12381)[0] + dkg.On("KeyShare", s.local.NodeID()).Return(randomPubKey, nil).Once() + s.nextEpoch.On("DKG").Return(dkg, nil).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, dkgState) + require.NoError(s.T(), err) + require.NotNil(s.T(), recovery) + + dkgState.AssertNumberOfCalls(s.T(), "UpsertMyBeaconPrivateKey", 0) + }) +} + +// TestNewBeaconKeyRecovery_RecoverKey tests a scenario: +// - node is in epoch committed phase +// - node doesn't have a safe beacon key for the next epoch +// - node has a safe beacon key for the current epoch +// - node is part of the DKG for the next epoch +// In this case, the implementation should successfully recover the Random Beacon Private key from the current epoch.
+func (s *BeaconKeyRecoverySuite) TestNewBeaconKeyRecovery_RecoverKey() { + performTest := func(dkgState *mockstorage.EpochRecoveryMyBeaconKey) { + // have a safe key for the current epoch + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) + dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() + // node is part of the DKG for the next epoch + dkg := mockprotocol.NewDKG(s.T()) + dkg.On("KeyShare", s.local.NodeID()).Return(myBeaconKey.PublicKey(), nil).Once() + s.nextEpoch.On("DKG").Return(dkg, nil).Once() + + dkgState.On("UpsertMyBeaconPrivateKey", s.nextEpochCounter, myBeaconKey, s.nextEpochCommit).Return(nil).Once() + + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, dkgState) + require.NoError(s.T(), err) + require.NotNil(s.T(), recovery) + + dkgState.AssertNumberOfCalls(s.T(), "UpsertMyBeaconPrivateKey", 1) + } + + s.Run("err-not-found-for-key-next-epoch", func() { + dkgState := mockstorage.NewEpochRecoveryMyBeaconKey(s.T()) + dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, storage.ErrNotFound).Once() + performTest(dkgState) + }) + s.Run("key-for-next-epoch-is-not-safe", func() { + dkgState := mockstorage.NewEpochRecoveryMyBeaconKey(s.T()) + dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() + performTest(dkgState) + }) +} + +// TestEpochFallbackModeExited confirms successful key recovery +// when the recovery process is triggered by observing a `EpochFallbackModeExited` notification: +// - node starts in epoch fallback phase +// - when creating NewBeaconKeyRecovery we shouldn't attempt to recover the key since the epoch phase is not committed. +// - node leaves EFM and transitions to the epoch committed phase +// - node doesn't have a safe beacon key for the next epoch +// - node has a safe beacon key for the current epoch +// - node is part of the DKG for the next epoch +// In this case, the implementation should successfully recover the Random Beacon Private key from the current epoch. 
+func (s *BeaconKeyRecoverySuite) TestEpochFallbackModeExited() { + // start in epoch fallback phase + s.currentEpochPhase = flow.EpochPhaseFallback + + // this shouldn't perform any recovery + recovery, err := NewBeaconKeyRecovery(unittest.Logger(), s.local, s.state, s.dkgState) + require.NoError(s.T(), err) + require.NotNil(s.T(), recovery) + s.dkgState.AssertNumberOfCalls(s.T(), "UpsertMyBeaconPrivateKey", 0) + + s.state.On("AtHeight", s.head.Height).Return(s.finalSnapshot, nil).Once() + + // transition to epoch committed phase + s.currentEpochPhase = flow.EpochPhaseCommitted + + // don't have a key for the next epoch + s.dkgState.On("RetrieveMyBeaconPrivateKey", s.nextEpochCounter).Return(nil, false, nil).Once() + + // have a safe key for the current epoch + myBeaconKey := unittest.PrivateKeyFixture(crypto.BLSBLS12381) + s.dkgState.On("RetrieveMyBeaconPrivateKey", s.currentEpochCounter).Return(myBeaconKey, true, nil).Once() + // node is part of the DKG for the next epoch + dkg := mockprotocol.NewDKG(s.T()) + dkg.On("KeyShare", s.local.NodeID()).Return(myBeaconKey.PublicKey(), nil).Once() + s.nextEpoch.On("DKG").Return(dkg, nil).Once() + + s.dkgState.On("UpsertMyBeaconPrivateKey", s.nextEpochCounter, myBeaconKey, s.nextEpochCommit).Return(nil).Once() + + recovery.EpochFallbackModeExited(s.currentEpochCounter, s.head) + s.dkgState.AssertNumberOfCalls(s.T(), "UpsertMyBeaconPrivateKey", 1) +} diff --git a/module/dkg_broker.go b/module/dkg_broker.go index 49ebb0ad051..7bc02e2fb99 100644 --- a/module/dkg_broker.go +++ b/module/dkg_broker.go @@ -1,10 +1,8 @@ -//go:build relic -// +build relic - package module import ( - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" ) diff --git a/module/epochs.go b/module/epochs.go index 6964959e950..ab8d2c59d8e 100644 --- a/module/epochs.go +++ b/module/epochs.go @@ -13,12 +13,15 @@ import ( type ClusterRootQCVoter interface { // Vote handles the full procedure of generating a vote, submitting it to the epoch - // smart contract, and verifying submission. It is safe to run Vote multiple - // times within a single setup phase. + // smart contract, and verifying submission. This logic should run as part of the Epoch + // Setup Phase, i.e. at a time when the next epoch has not yet been committed. Hence, + // this function takes the [protocol.TentativeEpoch] information as input. + // It is safe to run `Vote` multiple times within a single Epoch Setup Phase. + // CAUTION: epoch transition might not happen as described by [protocol.TentativeEpoch]. // Error returns: // - epochs.ClusterQCNoVoteError if we fail to vote for a benign reason // - generic error in case of critical unexpected failure - Vote(context.Context, protocol.Epoch) error + Vote(context.Context, protocol.TentativeEpoch) error } // QCContractClient enables interacting with the cluster QC aggregator smart @@ -46,16 +49,14 @@ type QCContractClient interface { } // EpochLookup enables looking up epochs by view. +// Only Epochs that are fully committed can be retrieved. // CAUTION: EpochLookup should only be used for querying the previous, current, or next epoch. type EpochLookup interface { - // EpochForViewWithFallback returns the counter of the epoch that the input view belongs to. - // If epoch fallback has been triggered, returns the last committed epoch counter - // in perpetuity for any inputs beyond the last committed epoch view range. 
- // For example, if we trigger epoch fallback during epoch 10, and reach the final - // view of epoch 10 before epoch 11 has finished being setup, this function will - // return 10 even for input views beyond the final view of epoch 10. + // EpochForView returns the counter of the epoch that the input view belongs to. + // Note: The EpochLookup component processes EpochExtended notifications which will + // extend the view range for the latest epoch. // // Returns model.ErrViewForUnknownEpoch if the input does not fall within the range of a known epoch. - EpochForViewWithFallback(view uint64) (epochCounter uint64, err error) + EpochForView(view uint64) (epochCounter uint64, err error) } diff --git a/module/epochs/base_client.go b/module/epochs/base_client.go index eea76558639..a0b845fd19a 100644 --- a/module/epochs/base_client.go +++ b/module/epochs/base_client.go @@ -12,6 +12,7 @@ import ( sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" + "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/module" @@ -34,11 +35,10 @@ var ( type BaseClient struct { Log zerolog.Logger // default logger - ContractAddress string // contract address - FlowClient module.SDKClientWrapper // flow access node client + FlowClient module.SDKClientWrapper // flow access node client AccountAddress sdk.Address // account belonging to node interacting with the contract - AccountKeyIndex uint // account key index + AccountKeyIndex uint32 // account key index Signer sdkcrypto.Signer // signer used to sign transactions } @@ -47,14 +47,12 @@ func NewBaseClient( log zerolog.Logger, flowClient module.SDKClientWrapper, accountAddress string, - accountKeyIndex uint, + accountKeyIndex uint32, signer sdkcrypto.Signer, - contractAddress string, ) *BaseClient { return &BaseClient{ Log: log, - ContractAddress: contractAddress, FlowClient: flowClient, AccountKeyIndex: accountKeyIndex, Signer: signer, diff --git a/module/epochs/epoch_config.go b/module/epochs/epoch_config.go index 2fdb317c51f..67de2ae6837 100644 --- a/module/epochs/epoch_config.go +++ b/module/epochs/epoch_config.go @@ -4,7 +4,7 @@ import ( "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" ) @@ -22,7 +22,7 @@ type EpochConfig struct { RandomSource cadence.String CollectorClusters flow.AssignmentList ClusterQCs []*flow.QuorumCertificate - DKGPubKeys []crypto.PublicKey + DKGPubKeys []encodable.RandomBeaconPubKey } // DefaultEpochConfig returns an EpochConfig with default values used for diff --git a/module/epochs/epoch_lookup.go b/module/epochs/epoch_lookup.go index 195c72159f7..629f755759e 100644 --- a/module/epochs/epoch_lookup.go +++ b/module/epochs/epoch_lookup.go @@ -1,11 +1,10 @@ package epochs import ( + "errors" "fmt" "sync" - "go.uber.org/atomic" - "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" @@ -15,45 +14,96 @@ import ( "github.com/onflow/flow-go/state/protocol/events" ) -// epochRange captures the counter and view range of an epoch (inclusive on both ends) -type epochRange struct { - counter uint64 - firstView uint64 - finalView uint64 +const ( + invalidExtensionFinalView = "sanity check failed: latest epoch final view %d greater than extension final view %d" + mismatchEpochCounter = "sanity check failed: latest epoch counter %d does not match extension epoch counter %d" + 
invalidEpochViewSequence = "sanity check failed: first view of the epoch extension %d should immediately start after the final view of the latest epoch %d" +) + +// viewRange captures the counter and view range of an epoch (inclusive on both ends). +// The zero value (epochCounter = firstView = finalView = 0) can conceptually not occur: +// even the genesis epoch '0' is required to have some views to prepare the subsequent epoch. +// Therefore, we use the zero value to represent 'undefined'. +type viewRange struct { + epochCounter uint64 + firstView uint64 + finalView uint64 } -// exists returns true when the epochRange is initialized (anything besides the zero value for the struct). +// undefined returns true when the viewRange is not initialized (equal to the zero value for the struct). // It is useful for checking existence while iterating the epochRangeCache. -func (er epochRange) exists() bool { - return er != epochRange{} +func (er viewRange) undefined() bool { + return er.epochCounter == 0 && er.firstView == 0 && er.finalView == 0 } -// epochRangeCache stores at most the 3 latest epoch ranges. -// Ranges are ordered by counter (ascending) and right-aligned. -// For example, if we only have one epoch cached, `epochRangeCache[0]` and `epochRangeCache[1]` are `nil`. -// Not safe for concurrent use. -type epochRangeCache [3]epochRange - -// latest returns the latest cached epoch range, or nil if no epochs are cached. -func (cache *epochRangeCache) latest() epochRange { - return cache[2] +// epochRangeCache stores at most the 3 latest epoch `viewRange`s. +// viewRanges are ordered by counter (ascending) and right-aligned. For example, if we only have one epoch +// cached, `epochRangeCache[0]` and `epochRangeCache[1]` are `undefined`. epochRangeCache enforces the +// following conventions: +// 1. For two adjacent epochs, their view ranges must form a continuous sequence without gaps or overlap. +// Formally: epochRangeCache[i].finalView + 1 = epochRangeCache[i+1].firstView must hold for _all_ cached epochs. +// 2. For any two adjacent epochs, their view counters must satisfy: +// epochRangeCache[i].epochCounter + 1 = epochRangeCache[i+1].epochCounter +// +// NOT safe for CONCURRENT use. +type epochRangeCache [3]viewRange + +// latest returns the latest cached epoch range. Follows map semantics +// to indicate whether latest is known. If the boolean return value is false, the returned view range +// is the zero value, i.e. undefined. +func (cache *epochRangeCache) latest() (viewRange, bool) { + return cache[2], !cache[2].undefined() } -// combinedRange returns the endpoints of the combined view range of all cached -// epochs. In particular, we return the lowest firstView and the greatest finalView. -// At least one epoch must already be cached, otherwise this function will panic. -func (cache *epochRangeCache) combinedRange() (firstView uint64, finalView uint64) { +// extendLatestEpoch updates the final view of the latest epoch with the final view of the epoch extension. +// No errors are expected during normal operation. +func (cache *epochRangeCache) extendLatestEpoch(epochCounter uint64, extension flow.EpochExtension) error { + latestEpoch := cache[2] + // sanity check: latest epoch should already be cached.
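+ // For orientation, a worked example with hypothetical values: if the latest cached range is {epochCounter: 3, firstView: 300, finalView: 399}, then a valid extension for epoch counter 3 is {FirstView: 400, FinalView: 499}, yielding [300,499]; an extension with FinalView 399 is a duplicate and a no-op, and any other mismatch trips one of the sanity checks below.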
+ if latestEpoch.undefined() { + return fmt.Errorf("sanity check failed: latest epoch does not exist") + } + + // duplicate events are no-ops + if latestEpoch.finalView == extension.FinalView { + return nil + } + + // sanity check: `extension.FinalView` should be greater than final view of latest epoch + if latestEpoch.finalView > extension.FinalView { + return fmt.Errorf(invalidExtensionFinalView, latestEpoch.finalView, extension.FinalView) + } + + // sanity check: epoch extension should have the same epoch counter as the latest epoch + if latestEpoch.epochCounter != epochCounter { + return fmt.Errorf(mismatchEpochCounter, latestEpoch.epochCounter, epochCounter) + } + + // sanity check: first view of the epoch extension should immediately start after the final view of the latest epoch. + if latestEpoch.finalView+1 != extension.FirstView { + return fmt.Errorf(invalidEpochViewSequence, extension.FirstView, latestEpoch.finalView) + } + + cache[2].finalView = extension.FinalView + return nil +} - // low end of the range is the first view of the first cached epoch - for _, epoch := range cache { - if epoch.exists() { - firstView = epoch.firstView - break +// cachedEpochs returns a slice of the cached epochs in order. The return slice is guaranteed to satisfy: +// 1. For two adjacent epochs, their view ranges form a continuous sequence without gaps or overlap. +// Formally: epochRangeCache[i].finalView + 1 = epochRangeCache[i+1].firstView +// 2. For any two adjacent epochs, their epoch counters increment by one +// epochRangeCache[i].epochCounter + 1 = epochRangeCache[i+1].epochCounter +// 3. All slice elements are different from the zero value (i.e. not undefined). +// +// If no elements are cached, the return slice is empty/nil. It may also contain only a single epoch. +func (cache *epochRangeCache) cachedEpochs() []viewRange { + for i, epoch := range cache { + if epoch.undefined() { + continue } + return cache[i:3] } - // high end of the range is the final view of the latest cached epoch - finalView = cache.latest().finalView - return + return nil } // add inserts an epoch range to the cache. @@ -61,17 +111,15 @@ func (cache *epochRangeCache) combinedRange() (firstView uint64, finalView uint6 // Adding the same epoch multiple times is a no-op. // Guarantees ordering and alignment properties of epochRangeCache are preserved. // No errors are expected during normal operation. -func (cache *epochRangeCache) add(epoch epochRange) error { - - // sanity check: ensure the epoch we are adding is considered a non-zero value - // this helps ensure internal consistency in this component, but if we ever trip this check, something is seriously wrong elsewhere - if !epoch.exists() { +func (cache *epochRangeCache) add(epoch viewRange) error { + // sanity check: ensure the epoch we are adding is a non-zero value. This helps ensure internal consistency + // in this component, but if we ever trip this check, something is seriously wrong elsewhere!
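+ // Example of the conventions enforced here (hypothetical values): with latest cached range {epochCounter: 2, firstView: 200, finalView: 299}, adding {3, 300, 399} succeeds, re-adding {2, 200, 299} is a no-op, while {4, 400, 499} or {3, 301, 400} fail the sequential-counter / sequential-view checks below.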
+ if epoch.undefined() { return fmt.Errorf("sanity check failed: caller attempted to cache invalid zero epoch") } - latestCachedEpoch := cache.latest() - // initial case - no epoch ranges are stored yet - if !latestCachedEpoch.exists() { + latestCachedEpoch, exists := cache.latest() + if !exists { // initial case - no epoch ranges are stored yet cache[2] = epoch return nil } @@ -82,8 +130,8 @@ func (cache *epochRangeCache) add(epoch epochRange) error { } // sanity check: ensure counters/views are sequential - if epoch.counter != latestCachedEpoch.counter+1 { - return fmt.Errorf("non-sequential epoch counters: adding epoch %d when latest cached epoch is %d", epoch.counter, latestCachedEpoch.counter) + if epoch.epochCounter != latestCachedEpoch.epochCounter+1 { + return fmt.Errorf("non-sequential epoch counters: adding epoch %d when latest cached epoch is %d", epoch.epochCounter, latestCachedEpoch.epochCounter) } if epoch.firstView != latestCachedEpoch.finalView+1 { return fmt.Errorf("non-sequential epoch view ranges: adding range [%d,%d] when latest cached range is [%d,%d]", @@ -100,14 +148,16 @@ func (cache *epochRangeCache) add(epoch epochRange) error { } // EpochLookup implements the EpochLookup interface using protocol state to match views to epochs. +// Only Epochs that are fully committed can be retrieved. // CAUTION: EpochLookup should only be used for querying the previous, current, or next epoch. type EpochLookup struct { - state protocol.State - mu sync.RWMutex - epochs epochRangeCache - committedEpochsCh chan *flow.Header // protocol events for newly committed epochs (the first block of the epoch is passed over the channel) - epochFallbackIsTriggered *atomic.Bool // true when epoch fallback is triggered - events.Noop // implements protocol.Consumer + state protocol.State + mu sync.RWMutex + epochs epochRangeCache + // epochEvents queues functors for processing epoch-related protocol events. + // Events will be processed in the order they are received (fifo). + epochEvents chan func() error + events.Noop // implements protocol.Consumer component.Component } @@ -117,83 +167,68 @@ var _ module.EpochLookup = (*EpochLookup)(nil) // NewEpochLookup instantiates a new EpochLookup func NewEpochLookup(state protocol.State) (*EpochLookup, error) { lookup := &EpochLookup{ - state: state, - committedEpochsCh: make(chan *flow.Header, 1), - epochFallbackIsTriggered: atomic.NewBool(false), + state: state, + epochEvents: make(chan func() error, 20), } lookup.Component = component.NewComponentManagerBuilder(). AddWorker(lookup.handleProtocolEvents). Build() - final := state.Final() - - // we cache the previous epoch, if one exists - exists, err := protocol.PreviousEpochExists(final) + epochs := state.Final().Epochs() + prev, err := epochs.Previous() if err != nil { - return nil, fmt.Errorf("could not check previous epoch exists: %w", err) - } - if exists { - err := lookup.cacheEpoch(final.Epochs().Previous()) + if !errors.Is(err, protocol.ErrNoPreviousEpoch) { + return nil, irrecoverable.NewExceptionf("unexpected error while retrieving previous epoch: %w", err) + } + // `ErrNoPreviousEpoch` is an expected edge case during normal operations (e.g. 
we are in the first epoch after a spork) + // continue without caching the previous epoch + } else { // previous epoch was successfully retrieved + err = lookup.cacheEpoch(prev) if err != nil { return nil, fmt.Errorf("could not prepare previous epoch: %w", err) } } // we always cache the current epoch - err = lookup.cacheEpoch(final.Epochs().Current()) + curr, err := epochs.Current() + if err != nil { + return nil, fmt.Errorf("could not get current epoch: %w", err) + } + err = lookup.cacheEpoch(curr) if err != nil { return nil, fmt.Errorf("could not prepare current epoch: %w", err) } // we cache the next epoch, if it is committed - phase, err := final.Phase() + nextEpoch, err := epochs.NextCommitted() if err != nil { - return nil, fmt.Errorf("could not check epoch phase: %w", err) - } - if phase == flow.EpochPhaseCommitted { - err := lookup.cacheEpoch(final.Epochs().Next()) + if !errors.Is(err, protocol.ErrNextEpochNotCommitted) { + return nil, irrecoverable.NewExceptionf("unexpected error retrieving next epoch: %w", err) + } + // receiving an `ErrNextEpochNotCommitted` is expected during the happy path + } else { // next epoch was successfully retrieved + err = lookup.cacheEpoch(nextEpoch) if err != nil { - return nil, fmt.Errorf("could not prepare previous epoch: %w", err) + return nil, fmt.Errorf("could not cache next committed epoch: %w", err) } } - // if epoch fallback was triggered, note it here - triggered, err := state.Params().EpochFallbackTriggered() - if err != nil { - return nil, fmt.Errorf("could not check epoch fallback: %w", err) - } - if triggered { - lookup.epochFallbackIsTriggered.Store(true) - } - return lookup, nil } // cacheEpoch caches the given epoch's view range. Must only be called with committed epochs. // No errors are expected during normal operation. -func (lookup *EpochLookup) cacheEpoch(epoch protocol.Epoch) error { - counter, err := epoch.Counter() - if err != nil { - return err - } - firstView, err := epoch.FirstView() - if err != nil { - return err - } - finalView, err := epoch.FinalView() - if err != nil { - return err - } - - cachedEpoch := epochRange{ - counter: counter, - firstView: firstView, - finalView: finalView, +func (lookup *EpochLookup) cacheEpoch(epoch protocol.CommittedEpoch) error { + counter := epoch.Counter() + cachedEpoch := viewRange{ + epochCounter: counter, + firstView: epoch.FirstView(), + finalView: epoch.FinalView(), } lookup.mu.Lock() - err = lookup.epochs.add(cachedEpoch) + err := lookup.epochs.add(cachedEpoch) lookup.mu.Unlock() if err != nil { return fmt.Errorf("could not add epoch %d: %w", counter, err) @@ -201,59 +236,57 @@ func (lookup *EpochLookup) cacheEpoch(epoch protocol.Epoch) error { return nil } -// EpochForViewWithFallback returns the counter of the epoch that the input view belongs to. -// If epoch fallback has been triggered, returns the last committed epoch counter -// in perpetuity for any inputs beyond the last committed epoch view range. -// For example, if we trigger epoch fallback during epoch 10, and reach the final -// view of epoch 10 before epoch 11 has finished being setup, this function will -// return 10 even for input views beyond the final view of epoch 10. +// EpochForView returns the counter of the epoch that the input view belongs to. +// Note: The EpochLookup component processes EpochExtended notifications which will +// extend the view range for the latest epoch. // // Returns model.ErrViewForUnknownEpoch if the input does not fall within the range of a known epoch.
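+// For example, with cached epochs {1: [100,199]} and {2: [200,299]} (hypothetical values): EpochForView(150) returns 1, EpochForView(250) returns 2, and both EpochForView(50) and EpochForView(350) return model.ErrViewForUnknownEpoch (the latter until the epoch containing view 350 is committed, or an extension raises the final view).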
-func (lookup *EpochLookup) EpochForViewWithFallback(view uint64) (uint64, error) { +func (lookup *EpochLookup) EpochForView(view uint64) (uint64, error) { lookup.mu.RLock() defer lookup.mu.RUnlock() - firstView, finalView := lookup.epochs.combinedRange() + cachedEpochs := lookup.epochs.cachedEpochs() + l := len(cachedEpochs) + if l == 0 { + return 0, model.ErrViewForUnknownEpoch + } + + // by convention, `epochRangeCache` guarantees that epochs are successive, without gaps or overlaps + // in their epoch counters and view ranges. Therefore, we can just chronologically walk through the + // epochs. This is an internal lookup, so we optimize for the happy path and proceed in reverse chronological order // LEGEND: // * -> view argument // [----| -> epoch view range - // view is before any known epochs - // ---*---[----|----|----]------- - if view < firstView { - return 0, model.ErrViewForUnknownEpoch - } - // view is after any known epochs + // view is after the known epochs // -------[----|----|----]---*--- - if view > finalView { - // if epoch fallback is triggered, we treat this view as part of the last committed epoch - if lookup.epochFallbackIsTriggered.Load() { - return lookup.epochs.latest().counter, nil - } - // otherwise, we are waiting for the epoch including this view to be committed + if view > cachedEpochs[l-1].finalView { + // The epoch including this view may be close to being committed. However, until an epoch + // is committed, views cannot be conclusively assigned to it. return 0, model.ErrViewForUnknownEpoch } - // view is within a known epoch - for _, epoch := range lookup.epochs { - if !epoch.exists() { - continue - } - if epoch.firstView <= view && view <= epoch.finalView { - return epoch.counter, nil + // -------[----|-*--|----]------- + for i := l - 1; i >= 0; i-- { + if cachedEpochs[i].firstView <= view && view <= cachedEpochs[i].finalView { + return cachedEpochs[i].epochCounter, nil } } + // view is before any known epochs + // ---*---[----|----|----]------- + if view < cachedEpochs[0].firstView { + return 0, model.ErrViewForUnknownEpoch + } // reaching this point indicates a corrupted state or internal bug - return 0, fmt.Errorf("sanity check failed: cached epochs (%v) does not contain input view %d", lookup.epochs, view) + return 0, fmt.Errorf("sanity check failed: input view %d falls within the view range [%d, %d] of the cached epochs, but none contains it", view, cachedEpochs[0].firstView, cachedEpochs[l-1].finalView) } // handleProtocolEvents processes queued Epoch events `EpochCommittedPhaseStarted` -// and `EpochEmergencyFallbackTriggered`. This function permanently utilizes a worker +// and `EpochExtended`. This function permanently utilizes a worker // routine until the `Component` terminates. // When we observe a new epoch being committed, we compute -// the leader selection and cache static info for the epoch. When we observe -// epoch emergency fallback being triggered, we inject a fallback epoch. +// the leader selection and cache static info for the epoch.
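+// The queue-of-functors pattern works as follows: each notification handler enqueues a closure that captures the event payload (see `EpochExtended` and `EpochCommittedPhaseStarted` below), and this single worker drains the queue, so event processing is serialized on one goroutine.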
func (lookup *EpochLookup) handleProtocolEvents(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { ready() @@ -261,9 +294,8 @@ func (lookup *EpochLookup) handleProtocolEvents(ctx irrecoverable.SignalerContex select { case <-ctx.Done(): return - case block := <-lookup.committedEpochsCh: - epoch := lookup.state.AtBlockID(block.ID()).Epochs().Next() - err := lookup.cacheEpoch(epoch) + case processEventFn := <-lookup.epochEvents: + err := processEventFn() if err != nil { ctx.Throw(err) } @@ -271,12 +303,42 @@ func (lookup *EpochLookup) handleProtocolEvents(ctx irrecoverable.SignalerContex } } -// EpochCommittedPhaseStarted informs the `committee.Consensus` that the block starting the Epoch Committed Phase has been finalized. -func (lookup *EpochLookup) EpochCommittedPhaseStarted(_ uint64, first *flow.Header) { - lookup.committedEpochsCh <- first +// EpochExtended listens to `EpochExtended` protocol notifications, which the Protocol +// State emits when we finalize the first block whose Protocol State further extends the current +// epoch. The next epoch cannot yet be committed in this case, because epoch extensions are only added +// when there is no subsequent committed epoch that we could transition into, while the current epoch is nearing +// its end. The notification is queued for async processing by the worker. +// Specifically, we update the final view of the latest cached epoch range to the final view of the extension. +// We must process _all_ `EpochExtended` notifications. +// No errors are expected to be returned by the process callback during normal operation. +func (lookup *EpochLookup) EpochExtended(epochCounter uint64, _ *flow.Header, extension flow.EpochExtension) { + lookup.epochEvents <- func() error { + // epochRangeCache is not safe for concurrent use, so we mutate it only while holding the lock + lookup.mu.Lock() + defer lookup.mu.Unlock() + return lookup.epochs.extendLatestEpoch(epochCounter, extension) + } } -// EpochEmergencyFallbackTriggered passes the protocol event to the worker thread. -func (lookup *EpochLookup) EpochEmergencyFallbackTriggered() { - lookup.epochFallbackIsTriggered.Store(true) +// EpochCommittedPhaseStarted ingests the respective protocol notifications, +// which the Protocol State emits when we finalize the first block of the Epoch Committed Phase. +// The notification is queued for async processing by the worker. Specifically, we cache the next epoch in the EpochLookup. +// We must process _all_ `EpochCommittedPhaseStarted` notifications. +// No errors are expected to be returned by the process callback during normal operation.
+func (lookup *EpochLookup) EpochCommittedPhaseStarted(_ uint64, first *flow.Header) { + lookup.epochEvents <- func() error { + epoch, err := lookup.state.AtBlockID(first.ID()).Epochs().NextCommitted() + if err != nil { + return fmt.Errorf("could not get next committed epoch: %w", err) + } + err = lookup.cacheEpoch(epoch) + if err != nil { + return fmt.Errorf("failed to cache next epoch: %w", err) + } + + return nil + } } diff --git a/module/epochs/epoch_lookup_test.go b/module/epochs/epoch_lookup_test.go index 7c278542e43..41e7efc079b 100644 --- a/module/epochs/epoch_lookup_test.go +++ b/module/epochs/epoch_lookup_test.go @@ -2,18 +2,18 @@ package epochs import ( "context" + "fmt" "sync" "testing" "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/state/protocol" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" "github.com/onflow/flow-go/utils/unittest/mocks" @@ -28,16 +28,15 @@ type EpochLookupSuite struct { snapshot *mockprotocol.Snapshot params *mockprotocol.Params - // backend for mocked functions - mu sync.Mutex // protects access to epochFallbackTriggered and phase - epochFallbackTriggered bool - phase flow.EpochPhase + // protects access to phase, and is used to invoke funcs while holding the lock + mu sync.Mutex + phase flow.EpochPhase // config for each epoch currentEpochCounter uint64 - prevEpoch epochRange - currEpoch epochRange - nextEpoch epochRange + prevEpoch viewRange + currEpoch viewRange + nextEpoch viewRange lookup *EpochLookup cancel context.CancelFunc @@ -51,9 +50,9 @@ func (suite *EpochLookupSuite) SetupTest() { suite.currentEpochCounter = uint64(1) suite.phase = flow.EpochPhaseStaking - suite.prevEpoch = epochRange{counter: suite.currentEpochCounter - 1, firstView: 100, finalView: 199} - suite.currEpoch = epochRange{counter: suite.currentEpochCounter, firstView: 200, finalView: 299} - suite.nextEpoch = epochRange{counter: suite.currentEpochCounter + 1, firstView: 300, finalView: 399} + suite.prevEpoch = viewRange{epochCounter: suite.currentEpochCounter - 1, firstView: 100, finalView: 199} + suite.currEpoch = viewRange{epochCounter: suite.currentEpochCounter, firstView: 200, finalView: 299} + suite.nextEpoch = viewRange{epochCounter: suite.currentEpochCounter + 1, firstView: 300, finalView: 399} suite.state = new(mockprotocol.State) suite.snapshot = new(mockprotocol.Snapshot) @@ -61,13 +60,13 @@ suite.epochQuery = mocks.NewEpochQuery(suite.T(), suite.currentEpochCounter) suite.snapshot.On("Epochs").Return(suite.epochQuery) - suite.snapshot.On("Phase").Return( + suite.snapshot.On("EpochPhase").Return( func() flow.EpochPhase { return suite.Phase() }, func() error { return nil }) - suite.params.On("EpochFallbackTriggered").Return( - func() bool { return suite.EpochFallbackTriggered() }, - func() error { return nil }) + epochProtocolState := mockprotocol.NewEpochProtocolState(suite.T()) + + suite.snapshot.On("EpochProtocolState").Return(epochProtocolState, nil) suite.state.On("Final").Return(suite.snapshot) suite.state.On("Params").Return(suite.params) @@ -87,12 +86,6 @@ func (suite *EpochLookupSuite) WithLock(f func()) { suite.mu.Unlock() } -func (suite *EpochLookupSuite) EpochFallbackTriggered() bool { -
suite.mu.Lock() - defer suite.mu.Unlock() - return suite.epochFallbackTriggered -} - func (suite *EpochLookupSuite) Phase() flow.EpochPhase { suite.mu.Lock() defer suite.mu.Unlock() @@ -100,12 +93,12 @@ func (suite *EpochLookupSuite) Phase() flow.EpochPhase { } // CommitEpochs adds the new epochs to the state. -func (suite *EpochLookupSuite) CommitEpochs(epochs ...epochRange) { +func (suite *EpochLookupSuite) CommitEpochs(epochs ...viewRange) { for _, epoch := range epochs { - mockEpoch := newMockEpoch(epoch.counter, epoch.firstView, epoch.finalView) - suite.epochQuery.Add(mockEpoch) + mockEpoch := newMockCommittedEpoch(epoch.epochCounter, epoch.firstView, epoch.finalView) + suite.epochQuery.AddCommitted(mockEpoch) // if we add a next epoch (counter 1 greater than current), then set phase to committed - if epoch.counter == suite.currentEpochCounter+1 { + if epoch.epochCounter == suite.currentEpochCounter+1 { suite.WithLock(func() { suite.phase = flow.EpochPhaseCommitted }) @@ -127,84 +120,167 @@ func (suite *EpochLookupSuite) CreateAndStartEpochLookup() { suite.cancel = cancel } -// TestEpochForViewWithFallback_Curr tests constructing and subsequently querying +// TestEpochForView_Curr tests constructing and subsequently querying // EpochLookup with an initial state of a current epoch. -func (suite *EpochLookupSuite) TestEpochForViewWithFallback_Curr() { - epochs := []epochRange{suite.currEpoch} +func (suite *EpochLookupSuite) TestEpochForView_Curr() { + epochs := []viewRange{suite.currEpoch} suite.CommitEpochs(epochs...) suite.CreateAndStartEpochLookup() - testEpochForViewWithFallback(suite.T(), suite.lookup, suite.state, epochs...) + testEpochForView(suite.T(), suite.lookup, epochs...) } -// TestEpochForViewWithFallback_PrevCurr tests constructing and subsequently querying +// TestEpochForView_PrevCurr tests constructing and subsequently querying // EpochLookup with an initial state of a previous and current epoch. -func (suite *EpochLookupSuite) TestEpochForViewWithFallback_PrevCurr() { - epochs := []epochRange{suite.prevEpoch, suite.currEpoch} +func (suite *EpochLookupSuite) TestEpochForView_PrevCurr() { + epochs := []viewRange{suite.prevEpoch, suite.currEpoch} suite.CommitEpochs(epochs...) suite.CreateAndStartEpochLookup() - testEpochForViewWithFallback(suite.T(), suite.lookup, suite.state, epochs...) + testEpochForView(suite.T(), suite.lookup, epochs...) } -// TestEpochForViewWithFallback_CurrNext tests constructing and subsequently querying +// TestEpochForView_CurrNext tests constructing and subsequently querying // EpochLookup with an initial state of a current and next epoch. -func (suite *EpochLookupSuite) TestEpochForViewWithFallback_CurrNext() { - epochs := []epochRange{suite.currEpoch, suite.nextEpoch} +func (suite *EpochLookupSuite) TestEpochForView_CurrNext() { + epochs := []viewRange{suite.currEpoch, suite.nextEpoch} suite.CommitEpochs(epochs...) suite.CreateAndStartEpochLookup() - testEpochForViewWithFallback(suite.T(), suite.lookup, suite.state, epochs...) + testEpochForView(suite.T(), suite.lookup, epochs...) } -// TestEpochForViewWithFallback_CurrNextPrev tests constructing and subsequently querying +// TestEpochForView_CurrNextPrev tests constructing and subsequently querying // EpochLookup with an initial state of a previous, current, and next epoch. 
-func (suite *EpochLookupSuite) TestEpochForViewWithFallback_CurrNextPrev() { - epochs := []epochRange{suite.prevEpoch, suite.currEpoch, suite.nextEpoch} +func (suite *EpochLookupSuite) TestEpochForView_CurrNextPrev() { + epochs := []viewRange{suite.prevEpoch, suite.currEpoch, suite.nextEpoch} suite.CommitEpochs(epochs...) suite.CreateAndStartEpochLookup() - testEpochForViewWithFallback(suite.T(), suite.lookup, suite.state, epochs...) + testEpochForView(suite.T(), suite.lookup, epochs...) } -// TestEpochForViewWithFallback_EpochFallbackTriggered tests constructing and subsequently querying -// EpochLookup with an initial state of epoch fallback triggered. -func (suite *EpochLookupSuite) TestEpochForViewWithFallback_EpochFallbackTriggered() { - epochs := []epochRange{suite.prevEpoch, suite.currEpoch, suite.nextEpoch} - suite.WithLock(func() { - suite.epochFallbackTriggered = true - }) - suite.CommitEpochs(epochs...) - suite.CreateAndStartEpochLookup() - testEpochForViewWithFallback(suite.T(), suite.lookup, suite.state, epochs...) -} +// TestProtocolEvents_EpochExtended tests constructing and subsequently querying +// EpochLookup, where we process an EpochExtended event and expect the latest +// epoch final view to be updated with the updated final view of the current epoch +// in the protocol state. +func (suite *EpochLookupSuite) TestProtocolEvents_EpochExtended() { + // previous and current epochs will be committed + epochs := []viewRange{suite.prevEpoch, suite.currEpoch} + suite.CommitEpochs(suite.prevEpoch, suite.currEpoch) -// TestProtocolEvents_EpochFallbackTriggered tests constructing and subsequently querying -// EpochLookup, where there is no epoch fallback at construction time, -// but an epoch fallback happens later via an epoch event. -func (suite *EpochLookupSuite) TestProtocolEvents_EpochFallbackTriggered() { - // initially, only current epoch is committed - suite.CommitEpochs(suite.currEpoch) suite.CreateAndStartEpochLookup() - // trigger epoch fallback - suite.WithLock(func() { - suite.epochFallbackTriggered = true - }) - suite.lookup.EpochEmergencyFallbackTriggered() + extension := flow.EpochExtension{ + FirstView: suite.currEpoch.finalView + 1, + FinalView: suite.currEpoch.finalView + 100, + } + suite.lookup.EpochExtended(suite.currEpoch.epochCounter, nil, extension) // wait for the protocol event to be processed (async) assert.Eventually(suite.T(), func() bool { - _, err := suite.lookup.EpochForViewWithFallback(suite.currEpoch.finalView + 1) + _, err := suite.lookup.EpochForView(extension.FinalView) return err == nil }, 5*time.Second, 50*time.Millisecond) // validate queries are answered correctly - testEpochForViewWithFallback(suite.T(), suite.lookup, suite.state, suite.currEpoch) + suite.currEpoch.finalView = extension.FinalView // expect final view to have been updated from extension + testEpochForView(suite.T(), suite.lookup, epochs...) 
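+	// EpochExtended events are queued on lookup.epochEvents and consumed
+	// asynchronously by a worker, so repeated deliveries of the same extension
+	// must be idempotent; the assertion below waits for the queue to drain
+	// before queries are re-checked.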
// should handle multiple deliveries of the protocol event - suite.lookup.EpochEmergencyFallbackTriggered() - suite.lookup.EpochEmergencyFallbackTriggered() - suite.lookup.EpochEmergencyFallbackTriggered() + suite.lookup.EpochExtended(suite.currEpoch.epochCounter, nil, extension) + suite.lookup.EpochExtended(suite.currEpoch.epochCounter, nil, extension) + suite.lookup.EpochExtended(suite.currEpoch.epochCounter, nil, extension) + + assert.Eventually(suite.T(), func() bool { + return len(suite.lookup.epochEvents) == 0 + }, time.Second, time.Millisecond) // validate queries are answered correctly - testEpochForViewWithFallback(suite.T(), suite.lookup, suite.state, suite.currEpoch) + testEpochForView(suite.T(), suite.lookup, epochs...) +} + +// TestProtocolEvents_EpochExtended_SanityChecks ensures all expected sanity checks are checked when processing +// EpochExtended events. +func (suite *EpochLookupSuite) TestProtocolEvents_EpochExtended_SanityChecks() { + initAndStartLookup := func() *irrecoverable.MockSignalerContext { + lookup, err := NewEpochLookup(suite.state) + suite.Require().NoError(err) + ctx, cancel := irrecoverable.NewMockSignalerContextWithCancel(suite.T(), context.Background()) + lookup.Start(ctx) + + suite.lookup = lookup + suite.cancel = cancel + + return ctx + } + + suite.T().Run("sanity check: `extension.FinalView` should be greater than final view of latest epoch", func(t *testing.T) { + // initially, only current epoch is committed + suite.CommitEpochs(suite.prevEpoch, suite.currEpoch) + ctx := initAndStartLookup() + + // create invalid extension with final view in the past + extension := flow.EpochExtension{ + FirstView: suite.currEpoch.finalView + 1, + FinalView: suite.currEpoch.finalView - 100, + } + + ctx.On("Throw", mock.AnythingOfType("*errors.errorString")).Run(func(args mock.Arguments) { + err, ok := args.Get(0).(error) + assert.True(suite.T(), ok) + assert.Contains(suite.T(), err.Error(), fmt.Sprintf(invalidExtensionFinalView, suite.currEpoch.finalView, extension.FinalView)) + }) + + suite.lookup.EpochExtended(suite.currEpoch.epochCounter, nil, extension) + + // wait for the protocol event to be processed (async) + assert.Eventually(suite.T(), func() bool { + return len(suite.lookup.epochEvents) == 0 + }, 2*time.Second, 50*time.Millisecond) + }) + suite.T().Run("sanity check: epoch extension should have the same epoch counter as the latest epoch", func(t *testing.T) { + // initially, only current epoch is committed + suite.CommitEpochs(suite.prevEpoch, suite.currEpoch) + ctx := initAndStartLookup() + + unknownCounter := uint64(100) + ctx.On("Throw", mock.AnythingOfType("*errors.errorString")).Run(func(args mock.Arguments) { + err, ok := args.Get(0).(error) + assert.True(suite.T(), ok) + assert.Contains(suite.T(), err.Error(), fmt.Sprintf(mismatchEpochCounter, suite.currEpoch.epochCounter, unknownCounter)) + }) + + suite.lookup.EpochExtended(unknownCounter, nil, flow.EpochExtension{ + FirstView: suite.currEpoch.finalView + 1, + FinalView: suite.currEpoch.finalView + 100, + }) + + // wait for the protocol event to be processed (async) + assert.Eventually(suite.T(), func() bool { + return len(suite.lookup.epochEvents) == 0 + }, 2*time.Second, 50*time.Millisecond) + }) + suite.T().Run("sanity check: first view of the epoch extension should immediately start after the final view of the latest epoch", func(t *testing.T) { + // initially, only current epoch is committed + suite.CommitEpochs(suite.prevEpoch, suite.currEpoch) + ctx := initAndStartLookup() + + // create 
an invalid extension whose first view does not immediately follow the latest epoch's final view
+		extension := flow.EpochExtension{
+			FirstView: suite.currEpoch.finalView - 100,
+			FinalView: suite.currEpoch.finalView + 100,
+		}
+
+		ctx.On("Throw", mock.AnythingOfType("*errors.errorString")).Run(func(args mock.Arguments) {
+			err, ok := args.Get(0).(error)
+			assert.True(suite.T(), ok)
+			assert.Contains(suite.T(), err.Error(), fmt.Sprintf(invalidEpochViewSequence, extension.FirstView, suite.currEpoch.finalView))
+		})
+
+		suite.lookup.EpochExtended(suite.currEpoch.epochCounter, nil, extension)
+
+		// wait for the protocol event to be processed (async)
+		assert.Eventually(suite.T(), func() bool {
+			return len(suite.lookup.epochEvents) == 0
+		}, 2*time.Second, 50*time.Millisecond)
+	})
+}

 // TestProtocolEvents_CommittedEpoch tests correct processing of an `EpochCommittedPhaseStarted` event
@@ -221,12 +297,12 @@ func (suite *EpochLookupSuite) TestProtocolEvents_CommittedEpoch() {

 	// wait for the protocol event to be processed (async)
 	assert.Eventually(suite.T(), func() bool {
-		_, err := suite.lookup.EpochForViewWithFallback(suite.currEpoch.finalView + 1)
+		_, err := suite.lookup.EpochForView(suite.currEpoch.finalView + 1)
 		return err == nil
 	}, 5*time.Second, 50*time.Millisecond)

 	// validate queries are answered correctly
-	testEpochForViewWithFallback(suite.T(), suite.lookup, suite.state, suite.currEpoch, suite.nextEpoch)
+	testEpochForView(suite.T(), suite.lookup, suite.currEpoch, suite.nextEpoch)

 	// should handle multiple deliveries of the protocol event
 	suite.lookup.EpochCommittedPhaseStarted(suite.currentEpochCounter, firstBlockOfCommittedPhase)
@@ -234,77 +310,61 @@ func (suite *EpochLookupSuite) TestProtocolEvents_CommittedEpoch() {
 	suite.lookup.EpochCommittedPhaseStarted(suite.currentEpochCounter, firstBlockOfCommittedPhase)

 	// validate queries are answered correctly
-	testEpochForViewWithFallback(suite.T(), suite.lookup, suite.state, suite.currEpoch, suite.nextEpoch)
+	testEpochForView(suite.T(), suite.lookup, suite.currEpoch, suite.nextEpoch)
 }

-// testEpochForViewWithFallback accepts a constructed EpochLookup and state, and
-// validates correctness by issuing various queries, using the input state and
-// epochs as source of truth.
+// testEpochForView accepts a constructed EpochLookup and validates correctness
+// by issuing various queries, using the input epochs as the source of truth.
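Zooming out on the sanity-check subtests above: they exercise three guards that an EpochExtended handler is expected to enforce against the cached latest epoch. A hypothetical sketch of that shape (latestEpoch is a stand-in struct, not the real cache entry; the format strings invalidExtensionFinalView, mismatchEpochCounter, and invalidEpochViewSequence are the package-level identifiers the tests reference, with argument order matching the tests' fmt.Sprintf calls):

type latestEpoch struct {
	counter              uint64
	firstView, finalView uint64
}

func validateExtension(latest latestEpoch, epochCounter uint64, ext flow.EpochExtension) error {
	// the extension must strictly increase the final view
	if ext.FinalView <= latest.finalView {
		return fmt.Errorf(invalidExtensionFinalView, latest.finalView, ext.FinalView)
	}
	// the extension must target the latest cached epoch
	if latest.counter != epochCounter {
		return fmt.Errorf(mismatchEpochCounter, latest.counter, epochCounter)
	}
	// the extension must begin immediately after the current final view
	if ext.FirstView != latest.finalView+1 {
		return fmt.Errorf(invalidEpochViewSequence, ext.FirstView, latest.finalView)
	}
	return nil
}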
-func testEpochForViewWithFallback(t *testing.T, lookup *EpochLookup, state protocol.State, epochs ...epochRange) { - epochFallbackTriggered, err := state.Params().EpochFallbackTriggered() - require.NoError(t, err) - - t.Run("should have set epoch fallback triggered correctly", func(t *testing.T) { - assert.Equal(t, epochFallbackTriggered, lookup.epochFallbackIsTriggered.Load()) - }) - +func testEpochForView(t *testing.T, lookup *EpochLookup, epochs ...viewRange) { t.Run("should be able to query within any committed epoch", func(t *testing.T) { for _, epoch := range epochs { t.Run("first view", func(t *testing.T) { - counter, err := lookup.EpochForViewWithFallback(epoch.firstView) + counter, err := lookup.EpochForView(epoch.firstView) assert.NoError(t, err) - assert.Equal(t, epoch.counter, counter) + assert.Equal(t, epoch.epochCounter, counter) }) t.Run("final view", func(t *testing.T) { - counter, err := lookup.EpochForViewWithFallback(epoch.finalView) + counter, err := lookup.EpochForView(epoch.finalView) assert.NoError(t, err) - assert.Equal(t, epoch.counter, counter) + assert.Equal(t, epoch.epochCounter, counter) }) t.Run("random view in range", func(t *testing.T) { - counter, err := lookup.EpochForViewWithFallback(unittest.Uint64InRange(epoch.firstView, epoch.finalView)) + counter, err := lookup.EpochForView(unittest.Uint64InRange(epoch.firstView, epoch.finalView)) assert.NoError(t, err) - assert.Equal(t, epoch.counter, counter) + assert.Equal(t, epoch.epochCounter, counter) }) } }) t.Run("should return ErrViewForUnknownEpoch below earliest epoch", func(t *testing.T) { t.Run("view 0", func(t *testing.T) { - _, err := lookup.EpochForViewWithFallback(0) + _, err := lookup.EpochForView(0) assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) }) t.Run("boundary of earliest epoch", func(t *testing.T) { - _, err := lookup.EpochForViewWithFallback(epochs[0].firstView - 1) + _, err := lookup.EpochForView(epochs[0].firstView - 1) assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) }) t.Run("random view below earliest epoch", func(t *testing.T) { - _, err := lookup.EpochForViewWithFallback(unittest.Uint64InRange(0, epochs[0].firstView-1)) + _, err := lookup.EpochForView(unittest.Uint64InRange(0, epochs[0].firstView-1)) assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) }) }) - // if epoch fallback is triggered, fallback to returning latest epoch counter - // otherwise return ErrViewForUnknownEpoch - if epochFallbackTriggered { - t.Run("should use fallback logic for queries above latest epoch when epoch fallback is triggered", func(t *testing.T) { - counter, err := lookup.EpochForViewWithFallback(epochs[len(epochs)-1].finalView + 1) - assert.NoError(t, err) - // should fallback to returning the counter for the latest epoch - assert.Equal(t, epochs[len(epochs)-1].counter, counter) - }) - } else { - t.Run("should return ErrViewForUnknownEpoch for queries above latest epoch when epoch fallback is not triggered", func(t *testing.T) { - _, err := lookup.EpochForViewWithFallback(epochs[len(epochs)-1].finalView + 1) - assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) - }) - } + t.Run("should return ErrViewForUnknownEpoch for queries above latest epoch final view", func(t *testing.T) { + latest, exists := lookup.epochs.latest() + assert.True(t, exists) + _, err := lookup.EpochForView(latest.finalView + 1) + assert.ErrorIs(t, err, model.ErrViewForUnknownEpoch) + }) } -// newMockEpoch returns a mock epoch with the given fields set. 
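One API nuance worth flagging before the fixture rewrite below: the old protocol.Epoch mock returned every value with an error companion, whereas the CommittedEpoch accessors are infallible. Illustrative usage of the new fixture:

epoch := newMockCommittedEpoch(7, 300, 399)
// no error companions to thread through assertions anymore
fmt.Println(epoch.FirstView(), epoch.FinalView(), epoch.Counter()) // 300 399 7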
-func newMockEpoch(counter, firstView, finalView uint64) *mockprotocol.Epoch { - epoch := new(mockprotocol.Epoch) - epoch.On("FirstView").Return(firstView, nil) - epoch.On("FinalView").Return(finalView, nil) - epoch.On("Counter").Return(counter, nil) +// newMockCommittedEpoch returns a mock epoch with the given properties +func newMockCommittedEpoch(counter, firstView, finalView uint64) *mockprotocol.CommittedEpoch { + epoch := new(mockprotocol.CommittedEpoch) + epoch.On("FirstView").Return(firstView) + epoch.On("FinalView").Return(finalView) + epoch.On("Counter").Return(counter) return epoch } diff --git a/module/epochs/machine_account.go b/module/epochs/machine_account.go index 56320367f60..08451e1273e 100644 --- a/module/epochs/machine_account.go +++ b/module/epochs/machine_account.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "strconv" "time" "github.com/onflow/cadence" @@ -13,9 +14,12 @@ import ( sdk "github.com/onflow/flow-go-sdk" client "github.com/onflow/flow-go-sdk/access/grpc" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" ) var ( @@ -23,6 +27,7 @@ var ( // We will log a warning once for a soft limit, and will log an error // in perpetuity for a hard limit. // Taken from https://www.notion.so/dapperlabs/Machine-Account-f3c293593ea442a39614fcebf705a132 + // TODO update these for FLIP74 defaultSoftMinBalanceLN cadence.UFix64 defaultHardMinBalanceLN cadence.UFix64 @@ -30,6 +35,11 @@ var ( defaultHardMinBalanceSN cadence.UFix64 ) +const ( + recommendedMinBalanceLN = 0.002 + recommendedMinBalanceSN = 0.05 +) + func init() { var err error defaultSoftMinBalanceLN, err = cadence.NewUFix64("0.0025") @@ -48,19 +58,30 @@ func init() { if err != nil { panic(fmt.Errorf("could not convert hard min balance for SN: %w", err)) } + + // sanity checks + if asFloat, err := ufix64Tofloat64(defaultHardMinBalanceLN); err != nil { + panic(err) + } else if asFloat != recommendedMinBalanceLN { + panic(fmt.Errorf("failed sanity check: %f!=%f", asFloat, recommendedMinBalanceLN)) + } + if asFloat, err := ufix64Tofloat64(defaultHardMinBalanceSN); err != nil { + panic(err) + } else if asFloat != recommendedMinBalanceSN { + panic(fmt.Errorf("failed sanity check: %f!=%f", asFloat, recommendedMinBalanceSN)) + } } const ( - checkMachineAccountRetryBase = time.Second * 5 - checkMachineAccountRetryMax = time.Minute * 10 - checkMachineAccountRetryJitterPct = 5 + checkMachineAccountRetryBase = time.Second * 30 + checkMachineAccountRetryMax = time.Minute * 30 + checkMachineAccountRetryJitterPct = 10 ) -// checkMachineAccountRetryBackoff returns the default backoff for checking -// machine account configs. -// * exponential backoff with base of 5s -// * maximum inter-check wait of 10m -// * 5% jitter +// checkMachineAccountRetryBackoff returns the default backoff for checking machine account configs. 
+// - exponential backoff with base of 30s
+// - maximum inter-check wait of 30m
+// - 10% jitter
 func checkMachineAccountRetryBackoff() retry.Backoff {
 	backoff := retry.NewExponential(checkMachineAccountRetryBase)
 	backoff = retry.WithCappedDuration(checkMachineAccountRetryMax, backoff)
@@ -100,12 +121,14 @@ type MachineAccountValidatorConfigOption func(*MachineAccountValidatorConfig)

 // MachineAccountConfigValidator is used to validate that a machine account is
 // configured correctly.
 type MachineAccountConfigValidator struct {
-	unit   *engine.Unit
-	config MachineAccountValidatorConfig
-	log    zerolog.Logger
-	client *client.Client
-	role   flow.Role
-	info   bootstrap.NodeMachineAccountInfo
+	config  MachineAccountValidatorConfig
+	metrics module.MachineAccountMetrics
+	log     zerolog.Logger
+	client  *client.Client
+	role    flow.Role
+	info    bootstrap.NodeMachineAccountInfo
+
+	component.Component
 }

 func NewMachineAccountConfigValidator(
@@ -113,6 +136,7 @@ func NewMachineAccountConfigValidator(
 	flowClient *client.Client,
 	role flow.Role,
 	info bootstrap.NodeMachineAccountInfo,
+	metrics module.MachineAccountMetrics,
 	opts ...MachineAccountValidatorConfigOption,
 ) (*MachineAccountConfigValidator, error) {

@@ -122,72 +146,101 @@ func NewMachineAccountConfigValidator(
 	}

 	validator := &MachineAccountConfigValidator{
-		unit:   engine.NewUnit(),
-		config: conf,
-		log:    log.With().Str("component", "machine_account_config_validator").Logger(),
-		client: flowClient,
-		role:   role,
-		info:   info,
+		config:  conf,
+		log:     log.With().Str("component", "machine_account_config_validator").Logger(),
+		client:  flowClient,
+		role:    role,
+		info:    info,
+		metrics: metrics,
 	}
-	return validator, nil
-}

-// Ready will launch the validator function in a goroutine.
-func (validator *MachineAccountConfigValidator) Ready() <-chan struct{} {
-	return validator.unit.Ready(func() {
-		validator.unit.Launch(func() {
-			validator.validateMachineAccountConfig(validator.unit.Ctx())
-		})
-	})
-}
+	// report recommended min balance once at construction
+	switch role {
+	case flow.RoleCollection:
+		validator.metrics.RecommendedMinBalance(recommendedMinBalanceLN)
+	case flow.RoleConsensus:
+		validator.metrics.RecommendedMinBalance(recommendedMinBalanceSN)
+	default:
+		return nil, fmt.Errorf("invalid role: %s", role)
+	}

-// Done will cancel the context of the unit, which will end the validator
-// goroutine, if it is still running.
-func (validator *MachineAccountConfigValidator) Done() <-chan struct{} {
-	return validator.unit.Done()
+	validator.Component = component.NewComponentManagerBuilder().
+		AddWorker(validator.reportMachineAccountConfigWorker).
+		Build()
+
+	return validator, nil
 }

-// validateMachineAccountConfig checks that the machine account in use by this
-// BaseClient object is correctly configured. If the machine account is critically
-// mis-configured, or a correct configuration cannot be confirmed, this function
-// will perpetually log errors indicating the problem.
+// reportMachineAccountConfigWorker is a worker function that periodically checks
+// and reports on the health of the node's configured machine account.
+// When a misconfiguration or insufficient account balance is detected, the worker
+// will report metrics and log specific information about what is wrong.
+//
-// This function should be invoked as a goroutine by using Ready and Done.
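With engine.Unit gone, the validator is started and stopped through the component framework. A hedged wiring sketch (ctx, log, flowClient, info, and metrics are assumed to be in scope; error handling abbreviated):

validator, err := NewMachineAccountConfigValidator(log, flowClient, flow.RoleConsensus, info, metrics)
if err != nil {
	return err
}
signalerCtx, errCh := irrecoverable.WithSignaler(ctx)
validator.Start(signalerCtx) // launches reportMachineAccountConfigWorker
<-validator.Ready()
_ = errCh // fatal worker errors (raised via ctx.Throw) surface on this channel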
-func (validator *MachineAccountConfigValidator) validateMachineAccountConfig(ctx context.Context) { - - log := validator.log +// This worker runs perpetually in the background, executing once per 30 minutes +// in the steady state. It will execute more frequently right after startup. +func (validator *MachineAccountConfigValidator) reportMachineAccountConfigWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() backoff := checkMachineAccountRetryBackoff() - err := retry.Do(ctx, backoff, func(ctx context.Context) error { - account, err := validator.client.GetAccount(ctx, validator.info.SDKAddress()) - if err != nil { - // we cannot validate a correct configuration - log an error and try again - log.Error(). - Err(err). - Str("machine_account_address", validator.info.Address). - Msg("failed to validate machine account config - could not get machine account") - return retry.RetryableError(err) + for { + select { + case <-ctx.Done(): + return + default: } - err = CheckMachineAccountInfo(log, validator.config, validator.role, validator.info, account) + err := validator.checkAndReportOnMachineAccountConfig(ctx) if err != nil { - // either we cannot validate the configuration or there is a critical - // misconfiguration - log a warning and retry - we will continue checking - // and logging until the problem is resolved - log.Error(). - Err(err). - Msg("critical machine account misconfiguration") - return retry.RetryableError(err) + ctx.Throw(err) } + + next, _ := backoff.Next() + t := time.NewTimer(next) + select { + case <-ctx.Done(): + t.Stop() + return + case <-t.C: + } + } +} + +// checkAndReportOnMachineAccountConfig checks the node's machine account for misconfiguration +// or insufficient balance once. Any discovered issues are logged and reported in metrics. +// No errors are expected during normal operation. +func (validator *MachineAccountConfigValidator) checkAndReportOnMachineAccountConfig(ctx context.Context) error { + + account, err := validator.client.GetAccount(ctx, validator.info.SDKAddress()) + if err != nil { + // we cannot validate a correct configuration - log an error and try again + validator.log.Error(). + Err(err). + Str("machine_account_address", validator.info.Address). + Msg("failed to validate machine account config - could not get machine account") return nil - }) + } + + accountBalance, err := ufix64Tofloat64(cadence.UFix64(account.Balance)) if err != nil { - log.Error().Err(err).Msg("failed to check machine account configuration after retry") - return + return irrecoverable.NewExceptionf("failed to convert account balance (%d): %w", account.Balance, err) } + validator.metrics.AccountBalance(accountBalance) - log.Info().Msg("confirmed valid machine account configuration. machine account config validator exiting...") + err = CheckMachineAccountInfo(validator.log, validator.config, validator.role, validator.info, account) + if err != nil { + // either we cannot validate the configuration or there is a critical + // misconfiguration - log a warning and retry - we will continue checking + // and logging until the problem is resolved + validator.metrics.IsMisconfigured(true) + validator.log.Error(). + Err(err). 
+ Msg("critical machine account misconfiguration") + return nil + } + validator.metrics.IsMisconfigured(false) + + return nil } // CheckMachineAccountInfo checks a node machine account config, logging @@ -254,7 +307,7 @@ func CheckMachineAccountInfo( info.SigningAlgorithm.String(), accountKey.SigAlgo.String()) } - if accountKey.Index != int(info.KeyIndex) { + if accountKey.Index != info.KeyIndex { return fmt.Errorf("machine account key index mismatch between local (%d) and on-chain (%d)", info.KeyIndex, accountKey.Index) @@ -289,3 +342,13 @@ func CheckMachineAccountInfo( return nil } + +// ufix64Tofloat64 converts a cadence.UFix64 type to float64. +// All UFix64 values should be convertible to float64, so no errors are expected. +func ufix64Tofloat64(fix cadence.UFix64) (float64, error) { + f, err := strconv.ParseFloat(fix.String(), 64) + if err != nil { + return 0, err + } + return f, nil +} diff --git a/module/epochs/machine_account_test.go b/module/epochs/machine_account_test.go index f5cb3dd8dad..e80af3e31ce 100644 --- a/module/epochs/machine_account_test.go +++ b/module/epochs/machine_account_test.go @@ -3,13 +3,14 @@ package epochs import ( "testing" - "github.com/onflow/cadence" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/cadence" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -35,20 +36,20 @@ func TestMachineAccountChecking(t *testing.T) { }) t.Run("inconsistent key", func(t *testing.T) { local, remote := unittest.MachineAccountFixture(t) - randomKey := unittest.PrivateKeyFixture(crypto.ECDSAP256, unittest.DefaultSeedFixtureLength) + randomKey := unittest.PrivateKeyFixture(crypto.ECDSAP256) remote.Keys[0].PublicKey = randomKey.PublicKey() err := CheckMachineAccountInfo(zerolog.Nop(), conf, flow.RoleConsensus, local, remote) require.Error(t, err) }) t.Run("inconsistent hash algo", func(t *testing.T) { local, remote := unittest.MachineAccountFixture(t) - remote.Keys[0].HashAlgo = sdkcrypto.SHA2_384 + remote.Keys[0].HashAlgo = hash.SHA2_384 err := CheckMachineAccountInfo(zerolog.Nop(), conf, flow.RoleConsensus, local, remote) require.Error(t, err) }) t.Run("inconsistent sig algo", func(t *testing.T) { local, remote := unittest.MachineAccountFixture(t) - remote.Keys[0].SigAlgo = sdkcrypto.ECDSA_secp256k1 + remote.Keys[0].SigAlgo = crypto.ECDSASecp256k1 err := CheckMachineAccountInfo(zerolog.Nop(), conf, flow.RoleConsensus, local, remote) require.Error(t, err) }) @@ -141,8 +142,8 @@ func TestMachineAccountChecking(t *testing.T) { t.Run("local file deviates from defaults", func(t *testing.T) { t.Run("hash algo", func(t *testing.T) { local, remote := unittest.MachineAccountFixture(t) - local.HashAlgorithm = sdkcrypto.SHA3_384 // non-standard hash algo - remote.Keys[0].HashAlgo = sdkcrypto.SHA3_384 // consistent between local/remote + local.HashAlgorithm = hash.SHA3_384 // non-standard hash algo + remote.Keys[0].HashAlgo = hash.SHA3_384 // consistent between local/remote log, hook := unittest.HookedLogger() err := CheckMachineAccountInfo(log, conf, flow.RoleConsensus, local, remote) @@ -153,12 +154,12 @@ func TestMachineAccountChecking(t *testing.T) { local, remote := unittest.MachineAccountFixture(t) // non-standard sig algo - sk := unittest.PrivateKeyFixture(crypto.ECDSASecp256k1, unittest.DefaultSeedFixtureLength) + sk := 
unittest.PrivateKeyFixture(crypto.ECDSASecp256k1) local.EncodedPrivateKey = sk.Encode() - local.SigningAlgorithm = sdkcrypto.ECDSA_secp256k1 + local.SigningAlgorithm = crypto.ECDSASecp256k1 // consistent between local/remote remote.Keys[0].PublicKey = sk.PublicKey() - remote.Keys[0].SigAlgo = sdkcrypto.ECDSA_secp256k1 + remote.Keys[0].SigAlgo = crypto.ECDSASecp256k1 log, hook := unittest.HookedLogger() err := CheckMachineAccountInfo(log, conf, flow.RoleConsensus, local, remote) @@ -204,3 +205,32 @@ func TestMachineAccountValidatorBackoff_Overflow(t *testing.T) { lastWait = wait } } + +// TestUfix64Tofloat64 sanity checks the conversion between cadence.UFix64 and float64. +func TestUfix64Tofloat64(t *testing.T) { + for _, testcase := range []struct { + str string + expected float64 + }{{ + str: "1.01", + expected: 1.01, + }, { + str: "0.0001", + expected: 0.0001, + }, { + str: "100000.001", + expected: 100000.001, + }, { + str: "0.0", + expected: 0, + }, { + str: "123456.0", + expected: 123456, + }} { + cdc, err := cadence.NewUFix64(testcase.str) + require.NoError(t, err) + f, err := ufix64Tofloat64(cdc) + require.NoError(t, err) + assert.InDelta(t, testcase.expected, f, 0.0000001) + } +} diff --git a/module/epochs/qc_client.go b/module/epochs/qc_client.go index a1a2b5ec461..4f891361063 100644 --- a/module/epochs/qc_client.go +++ b/module/epochs/qc_client.go @@ -13,6 +13,7 @@ import ( sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" + "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -47,7 +48,7 @@ func NewQCContractClient( flowClientANID flow.Identifier, nodeID flow.Identifier, accountAddress string, - accountKeyIndex uint, + accountKeyIndex uint32, qcContractAddress string, signer sdkcrypto.Signer, ) *QCContractClient { @@ -56,7 +57,7 @@ func NewQCContractClient( Str("component", "qc_contract_client"). Str("flow_client_an_id", flowClientANID.String()). Logger() - base := NewBaseClient(log, flowClient, accountAddress, accountKeyIndex, signer, qcContractAddress) + base := NewBaseClient(log, flowClient, accountAddress, accountKeyIndex, signer) // set QCContractAddress to the contract address given env := templates.Environment{QuorumCertificateAddress: qcContractAddress} @@ -104,9 +105,9 @@ func (c *QCContractClient) SubmitVote(ctx context.Context, vote *model.Vote) err seqNumber := account.Keys[int(c.AccountKeyIndex)].SequenceNumber tx := sdk.NewTransaction(). SetScript(templates.GenerateSubmitVoteScript(c.env)). - SetGasLimit(9999). + SetComputeLimit(9999). SetReferenceBlockID(latestBlock.ID). - SetProposalKey(account.Address, int(c.AccountKeyIndex), seqNumber). + SetProposalKey(account.Address, c.AccountKeyIndex, seqNumber). SetPayer(account.Address). 
AddAuthorizer(account.Address) @@ -132,7 +133,7 @@ func (c *QCContractClient) SubmitVote(ctx context.Context, vote *model.Vote) err } // sign envelope using account signer - err = tx.SignEnvelope(account.Address, int(c.AccountKeyIndex), c.Signer) + err = tx.SignEnvelope(account.Address, c.AccountKeyIndex, c.Signer) if err != nil { return fmt.Errorf("could not sign transaction: %w", err) } diff --git a/module/epochs/qc_voter.go b/module/epochs/qc_voter.go index 9213e2165d1..82392c7f861 100644 --- a/module/epochs/qc_voter.go +++ b/module/epochs/qc_voter.go @@ -58,7 +58,6 @@ func NewRootQCVoter( state protocol.State, contractClients []module.QCContractClient, ) *RootQCVoter { - voter := &RootQCVoter{ log: log.With().Str("module", "root_qc_voter").Logger(), me: me, @@ -71,19 +70,18 @@ func NewRootQCVoter( return voter } -// Vote handles the full procedure of generating a vote, submitting it to the -// epoch smart contract, and verifying submission. -// It is safe to run multiple times within a single setup phase. +// Vote handles the full procedure of generating a vote, submitting it to the epoch +// smart contract, and verifying submission. This logic should run as part of the Epoch +// Setup Phase, i.e. at a time when the next epoch has not yet been committed. Hence, +// this function takes the [protocol.TentativeEpoch] information as input. +// It is safe to run `Vote` multiple times within a single Epoch Setup Phase. +// CAUTION: epoch transition might not happen as described by [protocol.TentativeEpoch]. // // Error returns: -// - ErrWontVote if we fail to vote for a benign reason +// - epochs.ClusterQCNoVoteError if we fail to vote for a benign reason // - generic error in case of critical unexpected failure -func (voter *RootQCVoter) Vote(ctx context.Context, epoch protocol.Epoch) error { - - counter, err := epoch.Counter() - if err != nil { - return fmt.Errorf("could not get epoch counter: %w", err) - } +func (voter *RootQCVoter) Vote(ctx context.Context, epoch protocol.TentativeEpoch) error { + counter := epoch.Counter() clusters, err := epoch.Clustering() if err != nil { return fmt.Errorf("could not get clustering: %w", err) @@ -101,9 +99,12 @@ func (voter *RootQCVoter) Vote(ctx context.Context, epoch protocol.Epoch) error log.Info().Msg("preparing to generate vote for cluster root qc") // create the canonical root block for our cluster - root := clusterstate.CanonicalRootBlock(counter, cluster) + root, err := clusterstate.CanonicalRootBlock(counter, cluster) + if err != nil { + return fmt.Errorf("could not create cluster root block: %w", err) + } // create a signable hotstuff model - signable := hotmodel.GenesisBlockFromFlow(root.Header) + signable := hotmodel.GenesisBlockFromFlow(root.ToHeader()) vote, err := voter.signer.CreateVote(signable) if err != nil { @@ -118,15 +119,15 @@ func (voter *RootQCVoter) Vote(ctx context.Context, epoch protocol.Epoch) error clientIndex, qcContractClient := voter.getInitialContractClient() onMaxConsecutiveRetries := func(totalAttempts int) { - voter.updateContractClient(clientIndex) + clientIndex, qcContractClient = voter.updateContractClient(clientIndex) log.Warn().Msgf("retrying on attempt (%d) with fallback access node at index (%d)", totalAttempts, clientIndex) } backoff = retrymiddleware.AfterConsecutiveFailures(retryMaxConsecutiveFailures, backoff, onMaxConsecutiveRetries) - err = retry.Do(ctx, backoff, func(ctx context.Context) error { + castVote := func(ctx context.Context) error { // check that we're still in the setup phase, if we're not 
we can't // submit a vote anyway and must exit this process - phase, err := voter.state.Final().Phase() + phase, err := voter.state.Final().EpochPhase() if err != nil { return fmt.Errorf("unexpected error - unable to get current epoch phase: %w", err) } else if phase != flow.EpochPhaseSetup { @@ -170,8 +171,9 @@ func (voter *RootQCVoter) Vote(ctx context.Context, epoch protocol.Epoch) error // update our last successful client index for future calls voter.updateLastSuccessfulClient(clientIndex) return nil - }) - if network.IsTransientError(err) || errors.Is(err, errTransactionReverted) || errors.Is(err, errTransactionReverted) { + } + err = retry.Do(ctx, backoff, castVote) + if network.IsTransientError(err) || errors.Is(err, errTransactionReverted) || errors.Is(err, context.Canceled) { return NewClusterQCNoVoteErrorf("exceeded retry limit without successfully submitting our vote: %w", err) } return err diff --git a/module/epochs/qc_voter_test.go b/module/epochs/qc_voter_test.go index 71a2fdd3b97..be84e639a0b 100644 --- a/module/epochs/qc_voter_test.go +++ b/module/epochs/qc_voter_test.go @@ -5,6 +5,7 @@ import ( "io" "math/rand" "testing" + "time" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" @@ -16,6 +17,7 @@ import ( flowmodule "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/epochs" module "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network" protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -32,7 +34,7 @@ type Suite struct { state *protocol.State snap *protocol.Snapshot - epoch *protocol.Epoch + epoch *protocol.TentativeEpoch counter uint64 phase flow.EpochPhase nodes flow.IdentityList @@ -60,23 +62,23 @@ func (suite *Suite) SetupTest() { suite.snap = new(protocol.Snapshot) suite.state.On("Final").Return(suite.snap) suite.phase = flow.EpochPhaseSetup - suite.snap.On("Phase").Return( + suite.snap.On("EpochPhase").Return( func() flow.EpochPhase { return suite.phase }, func() error { return nil }, ) - suite.epoch = new(protocol.Epoch) + suite.epoch = new(protocol.TentativeEpoch) suite.counter = rand.Uint64() suite.nodes = unittest.IdentityListFixture(4, unittest.WithRole(flow.RoleCollection)) - suite.me = suite.nodes.Sample(1)[0] + suite.me = suite.nodes[rand.Intn(len(suite.nodes))] suite.local.On("NodeID").Return(func() flow.Identifier { return suite.me.NodeID }) var err error - assignments := unittest.ClusterAssignment(2, suite.nodes) - suite.clustering, err = factory.NewClusterList(assignments, suite.nodes) + assignments := unittest.ClusterAssignment(2, suite.nodes.ToSkeleton()) + suite.clustering, err = factory.NewClusterList(assignments, suite.nodes.ToSkeleton()) suite.Require().NoError(err) suite.epoch.On("Counter").Return(suite.counter, nil) @@ -90,7 +92,7 @@ func TestRootQCVoter(t *testing.T) { suite.Run(t, new(Suite)) } -// should fail if this node isn't in any cluster next epoch +// TestNonClusterParticipant should fail if this node isn't in any cluster next epoch. func (suite *Suite) TestNonClusterParticipant() { // change our identity so we aren't in the cluster assignment @@ -100,7 +102,7 @@ func (suite *Suite) TestNonClusterParticipant() { suite.Assert().True(epochs.IsClusterQCNoVoteError(err)) } -// should fail if we are not in setup phase +// TestInvalidPhase should fail if we are not in setup phase. 
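The contract these tests pin down: every benign outcome (not in a cluster, wrong phase, already voted, retries exhausted, cancelled) surfaces as a ClusterQCNoVoteError, so callers can separate "did not vote" from genuine faults. An illustrative caller-side pattern (voter, tentativeEpoch, and log assumed in scope):

if err := voter.Vote(ctx, tentativeEpoch); err != nil {
	if epochs.IsClusterQCNoVoteError(err) {
		// benign: log and continue without crashing the node
		log.Warn().Err(err).Msg("did not submit cluster QC vote")
	} else {
		return fmt.Errorf("unexpected failure while voting: %w", err)
	}
}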
func (suite *Suite) TestInvalidPhase() { suite.phase = flow.EpochPhaseStaking @@ -109,7 +111,7 @@ func (suite *Suite) TestInvalidPhase() { suite.Assert().True(epochs.IsClusterQCNoVoteError(err)) } -// should succeed and exit if we've already voted +// TestAlreadyVoted should succeed and exit if we've already voted. func (suite *Suite) TestAlreadyVoted() { suite.voted = true @@ -117,8 +119,35 @@ func (suite *Suite) TestAlreadyVoted() { suite.Assert().NoError(err) } -// should succeed and exit if voting succeeds +// TestVoting should succeed and exit if voting succeeds. func (suite *Suite) TestVoting() { err := suite.voter.Vote(context.Background(), suite.epoch) suite.Assert().NoError(err) } + +// TestCancelVoting verifies correct behaviour when the context injected into the `Vote` method is cancelled. +// The `RootQCVoter` should abort voting and quickly return an `ClusterQCNoVoteError`. +func (suite *Suite) TestCancelVoting() { + // We emulate the case, where the `QCContractClient` always returns an expected sentinel error, indicating + // that it could not interact with the system smart contract. To create a realistic test scenario, we cancel + // the context injected into the voter, when it is trying to submit a vote for the first time. + // The returned transient error will cause a retry, during which the retry logic will observe the context + // has been cancelled and exit with the context error. + ctxWithCancel, cancel := context.WithCancel(context.Background()) + suite.client.On("SubmitVote", mock.Anything, mock.Anything).Unset() + suite.client.On("SubmitVote", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { cancel() }). + Return(network.NewTransientErrorf("failed to submit transaction")) + + // The `Vote` method blocks. To avoid this test hanging in case of a bug, we call vote in a separate go routine + voteReturned := make(chan struct{}) + go func() { + err := suite.voter.Vote(ctxWithCancel, suite.epoch) + suite.Assert().Error(err, "when canceling voting process, Vote method should return with an error") + suite.Assert().ErrorIs(err, context.Canceled, "`context.Canceled` should be in the error trace") + suite.Assert().True(epochs.IsClusterQCNoVoteError(err), "got error of unexpected type") + close(voteReturned) + }() + + unittest.AssertClosesBefore(suite.T(), voteReturned, time.Second, "call of `Vote` method has not returned within the test's timeout") +} diff --git a/module/errors.go b/module/errors.go new file mode 100644 index 00000000000..5d91dafa8f6 --- /dev/null +++ b/module/errors.go @@ -0,0 +1,54 @@ +package module + +import ( + "errors" + "fmt" +) + +// UnknownBlockError indicates that a referenced block is missing +type UnknownBlockError struct { + err error +} + +func NewUnknownBlockError(msg string, args ...interface{}) error { + return UnknownBlockError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e UnknownBlockError) Unwrap() error { + return e.err +} + +func (e UnknownBlockError) Error() string { + return e.err.Error() +} + +func IsUnknownBlockError(err error) bool { + var unknownExecutedBlockError UnknownBlockError + return errors.As(err, &unknownExecutedBlockError) +} + +// UnknownResultError indicates that a referenced result is missing +type UnknownResultError struct { + err error +} + +func NewUnknownResultError(msg string, args ...interface{}) error { + return UnknownResultError{ + err: fmt.Errorf(msg, args...), + } +} + +func (e UnknownResultError) Unwrap() error { + return e.err +} + +func (e UnknownResultError) Error() string { + return 
e.err.Error() +} + +func IsUnknownResultError(err error) bool { + var unknownParentResultError UnknownResultError + return errors.As(err, &unknownParentResultError) +} diff --git a/module/execution/mock/script_executor.go b/module/execution/mock/script_executor.go new file mode 100644 index 00000000000..b33420f17f1 --- /dev/null +++ b/module/execution/mock/script_executor.go @@ -0,0 +1,206 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// ScriptExecutor is an autogenerated mock type for the ScriptExecutor type +type ScriptExecutor struct { + mock.Mock +} + +// ExecuteAtBlockHeight provides a mock function with given fields: ctx, script, arguments, height +func (_m *ScriptExecutor) ExecuteAtBlockHeight(ctx context.Context, script []byte, arguments [][]byte, height uint64) ([]byte, error) { + ret := _m.Called(ctx, script, arguments, height) + + if len(ret) == 0 { + panic("no return value specified for ExecuteAtBlockHeight") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, uint64) ([]byte, error)); ok { + return rf(ctx, script, arguments, height) + } + if rf, ok := ret.Get(0).(func(context.Context, []byte, [][]byte, uint64) []byte); ok { + r0 = rf(ctx, script, arguments, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []byte, [][]byte, uint64) error); ok { + r1 = rf(ctx, script, arguments, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountAtBlockHeight provides a mock function with given fields: ctx, address, height +func (_m *ScriptExecutor) GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) { + ret := _m.Called(ctx, address, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountAtBlockHeight") + } + + var r0 *flow.Account + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) (*flow.Account, error)); ok { + return rf(ctx, address, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) *flow.Account); ok { + r0 = rf(ctx, address, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Account) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountAvailableBalance provides a mock function with given fields: ctx, address, height +func (_m *ScriptExecutor) GetAccountAvailableBalance(ctx context.Context, address flow.Address, height uint64) (uint64, error) { + ret := _m.Called(ctx, address, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountAvailableBalance") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) (uint64, error)); ok { + return rf(ctx, address, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) uint64); ok { + r0 = rf(ctx, address, height) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountBalance provides a mock function with given fields: ctx, address, height +func (_m *ScriptExecutor) 
GetAccountBalance(ctx context.Context, address flow.Address, height uint64) (uint64, error) { + ret := _m.Called(ctx, address, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountBalance") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) (uint64, error)); ok { + return rf(ctx, address, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) uint64); ok { + r0 = rf(ctx, address, height) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKey provides a mock function with given fields: ctx, address, keyIndex, height +func (_m *ScriptExecutor) GetAccountKey(ctx context.Context, address flow.Address, keyIndex uint32, height uint64) (*flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, keyIndex, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKey") + } + + var r0 *flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, uint64) (*flow.AccountPublicKey, error)); ok { + return rf(ctx, address, keyIndex, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint32, uint64) *flow.AccountPublicKey); ok { + r0 = rf(ctx, address, keyIndex, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint32, uint64) error); ok { + r1 = rf(ctx, address, keyIndex, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountKeys provides a mock function with given fields: ctx, address, height +func (_m *ScriptExecutor) GetAccountKeys(ctx context.Context, address flow.Address, height uint64) ([]flow.AccountPublicKey, error) { + ret := _m.Called(ctx, address, height) + + if len(ret) == 0 { + panic("no return value specified for GetAccountKeys") + } + + var r0 []flow.AccountPublicKey + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) ([]flow.AccountPublicKey, error)); ok { + return rf(ctx, address, height) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Address, uint64) []flow.AccountPublicKey); ok { + r0 = rf(ctx, address, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.AccountPublicKey) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Address, uint64) error); ok { + r1 = rf(ctx, address, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewScriptExecutor creates a new instance of ScriptExecutor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
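Since the generated constructor registers a cleanup that asserts all expectations, a typical test only sets up stubs and lets the framework verify them. A hypothetical usage sketch following the standard mockery pattern (names and values are illustrative):

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	execmock "github.com/onflow/flow-go/module/execution/mock"
)

func TestWithScriptExecutorMock(t *testing.T) {
	se := execmock.NewScriptExecutor(t) // expectations auto-asserted on cleanup
	se.On("ExecuteAtBlockHeight", mock.Anything, mock.Anything, mock.Anything, uint64(42)).
		Return([]byte(`42`), nil)

	out, err := se.ExecuteAtBlockHeight(context.Background(), []byte("script"), nil, 42)
	require.NoError(t, err)
	require.Equal(t, []byte(`42`), out)
}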
+func NewScriptExecutor(t interface { + mock.TestingT + Cleanup(func()) +}) *ScriptExecutor { + mock := &ScriptExecutor{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/execution/registers_async.go b/module/execution/registers_async.go new file mode 100644 index 00000000000..13f507bb6c9 --- /dev/null +++ b/module/execution/registers_async.go @@ -0,0 +1,68 @@ +package execution + +import ( + "fmt" + + "go.uber.org/atomic" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/storage" +) + +// RegistersAsyncStore wraps an underlying register store so it can be used before the index is +// initialized. +type RegistersAsyncStore struct { + registerIndex *atomic.Pointer[storage.RegisterIndex] +} + +func NewRegistersAsyncStore() *RegistersAsyncStore { + return &RegistersAsyncStore{ + registerIndex: atomic.NewPointer[storage.RegisterIndex](nil), + } +} + +// Initialize initializes the underlying storage.RegisterIndex +// This method can be called at any time after the RegisterStore object is created. and before RegisterValues is called +// since we can't disambiguate between the underlying store before bootstrapping or just simply being behind sync +func (r *RegistersAsyncStore) Initialize(registers storage.RegisterIndex) error { + if r.registerIndex.CompareAndSwap(nil, ®isters) { + return nil + } + return fmt.Errorf("registers already initialized") +} + +// RegisterValues gets the register values from the underlying storage.RegisterIndex +// Expected errors: +// - indexer.ErrIndexNotInitialized if the store is still bootstrapping +// - storage.ErrHeightNotIndexed if the values at the height is not indexed yet +// - storage.ErrNotFound if the register does not exist at the height +func (r *RegistersAsyncStore) RegisterValues(ids flow.RegisterIDs, height uint64) ([]flow.RegisterValue, error) { + registerStore, err := r.getRegisterStore() + if err != nil { + return nil, err + } + + if height > registerStore.LatestHeight() || height < registerStore.FirstHeight() { + return nil, storage.ErrHeightNotIndexed + } + + result := make([]flow.RegisterValue, len(ids)) + for i, regID := range ids { + val, err := registerStore.Get(regID, height) + if err != nil { + return nil, fmt.Errorf("failed to get register value for id %d: %w", i, err) + } + result[i] = val + } + return result, nil +} + +func (r *RegistersAsyncStore) getRegisterStore() (storage.RegisterIndex, error) { + registerStore := r.registerIndex.Load() + if registerStore == nil { + return nil, indexer.ErrIndexNotInitialized + } + + return *registerStore, nil +} diff --git a/module/execution/registers_async_test.go b/module/execution/registers_async_test.go new file mode 100644 index 00000000000..f4b2cf783d3 --- /dev/null +++ b/module/execution/registers_async_test.go @@ -0,0 +1,87 @@ +package execution + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/state_synchronization/indexer" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestInitialize(t *testing.T) { + rootBlockHeight := uint64(1) + // test data available on init + registerID := unittest.RegisterIDFixture() + invalidRegisterID := flow.RegisterID{ + Owner: "ha", + Key: "ha", + } + registerValue1 := []byte("response1") + registerValue2 := 
[]byte("response2") + firstHeight := rootBlockHeight + latestHeight := rootBlockHeight + 1 + + t.Parallel() + + t.Run("registersDB bootstrapped correct values returned", func(t *testing.T) { + registersAsync := NewRegistersAsyncStore() + registers := storagemock.NewRegisterIndex(t) + registers.On("Get", registerID, firstHeight).Return(registerValue1, nil) + registers.On("Get", registerID, latestHeight).Return(registerValue2, nil) + registers.On("FirstHeight").Return(firstHeight) + registers.On("LatestHeight").Return(latestHeight) + + require.NoError(t, registersAsync.Initialize(registers)) + val1, err := registersAsync.RegisterValues([]flow.RegisterID{registerID}, firstHeight) + require.NoError(t, err) + require.Equal(t, val1[0], registerValue1) + + val2, err := registersAsync.RegisterValues([]flow.RegisterID{registerID}, latestHeight) + require.NoError(t, err) + require.Equal(t, val2[0], registerValue2) + }) + + t.Run("out of bounds height correct error returned", func(t *testing.T) { + registersAsync := NewRegistersAsyncStore() + registers := storagemock.NewRegisterIndex(t) + registers.On("LatestHeight").Return(latestHeight) + + require.NoError(t, registersAsync.Initialize(registers)) + _, err := registersAsync.RegisterValues([]flow.RegisterID{registerID}, latestHeight+1) + require.ErrorIs(t, err, storage.ErrHeightNotIndexed) + }) + + t.Run("no register value available correct error returned", func(t *testing.T) { + registersAsync := NewRegistersAsyncStore() + registers := storagemock.NewRegisterIndex(t) + registers.On("Get", invalidRegisterID, latestHeight).Return(nil, storage.ErrNotFound) + registers.On("FirstHeight").Return(firstHeight) + registers.On("LatestHeight").Return(latestHeight) + + require.NoError(t, registersAsync.Initialize(registers)) + _, err := registersAsync.RegisterValues([]flow.RegisterID{invalidRegisterID}, latestHeight) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} + +func TestRegisterValuesDataUnAvailable(t *testing.T) { + rootBlockHeight := uint64(1) + registersAsync := NewRegistersAsyncStore() + // registerDB not bootstrapped, correct error returned + registerID := unittest.RegisterIDFixture() + _, err := registersAsync.RegisterValues([]flow.RegisterID{registerID}, rootBlockHeight) + require.ErrorIs(t, err, indexer.ErrIndexNotInitialized) +} + +func TestInitDataRepeatedCalls(t *testing.T) { + registersAsync := NewRegistersAsyncStore() + registers1 := storagemock.NewRegisterIndex(t) + registers2 := storagemock.NewRegisterIndex(t) + + require.NoError(t, registersAsync.Initialize(registers1)) + require.Error(t, registersAsync.Initialize(registers2)) +} diff --git a/module/execution/scripts.go b/module/execution/scripts.go new file mode 100644 index 00000000000..50a3b93c7da --- /dev/null +++ b/module/execution/scripts.go @@ -0,0 +1,218 @@ +package execution + +import ( + "context" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/engine/execution/computation/query" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// RegisterAtHeight returns register value for provided register ID at the block height. 
+// Even if the register wasn't indexed at the provided height, returns the highest height the register was indexed at. +// If the register with the ID was not indexed at all return nil value and no error. +// Expected errors: +// - storage.ErrHeightNotIndexed if the given height was not indexed yet or lower than the first indexed height. +type RegisterAtHeight func(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) + +type ScriptExecutor interface { + // ExecuteAtBlockHeight executes provided script against the block height. + // A result value is returned encoded as byte array. An error will be returned if script + // doesn't successfully execute. + // Expected errors: + // - storage.ErrNotFound if block or register value at height was not found. + // - storage.ErrHeightNotIndexed if the data for the block height is not available + ExecuteAtBlockHeight( + ctx context.Context, + script []byte, + arguments [][]byte, + height uint64, + ) ([]byte, error) + + // GetAccountAtBlockHeight returns a Flow account by the provided address and block height. + // Expected errors: + // - storage.ErrHeightNotIndexed if the data for the block height is not available + GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) + + // GetAccountBalance returns a Flow account balance by the provided address and block height. + // Expected errors: + // - storage.ErrHeightNotIndexed if the data for the block height is not available + GetAccountBalance(ctx context.Context, address flow.Address, height uint64) (uint64, error) + + // GetAccountAvailableBalance returns a Flow account available balance by the provided address and block height. + // Expected errors: + // - storage.ErrHeightNotIndexed if the data for the block height is not available + GetAccountAvailableBalance(ctx context.Context, address flow.Address, height uint64) (uint64, error) + + // GetAccountKeys returns a Flow account public keys by the provided address and block height. + // Expected errors: + // - storage.ErrHeightNotIndexed if the data for the block height is not available + GetAccountKeys(ctx context.Context, address flow.Address, height uint64) ([]flow.AccountPublicKey, error) + + // GetAccountKey returns a Flow account public key by the provided address, block height and index. + // Expected errors: + // - storage.ErrHeightNotIndexed if the data for the block height is not available + GetAccountKey(ctx context.Context, address flow.Address, keyIndex uint32, height uint64) (*flow.AccountPublicKey, error) +} + +var _ ScriptExecutor = (*Scripts)(nil) + +type Scripts struct { + executor *query.QueryExecutor + headers storage.Headers + registerAtHeight RegisterAtHeight +} + +func NewScripts( + log zerolog.Logger, + metrics module.ExecutionMetrics, + chainID flow.ChainID, + protocolSnapshotProvider protocol.SnapshotExecutionSubsetProvider, + header storage.Headers, + registerAtHeight RegisterAtHeight, + queryConf query.QueryConfig, + derivedChainData *derived.DerivedChainData, + enableProgramCacheWrites bool, +) *Scripts { + vm := fvm.NewVirtualMachine() + + options := computation.DefaultFVMOptions( + chainID, + false, + true, + ) + blocks := environment.NewBlockFinder(header) + options = append(options, fvm.WithBlocks(blocks)) // add blocks for getBlocks calls in scripts + options = append(options, fvm.WithMetricsReporter(metrics)) + options = append(options, fvm.WithAllowProgramCacheWritesInScriptsEnabled(enableProgramCacheWrites)) + vmCtx := fvm.NewContext(options...) 
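+	// derivedChainData is shared across script executions so repeated runs of
+	// the same script can reuse derived data (e.g. cached programs) rather than
+	// re-deriving it per call; cache writes from scripts remain gated behind
+	// enableProgramCacheWrites above.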
+ + queryExecutor := query.NewQueryExecutor( + queryConf, + log, + metrics, + vm, + vmCtx, + derivedChainData, + protocolSnapshotProvider, + ) + + return &Scripts{ + executor: queryExecutor, + headers: header, + registerAtHeight: registerAtHeight, + } +} + +// ExecuteAtBlockHeight executes provided script against the block height. +// A result value is returned encoded as byte array. An error will be returned if script +// doesn't successfully execute. +// Expected errors: +// - Script execution related errors +// - storage.ErrHeightNotIndexed if the data for the block height is not available +func (s *Scripts) ExecuteAtBlockHeight( + ctx context.Context, + script []byte, + arguments [][]byte, + height uint64, +) ([]byte, error) { + + snap, header, err := s.snapshotWithBlock(height) + if err != nil { + return nil, err + } + + value, compUsage, err := s.executor.ExecuteScript(ctx, script, arguments, header, snap) + // TODO: return compUsage when upstream can handle it + _ = compUsage + return value, err +} + +// GetAccountAtBlockHeight returns a Flow account by the provided address and block height. +// Expected errors: +// - Script execution related errors +// - storage.ErrHeightNotIndexed if the data for the block height is not available +func (s *Scripts) GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) { + snap, header, err := s.snapshotWithBlock(height) + if err != nil { + return nil, err + } + + return s.executor.GetAccount(ctx, address, header, snap) +} + +// GetAccountBalance returns a balance of Flow account by the provided address and block height. +// Expected errors: +// - Script execution related errors +// - storage.ErrHeightNotIndexed if the data for the block height is not available +func (s *Scripts) GetAccountBalance(ctx context.Context, address flow.Address, height uint64) (uint64, error) { + snap, header, err := s.snapshotWithBlock(height) + if err != nil { + return 0, err + } + + return s.executor.GetAccountBalance(ctx, address, header, snap) +} + +// GetAccountAvailableBalance returns an available balance of Flow account by the provided address and block height. +// Expected errors: +// - Script execution related errors +// - storage.ErrHeightNotIndexed if the data for the block height is not available +func (s *Scripts) GetAccountAvailableBalance(ctx context.Context, address flow.Address, height uint64) (uint64, error) { + snap, header, err := s.snapshotWithBlock(height) + if err != nil { + return 0, err + } + + return s.executor.GetAccountAvailableBalance(ctx, address, header, snap) +} + +// GetAccountKeys returns a public keys of Flow account by the provided address and block height. +// Expected errors: +// - Script execution related errors +// - storage.ErrHeightNotIndexed if the data for the block height is not available +func (s *Scripts) GetAccountKeys(ctx context.Context, address flow.Address, height uint64) ([]flow.AccountPublicKey, error) { + snap, header, err := s.snapshotWithBlock(height) + if err != nil { + return nil, err + } + + return s.executor.GetAccountKeys(ctx, address, header, snap) +} + +// GetAccountKey returns a public key of Flow account by the provided address, block height and index. 
+
+// GetAccountKey returns a Flow account public key by the provided address, block height, and key index.
+// Expected errors:
+// - Script execution related errors
+// - storage.ErrHeightNotIndexed if the data for the block height is not available
+func (s *Scripts) GetAccountKey(ctx context.Context, address flow.Address, keyIndex uint32, height uint64) (*flow.AccountPublicKey, error) {
+	snap, header, err := s.snapshotWithBlock(height)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.executor.GetAccountKey(ctx, address, keyIndex, header, snap)
+}
+
+// snapshotWithBlock is a helper shared by script execution and the account getters.
+// It creates the storage snapshot the FVM needs to execute against the given height.
+func (s *Scripts) snapshotWithBlock(height uint64) (snapshot.StorageSnapshot, *flow.Header, error) {
+	header, err := s.headers.ByHeight(height)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	storageSnapshot := snapshot.NewReadFuncStorageSnapshot(func(ID flow.RegisterID) (flow.RegisterValue, error) {
+		return s.registerAtHeight(ID, height)
+	})
+
+	return storageSnapshot, header, nil
+}
diff --git a/module/execution/scripts_test.go b/module/execution/scripts_test.go
new file mode 100644
index 00000000000..ef43c594ca4
--- /dev/null
+++ b/module/execution/scripts_test.go
@@ -0,0 +1,448 @@
+package execution
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/onflow/cadence"
+	"github.com/onflow/cadence/encoding/ccf"
+	jsoncdc "github.com/onflow/cadence/encoding/json"
+	"github.com/onflow/cadence/stdlib"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/onflow/flow-go/engine/execution/computation/query"
+	"github.com/onflow/flow-go/engine/execution/testutil"
+	"github.com/onflow/flow-go/fvm"
+	"github.com/onflow/flow-go/fvm/errors"
+	"github.com/onflow/flow-go/fvm/storage/derived"
+	"github.com/onflow/flow-go/fvm/storage/snapshot"
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/module/state_synchronization/indexer"
+	synctest "github.com/onflow/flow-go/module/state_synchronization/requester/unittest"
+	"github.com/onflow/flow-go/storage"
+	pebbleStorage "github.com/onflow/flow-go/storage/pebble"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestScripts(t *testing.T) {
+	suite.Run(t, new(scriptTestSuite))
+}
+
+type scriptTestSuite struct {
+	suite.Suite
+	scripts       *Scripts
+	registerIndex storage.RegisterIndex
+	vm            *fvm.VirtualMachine
+	vmCtx         fvm.Context
+	chain         flow.Chain
+	height        uint64
+	snapshot      snapshot.SnapshotTree
+	dbDir         string
+}
+
+func (s *scriptTestSuite) TestScriptExecution() {
+	s.Run("Simple Script Execution", func() {
+		number := int64(42)
+		code := []byte(fmt.Sprintf("access(all) fun main(): Int { return %d; }", number))
+
+		result, err := s.scripts.ExecuteAtBlockHeight(context.Background(), code, nil, s.height)
+		s.Require().NoError(err)
+		val, err := jsoncdc.Decode(nil, result)
+		s.Require().NoError(err)
+		s.Assert().Equal(number, val.(cadence.Int).Value.Int64())
+	})
+
+	s.Run("Get Block", func() {
+		code := []byte(fmt.Sprintf(`access(all) fun main(): UInt64 {
+			getBlock(at: %d)!
+ return getCurrentBlock().height + }`, s.height)) + + result, err := s.scripts.ExecuteAtBlockHeight(context.Background(), code, nil, s.height) + s.Require().NoError(err) + val, err := jsoncdc.Decode(nil, result) + s.Require().NoError(err) + // make sure that the returned block height matches the current one set + s.Assert().Equal(s.height, uint64(val.(cadence.UInt64))) + }) + + s.Run("Handle not found Register", func() { + // use a non-existing address to trigger register get function + code := []byte("import Foo from 0x01; access(all) fun main() { }") + + result, err := s.scripts.ExecuteAtBlockHeight(context.Background(), code, nil, s.height) + s.Assert().Error(err) + s.Assert().Nil(result) + }) + + s.Run("Valid Argument", func() { + code := []byte("access(all) fun main(foo: Int): Int { return foo }") + arg := cadence.NewInt(2) + encoded, err := jsoncdc.Encode(arg) + s.Require().NoError(err) + + result, err := s.scripts.ExecuteAtBlockHeight( + context.Background(), + code, + [][]byte{encoded}, + s.height, + ) + s.Require().NoError(err) + s.Assert().Equal(encoded, result) + }) + + s.Run("Invalid Argument", func() { + code := []byte("access(all) fun main(foo: Int): Int { return foo }") + invalid := [][]byte{[]byte("i")} + + result, err := s.scripts.ExecuteAtBlockHeight(context.Background(), code, invalid, s.height) + s.Assert().Nil(result) + var coded errors.CodedError + s.Require().True(errors.As(err, &coded)) + fmt.Println(coded.Code(), coded.Error()) + s.Assert().Equal(errors.ErrCodeInvalidArgumentError, coded.Code()) + }) +} + +func (s *scriptTestSuite) TestGetAccount() { + s.Run("Get Service Account", func() { + address := s.chain.ServiceAddress() + account, err := s.scripts.GetAccountAtBlockHeight(context.Background(), address, s.height) + s.Require().NoError(err) + s.Assert().Equal(address, account.Address) + s.Assert().NotZero(account.Balance) + s.Assert().NotZero(len(account.Contracts)) + }) + + s.Run("Get New Account", func() { + address := s.createAccount() + account, err := s.scripts.GetAccountAtBlockHeight(context.Background(), address, s.height) + s.Require().NoError(err) + s.Require().Equal(address, account.Address) + s.Assert().Zero(account.Balance) + }) +} + +func (s *scriptTestSuite) TestGetAccountBalance() { + address := s.createAccount() + var transferAmount uint64 = 100000000 + s.transferTokens(address, transferAmount) + balance, err := s.scripts.GetAccountBalance(context.Background(), address, s.height) + s.Require().NoError(err) + s.Require().Equal(transferAmount, balance) +} + +func (s *scriptTestSuite) TestGetAccountKeys() { + address := s.createAccount() + publicKey := s.addAccountKey(address, accountKeyAPIVersionV2) + + accountKeys, err := s.scripts.GetAccountKeys(context.Background(), address, s.height) + s.Require().NoError(err) + s.Assert().Equal(1, len(accountKeys)) + s.Assert().Equal(publicKey.PublicKey, accountKeys[0].PublicKey) + s.Assert().Equal(publicKey.SignAlgo, accountKeys[0].SignAlgo) + s.Assert().Equal(publicKey.HashAlgo, accountKeys[0].HashAlgo) + s.Assert().Equal(publicKey.Weight, accountKeys[0].Weight) + +} + +func (s *scriptTestSuite) SetupTest() { + lockManager := storage.NewTestingLockManager() + logger := unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + entropyProvider := testutil.ProtocolStateWithSourceFixture(nil) + blockchain := unittest.BlockchainFixture(10) + headers := newBlockHeadersStorage(blockchain) + + s.chain = flow.Emulator.Chain() + s.snapshot = snapshot.NewSnapshotTree(nil) + s.vm = fvm.NewVirtualMachine() + s.vmCtx = 
fvm.NewContext( + fvm.WithChain(s.chain), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithSequenceNumberCheckAndIncrementEnabled(false), + ) + s.height = blockchain[0].Height + + s.dbDir = unittest.TempDir(s.T()) + db := pebbleStorage.NewBootstrappedRegistersWithPathForTest(s.T(), s.dbDir, s.height, s.height) + pebbleRegisters, err := pebbleStorage.NewRegisters(db, pebbleStorage.PruningDisabled) + s.Require().NoError(err) + s.registerIndex = pebbleRegisters + + derivedChainData, err := derived.NewDerivedChainData(derived.DefaultDerivedDataCacheSize) + s.Require().NoError(err) + + index, err := indexer.New( + logger, + metrics.NewNoopCollector(), + nil, + s.registerIndex, + headers, + nil, + nil, + nil, + nil, + flow.Testnet.Chain(), + derivedChainData, + nil, + lockManager, + ) + s.Require().NoError(err) + + s.scripts = NewScripts( + logger, + metrics.NewNoopCollector(), + s.chain.ChainID(), + entropyProvider, + headers, + index.RegisterValue, + query.NewDefaultConfig(), + derivedChainData, + true, + ) + + s.bootstrap() +} + +func (s *scriptTestSuite) TearDownTest() { + s.Require().NoError(os.RemoveAll(s.dbDir)) +} + +func (s *scriptTestSuite) bootstrap() { + bootstrapOpts := []fvm.BootstrapProcedureOption{ + fvm.WithInitialTokenSupply(unittest.GenesisTokenSupply), + } + + executionSnapshot, out, err := s.vm.Run( + s.vmCtx, + fvm.Bootstrap(unittest.ServiceAccountPublicKey, bootstrapOpts...), + s.snapshot) + + s.Require().NoError(err) + s.Require().NoError(out.Err) + + s.height++ + err = s.registerIndex.Store(executionSnapshot.UpdatedRegisters(), s.height) + s.Require().NoError(err) + + s.snapshot = s.snapshot.Append(executionSnapshot) +} + +func (s *scriptTestSuite) createAccount() flow.Address { + const createAccountTransaction = ` + transaction { + prepare(signer: auth(Storage, Capabilities) &Account) { + let account = Account(payer: signer) + } + }` + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(createAccountTransaction)). + SetPayer(unittest.RandomAddressFixture()). + AddAuthorizer(s.chain.ServiceAddress()). + Build() + s.Require().NoError(err) + + executionSnapshot, output, err := s.vm.Run( + s.vmCtx, + fvm.Transaction(txBody, 0), + s.snapshot, + ) + s.Require().NoError(err) + s.Require().NoError(output.Err) + + s.height++ + err = s.registerIndex.Store(executionSnapshot.UpdatedRegisters(), s.height) + s.Require().NoError(err) + + s.snapshot = s.snapshot.Append(executionSnapshot) + + var accountCreatedEvents []flow.Event + for _, event := range output.Events { + if event.Type != flow.EventAccountCreated { + continue + } + accountCreatedEvents = append(accountCreatedEvents, event) + break + } + s.Require().Len(accountCreatedEvents, 1) + + data, err := ccf.Decode(nil, accountCreatedEvents[0].Payload) + s.Require().NoError(err) + + return flow.ConvertAddress( + cadence.SearchFieldByName( + data.(cadence.Event), + stdlib.AccountEventAddressParameter.Identifier, + ).(cadence.Address), + ) +} + +func (s *scriptTestSuite) transferTokens(accountAddress flow.Address, amount uint64) { + transferTx, err := transferTokensTx(s.chain). + AddArgument(jsoncdc.MustEncode(cadence.UFix64(amount))). + AddArgument(jsoncdc.MustEncode(cadence.Address(accountAddress))). + AddAuthorizer(s.chain.ServiceAddress()). + SetPayer(accountAddress). 
+ Build() + s.Require().NoError(err) + + executionSnapshot, _, err := s.vm.Run( + s.vmCtx, + fvm.Transaction(transferTx, 0), + s.snapshot, + ) + s.Require().NoError(err) + + s.height++ + err = s.registerIndex.Store(executionSnapshot.UpdatedRegisters(), s.height) + s.Require().NoError(err) + + s.snapshot = s.snapshot.Append(executionSnapshot) +} + +type accountKeyAPIVersion string + +const ( + accountKeyAPIVersionV1 accountKeyAPIVersion = "V1" + accountKeyAPIVersionV2 accountKeyAPIVersion = "V2" +) + +func (s *scriptTestSuite) addAccountKey( + accountAddress flow.Address, + apiVersion accountKeyAPIVersion, +) flow.AccountPublicKey { + const addAccountKeyTransaction = ` +transaction(key: [UInt8]) { + prepare(signer: auth(AddKey) &Account) { + let publicKey = PublicKey( + publicKey: key, + signatureAlgorithm: SignatureAlgorithm.ECDSA_P256 + ) + signer.keys.add( + publicKey: publicKey, + hashAlgorithm: HashAlgorithm.SHA3_256, + weight: 1000.0 + ) + } +} +` + privateKey, err := unittest.AccountKeyDefaultFixture() + s.Require().NoError(err) + + publicKey, encodedCadencePublicKey := newAccountKey(s.T(), privateKey, apiVersion) + + txBody, err := flow.NewTransactionBodyBuilder(). + SetScript([]byte(addAccountKeyTransaction)). + SetPayer(accountAddress). + AddArgument(encodedCadencePublicKey). + AddAuthorizer(accountAddress). + Build() + s.Require().NoError(err) + + executionSnapshot, _, err := s.vm.Run( + s.vmCtx, + fvm.Transaction(txBody, 0), + s.snapshot, + ) + s.Require().NoError(err) + + s.height++ + err = s.registerIndex.Store(executionSnapshot.UpdatedRegisters(), s.height) + s.Require().NoError(err) + + s.snapshot = s.snapshot.Append(executionSnapshot) + + return publicKey +} + +func newAccountKey( + tb testing.TB, + privateKey *flow.AccountPrivateKey, + apiVersion accountKeyAPIVersion, +) ( + publicKey flow.AccountPublicKey, + encodedCadencePublicKey []byte, +) { + publicKey = privateKey.PublicKey(fvm.AccountKeyWeightThreshold) + + var publicKeyBytes []byte + if apiVersion == accountKeyAPIVersionV1 { + var err error + publicKeyBytes, err = flow.EncodeRuntimeAccountPublicKey(publicKey) + require.NoError(tb, err) + } else { + publicKeyBytes = publicKey.PublicKey.Encode() + } + + cadencePublicKey := testutil.BytesToCadenceArray(publicKeyBytes) + encodedCadencePublicKey, err := jsoncdc.Encode(cadencePublicKey) + require.NoError(tb, err) + + return publicKey, encodedCadencePublicKey +} + +func newBlockHeadersStorage(blocks []*flow.Block) storage.Headers { + blocksByHeight := make(map[uint64]*flow.Block) + for _, b := range blocks { + blocksByHeight[b.Height] = b + } + + return synctest.MockBlockHeaderStorage(synctest.WithByHeight(blocksByHeight)) +} + +func transferTokensTx(chain flow.Chain) *flow.TransactionBodyBuilder { + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + + return flow.NewTransactionBodyBuilder(). + SetScript([]byte(fmt.Sprintf( + ` + // This transaction is a template for a transaction that + // could be used by anyone to send tokens to another account + // that has been set up to receive tokens. 
+ // + // The withdraw amount and the account from getAccount + // would be the parameters to the transaction + + import FungibleToken from 0x%s + import FlowToken from 0x%s + + transaction(amount: UFix64, to: Address) { + + // The Vault resource that holds the tokens that are being transferred + let sentVault: @{FungibleToken.Vault} + + prepare(signer: auth(BorrowValue) &Account) { + + // Get a reference to the signer's stored vault + let vaultRef = signer.storage.borrow<auth(FungibleToken.Withdraw) &FlowToken.Vault>(from: /storage/flowTokenVault) + ?? panic("Could not borrow reference to the owner's Vault!") + + // Withdraw tokens from the signer's stored vault + self.sentVault <- vaultRef.withdraw(amount: amount) + } + + execute { + + // Get the recipient's public account object + let recipient = getAccount(to) + + // Get a reference to the recipient's Receiver + let receiverRef = recipient.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) + ?? panic("Could not borrow receiver reference to the recipient's Vault") + + // Deposit the withdrawn tokens in the recipient's receiver + receiverRef.deposit(from: <-self.sentVault) + } + }`, + sc.FungibleToken.Address.Hex(), + sc.FlowToken.Address.Hex(), + )), + ) +} diff --git a/module/executiondatasync/execution_data/cache/cache.go b/module/executiondatasync/execution_data/cache/cache.go new file mode 100644 index 00000000000..5b541855583 --- /dev/null +++ b/module/executiondatasync/execution_data/cache/cache.go @@ -0,0 +1,116 @@ +package cache + +import ( + "context" + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/mempool" + "github.com/onflow/flow-go/storage" +) + +// ExecutionDataCache is a read-through cache for ExecutionData. +type ExecutionDataCache struct { + backend execution_data.ExecutionDataGetter + + headers storage.Headers + seals storage.Seals + results storage.ExecutionResults + cache mempool.ExecutionData +} + +// NewExecutionDataCache returns a new ExecutionDataCache. +func NewExecutionDataCache( + backend execution_data.ExecutionDataGetter, + headers storage.Headers, + seals storage.Seals, + results storage.ExecutionResults, + cache mempool.ExecutionData, +) *ExecutionDataCache { + return &ExecutionDataCache{ + backend: backend, + headers: headers, + seals: seals, + results: results, + cache: cache, + } +} + +// ByID returns the execution data for the given ExecutionDataID. +// +// Expected errors during normal operations: +// - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore +// - MalformedDataError if some level of the blob tree cannot be properly deserialized +// - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size +func (c *ExecutionDataCache) ByID(ctx context.Context, executionDataID flow.Identifier) (*execution_data.BlockExecutionDataEntity, error) { + execData, err := c.backend.Get(ctx, executionDataID) + if err != nil { + return nil, err + } + + return execution_data.NewBlockExecutionDataEntity(executionDataID, execData), nil +} + +// ByBlockID returns the execution data for the given block ID. 
+// +// Expected errors during normal operations: +// - storage.ErrNotFound if a seal or execution result is not available for the block +// - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore +// - MalformedDataError if some level of the blob tree cannot be properly deserialized +// - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size +func (c *ExecutionDataCache) ByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionDataEntity, error) { + if execData, ok := c.cache.Get(blockID); ok { + return execData, nil + } + + executionDataID, err := c.LookupID(blockID) + if err != nil { + return nil, err + } + + execData, err := c.backend.Get(ctx, executionDataID) + if err != nil { + return nil, err + } + + execDataEntity := execution_data.NewBlockExecutionDataEntity(executionDataID, execData) + + _ = c.cache.Add(execDataEntity.BlockID, execDataEntity) + + return execDataEntity, nil +} + +// ByHeight returns the execution data for the given block height. +// +// Expected errors during normal operations: +// - storage.ErrNotFound if a seal or execution result is not available for the block +// - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore +// - MalformedDataError if some level of the blob tree cannot be properly deserialized +// - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size +func (c *ExecutionDataCache) ByHeight(ctx context.Context, height uint64) (*execution_data.BlockExecutionDataEntity, error) { + blockID, err := c.headers.BlockIDByHeight(height) + if err != nil { + return nil, err + } + + return c.ByBlockID(ctx, blockID) +} + +// LookupID returns the ExecutionDataID for the given block ID. +// +// Expected errors during normal operations: +// - storage.ErrNotFound if a seal or execution result is not available for the block +func (c *ExecutionDataCache) LookupID(blockID flow.Identifier) (flow.Identifier, error) { + seal, err := c.seals.FinalizedSealForBlock(blockID) + if err != nil { + return flow.ZeroID, fmt.Errorf("failed to lookup seal for block %s: %w", blockID, err) + } + + result, err := c.results.ByID(seal.ResultID) + if err != nil { + return flow.ZeroID, fmt.Errorf("failed to lookup execution result for block %s: %w", blockID, err) + } + + return result.ExecutionDataID, nil +} diff --git a/module/executiondatasync/execution_data/downloader.go b/module/executiondatasync/execution_data/downloader.go index d0470428bfe..b0eae050519 100644 --- a/module/executiondatasync/execution_data/downloader.go +++ b/module/executiondatasync/execution_data/downloader.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "sync" "github.com/ipfs/go-cid" "golang.org/x/sync/errgroup" @@ -12,40 +13,54 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/tracker" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/storage" ) // Downloader is used to download execution data blobs from the network via a blob service. type Downloader interface { module.ReadyDoneAware - - // Download downloads and returns a Block Execution Data from the network. 
- // The returned error will be: - // - MalformedDataError if some level of the blob tree cannot be properly deserialized - // - BlobNotFoundError if some CID in the blob tree could not be found from the blob service - // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size - Download(ctx context.Context, executionDataID flow.Identifier) (*BlockExecutionData, error) + ExecutionDataGetter + ProcessedHeightRecorder } +var _ Downloader = (*downloader)(nil) +var _ ProcessedHeightRecorder = (*downloader)(nil) + type downloader struct { + ProcessedHeightRecorder blobService network.BlobService maxBlobSize int serializer Serializer + storage tracker.Storage + headers storage.Headers } type DownloaderOption func(*downloader) +// WithSerializer configures the serializer for the downloader func WithSerializer(serializer Serializer) DownloaderOption { return func(d *downloader) { d.serializer = serializer } } +// WithExecutionDataTracker configures the execution data tracker and the storage headers for the downloader +func WithExecutionDataTracker(storage tracker.Storage, headers storage.Headers) DownloaderOption { + return func(d *downloader) { + d.storage = storage + d.headers = headers + } +} + +// NewDownloader creates a new Downloader instance func NewDownloader(blobService network.BlobService, opts ...DownloaderOption) *downloader { d := &downloader{ - blobService, - DefaultMaxBlobSize, - DefaultSerializer, + blobService: blobService, + maxBlobSize: DefaultMaxBlobSize, + serializer: DefaultSerializer, + ProcessedHeightRecorder: NewProcessedHeightRecorderManager(0), } for _, opt := range opts { @@ -55,19 +70,23 @@ func NewDownloader(blobService network.BlobService, opts ...DownloaderOption) *d return d } +// Ready returns a channel that will be closed when the downloader is ready to be used func (d *downloader) Ready() <-chan struct{} { return d.blobService.Ready() } + +// Done returns a channel that will be closed when the downloader is finished shutting down func (d *downloader) Done() <-chan struct{} { return d.blobService.Done() } -// Download downloads a blob tree identified by executionDataID from the network and returns the deserialized BlockExecutionData struct -// During normal operation, the returned error will be: -// - MalformedDataError if some level of the blob tree cannot be properly deserialized +// Get downloads a blob tree identified by executionDataID from the network and returns the deserialized BlockExecutionData struct +// +// Expected errors during normal operations: // - BlobNotFoundError if some CID in the blob tree could not be found from the blob service +// - MalformedDataError if some level of the blob tree cannot be properly deserialized // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size -func (d *downloader) Download(ctx context.Context, executionDataID flow.Identifier) (*BlockExecutionData, error) { +func (d *downloader) Get(ctx context.Context, executionDataID flow.Identifier) (*BlockExecutionData, error) { blobGetter := d.blobService.GetSession(ctx) // First, download the root execution data record which contains a list of chunk execution data @@ -81,12 +100,16 @@ func (d *downloader) Download(ctx context.Context, executionDataID flow.Identifi // Next, download each of the chunk execution data blobs chunkExecutionDatas := make([]*ChunkExecutionData, len(edRoot.ChunkExecutionDataIDs)) + // Execution data cids + var edCids = []cid.Cid{flow.IdToCid(executionDataID)} + var mu 
sync.Mutex + for i, chunkDataID := range edRoot.ChunkExecutionDataIDs { i := i chunkDataID := chunkDataID g.Go(func() error { - ced, err := d.getChunkExecutionData( + ced, cids, err := d.getChunkExecutionData( gCtx, chunkDataID, blobGetter, @@ -96,7 +119,10 @@ func (d *downloader) Download(ctx context.Context, executionDataID flow.Identifi return fmt.Errorf("failed to get chunk execution data at index %d: %w", i, err) } + mu.Lock() chunkExecutionDatas[i] = ced + edCids = append(edCids, cids...) + mu.Unlock() return nil }) @@ -106,6 +132,11 @@ func (d *downloader) Download(ctx context.Context, executionDataID flow.Identifi return nil, err } + err = d.trackBlobs(edRoot.BlockID, edCids) + if err != nil { + return nil, fmt.Errorf("failed to track blob: %w", err) + } + // Finally, recombine data into original record. bed := &BlockExecutionData{ BlockID: edRoot.BlockID, @@ -115,11 +146,18 @@ func (d *downloader) Download(ctx context.Context, executionDataID flow.Identifi return bed, nil } +// getExecutionDataRoot downloads the root execution data record from the network and returns the +// deserialized flow.BlockExecutionDataRoot struct. +// +// Expected errors during normal operations: +// - BlobNotFoundError if the root blob could not be found from the blob service +// - MalformedDataError if the root blob cannot be properly deserialized +// - BlobSizeLimitExceededError if the root blob exceeds the maximum allowed size func (d *downloader) getExecutionDataRoot( ctx context.Context, rootID flow.Identifier, blobGetter network.BlobGetter, -) (*BlockExecutionDataRoot, error) { +) (*flow.BlockExecutionDataRoot, error) { rootCid := flow.IdToCid(rootID) blob, err := blobGetter.GetBlob(ctx, rootCid) @@ -142,7 +180,7 @@ func (d *downloader) getExecutionDataRoot( return nil, NewMalformedDataError(err) } - edRoot, ok := v.(*BlockExecutionDataRoot) + edRoot, ok := v.(*flow.BlockExecutionDataRoot) if !ok { return nil, NewMalformedDataError(fmt.Errorf("execution data root blob does not deserialize to a BlockExecutionDataRoot, got %T instead", v)) } @@ -150,37 +188,89 @@ func (d *downloader) getExecutionDataRoot( return edRoot, nil } +// getChunkExecutionData downloads a chunk execution data blob from the network and returns the +// deserialized ChunkExecutionData struct with list of cids from all levels of the blob tree. 
+// +// Expected errors during normal operations: +// - context.Canceled or context.DeadlineExceeded if the context is canceled or times out +// - BlobNotFoundError if the root blob could not be found from the blob service +// - MalformedDataError if the root blob cannot be properly deserialized +// - BlobSizeLimitExceededError if the root blob exceeds the maximum allowed size func (d *downloader) getChunkExecutionData( ctx context.Context, chunkExecutionDataID cid.Cid, blobGetter network.BlobGetter, -) (*ChunkExecutionData, error) { +) (*ChunkExecutionData, []cid.Cid, error) { cids := []cid.Cid{chunkExecutionDataID} + cidsFromAllLevels := []cid.Cid{chunkExecutionDataID} // iteratively process each level of the blob tree until a ChunkExecutionData is returned or an // error is encountered for i := 0; ; i++ { v, err := d.getBlobs(ctx, blobGetter, cids) if err != nil { - return nil, fmt.Errorf("failed to get level %d of blob tree: %w", i, err) + return nil, nil, fmt.Errorf("failed to get level %d of blob tree: %w", i, err) } switch v := v.(type) { case *ChunkExecutionData: - return v, nil + return v, cidsFromAllLevels, nil case *[]cid.Cid: + cidsFromAllLevels = append(cidsFromAllLevels, *v...) cids = *v default: - return nil, NewMalformedDataError(fmt.Errorf("blob tree contains unexpected type %T at level %d", v, i)) + return nil, nil, NewMalformedDataError(fmt.Errorf("blob tree contains unexpected type %T at level %d", v, i)) } } } +// trackBlobs updates the storage to track the provided CIDs for a given block. +// This is used to ensure that the blobs can be pruned later. +// +// Parameters: +// - blockID: The identifier of the block to which the blobs belong. +// - cids: CIDs to be tracked. +// +// No errors are expected during normal operations. +func (d *downloader) trackBlobs(blockID flow.Identifier, cids []cid.Cid) error { + if d.storage == nil || d.headers == nil { + return nil + } + + return d.storage.Update(func(trackBlobs tracker.TrackBlobsFn) error { + header, err := d.headers.ByBlockID(blockID) + if err != nil { + return err + } + + // track new blobs so that they can be pruned later + err = trackBlobs(header.Height, cids...) + if err != nil { + return err + } + + d.OnBlockProcessed(header.Height) + + return nil + }) +} + // getBlobs gets the given CIDs from the blobservice, reassembles the blobs, and deserializes the reassembled data into an object. +// +// Expected errors during normal operations: +// - context.Canceled or context.DeadlineExceeded if the context is canceled or times out +// - BlobNotFoundError if the root blob could not be found from the blob service +// - MalformedDataError if the root blob cannot be properly deserialized +// - BlobSizeLimitExceededError if the root blob exceeds the maximum allowed size func (d *downloader) getBlobs(ctx context.Context, blobGetter network.BlobGetter, cids []cid.Cid) (interface{}, error) { + // this uses an optimization to deserialize the data in a streaming fashion as it is received + // from the network, reducing the amount of memory required to deserialize large objects. blobCh, errCh := d.retrieveBlobs(ctx, blobGetter, cids) bcr := blobs.NewBlobChannelReader(blobCh) + v, deserializeErr := d.serializer.Deserialize(bcr) + + // blocks until all blobs have been retrieved or an error is encountered err := <-errCh if err != nil { @@ -195,6 +285,13 @@ func (d *downloader) getBlobs(ctx context.Context, blobGetter network.BlobGetter } // retrieveBlobs asynchronously retrieves the blobs for the given CIDs with the given BlobGetter. 
+// Blobs corresponding to the requested CIDs are returned in order on the response channel. +// +// Expected errors during normal operations: +// - context.Canceled or context.DeadlineExceeded if the context is canceled or times out +// - BlobNotFoundError if the root blob could not be found from the blob service +// - MalformedDataError if the root blob cannot be properly deserialized +// - BlobSizeLimitExceededError if the root blob exceeds the maximum allowed size func (d *downloader) retrieveBlobs(parent context.Context, blobGetter network.BlobGetter, cids []cid.Cid) (<-chan blobs.Blob, <-chan error) { blobsOut := make(chan blobs.Blob, len(cids)) errCh := make(chan error, 1) @@ -214,8 +311,10 @@ func (d *downloader) retrieveBlobs(parent context.Context, blobGetter network.Bl cachedBlobs := make(map[cid.Cid]blobs.Blob) cidCounts := make(map[cid.Cid]int) // used to account for duplicate CIDs + // record the number of times each CID appears in the list. this is later used to determine + // when it's safe to delete cached blobs during processing for _, c := range cids { - cidCounts[c] += 1 + cidCounts[c]++ } // for each cid, find the corresponding blob from the incoming blob channel and send it to @@ -235,7 +334,8 @@ func (d *downloader) retrieveBlobs(parent context.Context, blobGetter network.Bl } } - cidCounts[c] -= 1 + // remove the blob from the cache if it's no longer needed + cidCounts[c]-- if cidCounts[c] == 0 { delete(cachedBlobs, c) @@ -251,12 +351,20 @@ func (d *downloader) retrieveBlobs(parent context.Context, blobGetter network.Bl // findBlob retrieves blobs from the given channel, caching them along the way, until it either // finds the target blob or exhausts the channel. +// +// This is necessary to ensure blobs can be reassembled in order from the underlying blobservice +// which provides no guarantees for blob order on the response channel. 
+// +// Expected errors during normal operations: +// - BlobNotFoundError if the root blob could not be found from the blob service +// - BlobSizeLimitExceededError if the root blob exceeds the maximum allowed size func (d *downloader) findBlob( blobChan <-chan blobs.Blob, target cid.Cid, cache map[cid.Cid]blobs.Blob, ) (blobs.Blob, error) { - // Note: blobs are returned as they are found, in no particular order + // pull blobs off the blob channel until the target blob is found or the channel is closed + // Note: blobs are returned on the blob channel as they are found, in no particular order for blob := range blobChan { // check blob size blobSize := len(blob.RawData()) diff --git a/module/executiondatasync/execution_data/downloader_test.go b/module/executiondatasync/execution_data/downloader_test.go index e2267e03395..5b6c9959483 100644 --- a/module/executiondatasync/execution_data/downloader_test.go +++ b/module/executiondatasync/execution_data/downloader_test.go @@ -14,7 +14,7 @@ import ( "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" ) func TestCIDNotFound(t *testing.T) { @@ -23,7 +23,7 @@ func TestCIDNotFound(t *testing.T) { downloader := execution_data.NewDownloader(blobService) edStore := execution_data.NewExecutionDataStore(blobstore, execution_data.DefaultSerializer) bed := generateBlockExecutionData(t, 10, 3*execution_data.DefaultMaxBlobSize) - edID, err := edStore.AddExecutionData(context.Background(), bed) + edID, err := edStore.Add(context.Background(), bed) require.NoError(t, err) blobGetter := new(mocknetwork.BlobGetter) @@ -54,7 +54,7 @@ func TestCIDNotFound(t *testing.T) { }, ) - _, err = downloader.Download(context.Background(), edID) + _, err = downloader.Get(context.Background(), edID) var blobNotFoundError *execution_data.BlobNotFoundError assert.ErrorAs(t, err, &blobNotFoundError) } diff --git a/module/executiondatasync/execution_data/entity.go b/module/executiondatasync/execution_data/entity.go index 6facd5ad580..a883907f0ee 100644 --- a/module/executiondatasync/execution_data/entity.go +++ b/module/executiondatasync/execution_data/entity.go @@ -4,29 +4,18 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// BlockExecutionDataEntity is a wrapper around BlockExecutionData that implements the flow.Entity -// interface to support caching with Herocache +// BlockExecutionDataEntity is a wrapper around BlockExecutionData that holds the cached BlockExecutionData ID type BlockExecutionDataEntity struct { *BlockExecutionData - // id holds the cached BlockExecutionData ID. The ID generation process is expensive, so this - // entity interface exclusively uses a pre-calculated value. - id flow.Identifier + // ExecutionDataID holds the cached BlockExecutionData ID. The ID generation process is expensive, so this + // wrapper exclusively uses a pre-calculated value. 
+ ExecutionDataID flow.Identifier } -var _ flow.Entity = (*BlockExecutionDataEntity)(nil) - -func NewBlockExecutionDataEntity(id flow.Identifier, executionData *BlockExecutionData) *BlockExecutionDataEntity { +func NewBlockExecutionDataEntity(executionDataID flow.Identifier, executionData *BlockExecutionData) *BlockExecutionDataEntity { return &BlockExecutionDataEntity{ - id: id, + ExecutionDataID: executionDataID, BlockExecutionData: executionData, } } - -func (c BlockExecutionDataEntity) ID() flow.Identifier { - return c.id -} - -func (c BlockExecutionDataEntity) Checksum() flow.Identifier { - return c.id -} diff --git a/module/executiondatasync/execution_data/execution_data.go b/module/executiondatasync/execution_data/execution_data.go index 5cdca9c775b..46f6e741738 100644 --- a/module/executiondatasync/execution_data/execution_data.go +++ b/module/executiondatasync/execution_data/execution_data.go @@ -1,27 +1,87 @@ package execution_data import ( - "github.com/ipfs/go-cid" + "errors" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" ) +// ExecutionDataDBMode controls which db type to use. +type ExecutionDataDBMode int + +const ( + // ExecutionDataDBModeBadger uses badger db + ExecutionDataDBModeBadger ExecutionDataDBMode = iota + 1 + + // ExecutionDataDBModePebble uses pebble db + ExecutionDataDBModePebble +) + +func ParseExecutionDataDBMode(s string) (ExecutionDataDBMode, error) { + switch s { + case ExecutionDataDBModeBadger.String(): + return ExecutionDataDBModeBadger, nil + case ExecutionDataDBModePebble.String(): + return ExecutionDataDBModePebble, nil + default: + return 0, errors.New("invalid execution data DB mode") + } +} + +func (m ExecutionDataDBMode) String() string { + switch m { + case ExecutionDataDBModeBadger: + return "badger" + case ExecutionDataDBModePebble: + return "pebble" + default: + return "" + } +} + +// DefaultMaxBlobSize is the default maximum size of a blob. +// This is calibrated to fit within a libp2p message and not exceed the max size recommended by bitswap. const DefaultMaxBlobSize = 1 << 20 // 1MiB // ChunkExecutionData represents the execution data of a chunk type ChunkExecutionData struct { + // Collection is the collection for which this chunk was executed Collection *flow.Collection - Events flow.EventsList + + // Events are the events generated by executing the collection + Events flow.EventsList + + // TrieUpdate is the trie update generated by executing the collection + // This includes a list of all registers updated during the execution TrieUpdate *ledger.TrieUpdate -} -type BlockExecutionDataRoot struct { - BlockID flow.Identifier - ChunkExecutionDataIDs []cid.Cid + // TransactionResults are the results of executing the transactions in the collection + // This includes all of the data from flow.TransactionResult, except that it uses a boolean + // value to indicate if an error occurred instead of a full error message. + TransactionResults []flow.LightTransactionResult } +// BlockExecutionData represents the execution data of a block. type BlockExecutionData struct { BlockID flow.Identifier ChunkExecutionDatas []*ChunkExecutionData } + +// ConvertTransactionResults converts a list of flow.TransactionResults into a list of +// flow.LightTransactionResults to be included in a ChunkExecutionData. 
+func ConvertTransactionResults(results flow.TransactionResults) []flow.LightTransactionResult {
+	if len(results) == 0 {
+		return nil
+	}
+
+	converted := make([]flow.LightTransactionResult, len(results))
+	for i, txResult := range results {
+		converted[i] = flow.LightTransactionResult{
+			TransactionID:   txResult.TransactionID,
+			ComputationUsed: txResult.ComputationUsed,
+			Failed:          txResult.ErrorMessage != "",
+		}
+	}
+	return converted
+}
diff --git a/module/executiondatasync/execution_data/execution_data_test.go b/module/executiondatasync/execution_data/execution_data_test.go
new file mode 100644
index 00000000000..37cd18a18c6
--- /dev/null
+++ b/module/executiondatasync/execution_data/execution_data_test.go
@@ -0,0 +1,62 @@
+package execution_data_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
+)
+
+func TestConvertTransactionResults(t *testing.T) {
+	t.Parallel()
+
+	t.Run("empty", func(t *testing.T) {
+		assert.Nil(t, execution_data.ConvertTransactionResults(nil))
+
+		var results []flow.TransactionResult
+		assert.Nil(t, execution_data.ConvertTransactionResults(results))
+
+		results = make([]flow.TransactionResult, 0)
+		assert.Nil(t, execution_data.ConvertTransactionResults(results))
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		results := []flow.TransactionResult{
+			{
+				TransactionID:   flow.Identifier{1, 2, 3},
+				ComputationUsed: 100,
+				MemoryUsed:      1000,
+			},
+			{
+				TransactionID:   flow.Identifier{4, 5, 6},
+				ComputationUsed: 200,
+				MemoryUsed:      2000,
+				ErrorMessage:    "some error",
+			},
+		}
+		expected := []flow.LightTransactionResult{
+			{
+				TransactionID:   flow.Identifier{1, 2, 3},
+				ComputationUsed: 100,
+				Failed:          false,
+			},
+			{
+				TransactionID:   flow.Identifier{4, 5, 6},
+				ComputationUsed: 200,
+				Failed:          true,
+			},
+		}
+
+		converted := execution_data.ConvertTransactionResults(results)
+		assert.Equal(t, len(results), len(converted))
+
+		for i, e := range expected {
+			assert.Equal(t, e, converted[i])
+			assert.Equal(t, e.TransactionID, converted[i].TransactionID)
+			assert.Equal(t, e.ComputationUsed, converted[i].ComputationUsed)
+			assert.Equal(t, e.Failed, converted[i].Failed)
+		}
+	})
+}
diff --git a/module/executiondatasync/execution_data/internal/execution_data_versions.go b/module/executiondatasync/execution_data/internal/execution_data_versions.go
new file mode 100644
index 00000000000..49bb3b71f42
--- /dev/null
+++ b/module/executiondatasync/execution_data/internal/execution_data_versions.go
@@ -0,0 +1,21 @@
+package internal
+
+import (
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// This is a collection of data structures from previous versions of ExecutionData.
+// They are maintained here for backwards compatibility testing.
+//
+// Note: the current codebase makes no guarantees about backwards compatibility with previous
+// versions of execution data. The data structures and tests included are only to help inform
+// of any breaking changes.
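The property these legacy structures exist to exercise is that blobs written under an old version keep decoding into the current type. A condensed sketch of that round trip follows; it assumes access to the `internal` package (which Go only permits from within this module's subtree, as the serializer test later in this diff has), and the `decodeV1` helper is illustrative only.

```go
package execution_data_test

import (
	"bytes"
	"fmt"

	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
	"github.com/onflow/flow-go/module/executiondatasync/execution_data/internal"
)

// decodeV1 serializes a legacy ChunkExecutionDataV1 and deserializes it back,
// yielding the current ChunkExecutionData (TransactionResults stays nil).
func decodeV1(v1 *internal.ChunkExecutionDataV1) (*execution_data.ChunkExecutionData, error) {
	buf := new(bytes.Buffer)
	if err := execution_data.DefaultSerializer.Serialize(buf, v1); err != nil {
		return nil, err
	}

	raw, err := execution_data.DefaultSerializer.Deserialize(buf)
	if err != nil {
		return nil, err
	}

	ced, ok := raw.(*execution_data.ChunkExecutionData)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T", raw)
	}
	return ced, nil
}
```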
+ +// ChunkExecutionDataV1 [deprecated] only use for backwards compatibility testing +// was used up to block X (TODO: fill in block number after release) +type ChunkExecutionDataV1 struct { + Collection *flow.Collection + Events flow.EventsList + TrieUpdate *ledger.TrieUpdate +} diff --git a/module/executiondatasync/execution_data/mock/downloader.go b/module/executiondatasync/execution_data/mock/downloader.go index a79dbbe2483..e64250a84cf 100644 --- a/module/executiondatasync/execution_data/mock/downloader.go +++ b/module/executiondatasync/execution_data/mock/downloader.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,10 +16,14 @@ type Downloader struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *Downloader) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -32,17 +36,21 @@ func (_m *Downloader) Done() <-chan struct{} { return r0 } -// Download provides a mock function with given fields: ctx, executionDataID -func (_m *Downloader) Download(ctx context.Context, executionDataID flow.Identifier) (*execution_data.BlockExecutionData, error) { - ret := _m.Called(ctx, executionDataID) +// Get provides a mock function with given fields: ctx, rootID +func (_m *Downloader) Get(ctx context.Context, rootID flow.Identifier) (*execution_data.BlockExecutionData, error) { + ret := _m.Called(ctx, rootID) + + if len(ret) == 0 { + panic("no return value specified for Get") + } var r0 *execution_data.BlockExecutionData var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*execution_data.BlockExecutionData, error)); ok { - return rf(ctx, executionDataID) + return rf(ctx, rootID) } if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *execution_data.BlockExecutionData); ok { - r0 = rf(ctx, executionDataID) + r0 = rf(ctx, rootID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*execution_data.BlockExecutionData) @@ -50,7 +58,7 @@ func (_m *Downloader) Download(ctx context.Context, executionDataID flow.Identif } if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { - r1 = rf(ctx, executionDataID) + r1 = rf(ctx, rootID) } else { r1 = ret.Error(1) } @@ -58,10 +66,37 @@ func (_m *Downloader) Download(ctx context.Context, executionDataID flow.Identif return r0, r1 } -// Ready provides a mock function with given fields: +// HighestCompleteHeight provides a mock function with no fields +func (_m *Downloader) HighestCompleteHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestCompleteHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// OnBlockProcessed provides a mock function with given fields: _a0 +func (_m *Downloader) OnBlockProcessed(_a0 uint64) { + _m.Called(_a0) +} + +// Ready provides a mock function with no fields func (_m *Downloader) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -74,13 +109,17 @@ func (_m *Downloader) Ready() <-chan struct{} { return r0 } -type mockConstructorTestingTNewDownloader interface { - 
mock.TestingT - Cleanup(func()) +// SetHeightUpdatesConsumer provides a mock function with given fields: _a0 +func (_m *Downloader) SetHeightUpdatesConsumer(_a0 execution_data.HeightUpdatesConsumer) { + _m.Called(_a0) } // NewDownloader creates a new instance of Downloader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDownloader(t mockConstructorTestingTNewDownloader) *Downloader { +// The first argument is typically a *testing.T value. +func NewDownloader(t interface { + mock.TestingT + Cleanup(func()) +}) *Downloader { mock := &Downloader{} mock.Mock.Test(t) diff --git a/module/executiondatasync/execution_data/mock/execution_data_getter.go b/module/executiondatasync/execution_data/mock/execution_data_getter.go new file mode 100644 index 00000000000..ee19f20831d --- /dev/null +++ b/module/executiondatasync/execution_data/mock/execution_data_getter.go @@ -0,0 +1,61 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + + mock "github.com/stretchr/testify/mock" +) + +// ExecutionDataGetter is an autogenerated mock type for the ExecutionDataGetter type +type ExecutionDataGetter struct { + mock.Mock +} + +// Get provides a mock function with given fields: ctx, rootID +func (_m *ExecutionDataGetter) Get(ctx context.Context, rootID flow.Identifier) (*execution_data.BlockExecutionData, error) { + ret := _m.Called(ctx, rootID) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *execution_data.BlockExecutionData + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*execution_data.BlockExecutionData, error)); ok { + return rf(ctx, rootID) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) *execution_data.BlockExecutionData); ok { + r0 = rf(ctx, rootID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution_data.BlockExecutionData) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier) error); ok { + r1 = rf(ctx, rootID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewExecutionDataGetter creates a new instance of ExecutionDataGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionDataGetter(t interface { + mock.TestingT + Cleanup(func()) +}) *ExecutionDataGetter { + mock := &ExecutionDataGetter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/executiondatasync/execution_data/mock/execution_data_store.go b/module/executiondatasync/execution_data/mock/execution_data_store.go index f4360871bea..42fd911fb57 100644 --- a/module/executiondatasync/execution_data/mock/execution_data_store.go +++ b/module/executiondatasync/execution_data/mock/execution_data_store.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -16,10 +16,14 @@ type ExecutionDataStore struct { mock.Mock } -// AddExecutionData provides a mock function with given fields: ctx, executionData -func (_m *ExecutionDataStore) AddExecutionData(ctx context.Context, executionData *execution_data.BlockExecutionData) (flow.Identifier, error) { +// Add provides a mock function with given fields: ctx, executionData +func (_m *ExecutionDataStore) Add(ctx context.Context, executionData *execution_data.BlockExecutionData) (flow.Identifier, error) { ret := _m.Called(ctx, executionData) + if len(ret) == 0 { + panic("no return value specified for Add") + } + var r0 flow.Identifier var r1 error if rf, ok := ret.Get(0).(func(context.Context, *execution_data.BlockExecutionData) (flow.Identifier, error)); ok { @@ -42,10 +46,14 @@ func (_m *ExecutionDataStore) AddExecutionData(ctx context.Context, executionDat return r0, r1 } -// GetExecutionData provides a mock function with given fields: ctx, rootID -func (_m *ExecutionDataStore) GetExecutionData(ctx context.Context, rootID flow.Identifier) (*execution_data.BlockExecutionData, error) { +// Get provides a mock function with given fields: ctx, rootID +func (_m *ExecutionDataStore) Get(ctx context.Context, rootID flow.Identifier) (*execution_data.BlockExecutionData, error) { ret := _m.Called(ctx, rootID) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 *execution_data.BlockExecutionData var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*execution_data.BlockExecutionData, error)); ok { @@ -68,13 +76,12 @@ func (_m *ExecutionDataStore) GetExecutionData(ctx context.Context, rootID flow. return r0, r1 } -type mockConstructorTestingTNewExecutionDataStore interface { +// NewExecutionDataStore creates a new instance of ExecutionDataStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionDataStore(t interface { mock.TestingT Cleanup(func()) -} - -// NewExecutionDataStore creates a new instance of ExecutionDataStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewExecutionDataStore(t mockConstructorTestingTNewExecutionDataStore) *ExecutionDataStore { +}) *ExecutionDataStore { mock := &ExecutionDataStore{} mock.Mock.Test(t) diff --git a/module/executiondatasync/execution_data/mock/processed_height_recorder.go b/module/executiondatasync/execution_data/mock/processed_height_recorder.go new file mode 100644 index 00000000000..6b5afade686 --- /dev/null +++ b/module/executiondatasync/execution_data/mock/processed_height_recorder.go @@ -0,0 +1,55 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + mock "github.com/stretchr/testify/mock" +) + +// ProcessedHeightRecorder is an autogenerated mock type for the ProcessedHeightRecorder type +type ProcessedHeightRecorder struct { + mock.Mock +} + +// HighestCompleteHeight provides a mock function with no fields +func (_m *ProcessedHeightRecorder) HighestCompleteHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestCompleteHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// OnBlockProcessed provides a mock function with given fields: _a0 +func (_m *ProcessedHeightRecorder) OnBlockProcessed(_a0 uint64) { + _m.Called(_a0) +} + +// SetHeightUpdatesConsumer provides a mock function with given fields: _a0 +func (_m *ProcessedHeightRecorder) SetHeightUpdatesConsumer(_a0 execution_data.HeightUpdatesConsumer) { + _m.Called(_a0) +} + +// NewProcessedHeightRecorder creates a new instance of ProcessedHeightRecorder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProcessedHeightRecorder(t interface { + mock.TestingT + Cleanup(func()) +}) *ProcessedHeightRecorder { + mock := &ProcessedHeightRecorder{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/executiondatasync/execution_data/mock/serializer.go b/module/executiondatasync/execution_data/mock/serializer.go new file mode 100644 index 00000000000..cecae509493 --- /dev/null +++ b/module/executiondatasync/execution_data/mock/serializer.go @@ -0,0 +1,76 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + io "io" + + mock "github.com/stretchr/testify/mock" +) + +// Serializer is an autogenerated mock type for the Serializer type +type Serializer struct { + mock.Mock +} + +// Deserialize provides a mock function with given fields: _a0 +func (_m *Serializer) Deserialize(_a0 io.Reader) (interface{}, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Deserialize") + } + + var r0 interface{} + var r1 error + if rf, ok := ret.Get(0).(func(io.Reader) (interface{}, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(io.Reader) interface{}); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + if rf, ok := ret.Get(1).(func(io.Reader) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Serialize provides a mock function with given fields: _a0, _a1 +func (_m *Serializer) Serialize(_a0 io.Writer, _a1 interface{}) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Serialize") + } + + var r0 error + if rf, ok := ret.Get(0).(func(io.Writer, interface{}) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewSerializer creates a new instance of Serializer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewSerializer(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *Serializer {
+	mock := &Serializer{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/module/executiondatasync/execution_data/processed_height_recorder.go b/module/executiondatasync/execution_data/processed_height_recorder.go
new file mode 100644
index 00000000000..4a59effdff4
--- /dev/null
+++ b/module/executiondatasync/execution_data/processed_height_recorder.go
@@ -0,0 +1,65 @@
+package execution_data
+
+import (
+	"sync"
+
+	"go.uber.org/atomic"
+)
+
+type HeightUpdatesConsumer func(height uint64)
+
+// ProcessedHeightRecorder is an interface for tracking the highest processed execution data
+// height when a block is processed and for providing this height.
+type ProcessedHeightRecorder interface {
+	// OnBlockProcessed updates the highest processed height when a block is processed.
+	OnBlockProcessed(uint64)
+	// HighestCompleteHeight returns the highest complete processed block height.
+	HighestCompleteHeight() uint64
+
+	// SetHeightUpdatesConsumer subscribes a consumer for processed height updates.
+	// Callbacks are invoked synchronously and must be non-blocking.
+	SetHeightUpdatesConsumer(HeightUpdatesConsumer)
+}
+
+var _ ProcessedHeightRecorder = (*ProcessedHeightRecorderManager)(nil)
+
+// ProcessedHeightRecorderManager manages an execution data height recorder
+// and tracks the highest processed block height.
+type ProcessedHeightRecorderManager struct {
+	lock                  sync.RWMutex
+	highestCompleteHeight *atomic.Uint64
+	consumer              HeightUpdatesConsumer
+}
+
+// NewProcessedHeightRecorderManager creates a new ProcessedHeightRecorderManager with the given initial height.
+func NewProcessedHeightRecorderManager(initHeight uint64) *ProcessedHeightRecorderManager {
+	return &ProcessedHeightRecorderManager{
+		highestCompleteHeight: atomic.NewUint64(initHeight),
+	}
+}
+
+// OnBlockProcessed updates the highest processed height when a block is processed.
+func (e *ProcessedHeightRecorderManager) OnBlockProcessed(height uint64) {
+	if height > e.highestCompleteHeight.Load() {
+		e.highestCompleteHeight.Store(height)
+
+		e.lock.RLock()
+		defer e.lock.RUnlock()
+		if e.consumer != nil {
+			e.consumer(height)
+		}
+	}
+}
+
+// HighestCompleteHeight returns the highest complete processed block height.
+func (e *ProcessedHeightRecorderManager) HighestCompleteHeight() uint64 {
+	return e.highestCompleteHeight.Load()
+}
+
+// SetHeightUpdatesConsumer subscribes a consumer for processed height updates.
+func (e *ProcessedHeightRecorderManager) SetHeightUpdatesConsumer(consumer HeightUpdatesConsumer) {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+
+	e.consumer = consumer
+}
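Taken together, the recorder behaves as a monotonic high-water mark with an optional synchronous observer. A small hypothetical sketch of that contract, using only the API above:

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
)

// watchHeights demonstrates the recorder's contract: OnBlockProcessed only
// moves the height forward, and the registered consumer is invoked
// synchronously on each new high-water mark, so it must not block.
func watchHeights() {
	recorder := execution_data.NewProcessedHeightRecorderManager(100)

	recorder.SetHeightUpdatesConsumer(func(height uint64) {
		fmt.Println("new highest processed height:", height)
	})

	recorder.OnBlockProcessed(101) // consumer fires with 101
	recorder.OnBlockProcessed(99)  // below the high-water mark, no update

	fmt.Println(recorder.HighestCompleteHeight()) // 101
}
```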
diff --git a/module/executiondatasync/execution_data/serializer.go b/module/executiondatasync/execution_data/serializer.go
index e47b6d9ed9b..942bb02a4e8 100644
--- a/module/executiondatasync/execution_data/serializer.go
+++ b/module/executiondatasync/execution_data/serializer.go
@@ -10,10 +10,20 @@ import (
 	"github.com/onflow/flow-go/model/encoding"
 	"github.com/onflow/flow-go/model/encoding/cbor"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data/internal"
 	"github.com/onflow/flow-go/network"
 	"github.com/onflow/flow-go/network/compressor"
 )
+
+// DefaultSerializer is the default implementation for an Execution Data serializer.
+// It is configured to use cbor encoding with LZ4 compression.
+//
+// CAUTION: this encoding should only be used for encoding/decoding data within a node.
+// If used for decoding data that is shared between nodes, it makes the recipient VULNERABLE
+// to RESOURCE EXHAUSTION ATTACKS, where a byzantine sender could include garbage data in the
+// encoding, which would not be noticed by the recipient because the garbage data is dropped
+// at the decoding step - yet, it consumes the recipient's networking bandwidth.
 var DefaultSerializer Serializer
 
 func init() {
@@ -33,23 +43,28 @@ func init() {
 	DefaultSerializer = NewSerializer(codec, compressor.NewLz4Compressor())
 }
 
-// header codes to distinguish between different types of data
-// these codes provide simple versioning of execution state data blobs and indicate how the data
-// should be deserialized into their original form. Therefore, each input format must have a unique
-// code, and the codes must never be reused. This allows for libraries that can accurately decode
-// the data without juggling software versions.
+// header codes are used to distinguish between the different data types serialized within a blob.
+// They provide simple versioning of execution state data blobs and indicate how the data should
+// be deserialized back into their original form. Therefore, each input format must have a unique
+// code, and the codes must never be reused. This allows libraries to accurately decode the data
+// without juggling software versions.
 const (
 	codeRecursiveCIDs = iota + 1
 	codeExecutionDataRoot
-	codeChunkExecutionData
+	codeChunkExecutionDataV1
+	codeChunkExecutionDataV2 // includes transaction results
 )
 
+// getCode returns the header code for the given value's type.
+// It returns an error if the type is not supported.
 func getCode(v interface{}) (byte, error) {
 	switch v.(type) {
-	case *BlockExecutionDataRoot:
+	case *flow.BlockExecutionDataRoot:
 		return codeExecutionDataRoot, nil
+	case *internal.ChunkExecutionDataV1: // only used for backwards compatibility testing
+		return codeChunkExecutionDataV1, nil
 	case *ChunkExecutionData:
-		return codeChunkExecutionData, nil
+		return codeChunkExecutionDataV2, nil
 	case []cid.Cid:
 		return codeRecursiveCIDs, nil
 	default:
@@ -57,12 +72,14 @@ func getCode(v interface{}) (byte, error) {
 	}
 }
 
+// getPrototype returns a new instance of the type that corresponds to the given header code.
+// It returns an error if the code is not supported.
 func getPrototype(code byte) (interface{}, error) {
 	switch code {
 	case codeExecutionDataRoot:
-		return &BlockExecutionDataRoot{}, nil
-	case codeChunkExecutionData:
-		return &ChunkExecutionData{}, nil
+		return &flow.BlockExecutionDataRoot{}, nil
+	case codeChunkExecutionDataV2, codeChunkExecutionDataV1:
+		return &ChunkExecutionData{}, nil // only return the latest version
 	case codeRecursiveCIDs:
 		return &[]cid.Cid{}, nil
 	default:
@@ -73,7 +90,12 @@ func getPrototype(code byte) (interface{}, error) {
 // Serializer is used to serialize / deserialize Execution Data and CID lists for the
 // Execution Data Service.
 type Serializer interface {
+	// Serialize encodes and compresses the given value to the given writer.
+	// No errors are expected during normal operation.
 	Serialize(io.Writer, interface{}) error
+
+	// Deserialize decompresses and decodes the data from the given reader.
+	// No errors are expected during normal operation.
Deserialize(io.Reader) (interface{}, error) } @@ -87,6 +109,7 @@ type serializer struct { compressor network.Compressor } +// NewSerializer returns a new Execution Data serializer using the provided encoder and compressor. func NewSerializer(codec encoding.Codec, compressor network.Compressor) *serializer { return &serializer{ codec: codec, @@ -116,7 +139,8 @@ func (s *serializer) writePrototype(w io.Writer, v interface{}) error { return nil } -// Serialize encodes and compresses the given value to the given writer +// Serialize encodes and compresses the given value to the given writer. +// No errors are expected during normal operation. func (s *serializer) Serialize(w io.Writer, v interface{}) error { if err := s.writePrototype(w, v); err != nil { return fmt.Errorf("failed to write prototype: %w", err) @@ -162,7 +186,8 @@ func (s *serializer) readPrototype(r io.Reader) (interface{}, error) { return getPrototype(code) } -// Deserialize decompresses and decodes the data from the given reader +// Deserialize decompresses and decodes the data from the given reader. +// No errors are expected during normal operation. func (s *serializer) Deserialize(r io.Reader) (interface{}, error) { v, err := s.readPrototype(r) diff --git a/module/executiondatasync/execution_data/serializer_test.go b/module/executiondatasync/execution_data/serializer_test.go new file mode 100644 index 00000000000..8861c26f855 --- /dev/null +++ b/module/executiondatasync/execution_data/serializer_test.go @@ -0,0 +1,62 @@ +package execution_data_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/internal" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestSerializeDeserializeChunkExecutionData(t *testing.T) { + serializer := execution_data.DefaultSerializer + + // Test serializing the current ChunkExecutionData version, then deserializing it back to the + // same type. Make sure the start and end data structures are the same. + t.Run("serialize and deserialize ChunkExecutionData", func(t *testing.T) { + ced := unittest.ChunkExecutionDataFixture(t, 1024, unittest.WithChunkEvents(unittest.EventsFixture(5))) + + buf := new(bytes.Buffer) + err := serializer.Serialize(buf, ced) + require.NoError(t, err) + + raw, err := serializer.Deserialize(buf) + require.NoError(t, err) + + actual, ok := raw.(*execution_data.ChunkExecutionData) + assert.True(t, ok) + assert.Equal(t, ced, actual) + }) + + // Test serializing the past ChunkExecutionDataV1 version, then deserializing it to the current + // ChunkExecutionData version. Make sure the fields in the start and end data structures are + // the same. + // + // This test ensures that the current code is backwards compatible with the previous version of + // the data structure. It does NOT ensure that the current data structure is backwards compatible + // with the previous code. 
+ t.Run("serialize ChunkExecutionDataV1 and deserialize to ChunkExecutionData", func(t *testing.T) { + cedV2 := unittest.ChunkExecutionDataFixture(t, 1024, unittest.WithChunkEvents(unittest.EventsFixture(5))) + cedV2.TransactionResults = nil + cedV1 := &internal.ChunkExecutionDataV1{ + Collection: cedV2.Collection, + Events: cedV2.Events, + TrieUpdate: cedV2.TrieUpdate, + } + + buf := new(bytes.Buffer) + err := serializer.Serialize(buf, cedV1) + require.NoError(t, err) + + raw, err := serializer.Deserialize(buf) + require.NoError(t, err) + + actual, ok := raw.(*execution_data.ChunkExecutionData) + assert.True(t, ok) + assert.Equal(t, cedV2, actual) + }) +} diff --git a/module/executiondatasync/execution_data/store.go b/module/executiondatasync/execution_data/store.go index a082a97fe8c..8d31a8a0c4f 100644 --- a/module/executiondatasync/execution_data/store.go +++ b/module/executiondatasync/execution_data/store.go @@ -12,17 +12,24 @@ import ( "github.com/onflow/flow-go/module/blobs" ) -// ExecutionDataStore handles adding / getting execution data to / from a local blobstore -type ExecutionDataStore interface { - // GetExecutionData gets the BlockExecutionData for the given root ID from the blobstore. - // The returned error will be: - // - MalformedDataError if some level of the blob tree cannot be properly deserialized +// ExecutionDataGetter handles getting execution data from a blobstore +type ExecutionDataGetter interface { + // Get gets the BlockExecutionData for the given root ID from the blobstore. + // Expected errors during normal operations: // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore - GetExecutionData(ctx context.Context, rootID flow.Identifier) (*BlockExecutionData, error) + // - MalformedDataError if some level of the blob tree cannot be properly deserialized + // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size + Get(ctx context.Context, rootID flow.Identifier) (*BlockExecutionData, error) +} - // AddExecutionData constructs a blob tree for the given BlockExecutionData and adds it to the - // blobstore, and then returns the root CID. - AddExecutionData(ctx context.Context, executionData *BlockExecutionData) (flow.Identifier, error) +// ExecutionDataStore handles adding / getting execution data to / from a blobstore +type ExecutionDataStore interface { + ExecutionDataGetter + + // Add constructs a blob tree for the given BlockExecutionData, adds it to the blobstore, + // then returns the root CID. + // No errors are expected during normal operation. + Add(ctx context.Context, executionData *BlockExecutionData) (flow.Identifier, error) } type ExecutionDataStoreOption func(*store) @@ -34,6 +41,8 @@ func WithMaxBlobSize(size int) ExecutionDataStoreOption { } } +var _ ExecutionDataStore = (*store)(nil) + type store struct { blobstore blobs.Blobstore serializer Serializer @@ -55,8 +64,11 @@ func NewExecutionDataStore(blobstore blobs.Blobstore, serializer Serializer, opt return s } -func (s *store) AddExecutionData(ctx context.Context, executionData *BlockExecutionData) (flow.Identifier, error) { - executionDataRoot := &BlockExecutionDataRoot{ +// Add constructs a blob tree for the given BlockExecutionData, adds it to the blobstore, +// then returns the rootID. +// No errors are expected during normal operation. 
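+//
+// A hedged usage sketch (ctx, blobstore, and executionData are assumed to exist):
+//
+//	eds := NewExecutionDataStore(blobstore, DefaultSerializer)
+//	rootID, err := eds.Add(ctx, executionData)
+//	if err != nil {
+//		return err
+//	}
+//	retrieved, err := eds.Get(ctx, rootID) // returns the same BlockExecutionData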
+func (s *store) Add(ctx context.Context, executionData *BlockExecutionData) (flow.Identifier, error) {
+	executionDataRoot := &flow.BlockExecutionDataRoot{
 		BlockID:               executionData.BlockID,
 		ChunkExecutionDataIDs: make([]cid.Cid, len(executionData.ChunkExecutionDatas)),
 	}
@@ -75,6 +87,13 @@ func (s *store) Add(ctx context.Context, executionData *BlockExecut
 		return flow.ZeroID, fmt.Errorf("could not serialize execution data root: %w", err)
 	}
 
+	// this should never happen unless either:
+	// - maxBlobSize is set too low
+	// - an enormous number of chunks are included in the block
+	// e.g. given a 1MB max size, 32 byte CID and 32 byte blockID:
+	// 1MB/32 bytes - 1 = 32767 chunk CIDs
+	// if the number of chunks in a block ever exceeds this, we will need to update the root blob
+	// generation to support splitting it up into a tree similar to addChunkExecutionData
 	if buf.Len() > s.maxBlobSize {
 		return flow.ZeroID, errors.New("root blob exceeds blob size limit")
 	}
@@ -92,24 +111,38 @@ func (s *store) Add(ctx context.Context, executionData *BlockExecut
 	return rootID, nil
 }
 
+// addChunkExecutionData constructs a blob tree for the given ChunkExecutionData, adds it to the
+// blobstore, and returns the root CID.
+// No errors are expected during normal operation.
 func (s *store) addChunkExecutionData(ctx context.Context, chunkExecutionData *ChunkExecutionData) (cid.Cid, error) {
 	var v interface{} = chunkExecutionData
 
+	// given an arbitrarily large v, split it into blobs of size up to maxBlobSize, adding them to
+	// the blobstore. Then, combine the list of CIDs added into a second level of blobs, and repeat.
+	// This produces a tree of blobs, where the leaves are the actual data, and each internal node
+	// contains a list of CIDs for its children.
 	for i := 0; ; i++ {
+		// chunk and store the data, then get the list of CIDs added
 		cids, err := s.addBlobs(ctx, v)
 		if err != nil {
 			return cid.Undef, fmt.Errorf("failed to add blob tree level at height %d: %w", i, err)
 		}
 
+		// once a single CID is left, we have reached the root of the tree
 		if len(cids) == 1 {
 			return cids[0], nil
 		}
 
+		// the next level is the list of CIDs added in this level
 		v = cids
 	}
 }
 
+// addBlobs splits the given value into blobs of size up to maxBlobSize, adds them to the blobstore,
+// then returns the CIDs for each blob added.
+// No errors are expected during normal operation.
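+//
+// For intuition, a hedged worked example (numbers illustrative): with a 1MB
+// maxBlobSize and a chunk that serializes to ~100MB, level 0 produces ~100 leaf
+// blobs. Their ~100 CIDs (~32 bytes each) serialize to well under 1MB, so level 1
+// is a single blob, and the loop in addChunkExecutionData returns its CID as the
+// root after two iterations.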
func (s *store) addBlobs(ctx context.Context, v interface{}) ([]cid.Cid, error) { + // first, serialize the data into a large byte slice buf := new(bytes.Buffer) if err := s.serializer.Serialize(buf, v); err != nil { return nil, fmt.Errorf("could not serialize execution data root: %w", err) @@ -119,6 +152,7 @@ func (s *store) addBlobs(ctx context.Context, v interface{}) ([]cid.Cid, error) var cids []cid.Cid var blbs []blobs.Blob + // next, chunk the data into blobs of size up to maxBlobSize for len(data) > 0 { blobLen := s.maxBlobSize if len(data) < blobLen { @@ -131,6 +165,7 @@ func (s *store) addBlobs(ctx context.Context, v interface{}) ([]cid.Cid, error) cids = append(cids, blob.Cid()) } + // finally, add the blobs to the blobstore and return the list of CIDs if err := s.blobstore.PutMany(ctx, blbs); err != nil { return nil, fmt.Errorf("could not add blobs: %w", err) } @@ -138,9 +173,14 @@ func (s *store) addBlobs(ctx context.Context, v interface{}) ([]cid.Cid, error) return cids, nil } -func (s *store) GetExecutionData(ctx context.Context, rootID flow.Identifier) (*BlockExecutionData, error) { +// Get gets the BlockExecutionData for the given root ID from the blobstore. +// Expected errors during normal operations: +// - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore +// - MalformedDataError if some level of the blob tree cannot be properly deserialized +func (s *store) Get(ctx context.Context, rootID flow.Identifier) (*BlockExecutionData, error) { rootCid := flow.IdToCid(rootID) + // first, get the root blob. it will contain a list of blobs, one for each chunk rootBlob, err := s.blobstore.Get(ctx, rootCid) if err != nil { if errors.Is(err, blobs.ErrNotFound) { @@ -155,11 +195,12 @@ func (s *store) GetExecutionData(ctx context.Context, rootID flow.Identifier) (* return nil, NewMalformedDataError(err) } - executionDataRoot, ok := rootData.(*BlockExecutionDataRoot) + executionDataRoot, ok := rootData.(*flow.BlockExecutionDataRoot) if !ok { return nil, NewMalformedDataError(fmt.Errorf("root blob does not deserialize to a BlockExecutionDataRoot, got %T instead", rootData)) } + // next, get each chunk blob and deserialize it blockExecutionData := &BlockExecutionData{ BlockID: executionDataRoot.BlockID, ChunkExecutionDatas: make([]*ChunkExecutionData, len(executionDataRoot.ChunkExecutionDataIDs)), @@ -177,9 +218,14 @@ func (s *store) GetExecutionData(ctx context.Context, rootID flow.Identifier) (* return blockExecutionData, nil } +// getChunkExecutionData gets the ChunkExecutionData for the given CID from the blobstore. +// Expected errors during normal operations: +// - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore +// - MalformedDataError if some level of the blob tree cannot be properly deserialized func (s *store) getChunkExecutionData(ctx context.Context, chunkExecutionDataID cid.Cid) (*ChunkExecutionData, error) { cids := []cid.Cid{chunkExecutionDataID} + // given a root CID, get the blob tree level by level, until we reach the full ChunkExecutionData for i := 0; ; i++ { v, err := s.getBlobs(ctx, cids) if err != nil { @@ -197,9 +243,14 @@ func (s *store) getChunkExecutionData(ctx context.Context, chunkExecutionDataID } } +// getBlobs gets the blobs for the given CIDs from the blobstore, deserializes them, and returns +// the deserialized value. 
+// Expected errors during normal operations:
+// - BlobNotFoundError if any of the CIDs could not be found from the blobstore +// - MalformedDataError if any of the blobs cannot be properly deserialized func (s *store) getBlobs(ctx context.Context, cids []cid.Cid) (interface{}, error) { buf := new(bytes.Buffer) + // get each blob and append the raw data to the buffer for _, cid := range cids { blob, err := s.blobstore.Get(ctx, cid) if err != nil { @@ -216,6 +267,7 @@ func (s *store) getBlobs(ctx context.Context, cids []cid.Cid) (interface{}, erro } } + // deserialize the buffer into a value, and return it v, err := s.serializer.Deserialize(buf) if err != nil { return nil, NewMalformedDataError(err) diff --git a/module/executiondatasync/execution_data/store_test.go b/module/executiondatasync/execution_data/store_test.go index 39d00d93044..ff475855447 100644 --- a/module/executiondatasync/execution_data/store_test.go +++ b/module/executiondatasync/execution_data/store_test.go @@ -3,9 +3,10 @@ package execution_data_test import ( "bytes" "context" + "crypto/rand" "fmt" "io" - "math/rand" + mrand "math/rand" "testing" "github.com/ipfs/go-cid" @@ -15,8 +16,6 @@ import ( "github.com/stretchr/testify/require" goassert "gotest.tools/assert" - "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/testutils" "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/utils/unittest" @@ -30,44 +29,13 @@ func getExecutionDataStore(blobstore blobs.Blobstore, serializer execution_data. return execution_data.NewExecutionDataStore(blobstore, serializer) } -func generateChunkExecutionData(t *testing.T, minSerializedSize uint64) *execution_data.ChunkExecutionData { - ced := &execution_data.ChunkExecutionData{ - TrieUpdate: testutils.TrieUpdateFixture(1, 1, 8), - } - - size := 1 - - for { - buf := &bytes.Buffer{} - require.NoError(t, execution_data.DefaultSerializer.Serialize(buf, ced)) - - if buf.Len() >= int(minSerializedSize) { - t.Logf("Chunk execution data size: %d", buf.Len()) - return ced - } - - v := make([]byte, size) - _, _ = rand.Read(v) - - k, err := ced.TrieUpdate.Payloads[0].Key() - require.NoError(t, err) - - ced.TrieUpdate.Payloads[0] = ledger.NewPayload(k, v) - size *= 2 - } -} - func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *execution_data.BlockExecutionData { - bed := &execution_data.BlockExecutionData{ - BlockID: unittest.IdentifierFixture(), - ChunkExecutionDatas: make([]*execution_data.ChunkExecutionData, numChunks), - } - + ceds := make([]*execution_data.ChunkExecutionData, numChunks) for i := 0; i < numChunks; i++ { - bed.ChunkExecutionDatas[i] = generateChunkExecutionData(t, minSerializedSizePerChunk) + ceds[i] = unittest.ChunkExecutionDataFixture(t, int(minSerializedSizePerChunk)) } - return bed + return unittest.BlockExecutionDataFixture(unittest.WithChunkExecutionDatas(ceds...)) } func getAllKeys(t *testing.T, bs blobs.Blobstore) []cid.Cid { @@ -103,9 +71,9 @@ func TestHappyPath(t *testing.T) { test := func(numChunks int, minSerializedSizePerChunk uint64) { expected := generateBlockExecutionData(t, numChunks, minSerializedSizePerChunk) - rootId, err := eds.AddExecutionData(context.Background(), expected) + rootId, err := eds.Add(context.Background(), expected) require.NoError(t, err) - actual, err := eds.GetExecutionData(context.Background(), rootId) + actual, err := eds.Get(context.Background(), rootId) require.NoError(t, err) deepEqual(t, expected, actual) } @@ -134,7 +102,7 
@@ type corruptedTailSerializer struct { func newCorruptedTailSerializer(numChunks int) *corruptedTailSerializer { return &corruptedTailSerializer{ - corruptedChunk: rand.Intn(numChunks) + 1, + corruptedChunk: mrand.Intn(numChunks) + 1, } } @@ -171,9 +139,9 @@ func TestMalformedData(t *testing.T) { blobstore := getBlobstore() defaultEds := getExecutionDataStore(blobstore, execution_data.DefaultSerializer) malformedEds := getExecutionDataStore(blobstore, serializer) - rootID, err := malformedEds.AddExecutionData(context.Background(), bed) + rootID, err := malformedEds.Add(context.Background(), bed) require.NoError(t, err) - _, err = defaultEds.GetExecutionData(context.Background(), rootID) + _, err = defaultEds.Get(context.Background(), rootID) assert.True(t, execution_data.IsMalformedDataError(err)) } @@ -191,16 +159,16 @@ func TestGetIncompleteData(t *testing.T) { eds := getExecutionDataStore(blobstore, execution_data.DefaultSerializer) bed := generateBlockExecutionData(t, 5, 10*execution_data.DefaultMaxBlobSize) - rootID, err := eds.AddExecutionData(context.Background(), bed) + rootID, err := eds.Add(context.Background(), bed) require.NoError(t, err) cids := getAllKeys(t, blobstore) t.Logf("%d blobs in blob tree", len(cids)) - cidToDelete := cids[rand.Intn(len(cids))] + cidToDelete := cids[mrand.Intn(len(cids))] require.NoError(t, blobstore.DeleteBlob(context.Background(), cidToDelete)) - _, err = eds.GetExecutionData(context.Background(), rootID) + _, err = eds.Get(context.Background(), rootID) var blobNotFoundError *execution_data.BlobNotFoundError assert.ErrorAs(t, err, &blobNotFoundError) } diff --git a/module/executiondatasync/execution_data/util.go b/module/executiondatasync/execution_data/util.go index f2585f4c61f..50582d19840 100644 --- a/module/executiondatasync/execution_data/util.go +++ b/module/executiondatasync/execution_data/util.go @@ -7,10 +7,12 @@ import ( "github.com/onflow/flow-go/module/blobs" ) +// CalculateID calculates the root ID of the given execution data without storing any data. +// No errors are expected during normal operation. 
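+//
+// A hedged usage sketch (ctx and execData are assumed to exist):
+//
+//	rootID, err := CalculateID(ctx, execData, DefaultSerializer)
+//	if err != nil {
+//		return flow.ZeroID, err
+//	}
+//	// rootID matches what ExecutionDataStore.Add would return, without storing blobs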
func CalculateID(ctx context.Context, execData *BlockExecutionData, serializer Serializer) (flow.Identifier, error) {
-	executionDatastore := NewExecutionDataStore(&blobs.NoopBlobstore{}, serializer)
+	executionDatastore := NewExecutionDataStore(blobs.NewNoopBlobstore(), serializer)
-	id, err := executionDatastore.AddExecutionData(ctx, execData)
+	id, err := executionDatastore.Add(ctx, execData)
 	if err != nil {
 		return flow.ZeroID, err
 	}
diff --git a/module/executiondatasync/optimistic_sync/core.go b/module/executiondatasync/optimistic_sync/core.go
new file mode 100644
index 00000000000..c222b28a8ac
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/core.go
@@ -0,0 +1,370 @@
+package optimistic_sync
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/rs/zerolog"
+	"golang.org/x/sync/errgroup"
+
+	"github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
+	"github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/persisters"
+	"github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/persisters/stores"
+	"github.com/onflow/flow-go/module/state_synchronization/indexer"
+	"github.com/onflow/flow-go/module/state_synchronization/requester"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/inmemory"
+)
+
+// DefaultTxResultErrMsgsRequestTimeout is the default timeout for requesting transaction result error messages.
+const DefaultTxResultErrMsgsRequestTimeout = 5 * time.Second
+
+// errResultAbandoned is returned when calling one of the methods after the result has been abandoned.
+// Not exported because this is not an expected error condition.
+var errResultAbandoned = fmt.Errorf("result abandoned")
+
+// <component_spec>
+// Core defines the interface for pipelined execution result processing. There are 3 main steps which
+// must be completed sequentially and exactly once.
+// 1. Download the BlockExecutionData and TransactionResultErrorMessages for the execution result.
+// 2. Index the downloaded data into mempools.
+// 3. Persist the indexed data into persistent storage.
+//
+// If the protocol abandons the execution result, Abandon() is called to signal to the Core instance
+// that processing will stop and any data accumulated may be discarded. Abandon() may be called at
+// any time, but may block until in-progress operations are complete.
+// </component_spec>
+//
+// All exported methods are safe for concurrent use.
+type Core interface {
+	// Download retrieves all necessary data for processing from the network.
+	// Download will block until the data is successfully downloaded, and has no internal timeout.
+	// When Abandon is called, the caller must cancel the context passed in to shut down the operation,
+	// otherwise it may block indefinitely.
+	//
+	// Expected error returns during normal operation:
+	// - [context.Canceled]: if the provided context was canceled before completion
+	Download(ctx context.Context) error
+
+	// Index processes the downloaded data and stores it into in-memory indexes.
+	// Must be called after Download.
+	//
+	// No error returns are expected during normal operations
+	Index() error
+
+	// Persist stores the indexed data in permanent storage.
+	// Must be called after Index.
+	//
+	// No error returns are expected during normal operations
+	Persist() error
+
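+	// A hedged lifecycle sketch (core is an assumed Core instance; the happy path
+	// runs the three steps in order exactly once):
+	//
+	//	if err := core.Download(ctx); err != nil { return err }
+	//	if err := core.Index(); err != nil { return err }
+	//	if err := core.Persist(); err != nil { return err }
+	//
+	// If the protocol abandons the result instead, cancel ctx and call core.Abandon().
+
+	// Abandon indicates that the protocol has abandoned this state.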
Hence processing will be aborted + // and any data dropped. + // This method will block until other in-progress operations are complete. If Download is in progress, + // the caller should cancel its context to ensure the operation completes in a timely manner. + Abandon() +} + +// workingData encapsulates all components and temporary storage +// involved in processing a single block's execution data. When processing +// is complete or abandoned, the entire workingData can be discarded. +type workingData struct { + protocolDB storage.DB + lockManager storage.LockManager + + persistentRegisters storage.RegisterIndex + persistentEvents storage.Events + persistentCollections storage.Collections + persistentResults storage.LightTransactionResults + persistentTxResultErrMsgs storage.TransactionResultErrorMessages + latestPersistedSealedResult storage.LatestPersistedSealedResult + + inmemRegisters *inmemory.RegistersReader + inmemEvents *inmemory.EventsReader + inmemCollections *inmemory.CollectionsReader + inmemTransactions *inmemory.TransactionsReader + inmemResults *inmemory.LightTransactionResultsReader + inmemTxResultErrMsgs *inmemory.TransactionResultErrorMessagesReader + + // Active processing components + execDataRequester requester.ExecutionDataRequester + txResultErrMsgsRequester tx_error_messages.Requester + txResultErrMsgsRequestTimeout time.Duration + + // Working data + executionData *execution_data.BlockExecutionData + txResultErrMsgsData []flow.TransactionResultErrorMessage + indexerData *indexer.IndexerData + persisted bool +} + +var _ Core = (*CoreImpl)(nil) + +// CoreImpl implements the Core interface for processing execution data. +// It coordinates the download, indexing, and persisting of execution data. +// +// Safe for concurrent use. +type CoreImpl struct { + log zerolog.Logger + mu sync.Mutex + + workingData *workingData + + executionResult *flow.ExecutionResult + block *flow.Block +} + +// NewCoreImpl creates a new CoreImpl with all necessary dependencies +// Safe for concurrent use. +// +// No error returns are expected during normal operations +func NewCoreImpl( + logger zerolog.Logger, + executionResult *flow.ExecutionResult, + block *flow.Block, + execDataRequester requester.ExecutionDataRequester, + txResultErrMsgsRequester tx_error_messages.Requester, + txResultErrMsgsRequestTimeout time.Duration, + persistentRegisters storage.RegisterIndex, + persistentEvents storage.Events, + persistentCollections storage.Collections, + persistentResults storage.LightTransactionResults, + persistentTxResultErrMsg storage.TransactionResultErrorMessages, + latestPersistedSealedResult storage.LatestPersistedSealedResult, + protocolDB storage.DB, + lockManager storage.LockManager, +) (*CoreImpl, error) { + if block.ID() != executionResult.BlockID { + return nil, fmt.Errorf("header ID and execution result block ID must match") + } + + coreLogger := logger.With(). + Str("component", "execution_data_core"). + Str("execution_result_id", executionResult.ID().String()). + Str("block_id", executionResult.BlockID.String()). + Uint64("height", block.Height). 
+		Logger()
+
+	return &CoreImpl{
+		log:             coreLogger,
+		block:           block,
+		executionResult: executionResult,
+		workingData: &workingData{
+			protocolDB:  protocolDB,
+			lockManager: lockManager,
+
+			execDataRequester:             execDataRequester,
+			txResultErrMsgsRequester:      txResultErrMsgsRequester,
+			txResultErrMsgsRequestTimeout: txResultErrMsgsRequestTimeout,
+
+			persistentRegisters:         persistentRegisters,
+			persistentEvents:            persistentEvents,
+			persistentCollections:      persistentCollections,
+			persistentResults:           persistentResults,
+			persistentTxResultErrMsgs:   persistentTxResultErrMsg,
+			latestPersistedSealedResult: latestPersistedSealedResult,
+		},
+	}, nil
+}
+
+// Download retrieves all necessary data for processing from the network.
+// Download will block until the data is successfully downloaded, and has no internal timeout.
+// When Abandon is called, the caller must cancel the context passed in to shut down the operation,
+// otherwise it may block indefinitely.
+//
+// The method may only be called once. Calling it multiple times will return an error.
+// Calling Download after Abandon is called will return an error.
+//
+// Expected error returns during normal operation:
+// - [context.Canceled]: if the provided context was canceled before completion
+func (c *CoreImpl) Download(ctx context.Context) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.workingData == nil {
+		return errResultAbandoned
+	}
+	if c.workingData.executionData != nil {
+		return fmt.Errorf("already downloaded")
+	}
+
+	c.log.Debug().Msg("downloading execution data")
+
+	g, gCtx := errgroup.WithContext(ctx)
+
+	var executionData *execution_data.BlockExecutionData
+	g.Go(func() error {
+		var err error
+		executionData, err = c.workingData.execDataRequester.RequestExecutionData(gCtx)
+		if err != nil {
+			return fmt.Errorf("failed to request execution data: %w", err)
+		}
+
+		return nil
+	})
+
+	var txResultErrMsgsData []flow.TransactionResultErrorMessage
+	g.Go(func() error {
+		timeoutCtx, cancel := context.WithTimeout(gCtx, c.workingData.txResultErrMsgsRequestTimeout)
+		defer cancel()
+
+		var err error
+		txResultErrMsgsData, err = c.workingData.txResultErrMsgsRequester.Request(timeoutCtx)
+		if err != nil {
+			// transaction error messages are downloaded from execution nodes over grpc and have no
+			// protocol guarantees for delivery or correctness. Therefore, we attempt to download them
+			// on a best-effort basis, and give up after a reasonable timeout to avoid blocking the
+			// main indexing process. Missing error messages are handled gracefully by the rest of
+			// the system, and can be retried or backfilled as needed later.
+			if errors.Is(err, context.DeadlineExceeded) {
+				c.log.Debug().
+					Dur("timeout", c.workingData.txResultErrMsgsRequestTimeout).
+					Msg("transaction result error messages request timed out")
+				return nil
+			}
+
+			return fmt.Errorf("failed to request transaction result error messages data: %w", err)
+		}
+		return nil
+	})
+
+	if err := g.Wait(); err != nil {
+		return err
+	}
+
+	c.workingData.executionData = executionData
+	c.workingData.txResultErrMsgsData = txResultErrMsgsData
+
+	c.log.Debug().Msg("successfully downloaded execution data")
+
+	return nil
+}
+
+// Index processes the downloaded data and stores it into in-memory indexes.
+// Must be called after Download.
+//
+// The method may only be called once. Calling it multiple times will return an error.
+// Calling Index after Abandon is called will return an error.
+// +// No error returns are expected during normal operations +func (c *CoreImpl) Index() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.workingData == nil { + return errResultAbandoned + } + if c.workingData.executionData == nil { + return fmt.Errorf("downloading is not complete") + } + if c.workingData.indexerData != nil { + return fmt.Errorf("already indexed") + } + + c.log.Debug().Msg("indexing execution data") + + indexerComponent, err := indexer.NewInMemoryIndexer(c.log, c.block, c.executionResult) + if err != nil { + return fmt.Errorf("failed to create indexer: %w", err) + } + + indexerData, err := indexerComponent.IndexBlockData(c.workingData.executionData) + if err != nil { + return fmt.Errorf("failed to index execution data: %w", err) + } + + if c.workingData.txResultErrMsgsData != nil { + err = indexer.ValidateTxErrors(indexerData.Results, c.workingData.txResultErrMsgsData) + if err != nil { + return fmt.Errorf("failed to validate transaction result error messages: %w", err) + } + } + + blockID := c.executionResult.BlockID + + c.workingData.indexerData = indexerData + c.workingData.inmemCollections = inmemory.NewCollections(indexerData.Collections) + c.workingData.inmemTransactions = inmemory.NewTransactions(indexerData.Transactions) + c.workingData.inmemTxResultErrMsgs = inmemory.NewTransactionResultErrorMessages(blockID, c.workingData.txResultErrMsgsData) + c.workingData.inmemEvents = inmemory.NewEvents(blockID, indexerData.Events) + c.workingData.inmemResults = inmemory.NewLightTransactionResults(blockID, indexerData.Results) + c.workingData.inmemRegisters = inmemory.NewRegisters(c.block.Height, indexerData.Registers) + + c.log.Debug().Msg("successfully indexed execution data") + + return nil +} + +// Persist stores the indexed data in permanent storage. +// Must be called after Index. +// +// The method may only be called once. Calling it multiple times will return an error. +// Calling Persist after Abandon is called will return an error. +// +// No error returns are expected during normal operations +func (c *CoreImpl) Persist() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.workingData == nil { + return errResultAbandoned + } + if c.workingData.persisted { + return fmt.Errorf("already persisted") + } + if c.workingData.indexerData == nil { + return fmt.Errorf("indexing is not complete") + } + + c.log.Debug().Msg("persisting execution data") + + indexerData := c.workingData.indexerData + + // the BlockPersister updates the latest persisted sealed result within the batch operation, so + // all other updates must be done before the batch is committed to ensure the state remains + // consistent. The registers db allows repeated indexing of the most recent block's registers, + // so it is safe to persist them before the block persister. 
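+	// Ordering sketch (a summary of the assumption above, not additional logic):
+	// 1. registers are written first; if a crash happens before step 2, they can
+	//    safely be re-indexed for the same height on retry.
+	// 2. all block-scoped data plus the latest-persisted-sealed-result marker are
+	//    committed together in one batch, so step 2 is atomic.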
+ registerPersister := persisters.NewRegistersPersister(indexerData.Registers, c.workingData.persistentRegisters, c.block.Height) + if err := registerPersister.Persist(); err != nil { + return fmt.Errorf("failed to persist registers: %w", err) + } + + persisterStores := []stores.PersisterStore{ + stores.NewEventsStore(indexerData.Events, c.workingData.persistentEvents, c.executionResult.BlockID), + stores.NewResultsStore(indexerData.Results, c.workingData.persistentResults, c.executionResult.BlockID), + stores.NewCollectionsStore(indexerData.Collections, c.workingData.persistentCollections), + stores.NewTxResultErrMsgStore(c.workingData.txResultErrMsgsData, c.workingData.persistentTxResultErrMsgs, c.executionResult.BlockID), + stores.NewLatestSealedResultStore(c.workingData.latestPersistedSealedResult, c.executionResult.ID(), c.block.Height), + } + blockPersister := persisters.NewBlockPersister( + c.log, + c.workingData.protocolDB, + c.workingData.lockManager, + c.executionResult, + persisterStores, + ) + if err := blockPersister.Persist(); err != nil { + return fmt.Errorf("failed to persist block data: %w", err) + } + + // reset the indexer data to prevent multiple calls to Persist + c.workingData.indexerData = nil + c.workingData.persisted = true + + return nil +} + +// Abandon indicates that the protocol has abandoned this state. Hence processing will be aborted +// and any data dropped. +// This method will block until other in-progress operations are complete. If Download is in progress, +// the caller should cancel its context to ensure the operation completes in a timely manner. +// +// The method is idempotent. Calling it multiple times has no effect. +func (c *CoreImpl) Abandon() { + c.mu.Lock() + defer c.mu.Unlock() + + c.workingData = nil +} diff --git a/module/executiondatasync/optimistic_sync/core_impl_test.go b/module/executiondatasync/optimistic_sync/core_impl_test.go new file mode 100644 index 00000000000..19b87866603 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/core_impl_test.go @@ -0,0 +1,525 @@ +package optimistic_sync + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + txerrmsgsmock "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages/mock" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + reqestermock "github.com/onflow/flow-go/module/state_synchronization/requester/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/fixtures" +) + +// CoreImplSuite is a test suite for testing the CoreImpl. 
+type CoreImplSuite struct {
+	suite.Suite
+	execDataRequester             *reqestermock.ExecutionDataRequester
+	txResultErrMsgsRequester      *txerrmsgsmock.Requester
+	txResultErrMsgsRequestTimeout time.Duration
+	db                            *storagemock.DB
+	persistentRegisters           *storagemock.RegisterIndex
+	persistentEvents              *storagemock.Events
+	persistentCollections         *storagemock.Collections
+	persistentTransactions        *storagemock.Transactions
+	persistentResults             *storagemock.LightTransactionResults
+	persistentTxResultErrMsg      *storagemock.TransactionResultErrorMessages
+	latestPersistedSealedResult   *storagemock.LatestPersistedSealedResult
+}
+
+func TestCoreImplSuite(t *testing.T) {
+	t.Parallel()
+	suite.Run(t, new(CoreImplSuite))
+}
+
+func (c *CoreImplSuite) SetupTest() {
+	t := c.T()
+
+	c.execDataRequester = reqestermock.NewExecutionDataRequester(t)
+	c.txResultErrMsgsRequester = txerrmsgsmock.NewRequester(t)
+	c.txResultErrMsgsRequestTimeout = 100 * time.Millisecond
+}
+
+// createTestCoreImpl creates a CoreImpl instance with mocked dependencies for testing.
+//
+// Returns a configured CoreImpl ready for testing.
+func (c *CoreImplSuite) createTestCoreImpl(tf *testFixture) *CoreImpl {
+	core, err := NewCoreImpl(
+		unittest.Logger(),
+		tf.exeResult,
+		tf.block,
+		c.execDataRequester,
+		c.txResultErrMsgsRequester,
+		c.txResultErrMsgsRequestTimeout,
+		c.persistentRegisters,
+		c.persistentEvents,
+		c.persistentCollections,
+		c.persistentResults,
+		c.persistentTxResultErrMsg,
+		c.latestPersistedSealedResult,
+		c.db,
+		storage.NewTestingLockManager(),
+	)
+	c.NoError(err)
+	return core
+}
+
+type testFixture struct {
+	block     *flow.Block
+	exeResult *flow.ExecutionResult
+	execData  *execution_data.BlockExecutionData
+	txErrMsgs []flow.TransactionResultErrorMessage
+}
+
+// generateFixture generates a test fixture for the indexer. The returned data has the following
+// properties:
+// - The block execution data contains collections for each of the block's guarantees, plus the system chunk
+// - Each collection has 3 transactions
+// - The first path in each trie update is the same, testing that the indexer will use the last value
+// - Every 3rd transaction is failed
+// - There are tx error messages for all failed transactions
+func generateFixture(g *fixtures.GeneratorSuite) *testFixture {
+	collections := g.Collections().List(4, fixtures.Collection.WithTxCount(3))
+	chunkExecutionDatas := make([]*execution_data.ChunkExecutionData, len(collections))
+	guarantees := make([]*flow.CollectionGuarantee, len(collections)-1)
+	var txErrMsgs []flow.TransactionResultErrorMessage
+	path := g.LedgerPaths().Fixture()
+	for i, collection := range collections {
+		chunkData := g.ChunkExecutionDatas().Fixture(
+			fixtures.ChunkExecutionData.WithCollection(collection),
+		)
+		// use the same path for the first ledger payload in each chunk. The indexer should choose the
+		// last value in the register entry.
+		chunkData.TrieUpdate.Paths[0] = path
+		chunkExecutionDatas[i] = chunkData
+
+		if i < len(collections)-1 {
+			guarantees[i] = g.Guarantees().Fixture(fixtures.Guarantee.WithCollectionID(collection.ID()))
+		}
+		for txIndex := range chunkExecutionDatas[i].TransactionResults {
+			if txIndex%3 == 0 {
+				chunkExecutionDatas[i].TransactionResults[txIndex].Failed = true
+			}
+		}
+		txErrMsgs = append(txErrMsgs, g.TransactionErrorMessages().ForTransactionResults(chunkExecutionDatas[i].TransactionResults)...)
+ } + + payload := g.Payloads().Fixture(fixtures.Payload.WithGuarantees(guarantees...)) + block := g.Blocks().Fixture(fixtures.Block.WithPayload(payload)) + + exeResult := g.ExecutionResults().Fixture(fixtures.ExecutionResult.WithBlock(block)) + execData := g.BlockExecutionDatas().Fixture( + fixtures.BlockExecutionData.WithBlockID(block.ID()), + fixtures.BlockExecutionData.WithChunkExecutionDatas(chunkExecutionDatas...), + ) + return &testFixture{ + block: block, + exeResult: exeResult, + execData: execData, + txErrMsgs: txErrMsgs, + } +} + +func (c *CoreImplSuite) TestCoreImpl_Constructor() { + block := unittest.BlockFixture() + executionResult := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + + c.Run("happy path", func() { + core, err := NewCoreImpl( + unittest.Logger(), + executionResult, + block, + c.execDataRequester, + c.txResultErrMsgsRequester, + c.txResultErrMsgsRequestTimeout, + c.persistentRegisters, + c.persistentEvents, + c.persistentCollections, + c.persistentResults, + c.persistentTxResultErrMsg, + c.latestPersistedSealedResult, + c.db, + storage.NewTestingLockManager(), + ) + c.NoError(err) + c.NotNil(core) + }) + + c.Run("block ID mismatch", func() { + core, err := NewCoreImpl( + unittest.Logger(), + executionResult, + unittest.BlockFixture(), + c.execDataRequester, + c.txResultErrMsgsRequester, + c.txResultErrMsgsRequestTimeout, + c.persistentRegisters, + c.persistentEvents, + c.persistentCollections, + c.persistentResults, + c.persistentTxResultErrMsg, + c.latestPersistedSealedResult, + c.db, + storage.NewTestingLockManager(), + ) + c.ErrorContains(err, "header ID and execution result block ID must match") + c.Nil(core) + }) +} + +// TestCoreImpl_Download tests the Download method retrieves execution data and transaction error +// messages. +func (c *CoreImplSuite) TestCoreImpl_Download() { + ctx := context.Background() + g := fixtures.NewGeneratorSuite() + + eventuallyCanceled := func(args mock.Arguments) { + // make sure that the context is eventually canceled + ctx := args.Get(0).(context.Context) + c.Require().Eventually(func() bool { + return ctx.Err() != nil + }, time.Second, 10*time.Millisecond) + } + + c.Run("successful download", func() { + tf := generateFixture(g) + core := c.createTestCoreImpl(tf) + + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(tf.execData, nil).Once() + c.txResultErrMsgsRequester.On("Request", mock.Anything).Return(tf.txErrMsgs, nil).Once() + + err := core.Download(ctx) + c.Require().NoError(err) + + c.Assert().Equal(tf.execData, core.workingData.executionData) + c.Assert().Equal(tf.txErrMsgs, core.workingData.txResultErrMsgsData) + + // downloading a second time should return an error + err = core.Download(ctx) + c.ErrorContains(err, "already downloaded") + }) + + c.Run("execution data request error", func() { + tf := generateFixture(g) + core := c.createTestCoreImpl(tf) + + expectedErr := fmt.Errorf("test execution data request error") + + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(nil, expectedErr).Once() + c.txResultErrMsgsRequester. + On("Request", mock.Anything). + Return(nil, context.Canceled). 
+ Run(eventuallyCanceled).Once() + + err := core.Download(ctx) + c.Require().Error(err) + + c.Assert().ErrorIs(err, expectedErr) + c.Assert().Nil(core.workingData.executionData) + c.Assert().Nil(core.workingData.txResultErrMsgsData) + }) + + c.Run("transaction result error messages request error", func() { + tf := generateFixture(g) + core := c.createTestCoreImpl(tf) + + expectedErr := fmt.Errorf("test tx error messages request error") + + c.execDataRequester. + On("RequestExecutionData", mock.Anything). + Return(nil, context.Canceled). + Run(eventuallyCanceled).Once() + c.txResultErrMsgsRequester.On("Request", mock.Anything).Return(nil, expectedErr).Once() + + err := core.Download(ctx) + c.Require().Error(err) + + c.Assert().ErrorIs(err, expectedErr) + c.Assert().Nil(core.workingData.executionData) + c.Assert().Nil(core.workingData.txResultErrMsgsData) + }) + + c.Run("context cancellation", func() { + tf := generateFixture(g) + core := c.createTestCoreImpl(tf) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + c.execDataRequester. + On("RequestExecutionData", mock.Anything). + Return(nil, ctx.Err()). + Run(eventuallyCanceled). + Once() + c.txResultErrMsgsRequester. + On("Request", mock.Anything). + Return(nil, ctx.Err()). + Run(eventuallyCanceled). + Once() + + err := core.Download(ctx) + c.Require().Error(err) + + c.Assert().ErrorIs(err, context.Canceled) + c.Assert().Nil(core.workingData.executionData) + c.Assert().Nil(core.workingData.txResultErrMsgsData) + }) + + c.Run("txResultErrMsgsRequestTimeout expiration", func() { + tf := generateFixture(g) + core := c.createTestCoreImpl(tf) + + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(tf.execData, nil).Once() + + // Transaction result error messages request times out + c.txResultErrMsgsRequester. + On("Request", mock.Anything). + Return(nil, context.DeadlineExceeded). + Run(func(args mock.Arguments) { + // Simulate a slow request by sleeping longer than the timeout + time.Sleep(2 * c.txResultErrMsgsRequestTimeout) + }). + Once() + + unittest.AssertReturnsBefore(c.T(), func() { + err := core.Download(ctx) + c.Require().NoError(err) + }, time.Second) + + // the tx error messages timeout should be handled gracefully, and the execution data should + // be downloaded and stored + c.Assert().Equal(tf.execData, core.workingData.executionData) + c.Assert().Nil(core.workingData.txResultErrMsgsData) + }) + + c.Run("Download after Abandon returns an error", func() { + tf := generateFixture(g) + core := c.createTestCoreImpl(tf) + + core.Abandon() + c.Nil(core.workingData) + + err := core.Download(ctx) + c.ErrorIs(err, errResultAbandoned) + }) +} + +// TestCoreImpl_Index tests the Index method which processes downloaded data. 
+func (c *CoreImplSuite) TestCoreImpl_Index() { + ctx := context.Background() + g := fixtures.NewGeneratorSuite() + + tf := generateFixture(g) + c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(tf.execData, nil) + c.txResultErrMsgsRequester.On("Request", mock.Anything).Return(tf.txErrMsgs, nil) + + c.Run("successful indexing", func() { + core := c.createTestCoreImpl(tf) + + err := core.Download(ctx) + c.Require().NoError(err) + + err = core.Index() + c.Require().NoError(err) + + c.NotNil(core.workingData.indexerData) + c.NotNil(core.workingData.inmemCollections) + c.NotNil(core.workingData.inmemTransactions) + c.NotNil(core.workingData.inmemTxResultErrMsgs) + c.NotNil(core.workingData.inmemEvents) + c.NotNil(core.workingData.inmemResults) + c.NotNil(core.workingData.inmemRegisters) + + // indexing a second time should return an error + err = core.Index() + c.ErrorContains(err, "already indexed") + c.NotNil(core.workingData.indexerData) + }) + + c.Run("indexer constructor error", func() { + core := c.createTestCoreImpl(tf) + core.block = g.Blocks().Fixture() + + err := core.Download(ctx) + c.Require().NoError(err) + + err = core.Index() + c.ErrorContains(err, "failed to create indexer") + c.Nil(core.workingData.indexerData) + }) + + c.Run("failed to index block", func() { + core := c.createTestCoreImpl(tf) + + err := core.Download(ctx) + c.Require().NoError(err) + + core.workingData.executionData = g.BlockExecutionDatas().Fixture() + + err = core.Index() + c.ErrorContains(err, "failed to index execution data") + c.Nil(core.workingData.indexerData) + }) + + c.Run("failed to validate transaction result error messages", func() { + core := c.createTestCoreImpl(tf) + + err := core.Download(ctx) + c.Require().NoError(err) + + // add an extra error message + core.workingData.txResultErrMsgsData = append(core.workingData.txResultErrMsgsData, flow.TransactionResultErrorMessage{ + TransactionID: g.Identifiers().Fixture(), + }) + + err = core.Index() + c.ErrorContains(err, "failed to validate transaction result error messages") + c.Nil(core.workingData.indexerData) + }) + + c.Run("Index after Abandon returns an error", func() { + core := c.createTestCoreImpl(tf) + + core.Abandon() + c.Nil(core.workingData) + + err := core.Index() + c.ErrorIs(err, errResultAbandoned) + }) + + c.Run("Index before Download returns an error", func() { + core := c.createTestCoreImpl(tf) + + err := core.Index() + c.ErrorContains(err, "downloading is not complete") + c.Nil(core.workingData.indexerData) + }) +} + +// TestCoreImpl_Persist tests the Persist method which persists indexed data to storages and database. 
+func (c *CoreImplSuite) TestCoreImpl_Persist() {
+	t := c.T()
+	ctx := context.Background()
+	g := fixtures.NewGeneratorSuite()
+
+	resetMocks := func() {
+		c.db = storagemock.NewDB(t)
+		c.persistentRegisters = storagemock.NewRegisterIndex(t)
+		c.persistentEvents = storagemock.NewEvents(t)
+		c.persistentCollections = storagemock.NewCollections(t)
+		c.persistentTransactions = storagemock.NewTransactions(t)
+		c.persistentResults = storagemock.NewLightTransactionResults(t)
+		c.persistentTxResultErrMsg = storagemock.NewTransactionResultErrorMessages(t)
+		c.latestPersistedSealedResult = storagemock.NewLatestPersistedSealedResult(t)
+	}
+
+	tf := generateFixture(g)
+	blockID := tf.block.ID()
+	c.execDataRequester.On("RequestExecutionData", mock.Anything).Return(tf.execData, nil)
+	c.txResultErrMsgsRequester.On("Request", mock.Anything).Return(tf.txErrMsgs, nil)
+
+	c.Run("successful persistence", func() {
+		resetMocks()
+		core := c.createTestCoreImpl(tf)
+
+		err := core.Download(ctx)
+		c.Require().NoError(err)
+
+		err = core.Index()
+		c.Require().NoError(err)
+
+		c.db.
+			On("WithReaderBatchWriter", mock.Anything).
+			Return(func(fn func(storage.ReaderBatchWriter) error) error {
+				return fn(storagemock.NewBatch(t))
+			}).
+			Once()
+
+		indexerData := core.workingData.indexerData
+		c.persistentRegisters.On("Store", flow.RegisterEntries(indexerData.Registers), tf.block.Height).Return(nil)
+		c.persistentEvents.On("BatchStore", blockID, []flow.EventsList{indexerData.Events}, mock.Anything).Return(nil)
+		c.persistentCollections.On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
+		c.persistentResults.On("BatchStore", blockID, indexerData.Results, mock.Anything).Return(nil)
+		c.persistentTxResultErrMsg.On("BatchStore", blockID, core.workingData.txResultErrMsgsData, mock.Anything).Return(nil)
+		c.latestPersistedSealedResult.On("BatchSet", tf.exeResult.ID(), tf.block.Height, mock.Anything).Return(nil)
+
+		err = core.Persist()
+		c.Require().NoError(err)
+
+		// persisting a second time should return an error
+		err = core.Persist()
+		c.ErrorContains(err, "already persisted")
+	})
+
+	c.Run("persisting registers fails", func() {
+		expectedErr := fmt.Errorf("test persisting registers failure")
+
+		resetMocks()
+		core := c.createTestCoreImpl(tf)
+
+		err := core.Download(ctx)
+		c.Require().NoError(err)
+
+		err = core.Index()
+		c.Require().NoError(err)
+
+		indexerData := core.workingData.indexerData
+		c.persistentRegisters.On("Store", flow.RegisterEntries(indexerData.Registers), tf.block.Height).Return(expectedErr).Once()
+
+		err = core.Persist()
+		c.ErrorIs(err, expectedErr)
+	})
+
+	c.Run("persisting block data fails", func() {
+		expectedErr := fmt.Errorf("test persisting events failure")
+
+		resetMocks()
+		core := c.createTestCoreImpl(tf)
+
+		err := core.Download(ctx)
+		c.Require().NoError(err)
+
+		err = core.Index()
+		c.Require().NoError(err)
+
+		c.db.
+			On("WithReaderBatchWriter", mock.Anything).
+			Return(func(fn func(storage.ReaderBatchWriter) error) error {
+				return fn(storagemock.NewBatch(t))
+			}).
+			Once()
+
+		indexerData := core.workingData.indexerData
+		c.persistentRegisters.On("Store", flow.RegisterEntries(indexerData.Registers), tf.block.Height).Return(nil).Once()
+		c.persistentEvents.On("BatchStore", blockID, []flow.EventsList{indexerData.Events}, mock.Anything).Return(expectedErr).Once()
+
+		err = core.Persist()
+		c.ErrorIs(err, expectedErr)
+	})
+
+	c.Run("Persist after Abandon returns an error", func() {
+		resetMocks()
+		core := c.createTestCoreImpl(tf)
+
+		core.Abandon()
+		c.Nil(core.workingData)
+
+		err := core.Persist()
+		c.ErrorIs(err, errResultAbandoned)
+	})
+
+	c.Run("Persist before Index returns an error", func() {
+		resetMocks()
+		core := c.createTestCoreImpl(tf)
+		err := core.Persist()
+		c.ErrorContains(err, "indexing is not complete")
+	})
+}
diff --git a/module/executiondatasync/optimistic_sync/execution_result_query_provider.go b/module/executiondatasync/optimistic_sync/execution_result_query_provider.go
new file mode 100644
index 00000000000..db0100f1530
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/execution_result_query_provider.go
@@ -0,0 +1,51 @@
+package optimistic_sync
+
+import "github.com/onflow/flow-go/model/flow"
+
+// Criteria defines the filtering criteria for execution result queries.
+// It specifies requirements for execution result selection, including the number
+// of agreeing executors and required executor nodes.
+type Criteria struct {
+	// AgreeingExecutorsCount is the number of receipts including the same ExecutionResult
+	AgreeingExecutorsCount uint
+	// RequiredExecutors is the list of EN node IDs, one of which must have produced the result
+	RequiredExecutors flow.IdentifierList
+}
+
+// OverrideWith overrides the original criteria with the incoming criteria, returning a new Criteria object.
+// Fields from `override` criteria take precedence when set.
+func (c *Criteria) OverrideWith(override Criteria) Criteria {
+	newCriteria := *c
+
+	if override.AgreeingExecutorsCount > 0 {
+		newCriteria.AgreeingExecutorsCount = override.AgreeingExecutorsCount
+	}
+
+	if len(override.RequiredExecutors) > 0 {
+		newCriteria.RequiredExecutors = override.RequiredExecutors
+	}
+
+	return newCriteria
+}
+
+// Query contains the result of an execution result query.
+// It includes both the execution result and the execution nodes that produced it.
+type Query struct {
+	// ExecutionResult is the execution result for the queried block
+	ExecutionResult *flow.ExecutionResult
+	// ExecutionNodes is the list of execution node identities that produced the result
+	ExecutionNodes flow.IdentitySkeletonList
+}
+
+// ExecutionResultQueryProvider provides execution results and execution nodes based on criteria.
+// It allows querying for execution results by block ID with specific filtering criteria
+// to ensure consistency and reliability of execution results.
+type ExecutionResultQueryProvider interface {
+	// ExecutionResultQuery retrieves execution results and associated execution nodes for a given block ID
+	// based on the provided criteria. It returns a Query containing the execution result and
+	// the execution nodes that produced it.
+	//
+	// Expected errors during normal operations:
+	// - backend.InsufficientExecutionReceipts - found insufficient receipts for given block ID.
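+	//
+	// A hedged usage sketch (provider is an assumed implementation; enID is an
+	// illustrative executor node ID):
+	//
+	//	criteria := Criteria{AgreeingExecutorsCount: 2, RequiredExecutors: flow.IdentifierList{enID}}
+	//	query, err := provider.ExecutionResultQuery(blockID, criteria)
+	//	if err != nil {
+	//		return err // e.g. backend.InsufficientExecutionReceipts
+	//	}
+	//	result := query.ExecutionResult // agreed upon by at least 2 executors, one of them enID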
+ ExecutionResultQuery(blockID flow.Identifier, criteria Criteria) (*Query, error) +} diff --git a/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_nodes.go b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_nodes.go new file mode 100644 index 00000000000..b3f647a00cd --- /dev/null +++ b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_nodes.go @@ -0,0 +1,127 @@ +package execution_result_query_provider + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" +) + +const ( + // defaultMaxNodesCnt is the maximum number of nodes that will be contacted to complete an API request. + defaultMaxNodesCnt = 3 +) + +// ExecutionNodesSelector handles the selection of execution nodes based on preferences and requirements. +// It encapsulates the logic for choosing execution nodes based on operator preferences, operator requirements, +// and user requirements. +type ExecutionNodesSelector struct { + // preferredENIdentifiers are the execution nodes that the operator prefers to use + preferredENIdentifiers flow.IdentifierList + // requiredENIdentifiers are the execution nodes that the operator requires to use + requiredENIdentifiers flow.IdentifierList + // maxNodesCnt is the maximum number of nodes to select + maxNodesCnt int +} + +// NewExecutionNodes creates a new ExecutionNodesSelector with the provided configuration. +func NewExecutionNodes( + preferredENIdentifiers flow.IdentifierList, + requiredENIdentifiers flow.IdentifierList, +) *ExecutionNodesSelector { + return &ExecutionNodesSelector{ + preferredENIdentifiers: preferredENIdentifiers, + requiredENIdentifiers: requiredENIdentifiers, + maxNodesCnt: defaultMaxNodesCnt, + } +} + +// SelectExecutionNodes finds the subset of execution nodes defined in the identity table that matches +// the provided executor IDs and executor criteria. +// +// The following precedence is used to determine the subset of execution nodes: +// +// 1. If the user's RequiredExecutors is not empty, only select executors from their list +// +// 2. If the operator's `requiredENIdentifiers` is set, only select executors from the required ENs list. +// If the operator's `preferredENIdentifiers` is also set, then the preferred ENs are selected first. +// +// 3. If only the operator's `preferredENIdentifiers` is set, then select any preferred ENs that +// have executed the result, and fall back to selecting any ENs that have executed the result. +// +// 4. If neither preferred nor required nodes are defined, then all execution nodes matching the +// executor IDs are returned. +// +// No errors are expected during normal operations +func (en *ExecutionNodesSelector) SelectExecutionNodes( + executors flow.IdentityList, + userRequiredExecutors flow.IdentifierList, +) (flow.IdentitySkeletonList, error) { + var chosenIDs flow.IdentityList + + // first, check if the user's criteria included any required executors. + // since the result is chosen based on the user's required executors, this should always return + // at least one match. 
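+	// For illustration (hypothetical values, with maxNodesCnt = 3): given
+	// executors {A, B, C}, operator preferred = {A}, and operator required = {A, B}:
+	// a user list of {C} selects only C (case 1 above), while an empty user list
+	// selects A first and then B via selectFromRequiredENIDs (case 2).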
+	if len(userRequiredExecutors) > 0 {
+		chosenIDs = executors.Filter(filter.HasNodeID[flow.Identity](userRequiredExecutors...))
+		return chosenIDs.ToSkeleton(), nil
+	}
+
+	// if required ENs are set, only select executors from the required ENs list
+	// similarly, if the user does not provide any required executors, then the operator's
+	// `en.requiredENIdentifiers` are applied, so this should always return at least one match.
+	if len(en.requiredENIdentifiers) > 0 {
+		chosenIDs = en.selectFromRequiredENIDs(executors)
+		return chosenIDs.ToSkeleton(), nil
+	}
+
+	// if only preferred ENs are set, then select any preferred ENs that have executed the result,
+	// and fall back to selecting any executors.
+	if len(en.preferredENIdentifiers) > 0 {
+		chosenIDs = executors.Filter(filter.HasNodeID[flow.Identity](en.preferredENIdentifiers...))
+		if len(chosenIDs) >= en.maxNodesCnt {
+			return chosenIDs.ToSkeleton(), nil
+		}
+	}
+
+	// finally, add any remaining executors until maxNodesCnt is reached
+	chosenIDs = en.mergeExecutionNodes(chosenIDs, executors)
+	return chosenIDs.ToSkeleton(), nil
+}
+
+// selectFromRequiredENIDs finds the subset of the provided executors that match the required ENs.
+// if `en.preferredENIdentifiers` is not empty, then any preferred ENs that have executed the result
+// will be added to the subset.
+// otherwise, any executor in the `en.requiredENIdentifiers` list will be returned.
+func (en *ExecutionNodesSelector) selectFromRequiredENIDs(
+	executors flow.IdentityList,
+) flow.IdentityList {
+	var chosenIDs flow.IdentityList
+
+	// add any preferred ENs that have executed the result and return if there are enough nodes
+	// if both preferred and required ENs are set, then preferred MUST be a subset of required
+	if len(en.preferredENIdentifiers) > 0 {
+		chosenIDs = executors.Filter(filter.HasNodeID[flow.Identity](en.preferredENIdentifiers...))
+		if len(chosenIDs) >= en.maxNodesCnt {
+			return chosenIDs
+		}
+	}
+
+	// next, add any other required ENs that have executed the result
+	executedRequired := executors.Filter(filter.HasNodeID[flow.Identity](en.requiredENIdentifiers...))
+	chosenIDs = en.mergeExecutionNodes(chosenIDs, executedRequired)
+
+	return chosenIDs
+}
+
+// mergeExecutionNodes adds nodes to chosenIDs if they are not already included
+func (en *ExecutionNodesSelector) mergeExecutionNodes(chosenIDs, candidates flow.IdentityList) flow.IdentityList {
+	for _, candidateNode := range candidates {
+		if _, exists := chosenIDs.ByNodeID(candidateNode.NodeID); !exists {
+			chosenIDs = append(chosenIDs, candidateNode)
+			if len(chosenIDs) >= en.maxNodesCnt {
+				break
+			}
+		}
+	}
+
+	return chosenIDs
+}
diff --git a/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider.go b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider.go
new file mode 100644
index 00000000000..130ce354646
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider.go
@@ -0,0 +1,213 @@
+package execution_result_query_provider
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine/access/rpc/backend/common"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/filter"
+	"github.com/onflow/flow-go/module/executiondatasync/optimistic_sync"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+)
+
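+// Criteria are layered: NewExecutionResultQueryProvider starts from DefaultCriteria,
+// applies the operator's overrides once at construction, and ExecutionResultQuery
+// applies the caller's per-request overrides on top. A hedged sketch (values
+// illustrative):
+//
+//	base := optimistic_sync.Criteria{AgreeingExecutorsCount: 2}
+//	merged := base.OverrideWith(optimistic_sync.Criteria{AgreeingExecutorsCount: 3})
+//	// merged.AgreeingExecutorsCount == 3; merged.RequiredExecutors stays unset
+//
+// DefaultCriteria is the system default criteria for execution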
+var DefaultCriteria = optimistic_sync.Criteria{
+	AgreeingExecutorsCount: 2,
+}
+
+var _ optimistic_sync.ExecutionResultQueryProvider = (*ExecutionResultQueryProvider)(nil)
+
+// ExecutionResultQueryProvider is a container for elements required to retrieve
+// execution results and execution node identities for a given block ID based on specified criteria.
+type ExecutionResultQueryProvider struct {
+	log zerolog.Logger
+
+	executionReceipts storage.ExecutionReceipts
+	state             protocol.State
+
+	executionNodes *ExecutionNodesSelector
+
+	rootBlockID     flow.Identifier
+	rootBlockResult *flow.ExecutionResult
+
+	baseCriteria optimistic_sync.Criteria
+}
+
+// NewExecutionResultQueryProvider creates and returns a new instance of
+// ExecutionResultQueryProvider.
+//
+// No errors are expected during normal operations
+func NewExecutionResultQueryProvider(
+	log zerolog.Logger,
+	state protocol.State,
+	headers storage.Headers,
+	executionReceipts storage.ExecutionReceipts,
+	executionNodes *ExecutionNodesSelector,
+	operatorCriteria optimistic_sync.Criteria,
+) (*ExecutionResultQueryProvider, error) {
+	// The root block ID and result never change, so they are computed once and cached.
+	sporkRootBlockHeight := state.Params().SporkRootBlockHeight()
+	rootBlockID, err := headers.BlockIDByHeight(sporkRootBlockHeight)
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve block ID by height: %w", err)
+	}
+
+	rootBlockResult, _, err := state.AtBlockID(rootBlockID).SealedResult()
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve root block result: %w", err)
+	}
+
+	return &ExecutionResultQueryProvider{
+		log:               log.With().Str("module", "execution_result_query").Logger(),
+		executionReceipts: executionReceipts,
+		state:             state,
+		executionNodes:    executionNodes,
+		rootBlockID:       rootBlockID,
+		rootBlockResult:   rootBlockResult,
+		baseCriteria:      DefaultCriteria.OverrideWith(operatorCriteria),
+	}, nil
+}
+
+// ExecutionResultQuery retrieves execution results and associated execution nodes for a given block ID
+// based on the provided criteria.
+//
+// Expected errors during normal operations:
+// - backend.InsufficientExecutionReceipts - found insufficient receipts for given block ID.
+func (e *ExecutionResultQueryProvider) ExecutionResultQuery(blockID flow.Identifier, criteria optimistic_sync.Criteria) (*optimistic_sync.Query, error) {
+	executorIdentities, err := e.state.Final().Identities(filter.HasRole[flow.Identity](flow.RoleExecution))
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve execution node identities: %w", err)
+	}
+
+	// if the block ID is the root block, then use the root ExecutionResult and skip the receipt
+	// check since there will not be any.
+	if e.rootBlockID == blockID {
+		subsetENs, err := e.executionNodes.SelectExecutionNodes(executorIdentities, criteria.RequiredExecutors)
+		if err != nil {
+			return nil, fmt.Errorf("failed to choose execution nodes for root block ID %v: %w", e.rootBlockID, err)
+		}
+
+		return &optimistic_sync.Query{
+			ExecutionResult: e.rootBlockResult,
+			ExecutionNodes:  subsetENs,
+		}, nil
+	}
+
+	result, executorIDs, err := e.findResultAndExecutors(blockID, criteria)
+	if err != nil {
+		return nil, fmt.Errorf("failed to find result and executors for block ID %v: %w", blockID, err)
+	}
+
+	executors := executorIdentities.Filter(filter.HasNodeID[flow.Identity](executorIDs...))
+	subsetENs, err := e.executionNodes.SelectExecutionNodes(executors, criteria.RequiredExecutors)
+	if err != nil {
+		return nil, fmt.Errorf("failed to choose execution nodes for block ID %v: %w", blockID, err)
+	}
+
+	if len(subsetENs) == 0 {
+		// this is unexpected, and probably indicates there is a bug.
+		// There are only three ways that SelectExecutionNodes can return an empty list:
+		// 1. there are no executors for the result
+		// 2. none of the user's required executors are in the executor list
+		// 3. none of the operator's required executors are in the executor list
+		// None of these should be possible here: the criteria require at least AgreeingExecutorsCount
+		// agreeing executors, so any result returned by findResultAndExecutors must have at least one
+		// acceptable executor. Otherwise, the criteria check would have already failed.
+		return nil, fmt.Errorf("no execution nodes found for result %v (blockID: %v)", result.ID(), blockID)
+	}
+
+	return &optimistic_sync.Query{
+		ExecutionResult: result,
+		ExecutionNodes:  subsetENs,
+	}, nil
+}
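The selection rule implemented by findResultAndExecutors below can be illustrated in isolation. The sketch uses plain strings as stand-ins for the real flow types (flow.ExecutionReceiptList, optimistic_sync.Criteria), so every name in it is hypothetical; it shows only the shape of the rule: group receipts by result, keep results with enough distinct agreeing executors, and prefer the result with the most agreement.

```go
package main

import (
	"fmt"
	"sort"
)

// receipt is a simplified stand-in for an execution receipt: an executor
// vouching for one execution result.
type receipt struct {
	resultID   string
	executorID string
}

// bestResult groups receipts by result, drops results with fewer than
// minAgreeing distinct executors, and returns the surviving result with the
// most receipts. ok is false when no result meets the threshold, analogous
// to returning InsufficientExecutionReceipts.
func bestResult(receipts []receipt, minAgreeing int) (resultID string, ok bool) {
	byResult := make(map[string]map[string]struct{})
	for _, r := range receipts {
		if byResult[r.resultID] == nil {
			byResult[r.resultID] = make(map[string]struct{})
		}
		byResult[r.resultID][r.executorID] = struct{}{}
	}

	type candidate struct {
		resultID  string
		executors int
	}
	var candidates []candidate
	for id, executors := range byResult {
		if len(executors) >= minAgreeing {
			candidates = append(candidates, candidate{id, len(executors)})
		}
	}
	if len(candidates) == 0 {
		return "", false
	}

	// prefer the result with the most agreeing executors
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].executors > candidates[j].executors
	})
	return candidates[0].resultID, true
}

func main() {
	receipts := []receipt{
		{"result-A", "en-1"}, {"result-A", "en-2"}, {"result-A", "en-3"},
		{"result-B", "en-4"},
	}
	fmt.Println(bestResult(receipts, 2)) // result-A true
}
```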
+// findResultAndExecutors returns a query response for a given block ID.
+// The result must match the provided criteria and have at least one acceptable executor. If multiple
+// results are found, then the result with the most agreeing receipts is returned.
+//
+// Expected errors during normal operations:
+// - backend.InsufficientExecutionReceipts - found insufficient receipts for given block ID.
+func (e *ExecutionResultQueryProvider) findResultAndExecutors(
+	blockID flow.Identifier,
+	criteria optimistic_sync.Criteria,
+) (*flow.ExecutionResult, flow.IdentifierList, error) {
+	type result struct {
+		result   *flow.ExecutionResult
+		receipts flow.ExecutionReceiptList
+	}
+
+	criteria = e.baseCriteria.OverrideWith(criteria)
+
+	// Note: this will return an empty slice with no error if no receipts are found.
+	allReceipts, err := e.executionReceipts.ByBlockID(blockID)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to retrieve execution receipts for block ID %v: %w", blockID, err)
+	}
+
+	// find all results that match the criteria and have at least one acceptable executor
+	results := make([]result, 0)
+	for _, executionReceiptList := range allReceipts.GroupByResultID() {
+		executorGroup := executionReceiptList.GroupByExecutorID()
+		if isExecutorGroupMeetingCriteria(executorGroup, criteria) {
+			results = append(results, result{
+				result:   &executionReceiptList[0].ExecutionResult,
+				receipts: executionReceiptList,
+			})
+		}
+	}
+
+	if len(results) == 0 {
+		return nil, nil, common.NewInsufficientExecutionReceipts(blockID, 0)
+	}
+
+	// sort the candidate results by the number of agreeing receipts in descending order
+	sort.Slice(results, func(i, j int) bool {
+		return len(results[i].receipts) > len(results[j].receipts)
+	})
+
+	executorIDs := getExecutorIDs(results[0].receipts)
+	return results[0].result, executorIDs, nil
+}
+
+// isExecutorGroupMeetingCriteria checks if an executor group meets the specified criteria for execution receipts matching.
+func isExecutorGroupMeetingCriteria(executorGroup flow.ExecutionReceiptGroupedList, criteria optimistic_sync.Criteria) bool {
+	if uint(len(executorGroup)) < criteria.AgreeingExecutorsCount {
+		return false
+	}
+
+	if len(criteria.RequiredExecutors) > 0 {
+		hasRequiredExecutor := false
+		for _, requiredExecutor := range criteria.RequiredExecutors {
+			if _, ok := executorGroup[requiredExecutor]; ok {
+				hasRequiredExecutor = true
+				break
+			}
+		}
+		if !hasRequiredExecutor {
+			return false
+		}
+	}
+
+	// TODO: Implement the `ResultInFork` check here, which iteratively checks ancestors to determine if
+	// the current result's fork includes the requested result. https://github.com/onflow/flow-go/issues/7587
+
+	return true
+}
+
+// getExecutorIDs extracts unique executor node IDs from a list of execution receipts.
+// It groups receipts by executor ID and returns all unique executor identifiers.
+func getExecutorIDs(receipts flow.ExecutionReceiptList) flow.IdentifierList {
+	receiptGroupedByExecutorID := receipts.GroupByExecutorID()
+
+	executorIDs := make(flow.IdentifierList, 0, len(receiptGroupedByExecutorID))
+	for executorID := range receiptGroupedByExecutorID {
+		executorIDs = append(executorIDs, executorID)
+	}
+
+	return executorIDs
+}
diff --git a/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider_test.go b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider_test.go
new file mode 100644
index 00000000000..23f388efb91
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/execution_result_query_provider/execution_result_query_provider_test.go
@@ -0,0 +1,332 @@
+package execution_result_query_provider
+
+import (
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/onflow/flow-go/engine/access/rpc/backend/common"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/executiondatasync/optimistic_sync"
+	protocol "github.com/onflow/flow-go/state/protocol/mock"
+	storagemock "github.com/onflow/flow-go/storage/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// ExecutionResultQueryProviderSuite is a test suite for testing the ExecutionResultQueryProvider.
+type ExecutionResultQueryProviderSuite struct { + suite.Suite + + state *protocol.State + snapshot *protocol.Snapshot + params *protocol.Params + log zerolog.Logger + + receipts *storagemock.ExecutionReceipts + headers *storagemock.Headers + + rootBlock *flow.Header + rootBlockResult *flow.ExecutionResult +} + +func TestExecutionResultQueryProvider(t *testing.T) { + suite.Run(t, new(ExecutionResultQueryProviderSuite)) +} + +// SetupTest initializes the test suite with mock state and receipts storage. +func (suite *ExecutionResultQueryProviderSuite) SetupTest() { + t := suite.T() + suite.log = zerolog.New(zerolog.NewConsoleWriter()) + suite.state = protocol.NewState(t) + suite.snapshot = protocol.NewSnapshot(t) + suite.params = protocol.NewParams(t) + suite.receipts = storagemock.NewExecutionReceipts(t) + suite.headers = storagemock.NewHeaders(t) + + suite.rootBlock = unittest.BlockHeaderFixture() + rootBlockID := suite.rootBlock.ID() + suite.rootBlockResult = unittest.ExecutionResultFixture(unittest.WithExecutionResultBlockID(rootBlockID)) + // This will be used just for the root block + suite.snapshot.On("SealedResult").Return(suite.rootBlockResult, nil, nil).Maybe() + suite.state.On("SealedResult", rootBlockID).Return(flow.ExecutionReceiptList{}).Maybe() + suite.params.On("SporkRootBlockHeight").Return(suite.rootBlock.Height, nil) + suite.headers.On("BlockIDByHeight", suite.rootBlock.Height).Return(rootBlockID, nil) + suite.state.On("Params").Return(suite.params) + suite.state.On("Final").Return(suite.snapshot, nil).Maybe() + suite.state.On("AtBlockID", mock.Anything).Return(suite.snapshot).Maybe() +} + +func (suite *ExecutionResultQueryProviderSuite) createProvider(preferredExecutors flow.IdentifierList, operatorCriteria optimistic_sync.Criteria) *ExecutionResultQueryProvider { + provider, err := NewExecutionResultQueryProvider( + suite.log, + suite.state, + suite.headers, + suite.receipts, + NewExecutionNodes(preferredExecutors, operatorCriteria.RequiredExecutors), + operatorCriteria, + ) + suite.Require().NoError(err) + + return provider +} + +// setupIdentitiesMock sets up the mock for identity-related calls. +func (suite *ExecutionResultQueryProviderSuite) setupIdentitiesMock(allExecutionNodes flow.IdentityList) { + suite.snapshot.On("Identities", mock.Anything).Return( + func(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { + return allExecutionNodes.Filter(filter) + }, + func(flow.IdentityFilter[flow.Identity]) error { return nil }) +} + +// TestExecutionResultQuery tests the main ExecutionResultQuery function with various scenarios. 
+func (suite *ExecutionResultQueryProviderSuite) TestExecutionResultQuery() {
+	totalReceipts := 5
+	block := unittest.BlockFixture()
+
+	// generate execution node identities for each receipt
+	allExecutionNodes := unittest.IdentityListFixture(totalReceipts, unittest.WithRole(flow.RoleExecution))
+
+	// create a base execution result; subtests add alternative results to exercise the agreement logic
+	executionResult := unittest.ExecutionResultFixture()
+
+	suite.Run("query with client required executors", func() {
+		provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{})
+
+		receipts := make(flow.ExecutionReceiptList, totalReceipts)
+		for i := 0; i < totalReceipts; i++ {
+			r := unittest.ReceiptForBlockFixture(block)
+			r.ExecutorID = allExecutionNodes[i].NodeID
+			r.ExecutionResult = *executionResult
+			receipts[i] = r
+		}
+
+		suite.receipts.On("ByBlockID", block.ID()).Return(receipts, nil)
+		suite.setupIdentitiesMock(allExecutionNodes)
+
+		// Require specific executors (first two nodes)
+		requiredExecutors := allExecutionNodes[0:2].NodeIDs()
+
+		query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{
+			AgreeingExecutorsCount: 2,
+			RequiredExecutors:      requiredExecutors,
+		})
+		suite.Require().NoError(err)
+
+		suite.Assert().ElementsMatch(requiredExecutors, query.ExecutionNodes.NodeIDs())
+	})
+
+	suite.Run("successful query with different block results", func() {
+		requiredExecutors := allExecutionNodes[0:3].NodeIDs()
+
+		provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{
+			RequiredExecutors: requiredExecutors,
+		})
+
+		otherResult := unittest.ExecutionResultFixture()
+		// Create 3 receipts with the same result (executionResult) and 2 with a different result (otherResult)
+		receipts := make(flow.ExecutionReceiptList, totalReceipts)
+		for i := 0; i < 3; i++ {
+			r := unittest.ReceiptForBlockFixture(block)
+			r.ExecutorID = allExecutionNodes[i].NodeID
+			r.ExecutionResult = *executionResult
+			receipts[i] = r
+		}
+		for i := 3; i < totalReceipts; i++ {
+			r := unittest.ReceiptForBlockFixture(block)
+			r.ExecutorID = allExecutionNodes[i].NodeID
+			r.ExecutionResult = *otherResult
+			receipts[i] = r
+		}
+
+		suite.receipts.On("ByBlockID", block.ID()).Return(receipts, nil)
+		suite.setupIdentitiesMock(allExecutionNodes)
+
+		query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{})
+		suite.Require().NoError(err)
+
+		suite.Require().Equal(executionResult.ID(), query.ExecutionResult.ID())
+		suite.Assert().ElementsMatch(requiredExecutors, query.ExecutionNodes.NodeIDs())
+	})
+
+	suite.Run("insufficient agreeing executors returns error", func() {
+		provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{})
+
+		// Create a fresh block for this test to ensure proper isolation
+		insufficientBlock := unittest.BlockFixture()
+
+		// Create a scenario where we have receipts but no result has enough agreeing executors
+
+		// Create only 1 receipt with 1 execution result
+		r := unittest.ReceiptForBlockFixture(insufficientBlock)
+		r.ExecutorID = allExecutionNodes[0].NodeID
+		r.ExecutionResult = *unittest.ExecutionResultFixture()
+		receipts := flow.ExecutionReceiptList{
+			r,
+		}
+
+		// Set up a separate mock call for this specific block
+		suite.receipts.On("ByBlockID", insufficientBlock.ID()).Return(receipts, nil).Once()
+		suite.setupIdentitiesMock(allExecutionNodes)
+
+		_, err := provider.ExecutionResultQuery(insufficientBlock.ID(), optimistic_sync.Criteria{
+			AgreeingExecutorsCount: 2,
+			RequiredExecutors:      allExecutionNodes[0:1].NodeIDs(),
+		})
+		suite.Require().Error(err)
+
+		suite.Assert().True(common.IsInsufficientExecutionReceipts(err))
+	})
+
+	suite.Run("required executors not found returns error", func() {
+		provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{})
+		receipts := make(flow.ExecutionReceiptList, totalReceipts)
+		for i := 0; i < totalReceipts; i++ {
+			r := unittest.ReceiptForBlockFixture(block)
+			r.ExecutorID = allExecutionNodes[i].NodeID
+			r.ExecutionResult = *executionResult
+			receipts[i] = r
+		}
+
+		suite.receipts.On("ByBlockID", block.ID()).Return(receipts, nil)
+		suite.setupIdentitiesMock(allExecutionNodes)
+
+		// Require executors that didn't produce any receipts
+		_, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{
+			RequiredExecutors: unittest.IdentityListFixture(2, unittest.WithRole(flow.RoleExecution)).NodeIDs(),
+		})
+		suite.Require().Error(err)
+
+		suite.Assert().True(common.IsInsufficientExecutionReceipts(err))
+	})
+}
+
+// TestRootBlockHandling tests the special case handling for root blocks.
+func (suite *ExecutionResultQueryProviderSuite) TestRootBlockHandling() {
+	allExecutionNodes := unittest.IdentityListFixture(5, unittest.WithRole(flow.RoleExecution))
+	suite.setupIdentitiesMock(allExecutionNodes)
+
+	suite.Run("root block returns root block result and execution nodes", func() {
+		provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{})
+
+		query, err := provider.ExecutionResultQuery(suite.rootBlock.ID(), optimistic_sync.Criteria{})
+		suite.Require().NoError(err)
+
+		suite.Assert().Equal(suite.rootBlockResult, query.ExecutionResult)
+		suite.Assert().Len(query.ExecutionNodes.NodeIDs(), defaultMaxNodesCnt)
+		suite.Assert().Subset(allExecutionNodes.NodeIDs(), query.ExecutionNodes.NodeIDs())
+	})
+
+	suite.Run("root block with required executors", func() {
+		provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{})
+
+		requiredExecutors := allExecutionNodes[0:2].NodeIDs()
+		criteria := optimistic_sync.Criteria{
+			AgreeingExecutorsCount: 1,
+			RequiredExecutors:      requiredExecutors,
+		}
+
+		query, err := provider.ExecutionResultQuery(suite.rootBlock.ID(), criteria)
+		suite.Require().NoError(err)
+
+		suite.Assert().Equal(suite.rootBlockResult, query.ExecutionResult)
+		suite.Assert().ElementsMatch(query.ExecutionNodes.NodeIDs(), requiredExecutors)
+	})
+}
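The next test exercises the interaction of preferred and required executors end to end. As a compact reference, the precedence ladder implemented by SelectExecutionNodes can be sketched with plain string slices standing in for flow.IdentityList (all names below are hypothetical simplifications):

```go
package main

import "fmt"

func contains(set []string, id string) bool {
	for _, s := range set {
		if s == id {
			return true
		}
	}
	return false
}

// intersect keeps the executors that appear in set, preserving order.
func intersect(executors, set []string) []string {
	var out []string
	for _, id := range executors {
		if contains(set, id) {
			out = append(out, id)
		}
	}
	return out
}

// fill appends candidates not yet chosen until max is reached.
func fill(chosen, candidates []string, max int) []string {
	for _, c := range candidates {
		if len(chosen) >= max {
			break
		}
		if !contains(chosen, c) {
			chosen = append(chosen, c)
		}
	}
	return chosen
}

// selectIDs mirrors the precedence ladder of SelectExecutionNodes:
// user-required > operator-required (seeded with preferred) > preferred > any.
func selectIDs(executors, userRequired, opRequired, opPreferred []string, max int) []string {
	if len(userRequired) > 0 {
		return intersect(executors, userRequired)
	}
	if len(opRequired) > 0 {
		chosen := intersect(executors, opPreferred) // preferred must be a subset of required
		if len(chosen) >= max {
			return chosen
		}
		return fill(chosen, intersect(executors, opRequired), max)
	}
	chosen := intersect(executors, opPreferred)
	if len(chosen) >= max {
		return chosen
	}
	return fill(chosen, executors, max)
}

func main() {
	executors := []string{"en-0", "en-1", "en-2", "en-3"}
	opRequired := []string{"en-0", "en-2", "en-3"}
	opPreferred := []string{"en-0"}
	fmt.Println(selectIDs(executors, nil, opRequired, opPreferred, 3))
	// [en-0 en-2 en-3]: the preferred node first, topped up from the required list
}
```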
+// TestPreferredAndRequiredExecutionNodes tests the interaction with preferred and required execution nodes.
+func (suite *ExecutionResultQueryProviderSuite) TestPreferredAndRequiredExecutionNodes() {
+	block := unittest.BlockFixture()
+	allExecutionNodes := unittest.IdentityListFixture(8, unittest.WithRole(flow.RoleExecution))
+	executionResult := unittest.ExecutionResultFixture()
+
+	numReceipts := 6
+	// Create receipts from the first `numReceipts` execution nodes
+	receipts := make(flow.ExecutionReceiptList, numReceipts)
+	for i := 0; i < numReceipts; i++ {
+		r := unittest.ReceiptForBlockFixture(block)
+		r.ExecutorID = allExecutionNodes[i].NodeID
+		r.ExecutionResult = *executionResult
+		receipts[i] = r
+	}
+
+	suite.receipts.On("ByBlockID", block.ID()).Return(receipts, nil)
+	suite.setupIdentitiesMock(allExecutionNodes)
+
+	suite.Run("with default optimistic_sync.Criteria", func() {
+		provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{})
+
+		// optimistic_sync.Criteria are empty to use operator defaults
+		query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{})
+		suite.Require().NoError(err)
+
+		expectedExecutors := allExecutionNodes[0:3].NodeIDs()
+		actualExecutors := query.ExecutionNodes.NodeIDs()
+
+		suite.Assert().Len(actualExecutors, defaultMaxNodesCnt)
+		suite.Assert().ElementsMatch(expectedExecutors, actualExecutors)
+	})
+
+	suite.Run("with operator preferred executors", func() {
+		provider := suite.createProvider(allExecutionNodes[1:5].NodeIDs(), optimistic_sync.Criteria{})
+
+		// optimistic_sync.Criteria are empty to use operator defaults
+		query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{})
+		suite.Require().NoError(err)
+
+		actualExecutors := query.ExecutionNodes.NodeIDs()
+
+		suite.Assert().ElementsMatch(provider.executionNodes.preferredENIdentifiers, actualExecutors)
+	})
+
+	suite.Run("with operator required executors", func() {
+		provider := suite.createProvider(flow.IdentifierList{}, optimistic_sync.Criteria{
+			RequiredExecutors: allExecutionNodes[5:8].NodeIDs(),
+		})
+
+		// optimistic_sync.Criteria are empty to use operator defaults
+		query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{})
+		suite.Require().NoError(err)
+
+		actualExecutors := query.ExecutionNodes.NodeIDs()
+
+		// Only one of the required executors produced the result
+		expectedExecutors := provider.executionNodes.requiredENIdentifiers[0:1]
+
+		suite.Assert().ElementsMatch(expectedExecutors, actualExecutors)
+	})
+
+	suite.Run("with both: operator preferred & required executors", func() {
+		provider := suite.createProvider(allExecutionNodes[0:1].NodeIDs(), optimistic_sync.Criteria{
+			RequiredExecutors: allExecutionNodes[3:6].NodeIDs(),
+		})
+
+		// optimistic_sync.Criteria are empty to use operator defaults
+		query, err := provider.ExecutionResultQuery(block.ID(), optimistic_sync.Criteria{})
+		suite.Require().NoError(err)
+
+		// `preferredENIdentifiers` contains only 1 executor, which is not enough, so the logic takes
+		// 2 more executors from `requiredENIdentifiers` to reach `defaultMaxNodesCnt` executors.
+		expectedExecutors := append(provider.executionNodes.preferredENIdentifiers, provider.executionNodes.requiredENIdentifiers[0:2]...)
+		actualExecutors := query.ExecutionNodes.NodeIDs()
+
+		suite.Assert().Len(actualExecutors, defaultMaxNodesCnt)
+		suite.Assert().ElementsMatch(expectedExecutors, actualExecutors)
+	})
+
+	suite.Run("with client required executors overriding operator settings", func() {
+		provider := suite.createProvider(allExecutionNodes[0:1].NodeIDs(), optimistic_sync.Criteria{
+			RequiredExecutors: allExecutionNodes[2:4].NodeIDs(),
+		})
+
+		userCriteria := optimistic_sync.Criteria{
+			RequiredExecutors: allExecutionNodes[5:6].NodeIDs(),
+		}
+
+		query, err := provider.ExecutionResultQuery(block.ID(), userCriteria)
+		suite.Require().NoError(err)
+
+		suite.Assert().ElementsMatch(userCriteria.RequiredExecutors, query.ExecutionNodes.NodeIDs())
+	})
+}
diff --git a/module/executiondatasync/optimistic_sync/execution_state_cache.go b/module/executiondatasync/optimistic_sync/execution_state_cache.go
new file mode 100644
index 00000000000..58e2e1d4275
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/execution_state_cache.go
@@ -0,0 +1,16 @@
+package optimistic_sync
+
+import "github.com/onflow/flow-go/model/flow"
+
+// ExecutionStateCache provides access to execution state snapshots for querying data at specific ExecutionResults.
+type ExecutionStateCache interface {
+	// Snapshot returns a view of the execution state as of the provided ExecutionResult.
+	// The returned Snapshot provides access to execution state data for the fork ending
+	// on the provided ExecutionResult which extends from the latest sealed result.
+	// The result may be sealed or unsealed. Only data for finalized blocks is available.
+	//
+	// Expected errors during normal operation:
+	// - storage.ErrNotFound - result is not available, not ready for querying, or does not descend from the latest sealed result.
+	// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+	Snapshot(executionResultID flow.Identifier) (Snapshot, error)
+}
diff --git a/module/executiondatasync/optimistic_sync/factories.go b/module/executiondatasync/optimistic_sync/factories.go
new file mode 100644
index 00000000000..06bd673d014
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/factories.go
@@ -0,0 +1,13 @@
+package optimistic_sync
+
+import "github.com/onflow/flow-go/model/flow"
+
+// CoreFactory is a factory object for creating new Core instances.
+type CoreFactory interface {
+	NewCore(result *flow.ExecutionResult) Core
+}
+
+// PipelineFactory is a factory object for creating new Pipeline instances.
+type PipelineFactory interface {
+	NewPipeline(result *flow.ExecutionResult, isSealed bool) Pipeline
+}
diff --git a/module/executiondatasync/optimistic_sync/mock/core.go b/module/executiondatasync/optimistic_sync/mock/core.go
new file mode 100644
index 00000000000..f2e9fb62b80
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/mock/core.go
@@ -0,0 +1,87 @@
+// Code generated by mockery. DO NOT EDIT.
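The following generated mock is consumed through testify's expectation API. A hypothetical sketch of typical test usage (the osmock alias and TestCoreSketch name are illustrative):

```go
package optimistic_sync_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	osmock "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/mock"
)

func TestCoreSketch(t *testing.T) {
	// NewCore registers a cleanup hook that asserts all expectations were met.
	core := osmock.NewCore(t)

	core.On("Download", mock.Anything).Return(nil).Once()
	core.On("Index").Return(nil).Once()

	require.NoError(t, core.Download(context.Background()))
	require.NoError(t, core.Index())
}
```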
+ +package mock + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// Core is an autogenerated mock type for the Core type +type Core struct { + mock.Mock +} + +// Abandon provides a mock function with no fields +func (_m *Core) Abandon() { + _m.Called() +} + +// Download provides a mock function with given fields: ctx +func (_m *Core) Download(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Download") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Index provides a mock function with no fields +func (_m *Core) Index() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Index") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Persist provides a mock function with no fields +func (_m *Core) Persist() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Persist") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewCore creates a new instance of Core. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCore(t interface { + mock.TestingT + Cleanup(func()) +}) *Core { + mock := &Core{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/executiondatasync/optimistic_sync/persisters/block.go b/module/executiondatasync/optimistic_sync/persisters/block.go new file mode 100644 index 00000000000..371ed98b149 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/block.go @@ -0,0 +1,88 @@ +package persisters + +import ( + "fmt" + "time" + + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/persisters/stores" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/logging" +) + +// BlockPersister stores execution data for a single execution result into the database. +// It uses the set of [stores.PersisterStore] to persist the data with a single atomic batch operation. +// To ensure the database only contains data certified by the protocol, the block persister must +// only be called for sealed execution results. +type BlockPersister struct { + log zerolog.Logger + + persisterStores []stores.PersisterStore + protocolDB storage.DB + lockManager lockctx.Manager + executionResult *flow.ExecutionResult +} + +// NewBlockPersister creates a new block persister. +func NewBlockPersister( + log zerolog.Logger, + protocolDB storage.DB, + lockManager lockctx.Manager, + executionResult *flow.ExecutionResult, + persisterStores []stores.PersisterStore, +) *BlockPersister { + log = log.With(). + Str("component", "block_persister"). + Hex("execution_result_id", logging.ID(executionResult.ID())). + Hex("block_id", logging.ID(executionResult.BlockID)). + Logger() + + log.Info(). + Int("batch_persisters_count", len(persisterStores)). 
+		Msg("block persisters initialized")
+
+	return &BlockPersister{
+		log:             log,
+		persisterStores: persisterStores,
+		protocolDB:      protocolDB,
+		executionResult: executionResult,
+		lockManager:     lockManager,
+	}
+}
+
+// Persist atomically stores all data into the database using the configured persister stores.
+//
+// No error returns are expected during normal operations
+func (p *BlockPersister) Persist() error {
+	p.log.Debug().Msg("started to persist execution data")
+	start := time.Now()
+
+	lctx := p.lockManager.NewContext()
+	err := lctx.AcquireLock(storage.LockInsertCollection)
+	if err != nil {
+		return fmt.Errorf("could not acquire lock for inserting light collections: %w", err)
+	}
+	defer lctx.Release()
+
+	err = p.protocolDB.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error {
+		for _, persister := range p.persisterStores {
+			if err := persister.Persist(lctx, batch); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		return fmt.Errorf("failed to commit batch: %w", err)
+	}
+
+	p.log.Debug().
+		Dur("duration_ms", time.Since(start)).
+		Msg("successfully persisted execution data")
+
+	return nil
+}
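BlockPersister deliberately knows nothing about the data it writes: each data type participates by implementing stores.PersisterStore (the stores files later in this diff) and staging its writes into the shared batch. A hypothetical extra store might look like the sketch below, assuming the batch's Writer() exposes a byte-level Set as the storage operation layer does; the markerStore type and its key are invented for illustration:

```go
package stores

import (
	"fmt"

	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/storage"
)

// markerStore is a hypothetical PersisterStore that stages a single
// key/value write into the shared batch. The contract is: stage writes
// only; BlockPersister commits the batch once all stores succeed.
type markerStore struct {
	key   []byte
	value []byte
}

var _ PersisterStore = (*markerStore)(nil)

// Persist stages the marker write; it never commits the batch itself.
func (s *markerStore) Persist(_ lockctx.Proof, batch storage.ReaderBatchWriter) error {
	if err := batch.Writer().Set(s.key, s.value); err != nil {
		return fmt.Errorf("could not stage marker write: %w", err)
	}
	return nil
}
```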
diff --git a/module/executiondatasync/optimistic_sync/persisters/block_test.go b/module/executiondatasync/optimistic_sync/persisters/block_test.go
new file mode 100644
index 00000000000..c09b7bbb13c
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/persisters/block_test.go
@@ -0,0 +1,224 @@
+package persisters
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/persisters/stores"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/module/state_synchronization/indexer"
+	"github.com/onflow/flow-go/storage"
+	storagemock "github.com/onflow/flow-go/storage/mock"
+	"github.com/onflow/flow-go/storage/operation/pebbleimpl"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+	"github.com/onflow/flow-go/utils/unittest/fixtures"
+)
+
+type PersisterSuite struct {
+	suite.Suite
+
+	headers          *storagemock.Headers
+	executionResults *storagemock.ExecutionResults
+
+	executionResult *flow.ExecutionResult
+	header          *flow.Header
+	indexerData     *indexer.IndexerData
+	txErrMsgs       []flow.TransactionResultErrorMessage
+}
+
+func TestPersisterSuite(t *testing.T) {
+	t.Parallel()
+	suite.Run(t, new(PersisterSuite))
+}
+
+func (p *PersisterSuite) SetupTest() {
+	g := fixtures.NewGeneratorSuite()
+
+	block := g.Blocks().Fixture()
+	p.header = block.ToHeader()
+	p.executionResult = g.ExecutionResults().Fixture(fixtures.ExecutionResult.WithBlock(block))
+
+	p.indexerData = &indexer.IndexerData{
+		Events:       g.Events().List(5),
+		Collections:  g.Collections().List(2),
+		Transactions: g.Transactions().List(2),
+		Results:      g.LightTransactionResults().List(4),
+		Registers:    g.RegisterEntries().List(3),
+	}
+
+	for txIndex := range p.indexerData.Results {
+		if txIndex%2 == 0 {
+			p.indexerData.Results[txIndex].Failed = true
+		}
+	}
+	p.txErrMsgs = g.TransactionErrorMessages().ForTransactionResults(p.indexerData.Results)
+}
+
+func (p *PersisterSuite) TestPersister_HappyPath() {
+	p.testWithDatabase()
+}
+
+func (p *PersisterSuite) TestPersister_EmptyData() {
+	p.indexerData = &indexer.IndexerData{
+		// This is needed because the events storage caches an empty slice when no events are passed.
+		// Without it, assert.Equal would fail because nil != empty slice.
+		Events: []flow.Event{},
+	}
+	p.txErrMsgs = nil
+	p.testWithDatabase()
+}
+
+func (p *PersisterSuite) testWithDatabase() {
+	logger := unittest.Logger()
+	metrics := metrics.NewNoopCollector()
+	lockManager := storage.NewTestingLockManager()
+
+	p.headers = storagemock.NewHeaders(p.T())
+	p.headers.On("ByHeight", p.header.Height).Return(p.header, nil)
+
+	p.executionResults = storagemock.NewExecutionResults(p.T())
+	p.executionResults.On("ByBlockID", p.executionResult.BlockID).Return(p.executionResult, nil)
+
+	unittest.RunWithPebbleDB(p.T(), func(pdb *pebble.DB) {
+		db := pebbleimpl.ToDB(pdb)
+
+		events := store.NewEvents(metrics, db)
+		results := store.NewLightTransactionResults(metrics, db, store.DefaultCacheSize)
+		transactions := store.NewTransactions(metrics, db)
+		collections := store.NewCollections(db, transactions)
+		txResultErrMsg := store.NewTransactionResultErrorMessages(metrics, db, store.DefaultCacheSize)
+
+		progress, err := store.NewConsumerProgress(db, "test_consumer").Initialize(p.header.Height)
+		p.Require().NoError(err)
+
+		latestPersistedSealedResult, err := store.NewLatestPersistedSealedResult(progress, p.headers, p.executionResults)
+		p.Require().NoError(err)
+
+		persister := NewBlockPersister(
+			logger,
+			db,
+			lockManager,
+			p.executionResult,
+			[]stores.PersisterStore{
+				stores.NewEventsStore(p.indexerData.Events, events, p.executionResult.BlockID),
+				stores.NewResultsStore(p.indexerData.Results, results, p.executionResult.BlockID),
+				stores.NewCollectionsStore(p.indexerData.Collections, collections),
+				stores.NewTxResultErrMsgStore(p.txErrMsgs, txResultErrMsg, p.executionResult.BlockID),
+				stores.NewLatestSealedResultStore(latestPersistedSealedResult, p.executionResult.ID(), p.header.Height),
+			},
+		)
+
+		err = persister.Persist()
+		p.Require().NoError(err)
+
+		// Assert all of the expected data exists in the database
+		blockEvents, err := events.ByBlockID(p.executionResult.BlockID)
+		p.Require().NoError(err)
+		p.Require().Equal(p.indexerData.Events, blockEvents)
+
+		blockTxResults, err := results.ByBlockID(p.executionResult.BlockID)
+		p.Require().NoError(err)
+		p.Require().Equal(p.indexerData.Results, blockTxResults)
+
+		for _, expectedCollection := range p.indexerData.Collections {
+			expectedLightCollection := expectedCollection.Light()
+			expectedID := expectedCollection.ID()
+
+			actualCollection, err := collections.ByID(expectedID)
+			p.Require().NoError(err)
+			p.Require().Equal(expectedCollection, actualCollection)
+
+			actualLightCollection, err := collections.LightByID(expectedID)
+			p.Require().NoError(err)
+			p.Require().Equal(expectedLightCollection, actualLightCollection)
+
+			for i, txID := range expectedLightCollection.Transactions {
+				tx, err := transactions.ByID(txID)
+				p.Require().NoError(err)
+				p.Require().Equal(expectedCollection.Transactions[i], tx)
+			}
+		}
+
+		blockTxResultErrMsgs, err := txResultErrMsg.ByBlockID(p.executionResult.BlockID)
+		p.Require().NoError(err)
+		require.Equal(p.T(), p.txErrMsgs, blockTxResultErrMsgs)
+
+		resultID, height := latestPersistedSealedResult.Latest()
+		p.Require().Equal(p.executionResult.ID(), resultID)
+		p.Require().Equal(p.header.Height, height)
+
+		height, err = progress.ProcessedIndex()
+		p.Require().NoError(err)
+		p.Require().Equal(p.header.Height, height)
+	})
+}
+
+func (p *PersisterSuite) TestPersister_ErrorHandling() {
+	p.Run("persister error", func() {
+		expectedErr := errors.New("event persister error")
+
+		lockManager := storage.NewTestingLockManager()
+
+		database := storagemock.NewDB(p.T())
+		database.
+			On("WithReaderBatchWriter", mock.Anything).
+			Return(func(fn func(storage.ReaderBatchWriter) error) error {
+				return fn(storagemock.NewBatch(p.T()))
+			}).
+			Once()
+
+		collections := storagemock.NewCollections(p.T())
+		collections.
+			On("BatchStoreAndIndexByTransaction", mock.Anything, mock.Anything, mock.Anything).
+			Return(nil, nil).
+			Times(len(p.indexerData.Collections))
+
+		events := storagemock.NewEvents(p.T())
+		events.On("BatchStore", p.executionResult.BlockID, mock.Anything, mock.Anything).Return(expectedErr).Once()
+
+		persister := NewBlockPersister(
+			unittest.Logger(),
+			database,
+			lockManager,
+			p.executionResult,
+			[]stores.PersisterStore{
+				stores.NewCollectionsStore(p.indexerData.Collections, collections),
+				stores.NewEventsStore(p.indexerData.Events, events, p.executionResult.BlockID),
+			},
+		)
+
+		err := persister.Persist()
+		p.Require().ErrorIs(err, expectedErr)
+	})
+
+	p.Run("lock manager error", func() {
+		lockManager := lockctx.NewManager(nil, lockctx.NoPolicy)
+
+		database := storagemock.NewDB(p.T())
+		collections := storagemock.NewCollections(p.T())
+		events := storagemock.NewEvents(p.T())
+
+		persister := NewBlockPersister(
+			unittest.Logger(),
+			database,
+			lockManager,
+			p.executionResult,
+			[]stores.PersisterStore{
+				stores.NewCollectionsStore(p.indexerData.Collections, collections),
+				stores.NewEventsStore(p.indexerData.Events, events, p.executionResult.BlockID),
+			},
+		)
+
+		err := persister.Persist()
+		p.Require().Error(err)
+		p.True(lockctx.IsUnknownLockError(err))
+	})
+}
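The registers persister introduced in the next file is intentionally simpler than the block persister: register writes go to the separate registers database, which commits atomically per height on its own. A usage sketch (persistRegisters is a hypothetical helper); note that the register index expects a Store call for every height, even when a block wrote no registers:

```go
// persistRegisters drives RegistersPersister for one height. entries may be
// empty: registers must be stored for every height so the register index's
// height sequence stays contiguous.
func persistRegisters(registers storage.RegisterIndex, entries []flow.RegisterEntry, height uint64) error {
	persister := NewRegistersPersister(entries, registers, height)
	if err := persister.Persist(); err != nil {
		return fmt.Errorf("failed to persist registers at height %d: %w", height, err)
	}
	return nil
}
```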
diff --git a/module/executiondatasync/optimistic_sync/persisters/registers.go b/module/executiondatasync/optimistic_sync/persisters/registers.go
new file mode 100644
index 00000000000..55a878312be
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/persisters/registers.go
@@ -0,0 +1,41 @@
+package persisters
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// RegistersPersister stores register data for a single execution result into the registers db in a
+// single atomic batch operation.
+// To ensure the database only contains data certified by the protocol, the registers persister must
+// only be called for sealed execution results.
+type RegistersPersister struct {
+	data      []flow.RegisterEntry
+	registers storage.RegisterIndex
+	height    uint64
+}
+
+// NewRegistersPersister creates a new RegistersPersister.
+func NewRegistersPersister(
+	data []flow.RegisterEntry,
+	registers storage.RegisterIndex,
+	height uint64,
+) *RegistersPersister {
+	return &RegistersPersister{
+		data:      data,
+		registers: registers,
+		height:    height,
+	}
+}
+
+// Persist stores the register entries into the registers db
+//
+// No error returns are expected during normal operations
+func (r *RegistersPersister) Persist() error {
+	if err := r.registers.Store(r.data, r.height); err != nil {
+		return fmt.Errorf("could not persist registers: %w", err)
+	}
+
+	return nil
+}
diff --git a/module/executiondatasync/optimistic_sync/persisters/registers_test.go b/module/executiondatasync/optimistic_sync/persisters/registers_test.go
new file mode 100644
index 00000000000..5885ef39c86
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/persisters/registers_test.go
@@ -0,0 +1,63 @@
+package persisters
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	storagemock "github.com/onflow/flow-go/storage/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestRegistersPersister_Persist covers the happy path, empty register sets, and storage errors.
+func TestRegistersPersister_Persist(t *testing.T) {
+	t.Parallel()
+
+	height := uint64(1000)
+	storedRegisters := make(flow.RegisterEntries, 3)
+	for i := range storedRegisters {
+		storedRegisters[i] = unittest.RegisterEntryFixture()
+	}
+
+	t.Run("happy path", func(t *testing.T) {
+		t.Parallel()
+
+		registers := storagemock.NewRegisterIndex(t)
+		registers.On("Store", storedRegisters, height).Return(nil).Once()
+
+		persister := NewRegistersPersister(storedRegisters, registers, height)
+
+		err := persister.Persist()
+		require.NoError(t, err)
+	})
+
+	// Registers must be stored for every height, even if empty
+	t.Run("persist empty registers", func(t *testing.T) {
+		t.Parallel()
+
+		storedRegisters := make(flow.RegisterEntries, 0)
+
+		registers := storagemock.NewRegisterIndex(t)
+		registers.On("Store", storedRegisters, height).Return(nil).Once()
+
+		persister := NewRegistersPersister(storedRegisters, registers, height)
+
+		err := persister.Persist()
+		require.NoError(t, err)
+	})
+
+	t.Run("persist error", func(t *testing.T) {
+		t.Parallel()
+
+		expectedErr := fmt.Errorf("test error")
+
+		registers := storagemock.NewRegisterIndex(t)
+		registers.On("Store", storedRegisters, height).Return(expectedErr).Once()
+
+		persister := NewRegistersPersister(storedRegisters, registers, height)
+
+		err := persister.Persist()
+		require.ErrorIs(t, err, expectedErr)
+	})
+}
diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/events.go b/module/executiondatasync/optimistic_sync/persisters/stores/events.go
new file mode 100644
index 00000000000..cb7224c9096
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/persisters/stores/events.go
@@ -0,0 +1,41 @@
+package stores
+
+import (
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+var _ PersisterStore = (*EventsStore)(nil)
+
+// EventsStore handles persisting events
+type EventsStore struct {
+	data            []flow.Event
+	persistedEvents storage.Events
+	blockID         flow.Identifier
+}
+
+func NewEventsStore(
+	data []flow.Event,
+	persistedEvents storage.Events,
+	blockID flow.Identifier,
+) *EventsStore {
+	return &EventsStore{
+		data:            data,
+		persistedEvents: persistedEvents,
+		blockID:         blockID,
+	}
+}
+
+// Persist adds events to the batch.
+//
+// No error returns are expected during normal operations
+func (e *EventsStore) Persist(_ lockctx.Proof, batch storage.ReaderBatchWriter) error {
+	if err := e.persistedEvents.BatchStore(e.blockID, []flow.EventsList{e.data}, batch); err != nil {
+		return fmt.Errorf("could not add events to batch: %w", err)
+	}
+	return nil
+}
diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/latest_sealed_result.go b/module/executiondatasync/optimistic_sync/persisters/stores/latest_sealed_result.go
new file mode 100644
index 00000000000..b9cfe513c6b
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/persisters/stores/latest_sealed_result.go
@@ -0,0 +1,41 @@
+package stores
+
+import (
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+var _ PersisterStore = (*LatestSealedResultStore)(nil)
+
+// LatestSealedResultStore handles persisting the latest sealed execution result
+type LatestSealedResultStore struct {
+	latestPersistedSealedResult storage.LatestPersistedSealedResult
+	height                      uint64
+	executionResultID           flow.Identifier
+}
+
+func NewLatestSealedResultStore(
+	latestPersistedSealedResult storage.LatestPersistedSealedResult,
+	executionResultID flow.Identifier,
+	height uint64,
+) *LatestSealedResultStore {
+	return &LatestSealedResultStore{
+		latestPersistedSealedResult: latestPersistedSealedResult,
+		height:                      height,
+		executionResultID:           executionResultID,
+	}
+}
+
+// Persist adds the latest sealed result to the batch.
+//
+// No error returns are expected during normal operations
+func (t *LatestSealedResultStore) Persist(_ lockctx.Proof, batch storage.ReaderBatchWriter) error {
+	if err := t.latestPersistedSealedResult.BatchSet(t.executionResultID, t.height, batch); err != nil {
+		return fmt.Errorf("could not persist latest sealed result: %w", err)
+	}
+	return nil
+}
diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/light_collections.go b/module/executiondatasync/optimistic_sync/persisters/stores/light_collections.go
new file mode 100644
index 00000000000..adec658f1b0
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/persisters/stores/light_collections.go
@@ -0,0 +1,41 @@
+package stores
+
+import (
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+var _ PersisterStore = (*LightCollectionsStore)(nil)
+
+// LightCollectionsStore handles persisting light collections
+type LightCollectionsStore struct {
+	data                 []*flow.Collection
+	persistedCollections storage.Collections
+}
+
+func NewCollectionsStore(
+	data []*flow.Collection,
+	persistedCollections storage.Collections,
+) *LightCollectionsStore {
+	return &LightCollectionsStore{
+		data:                 data,
+		persistedCollections: persistedCollections,
+	}
+}
+
+// Persist adds light collections to the batch.
+// +// No error returns are expected during normal operations +func (c *LightCollectionsStore) Persist(lctx lockctx.Proof, batch storage.ReaderBatchWriter) error { + for _, collection := range c.data { + if _, err := c.persistedCollections.BatchStoreAndIndexByTransaction(lctx, collection, batch); err != nil { + return fmt.Errorf("could not add light collections to batch: %w", err) + } + } + + return nil +} diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/persister_store.go b/module/executiondatasync/optimistic_sync/persisters/stores/persister_store.go new file mode 100644 index 00000000000..5055ea117ab --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/stores/persister_store.go @@ -0,0 +1,15 @@ +package stores + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/storage" +) + +// PersisterStore is the interface to handle persisting of a data type to persisted storage using batch operation. +type PersisterStore interface { + // Persist adds data to the batch for later commitment. + // + // No error returns are expected during normal operations + Persist(lctx lockctx.Proof, batch storage.ReaderBatchWriter) error +} diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/results.go b/module/executiondatasync/optimistic_sync/persisters/stores/results.go new file mode 100644 index 00000000000..7cb9770c1a9 --- /dev/null +++ b/module/executiondatasync/optimistic_sync/persisters/stores/results.go @@ -0,0 +1,41 @@ +package stores + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +var _ PersisterStore = (*ResultsStore)(nil) + +// ResultsStore handles persisting transaction results +type ResultsStore struct { + data []flow.LightTransactionResult + persistedResults storage.LightTransactionResults + blockID flow.Identifier +} + +func NewResultsStore( + data []flow.LightTransactionResult, + persistedResults storage.LightTransactionResults, + blockID flow.Identifier, +) *ResultsStore { + return &ResultsStore{ + data: data, + persistedResults: persistedResults, + blockID: blockID, + } +} + +// Persist adds results to the batch. 
+//
+// No error returns are expected during normal operations
+func (r *ResultsStore) Persist(_ lockctx.Proof, batch storage.ReaderBatchWriter) error {
+	if err := r.persistedResults.BatchStore(r.blockID, r.data, batch); err != nil {
+		return fmt.Errorf("could not add transaction results to batch: %w", err)
+	}
+	return nil
+}
diff --git a/module/executiondatasync/optimistic_sync/persisters/stores/transaction_result_error_messages.go b/module/executiondatasync/optimistic_sync/persisters/stores/transaction_result_error_messages.go
new file mode 100644
index 00000000000..5976d151e37
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/persisters/stores/transaction_result_error_messages.go
@@ -0,0 +1,41 @@
+package stores
+
+import (
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+var _ PersisterStore = (*TxResultErrMsgStore)(nil)
+
+// TxResultErrMsgStore handles persisting transaction result error messages
+type TxResultErrMsgStore struct {
+	data                    []flow.TransactionResultErrorMessage
+	persistedTxResultErrMsg storage.TransactionResultErrorMessages
+	blockID                 flow.Identifier
+}
+
+func NewTxResultErrMsgStore(
+	data []flow.TransactionResultErrorMessage,
+	persistedTxResultErrMsg storage.TransactionResultErrorMessages,
+	blockID flow.Identifier,
+) *TxResultErrMsgStore {
+	return &TxResultErrMsgStore{
+		data:                    data,
+		persistedTxResultErrMsg: persistedTxResultErrMsg,
+		blockID:                 blockID,
+	}
+}
+
+// Persist adds transaction result error messages to the batch.
+//
+// No error returns are expected during normal operations
+func (t *TxResultErrMsgStore) Persist(_ lockctx.Proof, batch storage.ReaderBatchWriter) error {
+	if err := t.persistedTxResultErrMsg.BatchStore(t.blockID, t.data, batch); err != nil {
+		return fmt.Errorf("could not add transaction result error messages to batch: %w", err)
+	}
+	return nil
+}
diff --git a/module/executiondatasync/optimistic_sync/pipeline.go b/module/executiondatasync/optimistic_sync/pipeline.go
new file mode 100644
index 00000000000..d95a658f978
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/pipeline.go
@@ -0,0 +1,448 @@
+package optimistic_sync
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/gammazero/workerpool"
+	"github.com/rs/zerolog"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/engine"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+)
+
+var (
+	// ErrInvalidTransition is returned when a state transition is invalid.
+	ErrInvalidTransition = errors.New("invalid state transition")
+)
+
+// PipelineStateProvider is an interface that provides a pipeline's state.
+type PipelineStateProvider interface {
+	// GetState returns the current state of the pipeline.
+	GetState() State
+}
+
+// PipelineStateConsumer is a receiver of pipeline state updates.
+// Implementations must be NON-BLOCKING and consume state updates without noteworthy delay.
+type PipelineStateConsumer interface {
+	// OnStateUpdated is called when a pipeline's state changes to notify the receiver of the new state.
+	// This method will be called in the same goroutine that runs the pipeline, so it must not block.
+	OnStateUpdated(newState State)
+}
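For illustration, a minimal consumer satisfying this contract could forward updates into a buffered channel and drop on overflow instead of blocking (chanStateConsumer is hypothetical; a receiver can always recover the latest state via PipelineStateProvider.GetState):

```go
// chanStateConsumer is a hypothetical PipelineStateConsumer that never blocks:
// updates are forwarded into a buffered channel, and dropped when the buffer
// is full.
type chanStateConsumer struct {
	updates chan State
}

var _ PipelineStateConsumer = (*chanStateConsumer)(nil)

func (c *chanStateConsumer) OnStateUpdated(newState State) {
	select {
	case c.updates <- newState:
	default: // dropping is safe; never block the pipeline goroutine
	}
}
```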
+// Pipeline represents a pipelined processing state machine for a single ExecutionResult.
+// The state machine is initialized in the Pending state.
+//
+// The state machine is designed to be run in a single goroutine. The Run method must only be called once.
+type Pipeline interface {
+	PipelineStateProvider
+
+	// Run starts the pipeline processing and blocks until completion or context cancellation.
+	// CAUTION: not concurrency safe! Run must only be called once.
+	//
+	// Expected Errors:
+	// - context.Canceled: when the context is canceled
+	// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+	Run(ctx context.Context, core Core, parentState State) error
+
+	// SetSealed marks the pipeline's result as sealed, which enables transitioning from StateWaitingPersist to StateComplete.
+	SetSealed()
+
+	// OnParentStateUpdated updates the pipeline's parent's state.
+	OnParentStateUpdated(parentState State)
+
+	// Abandon marks the pipeline as abandoned.
+	Abandon()
+}
+
+var _ Pipeline = (*PipelineImpl)(nil)
+
+// worker implements a single worker goroutine that processes tasks submitted to it.
+// It supports submission of context-based tasks that return an error.
+// Each error that occurs during task execution is sent to a dedicated error channel.
+// The primary purpose of the worker is to handle tasks in a non-blocking manner, while still allowing the parent thread
+// to observe and handle errors that occur during task execution.
+type worker struct {
+	ctx     context.Context
+	cancel  context.CancelFunc
+	pool    *workerpool.WorkerPool
+	errChan chan error
+}
+
+// newWorker creates a single worker.
+func newWorker() *worker {
+	ctx, cancel := context.WithCancel(context.Background())
+	return &worker{
+		ctx:     ctx,
+		cancel:  cancel,
+		pool:    workerpool.New(1),
+		errChan: make(chan error, 1),
+	}
+}
+
+// Submit submits a new task for processing; any error is propagated on the worker's error channel.
+// Might block the worker if no one is reading from the error channel while errors are occurring.
+func (w *worker) Submit(task func(ctx context.Context) error) {
+	w.pool.Submit(func() {
+		err := task(w.ctx)
+		if err != nil && !errors.Is(err, context.Canceled) {
+			w.errChan <- err
+		}
+	})
+}
+
+// ErrChan returns the channel where errors are delivered from executed tasks.
+func (w *worker) ErrChan() <-chan error {
+	return w.errChan
+}
+
+// StopWait stops the worker pool and waits for all queued tasks to complete.
+// No additional tasks may be submitted, but all pending tasks are executed by workers before this function returns.
+// This function is blocking and guarantees that any error that occurred during the execution of tasks is delivered
+// to the caller as its return value.
+func (w *worker) StopWait() error {
+	w.cancel()
+	w.pool.StopWait()
+
+	defer close(w.errChan)
+	select {
+	case err := <-w.errChan:
+		return err
+	default:
+		return nil
+	}
+}
+
+// PipelineImpl implements the Pipeline interface
+type PipelineImpl struct {
+	log                  zerolog.Logger
+	stateConsumer        PipelineStateConsumer
+	stateChangedNotifier engine.Notifier
+	core                 Core
+	worker               *worker
+
+	// The following fields are accessed externally. They are stored using atomics to avoid
+	// blocking the caller.
+	state            *atomic.Int32
+	parentStateCache *atomic.Int32
+	isSealed         *atomic.Bool
+	isAbandoned      *atomic.Bool
+	isIndexed        *atomic.Bool
+}
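A small illustrative sketch of how the worker's pieces compose (it reuses the unexported worker defined above; the function itself is hypothetical): an error produced by a submitted task is never lost, because StopWait drains the error channel during shutdown.

```go
// workerSketch submits one failing task and shuts the worker down. The task's
// error reaches the caller through StopWait even if ErrChan was never read.
func workerSketch() error {
	w := newWorker()
	w.Submit(func(ctx context.Context) error {
		// a detached operation; context.Canceled is swallowed by Submit,
		// any other error lands on the worker's error channel
		return fmt.Errorf("boom")
	})

	return w.StopWait() // returns the "boom" error
}
```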
+// NewPipeline creates a new processing pipeline.
+// The pipeline is initialized in the Pending state.
+func NewPipeline(
+	log zerolog.Logger,
+	executionResult *flow.ExecutionResult,
+	isSealed bool,
+	stateReceiver PipelineStateConsumer,
+) *PipelineImpl {
+	log = log.With().
+		Str("component", "pipeline").
+		Str("execution_result_id", executionResult.ID().String()).
+		Str("block_id", executionResult.BlockID.String()).
+		Logger()
+
+	return &PipelineImpl{
+		log:                  log,
+		stateConsumer:        stateReceiver,
+		worker:               newWorker(),
+		state:                atomic.NewInt32(int32(StatePending)),
+		parentStateCache:     atomic.NewInt32(int32(StatePending)),
+		isSealed:             atomic.NewBool(isSealed),
+		isAbandoned:          atomic.NewBool(false),
+		isIndexed:            atomic.NewBool(false),
+		stateChangedNotifier: engine.NewNotifier(),
+	}
+}
+
+// Run starts the pipeline processing and blocks until completion or context cancellation.
+// CAUTION: not concurrency safe! Run must only be called once.
+//
+// Expected Errors:
+// - context.Canceled: when the context is canceled
+// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+func (p *PipelineImpl) Run(ctx context.Context, core Core, parentState State) error {
+	if p.core != nil {
+		return irrecoverable.NewExceptionf("pipeline has already been started; it is not designed to be run again")
+	}
+	p.core = core
+	p.parentStateCache.Store(int32(parentState))
+	// Run the main event loop by calling p.loop. Any error returned from it needs to be propagated to the caller.
+	// IMPORTANT: after the main loop has exited we need to ensure that the worker goroutine has also finished,
+	// because we need to ensure that it can report any error that happened during the execution of a detached operation.
+	// By calling StopWait we ensure that the worker has stopped, which also guarantees that any error has been delivered
+	// to the error channel and returned as the result of StopWait. Without waiting for the worker to stop, we might skip
+	// some errors since the worker didn't have a chance to report them yet, and we have already returned from the Run method.
+	return errors.Join(p.loop(ctx), p.worker.StopWait())
+}
+
+// loop implements the main event loop for the state machine. It reacts to different events and performs operations upon
+// entering or leaving a state.
+// loop blocks until one of the following happens, whichever comes first:
+// 1. the parent context signals that it is no longer valid.
+// 2. the worker thread reports an error. It is not safe to continue execution, so the error is propagated
+//    to the caller.
+// 3. the pipeline successfully enters a terminal state.
+// The pipeline won't and shouldn't perform any state transitions after returning from this function.
+// Expected Errors:
+// - context.Canceled: when the context is canceled
+// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+func (p *PipelineImpl) loop(ctx context.Context) error {
+	// try to start processing in case we are able to.
+	p.stateChangedNotifier.Notify()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case err := <-p.worker.ErrChan():
+			return err
+		case <-p.stateChangedNotifier.Channel():
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			default:
+			}
+
+			// if the parent was abandoned there is no point in continuing; transition to the
+			// abandoned state and perform the cleanup logic.
+			if p.checkAbandoned() {
+				if err := p.transitionTo(StateAbandoned); err != nil {
+					return fmt.Errorf("could not transition to abandoned state: %w", err)
+				}
+			}
+
+			currentState := p.GetState()
+			switch currentState {
+			case StatePending:
+				if err := p.onStartProcessing(); err != nil {
+					return fmt.Errorf("could not process pending state: %w", err)
+				}
+			case StateProcessing:
+				if err := p.onProcessing(); err != nil {
+					return fmt.Errorf("could not process processing state: %w", err)
+				}
+			case StateWaitingPersist:
+				if err := p.onPersistChanges(); err != nil {
+					return fmt.Errorf("could not process waiting persist state: %w", err)
+				}
+			case StateAbandoned:
+				p.core.Abandon()
+				return nil
+			case StateComplete:
+				return nil // terminate
+			default:
+				return fmt.Errorf("invalid pipeline state: %s", currentState)
+			}
+		}
+	}
+}
+
+// onStartProcessing performs the initial state transitions depending on the parent state:
+// - Pending -> Processing
+// - Pending -> Abandoned
+// No errors are expected during normal operations.
+func (p *PipelineImpl) onStartProcessing() error {
+	switch p.parentState() {
+	case StateProcessing, StateWaitingPersist, StateComplete:
+		err := p.transitionTo(StateProcessing)
+		if err != nil {
+			return err
+		}
+		p.worker.Submit(p.performDownload)
+	case StatePending:
+		return nil
+	case StateAbandoned:
+		return p.transitionTo(StateAbandoned)
+	default:
+		// it's unexpected for the parent to be in any other state. this most likely indicates there's a bug
+		return fmt.Errorf("unexpected parent state: %s", p.parentState())
+	}
+	return nil
+}
+
+// onProcessing performs the state transitions when the pipeline is in the Processing state.
+// When data has been successfully indexed, we can transition to StateWaitingPersist.
+// No errors are expected during normal operations.
+func (p *PipelineImpl) onProcessing() error {
+	if p.isIndexed.Load() {
+		return p.transitionTo(StateWaitingPersist)
+	}
+	return nil
+}
+
+// onPersistChanges performs the state transitions when the pipeline is in the WaitingPersist state.
+// When the execution result has been sealed and the parent has already transitioned to StateComplete,
+// we can persist the data and transition to StateComplete.
+// No errors are expected during normal operations.
+func (p *PipelineImpl) onPersistChanges() error {
+	if p.isSealed.Load() && p.parentState() == StateComplete {
+		if err := p.core.Persist(); err != nil {
+			return fmt.Errorf("could not persist pending changes: %w", err)
+		}
+		return p.transitionTo(StateComplete)
+	}
+	return nil
+}
+
+// checkAbandoned returns true if the pipeline or its parent are abandoned.
+func (p *PipelineImpl) checkAbandoned() bool {
+	if p.isAbandoned.Load() {
+		return true
+	}
+	if p.parentState() == StateAbandoned {
+		return true
+	}
+	return p.GetState() == StateAbandoned
+}
+
+// GetState returns the current state of the pipeline.
+func (p *PipelineImpl) GetState() State {
+	return State(p.state.Load())
+}
+
+// parentState returns the last cached parent state of the pipeline.
+func (p *PipelineImpl) parentState() State {
+	return State(p.parentStateCache.Load())
+}
+	if p.isSealed.CompareAndSwap(false, true) {
+		p.stateChangedNotifier.Notify()
+	}
+}
+
+// OnParentStateUpdated updates the pipeline's cached parent state based on the provided value.
+// If the cached parent state was updated, it triggers the internal state-changed notifier so the
+// event loop re-evaluates the pipeline's conditions.
+func (p *PipelineImpl) OnParentStateUpdated(parentState State) {
+	oldState := p.parentStateCache.Load()
+	if p.parentStateCache.CompareAndSwap(oldState, int32(parentState)) {
+		p.stateChangedNotifier.Notify()
+	}
+}
+
+// Abandon marks the pipeline as abandoned.
+// This will cause the pipeline to eventually transition to the Abandoned state and halt processing.
+func (p *PipelineImpl) Abandon() {
+	if p.isAbandoned.CompareAndSwap(false, true) {
+		p.stateChangedNotifier.Notify()
+	}
+}
+
+// performDownload performs the processing step of the pipeline by downloading and indexing data.
+// It sets an atomic flag once the operation has completed successfully, which informs the state
+// machine that it may eventually transition to the next state.
+// Expected Errors:
+// - context.Canceled: when the context is canceled
+// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+func (p *PipelineImpl) performDownload(ctx context.Context) error {
+	if err := p.core.Download(ctx); err != nil {
+		return fmt.Errorf("could not perform download: %w", err)
+	}
+	if err := p.core.Index(); err != nil {
+		return fmt.Errorf("could not perform indexing: %w", err)
+	}
+	if p.isIndexed.CompareAndSwap(false, true) {
+		p.stateChangedNotifier.Notify()
+	}
+	return nil
+}
+
+// transitionTo transitions the pipeline to the given state and broadcasts
+// the state change to children pipelines.
+//
+// Expected Errors:
+// - ErrInvalidTransition: when the transition is invalid
+// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+func (p *PipelineImpl) transitionTo(newState State) error {
+	hasChange, err := p.setState(newState)
+	if err != nil {
+		return err
+	}
+
+	if hasChange {
+		// send notification for all state changes. we require that implementations of [PipelineStateConsumer]
+		// are non-blocking and consume the state updates without noteworthy delay.
+		p.stateConsumer.OnStateUpdated(newState)
+		p.stateChangedNotifier.Notify()
+	}
+
+	return nil
+}
+
+// setState sets the state of the pipeline and logs the transition.
+// Returns true if the state was changed, false otherwise.
+//
+// Expected Errors:
+// - ErrInvalidTransition: when the state transition is invalid
+// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+func (p *PipelineImpl) setState(newState State) (bool, error) {
+	currentState := p.GetState()
+
+	// transitioning to the same state is a no-op
+	if currentState == newState {
+		return false, nil
+	}
+
+	if err := p.validateTransition(currentState, newState); err != nil {
+		return false, fmt.Errorf("failed to transition from %s to %s: %w", currentState, newState, err)
+	}
+
+	if !p.state.CompareAndSwap(int32(currentState), int32(newState)) {
+		// Note: this should never happen since state is only updated within the Run goroutine.
+		return false, fmt.Errorf("failed to transition from %s to %s", currentState, newState)
+	}
+
+	p.log.Debug().
+		Str("old_state", currentState.String()).
+		Str("new_state", newState.String()).
+		Msg("pipeline state transition")
+
+	return true, nil
+}
+
+// validateTransition validates the transition from the current state to the new state.
+//
+// Expected Errors:
+// - ErrInvalidTransition: when the transition is invalid
+// - All other errors are potential indicators of bugs or corrupted internal state (continuation impossible)
+func (p *PipelineImpl) validateTransition(currentState State, newState State) error {
+	switch newState {
+	case StateProcessing:
+		if currentState == StatePending {
+			return nil
+		}
+	case StateWaitingPersist:
+		if currentState == StateProcessing {
+			return nil
+		}
+	case StateComplete:
+		if currentState == StateWaitingPersist {
+			return nil
+		}
+	case StateAbandoned:
+		// Note: it does not make sense to transition to abandoned from persisting or completed since to be in either state:
+		// 1. the parent must be completed
+		// 2. the pipeline's result must be sealed
+		// At that point, there are no conditions that would cause the pipeline to be abandoned
+		switch currentState {
+		case StatePending, StateProcessing, StateWaitingPersist:
+			return nil
+		}
+	default:
+		return fmt.Errorf("invalid transition to state: %s", newState)
+	}
+
+	return ErrInvalidTransition
+}
diff --git a/module/executiondatasync/optimistic_sync/pipeline_functional_test.go b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go
new file mode 100644
index 00000000000..84364b0e7bd
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/pipeline_functional_test.go
@@ -0,0 +1,604 @@
+package optimistic_sync
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	txerrmsgsmock "github.com/onflow/flow-go/engine/access/ingestion/tx_error_messages/mock"
+	"github.com/onflow/flow-go/ledger/common/convert"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
+	"github.com/onflow/flow-go/module/metrics"
+	reqestermock "github.com/onflow/flow-go/module/state_synchronization/requester/mock"
+	"github.com/onflow/flow-go/storage"
+	bstorage "github.com/onflow/flow-go/storage/badger"
+	storagemock "github.com/onflow/flow-go/storage/mock"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/pebbleimpl"
+	pebbleStorage "github.com/onflow/flow-go/storage/pebble"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+	"github.com/onflow/flow-go/utils/unittest/fixtures"
+)
+
+type PipelineFunctionalSuite struct {
+	suite.Suite
+	logger                        zerolog.Logger
+	execDataRequester             *reqestermock.ExecutionDataRequester
+	txResultErrMsgsRequester      *txerrmsgsmock.Requester
+	txResultErrMsgsRequestTimeout time.Duration
+	tmpDir                        string
+	registerTmpDir                string
+	registerDB                    storage.DB
+	db                            storage.DB
+	lockManager                   lockctx.Manager
+	persistentRegisters           *pebbleStorage.Registers
+	persistentEvents              storage.Events
+	persistentCollections        *store.Collections
+	persistentTransactions       *store.Transactions
+	persistentResults            *store.LightTransactionResults
+	persistentTxResultErrMsg     *store.TransactionResultErrorMessages
+	consumerProgress             storage.ConsumerProgress
+	headers                      *store.Headers
+	results                      *store.ExecutionResults
+	persistentLatestSealedResult *store.LatestPersistedSealedResult
+	core                         *CoreImpl
+	block                        *flow.Block
+	executionResult
*flow.ExecutionResult + metrics module.CacheMetrics + config PipelineConfig + expectedExecutionData *execution_data.BlockExecutionData + expectedTxResultErrMsgs []flow.TransactionResultErrorMessage + + expectedData *expectedData +} + +type expectedData struct { + events flow.EventsList + results []flow.LightTransactionResult + collections []*flow.Collection + transactions []*flow.TransactionBody + registerEntries []flow.RegisterEntry +} + +func TestPipelineFunctionalSuite(t *testing.T) { + t.Parallel() + suite.Run(t, new(PipelineFunctionalSuite)) +} + +// SetupTest initializes the test environment for each test case. +// It creates temporary directories, initializes database connections, +// sets up storage backends, creates test fixtures, and initializes +// the core implementation with all required dependencies. +func (p *PipelineFunctionalSuite) SetupTest() { + t := p.T() + p.lockManager = storage.NewTestingLockManager() + + p.tmpDir = unittest.TempDir(t) + p.logger = zerolog.Nop() + p.metrics = metrics.NewNoopCollector() + pdb := unittest.PebbleDB(t, p.tmpDir) + p.db = pebbleimpl.ToDB(pdb) + + g := fixtures.NewGeneratorSuite() + + rootBlock := g.Blocks().Fixture() + sealedBlock := g.Blocks().Fixture(g.Blocks().WithParentHeader(rootBlock.ToHeader())) + sealedExecutionResult := g.ExecutionResults().Fixture(g.ExecutionResults().WithBlock(sealedBlock)) + + tf := generateFixtureExtendingLatestPersisted(g, sealedBlock.ToHeader(), sealedExecutionResult.ID()) + p.block = tf.block + p.executionResult = tf.exeResult + p.expectedExecutionData = tf.execData + p.expectedTxResultErrMsgs = tf.txErrMsgs + + // Create real storages + var err error + // Use a separate directory for the register database to avoid lock conflicts + p.registerTmpDir = unittest.TempDir(t) + registerDB := pebbleStorage.NewBootstrappedRegistersWithPathForTest(t, p.registerTmpDir, rootBlock.Height, sealedBlock.Height) + p.registerDB = pebbleimpl.ToDB(registerDB) + p.persistentRegisters, err = pebbleStorage.NewRegisters(registerDB, pebbleStorage.PruningDisabled) + p.Require().NoError(err) + + p.persistentEvents = store.NewEvents(p.metrics, p.db) + p.persistentTransactions = store.NewTransactions(p.metrics, p.db) + p.persistentCollections = store.NewCollections(p.db, p.persistentTransactions) + p.persistentResults = store.NewLightTransactionResults(p.metrics, p.db, bstorage.DefaultCacheSize) + p.persistentTxResultErrMsg = store.NewTransactionResultErrorMessages(p.metrics, p.db, bstorage.DefaultCacheSize) + p.results = store.NewExecutionResults(p.metrics, p.db) + + p.consumerProgress, err = store.NewConsumerProgress(p.db, "test_consumer").Initialize(sealedBlock.Height) + p.Require().NoError(err) + + // store and index the root header + p.headers = store.NewHeaders(p.metrics, p.db) + + err = unittest.WithLock(t, p.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, rootBlock.ID(), rootBlock.ToHeader()) + }) + }) + require.NoError(t, err) + + err = unittest.WithLock(t, p.lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, rootBlock.Height, rootBlock.ID()) + }) + }) + require.NoError(t, err) + + // store and index the latest sealed block header + err = unittest.WithLock(t, p.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) 
error { + return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, sealedBlock.ID(), sealedBlock.ToHeader()) + }) + }) + require.NoError(t, err) + + err = unittest.WithLock(t, p.lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return p.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, sealedBlock.Height, sealedBlock.ID()) + }) + }) + require.NoError(t, err) + + // Store and index sealed block execution result + err = p.results.Store(sealedExecutionResult) + p.Require().NoError(err) + + err = p.results.Index(sealedBlock.ID(), sealedExecutionResult.ID()) + p.Require().NoError(err) + + p.persistentLatestSealedResult, err = store.NewLatestPersistedSealedResult(p.consumerProgress, p.headers, p.results) + p.Require().NoError(err) + + p.execDataRequester = reqestermock.NewExecutionDataRequester(t) + p.txResultErrMsgsRequester = txerrmsgsmock.NewRequester(t) + p.txResultErrMsgsRequestTimeout = DefaultTxResultErrMsgsRequestTimeout + + p.config = PipelineConfig{ + parentState: StateWaitingPersist, + } + + // generate expected data based on the fixtures + var expectedEvents flow.EventsList + var expectedResults []flow.LightTransactionResult + var expectedRegisterEntries []flow.RegisterEntry + var expectedCollections []*flow.Collection + var expectedTransactions []*flow.TransactionBody + for i, chunk := range p.expectedExecutionData.ChunkExecutionDatas { + expectedEvents = append(expectedEvents, chunk.Events...) + expectedResults = append(expectedResults, chunk.TransactionResults...) + + // skip the system chunk collection + if i < len(p.expectedExecutionData.ChunkExecutionDatas)-1 { + expectedCollections = append(expectedCollections, chunk.Collection) + expectedTransactions = append(expectedTransactions, chunk.Collection.Transactions...) + } + + for j, payload := range chunk.TrieUpdate.Payloads { + key, value, err := convert.PayloadToRegister(payload) + p.Require().NoError(err) + + if j == 0 && i < len(p.expectedExecutionData.ChunkExecutionDatas)-1 { + continue + } + + expectedRegisterEntries = append(expectedRegisterEntries, flow.RegisterEntry{ + Key: key, + Value: value, + }) + } + } + + p.expectedData = &expectedData{ + events: expectedEvents, + results: expectedResults, + collections: expectedCollections, + transactions: expectedTransactions, + registerEntries: expectedRegisterEntries, + } +} + +// TearDownTest cleans up resources after each test case. +// It closes database connections and removes temporary directories +// to ensure a clean state for subsequent tests. +func (p *PipelineFunctionalSuite) TearDownTest() { + p.Require().NoError(p.db.Close()) + p.Require().NoError(p.registerDB.Close()) + p.Require().NoError(os.RemoveAll(p.tmpDir)) + p.Require().NoError(os.RemoveAll(p.registerTmpDir)) +} + +// TestPipelineCompletesSuccessfully verifies the successful completion of the pipeline. +// It tests that: +// 1. Pipeline processes execution data through all states correctly +// 2. All data types (events, collections, transactions, registers, error messages) are correctly persisted to storage +// 3. 
No errors occur during the entire process +func (p *PipelineFunctionalSuite) TestPipelineCompletesSuccessfully() { + p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Once() + p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once() + + p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) { + pipeline.OnParentStateUpdated(StateComplete) + + waitForStateUpdates(p.T(), updateChan, errChan, StateProcessing, StateWaitingPersist) + + pipeline.SetSealed() + + waitForStateUpdates(p.T(), updateChan, errChan, StateComplete) + + actualEvents, err := p.persistentEvents.ByBlockID(p.block.ID()) + p.Require().NoError(err) + p.Assert().Equal([]flow.Event(p.expectedData.events), actualEvents) + + actualResults, err := p.persistentResults.ByBlockID(p.block.ID()) + p.Require().NoError(err) + p.Assert().Equal(p.expectedData.results, actualResults) + + for _, expectedCollection := range p.expectedData.collections { + actualCollection, err := p.persistentCollections.ByID(expectedCollection.ID()) + p.Require().NoError(err) + p.Assert().Equal(expectedCollection, actualCollection) + + actualLightCollection, err := p.persistentCollections.LightByID(expectedCollection.ID()) + p.Require().NoError(err) + p.Assert().Equal(expectedCollection.Light(), actualLightCollection) + } + + for _, expectedTransaction := range p.expectedData.transactions { + actualTransaction, err := p.persistentTransactions.ByID(expectedTransaction.ID()) + p.Require().NoError(err) + p.Assert().Equal(expectedTransaction, actualTransaction) + } + + for _, expectedRegisterEntry := range p.expectedData.registerEntries { + actualValue, err := p.persistentRegisters.Get(expectedRegisterEntry.Key, p.block.Height) + p.Require().NoError(err) + p.Assert().Equal(expectedRegisterEntry.Value, actualValue) + } + + actualTxResultErrMsgs, err := p.persistentTxResultErrMsg.ByBlockID(p.block.ID()) + p.Require().NoError(err) + p.Assert().Equal(p.expectedTxResultErrMsgs, actualTxResultErrMsgs) + + }, p.config) +} + +// TestPipelineDownloadError tests how the pipeline handles errors during the download phase. +// It ensures that both execution data and transaction result error message request errors +// are correctly detected and returned. 
+func (p *PipelineFunctionalSuite) TestPipelineDownloadError() {
+	p.Run("execution data requester malformed data error", func() {
+		expectedErr := execution_data.NewMalformedDataError(fmt.Errorf("execution data test deserialization error"))
+
+		p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(nil, expectedErr).Once()
+		p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once()
+
+		p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) {
+			pipeline.OnParentStateUpdated(StateComplete)
+
+			waitForError(p.T(), errChan, expectedErr)
+			p.Assert().Equal(StateProcessing, pipeline.GetState())
+		}, p.config)
+	})
+
+	p.Run("transaction result error messages requester not found error", func() {
+		expectedErr := fmt.Errorf("test transaction result error messages not found error")
+
+		p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Once()
+		p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(nil, expectedErr).Once()
+
+		p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) {
+			pipeline.OnParentStateUpdated(StateComplete)
+
+			waitForError(p.T(), errChan, expectedErr)
+			p.Assert().Equal(StateProcessing, pipeline.GetState())
+		}, p.config)
+	})
+}
+
+// TestPipelineIndexingError tests error handling during the indexing phase.
+// It verifies that when execution data contains invalid block IDs, the pipeline
+// properly detects the inconsistency and returns an appropriate error.
+func (p *PipelineFunctionalSuite) TestPipelineIndexingError() {
+	invalidBlockID := unittest.IdentifierFixture()
+	// Setup successful download
+	expectedExecutionData := unittest.BlockExecutionDataFixture(
+		unittest.WithBlockExecutionDataBlockID(invalidBlockID), // Wrong block ID to cause indexing error
+	)
+	p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(expectedExecutionData, nil).Once()
+
+	// Note: txResultErrMsgsRequester.Request() currently never returns an error, so that case is skipped
+	p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once()
+
+	expectedIndexingError := fmt.Errorf("could not perform indexing: failed to index execution data: unexpected block execution data: expected block_id=%s, actual block_id=%s",
+		p.block.ID().String(), invalidBlockID.String())
+
+	p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) {
+		pipeline.OnParentStateUpdated(StateComplete)
+
+		waitForErrorWithCustomCheckers(p.T(), errChan, func(err error) {
+			p.Require().Error(err)
+			p.Assert().Equal(expectedIndexingError.Error(), err.Error())
+		})
+		p.Assert().Equal(StateProcessing, pipeline.GetState())
+	}, p.config)
+}
+
+// TestPipelinePersistingError tests the pipeline behavior when an error occurs during the persisting step.
+func (p *PipelineFunctionalSuite) TestPipelinePersistingError() {
+	expectedError := fmt.Errorf("test events batch store error")
+	// Mock the events storage to simulate an error during the persisting step. In the normal flow,
+	// with real storages, it is hard to trigger a meaningful error explicitly.
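+	// The suite's persistentEvents field is replaced by the mock before WithRunningPipeline
+	// constructs the Core, so the failing storage is wired into the pipeline under test.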
+	mockEvents := storagemock.NewEvents(p.T())
+	mockEvents.On("BatchStore", mock.Anything, mock.Anything, mock.Anything).Return(expectedError).Once()
+	p.persistentEvents = mockEvents
+
+	p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Once()
+	p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once()
+
+	p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) {
+		pipeline.OnParentStateUpdated(StateComplete)
+
+		waitForStateUpdates(p.T(), updateChan, errChan, StateProcessing, StateWaitingPersist)
+
+		pipeline.SetSealed()
+
+		waitForError(p.T(), errChan, expectedError)
+		p.Assert().Equal(StateWaitingPersist, pipeline.GetState())
+	}, p.config)
+}
+
+// TestMainCtxCancellationDuringRequestingExecutionData tests context cancellation during the
+// request of execution data. It ensures that cancellation is handled properly when triggered
+// while execution data is being downloaded.
+func (p *PipelineFunctionalSuite) TestMainCtxCancellationDuringRequestingExecutionData() {
+	p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) {
+		p.execDataRequester.
+			On("RequestExecutionData", mock.Anything).
+			Return(func(ctx context.Context) (*execution_data.BlockExecutionData, error) {
+				cancel()
+				<-ctx.Done()
+				return nil, ctx.Err()
+			}).
+			Once()
+
+		// This call is marked `Maybe()` because it may not be invoked, depending on timing.
+		p.txResultErrMsgsRequester.On("Request", mock.Anything).Return([]flow.TransactionResultErrorMessage{}, nil).Maybe()
+
+		pipeline.OnParentStateUpdated(StateComplete)
+
+		waitForStateUpdates(p.T(), updateChan, errChan, StateProcessing)
+		waitForError(p.T(), errChan, context.Canceled)
+
+		p.Assert().Equal(StateProcessing, pipeline.GetState())
+	}, PipelineConfig{parentState: StatePending})
+}
+
+// TestMainCtxCancellationDuringRequestingTxResultErrMsgs tests context cancellation during
+// the request of transaction result error messages. It verifies that when the parent context
+// is canceled during this phase, the pipeline handles the cancellation gracefully
+// and transitions to the correct state.
+func (p *PipelineFunctionalSuite) TestMainCtxCancellationDuringRequestingTxResultErrMsgs() {
+	p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) {
+		// This call is marked `Maybe()` because it may not be invoked, depending on timing.
+		p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(nil, nil).Maybe()
+
+		p.txResultErrMsgsRequester.
+			On("Request", mock.Anything).
+			Return(func(ctx context.Context) ([]flow.TransactionResultErrorMessage, error) {
+				cancel()
+				<-ctx.Done()
+				return nil, ctx.Err()
+			}).
+			Once()
+
+		pipeline.OnParentStateUpdated(StateComplete)
+
+		waitForStateUpdates(p.T(), updateChan, errChan, StateProcessing)
+		waitForError(p.T(), errChan, context.Canceled)
+
+		p.Assert().Equal(StateProcessing, pipeline.GetState())
+	}, PipelineConfig{parentState: StatePending})
+}
+
+// TestMainCtxCancellationDuringWaitingPersist tests the pipeline's behavior when the main context is canceled during StateWaitingPersist.
+func (p *PipelineFunctionalSuite) TestMainCtxCancellationDuringWaitingPersist() { + p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Once() + p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Once() + + p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) { + pipeline.OnParentStateUpdated(StateComplete) + + waitForStateUpdates(p.T(), updateChan, errChan, StateProcessing, StateWaitingPersist) + + cancel() + + pipeline.SetSealed() + + waitForError(p.T(), errChan, context.Canceled) + + p.Assert().Equal(StateWaitingPersist, pipeline.GetState()) + }, p.config) +} + +// TestPipelineShutdownOnParentAbandon verifies that the pipeline transitions correctly to a shutdown state when the parent is abandoned. +func (p *PipelineFunctionalSuite) TestPipelineShutdownOnParentAbandon() { + tests := []struct { + name string + config PipelineConfig + customSetup func(pipeline Pipeline, updateChan chan State, errChan chan error) + }{ + { + name: "from StatePending", + config: PipelineConfig{ + beforePipelineRun: func(pipeline *PipelineImpl) { + pipeline.OnParentStateUpdated(StateAbandoned) + }, + parentState: StateAbandoned, + }, + }, + { + name: "from StateProcessing", + customSetup: func(pipeline Pipeline, updateChan chan State, errChan chan error) { + waitForStateUpdates(p.T(), updateChan, errChan, StateProcessing) + + pipeline.OnParentStateUpdated(StateAbandoned) + }, + config: p.config, + }, + { + name: "from StateWaitingPersist", + customSetup: func(pipeline Pipeline, updateChan chan State, errChan chan error) { + waitForStateUpdates(p.T(), updateChan, errChan, StateProcessing, StateWaitingPersist) + + pipeline.OnParentStateUpdated(StateAbandoned) + }, + config: p.config, + }, + } + + for _, test := range tests { + p.T().Run(test.name, func(t *testing.T) { + p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) { + p.execDataRequester.On("RequestExecutionData", mock.Anything).Return(p.expectedExecutionData, nil).Maybe() + p.txResultErrMsgsRequester.On("Request", mock.Anything).Return(p.expectedTxResultErrMsgs, nil).Maybe() + + if test.customSetup != nil { + test.customSetup(pipeline, updateChan, errChan) + } + + waitForStateUpdates(p.T(), updateChan, errChan, StateAbandoned) + waitForError(p.T(), errChan, nil) + + p.Assert().Equal(StateAbandoned, pipeline.GetState()) + p.Assert().Nil(p.core.workingData) + }, test.config) + }) + } +} + +type PipelineConfig struct { + beforePipelineRun func(pipeline *PipelineImpl) + parentState State +} + +// WithRunningPipeline is a test helper that initializes and starts a pipeline instance. +// It manages the context and channels needed to run the pipeline and invokes the testFunc +// with access to the pipeline, update channel, error channel, and cancel function. 
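+//
+// Illustrative usage, mirroring the tests above (the callback body is only a sketch):
+//
+//	p.WithRunningPipeline(func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc) {
+//		pipeline.OnParentStateUpdated(StateComplete)
+//		waitForStateUpdates(p.T(), updateChan, errChan, StateProcessing, StateWaitingPersist)
+//	}, p.config)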
+func (p *PipelineFunctionalSuite) WithRunningPipeline(
+	testFunc func(pipeline Pipeline, updateChan chan State, errChan chan error, cancel context.CancelFunc),
+	pipelineConfig PipelineConfig,
+) {
+	var err error
+
+	p.core, err = NewCoreImpl(
+		p.logger,
+		p.executionResult,
+		p.block,
+		p.execDataRequester,
+		p.txResultErrMsgsRequester,
+		p.txResultErrMsgsRequestTimeout,
+		p.persistentRegisters,
+		p.persistentEvents,
+		p.persistentCollections,
+		p.persistentResults,
+		p.persistentTxResultErrMsg,
+		p.persistentLatestSealedResult,
+		p.db,
+		p.lockManager,
+	)
+	p.Require().NoError(err)
+
+	pipelineStateConsumer := NewMockStateConsumer()
+
+	pipeline := NewPipeline(p.logger, p.executionResult, false, pipelineStateConsumer)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	errChan := make(chan error)
+	// used to wait until the goroutine running the pipeline is ready
+	pipelineIsReady := make(chan struct{})
+
+	go func() {
+		if pipelineConfig.beforePipelineRun != nil {
+			pipelineConfig.beforePipelineRun(pipeline)
+		}
+
+		close(pipelineIsReady)
+
+		errChan <- pipeline.Run(ctx, p.core, pipelineConfig.parentState)
+	}()
+
+	<-pipelineIsReady
+
+	testFunc(pipeline, pipelineStateConsumer.updateChan, errChan, cancel)
+}
+
+// generateFixtureExtendingLatestPersisted generates a test fixture for the indexer, extending the
+// latest persisted sealed result. The returned data has the following properties:
+// - The block execution data contains collections for each of the block's guarantees, plus the system chunk
+// - Each collection has 3 transactions
+// - The first path in each trie update is the same, testing that the indexer will use the last value
+// - Every 3rd transaction is failed
+// - There are tx error messages for all failed transactions
+func generateFixtureExtendingLatestPersisted(g *fixtures.GeneratorSuite, parentHeader *flow.Header, latestPersistedResultID flow.Identifier) *testFixture {
+	collections := g.Collections().List(4, fixtures.Collection.WithTxCount(3))
+	chunkExecutionDatas := make([]*execution_data.ChunkExecutionData, len(collections))
+	guarantees := make([]*flow.CollectionGuarantee, len(collections)-1)
+	var txErrMsgs []flow.TransactionResultErrorMessage
+	path := g.LedgerPaths().Fixture()
+	for i, collection := range collections {
+		chunkData := g.ChunkExecutionDatas().Fixture(
+			fixtures.ChunkExecutionData.WithCollection(collection),
+		)
+		// use the same path for the first ledger payload in each chunk. the indexer should choose the
+		// last value for the register entry.
+		chunkData.TrieUpdate.Paths[0] = path
+		chunkExecutionDatas[i] = chunkData
+
+		if i < len(collections)-1 {
+			guarantees[i] = g.Guarantees().Fixture(fixtures.Guarantee.WithCollectionID(collection.ID()))
+		}
+		for txIndex := range chunkExecutionDatas[i].TransactionResults {
+			if txIndex%3 == 0 {
+				chunkExecutionDatas[i].TransactionResults[txIndex].Failed = true
+			}
+		}
+		txErrMsgs = append(txErrMsgs, g.TransactionErrorMessages().ForTransactionResults(chunkExecutionDatas[i].TransactionResults)...)
+	}
+
+	payload := g.Payloads().Fixture(fixtures.Payload.WithGuarantees(guarantees...))
+	block := g.Blocks().Fixture(
+		fixtures.Block.WithParentHeader(parentHeader),
+		fixtures.Block.WithPayload(payload),
+	)
+
+	exeResult := g.ExecutionResults().Fixture(
+		fixtures.ExecutionResult.WithBlock(block),
+		fixtures.ExecutionResult.WithPreviousResultID(latestPersistedResultID),
+	)
+	execData := g.BlockExecutionDatas().Fixture(
+		fixtures.BlockExecutionData.WithBlockID(block.ID()),
+		fixtures.BlockExecutionData.WithChunkExecutionDatas(chunkExecutionDatas...),
+	)
+	return &testFixture{
+		block:     block,
+		exeResult: exeResult,
+		execData:  execData,
+		txErrMsgs: txErrMsgs,
+	}
+}
diff --git a/module/executiondatasync/optimistic_sync/pipeline_test.go b/module/executiondatasync/optimistic_sync/pipeline_test.go
new file mode 100644
index 00000000000..123ff2e4498
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/pipeline_test.go
@@ -0,0 +1,449 @@
+package optimistic_sync
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"testing/synctest"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
+	osmock "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestPipelineStateTransitions verifies that the pipeline correctly transitions
+// through states when provided with the correct conditions.
+func TestPipelineStateTransitions(t *testing.T) {
+	synctest.Test(t, func(t *testing.T) {
+		pipeline, mockCore, updateChan, parent := createPipeline(t)
+
+		pipeline.SetSealed()
+		parent.UpdateState(StateComplete, pipeline)
+
+		mockCore.On("Download", mock.Anything).Return(nil)
+		mockCore.On("Index").Return(nil)
+		mockCore.On("Persist").Return(nil)
+
+		assert.Equal(t, StatePending, pipeline.GetState(), "Pipeline should start in Pending state")
+
+		go func() {
+			err := pipeline.Run(context.Background(), mockCore, parent.GetState())
+			require.NoError(t, err)
+		}()
+
+		for _, expected := range []State{StateProcessing, StateWaitingPersist, StateComplete} {
+			synctest.Wait()
+			assertUpdate(t, updateChan, expected)
+		}
+
+		// wait for Run goroutine to finish
+		synctest.Wait()
+		assertNoUpdate(t, pipeline, updateChan, StateComplete)
+	})
+}
+
+// TestPipelineParentDependentTransitions verifies that a pipeline's transitions
+// depend on the parent pipeline's state.
+func TestPipelineParentDependentTransitions(t *testing.T) {
+	synctest.Test(t, func(t *testing.T) {
+		pipeline, mockCore, updateChan, parent := createPipeline(t)
+
+		assert.Equal(t, StatePending, pipeline.GetState(), "Pipeline should start in Pending state")
+
+		go func() {
+			err := pipeline.Run(context.Background(), mockCore, parent.GetState())
+			require.NoError(t, err)
+		}()
+
+		// 1. Initial update - parent in Pending state
+		parent.UpdateState(StatePending, pipeline)
+
+		// Check that pipeline remains in Pending state
+		synctest.Wait()
+		assertNoUpdate(t, pipeline, updateChan, StatePending)
+
+		// 2.
Update parent to StateProcessing
+		parent.UpdateState(StateProcessing, pipeline)
+
+		// Pipeline should now call Download and Index within the processing state, then progress to
+		// WaitingPersist and stop
+		mockCore.On("Download", mock.Anything).Return(nil)
+		mockCore.On("Index").Return(nil)
+		for _, expected := range []State{StateProcessing, StateWaitingPersist} {
+			synctest.Wait()
+			assertUpdate(t, updateChan, expected)
+		}
+		assert.Equal(t, StateWaitingPersist, pipeline.GetState(), "Pipeline should be in StateWaitingPersist state")
+
+		// pipeline should remain in WaitingPersist state
+		synctest.Wait()
+		assertNoUpdate(t, pipeline, updateChan, StateWaitingPersist)
+
+		// 3. Update parent to complete - should allow persisting when sealed
+		parent.UpdateState(StateComplete, pipeline)
+
+		// this alone should not allow the pipeline to progress to any other state
+		synctest.Wait()
+		assertNoUpdate(t, pipeline, updateChan, StateWaitingPersist)
+
+		// 4. Mark the execution result as sealed, this should allow the pipeline to progress to Complete state
+		pipeline.SetSealed()
+		mockCore.On("Persist").Return(nil)
+
+		// Wait for pipeline to complete
+		synctest.Wait()
+		assertUpdate(t, updateChan, StateComplete)
+
+		// Run should complete without error
+		synctest.Wait()
+		assertNoUpdate(t, pipeline, updateChan, StateComplete)
+	})
+}
+
+// TestAbandoned verifies that a pipeline is properly abandoned, both when abandoned
+// directly and when the parent pipeline is abandoned.
+func TestAbandoned(t *testing.T) {
+	t.Run("starts already abandoned", func(t *testing.T) {
+		synctest.Test(t, func(t *testing.T) {
+			pipeline, mockCore, updateChan, parent := createPipeline(t)
+
+			mockCore.On("Abandon").Return(nil)
+
+			pipeline.Abandon()
+
+			go func() {
+				err := pipeline.Run(context.Background(), mockCore, parent.GetState())
+				require.NoError(t, err)
+			}()
+
+			// first state must be abandoned
+			synctest.Wait()
+			assertUpdate(t, updateChan, StateAbandoned)
+
+			// wait for Run goroutine to finish
+			synctest.Wait()
+			assertNoUpdate(t, pipeline, updateChan, StateAbandoned)
+		})
+	})
+
+	// Test cases for abandonment during different stages of processing
+	testCases := []struct {
+		name           string
+		setupMock      func(*PipelineImpl, *mockStateProvider, *osmock.Core)
+		onStateFns     map[State]func(*PipelineImpl, *mockStateProvider)
+		expectedStates []State
+	}{
+		{
+			name: "Abandon during download",
+			setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) {
+				mockCore.
+					On("Download", mock.Anything).
+					Return(func(ctx context.Context) error {
+						pipeline.Abandon()
+						<-ctx.Done() // abandon should cause context to be canceled
+						return ctx.Err()
+					})
+			},
+			expectedStates: []State{StateProcessing, StateAbandoned},
+		},
+		{
+			name: "Parent abandoned during download",
+			setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) {
+				mockCore.
+					On("Download", mock.Anything).
+					Return(func(ctx context.Context) error {
+						parent.UpdateState(StateAbandoned, pipeline)
+						<-ctx.Done() // abandon should cause context to be canceled
+						return ctx.Err()
+					})
+			},
+			expectedStates: []State{StateProcessing, StateAbandoned},
+		},
+		{
+			name: "Abandon during index",
+			// Note: the Index call itself completes, but the abandon is observed first, so the
+			// pipeline transitions directly from Processing to Abandoned
+			setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) {
+				mockCore.On("Download", mock.Anything).Return(nil)
+				mockCore.On("Index").Run(func(args mock.Arguments) {
+					pipeline.Abandon()
+				}).Return(nil)
+			},
+			expectedStates: []State{StateProcessing, StateAbandoned},
+		},
+		{
+			name: "Parent abandoned during index",
+			// Note: the Index call itself completes, but the abandon is observed first, so the
+			// pipeline transitions directly from Processing to Abandoned
+			setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) {
+				mockCore.On("Download", mock.Anything).Return(nil)
+				mockCore.On("Index").Run(func(args mock.Arguments) {
+					parent.UpdateState(StateAbandoned, pipeline)
+				}).Return(nil)
+			},
+			expectedStates: []State{StateProcessing, StateAbandoned},
+		},
+		{
+			name: "Abandon during waiting to persist",
+			setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) {
+				mockCore.On("Download", mock.Anything).Return(nil)
+				mockCore.On("Index").Return(nil)
+			},
+			onStateFns: map[State]func(*PipelineImpl, *mockStateProvider){
+				StateWaitingPersist: func(pipeline *PipelineImpl, parent *mockStateProvider) {
+					pipeline.Abandon()
+				},
+			},
+			expectedStates: []State{StateProcessing, StateWaitingPersist, StateAbandoned},
+		},
+		{
+			name: "Parent abandoned during waiting to persist",
+			setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) {
+				mockCore.On("Download", mock.Anything).Return(nil)
+				mockCore.On("Index").Return(nil)
+			},
+			onStateFns: map[State]func(*PipelineImpl, *mockStateProvider){
+				StateWaitingPersist: func(pipeline *PipelineImpl, parent *mockStateProvider) {
+					parent.UpdateState(StateAbandoned, pipeline)
+				},
+			},
+			expectedStates: []State{StateProcessing, StateWaitingPersist, StateAbandoned},
+		},
+		// Note: it does not make sense to abandon during persist, since it will only be run when:
+		// 1. the parent is already complete
+		// 2.
the pipeline's result is sealed
+		// At that point, there are no conditions that would cause the pipeline to transition to any other state
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			synctest.Test(t, func(t *testing.T) {
+				pipeline, mockCore, updateChan, parent := createPipeline(t)
+				tc.setupMock(pipeline, parent, mockCore)
+
+				mockCore.On("Abandon").Return(nil)
+
+				go func() {
+					err := pipeline.Run(context.Background(), mockCore, parent.GetState())
+					require.NoError(t, err)
+				}()
+
+				// Send parent update to start processing
+				parent.UpdateState(StateProcessing, pipeline)
+
+				for _, expected := range tc.expectedStates {
+					synctest.Wait()
+					assertUpdate(t, updateChan, expected)
+
+					if fn, ok := tc.onStateFns[expected]; ok {
+						fn(pipeline, parent)
+					}
+				}
+
+				// wait for Run goroutine to finish
+				synctest.Wait()
+				assertNoUpdate(t, pipeline, updateChan, StateAbandoned)
+			})
+		})
+	}
+}
+
+// TestPipelineContextCancellation tests the Run method's context cancellation behavior during different stages of processing
+func TestPipelineContextCancellation(t *testing.T) {
+	// Test cases for different stages of processing
+	testCases := []struct {
+		name      string
+		setupMock func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) context.Context
+	}{
+		{
+			name: "Cancel before download starts",
+			setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) context.Context {
+				ctx, cancel := context.WithCancel(context.Background())
+				cancel()
+				// no Core methods called
+				return ctx
+			},
+		},
+		{
+			name: "Cancel during download",
+			setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) context.Context {
+				ctx, cancel := context.WithCancel(context.Background())
+				mockCore.On("Download", mock.Anything).Run(func(args mock.Arguments) {
+					cancel()
+					pipelineCtx := args[0].(context.Context)
+					unittest.RequireCloseBefore(t, pipelineCtx.Done(), 500*time.Millisecond, "cancellation should propagate to the pipeline context")
+				}).Return(func(pipelineCtx context.Context) error {
+					return pipelineCtx.Err()
+				})
+				return ctx
+			},
+		},
+		{
+			name: "Cancel between steps",
+			setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core) context.Context {
+				ctx, cancel := context.WithCancel(context.Background())
+
+				mockCore.On("Download", mock.Anything).Return(nil)
+				mockCore.On("Index").Run(func(args mock.Arguments) {
+					cancel()
+				}).Return(nil)
+
+				return ctx
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			synctest.Test(t, func(t *testing.T) {
+				pipeline, mockCore, _, parent := createPipeline(t)
+
+				parent.UpdateState(StateComplete, pipeline)
+				pipeline.SetSealed()
+
+				ctx := tc.setupMock(pipeline, parent, mockCore)
+
+				go func() {
+					err := pipeline.Run(ctx, mockCore, parent.GetState())
+					require.ErrorIs(t, err, context.Canceled)
+				}()
+
+				// wait for Run goroutine to finish
+				synctest.Wait()
+			})
+		})
+	}
+}
+
+// TestPipelineErrorHandling verifies that errors from Core methods are properly
+// propagated back to the caller.
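+// Propagation relies on Run returning errors.Join(p.loop(ctx), p.worker.StopWait()), so an
+// error produced by a detached Core call is still delivered after the event loop exits.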
+func TestPipelineErrorHandling(t *testing.T) { + // Test cases for different stages of processing + testCases := []struct { + name string + setupMock func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core, expectedErr error) + expectedErr error + expectedStates []State + }{ + { + name: "Download Error", + setupMock: func(pipeline *PipelineImpl, _ *mockStateProvider, mockCore *osmock.Core, expectedErr error) { + mockCore.On("Download", mock.Anything).Return(expectedErr) + }, + expectedErr: errors.New("download error"), + expectedStates: []State{StateProcessing}, + }, + { + name: "Index Error", + setupMock: func(pipeline *PipelineImpl, _ *mockStateProvider, mockCore *osmock.Core, expectedErr error) { + mockCore.On("Download", mock.Anything).Return(nil) + mockCore.On("Index").Return(expectedErr) + }, + expectedErr: errors.New("index error"), + expectedStates: []State{StateProcessing}, + }, + { + name: "Persist Error", + setupMock: func(pipeline *PipelineImpl, parent *mockStateProvider, mockCore *osmock.Core, expectedErr error) { + mockCore.On("Download", mock.Anything).Return(nil) + mockCore.On("Index").Run(func(args mock.Arguments) { + parent.UpdateState(StateComplete, pipeline) + pipeline.SetSealed() + }).Return(nil) + mockCore.On("Persist").Return(expectedErr) + }, + expectedErr: errors.New("persist error"), + expectedStates: []State{StateProcessing, StateWaitingPersist}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + pipeline, mockCore, updateChan, parent := createPipeline(t) + + tc.setupMock(pipeline, parent, mockCore, tc.expectedErr) + + go func() { + err := pipeline.Run(context.Background(), mockCore, parent.GetState()) + require.ErrorIs(t, err, tc.expectedErr) + }() + + // Send parent update to trigger processing + parent.UpdateState(StateProcessing, pipeline) + + for _, expected := range tc.expectedStates { + synctest.Wait() + assertUpdate(t, updateChan, expected) + } + + // wait for Run goroutine to finish + synctest.Wait() + finalState := tc.expectedStates[len(tc.expectedStates)-1] + assertNoUpdate(t, pipeline, updateChan, finalState) + }) + }) + } +} + +// TestSetSealed verifies that the pipeline correctly sets the sealed flag. +func TestSetSealed(t *testing.T) { + pipeline, _, _, _ := createPipeline(t) + + pipeline.SetSealed() + assert.True(t, pipeline.isSealed.Load()) +} + +// TestValidateTransition verifies that the pipeline correctly validates state transitions. 
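+//
+// The valid transitions form a linear happy path, with Abandoned reachable from every
+// non-terminal state:
+//
+//	Pending -> Processing -> WaitingPersist -> Complete
+//	    \           |              /
+//	     +-----> Abandoned <------+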
+func TestValidateTransition(t *testing.T) {
+	allStates := []State{StatePending, StateProcessing, StateWaitingPersist, StateComplete, StateAbandoned}
+
+	// these are all the valid transitions from one state to another
+	validTransitions := map[State]map[State]bool{
+		StatePending:        {StateProcessing: true, StateAbandoned: true},
+		StateProcessing:     {StateWaitingPersist: true, StateAbandoned: true},
+		StateWaitingPersist: {StateComplete: true, StateAbandoned: true},
+		StateComplete:       {},
+		StateAbandoned:      {},
+	}
+
+	// iterate through all possible transitions, and validate that the valid transitions succeed, and the invalid transitions fail.
+	// StatePending is skipped as a target state: it is never a valid destination, and validateTransition
+	// returns a generic error (not ErrInvalidTransition) for it.
+	pipeline, _, _, _ := createPipeline(t)
+	for _, currentState := range allStates {
+		for _, newState := range allStates[1:] {
+			if currentState == newState {
+				continue // skip since higher level code will handle this
+			}
+
+			err := pipeline.validateTransition(currentState, newState)
+
+			if validTransitions[currentState][newState] {
+				assert.NoError(t, err)
+				continue
+			}
+
+			assert.ErrorIs(t, err, ErrInvalidTransition)
+		}
+	}
+}
+
+func assertNoUpdate(t *testing.T, pipeline Pipeline, updateChan <-chan State, existingState State) {
+	select {
+	case update := <-updateChan:
+		t.Errorf("Pipeline should remain in %s state, but transitioned to %s", existingState, update)
+	default:
+		assert.Equal(t, existingState, pipeline.GetState(), "Pipeline should remain in %s state", existingState)
+	}
+}
+
+func assertUpdate(t *testing.T, updateChan <-chan State, expected State) {
+	select {
+	case update := <-updateChan:
+		assert.Equal(t, expected, update, "Pipeline should transition to %s state", expected)
+	default:
+		t.Errorf("Pipeline should have transitioned to %s state", expected)
+	}
+}
diff --git a/module/executiondatasync/optimistic_sync/pipeline_test_utils.go b/module/executiondatasync/optimistic_sync/pipeline_test_utils.go
new file mode 100644
index 00000000000..98e4e35a072
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/pipeline_test_utils.go
@@ -0,0 +1,118 @@
+package optimistic_sync
+
+import (
+	"testing"
+	"time"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	osmock "github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// mockStateProvider is a mock implementation of a parent state provider.
+// It tracks the current state and notifies the pipeline when the state changes.
+type mockStateProvider struct {
+	state State
+}
+
+var _ PipelineStateProvider = (*mockStateProvider)(nil)
+
+// NewMockStateProvider initializes a mockStateProvider with the default state StatePending.
+func NewMockStateProvider() *mockStateProvider {
+	return &mockStateProvider{
+		state: StatePending,
+	}
+}
+
+// UpdateState sets the internal state and triggers a pipeline update.
+func (m *mockStateProvider) UpdateState(state State, pipeline *PipelineImpl) {
+	m.state = state
+	pipeline.OnParentStateUpdated(state)
+}
+
+// GetState returns the current internal state.
+func (m *mockStateProvider) GetState() State {
+	return m.state
+}
+
+// mockStateConsumer is a mock implementation used in tests to receive state updates from the pipeline.
+// It exposes a buffered channel to capture the state transitions.
+type mockStateConsumer struct {
+	updateChan chan State
+}
+
+var _ PipelineStateConsumer = (*mockStateConsumer)(nil)
+
+// NewMockStateConsumer creates a new instance of mockStateConsumer with a buffered channel.
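+// The buffer (capacity 10) keeps OnStateUpdated non-blocking, matching the requirement that
+// [PipelineStateConsumer] implementations consume state updates without noteworthy delay.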
+func NewMockStateConsumer() *mockStateConsumer {
+	return &mockStateConsumer{
+		updateChan: make(chan State, 10),
+	}
+}
+
+func (m *mockStateConsumer) OnStateUpdated(state State) {
+	m.updateChan <- state
+}
+
+// waitForStateUpdates waits for a sequence of state updates to occur, or times out after 500ms.
+// updates must be received in the correct order or the test will fail.
+func waitForStateUpdates(t *testing.T, updateChan <-chan State, errChan <-chan error, expectedStates ...State) {
+	done := make(chan struct{})
+	unittest.RequireReturnsBefore(t, func() {
+		for _, expected := range expectedStates {
+			select {
+			case <-done:
+				return
+			case err := <-errChan:
+				require.NoError(t, err, "pipeline returned error")
+			case update := <-updateChan:
+				assert.Equalf(t, expected, update, "expected pipeline to transition to %s, but got %s", expected, update)
+			}
+		}
+	}, 500*time.Millisecond, "Timeout waiting for state update")
+	close(done) // make sure the function exits after a timeout
+}
+
+// waitForErrorWithCustomCheckers waits for an error from the errChan within 500ms
+// and applies custom checker functions to validate the error.
+// If no checkers are provided, it asserts that no error occurred.
+func waitForErrorWithCustomCheckers(t *testing.T, errChan <-chan error, errorCheckers ...func(err error)) {
+	unittest.RequireReturnsBefore(t, func() {
+		err := <-errChan
+		if len(errorCheckers) == 0 {
+			assert.NoError(t, err, "Pipeline should complete without errors")
+		} else {
+			for _, checker := range errorCheckers {
+				checker(err)
+			}
+		}
+	}, 500*time.Millisecond, "Timeout waiting for error")
+}
+
+// waitForError waits for an error from the errChan within 500ms and asserts it matches the expected error.
+func waitForError(t *testing.T, errChan <-chan error, expectedErr error) {
+	unittest.RequireReturnsBefore(t, func() {
+		err := <-errChan
+		if expectedErr == nil {
+			assert.NoError(t, err, "Pipeline should complete without errors")
+		} else {
+			assert.ErrorIs(t, err, expectedErr)
+		}
+	}, 500*time.Millisecond, "Timeout waiting for error")
+}
+
+// createPipeline initializes and returns a pipeline instance with its mock dependencies.
+// It returns the pipeline, the mocked core, a state update channel, and the parent state provider.
+func createPipeline(t *testing.T) (*PipelineImpl, *osmock.Core, <-chan State, *mockStateProvider) {
+	mockCore := osmock.NewCore(t)
+	parent := NewMockStateProvider()
+	stateReceiver := NewMockStateConsumer()
+
+	pipeline := NewPipeline(zerolog.Nop(), unittest.ExecutionResultFixture(), false, stateReceiver)
+
+	return pipeline, mockCore, stateReceiver.updateChan, parent
+}
diff --git a/module/executiondatasync/optimistic_sync/snapshot.go b/module/executiondatasync/optimistic_sync/snapshot.go
new file mode 100644
index 00000000000..3b3dd371f29
--- /dev/null
+++ b/module/executiondatasync/optimistic_sync/snapshot.go
@@ -0,0 +1,26 @@
+package optimistic_sync
+
+import (
+	"github.com/onflow/flow-go/storage"
+)
+
+// Snapshot provides access to execution data readers for querying various data types from a specific ExecutionResult.
+type Snapshot interface {
+	// Events returns a reader for querying event data.
+	Events() storage.EventsReader
+
+	// Collections returns a reader for querying collection data.
+	Collections() storage.CollectionsReader
+
+	// Transactions returns a reader for querying transaction data.
+	Transactions() storage.TransactionsReader
+
+	// LightTransactionResults returns a reader for querying light transaction result data.
+ LightTransactionResults() storage.LightTransactionResultsReader + + // TransactionResultErrorMessages returns a reader for querying transaction error message data. + TransactionResultErrorMessages() storage.TransactionResultErrorMessagesReader + + // Registers returns a reader for querying register data. + Registers() storage.RegisterIndexReader +} diff --git a/module/executiondatasync/optimistic_sync/state.go b/module/executiondatasync/optimistic_sync/state.go new file mode 100644 index 00000000000..435c08bad8c --- /dev/null +++ b/module/executiondatasync/optimistic_sync/state.go @@ -0,0 +1,40 @@ +package optimistic_sync + +// State represents the state of the processing pipeline +type State int32 + +const ( + // StatePending is the initial state after instantiation, before Run is called + StatePending State = iota + // StateProcessing represents the state where data processing (download and indexing) has been started + StateProcessing + // StateWaitingPersist represents the state where all data is indexed, but conditions to persist are not met + StateWaitingPersist + // StateComplete represents the state where all data is persisted to storage + StateComplete + // StateAbandoned represents the state where processing was aborted + StateAbandoned +) + +// String representation of states for logging +func (s State) String() string { + switch s { + case StatePending: + return "pending" + case StateWaitingPersist: + return "waiting_persist" + case StateProcessing: + return "processing" + case StateComplete: + return "complete" + case StateAbandoned: + return "abandoned" + default: + return "" + } +} + +// IsTerminal returns true if the state is a terminal state (Complete or Abandoned). +func (s State) IsTerminal() bool { + return s == StateComplete || s == StateAbandoned +} diff --git a/module/executiondatasync/provider/provider.go b/module/executiondatasync/provider/provider.go index 3345f7edb36..72551826634 100644 --- a/module/executiondatasync/provider/provider.go +++ b/module/executiondatasync/provider/provider.go @@ -19,24 +19,30 @@ import ( "github.com/onflow/flow-go/network" ) -type ProviderOption func(*Provider) +type ProviderOption func(*ExecutionDataProvider) func WithBlobSizeLimit(size int) ProviderOption { - return func(p *Provider) { + return func(p *ExecutionDataProvider) { p.maxBlobSize = size } } // Provider is used to provide execution data blobs over the network via a blob service. 
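+// The interface is extracted from the former concrete type so that callers depend only on
+// the Provide behavior and tests can substitute a mock implementation.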
-type Provider struct { - logger zerolog.Logger - metrics module.ExecutionDataProviderMetrics - maxBlobSize int - serializer execution_data.Serializer - blobService network.BlobService - storage tracker.Storage +type Provider interface { + Provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, error) +} + +type ExecutionDataProvider struct { + logger zerolog.Logger + metrics module.ExecutionDataProviderMetrics + maxBlobSize int + blobService network.BlobService + storage tracker.Storage + cidsProvider *ExecutionDataCIDProvider } +var _ Provider = (*ExecutionDataProvider)(nil) + func NewProvider( logger zerolog.Logger, metrics module.ExecutionDataProviderMetrics, @@ -44,14 +50,18 @@ func NewProvider( blobService network.BlobService, storage tracker.Storage, opts ...ProviderOption, -) *Provider { - p := &Provider{ - logger: logger.With().Str("component", "execution_data_provider").Logger(), - metrics: metrics, - maxBlobSize: execution_data.DefaultMaxBlobSize, - serializer: serializer, - blobService: blobService, - storage: storage, +) *ExecutionDataProvider { + if storage == nil { + storage = &tracker.NoopStorage{} + } + + p := &ExecutionDataProvider{ + logger: logger.With().Str("component", "execution_data_provider").Logger(), + metrics: metrics, + maxBlobSize: execution_data.DefaultMaxBlobSize, + cidsProvider: NewExecutionDataCIDProvider(serializer), + blobService: blobService, + storage: storage, } for _, opt := range opts { @@ -61,7 +71,7 @@ func NewProvider( return p } -func (p *Provider) storeBlobs(parent context.Context, blockHeight uint64, blobCh <-chan blobs.Blob) <-chan error { +func (p *ExecutionDataProvider) storeBlobs(parent context.Context, blockHeight uint64, blobCh <-chan blobs.Blob) <-chan error { ch := make(chan error, 1) go func() { defer close(ch) @@ -117,26 +127,26 @@ func (p *Provider) storeBlobs(parent context.Context, blockHeight uint64, blobCh // It computes and returns the root CID of the execution data blob tree. // This function returns once the root CID has been computed, and all blobs are successfully stored // in the Bitswap Blobstore. 
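+//
+// Illustrative call against the new signature (variable names are assumptions):
+//
+//	rootID, edRoot, err := provider.Provide(ctx, block.Height, executionData)
+//	if err != nil {
+//		return fmt.Errorf("failed to provide execution data: %w", err)
+//	}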
-func (p *Provider) Provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, error) { - rootID, errCh, err := p.provide(ctx, blockHeight, executionData) +func (p *ExecutionDataProvider) Provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, error) { + rootID, rootData, errCh, err := p.provide(ctx, blockHeight, executionData) storeErr, ok := <-errCh if err != nil { - return flow.ZeroID, err + return flow.ZeroID, nil, err } if ok { - return flow.ZeroID, storeErr + return flow.ZeroID, nil, storeErr } if err = p.storage.SetFulfilledHeight(blockHeight); err != nil { - return flow.ZeroID, err + return flow.ZeroID, nil, err } - return rootID, nil + return rootID, rootData, nil } -func (p *Provider) provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, <-chan error, error) { +func (p *ExecutionDataProvider) provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, <-chan error, error) { logger := p.logger.With().Uint64("height", blockHeight).Str("block_id", executionData.BlockID.String()).Logger() logger.Debug().Msg("providing execution data") @@ -146,7 +156,7 @@ func (p *Provider) provide(ctx context.Context, blockHeight uint64, executionDat defer close(blobCh) errCh := p.storeBlobs(ctx, blockHeight, blobCh) - g, gCtx := errgroup.WithContext(ctx) + g := new(errgroup.Group) chunkDataIDs := make([]cid.Cid, len(executionData.ChunkExecutionDatas)) for i, chunkExecutionData := range executionData.ChunkExecutionDatas { @@ -155,7 +165,7 @@ func (p *Provider) provide(ctx context.Context, blockHeight uint64, executionDat g.Go(func() error { logger.Debug().Int("chunk_index", i).Msg("adding chunk execution data") - cedID, err := p.addChunkExecutionData(gCtx, chunkExecutionData, blobCh) + cedID, err := p.cidsProvider.addChunkExecutionData(chunkExecutionData, blobCh) if err != nil { return fmt.Errorf("failed to add chunk execution data at index %d: %w", i, err) } @@ -167,28 +177,89 @@ func (p *Provider) provide(ctx context.Context, blockHeight uint64, executionDat } if err := g.Wait(); err != nil { - return flow.ZeroID, errCh, err + return flow.ZeroID, nil, errCh, err } - edRoot := &execution_data.BlockExecutionDataRoot{ + edRoot := &flow.BlockExecutionDataRoot{ BlockID: executionData.BlockID, ChunkExecutionDataIDs: chunkDataIDs, } - rootID, err := p.addExecutionDataRoot(ctx, edRoot, blobCh) + rootID, err := p.cidsProvider.addExecutionDataRoot(edRoot, blobCh) if err != nil { - return flow.ZeroID, errCh, fmt.Errorf("failed to add execution data root: %w", err) + return flow.ZeroID, nil, errCh, fmt.Errorf("failed to add execution data root: %w", err) } logger.Debug().Str("root_id", rootID.String()).Msg("root ID computed") duration := time.Since(start) p.metrics.RootIDComputed(duration, len(executionData.ChunkExecutionDatas)) - return rootID, errCh, nil + return rootID, edRoot, errCh, nil } -func (p *Provider) addExecutionDataRoot( - ctx context.Context, - edRoot *execution_data.BlockExecutionDataRoot, +func NewExecutionDataCIDProvider(serializer execution_data.Serializer) *ExecutionDataCIDProvider { + return &ExecutionDataCIDProvider{ + serializer: serializer, + maxBlobSize: execution_data.DefaultMaxBlobSize, + } +} + +type ExecutionDataCIDProvider struct { + serializer execution_data.Serializer + maxBlobSize int 
+} + +// GenerateExecutionDataRoot generates the execution data root and its ID from the provided +// block execution data. +// This is a helper function useful for testing. +// +// No errors are expected during normal operation. +func (p *ExecutionDataCIDProvider) GenerateExecutionDataRoot( + executionData *execution_data.BlockExecutionData, +) (flow.Identifier, *flow.BlockExecutionDataRoot, error) { + chunkDataIDs := make([]cid.Cid, len(executionData.ChunkExecutionDatas)) + for i, chunkExecutionData := range executionData.ChunkExecutionDatas { + cedID, err := p.addChunkExecutionData(chunkExecutionData, nil) + if err != nil { + return flow.ZeroID, nil, fmt.Errorf("failed to add chunk execution data at index %d: %w", i, err) + } + chunkDataIDs[i] = cedID + } + + root := &flow.BlockExecutionDataRoot{ + BlockID: executionData.BlockID, + ChunkExecutionDataIDs: chunkDataIDs, + } + + rootID, err := p.addExecutionDataRoot(root, nil) + if err != nil { + return flow.ZeroID, nil, fmt.Errorf("failed to add execution data root: %w", err) + } + + return rootID, root, nil +} + +// CalculateExecutionDataRootID calculates the execution data root ID from the provided +// execution data root. +// +// No errors are expected during normal operation. +func (p *ExecutionDataCIDProvider) CalculateExecutionDataRootID( + edRoot flow.BlockExecutionDataRoot, +) (flow.Identifier, error) { + return p.addExecutionDataRoot(&edRoot, nil) +} + +// CalculateChunkExecutionDataID calculates the chunk execution data ID from the provided +// chunk execution data. +// +// No errors are expected during normal operation. +func (p *ExecutionDataCIDProvider) CalculateChunkExecutionDataID( + ced execution_data.ChunkExecutionData, +) (cid.Cid, error) { + return p.addChunkExecutionData(&ced, nil) +} + +func (p *ExecutionDataCIDProvider) addExecutionDataRoot( + edRoot *flow.BlockExecutionDataRoot, blobCh chan<- blobs.Blob, ) (flow.Identifier, error) { buf := new(bytes.Buffer) @@ -201,7 +272,9 @@ func (p *Provider) addExecutionDataRoot( } rootBlob := blobs.NewBlob(buf.Bytes()) - blobCh <- rootBlob + if blobCh != nil { + blobCh <- rootBlob + } rootID, err := flow.CidToId(rootBlob.Cid()) if err != nil { @@ -211,12 +284,11 @@ func (p *Provider) addExecutionDataRoot( return rootID, nil } -func (p *Provider) addChunkExecutionData( - ctx context.Context, +func (p *ExecutionDataCIDProvider) addChunkExecutionData( ced *execution_data.ChunkExecutionData, blobCh chan<- blobs.Blob, ) (cid.Cid, error) { - cids, err := p.addBlobs(ctx, ced, blobCh) + cids, err := p.addBlobs(ced, blobCh) if err != nil { return cid.Undef, fmt.Errorf("failed to add chunk execution data blobs: %w", err) } @@ -226,14 +298,14 @@ func (p *Provider) addChunkExecutionData( return cids[0], nil } - if cids, err = p.addBlobs(ctx, cids, blobCh); err != nil { + if cids, err = p.addBlobs(cids, blobCh); err != nil { return cid.Undef, fmt.Errorf("failed to add cid blobs: %w", err) } } } // addBlobs serializes the given object, splits the serialized data into blobs, and sends them to the given channel. 
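 // When a serialized object spans multiple blobs, addChunkExecutionData above feeds the
 // resulting CIDs back through addBlobs until a single CID remains, forming a blob tree.
 // An illustrative sketch of that loop (simplified from addChunkExecutionData above):
 //
 //	cids, _ := p.addBlobs(ced, blobCh) // leaf level: the serialized chunk execution data
 //	for len(cids) > 1 {
 //		cids, _ = p.addBlobs(cids, blobCh) // next level: the serialized CIDs of the level below
 //	}
 //	rootCid := cids[0] // the chunk execution data ID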
-func (p *Provider) addBlobs(ctx context.Context, v interface{}, blobCh chan<- blobs.Blob) ([]cid.Cid, error) { +func (p *ExecutionDataCIDProvider) addBlobs(v interface{}, blobCh chan<- blobs.Blob) ([]cid.Cid, error) { bcw := blobs.NewBlobChannelWriter(blobCh, p.maxBlobSize) defer bcw.Close() diff --git a/module/executiondatasync/provider/provider_test.go b/module/executiondatasync/provider/provider_test.go index 3ebd216767b..32b0944c6c3 100644 --- a/module/executiondatasync/provider/provider_test.go +++ b/module/executiondatasync/provider/provider_test.go @@ -1,30 +1,43 @@ package provider_test import ( - "bytes" "context" - "crypto/rand" + "path/filepath" "testing" "time" + "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" goassert "gotest.tools/assert" + "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/ledger/common/testutils" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/provider" + edstorage "github.com/onflow/flow-go/module/executiondatasync/storage" mocktracker "github.com/onflow/flow-go/module/executiondatasync/tracker/mock" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/fixtures" +) + +const ( + // canonicalGeneratorSeed is the static randomness seed used to generate the canonical execution data. + // this ensures the data is deterministic and can be used for hash testing. + canonicalGeneratorSeed = 1103801176737782919 +) + +var ( + // canonicalExecutionDataID is the execution data ID of the canonical execution data. 
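+	// It was produced by providing the canonical fixture (see canonicalBlockExecutionData
+	// in the tests below), which regenerates and re-verifies this value on every run.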
+ canonicalExecutionDataID = flow.MustHexStringToIdentifier("6796622b06907cc0894260c175fdec8960fe99c167084f901d238db22a676de3") ) func getDatastore() datastore.Batching { @@ -35,18 +48,18 @@ func getExecutionDataStore(ds datastore.Batching) execution_data.ExecutionDataSt return execution_data.NewExecutionDataStore(blobs.NewBlobstore(ds), execution_data.DefaultSerializer) } -func getBlobservice(ds datastore.Batching) network.BlobService { +func getBlobservice(t *testing.T, ds datastore.Batching) network.BlobService { blobstore := blobs.NewBlobstore(ds) - blobService := new(mocknetwork.BlobService) + blobService := mocknetwork.NewBlobService(t) blobService.On("AddBlobs", mock.Anything, mock.AnythingOfType("[]blocks.Block")).Return(blobstore.PutMany) return blobService } -func getProvider(blobService network.BlobService) *provider.Provider { +func getProvider(blobService network.BlobService) provider.Provider { trackerStorage := mocktracker.NewMockStorage() return provider.NewProvider( - zerolog.Nop(), + unittest.Logger(), metrics.NewNoopCollector(), execution_data.DefaultSerializer, blobService, @@ -54,44 +67,14 @@ func getProvider(blobService network.BlobService) *provider.Provider { ) } -func generateChunkExecutionData(t *testing.T, minSerializedSize uint64) *execution_data.ChunkExecutionData { - ced := &execution_data.ChunkExecutionData{ - TrieUpdate: testutils.TrieUpdateFixture(1, 1, 8), - } - - size := 1 - - for { - buf := &bytes.Buffer{} - require.NoError(t, execution_data.DefaultSerializer.Serialize(buf, ced)) - - if buf.Len() >= int(minSerializedSize) { - t.Logf("Chunk execution data size: %d", buf.Len()) - return ced - } +func generateBlockExecutionData(numChunks int, minSerializedSizePerChunk int) *execution_data.BlockExecutionData { + suite := fixtures.NewGeneratorSuite() - v := make([]byte, size) - _, _ = rand.Read(v) - - k, err := ced.TrieUpdate.Payloads[0].Key() - require.NoError(t, err) + cedGen := suite.ChunkExecutionDatas() + chunkExecutionData := cedGen.List(numChunks, cedGen.WithMinSize(minSerializedSizePerChunk)) - ced.TrieUpdate.Payloads[0] = ledger.NewPayload(k, v) - size *= 2 - } -} - -func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *execution_data.BlockExecutionData { - bed := &execution_data.BlockExecutionData{ - BlockID: unittest.IdentifierFixture(), - ChunkExecutionDatas: make([]*execution_data.ChunkExecutionData, numChunks), - } - - for i := 0; i < numChunks; i++ { - bed.ChunkExecutionDatas[i] = generateChunkExecutionData(t, minSerializedSizePerChunk) - } - - return bed + bedGen := suite.BlockExecutionDatas() + return bedGen.Fixture(bedGen.WithChunkExecutionDatas(chunkExecutionData...)) } func deepEqual(t *testing.T, expected, actual *execution_data.BlockExecutionData) { @@ -111,16 +94,20 @@ func TestHappyPath(t *testing.T) { t.Parallel() ds := getDatastore() - provider := getProvider(getBlobservice(ds)) + provider := getProvider(getBlobservice(t, ds)) store := getExecutionDataStore(ds) - test := func(numChunks int, minSerializedSizePerChunk uint64) { - expected := generateBlockExecutionData(t, numChunks, minSerializedSizePerChunk) - executionDataID, err := provider.Provide(context.Background(), 0, expected) + test := func(numChunks int, minSerializedSizePerChunk int) { + expected := generateBlockExecutionData(numChunks, minSerializedSizePerChunk) + executionDataID, executionDataRoot, err := provider.Provide(context.Background(), 0, expected) require.NoError(t, err) - actual, err := 
store.GetExecutionData(context.Background(), executionDataID) + + actual, err := store.Get(context.Background(), executionDataID) require.NoError(t, err) deepEqual(t, expected, actual) + + assert.Equal(t, expected.BlockID, executionDataRoot.BlockID) + assert.Len(t, executionDataRoot.ChunkExecutionDataIDs, numChunks) } test(1, 0) // small execution data (single level blob tree) @@ -130,13 +117,13 @@ func TestHappyPath(t *testing.T) { func TestProvideContextCanceled(t *testing.T) { t.Parallel() - bed := generateBlockExecutionData(t, 5, 5*execution_data.DefaultMaxBlobSize) + bed := generateBlockExecutionData(5, 5*execution_data.DefaultMaxBlobSize) - provider := getProvider(getBlobservice(getDatastore())) - _, err := provider.Provide(context.Background(), 0, bed) + provider := getProvider(getBlobservice(t, getDatastore())) + _, _, err := provider.Provide(context.Background(), 0, bed) require.NoError(t, err) - blobService := new(mocknetwork.BlobService) + blobService := mocknetwork.NewBlobService(t) blobService.On("AddBlobs", mock.Anything, mock.AnythingOfType("[]blocks.Block")). Return(func(ctx context.Context, blobs []blobs.Blob) error { <-ctx.Done() @@ -145,6 +132,189 @@ func TestProvideContextCanceled(t *testing.T) { provider = getProvider(blobService) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - _, err = provider.Provide(ctx, 0, bed) + _, _, err = provider.Provide(ctx, 0, bed) assert.ErrorIs(t, err, ctx.Err()) } + +// TestGenerateExecutionDataRoot tests that GenerateExecutionDataRoot produces the same execution data ID and root +// as the Provide method. +// This ensures we can use the GenerateExecutionDataRoot method during testing to generate the correct data. +func TestGenerateExecutionDataRoot(t *testing.T) { + t.Parallel() + + bed := generateBlockExecutionData(5, 5*execution_data.DefaultMaxBlobSize) + + testProvider := getProvider(getBlobservice(t, getDatastore())) + expectedExecutionDataID, expectedExecutionDataRoot, err := testProvider.Provide(context.Background(), 0, bed) + require.NoError(t, err) + + cidProvider := provider.NewExecutionDataCIDProvider(execution_data.DefaultSerializer) + actualExecutionDataID, actualExecutionDataRoot, err := cidProvider.GenerateExecutionDataRoot(bed) + require.NoError(t, err) + + assert.Equal(t, expectedExecutionDataID, actualExecutionDataID) + assert.Equal(t, expectedExecutionDataRoot, actualExecutionDataRoot) +} + +// TestCalculateExecutionDataRootID tests that CalculateExecutionDataRootID calculates the correct +// ID given a static BlockExecutionDataRoot. This is used to ensure library updates or modification +// to the provider do not change the ID calculation logic. +// +// CAUTION: Unintentional changes may cause execution forks! +// Only modify this test if the hash calculation is expected to change. +func TestCalculateExecutionDataRootID(t *testing.T) { + t.Parallel() + + // ONLY modify this hash if it was expected to change. Unintentional changes may cause execution forks! 
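+	// If the calculation is intentionally changed, the new expected value can be
+	// taken from the `actual` ID computed at the end of this test.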
+ expected := flow.MustHexStringToIdentifier("ae80bb200545de7ff009d2f3e20970643198be635a9b90fffb9da1198a988deb") + + edRoot := flow.BlockExecutionDataRoot{ + BlockID: flow.MustHexStringToIdentifier("2b31c5e26b999a41d18dc62584ac68476742b071fc9412d68be9e516e1dfc79e"), + ChunkExecutionDataIDs: []cid.Cid{ + cid.MustParse("QmcA2h3jARWXkCc9VvpR4jvt9cNc7RdiqSMvPJ1TU69Xvw"), + cid.MustParse("QmQN81Y7KdHWNdsLthDxtdf2dCHLb3ddjDWmDZQ4Znqfs4"), + cid.MustParse("QmcfMmNPa8jFN64t1Hu7Afk7Trx8a6dg7gZfEEUqzC827b"), + cid.MustParse("QmYTooZGux6epKrdHbzgubUN4JFHkLK9hw6Z6F3fAMEDH5"), + cid.MustParse("QmXxYakkZKZEoCVdLLzVisctMxyiWQSfassMMzvCdaCjAj"), + }, + } + + cidProvider := provider.NewExecutionDataCIDProvider(execution_data.DefaultSerializer) + actual, err := cidProvider.CalculateExecutionDataRootID(edRoot) + require.NoError(t, err) + + assert.Equal(t, expected, actual) +} + +// TestCalculateChunkExecutionDataID tests that CalculateChunkExecutionDataID calculates the correct +// ID given a static ChunkExecutionData. This is used to ensure library updates or modification to +// the provider do not change the ID calculation logic. +// +// CAUTION: Unintentional changes may cause execution forks! +// Only modify this test if the hash calculation is expected to change. +func TestCalculateChunkExecutionDataID(t *testing.T) { + t.Parallel() + + rootHash, err := ledger.ToRootHash([]byte("0123456789acbdef0123456789acbdef")) + require.NoError(t, err) + + // ONLY modify this hash if it was expected to change. Unintentional changes may cause execution forks! + expected := cid.MustParse("QmSZ4sMzj8Be7kkZekjHKppmx2os87oAHV87WFUgZTMrWf") + + ced := execution_data.ChunkExecutionData{ + Collection: &flow.Collection{ + Transactions: []*flow.TransactionBody{ + {Script: []byte("access(all) fun main() {}")}, + }, + }, + Events: []flow.Event{ + unittest.EventFixture( + unittest.Event.WithEventType("A.0123456789abcdef.SomeContract.SomeEvent"), + unittest.Event.WithTransactionIndex(1), + unittest.Event.WithEventIndex(2), + unittest.Event.WithTransactionID(flow.MustHexStringToIdentifier("95e0929839063afbe334a3d175bea0775cdf5d93f64299e369d16ce21aa423d3")), + // do not care about Payload + unittest.Event.WithPayload([]byte{}), + ), + }, + TrieUpdate: &ledger.TrieUpdate{ + RootHash: rootHash, + }, + TransactionResults: []flow.LightTransactionResult{ + { + TransactionID: flow.MustHexStringToIdentifier("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"), + ComputationUsed: 100, + Failed: true, + }, + }, + } + + cidProvider := provider.NewExecutionDataCIDProvider(execution_data.DefaultSerializer) + actual, err := cidProvider.CalculateChunkExecutionDataID(ced) + require.NoError(t, err) + + assert.Equal(t, + expected, actual, + "expected and actual CID do not match: expected %s, actual %s", + expected, + actual, + ) +} + +// TestCalculateExecutionDataLifecycle tests that the execution data is reproduced correctly +// at different stages of the lifecycle. This ensures that the data remains consistent, and +// the hashing logic is correct. +// +// CAUTION: Unintentional changes may cause execution forks! 
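+//
+// The stages covered below are, roughly:
+//  1. provide the canonical execution data through a Pebble-backed provider and verify the root ID,
+//  2. read the data back from the same datastore (with HashOnRead enabled) and compare it,
+//  3. round-trip the data through the gRPC protobuf messages and compare it again.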
+func TestCalculateExecutionDataLifecycle(t *testing.T) {
+	t.Parallel()
+
+	ctx := context.Background()
+
+	bed, bedRoot := canonicalBlockExecutionData(t)
+
+	unittest.RunWithTempDir(t, func(dbDir string) {
+		pebbleDir := filepath.Join(dbDir, "pebble")
+
+		t.Run("pebble provider generates correct ID", func(t *testing.T) {
+			dsManager, err := edstorage.NewPebbleDatastoreManager(unittest.Logger(), pebbleDir, nil)
+			require.NoError(t, err)
+			defer dsManager.Close()
+
+			provider := getProvider(getBlobservice(t, dsManager.Datastore()))
+			executionDataID, executionDataRoot, err := provider.Provide(ctx, 0, bed)
+			require.NoError(t, err)
+
+			assert.Equal(t, canonicalExecutionDataID, executionDataID)
+			assert.Equal(t, bedRoot, executionDataRoot)
+		})
+
+		t.Run("pebble provider retrieves correct execution data", func(t *testing.T) {
+			dsManager, err := edstorage.NewPebbleDatastoreManager(unittest.Logger(), pebbleDir, nil)
+			require.NoError(t, err)
+			defer dsManager.Close()
+
+			bs := blobs.NewBlobstore(dsManager.Datastore())
+			bs.HashOnRead(true) // ensure data read from db matches expected hash
+			executionDataStore := execution_data.NewExecutionDataStore(bs, execution_data.DefaultSerializer)
+
+			executionData, err := executionDataStore.Get(ctx, canonicalExecutionDataID)
+			require.NoError(t, err)
+
+			deepEqual(t, bed, executionData)
+		})
+	})
+
+	// test that the data is unchanged after protobuf conversions
+	t.Run("grpc proto messages", func(t *testing.T) {
+		protoMsg, err := convert.BlockExecutionDataToMessage(bed)
+		require.NoError(t, err)
+
+		executionData, err := convert.MessageToBlockExecutionData(protoMsg, flow.Emulator.Chain())
+		require.NoError(t, err)
+
+		deepEqual(t, bed, executionData)
+	})
+}
+
+// canonicalBlockExecutionData returns a block execution data fixture generated using a static random seed.
+// This ensures it produces the same data on every run, allowing for deterministic testing of output hashes.
+func canonicalBlockExecutionData(t *testing.T) (*execution_data.BlockExecutionData, *flow.BlockExecutionDataRoot) {
+	suite := fixtures.NewGeneratorSuite(fixtures.WithSeed(canonicalGeneratorSeed))
+
+	bedGen := suite.BlockExecutionDatas()
+	cedGen := suite.ChunkExecutionDatas()
+	executionData := bedGen.Fixture(bedGen.WithChunkExecutionDatas(cedGen.List(4)...))
+
+	// use an in-memory provider to generate the ExecutionDataRoot and ExecutionDataID
+	prov := getProvider(getBlobservice(t, getDatastore()))
+	executionDataID, executionDataRoot, err := prov.Provide(context.Background(), 0, executionData)
+	require.NoError(t, err)
+
+	// ensure the generated execution data matches the expected canonical value
+	// if this fails, then something has changed in either the execution data generation or the
+	// generator suite.
+	require.Equal(t, canonicalExecutionDataID, executionDataID)
+
+	return executionData, executionDataRoot
+}
diff --git a/module/executiondatasync/pruner/pruner.go b/module/executiondatasync/pruner/pruner.go
index 5002f2878d2..99cbe565d83 100644
--- a/module/executiondatasync/pruner/pruner.go
+++ b/module/executiondatasync/pruner/pruner.go
@@ -2,21 +2,30 @@ package pruner
 
 import (
 	"context"
+	"errors"
 	"fmt"
+	"math"
 	"time"
 
 	"github.com/rs/zerolog"
+	"go.uber.org/atomic"
 
 	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/module/component"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	"github.com/onflow/flow-go/module/executiondatasync/tracker"
 	"github.com/onflow/flow-go/module/irrecoverable"
-	"github.com/onflow/flow-go/module/util"
 )
 
+// ErrNoRegisteredHeightRecorders represents an error indicating that the pruner has no registered execution data height recorders.
+// This error occurs when the pruner attempts to perform operations that require
+// at least one registered height recorder, but none are found.
+var ErrNoRegisteredHeightRecorders = errors.New("no registered height recorders")
+
 const (
-	defaultHeightRangeTarget = uint64(400_000)
-	defaultThreshold         = uint64(100_000)
+	DefaultHeightRangeTarget = uint64(2_000_000)
+	DefaultThreshold         = uint64(100_000)
+	DefaultPruningInterval   = 10 * time.Minute
 )
 
 // Pruner is a component responsible for pruning data from
@@ -39,29 +48,29 @@ type Pruner struct {
 	storage       tracker.Storage
 	pruneCallback func(ctx context.Context) error
 
-	// channels used to send new fulfilled heights and config changes to the worker thread
-	fulfilledHeights      chan uint64
-	thresholdChan         chan uint64
-	heightRangeTargetChan chan uint64
-
 	lastFulfilledHeight uint64
 	lastPrunedHeight    uint64
 
 	// the height range is the range of heights between the last pruned and last fulfilled
 	// heightRangeTarget is the target minimum value for this range, so that after pruning
 	// the height range is equal to the target.
-	heightRangeTarget uint64
+	heightRangeTarget *atomic.Uint64
 
 	// threshold defines the maximum height range and how frequently pruning is performed.
 	// once the height range reaches `heightRangeTarget+threshold`, `threshold` many blocks
 	// are pruned
-	threshold uint64
+	threshold *atomic.Uint64
+
+	// pruningInterval defines how frequently pruning can be performed
+	pruningInterval time.Duration
 
 	logger  zerolog.Logger
 	metrics module.ExecutionDataPrunerMetrics
 
 	component.Component
 	cm *component.ComponentManager
+
+	registeredHeightRecorders []execution_data.ProcessedHeightRecorder
 }
 
 type PrunerOption func(*Pruner)
@@ -70,23 +79,31 @@ type PrunerOption func(*Pruner)
 // height range target.
 func WithHeightRangeTarget(heightRangeTarget uint64) PrunerOption {
 	return func(p *Pruner) {
-		p.heightRangeTarget = heightRangeTarget
+		p.heightRangeTarget.Store(heightRangeTarget)
 	}
 }
 
 // WithThreshold is used to configure the pruner with a custom threshold.
 func WithThreshold(threshold uint64) PrunerOption {
 	return func(p *Pruner) {
-		p.threshold = threshold
+		p.threshold.Store(threshold)
 	}
 }
 
+// WithPruneCallback sets a custom callback function to be called after pruning.
 func WithPruneCallback(callback func(context.Context) error) PrunerOption {
 	return func(p *Pruner) {
 		p.pruneCallback = callback
 	}
 }
 
+// WithPruningInterval is used to configure the pruner with a custom pruning interval.
+func WithPruningInterval(interval time.Duration) PrunerOption {
+	return func(p *Pruner) {
+		p.pruningInterval = interval
+	}
+}
+
 // NewPruner creates a new Pruner.
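 // An illustrative construction using the options defined above (the values shown
 // are examples only):
 //
 //	p, err := NewPruner(logger, metrics, trackerStorage,
 //		WithHeightRangeTarget(1_000_000),
 //		WithThreshold(50_000),
 //		WithPruningInterval(time.Minute),
 //	)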
 func NewPruner(logger zerolog.Logger, metrics module.ExecutionDataPrunerMetrics, storage tracker.Storage, opts ...PrunerOption) (*Pruner, error) {
 	lastPrunedHeight, err := storage.GetPrunedHeight()
@@ -99,21 +116,16 @@ func NewPruner(logger zerolog.Logger, metrics module.ExecutionDataPrunerMetrics,
 		return nil, fmt.Errorf("failed to get fulfilled height: %w", err)
 	}
 
-	fulfilledHeights := make(chan uint64, 32)
-	fulfilledHeights <- fulfilledHeight
-
 	p := &Pruner{
-		logger:                logger.With().Str("component", "execution_data_pruner").Logger(),
-		storage:               storage,
-		pruneCallback:         func(ctx context.Context) error { return nil },
-		fulfilledHeights:      fulfilledHeights,
-		thresholdChan:         make(chan uint64),
-		heightRangeTargetChan: make(chan uint64),
-		lastFulfilledHeight:   fulfilledHeight,
-		lastPrunedHeight:      lastPrunedHeight,
-		heightRangeTarget:     defaultHeightRangeTarget,
-		threshold:             defaultThreshold,
-		metrics:               metrics,
+		logger:              logger.With().Str("component", "execution_data_pruner").Logger(),
+		storage:             storage,
+		pruneCallback:       func(ctx context.Context) error { return nil },
+		lastFulfilledHeight: fulfilledHeight,
+		lastPrunedHeight:    lastPrunedHeight,
+		heightRangeTarget:   atomic.NewUint64(DefaultHeightRangeTarget),
+		threshold:           atomic.NewUint64(DefaultThreshold),
+		metrics:             metrics,
+		pruningInterval:     DefaultPruningInterval,
 	}
 	p.cm = component.NewComponentManagerBuilder().
 		AddWorker(p.loop).
@@ -127,66 +139,98 @@ func NewPruner(logger zerolog.Logger, metrics module.ExecutionDataPrunerMetrics,
 	return p, nil
 }
 
-// NotifyFulfilledHeight notifies the Pruner of the latest fulfilled height.
-func (p *Pruner) NotifyFulfilledHeight(height uint64) {
-	if util.CheckClosed(p.cm.ShutdownSignal()) {
-		return
-	}
-
-	select {
-	case p.fulfilledHeights <- height:
-	default:
-	}
-
+// RegisterHeightRecorder registers an execution data height recorder with the Pruner.
+//
+// Parameters:
+//   - recorder: The execution data height recorder to register.
+func (p *Pruner) RegisterHeightRecorder(recorder execution_data.ProcessedHeightRecorder) {
+	p.registeredHeightRecorders = append(p.registeredHeightRecorders, recorder)
 }
 
 // SetHeightRangeTarget updates the Pruner's height range target.
-// This may block for the duration of a pruning operation.
-func (p *Pruner) SetHeightRangeTarget(heightRangeTarget uint64) error {
-	select {
-	case p.heightRangeTargetChan <- heightRangeTarget:
-		return nil
-	case <-p.cm.ShutdownSignal():
-		return component.ErrComponentShutdown
-	}
+func (p *Pruner) SetHeightRangeTarget(heightRangeTarget uint64) {
	p.heightRangeTarget.Store(heightRangeTarget)
 }
 
-// SetThreshold update's the Pruner's threshold.
-// This may block for the duration of a pruning operation.
-func (p *Pruner) SetThreshold(threshold uint64) error {
-	select {
-	case p.thresholdChan <- threshold:
-		return nil
-	case <-p.cm.ShutdownSignal():
-		return component.ErrComponentShutdown
-	}
+// SetThreshold updates the Pruner's threshold.
+func (p *Pruner) SetThreshold(threshold uint64) {
+	p.threshold.Store(threshold)
 }
 
+// loop is the main worker for the Pruner, responsible for triggering
+// pruning operations at regular intervals. It monitors the heights
+// of registered height recorders and checks if pruning is necessary.
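+// Unlike the previous implementation, which received fulfilled heights over channels,
+// the worker polls the registered recorders on a fixed ticker and persists the lowest
+// fulfilled height before each pruning check.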
 func (p *Pruner) loop(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
 	ready()
+	ticker := time.NewTicker(p.pruningInterval)
+	defer ticker.Stop()
 
 	for {
 		select {
 		case <-ctx.Done():
 			return
-		case height := <-p.fulfilledHeights:
-			if height > p.lastFulfilledHeight {
-				p.lastFulfilledHeight = height
+		case <-ticker.C:
+			lowestHeight, err := p.lowestRecordersHeight()
+			if err == nil || !errors.Is(err, ErrNoRegisteredHeightRecorders) {
+				err := p.updateFulfilledHeight(lowestHeight)
+				if err != nil {
+					ctx.Throw(fmt.Errorf("failed to update lowest fulfilled height: %w", err))
+				}
 			}
 			p.checkPrune(ctx)
-		case heightRangeTarget := <-p.heightRangeTargetChan:
-			p.heightRangeTarget = heightRangeTarget
-			p.checkPrune(ctx)
-		case threshold := <-p.thresholdChan:
-			p.threshold = threshold
-			p.checkPrune(ctx)
 		}
 	}
 }
 
+// lowestRecordersHeight returns the lowest height among all registered
+// height recorders.
+//
+// This function iterates over all registered height recorders to determine
+// the smallest complete height recorded. If no height recorders are registered, it
+// returns an error.
+//
+// Expected errors during normal operation:
+//   - ErrNoRegisteredHeightRecorders: if no height recorders are registered.
+func (p *Pruner) lowestRecordersHeight() (uint64, error) {
+	if len(p.registeredHeightRecorders) == 0 {
+		return 0, ErrNoRegisteredHeightRecorders
+	}
+
+	lowestHeight := uint64(math.MaxUint64)
+	for _, recorder := range p.registeredHeightRecorders {
+		height := recorder.HighestCompleteHeight()
+		if height < lowestHeight {
+			lowestHeight = height
+		}
+	}
+	return lowestHeight, nil
+}
+
+// updateFulfilledHeight updates the last fulfilled height and stores it in the storage.
+//
+// Parameters:
+//   - height: The new fulfilled height.
+//
+// No errors are expected during normal operations.
+func (p *Pruner) updateFulfilledHeight(height uint64) error {
+	if height > p.lastFulfilledHeight {
+		p.lastFulfilledHeight = height
+		return p.storage.SetFulfilledHeight(height)
+	}
+	return nil
+}
+
+// checkPrune checks if pruning should be performed based on the height range
+// and triggers the pruning operation if necessary.
+//
+// Parameters:
+//   - ctx: The context for managing the pruning operation.
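+//
+// For example, with lastPrunedHeight=0, heightRangeTarget=10, and threshold=5 (the
+// values used in TestBasicPrune), a fulfilled height of 16 satisfies 16 > 10+5+0,
+// so storage is pruned up to height 16-10 = 6.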
func (p *Pruner) checkPrune(ctx irrecoverable.SignalerContext) { - if p.lastFulfilledHeight > p.heightRangeTarget+p.threshold+p.lastPrunedHeight { - pruneHeight := p.lastFulfilledHeight - p.heightRangeTarget + threshold := p.threshold.Load() + heightRangeTarget := p.heightRangeTarget.Load() + + if p.lastFulfilledHeight > heightRangeTarget+threshold+p.lastPrunedHeight { + pruneHeight := p.lastFulfilledHeight - heightRangeTarget p.logger.Info().Uint64("prune_height", pruneHeight).Msg("pruning storage") start := time.Now() diff --git a/module/executiondatasync/pruner/pruner_test.go b/module/executiondatasync/pruner/pruner_test.go index e0061a7b88d..42158b3b7fb 100644 --- a/module/executiondatasync/pruner/pruner_test.go +++ b/module/executiondatasync/pruner/pruner_test.go @@ -8,6 +8,7 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" + exedatamock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" "github.com/onflow/flow-go/module/executiondatasync/pruner" mocktracker "github.com/onflow/flow-go/module/executiondatasync/tracker/mock" "github.com/onflow/flow-go/module/irrecoverable" @@ -26,10 +27,18 @@ func TestBasicPrune(t *testing.T) { trackerStorage, pruner.WithHeightRangeTarget(10), pruner.WithThreshold(5), + pruner.WithPruningInterval(10*time.Millisecond), ) require.NoError(t, err) trackerStorage.AssertExpectations(t) + downloader := new(exedatamock.Downloader) + downloader.On("HighestCompleteHeight"). + Return(uint64(16)). + Once() + + pruner.RegisterHeightRecorder(downloader) + ctx, cancel := context.WithCancel(context.Background()) signalerCtx, errChan := irrecoverable.WithSignaler(ctx) @@ -40,46 +49,8 @@ func TestBasicPrune(t *testing.T) { close(pruned) return nil }).Once() + trackerStorage.On("SetFulfilledHeight", uint64(16)).Return(nil).Maybe() - pruner.NotifyFulfilledHeight(16) - unittest.AssertClosesBefore(t, pruned, time.Second) - trackerStorage.AssertExpectations(t) - - cancel() - <-pruner.Done() - - select { - case err := <-errChan: - require.NoError(t, err) - default: - } -} - -func TestInitialPrune(t *testing.T) { - trackerStorage := new(mocktracker.Storage) - trackerStorage.On("GetFulfilledHeight").Return(uint64(20), nil).Once() - trackerStorage.On("GetPrunedHeight").Return(uint64(0), nil).Once() - - pruner, err := pruner.NewPruner( - zerolog.Nop(), - metrics.NewNoopCollector(), - trackerStorage, - pruner.WithHeightRangeTarget(10), - pruner.WithThreshold(5), - ) - require.NoError(t, err) - trackerStorage.AssertExpectations(t) - - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx, errChan := irrecoverable.WithSignaler(ctx) - - pruned := make(chan struct{}) - trackerStorage.On("PruneUpToHeight", uint64(10)).Return(func(height uint64) error { - close(pruned) - return nil - }).Once() - - pruner.Start(signalerCtx) unittest.AssertClosesBefore(t, pruned, time.Second) trackerStorage.AssertExpectations(t) @@ -104,6 +75,7 @@ func TestUpdateThreshold(t *testing.T) { trackerStorage, pruner.WithHeightRangeTarget(10), pruner.WithThreshold(10), + pruner.WithPruningInterval(10*time.Millisecond), ) require.NoError(t, err) trackerStorage.AssertExpectations(t) @@ -119,7 +91,7 @@ func TestUpdateThreshold(t *testing.T) { return nil }).Once() - require.NoError(t, pruner.SetThreshold(4)) + pruner.SetThreshold(4) unittest.AssertClosesBefore(t, pruned, time.Second) trackerStorage.AssertExpectations(t) @@ -144,6 +116,7 @@ func TestUpdateHeightRangeTarget(t *testing.T) { trackerStorage, pruner.WithHeightRangeTarget(15), 
pruner.WithThreshold(0),
+		pruner.WithPruningInterval(10*time.Millisecond),
 	)
 	require.NoError(t, err)
 	trackerStorage.AssertExpectations(t)
@@ -159,7 +132,7 @@ func TestUpdateHeightRangeTarget(t *testing.T) {
 		return nil
 	}).Once()
 
-	require.NoError(t, pruner.SetHeightRangeTarget(5))
+	pruner.SetHeightRangeTarget(5)
 
 	unittest.AssertClosesBefore(t, pruned, time.Second)
 	trackerStorage.AssertExpectations(t)
diff --git a/module/executiondatasync/storage/datastore_factory.go b/module/executiondatasync/storage/datastore_factory.go
new file mode 100644
index 00000000000..3565c994d6a
--- /dev/null
+++ b/module/executiondatasync/storage/datastore_factory.go
@@ -0,0 +1,36 @@
+package storage
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/rs/zerolog"
+)
+
+// CreateDatastoreManager creates a new datastore manager for execution data.
+// Currently, only a Pebble-backed datastore manager is created.
+func CreateDatastoreManager(
+	logger zerolog.Logger,
+	executionDataDir string,
+) (DatastoreManager, error) {
+
+	// create the datastore directory if it does not exist
+	datastoreDir := filepath.Join(executionDataDir, "blobstore")
+	err := os.MkdirAll(datastoreDir, 0700)
+	if err != nil {
+		return nil, err
+	}
+
+	// create the Pebble-backed datastore manager
+	var executionDatastoreManager DatastoreManager
+	logger.Info().Msgf("Using Pebble datastore for execution data at %s", datastoreDir)
+	executionDatastoreManager, err = NewPebbleDatastoreManager(
+		logger.With().Str("pebbledb", "endata").Logger(),
+		datastoreDir, nil)
+	if err != nil {
+		return nil, fmt.Errorf("could not create PebbleDatastoreManager for execution data: %w", err)
+	}
+
+	return executionDatastoreManager, nil
+}
diff --git a/module/executiondatasync/storage/datastore_manager.go b/module/executiondatasync/storage/datastore_manager.go
new file mode 100644
index 00000000000..36677d361d5
--- /dev/null
+++ b/module/executiondatasync/storage/datastore_manager.go
@@ -0,0 +1,31 @@
+package storage
+
+import (
+	"context"
+
+	ds "github.com/ipfs/go-datastore"
+
+	"github.com/onflow/flow-go/storage"
+)
+
+// DatastoreManager defines the interface for managing a datastore.
+// It provides methods for accessing the datastore, the underlying database,
+// closing the datastore, and performing garbage collection.
+type DatastoreManager interface {
+	// Datastore provides access to the datastore for performing batched
+	// read and write operations.
+	Datastore() ds.Batching
+
+	// DB returns the raw database object, allowing for more direct
+	// access to the underlying database features and operations.
+	DB() storage.DB
+
+	// Close terminates the connection to the datastore and releases
+	// any associated resources. This method should be called
+	// when finished using the datastore to ensure proper resource cleanup.
+	Close() error
+
+	// CollectGarbage initiates garbage collection on the datastore
+	// to reclaim unused space and optimize performance.
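+	// Implementations for which this is not applicable (e.g. the Pebble-backed
+	// manager below) may treat it as a no-op.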
+	CollectGarbage(ctx context.Context) error
+}
diff --git a/module/executiondatasync/storage/pebble_datastore_manager.go b/module/executiondatasync/storage/pebble_datastore_manager.go
new file mode 100644
index 00000000000..d38a2845575
--- /dev/null
+++ b/module/executiondatasync/storage/pebble_datastore_manager.go
@@ -0,0 +1,85 @@
+package storage
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/cockroachdb/pebble/v2"
+	ds "github.com/ipfs/go-datastore"
+	pebbleds "github.com/ipfs/go-ds-pebble"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation/pebbleimpl"
+	pstorage "github.com/onflow/flow-go/storage/pebble"
+)
+
+var _ DatastoreManager = (*PebbleDatastoreManager)(nil)
+
+// PebbleDatastoreManager wraps a Pebble database to implement the DatastoreManager interface.
+type PebbleDatastoreManager struct {
+	ds *pebbleds.Datastore
+	db *pebble.DB
+}
+
+// NewPebbleDatastoreManager creates and returns a new instance of PebbleDatastoreManager.
+// It initializes the PebbleDB database with the specified path and options.
+// If no options are provided, default options are used.
+//
+// Parameters:
+//   - path: The path to the directory where the PebbleDB files will be stored.
+//   - options: Configuration options for the PebbleDB database. If nil, default
+//     options are applied.
+//
+// No errors are expected during normal operations.
+func NewPebbleDatastoreManager(logger zerolog.Logger, path string, options *pebble.Options) (*PebbleDatastoreManager, error) {
+	if options == nil {
+		cache := pebble.NewCache(pstorage.DefaultPebbleCacheSize)
+		defer cache.Unref()
+
+		options = pstorage.DefaultPebbleOptions(logger, cache, pebble.DefaultComparer)
+	}
+
+	db, err := pebble.Open(path, options)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open db: %w", err)
+	}
+
+	ds, err := pebbleds.NewDatastore(path,
+		pebbleds.WithPebbleDB(db),
+		pebbleds.WithPebbleWriteOptions(pebble.Sync),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not open datastore: %w", err)
+	}
+
+	return &PebbleDatastoreManager{
+		ds: ds,
+		db: db,
+	}, nil
+}
+
+// Datastore provides access to the datastore for performing batched
+// read and write operations.
+func (p *PebbleDatastoreManager) Datastore() ds.Batching {
+	return p.ds
+}
+
+// DB returns the raw database object, allowing for more direct
+// access to the underlying database features and operations.
+func (p *PebbleDatastoreManager) DB() storage.DB {
+	return pebbleimpl.ToDB(p.db)
+}
+
+// Close terminates the connection to the datastore and releases
+// any associated resources. This method should be called
+// when finished using the datastore to ensure proper resource cleanup.
+func (p *PebbleDatastoreManager) Close() error {
+	return p.ds.Close()
+}
+
+// CollectGarbage initiates garbage collection on the datastore
+// to reclaim unused space and optimize performance.
+func (p *PebbleDatastoreManager) CollectGarbage(_ context.Context) error {
+	// In PebbleDB, there's no direct equivalent to manual value log garbage collection
+	return nil
+}
diff --git a/module/executiondatasync/tracker/mock/storage.go b/module/executiondatasync/tracker/mock/storage.go
index 6eef7092ffd..5a56946c0d1 100644
--- a/module/executiondatasync/tracker/mock/storage.go
+++ b/module/executiondatasync/tracker/mock/storage.go
@@ -1,6 +1,6 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.
-package mocktracker +package mock import ( tracker "github.com/onflow/flow-go/module/executiondatasync/tracker" @@ -12,10 +12,14 @@ type Storage struct { mock.Mock } -// GetFulfilledHeight provides a mock function with given fields: +// GetFulfilledHeight provides a mock function with no fields func (_m *Storage) GetFulfilledHeight() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetFulfilledHeight") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -36,10 +40,14 @@ func (_m *Storage) GetFulfilledHeight() (uint64, error) { return r0, r1 } -// GetPrunedHeight provides a mock function with given fields: +// GetPrunedHeight provides a mock function with no fields func (_m *Storage) GetPrunedHeight() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetPrunedHeight") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -64,6 +72,10 @@ func (_m *Storage) GetPrunedHeight() (uint64, error) { func (_m *Storage) PruneUpToHeight(height uint64) error { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for PruneUpToHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(uint64) error); ok { r0 = rf(height) @@ -78,6 +90,10 @@ func (_m *Storage) PruneUpToHeight(height uint64) error { func (_m *Storage) SetFulfilledHeight(height uint64) error { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for SetFulfilledHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(uint64) error); ok { r0 = rf(height) @@ -92,6 +108,10 @@ func (_m *Storage) SetFulfilledHeight(height uint64) error { func (_m *Storage) Update(_a0 tracker.UpdateFn) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Update") + } + var r0 error if rf, ok := ret.Get(0).(func(tracker.UpdateFn) error); ok { r0 = rf(_a0) @@ -102,13 +122,12 @@ func (_m *Storage) Update(_a0 tracker.UpdateFn) error { return r0 } -type mockConstructorTestingTNewStorage interface { +// NewStorage creates a new instance of Storage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStorage(t interface { mock.TestingT Cleanup(func()) -} - -// NewStorage creates a new instance of Storage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewStorage(t mockConstructorTestingTNewStorage) *Storage { +}) *Storage { mock := &Storage{} mock.Mock.Test(t) diff --git a/module/executiondatasync/tracker/mock/util.go b/module/executiondatasync/tracker/mock/util.go index 485ed1f13ee..63c3a810d55 100644 --- a/module/executiondatasync/tracker/mock/util.go +++ b/module/executiondatasync/tracker/mock/util.go @@ -1,4 +1,4 @@ -package mocktracker +package mock import ( "github.com/ipfs/go-cid" diff --git a/module/executiondatasync/tracker/noop.go b/module/executiondatasync/tracker/noop.go new file mode 100644 index 00000000000..552c1cbf2ca --- /dev/null +++ b/module/executiondatasync/tracker/noop.go @@ -0,0 +1,29 @@ +package tracker + +import "github.com/ipfs/go-cid" + +type NoopStorage struct{} + +var _ Storage = (*NoopStorage)(nil) + +func (s *NoopStorage) Update(update UpdateFn) error { + return update(func(blockHeight uint64, cids ...cid.Cid) error { + return nil + }) +} + +func (s *NoopStorage) GetFulfilledHeight() (uint64, error) { + return 0, nil +} + +func (s *NoopStorage) SetFulfilledHeight(uint64) error { + return nil +} + +func (s *NoopStorage) GetPrunedHeight() (uint64, error) { + return 0, nil +} + +func (s *NoopStorage) PruneUpToHeight(height uint64) error { + return nil +} diff --git a/module/executiondatasync/tracker/storage.go b/module/executiondatasync/tracker/storage.go index ad8ab613c5e..c7677f79ca7 100644 --- a/module/executiondatasync/tracker/storage.go +++ b/module/executiondatasync/tracker/storage.go @@ -52,13 +52,13 @@ const blobRecordKeyLength = 1 + 8 + blobs.CidLength func makeBlobRecordKey(blockHeight uint64, c cid.Cid) []byte { blobRecordKey := make([]byte, blobRecordKeyLength) blobRecordKey[0] = prefixBlobRecord - binary.LittleEndian.PutUint64(blobRecordKey[1:], blockHeight) + binary.BigEndian.PutUint64(blobRecordKey[1:], blockHeight) copy(blobRecordKey[1+8:], c.Bytes()) return blobRecordKey } func parseBlobRecordKey(key []byte) (uint64, cid.Cid, error) { - blockHeight := binary.LittleEndian.Uint64(key[1:]) + blockHeight := binary.BigEndian.Uint64(key[1:]) c, err := cid.Cast(key[1+8:]) return blockHeight, c, err } @@ -74,7 +74,7 @@ func makeLatestHeightKey(c cid.Cid) []byte { func makeUint64Value(v uint64) []byte { value := make([]byte, 8) - binary.LittleEndian.PutUint64(value, v) + binary.BigEndian.PutUint64(value, v) return value } @@ -84,7 +84,7 @@ func getUint64Value(item *badger.Item) (uint64, error) { return 0, err } - return binary.LittleEndian.Uint64(value), nil + return binary.BigEndian.Uint64(value), nil } // getBatchItemCountLimit returns the maximum number of items that can be included in a single batch @@ -189,6 +189,7 @@ func WithPruneCallback(callback PruneCallback) StorageOption { } func OpenStorage(dbPath string, startHeight uint64, logger zerolog.Logger, opts ...StorageOption) (*storage, error) { + lg := logger.With().Str("module", "tracker_storage").Logger() db, err := badger.Open(badger.LSMOnlyOptions(dbPath)) if err != nil { return nil, fmt.Errorf("could not open tracker db: %w", err) @@ -197,17 +198,21 @@ func OpenStorage(dbPath string, startHeight uint64, logger zerolog.Logger, opts storage := &storage{ db: db, pruneCallback: func(c cid.Cid) error { return nil }, - logger: logger.With().Str("module", "tracker_storage").Logger(), + logger: lg, } for _, opt := range opts { opt(storage) } + lg.Info().Msgf("initialize storage with start height: %d", startHeight) + if err := storage.init(startHeight); err != nil { return nil, fmt.Errorf("failed to initialize storage: %w", err) } + 
lg.Info().Msgf("storage initialized")
+
 	return storage, nil
 }
 
@@ -224,10 +229,12 @@ func (s *storage) init(startHeight uint64) error {
 		)
 	}
 
+		s.logger.Info().Msgf("replaying pruning up to height %d (fulfilled height: %d)", prunedHeight, fulfilledHeight)
 		// replay pruning in case it was interrupted during previous shutdown
 		if err := s.PruneUpToHeight(prunedHeight); err != nil {
 			return fmt.Errorf("failed to replay pruning: %w", err)
 		}
+		s.logger.Info().Msgf("finished pruning")
 	} else if errors.Is(fulfilledHeightErr, badger.ErrKeyNotFound) && errors.Is(prunedHeightErr, badger.ErrKeyNotFound) {
 		// db is empty, we need to bootstrap it
 		if err := s.bootstrap(startHeight); err != nil {
diff --git a/module/executiondatasync/tracker/storage_test.go b/module/executiondatasync/tracker/storage_test.go
index b0078065642..76c7b613ab6 100644
--- a/module/executiondatasync/tracker/storage_test.go
+++ b/module/executiondatasync/tracker/storage_test.go
@@ -120,3 +120,66 @@ func TestPruneNonLatestHeight(t *testing.T) {
 	})
 	require.NoError(t, err)
 }
+
+// TestAscendingOrderOfRecords tests that records are iterated in ascending height order during pruning:
+// all CIDs at or below the pruned height, together with their associated tracking data, are removed
+// from the database.
+func TestAscendingOrderOfRecords(t *testing.T) {
+	expectedPrunedCIDs := make(map[cid.Cid]struct{})
+	storageDir := t.TempDir()
+	storage, err := OpenStorage(storageDir, 0, zerolog.Nop(), WithPruneCallback(func(c cid.Cid) error {
+		_, ok := expectedPrunedCIDs[c]
+		assert.True(t, ok, "unexpected CID pruned: %s", c.String())
+		delete(expectedPrunedCIDs, c)
+		return nil
+	}))
+	require.NoError(t, err)
+
+	// c1 is for height 1,
+	// c2 is for height 2,
+	// c3 is for height 256
+	// pruning up to height 1 checks that the records are iterated in ascending order; only c1 should be pruned
+	c1 := randomCid()
+	expectedPrunedCIDs[c1] = struct{}{}
+	c2 := randomCid()
+	c3 := randomCid()
+
+	require.NoError(t, storage.Update(func(tbf TrackBlobsFn) error {
+		require.NoError(t, tbf(1, c1))
+		require.NoError(t, tbf(2, c2))
+		// It is important to check that the record with height 256 does not precede
+		// the record with height 1 during pruning.
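+		// With the previous little-endian keys, height 256 (first key byte 0x00)
+		// would sort before height 1 (first key byte 0x01) under lexicographic
+		// iteration; the big-endian encoding introduced above preserves numeric order.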
+ require.NoError(t, tbf(256, c3)) + + return nil + })) + require.NoError(t, storage.PruneUpToHeight(1)) + + prunedHeight, err := storage.GetPrunedHeight() + require.NoError(t, err) + assert.Equal(t, uint64(1), prunedHeight) + + assert.Len(t, expectedPrunedCIDs, 0) + + err = storage.db.View(func(txn *badger.Txn) error { + // expected that blob record with height 1 was removed + _, err := txn.Get(makeBlobRecordKey(1, c1)) + assert.ErrorIs(t, err, badger.ErrKeyNotFound) + _, err = txn.Get(makeLatestHeightKey(c1)) + assert.ErrorIs(t, err, badger.ErrKeyNotFound) + + // expected that blob record with height 2 exists + _, err = txn.Get(makeBlobRecordKey(2, c2)) + assert.NoError(t, err) + _, err = txn.Get(makeLatestHeightKey(c2)) + assert.NoError(t, err) + + // expected that blob record with height 256 exists + _, err = txn.Get(makeBlobRecordKey(256, c3)) + assert.NoError(t, err) + _, err = txn.Get(makeLatestHeightKey(c3)) + assert.NoError(t, err) + + return nil + }) + require.NoError(t, err) +} diff --git a/module/finalizedreader/finalizedreader.go b/module/finalizedreader/finalizedreader.go new file mode 100644 index 00000000000..601badcf276 --- /dev/null +++ b/module/finalizedreader/finalizedreader.go @@ -0,0 +1,49 @@ +package finalizedreader + +import ( + "fmt" + + "go.uber.org/atomic" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/events" + "github.com/onflow/flow-go/storage" +) + +type FinalizedReader struct { + events.Noop // all unimplemented event consumers are no-ops + lastHeight *atomic.Uint64 + headers storage.Headers +} + +var _ protocol.Consumer = (*FinalizedReader)(nil) + +func NewFinalizedReader(headers storage.Headers, lastHeight uint64) *FinalizedReader { + return &FinalizedReader{ + lastHeight: atomic.NewUint64(lastHeight), + headers: headers, + } +} + +// FinalizedBlockIDAtHeight returns the block ID of the finalized block at the given height. 
+// It returns storage.ErrNotFound if the given height has not been finalized yet;
+// any other error returned is an exception.
+func (r *FinalizedReader) FinalizedBlockIDAtHeight(height uint64) (flow.Identifier, error) {
+	if height > r.lastHeight.Load() {
+		return flow.ZeroID, fmt.Errorf("height not finalized (%v): %w", height, storage.ErrNotFound)
+	}
+
+	finalizedID, err := r.headers.BlockIDByHeight(height)
+	if err != nil {
+		return flow.ZeroID, err
+	}
+
+	return finalizedID, nil
+}
+
+// BlockFinalized implements the protocol.Consumer interface, which allows FinalizedReader
+// to consume finalized blocks from the protocol.
+func (r *FinalizedReader) BlockFinalized(h *flow.Header) {
+	r.lastHeight.Store(h.Height)
+}
diff --git a/module/finalizedreader/finalizedreader_test.go b/module/finalizedreader/finalizedreader_test.go
new file mode 100644
index 00000000000..cd6bd194878
--- /dev/null
+++ b/module/finalizedreader/finalizedreader_test.go
@@ -0,0 +1,83 @@
+package finalizedreader
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestFinalizedReader(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		// prepare the storage.Headers instance
+		metrics := metrics.NewNoopCollector()
+		all := store.InitAll(metrics, db)
+		blocks := all.Blocks
+		headers := all.Headers
+		proposal := unittest.ProposalFixture()
+		block := proposal.Block
+
+		// store `block`
+		err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx, rw, proposal)
+			})
+		})
+		require.NoError(t, err)
+
+		// index `block` as finalized
+		err = unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.IndexFinalizedBlockByHeight(lctx, rw, block.Height, block.ID())
+			})
+		})
+		require.NoError(t, err)
+
+		// verify that `FinalizedReader` reads values from database that are not yet cached, e.g. 
right after initialization
+		reader := NewFinalizedReader(headers, block.Height)
+		finalized, err := reader.FinalizedBlockIDAtHeight(block.Height)
+		require.NoError(t, err)
+		require.Equal(t, block.ID(), finalized)
+
+		// verify that `FinalizedReader` returns storage.ErrNotFound when the height is not finalized
+		_, err = reader.FinalizedBlockIDAtHeight(block.Height + 1)
+		require.Error(t, err)
+		require.True(t, errors.Is(err, storage.ErrNotFound), err)
+
+		// store and finalize one more block
+		block2 := unittest.BlockWithParentFixture(block.ToHeader())
+		err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx, rw, unittest.ProposalFromBlock(block2))
+			})
+		})
+		require.NoError(t, err)
+		err = unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.IndexFinalizedBlockByHeight(lctx, rw, block2.Height, block2.ID())
+			})
+		})
+		require.NoError(t, err)
+
+		// We declare `block2` as finalized via the `FinalizedReader`; the previously
+		// retrieved `finalized` value still refers to `block`
+		reader.BlockFinalized(block2.ToHeader())
+		require.Equal(t, block.ID(), finalized)
+
+		// should now be able to retrieve the newly finalized block by height
+		finalized, err = reader.FinalizedBlockIDAtHeight(block2.Height)
+		require.NoError(t, err)
+		require.Equal(t, block2.ID(), finalized)
+
+		// should be a no-op and not panic
+		reader.BlockProcessable(block.ToHeader(), block2.ParentQC())
+	})
+}
diff --git a/module/finalizer.go b/module/finalizer.go
index 274ef9b853a..8f6a1120f02 100644
--- a/module/finalizer.go
+++ b/module/finalizer.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package module
 
 import (
diff --git a/module/finalizer/collection/finalizer.go b/module/finalizer/collection/finalizer.go
index bfe1d76ae4f..d67b844ef74 100644
--- a/module/finalizer/collection/finalizer.go
+++ b/module/finalizer/collection/finalizer.go
@@ -3,16 +3,16 @@ package collection
 
 import (
 	"fmt"
 
-	"github.com/dgraph-io/badger/v2"
+	"github.com/jordanschalm/lockctx"
 
+	"github.com/onflow/flow-go/engine/collection"
 	"github.com/onflow/flow-go/model/cluster"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/model/messages"
 	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/module/mempool"
-	"github.com/onflow/flow-go/network"
-	"github.com/onflow/flow-go/storage/badger/operation"
-	"github.com/onflow/flow-go/storage/badger/procedure"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
 )
 
 // Finalizer is a simple wrapper around our temporary state to clean up after a
@@ -20,23 +20,26 @@ import (
 // finalized collection from the mempool and updating the finalized boundary in
 // the cluster state.
 type Finalizer struct {
-	db           *badger.DB
+	db           storage.DB
+	lockManager  lockctx.Manager
 	transactions mempool.Transactions
-	prov         network.Engine
+	pusher       collection.GuaranteedCollectionPublisher
 	metrics      module.CollectionMetrics
 }
 
 // NewFinalizer creates a new finalizer for collection nodes.
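 // The pusher is only invoked after a finalized collection has been successfully
 // persisted (see MakeFinal below), so consensus nodes never receive guarantees for
 // collections that are not durably finalized.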
func NewFinalizer( - db *badger.DB, + db storage.DB, + lockManager lockctx.Manager, transactions mempool.Transactions, - prov network.Engine, + pusher collection.GuaranteedCollectionPublisher, metrics module.CollectionMetrics, ) *Finalizer { f := &Finalizer{ db: db, + lockManager: lockManager, transactions: transactions, - prov: prov, + pusher: pusher, metrics: metrics, } return f @@ -54,61 +57,69 @@ func NewFinalizer( // pools and persistent storage. // No errors are expected during normal operation. func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { - return operation.RetryOnConflict(f.db.Update, func(tx *badger.Txn) error { + // Acquire a lock for finalizing cluster blocks + lctx := f.lockManager.NewContext() + defer lctx.Release() + if err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock); err != nil { + return fmt.Errorf("could not acquire lock: %w", err) + } - // retrieve the header of the block we want to finalize - var header flow.Header - err := operation.RetrieveHeader(blockID, &header)(tx) - if err != nil { - return fmt.Errorf("could not retrieve header: %w", err) - } + reader := f.db.Reader() + // retrieve the header of the block we want to finalize + var header flow.Header + err := operation.RetrieveHeader(reader, blockID, &header) + if err != nil { + return fmt.Errorf("could not retrieve header: %w", err) + } - // retrieve the current finalized cluster state boundary - var boundary uint64 - err = operation.RetrieveClusterFinalizedHeight(header.ChainID, &boundary)(tx) - if err != nil { - return fmt.Errorf("could not retrieve boundary: %w", err) - } + // retrieve the current finalized cluster state boundary + var boundary uint64 + err = operation.RetrieveClusterFinalizedHeight(reader, header.ChainID, &boundary) + if err != nil { + return fmt.Errorf("could not retrieve boundary: %w", err) + } - // retrieve the ID of the last finalized block as marker for stopping - var headID flow.Identifier - err = operation.LookupClusterBlockHeight(header.ChainID, boundary, &headID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve head: %w", err) - } + // retrieve the ID of the last finalized block as marker for stopping + var headID flow.Identifier + err = operation.LookupClusterBlockHeight(reader, header.ChainID, boundary, &headID) + if err != nil { + return fmt.Errorf("could not retrieve head: %w", err) + } - // there are no blocks to finalize, we may have already finalized - // this block - exit early - if boundary >= header.Height { - return nil - } + // there are no blocks to finalize, we may have already finalized + // this block - exit early + if boundary >= header.Height { + return nil + } - // To finalize all blocks from the currently finalized one up to and - // including the current, we first enumerate each of these blocks. - // We start at the youngest block and remember all visited blocks, - // while tracing back until we reach the finalized state - steps := []*flow.Header{&header} - parentID := header.ParentID - for parentID != headID { - var parent flow.Header - err = operation.RetrieveHeader(parentID, &parent)(tx) - if err != nil { - return fmt.Errorf("could not retrieve parent (%x): %w", parentID, err) - } - steps = append(steps, &parent) - parentID = parent.ParentID + // To finalize all blocks from the currently finalized one up to and + // including the current, we first enumerate each of these blocks. 
+ // We start at the youngest block and remember all visited blocks, + // while tracing back until we reach the finalized state + steps := []*flow.Header{&header} + parentID := header.ParentID + for parentID != headID { + var parent flow.Header + err = operation.RetrieveHeader(reader, parentID, &parent) + if err != nil { + return fmt.Errorf("could not retrieve parent (%x): %w", parentID, err) } + steps = append(steps, &parent) + parentID = parent.ParentID + } - // now we can step backwards in order to go from oldest to youngest; for - // each header, we reconstruct the block and then apply the related - // changes to the protocol state - for i := len(steps) - 1; i >= 0; i-- { + // now we can step backwards in order to go from oldest to youngest; for + // each header, we reconstruct the block and then apply the related + // changes to the protocol state + for i := len(steps) - 1; i >= 0; i-- { + err := f.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { clusterBlockID := steps[i].ID() // look up the transactions included in the payload step := steps[i] var payload cluster.Payload - err = procedure.RetrieveClusterPayload(clusterBlockID, &payload)(tx) + // This does not require a lock, as a block's payload once set never changes. + err = operation.RetrieveClusterPayload(rw.GlobalReader(), clusterBlockID, &payload) if err != nil { return fmt.Errorf("could not retrieve payload for cluster block (id=%x): %w", clusterBlockID, err) } @@ -121,31 +132,38 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { } // finalize the block in cluster state - err = procedure.FinalizeClusterBlock(clusterBlockID)(tx) + err = operation.FinalizeClusterBlock(lctx, rw, clusterBlockID) if err != nil { return fmt.Errorf("could not finalize cluster block (id=%x): %w", clusterBlockID, err) } - block := &cluster.Block{ - Header: step, - Payload: &payload, + block, err := cluster.NewBlock( + cluster.UntrustedBlock{ + HeaderBody: step.HeaderBody, + Payload: payload, + }, + ) + if err != nil { + return fmt.Errorf("could not build cluster block: %w", err) } + f.metrics.ClusterBlockFinalized(block) // if the finalized collection is empty, we don't need to include it // in the reference height index or submit it to consensus nodes if len(payload.Collection.Transactions) == 0 { - continue + return nil } // look up the reference block height to populate index var refBlock flow.Header - err = operation.RetrieveHeader(payload.ReferenceBlockID, &refBlock)(tx) + // This does not require a lock, as a block's header once set never changes. + err = operation.RetrieveHeader(rw.GlobalReader(), payload.ReferenceBlockID, &refBlock) if err != nil { return fmt.Errorf("could not retrieve reference block (id=%x): %w", payload.ReferenceBlockID, err) } // index the finalized cluster block by reference block height - err = operation.IndexClusterBlockByReferenceHeight(refBlock.Height, clusterBlockID)(tx) + err = operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), refBlock.Height, clusterBlockID) if err != nil { return fmt.Errorf("could not index cluster block (id=%x) by reference height (%d): %w", clusterBlockID, refBlock.Height, err) } @@ -159,18 +177,30 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { // For now, we just use the parent signers as the guarantors of this // collection. 
- // TODO add real signatures here (2711) - f.prov.SubmitLocal(&messages.SubmitCollectionGuarantee{ - Guarantee: flow.CollectionGuarantee{ - CollectionID: payload.Collection.ID(), - ReferenceBlockID: payload.ReferenceBlockID, - ChainID: header.ChainID, - SignerIndices: step.ParentVoterIndices, - Signature: nil, // TODO: to remove because it's not easily verifiable by consensus nodes - }, + // TODO add real signatures here (https://github.com/onflow/flow-go-internal/issues/4569) + // TODO: after adding real signature here add check for signature in NewCollectionGuarantee + guarantee, err := flow.NewCollectionGuarantee(flow.UntrustedCollectionGuarantee{ + CollectionID: payload.Collection.ID(), + ReferenceBlockID: payload.ReferenceBlockID, + ClusterChainID: header.ChainID, + SignerIndices: step.ParentVoterIndices, + Signature: nil, // TODO: to remove because it's not easily verifiable by consensus nodes }) + if err != nil { + return fmt.Errorf("could not construct guarantee: %w", err) + } + + // collections should only be pushed to consensus nodes, once they are successfully persisted as finalized: + storage.OnCommitSucceed(rw, func() { + f.pusher.SubmitCollectionGuarantee((*messages.CollectionGuarantee)(guarantee)) + }) + + return nil + }) + if err != nil { + return fmt.Errorf("could not finalize cluster block (%x): %w", steps[i].ID(), err) } + } - return nil - }) + return nil } diff --git a/module/finalizer/collection/finalizer_test.go b/module/finalizer/collection/finalizer_test.go index f8224105482..afc72b00168 100644 --- a/module/finalizer/collection/finalizer_test.go +++ b/module/finalizer/collection/finalizer_test.go @@ -1,293 +1,358 @@ package collection_test import ( - "math/rand" "testing" - "time" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + collectionmock "github.com/onflow/flow-go/engine/collection/mock" model "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/finalizer/collection" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/network/mocknetwork" cluster "github.com/onflow/flow-go/state/cluster/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" "github.com/onflow/flow-go/utils/unittest" ) func TestFinalizer(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - // seed the RNG - rand.Seed(time.Now().UnixNano()) - - // reference block on the main consensus chain - refBlock := unittest.BlockHeaderFixture() - // genesis block for the cluster chain - genesis := model.Genesis() - - metrics := metrics.NewNoopCollector() - - var state *cluster.State - - pool := herocache.NewTransactions(1000, unittest.Logger(), metrics) + // reference block on the main consensus chain + refBlock := unittest.ClusterBlockFixture() + // genesis block for the cluster chain + genesis, err := unittest.ClusterBlock.Genesis() + require.NoError(t, err) - // a helper function to clean up shared state between tests - cleanup := func() { - // wipe the DB - err := db.DropAll() - require.Nil(t, err) - // clear the mempool - for _, tx := 
range pool.All() { - pool.Remove(tx.ID()) - } - } + metrics := metrics.NewNoopCollector() + pool := herocache.NewTransactions(1000, unittest.Logger(), metrics) - // a helper function to bootstrap with the genesis block - bootstrap := func() { - stateRoot, err := cluster.NewStateRoot(genesis, unittest.QuorumCertificateFixture(), 0) - require.NoError(t, err) - state, err = cluster.Bootstrap(db, stateRoot) - require.NoError(t, err) - err = db.Update(operation.InsertHeader(refBlock.ID(), refBlock)) - require.NoError(t, err) - } + // a helper function to bootstrap with the genesis block + bootstrap := func(db storage.DB, lockManager lockctx.Manager) *cluster.State { + stateRoot, err := cluster.NewStateRoot(genesis, unittest.QuorumCertificateFixture(), 0) + require.NoError(t, err) + state, err := cluster.Bootstrap(db, lockManager, stateRoot) + require.NoError(t, err) - // a helper function to insert a block - insert := func(block model.Block) { - err := db.Update(procedure.InsertClusterBlock(&block)) - assert.Nil(t, err) - } + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, refBlock.ID(), refBlock.ToHeader()) + }) + }) + require.NoError(t, err) + return state + } + + // a helper function to insert a block + insert := func(db storage.DB, lockManager lockctx.Manager, block *model.Block) { + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(block)) + }) + }) + require.NoError(t, err) + } - t.Run("non-existent block", func(t *testing.T) { - bootstrap() - defer cleanup() + // Run each test with its own fresh database + t.Run("non-existent block", func(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() + bootstrap(db, lockManager) - prov := new(mocknetwork.Engine) - prov.On("SubmitLocal", mock.Anything) - finalizer := collection.NewFinalizer(db, pool, prov, metrics) + pusher := collectionmock.NewGuaranteedCollectionPublisher(t) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) fakeBlockID := unittest.IdentifierFixture() err := finalizer.MakeFinal(fakeBlockID) assert.Error(t, err) }) + }) - t.Run("already finalized block", func(t *testing.T) { - bootstrap() - defer cleanup() + t.Run("already finalized block", func(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() + bootstrap(db, lockManager) - prov := new(mocknetwork.Engine) - prov.On("SubmitLocal", mock.Anything) - finalizer := collection.NewFinalizer(db, pool, prov, metrics) + pusher := collectionmock.NewGuaranteedCollectionPublisher(t) + pusher.On("SubmitCollectionGuarantee", mock.Anything).Once() + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // tx1 is included in the finalized block tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 1 }) - assert.True(t, pool.Add(&tx1)) + assert.True(t, pool.Add(tx1.ID(), &tx1)) // create a new block on genesis - block := unittest.ClusterBlockWithParent(genesis) - 
block.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx1)) - insert(block) + payload, err := model.NewPayload( + model.UntrustedPayload{ + ReferenceBlockID: refBlock.ID(), + Collection: flow.Collection{Transactions: []*flow.TransactionBody{&tx1}}, + }, + ) + require.NoError(t, err) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(genesis), + unittest.ClusterBlock.WithPayload(*payload), + ) + + insert(db, lockManager, block) // finalize the block - err := finalizer.MakeFinal(block.ID()) - assert.Nil(t, err) + err = finalizer.MakeFinal(block.ID()) + assert.NoError(t, err) // finalize the block again - this should be a no-op err = finalizer.MakeFinal(block.ID()) - assert.Nil(t, err) + assert.NoError(t, err) }) + }) - t.Run("unconnected block", func(t *testing.T) { - bootstrap() - defer cleanup() + t.Run("unconnected block", func(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() + bootstrap(db, lockManager) - prov := new(mocknetwork.Engine) - prov.On("SubmitLocal", mock.Anything) - finalizer := collection.NewFinalizer(db, pool, prov, metrics) + pusher := collectionmock.NewGuaranteedCollectionPublisher(t) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // create a new block that isn't connected to a parent - block := unittest.ClusterBlockWithParent(genesis) - block.Header.ParentID = unittest.IdentifierFixture() - block.SetPayload(model.EmptyPayload(refBlock.ID())) - insert(block) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(genesis), + unittest.ClusterBlock.WithPayload(*model.NewEmptyPayload(refBlock.ID())), + ) + block.ParentID = unittest.IdentifierFixture() + + insert(db, lockManager, block) // try to finalize - this should fail err := finalizer.MakeFinal(block.ID()) assert.Error(t, err) }) + }) - t.Run("empty collection block", func(t *testing.T) { - bootstrap() - defer cleanup() + t.Run("empty collection block", func(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() + state := bootstrap(db, lockManager) - prov := new(mocknetwork.Engine) - finalizer := collection.NewFinalizer(db, pool, prov, metrics) + pusher := collectionmock.NewGuaranteedCollectionPublisher(t) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // create a block with empty payload on genesis - block := unittest.ClusterBlockWithParent(genesis) - block.SetPayload(model.EmptyPayload(refBlock.ID())) - insert(block) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(genesis), + unittest.ClusterBlock.WithPayload(*model.NewEmptyPayload(refBlock.ID())), + ) + insert(db, lockManager, block) // finalize the block err := finalizer.MakeFinal(block.ID()) - assert.Nil(t, err) + assert.NoError(t, err) // check finalized boundary using cluster state final, err := state.Final().Head() - assert.Nil(t, err) - assert.Equal(t, block.ID(), final.ID()) - - // collection should not have been propagated - prov.AssertNotCalled(t, "SubmitLocal", mock.Anything) + assert.NoError(t, err) + assert.Equal(t, block.ToHeader().ID(), final.ID()) }) + }) - t.Run("finalize single block", func(t *testing.T) { - bootstrap() - defer cleanup() + t.Run("finalize single block", func(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() 
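+ // bootstrap returns the cluster state handle, which the assertions below use to read the finalized head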
+ state := bootstrap(db, lockManager) - prov := new(mocknetwork.Engine) - prov.On("SubmitLocal", mock.Anything) - finalizer := collection.NewFinalizer(db, pool, prov, metrics) + pusher := collectionmock.NewGuaranteedCollectionPublisher(t) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // tx1 is included in the finalized block and mempool tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 1 }) - assert.True(t, pool.Add(&tx1)) + assert.True(t, pool.Add(tx1.ID(), &tx1)) // tx2 is only in the mempool tx2 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 2 }) - assert.True(t, pool.Add(&tx2)) + assert.True(t, pool.Add(tx2.ID(), &tx2)) // create a block containing tx1 on top of genesis - block := unittest.ClusterBlockWithParent(genesis) - block.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx1)) - insert(block) + payload, err := model.NewPayload( + model.UntrustedPayload{ + ReferenceBlockID: refBlock.ID(), + Collection: flow.Collection{Transactions: []*flow.TransactionBody{&tx1}}, + }, + ) + require.NoError(t, err) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(genesis), + unittest.ClusterBlock.WithPayload(*payload), + ) + insert(db, lockManager, block) + + // block should be passed to pusher + pusher.On("SubmitCollectionGuarantee", &messages.CollectionGuarantee{ + CollectionID: block.Payload.Collection.ID(), + ReferenceBlockID: refBlock.ID(), + ClusterChainID: block.ChainID, + SignerIndices: block.ParentVoterIndices, + Signature: nil, + }).Once() // finalize the block - err := finalizer.MakeFinal(block.ID()) - assert.Nil(t, err) + err = finalizer.MakeFinal(block.ID()) + assert.NoError(t, err) // tx1 should have been removed from mempool assert.False(t, pool.Has(tx1.ID())) - // tx2 should still be in mempool + // tx2 should NOT have been removed from mempool assert.True(t, pool.Has(tx2.ID())) // check finalized boundary using cluster state final, err := state.Final().Head() - assert.Nil(t, err) - assert.Equal(t, block.ID(), final.ID()) - assertClusterBlocksIndexedByReferenceHeight(t, db, refBlock.Height, final.ID()) - - // block should be passed to provider - prov.AssertNumberOfCalls(t, "SubmitLocal", 1) - prov.AssertCalled(t, "SubmitLocal", &messages.SubmitCollectionGuarantee{ - Guarantee: flow.CollectionGuarantee{ - CollectionID: block.Payload.Collection.ID(), - ReferenceBlockID: refBlock.ID(), - ChainID: block.Header.ChainID, - SignerIndices: block.Header.ParentVoterIndices, - Signature: nil, - }, - }) + assert.NoError(t, err) + assert.Equal(t, block.ToHeader().ID(), final.ID()) + assertClusterBlocksIndexedByReferenceHeight(t, lockManager, db, refBlock.Height, block.ID()) }) + }) - // when finalizing a block with un-finalized ancestors, those ancestors should be finalized as well - t.Run("finalize multiple blocks together", func(t *testing.T) { - bootstrap() - defer cleanup() + t.Run("finalize multiple blocks together", func(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() + state := bootstrap(db, lockManager) - prov := new(mocknetwork.Engine) - prov.On("SubmitLocal", mock.Anything) - finalizer := collection.NewFinalizer(db, pool, prov, metrics) + pusher := collectionmock.NewGuaranteedCollectionPublisher(t) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) - // tx1 is included in the first 
finalized block and mempool + // tx1 is included in block1 and mempool tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 1 }) - assert.True(t, pool.Add(&tx1)) - // tx2 is included in the second finalized block and mempool + assert.True(t, pool.Add(tx1.ID(), &tx1)) + // tx2 is included in block2 and mempool tx2 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 2 }) - assert.True(t, pool.Add(&tx2)) - - // create a block containing tx1 on top of genesis - block1 := unittest.ClusterBlockWithParent(genesis) - block1.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx1)) - insert(block1) - - // create a block containing tx2 on top of block1 - block2 := unittest.ClusterBlockWithParent(&block1) - block2.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx2)) - insert(block2) + assert.True(t, pool.Add(tx2.ID(), &tx2)) - // finalize block2 (should indirectly finalize block1 as well) - err := finalizer.MakeFinal(block2.ID()) - assert.Nil(t, err) - - // tx1 and tx2 should have been removed from mempool + // create block1 containing tx1 on top of genesis + payload1, err := model.NewPayload( + model.UntrustedPayload{ + ReferenceBlockID: refBlock.ID(), + Collection: flow.Collection{Transactions: []*flow.TransactionBody{&tx1}}, + }, + ) + require.NoError(t, err) + block1 := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(genesis), + unittest.ClusterBlock.WithPayload(*payload1), + ) + insert(db, lockManager, block1) + + // create block2 containing tx2 on top of block1 + payload2, err := model.NewPayload( + model.UntrustedPayload{ + ReferenceBlockID: refBlock.ID(), + Collection: flow.Collection{Transactions: []*flow.TransactionBody{&tx2}}, + }, + ) + require.NoError(t, err) + block2 := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(block1), + unittest.ClusterBlock.WithPayload(*payload2), + ) + insert(db, lockManager, block2) + + // blocks should be passed to pusher + pusher.On("SubmitCollectionGuarantee", &messages.CollectionGuarantee{ + CollectionID: block1.Payload.Collection.ID(), + ReferenceBlockID: refBlock.ID(), + ClusterChainID: block1.ChainID, + SignerIndices: block1.ParentVoterIndices, + Signature: nil, + }).Once() + pusher.On("SubmitCollectionGuarantee", &messages.CollectionGuarantee{ + CollectionID: block2.Payload.Collection.ID(), + ReferenceBlockID: refBlock.ID(), + ClusterChainID: block2.ChainID, + SignerIndices: block2.ParentVoterIndices, + Signature: nil, + }).Once() + + // finalize both blocks together + err = finalizer.MakeFinal(block2.ID()) + assert.NoError(t, err) + + // both transactions should have been removed from mempool assert.False(t, pool.Has(tx1.ID())) assert.False(t, pool.Has(tx2.ID())) // check finalized boundary using cluster state final, err := state.Final().Head() - assert.Nil(t, err) - assert.Equal(t, block2.ID(), final.ID()) - assertClusterBlocksIndexedByReferenceHeight(t, db, refBlock.Height, block1.ID(), block2.ID()) - - // both blocks should be passed to provider - prov.AssertNumberOfCalls(t, "SubmitLocal", 2) - prov.AssertCalled(t, "SubmitLocal", &messages.SubmitCollectionGuarantee{ - Guarantee: flow.CollectionGuarantee{ - CollectionID: block1.Payload.Collection.ID(), - ReferenceBlockID: refBlock.ID(), - ChainID: block1.Header.ChainID, - SignerIndices: block1.Header.ParentVoterIndices, - Signature: nil, - }, - }) - prov.AssertCalled(t, "SubmitLocal", &messages.SubmitCollectionGuarantee{ - Guarantee: 
flow.CollectionGuarantee{ - CollectionID: block2.Payload.Collection.ID(), - ReferenceBlockID: refBlock.ID(), - ChainID: block2.Header.ChainID, - SignerIndices: block2.Header.ParentVoterIndices, - Signature: nil, - }, - }) + assert.NoError(t, err) + assert.Equal(t, block2.ToHeader().ID(), final.ID()) + assertClusterBlocksIndexedByReferenceHeight(t, lockManager, db, refBlock.Height, block1.ID(), block2.ID()) }) + }) - t.Run("finalize with un-finalized child", func(t *testing.T) { - bootstrap() - defer cleanup() + t.Run("finalize with un-finalized child", func(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() + state := bootstrap(db, lockManager) - prov := new(mocknetwork.Engine) - prov.On("SubmitLocal", mock.Anything) - finalizer := collection.NewFinalizer(db, pool, prov, metrics) + pusher := collectionmock.NewGuaranteedCollectionPublisher(t) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) - // tx1 is included in the finalized parent block and mempool + // tx1 is included in block1 and mempool tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 1 }) - assert.True(t, pool.Add(&tx1)) - // tx2 is included in the un-finalized block and mempool + assert.True(t, pool.Add(tx1.ID(), &tx1)) + // tx2 is included in block2 and mempool tx2 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 2 }) - assert.True(t, pool.Add(&tx2)) + assert.True(t, pool.Add(tx2.ID(), &tx2)) - // create a block containing tx1 on top of genesis - block1 := unittest.ClusterBlockWithParent(genesis) - block1.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx1)) - insert(block1) - - // create a block containing tx2 on top of block1 - block2 := unittest.ClusterBlockWithParent(&block1) - block2.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx2)) - insert(block2) + // create block1 containing tx1 on top of genesis + payload1, err := model.NewPayload( + model.UntrustedPayload{ + ReferenceBlockID: refBlock.ID(), + Collection: flow.Collection{Transactions: []*flow.TransactionBody{&tx1}}, + }, + ) + require.NoError(t, err) + block1 := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(genesis), + unittest.ClusterBlock.WithPayload(*payload1), + ) + insert(db, lockManager, block1) + + // create block2 containing tx2 on top of block1 + payload2, err := model.NewPayload( + model.UntrustedPayload{ + ReferenceBlockID: refBlock.ID(), + Collection: flow.Collection{Transactions: []*flow.TransactionBody{&tx2}}, + }, + ) + require.NoError(t, err) + block2 := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(block1), + unittest.ClusterBlock.WithPayload(*payload2), + ) + insert(db, lockManager, block2) + + // block1 should be passed to pusher + pusher.On("SubmitCollectionGuarantee", &messages.CollectionGuarantee{ + CollectionID: block1.Payload.Collection.ID(), + ReferenceBlockID: refBlock.ID(), + ClusterChainID: block1.ChainID, + SignerIndices: block1.ParentVoterIndices, + Signature: nil, + }).Once() // finalize block1 (should NOT finalize block2) - err := finalizer.MakeFinal(block1.ID()) - assert.Nil(t, err) + err = finalizer.MakeFinal(block1.ID()) + assert.NoError(t, err) // tx1 should have been removed from mempool assert.False(t, pool.Has(tx1.ID())) @@ -296,52 +361,68 @@ func TestFinalizer(t *testing.T) { // check finalized boundary using cluster state final, err := 
state.Final().Head() - assert.Nil(t, err) - assert.Equal(t, block1.ID(), final.ID()) - assertClusterBlocksIndexedByReferenceHeight(t, db, refBlock.Height, block1.ID()) - - // block should be passed to provider - prov.AssertNumberOfCalls(t, "SubmitLocal", 1) - prov.AssertCalled(t, "SubmitLocal", &messages.SubmitCollectionGuarantee{ - Guarantee: flow.CollectionGuarantee{ - CollectionID: block1.Payload.Collection.ID(), - ReferenceBlockID: refBlock.ID(), - ChainID: block1.Header.ChainID, - SignerIndices: block1.Header.ParentVoterIndices, - Signature: nil, - }, - }) + assert.NoError(t, err) + assert.Equal(t, block1.ToHeader().ID(), final.ID()) + assertClusterBlocksIndexedByReferenceHeight(t, lockManager, db, refBlock.Height, block1.ID()) }) + }) - // when finalizing a block with a conflicting fork, the fork should not be finalized. - t.Run("conflicting fork", func(t *testing.T) { - bootstrap() - defer cleanup() + t.Run("conflicting fork", func(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() + state := bootstrap(db, lockManager) - prov := new(mocknetwork.Engine) - prov.On("SubmitLocal", mock.Anything) - finalizer := collection.NewFinalizer(db, pool, prov, metrics) + pusher := collectionmock.NewGuaranteedCollectionPublisher(t) + finalizer := collection.NewFinalizer(db, lockManager, pool, pusher, metrics) // tx1 is included in the finalized block and mempool tx1 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 1 }) - assert.True(t, pool.Add(&tx1)) + assert.True(t, pool.Add(tx1.ID(), &tx1)) // tx2 is included in the conflicting block and mempool tx2 := unittest.TransactionBodyFixture(func(tx *flow.TransactionBody) { tx.ProposalKey.SequenceNumber = 2 }) - assert.True(t, pool.Add(&tx2)) + assert.True(t, pool.Add(tx2.ID(), &tx2)) // create a block containing tx1 on top of genesis - block1 := unittest.ClusterBlockWithParent(genesis) - block1.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx1)) - insert(block1) + payload, err := model.NewPayload( + model.UntrustedPayload{ + ReferenceBlockID: refBlock.ID(), + Collection: flow.Collection{Transactions: []*flow.TransactionBody{&tx1}}, + }, + ) + require.NoError(t, err) + block1 := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(genesis), + unittest.ClusterBlock.WithPayload(*payload), + ) + insert(db, lockManager, block1) // create a block containing tx2 on top of genesis (conflicting with block1) - block2 := unittest.ClusterBlockWithParent(genesis) - block2.SetPayload(model.PayloadFromTransactions(refBlock.ID(), &tx2)) - insert(block2) + payload, err = model.NewPayload( + model.UntrustedPayload{ + ReferenceBlockID: refBlock.ID(), + Collection: flow.Collection{Transactions: []*flow.TransactionBody{&tx2}}, + }, + ) + require.NoError(t, err) + block2 := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(genesis), + unittest.ClusterBlock.WithPayload(*payload), + ) + insert(db, lockManager, block2) + + // block should be passed to pusher + pusher.On("SubmitCollectionGuarantee", &messages.CollectionGuarantee{ + CollectionID: block1.Payload.Collection.ID(), + ReferenceBlockID: refBlock.ID(), + ClusterChainID: block1.ChainID, + SignerIndices: block1.ParentVoterIndices, + Signature: nil, + }).Once() // finalize block1 - err := finalizer.MakeFinal(block1.ID()) - assert.Nil(t, err) + err = finalizer.MakeFinal(block1.ID()) + assert.NoError(t, err) // tx1 should have been removed 
from mempool assert.False(t, pool.Has(tx1.ID())) @@ -350,21 +431,9 @@ func TestFinalizer(t *testing.T) { // check finalized boundary using cluster state final, err := state.Final().Head() - assert.Nil(t, err) - assert.Equal(t, block1.ID(), final.ID()) - assertClusterBlocksIndexedByReferenceHeight(t, db, refBlock.Height, block1.ID()) - - // block should be passed to provider - prov.AssertNumberOfCalls(t, "SubmitLocal", 1) - prov.AssertCalled(t, "SubmitLocal", &messages.SubmitCollectionGuarantee{ - Guarantee: flow.CollectionGuarantee{ - CollectionID: block1.Payload.Collection.ID(), - ReferenceBlockID: refBlock.ID(), - ChainID: block1.Header.ChainID, - SignerIndices: block1.Header.ParentVoterIndices, - Signature: nil, - }, - }) + assert.NoError(t, err) + assert.Equal(t, block1.ToHeader().ID(), final.ID()) + assertClusterBlocksIndexedByReferenceHeight(t, lockManager, db, refBlock.Height, block1.ID()) }) }) } @@ -372,9 +441,11 @@ func TestFinalizer(t *testing.T) { // assertClusterBlocksIndexedByReferenceHeight checks the given cluster blocks have // been indexed by the given reference block height, which is expected as part of // finalization. -func assertClusterBlocksIndexedByReferenceHeight(t *testing.T, db *badger.DB, refHeight uint64, clusterBlockIDs ...flow.Identifier) { +func assertClusterBlocksIndexedByReferenceHeight(t *testing.T, lockManager lockctx.Manager, db storage.DB, refHeight uint64, clusterBlockIDs ...flow.Identifier) { var ids []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(refHeight, refHeight, &ids)) + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), refHeight, refHeight, &ids) + }) require.NoError(t, err) assert.ElementsMatch(t, clusterBlockIDs, ids) } diff --git a/module/finalizer/consensus/finalizer.go b/module/finalizer/consensus/finalizer.go index d0f8bdda796..5caff16d08e 100644 --- a/module/finalizer/consensus/finalizer.go +++ b/module/finalizer/consensus/finalizer.go @@ -1,43 +1,39 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package consensus import ( "context" "fmt" - "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/operation" ) // Finalizer is a simple wrapper around our temporary state to clean up after a // block has been fully finalized to the persistent protocol state. type Finalizer struct { - db *badger.DB - headers storage.Headers - state protocol.FollowerState - cleanup CleanupFunc - tracer module.Tracer + dbReader storage.Reader + headers storage.Headers + state protocol.FollowerState + cleanup CleanupFunc + tracer module.Tracer } // NewFinalizer creates a new finalizer for the temporary state. 
-func NewFinalizer(db *badger.DB, +func NewFinalizer(dbReader storage.Reader, headers storage.Headers, state protocol.FollowerState, tracer module.Tracer, options ...func(*Finalizer)) *Finalizer { f := &Finalizer{ - db: db, - state: state, - headers: headers, - cleanup: CleanupNothing(), - tracer: tracer, + dbReader: dbReader, + state: state, + headers: headers, + cleanup: CleanupNothing(), + tracer: tracer, } for _, option := range options { option(f) @@ -64,7 +60,7 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { // that height, it's an invalid operation. Otherwise, it is a no-op. var finalized uint64 - err := f.db.View(operation.RetrieveFinalizedHeight(&finalized)) + err := operation.RetrieveFinalizedHeight(f.dbReader, &finalized) if err != nil { return fmt.Errorf("could not retrieve finalized height: %w", err) } @@ -75,12 +71,12 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { } if pending.Height <= finalized { - dup, err := f.headers.ByHeight(pending.Height) + dupID, err := f.headers.BlockIDByHeight(pending.Height) if err != nil { return fmt.Errorf("could not retrieve finalized equivalent: %w", err) } - if dup.ID() != blockID { - return fmt.Errorf("cannot finalize pending block conflicting with finalized state (height: %d, pending: %x, finalized: %x)", pending.Height, blockID, dup.ID()) + if dupID != blockID { + return fmt.Errorf("cannot finalize pending block conflicting with finalized state (height: %d, pending: %x, finalized: %x)", pending.Height, blockID, dupID) } return nil } @@ -91,7 +87,7 @@ func (f *Finalizer) MakeFinal(blockID flow.Identifier) error { // back to the last finalized block, this is also an invalid call. var finalID flow.Identifier - err = f.db.View(operation.LookupBlockHeight(finalized, &finalID)) + err = operation.LookupBlockHeight(f.dbReader, finalized, &finalID) if err != nil { return fmt.Errorf("could not retrieve finalized header: %w", err) } diff --git a/module/finalizer/consensus/finalizer_test.go b/module/finalizer/consensus/finalizer_test.go index 35b20705ec4..965654328dc 100644 --- a/module/finalizer/consensus/finalizer_test.go +++ b/module/finalizer/consensus/finalizer_test.go @@ -4,7 +4,8 @@ import ( "math/rand" "testing" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -13,9 +14,10 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/trace" mockprot "github.com/onflow/flow-go/state/protocol/mock" - storage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - mockstor "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -26,18 +28,6 @@ func LogCleanup(list *[]flow.Identifier) func(flow.Identifier) error { } } -func TestNewFinalizer(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - headers := &mockstor.Headers{} - state := &mockprot.FollowerState{} - tracer := trace.NewNoopTracer() - fin := NewFinalizer(db, headers, state, tracer) - assert.Equal(t, fin.db, db) - assert.Equal(t, fin.headers, headers) - assert.Equal(t, fin.state, state) - }) -} - // TestMakeFinalValidChain checks whether calling `MakeFinal` with the ID of a valid 
// descendant block of the latest finalized header results in the finalization of the // valid descendant and all of its parents up to the finalized header, but excluding @@ -74,34 +64,53 @@ func TestMakeFinalValidChain(t *testing.T) { // this will hold the IDs of blocks clean up var list []flow.Identifier - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + // set up lock context + lockManager := storage.NewTestingLockManager() + dbImpl := pebbleimpl.ToDB(pdb) - // insert the latest finalized height - err := db.Update(operation.InsertFinalizedHeight(final.Height)) - require.NoError(t, err) + err := unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + // insert the latest finalized height + err := dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertFinalizedHeight(lctx, rw.Writer(), final.Height) + }) + require.NoError(t, err) - // map the finalized height to the finalized block ID - err = db.Update(operation.IndexBlockHeight(final.Height, final.ID())) + // map the finalized height to the finalized block ID + err = dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, final.Height, final.ID()) + }) + require.NoError(t, err) + return nil + }) require.NoError(t, err) // insert the finalized block header into the DB - err = db.Update(operation.InsertHeader(final.ID(), final)) + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, final.ID(), final) + }) + }) require.NoError(t, err) // insert all of the pending blocks into the DB for _, header := range pending { - err = db.Update(operation.InsertHeader(header.ID(), header)) + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, header.ID(), header) + }) + }) require.NoError(t, err) } // initialize the finalizer with the dependencies and make the call metrics := metrics.NewNoopCollector() fin := Finalizer{ - db: db, - headers: storage.NewHeaders(metrics, db), - state: state, - tracer: trace.NewNoopTracer(), - cleanup: LogCleanup(&list), + dbReader: pebbleimpl.ToDB(pdb).Reader(), + headers: store.NewHeaders(metrics, pebbleimpl.ToDB(pdb)), + state: state, + tracer: trace.NewNoopTracer(), + cleanup: LogCleanup(&list), } err = fin.MakeFinal(lastID) require.NoError(t, err) @@ -132,32 +141,46 @@ func TestMakeFinalInvalidHeight(t *testing.T) { // this will hold the IDs of blocks clean up var list []flow.Identifier - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - // insert the latest finalized height - err := db.Update(operation.InsertFinalizedHeight(final.Height)) - require.NoError(t, err) - - // map the finalized height to the finalized block ID - err = db.Update(operation.IndexBlockHeight(final.Height, final.ID())) + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + dbImpl := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() + + // Insert the latest finalized height and map the finalized height to the finalized block ID. 
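+ // Both writes are applied in one batch while the finalize-block lock is held, so the finalized-height boundary and the height-to-ID index cannot diverge.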
+ err := unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := operation.IndexFinalizedBlockByHeight(lctx, rw, final.Height, final.ID()) + if err != nil { + return err + } + return operation.UpsertFinalizedHeight(lctx, rw.Writer(), final.Height) + }) + }) require.NoError(t, err) // insert the finalized block header into the DB - err = db.Update(operation.InsertHeader(final.ID(), final)) + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(insertLctx lockctx.Context) error { + return dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(insertLctx, rw, final.ID(), final) + }) + }) require.NoError(t, err) - // insert all of the pending header into DB - err = db.Update(operation.InsertHeader(pending.ID(), pending)) + // insert pending header into DB, which has the same height as the finalized header + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(insertLctx lockctx.Context) error { + return dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(insertLctx, rw, pending.ID(), pending) + }) + }) require.NoError(t, err) // initialize the finalizer with the dependencies and make the call metrics := metrics.NewNoopCollector() fin := Finalizer{ - db: db, - headers: storage.NewHeaders(metrics, db), - state: state, - tracer: trace.NewNoopTracer(), - cleanup: LogCleanup(&list), + dbReader: pebbleimpl.ToDB(pdb).Reader(), + headers: store.NewHeaders(metrics, pebbleimpl.ToDB(pdb)), + state: state, + tracer: trace.NewNoopTracer(), + cleanup: LogCleanup(&list), } err = fin.MakeFinal(pending.ID()) require.Error(t, err) @@ -184,28 +207,39 @@ func TestMakeFinalDuplicate(t *testing.T) { // this will hold the IDs of blocks clean up var list []flow.Identifier - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + dbImpl := pebbleimpl.ToDB(pdb) // insert the latest finalized height - err := db.Update(operation.InsertFinalizedHeight(final.Height)) - require.NoError(t, err) - - // map the finalized height to the finalized block ID - err = db.Update(operation.IndexBlockHeight(final.Height, final.ID())) + err := unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := operation.UpsertFinalizedHeight(lctx, rw.Writer(), final.Height) + if err != nil { + return err + } + + return operation.IndexFinalizedBlockByHeight(lctx, rw, final.Height, final.ID()) + }) + }) require.NoError(t, err) // insert the finalized block header into the DB - err = db.Update(operation.InsertHeader(final.ID(), final)) + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(insertLctx lockctx.Context) error { + return dbImpl.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(insertLctx, rw, final.ID(), final) + }) + }) require.NoError(t, err) // initialize the finalizer with the dependencies and make the call metrics := metrics.NewNoopCollector() fin := Finalizer{ - db: db, - headers: storage.NewHeaders(metrics, db), - state: state, - tracer: trace.NewNoopTracer(), - cleanup: LogCleanup(&list), + dbReader: pebbleimpl.ToDB(pdb).Reader(), + headers: store.NewHeaders(metrics, 
pebbleimpl.ToDB(pdb)), + state: state, + tracer: trace.NewNoopTracer(), + cleanup: LogCleanup(&list), } err = fin.MakeFinal(final.ID()) require.NoError(t, err) diff --git a/module/forest/mock/vertex.go b/module/forest/mock/vertex.go index fb56bc9df53..a24678376ed 100644 --- a/module/forest/mock/vertex.go +++ b/module/forest/mock/vertex.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -13,10 +13,14 @@ type Vertex struct { mock.Mock } -// Level provides a mock function with given fields: +// Level provides a mock function with no fields func (_m *Vertex) Level() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Level") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -27,10 +31,14 @@ func (_m *Vertex) Level() uint64 { return r0 } -// Parent provides a mock function with given fields: +// Parent provides a mock function with no fields func (_m *Vertex) Parent() (flow.Identifier, uint64) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Parent") + } + var r0 flow.Identifier var r1 uint64 if rf, ok := ret.Get(0).(func() (flow.Identifier, uint64)); ok { @@ -53,10 +61,14 @@ func (_m *Vertex) Parent() (flow.Identifier, uint64) { return r0, r1 } -// VertexID provides a mock function with given fields: +// VertexID provides a mock function with no fields func (_m *Vertex) VertexID() flow.Identifier { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for VertexID") + } + var r0 flow.Identifier if rf, ok := ret.Get(0).(func() flow.Identifier); ok { r0 = rf() @@ -69,13 +81,12 @@ func (_m *Vertex) VertexID() flow.Identifier { return r0 } -type mockConstructorTestingTNewVertex interface { +// NewVertex creates a new instance of Vertex. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVertex(t interface { mock.TestingT Cleanup(func()) -} - -// NewVertex creates a new instance of Vertex. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewVertex(t mockConstructorTestingTNewVertex) *Vertex { +}) *Vertex { mock := &Vertex{} mock.Mock.Test(t) diff --git a/cmd/util/cmd/common/flow_client.go b/module/grpcclient/flow_client.go similarity index 87% rename from cmd/util/cmd/common/flow_client.go rename to module/grpcclient/flow_client.go index e16438da9f6..183b6c01417 100644 --- a/cmd/util/cmd/common/flow_client.go +++ b/module/grpcclient/flow_client.go @@ -1,4 +1,4 @@ -package common +package grpcclient import ( "fmt" @@ -9,9 +9,9 @@ import ( client "github.com/onflow/flow-go-sdk/access/grpc" + commonrpc "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/grpcutils" ) @@ -64,9 +64,12 @@ func secureFlowClient(accessAddress, accessApiNodePubKey string) (*client.Client } // create flow client - flowClient, err := client.NewClient(accessAddress, - dialOpts, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), + flowClient, err := client.NewClient( + accessAddress, + client.WithGRPCDialOptions( + dialOpts, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(commonrpc.DefaultAccessMaxResponseSize)), + ), ) if err != nil { return nil, err @@ -78,9 +81,12 @@ func secureFlowClient(accessAddress, accessApiNodePubKey string) (*client.Client // insecureFlowClient creates flow client with insecure GRPC connection func insecureFlowClient(accessAddress string) (*client.Client, error) { // create flow client - flowClient, err := client.NewClient(accessAddress, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcutils.DefaultMaxMsgSize)), + flowClient, err := client.NewClient( + accessAddress, + client.WithGRPCDialOptions( + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(commonrpc.DefaultAccessMaxResponseSize)), + ), ) if err != nil { return nil, fmt.Errorf("failed to create flow client %w", err) @@ -93,11 +99,11 @@ func insecureFlowClient(accessAddress string) (*client.Client, error) { func FlowClientConfigs(accessNodeIDS []flow.Identifier, insecureAccessAPI bool, snapshot protocol.Snapshot) ([]*FlowClientConfig, error) { flowClientOpts := make([]*FlowClientConfig, 0) - identities, err := snapshot.Identities(filter.HasNodeID(accessNodeIDS...)) + identities, err := snapshot.Identities(filter.HasNodeID[flow.Identity](accessNodeIDS...)) if err != nil { return nil, fmt.Errorf("failed get identities access node identities (ids=%v) from snapshot: %w", accessNodeIDS, err) } - identities = identities.Sort(order.ByReferenceOrder(accessNodeIDS)) + identities = identities.Sort(flow.ByReferenceOrder(accessNodeIDS)) // make sure we have identities for all the access node IDs provided if len(identities) != len(accessNodeIDS) { @@ -139,7 +145,7 @@ func convertAccessAddrFromState(address string, insecureAccessAPI bool) string { // DefaultAccessNodeIDS will return all the access node IDS in the protocol state for staked access nodes func DefaultAccessNodeIDS(snapshot protocol.Snapshot) ([]flow.Identifier, error) { - identities, err := snapshot.Identities(filter.HasRole(flow.RoleAccess)) + identities, err := snapshot.Identities(filter.HasRole[flow.Identity](flow.RoleAccess)) if err != nil { return nil, fmt.Errorf("failed to get staked access node IDs from protocol state %w", err) } diff --git 
a/module/grpcserver/interceptor_context.go b/module/grpcserver/interceptor_context.go new file mode 100644 index 00000000000..2eb5bda99f9 --- /dev/null +++ b/module/grpcserver/interceptor_context.go @@ -0,0 +1,32 @@ +package grpcserver + +import ( + "context" + + "go.uber.org/atomic" + "google.golang.org/grpc" + + "github.com/onflow/flow-go/module/irrecoverable" +) + +// IrrecoverableCtxInjector injects the irrecoverable signaler context into requests so that the API +// handler and backend can use the irrecoverable system to handle exceptions. +// +// It injects the signaler context into the original gRPC context via a context value using the key +// irrecoverable.SignalerContextKey. If signalerCtx is not set yet (because the grpc server has not +// finished initializing), the original context is passed unchanged. +func IrrecoverableCtxInjector(signalerCtx *atomic.Pointer[irrecoverable.SignalerContext]) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + // signalerCtx is set by the server initialization logic. In practice, the context should + // always be set by the time the first request is received since it is added before starting + // the server. + if sigCtx := signalerCtx.Load(); sigCtx != nil { + return handler(irrecoverable.WithSignalerContext(ctx, *sigCtx), req) + } + + // If signalerCtx is not available yet, just pass through the original context. + // This is OK since the `irrecoverable.Throw` function will still cause the node to crash + // even if it is passed a regular context. + return handler(ctx, req) + } +} diff --git a/module/grpcserver/interceptor_logging.go b/module/grpcserver/interceptor_logging.go new file mode 100644 index 00000000000..c665ce0867d --- /dev/null +++ b/module/grpcserver/interceptor_logging.go @@ -0,0 +1,54 @@ +package grpcserver + +import ( + "context" + "fmt" + + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" + "github.com/rs/zerolog" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// LoggingInterceptor returns a grpc.UnaryServerInterceptor that logs incoming GRPC requests and responses +func LoggingInterceptor(log zerolog.Logger) grpc.UnaryServerInterceptor { + return logging.UnaryServerInterceptor( + InterceptorLogger(log), + logging.WithLevels(statusCodeToLogLevel), + ) +} + +// InterceptorLogger adapts a zerolog.Logger to the interceptor's logging.Logger. +// This code is simple enough to be copied and not imported.
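+// +// A minimal usage sketch (hypothetical; assumes a zerolog logger `log` is in scope): +// +//	interceptor := logging.UnaryServerInterceptor(InterceptorLogger(log))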
+func InterceptorLogger(l zerolog.Logger) logging.Logger { + return logging.LoggerFunc(func(_ context.Context, lvl logging.Level, msg string, fields ...any) { + l := l.With().Fields(fields).Logger() + + switch lvl { + case logging.LevelDebug: + l.Debug().Msg(msg) + case logging.LevelInfo: + l.Info().Msg(msg) + case logging.LevelWarn: + l.Warn().Msg(msg) + case logging.LevelError: + l.Error().Msg(msg) + default: + panic(fmt.Sprintf("unknown level %v", lvl)) + } + }) +} + +// statusCodeToLogLevel converts a grpc status.Code to the appropriate logging.Level +func statusCodeToLogLevel(c codes.Code) logging.Level { + switch c { + case codes.OK: + // log successful returns as Debug to avoid excessive logging in info mode + return logging.LevelDebug + case codes.DeadlineExceeded, codes.ResourceExhausted, codes.OutOfRange: + // these are common, map to info + return logging.LevelInfo + default: + return logging.DefaultServerCodeToLevel(c) + } +} diff --git a/module/grpcserver/interceptor_ratelimit.go b/module/grpcserver/interceptor_ratelimit.go new file mode 100644 index 00000000000..30b8a1e4c99 --- /dev/null +++ b/module/grpcserver/interceptor_ratelimit.go @@ -0,0 +1,84 @@ +package grpcserver + +import ( + "context" + "path/filepath" + + "github.com/rs/zerolog" + "golang.org/x/time/rate" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const defaultRateLimit = 1000 // aggregate default rate limit for all unspecified API calls +const defaultBurst = 100 // default burst limit (calls made at the same time) for an API + +// RateLimiterInterceptor is a gRPC interceptor that applies rate limits to incoming requests. +type RateLimiterInterceptor struct { + log zerolog.Logger + + // a shared default rate limiter for APIs whose rate limit is not explicitly defined + defaultLimiter *rate.Limiter + + // a map from API method name to its limiter + methodLimiterMap map[string]*rate.Limiter +} + +// NewRateLimiterInterceptor creates a new rate limiter interceptor with the defined per-second rate +// limits and the optional burst limit for each API. +func NewRateLimiterInterceptor(log zerolog.Logger, apiRateLimits map[string]int, apiBurstLimits map[string]int) *RateLimiterInterceptor { + defaultLimiter := rate.NewLimiter(rate.Limit(defaultRateLimit), defaultBurst) + methodLimiterMap := make(map[string]*rate.Limiter, len(apiRateLimits)) + + // read rate limit values for each API and create a limiter for each + for api, limit := range apiRateLimits { + // if a burst limit is defined for this api, use that; otherwise use the default + burst := defaultBurst + if b, ok := apiBurstLimits[api]; ok { + burst = b + } + methodLimiterMap[api] = rate.NewLimiter(rate.Limit(limit), burst) + } + + if len(methodLimiterMap) == 0 { + log.Info().Int("default_rate_limit", defaultRateLimit).Msg("no rate limits specified, using the default limit") + } + + return &RateLimiterInterceptor{ + defaultLimiter: defaultLimiter, + methodLimiterMap: methodLimiterMap, + log: log, + } +} + +// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor that applies rate limits to requests +// based on the limits defined when creating the RateLimiterInterceptor +func (interceptor *RateLimiterInterceptor) UnaryServerInterceptor( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, +) (resp interface{}, err error) { + // remove the package name (e.g. 
"/flow.access.AccessAPI/Ping" to "Ping") + methodName := filepath.Base(info.FullMethod) + + limiter, ok := interceptor.methodLimiterMap[methodName] + if !ok { + interceptor.log.Trace().Str("method", methodName).Msg("rate limit not defined, using default limit") + limiter = interceptor.defaultLimiter + } + + if !limiter.Allow() { + interceptor.log.Trace(). + Str("method", methodName). + Interface("request", req). + Float64("limit", float64(limiter.Limit())). + Msg("rate limit exceeded") + + return nil, status.Errorf(codes.ResourceExhausted, "%s rate limit reached, please retry later.", + info.FullMethod) + } + + return handler(ctx, req) +} diff --git a/module/grpcserver/server.go b/module/grpcserver/server.go new file mode 100644 index 00000000000..4cd2ada4db9 --- /dev/null +++ b/module/grpcserver/server.go @@ -0,0 +1,111 @@ +package grpcserver + +import ( + "net" + "sync" + + "go.uber.org/atomic" + + "github.com/rs/zerolog" + + "google.golang.org/grpc" + _ "google.golang.org/grpc/encoding/gzip" // required for gRPC compression + + _ "github.com/onflow/flow-go/engine/common/grpc/compressor/deflate" // required for gRPC compression + _ "github.com/onflow/flow-go/engine/common/grpc/compressor/snappy" // required for gRPC compression + + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" +) + +// GrpcServer wraps `grpc.Server` and allows to manage it using `component.Component` interface. It can be injected +// into different engines making it possible to use single grpc server for multiple services which live in different modules. +type GrpcServer struct { + component.Component + log zerolog.Logger + server *grpc.Server + + // grpcSignalerCtx shares the irrecoverable context passed to the GrpcServer on startup with an + // interceptor that's responsible for injecting the context into requests so that it's available + // within handler code. + grpcSignalerCtx *atomic.Pointer[irrecoverable.SignalerContext] + + grpcListenAddr string // the GRPC server address as ip:port + + addrLock sync.RWMutex + grpcAddress net.Addr +} + +var _ component.Component = (*GrpcServer)(nil) + +// NewGrpcServer returns a new grpc server. +func NewGrpcServer(log zerolog.Logger, + grpcListenAddr string, + grpcServer *grpc.Server, + grpcSignalerCtx *atomic.Pointer[irrecoverable.SignalerContext], +) *GrpcServer { + server := &GrpcServer{ + log: log, + server: grpcServer, + grpcListenAddr: grpcListenAddr, + grpcSignalerCtx: grpcSignalerCtx, + } + server.Component = component.NewComponentManagerBuilder(). + AddWorker(server.serveGRPCWorker). + AddWorker(server.shutdownWorker). + Build() + return server +} + +// RegisterService calls the provided function with the grpc server as an argument to register +// the server with an external service. +func (g *GrpcServer) RegisterService(fn func(*grpc.Server)) { + fn(g.server) +} + +// serveGRPCWorker is a worker routine which starts the gRPC server. +// The ready callback is called after the server address is bound and set. +func (g *GrpcServer) serveGRPCWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + g.log = g.log.With().Str("grpc_address", g.grpcListenAddr).Logger() + g.log.Info().Msg("starting grpc server on address") + + // add the signaler context before starting the server so that it's available to the interceptor + // for the first request. 
+ g.grpcSignalerCtx.Store(&ctx) + + l, err := net.Listen("tcp", g.grpcListenAddr) + if err != nil { + g.log.Err(err).Msg("failed to start the grpc server") + ctx.Throw(err) + return + } + + // save the actual address on which we are listening (may be different from g.grpcListenAddr if no port + // was specified) + g.addrLock.Lock() + g.grpcAddress = l.Addr() + g.addrLock.Unlock() + g.log.Debug().Msg("listening on port") + ready() + + err = g.server.Serve(l) // blocking call + if err != nil { + g.log.Err(err).Msg("fatal error in grpc server") + ctx.Throw(err) + } +} + +// GRPCAddress returns the listen address of the GRPC server. +// Guaranteed to be non-nil after the server component's Ready channel is closed. +func (g *GrpcServer) GRPCAddress() net.Addr { + g.addrLock.RLock() + defer g.addrLock.RUnlock() + return g.grpcAddress +} + +// shutdownWorker is a worker routine which shuts down the server when the context is cancelled. +func (g *GrpcServer) shutdownWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + <-ctx.Done() + g.server.GracefulStop() +} diff --git a/module/grpcserver/server_builder.go b/module/grpcserver/server_builder.go new file mode 100644 index 00000000000..fbbfe16403a --- /dev/null +++ b/module/grpcserver/server_builder.go @@ -0,0 +1,127 @@ +package grpcserver + +import ( + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/rs/zerolog" + "go.uber.org/atomic" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/onflow/flow-go/module/irrecoverable" +) + +type Option func(*GrpcServerBuilder) + +// WithTransportCredentials sets the transport credentials parameters for a grpc server builder. +func WithTransportCredentials(transportCredentials credentials.TransportCredentials) Option { + return func(c *GrpcServerBuilder) { + c.transportCredentials = transportCredentials + } +} + +// WithStreamInterceptor sets the StreamInterceptor option for the grpc server. +func WithStreamInterceptor() Option { + return func(c *GrpcServerBuilder) { + c.stateStreamInterceptorEnable = true + } +} + +// GrpcServerBuilder separates creating and starting a GrpcServer, +// because services need to be registered before the server starts. +type GrpcServerBuilder struct { + log zerolog.Logger + gRPCListenAddr string + server *grpc.Server + signalerCtx *atomic.Pointer[irrecoverable.SignalerContext] + + transportCredentials credentials.TransportCredentials // the GRPC credentials + stateStreamInterceptorEnable bool +} + +// NewGrpcServerBuilder creates a new builder for configuring and initializing a gRPC server. +// +// The builder is configured with the provided parameters such as logger, gRPC server address, maximum message size, +// API rate limits, and additional options. The builder also sets up the necessary interceptors, including handling +// irrecoverable errors using the irrecoverable.SignalerContext. The gRPC server can be configured with options such +// as maximum message sizes and interceptors for handling RPC calls. +// +// If RPC metrics are enabled, the builder adds the gRPC Prometheus interceptor for collecting metrics. Additionally, +// it can enable a state stream interceptor based on the configuration. Rate limiting interceptors can be added based +// on specified API rate limits. Logging and custom interceptors are applied, and the final gRPC server is returned. +// +// If transport credentials are provided, a secure gRPC server is created; otherwise, an unsecured server is initialized.
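+// +// A hypothetical wiring sketch (the address and limit values are made-up assumptions): +// +//	builder := NewGrpcServerBuilder(log, "0.0.0.0:9000", 1<<20, 1<<20, true, map[string]int{"Ping": 100}, map[string]int{"Ping": 10}) +//	server := builder.Build()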
+// +// Note: The gRPC server is created with the specified options and is ready for further configuration or starting. +func NewGrpcServerBuilder( + log zerolog.Logger, + gRPCListenAddr string, + maxRequestMsgSize uint, + maxResponseMsgSize uint, + rpcMetricsEnabled bool, + apiRateLimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 + apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. Ping->50, GetTransaction->10 + opts ...Option, +) *GrpcServerBuilder { + log = log.With().Str("component", "grpc_server").Logger() + + grpcServerBuilder := &GrpcServerBuilder{ + gRPCListenAddr: gRPCListenAddr, + } + + for _, applyOption := range opts { + applyOption(grpcServerBuilder) + } + + // we use an atomic pointer to set up an interceptor for handling irrecoverable errors; this approach is + // dictated by the complex startup order of the grpc server and other services. At the point where we need to register + // an interceptor we don't yet have an `irrecoverable.SignalerContext`; it becomes available only when we start + // the server, but at that point we can no longer register interceptors, so we inject it through the atomic pointer. + signalerCtx := atomic.NewPointer[irrecoverable.SignalerContext](nil) + + // create a GRPC server to serve GRPC clients + grpcOpts := []grpc.ServerOption{ + grpc.MaxRecvMsgSize(int(maxRequestMsgSize)), + grpc.MaxSendMsgSize(int(maxResponseMsgSize)), + } + + var unaryInterceptors []grpc.UnaryServerInterceptor + var streamInterceptors []grpc.StreamServerInterceptor + + unaryInterceptors = append(unaryInterceptors, IrrecoverableCtxInjector(signalerCtx)) + if rpcMetricsEnabled { + unaryInterceptors = append(unaryInterceptors, grpc_prometheus.UnaryServerInterceptor) + + if grpcServerBuilder.stateStreamInterceptorEnable { + streamInterceptors = append(streamInterceptors, grpc_prometheus.StreamServerInterceptor) + } + } + + if len(apiRateLimits) > 0 { + unaryInterceptors = append(unaryInterceptors, NewRateLimiterInterceptor(log, apiRateLimits, apiBurstLimits).UnaryServerInterceptor) + } + + // Note: make sure the logging interceptor is the innermost wrapper so it captures all messages + unaryInterceptors = append(unaryInterceptors, LoggingInterceptor(log)) + + grpcOpts = append(grpcOpts, grpc.ChainUnaryInterceptor(unaryInterceptors...)) + if len(streamInterceptors) > 0 { + grpcOpts = append(grpcOpts, grpc.ChainStreamInterceptor(streamInterceptors...)) + } + + if grpcServerBuilder.transportCredentials != nil { + log = log.With().Str("endpoint", "secure").Logger() + grpcOpts = append(grpcOpts, grpc.Creds(grpcServerBuilder.transportCredentials)) + } else { + log = log.With().Str("endpoint", "unsecure").Logger() + } + + grpcServerBuilder.log = log + grpcServerBuilder.server = grpc.NewServer(grpcOpts...) + grpcServerBuilder.signalerCtx = signalerCtx + + return grpcServerBuilder +} + +// Build constructs the GrpcServer from the builder's configured grpc.Server, listen address, and signaler context. +func (b *GrpcServerBuilder) Build() *GrpcServer { + return NewGrpcServer(b.log, b.gRPCListenAddr, b.server, b.signalerCtx) +} diff --git a/module/hotstuff.go b/module/hotstuff.go index 8610ce0bce1..785aeed9988 100644 --- a/module/hotstuff.go +++ b/module/hotstuff.go @@ -22,7 +22,7 @@ type HotStuff interface { // // Block proposals must be submitted in order and only if they extend a // block already known to HotStuff core.
- SubmitProposal(proposal *model.Proposal) + SubmitProposal(proposal *model.SignedProposal) } // HotStuffFollower is run by non-consensus nodes to observe the block chain diff --git a/module/id/filtered_provider.go b/module/id/filtered_provider.go index f3703f0d9ff..7b98c14be06 100644 --- a/module/id/filtered_provider.go +++ b/module/id/filtered_provider.go @@ -8,11 +8,11 @@ import ( // IdentityFilterIdentifierProvider implements an IdentifierProvider which provides the identifiers // resulting from applying a filter to an IdentityProvider. type IdentityFilterIdentifierProvider struct { - filter flow.IdentityFilter + filter flow.IdentityFilter[flow.Identity] identityProvider module.IdentityProvider } -func NewIdentityFilterIdentifierProvider(filter flow.IdentityFilter, identityProvider module.IdentityProvider) *IdentityFilterIdentifierProvider { +func NewIdentityFilterIdentifierProvider(filter flow.IdentityFilter[flow.Identity], identityProvider module.IdentityProvider) *IdentityFilterIdentifierProvider { return &IdentityFilterIdentifierProvider{filter, identityProvider} } diff --git a/module/id/fixed_provider.go b/module/id/fixed_provider.go index d26adc0f375..466c2d843d4 100644 --- a/module/id/fixed_provider.go +++ b/module/id/fixed_provider.go @@ -34,13 +34,13 @@ func NewFixedIdentityProvider(identities flow.IdentityList) *FixedIdentityProvid return &FixedIdentityProvider{identities} } -func (p *FixedIdentityProvider) Identities(filter flow.IdentityFilter) flow.IdentityList { +func (p *FixedIdentityProvider) Identities(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { return p.identities.Filter(filter) } func (p *FixedIdentityProvider) ByNodeID(flowID flow.Identifier) (*flow.Identity, bool) { for _, v := range p.identities { - if v.ID() == flowID { + if v.NodeID == flowID { return v, true } } diff --git a/module/id_provider.go b/module/id_provider.go index b5544f09bc9..3b84181fce2 100644 --- a/module/id_provider.go +++ b/module/id_provider.go @@ -20,7 +20,7 @@ type IdentityProvider interface { // protocol that pass the provided filter. Caution, this includes ejected nodes. // Please check the `Ejected` flag in the identities (or provide a filter for // removing ejected nodes). - Identities(flow.IdentityFilter) flow.IdentityList + Identities(flow.IdentityFilter[flow.Identity]) flow.IdentityList // ByNodeID returns the full identity for the node with the given Identifier, // where Identifier is the way the protocol refers to the node. The function diff --git a/module/irrecoverable/irrecoverable.go b/module/irrecoverable/irrecoverable.go index 1ef79f5f4ab..0877732bcd8 100644 --- a/module/irrecoverable/irrecoverable.go +++ b/module/irrecoverable/irrecoverable.go @@ -48,6 +48,9 @@ type SignalerContext interface { sealed() // private, to constrain builder to using WithSignaler } +// SignalerContextKey represents the key type for retrieving a SignalerContext from a `context.Context` value. +type SignalerContextKey struct{} + // private, to force context derivation / WithSignaler type signalerCtx struct { context.Context @@ -62,6 +65,11 @@ func WithSignaler(parent context.Context) (SignalerContext, <-chan error) { return &signalerCtx{parent, sig}, errChan } +// WithSignalerContext wraps `SignalerContext` using `context.WithValue` so it can later be used with `Throw`.
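+// Throw retrieves the stored SignalerContext via ctx.Value(SignalerContextKey{}). A minimal illustrative sketch (someErr is a placeholder): +// +//	signalerCtx, errCh := WithSignaler(context.Background()) +//	ctx := WithSignalerContext(context.Background(), signalerCtx) +//	Throw(ctx, someErr) // dispatched to signalerCtx and surfaced on errCh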
+func WithSignalerContext(parent context.Context, ctx SignalerContext) context.Context { + return context.WithValue(parent, SignalerContextKey{}, ctx) +} + // Throw enables throwing an irrecoverable error using any context.Context. // // If we have an SignalerContext, we can directly ctx.Throw. @@ -72,12 +80,13 @@ func WithSignaler(parent context.Context) (SignalerContext, <-chan error) { // Throw can be a drop-in replacement anywhere we have a context.Context likely // to support Irrecoverables. Note: this is not a method func Throw(ctx context.Context, err error) { - signalerAbleContext, ok := ctx.(SignalerContext) + signalerAbleContext, ok := ctx.Value(SignalerContextKey{}).(SignalerContext) if ok { signalerAbleContext.Throw(err) + } else { + // Be spectacular on how this does not -but should- handle irrecoverables: + log.Fatalf("irrecoverable error signaler not found for context, please implement! Unhandled irrecoverable error: %v", err) } - // Be spectacular on how this does not -but should- handle irrecoverables: - log.Fatalf("irrecoverable error signaler not found for context, please implement! Unhandled irrecoverable error %v", err) } // WithSignallerAndCancel returns an irrecoverable context, the cancel diff --git a/module/irrecoverable/unittest.go b/module/irrecoverable/unittest.go index b3e252c905b..a64aef50578 100644 --- a/module/irrecoverable/unittest.go +++ b/module/irrecoverable/unittest.go @@ -3,12 +3,18 @@ package irrecoverable import ( "context" "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" ) -// MockSignalerContext is a SignalerContext which will immediately fail a test if an error is thrown. +// MockSignalerContext is a SignalerContext that can be used in tests to assert that an error is thrown. +// It embeds a [mock.Mock], so it can be used to assert that Throw is called with a specific error. +// Use NewMockSignalerContextExpectError to create a new MockSignalerContext that expects a specific error; +// otherwise, use NewMockSignalerContext. type MockSignalerContext struct { context.Context - t *testing.T + *mock.Mock } var _ SignalerContext = &MockSignalerContext{} @@ -16,17 +22,32 @@ var _ SignalerContext = &MockSignalerContext{} func (m MockSignalerContext) sealed() {} func (m MockSignalerContext) Throw(err error) { - m.t.Fatalf("mock signaler context received error: %v", err) + m.Called(err) } func NewMockSignalerContext(t *testing.T, ctx context.Context) *MockSignalerContext { - return &MockSignalerContext{ + m := &MockSignalerContext{ Context: ctx, - t: t, + Mock: &mock.Mock{}, } + m.Mock.Test(t) + t.Cleanup(func() { m.AssertExpectations(t) }) + return m } +// NewMockSignalerContextWithCancel creates a new MockSignalerContext with a cancel function. func NewMockSignalerContextWithCancel(t *testing.T, parent context.Context) (*MockSignalerContext, context.CancelFunc) { ctx, cancel := context.WithCancel(parent) return NewMockSignalerContext(t, ctx), cancel } + +// NewMockSignalerContextExpectError creates a new MockSignalerContext which expects a specific error to be thrown.
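+// The returned mock expects exactly one call to Throw with the given error; the expectation is +// asserted automatically during test cleanup.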
+func NewMockSignalerContextExpectError(t *testing.T, ctx context.Context, err error) *MockSignalerContext { + require.NotNil(t, err) + m := NewMockSignalerContext(t, ctx) + + // since we expect an error, we should expect a call to Throw + m.On("Throw", err).Once().Return() + + return m +} diff --git a/module/jobqueue.go b/module/jobqueue.go index 6f1febd548d..9150956504b 100644 --- a/module/jobqueue.go +++ b/module/jobqueue.go @@ -10,6 +10,12 @@ const ( ConsumeProgressExecutionDataRequesterBlockHeight = "ConsumeProgressExecutionDataRequesterBlockHeight" ConsumeProgressExecutionDataRequesterNotification = "ConsumeProgressExecutionDataRequesterNotification" + + ConsumeProgressExecutionDataIndexerBlockHeight = "ConsumeProgressExecutionDataIndexerBlockHeight" + + ConsumeProgressIngestionEngineBlockHeight = "ConsumeProgressIngestionEngineBlockHeight" + ConsumeProgressEngineTxErrorMessagesBlockHeight = "ConsumeProgressEngineTxErrorMessagesBlockHeight" + ConsumeProgressLastFullBlockHeight = "ConsumeProgressLastFullBlockHeight" ) // JobID is a unique ID of the job. @@ -26,9 +32,8 @@ type NewJobListener interface { type JobConsumer interface { NewJobListener - // Start starts processing jobs from a job queue. If this is the first time, a processed index - // will be initialized in the storage. If it fails to initialize, an error will be returned - Start(defaultIndex uint64) error + // Start starts processing jobs from a job queue. + Start() error // Stop gracefully stops the consumer from reading new jobs from the job queue. It does not stop // the existing worker finishing their jobs diff --git a/module/jobqueue/README.md b/module/jobqueue/README.md index 15562a89703..9c9dbf8f239 100644 --- a/module/jobqueue/README.md +++ b/module/jobqueue/README.md @@ -37,7 +37,7 @@ Job consumer provides the `Check` method for users to notify new jobs available. Once called, job consumer will iterate through each height with the `AtIndex` method. It stops when one of the following condition is true: 1. no job was found at a index -2. no more workers to work on them, which is limitted by the config item `maxProcessing` +2. no more workers to work on them, which is limited by the config item `maxProcessing` `Check` method is concurrent safe, meaning even if job consumer is notified concurrently about new jobs available, job consumer will check at most once to find new jobs. @@ -77,7 +77,7 @@ The jobqueue architecture is optimized for "pull" style processes, where the job Some use cases might require "push" style jobs where there is a job producer that create new jobs, and a consumer that processes work from the producer. This is possible with the jobqueue, but requires the producer persist the jobs to a database, then implement the `Head` and `AtIndex` methods that allow accessing jobs by sequential `uint64` indexes. ### TODOs -1. Jobs at different index are processed in parallel, it's possible that there is a job takes a long time to work on, and causing too many completed jobs cached in memory before being used to update the the last processed job index. +1. Jobs at different indexes are processed in parallel. It's possible that one job takes a long time to work on, causing too many completed jobs to be cached in memory before they are used to update the last processed job index.
The difference between `maxSearchAhead` and `maxProcessing` is that: `maxProcessing` allows at most `maxProcessing` number of works to process jobs. However, even if there is worker available, it might not be assigned to a job, because the job at index lastProcesssed +1 has not been done, it won't work on an job with index higher than `lastProcesssed + maxSearchAhead`. 2. accept callback to get notified when the consecutive job index is finished. diff --git a/module/jobqueue/component_consumer.go b/module/jobqueue/component_consumer.go index b66e7802b8d..457aed3f804 100644 --- a/module/jobqueue/component_consumer.go +++ b/module/jobqueue/component_consumer.go @@ -8,7 +8,6 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/storage" ) @@ -28,13 +27,13 @@ type ComponentConsumer struct { func NewComponentConsumer( log zerolog.Logger, workSignal <-chan struct{}, - progress storage.ConsumerProgress, + progressInitializer storage.ConsumerProgressInitializer, jobs module.Jobs, defaultIndex uint64, processor JobProcessor, // method used to process jobs maxProcessing uint64, maxSearchAhead uint64, -) *ComponentConsumer { +) (*ComponentConsumer, error) { c := &ComponentConsumer{ workSignal: workSignal, @@ -48,20 +47,17 @@ func NewComponentConsumer( func(id module.JobID) { c.NotifyJobIsDone(id) }, maxProcessing, ) - c.consumer = NewConsumer(c.log, c.jobs, progress, worker, maxProcessing, maxSearchAhead) + + consumer, err := NewConsumer(log, jobs, progressInitializer, worker, maxProcessing, maxSearchAhead, defaultIndex) + if err != nil { + return nil, err + } + c.consumer = consumer builder := component.NewComponentManagerBuilder(). AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - worker.Start(ctx) - if err := util.WaitClosed(ctx, worker.Ready()); err != nil { - c.log.Info().Msg("job consumer startup aborted") - <-worker.Done() - c.log.Info().Msg("job consumer shutdown complete") - return - } - c.log.Info().Msg("job consumer starting") - err := c.consumer.Start(defaultIndex) + err := c.consumer.Start() if err != nil { ctx.Throw(fmt.Errorf("could not start consumer: %w", err)) } @@ -73,20 +69,38 @@ func NewComponentConsumer( // blocks until all running jobs have stopped c.consumer.Stop() + }). + AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + worker.Start(ctx) + + select { + case <-ctx.Done(): + c.log.Info().Msg("job consumer startup aborted") + case <-worker.Ready(): + ready() + } <-worker.Done() c.log.Info().Msg("job consumer shutdown complete") }). 
AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + // marking this ready first allows this worker to depend on the component's own Ready() + // channel to detect when other workers have started ready() - c.processingLoop(ctx) + + select { + case <-ctx.Done(): + return + case <-c.Ready(): + c.processingLoop(ctx) + } }) cm := builder.Build() c.cm = cm c.Component = cm - return c + return c, nil } // SetPreNotifier sets a notification function that is invoked before marking a job as done in the diff --git a/module/jobqueue/component_consumer_test.go b/module/jobqueue/component_consumer_test.go index 2802c08bf7c..8f4ec576580 100644 --- a/module/jobqueue/component_consumer_test.go +++ b/module/jobqueue/component_consumer_test.go @@ -3,15 +3,15 @@ package jobqueue import ( "context" "fmt" - "os" "sync" "testing" "time" - "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "go.uber.org/atomic" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/irrecoverable" @@ -27,8 +27,6 @@ type ComponentConsumerSuite struct { defaultIndex uint64 maxProcessing uint64 maxSearchAhead uint64 - - progress *storagemock.ConsumerProgress } func TestComponentConsumerSuite(t *testing.T) { @@ -40,8 +38,6 @@ func (suite *ComponentConsumerSuite) SetupTest() { suite.defaultIndex = uint64(0) suite.maxProcessing = uint64(2) suite.maxSearchAhead = uint64(5) - - suite.progress = new(storagemock.ConsumerProgress) } func mockJobs(data map[uint64]TestJob) *modulemock.Jobs { @@ -67,14 +63,6 @@ func mockJobs(data map[uint64]TestJob) *modulemock.Jobs { return jobs } -func mockProgress() *storagemock.ConsumerProgress { - progress := new(storagemock.ConsumerProgress) - progress.On("ProcessedIndex").Return(uint64(0), nil) - progress.On("SetProcessedIndex", mock.AnythingOfType("uint64")).Return(nil) - - return progress -} - func generateTestData(jobCount uint64) map[uint64]TestJob { jobData := make(map[uint64]TestJob, jobCount) @@ -93,19 +81,26 @@ func (suite *ComponentConsumerSuite) prepareTest( ) (*ComponentConsumer, chan struct{}) { jobs := mockJobs(jobData) - workSignal := make(chan struct{}) - progress := mockProgress() + workSignal := make(chan struct{}, 1) - consumer := NewComponentConsumer( - zerolog.New(os.Stdout).With().Timestamp().Logger(), + progress := new(storagemock.ConsumerProgress) + progress.On("ProcessedIndex").Return(suite.defaultIndex, nil) + progress.On("SetProcessedIndex", mock.AnythingOfType("uint64")).Return(nil) + + progressInitializer := new(storagemock.ConsumerProgressInitializer) + progressInitializer.On("Initialize", mock.AnythingOfType("uint64")).Return(progress, nil) + + consumer, err := NewComponentConsumer( + unittest.Logger(), workSignal, - progress, + progressInitializer, jobs, suite.defaultIndex, processor, suite.maxProcessing, suite.maxSearchAhead, ) + require.NoError(suite.T(), err) consumer.SetPreNotifier(preNotifier) consumer.SetPostNotifier(postNotifier) @@ -144,7 +139,7 @@ func (suite *ComponentConsumerSuite) TestHappyPath() { wg.Add(int(testJobsCount)) consumer, workSignal := suite.prepareTest(processor, nil, notifier, jobData) - suite.runTest(testCtx, consumer, workSignal, func() { + suite.runTest(testCtx, consumer, func() { workSignal <- struct{}{} wg.Wait() }) @@ -162,7 +157,7 @@ func (suite *ComponentConsumerSuite) TestHappyPath() { wg.Add(int(testJobsCount)) consumer, workSignal := suite.prepareTest(processor, notifier, 
nil, jobData) - suite.runTest(testCtx, consumer, workSignal, func() { + suite.runTest(testCtx, consumer, func() { workSignal <- struct{}{} wg.Wait() }) @@ -216,7 +211,7 @@ func (suite *ComponentConsumerSuite) TestProgressesOnComplete() { suite.maxProcessing = 1 consumer, workSignal := suite.prepareTest(processor, nil, notifier, jobData) - suite.runTest(testCtx, consumer, workSignal, func() { + suite.runTest(testCtx, consumer, func() { workSignal <- struct{}{} unittest.RequireNeverClosedWithin(suite.T(), done, 100*time.Millisecond, fmt.Sprintf("job %d wasn't supposed to finish", stopIndex+1)) }) @@ -230,6 +225,48 @@ func (suite *ComponentConsumerSuite) TestProgressesOnComplete() { } } +func (suite *ComponentConsumerSuite) TestSignalsBeforeReadyDoNotCheck() { + testCtx, testCancel := context.WithCancel(context.Background()) + defer testCancel() + + suite.defaultIndex = uint64(100) + started := atomic.NewBool(false) + + jobConsumer := modulemock.NewJobConsumer(suite.T()) + jobConsumer.On("Start").Return(func() error { + // force Start to take a while so the processingLoop is ready first + // the processingLoop should wait to start, otherwise Check would be called + time.Sleep(500 * time.Millisecond) + started.Store(true) + return nil + }) + jobConsumer.On("Stop") + + wg := sync.WaitGroup{} + wg.Add(1) + + jobConsumer.On("Check").Run(func(_ mock.Arguments) { + assert.True(suite.T(), started.Load(), "check was called before started") + wg.Done() + }) + + consumer, workSignal := suite.prepareTest(nil, nil, nil, nil) + consumer.consumer = jobConsumer + + // send a signal before the component starts to ensure Check would be called if the + // processingLoop was started + workSignal <- struct{}{} + + ctx, cancel := context.WithCancel(testCtx) + signalCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx) + consumer.Start(signalCtx) + + unittest.RequireCloseBefore(suite.T(), consumer.Ready(), 1*time.Second, "timeout waiting for consumer to be ready") + unittest.RequireReturnsBefore(suite.T(), wg.Wait, 100*time.Millisecond, "check was not called") + cancel() + unittest.RequireCloseBefore(suite.T(), consumer.Done(), 1*time.Second, "timeout waiting for consumer to be done") +} + // TestPassesIrrecoverableErrors: // - throws an irrecoverable error // - verifies no jobs were processed @@ -282,7 +319,6 @@ func (suite *ComponentConsumerSuite) TestPassesIrrecoverableErrors() { func (suite *ComponentConsumerSuite) runTest( testCtx context.Context, consumer *ComponentConsumer, - workSignal chan<- struct{}, sendJobs func(), ) { ctx, cancel := context.WithCancel(testCtx) diff --git a/module/jobqueue/consumer.go b/module/jobqueue/consumer.go index 9e5e6b113b8..035f625dfaf 100644 --- a/module/jobqueue/consumer.go +++ b/module/jobqueue/consumer.go @@ -43,16 +43,30 @@ type Consumer struct { processings map[uint64]*jobStatus // keep track of the status of each on going job processingsIndex map[module.JobID]uint64 // lookup the index of the job, useful when fast forwarding the // `processed` variable + + started *atomic.Bool // only allow the consumer to be started once, and forbid calls to Check before Start } func NewConsumer( log zerolog.Logger, jobs module.Jobs, - progress storage.ConsumerProgress, + progressInitializer storage.ConsumerProgressInitializer, worker Worker, maxProcessing uint64, maxSearchAhead uint64, -) *Consumer { + defaultIndex uint64, +) (*Consumer, error) { + + progress, err := progressInitializer.Initialize(defaultIndex) + if err != nil { + return nil, fmt.Errorf("could not initialize 
processed index: %w", err) + } + + processedIndex, err := progress.ProcessedIndex() + if err != nil { + return nil, fmt.Errorf("could not read processed index: %w", err) + } + return &Consumer{ log: log.With().Str("sub_module", "job_queue").Logger(), @@ -68,52 +82,29 @@ func NewConsumer( // init state variables running: false, isChecking: atomic.NewBool(false), - processedIndex: 0, + started: atomic.NewBool(false), + processedIndex: processedIndex, processings: make(map[uint64]*jobStatus), processingsIndex: make(map[module.JobID]uint64), - } + }, nil } // Start starts consuming the jobs from the job queue. -func (c *Consumer) Start(defaultIndex uint64) error { +func (c *Consumer) Start() error { c.mu.Lock() defer c.mu.Unlock() - if c.running { - return nil + if !c.started.CompareAndSwap(false, true) { + return fmt.Errorf("consumer has already been started") } - c.running = true - // on startup, sync with storage for the processed index - // to ensure the consistency - processedIndex, err := c.progress.ProcessedIndex() - if errors.Is(err, storage.ErrNotFound) { - err := c.progress.InitProcessedIndex(defaultIndex) - if errors.Is(err, storage.ErrAlreadyExists) { - return fmt.Errorf("processed index has already been inited, no effect for the second time. default index: %v", - defaultIndex) - } - - if err != nil { - return fmt.Errorf("could not init processed index: %w", err) - } - - processedIndex = defaultIndex - - c.log.Warn().Uint64("processed index", processedIndex). - Msg("processed index not found, initialized.") - } else if err != nil { - return fmt.Errorf("could not read processed index: %w", err) - } - - c.processedIndex = processedIndex + c.log.Info(). + Uint64("processed", c.processedIndex). + Msg("consumer started") c.checkProcessable() - c.log.Info(). - Uint64("processed", processedIndex). - Msg("consumer started") return nil } @@ -166,6 +157,12 @@ func (c *Consumer) NotifyJobIsDone(jobID module.JobID) uint64 { // since multiple checks at the same time are unnecessary, we could only keep one check by checking. // an atomic isChecking value. 
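+// Check is a no-op if it is called before Start: a warning is logged and no search for processable jobs is performed.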
func (c *Consumer) Check() { + if !c.started.Load() { + // Check is not allowed before the consumer is started + c.log.Warn().Msg("ignoring Check before Start") + return + } + if !c.isChecking.CompareAndSwap(false, true) { // other process is checking, we could exit and rely on that process to check // processable jobs @@ -192,11 +189,10 @@ func (c *Consumer) checkProcessable() { } if processingCount > 0 { - c.log.Info().Int64("processing", processingCount).Msg("processing jobs") + c.log.Debug().Int64("processing", processingCount).Msg("processing jobs") } else { c.log.Debug().Bool("running", c.running).Msg("no job found") } - } // run checks if there are processable jobs and process them by giving diff --git a/module/jobqueue/consumer_behavior_test.go b/module/jobqueue/consumer_behavior_test.go index 1fac55faa96..b26f1249720 100644 --- a/module/jobqueue/consumer_behavior_test.go +++ b/module/jobqueue/consumer_behavior_test.go @@ -7,14 +7,15 @@ import ( "testing" "time" - badgerdb "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/rs/zerolog" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/jobqueue" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -89,15 +90,15 @@ func TestConsumer(t *testing.T) { } func testOnStartup(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) assertProcessed(t, cp, 0) }) } func TestProcessedOrder(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(5)) + runWith(t, 5, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) assertProcessed(t, cp, 5) }) } @@ -105,8 +106,8 @@ func TestProcessedOrder(t *testing.T) { // [+1] => [0#, 1!] 
// when received job 1, it will be processed func testOnReceiveOneJob(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -121,8 +122,8 @@ func testOnReceiveOneJob(t *testing.T) { // [+1, 1*] => [0#, 1#] // when job 1 is finished, it will be marked as processed func testOnJobFinished(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -138,8 +139,8 @@ func testOnJobFinished(t *testing.T) { // [+1, +2, 1*, 2*] => [0#, 1#, 2#] // when job 2 and 1 are finished, they will be marked as processed func testOnJobsFinished(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -159,8 +160,8 @@ func testOnJobsFinished(t *testing.T) { // [+1, +2, +3, +4] => [0#, 1!, 2!, 3!, 4] // when more jobs are arrived than the max number of workers, only the first 3 jobs will be processed func testMaxWorker(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -183,8 +184,8 @@ func testMaxWorker(t *testing.T) { // [+1, +2, +3, +4, +5, +6] => [0#, !1, *2, *3, *4, *5, 6, +7] => [0#, *1, *2, *3, *4, *5, !6, !7] // when processing lags behind, the consumer is paused until processing catches up func testPauseResume(t *testing.T) { - runWithSeatchAhead(t, 5, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWithSeatchAhead(t, 5, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -229,8 +230,8 @@ func testPauseResume(t *testing.T) { // [+1, +2, +3, +4, 3*] => [0#, 1!, 2!, 3*, 4!] 
// when job 3 is finished, which is not the next processing job 1, the processed index won't change func testNonNextFinished(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -259,8 +260,8 @@ func testNonNextFinished(t *testing.T) { // // [+1, +2, +3, +3, +4] => [1, 2, 3*, 4] => [1, 2, 3*, 4*] => => [1#, 2, 3*, 4*] => [1#, 2#, 3#, 4#] func testMovingProcessedIndex(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -312,8 +313,8 @@ func testMovingProcessedIndex(t *testing.T) { // [+1, +2, +3, +4, 3*, 2*] => [0#, 1!, 2*, 3*, 4!] // when job 3 and 2 are finished, the processed index won't change, because 1 is still not finished func testTwoNonNextFinished(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -339,8 +340,8 @@ func testTwoNonNextFinished(t *testing.T) { // [+1, +2, +3, +4, 3*, 2*, +5] => [0#, 1!, 2*, 3*, 4!, 5!] // when job 5 is received, it will be processed, because the worker has capacity func testProcessingWithNonNextFinished(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -369,8 +370,8 @@ func testProcessingWithNonNextFinished(t *testing.T) { // [+1, +2, +3, +4, 3*, 2*, +5, +6] => [0#, 1!, 2*, 3*, 4!, 5!, 6] // when job 6 is received, no more worker can process it, it will be buffered func testMaxWorkerWithFinishedNonNexts(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -402,8 +403,8 @@ func testMaxWorkerWithFinishedNonNexts(t *testing.T) { // [+1, +2, +3, +4, 3*, 2*, +5, 1*] => [0#, 1#, 2#, 3#, 4!, 5!] 
// when job 1 is finally finished, it will fast forward the processed index to 3 func testFastforward(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -434,8 +435,8 @@ func testFastforward(t *testing.T) { // [+1, +2, +3, +4, 3*, 2*, +5, 1*, +6, +7, 6*], restart => [0#, 1#, 2#, 3#, 4!, 5!, 6*, 7!] // when job queue crashed and restarted, the queue can be resumed func testWorkOnNextAfterFastforward(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) require.NoError(t, j.PushOne()) // +1 c.Check() @@ -469,24 +470,27 @@ func testWorkOnNextAfterFastforward(t *testing.T) { // rebuild a consumer with the dependencies to simulate a restart // jobs need to be reused, since it stores all the jobs reWorker := newMockWorker() - reProgress := badger.NewConsumerProgress(db, ConsumerTag) - reConsumer := newTestConsumer(reProgress, j, reWorker, 0) + reProgress := store.NewConsumerProgress(pebbleimpl.ToDB(db), ConsumerTag) + reConsumer := newTestConsumer(t, reProgress, j, reWorker, 0, DefaultIndex) - err := reConsumer.Start(DefaultIndex) + progress, err := reProgress.Initialize(DefaultIndex) + require.NoError(t, err) + + err = reConsumer.Start() require.NoError(t, err) time.Sleep(1 * time.Millisecond) reWorker.AssertCalled(t, []int64{4, 5, 6}) - assertProcessed(t, reProgress, 3) + assertProcessed(t, progress, 3) }) } // [+1, +2, +3, +4, Stop, 2*] => [0#, 1!, 2*, 3!, 4] // when Stop is called, it won't work on any job any more func testStopRunning(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) for i := 0; i < 4; i++ { require.NoError(t, j.PushOne()) c.Check() @@ -507,8 +511,8 @@ func testStopRunning(t *testing.T) { } func testConcurrency(t *testing.T) { - runWith(t, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { - require.NoError(t, c.Start(DefaultIndex)) + runWith(t, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { + require.NoError(t, c.Start()) var finishAll sync.WaitGroup finishAll.Add(100) // Finish job concurrently @@ -549,17 +553,22 @@ func testConcurrency(t *testing.T) { type JobID = module.JobID type Job = module.Job -func runWith(t testing.TB, runTestWith func(module.JobConsumer, storage.ConsumerProgress, *mockWorker, *jobqueue.MockJobs, *badgerdb.DB)) { - runWithSeatchAhead(t, 0, runTestWith) +func runWith(t testing.TB, + defaultIndex uint64, + runTestWith func(module.JobConsumer, storage.ConsumerProgress, *mockWorker, *jobqueue.MockJobs, *pebble.DB)) { + runWithSeatchAhead(t, 0, defaultIndex, runTestWith) } 
-func runWithSeatchAhead(t testing.TB, maxSearchAhead uint64, runTestWith func(module.JobConsumer, storage.ConsumerProgress, *mockWorker, *jobqueue.MockJobs, *badgerdb.DB)) { - unittest.RunWithBadgerDB(t, func(db *badgerdb.DB) { +func runWithSeatchAhead(t testing.TB, maxSearchAhead uint64, defaultIndex uint64, + runTestWith func(module.JobConsumer, storage.ConsumerProgress, *mockWorker, *jobqueue.MockJobs, *pebble.DB)) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { jobs := jobqueue.NewMockJobs() worker := newMockWorker() - progress := badger.NewConsumerProgress(db, ConsumerTag) - consumer := newTestConsumer(progress, jobs, worker, maxSearchAhead) - runTestWith(consumer, progress, worker, jobs, db) + progressInitializer := store.NewConsumerProgress(pebbleimpl.ToDB(pdb), ConsumerTag) + consumer := newTestConsumer(t, progressInitializer, jobs, worker, maxSearchAhead, defaultIndex) + progress, err := progressInitializer.Initialize(defaultIndex) + require.NoError(t, err) + runTestWith(consumer, progress, worker, jobs, pdb) }) } @@ -569,10 +578,12 @@ func assertProcessed(t testing.TB, cp storage.ConsumerProgress, expectProcessed require.Equal(t, expectProcessed, processed) } -func newTestConsumer(cp storage.ConsumerProgress, jobs module.Jobs, worker jobqueue.Worker, maxSearchAhead uint64) module.JobConsumer { +func newTestConsumer(t testing.TB, cp storage.ConsumerProgressInitializer, jobs module.Jobs, worker jobqueue.Worker, maxSearchAhead uint64, defaultIndex uint64) module.JobConsumer { log := unittest.Logger().With().Str("module", "consumer").Logger() maxProcessing := uint64(3) - return jobqueue.NewConsumer(log, jobs, cp, worker, maxProcessing, maxSearchAhead) + c, err := jobqueue.NewConsumer(log, jobs, cp, worker, maxProcessing, maxSearchAhead, defaultIndex) + require.NoError(t, err) + return c } // a Mock worker that stores all the jobs that it was asked to work on @@ -629,7 +640,7 @@ func (w *mockWorker) AssertCalled(t *testing.T, expectCalled []int64) { // 0.22 ms to finish job func BenchmarkPushAndConsume(b *testing.B) { b.StopTimer() - runWith(b, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *badgerdb.DB) { + runWith(b, DefaultIndex, func(c module.JobConsumer, cp storage.ConsumerProgress, w *mockWorker, j *jobqueue.MockJobs, db *pebble.DB) { var wg sync.WaitGroup wg.Add(b.N) @@ -641,7 +652,7 @@ func BenchmarkPushAndConsume(b *testing.B) { }() } - require.NoError(b, c.Start(DefaultIndex)) + require.NoError(b, c.Start()) b.StartTimer() for i := 0; i < b.N; i++ { diff --git a/module/jobqueue/consumer_test.go b/module/jobqueue/consumer_test.go index 1b3e6aab927..4c672d73876 100644 --- a/module/jobqueue/consumer_test.go +++ b/module/jobqueue/consumer_test.go @@ -7,13 +7,14 @@ import ( "testing" "time" - badgerdb "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -156,23 +157,18 @@ func TestProcessableJobs(t *testing.T) { // Test after jobs have been processed, the job status are removed to prevent from memory-leak func TestProcessedIndexDeletion(t *testing.T) { - setup := func(t *testing.T, f func(c *Consumer, jobs *MockJobs)) { - unittest.RunWithBadgerDB(t, func(db *badgerdb.DB) { - log := 
unittest.Logger().With().Str("module", "consumer").Logger() - jobs := NewMockJobs() - progress := badger.NewConsumerProgress(db, "consumer") - worker := newMockWorker() - maxProcessing := uint64(3) - c := NewConsumer(log, jobs, progress, worker, maxProcessing, 0) - worker.WithConsumer(c) - - f(c, jobs) - }) - } + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + log := unittest.Logger().With().Str("module", "consumer").Logger() + jobs := NewMockJobs() + progressInitializer := store.NewConsumerProgress(db, "consumer") + worker := newMockWorker() + maxProcessing := uint64(3) + c, err := NewConsumer(log, jobs, progressInitializer, worker, maxProcessing, 0, 0) + require.NoError(t, err) + worker.WithConsumer(c) - setup(t, func(c *Consumer, jobs *MockJobs) { require.NoError(t, jobs.PushN(10)) - require.NoError(t, c.Start(0)) + require.NoError(t, c.Start()) require.Eventually(t, func() bool { c.mu.Lock() @@ -188,6 +184,43 @@ func TestProcessedIndexDeletion(t *testing.T) { }) } +func TestCheckBeforeStartIsNoop(t *testing.T) { + t.Parallel() + + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + storedProcessedIndex := uint64(100) + + worker := newMockWorker() + progressInitializer := store.NewConsumerProgress(db, "consumer") + progress, err := progressInitializer.Initialize(10) + require.NoError(t, err) + // set the processedIndex to a value + require.NoError(t, progress.SetProcessedIndex(storedProcessedIndex)) + + c, err := NewConsumer( + unittest.Logger(), + NewMockJobs(), + progressInitializer, + worker, + uint64(3), + 0, + 10, // default index is before the stored processedIndex + ) + require.NoError(t, err) + worker.WithConsumer(c) + + // check will store the processedIndex. Before start, it will be uninitialized (0) + c.Check() + + // start will load the processedIndex from storage + err = c.Start() + require.NoError(t, err) + + // make sure that the processedIndex at the end is from storage + assert.Equal(t, storedProcessedIndex, c.LastProcessedIndex()) + }) +} + func assertJobs(t *testing.T, expectedIndex []uint64, jobsToRun []*jobAtIndex) { actualIndex := make([]uint64, 0, len(jobsToRun)) for _, jobAtIndex := range jobsToRun { diff --git a/module/jobqueue/finalized_block_reader_test.go b/module/jobqueue/finalized_block_reader_test.go index 41c5f403b97..32dc6f759f5 100644 --- a/module/jobqueue/finalized_block_reader_test.go +++ b/module/jobqueue/finalized_block_reader_test.go @@ -3,7 +3,7 @@ package jobqueue_test import ( "testing" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/engine/testutil" @@ -23,11 +23,11 @@ func TestBlockReader(t *testing.T) { // head of block reader should be the same height as the last block on the chain. head, err := reader.Head() require.NoError(t, err) - require.Equal(t, head, blocks[len(blocks)-1].Header.Height) + require.Equal(t, head, blocks[len(blocks)-1].Height) // retrieved blocks from block reader should be the same as the original blocks stored in it. 
for _, actual := range blocks { - index := actual.Header.Height + index := actual.Height job, err := reader.AtIndex(index) require.NoError(t, err) @@ -47,7 +47,7 @@ func withReader( withBlockReader func(*jobqueue.FinalizedBlockReader, []*flow.Block), ) { require.Equal(t, blockCount%2, 0, "block count for this test should be even") - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { collector := &metrics.NoopCollector{} tracer := trace.NewNoopTracer() @@ -62,10 +62,15 @@ func withReader( // blocks (i.e., containing guarantees), and Cs are container blocks for their preceding reference block, // Container blocks only contain receipts of their preceding reference blocks. But they do not // hold any guarantees. - root, err := s.State.Params().Root() + root, err := s.State.Final().Head() require.NoError(t, err) - clusterCommittee := participants.Filter(filter.HasRole(flow.RoleCollection)) - results := vertestutils.CompleteExecutionReceiptChainFixture(t, root, blockCount/2, vertestutils.WithClusterCommittee(clusterCommittee)) + protocolState, err := s.State.Final().ProtocolState() + require.NoError(t, err) + protocolStateID := protocolState.ID() + + clusterCommittee := participants.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) + sources := unittest.RandomSourcesFixture(10) + results := vertestutils.CompleteExecutionReceiptChainFixture(t, root, protocolStateID, blockCount/2, sources, vertestutils.WithClusterCommittee(clusterCommittee)) blocks := vertestutils.ExtendStateWithFinalizedBlocks(t, results, s.State) withBlockReader(reader, blocks) diff --git a/module/jobqueue/jobs_test.go b/module/jobqueue/jobs_test.go index 6aaf4caa94a..04592a1b81f 100644 --- a/module/jobqueue/jobs_test.go +++ b/module/jobqueue/jobs_test.go @@ -20,7 +20,7 @@ func TestJobID(t *testing.T) { func TestBlockJob(t *testing.T) { block := unittest.BlockFixture() - job := jobqueue.BlockToJob(&block) + job := jobqueue.BlockToJob(block) t.Run("job is correct type", func(t *testing.T) { assert.IsType(t, &jobqueue.BlockJob{}, job, "job is not a block job") @@ -34,7 +34,7 @@ func TestBlockJob(t *testing.T) { t.Run("job converts to block", func(t *testing.T) { b, err := jobqueue.JobToBlock(job) assert.NoError(t, err, "unexpected error converting notify job to block") - assert.Equal(t, block, *b, "converted block is not the same as the original block") + assert.Equal(t, block, b, "converted block is not the same as the original block") }) t.Run("incorrect job type fails to convert to block", func(t *testing.T) { diff --git a/module/jobqueue/sealed_header_reader_test.go b/module/jobqueue/sealed_header_reader_test.go index a8db553c540..735ed5ab09b 100644 --- a/module/jobqueue/sealed_header_reader_test.go +++ b/module/jobqueue/sealed_header_reader_test.go @@ -3,7 +3,7 @@ package jobqueue_test import ( "testing" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -24,13 +24,13 @@ func TestSealedBlockHeaderReader(t *testing.T) { // head of the reader is the last sealed block head, err := reader.Head() assert.NoError(t, err) - assert.Equal(t, lastSealedBlock.Header.Height, head, "head does not match last sealed block") + assert.Equal(t, lastSealedBlock.Height, head, "head does not match last sealed block") // retrieved blocks from block reader should be the same as the original blocks stored in it. 
// all except the last block should be sealed lastIndex := len(blocks) for _, expected := range blocks[:lastIndex-1] { - index := expected.Header.Height + index := expected.Height job, err := reader.AtIndex(index) assert.NoError(t, err) @@ -55,13 +55,13 @@ func RunWithReader( withBlockReader func(*jobqueue.SealedBlockHeaderReader, []*flow.Block), ) { require.Equal(t, blockCount%2, 0, "block count for this test should be even") - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { blocks := make([]*flow.Block, blockCount) blocksByHeight := make(map[uint64]*flow.Block, blockCount) var seals []*flow.Header - parent := unittest.GenesisFixture().Header + parent := unittest.Block.Genesis(flow.Emulator).ToHeader() for i := 0; i < blockCount; i++ { seals = []*flow.Header{parent} height := uint64(i) + 1 @@ -69,7 +69,7 @@ func RunWithReader( blocks[i] = unittest.BlockWithParentAndSeals(parent, seals) blocksByHeight[height] = blocks[i] - parent = blocks[i].Header + parent = blocks[i].ToHeader() } snapshot := synctest.MockProtocolStateSnapshot(synctest.WithHead(seals[0])) diff --git a/module/local.go b/module/local.go index e1fa2f5fa45..cb7ec0b8f2e 100644 --- a/module/local.go +++ b/module/local.go @@ -1,10 +1,9 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package module import ( - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/model/flow" ) @@ -23,7 +22,7 @@ type Local interface { Sign([]byte, hash.Hasher) (crypto.Signature, error) // NotMeFilter returns handy not-me filter for searching identity - NotMeFilter() flow.IdentityFilter + NotMeFilter() flow.IdentityFilter[flow.Identity] // SignFunc provides a signature oracle that given a message, a hasher, and a signing function, it // generates and returns a signature over the message using the node's private key diff --git a/module/local/me.go b/module/local/me.go index 6a2f1ce117a..5cdb4275a6f 100644 --- a/module/local/me.go +++ b/module/local/me.go @@ -1,22 +1,21 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package local import ( "fmt" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" ) type Local struct { - me *flow.Identity + me flow.IdentitySkeleton sk crypto.PrivateKey // instance of the node's private staking key } -func New(id *flow.Identity, sk crypto.PrivateKey) (*Local, error) { +func New(id flow.IdentitySkeleton, sk crypto.PrivateKey) (*Local, error) { if !sk.PublicKey().Equals(id.StakingPubKey) { return nil, fmt.Errorf("cannot initialize with mismatching keys, expect %v, but got %v", id.StakingPubKey, sk.PublicKey()) @@ -41,8 +40,8 @@ func (l *Local) Sign(msg []byte, hasher hash.Hasher) (crypto.Signature, error) { return l.sk.Sign(msg, hasher) } -func (l *Local) NotMeFilter() flow.IdentityFilter { - return filter.Not(filter.HasNodeID(l.NodeID())) +func (l *Local) NotMeFilter() flow.IdentityFilter[flow.Identity] { + return filter.Not(filter.HasNodeID[flow.Identity](l.NodeID())) } // SignFunc provides a signature oracle that given a message, a hasher, and a signing function, it diff --git a/module/local/me_nokey.go b/module/local/me_nokey.go index d9de4348dc1..7f697aec1ae 100644 --- a/module/local/me_nokey.go +++ b/module/local/me_nokey.go @@ -3,17 +3,18 @@ package local import ( 
"fmt" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" ) type LocalNoKey struct { - me *flow.Identity + me flow.IdentitySkeleton } -func NewNoKey(id *flow.Identity) (*LocalNoKey, error) { +func NewNoKey(id flow.IdentitySkeleton) (*LocalNoKey, error) { l := &LocalNoKey{ me: id, } @@ -32,8 +33,8 @@ func (l *LocalNoKey) Sign(msg []byte, hasher hash.Hasher) (crypto.Signature, err return nil, fmt.Errorf("no private key") } -func (l *LocalNoKey) NotMeFilter() flow.IdentityFilter { - return filter.Not(filter.HasNodeID(l.NodeID())) +func (l *LocalNoKey) NotMeFilter() flow.IdentityFilter[flow.Identity] { + return filter.Not(filter.HasNodeID[flow.Identity](l.NodeID())) } // SignFunc provides a signature oracle that given a message, a hasher, and a signing function, it diff --git a/module/local/me_test.go b/module/local/me_test.go index 42e46ae8c2f..825f1e9aa03 100644 --- a/module/local/me_test.go +++ b/module/local/me_test.go @@ -15,7 +15,7 @@ func TestInitializeWithMatchingKey(t *testing.T) { nodeID := unittest.IdentityFixture() nodeID.StakingPubKey = stakingPriv.PublicKey() - me, err := New(nodeID, stakingPriv) + me, err := New(nodeID.IdentitySkeleton, stakingPriv) require.NoError(t, err) require.Equal(t, nodeID.NodeID, me.NodeID()) } @@ -29,6 +29,6 @@ func TestInitializeWithMisMatchingKey(t *testing.T) { nodeID := unittest.IdentityFixture() nodeID.StakingPubKey = badPriv.PublicKey() - _, err := New(nodeID, stakingPriv) + _, err := New(nodeID.IdentitySkeleton, stakingPriv) require.Error(t, err) } diff --git a/module/mempool/README.md b/module/mempool/README.md new file mode 100644 index 00000000000..ac7d6efadbf --- /dev/null +++ b/module/mempool/README.md @@ -0,0 +1,26 @@ +# The `mempool` module + +The `mempool` module provides mempool implementations for the Flow blockchain, which +are in-memory data structures that are tasked with storing the `flow.Entity` objects. +`flow.Entity` objects are the fundamental data model of the Flow blockchain, and +every Flow primitives such as transactions, blocks, and collections are represented +as `flow.Entity` objects. + +Each mempool implementation is tasked for storing a specific type of `flow.Entity`. +As a convention, all mempools are built on top of the `stdmap.Backend` struct, which +provides a thread-safe cache implementation for storing and retrieving `flow.Entity` objects. +The primary responsibility of the `stdmap.Backend` struct is to provide thread-safety for its underlying +data model (i.e., `mempool.Backdata`) that is tasked with maintaining the actual `flow.Entity` objects. + +At the moment, the `mempool` module provides two implementations for the `mempool.Backdata`: +- `backdata.Backdata`: a map implementation for storing `flow.Entity` objects using native Go `map`s. +- `herocache.Cache`: a cache implementation for storing `flow.Entity` objects, which is a heap-optimized + cache implementation that is aims on minimizing the memory footprint of the mempool on the heap and + reducing the GC pressure. + +Note-1: by design the `mempool.Backdata` interface is **not thread-safe**. Therefore, it is the responsibility +of the `stdmap.Backend` struct to provide thread-safety for its underlying `mempool.Backdata` implementation. + +Note-2: The `herocache.Cache` implementation is several orders of magnitude faster than the `backdata.Backdata` on +high-throughput workloads. 
For read- or write-heavy workloads, the `herocache.Cache` implementation is recommended as +the underlying `mempool.Backdata` implementation. diff --git a/module/mempool/assignments.go b/module/mempool/assignments.go index 0c1b934804c..dae57fb0bb3 100644 --- a/module/mempool/assignments.go +++ b/module/mempool/assignments.go @@ -1,4 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED package mempool import ( @@ -6,29 +5,5 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// Assignments represents a concurrency-safe memory pool for chunk assignments -type Assignments interface { - - // Has checks whether the Assignment with the given hash is currently in - // the memory pool. - Has(assignmentID flow.Identifier) bool - - // Add will add the given assignment to the memory pool. It will return - // false if it was already in the mempool. - Add(assignmentFingerprint flow.Identifier, assignment *chunkmodels.Assignment) bool - - // Remove will remove the given Assignment from the memory pool; it will - // return true if the Assignment was known and removed. - Remove(assignmentID flow.Identifier) bool - - // ByID retrieve the chunk assigment with the given ID from the memory pool. - // It will return false if it was not found in the mempool. - ByID(assignmentID flow.Identifier) (*chunkmodels.Assignment, bool) - - // Size will return the current size of the memory pool. - Size() uint - - // All will retrieve all Assignments that are currently in the memory pool - // as a slice. - All() []*chunkmodels.Assignment -} +// Assignments represents a concurrency-safe memory pool for chunk assignments. +type Assignments Mempool[flow.Identifier, *chunkmodels.Assignment] diff --git a/module/mempool/backData.go b/module/mempool/backData.go index dbee603299a..d002ae85b69 100644 --- a/module/mempool/backData.go +++ b/module/mempool/backData.go @@ -1,43 +1,45 @@ package mempool -import ( - "github.com/onflow/flow-go/model/flow" -) - -// BackData represents the underlying data structure that is utilized by mempool.Backend, as the -// core structure of maintaining data on memory-pools. +// BackData represents the underlying immutable generic key-value data structure used by mempool.Backend +// as the core structure for maintaining data in memory pools. +// +// This interface provides fundamental operations for storing, retrieving, and removing data, +// but it does not support mutating already stored data. If modifications to the stored data are required, +// use [MutableBackData] instead. +// // NOTE: BackData by default is not expected to provide concurrency-safe operations. As it is just the // model layer of the mempool, the safety against concurrent operations are guaranteed by the Backend that // is the control layer. -type BackData interface { - // Has checks if backdata already contains the entity with the given identifier. - Has(entityID flow.Identifier) bool - - // Add adds the given entity to the backdata. - Add(entityID flow.Identifier, entity flow.Entity) bool +type BackData[K comparable, V any] interface { + // Has checks if backdata already stores a value under the given key. + Has(key K) bool - // Remove removes the entity with the given identifier. - Remove(entityID flow.Identifier) (flow.Entity, bool) + // Add attempts to add the given value to the backdata, without overwriting existing data. + // If a value is already stored under the input key, Add is a no-op and returns false. + // If no value is stored under the input key, Add adds the value and returns true.
+ Add(key K, value V) bool - // Adjust adjusts the entity using the given function if the given identifier can be found. - // Returns a bool which indicates whether the entity was updated as well as the updated entity. - Adjust(entityID flow.Identifier, f func(flow.Entity) flow.Entity) (flow.Entity, bool) + // Remove removes the value with the given key. + // If the key-value pair exists, returns the value and true. + // Otherwise, returns the zero value for type V and false. + Remove(key K) (V, bool) - // ByID returns the given entity from the backdata. - ByID(entityID flow.Identifier) (flow.Entity, bool) + // Get returns the value for the given key. + // Returns true if the key-value pair exists, and false otherwise. + Get(key K) (V, bool) - // Size returns the size of the backdata, i.e., total number of stored (entityId, entity) pairs. + // Size returns the number of stored key-value pairs. Size() uint - // All returns all entities stored in the backdata. - All() map[flow.Identifier]flow.Entity + // All returns all stored key-value pairs as a map. + All() map[K]V - // Identifiers returns the list of identifiers of entities stored in the backdata. - Identifiers() flow.IdentifierList + // Keys returns an unordered list of keys stored in the backdata. + Keys() []K - // Entities returns the list of entities stored in the backdata. - Entities() []flow.Entity + // Values returns an unordered list of values stored in the backdata. + Values() []V - // Clear removes all entities from the backdata. + // Clear removes all key-value pairs from the backdata. Clear() } diff --git a/module/mempool/blocks.go b/module/mempool/blocks.go deleted file mode 100644 index a91c65b9f29..00000000000 --- a/module/mempool/blocks.go +++ /dev/null @@ -1,37 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package mempool - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// Blocks represents a concurrency-safe memory pool for blocks. -type Blocks interface { - - // Has checks whether the block with the given hash is currently in - // the memory pool. - Has(blockID flow.Identifier) bool - - // Add will add the given block to the memory pool. It will return - // false if it was already in the mempool. - Add(block *flow.Block) bool - - // Remove will remove the given block from the memory pool; it will - // will return true if the block was known and removed. - Remove(blockID flow.Identifier) bool - - // ByID retrieve the block with the given ID from the memory pool. - // It will return false if it was not found in the mempool. - ByID(blockID flow.Identifier) (*flow.Block, bool) - - // Size will return the current size of the memory pool. - Size() uint - - // All will retrieve all blocks that are currently in the memory pool - // as a slice. - All() []*flow.Block - - // Hash will return a hash of the contents of the memory pool. - Hash() flow.Identifier -} diff --git a/module/mempool/chunk_data_packs.go b/module/mempool/chunk_data_packs.go deleted file mode 100644 index 9e04725c905..00000000000 --- a/module/mempool/chunk_data_packs.go +++ /dev/null @@ -1,34 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package mempool - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// ChunkDataPacks represents a concurrency-safe memory pool for chunk data packs. -type ChunkDataPacks interface { - - // Has checks whether the ChunkDataPack with the given chunkID is currently in - // the memory pool. - Has(chunkID flow.Identifier) bool - - // Add will add the given chunk datapack to the memory pool. 
It will return - // false if it was already in the mempool. - Add(cdp *flow.ChunkDataPack) bool - - // Remove will remove the given ChunkDataPack from the memory pool; it will - // return true if the ChunkDataPack was known and removed. - Remove(chunkID flow.Identifier) bool - - // ByChunkID retrieve the chunk datapacke with the given chunk ID from the memory - // pool. It will return false if it was not found in the mempool. - ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, bool) - - // Size will return the current size of the memory pool. - Size() uint - - // All will retrieve all ChunkDataPacks that are currently in the memory pool - // as a slice. - All() []*flow.ChunkDataPack -} diff --git a/module/mempool/chunk_statuses.go b/module/mempool/chunk_statuses.go index 6068554abe1..e8bd63984c5 100644 --- a/module/mempool/chunk_statuses.go +++ b/module/mempool/chunk_statuses.go @@ -6,26 +6,4 @@ import ( ) // ChunkStatuses is an in-memory storage for maintaining the chunk status data objects. -type ChunkStatuses interface { - // Get returns a chunk status by its chunk index and result ID. - // There is a one-to-one correspondence between the chunk statuses in memory, and - // their pair of chunk index and result id. - Get(chunkIndex uint64, resultID flow.Identifier) (*verification.ChunkStatus, bool) - - // Add provides insertion functionality into the memory pool. - // The insertion is only successful if there is no duplicate status with the same - // chunk ID in the memory. Otherwise, it aborts the insertion and returns false. - Add(status *verification.ChunkStatus) bool - - // Remove provides deletion functionality from the memory pool based on the pair of - // chunk index and result id. - // If there is a chunk status associated with this pair, Remove removes it and returns true. - // Otherwise, it returns false. - Remove(chunkIndex uint64, resultID flow.Identifier) bool - - // All returns all chunk statuses stored in this memory pool. - All() []*verification.ChunkStatus - - // Size returns total number of chunk statuses in the memory pool. - Size() uint -} +type ChunkStatuses Mempool[flow.Identifier, *verification.ChunkStatus] diff --git a/module/mempool/collections.go b/module/mempool/collections.go deleted file mode 100644 index f09d9b6e5b2..00000000000 --- a/module/mempool/collections.go +++ /dev/null @@ -1,34 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package mempool - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// Collections represents a concurrency-safe memory pool for collections. -type Collections interface { - - // Has checks whether the collection with the given hash is currently in - // the memory pool. - Has(collID flow.Identifier) bool - - // Add will add the given collection to the memory pool. It will return - // false if it was already in the mempool. - Add(coll *flow.Collection) bool - - // Remove will remove the given collection from the memory pool; it will - // return true if the collection was known and removed. - Remove(collID flow.Identifier) bool - - // ByID retrieve the collection with the given ID from the memory pool. - // It will return false if it was not found in the mempool. - ByID(collID flow.Identifier) (*flow.Collection, bool) - - // Size will return the current size of the memory pool. - Size() uint - - // All will retrieve all collections that are currently in the memory pool - // as a slice. 
- All() []*flow.Collection -} diff --git a/module/mempool/common.go b/module/mempool/common.go index 7b7bed05f18..e0122bf5cab 100644 --- a/module/mempool/common.go +++ b/module/mempool/common.go @@ -1,8 +1,6 @@ package mempool -import "github.com/onflow/flow-go/model/flow" - // OnEjection is a callback which a mempool executes on ejecting // one of its elements. The callbacks are executed from within the thread // that serves the mempool. Implementations should be non-blocking. -type OnEjection func(flow.Entity) +type OnEjection[V any] func(V) diff --git a/module/mempool/consensus/Fork-Aware_Mempools.md b/module/mempool/consensus/Fork-Aware_Mempools.md index c3ff47337bc..48b15d3aebc 100644 --- a/module/mempool/consensus/Fork-Aware_Mempools.md +++ b/module/mempool/consensus/Fork-Aware_Mempools.md @@ -16,7 +16,7 @@ We use the following notation * an Execution Receipt `r` has the following fields: * `PreviousResultID` denotes the result `ID` for the parent block that has been used as starting state for computing the current block -![Execution Tree](/docs/ExecutionResultTrees.png) +![Execution Tree](/docs/images/ExecutionResultTrees.png) ### Criteria for Incorporating Execution Receipts @@ -45,7 +45,7 @@ Note that the condition cannot be relaxed to: "there must be any ExecutionResult As illustrated by the figure above, the ExecutionResults form a tree, with the last sealed result as the root. * All Execution Receipts committing to the same result from an [equivalence class](https://en.wikipedia.org/wiki/Equivalence_class) and can be -represented as one vertex in the [Execution Tree](/docs/ExecutionResultTrees.png). +represented as one vertex in the [Execution Tree](/docs/images/ExecutionResultTrees.png). * Consider the results `r[A]` and `r[B]`. As `r[A]`'s output state is used as the staring state to compute block `B`, we can say: "from result `r[A]` `computation` (denoted by symbol `Σ`) leads to `r[B]`". Formally: ``` @@ -112,7 +112,7 @@ When searching the tree in step 1, we skip all receipts that are in `M` on the f ## Further reading -* [Lecture notes on directed Graphs](http://www.orcca.on.ca/~yxie/courses/cs2210b-2011/htmls/notes/16-directedgraph.pdf) +* [Lecture notes on directed Graphs](http://web.archive.org/web/20180219025720/https://orcca.on.ca/~yxie/courses/cs2210b-2011/htmls/notes/16-directedgraph.pdf) * [Graph Algorithms and Network Flows](https://hochbaum.ieor.berkeley.edu/files/ieor266-2014.pdf) * Paper: [The Serial Transitive Closure Problem for Trees](https://www.math.ucsd.edu/~sbuss/ResearchWeb/transclosure/paper.pdf) diff --git a/module/mempool/consensus/exec_fork_suppressor.go b/module/mempool/consensus/exec_fork_suppressor.go index d08f71cdfa2..209b0337c3a 100644 --- a/module/mempool/consensus/exec_fork_suppressor.go +++ b/module/mempool/consensus/exec_fork_suppressor.go @@ -2,11 +2,10 @@ package consensus import ( "encoding/json" - "errors" "fmt" "sync" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "go.uber.org/atomic" @@ -15,7 +14,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" + "github.com/onflow/flow-go/storage/store" ) // ExecForkSuppressor is a wrapper around a conventional mempool.IncorporatedResultSeals @@ -40,44 +39,55 @@ import ( // // Implementation is concurrency safe. 
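Before the `ExecForkSuppressor` struct below, a toy illustration of the core idea may help: two seals for the same block whose state transitions disagree constitute evidence of an execution fork. This is a simplified sketch with hypothetical types, not the file's actual `hasConsistentStateTransitions` check:

```go
package main

import "fmt"

type stateCommitment [32]byte

// sealInfo is a hypothetical stand-in for the start/final state commitments
// carried by an incorporated result seal.
type sealInfo struct {
	blockID    string
	startState stateCommitment
	finalState stateCommitment
}

// consistentStateTransitions reports whether two seals for the same block
// describe the same state transition; false indicates an execution fork.
func consistentStateTransitions(a, b sealInfo) bool {
	return a.startState == b.startState && a.finalState == b.finalState
}

func main() {
	a := sealInfo{blockID: "B", startState: stateCommitment{1}, finalState: stateCommitment{2}}
	b := sealInfo{blockID: "B", startState: stateCommitment{1}, finalState: stateCommitment{3}}
	if !consistentStateTransitions(a, b) {
		fmt.Println("execution fork detected: conflicting seals for block", a.blockID)
	}
}
```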
type ExecForkSuppressor struct { - mutex sync.RWMutex - seals mempool.IncorporatedResultSeals - sealsForBlock map[flow.Identifier]sealSet // map BlockID -> set of IncorporatedResultSeal - byHeight map[uint64]map[flow.Identifier]struct{} // map height -> set of executed block IDs at height - lowestHeight uint64 - execForkDetected atomic.Bool - onExecFork ExecForkActor - db *badger.DB - log zerolog.Logger + mutex sync.RWMutex + seals mempool.IncorporatedResultSeals + sealsForBlock map[flow.Identifier]sealSet // map BlockID -> set of IncorporatedResultSeal + byHeight map[uint64]map[flow.Identifier]struct{} // map height -> set of executed block IDs at height + lowestHeight uint64 + execForkDetected atomic.Bool + onExecFork ExecForkActor + execForkEvidenceStore storage.ExecutionForkEvidence + lockManager storage.LockManager + log zerolog.Logger } var _ mempool.IncorporatedResultSeals = (*ExecForkSuppressor)(nil) -// sealSet is a set of seals; internally represented as a map from sealID -> to seal +// sealSet is a set of seals; internally represented as a map from incorporated result ID to seal type sealSet map[flow.Identifier]*flow.IncorporatedResultSeal // sealsList is a list of seals type sealsList []*flow.IncorporatedResultSeal -func NewExecStateForkSuppressor(seals mempool.IncorporatedResultSeals, onExecFork ExecForkActor, db *badger.DB, log zerolog.Logger) (*ExecForkSuppressor, error) { - conflictingSeals, err := checkExecutionForkEvidence(db) +func NewExecStateForkSuppressor( + seals mempool.IncorporatedResultSeals, + onExecFork ExecForkActor, + db storage.DB, + lockManager storage.LockManager, + log zerolog.Logger, +) (*ExecForkSuppressor, error) { + executionForkEvidenceStore := store.NewExecutionForkEvidence(db) + + conflictingSeals, err := executionForkEvidenceStore.Retrieve() if err != nil { return nil, fmt.Errorf("failed to interface with storage: %w", err) } + execForkDetectedFlag := len(conflictingSeals) != 0 if execForkDetectedFlag { onExecFork(conflictingSeals) } wrapper := ExecForkSuppressor{ - mutex: sync.RWMutex{}, - seals: seals, - sealsForBlock: make(map[flow.Identifier]sealSet), - byHeight: make(map[uint64]map[flow.Identifier]struct{}), - execForkDetected: *atomic.NewBool(execForkDetectedFlag), - onExecFork: onExecFork, - db: db, - log: log.With().Str("mempool", "ExecForkSuppressor").Logger(), + mutex: sync.RWMutex{}, + seals: seals, + sealsForBlock: make(map[flow.Identifier]sealSet), + byHeight: make(map[uint64]map[flow.Identifier]struct{}), + execForkDetected: *atomic.NewBool(execForkDetectedFlag), + onExecFork: onExecFork, + execForkEvidenceStore: executionForkEvidenceStore, + lockManager: lockManager, + log: log.With().Str("mempool", "ExecForkSuppressor").Logger(), } return &wrapper, nil @@ -133,7 +143,7 @@ func (s *ExecForkSuppressor) Add(newSeal *flow.IncorporatedResultSeal) (bool, er blockSeals = make(sealSet) s.sealsForBlock[blockID] = blockSeals } - blockSeals[newSeal.ID()] = newSeal + blockSeals[newSeal.IncorporatedResultID()] = newSeal // cache block height to prune additional index by height blocksAtHeight, found := s.byHeight[newSeal.Header.Height] @@ -166,15 +176,14 @@ func (s *ExecForkSuppressor) All() []*flow.IncorporatedResultSeal { return s.filterConflictingSeals(sealsByBlockID) } -// ByID returns an IncorporatedResultSeal by its ID. -// The IncorporatedResultSeal's ID is the same as IncorporatedResult's ID, -// so this call essentially is to find the seal for the incorporated result in the mempool.
+// Get returns an IncorporatedResultSeal by IncorporatedResult's ID. +// Essentially, this call finds the seal for the incorporated result in the mempool. // Note: This call might crash if the block of the seal has multiple seals in mempool for conflicting // incorporated results. Usually the builder will call this method to find a seal for an incorporated // result, so the builder might crash if multiple conflicting seals exist. -func (s *ExecForkSuppressor) ByID(identifier flow.Identifier) (*flow.IncorporatedResultSeal, bool) { +func (s *ExecForkSuppressor) Get(identifier flow.Identifier) (*flow.IncorporatedResultSeal, bool) { s.mutex.RLock() - seal, found := s.seals.ByID(identifier) + seal, found := s.seals.Get(identifier) // if we haven't found seal in underlying storage - exit early if !found { s.mutex.RUnlock() @@ -201,12 +210,12 @@ func (s *ExecForkSuppressor) ByID(identifier flow.Identifier) (*flow.Incorporate return seals[0], true } -// Remove removes the IncorporatedResultSeal with id from the mempool +// Remove removes the IncorporatedResultSeal by IncorporatedResult ID from the mempool. func (s *ExecForkSuppressor) Remove(id flow.Identifier) bool { s.mutex.Lock() defer s.mutex.Unlock() - seal, found := s.seals.ByID(id) + seal, found := s.seals.Get(id) if found { s.seals.Remove(id) set, found := s.sealsForBlock[seal.Seal.BlockID] @@ -337,41 +346,6 @@ func hasConsistentStateTransitions(irSeal, irSeal2 *flow.IncorporatedResultSeal) return true } -// checkExecutionForkDetected checks the database whether evidence -// about an execution fork is stored. Returns the stored evidence. -func checkExecutionForkEvidence(db *badger.DB) ([]*flow.IncorporatedResultSeal, error) { - var conflictingSeals []*flow.IncorporatedResultSeal - err := db.View(func(tx *badger.Txn) error { - err := operation.RetrieveExecutionForkEvidence(&conflictingSeals)(tx) - if errors.Is(err, storage.ErrNotFound) { - return nil // no evidence in data base; conflictingSeals is still nil slice - } - if err != nil { - return fmt.Errorf("failed to load evidence whether or not an execution fork occured: %w", err) - } - return nil - }) - return conflictingSeals, err -} - -// storeExecutionForkEvidence stores the provided seals in the database -// as evidence for an execution fork. -func storeExecutionForkEvidence(conflictingSeals []*flow.IncorporatedResultSeal, db *badger.DB) error { - err := operation.RetryOnConflict(db.Update, func(tx *badger.Txn) error { - err := operation.InsertExecutionForkEvidence(conflictingSeals)(tx) - if errors.Is(err, storage.ErrAlreadyExists) { - // some evidence about execution fork already stored; - // we only keep the first evidence => noting more to do - return nil - } - if err != nil { - return fmt.Errorf("failed to store evidence about execution fork: %w", err) - } - return nil - }) - return err -} - // filterConflictingSeals performs filtering of provided seals by checking if there are conflicting seals for same block. // For every block we check if first seal has same state transitions as others. Multiple seals for same block are allowed // but their state transitions should be the same. Upon detecting seal with inconsistent state transition we will clear our mempool, @@ -395,9 +369,13 @@ func (s *ExecForkSuppressor) filterConflictingSeals(sealsByBlockID map[flow.Iden s.execForkDetected.Store(true) s.Clear() conflictingSeals = append(sealsList{candidateSeal}, conflictingSeals...)
- err := storeExecutionForkEvidence(conflictingSeals, s.db) + + // Acquire lock and store execution fork evidence + err := storage.WithLock(s.lockManager, storage.LockInsertExecutionForkEvidence, func(lctx lockctx.Context) error { + return s.execForkEvidenceStore.StoreIfNotExists(lctx, conflictingSeals) + }) if err != nil { - panic("failed to store execution fork evidence") + s.log.Fatal().Msg("failed to store execution fork evidence") } s.onExecFork(conflictingSeals) return nil diff --git a/module/mempool/consensus/exec_fork_suppressor_test.go b/module/mempool/consensus/exec_fork_suppressor_test.go index 86e87224149..546867b5c7a 100644 --- a/module/mempool/consensus/exec_fork_suppressor_test.go +++ b/module/mempool/consensus/exec_fork_suppressor_test.go @@ -4,7 +4,6 @@ import ( "os" "testing" - "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -15,13 +14,15 @@ import ( actormock "github.com/onflow/flow-go/module/mempool/consensus/mock" poolmock "github.com/onflow/flow-go/module/mempool/mock" "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/storage" mockstorage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/dbtest" "github.com/onflow/flow-go/utils/unittest" ) // Test_Construction verifies correctness of the initial size and limit values func Test_Construction(t *testing.T) { - WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock) { + WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActor) { wrappedMempool.On("Size").Return(uint(0)).Once() require.Equal(t, uint(0), wrapper.Size()) wrappedMempool.On("Limit").Return(uint(0)).Once() @@ -32,7 +33,7 @@ func Test_Construction(t *testing.T) { // Test_Size checks that ExecForkSuppressor is reporting the size of the wrapped mempool func Test_Size(t *testing.T) { - WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock) { + WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActor) { wrappedMempool.On("Size").Return(uint(139)).Once() require.Equal(t, uint(139), wrapper.Size()) wrappedMempool.AssertExpectations(t) @@ -41,7 +42,7 @@ func Test_Size(t *testing.T) { // Test_Limit checks that ExecForkSuppressor is reporting the capacity limit of the wrapped mempool func Test_Limit(t *testing.T) { - WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock) { + WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActor) { wrappedMempool.On("Limit").Return(uint(227)).Once() require.Equal(t, uint(227), wrapper.Limit()) wrappedMempool.AssertExpectations(t) @@ -52,7 +53,7 @@ func Test_Limit(t *testing.T) { // - the wrapper also clears the wrapped mempool; // - the reported mempool size, _after_ clearing should be zero func Test_Clear(t *testing.T) { - WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock) { + 
WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActor) { wrappedMempool.On("Clear").Return().Once() wrapper.Clear() @@ -64,7 +65,7 @@ func Test_Clear(t *testing.T) { // Test_All checks that ExecForkSuppressor.All() is returning the elements of the wrapped mempool func Test_All(t *testing.T) { - WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock) { + WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActor) { expectedSeals := unittest.IncorporatedResultSeal.Fixtures(7) wrappedMempool.On("All").Return(expectedSeals) retrievedSeals := wrapper.All() @@ -84,7 +85,7 @@ func Test_All(t *testing.T) { // same result as (1) and incorporated in same block B1; // should be automatically de-duplicated (irrespective of approvals on the seal). func Test_Add(t *testing.T) { - WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock) { + WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActor) { for _, block := range unittest.BlockFixtures(2) { result := unittest.ExecutionResultFixture(unittest.WithBlock(block)) @@ -100,7 +101,7 @@ func Test_Add(t *testing.T) { // the value for IncorporatedResultSeal.IncorporatedResult.IncorporatedBlockID is randomly // generated and therefore, will be different from for irSeal1 irSeal2 := unittest.IncorporatedResultSeal.Fixture(unittest.IncorporatedResultSeal.WithResult(result)) - require.False(t, irSeal1.ID() == irSeal2.ID()) // incorporated in different block => different seal ID expected + require.False(t, irSeal1.IncorporatedResultID() == irSeal2.IncorporatedResultID()) // incorporated in different block => different result ID expected wrappedMempool.On("Add", irSeal2).Return(true, nil).Once() added, err = wrapper.Add(irSeal2) require.NoError(t, err) @@ -112,8 +113,8 @@ func Test_Add(t *testing.T) { unittest.IncorporatedResultSeal.WithResult(result), unittest.IncorporatedResultSeal.WithIncorporatedBlockID(irSeal1.IncorporatedResult.IncorporatedBlockID), ) - require.True(t, irSeal1.ID() == irSeal3.ID()) // same result incorporated same block as (1) => identical ID expected - wrappedMempool.On("Add", irSeal3).Return(false, nil).Once() // deduplicate + require.True(t, irSeal1.IncorporatedResultID() == irSeal3.IncorporatedResultID()) // same result incorporated same block as (1) => identical IncorporatedResultID expected + wrappedMempool.On("Add", irSeal3).Return(false, nil).Once() // deduplicate added, err = wrapper.Add(irSeal3) require.NoError(t, err) require.False(t, added) @@ -125,26 +126,26 @@ func Test_Add(t *testing.T) { // Test_Remove checks that ExecForkSuppressor.Remove() // - delegates the call to the underlying mempool func Test_Remove(t *testing.T) { - WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock) { + WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActor) { // element is in wrapped mempool: Remove should be called seal := 
unittest.IncorporatedResultSeal.Fixture() wrappedMempool.On("Add", seal).Return(true, nil).Once() - wrappedMempool.On("ByID", seal.ID()).Return(seal, true) + wrappedMempool.On("Get", seal.IncorporatedResultID()).Return(seal, true) added, err := wrapper.Add(seal) require.NoError(t, err) require.True(t, added) - wrappedMempool.On("ByID", seal.ID()).Return(seal, true) - wrappedMempool.On("Remove", seal.ID()).Return(true).Once() - removed := wrapper.Remove(seal.ID()) + wrappedMempool.On("Get", seal.IncorporatedResultID()).Return(seal, true) + wrappedMempool.On("Remove", seal.IncorporatedResultID()).Return(true).Once() + removed := wrapper.Remove(seal.IncorporatedResultID()) require.True(t, removed) wrappedMempool.AssertExpectations(t) // element _not_ in wrapped mempool: Remove might be called seal = unittest.IncorporatedResultSeal.Fixture() - wrappedMempool.On("ByID", seal.ID()).Return(seal, false) - wrappedMempool.On("Remove", seal.ID()).Return(false).Maybe() - removed = wrapper.Remove(seal.ID()) + wrappedMempool.On("Get", seal.IncorporatedResultID()).Return(seal, false) + wrappedMempool.On("Remove", seal.IncorporatedResultID()).Return(false).Maybe() + removed = wrapper.Remove(seal.IncorporatedResultID()) require.False(t, removed) wrappedMempool.AssertExpectations(t) }) @@ -153,7 +154,7 @@ func Test_Remove(t *testing.T) { // Test_RejectInvalidSeals verifies that ExecForkSuppressor rejects seals whose // which don't have a chunk (i.e. their start and end state of the result cannot be determined) func Test_RejectInvalidSeals(t *testing.T) { - WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock) { + WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActor) { irSeal := unittest.IncorporatedResultSeal.Fixture() irSeal.IncorporatedResult.Result.Chunks = make(flow.ChunkList, 0) irSeal.Seal.FinalState = flow.DummyStateCommitment @@ -173,7 +174,7 @@ func Test_RejectInvalidSeals(t *testing.T) { // This logic has to be executed for all queries(`ByID`, `All`) func Test_ConflictingResults(t *testing.T) { assertConflictingResult := func(t *testing.T, action func(irSeals []*flow.IncorporatedResultSeal, conflictingSeal *flow.IncorporatedResultSeal, wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals)) { - WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock) { + WithExecStateForkSuppressor(t, func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActor) { // add 3 random irSeals irSeals := unittest.IncorporatedResultSeal.Fixtures(3) for _, s := range irSeals { @@ -204,8 +205,8 @@ func Test_ConflictingResults(t *testing.T) { }).Return().Once() action(irSeals, conflictingSeal, wrapper, wrappedMempool) - wrappedMempool.On("ByID", conflictingSeal.ID()).Return(nil, false).Once() - byID, found := wrapper.ByID(conflictingSeal.ID()) + wrappedMempool.On("Get", conflictingSeal.IncorporatedResultID()).Return(nil, false).Once() + byID, found := wrapper.Get(conflictingSeal.IncorporatedResultID()) require.False(t, found) require.Nil(t, byID) @@ -233,8 +234,8 @@ func Test_ConflictingResults(t *testing.T) { }) t.Run("by-id-query", func(t *testing.T) { assertConflictingResult(t, func(irSeals []*flow.IncorporatedResultSeal, 
conflictingSeal *flow.IncorporatedResultSeal, wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals) { - wrappedMempool.On("ByID", conflictingSeal.ID()).Return(conflictingSeal, true).Once() - byID, found := wrapper.ByID(conflictingSeal.ID()) + wrappedMempool.On("Get", conflictingSeal.IncorporatedResultID()).Return(conflictingSeal, true).Once() + byID, found := wrapper.Get(conflictingSeal.IncorporatedResultID()) require.False(t, found) require.Nil(t, byID) }) @@ -245,64 +246,71 @@ func Test_ConflictingResults(t *testing.T) { // Test_ForkDetectionPersisted verifies that, when ExecForkSuppressor detects a fork, this information is // persisted in the data base func Test_ForkDetectionPersisted(t *testing.T) { - unittest.RunWithTempDir(t, func(dir string) { - db := unittest.BadgerDB(t, dir) - defer db.Close() + block := unittest.BlockFixture() + sealA := unittest.IncorporatedResultSeal.Fixture(unittest.IncorporatedResultSeal.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(block)))) + sealB := unittest.IncorporatedResultSeal.Fixture(unittest.IncorporatedResultSeal.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(block)))) - // initialize ExecForkSuppressor - wrappedMempool := &poolmock.IncorporatedResultSeals{} - execForkActor := &actormock.ExecForkActorMock{} - wrapper, _ := NewExecStateForkSuppressor(wrappedMempool, execForkActor.OnExecFork, db, zerolog.New(os.Stderr)) + dbtest.RunFuncsWithNewDBHandle( + t, - // add seal - block := unittest.BlockFixture() - sealA := unittest.IncorporatedResultSeal.Fixture(unittest.IncorporatedResultSeal.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&block)))) - wrappedMempool.On("Add", sealA).Return(true, nil).Once() - _, _ = wrapper.Add(sealA) + // This function stores conflicting seals to the underlying database. + func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() - // add conflicting seal - sealB := unittest.IncorporatedResultSeal.Fixture(unittest.IncorporatedResultSeal.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&block)))) - wrappedMempool.On("Add", sealB).Return(true, nil).Once() - added, _ := wrapper.Add(sealB) // should be rejected because it is conflicting with sealA - require.True(t, added) + // initialize ExecForkSuppressor + wrappedMempool := &poolmock.IncorporatedResultSeals{} + execForkActor := &actormock.ExecForkActor{} + wrapper, _ := NewExecStateForkSuppressor(wrappedMempool, execForkActor.OnExecFork, db, lockManager, zerolog.New(os.Stderr)) - wrappedMempool.On("ByID", sealA.ID()).Return(sealA, true).Once() - execForkActor.On("OnExecFork", mock.Anything).Run(func(args mock.Arguments) { - conflictingSeals := args.Get(0).([]*flow.IncorporatedResultSeal) - require.ElementsMatch(t, []*flow.IncorporatedResultSeal{sealA, sealB}, conflictingSeals) - }).Return().Once() - wrappedMempool.On("Clear").Return().Once() - // try to query, at this point we will detect a conflicting seal - wrapper.ByID(sealA.ID()) + // add seal + wrappedMempool.On("Add", sealA).Return(true, nil).Once() + _, _ = wrapper.Add(sealA) - wrappedMempool.AssertExpectations(t) - execForkActor.AssertExpectations(t) - - // crash => re-initialization - db.Close() - db2 := unittest.BadgerDB(t, dir) - wrappedMempool2 := &poolmock.IncorporatedResultSeals{} - execForkActor2 := &actormock.ExecForkActorMock{} - execForkActor2.On("OnExecFork", mock.Anything). 
- Run(func(args mock.Arguments) { + // add conflicting seal + wrappedMempool.On("Add", sealB).Return(true, nil).Once() + added, _ := wrapper.Add(sealB) // should be rejected because it is conflicting with sealA + require.True(t, added) + + wrappedMempool.On("Get", sealA.IncorporatedResultID()).Return(sealA, true).Once() + execForkActor.On("OnExecFork", mock.Anything).Run(func(args mock.Arguments) { conflictingSeals := args.Get(0).([]*flow.IncorporatedResultSeal) require.ElementsMatch(t, []*flow.IncorporatedResultSeal{sealA, sealB}, conflictingSeals) }).Return().Once() - wrapper2, _ := NewExecStateForkSuppressor(wrappedMempool2, execForkActor2.OnExecFork, db2, zerolog.New(os.Stderr)) - - // add another (non-conflicting) seal to ExecForkSuppressor - // fail test if seal is added to wrapped mempool - wrappedMempool2.On("Add", mock.Anything). - Run(func(args mock.Arguments) { require.Fail(t, "seal was added to wrapped mempool") }). - Return(true, nil).Maybe() - added, _ = wrapper2.Add(unittest.IncorporatedResultSeal.Fixture()) - require.False(t, added) - wrappedMempool2.On("Size").Return(uint(0)) // we asserted that Clear was called on wrappedMempool - require.Equal(t, uint(0), wrapper2.Size()) + wrappedMempool.On("Clear").Return().Once() + // try to query, at this point we will detect a conflicting seal + wrapper.Get(sealA.IncorporatedResultID()) - wrappedMempool2.AssertExpectations(t) - execForkActor2.AssertExpectations(t) - }) + wrappedMempool.AssertExpectations(t) + execForkActor.AssertExpectations(t) + }, + + // This function retrieves conflicting seals from the same underlying database with a new instance of storage.DB. + func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + + wrappedMempool2 := &poolmock.IncorporatedResultSeals{} + execForkActor2 := &actormock.ExecForkActor{} + execForkActor2.On("OnExecFork", mock.Anything). + Run(func(args mock.Arguments) { + conflictingSeals := args.Get(0).([]*flow.IncorporatedResultSeal) + require.ElementsMatch(t, []*flow.IncorporatedResultSeal{sealA, sealB}, conflictingSeals) + }).Return().Once() + wrapper2, _ := NewExecStateForkSuppressor(wrappedMempool2, execForkActor2.OnExecFork, db, lockManager, zerolog.New(os.Stderr)) + + // add another (non-conflicting) seal to ExecForkSuppressor + // fail test if seal is added to wrapped mempool + wrappedMempool2.On("Add", mock.Anything). + Run(func(mock.Arguments) { require.Fail(t, "seal was added to wrapped mempool") }). 
+ Return(true, nil).Maybe() + added, _ := wrapper2.Add(unittest.IncorporatedResultSeal.Fixture()) + require.False(t, added) + wrappedMempool2.On("Size").Return(uint(0)) // we asserted that Clear was called on wrappedMempool + require.Equal(t, uint(0), wrapper2.Size()) + + wrappedMempool2.AssertExpectations(t) + execForkActor2.AssertExpectations(t) + }, + ) } // Test_AddRemove_SmokeTest tests a real system of stdmap.IncorporatedResultSeals mempool @@ -312,9 +320,11 @@ func Test_AddRemove_SmokeTest(t *testing.T) { onExecFork := func([]*flow.IncorporatedResultSeal) { require.Fail(t, "no call to onExecFork expected ") } - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + wrappedMempool := stdmap.NewIncorporatedResultSeals(100) - wrapper, err := NewExecStateForkSuppressor(wrappedMempool, onExecFork, db, zerolog.New(os.Stderr)) + wrapper, err := NewExecStateForkSuppressor(wrappedMempool, onExecFork, db, lockManager, zerolog.New(os.Stderr)) require.NoError(t, err) require.NotNil(t, wrapper) @@ -349,7 +359,9 @@ func Test_AddRemove_SmokeTest(t *testing.T) { // ExecForkSuppressor. We wrap stdmap.IncorporatedResultSeals with consensus.IncorporatedResultSeals which is wrapped with ExecForkSuppressor. // Test adding conflicting seals with different number of matching receipts. func Test_ConflictingSeal_SmokeTest(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + executingForkDetected := atomic.NewBool(false) onExecFork := func([]*flow.IncorporatedResultSeal) { executingForkDetected.Store(true) @@ -358,7 +370,7 @@ func Test_ConflictingSeal_SmokeTest(t *testing.T) { rawMempool := stdmap.NewIncorporatedResultSeals(100) receiptsDB := mockstorage.NewExecutionReceipts(t) wrappedMempool := NewIncorporatedResultSeals(rawMempool, receiptsDB) - wrapper, err := NewExecStateForkSuppressor(wrappedMempool, onExecFork, db, zerolog.New(os.Stderr)) + wrapper, err := NewExecStateForkSuppressor(wrappedMempool, onExecFork, db, lockManager, zerolog.New(os.Stderr)) require.NoError(t, err) require.NotNil(t, wrapper) @@ -366,7 +378,7 @@ func Test_ConflictingSeal_SmokeTest(t *testing.T) { // two of them are non-conflicting but for same block and one is conflicting. block := unittest.BlockFixture() - sealA := unittest.IncorporatedResultSeal.Fixture(unittest.IncorporatedResultSeal.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&block)))) + sealA := unittest.IncorporatedResultSeal.Fixture(unittest.IncorporatedResultSeal.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(block)))) _, _ = wrapper.Add(sealA) // different seal but for same result @@ -390,7 +402,7 @@ func Test_ConflictingSeal_SmokeTest(t *testing.T) { require.ElementsMatch(t, []*flow.IncorporatedResultSeal{sealA, sealB}, seals) // add conflicting seal, which doesn't have any receipts yet - conflictingSeal := unittest.IncorporatedResultSeal.Fixture(unittest.IncorporatedResultSeal.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(&block)))) + conflictingSeal := unittest.IncorporatedResultSeal.Fixture(unittest.IncorporatedResultSeal.WithResult(unittest.ExecutionResultFixture(unittest.WithBlock(block)))) _, _ = wrapper.Add(conflictingSeal) // conflicting seal doesn't have any receipts yet @@ -419,11 +431,13 @@ func Test_ConflictingSeal_SmokeTest(t *testing.T) { // 2. 
wraps `wrappedMempool` in a ExecForkSuppressor // 3. ensures that initializing the wrapper did not error // 4. executes the `testLogic` -func WithExecStateForkSuppressor(t testing.TB, testLogic func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActorMock)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { +func WithExecStateForkSuppressor(t *testing.T, testLogic func(wrapper *ExecForkSuppressor, wrappedMempool *poolmock.IncorporatedResultSeals, execForkActor *actormock.ExecForkActor)) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + wrappedMempool := &poolmock.IncorporatedResultSeals{} - execForkActor := &actormock.ExecForkActorMock{} - wrapper, err := NewExecStateForkSuppressor(wrappedMempool, execForkActor.OnExecFork, db, zerolog.New(os.Stderr)) + execForkActor := &actormock.ExecForkActor{} + wrapper, err := NewExecStateForkSuppressor(wrappedMempool, execForkActor.OnExecFork, db, lockManager, zerolog.New(os.Stderr)) require.NoError(t, err) require.NotNil(t, wrapper) testLogic(wrapper, wrappedMempool, execForkActor) diff --git a/module/mempool/consensus/execution_tree.go b/module/mempool/consensus/execution_tree.go index 50e10d04095..34280e51a1b 100644 --- a/module/mempool/consensus/execution_tree.go +++ b/module/mempool/consensus/execution_tree.go @@ -6,6 +6,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/forest" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool" ) @@ -165,9 +166,8 @@ func (et *ExecutionTree) ReachableReceipts(resultID flow.Identifier, blockFilter } receipts := make([]*flow.ExecutionReceipt, 0, 10) // we expect just below 10 execution Receipts per call - receipts = et.reachableReceipts(vertex, blockFilter, receiptFilter, receipts) - return receipts, nil + return et.reachableReceipts(vertex, blockFilter, receiptFilter, receipts) } // reachableReceipts implements a depth-first search over the Execution Tree. @@ -175,15 +175,20 @@ func (et *ExecutionTree) ReachableReceipts(resultID flow.Identifier, blockFilter // For each result (vertex in the Execution Tree), which the tree search visits, the known receipts are inspected. // Receipts that pass the receiptFilter are appended to `receipts` in the order they are encountered during the // tree search. the resulting slice is returned. -func (et *ExecutionTree) reachableReceipts(vertex forest.Vertex, blockFilter mempool.BlockFilter, receiptFilter mempool.ReceiptFilter, receipts []*flow.ExecutionReceipt) []*flow.ExecutionReceipt { +// +// No errors are expected during normal operation. 
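The rewritten `reachableReceipts` below threads both an accumulator and an error through a recursive depth-first walk, pruning subtrees rejected by the block filter. A self-contained sketch of that traversal pattern on toy types (the real code walks `forest.Vertex` values; everything here is illustrative):

```go
package main

import "fmt"

type vertex struct {
	id       string
	include  bool // stands in for the blockFilter decision
	children []*vertex
}

// collect appends the ids of visited vertices, pruning any subtree whose root
// is filtered out, and forwards errors from recursive calls unchanged.
func collect(v *vertex, acc []string) ([]string, error) {
	if !v.include {
		return acc, nil // pruned: neither this vertex nor its descendants are visited
	}
	acc = append(acc, v.id)
	var err error
	for _, c := range v.children {
		acc, err = collect(c, acc)
		if err != nil {
			return nil, fmt.Errorf("search failed below %s: %w", v.id, err)
		}
	}
	return acc, nil
}

func main() {
	root := &vertex{id: "r[A]", include: true, children: []*vertex{
		{id: "r[B]", include: true},
		{id: "r[C]", include: false, children: []*vertex{{id: "r[D]", include: true}}},
	}}
	out, _ := collect(root, nil)
	fmt.Println(out) // [r[A] r[B]] -- r[C]'s subtree is pruned
}
```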
+func (et *ExecutionTree) reachableReceipts(vertex forest.Vertex, blockFilter mempool.BlockFilter, receiptFilter mempool.ReceiptFilter, receipts []*flow.ExecutionReceipt) ([]*flow.ExecutionReceipt, error) { receiptsForResult := vertex.(*ReceiptsOfSameResult) if !blockFilter(receiptsForResult.blockHeader) { - return receipts + return receipts, nil } // add all Execution Receipts for result to `receipts` provided they pass the receiptFilter for _, recMeta := range receiptsForResult.receipts { - receipt := flow.ExecutionReceiptFromMeta(*recMeta, *receiptsForResult.result) + receipt, err := flow.ExecutionReceiptFromStub(*recMeta, *receiptsForResult.result) + if err != nil { + return nil, irrecoverable.NewExceptionf("could not create execution receipt from stub: %w", err) + } if !receiptFilter(receipt) { continue } @@ -192,11 +197,15 @@ func (et *ExecutionTree) reachableReceipts(vertex forest.Vertex, blockFilter mem // travers down the tree in a deep-first-search manner children := et.forest.GetChildren(vertex.VertexID()) + var err error for children.HasNext() { child := children.NextVertex() - receipts = et.reachableReceipts(child, blockFilter, receiptFilter, receipts) + receipts, err = et.reachableReceipts(child, blockFilter, receiptFilter, receipts) + if err != nil { + return nil, irrecoverable.NewExceptionf("failed to find execution receipts over the execution tree: %w", err) + } } - return receipts + return receipts, nil } // PruneUpToHeight prunes all results for all blocks with height up to but diff --git a/module/mempool/consensus/execution_tree_test.go b/module/mempool/consensus/execution_tree_test.go index 13ba4fa4761..128728ad446 100644 --- a/module/mempool/consensus/execution_tree_test.go +++ b/module/mempool/consensus/execution_tree_test.go @@ -64,18 +64,17 @@ func (et *ExecutionTreeTestSuite) createExecutionTree() (map[string]*flow.Block, // Make blocks blocks := make(map[string]*flow.Block) - blocks["A10"] = makeBlockWithHeight(10) - blocks["A11"] = makeChildBlock(blocks["A10"]) + blocks["A10"] = unittest.BlockFixture(unittest.Block.WithHeight(10)) + blocks["A11"] = unittest.BlockWithParentFixture(blocks["A10"].ToHeader()) + blocks["B10"] = unittest.BlockFixture(unittest.Block.WithHeight(10)) + blocks["B11"] = unittest.BlockWithParentFixture(blocks["B10"].ToHeader()) + blocks["B12"] = unittest.BlockWithParentFixture(blocks["B11"].ToHeader()) - blocks["B10"] = makeBlockWithHeight(10) - blocks["B11"] = makeChildBlock(blocks["B10"]) - blocks["B12"] = makeChildBlock(blocks["B11"]) + blocks["C11"] = unittest.BlockWithParentFixture(blocks["B10"].ToHeader()) + blocks["C12"] = unittest.BlockWithParentFixture(blocks["C11"].ToHeader()) + blocks["C13"] = unittest.BlockWithParentFixture(blocks["C12"].ToHeader()) - blocks["C11"] = makeChildBlock(blocks["B10"]) - blocks["C12"] = makeChildBlock(blocks["C11"]) - blocks["C13"] = makeChildBlock(blocks["C12"]) - - blocks["D13"] = makeBlockWithHeight(13) + blocks["D13"] = unittest.BlockFixture(unittest.Block.WithHeight(13)) // Make Results results := make(map[string]*flow.ExecutionResult) @@ -124,7 +123,7 @@ func (et *ExecutionTreeTestSuite) addReceipts2ReceiptsForest(receipts map[string } for name, rcpt := range receipts { block := blockById[rcpt.ExecutionResult.BlockID] - _, err := et.Forest.AddReceipt(rcpt, block.Header) + _, err := et.Forest.AddReceipt(rcpt, block.ToHeader()) if err != nil { et.FailNow("failed to add receipt '%s'", name) } @@ -141,23 +140,23 @@ func (et *ExecutionTreeTestSuite) Test_Initialization() { // Receipts that are 
already included in the fork should be skipped. func (et *ExecutionTreeTestSuite) Test_AddReceipt() { block := unittest.BlockFixture() - receipt := unittest.ReceiptForBlockFixture(&block) + receipt := unittest.ReceiptForBlockFixture(block) // add should succeed and increase size - added, err := et.Forest.AddReceipt(receipt, block.Header) + added, err := et.Forest.AddReceipt(receipt, block.ToHeader()) assert.NoError(et.T(), err) assert.True(et.T(), added) assert.Equal(et.T(), uint(1), et.Forest.Size()) // adding different receipt for same result receipt2 := unittest.ExecutionReceiptFixture(unittest.WithResult(&receipt.ExecutionResult)) - added, err = et.Forest.AddReceipt(receipt2, block.Header) + added, err = et.Forest.AddReceipt(receipt2, block.ToHeader()) assert.NoError(et.T(), err) assert.True(et.T(), added) assert.Equal(et.T(), uint(2), et.Forest.Size()) // repeated addition should be idempotent - added, err = et.Forest.AddReceipt(receipt, block.Header) + added, err = et.Forest.AddReceipt(receipt, block.ToHeader()) assert.NoError(et.T(), err) assert.False(et.T(), added) assert.Equal(et.T(), uint(2), et.Forest.Size()) @@ -167,10 +166,12 @@ func (et *ExecutionTreeTestSuite) Test_AddReceipt() { // an Execution Receipt. Here, we add a result for a completely detached block. Starting a tree search // from this result should not yield any receipts. func (et *ExecutionTreeTestSuite) Test_AddResult_Detached() { - miscBlock := makeBlockWithHeight(101) + miscBlock := unittest.BlockFixture( + unittest.Block.WithHeight(101), + ) miscResult := unittest.ExecutionResultFixture(unittest.WithBlock(miscBlock)) - err := et.Forest.AddResult(miscResult, miscBlock.Header) + err := et.Forest.AddResult(miscResult, miscBlock.ToHeader()) assert.NoError(et.T(), err) collectedReceipts, err := et.Forest.ReachableReceipts(miscResult.ID(), anyBlock(), anyReceipt()) assert.NoError(et.T(), err) @@ -201,7 +202,7 @@ func (et *ExecutionTreeTestSuite) Test_AddResult_Bridge() { et.Assert().True(reflect.DeepEqual(expected, et.receiptSet(collectedReceipts, receipts))) // after we added r[C12], tree search should reach r[C13] and hence include the corresponding receipt ER[r[C13]] - err = et.Forest.AddResult(results["r[C12]"], blocks["C12"].Header) + err = et.Forest.AddResult(results["r[C12]"], blocks["C12"].ToHeader()) assert.NoError(et.T(), err) collectedReceipts, err = et.Forest.ReachableReceipts(results["r[B10]"].ID(), blockFilter, anyReceipt()) assert.NoError(et.T(), err) @@ -381,16 +382,6 @@ func anyReceipt() mempool.ReceiptFilter { return func(*flow.ExecutionReceipt) bool { return true } } -func makeBlockWithHeight(height uint64) *flow.Block { - block := unittest.BlockFixture() - block.Header.Height = height - return &block -} - -func makeChildBlock(parent *flow.Block) *flow.Block { - return unittest.BlockWithParentFixture(parent.Header) -} - func (et *ExecutionTreeTestSuite) receiptSet(selected []*flow.ExecutionReceipt, receipts map[string]*flow.ExecutionReceipt) map[string]struct{} { id2Name := make(map[flow.Identifier]string) for name, rcpt := range receipts { diff --git a/module/mempool/consensus/incorporated_result_seals.go b/module/mempool/consensus/incorporated_result_seals.go index 88069d5a68b..575405116cd 100644 --- a/module/mempool/consensus/incorporated_result_seals.go +++ b/module/mempool/consensus/incorporated_result_seals.go @@ -67,9 +67,9 @@ func (ir *IncorporatedResultSeals) resultHasMultipleReceipts(incorporatedResult return receiptsForIncorporatedResults.GroupByExecutorID().NumberGroups() >= 2 } -// 
ByID gets an IncorporatedResultSeal by IncorporatedResult ID -func (ir *IncorporatedResultSeals) ByID(id flow.Identifier) (*flow.IncorporatedResultSeal, bool) { - seal, ok := ir.seals.ByID(id) +// Get gets an IncorporatedResultSeal by IncorporatedResult ID +func (ir *IncorporatedResultSeals) Get(id flow.Identifier) (*flow.IncorporatedResultSeal, bool) { + seal, ok := ir.seals.Get(id) if !ok { return nil, false } diff --git a/module/mempool/consensus/mock/exec_fork_actor.go b/module/mempool/consensus/mock/exec_fork_actor.go index ae567dd9e7c..5f18410cfc7 100644 --- a/module/mempool/consensus/mock/exec_fork_actor.go +++ b/module/mempool/consensus/mock/exec_fork_actor.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -7,24 +7,23 @@ import ( mock "github.com/stretchr/testify/mock" ) -// ExecForkActorMock is an autogenerated mock type for the ExecForkActor type -type ExecForkActorMock struct { +// ExecForkActor is an autogenerated mock type for the ExecForkActor type +type ExecForkActor struct { mock.Mock } // OnExecFork provides a mock function with given fields: _a0 -func (_m *ExecForkActorMock) OnExecFork(_a0 []*flow.IncorporatedResultSeal) { +func (_m *ExecForkActor) OnExecFork(_a0 []*flow.IncorporatedResultSeal) { _m.Called(_a0) } -type mockConstructorTestingTNewExecForkActorMock interface { +// NewExecForkActor creates a new instance of ExecForkActor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecForkActor(t interface { mock.TestingT Cleanup(func()) -} - -// NewExecForkActorMock creates a new instance of ExecForkActorMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewExecForkActorMock(t mockConstructorTestingTNewExecForkActorMock) *ExecForkActorMock { - mock := &ExecForkActorMock{} +}) *ExecForkActor { + mock := &ExecForkActor{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/module/mempool/consensus/mock/mock_actor.go b/module/mempool/consensus/mock_interfaces/mock_actor.go similarity index 100% rename from module/mempool/consensus/mock/mock_actor.go rename to module/mempool/consensus/mock_interfaces/mock_actor.go diff --git a/module/mempool/consensus/receipt_equivalence_class.go b/module/mempool/consensus/receipt_equivalence_class.go index 6b428e2dac5..c112de94459 100644 --- a/module/mempool/consensus/receipt_equivalence_class.go +++ b/module/mempool/consensus/receipt_equivalence_class.go @@ -14,7 +14,7 @@ import ( // Execution Receipts. // Implements LevelledForest's Vertex interface. 
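As context for the struct that follows: the equivalence class simply deduplicates receipts committing to the same result, keyed by receipt ID, with `AddReceipt` reporting how many new receipts were stored (0 for a duplicate). A toy sketch under those assumptions, not flow-go's actual types:

```go
package main

import "fmt"

type receipt struct {
	id       string
	executor string
}

// sameResultReceipts groups receipts that commit to one execution result.
type sameResultReceipts struct {
	resultID string
	receipts map[string]receipt // receipt ID -> receipt
}

// add stores a receipt and returns how many new receipts were added
// (0 for a duplicate), mirroring AddReceipt's return convention.
func (s *sameResultReceipts) add(r receipt) uint {
	if _, dup := s.receipts[r.id]; dup {
		return 0
	}
	s.receipts[r.id] = r
	return 1
}

func main() {
	group := &sameResultReceipts{resultID: "res-1", receipts: map[string]receipt{}}
	fmt.Println(group.add(receipt{id: "rcp-1", executor: "EN1"})) // 1
	fmt.Println(group.add(receipt{id: "rcp-1", executor: "EN1"})) // 0 (duplicate)
	fmt.Println(len(group.receipts))                              // 1
}
```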
type ReceiptsOfSameResult struct { - receipts map[flow.Identifier]*flow.ExecutionReceiptMeta // map from ExecutionReceipt.ID -> ExecutionReceiptMeta + receipts map[flow.Identifier]*flow.ExecutionReceiptStub // map from ExecutionReceipt.ID -> ExecutionReceiptStub result *flow.ExecutionResult resultID flow.Identifier // precomputed ID of result to avoid expensive hashing on each call blockHeader *flow.Header // header of the block which the result is for @@ -28,7 +28,7 @@ func NewReceiptsOfSameResult(result *flow.ExecutionResult, block *flow.Header) ( } // construct ReceiptsOfSameResult only containing initialReceipt - rcpts := make(map[flow.Identifier]*flow.ExecutionReceiptMeta) + rcpts := make(map[flow.Identifier]*flow.ExecutionReceiptStub) rs := &ReceiptsOfSameResult{ receipts: rcpts, result: result, @@ -52,7 +52,7 @@ func (rsr *ReceiptsOfSameResult) AddReceipt(receipt *flow.ExecutionReceipt) (uin if rsr.Has(receiptID) { return 0, nil } - rsr.receipts[receipt.ID()] = receipt.Meta() + rsr.receipts[receiptID] = receipt.Stub() return 1, nil } diff --git a/module/mempool/entity/executableblock.go b/module/mempool/entity/executableblock.go index 3c80e801d3c..49777337590 100644 --- a/module/mempool/entity/executableblock.go +++ b/module/mempool/entity/executableblock.go @@ -4,83 +4,59 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// A complete collection contains the guarantee and the transactions. +// CompleteCollection contains the guarantee and the transactions. // the guarantee is the hash of all the transactions. The execution node // receives the guarantee from the block, and queries the transactions by // the guarantee from the collection node. // when receiving a collection from collection node, the execution node will -// update the Transactions field of a CompleteCollection and make it complete. +// update the Collection field of a CompleteCollection and make it complete. type CompleteCollection struct { - Guarantee *flow.CollectionGuarantee - Transactions []*flow.TransactionBody + Guarantee *flow.CollectionGuarantee + Collection *flow.Collection } // ExecutableBlock represents a block that can be executed by the VM // -// It assumes that the Block attached is immutable, so take care in not modifying or changing the inner +// It assumes that the attached Block is immutable, so take care not to modify or change the inner // *flow.Block, otherwise the struct will be in an inconsistent state. It requires the Block is immutable -// because the it lazy lodas the Block.ID() into the private id field, on the first call to ExecutableBlock.ID() -// All future calls to ID will not call Block.ID(), therefore it Block changes, the id will not match the Block. +// because it lazy loads the Block.ID() into the private blockID field on the first call to ExecutableBlock.BlockID(). +// All future calls to BlockID will not call Block.ID(); therefore, if the Block changes, the blockID will not match the Block. type ExecutableBlock struct { - id flow.Identifier + blockID flow.Identifier Block *flow.Block CompleteCollections map[flow.Identifier]*CompleteCollection // key is the collection ID. StartState *flow.StateCommitment Executing bool // flag used to indicate if block is being executed, to avoid re-execution } -// BlocksByCollection represents a collection that the execution node. -// has not received its transactions yet. -// it also holds references to the blocks that contains this collection -// and are waiting to be executed.
-type BlocksByCollection struct { - CollectionID flow.Identifier - // a reversed map to look up which block contains this collection. key is the collection id - ExecutableBlocks map[flow.Identifier]*ExecutableBlock -} - -func (c CompleteCollection) Collection() flow.Collection { - return flow.Collection{Transactions: c.Transactions} -} - +// IsCompleted returns true if the collection has been retrieved from the network. +// This function assumes that the collection is non-empty and that collections are retrieved either in full or not at all. func (c CompleteCollection) IsCompleted() bool { - return len(c.Transactions) > 0 -} - -func (b *BlocksByCollection) ID() flow.Identifier { - return b.CollectionID -} - -func (b *BlocksByCollection) Checksum() flow.Identifier { - return b.CollectionID + return c.Collection != nil && len(c.Collection.Transactions) > 0 } -// ID lazy loads the Block.ID() into the private id field on the first call, and returns +// BlockID lazy loads the Block.ID() into the private blockID field on the first call, and returns // the id field in all future calls -func (b *ExecutableBlock) ID() flow.Identifier { - if b.id == flow.ZeroID { - b.id = b.Block.ID() +func (b *ExecutableBlock) BlockID() flow.Identifier { + if b.blockID == flow.ZeroID { + b.blockID = b.Block.ID() } - return b.id -} - -func (b *ExecutableBlock) Checksum() flow.Identifier { - return b.Block.Checksum() + return b.blockID } func (b *ExecutableBlock) Height() uint64 { - return b.Block.Header.Height + return b.Block.Height } func (b *ExecutableBlock) ParentID() flow.Identifier { - return b.Block.Header.ParentID + return b.Block.ParentID } func (b *ExecutableBlock) Collections() []*CompleteCollection { collections := make([]*CompleteCollection, len(b.Block.Payload.Guarantees)) for i, cg := range b.Block.Payload.Guarantees { - collections[i] = b.CompleteCollections[cg.ID()] + collections[i] = b.CompleteCollections[cg.CollectionID] } return collections @@ -92,7 +68,7 @@ func (b *ExecutableBlock) CompleteCollectionAt(index int) *CompleteCollection { if index < 0 || index >= len(b.Block.Payload.Guarantees) { return nil } - return b.CompleteCollections[b.Block.Payload.Guarantees[index].ID()] + return b.CompleteCollections[b.Block.Payload.Guarantees[index].CollectionID] } // CollectionAt returns a collection at the given index, @@ -102,7 +78,7 @@ func (b *ExecutableBlock) CollectionAt(index int) *flow.Collection { if cc == nil { return nil } - return &flow.Collection{Transactions: cc.Transactions} + return cc.Collection } // HasAllTransactions returns whether all the transactions for all collections @@ -110,7 +86,7 @@ func (b *ExecutableBlock) CollectionAt(index int) *flow.Collection { func (b *ExecutableBlock) HasAllTransactions() bool { for _, collection := range b.Block.Payload.Guarantees { - completeCollection, ok := b.CompleteCollections[collection.ID()] + completeCollection, ok := b.CompleteCollections[collection.CollectionID] if ok && completeCollection.IsCompleted() { continue } diff --git a/module/mempool/epochs/transactions_test.go b/module/mempool/epochs/transactions_test.go index 9d8b3e92df6..07bd798d9c8 100644 --- a/module/mempool/epochs/transactions_test.go +++ b/module/mempool/epochs/transactions_test.go @@ -54,7 +54,7 @@ func TestMultipleEpochs(t *testing.T) { tx := unittest.TransactionBodyFixture() transactions = append(transactions, &tx) - pool.Add(&tx) + pool.Add(tx.ID(), &tx) } }() } @@ -76,7 +76,7 @@ func TestCombinedSize(t *testing.T) { pool := pools.ForEpoch(epoch) for i := 0; i < 
int(transactionsPerEpoch); i++ { next := unittest.TransactionBodyFixture() - pool.Add(&next) + pool.Add(next.ID(), &next) } } diff --git a/module/mempool/errors.go b/module/mempool/errors.go index 8c7ffbc3632..1dec04c84f8 100644 --- a/module/mempool/errors.go +++ b/module/mempool/errors.go @@ -10,10 +10,6 @@ type UnknownExecutionResultError struct { err error } -func NewUnknownExecutionResultError(msg string) error { - return NewUnknownExecutionResultErrorf(msg) -} - func NewUnknownExecutionResultErrorf(msg string, args ...interface{}) error { return UnknownExecutionResultError{ err: fmt.Errorf(msg, args...), diff --git a/module/mempool/execution_data.go b/module/mempool/execution_data.go new file mode 100644 index 00000000000..e11a4eb5de6 --- /dev/null +++ b/module/mempool/execution_data.go @@ -0,0 +1,9 @@ +package mempool + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" +) + +// ExecutionData represents a concurrency-safe memory pool for BlockExecutionData. +type ExecutionData Mempool[flow.Identifier, *execution_data.BlockExecutionDataEntity] diff --git a/module/mempool/execution_tree.go b/module/mempool/execution_tree.go index 14fa0ff6707..76e46b8d039 100644 --- a/module/mempool/execution_tree.go +++ b/module/mempool/execution_tree.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package mempool import ( diff --git a/module/mempool/guarantees.go b/module/mempool/guarantees.go index a6ff0560a4b..0c0731cd2db 100644 --- a/module/mempool/guarantees.go +++ b/module/mempool/guarantees.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package mempool import ( @@ -7,28 +5,4 @@ import ( ) // Guarantees represents a concurrency-safe memory pool for collection guarantees. -type Guarantees interface { - - // Has checks whether the collection guarantee with the given hash is - // currently in the memory pool. - Has(collID flow.Identifier) bool - - // Add will add the given collection guarantee to the memory pool. It will - // return false if it was already in the mempool. - Add(guarantee *flow.CollectionGuarantee) bool - - // Remove will remove the given collection guarantees from the memory pool; it - // will return true if the collection guarantees was known and removed. - Remove(collID flow.Identifier) bool - - // ByID retrieve the collection guarantee with the given ID from the memory - // pool. It will return false if it was not found in the mempool. - ByID(collID flow.Identifier) (*flow.CollectionGuarantee, bool) - - // Size will return the current size of the memory pool. - Size() uint - - // All will retrieve all collection guarantees that are currently in the memory pool - // as a slice. - All() []*flow.CollectionGuarantee -} +type Guarantees Mempool[flow.Identifier, *flow.CollectionGuarantee] diff --git a/module/mempool/herocache/backdata/cache.go b/module/mempool/herocache/backdata/cache.go index 1c7956fd578..cf6c3843b38 100644 --- a/module/mempool/herocache/backdata/cache.go +++ b/module/mempool/herocache/backdata/cache.go @@ -5,10 +5,11 @@ import ( "time" _ "unsafe" // for linking runtimeNano + "github.com/onflow/flow-go/model/flow" + "github.com/rs/zerolog" "go.uber.org/atomic" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" "github.com/onflow/flow-go/utils/logging" @@ -41,16 +42,16 @@ type bucketIndex uint64 // slotIndex is data type representing a slot index in a bucket. 
type slotIndex uint64 -// sha32of256 is a 32-bits prefix flow.Identifier used to determine the bucketIndex of the entity +// sha32of256 is a 32-bit prefix of a flow.Identifier, used to determine the bucketIndex of the value // it represents. type sha32of256 uint32 -// slot is an internal notion corresponding to the identifier of an entity that is +// slot is an internal notion corresponding to the identifier of a value that is // meant to be stored in this Cache. type slot struct { - slotAge uint64 // age of this slot. - entityIndex heropool.EIndex // link to actual entity. - entityId32of256 sha32of256 // the 32-bits prefix of entity identifier. + slotAge uint64 // age of this slot. + valueIndex heropool.EIndex // link to actual value. + valueId32of256 sha32of256 // the 32-bit prefix of the value identifier. } // slotBucket represents a bucket of slots. @@ -59,7 +60,8 @@ type slotBucket struct { } // Cache implements an array-based generic memory pool backed by a fixed total array. -type Cache struct { +// Note that this implementation is NOT thread-safe, and the higher-level Backend is responsible for concurrency management. +type Cache[V any] struct { logger zerolog.Logger collector module.HeroCacheMetrics // NOTE: as a BackData implementation, Cache must be non-blocking. @@ -68,10 +70,10 @@ type Cache struct { slotCount uint64 // total number of non-expired key-values bucketNum uint64 // total number of buckets (i.e., total of buckets) ejectionMode heropool.EjectionMode - // buckets keeps the slots (i.e., entityId) of the (entityId, entity) pairs that are maintained in this BackData. + // buckets keeps the slots (i.e., valueId) of the (key, value) pairs that are maintained in this BackData. buckets []slotBucket - // entities keeps the values (i.e., entity) of the (entityId, entity) pairs that are maintained in this BackData. - entities *heropool.Pool + // entities keeps the values (i.e., value) of the (key, value) pairs that are maintained in this BackData. + entities *heropool.Pool[flow.Identifier, V] // telemetry // // availableSlotHistogram[i] represents number of buckets with i @@ -87,7 +89,7 @@ type Cache struct { // Its purpose is to manage the speed at which telemetry logs are printed. lastTelemetryDump *atomic.Int64 // tracer reports ejection events, initially nil but can be injection using CacheOpt - tracer Tracer + tracer Tracer[V] } // DefaultOversizeFactor determines the default oversizing factor of HeroCache. @@ -108,29 +110,36 @@ type Cache struct { // The default overSizeFactor factor is different in the package code because slotsPerBucket is > 3. const DefaultOversizeFactor = uint32(8) -func NewCache(sizeLimit uint32, +func NewCache[V any]( + sizeLimit uint32, oversizeFactor uint32, ejectionMode heropool.EjectionMode, logger zerolog.Logger, collector module.HeroCacheMetrics, - opts ...CacheOpt) *Cache { + opts ...CacheOpt[V], +) *Cache[V] { // total buckets. capacity := uint64(sizeLimit * oversizeFactor) bucketNum := capacity / slotsPerBucket + if bucketNum == 0 { + // we panic here because we don't want to continue with a zero bucketNum: it is used as a modulus when mapping identifiers to buckets, so every access would panic (a trivial DoS vector). + panic("bucketNum cannot be zero, choose a bigger sizeLimit or a smaller oversizeFactor") + } + if capacity%slotsPerBucket != 0 { // accounting for remainder.
bucketNum++ } - bd := &Cache{ + bd := &Cache[V]{ logger: logger, collector: collector, bucketNum: bucketNum, sizeLimit: sizeLimit, buckets: make([]slotBucket, bucketNum), ejectionMode: ejectionMode, - entities: heropool.NewHeroPool(sizeLimit, ejectionMode), + entities: heropool.NewHeroPool[flow.Identifier, V](sizeLimit, ejectionMode, logger), availableSlotHistogram: make([]uint64, slotsPerBucket+1), // +1 is to account for empty buckets as well. interactionCounter: atomic.NewUint64(0), lastTelemetryDump: atomic.NewInt64(0), @@ -144,167 +153,195 @@ func NewCache(sizeLimit uint32, return bd } -// Has checks if backdata already contains the entity with the given identifier. -func (c *Cache) Has(entityID flow.Identifier) bool { +// Has checks if backdata already contains the value with the given identifier. +func (c *Cache[V]) Has(key flow.Identifier) bool { defer c.logTelemetry() - _, _, _, ok := c.get(entityID) + _, _, _, ok := c.get(key) return ok } -// Add adds the given entity to the backdata and returns true if the entity was added or false if -// a valid entity already exists for the provided ID. -func (c *Cache) Add(entityID flow.Identifier, entity flow.Entity) bool { +// Add adds the given value to the backdata and returns true if the value was added or false if +// a valid value already exists for the provided ID. +func (c *Cache[V]) Add(key flow.Identifier, value V) bool { defer c.logTelemetry() - return c.put(entityID, entity) + return c.put(key, value) } -// Remove removes the entity with the given identifier and returns the removed entity and true if -// the entity was removed or false if the entity was not found. -func (c *Cache) Remove(entityID flow.Identifier) (flow.Entity, bool) { +// Remove removes the value with the given identifier and returns the removed value and true if +// the value was removed or false if the value was not found. +func (c *Cache[V]) Remove(key flow.Identifier) (value V, ok bool) { defer c.logTelemetry() - entity, bucketIndex, sliceIndex, exists := c.get(entityID) + value, bucketIndex, sliceIndex, exists := c.get(key) if !exists { - return nil, false + return value, false } // removes value from underlying entities list. - c.invalidateEntity(bucketIndex, sliceIndex) + c.invalidateValue(bucketIndex, sliceIndex) // frees up slot c.unuseSlot(bucketIndex, sliceIndex) c.collector.OnKeyRemoved(c.entities.Size()) - return entity, true + return value, true } -// Adjust adjusts the entity using the given function if the given identifier can be found. -// Returns a bool which indicates whether the entity was updated as well as the updated entity. -func (c *Cache) Adjust(entityID flow.Identifier, f func(flow.Entity) flow.Entity) (flow.Entity, bool) { +// Adjust adjusts the value using the given function if the given identifier can be found. +// Returns a bool which indicates whether the value was updated as well as the updated value. 
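+//
+// A minimal usage sketch (illustrative only, assuming a Cache[uint64] instance named c):
+//
+//	counter, ok := c.Adjust(key, func(v uint64) uint64 { return v + 1 })
+//
+// Unlike the previous Entity-based implementation, the key stays fixed: the adjusted
+// value remains stored under the same identifier, and the entry's age and LRU position
+// are refreshed in place.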
+func (c *Cache[V]) Adjust(key flow.Identifier, f func(V) V) (V, bool) { defer c.logTelemetry() - entity, removed := c.Remove(entityID) - if !removed { - return nil, false + // locate the slot + value, b, s, ok := c.get(key) + if !ok { + return value, false } - newEntity := f(entity) - newEntityID := newEntity.ID() + // compute the new value + newValue := f(value) + + // bump its age (so evictions still see this as “recent”) + c.slotCount++ + c.buckets[b].slots[s].slotAge = c.slotCount - c.put(newEntityID, newEntity) + // update in the underlying pool, in-place + idx := c.buckets[b].slots[s].valueIndex + c.entities.UpdateAtIndex(idx, newValue) - return newEntity, true + // and refresh its LRU position + c.entities.Touch(idx) + + return newValue, true +} + +// AdjustWithInit adjusts the value using the given function if the given identifier can be found. When the +// value is not found, it initializes the value using the given init function and then applies the adjust function. +// Args: +// - key: the identifier of the value to adjust. +// - adjust: the function that adjusts the value. +// - init: the function that initializes the value when it is not found. +// Returns: +// - the adjusted value. +// - a bool which indicates whether the value was adjusted. +func (c *Cache[V]) AdjustWithInit(key flow.Identifier, adjust func(V) V, init func() V) (V, bool) { + defer c.logTelemetry() + + if c.Has(key) { + return c.Adjust(key, adjust) + } + c.put(key, init()) + return c.Adjust(key, adjust) } -// ByID returns the given entity from the backdata. -func (c *Cache) ByID(entityID flow.Identifier) (flow.Entity, bool) { +// Get returns the value stored under the given key from the backdata. +func (c *Cache[V]) Get(key flow.Identifier) (V, bool) { defer c.logTelemetry() - entity, _, _, ok := c.get(entityID) - return entity, ok + value, _, _, ok := c.get(key) + return value, ok } -// Size returns the size of the backdata, i.e., total number of stored (entityId, entity) pairs. -func (c Cache) Size() uint { +// Size returns the size of the backdata, i.e., total number of stored (key, value) pairs. +func (c *Cache[V]) Size() uint { defer c.logTelemetry() return uint(c.entities.Size()) } -// Head returns the head of queue. +// Head returns the head key and value of the queue. // Boolean return value determines whether there is a head available. -func (c Cache) Head() (flow.Entity, bool) { +func (c *Cache[V]) Head() (flow.Identifier, V, bool) { return c.entities.Head() } // All returns all entities stored in the backdata. -func (c Cache) All() map[flow.Identifier]flow.Entity { +func (c *Cache[V]) All() map[flow.Identifier]V { defer c.logTelemetry() entitiesList := c.entities.All() - all := make(map[flow.Identifier]flow.Entity, len(c.entities.All())) + all := make(map[flow.Identifier]V, len(entitiesList)) - total := len(entitiesList) - for i := 0; i < total; i++ { - p := entitiesList[i] + for _, p := range entitiesList { all[p.Id()] = p.Entity() } return all } -// Identifiers returns the list of identifiers of entities stored in the backdata. -func (c Cache) Identifiers() flow.IdentifierList { +// Keys returns the list of identifiers of entities stored in the backdata.
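+// The order of the returned keys mirrors the underlying pool's used list (for LRU
+// ejection, least-recently used first); callers should treat the ordering as an
+// implementation detail.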
+func (c *Cache[V]) Keys() []flow.Identifier { defer c.logTelemetry() - ids := make(flow.IdentifierList, c.entities.Size()) - for i, p := range c.entities.All() { - ids[i] = p.Id() + ids := make(flow.IdentifierList, 0, c.entities.Size()) + for _, p := range c.entities.All() { + ids = append(ids, p.Id()) } return ids } -// Entities returns the list of entities stored in the backdata. -func (c Cache) Entities() []flow.Entity { +// Values returns the list of entities stored in the backdata. +func (c *Cache[V]) Values() []V { defer c.logTelemetry() - entities := make([]flow.Entity, c.entities.Size()) - for i, p := range c.entities.All() { - entities[i] = p.Entity() + entities := make([]V, 0, c.entities.Size()) + for _, p := range c.entities.All() { + entities = append(entities, p.Entity()) } return entities } // Clear removes all entities from the backdata. -func (c *Cache) Clear() { +func (c *Cache[V]) Clear() { defer c.logTelemetry() c.buckets = make([]slotBucket, c.bucketNum) - c.entities = heropool.NewHeroPool(c.sizeLimit, c.ejectionMode) + c.entities = heropool.NewHeroPool[flow.Identifier, V](c.sizeLimit, c.ejectionMode, c.logger) c.availableSlotHistogram = make([]uint64, slotsPerBucket+1) c.interactionCounter = atomic.NewUint64(0) c.lastTelemetryDump = atomic.NewInt64(0) c.slotCount = 0 } -// put writes the (entityId, entity) pair into this BackData. Boolean return value +// put writes the (key, value) pair into this BackData. Boolean return value // determines whether the write operation was successful. A write operation fails when there is already // a duplicate entityId exists in the BackData, and that entityId is linked to a valid entity. -func (c *Cache) put(entityId flow.Identifier, entity flow.Entity) bool { +func (c *Cache[V]) put(key flow.Identifier, value V) bool { c.collector.OnKeyPutAttempt(c.entities.Size()) - entityId32of256, b := c.entityId32of256AndBucketIndex(entityId) - slotToUse, unique := c.slotIndexInBucket(b, entityId32of256, entityId) + entityId32of256, b := c.entityId32of256AndBucketIndex(key) + slotToUse, unique := c.slotIndexInBucket(b, entityId32of256, key) if !unique { // entityId already exists c.collector.OnKeyPutDeduplicated() return false } - if linkedId, _, ok := c.linkedEntityOf(b, slotToUse); ok { + if linkedId, _, ok := c.linkedValueOf(b, slotToUse); ok { // bucket is full, and we are replacing an already linked (but old) slot that has a valid value, hence // we should remove its value from underlying entities list. - ejectedEntity := c.invalidateEntity(b, slotToUse) + ejectedEntity := c.invalidateValue(b, slotToUse) if c.tracer != nil { c.tracer.EntityEjectionDueToEmergency(ejectedEntity) } c.collector.OnEntityEjectionDueToEmergency() c.logger.Warn(). Hex("replaced_entity_id", logging.ID(linkedId)). - Hex("added_entity_id", logging.ID(entityId)). + Hex("added_entity_id", logging.ID(key)). Msg("emergency ejection, adding entity to cache resulted in replacing a valid key, potential collision") } c.slotCount++ - entityIndex, slotAvailable, ejectedEntity := c.entities.Add(entityId, entity, c.ownerIndexOf(b, slotToUse)) + entityIndex, slotAvailable, ejectedEntity, wasEjected := c.entities.Add(key, value, c.ownerIndexOf(b, slotToUse)) if !slotAvailable { c.collector.OnKeyPutDrop() return false } - if ejectedEntity != nil { + if wasEjected { // cache is at its full size and ejection happened to make room for this new entity. 
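+		// Note: with a generic value type V, the ejected value can no longer be compared
+		// against nil (as the previous flow.Entity-based code did), which is why the
+		// explicit wasEjected flag guards this branch.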
if c.tracer != nil { c.tracer.EntityEjectionDueToFullCapacity(ejectedEntity) @@ -313,44 +350,44 @@ func (c *Cache) put(entityId flow.Identifier, entity flow.Entity) bool { } c.buckets[b].slots[slotToUse].slotAge = c.slotCount - c.buckets[b].slots[slotToUse].entityIndex = entityIndex - c.buckets[b].slots[slotToUse].entityId32of256 = entityId32of256 + c.buckets[b].slots[slotToUse].valueIndex = entityIndex + c.buckets[b].slots[slotToUse].valueId32of256 = entityId32of256 c.collector.OnKeyPutSuccess(c.entities.Size()) return true } -// get retrieves the entity corresponding to given identifier from underlying entities list. -// The boolean return value determines whether an entity with given id exists in the BackData. -func (c *Cache) get(entityID flow.Identifier) (flow.Entity, bucketIndex, slotIndex, bool) { - entityId32of256, b := c.entityId32of256AndBucketIndex(entityID) +// get retrieves the value corresponding to the given identifier from the underlying entities list. +// The boolean return value determines whether a value with the given id exists in the BackData. +func (c *Cache[V]) get(key flow.Identifier) (value V, bckIndex bucketIndex, sltIndex slotIndex, ok bool) { + entityId32of256, b := c.entityId32of256AndBucketIndex(key) for s := slotIndex(0); s < slotIndex(slotsPerBucket); s++ { - if c.buckets[b].slots[s].entityId32of256 != entityId32of256 { + if c.buckets[b].slots[s].valueId32of256 != entityId32of256 { continue } - id, entity, linked := c.linkedEntityOf(b, s) + id, linkedValue, linked := c.linkedValueOf(b, s) if !linked { // no linked entity for this (bucketIndex, slotIndex) pair. c.collector.OnKeyGetFailure() - return nil, 0, 0, false + return value, 0, 0, false } - if id != entityID { + if id != key { // checking identifiers fully. continue } c.collector.OnKeyGetSuccess() - return entity, b, s, true + return linkedValue, b, s, true } c.collector.OnKeyGetFailure() - return nil, 0, 0, false + return value, 0, 0, false } // entityId32of256AndBucketIndex determines the id prefix as well as the bucket index corresponding to the // given identifier. -func (c Cache) entityId32of256AndBucketIndex(id flow.Identifier) (sha32of256, bucketIndex) { +func (c *Cache[V]) entityId32of256AndBucketIndex(id flow.Identifier) (sha32of256, bucketIndex) { // uint64(id[0:8]) used to compute bucket index for which this identifier belongs to b := binary.LittleEndian.Uint64(id[0:8]) % c.bucketNum @@ -361,7 +398,7 @@ func (c Cache) entityId32of256AndBucketIndex(id flow.Identifier) (sha32of256, bu } // expiryThreshold returns the threshold for which all slots with index below threshold are considered old enough for eviction. -func (c Cache) expiryThreshold() uint64 { +func (c *Cache[V]) expiryThreshold() uint64 { var expiryThreshold uint64 = 0 if c.slotCount > uint64(c.sizeLimit) { // total number of slots written are above the predefined limit @@ -373,7 +410,7 @@ func (c Cache) expiryThreshold() uint64 { // slotIndexInBucket returns a free slot for this entityId in the bucket. In case the bucket is full, it invalidates the oldest valid slot, // and returns its index as free slot. It returns false if the entityId already exists in this bucket. -func (c *Cache) slotIndexInBucket(b bucketIndex, slotId sha32of256, entityId flow.Identifier) (slotIndex, bool) { +func (c *Cache[V]) slotIndexInBucket(b bucketIndex, slotId sha32of256, entityId flow.Identifier) (slotIndex, bool) { slotToUse := slotIndex(0) expiryThreshold := c.expiryThreshold() availableSlotCount := uint64(0) // for telemetry logs.
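To make the addressing scheme above concrete: an identifier is mapped to a bucket by interpreting its first eight bytes as a little-endian integer modulo bucketNum, and a 32-bit prefix of the identifier is cached in the slot so lookups can cheaply reject non-matching slots before the full 32-byte comparison in get. A standalone sketch of that idea (not part of the diff; the prefix byte range shown is an illustrative assumption):

package main

import (
	"encoding/binary"
	"fmt"
)

// identifier stands in for flow.Identifier, a 32-byte hash.
type identifier [32]byte

// bucketAndPrefix mirrors entityId32of256AndBucketIndex: bytes [0:8] select the
// bucket, and a 32-bit slice of the identifier (assumed here to be bytes [8:12])
// serves as the cheap first-pass comparison key stored in the slot.
func bucketAndPrefix(id identifier, bucketNum uint64) (uint64, uint32) {
	bucket := binary.LittleEndian.Uint64(id[0:8]) % bucketNum
	prefix := binary.LittleEndian.Uint32(id[8:12])
	return bucket, prefix
}

func main() {
	id := identifier{42, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
	bucket, prefix := bucketAndPrefix(id, 1024)
	fmt.Printf("bucket=%d prefix=%#x\n", bucket, prefix)
}

On a prefix match, get still compares the full identifier before returning, so the prefix only filters candidates and never yields false positives.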
@@ -393,14 +430,14 @@ func (c *Cache) slotIndexInBucket(b bucketIndex, slotId sha32of256, entityId flo continue } - if c.buckets[b].slots[s].entityId32of256 != slotId { + if c.buckets[b].slots[s].valueId32of256 != slotId { // slot id is distinct and fresh, and hence move to next slot. continue } - id, _, linked := c.linkedEntityOf(b, s) + id, _, linked := c.linkedValueOf(b, s) if !linked { - // slot is not linked to a valid entity, hence, can be used + // slot is not linked to a valid value, hence, can be used // as an available slot. availableSlotCount++ slotToUse = s @@ -413,7 +450,7 @@ func (c *Cache) slotIndexInBucket(b bucketIndex, slotId sha32of256, entityId flo continue } - // entity ID already exists in the bucket + // value ID already exists in the bucket return 0, false } @@ -425,34 +462,35 @@ func (c *Cache) slotIndexInBucket(b bucketIndex, slotId sha32of256, entityId flo // ownerIndexOf maps the (bucketIndex, slotIndex) pair to a canonical unique (scalar) index. // This scalar index is used to represent this (bucketIndex, slotIndex) pair in the underlying // entities list. -func (c Cache) ownerIndexOf(b bucketIndex, s slotIndex) uint64 { +func (c *Cache[V]) ownerIndexOf(b bucketIndex, s slotIndex) uint64 { return (uint64(b) * slotsPerBucket) + uint64(s) } -// linkedEntityOf returns the entity linked to this (bucketIndex, slotIndex) pair from the underlying entities list. -// By a linked entity, we mean if the entity has an owner index matching to (bucketIndex, slotIndex). -// The bool return value corresponds to whether there is a linked entity to this (bucketIndex, slotIndex) or not. -func (c *Cache) linkedEntityOf(b bucketIndex, s slotIndex) (flow.Identifier, flow.Entity, bool) { +// linkedValueOf returns the value linked to this (bucketIndex, slotIndex) pair from the underlying entities list. +// By a linked value, we mean one whose owner index matches this (bucketIndex, slotIndex) pair. +// The bool return value corresponds to whether there is a linked value for this (bucketIndex, slotIndex) or not. +func (c *Cache[V]) linkedValueOf(b bucketIndex, s slotIndex) (key flow.Identifier, value V, ok bool) { if c.buckets[b].slots[s].slotAge == slotAgeUnallocated { // slotIndex never used, or recently invalidated, hence - // does not have any linked entity - return flow.Identifier{}, nil, false + // does not have any linked value + return flow.Identifier{}, value, false } - // retrieving entity index in the underlying entities linked-list - valueIndex := c.buckets[b].slots[s].entityIndex - id, entity, owner := c.entities.Get(valueIndex) + // retrieving value index in the underlying values linked-list + valueIndex := c.buckets[b].slots[s].valueIndex + var owner uint64 + key, value, owner = c.entities.Get(valueIndex) if c.ownerIndexOf(b, s) != owner { - // entity is not linked to this (bucketIndex, slotIndex) + // value is not linked to this (bucketIndex, slotIndex) c.buckets[b].slots[s].slotAge = slotAgeUnallocated - return flow.Identifier{}, nil, false + return flow.Identifier{}, value, false } - return id, entity, true + return key, value, true } // logTelemetry prints telemetry logs depending on number of interactions and last time telemetry has been logged. -func (c *Cache) logTelemetry() { +func (c *Cache[V]) logTelemetry() { counter := c.interactionCounter.Inc() if counter < telemetryCounterInterval { // not enough interactions to log.
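The slot-to-pool linkage above hinges on the scalar owner index: each (bucketIndex, slotIndex) pair is flattened to bucket*slotsPerBucket + slot, and the pool stores that owner alongside every value. A slot is trusted only if the pool entry it points to still points back; otherwise the slot is lazily marked unallocated on access. A small sketch of the invariant (assuming 16 slots per bucket, as the single-bucket test below suggests):

package main

import "fmt"

const slotsPerBucket = 16

// ownerIndex flattens a (bucket, slot) pair into the scalar owner stored in the pool,
// mirroring ownerIndexOf above.
func ownerIndex(bucket, slot uint64) uint64 {
	return bucket*slotsPerBucket + slot
}

// linked reports whether a slot and a pool entry still reference each other,
// mirroring the cross-check in linkedValueOf.
func linked(bucket, slot, ownerInPool uint64) bool {
	return ownerIndex(bucket, slot) == ownerInPool
}

func main() {
	fmt.Println(linked(3, 5, ownerIndex(3, 5))) // true: slot and pool entry agree
	fmt.Println(linked(3, 5, ownerIndex(2, 5))) // false: stale slot, treated as unallocated
}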
@@ -478,17 +516,17 @@ func (c *Cache) logTelemetry() { Logger() } - lg.Info().Msg("logging telemetry") + lg.Debug().Msg("logging telemetry") c.lastTelemetryDump.Store(runtimeNano()) } // unuseSlot marks slot as free so that it is ready to be re-used. -func (c *Cache) unuseSlot(b bucketIndex, s slotIndex) { +func (c *Cache[V]) unuseSlot(b bucketIndex, s slotIndex) { c.buckets[b].slots[s].slotAge = slotAgeUnallocated } -// invalidateEntity removes the entity linked to the specified slot from the underlying entities -// list. So that entity slot is made available to take if needed. -func (c *Cache) invalidateEntity(b bucketIndex, s slotIndex) flow.Entity { - return c.entities.Remove(c.buckets[b].slots[s].entityIndex) +// invalidateValue removes the value linked to the specified slot from the underlying entities +// list, so that the slot is made available for reuse if needed. +func (c *Cache[V]) invalidateValue(b bucketIndex, s slotIndex) V { + return c.entities.Remove(c.buckets[b].slots[s].valueIndex) } diff --git a/module/mempool/herocache/backdata/cache_test.go b/module/mempool/herocache/backdata/cache_test.go index 7c9864786d8..c3bd2801991 100644 --- a/module/mempool/herocache/backdata/cache_test.go +++ b/module/mempool/herocache/backdata/cache_test.go @@ -17,9 +17,9 @@ import ( // TestArrayBackData_SingleBucket evaluates health of state transition for storing 10 entities in a Cache with only // a single bucket (of 16). It also evaluates all stored items are retrievable. func TestArrayBackData_SingleBucket(t *testing.T) { - limit := 10 + limit := 16 - bd := NewCache(uint32(limit), + bd := NewCache[*unittest.MockEntity](uint32(limit), 1, heropool.LRUEjection, unittest.Logger(), @@ -38,7 +38,7 @@ func TestArrayBackData_SingleBucket(t *testing.T) { require.Equal(t, bd.buckets[0].slots[i].slotAge, uint64(i+1)) // also, since we have not yet over-limited, // entities are assigned their entityIndex in the same order they are added. - require.Equal(t, bd.buckets[0].slots[i].entityIndex, i) + require.Equal(t, bd.buckets[0].slots[i].valueIndex, i) _, _, owner := bd.entities.Get(i) require.Equal(t, owner, uint64(i)) } @@ -52,7 +52,8 @@ func TestArrayBackData_SingleBucket(t *testing.T) { func TestArrayBackData_Adjust(t *testing.T) { limit := 100_000 - bd := NewCache(uint32(limit), + bd := NewCache[*unittest.MockEntity]( + uint32(limit), 8, heropool.LRUEjection, unittest.Logger(), @@ -66,56 +67,55 @@ func TestArrayBackData_Adjust(t *testing.T) { // picks a random entity from BackData and adjusts its identifier to a new one. entityIndex := rand.Int() % limit // checking integrity of retrieving entity - oldEntity, ok := bd.ByID(entities[entityIndex].ID()) + oldEntity, ok := bd.Get(entities[entityIndex].Identifier) require.True(t, ok) - oldEntityID := oldEntity.ID() - require.Equal(t, entities[entityIndex].ID(), oldEntityID) + oldEntityID := oldEntity.Identifier + require.Equal(t, entities[entityIndex].Identifier, oldEntityID) require.Equal(t, entities[entityIndex], oldEntity) - // picks a new identifier for the entity and makes sure it is different than its current one. + // picks a new identifier for the entity and makes sure it is different from its current one.
newEntityID := unittest.IdentifierFixture() require.NotEqual(t, oldEntityID, newEntityID) - // adjusts old entity to a new entity with a new identifier - newEntity, ok := bd.Adjust(oldEntity.ID(), func(entity flow.Entity) flow.Entity { - mockEntity, ok := entity.(*unittest.MockEntity) + // adjusts old entity to a new entity + newEntity, ok := bd.Adjust(oldEntity.Identifier, func(entity *unittest.MockEntity) *unittest.MockEntity { require.True(t, ok) // oldEntity must be passed to func parameter of adjust. - require.Equal(t, oldEntityID, mockEntity.ID()) - require.Equal(t, oldEntity, mockEntity) + require.Equal(t, oldEntityID, entity.Identifier) + require.Equal(t, oldEntity, entity) - return &unittest.MockEntity{Identifier: newEntityID} + return &unittest.MockEntity{Identifier: oldEntityID, Nonce: entity.Nonce + 1} }) - // adjustment must be successful, and identifier must be updated. - require.True(t, ok) - require.Equal(t, newEntityID, newEntity.ID()) - newMockEntity, ok := newEntity.(*unittest.MockEntity) + // adjustment must be successful, and the identifier must remain the same. require.True(t, ok) + require.Equal(t, oldEntityID, newEntity.Identifier) // replaces new entity in the original reference list and // retrieves all. - entities[entityIndex] = newMockEntity + entities[entityIndex] = newEntity testRetrievableFrom(t, bd, entities, 0) - // re-adjusting old entity must fail, since its identifier must no longer exist - entity, ok := bd.Adjust(oldEntityID, func(entity flow.Entity) flow.Entity { - require.Fail(t, "function must not be invoked on a non-existing entity") - return entity + // re-adjusting the entity should succeed because the adjusted entity remains under the original id. + entity, ok := bd.Adjust(oldEntityID, func(entity *unittest.MockEntity) *unittest.MockEntity { + return &unittest.MockEntity{ + Identifier: entity.Identifier, // preserve the old id + Nonce: entity.Nonce + 1, + } }) - require.False(t, ok) - require.Nil(t, entity) + require.True(t, ok) + require.NotNil(t, entity) - // similarly, retrieving old entity must fail - entity, ok = bd.ByID(oldEntityID) - require.False(t, ok) - require.Nil(t, entity) + // similarly, retrieving old entity must not fail + entity, ok = bd.Get(oldEntityID) + require.True(t, ok) + require.NotNil(t, entity) ok = bd.Has(oldEntityID) - require.False(t, ok) + require.True(t, ok) // adjusting any random non-existing identifier must fail - entity, ok = bd.Adjust(unittest.IdentifierFixture(), func(entity flow.Entity) flow.Entity { + entity, ok = bd.Adjust(unittest.IdentifierFixture(), func(entity *unittest.MockEntity) *unittest.MockEntity { require.Fail(t, "function must not be invoked on a non-existing entity") return entity }) @@ -126,12 +126,198 @@ func TestArrayBackData_Adjust(t *testing.T) { require.Equal(t, bd.Size(), uint(limit)) } +// TestArrayBackData_RemoveAfterAdjustRandom ensures that when you Adjust a random entry in a full cache, +// Remove returns the updated entity, the cache size drops by one, and all other entries remain.
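+// This guards against Adjust leaving stale slot metadata behind: if the in-place update
+// desynchronized the slot from its pool entry, Remove would return the pre-adjustment
+// value or miss the key entirely.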
+func TestArrayBackData_RemoveAfterAdjustRandom(t *testing.T) { + limit := 100_000 + + bd := NewCache[*unittest.MockEntity]( + uint32(limit), + 8, + heropool.LRUEjection, + unittest.Logger(), + metrics.NewNoopCollector(), + ) + + entities := unittest.EntityListFixture(uint(limit)) + + // adds all entities to backdata + testAddEntities(t, bd, entities, heropool.LRUEjection) + + // pick one at random and Adjust it + entityIndex := rand.Int() % limit + original, ok := bd.Get(entities[entityIndex].Identifier) + require.True(t, ok) + + // adjusts old entity to a new entity + updatedEntity, ok := bd.Adjust(original.Identifier, func(ent *unittest.MockEntity) *unittest.MockEntity { + require.True(t, ok) + + require.Equal(t, original.Identifier, ent.Identifier) + return &unittest.MockEntity{ + Identifier: ent.Identifier, + Nonce: ent.Nonce + 7, + } + }) + require.True(t, ok) + require.Equal(t, original.Identifier, updatedEntity.Identifier) + require.Equal(t, original.Nonce+7, updatedEntity.Nonce) + + // remove that same key + removed, ok := bd.Remove(original.Identifier) + require.True(t, ok) + require.Equal(t, updatedEntity, removed) + + // cache size must have dropped by one + require.Equal(t, uint(limit-1), bd.Size()) + + // the removed key is gone: + _, exists := bd.Get(original.Identifier) + require.False(t, exists) + require.False(t, bd.Has(original.Identifier)) + + // all other entities should still be retrievable, with their original nonces + for i, e := range entities { + if i == entityIndex { + continue + } + got, ok := bd.Get(e.Identifier) + require.True(t, ok, "entity at index %d must still be present", i) + require.Equal(t, e, got, "entity %d must be unchanged", i) + } +} + +// TestArrayBackData_AdjustAffectsLRU makes sure Adjust bumps the “recently used” order. +// We fill the cache to its limit, adjust the very first entity, then add one more to force LRU eviction. +// The first entity should survive, and the one that was second should get evicted. +func TestArrayBackData_AdjustAffectsLRU(t *testing.T) { + limit := 100_000 + + bd := NewCache[*unittest.MockEntity]( + uint32(limit), + 8, + heropool.LRUEjection, + unittest.Logger(), + metrics.NewNoopCollector(), + ) + + entities := unittest.EntityListFixture(uint(limit)) + + // adds all entities to backdata + testAddEntities(t, bd, entities, heropool.LRUEjection) + + // Adjust the very first one (index 0) to bump its LRU age + firstID := entities[0].Identifier + _, ok := bd.Adjust(firstID, func(ent *unittest.MockEntity) *unittest.MockEntity { + // no payload change, just marking as used + return ent + }) + require.True(t, ok) + + // add one more to force an LRU eviction + extra := &unittest.MockEntity{Identifier: unittest.IdentifierFixture(), Nonce: 999} + require.True(t, bd.Add(extra.Identifier, extra)) + + // verify that first entity is still there + got, gotOK := bd.Get(firstID) + require.True(t, gotOK) + require.Equal(t, firstID, got.Identifier) + + // but the second entity (the old LRU) has been evicted + secondID := entities[1].Identifier + _, secondOk := bd.Get(secondID) + require.False(t, secondOk) + + // and the new “extra” entity is present + _, extraOK := bd.Get(extra.Identifier) + require.True(t, extraOK) +} + +// TestArrayBackData_AdjustWithInit evaluates the AdjustWithInit method. It should initialize and then adjust the value of +// a non-existing entity, while preserving the integrity of the BackData when just adjusting the value of an existing entity.
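+// Since AdjustWithInit is implemented as a Has check followed by put and Adjust, this test
+// also exercises the init-then-adjust path, where the value is first inserted via init and
+// immediately adjusted in the same call.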
+func TestArrayBackData_AdjustWithInit(t *testing.T) { + limit := 100_000 + + bd := NewCache[*unittest.MockEntity](uint32(limit), + 8, + heropool.LRUEjection, + unittest.Logger(), + metrics.NewNoopCollector()) + + entities := unittest.EntityListFixture(uint(limit)) + for _, e := range entities { + adjustedEntity, adjusted := bd.AdjustWithInit(e.Identifier, func(entity *unittest.MockEntity) *unittest.MockEntity { + // adjust logic, increments the nonce of the entity + entity.Nonce++ + return entity + }, func() *unittest.MockEntity { + return e // initialize with the entity + }) + require.True(t, adjusted) + require.Equal(t, e.Identifier, adjustedEntity.Identifier) + require.Equal(t, uint64(1), adjustedEntity.Nonce) + } + + // picks a random entity from BackData to adjust. + entityIndex := rand.Int() % limit + // checking integrity of retrieving entity + oldEntity, ok := bd.Get(entities[entityIndex].Identifier) + require.True(t, ok) + oldEntityID := oldEntity.Identifier + require.Equal(t, entities[entityIndex].Identifier, oldEntityID) + require.Equal(t, entities[entityIndex], oldEntity) + + // picks a new identifier for the entity and makes sure it is different from its current one. + newEntityID := unittest.IdentifierFixture() + require.NotEqual(t, oldEntityID, newEntityID) + + // adjusts the old entity to a new entity, keeping the same identifier + newEntity, ok := bd.Adjust(oldEntity.Identifier, func(entity *unittest.MockEntity) *unittest.MockEntity { + // oldEntity must be passed to func parameter of adjust. + require.Equal(t, oldEntityID, entity.Identifier) + require.Equal(t, oldEntity, entity) + + // adjust logic, adjusts the nonce of the entity + return &unittest.MockEntity{Identifier: oldEntityID, Nonce: 2} + }) + + // adjustment must be successful, and the identifier must remain the same. + require.True(t, ok) + require.Equal(t, oldEntityID, newEntity.Identifier) + require.Equal(t, uint64(2), newEntity.Nonce) + + // replaces new entity in the original reference list and + // retrieves all. + entities[entityIndex] = newEntity + testRetrievableFrom(t, bd, entities, 0) + + // Now, re-adjusting the entity (using its original ID) should succeed. + entity, ok := bd.Adjust(oldEntityID, func(entity *unittest.MockEntity) *unittest.MockEntity { + // Further adjust: increment the nonce. + return &unittest.MockEntity{Identifier: oldEntityID, Nonce: entity.Nonce + 1} + }) + require.True(t, ok) + require.NotNil(t, entity) + require.Equal(t, oldEntityID, entity.Identifier) + // Check that the nonce was incremented from 2 to 3. + require.Equal(t, uint64(3), entity.Nonce) + + // Retrieving the entity using the original identifier must succeed. + entity, ok = bd.Get(oldEntityID) + require.True(t, ok) + require.Equal(t, oldEntityID, entity.Identifier) + require.Equal(t, uint64(3), entity.Nonce) + + ok = bd.Has(oldEntityID) + require.True(t, ok) +} + // TestArrayBackData_WriteHeavy evaluates correctness of Cache under the writing and retrieving // a heavy load of entities up to its limit. All data must be written successfully and then retrievable.
func TestArrayBackData_WriteHeavy(t *testing.T) { limit := 100_000 - bd := NewCache(uint32(limit), + bd := NewCache[*unittest.MockEntity](uint32(limit), 8, heropool.LRUEjection, unittest.Logger(), @@ -155,7 +341,7 @@ func TestArrayBackData_LRU_Ejection(t *testing.T) { limit := 100_000 items := uint(1_000_000) - bd := NewCache(uint32(limit), + bd := NewCache[*unittest.MockEntity](uint32(limit), 8, heropool.LRUEjection, unittest.Logger(), @@ -180,7 +366,7 @@ func TestArrayBackData_No_Ejection(t *testing.T) { limit := 100_000 items := uint(1_000_000) - bd := NewCache(uint32(limit), + bd := NewCache[*unittest.MockEntity](uint32(limit), 8, heropool.NoEjection, unittest.Logger(), @@ -205,7 +391,7 @@ func TestArrayBackData_Random_Ejection(t *testing.T) { limit := 100_000 items := uint(1_000_000) - bd := NewCache(uint32(limit), + bd := NewCache[*unittest.MockEntity](uint32(limit), 8, heropool.RandomEjection, unittest.Logger(), @@ -226,7 +412,7 @@ func TestArrayBackData_Random_Ejection(t *testing.T) { func TestArrayBackData_AddDuplicate(t *testing.T) { limit := 100 - bd := NewCache(uint32(limit), + bd := NewCache[*unittest.MockEntity](uint32(limit), 8, heropool.LRUEjection, unittest.Logger(), @@ -239,7 +425,7 @@ func TestArrayBackData_AddDuplicate(t *testing.T) { // adding duplicate entity should fail for _, entity := range entities { - require.False(t, bd.Add(entity.ID(), entity)) + require.False(t, bd.Add(entity.Identifier, entity)) } // still all entities must be retrievable from Cache. @@ -250,7 +436,7 @@ func TestArrayBackData_AddDuplicate(t *testing.T) { func TestArrayBackData_Clear(t *testing.T) { limit := 100 - bd := NewCache(uint32(limit), + bd := NewCache[*unittest.MockEntity](uint32(limit), 8, heropool.LRUEjection, unittest.Logger(), @@ -306,7 +492,7 @@ func TestArrayBackData_All(t *testing.T) { for _, tc := range tt { t.Run(fmt.Sprintf("%d-limit-%d-items-%s-ejection", tc.limit, tc.items, tc.ejectionMode), func(t *testing.T) { - bd := NewCache(tc.limit, + bd := NewCache[*unittest.MockEntity](tc.limit, 8, tc.ejectionMode, unittest.Logger(), @@ -319,8 +505,8 @@ func TestArrayBackData_All(t *testing.T) { // in random ejection mode we count total number of matched entities // with All map. testMapMatchCount(t, bd.All(), entities, int(tc.limit)) - testEntitiesMatchCount(t, bd.Entities(), entities, int(tc.limit)) - testIdentifiersMatchCount(t, bd.Identifiers(), entities, int(tc.limit)) + testEntitiesMatchCount(t, bd.Values(), entities, int(tc.limit)) + testIdentifiersMatchCount(t, bd.Keys(), entities, int(tc.limit)) } else { // in LRU ejection mode we match All items based on a from index (i.e., last "from" items). from := int(tc.items) - int(tc.limit) @@ -329,8 +515,8 @@ func TestArrayBackData_All(t *testing.T) { from = 0 } testMapMatchFrom(t, bd.All(), entities, from) - testEntitiesMatchFrom(t, bd.Entities(), entities, from) - testIdentifiersMatchFrom(t, bd.Identifiers(), entities, from) + testEntitiesMatchFrom(t, bd.Values(), entities, from) + testIdentifiersMatchFrom(t, bd.Keys(), entities, from) } }) } @@ -372,7 +558,7 @@ func TestArrayBackData_Remove(t *testing.T) { for _, tc := range tt { t.Run(fmt.Sprintf("%d-limit-%d-items-%dfrom-%dcount", tc.limit, tc.items, tc.from, tc.count), func(t *testing.T) { - bd := NewCache( + bd := NewCache[*unittest.MockEntity]( tc.limit, 8, heropool.RandomEjection, @@ -398,25 +584,28 @@ func TestArrayBackData_Remove(t *testing.T) { // testAddEntities is a test helper that checks entities are added successfully to the Cache. 
// and each entity is retrievable right after it is written to backdata. -func testAddEntities(t *testing.T, bd *Cache, entities []*unittest.MockEntity, ejection heropool.EjectionMode) { +func testAddEntities(t *testing.T, bd *Cache[*unittest.MockEntity], entities []*unittest.MockEntity, ejection heropool.EjectionMode) { // initially, head should be undefined - e, ok := bd.Head() + k, e, ok := bd.Head() require.False(t, ok) + require.Equal(t, flow.ZeroID, k) require.Nil(t, e) // adding elements for i, e := range entities { if ejection == heropool.NoEjection && uint32(i) >= bd.sizeLimit { // with no ejection when it goes beyond limit, the writes should be unsuccessful. - require.False(t, bd.Add(e.ID(), e)) + require.False(t, bd.Add(e.Identifier, e)) // the head should retrieve the first added entity. - headEntity, headExists := bd.Head() + headKey, headEntity, headExists := bd.Head() require.True(t, headExists) - require.Equal(t, headEntity.ID(), entities[0].ID()) + expectedID := entities[0].Identifier + require.Equal(t, expectedID, headKey) + require.Equal(t, expectedID, headEntity.Identifier) } else { // adding each element must be successful. - require.True(t, bd.Add(e.ID(), e)) + require.True(t, bd.Add(e.Identifier, e)) if uint32(i) < bd.sizeLimit { // when we are below limit the size of @@ -424,9 +613,11 @@ func testAddEntities(t *testing.T, bd *Cache, entities []*unittest.MockEntity, e require.Equal(t, bd.Size(), uint(i+1)) // in case cache is not full, the head should retrieve the first added entity. - headEntity, headExists := bd.Head() + headKey, headEntity, headExists := bd.Head() require.True(t, headExists) - require.Equal(t, headEntity.ID(), entities[0].ID()) + expectedID := entities[0].Identifier + require.Equal(t, expectedID, headKey) + require.Equal(t, expectedID, headEntity.Identifier) } else { // when we cross the limit, the ejection kicks in, and // size must be steady at the limit. @@ -434,7 +625,7 @@ func testAddEntities(t *testing.T, bd *Cache, entities []*unittest.MockEntity, e } // entity should be immediately retrievable - actual, ok := bd.ByID(e.ID()) + actual, ok := bd.Get(e.Identifier) require.True(t, ok) require.Equal(t, e, actual) } @@ -442,15 +633,15 @@ func testAddEntities(t *testing.T, bd *Cache, entities []*unittest.MockEntity, e } // testRetrievableInRange is a test helper that evaluates that all entities starting from given index are retrievable from Cache. -func testRetrievableFrom(t *testing.T, bd *Cache, entities []*unittest.MockEntity, from int) { +func testRetrievableFrom(t *testing.T, bd *Cache[*unittest.MockEntity], entities []*unittest.MockEntity, from int) { testRetrievableInRange(t, bd, entities, from, len(entities)) } // testRetrievableInRange is a test helper that evaluates within given range [from, to) are retrievable from Cache. -func testRetrievableInRange(t *testing.T, bd *Cache, entities []*unittest.MockEntity, from int, to int) { +func testRetrievableInRange(t *testing.T, bd *Cache[*unittest.MockEntity], entities []*unittest.MockEntity, from int, to int) { for i := range entities { expected := entities[i] - actual, ok := bd.ByID(expected.ID()) + actual, ok := bd.Get(expected.Identifier) if i < from || i >= to { require.False(t, ok, i) require.Nil(t, actual) @@ -462,11 +653,11 @@ func testRetrievableInRange(t *testing.T, bd *Cache, entities []*unittest.MockEn } // testRemoveAtRandom is a test helper removes specified number of entities from Cache at random. 
-func testRemoveAtRandom(t *testing.T, bd *Cache, entities []*unittest.MockEntity, count int) { +func testRemoveAtRandom(t *testing.T, bd *Cache[*unittest.MockEntity], entities []*unittest.MockEntity, count int) { for removedCount := 0; removedCount < count; { unittest.RequireReturnsBefore(t, func() { index := rand.Int() % len(entities) - expected, removed := bd.Remove(entities[index].ID()) + expected, removed := bd.Remove(entities[index].Identifier) if !removed { return } @@ -479,9 +670,9 @@ func testRemoveAtRandom(t *testing.T, bd *Cache, entities []*unittest.MockEntity } // testRemoveRange is a test helper that removes specified range of entities from Cache. -func testRemoveRange(t *testing.T, bd *Cache, entities []*unittest.MockEntity, from int, to int) { +func testRemoveRange(t *testing.T, bd *Cache[*unittest.MockEntity], entities []*unittest.MockEntity, from int, to int) { for i := from; i < to; i++ { - expected, removed := bd.Remove(entities[i].ID()) + expected, removed := bd.Remove(entities[i].Identifier) require.True(t, removed) require.Equal(t, entities[i], expected) // size sanity check after removal @@ -490,26 +681,26 @@ func testRemoveRange(t *testing.T, bd *Cache, entities []*unittest.MockEntity, f } // testCheckRangeRemoved is a test helper that evaluates the specified range of entities have been removed from Cache. -func testCheckRangeRemoved(t *testing.T, bd *Cache, entities []*unittest.MockEntity, from int, to int) { +func testCheckRangeRemoved(t *testing.T, bd *Cache[*unittest.MockEntity], entities []*unittest.MockEntity, from int, to int) { for i := from; i < to; i++ { // both removal and retrieval must fail - expected, removed := bd.Remove(entities[i].ID()) + expected, removed := bd.Remove(entities[i].Identifier) require.False(t, removed) require.Nil(t, expected) - expected, exists := bd.ByID(entities[i].ID()) + expected, exists := bd.Get(entities[i].Identifier) require.False(t, exists) require.Nil(t, expected) } } // testMapMatchFrom is a test helper that checks entities are retrievable from entitiesMap starting specified index. -func testMapMatchFrom(t *testing.T, entitiesMap map[flow.Identifier]flow.Entity, entities []*unittest.MockEntity, from int) { +func testMapMatchFrom(t *testing.T, entitiesMap map[flow.Identifier]*unittest.MockEntity, entities []*unittest.MockEntity, from int) { require.Len(t, entitiesMap, len(entities)-from) for i := range entities { expected := entities[i] - actual, ok := entitiesMap[expected.ID()] + actual, ok := entitiesMap[expected.Identifier] if i < from { require.False(t, ok, i) require.Nil(t, actual) @@ -521,7 +712,7 @@ func testMapMatchFrom(t *testing.T, entitiesMap map[flow.Identifier]flow.Entity, } // testEntitiesMatchFrom is a test helper that checks entities are retrievable from given list starting specified index. 
-func testEntitiesMatchFrom(t *testing.T, expectedEntities []flow.Entity, actualEntities []*unittest.MockEntity, from int) { +func testEntitiesMatchFrom(t *testing.T, expectedEntities []*unittest.MockEntity, actualEntities []*unittest.MockEntity, from int) { require.Len(t, expectedEntities, len(actualEntities)-from) for i, actual := range actualEntities { @@ -539,20 +730,20 @@ func testIdentifiersMatchFrom(t *testing.T, expectedIdentifiers flow.IdentifierL for i, actual := range actualEntities { if i < from { - require.NotContains(t, expectedIdentifiers, actual.ID()) + require.NotContains(t, expectedIdentifiers, actual.Identifier) } else { - require.Contains(t, expectedIdentifiers, actual.ID()) + require.Contains(t, expectedIdentifiers, actual.Identifier) } } } // testMapMatchFrom is a test helper that checks specified number of entities are retrievable from entitiesMap. -func testMapMatchCount(t *testing.T, entitiesMap map[flow.Identifier]flow.Entity, entities []*unittest.MockEntity, count int) { +func testMapMatchCount(t *testing.T, entitiesMap map[flow.Identifier]*unittest.MockEntity, entities []*unittest.MockEntity, count int) { require.Len(t, entitiesMap, count) actualCount := 0 for i := range entities { expected := entities[i] - actual, ok := entitiesMap[expected.ID()] + actual, ok := entitiesMap[expected.Identifier] if !ok { continue } @@ -563,12 +754,12 @@ func testMapMatchCount(t *testing.T, entitiesMap map[flow.Identifier]flow.Entity } // testEntitiesMatchCount is a test helper that checks specified number of entities are retrievable from given list. -func testEntitiesMatchCount(t *testing.T, expectedEntities []flow.Entity, actualEntities []*unittest.MockEntity, count int) { - entitiesMap := make(map[flow.Identifier]flow.Entity) +func testEntitiesMatchCount(t *testing.T, expectedEntities []*unittest.MockEntity, actualEntities []*unittest.MockEntity, count int) { + entitiesMap := make(map[flow.Identifier]*unittest.MockEntity) // converts expected entities list to a map in order to utilize a test helper. for _, expected := range expectedEntities { - entitiesMap[expected.ID()] = expected + entitiesMap[expected.Identifier] = expected } testMapMatchCount(t, entitiesMap, actualEntities, count) @@ -586,7 +777,7 @@ func testIdentifiersMatchCount(t *testing.T, expectedIdentifiers flow.Identifier require.Len(t, idMap, count) actualCount := 0 for _, e := range actualEntities { - _, ok := idMap[e.ID()] + _, ok := idMap[e.Identifier] if !ok { continue } @@ -597,12 +788,12 @@ func testIdentifiersMatchCount(t *testing.T, expectedIdentifiers flow.Identifier // testRetrievableCount is a test helper that checks the number of retrievable entities from backdata exactly matches // the expectedCount. 
-func testRetrievableCount(t *testing.T, bd *Cache, entities []*unittest.MockEntity, expectedCount uint64) { +func testRetrievableCount(t *testing.T, bd *Cache[*unittest.MockEntity], entities []*unittest.MockEntity, expectedCount uint64) { actualCount := 0 for i := range entities { expected := entities[i] - actual, ok := bd.ByID(expected.ID()) + actual, ok := bd.Get(expected.Identifier) if !ok { continue } diff --git a/module/mempool/herocache/backdata/heropool/linkedlist.go b/module/mempool/herocache/backdata/heropool/linkedlist.go index 4c72931c630..30414ed9de5 100644 --- a/module/mempool/herocache/backdata/heropool/linkedlist.go +++ b/module/mempool/herocache/backdata/heropool/linkedlist.go @@ -2,13 +2,25 @@ package heropool // link represents a slice-based doubly linked-list node that // consists of a next and previous poolIndex. +// If a link doesn't belong to any state, its next and prev should hold InvalidIndex. type link struct { - next poolIndex - prev poolIndex + next EIndex + prev EIndex } // state represents a doubly linked-list by its head and tail pool indices. +// If a state has 0 size, its tail's and head's prev and next are treated as invalid and should hold InvalidIndex values. type state struct { - head poolIndex - tail poolIndex + head EIndex + tail EIndex + size uint32 +} + +// NewStates constructs an array of doubly linked-list states. +func NewStates(numberOfStates int) []state { + result := make([]state, numberOfStates) + for i := 1; i < numberOfStates; i++ { + result[i] = state{head: InvalidIndex, tail: InvalidIndex, size: 0} + } + return result } diff --git a/module/mempool/herocache/backdata/heropool/pool.go b/module/mempool/herocache/backdata/heropool/pool.go index 39dabcef07c..0394f6f9d8d 100644 --- a/module/mempool/herocache/backdata/heropool/pool.go +++ b/module/mempool/herocache/backdata/heropool/pool.go @@ -1,9 +1,12 @@ package heropool import ( - "math/rand" + "fmt" + "math" - "github.com/onflow/flow-go/model/flow" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/utils/rand" ) type EjectionMode string @@ -14,126 +17,147 @@ const ( NoEjection = EjectionMode("no-ejection") ) -// EIndex is data type representing an entity index in Pool. +// StateIndex identifies the state (free or used) of a slot in a pool. +type StateIndex uint + +const numberOfStates = 2 +const ( // iota is reset to 0 + stateFree StateIndex = iota + stateUsed +) + +// EIndex is the data type representing a value index in Pool. type EIndex uint32 +// InvalidIndex is used when a link doesn't point anywhere; in other words, it is the equivalent of a nil pointer. +const InvalidIndex EIndex = math.MaxUint32 // poolEntity represents the data type that is maintained by -type poolEntity struct { - PoolEntity - // owner maintains an external reference to the key associated with this entity. - // The key is maintained by the HeroCache, and entity is maintained by Pool. +type poolEntity[K comparable, V any] struct { + PoolEntity[K, V] + // owner maintains an external reference to the key associated with this value. + // The key is maintained by the HeroCache, and value is maintained by Pool. owner uint64 // node keeps the link to the previous and next entities. - // When this entity is allocated, the node maintains the connections it to the next and previous (used) pool entities. - // When this entity is unallocated, the node maintains the connections to the next and previous unallocated (free) pool entities.
+ // When this value is allocated, the node maintains its connections to the next and previous (used) pool entities. + // When this value is unallocated, the node maintains the connections to the next and previous unallocated (free) pool entities. node link + + // invalidated indicates whether this pool value has been invalidated. + // A value becomes invalidated when it is removed or ejected from the pool, + // meaning its key and value are no longer valid for use. + // This flag helps manage the lifecycle of the value within the pool. + invalidated bool } -type PoolEntity struct { - // Identity associated with this entity. - id flow.Identifier +type PoolEntity[K comparable, V any] struct { + // Key associated with this value. + key K - // Actual entity itself. - entity flow.Entity + // Actual value itself. + value V } -func (p PoolEntity) Id() flow.Identifier { - return p.id +func (p PoolEntity[K, V]) Id() K { + return p.key } -func (p PoolEntity) Entity() flow.Entity { - return p.entity +func (p PoolEntity[K, V]) Entity() V { + return p.value } -type Pool struct { - size uint32 - free state // keeps track of free slots. - used state // keeps track of allocated slots to cachedEntities. - poolEntities []poolEntity +type Pool[K comparable, V any] struct { + logger zerolog.Logger + states []state // keeps track of a slot's state + poolEntities []poolEntity[K, V] ejectionMode EjectionMode } -func NewHeroPool(sizeLimit uint32, ejectionMode EjectionMode) *Pool { - l := &Pool{ - free: state{ - head: poolIndex{index: 0}, - tail: poolIndex{index: 0}, - }, - used: state{ - head: poolIndex{index: 0}, - tail: poolIndex{index: 0}, - }, - poolEntities: make([]poolEntity, sizeLimit), +// NewHeroPool returns a pointer to a new hero pool constructed with the provided fixed size, +// EjectionMode, and logger. +func NewHeroPool[K comparable, V any](sizeLimit uint32, ejectionMode EjectionMode, logger zerolog.Logger) *Pool[K, V] { + l := &Pool[K, V]{ + // constructs states initialized to InvalidIndex + states: NewStates(numberOfStates), + poolEntities: make([]poolEntity[K, V], sizeLimit), ejectionMode: ejectionMode, + logger: logger, } + l.setDefaultNodeLinkValues() l.initFreeEntities() return l } -// initFreeEntities initializes the free double linked-list with the indices of all cached entity poolEntities. -func (p *Pool) initFreeEntities() { - p.free.head.setPoolIndex(0) - p.free.tail.setPoolIndex(0) +// setDefaultNodeLinkValues sets each node's prev and next to InvalidIndex for all cached entities in poolEntities. +func (p *Pool[K, V]) setDefaultNodeLinkValues() { + for i := 0; i < len(p.poolEntities); i++ { + p.poolEntities[i].node.next = InvalidIndex + p.poolEntities[i].node.prev = InvalidIndex + } +} +// initFreeEntities initializes the free doubly linked-list with the indices of all cached entity poolEntities.
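+// After initialization every slot is on the free list (states[stateFree].size equals the
+// pool's fixed size) and the used list is empty, so the first Add always claims the
+// free-list head.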
+func (p *Pool[K, V]) initFreeEntities() { + p.states[stateFree].head = 0 + p.states[stateFree].tail = 0 + p.poolEntities[p.states[stateFree].head].node.prev = InvalidIndex + p.poolEntities[p.states[stateFree].tail].node.next = InvalidIndex + p.states[stateFree].size = 1 for i := 1; i < len(p.poolEntities); i++ { - // appends slice index i to tail of free linked list - p.connect(p.free.tail, EIndex(i)) - // and updates its tail - p.free.tail.setPoolIndex(EIndex(i)) + p.connect(p.states[stateFree].tail, EIndex(i)) + p.states[stateFree].tail = EIndex(i) + p.poolEntities[p.states[stateFree].tail].node.next = InvalidIndex + p.states[stateFree].size++ } } -// Add writes given entity into a poolEntity on the underlying entities linked-list. +// Add writes the given value into a poolEntity on the underlying values linked-list. // // The boolean return value (slotAvailable) says whether pool has an available slot. Pool goes out of available slots if // it is full and no ejection is set. // -// If the pool has no available slots and an ejection is set, ejection occurs when adding a new entity. -// If an ejection occurred, ejectedEntity holds the ejected entity. -func (p *Pool) Add(entityId flow.Identifier, entity flow.Entity, owner uint64) (entityIndex EIndex, slotAvailable bool, ejectedEntity flow.Entity) { - entityIndex, slotAvailable, ejectedEntity = p.sliceIndexForEntity() +// If the pool has no available slots and an ejection is set, ejection occurs when adding a new value. +// If an ejection occurred, ejectedValue holds the ejected value. +// +// Returns: +// - valueIndex: The index in the pool where the new value was inserted. +// If no slot is available (and no ejection occurs), this will be set to InvalidIndex. +// - slotAvailable: Indicates whether an available slot was found. It is true if +// the value was inserted (either in a free slot or by ejecting an existing value). +// - ejectedValue: If an ejection occurred to free a slot, this holds the value +// that was ejected; otherwise, it is the zero value of type V. +// - wasEjected: Indicates whether an ejection was performed (true if a value was ejected, +// false if the insertion simply used an available free slot). +func (p *Pool[K, V]) Add(key K, value V, owner uint64) ( + valueIndex EIndex, slotAvailable bool, ejectedValue V, wasEjected bool) { + valueIndex, slotAvailable, ejectedValue, wasEjected = p.sliceIndexForEntity() if slotAvailable { - p.poolEntities[entityIndex].entity = entity - p.poolEntities[entityIndex].id = entityId - p.poolEntities[entityIndex].owner = owner - p.poolEntities[entityIndex].node.next.setUndefined() - p.poolEntities[entityIndex].node.prev.setUndefined() - - if p.used.head.isUndefined() { - // used list is empty, hence setting head of used list to current entityIndex. - p.used.head.setPoolIndex(entityIndex) - p.poolEntities[p.used.head.getSliceIndex()].node.prev.setUndefined() - } - - if !p.used.tail.isUndefined() { - // links new entity to the tail - p.connect(p.used.tail, entityIndex) - } - - // since we are appending to the used list, entityIndex also acts as tail of the list. - p.used.tail.setPoolIndex(entityIndex) - - p.size++ + p.poolEntities[valueIndex].value = value + p.poolEntities[valueIndex].key = key + p.poolEntities[valueIndex].owner = owner + // Reset the invalidated flag when reusing a slot.
+ p.poolEntities[valueIndex].invalidated = false + p.switchState(stateFree, stateUsed, valueIndex) } - return entityIndex, slotAvailable, ejectedEntity + return valueIndex, slotAvailable, ejectedValue, wasEjected } -// Get returns entity corresponding to the entity index from the underlying list. -func (p Pool) Get(entityIndex EIndex) (flow.Identifier, flow.Entity, uint64) { - return p.poolEntities[entityIndex].id, p.poolEntities[entityIndex].entity, p.poolEntities[entityIndex].owner +// Get returns the value corresponding to the given value index from the underlying list. +func (p *Pool[K, V]) Get(valueIndex EIndex) (K, V, uint64) { + return p.poolEntities[valueIndex].key, p.poolEntities[valueIndex].value, p.poolEntities[valueIndex].owner } -// All returns all stored entities in this pool. -func (p Pool) All() []PoolEntity { - all := make([]PoolEntity, p.size) - next := p.used.head +// All returns all stored values in this pool. +func (p *Pool[K, V]) All() []PoolEntity[K, V] { + all := make([]PoolEntity[K, V], p.states[stateUsed].size) + next := p.states[stateUsed].head - for i := uint32(0); i < p.size; i++ { - e := p.poolEntities[next.getSliceIndex()] + for i := uint32(0); i < p.states[stateUsed].size; i++ { + e := p.poolEntities[next] all[i] = e.PoolEntity next = e.node.next } @@ -142,205 +166,199 @@ func (p Pool) All() []PoolEntity { } // Head returns the head of used items. Assuming no ejection happened and pool never goes beyond limit, Head returns -// the first inserted element. -func (p Pool) Head() (flow.Entity, bool) { - if p.used.head.isUndefined() { - return nil, false +// the key and value of the first inserted element. +func (p *Pool[K, V]) Head() (key K, value V, ok bool) { + if p.states[stateUsed].size == 0 { + return key, value, false } - e := p.poolEntities[p.used.head.getSliceIndex()] - return e.Entity(), true + e := p.poolEntities[p.states[stateUsed].head] + return e.Id(), e.Entity(), true }
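As a hedged illustration of consuming Add's four return values (the key/value types are assumptions of this sketch, not code from the diff):

package heropool_test

import (
    "fmt"

    "github.com/rs/zerolog"

    "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
)

func ExamplePool_addSemantics() {
    // a single-slot pool makes the ejection path easy to observe.
    pool := heropool.NewHeroPool[string, string](1, heropool.LRUEjection, zerolog.Nop())

    pool.Add("first", "v1", 0)
    idx, slotAvailable, ejectedValue, wasEjected := pool.Add("second", "v2", 0)

    // With LRUEjection on a full pool the add succeeds by ejecting the oldest value,
    // so slotAvailable and wasEjected are true and ejectedValue is "v1".
    // Under NoEjection the same call would return slotAvailable == false and idx == InvalidIndex.
    fmt.Println(idx, slotAvailable, ejectedValue, wasEjected)
}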
// sliceIndexForEntity returns a slice index which hosts the next entity to be added to the list. +// The returned index is invalid if there are no available slots and no ejection could be performed. +// If a valid index is returned, it is guaranteed to be the head of the free list. +// Hence, once the slot is filled with a new value, switchState must be applied. // // The first boolean return value (hasAvailableSlot) says whether pool has an available slot. // Pool goes out of available slots if it is full and no ejection is set. // // Ejection happens if there is no available slot, and there is an ejection mode set. // If an ejection occurred, ejectedEntity holds the ejected entity. -func (p *Pool) sliceIndexForEntity() (i EIndex, hasAvailableSlot bool, ejectedEntity flow.Entity) { - if p.free.head.isUndefined() { +// +// Returns: +// - i: The slice index where the new entity should be stored. This index is valid only +// if hasAvailableSlot is true. +// - hasAvailableSlot: Indicates whether the pool has an available slot for a new entity. +// If false, the pool is full and no ejection was performed (e.g. if ejection mode is NoEjection). +// - ejectedValue: If an ejection occurred to free up a slot, this value holds the entity that was +// removed (ejected) from the pool. Otherwise, it is the zero value of type V. +// - wasEjected: Indicates whether an ejection occurred (true if an entity was ejected; false otherwise). +func (p *Pool[K, V]) sliceIndexForEntity() (i EIndex, hasAvailableSlot bool, ejectedValue V, wasEjected bool) { + lruEject := func() (EIndex, bool, V, bool) { + // LRU ejection + // the used head is the oldest entity, so we turn the used head to a free head here. + invalidatedValue := p.invalidateUsedHead() + return p.states[stateFree].head, true, invalidatedValue, true + } + + if p.states[stateFree].size == 0 { // the free list is empty, so we are out of space, and we need to eject. switch p.ejectionMode { case NoEjection: // pool is set for no ejection, hence, no slice index is selected, abort immediately. - return 0, false, nil - case LRUEjection: - // LRU ejection - // the used head is the oldest entity, so we turn the used head to a free head here. - invalidatedEntity := p.invalidateUsedHead() - return p.claimFreeHead(), true, invalidatedEntity + return InvalidIndex, false, ejectedValue, false case RandomEjection: // we only eject randomly when the pool is full and random ejection is on. - randomIndex := EIndex(rand.Uint32() % p.size) - invalidatedEntity := p.invalidateEntityAtIndex(randomIndex) - return p.claimFreeHead(), true, invalidatedEntity + random, err := rand.Uint32n(p.states[stateUsed].size) + if err != nil { + p.logger.Fatal().Err(err). + Msg("hero pool random ejection failed - falling back to LRU ejection") + // fall back to LRU ejection only for this instance + return lruEject() + } + randomIndex := EIndex(random) + invalidatedEntity := p.invalidateValueAtIndex(randomIndex) + return p.states[stateFree].head, true, invalidatedEntity, true + case LRUEjection: + // LRU ejection + return lruEject() } } - // claiming the head of free list as the slice index for the next entity to be added - return p.claimFreeHead(), true, nil + // returning the head of free list as the slice index for the next entity to be added + return p.states[stateFree].head, true, ejectedValue, false } -// Size returns total number of entities that this list maintains. -func (p Pool) Size() uint32 { - return p.size +// Size returns the total number of values that this list maintains. +func (p *Pool[K, V]) Size() uint32 { + return p.states[stateUsed].size } -// getHeads returns entities corresponding to the used and free heads. -func (p Pool) getHeads() (*poolEntity, *poolEntity) { - var usedHead, freeHead *poolEntity - if !p.used.head.isUndefined() { - usedHead = &p.poolEntities[p.used.head.getSliceIndex()] +// getHeads returns values corresponding to the used and free heads. +func (p *Pool[K, V]) getHeads() (*poolEntity[K, V], *poolEntity[K, V]) { + var usedHead, freeHead *poolEntity[K, V] + if p.states[stateUsed].size != 0 { + usedHead = &p.poolEntities[p.states[stateUsed].head] } - if !p.free.head.isUndefined() { - freeHead = &p.poolEntities[p.free.head.getSliceIndex()] + if p.states[stateFree].size != 0 { + freeHead = &p.poolEntities[p.states[stateFree].head] } return usedHead, freeHead } -// getTails returns entities corresponding to the used and free tails. -func (p Pool) getTails() (*poolEntity, *poolEntity) { - var usedTail, freeTail *poolEntity - if !p.used.tail.isUndefined() { - usedTail = &p.poolEntities[p.used.tail.getSliceIndex()] +// getTails returns values corresponding to the used and free tails.
+func (p *Pool[K, V]) getTails() (*poolEntity[K, V], *poolEntity[K, V]) { + var usedTail, freeTail *poolEntity[K, V] + if p.states[stateUsed].size != 0 { + usedTail = &p.poolEntities[p.states[stateUsed].tail] } - - if !p.free.tail.isUndefined() { - freeTail = &p.poolEntities[p.free.tail.getSliceIndex()] + if p.states[stateFree].size != 0 { + freeTail = &p.poolEntities[p.states[stateFree].tail] } return usedTail, freeTail } // connect links the prev and next nodes as the adjacent nodes in the double-linked list. -func (p *Pool) connect(prev poolIndex, next EIndex) { - p.poolEntities[prev.getSliceIndex()].node.next.setPoolIndex(next) +func (p *Pool[K, V]) connect(prev EIndex, next EIndex) { + p.poolEntities[prev].node.next = next p.poolEntities[next].node.prev = prev } // invalidateUsedHead moves current used head forward by one node. It -// also removes the entity the invalidated head is presenting and appends the +// also removes the value the invalidated head is presenting and appends the // node represented by the used head to the tail of the free list. -func (p *Pool) invalidateUsedHead() flow.Entity { - headSliceIndex := p.used.head.getSliceIndex() - return p.invalidateEntityAtIndex(headSliceIndex) +func (p *Pool[K, V]) invalidateUsedHead() V { + headSliceIndex := p.states[stateUsed].head + return p.invalidateValueAtIndex(headSliceIndex) } -// claimFreeHead moves the free head forward, and returns the slice index of the -// old free head to host a new entity. -func (p *Pool) claimFreeHead() EIndex { - oldFreeHeadIndex := p.free.head.getSliceIndex() - // moves head forward - p.free.head = p.poolEntities[oldFreeHeadIndex].node.next - // new head should point to an undefined prev, - // but we first check if list is not empty, i.e., - // head itself is not undefined. - if !p.free.head.isUndefined() { - p.poolEntities[p.free.head.getSliceIndex()].node.prev.setUndefined() - } - - // also, we check if the old head and tail are aligned and, if so, update the - // tail as well. This happens when we claim the only existing - // node of the free list. - if p.free.tail.getSliceIndex() == oldFreeHeadIndex { - p.free.tail.setUndefined() - } - - // clears pointers of claimed head - p.poolEntities[oldFreeHeadIndex].node.next.setUndefined() - p.poolEntities[oldFreeHeadIndex].node.prev.setUndefined() +// Remove removes value corresponding to given sliceIndex from the list. +func (p *Pool[K, V]) Remove(sliceIndex EIndex) V { + return p.invalidateValueAtIndex(sliceIndex) +} - return oldFreeHeadIndex +// UpdateAtIndex replaces the value at the given pool index. +func (p *Pool[K, V]) UpdateAtIndex(idx EIndex, newValue V) { + p.poolEntities[idx].value = newValue } -// Remove removes entity corresponding to given getSliceIndex from the list. -func (p *Pool) Remove(sliceIndex EIndex) flow.Entity { - return p.invalidateEntityAtIndex(sliceIndex) +// Touch marks the entry at the given pool index as “recently used” by moving it +// to the tail of the used list, regardless of its current position. +func (p *Pool[K, V]) Touch(idx EIndex) { + // remove from used list + p.switchState(stateUsed, stateFree, idx) + // immediately append back to used list tail + p.switchState(stateFree, stateUsed, idx) }
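A short hedged sketch of Touch in action follows; the key/value types are assumptions of the sketch, not code from this diff.

package heropool_test

import (
    "fmt"

    "github.com/rs/zerolog"

    "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
)

func ExamplePool_touch() {
    pool := heropool.NewHeroPool[string, int](2, heropool.LRUEjection, zerolog.Nop())

    idxA, _, _, _ := pool.Add("a", 1, 0)
    pool.Add("b", 2, 0)

    // "a" is the current LRU candidate; Touch moves it to the used tail,
    // so the next ejection removes "b" instead.
    pool.Touch(idxA)

    _, _, ejected, _ := pool.Add("c", 3, 0)
    fmt.Println(ejected) // 2, the value stored under "b"
}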
-// invalidateEntityAtIndex invalidates the given getSliceIndex in the linked list by +// invalidateValueAtIndex invalidates the given sliceIndex in the linked list by // removing its corresponding linked-list node from the used linked list, and appending -// it to the tail of the free list. It also removes the entity that the invalidated node is presenting. -func (p *Pool) invalidateEntityAtIndex(sliceIndex EIndex) flow.Entity { - poolEntity := p.poolEntities[sliceIndex] - prev := poolEntity.node.prev - next := poolEntity.node.next - invalidatedEntity := poolEntity.entity - - if sliceIndex != p.used.head.getSliceIndex() && sliceIndex != p.used.tail.getSliceIndex() { - // links next and prev elements for non-head and non-tail element - p.connect(prev, next.getSliceIndex()) - } - - if sliceIndex == p.used.head.getSliceIndex() { - // invalidating used head - // moves head forward - oldUsedHead, _ := p.getHeads() - p.used.head = oldUsedHead.node.next - // new head should point to an undefined prev, - // but we first check if list is not empty, i.e., - // head itself is not undefined. - if !p.used.head.isUndefined() { - usedHead, _ := p.getHeads() - usedHead.node.prev.setUndefined() - } - } - - if sliceIndex == p.used.tail.getSliceIndex() { - // invalidating used tail - // moves tail backward - oldUsedTail, _ := p.getTails() - p.used.tail = oldUsedTail.node.prev - // new head should point tail to an undefined next, - // but we first check if list is not empty, i.e., - // tail itself is not undefined. - if !p.used.tail.isUndefined() { - usedTail, _ := p.getTails() - usedTail.node.next.setUndefined() - } +// it to the tail of the free list. It also removes the value that the invalidated node is presenting. +func (p *Pool[K, V]) invalidateValueAtIndex(sliceIndex EIndex) V { + invalidatedValue := p.poolEntities[sliceIndex].value + if p.poolEntities[sliceIndex].invalidated { + panic(fmt.Sprintf("removing a value from an empty slot with index: %d", sliceIndex)) } + p.switchState(stateUsed, stateFree, sliceIndex) + + var zeroKey K + var zeroValue V + p.poolEntities[sliceIndex].key = zeroKey + p.poolEntities[sliceIndex].value = zeroValue + p.poolEntities[sliceIndex].invalidated = true + return invalidatedValue +} - // invalidates entity and adds it to free entities. - p.poolEntities[sliceIndex].id = flow.ZeroID - p.poolEntities[sliceIndex].entity = nil - p.poolEntities[sliceIndex].node.next.setUndefined() - p.poolEntities[sliceIndex].node.prev.setUndefined() - - p.appendToFreeList(sliceIndex) - - // decrements Size - p.size-- - - return invalidatedEntity +// isInvalidated returns true if linked-list node represented by sliceIndex does not contain +// a valid value. +func (p *Pool[K, V]) isInvalidated(sliceIndex EIndex) bool { + return p.poolEntities[sliceIndex].invalidated } -// appendToFreeList appends linked-list node represented by getSliceIndex to tail of free list. -func (p *Pool) appendToFreeList(sliceIndex EIndex) { - if p.free.head.isUndefined() { - // free list is empty - p.free.head.setPoolIndex(sliceIndex) - p.free.tail.setPoolIndex(sliceIndex) - return - } +// switchState moves the value at the given index from the stateFrom list to the tail of the stateTo list.
+func (p *Pool[K, V]) switchState(stateFrom StateIndex, stateTo StateIndex, valueIndex EIndex) { + // Remove from stateFrom list + if p.states[stateFrom].size == 0 { + panic("Removing an entity from an empty list") + } else if p.states[stateFrom].size == 1 { + p.states[stateFrom].head = InvalidIndex + p.states[stateFrom].tail = InvalidIndex + } else { + node := p.poolEntities[valueIndex].node + + if valueIndex != p.states[stateFrom].head && valueIndex != p.states[stateFrom].tail { + // links next and prev elements for non-head and non-tail element + p.connect(node.prev, node.next) + } - // appends to the tail, and updates the tail - p.connect(p.free.tail, sliceIndex) - p.free.tail.setPoolIndex(sliceIndex) -} + if valueIndex == p.states[stateFrom].head { + // moves head forward + p.states[stateFrom].head = node.next + p.poolEntities[p.states[stateFrom].head].node.prev = InvalidIndex + } -// isInvalidated returns true if linked-list node represented by getSliceIndex does not contain -// a valid entity. -func (p Pool) isInvalidated(sliceIndex EIndex) bool { - if p.poolEntities[sliceIndex].id != flow.ZeroID { - return false + if valueIndex == p.states[stateFrom].tail { + // moves tail backwards + p.states[stateFrom].tail = node.prev + p.poolEntities[p.states[stateFrom].tail].node.next = InvalidIndex + } } - - if p.poolEntities[sliceIndex].entity != nil { - return false + p.states[stateFrom].size-- + + // Add to stateTo list + if p.states[stateTo].size == 0 { + p.states[stateTo].head = valueIndex + p.states[stateTo].tail = valueIndex + p.poolEntities[p.states[stateTo].head].node.prev = InvalidIndex + p.poolEntities[p.states[stateTo].tail].node.next = InvalidIndex + } else { + p.connect(p.states[stateTo].tail, valueIndex) + p.states[stateTo].tail = valueIndex + p.poolEntities[p.states[stateTo].tail].node.next = InvalidIndex } - - return true + p.states[stateTo].size++ } diff --git a/module/mempool/herocache/backdata/heropool/poolIndex.go b/module/mempool/herocache/backdata/heropool/poolIndex.go deleted file mode 100644 index 46d356faeea..00000000000 --- a/module/mempool/herocache/backdata/heropool/poolIndex.go +++ /dev/null @@ -1,34 +0,0 @@ -package heropool - -// poolIndex represents a slice-based linked list pointer. Instead of pointing -// to a memory address, this pointer points to a slice index. -// -// Note: an "undefined" (i.e., nil) notion for this poolIndex corresponds to the -// value of uint32(0). Hence, legit "index" poolEntities start from uint32(1). -// poolIndex also furnished with methods to convert a "poolIndex" value to -// a slice index, and vice versa. -type poolIndex struct { - index uint32 -} - -// isUndefined returns true if this poolIndex is set to zero. An undefined -// poolIndex is equivalent to a nil address-based one. -func (p poolIndex) isUndefined() bool { - return p.index == uint32(0) -} - -// setUndefined sets poolIndex to its undefined (i.e., nil equivalent) value. -func (p *poolIndex) setUndefined() { - p.index = uint32(0) -} - -// getSliceIndex returns the slice-index equivalent of the poolIndex. -func (p poolIndex) getSliceIndex() EIndex { - return EIndex(p.index) - 1 -} - -// setPoolIndex converts the input slice-based index into a pool index and -// sets the underlying poolIndex. 
-func (p *poolIndex) setPoolIndex(sliceIndex EIndex) { - p.index = uint32(sliceIndex + 1) -} diff --git a/module/mempool/herocache/backdata/heropool/pool_test.go b/module/mempool/herocache/backdata/heropool/pool_test.go index 8f3a83db681..2df32131bbe 100644 --- a/module/mempool/herocache/backdata/heropool/pool_test.go +++ b/module/mempool/herocache/backdata/heropool/pool_test.go @@ -2,8 +2,11 @@ package heropool import ( "fmt" + "math" "testing" + "github.com/onflow/flow-go/utils/rand" + "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" @@ -37,14 +40,14 @@ func TestStoreAndRetrieval_BelowLimit(t *testing.T) { }, } { t.Run(fmt.Sprintf("%d-limit-%d-entities", tc.limit, tc.entityCount), func(t *testing.T) { - withTestScenario(t, tc.limit, tc.entityCount, LRUEjection, []func(*testing.T, *Pool, []*unittest.MockEntity){ - func(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { + withTestScenario(t, tc.limit, tc.entityCount, LRUEjection, []func(*testing.T, *Pool[flow.Identifier, *unittest.MockEntity], []*unittest.MockEntity){ + func(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { testInitialization(t, pool, entities) }, - func(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { + func(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { testAddingEntities(t, pool, entities, LRUEjection) }, - func(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { + func(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { testRetrievingEntitiesFrom(t, pool, entities, 0) }, }..., @@ -73,11 +76,11 @@ func TestStoreAndRetrieval_With_No_Ejection(t *testing.T) { }, } { t.Run(fmt.Sprintf("%d-limit-%d-entities", tc.limit, tc.entityCount), func(t *testing.T) { - withTestScenario(t, tc.limit, tc.entityCount, NoEjection, []func(*testing.T, *Pool, []*unittest.MockEntity){ - func(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { + withTestScenario(t, tc.limit, tc.entityCount, NoEjection, []func(*testing.T, *Pool[flow.Identifier, *unittest.MockEntity], []*unittest.MockEntity){ + func(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { testAddingEntities(t, pool, entities, NoEjection) }, - func(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { + func(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { // with the NoEjection mode, only the first "limit" entities must be retrievable. 
testRetrievingEntitiesInRange(t, pool, entities, 0, EIndex(tc.limit)) }, @@ -108,11 +111,11 @@ func TestStoreAndRetrieval_With_LRU_Ejection(t *testing.T) { }, } { t.Run(fmt.Sprintf("%d-limit-%d-entities", tc.limit, tc.entityCount), func(t *testing.T) { - withTestScenario(t, tc.limit, tc.entityCount, LRUEjection, []func(*testing.T, *Pool, []*unittest.MockEntity){ - func(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { + withTestScenario(t, tc.limit, tc.entityCount, LRUEjection, []func(*testing.T, *Pool[flow.Identifier, *unittest.MockEntity], []*unittest.MockEntity){ + func(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { testAddingEntities(t, pool, entities, LRUEjection) }, - func(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { + func(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { // with a limit of tc.limit, storing a total of tc.entityCount (> tc.limit) entities, results // in ejection of the first tc.entityCount - tc.limit entities. // Hence, we check retrieval of the last tc.limit entities, which start from index @@ -141,11 +144,11 @@ func TestStoreAndRetrieval_With_Random_Ejection(t *testing.T) { }, } { t.Run(fmt.Sprintf("%d-limit-%d-entities", tc.limit, tc.entityCount), func(t *testing.T) { - withTestScenario(t, tc.limit, tc.entityCount, RandomEjection, []func(*testing.T, *Pool, []*unittest.MockEntity){ - func(t *testing.T, backData *Pool, entities []*unittest.MockEntity) { + withTestScenario(t, tc.limit, tc.entityCount, RandomEjection, []func(*testing.T, *Pool[flow.Identifier, *unittest.MockEntity], []*unittest.MockEntity){ + func(t *testing.T, backData *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { testAddingEntities(t, backData, entities, RandomEjection) }, - func(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { + func(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { // with a limit of tc.limit, storing a total of tc.entityCount (> tc.limit) entities, results // in ejection of "tc.entityCount - tc.limit" entities at random. // Hence, we check retrieval any successful total of "tc.limit" entities. @@ -191,11 +194,11 @@ func TestInvalidateEntity(t *testing.T) { } { // head invalidation test (LRU) t.Run(fmt.Sprintf("head-invalidation-%d-limit-%d-entities", tc.limit, tc.entityCount), func(t *testing.T) { - withTestScenario(t, tc.limit, tc.entityCount, LRUEjection, []func(*testing.T, *Pool, []*unittest.MockEntity){ - func(t *testing.T, backData *Pool, entities []*unittest.MockEntity) { + withTestScenario(t, tc.limit, tc.entityCount, LRUEjection, []func(*testing.T, *Pool[flow.Identifier, *unittest.MockEntity], []*unittest.MockEntity){ + func(t *testing.T, backData *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { testAddingEntities(t, backData, entities, LRUEjection) }, - func(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { + func(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { testInvalidatingHead(t, pool, entities) }, }...) 
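The test updates above spell out the full generic instantiation Pool[flow.Identifier, *unittest.MockEntity] at every call site. As a hedged aside (an illustrative pattern, not something this diff introduces), a package-local type alias could keep such signatures readable within the test package, where the imports and helpers below already exist:

// testPool is an illustrative alias, not part of this diff.
type testPool = Pool[flow.Identifier, *unittest.MockEntity]

// runScenario shows how helpers could accept the alias instead of the
// spelled-out instantiation (hypothetical helper).
func runScenario(t *testing.T, pool *testPool, entities []*unittest.MockEntity) {
    testAddingEntities(t, pool, entities, LRUEjection)
    testRetrievingEntitiesFrom(t, pool, entities, 0)
}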
@@ -203,11 +206,11 @@ func TestInvalidateEntity(t *testing.T) { // tail invalidation test (LIFO) t.Run(fmt.Sprintf("tail-invalidation-%d-limit-%d-entities-", tc.limit, tc.entityCount), func(t *testing.T) { - withTestScenario(t, tc.limit, tc.entityCount, LRUEjection, []func(*testing.T, *Pool, []*unittest.MockEntity){ - func(t *testing.T, backData *Pool, entities []*unittest.MockEntity) { + withTestScenario(t, tc.limit, tc.entityCount, LRUEjection, []func(*testing.T, *Pool[flow.Identifier, *unittest.MockEntity], []*unittest.MockEntity){ + func(t *testing.T, backData *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { testAddingEntities(t, backData, entities, LRUEjection) }, - func(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { + func(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { testInvalidatingTail(t, pool, entities) }, }...) @@ -215,9 +218,157 @@ } } +// TestAddAndRemoveEntities checks the health of heroPool for a scenario where entities are randomly stored and removed. +// LRUEjection, NoEjection and RandomEjection are tested. RandomEjection does not allow checking against a predetermined final pool state. +func TestAddAndRemoveEntities(t *testing.T) { + for _, tc := range []struct { + limit uint32 // capacity of the pool + entityCount uint32 // total entities to be stored + ejectionMode EjectionMode // ejection mode + numberOfOperations int + probabilityOfAdding float32 + }{ + { + limit: 500, + entityCount: 1000, + ejectionMode: LRUEjection, + numberOfOperations: 1000, + probabilityOfAdding: 0.8, + }, + { + limit: 500, + entityCount: 1000, + ejectionMode: NoEjection, + numberOfOperations: 1000, + probabilityOfAdding: 0.8, + }, + { + limit: 500, + entityCount: 1000, + ejectionMode: RandomEjection, + numberOfOperations: 1000, + probabilityOfAdding: 0.8, + }, + } { + t.Run(fmt.Sprintf("%d-limit-%d-entities", tc.limit, tc.entityCount), func(t *testing.T) { + testAddRemoveEntities(t, tc.limit, tc.entityCount, tc.ejectionMode, tc.numberOfOperations, tc.probabilityOfAdding) + }) + } +} + +// testAddRemoveEntities randomly adds and removes elements in the pool; probabilityOfAdding and its complement 1-probabilityOfAdding are the probabilities +// of an operation being an add or a remove, respectively. +func testAddRemoveEntities(t *testing.T, limit uint32, entityCount uint32, ejectionMode EjectionMode, numberOfOperations int, probabilityOfAdding float32) { + + require.GreaterOrEqual(t, entityCount, 2*limit, "entityCount must be greater or equal to 2*limit to test add/remove operations") + + randomIntN := func(length int) int { + random, err := rand.Uintn(uint(length)) + require.NoError(t, err) + return int(random) + } + + pool := NewHeroPool[flow.Identifier, *unittest.MockEntity](limit, ejectionMode, unittest.Logger()) + entities := unittest.EntityListFixture(uint(entityCount)) + // retryLimit is the max number of retries to find an entity that is not already in the pool to add it. + // The test fails if it reaches this limit. + retryLimit := 100 + // an array of random owner Ids. + ownerIds := make([]uint64, entityCount) + // generate a random ownerId for each index in the entities array. + for i := 0; i < int(entityCount); i++ { + randomOwnerId, err := rand.Uint64() + require.NoError(t, err) + ownerIds[i] = randomOwnerId + } + // this map maintains entities currently stored in the pool.
+ addedEntities := make(map[flow.Identifier]int) + addedEntitiesInPool := make(map[flow.Identifier]EIndex) + for i := 0; i < numberOfOperations; i++ { + // choose between Add and Remove with a probability of probabilityOfAdding and 1-probabilityOfAdding respectively. + if float32(randomIntN(math.MaxInt32))/math.MaxInt32 < probabilityOfAdding || len(addedEntities) == 0 { + // keeps finding an entity to add until it finds one that is not already in the pool. + found := false + for retryTime := 0; retryTime < retryLimit; retryTime++ { + toAddIndex := randomIntN(int(entityCount)) + _, found = addedEntities[entities[toAddIndex].Identifier] + if !found { + // found an entity that is not in the pool, add it. + indexInThePool, _, ejectedEntity, wasEjected := pool.Add(entities[toAddIndex].Identifier, entities[toAddIndex], ownerIds[toAddIndex]) + if ejectionMode != NoEjection || len(addedEntities) < int(limit) { + // when there is an ejection mode in place, or the pool is not full, the index should be valid. + require.NotEqual(t, InvalidIndex, indexInThePool) + } + require.LessOrEqual(t, len(addedEntities), int(limit), "pool should not contain more elements than its limit") + if ejectionMode != NoEjection && len(addedEntities) >= int(limit) { + // when there is an ejection mode in place and the pool is already full, an ejection must have occurred. + require.NotNil(t, ejectedEntity) + require.True(t, wasEjected) + } + if indexInThePool != InvalidIndex { + entityId := entities[toAddIndex].Identifier + // tracks the index of the entity in the pool and the index of the entity in the entities array. + addedEntities[entityId] = int(toAddIndex) + addedEntitiesInPool[entityId] = indexInThePool + // any entity added to the pool should be in the pool, and must be retrievable. + actualFlowId, actualEntity, actualOwnerId := pool.Get(indexInThePool) + require.Equal(t, entityId, actualFlowId) + require.Equal(t, entities[toAddIndex], actualEntity, "pool returned a different entity than the one added") + require.Equal(t, ownerIds[toAddIndex], actualOwnerId, "pool returned a different owner than the one added") + } + if ejectedEntity != nil { + require.Contains(t, addedEntities, ejectedEntity.Identifier, "pool ejected an entity that was not added before") + delete(addedEntities, ejectedEntity.Identifier) + delete(addedEntitiesInPool, ejectedEntity.Identifier) + } + break + } + } + require.Falsef(t, found, "could not find an entity to add after %d retries", retryLimit) + } else { + // randomly select an index of an entity to remove. + entityToRemove := randomIntN(len(addedEntities)) + j := 0 + var indexInPoolToRemove EIndex = 0 + var indexInEntitiesArray int = 0 + for k, v := range addedEntities { + if j == entityToRemove { + indexInPoolToRemove = addedEntitiesInPool[k] + indexInEntitiesArray = v + break + } + j++ } + // remove the selected entity from the pool.
+ removedEntity := pool.Remove(indexInPoolToRemove) + expectedRemovedEntityId := entities[indexInEntitiesArray].Identifier + require.Equal(t, expectedRemovedEntityId, removedEntity.Identifier, "removed wrong entity") + delete(addedEntities, expectedRemovedEntityId) + delete(addedEntitiesInPool, expectedRemovedEntityId) + actualFlowId, actualEntity, _ := pool.Get(indexInPoolToRemove) + require.Equal(t, flow.ZeroID, actualFlowId) + require.Nil(t, actualEntity) + require.True(t, pool.isInvalidated(indexInPoolToRemove)) + } + } + for k, v := range addedEntities { + indexInPool := addedEntitiesInPool[k] + actualFlowId, actualEntity, actualOwnerId := pool.Get(indexInPool) + require.Equal(t, entities[v].Identifier, actualFlowId) + require.Equal(t, entities[v], actualEntity) + require.Equal(t, ownerIds[v], actualOwnerId) + } + require.Equalf(t, len(addedEntities), int(pool.Size()), "pool size is not correct, expected %d, actual %d", len(addedEntities), pool.Size()) +} + // testInvalidatingHead keeps invalidating the head and evaluates the linked-list keeps updating its head // and remains connected. -func testInvalidatingHead(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { +func testInvalidatingHead(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { // total number of entities to store totalEntitiesStored := len(entities) // freeListInitialSize is total number of empty nodes after @@ -232,17 +383,17 @@ func testInvalidatingHead(t *testing.T, pool *Pool, entities []*unittest.MockEnt // size of list should be decremented after each invalidation. require.Equal(t, uint32(totalEntitiesStored-i-1), pool.Size()) // invalidated head should be appended to free entities - require.Equal(t, pool.free.tail.getSliceIndex(), EIndex(i)) + require.Equal(t, pool.states[stateFree].tail, EIndex(i)) if freeListInitialSize != 0 { // number of entities is below limit, hence free list is not empty. // invalidating used head must not change the free head. - require.Equal(t, EIndex(totalEntitiesStored), pool.free.head.getSliceIndex()) + require.Equal(t, EIndex(totalEntitiesStored), pool.states[stateFree].head) } else { // number of entities is greater than or equal to limit, hence free list is empty. // free head must be updated to the first invalidated head (index 0), // and must be kept there for entire test (as we invalidate head not tail). - require.Equal(t, EIndex(0), pool.free.head.getSliceIndex()) + require.Equal(t, EIndex(0), pool.states[stateFree].head) } // except when the list is empty, head must be updated after invalidation, @@ -251,14 +402,14 @@ func testInvalidatingHead(t *testing.T, pool *Pool, entities []*unittest.MockEnt if i != totalEntitiesStored-1 { // used linked-list tailAccessibleFromHead(t, - pool.used.head.getSliceIndex(), - pool.used.tail.getSliceIndex(), + pool.states[stateUsed].head, + pool.states[stateUsed].tail, pool, pool.Size()) headAccessibleFromTail(t, - pool.used.head.getSliceIndex(), - pool.used.tail.getSliceIndex(), + pool.states[stateUsed].head, + pool.states[stateUsed].tail, pool, pool.Size()) @@ -266,14 +417,14 @@ func testInvalidatingHead(t *testing.T, pool *Pool, entities []*unittest.MockEnt // // after invalidating each item, size of free linked-list is incremented by one. 
tailAccessibleFromHead(t, - pool.free.head.getSliceIndex(), - pool.free.tail.getSliceIndex(), + pool.states[stateFree].head, + pool.states[stateFree].tail, pool, uint32(i+1+freeListInitialSize)) headAccessibleFromTail(t, - pool.free.head.getSliceIndex(), - pool.free.tail.getSliceIndex(), + pool.states[stateFree].head, + pool.states[stateFree].tail, pool, uint32(i+1+freeListInitialSize)) } @@ -286,50 +437,52 @@ func testInvalidatingHead(t *testing.T, pool *Pool, entities []*unittest.MockEnt // // used tail should point to the last element in pool, since we are // invalidating head. - require.Equal(t, entities[totalEntitiesStored-1].ID(), usedTail.id) - require.Equal(t, EIndex(totalEntitiesStored-1), pool.used.tail.getSliceIndex()) + require.Equal(t, entities[totalEntitiesStored-1].Identifier, usedTail.key) + require.Equal(t, EIndex(totalEntitiesStored-1), pool.states[stateUsed].tail) // used head must point to the next element in the pool, // i.e., invalidating head moves it forward. - require.Equal(t, entities[i+1].ID(), usedHead.id) - require.Equal(t, EIndex(i+1), pool.used.head.getSliceIndex()) + require.Equal(t, entities[i+1].Identifier, usedHead.key) + require.Equal(t, EIndex(i+1), pool.states[stateUsed].head) } else { // pool is empty // used head and tail must be nil and their corresponding // pointer indices must be undefined. require.Nil(t, usedHead) require.Nil(t, usedTail) - require.True(t, pool.used.tail.isUndefined()) - require.True(t, pool.used.head.isUndefined()) + require.True(t, pool.states[stateUsed].size == 0) + require.Equal(t, pool.states[stateUsed].tail, InvalidIndex) + require.Equal(t, pool.states[stateUsed].head, InvalidIndex) } + checkEachEntityIsInFreeOrUsedState(t, pool) } } // testInvalidatingHead keeps invalidating the tail and evaluates the underlying free and used linked-lists keep updating its tail and remains connected. -func testInvalidatingTail(t *testing.T, pool *Pool, entities []*unittest.MockEntity) { +func testInvalidatingTail(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { size := len(entities) offset := len(pool.poolEntities) - size for i := 0; i < size; i++ { // invalidates tail index - tailIndex := pool.used.tail.getSliceIndex() + tailIndex := pool.states[stateUsed].tail require.Equal(t, EIndex(size-1-i), tailIndex) - pool.invalidateEntityAtIndex(tailIndex) + pool.invalidateValueAtIndex(tailIndex) // old head index must be invalidated require.True(t, pool.isInvalidated(tailIndex)) // unclaimed head should be appended to free entities - require.Equal(t, pool.free.tail.getSliceIndex(), tailIndex) + require.Equal(t, pool.states[stateFree].tail, tailIndex) if offset != 0 { // number of entities is below limit // free must head keeps pointing to first empty index after // adding all entities. - require.Equal(t, EIndex(size), pool.free.head.getSliceIndex()) + require.Equal(t, EIndex(size), pool.states[stateFree].head) } else { // number of entities is greater than or equal to limit // free head must be updated to last element in the pool (size - 1), // and must be kept there for entire test (as we invalidate tail not head). - require.Equal(t, EIndex(size-1), pool.free.head.getSliceIndex()) + require.Equal(t, EIndex(size-1), pool.states[stateFree].head) } // size of pool should be shrunk after each invalidation. 
@@ -342,27 +495,27 @@ func testInvalidatingTail(t *testing.T, pool *Pool, entities []*unittest.MockEnt // used linked-list tailAccessibleFromHead(t, - pool.used.head.getSliceIndex(), - pool.used.tail.getSliceIndex(), + pool.states[stateUsed].head, + pool.states[stateUsed].tail, pool, pool.Size()) headAccessibleFromTail(t, - pool.used.head.getSliceIndex(), - pool.used.tail.getSliceIndex(), + pool.states[stateUsed].head, + pool.states[stateUsed].tail, pool, pool.Size()) // free linked-list tailAccessibleFromHead(t, - pool.free.head.getSliceIndex(), - pool.free.tail.getSliceIndex(), + pool.states[stateFree].head, + pool.states[stateFree].tail, pool, uint32(i+1+offset)) headAccessibleFromTail(t, - pool.free.head.getSliceIndex(), - pool.free.tail.getSliceIndex(), + pool.states[stateFree].head, + pool.states[stateFree].tail, pool, uint32(i+1+offset)) } @@ -373,69 +526,73 @@ func testInvalidatingTail(t *testing.T, pool *Pool, entities []*unittest.MockEnt // pool is not empty yet // // used tail should move backward after each invalidation - require.Equal(t, entities[size-i-2].ID(), usedTail.id) - require.Equal(t, EIndex(size-i-2), pool.used.tail.getSliceIndex()) + require.Equal(t, entities[size-i-2].Identifier, usedTail.key) + require.Equal(t, EIndex(size-i-2), pool.states[stateUsed].tail) // used head must point to the first element in the pool, - require.Equal(t, entities[0].ID(), usedHead.id) - require.Equal(t, EIndex(0), pool.used.head.getSliceIndex()) + require.Equal(t, entities[0].Identifier, usedHead.key) + require.Equal(t, EIndex(0), pool.states[stateUsed].head) } else { // pool is empty // used head and tail must be nil and their corresponding // pointer indices must be undefined. require.Nil(t, usedHead) require.Nil(t, usedTail) - require.True(t, pool.used.tail.isUndefined()) - require.True(t, pool.used.head.isUndefined()) + require.True(t, pool.states[stateUsed].size == 0) + require.Equal(t, pool.states[stateUsed].head, InvalidIndex) + require.Equal(t, pool.states[stateUsed].tail, InvalidIndex) } + checkEachEntityIsInFreeOrUsedState(t, pool) } } // testInitialization evaluates the state of an initialized pool before adding any element to it. -func testInitialization(t *testing.T, pool *Pool, _ []*unittest.MockEntity) { - // head and tail of "used" linked-list must be undefined at initialization time, since we have no elements in the list. - require.True(t, pool.used.head.isUndefined()) - require.True(t, pool.used.tail.isUndefined()) +func testInitialization(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], _ []*unittest.MockEntity) { + // "used" linked-list must have a zero size, since we have no elements in the list. + require.True(t, pool.states[stateUsed].size == 0) + require.Equal(t, pool.states[stateUsed].head, InvalidIndex) + require.Equal(t, pool.states[stateUsed].tail, InvalidIndex) for i := 0; i < len(pool.poolEntities); i++ { if i == 0 { - // head of "free" linked-list should point to index 0 of entities slice. - require.Equal(t, EIndex(i), pool.free.head.getSliceIndex()) + // head of "free" linked-list should still point to index 0 of the entities slice. + require.Equal(t, EIndex(i), pool.states[stateFree].head) // previous element of head must be undefined (linked-list head feature). - require.True(t, pool.poolEntities[i].node.prev.isUndefined()) + require.Equal(t, pool.poolEntities[i].node.prev, InvalidIndex) } if i != 0 { // except head, any element should point back to its previous index in slice.
- require.Equal(t, EIndex(i-1), pool.poolEntities[i].node.prev.getSliceIndex()) + require.Equal(t, EIndex(i-1), pool.poolEntities[i].node.prev) } if i != len(pool.poolEntities)-1 { // except tail, any element should point forward to its next index in slice. - require.Equal(t, EIndex(i+1), pool.poolEntities[i].node.next.getSliceIndex()) + require.Equal(t, EIndex(i+1), pool.poolEntities[i].node.next) } if i == len(pool.poolEntities)-1 { // tail of "free" linked-list should point to the last index in entities slice. - require.Equal(t, EIndex(i), pool.free.tail.getSliceIndex()) + require.Equal(t, EIndex(i), pool.states[stateFree].tail) // next element of tail must be undefined. - require.True(t, pool.poolEntities[i].node.next.isUndefined()) + require.Equal(t, pool.poolEntities[i].node.next, InvalidIndex) } } } // testAddingEntities evaluates health of pool for storing new elements. -func testAddingEntities(t *testing.T, pool *Pool, entitiesToBeAdded []*unittest.MockEntity, ejectionMode EjectionMode) { +func testAddingEntities(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entitiesToBeAdded []*unittest.MockEntity, ejectionMode EjectionMode) { // initially head must be empty - e, ok := pool.Head() + k, e, ok := pool.Head() require.False(t, ok) + require.Equal(t, flow.ZeroID, k) require.Nil(t, e) var uniqueEntities map[flow.Identifier]struct{} if ejectionMode != NoEjection { uniqueEntities = make(map[flow.Identifier]struct{}) for _, entity := range entitiesToBeAdded { - uniqueEntities[entity.ID()] = struct{}{} + uniqueEntities[entity.Identifier] = struct{}{} } require.Equalf(t, len(uniqueEntities), len(entitiesToBeAdded), "entitesToBeAdded must be constructed of unique entities") } @@ -444,7 +601,7 @@ func testAddingEntities(t *testing.T, pool *Pool, entitiesToBeAdded []*unittest. lruEjectedIndex := 0 for i, e := range entitiesToBeAdded { // adding each element must be successful. - entityIndex, slotAvailable, ejectedEntity := pool.Add(e.ID(), e, uint64(i)) + entityIndex, slotAvailable, ejectedEntity, wasEjected := pool.Add(e.Identifier, e, uint64(i)) if i < len(pool.poolEntities) { // in case of no over limit, size of entities linked list should be incremented by each addition. @@ -452,12 +609,15 @@ func testAddingEntities(t *testing.T, pool *Pool, entitiesToBeAdded []*unittest. require.True(t, slotAvailable) require.Nil(t, ejectedEntity) + require.False(t, wasEjected) require.Equal(t, entityIndex, EIndex(i)) // in case pool is not full, the head should retrieve the first added entity. - headEntity, headExists := pool.Head() + headKey, headEntity, headExists := pool.Head() require.True(t, headExists) - require.Equal(t, headEntity.ID(), entitiesToBeAdded[0].ID()) + expectedID := entitiesToBeAdded[0].Identifier + require.Equal(t, headKey, expectedID) + require.Equal(t, headEntity.Identifier, expectedID) } if ejectionMode == LRUEjection { @@ -468,13 +628,16 @@ func testAddingEntities(t *testing.T, pool *Pool, entitiesToBeAdded []*unittest. if i >= len(pool.poolEntities) { require.True(t, slotAvailable) require.NotNil(t, ejectedEntity) + require.True(t, wasEjected) // confirm that ejected entity is the oldest entity require.Equal(t, entitiesToBeAdded[lruEjectedIndex], ejectedEntity) lruEjectedIndex++ // when pool is full and with LRU ejection, the head should move forward with each element added. 
- headEntity, headExists := pool.Head() + headKey, headEntity, headExists := pool.Head() require.True(t, headExists) - require.Equal(t, headEntity.ID(), entitiesToBeAdded[i+1-len(pool.poolEntities)].ID()) + expectedID := entitiesToBeAdded[i+1-len(pool.poolEntities)].Identifier + require.Equal(t, expectedID, headKey) + require.Equal(t, expectedID, headEntity.Identifier) } } @@ -482,8 +645,9 @@ func testAddingEntities(t *testing.T, pool *Pool, entitiesToBeAdded []*unittest. if i >= len(pool.poolEntities) { require.True(t, slotAvailable) require.NotNil(t, ejectedEntity) + require.True(t, wasEjected) // confirm that ejected entity is from list of entitiesToBeAdded - _, ok := uniqueEntities[ejectedEntity.ID()] + _, ok := uniqueEntities[ejectedEntity.Identifier] require.True(t, ok) } } @@ -492,12 +656,15 @@ func testAddingEntities(t *testing.T, pool *Pool, entitiesToBeAdded []*unittest. if i >= len(pool.poolEntities) { require.False(t, slotAvailable) require.Nil(t, ejectedEntity) - require.Equal(t, entityIndex, EIndex(0)) + require.False(t, wasEjected) + require.Equal(t, entityIndex, InvalidIndex) // when pool is full and with NoEjection, the head must keep pointing to the first added element. - headEntity, headExists := pool.Head() + headKey, headEntity, headExists := pool.Head() require.True(t, headExists) - require.Equal(t, headEntity.ID(), entitiesToBeAdded[0].ID()) + expectedID := entitiesToBeAdded[0].Identifier + require.Equal(t, expectedID, headKey) + require.Equal(t, expectedID, headEntity.Identifier) } } @@ -513,32 +680,33 @@ func testAddingEntities(t *testing.T, pool *Pool, entitiesToBeAdded []*unittest. // be moved. expectedUsedHead = (i + 1) % len(pool.poolEntities) } - require.Equal(t, pool.poolEntities[expectedUsedHead].entity, usedHead.entity) + require.Equal(t, pool.poolEntities[expectedUsedHead].value, usedHead.value) // head must be healthy and point back to undefined. - require.True(t, usedHead.node.prev.isUndefined()) + require.Equal(t, usedHead.node.prev, InvalidIndex) } if ejectionMode != NoEjection || i < len(pool.poolEntities) { // new entity must be successfully added to tail of used linked-list - require.Equal(t, entitiesToBeAdded[i], usedTail.entity) + require.Equal(t, entitiesToBeAdded[i], usedTail.value) // used tail must be healthy and point back to undefined. - require.True(t, usedTail.node.next.isUndefined()) + require.Equal(t, usedTail.node.next, InvalidIndex) } if ejectionMode == NoEjection && i >= len(pool.poolEntities) { // used tail must not move - require.Equal(t, entitiesToBeAdded[len(pool.poolEntities)-1], usedTail.entity) + require.Equal(t, entitiesToBeAdded[len(pool.poolEntities)-1], usedTail.value) // used tail must be healthy and point back to undefined. - require.True(t, usedTail.node.next.isUndefined()) + // tail's next is kept at InvalidIndex, even though the list logic no longer relies on it + require.Equal(t, usedTail.node.next, InvalidIndex) } // free head if i < len(pool.poolEntities)-1 { // as long as we are below limit, after adding i element, free head // should move to i+1 element. - require.Equal(t, EIndex(i+1), pool.free.head.getSliceIndex()) + require.Equal(t, EIndex(i+1), pool.states[stateFree].head) // head must be healthy and point back to undefined. - require.True(t, freeHead.node.prev.isUndefined()) + require.Equal(t, freeHead.node.prev, InvalidIndex) } else { // once we go beyond limit, // we run out of free slots, @@ -552,9 +720,9 @@ func testAddingEntities(t *testing.T, pool *Pool, entitiesToBeAdded []*unittest.
// must keep pointing to last index of the array-based linked-list. In other // words, adding element must not change free tail (since only free head is // updated). - require.Equal(t, EIndex(len(pool.poolEntities)-1), pool.free.tail.getSliceIndex()) + require.Equal(t, EIndex(len(pool.poolEntities)-1), pool.states[stateFree].tail) // head tail be healthy and point next to undefined. - require.True(t, freeTail.node.next.isUndefined()) + require.Equal(t, freeTail.node.next, InvalidIndex) } else { // once we go beyond limit, we run out of free slots, and // free tail must be kept at undefined. @@ -572,13 +740,13 @@ func testAddingEntities(t *testing.T, pool *Pool, entitiesToBeAdded []*unittest. usedTraverseStep = uint32(len(pool.poolEntities)) } tailAccessibleFromHead(t, - pool.used.head.getSliceIndex(), - pool.used.tail.getSliceIndex(), + pool.states[stateUsed].head, + pool.states[stateUsed].tail, pool, usedTraverseStep) headAccessibleFromTail(t, - pool.used.head.getSliceIndex(), - pool.used.tail.getSliceIndex(), + pool.states[stateUsed].head, + pool.states[stateUsed].tail, pool, usedTraverseStep) @@ -596,40 +764,42 @@ func testAddingEntities(t *testing.T, pool *Pool, entitiesToBeAdded []*unittest. freeTraverseStep = uint32(0) } tailAccessibleFromHead(t, - pool.free.head.getSliceIndex(), - pool.free.tail.getSliceIndex(), + pool.states[stateFree].head, + pool.states[stateFree].tail, pool, freeTraverseStep) headAccessibleFromTail(t, - pool.free.head.getSliceIndex(), - pool.free.tail.getSliceIndex(), + pool.states[stateFree].head, + pool.states[stateFree].tail, pool, freeTraverseStep) + + checkEachEntityIsInFreeOrUsedState(t, pool) } } // testRetrievingEntitiesFrom evaluates that all entities starting from given index are retrievable from pool. -func testRetrievingEntitiesFrom(t *testing.T, pool *Pool, entities []*unittest.MockEntity, from EIndex) { +func testRetrievingEntitiesFrom(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity, from EIndex) { testRetrievingEntitiesInRange(t, pool, entities, from, EIndex(len(entities))) } // testRetrievingEntitiesInRange evaluates that all entities in the given range are retrievable from pool. -func testRetrievingEntitiesInRange(t *testing.T, pool *Pool, entities []*unittest.MockEntity, from EIndex, to EIndex) { +func testRetrievingEntitiesInRange(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity, from EIndex, to EIndex) { for i := from; i < to; i++ { actualID, actual, _ := pool.Get(i % EIndex(len(pool.poolEntities))) - require.Equal(t, entities[i].ID(), actualID, i) + require.Equal(t, entities[i].Identifier, actualID, i) require.Equal(t, entities[i], actual, i) } } // testRetrievingCount evaluates that exactly expected number of entities are retrievable from underlying pool. 
-func testRetrievingCount(t *testing.T, pool *Pool, entities []*unittest.MockEntity, expected int) { +func testRetrievingCount(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity, expected int) { actualRetrievable := 0 for i := EIndex(0); i < EIndex(len(entities)); i++ { for j := EIndex(0); j < EIndex(len(pool.poolEntities)); j++ { actualID, actual, _ := pool.Get(j % EIndex(len(pool.poolEntities))) - if entities[i].ID() == actualID && entities[i] == actual { + if entities[i].Identifier == actualID && entities[i] == actual { actualRetrievable++ } } @@ -643,12 +813,12 @@ func withTestScenario(t *testing.T, limit uint32, entityCount uint32, ejectionMode EjectionMode, - helpers ...func(*testing.T, *Pool, []*unittest.MockEntity)) { + helpers ...func(*testing.T, *Pool[flow.Identifier, *unittest.MockEntity], []*unittest.MockEntity)) { - pool := NewHeroPool(limit, ejectionMode) + pool := NewHeroPool[flow.Identifier, *unittest.MockEntity](limit, ejectionMode, unittest.Logger()) // head on underlying linked-list value should be uninitialized - require.True(t, pool.used.head.isUndefined()) + require.True(t, pool.states[stateUsed].size == 0) require.Equal(t, pool.Size(), uint32(0)) entities := unittest.EntityListFixture(uint(entityCount)) @@ -659,7 +829,7 @@ func withTestScenario(t *testing.T, } // tailAccessibleFromHead checks tail of given entities linked-list is reachable from its head by traversing expected number of steps. -func tailAccessibleFromHead(t *testing.T, headSliceIndex EIndex, tailSliceIndex EIndex, pool *Pool, steps uint32) { +func tailAccessibleFromHead(t *testing.T, headSliceIndex EIndex, tailSliceIndex EIndex, pool *Pool[flow.Identifier, *unittest.MockEntity], steps uint32) { seen := make(map[EIndex]struct{}) index := headSliceIndex @@ -673,13 +843,13 @@ func tailAccessibleFromHead(t *testing.T, headSliceIndex EIndex, tailSliceIndex _, ok := seen[index] require.False(t, ok, "duplicate identifiers found") - require.False(t, pool.poolEntities[index].node.next.isUndefined(), "tail not found, and reached end of list") - index = pool.poolEntities[index].node.next.getSliceIndex() + require.NotEqual(t, pool.poolEntities[index].node.next, InvalidIndex, "tail not found, and reached end of list") + index = pool.poolEntities[index].node.next } } // headAccessibleFromTail checks head of given entities linked list is reachable from its tail by traversing expected number of steps. -func headAccessibleFromTail(t *testing.T, headSliceIndex EIndex, tailSliceIndex EIndex, pool *Pool, total uint32) { +func headAccessibleFromTail(t *testing.T, headSliceIndex EIndex, tailSliceIndex EIndex, pool *Pool[flow.Identifier, *unittest.MockEntity], total uint32) { seen := make(map[EIndex]struct{}) index := tailSliceIndex @@ -693,6 +863,31 @@ func headAccessibleFromTail(t *testing.T, headSliceIndex EIndex, tailSliceIndex _, ok := seen[index] require.False(t, ok, "duplicate identifiers found") - index = pool.poolEntities[index].node.prev.getSliceIndex() + index = pool.poolEntities[index].node.prev + } +} + +// checkEachEntityIsInFreeOrUsedState checks if each entity in the pool belongs exactly to one of the state lists. 
+func checkEachEntityIsInFreeOrUsedState(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity]) { + poolCapacity := len(pool.poolEntities) + // check size + require.Equal(t, int(pool.states[stateFree].size+pool.states[stateUsed].size), poolCapacity, "Pool capacity is not equal to the sum of used and free sizes") + // check elements + nodesInFree := discoverEntitiesBelongingToStateList(t, pool, stateFree) + nodesInUsed := discoverEntitiesBelongingToStateList(t, pool, stateUsed) + for i := 0; i < poolCapacity; i++ { + require.False(t, !nodesInFree[i] && !nodesInUsed[i], "Node is not in any state list") + require.False(t, nodesInFree[i] && nodesInUsed[i], "Node is in two state lists at the same time") + } +} + +// discoverEntitiesBelongingToStateList discovers all entities in the pool that belong to the given list. +func discoverEntitiesBelongingToStateList(t *testing.T, pool *Pool[flow.Identifier, *unittest.MockEntity], stateType StateIndex) []bool { + result := make([]bool, len(pool.poolEntities)) + for nodeIndex := pool.states[stateType].head; nodeIndex != InvalidIndex; { + require.False(t, result[nodeIndex], "A node is present two times in the same state list") + result[nodeIndex] = true + nodeIndex = pool.poolEntities[nodeIndex].node.next } + return result } diff --git a/module/mempool/herocache/backdata/tracer.go b/module/mempool/herocache/backdata/tracer.go index f7d85b70d20..a0af01a9ceb 100644 --- a/module/mempool/herocache/backdata/tracer.go +++ b/module/mempool/herocache/backdata/tracer.go @@ -1,25 +1,23 @@ package herocache -import "github.com/onflow/flow-go/model/flow" - -type CacheOpt func(*Cache) +type CacheOpt[V any] func(*Cache[V]) // Tracer is a generic interface that is used to report specific events that happen during // lifetime of Cache and are potentially interesting for external consumer. -type Tracer interface { +type Tracer[V any] interface { // EntityEjectionDueToEmergency reports ejected entity whenever a bucket is found full and all of its keys are valid, i.e., // each key belongs to an existing (key, entity) pair. // Hence, adding a new key to that bucket will replace the oldest valid key inside that bucket. // This ejection happens with very low, but still cryptographically non-negligible probability. - EntityEjectionDueToEmergency(ejectedEntity flow.Entity) + EntityEjectionDueToEmergency(ejectedEntity V) // EntityEjectionDueToFullCapacity reports ejected entity whenever adding a new (key, entity) to the cache results in ejection of another (key', entity') pair. // This normally happens -- and is expected -- when the cache is full. - EntityEjectionDueToFullCapacity(ejectedEntity flow.Entity) + EntityEjectionDueToFullCapacity(ejectedEntity V) } // WithTracer injects tracer into the cache -func WithTracer(t Tracer) CacheOpt { - return func(c *Cache) { +func WithTracer[V any](t Tracer[V]) CacheOpt[V] { + return func(c *Cache[V]) { c.tracer = t } } diff --git a/module/mempool/herocache/dns_cache.go b/module/mempool/herocache/dns_cache.go index db4c9a9b67b..b15c17719a8 100644 --- a/module/mempool/herocache/dns_cache.go +++ b/module/mempool/herocache/dns_cache.go @@ -14,94 +14,80 @@ import ( "github.com/onflow/flow-go/module/mempool/stdmap" ) +// DNSCache provides a caching mechanism for DNS IP and TXT records. +// +// The cache stores IP records in ipCache and TXT records in txtCache, using the domain's hashed +// value as the key.
type DNSCache struct { - ipCache *stdmap.Backend - txtCache *stdmap.Backend + ipCache *stdmap.Backend[flow.Identifier, *mempool.IpRecord] + txtCache *stdmap.Backend[flow.Identifier, *mempool.TxtRecord] } -func NewDNSCache(sizeLimit uint32, logger zerolog.Logger, ipCollector module.HeroCacheMetrics, txtCollector module.HeroCacheMetrics) *DNSCache { +// NewDNSCache creates and returns a new instance of DNSCache. +// It initializes both the IP and TXT record caches with the provided size limit, logger, +// and cache metrics collectors for IP and TXT records. +func NewDNSCache(sizeLimit uint32, logger zerolog.Logger, ipCollector module.HeroCacheMetrics, txtCollector module.HeroCacheMetrics, +) *DNSCache { return &DNSCache{ txtCache: stdmap.NewBackend( - stdmap.WithBackData( - herocache.NewCache( + stdmap.WithMutableBackData[flow.Identifier, *mempool.TxtRecord]( + herocache.NewCache[*mempool.TxtRecord]( sizeLimit, herocache.DefaultOversizeFactor, heropool.LRUEjection, logger.With().Str("mempool", "dns-txt-cache").Logger(), - txtCollector))), + txtCollector, + ), + ), + ), ipCache: stdmap.NewBackend( - stdmap.WithBackData( - herocache.NewCache( + stdmap.WithMutableBackData[flow.Identifier, *mempool.IpRecord]( + herocache.NewCache[*mempool.IpRecord]( sizeLimit, herocache.DefaultOversizeFactor, heropool.LRUEjection, logger.With().Str("mempool", "dns-ip-cache").Logger(), - ipCollector))), + ipCollector, + ), + ), + ), } } // PutIpDomain adds the given ip domain into the cache. func (d *DNSCache) PutIpDomain(domain string, addresses []net.IPAddr, timestamp int64) bool { - i := ipEntity{ - IpRecord: mempool.IpRecord{ - Domain: domain, - Addresses: addresses, - Timestamp: timestamp, - Locked: false, - }, - id: domainToIdentifier(domain), + ipRecord := &mempool.IpRecord{ + Domain: domain, + Addresses: addresses, + Timestamp: timestamp, + Locked: false, } - return d.ipCache.Add(i) + return d.ipCache.Add(domainToIdentifier(domain), ipRecord) } // PutTxtRecord adds the given txt record into the cache. func (d *DNSCache) PutTxtRecord(domain string, record []string, timestamp int64) bool { - t := txtEntity{ - TxtRecord: mempool.TxtRecord{ - Txt: domain, - Records: record, - Timestamp: timestamp, - Locked: false, - }, - id: domainToIdentifier(domain), + txtRecord := &mempool.TxtRecord{ + Txt: domain, + Records: record, + Timestamp: timestamp, + Locked: false, } - return d.txtCache.Add(t) + return d.txtCache.Add(domainToIdentifier(domain), txtRecord) } // GetDomainIp returns the ip domain if exists in the cache. // The boolean return value determines if domain exists in the cache. func (d *DNSCache) GetDomainIp(domain string) (*mempool.IpRecord, bool) { - entity, ok := d.ipCache.ByID(domainToIdentifier(domain)) - if !ok { - return nil, false - } - - i, ok := entity.(ipEntity) - if !ok { - return nil, false - } - ipRecord := i.IpRecord - - return &ipRecord, true + return d.ipCache.Get(domainToIdentifier(domain)) } // GetTxtRecord returns the txt record if exists in the cache. // The boolean return value determines if record exists in the cache. func (d *DNSCache) GetTxtRecord(domain string) (*mempool.TxtRecord, bool) { - entity, ok := d.txtCache.ByID(domainToIdentifier(domain)) - if !ok { - return nil, false - } - - t, ok := entity.(txtEntity) - if !ok { - return nil, false - } - txtRecord := t.TxtRecord - - return &txtRecord, true + return d.txtCache.Get(domainToIdentifier(domain)) } // RemoveIp removes an ip domain from cache. 
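A short hedged usage sketch of the refactored DNSCache follows; the no-op metrics collectors, logger, and concrete address values are illustrative assumptions, not code from this diff.

package main

import (
    "fmt"
    "net"
    "time"

    "github.com/rs/zerolog"

    "github.com/onflow/flow-go/module/mempool/herocache"
    "github.com/onflow/flow-go/module/metrics"
)

func main() {
    cache := herocache.NewDNSCache(
        1000,                       // size limit for each underlying cache
        zerolog.Nop(),              // logger
        metrics.NewNoopCollector(), // ip cache metrics (assumed no-op collector)
        metrics.NewNoopCollector(), // txt cache metrics (assumed no-op collector)
    )

    // Records are now plain *mempool.IpRecord values; no entity wrapper types involved.
    cache.PutIpDomain("example.com", []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, time.Now().Unix())
    if record, ok := cache.GetDomainIp("example.com"); ok {
        fmt.Println(record.Domain, len(record.Addresses), record.Locked)
    }
}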
@@ -125,33 +111,29 @@ func (d *DNSCache) RemoveTxt(domain string) bool { // So the locking happens to avoid any other parallel resolving func (d *DNSCache) LockIPDomain(domain string) (bool, error) { locked := false - err := d.ipCache.Run(func(backdata mempool.BackData) error { - id := domainToIdentifier(domain) - entity, ok := backdata.ByID(id) - if !ok { - return fmt.Errorf("ip record does not exist in cache for locking: %s", domain) - } + err := d.ipCache.Run(func(backData mempool.BackData[flow.Identifier, *mempool.IpRecord]) error { + key := domainToIdentifier(domain) - record, ok := entity.(ipEntity) + ipRecord, ok := backData.Get(key) if !ok { - return fmt.Errorf("unexpected type retrieved, expected: %T, obtained: %T", ipEntity{}, entity) + return fmt.Errorf("ip record does not exist in cache for locking: %s", domain) } - if record.Locked { + if ipRecord.Locked { return nil // record has already been locked } - record.Locked = true + ipRecord.Locked = true - if _, removed := backdata.Remove(id); !removed { + if _, removed := backData.Remove(key); !removed { return fmt.Errorf("ip record could not be removed from backdata") } - if added := backdata.Add(id, record); !added { + if added := backData.Add(key, ipRecord); !added { return fmt.Errorf("updated ip record could not be added to back data") } - locked = record.Locked + locked = ipRecord.Locked return nil }) @@ -160,23 +142,20 @@ func (d *DNSCache) LockIPDomain(domain string) (bool, error) { // UpdateIPDomain updates the dns record for the given ip domain with the new address and timestamp values. func (d *DNSCache) UpdateIPDomain(domain string, addresses []net.IPAddr, timestamp int64) error { - return d.ipCache.Run(func(backdata mempool.BackData) error { - id := domainToIdentifier(domain) + return d.ipCache.Run(func(backData mempool.BackData[flow.Identifier, *mempool.IpRecord]) error { + key := domainToIdentifier(domain) // removes old entry if exists. - backdata.Remove(id) - - ipRecord := ipEntity{ - IpRecord: mempool.IpRecord{ - Domain: domain, - Addresses: addresses, - Timestamp: timestamp, - Locked: false, // by default an ip record is unlocked. - }, - id: id, + backData.Remove(key) + + ipRecord := &mempool.IpRecord{ + Domain: domain, + Addresses: addresses, + Timestamp: timestamp, + Locked: false, // by default an ip record is unlocked. } - if added := backdata.Add(id, ipRecord); !added { + if added := backData.Add(key, ipRecord); !added { return fmt.Errorf("updated ip record could not be added to backdata") } @@ -186,23 +165,20 @@ func (d *DNSCache) UpdateIPDomain(domain string, addresses []net.IPAddr, timesta // UpdateTxtRecord updates the dns record for the given txt domain with the new address and timestamp values. func (d *DNSCache) UpdateTxtRecord(txt string, records []string, timestamp int64) error { - return d.txtCache.Run(func(backdata mempool.BackData) error { - id := domainToIdentifier(txt) + return d.txtCache.Run(func(backData mempool.BackData[flow.Identifier, *mempool.TxtRecord]) error { + key := domainToIdentifier(txt) // removes old entry if exists. - backdata.Remove(id) - - txtRecord := txtEntity{ - TxtRecord: mempool.TxtRecord{ - Txt: txt, - Records: records, - Timestamp: timestamp, - Locked: false, // by default a txt record is unlocked. - }, - id: id, + backData.Remove(key) + + txtRecord := &mempool.TxtRecord{ + Txt: txt, + Records: records, + Timestamp: timestamp, + Locked: false, // by default a txt record is unlocked. 
} - if added := backdata.Add(id, txtRecord); !added { + if added := backData.Add(key, txtRecord); !added { return fmt.Errorf("updated txt record could not be added to backdata") } @@ -221,33 +197,29 @@ func (d *DNSCache) UpdateTxtRecord(txt string, records []string, timestamp int64 // So the locking happens to avoid any other parallel resolving. func (d *DNSCache) LockTxtRecord(txt string) (bool, error) { locked := false - err := d.txtCache.Run(func(backdata mempool.BackData) error { - id := domainToIdentifier(txt) - entity, ok := backdata.ByID(id) - if !ok { - return fmt.Errorf("txt record does not exist in cache for locking: %s", txt) - } + err := d.txtCache.Run(func(backData mempool.BackData[flow.Identifier, *mempool.TxtRecord]) error { + key := domainToIdentifier(txt) - record, ok := entity.(txtEntity) + txtRecord, ok := backData.Get(key) if !ok { - return fmt.Errorf("unexpected type retrieved, expected: %T, obtained: %T", txtEntity{}, entity) + return fmt.Errorf("txt record does not exist in cache for locking: %s", txt) } - if record.Locked { + if txtRecord.Locked { return nil // record has already been locked } - record.Locked = true + txtRecord.Locked = true - if _, removed := backdata.Remove(id); !removed { + if _, removed := backData.Remove(key); !removed { return fmt.Errorf("txt record could not be removed from backdata") } - if added := backdata.Add(id, record); !added { + if added := backData.Add(key, txtRecord); !added { return fmt.Errorf("updated txt record could not be added to back data") } - locked = record.Locked + locked = txtRecord.Locked return nil }) @@ -257,42 +229,11 @@ func (d *DNSCache) LockTxtRecord(txt string) (bool, error) { // Size returns total domains maintained into this cache. // The first returned value determines number of ip domains. // The second returned value determines number of txt records. -func (d DNSCache) Size() (uint, uint) { +func (d *DNSCache) Size() (uint, uint) { return d.ipCache.Size(), d.txtCache.Size() } -// ipEntity is a dns cache entry for ip records. -type ipEntity struct { - mempool.IpRecord - // caching identifier to avoid cpu overhead - // per query. - id flow.Identifier -} - -func (i ipEntity) ID() flow.Identifier { - return i.id -} - -func (i ipEntity) Checksum() flow.Identifier { - return domainToIdentifier(i.IpRecord.Domain) -} - -// txtEntity is a dns cache entry for txt records. -type txtEntity struct { - mempool.TxtRecord - // caching identifier to avoid cpu overhead - // per query. - id flow.Identifier -} - -func (t txtEntity) ID() flow.Identifier { - return t.id -} - -func (t txtEntity) Checksum() flow.Identifier { - return domainToIdentifier(t.TxtRecord.Txt) -} - +// domainToIdentifier is a helper function for creating the key for DNSCache by hashing the domain. 
func domainToIdentifier(domain string) flow.Identifier { return flow.MakeID(domain) } diff --git a/module/mempool/herocache/execution_data.go b/module/mempool/herocache/execution_data.go index 75251cbc923..ff44b23b371 100644 --- a/module/mempool/herocache/execution_data.go +++ b/module/mempool/herocache/execution_data.go @@ -1,8 +1,6 @@ package herocache import ( - "fmt" - "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" @@ -10,86 +8,27 @@ import ( "github.com/onflow/flow-go/module/executiondatasync/execution_data" herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" - "github.com/onflow/flow-go/module/mempool/herocache/internal" "github.com/onflow/flow-go/module/mempool/stdmap" ) +// BlockExecutionData implements the block execution data memory pool. +// Stored execution data are keyed by block id. type BlockExecutionData struct { - c *stdmap.Backend + *stdmap.Backend[flow.Identifier, *execution_data.BlockExecutionDataEntity] } // NewBlockExecutionData implements a block execution data mempool based on hero cache. func NewBlockExecutionData(limit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *BlockExecutionData { return &BlockExecutionData{ - c: stdmap.NewBackend( - stdmap.WithBackData( - herocache.NewCache(limit, + stdmap.NewBackend( + stdmap.WithMutableBackData[flow.Identifier, *execution_data.BlockExecutionDataEntity]( + herocache.NewCache[*execution_data.BlockExecutionDataEntity](limit, herocache.DefaultOversizeFactor, heropool.LRUEjection, logger.With().Str("mempool", "block_execution_data").Logger(), - collector))), - } -} - -// Has checks whether the block execution data with the given hash is currently in -// the memory pool. -func (t *BlockExecutionData) Has(id flow.Identifier) bool { - return t.c.Has(id) -} - -// Add adds a block execution data to the mempool. -func (t *BlockExecutionData) Add(ed *execution_data.BlockExecutionDataEntity) bool { - entity := internal.NewWrappedEntity(ed.BlockID, ed) - return t.c.Add(*entity) -} - -// ByID returns the block execution data with the given ID from the mempool. -func (t *BlockExecutionData) ByID(txID flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool) { - entity, exists := t.c.ByID(txID) - if !exists { - return nil, false - } - - return unwrap(entity), true -} - -// All returns all block execution data from the mempool. Since it is using the HeroCache, All guarantees returning -// all block execution data in the same order as they are added. -func (t *BlockExecutionData) All() []*execution_data.BlockExecutionDataEntity { - entities := t.c.All() - eds := make([]*execution_data.BlockExecutionDataEntity, 0, len(entities)) - for _, entity := range entities { - eds = append(eds, unwrap(entity)) + collector, + ), + ), + ), } - return eds -} - -// Clear removes all block execution data stored in this mempool. -func (t *BlockExecutionData) Clear() { - t.c.Clear() -} - -// Size returns total number of stored block execution data. -func (t *BlockExecutionData) Size() uint { - return t.c.Size() -} - -// Remove removes block execution data from mempool. -func (t *BlockExecutionData) Remove(id flow.Identifier) bool { - return t.c.Remove(id) -} - -// unwrap converts an internal.WrappedEntity to a BlockExecutionDataEntity. 
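With the wrapper and unwrap helpers deleted, BlockExecutionData now exposes the embedded generic Backend API directly. A short sketch of the resulting call sites; storeExecutionData is a hypothetical helper, and it assumes only the Add/Get/All signatures exercised by the tests in this diff.

package herocache_test // hypothetical example file

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
	"github.com/onflow/flow-go/module/mempool/herocache"
)

// storeExecutionData adds one entry under an explicit block ID key and reads it back.
func storeExecutionData(pool *herocache.BlockExecutionData, blockID flow.Identifier, ed *execution_data.BlockExecutionDataEntity) {
	if added := pool.Add(blockID, ed); !added {
		return // key already present; Add never overwrites
	}
	if got, ok := pool.Get(blockID); ok {
		fmt.Println(got.BlockID == blockID) // true
	}
	// All now returns a map keyed by block ID rather than an ordered slice.
	all := pool.All()
	_ = all[blockID]
}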
-func unwrap(entity flow.Entity) *execution_data.BlockExecutionDataEntity { - wrappedEntity, ok := entity.(internal.WrappedEntity) - if !ok { - panic(fmt.Sprintf("invalid wrapped entity in block execution data pool (%T)", entity)) - } - - ed, ok := wrappedEntity.Entity.(*execution_data.BlockExecutionDataEntity) - if !ok { - panic(fmt.Sprintf("invalid entity in block execution data pool (%T)", wrappedEntity.Entity)) - } - - return ed } diff --git a/module/mempool/herocache/execution_data_test.go b/module/mempool/herocache/execution_data_test.go index 46c0d302956..c83a5b0e906 100644 --- a/module/mempool/herocache/execution_data_test.go +++ b/module/mempool/herocache/execution_data_test.go @@ -21,12 +21,12 @@ func TestBlockExecutionDataPool(t *testing.T) { cache := herocache.NewBlockExecutionData(1000, unittest.Logger(), metrics.NewNoopCollector()) t.Run("should be able to add first", func(t *testing.T) { - added := cache.Add(ed1) + added := cache.Add(ed1.BlockID, ed1) assert.True(t, added) }) t.Run("should be able to add second", func(t *testing.T) { - added := cache.Add(ed2) + added := cache.Add(ed2.BlockID, ed2) assert.True(t, added) }) @@ -36,7 +36,7 @@ func TestBlockExecutionDataPool(t *testing.T) { }) t.Run("should be able to get first by blockID", func(t *testing.T) { - actual, exists := cache.ByID(ed1.BlockID) + actual, exists := cache.Get(ed1.BlockID) assert.True(t, exists) assert.Equal(t, ed1, actual) }) @@ -49,7 +49,9 @@ func TestBlockExecutionDataPool(t *testing.T) { t.Run("should be able to retrieve all", func(t *testing.T) { items := cache.All() assert.Len(t, items, 1) - assert.Equal(t, ed1, items[0]) + val, exists := items[ed1.BlockID] + require.True(t, exists) + assert.Equal(t, ed1, val) }) t.Run("should be able to clear", func(t *testing.T) { @@ -71,7 +73,7 @@ func TestBlockExecutionDataConcurrentWriteAndRead(t *testing.T) { // storing all cache for i := 0; i < total; i++ { go func(ed *execution_data.BlockExecutionDataEntity) { - require.True(t, cache.Add(ed)) + require.True(t, cache.Add(ed.BlockID, ed)) wg.Done() }(execDatas[i]) @@ -84,7 +86,7 @@ func TestBlockExecutionDataConcurrentWriteAndRead(t *testing.T) { // reading all cache for i := 0; i < total; i++ { go func(ed *execution_data.BlockExecutionDataEntity) { - actual, ok := cache.ByID(ed.BlockID) + actual, ok := cache.Get(ed.BlockID) require.True(t, ok) require.Equal(t, ed, actual) @@ -103,8 +105,8 @@ func TestBlockExecutionDataAllReturnsInOrder(t *testing.T) { // storing all cache for i := 0; i < total; i++ { - require.True(t, cache.Add(execDatas[i])) - ed, ok := cache.ByID(execDatas[i].BlockID) + require.True(t, cache.Add(execDatas[i].BlockID, execDatas[i])) + ed, ok := cache.Get(execDatas[i].BlockID) require.True(t, ok) require.Equal(t, execDatas[i], ed) } @@ -112,6 +114,8 @@ func TestBlockExecutionDataAllReturnsInOrder(t *testing.T) { // all cache must be retrieved in the same order as they are added all := cache.All() for i := 0; i < total; i++ { - require.Equal(t, execDatas[i], all[i]) + val, exists := all[execDatas[i].BlockID] + require.True(t, exists) + assert.Equal(t, execDatas[i], val) } } diff --git a/module/mempool/herocache/internal/wrapped_entity.go b/module/mempool/herocache/internal/wrapped_entity.go deleted file mode 100644 index 342f9094f3c..00000000000 --- a/module/mempool/herocache/internal/wrapped_entity.go +++ /dev/null @@ -1,33 +0,0 @@ -package internal - -import "github.com/onflow/flow-go/model/flow" - -// WrappedEntity is a wrapper around a flow.Entity that allows overriding the ID. 
-// The has 2 main use cases: -// - when the ID is expensive to compute, we can pre-compute it and use it for the cache -// - when caching an entity using a different ID than what's returned by ID(). For example, if there -// is a 1:1 mapping between a block and an entity, we can use the block ID as the cache key. -type WrappedEntity struct { - flow.Entity - id flow.Identifier -} - -var _ flow.Entity = (*WrappedEntity)(nil) - -// NewWrappedEntity creates a new WrappedEntity -func NewWrappedEntity(id flow.Identifier, entity flow.Entity) *WrappedEntity { - return &WrappedEntity{ - Entity: entity, - id: id, - } -} - -// ID returns the cached ID of the wrapped entity -func (w WrappedEntity) ID() flow.Identifier { - return w.id -} - -// Checksum returns th cached ID of the wrapped entity -func (w WrappedEntity) Checksum() flow.Identifier { - return w.id -} diff --git a/module/mempool/herocache/transactions.go b/module/mempool/herocache/transactions.go index a052728de52..37422ff0971 100644 --- a/module/mempool/herocache/transactions.go +++ b/module/mempool/herocache/transactions.go @@ -1,88 +1,144 @@ package herocache import ( - "fmt" - "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/mempool" herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" "github.com/onflow/flow-go/module/mempool/stdmap" ) type Transactions struct { - c *stdmap.Backend + *stdmap.Backend[flow.Identifier, *flow.TransactionBody] + byPayer map[flow.Address]map[flow.Identifier]struct{} } +var _ mempool.Transactions = (*Transactions)(nil) + // NewTransactions implements a transactions mempool based on hero cache. func NewTransactions(limit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *Transactions { + byPayer := make(map[flow.Address]map[flow.Identifier]struct{}) t := &Transactions{ - c: stdmap.NewBackend( - stdmap.WithBackData( - herocache.NewCache(limit, - herocache.DefaultOversizeFactor, - heropool.LRUEjection, - logger.With().Str("mempool", "transactions").Logger(), - collector))), + byPayer: byPayer, } - return t -} + tracer := &ejectionTracer{transactions: t} + t.Backend = stdmap.NewBackend( + stdmap.WithMutableBackData[flow.Identifier, *flow.TransactionBody]( + herocache.NewCache[*flow.TransactionBody](limit, + herocache.DefaultOversizeFactor, + heropool.LRUEjection, + logger.With().Str("mempool", "transactions").Logger(), + collector, + herocache.WithTracer[*flow.TransactionBody](tracer)))) -// Has checks whether the transaction with the given hash is currently in -// the memory pool. -func (t Transactions) Has(id flow.Identifier) bool { - return t.c.Has(id) + return t } // Add adds a transaction to the mempool. -func (t *Transactions) Add(tx *flow.TransactionBody) bool { - // Warning! reference pointer must be dereferenced before adding to HeroCache. - // This is crucial for its heap object optimizations. - return t.c.Add(*tx) +func (t *Transactions) Add(txID flow.Identifier, tx *flow.TransactionBody) bool { + added := false + err := t.Run(func(backdata mempool.BackData[flow.Identifier, *flow.TransactionBody]) error { + // Warning! reference pointer must be dereferenced before adding to HeroCache. + // This is crucial for its heap object optimizations. 
+ added = backdata.Add(txID, tx) + if !added { + return nil + } + txns, ok := t.byPayer[tx.Payer] + if !ok { + txns = make(map[flow.Identifier]struct{}) + t.byPayer[tx.Payer] = txns + } + txns[txID] = struct{}{} + return nil + }) + if err != nil { + panic("failed to add transaction to mempool: " + err.Error()) + } + return added } -// ByID returns the transaction with the given ID from the mempool. -func (t Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, bool) { - entity, exists := t.c.ByID(txID) - if !exists { - return nil, false +// Clear removes all transactions stored in this mempool. +func (t *Transactions) Clear() { + err := t.Run(func(backdata mempool.BackData[flow.Identifier, *flow.TransactionBody]) error { + backdata.Clear() + t.byPayer = make(map[flow.Address]map[flow.Identifier]struct{}) + return nil + }) + if err != nil { + panic("failed to clear transactions mempool: " + err.Error()) } - tx, ok := entity.(flow.TransactionBody) - if !ok { - panic(fmt.Sprintf("invalid entity in transaction pool (%T)", entity)) +} + +// Remove removes transaction from mempool. +func (t *Transactions) Remove(id flow.Identifier) bool { + removed := false + err := t.Run(func(backdata mempool.BackData[flow.Identifier, *flow.TransactionBody]) error { + var txBody *flow.TransactionBody + txBody, removed = backdata.Remove(id) + if !removed { + return nil + } + t.removeFromIndex(id, txBody.Payer) + return nil + }) + if err != nil { + panic("failed to remove transaction from mempool: " + err.Error()) } - return &tx, true + return removed } -// All returns all transactions from the mempool. Since it is using the HeroCache, All guarantees returning -// all transactions in the same order as they are added. -func (t Transactions) All() []*flow.TransactionBody { - entities := t.c.All() - txs := make([]*flow.TransactionBody, 0, len(entities)) - for _, entity := range entities { - tx, ok := entity.(flow.TransactionBody) - if !ok { - panic(fmt.Sprintf("invalid entity in transaction pool (%T)", entity)) +// ByPayer retrieves all transactions from the memory pool that are sent +// by the given payer. +func (t *Transactions) ByPayer(payer flow.Address) []*flow.TransactionBody { + var result []*flow.TransactionBody + err := t.Run(func(backdata mempool.BackData[flow.Identifier, *flow.TransactionBody]) error { + ids := t.byPayer[payer] + for id := range ids { + txBody, exists := backdata.Get(id) + if !exists { + continue + } + result = append(result, txBody) } - txs = append(txs, &tx) + return nil + }) + if err != nil { + panic("failed to get transactions by payer: " + err.Error()) } - return txs + return result } -// Clear removes all transactions stored in this mempool. -func (t *Transactions) Clear() { - t.c.Clear() +// removeFromIndex removes the transaction with the given ID from the index. +// This function expects that the underlying backdata has been locked by the caller; otherwise the operation won't be atomic. +func (t *Transactions) removeFromIndex(id flow.Identifier, payer flow.Address) { + txns := t.byPayer[payer] + delete(txns, id) + if len(txns) == 0 { + delete(t.byPayer, payer) + } } -// Size returns total number of stored transactions. -func (t Transactions) Size() uint { - return t.c.Size() +// ejectionTracer implements the herocache.Tracer interface and is used to clean up the index +// when a transaction is ejected from the HeroCache due to capacity or emergency. +type ejectionTracer struct { + transactions *Transactions } -// Remove removes transaction from mempool.
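A small sketch of the new payer-indexed Transactions pool, using the list fixture this PR's tests already rely on; the example function is hypothetical. The byPayer index is updated inside Backend.Run on Add/Remove, and the ejectionTracer keeps it consistent when HeroCache evicts entries.

package herocache_test // hypothetical example file

import (
	"fmt"

	"github.com/onflow/flow-go/module/mempool/herocache"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/utils/unittest"
)

func ExampleTransactions_ByPayer() {
	pool := herocache.NewTransactions(1000, unittest.Logger(), metrics.NewNoopCollector())

	txs := unittest.TransactionBodyListFixture(2)
	for i := range txs {
		// The key is now passed explicitly, so ID() is computed once by the caller.
		_ = pool.Add(txs[i].ID(), &txs[i])
	}

	// Look up every pooled transaction sent by the first fixture's payer.
	for _, tx := range pool.ByPayer(txs[0].Payer) {
		fmt.Println(tx.ID())
	}
}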
-func (t *Transactions) Remove(id flow.Identifier) bool { - return t.c.Remove(id) +var _ herocache.Tracer[*flow.TransactionBody] = (*ejectionTracer)(nil) + +// EntityEjectionDueToEmergency calls removeFromIndex on the transactions to clean up the index. This is safe since +// backdata is locked by the caller when this function is called. +func (t *ejectionTracer) EntityEjectionDueToEmergency(txBody *flow.TransactionBody) { + t.transactions.removeFromIndex(txBody.ID(), txBody.Payer) +} + +// EntityEjectionDueToFullCapacity calls removeFromIndex on the transactions to clean up the index. This is safe since +// backdata is locked by the caller when this function is called. +func (t *ejectionTracer) EntityEjectionDueToFullCapacity(txBody *flow.TransactionBody) { + t.transactions.removeFromIndex(txBody.ID(), txBody.Payer) } diff --git a/module/mempool/herocache/transactions_test.go b/module/mempool/herocache/transactions_test.go index a56cd1fe4f2..6fbf710b949 100644 --- a/module/mempool/herocache/transactions_test.go +++ b/module/mempool/herocache/transactions_test.go @@ -21,12 +21,12 @@ func TestTransactionPool(t *testing.T) { transactions := herocache.NewTransactions(1000, unittest.Logger(), metrics.NewNoopCollector()) t.Run("should be able to add first", func(t *testing.T) { - added := transactions.Add(&tx1) + added := transactions.Add(tx1.ID(), &tx1) assert.True(t, added) }) t.Run("should be able to add second", func(t *testing.T) { - added := transactions.Add(&tx2) + added := transactions.Add(tx2.ID(), &tx2) assert.True(t, added) }) @@ -36,7 +36,7 @@ func TestTransactionPool(t *testing.T) { }) t.Run("should be able to get first", func(t *testing.T) { - actual, exists := transactions.ByID(tx1.ID()) + actual, exists := transactions.Get(tx1.ID()) assert.True(t, exists) assert.Equal(t, &tx1, actual) }) @@ -46,8 +46,8 @@ func TestTransactionPool(t *testing.T) { assert.True(t, ok) }) - t.Run("should be able to retrieve all", func(t *testing.T) { - items := transactions.All() + t.Run("should be able to retrieve all values", func(t *testing.T) { + items := transactions.Values() assert.Len(t, items, 1) assert.Equal(t, &tx1, items[0]) }) @@ -71,7 +71,7 @@ func TestConcurrentWriteAndRead(t *testing.T) { // storing all transactions for i := 0; i < total; i++ { go func(tx flow.TransactionBody) { - require.True(t, transactions.Add(&tx)) + require.True(t, transactions.Add(tx.ID(), &tx)) wg.Done() }(txs[i]) @@ -84,7 +84,7 @@ func TestConcurrentWriteAndRead(t *testing.T) { // reading all transactions for i := 0; i < total; i++ { go func(tx flow.TransactionBody) { - actual, ok := transactions.ByID(tx.ID()) + actual, ok := transactions.Get(tx.ID()) require.True(t, ok) require.Equal(t, tx, *actual) @@ -94,23 +94,23 @@ func TestConcurrentWriteAndRead(t *testing.T) { unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "could not read all transactions on time") } -// TestAllReturnsInOrder checks All method of the HeroCache-based transactions mempool returns all +// TestValuesReturnsInOrder checks that the Values method of the HeroCache-based transactions mempool returns all // transactions in the same order as they are returned.
-func TestAllReturnsInOrder(t *testing.T) { +func TestValuesReturnsInOrder(t *testing.T) { total := 100 txs := unittest.TransactionBodyListFixture(total) transactions := herocache.NewTransactions(uint32(total), unittest.Logger(), metrics.NewNoopCollector()) // storing all transactions for i := 0; i < total; i++ { - require.True(t, transactions.Add(&txs[i])) - tx, ok := transactions.ByID(txs[i].ID()) + require.True(t, transactions.Add(txs[i].ID(), &txs[i])) + tx, ok := transactions.Get(txs[i].ID()) require.True(t, ok) require.Equal(t, txs[i], *tx) } // all transactions must be retrieved in the same order as they are added - all := transactions.All() + all := transactions.Values() for i := 0; i < total; i++ { require.Equal(t, txs[i], *all[i]) } diff --git a/module/mempool/identifier_map.go b/module/mempool/identifier_map.go index b5cbab24926..3149ef24b6a 100644 --- a/module/mempool/identifier_map.go +++ b/module/mempool/identifier_map.go @@ -4,10 +4,10 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// IdentifierMap represents a concurrency-safe memory pool for mapping an identifier to a list of identifiers +// IdentifierMap represents a concurrency-safe memory pool for sets of Identifier (keyed by some Identifier). type IdentifierMap interface { // Append will append the id to the list of identifiers associated with key. - Append(key, id flow.Identifier) error + Append(key, id flow.Identifier) // Remove removes the given key with all associated identifiers. Remove(key flow.Identifier) bool @@ -18,15 +18,15 @@ type IdentifierMap interface { // Get returns list of all identifiers associated with key and true, if the key exists in the mempool. // Otherwise it returns nil and false. - Get(key flow.Identifier) ([]flow.Identifier, bool) + Get(key flow.Identifier) (flow.IdentifierList, bool) // Has returns true if the key exists in the map, i.e., there is at least an id // attached to it. Has(key flow.Identifier) bool - // Keys returns a list of all keys in the mempool - Keys() ([]flow.Identifier, bool) + // Keys returns a list of all keys in the mempool. + Keys() (flow.IdentifierList, bool) - // Size returns number of IdMapEntities in mempool + // Size returns the number of items in the mempool. Size() uint } diff --git a/module/mempool/incorporated_result_seals.go b/module/mempool/incorporated_result_seals.go index 50e800bacac..50711b63b5a 100644 --- a/module/mempool/incorporated_result_seals.go +++ b/module/mempool/incorporated_result_seals.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package mempool import ( @@ -7,24 +5,24 @@ import ( ) // IncorporatedResultSeals represents a concurrency safe memory pool for -// incorporated result seals +// incorporated result seals. type IncorporatedResultSeals interface { - // Add adds an IncorporatedResultSeal to the mempool + // Add adds an IncorporatedResultSeal to the mempool. Add(irSeal *flow.IncorporatedResultSeal) (bool, error) - // All returns all the IncorporatedResultSeals in the mempool + // All returns all the IncorporatedResultSeals in the mempool. All() []*flow.IncorporatedResultSeal - // ByID returns an IncorporatedResultSeal by ID - ByID(flow.Identifier) (*flow.IncorporatedResultSeal, bool) + // Get returns an IncorporatedResultSeal by IncorporatedResult ID. + Get(flow.Identifier) (*flow.IncorporatedResultSeal, bool) - // Limit returns the size limit of the mempool + // Limit returns the size limit of the mempool. 
Limit() uint - // Remove removes an IncorporatedResultSeal from the mempool + // Remove removes an IncorporatedResultSeal from the mempool. Remove(incorporatedResultID flow.Identifier) bool - // Size returns the number of items in the mempool + // Size returns the number of items in the mempool. Size() uint // Clear removes all entities from the pool. diff --git a/module/mempool/mempool.go b/module/mempool/mempool.go new file mode 100644 index 00000000000..df8f1f8cbc2 --- /dev/null +++ b/module/mempool/mempool.go @@ -0,0 +1,31 @@ +package mempool + +// Mempool is a generic interface for a concurrency-safe memory pool. +type Mempool[K comparable, V any] interface { + // Has checks if a value is stored under the given key. + Has(K) bool + // Get returns the value for the given key. + // Returns true if the key-value pair exists, and false otherwise. + Get(K) (V, bool) + // Add attempts to add the given value, without overwriting existing data. + // If a value is already stored under the input key, Add is a no-op and returns false. + // If no value is stored under the input key, Add adds the value and returns true. + Add(K, V) bool + // Remove removes the value with the given key. + // Returns true if a value was stored under the key + // and was removed, and false otherwise. + Remove(K) bool + // Adjust will adjust the stored value using the given function if the given key can be found. + // Returns: + // - value, true if the value with the given key was found. The returned value is the version after the update is applied. + // - the zero value of V, false if no value with the given key was found + Adjust(key K, f func(V) V) (V, bool) + // Size will return the size of the mempool. + Size() uint + // Values returns all stored values from the mempool. + Values() []V + // All returns all stored key-value pairs as a map from the mempool. + All() map[K]V + // Clear removes all key-value pairs from the mempool. + Clear() +} diff --git a/module/mempool/mock/assignments.go b/module/mempool/mock/assignments.go index e6b186ceabd..cb30939359f 100644 --- a/module/mempool/mock/assignments.go +++ b/module/mempool/mock/assignments.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT.
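A sketch of consuming the generic Mempool interface introduced above; incrementCounter is a hypothetical helper that only illustrates the Adjust contract (the returned value is the post-update version, and a missing key yields the zero value and false).

package mempool_test // hypothetical example file

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/mempool"
)

// incrementCounter bumps a per-identifier counter if one is already stored;
// the update runs through Adjust, so it is applied atomically by the pool.
func incrementCounter(pool mempool.Mempool[flow.Identifier, uint64], key flow.Identifier) (uint64, bool) {
	return pool.Adjust(key, func(v uint64) uint64 { return v + 1 })
}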
-package mempool +package mock import ( chunks "github.com/onflow/flow-go/model/chunks" @@ -14,13 +14,17 @@ type Assignments struct { mock.Mock } -// Add provides a mock function with given fields: assignmentFingerprint, assignment -func (_m *Assignments) Add(assignmentFingerprint flow.Identifier, assignment *chunks.Assignment) bool { - ret := _m.Called(assignmentFingerprint, assignment) +// Add provides a mock function with given fields: _a0, _a1 +func (_m *Assignments) Add(_a0 flow.Identifier, _a1 *chunks.Assignment) bool { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Add") + } var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier, *chunks.Assignment) bool); ok { - r0 = rf(assignmentFingerprint, assignment) + r0 = rf(_a0, _a1) } else { r0 = ret.Get(0).(bool) } @@ -28,33 +32,76 @@ func (_m *Assignments) Add(assignmentFingerprint flow.Identifier, assignment *ch return r0 } -// All provides a mock function with given fields: -func (_m *Assignments) All() []*chunks.Assignment { +// Adjust provides a mock function with given fields: key, f +func (_m *Assignments) Adjust(key flow.Identifier, f func(*chunks.Assignment) *chunks.Assignment) (*chunks.Assignment, bool) { + ret := _m.Called(key, f) + + if len(ret) == 0 { + panic("no return value specified for Adjust") + } + + var r0 *chunks.Assignment + var r1 bool + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*chunks.Assignment) *chunks.Assignment) (*chunks.Assignment, bool)); ok { + return rf(key, f) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*chunks.Assignment) *chunks.Assignment) *chunks.Assignment); ok { + r0 = rf(key, f) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*chunks.Assignment) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, func(*chunks.Assignment) *chunks.Assignment) bool); ok { + r1 = rf(key, f) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// All provides a mock function with no fields +func (_m *Assignments) All() map[flow.Identifier]*chunks.Assignment { ret := _m.Called() - var r0 []*chunks.Assignment - if rf, ok := ret.Get(0).(func() []*chunks.Assignment); ok { + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 map[flow.Identifier]*chunks.Assignment + if rf, ok := ret.Get(0).(func() map[flow.Identifier]*chunks.Assignment); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*chunks.Assignment) + r0 = ret.Get(0).(map[flow.Identifier]*chunks.Assignment) } } return r0 } -// ByID provides a mock function with given fields: assignmentID -func (_m *Assignments) ByID(assignmentID flow.Identifier) (*chunks.Assignment, bool) { - ret := _m.Called(assignmentID) +// Clear provides a mock function with no fields +func (_m *Assignments) Clear() { + _m.Called() +} + +// Get provides a mock function with given fields: _a0 +func (_m *Assignments) Get(_a0 flow.Identifier) (*chunks.Assignment, bool) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Get") + } var r0 *chunks.Assignment var r1 bool if rf, ok := ret.Get(0).(func(flow.Identifier) (*chunks.Assignment, bool)); ok { - return rf(assignmentID) + return rf(_a0) } if rf, ok := ret.Get(0).(func(flow.Identifier) *chunks.Assignment); ok { - r0 = rf(assignmentID) + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*chunks.Assignment) @@ -62,7 +109,7 @@ func (_m *Assignments) ByID(assignmentID flow.Identifier) (*chunks.Assignment, b } if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { - 
r1 = rf(assignmentID) + r1 = rf(_a0) } else { r1 = ret.Get(1).(bool) } @@ -70,13 +117,17 @@ func (_m *Assignments) ByID(assignmentID flow.Identifier) (*chunks.Assignment, b return r0, r1 } -// Has provides a mock function with given fields: assignmentID -func (_m *Assignments) Has(assignmentID flow.Identifier) bool { - ret := _m.Called(assignmentID) +// Has provides a mock function with given fields: _a0 +func (_m *Assignments) Has(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Has") + } var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(assignmentID) + r0 = rf(_a0) } else { r0 = ret.Get(0).(bool) } @@ -84,13 +135,17 @@ func (_m *Assignments) Has(assignmentID flow.Identifier) bool { return r0 } -// Remove provides a mock function with given fields: assignmentID -func (_m *Assignments) Remove(assignmentID flow.Identifier) bool { - ret := _m.Called(assignmentID) +// Remove provides a mock function with given fields: _a0 +func (_m *Assignments) Remove(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(assignmentID) + r0 = rf(_a0) } else { r0 = ret.Get(0).(bool) } @@ -98,10 +153,14 @@ func (_m *Assignments) Remove(assignmentID flow.Identifier) bool { return r0 } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m *Assignments) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -112,13 +171,32 @@ func (_m *Assignments) Size() uint { return r0 } -type mockConstructorTestingTNewAssignments interface { - mock.TestingT - Cleanup(func()) +// Values provides a mock function with no fields +func (_m *Assignments) Values() []*chunks.Assignment { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Values") + } + + var r0 []*chunks.Assignment + if rf, ok := ret.Get(0).(func() []*chunks.Assignment); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*chunks.Assignment) + } + } + + return r0 } // NewAssignments creates a new instance of Assignments. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAssignments(t mockConstructorTestingTNewAssignments) *Assignments { +// The first argument is typically a *testing.T value. +func NewAssignments(t interface { + mock.TestingT + Cleanup(func()) +}) *Assignments { mock := &Assignments{} mock.Mock.Test(t) diff --git a/module/mempool/mock/back_data.go b/module/mempool/mock/back_data.go index 68661aa9c23..2f1a3b8c3a2 100644 --- a/module/mempool/mock/back_data.go +++ b/module/mempool/mock/back_data.go @@ -1,25 +1,25 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mempool +package mock -import ( - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) +import mock "github.com/stretchr/testify/mock" // BackData is an autogenerated mock type for the BackData type -type BackData struct { +type BackData[K comparable, V any] struct { mock.Mock } -// Add provides a mock function with given fields: entityID, entity -func (_m *BackData) Add(entityID flow.Identifier, entity flow.Entity) bool { - ret := _m.Called(entityID, entity) +// Add provides a mock function with given fields: key, value +func (_m *BackData[K, V]) Add(key K, value V) bool { + ret := _m.Called(key, value) + + if len(ret) == 0 { + panic("no return value specified for Add") + } var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Entity) bool); ok { - r0 = rf(entityID, entity) + if rf, ok := ret.Get(0).(func(K, V) bool); ok { + r0 = rf(key, value) } else { r0 = ret.Get(0).(bool) } @@ -27,67 +27,54 @@ func (_m *BackData) Add(entityID flow.Identifier, entity flow.Entity) bool { return r0 } -// Adjust provides a mock function with given fields: entityID, f -func (_m *BackData) Adjust(entityID flow.Identifier, f func(flow.Entity) flow.Entity) (flow.Entity, bool) { - ret := _m.Called(entityID, f) - - var r0 flow.Entity - var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier, func(flow.Entity) flow.Entity) (flow.Entity, bool)); ok { - return rf(entityID, f) - } - if rf, ok := ret.Get(0).(func(flow.Identifier, func(flow.Entity) flow.Entity) flow.Entity); ok { - r0 = rf(entityID, f) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.Entity) - } - } +// All provides a mock function with no fields +func (_m *BackData[K, V]) All() map[K]V { + ret := _m.Called() - if rf, ok := ret.Get(1).(func(flow.Identifier, func(flow.Entity) flow.Entity) bool); ok { - r1 = rf(entityID, f) - } else { - r1 = ret.Get(1).(bool) + if len(ret) == 0 { + panic("no return value specified for All") } - return r0, r1 -} - -// All provides a mock function with given fields: -func (_m *BackData) All() map[flow.Identifier]flow.Entity { - ret := _m.Called() - - var r0 map[flow.Identifier]flow.Entity - if rf, ok := ret.Get(0).(func() map[flow.Identifier]flow.Entity); ok { + var r0 map[K]V + if rf, ok := ret.Get(0).(func() map[K]V); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(map[flow.Identifier]flow.Entity) + r0 = ret.Get(0).(map[K]V) } } return r0 } -// ByID provides a mock function with given fields: entityID -func (_m *BackData) ByID(entityID flow.Identifier) (flow.Entity, bool) { - ret := _m.Called(entityID) +// Clear provides a mock function with no fields +func (_m *BackData[K, V]) Clear() { + _m.Called() +} + +// Get provides a mock function with given fields: key +func (_m *BackData[K, V]) Get(key K) (V, bool) { + ret := _m.Called(key) - var r0 flow.Entity + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 V var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.Entity, bool)); ok { - return rf(entityID) + if rf, ok := ret.Get(0).(func(K) (V, bool)); ok { + return rf(key) } - if rf, ok := ret.Get(0).(func(flow.Identifier) flow.Entity); ok { - r0 = rf(entityID) + if rf, ok := ret.Get(0).(func(K) V); ok { + r0 = rf(key) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.Entity) + r0 = ret.Get(0).(V) } } - if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { - r1 = rf(entityID) + if rf, ok := ret.Get(1).(func(K) bool); ok { + r1 = rf(key) } else { r1 = ret.Get(1).(bool) } @@ -95,34 
+82,17 @@ func (_m *BackData) ByID(entityID flow.Identifier) (flow.Entity, bool) { return r0, r1 } -// Clear provides a mock function with given fields: -func (_m *BackData) Clear() { - _m.Called() -} +// Has provides a mock function with given fields: key +func (_m *BackData[K, V]) Has(key K) bool { + ret := _m.Called(key) -// Entities provides a mock function with given fields: -func (_m *BackData) Entities() []flow.Entity { - ret := _m.Called() - - var r0 []flow.Entity - if rf, ok := ret.Get(0).(func() []flow.Entity); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]flow.Entity) - } + if len(ret) == 0 { + panic("no return value specified for Has") } - return r0 -} - -// Has provides a mock function with given fields: entityID -func (_m *BackData) Has(entityID flow.Identifier) bool { - ret := _m.Called(entityID) - var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(entityID) + if rf, ok := ret.Get(0).(func(K) bool); ok { + r0 = rf(key) } else { r0 = ret.Get(0).(bool) } @@ -130,41 +100,49 @@ func (_m *BackData) Has(entityID flow.Identifier) bool { return r0 } -// Identifiers provides a mock function with given fields: -func (_m *BackData) Identifiers() flow.IdentifierList { +// Keys provides a mock function with no fields +func (_m *BackData[K, V]) Keys() []K { ret := _m.Called() - var r0 flow.IdentifierList - if rf, ok := ret.Get(0).(func() flow.IdentifierList); ok { + if len(ret) == 0 { + panic("no return value specified for Keys") + } + + var r0 []K + if rf, ok := ret.Get(0).(func() []K); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.IdentifierList) + r0 = ret.Get(0).([]K) } } return r0 } -// Remove provides a mock function with given fields: entityID -func (_m *BackData) Remove(entityID flow.Identifier) (flow.Entity, bool) { - ret := _m.Called(entityID) +// Remove provides a mock function with given fields: key +func (_m *BackData[K, V]) Remove(key K) (V, bool) { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } - var r0 flow.Entity + var r0 V var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.Entity, bool)); ok { - return rf(entityID) + if rf, ok := ret.Get(0).(func(K) (V, bool)); ok { + return rf(key) } - if rf, ok := ret.Get(0).(func(flow.Identifier) flow.Entity); ok { - r0 = rf(entityID) + if rf, ok := ret.Get(0).(func(K) V); ok { + r0 = rf(key) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.Entity) + r0 = ret.Get(0).(V) } } - if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { - r1 = rf(entityID) + if rf, ok := ret.Get(1).(func(K) bool); ok { + r1 = rf(key) } else { r1 = ret.Get(1).(bool) } @@ -172,10 +150,14 @@ func (_m *BackData) Remove(entityID flow.Identifier) (flow.Entity, bool) { return r0, r1 } -// Size provides a mock function with given fields: -func (_m *BackData) Size() uint { +// Size provides a mock function with no fields +func (_m *BackData[K, V]) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -186,14 +168,33 @@ func (_m *BackData) Size() uint { return r0 } -type mockConstructorTestingTNewBackData interface { - mock.TestingT - Cleanup(func()) +// Values provides a mock function with no fields +func (_m *BackData[K, V]) Values() []V { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Values") + } + + var r0 []V + if rf, ok := ret.Get(0).(func() 
[]V); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]V) + } + } + + return r0 } // NewBackData creates a new instance of BackData. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBackData(t mockConstructorTestingTNewBackData) *BackData { - mock := &BackData{} +// The first argument is typically a *testing.T value. +func NewBackData[K comparable, V any](t interface { + mock.TestingT + Cleanup(func()) +}) *BackData[K, V] { + mock := &BackData[K, V]{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/module/mempool/mock/block_filter.go b/module/mempool/mock/block_filter.go deleted file mode 100644 index 61bb7df32b8..00000000000 --- a/module/mempool/mock/block_filter.go +++ /dev/null @@ -1,43 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mempool - -import ( - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) - -// BlockFilter is an autogenerated mock type for the BlockFilter type -type BlockFilter struct { - mock.Mock -} - -// Execute provides a mock function with given fields: header -func (_m *BlockFilter) Execute(header *flow.Header) bool { - ret := _m.Called(header) - - var r0 bool - if rf, ok := ret.Get(0).(func(*flow.Header) bool); ok { - r0 = rf(header) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -type mockConstructorTestingTNewBlockFilter interface { - mock.TestingT - Cleanup(func()) -} - -// NewBlockFilter creates a new instance of BlockFilter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockFilter(t mockConstructorTestingTNewBlockFilter) *BlockFilter { - mock := &BlockFilter{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/mempool/mock/blocks.go b/module/mempool/mock/blocks.go deleted file mode 100644 index 470e27d19d2..00000000000 --- a/module/mempool/mock/blocks.go +++ /dev/null @@ -1,143 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mempool - -import ( - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) - -// Blocks is an autogenerated mock type for the Blocks type -type Blocks struct { - mock.Mock -} - -// Add provides a mock function with given fields: block -func (_m *Blocks) Add(block *flow.Block) bool { - ret := _m.Called(block) - - var r0 bool - if rf, ok := ret.Get(0).(func(*flow.Block) bool); ok { - r0 = rf(block) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// All provides a mock function with given fields: -func (_m *Blocks) All() []*flow.Block { - ret := _m.Called() - - var r0 []*flow.Block - if rf, ok := ret.Get(0).(func() []*flow.Block); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*flow.Block) - } - } - - return r0 -} - -// ByID provides a mock function with given fields: blockID -func (_m *Blocks) ByID(blockID flow.Identifier) (*flow.Block, bool) { - ret := _m.Called(blockID) - - var r0 *flow.Block - var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Block, bool)); ok { - return rf(blockID) - } - if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.Block); ok { - r0 = rf(blockID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Block) - } - } - - if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { - r1 = rf(blockID) - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} - -// Has provides a mock function with given fields: blockID -func (_m *Blocks) Has(blockID flow.Identifier) bool { - ret := _m.Called(blockID) - - var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(blockID) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Hash provides a mock function with given fields: -func (_m *Blocks) Hash() flow.Identifier { - ret := _m.Called() - - var r0 flow.Identifier - if rf, ok := ret.Get(0).(func() flow.Identifier); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.Identifier) - } - } - - return r0 -} - -// Remove provides a mock function with given fields: blockID -func (_m *Blocks) Remove(blockID flow.Identifier) bool { - ret := _m.Called(blockID) - - var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(blockID) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Size provides a mock function with given fields: -func (_m *Blocks) Size() uint { - ret := _m.Called() - - var r0 uint - if rf, ok := ret.Get(0).(func() uint); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint) - } - - return r0 -} - -type mockConstructorTestingTNewBlocks interface { - mock.TestingT - Cleanup(func()) -} - -// NewBlocks creates a new instance of Blocks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlocks(t mockConstructorTestingTNewBlocks) *Blocks { - mock := &Blocks{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/mempool/mock/chunk_data_pack_message_store.go b/module/mempool/mock/chunk_data_pack_message_store.go deleted file mode 100644 index 60a55cb1e0d..00000000000 --- a/module/mempool/mock/chunk_data_pack_message_store.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by mockery v2.13.1. DO NOT EDIT. 
- -package mempool - -import ( - engine "github.com/onflow/flow-go/engine" - - mock "github.com/stretchr/testify/mock" -) - -// ChunkDataPackMessageStore is an autogenerated mock type for the ChunkDataPackMessageStore type -type ChunkDataPackMessageStore struct { - mock.Mock -} - -// Get provides a mock function with given fields: -func (_m *ChunkDataPackMessageStore) Get() (*engine.Message, bool) { - ret := _m.Called() - - var r0 *engine.Message - if rf, ok := ret.Get(0).(func() *engine.Message); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*engine.Message) - } - } - - var r1 bool - if rf, ok := ret.Get(1).(func() bool); ok { - r1 = rf() - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} - -// Put provides a mock function with given fields: _a0 -func (_m *ChunkDataPackMessageStore) Put(_a0 *engine.Message) bool { - ret := _m.Called(_a0) - - var r0 bool - if rf, ok := ret.Get(0).(func(*engine.Message) bool); ok { - r0 = rf(_a0) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Size provides a mock function with given fields: -func (_m *ChunkDataPackMessageStore) Size() uint { - ret := _m.Called() - - var r0 uint - if rf, ok := ret.Get(0).(func() uint); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint) - } - - return r0 -} - -type mockConstructorTestingTNewChunkDataPackMessageStore interface { - mock.TestingT - Cleanup(func()) -} - -// NewChunkDataPackMessageStore creates a new instance of ChunkDataPackMessageStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChunkDataPackMessageStore(t mockConstructorTestingTNewChunkDataPackMessageStore) *ChunkDataPackMessageStore { - mock := &ChunkDataPackMessageStore{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/mempool/mock/chunk_data_packs.go b/module/mempool/mock/chunk_data_packs.go deleted file mode 100644 index 01b5b22bf9e..00000000000 --- a/module/mempool/mock/chunk_data_packs.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mempool - -import ( - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) - -// ChunkDataPacks is an autogenerated mock type for the ChunkDataPacks type -type ChunkDataPacks struct { - mock.Mock -} - -// Add provides a mock function with given fields: cdp -func (_m *ChunkDataPacks) Add(cdp *flow.ChunkDataPack) bool { - ret := _m.Called(cdp) - - var r0 bool - if rf, ok := ret.Get(0).(func(*flow.ChunkDataPack) bool); ok { - r0 = rf(cdp) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// All provides a mock function with given fields: -func (_m *ChunkDataPacks) All() []*flow.ChunkDataPack { - ret := _m.Called() - - var r0 []*flow.ChunkDataPack - if rf, ok := ret.Get(0).(func() []*flow.ChunkDataPack); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*flow.ChunkDataPack) - } - } - - return r0 -} - -// ByChunkID provides a mock function with given fields: chunkID -func (_m *ChunkDataPacks) ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, bool) { - ret := _m.Called(chunkID) - - var r0 *flow.ChunkDataPack - var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ChunkDataPack, bool)); ok { - return rf(chunkID) - } - if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.ChunkDataPack); ok { - r0 = rf(chunkID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.ChunkDataPack) - } - } - - if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { - r1 = rf(chunkID) - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} - -// Has provides a mock function with given fields: chunkID -func (_m *ChunkDataPacks) Has(chunkID flow.Identifier) bool { - ret := _m.Called(chunkID) - - var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(chunkID) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Remove provides a mock function with given fields: chunkID -func (_m *ChunkDataPacks) Remove(chunkID flow.Identifier) bool { - ret := _m.Called(chunkID) - - var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(chunkID) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Size provides a mock function with given fields: -func (_m *ChunkDataPacks) Size() uint { - ret := _m.Called() - - var r0 uint - if rf, ok := ret.Get(0).(func() uint); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint) - } - - return r0 -} - -type mockConstructorTestingTNewChunkDataPacks interface { - mock.TestingT - Cleanup(func()) -} - -// NewChunkDataPacks creates a new instance of ChunkDataPacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChunkDataPacks(t mockConstructorTestingTNewChunkDataPacks) *ChunkDataPacks { - mock := &ChunkDataPacks{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/mempool/mock/chunk_request_history_updater_func.go b/module/mempool/mock/chunk_request_history_updater_func.go deleted file mode 100644 index ee733755bb7..00000000000 --- a/module/mempool/mock/chunk_request_history_updater_func.go +++ /dev/null @@ -1,60 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mempool - -import ( - time "time" - - mock "github.com/stretchr/testify/mock" -) - -// ChunkRequestHistoryUpdaterFunc is an autogenerated mock type for the ChunkRequestHistoryUpdaterFunc type -type ChunkRequestHistoryUpdaterFunc struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0, _a1 -func (_m *ChunkRequestHistoryUpdaterFunc) Execute(_a0 uint64, _a1 time.Duration) (uint64, time.Duration, bool) { - ret := _m.Called(_a0, _a1) - - var r0 uint64 - var r1 time.Duration - var r2 bool - if rf, ok := ret.Get(0).(func(uint64, time.Duration) (uint64, time.Duration, bool)); ok { - return rf(_a0, _a1) - } - if rf, ok := ret.Get(0).(func(uint64, time.Duration) uint64); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(uint64, time.Duration) time.Duration); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Get(1).(time.Duration) - } - - if rf, ok := ret.Get(2).(func(uint64, time.Duration) bool); ok { - r2 = rf(_a0, _a1) - } else { - r2 = ret.Get(2).(bool) - } - - return r0, r1, r2 -} - -type mockConstructorTestingTNewChunkRequestHistoryUpdaterFunc interface { - mock.TestingT - Cleanup(func()) -} - -// NewChunkRequestHistoryUpdaterFunc creates a new instance of ChunkRequestHistoryUpdaterFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChunkRequestHistoryUpdaterFunc(t mockConstructorTestingTNewChunkRequestHistoryUpdaterFunc) *ChunkRequestHistoryUpdaterFunc { - mock := &ChunkRequestHistoryUpdaterFunc{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/mempool/mock/chunk_requests.go b/module/mempool/mock/chunk_requests.go index 9d5924da359..b98ed91e624 100644 --- a/module/mempool/mock/chunk_requests.go +++ b/module/mempool/mock/chunk_requests.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mempool +package mock import ( chunks "github.com/onflow/flow-go/model/chunks" @@ -24,6 +24,10 @@ type ChunkRequests struct { func (_m *ChunkRequests) Add(request *verification.ChunkDataPackRequest) bool { ret := _m.Called(request) + if len(ret) == 0 { + panic("no return value specified for Add") + } + var r0 bool if rf, ok := ret.Get(0).(func(*verification.ChunkDataPackRequest) bool); ok { r0 = rf(request) @@ -34,10 +38,14 @@ func (_m *ChunkRequests) Add(request *verification.ChunkDataPackRequest) bool { return r0 } -// All provides a mock function with given fields: +// All provides a mock function with no fields func (_m *ChunkRequests) All() verification.ChunkDataPackRequestInfoList { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for All") + } + var r0 verification.ChunkDataPackRequestInfoList if rf, ok := ret.Get(0).(func() verification.ChunkDataPackRequestInfoList); ok { r0 = rf() @@ -54,6 +62,10 @@ func (_m *ChunkRequests) All() verification.ChunkDataPackRequestInfoList { func (_m *ChunkRequests) IncrementAttempt(chunkID flow.Identifier) bool { ret := _m.Called(chunkID) + if len(ret) == 0 { + panic("no return value specified for IncrementAttempt") + } + var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { r0 = rf(chunkID) @@ -68,6 +80,10 @@ func (_m *ChunkRequests) IncrementAttempt(chunkID flow.Identifier) bool { func (_m *ChunkRequests) PopAll(chunkID flow.Identifier) (chunks.LocatorMap, bool) { ret := _m.Called(chunkID) + if len(ret) == 0 { + panic("no return value specified for PopAll") + } + var r0 chunks.LocatorMap var r1 bool if rf, ok := ret.Get(0).(func(flow.Identifier) (chunks.LocatorMap, bool)); ok { @@ -94,6 +110,10 @@ func (_m *ChunkRequests) PopAll(chunkID flow.Identifier) (chunks.LocatorMap, boo func (_m *ChunkRequests) Remove(chunkID flow.Identifier) bool { ret := _m.Called(chunkID) + if len(ret) == 0 { + panic("no return value specified for Remove") + } + var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { r0 = rf(chunkID) @@ -108,6 +128,10 @@ func (_m *ChunkRequests) Remove(chunkID flow.Identifier) bool { func (_m *ChunkRequests) RequestHistory(chunkID flow.Identifier) (uint64, time.Time, time.Duration, bool) { ret := _m.Called(chunkID) + if len(ret) == 0 { + panic("no return value specified for RequestHistory") + } + var r0 uint64 var r1 time.Time var r2 time.Duration @@ -142,10 +166,14 @@ func (_m *ChunkRequests) RequestHistory(chunkID flow.Identifier) (uint64, time.T return r0, r1, r2, r3 } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m *ChunkRequests) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -160,6 +188,10 @@ func (_m *ChunkRequests) Size() uint { func (_m *ChunkRequests) UpdateRequestHistory(chunkID flow.Identifier, updater mempool.ChunkRequestHistoryUpdaterFunc) (uint64, time.Time, time.Duration, bool) { ret := _m.Called(chunkID, updater) + if len(ret) == 0 { + panic("no return value specified for UpdateRequestHistory") + } + var r0 uint64 var r1 time.Time var r2 time.Duration @@ -194,13 +226,12 @@ func (_m *ChunkRequests) UpdateRequestHistory(chunkID flow.Identifier, updater m return r0, r1, r2, r3 } -type mockConstructorTestingTNewChunkRequests interface { +// NewChunkRequests creates a new instance of ChunkRequests. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChunkRequests(t interface { mock.TestingT Cleanup(func()) -} - -// NewChunkRequests creates a new instance of ChunkRequests. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChunkRequests(t mockConstructorTestingTNewChunkRequests) *ChunkRequests { +}) *ChunkRequests { mock := &ChunkRequests{} mock.Mock.Test(t) diff --git a/module/mempool/mock/chunk_statuses.go b/module/mempool/mock/chunk_statuses.go index a3fbffe6ca7..7834ed92fae 100644 --- a/module/mempool/mock/chunk_statuses.go +++ b/module/mempool/mock/chunk_statuses.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mempool +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -15,13 +15,17 @@ type ChunkStatuses struct { mock.Mock } -// Add provides a mock function with given fields: status -func (_m *ChunkStatuses) Add(status *verification.ChunkStatus) bool { - ret := _m.Called(status) +// Add provides a mock function with given fields: _a0, _a1 +func (_m *ChunkStatuses) Add(_a0 flow.Identifier, _a1 *verification.ChunkStatus) bool { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Add") + } var r0 bool - if rf, ok := ret.Get(0).(func(*verification.ChunkStatus) bool); ok { - r0 = rf(status) + if rf, ok := ret.Get(0).(func(flow.Identifier, *verification.ChunkStatus) bool); ok { + r0 = rf(_a0, _a1) } else { r0 = ret.Get(0).(bool) } @@ -29,41 +33,84 @@ func (_m *ChunkStatuses) Add(status *verification.ChunkStatus) bool { return r0 } -// All provides a mock function with given fields: -func (_m *ChunkStatuses) All() []*verification.ChunkStatus { +// Adjust provides a mock function with given fields: key, f +func (_m *ChunkStatuses) Adjust(key flow.Identifier, f func(*verification.ChunkStatus) *verification.ChunkStatus) (*verification.ChunkStatus, bool) { + ret := _m.Called(key, f) + + if len(ret) == 0 { + panic("no return value specified for Adjust") + } + + var r0 *verification.ChunkStatus + var r1 bool + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*verification.ChunkStatus) *verification.ChunkStatus) (*verification.ChunkStatus, bool)); ok { + return rf(key, f) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*verification.ChunkStatus) *verification.ChunkStatus) *verification.ChunkStatus); ok { + r0 = rf(key, f) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*verification.ChunkStatus) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, func(*verification.ChunkStatus) *verification.ChunkStatus) bool); ok { + r1 = rf(key, f) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// All provides a mock function with no fields +func (_m *ChunkStatuses) All() map[flow.Identifier]*verification.ChunkStatus { ret := _m.Called() - var r0 []*verification.ChunkStatus - if rf, ok := ret.Get(0).(func() []*verification.ChunkStatus); ok { + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 map[flow.Identifier]*verification.ChunkStatus + if rf, ok := ret.Get(0).(func() map[flow.Identifier]*verification.ChunkStatus); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*verification.ChunkStatus) + r0 = ret.Get(0).(map[flow.Identifier]*verification.ChunkStatus) } } return r0 } -// Get provides a mock function 
with given fields: chunkIndex, resultID -func (_m *ChunkStatuses) Get(chunkIndex uint64, resultID flow.Identifier) (*verification.ChunkStatus, bool) { - ret := _m.Called(chunkIndex, resultID) +// Clear provides a mock function with no fields +func (_m *ChunkStatuses) Clear() { + _m.Called() +} + +// Get provides a mock function with given fields: _a0 +func (_m *ChunkStatuses) Get(_a0 flow.Identifier) (*verification.ChunkStatus, bool) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Get") + } var r0 *verification.ChunkStatus var r1 bool - if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) (*verification.ChunkStatus, bool)); ok { - return rf(chunkIndex, resultID) + if rf, ok := ret.Get(0).(func(flow.Identifier) (*verification.ChunkStatus, bool)); ok { + return rf(_a0) } - if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) *verification.ChunkStatus); ok { - r0 = rf(chunkIndex, resultID) + if rf, ok := ret.Get(0).(func(flow.Identifier) *verification.ChunkStatus); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*verification.ChunkStatus) } } - if rf, ok := ret.Get(1).(func(uint64, flow.Identifier) bool); ok { - r1 = rf(chunkIndex, resultID) + if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { + r1 = rf(_a0) } else { r1 = ret.Get(1).(bool) } @@ -71,13 +118,17 @@ func (_m *ChunkStatuses) Get(chunkIndex uint64, resultID flow.Identifier) (*veri return r0, r1 } -// Remove provides a mock function with given fields: chunkIndex, resultID -func (_m *ChunkStatuses) Remove(chunkIndex uint64, resultID flow.Identifier) bool { - ret := _m.Called(chunkIndex, resultID) +// Has provides a mock function with given fields: _a0 +func (_m *ChunkStatuses) Has(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Has") + } var r0 bool - if rf, ok := ret.Get(0).(func(uint64, flow.Identifier) bool); ok { - r0 = rf(chunkIndex, resultID) + if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { + r0 = rf(_a0) } else { r0 = ret.Get(0).(bool) } @@ -85,10 +136,32 @@ func (_m *ChunkStatuses) Remove(chunkIndex uint64, resultID flow.Identifier) boo return r0 } -// Size provides a mock function with given fields: +// Remove provides a mock function with given fields: _a0 +func (_m *ChunkStatuses) Remove(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Size provides a mock function with no fields func (_m *ChunkStatuses) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -99,13 +172,32 @@ func (_m *ChunkStatuses) Size() uint { return r0 } -type mockConstructorTestingTNewChunkStatuses interface { - mock.TestingT - Cleanup(func()) +// Values provides a mock function with no fields +func (_m *ChunkStatuses) Values() []*verification.ChunkStatus { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Values") + } + + var r0 []*verification.ChunkStatus + if rf, ok := ret.Get(0).(func() []*verification.ChunkStatus); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*verification.ChunkStatus) + } + } + + return r0 } // NewChunkStatuses creates a new instance of ChunkStatuses. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChunkStatuses(t mockConstructorTestingTNewChunkStatuses) *ChunkStatuses { +// The first argument is typically a *testing.T value. +func NewChunkStatuses(t interface { + mock.TestingT + Cleanup(func()) +}) *ChunkStatuses { mock := &ChunkStatuses{} mock.Mock.Test(t) diff --git a/module/mempool/mock/collections.go b/module/mempool/mock/collections.go deleted file mode 100644 index 04d143f8773..00000000000 --- a/module/mempool/mock/collections.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mempool - -import ( - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) - -// Collections is an autogenerated mock type for the Collections type -type Collections struct { - mock.Mock -} - -// Add provides a mock function with given fields: coll -func (_m *Collections) Add(coll *flow.Collection) bool { - ret := _m.Called(coll) - - var r0 bool - if rf, ok := ret.Get(0).(func(*flow.Collection) bool); ok { - r0 = rf(coll) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// All provides a mock function with given fields: -func (_m *Collections) All() []*flow.Collection { - ret := _m.Called() - - var r0 []*flow.Collection - if rf, ok := ret.Get(0).(func() []*flow.Collection); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*flow.Collection) - } - } - - return r0 -} - -// ByID provides a mock function with given fields: collID -func (_m *Collections) ByID(collID flow.Identifier) (*flow.Collection, bool) { - ret := _m.Called(collID) - - var r0 *flow.Collection - var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Collection, bool)); ok { - return rf(collID) - } - if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.Collection); ok { - r0 = rf(collID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Collection) - } - } - - if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { - r1 = rf(collID) - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} - -// Has provides a mock function with given fields: collID -func (_m *Collections) Has(collID flow.Identifier) bool { - ret := _m.Called(collID) - - var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(collID) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Remove provides a mock function with given fields: collID -func (_m *Collections) Remove(collID flow.Identifier) bool { - ret := _m.Called(collID) - - var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(collID) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Size provides a mock function with given fields: -func (_m *Collections) Size() uint { - ret := _m.Called() - - var r0 uint - if rf, ok := ret.Get(0).(func() uint); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint) - } - - return r0 -} - -type mockConstructorTestingTNewCollections interface { - mock.TestingT - Cleanup(func()) -} - -// NewCollections creates a new instance of Collections. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewCollections(t mockConstructorTestingTNewCollections) *Collections { - mock := &Collections{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/mempool/mock/dns_cache.go b/module/mempool/mock/dns_cache.go index b95edca4789..17873f92406 100644 --- a/module/mempool/mock/dns_cache.go +++ b/module/mempool/mock/dns_cache.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mempool +package mock import ( mempool "github.com/onflow/flow-go/module/mempool" @@ -18,6 +18,10 @@ type DNSCache struct { func (_m *DNSCache) GetDomainIp(_a0 string) (*mempool.IpRecord, bool) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for GetDomainIp") + } + var r0 *mempool.IpRecord var r1 bool if rf, ok := ret.Get(0).(func(string) (*mempool.IpRecord, bool)); ok { @@ -44,6 +48,10 @@ func (_m *DNSCache) GetDomainIp(_a0 string) (*mempool.IpRecord, bool) { func (_m *DNSCache) GetTxtRecord(_a0 string) (*mempool.TxtRecord, bool) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for GetTxtRecord") + } + var r0 *mempool.TxtRecord var r1 bool if rf, ok := ret.Get(0).(func(string) (*mempool.TxtRecord, bool)); ok { @@ -70,6 +78,10 @@ func (_m *DNSCache) GetTxtRecord(_a0 string) (*mempool.TxtRecord, bool) { func (_m *DNSCache) LockIPDomain(_a0 string) (bool, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LockIPDomain") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(string) (bool, error)); ok { @@ -94,6 +106,10 @@ func (_m *DNSCache) LockIPDomain(_a0 string) (bool, error) { func (_m *DNSCache) LockTxtRecord(_a0 string) (bool, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LockTxtRecord") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(string) (bool, error)); ok { @@ -118,6 +134,10 @@ func (_m *DNSCache) LockTxtRecord(_a0 string) (bool, error) { func (_m *DNSCache) PutIpDomain(_a0 string, _a1 []net.IPAddr, _a2 int64) bool { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for PutIpDomain") + } + var r0 bool if rf, ok := ret.Get(0).(func(string, []net.IPAddr, int64) bool); ok { r0 = rf(_a0, _a1, _a2) @@ -132,6 +152,10 @@ func (_m *DNSCache) PutIpDomain(_a0 string, _a1 []net.IPAddr, _a2 int64) bool { func (_m *DNSCache) PutTxtRecord(_a0 string, _a1 []string, _a2 int64) bool { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for PutTxtRecord") + } + var r0 bool if rf, ok := ret.Get(0).(func(string, []string, int64) bool); ok { r0 = rf(_a0, _a1, _a2) @@ -146,6 +170,10 @@ func (_m *DNSCache) PutTxtRecord(_a0 string, _a1 []string, _a2 int64) bool { func (_m *DNSCache) RemoveIp(_a0 string) bool { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for RemoveIp") + } + var r0 bool if rf, ok := ret.Get(0).(func(string) bool); ok { r0 = rf(_a0) @@ -160,6 +188,10 @@ func (_m *DNSCache) RemoveIp(_a0 string) bool { func (_m *DNSCache) RemoveTxt(_a0 string) bool { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for RemoveTxt") + } + var r0 bool if rf, ok := ret.Get(0).(func(string) bool); ok { r0 = rf(_a0) @@ -170,10 +202,14 @@ func (_m *DNSCache) RemoveTxt(_a0 string) bool { return r0 } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m 
*DNSCache) Size() (uint, uint) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint var r1 uint if rf, ok := ret.Get(0).(func() (uint, uint)); ok { @@ -198,6 +234,10 @@ func (_m *DNSCache) Size() (uint, uint) { func (_m *DNSCache) UpdateIPDomain(_a0 string, _a1 []net.IPAddr, _a2 int64) error { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for UpdateIPDomain") + } + var r0 error if rf, ok := ret.Get(0).(func(string, []net.IPAddr, int64) error); ok { r0 = rf(_a0, _a1, _a2) @@ -212,6 +252,10 @@ func (_m *DNSCache) UpdateIPDomain(_a0 string, _a1 []net.IPAddr, _a2 int64) erro func (_m *DNSCache) UpdateTxtRecord(_a0 string, _a1 []string, _a2 int64) error { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for UpdateTxtRecord") + } + var r0 error if rf, ok := ret.Get(0).(func(string, []string, int64) error); ok { r0 = rf(_a0, _a1, _a2) @@ -222,13 +266,12 @@ func (_m *DNSCache) UpdateTxtRecord(_a0 string, _a1 []string, _a2 int64) error { return r0 } -type mockConstructorTestingTNewDNSCache interface { +// NewDNSCache creates a new instance of DNSCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDNSCache(t interface { mock.TestingT Cleanup(func()) -} - -// NewDNSCache creates a new instance of DNSCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDNSCache(t mockConstructorTestingTNewDNSCache) *DNSCache { +}) *DNSCache { mock := &DNSCache{} mock.Mock.Test(t) diff --git a/module/mempool/mock/execution_data.go b/module/mempool/mock/execution_data.go new file mode 100644 index 00000000000..1bd80b8ed58 --- /dev/null +++ b/module/mempool/mock/execution_data.go @@ -0,0 +1,206 @@ +// Code generated by mockery. DO NOT EDIT. 
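The DNSCache hunks above also show the regenerated constructor shape: the named mockConstructorTestingTNewDNSCache interface is gone, replaced by an inline `interface { mock.TestingT; Cleanup(func()) }`, so existing call sites that pass a *testing.T keep compiling. A sketch of a multi-value return under the new guards (hypothetical test, assuming testify's On/Return API):

package example_test

import (
	"testing"

	mempoolmock "github.com/onflow/flow-go/module/mempool/mock"
)

func TestDNSCacheSize(t *testing.T) {
	cache := mempoolmock.NewDNSCache(t) // *testing.T satisfies the inline interface

	// Size returns (uint, uint); both values must be configured,
	// otherwise the len(ret) == 0 guard panics.
	cache.On("Size").Return(uint(3), uint(1))

	domains, txts := cache.Size()
	if domains != 3 || txts != 1 {
		t.Fatalf("unexpected sizes: %d, %d", domains, txts)
	}
}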
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + + mock "github.com/stretchr/testify/mock" +) + +// ExecutionData is an autogenerated mock type for the ExecutionData type +type ExecutionData struct { + mock.Mock +} + +// Add provides a mock function with given fields: _a0, _a1 +func (_m *ExecutionData) Add(_a0 flow.Identifier, _a1 *execution_data.BlockExecutionDataEntity) bool { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(flow.Identifier, *execution_data.BlockExecutionDataEntity) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Adjust provides a mock function with given fields: key, f +func (_m *ExecutionData) Adjust(key flow.Identifier, f func(*execution_data.BlockExecutionDataEntity) *execution_data.BlockExecutionDataEntity) (*execution_data.BlockExecutionDataEntity, bool) { + ret := _m.Called(key, f) + + if len(ret) == 0 { + panic("no return value specified for Adjust") + } + + var r0 *execution_data.BlockExecutionDataEntity + var r1 bool + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*execution_data.BlockExecutionDataEntity) *execution_data.BlockExecutionDataEntity) (*execution_data.BlockExecutionDataEntity, bool)); ok { + return rf(key, f) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*execution_data.BlockExecutionDataEntity) *execution_data.BlockExecutionDataEntity) *execution_data.BlockExecutionDataEntity); ok { + r0 = rf(key, f) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution_data.BlockExecutionDataEntity) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, func(*execution_data.BlockExecutionDataEntity) *execution_data.BlockExecutionDataEntity) bool); ok { + r1 = rf(key, f) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// All provides a mock function with no fields +func (_m *ExecutionData) All() map[flow.Identifier]*execution_data.BlockExecutionDataEntity { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 map[flow.Identifier]*execution_data.BlockExecutionDataEntity + if rf, ok := ret.Get(0).(func() map[flow.Identifier]*execution_data.BlockExecutionDataEntity); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[flow.Identifier]*execution_data.BlockExecutionDataEntity) + } + } + + return r0 +} + +// Clear provides a mock function with no fields +func (_m *ExecutionData) Clear() { + _m.Called() +} + +// Get provides a mock function with given fields: _a0 +func (_m *ExecutionData) Get(_a0 flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *execution_data.BlockExecutionDataEntity + var r1 bool + if rf, ok := ret.Get(0).(func(flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *execution_data.BlockExecutionDataEntity); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution_data.BlockExecutionDataEntity) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { + r1 = rf(_a0) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// Has provides a mock function with given fields: _a0 +func (_m *ExecutionData) Has(_a0 
flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Has") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Remove provides a mock function with given fields: _a0 +func (_m *ExecutionData) Remove(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Size provides a mock function with no fields +func (_m *ExecutionData) Size() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Size") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// Values provides a mock function with no fields +func (_m *ExecutionData) Values() []*execution_data.BlockExecutionDataEntity { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Values") + } + + var r0 []*execution_data.BlockExecutionDataEntity + if rf, ok := ret.Get(0).(func() []*execution_data.BlockExecutionDataEntity); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*execution_data.BlockExecutionDataEntity) + } + } + + return r0 +} + +// NewExecutionData creates a new instance of ExecutionData. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionData(t interface { + mock.TestingT + Cleanup(func()) +}) *ExecutionData { + mock := &ExecutionData{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mempool/mock/execution_tree.go b/module/mempool/mock/execution_tree.go index f3bb8c4d90d..493f111c129 100644 --- a/module/mempool/mock/execution_tree.go +++ b/module/mempool/mock/execution_tree.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mempool +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -18,6 +18,10 @@ type ExecutionTree struct { func (_m *ExecutionTree) AddReceipt(receipt *flow.ExecutionReceipt, block *flow.Header) (bool, error) { ret := _m.Called(receipt, block) + if len(ret) == 0 { + panic("no return value specified for AddReceipt") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt, *flow.Header) (bool, error)); ok { @@ -42,6 +46,10 @@ func (_m *ExecutionTree) AddReceipt(receipt *flow.ExecutionReceipt, block *flow. 
func (_m *ExecutionTree) AddResult(result *flow.ExecutionResult, block *flow.Header) error { ret := _m.Called(result, block) + if len(ret) == 0 { + panic("no return value specified for AddResult") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.ExecutionResult, *flow.Header) error); ok { r0 = rf(result, block) @@ -56,6 +64,10 @@ func (_m *ExecutionTree) AddResult(result *flow.ExecutionResult, block *flow.Hea func (_m *ExecutionTree) HasReceipt(receipt *flow.ExecutionReceipt) bool { ret := _m.Called(receipt) + if len(ret) == 0 { + panic("no return value specified for HasReceipt") + } + var r0 bool if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt) bool); ok { r0 = rf(receipt) @@ -66,10 +78,14 @@ func (_m *ExecutionTree) HasReceipt(receipt *flow.ExecutionReceipt) bool { return r0 } -// LowestHeight provides a mock function with given fields: +// LowestHeight provides a mock function with no fields func (_m *ExecutionTree) LowestHeight() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LowestHeight") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -84,6 +100,10 @@ func (_m *ExecutionTree) LowestHeight() uint64 { func (_m *ExecutionTree) PruneUpToHeight(newLowestHeight uint64) error { ret := _m.Called(newLowestHeight) + if len(ret) == 0 { + panic("no return value specified for PruneUpToHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(uint64) error); ok { r0 = rf(newLowestHeight) @@ -98,6 +118,10 @@ func (_m *ExecutionTree) PruneUpToHeight(newLowestHeight uint64) error { func (_m *ExecutionTree) ReachableReceipts(resultID flow.Identifier, blockFilter mempool.BlockFilter, receiptFilter mempool.ReceiptFilter) ([]*flow.ExecutionReceipt, error) { ret := _m.Called(resultID, blockFilter, receiptFilter) + if len(ret) == 0 { + panic("no return value specified for ReachableReceipts") + } + var r0 []*flow.ExecutionReceipt var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, mempool.BlockFilter, mempool.ReceiptFilter) ([]*flow.ExecutionReceipt, error)); ok { @@ -120,10 +144,14 @@ func (_m *ExecutionTree) ReachableReceipts(resultID flow.Identifier, blockFilter return r0, r1 } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m *ExecutionTree) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -134,13 +162,12 @@ func (_m *ExecutionTree) Size() uint { return r0 } -type mockConstructorTestingTNewExecutionTree interface { +// NewExecutionTree creates a new instance of ExecutionTree. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionTree(t interface { mock.TestingT Cleanup(func()) -} - -// NewExecutionTree creates a new instance of ExecutionTree. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewExecutionTree(t mockConstructorTestingTNewExecutionTree) *ExecutionTree { +}) *ExecutionTree { mock := &ExecutionTree{} mock.Mock.Test(t) diff --git a/module/mempool/mock/guarantees.go b/module/mempool/mock/guarantees.go index 18a83de6979..cd7d2fbf316 100644 --- a/module/mempool/mock/guarantees.go +++ b/module/mempool/mock/guarantees.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
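For mocks with (value, error) returns such as ExecutionTree.AddReceipt above, the generated code falls back to ret.Error(1) when no function stub is installed, so a plain Return(true, nil) still works. A sketch (hypothetical test; the zero-value flow.ExecutionReceipt and flow.Header literals are stand-ins):

package example_test

import (
	"testing"

	"github.com/onflow/flow-go/model/flow"
	mempoolmock "github.com/onflow/flow-go/module/mempool/mock"
)

func TestExecutionTreeAddReceipt(t *testing.T) {
	tree := mempoolmock.NewExecutionTree(t)

	receipt := &flow.ExecutionReceipt{}
	header := &flow.Header{}

	// Both return values are configured; matching is by argument equality.
	tree.On("AddReceipt", receipt, header).Return(true, nil)

	added, err := tree.AddReceipt(receipt, header)
	if err != nil || !added {
		t.Fatal("expected receipt to be added")
	}
}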
-package mempool +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -13,13 +13,17 @@ type Guarantees struct { mock.Mock } -// Add provides a mock function with given fields: guarantee -func (_m *Guarantees) Add(guarantee *flow.CollectionGuarantee) bool { - ret := _m.Called(guarantee) +// Add provides a mock function with given fields: _a0, _a1 +func (_m *Guarantees) Add(_a0 flow.Identifier, _a1 *flow.CollectionGuarantee) bool { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Add") + } var r0 bool - if rf, ok := ret.Get(0).(func(*flow.CollectionGuarantee) bool); ok { - r0 = rf(guarantee) + if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.CollectionGuarantee) bool); ok { + r0 = rf(_a0, _a1) } else { r0 = ret.Get(0).(bool) } @@ -27,33 +31,76 @@ func (_m *Guarantees) Add(guarantee *flow.CollectionGuarantee) bool { return r0 } -// All provides a mock function with given fields: -func (_m *Guarantees) All() []*flow.CollectionGuarantee { +// Adjust provides a mock function with given fields: key, f +func (_m *Guarantees) Adjust(key flow.Identifier, f func(*flow.CollectionGuarantee) *flow.CollectionGuarantee) (*flow.CollectionGuarantee, bool) { + ret := _m.Called(key, f) + + if len(ret) == 0 { + panic("no return value specified for Adjust") + } + + var r0 *flow.CollectionGuarantee + var r1 bool + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*flow.CollectionGuarantee) *flow.CollectionGuarantee) (*flow.CollectionGuarantee, bool)); ok { + return rf(key, f) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*flow.CollectionGuarantee) *flow.CollectionGuarantee) *flow.CollectionGuarantee); ok { + r0 = rf(key, f) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.CollectionGuarantee) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, func(*flow.CollectionGuarantee) *flow.CollectionGuarantee) bool); ok { + r1 = rf(key, f) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// All provides a mock function with no fields +func (_m *Guarantees) All() map[flow.Identifier]*flow.CollectionGuarantee { ret := _m.Called() - var r0 []*flow.CollectionGuarantee - if rf, ok := ret.Get(0).(func() []*flow.CollectionGuarantee); ok { + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 map[flow.Identifier]*flow.CollectionGuarantee + if rf, ok := ret.Get(0).(func() map[flow.Identifier]*flow.CollectionGuarantee); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*flow.CollectionGuarantee) + r0 = ret.Get(0).(map[flow.Identifier]*flow.CollectionGuarantee) } } return r0 } -// ByID provides a mock function with given fields: collID -func (_m *Guarantees) ByID(collID flow.Identifier) (*flow.CollectionGuarantee, bool) { - ret := _m.Called(collID) +// Clear provides a mock function with no fields +func (_m *Guarantees) Clear() { + _m.Called() +} + +// Get provides a mock function with given fields: _a0 +func (_m *Guarantees) Get(_a0 flow.Identifier) (*flow.CollectionGuarantee, bool) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Get") + } var r0 *flow.CollectionGuarantee var r1 bool if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.CollectionGuarantee, bool)); ok { - return rf(collID) + return rf(_a0) } if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.CollectionGuarantee); ok { - r0 = rf(collID) + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*flow.CollectionGuarantee) @@ -61,7 +108,7 @@ func (_m *Guarantees) 
ByID(collID flow.Identifier) (*flow.CollectionGuarantee, b } if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { - r1 = rf(collID) + r1 = rf(_a0) } else { r1 = ret.Get(1).(bool) } @@ -69,13 +116,17 @@ func (_m *Guarantees) ByID(collID flow.Identifier) (*flow.CollectionGuarantee, b return r0, r1 } -// Has provides a mock function with given fields: collID -func (_m *Guarantees) Has(collID flow.Identifier) bool { - ret := _m.Called(collID) +// Has provides a mock function with given fields: _a0 +func (_m *Guarantees) Has(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Has") + } var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(collID) + r0 = rf(_a0) } else { r0 = ret.Get(0).(bool) } @@ -83,13 +134,17 @@ func (_m *Guarantees) Has(collID flow.Identifier) bool { return r0 } -// Remove provides a mock function with given fields: collID -func (_m *Guarantees) Remove(collID flow.Identifier) bool { - ret := _m.Called(collID) +// Remove provides a mock function with given fields: _a0 +func (_m *Guarantees) Remove(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(collID) + r0 = rf(_a0) } else { r0 = ret.Get(0).(bool) } @@ -97,10 +152,14 @@ func (_m *Guarantees) Remove(collID flow.Identifier) bool { return r0 } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m *Guarantees) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -111,13 +170,32 @@ func (_m *Guarantees) Size() uint { return r0 } -type mockConstructorTestingTNewGuarantees interface { - mock.TestingT - Cleanup(func()) +// Values provides a mock function with no fields +func (_m *Guarantees) Values() []*flow.CollectionGuarantee { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Values") + } + + var r0 []*flow.CollectionGuarantee + if rf, ok := ret.Get(0).(func() []*flow.CollectionGuarantee); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*flow.CollectionGuarantee) + } + } + + return r0 } // NewGuarantees creates a new instance of Guarantees. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGuarantees(t mockConstructorTestingTNewGuarantees) *Guarantees { +// The first argument is typically a *testing.T value. +func NewGuarantees(t interface { + mock.TestingT + Cleanup(func()) +}) *Guarantees { mock := &Guarantees{} mock.Mock.Test(t) diff --git a/module/mempool/mock/identifier_map.go b/module/mempool/mock/identifier_map.go index 6ab8567fda5..7e52e2fbce1 100644 --- a/module/mempool/mock/identifier_map.go +++ b/module/mempool/mock/identifier_map.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mempool +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -14,33 +14,28 @@ type IdentifierMap struct { } // Append provides a mock function with given fields: key, id -func (_m *IdentifierMap) Append(key flow.Identifier, id flow.Identifier) error { - ret := _m.Called(key, id) - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) error); ok { - r0 = rf(key, id) - } else { - r0 = ret.Error(0) - } - - return r0 +func (_m *IdentifierMap) Append(key flow.Identifier, id flow.Identifier) { + _m.Called(key, id) } // Get provides a mock function with given fields: key -func (_m *IdentifierMap) Get(key flow.Identifier) ([]flow.Identifier, bool) { +func (_m *IdentifierMap) Get(key flow.Identifier) (flow.IdentifierList, bool) { ret := _m.Called(key) - var r0 []flow.Identifier + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 flow.IdentifierList var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.Identifier, bool)); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.IdentifierList, bool)); ok { return rf(key) } - if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.Identifier); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) flow.IdentifierList); ok { r0 = rf(key) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]flow.Identifier) + r0 = ret.Get(0).(flow.IdentifierList) } } @@ -57,6 +52,10 @@ func (_m *IdentifierMap) Get(key flow.Identifier) ([]flow.Identifier, bool) { func (_m *IdentifierMap) Has(key flow.Identifier) bool { ret := _m.Called(key) + if len(ret) == 0 { + panic("no return value specified for Has") + } + var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { r0 = rf(key) @@ -67,20 +66,24 @@ func (_m *IdentifierMap) Has(key flow.Identifier) bool { return r0 } -// Keys provides a mock function with given fields: -func (_m *IdentifierMap) Keys() ([]flow.Identifier, bool) { +// Keys provides a mock function with no fields +func (_m *IdentifierMap) Keys() (flow.IdentifierList, bool) { ret := _m.Called() - var r0 []flow.Identifier + if len(ret) == 0 { + panic("no return value specified for Keys") + } + + var r0 flow.IdentifierList var r1 bool - if rf, ok := ret.Get(0).(func() ([]flow.Identifier, bool)); ok { + if rf, ok := ret.Get(0).(func() (flow.IdentifierList, bool)); ok { return rf() } - if rf, ok := ret.Get(0).(func() []flow.Identifier); ok { + if rf, ok := ret.Get(0).(func() flow.IdentifierList); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]flow.Identifier) + r0 = ret.Get(0).(flow.IdentifierList) } } @@ -97,6 +100,10 @@ func (_m *IdentifierMap) Keys() ([]flow.Identifier, bool) { func (_m *IdentifierMap) Remove(key flow.Identifier) bool { ret := _m.Called(key) + if len(ret) == 0 { + panic("no return value specified for Remove") + } + var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { r0 = rf(key) @@ -111,6 +118,10 @@ func (_m *IdentifierMap) Remove(key flow.Identifier) bool { func (_m *IdentifierMap) RemoveIdFromKey(key flow.Identifier, id flow.Identifier) error { ret := _m.Called(key, id) + if len(ret) == 0 { + panic("no return value specified for RemoveIdFromKey") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) error); ok { r0 = rf(key, id) @@ -121,10 +132,14 @@ func (_m *IdentifierMap) RemoveIdFromKey(key flow.Identifier, id flow.Identifier return r0 } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m 
*IdentifierMap) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -135,13 +150,12 @@ func (_m *IdentifierMap) Size() uint { return r0 } -type mockConstructorTestingTNewIdentifierMap interface { +// NewIdentifierMap creates a new instance of IdentifierMap. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIdentifierMap(t interface { mock.TestingT Cleanup(func()) -} - -// NewIdentifierMap creates a new instance of IdentifierMap. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewIdentifierMap(t mockConstructorTestingTNewIdentifierMap) *IdentifierMap { +}) *IdentifierMap { mock := &IdentifierMap{} mock.Mock.Test(t) diff --git a/module/mempool/mock/incorporated_result_seals.go b/module/mempool/mock/incorporated_result_seals.go index dafe6d7bb03..afb3e1426da 100644 --- a/module/mempool/mock/incorporated_result_seals.go +++ b/module/mempool/mock/incorporated_result_seals.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mempool +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -17,6 +17,10 @@ type IncorporatedResultSeals struct { func (_m *IncorporatedResultSeals) Add(irSeal *flow.IncorporatedResultSeal) (bool, error) { ret := _m.Called(irSeal) + if len(ret) == 0 { + panic("no return value specified for Add") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(*flow.IncorporatedResultSeal) (bool, error)); ok { @@ -37,10 +41,14 @@ func (_m *IncorporatedResultSeals) Add(irSeal *flow.IncorporatedResultSeal) (boo return r0, r1 } -// All provides a mock function with given fields: +// All provides a mock function with no fields func (_m *IncorporatedResultSeals) All() []*flow.IncorporatedResultSeal { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for All") + } + var r0 []*flow.IncorporatedResultSeal if rf, ok := ret.Get(0).(func() []*flow.IncorporatedResultSeal); ok { r0 = rf() @@ -53,10 +61,19 @@ func (_m *IncorporatedResultSeals) All() []*flow.IncorporatedResultSeal { return r0 } -// ByID provides a mock function with given fields: _a0 -func (_m *IncorporatedResultSeals) ByID(_a0 flow.Identifier) (*flow.IncorporatedResultSeal, bool) { +// Clear provides a mock function with no fields +func (_m *IncorporatedResultSeals) Clear() { + _m.Called() +} + +// Get provides a mock function with given fields: _a0 +func (_m *IncorporatedResultSeals) Get(_a0 flow.Identifier) (*flow.IncorporatedResultSeal, bool) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 *flow.IncorporatedResultSeal var r1 bool if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.IncorporatedResultSeal, bool)); ok { @@ -79,15 +96,14 @@ func (_m *IncorporatedResultSeals) ByID(_a0 flow.Identifier) (*flow.Incorporated return r0, r1 } -// Clear provides a mock function with given fields: -func (_m *IncorporatedResultSeals) Clear() { - _m.Called() -} - -// Limit provides a mock function with given fields: +// Limit provides a mock function with no fields func (_m *IncorporatedResultSeals) Limit() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Limit") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = 
rf() @@ -102,6 +118,10 @@ func (_m *IncorporatedResultSeals) Limit() uint { func (_m *IncorporatedResultSeals) PruneUpToHeight(height uint64) error { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for PruneUpToHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(uint64) error); ok { r0 = rf(height) @@ -116,6 +136,10 @@ func (_m *IncorporatedResultSeals) PruneUpToHeight(height uint64) error { func (_m *IncorporatedResultSeals) Remove(incorporatedResultID flow.Identifier) bool { ret := _m.Called(incorporatedResultID) + if len(ret) == 0 { + panic("no return value specified for Remove") + } + var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { r0 = rf(incorporatedResultID) @@ -126,10 +150,14 @@ func (_m *IncorporatedResultSeals) Remove(incorporatedResultID flow.Identifier) return r0 } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m *IncorporatedResultSeals) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -140,13 +168,12 @@ func (_m *IncorporatedResultSeals) Size() uint { return r0 } -type mockConstructorTestingTNewIncorporatedResultSeals interface { +// NewIncorporatedResultSeals creates a new instance of IncorporatedResultSeals. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIncorporatedResultSeals(t interface { mock.TestingT Cleanup(func()) -} - -// NewIncorporatedResultSeals creates a new instance of IncorporatedResultSeals. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewIncorporatedResultSeals(t mockConstructorTestingTNewIncorporatedResultSeals) *IncorporatedResultSeals { +}) *IncorporatedResultSeals { mock := &IncorporatedResultSeals{} mock.Mock.Test(t) diff --git a/module/mempool/mock/mempool.go b/module/mempool/mock/mempool.go new file mode 100644 index 00000000000..b1b555994ea --- /dev/null +++ b/module/mempool/mock/mempool.go @@ -0,0 +1,201 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import mock "github.com/stretchr/testify/mock" + +// Mempool is an autogenerated mock type for the Mempool type +type Mempool[K comparable, V any] struct { + mock.Mock +} + +// Add provides a mock function with given fields: _a0, _a1 +func (_m *Mempool[K, V]) Add(_a0 K, _a1 V) bool { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(K, V) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Adjust provides a mock function with given fields: key, f +func (_m *Mempool[K, V]) Adjust(key K, f func(V) V) (V, bool) { + ret := _m.Called(key, f) + + if len(ret) == 0 { + panic("no return value specified for Adjust") + } + + var r0 V + var r1 bool + if rf, ok := ret.Get(0).(func(K, func(V) V) (V, bool)); ok { + return rf(key, f) + } + if rf, ok := ret.Get(0).(func(K, func(V) V) V); ok { + r0 = rf(key, f) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(V) + } + } + + if rf, ok := ret.Get(1).(func(K, func(V) V) bool); ok { + r1 = rf(key, f) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// All provides a mock function with no fields +func (_m *Mempool[K, V]) All() map[K]V { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 map[K]V + if rf, ok := ret.Get(0).(func() map[K]V); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[K]V) + } + } + + return r0 +} + +// Clear provides a mock function with no fields +func (_m *Mempool[K, V]) Clear() { + _m.Called() +} + +// Get provides a mock function with given fields: _a0 +func (_m *Mempool[K, V]) Get(_a0 K) (V, bool) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 V + var r1 bool + if rf, ok := ret.Get(0).(func(K) (V, bool)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(K) V); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(V) + } + } + + if rf, ok := ret.Get(1).(func(K) bool); ok { + r1 = rf(_a0) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// Has provides a mock function with given fields: _a0 +func (_m *Mempool[K, V]) Has(_a0 K) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Has") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(K) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Remove provides a mock function with given fields: _a0 +func (_m *Mempool[K, V]) Remove(_a0 K) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(K) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Size provides a mock function with no fields +func (_m *Mempool[K, V]) Size() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Size") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// Values provides a mock function with no fields +func (_m *Mempool[K, V]) Values() []V { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Values") + } + + var r0 []V + if rf, ok := ret.Get(0).(func() []V); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]V) + } + } + + return r0 +} + +// NewMempool creates a new 
instance of Mempool. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMempool[K comparable, V any](t interface { + mock.TestingT + Cleanup(func()) +}) *Mempool[K, V] { + mock := &Mempool[K, V]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mempool/mock/mutable_back_data.go b/module/mempool/mock/mutable_back_data.go new file mode 100644 index 00000000000..bc330255a7f --- /dev/null +++ b/module/mempool/mock/mutable_back_data.go @@ -0,0 +1,263 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// MutableBackData is an autogenerated mock type for the MutableBackData type +type MutableBackData[K comparable, V any] struct { + mock.Mock +} + +// Add provides a mock function with given fields: key, value +func (_m *MutableBackData[K, V]) Add(key K, value V) bool { + ret := _m.Called(key, value) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(K, V) bool); ok { + r0 = rf(key, value) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Adjust provides a mock function with given fields: key, f +func (_m *MutableBackData[K, V]) Adjust(key K, f func(V) V) (V, bool) { + ret := _m.Called(key, f) + + if len(ret) == 0 { + panic("no return value specified for Adjust") + } + + var r0 V + var r1 bool + if rf, ok := ret.Get(0).(func(K, func(V) V) (V, bool)); ok { + return rf(key, f) + } + if rf, ok := ret.Get(0).(func(K, func(V) V) V); ok { + r0 = rf(key, f) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(V) + } + } + + if rf, ok := ret.Get(1).(func(K, func(V) V) bool); ok { + r1 = rf(key, f) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// AdjustWithInit provides a mock function with given fields: key, adjust, init +func (_m *MutableBackData[K, V]) AdjustWithInit(key K, adjust func(V) V, init func() V) (V, bool) { + ret := _m.Called(key, adjust, init) + + if len(ret) == 0 { + panic("no return value specified for AdjustWithInit") + } + + var r0 V + var r1 bool + if rf, ok := ret.Get(0).(func(K, func(V) V, func() V) (V, bool)); ok { + return rf(key, adjust, init) + } + if rf, ok := ret.Get(0).(func(K, func(V) V, func() V) V); ok { + r0 = rf(key, adjust, init) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(V) + } + } + + if rf, ok := ret.Get(1).(func(K, func(V) V, func() V) bool); ok { + r1 = rf(key, adjust, init) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// All provides a mock function with no fields +func (_m *MutableBackData[K, V]) All() map[K]V { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 map[K]V + if rf, ok := ret.Get(0).(func() map[K]V); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[K]V) + } + } + + return r0 +} + +// Clear provides a mock function with no fields +func (_m *MutableBackData[K, V]) Clear() { + _m.Called() +} + +// Get provides a mock function with given fields: key +func (_m *MutableBackData[K, V]) Get(key K) (V, bool) { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 V + var r1 bool + if rf, ok := ret.Get(0).(func(K) (V, bool)); ok { + return rf(key) + } + if rf, ok := ret.Get(0).(func(K) V); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(V) + } + } + + if rf, ok := ret.Get(1).(func(K) bool); ok { + r1 = rf(key) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// Has provides a mock function with given fields: key +func (_m *MutableBackData[K, V]) Has(key K) bool { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Has") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(K) bool); ok { + r0 = rf(key) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Keys provides a mock function with no fields +func (_m *MutableBackData[K, V]) Keys() []K { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Keys") + } + + var r0 []K + if rf, ok := ret.Get(0).(func() []K); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]K) + } + } + + return r0 +} + +// Remove provides a mock function with given fields: key +func (_m *MutableBackData[K, V]) Remove(key K) (V, bool) { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 V + var r1 bool + if rf, ok := ret.Get(0).(func(K) (V, bool)); ok { + return rf(key) + } + if rf, ok := ret.Get(0).(func(K) V); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(V) + } + } + + if rf, ok := ret.Get(1).(func(K) bool); ok { + r1 = rf(key) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// Size provides a mock function with no fields +func (_m *MutableBackData[K, V]) Size() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Size") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// Values provides a mock function with no fields +func (_m *MutableBackData[K, V]) Values() []V { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Values") + } + + var r0 []V + if rf, ok := ret.Get(0).(func() []V); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]V) + } + } + + return r0 +} + +// NewMutableBackData creates a new instance of MutableBackData. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMutableBackData[K comparable, V any](t interface { + mock.TestingT + Cleanup(func()) +}) *MutableBackData[K, V] { + mock := &MutableBackData[K, V]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mempool/mock/on_ejection.go b/module/mempool/mock/on_ejection.go deleted file mode 100644 index 266c44b076c..00000000000 --- a/module/mempool/mock/on_ejection.go +++ /dev/null @@ -1,34 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mempool - -import ( - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) - -// OnEjection is an autogenerated mock type for the OnEjection type -type OnEjection struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0 -func (_m *OnEjection) Execute(_a0 flow.Entity) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewOnEjection interface { - mock.TestingT - Cleanup(func()) -} - -// NewOnEjection creates a new instance of OnEjection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
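The generic Mempool[K, V] and MutableBackData[K, V] mocks added above take type parameters that cannot be inferred from the constructor's single testing argument, so call sites must spell them out. A sketch with an assumed key/value pairing (hypothetical test):

package example_test

import (
	"testing"

	"github.com/onflow/flow-go/model/flow"
	mempoolmock "github.com/onflow/flow-go/module/mempool/mock"
	"github.com/stretchr/testify/mock"
)

func TestGenericMempool(t *testing.T) {
	// Explicit type arguments: K and V appear only in the return type.
	pool := mempoolmock.NewMempool[flow.Identifier, *flow.CollectionGuarantee](t)

	pool.On("Has", mock.Anything).Return(false)
	pool.On("Size").Return(uint(0))

	if pool.Has(flow.Identifier{}) || pool.Size() != 0 {
		t.Fatal("expected empty mempool")
	}
}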
-func NewOnEjection(t mockConstructorTestingTNewOnEjection) *OnEjection { - mock := &OnEjection{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/mempool/mock/pending_receipts.go b/module/mempool/mock/pending_receipts.go index 9ad0910aea4..5ae9dcd8778 100644 --- a/module/mempool/mock/pending_receipts.go +++ b/module/mempool/mock/pending_receipts.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mempool +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -17,6 +17,10 @@ type PendingReceipts struct { func (_m *PendingReceipts) Add(receipt *flow.ExecutionReceipt) bool { ret := _m.Called(receipt) + if len(ret) == 0 { + panic("no return value specified for Add") + } + var r0 bool if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt) bool); ok { r0 = rf(receipt) @@ -27,13 +31,17 @@ func (_m *PendingReceipts) Add(receipt *flow.ExecutionReceipt) bool { return r0 } -// ByPreviousResultID provides a mock function with given fields: previousReusltID -func (_m *PendingReceipts) ByPreviousResultID(previousReusltID flow.Identifier) []*flow.ExecutionReceipt { - ret := _m.Called(previousReusltID) +// ByPreviousResultID provides a mock function with given fields: previousResultID +func (_m *PendingReceipts) ByPreviousResultID(previousResultID flow.Identifier) []*flow.ExecutionReceipt { + ret := _m.Called(previousResultID) + + if len(ret) == 0 { + panic("no return value specified for ByPreviousResultID") + } var r0 []*flow.ExecutionReceipt if rf, ok := ret.Get(0).(func(flow.Identifier) []*flow.ExecutionReceipt); ok { - r0 = rf(previousReusltID) + r0 = rf(previousResultID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*flow.ExecutionReceipt) @@ -47,6 +55,10 @@ func (_m *PendingReceipts) ByPreviousResultID(previousReusltID flow.Identifier) func (_m *PendingReceipts) PruneUpToHeight(height uint64) error { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for PruneUpToHeight") + } + var r0 error if rf, ok := ret.Get(0).(func(uint64) error); ok { r0 = rf(height) @@ -61,6 +73,10 @@ func (_m *PendingReceipts) PruneUpToHeight(height uint64) error { func (_m *PendingReceipts) Remove(receiptID flow.Identifier) bool { ret := _m.Called(receiptID) + if len(ret) == 0 { + panic("no return value specified for Remove") + } + var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { r0 = rf(receiptID) @@ -71,13 +87,12 @@ func (_m *PendingReceipts) Remove(receiptID flow.Identifier) bool { return r0 } -type mockConstructorTestingTNewPendingReceipts interface { +// NewPendingReceipts creates a new instance of PendingReceipts. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPendingReceipts(t interface { mock.TestingT Cleanup(func()) -} - -// NewPendingReceipts creates a new instance of PendingReceipts. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewPendingReceipts(t mockConstructorTestingTNewPendingReceipts) *PendingReceipts { +}) *PendingReceipts { mock := &PendingReceipts{} mock.Mock.Test(t) diff --git a/module/mempool/mock/receipt_filter.go b/module/mempool/mock/receipt_filter.go deleted file mode 100644 index f3cdcec50c1..00000000000 --- a/module/mempool/mock/receipt_filter.go +++ /dev/null @@ -1,43 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mempool - -import ( - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) - -// ReceiptFilter is an autogenerated mock type for the ReceiptFilter type -type ReceiptFilter struct { - mock.Mock -} - -// Execute provides a mock function with given fields: receipt -func (_m *ReceiptFilter) Execute(receipt *flow.ExecutionReceipt) bool { - ret := _m.Called(receipt) - - var r0 bool - if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt) bool); ok { - r0 = rf(receipt) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -type mockConstructorTestingTNewReceiptFilter interface { - mock.TestingT - Cleanup(func()) -} - -// NewReceiptFilter creates a new instance of ReceiptFilter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewReceiptFilter(t mockConstructorTestingTNewReceiptFilter) *ReceiptFilter { - mock := &ReceiptFilter{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/mempool/mock/results.go b/module/mempool/mock/results.go deleted file mode 100644 index 199f146b512..00000000000 --- a/module/mempool/mock/results.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mempool - -import ( - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) - -// Results is an autogenerated mock type for the Results type -type Results struct { - mock.Mock -} - -// Add provides a mock function with given fields: result -func (_m *Results) Add(result *flow.ExecutionResult) bool { - ret := _m.Called(result) - - var r0 bool - if rf, ok := ret.Get(0).(func(*flow.ExecutionResult) bool); ok { - r0 = rf(result) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// All provides a mock function with given fields: -func (_m *Results) All() []*flow.ExecutionResult { - ret := _m.Called() - - var r0 []*flow.ExecutionResult - if rf, ok := ret.Get(0).(func() []*flow.ExecutionResult); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*flow.ExecutionResult) - } - } - - return r0 -} - -// ByID provides a mock function with given fields: resultID -func (_m *Results) ByID(resultID flow.Identifier) (*flow.ExecutionResult, bool) { - ret := _m.Called(resultID) - - var r0 *flow.ExecutionResult - var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ExecutionResult, bool)); ok { - return rf(resultID) - } - if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.ExecutionResult); ok { - r0 = rf(resultID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.ExecutionResult) - } - } - - if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { - r1 = rf(resultID) - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} - -// Has provides a mock function with given fields: resultID -func (_m *Results) Has(resultID flow.Identifier) bool { - ret := _m.Called(resultID) - - var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(resultID) - } else { - r0 = ret.Get(0).(bool) - } 
- - return r0 -} - -// Remove provides a mock function with given fields: resultID -func (_m *Results) Remove(resultID flow.Identifier) bool { - ret := _m.Called(resultID) - - var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(resultID) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Size provides a mock function with given fields: -func (_m *Results) Size() uint { - ret := _m.Called() - - var r0 uint - if rf, ok := ret.Get(0).(func() uint); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint) - } - - return r0 -} - -type mockConstructorTestingTNewResults interface { - mock.TestingT - Cleanup(func()) -} - -// NewResults creates a new instance of Results. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewResults(t mockConstructorTestingTNewResults) *Results { - mock := &Results{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/mempool/mock/transaction_timings.go b/module/mempool/mock/transaction_timings.go index 69ba557458d..568336d94c0 100644 --- a/module/mempool/mock/transaction_timings.go +++ b/module/mempool/mock/transaction_timings.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mempool +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -13,13 +13,17 @@ type TransactionTimings struct { mock.Mock } -// Add provides a mock function with given fields: tx -func (_m *TransactionTimings) Add(tx *flow.TransactionTiming) bool { - ret := _m.Called(tx) +// Add provides a mock function with given fields: _a0, _a1 +func (_m *TransactionTimings) Add(_a0 flow.Identifier, _a1 *flow.TransactionTiming) bool { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Add") + } var r0 bool - if rf, ok := ret.Get(0).(func(*flow.TransactionTiming) bool); ok { - r0 = rf(tx) + if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.TransactionTiming) bool); ok { + r0 = rf(_a0, _a1) } else { r0 = ret.Get(0).(bool) } @@ -27,17 +31,21 @@ func (_m *TransactionTimings) Add(tx *flow.TransactionTiming) bool { return r0 } -// Adjust provides a mock function with given fields: txID, f -func (_m *TransactionTimings) Adjust(txID flow.Identifier, f func(*flow.TransactionTiming) *flow.TransactionTiming) (*flow.TransactionTiming, bool) { - ret := _m.Called(txID, f) +// Adjust provides a mock function with given fields: key, f +func (_m *TransactionTimings) Adjust(key flow.Identifier, f func(*flow.TransactionTiming) *flow.TransactionTiming) (*flow.TransactionTiming, bool) { + ret := _m.Called(key, f) + + if len(ret) == 0 { + panic("no return value specified for Adjust") + } var r0 *flow.TransactionTiming var r1 bool if rf, ok := ret.Get(0).(func(flow.Identifier, func(*flow.TransactionTiming) *flow.TransactionTiming) (*flow.TransactionTiming, bool)); ok { - return rf(txID, f) + return rf(key, f) } if rf, ok := ret.Get(0).(func(flow.Identifier, func(*flow.TransactionTiming) *flow.TransactionTiming) *flow.TransactionTiming); ok { - r0 = rf(txID, f) + r0 = rf(key, f) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*flow.TransactionTiming) @@ -45,7 +53,7 @@ func (_m *TransactionTimings) Adjust(txID flow.Identifier, f func(*flow.Transact } if rf, ok := ret.Get(1).(func(flow.Identifier, func(*flow.TransactionTiming) *flow.TransactionTiming) bool); ok { - r1 = rf(txID, f) + r1 = rf(key, f) } else { r1 = ret.Get(1).(bool) } @@ -53,33 
+61,46 @@ func (_m *TransactionTimings) Adjust(txID flow.Identifier, f func(*flow.Transact return r0, r1 } -// All provides a mock function with given fields: -func (_m *TransactionTimings) All() []*flow.TransactionTiming { +// All provides a mock function with no fields +func (_m *TransactionTimings) All() map[flow.Identifier]*flow.TransactionTiming { ret := _m.Called() - var r0 []*flow.TransactionTiming - if rf, ok := ret.Get(0).(func() []*flow.TransactionTiming); ok { + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 map[flow.Identifier]*flow.TransactionTiming + if rf, ok := ret.Get(0).(func() map[flow.Identifier]*flow.TransactionTiming); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*flow.TransactionTiming) + r0 = ret.Get(0).(map[flow.Identifier]*flow.TransactionTiming) } } return r0 } -// ByID provides a mock function with given fields: txID -func (_m *TransactionTimings) ByID(txID flow.Identifier) (*flow.TransactionTiming, bool) { - ret := _m.Called(txID) +// Clear provides a mock function with no fields +func (_m *TransactionTimings) Clear() { + _m.Called() +} + +// Get provides a mock function with given fields: _a0 +func (_m *TransactionTimings) Get(_a0 flow.Identifier) (*flow.TransactionTiming, bool) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Get") + } var r0 *flow.TransactionTiming var r1 bool if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.TransactionTiming, bool)); ok { - return rf(txID) + return rf(_a0) } if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.TransactionTiming); ok { - r0 = rf(txID) + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*flow.TransactionTiming) @@ -87,7 +108,7 @@ func (_m *TransactionTimings) ByID(txID flow.Identifier) (*flow.TransactionTimin } if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { - r1 = rf(txID) + r1 = rf(_a0) } else { r1 = ret.Get(1).(bool) } @@ -95,13 +116,17 @@ func (_m *TransactionTimings) ByID(txID flow.Identifier) (*flow.TransactionTimin return r0, r1 } -// Remove provides a mock function with given fields: txID -func (_m *TransactionTimings) Remove(txID flow.Identifier) bool { - ret := _m.Called(txID) +// Has provides a mock function with given fields: _a0 +func (_m *TransactionTimings) Has(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Has") + } var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(txID) + r0 = rf(_a0) } else { r0 = ret.Get(0).(bool) } @@ -109,13 +134,68 @@ func (_m *TransactionTimings) Remove(txID flow.Identifier) bool { return r0 } -type mockConstructorTestingTNewTransactionTimings interface { - mock.TestingT - Cleanup(func()) +// Remove provides a mock function with given fields: _a0 +func (_m *TransactionTimings) Remove(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Size provides a mock function with no fields +func (_m *TransactionTimings) Size() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Size") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// Values provides a mock function with no fields +func (_m *TransactionTimings) 
Values() []*flow.TransactionTiming { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Values") + } + + var r0 []*flow.TransactionTiming + if rf, ok := ret.Get(0).(func() []*flow.TransactionTiming); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*flow.TransactionTiming) + } + } + + return r0 } // NewTransactionTimings creates a new instance of TransactionTimings. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTransactionTimings(t mockConstructorTestingTNewTransactionTimings) *TransactionTimings { +// The first argument is typically a *testing.T value. +func NewTransactionTimings(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionTimings { mock := &TransactionTimings{} mock.Mock.Test(t) diff --git a/module/mempool/mock/transactions.go b/module/mempool/mock/transactions.go index 96a14fc3b19..5d986705688 100644 --- a/module/mempool/mock/transactions.go +++ b/module/mempool/mock/transactions.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mempool +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -13,13 +13,17 @@ type Transactions struct { mock.Mock } -// Add provides a mock function with given fields: tx -func (_m *Transactions) Add(tx *flow.TransactionBody) bool { - ret := _m.Called(tx) +// Add provides a mock function with given fields: _a0, _a1 +func (_m *Transactions) Add(_a0 flow.Identifier, _a1 *flow.TransactionBody) bool { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Add") + } var r0 bool - if rf, ok := ret.Get(0).(func(*flow.TransactionBody) bool); ok { - r0 = rf(tx) + if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.TransactionBody) bool); ok { + r0 = rf(_a0, _a1) } else { r0 = ret.Get(0).(bool) } @@ -27,13 +31,67 @@ func (_m *Transactions) Add(tx *flow.TransactionBody) bool { return r0 } -// All provides a mock function with given fields: -func (_m *Transactions) All() []*flow.TransactionBody { +// Adjust provides a mock function with given fields: key, f +func (_m *Transactions) Adjust(key flow.Identifier, f func(*flow.TransactionBody) *flow.TransactionBody) (*flow.TransactionBody, bool) { + ret := _m.Called(key, f) + + if len(ret) == 0 { + panic("no return value specified for Adjust") + } + + var r0 *flow.TransactionBody + var r1 bool + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*flow.TransactionBody) *flow.TransactionBody) (*flow.TransactionBody, bool)); ok { + return rf(key, f) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*flow.TransactionBody) *flow.TransactionBody) *flow.TransactionBody); ok { + r0 = rf(key, f) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionBody) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, func(*flow.TransactionBody) *flow.TransactionBody) bool); ok { + r1 = rf(key, f) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// All provides a mock function with no fields +func (_m *Transactions) All() map[flow.Identifier]*flow.TransactionBody { ret := _m.Called() - var r0 []*flow.TransactionBody - if rf, ok := ret.Get(0).(func() []*flow.TransactionBody); ok { + if len(ret) == 0 { + panic("no return value specified for All") + } + + var r0 map[flow.Identifier]*flow.TransactionBody + if rf, ok := ret.Get(0).(func() map[flow.Identifier]*flow.TransactionBody); ok { r0 = rf() + } else { + if ret.Get(0) != nil { + r0 
= ret.Get(0).(map[flow.Identifier]*flow.TransactionBody) + } + } + + return r0 +} + +// ByPayer provides a mock function with given fields: payer +func (_m *Transactions) ByPayer(payer flow.Address) []*flow.TransactionBody { + ret := _m.Called(payer) + + if len(ret) == 0 { + panic("no return value specified for ByPayer") + } + + var r0 []*flow.TransactionBody + if rf, ok := ret.Get(0).(func(flow.Address) []*flow.TransactionBody); ok { + r0 = rf(payer) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*flow.TransactionBody) @@ -43,17 +101,26 @@ func (_m *Transactions) All() []*flow.TransactionBody { return r0 } -// ByID provides a mock function with given fields: txID -func (_m *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, bool) { - ret := _m.Called(txID) +// Clear provides a mock function with no fields +func (_m *Transactions) Clear() { + _m.Called() +} + +// Get provides a mock function with given fields: _a0 +func (_m *Transactions) Get(_a0 flow.Identifier) (*flow.TransactionBody, bool) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Get") + } var r0 *flow.TransactionBody var r1 bool if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.TransactionBody, bool)); ok { - return rf(txID) + return rf(_a0) } if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.TransactionBody); ok { - r0 = rf(txID) + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*flow.TransactionBody) @@ -61,7 +128,7 @@ func (_m *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, bool) } if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { - r1 = rf(txID) + r1 = rf(_a0) } else { r1 = ret.Get(1).(bool) } @@ -69,18 +136,17 @@ func (_m *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, bool) return r0, r1 } -// Clear provides a mock function with given fields: -func (_m *Transactions) Clear() { - _m.Called() -} +// Has provides a mock function with given fields: _a0 +func (_m *Transactions) Has(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) -// Has provides a mock function with given fields: txID -func (_m *Transactions) Has(txID flow.Identifier) bool { - ret := _m.Called(txID) + if len(ret) == 0 { + panic("no return value specified for Has") + } var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(txID) + r0 = rf(_a0) } else { r0 = ret.Get(0).(bool) } @@ -88,13 +154,17 @@ func (_m *Transactions) Has(txID flow.Identifier) bool { return r0 } -// Remove provides a mock function with given fields: txID -func (_m *Transactions) Remove(txID flow.Identifier) bool { - ret := _m.Called(txID) +// Remove provides a mock function with given fields: _a0 +func (_m *Transactions) Remove(_a0 flow.Identifier) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(txID) + r0 = rf(_a0) } else { r0 = ret.Get(0).(bool) } @@ -102,10 +172,14 @@ func (_m *Transactions) Remove(txID flow.Identifier) bool { return r0 } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m *Transactions) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -116,13 +190,32 @@ func (_m *Transactions) Size() uint { return r0 } -type mockConstructorTestingTNewTransactions interface { - mock.TestingT - Cleanup(func()) +// Values 
provides a mock function with no fields +func (_m *Transactions) Values() []*flow.TransactionBody { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Values") + } + + var r0 []*flow.TransactionBody + if rf, ok := ret.Get(0).(func() []*flow.TransactionBody); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*flow.TransactionBody) + } + } + + return r0 } // NewTransactions creates a new instance of Transactions. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTransactions(t mockConstructorTestingTNewTransactions) *Transactions { +// The first argument is typically a *testing.T value. +func NewTransactions(t interface { + mock.TestingT + Cleanup(func()) +}) *Transactions { mock := &Transactions{} mock.Mock.Test(t) diff --git a/module/mempool/model/approval_map_entity.go b/module/mempool/model/approval_map_entity.go deleted file mode 100644 index b00f922f0a4..00000000000 --- a/module/mempool/model/approval_map_entity.go +++ /dev/null @@ -1,28 +0,0 @@ -package model - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// ApprovalMapEntity is an internal data structure for the approval mempool. -// It implements a key-value entry where a chunk is associated with a map of -// approvals indexed by approver ID. -type ApprovalMapEntity struct { - ChunkKey flow.Identifier - ResultID flow.Identifier - ChunkIndex uint64 - Approvals map[flow.Identifier]*flow.ResultApproval // [approver_id] => approval -} - -// ID implements flow.Entity.ID for ApprovalMapEntity to make it capable of -// being stored directly in mempools and storage. -func (a *ApprovalMapEntity) ID() flow.Identifier { - return a.ChunkKey -} - -// CheckSum implements flow.Entity.CheckSum for ApprovalMapEntity to make it -// capable of being stored directly in mempools and storage. It makes the id of -// the entire ApprovalMapEntity. -func (a *ApprovalMapEntity) Checksum() flow.Identifier { - return flow.MakeID(a) -} diff --git a/module/mempool/model/id_entity.go b/module/mempool/model/id_entity.go deleted file mode 100644 index 61e02c0aa25..00000000000 --- a/module/mempool/model/id_entity.go +++ /dev/null @@ -1,27 +0,0 @@ -package model - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// IdEntity is an internal data structure for mempool -// It implements a wrapper around the original flow Identifier -// which represents it as a flow Entity -// that allows the identifier to directly gets stored in the mempool -type IdEntity struct { - Id flow.Identifier -} - -// ID implements flow.Entity.ID for Identifier to make it capable of being stored directly -// in mempools and storage -// ID returns the identifier itself -func (id IdEntity) ID() flow.Identifier { - return id.Id -} - -// ID implements flow.Entity.ID for Identifier to make it capable of being stored directly -// in mempools and storage -// ID returns checksum of identifier -func (id IdEntity) Checksum() flow.Identifier { - return flow.MakeID(id) -} diff --git a/module/mempool/model/id_map_entity.go b/module/mempool/model/id_map_entity.go deleted file mode 100644 index 8846330b1f4..00000000000 --- a/module/mempool/model/id_map_entity.go +++ /dev/null @@ -1,24 +0,0 @@ -package model - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// IdMapEntity is an internal data structure for mempool -// It implements a key-value entry where an identifier is mapped to a list of other identifiers. 
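Reviewer note: the regenerated mocks above drop the named `mockConstructorTestingT…` interfaces for inline ones and move `Add` to the keyed signature, but test wiring is unchanged. A minimal usage sketch, assuming the regenerated package is imported as `mempoolmock` and that `unittest.TransactionBodyFixture` remains available; the test name is illustrative:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	mempoolmock "github.com/onflow/flow-go/module/mempool/mock"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestTransactionsPoolMock(t *testing.T) {
	// The constructor registers a cleanup hook that asserts all expectations.
	pool := mempoolmock.NewTransactions(t)

	tx := unittest.TransactionBodyFixture()
	txID := tx.ID()

	// Expectations use the new keyed signatures.
	pool.On("Add", txID, &tx).Return(true).Once()
	pool.On("Has", mock.Anything).Return(false)

	require.True(t, pool.Add(txID, &tx))
	require.False(t, pool.Has(txID))
}
```

Because expectations are asserted at cleanup, an `.On(...)` set here that is never exercised fails the test without any explicit `AssertExpectations` call.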
-type IdMapEntity struct { - Key flow.Identifier - IDs map[flow.Identifier]struct{} -} - -// ID implements flow.Entity.ID for IdMapEntity to make it capable of being stored directly -// in mempools and storage. It returns key field of the id. -func (id IdMapEntity) ID() flow.Identifier { - return id.Key -} - -// CheckSum implements flow.Entity.CheckSum for IdMapEntity to make it capable of being stored directly -// in mempools and storage. It makes the id of the entire IdMapEntity. -func (id IdMapEntity) Checksum() flow.Identifier { - return flow.MakeID(id) -} diff --git a/module/mempool/model/incorporated_result_map.go b/module/mempool/model/incorporated_result_map.go deleted file mode 100644 index 44a88a9c92e..00000000000 --- a/module/mempool/model/incorporated_result_map.go +++ /dev/null @@ -1,24 +0,0 @@ -package model - -import "github.com/onflow/flow-go/model/flow" - -// IncorporatedResultMap is an internal data structure for the incorporated -// results mempool. IncorporatedResults are indexed by ExecutionResult ID and -// IncorporatedBlockID -type IncorporatedResultMap struct { - ExecutionResult *flow.ExecutionResult - IncorporatedResults map[flow.Identifier]*flow.IncorporatedResult // [incorporated block ID] => IncorporatedResult -} - -// ID implements flow.Entity.ID for IncorporatedResultMap to make it capable of -// being stored directly in mempools and storage. -func (a *IncorporatedResultMap) ID() flow.Identifier { - return a.ExecutionResult.ID() -} - -// CheckSum implements flow.Entity.CheckSum for IncorporatedResultMap to make it -// capable of being stored directly in mempools and storage. It makes the id of -// the entire IncorporatedResultMap. -func (a *IncorporatedResultMap) Checksum() flow.Identifier { - return flow.MakeID(a) -} diff --git a/module/mempool/mutable_back_data.go b/module/mempool/mutable_back_data.go new file mode 100644 index 00000000000..97076a1bdb4 --- /dev/null +++ b/module/mempool/mutable_back_data.go @@ -0,0 +1,28 @@ +package mempool + +// MutableBackData extends BackData by allowing modifications to stored data structures. +// Unlike BackData, this interface supports adjusting existing data structures, making it suitable for use cases +// where they do not have a cryptographic hash function. +// +// WARNING: Entities that are cryptographically protected, such as Entity objects tied to signatures or hashes, +// should not be modified. Use BackData instead to prevent unintended mutations. +type MutableBackData[K comparable, V any] interface { + BackData[K, V] + + // Adjust adjusts the value using the given function if the given key can be found. + // Returns: + // - value, true if the value with the given key was found. The returned value is the version after the update is applied. + // - nil, false if no value with the given key was found + Adjust(key K, f func(value V) V) (V, bool) + + // AdjustWithInit adjusts the value using the given function if the given key can be found. When the + // value is not found, it initializes the value using the given init function and then applies the adjust function. + // Args: + // - key: the identifier of the value to adjust. + // - adjust: the function that adjusts the value. + // - init: the function that initializes the value when it is not found. + // Returns: + // - the adjusted value. + // - a bool which indicates whether the value was either added or adjusted. 
+ AdjustWithInit(key K, adjust func(value V) V, init func() V) (V, bool) +} diff --git a/module/mempool/pending_receipts.go b/module/mempool/pending_receipts.go index 2fd2d321a23..b0906c61842 100644 --- a/module/mempool/pending_receipts.go +++ b/module/mempool/pending_receipts.go @@ -16,7 +16,7 @@ type PendingReceipts interface { // ByPreviousResultID returns all the pending receipts whose previous result id // matches the given result id - ByPreviousResultID(previousReusltID flow.Identifier) []*flow.ExecutionReceipt + ByPreviousResultID(previousResultID flow.Identifier) []*flow.ExecutionReceipt // PruneUpToHeight remove all receipts for blocks whose height is strictly // smaller that height. Note: receipts for blocks at height are retained. diff --git a/module/mempool/queue/heroQueue.go b/module/mempool/queue/heroQueue.go index ec1269147b8..2eb134e31f7 100644 --- a/module/mempool/queue/heroQueue.go +++ b/module/mempool/queue/heroQueue.go @@ -11,30 +11,32 @@ import ( "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" ) -// HeroQueue implements a HeroCache-based in-memory queue. +// HeroQueue is a generic in-memory queue implementation based on HeroCache. // HeroCache is a key-value cache with zero heap allocation and optimized Garbage Collection. -type HeroQueue struct { +type HeroQueue[V any] struct { mu sync.RWMutex - cache *herocache.Cache + cache *herocache.Cache[V] sizeLimit uint } -func NewHeroQueue(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *HeroQueue { - return &HeroQueue{ - cache: herocache.NewCache( +// NewHeroQueue creates a new instance of HeroQueue with the specified size limit. +func NewHeroQueue[V any](sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *HeroQueue[V] { + return &HeroQueue[V]{ + cache: herocache.NewCache[V]( sizeLimit, herocache.DefaultOversizeFactor, heropool.NoEjection, logger.With().Str("mempool", "hero-queue").Logger(), - collector), + collector, + ), sizeLimit: uint(sizeLimit), } } -// Push stores the entity into the queue. +// Push stores the key-value pair into the queue. // Boolean returned variable determines whether push was successful, i.e., // push may be dropped if queue is full or already exists. -func (c *HeroQueue) Push(entity flow.Entity) bool { +func (c *HeroQueue[V]) Push(key flow.Identifier, value V) bool { c.mu.Lock() defer c.mu.Unlock() @@ -45,26 +47,28 @@ func (c *HeroQueue) Push(entity flow.Entity) bool { return false } - return c.cache.Add(entity.ID(), entity) + return c.cache.Add(key, value) } // Pop removes and returns the head of queue, and updates the head to the next element. // Boolean return value determines whether pop is successful, i.e., popping an empty queue returns false. -func (c *HeroQueue) Pop() (flow.Entity, bool) { +func (c *HeroQueue[V]) Pop() (value V, ok bool) { c.mu.Lock() defer c.mu.Unlock() - head, ok := c.cache.Head() + var key flow.Identifier + key, value, ok = c.cache.Head() if !ok { // cache is empty, and there is no head yet to pop. - return nil, false + return value, false } - c.cache.Remove(head.ID()) - return head, true + c.cache.Remove(key) + return value, true } -func (c *HeroQueue) Size() uint { +// Size returns the number of elements currently stored in the queue. 
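Reviewer note: `MutableBackData` (introduced above) is the hook for value types that lack a cryptographic identity and may therefore be mutated in place. A minimal sketch of an upsert-style counter over the new interface; `seenCount` and `bump` are illustrative names, not part of this diff:

```go
package example

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/mempool"
)

// seenCount is a hypothetical value type with no cryptographic identity,
// so adjusting it in place is safe.
type seenCount struct {
	Count uint64
}

// bump increments the counter stored under key, initializing it on first use.
func bump(backData mempool.MutableBackData[flow.Identifier, *seenCount], key flow.Identifier) uint64 {
	updated, _ := backData.AdjustWithInit(key,
		// adjust: applied when the key exists (or right after init creates it)
		func(c *seenCount) *seenCount {
			c.Count++
			return c
		},
		// init: only invoked when the key is absent
		func() *seenCount {
			return &seenCount{}
		},
	)
	return updated.Count
}
```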
+func (c *HeroQueue[V]) Size() uint { c.mu.RLock() defer c.mu.RUnlock() diff --git a/module/mempool/queue/heroQueue_test.go b/module/mempool/queue/heroQueue_test.go index 75396a9b1ed..0107c789a56 100644 --- a/module/mempool/queue/heroQueue_test.go +++ b/module/mempool/queue/heroQueue_test.go @@ -15,7 +15,7 @@ import ( // TestHeroQueue_Sequential evaluates correctness of queue implementation against sequential push and pop. func TestHeroQueue_Sequential(t *testing.T) { sizeLimit := 100 - q := queue.NewHeroQueue(uint32(sizeLimit), unittest.Logger(), metrics.NewNoopCollector()) + q := queue.NewHeroQueue[*unittest.MockEntity](uint32(sizeLimit), unittest.Logger(), metrics.NewNoopCollector()) // initially queue must be zero require.Zero(t, q.Size()) @@ -25,20 +25,21 @@ func TestHeroQueue_Sequential(t *testing.T) { require.False(t, ok) require.Nil(t, entity) - entities := unittest.MockEntityListFixture(sizeLimit) + entities := unittest.EntityListFixture(uint(sizeLimit)) // pushing entities sequentially. for i, e := range entities { - require.True(t, q.Push(*e)) + require.True(t, q.Push(e.Identifier, e)) // duplicate push should fail - require.False(t, q.Push(*e)) + require.False(t, q.Push(e.Identifier, e)) require.Equal(t, q.Size(), uint(i+1)) } // once queue meets the size limit, any extra push should fail. for i := 0; i < 100; i++ { - require.False(t, q.Push(*unittest.MockEntityFixture())) + entity := unittest.MockEntityFixture() + require.False(t, q.Push(entity.Identifier, entity)) // size should not change require.Equal(t, q.Size(), uint(sizeLimit)) @@ -48,8 +49,8 @@ func TestHeroQueue_Sequential(t *testing.T) { for i, e := range entities { popedE, ok := q.Pop() require.True(t, ok) - require.Equal(t, *e, popedE) - require.Equal(t, e.ID(), popedE.ID()) + require.Equal(t, e, popedE) + require.Equal(t, e.Identifier, popedE.Identifier) require.Equal(t, q.Size(), uint(len(entities)-i-1)) } @@ -58,11 +59,9 @@ func TestHeroQueue_Sequential(t *testing.T) { // TestHeroQueue_Concurrent evaluates correctness of queue implementation against concurrent push and pop. func TestHeroQueue_Concurrent(t *testing.T) { sizeLimit := 100 - q := queue.NewHeroQueue(uint32(sizeLimit), unittest.Logger(), metrics.NewNoopCollector()) - + q := queue.NewHeroQueue[*unittest.MockEntity](uint32(sizeLimit), unittest.Logger(), metrics.NewNoopCollector()) // initially queue must be zero require.Zero(t, q.Size()) - // initially there should be nothing to pop entity, ok := q.Pop() require.False(t, ok) @@ -71,12 +70,12 @@ func TestHeroQueue_Concurrent(t *testing.T) { pushWG := &sync.WaitGroup{} pushWG.Add(sizeLimit) - entities := unittest.MockEntityListFixture(sizeLimit) + entities := unittest.EntityListFixture(uint(sizeLimit)) // pushing entities concurrently. 
for _, e := range entities { e := e // suppress loop variable go func() { - require.True(t, q.Push(*e)) + require.True(t, q.Push(e.Identifier, e)) pushWG.Done() }() } @@ -86,7 +85,8 @@ func TestHeroQueue_Concurrent(t *testing.T) { pushWG.Add(sizeLimit) for i := 0; i < sizeLimit; i++ { go func() { - require.False(t, q.Push(*unittest.MockEntityFixture())) + entity := unittest.MockEntityFixture() + require.False(t, q.Push(entity.Identifier, entity)) pushWG.Done() }() } @@ -103,7 +103,7 @@ func TestHeroQueue_Concurrent(t *testing.T) { require.True(t, ok) matchLock.Lock() - matchAndRemoveEntity(t, entities, popedE.(unittest.MockEntity)) + matchAndRemoveEntity(t, entities, popedE) matchLock.Unlock() popWG.Done() @@ -117,9 +117,9 @@ func TestHeroQueue_Concurrent(t *testing.T) { // matchAndRemove checks existence of the entity in the "entities" array, and if a match is found, it is removed. // If no match is found for an entity, it fails the test. -func matchAndRemoveEntity(t *testing.T, entities []*unittest.MockEntity, entity unittest.MockEntity) []*unittest.MockEntity { +func matchAndRemoveEntity(t *testing.T, entities []*unittest.MockEntity, entity *unittest.MockEntity) []*unittest.MockEntity { for i, e := range entities { - if *e == entity { + if e == entity { // removes the matched entity from the list entities = append(entities[:i], entities[i+1:]...) return entities diff --git a/module/mempool/queue/heroStore.go b/module/mempool/queue/heroStore.go index 8a9e4805c63..4b8fcdf43a9 100644 --- a/module/mempool/queue/heroStore.go +++ b/module/mempool/queue/heroStore.go @@ -1,42 +1,48 @@ package queue import ( + "time" + "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/mempool/queue/internal" ) -type HeroStoreConfig struct { - SizeLimit uint32 - Collector module.HeroCacheMetrics -} +// KeyFunc defines a function type for computing the unique identifier for a message. +type KeyFunc func(msg *engine.Message) flow.Identifier -type HeroStoreConfigOption func(builder *HeroStoreConfig) +// Default key function. +var defaultKeyFunc = IdentifierOfMessage -func WithHeroStoreSizeLimit(sizeLimit uint32) HeroStoreConfigOption { - return func(builder *HeroStoreConfig) { - builder.SizeLimit = sizeLimit - } -} +type HeroStoreOption func(heroStore *HeroStore) -func WithHeroStoreCollector(collector module.HeroCacheMetrics) HeroStoreConfigOption { - return func(builder *HeroStoreConfig) { - builder.Collector = collector +// WithMessageKeyFactory allows setting a custom function to generate the key for a message. +func WithMessageKeyFactory(f KeyFunc) HeroStoreOption { + return func(h *HeroStore) { + h.keyFactory = f } } // HeroStore is a FIFO (first-in-first-out) size-bound queue for maintaining engine.Message types. // It is based on HeroQueue. type HeroStore struct { - q *HeroQueue + q *HeroQueue[*engine.Message] + keyFactory KeyFunc } -func NewHeroStore(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *HeroStore { - return &HeroStore{ - q: NewHeroQueue(sizeLimit, logger, collector), +func NewHeroStore(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics, opts ...HeroStoreOption) *HeroStore { + h := &HeroStore{ + q: NewHeroQueue[*engine.Message](sizeLimit, logger, collector), + keyFactory: defaultKeyFunc, } + + for _, opt := range opts { + opt(h) + } + + return h } // Put enqueues the message into the message store. 
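Reviewer note: with the `MessageEntity` wrapper gone, `HeroStore` derives cache keys through a pluggable `KeyFunc`. The default (`IdentifierOfMessage`) hashes the whole message, so byte-identical messages are de-duplicated; `IdentifierOfMessageWithNonce` mixes in a timestamp so repeats are retained. A sketch of opting into the latter; the size limit and collaborators are illustrative:

```go
package example

import (
	"github.com/onflow/flow-go/module/mempool/queue"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/utils/unittest"
)

// newNonDeduplicatingStore builds a HeroStore whose keys include a nonce,
// so two Puts of byte-identical messages occupy two slots instead of one.
func newNonDeduplicatingStore() *queue.HeroStore {
	return queue.NewHeroStore(
		1000,
		unittest.Logger(),
		metrics.NewNoopCollector(),
		queue.WithMessageKeyFactory(queue.IdentifierOfMessageWithNonce),
	)
}
```

Since the nonce is `time.Now().UnixNano()`, two messages enqueued within the same nanosecond would still share a key; the option trades strict uniqueness for avoiding false-positive de-duplication.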
@@ -44,17 +50,31 @@ func NewHeroStore(sizeLimit uint32, logger zerolog.Logger, collector module.Hero // Boolean returned variable determines whether enqueuing was successful, i.e., // put may be dropped if queue is full or already exists. func (c *HeroStore) Put(message *engine.Message) bool { - return c.q.Push(internal.NewMessageEntity(message)) + return c.q.Push(c.keyFactory(message), message) } // Get pops the queue, i.e., it returns the head of queue, and updates the head to the next element. // Boolean return value determines whether pop is successful, i.e., popping an empty queue returns false. func (c *HeroStore) Get() (*engine.Message, bool) { - head, ok := c.q.Pop() - if !ok { - return nil, false - } + return c.q.Pop() +} + +func (c *HeroStore) Size() uint { + return c.q.Size() +} + +// IdentifierOfMessage generates the unique identifier for a message. +func IdentifierOfMessage(msg *engine.Message) flow.Identifier { + return flow.MakeID(msg) +} - msg := head.(internal.MessageEntity).Msg - return &msg, true +// IdentifierOfMessageWithNonce generates an identifier with a nonce to prevent de-duplication. +func IdentifierOfMessageWithNonce(msg *engine.Message) flow.Identifier { + return flow.MakeID(struct { + *engine.Message + Nonce uint64 + }{ + msg, + uint64(time.Now().UnixNano()), + }) } diff --git a/module/mempool/queue/internal/messageEntity.go b/module/mempool/queue/internal/messageEntity.go deleted file mode 100644 index 6174f1e0a12..00000000000 --- a/module/mempool/queue/internal/messageEntity.go +++ /dev/null @@ -1,33 +0,0 @@ -package internal - -import ( - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/model/flow" -) - -// MessageEntity is an internal data structure for storing messages in HeroQueue. -type MessageEntity struct { - Msg engine.Message - id flow.Identifier -} - -var _ flow.Entity = (*MessageEntity)(nil) - -func NewMessageEntity(msg *engine.Message) MessageEntity { - return MessageEntity{ - Msg: *msg, - id: identifierOfMessage(msg), - } -} - -func (m MessageEntity) ID() flow.Identifier { - return m.id -} - -func (m MessageEntity) Checksum() flow.Identifier { - return m.id -} - -func identifierOfMessage(msg *engine.Message) flow.Identifier { - return flow.MakeID(msg) -} diff --git a/module/mempool/queue/queue.go b/module/mempool/queue/queue.go deleted file mode 100644 index 996193b9326..00000000000 --- a/module/mempool/queue/queue.go +++ /dev/null @@ -1,168 +0,0 @@ -package queue - -import ( - "github.com/onflow/flow-go/model/flow" -) - -type Node struct { - Item Blockify - Children []*Node -} - -// Blockify becuase Blocker seems a bit off. -// Make items behave like a block, so it can be queued -type Blockify interface { - flow.Entity - Height() uint64 - ParentID() flow.Identifier -} - -// Queue is a fork-aware queue/tree of blocks for use in execution Node, where parallel forks -// can be processed simultaneously. For fast lookup which is predicted to be common case -// all nodes are kept as one queue, which is expected to split into separate queues once -// a fork (multiple children) is reached. 
-// Note that this is not a thread-safe structure and external synchronisation is required -// to use in concurrent environment -type Queue struct { - Head *Node - Highest *Node - Nodes map[flow.Identifier]*Node -} - -// Make queue an entity so it can be stored in mempool - -func (q *Queue) ID() flow.Identifier { - return q.Head.Item.ID() -} - -func (q *Queue) Checksum() flow.Identifier { - return q.Head.Item.Checksum() -} - -// Size returns number of elements in the queue -func (q *Queue) Size() int { - return len(q.Nodes) -} - -// Returns difference between lowest and highest element in the queue -// Formally, the Queue stores a tree. The height of the tree is the -// number of edges on the longest downward path between the root and any leaf. -func (q *Queue) Height() uint64 { - return q.Highest.Item.Height() - q.Head.Item.Height() -} - -// traverse Node children recursively and populate m -func traverse(node *Node, m map[flow.Identifier]*Node, highest *Node) { - m[node.Item.ID()] = node - for _, node := range node.Children { - if node.Item.Height() > highest.Item.Height() { - *highest = *node - } - traverse(node, m, highest) - } -} - -func NewQueue(blockify Blockify) *Queue { - n := &Node{ - Item: blockify, - Children: nil, - } - return &Queue{ - Head: n, - Highest: n, - Nodes: map[flow.Identifier]*Node{n.Item.ID(): n}, - } -} - -// rebuildQueue makes a new queue from a Node which was already part of other queue -// and fills lookup cache -func rebuildQueue(n *Node) *Queue { - // rebuild map-cache - cache := make(map[flow.Identifier]*Node) - highest := *n //copy n - traverse(n, cache, &highest) - - return &Queue{ - Head: n, - Nodes: cache, - Highest: &highest, - } -} - -// Special case for removing single-childed head element -func dequeue(queue *Queue) *Queue { - onlyChild := queue.Head.Children[0] - - cache := make(map[flow.Identifier]*Node) - - //copy all but head caches - headID := queue.Head.Item.ID() // ID computation is about as expensive 1000 Go int additions - for key, val := range queue.Nodes { - if key != headID { - cache[key] = val - } - } - - return &Queue{ - Head: onlyChild, - Nodes: cache, - Highest: queue.Highest, - } -} - -// TryAdd tries to add a new element to the queue. -// A element can only be added if the parent exists in the queue. -// TryAdd(elmt) is an idempotent operation for the same elmt, i.e. -// after the first, subsequent additions of the same elements are NoOps. -// Returns: -// stored = True if and only if _after_ the operation, the element is stored in the -// queue. This is the case if (a) element was newly added to the queue or -// (b) element was already stored in the queue _before_ the call. -// new = Indicates if element was new to the queue, when `stored` was true. It lets -// distinguish (a) and (b) cases. -// Adding an element fails with return value `false` for `stored` in the following cases: -// - element.ParentID() is _not_ stored in the queue -// - element's height is _unequal to_ its parent's height + 1 -func (q *Queue) TryAdd(element Blockify) (stored bool, new bool) { - if _, found := q.Nodes[element.ID()]; found { - // (b) element was already stored in the queue _before_ the call. 
- return true, false - } - // at this point, we are sure that the element is _not_ in the queue and therefore, - // the element cannot be referenced as a child by any other element in the queue - n, ok := q.Nodes[element.ParentID()] - if !ok { - return false, false - } - if n.Item.Height() != element.Height()-1 { - return false, false - } - newNode := &Node{ - Item: element, - Children: nil, - } - // we know: element is _not_ (yet) in the queue - // => it cannot be in _any_ nodes Children list - // => the following operation is guaranteed to _not_ produce - // duplicates in the Children list - n.Children = append(n.Children, newNode) - q.Nodes[element.ID()] = newNode - if element.Height() > q.Highest.Item.Height() { - q.Highest = newNode - } - return true, true -} - -// Dismount removes the head element, returns it and it's children as new queues -func (q *Queue) Dismount() (Blockify, []*Queue) { - - queues := make([]*Queue, len(q.Head.Children)) - if len(q.Head.Children) == 1 { //optimize for most common single-child case - queues[0] = dequeue(q) - } else { - for i, child := range q.Head.Children { - queues[i] = rebuildQueue(child) - } - } - return q.Head.Item, queues -} diff --git a/module/mempool/queue/queue_test.go b/module/mempool/queue/queue_test.go deleted file mode 100644 index 71b4e2bc447..00000000000 --- a/module/mempool/queue/queue_test.go +++ /dev/null @@ -1,292 +0,0 @@ -package queue - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/mempool/entity" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestQueue(t *testing.T) { - - /* Input queue: - g-b - \ - f--d--c-a - / - e - - */ - - a := unittest.ExecutableBlockFixture(nil, nil) - c := unittest.ExecutableBlockFixtureWithParent(nil, a.Block.Header, nil) - b := unittest.ExecutableBlockFixtureWithParent(nil, c.Block.Header, nil) - d := unittest.ExecutableBlockFixtureWithParent(nil, c.Block.Header, nil) - e := unittest.ExecutableBlockFixtureWithParent(nil, d.Block.Header, nil) - f := unittest.ExecutableBlockFixtureWithParent(nil, d.Block.Header, nil) - g := unittest.ExecutableBlockFixtureWithParent(nil, b.Block.Header, nil) - - dBroken := unittest.ExecutableBlockFixtureWithParent(nil, c.Block.Header, nil) - dBroken.Block.Header.Height += 2 //change height - - queue := NewQueue(a) - - t.Run("Adding", func(t *testing.T) { - stored, _ := queue.TryAdd(b) //parent not stored yet - size := queue.Size() - height := queue.Height() - assert.False(t, stored) - assert.Equal(t, 1, size) - assert.Equal(t, uint64(0), height) - - stored, new := queue.TryAdd(c) - size = queue.Size() - height = queue.Height() - assert.True(t, stored) - assert.True(t, new) - assert.Equal(t, 2, size) - assert.Equal(t, uint64(1), height) - - stored, new = queue.TryAdd(b) - size = queue.Size() - height = queue.Height() - assert.True(t, stored) - assert.True(t, new) - assert.Equal(t, 3, size) - assert.Equal(t, uint64(2), height) - - stored, new = queue.TryAdd(b) //repeat - size = queue.Size() - height = queue.Height() - assert.True(t, stored) - assert.False(t, new) - assert.Equal(t, 3, size) - assert.Equal(t, uint64(2), height) - - stored, _ = queue.TryAdd(f) //parent not stored yet - assert.False(t, stored) - - stored, new = queue.TryAdd(d) - size = queue.Size() - height = queue.Height() - assert.True(t, stored) - assert.True(t, new) - assert.Equal(t, 4, size) - assert.Equal(t, uint64(2), height) - - stored, _ = queue.TryAdd(dBroken) // wrong height - assert.False(t, 
stored) - - stored, new = queue.TryAdd(e) - size = queue.Size() - height = queue.Height() - assert.True(t, stored) - assert.True(t, new) - assert.Equal(t, 5, size) - assert.Equal(t, uint64(3), height) - - stored, new = queue.TryAdd(f) - size = queue.Size() - height = queue.Height() - assert.True(t, stored) - assert.True(t, new) - assert.Equal(t, 6, size) - assert.Equal(t, uint64(3), height) - - stored, new = queue.TryAdd(g) - size = queue.Size() - height = queue.Height() - assert.True(t, stored) - assert.True(t, new) - assert.Equal(t, 7, size) - assert.Equal(t, uint64(3), height) - }) - - t.Run("Dismounting", func(t *testing.T) { - // dismount queue - blockA, queuesA := queue.Dismount() - assert.Equal(t, a, blockA) - require.Len(t, queuesA, 1) - assert.Equal(t, 6, queuesA[0].Size()) - assert.Equal(t, uint64(2), queuesA[0].Height()) - - blockC, queuesC := queuesA[0].Dismount() - assert.Equal(t, c, blockC) - require.Len(t, queuesC, 2) - - // order of children is not guaranteed - var queueD *Queue - var queueB *Queue - if queuesC[0].Head.Item == d { - queueD = queuesC[0] - queueB = queuesC[1] - } else { - queueD = queuesC[1] - queueB = queuesC[0] - } - assert.Equal(t, d, queueD.Head.Item) - sizeD := queueD.Size() - heightD := queueD.Height() - sizeB := queueB.Size() - heightB := queueB.Height() - - assert.Equal(t, 3, sizeD) - assert.Equal(t, uint64(1), heightD) - assert.Equal(t, 2, sizeB) - assert.Equal(t, uint64(1), heightB) - - blockD, queuesD := queueD.Dismount() - assert.Equal(t, d, blockD) - assert.Len(t, queuesD, 2) - }) - - t.Run("Process all", func(t *testing.T) { - // Dismounting iteratively all queues should yield all nodes/blocks only once - // and in the proper order (parents are always evaluated first) - blocksInOrder := make([]*entity.ExecutableBlock, 0) - - executionHeads := make(chan *Queue, 10) - executionHeads <- queue - - for len(executionHeads) > 0 { - currentHead := <-executionHeads - block, newQueues := currentHead.Dismount() - blocksInOrder = append(blocksInOrder, block.(*entity.ExecutableBlock)) - for _, newQueue := range newQueues { - executionHeads <- newQueue - } - } - - // Couldn't find ready assertion for subset in order, so lets - // map nodes by their index and check if order is as expected - indices := make(map[*entity.ExecutableBlock]int) - - for i, block := range blocksInOrder { - indices[block] = i - } - - // a -> c -> b -> g - assert.Less(t, indices[a], indices[c]) - assert.Less(t, indices[c], indices[b]) - assert.Less(t, indices[b], indices[g]) - - // a -> c -> d -> f - assert.Less(t, indices[a], indices[c]) - assert.Less(t, indices[c], indices[d]) - assert.Less(t, indices[d], indices[f]) - - // a -> c -> d -> e - assert.Less(t, indices[a], indices[c]) - assert.Less(t, indices[c], indices[d]) - assert.Less(t, indices[d], indices[e]) - }) - - //t.Run("Attaching", func(t *testing.T) { - // queue := NewQueue(a) - // - // added, new := queue.TryAdd(c) - // assert.True(t, added) - // assert.True(t, new) - // assert.Equal(t, 2, queue.Size()) - // assert.Equal(t, uint64(1), queue.Height()) - // - // queueB := NewQueue(b) - // added, new = queueB.TryAdd(g) - // assert.True(t, added) - // assert.True(t, new) - // - // assert.Equal(t, 2, queueB.Size()) - // assert.Equal(t, uint64(1), queueB.Height()) - // - // queueF := NewQueue(f) - // - // err := queue.Attach(queueF) // node D is missing - // assert.Error(t, err) - // - // err = queue.Attach(queueB) - // assert.NoError(t, err) - // assert.Equal(t, 4, queue.Size()) - // assert.Equal(t, uint64(3), queue.Height()) - 
// - // added, new = queue.TryAdd(d) - // assert.True(t, added) - // assert.True(t, new) - // assert.Equal(t, 5, queue.Size()) - // assert.Equal(t, uint64(3), queue.Height()) - // - // err = queue.Attach(queueF) // node D is now in the queue - // assert.NoError(t, err) - // assert.Equal(t, 6, queue.Size()) - // assert.Equal(t, uint64(3), queue.Height()) - //}) - - // Creating queue: - // f--d--c-a - // Addingan element should be an idempotent operation: - // * adding c a second time - // * Dequeueing single head: - // we should only get one child queue f--d--c - t.Run("Adding_Idempotent", func(t *testing.T) { - queue := NewQueue(a) - add, new := queue.TryAdd(c) - assert.True(t, add) - assert.True(t, new) - - add, new = queue.TryAdd(d) - assert.True(t, add) - assert.True(t, new) - - add, new = queue.TryAdd(f) - assert.True(t, add) - assert.True(t, new) - - assert.Equal(t, 4, queue.Size()) - assert.Equal(t, uint64(3), queue.Height()) - - // adding c a second time - add, new = queue.TryAdd(c) - assert.True(t, add) - assert.False(t, new) - - // Dequeueing a - head, childQueues := queue.Dismount() - assert.Equal(t, a, head) - assert.Equal(t, 1, len(childQueues), "There should only be a single child queue") - assert.Equal(t, c.ID(), childQueues[0].Head.Item.ID()) - }) - - // Testing attaching overlapping queues: - // queue A: - // g-b - // \ - // c - // queue B: - // d--c-a - // attach queueA to queueB: we expect an error as the queues have nodes in common - //t.Run("Attaching_partially_overlapped_queue", func(t *testing.T) { - // queueA := NewQueue(c) - // add, new := queueA.TryAdd(b) - // assert.True(t, add) - // assert.True(t, new) - // - // add, new = queueA.TryAdd(g) - // assert.True(t, add) - // assert.True(t, new) - // - // queueB := NewQueue(a) - // add, new = queueB.TryAdd(c) - // assert.True(t, add) - // assert.True(t, new) - // - // add, new = queueB.TryAdd(d) - // assert.True(t, add) - // assert.True(t, new) - // - // err := queueB.Attach(queueA) - // assert.Error(t, err) - //}) - -} diff --git a/module/mempool/queue/rpcInspectionRequest_test.go b/module/mempool/queue/rpcInspectionRequest_test.go new file mode 100644 index 00000000000..f09097f99c3 --- /dev/null +++ b/module/mempool/queue/rpcInspectionRequest_test.go @@ -0,0 +1,56 @@ +package queue_test + +import ( + "testing" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/module/mempool/queue" + "github.com/onflow/flow-go/network/p2p/inspector/validation" + p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestMessage_InspectRPCRequest_Key tests that the key for an engine.Message created from an InspectRPCRequest is +// only dependent of the Nonce and PeerID fields of the InspectRPCRequest; and is independent of the RPC field. +// Unique identifier for the HeroCache is imperative to prevent false-positive de-duplication. +// However, the RPC field contains a bulk of the data in the InspectRPCRequest, and including it in the key would +// cause the InspectRPCRequest store and retrieval to be resource intensive. 
+func TestMessage_InspectRPCRequest_Key(t *testing.T) { + rpcs := p2ptest.GossipSubRpcFixtures(t, 2) + rpc1 := rpcs[0] + rpc2 := rpcs[1] + peerId1 := unittest.PeerIdFixture(t) + + // creates two InspectRPCRequest structs with the same Nonce and PeerID fields + req1, err := validation.NewInspectRPCRequest(peerId1, &pubsub.RPC{ + RPC: *rpc1, + }) + require.NoError(t, err) + + req2, err := validation.NewInspectRPCRequest(peerId1, &pubsub.RPC{ + RPC: *rpc1, + }) + require.NoError(t, err) + // Set the Nonce field of the second InspectRPCRequest struct to the Nonce field of the first + req2.Nonce = req1.Nonce + + // creates a third InspectRPCRequest struct with the same Nonce and PeerID fields as the first two + // but with a different RPC field + req3, err := validation.NewInspectRPCRequest(peerId1, &pubsub.RPC{ + RPC: *rpc2, + }) + require.NoError(t, err) + req3.Nonce = req1.Nonce + + message1 := &engine.Message{Payload: req1} + message2 := &engine.Message{Payload: req2} + message3 := &engine.Message{Payload: req3} + + // as the Nonce and PeerID fields are the same, the key of the Message should be the same across all three + // in other words, the RPC field should not affect the key + require.Equal(t, queue.IdentifierOfMessage(message1), queue.IdentifierOfMessage(message2)) + require.Equal(t, queue.IdentifierOfMessage(message1), queue.IdentifierOfMessage(message3)) +} diff --git a/module/mempool/results.go b/module/mempool/results.go deleted file mode 100644 index 1c4a8a6e875..00000000000 --- a/module/mempool/results.go +++ /dev/null @@ -1,29 +0,0 @@ -package mempool - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// Results represents a concurrency-safe memory pool for execution results. -type Results interface { - - // Has will check if the given result is in the memory pool. - Has(resultID flow.Identifier) bool - - // Add will add the given execution result to the memory pool. It will return - // false if it was already in the mempool. - Add(result *flow.ExecutionResult) bool - - // Remove will attempt to remove the result from the memory pool. - Remove(resultID flow.Identifier) bool - - // ByID retrieve the execution result with the given ID from the memory pool. - // It will return false if it was not found in the mempool. - ByID(resultID flow.Identifier) (*flow.ExecutionResult, bool) - - // Size will return the current size of the memory pool. - Size() uint - - // All will return a list of all approvals in the memory pool. - All() []*flow.ExecutionResult -} diff --git a/module/mempool/stdmap/assignments.go b/module/mempool/stdmap/assignments.go index d817477c64e..ea0fbae512f 100644 --- a/module/mempool/stdmap/assignments.go +++ b/module/mempool/stdmap/assignments.go @@ -6,57 +6,12 @@ import ( ) // Assignments implements the chunk assignment memory pool. +// Stored assignments are keyed by assignment fingerprint. type Assignments struct { - *Backend + *Backend[flow.Identifier, *chunkmodels.Assignment] } // NewAssignments creates a new memory pool for Assignments. -func NewAssignments(limit uint) (*Assignments, error) { - a := &Assignments{ - Backend: NewBackend(WithLimit(limit)), - } - return a, nil -} - -// Has checks whether the Assignment with the given hash is currently in -// the memory pool. 
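Reviewer note: the hand-written `Has`/`ByID`/`Add`/`Remove`/`Size`/`All` wrappers being deleted here are now supplied by the embedded generic `Backend[flow.Identifier, *chunkmodels.Assignment]`. A minimal sketch of the post-refactor call pattern; the `chunkmodels` import path is assumed from the alias used above:

```go
package example

import (
	chunkmodels "github.com/onflow/flow-go/model/chunks"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/mempool/stdmap"
)

// storeAndFetch exercises the slimmed-down Assignments pool: every method
// now comes from the embedded generic Backend.
func storeAndFetch(fingerprint flow.Identifier, a *chunkmodels.Assignment) (*chunkmodels.Assignment, bool) {
	pool := stdmap.NewAssignments(100) // note: no error return anymore
	pool.Add(fingerprint, a)
	return pool.Get(fingerprint)
}
```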
-func (a *Assignments) Has(assignmentID flow.Identifier) bool { - return a.Backend.Has(assignmentID) - -} - -// ByID retrieves the chunk assignment from mempool based on provided ID -func (a *Assignments) ByID(assignmentID flow.Identifier) (*chunkmodels.Assignment, bool) { - entity, exists := a.Backend.ByID(assignmentID) - if !exists { - return nil, false - } - adp := entity.(*chunkmodels.AssignmentDataPack) - return adp.Assignment(), true -} - -// Add adds an Assignment to the mempool. -func (a *Assignments) Add(fingerprint flow.Identifier, assignment *chunkmodels.Assignment) bool { - return a.Backend.Add(chunkmodels.NewAssignmentDataPack(fingerprint, assignment)) -} - -// Remove will remove the given Assignment from the memory pool; it will -// return true if the Assignment was known and removed. -func (a *Assignments) Remove(assignmentID flow.Identifier) bool { - return a.Backend.Remove(assignmentID) -} - -// Size will return the current size of the memory pool. -func (a *Assignments) Size() uint { - return a.Backend.Size() -} - -// All returns all chunk data packs from the pool. -func (a *Assignments) All() []*chunkmodels.Assignment { - entities := a.Backend.All() - assignments := make([]*chunkmodels.Assignment, 0, len(entities)) - for _, entity := range entities { - assignments = append(assignments, entity.(*chunkmodels.AssignmentDataPack).Assignment()) - } - return assignments +func NewAssignments(limit uint) *Assignments { + return &Assignments{NewBackend(WithLimit[flow.Identifier, *chunkmodels.Assignment](limit))} } diff --git a/module/mempool/stdmap/backDataHeapBenchmark_test.go b/module/mempool/stdmap/backDataHeapBenchmark_test.go index 1a3fdbc7e17..79cff38abd0 100644 --- a/module/mempool/stdmap/backDataHeapBenchmark_test.go +++ b/module/mempool/stdmap/backDataHeapBenchmark_test.go @@ -3,10 +3,11 @@ package stdmap_test import ( "runtime" "runtime/debug" + "sync" "testing" "time" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" zlog "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" @@ -27,9 +28,9 @@ func BenchmarkBaselineLRU(b *testing.B) { defer debug.SetGCPercent(debug.SetGCPercent(-1)) // disable GC limit := uint(50) - backData := stdmap.NewBackend( - stdmap.WithBackData(newBaselineLRU(int(limit))), - stdmap.WithLimit(limit)) + backData := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity]( + stdmap.WithMutableBackData[flow.Identifier, *unittest.MockEntity](newBaselineLRU[flow.Identifier, *unittest.MockEntity](int(limit))), + stdmap.WithLimit[flow.Identifier, *unittest.MockEntity](limit)) entities := unittest.EntityListFixture(uint(100_000)) testAddEntities(b, limit, backData, entities) @@ -46,15 +47,15 @@ func BenchmarkArrayBackDataLRU(b *testing.B) { defer debug.SetGCPercent(debug.SetGCPercent(-1)) // disable GC limit := uint(50_000) - backData := stdmap.NewBackend( - stdmap.WithBackData( - herocache.NewCache( + backData := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity]( + stdmap.WithMutableBackData[flow.Identifier, *unittest.MockEntity]( + herocache.NewCache[*unittest.MockEntity]( uint32(limit), 8, heropool.LRUEjection, unittest.Logger(), metrics.NewNoopCollector())), - stdmap.WithLimit(limit)) + stdmap.WithLimit[flow.Identifier, *unittest.MockEntity](limit)) entities := unittest.EntityListFixture(uint(100_000_000)) testAddEntities(b, limit, backData, entities) @@ -76,13 +77,13 @@ func gcAndWriteHeapProfile() { // testAddEntities is a test helper that checks entities are added successfully to the backdata. 
// and each entity is retrievable right after it is written to backdata. -func testAddEntities(t testing.TB, limit uint, b *stdmap.Backend, entities []*unittest.MockEntity) { +func testAddEntities(t testing.TB, limit uint, b *stdmap.Backend[flow.Identifier, *unittest.MockEntity], entities []*unittest.MockEntity) { // adding elements t1 := time.Now() for i, e := range entities { - require.False(t, b.Has(e.ID())) + require.False(t, b.Has(e.Identifier)) // adding each element must be successful. - require.True(t, b.Add(*e)) + require.True(t, b.Add(e.Identifier, e)) if uint(i) < limit { // when we are below limit the total of @@ -91,13 +92,13 @@ func testAddEntities(t testing.TB, limit uint, b *stdmap.Backend, entities []*un } else { // when we cross the limit, the ejection kicks in, and // size must be steady at the limit. - require.Equal(t, uint(b.Size()), limit) + require.Equal(t, b.Size(), limit) } // entity should be immediately retrievable - actual, ok := b.ByID(e.ID()) + actual, ok := b.Get(e.Identifier) require.True(t, ok) - require.Equal(t, *e, actual) + require.Equal(t, e, actual) } elapsed := time.Since(t1) zlog.Info().Dur("interaction_time", elapsed).Msg("adding elements done") @@ -107,141 +108,144 @@ func testAddEntities(t testing.TB, limit uint, b *stdmap.Backend, entities []*un // it compliant to be used as BackData component in mempool.Backend. Note that // this is used only as an experimental baseline, and so it's not exported for // production. -type baselineLRU struct { - c *lru.Cache // used to incorporate an LRU cache +type baselineLRU[K comparable, V any] struct { + c *lru.Cache[K, V] // used to incorporate an LRU cache limit int + + // atomicAdjustMutex is used to synchronize concurrent access to the + // underlying LRU cache. This is needed because hashicorp LRU does not + // provide thread-safety for atomic adjust-with-init or get-with-init operations. + atomicAdjustMutex sync.Mutex } -func newBaselineLRU(limit int) *baselineLRU { +func newBaselineLRU[K comparable, V any](limit int) *baselineLRU[K, V] { var err error - c, err := lru.New(limit) + c, err := lru.New[K, V](limit) if err != nil { panic(err) } - return &baselineLRU{ + return &baselineLRU[K, V]{ c: c, limit: limit, } } -// Has checks if we already contain the item with the given hash. -func (b *baselineLRU) Has(entityID flow.Identifier) bool { - _, ok := b.c.Get(entityID) +// Has checks if backdata already stores a value under the given key. +func (b *baselineLRU[K, V]) Has(key K) bool { + _, ok := b.c.Get(key) return ok } // Add adds the given item to the pool. -func (b *baselineLRU) Add(entityID flow.Identifier, entity flow.Entity) bool { - b.c.Add(entityID, entity) +func (b *baselineLRU[K, V]) Add(key K, value V) bool { + b.c.Add(key, value) return true } // Remove will remove the item with the given hash. -func (b *baselineLRU) Remove(entityID flow.Identifier) (flow.Entity, bool) { - e, ok := b.c.Get(entityID) - if !ok { - return nil, false - } - entity, ok := e.(flow.Entity) +func (b *baselineLRU[K, V]) Remove(key K) (value V, removed bool) { + value, ok := b.c.Get(key) if !ok { - return nil, false + return value, false } - return entity, b.c.Remove(entityID) + return value, b.c.Remove(key) } // Adjust will adjust the value item using the given function if the given key can be found. 
// Returns a bool which indicates whether the value was updated as well as the updated value
-func (b *baselineLRU) Adjust(entityID flow.Identifier, f func(flow.Entity) flow.Entity) (flow.Entity, bool) {
-	entity, removed := b.Remove(entityID)
+func (b *baselineLRU[K, V]) Adjust(key K, f func(V) V) (value V, ok bool) {
+	value, removed := b.Remove(key)
 	if !removed {
-		return nil, false
+		return value, false
 	}
-	newEntity := f(entity)
-	newEntityID := newEntity.ID()
+	newValue := f(value)

-	b.Add(newEntityID, newEntity)
+	b.Add(key, newValue)

-	return newEntity, true
+	return newValue, true
 }

-// ByID returns the given item from the pool.
-func (b *baselineLRU) ByID(entityID flow.Identifier) (flow.Entity, bool) {
-	e, ok := b.c.Get(entityID)
-	if !ok {
-		return nil, false
+// AdjustWithInit will adjust the value using the given function if the given key can be found.
+// If the key is not found, the init function will be called first to create a new value.
+// Returns the adjusted value and a bool indicating whether the value was adjusted.
+// Note: this is a benchmark helper; AdjustWithInit is serializable w.r.t. other concurrent
+// adjust-with-init or get-with-init operations, but not w.r.t. concurrent add, adjust, or get operations.
+func (b *baselineLRU[K, V]) AdjustWithInit(key K, adjust func(V) V, init func() V) (value V, ok bool) {
+	b.atomicAdjustMutex.Lock()
+	defer b.atomicAdjustMutex.Unlock()
+
+	if b.Has(key) {
+		return b.Adjust(key, adjust)
+	}
+	added := b.Add(key, init())
+	if !added {
+		return value, false
 	}
+	return b.Adjust(key, adjust)
+}

-	entity, ok := e.(flow.Entity)
+// Get returns the given item from the pool.
+func (b *baselineLRU[K, V]) Get(key K) (value V, ok bool) {
+	value, ok = b.c.Get(key)
 	if !ok {
-		return nil, false
+		return value, false
 	}
-	return entity, ok
+
+	return value, ok
 }

 // Size will return the total of the backend.
-func (b baselineLRU) Size() uint {
+func (b *baselineLRU[K, V]) Size() uint {
 	return uint(b.c.Len())
 }

 // All returns all entities from the pool.
-func (b baselineLRU) All() map[flow.Identifier]flow.Entity { - all := make(map[flow.Identifier]flow.Entity) - for _, entityID := range b.c.Keys() { - id, ok := entityID.(flow.Identifier) - if !ok { - panic("could not assert to entity id") - } +func (b *baselineLRU[K, V]) All() map[K]V { + all := make(map[K]V) + for _, key := range b.c.Keys() { - entity, ok := b.ByID(id) + entity, ok := b.Get(key) if !ok { - panic("could not retrieve entity from mempool") + panic("could not retrieve value from mempool") } - all[id] = entity + all[key] = entity } return all } -func (b baselineLRU) Identifiers() flow.IdentifierList { - ids := make(flow.IdentifierList, b.c.Len()) - entityIds := b.c.Keys() - total := len(entityIds) +func (b *baselineLRU[K, V]) Keys() []K { + keys := make([]K, b.c.Len()) + valueKeys := b.c.Keys() + total := len(valueKeys) for i := 0; i < total; i++ { - id, ok := entityIds[i].(flow.Identifier) - if !ok { - panic("could not assert to entity id") - } - ids[i] = id + keys[i] = valueKeys[i] } - return ids + return keys } -func (b baselineLRU) Entities() []flow.Entity { - entities := make([]flow.Entity, b.c.Len()) - entityIds := b.c.Keys() - total := len(entityIds) +func (b *baselineLRU[K, V]) Values() []V { + values := make([]V, b.c.Len()) + valuesIds := b.c.Keys() + total := len(valuesIds) for i := 0; i < total; i++ { - id, ok := entityIds[i].(flow.Identifier) - if !ok { - panic("could not assert to entity id") - } - - entity, ok := b.ByID(id) + entity, ok := b.Get(valuesIds[i]) if !ok { panic("could not retrieve entity from mempool") } - entities[i] = entity + values[i] = entity } - return entities + return values } // Clear removes all entities from the pool. -func (b *baselineLRU) Clear() { +func (b *baselineLRU[K, V]) Clear() { var err error - b.c, err = lru.New(b.limit) + b.c, err = lru.New[K, V](b.limit) if err != nil { panic(err) } diff --git a/module/mempool/stdmap/backdata/mapBackData.go b/module/mempool/stdmap/backdata/mapBackData.go index 887b6fca335..ab86f468d36 100644 --- a/module/mempool/stdmap/backdata/mapBackData.go +++ b/module/mempool/stdmap/backdata/mapBackData.go @@ -1,110 +1,126 @@ package backdata -import ( - "github.com/onflow/flow-go/model/flow" -) - // MapBackData implements a map-based generic memory BackData backed by a Go map. -type MapBackData struct { +// Note that this implementation is NOT thread-safe, and the higher-level Backend is responsible for concurrency management. +type MapBackData[K comparable, V any] struct { // NOTE: as a BackData implementation, MapBackData must be non-blocking. // Concurrency management is done by overlay Backend. - entities map[flow.Identifier]flow.Entity + dataMap map[K]V } -func NewMapBackData() *MapBackData { - bd := &MapBackData{ - entities: make(map[flow.Identifier]flow.Entity), +func NewMapBackData[K comparable, V any]() *MapBackData[K, V] { + bd := &MapBackData[K, V]{ + dataMap: make(map[K]V), } return bd } -// Has checks if backdata already contains the entity with the given identifier. -func (b MapBackData) Has(entityID flow.Identifier) bool { - _, exists := b.entities[entityID] +// Has checks if a value is stored under the given key. +func (b *MapBackData[K, V]) Has(key K) bool { + _, exists := b.dataMap[key] return exists } -// Add adds the given entity to the backdata. -func (b *MapBackData) Add(entityID flow.Identifier, entity flow.Entity) bool { - _, exists := b.entities[entityID] +// Add attempts to add the given value to the backdata, without overwriting existing data. 
+// If a value is already stored under the input key, Add is a no-op and returns false. +// If no value is stored under the input key, Add adds the value and returns true. +func (b *MapBackData[K, V]) Add(key K, value V) bool { + _, exists := b.dataMap[key] if exists { return false } - b.entities[entityID] = entity + b.dataMap[key] = value return true } -// Remove removes the entity with the given identifier. -func (b *MapBackData) Remove(entityID flow.Identifier) (flow.Entity, bool) { - entity, exists := b.entities[entityID] - if !exists { - return nil, false +// Remove removes the value with the given key. +// If the key-value pair exists, returns the value and true. +// Otherwise, returns the zero value for type V and false. +func (b *MapBackData[K, V]) Remove(key K) (value V, ok bool) { + value, ok = b.dataMap[key] + if !ok { + return value, false } - delete(b.entities, entityID) - return entity, true + delete(b.dataMap, key) + return value, true } -// Adjust adjusts the entity using the given function if the given identifier can be found. -// Returns a bool which indicates whether the entity was updated as well as the updated entity. -func (b *MapBackData) Adjust(entityID flow.Identifier, f func(flow.Entity) flow.Entity) (flow.Entity, bool) { - entity, ok := b.entities[entityID] +// Adjust adjusts the value using the given function if the given key can be found. +// Returns: +// - the updated value for the key (if the key exists) +// - a boolean indicating whether the key was found (and the update applied) +func (b *MapBackData[K, V]) Adjust(key K, f func(V) V) (value V, ok bool) { + value, ok = b.dataMap[key] if !ok { - return nil, false + return value, false } - newentity := f(entity) - newentityID := newentity.ID() + newValue := f(value) + b.dataMap[key] = newValue + return newValue, true +} - delete(b.entities, entityID) - b.entities[newentityID] = newentity - return newentity, true +// AdjustWithInit adjusts the value using the provided function if the key is found. +// If the key is not found, it initializes the value using the given init function and then applies the adjustment. +// +// Args: +// - key: The key for which the value should be adjusted. +// - adjust: the function that adjusts the value. +// - init: A function that initializes the value if the key is not present. +// +// Returns: +// - the adjusted value. +// - a bool which indicates whether the value was adjusted (for MapBackData this is always true) +func (b *MapBackData[K, V]) AdjustWithInit(key K, adjust func(V) V, init func() V) (V, bool) { + if b.Has(key) { + return b.Adjust(key, adjust) + } + b.Add(key, init()) + return b.Adjust(key, adjust) } -// ByID returns the given entity from the backdata. -func (b MapBackData) ByID(entityID flow.Identifier) (flow.Entity, bool) { - entity, exists := b.entities[entityID] - if !exists { - return nil, false +// Get returns the value for the given key. +// Returns true if the key-value pair exists, and false otherwise. +func (b *MapBackData[K, V]) Get(key K) (value V, ok bool) { + value, ok = b.dataMap[key] + if !ok { + return value, false } - return entity, true + return value, true } -// Size returns the size of the backdata, i.e., total number of stored (entityId, entity) -func (b MapBackData) Size() uint { - return uint(len(b.entities)) +// Size returns the number of stored key-value pairs. +func (b *MapBackData[K, V]) Size() uint { + return uint(len(b.dataMap)) } -// All returns all entities stored in the backdata. 
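Two semantic points in the MapBackData migration above are easy to miss: Adjust now keeps the adjusted value under the original key (the legacy Entity-based code re-keyed by the updated entity's ID), and AdjustWithInit folds the get-or-create-then-update sequence into one call. A sketch of both, assuming the post-diff backdata API (the string keys and counter values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/onflow/flow-go/module/mempool/stdmap/backdata"
)

func main() {
	bd := backdata.NewMapBackData[string, uint64]()

	// Adjust transforms only the value; the key stays fixed.
	bd.Add("counter", 1)
	updated, ok := bd.Adjust("counter", func(v uint64) uint64 { return v + 10 })
	fmt.Println(updated, ok) // 11 true

	// AdjustWithInit: the init functor runs only when the key is absent,
	// so callers need no separate Has/Add dance.
	for i := 0; i < 3; i++ {
		count, _ := bd.AdjustWithInit("requests",
			func(c uint64) uint64 { return c + 1 }, // adjust
			func() uint64 { return 0 },             // init, used on the first call only
		)
		fmt.Println(count) // 1, 2, 3
	}
}
```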
-func (b MapBackData) All() map[flow.Identifier]flow.Entity { - entities := make(map[flow.Identifier]flow.Entity) - for entityID, entity := range b.entities { - entities[entityID] = entity +// All returns all stored key-value pairs as a map. +func (b *MapBackData[K, V]) All() map[K]V { + values := make(map[K]V) + for key, value := range b.dataMap { + values[key] = value } - return entities + return values } -// Identifiers returns the list of identifiers of entities stored in the backdata. -func (b MapBackData) Identifiers() flow.IdentifierList { - ids := make(flow.IdentifierList, len(b.entities)) - i := 0 - for entityID := range b.entities { - ids[i] = entityID - i++ +// Keys returns an unordered list of keys stored in the backdata. +func (b *MapBackData[K, V]) Keys() []K { + keys := make([]K, 0, len(b.dataMap)) + for key := range b.dataMap { + keys = append(keys, key) } - return ids + return keys } -// Entities returns the list of entities stored in the backdata. -func (b MapBackData) Entities() []flow.Entity { - entities := make([]flow.Entity, len(b.entities)) - i := 0 - for _, entity := range b.entities { - entities[i] = entity - i++ +// Values returns an unordered list of values stored in the backdata. +func (b *MapBackData[K, V]) Values() []V { + values := make([]V, 0, len(b.dataMap)) + for _, value := range b.dataMap { + values = append(values, value) } - return entities + return values } -// Clear removes all entities from the backdata. -func (b *MapBackData) Clear() { - b.entities = make(map[flow.Identifier]flow.Entity) +// Clear removes all key-value pairs from the backdata. +func (b *MapBackData[K, V]) Clear() { + b.dataMap = make(map[K]V) } diff --git a/module/mempool/stdmap/backdata/mapBackData_test.go b/module/mempool/stdmap/backdata/mapBackData_test.go index 7e5858d65ac..5cc24b019e2 100644 --- a/module/mempool/stdmap/backdata/mapBackData_test.go +++ b/module/mempool/stdmap/backdata/mapBackData_test.go @@ -5,23 +5,24 @@ import ( "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) func TestMapBackData_StoreAnd(t *testing.T) { - backData := NewMapBackData() + backData := NewMapBackData[flow.Identifier, *unittest.MockEntity]() entities := unittest.EntityListFixture(100) // Add for _, e := range entities { // all entities must be stored successfully - require.True(t, backData.Add(e.ID(), e)) + require.True(t, backData.Add(e.Identifier, e)) } - // ByID + // Get for _, expected := range entities { // all entities must be retrievable successfully - actual, ok := backData.ByID(expected.ID()) + actual, ok := backData.Get(expected.Identifier) require.True(t, ok) require.Equal(t, expected, actual) } @@ -30,20 +31,78 @@ func TestMapBackData_StoreAnd(t *testing.T) { all := backData.All() require.Equal(t, len(entities), len(all)) for _, expected := range entities { - actual, ok := backData.ByID(expected.ID()) + actual, ok := backData.Get(expected.Identifier) require.True(t, ok) require.Equal(t, expected, actual) } - // Identifiers - ids := backData.Identifiers() + // Keys + ids := backData.Keys() require.Equal(t, len(entities), len(ids)) for _, id := range ids { require.True(t, backData.Has(id)) } - // Entities - actualEntities := backData.Entities() - require.Equal(t, len(entities), len(actualEntities)) - require.ElementsMatch(t, entities, actualEntities) + // Values + actualValues := backData.Values() + require.Equal(t, len(entities), len(actualValues)) + require.ElementsMatch(t, entities, actualValues) +} + +// 
TestMapBackData_AdjustWithInit tests the AdjustWithInit method of the MapBackData.
+// Note that as the backdata is not inherently thread-safe, this test is not concurrent.
+func TestMapBackData_AdjustWithInit(t *testing.T) {
+	backData := NewMapBackData[flow.Identifier, *unittest.MockEntity]()
+	entities := unittest.EntityListFixture(100)
+
+	ids := make([]flow.Identifier, 0, len(entities))
+	for _, entity := range entities {
+		ids = append(ids, entity.Identifier)
+	}
+
+	// AdjustWithInit
+	for _, e := range entities {
+		// all entities must be adjusted successfully
+		actual, ok := backData.AdjustWithInit(e.Identifier, func(entity *unittest.MockEntity) *unittest.MockEntity {
+			// increment nonce of the entity
+			entity.Nonce++
+			return entity
+		}, func() *unittest.MockEntity {
+			return e
+		})
+		require.True(t, ok)
+		require.Equal(t, e, actual)
+	}
+
+	// All
+	all := backData.All()
+	require.Equal(t, len(entities), len(all))
+	for _, expected := range entities {
+		actual, ok := backData.Get(expected.Identifier)
+		require.True(t, ok)
+		require.Equal(t, expected.Identifier, actual.Identifier)
+		require.Equal(t, uint64(1), actual.Nonce)
+	}
+
+	// Keys
+	retrievedIds := backData.Keys()
+	require.Equal(t, len(entities), len(retrievedIds))
+	require.ElementsMatch(t, ids, retrievedIds)
+	for _, id := range retrievedIds {
+		require.True(t, backData.Has(id))
+	}
+
+	// Values
+	actualValues := backData.Values()
+	require.Equal(t, len(entities), len(actualValues))
+	require.ElementsMatch(t, entities, actualValues)
+
+	// Get
+	for _, e := range entities {
+		// all entities must be retrieved successfully
+		actual, ok := backData.Get(e.Identifier)
+		require.True(t, ok)
+		require.Equal(t, e.Identifier, actual.Identifier)
+		require.Equal(t, uint64(1), actual.Nonce)
+	}
 }
diff --git a/module/mempool/stdmap/backend.go b/module/mempool/stdmap/backend.go
index cb0dca2640d..33caa01ef4b 100644
--- a/module/mempool/stdmap/backend.go
+++ b/module/mempool/stdmap/backend.go
@@ -1,34 +1,30 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package stdmap

 import (
 	"math"
 	"sync"

-	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/mempool"
 	"github.com/onflow/flow-go/module/mempool/stdmap/backdata"
-	_ "github.com/onflow/flow-go/utils/binstat"
 )

-// Backend provides synchronized access to a backdata
-type Backend struct {
+// Backend is a wrapper around the mutable backdata that provides concurrency-safe operations.
+type Backend[K comparable, V any] struct {
 	sync.RWMutex
-	backData           mempool.BackData
+	mutableBackData    mempool.MutableBackData[K, V]
 	guaranteedCapacity uint
-	batchEject         BatchEjectFunc
-	eject              EjectFunc
-	ejectionCallbacks  []mempool.OnEjection
+	batchEject         BatchEjectFunc[K, V]
+	eject              EjectFunc[K, V]
+	ejectionCallbacks  []mempool.OnEjection[V]
 }

 // NewBackend creates a new memory pool backend.
-// This is using EjectTrueRandomFast()
-func NewBackend(options ...OptionFunc) *Backend {
-	b := Backend{
-		backData:           backdata.NewMapBackData(),
+// This is using EjectRandomFast()
+func NewBackend[K comparable, V any](options ...OptionFunc[K, V]) *Backend[K, V] {
+	b := Backend[K, V]{
+		mutableBackData:    backdata.NewMapBackData[K, V](),
 		guaranteedCapacity: uint(math.MaxUint32),
-		batchEject:         EjectTrueRandomFast,
+		batchEject:         EjectRandomFast[K, V],
 		eject:              nil,
 		ejectionCallbacks:  nil,
 	}
@@ -38,161 +34,196 @@ func NewBackend(options ...OptionFunc) *Backend {
 	return &b
 }

-// Has checks if we already contain the item with the given hash.
-func (b *Backend) Has(entityID flow.Identifier) bool {
-	//bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)Has")
+// Has checks if a value is stored under the given key.
+func (b *Backend[K, V]) Has(key K) bool {
+	// bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)Has")
 	b.RLock()
-	//binstat.Leave(bs1)
+	// binstat.Leave(bs1)

-	//bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Has")
-	//defer binstat.Leave(bs2)
+	// bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Has")
+	// defer binstat.Leave(bs2)
 	defer b.RUnlock()
-	has := b.backData.Has(entityID)
+	has := b.mutableBackData.Has(key)
 	return has
 }

-// Add adds the given item to the pool.
-func (b *Backend) Add(entity flow.Entity) bool {
-	//bs0 := binstat.EnterTime(binstat.BinStdmap + ".<<lock.(Backend)Add")
-	entityID := entity.ID() // this expensive operation done OUTSIDE of lock :-)
-	//binstat.Leave(bs0)
-
-	//bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Add")
+// Add attempts to add the given value, without overwriting existing data.
+// If a value is already stored under the input key, Add is a no-op and returns false.
+// If no value is stored under the input key, Add adds the value and returns true.
+func (b *Backend[K, V]) Add(key K, value V) bool {
+	// bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Add")
 	b.Lock()
-	//binstat.Leave(bs1)
+	// binstat.Leave(bs1)

-	//bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Add")
-	//defer binstat.Leave(bs2)
+	// bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Add")
+	// defer binstat.Leave(bs2)
 	defer b.Unlock()
-	added := b.backData.Add(entityID, entity)
+	added := b.mutableBackData.Add(key, value)
 	b.reduce()
 	return added
 }

-// Remove will remove the item with the given hash.
-func (b *Backend) Remove(entityID flow.Identifier) bool {
-	//bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Remove")
+// Remove removes the value with the given key.
+// Returns true if a value was stored under the key and has been removed, and false otherwise.
+func (b *Backend[K, V]) Remove(key K) bool {
+	// bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Remove")
 	b.Lock()
-	//binstat.Leave(bs1)
+	// binstat.Leave(bs1)

-	//bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Remove")
-	//defer binstat.Leave(bs2)
+	// bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Remove")
+	// defer binstat.Leave(bs2)
 	defer b.Unlock()
-	_, removed := b.backData.Remove(entityID)
+	_, removed := b.mutableBackData.Remove(key)
 	return removed
 }

 // Adjust will adjust the value item using the given function if the given key can be found.
-// Returns a bool which indicates whether the value was updated.
-func (b *Backend) Adjust(entityID flow.Identifier, f func(flow.Entity) flow.Entity) (flow.Entity, bool) {
-	//bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Adjust")
+// Returns:
+// - value, true if the value with the given key was found. The returned value is the version after the update is applied.
+// - the zero value of V, false if no value with the given key was found
+func (b *Backend[K, V]) Adjust(key K, f func(V) V) (V, bool) {
+	// bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Adjust")
 	b.Lock()
-	//binstat.Leave(bs1)
+	// binstat.Leave(bs1)

-	//bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Adjust")
-	//defer binstat.Leave(bs2)
+	// bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Adjust")
+	// defer binstat.Leave(bs2)
 	defer b.Unlock()
-	entity, wasUpdated := b.backData.Adjust(entityID, f)
-	return entity, wasUpdated
+	value, wasUpdated := b.mutableBackData.Adjust(key, f)
+	return value, wasUpdated
+}
+
+// AdjustWithInit adjusts the value using the given function if the given identifier can be found. When the
+// value is not found, it initializes the value using the given init function and then applies the adjust function.
+// Args:
+// - key: the identifier of the value to adjust.
+// - adjust: the function that adjusts the value.
+// - init: the function that initializes the value when it is not found.
+// Returns:
+// - the adjusted value.
+// - a bool which indicates whether the value was adjusted.
+func (b *Backend[K, V]) AdjustWithInit(key K, adjust func(V) V, init func() V) (V, bool) {
+	b.Lock()
+	defer b.Unlock()
+
+	return b.mutableBackData.AdjustWithInit(key, adjust, init)
 }

-// ByID returns the given item from the pool.
-func (b *Backend) ByID(entityID flow.Identifier) (flow.Entity, bool) {
-	//bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)ByID")
+// Get returns the value for the given key.
+// Returns true if the key-value pair exists, and false otherwise.
+func (b *Backend[K, V]) Get(key K) (V, bool) {
+	// bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)ByID")
 	b.RLock()
-	//binstat.Leave(bs1)
+	// binstat.Leave(bs1)

-	//bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)ByID")
-	//defer binstat.Leave(bs2)
+	// bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)ByID")
+	// defer binstat.Leave(bs2)
 	defer b.RUnlock()
-	entity, exists := b.backData.ByID(entityID)
-	return entity, exists
+	value, exists := b.mutableBackData.Get(key)
+	return value, exists
 }

-// Run executes a function giving it exclusive access to the backdata
-func (b *Backend) Run(f func(backdata mempool.BackData) error) error {
-	//bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Run")
+// Run executes a function giving it exclusive access to the backdata.
+// All errors returned from the input functor f are considered exceptions.
+// No errors are expected during normal operation.
+func (b *Backend[K, V]) Run(f func(backdata mempool.BackData[K, V]) error) error {
+	// bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Run")
 	b.Lock()
-	//binstat.Leave(bs1)
+	// binstat.Leave(bs1)

-	//bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Run")
-	//defer binstat.Leave(bs2)
+	// bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Run")
+	// defer binstat.Leave(bs2)
 	defer b.Unlock()
-	err := f(b.backData)
+	err := f(b.mutableBackData)
 	b.reduce()
 	return err
 }

 // Size will return the size of the backend.
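Note how the explicit-key API changes the caller's side of Add: the legacy code computed `entity.ID()` outside the lock precisely because hashing is expensive, and with generics the caller supplies the key, so that cost naturally stays outside the critical section. A hedged caller-side sketch using the fixtures already seen in this diff:

```go
package main

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/mempool/stdmap"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	pool := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity](
		stdmap.WithLimit[flow.Identifier, *unittest.MockEntity](100),
	)

	e := unittest.MockEntityFixture()
	// Any key-derivation cost is paid here, outside the Backend's lock;
	// Add only stores the pair under the caller-provided key.
	pool.Add(e.Identifier, e)

	got, ok := pool.Get(e.Identifier)
	fmt.Println(ok, got == e) // true true
}
```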
-func (b *Backend) Size() uint { - //bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)Size") +func (b *Backend[K, V]) Size() uint { + // bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)Size") b.RLock() - //binstat.Leave(bs1) + // binstat.Leave(bs1) - //bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Size") - //defer binstat.Leave(bs2) + // bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Size") + // defer binstat.Leave(bs2) defer b.RUnlock() - size := b.backData.Size() + size := b.mutableBackData.Size() return size } // Limit returns the maximum number of items allowed in the backend. -func (b *Backend) Limit() uint { +func (b *Backend[K, V]) Limit() uint { return b.guaranteedCapacity } -// All returns all entities from the pool. -func (b *Backend) All() []flow.Entity { - //bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)All") +// Values returns all stored values from the pool. +func (b *Backend[K, V]) Values() []V { + // bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)All") + b.RLock() + // binstat.Leave(bs1) + + // bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)All") + // defer binstat.Leave(bs2) + defer b.RUnlock() + + return b.mutableBackData.Values() +} + +// All returns all stored key-value pairs as a map from the pool. +// ATTENTION: All does not guarantee returning key-value pairs in the same order as they are added. +func (b *Backend[K, V]) All() map[K]V { + // bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)All") b.RLock() - //binstat.Leave(bs1) + // binstat.Leave(bs1) - //bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)All") - //defer binstat.Leave(bs2) + // bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)All") + // defer binstat.Leave(bs2) defer b.RUnlock() - return b.backData.Entities() + return b.mutableBackData.All() } // Clear removes all entities from the pool. -func (b *Backend) Clear() { - //bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Clear") +func (b *Backend[K, V]) Clear() { + // bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Backend)Clear") b.Lock() - //binstat.Leave(bs1) + // binstat.Leave(bs1) - //bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Clear") - //defer binstat.Leave(bs2) + // bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)Clear") + // defer binstat.Leave(bs2) defer b.Unlock() - b.backData.Clear() + b.mutableBackData.Clear() } // RegisterEjectionCallbacks adds the provided OnEjection callbacks -func (b *Backend) RegisterEjectionCallbacks(callbacks ...mempool.OnEjection) { - //bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)RegisterEjectionCallbacks") +func (b *Backend[K, V]) RegisterEjectionCallbacks(callbacks ...mempool.OnEjection[V]) { + // bs1 := binstat.EnterTime(binstat.BinStdmap + ".r_lock.(Backend)RegisterEjectionCallbacks") b.Lock() - //binstat.Leave(bs1) + // binstat.Leave(bs1) - //bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)RegisterEjectionCallbacks") - //defer binstat.Leave(bs2) + // bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Backend)RegisterEjectionCallbacks") + // defer binstat.Leave(bs2) defer b.Unlock() b.ejectionCallbacks = append(b.ejectionCallbacks, callbacks...) } // reduce will reduce the size of the kept entities until we are within the // configured memory pool size limit. 
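With `mempool.OnEjection[V]` above, ejection callbacks receive the concrete value type directly instead of a `flow.Entity` they must assert. A sketch of wiring a typed callback, under the same post-diff API assumptions:

```go
package main

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/mempool/stdmap"
	"github.com/onflow/flow-go/utils/unittest"
)

func main() {
	pool := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity](
		stdmap.WithLimit[flow.Identifier, *unittest.MockEntity](10),
	)

	// The callback is typed: no flow.Entity assertion is needed to
	// inspect the ejected value.
	pool.RegisterEjectionCallbacks(func(e *unittest.MockEntity) {
		fmt.Printf("ejected entity %x\n", e.Identifier)
	})

	// Overfill the pool: once size exceeds the limit by more than the
	// batch threshold (128 in this diff), reduce() triggers batch
	// ejection and the callbacks fire for every ejected value.
	for i := 0; i < 1000; i++ {
		e := unittest.MockEntityFixture()
		pool.Add(e.Identifier, e)
	}
}
```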
-func (b *Backend) reduce() { - //bs := binstat.EnterTime(binstat.BinStdmap + ".??lock.(Backend)reduce") - //defer binstat.Leave(bs) +func (b *Backend[K, V]) reduce() { + // bs := binstat.EnterTime(binstat.BinStdmap + ".??lock.(Backend)reduce") + // defer binstat.Leave(bs) // we keep reducing the cache size until we are at limit again - // this was a loop, but the loop is now in EjectTrueRandomFast() + // this was a loop, but the loop is now in EjectRandomFast() // the ejections are batched, so this call to eject() may not actually // do anything until the batch threshold is reached (currently 128) - if b.backData.Size() > b.guaranteedCapacity { + if b.mutableBackData.Size() > b.guaranteedCapacity { // get the key from the eject function // we don't do anything if there is an error if b.batchEject != nil { - _ = b.batchEject(b) + _, _ = b.batchEject(b) } else { _, _, _ = b.eject(b) } diff --git a/module/mempool/stdmap/backend_test.go b/module/mempool/stdmap/backend_test.go index 28acbf91eff..73cd2c5ef6e 100644 --- a/module/mempool/stdmap/backend_test.go +++ b/module/mempool/stdmap/backend_test.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap_test import ( @@ -14,7 +12,10 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/mempool" + herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" + "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" ) @@ -23,10 +24,10 @@ func TestAddRemove(t *testing.T) { item2 := unittest.MockEntityFixture() t.Run("should be able to add and rem", func(t *testing.T) { - pool := stdmap.NewBackend() - added := pool.Add(item1) + pool := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity]() + added := pool.Add(item1.Identifier, item1) require.True(t, added) - added = pool.Add(item2) + added = pool.Add(item2.Identifier, item2) require.True(t, added) t.Run("should be able to get size", func(t *testing.T) { @@ -35,13 +36,13 @@ func TestAddRemove(t *testing.T) { }) t.Run("should be able to get first", func(t *testing.T) { - gotItem, exists := pool.ByID(item1.ID()) + gotItem, exists := pool.Get(item1.Identifier) assert.True(t, exists) assert.Equal(t, item1, gotItem) }) t.Run("should be able to remove first", func(t *testing.T) { - removed := pool.Remove(item1.ID()) + removed := pool.Remove(item1.Identifier) assert.True(t, removed) size := pool.Size() assert.EqualValues(t, uint(1), size) @@ -50,7 +51,9 @@ func TestAddRemove(t *testing.T) { t.Run("should be able to retrieve all", func(t *testing.T) { items := pool.All() require.Len(t, items, 1) - assert.Equal(t, item2, items[0]) + val, exists := items[item2.Identifier] + require.True(t, exists) + assert.Equal(t, item2, val) }) }) } @@ -60,26 +63,26 @@ func TestAdjust(t *testing.T) { item2 := unittest.MockEntityFixture() t.Run("should not adjust if not exist", func(t *testing.T) { - pool := stdmap.NewBackend() - _ = pool.Add(item1) + pool := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity]() + _ = pool.Add(item1.Identifier, item1) // item2 doesn't exist - updatedItem, updated := pool.Adjust(item2.ID(), func(old flow.Entity) flow.Entity { + updatedItem, updated := pool.Adjust(item2.Identifier, func(old *unittest.MockEntity) *unittest.MockEntity { return item2 }) assert.False(t, updated) assert.Nil(t, updatedItem) - _, found := pool.ByID(item2.ID()) + _, found := 
pool.Get(item2.Identifier) assert.False(t, found) }) t.Run("should adjust if exists", func(t *testing.T) { - pool := stdmap.NewBackend() - _ = pool.Add(item1) + pool := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity]() + _ = pool.Add(item1.Identifier, item1) - updatedItem, ok := pool.Adjust(item1.ID(), func(old flow.Entity) flow.Entity { + updatedItem, ok := pool.Adjust(item1.Identifier, func(old *unittest.MockEntity) *unittest.MockEntity { // item 1 exist, got replaced with item2, the value was updated return item2 }) @@ -87,7 +90,7 @@ func TestAdjust(t *testing.T) { assert.True(t, ok) assert.Equal(t, updatedItem, item2) - value2, found := pool.ByID(item2.ID()) + value2, found := pool.Get(item1.Identifier) assert.True(t, found) assert.Equal(t, value2, item2) }) @@ -97,11 +100,11 @@ func TestAdjust(t *testing.T) { func Test_DeduplicationByID(t *testing.T) { item1 := unittest.MockEntityFixture() item2 := unittest.MockEntity{Identifier: item1.Identifier} // duplicate - assert.True(t, item1.ID() == item2.ID()) + assert.True(t, item1.Identifier == item2.Identifier) - pool := stdmap.NewBackend() - pool.Add(item1) - pool.Add(item2) + pool := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity]() + pool.Add(item1.Identifier, item1) + pool.Add(item2.Identifier, item1) assert.Equal(t, uint(1), pool.Size()) } @@ -114,7 +117,7 @@ func TestBackend_RunLimitChecking(t *testing.T) { limit = 150 swarm = 150 ) - pool := stdmap.NewBackend(stdmap.WithLimit(limit)) + pool := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity](stdmap.WithLimit[flow.Identifier, *unittest.MockEntity](limit)) wg := sync.WaitGroup{} wg.Add(swarm) @@ -123,8 +126,8 @@ func TestBackend_RunLimitChecking(t *testing.T) { go func(x int) { // creates and adds a fake item to the mempool item := unittest.MockEntityFixture() - _ = pool.Run(func(backdata mempool.BackData) error { - added := backdata.Add(item.ID(), item) + _ = pool.Run(func(backdata mempool.BackData[flow.Identifier, *unittest.MockEntity]) error { + added := backdata.Add(item.Identifier, item) if !added { return fmt.Errorf("potential race condition on adding to back data") } @@ -149,13 +152,13 @@ func TestBackend_RegisterEjectionCallback(t *testing.T) { limit = 20 swarm = 20 ) - pool := stdmap.NewBackend(stdmap.WithLimit(limit)) + pool := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity](stdmap.WithLimit[flow.Identifier, *unittest.MockEntity](limit)) // on ejection callback: test whether ejected identity is no longer part of the mempool - ensureEntityNotInMempool := func(entity flow.Entity) { - id := entity.ID() + ensureEntityNotInMempool := func(entity *unittest.MockEntity) { + id := entity.Identifier go func() { - e, found := pool.ByID(id) + e, found := pool.Get(id) require.False(t, found) require.Nil(t, e) }() @@ -171,7 +174,7 @@ func TestBackend_RegisterEjectionCallback(t *testing.T) { go func(x int) { // creates and adds a fake item to the mempool item := unittest.MockEntityFixture() - pool.Add(item) + pool.Add(item.Identifier, item) wg.Done() }(i) } @@ -185,7 +188,7 @@ func TestBackend_RegisterEjectionCallback(t *testing.T) { func TestBackend_Multiple_OnEjectionCallbacks(t *testing.T) { // ejection callback counts number of calls calls := uint64(0) - callback := func(entity flow.Entity) { + callback := func(entity *unittest.MockEntity) { atomic.AddUint64(&calls, 1) } @@ -193,7 +196,7 @@ func TestBackend_Multiple_OnEjectionCallbacks(t *testing.T) { const ( limit = 30 ) - pool := stdmap.NewBackend(stdmap.WithLimit(limit)) + pool := 
stdmap.NewBackend[flow.Identifier, *unittest.MockEntity](stdmap.WithLimit[flow.Identifier, *unittest.MockEntity](limit)) pool.RegisterEjectionCallbacks(callback, callback) t.Run("fill mempool up to limit", func(t *testing.T) { @@ -217,34 +220,108 @@ func TestBackend_Multiple_OnEjectionCallbacks(t *testing.T) { }) } -func addRandomEntities(t *testing.T, backend *stdmap.Backend, num int) { +// TestBackend_AdjustWithInit_Concurrent tests the AdjustWithInit method of the Backend with HeroCache as the backdata. +// It concurrently attempts on adjusting non-existent entities, and verifies that the entities are initialized and adjusted correctly. +func TestBackend_AdjustWithInit_Concurrent_HeroCache(t *testing.T) { + sizeLimit := uint32(100) + backData := herocache.NewCache[*unittest.MockEntity](sizeLimit, + herocache.DefaultOversizeFactor, + heropool.LRUEjection, + unittest.Logger(), + metrics.NewNoopCollector()) + + backend := stdmap.NewBackend(stdmap.WithMutableBackData[flow.Identifier, *unittest.MockEntity](backData)) + entities := unittest.EntityListFixture(100) + adjustDone := sync.WaitGroup{} + for _, e := range entities { + adjustDone.Add(1) + e := e // capture range variable + go func() { + defer adjustDone.Done() + + backend.AdjustWithInit(e.Identifier, func(entity *unittest.MockEntity) *unittest.MockEntity { + // increment nonce of the entity + entity.Nonce++ + return entity + }, func() *unittest.MockEntity { + return e + }) + }() + } + + unittest.RequireReturnsBefore(t, adjustDone.Wait, 1*time.Second, "failed to adjust elements in time") + + for _, e := range entities { + actual, ok := backend.Get(e.Identifier) + require.True(t, ok) + require.Equal(t, e.Identifier, actual.Identifier) + require.Equal(t, uint64(1), actual.Nonce) + } +} + +// TestBackend_AdjustWithInit_Concurrent_MapBased tests the AdjustWithInit method of the Backend with golang map as the backdata. +// It concurrently attempts on adjusting non-existent entities, and verifies that the entities are initialized and adjusted correctly. 
+func TestBackend_AdjustWithInit_Concurrent_MapBased(t *testing.T) { + sizeLimit := uint(100) + backend := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity](stdmap.WithLimit[flow.Identifier, *unittest.MockEntity](sizeLimit)) + entities := unittest.EntityListFixture(sizeLimit) + + adjustDone := sync.WaitGroup{} + for _, e := range entities { + adjustDone.Add(1) + e := e // capture range variable + go func() { + defer adjustDone.Done() + + backend.AdjustWithInit(e.Identifier, func(entity *unittest.MockEntity) *unittest.MockEntity { + // increment nonce of the entity + entity.Nonce++ + return entity + }, func() *unittest.MockEntity { + return e + }) + }() + } + + unittest.RequireReturnsBefore(t, adjustDone.Wait, 1*time.Second, "failed to adjust elements in time") + + for _, e := range entities { + actual, ok := backend.Get(e.Identifier) + require.True(t, ok) + require.Equal(t, e.Identifier, actual.Identifier) + require.Equal(t, uint64(1), actual.Nonce) + } +} + +func addRandomEntities(t *testing.T, backend *stdmap.Backend[flow.Identifier, *unittest.MockEntity], num int) { + // add swarm-number of items to backend wg := sync.WaitGroup{} wg.Add(num) for ; num > 0; num-- { go func() { - backend.Add(unittest.MockEntityFixture()) // creates and adds a fake item to the mempool - wg.Done() + defer wg.Done() + backend.Add(unittest.IdentifierFixture(), unittest.MockEntityFixture()) // creates and adds a fake item to the mempool }() } unittest.RequireReturnsBefore(t, wg.Wait, 1*time.Second, "failed to add elements in time") } func TestBackend_All(t *testing.T) { - backend := stdmap.NewBackend() + backend := stdmap.NewBackend[flow.Identifier, *unittest.MockEntity]() entities := unittest.EntityListFixture(100) // Add for _, e := range entities { // all entities must be stored successfully - require.True(t, backend.Add(e)) + require.True(t, backend.Add(e.Identifier, e)) } // All all := backend.All() require.Equal(t, len(entities), len(all)) for _, expected := range entities { - actual, ok := backend.ByID(expected.ID()) + actual, ok := backend.Get(expected.Identifier) require.True(t, ok) require.Equal(t, expected, actual) } diff --git a/module/mempool/stdmap/blockbycollections.go b/module/mempool/stdmap/blockbycollections.go deleted file mode 100644 index 3b710ad6488..00000000000 --- a/module/mempool/stdmap/blockbycollections.go +++ /dev/null @@ -1,62 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package stdmap - -import ( - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/mempool" - "github.com/onflow/flow-go/module/mempool/entity" - _ "github.com/onflow/flow-go/utils/binstat" -) - -// Hold all the missing collections. -// Each entry is a missing collection, and all the blocks that contain -// this collection -type BlockByCollections struct { - *Backend -} - -// BlockByCollectionBackdata contains all the collections is being requested, -// for each collection it stores the blocks that contains the collection. 
-// the Backdata is essentially map<collectionID>map<blockID>*ExecutableBlock -type BlockByCollectionBackdata struct { - mempool.BackData -} - -func NewBlockByCollections() *BlockByCollections { - return &BlockByCollections{NewBackend(WithEject(EjectPanic))} -} - -func (b *BlockByCollections) Add(block *entity.BlocksByCollection) bool { - return b.Backend.Add(block) -} - -func (b *BlockByCollections) Get(collID flow.Identifier) (*entity.BlocksByCollection, bool) { - backdata := &BlockByCollectionBackdata{b.backData} - return backdata.ByID(collID) -} - -func (b *BlockByCollections) Run(f func(backdata *BlockByCollectionBackdata) error) error { - //bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(BlockByCollections).Run") - b.Lock() - //binstat.Leave(bs1) - - //bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(BlockByCollections).Run)") - defer b.Unlock() - err := f(&BlockByCollectionBackdata{b.backData}) - //binstat.Leave(bs2) - - if err != nil { - return err - } - return nil -} - -func (b *BlockByCollectionBackdata) ByID(id flow.Identifier) (*entity.BlocksByCollection, bool) { - e, exists := b.BackData.ByID(id) - if !exists { - return nil, false - } - block := e.(*entity.BlocksByCollection) - return block, true -} diff --git a/module/mempool/stdmap/blocks.go b/module/mempool/stdmap/blocks.go deleted file mode 100644 index cb48877a861..00000000000 --- a/module/mempool/stdmap/blocks.go +++ /dev/null @@ -1,46 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package stdmap - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// Blocks implements the blocks memory pool. -type Blocks struct { - *Backend -} - -// NewBlocks creates a new memory pool for blocks. -func NewBlocks(limit uint) (*Blocks, error) { - a := &Blocks{ - Backend: NewBackend(WithLimit(limit)), - } - - return a, nil -} - -// Add adds an block to the mempool. -func (a *Blocks) Add(block *flow.Block) bool { - return a.Backend.Add(block) -} - -// ByID returns the block with the given ID from the mempool. -func (a *Blocks) ByID(blockID flow.Identifier) (*flow.Block, bool) { - entity, exists := a.Backend.ByID(blockID) - if !exists { - return nil, false - } - block := entity.(*flow.Block) - return block, true -} - -// All returns all blocks from the pool. -func (a *Blocks) All() []*flow.Block { - entities := a.Backend.All() - blocks := make([]*flow.Block, 0, len(entities)) - for _, entity := range entities { - blocks = append(blocks, entity.(*flow.Block)) - } - return blocks -} diff --git a/module/mempool/stdmap/chunk_data_packs.go b/module/mempool/stdmap/chunk_data_packs.go deleted file mode 100644 index 2a2ba5753e8..00000000000 --- a/module/mempool/stdmap/chunk_data_packs.go +++ /dev/null @@ -1,62 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED -package stdmap - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// ChunkDataPacks implements the ChunkDataPack memory pool. -type ChunkDataPacks struct { - *Backend -} - -// NewChunkDataPacks creates a new memory pool for ChunkDataPacks. -func NewChunkDataPacks(limit uint) (*ChunkDataPacks, error) { - a := &ChunkDataPacks{ - Backend: NewBackend(WithLimit(limit)), - } - return a, nil -} - -// Has checks whether the ChunkDataPack with the given chunkID is currently in -// the memory pool. -func (c *ChunkDataPacks) Has(chunkID flow.Identifier) bool { - return c.Backend.Has(chunkID) -} - -// Add adds an chunkDataPack to the mempool. 
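A pattern behind the file deletions above: once Backend is generic, per-type mempools such as Blocks or Collections no longer need hand-written wrappers whose only job was asserting flow.Entity back to a concrete type. Where a typed mempool is still wanted, it reduces to a one-line composition, as NewChunkRequests and NewAssignments in this diff do. A hedged sketch of what a Collections equivalent would look like (hypothetical; this diff deletes Collections rather than rewriting it):

```go
package main

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/mempool/stdmap"
)

// Collections is all a typed mempool needs now: the generic Backend
// already provides Add/Get/Remove/All with concrete types.
type Collections struct {
	*stdmap.Backend[flow.Identifier, *flow.Collection]
}

func NewCollections(limit uint) *Collections {
	return &Collections{
		stdmap.NewBackend[flow.Identifier, *flow.Collection](
			stdmap.WithLimit[flow.Identifier, *flow.Collection](limit),
		),
	}
}

func main() {
	pool := NewCollections(100)
	coll := &flow.Collection{}
	pool.Add(coll.ID(), coll)
	fmt.Println(pool.Size()) // 1
}
```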
-func (c *ChunkDataPacks) Add(cdp *flow.ChunkDataPack) bool {
-	added := c.Backend.Add(cdp)
-	return added
-}
-
-// Remove will remove chunk data pack by ID
-func (c *ChunkDataPacks) Remove(chunkID flow.Identifier) bool {
-	removed := c.Backend.Remove(chunkID)
-	return removed
-}
-
-// ByChunkID returns the chunk data pack with the given chunkID from the mempool.
-func (c *ChunkDataPacks) ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, bool) {
-	entity, exists := c.Backend.ByID(chunkID)
-	if !exists {
-		return nil, false
-	}
-	chunkDataPack := entity.(*flow.ChunkDataPack)
-	return chunkDataPack, true
-}
-
-// Size will return the current size of the memory pool.
-func (c *ChunkDataPacks) Size() uint {
-	return c.Backend.Size()
-}
-
-// All returns all chunk data packs from the pool.
-func (c *ChunkDataPacks) All() []*flow.ChunkDataPack {
-	entities := c.Backend.All()
-	chunkDataPack := make([]*flow.ChunkDataPack, 0, len(entities))
-	for _, entity := range entities {
-		chunkDataPack = append(chunkDataPack, entity.(*flow.ChunkDataPack))
-	}
-	return chunkDataPack
-}
diff --git a/module/mempool/stdmap/chunk_requests.go b/module/mempool/stdmap/chunk_requests.go
index 28a37d36290..d5475e5c808 100644
--- a/module/mempool/stdmap/chunk_requests.go
+++ b/module/mempool/stdmap/chunk_requests.go
@@ -15,22 +15,13 @@ import (
 // In this implementation, the ChunkRequests
 // wraps the ChunkDataPackRequests around an internal ChunkRequestStatus data object, and maintains the wrapped
 // version in memory.
+// Stored chunk request statuses are keyed by chunk ID.
 type ChunkRequests struct {
-	*Backend
+	*Backend[flow.Identifier, *chunkRequestStatus]
 }

 func NewChunkRequests(limit uint) *ChunkRequests {
-	return &ChunkRequests{
-		Backend: NewBackend(WithLimit(limit)),
-	}
-}
-
-func toChunkRequestStatus(entity flow.Entity) *chunkRequestStatus {
-	status, ok := entity.(*chunkRequestStatus)
-	if !ok {
-		panic(fmt.Sprintf("could not convert the entity into chunk status from the mempool: %v", entity))
-	}
-	return status
+	return &ChunkRequests{NewBackend(WithLimit[flow.Identifier, *chunkRequestStatus](limit))}
 }

 // RequestHistory returns the number of times the chunk has been requested,
@@ -44,16 +35,15 @@ func (cs *ChunkRequests) RequestHistory(chunkID flow.Identifier) (uint64, time.T
 	var retryAfter time.Duration
 	var attempts uint64

-	err := cs.Backend.Run(func(backdata mempool.BackData) error {
-		entity, ok := backdata.ByID(chunkID)
+	err := cs.Backend.Run(func(backdata mempool.BackData[flow.Identifier, *chunkRequestStatus]) error {
+		status, ok := backdata.Get(chunkID)
 		if !ok {
 			return fmt.Errorf("request does not exist for chunk %x", chunkID)
 		}

-		request := toChunkRequestStatus(entity)
-		lastAttempt = request.LastAttempt
-		retryAfter = request.RetryAfter
-		attempts = request.Attempt
+		lastAttempt = status.LastAttempt
+		retryAfter = status.RetryAfter
+		attempts = status.Attempt
 		return nil
 	})

@@ -64,8 +54,8 @@ func (cs *ChunkRequests) RequestHistory(chunkID flow.Identifier) (uint64, time.T
 // The insertion is only successful if there is no duplicate chunk request for the same
 // tuple of (chunkID, resultID, chunkIndex).
func (cs *ChunkRequests) Add(request *verification.ChunkDataPackRequest) bool { - err := cs.Backend.Run(func(backdata mempool.BackData) error { - entity, exists := backdata.ByID(request.ChunkID) + err := cs.Backend.Run(func(backdata mempool.BackData[flow.Identifier, *chunkRequestStatus]) error { + status, exists := backdata.Get(request.ChunkID) chunkLocatorID := request.Locator.ID() if !exists { @@ -73,7 +63,7 @@ func (cs *ChunkRequests) Add(request *verification.ChunkDataPackRequest) bool { locators[chunkLocatorID] = &request.Locator // no chunk request status exists for this chunk ID, hence initiating one. - status := &chunkRequestStatus{ + status = &chunkRequestStatus{ Locators: locators, RequestInfo: request.ChunkDataPackRequestInfo, } @@ -84,7 +74,6 @@ func (cs *ChunkRequests) Add(request *verification.ChunkDataPackRequest) bool { return nil } - status := toChunkRequestStatus(entity) if _, ok := status.Locators[chunkLocatorID]; ok { return fmt.Errorf("chunk request exists with same locator (result_id=%x, chunk_index=%d)", request.Locator.ResultID, request.Locator.Index) } @@ -101,13 +90,6 @@ func (cs *ChunkRequests) Add(request *verification.ChunkDataPackRequest) bool { return err == nil } -// Remove provides deletion functionality from the memory pool. -// If there is a chunk request with this ID, Remove removes it and returns true. -// Otherwise it returns false. -func (cs *ChunkRequests) Remove(chunkID flow.Identifier) bool { - return cs.Backend.Remove(chunkID) -} - // PopAll atomically returns all locators associated with this chunk ID while clearing out the // chunk request status for this chunk id. // Boolean return value indicates whether there are requests in the memory pool associated @@ -115,12 +97,12 @@ func (cs *ChunkRequests) Remove(chunkID flow.Identifier) bool { func (cs *ChunkRequests) PopAll(chunkID flow.Identifier) (chunks.LocatorMap, bool) { var locators map[flow.Identifier]*chunks.Locator - err := cs.Backend.Run(func(backdata mempool.BackData) error { - entity, exists := backdata.ByID(chunkID) + err := cs.Backend.Run(func(backdata mempool.BackData[flow.Identifier, *chunkRequestStatus]) error { + status, exists := backdata.Get(chunkID) if !exists { return fmt.Errorf("not exist") } - locators = toChunkRequestStatus(entity).Locators + locators = status.Locators _, removed := backdata.Remove(chunkID) if !removed { @@ -143,14 +125,13 @@ func (cs *ChunkRequests) PopAll(chunkID flow.Identifier) (chunks.LocatorMap, boo // // The increments are done atomically, thread-safe, and in isolation. 
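Add and PopAll above compose several backdata operations under a single lock acquisition via Backend.Run; without it, a Get followed by Add or Remove would race with other writers. A minimal sketch of that read-modify-write pattern with the generic signature (the string keys and locator slice are illustrative, not this mempool's real types):

```go
package main

import (
	"fmt"

	"github.com/onflow/flow-go/module/mempool"
	"github.com/onflow/flow-go/module/mempool/stdmap"
)

func main() {
	pool := stdmap.NewBackend[string, []string]()

	// Atomically append to the slice stored under a key, creating it on
	// first use. Run holds the Backend's write lock for the whole functor,
	// so the Get/Add/Adjust sequence cannot interleave with other writers.
	appendLocator := func(key, locator string) error {
		return pool.Run(func(backdata mempool.BackData[string, []string]) error {
			if _, exists := backdata.Get(key); !exists {
				backdata.Add(key, []string{locator})
				return nil
			}
			backdata.Adjust(key, func(locators []string) []string {
				return append(locators, locator)
			})
			return nil
		})
	}

	_ = appendLocator("chunk-1", "result-A")
	_ = appendLocator("chunk-1", "result-B")
	locators, _ := pool.Get("chunk-1")
	fmt.Println(locators) // [result-A result-B]
}
```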
func (cs *ChunkRequests) IncrementAttempt(chunkID flow.Identifier) bool { - err := cs.Backend.Run(func(backdata mempool.BackData) error { - entity, exists := backdata.ByID(chunkID) + err := cs.Backend.Run(func(backdata mempool.BackData[flow.Identifier, *chunkRequestStatus]) error { + status, exists := backdata.Get(chunkID) if !exists { return fmt.Errorf("not exist") } - chunk := toChunkRequestStatus(entity) - chunk.Attempt++ - chunk.LastAttempt = time.Now() + status.Attempt++ + status.LastAttempt = time.Now() return nil }) @@ -161,8 +142,8 @@ func (cs *ChunkRequests) IncrementAttempt(chunkID flow.Identifier) bool { func (cs *ChunkRequests) All() verification.ChunkDataPackRequestInfoList { all := cs.Backend.All() requestInfoList := verification.ChunkDataPackRequestInfoList{} - for _, entity := range all { - requestInfo := toChunkRequestStatus(entity).RequestInfo + for _, status := range all { + requestInfo := status.RequestInfo requestInfoList = append(requestInfoList, &requestInfo) } return requestInfoList @@ -180,12 +161,11 @@ func (cs *ChunkRequests) UpdateRequestHistory(chunkID flow.Identifier, updater m var retryAfter time.Duration var attempts uint64 - err := cs.Backend.Run(func(backdata mempool.BackData) error { - entity, exists := backdata.ByID(chunkID) + err := cs.Backend.Run(func(backdata mempool.BackData[flow.Identifier, *chunkRequestStatus]) error { + status, exists := backdata.Get(chunkID) if !exists { return fmt.Errorf("not exist") } - status := toChunkRequestStatus(entity) var ok bool attempts, retryAfter, ok = updater(status.Attempt, status.RetryAfter) @@ -205,11 +185,6 @@ func (cs *ChunkRequests) UpdateRequestHistory(chunkID flow.Identifier, updater m return attempts, lastAttempt, retryAfter, err == nil } -// Size returns total number of chunk requests in the memory pool. -func (cs ChunkRequests) Size() uint { - return cs.Backend.Size() -} - // chunkRequestStatus is an internal data type for ChunkRequests mempool. It acts as a wrapper for ChunkDataRequests, maintaining // some auxiliary attributes that are internal to ChunkRequests. type chunkRequestStatus struct { @@ -219,11 +194,3 @@ type chunkRequestStatus struct { RetryAfter time.Duration // interval until request should be retried. Attempt uint64 // number of times this chunk request has been dispatched in the network. } - -func (c chunkRequestStatus) ID() flow.Identifier { - return c.RequestInfo.ChunkID -} - -func (c chunkRequestStatus) Checksum() flow.Identifier { - return c.RequestInfo.ChunkID -} diff --git a/module/mempool/stdmap/chunk_requests_test.go b/module/mempool/stdmap/chunk_requests_test.go index 82d8a0eeae9..9143d1ad76f 100644 --- a/module/mempool/stdmap/chunk_requests_test.go +++ b/module/mempool/stdmap/chunk_requests_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/engine/verification/requester" - "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" "github.com/onflow/flow-go/module/mempool" @@ -204,10 +203,7 @@ func TestAddingDuplicateChunkIDs(t *testing.T) { // adding another request for the same tuple of (chunkID, resultID, chunkIndex) // is deduplicated. 
require.False(t, requests.Add(&verification.ChunkDataPackRequest{ - Locator: chunks.Locator{ - ResultID: thisReq.ResultID, - Index: thisReq.Index, - }, + Locator: *unittest.ChunkLocatorFixture(thisReq.ResultID, thisReq.Index), ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{ ChunkID: thisReq.ChunkID, }, @@ -215,10 +211,7 @@ func TestAddingDuplicateChunkIDs(t *testing.T) { // adding another request for the same chunk ID but different result ID is stored. otherReq := &verification.ChunkDataPackRequest{ - Locator: chunks.Locator{ - ResultID: unittest.IdentifierFixture(), - Index: thisReq.Index, - }, + Locator: *unittest.ChunkLocatorFixture(unittest.IdentifierFixture(), thisReq.Index), ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{ ChunkID: thisReq.ChunkID, Agrees: unittest.IdentifierListFixture(2), @@ -239,8 +232,8 @@ func TestAddingDuplicateChunkIDs(t *testing.T) { require.ElementsMatch(t, thisReq.Agrees.Union(otherReq.Agrees), reqInfoList[0].Agrees) require.ElementsMatch(t, thisReq.Disagrees.Union(otherReq.Disagrees), reqInfoList[0].Disagrees) - var thisTargets flow.IdentifierList = thisReq.Targets.NodeIDs() - var otherTargets flow.IdentifierList = otherReq.Targets.NodeIDs() + thisTargets := thisReq.Targets.NodeIDs() + otherTargets := otherReq.Targets.NodeIDs() require.ElementsMatch(t, thisTargets.Union(otherTargets), reqInfoList[0].Targets.NodeIDs()) locators, ok := requests.PopAll(thisReq.ChunkID) diff --git a/module/mempool/stdmap/chunk_statuses.go b/module/mempool/stdmap/chunk_statuses.go index 25d2cf38930..6c5b1c31166 100644 --- a/module/mempool/stdmap/chunk_statuses.go +++ b/module/mempool/stdmap/chunk_statuses.go @@ -1,102 +1,16 @@ package stdmap import ( - "fmt" - - "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" ) // ChunkStatuses is an implementation of in-memory storage for maintaining the chunk status data objects. +// Stored chunk statuses are keyed by chunks.Locator ID. type ChunkStatuses struct { - *Backend + *Backend[flow.Identifier, *verification.ChunkStatus] } func NewChunkStatuses(limit uint) *ChunkStatuses { - return &ChunkStatuses{ - Backend: NewBackend(WithLimit(limit)), - } -} - -func chunkStatus(entity flow.Entity) *verification.ChunkStatus { - status, ok := entity.(inMemChunkStatus) - if !ok { - panic(fmt.Sprintf("could not convert the entity into chunk status from the mempool: %v", entity)) - } - return &verification.ChunkStatus{ - ChunkIndex: status.ChunkIndex, - ExecutionResult: status.ExecutionResult, - BlockHeight: status.BlockHeight, - } -} - -// Get returns a chunk status by its chunk index and result ID. -// There is a one-to-one correspondence between the chunk statuses in memory, and -// their pair of chunk index and result id. -func (cs ChunkStatuses) Get(chunkIndex uint64, resultID flow.Identifier) (*verification.ChunkStatus, bool) { - entity, exists := cs.Backend.ByID(chunks.ChunkLocatorID(resultID, chunkIndex)) - if !exists { - return nil, false - } - - status := chunkStatus(entity) - return status, true -} - -// Add provides insertion functionality into the memory pool. -// The insertion is only successful if there is no duplicate status with the same -// chunk ID in the memory. Otherwise, it aborts the insertion and returns false. 
-func (cs *ChunkStatuses) Add(status *verification.ChunkStatus) bool { - return cs.Backend.Add(inMemChunkStatus{ - ChunkIndex: status.ChunkIndex, - ExecutionResult: status.ExecutionResult, - BlockHeight: status.BlockHeight, - }) -} - -// Remove provides deletion functionality from the memory pool based on the pair of -// chunk index and result id. -// If there is a chunk status associated with this pair, Remove removes it and returns true. -// Otherwise, it returns false. -func (cs *ChunkStatuses) Remove(chunkIndex uint64, resultID flow.Identifier) bool { - return cs.Backend.Remove(chunks.ChunkLocatorID(resultID, chunkIndex)) -} - -// All returns all chunk statuses stored in this memory pool. -func (cs ChunkStatuses) All() []*verification.ChunkStatus { - all := cs.Backend.All() - statuses := make([]*verification.ChunkStatus, 0, len(all)) - for _, entity := range all { - chunk := chunkStatus(entity) - statuses = append(statuses, chunk) - } - return statuses -} - -// Size returns total number of chunk statuses in the memory pool. -func (cs ChunkStatuses) Size() uint { - return cs.Backend.Size() -} - -// inMemChunkStatus is an internal type for storing ChunkStatus in the mempool. -// -// It is the same as ChunkStatus, but additionally it implements an Entity type which -// makes it storable in the mempool. -// Note that as an entity, the ID of a inMemChunkStatus is computed as the ID of the chunk locator -// it represents. However, the usage of ID method is only confined to maintaining it on the mempool. -// That is the motivation behind making it an internal type to make sure that no further decision out of -// this package is taken based on ID of inMemChunkStatus. -type inMemChunkStatus struct { - ChunkIndex uint64 - BlockHeight uint64 - ExecutionResult *flow.ExecutionResult -} - -func (s inMemChunkStatus) ID() flow.Identifier { - return chunks.ChunkLocatorID(s.ExecutionResult.ID(), s.ChunkIndex) -} - -func (s inMemChunkStatus) Checksum() flow.Identifier { - return chunks.ChunkLocatorID(s.ExecutionResult.ID(), s.ChunkIndex) + return &ChunkStatuses{NewBackend(WithLimit[flow.Identifier, *verification.ChunkStatus](limit))} } diff --git a/module/mempool/stdmap/collections.go b/module/mempool/stdmap/collections.go deleted file mode 100644 index 51b91739191..00000000000 --- a/module/mempool/stdmap/collections.go +++ /dev/null @@ -1,52 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package stdmap - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// Collections implements a mempool storing collections. -type Collections struct { - *Backend -} - -// NewCollections creates a new memory pool for collection. -func NewCollections(limit uint) (*Collections, error) { - c := &Collections{ - Backend: NewBackend(WithLimit(limit)), - } - return c, nil -} - -// Add adds a collection to the mempool. -func (c *Collections) Add(coll *flow.Collection) bool { - added := c.Backend.Add(coll) - return added -} - -// Remove removes a collection by ID from memory -func (c *Collections) Remove(collID flow.Identifier) bool { - ok := c.Backend.Remove(collID) - return ok -} - -// ByID returns the collection with the given ID from the mempool. -func (c *Collections) ByID(collID flow.Identifier) (*flow.Collection, bool) { - entity, exists := c.Backend.ByID(collID) - if !exists { - return nil, false - } - coll := entity.(*flow.Collection) - return coll, true -} - -// All returns all collections from the mempool. 
-func (c *Collections) All() []*flow.Collection { - entities := c.Backend.All() - colls := make([]*flow.Collection, 0, len(entities)) - for _, entity := range entities { - colls = append(colls, entity.(*flow.Collection)) - } - return colls -} diff --git a/module/mempool/stdmap/eject.go b/module/mempool/stdmap/eject.go index 3ed2d59683a..fa6c2dfcc0c 100644 --- a/module/mempool/stdmap/eject.go +++ b/module/mempool/stdmap/eject.go @@ -1,14 +1,12 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap import ( + "fmt" "math" - "math/rand" "sort" "sync" - "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/rand" ) // this is the threshold for how much over the guaranteed capacity the @@ -31,49 +29,33 @@ const overCapacityThreshold = 128 // concurrency (specifically, it locks the mempool during ejection). // - The implementation should be non-blocking (though, it is allowed to // take a bit of time; the mempool will just be locked during this time). -type BatchEjectFunc func(b *Backend) bool -type EjectFunc func(b *Backend) (flow.Identifier, flow.Entity, bool) - -// EjectTrueRandom relies on a random generator to pick a random entity to eject from the -// entity set. It will, on average, iterate through half the entities of the set. However, -// it provides us with a truly evenly distributed random selection. -func EjectTrueRandom(b *Backend) (flow.Identifier, flow.Entity, bool) { - var entity flow.Entity - var entityID flow.Identifier - - bFound := false - i := 0 - n := rand.Intn(int(b.backData.Size())) - for entityID, entity = range b.backData.All() { - if i == n { - bFound = true - break - } - i++ - } - return entityID, entity, bFound -} +type BatchEjectFunc[K comparable, V any] func(b *Backend[K, V]) (bool, error) +type EjectFunc[K comparable, V any] func(b *Backend[K, V]) (K, V, bool) -// EjectTrueRandomFast checks if the map size is beyond the +// EjectRandomFast checks if the map size is beyond the // threshold size, and will iterate through them and eject unneeded -// entries if that is the case. Return values are unused +// entries if that is the case. It returns whether any entries were ejected, and an error +// if drawing the random indices to eject fails. -func EjectTrueRandomFast(b *Backend) bool { - currentSize := b.backData.Size() +func EjectRandomFast[K comparable, V any](b *Backend[K, V]) (bool, error) { + currentSize := b.mutableBackData.Size() if b.guaranteedCapacity >= currentSize { - return false + return false, nil } // At this point, we know that currentSize > b.guaranteedCapacity. As // currentSize fits into an int, b.guaranteedCapacity must also fit.
overcapacity := currentSize - b.guaranteedCapacity if overcapacity <= overCapacityThreshold { - return false + return false, nil } // Randomly select indices of elements to remove: mapIndices := make([]int, 0, overcapacity) for i := overcapacity; i > 0; i-- { - mapIndices = append(mapIndices, rand.Intn(int(currentSize))) + index, err := rand.Uintn(currentSize) + if err != nil { + return false, fmt.Errorf("random generation failed: %w", err) + } + mapIndices = append(mapIndices, int(index)) } sort.Ints(mapIndices) // inplace @@ -83,11 +65,11 @@ idx := 0 // index into mapIndices next2Remove := mapIndices[0] // index of the element to be removed next i := 0 // index into the entities map - for entityID, entity := range b.backData.All() { + for key, value := range b.mutableBackData.All() { if i == next2Remove { - b.backData.Remove(entityID) // remove entity + b.mutableBackData.Remove(key) // remove entity for _, callback := range b.ejectionCallbacks { - callback(entity) // notify callback + callback(value) // notify callback } idx++ @@ -99,42 +81,42 @@ } if idx == int(overcapacity) { - return true + return true, nil } next2Remove = mapIndices[idx] } i++ } - return true + return true, nil } // EjectPanic simply panics, crashing the program. Useful when cache is not expected // to grow beyond certain limits, but ejecting is not applicable -func EjectPanic(b *Backend) (flow.Identifier, flow.Entity, bool) { +func EjectPanic[K comparable, V any](b *Backend[K, V]) (K, V, bool) { panic("unexpected: mempool size over the limit") } // LRUEjector provides a swift FIFO ejection functionality -type LRUEjector struct { +type LRUEjector[K comparable] struct { sync.Mutex - table map[flow.Identifier]uint64 // keeps sequence number of entities it tracks - seqNum uint64 // keeps the most recent sequence number + table map[K]uint64 // keeps sequence number of values it tracks + seqNum uint64 // keeps the most recent sequence number } -func NewLRUEjector() *LRUEjector { - return &LRUEjector{ - table: make(map[flow.Identifier]uint64), +func NewLRUEjector[K comparable]() *LRUEjector[K] { + return &LRUEjector[K]{ + table: make(map[K]uint64), seqNum: 0, } } // Track should be called every time a new entity is added to the mempool. // It tracks the entity for later ejection. -func (q *LRUEjector) Track(entityID flow.Identifier) { +func (q *LRUEjector[K]) Track(key K) { q.Lock() defer q.Unlock() - if _, ok := q.table[entityID]; ok { + if _, ok := q.table[key]; ok { // skips adding duplicate item return } @@ -144,46 +126,38 @@ // With proper resource cleanups by the mempools, the Eject is supposed // as a very infrequent operation. However, further optimizations on // Eject efficiency is needed. - q.table[entityID] = q.seqNum + q.table[key] = q.seqNum q.seqNum++ } // Untrack simply removes the tracker of the ejector off the entityID -func (q *LRUEjector) Untrack(entityID flow.Identifier) { +func (q *LRUEjector[K]) Untrack(key K) { q.Lock() defer q.Unlock() - delete(q.table, entityID) + delete(q.table, key) } -// Eject implements EjectFunc for LRUEjector. It finds the entity with the lowest sequence number (i.e., -// the oldest entity). It also untracks. This is using a linear search -func (q *LRUEjector) Eject(b *Backend) (flow.Identifier, flow.Entity, bool) { +// Eject implements LRU ejection for LRUEjector. It finds the key with the lowest sequence number (i.e., +// the oldest tracked key), untracks it, and returns it. It uses a linear search. +func Eject[K comparable, V any](q *LRUEjector[K], b *Backend[K, V]) K { q.Lock() defer q.Unlock() // finds the oldest entity oldestSQ := uint64(math.MaxUint64) - var oldestID flow.Identifier - for _, id := range b.backData.Identifiers() { + var oldestID K + for _, id := range b.mutableBackData.Keys() { if sq, ok := q.table[id]; ok { if sq < oldestSQ { oldestID = id oldestSQ = sq } - } } - // TODO: don't do a lookup if it isn't necessary - oldestEntity, ok := b.backData.ByID(oldestID) - - if !ok { - oldestID, oldestEntity, ok = EjectTrueRandom(b) - } - // untracks the oldest id as it is supposed to be ejected delete(q.table, oldestID) - return oldestID, oldestEntity, ok + return oldestID } diff --git a/module/mempool/stdmap/eject_test.go b/module/mempool/stdmap/eject_test.go index cee1974e840..a9ddefb7901 100644 --- a/module/mempool/stdmap/eject_test.go +++ b/module/mempool/stdmap/eject_test.go @@ -13,7 +13,7 @@ import ( // TestLRUEjector_Track evaluates that tracking a new item adds the item to the ejector table. func TestLRUEjector_Track(t *testing.T) { - ejector := NewLRUEjector() + ejector := NewLRUEjector[flow.Identifier]() // ejector's table should be empty assert.Len(t, ejector.table, 0) @@ -39,7 +39,7 @@ // TestLRUEjector_Track_Duplicate evaluates that tracking a duplicate item // does not change the internal state of the ejector. func TestLRUEjector_Track_Duplicate(t *testing.T) { - ejector := NewLRUEjector() + ejector := NewLRUEjector[flow.Identifier]() // creates and adds an item to the ejector item := flow.Identifier{0x00} @@ -70,7 +70,7 @@ // changes the state of ejector properly, i.e., items reside on the // memory, and sequence number changed accordingly. func TestLRUEjector_Track_Many(t *testing.T) { - ejector := NewLRUEjector() + ejector := NewLRUEjector[flow.Identifier]() // creates and tracks 100 items size := 100 @@ -98,7 +98,7 @@ // TestLRUEjector_Untrack_One evaluates that untracking an existing item // removes it from the ejector state and changes the state accordingly. func TestLRUEjector_Untrack_One(t *testing.T) { - ejector := NewLRUEjector() + ejector := NewLRUEjector[flow.Identifier]() // creates and adds an item to the ejector item := flow.Identifier{0x00} @@ -132,7 +132,7 @@ // TestLRUEjector_Untrack_Duplicate evaluates that untracking an item twice // removes it from the ejector state only once and changes the state safely. func TestLRUEjector_Untrack_Duplicate(t *testing.T) { - ejector := NewLRUEjector() + ejector := NewLRUEjector[flow.Identifier]() // creates and adds two items to the ejector item1 := flow.Identifier{0x00} @@ -175,19 +175,19 @@ // TestLRUEjector_UntrackEject evaluates that untracking the next ejectable item // properly changes the next ejectable item in the ejector.
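For reference, the batch-ejection trigger in EjectRandomFast above reduces to two early-return checks; a self-contained illustrative sketch (the threshold is passed in here rather than read from the package constant):

```go
package example

// needsBatchEject mirrors the early returns in EjectRandomFast: nothing is
// ejected while the pool is within its guaranteed capacity, and nothing is
// ejected until the overshoot exceeds the batch threshold (128 in eject.go),
// so removals happen in amortized batches rather than one element at a time.
func needsBatchEject(size, guaranteedCapacity, threshold uint) bool {
	if guaranteedCapacity >= size {
		return false
	}
	overcapacity := size - guaranteedCapacity
	return overcapacity > threshold
}
```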
func TestLRUEjector_UntrackEject(t *testing.T) { - ejector := NewLRUEjector() + ejector := NewLRUEjector[flow.Identifier]() // creates and tracks 100 items size := 100 - backEnd := NewBackend() + backEnd := NewBackend[flow.Identifier, *unittest.MockEntity]() - items := make([]flow.Identifier, size) + items := make(flow.IdentifierList, size) for i := 0; i < size; i++ { mockEntity := unittest.MockEntityFixture() - require.True(t, backEnd.Add(mockEntity)) + require.True(t, backEnd.Add(mockEntity.Identifier, mockEntity)) - id := mockEntity.ID() + id := mockEntity.Identifier ejector.Track(id) items[i] = id } @@ -195,27 +195,27 @@ func TestLRUEjector_UntrackEject(t *testing.T) { // untracks the oldest item ejector.Untrack(items[0]) - // next ejectable item should be the second oldest item - id, _, _ := ejector.Eject(backEnd) + // next ejectable item should be the second-oldest item + id := Eject(ejector, backEnd) assert.Equal(t, id, items[1]) } // TestLRUEjector_EjectAll adds many item to the ejector and then ejects them // all one by one and evaluates an LRU ejection behavior. func TestLRUEjector_EjectAll(t *testing.T) { - ejector := NewLRUEjector() + ejector := NewLRUEjector[flow.Identifier]() // creates and tracks 100 items size := 100 - backEnd := NewBackend() + backEnd := NewBackend[flow.Identifier, *unittest.MockEntity]() - items := make([]flow.Identifier, size) + items := make(flow.IdentifierList, size) for i := 0; i < size; i++ { mockEntity := unittest.MockEntityFixture() - require.True(t, backEnd.Add(mockEntity)) + require.True(t, backEnd.Add(mockEntity.Identifier, mockEntity)) - id := mockEntity.ID() + id := mockEntity.Identifier ejector.Track(id) items[i] = id } @@ -224,7 +224,7 @@ func TestLRUEjector_EjectAll(t *testing.T) { // ejects one by one for i := 0; i < size; i++ { - id, _, _ := ejector.Eject(backEnd) + id := Eject(ejector, backEnd) require.Equal(t, id, items[i]) } } diff --git a/module/mempool/stdmap/guarantees.go b/module/mempool/stdmap/guarantees.go index f8e2dbefb8d..160ed354565 100644 --- a/module/mempool/stdmap/guarantees.go +++ b/module/mempool/stdmap/guarantees.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap import ( @@ -8,40 +6,12 @@ import ( // Guarantees implements the collections memory pool of the consensus nodes, // used to store collection guarantees and to generate block payloads. +// Stored Collection Guarantees are keyed by collection ID. type Guarantees struct { - *Backend + *Backend[flow.Identifier, *flow.CollectionGuarantee] } // NewGuarantees creates a new memory pool for collection guarantees. -func NewGuarantees(limit uint) (*Guarantees, error) { - g := &Guarantees{ - Backend: NewBackend(WithLimit(limit)), - } - - return g, nil -} - -// Add adds a collection guarantee guarantee to the mempool. -func (g *Guarantees) Add(guarantee *flow.CollectionGuarantee) bool { - return g.Backend.Add(guarantee) -} - -// ByID returns the collection guarantee with the given ID from the mempool. -func (g *Guarantees) ByID(collID flow.Identifier) (*flow.CollectionGuarantee, bool) { - entity, exists := g.Backend.ByID(collID) - if !exists { - return nil, false - } - guarantee := entity.(*flow.CollectionGuarantee) - return guarantee, true -} - -// All returns all collection guarantees from the mempool. 
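As the updated guarantees test below shows, All() on the generic backend now yields a map keyed by the caller-supplied key rather than a slice of entities. A short usage sketch under that assumption:

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/mempool/stdmap"
)

func listGuarantees(pool *stdmap.Guarantees, g *flow.CollectionGuarantee) {
	pool.Add(g.ID(), g) // the key (the guarantee's collection ID) is supplied by the caller
	for id, guarantee := range pool.All() {
		fmt.Printf("guarantee %x references collection %x\n", id, guarantee.CollectionID)
	}
}
```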
-func (g *Guarantees) All() []*flow.CollectionGuarantee { - entities := g.Backend.All() - guarantees := make([]*flow.CollectionGuarantee, 0, len(entities)) - for _, entity := range entities { - guarantees = append(guarantees, entity.(*flow.CollectionGuarantee)) - } - return guarantees +func NewGuarantees(limit uint) *Guarantees { + return &Guarantees{NewBackend(WithLimit[flow.Identifier, *flow.CollectionGuarantee](limit))} } diff --git a/module/mempool/stdmap/guarantees_test.go b/module/mempool/stdmap/guarantees_test.go index 7bc356dd21b..a86f57f903a 100644 --- a/module/mempool/stdmap/guarantees_test.go +++ b/module/mempool/stdmap/guarantees_test.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap_test import ( @@ -20,16 +18,15 @@ func TestGuaranteePool(t *testing.T) { CollectionID: flow.Identifier{0x02}, } - pool, err := stdmap.NewGuarantees(1000) - require.NoError(t, err) + pool := stdmap.NewGuarantees(1000) t.Run("should be able to add first", func(t *testing.T) { - added := pool.Add(item1) + added := pool.Add(item1.ID(), item1) assert.True(t, added) }) t.Run("should be able to add second", func(t *testing.T) { - added := pool.Add(item2) + added := pool.Add(item2.ID(), item2) assert.True(t, added) }) @@ -39,7 +36,7 @@ func TestGuaranteePool(t *testing.T) { }) t.Run("should be able to get first", func(t *testing.T) { - got, exists := pool.ByID(item1.ID()) + got, exists := pool.Get(item1.ID()) assert.True(t, exists) assert.Equal(t, item1, got) }) @@ -52,6 +49,9 @@ func TestGuaranteePool(t *testing.T) { t.Run("should be able to retrieve all", func(t *testing.T) { items := pool.All() assert.Len(t, items, 1) - assert.Equal(t, item1, items[0]) + val, exists := items[item1.ID()] + require.True(t, exists) + assert.Equal(t, item1, val) + }) } diff --git a/module/mempool/stdmap/identifier_map.go b/module/mempool/stdmap/identifier_map.go index 4bb4c6f0716..35adf0df63f 100644 --- a/module/mempool/stdmap/identifier_map.go +++ b/module/mempool/stdmap/identifier_map.go @@ -5,83 +5,48 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/mempool" - "github.com/onflow/flow-go/module/mempool/model" ) -// IdentifierMap represents a concurrency-safe memory pool for IdMapEntity. +// IdentifierMap represents a concurrency-safe memory pool for sets of Identifier (keyed by some Identifier). type IdentifierMap struct { - *Backend + *Backend[flow.Identifier, map[flow.Identifier]struct{}] } -// NewIdentifierMap creates a new memory pool for IdMapEntity. -func NewIdentifierMap(limit uint) (*IdentifierMap, error) { - i := &IdentifierMap{ - Backend: NewBackend(WithLimit(limit)), - } - return i, nil +// NewIdentifierMap creates a new memory pool for sets of Identifier (keyed by some Identifier). +func NewIdentifierMap(limit uint) *IdentifierMap { + return &IdentifierMap{NewBackend(WithLimit[flow.Identifier, map[flow.Identifier]struct{}](limit))} } -// Append will append the id to the list of identifiers associated with key. -func (i *IdentifierMap) Append(key, id flow.Identifier) error { - return i.Backend.Run(func(backdata mempool.BackData) error { - var ids map[flow.Identifier]struct{} - entity, ok := backdata.ByID(key) - if !ok { - // no record with key is available in the mempool, - // initializes ids. 
- ids = make(map[flow.Identifier]struct{}) - } else { - idMapEntity, ok := entity.(model.IdMapEntity) - if !ok { - return fmt.Errorf("could not assert entity to IdMapEntity") - } - - ids = idMapEntity.IDs - if _, ok := ids[id]; ok { - // id is already associated with the key - // no need to append - return nil - } - - // removes map entry associated with key for update - if _, removed := backdata.Remove(key); !removed { - return fmt.Errorf("potential race condition on removing from identifier map") - } - } - - // appends id to the ids list - ids[id] = struct{}{} - - // adds the new ids list associated with key to mempool - idMapEntity := model.IdMapEntity{ - Key: key, - IDs: ids, - } - - if added := backdata.Add(key, idMapEntity); !added { - return fmt.Errorf("potential race condition on adding to identifier map") - } - - return nil +// Append will add the id to the set of identifiers associated with key. +// If the key does not exist, a new set containing only `id` is stored. +// If `id` already exists in the set stored under `key`, Append is a no-op. +func (i *IdentifierMap) Append(key, id flow.Identifier) { + i.Backend.AdjustWithInit(key, func(stored map[flow.Identifier]struct{}) map[flow.Identifier]struct{} { + stored[id] = struct{}{} + return stored + }, func() map[flow.Identifier]struct{} { + return map[flow.Identifier]struct{}{id: {}} }) } -// Get returns list of all identifiers associated with key and true, if the key exists in the mempool. +// Get returns the set of all identifiers associated with key and true if the key exists in the mempool. +// The set is returned as an unordered list with no duplicates. // Otherwise it returns nil and false. -func (i *IdentifierMap) Get(key flow.Identifier) ([]flow.Identifier, bool) { - ids := make([]flow.Identifier, 0) - err := i.Run(func(backdata mempool.BackData) error { - entity, ok := backdata.ByID(key) +func (i *IdentifierMap) Get(key flow.Identifier) (flow.IdentifierList, bool) { + var ids flow.IdentifierList + // We must read the stored set and copy it into the returned list inside a single critical section: + // other operations on this mempool mutate the stored map in place, so all accesses to it must be + // mutually exclusive. If this goroutine were suspended between retrieving the map and copying it, + // another goroutine could modify the map concurrently and we would risk a data race. + err := i.Run(func(backdata mempool.BackData[flow.Identifier, map[flow.Identifier]struct{}]) error { + idsMap, ok := backdata.Get(key) if !ok { return fmt.Errorf("could not retrieve key from backend") } - mapEntity, ok := entity.(model.IdMapEntity) - if !ok { - return fmt.Errorf("could not assert entity as IdMapEntity") - } - - for id := range mapEntity.IDs { + ids = make(flow.IdentifierList, 0, len(idsMap)) + for id := range idsMap { ids = append(ids, id) } @@ -94,60 +59,22 @@ func (i *IdentifierMap) Get(key flow.Identifier) ([]flow.Identifier, bool) { return ids, true } -// Has returns true if the key exists in the map, i.e., there is at least an id -// attached to it. -func (i *IdentifierMap) Has(key flow.Identifier) bool { - return i.Backend.Has(key) -} - -// Remove removes the given key with all associated identifiers. -func (i *IdentifierMap) Remove(key flow.Identifier) bool { - return i.Backend.Remove(key) -} - // RemoveIdFromKey removes the id from the list of identifiers associated with key.
// If the list becomes empty, it also removes the key from the map. func (i *IdentifierMap) RemoveIdFromKey(key, id flow.Identifier) error { - err := i.Backend.Run(func(backdata mempool.BackData) error { - // var ids map[flow.Identifier]struct{} - entity, ok := backdata.ByID(key) + err := i.Backend.Run(func(backData mempool.BackData[flow.Identifier, map[flow.Identifier]struct{}]) error { + idsMap, ok := backData.Get(key) if !ok { // entity key has already been removed return nil } - idMapEntity, ok := entity.(model.IdMapEntity) - if !ok { - return fmt.Errorf("could not assert entity to IdMapEntity") - } - - if _, ok := idMapEntity.IDs[id]; !ok { - // id has already been removed from the key map - return nil - } - - // removes map entry associated with key for update - if _, removed := backdata.Remove(key); !removed { - return fmt.Errorf("potential race condition on removing from identifier map") - } - - // removes id from the secondary map of the key - delete(idMapEntity.IDs, id) - - if len(idMapEntity.IDs) == 0 { - // all ids related to key are removed, so there is no need - // to add key back to the idMapEntity - return nil - } - - // adds the new ids list associated with key to mempool - idMapEntity = model.IdMapEntity{ - Key: key, - IDs: idMapEntity.IDs, - } - - if added := backdata.Add(key, idMapEntity); !added { - return fmt.Errorf("potential race condition on adding to identifier map") + delete(idsMap, id) // mutates the map stored in backData + if len(idsMap) == 0 { + // if the set stored under the key is empty, remove the key + if _, removed := backData.Remove(key); !removed { + return fmt.Errorf("sanity check failed: race condition observed removing from identifier map (key=%x, id=%x)", key, id) + } } return nil @@ -156,21 +83,12 @@ func (i *IdentifierMap) RemoveIdFromKey(key, id flow.Identifier) error { return err } -// Size returns number of IdMapEntities in mempool -func (i *IdentifierMap) Size() uint { - return i.Backend.Size() -} - -// Keys returns a list of all keys in the mempool -func (i *IdentifierMap) Keys() ([]flow.Identifier, bool) { - entities := i.Backend.All() - keys := make([]flow.Identifier, 0) - for _, entity := range entities { - idMapEntity, ok := entity.(model.IdMapEntity) - if !ok { - return nil, false - } - keys = append(keys, idMapEntity.Key) +// Keys returns a list of all keys in the mempool. 
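Taken together, the AdjustWithInit-based Append and the removal semantics above give the following behavior; a usage sketch with fixture identifiers:

```go
package example

import (
	"github.com/onflow/flow-go/module/mempool/stdmap"
	"github.com/onflow/flow-go/utils/unittest"
)

func identifierMapUsage() {
	m := stdmap.NewIdentifierMap(100)
	key, id1, id2 := unittest.IdentifierFixture(), unittest.IdentifierFixture(), unittest.IdentifierFixture()

	m.Append(key, id1) // init path: stores the new set {id1} under key
	m.Append(key, id1) // adjust path: id1 already present, effectively a no-op
	m.Append(key, id2) // adjust path: set grows to {id1, id2}

	ids, ok := m.Get(key) // ids is an unordered flow.IdentifierList of length 2
	_, _ = ids, ok

	_ = m.RemoveIdFromKey(key, id1) // set shrinks to {id2}
	_ = m.RemoveIdFromKey(key, id2) // set becomes empty, so key is removed as well
}
```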
+func (i *IdentifierMap) Keys() (flow.IdentifierList, bool) { + all := i.Backend.All() + keys := make(flow.IdentifierList, 0, len(all)) + for key := range all { + keys = append(keys, key) } return keys, true } diff --git a/module/mempool/stdmap/identifier_map_test.go b/module/mempool/stdmap/identifier_map_test.go index e493438e4b8..f09a9ebd823 100644 --- a/module/mempool/stdmap/identifier_map_test.go +++ b/module/mempool/stdmap/identifier_map_test.go @@ -13,17 +13,12 @@ import ( ) func TestIdentiferMap(t *testing.T) { - idMap, err := NewIdentifierMap(10) - - t.Run("creating new mempool", func(t *testing.T) { - require.NoError(t, err) - }) + idMap := NewIdentifierMap(10) key1 := unittest.IdentifierFixture() id1 := unittest.IdentifierFixture() t.Run("appending id to new key", func(t *testing.T) { - err := idMap.Append(key1, id1) - require.NoError(t, err) + idMap.Append(key1, id1) // checks the existence of id1 for key ids, ok := idMap.Get(key1) @@ -33,8 +28,7 @@ func TestIdentiferMap(t *testing.T) { id2 := unittest.IdentifierFixture() t.Run("appending the second id", func(t *testing.T) { - err := idMap.Append(key1, id2) - require.NoError(t, err) + idMap.Append(key1, id2) // checks the existence of both id1 and id2 for key1 ids, ok := idMap.Get(key1) @@ -47,8 +41,7 @@ func TestIdentiferMap(t *testing.T) { // tests against existence of another key, with a shared id (id1) key2 := unittest.IdentifierFixture() t.Run("appending shared id to another key", func(t *testing.T) { - err := idMap.Append(key2, id1) - require.NoError(t, err) + idMap.Append(key2, id1) // checks the existence of both id1 and id2 for key1 ids, ok := idMap.Get(key1) @@ -109,21 +102,18 @@ func TestIdentiferMap(t *testing.T) { require.True(t, ok) assert.Contains(t, ids, id1) - err := idMap.Append(key2, id1) - require.NoError(t, err) + idMap.Append(key2, id1) }) t.Run("removing id from a key test", func(t *testing.T) { // creates key3 and adds id1 and id2 to it. key3 := unittest.IdentifierFixture() - err := idMap.Append(key3, id1) - require.NoError(t, err) - err = idMap.Append(key3, id2) - require.NoError(t, err) + idMap.Append(key3, id1) + idMap.Append(key3, id2) // removes id1 and id2 from key3 // removing id1 - err = idMap.RemoveIdFromKey(key3, id1) + err := idMap.RemoveIdFromKey(key3, id1) require.NoError(t, err) // key3 should still reside on idMap and id2 should be attached to it @@ -155,8 +145,7 @@ func TestIdentiferMap(t *testing.T) { // Running this test with `-race` flag detects and reports the existence of race condition if // it is the case. 
func TestRaceCondition(t *testing.T) { - idMap, err := NewIdentifierMap(10) - require.NoError(t, err) + idMap := NewIdentifierMap(10) wg := sync.WaitGroup{} @@ -166,7 +155,7 @@ func TestRaceCondition(t *testing.T) { go func() { defer wg.Done() - require.NoError(t, idMap.Append(key, id)) + idMap.Append(key, id) }() go func() { @@ -196,8 +185,7 @@ func TestCapacity(t *testing.T) { limit = 20 swarm = 20 ) - idMap, err := NewIdentifierMap(limit) - require.NoError(t, err) + idMap := NewIdentifierMap(limit) wg := sync.WaitGroup{} wg.Add(swarm) @@ -207,8 +195,7 @@ func TestCapacity(t *testing.T) { // adds an item on a separate goroutine key := unittest.IdentifierFixture() id := unittest.IdentifierFixture() - err := idMap.Append(key, id) - require.NoError(t, err) + idMap.Append(key, id) // evaluates that the size remains in the permissible range require.True(t, idMap.Size() <= uint(limit), diff --git a/module/mempool/stdmap/identifiers.go b/module/mempool/stdmap/identifiers.go deleted file mode 100644 index effd3f2ea35..00000000000 --- a/module/mempool/stdmap/identifiers.go +++ /dev/null @@ -1,51 +0,0 @@ -package stdmap - -import ( - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/mempool/model" -) - -// Identifiers represents a concurrency-safe memory pool for IDs. -type Identifiers struct { - *Backend -} - -// NewIdentifiers creates a new memory pool for identifiers. -func NewIdentifiers(limit uint) (*Identifiers, error) { - i := &Identifiers{ - Backend: NewBackend(WithLimit(limit)), - } - return i, nil -} - -// Add will add the given identifier to the memory pool or it will error if -// the identifier is already in the memory pool. -func (i *Identifiers) Add(id flow.Identifier) bool { - // wraps ID around an ID entity to be stored in the mempool - idEntity := &model.IdEntity{ - Id: id, - } - return i.Backend.Add(idEntity) -} - -// Has checks whether the mempool has the identifier -func (i *Identifiers) Has(id flow.Identifier) bool { - return i.Backend.Has(id) -} - -// Remove removes the given identifier from the memory pool; it will -// return true if the identifier was known and removed. -func (i *Identifiers) Remove(id flow.Identifier) bool { - return i.Backend.Remove(id) -} - -// All returns all identifiers stored in the mempool -func (i *Identifiers) All() flow.IdentifierList { - entities := i.Backend.All() - idEntities := make([]flow.Identifier, 0, len(entities)) - for _, entity := range entities { - idEntity := entity.(*model.IdEntity) - idEntities = append(idEntities, idEntity.Id) - } - return idEntities -} diff --git a/module/mempool/stdmap/incorporated_result_seals.go b/module/mempool/stdmap/incorporated_result_seals.go index 7202aac0f3a..321a7861443 100644 --- a/module/mempool/stdmap/incorporated_result_seals.go +++ b/module/mempool/stdmap/incorporated_result_seals.go @@ -1,8 +1,6 @@ package stdmap import ( - "log" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/mempool" ) @@ -11,11 +9,14 @@ type sealSet map[flow.Identifier]*flow.IncorporatedResultSeal // IncorporatedResultSeals implements the incorporated result seals memory pool // of the consensus nodes, used to store seals that need to be added to blocks. +// Incorporated result seals are keyed by the ID of the incorporated result. // ATTENTION: This data structure should NEVER eject seals because it can break liveness. // Modules that are using this structure expect that it NEVER ejects a seal. 
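The CAUTION note above is the crux of the design: the secondary index stays consistent with the primary map because both are only mutated while the Backend lock is held (inside Backend.Run). A stripped-down model of that pattern, with a plain mutex standing in for the Backend lock (not the actual implementation):

```go
package example

import "sync"

// indexedPool models a mempool with a secondary index (e.g. by block height).
// One mutex guards both structures, so they can never drift apart; this is
// what IncorporatedResultSeals gets by mutating byHeight only inside Backend.Run.
type indexedPool[K comparable, V any] struct {
	mu       sync.Mutex
	values   map[K]V
	byHeight map[uint64]map[K]struct{}
}

func (p *indexedPool[K, V]) remove(key K, height uint64) bool {
	p.mu.Lock() // single critical section covering the value map and its index
	defer p.mu.Unlock()
	if _, ok := p.values[key]; !ok {
		return false
	}
	delete(p.values, key)
	delete(p.byHeight[height], key) // delete on a nil inner map is a safe no-op
	return true
}
```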
type IncorporatedResultSeals struct { - *Backend - // index the seals by the height of the executed block + *Backend[flow.Identifier, *flow.IncorporatedResultSeal] + + // CAUTION: byHeight and lowestHeight are protected by the Backend lock and must be modified within `Backend.Run` + // byHeight indexes seals by the height of the executed block byHeight map[uint64]sealSet lowestHeight uint64 } @@ -33,13 +34,11 @@ func NewIncorporatedResultSeals(limit uint) *IncorporatedResultSeals { // ejecting a seal from mempool means that we have reached our limit and something is very bad, meaning that sealing // is not actually happening. // By setting high limit ~12 hours we ensure that we have some safety window for sealing to recover and make progress - ejector := func(b *Backend) (flow.Identifier, flow.Entity, bool) { - log.Fatalf("incorporated result seals reached max capacity %d", limit) - panic("incorporated result seals reached max capacity") - } - r := &IncorporatedResultSeals{ - Backend: NewBackend(WithLimit(limit), WithEject(ejector)), + Backend: NewBackend( + WithLimit[flow.Identifier, *flow.IncorporatedResultSeal](limit), + WithEject(EjectPanic[flow.Identifier, *flow.IncorporatedResultSeal]), + ), byHeight: byHeight, } @@ -56,7 +55,7 @@ func (ir *IncorporatedResultSeals) removeFromIndex(id flow.Identifier, height ui func (ir *IncorporatedResultSeals) removeByHeight(height uint64) { for sealID := range ir.byHeight[height] { - ir.backData.Remove(sealID) + ir.mutableBackData.Remove(sealID) } delete(ir.byHeight, height) } @@ -64,14 +63,14 @@ func (ir *IncorporatedResultSeals) removeByHeight(height uint64) { // Add adds an IncorporatedResultSeal to the mempool func (ir *IncorporatedResultSeals) Add(seal *flow.IncorporatedResultSeal) (bool, error) { added := false - sealID := seal.ID() - err := ir.Backend.Run(func(_ mempool.BackData) error { + resultID := seal.IncorporatedResult.ID() + err := ir.Backend.Run(func(backData mempool.BackData[flow.Identifier, *flow.IncorporatedResultSeal]) error { // skip elements below the pruned if seal.Header.Height < ir.lowestHeight { return nil } - added = ir.backData.Add(sealID, seal) + added = backData.Add(resultID, seal) if !added { return nil } @@ -82,50 +81,31 @@ func (ir *IncorporatedResultSeals) Add(seal *flow.IncorporatedResultSeal) (bool, sameHeight = make(sealSet) ir.byHeight[height] = sameHeight } - sameHeight[sealID] = seal + sameHeight[resultID] = seal return nil }) return added, err } -// Size returns the size of the underlying backing store -func (ir *IncorporatedResultSeals) Size() uint { - return ir.Backend.Size() -} - // All returns all the items in the mempool func (ir *IncorporatedResultSeals) All() []*flow.IncorporatedResultSeal { - entities := ir.Backend.All() - res := make([]*flow.IncorporatedResultSeal, 0, ir.backData.Size()) - for _, entity := range entities { - // uncaught type assertion; should never panic as the mempool only stores IncorporatedResultSeal: - res = append(res, entity.(*flow.IncorporatedResultSeal)) + all := ir.Backend.All() + results := make([]*flow.IncorporatedResultSeal, 0, len(all)) + for _, result := range all { + results = append(results, result) } - return res -} - -// ByID gets an IncorporatedResultSeal by IncorporatedResult ID -func (ir *IncorporatedResultSeals) ByID(id flow.Identifier) (*flow.IncorporatedResultSeal, bool) { - entity, ok := ir.Backend.ByID(id) - if !ok { - return nil, false - } - // uncaught type assertion; should never panic as the mempool only stores IncorporatedResultSeal: - return 
entity.(*flow.IncorporatedResultSeal), true + return results } // Remove removes an IncorporatedResultSeal from the mempool func (ir *IncorporatedResultSeals) Remove(id flow.Identifier) bool { removed := false - err := ir.Backend.Run(func(_ mempool.BackData) error { - var entity flow.Entity - entity, removed = ir.backData.Remove(id) - if !removed { - return nil + err := ir.Backend.Run(func(backData mempool.BackData[flow.Identifier, *flow.IncorporatedResultSeal]) error { + if seal, ok := backData.Remove(id); ok { + ir.removeFromIndex(id, seal.Header.Height) + removed = true } - seal := entity.(*flow.IncorporatedResultSeal) - ir.removeFromIndex(id, seal.Header.Height) return nil }) if err != nil { @@ -135,8 +115,8 @@ } func (ir *IncorporatedResultSeals) Clear() { - err := ir.Backend.Run(func(_ mempool.BackData) error { - ir.backData.Clear() + err := ir.Backend.Run(func(backData mempool.BackData[flow.Identifier, *flow.IncorporatedResultSeal]) error { + backData.Clear() ir.byHeight = make(map[uint64]sealSet) return nil }) @@ -154,7 +134,7 @@ // If `height` is smaller than the previous value, the previous value is kept // and the sentinel mempool.BelowPrunedThresholdError is returned. func (ir *IncorporatedResultSeals) PruneUpToHeight(height uint64) error { - return ir.Backend.Run(func(backData mempool.BackData) error { + return ir.Backend.Run(func(backData mempool.BackData[flow.Identifier, *flow.IncorporatedResultSeal]) error { if height < ir.lowestHeight { return mempool.NewBelowPrunedThresholdErrorf( "pruning height: %d, existing height: %d", height, ir.lowestHeight) diff --git a/module/mempool/stdmap/incorporated_result_seals_test.go b/module/mempool/stdmap/incorporated_result_seals_test.go index fb1a4b450b9..c9160cc606d 100644 --- a/module/mempool/stdmap/incorporated_result_seals_test.go +++ b/module/mempool/stdmap/incorporated_result_seals_test.go @@ -18,14 +18,14 @@ type icrSealsMachine struct { state []*flow.IncorporatedResultSeal // model of the icrSeals } -// Init is an action for initializing a icrSeals instance. -func (m *icrSealsMachine) Init(t *rapid.T) { +// init is an action for initializing an icrSeals instance. +func (m *icrSealsMachine) init(t *rapid.T) { m.icrs = NewIncorporatedResultSeals(1000) } // Add is a conditional action which adds an item to the icrSeals.
func (m *icrSealsMachine) Add(t *rapid.T) { - i := rapid.Uint64().Draw(t, "i").(uint64) + i := rapid.Uint64().Draw(t, "i") seal := unittest.IncorporatedResultSeal.Fixture(func(s *flow.IncorporatedResultSeal) { s.Header.Height = i @@ -37,7 +37,7 @@ func (m *icrSealsMachine) Add(t *rapid.T) { // we do not re-add already present seals unmet := true for _, v := range m.state { - if v.ID() == seal.ID() { + if v.IncorporatedResultID() == seal.IncorporatedResultID() { unmet = false } } @@ -49,7 +49,7 @@ func (m *icrSealsMachine) Add(t *rapid.T) { // Prune is a Conditional action that removes elements of height strictly lower than its argument func (m *icrSealsMachine) PruneUpToHeight(t *rapid.T) { - h := rapid.Uint64().Draw(t, "h").(uint64) + h := rapid.Uint64().Draw(t, "h") err := m.icrs.PruneUpToHeight(h) if h >= m.icrs.lowestHeight { require.NoError(t, err) @@ -72,10 +72,10 @@ func (m *icrSealsMachine) Get(t *rapid.T) { if n == 0 { return } - i := rapid.IntRange(0, n-1).Draw(t, "i").(int) + i := rapid.IntRange(0, n-1).Draw(t, "i") s := m.state[i] - actual, ok := m.icrs.ByID(s.ID()) + actual, ok := m.icrs.Get(s.IncorporatedResult.ID()) require.True(t, ok) require.Equal(t, s, actual) @@ -89,7 +89,7 @@ func (m *icrSealsMachine) GetUnknown(t *rapid.T) { if n == 0 { return } - i := rapid.IntRange(0, n-1).Draw(t, "i").(int) + i := rapid.IntRange(0, n-1).Draw(t, "i") seal := unittest.IncorporatedResultSeal.Fixture(func(s *flow.IncorporatedResultSeal) { s.Header.Height = uint64(i) }) @@ -97,13 +97,13 @@ func (m *icrSealsMachine) GetUnknown(t *rapid.T) { // check seal is unknown unknown := true for _, v := range m.state { - if v.ID() == seal.ID() { + if v.IncorporatedResultID() == seal.IncorporatedResultID() { unknown = false } } if unknown { - _, found := m.icrs.ByID(seal.ID()) + _, found := m.icrs.Get(seal.IncorporatedResult.ID()) require.False(t, found) } // no modification of state @@ -117,10 +117,10 @@ func (m *icrSealsMachine) Remove(t *rapid.T) { if n == 0 { return } - i := rapid.IntRange(0, n-1).Draw(t, "i").(int) + i := rapid.IntRange(0, n-1).Draw(t, "i") s := m.state[i] - ok := m.icrs.Remove(s.ID()) + ok := m.icrs.Remove(s.IncorporatedResult.ID()) require.True(t, ok) // remove m[i], we don't care about ordering here @@ -137,7 +137,7 @@ func (m *icrSealsMachine) RemoveUnknown(t *rapid.T) { if n == 0 { return } - i := rapid.IntRange(0, n-1).Draw(t, "i").(int) + i := rapid.IntRange(0, n-1).Draw(t, "i") seal := unittest.IncorporatedResultSeal.Fixture(func(s *flow.IncorporatedResultSeal) { s.Header.Height = uint64(i) }) @@ -145,13 +145,13 @@ func (m *icrSealsMachine) RemoveUnknown(t *rapid.T) { // check seal is unknown unknown := true for _, v := range m.state { - if v.ID() == seal.ID() { + if v.IncorporatedResultID() == seal.IncorporatedResultID() { unknown = false } } if unknown { - removed := m.icrs.Remove(seal.ID()) + removed := m.icrs.Remove(seal.IncorporatedResult.ID()) require.False(t, removed) } // no modification of state @@ -168,7 +168,11 @@ func (m *icrSealsMachine) Check(t *rapid.T) { // Run the icrSeals state machine and test it against its model func TestIcrs(t *testing.T) { - rapid.Check(t, rapid.Run(&icrSealsMachine{})) + rapid.Check(t, func(t *rapid.T) { + sm := new(icrSealsMachine) + sm.init(t) + t.Repeat(rapid.StateMachineActions(sm)) + }) } func TestIncorporatedResultSeals(t *testing.T) { @@ -185,14 +189,14 @@ func TestIncorporatedResultSeals(t *testing.T) { require.NoError(t, err) require.True(t, ok) - actual, ok := pool.ByID(seal.ID()) + actual, ok := 
pool.Get(seal.IncorporatedResult.ID()) require.True(t, ok) require.Equal(t, seal, actual) - deleted := pool.Remove(seal.ID()) + deleted := pool.Remove(seal.IncorporatedResult.ID()) require.True(t, deleted) - _, ok = pool.ByID(seal.ID()) + _, ok = pool.Get(seal.IncorporatedResult.ID()) require.False(t, ok) }) @@ -250,14 +254,14 @@ func TestIncorporatedResultSeals(t *testing.T) { func verifyPresent(t *testing.T, pool *IncorporatedResultSeals, seals ...*flow.IncorporatedResultSeal) { for _, seal := range seals { - _, ok := pool.ByID(seal.ID()) + _, ok := pool.Get(seal.IncorporatedResult.ID()) require.True(t, ok, "seal at height %d should be in mempool", seal.Header.Height) } } func verifyAbsent(t *testing.T, pool *IncorporatedResultSeals, seals ...*flow.IncorporatedResultSeal) { for _, seal := range seals { - _, ok := pool.ByID(seal.ID()) + _, ok := pool.Get(seal.IncorporatedResult.ID()) require.False(t, ok, "seal at height %d should not be in mempool", seal.Header.Height) } } diff --git a/module/mempool/stdmap/options.go b/module/mempool/stdmap/options.go index a32ca3e7749..245297cc2c3 100644 --- a/module/mempool/stdmap/options.go +++ b/module/mempool/stdmap/options.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap import ( @@ -8,13 +6,13 @@ import ( // OptionFunc is a function that can be provided to the backend on creation in // order to set a certain custom option. -type OptionFunc func(*Backend) +type OptionFunc[K comparable, V any] func(backend *Backend[K, V]) // WithLimit can be provided to the backend on creation in order to set a point // where it's time to check for ejection conditions. The actual size may continue // to rise by the threshold for batch ejection (currently 128) -func WithLimit(limit uint) OptionFunc { - return func(be *Backend) { +func WithLimit[K comparable, V any](limit uint) OptionFunc[K, V] { + return func(be *Backend[K, V]) { be.guaranteedCapacity = limit } } @@ -22,18 +20,19 @@ func WithLimit(limit uint) OptionFunc { // WithEject can be provided to the backend on creation in order to set a custom // eject function to pick the entity to be evicted upon overflow, as well as // hooking into it for additional cleanup work. -func WithEject(eject EjectFunc) OptionFunc { - return func(be *Backend) { +func WithEject[K comparable, V any](eject EjectFunc[K, V]) OptionFunc[K, V] { + return func(be *Backend[K, V]) { be.eject = eject be.batchEject = nil } } -// WithBackData sets the underlying backdata of the backend. -// BackData represents the underlying data structure that is utilized by mempool.Backend, as the +// WithMutableBackData sets the underlying mutable BackData for the backend. +// +// MutableBackData represents the mutable data structure used by mempool.Backend // core structure of maintaining data on memory-pools. -func WithBackData(backdata mempool.BackData) OptionFunc { - return func(be *Backend) { - be.backData = backdata +func WithMutableBackData[K comparable, V any](mutableBackData mempool.MutableBackData[K, V]) OptionFunc[K, V] { + return func(be *Backend[K, V]) { + be.mutableBackData = mutableBackData } } diff --git a/module/mempool/stdmap/pending_receipts.go b/module/mempool/stdmap/pending_receipts.go index 7dc4e70e819..cab47d75bf8 100644 --- a/module/mempool/stdmap/pending_receipts.go +++ b/module/mempool/stdmap/pending_receipts.go @@ -10,11 +10,11 @@ import ( type receiptsSet map[flow.Identifier]struct{} -// PendingReceipts stores pending receipts indexed by the id. 
+// PendingReceipts stores pending receipts indexed by execution receipt ID. -// It also maintains a secondary index on the previous result id. -// in order to allow to find receipts by the previous result id. +// It also maintains a secondary index on the previous result ID, +// so that receipts can be looked up by the result they follow. type PendingReceipts struct { - *Backend + *Backend[flow.Identifier, *flow.ExecutionReceipt] headers storage.Headers // used to query headers of executed blocks // secondary index by parent result id, since multiple receipts could // have the same parent result, (even if they have different result) @@ -42,7 +42,7 @@ func (r *PendingReceipts) indexByHeight(receipt *flow.ExecutionReceipt) (uint64, func NewPendingReceipts(headers storage.Headers, limit uint) *PendingReceipts { // create the receipts memory pool with the lookup maps r := &PendingReceipts{ - Backend: NewBackend(WithLimit(limit)), + Backend: NewBackend(WithLimit[flow.Identifier, *flow.ExecutionReceipt](limit)), headers: headers, byPreviousResultID: make(map[flow.Identifier]receiptsSet), byHeight: make(map[uint64]receiptsSet), @@ -50,16 +50,15 @@ func NewPendingReceipts(headers storage.Headers, limit uint) *PendingReceipts { // TODO: smarter ejection strategies exist. For instance: // if the mempool fills up, we want to eject the receipts for the highest blocks // See https://github.com/onflow/flow-go/pull/387/files#r574228078 - r.RegisterEjectionCallbacks(func(entity flow.Entity) { - receipt := entity.(*flow.ExecutionReceipt) - removeReceipt(receipt, r.backData, r.byPreviousResultID) + r.RegisterEjectionCallbacks(func(receipt *flow.ExecutionReceipt) { + removeReceipt(receipt, r.mutableBackData, r.byPreviousResultID) }) return r } func removeReceipt( receipt *flow.ExecutionReceipt, - entities mempool.BackData, + entities mempool.BackData[flow.Identifier, *flow.ExecutionReceipt], byPreviousResultID map[flow.Identifier]receiptsSet) { receiptID := receipt.ID() @@ -76,9 +75,9 @@ // Add adds an execution receipt to the mempool. func (r *PendingReceipts) Add(receipt *flow.ExecutionReceipt) bool { added := false - err := r.Backend.Run(func(backData mempool.BackData) error { + err := r.Backend.Run(func(backData mempool.BackData[flow.Identifier, *flow.ExecutionReceipt]) error { receiptID := receipt.ID() - _, exists := backData.ByID(receiptID) + _, exists := backData.Get(receiptID) if exists { // duplication return nil @@ -125,11 +124,10 @@ // Remove will remove a receipt by ID.
func (r *PendingReceipts) Remove(receiptID flow.Identifier) bool { removed := false - err := r.Backend.Run(func(backData mempool.BackData) error { - entity, ok := backData.ByID(receiptID) + err := r.Backend.Run(func(backData mempool.BackData[flow.Identifier, *flow.ExecutionReceipt]) error { + receipt, ok := backData.Get(receiptID) if ok { - receipt := entity.(*flow.ExecutionReceipt) - removeReceipt(receipt, r.backData, r.byPreviousResultID) + removeReceipt(receipt, r.mutableBackData, r.byPreviousResultID) removed = true } return nil @@ -143,20 +141,16 @@ // ByPreviousResultID returns receipts whose previous result ID matches the given ID func (r *PendingReceipts) ByPreviousResultID(previousResultID flow.Identifier) []*flow.ExecutionReceipt { var receipts []*flow.ExecutionReceipt - err := r.Backend.Run(func(backData mempool.BackData) error { + err := r.Backend.Run(func(backData mempool.BackData[flow.Identifier, *flow.ExecutionReceipt]) error { siblings, foundIndex := r.byPreviousResultID[previousResultID] if !foundIndex { return nil } for receiptID := range siblings { - entity, ok := backData.ByID(receiptID) + receipt, ok := backData.Get(receiptID) if !ok { return fmt.Errorf("inconsistent index. can not find entity by id: %v", receiptID) } - receipt, ok := entity.(*flow.ExecutionReceipt) - if !ok { - return fmt.Errorf("could not convert entity to receipt: %v", receiptID) - } receipts = append(receipts, receipt) } return nil @@ -168,11 +162,6 @@ } return receipts } -// Size will return the total number of pending receipts -func (r *PendingReceipts) Size() uint { - return r.Backend.Size() -} - // PruneUpToHeight removes all receipts for blocks whose height is strictly // smaller than the given height. Note: receipts for blocks at the given height are retained. // After pruning, receipts for blocks below the given height are dropped. @@ -182,7 +171,7 @@ // If `height` is smaller than the previous value, the previous value is kept // and the sentinel mempool.BelowPrunedThresholdError is returned.
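Caller-side, the pruning contract documented above (and implemented just below) behaves as follows; a sketch with hypothetical heights:

```go
package example

import (
	"github.com/onflow/flow-go/module/mempool/stdmap"
)

func pruneTwice(pool *stdmap.PendingReceipts) {
	_ = pool.PruneUpToHeight(100) // watermark moves to 100; receipts for blocks below 100 are dropped

	// The watermark is monotonic: a smaller height is rejected with the sentinel
	// error constructed via mempool.NewBelowPrunedThresholdErrorf, and the
	// previous watermark of 100 is kept.
	err := pool.PruneUpToHeight(50)
	_ = err // callers replaying old heights can treat this error as benign
}
```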
func (r *PendingReceipts) PruneUpToHeight(height uint64) error { - return r.Backend.Run(func(backData mempool.BackData) error { + return r.Backend.Run(func(backData mempool.BackData[flow.Identifier, *flow.ExecutionReceipt]) error { if height < r.lowestHeight { return mempool.NewBelowPrunedThresholdErrorf( "pruning height: %d, existing height: %d", height, r.lowestHeight) @@ -211,11 +200,11 @@ func (r *PendingReceipts) PruneUpToHeight(height uint64) error { }) } -func (r *PendingReceipts) removeByHeight(height uint64, backData mempool.BackData) { +func (r *PendingReceipts) removeByHeight(height uint64, backData mempool.BackData[flow.Identifier, *flow.ExecutionReceipt]) { for receiptID := range r.byHeight[height] { - entity, ok := backData.ByID(receiptID) + receipt, ok := backData.Get(receiptID) if ok { - removeReceipt(entity.(*flow.ExecutionReceipt), r.backData, r.byPreviousResultID) + removeReceipt(receipt, r.mutableBackData, r.byPreviousResultID) } } delete(r.byHeight, height) diff --git a/module/mempool/stdmap/pending_receipts_test.go b/module/mempool/stdmap/pending_receipts_test.go index 2c60fc0cdb9..6d5044c899f 100644 --- a/module/mempool/stdmap/pending_receipts_test.go +++ b/module/mempool/stdmap/pending_receipts_test.go @@ -54,7 +54,7 @@ func TestPendingReceipts(t *testing.T) { rs[0] = parent for i := 1; i < n; i++ { rs[i] = unittest.ExecutionReceiptFixture(func(receipt *flow.ExecutionReceipt) { - receipt.ExecutionResult.PreviousResultID = parent.ID() + receipt.ExecutionResult.PreviousResultID = parent.ExecutionResult.ID() parent = receipt }) } @@ -190,10 +190,10 @@ func TestPendingReceipts(t *testing.T) { headers := &mockstorage.Headers{} pool := NewPendingReceipts(headers, 100) executedBlock := unittest.BlockFixture() - nextExecutedBlock := unittest.BlockWithParentFixture(executedBlock.Header) - er := unittest.ExecutionResultFixture(unittest.WithBlock(&executedBlock)) - headers.On("ByBlockID", executedBlock.ID()).Return(executedBlock.Header, nil) - headers.On("ByBlockID", nextExecutedBlock.ID()).Return(nextExecutedBlock.Header, nil) + nextExecutedBlock := unittest.BlockWithParentFixture(executedBlock.ToHeader()) + er := unittest.ExecutionResultFixture(unittest.WithBlock(executedBlock)) + headers.On("ByBlockID", executedBlock.ID()).Return(executedBlock.ToHeader(), nil) + headers.On("ByBlockID", nextExecutedBlock.ID()).Return(nextExecutedBlock.ToHeader(), nil) ids := make(map[flow.Identifier]struct{}) for i := 0; i < 10; i++ { receipt := unittest.ExecutionReceiptFixture(unittest.WithResult(er)) @@ -210,7 +210,7 @@ func TestPendingReceipts(t *testing.T) { require.True(t, pool.Has(id)) } - err := pool.PruneUpToHeight(nextExecutedBlock.Header.Height) + err := pool.PruneUpToHeight(nextExecutedBlock.Height) require.NoError(t, err) // these receipts should be pruned diff --git a/module/mempool/stdmap/queues.go b/module/mempool/stdmap/queues.go deleted file mode 100644 index 24136c2674c..00000000000 --- a/module/mempool/stdmap/queues.go +++ /dev/null @@ -1,73 +0,0 @@ -package stdmap - -import ( - "fmt" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/mempool" - "github.com/onflow/flow-go/module/mempool/queue" - _ "github.com/onflow/flow-go/utils/binstat" -) - -type Queues struct { - *Backend -} - -// QueuesBackdata is mempool map for ingestion.Queues (head Node ID -> Queues) -type QueuesBackdata struct { - mempool.BackData -} - -func NewQueues() *Queues { - return &Queues{NewBackend(WithEject(EjectPanic))} -} - -func (b *QueuesBackdata) ByID(queueID 
flow.Identifier) (*queue.Queue, bool) { - entity, exists := b.BackData.ByID(queueID) - if !exists { - return nil, false - } - queue := entity.(*queue.Queue) - return queue, true -} - -func (b *QueuesBackdata) All() []*queue.Queue { - entities := b.BackData.All() - - queues := make([]*queue.Queue, len(entities)) - i := 0 - for _, entity := range entities { - queue, ok := entity.(*queue.Queue) - if !ok { - panic(fmt.Sprintf("invalid entity in queue mempool (%T)", entity)) - } - queues[i] = queue - i++ - } - return queues -} - -func (b *Queues) Add(queue *queue.Queue) bool { - return b.Backend.Add(queue) -} - -func (b *Queues) Get(queueID flow.Identifier) (*queue.Queue, bool) { - backdata := &QueuesBackdata{b.backData} - return backdata.ByID(queueID) -} - -func (b *Queues) Run(f func(backdata *QueuesBackdata) error) error { - //bs1 := binstat.EnterTime(binstat.BinStdmap + ".w_lock.(Queues)Run") - b.Lock() - //binstat.Leave(bs1) - - //bs2 := binstat.EnterTime(binstat.BinStdmap + ".inlock.(Queues)Run") - defer b.Unlock() - err := f(&QueuesBackdata{b.backData}) - //binstat.Leave(bs2) - - if err != nil { - return err - } - return nil -} diff --git a/module/mempool/stdmap/receipts.go b/module/mempool/stdmap/receipts.go index 9e416972814..3f621dd77f3 100644 --- a/module/mempool/stdmap/receipts.go +++ b/module/mempool/stdmap/receipts.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap import ( @@ -8,47 +6,13 @@ import ( // Receipts implements the execution receipts memory pool of the consensus node, // used to store execution receipts and to generate block seals. +// Stored Receipts are keyed by the ID of execution receipt. type Receipts struct { - *Backend + *Backend[flow.Identifier, *flow.ExecutionReceipt] } // NewReceipts creates a new memory pool for execution receipts. -func NewReceipts(limit uint) (*Receipts, error) { +func NewReceipts(limit uint) *Receipts { // create the receipts memory pool with the lookup maps - r := &Receipts{ - Backend: NewBackend(WithLimit(limit)), - } - return r, nil -} - -// Add adds an execution receipt to the mempool. -func (r *Receipts) Add(receipt *flow.ExecutionReceipt) bool { - added := r.Backend.Add(receipt) - return added -} - -// Remove will remove a receipt by ID. -func (r *Receipts) Remove(receiptID flow.Identifier) bool { - removed := r.Backend.Remove(receiptID) - return removed -} - -// ByID will retrieve an approval by ID. -func (r *Receipts) ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, bool) { - entity, exists := r.Backend.ByID(receiptID) - if !exists { - return nil, false - } - receipt := entity.(*flow.ExecutionReceipt) - return receipt, true -} - -// All will return all execution receipts in the memory pool. 
-func (r *Receipts) All() []*flow.ExecutionReceipt { - entities := r.Backend.All() - receipts := make([]*flow.ExecutionReceipt, 0, len(entities)) - for _, entity := range entities { - receipts = append(receipts, entity.(*flow.ExecutionReceipt)) - } - return receipts + return &Receipts{NewBackend(WithLimit[flow.Identifier, *flow.ExecutionReceipt](limit))} } diff --git a/module/mempool/stdmap/receipts_test.go b/module/mempool/stdmap/receipts_test.go index a664126caaf..7b6b16fb00f 100644 --- a/module/mempool/stdmap/receipts_test.go +++ b/module/mempool/stdmap/receipts_test.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap_test import ( @@ -16,16 +14,15 @@ func TestReceiptPool(t *testing.T) { item1 := unittest.ExecutionReceiptFixture() item2 := unittest.ExecutionReceiptFixture() - pool, err := stdmap.NewReceipts(1000) - require.NoError(t, err) + pool := stdmap.NewReceipts(1000) t.Run("should be able to add first", func(t *testing.T) { - added := pool.Add(item1) + added := pool.Add(item1.ID(), item1) assert.True(t, added) }) t.Run("should be able to add second", func(t *testing.T) { - added := pool.Add(item2) + added := pool.Add(item2.ID(), item2) assert.True(t, added) }) @@ -35,7 +32,7 @@ func TestReceiptPool(t *testing.T) { }) t.Run("should be able to get first", func(t *testing.T) { - got, exists := pool.ByID(item1.ID()) + got, exists := pool.Get(item1.ID()) assert.True(t, exists) assert.Equal(t, item1, got) }) @@ -48,6 +45,8 @@ func TestReceiptPool(t *testing.T) { t.Run("should be able to retrieve all", func(t *testing.T) { items := pool.All() assert.Len(t, items, 1) - assert.Equal(t, item1, items[0]) + val, exists := items[item1.ID()] + require.True(t, exists) + assert.Equal(t, item1, val) }) } diff --git a/module/mempool/stdmap/results.go b/module/mempool/stdmap/results.go deleted file mode 100644 index ac679c37a89..00000000000 --- a/module/mempool/stdmap/results.go +++ /dev/null @@ -1,54 +0,0 @@ -package stdmap - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// Results implements the execution results memory pool of the consensus node, -// used to store execution results and to generate block seals. -type Results struct { - *Backend -} - -// NewResults creates a new memory pool for execution results. -func NewResults(limit uint) (*Results, error) { - - // create the results memory pool with the lookup maps - r := &Results{ - Backend: NewBackend(WithLimit(limit)), - } - - return r, nil -} - -// Add adds an execution result to the mempool. -func (r *Results) Add(result *flow.ExecutionResult) bool { - added := r.Backend.Add(result) - return added -} - -// Remove will remove a result by ID. -func (r *Results) Remove(resultID flow.Identifier) bool { - removed := r.Backend.Remove(resultID) - return removed -} - -// ByID will retrieve an approval by ID. -func (r *Results) ByID(resultID flow.Identifier) (*flow.ExecutionResult, bool) { - entity, exists := r.Backend.ByID(resultID) - if !exists { - return nil, false - } - result := entity.(*flow.ExecutionResult) - return result, true -} - -// All will return all execution results in the memory pool. 
-func (r *Results) All() []*flow.ExecutionResult { - entities := r.Backend.All() - results := make([]*flow.ExecutionResult, 0, len(entities)) - for _, entity := range entities { - results = append(results, entity.(*flow.ExecutionResult)) - } - return results -} diff --git a/module/mempool/stdmap/times.go b/module/mempool/stdmap/times.go index e5e7c33218f..764d7f25f33 100644 --- a/module/mempool/stdmap/times.go +++ b/module/mempool/stdmap/times.go @@ -1,61 +1,18 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap import ( - "fmt" "time" "github.com/onflow/flow-go/model/flow" ) -// Times implements the times memory pool used to store time.Times for an idetifier to track transaction metrics in -// access nodes +// Times implements the times memory pool used to store time.Time values associated with +// flow.Identifiers for tracking transaction metrics in Access nodes. type Times struct { - *Backend -} - -// NewTimes creates a new memory pool for times -func NewTimes(limit uint) (*Times, error) { - t := &Times{ - Backend: NewBackend(WithLimit(limit)), - } - - return t, nil -} - -type Time struct { - id flow.Identifier - ti time.Time -} - -func (t *Time) ID() flow.Identifier { - return t.id -} - -func (t *Time) Checksum() flow.Identifier { - return t.id -} - -// Add adds a time to the mempool. -func (t *Times) Add(id flow.Identifier, ti time.Time) bool { - return t.Backend.Add(&Time{id, ti}) -} - -// ByID returns the time with the given ID from the mempool. -func (t *Times) ByID(id flow.Identifier) (time.Time, bool) { - entity, exists := t.Backend.ByID(id) - if !exists { - return time.Time{}, false - } - tt, ok := entity.(*Time) - if !ok { - panic(fmt.Sprintf("invalid entity in times pool (%T)", entity)) - } - return tt.ti, true + *Backend[flow.Identifier, time.Time] } -// Remove removes the time with the given ID. -func (t *Times) Remove(id flow.Identifier) bool { - return t.Backend.Remove(id) +// NewTimes creates a new memory pool for times. 
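With values no longer required to implement flow.Entity, the Times pool below can store plain time.Time values directly; a usage sketch matching the updated test:

```go
package example

import (
	"time"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/mempool/stdmap"
)

func recordTiming(txID flow.Identifier) (time.Time, bool) {
	pool := stdmap.NewTimes(1000)
	pool.Add(txID, time.Now()) // time.Time is stored as-is; no wrapper with ID()/Checksum()
	return pool.Get(txID)      // Get replaces the old type-asserting ByID
}
```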
+func NewTimes(limit uint) *Times { + return &Times{NewBackend(WithLimit[flow.Identifier, time.Time](limit))} } diff --git a/module/mempool/stdmap/times_test.go b/module/mempool/stdmap/times_test.go index 40ca4b18bae..7cb18a20aec 100644 --- a/module/mempool/stdmap/times_test.go +++ b/module/mempool/stdmap/times_test.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap_test import ( @@ -7,7 +5,6 @@ import ( "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/utils/unittest" @@ -17,8 +14,7 @@ func TestTimesPool(t *testing.T) { id := unittest.IdentifierFixture() ti := time.Now() - pool, err := stdmap.NewTimes(3) - require.NoError(t, err) + pool := stdmap.NewTimes(3) t.Run("should be able to add", func(t *testing.T) { added := pool.Add(id, ti) @@ -26,7 +22,7 @@ func TestTimesPool(t *testing.T) { }) t.Run("should be able to get", func(t *testing.T) { - got, exists := pool.ByID(id) + got, exists := pool.Get(id) assert.True(t, exists) assert.Equal(t, ti, got) }) diff --git a/module/mempool/stdmap/transaction_timings.go b/module/mempool/stdmap/transaction_timings.go index 407347304ea..debb4e6597b 100644 --- a/module/mempool/stdmap/transaction_timings.go +++ b/module/mempool/stdmap/transaction_timings.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap import ( @@ -7,70 +5,13 @@ import ( ) // TransactionTimings implements the transaction timings memory pool of access nodes, -// used to store transaction timings to report the timing of individual transactions +// used to store transaction timings to report the timing of individual transactions. +// Stored transaction timings are keyed by transaction ID. type TransactionTimings struct { - *Backend -} - -// NewTransactionTimings creates a new memory pool for transaction timings -func NewTransactionTimings(limit uint) (*TransactionTimings, error) { - t := &TransactionTimings{ - Backend: NewBackend(WithLimit(limit)), - } - - return t, nil -} - -// Add adds a transaction timing to the mempool. -func (t *TransactionTimings) Add(tx *flow.TransactionTiming) bool { - return t.Backend.Add(tx) -} - -// ByID returns the transaction timing with the given ID from the mempool. -func (t *TransactionTimings) ByID(txID flow.Identifier) (*flow.TransactionTiming, bool) { - entity, exists := t.Backend.ByID(txID) - if !exists { - return nil, false - } - tt, ok := entity.(*flow.TransactionTiming) - if !ok { - return nil, false - } - return tt, true -} - -// Adjust will adjust the transaction timing using the given function if the given key can be found. -// Returns a bool which indicates whether the value was updated as well as the updated value. -func (t *TransactionTimings) Adjust(txID flow.Identifier, f func(*flow.TransactionTiming) *flow.TransactionTiming) ( - *flow.TransactionTiming, bool) { - e, updated := t.Backend.Adjust(txID, func(e flow.Entity) flow.Entity { - tt, ok := e.(*flow.TransactionTiming) - if !ok { - return nil - } - return f(tt) - }) - if !updated { - return nil, false - } - tt, ok := e.(*flow.TransactionTiming) - if !ok { - return nil, false - } - return tt, updated -} - -// All returns all transaction timings from the mempool. 
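The typed Adjust on the generic backend removes both type assertions the old TransactionTimings implementation needed; a sketch following the updated test further below:

```go
package example

import (
	"time"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/mempool/stdmap"
)

// markFinalized stamps the finalization time on a stored timing record.
// Adjust is now typed end to end: no flow.Entity round-trip, no assertions.
func markFinalized(pool *stdmap.TransactionTimings, txID flow.Identifier) (*flow.TransactionTiming, bool) {
	return pool.Adjust(txID, func(tt *flow.TransactionTiming) *flow.TransactionTiming {
		tt.Finalized = time.Now()
		return tt
	})
}
```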
-func (t *TransactionTimings) All() []*flow.TransactionTiming { - entities := t.Backend.All() - txs := make([]*flow.TransactionTiming, 0, len(entities)) - for _, entity := range entities { - txs = append(txs, entity.(*flow.TransactionTiming)) - } - return txs + *Backend[flow.Identifier, *flow.TransactionTiming] } -// Remove removes the transaction timing with the given ID. -func (t *TransactionTimings) Remove(txID flow.Identifier) bool { - return t.Backend.Remove(txID) +// NewTransactionTimings creates a new memory pool for transaction timings. +func NewTransactionTimings(limit uint) *TransactionTimings { + return &TransactionTimings{NewBackend(WithLimit[flow.Identifier, *flow.TransactionTiming](limit))} } diff --git a/module/mempool/stdmap/transaction_timings_test.go b/module/mempool/stdmap/transaction_timings_test.go index dc2d818b7ef..347ae4c959e 100644 --- a/module/mempool/stdmap/transaction_timings_test.go +++ b/module/mempool/stdmap/transaction_timings_test.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package stdmap_test import ( @@ -19,16 +17,15 @@ func TestTransactionTimingsPool(t *testing.T) { Received: time.Now().Add(-10 * time.Second), Executed: time.Now()} item2 := &flow.TransactionTiming{TransactionID: unittest.IdentifierFixture(), Received: time.Now()} - pool, err := stdmap.NewTransactionTimings(1000) - require.NoError(t, err) + pool := stdmap.NewTransactionTimings(1000) t.Run("should be able to add first", func(t *testing.T) { - added := pool.Add(item1) + added := pool.Add(item1.TransactionID, item1) assert.True(t, added) }) t.Run("should be able to add second", func(t *testing.T) { - added := pool.Add(item2) + added := pool.Add(item2.TransactionID, item2) assert.True(t, added) }) @@ -39,7 +36,7 @@ func TestTransactionTimingsPool(t *testing.T) { t.Run("should be able to adjust the first", func(t *testing.T) { finalized := time.Now() - entity, updated := pool.Adjust(item1.ID(), func(t *flow.TransactionTiming) *flow.TransactionTiming { + entity, updated := pool.Adjust(item1.TransactionID, func(t *flow.TransactionTiming) *flow.TransactionTiming { t.Finalized = finalized return t }) @@ -48,20 +45,22 @@ func TestTransactionTimingsPool(t *testing.T) { }) t.Run("should be able to get first", func(t *testing.T) { - got, exists := pool.ByID(item1.ID()) + got, exists := pool.Get(item1.TransactionID) assert.True(t, exists) assert.Equal(t, item1, got) }) t.Run("should be able to remove second", func(t *testing.T) { - ok := pool.Remove(item2.ID()) + ok := pool.Remove(item2.TransactionID) assert.True(t, ok) }) t.Run("should be able to retrieve all", func(t *testing.T) { items := pool.All() assert.Len(t, items, 1) - assert.Equal(t, item1, items[0]) + val, exists := items[item1.TransactionID] + require.True(t, exists) + assert.Equal(t, item1, val) }) t.Run("should not panic if item does not exist yet", func(t *testing.T) { diff --git a/module/mempool/stdmap/transactions.go b/module/mempool/stdmap/transactions.go deleted file mode 100644 index ea59e35b289..00000000000 --- a/module/mempool/stdmap/transactions.go +++ /dev/null @@ -1,53 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package stdmap - -import ( - "fmt" - - "github.com/onflow/flow-go/model/flow" -) - -// Transactions implements the transactions memory pool of the consensus nodes, -// used to store transactions and to generate block payloads. -type Transactions struct { - *Backend -} - -// NewTransactions creates a new memory pool for transactions. 
-// Deprecated: use herocache.Transactions instead. -func NewTransactions(limit uint) *Transactions { - t := &Transactions{ - Backend: NewBackend(WithLimit(limit)), - } - - return t -} - -// Add adds a transaction to the mempool. -func (t *Transactions) Add(tx *flow.TransactionBody) bool { - return t.Backend.Add(tx) -} - -// ByID returns the transaction with the given ID from the mempool. -func (t *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, bool) { - entity, exists := t.Backend.ByID(txID) - if !exists { - return nil, false - } - tx, ok := entity.(*flow.TransactionBody) - if !ok { - panic(fmt.Sprintf("invalid entity in transaction pool (%T)", entity)) - } - return tx, true -} - -// All returns all transactions from the mempool. -func (t *Transactions) All() []*flow.TransactionBody { - entities := t.Backend.All() - txs := make([]*flow.TransactionBody, 0, len(entities)) - for _, entity := range entities { - txs = append(txs, entity.(*flow.TransactionBody)) - } - return txs -} diff --git a/module/mempool/stdmap/transactions_test.go b/module/mempool/stdmap/transactions_test.go deleted file mode 100644 index f16da60d505..00000000000 --- a/module/mempool/stdmap/transactions_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package stdmap_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/onflow/flow-go/module/mempool/stdmap" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestTransactionPool(t *testing.T) { - tx1 := unittest.TransactionBodyFixture() - item1 := &tx1 - - tx2 := unittest.TransactionBodyFixture() - item2 := &tx2 - - pool := stdmap.NewTransactions(1000) - - t.Run("should be able to add first", func(t *testing.T) { - added := pool.Add(item1) - assert.True(t, added) - }) - - t.Run("should be able to add second", func(t *testing.T) { - added := pool.Add(item2) - assert.True(t, added) - }) - - t.Run("should be able to get size", func(t *testing.T) { - size := pool.Size() - assert.EqualValues(t, 2, size) - }) - - t.Run("should be able to get first", func(t *testing.T) { - got, exists := pool.ByID(item1.ID()) - assert.True(t, exists) - assert.Equal(t, item1, got) - }) - - t.Run("should be able to remove second", func(t *testing.T) { - ok := pool.Remove(item2.ID()) - assert.True(t, ok) - }) - - t.Run("should be able to retrieve all", func(t *testing.T) { - items := pool.All() - assert.Len(t, items, 1) - assert.Equal(t, item1, items[0]) - }) - - t.Run("should be able to clear", func(t *testing.T) { - assert.True(t, pool.Size() > 0) - pool.Clear() - assert.Equal(t, uint(0), pool.Size()) - }) -} diff --git a/module/mempool/transaction_timings.go b/module/mempool/transaction_timings.go index f64f07d59d1..def620822c4 100644 --- a/module/mempool/transaction_timings.go +++ b/module/mempool/transaction_timings.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package mempool import ( @@ -7,21 +5,4 @@ import ( ) // TransactionTimings represents a concurrency-safe memory pool for transaction timings. -type TransactionTimings interface { - - // Add adds a transaction timing to the mempool. - Add(tx *flow.TransactionTiming) bool - - // ByID returns the transaction timing with the given ID from the mempool. - ByID(txID flow.Identifier) (*flow.TransactionTiming, bool) - // Adjust will adjust the transaction timing using the given function if the given key can be found. - // Returns a bool which indicates whether the value was updated as well as the updated value. 
-	Adjust(txID flow.Identifier, f func(*flow.TransactionTiming) *flow.TransactionTiming) (*flow.TransactionTiming,
-		bool)
-
-	// All returns all transaction timings from the mempool.
-	All() []*flow.TransactionTiming
-
-	// Remove removes the transaction timing with the given ID.
-	Remove(txID flow.Identifier) bool
-}
+type TransactionTimings Mempool[flow.Identifier, *flow.TransactionTiming]
diff --git a/module/mempool/transactions.go b/module/mempool/transactions.go
index a30292e4b73..35fd8e873b7 100644
--- a/module/mempool/transactions.go
+++ b/module/mempool/transactions.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package mempool
 
 import (
@@ -8,30 +6,8 @@ import (
 // Transactions represents a concurrency-safe memory pool for transactions.
 type Transactions interface {
-
-	// Has checks whether the transaction with the given hash is currently in
-	// the memory pool.
-	Has(txID flow.Identifier) bool
-
-	// Add will add the given transaction body to the memory pool. It will
-	// return false if it was already in the mempool.
-	Add(tx *flow.TransactionBody) bool
-
-	// Remove will remove the given transaction from the memory pool; it will
-	// will return true if the transaction was known and removed.
-	Remove(txID flow.Identifier) bool
-
-	// ByID retrieve the transaction with the given ID from the memory
-	// pool. It will return false if it was not found in the mempool.
-	ByID(txID flow.Identifier) (*flow.TransactionBody, bool)
-
-	// Size will return the current size of the memory pool.
-	Size() uint
-
-	// All will retrieve all transactions that are currently in the memory pool
-	// as a slice.
-	All() []*flow.TransactionBody
-
-	// Clear removes all transactions from the mempool.
-	Clear()
+	Mempool[flow.Identifier, *flow.TransactionBody]
+	// ByPayer retrieves all transactions from the memory pool that are sent
+	// by the given payer.
+	ByPayer(payer flow.Address) []*flow.TransactionBody
 }
diff --git a/module/metrics.go b/module/metrics.go
index 8ee8df549a9..0720a3f049f 100644
--- a/module/metrics.go
+++ b/module/metrics.go
@@ -1,15 +1,19 @@
 package module
 
 import (
+	"context"
 	"time"
 
 	"github.com/libp2p/go-libp2p/core/peer"
 	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+	httpmetrics "github.com/slok/go-http-metrics/metrics"
 
+	"github.com/onflow/flow-go/fvm/meter"
 	"github.com/onflow/flow-go/model/chainsync"
 	"github.com/onflow/flow-go/model/cluster"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/network/channels"
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
 )
 
 type EntriesFunc func() uint
@@ -40,52 +44,144 @@ type NetworkSecurityMetrics interface {
 	// OnRateLimitedPeer tracks the number of rate limited unicast messages seen on the network.
 	OnRateLimitedPeer(pid peer.ID, role, msgType, topic, reason string)
+
+	// OnViolationReportSkipped tracks the number of slashing violations that the violations consumer
+	// skipped reporting as misbehavior because the identity of the sender was not known.
+	OnViolationReportSkipped()
+}
+
+// GossipSubRpcInspectorMetrics encapsulates the metrics collectors for the GossipSub RPC Inspector module of the networking layer.
+// The RPC inspector is the entry point of the GossipSub protocol. It inspects the incoming RPC messages and decides
+// whether to accept, prune, or reject the RPC message.
+// The GossipSubRpcInspectorMetrics tracks the number of RPC messages received by the local node from other nodes over
+// the GossipSub protocol. It also tracks the number of control messages included in the RPC messages, i.e., IHAVE, IWANT,
+// GRAFT, PRUNE, as well as the number of actual (publish) messages included in the RPC messages.
+// GossipSubRpcInspectorMetrics differs from LocalGossipSubRouterMetrics in that the former tracks the messages received
+// by the local node from other nodes over the GossipSub protocol, not all of which may be accepted by the local node,
+// e.g., due to RPC pruning or throttling; the latter tracks the local node's view of the GossipSub protocol, i.e., only
+// the messages that are accepted by the local node (either as a whole RPC or only for the control messages).
+// This distinction is useful for debugging and troubleshooting the GossipSub protocol: for example, the number of
+// messages received by the local node from other nodes over the GossipSub protocol may be much higher than the number
+// of messages accepted by the local node, which may indicate that the local node is throttling the incoming messages.
+type GossipSubRpcInspectorMetrics interface {
+	// OnIWantMessageIDsReceived tracks the number of message ids received by the node from other nodes on an RPC.
+	// Note: this function is called on each IWANT message received by the node, not on each message id received.
+	OnIWantMessageIDsReceived(msgIdCount int)
+
+	// OnIHaveMessageIDsReceived tracks the number of message ids received by the node from other nodes on an iHave message.
+	// This function is called on each iHave message received by the node.
+	// Args:
+	// - channel: the channel on which the iHave message was received.
+	// - msgIdCount: the number of message ids received on the iHave message.
+	OnIHaveMessageIDsReceived(channel string, msgIdCount int)
+
+	// OnIncomingRpcReceived tracks the number of RPC messages received by the node.
+	// Args:
+	//	iHaveCount: the number of iHAVE messages included in the RPC.
+	//	iWantCount: the number of iWANT messages included in the RPC.
+	//	graftCount: the number of GRAFT messages included in the RPC.
+	//	pruneCount: the number of PRUNE messages included in the RPC.
+	//	msgCount: the number of publish messages included in the RPC.
+	OnIncomingRpcReceived(iHaveCount, iWantCount, graftCount, pruneCount, msgCount int)
+}
+
+// GossipSubScoringRegistryMetrics encapsulates the metrics collectors for the GossipSub scoring registry.
+// It offers insights into the penalties and other factors used by the scoring registry to compute the
+// application-specific score, focusing on internal aspects of that score and thereby distinguishing
+// itself from GossipSubScoringMetrics.
+type GossipSubScoringRegistryMetrics interface {
+	// DuplicateMessagePenalties tracks the duplicate message penalty for a node.
+	DuplicateMessagePenalties(penalty float64)
+	// DuplicateMessagesCounts tracks the duplicate message count for a node.
+	DuplicateMessagesCounts(count float64)
 }
 
-// GossipSubRouterMetrics encapsulates the metrics collectors for GossipSubRouter module of the networking layer.
-// It mostly collects the metrics related to the control message exchange between nodes over the GossipSub protocol.
-type GossipSubRouterMetrics interface {
-	// OnIncomingRpcAcceptedFully tracks the number of RPC messages received by the node that are fully accepted.
-	// An RPC may contain any number of control messages, i.e., IHAVE, IWANT, GRAFT, PRUNE, as well as the actual messages.
-	// A fully accepted RPC means that all the control messages are accepted and all the messages are accepted.
-	OnIncomingRpcAcceptedFully()
+// LocalGossipSubRouterMetrics encapsulates the metrics collectors for the GossipSub router of the local node.
+// It gives a lens into the local GossipSub node's view of the GossipSub protocol.
+// LocalGossipSubRouterMetrics differs from GossipSubRpcInspectorMetrics in that the former tracks the local node's view
+// of the GossipSub protocol, while the latter tracks the messages received by the local node from other nodes over the
+// GossipSub protocol, not all of which may be accepted by the local node, e.g., due to RPC pruning or throttling.
+// This distinction is useful for debugging and troubleshooting the GossipSub protocol: for example, the number of
+// messages received by the local node from other nodes over the GossipSub protocol may be much higher than the number
+// of messages accepted by the local node, which may indicate that the local node is throttling the incoming messages.
+type LocalGossipSubRouterMetrics interface {
+	// OnLocalMeshSizeUpdated tracks the size of the local mesh for a topic.
+	OnLocalMeshSizeUpdated(topic string, size int)
 
-	// OnIncomingRpcAcceptedOnlyForControlMessages tracks the number of RPC messages received by the node that are accepted
-	// only for the control messages, i.e., only for the included IHAVE, IWANT, GRAFT, PRUNE. However, the actual messages
-	// included in the RPC are not accepted.
-	// This happens mostly when the validation pipeline of GossipSub is throttled, and cannot accept more actual messages for
-	// validation.
-	OnIncomingRpcAcceptedOnlyForControlMessages()
+	// OnPeerAddedToProtocol is called when the local node receives a stream from a peer on a gossipsub-related protocol.
+	// Args:
+	// protocol: the protocol name that the peer is connected to.
+	OnPeerAddedToProtocol(protocol string)
 
-	// OnIncomingRpcRejected tracks the number of RPC messages received by the node that are rejected.
-	// This happens mostly when the RPC is coming from a low-scored peer based on the peer scoring module of GossipSub.
-	OnIncomingRpcRejected()
+	// OnPeerRemovedFromProtocol is called when the local node considers a remote peer blacklisted or unavailable.
+	OnPeerRemovedFromProtocol()
 
-	// OnIWantReceived tracks the number of IWANT messages received by the node from other nodes over an RPC message.
-	// iWant is a control message that is sent by a node to request a message that it has seen advertised in an iHAVE message.
-	OnIWantReceived(count int)
+	// OnLocalPeerJoinedTopic is called when the local node subscribes to a gossipsub topic.
+	OnLocalPeerJoinedTopic()
 
-	// OnIHaveReceived tracks the number of IHAVE messages received by the node from other nodes over an RPC message.
-	// iHave is a control message that is sent by a node to another node to indicate that it has a new gossiped message.
-	OnIHaveReceived(count int)
+	// OnLocalPeerLeftTopic is called when the local node unsubscribes from a gossipsub topic.
+	OnLocalPeerLeftTopic()
 
-	// OnGraftReceived tracks the number of GRAFT messages received by the node from other nodes over an RPC message.
-	// GRAFT is a control message of GossipSub protocol that connects two nodes over a topic directly as gossip partners.
- OnGraftReceived(count int) + // OnPeerGraftTopic is called when the local node receives a GRAFT message from a remote peer on a topic. + // Note: the received GRAFT at this point is considered passed the RPC inspection, and is accepted by the local node. + OnPeerGraftTopic(topic string) - // OnPruneReceived tracks the number of PRUNE messages received by the node from other nodes over an RPC message. - // PRUNE is a control message of GossipSub protocol that disconnects two nodes over a topic. - OnPruneReceived(count int) + // OnPeerPruneTopic is called when the local node receives a PRUNE message from a remote peer on a topic. + // Note: the received PRUNE at this point is considered passed the RPC inspection, and is accepted by the local node. + OnPeerPruneTopic(topic string) - // OnPublishedGossipMessagesReceived tracks the number of gossip messages received by the node from other nodes over an - // RPC message. - OnPublishedGossipMessagesReceived(count int) -} + // OnMessageEnteredValidation is called when a received pubsub message enters the validation pipeline. It is the + // internal validation pipeline of GossipSub protocol. The message may be rejected or accepted by the validation + // pipeline. + OnMessageEnteredValidation(size int) -// GossipSubLocalMeshMetrics encapsulates the metrics collectors for GossipSub mesh of the networking layer. -type GossipSubLocalMeshMetrics interface { - // OnLocalMeshSizeUpdated tracks the size of the local mesh for a topic. - OnLocalMeshSizeUpdated(topic string, size int) + // OnMessageRejected is called when a received pubsub message is rejected by the validation pipeline. + // Args: + // + // reason: the reason for rejection. + // size: the size of the message in bytes. + OnMessageRejected(size int, reason string) + + // OnMessageDuplicate is called when a received pubsub message is a duplicate of a previously received message, and + // is dropped. + // Args: + // size: the size of the message in bytes. + OnMessageDuplicate(size int) + + // OnPeerThrottled is called when a peer is throttled by the local node, i.e., the local node is not accepting any + // pubsub message from the peer but may still accept control messages. + OnPeerThrottled() + + // OnRpcReceived is called when an RPC message is received by the local node. The received RPC is considered + // passed the RPC inspection, and is accepted by the local node. + // Args: + // msgCount: the number of messages included in the RPC. + // iHaveCount: the number of iHAVE messages included in the RPC. + // iWantCount: the number of iWANT messages included in the RPC. + // graftCount: the number of GRAFT messages included in the RPC. + // pruneCount: the number of PRUNE messages included in the RPC. + OnRpcReceived(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) + + // OnRpcSent is called when an RPC message is sent by the local node. + // Note: the sent RPC is considered passed the RPC inspection, and is accepted by the local node. + // Args: + // msgCount: the number of messages included in the RPC. + // iHaveCount: the number of iHAVE messages included in the RPC. + // iWantCount: the number of iWANT messages included in the RPC. + // graftCount: the number of GRAFT messages included in the RPC. + // pruneCount: the number of PRUNE messages included in the RPC. 
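+	//
+	// Illustrative call (values are hypothetical): an outbound RPC carrying 3 publish
+	// messages and a single GRAFT, with no iHave/iWant/PRUNE control messages, would be
+	// reported as OnRpcSent(3, 0, 0, 1, 0).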
+	OnRpcSent(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int)
+
+	// OnOutboundRpcDropped is called when an outbound RPC message is dropped by the local node, typically because the
+	// local node's outbound message queue is full, or because the RPC is too large and the local node cannot fragment it.
+	OnOutboundRpcDropped()
+
+	// OnUndeliveredMessage is called when a message is not delivered to at least one subscriber of the topic, for example when
+	// the subscriber is too slow to process the message.
+	OnUndeliveredMessage()
+
+	// OnMessageDeliveredToAllSubscribers is called when a message is delivered to all subscribers of the topic.
+	OnMessageDeliveredToAllSubscribers(size int)
 }
 
 // UnicastManagerMetrics unicast manager metrics.
@@ -106,12 +202,25 @@ type UnicastManagerMetrics interface {
 	// OnEstablishStreamFailure tracks the amount of time taken and number of retry attempts used when the unicast manager cannot establish
 	// a stream on the open connection between two peers.
 	OnEstablishStreamFailure(duration time.Duration, attempts int)
+
+	// OnDialRetryBudgetUpdated tracks the history of the dial retry budget updates.
+	OnDialRetryBudgetUpdated(budget uint64)
+
+	// OnStreamCreationRetryBudgetUpdated tracks the history of the stream creation retry budget updates.
+	OnStreamCreationRetryBudgetUpdated(budget uint64)
+
+	// OnDialRetryBudgetResetToDefault tracks the number of times the dial retry budget is reset to default.
+	OnDialRetryBudgetResetToDefault()
+
+	// OnStreamCreationRetryBudgetResetToDefault tracks the number of times the stream creation retry budget is reset to default.
+	OnStreamCreationRetryBudgetResetToDefault()
 }
 
 type GossipSubMetrics interface {
 	GossipSubScoringMetrics
-	GossipSubRouterMetrics
-	GossipSubLocalMeshMetrics
+	GossipSubRpcInspectorMetrics
+	LocalGossipSubRouterMetrics
+	GossipSubRpcValidationInspectorMetrics
 }
 
 type LibP2PMetrics interface {
@@ -121,6 +230,7 @@ type LibP2PMetrics interface {
 	rcmgr.MetricsReporter
 	LibP2PConnectionMetrics
 	UnicastManagerMetrics
+	GossipSubScoringRegistryMetrics
 }
 
 // GossipSubScoringMetrics encapsulates the metrics collectors for the peer scoring module of GossipSub protocol.
@@ -148,6 +258,134 @@ type GossipSubScoringMetrics interface {
 	SetWarningStateCount(uint)
 }
 
+// GossipSubRpcValidationInspectorMetrics encapsulates the metrics collectors for the gossipsub rpc validation control message inspectors.
+type GossipSubRpcValidationInspectorMetrics interface {
+	GossipSubRpcInspectorMetrics
+
+	// AsyncProcessingStarted increments the metric tracking the number of inspect message requests being processed by workers in the rpc validator worker pool.
+	AsyncProcessingStarted()
+	// AsyncProcessingFinished tracks the time spent by an rpc validation inspector worker to process an inspect message request asynchronously and decrements the metric tracking
+	// the number of inspect message requests being processed asynchronously by the rpc validation inspector workers.
+	AsyncProcessingFinished(duration time.Duration)
+
+	// OnIHaveControlMessageIdsTruncated tracks the number of times message ids on an iHave message were truncated.
+	// Note that this function is called only when the message ids are truncated from an iHave message, not when the iHave message itself is truncated.
+	// This is different from the OnControlMessagesTruncated function, which is called when a slice of control messages is truncated from an RPC with all their message ids.
+	// Args:
+	//
+	//	diff: the number of message ids truncated.
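+	//
+	// Illustrative example (the truncation cap is configuration-dependent and
+	// hypothetical here): if an iHave message advertises 1200 message ids and the
+	// inspector truncates the list to 1000, this method is called with diff = 200.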
+	OnIHaveControlMessageIdsTruncated(diff int)
+
+	// OnIWantControlMessageIdsTruncated tracks the number of times message ids on an iWant message were truncated.
+	// Note that this function is called only when the message ids are truncated from an iWant message, not when the iWant message itself is truncated.
+	// This is different from the OnControlMessagesTruncated function, which is called when a slice of control messages is truncated from an RPC with all their message ids.
+	// Args:
+	//	diff: the number of message ids truncated.
+	OnIWantControlMessageIdsTruncated(diff int)
+
+	// OnControlMessagesTruncated tracks the number of times a slice of control messages is truncated from an RPC with all their included message ids.
+	// Args:
+	//
+	//	messageType: the type of the control message that was truncated
+	//	diff: the number of control messages truncated.
+	OnControlMessagesTruncated(messageType p2pmsg.ControlMessageType, diff int)
+
+	// OnIWantMessagesInspected tracks the number of duplicate and cache-miss message ids received by the node on iWant messages at the end of the async inspection of iWant messages
+	// across one RPC, regardless of the result of the inspection.
+	//
+	//	duplicateCount: the total number of duplicate message ids received by the node on the iWant messages at the end of the async inspection of the RPC.
+	//	cacheMissCount: the total number of cache-miss message ids received by the node on the iWant messages at the end of the async inspection of the RPC.
+	OnIWantMessagesInspected(duplicateCount int, cacheMissCount int)
+
+	// OnIWantDuplicateMessageIdsExceedThreshold tracks the number of times that async inspection of iWant messages failed due to the total number of duplicate message ids
+	// received by the node on the iWant messages of a single RPC exceeding the threshold, which results in a misbehaviour report.
+	OnIWantDuplicateMessageIdsExceedThreshold()
+
+	// OnIWantCacheMissMessageIdsExceedThreshold tracks the number of times that async inspection of iWant messages failed due to the total
+	// number of cache-miss message ids received by the node on the iWant messages of a single RPC exceeding the threshold, which results in a misbehaviour report.
+	OnIWantCacheMissMessageIdsExceedThreshold()
+
+	// OnIHaveMessagesInspected is called at the end of the async inspection of iHave messages of a single RPC, regardless of the result of the inspection.
+	// It tracks the number of duplicate topic ids and duplicate message ids received by the node on the iHave messages of that single RPC at the end of the async inspection of iHave messages.
+	// Args:
+	//
+	//	duplicateTopicIds: the total number of duplicate topic ids received by the node on the iHave messages at the end of the async inspection of the RPC.
+	//	duplicateMessageIds: the number of duplicate message ids received by the node on the iHave messages at the end of the async inspection of the RPC.
+	//	invalidTopicIds: the number of invalid topic ids received by the node on the iHave messages at the end of the async inspection of the RPC.
+	OnIHaveMessagesInspected(duplicateTopicIds int, duplicateMessageIds, invalidTopicIds int)
+
+	// OnIHaveDuplicateTopicIdsExceedThreshold tracks the number of times that the async inspection of iHave messages of a single RPC failed due to the total number of duplicate topic ids
+	// received by the node on the iHave messages of that RPC exceeding the threshold, which results in a misbehaviour report.
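+	//
+	// Illustrative example (the threshold is configuration-dependent and hypothetical
+	// here): with a duplicate-topic-id threshold of 10, an RPC whose iHave messages
+	// repeat already-seen topic ids 11 times triggers this method once.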
+	OnIHaveDuplicateTopicIdsExceedThreshold()
+
+	// OnIHaveInvalidTopicIdsExceedThreshold tracks the number of times that the async inspection of iHave messages of a single RPC failed due to the total number of invalid topic ids
+	// received by the node on the iHave messages of that RPC exceeding the threshold, which results in a misbehaviour report.
+	OnIHaveInvalidTopicIdsExceedThreshold()
+
+	// OnIHaveDuplicateMessageIdsExceedThreshold tracks the number of times that the async inspection of iHave messages of a single RPC failed due to the total number of duplicate message ids
+	// received by the node on an iHave message exceeding the threshold, which results in a misbehaviour report.
+	OnIHaveDuplicateMessageIdsExceedThreshold()
+
+	// OnInvalidTopicIdDetectedForControlMessage tracks the number of times that the async inspection of a control message type on a single RPC failed due to an invalid topic id.
+	// Args:
+	// - messageType: the type of the control message that contained the invalid topic id.
+	OnInvalidTopicIdDetectedForControlMessage(messageType p2pmsg.ControlMessageType)
+
+	// OnActiveClusterIDsNotSetErr tracks the number of times that the async inspection of a control message type on a single RPC failed because the active cluster ids were not set.
+	// This does not result in a misbehaviour report.
+	OnActiveClusterIDsNotSetErr()
+
+	// OnUnstakedPeerInspectionFailed tracks the number of times that the async inspection of a control message type on a single RPC failed because the sending peer is unstaked.
+	// This does not result in a misbehaviour report.
+	OnUnstakedPeerInspectionFailed()
+
+	// OnInvalidControlMessageNotificationSent tracks the number of times that the async inspection of a control message failed and resulted in an invalid control message
+	// notification being disseminated.
+	OnInvalidControlMessageNotificationSent()
+
+	// OnRpcRejectedFromUnknownSender tracks the number of RPCs rejected from unstaked nodes.
+	OnRpcRejectedFromUnknownSender()
+
+	// OnPublishMessagesInspectionErrorExceedsThreshold tracks the number of times that async inspection of publish messages failed because the number of errors exceeded the threshold.
+	OnPublishMessagesInspectionErrorExceedsThreshold()
+
+	// OnPruneDuplicateTopicIdsExceedThreshold tracks the number of times that the async inspection of prune messages for an RPC failed due to the number of duplicate topic ids
+	// received by the node on prune messages of the same RPC exceeding the threshold, which results in a misbehaviour report.
+	OnPruneDuplicateTopicIdsExceedThreshold()
+
+	// OnPruneInvalidTopicIdsExceedThreshold tracks the number of times that the async inspection of prune messages for an RPC failed due to the number of invalid topic ids
+	// received by the node on prune messages of the same RPC exceeding the threshold, which results in a misbehaviour report.
+	OnPruneInvalidTopicIdsExceedThreshold()
+
+	// OnPruneMessageInspected is called at the end of the async inspection of prune messages of the RPC, regardless of the result of the inspection.
+	// Args:
+	//	duplicateTopicIds: the number of duplicate topic ids received by the node on the prune messages of the RPC at the end of the async inspection of prune messages.
+	//	invalidTopicIds: the number of invalid topic ids received by the node on the prune messages at the end of the async inspection of a single RPC.
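+	//
+	// Illustrative call (values are hypothetical): an RPC whose prune messages repeat
+	// one topic id and contain one malformed topic id would be reported as
+	// OnPruneMessageInspected(1, 1).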
+	OnPruneMessageInspected(duplicateTopicIds, invalidTopicIds int)
+
+	// OnGraftDuplicateTopicIdsExceedThreshold tracks the number of times that the async inspection of the graft messages of a single RPC failed due to the number of duplicate topic ids
+	// received by the node on graft messages of the same RPC exceeding the threshold, which results in a misbehaviour report.
+	OnGraftDuplicateTopicIdsExceedThreshold()
+
+	// OnGraftInvalidTopicIdsExceedThreshold tracks the number of times that the async inspection of the graft messages of a single RPC failed due to the number of invalid topic ids
+	// received by the node on graft messages of the same RPC exceeding the threshold, which results in a misbehaviour report.
+	OnGraftInvalidTopicIdsExceedThreshold()
+
+	// OnGraftMessageInspected is called at the end of the async inspection of graft messages of a single RPC, regardless of the result of the inspection.
+	// Args:
+	//	duplicateTopicIds: the number of duplicate topic ids received by the node on the graft messages at the end of the async inspection of a single RPC.
+	//	invalidTopicIds: the number of invalid topic ids received by the node on the graft messages at the end of the async inspection of a single RPC.
+	OnGraftMessageInspected(duplicateTopicIds, invalidTopicIds int)
+
+	// OnPublishMessageInspected is called at the end of the async inspection of publish messages of a single RPC, regardless of the result of the inspection.
+	// It tracks the total number of errors detected during the async inspection of the RPC together with their individual breakdown.
+	// Args:
+	// - totalErrCount: the number of errors that occurred during the async inspection of publish messages.
+	// - invalidTopicIdsCount: the number of times that an invalid topic id was detected during the async inspection of publish messages.
+	// - invalidSubscriptionsCount: the number of times that an invalid subscription was detected during the async inspection of publish messages.
+	// - invalidSendersCount: the number of times that an invalid sender was detected during the async inspection of publish messages.
+	OnPublishMessageInspected(totalErrCount int, invalidTopicIdsCount int, invalidSubscriptionsCount int, invalidSendersCount int)
+}
+
 // NetworkInboundQueueMetrics encapsulates the metrics collectors for the inbound queue of the networking layer.
 type NetworkInboundQueueMetrics interface {
 
@@ -165,6 +403,8 @@ type NetworkInboundQueueMetrics interface {
 type NetworkCoreMetrics interface {
 	NetworkInboundQueueMetrics
 	AlspMetrics
+	NetworkSecurityMetrics
+
 	// OutboundMessageSent collects metrics related to a message sent by the node.
 	OutboundMessageSent(sizeBytes int, topic string, protocol string, messageType string)
 	// InboundMessageReceived collects metrics related to a message received by the node.
@@ -206,7 +446,6 @@ type AlspMetrics interface {
 // NetworkMetrics is the blanket abstraction that encapsulates the metrics collectors for the networking layer.
 type NetworkMetrics interface {
 	LibP2PMetrics
-	NetworkSecurityMetrics
 	NetworkCoreMetrics
 }
 
@@ -230,20 +469,37 @@ type EngineMetrics interface {
 	OutboundMessageDropped(engine string, messages string)
 }
 
+// ComplianceMetrics reports metrics about the compliance layer, which processes, finalizes,
+// and seals blocks and tracks data in the Protocol State.
 type ComplianceMetrics interface {
+	// FinalizedHeight reports the latest finalized height known to this node.
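+	// Reported values are expected to be monotonically non-decreasing under normal
+	// operation, since finalization never reverts.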
 	FinalizedHeight(height uint64)
-	CommittedEpochFinalView(view uint64)
+	// EpochTransitionHeight reports the height of the most recently finalized epoch transition.
 	EpochTransitionHeight(height uint64)
+	// SealedHeight reports the height of the latest block that has a finalized seal known to this node.
 	SealedHeight(height uint64)
+	// BlockFinalized reports information about data contained within finalized blocks.
 	BlockFinalized(*flow.Block)
+	// BlockSealed reports information about data contained within blocks with a finalized seal.
+	// When a new block is finalized, this method is called for every seal in the finalized block,
+	// in the order the seals are listed.
+	// CAUTION: within a block, seals can be included in any permutation (not necessarily in order
+	// of increasing height).
 	BlockSealed(*flow.Block)
+	// CurrentEpochCounter reports the current epoch counter.
 	CurrentEpochCounter(counter uint64)
+	// CurrentEpochPhase reports the current epoch phase.
 	CurrentEpochPhase(phase flow.EpochPhase)
+	// CurrentEpochFinalView reports the final view of the current epoch, including epoch extensions.
 	CurrentEpochFinalView(view uint64)
-	CurrentDKGPhase1FinalView(view uint64)
-	CurrentDKGPhase2FinalView(view uint64)
-	CurrentDKGPhase3FinalView(view uint64)
-	EpochEmergencyFallbackTriggered()
+	// CurrentDKGPhaseViews reports the final view of each DKG phase for the current epoch.
+	CurrentDKGPhaseViews(phase1FinalView, phase2FinalView, phase3FinalView uint64)
+	// EpochFallbackModeTriggered reports that EFM is triggered.
+	EpochFallbackModeTriggered()
+	// EpochFallbackModeExited reports that EFM is no longer triggered.
+	EpochFallbackModeExited()
+	// ProtocolStateVersion reports the version of the latest finalized protocol state.
+	ProtocolStateVersion(version uint64)
 }
 
 type CleanerMetrics interface {
@@ -324,9 +580,36 @@ type HotstuffMetrics interface {
 	// PayloadProductionDuration measures the time which the HotStuff's core logic
 	// spends in the module.Builder component, i.e. the with generating block payloads.
 	PayloadProductionDuration(duration time.Duration)
+
+	// TimeoutCollectorsRange collects information from the node's `TimeoutAggregator` component.
+	// Specifically, it measures the number of views for which we are currently collecting timeouts
+	// (i.e. the number of `TimeoutCollector` instances we are maintaining) and their lowest/highest view.
+	TimeoutCollectorsRange(lowestRetainedView uint64, newestViewCreatedCollector uint64, activeCollectors int)
+}
+
+type CruiseCtlMetrics interface {
+
+	// PIDError measures the current error values for the proportional, integration,
+	// and derivative terms of the PID controller.
+	PIDError(p, i, d float64)
+
+	// TargetProposalDuration measures the current value of the Block Time Controller output:
+	// the target duration from parent to child proposal.
+	TargetProposalDuration(duration time.Duration)
+
+	// ControllerOutput measures the output of the cruise control PID controller.
+	// Concretely, this is the quantity to subtract from the baseline view duration.
+	ControllerOutput(duration time.Duration)
+
+	// ProposalPublicationDelay measures the effective delay the controller imposes on publishing
+	// the node's own proposals, with all limits of authority applied.
+	// Note: Technically, our metrics capture the publication delay relative to when the publication delay was
+	// last requested. Currently, only the EventHandler requests a publication delay, exactly once per proposal.
+ ProposalPublicationDelay(duration time.Duration) } type CollectionMetrics interface { + TransactionValidationMetrics // TransactionIngested is called when a new transaction is ingested by the // node. It increments the total count of ingested transactions and starts // a tx->col span for the transaction. @@ -338,6 +621,9 @@ type CollectionMetrics interface { // ClusterBlockFinalized is called when a collection is finalized. ClusterBlockFinalized(block *cluster.Block) + + // CollectionMaxSize measures the current maximum size of a collection. + CollectionMaxSize(size uint) } type ConsensusMetrics interface { @@ -490,6 +776,8 @@ type LedgerMetrics interface { } type WALMetrics interface { + // ExecutionCheckpointSize reports the size of a checkpoint in bytes + ExecutionCheckpointSize(bytes uint64) } type RateLimitedBlockstoreMetrics interface { @@ -513,6 +801,8 @@ type ExecutionDataRequesterMetrics interface { ExecutionDataFetchStarted() // ExecutionDataFetchFinished records a completed download + // Pass the highest consecutive height to ensure the metrics reflect the height up to which the + // requester has completed downloads. This allows us to easily see when downloading gets stuck. ExecutionDataFetchFinished(duration time.Duration, success bool, height uint64) // NotificationSent reports that ExecutionData received notifications were sent for a block height @@ -522,6 +812,35 @@ type ExecutionDataRequesterMetrics interface { FetchRetried() } +type ExecutionStateIndexerMetrics interface { + // BlockIndexed records metrics from indexing execution data from a single block. + BlockIndexed(height uint64, duration time.Duration, events, registers, transactionResults int) + + // BlockReindexed records that a previously indexed block was indexed again. + BlockReindexed() + + // InitializeLatestHeight records the latest height that has been indexed. + // This should only be used during startup. After startup, use BlockIndexed to record newly + // indexed heights. + InitializeLatestHeight(height uint64) +} + +type TransactionErrorMessagesMetrics interface { + // TxErrorsInitialHeight records the initial height of the transaction error messages. + TxErrorsInitialHeight(height uint64) + + // TxErrorsFetchStarted records that a transaction error messages download has started. + TxErrorsFetchStarted() + + // TxErrorsFetchFinished records that a transaction error messages download has finished. + // Pass the highest consecutive height to ensure the metrics reflect the height up to which the + // requester has completed downloads. This allows us to easily see when downloading gets stuck. + TxErrorsFetchFinished(duration time.Duration, success bool, height uint64) + + // TxErrorsFetchRetried records that a transaction error messages download has been retried. 
+ TxErrorsFetchRetried() +} + type RuntimeMetrics interface { // RuntimeTransactionParsed reports the time spent parsing a single transaction RuntimeTransactionParsed(dur time.Duration) @@ -544,6 +863,15 @@ type RuntimeMetrics interface { RuntimeTransactionProgramsCacheHit() } +type EVMMetrics interface { + // SetNumberOfDeployedCOAs sets the total number of deployed COAs + SetNumberOfDeployedCOAs(count uint64) + // EVMTransactionExecuted reports the gas used when executing an evm transaction + EVMTransactionExecuted(gasUsed uint64, isDirectCall bool, failed bool) + // EVMBlockExecuted reports the block size, total gas used and total supply when executing an evm block + EVMBlockExecuted(txCount int, totalGasUsed uint64, totalSupplyInFlow float64) +} + type ProviderMetrics interface { // ChunkDataPackRequestProcessed is executed every time a chunk data pack request is picked up for processing at execution node. // It increases the request processed counter by one. @@ -569,7 +897,14 @@ type ExecutionDataPrunerMetrics interface { Pruned(height uint64, duration time.Duration) } -type AccessMetrics interface { +type RestMetrics interface { + // Example recorder taken from: + // https://github.com/slok/go-http-metrics/blob/master/metrics/prometheus/prometheus.go + httpmetrics.Recorder + AddTotalRequests(ctx context.Context, method string, routeName string) +} + +type GRPCConnectionPoolMetrics interface { // TotalConnectionsInPool updates the number connections to collection/execution nodes stored in the pool, and the size of the pool TotalConnectionsInPool(connectionCount uint, connectionPoolSize uint) @@ -592,6 +927,20 @@ type AccessMetrics interface { ConnectionFromPoolEvicted() } +type AccessMetrics interface { + RestMetrics + GRPCConnectionPoolMetrics + TransactionMetrics + TransactionValidationMetrics + BackendScriptsMetrics + + // UpdateExecutionReceiptMaxHeight is called whenever we store an execution receipt from a block from a newer height + UpdateExecutionReceiptMaxHeight(height uint64) + + // UpdateLastFullBlockHeight tracks the height of the last block for which all collections were received + UpdateLastFullBlockHeight(height uint64) +} + type ExecutionResultStats struct { ComputationUsed uint64 MemoryUsed uint64 @@ -599,8 +948,31 @@ type ExecutionResultStats struct { EventSize int NumberOfRegistersTouched int NumberOfBytesWrittenToRegisters int - NumberOfCollections int - NumberOfTransactions int +} + +type BlockExecutionResultStats struct { + CollectionExecutionResultStats + NumberOfCollections int +} + +type CollectionExecutionResultStats struct { + ExecutionResultStats + NumberOfTransactions int +} + +type TransactionExecutionResultStats struct { + ExecutionResultStats + NumberOfTxnConflictRetries int + Failed bool + ScheduledTransaction bool + SystemTransaction bool + ComputationIntensities meter.MeteredComputationIntensities +} + +type TransactionExecutionResultInfo struct { + TransactionID flow.Identifier + BlockID flow.Identifier + BlockHeight uint64 } func (stats *ExecutionResultStats) Merge(other ExecutionResultStats) { @@ -610,13 +982,23 @@ func (stats *ExecutionResultStats) Merge(other ExecutionResultStats) { stats.EventSize += other.EventSize stats.NumberOfRegistersTouched += other.NumberOfRegistersTouched stats.NumberOfBytesWrittenToRegisters += other.NumberOfBytesWrittenToRegisters - stats.NumberOfCollections += other.NumberOfCollections +} + +func (stats *CollectionExecutionResultStats) Add(other TransactionExecutionResultStats) { + 
stats.ExecutionResultStats.Merge(other.ExecutionResultStats) + stats.NumberOfTransactions += 1 +} + +func (stats *BlockExecutionResultStats) Add(other CollectionExecutionResultStats) { + stats.CollectionExecutionResultStats.Merge(other.ExecutionResultStats) stats.NumberOfTransactions += other.NumberOfTransactions + stats.NumberOfCollections += 1 } type ExecutionMetrics interface { LedgerMetrics RuntimeMetrics + EVMMetrics ProviderMetrics WALMetrics @@ -634,23 +1016,29 @@ type ExecutionMetrics interface { // ExecutionLastExecutedBlockHeight reports last executed block height ExecutionLastExecutedBlockHeight(height uint64) + // ExecutionLastFinalizedExecutedBlockHeight reports last finalized and executed block height + ExecutionLastFinalizedExecutedBlockHeight(height uint64) + + // ExecutionLastChunkDataPackPrunedHeight reports last chunk data pack pruned height + ExecutionLastChunkDataPackPrunedHeight(height uint64) + + // ExecutionTargetChunkDataPackPrunedHeight reports the target height for chunk data pack to be pruned + ExecutionTargetChunkDataPackPrunedHeight(height uint64) + // ExecutionBlockExecuted reports the total time and computation spent on executing a block - ExecutionBlockExecuted(dur time.Duration, stats ExecutionResultStats) + ExecutionBlockExecuted(dur time.Duration, stats BlockExecutionResultStats) // ExecutionBlockExecutionEffortVectorComponent reports the unweighted effort of given ComputationKind at block level - ExecutionBlockExecutionEffortVectorComponent(string, uint) + ExecutionBlockExecutionEffortVectorComponent(string, uint64) // ExecutionBlockCachedPrograms reports the number of cached programs at the end of a block ExecutionBlockCachedPrograms(programs int) // ExecutionCollectionExecuted reports the total time and computation spent on executing a collection - ExecutionCollectionExecuted(dur time.Duration, stats ExecutionResultStats) + ExecutionCollectionExecuted(dur time.Duration, stats CollectionExecutionResultStats) // ExecutionTransactionExecuted reports stats on executing a single transaction - ExecutionTransactionExecuted(dur time.Duration, - compUsed, memoryUsed uint64, - eventCounts, eventSize int, - failed bool) + ExecutionTransactionExecuted(dur time.Duration, stats TransactionExecutionResultStats, info TransactionExecutionResultInfo) // ExecutionChunkDataPackGenerated reports stats on chunk data pack generation ExecutionChunkDataPackGenerated(proofSize, numberOfTransactions int) @@ -658,12 +1046,12 @@ type ExecutionMetrics interface { // ExecutionScriptExecuted reports the time and memory spent on executing an script ExecutionScriptExecuted(dur time.Duration, compUsed, memoryUsed, memoryEstimate uint64) + // ExecutionCallbacksExecuted reports the number of callbacks executed, computation used by process transaction, and total computation limits for execute transactions + ExecutionCallbacksExecuted(callbackCount int, processComputationUsed, executeComputationLimits uint64) + // ExecutionCollectionRequestSent reports when a request for a collection is sent to a collection node ExecutionCollectionRequestSent() - // Unused - ExecutionCollectionRequestRetried() - // ExecutionSync reports when the state syncing is triggered or stopped. 
ExecutionSync(syncing bool) @@ -677,13 +1065,37 @@ type ExecutionMetrics interface { } type BackendScriptsMetrics interface { - // Record the round trip time while executing a script + // ScriptExecuted records the round trip time while executing a script ScriptExecuted(dur time.Duration, size int) + + // ScriptExecutionErrorLocal records script execution failures from local execution + ScriptExecutionErrorLocal() + + // ScriptExecutionErrorOnExecutionNode records script execution failures on Execution Nodes + ScriptExecutionErrorOnExecutionNode() + + // ScriptExecutionResultMismatch records script execution result mismatches between local and + // execution nodes + ScriptExecutionResultMismatch() + + // ScriptExecutionResultMatch records script execution result matches between local and + // execution nodes + ScriptExecutionResultMatch() + + // ScriptExecutionErrorMismatch records script execution error mismatches between local and + // execution nodes + ScriptExecutionErrorMismatch() + + // ScriptExecutionErrorMatch records script execution error matches between local and + // execution nodes + ScriptExecutionErrorMatch() + + // ScriptExecutionNotIndexed records script execution matches where data for the block is not + // indexed locally yet + ScriptExecutionNotIndexed() } type TransactionMetrics interface { - BackendScriptsMetrics - // Record the round trip time while getting a transaction result TransactionResultFetched(dur time.Duration, size int) @@ -694,6 +1106,10 @@ type TransactionMetrics interface { // works if the transaction was earlier added as received. TransactionFinalized(txID flow.Identifier, when time.Time) + // TransactionSealed reports the time spent between the transaction being received and sealed. Reporting only + // works if the transaction was earlier added as received. + TransactionSealed(txID flow.Identifier, when time.Time) + // TransactionExecuted reports the time spent between the transaction being received and executed. Reporting only // works if the transaction was earlier added as received. TransactionExecuted(txID flow.Identifier, when time.Time) @@ -703,9 +1119,15 @@ type TransactionMetrics interface { // TransactionSubmissionFailed should be called whenever we try to submit a transaction and it fails TransactionSubmissionFailed() +} - // UpdateExecutionReceiptMaxHeight is called whenever we store an execution receipt from a block from a newer height - UpdateExecutionReceiptMaxHeight(height uint64) +type TransactionValidationMetrics interface { + // TransactionValidated tracks number of successfully validated transactions + TransactionValidated() + // TransactionValidationFailed tracks number of validation failed transactions with reason + TransactionValidationFailed(reason string) + // TransactionValidationSkipped tracks number of skipped transaction validations + TransactionValidationSkipped() } type PingMetrics interface { @@ -782,3 +1204,23 @@ type DHTMetrics interface { RoutingTablePeerAdded() RoutingTablePeerRemoved() } + +type CollectionExecutedMetric interface { + CollectionFinalized(light *flow.LightCollection) + CollectionExecuted(light *flow.LightCollection) + BlockFinalized(block *flow.Block) + ExecutionReceiptReceived(r *flow.ExecutionReceipt) + UpdateLastFullBlockHeight(height uint64) +} + +type MachineAccountMetrics interface { + // AccountBalance reports the current balance of the machine account. + AccountBalance(bal float64) + // RecommendedMinBalance reports the recommended minimum balance. 
If the actual balance + // falls below this level, it must be refilled. + // NOTE: Operators should alert on `AccountBalance < RecommendedMinBalance` + RecommendedMinBalance(bal float64) + // IsMisconfigured reports whether a critical misconfiguration has been detected. + // NOTE Operators should alert on non-zero values reported here. + IsMisconfigured(misconfigured bool) +} diff --git a/module/metrics/access.go b/module/metrics/access.go index 4dcfc6e6f38..577e8c34405 100644 --- a/module/metrics/access.go +++ b/module/metrics/access.go @@ -3,9 +3,43 @@ package metrics import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" ) +type AccessCollectorOpts func(*AccessCollector) + +func WithTransactionValidationMetrics(m module.TransactionValidationMetrics) AccessCollectorOpts { + return func(ac *AccessCollector) { + ac.TransactionValidationMetrics = m + } +} + +func WithTransactionMetrics(m module.TransactionMetrics) AccessCollectorOpts { + return func(ac *AccessCollector) { + ac.TransactionMetrics = m + } +} + +func WithBackendScriptsMetrics(m module.BackendScriptsMetrics) AccessCollectorOpts { + return func(ac *AccessCollector) { + ac.BackendScriptsMetrics = m + } +} + +func WithRestMetrics(m module.RestMetrics) AccessCollectorOpts { + return func(ac *AccessCollector) { + ac.RestMetrics = m + } +} + type AccessCollector struct { + module.RestMetrics + module.TransactionMetrics + module.TransactionValidationMetrics + module.BackendScriptsMetrics + connectionReused prometheus.Counter connectionsInPool *prometheus.GaugeVec connectionAdded prometheus.Counter @@ -13,9 +47,16 @@ type AccessCollector struct { connectionInvalidated prometheus.Counter connectionUpdated prometheus.Counter connectionEvicted prometheus.Counter + lastFullBlockHeight prometheus.Gauge + maxReceiptHeight prometheus.Gauge + + // used to skip heights that are lower than the current max height + maxReceiptHeightValue counters.StrictMonotonicCounter } -func NewAccessCollector() *AccessCollector { +var _ module.AccessMetrics = (*AccessCollector)(nil) + +func NewAccessCollector(opts ...AccessCollectorOpts) *AccessCollector { ac := &AccessCollector{ connectionReused: promauto.NewCounter(prometheus.CounterOpts{ Name: "connection_reused", @@ -59,6 +100,23 @@ func NewAccessCollector() *AccessCollector { Subsystem: subsystemConnectionPool, Help: "counter for the number of times a cached connection is evicted from the connection pool", }), + lastFullBlockHeight: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "last_full_finalized_block_height", + Namespace: namespaceAccess, + Subsystem: subsystemIngestion, + Help: "gauge to track the highest consecutive finalized block height with all collections indexed", + }), + maxReceiptHeight: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "max_receipt_height", + Namespace: namespaceAccess, + Subsystem: subsystemIngestion, + Help: "gauge to track the maximum block height of execution receipts received", + }), + maxReceiptHeightValue: counters.NewMonotonicCounter(0), + } + + for _, opt := range opts { + opt(ac) } return ac @@ -92,3 +150,13 @@ func (ac *AccessCollector) ConnectionFromPoolUpdated() { func (ac *AccessCollector) ConnectionFromPoolEvicted() { ac.connectionEvicted.Inc() } + +func (ac *AccessCollector) UpdateLastFullBlockHeight(height uint64) { + ac.lastFullBlockHeight.Set(float64(height)) +} + +func (ac *AccessCollector) 
UpdateExecutionReceiptMaxHeight(height uint64) { + if ac.maxReceiptHeightValue.Set(height) { + ac.maxReceiptHeight.Set(float64(height)) + } +} diff --git a/module/metrics/collection.go b/module/metrics/collection.go index 19be622f0ab..d92e9c891dd 100644 --- a/module/metrics/collection.go +++ b/module/metrics/collection.go @@ -10,18 +10,22 @@ import ( ) type CollectionCollector struct { + module.TransactionValidationMetrics tracer module.Tracer transactionsIngested prometheus.Counter // tracks the number of ingested transactions finalizedHeight *prometheus.GaugeVec // tracks the finalized height + maxCollectionSize prometheus.Gauge // tracks the maximum collection size proposals *prometheus.HistogramVec // tracks the number/size of PROPOSED collections guarantees *prometheus.HistogramVec // counts the number/size of FINALIZED collections } +var _ module.CollectionMetrics = (*CollectionCollector)(nil) + func NewCollectionCollector(tracer module.Tracer) *CollectionCollector { cc := &CollectionCollector{ - tracer: tracer, - + TransactionValidationMetrics: NewTransactionValidationCollector(), + tracer: tracer, transactionsIngested: promauto.NewCounter(prometheus.CounterOpts{ Namespace: namespaceCollection, Name: "ingested_transactions_total", @@ -35,6 +39,13 @@ func NewCollectionCollector(tracer module.Tracer) *CollectionCollector { Help: "tracks the latest finalized height", }, []string{LabelChain}), + maxCollectionSize: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceCollection, + Subsystem: subsystemProposal, + Name: "max_collection_size", + Help: "last used max collection size", + }), + proposals: promauto.NewHistogramVec(prometheus.HistogramOpts{ Namespace: namespaceCollection, Subsystem: subsystemProposal, @@ -49,7 +60,7 @@ func NewCollectionCollector(tracer module.Tracer) *CollectionCollector { Buckets: []float64{1, 2, 5, 10, 20}, Name: "guarantees_size_transactions", Help: "size/number of guaranteed/finalized collections", - }, []string{LabelChain, LabelProposer}), + }, []string{LabelChain}), } return cc @@ -67,7 +78,7 @@ func (cc *CollectionCollector) ClusterBlockProposed(block *cluster.Block) { collection := block.Payload.Collection.Light() cc.proposals. - With(prometheus.Labels{LabelChain: block.Header.ChainID.String()}). + With(prometheus.Labels{LabelChain: block.ChainID.String()}). Observe(float64(collection.Len())) } @@ -75,16 +86,19 @@ func (cc *CollectionCollector) ClusterBlockProposed(block *cluster.Block) { // finishes the tx->collection span for each constituent transaction. func (cc *CollectionCollector) ClusterBlockFinalized(block *cluster.Block) { collection := block.Payload.Collection.Light() - chainID := block.Header.ChainID - proposer := block.Header.ProposerID + chainID := block.ChainID cc.finalizedHeight. With(prometheus.Labels{LabelChain: chainID.String()}). - Set(float64(block.Header.Height)) + Set(float64(block.Height)) cc.guarantees. With(prometheus.Labels{ - LabelChain: chainID.String(), - LabelProposer: proposer.String(), + LabelChain: chainID.String(), }). Observe(float64(collection.Len())) } + +// CollectionMaxSize measures the current maximum size of a collection. 
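+// The reported value is exported via the maxCollectionSize gauge registered in
+// NewCollectionCollector above (namespaceCollection / subsystemProposal).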
+func (cc *CollectionCollector) CollectionMaxSize(size uint) { + cc.maxCollectionSize.Set(float64(size)) +} diff --git a/module/metrics/compliance.go b/module/metrics/compliance.go index de74b79cfcf..6e86e539567 100644 --- a/module/metrics/compliance.go +++ b/module/metrics/compliance.go @@ -11,23 +11,23 @@ import ( ) type ComplianceCollector struct { - finalizedHeight prometheus.Gauge - sealedHeight prometheus.Gauge - finalizedBlocks *prometheus.CounterVec - sealedBlocks prometheus.Counter - finalizedPayload *prometheus.CounterVec - sealedPayload *prometheus.CounterVec - lastBlockFinalizedAt time.Time - finalizedBlocksPerSecond prometheus.Summary - committedEpochFinalView prometheus.Gauge - lastEpochTransitionHeight prometheus.Gauge - currentEpochCounter prometheus.Gauge - currentEpochPhase prometheus.Gauge - currentEpochFinalView prometheus.Gauge - currentDKGPhase1FinalView prometheus.Gauge - currentDKGPhase2FinalView prometheus.Gauge - currentDKGPhase3FinalView prometheus.Gauge - epochEmergencyFallbackTriggered prometheus.Gauge + finalizedHeight prometheus.Gauge + sealedHeight prometheus.Gauge + finalizedBlocks prometheus.Counter + sealedBlocks prometheus.Counter + finalizedPayload *prometheus.CounterVec + sealedPayload *prometheus.CounterVec + lastBlockFinalizedAt time.Time + finalizedBlocksPerSecond prometheus.Summary + lastEpochTransitionHeight prometheus.Gauge + currentEpochCounter prometheus.Gauge + currentEpochPhase prometheus.Gauge + currentEpochFinalView prometheus.Gauge + currentDKGPhase1FinalView prometheus.Gauge + currentDKGPhase2FinalView prometheus.Gauge + currentDKGPhase3FinalView prometheus.Gauge + epochFallbackModeTriggered prometheus.Gauge + protocolStateVersion prometheus.Gauge } var _ module.ComplianceMetrics = (*ComplianceCollector)(nil) @@ -50,13 +50,6 @@ func NewComplianceCollector() *ComplianceCollector { Help: "the current epoch's phase", }), - committedEpochFinalView: promauto.NewGauge(prometheus.GaugeOpts{ - Name: "committed_epoch_final_view", - Namespace: namespaceConsensus, - Subsystem: subsystemCompliance, - Help: "the final view of the committed epoch with the greatest counter", - }), - lastEpochTransitionHeight: promauto.NewGauge(prometheus.GaugeOpts{ Name: "last_epoch_transition_height", Namespace: namespaceConsensus, @@ -105,12 +98,12 @@ func NewComplianceCollector() *ComplianceCollector { Help: "the last sealed height", }), - finalizedBlocks: promauto.NewCounterVec(prometheus.CounterOpts{ + finalizedBlocks: promauto.NewCounter(prometheus.CounterOpts{ Name: "finalized_blocks_total", Namespace: namespaceConsensus, Subsystem: subsystemCompliance, Help: "the number of finalized blocks", - }, []string{LabelProposer}), + }), sealedBlocks: promauto.NewCounter(prometheus.CounterOpts{ Name: "sealed_blocks_total", @@ -150,11 +143,18 @@ func NewComplianceCollector() *ComplianceCollector { BufCap: 500, }), - epochEmergencyFallbackTriggered: promauto.NewGauge(prometheus.GaugeOpts{ + epochFallbackModeTriggered: promauto.NewGauge(prometheus.GaugeOpts{ Name: "epoch_fallback_triggered", Namespace: namespaceConsensus, Subsystem: subsystemCompliance, - Help: "indicates whether epoch emergency fallback is triggered; if >0, the fallback is triggered", + Help: "indicates whether epoch fallback mode is triggered; if >0, the fallback is triggered", + }), + + protocolStateVersion: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "protocol_state_version", + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, + Help: "reports the protocol state version of the latest 
finalized block", }), } @@ -170,13 +170,16 @@ func (cc *ComplianceCollector) FinalizedHeight(height uint64) { func (cc *ComplianceCollector) BlockFinalized(block *flow.Block) { now := time.Now() if !cc.lastBlockFinalizedAt.IsZero() { - cc.finalizedBlocksPerSecond.Observe(1 / now.Sub(cc.lastBlockFinalizedAt).Seconds()) + cc.finalizedBlocksPerSecond.Observe(1.0 / now.Sub(cc.lastBlockFinalizedAt).Seconds()) } cc.lastBlockFinalizedAt = now - cc.finalizedBlocks.With(prometheus.Labels{LabelProposer: block.Header.ProposerID.String()}).Inc() + cc.finalizedBlocks.Inc() cc.finalizedPayload.With(prometheus.Labels{LabelResource: ResourceGuarantee}).Add(float64(len(block.Payload.Guarantees))) + // count of seals INCLUDES emergency seals cc.finalizedPayload.With(prometheus.Labels{LabelResource: ResourceSeal}).Add(float64(len(block.Payload.Seals))) + // keep track of emergency seals (any seal without a verifier signature) + cc.finalizedPayload.With(prometheus.Labels{LabelResource: ResourceEmergencySeal}).Add(float64(countPayloadSealsWithoutApproverSig(block.Payload.Seals))) } // SealedHeight sets the finalized height. @@ -191,10 +194,6 @@ func (cc *ComplianceCollector) BlockSealed(block *flow.Block) { cc.sealedPayload.With(prometheus.Labels{LabelResource: ResourceSeal}).Add(float64(len(block.Payload.Seals))) } -func (cc *ComplianceCollector) CommittedEpochFinalView(view uint64) { - cc.committedEpochFinalView.Set(float64(view)) -} - func (cc *ComplianceCollector) EpochTransitionHeight(height uint64) { // An epoch transition comprises a block in epoch N followed by a block in epoch N+1. // height here refers to the height of the first block in epoch N+1. @@ -213,18 +212,37 @@ func (cc *ComplianceCollector) CurrentEpochFinalView(view uint64) { cc.currentEpochFinalView.Set(float64(view)) } -func (cc *ComplianceCollector) CurrentDKGPhase1FinalView(view uint64) { - cc.currentDKGPhase1FinalView.Set(float64(view)) +func (cc *ComplianceCollector) CurrentDKGPhaseViews(phase1FinalView, phase2FinalView, phase3FinalView uint64) { + cc.currentDKGPhase1FinalView.Set(float64(phase1FinalView)) + cc.currentDKGPhase2FinalView.Set(float64(phase2FinalView)) + cc.currentDKGPhase3FinalView.Set(float64(phase3FinalView)) } -func (cc *ComplianceCollector) CurrentDKGPhase2FinalView(view uint64) { - cc.currentDKGPhase2FinalView.Set(float64(view)) +func (cc *ComplianceCollector) EpochFallbackModeTriggered() { + cc.epochFallbackModeTriggered.Set(float64(1)) } -func (cc *ComplianceCollector) CurrentDKGPhase3FinalView(view uint64) { - cc.currentDKGPhase3FinalView.Set(float64(view)) +func (cc *ComplianceCollector) EpochFallbackModeExited() { + cc.epochFallbackModeTriggered.Set(float64(0)) } -func (cc *ComplianceCollector) EpochEmergencyFallbackTriggered() { - cc.epochEmergencyFallbackTriggered.Set(float64(1)) +func (cc *ComplianceCollector) ProtocolStateVersion(version uint64) { + cc.protocolStateVersion.Set(float64(version)) +} + +// countPayloadSealsWithoutApproverSig counts the number of seals which have any nil signature fields. +// A seal with a nil signature field can be created in two legitimate ways: +// 1. If emergency sealing is enabled +// 2. 
If a network is configured to disable verification (via chunk alpha, or consensus node seal construction config) +func countPayloadSealsWithoutApproverSig(seals []*flow.Seal) int { + count := 0 + for _, seal := range seals { + for _, sig := range seal.AggregatedApprovalSigs { + if sig.SignerIDs == nil || sig.VerifierSignatures == nil { + count++ + break + } + } + } + return count } diff --git a/module/metrics/cruisectl.go b/module/metrics/cruisectl.go new file mode 100644 index 00000000000..c4d471814ee --- /dev/null +++ b/module/metrics/cruisectl.go @@ -0,0 +1,82 @@ +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module" +) + +// CruiseCtlMetrics captures metrics about the Block Rate Controller, which adjusts +// the proposal duration to attain a target epoch switchover time. +type CruiseCtlMetrics struct { + proportionalErr prometheus.Gauge + integralErr prometheus.Gauge + derivativeErr prometheus.Gauge + targetProposalDur prometheus.Gauge + controllerOutput prometheus.Gauge + proposalPublicationDelay prometheus.Gauge +} + +var _ module.CruiseCtlMetrics = (*CruiseCtlMetrics)(nil) + +func NewCruiseCtlMetrics() *CruiseCtlMetrics { + return &CruiseCtlMetrics{ + proportionalErr: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "proportional_err_s", + Namespace: namespaceConsensus, + Subsystem: subsystemCruiseCtl, + Help: "The current proportional error measured by the controller", + }), + integralErr: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "integral_err_s", + Namespace: namespaceConsensus, + Subsystem: subsystemCruiseCtl, + Help: "The current integral error measured by the controller", + }), + derivativeErr: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "derivative_err_per_s", + Namespace: namespaceConsensus, + Subsystem: subsystemCruiseCtl, + Help: "The current derivative error measured by the controller", + }), + targetProposalDur: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "target_proposal_dur_s", + Namespace: namespaceConsensus, + Subsystem: subsystemCruiseCtl, + Help: "The current target duration from parent to child proposal [seconds]", + }), + controllerOutput: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "controller_output_s", + Namespace: namespaceConsensus, + Subsystem: subsystemCruiseCtl, + Help: "The most recent output of the controller [seconds]; the adjustment to subtract from the baseline proposal duration", + }), + proposalPublicationDelay: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "proposal_publication_delay_s", + Namespace: namespaceConsensus, + Subsystem: subsystemCruiseCtl, + Help: "Effective delay the controller imposes on publishing the node's own proposals [seconds]; with all limits of authority", + }), + } +} + +func (c *CruiseCtlMetrics) PIDError(p, i, d float64) { + c.proportionalErr.Set(p) + c.integralErr.Set(i) + c.derivativeErr.Set(d) +} + +func (c *CruiseCtlMetrics) TargetProposalDuration(duration time.Duration) { + c.targetProposalDur.Set(duration.Seconds()) +} + +func (c *CruiseCtlMetrics) ControllerOutput(duration time.Duration) { + c.controllerOutput.Set(duration.Seconds()) +} + +func (c *CruiseCtlMetrics) ProposalPublicationDelay(duration time.Duration) { + c.proposalPublicationDelay.Set(duration.Seconds()) +} diff --git a/module/metrics/example/README.md b/module/metrics/example/README.md index f693cac0780..ec319414ad8 100644 --- a/module/metrics/example/README.md +++ b/module/metrics/example/README.md 
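The `CruiseCtlMetrics` setters above map one-to-one onto the PID controller's error terms and outputs. A hypothetical wiring, assuming the caller computes the error terms itself (all values below are made up):

```go
package main

import (
	"time"

	"github.com/onflow/flow-go/module/metrics"
)

func main() {
	m := metrics.NewCruiseCtlMetrics()

	// PID error terms as the controller would compute them (made-up values).
	m.PIDError(0.8 /*proportional*/, 2.5 /*integral*/, -0.1 /*derivative*/)

	// Controller outputs: the target parent-to-child proposal duration and
	// the adjustment derived from the PID terms.
	m.TargetProposalDuration(950 * time.Millisecond)
	m.ControllerOutput(50 * time.Millisecond)
	m.ProposalPublicationDelay(25 * time.Millisecond)
}
```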
@@ -18,7 +18,7 @@ You can choose one of the following: Note: Running example with `-happypath` flag examines the metrics collection on a real happy path of verification node. ``` - go run --tags=relic module/metrics/example/verification/main.go + go run module/metrics/example/verification/main.go ``` - Consensus Node: ``` diff --git a/module/metrics/example/consensus/main.go b/module/metrics/example/consensus/main.go index 829dfee562a..8c18ffbfc4e 100644 --- a/module/metrics/example/consensus/main.go +++ b/module/metrics/example/consensus/main.go @@ -40,7 +40,7 @@ func main() { for i := 0; i < 100; i++ { block := unittest.BlockFixture() collector.MempoolEntries(metrics.ResourceGuarantee, 22) - collector.BlockFinalized(&block) + collector.BlockFinalized(block) collector.HotStuffBusyDuration(10, metrics.HotstuffEventTypeLocalTimeout) collector.HotStuffWaitDuration(10, metrics.HotstuffEventTypeLocalTimeout) collector.HotStuffIdleDuration(10) diff --git a/module/metrics/example/execution/main.go b/module/metrics/example/execution/main.go index 5527419b80a..94837711d2d 100644 --- a/module/metrics/example/execution/main.go +++ b/module/metrics/example/execution/main.go @@ -41,13 +41,17 @@ func main() { collector.ExecutionBlockExecuted( duration, - module.ExecutionResultStats{ - ComputationUsed: uint64(rand.Int63n(1e6)), - MemoryUsed: uint64(rand.Int63n(1e6)), - EventCounts: 2, - EventSize: 100, - NumberOfCollections: 1, - NumberOfTransactions: 1, + module.BlockExecutionResultStats{ + CollectionExecutionResultStats: module.CollectionExecutionResultStats{ + ExecutionResultStats: module.ExecutionResultStats{ + EventSize: 100, + EventCounts: 2, + MemoryUsed: uint64(rand.Int63n(1e6)), + ComputationUsed: uint64(rand.Int63n(1e6)), + }, + NumberOfTransactions: 1, + }, + NumberOfCollections: 1, }) diskIncrease := rand.Int63n(1024 * 1024) diff --git a/module/metrics/example/verification/main.go b/module/metrics/example/verification/main.go index e915f1a1e89..103b751240b 100644 --- a/module/metrics/example/verification/main.go +++ b/module/metrics/example/verification/main.go @@ -10,6 +10,7 @@ import ( "github.com/rs/zerolog" vertestutils "github.com/onflow/flow-go/engine/verification/utils/unittest" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/buffer" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" @@ -84,41 +85,24 @@ func demo() { <-mc.Ready() // creates a receipt mempool and registers a metric on its size - receipts, err := stdmap.NewReceipts(100) - if err != nil { - panic(err) - } + receipts := stdmap.NewReceipts(100) err = mc.Register(metrics.ResourceReceipt, receipts.Size) if err != nil { panic(err) } // creates pending receipt ids by block mempool, and registers size method of backend for metrics - receiptIDsByBlock, err := stdmap.NewIdentifierMap(100) - if err != nil { - panic(err) - } + receiptIDsByBlock := stdmap.NewIdentifierMap(100) + err = mc.Register(metrics.ResourcePendingReceiptIDsByBlock, receiptIDsByBlock.Size) if err != nil { panic(err) } // creates pending receipt ids by result mempool, and registers size method of backend for metrics - receiptIDsByResult, err := stdmap.NewIdentifierMap(100) - if err != nil { - panic(err) - } - err = mc.Register(metrics.ResourceReceiptIDsByResult, receiptIDsByResult.Size) - if err != nil { - panic(err) - } + receiptIDsByResult := stdmap.NewIdentifierMap(100) - // creates processed results ids mempool, and registers size method of backend for metrics - processedResultsIDs, err := 
stdmap.NewIdentifiers(100) - if err != nil { - panic(err) - } - err = mc.Register(metrics.ResourceProcessedResultID, processedResultsIDs.Size) + err = mc.Register(metrics.ResourceReceiptIDsByResult, receiptIDsByResult.Size) if err != nil { panic(err) } @@ -176,30 +160,23 @@ func demo() { // memory pools receipt := unittest.ExecutionReceiptFixture() tryRandomCall(func() { - receipts.Add(receipt) - }) - - tryRandomCall(func() { - err := receiptIDsByBlock.Append(receipt.ExecutionResult.BlockID, receipt.ID()) - if err != nil { - panic(err) - } + receipts.Add(receipt.ID(), receipt) }) tryRandomCall(func() { - err = receiptIDsByResult.Append(receipt.ExecutionResult.BlockID, receipt.ExecutionResult.ID()) - if err != nil { - panic(err) - } + receiptIDsByBlock.Append(receipt.ExecutionResult.BlockID, receipt.ID()) }) tryRandomCall(func() { - processedResultsIDs.Add(receipt.ExecutionResult.ID()) + receiptIDsByResult.Append(receipt.ExecutionResult.BlockID, receipt.ExecutionResult.ID()) }) tryRandomCall(func() { - block := unittest.BlockFixture() - pendingBlocks.Add(unittest.IdentifierFixture(), &block) + proposal := unittest.ProposalFixture() + pendingBlocks.Add(flow.Slashable[*flow.Proposal]{ + OriginID: unittest.IdentifierFixture(), + Message: proposal, + }) }) // adds a synthetic 1 s delay for verification duration diff --git a/module/metrics/execution.go b/module/metrics/execution.go index 856265de18a..0176e6010ec 100644 --- a/module/metrics/execution.go +++ b/module/metrics/execution.go @@ -8,79 +8,100 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" ) type ExecutionCollector struct { - tracer module.Tracer - totalExecutedBlocksCounter prometheus.Counter - totalExecutedCollectionsCounter prometheus.Counter - totalExecutedTransactionsCounter prometheus.Counter - totalExecutedScriptsCounter prometheus.Counter - totalFailedTransactionsCounter prometheus.Counter - lastExecutedBlockHeightGauge prometheus.Gauge - stateStorageDiskTotal prometheus.Gauge - storageStateCommitment prometheus.Gauge - forestApproxMemorySize prometheus.Gauge - forestNumberOfTrees prometheus.Gauge - latestTrieRegCount prometheus.Gauge - latestTrieRegCountDiff prometheus.Gauge - latestTrieRegSize prometheus.Gauge - latestTrieRegSizeDiff prometheus.Gauge - latestTrieMaxDepthTouched prometheus.Gauge - updated prometheus.Counter - proofSize prometheus.Gauge - updatedValuesNumber prometheus.Counter - updatedValuesSize prometheus.Gauge - updatedDuration prometheus.Histogram - updatedDurationPerValue prometheus.Histogram - readValuesNumber prometheus.Counter - readValuesSize prometheus.Gauge - readDuration prometheus.Histogram - readDurationPerValue prometheus.Histogram - blockComputationUsed prometheus.Histogram - blockComputationVector *prometheus.GaugeVec - blockCachedPrograms prometheus.Gauge - blockMemoryUsed prometheus.Histogram - blockEventCounts prometheus.Histogram - blockEventSize prometheus.Histogram - blockExecutionTime prometheus.Histogram - blockTransactionCounts prometheus.Histogram - blockCollectionCounts prometheus.Histogram - collectionComputationUsed prometheus.Histogram - collectionMemoryUsed prometheus.Histogram - collectionEventSize prometheus.Histogram - collectionEventCounts prometheus.Histogram - collectionNumberOfRegistersTouched prometheus.Histogram - collectionTotalBytesWrittenToRegisters prometheus.Histogram - collectionExecutionTime prometheus.Histogram - collectionTransactionCounts prometheus.Histogram - 
collectionRequestSent prometheus.Counter - collectionRequestRetried prometheus.Counter - transactionParseTime prometheus.Histogram - transactionCheckTime prometheus.Histogram - transactionInterpretTime prometheus.Histogram - transactionExecutionTime prometheus.Histogram - transactionMemoryEstimate prometheus.Histogram - transactionComputationUsed prometheus.Histogram - transactionEmittedEvents prometheus.Histogram - transactionEventSize prometheus.Histogram - scriptExecutionTime prometheus.Histogram - scriptComputationUsed prometheus.Histogram - scriptMemoryUsage prometheus.Histogram - scriptMemoryEstimate prometheus.Histogram - scriptMemoryDifference prometheus.Histogram - numberOfAccounts prometheus.Gauge - programsCacheMiss prometheus.Counter - programsCacheHit prometheus.Counter - chunkDataPackRequestProcessedTotal prometheus.Counter - chunkDataPackProofSize prometheus.Histogram - chunkDataPackCollectionSize prometheus.Histogram - stateSyncActive prometheus.Gauge - blockDataUploadsInProgress prometheus.Gauge - blockDataUploadsDuration prometheus.Histogram - maxCollectionHeight prometheus.Gauge - computationResultUploadedCount prometheus.Counter - computationResultUploadRetriedCount prometheus.Counter + tracer module.Tracer + totalExecutedBlocksCounter prometheus.Counter + totalExecutedCollectionsCounter prometheus.Counter + totalExecutedTransactionsCounter prometheus.Counter + totalExecutedScriptsCounter prometheus.Counter + totalFailedTransactionsCounter prometheus.Counter + lastExecutedBlockHeightGauge prometheus.Gauge + lastFinalizedExecutedBlockHeightGauge prometheus.Gauge + lastChunkDataPackPrunedHeightGauge prometheus.Gauge + targetChunkDataPackPrunedHeightGauge prometheus.Gauge + stateStorageDiskTotal prometheus.Gauge + storageStateCommitment prometheus.Gauge + checkpointSize prometheus.Gauge + forestApproxMemorySize prometheus.Gauge + forestNumberOfTrees prometheus.Gauge + latestTrieRegCount prometheus.Gauge + latestTrieRegCountDiff prometheus.Gauge + latestTrieRegSize prometheus.Gauge + latestTrieRegSizeDiff prometheus.Gauge + latestTrieMaxDepthTouched prometheus.Gauge + updated prometheus.Counter + proofSize prometheus.Gauge + updatedValuesNumber prometheus.Counter + updatedValuesSize prometheus.Gauge + updatedDuration prometheus.Histogram + updatedDurationPerValue prometheus.Histogram + readValuesNumber prometheus.Counter + readValuesSize prometheus.Gauge + readDuration prometheus.Histogram + readDurationPerValue prometheus.Histogram + blockComputationUsed prometheus.Histogram + blockComputationVector *prometheus.GaugeVec + blockCachedPrograms prometheus.Gauge + blockMemoryUsed prometheus.Histogram + blockEventCounts prometheus.Histogram + blockEventSize prometheus.Histogram + blockExecutionTime prometheus.Histogram + blockTransactionCounts prometheus.Histogram + blockCollectionCounts prometheus.Histogram + collectionComputationUsed prometheus.Histogram + collectionMemoryUsed prometheus.Histogram + collectionEventSize prometheus.Histogram + collectionEventCounts prometheus.Histogram + collectionNumberOfRegistersTouched prometheus.Histogram + collectionTotalBytesWrittenToRegisters prometheus.Histogram + collectionExecutionTime prometheus.Histogram + collectionTransactionCounts prometheus.Histogram + collectionRequestSent prometheus.Counter + collectionRequestRetried prometheus.Counter + transactionParseTime prometheus.Histogram + transactionCheckTime prometheus.Histogram + transactionInterpretTime prometheus.Histogram + transactionExecutionTime prometheus.Histogram + 
transactionConflictRetries prometheus.Histogram + transactionMemoryEstimate prometheus.Histogram + transactionComputationUsed prometheus.Histogram + transactionNormalizedTimePerComputation prometheus.Histogram + transactionEmittedEvents prometheus.Histogram + transactionEventSize prometheus.Histogram + scriptExecutionTime prometheus.Histogram + scriptComputationUsed prometheus.Histogram + scriptMemoryUsage prometheus.Histogram + scriptMemoryEstimate prometheus.Histogram + scriptMemoryDifference prometheus.Histogram + numberOfAccounts prometheus.Gauge + programsCacheMiss prometheus.Counter + programsCacheHit prometheus.Counter + chunkDataPackRequestProcessedTotal prometheus.Counter + chunkDataPackProofSize prometheus.Histogram + chunkDataPackCollectionSize prometheus.Histogram + stateSyncActive prometheus.Gauge + blockDataUploadsInProgress prometheus.Gauge + blockDataUploadsDuration prometheus.Histogram + maxCollectionHeightData counters.StrictMonotonicCounter + maxCollectionHeight prometheus.Gauge + computationResultUploadedCount prometheus.Counter + computationResultUploadRetriedCount prometheus.Counter + numberOfDeployedCOAs prometheus.Gauge + evmBlockTotalSupply prometheus.Gauge + totalExecutedEVMTransactionsCounter prometheus.Counter + totalFailedEVMTransactionsCounter prometheus.Counter + totalExecutedEVMDirectCallsCounter prometheus.Counter + totalFailedEVMDirectCallsCounter prometheus.Counter + evmTransactionGasUsed prometheus.Histogram + evmBlockTxCount prometheus.Histogram + evmBlockGasUsed prometheus.Histogram + callbacksExecutedCount prometheus.Histogram + callbacksExecutedTotal prometheus.Counter + callbacksProcessComputationUsed prometheus.Histogram + callbacksExecuteComputationLimits prometheus.Histogram } func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { @@ -213,7 +234,7 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { Subsystem: subsystemRuntime, Name: "block_execution_time_milliseconds", Help: "the total time spent on block execution in milliseconds", - Buckets: []float64{100, 500, 1000, 1500, 2000, 2500, 3000, 6000}, + Buckets: []float64{50, 100, 200, 300, 400, 1000, 2000, 6000}, }) blockComputationUsed := promauto.NewHistogram(prometheus.HistogramOpts{ @@ -388,6 +409,14 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { Buckets: prometheus.ExponentialBuckets(2, 2, 10), }) + transactionConflictRetries := promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, + Name: "transaction_conflict_retries", + Help: "the number of conflict retries needed to successfully commit a transaction. If retry count is high, consider reducing concurrency", + Buckets: []float64{0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100}, + }) + transactionComputationUsed := promauto.NewHistogram(prometheus.HistogramOpts{ Namespace: namespaceExecution, Subsystem: subsystemRuntime, @@ -396,6 +425,14 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { Buckets: []float64{50, 100, 500, 1000, 5000, 10000}, }) + transactionNormalizedTimePerComputation := promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, + Name: "transaction_ms_per_computation", + Help: "The normalized ratio of millisecond of execution time per computation used. 
Value below 1 means the transaction was executed faster than estimated (i.e., it used fewer resources than estimated)", + Buckets: []float64{0.015625, 0.03125, 0.0625, 0.125, 0.25, 0.5, 1, 2, 4, 8, 16, 32, 64}, + }), + + transactionMemoryEstimate := promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, @@ -515,62 +552,64 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { ec := &ExecutionCollector{ tracer: tracer, - forestApproxMemorySize: forestApproxMemorySize, - forestNumberOfTrees: forestNumberOfTrees, - latestTrieRegCount: latestTrieRegCount, - latestTrieRegCountDiff: latestTrieRegCountDiff, - latestTrieRegSize: latestTrieRegSize, - latestTrieRegSizeDiff: latestTrieRegSizeDiff, - latestTrieMaxDepthTouched: latestTrieMaxDepthTouched, - updated: updatedCount, - proofSize: proofSize, - updatedValuesNumber: updatedValuesNumber, - updatedValuesSize: updatedValuesSize, - updatedDuration: updatedDuration, - updatedDurationPerValue: updatedDurationPerValue, - readValuesNumber: readValuesNumber, - readValuesSize: readValuesSize, - readDuration: readDuration, - readDurationPerValue: readDurationPerValue, - blockExecutionTime: blockExecutionTime, - blockComputationUsed: blockComputationUsed, - blockComputationVector: blockComputationVector, - blockCachedPrograms: blockCachedPrograms, - blockMemoryUsed: blockMemoryUsed, - blockEventCounts: blockEventCounts, - blockEventSize: blockEventSize, - blockTransactionCounts: blockTransactionCounts, - blockCollectionCounts: blockCollectionCounts, - collectionExecutionTime: collectionExecutionTime, - collectionComputationUsed: collectionComputationUsed, - collectionMemoryUsed: collectionMemoryUsed, - collectionEventSize: collectionEventSize, - collectionEventCounts: collectionEventCounts, - collectionNumberOfRegistersTouched: collectionNumberOfRegistersTouched, - collectionTotalBytesWrittenToRegisters: collectionTotalBytesWrittenToRegisters, - collectionTransactionCounts: collectionTransactionCounts, - collectionRequestSent: collectionRequestsSent, - collectionRequestRetried: collectionRequestsRetries, - transactionParseTime: transactionParseTime, - transactionCheckTime: transactionCheckTime, - transactionInterpretTime: transactionInterpretTime, - transactionExecutionTime: transactionExecutionTime, - transactionComputationUsed: transactionComputationUsed, - transactionMemoryEstimate: transactionMemoryEstimate, - transactionEmittedEvents: transactionEmittedEvents, - transactionEventSize: transactionEventSize, - scriptExecutionTime: scriptExecutionTime, - scriptComputationUsed: scriptComputationUsed, - scriptMemoryUsage: scriptMemoryUsage, - scriptMemoryEstimate: scriptMemoryEstimate, - scriptMemoryDifference: scriptMemoryDifference, - chunkDataPackRequestProcessedTotal: chunkDataPackRequestProcessedTotal, - chunkDataPackProofSize: chunkDataPackProofSize, - chunkDataPackCollectionSize: chunkDataPackCollectionSize, - blockDataUploadsInProgress: blockDataUploadsInProgress, - blockDataUploadsDuration: blockDataUploadsDuration, - computationResultUploadedCount: computationResultUploadedCount, - computationResultUploadRetriedCount: computationResultUploadRetriedCount, + forestApproxMemorySize: forestApproxMemorySize, + forestNumberOfTrees: forestNumberOfTrees, + latestTrieRegCount: latestTrieRegCount, + latestTrieRegCountDiff: latestTrieRegCountDiff, + latestTrieRegSize: latestTrieRegSize, + latestTrieRegSizeDiff: latestTrieRegSizeDiff, + latestTrieMaxDepthTouched: latestTrieMaxDepthTouched, +
updated: updatedCount, + proofSize: proofSize, + updatedValuesNumber: updatedValuesNumber, + updatedValuesSize: updatedValuesSize, + updatedDuration: updatedDuration, + updatedDurationPerValue: updatedDurationPerValue, + readValuesNumber: readValuesNumber, + readValuesSize: readValuesSize, + readDuration: readDuration, + readDurationPerValue: readDurationPerValue, + blockExecutionTime: blockExecutionTime, + blockComputationUsed: blockComputationUsed, + blockComputationVector: blockComputationVector, + blockCachedPrograms: blockCachedPrograms, + blockMemoryUsed: blockMemoryUsed, + blockEventCounts: blockEventCounts, + blockEventSize: blockEventSize, + blockTransactionCounts: blockTransactionCounts, + blockCollectionCounts: blockCollectionCounts, + collectionExecutionTime: collectionExecutionTime, + collectionComputationUsed: collectionComputationUsed, + collectionMemoryUsed: collectionMemoryUsed, + collectionEventSize: collectionEventSize, + collectionEventCounts: collectionEventCounts, + collectionNumberOfRegistersTouched: collectionNumberOfRegistersTouched, + collectionTotalBytesWrittenToRegisters: collectionTotalBytesWrittenToRegisters, + collectionTransactionCounts: collectionTransactionCounts, + collectionRequestSent: collectionRequestsSent, + collectionRequestRetried: collectionRequestsRetries, + transactionParseTime: transactionParseTime, + transactionCheckTime: transactionCheckTime, + transactionInterpretTime: transactionInterpretTime, + transactionExecutionTime: transactionExecutionTime, + transactionConflictRetries: transactionConflictRetries, + transactionComputationUsed: transactionComputationUsed, + transactionNormalizedTimePerComputation: transactionNormalizedTimePerComputation, + transactionMemoryEstimate: transactionMemoryEstimate, + transactionEmittedEvents: transactionEmittedEvents, + transactionEventSize: transactionEventSize, + scriptExecutionTime: scriptExecutionTime, + scriptComputationUsed: scriptComputationUsed, + scriptMemoryUsage: scriptMemoryUsage, + scriptMemoryEstimate: scriptMemoryEstimate, + scriptMemoryDifference: scriptMemoryDifference, + chunkDataPackRequestProcessedTotal: chunkDataPackRequestProcessedTotal, + chunkDataPackProofSize: chunkDataPackProofSize, + chunkDataPackCollectionSize: chunkDataPackCollectionSize, + blockDataUploadsInProgress: blockDataUploadsInProgress, + blockDataUploadsDuration: blockDataUploadsDuration, + computationResultUploadedCount: computationResultUploadedCount, + computationResultUploadRetriedCount: computationResultUploadRetriedCount, totalExecutedBlocksCounter: promauto.NewCounter(prometheus.CounterOpts{ Namespace: namespaceExecution, Subsystem: subsystemRuntime, @@ -613,6 +652,27 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { Help: "the last height that was executed", }), + lastFinalizedExecutedBlockHeightGauge: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, + Name: "last_finalized_executed_block_height", + Help: "the last height that was finalized and executed", + }), + + lastChunkDataPackPrunedHeightGauge: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, + Name: "last_chunk_data_pack_pruned_height", + Help: "the last height that was pruned for chunk data pack", + }), + + targetChunkDataPackPrunedHeightGauge: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, + Name: "target_chunk_data_pack_pruned_height", + Help: "the target height 
for pruning chunk data pack", + }), + stateStorageDiskTotal: promauto.NewGauge(prometheus.GaugeOpts{ Namespace: namespaceExecution, Subsystem: subsystemStateStorage, @@ -620,6 +680,7 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { Help: "the execution state size on disk in bytes", }), + // TODO: remove storageStateCommitment: promauto.NewGauge(prometheus.GaugeOpts{ Namespace: namespaceExecution, Subsystem: subsystemStateStorage, @@ -627,6 +688,13 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { Help: "the storage size of a state commitment in bytes", }), + checkpointSize: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemStateStorage, + Name: "checkpoint_size_bytes", + Help: "the size of a checkpoint in bytes", + }), + stateSyncActive: promauto.NewGauge(prometheus.GaugeOpts{ Namespace: namespaceExecution, Subsystem: subsystemIngestion, @@ -655,12 +723,111 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { Help: "the number of times a program was found in the cache", }), + maxCollectionHeightData: counters.NewMonotonicCounter(0), + maxCollectionHeight: prometheus.NewGauge(prometheus.GaugeOpts{ Name: "max_collection_height", Namespace: namespaceExecution, Subsystem: subsystemIngestion, Help: "gauge to track the maximum block height of collections received", }), + + numberOfDeployedCOAs: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemEVM, + Name: "number_of_deployed_coas", + Help: "the number of deployed coas", + }), + + totalExecutedEVMTransactionsCounter: promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemEVM, + Name: "total_executed_evm_transaction_count", + Help: "the total number of executed evm transactions (including direct calls)", + }), + + totalFailedEVMTransactionsCounter: promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemEVM, + Name: "total_failed_evm_transaction_count", + Help: "the total number of executed evm transactions with failed status (including direct calls)", + }), + + totalExecutedEVMDirectCallsCounter: promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemEVM, + Name: "total_executed_evm_direct_call_count", + Help: "the total number of executed evm direct calls", + }), + + totalFailedEVMDirectCallsCounter: promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemEVM, + Name: "total_failed_evm_direct_call_count", + Help: "the total number of executed evm direct calls with failed status.", + }), + + evmTransactionGasUsed: promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemEVM, + Name: "evm_transaction_gas_used", + Help: "the total amount of gas used by a transaction", + Buckets: prometheus.ExponentialBuckets(20_000, 2, 8), + }), + + evmBlockTxCount: promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemEVM, + Name: "evm_block_transaction_counts", + Help: "the total number of transactions per evm block", + Buckets: prometheus.ExponentialBuckets(1, 2, 8), + }), + + evmBlockGasUsed: promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemEVM, + Name: "evm_block_gas_used", + Help: "the total amount of gas used by a block", + Buckets: prometheus.ExponentialBuckets(100_000, 2, 8), + }), 
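For reference, the `prometheus.ExponentialBuckets(start, factor, count)` helper used by the EVM histograms above produces `count` upper bounds starting at `start` and multiplying by `factor`, covering a wide dynamic range with few buckets. A quick check:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Buckets of evm_transaction_gas_used above:
	// 20000, 40000, 80000, ... doubling for 8 upper bounds.
	fmt.Println(prometheus.ExponentialBuckets(20_000, 2, 8))
}
```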
+ + evmBlockTotalSupply: promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemEVM, + Name: "evm_block_total_supply", + Help: "the total amount of flow deposited to EVM (in FLOW)", + }), + + callbacksExecutedCount: promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, + Name: "callbacks_executed_count", + Help: "the number of callbacks executed", + Buckets: prometheus.ExponentialBuckets(1, 2, 8), + }), + + callbacksExecutedTotal: promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, + Name: "callbacks_executed_total", + Help: "the total number of callbacks executed", + }), + + callbacksProcessComputationUsed: promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, + Name: "callbacks_process_computation_used", + Help: "the computation used by the process callback transaction", + Buckets: prometheus.ExponentialBuckets(10_000, 2, 12), + }), + + callbacksExecuteComputationLimits: promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, + Name: "callbacks_execute_computation_limits", + Help: "the total computation limits for execute callback transactions", + Buckets: prometheus.ExponentialBuckets(10_000, 2, 12), + }), } return ec @@ -679,7 +846,7 @@ func (ec *ExecutionCollector) FinishBlockReceivedToExecuted(blockID flow.Identif // ExecutionBlockExecuted reports execution meta data after executing a block func (ec *ExecutionCollector) ExecutionBlockExecuted( dur time.Duration, - stats module.ExecutionResultStats, + stats module.BlockExecutionResultStats, ) { ec.totalExecutedBlocksCounter.Inc() ec.blockExecutionTime.Observe(float64(dur.Milliseconds())) @@ -694,7 +861,7 @@ func (ec *ExecutionCollector) ExecutionBlockExecuted( // ExecutionCollectionExecuted reports stats for executing a collection func (ec *ExecutionCollector) ExecutionCollectionExecuted( dur time.Duration, - stats module.ExecutionResultStats, + stats module.CollectionExecutionResultStats, ) { ec.totalExecutedCollectionsCounter.Inc() ec.collectionExecutionTime.Observe(float64(dur.Milliseconds())) @@ -707,7 +874,7 @@ func (ec *ExecutionCollector) ExecutionCollectionExecuted( ec.collectionTransactionCounts.Observe(float64(stats.NumberOfTransactions)) } -func (ec *ExecutionCollector) ExecutionBlockExecutionEffortVectorComponent(compKind string, value uint) { +func (ec *ExecutionCollector) ExecutionBlockExecutionEffortVectorComponent(compKind string, value uint64) { ec.blockComputationVector.With(prometheus.Labels{LabelComputationKind: compKind}).Set(float64(value)) } @@ -715,20 +882,22 @@ func (ec *ExecutionCollector) ExecutionBlockCachedPrograms(programs int) { ec.blockCachedPrograms.Set(float64(programs)) } -// TransactionExecuted reports stats for executing a transaction +// ExecutionTransactionExecuted reports stats for executing a transaction func (ec *ExecutionCollector) ExecutionTransactionExecuted( dur time.Duration, - compUsed, memoryUsed uint64, - eventCounts, eventSize int, - failed bool, + stats module.TransactionExecutionResultStats, + info module.TransactionExecutionResultInfo, ) { ec.totalExecutedTransactionsCounter.Inc() ec.transactionExecutionTime.Observe(float64(dur.Milliseconds())) - ec.transactionComputationUsed.Observe(float64(compUsed)) - ec.transactionMemoryEstimate.Observe(float64(memoryUsed)) - 
ec.transactionEmittedEvents.Observe(float64(eventCounts)) - ec.transactionEventSize.Observe(float64(eventSize)) - if failed { + ec.transactionConflictRetries.Observe(float64(stats.NumberOfTxnConflictRetries)) + ec.transactionComputationUsed.Observe(float64(stats.ComputationUsed)) + ec.transactionNormalizedTimePerComputation.Observe( + flow.NormalizedExecutionTimePerComputationUnit(dur, stats.ComputationUsed)) + ec.transactionMemoryEstimate.Observe(float64(stats.MemoryUsed)) + ec.transactionEmittedEvents.Observe(float64(stats.EventCounts)) + ec.transactionEventSize.Observe(float64(stats.EventSize)) + if stats.Failed { ec.totalFailedTransactionsCounter.Inc() } } @@ -749,6 +918,14 @@ func (ec *ExecutionCollector) ExecutionScriptExecuted(dur time.Duration, compUse ec.scriptMemoryDifference.Observe(float64(memoryEstimated) - float64(memoryUsed)) } +// ExecutionCallbacksExecuted reports callback execution metrics +func (ec *ExecutionCollector) ExecutionCallbacksExecuted(callbackCount int, processComputationUsed, executeComputationLimits uint64) { + ec.callbacksExecutedCount.Observe(float64(callbackCount)) + ec.callbacksExecutedTotal.Add(float64(callbackCount)) + ec.callbacksProcessComputationUsed.Observe(float64(processComputationUsed)) + ec.callbacksExecuteComputationLimits.Observe(float64(executeComputationLimits)) +} + // ExecutionStateStorageDiskTotal reports the total storage size of the execution state on disk in bytes func (ec *ExecutionCollector) ExecutionStateStorageDiskTotal(bytes int64) { ec.stateStorageDiskTotal.Set(float64(bytes)) @@ -759,11 +936,30 @@ func (ec *ExecutionCollector) ExecutionStorageStateCommitment(bytes int64) { ec.storageStateCommitment.Set(float64(bytes)) } +// ExecutionCheckpointSize reports the size of a checkpoint in bytes +func (ec *ExecutionCollector) ExecutionCheckpointSize(bytes uint64) { + ec.checkpointSize.Set(float64(bytes)) +} + // ExecutionLastExecutedBlockHeight reports last executed block height func (ec *ExecutionCollector) ExecutionLastExecutedBlockHeight(height uint64) { ec.lastExecutedBlockHeightGauge.Set(float64(height)) } +// ExecutionLastFinalizedExecutedBlockHeight reports last finalized executed block height +func (ec *ExecutionCollector) ExecutionLastFinalizedExecutedBlockHeight(height uint64) { + ec.lastFinalizedExecutedBlockHeightGauge.Set(float64(height)) +} + +// ExecutionLastChunkDataPackPrunedHeight reports last chunk data pack pruned height +func (ec *ExecutionCollector) ExecutionLastChunkDataPackPrunedHeight(height uint64) { + ec.lastChunkDataPackPrunedHeightGauge.Set(float64(height)) +} + +func (ec *ExecutionCollector) ExecutionTargetChunkDataPackPrunedHeight(height uint64) { + ec.targetChunkDataPackPrunedHeightGauge.Set(float64(height)) +} + // ForestApproxMemorySize records approximate memory usage of forest (all in-memory trees) func (ec *ExecutionCollector) ForestApproxMemorySize(bytes uint64) { ec.forestApproxMemorySize.Set(float64(bytes)) @@ -853,10 +1049,6 @@ func (ec *ExecutionCollector) ExecutionCollectionRequestSent() { ec.collectionRequestSent.Inc() } -func (ec *ExecutionCollector) ExecutionCollectionRequestRetried() { - ec.collectionRequestRetried.Inc() -} - func (ec *ExecutionCollector) ExecutionBlockDataUploadStarted() { ec.blockDataUploadsInProgress.Inc() } @@ -907,8 +1099,43 @@ func (ec *ExecutionCollector) RuntimeTransactionProgramsCacheHit() { ec.programsCacheHit.Inc() } +func (ec *ExecutionCollector) SetNumberOfDeployedCOAs(count uint64) { + ec.numberOfDeployedCOAs.Set(float64(count)) +} + +func (ec 
*ExecutionCollector) EVMTransactionExecuted( + gasUsed uint64, + isDirectCall bool, + failed bool, +) { + ec.totalExecutedEVMTransactionsCounter.Inc() + if isDirectCall { + ec.totalExecutedEVMDirectCallsCounter.Inc() + if failed { + ec.totalFailedEVMDirectCallsCounter.Inc() + } + } + if failed { + ec.totalFailedEVMTransactionsCounter.Inc() + } + ec.evmTransactionGasUsed.Observe(float64(gasUsed)) +} + +func (ec *ExecutionCollector) EVMBlockExecuted( + txCount int, + totalGasUsed uint64, + totalSupplyInFlow float64, +) { + ec.evmBlockTxCount.Observe(float64(txCount)) + ec.evmBlockGasUsed.Observe(float64(totalGasUsed)) + ec.evmBlockTotalSupply.Set(totalSupplyInFlow) +} + func (ec *ExecutionCollector) UpdateCollectionMaxHeight(height uint64) { - ec.maxCollectionHeight.Set(float64(height)) + updated := ec.maxCollectionHeightData.Set(height) + if updated { + ec.maxCollectionHeight.Set(float64(height)) + } } func (ec *ExecutionCollector) ExecutionComputationResultUploaded() { @@ -918,3 +1145,34 @@ func (ec *ExecutionCollector) ExecutionComputationResultUploaded() { func (ec *ExecutionCollector) ExecutionComputationResultUploadRetried() { ec.computationResultUploadRetriedCount.Inc() } + +type ExecutionCollectorWithTransactionCallback struct { + *ExecutionCollector + TransactionCallback func( + dur time.Duration, + stats module.TransactionExecutionResultStats, + info module.TransactionExecutionResultInfo, + ) +} + +func (ec *ExecutionCollector) WithTransactionCallback( + callback func( + time.Duration, + module.TransactionExecutionResultStats, + module.TransactionExecutionResultInfo, + ), +) *ExecutionCollectorWithTransactionCallback { + return &ExecutionCollectorWithTransactionCallback{ + ExecutionCollector: ec, + TransactionCallback: callback, + } +} + +func (ec *ExecutionCollectorWithTransactionCallback) ExecutionTransactionExecuted( + dur time.Duration, + stats module.TransactionExecutionResultStats, + info module.TransactionExecutionResultInfo, +) { + ec.ExecutionCollector.ExecutionTransactionExecuted(dur, stats, info) + ec.TransactionCallback(dur, stats, info) +} diff --git a/module/metrics/execution_data_requester.go b/module/metrics/execution_data_requester.go index e8ccc5e3266..b8be5545299 100644 --- a/module/metrics/execution_data_requester.go +++ b/module/metrics/execution_data_requester.go @@ -85,10 +85,14 @@ func NewExecutionDataRequesterCollector() module.ExecutionDataRequesterMetrics { } } +// ExecutionDataFetchStarted records an in-progress download func (ec *ExecutionDataRequesterCollector) ExecutionDataFetchStarted() { ec.downloadsInProgress.Inc() } +// ExecutionDataFetchFinished records a completed download +// Pass the highest consecutive height to ensure the metrics reflect the height up to which the +// requester has completed downloads. This allows us to easily see when downloading gets stuck. 
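`WithTransactionCallback` above decorates the execution collector so a side-channel observer sees every transaction result without touching the metrics themselves. A hypothetical hook that flags failed transactions, assuming the repo's no-op tracer for construction:

```go
package main

import (
	"fmt"
	"time"

	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/module/trace"
)

func main() {
	base := metrics.NewExecutionCollector(trace.NewNoopTracer())

	// Every ExecutionTransactionExecuted call updates the embedded collector
	// first, then invokes the callback with the same arguments.
	collector := base.WithTransactionCallback(func(
		dur time.Duration,
		stats module.TransactionExecutionResultStats,
		info module.TransactionExecutionResultInfo,
	) {
		if stats.Failed {
			fmt.Printf("transaction failed after %s\n", dur)
		}
	})

	_ = collector // hand this to the engine instead of the base collector
}
```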
func (ec *ExecutionDataRequesterCollector) ExecutionDataFetchFinished(duration time.Duration, success bool, height uint64) { ec.downloadsInProgress.Dec() ec.fetchDuration.Observe(float64(duration.Milliseconds())) @@ -100,11 +104,13 @@ func (ec *ExecutionDataRequesterCollector) ExecutionDataFetchFinished(duration t } } +// NotificationSent records that a notification was sent for ExecutionData received at the given block height func (ec *ExecutionDataRequesterCollector) NotificationSent(height uint64) { ec.outstandingNotifications.Dec() ec.highestNotificationHeight.Set(float64(height)) } +// FetchRetried records that a download retry was processed func (ec *ExecutionDataRequesterCollector) FetchRetried() { ec.downloadRetries.Inc() } diff --git a/module/metrics/execution_state_indexer.go b/module/metrics/execution_state_indexer.go new file mode 100644 index 00000000000..ffe7c15d215 --- /dev/null +++ b/module/metrics/execution_state_indexer.go @@ -0,0 +1,97 @@ +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module" +) + +var _ module.ExecutionStateIndexerMetrics = (*ExecutionStateIndexerCollector)(nil) + +type ExecutionStateIndexerCollector struct { + indexDuration prometheus.Histogram + highestIndexedHeight prometheus.Gauge + + indexedEvents prometheus.Counter + indexedRegisters prometheus.Counter + indexedTransactionResults prometheus.Counter + reindexedHeightCount prometheus.Counter +} + +func NewExecutionStateIndexerCollector() module.ExecutionStateIndexerMetrics { + indexDuration := promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemExecutionStateIndexer, + Name: "index_duration_ms", + Help: "the duration of the execution state indexing operation", + Buckets: []float64{1, 5, 10, 50, 100}, + }) + + highestIndexedHeight := promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemExecutionStateIndexer, + Name: "highest_indexed_height", + Help: "highest block height that has been indexed", + }) + + indexedEvents := promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemExecutionStateIndexer, + Name: "indexed_events", + Help: "number of events indexed", + }) + + indexedRegisters := promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemExecutionStateIndexer, + Name: "indexed_registers", + Help: "number of registers indexed", + }) + + indexedTransactionResults := promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemExecutionStateIndexer, + Name: "indexed_transaction_results", + Help: "number of transaction results indexed", + }) + + reindexedHeightCount := promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemExecutionStateIndexer, + Name: "reindexed_height_count", + Help: "number of times a previously indexed height is reindexed", + }) + + return &ExecutionStateIndexerCollector{ + indexDuration: indexDuration, + highestIndexedHeight: highestIndexedHeight, + indexedEvents: indexedEvents, + indexedRegisters: indexedRegisters, + indexedTransactionResults: indexedTransactionResults, + reindexedHeightCount: reindexedHeightCount, + } +} + +// InitializeLatestHeight records the latest height that has been indexed. +// This should only be used during startup. After startup, use BlockIndexed to record newly +// indexed heights.
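A sketch of how a consumer would drive the indexer collector defined in this file (heights and counts are illustrative):

```go
package main

import (
	"time"

	"github.com/onflow/flow-go/module/metrics"
)

func main() {
	c := metrics.NewExecutionStateIndexerCollector()

	// On startup, seed the gauge with the last height already in the index.
	c.InitializeLatestHeight(1_000)

	// For each newly indexed block, report its height, the time spent, and
	// how many events, registers, and transaction results were written.
	c.BlockIndexed(1_001, 8*time.Millisecond, 3, 42, 2)

	// A height indexed twice (e.g., after a restart) is counted separately.
	c.BlockReindexed()
}
```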
+func (c *ExecutionStateIndexerCollector) InitializeLatestHeight(height uint64) { + c.highestIndexedHeight.Set(float64(height)) +} + +// BlockIndexed records metrics from indexing execution data from a single block. +func (c *ExecutionStateIndexerCollector) BlockIndexed(height uint64, duration time.Duration, events, registers, transactionResults int) { + c.indexDuration.Observe(float64(duration.Milliseconds())) + c.highestIndexedHeight.Set(float64(height)) + c.indexedEvents.Add(float64(events)) + c.indexedRegisters.Add(float64(registers)) + c.indexedTransactionResults.Add(float64(transactionResults)) +} + +// BlockReindexed records that a previously indexed block was indexed again. +func (c *ExecutionStateIndexerCollector) BlockReindexed() { + c.reindexedHeightCount.Inc() +} diff --git a/module/metrics/gossipsub.go b/module/metrics/gossipsub.go index 5ba5369fa0d..39dfd1f1e36 100644 --- a/module/metrics/gossipsub.go +++ b/module/metrics/gossipsub.go @@ -7,173 +7,374 @@ import ( "github.com/onflow/flow-go/module" ) -type GossipSubMetrics struct { - receivedIHaveCount prometheus.Counter - receivedIWantCount prometheus.Counter - receivedGraftCount prometheus.Counter - receivedPruneCount prometheus.Counter - incomingRpcAcceptedFullyCount prometheus.Counter - incomingRpcAcceptedOnlyControlCount prometheus.Counter - incomingRpcRejectedCount prometheus.Counter - receivedPublishMessageCount prometheus.Counter +// LocalGossipSubRouterMetrics encapsulates the metrics collectors for the GossipSub router of the local node. +// It gives a lens into the local node's view of the GossipSub protocol. +type LocalGossipSubRouterMetrics struct { + // localMeshSize is the number of peers in the local mesh of the node on each topic. + localMeshSize prometheus.GaugeVec - prefix string -} + // peerAddedOnProtocolCount is the number of peers added to the local gossipsub router on a gossipsub protocol. + peerAddedOnProtocolCount prometheus.CounterVec + + // peerRemovedFromProtocolCount is the number of peers removed from the local gossipsub router (i.e., blacklisted or unavailable). + peerRemovedFromProtocolCount prometheus.Counter + + // localPeerJoinedTopicCount is the number of times the local node joined (i.e., subscribed) to a topic. + localPeerJoinedTopicCount prometheus.Counter + + // localPeerLeftTopicCount is the number of times the local node left (i.e., unsubscribed) from a topic. + localPeerLeftTopicCount prometheus.Counter + + // peerGraftTopicCount is the number of peers grafted to a topic on the local mesh of the node, i.e., the local node + // is directly connected to the peer on the topic, and exchanges messages directly. + peerGraftTopicCount prometheus.CounterVec + + // peerPruneTopicCount is the number of peers pruned from a topic on the local mesh of the node, i.e., the local node + // is no longer directly connected to the peer on the topic, and exchanges messages indirectly. + peerPruneTopicCount prometheus.CounterVec + + // messageEnteredValidationCount is the number of incoming pubsub messages that entered the internal validation pipeline of gossipsub. + messageEnteredValidationCount prometheus.Counter + + // messageDeliveredSize is the size of messages delivered to all subscribers of the topic. + messageDeliveredSize prometheus.Histogram + + // messageRejectedSize is the size of inbound messages rejected by the validation pipeline; the rejection reason is also included.
+ messageRejectedSize prometheus.HistogramVec + + // messageDuplicateSize is the size of messages that are duplicates of already received messages. + messageDuplicateSize prometheus.Histogram + + // peerThrottledCount is the number of peers that are throttled by the local node, i.e., the local node is not accepting + // any pubsub message from the peer but may still accept control messages. + peerThrottledCount prometheus.Counter + + // rpcRcvCount is the number of rpc messages received and processed by the router (i.e., passed rpc inspection). + rpcRcvCount prometheus.Counter -var _ module.GossipSubRouterMetrics = (*GossipSubMetrics)(nil) + // iWantRcvCount is the number of iwant messages received by the router on rpcs. + iWantRcvCount prometheus.Counter -func NewGossipSubMetrics(prefix string) *GossipSubMetrics { - gs := &GossipSubMetrics{prefix: prefix} + // iHaveRcvCount is the number of ihave messages received by the router on rpcs. + iHaveRcvCount prometheus.Counter + + // graftRcvCount is the number of graft messages received by the router on rpcs. + graftRcvCount prometheus.Counter + + // pruneRcvCount is the number of prune messages received by the router on rpcs. + pruneRcvCount prometheus.Counter + + // pubsubMsgRcvCount is the number of pubsub messages received by the router. + pubsubMsgRcvCount prometheus.Counter + + // rpcSentCount is the number of rpc messages sent by the router. + rpcSentCount prometheus.Counter + + // iWantSentCount is the number of iwant messages sent by the router on rpcs. + iWantSentCount prometheus.Counter + + // iHaveSentCount is the number of ihave messages sent by the router on rpcs. + iHaveSentCount prometheus.Counter + + // graftSentCount is the number of graft messages sent by the router on rpcs. + graftSentCount prometheus.Counter + + // pruneSentCount is the number of prune messages sent by the router on rpcs. + pruneSentCount prometheus.Counter + + // pubsubMsgSentCount is the number of pubsub messages sent by the router. + pubsubMsgSentCount prometheus.Counter + + // outboundRpcDroppedCount is the number of outbound rpc messages dropped, typically because the outbound message queue is full. + outboundRpcDroppedCount prometheus.Counter + + // undeliveredOutboundMessageCount is the number of undelivered messages, i.e., messages that are not delivered to at least one subscriber. 
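The rejected-message histogram above is partitioned by rejection reason via a label. A standalone sketch of the same `HistogramVec` pattern (metric and label names here are illustrative, not the ones registered above):

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	rejected := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "example_message_rejected_size_bytes",
		Help:    "size of rejected messages, partitioned by rejection reason",
		Buckets: prometheus.ExponentialBuckets(1024, 10, 3), // 1 KiB, 10 KiB, 100 KiB
	}, []string{"reason"})
	prometheus.MustRegister(rejected)

	// Each rejection observes the message size under its reason label.
	rejected.WithLabelValues("validation_failed").Observe(2048)
	rejected.WithLabelValues("duplicate").Observe(512)
}
```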
+ undeliveredOutboundMessageCount prometheus.Counter +} - gs.receivedIHaveCount = promauto.NewCounter( - prometheus.CounterOpts{ +func NewGossipSubLocalMeshMetrics(prefix string) *LocalGossipSubRouterMetrics { + return &LocalGossipSubRouterMetrics{ + localMeshSize: *promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_local_mesh_size", + Help: "number of peers in the local mesh of the node", + }, + []string{LabelChannel}, + ), + peerAddedOnProtocolCount: *promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: gs.prefix + "gossipsub_received_ihave_total", - Help: "number of received ihave messages from gossipsub protocol", - }, - ) - - gs.receivedIWantCount = promauto.NewCounter( - prometheus.CounterOpts{ + Name: prefix + "gossipsub_added_peer_on_protocol_total", + Help: "number of peers added to the local gossipsub router on a gossipsub protocol", + }, []string{LabelProtocol}), + peerRemovedFromProtocolCount: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: gs.prefix + "gossipsub_received_iwant_total", - Help: "number of received iwant messages from gossipsub protocol", - }, - ) - - gs.receivedGraftCount = promauto.NewCounter( - prometheus.CounterOpts{ + Name: prefix + "gossipsub_removed_peer_total", + Help: "number of peers removed from the local gossipsub router on a gossipsub protocol due to unavailability or blacklisting", + }), + localPeerJoinedTopicCount: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: gs.prefix + "gossipsub_received_graft_total", - Help: "number of received graft messages from gossipsub protocol", - }, - ) - - gs.receivedPruneCount = promauto.NewCounter( - prometheus.CounterOpts{ + Name: prefix + "gossipsub_joined_topic_total", + Help: "number of times the local node joined (i.e., subscribed) to a topic", + }), + localPeerLeftTopicCount: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: gs.prefix + "gossipsub_received_prune_total", - Help: "number of received prune messages from gossipsub protocol", - }, - ) - - gs.incomingRpcAcceptedFullyCount = promauto.NewCounter( - prometheus.CounterOpts{ + Name: prefix + "gossipsub_left_topic_total", + Help: "number of times the local node left (i.e., unsubscribed) from a topic", + }), + peerGraftTopicCount: *promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: gs.prefix + "gossipsub_incoming_rpc_accepted_fully_total", - Help: "number of incoming rpc messages accepted fully by gossipsub protocol", - }, - ) - - gs.incomingRpcAcceptedOnlyControlCount = promauto.NewCounter( - prometheus.CounterOpts{ + Name: prefix + "gossipsub_graft_topic_total", + Help: "number of peers grafted to a topic on the local mesh of the node", + }, []string{LabelChannel}), + peerPruneTopicCount: *promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: gs.prefix + "gossipsub_incoming_rpc_accepted_only_control_total", - Help: "number of incoming rpc messages accepted only control messages by gossipsub protocol", - }, - ) - - gs.incomingRpcRejectedCount = promauto.NewCounter( - prometheus.CounterOpts{ + Name: prefix + "gossipsub_prune_topic_total", + Help: "number of peers pruned from a topic on the local mesh of the node", 
+ }, []string{LabelChannel}), + messageEnteredValidationCount: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: gs.prefix + "gossipsub_incoming_rpc_rejected_total", - Help: "number of incoming rpc messages rejected by gossipsub protocol", - }, - ) - - gs.receivedPublishMessageCount = promauto.NewCounter( - prometheus.CounterOpts{ + Name: prefix + "gossipsub_message_entered_validation_total", + Help: "number of messages entered internal validation pipeline of gossipsub", + }), + messageDeliveredSize: prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Buckets: []float64{KiB, 100 * KiB, 1 * MiB}, + Name: prefix + "gossipsub_message_delivered_size", + Help: "size of messages delivered to all subscribers of the topic", + }), + messageRejectedSize: *promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_message_rejected_size_bytes", + Help: "size of messages rejected by the validation pipeline", + }, []string{LabelRejectionReason}), + messageDuplicateSize: prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Buckets: []float64{KiB, 100 * KiB, 1 * MiB}, + Name: prefix + "gossipsub_duplicate_message_size_bytes", + Help: "size of messages that are duplicates of already received messages", + }), + peerThrottledCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_peer_throttled_total", + Help: "number of peers that are throttled by the local node, i.e., the local node is not accepting any pubsub message from the peer but may still accept control messages", + }), + rpcRcvCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_rpc_received_total", + Help: "number of rpc messages received and processed by the router (i.e., passed rpc inspection)", + }), + rpcSentCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_rpc_sent_total", + Help: "number of rpc messages sent by the router", + }), + outboundRpcDroppedCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_rpc_dropped_total", + Help: "number of outbound rpc messages dropped, typically because the outbound message queue is full", + }), + undeliveredOutboundMessageCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_undelivered_message_total", + Help: "number of undelivered messages, i.e., messages that are not delivered to at least one subscriber", + }), + iHaveRcvCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_ihave_received_total", + Help: "number of ihave messages received by the router on rpcs", + }), + iWantRcvCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_iwant_received_total", + Help: "number of iwant messages received by the router on rpcs", + }), + graftRcvCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: 
subsystemGossip, + Name: prefix + "gossipsub_graft_received_total", + Help: "number of graft messages received by the router on rpcs", + }), + pruneRcvCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_prune_received_total", + Help: "number of prune messages received by the router on rpcs", + }), + pubsubMsgRcvCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_pubsub_message_received_total", + Help: "number of pubsub messages received by the router", + }), + iHaveSentCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_ihave_sent_total", + Help: "number of ihave messages sent by the router on rpcs", + }), + iWantSentCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_iwant_sent_total", + Help: "number of iwant messages sent by the router on rpcs", + }), + graftSentCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_graft_sent_total", + Help: "number of graft messages sent by the router on rpcs", + }), + pruneSentCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: prefix + "gossipsub_prune_sent_total", + Help: "number of prune messages sent by the router on rpcs", + }), + pubsubMsgSentCount: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: gs.prefix + "gossipsub_received_publish_message_total", - Help: "number of received publish messages from gossipsub protocol", - }, - ) + Name: prefix + "gossipsub_pubsub_message_sent_total", + Help: "number of pubsub messages sent by the router", + }), + } +} - return gs +var _ module.LocalGossipSubRouterMetrics = (*LocalGossipSubRouterMetrics)(nil) + +// OnLocalMeshSizeUpdated updates the local mesh size metric. +func (g *LocalGossipSubRouterMetrics) OnLocalMeshSizeUpdated(topic string, size int) { + g.localMeshSize.WithLabelValues(topic).Set(float64(size)) } -// OnIWantReceived tracks the number of IWANT messages received by the node from other nodes. -// iWant is a control message that is sent by a node to request a message that it has seen advertised in an iHAVE message. -func (nc *GossipSubMetrics) OnIWantReceived(count int) { - nc.receivedIWantCount.Add(float64(count)) +// OnPeerAddedToProtocol is called when the local node receives a stream from a peer on a gossipsub-related protocol. +// Args: +// +// protocol: the protocol name that the peer is connected to. +func (g *LocalGossipSubRouterMetrics) OnPeerAddedToProtocol(protocol string) { + g.peerAddedOnProtocolCount.WithLabelValues(protocol).Inc() }
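The collector above is purely event-driven: networking code reports router events, and each callback maps onto one labeled Prometheus instrument. A minimal usage sketch follows; the prefix, topic, and protocol strings are illustrative placeholders, not values taken from this change.

package main

import "github.com/onflow/flow-go/module/metrics"

func main() {
	// empty prefix for brevity; a node would pass its role-specific metrics prefix
	m := metrics.NewGossipSubLocalMeshMetrics("")

	// gauge keyed by the channel label: the local mesh for this topic now has 8 peers
	m.OnLocalMeshSizeUpdated("example-topic", 8)

	// counter keyed by the protocol label: a peer opened a gossipsub stream to us
	m.OnPeerAddedToProtocol("/example/meshsub/1.0.0")
}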
-// OnIHaveReceived tracks the number of IHAVE messages received by the node from other nodes. -// iHave is a control message that is sent by a node to another node to indicate that it has a new gossiped message. -func (nc *GossipSubMetrics) OnIHaveReceived(count int) { - nc.receivedIHaveCount.Add(float64(count)) +// OnPeerRemovedFromProtocol is called when the local node considers a remote peer blacklisted or unavailable. +func (g *LocalGossipSubRouterMetrics) OnPeerRemovedFromProtocol() { + g.peerRemovedFromProtocolCount.Inc() } -// OnGraftReceived tracks the number of GRAFT messages received by the node from other nodes. -// GRAFT is a control message of GossipSub protocol that connects two nodes over a topic directly as gossip partners. -func (nc *GossipSubMetrics) OnGraftReceived(count int) { - nc.receivedGraftCount.Add(float64(count)) +// OnLocalPeerJoinedTopic is called when the local node subscribes to a gossipsub topic. +func (g *LocalGossipSubRouterMetrics) OnLocalPeerJoinedTopic() { + g.localPeerJoinedTopicCount.Inc() } -// OnPruneReceived tracks the number of PRUNE messages received by the node from other nodes. -// PRUNE is a control message of GossipSub protocol that disconnects two nodes over a topic. -func (nc *GossipSubMetrics) OnPruneReceived(count int) { - nc.receivedPruneCount.Add(float64(count)) +// OnLocalPeerLeftTopic is called when the local node unsubscribes from a gossipsub topic. +func (g *LocalGossipSubRouterMetrics) OnLocalPeerLeftTopic() { + g.localPeerLeftTopicCount.Inc() } -// OnIncomingRpcAcceptedFully tracks the number of RPC messages received by the node that are fully accepted. -// An RPC may contain any number of control messages, i.e., IHAVE, IWANT, GRAFT, PRUNE, as well as the actual messages. -// A fully accepted RPC means that all the control messages are accepted and all the messages are accepted. -func (nc *GossipSubMetrics) OnIncomingRpcAcceptedFully() { - nc.incomingRpcAcceptedFullyCount.Inc() +// OnPeerGraftTopic is called when the local node receives a GRAFT message from a remote peer on a topic. +// Note: at this point the received GRAFT has passed the RPC inspection and is accepted by the local node. +func (g *LocalGossipSubRouterMetrics) OnPeerGraftTopic(topic string) { + g.peerGraftTopicCount.WithLabelValues(topic).Inc() } -// OnIncomingRpcAcceptedOnlyForControlMessages tracks the number of RPC messages received by the node that are accepted -// only for the control messages, i.e., only for the included IHAVE, IWANT, GRAFT, PRUNE. However, the actual messages -// included in the RPC are not accepted. -// This happens mostly when the validation pipeline of GossipSub is throttled, and cannot accept more actual messages for -// validation. -func (nc *GossipSubMetrics) OnIncomingRpcAcceptedOnlyForControlMessages() { - nc.incomingRpcAcceptedOnlyControlCount.Inc() +// OnPeerPruneTopic is called when the local node receives a PRUNE message from a remote peer on a topic. +// Note: at this point the received PRUNE has passed the RPC inspection and is accepted by the local node. +func (g *LocalGossipSubRouterMetrics) OnPeerPruneTopic(topic string) { + g.peerPruneTopicCount.WithLabelValues(topic).Inc() } -// OnIncomingRpcRejected tracks the number of RPC messages received by the node that are rejected. -// This happens mostly when the RPC is coming from a low-scored peer based on the peer scoring module of GossipSub. -func (nc *GossipSubMetrics) OnIncomingRpcRejected() { - nc.incomingRpcRejectedCount.Inc() +// OnMessageEnteredValidation is called when a received pubsub message enters the internal validation pipeline of the +// GossipSub protocol. The message may be accepted or rejected by that pipeline. +// The argument is the size of the message; it is currently unused.
+func (g *LocalGossipSubRouterMetrics) OnMessageEnteredValidation(int) { + g.messageEnteredValidationCount.Inc() } -// OnPublishedGossipMessagesReceived tracks the number of gossip messages received by the node from other nodes over an -// RPC message. -func (nc *GossipSubMetrics) OnPublishedGossipMessagesReceived(count int) { - nc.receivedPublishMessageCount.Add(float64(count)) +// OnMessageRejected is called when a received pubsub message is rejected by the validation pipeline. +// Args: +// +// size: the size of the rejected message. +// reason: the reason for rejection. +func (g *LocalGossipSubRouterMetrics) OnMessageRejected(size int, reason string) { + g.messageRejectedSize.WithLabelValues(reason).Observe(float64(size)) } -// GossipSubLocalMeshMetrics is a metrics collector for the local mesh of GossipSub protocol. -type GossipSubLocalMeshMetrics struct { - localMeshSize prometheus.GaugeVec +// OnMessageDuplicate is called when a received pubsub message is a duplicate of a previously received message, and +// is dropped. +// Args: +// +// size: the size of the duplicate message. +func (g *LocalGossipSubRouterMetrics) OnMessageDuplicate(size int) { + g.messageDuplicateSize.Observe(float64(size)) } -func NewGossipSubLocalMeshMetrics(prefix string) *GossipSubLocalMeshMetrics { - return &GossipSubLocalMeshMetrics{ - localMeshSize: *promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, - Name: prefix + "gossipsub_local_mesh_size", - Help: "number of peers in the local mesh of the node", - }, - []string{LabelChannel}, - ), - } +// OnPeerThrottled is called when a peer is throttled by the local node, i.e., the local node is not accepting any +// pubsub message from the peer but may still accept control messages. +func (g *LocalGossipSubRouterMetrics) OnPeerThrottled() { + g.peerThrottledCount.Inc() } -var _ module.GossipSubLocalMeshMetrics = (*GossipSubLocalMeshMetrics)(nil) +// OnRpcReceived is called when an RPC message is received by the local node. At this point the received RPC has +// passed the RPC inspection and is accepted by the local node. +func (g *LocalGossipSubRouterMetrics) OnRpcReceived(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { + g.rpcRcvCount.Inc() + g.pubsubMsgRcvCount.Add(float64(msgCount)) + g.iHaveRcvCount.Add(float64(iHaveCount)) + g.iWantRcvCount.Add(float64(iWantCount)) + g.graftRcvCount.Add(float64(graftCount)) + g.pruneRcvCount.Add(float64(pruneCount)) +} -// OnLocalMeshSizeUpdated updates the local mesh size metric. -func (g *GossipSubLocalMeshMetrics) OnLocalMeshSizeUpdated(topic string, size int) { - g.localMeshSize.WithLabelValues(topic).Set(float64(size)) +// OnRpcSent is called when an RPC message is sent by the local node. +func (g *LocalGossipSubRouterMetrics) OnRpcSent(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { + g.rpcSentCount.Inc() + g.pubsubMsgSentCount.Add(float64(msgCount)) + g.iHaveSentCount.Add(float64(iHaveCount)) + g.iWantSentCount.Add(float64(iWantCount)) + g.graftSentCount.Add(float64(graftCount)) + g.pruneSentCount.Add(float64(pruneCount)) +}
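OnRpcReceived and OnRpcSent fan a single router event out over several counters: the RPC counter is incremented once, and each per-control-message count is added to its own counter. A sketch of the expected call pattern, with illustrative counts:

package main

import "github.com/onflow/flow-go/module/metrics"

func main() {
	m := metrics.NewGossipSubLocalMeshMetrics("")

	// an inbound RPC carrying 3 publish messages, 2 iHaves, and 1 graft:
	// the received-rpc counter is incremented once; each argument feeds its own counter.
	m.OnRpcReceived(3, 2, 0, 1, 0) // msgCount, iHaveCount, iWantCount, graftCount, pruneCount

	// the outbound mirror image, feeding the *_sent_total counters
	m.OnRpcSent(1, 0, 4, 0, 0)
}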
+ +// OnOutboundRpcDropped is called when an outbound RPC message is dropped by the local node, typically because the +// local node's outbound message queue is full, or because the RPC is too large for the local node to fragment it. +func (g *LocalGossipSubRouterMetrics) OnOutboundRpcDropped() { + g.outboundRpcDroppedCount.Inc() +} + +// OnUndeliveredMessage is called when a message is not delivered to at least one subscriber of the topic, for example when +// the subscriber is too slow to process the message. +func (g *LocalGossipSubRouterMetrics) OnUndeliveredMessage() { + g.undeliveredOutboundMessageCount.Inc() +} + +// OnMessageDeliveredToAllSubscribers is called when a message is delivered to all subscribers of the topic. +// Args: +// +// size: the size of the delivered message. +func (g *LocalGossipSubRouterMetrics) OnMessageDeliveredToAllSubscribers(size int) { + g.messageDeliveredSize.Observe(float64(size)) } diff --git a/module/metrics/gossipsub_rpc_validation_inspector.go b/module/metrics/gossipsub_rpc_validation_inspector.go new file mode 100644 index 00000000000..d8c20fccc81 --- /dev/null +++ b/module/metrics/gossipsub_rpc_validation_inspector.go @@ -0,0 +1,586 @@ +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module" + p2pmsg "github.com/onflow/flow-go/network/p2p/message" +) + +const ( + labelIHaveMessageIds = "ihave_message_ids" + labelIWantMessageIds = "iwant_message_ids" +) + +// GossipSubRpcValidationInspectorMetrics is the metrics collector for the gossipsub RPC validation inspector. +type GossipSubRpcValidationInspectorMetrics struct { + prefix string + rpcCtrlMsgInAsyncPreProcessingGauge prometheus.Gauge + rpcCtrlMsgAsyncProcessingTimeHistogram prometheus.Histogram + rpcCtrlMsgTruncation prometheus.HistogramVec + ctrlMsgInvalidTopicIdCount prometheus.CounterVec + receivedIWantMsgCount prometheus.Counter + receivedIWantMsgIDsHistogram prometheus.Histogram + receivedIHaveMsgCount prometheus.Counter + receivedIHaveMsgIDsHistogram prometheus.HistogramVec + receivedPruneCount prometheus.Counter + receivedGraftCount prometheus.Counter + receivedPublishMessageCount prometheus.Counter + incomingRpcCount prometheus.Counter + + // graft inspection + graftDuplicateTopicIdsHistogram prometheus.Histogram + graftInvalidTopicIdsHistogram prometheus.Histogram + graftDuplicateTopicIdsExceedThresholdCount prometheus.Counter + graftInvalidTopicIdsExceedThresholdCount prometheus.Counter + + // prune inspection + pruneDuplicateTopicIdsHistogram prometheus.Histogram + pruneInvalidTopicIdsHistogram prometheus.Histogram + pruneDuplicateTopicIdsExceedThresholdCount prometheus.Counter + pruneInvalidTopicIdsExceedThresholdCount prometheus.Counter + + // iHave inspection + iHaveDuplicateMessageIdHistogram prometheus.Histogram + iHaveDuplicateTopicIdHistogram prometheus.Histogram + iHaveInvalidTopicIdHistogram prometheus.Histogram + iHaveDuplicateMessageIdExceedThresholdCount prometheus.Counter + iHaveDuplicateTopicIdExceedThresholdCount prometheus.Counter + iHaveInvalidTopicIdExceedThresholdCount prometheus.Counter + + // iWant inspection + iWantDuplicateMessageIdHistogram prometheus.Histogram + iWantCacheMissHistogram prometheus.Histogram + iWantDuplicateMessageIdExceedThresholdCount prometheus.Counter + iWantCacheMissMessageIdExceedThresholdCount prometheus.Counter + + // inspection result + errActiveClusterIdsNotSetCount prometheus.Counter + errUnstakedPeerInspectionFailedCount prometheus.Counter + invalidControlMessageNotificationSentCount prometheus.Counter + unstakedNodeRPCRejectedCount prometheus.Counter + + // publish messages + publishMessageInspectionErrExceedThresholdCount
prometheus.Counter + publishMessageInvalidSenderCountHistogram prometheus.Histogram + publishMessageInvalidSubscriptionsHistogram prometheus.Histogram + publishMessageInvalidTopicIdHistogram prometheus.Histogram + publishMessageInspectedErrHistogram prometheus.Histogram +} + +var _ module.GossipSubRpcValidationInspectorMetrics = (*GossipSubRpcValidationInspectorMetrics)(nil) + +// NewGossipSubRPCValidationInspectorMetrics returns a new *GossipSubRpcValidationInspectorMetrics. +func NewGossipSubRPCValidationInspectorMetrics(prefix string) *GossipSubRpcValidationInspectorMetrics { + gc := &GossipSubRpcValidationInspectorMetrics{prefix: prefix} + gc.rpcCtrlMsgInAsyncPreProcessingGauge = promauto.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "control_messages_in_async_processing_total", + Help: "the number of rpc control messages currently being processed asynchronously by workers from the rpc validator worker pool", + }, + ) + gc.rpcCtrlMsgAsyncProcessingTimeHistogram = promauto.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_control_message_validator_async_processing_time_seconds", + Help: "duration [seconds; measured with float64 precision] of how long it takes rpc control message validator to asynchronously process an rpc message", + Buckets: []float64{.1, 1}, + }, + ) + + gc.rpcCtrlMsgTruncation = *promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_control_message_truncation", + Help: "the number of times a control message was truncated", + Buckets: []float64{10, 100, 1000}, + }, []string{LabelMessage}) + + gc.receivedIHaveMsgCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "gossipsub_received_ihave_total", + Help: "number of received ihave messages from gossipsub protocol", + }) + + gc.receivedIHaveMsgIDsHistogram = *promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "gossipsub_received_ihave_message_ids", + Help: "histogram of received ihave message ids from gossipsub protocol per channel", + }, []string{LabelChannel}) + + gc.receivedIWantMsgCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "gossipsub_received_iwant_total", + Help: "total number of received iwant messages from gossipsub protocol", + }) + + gc.receivedIWantMsgIDsHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "gossipsub_received_iwant_message_ids", + Help: "histogram of received iwant message ids from gossipsub protocol", + }) + + gc.receivedGraftCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "gossipsub_received_graft_total", + Help: "total number of received graft messages from gossipsub protocol", + }) + + gc.receivedPruneCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "gossipsub_received_prune_total", + Help: "total number of received prune messages from gossipsub protocol", + }) + + gc.receivedPublishMessageCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace:
namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "gossipsub_received_publish_message_total", + Help: "total number of received publish messages from gossipsub protocol", + }) + + gc.incomingRpcCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "gossipsub_incoming_rpc_total", + Help: "total number of incoming rpc messages from gossipsub protocol", + }) + + gc.iHaveDuplicateMessageIdHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Buckets: []float64{1, 100, 1000}, + Name: gc.prefix + "rpc_inspection_ihave_duplicate_message_ids_count", + Help: "number of duplicate message ids received from gossipsub protocol during the async inspection of a single RPC", + }) + + gc.iHaveDuplicateTopicIdHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Buckets: []float64{1, 100, 1000}, + Name: gc.prefix + "rpc_inspection_ihave_duplicate_topic_ids_count", + Help: "number of duplicate topic ids received from gossipsub protocol during the async inspection of a single RPC", + }) + + gc.iHaveInvalidTopicIdHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Buckets: []float64{1, 100, 1000}, + Name: gc.prefix + "rpc_inspection_ihave_invalid_topic_ids_count", + Help: "number of invalid topic ids received from gossipsub protocol during the async inspection of a single RPC", + }) + + gc.iHaveDuplicateMessageIdExceedThresholdCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_ihave_duplicate_message_ids_exceed_threshold_total", + Help: "total number of times that the async inspection of iHave messages failed due to the number of duplicate message ids exceeding the threshold", + }) + + gc.iHaveDuplicateTopicIdExceedThresholdCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_ihave_duplicate_topic_ids_exceed_threshold_total", + Help: "total number of times that the async inspection of iHave messages failed due to the number of duplicate topic ids exceeding the threshold", + }) + + gc.iHaveInvalidTopicIdExceedThresholdCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_ihave_invalid_topic_ids_exceed_threshold_total", + Help: "total number of times that the async inspection of iHave messages failed due to the number of invalid topic ids exceeding the threshold", + }) + + gc.iWantDuplicateMessageIdHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_iwant_duplicate_message_ids_count", + Buckets: []float64{1, 100, 1000}, + Help: "number of duplicate message ids received from gossipsub protocol during the async inspection of a single RPC", + }) + + gc.iWantCacheMissHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_iwant_cache_miss_message_ids_count", + Buckets: []float64{1, 100, 1000}, + Help: "total number of cache miss message ids received from gossipsub protocol during the async inspection of a single RPC", + }) + + 
gc.iWantDuplicateMessageIdExceedThresholdCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_iwant_duplicate_message_ids_exceed_threshold_total", + Help: "total number of times that the async inspection of iWant messages failed due to the number of duplicate message ids exceeding the threshold", + }) + + gc.iWantCacheMissMessageIdExceedThresholdCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_iwant_cache_miss_message_ids_exceed_threshold_total", + Help: "total number of times that the async inspection of iWant messages failed due to the number of cache miss message ids exceeding the threshold", + }) + + gc.ctrlMsgInvalidTopicIdCount = *promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "control_message_invalid_topic_id_total", + Help: "total number of control messages with invalid topic id received from gossipsub protocol during the async inspection", + }, []string{LabelMessage}) + + gc.errActiveClusterIdsNotSetCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "active_cluster_ids_not_inspection_error_total", + Help: "total number of inspection errors due to active cluster ids not set inspection failure", + }) + + gc.errUnstakedPeerInspectionFailedCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "unstaked_peer_inspection_error_total", + Help: "total number of inspection errors due to unstaked peer inspection failure", + }) + + gc.invalidControlMessageNotificationSentCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "invalid_control_message_notification_sent_total", + Help: "number of invalid control message notifications (i.e., misbehavior report) sent due to async inspection of rpcs failure", + }) + + gc.unstakedNodeRPCRejectedCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "unstaked_node_rejection_total", + Help: "number of rpcs rejected from unstaked node", + }) + + gc.graftDuplicateTopicIdsHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_graft_duplicate_topic_ids_count", + Buckets: []float64{1, 100, 1000}, + Help: "number of duplicate topic ids on graft messages of a single RPC during the async inspection, regardless of the result of the inspection", + }) + + gc.graftInvalidTopicIdsHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_graft_invalid_topic_ids_count", + Buckets: []float64{1, 100, 1000}, + Help: "number of invalid topic ids on graft messages of a single RPC during the async inspection, regardless of the result of the inspection", + }) + + gc.graftDuplicateTopicIdsExceedThresholdCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_graft_duplicate_topic_ids_exceed_threshold_total", + Help: "number of times that the async inspection of graft messages of an rpc failed due to the number of duplicate topic ids exceeding the threshold",
threshold", + }) + + gc.graftInvalidTopicIdsExceedThresholdCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_graft_invalid_topic_ids_exceed_threshold_total", + Help: "number of times that the async inspection of graft messages of an rpc failed due to the number of invalid topic ids exceeding the threshold", + }) + + gc.pruneDuplicateTopicIdsHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Buckets: []float64{1, 100, 1000}, + Name: gc.prefix + "rpc_inspection_prune_duplicate_topic_ids_count", + Help: "number of duplicate topic ids on prune messages of a single RPC during the async inspection, regardless of the result of the inspection", + }) + + gc.pruneInvalidTopicIdsHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Buckets: []float64{1, 100, 1000}, + Name: gc.prefix + "rpc_inspection_prune_invalid_topic_ids_count", + Help: "number of invalid topic ids on prune messages of a single RPC during the async inspection, regardless of the result of the inspection", + }) + + gc.pruneDuplicateTopicIdsExceedThresholdCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_prune_duplicate_topic_ids_exceed_threshold_total", + Help: "number of times that the async inspection of prune messages failed due to the number of duplicate topic ids exceeding the threshold", + }) + + gc.pruneInvalidTopicIdsExceedThresholdCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_prune_invalid_topic_ids_exceed_threshold_total", + Help: "number of times that the async inspection of prune messages failed due to the number of invalid topic ids exceeding the threshold", + }) + + gc.publishMessageInspectedErrHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "publish_message_inspected_error_count", + Buckets: []float64{10, 100, 1000}, + Help: "number of errors that occurred during the async inspection of publish messages on a single RPC, regardless pof the result", + }) + + gc.publishMessageInvalidSenderCountHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_publish_message_invalid_sender_count", + Buckets: []float64{1, 100, 1000}, + Help: "number of invalid senders observed during the async inspection of publish messages on a single RPC, regardless of the result", + }) + + gc.publishMessageInvalidTopicIdHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_publish_message_invalid_topic_id_count", + Buckets: []float64{1, 100, 1000}, + Help: "number of invalid topic ids observed during the async inspection of publish messages on a single RPC, regardless of the result", + }) + + gc.publishMessageInvalidSubscriptionsHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "rpc_inspection_publish_message_invalid_subscriptions_count", + Buckets: []float64{1, 100, 1000}, + Help: "number of invalid subscriptions observed during 
the async inspection of publish messages on a single RPC, regardless of the result", + }) + + gc.publishMessageInspectionErrExceedThresholdCount = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "publish_message_inspection_err_exceed_threshold_total", + Help: "number of rpcs that fail inspection due to publish message inspection errors exceeding the threshold", + }) + + return gc +} + +// AsyncProcessingStarted increments the metric tracking the number of RPCs being processed asynchronously by the rpc validation inspector. +func (c *GossipSubRpcValidationInspectorMetrics) AsyncProcessingStarted() { + c.rpcCtrlMsgInAsyncPreProcessingGauge.Inc() +} + +// AsyncProcessingFinished tracks the time spent by the rpc validation inspector to process an RPC asynchronously and decrements the metric tracking +// the number of RPCs being processed asynchronously by the rpc validation inspector. +func (c *GossipSubRpcValidationInspectorMetrics) AsyncProcessingFinished(duration time.Duration) { + c.rpcCtrlMsgInAsyncPreProcessingGauge.Dec() + c.rpcCtrlMsgAsyncProcessingTimeHistogram.Observe(duration.Seconds()) +} + +// OnControlMessagesTruncated tracks the number of times a control message was truncated. +// Args: +// +// messageType: the type of the control message that was truncated +// diff: the number of message ids truncated. +func (c *GossipSubRpcValidationInspectorMetrics) OnControlMessagesTruncated(messageType p2pmsg.ControlMessageType, diff int) { + c.rpcCtrlMsgTruncation.WithLabelValues(messageType.String()).Observe(float64(diff)) +} + +// OnIHaveControlMessageIdsTruncated tracks the number of times message ids on an iHave message were truncated. +// Note that this function is called only when the message ids are truncated from an iHave message, not when the iHave message itself is truncated. +// This is different from the OnControlMessagesTruncated function, which is called when a slice of control messages is truncated from an RPC together with all their message ids. +// Args: +// +// diff: the number of message ids truncated. +func (c *GossipSubRpcValidationInspectorMetrics) OnIHaveControlMessageIdsTruncated(diff int) { + c.OnControlMessagesTruncated(labelIHaveMessageIds, diff) +} + +// OnIWantControlMessageIdsTruncated tracks the number of times message ids on an iWant message were truncated. +// Note that this function is called only when the message ids are truncated from an iWant message, not when the iWant message itself is truncated. +// This is different from the OnControlMessagesTruncated function, which is called when a slice of control messages is truncated from an RPC together with all their message ids. +// Args: +// +// diff: the number of message ids truncated. +func (c *GossipSubRpcValidationInspectorMetrics) OnIWantControlMessageIdsTruncated(diff int) { + c.OnControlMessagesTruncated(labelIWantMessageIds, diff) +} + +// OnIWantMessageIDsReceived tracks the number of message ids received by the node from other nodes on an RPC. +// Note: this function is called on each IWANT message received by the node. +// Args: +// - msgIdCount: the number of message ids received on the IWANT message. +func (c *GossipSubRpcValidationInspectorMetrics) OnIWantMessageIDsReceived(msgIdCount int) { + c.receivedIWantMsgIDsHistogram.Observe(float64(msgIdCount)) +} +
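OnControlMessagesTruncated observes the number of truncated items under a per-message-type label, and the two MessageIds variants reuse the same histogram under the dedicated labelIHaveMessageIds/labelIWantMessageIds labels defined at the top of this file. A sketch of the call pattern, assuming the control-message-type constants (e.g. p2pmsg.CtrlMsgGraft) exposed by flow-go's p2pmsg package:

package main

import (
	"github.com/onflow/flow-go/module/metrics"
	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
)

func main() {
	c := metrics.NewGossipSubRPCValidationInspectorMetrics("")

	// 25 graft messages dropped while truncating an RPC, observed under the graft message-type label
	c.OnControlMessagesTruncated(p2pmsg.CtrlMsgGraft, 25)

	// 50 message ids dropped from a single iHave message, observed under "ihave_message_ids"
	c.OnIHaveControlMessageIdsTruncated(50)
}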
+// OnIHaveMessageIDsReceived tracks the number of message ids received by the node from other nodes on an iHave message. +// This function is called on each iHave message received by the node. +// Args: +// - channel: the channel on which the iHave message was received. +// - msgIdCount: the number of message ids received on the iHave message. +func (c *GossipSubRpcValidationInspectorMetrics) OnIHaveMessageIDsReceived(channel string, msgIdCount int) { + c.receivedIHaveMsgIDsHistogram.WithLabelValues(channel).Observe(float64(msgIdCount)) +} + +// OnIncomingRpcReceived tracks the number of incoming RPC messages received by the node. +func (c *GossipSubRpcValidationInspectorMetrics) OnIncomingRpcReceived(iHaveCount, iWantCount, graftCount, pruneCount, msgCount int) { + c.incomingRpcCount.Inc() + c.receivedPublishMessageCount.Add(float64(msgCount)) + c.receivedPruneCount.Add(float64(pruneCount)) + c.receivedGraftCount.Add(float64(graftCount)) + c.receivedIWantMsgCount.Add(float64(iWantCount)) + c.receivedIHaveMsgCount.Add(float64(iHaveCount)) +} + +// OnIWantMessagesInspected tracks the number of duplicate and cache miss message ids received by the node on iWant messages at the end of the async inspection of iWants +// across one RPC, regardless of the result of the inspection. +// +// duplicateCount: the total number of duplicate message ids received by the node on the iWant messages at the end of the async inspection of the RPC. +// cacheMissCount: the total number of cache miss message ids received by the node on the iWant message at the end of the async inspection of the RPC. +func (c *GossipSubRpcValidationInspectorMetrics) OnIWantMessagesInspected(duplicateCount int, cacheMissCount int) { + c.iWantDuplicateMessageIdHistogram.Observe(float64(duplicateCount)) + c.iWantCacheMissHistogram.Observe(float64(cacheMissCount)) +} + +// OnIWantDuplicateMessageIdsExceedThreshold tracks the number of times that async inspection of iWant messages failed due to the total number of duplicate message ids +// received by the node on the iWant messages of a single RPC exceeding the threshold, which results in a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnIWantDuplicateMessageIdsExceedThreshold() { + c.iWantDuplicateMessageIdExceedThresholdCount.Inc() +} + +// OnIWantCacheMissMessageIdsExceedThreshold tracks the number of times that async inspection of iWant messages failed due to the total +// number of cache miss message ids received by the node on the iWant messages of a single RPC exceeding the threshold, which results in a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnIWantCacheMissMessageIdsExceedThreshold() { + c.iWantCacheMissMessageIdExceedThresholdCount.Inc() +} + +// OnIHaveMessagesInspected is called at the end of the async inspection of iHave messages of a single RPC, regardless of the result of the inspection. +// It tracks the number of duplicate topic ids and duplicate message ids received by the node on the iHave messages of that single RPC at the end of the async inspection of iHaves. +// Args: +// +// duplicateTopicIds: the total number of duplicate topic ids received by the node on the iHave messages at the end of the async inspection of the RPC. +// duplicateMessageIds: the number of duplicate message ids received by the node on the iHave messages at the end of the async inspection of the RPC. +// invalidTopicIds: the number of invalid topic ids received by the node on the iHave messages at the end of the async inspection of the RPC.
+func (c *GossipSubRpcValidationInspectorMetrics) OnIHaveMessagesInspected(duplicateTopicIds, duplicateMessageIds, invalidTopicIds int) { + c.iHaveDuplicateTopicIdHistogram.Observe(float64(duplicateTopicIds)) + c.iHaveDuplicateMessageIdHistogram.Observe(float64(duplicateMessageIds)) + c.iHaveInvalidTopicIdHistogram.Observe(float64(invalidTopicIds)) +} + +// OnIHaveDuplicateTopicIdsExceedThreshold tracks the number of times that the async inspection of iHave messages of a single RPC failed due to the total number of duplicate topic ids +// received by the node on the iHave messages of that RPC exceeding the threshold, which results in a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnIHaveDuplicateTopicIdsExceedThreshold() { + c.iHaveDuplicateTopicIdExceedThresholdCount.Inc() +} + +// OnIHaveDuplicateMessageIdsExceedThreshold tracks the number of times that the async inspection of iHave messages of a single RPC failed due to the total number of duplicate message ids +// received by the node on an iHave message exceeding the threshold, which results in a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnIHaveDuplicateMessageIdsExceedThreshold() { + c.iHaveDuplicateMessageIdExceedThresholdCount.Inc() +} + +// OnIHaveInvalidTopicIdsExceedThreshold tracks the number of times that the async inspection of iHave messages of a single RPC failed due to the total number of invalid topic ids +// received by the node on the iHave messages of that RPC exceeding the threshold, which results in a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnIHaveInvalidTopicIdsExceedThreshold() { + c.iHaveInvalidTopicIdExceedThresholdCount.Inc() +} + +// OnInvalidTopicIdDetectedForControlMessage tracks the number of times that the async inspection of a control message type on a single RPC failed due to an invalid topic id. +// Args: +// - messageType: the type of the control message. +func (c *GossipSubRpcValidationInspectorMetrics) OnInvalidTopicIdDetectedForControlMessage(messageType p2pmsg.ControlMessageType) { + c.ctrlMsgInvalidTopicIdCount.WithLabelValues(messageType.String()).Inc() +} + +// OnActiveClusterIDsNotSetErr tracks the number of times that the async inspection of a control message type on a single RPC failed due to active cluster ids not set inspection failure. +// This is not causing a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnActiveClusterIDsNotSetErr() { + c.errActiveClusterIdsNotSetCount.Inc() +} + +// OnUnstakedPeerInspectionFailed tracks the number of times that the async inspection of a control message type on a single RPC failed due to unstaked peer inspection failure. +// This is not causing a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnUnstakedPeerInspectionFailed() { + c.errUnstakedPeerInspectionFailedCount.Inc() +} + +// OnInvalidControlMessageNotificationSent tracks the number of times that the async inspection of a control message failed and resulted in an invalid control message +// notification being sent (i.e., a misbehavior report). +func (c *GossipSubRpcValidationInspectorMetrics) OnInvalidControlMessageNotificationSent() { + c.invalidControlMessageNotificationSentCount.Inc() +}
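The iHave inspection metrics follow a two-level pattern: the histograms are observed for every inspected RPC, while the exceed-threshold counters fire only when inspection fails and a misbehaviour report follows. A sketch of how an inspector might drive them; the counts and threshold are illustrative, not taken from this change:

package main

import "github.com/onflow/flow-go/module/metrics"

func main() {
	c := metrics.NewGossipSubRPCValidationInspectorMetrics("")

	duplicateTopicIds, duplicateMessageIds, invalidTopicIds := 2, 40, 1
	// observed for every inspected RPC, whether or not it passes inspection
	c.OnIHaveMessagesInspected(duplicateTopicIds, duplicateMessageIds, invalidTopicIds)

	const duplicateMsgIdThreshold = 30 // illustrative threshold, not from this change
	if duplicateMessageIds > duplicateMsgIdThreshold {
		// incremented only when inspection fails and a misbehaviour report is raised
		c.OnIHaveDuplicateMessageIdsExceedThreshold()
	}
}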
+ +// OnRpcRejectedFromUnknownSender tracks the number of rpcs rejected from unstaked nodes. +func (c *GossipSubRpcValidationInspectorMetrics) OnRpcRejectedFromUnknownSender() { + c.unstakedNodeRPCRejectedCount.Inc() +} + +// OnPruneDuplicateTopicIdsExceedThreshold tracks the number of times that the async inspection of prune messages for an RPC failed due to the number of duplicate topic ids +// received by the node on prune messages of the same RPC exceeding the threshold, which results in a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnPruneDuplicateTopicIdsExceedThreshold() { + c.pruneDuplicateTopicIdsExceedThresholdCount.Inc() +} + +// OnPruneInvalidTopicIdsExceedThreshold tracks the number of times that the async inspection of prune messages for an RPC failed due to the number of invalid topic ids +// received by the node on prune messages of the same RPC exceeding the threshold, which results in a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnPruneInvalidTopicIdsExceedThreshold() { + c.pruneInvalidTopicIdsExceedThresholdCount.Inc() +} + +// OnPruneMessageInspected is called at the end of the async inspection of prune messages of the RPC, regardless of the result of the inspection. +// Args: +// +// duplicateTopicIds: the number of duplicate topic ids received by the node on the prune messages of the RPC at the end of the async inspection of prunes. +// invalidTopicIds: the number of invalid topic ids received by the node on the prune messages at the end of the async inspection of the RPC. +func (c *GossipSubRpcValidationInspectorMetrics) OnPruneMessageInspected(duplicateTopicIds, invalidTopicIds int) { + c.pruneDuplicateTopicIdsHistogram.Observe(float64(duplicateTopicIds)) + c.pruneInvalidTopicIdsHistogram.Observe(float64(invalidTopicIds)) +} + +// OnGraftDuplicateTopicIdsExceedThreshold tracks the number of times that the async inspection of a graft message failed due to the number of duplicate topic ids +// received by the node on graft messages of the same rpc exceeding the threshold, which results in a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnGraftDuplicateTopicIdsExceedThreshold() { + c.graftDuplicateTopicIdsExceedThresholdCount.Inc() +} + +// OnGraftInvalidTopicIdsExceedThreshold tracks the number of times that the async inspection of the graft messages of a single RPC failed due to the number of invalid topic ids +// received by the node on graft messages of the same RPC exceeding the threshold, which results in a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnGraftInvalidTopicIdsExceedThreshold() { + c.graftInvalidTopicIdsExceedThresholdCount.Inc() +} + +// OnGraftMessageInspected is called at the end of the async inspection of graft messages of a single RPC, regardless of the result of the inspection. +// Args: +// +// duplicateTopicIds: the number of duplicate topic ids received by the node on the graft messages at the end of the async inspection of a single RPC. +// invalidTopicIds: the number of invalid topic ids received by the node on the graft messages at the end of the async inspection of the RPC. +func (c *GossipSubRpcValidationInspectorMetrics) OnGraftMessageInspected(duplicateTopicIds, invalidTopicIds int) { + c.graftDuplicateTopicIdsHistogram.Observe(float64(duplicateTopicIds)) + c.graftInvalidTopicIdsHistogram.Observe(float64(invalidTopicIds)) +}
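OnPublishMessageInspected (below) records one observation per RPC on each of the four publish-message histograms, so the error total can be correlated with its breakdown. A sketch with illustrative values:

package main

import "github.com/onflow/flow-go/module/metrics"

func main() {
	c := metrics.NewGossipSubRPCValidationInspectorMetrics("")
	// 7 inspection errors on one RPC: 4 invalid topic ids, 2 invalid subscriptions, 1 invalid sender
	c.OnPublishMessageInspected(7, 4, 2, 1)
}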
+ +// OnPublishMessageInspected is called at the end of the async inspection of publish messages of a single RPC, regardless of the result of the inspection. +// It tracks the total number of errors detected during the async inspection of the rpc together with their individual breakdown. +// Args: +// - totalErrCount: the number of errors that occurred during the async inspection of publish messages. +// - invalidTopicIdsCount: the number of times that an invalid topic id was detected during the async inspection of publish messages. +// - invalidSubscriptionsCount: the number of times that an invalid subscription was detected during the async inspection of publish messages. +// - invalidSendersCount: the number of times that an invalid sender was detected during the async inspection of publish messages. +func (c *GossipSubRpcValidationInspectorMetrics) OnPublishMessageInspected(totalErrCount int, invalidTopicIdsCount int, invalidSubscriptionsCount int, invalidSendersCount int) { + c.publishMessageInspectedErrHistogram.Observe(float64(totalErrCount)) + c.publishMessageInvalidSenderCountHistogram.Observe(float64(invalidSendersCount)) + c.publishMessageInvalidSubscriptionsHistogram.Observe(float64(invalidSubscriptionsCount)) + c.publishMessageInvalidTopicIdHistogram.Observe(float64(invalidTopicIdsCount)) +} + +// OnPublishMessagesInspectionErrorExceedsThreshold tracks the number of times that async inspection of publish messages failed due to the number of errors exceeding the threshold. +// Note that it causes a misbehaviour report. +func (c *GossipSubRpcValidationInspectorMetrics) OnPublishMessagesInspectionErrorExceedsThreshold() { + c.publishMessageInspectionErrExceedThresholdCount.Inc() +} diff --git a/module/metrics/gossipsub_score.go b/module/metrics/gossipsub_score.go index 2f574cf332b..15e3628469c 100644 --- a/module/metrics/gossipsub_score.go +++ b/module/metrics/gossipsub_score.go @@ -10,12 +10,6 @@ import ( "github.com/onflow/flow-go/network/channels" ) -var ( - // gossipSubScoreBuckets is a list of buckets for gossipsub score metrics. - // There is almost no limit to the score, so we use a large range of buckets to capture the full range.
- gossipSubScoreBuckets = []float64{-10e6, -10e5, -10e4, -10e3, -10e2, -10e1, -10e0, 0, 10e0, 10e1, 10e2, 10e3, 10e4, 10e5, 10e6} -) - type GossipSubScoreMetrics struct { peerScore prometheus.Histogram appSpecificScore prometheus.Histogram @@ -42,7 +36,7 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { Subsystem: subsystemGossip, Name: prefix + "gossipsub_overall_peer_score", Help: "overall peer score from gossipsub peer scoring", - Buckets: gossipSubScoreBuckets, + Buckets: []float64{-100, 0, 100}, }, ) @@ -52,7 +46,7 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { Subsystem: subsystemGossip, Name: prefix + "gossipsub_app_specific_score", Help: "app specific score from gossipsub peer scoring", - Buckets: gossipSubScoreBuckets, + Buckets: []float64{-100, 0, 100}, }, ) @@ -62,7 +56,7 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { Subsystem: subsystemGossip, Name: prefix + "gossipsub_behaviour_penalty_score", Help: "behaviour penalty from gossipsub peer scoring", - Buckets: gossipSubScoreBuckets, + Buckets: []float64{10, 100, 1000}, }, ) @@ -72,7 +66,7 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { Subsystem: subsystemGossip, Name: prefix + "gossipsub_ip_collocation_factor_score", Help: "ip collocation factor from gossipsub peer scoring", - Buckets: gossipSubScoreBuckets, + Buckets: []float64{10, 100, 1000}, }, ) @@ -80,9 +74,9 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { prometheus.HistogramOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: prefix + "gossipsub_time_in_mesh_score", - Help: "time in mesh from gossipsub scoring", - Buckets: gossipSubScoreBuckets, + Name: prefix + "gossipsub_time_in_mesh_quantum_count", + Help: "time in mesh from gossipsub scoring; number of time quanta a peer has been in the mesh", + Buckets: []float64{1, 24, 168}, // 1h, 1d, 1w with quantum of 1h }, []string{LabelChannel}, ) @@ -91,8 +85,9 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { prometheus.HistogramOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: prefix + "gossipsub_mesh_message_delivery_score", - Help: "mesh message delivery from gossipsub peer scoring", + Name: prefix + "gossipsub_mesh_message_delivery", + Buckets: []float64{100, 1000, 10_000}, + Help: "mesh message delivery from gossipsub peer scoring; number of messages delivered to the mesh of the local peer, decayed over time, and capped at a certain value", }, []string{LabelChannel}, ) @@ -101,8 +96,9 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { prometheus.HistogramOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: prefix + "gossipsub_invalid_message_delivery_score", - Help: "invalid message delivery from gossipsub peer scoring", + Buckets: []float64{10, 100, 1000}, + Name: prefix + "gossipsub_invalid_message_delivery_count", + Help: "invalid message delivery from gossipsub peer scoring; number of invalid messages delivered to the mesh of the local peer, decayed over time, and capped at a certain value", }, []string{LabelChannel}, ) @@ -111,8 +107,9 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { prometheus.HistogramOpts{ Namespace: namespaceNetwork, Subsystem: subsystemGossip, - Name: prefix + "gossipsub_first_message_delivery_score", - Help: "first message delivery from gossipsub peer scoring", + Buckets: []float64{100, 1000, 10_000}, + Name: prefix +
"gossipsub_first_message_delivery_count", + Help: "first message delivery from gossipsub peer scoring; number of fresh messages delivered to the mesh of local peer; decayed over time; and capped at certain value", }, []string{LabelChannel}, ) diff --git a/module/metrics/gossipsub_scoring_registry.go b/module/metrics/gossipsub_scoring_registry.go new file mode 100644 index 00000000000..111b2279226 --- /dev/null +++ b/module/metrics/gossipsub_scoring_registry.go @@ -0,0 +1,53 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module" +) + +// GossipSubScoringRegistryMetrics encapsulates the metrics collectors for collecting metrics related to the Gossipsub scoring registry, offering insights into penalties and +// other factors used by the scoring registry to compute the application-specific score. It focuses on tracking internal +// aspects of the application-specific score, distinguishing itself from GossipSubScoringMetrics. +type GossipSubScoringRegistryMetrics struct { + prefix string + duplicateMessagePenalties prometheus.Histogram + duplicateMessageCounts prometheus.Histogram +} + +var _ module.GossipSubScoringRegistryMetrics = (*GossipSubScoringRegistryMetrics)(nil) + +// NewGossipSubScoringRegistryMetrics returns a new *GossipSubScoringRegistryMetrics. +func NewGossipSubScoringRegistryMetrics(prefix string) *GossipSubScoringRegistryMetrics { + gc := &GossipSubScoringRegistryMetrics{prefix: prefix} + gc.duplicateMessagePenalties = promauto.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "gossipsub_scoring_registry_duplicate_message_penalties", + Help: "duplicate message penalty applied to the overall application specific score of a node", + Buckets: []float64{-1, -0.01, -0.001}, + }, + ) + gc.duplicateMessageCounts = promauto.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: gc.prefix + "gossipsub_scoring_registry_duplicate_message_counts", + Help: "duplicate message count of a node at the time it is used to compute the duplicate message penalty", + Buckets: []float64{25, 50, 100, 1000}, + }, + ) + return gc +} + +// DuplicateMessagePenalties tracks the duplicate message penalty for a node. +func (g GossipSubScoringRegistryMetrics) DuplicateMessagePenalties(penalty float64) { + g.duplicateMessagePenalties.Observe(penalty) +} + +// DuplicateMessagesCounts tracks the duplicate message count for a node. 
+ +// DuplicateMessagesCounts tracks the duplicate message count for a node. +func (g GossipSubScoringRegistryMetrics) DuplicateMessagesCounts(count float64) { + g.duplicateMessageCounts.Observe(count) +} diff --git a/module/metrics/herocache.go b/module/metrics/herocache.go index 567295fcaa2..f6c810deeea 100644 --- a/module/metrics/herocache.go +++ b/module/metrics/herocache.go @@ -6,6 +6,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network" ) const subsystemHeroCache = "hero_cache" @@ -63,9 +64,57 @@ func NewNoopHeroCacheMetricsFactory() HeroCacheMetricsFactory { } } -func NetworkReceiveCacheMetricsFactory(f HeroCacheMetricsFactory, publicNetwork bool) module.HeroCacheMetrics { +func NetworkReceiveCacheMetricsFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { r := ResourceNetworkingReceiveCache - if publicNetwork { + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func NewSubscriptionRecordCacheMetricsFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingSubscriptionRecordsCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +// NewGossipSubApplicationSpecificScoreCacheMetrics is the factory method for creating a new HeroCacheCollector for the +// application specific score cache of the GossipSub peer scoring module. The application specific score cache is used +// to keep track of the application specific score of peers in GossipSub. +// Args: +// - f: the HeroCacheMetricsFactory to create the collector +// Returns: +// - a HeroCacheMetrics for the application specific score cache +func NewGossipSubApplicationSpecificScoreCacheMetrics(f HeroCacheMetricsFactory, networkingType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingGossipSubApplicationSpecificScoreCache + if networkingType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +// DisallowListCacheMetricsFactory is the factory method for creating a new HeroCacheCollector for the disallow list cache. +// The disallow-list cache is used to keep track of peers that are disallow-listed and the reasons for it. +// Args: +// - f: the HeroCacheMetricsFactory to create the collector +// - networkingType: the networking type of the cache, i.e., whether it is used for the public or the private network +// Returns: +// - a HeroCacheMetrics for the disallow list cache +func DisallowListCacheMetricsFactory(f HeroCacheMetricsFactory, networkingType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingDisallowListCache + if networkingType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +// GossipSubSpamRecordCacheMetricsFactory is the factory method for creating a new HeroCacheCollector for the spam record cache. +// The spam record cache is used to keep track of peers that are spamming the network and the reasons for it.
+func GossipSubSpamRecordCacheMetricsFactory(f HeroCacheMetricsFactory, networkingType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingGossipSubSpamRecordCache + if networkingType == network.PublicNetwork { r = PrependPublicPrefix(r) } return f(namespaceNetwork, r) @@ -95,27 +144,94 @@ func DisallowListNotificationQueueMetricFactory(registrar prometheus.Registerer) { return NewHeroCacheCollector(namespaceNetwork, ResourceNetworkingDisallowListNotificationQueue, registrar) } -func GossipSubRPCMetricsObserverInspectorQueueMetricFactory(f HeroCacheMetricsFactory, publicNetwork bool) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. - r := ResourceNetworkingRpcMetricsObserverInspectorQueue - if publicNetwork { - r = ResourceNetworkingPublicRpcMetricsObserverInspectorQueue +func ApplicationLayerSpamRecordCacheMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingApplicationLayerSpamRecordCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + + return f(namespaceNetwork, r) +} + +func DialConfigCacheMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingUnicastDialConfigCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func ApplicationLayerSpamRecordQueueMetricsFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingApplicationLayerSpamReportQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) } return f(namespaceNetwork, r) } -func GossipSubRPCInspectorQueueMetricFactory(f HeroCacheMetricsFactory, publicNetwork bool) module.HeroCacheMetrics { +func GossipSubRPCInspectorQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. r := ResourceNetworkingRpcValidationInspectorQueue - if publicNetwork { - r = ResourceNetworkingPublicRpcValidationInspectorQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func GossipSubDuplicateMessageTrackerCacheMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingGossipsubDuplicateMessagesTrackerCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func GossipSubRPCSentTrackerMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // the public prefix is prepended for the public network so the public and private metric names stay distinct. + r := ResourceNetworkingRPCSentTrackerCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +}
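All of these factories share one shape: pick a resource name, prepend the public prefix when the cache serves the public network, and delegate to the HeroCacheMetricsFactory. A sketch of a call site after the publicNetwork-bool to network.NetworkingType migration; network.PrivateNetwork is assumed to be the private-side constant matching the network.PublicNetwork used throughout this diff:

package main

import (
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/network"
)

func main() {
	f := metrics.NewNoopHeroCacheMetricsFactory()

	// private network: the resource name is used as-is
	_ = metrics.NetworkReceiveCacheMetricsFactory(f, network.PrivateNetwork)

	// public network: the resource name gets the public prefix prepended
	_ = metrics.NetworkReceiveCacheMetricsFactory(f, network.PublicNetwork)
}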
+ r := ResourceNetworkingRPCSentTrackerQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) } return f(namespaceNetwork, r) } -func RpcInspectorNotificationQueueMetricFactory(f HeroCacheMetricsFactory, publicNetwork bool) module.HeroCacheMetrics { +func RpcInspectorNotificationQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { r := ResourceNetworkingRpcInspectorNotificationQueue - if publicNetwork { + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func GossipSubRPCInspectorClusterPrefixedCacheMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. + r := ResourceNetworkingRpcClusterPrefixReceivedCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +// GossipSubAppSpecificScoreUpdateQueueMetricFactory is the factory method for creating a new HeroCacheCollector for the +// app-specific score update queue of the GossipSub peer scoring module. The app-specific score update queue is used to +// queue the update requests for the app-specific score of peers. The update requests are queued in a worker pool and +// processed asynchronously. +// Args: +// - f: the HeroCacheMetricsFactory to create the collector +// Returns: +// - a HeroCacheMetrics for the app-specific score update queue. +func GossipSubAppSpecificScoreUpdateQueueMetricFactory(f HeroCacheMetricsFactory, networkingType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingAppSpecificScoreUpdateQueue + if networkingType == network.PublicNetwork { r = PrependPublicPrefix(r) } return f(namespaceNetwork, r) diff --git a/module/metrics/hotstuff.go b/module/metrics/hotstuff.go index 258c15ddec0..df843cdeaa8 100644 --- a/module/metrics/hotstuff.go +++ b/module/metrics/hotstuff.go @@ -43,6 +43,8 @@ type HotstuffCollector struct { signerComputationsDuration prometheus.Histogram validatorComputationsDuration prometheus.Histogram payloadProductionDuration prometheus.Histogram + timeoutCollectorsRange *prometheus.GaugeVec + numberOfActiveCollectors prometheus.Gauge } var _ module.HotstuffMetrics = (*HotstuffCollector)(nil) @@ -185,6 +187,20 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), + timeoutCollectorsRange: promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "timeout_collectors_range", + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, + Help: "lowest and highest views that we are maintaining TimeoutCollectors for", + ConstLabels: prometheus.Labels{LabelChain: chain.String()}, + }, []string{"prefix"}), + numberOfActiveCollectors: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "active_collectors", + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, + Help: "number of active TimeoutCollectors that the TimeoutAggregator component currently maintains", + ConstLabels: prometheus.Labels{LabelChain: chain.String()}, + }), } return hc @@ -277,3 +293,12 @@ func (hc *HotstuffCollector) ValidatorProcessingDuration(duration time.Duration) func (hc *HotstuffCollector) PayloadProductionDuration(duration time.Duration) { hc.payloadProductionDuration.Observe(duration.Seconds()) // unit: seconds; with float64 
precision } + +// TimeoutCollectorsRange collects information from the node's `TimeoutAggregator` component. +// Specifically, it measurers the number of views for which we are currently collecting timeouts +// (i.e. the number of `TimeoutCollector` instances we are maintaining) and their lowest/highest view. +func (hc *HotstuffCollector) TimeoutCollectorsRange(lowestRetainedView uint64, newestViewCreatedCollector uint64, activeCollectors int) { + hc.timeoutCollectorsRange.WithLabelValues("lowest_view_of_active_timeout_collectors").Set(float64(lowestRetainedView)) + hc.timeoutCollectorsRange.WithLabelValues("newest_view_of_active_timeout_collectors").Set(float64(newestViewCreatedCollector)) + hc.numberOfActiveCollectors.Set(float64(activeCollectors)) +} diff --git a/module/metrics/labels.go b/module/metrics/labels.go index c31bd23ae2c..97dd011d1cd 100644 --- a/module/metrics/labels.go +++ b/module/metrics/labels.go @@ -19,6 +19,12 @@ const ( LabelConnectionUseFD = "usefd" // whether the connection is using a file descriptor LabelSuccess = "success" LabelMisbehavior = "misbehavior" + LabelHandler = "handler" + LabelStatusCode = "code" + LabelMethod = "method" + LabelService = "service" + LabelRejectionReason = "rejection_reason" + LabelAccountAddress = "acct_address" // Account address for a machine account ) const ( @@ -44,19 +50,27 @@ const ( const ( ResourceUndefined = "undefined" ResourceProposal = "proposal" + ResourceProposalSignature = "proposal_signature" ResourceHeader = "header" ResourceFinalizedHeight = "finalized_height" + ResourceCertifiedView = "certified_view" ResourceIndex = "index" ResourceIdentity = "identity" ResourceGuarantee = "guarantee" + ResourceGuaranteeByCollectionID = "guarantee_by_collection_id" ResourceResult = "result" ResourceResultApprovals = "result_approvals" + ResourceChunkLocators = "chunk_locators" ResourceReceipt = "receipt" ResourceQC = "qc" ResourceMyReceipt = "my_receipt" ResourceCollection = "collection" - ResourceApproval = "approval" - ResourceSeal = "seal" + ResourceProtocolState = "protocol_state" + ResourceProtocolStateByBlockID = "protocol_state_by_block_id" + ResourceProtocolKVStore = "protocol_kv_store" + ResourceProtocolKVStoreByBlockID = "protocol_kv_store_by_block_id" + ResourceSeal = "seal" // block seals (including emergency seals) + ResourceEmergencySeal = "emergency_seal" // any seal which does not include a verifier signature ResourcePendingIncorporatedSeal = "pending_incorporated_seal" ResourceCommit = "commit" ResourceTransaction = "transaction" @@ -81,36 +95,49 @@ const ( ResourceEpochCommit = "epoch_commit" ResourceEpochStatus = "epoch_status" ResourceNetworkingReceiveCache = "networking_received_message" // networking layer + ResourceNetworkingSubscriptionRecordsCache = "subscription_records_cache" // networking layer ResourceNetworkingDnsIpCache = "networking_dns_ip_cache" // networking layer ResourceNetworkingDnsTxtCache = "networking_dns_txt_cache" // networking layer ResourceNetworkingDisallowListNotificationQueue = "networking_disallow_list_notification_queue" ResourceNetworkingRpcInspectorNotificationQueue = "networking_rpc_inspector_notification_queue" ResourceNetworkingRpcValidationInspectorQueue = "networking_rpc_validation_inspector_queue" - ResourceNetworkingRpcMetricsObserverInspectorQueue = "networking_rpc_metrics_observer_inspector_queue" - ResourceNetworkingPublicRpcValidationInspectorQueue = "networking_public_rpc_validation_inspector_queue" - ResourceNetworkingPublicRpcMetricsObserverInspectorQueue = 
"networking_public_rpc_metrics_observer_inspector_queue" + ResourceNetworkingApplicationLayerSpamRecordCache = "application_layer_spam_record_cache" + ResourceNetworkingApplicationLayerSpamReportQueue = "application_layer_spam_report_queue" + ResourceNetworkingRpcClusterPrefixReceivedCache = "rpc_cluster_prefixed_received_cache" + ResourceNetworkingAppSpecificScoreUpdateQueue = "gossipsub_app_specific_score_update_queue" + ResourceNetworkingGossipSubApplicationSpecificScoreCache = "gossipsub_application_specific_score_cache" + ResourceNetworkingGossipSubSpamRecordCache = "gossipsub_spam_record_cache" + ResourceNetworkingDisallowListCache = "disallow_list_cache" + ResourceNetworkingRPCSentTrackerCache = "gossipsub_rpc_sent_tracker_cache" + ResourceNetworkingRPCSentTrackerQueue = "gossipsub_rpc_sent_tracker_queue" + ResourceNetworkingUnicastDialConfigCache = "unicast_dial_config_cache" + ResourceNetworkingGossipsubDuplicateMessagesTrackerCache = "gossipsub_duplicate_messages_tracker_cache" - ResourceFollowerPendingBlocksCache = "follower_pending_block_cache" // follower engine - ResourceClusterBlockProposalQueue = "cluster_compliance_proposal_queue" // collection node, compliance engine - ResourceTransactionIngestQueue = "ingest_transaction_queue" // collection node, ingest engine - ResourceBeaconKey = "beacon-key" // consensus node, DKG engine - ResourceDKGMessage = "dkg_private_message" // consensus, DKG messaging engine - ResourceApprovalQueue = "sealing_approval_queue" // consensus node, sealing engine - ResourceReceiptQueue = "sealing_receipt_queue" // consensus node, sealing engine - ResourceApprovalResponseQueue = "sealing_approval_response_queue" // consensus node, sealing engine - ResourceBlockResponseQueue = "compliance_block_response_queue" // consensus node, compliance engine - ResourceBlockProposalQueue = "compliance_proposal_queue" // consensus node, compliance engine - ResourceBlockVoteQueue = "vote_aggregator_queue" // consensus/collection node, vote aggregator - ResourceTimeoutObjectQueue = "timeout_aggregator_queue" // consensus/collection node, timeout aggregator - ResourceCollectionGuaranteesQueue = "ingestion_col_guarantee_queue" // consensus node, ingestion engine - ResourceChunkDataPack = "chunk_data_pack" // execution node - ResourceChunkDataPackRequests = "chunk_data_pack_request" // execution node - ResourceEvents = "events" // execution node - ResourceServiceEvents = "service_events" // execution node - ResourceTransactionResults = "transaction_results" // execution node - ResourceTransactionResultIndices = "transaction_result_indices" // execution node - ResourceTransactionResultByBlock = "transaction_result_by_block" // execution node - ResourceExecutionDataCache = "execution_data_cache" // access node + ResourceFollowerPendingBlocksCache = "follower_pending_block_cache" // follower engine + ResourceFollowerLoopCertifiedBlocksChannel = "follower_loop_certified_blocks_channel" // follower loop, certified blocks buffered channel + ResourceClusterBlockProposalQueue = "cluster_compliance_proposal_queue" // collection node, compliance engine + ResourceTransactionIngestQueue = "ingest_transaction_queue" // collection node, ingest engine + ResourceSubmitCollectionGuaranteesQueue = "pusher_col_guarantee_queue" // collection node, pusher engine + ResourceBeaconKey = "beacon-key" // consensus node, DKG engine + ResourceDKGMessage = "dkg_private_message" // consensus, DKG messaging engine + ResourceApprovalQueue = "sealing_approval_queue" // consensus node, sealing engine + 
ResourceReceiptQueue = "sealing_receipt_queue" // consensus node, sealing engine + ResourceApprovalResponseQueue = "sealing_approval_response_queue" // consensus node, sealing engine + ResourceBlockResponseQueue = "compliance_block_response_queue" // consensus node, compliance engine + ResourceBlockProposalQueue = "compliance_proposal_queue" // consensus node, compliance engine + ResourceBlockVoteQueue = "vote_aggregator_queue" // consensus/collection node, vote aggregator + ResourceTimeoutObjectQueue = "timeout_aggregator_queue" // consensus/collection node, timeout aggregator + ResourceCollectionGuaranteesQueue = "ingestion_col_guarantee_queue" // consensus node, ingestion engine + ResourceChunkDataPack = "chunk_data_pack" // execution node + ResourceChunkDataPackRequests = "chunk_data_pack_request" // execution node + ResourceEvents = "events" // execution node + ResourceServiceEvents = "service_events" // execution node + ResourceTransactionResults = "transaction_results" // execution node + ResourceTransactionResultIndices = "transaction_result_indices" // execution node + ResourceTransactionResultErrorMessages = "transaction_result_error_messages" // execution node + ResourceTransactionResultErrorMessagesIndices = "transaction_result_error_messages_indices" // execution node + ResourceTransactionResultByBlock = "transaction_result_by_block" // execution node + ResourceExecutionDataCache = "execution_data_cache" // access node ) const ( @@ -135,6 +162,20 @@ const ( MessageEntityResponse = "entity_response" ) +// transaction validation labels +const ( + InvalidTransactionRateLimit = "payer_exceeded_rate_limit" + InvalidTransactionByteSize = "transaction_exceeded_size_limit" + IncompleteTransaction = "missing_fields" + InvalidGasLimit = "invalid_gas_limit" + ExpiredTransaction = "transaction_expired" + InvalidScript = "invalid_script" + InvalidAddresses = "invalid_address" + InvalidSignature = "invalid_signature" + DuplicatedSignature = "duplicate_signature" + InsufficientBalance = "payer_insufficient_balance" +) + const ExecutionDataRequestRetryable = "retryable" const LabelViolationReason = "reason" diff --git a/module/metrics/libp2p_resource_manager.go b/module/metrics/libp2p_resource_manager.go index 83c0c7da206..fd8ec8aa5ff 100644 --- a/module/metrics/libp2p_resource_manager.go +++ b/module/metrics/libp2p_resource_manager.go @@ -11,6 +11,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/rs/zerolog" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" "github.com/onflow/flow-go/utils/logging" ) @@ -171,22 +172,22 @@ func (l *LibP2PResourceManagerMetrics) BlockConn(dir network.Direction, usefd bo func (l *LibP2PResourceManagerMetrics) AllowStream(p peer.ID, dir network.Direction) { l.allowStreamCount.WithLabelValues(dir.String()).Inc() - l.logger.Trace().Str("peer", p.String()).Str("direction", dir.String()).Msg("allowing stream") + l.logger.Trace().Str("peer", p2plogging.PeerId(p)).Str("direction", dir.String()).Msg("allowing stream") } func (l *LibP2PResourceManagerMetrics) BlockStream(p peer.ID, dir network.Direction) { l.blockStreamCount.WithLabelValues(dir.String()).Inc() - l.logger.Debug().Bool(logging.KeySuspicious, true).Str("peer", p.String()).Str("direction", dir.String()).Msg("blocking stream") + l.logger.Debug().Bool(logging.KeySuspicious, true).Str("peer", p2plogging.PeerId(p)).Str("direction", dir.String()).Msg("blocking stream") } func (l *LibP2PResourceManagerMetrics) AllowPeer(p peer.ID) { l.allowPeerCount.Inc() - 
l.logger.Trace().Str("peer", p.String()).Msg("allowing peer") + l.logger.Trace().Str("peer", p2plogging.PeerId(p)).Msg("allowing peer") } func (l *LibP2PResourceManagerMetrics) BlockPeer(p peer.ID) { l.blockPeerCount.Inc() - l.logger.Debug().Bool(logging.KeySuspicious, true).Str("peer", p.String()).Msg("blocking peer") + l.logger.Debug().Bool(logging.KeySuspicious, true).Str("peer", p2plogging.PeerId(p)).Msg("blocking peer") } func (l *LibP2PResourceManagerMetrics) AllowProtocol(proto protocol.ID) { @@ -201,7 +202,7 @@ func (l *LibP2PResourceManagerMetrics) BlockProtocol(proto protocol.ID) { func (l *LibP2PResourceManagerMetrics) BlockProtocolPeer(proto protocol.ID, p peer.ID) { l.blockProtocolPeerCount.Inc() - l.logger.Debug().Bool(logging.KeySuspicious, true).Str("protocol", string(proto)).Str("peer", p.String()).Msg("blocking protocol for peer") + l.logger.Debug().Bool(logging.KeySuspicious, true).Str("protocol", string(proto)).Str("peer", p2plogging.PeerId(p)).Msg("blocking protocol for peer") } func (l *LibP2PResourceManagerMetrics) AllowService(svc string) { @@ -216,7 +217,7 @@ func (l *LibP2PResourceManagerMetrics) BlockService(svc string) { func (l *LibP2PResourceManagerMetrics) BlockServicePeer(svc string, p peer.ID) { l.blockServicePeerCount.Inc() - l.logger.Debug().Bool(logging.KeySuspicious, true).Str("service", svc).Str("peer", p.String()).Msg("blocking service for peer") + l.logger.Debug().Bool(logging.KeySuspicious, true).Str("service", svc).Str("peer", p2plogging.PeerId(p)).Msg("blocking service for peer") } func (l *LibP2PResourceManagerMetrics) AllowMemory(size int) { diff --git a/module/metrics/machine_account.go b/module/metrics/machine_account.go new file mode 100644 index 00000000000..ef72a3471d8 --- /dev/null +++ b/module/metrics/machine_account.go @@ -0,0 +1,59 @@ +package metrics + +import ( + "github.com/onflow/flow-go/model/flow" + + "github.com/prometheus/client_golang/prometheus" +) + +// MachineAccountCollector implements metric collection for machine accounts. 
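+// All gauges are labelled with the machine account address, so a sketch of the
+// expected wiring is (hypothetical registry and address values, not part of this change):
+//
+//	collector := NewMachineAccountCollector(prometheus.NewRegistry(), address)
+//	collector.AccountBalance(2.5)          // last observed balance, in FLOW
+//	collector.RecommendedMinBalance(0.005) // refill threshold for this node role
+//	collector.IsMisconfigured(false)       // exported as 0; 1 when misconfigured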
+type MachineAccountCollector struct { + accountBalance prometheus.Gauge + recommendedMinBalance prometheus.Gauge + misconfigured prometheus.Gauge +} + +func NewMachineAccountCollector(registerer prometheus.Registerer, machineAccountAddress flow.Address) *MachineAccountCollector { + accountBalance := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceMachineAcct, + Name: "balance", + Help: "the last observed balance of this node's machine account, in units of FLOW", + ConstLabels: map[string]string{LabelAccountAddress: machineAccountAddress.String()}, + }) + recommendedMinBalance := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceMachineAcct, + Name: "recommended_min_balance", + Help: "the recommended minimum balance for this node role; refill the account when the balance reaches this threshold", + ConstLabels: map[string]string{LabelAccountAddress: machineAccountAddress.String()}, + }) + misconfigured := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceMachineAcct, + Name: "is_misconfigured", + Help: "reported as a non-zero value when a misconfiguration is detected; check logs for further details", + ConstLabels: map[string]string{LabelAccountAddress: machineAccountAddress.String()}, + }) + registerer.MustRegister(accountBalance, recommendedMinBalance, misconfigured) + + collector := &MachineAccountCollector{ + accountBalance: accountBalance, + recommendedMinBalance: recommendedMinBalance, + misconfigured: misconfigured, + } + return collector +} + +func (m MachineAccountCollector) AccountBalance(bal float64) { + m.accountBalance.Set(bal) +} + +func (m MachineAccountCollector) RecommendedMinBalance(bal float64) { + m.recommendedMinBalance.Set(bal) +} + +func (m MachineAccountCollector) IsMisconfigured(misconfigured bool) { + if misconfigured { + m.misconfigured.Set(1) + } else { + m.misconfigured.Set(0) + } +} diff --git a/module/metrics/namespaces.go b/module/metrics/namespaces.go index da485589056..b72b524850f 100644 --- a/module/metrics/namespaces.go +++ b/module/metrics/namespaces.go @@ -15,6 +15,8 @@ const ( namespaceExecutionDataSync = "execution_data_sync" namespaceChainsync = "chainsync" namespaceFollowerEngine = "follower" + namespaceRestAPI = "access_rest_api" + namespaceMachineAcct = "machine_account" ) // Network subsystems represent the various layers of networking. @@ -28,6 +30,7 @@ const ( subsystemAuth = "authorization" subsystemRateLimiting = "ratelimit" subsystemAlsp = "alsp" + subsystemSecurity = "security" ) // Storage subsystems represent the various components of the storage layer. 
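+// Prometheus composes fully-qualified metric names as <namespace>_<subsystem>_<name>,
+// so the namespaces and subsystems added in this change surface in names such as
+// (illustrative, derived from the collectors elsewhere in this diff):
+//
+//	access_rest_api_http_request_duration_seconds
+//	machine_account_balance
+//	network_security_slashing_violation_reports_skipped_count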
@@ -41,7 +44,9 @@ const ( const ( subsystemTransactionTiming = "transaction_timing" subsystemTransactionSubmission = "transaction_submission" + subsystemTransactionValidation = "transaction_validation" subsystemConnectionPool = "connection_pool" + subsystemHTTP = "http" ) // Observer subsystem @@ -58,6 +63,7 @@ const ( const ( subsystemCompliance = "compliance" subsystemHotstuff = "hotstuff" + subsystemCruiseCtl = "cruisectl" subsystemMatchEngine = "match" ) @@ -67,6 +73,7 @@ const ( subsystemMTrie = "mtrie" subsystemIngestion = "ingestion" subsystemRuntime = "runtime" + subsystemEVM = "evm" subsystemProvider = "provider" subsystemBlockDataUploader = "block_data_uploader" ) @@ -87,7 +94,9 @@ const ( subsystemExeDataProvider = "provider" subsystemExeDataPruner = "pruner" subsystemExecutionDataRequester = "execution_data_requester" + subsystemExecutionStateIndexer = "execution_state_indexer" subsystemExeDataBlobstore = "blobstore" + subsystemTxErrorFetcher = "tx_errors" ) // module/synchronization core diff --git a/module/metrics/network.go b/module/metrics/network.go index 5c3e5b7995c..a6eead52e48 100644 --- a/module/metrics/network.go +++ b/module/metrics/network.go @@ -10,6 +10,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" + logging2 "github.com/onflow/flow-go/network/p2p/logging" "github.com/onflow/flow-go/utils/logging" ) @@ -23,9 +24,10 @@ const ( type NetworkCollector struct { *UnicastManagerMetrics *LibP2PResourceManagerMetrics - *GossipSubMetrics *GossipSubScoreMetrics - *GossipSubLocalMeshMetrics + *LocalGossipSubRouterMetrics + *GossipSubRpcValidationInspectorMetrics + *GossipSubScoringRegistryMetrics *AlspMetrics outboundMessageSize *prometheus.HistogramVec inboundMessageSize *prometheus.HistogramVec @@ -44,9 +46,10 @@ type NetworkCollector struct { dnsLookupRequestDroppedCount prometheus.Counter routingTableSize prometheus.Gauge - // authorization, rate limiting metrics + // security metrics unAuthorizedMessagesCount *prometheus.CounterVec rateLimitedUnicastMessagesCount *prometheus.CounterVec + violationReportSkippedCount prometheus.Counter prefix string } @@ -72,9 +75,10 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.UnicastManagerMetrics = NewUnicastManagerMetrics(nc.prefix) nc.LibP2PResourceManagerMetrics = NewLibP2PResourceManagerMetrics(logger, nc.prefix) - nc.GossipSubLocalMeshMetrics = NewGossipSubLocalMeshMetrics(nc.prefix) - nc.GossipSubMetrics = NewGossipSubMetrics(nc.prefix) + nc.LocalGossipSubRouterMetrics = NewGossipSubLocalMeshMetrics(nc.prefix) nc.GossipSubScoreMetrics = NewGossipSubScoreMetrics(nc.prefix) + nc.GossipSubRpcValidationInspectorMetrics = NewGossipSubRPCValidationInspectorMetrics(nc.prefix) + nc.GossipSubScoringRegistryMetrics = NewGossipSubScoringRegistryMetrics(nc.prefix) nc.AlspMetrics = NewAlspMetrics() nc.outboundMessageSize = promauto.NewHistogramVec( @@ -83,7 +87,7 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne Subsystem: subsystemGossip, Name: nc.prefix + "outbound_message_size_bytes", Help: "size of the outbound network message", - Buckets: []float64{KiB, 100 * KiB, 500 * KiB, 1 * MiB, 2 * MiB, 4 * MiB}, + Buckets: []float64{KiB, 100 * KiB, 1 * MiB}, }, []string{LabelChannel, LabelProtocol, LabelMessage}, ) @@ -93,7 +97,7 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne Subsystem: subsystemGossip, Name: nc.prefix + "inbound_message_size_bytes", Help: "size of the inbound network message", - Buckets: 
[]float64{KiB, 100 * KiB, 500 * KiB, 1 * MiB, 2 * MiB, 4 * MiB},
+			Buckets:   []float64{KiB, 100 * KiB, 1 * MiB},
		}, []string{LabelChannel, LabelProtocol, LabelMessage},
	)

@@ -243,6 +247,15 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne
		}, []string{LabelNodeRole, LabelMessage, LabelChannel, LabelRateLimitReason},
	)

+	nc.violationReportSkippedCount = promauto.NewCounter(
+		prometheus.CounterOpts{
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemSecurity,
+			Name:      nc.prefix + "slashing_violation_reports_skipped_count",
+			Help:      "number of violations observed by the slashing violations consumer that were not reported for misbehavior because the identity of the sender was not known",
+		},
+	)
+
	return nc
}

@@ -347,7 +360,7 @@ func (nc *NetworkCollector) OnUnauthorizedMessage(role, msgType, topic, offense
// OnRateLimitedPeer tracks the number of rate limited messages seen on the network.
func (nc *NetworkCollector) OnRateLimitedPeer(peerID peer.ID, role, msgType, topic, reason string) {
	nc.logger.Warn().
-		Str("peer_id", peerID.String()).
+		Str("peer_id", logging2.PeerId(peerID)).
		Str("role", role).
		Str("message_type", msgType).
		Str("topic", topic).
@@ -356,3 +369,9 @@ func (nc *NetworkCollector) OnRateLimitedPeer(peerID peer.ID, role, msgType, top
		Msg("unicast peer rate limited")
	nc.rateLimitedUnicastMessagesCount.WithLabelValues(role, msgType, topic, reason).Inc()
}
+
+// OnViolationReportSkipped tracks the number of violations observed by the slashing violations
+// consumer that were not reported for misbehavior because the identity of the sender was not known.
+func (nc *NetworkCollector) OnViolationReportSkipped() {
+	nc.violationReportSkippedCount.Inc()
+}
diff --git a/module/metrics/node_info.go b/module/metrics/node_info.go
index aa24cdb919c..9c6a97875ec 100644
--- a/module/metrics/node_info.go
+++ b/module/metrics/node_info.go
@@ -14,10 +14,10 @@ type NodeInfoCollector struct {
}

const (
-	sporkIDLabel         = "spork_id"
-	versionLabel         = "version"
-	commitLabel          = "commit"
-	protocolVersionLabel = "protocol_version"
+	sporkIDLabel              = "spork_id"
+	versionLabel              = "version"
+	commitLabel               = "commit"
+	protocolStateVersionLabel = "protocol_state_version"
)

func NewNodeInfoCollector() *NodeInfoCollector {
@@ -32,9 +32,9 @@ func NewNodeInfoCollector() *NodeInfoCollector {
	return collector
}

-func (sc *NodeInfoCollector) NodeInfo(version, commit, sporkID string, protocolVersion uint) {
+func (sc *NodeInfoCollector) NodeInfo(version, commit, sporkID string, protocolStateVersion uint64) {
	sc.nodeInfo.WithLabelValues(versionLabel, version)
	sc.nodeInfo.WithLabelValues(commitLabel, commit)
	sc.nodeInfo.WithLabelValues(sporkIDLabel, sporkID)
-	sc.nodeInfo.WithLabelValues(protocolVersionLabel, strconv.FormatUint(uint64(protocolVersion), 10))
+	sc.nodeInfo.WithLabelValues(protocolStateVersionLabel, strconv.FormatUint(protocolStateVersion, 10))
}
diff --git a/module/metrics/node_info_test.go b/module/metrics/node_info_test.go
index c7cf5d6d2ae..97cf16163a9 100644
--- a/module/metrics/node_info_test.go
+++ b/module/metrics/node_info_test.go
@@ -1,6 +1,7 @@
package metrics

import (
+	"math/rand"
	"strconv"
	"testing"

@@ -19,8 +20,8 @@ func TestNodeInfoCollector_NodeInfo(t *testing.T) {
	version := "0.29"
	commit := "63cec231136914941e2358de2054a6ef71ea3c99"
	sporkID := unittest.IdentifierFixture().String()
-	protocolVersion := uint(10076)
-	collector.NodeInfo(version, commit, sporkID, protocolVersion)
+	protocolStateVersion := rand.Uint64()
+	collector.NodeInfo(version, commit, sporkID, protocolStateVersion)
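+	// reg.Gather() exports every metric family registered above; the assertions
+	// below verify that each label value passed to NodeInfo was reported.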
metricsFamilies, err := reg.Gather() require.NoError(t, err) @@ -35,8 +36,7 @@ func TestNodeInfoCollector_NodeInfo(t *testing.T) { assert.Failf(t, "metric not found", "except to find value %s", value) } - protocolVersionAsString := strconv.FormatUint(uint64(protocolVersion), 10) - for _, value := range []string{version, commit, sporkID, protocolVersionAsString} { + for _, value := range []string{version, commit, sporkID, strconv.FormatUint(protocolStateVersion, 10)} { assertReported(value) } } diff --git a/module/metrics/noop.go b/module/metrics/noop.go index 710166fed80..b65f50ed035 100644 --- a/module/metrics/noop.go +++ b/module/metrics/noop.go @@ -4,17 +4,19 @@ import ( "context" "time" + "google.golang.org/grpc/codes" + "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" + httpmetrics "github.com/slok/go-http-metrics/metrics" "github.com/onflow/flow-go/model/chainsync" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/channels" - - httpmetrics "github.com/slok/go-http-metrics/metrics" + p2pmsg "github.com/onflow/flow-go/network/p2p/message" ) type NoopCollector struct{} @@ -32,10 +34,15 @@ func (nc *NoopCollector) UnicastMessageSendingCompleted(topic string) func (nc *NoopCollector) BlockProposed(*flow.Block) {} func (nc *NoopCollector) BlockProposalDuration(duration time.Duration) {} +// interface check +var _ module.BackendScriptsMetrics = (*NoopCollector)(nil) +var _ module.TransactionMetrics = (*NoopCollector)(nil) +var _ module.TransactionValidationMetrics = (*NoopCollector)(nil) var _ module.HotstuffMetrics = (*NoopCollector)(nil) var _ module.EngineMetrics = (*NoopCollector)(nil) var _ module.HeroCacheMetrics = (*NoopCollector)(nil) var _ module.NetworkMetrics = (*NoopCollector)(nil) +var _ module.CollectionMetrics = (*NoopCollector)(nil) func (nc *NoopCollector) Peers(prefix string, n int) {} func (nc *NoopCollector) Wantlist(prefix string, n int) {} @@ -85,133 +92,155 @@ func (nc *NoopCollector) FinalizedHeight(height uint64) func (nc *NoopCollector) SealedHeight(height uint64) {} func (nc *NoopCollector) BlockFinalized(*flow.Block) {} func (nc *NoopCollector) BlockSealed(*flow.Block) {} -func (nc *NoopCollector) CommittedEpochFinalView(view uint64) {} func (nc *NoopCollector) EpochTransitionHeight(height uint64) {} func (nc *NoopCollector) CurrentEpochCounter(counter uint64) {} func (nc *NoopCollector) CurrentEpochPhase(phase flow.EpochPhase) {} func (nc *NoopCollector) CurrentEpochFinalView(view uint64) {} -func (nc *NoopCollector) CurrentDKGPhase1FinalView(view uint64) {} -func (nc *NoopCollector) CurrentDKGPhase2FinalView(view uint64) {} -func (nc *NoopCollector) CurrentDKGPhase3FinalView(view uint64) {} -func (nc *NoopCollector) EpochEmergencyFallbackTriggered() {} -func (nc *NoopCollector) CacheEntries(resource string, entries uint) {} -func (nc *NoopCollector) CacheHit(resource string) {} -func (nc *NoopCollector) CacheNotFound(resource string) {} -func (nc *NoopCollector) CacheMiss(resource string) {} -func (nc *NoopCollector) MempoolEntries(resource string, entries uint) {} -func (nc *NoopCollector) Register(resource string, entriesFunc module.EntriesFunc) error { return nil } -func (nc *NoopCollector) HotStuffBusyDuration(duration time.Duration, event string) {} -func (nc *NoopCollector) HotStuffIdleDuration(duration time.Duration) {} -func (nc *NoopCollector) HotStuffWaitDuration(duration 
time.Duration, event string) {} -func (nc *NoopCollector) SetCurView(view uint64) {} -func (nc *NoopCollector) SetQCView(view uint64) {} -func (nc *NoopCollector) SetTCView(uint64) {} -func (nc *NoopCollector) CountSkipped() {} -func (nc *NoopCollector) CountTimeout() {} -func (nc *NoopCollector) BlockProcessingDuration(time.Duration) {} -func (nc *NoopCollector) VoteProcessingDuration(time.Duration) {} -func (nc *NoopCollector) TimeoutObjectProcessingDuration(time.Duration) {} -func (nc *NoopCollector) SetTimeout(duration time.Duration) {} -func (nc *NoopCollector) CommitteeProcessingDuration(duration time.Duration) {} -func (nc *NoopCollector) SignerProcessingDuration(duration time.Duration) {} -func (nc *NoopCollector) ValidatorProcessingDuration(duration time.Duration) {} -func (nc *NoopCollector) PayloadProductionDuration(duration time.Duration) {} -func (nc *NoopCollector) TransactionIngested(txID flow.Identifier) {} -func (nc *NoopCollector) ClusterBlockProposed(*cluster.Block) {} -func (nc *NoopCollector) ClusterBlockFinalized(*cluster.Block) {} -func (nc *NoopCollector) StartCollectionToFinalized(collectionID flow.Identifier) {} -func (nc *NoopCollector) FinishCollectionToFinalized(collectionID flow.Identifier) {} -func (nc *NoopCollector) StartBlockToSeal(blockID flow.Identifier) {} -func (nc *NoopCollector) FinishBlockToSeal(blockID flow.Identifier) {} -func (nc *NoopCollector) EmergencySeal() {} -func (nc *NoopCollector) OnReceiptProcessingDuration(duration time.Duration) {} -func (nc *NoopCollector) OnApprovalProcessingDuration(duration time.Duration) {} -func (nc *NoopCollector) CheckSealingDuration(duration time.Duration) {} -func (nc *NoopCollector) OnExecutionResultReceivedAtAssignerEngine() {} -func (nc *NoopCollector) OnVerifiableChunkReceivedAtVerifierEngine() {} -func (nc *NoopCollector) OnResultApprovalDispatchedInNetworkByVerifier() {} +func (nc *NoopCollector) CurrentDKGPhaseViews(phase1FinalView, phase2FinalView, phase3FinalView uint64) { +} +func (nc *NoopCollector) EpochFallbackModeTriggered() {} +func (nc *NoopCollector) EpochFallbackModeExited() {} +func (nc *NoopCollector) ProtocolStateVersion(version uint64) {} +func (nc *NoopCollector) CacheEntries(resource string, entries uint) {} +func (nc *NoopCollector) CacheHit(resource string) {} +func (nc *NoopCollector) CacheNotFound(resource string) {} +func (nc *NoopCollector) CacheMiss(resource string) {} +func (nc *NoopCollector) MempoolEntries(resource string, entries uint) {} +func (nc *NoopCollector) Register(resource string, entriesFunc module.EntriesFunc) error { return nil } +func (nc *NoopCollector) HotStuffBusyDuration(duration time.Duration, event string) {} +func (nc *NoopCollector) HotStuffIdleDuration(duration time.Duration) {} +func (nc *NoopCollector) HotStuffWaitDuration(duration time.Duration, event string) {} +func (nc *NoopCollector) SetCurView(view uint64) {} +func (nc *NoopCollector) SetQCView(view uint64) {} +func (nc *NoopCollector) SetTCView(uint64) {} +func (nc *NoopCollector) CountSkipped() {} +func (nc *NoopCollector) CountTimeout() {} +func (nc *NoopCollector) BlockProcessingDuration(time.Duration) {} +func (nc *NoopCollector) VoteProcessingDuration(time.Duration) {} +func (nc *NoopCollector) TimeoutObjectProcessingDuration(time.Duration) {} +func (nc *NoopCollector) SetTimeout(duration time.Duration) {} +func (nc *NoopCollector) CommitteeProcessingDuration(duration time.Duration) {} +func (nc *NoopCollector) SignerProcessingDuration(duration time.Duration) {} +func (nc *NoopCollector) 
ValidatorProcessingDuration(duration time.Duration) {} +func (nc *NoopCollector) PayloadProductionDuration(duration time.Duration) {} +func (nc *NoopCollector) TimeoutCollectorsRange(uint64, uint64, int) {} +func (nc *NoopCollector) TransactionIngested(txID flow.Identifier) {} +func (nc *NoopCollector) ClusterBlockProposed(*cluster.Block) {} +func (nc *NoopCollector) ClusterBlockFinalized(*cluster.Block) {} +func (nc *NoopCollector) CollectionMaxSize(uint) {} +func (nc *NoopCollector) StartCollectionToFinalized(collectionID flow.Identifier) {} +func (nc *NoopCollector) FinishCollectionToFinalized(collectionID flow.Identifier) {} +func (nc *NoopCollector) StartBlockToSeal(blockID flow.Identifier) {} +func (nc *NoopCollector) FinishBlockToSeal(blockID flow.Identifier) {} +func (nc *NoopCollector) EmergencySeal() {} +func (nc *NoopCollector) OnReceiptProcessingDuration(duration time.Duration) {} +func (nc *NoopCollector) OnApprovalProcessingDuration(duration time.Duration) {} +func (nc *NoopCollector) CheckSealingDuration(duration time.Duration) {} +func (nc *NoopCollector) OnExecutionResultReceivedAtAssignerEngine() {} +func (nc *NoopCollector) OnVerifiableChunkReceivedAtVerifierEngine() {} +func (nc *NoopCollector) OnResultApprovalDispatchedInNetworkByVerifier() {} func (nc *NoopCollector) SetMaxChunkDataPackAttemptsForNextUnsealedHeightAtRequester(attempts uint64) { } -func (nc *NoopCollector) OnFinalizedBlockArrivedAtAssigner(height uint64) {} -func (nc *NoopCollector) OnChunksAssignmentDoneAtAssigner(chunks int) {} -func (nc *NoopCollector) OnAssignedChunkProcessedAtAssigner() {} -func (nc *NoopCollector) OnAssignedChunkReceivedAtFetcher() {} -func (nc *NoopCollector) OnChunkDataPackRequestDispatchedInNetworkByRequester() {} -func (nc *NoopCollector) OnChunkDataPackRequestSentByFetcher() {} -func (nc *NoopCollector) OnChunkDataPackRequestReceivedByRequester() {} -func (nc *NoopCollector) OnChunkDataPackArrivedAtFetcher() {} -func (nc *NoopCollector) OnChunkDataPackSentToFetcher() {} -func (nc *NoopCollector) OnVerifiableChunkSentToVerifier() {} -func (nc *NoopCollector) OnBlockConsumerJobDone(uint64) {} -func (nc *NoopCollector) OnChunkConsumerJobDone(uint64) {} -func (nc *NoopCollector) OnChunkDataPackResponseReceivedFromNetworkByRequester() {} -func (nc *NoopCollector) TotalConnectionsInPool(connectionCount uint, connectionPoolSize uint) {} -func (nc *NoopCollector) ConnectionFromPoolReused() {} -func (nc *NoopCollector) ConnectionAddedToPool() {} -func (nc *NoopCollector) NewConnectionEstablished() {} -func (nc *NoopCollector) ConnectionFromPoolInvalidated() {} -func (nc *NoopCollector) ConnectionFromPoolUpdated() {} -func (nc *NoopCollector) ConnectionFromPoolEvicted() {} -func (nc *NoopCollector) StartBlockReceivedToExecuted(blockID flow.Identifier) {} -func (nc *NoopCollector) FinishBlockReceivedToExecuted(blockID flow.Identifier) {} -func (nc *NoopCollector) ExecutionComputationUsedPerBlock(computation uint64) {} -func (nc *NoopCollector) ExecutionStorageStateCommitment(bytes int64) {} -func (nc *NoopCollector) ExecutionLastExecutedBlockHeight(height uint64) {} -func (nc *NoopCollector) ExecutionBlockExecuted(_ time.Duration, _ module.ExecutionResultStats) {} -func (nc *NoopCollector) ExecutionCollectionExecuted(_ time.Duration, _ module.ExecutionResultStats) { +func (nc *NoopCollector) OnFinalizedBlockArrivedAtAssigner(height uint64) {} +func (nc *NoopCollector) OnChunksAssignmentDoneAtAssigner(chunks int) {} +func (nc *NoopCollector) OnAssignedChunkProcessedAtAssigner() {} +func 
(nc *NoopCollector) OnAssignedChunkReceivedAtFetcher() {} +func (nc *NoopCollector) OnChunkDataPackRequestDispatchedInNetworkByRequester() {} +func (nc *NoopCollector) OnChunkDataPackRequestSentByFetcher() {} +func (nc *NoopCollector) OnChunkDataPackRequestReceivedByRequester() {} +func (nc *NoopCollector) OnChunkDataPackArrivedAtFetcher() {} +func (nc *NoopCollector) OnChunkDataPackSentToFetcher() {} +func (nc *NoopCollector) OnVerifiableChunkSentToVerifier() {} +func (nc *NoopCollector) OnBlockConsumerJobDone(uint64) {} +func (nc *NoopCollector) OnChunkConsumerJobDone(uint64) {} +func (nc *NoopCollector) OnChunkDataPackResponseReceivedFromNetworkByRequester() {} +func (nc *NoopCollector) TotalConnectionsInPool(connectionCount uint, connectionPoolSize uint) {} +func (nc *NoopCollector) ConnectionFromPoolReused() {} +func (nc *NoopCollector) ConnectionAddedToPool() {} +func (nc *NoopCollector) NewConnectionEstablished() {} +func (nc *NoopCollector) ConnectionFromPoolInvalidated() {} +func (nc *NoopCollector) ConnectionFromPoolUpdated() {} +func (nc *NoopCollector) ConnectionFromPoolEvicted() {} +func (nc *NoopCollector) StartBlockReceivedToExecuted(blockID flow.Identifier) {} +func (nc *NoopCollector) FinishBlockReceivedToExecuted(blockID flow.Identifier) {} +func (nc *NoopCollector) ExecutionComputationUsedPerBlock(computation uint64) {} +func (nc *NoopCollector) ExecutionStorageStateCommitment(bytes int64) {} +func (nc *NoopCollector) ExecutionCheckpointSize(bytes uint64) {} +func (nc *NoopCollector) ExecutionLastExecutedBlockHeight(height uint64) {} +func (nc *NoopCollector) ExecutionLastFinalizedExecutedBlockHeight(height uint64) {} +func (nc *NoopCollector) ExecutionBlockExecuted(_ time.Duration, _ module.BlockExecutionResultStats) { } -func (nc *NoopCollector) ExecutionBlockExecutionEffortVectorComponent(_ string, _ uint) {} -func (nc *NoopCollector) ExecutionBlockCachedPrograms(programs int) {} -func (nc *NoopCollector) ExecutionTransactionExecuted(_ time.Duration, _, _ uint64, _, _ int, _ bool) { +func (nc *NoopCollector) ExecutionLastChunkDataPackPrunedHeight(height uint64) {} +func (nc *NoopCollector) ExecutionTargetChunkDataPackPrunedHeight(height uint64) {} + +func (nc *NoopCollector) ExecutionCollectionExecuted(_ time.Duration, _ module.CollectionExecutionResultStats) { } -func (nc *NoopCollector) ExecutionChunkDataPackGenerated(_, _ int) {} -func (nc *NoopCollector) ExecutionScriptExecuted(dur time.Duration, compUsed, _, _ uint64) {} -func (nc *NoopCollector) ForestApproxMemorySize(bytes uint64) {} -func (nc *NoopCollector) ForestNumberOfTrees(number uint64) {} -func (nc *NoopCollector) LatestTrieRegCount(number uint64) {} -func (nc *NoopCollector) LatestTrieRegCountDiff(number int64) {} -func (nc *NoopCollector) LatestTrieRegSize(size uint64) {} -func (nc *NoopCollector) LatestTrieRegSizeDiff(size int64) {} -func (nc *NoopCollector) LatestTrieMaxDepthTouched(maxDepth uint16) {} -func (nc *NoopCollector) UpdateCount() {} -func (nc *NoopCollector) ProofSize(bytes uint32) {} -func (nc *NoopCollector) UpdateValuesNumber(number uint64) {} -func (nc *NoopCollector) UpdateValuesSize(byte uint64) {} -func (nc *NoopCollector) UpdateDuration(duration time.Duration) {} -func (nc *NoopCollector) UpdateDurationPerItem(duration time.Duration) {} -func (nc *NoopCollector) ReadValuesNumber(number uint64) {} -func (nc *NoopCollector) ReadValuesSize(byte uint64) {} -func (nc *NoopCollector) ReadDuration(duration time.Duration) {} -func (nc *NoopCollector) ReadDurationPerItem(duration 
time.Duration) {} -func (nc *NoopCollector) ExecutionCollectionRequestSent() {} -func (nc *NoopCollector) ExecutionCollectionRequestRetried() {} -func (nc *NoopCollector) RuntimeTransactionParsed(dur time.Duration) {} -func (nc *NoopCollector) RuntimeTransactionChecked(dur time.Duration) {} -func (nc *NoopCollector) RuntimeTransactionInterpreted(dur time.Duration) {} -func (nc *NoopCollector) RuntimeSetNumberOfAccounts(count uint64) {} -func (nc *NoopCollector) RuntimeTransactionProgramsCacheMiss() {} -func (nc *NoopCollector) RuntimeTransactionProgramsCacheHit() {} -func (nc *NoopCollector) ScriptExecuted(dur time.Duration, size int) {} -func (nc *NoopCollector) TransactionResultFetched(dur time.Duration, size int) {} -func (nc *NoopCollector) TransactionReceived(txID flow.Identifier, when time.Time) {} -func (nc *NoopCollector) TransactionFinalized(txID flow.Identifier, when time.Time) {} -func (nc *NoopCollector) TransactionExecuted(txID flow.Identifier, when time.Time) {} -func (nc *NoopCollector) TransactionExpired(txID flow.Identifier) {} -func (nc *NoopCollector) TransactionSubmissionFailed() {} -func (nc *NoopCollector) UpdateExecutionReceiptMaxHeight(height uint64) {} -func (nc *NoopCollector) ChunkDataPackRequestProcessed() {} -func (nc *NoopCollector) ExecutionSync(syncing bool) {} -func (nc *NoopCollector) ExecutionBlockDataUploadStarted() {} -func (nc *NoopCollector) ExecutionBlockDataUploadFinished(dur time.Duration) {} -func (nc *NoopCollector) ExecutionComputationResultUploaded() {} -func (nc *NoopCollector) ExecutionComputationResultUploadRetried() {} -func (nc *NoopCollector) RootIDComputed(duration time.Duration, numberOfChunks int) {} -func (nc *NoopCollector) AddBlobsSucceeded(duration time.Duration, totalSize uint64) {} -func (nc *NoopCollector) AddBlobsFailed() {} -func (nc *NoopCollector) FulfilledHeight(blockHeight uint64) {} -func (nc *NoopCollector) ReceiptSkipped() {} -func (nc *NoopCollector) RequestSucceeded(blockHeight uint64, duration time.Duration, totalSize uint64, numberOfAttempts int) { +func (nc *NoopCollector) ExecutionBlockExecutionEffortVectorComponent(_ string, _ uint64) {} +func (nc *NoopCollector) ExecutionBlockCachedPrograms(programs int) {} +func (nc *NoopCollector) ExecutionTransactionExecuted(_ time.Duration, _ module.TransactionExecutionResultStats, _ module.TransactionExecutionResultInfo) { } +func (nc *NoopCollector) ExecutionChunkDataPackGenerated(_, _ int) {} +func (nc *NoopCollector) ExecutionScriptExecuted(dur time.Duration, compUsed, _, _ uint64) {} +func (nc *NoopCollector) ExecutionCallbacksExecuted(int, uint64, uint64) {} +func (nc *NoopCollector) ForestApproxMemorySize(bytes uint64) {} +func (nc *NoopCollector) ForestNumberOfTrees(number uint64) {} +func (nc *NoopCollector) LatestTrieRegCount(number uint64) {} +func (nc *NoopCollector) LatestTrieRegCountDiff(number int64) {} +func (nc *NoopCollector) LatestTrieRegSize(size uint64) {} +func (nc *NoopCollector) LatestTrieRegSizeDiff(size int64) {} +func (nc *NoopCollector) LatestTrieMaxDepthTouched(maxDepth uint16) {} +func (nc *NoopCollector) UpdateCount() {} +func (nc *NoopCollector) ProofSize(bytes uint32) {} +func (nc *NoopCollector) UpdateValuesNumber(number uint64) {} +func (nc *NoopCollector) UpdateValuesSize(byte uint64) {} +func (nc *NoopCollector) UpdateDuration(duration time.Duration) {} +func (nc *NoopCollector) UpdateDurationPerItem(duration time.Duration) {} +func (nc *NoopCollector) ReadValuesNumber(number uint64) {} +func (nc *NoopCollector) ReadValuesSize(byte 
uint64) {} +func (nc *NoopCollector) ReadDuration(duration time.Duration) {} +func (nc *NoopCollector) ReadDurationPerItem(duration time.Duration) {} +func (nc *NoopCollector) ExecutionCollectionRequestSent() {} +func (nc *NoopCollector) RuntimeTransactionParsed(dur time.Duration) {} +func (nc *NoopCollector) RuntimeTransactionChecked(dur time.Duration) {} +func (nc *NoopCollector) RuntimeTransactionInterpreted(dur time.Duration) {} +func (nc *NoopCollector) RuntimeSetNumberOfAccounts(count uint64) {} +func (nc *NoopCollector) RuntimeTransactionProgramsCacheMiss() {} +func (nc *NoopCollector) RuntimeTransactionProgramsCacheHit() {} +func (nc *NoopCollector) SetNumberOfDeployedCOAs(_ uint64) {} +func (nc *NoopCollector) EVMTransactionExecuted(_ uint64, _ bool, _ bool) {} +func (nc *NoopCollector) EVMBlockExecuted(_ int, _ uint64, _ float64) {} +func (nc *NoopCollector) ScriptExecuted(dur time.Duration, size int) {} +func (nc *NoopCollector) ScriptExecutionErrorLocal() {} +func (nc *NoopCollector) ScriptExecutionErrorOnExecutionNode() {} +func (nc *NoopCollector) ScriptExecutionResultMismatch() {} +func (nc *NoopCollector) ScriptExecutionResultMatch() {} +func (nc *NoopCollector) ScriptExecutionErrorMismatch() {} +func (nc *NoopCollector) ScriptExecutionErrorMatch() {} +func (nc *NoopCollector) ScriptExecutionNotIndexed() {} +func (nc *NoopCollector) TransactionResultFetched(dur time.Duration, size int) {} +func (nc *NoopCollector) TransactionReceived(txID flow.Identifier, when time.Time) {} +func (nc *NoopCollector) TransactionFinalized(txID flow.Identifier, when time.Time) {} +func (nc *NoopCollector) TransactionSealed(txID flow.Identifier, when time.Time) {} +func (nc *NoopCollector) TransactionExecuted(txID flow.Identifier, when time.Time) {} +func (nc *NoopCollector) TransactionExpired(txID flow.Identifier) {} +func (nc *NoopCollector) TransactionValidated() {} +func (nc *NoopCollector) TransactionValidationFailed(reason string) {} +func (nc *NoopCollector) TransactionValidationSkipped() {} +func (nc *NoopCollector) TransactionSubmissionFailed() {} +func (nc *NoopCollector) UpdateExecutionReceiptMaxHeight(height uint64) {} +func (nc *NoopCollector) UpdateLastFullBlockHeight(height uint64) {} +func (nc *NoopCollector) ChunkDataPackRequestProcessed() {} +func (nc *NoopCollector) ExecutionSync(syncing bool) {} +func (nc *NoopCollector) ExecutionBlockDataUploadStarted() {} +func (nc *NoopCollector) ExecutionBlockDataUploadFinished(dur time.Duration) {} +func (nc *NoopCollector) ExecutionComputationResultUploaded() {} +func (nc *NoopCollector) ExecutionComputationResultUploadRetried() {} +func (nc *NoopCollector) RootIDComputed(duration time.Duration, numberOfChunks int) {} +func (nc *NoopCollector) AddBlobsSucceeded(duration time.Duration, totalSize uint64) {} +func (nc *NoopCollector) AddBlobsFailed() {} +func (nc *NoopCollector) FulfilledHeight(blockHeight uint64) {} +func (nc *NoopCollector) ReceiptSkipped() {} +func (nc *NoopCollector) RequestSucceeded(uint64, time.Duration, uint64, int) {} func (nc *NoopCollector) RequestFailed(duration time.Duration, retryable bool) {} func (nc *NoopCollector) RequestCanceled() {} func (nc *NoopCollector) ResponseDropped() {} @@ -253,20 +282,34 @@ func (nc *NoopCollector) OnPeerDialed(duration time.Duration, attempts int) func (nc *NoopCollector) OnPeerDialFailure(duration time.Duration, attempts int) {} func (nc *NoopCollector) OnStreamEstablished(duration time.Duration, attempts int) {} func (nc *NoopCollector) OnEstablishStreamFailure(duration 
time.Duration, attempts int) {} +func (nc *NoopCollector) OnDialRetryBudgetUpdated(budget uint64) {} +func (nc *NoopCollector) OnStreamCreationRetryBudgetUpdated(budget uint64) {} +func (nc *NoopCollector) OnDialRetryBudgetResetToDefault() {} +func (nc *NoopCollector) OnStreamCreationRetryBudgetResetToDefault() {} var _ module.HeroCacheMetrics = (*NoopCollector)(nil) -var _ module.NetworkMetrics = (*NoopCollector)(nil) -func (nc *NoopCollector) OnRateLimitedUnicastMessage(role, msgType, topic, reason string) {} -func (nc *NoopCollector) OnIWantReceived(int) {} -func (nc *NoopCollector) OnIHaveReceived(int) {} -func (nc *NoopCollector) OnGraftReceived(int) {} -func (nc *NoopCollector) OnPruneReceived(int) {} -func (nc *NoopCollector) OnIncomingRpcAcceptedFully() {} -func (nc *NoopCollector) OnIncomingRpcAcceptedOnlyForControlMessages() {} -func (nc *NoopCollector) OnIncomingRpcRejected() {} -func (nc *NoopCollector) OnPublishedGossipMessagesReceived(int) {} -func (nc *NoopCollector) OnLocalMeshSizeUpdated(string, int) {} +func (nc *NoopCollector) OnIWantControlMessageIdsTruncated(diff int) {} +func (nc *NoopCollector) OnIWantMessageIDsReceived(msgIdCount int) {} +func (nc *NoopCollector) OnIHaveMessageIDsReceived(channel string, msgIdCount int) {} +func (nc *NoopCollector) OnLocalMeshSizeUpdated(string, int) {} +func (nc *NoopCollector) OnPeerAddedToProtocol(protocol string) {} +func (nc *NoopCollector) OnPeerRemovedFromProtocol() {} +func (nc *NoopCollector) OnLocalPeerJoinedTopic() {} +func (nc *NoopCollector) OnLocalPeerLeftTopic() {} +func (nc *NoopCollector) OnPeerGraftTopic(topic string) {} +func (nc *NoopCollector) OnPeerPruneTopic(topic string) {} +func (nc *NoopCollector) OnMessageEnteredValidation(size int) {} +func (nc *NoopCollector) OnMessageRejected(size int, reason string) {} +func (nc *NoopCollector) OnMessageDuplicate(size int) {} +func (nc *NoopCollector) OnPeerThrottled() {} +func (nc *NoopCollector) OnRpcReceived(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { +} +func (nc *NoopCollector) OnRpcSent(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { +} +func (nc *NoopCollector) OnOutboundRpcDropped() {} +func (nc *NoopCollector) OnUndeliveredMessage() {} +func (nc *NoopCollector) OnMessageDeliveredToAllSubscribers(size int) {} func (nc *NoopCollector) AllowConn(network.Direction, bool) {} func (nc *NoopCollector) BlockConn(network.Direction, bool) {} func (nc *NoopCollector) AllowStream(peer.ID, network.Direction) {} @@ -290,4 +333,72 @@ func (nc *NoopCollector) OnBehaviourPenaltyUpdated(f float64) func (nc *NoopCollector) OnIPColocationFactorUpdated(f float64) {} func (nc *NoopCollector) OnAppSpecificScoreUpdated(f float64) {} func (nc *NoopCollector) OnOverallPeerScoreUpdated(f float64) {} -func (nc *NoopCollector) OnMisbehaviorReported(string, string) {} +func (nc *NoopCollector) OnIHaveControlMessageIdsTruncated(diff int) {} +func (nc *NoopCollector) OnControlMessagesTruncated(messageType p2pmsg.ControlMessageType, diff int) { +} +func (nc *NoopCollector) OnIncomingRpcReceived(iHaveCount, iWantCount, graftCount, pruneCount, msgCount int) { +} +func (nc *NoopCollector) AsyncProcessingStarted() {} +func (nc *NoopCollector) AsyncProcessingFinished(time.Duration) {} +func (nc *NoopCollector) OnIWantMessagesInspected(duplicateCount int, cacheMissCount int) {} +func (nc *NoopCollector) OnIWantDuplicateMessageIdsExceedThreshold() {} +func (nc *NoopCollector) OnIWantCacheMissMessageIdsExceedThreshold() {} +func (nc 
*NoopCollector) OnIHaveMessagesInspected(duplicateTopicIds int, duplicateMessageIds, invalidTopicIds int) { +} +func (nc *NoopCollector) OnIHaveDuplicateTopicIdsExceedThreshold() {} +func (nc *NoopCollector) OnIHaveInvalidTopicIdsExceedThreshold() {} +func (nc *NoopCollector) OnIHaveDuplicateMessageIdsExceedThreshold() {} +func (nc *NoopCollector) OnInvalidTopicIdDetectedForControlMessage(messageType p2pmsg.ControlMessageType) { +} +func (nc *NoopCollector) OnActiveClusterIDsNotSetErr() {} +func (nc *NoopCollector) OnUnstakedPeerInspectionFailed() {} +func (nc *NoopCollector) OnInvalidControlMessageNotificationSent() {} +func (nc *NoopCollector) OnRpcRejectedFromUnknownSender() {} +func (nc *NoopCollector) OnPublishMessagesInspectionErrorExceedsThreshold() {} +func (nc *NoopCollector) OnPruneDuplicateTopicIdsExceedThreshold() {} +func (nc *NoopCollector) OnPruneInvalidTopicIdsExceedThreshold() {} +func (nc *NoopCollector) OnPruneMessageInspected(duplicateTopicIds, invalidTopicIds int) {} +func (nc *NoopCollector) OnGraftDuplicateTopicIdsExceedThreshold() {} +func (nc *NoopCollector) OnGraftInvalidTopicIdsExceedThreshold() {} +func (nc *NoopCollector) OnGraftMessageInspected(duplicateTopicIds, invalidTopicIds int) {} +func (nc *NoopCollector) OnPublishMessageInspected(totalErrCount int, invalidTopicIdsCount int, invalidSubscriptionsCount int, invalidSendersCount int) { +} + +func (nc *NoopCollector) OnMisbehaviorReported(string, string) {} +func (nc *NoopCollector) OnViolationReportSkipped() {} + +var _ ObserverMetrics = (*NoopCollector)(nil) + +func (nc *NoopCollector) RecordRPC(handler, rpc string, code codes.Code) {} + +var _ module.ExecutionStateIndexerMetrics = (*NoopCollector)(nil) + +func (nc *NoopCollector) BlockIndexed(uint64, time.Duration, int, int, int) {} +func (nc *NoopCollector) BlockReindexed() {} +func (nc *NoopCollector) InitializeLatestHeight(height uint64) {} + +var _ module.TransactionErrorMessagesMetrics = (*NoopCollector)(nil) + +func (nc *NoopCollector) TxErrorsInitialHeight(uint64) {} +func (nc *NoopCollector) TxErrorsFetchStarted() {} +func (nc *NoopCollector) TxErrorsFetchFinished(time.Duration, bool, uint64) {} +func (nc *NoopCollector) TxErrorsFetchRetried() {} + +var _ module.GossipSubScoringRegistryMetrics = (*NoopCollector)(nil) + +func (nc *NoopCollector) DuplicateMessagePenalties(penalty float64) {} + +func (nc *NoopCollector) DuplicateMessagesCounts(count float64) {} + +var _ module.CollectionExecutedMetric = (*NoopCollector)(nil) + +func (nc *NoopCollector) CollectionFinalized(light *flow.LightCollection) {} +func (nc *NoopCollector) CollectionExecuted(light *flow.LightCollection) {} +func (nc *NoopCollector) ExecutionReceiptReceived(r *flow.ExecutionReceipt) { +} + +func (nc *NoopCollector) AccountBalance(bal float64) {} +func (nc *NoopCollector) RecommendedMinBalance(bal float64) {} +func (nc *NoopCollector) IsMisconfigured(misconfigured bool) {} + +var _ module.MachineAccountMetrics = (*NoopCollector)(nil) diff --git a/module/metrics/observer.go b/module/metrics/observer.go index 4e885c9bf4c..375aa66a2ac 100644 --- a/module/metrics/observer.go +++ b/module/metrics/observer.go @@ -6,10 +6,16 @@ import ( "google.golang.org/grpc/codes" ) +type ObserverMetrics interface { + RecordRPC(handler, rpc string, code codes.Code) +} + type ObserverCollector struct { rpcs *prometheus.CounterVec } +var _ ObserverMetrics = (*ObserverCollector)(nil) + func NewObserverCollector() *ObserverCollector { return &ObserverCollector{ rpcs: 
promauto.NewCounterVec(prometheus.CounterOpts{
diff --git a/module/metrics/rest_api.go b/module/metrics/rest_api.go
index 36c3d1b8b1a..e9132f243c6 100644
--- a/module/metrics/rest_api.go
+++ b/module/metrics/rest_api.go
@@ -2,108 +2,112 @@ package metrics

import (
	"context"
+	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	httpmetrics "github.com/slok/go-http-metrics/metrics"
-	metricsProm "github.com/slok/go-http-metrics/metrics/prometheus"
-)
-
-// Example recorder taken from:
-// https://github.com/slok/go-http-metrics/blob/master/metrics/prometheus/prometheus.go
-type RestCollector interface {
-	httpmetrics.Recorder
-	AddTotalRequests(ctx context.Context, service string, id string)
-}
+
+	"github.com/onflow/flow-go/module"
+)

-type recorder struct {
+type RestCollector struct {
	httpRequestDurHistogram   *prometheus.HistogramVec
	httpResponseSizeHistogram *prometheus.HistogramVec
	httpRequestsInflight      *prometheus.GaugeVec
	httpRequestsTotal         *prometheus.GaugeVec
-}
-
-// NewRestCollector returns a new metrics recorder that implements the recorder
-// using Prometheus as the backend.
-func NewRestCollector(cfg metricsProm.Config) RestCollector {
-	if len(cfg.DurationBuckets) == 0 {
-		cfg.DurationBuckets = prometheus.DefBuckets
-	}
-
-	if len(cfg.SizeBuckets) == 0 {
-		cfg.SizeBuckets = prometheus.ExponentialBuckets(100, 10, 8)
-	}
-
-	if cfg.Registry == nil {
-		cfg.Registry = prometheus.DefaultRegisterer
-	}
-
-	if cfg.HandlerIDLabel == "" {
-		cfg.HandlerIDLabel = "handler"
-	}
-
-	if cfg.StatusCodeLabel == "" {
-		cfg.StatusCodeLabel = "code"
-	}
+
+	// urlToRouteMapper is a callback that converts a URL to a route name
+	urlToRouteMapper func(string) (string, error)
+}

-	if cfg.MethodLabel == "" {
-		cfg.MethodLabel = "method"
-	}
+var _ module.RestMetrics = (*RestCollector)(nil)

-	if cfg.ServiceLabel == "" {
-		cfg.ServiceLabel = "service"
+// NewRestCollector returns a new RestCollector that implements module.RestMetrics,
+// using Prometheus as the backend.
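+//
+// A sketch of the expected wiring (the route mapper closure below is hypothetical,
+// not part of this change):
+//
+//	mapper := func(url string) (string, error) { return "getAccount", nil }
+//	collector, err := NewRestCollector(mapper, prometheus.NewRegistry())
+//	// collector satisfies module.RestMetrics and plugs into the go-http-metrics
+//	// middleware as its Recorder; AddTotalRequests is called by the REST handler.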
+func NewRestCollector(urlToRouteMapper func(string) (string, error), registerer prometheus.Registerer) (*RestCollector, error) { + if urlToRouteMapper == nil { + return nil, fmt.Errorf("urlToRouteMapper cannot be nil") } - r := &recorder{ + r := &RestCollector{ + urlToRouteMapper: urlToRouteMapper, httpRequestDurHistogram: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: cfg.Prefix, - Subsystem: "http", + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "request_duration_seconds", Help: "The latency of the HTTP requests.", - Buckets: cfg.DurationBuckets, - }, []string{cfg.ServiceLabel, cfg.HandlerIDLabel, cfg.MethodLabel, cfg.StatusCodeLabel}), + Buckets: prometheus.DefBuckets, + }, []string{LabelService, LabelHandler, LabelMethod, LabelStatusCode}), httpResponseSizeHistogram: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: cfg.Prefix, - Subsystem: "http", + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "response_size_bytes", Help: "The size of the HTTP responses.", - Buckets: cfg.SizeBuckets, - }, []string{cfg.ServiceLabel, cfg.HandlerIDLabel, cfg.MethodLabel, cfg.StatusCodeLabel}), + Buckets: prometheus.ExponentialBuckets(100, 10, 8), + }, []string{LabelService, LabelHandler, LabelMethod, LabelStatusCode}), httpRequestsInflight: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: cfg.Prefix, - Subsystem: "http", + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "requests_inflight", Help: "The number of inflight requests being handled at the same time.", - }, []string{cfg.ServiceLabel, cfg.HandlerIDLabel}), + }, []string{LabelService, LabelHandler}), httpRequestsTotal: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: cfg.Prefix, - Subsystem: "http", + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "requests_total", Help: "The number of requests handled over time.", - }, []string{cfg.ServiceLabel, cfg.HandlerIDLabel}), + }, []string{LabelMethod, LabelHandler}), } - return r + registerer.MustRegister( + r.httpRequestDurHistogram, + r.httpResponseSizeHistogram, + r.httpRequestsInflight, + r.httpRequestsTotal, + ) + + return r, nil +} + +// ObserveHTTPRequestDuration records the duration of the REST request. +// This method is called automatically by go-http-metrics/middleware +func (r *RestCollector) ObserveHTTPRequestDuration(_ context.Context, p httpmetrics.HTTPReqProperties, duration time.Duration) { + handler := r.mapURLToRoute(p.ID) + r.httpRequestDurHistogram.WithLabelValues(p.Service, handler, p.Method, p.Code).Observe(duration.Seconds()) } -// These methods are called automatically by go-http-metrics/middleware -func (r recorder) ObserveHTTPRequestDuration(_ context.Context, p httpmetrics.HTTPReqProperties, duration time.Duration) { - r.httpRequestDurHistogram.WithLabelValues(p.Service, p.ID, p.Method, p.Code).Observe(duration.Seconds()) +// ObserveHTTPResponseSize records the response size of the REST request. 
+// This method is called automatically by go-http-metrics/middleware +func (r *RestCollector) ObserveHTTPResponseSize(_ context.Context, p httpmetrics.HTTPReqProperties, sizeBytes int64) { + handler := r.mapURLToRoute(p.ID) + r.httpResponseSizeHistogram.WithLabelValues(p.Service, handler, p.Method, p.Code).Observe(float64(sizeBytes)) } -func (r recorder) ObserveHTTPResponseSize(_ context.Context, p httpmetrics.HTTPReqProperties, sizeBytes int64) { - r.httpResponseSizeHistogram.WithLabelValues(p.Service, p.ID, p.Method, p.Code).Observe(float64(sizeBytes)) +// AddInflightRequests increments and decrements the number of inflight requests being processed. +// This method is called automatically by go-http-metrics/middleware +func (r *RestCollector) AddInflightRequests(_ context.Context, p httpmetrics.HTTPProperties, quantity int) { + handler := r.mapURLToRoute(p.ID) + r.httpRequestsInflight.WithLabelValues(p.Service, handler).Add(float64(quantity)) } -func (r recorder) AddInflightRequests(_ context.Context, p httpmetrics.HTTPProperties, quantity int) { - r.httpRequestsInflight.WithLabelValues(p.Service, p.ID).Add(float64(quantity)) +// AddTotalRequests records all REST requests. +// This is a custom method called by the REST handler +func (r *RestCollector) AddTotalRequests(_ context.Context, method, path string) { + handler := r.mapURLToRoute(path) + r.httpRequestsTotal.WithLabelValues(method, handler).Inc() } -// New custom method to track all requests made for every REST API request -func (r recorder) AddTotalRequests(_ context.Context, method string, id string) { - r.httpRequestsTotal.WithLabelValues(method, id).Inc() +// mapURLToRoute uses the urlToRouteMapper callback to convert a URL to a route name. +// This normalizes the URL, removing dynamic information and converting it to a static string +func (r *RestCollector) mapURLToRoute(url string) string { + route, err := r.urlToRouteMapper(url) + if err != nil { + return "unknown" + } + + return route } diff --git a/module/metrics/server.go b/module/metrics/server.go index cd8187b1fbd..09221cb35ce 100644 --- a/module/metrics/server.go +++ b/module/metrics/server.go @@ -3,18 +3,29 @@ package metrics import ( "context" "errors" + "net" "net/http" "strconv" "time" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" ) +// metricsServerShutdownTimeout is the time to wait for the server to shut down gracefully +const metricsServerShutdownTimeout = 5 * time.Second + // Server is the http server that will be serving the /metrics request for prometheus type Server struct { - server *http.Server - log zerolog.Logger + component.Component + + address string + server *http.Server + log zerolog.Logger } // NewServer creates a new server that will start on the specified port, @@ -25,44 +36,71 @@ func NewServer(log zerolog.Logger, port uint) *Server { mux := http.NewServeMux() endpoint := "/metrics" mux.Handle(endpoint, promhttp.Handler()) - log.Info().Str("address", addr).Str("endpoint", endpoint).Msg("metrics server started") m := &Server{ - server: &http.Server{Addr: addr, Handler: mux}, - log: log, + address: addr, + server: &http.Server{Addr: addr, Handler: mux}, + log: log.With().Str("address", addr).Str("endpoint", endpoint).Logger(), } + m.Component = component.NewComponentManagerBuilder(). + AddWorker(m.serve). + AddWorker(m.shutdownOnContextDone).
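These two workers give the metrics server a proper component lifecycle. A hedged sketch of driving it from a caller, assuming the component/irrecoverable APIs referenced in the imports above; the port and the sleep standing in for node run time are illustrative:

```go
package main

import (
	"context"
	"os"
	"time"

	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/module/irrecoverable"
	"github.com/onflow/flow-go/module/metrics"
)

func main() {
	logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
	server := metrics.NewServer(logger, 8080) // port is illustrative

	ctx, cancel := context.WithCancel(context.Background())
	signalerCtx, errCh := irrecoverable.WithSignaler(ctx)

	server.Start(signalerCtx) // spawns the serve and shutdown-watcher workers
	<-server.Ready()          // closes once the TCP listener is bound

	go func() {
		// any error thrown on the signaler context surfaces here
		if err, ok := <-errCh; ok {
			logger.Error().Err(err).Msg("metrics server failed")
		}
	}()

	time.Sleep(time.Second) // stand-in for the node's actual run time

	cancel()        // triggers graceful shutdown, bounded by metricsServerShutdownTimeout
	<-server.Done() // closes once both workers have exited
}
```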
+ Build() + return m } -// Ready returns a channel that will close when the network stack is ready. -func (m *Server) Ready() <-chan struct{} { - ready := make(chan struct{}) - go func() { - if err := m.server.ListenAndServe(); err != nil { - // http.ErrServerClosed is returned when Close or Shutdown is called - // we don't consider this an error, so print this with debug level instead - if errors.Is(err, http.ErrServerClosed) { - m.log.Debug().Err(err).Msg("metrics server shutdown") - } else { - m.log.Err(err).Msg("error shutting down metrics server") - } +func (m *Server) serve(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + m.log.Info().Msg("starting metrics server on address") + + l, err := net.Listen("tcp", m.address) + if err != nil { + m.log.Err(err).Msg("failed to start the metrics server") + ctx.Throw(err) + return + } + + ready() + + // pass the signaler context to the server so that the signaler context + // can control the server's lifetime + m.server.BaseContext = func(_ net.Listener) context.Context { + return ctx + } + + err = m.server.Serve(l) // blocking call + if err != nil { + if errors.Is(err, http.ErrServerClosed) { + return } - }() - go func() { - close(ready) - }() - return ready + log.Err(err).Msg("fatal error in the metrics server") + ctx.Throw(err) + } } -// Done returns a channel that will close when shutdown is complete. -func (m *Server) Done() <-chan struct{} { - done := make(chan struct{}) - go func() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - _ = m.server.Shutdown(ctx) - cancel() - close(done) - }() - return done +func (m *Server) shutdownOnContextDone(ictx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + <-ictx.Done() + + ctx, cancel := context.WithTimeout(context.Background(), metricsServerShutdownTimeout) + defer cancel() + + // shutdown the server gracefully + err := m.server.Shutdown(ctx) + if err == nil { + m.log.Info().Msg("metrics server graceful shutdown completed") + return + } + + if errors.Is(err, ctx.Err()) { + m.log.Warn().Msg("metrics server graceful shutdown timed out") + // shutdown the server forcefully + err := m.server.Close() + if err != nil { + m.log.Err(err).Msg("error closing metrics server") + } + } else { + m.log.Err(err).Msg("error shutting down metrics server") + } } diff --git a/module/metrics/transaction.go b/module/metrics/transaction.go index 94b19e4f219..4c1125e6941 100644 --- a/module/metrics/transaction.go +++ b/module/metrics/transaction.go @@ -7,33 +7,43 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/rs/zerolog" - "github.com/onflow/flow-go/engine/consensus/sealing/counters" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool" ) type TransactionCollector struct { - transactionTimings mempool.TransactionTimings - log zerolog.Logger - logTimeToFinalized bool - logTimeToExecuted bool - logTimeToFinalizedExecuted bool - timeToFinalized prometheus.Summary - timeToExecuted prometheus.Summary - timeToFinalizedExecuted prometheus.Summary - transactionSubmission *prometheus.CounterVec - scriptExecutedDuration *prometheus.HistogramVec - transactionResultDuration *prometheus.HistogramVec - scriptSize prometheus.Histogram - transactionSize prometheus.Histogram - maxReceiptHeight prometheus.Gauge - - // used to skip heights that are lower than the current max height - maxReceiptHeightValue counters.StrictMonotonousCounter + transactionTimings 
mempool.TransactionTimings + log zerolog.Logger + logTimeToFinalized bool + logTimeToExecuted bool + logTimeToFinalizedExecuted bool + logTimeToSealed bool + timeToFinalized prometheus.Summary + timeToExecuted prometheus.Summary + timeToFinalizedExecuted prometheus.Summary + timeToSealed prometheus.Summary + transactionSubmission *prometheus.CounterVec + transactionSize prometheus.Histogram + scriptExecutedDuration *prometheus.HistogramVec + scriptExecutionErrorOnExecutor *prometheus.CounterVec + scriptExecutionComparison *prometheus.CounterVec + scriptSize prometheus.Histogram + transactionResultDuration *prometheus.HistogramVec } -func NewTransactionCollector(transactionTimings mempool.TransactionTimings, log zerolog.Logger, - logTimeToFinalized bool, logTimeToExecuted bool, logTimeToFinalizedExecuted bool) *TransactionCollector { +// interface check +var _ module.BackendScriptsMetrics = (*TransactionCollector)(nil) +var _ module.TransactionMetrics = (*TransactionCollector)(nil) + +func NewTransactionCollector( + log zerolog.Logger, + transactionTimings mempool.TransactionTimings, + logTimeToFinalized bool, + logTimeToExecuted bool, + logTimeToFinalizedExecuted bool, + logTimeToSealed bool, +) *TransactionCollector { tc := &TransactionCollector{ transactionTimings: transactionTimings, @@ -41,6 +51,7 @@ func NewTransactionCollector(transactionTimings mempool.TransactionTimings, log logTimeToFinalized: logTimeToFinalized, logTimeToExecuted: logTimeToExecuted, logTimeToFinalizedExecuted: logTimeToFinalizedExecuted, + logTimeToSealed: logTimeToSealed, timeToFinalized: promauto.NewSummary(prometheus.SummaryOpts{ Name: "time_to_finalized_seconds", Namespace: namespaceAccess, @@ -84,6 +95,20 @@ func NewTransactionCollector(transactionTimings mempool.TransactionTimings, log AgeBuckets: 5, BufCap: 500, }), + timeToSealed: promauto.NewSummary(prometheus.SummaryOpts{ + Name: "time_to_seal_seconds", + Namespace: namespaceAccess, + Subsystem: subsystemTransactionTiming, + Help: "the duration between when the transaction was received and when it was sealed", + Objectives: map[float64]float64{ + 0.01: 0.001, + 0.5: 0.05, + 0.99: 0.001, + }, + MaxAge: 10 * time.Minute, + AgeBuckets: 5, + BufCap: 500, + }), transactionSubmission: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "transaction_submission", Namespace: namespaceAccess, @@ -97,6 +122,18 @@ func NewTransactionCollector(transactionTimings mempool.TransactionTimings, log Help: "histogram for the duration in ms of the round trip time for executing a script", Buckets: []float64{1, 100, 500, 1000, 2000, 5000}, }, []string{"script_size"}), + scriptExecutionErrorOnExecutor: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "script_execution_error_executor", + Namespace: namespaceAccess, + Subsystem: subsystemTransactionSubmission, + Help: "counter for the internal errors while executing a script", + }, []string{"source"}), + scriptExecutionComparison: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "script_execution_comparison", + Namespace: namespaceAccess, + Subsystem: subsystemTransactionSubmission, + Help: "counter for the comparison outcomes of executing a script locally and on the execution node", + }, []string{"outcome"}), transactionResultDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ Name: "transaction_result_fetched_duration", Namespace: namespaceAccess, @@ -116,18 +153,13 @@ func NewTransactionCollector(transactionTimings mempool.TransactionTimings, log Subsystem: subsystemTransactionSubmission, Help:
"histogram for the transaction size in kb of scripts used in GetTransactionResult", }), - maxReceiptHeight: promauto.NewGauge(prometheus.GaugeOpts{ - Name: "max_receipt_height", - Namespace: namespaceAccess, - Subsystem: subsystemIngestion, - Help: "gauge to track the maximum block height of execution receipts received", - }), - maxReceiptHeightValue: counters.NewMonotonousCounter(0), } return tc } +// Script exec metrics + func (tc *TransactionCollector) ScriptExecuted(dur time.Duration, size int) { // record the execute script duration and script size tc.scriptSize.Observe(float64(size / 1024)) @@ -136,6 +168,43 @@ func (tc *TransactionCollector) ScriptExecuted(dur time.Duration, size int) { }).Observe(float64(dur.Milliseconds())) } +func (tc *TransactionCollector) ScriptExecutionErrorLocal() { + // record the execution error count + tc.scriptExecutionErrorOnExecutor.WithLabelValues("local").Inc() +} + +func (tc *TransactionCollector) ScriptExecutionErrorOnExecutionNode() { + // record the execution error count + tc.scriptExecutionErrorOnExecutor.WithLabelValues("execution").Inc() +} + +func (tc *TransactionCollector) ScriptExecutionResultMismatch() { + // record the execution error count + tc.scriptExecutionComparison.WithLabelValues("result_mismatch").Inc() +} + +func (tc *TransactionCollector) ScriptExecutionResultMatch() { + // record the execution error count + tc.scriptExecutionComparison.WithLabelValues("result_match").Inc() +} +func (tc *TransactionCollector) ScriptExecutionErrorMismatch() { + // record the execution error count + tc.scriptExecutionComparison.WithLabelValues("error_mismatch").Inc() +} + +func (tc *TransactionCollector) ScriptExecutionErrorMatch() { + // record the execution error count + tc.scriptExecutionComparison.WithLabelValues("error_match").Inc() +} + +// ScriptExecutionNotIndexed records script execution matches where data for the block is not +// indexed locally yet +func (tc *TransactionCollector) ScriptExecutionNotIndexed() { + tc.scriptExecutionComparison.WithLabelValues("not_indexed").Inc() +} + +// TransactionResult metrics + func (tc *TransactionCollector) TransactionResultFetched(dur time.Duration, size int) { // record the transaction result duration and transaction script/payload size tc.transactionSize.Observe(float64(size / 1024)) @@ -161,7 +230,7 @@ func (tc *TransactionCollector) sizeLabel(size int) string { func (tc *TransactionCollector) TransactionReceived(txID flow.Identifier, when time.Time) { // we don't need to check whether the transaction timing already exists, it will not be overwritten by the mempool - added := tc.transactionTimings.Add(&flow.TransactionTiming{TransactionID: txID, Received: when}) + added := tc.transactionTimings.Add(txID, &flow.TransactionTiming{TransactionID: txID, Received: when}) if !added { tc.log.Warn(). Str("transaction_id", txID.String()). 
@@ -189,11 +258,6 @@ func (tc *TransactionCollector) TransactionFinalized(txID flow.Identifier, when tc.trackTTF(t, tc.logTimeToFinalized) tc.trackTTFE(t, tc.logTimeToFinalizedExecuted) - - // remove transaction timing from mempool if finalized and executed - if !t.Finalized.IsZero() && !t.Executed.IsZero() { - tc.transactionTimings.Remove(txID) - } } func (tc *TransactionCollector) TransactionExecuted(txID flow.Identifier, when time.Time) { @@ -211,11 +275,25 @@ func (tc *TransactionCollector) TransactionExecuted(txID flow.Identifier, when t tc.trackTTE(t, tc.logTimeToExecuted) tc.trackTTFE(t, tc.logTimeToFinalizedExecuted) +} - // remove transaction timing from mempool if finalized and executed - if !t.Finalized.IsZero() && !t.Executed.IsZero() { - tc.transactionTimings.Remove(txID) +func (tc *TransactionCollector) TransactionSealed(txID flow.Identifier, when time.Time) { + t, updated := tc.transactionTimings.Adjust(txID, func(t *flow.TransactionTiming) *flow.TransactionTiming { + t.Sealed = when + return t + }) + + if !updated { + tc.log.Debug(). + Str("transaction_id", txID.String()). + Msg("failed to update TransactionSealed metric") + return } + + tc.trackTTS(t, tc.logTimeToSealed) + + // remove transaction timing from mempool + tc.transactionTimings.Remove(txID) } func (tc *TransactionCollector) trackTTF(t *flow.TransactionTiming, log bool) { @@ -266,12 +344,26 @@ func (tc *TransactionCollector) trackTTFE(t *flow.TransactionTiming, log bool) { } } +func (tc *TransactionCollector) trackTTS(t *flow.TransactionTiming, log bool) { + if t.Received.IsZero() || t.Sealed.IsZero() { + return + } + duration := t.Sealed.Sub(t.Received).Seconds() + + tc.timeToSealed.Observe(duration) + + if log { + tc.log.Info().Str("transaction_id", t.TransactionID.String()).Float64("duration", duration). 
+ Msg("transaction time to sealed") + } +} + func (tc *TransactionCollector) TransactionSubmissionFailed() { tc.transactionSubmission.WithLabelValues("failed").Inc() } func (tc *TransactionCollector) TransactionExpired(txID flow.Identifier) { - _, exist := tc.transactionTimings.ByID(txID) + _, exist := tc.transactionTimings.Get(txID) if !exist { // likely previously removed, either executed or expired @@ -280,9 +372,3 @@ func (tc *TransactionCollector) TransactionExpired(txID flow.Identifier) { tc.transactionSubmission.WithLabelValues("expired").Inc() tc.transactionTimings.Remove(txID) } - -func (tc *TransactionCollector) UpdateExecutionReceiptMaxHeight(height uint64) { - if tc.maxReceiptHeightValue.Set(height) { - tc.maxReceiptHeight.Set(float64(height)) - } -} diff --git a/module/metrics/transaction_error_messages.go b/module/metrics/transaction_error_messages.go new file mode 100644 index 00000000000..9b7b0167fac --- /dev/null +++ b/module/metrics/transaction_error_messages.go @@ -0,0 +1,94 @@ +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module" +) + +var _ module.TransactionErrorMessagesMetrics = (*TransactionErrorMessagesCollector)(nil) + +type TransactionErrorMessagesCollector struct { + fetchDuration prometheus.Histogram + downloadsInProgress prometheus.Gauge + highestIndexedHeight prometheus.Gauge + downloadRetries prometheus.Counter + failedDownloads prometheus.Counter +} + +func NewTransactionErrorMessagesCollector() *TransactionErrorMessagesCollector { + + fetchDuration := promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemTxErrorFetcher, + Name: "tx_errors_download_duration_ms", + Help: "the duration of transaction error messages download operation", + Buckets: []float64{1, 100, 500, 1000, 2000, 5000, 20000}, + }) + + highestIndexedHeight := promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemTxErrorFetcher, + Name: "tx_errors_indexed_height", + Help: "highest consecutive finalized block height with all transaction errors indexed", + }) + + downloadsInProgress := promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemTxErrorFetcher, + Name: "tx_errors_in_progress_downloads", + Help: "number of concurrently running transaction error messages download operations", + }) + + downloadRetries := promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemTxErrorFetcher, + Name: "tx_errors_download_retries_total", + Help: "number of transaction error messages download retries", + }) + + failedDownloads := promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespaceAccess, + Subsystem: subsystemTxErrorFetcher, + Name: "tx_errors_failed_downloads_total", + Help: "number of failed transaction error messages downloads", + }) + + return &TransactionErrorMessagesCollector{ + fetchDuration: fetchDuration, + highestIndexedHeight: highestIndexedHeight, + downloadsInProgress: downloadsInProgress, + downloadRetries: downloadRetries, + failedDownloads: failedDownloads, + } +} + +func (c *TransactionErrorMessagesCollector) TxErrorsInitialHeight(height uint64) { + c.highestIndexedHeight.Set(float64(height)) +} + +// TxErrorsFetchStarted records that a transaction error messages download has started. 
+func (c *TransactionErrorMessagesCollector) TxErrorsFetchStarted() { + c.downloadsInProgress.Inc() +} + +// TxErrorsFetchFinished records that a transaction error messages download has finished. +// Pass the highest consecutive height to ensure the metrics reflect the height up to which the +// requester has completed downloads. This allows us to easily see when downloading gets stuck. +func (c *TransactionErrorMessagesCollector) TxErrorsFetchFinished(duration time.Duration, success bool, height uint64) { + c.downloadsInProgress.Dec() + c.fetchDuration.Observe(float64(duration.Milliseconds())) + if success { + c.highestIndexedHeight.Set(float64(height)) + } else { + c.failedDownloads.Inc() + } +} + +// TxErrorsFetchRetried records that a transaction error messages download has been retried. +func (c *TransactionErrorMessagesCollector) TxErrorsFetchRetried() { + c.downloadRetries.Inc() +} diff --git a/module/metrics/transaction_validation.go b/module/metrics/transaction_validation.go new file mode 100644 index 00000000000..70cfe2d7a6a --- /dev/null +++ b/module/metrics/transaction_validation.go @@ -0,0 +1,59 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module" +) + +type NamespaceType string + +// TransactionValidationCollector collects metrics for transaction validation functionality +type TransactionValidationCollector struct { + transactionValidated prometheus.Counter + transactionValidationSkipped prometheus.Counter + transactionValidationFailed *prometheus.CounterVec +} + +// interface check +var _ module.TransactionValidationMetrics = (*TransactionValidationCollector)(nil) + +// NewTransactionValidationCollector creates a new instance of TransactionValidationCollector +func NewTransactionValidationCollector() *TransactionValidationCollector { + return &TransactionValidationCollector{ + transactionValidated: promauto.NewCounter(prometheus.CounterOpts{ + Name: "transaction_validation_successes_total", + Namespace: namespaceAccess, + Subsystem: subsystemTransactionValidation, + Help: "counter for the validated transactions", + }), + transactionValidationSkipped: promauto.NewCounter(prometheus.CounterOpts{ + Name: "transaction_validation_skipped_total", + Namespace: namespaceAccess, + Subsystem: subsystemTransactionValidation, + Help: "counter for the skipped transaction validations", + }), + transactionValidationFailed: promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "transaction_validation_failed_total", + Namespace: namespaceAccess, + Subsystem: subsystemTransactionValidation, + Help: "counter for failed transaction validations", + }, []string{"reason"}), + } +} + +// TransactionValidated tracks the number of successfully validated transactions +func (tc *TransactionValidationCollector) TransactionValidated() { + tc.transactionValidated.Inc() +} + +// TransactionValidationFailed tracks the number of transactions that failed validation, labeled by reason +func (tc *TransactionValidationCollector) TransactionValidationFailed(reason string) { + tc.transactionValidationFailed.WithLabelValues(reason).Inc() +} + +// TransactionValidationSkipped tracks the number of skipped transaction validations +func (tc *TransactionValidationCollector) TransactionValidationSkipped() { + tc.transactionValidationSkipped.Inc() +} diff --git a/module/metrics/unicast_manager.go b/module/metrics/unicast_manager.go index f790996d490..4f1ef04ec52 100644 --- a/module/metrics/unicast_manager.go +++
b/module/metrics/unicast_manager.go @@ -23,6 +23,14 @@ type UnicastManagerMetrics struct { createStreamOnConnRetries *prometheus.HistogramVec // Tracks the time it takes to create the stream after peer dialing completes and a connection is established. createStreamOnConnTime *prometheus.HistogramVec + // Tracks the history of the stream retry budget updates. + streamRetryBudgetUpdates prometheus.Histogram + // Tracks the history of the dial retry budget updates. + dialRetryBudgetUpdates prometheus.Histogram + // Tracks the number of times the dial retry budget is reset to default. + dialRetryBudgetResetToDefault prometheus.Counter + // Tracks the number of times the stream creation retry budget is reset to default. + streamCreationRetryBudgetResetToDefault prometheus.Counter prefix string } @@ -92,6 +100,42 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { }, []string{LabelSuccess}, ) + uc.streamRetryBudgetUpdates = promauto.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: uc.prefix + "stream_creation_retry_budget", + Help: "the history of the stream retry budget updates", + Buckets: []float64{1, 2, 3, 4, 5, 10}, + }, + ) + + uc.dialRetryBudgetUpdates = promauto.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: uc.prefix + "dial_retry_budget", + Help: "the history of the dial retry budget updates", + Buckets: []float64{1, 2, 3, 4, 5, 10}, + }, + ) + + uc.streamCreationRetryBudgetResetToDefault = promauto.NewCounter( + prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: uc.prefix + "stream_creation_retry_budget_reset_to_default_total", + Help: "the number of times the stream creation retry budget is reset to default by the unicast manager", + }) + + uc.dialRetryBudgetResetToDefault = promauto.NewCounter( + prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, + Name: uc.prefix + "dial_retry_budget_reset_to_default_total", + Help: "the number of times the dial retry budget is reset to default by the unicast manager", + }) + return uc } @@ -134,3 +178,23 @@ func (u *UnicastManagerMetrics) OnEstablishStreamFailure(duration time.Duration, u.createStreamOnConnRetries.WithLabelValues("false").Observe(float64(attempts)) u.createStreamOnConnTime.WithLabelValues("false").Observe(duration.Seconds()) } + +// OnStreamCreationRetryBudgetUpdated tracks the history of the stream creation retry budget updates. +func (u *UnicastManagerMetrics) OnStreamCreationRetryBudgetUpdated(budget uint64) { + u.streamRetryBudgetUpdates.Observe(float64(budget)) +} + +// OnDialRetryBudgetUpdated tracks the history of the dial retry budget updates. +func (u *UnicastManagerMetrics) OnDialRetryBudgetUpdated(budget uint64) { + u.dialRetryBudgetUpdates.Observe(float64(budget)) +} + +// OnDialRetryBudgetResetToDefault tracks the number of times the dial retry budget is reset to default. +func (u *UnicastManagerMetrics) OnDialRetryBudgetResetToDefault() { + u.dialRetryBudgetResetToDefault.Inc() +} + +// OnStreamCreationRetryBudgetResetToDefault tracks the number of times the stream creation retry budget is reset to default.
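A hedged sketch of the caller side of these hooks; the default budget constant and the reset condition are paraphrased assumptions, only the metric methods come from this file:

```go
package main

import "github.com/onflow/flow-go/module/metrics"

// defaultDialBudget is a hypothetical default, not taken from this diff.
const defaultDialBudget uint64 = 3

// reportDialBudget sketches how a unicast manager might report a budget
// change: count a reset to default, then record the new budget value in
// the histogram so its distribution over time is visible.
func reportDialBudget(m *metrics.UnicastManagerMetrics, budget uint64) {
	if budget == 0 {
		budget = defaultDialBudget
		m.OnDialRetryBudgetResetToDefault() // count the reset to default
	}
	m.OnDialRetryBudgetUpdated(budget) // record the new budget in the histogram
}

func main() {
	m := metrics.NewUnicastManagerMetrics("access_") // prefix is illustrative
	reportDialBudget(m, 0)
}
```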
+func (u *UnicastManagerMetrics) OnStreamCreationRetryBudgetResetToDefault() { + u.streamCreationRetryBudgetResetToDefault.Inc() +} diff --git a/module/mock/access_metrics.go b/module/mock/access_metrics.go index c6e25585e6a..47145fd21b4 100644 --- a/module/mock/access_metrics.go +++ b/module/mock/access_metrics.go @@ -1,56 +1,184 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock -import mock "github.com/stretchr/testify/mock" +import ( + context "context" + + flow "github.com/onflow/flow-go/model/flow" + metrics "github.com/slok/go-http-metrics/metrics" + + mock "github.com/stretchr/testify/mock" + + time "time" +) // AccessMetrics is an autogenerated mock type for the AccessMetrics type type AccessMetrics struct { mock.Mock } -// ConnectionAddedToPool provides a mock function with given fields: +// AddInflightRequests provides a mock function with given fields: ctx, props, quantity +func (_m *AccessMetrics) AddInflightRequests(ctx context.Context, props metrics.HTTPProperties, quantity int) { + _m.Called(ctx, props, quantity) +} + +// AddTotalRequests provides a mock function with given fields: ctx, method, routeName +func (_m *AccessMetrics) AddTotalRequests(ctx context.Context, method string, routeName string) { + _m.Called(ctx, method, routeName) +} + +// ConnectionAddedToPool provides a mock function with no fields func (_m *AccessMetrics) ConnectionAddedToPool() { _m.Called() } -// ConnectionFromPoolEvicted provides a mock function with given fields: +// ConnectionFromPoolEvicted provides a mock function with no fields func (_m *AccessMetrics) ConnectionFromPoolEvicted() { _m.Called() } -// ConnectionFromPoolInvalidated provides a mock function with given fields: +// ConnectionFromPoolInvalidated provides a mock function with no fields func (_m *AccessMetrics) ConnectionFromPoolInvalidated() { _m.Called() } -// ConnectionFromPoolReused provides a mock function with given fields: +// ConnectionFromPoolReused provides a mock function with no fields func (_m *AccessMetrics) ConnectionFromPoolReused() { _m.Called() } -// ConnectionFromPoolUpdated provides a mock function with given fields: +// ConnectionFromPoolUpdated provides a mock function with no fields func (_m *AccessMetrics) ConnectionFromPoolUpdated() { _m.Called() } -// NewConnectionEstablished provides a mock function with given fields: +// NewConnectionEstablished provides a mock function with no fields func (_m *AccessMetrics) NewConnectionEstablished() { _m.Called() } +// ObserveHTTPRequestDuration provides a mock function with given fields: ctx, props, duration +func (_m *AccessMetrics) ObserveHTTPRequestDuration(ctx context.Context, props metrics.HTTPReqProperties, duration time.Duration) { + _m.Called(ctx, props, duration) +} + +// ObserveHTTPResponseSize provides a mock function with given fields: ctx, props, sizeBytes +func (_m *AccessMetrics) ObserveHTTPResponseSize(ctx context.Context, props metrics.HTTPReqProperties, sizeBytes int64) { + _m.Called(ctx, props, sizeBytes) +} + +// ScriptExecuted provides a mock function with given fields: dur, size +func (_m *AccessMetrics) ScriptExecuted(dur time.Duration, size int) { + _m.Called(dur, size) +} + +// ScriptExecutionErrorLocal provides a mock function with no fields +func (_m *AccessMetrics) ScriptExecutionErrorLocal() { + _m.Called() +} + +// ScriptExecutionErrorMatch provides a mock function with no fields +func (_m *AccessMetrics) ScriptExecutionErrorMatch() { + _m.Called() +} + +// 
ScriptExecutionErrorMismatch provides a mock function with no fields +func (_m *AccessMetrics) ScriptExecutionErrorMismatch() { + _m.Called() +} + +// ScriptExecutionErrorOnExecutionNode provides a mock function with no fields +func (_m *AccessMetrics) ScriptExecutionErrorOnExecutionNode() { + _m.Called() +} + +// ScriptExecutionNotIndexed provides a mock function with no fields +func (_m *AccessMetrics) ScriptExecutionNotIndexed() { + _m.Called() +} + +// ScriptExecutionResultMatch provides a mock function with no fields +func (_m *AccessMetrics) ScriptExecutionResultMatch() { + _m.Called() +} + +// ScriptExecutionResultMismatch provides a mock function with no fields +func (_m *AccessMetrics) ScriptExecutionResultMismatch() { + _m.Called() +} + // TotalConnectionsInPool provides a mock function with given fields: connectionCount, connectionPoolSize func (_m *AccessMetrics) TotalConnectionsInPool(connectionCount uint, connectionPoolSize uint) { _m.Called(connectionCount, connectionPoolSize) } -type mockConstructorTestingTNewAccessMetrics interface { - mock.TestingT - Cleanup(func()) +// TransactionExecuted provides a mock function with given fields: txID, when +func (_m *AccessMetrics) TransactionExecuted(txID flow.Identifier, when time.Time) { + _m.Called(txID, when) +} + +// TransactionExpired provides a mock function with given fields: txID +func (_m *AccessMetrics) TransactionExpired(txID flow.Identifier) { + _m.Called(txID) +} + +// TransactionFinalized provides a mock function with given fields: txID, when +func (_m *AccessMetrics) TransactionFinalized(txID flow.Identifier, when time.Time) { + _m.Called(txID, when) +} + +// TransactionReceived provides a mock function with given fields: txID, when +func (_m *AccessMetrics) TransactionReceived(txID flow.Identifier, when time.Time) { + _m.Called(txID, when) +} + +// TransactionResultFetched provides a mock function with given fields: dur, size +func (_m *AccessMetrics) TransactionResultFetched(dur time.Duration, size int) { + _m.Called(dur, size) +} + +// TransactionSealed provides a mock function with given fields: txID, when +func (_m *AccessMetrics) TransactionSealed(txID flow.Identifier, when time.Time) { + _m.Called(txID, when) +} + +// TransactionSubmissionFailed provides a mock function with no fields +func (_m *AccessMetrics) TransactionSubmissionFailed() { + _m.Called() +} + +// TransactionValidated provides a mock function with no fields +func (_m *AccessMetrics) TransactionValidated() { + _m.Called() +} + +// TransactionValidationFailed provides a mock function with given fields: reason +func (_m *AccessMetrics) TransactionValidationFailed(reason string) { + _m.Called(reason) +} + +// TransactionValidationSkipped provides a mock function with no fields +func (_m *AccessMetrics) TransactionValidationSkipped() { + _m.Called() +} + +// UpdateExecutionReceiptMaxHeight provides a mock function with given fields: height +func (_m *AccessMetrics) UpdateExecutionReceiptMaxHeight(height uint64) { + _m.Called(height) +} + +// UpdateLastFullBlockHeight provides a mock function with given fields: height +func (_m *AccessMetrics) UpdateLastFullBlockHeight(height uint64) { + _m.Called(height) } // NewAccessMetrics creates a new instance of AccessMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAccessMetrics(t mockConstructorTestingTNewAccessMetrics) *AccessMetrics { +// The first argument is typically a *testing.T value. 
+func NewAccessMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *AccessMetrics { mock := &AccessMetrics{} mock.Mock.Test(t) diff --git a/module/mock/alsp_metrics.go b/module/mock/alsp_metrics.go index 937a210d61a..0c202466bed 100644 --- a/module/mock/alsp_metrics.go +++ b/module/mock/alsp_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -14,13 +14,12 @@ func (_m *AlspMetrics) OnMisbehaviorReported(channel string, misbehaviorType str _m.Called(channel, misbehaviorType) } -type mockConstructorTestingTNewAlspMetrics interface { +// NewAlspMetrics creates a new instance of AlspMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAlspMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewAlspMetrics creates a new instance of AlspMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAlspMetrics(t mockConstructorTestingTNewAlspMetrics) *AlspMetrics { +}) *AlspMetrics { mock := &AlspMetrics{} mock.Mock.Test(t) diff --git a/module/mock/backend_scripts_metrics.go b/module/mock/backend_scripts_metrics.go index c2d30cea955..0b981dd282e 100644 --- a/module/mock/backend_scripts_metrics.go +++ b/module/mock/backend_scripts_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -18,13 +18,47 @@ func (_m *BackendScriptsMetrics) ScriptExecuted(dur time.Duration, size int) { _m.Called(dur, size) } -type mockConstructorTestingTNewBackendScriptsMetrics interface { - mock.TestingT - Cleanup(func()) +// ScriptExecutionErrorLocal provides a mock function with no fields +func (_m *BackendScriptsMetrics) ScriptExecutionErrorLocal() { + _m.Called() +} + +// ScriptExecutionErrorMatch provides a mock function with no fields +func (_m *BackendScriptsMetrics) ScriptExecutionErrorMatch() { + _m.Called() +} + +// ScriptExecutionErrorMismatch provides a mock function with no fields +func (_m *BackendScriptsMetrics) ScriptExecutionErrorMismatch() { + _m.Called() +} + +// ScriptExecutionErrorOnExecutionNode provides a mock function with no fields +func (_m *BackendScriptsMetrics) ScriptExecutionErrorOnExecutionNode() { + _m.Called() +} + +// ScriptExecutionNotIndexed provides a mock function with no fields +func (_m *BackendScriptsMetrics) ScriptExecutionNotIndexed() { + _m.Called() +} + +// ScriptExecutionResultMatch provides a mock function with no fields +func (_m *BackendScriptsMetrics) ScriptExecutionResultMatch() { + _m.Called() +} + +// ScriptExecutionResultMismatch provides a mock function with no fields +func (_m *BackendScriptsMetrics) ScriptExecutionResultMismatch() { + _m.Called() } // NewBackendScriptsMetrics creates a new instance of BackendScriptsMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBackendScriptsMetrics(t mockConstructorTestingTNewBackendScriptsMetrics) *BackendScriptsMetrics { +// The first argument is typically a *testing.T value. 
+func NewBackendScriptsMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *BackendScriptsMetrics { mock := &BackendScriptsMetrics{} mock.Mock.Test(t) diff --git a/module/mock/bitswap_metrics.go b/module/mock/bitswap_metrics.go index 146a3398144..acaaefc1362 100644 --- a/module/mock/bitswap_metrics.go +++ b/module/mock/bitswap_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -54,13 +54,12 @@ func (_m *BitswapMetrics) Wantlist(prefix string, n int) { _m.Called(prefix, n) } -type mockConstructorTestingTNewBitswapMetrics interface { +// NewBitswapMetrics creates a new instance of BitswapMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBitswapMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewBitswapMetrics creates a new instance of BitswapMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBitswapMetrics(t mockConstructorTestingTNewBitswapMetrics) *BitswapMetrics { +}) *BitswapMetrics { mock := &BitswapMetrics{} mock.Mock.Test(t) diff --git a/module/mock/block_iterator.go b/module/mock/block_iterator.go new file mode 100644 index 00000000000..1efa21af3c7 --- /dev/null +++ b/module/mock/block_iterator.go @@ -0,0 +1,127 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// BlockIterator is an autogenerated mock type for the BlockIterator type +type BlockIterator struct { + mock.Mock +} + +// Checkpoint provides a mock function with no fields +func (_m *BlockIterator) Checkpoint() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Checkpoint") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Next provides a mock function with no fields +func (_m *BlockIterator) Next() (flow.Identifier, bool, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Next") + } + + var r0 flow.Identifier + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func() (flow.Identifier, bool, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Progress provides a mock function with no fields +func (_m *BlockIterator) Progress() (uint64, uint64, uint64) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Progress") + } + + var r0 uint64 + var r1 uint64 + var r2 uint64 + if rf, ok := ret.Get(0).(func() (uint64, uint64, uint64)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() uint64); ok { + r1 = rf() + } else { + r1 = 
ret.Get(1).(uint64) + } + + if rf, ok := ret.Get(2).(func() uint64); ok { + r2 = rf() + } else { + r2 = ret.Get(2).(uint64) + } + + return r0, r1, r2 +} + +// NewBlockIterator creates a new instance of BlockIterator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockIterator(t interface { + mock.TestingT + Cleanup(func()) +}) *BlockIterator { + mock := &BlockIterator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/block_requester.go b/module/mock/block_requester.go index f877a2fcdb0..32adf7b6093 100644 --- a/module/mock/block_requester.go +++ b/module/mock/block_requester.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -27,13 +27,12 @@ func (_m *BlockRequester) RequestHeight(height uint64) { _m.Called(height) } -type mockConstructorTestingTNewBlockRequester interface { +// NewBlockRequester creates a new instance of BlockRequester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockRequester(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlockRequester creates a new instance of BlockRequester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlockRequester(t mockConstructorTestingTNewBlockRequester) *BlockRequester { +}) *BlockRequester { mock := &BlockRequester{} mock.Mock.Test(t) diff --git a/module/mock/builder.go b/module/mock/builder.go index ad65271ddd7..3e79501103d 100644 --- a/module/mock/builder.go +++ b/module/mock/builder.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -12,25 +12,29 @@ type Builder struct { mock.Mock } -// BuildOn provides a mock function with given fields: parentID, setter -func (_m *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) error) (*flow.Header, error) { - ret := _m.Called(parentID, setter) +// BuildOn provides a mock function with given fields: parentID, setter, sign +func (_m *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.HeaderBodyBuilder) error, sign func(*flow.Header) ([]byte, error)) (*flow.ProposalHeader, error) { + ret := _m.Called(parentID, setter, sign) - var r0 *flow.Header + if len(ret) == 0 { + panic("no return value specified for BuildOn") + } + + var r0 *flow.ProposalHeader var r1 error - if rf, ok := ret.Get(0).(func(flow.Identifier, func(*flow.Header) error) (*flow.Header, error)); ok { - return rf(parentID, setter) + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*flow.HeaderBodyBuilder) error, func(*flow.Header) ([]byte, error)) (*flow.ProposalHeader, error)); ok { + return rf(parentID, setter, sign) } - if rf, ok := ret.Get(0).(func(flow.Identifier, func(*flow.Header) error) *flow.Header); ok { - r0 = rf(parentID, setter) + if rf, ok := ret.Get(0).(func(flow.Identifier, func(*flow.HeaderBodyBuilder) error, func(*flow.Header) ([]byte, error)) *flow.ProposalHeader); ok { + r0 = rf(parentID, setter, sign) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Header) + r0 = ret.Get(0).(*flow.ProposalHeader) } } - if rf, ok := ret.Get(1).(func(flow.Identifier, func(*flow.Header) error) error); ok { - r1 = rf(parentID, setter) + if rf, ok := ret.Get(1).(func(flow.Identifier, func(*flow.HeaderBodyBuilder) error, func(*flow.Header) ([]byte, error)) error); ok { + r1 = rf(parentID, setter, sign) } else { r1 = ret.Error(1) } @@ -38,13 +42,12 @@ func (_m *Builder) BuildOn(parentID flow.Identifier, setter func(*flow.Header) e return r0, r1 } -type mockConstructorTestingTNewBuilder interface { +// NewBuilder creates a new instance of Builder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBuilder(t interface { mock.TestingT Cleanup(func()) -} - -// NewBuilder creates a new instance of Builder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBuilder(t mockConstructorTestingTNewBuilder) *Builder { +}) *Builder { mock := &Builder{} mock.Mock.Test(t) diff --git a/module/mock/cache_metrics.go b/module/mock/cache_metrics.go index 035f136bddc..4c14a03f9f7 100644 --- a/module/mock/cache_metrics.go +++ b/module/mock/cache_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -29,13 +29,12 @@ func (_m *CacheMetrics) CacheNotFound(resource string) { _m.Called(resource) } -type mockConstructorTestingTNewCacheMetrics interface { +// NewCacheMetrics creates a new instance of CacheMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCacheMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewCacheMetrics creates a new instance of CacheMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
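Stepping back to the Builder mock above: BuildOn now takes a HeaderBodyBuilder setter plus a signing callback and returns *flow.ProposalHeader. A hedged test sketch against the regenerated mock; the empty proposal is a placeholder, not a validly constructed header:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/onflow/flow-go/model/flow"
	mockmodule "github.com/onflow/flow-go/module/mock"
)

func TestBuilderBuildOn(t *testing.T) {
	builder := mockmodule.NewBuilder(t)

	proposal := &flow.ProposalHeader{} // placeholder return value
	builder.
		On("BuildOn", mock.Anything, mock.Anything, mock.Anything).
		Return(proposal, nil).
		Once()

	header, err := builder.BuildOn(
		flow.Identifier{}, // parent ID
		func(*flow.HeaderBodyBuilder) error { return nil },      // setter
		func(*flow.Header) ([]byte, error) { return nil, nil },  // sign
	)
	if err != nil || header != proposal {
		t.Fatalf("unexpected result: %v, %v", header, err)
	}
}
```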
-func NewCacheMetrics(t mockConstructorTestingTNewCacheMetrics) *CacheMetrics { +}) *CacheMetrics { mock := &CacheMetrics{} mock.Mock.Test(t) diff --git a/module/mock/chain_sync_metrics.go b/module/mock/chain_sync_metrics.go index 47b2192ddb9..8f57a1ffe71 100644 --- a/module/mock/chain_sync_metrics.go +++ b/module/mock/chain_sync_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -37,13 +37,12 @@ func (_m *ChainSyncMetrics) RangeRequested(ran chainsync.Range) { _m.Called(ran) } -type mockConstructorTestingTNewChainSyncMetrics interface { +// NewChainSyncMetrics creates a new instance of ChainSyncMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChainSyncMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewChainSyncMetrics creates a new instance of ChainSyncMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChainSyncMetrics(t mockConstructorTestingTNewChainSyncMetrics) *ChainSyncMetrics { +}) *ChainSyncMetrics { mock := &ChainSyncMetrics{} mock.Mock.Test(t) diff --git a/module/mock/chunk_assigner.go b/module/mock/chunk_assigner.go index 3acd354caf9..91fd50d04cd 100644 --- a/module/mock/chunk_assigner.go +++ b/module/mock/chunk_assigner.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -18,6 +18,10 @@ type ChunkAssigner struct { func (_m *ChunkAssigner) Assign(result *flow.ExecutionResult, blockID flow.Identifier) (*chunks.Assignment, error) { ret := _m.Called(result, blockID) + if len(ret) == 0 { + panic("no return value specified for Assign") + } + var r0 *chunks.Assignment var r1 error if rf, ok := ret.Get(0).(func(*flow.ExecutionResult, flow.Identifier) (*chunks.Assignment, error)); ok { @@ -40,13 +44,12 @@ func (_m *ChunkAssigner) Assign(result *flow.ExecutionResult, blockID flow.Ident return r0, r1 } -type mockConstructorTestingTNewChunkAssigner interface { +// NewChunkAssigner creates a new instance of ChunkAssigner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChunkAssigner(t interface { mock.TestingT Cleanup(func()) -} - -// NewChunkAssigner creates a new instance of ChunkAssigner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChunkAssigner(t mockConstructorTestingTNewChunkAssigner) *ChunkAssigner { +}) *ChunkAssigner { mock := &ChunkAssigner{} mock.Mock.Test(t) diff --git a/module/mock/chunk_verifier.go b/module/mock/chunk_verifier.go index 0e3b163980d..7eb3eef557d 100644 --- a/module/mock/chunk_verifier.go +++ b/module/mock/chunk_verifier.go @@ -1,9 +1,8 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( - chunks "github.com/onflow/flow-go/model/chunks" mock "github.com/stretchr/testify/mock" verification "github.com/onflow/flow-go/model/verification" @@ -15,13 +14,16 @@ type ChunkVerifier struct { } // Verify provides a mock function with given fields: ch -func (_m *ChunkVerifier) Verify(ch *verification.VerifiableChunkData) ([]byte, chunks.ChunkFault, error) { +func (_m *ChunkVerifier) Verify(ch *verification.VerifiableChunkData) ([]byte, error) { ret := _m.Called(ch) + if len(ret) == 0 { + panic("no return value specified for Verify") + } + var r0 []byte - var r1 chunks.ChunkFault - var r2 error - if rf, ok := ret.Get(0).(func(*verification.VerifiableChunkData) ([]byte, chunks.ChunkFault, error)); ok { + var r1 error + if rf, ok := ret.Get(0).(func(*verification.VerifiableChunkData) ([]byte, error)); ok { return rf(ch) } if rf, ok := ret.Get(0).(func(*verification.VerifiableChunkData) []byte); ok { @@ -32,30 +34,21 @@ func (_m *ChunkVerifier) Verify(ch *verification.VerifiableChunkData) ([]byte, c } } - if rf, ok := ret.Get(1).(func(*verification.VerifiableChunkData) chunks.ChunkFault); ok { + if rf, ok := ret.Get(1).(func(*verification.VerifiableChunkData) error); ok { r1 = rf(ch) } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(chunks.ChunkFault) - } - } - - if rf, ok := ret.Get(2).(func(*verification.VerifiableChunkData) error); ok { - r2 = rf(ch) - } else { - r2 = ret.Error(2) + r1 = ret.Error(1) } - return r0, r1, r2 + return r0, r1 } -type mockConstructorTestingTNewChunkVerifier interface { +// NewChunkVerifier creates a new instance of ChunkVerifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChunkVerifier(t interface { mock.TestingT Cleanup(func()) -} - -// NewChunkVerifier creates a new instance of ChunkVerifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChunkVerifier(t mockConstructorTestingTNewChunkVerifier) *ChunkVerifier { +}) *ChunkVerifier { mock := &ChunkVerifier{} mock.Mock.Test(t) diff --git a/module/mock/cleaner_metrics.go b/module/mock/cleaner_metrics.go index ad42918506e..ad8b75c392f 100644 --- a/module/mock/cleaner_metrics.go +++ b/module/mock/cleaner_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -18,13 +18,12 @@ func (_m *CleanerMetrics) RanGC(took time.Duration) { _m.Called(took) } -type mockConstructorTestingTNewCleanerMetrics interface { +// NewCleanerMetrics creates a new instance of CleanerMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCleanerMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewCleanerMetrics creates a new instance of CleanerMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCleanerMetrics(t mockConstructorTestingTNewCleanerMetrics) *CleanerMetrics { +}) *CleanerMetrics { mock := &CleanerMetrics{} mock.Mock.Test(t) diff --git a/module/mock/cluster_root_qc_voter.go b/module/mock/cluster_root_qc_voter.go index a2b709459af..6a4292094a9 100644 --- a/module/mock/cluster_root_qc_voter.go +++ b/module/mock/cluster_root_qc_voter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -16,11 +16,15 @@ type ClusterRootQCVoter struct {
 }

 // Vote provides a mock function with given fields: _a0, _a1
-func (_m *ClusterRootQCVoter) Vote(_a0 context.Context, _a1 protocol.Epoch) error {
+func (_m *ClusterRootQCVoter) Vote(_a0 context.Context, _a1 protocol.TentativeEpoch) error {
 	ret := _m.Called(_a0, _a1)

+	if len(ret) == 0 {
+		panic("no return value specified for Vote")
+	}
+
 	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context, protocol.Epoch) error); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, protocol.TentativeEpoch) error); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		r0 = ret.Error(0)
@@ -29,13 +33,12 @@ func (_m *ClusterRootQCVoter) Vote(_a0 context.Context, _a1 protocol.Epoch) erro
 	return r0
 }

-type mockConstructorTestingTNewClusterRootQCVoter interface {
+// NewClusterRootQCVoter creates a new instance of ClusterRootQCVoter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewClusterRootQCVoter(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewClusterRootQCVoter creates a new instance of ClusterRootQCVoter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewClusterRootQCVoter(t mockConstructorTestingTNewClusterRootQCVoter) *ClusterRootQCVoter {
+}) *ClusterRootQCVoter {
 	mock := &ClusterRootQCVoter{}
 	mock.Mock.Test(t)
diff --git a/module/mock/collection_executed_metric.go b/module/mock/collection_executed_metric.go
new file mode 100644
index 00000000000..aedfc4f4300
--- /dev/null
+++ b/module/mock/collection_executed_metric.go
@@ -0,0 +1,52 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock
+
+import (
+	flow "github.com/onflow/flow-go/model/flow"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// CollectionExecutedMetric is an autogenerated mock type for the CollectionExecutedMetric type
+type CollectionExecutedMetric struct {
+	mock.Mock
+}
+
+// BlockFinalized provides a mock function with given fields: block
+func (_m *CollectionExecutedMetric) BlockFinalized(block *flow.Block) {
+	_m.Called(block)
+}
+
+// CollectionExecuted provides a mock function with given fields: light
+func (_m *CollectionExecutedMetric) CollectionExecuted(light *flow.LightCollection) {
+	_m.Called(light)
+}
+
+// CollectionFinalized provides a mock function with given fields: light
+func (_m *CollectionExecutedMetric) CollectionFinalized(light *flow.LightCollection) {
+	_m.Called(light)
+}
+
+// ExecutionReceiptReceived provides a mock function with given fields: r
+func (_m *CollectionExecutedMetric) ExecutionReceiptReceived(r *flow.ExecutionReceipt) {
+	_m.Called(r)
+}
+
+// UpdateLastFullBlockHeight provides a mock function with given fields: height
+func (_m *CollectionExecutedMetric) UpdateLastFullBlockHeight(height uint64) {
+	_m.Called(height)
+}
+
+// NewCollectionExecutedMetric creates a new instance of CollectionExecutedMetric. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewCollectionExecutedMetric(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *CollectionExecutedMetric {
+	mock := &CollectionExecutedMetric{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/module/mock/collection_metrics.go b/module/mock/collection_metrics.go
index 3d1e0da64b6..1ed0c97621a 100644
--- a/module/mock/collection_metrics.go
+++ b/module/mock/collection_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -24,18 +24,37 @@ func (_m *CollectionMetrics) ClusterBlockProposed(block *cluster.Block) {
 	_m.Called(block)
 }

+// CollectionMaxSize provides a mock function with given fields: size
+func (_m *CollectionMetrics) CollectionMaxSize(size uint) {
+	_m.Called(size)
+}
+
 // TransactionIngested provides a mock function with given fields: txID
 func (_m *CollectionMetrics) TransactionIngested(txID flow.Identifier) {
 	_m.Called(txID)
 }

-type mockConstructorTestingTNewCollectionMetrics interface {
-	mock.TestingT
-	Cleanup(func())
+// TransactionValidated provides a mock function with no fields
+func (_m *CollectionMetrics) TransactionValidated() {
+	_m.Called()
+}
+
+// TransactionValidationFailed provides a mock function with given fields: reason
+func (_m *CollectionMetrics) TransactionValidationFailed(reason string) {
+	_m.Called(reason)
+}
+
+// TransactionValidationSkipped provides a mock function with no fields
+func (_m *CollectionMetrics) TransactionValidationSkipped() {
+	_m.Called()
 }

 // NewCollectionMetrics creates a new instance of CollectionMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewCollectionMetrics(t mockConstructorTestingTNewCollectionMetrics) *CollectionMetrics {
+// The first argument is typically a *testing.T value.
+func NewCollectionMetrics(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *CollectionMetrics {
 	mock := &CollectionMetrics{}
 	mock.Mock.Test(t)
diff --git a/module/mock/compliance_metrics.go b/module/mock/compliance_metrics.go
index 545394518a3..86bcf0a0676 100644
--- a/module/mock/compliance_metrics.go
+++ b/module/mock/compliance_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -22,24 +22,9 @@ func (_m *ComplianceMetrics) BlockSealed(_a0 *flow.Block) {
 	_m.Called(_a0)
 }

-// CommittedEpochFinalView provides a mock function with given fields: view
-func (_m *ComplianceMetrics) CommittedEpochFinalView(view uint64) {
-	_m.Called(view)
-}
-
-// CurrentDKGPhase1FinalView provides a mock function with given fields: view
-func (_m *ComplianceMetrics) CurrentDKGPhase1FinalView(view uint64) {
-	_m.Called(view)
-}
-
-// CurrentDKGPhase2FinalView provides a mock function with given fields: view
-func (_m *ComplianceMetrics) CurrentDKGPhase2FinalView(view uint64) {
-	_m.Called(view)
-}
-
-// CurrentDKGPhase3FinalView provides a mock function with given fields: view
-func (_m *ComplianceMetrics) CurrentDKGPhase3FinalView(view uint64) {
-	_m.Called(view)
+// CurrentDKGPhaseViews provides a mock function with given fields: phase1FinalView, phase2FinalView, phase3FinalView
+func (_m *ComplianceMetrics) CurrentDKGPhaseViews(phase1FinalView uint64, phase2FinalView uint64, phase3FinalView uint64) {
+	_m.Called(phase1FinalView, phase2FinalView, phase3FinalView)
 }

 // CurrentEpochCounter provides a mock function with given fields: counter
@@ -57,8 +42,13 @@ func (_m *ComplianceMetrics) CurrentEpochPhase(phase flow.EpochPhase) {
 	_m.Called(phase)
 }

-// EpochEmergencyFallbackTriggered provides a mock function with given fields:
-func (_m *ComplianceMetrics) EpochEmergencyFallbackTriggered() {
+// EpochFallbackModeExited provides a mock function with no fields
+func (_m *ComplianceMetrics) EpochFallbackModeExited() {
+	_m.Called()
+}
+
+// EpochFallbackModeTriggered provides a mock function with no fields
+func (_m *ComplianceMetrics) EpochFallbackModeTriggered() {
 	_m.Called()
 }

@@ -72,18 +62,22 @@ func (_m *ComplianceMetrics) FinalizedHeight(height uint64) {
 	_m.Called(height)
 }

+// ProtocolStateVersion provides a mock function with given fields: version
+func (_m *ComplianceMetrics) ProtocolStateVersion(version uint64) {
+	_m.Called(version)
+}
+
 // SealedHeight provides a mock function with given fields: height
 func (_m *ComplianceMetrics) SealedHeight(height uint64) {
 	_m.Called(height)
 }

-type mockConstructorTestingTNewComplianceMetrics interface {
+// NewComplianceMetrics creates a new instance of ComplianceMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewComplianceMetrics(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewComplianceMetrics creates a new instance of ComplianceMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewComplianceMetrics(t mockConstructorTestingTNewComplianceMetrics) *ComplianceMetrics {
+}) *ComplianceMetrics {
 	mock := &ComplianceMetrics{}
 	mock.Mock.Test(t)
diff --git a/module/mock/consensus_metrics.go b/module/mock/consensus_metrics.go
index 776b8d7315c..a6588cbdf28 100644
--- a/module/mock/consensus_metrics.go
+++ b/module/mock/consensus_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -19,7 +19,7 @@ func (_m *ConsensusMetrics) CheckSealingDuration(duration time.Duration) {
 	_m.Called(duration)
 }

-// EmergencySeal provides a mock function with given fields:
+// EmergencySeal provides a mock function with no fields
 func (_m *ConsensusMetrics) EmergencySeal() {
 	_m.Called()
 }
@@ -54,13 +54,12 @@ func (_m *ConsensusMetrics) StartCollectionToFinalized(collectionID flow.Identif
 	_m.Called(collectionID)
 }

-type mockConstructorTestingTNewConsensusMetrics interface {
+// NewConsensusMetrics creates a new instance of ConsensusMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewConsensusMetrics(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewConsensusMetrics creates a new instance of ConsensusMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewConsensusMetrics(t mockConstructorTestingTNewConsensusMetrics) *ConsensusMetrics {
+}) *ConsensusMetrics {
 	mock := &ConsensusMetrics{}
 	mock.Mock.Test(t)
diff --git a/module/mock/cruise_ctl_metrics.go b/module/mock/cruise_ctl_metrics.go
new file mode 100644
index 00000000000..e6e0541cede
--- /dev/null
+++ b/module/mock/cruise_ctl_metrics.go
@@ -0,0 +1,48 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock
+
+import (
+	mock "github.com/stretchr/testify/mock"
+
+	time "time"
+)
+
+// CruiseCtlMetrics is an autogenerated mock type for the CruiseCtlMetrics type
+type CruiseCtlMetrics struct {
+	mock.Mock
+}
+
+// ControllerOutput provides a mock function with given fields: duration
+func (_m *CruiseCtlMetrics) ControllerOutput(duration time.Duration) {
+	_m.Called(duration)
+}
+
+// PIDError provides a mock function with given fields: p, i, d
+func (_m *CruiseCtlMetrics) PIDError(p float64, i float64, d float64) {
+	_m.Called(p, i, d)
+}
+
+// ProposalPublicationDelay provides a mock function with given fields: duration
+func (_m *CruiseCtlMetrics) ProposalPublicationDelay(duration time.Duration) {
+	_m.Called(duration)
+}
+
+// TargetProposalDuration provides a mock function with given fields: duration
+func (_m *CruiseCtlMetrics) TargetProposalDuration(duration time.Duration) {
+	_m.Called(duration)
+}
+
+// NewCruiseCtlMetrics creates a new instance of CruiseCtlMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewCruiseCtlMetrics(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *CruiseCtlMetrics {
+	mock := &CruiseCtlMetrics{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/module/mock/dht_metrics.go b/module/mock/dht_metrics.go
index 7edd231020f..7fa90934124 100644
--- a/module/mock/dht_metrics.go
+++ b/module/mock/dht_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -9,23 +9,22 @@ type DHTMetrics struct {
 	mock.Mock
 }

-// RoutingTablePeerAdded provides a mock function with given fields:
+// RoutingTablePeerAdded provides a mock function with no fields
 func (_m *DHTMetrics) RoutingTablePeerAdded() {
 	_m.Called()
 }

-// RoutingTablePeerRemoved provides a mock function with given fields:
+// RoutingTablePeerRemoved provides a mock function with no fields
 func (_m *DHTMetrics) RoutingTablePeerRemoved() {
 	_m.Called()
 }

-type mockConstructorTestingTNewDHTMetrics interface {
+// NewDHTMetrics creates a new instance of DHTMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewDHTMetrics(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewDHTMetrics creates a new instance of DHTMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewDHTMetrics(t mockConstructorTestingTNewDHTMetrics) *DHTMetrics {
+}) *DHTMetrics {
 	mock := &DHTMetrics{}
 	mock.Mock.Test(t)
diff --git a/module/mock/dkg_broker.go b/module/mock/dkg_broker.go
index 788da3bbc1d..4169ca53452 100644
--- a/module/mock/dkg_broker.go
+++ b/module/mock/dkg_broker.go
@@ -1,9 +1,9 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

 import (
-	crypto "github.com/onflow/flow-go/crypto"
+	crypto "github.com/onflow/crypto"

 	flow "github.com/onflow/flow-go/model/flow"
 	messages "github.com/onflow/flow-go/model/messages"
@@ -21,20 +21,24 @@ func (_m *DKGBroker) Broadcast(data []byte) {
 	_m.Called(data)
 }

-// Disqualify provides a mock function with given fields: participant, log
-func (_m *DKGBroker) Disqualify(participant int, log string) {
-	_m.Called(participant, log)
+// Disqualify provides a mock function with given fields: index, log
+func (_m *DKGBroker) Disqualify(index int, log string) {
+	_m.Called(index, log)
 }

-// FlagMisbehavior provides a mock function with given fields: participant, log
-func (_m *DKGBroker) FlagMisbehavior(participant int, log string) {
-	_m.Called(participant, log)
+// FlagMisbehavior provides a mock function with given fields: index, log
+func (_m *DKGBroker) FlagMisbehavior(index int, log string) {
+	_m.Called(index, log)
 }

-// GetBroadcastMsgCh provides a mock function with given fields:
+// GetBroadcastMsgCh provides a mock function with no fields
 func (_m *DKGBroker) GetBroadcastMsgCh() <-chan messages.BroadcastDKGMessage {
 	ret := _m.Called()

+	if len(ret) == 0 {
+		panic("no return value specified for GetBroadcastMsgCh")
+	}
+
 	var r0 <-chan messages.BroadcastDKGMessage
 	if rf, ok := ret.Get(0).(func() <-chan messages.BroadcastDKGMessage); ok {
 		r0 = rf()
@@ -47,10 +51,14 @@ func (_m *DKGBroker) GetBroadcastMsgCh() <-chan messages.BroadcastDKGMessage {
 	return r0
 }

-// GetIndex provides a mock function with given fields:
+// GetIndex provides a mock function with no fields
 func (_m *DKGBroker) GetIndex() int {
 	ret := _m.Called()

+	if len(ret) == 0 {
+		panic("no return value specified for GetIndex")
+	}
+
 	var r0 int
 	if rf, ok := ret.Get(0).(func() int); ok {
 		r0 = rf()
@@ -61,10 +69,14 @@ func (_m *DKGBroker) GetIndex() int {
 	return r0
 }

-// GetPrivateMsgCh provides a mock function with given fields:
+// GetPrivateMsgCh provides a mock function with no fields
 func (_m *DKGBroker) GetPrivateMsgCh() <-chan messages.PrivDKGMessageIn {
 	ret := _m.Called()

+	if len(ret) == 0 {
+		panic("no return value specified for GetPrivateMsgCh")
+	}
+
 	var r0 <-chan messages.PrivDKGMessageIn
 	if rf, ok := ret.Get(0).(func() <-chan messages.PrivDKGMessageIn); ok {
 		r0 = rf()
@@ -81,6 +93,10 @@ func (_m *DKGBroker) GetPrivateMsgCh() <-chan messages.PrivDKGMessageIn {
 func (_m *DKGBroker) Poll(referenceBlock flow.Identifier) error {
 	ret := _m.Called(referenceBlock)

+	if len(ret) == 0 {
+		panic("no return value specified for Poll")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func(flow.Identifier) error); ok {
 		r0 = rf(referenceBlock)
@@ -96,7 +112,7 @@ func (_m *DKGBroker) PrivateSend(dest int, data []byte) {
 	_m.Called(dest, data)
 }

-// Shutdown provides a mock function with given fields:
+// Shutdown provides a mock function with no fields
 func (_m *DKGBroker) Shutdown() {
 	_m.Called()
 }
@@ -105,6 +121,10 @@ func (_m *DKGBroker) Shutdown() {
 func (_m *DKGBroker) SubmitResult(_a0 crypto.PublicKey, _a1 []crypto.PublicKey) error {
 	ret := _m.Called(_a0, _a1)

+	if len(ret) == 0 {
+		panic("no return value specified for SubmitResult")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func(crypto.PublicKey, []crypto.PublicKey) error); ok {
 		r0 = rf(_a0, _a1)
@@ -115,13 +135,12 @@ func (_m *DKGBroker) SubmitResult(_a0 crypto.PublicKey, _a1 []crypto.PublicKey)
 	return r0
 }

-type mockConstructorTestingTNewDKGBroker interface {
+// NewDKGBroker creates a new instance of DKGBroker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewDKGBroker(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewDKGBroker creates a new instance of DKGBroker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewDKGBroker(t mockConstructorTestingTNewDKGBroker) *DKGBroker {
+}) *DKGBroker {
 	mock := &DKGBroker{}
 	mock.Mock.Test(t)
diff --git a/module/mock/dkg_contract_client.go b/module/mock/dkg_contract_client.go
index 7bcfa5eddbf..c6d316476d0 100644
--- a/module/mock/dkg_contract_client.go
+++ b/module/mock/dkg_contract_client.go
@@ -1,9 +1,9 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

 import (
-	crypto "github.com/onflow/flow-go/crypto"
+	crypto "github.com/onflow/crypto"

 	flow "github.com/onflow/flow-go/model/flow"
 	messages "github.com/onflow/flow-go/model/messages"
@@ -20,6 +20,10 @@ type DKGContractClient struct {
 func (_m *DKGContractClient) Broadcast(msg messages.BroadcastDKGMessage) error {
 	ret := _m.Called(msg)

+	if len(ret) == 0 {
+		panic("no return value specified for Broadcast")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func(messages.BroadcastDKGMessage) error); ok {
 		r0 = rf(msg)
@@ -34,6 +38,10 @@ func (_m *DKGContractClient) Broadcast(msg messages.BroadcastDKGMessage) error {
 func (_m *DKGContractClient) ReadBroadcast(fromIndex uint, referenceBlock flow.Identifier) ([]messages.BroadcastDKGMessage, error) {
 	ret := _m.Called(fromIndex, referenceBlock)

+	if len(ret) == 0 {
+		panic("no return value specified for ReadBroadcast")
+	}
+
 	var r0 []messages.BroadcastDKGMessage
 	var r1 error
 	if rf, ok := ret.Get(0).(func(uint, flow.Identifier) ([]messages.BroadcastDKGMessage, error)); ok {
@@ -56,13 +64,17 @@ func (_m *DKGContractClient) ReadBroadcast(fromIndex uint, referenceBlock flow.I
 	return r0, r1
 }

-// SubmitResult provides a mock function with given fields: _a0, _a1
-func (_m *DKGContractClient) SubmitResult(_a0 crypto.PublicKey, _a1 []crypto.PublicKey) error {
-	ret := _m.Called(_a0, _a1)
+// SubmitEmptyResult provides a mock function with no fields
+func (_m *DKGContractClient) SubmitEmptyResult() error {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for SubmitEmptyResult")
+	}

 	var r0 error
-	if rf, ok := ret.Get(0).(func(crypto.PublicKey, []crypto.PublicKey) error); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func() error); ok {
+		r0 = rf()
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -70,13 +82,30 @@ func (_m *DKGContractClient) SubmitResult(_a0 crypto.PublicKey, _a1 []crypto.Pub
 	return r0
 }

-type mockConstructorTestingTNewDKGContractClient interface {
-	mock.TestingT
-	Cleanup(func())
+// SubmitParametersAndResult provides a mock function with given fields: indexMap, groupPublicKey, publicKeys
+func (_m *DKGContractClient) SubmitParametersAndResult(indexMap flow.DKGIndexMap, groupPublicKey crypto.PublicKey, publicKeys []crypto.PublicKey) error {
+	ret := _m.Called(indexMap, groupPublicKey, publicKeys)
+
+	if len(ret) == 0 {
+		panic("no return value specified for SubmitParametersAndResult")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(flow.DKGIndexMap, crypto.PublicKey, []crypto.PublicKey) error); ok {
+		r0 = rf(indexMap, groupPublicKey, publicKeys)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
 }

 // NewDKGContractClient creates a new instance of DKGContractClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewDKGContractClient(t mockConstructorTestingTNewDKGContractClient) *DKGContractClient {
+// The first argument is typically a *testing.T value.
+func NewDKGContractClient(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *DKGContractClient {
 	mock := &DKGContractClient{}
 	mock.Mock.Test(t)
diff --git a/module/mock/dkg_controller.go b/module/mock/dkg_controller.go
index 90d88cd362b..1637c7536c7 100644
--- a/module/mock/dkg_controller.go
+++ b/module/mock/dkg_controller.go
@@ -1,9 +1,9 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

 import (
-	crypto "github.com/onflow/flow-go/crypto"
+	crypto "github.com/onflow/crypto"

 	flow "github.com/onflow/flow-go/model/flow"
 	mock "github.com/stretchr/testify/mock"
@@ -14,10 +14,14 @@ type DKGController struct {
 	mock.Mock
 }

-// End provides a mock function with given fields:
+// End provides a mock function with no fields
 func (_m *DKGController) End() error {
 	ret := _m.Called()

+	if len(ret) == 0 {
+		panic("no return value specified for End")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func() error); ok {
 		r0 = rf()
@@ -28,10 +32,14 @@ func (_m *DKGController) End() error {
 	return r0
 }

-// EndPhase1 provides a mock function with given fields:
+// EndPhase1 provides a mock function with no fields
 func (_m *DKGController) EndPhase1() error {
 	ret := _m.Called()

+	if len(ret) == 0 {
+		panic("no return value specified for EndPhase1")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func() error); ok {
 		r0 = rf()
@@ -42,10 +50,14 @@ func (_m *DKGController) EndPhase1() error {
 	return r0
 }

-// EndPhase2 provides a mock function with given fields:
+// EndPhase2 provides a mock function with no fields
 func (_m *DKGController) EndPhase2() error {
 	ret := _m.Called()

+	if len(ret) == 0 {
+		panic("no return value specified for EndPhase2")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func() error); ok {
 		r0 = rf()
@@ -56,10 +68,14 @@ func (_m *DKGController) EndPhase2() error {
 	return r0
 }

-// GetArtifacts provides a mock function with given fields:
+// GetArtifacts provides a mock function with no fields
 func (_m *DKGController) GetArtifacts() (crypto.PrivateKey, crypto.PublicKey, []crypto.PublicKey) {
 	ret := _m.Called()

+	if len(ret) == 0 {
+		panic("no return value specified for GetArtifacts")
+	}
+
 	var r0 crypto.PrivateKey
 	var r1 crypto.PublicKey
 	var r2 []crypto.PublicKey
@@ -93,10 +109,14 @@ func (_m *DKGController) GetArtifacts() (crypto.PrivateKey, crypto.PublicKey, []
 	return r0, r1, r2
 }

-// GetIndex provides a mock function with given fields:
+// GetIndex provides a mock function with no fields
 func (_m *DKGController) GetIndex() int {
 	ret := _m.Called()

+	if len(ret) == 0 {
+		panic("no return value specified for GetIndex")
+	}
+
 	var r0 int
 	if rf, ok := ret.Get(0).(func() int); ok {
 		r0 = rf()
@@ -111,6 +131,10 @@ func (_m *DKGController) GetIndex() int {
 func (_m *DKGController) Poll(blockReference flow.Identifier) error {
 	ret := _m.Called(blockReference)

+	if len(ret) == 0 {
+		panic("no return value specified for Poll")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func(flow.Identifier) error); ok {
 		r0 = rf(blockReference)
@@ -121,10 +145,14 @@ func (_m *DKGController) Poll(blockReference flow.Identifier) error {
 	return r0
 }

-// Run provides a mock function with given fields:
+// Run provides a mock function with no fields
 func (_m *DKGController) Run() error {
 	ret := _m.Called()

+	if len(ret) == 0 {
+		panic("no return value specified for Run")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func() error); ok {
 		r0 = rf()
@@ -135,15 +163,19 @@ func (_m *DKGController) Run() error {
 	return r0
 }

-// Shutdown provides a mock function with given fields:
+// Shutdown provides a mock function with no fields
 func (_m *DKGController) Shutdown() {
 	_m.Called()
 }

-// SubmitResult provides a mock function with given fields:
+// SubmitResult provides a mock function with no fields
 func (_m *DKGController) SubmitResult() error {
 	ret := _m.Called()

+	if len(ret) == 0 {
+		panic("no return value specified for SubmitResult")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func() error); ok {
 		r0 = rf()
@@ -154,13 +186,12 @@ func (_m *DKGController) SubmitResult() error {
 	return r0
 }

-type mockConstructorTestingTNewDKGController interface {
+// NewDKGController creates a new instance of DKGController. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewDKGController(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewDKGController creates a new instance of DKGController. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewDKGController(t mockConstructorTestingTNewDKGController) *DKGController {
+}) *DKGController {
 	mock := &DKGController{}
 	mock.Mock.Test(t)
diff --git a/module/mock/dkg_controller_factory.go b/module/mock/dkg_controller_factory.go
index df4c29971de..d37798a7771 100644
--- a/module/mock/dkg_controller_factory.go
+++ b/module/mock/dkg_controller_factory.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -15,15 +15,19 @@ type DKGControllerFactory struct {
 }

 // Create provides a mock function with given fields: dkgInstanceID, participants, seed
-func (_m *DKGControllerFactory) Create(dkgInstanceID string, participants flow.IdentityList, seed []byte) (module.DKGController, error) {
+func (_m *DKGControllerFactory) Create(dkgInstanceID string, participants flow.IdentitySkeletonList, seed []byte) (module.DKGController, error) {
 	ret := _m.Called(dkgInstanceID, participants, seed)

+	if len(ret) == 0 {
+		panic("no return value specified for Create")
+	}
+
 	var r0 module.DKGController
 	var r1 error
-	if rf, ok := ret.Get(0).(func(string, flow.IdentityList, []byte) (module.DKGController, error)); ok {
+	if rf, ok := ret.Get(0).(func(string, flow.IdentitySkeletonList, []byte) (module.DKGController, error)); ok {
 		return rf(dkgInstanceID, participants, seed)
 	}
-	if rf, ok := ret.Get(0).(func(string, flow.IdentityList, []byte) module.DKGController); ok {
+	if rf, ok := ret.Get(0).(func(string, flow.IdentitySkeletonList, []byte) module.DKGController); ok {
 		r0 = rf(dkgInstanceID, participants, seed)
 	} else {
 		if ret.Get(0) != nil {
@@ -31,7 +35,7 @@ func (_m *DKGControllerFactory) Create(dkgInstanceID string, participants flow.I
 		}
 	}

-	if rf, ok := ret.Get(1).(func(string, flow.IdentityList, []byte) error); ok {
+	if rf, ok := ret.Get(1).(func(string, flow.IdentitySkeletonList, []byte) error); ok {
 		r1 = rf(dkgInstanceID, participants, seed)
 	} else {
 		r1 = ret.Error(1)
@@ -40,13 +44,12 @@ func (_m *DKGControllerFactory) Create(dkgInstanceID string, participants flow.I
 	return r0, r1
 }

-type mockConstructorTestingTNewDKGControllerFactory interface {
+// NewDKGControllerFactory creates a new instance of DKGControllerFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewDKGControllerFactory(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewDKGControllerFactory creates a new instance of DKGControllerFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewDKGControllerFactory(t mockConstructorTestingTNewDKGControllerFactory) *DKGControllerFactory {
+}) *DKGControllerFactory {
 	mock := &DKGControllerFactory{}
 	mock.Mock.Test(t)
diff --git a/module/mock/dkg_processor.go b/module/mock/dkg_processor.go
new file mode 100644
index 00000000000..e519c5dca25
--- /dev/null
+++ b/module/mock/dkg_processor.go
@@ -0,0 +1,44 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock
+
+import mock "github.com/stretchr/testify/mock"
+
+// DKGProcessor is an autogenerated mock type for the DKGProcessor type
+type DKGProcessor struct {
+	mock.Mock
+}
+
+// Broadcast provides a mock function with given fields: data
+func (_m *DKGProcessor) Broadcast(data []byte) {
+	_m.Called(data)
+}
+
+// Disqualify provides a mock function with given fields: index, log
+func (_m *DKGProcessor) Disqualify(index int, log string) {
+	_m.Called(index, log)
+}
+
+// FlagMisbehavior provides a mock function with given fields: index, log
+func (_m *DKGProcessor) FlagMisbehavior(index int, log string) {
+	_m.Called(index, log)
+}
+
+// PrivateSend provides a mock function with given fields: dest, data
+func (_m *DKGProcessor) PrivateSend(dest int, data []byte) {
+	_m.Called(dest, data)
+}
+
+// NewDKGProcessor creates a new instance of DKGProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewDKGProcessor(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *DKGProcessor {
+	mock := &DKGProcessor{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/module/mock/dkg_state.go b/module/mock/dkg_state.go
new file mode 100644
index 00000000000..023b1f462e8
--- /dev/null
+++ b/module/mock/dkg_state.go
@@ -0,0 +1,219 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock
+
+import (
+	crypto "github.com/onflow/crypto"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// DKGState is an autogenerated mock type for the DKGState type
+type DKGState struct {
+	mock.Mock
+}
+
+// End provides a mock function with no fields
+func (_m *DKGState) End() (crypto.PrivateKey, crypto.PublicKey, []crypto.PublicKey, error) {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for End")
+	}
+
+	var r0 crypto.PrivateKey
+	var r1 crypto.PublicKey
+	var r2 []crypto.PublicKey
+	var r3 error
+	if rf, ok := ret.Get(0).(func() (crypto.PrivateKey, crypto.PublicKey, []crypto.PublicKey, error)); ok {
+		return rf()
+	}
+	if rf, ok := ret.Get(0).(func() crypto.PrivateKey); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(crypto.PrivateKey)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func() crypto.PublicKey); ok {
+		r1 = rf()
+	} else {
+		if ret.Get(1) != nil {
+			r1 = ret.Get(1).(crypto.PublicKey)
+		}
+	}
+
+	if rf, ok := ret.Get(2).(func() []crypto.PublicKey); ok {
+		r2 = rf()
+	} else {
+		if ret.Get(2) != nil {
+			r2 = ret.Get(2).([]crypto.PublicKey)
+		}
+	}
+
+	if rf, ok := ret.Get(3).(func() error); ok {
+		r3 = rf()
+	} else {
+		r3 = ret.Error(3)
+	}
+
+	return r0, r1, r2, r3
+}
+
+// ForceDisqualify provides a mock function with given fields: participant
+func (_m *DKGState) ForceDisqualify(participant int) error {
+	ret := _m.Called(participant)
+
+	if len(ret) == 0 {
+		panic("no return value specified for ForceDisqualify")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(int) error); ok {
+		r0 = rf(participant)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// HandleBroadcastMsg provides a mock function with given fields: orig, msg
+func (_m *DKGState) HandleBroadcastMsg(orig int, msg []byte) error {
+	ret := _m.Called(orig, msg)
+
+	if len(ret) == 0 {
+		panic("no return value specified for HandleBroadcastMsg")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(int, []byte) error); ok {
+		r0 = rf(orig, msg)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// HandlePrivateMsg provides a mock function with given fields: orig, msg
+func (_m *DKGState) HandlePrivateMsg(orig int, msg []byte) error {
+	ret := _m.Called(orig, msg)
+
+	if len(ret) == 0 {
+		panic("no return value specified for HandlePrivateMsg")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(int, []byte) error); ok {
+		r0 = rf(orig, msg)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// NextTimeout provides a mock function with no fields
+func (_m *DKGState) NextTimeout() error {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for NextTimeout")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func() error); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Running provides a mock function with no fields
+func (_m *DKGState) Running() bool {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for Running")
+	}
+
+	var r0 bool
+	if rf, ok := ret.Get(0).(func() bool); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(bool)
+	}
+
+	return r0
+}
+
+// Size provides a mock function with no fields
+func (_m *DKGState) Size() int {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for Size")
+	}
+
+	var r0 int
+	if rf, ok := ret.Get(0).(func() int); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(int)
+	}
+
+	return r0
+}
+
+// Start provides a mock function with given fields: seed
+func (_m *DKGState) Start(seed []byte) error {
+	ret := _m.Called(seed)
+
+	if len(ret) == 0 {
+		panic("no return value specified for Start")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func([]byte) error); ok {
+		r0 = rf(seed)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Threshold provides a mock function with no fields
+func (_m *DKGState) Threshold() int {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for Threshold")
+	}
+
+	var r0 int
+	if rf, ok := ret.Get(0).(func() int); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(int)
+	}
+
+	return r0
+}
+
+// NewDKGState creates a new instance of DKGState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewDKGState(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *DKGState {
+	mock := &DKGState{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/module/mock/engine_metrics.go b/module/mock/engine_metrics.go
index 739ca717e56..b2838e776ec 100644
--- a/module/mock/engine_metrics.go
+++ b/module/mock/engine_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -34,13 +34,12 @@ func (_m *EngineMetrics) OutboundMessageDropped(engine string, messages string)
 	_m.Called(engine, messages)
 }

-type mockConstructorTestingTNewEngineMetrics interface {
+// NewEngineMetrics creates a new instance of EngineMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewEngineMetrics(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewEngineMetrics creates a new instance of EngineMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewEngineMetrics(t mockConstructorTestingTNewEngineMetrics) *EngineMetrics {
+}) *EngineMetrics {
 	mock := &EngineMetrics{}
 	mock.Mock.Test(t)
diff --git a/module/mock/entries_func.go b/module/mock/entries_func.go
deleted file mode 100644
index 11371fee7dd..00000000000
--- a/module/mock/entries_func.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
-
-package mock
-
-import mock "github.com/stretchr/testify/mock"
-
-// EntriesFunc is an autogenerated mock type for the EntriesFunc type
-type EntriesFunc struct {
-	mock.Mock
-}
-
-// Execute provides a mock function with given fields:
-func (_m *EntriesFunc) Execute() uint {
-	ret := _m.Called()
-
-	var r0 uint
-	if rf, ok := ret.Get(0).(func() uint); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Get(0).(uint)
-	}
-
-	return r0
-}
-
-type mockConstructorTestingTNewEntriesFunc interface {
-	mock.TestingT
-	Cleanup(func())
-}
-
-// NewEntriesFunc creates a new instance of EntriesFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewEntriesFunc(t mockConstructorTestingTNewEntriesFunc) *EntriesFunc {
-	mock := &EntriesFunc{}
-	mock.Mock.Test(t)
-
-	t.Cleanup(func() { mock.AssertExpectations(t) })
-
-	return mock
-}
diff --git a/module/mock/epoch_lookup.go b/module/mock/epoch_lookup.go
index 4f62fcd88af..72648ea24cc 100644
--- a/module/mock/epoch_lookup.go
+++ b/module/mock/epoch_lookup.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -9,10 +9,14 @@ type EpochLookup struct {
 	mock.Mock
 }

-// EpochForViewWithFallback provides a mock function with given fields: view
-func (_m *EpochLookup) EpochForViewWithFallback(view uint64) (uint64, error) {
+// EpochForView provides a mock function with given fields: view
+func (_m *EpochLookup) EpochForView(view uint64) (uint64, error) {
 	ret := _m.Called(view)

+	if len(ret) == 0 {
+		panic("no return value specified for EpochForView")
+	}
+
 	var r0 uint64
 	var r1 error
 	if rf, ok := ret.Get(0).(func(uint64) (uint64, error)); ok {
@@ -33,13 +37,12 @@ func (_m *EpochLookup) EpochForViewWithFallback(view uint64) (uint64, error) {
 	return r0, r1
 }

-type mockConstructorTestingTNewEpochLookup interface {
+// NewEpochLookup creates a new instance of EpochLookup. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewEpochLookup(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewEpochLookup creates a new instance of EpochLookup. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewEpochLookup(t mockConstructorTestingTNewEpochLookup) *EpochLookup {
+}) *EpochLookup {
 	mock := &EpochLookup{}
 	mock.Mock.Test(t)
diff --git a/module/mock/evm_metrics.go b/module/mock/evm_metrics.go
new file mode 100644
index 00000000000..04e90149f75
--- /dev/null
+++ b/module/mock/evm_metrics.go
@@ -0,0 +1,39 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock
+
+import mock "github.com/stretchr/testify/mock"
+
+// EVMMetrics is an autogenerated mock type for the EVMMetrics type
+type EVMMetrics struct {
+	mock.Mock
+}
+
+// EVMBlockExecuted provides a mock function with given fields: txCount, totalGasUsed, totalSupplyInFlow
+func (_m *EVMMetrics) EVMBlockExecuted(txCount int, totalGasUsed uint64, totalSupplyInFlow float64) {
+	_m.Called(txCount, totalGasUsed, totalSupplyInFlow)
+}
+
+// EVMTransactionExecuted provides a mock function with given fields: gasUsed, isDirectCall, failed
+func (_m *EVMMetrics) EVMTransactionExecuted(gasUsed uint64, isDirectCall bool, failed bool) {
+	_m.Called(gasUsed, isDirectCall, failed)
+}
+
+// SetNumberOfDeployedCOAs provides a mock function with given fields: count
+func (_m *EVMMetrics) SetNumberOfDeployedCOAs(count uint64) {
+	_m.Called(count)
+}
+
+// NewEVMMetrics creates a new instance of EVMMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewEVMMetrics(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *EVMMetrics {
+	mock := &EVMMetrics{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/module/mock/execution_data_provider_metrics.go b/module/mock/execution_data_provider_metrics.go
index 58714e372e9..6869e83c99e 100644
--- a/module/mock/execution_data_provider_metrics.go
+++ b/module/mock/execution_data_provider_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -13,7 +13,7 @@ type ExecutionDataProviderMetrics struct {
 	mock.Mock
 }

-// AddBlobsFailed provides a mock function with given fields:
+// AddBlobsFailed provides a mock function with no fields
 func (_m *ExecutionDataProviderMetrics) AddBlobsFailed() {
 	_m.Called()
 }
@@ -28,13 +28,12 @@ func (_m *ExecutionDataProviderMetrics) RootIDComputed(duration time.Duration, n
 	_m.Called(duration, numberOfChunks)
 }

-type mockConstructorTestingTNewExecutionDataProviderMetrics interface {
+// NewExecutionDataProviderMetrics creates a new instance of ExecutionDataProviderMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewExecutionDataProviderMetrics(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewExecutionDataProviderMetrics creates a new instance of ExecutionDataProviderMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewExecutionDataProviderMetrics(t mockConstructorTestingTNewExecutionDataProviderMetrics) *ExecutionDataProviderMetrics {
+}) *ExecutionDataProviderMetrics {
 	mock := &ExecutionDataProviderMetrics{}
 	mock.Mock.Test(t)
diff --git a/module/mock/execution_data_pruner_metrics.go b/module/mock/execution_data_pruner_metrics.go
index 28176f7df01..a5cfcc60338 100644
--- a/module/mock/execution_data_pruner_metrics.go
+++ b/module/mock/execution_data_pruner_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -18,13 +18,12 @@ func (_m *ExecutionDataPrunerMetrics) Pruned(height uint64, duration time.Durati
 	_m.Called(height, duration)
 }

-type mockConstructorTestingTNewExecutionDataPrunerMetrics interface {
+// NewExecutionDataPrunerMetrics creates a new instance of ExecutionDataPrunerMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewExecutionDataPrunerMetrics(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewExecutionDataPrunerMetrics creates a new instance of ExecutionDataPrunerMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewExecutionDataPrunerMetrics(t mockConstructorTestingTNewExecutionDataPrunerMetrics) *ExecutionDataPrunerMetrics {
+}) *ExecutionDataPrunerMetrics {
 	mock := &ExecutionDataPrunerMetrics{}
 	mock.Mock.Test(t)
diff --git a/module/mock/execution_data_requester_metrics.go b/module/mock/execution_data_requester_metrics.go
index 804d52c8362..3a1cdc1b253 100644
--- a/module/mock/execution_data_requester_metrics.go
+++ b/module/mock/execution_data_requester_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -18,12 +18,12 @@ func (_m *ExecutionDataRequesterMetrics) ExecutionDataFetchFinished(duration tim
 	_m.Called(duration, success, height)
 }

-// ExecutionDataFetchStarted provides a mock function with given fields:
+// ExecutionDataFetchStarted provides a mock function with no fields
 func (_m *ExecutionDataRequesterMetrics) ExecutionDataFetchStarted() {
 	_m.Called()
 }

-// FetchRetried provides a mock function with given fields:
+// FetchRetried provides a mock function with no fields
 func (_m *ExecutionDataRequesterMetrics) FetchRetried() {
 	_m.Called()
 }
@@ -33,13 +33,12 @@ func (_m *ExecutionDataRequesterMetrics) NotificationSent(height uint64) {
 	_m.Called(height)
 }

-type mockConstructorTestingTNewExecutionDataRequesterMetrics interface {
+// NewExecutionDataRequesterMetrics creates a new instance of ExecutionDataRequesterMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewExecutionDataRequesterMetrics(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewExecutionDataRequesterMetrics creates a new instance of ExecutionDataRequesterMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewExecutionDataRequesterMetrics(t mockConstructorTestingTNewExecutionDataRequesterMetrics) *ExecutionDataRequesterMetrics {
+}) *ExecutionDataRequesterMetrics {
 	mock := &ExecutionDataRequesterMetrics{}
 	mock.Mock.Test(t)
diff --git a/module/mock/execution_data_requester_v2_metrics.go b/module/mock/execution_data_requester_v2_metrics.go
index 9119153196c..76162a30605 100644
--- a/module/mock/execution_data_requester_v2_metrics.go
+++ b/module/mock/execution_data_requester_v2_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -18,12 +18,12 @@ func (_m *ExecutionDataRequesterV2Metrics) FulfilledHeight(blockHeight uint64) {
 	_m.Called(blockHeight)
 }

-// ReceiptSkipped provides a mock function with given fields:
+// ReceiptSkipped provides a mock function with no fields
 func (_m *ExecutionDataRequesterV2Metrics) ReceiptSkipped() {
 	_m.Called()
 }

-// RequestCanceled provides a mock function with given fields:
+// RequestCanceled provides a mock function with no fields
 func (_m *ExecutionDataRequesterV2Metrics) RequestCanceled() {
 	_m.Called()
 }
@@ -38,18 +38,17 @@ func (_m *ExecutionDataRequesterV2Metrics) RequestSucceeded(blockHeight uint64,
 	_m.Called(blockHeight, duration, totalSize, numberOfAttempts)
 }

-// ResponseDropped provides a mock function with given fields:
+// ResponseDropped provides a mock function with no fields
 func (_m *ExecutionDataRequesterV2Metrics) ResponseDropped() {
 	_m.Called()
 }

-type mockConstructorTestingTNewExecutionDataRequesterV2Metrics interface {
+// NewExecutionDataRequesterV2Metrics creates a new instance of ExecutionDataRequesterV2Metrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewExecutionDataRequesterV2Metrics(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewExecutionDataRequesterV2Metrics creates a new instance of ExecutionDataRequesterV2Metrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewExecutionDataRequesterV2Metrics(t mockConstructorTestingTNewExecutionDataRequesterV2Metrics) *ExecutionDataRequesterV2Metrics {
+}) *ExecutionDataRequesterV2Metrics {
 	mock := &ExecutionDataRequesterV2Metrics{}
 	mock.Mock.Test(t)
diff --git a/module/mock/execution_metrics.go b/module/mock/execution_metrics.go
index b2cfc181b2d..ecee1466d25 100644
--- a/module/mock/execution_metrics.go
+++ b/module/mock/execution_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -16,11 +16,21 @@ type ExecutionMetrics struct {
 	mock.Mock
 }

-// ChunkDataPackRequestProcessed provides a mock function with given fields:
+// ChunkDataPackRequestProcessed provides a mock function with no fields
 func (_m *ExecutionMetrics) ChunkDataPackRequestProcessed() {
 	_m.Called()
 }

+// EVMBlockExecuted provides a mock function with given fields: txCount, totalGasUsed, totalSupplyInFlow
+func (_m *ExecutionMetrics) EVMBlockExecuted(txCount int, totalGasUsed uint64, totalSupplyInFlow float64) {
+	_m.Called(txCount, totalGasUsed, totalSupplyInFlow)
+}
+
+// EVMTransactionExecuted provides a mock function with given fields: gasUsed, isDirectCall, failed
+func (_m *ExecutionMetrics) EVMTransactionExecuted(gasUsed uint64, isDirectCall bool, failed bool) {
+	_m.Called(gasUsed, isDirectCall, failed)
+}
+
 // ExecutionBlockCachedPrograms provides a mock function with given fields: programs
 func (_m *ExecutionMetrics) ExecutionBlockCachedPrograms(programs int) {
 	_m.Called(programs)
@@ -31,56 +41,71 @@ func (_m *ExecutionMetrics) ExecutionBlockDataUploadFinished(dur time.Duration)
 	_m.Called(dur)
 }

-// ExecutionBlockDataUploadStarted provides a mock function with given fields:
+// ExecutionBlockDataUploadStarted provides a mock function with no fields
 func (_m *ExecutionMetrics) ExecutionBlockDataUploadStarted() {
 	_m.Called()
 }

 // ExecutionBlockExecuted provides a mock function with given fields: dur, stats
-func (_m *ExecutionMetrics) ExecutionBlockExecuted(dur time.Duration, stats module.ExecutionResultStats) {
+func (_m *ExecutionMetrics) ExecutionBlockExecuted(dur time.Duration, stats module.BlockExecutionResultStats) {
 	_m.Called(dur, stats)
 }

 // ExecutionBlockExecutionEffortVectorComponent provides a mock function with given fields: _a0, _a1
-func (_m *ExecutionMetrics) ExecutionBlockExecutionEffortVectorComponent(_a0 string, _a1 uint) {
+func (_m *ExecutionMetrics) ExecutionBlockExecutionEffortVectorComponent(_a0 string, _a1 uint64) {
 	_m.Called(_a0, _a1)
 }

+// ExecutionCallbacksExecuted provides a mock function with given fields: callbackCount, processComputationUsed, executeComputationLimits
+func (_m *ExecutionMetrics) ExecutionCallbacksExecuted(callbackCount int, processComputationUsed uint64, executeComputationLimits uint64) {
+	_m.Called(callbackCount, processComputationUsed, executeComputationLimits)
+}
+
+// ExecutionCheckpointSize provides a mock function with given fields: bytes
+func (_m *ExecutionMetrics) ExecutionCheckpointSize(bytes uint64) {
+	_m.Called(bytes)
+}
+
 // ExecutionChunkDataPackGenerated provides a mock function with given fields: proofSize, numberOfTransactions
 func (_m *ExecutionMetrics) ExecutionChunkDataPackGenerated(proofSize int, numberOfTransactions int) {
 	_m.Called(proofSize, numberOfTransactions)
 }

 // ExecutionCollectionExecuted provides a mock function with given fields: dur, stats
-func (_m *ExecutionMetrics) ExecutionCollectionExecuted(dur time.Duration, stats module.ExecutionResultStats) {
+func (_m *ExecutionMetrics) ExecutionCollectionExecuted(dur time.Duration, stats module.CollectionExecutionResultStats) {
 	_m.Called(dur, stats)
 }

-// ExecutionCollectionRequestRetried provides a mock function with given fields:
-func (_m *ExecutionMetrics) ExecutionCollectionRequestRetried() {
-	_m.Called()
-}
-
-// ExecutionCollectionRequestSent provides a mock function with given fields:
+// ExecutionCollectionRequestSent provides a mock function with no fields
 func (_m *ExecutionMetrics) ExecutionCollectionRequestSent() {
 	_m.Called()
 }

-// ExecutionComputationResultUploadRetried provides a mock function with given fields:
+// ExecutionComputationResultUploadRetried provides a mock function with no fields
 func (_m *ExecutionMetrics) ExecutionComputationResultUploadRetried() {
 	_m.Called()
 }

-// ExecutionComputationResultUploaded provides a mock function with given fields:
+// ExecutionComputationResultUploaded provides a mock function with no fields
 func (_m *ExecutionMetrics) ExecutionComputationResultUploaded() {
 	_m.Called()
 }

+// ExecutionLastChunkDataPackPrunedHeight provides a mock function with given fields: height
+func (_m *ExecutionMetrics) ExecutionLastChunkDataPackPrunedHeight(height uint64) {
+	_m.Called(height)
+}
+
 // ExecutionLastExecutedBlockHeight provides a mock function with given fields: height
 func (_m *ExecutionMetrics) ExecutionLastExecutedBlockHeight(height uint64) {
 	_m.Called(height)
 }

+// ExecutionLastFinalizedExecutedBlockHeight provides a mock function with given fields: height
+func (_m *ExecutionMetrics) ExecutionLastFinalizedExecutedBlockHeight(height uint64) {
+	_m.Called(height)
+}
+
 // ExecutionScriptExecuted provides a mock function with given fields: dur, compUsed, memoryUsed, memoryEstimate
 func (_m *ExecutionMetrics) ExecutionScriptExecuted(dur time.Duration, compUsed uint64, memoryUsed uint64, memoryEstimate uint64) {
 	_m.Called(dur, compUsed, memoryUsed, memoryEstimate)
@@ -96,9 +121,14 @@ func (_m *ExecutionMetrics) ExecutionSync(syncing bool) {
 	_m.Called(syncing)
 }

-// ExecutionTransactionExecuted provides a mock function with given fields: dur, compUsed, memoryUsed, eventCounts, eventSize, failed
-func (_m *ExecutionMetrics) ExecutionTransactionExecuted(dur time.Duration, compUsed uint64, memoryUsed uint64, eventCounts int, eventSize int, failed bool) {
-	_m.Called(dur, compUsed, memoryUsed, eventCounts, eventSize, failed)
+// ExecutionTargetChunkDataPackPrunedHeight provides a mock function with given fields: height
+func (_m *ExecutionMetrics) ExecutionTargetChunkDataPackPrunedHeight(height uint64) {
+	_m.Called(height)
+}
+
+// ExecutionTransactionExecuted provides a mock function with given fields: dur, stats, info
+func (_m *ExecutionMetrics) ExecutionTransactionExecuted(dur time.Duration, stats module.TransactionExecutionResultStats, info module.TransactionExecutionResultInfo) {
+	_m.Called(dur, stats, info)
 }

 // FinishBlockReceivedToExecuted provides a mock function with given fields: blockID
@@ -186,16 +216,21 @@ func (_m *ExecutionMetrics) RuntimeTransactionParsed(dur time.Duration) {
 	_m.Called(dur)
 }

-// RuntimeTransactionProgramsCacheHit provides a mock function with given fields:
+// RuntimeTransactionProgramsCacheHit provides a mock function with no fields
 func (_m *ExecutionMetrics) RuntimeTransactionProgramsCacheHit() {
 	_m.Called()
 }

-// RuntimeTransactionProgramsCacheMiss provides a mock function with given fields:
+// RuntimeTransactionProgramsCacheMiss provides a mock function with no fields
 func (_m *ExecutionMetrics) RuntimeTransactionProgramsCacheMiss() {
 	_m.Called()
 }

+// SetNumberOfDeployedCOAs provides a mock function with given fields: count
+func (_m *ExecutionMetrics) SetNumberOfDeployedCOAs(count uint64) {
+	_m.Called(count)
+}
+
 // StartBlockReceivedToExecuted provides a mock function with given fields: blockID
 func (_m *ExecutionMetrics) StartBlockReceivedToExecuted(blockID flow.Identifier) {
 	_m.Called(blockID)
@@ -206,7 +241,7 @@ func (_m *ExecutionMetrics) UpdateCollectionMaxHeight(height uint64) {
 	_m.Called(height)
 }

-// UpdateCount provides a mock function with given fields:
+// UpdateCount provides a mock function with no fields
 func (_m *ExecutionMetrics) UpdateCount() {
 	_m.Called()
 }
@@ -231,13 +266,12 @@ func (_m *ExecutionMetrics) UpdateValuesSize(byte uint64) {
 	_m.Called(byte)
 }

-type mockConstructorTestingTNewExecutionMetrics interface {
+// NewExecutionMetrics creates a new instance of ExecutionMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewExecutionMetrics(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewExecutionMetrics creates a new instance of ExecutionMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewExecutionMetrics(t mockConstructorTestingTNewExecutionMetrics) *ExecutionMetrics {
+}) *ExecutionMetrics {
 	mock := &ExecutionMetrics{}
 	mock.Mock.Test(t)
diff --git a/module/mock/execution_state_indexer_metrics.go b/module/mock/execution_state_indexer_metrics.go
new file mode 100644
index 00000000000..e278c84d79a
--- /dev/null
+++ b/module/mock/execution_state_indexer_metrics.go
@@ -0,0 +1,43 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock
+
+import (
+	mock "github.com/stretchr/testify/mock"
+
+	time "time"
+)
+
+// ExecutionStateIndexerMetrics is an autogenerated mock type for the ExecutionStateIndexerMetrics type
+type ExecutionStateIndexerMetrics struct {
+	mock.Mock
+}
+
+// BlockIndexed provides a mock function with given fields: height, duration, events, registers, transactionResults
+func (_m *ExecutionStateIndexerMetrics) BlockIndexed(height uint64, duration time.Duration, events int, registers int, transactionResults int) {
+	_m.Called(height, duration, events, registers, transactionResults)
+}
+
+// BlockReindexed provides a mock function with no fields
+func (_m *ExecutionStateIndexerMetrics) BlockReindexed() {
+	_m.Called()
+}
+
+// InitializeLatestHeight provides a mock function with given fields: height
+func (_m *ExecutionStateIndexerMetrics) InitializeLatestHeight(height uint64) {
+	_m.Called(height)
+}
+
+// NewExecutionStateIndexerMetrics creates a new instance of ExecutionStateIndexerMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewExecutionStateIndexerMetrics(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *ExecutionStateIndexerMetrics {
+	mock := &ExecutionStateIndexerMetrics{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/module/mock/finalized_header_cache.go b/module/mock/finalized_header_cache.go
new file mode 100644
index 00000000000..66f99b6979f
--- /dev/null
+++ b/module/mock/finalized_header_cache.go
@@ -0,0 +1,47 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock
+
+import (
+	flow "github.com/onflow/flow-go/model/flow"
+	mock "github.com/stretchr/testify/mock"
+)
+
+// FinalizedHeaderCache is an autogenerated mock type for the FinalizedHeaderCache type
+type FinalizedHeaderCache struct {
+	mock.Mock
+}
+
+// Get provides a mock function with no fields
+func (_m *FinalizedHeaderCache) Get() *flow.Header {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for Get")
+	}
+
+	var r0 *flow.Header
+	if rf, ok := ret.Get(0).(func() *flow.Header); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*flow.Header)
+		}
+	}
+
+	return r0
+}
+
+// NewFinalizedHeaderCache creates a new instance of FinalizedHeaderCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewFinalizedHeaderCache(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *FinalizedHeaderCache {
+	mock := &FinalizedHeaderCache{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/module/mock/finalizer.go b/module/mock/finalizer.go
index d3f933199db..2891bdf6755 100644
--- a/module/mock/finalizer.go
+++ b/module/mock/finalizer.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -16,6 +16,10 @@ type Finalizer struct {
 func (_m *Finalizer) MakeFinal(blockID flow.Identifier) error {
 	ret := _m.Called(blockID)

+	if len(ret) == 0 {
+		panic("no return value specified for MakeFinal")
+	}
+
 	var r0 error
 	if rf, ok := ret.Get(0).(func(flow.Identifier) error); ok {
 		r0 = rf(blockID)
@@ -26,13 +30,12 @@ func (_m *Finalizer) MakeFinal(blockID flow.Identifier) error {
 	return r0
 }

-type mockConstructorTestingTNewFinalizer interface {
+// NewFinalizer creates a new instance of Finalizer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewFinalizer(t interface {
 	mock.TestingT
 	Cleanup(func())
-}
-
-// NewFinalizer creates a new instance of Finalizer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewFinalizer(t mockConstructorTestingTNewFinalizer) *Finalizer {
+}) *Finalizer {
 	mock := &Finalizer{}
 	mock.Mock.Test(t)
diff --git a/module/mock/gossip_sub_local_mesh_metrics.go b/module/mock/gossip_sub_local_mesh_metrics.go
deleted file mode 100644
index aa14978a00e..00000000000
--- a/module/mock/gossip_sub_local_mesh_metrics.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
-
-package mock
-
-import mock "github.com/stretchr/testify/mock"
-
-// GossipSubLocalMeshMetrics is an autogenerated mock type for the GossipSubLocalMeshMetrics type
-type GossipSubLocalMeshMetrics struct {
-	mock.Mock
-}
-
-// OnLocalMeshSizeUpdated provides a mock function with given fields: topic, size
-func (_m *GossipSubLocalMeshMetrics) OnLocalMeshSizeUpdated(topic string, size int) {
-	_m.Called(topic, size)
-}
-
-type mockConstructorTestingTNewGossipSubLocalMeshMetrics interface {
-	mock.TestingT
-	Cleanup(func())
-}
-
-// NewGossipSubLocalMeshMetrics creates a new instance of GossipSubLocalMeshMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-func NewGossipSubLocalMeshMetrics(t mockConstructorTestingTNewGossipSubLocalMeshMetrics) *GossipSubLocalMeshMetrics {
-	mock := &GossipSubLocalMeshMetrics{}
-	mock.Mock.Test(t)
-
-	t.Cleanup(func() { mock.AssertExpectations(t) })
-
-	return mock
-}
diff --git a/module/mock/gossip_sub_metrics.go b/module/mock/gossip_sub_metrics.go
index da87176c43b..572f484c9be 100644
--- a/module/mock/gossip_sub_metrics.go
+++ b/module/mock/gossip_sub_metrics.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.21.4. DO NOT EDIT.
+// Code generated by mockery. DO NOT EDIT.

 package mock

@@ -6,6 +6,8 @@ import (
 	channels "github.com/onflow/flow-go/network/channels"
 	mock "github.com/stretchr/testify/mock"

+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+
 	time "time"
 )

@@ -14,6 +16,21 @@ type GossipSubMetrics struct {
 	mock.Mock
 }

+// AsyncProcessingFinished provides a mock function with given fields: duration
+func (_m *GossipSubMetrics) AsyncProcessingFinished(duration time.Duration) {
+	_m.Called(duration)
+}
+
+// AsyncProcessingStarted provides a mock function with no fields
+func (_m *GossipSubMetrics) AsyncProcessingStarted() {
+	_m.Called()
+}
+
+// OnActiveClusterIDsNotSetErr provides a mock function with no fields
+func (_m *GossipSubMetrics) OnActiveClusterIDsNotSetErr() {
+	_m.Called()
+}
+
 // OnAppSpecificScoreUpdated provides a mock function with given fields: _a0
 func (_m *GossipSubMetrics) OnAppSpecificScoreUpdated(_a0 float64) {
 	_m.Called(_a0)
@@ -24,19 +41,59 @@ func (_m *GossipSubMetrics) OnBehaviourPenaltyUpdated(_a0 float64) {
 	_m.Called(_a0)
 }

+// OnControlMessagesTruncated provides a mock function with given fields: messageType, diff
+func (_m *GossipSubMetrics) OnControlMessagesTruncated(messageType p2pmsg.ControlMessageType, diff int) {
+	_m.Called(messageType, diff)
+}
+
 // OnFirstMessageDeliveredUpdated provides a mock function with given fields: _a0, _a1
 func (_m *GossipSubMetrics) OnFirstMessageDeliveredUpdated(_a0 channels.Topic, _a1 float64) {
 	_m.Called(_a0, _a1)
 }

-// OnGraftReceived provides a mock function with given fields: count
-func (_m *GossipSubMetrics) OnGraftReceived(count int) {
-	_m.Called(count)
+// OnGraftDuplicateTopicIdsExceedThreshold provides a mock function with no fields
+func (_m *GossipSubMetrics) OnGraftDuplicateTopicIdsExceedThreshold() {
+	_m.Called()
+}
+
+// OnGraftInvalidTopicIdsExceedThreshold provides a mock function with no fields
+func (_m *GossipSubMetrics) OnGraftInvalidTopicIdsExceedThreshold() {
+	_m.Called()
+}
+
+// OnGraftMessageInspected provides a mock function with given fields: duplicateTopicIds, invalidTopicIds
+func (_m *GossipSubMetrics) OnGraftMessageInspected(duplicateTopicIds int, invalidTopicIds int) {
+	_m.Called(duplicateTopicIds, invalidTopicIds)
+}
+
+// OnIHaveControlMessageIdsTruncated provides a mock function with given fields: diff
+func (_m *GossipSubMetrics) OnIHaveControlMessageIdsTruncated(diff int) {
+	_m.Called(diff)
+}
+
+// OnIHaveDuplicateMessageIdsExceedThreshold provides a mock function with no fields
+func (_m *GossipSubMetrics) OnIHaveDuplicateMessageIdsExceedThreshold() {
+	_m.Called()
+}
+
+// OnIHaveDuplicateTopicIdsExceedThreshold provides a mock function with no fields
+func (_m *GossipSubMetrics) OnIHaveDuplicateTopicIdsExceedThreshold() {
+	_m.Called()
+}
+
+// OnIHaveInvalidTopicIdsExceedThreshold provides a mock function with no fields
+func (_m *GossipSubMetrics) OnIHaveInvalidTopicIdsExceedThreshold() {
+	_m.Called()
+}
+
+// OnIHaveMessageIDsReceived provides a mock function with given fields: channel, msgIdCount
+func (_m *GossipSubMetrics) OnIHaveMessageIDsReceived(channel string, msgIdCount int) {
+	_m.Called(channel, msgIdCount)
 }

-// OnIHaveReceived provides a mock function with given fields: count
-func (_m *GossipSubMetrics) OnIHaveReceived(count int) {
-	_m.Called(count)
+// OnIHaveMessagesInspected provides a mock function with given fields: duplicateTopicIds, duplicateMessageIds, invalidTopicIds
+func (_m *GossipSubMetrics) OnIHaveMessagesInspected(duplicateTopicIds int, duplicateMessageIds int, invalidTopicIds int) {
+	_m.Called(duplicateTopicIds, duplicateMessageIds, invalidTopicIds)
 }

 // OnIPColocationFactorUpdated provides a mock function with given fields: _a0
@@ -44,23 +101,38 @@ func (_m *GossipSubMetrics) OnIPColocationFactorUpdated(_a0 float64) {
 	_m.Called(_a0)
 }

-// OnIWantReceived provides a mock function with given fields: count
-func (_m *GossipSubMetrics) OnIWantReceived(count int) {
-	_m.Called(count)
+// OnIWantCacheMissMessageIdsExceedThreshold provides a mock function with no fields
+func (_m *GossipSubMetrics) OnIWantCacheMissMessageIdsExceedThreshold() {
+	_m.Called()
 }

-// OnIncomingRpcAcceptedFully provides a mock function with given fields:
-func (_m *GossipSubMetrics) OnIncomingRpcAcceptedFully() {
-	_m.Called()
+// OnIWantControlMessageIdsTruncated provides a mock function with given fields: diff
+func (_m *GossipSubMetrics) OnIWantControlMessageIdsTruncated(diff int) {
+	_m.Called(diff)
 }

-// OnIncomingRpcAcceptedOnlyForControlMessages provides a mock function with given fields:
-func (_m *GossipSubMetrics) OnIncomingRpcAcceptedOnlyForControlMessages() {
+// OnIWantDuplicateMessageIdsExceedThreshold provides a mock function with no fields
+func (_m *GossipSubMetrics) OnIWantDuplicateMessageIdsExceedThreshold() {
 	_m.Called()
 }

-// OnIncomingRpcRejected provides a mock function with given fields:
-func (_m *GossipSubMetrics) OnIncomingRpcRejected() {
+// OnIWantMessageIDsReceived provides a mock function with given fields: msgIdCount
+func (_m *GossipSubMetrics) OnIWantMessageIDsReceived(msgIdCount int) {
+	_m.Called(msgIdCount)
+}
+
+// OnIWantMessagesInspected provides a mock function with given fields: duplicateCount, cacheMissCount
+func (_m *GossipSubMetrics) OnIWantMessagesInspected(duplicateCount int, cacheMissCount int) {
+	_m.Called(duplicateCount, cacheMissCount)
+}
+
+// OnIncomingRpcReceived provides a mock function with given fields: iHaveCount, iWantCount, graftCount, pruneCount, msgCount
+func (_m *GossipSubMetrics) OnIncomingRpcReceived(iHaveCount int, iWantCount int, graftCount int, pruneCount int, msgCount int) {
+	_m.Called(iHaveCount, iWantCount, graftCount, pruneCount, msgCount)
+}
+
+// OnInvalidControlMessageNotificationSent provides a mock function with no fields
+func (_m *GossipSubMetrics) OnInvalidControlMessageNotificationSent() {
 	_m.Called()
 }

@@ -69,29 +141,124 @@ func (_m *GossipSubMetrics) OnInvalidMessageDeliveredUpdated(_a0 channels.Topic,
 	_m.Called(_a0, _a1)
 }

+// OnInvalidTopicIdDetectedForControlMessage provides a mock function with given fields: messageType
+func (_m *GossipSubMetrics) OnInvalidTopicIdDetectedForControlMessage(messageType p2pmsg.ControlMessageType) {
+	_m.Called(messageType)
+}
+
 // OnLocalMeshSizeUpdated provides a mock function with given fields: topic, size
 func (_m *GossipSubMetrics) OnLocalMeshSizeUpdated(topic string, size int) {
 	_m.Called(topic, size)
 }

+// OnLocalPeerJoinedTopic provides a mock function with no fields
+func (_m *GossipSubMetrics) OnLocalPeerJoinedTopic() {
+
_m.Called() +} + +// OnLocalPeerLeftTopic provides a mock function with no fields +func (_m *GossipSubMetrics) OnLocalPeerLeftTopic() { + _m.Called() +} + // OnMeshMessageDeliveredUpdated provides a mock function with given fields: _a0, _a1 func (_m *GossipSubMetrics) OnMeshMessageDeliveredUpdated(_a0 channels.Topic, _a1 float64) { _m.Called(_a0, _a1) } +// OnMessageDeliveredToAllSubscribers provides a mock function with given fields: size +func (_m *GossipSubMetrics) OnMessageDeliveredToAllSubscribers(size int) { + _m.Called(size) +} + +// OnMessageDuplicate provides a mock function with given fields: size +func (_m *GossipSubMetrics) OnMessageDuplicate(size int) { + _m.Called(size) +} + +// OnMessageEnteredValidation provides a mock function with given fields: size +func (_m *GossipSubMetrics) OnMessageEnteredValidation(size int) { + _m.Called(size) +} + +// OnMessageRejected provides a mock function with given fields: size, reason +func (_m *GossipSubMetrics) OnMessageRejected(size int, reason string) { + _m.Called(size, reason) +} + +// OnOutboundRpcDropped provides a mock function with no fields +func (_m *GossipSubMetrics) OnOutboundRpcDropped() { + _m.Called() +} + // OnOverallPeerScoreUpdated provides a mock function with given fields: _a0 func (_m *GossipSubMetrics) OnOverallPeerScoreUpdated(_a0 float64) { _m.Called(_a0) } -// OnPruneReceived provides a mock function with given fields: count -func (_m *GossipSubMetrics) OnPruneReceived(count int) { - _m.Called(count) +// OnPeerAddedToProtocol provides a mock function with given fields: protocol +func (_m *GossipSubMetrics) OnPeerAddedToProtocol(protocol string) { + _m.Called(protocol) +} + +// OnPeerGraftTopic provides a mock function with given fields: topic +func (_m *GossipSubMetrics) OnPeerGraftTopic(topic string) { + _m.Called(topic) +} + +// OnPeerPruneTopic provides a mock function with given fields: topic +func (_m *GossipSubMetrics) OnPeerPruneTopic(topic string) { + _m.Called(topic) +} + +// OnPeerRemovedFromProtocol provides a mock function with no fields +func (_m *GossipSubMetrics) OnPeerRemovedFromProtocol() { + _m.Called() +} + +// OnPeerThrottled provides a mock function with no fields +func (_m *GossipSubMetrics) OnPeerThrottled() { + _m.Called() +} + +// OnPruneDuplicateTopicIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubMetrics) OnPruneDuplicateTopicIdsExceedThreshold() { + _m.Called() } -// OnPublishedGossipMessagesReceived provides a mock function with given fields: count -func (_m *GossipSubMetrics) OnPublishedGossipMessagesReceived(count int) { - _m.Called(count) +// OnPruneInvalidTopicIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubMetrics) OnPruneInvalidTopicIdsExceedThreshold() { + _m.Called() +} + +// OnPruneMessageInspected provides a mock function with given fields: duplicateTopicIds, invalidTopicIds +func (_m *GossipSubMetrics) OnPruneMessageInspected(duplicateTopicIds int, invalidTopicIds int) { + _m.Called(duplicateTopicIds, invalidTopicIds) +} + +// OnPublishMessageInspected provides a mock function with given fields: totalErrCount, invalidTopicIdsCount, invalidSubscriptionsCount, invalidSendersCount +func (_m *GossipSubMetrics) OnPublishMessageInspected(totalErrCount int, invalidTopicIdsCount int, invalidSubscriptionsCount int, invalidSendersCount int) { + _m.Called(totalErrCount, invalidTopicIdsCount, invalidSubscriptionsCount, invalidSendersCount) +} + +// OnPublishMessagesInspectionErrorExceedsThreshold provides a mock function with 
no fields +func (_m *GossipSubMetrics) OnPublishMessagesInspectionErrorExceedsThreshold() { + _m.Called() +} + +// OnRpcReceived provides a mock function with given fields: msgCount, iHaveCount, iWantCount, graftCount, pruneCount +func (_m *GossipSubMetrics) OnRpcReceived(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { + _m.Called(msgCount, iHaveCount, iWantCount, graftCount, pruneCount) +} + +// OnRpcRejectedFromUnknownSender provides a mock function with no fields +func (_m *GossipSubMetrics) OnRpcRejectedFromUnknownSender() { + _m.Called() +} + +// OnRpcSent provides a mock function with given fields: msgCount, iHaveCount, iWantCount, graftCount, pruneCount +func (_m *GossipSubMetrics) OnRpcSent(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { + _m.Called(msgCount, iHaveCount, iWantCount, graftCount, pruneCount) } // OnTimeInMeshUpdated provides a mock function with given fields: _a0, _a1 @@ -99,18 +266,27 @@ func (_m *GossipSubMetrics) OnTimeInMeshUpdated(_a0 channels.Topic, _a1 time.Dur _m.Called(_a0, _a1) } +// OnUndeliveredMessage provides a mock function with no fields +func (_m *GossipSubMetrics) OnUndeliveredMessage() { + _m.Called() +} + +// OnUnstakedPeerInspectionFailed provides a mock function with no fields +func (_m *GossipSubMetrics) OnUnstakedPeerInspectionFailed() { + _m.Called() +} + // SetWarningStateCount provides a mock function with given fields: _a0 func (_m *GossipSubMetrics) SetWarningStateCount(_a0 uint) { _m.Called(_a0) } -type mockConstructorTestingTNewGossipSubMetrics interface { +// NewGossipSubMetrics creates a new instance of GossipSubMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGossipSubMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewGossipSubMetrics creates a new instance of GossipSubMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubMetrics(t mockConstructorTestingTNewGossipSubMetrics) *GossipSubMetrics { +}) *GossipSubMetrics { mock := &GossipSubMetrics{} mock.Mock.Test(t) diff --git a/module/mock/gossip_sub_router_metrics.go b/module/mock/gossip_sub_router_metrics.go deleted file mode 100644 index a320a11fffc..00000000000 --- a/module/mock/gossip_sub_router_metrics.go +++ /dev/null @@ -1,65 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mock - -import mock "github.com/stretchr/testify/mock" - -// GossipSubRouterMetrics is an autogenerated mock type for the GossipSubRouterMetrics type -type GossipSubRouterMetrics struct { - mock.Mock -} - -// OnGraftReceived provides a mock function with given fields: count -func (_m *GossipSubRouterMetrics) OnGraftReceived(count int) { - _m.Called(count) -} - -// OnIHaveReceived provides a mock function with given fields: count -func (_m *GossipSubRouterMetrics) OnIHaveReceived(count int) { - _m.Called(count) -} - -// OnIWantReceived provides a mock function with given fields: count -func (_m *GossipSubRouterMetrics) OnIWantReceived(count int) { - _m.Called(count) -} - -// OnIncomingRpcAcceptedFully provides a mock function with given fields: -func (_m *GossipSubRouterMetrics) OnIncomingRpcAcceptedFully() { - _m.Called() -} - -// OnIncomingRpcAcceptedOnlyForControlMessages provides a mock function with given fields: -func (_m *GossipSubRouterMetrics) OnIncomingRpcAcceptedOnlyForControlMessages() { - _m.Called() -} - -// OnIncomingRpcRejected provides a mock function with given fields: -func (_m *GossipSubRouterMetrics) OnIncomingRpcRejected() { - _m.Called() -} - -// OnPruneReceived provides a mock function with given fields: count -func (_m *GossipSubRouterMetrics) OnPruneReceived(count int) { - _m.Called(count) -} - -// OnPublishedGossipMessagesReceived provides a mock function with given fields: count -func (_m *GossipSubRouterMetrics) OnPublishedGossipMessagesReceived(count int) { - _m.Called(count) -} - -type mockConstructorTestingTNewGossipSubRouterMetrics interface { - mock.TestingT - Cleanup(func()) -} - -// NewGossipSubRouterMetrics creates a new instance of GossipSubRouterMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubRouterMetrics(t mockConstructorTestingTNewGossipSubRouterMetrics) *GossipSubRouterMetrics { - mock := &GossipSubRouterMetrics{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/module/mock/gossip_sub_rpc_inspector_metrics.go b/module/mock/gossip_sub_rpc_inspector_metrics.go new file mode 100644 index 00000000000..f6d1c520c87 --- /dev/null +++ b/module/mock/gossip_sub_rpc_inspector_metrics.go @@ -0,0 +1,39 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// GossipSubRpcInspectorMetrics is an autogenerated mock type for the GossipSubRpcInspectorMetrics type +type GossipSubRpcInspectorMetrics struct { + mock.Mock +} + +// OnIHaveMessageIDsReceived provides a mock function with given fields: channel, msgIdCount +func (_m *GossipSubRpcInspectorMetrics) OnIHaveMessageIDsReceived(channel string, msgIdCount int) { + _m.Called(channel, msgIdCount) +} + +// OnIWantMessageIDsReceived provides a mock function with given fields: msgIdCount +func (_m *GossipSubRpcInspectorMetrics) OnIWantMessageIDsReceived(msgIdCount int) { + _m.Called(msgIdCount) +} + +// OnIncomingRpcReceived provides a mock function with given fields: iHaveCount, iWantCount, graftCount, pruneCount, msgCount +func (_m *GossipSubRpcInspectorMetrics) OnIncomingRpcReceived(iHaveCount int, iWantCount int, graftCount int, pruneCount int, msgCount int) { + _m.Called(iHaveCount, iWantCount, graftCount, pruneCount, msgCount) +} + +// NewGossipSubRpcInspectorMetrics creates a new instance of GossipSubRpcInspectorMetrics. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGossipSubRpcInspectorMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *GossipSubRpcInspectorMetrics { + mock := &GossipSubRpcInspectorMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/gossip_sub_rpc_validation_inspector_metrics.go b/module/mock/gossip_sub_rpc_validation_inspector_metrics.go new file mode 100644 index 00000000000..eb61983370e --- /dev/null +++ b/module/mock/gossip_sub_rpc_validation_inspector_metrics.go @@ -0,0 +1,170 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + mock "github.com/stretchr/testify/mock" + + p2pmsg "github.com/onflow/flow-go/network/p2p/message" + + time "time" +) + +// GossipSubRpcValidationInspectorMetrics is an autogenerated mock type for the GossipSubRpcValidationInspectorMetrics type +type GossipSubRpcValidationInspectorMetrics struct { + mock.Mock +} + +// AsyncProcessingFinished provides a mock function with given fields: duration +func (_m *GossipSubRpcValidationInspectorMetrics) AsyncProcessingFinished(duration time.Duration) { + _m.Called(duration) +} + +// AsyncProcessingStarted provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) AsyncProcessingStarted() { + _m.Called() +} + +// OnActiveClusterIDsNotSetErr provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnActiveClusterIDsNotSetErr() { + _m.Called() +} + +// OnControlMessagesTruncated provides a mock function with given fields: messageType, diff +func (_m *GossipSubRpcValidationInspectorMetrics) OnControlMessagesTruncated(messageType p2pmsg.ControlMessageType, diff int) { + _m.Called(messageType, diff) +} + +// OnGraftDuplicateTopicIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnGraftDuplicateTopicIdsExceedThreshold() { + _m.Called() +} + +// OnGraftInvalidTopicIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnGraftInvalidTopicIdsExceedThreshold() { + _m.Called() +} + +// OnGraftMessageInspected provides a mock function with given fields: duplicateTopicIds, invalidTopicIds +func (_m *GossipSubRpcValidationInspectorMetrics) OnGraftMessageInspected(duplicateTopicIds int, invalidTopicIds int) { + _m.Called(duplicateTopicIds, invalidTopicIds) +} + +// OnIHaveControlMessageIdsTruncated provides a mock function with given fields: diff +func (_m *GossipSubRpcValidationInspectorMetrics) OnIHaveControlMessageIdsTruncated(diff int) { + _m.Called(diff) +} + +// OnIHaveDuplicateMessageIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnIHaveDuplicateMessageIdsExceedThreshold() { + _m.Called() +} + +// OnIHaveDuplicateTopicIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnIHaveDuplicateTopicIdsExceedThreshold() { + _m.Called() +} + +// OnIHaveInvalidTopicIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnIHaveInvalidTopicIdsExceedThreshold() { + _m.Called() +} + +// OnIHaveMessageIDsReceived provides a mock function with given fields: channel, msgIdCount +func (_m *GossipSubRpcValidationInspectorMetrics) OnIHaveMessageIDsReceived(channel 
string, msgIdCount int) { + _m.Called(channel, msgIdCount) +} + +// OnIHaveMessagesInspected provides a mock function with given fields: duplicateTopicIds, duplicateMessageIds, invalidTopicIds +func (_m *GossipSubRpcValidationInspectorMetrics) OnIHaveMessagesInspected(duplicateTopicIds int, duplicateMessageIds int, invalidTopicIds int) { + _m.Called(duplicateTopicIds, duplicateMessageIds, invalidTopicIds) +} + +// OnIWantCacheMissMessageIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnIWantCacheMissMessageIdsExceedThreshold() { + _m.Called() +} + +// OnIWantControlMessageIdsTruncated provides a mock function with given fields: diff +func (_m *GossipSubRpcValidationInspectorMetrics) OnIWantControlMessageIdsTruncated(diff int) { + _m.Called(diff) +} + +// OnIWantDuplicateMessageIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnIWantDuplicateMessageIdsExceedThreshold() { + _m.Called() +} + +// OnIWantMessageIDsReceived provides a mock function with given fields: msgIdCount +func (_m *GossipSubRpcValidationInspectorMetrics) OnIWantMessageIDsReceived(msgIdCount int) { + _m.Called(msgIdCount) +} + +// OnIWantMessagesInspected provides a mock function with given fields: duplicateCount, cacheMissCount +func (_m *GossipSubRpcValidationInspectorMetrics) OnIWantMessagesInspected(duplicateCount int, cacheMissCount int) { + _m.Called(duplicateCount, cacheMissCount) +} + +// OnIncomingRpcReceived provides a mock function with given fields: iHaveCount, iWantCount, graftCount, pruneCount, msgCount +func (_m *GossipSubRpcValidationInspectorMetrics) OnIncomingRpcReceived(iHaveCount int, iWantCount int, graftCount int, pruneCount int, msgCount int) { + _m.Called(iHaveCount, iWantCount, graftCount, pruneCount, msgCount) +} + +// OnInvalidControlMessageNotificationSent provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnInvalidControlMessageNotificationSent() { + _m.Called() +} + +// OnInvalidTopicIdDetectedForControlMessage provides a mock function with given fields: messageType +func (_m *GossipSubRpcValidationInspectorMetrics) OnInvalidTopicIdDetectedForControlMessage(messageType p2pmsg.ControlMessageType) { + _m.Called(messageType) +} + +// OnPruneDuplicateTopicIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnPruneDuplicateTopicIdsExceedThreshold() { + _m.Called() +} + +// OnPruneInvalidTopicIdsExceedThreshold provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnPruneInvalidTopicIdsExceedThreshold() { + _m.Called() +} + +// OnPruneMessageInspected provides a mock function with given fields: duplicateTopicIds, invalidTopicIds +func (_m *GossipSubRpcValidationInspectorMetrics) OnPruneMessageInspected(duplicateTopicIds int, invalidTopicIds int) { + _m.Called(duplicateTopicIds, invalidTopicIds) +} + +// OnPublishMessageInspected provides a mock function with given fields: totalErrCount, invalidTopicIdsCount, invalidSubscriptionsCount, invalidSendersCount +func (_m *GossipSubRpcValidationInspectorMetrics) OnPublishMessageInspected(totalErrCount int, invalidTopicIdsCount int, invalidSubscriptionsCount int, invalidSendersCount int) { + _m.Called(totalErrCount, invalidTopicIdsCount, invalidSubscriptionsCount, invalidSendersCount) +} + +// OnPublishMessagesInspectionErrorExceedsThreshold provides a mock function with no fields +func (_m 
*GossipSubRpcValidationInspectorMetrics) OnPublishMessagesInspectionErrorExceedsThreshold() { + _m.Called() +} + +// OnRpcRejectedFromUnknownSender provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnRpcRejectedFromUnknownSender() { + _m.Called() +} + +// OnUnstakedPeerInspectionFailed provides a mock function with no fields +func (_m *GossipSubRpcValidationInspectorMetrics) OnUnstakedPeerInspectionFailed() { + _m.Called() +} + +// NewGossipSubRpcValidationInspectorMetrics creates a new instance of GossipSubRpcValidationInspectorMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGossipSubRpcValidationInspectorMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *GossipSubRpcValidationInspectorMetrics { + mock := &GossipSubRpcValidationInspectorMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/gossip_sub_scoring_metrics.go b/module/mock/gossip_sub_scoring_metrics.go index 63484e7bf4d..b71dc48f749 100644 --- a/module/mock/gossip_sub_scoring_metrics.go +++ b/module/mock/gossip_sub_scoring_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -59,13 +59,12 @@ func (_m *GossipSubScoringMetrics) SetWarningStateCount(_a0 uint) { _m.Called(_a0) } -type mockConstructorTestingTNewGossipSubScoringMetrics interface { +// NewGossipSubScoringMetrics creates a new instance of GossipSubScoringMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGossipSubScoringMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewGossipSubScoringMetrics creates a new instance of GossipSubScoringMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubScoringMetrics(t mockConstructorTestingTNewGossipSubScoringMetrics) *GossipSubScoringMetrics { +}) *GossipSubScoringMetrics { mock := &GossipSubScoringMetrics{} mock.Mock.Test(t) diff --git a/module/mock/gossip_sub_scoring_registry_metrics.go b/module/mock/gossip_sub_scoring_registry_metrics.go new file mode 100644 index 00000000000..a93a480a348 --- /dev/null +++ b/module/mock/gossip_sub_scoring_registry_metrics.go @@ -0,0 +1,34 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// GossipSubScoringRegistryMetrics is an autogenerated mock type for the GossipSubScoringRegistryMetrics type +type GossipSubScoringRegistryMetrics struct { + mock.Mock +} + +// DuplicateMessagePenalties provides a mock function with given fields: penalty +func (_m *GossipSubScoringRegistryMetrics) DuplicateMessagePenalties(penalty float64) { + _m.Called(penalty) +} + +// DuplicateMessagesCounts provides a mock function with given fields: count +func (_m *GossipSubScoringRegistryMetrics) DuplicateMessagesCounts(count float64) { + _m.Called(count) +} + +// NewGossipSubScoringRegistryMetrics creates a new instance of GossipSubScoringRegistryMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewGossipSubScoringRegistryMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *GossipSubScoringRegistryMetrics { + mock := &GossipSubScoringRegistryMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/grpc_connection_pool_metrics.go b/module/mock/grpc_connection_pool_metrics.go new file mode 100644 index 00000000000..98e13123393 --- /dev/null +++ b/module/mock/grpc_connection_pool_metrics.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// GRPCConnectionPoolMetrics is an autogenerated mock type for the GRPCConnectionPoolMetrics type +type GRPCConnectionPoolMetrics struct { + mock.Mock +} + +// ConnectionAddedToPool provides a mock function with no fields +func (_m *GRPCConnectionPoolMetrics) ConnectionAddedToPool() { + _m.Called() +} + +// ConnectionFromPoolEvicted provides a mock function with no fields +func (_m *GRPCConnectionPoolMetrics) ConnectionFromPoolEvicted() { + _m.Called() +} + +// ConnectionFromPoolInvalidated provides a mock function with no fields +func (_m *GRPCConnectionPoolMetrics) ConnectionFromPoolInvalidated() { + _m.Called() +} + +// ConnectionFromPoolReused provides a mock function with no fields +func (_m *GRPCConnectionPoolMetrics) ConnectionFromPoolReused() { + _m.Called() +} + +// ConnectionFromPoolUpdated provides a mock function with no fields +func (_m *GRPCConnectionPoolMetrics) ConnectionFromPoolUpdated() { + _m.Called() +} + +// NewConnectionEstablished provides a mock function with no fields +func (_m *GRPCConnectionPoolMetrics) NewConnectionEstablished() { + _m.Called() +} + +// TotalConnectionsInPool provides a mock function with given fields: connectionCount, connectionPoolSize +func (_m *GRPCConnectionPoolMetrics) TotalConnectionsInPool(connectionCount uint, connectionPoolSize uint) { + _m.Called(connectionCount, connectionPoolSize) +} + +// NewGRPCConnectionPoolMetrics creates a new instance of GRPCConnectionPoolMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGRPCConnectionPoolMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *GRPCConnectionPoolMetrics { + mock := &GRPCConnectionPoolMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/hero_cache_metrics.go b/module/mock/hero_cache_metrics.go index 139cca95b2a..4459b8a27a8 100644 --- a/module/mock/hero_cache_metrics.go +++ b/module/mock/hero_cache_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -14,22 +14,22 @@ func (_m *HeroCacheMetrics) BucketAvailableSlots(_a0 uint64, _a1 uint64) { _m.Called(_a0, _a1) } -// OnEntityEjectionDueToEmergency provides a mock function with given fields: +// OnEntityEjectionDueToEmergency provides a mock function with no fields func (_m *HeroCacheMetrics) OnEntityEjectionDueToEmergency() { _m.Called() } -// OnEntityEjectionDueToFullCapacity provides a mock function with given fields: +// OnEntityEjectionDueToFullCapacity provides a mock function with no fields func (_m *HeroCacheMetrics) OnEntityEjectionDueToFullCapacity() { _m.Called() } -// OnKeyGetFailure provides a mock function with given fields: +// OnKeyGetFailure provides a mock function with no fields func (_m *HeroCacheMetrics) OnKeyGetFailure() { _m.Called() } -// OnKeyGetSuccess provides a mock function with given fields: +// OnKeyGetSuccess provides a mock function with no fields func (_m *HeroCacheMetrics) OnKeyGetSuccess() { _m.Called() } @@ -39,12 +39,12 @@ func (_m *HeroCacheMetrics) OnKeyPutAttempt(size uint32) { _m.Called(size) } -// OnKeyPutDeduplicated provides a mock function with given fields: +// OnKeyPutDeduplicated provides a mock function with no fields func (_m *HeroCacheMetrics) OnKeyPutDeduplicated() { _m.Called() } -// OnKeyPutDrop provides a mock function with given fields: +// OnKeyPutDrop provides a mock function with no fields func (_m *HeroCacheMetrics) OnKeyPutDrop() { _m.Called() } @@ -59,13 +59,12 @@ func (_m *HeroCacheMetrics) OnKeyRemoved(size uint32) { _m.Called(size) } -type mockConstructorTestingTNewHeroCacheMetrics interface { +// NewHeroCacheMetrics creates a new instance of HeroCacheMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHeroCacheMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewHeroCacheMetrics creates a new instance of HeroCacheMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewHeroCacheMetrics(t mockConstructorTestingTNewHeroCacheMetrics) *HeroCacheMetrics { +}) *HeroCacheMetrics { mock := &HeroCacheMetrics{} mock.Mock.Test(t) diff --git a/module/mock/hot_stuff.go b/module/mock/hot_stuff.go index af949a227e8..1d1caa7c72b 100644 --- a/module/mock/hot_stuff.go +++ b/module/mock/hot_stuff.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -14,10 +14,14 @@ type HotStuff struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *HotStuff) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -30,10 +34,14 @@ func (_m *HotStuff) Done() <-chan struct{} { return r0 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *HotStuff) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -52,17 +60,16 @@ func (_m *HotStuff) Start(_a0 irrecoverable.SignalerContext) { } // SubmitProposal provides a mock function with given fields: proposal -func (_m *HotStuff) SubmitProposal(proposal *model.Proposal) { +func (_m *HotStuff) SubmitProposal(proposal *model.SignedProposal) { _m.Called(proposal) } -type mockConstructorTestingTNewHotStuff interface { +// NewHotStuff creates a new instance of HotStuff. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHotStuff(t interface { mock.TestingT Cleanup(func()) -} - -// NewHotStuff creates a new instance of HotStuff. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewHotStuff(t mockConstructorTestingTNewHotStuff) *HotStuff { +}) *HotStuff { mock := &HotStuff{} mock.Mock.Test(t) diff --git a/module/mock/hot_stuff_follower.go b/module/mock/hot_stuff_follower.go index 23c43d387cd..04dd13126be 100644 --- a/module/mock/hot_stuff_follower.go +++ b/module/mock/hot_stuff_follower.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -19,10 +19,14 @@ func (_m *HotStuffFollower) AddCertifiedBlock(certifiedBlock *model.CertifiedBlo _m.Called(certifiedBlock) } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *HotStuffFollower) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -35,10 +39,14 @@ func (_m *HotStuffFollower) Done() <-chan struct{} { return r0 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *HotStuffFollower) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -56,13 +64,12 @@ func (_m *HotStuffFollower) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewHotStuffFollower interface { +// NewHotStuffFollower creates a new instance of HotStuffFollower. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHotStuffFollower(t interface { mock.TestingT Cleanup(func()) -} - -// NewHotStuffFollower creates a new instance of HotStuffFollower. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewHotStuffFollower(t mockConstructorTestingTNewHotStuffFollower) *HotStuffFollower { +}) *HotStuffFollower { mock := &HotStuffFollower{} mock.Mock.Test(t) diff --git a/module/mock/hotstuff_metrics.go b/module/mock/hotstuff_metrics.go index 79760994bad..296e7a1d8e1 100644 --- a/module/mock/hotstuff_metrics.go +++ b/module/mock/hotstuff_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -23,12 +23,12 @@ func (_m *HotstuffMetrics) CommitteeProcessingDuration(duration time.Duration) { _m.Called(duration) } -// CountSkipped provides a mock function with given fields: +// CountSkipped provides a mock function with no fields func (_m *HotstuffMetrics) CountSkipped() { _m.Called() } -// CountTimeout provides a mock function with given fields: +// CountTimeout provides a mock function with no fields func (_m *HotstuffMetrics) CountTimeout() { _m.Called() } @@ -78,6 +78,11 @@ func (_m *HotstuffMetrics) SignerProcessingDuration(duration time.Duration) { _m.Called(duration) } +// TimeoutCollectorsRange provides a mock function with given fields: lowestRetainedView, newestViewCreatedCollector, activeCollectors +func (_m *HotstuffMetrics) TimeoutCollectorsRange(lowestRetainedView uint64, newestViewCreatedCollector uint64, activeCollectors int) { + _m.Called(lowestRetainedView, newestViewCreatedCollector, activeCollectors) +} + // TimeoutObjectProcessingDuration provides a mock function with given fields: duration func (_m *HotstuffMetrics) TimeoutObjectProcessingDuration(duration time.Duration) { _m.Called(duration) @@ -93,13 +98,12 @@ func (_m *HotstuffMetrics) VoteProcessingDuration(duration time.Duration) { _m.Called(duration) } -type mockConstructorTestingTNewHotstuffMetrics interface { +// NewHotstuffMetrics creates a new instance of HotstuffMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHotstuffMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewHotstuffMetrics creates a new instance of HotstuffMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewHotstuffMetrics(t mockConstructorTestingTNewHotstuffMetrics) *HotstuffMetrics { +}) *HotstuffMetrics { mock := &HotstuffMetrics{} mock.Mock.Test(t) diff --git a/module/mock/identifier_provider.go b/module/mock/identifier_provider.go index 8aad36e546c..5836f825c4e 100644 --- a/module/mock/identifier_provider.go +++ b/module/mock/identifier_provider.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -12,10 +12,14 @@ type IdentifierProvider struct { mock.Mock } -// Identifiers provides a mock function with given fields: +// Identifiers provides a mock function with no fields func (_m *IdentifierProvider) Identifiers() flow.IdentifierList { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Identifiers") + } + var r0 flow.IdentifierList if rf, ok := ret.Get(0).(func() flow.IdentifierList); ok { r0 = rf() @@ -28,13 +32,12 @@ func (_m *IdentifierProvider) Identifiers() flow.IdentifierList { return r0 } -type mockConstructorTestingTNewIdentifierProvider interface { +// NewIdentifierProvider creates a new instance of IdentifierProvider. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIdentifierProvider(t interface { mock.TestingT Cleanup(func()) -} - -// NewIdentifierProvider creates a new instance of IdentifierProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewIdentifierProvider(t mockConstructorTestingTNewIdentifierProvider) *IdentifierProvider { +}) *IdentifierProvider { mock := &IdentifierProvider{} mock.Mock.Test(t) diff --git a/module/mock/identity_provider.go b/module/mock/identity_provider.go index 925583a40d0..b2641ffb105 100644 --- a/module/mock/identity_provider.go +++ b/module/mock/identity_provider.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -18,6 +18,10 @@ type IdentityProvider struct { func (_m *IdentityProvider) ByNodeID(_a0 flow.Identifier) (*flow.Identity, bool) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ByNodeID") + } + var r0 *flow.Identity var r1 bool if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Identity, bool)); ok { @@ -44,6 +48,10 @@ func (_m *IdentityProvider) ByNodeID(_a0 flow.Identifier) (*flow.Identity, bool) func (_m *IdentityProvider) ByPeerID(_a0 peer.ID) (*flow.Identity, bool) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ByPeerID") + } + var r0 *flow.Identity var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (*flow.Identity, bool)); ok { @@ -67,11 +75,15 @@ func (_m *IdentityProvider) ByPeerID(_a0 peer.ID) (*flow.Identity, bool) { } // Identities provides a mock function with given fields: _a0 -func (_m *IdentityProvider) Identities(_a0 flow.IdentityFilter) flow.IdentityList { +func (_m *IdentityProvider) Identities(_a0 flow.IdentityFilter[flow.Identity]) flow.IdentityList { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Identities") + } + var r0 flow.IdentityList - if rf, ok := ret.Get(0).(func(flow.IdentityFilter) flow.IdentityList); ok { + if rf, ok := ret.Get(0).(func(flow.IdentityFilter[flow.Identity]) flow.IdentityList); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { @@ -82,13 +94,12 @@ func (_m *IdentityProvider) Identities(_a0 flow.IdentityFilter) flow.IdentityLis return r0 } -type mockConstructorTestingTNewIdentityProvider interface { +// NewIdentityProvider creates a new instance of IdentityProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIdentityProvider(t interface { mock.TestingT Cleanup(func()) -} - -// NewIdentityProvider creates a new instance of IdentityProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewIdentityProvider(t mockConstructorTestingTNewIdentityProvider) *IdentityProvider { +}) *IdentityProvider { mock := &IdentityProvider{} mock.Mock.Test(t) diff --git a/module/mock/iterator_creator.go b/module/mock/iterator_creator.go new file mode 100644 index 00000000000..82d6600d155 --- /dev/null +++ b/module/mock/iterator_creator.go @@ -0,0 +1,84 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + module "github.com/onflow/flow-go/module" + mock "github.com/stretchr/testify/mock" +) + +// IteratorCreator is an autogenerated mock type for the IteratorCreator type +type IteratorCreator struct { + mock.Mock +} + +// Create provides a mock function with no fields +func (_m *IteratorCreator) Create() (module.BlockIterator, bool, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 module.BlockIterator + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func() (module.BlockIterator, bool, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() module.BlockIterator); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(module.BlockIterator) + } + } + + if rf, ok := ret.Get(1).(func() bool); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// IteratorState provides a mock function with no fields +func (_m *IteratorCreator) IteratorState() module.IteratorStateReader { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IteratorState") + } + + var r0 module.IteratorStateReader + if rf, ok := ret.Get(0).(func() module.IteratorStateReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(module.IteratorStateReader) + } + } + + return r0 +} + +// NewIteratorCreator creates a new instance of IteratorCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIteratorCreator(t interface { + mock.TestingT + Cleanup(func()) +}) *IteratorCreator { + mock := &IteratorCreator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/iterator_state.go b/module/mock/iterator_state.go new file mode 100644 index 00000000000..6b832d9567f --- /dev/null +++ b/module/mock/iterator_state.go @@ -0,0 +1,70 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// IteratorState is an autogenerated mock type for the IteratorState type +type IteratorState struct { + mock.Mock +} + +// LoadState provides a mock function with no fields +func (_m *IteratorState) LoadState() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LoadState") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SaveState provides a mock function with given fields: _a0 +func (_m *IteratorState) SaveState(_a0 uint64) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SaveState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewIteratorState creates a new instance of IteratorState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewIteratorState(t interface { + mock.TestingT + Cleanup(func()) +}) *IteratorState { + mock := &IteratorState{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/iterator_state_reader.go b/module/mock/iterator_state_reader.go new file mode 100644 index 00000000000..cac08633302 --- /dev/null +++ b/module/mock/iterator_state_reader.go @@ -0,0 +1,52 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// IteratorStateReader is an autogenerated mock type for the IteratorStateReader type +type IteratorStateReader struct { + mock.Mock +} + +// LoadState provides a mock function with no fields +func (_m *IteratorStateReader) LoadState() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LoadState") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewIteratorStateReader creates a new instance of IteratorStateReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIteratorStateReader(t interface { + mock.TestingT + Cleanup(func()) +}) *IteratorStateReader { + mock := &IteratorStateReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/iterator_state_writer.go b/module/mock/iterator_state_writer.go new file mode 100644 index 00000000000..9801672458c --- /dev/null +++ b/module/mock/iterator_state_writer.go @@ -0,0 +1,42 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// IteratorStateWriter is an autogenerated mock type for the IteratorStateWriter type +type IteratorStateWriter struct { + mock.Mock +} + +// SaveState provides a mock function with given fields: _a0 +func (_m *IteratorStateWriter) SaveState(_a0 uint64) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SaveState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewIteratorStateWriter creates a new instance of IteratorStateWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIteratorStateWriter(t interface { + mock.TestingT + Cleanup(func()) +}) *IteratorStateWriter { + mock := &IteratorStateWriter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/job.go b/module/mock/job.go index 5f7a390fc33..f2b04faaf0a 100644 --- a/module/mock/job.go +++ b/module/mock/job.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -12,10 +12,14 @@ type Job struct { mock.Mock } -// ID provides a mock function with given fields: +// ID provides a mock function with no fields func (_m *Job) ID() module.JobID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ID") + } + var r0 module.JobID if rf, ok := ret.Get(0).(func() module.JobID); ok { r0 = rf() @@ -26,13 +30,12 @@ func (_m *Job) ID() module.JobID { return r0 } -type mockConstructorTestingTNewJob interface { +// NewJob creates a new instance of Job. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewJob(t interface { mock.TestingT Cleanup(func()) -} - -// NewJob creates a new instance of Job. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewJob(t mockConstructorTestingTNewJob) *Job { +}) *Job { mock := &Job{} mock.Mock.Test(t) diff --git a/module/mock/job_consumer.go b/module/mock/job_consumer.go index 346231f09fc..670fad92308 100644 --- a/module/mock/job_consumer.go +++ b/module/mock/job_consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -12,15 +12,19 @@ type JobConsumer struct { mock.Mock } -// Check provides a mock function with given fields: +// Check provides a mock function with no fields func (_m *JobConsumer) Check() { _m.Called() } -// LastProcessedIndex provides a mock function with given fields: +// LastProcessedIndex provides a mock function with no fields func (_m *JobConsumer) LastProcessedIndex() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LastProcessedIndex") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -35,6 +39,10 @@ func (_m *JobConsumer) LastProcessedIndex() uint64 { func (_m *JobConsumer) NotifyJobIsDone(_a0 module.JobID) uint64 { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for NotifyJobIsDone") + } + var r0 uint64 if rf, ok := ret.Get(0).(func(module.JobID) uint64); ok { r0 = rf(_a0) @@ -45,10 +53,14 @@ func (_m *JobConsumer) NotifyJobIsDone(_a0 module.JobID) uint64 { return r0 } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m *JobConsumer) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -59,13 +71,17 @@ func (_m *JobConsumer) Size() uint { return r0 } -// Start provides a mock function with given fields: defaultIndex -func (_m *JobConsumer) Start(defaultIndex uint64) error { - ret := _m.Called(defaultIndex) +// Start provides a mock function with no fields +func (_m *JobConsumer) Start() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Start") + } var r0 error - if rf, ok := ret.Get(0).(func(uint64) error); ok { - r0 = rf(defaultIndex) + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() } else { r0 = ret.Error(0) } @@ -73,18 +89,17 @@ func (_m *JobConsumer) Start(defaultIndex uint64) error { return r0 } -// Stop provides a mock function with given fields: +// Stop provides a mock function with no fields func (_m *JobConsumer) Stop() { _m.Called() } -type mockConstructorTestingTNewJobConsumer interface { +// NewJobConsumer creates a new instance of JobConsumer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewJobConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewJobConsumer creates a new instance of JobConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewJobConsumer(t mockConstructorTestingTNewJobConsumer) *JobConsumer { +}) *JobConsumer { mock := &JobConsumer{} mock.Mock.Test(t) diff --git a/module/mock/job_queue.go b/module/mock/job_queue.go index d54249370c3..7f6356965e2 100644 --- a/module/mock/job_queue.go +++ b/module/mock/job_queue.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type JobQueue struct { func (_m *JobQueue) Add(job module.Job) error { ret := _m.Called(job) + if len(ret) == 0 { + panic("no return value specified for Add") + } + var r0 error if rf, ok := ret.Get(0).(func(module.Job) error); ok { r0 = rf(job) @@ -26,13 +30,12 @@ func (_m *JobQueue) Add(job module.Job) error { return r0 } -type mockConstructorTestingTNewJobQueue interface { +// NewJobQueue creates a new instance of JobQueue. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewJobQueue(t interface { mock.TestingT Cleanup(func()) -} - -// NewJobQueue creates a new instance of JobQueue. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewJobQueue(t mockConstructorTestingTNewJobQueue) *JobQueue { +}) *JobQueue { mock := &JobQueue{} mock.Mock.Test(t) diff --git a/module/mock/jobs.go b/module/mock/jobs.go index 65e73327476..6a7376b2d07 100644 --- a/module/mock/jobs.go +++ b/module/mock/jobs.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type Jobs struct { func (_m *Jobs) AtIndex(index uint64) (module.Job, error) { ret := _m.Called(index) + if len(ret) == 0 { + panic("no return value specified for AtIndex") + } + var r0 module.Job var r1 error if rf, ok := ret.Get(0).(func(uint64) (module.Job, error)); ok { @@ -38,10 +42,14 @@ func (_m *Jobs) AtIndex(index uint64) (module.Job, error) { return r0, r1 } -// Head provides a mock function with given fields: +// Head provides a mock function with no fields func (_m *Jobs) Head() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Head") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -62,13 +70,12 @@ func (_m *Jobs) Head() (uint64, error) { return r0, r1 } -type mockConstructorTestingTNewJobs interface { +// NewJobs creates a new instance of Jobs. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewJobs(t interface { mock.TestingT Cleanup(func()) -} - -// NewJobs creates a new instance of Jobs. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewJobs(t mockConstructorTestingTNewJobs) *Jobs { +}) *Jobs { mock := &Jobs{} mock.Mock.Test(t) diff --git a/module/mock/ledger_metrics.go b/module/mock/ledger_metrics.go index 9f0fbbbc1d8..e2168fe1b32 100644 --- a/module/mock/ledger_metrics.go +++ b/module/mock/ledger_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -73,7 +73,7 @@ func (_m *LedgerMetrics) ReadValuesSize(byte uint64) { _m.Called(byte) } -// UpdateCount provides a mock function with given fields: +// UpdateCount provides a mock function with no fields func (_m *LedgerMetrics) UpdateCount() { _m.Called() } @@ -98,13 +98,12 @@ func (_m *LedgerMetrics) UpdateValuesSize(byte uint64) { _m.Called(byte) } -type mockConstructorTestingTNewLedgerMetrics interface { +// NewLedgerMetrics creates a new instance of LedgerMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLedgerMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewLedgerMetrics creates a new instance of LedgerMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewLedgerMetrics(t mockConstructorTestingTNewLedgerMetrics) *LedgerMetrics { +}) *LedgerMetrics { mock := &LedgerMetrics{} mock.Mock.Test(t) diff --git a/module/mock/lib_p2_p_connection_metrics.go b/module/mock/lib_p2_p_connection_metrics.go index 8e0bf8366de..81baeeae880 100644 --- a/module/mock/lib_p2_p_connection_metrics.go +++ b/module/mock/lib_p2_p_connection_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -19,13 +19,12 @@ func (_m *LibP2PConnectionMetrics) OutboundConnections(connectionCount uint) { _m.Called(connectionCount) } -type mockConstructorTestingTNewLibP2PConnectionMetrics interface { +// NewLibP2PConnectionMetrics creates a new instance of LibP2PConnectionMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLibP2PConnectionMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewLibP2PConnectionMetrics creates a new instance of LibP2PConnectionMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewLibP2PConnectionMetrics(t mockConstructorTestingTNewLibP2PConnectionMetrics) *LibP2PConnectionMetrics { +}) *LibP2PConnectionMetrics { mock := &LibP2PConnectionMetrics{} mock.Mock.Test(t) diff --git a/module/mock/lib_p2_p_metrics.go b/module/mock/lib_p2_p_metrics.go index 78b39fdae55..7526b312810 100644 --- a/module/mock/lib_p2_p_metrics.go +++ b/module/mock/lib_p2_p_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -8,6 +8,8 @@ import ( network "github.com/libp2p/go-libp2p/core/network" + p2pmsg "github.com/onflow/flow-go/network/p2p/message" + peer "github.com/libp2p/go-libp2p/core/peer" protocol "github.com/libp2p/go-libp2p/core/protocol" @@ -50,6 +52,16 @@ func (_m *LibP2PMetrics) AllowStream(p peer.ID, dir network.Direction) { _m.Called(p, dir) } +// AsyncProcessingFinished provides a mock function with given fields: duration +func (_m *LibP2PMetrics) AsyncProcessingFinished(duration time.Duration) { + _m.Called(duration) +} + +// AsyncProcessingStarted provides a mock function with no fields +func (_m *LibP2PMetrics) AsyncProcessingStarted() { + _m.Called() +} + // BlockConn provides a mock function with given fields: dir, usefd func (_m *LibP2PMetrics) BlockConn(dir network.Direction, usefd bool) { _m.Called(dir, usefd) @@ -95,11 +107,26 @@ func (_m *LibP2PMetrics) DNSLookupDuration(duration time.Duration) { _m.Called(duration) } +// DuplicateMessagePenalties provides a mock function with given fields: penalty +func (_m *LibP2PMetrics) DuplicateMessagePenalties(penalty float64) { + _m.Called(penalty) +} + +// DuplicateMessagesCounts provides a mock function with given fields: count +func (_m *LibP2PMetrics) DuplicateMessagesCounts(count float64) { + _m.Called(count) +} + // InboundConnections provides a mock function with given fields: connectionCount func (_m *LibP2PMetrics) InboundConnections(connectionCount uint) { _m.Called(connectionCount) } +// OnActiveClusterIDsNotSetErr provides a mock function with no fields +func (_m *LibP2PMetrics) OnActiveClusterIDsNotSetErr() { + _m.Called() +} + // OnAppSpecificScoreUpdated provides a mock function with given fields: _a0 func (_m *LibP2PMetrics) OnAppSpecificScoreUpdated(_a0 float64) { _m.Called(_a0) @@ -110,26 +137,41 @@ func (_m *LibP2PMetrics) OnBehaviourPenaltyUpdated(_a0 float64) { _m.Called(_a0) } -// OnDNSCacheHit provides a mock function with given fields: +// OnControlMessagesTruncated provides a mock function with given fields: messageType, diff +func (_m *LibP2PMetrics) OnControlMessagesTruncated(messageType p2pmsg.ControlMessageType, diff int) { + _m.Called(messageType, diff) +} + +// OnDNSCacheHit provides a mock function with no fields func (_m *LibP2PMetrics) OnDNSCacheHit() { _m.Called() } -// OnDNSCacheInvalidated provides a mock function with given fields: +// OnDNSCacheInvalidated provides a mock function with no fields func (_m *LibP2PMetrics) OnDNSCacheInvalidated() { _m.Called() } -// OnDNSCacheMiss provides a mock function with given fields: +// OnDNSCacheMiss provides a mock function with no fields func (_m *LibP2PMetrics) OnDNSCacheMiss() { _m.Called() } -// OnDNSLookupRequestDropped provides a mock function with given fields: +// OnDNSLookupRequestDropped provides a mock function with no fields func (_m *LibP2PMetrics) OnDNSLookupRequestDropped() { _m.Called() } +// OnDialRetryBudgetResetToDefault provides a mock function with no fields +func (_m *LibP2PMetrics) OnDialRetryBudgetResetToDefault() { + _m.Called() +} + +// OnDialRetryBudgetUpdated provides a mock function with given fields: budget +func (_m *LibP2PMetrics) OnDialRetryBudgetUpdated(budget uint64) { + _m.Called(budget) +} + // OnEstablishStreamFailure provides a mock function with given fields: duration, attempts func (_m *LibP2PMetrics) OnEstablishStreamFailure(duration time.Duration, attempts int) { _m.Called(duration, attempts) @@ -140,14 +182,49 @@ func (_m *LibP2PMetrics) OnFirstMessageDeliveredUpdated(_a0 channels.Topic, _a1 
_m.Called(_a0, _a1) } -// OnGraftReceived provides a mock function with given fields: count -func (_m *LibP2PMetrics) OnGraftReceived(count int) { - _m.Called(count) +// OnGraftDuplicateTopicIdsExceedThreshold provides a mock function with no fields +func (_m *LibP2PMetrics) OnGraftDuplicateTopicIdsExceedThreshold() { + _m.Called() } -// OnIHaveReceived provides a mock function with given fields: count -func (_m *LibP2PMetrics) OnIHaveReceived(count int) { - _m.Called(count) +// OnGraftInvalidTopicIdsExceedThreshold provides a mock function with no fields +func (_m *LibP2PMetrics) OnGraftInvalidTopicIdsExceedThreshold() { + _m.Called() +} + +// OnGraftMessageInspected provides a mock function with given fields: duplicateTopicIds, invalidTopicIds +func (_m *LibP2PMetrics) OnGraftMessageInspected(duplicateTopicIds int, invalidTopicIds int) { + _m.Called(duplicateTopicIds, invalidTopicIds) +} + +// OnIHaveControlMessageIdsTruncated provides a mock function with given fields: diff +func (_m *LibP2PMetrics) OnIHaveControlMessageIdsTruncated(diff int) { + _m.Called(diff) +} + +// OnIHaveDuplicateMessageIdsExceedThreshold provides a mock function with no fields +func (_m *LibP2PMetrics) OnIHaveDuplicateMessageIdsExceedThreshold() { + _m.Called() +} + +// OnIHaveDuplicateTopicIdsExceedThreshold provides a mock function with no fields +func (_m *LibP2PMetrics) OnIHaveDuplicateTopicIdsExceedThreshold() { + _m.Called() +} + +// OnIHaveInvalidTopicIdsExceedThreshold provides a mock function with no fields +func (_m *LibP2PMetrics) OnIHaveInvalidTopicIdsExceedThreshold() { + _m.Called() +} + +// OnIHaveMessageIDsReceived provides a mock function with given fields: channel, msgIdCount +func (_m *LibP2PMetrics) OnIHaveMessageIDsReceived(channel string, msgIdCount int) { + _m.Called(channel, msgIdCount) +} + +// OnIHaveMessagesInspected provides a mock function with given fields: duplicateTopicIds, duplicateMessageIds, invalidTopicIds +func (_m *LibP2PMetrics) OnIHaveMessagesInspected(duplicateTopicIds int, duplicateMessageIds int, invalidTopicIds int) { + _m.Called(duplicateTopicIds, duplicateMessageIds, invalidTopicIds) } // OnIPColocationFactorUpdated provides a mock function with given fields: _a0 @@ -155,23 +232,38 @@ func (_m *LibP2PMetrics) OnIPColocationFactorUpdated(_a0 float64) { _m.Called(_a0) } -// OnIWantReceived provides a mock function with given fields: count -func (_m *LibP2PMetrics) OnIWantReceived(count int) { - _m.Called(count) +// OnIWantCacheMissMessageIdsExceedThreshold provides a mock function with no fields +func (_m *LibP2PMetrics) OnIWantCacheMissMessageIdsExceedThreshold() { + _m.Called() } -// OnIncomingRpcAcceptedFully provides a mock function with given fields: -func (_m *LibP2PMetrics) OnIncomingRpcAcceptedFully() { - _m.Called() +// OnIWantControlMessageIdsTruncated provides a mock function with given fields: diff +func (_m *LibP2PMetrics) OnIWantControlMessageIdsTruncated(diff int) { + _m.Called(diff) } -// OnIncomingRpcAcceptedOnlyForControlMessages provides a mock function with given fields: -func (_m *LibP2PMetrics) OnIncomingRpcAcceptedOnlyForControlMessages() { +// OnIWantDuplicateMessageIdsExceedThreshold provides a mock function with no fields +func (_m *LibP2PMetrics) OnIWantDuplicateMessageIdsExceedThreshold() { _m.Called() } -// OnIncomingRpcRejected provides a mock function with given fields: -func (_m *LibP2PMetrics) OnIncomingRpcRejected() { +// OnIWantMessageIDsReceived provides a mock function with given fields: msgIdCount +func (_m *LibP2PMetrics) 
OnIWantMessageIDsReceived(msgIdCount int) { + _m.Called(msgIdCount) +} + +// OnIWantMessagesInspected provides a mock function with given fields: duplicateCount, cacheMissCount +func (_m *LibP2PMetrics) OnIWantMessagesInspected(duplicateCount int, cacheMissCount int) { + _m.Called(duplicateCount, cacheMissCount) +} + +// OnIncomingRpcReceived provides a mock function with given fields: iHaveCount, iWantCount, graftCount, pruneCount, msgCount +func (_m *LibP2PMetrics) OnIncomingRpcReceived(iHaveCount int, iWantCount int, graftCount int, pruneCount int, msgCount int) { + _m.Called(iHaveCount, iWantCount, graftCount, pruneCount, msgCount) +} + +// OnInvalidControlMessageNotificationSent provides a mock function with no fields +func (_m *LibP2PMetrics) OnInvalidControlMessageNotificationSent() { _m.Called() } @@ -180,21 +272,66 @@ func (_m *LibP2PMetrics) OnInvalidMessageDeliveredUpdated(_a0 channels.Topic, _a _m.Called(_a0, _a1) } +// OnInvalidTopicIdDetectedForControlMessage provides a mock function with given fields: messageType +func (_m *LibP2PMetrics) OnInvalidTopicIdDetectedForControlMessage(messageType p2pmsg.ControlMessageType) { + _m.Called(messageType) +} + // OnLocalMeshSizeUpdated provides a mock function with given fields: topic, size func (_m *LibP2PMetrics) OnLocalMeshSizeUpdated(topic string, size int) { _m.Called(topic, size) } +// OnLocalPeerJoinedTopic provides a mock function with no fields +func (_m *LibP2PMetrics) OnLocalPeerJoinedTopic() { + _m.Called() +} + +// OnLocalPeerLeftTopic provides a mock function with no fields +func (_m *LibP2PMetrics) OnLocalPeerLeftTopic() { + _m.Called() +} + // OnMeshMessageDeliveredUpdated provides a mock function with given fields: _a0, _a1 func (_m *LibP2PMetrics) OnMeshMessageDeliveredUpdated(_a0 channels.Topic, _a1 float64) { _m.Called(_a0, _a1) } +// OnMessageDeliveredToAllSubscribers provides a mock function with given fields: size +func (_m *LibP2PMetrics) OnMessageDeliveredToAllSubscribers(size int) { + _m.Called(size) +} + +// OnMessageDuplicate provides a mock function with given fields: size +func (_m *LibP2PMetrics) OnMessageDuplicate(size int) { + _m.Called(size) +} + +// OnMessageEnteredValidation provides a mock function with given fields: size +func (_m *LibP2PMetrics) OnMessageEnteredValidation(size int) { + _m.Called(size) +} + +// OnMessageRejected provides a mock function with given fields: size, reason +func (_m *LibP2PMetrics) OnMessageRejected(size int, reason string) { + _m.Called(size, reason) +} + +// OnOutboundRpcDropped provides a mock function with no fields +func (_m *LibP2PMetrics) OnOutboundRpcDropped() { + _m.Called() +} + // OnOverallPeerScoreUpdated provides a mock function with given fields: _a0 func (_m *LibP2PMetrics) OnOverallPeerScoreUpdated(_a0 float64) { _m.Called(_a0) } +// OnPeerAddedToProtocol provides a mock function with given fields: _a0 +func (_m *LibP2PMetrics) OnPeerAddedToProtocol(_a0 string) { + _m.Called(_a0) +} + // OnPeerDialFailure provides a mock function with given fields: duration, attempts func (_m *LibP2PMetrics) OnPeerDialFailure(duration time.Duration, attempts int) { _m.Called(duration, attempts) @@ -205,14 +342,64 @@ func (_m *LibP2PMetrics) OnPeerDialed(duration time.Duration, attempts int) { _m.Called(duration, attempts) } -// OnPruneReceived provides a mock function with given fields: count -func (_m *LibP2PMetrics) OnPruneReceived(count int) { - _m.Called(count) +// OnPeerGraftTopic provides a mock function with given fields: topic +func (_m *LibP2PMetrics) 
OnPeerGraftTopic(topic string) { + _m.Called(topic) } -// OnPublishedGossipMessagesReceived provides a mock function with given fields: count -func (_m *LibP2PMetrics) OnPublishedGossipMessagesReceived(count int) { - _m.Called(count) +// OnPeerPruneTopic provides a mock function with given fields: topic +func (_m *LibP2PMetrics) OnPeerPruneTopic(topic string) { + _m.Called(topic) +} + +// OnPeerRemovedFromProtocol provides a mock function with no fields +func (_m *LibP2PMetrics) OnPeerRemovedFromProtocol() { + _m.Called() +} + +// OnPeerThrottled provides a mock function with no fields +func (_m *LibP2PMetrics) OnPeerThrottled() { + _m.Called() +} + +// OnPruneDuplicateTopicIdsExceedThreshold provides a mock function with no fields +func (_m *LibP2PMetrics) OnPruneDuplicateTopicIdsExceedThreshold() { + _m.Called() +} + +// OnPruneInvalidTopicIdsExceedThreshold provides a mock function with no fields +func (_m *LibP2PMetrics) OnPruneInvalidTopicIdsExceedThreshold() { + _m.Called() +} + +// OnPruneMessageInspected provides a mock function with given fields: duplicateTopicIds, invalidTopicIds +func (_m *LibP2PMetrics) OnPruneMessageInspected(duplicateTopicIds int, invalidTopicIds int) { + _m.Called(duplicateTopicIds, invalidTopicIds) +} + +// OnPublishMessageInspected provides a mock function with given fields: totalErrCount, invalidTopicIdsCount, invalidSubscriptionsCount, invalidSendersCount +func (_m *LibP2PMetrics) OnPublishMessageInspected(totalErrCount int, invalidTopicIdsCount int, invalidSubscriptionsCount int, invalidSendersCount int) { + _m.Called(totalErrCount, invalidTopicIdsCount, invalidSubscriptionsCount, invalidSendersCount) +} + +// OnPublishMessagesInspectionErrorExceedsThreshold provides a mock function with no fields +func (_m *LibP2PMetrics) OnPublishMessagesInspectionErrorExceedsThreshold() { + _m.Called() +} + +// OnRpcReceived provides a mock function with given fields: msgCount, iHaveCount, iWantCount, graftCount, pruneCount +func (_m *LibP2PMetrics) OnRpcReceived(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { + _m.Called(msgCount, iHaveCount, iWantCount, graftCount, pruneCount) +} + +// OnRpcRejectedFromUnknownSender provides a mock function with no fields +func (_m *LibP2PMetrics) OnRpcRejectedFromUnknownSender() { + _m.Called() +} + +// OnRpcSent provides a mock function with given fields: msgCount, iHaveCount, iWantCount, graftCount, pruneCount +func (_m *LibP2PMetrics) OnRpcSent(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { + _m.Called(msgCount, iHaveCount, iWantCount, graftCount, pruneCount) } // OnStreamCreated provides a mock function with given fields: duration, attempts @@ -225,6 +412,16 @@ func (_m *LibP2PMetrics) OnStreamCreationFailure(duration time.Duration, attempt _m.Called(duration, attempts) } +// OnStreamCreationRetryBudgetResetToDefault provides a mock function with no fields +func (_m *LibP2PMetrics) OnStreamCreationRetryBudgetResetToDefault() { + _m.Called() +} + +// OnStreamCreationRetryBudgetUpdated provides a mock function with given fields: budget +func (_m *LibP2PMetrics) OnStreamCreationRetryBudgetUpdated(budget uint64) { + _m.Called(budget) +} + // OnStreamEstablished provides a mock function with given fields: duration, attempts func (_m *LibP2PMetrics) OnStreamEstablished(duration time.Duration, attempts int) { _m.Called(duration, attempts) @@ -235,17 +432,27 @@ func (_m *LibP2PMetrics) OnTimeInMeshUpdated(_a0 channels.Topic, _a1 time.Durati _m.Called(_a0, _a1) } +// 
OnUndeliveredMessage provides a mock function with no fields +func (_m *LibP2PMetrics) OnUndeliveredMessage() { + _m.Called() +} + +// OnUnstakedPeerInspectionFailed provides a mock function with no fields +func (_m *LibP2PMetrics) OnUnstakedPeerInspectionFailed() { + _m.Called() +} + // OutboundConnections provides a mock function with given fields: connectionCount func (_m *LibP2PMetrics) OutboundConnections(connectionCount uint) { _m.Called(connectionCount) } -// RoutingTablePeerAdded provides a mock function with given fields: +// RoutingTablePeerAdded provides a mock function with no fields func (_m *LibP2PMetrics) RoutingTablePeerAdded() { _m.Called() } -// RoutingTablePeerRemoved provides a mock function with given fields: +// RoutingTablePeerRemoved provides a mock function with no fields func (_m *LibP2PMetrics) RoutingTablePeerRemoved() { _m.Called() } @@ -255,13 +462,12 @@ func (_m *LibP2PMetrics) SetWarningStateCount(_a0 uint) { _m.Called(_a0) } -type mockConstructorTestingTNewLibP2PMetrics interface { +// NewLibP2PMetrics creates a new instance of LibP2PMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLibP2PMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewLibP2PMetrics creates a new instance of LibP2PMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewLibP2PMetrics(t mockConstructorTestingTNewLibP2PMetrics) *LibP2PMetrics { +}) *LibP2PMetrics { mock := &LibP2PMetrics{} mock.Mock.Test(t) diff --git a/module/mock/local.go b/module/mock/local.go index 37a980da0cd..db5aa9679fb 100644 --- a/module/mock/local.go +++ b/module/mock/local.go @@ -1,12 +1,12 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
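The local.go hunks just below show two recurring changes: the crypto imports now point at the standalone github.com/onflow/crypto module rather than the in-repo flow-go/crypto path, and every value-returning mock method gains a len(ret) == 0 guard that panics with a descriptive message when no return value was stubbed. A minimal sketch of what the guard means in practice (import alias and address value are illustrative):

    import (
        "testing"

        modulemock "github.com/onflow/flow-go/module/mock"
    )

    func TestLocalAddress(t *testing.T) {
        l := modulemock.NewLocal(t)
        // Without this Return, calling l.Address() now panics with
        // "no return value specified for Address" instead of failing obscurely.
        l.On("Address").Return("access-001.example.net:3569")
        _ = l.Address()
    }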
package mock import ( - crypto "github.com/onflow/flow-go/crypto" + crypto "github.com/onflow/crypto" flow "github.com/onflow/flow-go/model/flow" - hash "github.com/onflow/flow-go/crypto/hash" + hash "github.com/onflow/crypto/hash" mock "github.com/stretchr/testify/mock" ) @@ -16,10 +16,14 @@ type Local struct { mock.Mock } -// Address provides a mock function with given fields: +// Address provides a mock function with no fields func (_m *Local) Address() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Address") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -30,10 +34,14 @@ func (_m *Local) Address() string { return r0 } -// NodeID provides a mock function with given fields: +// NodeID provides a mock function with no fields func (_m *Local) NodeID() flow.Identifier { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for NodeID") + } + var r0 flow.Identifier if rf, ok := ret.Get(0).(func() flow.Identifier); ok { r0 = rf() @@ -46,16 +54,20 @@ func (_m *Local) NodeID() flow.Identifier { return r0 } -// NotMeFilter provides a mock function with given fields: -func (_m *Local) NotMeFilter() flow.IdentityFilter { +// NotMeFilter provides a mock function with no fields +func (_m *Local) NotMeFilter() flow.IdentityFilter[flow.Identity] { ret := _m.Called() - var r0 flow.IdentityFilter - if rf, ok := ret.Get(0).(func() flow.IdentityFilter); ok { + if len(ret) == 0 { + panic("no return value specified for NotMeFilter") + } + + var r0 flow.IdentityFilter[flow.Identity] + if rf, ok := ret.Get(0).(func() flow.IdentityFilter[flow.Identity]); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.IdentityFilter) + r0 = ret.Get(0).(flow.IdentityFilter[flow.Identity]) } } @@ -66,6 +78,10 @@ func (_m *Local) NotMeFilter() flow.IdentityFilter { func (_m *Local) Sign(_a0 []byte, _a1 hash.Hasher) (crypto.Signature, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Sign") + } + var r0 crypto.Signature var r1 error if rf, ok := ret.Get(0).(func([]byte, hash.Hasher) (crypto.Signature, error)); ok { @@ -92,6 +108,10 @@ func (_m *Local) Sign(_a0 []byte, _a1 hash.Hasher) (crypto.Signature, error) { func (_m *Local) SignFunc(_a0 []byte, _a1 hash.Hasher, _a2 func(crypto.PrivateKey, []byte, hash.Hasher) (crypto.Signature, error)) (crypto.Signature, error) { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for SignFunc") + } + var r0 crypto.Signature var r1 error if rf, ok := ret.Get(0).(func([]byte, hash.Hasher, func(crypto.PrivateKey, []byte, hash.Hasher) (crypto.Signature, error)) (crypto.Signature, error)); ok { @@ -114,13 +134,12 @@ func (_m *Local) SignFunc(_a0 []byte, _a1 hash.Hasher, _a2 func(crypto.PrivateKe return r0, r1 } -type mockConstructorTestingTNewLocal interface { +// NewLocal creates a new instance of Local. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLocal(t interface { mock.TestingT Cleanup(func()) -} - -// NewLocal creates a new instance of Local. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewLocal(t mockConstructorTestingTNewLocal) *Local { +}) *Local { mock := &Local{} mock.Mock.Test(t) diff --git a/module/mock/local_gossip_sub_router_metrics.go b/module/mock/local_gossip_sub_router_metrics.go new file mode 100644 index 00000000000..11417d02ba6 --- /dev/null +++ b/module/mock/local_gossip_sub_router_metrics.go @@ -0,0 +1,104 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// LocalGossipSubRouterMetrics is an autogenerated mock type for the LocalGossipSubRouterMetrics type +type LocalGossipSubRouterMetrics struct { + mock.Mock +} + +// OnLocalMeshSizeUpdated provides a mock function with given fields: topic, size +func (_m *LocalGossipSubRouterMetrics) OnLocalMeshSizeUpdated(topic string, size int) { + _m.Called(topic, size) +} + +// OnLocalPeerJoinedTopic provides a mock function with no fields +func (_m *LocalGossipSubRouterMetrics) OnLocalPeerJoinedTopic() { + _m.Called() +} + +// OnLocalPeerLeftTopic provides a mock function with no fields +func (_m *LocalGossipSubRouterMetrics) OnLocalPeerLeftTopic() { + _m.Called() +} + +// OnMessageDeliveredToAllSubscribers provides a mock function with given fields: size +func (_m *LocalGossipSubRouterMetrics) OnMessageDeliveredToAllSubscribers(size int) { + _m.Called(size) +} + +// OnMessageDuplicate provides a mock function with given fields: size +func (_m *LocalGossipSubRouterMetrics) OnMessageDuplicate(size int) { + _m.Called(size) +} + +// OnMessageEnteredValidation provides a mock function with given fields: size +func (_m *LocalGossipSubRouterMetrics) OnMessageEnteredValidation(size int) { + _m.Called(size) +} + +// OnMessageRejected provides a mock function with given fields: size, reason +func (_m *LocalGossipSubRouterMetrics) OnMessageRejected(size int, reason string) { + _m.Called(size, reason) +} + +// OnOutboundRpcDropped provides a mock function with no fields +func (_m *LocalGossipSubRouterMetrics) OnOutboundRpcDropped() { + _m.Called() +} + +// OnPeerAddedToProtocol provides a mock function with given fields: protocol +func (_m *LocalGossipSubRouterMetrics) OnPeerAddedToProtocol(protocol string) { + _m.Called(protocol) +} + +// OnPeerGraftTopic provides a mock function with given fields: topic +func (_m *LocalGossipSubRouterMetrics) OnPeerGraftTopic(topic string) { + _m.Called(topic) +} + +// OnPeerPruneTopic provides a mock function with given fields: topic +func (_m *LocalGossipSubRouterMetrics) OnPeerPruneTopic(topic string) { + _m.Called(topic) +} + +// OnPeerRemovedFromProtocol provides a mock function with no fields +func (_m *LocalGossipSubRouterMetrics) OnPeerRemovedFromProtocol() { + _m.Called() +} + +// OnPeerThrottled provides a mock function with no fields +func (_m *LocalGossipSubRouterMetrics) OnPeerThrottled() { + _m.Called() +} + +// OnRpcReceived provides a mock function with given fields: msgCount, iHaveCount, iWantCount, graftCount, pruneCount +func (_m *LocalGossipSubRouterMetrics) OnRpcReceived(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { + _m.Called(msgCount, iHaveCount, iWantCount, graftCount, pruneCount) +} + +// OnRpcSent provides a mock function with given fields: msgCount, iHaveCount, iWantCount, graftCount, pruneCount +func (_m *LocalGossipSubRouterMetrics) OnRpcSent(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { + _m.Called(msgCount, iHaveCount, iWantCount, graftCount, pruneCount) +} + +// OnUndeliveredMessage provides a mock function with no 
fields +func (_m *LocalGossipSubRouterMetrics) OnUndeliveredMessage() { + _m.Called() +} + +// NewLocalGossipSubRouterMetrics creates a new instance of LocalGossipSubRouterMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLocalGossipSubRouterMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *LocalGossipSubRouterMetrics { + mock := &LocalGossipSubRouterMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/machine_account_metrics.go b/module/mock/machine_account_metrics.go new file mode 100644 index 00000000000..c764f0e2f9b --- /dev/null +++ b/module/mock/machine_account_metrics.go @@ -0,0 +1,39 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// MachineAccountMetrics is an autogenerated mock type for the MachineAccountMetrics type +type MachineAccountMetrics struct { + mock.Mock +} + +// AccountBalance provides a mock function with given fields: bal +func (_m *MachineAccountMetrics) AccountBalance(bal float64) { + _m.Called(bal) +} + +// IsMisconfigured provides a mock function with given fields: misconfigured +func (_m *MachineAccountMetrics) IsMisconfigured(misconfigured bool) { + _m.Called(misconfigured) +} + +// RecommendedMinBalance provides a mock function with given fields: bal +func (_m *MachineAccountMetrics) RecommendedMinBalance(bal float64) { + _m.Called(bal) +} + +// NewMachineAccountMetrics creates a new instance of MachineAccountMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMachineAccountMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *MachineAccountMetrics { + mock := &MachineAccountMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/mempool_metrics.go b/module/mock/mempool_metrics.go index 29de10c7b7c..60e215820dc 100644 --- a/module/mock/mempool_metrics.go +++ b/module/mock/mempool_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -21,6 +21,10 @@ func (_m *MempoolMetrics) MempoolEntries(resource string, entries uint) { func (_m *MempoolMetrics) Register(resource string, entriesFunc module.EntriesFunc) error { ret := _m.Called(resource, entriesFunc) + if len(ret) == 0 { + panic("no return value specified for Register") + } + var r0 error if rf, ok := ret.Get(0).(func(string, module.EntriesFunc) error); ok { r0 = rf(resource, entriesFunc) @@ -31,13 +35,12 @@ func (_m *MempoolMetrics) Register(resource string, entriesFunc module.EntriesFu return r0 } -type mockConstructorTestingTNewMempoolMetrics interface { +// NewMempoolMetrics creates a new instance of MempoolMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMempoolMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewMempoolMetrics creates a new instance of MempoolMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewMempoolMetrics(t mockConstructorTestingTNewMempoolMetrics) *MempoolMetrics { +}) *MempoolMetrics { mock := &MempoolMetrics{} mock.Mock.Test(t) diff --git a/module/mock/network_core_metrics.go b/module/mock/network_core_metrics.go index 63c849fbf27..a391d4f9458 100644 --- a/module/mock/network_core_metrics.go +++ b/module/mock/network_core_metrics.go @@ -1,10 +1,12 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( mock "github.com/stretchr/testify/mock" + peer "github.com/libp2p/go-libp2p/core/peer" + time "time" ) @@ -48,6 +50,21 @@ func (_m *NetworkCoreMetrics) OnMisbehaviorReported(channel string, misbehaviorT _m.Called(channel, misbehaviorType) } +// OnRateLimitedPeer provides a mock function with given fields: pid, role, msgType, topic, reason +func (_m *NetworkCoreMetrics) OnRateLimitedPeer(pid peer.ID, role string, msgType string, topic string, reason string) { + _m.Called(pid, role, msgType, topic, reason) +} + +// OnUnauthorizedMessage provides a mock function with given fields: role, msgType, topic, offense +func (_m *NetworkCoreMetrics) OnUnauthorizedMessage(role string, msgType string, topic string, offense string) { + _m.Called(role, msgType, topic, offense) +} + +// OnViolationReportSkipped provides a mock function with no fields +func (_m *NetworkCoreMetrics) OnViolationReportSkipped() { + _m.Called() +} + // OutboundMessageSent provides a mock function with given fields: sizeBytes, topic, protocol, messageType func (_m *NetworkCoreMetrics) OutboundMessageSent(sizeBytes int, topic string, protocol string, messageType string) { _m.Called(sizeBytes, topic, protocol, messageType) @@ -68,13 +85,12 @@ func (_m *NetworkCoreMetrics) UnicastMessageSendingStarted(topic string) { _m.Called(topic) } -type mockConstructorTestingTNewNetworkCoreMetrics interface { +// NewNetworkCoreMetrics creates a new instance of NetworkCoreMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNetworkCoreMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewNetworkCoreMetrics creates a new instance of NetworkCoreMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewNetworkCoreMetrics(t mockConstructorTestingTNewNetworkCoreMetrics) *NetworkCoreMetrics { +}) *NetworkCoreMetrics { mock := &NetworkCoreMetrics{} mock.Mock.Test(t) diff --git a/module/mock/network_inbound_queue_metrics.go b/module/mock/network_inbound_queue_metrics.go index ed6c4d78f45..090baca793e 100644 --- a/module/mock/network_inbound_queue_metrics.go +++ b/module/mock/network_inbound_queue_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -28,13 +28,12 @@ func (_m *NetworkInboundQueueMetrics) QueueDuration(duration time.Duration, prio _m.Called(duration, priority) } -type mockConstructorTestingTNewNetworkInboundQueueMetrics interface { +// NewNetworkInboundQueueMetrics creates a new instance of NetworkInboundQueueMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNetworkInboundQueueMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewNetworkInboundQueueMetrics creates a new instance of NetworkInboundQueueMetrics. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewNetworkInboundQueueMetrics(t mockConstructorTestingTNewNetworkInboundQueueMetrics) *NetworkInboundQueueMetrics { +}) *NetworkInboundQueueMetrics { mock := &NetworkInboundQueueMetrics{} mock.Mock.Test(t) diff --git a/module/mock/network_metrics.go b/module/mock/network_metrics.go index b1e3742d993..a5bab21f319 100644 --- a/module/mock/network_metrics.go +++ b/module/mock/network_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -8,6 +8,8 @@ import ( network "github.com/libp2p/go-libp2p/core/network" + p2pmsg "github.com/onflow/flow-go/network/p2p/message" + peer "github.com/libp2p/go-libp2p/core/peer" protocol "github.com/libp2p/go-libp2p/core/protocol" @@ -50,6 +52,16 @@ func (_m *NetworkMetrics) AllowStream(p peer.ID, dir network.Direction) { _m.Called(p, dir) } +// AsyncProcessingFinished provides a mock function with given fields: duration +func (_m *NetworkMetrics) AsyncProcessingFinished(duration time.Duration) { + _m.Called(duration) +} + +// AsyncProcessingStarted provides a mock function with no fields +func (_m *NetworkMetrics) AsyncProcessingStarted() { + _m.Called() +} + // BlockConn provides a mock function with given fields: dir, usefd func (_m *NetworkMetrics) BlockConn(dir network.Direction, usefd bool) { _m.Called(dir, usefd) @@ -100,6 +112,16 @@ func (_m *NetworkMetrics) DuplicateInboundMessagesDropped(topic string, _a1 stri _m.Called(topic, _a1, messageType) } +// DuplicateMessagePenalties provides a mock function with given fields: penalty +func (_m *NetworkMetrics) DuplicateMessagePenalties(penalty float64) { + _m.Called(penalty) +} + +// DuplicateMessagesCounts provides a mock function with given fields: count +func (_m *NetworkMetrics) DuplicateMessagesCounts(count float64) { + _m.Called(count) +} + // InboundConnections provides a mock function with given fields: connectionCount func (_m *NetworkMetrics) InboundConnections(connectionCount uint) { _m.Called(connectionCount) @@ -130,6 +152,11 @@ func (_m *NetworkMetrics) MessageRemoved(priority int) { _m.Called(priority) } +// OnActiveClusterIDsNotSetErr provides a mock function with no fields +func (_m *NetworkMetrics) OnActiveClusterIDsNotSetErr() { + _m.Called() +} + // OnAppSpecificScoreUpdated provides a mock function with given fields: _a0 func (_m *NetworkMetrics) OnAppSpecificScoreUpdated(_a0 float64) { _m.Called(_a0) @@ -140,26 +167,41 @@ func (_m *NetworkMetrics) OnBehaviourPenaltyUpdated(_a0 float64) { _m.Called(_a0) } -// OnDNSCacheHit provides a mock function with given fields: +// OnControlMessagesTruncated provides a mock function with given fields: messageType, diff +func (_m *NetworkMetrics) OnControlMessagesTruncated(messageType p2pmsg.ControlMessageType, diff int) { + _m.Called(messageType, diff) +} + +// OnDNSCacheHit provides a mock function with no fields func (_m *NetworkMetrics) OnDNSCacheHit() { _m.Called() } -// OnDNSCacheInvalidated provides a mock function with given fields: +// OnDNSCacheInvalidated provides a mock function with no fields func (_m *NetworkMetrics) OnDNSCacheInvalidated() { _m.Called() } -// OnDNSCacheMiss provides a mock function with given fields: +// OnDNSCacheMiss provides a mock function with no fields func (_m *NetworkMetrics) OnDNSCacheMiss() { _m.Called() } -// OnDNSLookupRequestDropped provides a mock function with given fields: +// OnDNSLookupRequestDropped 
provides a mock function with no fields func (_m *NetworkMetrics) OnDNSLookupRequestDropped() { _m.Called() } +// OnDialRetryBudgetResetToDefault provides a mock function with no fields +func (_m *NetworkMetrics) OnDialRetryBudgetResetToDefault() { + _m.Called() +} + +// OnDialRetryBudgetUpdated provides a mock function with given fields: budget +func (_m *NetworkMetrics) OnDialRetryBudgetUpdated(budget uint64) { + _m.Called(budget) +} + // OnEstablishStreamFailure provides a mock function with given fields: duration, attempts func (_m *NetworkMetrics) OnEstablishStreamFailure(duration time.Duration, attempts int) { _m.Called(duration, attempts) @@ -170,14 +212,49 @@ func (_m *NetworkMetrics) OnFirstMessageDeliveredUpdated(_a0 channels.Topic, _a1 _m.Called(_a0, _a1) } -// OnGraftReceived provides a mock function with given fields: count -func (_m *NetworkMetrics) OnGraftReceived(count int) { - _m.Called(count) +// OnGraftDuplicateTopicIdsExceedThreshold provides a mock function with no fields +func (_m *NetworkMetrics) OnGraftDuplicateTopicIdsExceedThreshold() { + _m.Called() } -// OnIHaveReceived provides a mock function with given fields: count -func (_m *NetworkMetrics) OnIHaveReceived(count int) { - _m.Called(count) +// OnGraftInvalidTopicIdsExceedThreshold provides a mock function with no fields +func (_m *NetworkMetrics) OnGraftInvalidTopicIdsExceedThreshold() { + _m.Called() +} + +// OnGraftMessageInspected provides a mock function with given fields: duplicateTopicIds, invalidTopicIds +func (_m *NetworkMetrics) OnGraftMessageInspected(duplicateTopicIds int, invalidTopicIds int) { + _m.Called(duplicateTopicIds, invalidTopicIds) +} + +// OnIHaveControlMessageIdsTruncated provides a mock function with given fields: diff +func (_m *NetworkMetrics) OnIHaveControlMessageIdsTruncated(diff int) { + _m.Called(diff) +} + +// OnIHaveDuplicateMessageIdsExceedThreshold provides a mock function with no fields +func (_m *NetworkMetrics) OnIHaveDuplicateMessageIdsExceedThreshold() { + _m.Called() +} + +// OnIHaveDuplicateTopicIdsExceedThreshold provides a mock function with no fields +func (_m *NetworkMetrics) OnIHaveDuplicateTopicIdsExceedThreshold() { + _m.Called() +} + +// OnIHaveInvalidTopicIdsExceedThreshold provides a mock function with no fields +func (_m *NetworkMetrics) OnIHaveInvalidTopicIdsExceedThreshold() { + _m.Called() +} + +// OnIHaveMessageIDsReceived provides a mock function with given fields: channel, msgIdCount +func (_m *NetworkMetrics) OnIHaveMessageIDsReceived(channel string, msgIdCount int) { + _m.Called(channel, msgIdCount) +} + +// OnIHaveMessagesInspected provides a mock function with given fields: duplicateTopicIds, duplicateMessageIds, invalidTopicIds +func (_m *NetworkMetrics) OnIHaveMessagesInspected(duplicateTopicIds int, duplicateMessageIds int, invalidTopicIds int) { + _m.Called(duplicateTopicIds, duplicateMessageIds, invalidTopicIds) } // OnIPColocationFactorUpdated provides a mock function with given fields: _a0 @@ -185,23 +262,38 @@ func (_m *NetworkMetrics) OnIPColocationFactorUpdated(_a0 float64) { _m.Called(_a0) } -// OnIWantReceived provides a mock function with given fields: count -func (_m *NetworkMetrics) OnIWantReceived(count int) { - _m.Called(count) +// OnIWantCacheMissMessageIdsExceedThreshold provides a mock function with no fields +func (_m *NetworkMetrics) OnIWantCacheMissMessageIdsExceedThreshold() { + _m.Called() } -// OnIncomingRpcAcceptedFully provides a mock function with given fields: -func (_m *NetworkMetrics) OnIncomingRpcAcceptedFully() { 
- _m.Called() +// OnIWantControlMessageIdsTruncated provides a mock function with given fields: diff +func (_m *NetworkMetrics) OnIWantControlMessageIdsTruncated(diff int) { + _m.Called(diff) } -// OnIncomingRpcAcceptedOnlyForControlMessages provides a mock function with given fields: -func (_m *NetworkMetrics) OnIncomingRpcAcceptedOnlyForControlMessages() { +// OnIWantDuplicateMessageIdsExceedThreshold provides a mock function with no fields +func (_m *NetworkMetrics) OnIWantDuplicateMessageIdsExceedThreshold() { _m.Called() } -// OnIncomingRpcRejected provides a mock function with given fields: -func (_m *NetworkMetrics) OnIncomingRpcRejected() { +// OnIWantMessageIDsReceived provides a mock function with given fields: msgIdCount +func (_m *NetworkMetrics) OnIWantMessageIDsReceived(msgIdCount int) { + _m.Called(msgIdCount) +} + +// OnIWantMessagesInspected provides a mock function with given fields: duplicateCount, cacheMissCount +func (_m *NetworkMetrics) OnIWantMessagesInspected(duplicateCount int, cacheMissCount int) { + _m.Called(duplicateCount, cacheMissCount) +} + +// OnIncomingRpcReceived provides a mock function with given fields: iHaveCount, iWantCount, graftCount, pruneCount, msgCount +func (_m *NetworkMetrics) OnIncomingRpcReceived(iHaveCount int, iWantCount int, graftCount int, pruneCount int, msgCount int) { + _m.Called(iHaveCount, iWantCount, graftCount, pruneCount, msgCount) +} + +// OnInvalidControlMessageNotificationSent provides a mock function with no fields +func (_m *NetworkMetrics) OnInvalidControlMessageNotificationSent() { _m.Called() } @@ -210,26 +302,71 @@ func (_m *NetworkMetrics) OnInvalidMessageDeliveredUpdated(_a0 channels.Topic, _ _m.Called(_a0, _a1) } +// OnInvalidTopicIdDetectedForControlMessage provides a mock function with given fields: messageType +func (_m *NetworkMetrics) OnInvalidTopicIdDetectedForControlMessage(messageType p2pmsg.ControlMessageType) { + _m.Called(messageType) +} + // OnLocalMeshSizeUpdated provides a mock function with given fields: topic, size func (_m *NetworkMetrics) OnLocalMeshSizeUpdated(topic string, size int) { _m.Called(topic, size) } +// OnLocalPeerJoinedTopic provides a mock function with no fields +func (_m *NetworkMetrics) OnLocalPeerJoinedTopic() { + _m.Called() +} + +// OnLocalPeerLeftTopic provides a mock function with no fields +func (_m *NetworkMetrics) OnLocalPeerLeftTopic() { + _m.Called() +} + // OnMeshMessageDeliveredUpdated provides a mock function with given fields: _a0, _a1 func (_m *NetworkMetrics) OnMeshMessageDeliveredUpdated(_a0 channels.Topic, _a1 float64) { _m.Called(_a0, _a1) } +// OnMessageDeliveredToAllSubscribers provides a mock function with given fields: size +func (_m *NetworkMetrics) OnMessageDeliveredToAllSubscribers(size int) { + _m.Called(size) +} + +// OnMessageDuplicate provides a mock function with given fields: size +func (_m *NetworkMetrics) OnMessageDuplicate(size int) { + _m.Called(size) +} + +// OnMessageEnteredValidation provides a mock function with given fields: size +func (_m *NetworkMetrics) OnMessageEnteredValidation(size int) { + _m.Called(size) +} + +// OnMessageRejected provides a mock function with given fields: size, reason +func (_m *NetworkMetrics) OnMessageRejected(size int, reason string) { + _m.Called(size, reason) +} + // OnMisbehaviorReported provides a mock function with given fields: channel, misbehaviorType func (_m *NetworkMetrics) OnMisbehaviorReported(channel string, misbehaviorType string) { _m.Called(channel, misbehaviorType) } +// OnOutboundRpcDropped 
provides a mock function with no fields +func (_m *NetworkMetrics) OnOutboundRpcDropped() { + _m.Called() +} + // OnOverallPeerScoreUpdated provides a mock function with given fields: _a0 func (_m *NetworkMetrics) OnOverallPeerScoreUpdated(_a0 float64) { _m.Called(_a0) } +// OnPeerAddedToProtocol provides a mock function with given fields: _a0 +func (_m *NetworkMetrics) OnPeerAddedToProtocol(_a0 string) { + _m.Called(_a0) +} + // OnPeerDialFailure provides a mock function with given fields: duration, attempts func (_m *NetworkMetrics) OnPeerDialFailure(duration time.Duration, attempts int) { _m.Called(duration, attempts) @@ -240,14 +377,49 @@ func (_m *NetworkMetrics) OnPeerDialed(duration time.Duration, attempts int) { _m.Called(duration, attempts) } -// OnPruneReceived provides a mock function with given fields: count -func (_m *NetworkMetrics) OnPruneReceived(count int) { - _m.Called(count) +// OnPeerGraftTopic provides a mock function with given fields: topic +func (_m *NetworkMetrics) OnPeerGraftTopic(topic string) { + _m.Called(topic) } -// OnPublishedGossipMessagesReceived provides a mock function with given fields: count -func (_m *NetworkMetrics) OnPublishedGossipMessagesReceived(count int) { - _m.Called(count) +// OnPeerPruneTopic provides a mock function with given fields: topic +func (_m *NetworkMetrics) OnPeerPruneTopic(topic string) { + _m.Called(topic) +} + +// OnPeerRemovedFromProtocol provides a mock function with no fields +func (_m *NetworkMetrics) OnPeerRemovedFromProtocol() { + _m.Called() +} + +// OnPeerThrottled provides a mock function with no fields +func (_m *NetworkMetrics) OnPeerThrottled() { + _m.Called() +} + +// OnPruneDuplicateTopicIdsExceedThreshold provides a mock function with no fields +func (_m *NetworkMetrics) OnPruneDuplicateTopicIdsExceedThreshold() { + _m.Called() +} + +// OnPruneInvalidTopicIdsExceedThreshold provides a mock function with no fields +func (_m *NetworkMetrics) OnPruneInvalidTopicIdsExceedThreshold() { + _m.Called() +} + +// OnPruneMessageInspected provides a mock function with given fields: duplicateTopicIds, invalidTopicIds +func (_m *NetworkMetrics) OnPruneMessageInspected(duplicateTopicIds int, invalidTopicIds int) { + _m.Called(duplicateTopicIds, invalidTopicIds) +} + +// OnPublishMessageInspected provides a mock function with given fields: totalErrCount, invalidTopicIdsCount, invalidSubscriptionsCount, invalidSendersCount +func (_m *NetworkMetrics) OnPublishMessageInspected(totalErrCount int, invalidTopicIdsCount int, invalidSubscriptionsCount int, invalidSendersCount int) { + _m.Called(totalErrCount, invalidTopicIdsCount, invalidSubscriptionsCount, invalidSendersCount) +} + +// OnPublishMessagesInspectionErrorExceedsThreshold provides a mock function with no fields +func (_m *NetworkMetrics) OnPublishMessagesInspectionErrorExceedsThreshold() { + _m.Called() } // OnRateLimitedPeer provides a mock function with given fields: pid, role, msgType, topic, reason @@ -255,6 +427,21 @@ func (_m *NetworkMetrics) OnRateLimitedPeer(pid peer.ID, role string, msgType st _m.Called(pid, role, msgType, topic, reason) } +// OnRpcReceived provides a mock function with given fields: msgCount, iHaveCount, iWantCount, graftCount, pruneCount +func (_m *NetworkMetrics) OnRpcReceived(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { + _m.Called(msgCount, iHaveCount, iWantCount, graftCount, pruneCount) +} + +// OnRpcRejectedFromUnknownSender provides a mock function with no fields +func (_m *NetworkMetrics) 
OnRpcRejectedFromUnknownSender() { + _m.Called() +} + +// OnRpcSent provides a mock function with given fields: msgCount, iHaveCount, iWantCount, graftCount, pruneCount +func (_m *NetworkMetrics) OnRpcSent(msgCount int, iHaveCount int, iWantCount int, graftCount int, pruneCount int) { + _m.Called(msgCount, iHaveCount, iWantCount, graftCount, pruneCount) +} + // OnStreamCreated provides a mock function with given fields: duration, attempts func (_m *NetworkMetrics) OnStreamCreated(duration time.Duration, attempts int) { _m.Called(duration, attempts) @@ -265,6 +452,16 @@ func (_m *NetworkMetrics) OnStreamCreationFailure(duration time.Duration, attemp _m.Called(duration, attempts) } +// OnStreamCreationRetryBudgetResetToDefault provides a mock function with no fields +func (_m *NetworkMetrics) OnStreamCreationRetryBudgetResetToDefault() { + _m.Called() +} + +// OnStreamCreationRetryBudgetUpdated provides a mock function with given fields: budget +func (_m *NetworkMetrics) OnStreamCreationRetryBudgetUpdated(budget uint64) { + _m.Called(budget) +} + // OnStreamEstablished provides a mock function with given fields: duration, attempts func (_m *NetworkMetrics) OnStreamEstablished(duration time.Duration, attempts int) { _m.Called(duration, attempts) @@ -280,6 +477,21 @@ func (_m *NetworkMetrics) OnUnauthorizedMessage(role string, msgType string, top _m.Called(role, msgType, topic, offense) } +// OnUndeliveredMessage provides a mock function with no fields +func (_m *NetworkMetrics) OnUndeliveredMessage() { + _m.Called() +} + +// OnUnstakedPeerInspectionFailed provides a mock function with no fields +func (_m *NetworkMetrics) OnUnstakedPeerInspectionFailed() { + _m.Called() +} + +// OnViolationReportSkipped provides a mock function with no fields +func (_m *NetworkMetrics) OnViolationReportSkipped() { + _m.Called() +} + // OutboundConnections provides a mock function with given fields: connectionCount func (_m *NetworkMetrics) OutboundConnections(connectionCount uint) { _m.Called(connectionCount) @@ -295,12 +507,12 @@ func (_m *NetworkMetrics) QueueDuration(duration time.Duration, priority int) { _m.Called(duration, priority) } -// RoutingTablePeerAdded provides a mock function with given fields: +// RoutingTablePeerAdded provides a mock function with no fields func (_m *NetworkMetrics) RoutingTablePeerAdded() { _m.Called() } -// RoutingTablePeerRemoved provides a mock function with given fields: +// RoutingTablePeerRemoved provides a mock function with no fields func (_m *NetworkMetrics) RoutingTablePeerRemoved() { _m.Called() } @@ -320,13 +532,12 @@ func (_m *NetworkMetrics) UnicastMessageSendingStarted(topic string) { _m.Called(topic) } -type mockConstructorTestingTNewNetworkMetrics interface { +// NewNetworkMetrics creates a new instance of NetworkMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNetworkMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewNetworkMetrics creates a new instance of NetworkMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewNetworkMetrics(t mockConstructorTestingTNewNetworkMetrics) *NetworkMetrics { +}) *NetworkMetrics { mock := &NetworkMetrics{} mock.Mock.Test(t) diff --git a/module/mock/network_security_metrics.go b/module/mock/network_security_metrics.go index 51d045c2a12..f8d50d5b71b 100644 --- a/module/mock/network_security_metrics.go +++ b/module/mock/network_security_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -23,13 +23,17 @@ func (_m *NetworkSecurityMetrics) OnUnauthorizedMessage(role string, msgType str _m.Called(role, msgType, topic, offense) } -type mockConstructorTestingTNewNetworkSecurityMetrics interface { - mock.TestingT - Cleanup(func()) +// OnViolationReportSkipped provides a mock function with no fields +func (_m *NetworkSecurityMetrics) OnViolationReportSkipped() { + _m.Called() } // NewNetworkSecurityMetrics creates a new instance of NetworkSecurityMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewNetworkSecurityMetrics(t mockConstructorTestingTNewNetworkSecurityMetrics) *NetworkSecurityMetrics { +// The first argument is typically a *testing.T value. +func NewNetworkSecurityMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *NetworkSecurityMetrics { mock := &NetworkSecurityMetrics{} mock.Mock.Test(t) diff --git a/module/mock/new_job_listener.go b/module/mock/new_job_listener.go index 9f89325743d..1e4d864f8a5 100644 --- a/module/mock/new_job_listener.go +++ b/module/mock/new_job_listener.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -9,18 +9,17 @@ type NewJobListener struct { mock.Mock } -// Check provides a mock function with given fields: +// Check provides a mock function with no fields func (_m *NewJobListener) Check() { _m.Called() } -type mockConstructorTestingTNewNewJobListener interface { +// NewNewJobListener creates a new instance of NewJobListener. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNewJobListener(t interface { mock.TestingT Cleanup(func()) -} - -// NewNewJobListener creates a new instance of NewJobListener. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewNewJobListener(t mockConstructorTestingTNewNewJobListener) *NewJobListener { +}) *NewJobListener { mock := &NewJobListener{} mock.Mock.Test(t) diff --git a/module/mock/pending_block_buffer.go b/module/mock/pending_block_buffer.go index b94869f7a04..f9ecd30f752 100644 --- a/module/mock/pending_block_buffer.go +++ b/module/mock/pending_block_buffer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
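The pending_block_buffer.go hunks below track an interface change, not just regenerated boilerplate: Add now takes a single flow.Slashable[*flow.Proposal] (which carries the origin ID) instead of separate originID and *flow.Block arguments, and ByID/ByParentID return the same generic wrapper. A sketch of stubbing the new signature, using mock.Anything to avoid depending on Slashable's field layout:

    import (
        "testing"

        "github.com/stretchr/testify/mock"

        "github.com/onflow/flow-go/model/flow"
        modulemock "github.com/onflow/flow-go/module/mock"
    )

    func TestPendingBlockBufferAdd(t *testing.T) {
        buf := modulemock.NewPendingBlockBuffer(t)
        buf.On("Add", mock.Anything).Return(true)

        // Zero value used for illustration; real callers wrap a proposal
        // together with its origin ID.
        var proposal flow.Slashable[*flow.Proposal]
        if !buf.Add(proposal) {
            t.Fatal("expected Add to report success")
        }
    }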
package mock @@ -12,13 +12,17 @@ type PendingBlockBuffer struct { mock.Mock } -// Add provides a mock function with given fields: originID, block -func (_m *PendingBlockBuffer) Add(originID flow.Identifier, block *flow.Block) bool { - ret := _m.Called(originID, block) +// Add provides a mock function with given fields: block +func (_m *PendingBlockBuffer) Add(block flow.Slashable[*flow.Proposal]) bool { + ret := _m.Called(block) + + if len(ret) == 0 { + panic("no return value specified for Add") + } var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.Block) bool); ok { - r0 = rf(originID, block) + if rf, ok := ret.Get(0).(func(flow.Slashable[*flow.Proposal]) bool); ok { + r0 = rf(block) } else { r0 = ret.Get(0).(bool) } @@ -27,18 +31,22 @@ func (_m *PendingBlockBuffer) Add(originID flow.Identifier, block *flow.Block) b } // ByID provides a mock function with given fields: blockID -func (_m *PendingBlockBuffer) ByID(blockID flow.Identifier) (flow.Slashable[*flow.Block], bool) { +func (_m *PendingBlockBuffer) ByID(blockID flow.Identifier) (flow.Slashable[*flow.Proposal], bool) { ret := _m.Called(blockID) - var r0 flow.Slashable[*flow.Block] + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 flow.Slashable[*flow.Proposal] var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.Slashable[*flow.Block], bool)); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.Slashable[*flow.Proposal], bool)); ok { return rf(blockID) } - if rf, ok := ret.Get(0).(func(flow.Identifier) flow.Slashable[*flow.Block]); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) flow.Slashable[*flow.Proposal]); ok { r0 = rf(blockID) } else { - r0 = ret.Get(0).(flow.Slashable[*flow.Block]) + r0 = ret.Get(0).(flow.Slashable[*flow.Proposal]) } if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { @@ -51,19 +59,23 @@ func (_m *PendingBlockBuffer) ByID(blockID flow.Identifier) (flow.Slashable[*flo } // ByParentID provides a mock function with given fields: parentID -func (_m *PendingBlockBuffer) ByParentID(parentID flow.Identifier) ([]flow.Slashable[*flow.Block], bool) { +func (_m *PendingBlockBuffer) ByParentID(parentID flow.Identifier) ([]flow.Slashable[*flow.Proposal], bool) { ret := _m.Called(parentID) - var r0 []flow.Slashable[*flow.Block] + if len(ret) == 0 { + panic("no return value specified for ByParentID") + } + + var r0 []flow.Slashable[*flow.Proposal] var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.Slashable[*flow.Block], bool)); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.Slashable[*flow.Proposal], bool)); ok { return rf(parentID) } - if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.Slashable[*flow.Block]); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.Slashable[*flow.Proposal]); ok { r0 = rf(parentID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]flow.Slashable[*flow.Block]) + r0 = ret.Get(0).([]flow.Slashable[*flow.Proposal]) } } @@ -86,10 +98,14 @@ func (_m *PendingBlockBuffer) PruneByView(view uint64) { _m.Called(view) } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m *PendingBlockBuffer) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -100,13 +116,12 @@ func (_m *PendingBlockBuffer) Size() uint { return r0 } -type mockConstructorTestingTNewPendingBlockBuffer interface { +// 
NewPendingBlockBuffer creates a new instance of PendingBlockBuffer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPendingBlockBuffer(t interface { mock.TestingT Cleanup(func()) -} - -// NewPendingBlockBuffer creates a new instance of PendingBlockBuffer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPendingBlockBuffer(t mockConstructorTestingTNewPendingBlockBuffer) *PendingBlockBuffer { +}) *PendingBlockBuffer { mock := &PendingBlockBuffer{} mock.Mock.Test(t) diff --git a/module/mock/pending_cluster_block_buffer.go b/module/mock/pending_cluster_block_buffer.go index a1f30da90c0..d23cf6228a4 100644 --- a/module/mock/pending_cluster_block_buffer.go +++ b/module/mock/pending_cluster_block_buffer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -14,13 +14,17 @@ type PendingClusterBlockBuffer struct { mock.Mock } -// Add provides a mock function with given fields: originID, block -func (_m *PendingClusterBlockBuffer) Add(originID flow.Identifier, block *cluster.Block) bool { - ret := _m.Called(originID, block) +// Add provides a mock function with given fields: block +func (_m *PendingClusterBlockBuffer) Add(block flow.Slashable[*cluster.Proposal]) bool { + ret := _m.Called(block) + + if len(ret) == 0 { + panic("no return value specified for Add") + } var r0 bool - if rf, ok := ret.Get(0).(func(flow.Identifier, *cluster.Block) bool); ok { - r0 = rf(originID, block) + if rf, ok := ret.Get(0).(func(flow.Slashable[*cluster.Proposal]) bool); ok { + r0 = rf(block) } else { r0 = ret.Get(0).(bool) } @@ -29,18 +33,22 @@ func (_m *PendingClusterBlockBuffer) Add(originID flow.Identifier, block *cluste } // ByID provides a mock function with given fields: blockID -func (_m *PendingClusterBlockBuffer) ByID(blockID flow.Identifier) (flow.Slashable[*cluster.Block], bool) { +func (_m *PendingClusterBlockBuffer) ByID(blockID flow.Identifier) (flow.Slashable[*cluster.Proposal], bool) { ret := _m.Called(blockID) - var r0 flow.Slashable[*cluster.Block] + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 flow.Slashable[*cluster.Proposal] var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.Slashable[*cluster.Block], bool)); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.Slashable[*cluster.Proposal], bool)); ok { return rf(blockID) } - if rf, ok := ret.Get(0).(func(flow.Identifier) flow.Slashable[*cluster.Block]); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) flow.Slashable[*cluster.Proposal]); ok { r0 = rf(blockID) } else { - r0 = ret.Get(0).(flow.Slashable[*cluster.Block]) + r0 = ret.Get(0).(flow.Slashable[*cluster.Proposal]) } if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { @@ -53,19 +61,23 @@ func (_m *PendingClusterBlockBuffer) ByID(blockID flow.Identifier) (flow.Slashab } // ByParentID provides a mock function with given fields: parentID -func (_m *PendingClusterBlockBuffer) ByParentID(parentID flow.Identifier) ([]flow.Slashable[*cluster.Block], bool) { +func (_m *PendingClusterBlockBuffer) ByParentID(parentID flow.Identifier) ([]flow.Slashable[*cluster.Proposal], bool) { ret := _m.Called(parentID) - var r0 []flow.Slashable[*cluster.Block] + if len(ret) == 0 { + panic("no return value specified for ByParentID") + } + + var r0 []flow.Slashable[*cluster.Proposal] 
var r1 bool - if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.Slashable[*cluster.Block], bool)); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.Slashable[*cluster.Proposal], bool)); ok { return rf(parentID) } - if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.Slashable[*cluster.Block]); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.Slashable[*cluster.Proposal]); ok { r0 = rf(parentID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]flow.Slashable[*cluster.Block]) + r0 = ret.Get(0).([]flow.Slashable[*cluster.Proposal]) } } @@ -88,10 +100,14 @@ func (_m *PendingClusterBlockBuffer) PruneByView(view uint64) { _m.Called(view) } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m *PendingClusterBlockBuffer) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -102,13 +118,12 @@ func (_m *PendingClusterBlockBuffer) Size() uint { return r0 } -type mockConstructorTestingTNewPendingClusterBlockBuffer interface { +// NewPendingClusterBlockBuffer creates a new instance of PendingClusterBlockBuffer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPendingClusterBlockBuffer(t interface { mock.TestingT Cleanup(func()) -} - -// NewPendingClusterBlockBuffer creates a new instance of PendingClusterBlockBuffer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPendingClusterBlockBuffer(t mockConstructorTestingTNewPendingClusterBlockBuffer) *PendingClusterBlockBuffer { +}) *PendingClusterBlockBuffer { mock := &PendingClusterBlockBuffer{} mock.Mock.Test(t) diff --git a/module/mock/ping_metrics.go b/module/mock/ping_metrics.go index d278cbda096..79699dac4ce 100644 --- a/module/mock/ping_metrics.go +++ b/module/mock/ping_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -24,13 +24,12 @@ func (_m *PingMetrics) NodeReachable(node *flow.Identity, nodeInfo string, rtt t _m.Called(node, nodeInfo, rtt) } -type mockConstructorTestingTNewPingMetrics interface { +// NewPingMetrics creates a new instance of PingMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPingMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewPingMetrics creates a new instance of PingMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPingMetrics(t mockConstructorTestingTNewPingMetrics) *PingMetrics { +}) *PingMetrics { mock := &PingMetrics{} mock.Mock.Test(t) diff --git a/module/mock/private_key.go b/module/mock/private_key.go new file mode 100644 index 00000000000..4fbf10cc4da --- /dev/null +++ b/module/mock/private_key.go @@ -0,0 +1,171 @@ +// Code generated by mockery. DO NOT EDIT. 
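The brand-new private_key.go mock below covers crypto.PrivateKey from the standalone onflow/crypto module. A sketch of stubbing Sign in a test; the signature bytes and alias are placeholder values:

    import (
        "testing"

        "github.com/onflow/crypto"
        "github.com/stretchr/testify/mock"

        modulemock "github.com/onflow/flow-go/module/mock"
    )

    func TestPrivateKeySign(t *testing.T) {
        sk := modulemock.NewPrivateKey(t)
        // crypto.Signature is a []byte-based type, so a literal works for stubbing.
        sk.On("Sign", mock.Anything, mock.Anything).Return(crypto.Signature("stub-sig"), nil)

        sig, err := sk.Sign([]byte("msg"), nil) // hasher is irrelevant to the stub
        if err != nil || len(sig) == 0 {
            t.Fatalf("unexpected result: %v, %v", sig, err)
        }
    }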
+ +package mock + +import ( + crypto "github.com/onflow/crypto" + hash "github.com/onflow/crypto/hash" + + mock "github.com/stretchr/testify/mock" +) + +// PrivateKey is an autogenerated mock type for the PrivateKey type +type PrivateKey struct { + mock.Mock +} + +// Algorithm provides a mock function with no fields +func (_m *PrivateKey) Algorithm() crypto.SigningAlgorithm { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Algorithm") + } + + var r0 crypto.SigningAlgorithm + if rf, ok := ret.Get(0).(func() crypto.SigningAlgorithm); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(crypto.SigningAlgorithm) + } + + return r0 +} + +// Encode provides a mock function with no fields +func (_m *PrivateKey) Encode() []byte { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Encode") + } + + var r0 []byte + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// Equals provides a mock function with given fields: _a0 +func (_m *PrivateKey) Equals(_a0 crypto.PrivateKey) bool { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Equals") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(crypto.PrivateKey) bool); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// PublicKey provides a mock function with no fields +func (_m *PrivateKey) PublicKey() crypto.PublicKey { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PublicKey") + } + + var r0 crypto.PublicKey + if rf, ok := ret.Get(0).(func() crypto.PublicKey); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.PublicKey) + } + } + + return r0 +} + +// Sign provides a mock function with given fields: _a0, _a1 +func (_m *PrivateKey) Sign(_a0 []byte, _a1 hash.Hasher) (crypto.Signature, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Sign") + } + + var r0 crypto.Signature + var r1 error + if rf, ok := ret.Get(0).(func([]byte, hash.Hasher) (crypto.Signature, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func([]byte, hash.Hasher) crypto.Signature); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.Signature) + } + } + + if rf, ok := ret.Get(1).(func([]byte, hash.Hasher) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Size provides a mock function with no fields +func (_m *PrivateKey) Size() int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Size") + } + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// String provides a mock function with no fields +func (_m *PrivateKey) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// NewPrivateKey creates a new instance of PrivateKey. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewPrivateKey(t interface { + mock.TestingT + Cleanup(func()) +}) *PrivateKey { + mock := &PrivateKey{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/processing_notifier.go b/module/mock/processing_notifier.go index b09e9efa03b..325696ce0dd 100644 --- a/module/mock/processing_notifier.go +++ b/module/mock/processing_notifier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -17,13 +17,12 @@ func (_m *ProcessingNotifier) Notify(entityID flow.Identifier) { _m.Called(entityID) } -type mockConstructorTestingTNewProcessingNotifier interface { +// NewProcessingNotifier creates a new instance of ProcessingNotifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProcessingNotifier(t interface { mock.TestingT Cleanup(func()) -} - -// NewProcessingNotifier creates a new instance of ProcessingNotifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewProcessingNotifier(t mockConstructorTestingTNewProcessingNotifier) *ProcessingNotifier { +}) *ProcessingNotifier { mock := &ProcessingNotifier{} mock.Mock.Test(t) diff --git a/module/mock/provider_metrics.go b/module/mock/provider_metrics.go index d02f0d73a57..1f4ddd2e0ab 100644 --- a/module/mock/provider_metrics.go +++ b/module/mock/provider_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -9,18 +9,17 @@ type ProviderMetrics struct { mock.Mock } -// ChunkDataPackRequestProcessed provides a mock function with given fields: +// ChunkDataPackRequestProcessed provides a mock function with no fields func (_m *ProviderMetrics) ChunkDataPackRequestProcessed() { _m.Called() } -type mockConstructorTestingTNewProviderMetrics interface { +// NewProviderMetrics creates a new instance of ProviderMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProviderMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewProviderMetrics creates a new instance of ProviderMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewProviderMetrics(t mockConstructorTestingTNewProviderMetrics) *ProviderMetrics { +}) *ProviderMetrics { mock := &ProviderMetrics{} mock.Mock.Test(t) diff --git a/module/mock/public_key.go b/module/mock/public_key.go index 6b9c8432aca..1b640f67df3 100644 --- a/module/mock/public_key.go +++ b/module/mock/public_key.go @@ -1,10 +1,10 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
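Each regenerated constructor replaces the removed mockConstructorTestingTNewX helper types with an inline interface, so any value implementing mock.TestingT and Cleanup(func()), typically *testing.T, is accepted directly, and expectations are asserted automatically by the registered cleanup. A sketch using the PrivateKey mock from this diff (the test itself is hypothetical):

    package example_test

    import (
        "testing"

        mockmodule "github.com/onflow/flow-go/module/mock"
    )

    func TestConstructorPattern(t *testing.T) {
        // NewPrivateKey registers t on the mock and a cleanup that runs AssertExpectations.
        key := mockmodule.NewPrivateKey(t)
        key.On("Size").Return(32)
        if got := key.Size(); got != 32 {
            t.Fatalf("unexpected size: %d", got)
        }
        // No explicit key.AssertExpectations(t) call is needed at the end of the test.
    }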
package mock import ( - crypto "github.com/onflow/flow-go/crypto" - hash "github.com/onflow/flow-go/crypto/hash" + crypto "github.com/onflow/crypto" + hash "github.com/onflow/crypto/hash" mock "github.com/stretchr/testify/mock" ) @@ -14,10 +14,14 @@ type PublicKey struct { mock.Mock } -// Algorithm provides a mock function with given fields: +// Algorithm provides a mock function with no fields func (_m *PublicKey) Algorithm() crypto.SigningAlgorithm { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Algorithm") + } + var r0 crypto.SigningAlgorithm if rf, ok := ret.Get(0).(func() crypto.SigningAlgorithm); ok { r0 = rf() @@ -28,10 +32,14 @@ func (_m *PublicKey) Algorithm() crypto.SigningAlgorithm { return r0 } -// Encode provides a mock function with given fields: +// Encode provides a mock function with no fields func (_m *PublicKey) Encode() []byte { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Encode") + } + var r0 []byte if rf, ok := ret.Get(0).(func() []byte); ok { r0 = rf() @@ -44,10 +52,14 @@ func (_m *PublicKey) Encode() []byte { return r0 } -// EncodeCompressed provides a mock function with given fields: +// EncodeCompressed provides a mock function with no fields func (_m *PublicKey) EncodeCompressed() []byte { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for EncodeCompressed") + } + var r0 []byte if rf, ok := ret.Get(0).(func() []byte); ok { r0 = rf() @@ -64,6 +76,10 @@ func (_m *PublicKey) EncodeCompressed() []byte { func (_m *PublicKey) Equals(_a0 crypto.PublicKey) bool { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Equals") + } + var r0 bool if rf, ok := ret.Get(0).(func(crypto.PublicKey) bool); ok { r0 = rf(_a0) @@ -74,10 +90,14 @@ func (_m *PublicKey) Equals(_a0 crypto.PublicKey) bool { return r0 } -// Size provides a mock function with given fields: +// Size provides a mock function with no fields func (_m *PublicKey) Size() int { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 int if rf, ok := ret.Get(0).(func() int); ok { r0 = rf() @@ -88,10 +108,14 @@ func (_m *PublicKey) Size() int { return r0 } -// String provides a mock function with given fields: +// String provides a mock function with no fields func (_m *PublicKey) String() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for String") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -106,6 +130,10 @@ func (_m *PublicKey) String() string { func (_m *PublicKey) Verify(_a0 crypto.Signature, _a1 []byte, _a2 hash.Hasher) (bool, error) { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for Verify") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(crypto.Signature, []byte, hash.Hasher) (bool, error)); ok { @@ -126,13 +154,12 @@ func (_m *PublicKey) Verify(_a0 crypto.Signature, _a1 []byte, _a2 hash.Hasher) ( return r0, r1 } -type mockConstructorTestingTNewPublicKey interface { +// NewPublicKey creates a new instance of PublicKey. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPublicKey(t interface { mock.TestingT Cleanup(func()) -} - -// NewPublicKey creates a new instance of PublicKey. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewPublicKey(t mockConstructorTestingTNewPublicKey) *PublicKey { +}) *PublicKey { mock := &PublicKey{} mock.Mock.Test(t) diff --git a/module/mock/qc_contract_client.go b/module/mock/qc_contract_client.go index 4802370d2bb..8b568c51f2e 100644 --- a/module/mock/qc_contract_client.go +++ b/module/mock/qc_contract_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -18,6 +18,10 @@ type QCContractClient struct { func (_m *QCContractClient) SubmitVote(ctx context.Context, vote *model.Vote) error { ret := _m.Called(ctx, vote) + if len(ret) == 0 { + panic("no return value specified for SubmitVote") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *model.Vote) error); ok { r0 = rf(ctx, vote) @@ -32,6 +36,10 @@ func (_m *QCContractClient) SubmitVote(ctx context.Context, vote *model.Vote) er func (_m *QCContractClient) Voted(ctx context.Context) (bool, error) { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Voted") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(context.Context) (bool, error)); ok { @@ -52,13 +60,12 @@ func (_m *QCContractClient) Voted(ctx context.Context) (bool, error) { return r0, r1 } -type mockConstructorTestingTNewQCContractClient interface { +// NewQCContractClient creates a new instance of QCContractClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewQCContractClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewQCContractClient creates a new instance of QCContractClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewQCContractClient(t mockConstructorTestingTNewQCContractClient) *QCContractClient { +}) *QCContractClient { mock := &QCContractClient{} mock.Mock.Test(t) diff --git a/module/mock/random_beacon_key_store.go b/module/mock/random_beacon_key_store.go index e1719fd4019..3d1963e08d7 100644 --- a/module/mock/random_beacon_key_store.go +++ b/module/mock/random_beacon_key_store.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( - crypto "github.com/onflow/flow-go/crypto" + crypto "github.com/onflow/crypto" mock "github.com/stretchr/testify/mock" ) @@ -16,6 +16,10 @@ type RandomBeaconKeyStore struct { func (_m *RandomBeaconKeyStore) ByView(view uint64) (crypto.PrivateKey, error) { ret := _m.Called(view) + if len(ret) == 0 { + panic("no return value specified for ByView") + } + var r0 crypto.PrivateKey var r1 error if rf, ok := ret.Get(0).(func(uint64) (crypto.PrivateKey, error)); ok { @@ -38,13 +42,12 @@ func (_m *RandomBeaconKeyStore) ByView(view uint64) (crypto.PrivateKey, error) { return r0, r1 } -type mockConstructorTestingTNewRandomBeaconKeyStore interface { +// NewRandomBeaconKeyStore creates a new instance of RandomBeaconKeyStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRandomBeaconKeyStore(t interface { mock.TestingT Cleanup(func()) -} - -// NewRandomBeaconKeyStore creates a new instance of RandomBeaconKeyStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewRandomBeaconKeyStore(t mockConstructorTestingTNewRandomBeaconKeyStore) *RandomBeaconKeyStore { +}) *RandomBeaconKeyStore { mock := &RandomBeaconKeyStore{} mock.Mock.Test(t) diff --git a/module/mock/rate_limited_blockstore_metrics.go b/module/mock/rate_limited_blockstore_metrics.go index f804e0824a8..e63e92e81fb 100644 --- a/module/mock/rate_limited_blockstore_metrics.go +++ b/module/mock/rate_limited_blockstore_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -14,13 +14,12 @@ func (_m *RateLimitedBlockstoreMetrics) BytesRead(_a0 int) { _m.Called(_a0) } -type mockConstructorTestingTNewRateLimitedBlockstoreMetrics interface { +// NewRateLimitedBlockstoreMetrics creates a new instance of RateLimitedBlockstoreMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRateLimitedBlockstoreMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewRateLimitedBlockstoreMetrics creates a new instance of RateLimitedBlockstoreMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRateLimitedBlockstoreMetrics(t mockConstructorTestingTNewRateLimitedBlockstoreMetrics) *RateLimitedBlockstoreMetrics { +}) *RateLimitedBlockstoreMetrics { mock := &RateLimitedBlockstoreMetrics{} mock.Mock.Test(t) diff --git a/module/mock/readonly_sealing_lag_rate_limiter_config.go b/module/mock/readonly_sealing_lag_rate_limiter_config.go new file mode 100644 index 00000000000..e28d6670412 --- /dev/null +++ b/module/mock/readonly_sealing_lag_rate_limiter_config.go @@ -0,0 +1,96 @@ +// Code generated by mockery. DO NOT EDIT. 
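The crypto-related mocks above now import from the standalone github.com/onflow/crypto module instead of github.com/onflow/flow-go/crypto. A sketch of stubbing RandomBeaconKeyStore with a key built against the new import path; the algorithm choice and zero seed are illustrative only, and KeyGenSeedMinLen is assumed to be the module's exported minimum seed length:

    package example_test

    import (
        "testing"

        "github.com/onflow/crypto"
        mockmodule "github.com/onflow/flow-go/module/mock"
    )

    func TestRandomBeaconKeyStoreMock(t *testing.T) {
        seed := make([]byte, crypto.KeyGenSeedMinLen) // zero seed, acceptable for a fixture
        sk, err := crypto.GeneratePrivateKey(crypto.BLSBLS12381, seed)
        if err != nil {
            t.Fatal(err)
        }
        ks := mockmodule.NewRandomBeaconKeyStore(t)
        ks.On("ByView", uint64(42)).Return(sk, nil)
        got, err := ks.ByView(42)
        if err != nil || !got.Equals(sk) {
            t.Fatal("expected the stubbed beacon key")
        }
    }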
+ +package mock + +import mock "github.com/stretchr/testify/mock" + +// ReadonlySealingLagRateLimiterConfig is an autogenerated mock type for the ReadonlySealingLagRateLimiterConfig type +type ReadonlySealingLagRateLimiterConfig struct { + mock.Mock +} + +// HalvingInterval provides a mock function with no fields +func (_m *ReadonlySealingLagRateLimiterConfig) HalvingInterval() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HalvingInterval") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// MaxSealingLag provides a mock function with no fields +func (_m *ReadonlySealingLagRateLimiterConfig) MaxSealingLag() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MaxSealingLag") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// MinCollectionSize provides a mock function with no fields +func (_m *ReadonlySealingLagRateLimiterConfig) MinCollectionSize() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MinCollectionSize") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// MinSealingLag provides a mock function with no fields +func (_m *ReadonlySealingLagRateLimiterConfig) MinSealingLag() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MinSealingLag") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// NewReadonlySealingLagRateLimiterConfig creates a new instance of ReadonlySealingLagRateLimiterConfig. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReadonlySealingLagRateLimiterConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *ReadonlySealingLagRateLimiterConfig { + mock := &ReadonlySealingLagRateLimiterConfig{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/ready_done_aware.go b/module/mock/ready_done_aware.go index df4856d7c68..3d1573b776d 100644 --- a/module/mock/ready_done_aware.go +++ b/module/mock/ready_done_aware.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
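Every regenerated method with return values now guards _m.Called() with a len(ret) == 0 check, so calling a method without a configured expectation panics with a named message rather than failing less descriptively inside testify's Arguments.Get. A sketch against the ReadonlySealingLagRateLimiterConfig mock above (hypothetical test, placeholder value):

    package example_test

    import (
        "testing"

        mockmodule "github.com/onflow/flow-go/module/mock"
    )

    func TestNoReturnValuePanics(t *testing.T) {
        cfg := mockmodule.NewReadonlySealingLagRateLimiterConfig(t)
        // Without this expectation, cfg.MinSealingLag() would panic with
        // "no return value specified for MinSealingLag".
        cfg.On("MinSealingLag").Return(uint(10))
        if cfg.MinSealingLag() != 10 {
            t.Fatal("expected the stubbed sealing lag")
        }
    }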
package mock @@ -9,10 +9,14 @@ type ReadyDoneAware struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *ReadyDoneAware) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -25,10 +29,14 @@ func (_m *ReadyDoneAware) Done() <-chan struct{} { return r0 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *ReadyDoneAware) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -41,13 +49,12 @@ func (_m *ReadyDoneAware) Ready() <-chan struct{} { return r0 } -type mockConstructorTestingTNewReadyDoneAware interface { +// NewReadyDoneAware creates a new instance of ReadyDoneAware. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReadyDoneAware(t interface { mock.TestingT Cleanup(func()) -} - -// NewReadyDoneAware creates a new instance of ReadyDoneAware. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewReadyDoneAware(t mockConstructorTestingTNewReadyDoneAware) *ReadyDoneAware { +}) *ReadyDoneAware { mock := &ReadyDoneAware{} mock.Mock.Test(t) diff --git a/module/mock/receipt_validator.go b/module/mock/receipt_validator.go index f6f0545666d..57073a227c4 100644 --- a/module/mock/receipt_validator.go +++ b/module/mock/receipt_validator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -12,13 +12,17 @@ type ReceiptValidator struct { mock.Mock } -// Validate provides a mock function with given fields: receipts -func (_m *ReceiptValidator) Validate(receipts *flow.ExecutionReceipt) error { - ret := _m.Called(receipts) +// Validate provides a mock function with given fields: receipt +func (_m *ReceiptValidator) Validate(receipt *flow.ExecutionReceipt) error { + ret := _m.Called(receipt) + + if len(ret) == 0 { + panic("no return value specified for Validate") + } var r0 error if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt) error); ok { - r0 = rf(receipts) + r0 = rf(receipt) } else { r0 = ret.Error(0) } @@ -30,6 +34,10 @@ func (_m *ReceiptValidator) Validate(receipts *flow.ExecutionReceipt) error { func (_m *ReceiptValidator) ValidatePayload(candidate *flow.Block) error { ret := _m.Called(candidate) + if len(ret) == 0 { + panic("no return value specified for ValidatePayload") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.Block) error); ok { r0 = rf(candidate) @@ -40,13 +48,12 @@ func (_m *ReceiptValidator) ValidatePayload(candidate *flow.Block) error { return r0 } -type mockConstructorTestingTNewReceiptValidator interface { +// NewReceiptValidator creates a new instance of ReceiptValidator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReceiptValidator(t interface { mock.TestingT Cleanup(func()) -} - -// NewReceiptValidator creates a new instance of ReceiptValidator. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewReceiptValidator(t mockConstructorTestingTNewReceiptValidator) *ReceiptValidator { +}) *ReceiptValidator { mock := &ReceiptValidator{} mock.Mock.Test(t) diff --git a/module/mock/requester.go b/module/mock/requester.go index d3effd8e215..289423aecc0 100644 --- a/module/mock/requester.go +++ b/module/mock/requester.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -13,27 +13,26 @@ type Requester struct { } // EntityByID provides a mock function with given fields: entityID, selector -func (_m *Requester) EntityByID(entityID flow.Identifier, selector flow.IdentityFilter) { +func (_m *Requester) EntityByID(entityID flow.Identifier, selector flow.IdentityFilter[flow.Identity]) { _m.Called(entityID, selector) } -// Force provides a mock function with given fields: +// Force provides a mock function with no fields func (_m *Requester) Force() { _m.Called() } // Query provides a mock function with given fields: key, selector -func (_m *Requester) Query(key flow.Identifier, selector flow.IdentityFilter) { +func (_m *Requester) Query(key flow.Identifier, selector flow.IdentityFilter[flow.Identity]) { _m.Called(key, selector) } -type mockConstructorTestingTNewRequester interface { +// NewRequester creates a new instance of Requester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRequester(t interface { mock.TestingT Cleanup(func()) -} - -// NewRequester creates a new instance of Requester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRequester(t mockConstructorTestingTNewRequester) *Requester { +}) *Requester { mock := &Requester{} mock.Mock.Test(t) diff --git a/module/mock/resolver_metrics.go b/module/mock/resolver_metrics.go index a2473e7bf03..8e391c14f4a 100644 --- a/module/mock/resolver_metrics.go +++ b/module/mock/resolver_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -18,33 +18,32 @@ func (_m *ResolverMetrics) DNSLookupDuration(duration time.Duration) { _m.Called(duration) } -// OnDNSCacheHit provides a mock function with given fields: +// OnDNSCacheHit provides a mock function with no fields func (_m *ResolverMetrics) OnDNSCacheHit() { _m.Called() } -// OnDNSCacheInvalidated provides a mock function with given fields: +// OnDNSCacheInvalidated provides a mock function with no fields func (_m *ResolverMetrics) OnDNSCacheInvalidated() { _m.Called() } -// OnDNSCacheMiss provides a mock function with given fields: +// OnDNSCacheMiss provides a mock function with no fields func (_m *ResolverMetrics) OnDNSCacheMiss() { _m.Called() } -// OnDNSLookupRequestDropped provides a mock function with given fields: +// OnDNSLookupRequestDropped provides a mock function with no fields func (_m *ResolverMetrics) OnDNSLookupRequestDropped() { _m.Called() } -type mockConstructorTestingTNewResolverMetrics interface { +// NewResolverMetrics creates a new instance of ResolverMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewResolverMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewResolverMetrics creates a new instance of ResolverMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewResolverMetrics(t mockConstructorTestingTNewResolverMetrics) *ResolverMetrics { +}) *ResolverMetrics { mock := &ResolverMetrics{} mock.Mock.Test(t) diff --git a/module/mock/rest_metrics.go b/module/mock/rest_metrics.go new file mode 100644 index 00000000000..ac9fdfb82e6 --- /dev/null +++ b/module/mock/rest_metrics.go @@ -0,0 +1,51 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + metrics "github.com/slok/go-http-metrics/metrics" + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// RestMetrics is an autogenerated mock type for the RestMetrics type +type RestMetrics struct { + mock.Mock +} + +// AddInflightRequests provides a mock function with given fields: ctx, props, quantity +func (_m *RestMetrics) AddInflightRequests(ctx context.Context, props metrics.HTTPProperties, quantity int) { + _m.Called(ctx, props, quantity) +} + +// AddTotalRequests provides a mock function with given fields: ctx, method, routeName +func (_m *RestMetrics) AddTotalRequests(ctx context.Context, method string, routeName string) { + _m.Called(ctx, method, routeName) +} + +// ObserveHTTPRequestDuration provides a mock function with given fields: ctx, props, duration +func (_m *RestMetrics) ObserveHTTPRequestDuration(ctx context.Context, props metrics.HTTPReqProperties, duration time.Duration) { + _m.Called(ctx, props, duration) +} + +// ObserveHTTPResponseSize provides a mock function with given fields: ctx, props, sizeBytes +func (_m *RestMetrics) ObserveHTTPResponseSize(ctx context.Context, props metrics.HTTPReqProperties, sizeBytes int64) { + _m.Called(ctx, props, sizeBytes) +} + +// NewRestMetrics creates a new instance of RestMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRestMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *RestMetrics { + mock := &RestMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/runtime_metrics.go b/module/mock/runtime_metrics.go index 4cb356b27e1..7ad3aaa5e41 100644 --- a/module/mock/runtime_metrics.go +++ b/module/mock/runtime_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -33,23 +33,22 @@ func (_m *RuntimeMetrics) RuntimeTransactionParsed(dur time.Duration) { _m.Called(dur) } -// RuntimeTransactionProgramsCacheHit provides a mock function with given fields: +// RuntimeTransactionProgramsCacheHit provides a mock function with no fields func (_m *RuntimeMetrics) RuntimeTransactionProgramsCacheHit() { _m.Called() } -// RuntimeTransactionProgramsCacheMiss provides a mock function with given fields: +// RuntimeTransactionProgramsCacheMiss provides a mock function with no fields func (_m *RuntimeMetrics) RuntimeTransactionProgramsCacheMiss() { _m.Called() } -type mockConstructorTestingTNewRuntimeMetrics interface { +// NewRuntimeMetrics creates a new instance of RuntimeMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewRuntimeMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewRuntimeMetrics creates a new instance of RuntimeMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRuntimeMetrics(t mockConstructorTestingTNewRuntimeMetrics) *RuntimeMetrics { +}) *RuntimeMetrics { mock := &RuntimeMetrics{} mock.Mock.Test(t) diff --git a/module/mock/sdk_client_wrapper.go b/module/mock/sdk_client_wrapper.go index 90d3a2db32e..6c84cc52855 100644 --- a/module/mock/sdk_client_wrapper.go +++ b/module/mock/sdk_client_wrapper.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -21,6 +21,10 @@ type SDKClientWrapper struct { func (_m *SDKClientWrapper) ExecuteScriptAtBlockID(_a0 context.Context, _a1 flow.Identifier, _a2 []byte, _a3 []cadence.Value) (cadence.Value, error) { ret := _m.Called(_a0, _a1, _a2, _a3) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtBlockID") + } + var r0 cadence.Value var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, []byte, []cadence.Value) (cadence.Value, error)); ok { @@ -47,6 +51,10 @@ func (_m *SDKClientWrapper) ExecuteScriptAtBlockID(_a0 context.Context, _a1 flow func (_m *SDKClientWrapper) ExecuteScriptAtLatestBlock(_a0 context.Context, _a1 []byte, _a2 []cadence.Value) (cadence.Value, error) { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for ExecuteScriptAtLatestBlock") + } + var r0 cadence.Value var r1 error if rf, ok := ret.Get(0).(func(context.Context, []byte, []cadence.Value) (cadence.Value, error)); ok { @@ -73,6 +81,10 @@ func (_m *SDKClientWrapper) ExecuteScriptAtLatestBlock(_a0 context.Context, _a1 func (_m *SDKClientWrapper) GetAccount(_a0 context.Context, _a1 flow.Address) (*flow.Account, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetAccount") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (*flow.Account, error)); ok { @@ -99,6 +111,10 @@ func (_m *SDKClientWrapper) GetAccount(_a0 context.Context, _a1 flow.Address) (* func (_m *SDKClientWrapper) GetAccountAtLatestBlock(_a0 context.Context, _a1 flow.Address) (*flow.Account, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetAccountAtLatestBlock") + } + var r0 *flow.Account var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Address) (*flow.Account, error)); ok { @@ -125,6 +141,10 @@ func (_m *SDKClientWrapper) GetAccountAtLatestBlock(_a0 context.Context, _a1 flo func (_m *SDKClientWrapper) GetLatestBlock(_a0 context.Context, _a1 bool) (*flow.Block, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetLatestBlock") + } + var r0 *flow.Block var r1 error if rf, ok := ret.Get(0).(func(context.Context, bool) (*flow.Block, error)); ok { @@ -151,6 +171,10 @@ func (_m *SDKClientWrapper) GetLatestBlock(_a0 context.Context, _a1 bool) (*flow func (_m *SDKClientWrapper) GetTransactionResult(_a0 context.Context, _a1 flow.Identifier) (*flow.TransactionResult, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GetTransactionResult") + } + var r0 *flow.TransactionResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) (*flow.TransactionResult, error)); ok { @@ -177,6 +201,10 @@ func (_m 
*SDKClientWrapper) GetTransactionResult(_a0 context.Context, _a1 flow.I func (_m *SDKClientWrapper) SendTransaction(_a0 context.Context, _a1 flow.Transaction) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, flow.Transaction) error); ok { r0 = rf(_a0, _a1) @@ -187,13 +215,12 @@ func (_m *SDKClientWrapper) SendTransaction(_a0 context.Context, _a1 flow.Transa return r0 } -type mockConstructorTestingTNewSDKClientWrapper interface { +// NewSDKClientWrapper creates a new instance of SDKClientWrapper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSDKClientWrapper(t interface { mock.TestingT Cleanup(func()) -} - -// NewSDKClientWrapper creates a new instance of SDKClientWrapper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSDKClientWrapper(t mockConstructorTestingTNewSDKClientWrapper) *SDKClientWrapper { +}) *SDKClientWrapper { mock := &SDKClientWrapper{} mock.Mock.Test(t) diff --git a/module/mock/seal_validator.go b/module/mock/seal_validator.go index 0661a6daabf..d9195f240db 100644 --- a/module/mock/seal_validator.go +++ b/module/mock/seal_validator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type SealValidator struct { func (_m *SealValidator) Validate(candidate *flow.Block) (*flow.Seal, error) { ret := _m.Called(candidate) + if len(ret) == 0 { + panic("no return value specified for Validate") + } + var r0 *flow.Seal var r1 error if rf, ok := ret.Get(0).(func(*flow.Block) (*flow.Seal, error)); ok { @@ -38,13 +42,12 @@ func (_m *SealValidator) Validate(candidate *flow.Block) (*flow.Seal, error) { return r0, r1 } -type mockConstructorTestingTNewSealValidator interface { +// NewSealValidator creates a new instance of SealValidator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSealValidator(t interface { mock.TestingT Cleanup(func()) -} - -// NewSealValidator creates a new instance of SealValidator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSealValidator(t mockConstructorTestingTNewSealValidator) *SealValidator { +}) *SealValidator { mock := &SealValidator{} mock.Mock.Test(t) diff --git a/module/mock/sealing_configs_getter.go b/module/mock/sealing_configs_getter.go index dfdf4179fd0..21e76d9ceca 100644 --- a/module/mock/sealing_configs_getter.go +++ b/module/mock/sealing_configs_getter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -9,10 +9,14 @@ type SealingConfigsGetter struct { mock.Mock } -// ApprovalRequestsThresholdConst provides a mock function with given fields: +// ApprovalRequestsThresholdConst provides a mock function with no fields func (_m *SealingConfigsGetter) ApprovalRequestsThresholdConst() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ApprovalRequestsThresholdConst") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -23,10 +27,14 @@ func (_m *SealingConfigsGetter) ApprovalRequestsThresholdConst() uint64 { return r0 } -// ChunkAlphaConst provides a mock function with given fields: +// ChunkAlphaConst provides a mock function with no fields func (_m *SealingConfigsGetter) ChunkAlphaConst() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ChunkAlphaConst") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -37,10 +45,14 @@ func (_m *SealingConfigsGetter) ChunkAlphaConst() uint { return r0 } -// EmergencySealingActiveConst provides a mock function with given fields: +// EmergencySealingActiveConst provides a mock function with no fields func (_m *SealingConfigsGetter) EmergencySealingActiveConst() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for EmergencySealingActiveConst") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -51,10 +63,14 @@ func (_m *SealingConfigsGetter) EmergencySealingActiveConst() bool { return r0 } -// RequireApprovalsForSealConstructionDynamicValue provides a mock function with given fields: +// RequireApprovalsForSealConstructionDynamicValue provides a mock function with no fields func (_m *SealingConfigsGetter) RequireApprovalsForSealConstructionDynamicValue() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RequireApprovalsForSealConstructionDynamicValue") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -65,10 +81,14 @@ func (_m *SealingConfigsGetter) RequireApprovalsForSealConstructionDynamicValue( return r0 } -// RequireApprovalsForSealVerificationConst provides a mock function with given fields: +// RequireApprovalsForSealVerificationConst provides a mock function with no fields func (_m *SealingConfigsGetter) RequireApprovalsForSealVerificationConst() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RequireApprovalsForSealVerificationConst") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -79,13 +99,12 @@ func (_m *SealingConfigsGetter) RequireApprovalsForSealVerificationConst() uint return r0 } -type mockConstructorTestingTNewSealingConfigsGetter interface { +// NewSealingConfigsGetter creates a new instance of SealingConfigsGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSealingConfigsGetter(t interface { mock.TestingT Cleanup(func()) -} - -// NewSealingConfigsGetter creates a new instance of SealingConfigsGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewSealingConfigsGetter(t mockConstructorTestingTNewSealingConfigsGetter) *SealingConfigsGetter { +}) *SealingConfigsGetter { mock := &SealingConfigsGetter{} mock.Mock.Test(t) diff --git a/module/mock/sealing_configs_setter.go b/module/mock/sealing_configs_setter.go index db05378c24c..7aadddc1f07 100644 --- a/module/mock/sealing_configs_setter.go +++ b/module/mock/sealing_configs_setter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -9,10 +9,14 @@ type SealingConfigsSetter struct { mock.Mock } -// ApprovalRequestsThresholdConst provides a mock function with given fields: +// ApprovalRequestsThresholdConst provides a mock function with no fields func (_m *SealingConfigsSetter) ApprovalRequestsThresholdConst() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ApprovalRequestsThresholdConst") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -23,10 +27,14 @@ func (_m *SealingConfigsSetter) ApprovalRequestsThresholdConst() uint64 { return r0 } -// ChunkAlphaConst provides a mock function with given fields: +// ChunkAlphaConst provides a mock function with no fields func (_m *SealingConfigsSetter) ChunkAlphaConst() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ChunkAlphaConst") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -37,10 +45,14 @@ func (_m *SealingConfigsSetter) ChunkAlphaConst() uint { return r0 } -// EmergencySealingActiveConst provides a mock function with given fields: +// EmergencySealingActiveConst provides a mock function with no fields func (_m *SealingConfigsSetter) EmergencySealingActiveConst() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for EmergencySealingActiveConst") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -51,10 +63,14 @@ func (_m *SealingConfigsSetter) EmergencySealingActiveConst() bool { return r0 } -// RequireApprovalsForSealConstructionDynamicValue provides a mock function with given fields: +// RequireApprovalsForSealConstructionDynamicValue provides a mock function with no fields func (_m *SealingConfigsSetter) RequireApprovalsForSealConstructionDynamicValue() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RequireApprovalsForSealConstructionDynamicValue") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -65,10 +81,14 @@ func (_m *SealingConfigsSetter) RequireApprovalsForSealConstructionDynamicValue( return r0 } -// RequireApprovalsForSealVerificationConst provides a mock function with given fields: +// RequireApprovalsForSealVerificationConst provides a mock function with no fields func (_m *SealingConfigsSetter) RequireApprovalsForSealVerificationConst() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RequireApprovalsForSealVerificationConst") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -83,6 +103,10 @@ func (_m *SealingConfigsSetter) RequireApprovalsForSealVerificationConst() uint func (_m *SealingConfigsSetter) SetRequiredApprovalsForSealingConstruction(newVal uint) error { ret := _m.Called(newVal) + if len(ret) == 0 { + panic("no return value specified for SetRequiredApprovalsForSealingConstruction") + } + var r0 error if rf, ok := ret.Get(0).(func(uint) error); ok { r0 = rf(newVal) @@ -93,13 +117,12 @@ func (_m *SealingConfigsSetter) 
SetRequiredApprovalsForSealingConstruction(newVa return r0 } -type mockConstructorTestingTNewSealingConfigsSetter interface { +// NewSealingConfigsSetter creates a new instance of SealingConfigsSetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSealingConfigsSetter(t interface { mock.TestingT Cleanup(func()) -} - -// NewSealingConfigsSetter creates a new instance of SealingConfigsSetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSealingConfigsSetter(t mockConstructorTestingTNewSealingConfigsSetter) *SealingConfigsSetter { +}) *SealingConfigsSetter { mock := &SealingConfigsSetter{} mock.Mock.Test(t) diff --git a/module/mock/sealing_lag_rate_limiter_config.go b/module/mock/sealing_lag_rate_limiter_config.go new file mode 100644 index 00000000000..15d51e3ba76 --- /dev/null +++ b/module/mock/sealing_lag_rate_limiter_config.go @@ -0,0 +1,168 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// SealingLagRateLimiterConfig is an autogenerated mock type for the SealingLagRateLimiterConfig type +type SealingLagRateLimiterConfig struct { + mock.Mock +} + +// HalvingInterval provides a mock function with no fields +func (_m *SealingLagRateLimiterConfig) HalvingInterval() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HalvingInterval") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// MaxSealingLag provides a mock function with no fields +func (_m *SealingLagRateLimiterConfig) MaxSealingLag() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MaxSealingLag") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// MinCollectionSize provides a mock function with no fields +func (_m *SealingLagRateLimiterConfig) MinCollectionSize() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MinCollectionSize") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// MinSealingLag provides a mock function with no fields +func (_m *SealingLagRateLimiterConfig) MinSealingLag() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for MinSealingLag") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// SetHalvingInterval provides a mock function with given fields: value +func (_m *SealingLagRateLimiterConfig) SetHalvingInterval(value uint) error { + ret := _m.Called(value) + + if len(ret) == 0 { + panic("no return value specified for SetHalvingInterval") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint) error); ok { + r0 = rf(value) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetMaxSealingLag provides a mock function with given fields: value +func (_m *SealingLagRateLimiterConfig) SetMaxSealingLag(value uint) error { + ret := _m.Called(value) + + if len(ret) == 0 { + panic("no return value specified for SetMaxSealingLag") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint) error); ok { + r0 = rf(value) + } else { + r0 = ret.Error(0) 
+ } + + return r0 +} + +// SetMinCollectionSize provides a mock function with given fields: value +func (_m *SealingLagRateLimiterConfig) SetMinCollectionSize(value uint) error { + ret := _m.Called(value) + + if len(ret) == 0 { + panic("no return value specified for SetMinCollectionSize") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint) error); ok { + r0 = rf(value) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetMinSealingLag provides a mock function with given fields: value +func (_m *SealingLagRateLimiterConfig) SetMinSealingLag(value uint) error { + ret := _m.Called(value) + + if len(ret) == 0 { + panic("no return value specified for SetMinSealingLag") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint) error); ok { + r0 = rf(value) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewSealingLagRateLimiterConfig creates a new instance of SealingLagRateLimiterConfig. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSealingLagRateLimiterConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *SealingLagRateLimiterConfig { + mock := &SealingLagRateLimiterConfig{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/signer.go b/module/mock/signer.go new file mode 100644 index 00000000000..9e8aba1932c --- /dev/null +++ b/module/mock/signer.go @@ -0,0 +1,147 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + crypto "github.com/onflow/crypto" + mock "github.com/stretchr/testify/mock" +) + +// signer is an autogenerated mock type for the signer type +type signer struct { + mock.Mock +} + +// decodePrivateKey provides a mock function with given fields: _a0 +func (_m *signer) decodePrivateKey(_a0 []byte) (crypto.PrivateKey, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for decodePrivateKey") + } + + var r0 crypto.PrivateKey + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (crypto.PrivateKey, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func([]byte) crypto.PrivateKey); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.PrivateKey) + } + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// decodePublicKey provides a mock function with given fields: _a0 +func (_m *signer) decodePublicKey(_a0 []byte) (crypto.PublicKey, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for decodePublicKey") + } + + var r0 crypto.PublicKey + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (crypto.PublicKey, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func([]byte) crypto.PublicKey); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.PublicKey) + } + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// decodePublicKeyCompressed provides a mock function with given fields: _a0 +func (_m *signer) decodePublicKeyCompressed(_a0 []byte) (crypto.PublicKey, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for decodePublicKeyCompressed") + } + + var r0 crypto.PublicKey + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (crypto.PublicKey, error)); ok { + return rf(_a0) + } + if rf, ok 
:= ret.Get(0).(func([]byte) crypto.PublicKey); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.PublicKey) + } + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// generatePrivateKey provides a mock function with given fields: _a0 +func (_m *signer) generatePrivateKey(_a0 []byte) (crypto.PrivateKey, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for generatePrivateKey") + } + + var r0 crypto.PrivateKey + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (crypto.PrivateKey, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func([]byte) crypto.PrivateKey); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.PrivateKey) + } + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// newSigner creates a new instance of signer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newSigner(t interface { + mock.TestingT + Cleanup(func()) +}) *signer { + mock := &signer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/startable.go b/module/mock/startable.go index ae29c392065..a2096729e14 100644 --- a/module/mock/startable.go +++ b/module/mock/startable.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -17,13 +17,12 @@ func (_m *Startable) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewStartable interface { +// NewStartable creates a new instance of Startable. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStartable(t interface { mock.TestingT Cleanup(func()) -} - -// NewStartable creates a new instance of Startable. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewStartable(t mockConstructorTestingTNewStartable) *Startable { +}) *Startable { mock := &Startable{} mock.Mock.Test(t) diff --git a/module/mock/sync_core.go b/module/mock/sync_core.go index cfcce6ccee5..ca5e9946bcf 100644 --- a/module/mock/sync_core.go +++ b/module/mock/sync_core.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -23,6 +23,10 @@ func (_m *SyncCore) BatchRequested(batch chainsync.Batch) { func (_m *SyncCore) HandleBlock(header *flow.Header) bool { ret := _m.Called(header) + if len(ret) == 0 { + panic("no return value specified for HandleBlock") + } + var r0 bool if rf, ok := ret.Get(0).(func(*flow.Header) bool); ok { r0 = rf(header) @@ -47,6 +51,10 @@ func (_m *SyncCore) RangeRequested(ran chainsync.Range) { func (_m *SyncCore) ScanPending(final *flow.Header) ([]chainsync.Range, []chainsync.Batch) { ret := _m.Called(final) + if len(ret) == 0 { + panic("no return value specified for ScanPending") + } + var r0 []chainsync.Range var r1 []chainsync.Batch if rf, ok := ret.Get(0).(func(*flow.Header) ([]chainsync.Range, []chainsync.Batch)); ok { @@ -75,6 +83,10 @@ func (_m *SyncCore) ScanPending(final *flow.Header) ([]chainsync.Range, []chains func (_m *SyncCore) WithinTolerance(final *flow.Header, height uint64) bool { ret := _m.Called(final, height) + if len(ret) == 0 { + panic("no return value specified for WithinTolerance") + } + var r0 bool if rf, ok := ret.Get(0).(func(*flow.Header, uint64) bool); ok { r0 = rf(final, height) @@ -85,13 +97,12 @@ func (_m *SyncCore) WithinTolerance(final *flow.Header, height uint64) bool { return r0 } -type mockConstructorTestingTNewSyncCore interface { +// NewSyncCore creates a new instance of SyncCore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSyncCore(t interface { mock.TestingT Cleanup(func()) -} - -// NewSyncCore creates a new instance of SyncCore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSyncCore(t mockConstructorTestingTNewSyncCore) *SyncCore { +}) *SyncCore { mock := &SyncCore{} mock.Mock.Test(t) diff --git a/module/mock/threshold_signature_inspector.go b/module/mock/threshold_signature_inspector.go new file mode 100644 index 00000000000..8740ef2536f --- /dev/null +++ b/module/mock/threshold_signature_inspector.go @@ -0,0 +1,222 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + crypto "github.com/onflow/crypto" + mock "github.com/stretchr/testify/mock" +) + +// ThresholdSignatureInspector is an autogenerated mock type for the ThresholdSignatureInspector type +type ThresholdSignatureInspector struct { + mock.Mock +} + +// EnoughShares provides a mock function with no fields +func (_m *ThresholdSignatureInspector) EnoughShares() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnoughShares") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// HasShare provides a mock function with given fields: index +func (_m *ThresholdSignatureInspector) HasShare(index int) (bool, error) { + ret := _m.Called(index) + + if len(ret) == 0 { + panic("no return value specified for HasShare") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(int) (bool, error)); ok { + return rf(index) + } + if rf, ok := ret.Get(0).(func(int) bool); ok { + r0 = rf(index) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(int) error); ok { + r1 = rf(index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ThresholdSignature provides a mock function with no fields +func (_m *ThresholdSignatureInspector) ThresholdSignature() (crypto.Signature, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ThresholdSignature") + } + + var r0 crypto.Signature + var r1 error + if rf, ok := ret.Get(0).(func() (crypto.Signature, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() crypto.Signature); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.Signature) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TrustedAdd provides a mock function with given fields: index, share +func (_m *ThresholdSignatureInspector) TrustedAdd(index int, share crypto.Signature) (bool, error) { + ret := _m.Called(index, share) + + if len(ret) == 0 { + panic("no return value specified for TrustedAdd") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(int, crypto.Signature) (bool, error)); ok { + return rf(index, share) + } + if rf, ok := ret.Get(0).(func(int, crypto.Signature) bool); ok { + r0 = rf(index, share) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(int, crypto.Signature) error); ok { + r1 = rf(index, share) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VerifyAndAdd provides a mock function with given fields: index, share +func (_m *ThresholdSignatureInspector) VerifyAndAdd(index int, share crypto.Signature) (bool, bool, error) { + ret := _m.Called(index, share) + + if len(ret) == 0 { + panic("no return value specified for VerifyAndAdd") + } + + var r0 bool + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(int, crypto.Signature) (bool, bool, error)); ok { + return rf(index, share) + } + if rf, ok := ret.Get(0).(func(int, crypto.Signature) bool); ok { + r0 = rf(index, share) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(int, crypto.Signature) bool); ok { + r1 = rf(index, share) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(int, crypto.Signature) error); ok { + r2 = rf(index, share) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// VerifyShare provides a mock function with given fields: index, share +func (_m 
*ThresholdSignatureInspector) VerifyShare(index int, share crypto.Signature) (bool, error) { + ret := _m.Called(index, share) + + if len(ret) == 0 { + panic("no return value specified for VerifyShare") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(int, crypto.Signature) (bool, error)); ok { + return rf(index, share) + } + if rf, ok := ret.Get(0).(func(int, crypto.Signature) bool); ok { + r0 = rf(index, share) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(int, crypto.Signature) error); ok { + r1 = rf(index, share) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VerifyThresholdSignature provides a mock function with given fields: thresholdSignature +func (_m *ThresholdSignatureInspector) VerifyThresholdSignature(thresholdSignature crypto.Signature) (bool, error) { + ret := _m.Called(thresholdSignature) + + if len(ret) == 0 { + panic("no return value specified for VerifyThresholdSignature") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(crypto.Signature) (bool, error)); ok { + return rf(thresholdSignature) + } + if rf, ok := ret.Get(0).(func(crypto.Signature) bool); ok { + r0 = rf(thresholdSignature) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(crypto.Signature) error); ok { + r1 = rf(thresholdSignature) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewThresholdSignatureInspector creates a new instance of ThresholdSignatureInspector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewThresholdSignatureInspector(t interface { + mock.TestingT + Cleanup(func()) +}) *ThresholdSignatureInspector { + mock := &ThresholdSignatureInspector{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/threshold_signature_participant.go b/module/mock/threshold_signature_participant.go new file mode 100644 index 00000000000..0506361e5c0 --- /dev/null +++ b/module/mock/threshold_signature_participant.go @@ -0,0 +1,252 @@ +// Code generated by mockery. DO NOT EDIT. 
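One detail of the ThresholdSignatureInspector mock above (and the participant mock that follows) worth illustrating: each generated method first checks whether the configured return value is a single function producing all results at once, so a multi-return method such as `VerifyAndAdd` can be stubbed with one closure. A hedged sketch, assuming the mock lives under `module/mock` and that `crypto.Signature` (a byte slice from `github.com/onflow/crypto`) can be constructed literally:

```go
package example_test

import (
	"testing"

	"github.com/onflow/crypto"
	"github.com/stretchr/testify/mock"

	modulemock "github.com/onflow/flow-go/module/mock"
)

func TestVerifyAndAdd(t *testing.T) {
	inspector := modulemock.NewThresholdSignatureInspector(t)

	// The generated code detects that ret.Get(0) is a
	// func(int, crypto.Signature) (bool, bool, error) and invokes it,
	// exercising the combined-return branch added in this diff.
	inspector.On("VerifyAndAdd", mock.Anything, mock.Anything).
		Return(func(index int, share crypto.Signature) (bool, bool, error) {
			// accept the share; threshold not yet reached
			return true, false, nil
		})

	valid, enough, err := inspector.VerifyAndAdd(2, crypto.Signature{0x01})
	if err != nil || !valid || enough {
		t.Fatalf("unexpected result: valid=%v enough=%v err=%v", valid, enough, err)
	}
}
```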
+ +package mock + +import ( + crypto "github.com/onflow/crypto" + mock "github.com/stretchr/testify/mock" +) + +// ThresholdSignatureParticipant is an autogenerated mock type for the ThresholdSignatureParticipant type +type ThresholdSignatureParticipant struct { + mock.Mock +} + +// EnoughShares provides a mock function with no fields +func (_m *ThresholdSignatureParticipant) EnoughShares() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnoughShares") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// HasShare provides a mock function with given fields: index +func (_m *ThresholdSignatureParticipant) HasShare(index int) (bool, error) { + ret := _m.Called(index) + + if len(ret) == 0 { + panic("no return value specified for HasShare") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(int) (bool, error)); ok { + return rf(index) + } + if rf, ok := ret.Get(0).(func(int) bool); ok { + r0 = rf(index) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(int) error); ok { + r1 = rf(index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SignShare provides a mock function with no fields +func (_m *ThresholdSignatureParticipant) SignShare() (crypto.Signature, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SignShare") + } + + var r0 crypto.Signature + var r1 error + if rf, ok := ret.Get(0).(func() (crypto.Signature, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() crypto.Signature); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.Signature) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ThresholdSignature provides a mock function with no fields +func (_m *ThresholdSignatureParticipant) ThresholdSignature() (crypto.Signature, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ThresholdSignature") + } + + var r0 crypto.Signature + var r1 error + if rf, ok := ret.Get(0).(func() (crypto.Signature, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() crypto.Signature); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.Signature) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TrustedAdd provides a mock function with given fields: index, share +func (_m *ThresholdSignatureParticipant) TrustedAdd(index int, share crypto.Signature) (bool, error) { + ret := _m.Called(index, share) + + if len(ret) == 0 { + panic("no return value specified for TrustedAdd") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(int, crypto.Signature) (bool, error)); ok { + return rf(index, share) + } + if rf, ok := ret.Get(0).(func(int, crypto.Signature) bool); ok { + r0 = rf(index, share) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(int, crypto.Signature) error); ok { + r1 = rf(index, share) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VerifyAndAdd provides a mock function with given fields: index, share +func (_m *ThresholdSignatureParticipant) VerifyAndAdd(index int, share crypto.Signature) (bool, bool, error) { + ret := _m.Called(index, share) + + if len(ret) == 0 { + panic("no return value specified for VerifyAndAdd") + } + + var r0 bool + var r1 
bool + var r2 error + if rf, ok := ret.Get(0).(func(int, crypto.Signature) (bool, bool, error)); ok { + return rf(index, share) + } + if rf, ok := ret.Get(0).(func(int, crypto.Signature) bool); ok { + r0 = rf(index, share) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(int, crypto.Signature) bool); ok { + r1 = rf(index, share) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(int, crypto.Signature) error); ok { + r2 = rf(index, share) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// VerifyShare provides a mock function with given fields: index, share +func (_m *ThresholdSignatureParticipant) VerifyShare(index int, share crypto.Signature) (bool, error) { + ret := _m.Called(index, share) + + if len(ret) == 0 { + panic("no return value specified for VerifyShare") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(int, crypto.Signature) (bool, error)); ok { + return rf(index, share) + } + if rf, ok := ret.Get(0).(func(int, crypto.Signature) bool); ok { + r0 = rf(index, share) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(int, crypto.Signature) error); ok { + r1 = rf(index, share) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VerifyThresholdSignature provides a mock function with given fields: thresholdSignature +func (_m *ThresholdSignatureParticipant) VerifyThresholdSignature(thresholdSignature crypto.Signature) (bool, error) { + ret := _m.Called(thresholdSignature) + + if len(ret) == 0 { + panic("no return value specified for VerifyThresholdSignature") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(crypto.Signature) (bool, error)); ok { + return rf(thresholdSignature) + } + if rf, ok := ret.Get(0).(func(crypto.Signature) bool); ok { + r0 = rf(thresholdSignature) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(crypto.Signature) error); ok { + r1 = rf(thresholdSignature) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewThresholdSignatureParticipant creates a new instance of ThresholdSignatureParticipant. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewThresholdSignatureParticipant(t interface { + mock.TestingT + Cleanup(func()) +}) *ThresholdSignatureParticipant { + mock := &ThresholdSignatureParticipant{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/tracer.go b/module/mock/tracer.go index 65c7544ab5b..28b0c1f160f 100644 --- a/module/mock/tracer.go +++ b/module/mock/tracer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -22,6 +22,10 @@ type Tracer struct { func (_m *Tracer) BlockRootSpan(blockID flow.Identifier) trace.Span { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for BlockRootSpan") + } + var r0 trace.Span if rf, ok := ret.Get(0).(func(flow.Identifier) trace.Span); ok { r0 = rf(blockID) @@ -34,10 +38,14 @@ func (_m *Tracer) BlockRootSpan(blockID flow.Identifier) trace.Span { return r0 } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *Tracer) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -50,10 +58,14 @@ func (_m *Tracer) Done() <-chan struct{} { return r0 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *Tracer) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -70,6 +82,10 @@ func (_m *Tracer) Ready() <-chan struct{} { func (_m *Tracer) ShouldSample(entityID flow.Identifier) bool { ret := _m.Called(entityID) + if len(ret) == 0 { + panic("no return value specified for ShouldSample") + } + var r0 bool if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { r0 = rf(entityID) @@ -91,6 +107,10 @@ func (_m *Tracer) StartBlockSpan(ctx context.Context, blockID flow.Identifier, s _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for StartBlockSpan") + } + var r0 trace.Span var r1 context.Context if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, moduletrace.SpanName, ...trace.SpanStartOption) (trace.Span, context.Context)); ok { @@ -126,6 +146,10 @@ func (_m *Tracer) StartCollectionSpan(ctx context.Context, collectionID flow.Ide _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for StartCollectionSpan") + } + var r0 trace.Span var r1 context.Context if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, moduletrace.SpanName, ...trace.SpanStartOption) (trace.Span, context.Context)); ok { @@ -161,6 +185,10 @@ func (_m *Tracer) StartSampledSpanFromParent(parentSpan trace.Span, entityID flo _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for StartSampledSpanFromParent") + } + var r0 trace.Span if rf, ok := ret.Get(0).(func(trace.Span, flow.Identifier, moduletrace.SpanName, ...trace.SpanStartOption) trace.Span); ok { r0 = rf(parentSpan, entityID, operationName, opts...) @@ -184,6 +212,10 @@ func (_m *Tracer) StartSpanFromContext(ctx context.Context, operationName module _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for StartSpanFromContext") + } + var r0 trace.Span var r1 context.Context if rf, ok := ret.Get(0).(func(context.Context, moduletrace.SpanName, ...trace.SpanStartOption) (trace.Span, context.Context)); ok { @@ -219,6 +251,10 @@ func (_m *Tracer) StartSpanFromParent(parentSpan trace.Span, operationName modul _ca = append(_ca, _va...) ret := _m.Called(_ca...) 
+ if len(ret) == 0 { + panic("no return value specified for StartSpanFromParent") + } + var r0 trace.Span if rf, ok := ret.Get(0).(func(trace.Span, moduletrace.SpanName, ...trace.SpanStartOption) trace.Span); ok { r0 = rf(parentSpan, operationName, opts...) @@ -231,6 +267,45 @@ func (_m *Tracer) StartSpanFromParent(parentSpan trace.Span, operationName modul return r0 } +// StartTransactionSpan provides a mock function with given fields: ctx, transactionID, spanName, opts +func (_m *Tracer) StartTransactionSpan(ctx context.Context, transactionID flow.Identifier, spanName moduletrace.SpanName, opts ...trace.SpanStartOption) (trace.Span, context.Context) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, transactionID, spanName) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for StartTransactionSpan") + } + + var r0 trace.Span + var r1 context.Context + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, moduletrace.SpanName, ...trace.SpanStartOption) (trace.Span, context.Context)); ok { + return rf(ctx, transactionID, spanName, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier, moduletrace.SpanName, ...trace.SpanStartOption) trace.Span); ok { + r0 = rf(ctx, transactionID, spanName, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(trace.Span) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, flow.Identifier, moduletrace.SpanName, ...trace.SpanStartOption) context.Context); ok { + r1 = rf(ctx, transactionID, spanName, opts...) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(context.Context) + } + } + + return r0, r1 +} + // WithSpanFromContext provides a mock function with given fields: ctx, operationName, f, opts func (_m *Tracer) WithSpanFromContext(ctx context.Context, operationName moduletrace.SpanName, f func(), opts ...trace.SpanStartOption) { _va := make([]interface{}, len(opts)) @@ -243,13 +318,12 @@ func (_m *Tracer) WithSpanFromContext(ctx context.Context, operationName modulet _m.Called(_ca...) } -type mockConstructorTestingTNewTracer interface { +// NewTracer creates a new instance of Tracer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTracer(t interface { mock.TestingT Cleanup(func()) -} - -// NewTracer creates a new instance of Tracer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTracer(t mockConstructorTestingTNewTracer) *Tracer { +}) *Tracer { mock := &Tracer{} mock.Mock.Test(t) diff --git a/module/mock/transaction_error_messages_metrics.go b/module/mock/transaction_error_messages_metrics.go new file mode 100644 index 00000000000..3a00b189b7b --- /dev/null +++ b/module/mock/transaction_error_messages_metrics.go @@ -0,0 +1,48 @@ +// Code generated by mockery. DO NOT EDIT. 
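The Tracer mock above gains `StartTransactionSpan`, which, like the other span helpers, takes variadic `trace.SpanStartOption`s that the generated code flattens into the `Called` arguments. With testify, an expectation carrying one matcher per fixed argument matches a call made with zero options. A sketch under stated assumptions: `moduletrace.SpanName` is a string-backed type in `module/trace`, `unittest.IdentifierFixture` comes from `utils/unittest`, and `oteltrace.SpanFromContext` on a bare context yields a no-op span (standard OpenTelemetry behavior):

```go
package example_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	oteltrace "go.opentelemetry.io/otel/trace"

	modulemock "github.com/onflow/flow-go/module/mock"
	moduletrace "github.com/onflow/flow-go/module/trace"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestStartTransactionSpan(t *testing.T) {
	tracer := modulemock.NewTracer(t)

	ctx := context.Background()
	span := oteltrace.SpanFromContext(ctx) // no-op span, fine as a stub return

	txID := unittest.IdentifierFixture()
	// Three matchers cover the three fixed arguments (ctx, txID, spanName);
	// calling with no variadic options matches this expectation.
	tracer.On("StartTransactionSpan", mock.Anything, txID, mock.Anything).
		Return(span, ctx)

	gotSpan, gotCtx := tracer.StartTransactionSpan(ctx, txID, moduletrace.SpanName("example"))
	if gotSpan == nil || gotCtx == nil {
		t.Fatal("expected stubbed span and context")
	}
}
```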
+ +package mock + +import ( + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// TransactionErrorMessagesMetrics is an autogenerated mock type for the TransactionErrorMessagesMetrics type +type TransactionErrorMessagesMetrics struct { + mock.Mock +} + +// TxErrorsFetchFinished provides a mock function with given fields: duration, success, height +func (_m *TransactionErrorMessagesMetrics) TxErrorsFetchFinished(duration time.Duration, success bool, height uint64) { + _m.Called(duration, success, height) +} + +// TxErrorsFetchRetried provides a mock function with no fields +func (_m *TransactionErrorMessagesMetrics) TxErrorsFetchRetried() { + _m.Called() +} + +// TxErrorsFetchStarted provides a mock function with no fields +func (_m *TransactionErrorMessagesMetrics) TxErrorsFetchStarted() { + _m.Called() +} + +// TxErrorsInitialHeight provides a mock function with given fields: height +func (_m *TransactionErrorMessagesMetrics) TxErrorsInitialHeight(height uint64) { + _m.Called(height) +} + +// NewTransactionErrorMessagesMetrics creates a new instance of TransactionErrorMessagesMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionErrorMessagesMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionErrorMessagesMetrics { + mock := &TransactionErrorMessagesMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/transaction_metrics.go b/module/mock/transaction_metrics.go index 49f5f0c3958..9544d83d3e3 100644 --- a/module/mock/transaction_metrics.go +++ b/module/mock/transaction_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -14,11 +14,6 @@ type TransactionMetrics struct { mock.Mock } -// ScriptExecuted provides a mock function with given fields: dur, size -func (_m *TransactionMetrics) ScriptExecuted(dur time.Duration, size int) { - _m.Called(dur, size) -} - // TransactionExecuted provides a mock function with given fields: txID, when func (_m *TransactionMetrics) TransactionExecuted(txID flow.Identifier, when time.Time) { _m.Called(txID, when) @@ -44,23 +39,22 @@ func (_m *TransactionMetrics) TransactionResultFetched(dur time.Duration, size i _m.Called(dur, size) } -// TransactionSubmissionFailed provides a mock function with given fields: -func (_m *TransactionMetrics) TransactionSubmissionFailed() { - _m.Called() +// TransactionSealed provides a mock function with given fields: txID, when +func (_m *TransactionMetrics) TransactionSealed(txID flow.Identifier, when time.Time) { + _m.Called(txID, when) } -// UpdateExecutionReceiptMaxHeight provides a mock function with given fields: height -func (_m *TransactionMetrics) UpdateExecutionReceiptMaxHeight(height uint64) { - _m.Called(height) +// TransactionSubmissionFailed provides a mock function with no fields +func (_m *TransactionMetrics) TransactionSubmissionFailed() { + _m.Called() } -type mockConstructorTestingTNewTransactionMetrics interface { +// NewTransactionMetrics creates a new instance of TransactionMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewTransactionMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewTransactionMetrics creates a new instance of TransactionMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTransactionMetrics(t mockConstructorTestingTNewTransactionMetrics) *TransactionMetrics { +}) *TransactionMetrics { mock := &TransactionMetrics{} mock.Mock.Test(t) diff --git a/module/mock/transaction_validation_metrics.go b/module/mock/transaction_validation_metrics.go new file mode 100644 index 00000000000..43600bea3e2 --- /dev/null +++ b/module/mock/transaction_validation_metrics.go @@ -0,0 +1,39 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// TransactionValidationMetrics is an autogenerated mock type for the TransactionValidationMetrics type +type TransactionValidationMetrics struct { + mock.Mock +} + +// TransactionValidated provides a mock function with no fields +func (_m *TransactionValidationMetrics) TransactionValidated() { + _m.Called() +} + +// TransactionValidationFailed provides a mock function with given fields: reason +func (_m *TransactionValidationMetrics) TransactionValidationFailed(reason string) { + _m.Called(reason) +} + +// TransactionValidationSkipped provides a mock function with no fields +func (_m *TransactionValidationMetrics) TransactionValidationSkipped() { + _m.Called() +} + +// NewTransactionValidationMetrics creates a new instance of TransactionValidationMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionValidationMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionValidationMetrics { + mock := &TransactionValidationMetrics{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/mock/unicast_manager_metrics.go b/module/mock/unicast_manager_metrics.go index 6f26b3c7566..d085ab774be 100644 --- a/module/mock/unicast_manager_metrics.go +++ b/module/mock/unicast_manager_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -13,6 +13,16 @@ type UnicastManagerMetrics struct { mock.Mock } +// OnDialRetryBudgetResetToDefault provides a mock function with no fields +func (_m *UnicastManagerMetrics) OnDialRetryBudgetResetToDefault() { + _m.Called() +} + +// OnDialRetryBudgetUpdated provides a mock function with given fields: budget +func (_m *UnicastManagerMetrics) OnDialRetryBudgetUpdated(budget uint64) { + _m.Called(budget) +} + // OnEstablishStreamFailure provides a mock function with given fields: duration, attempts func (_m *UnicastManagerMetrics) OnEstablishStreamFailure(duration time.Duration, attempts int) { _m.Called(duration, attempts) @@ -38,18 +48,27 @@ func (_m *UnicastManagerMetrics) OnStreamCreationFailure(duration time.Duration, _m.Called(duration, attempts) } +// OnStreamCreationRetryBudgetResetToDefault provides a mock function with no fields +func (_m *UnicastManagerMetrics) OnStreamCreationRetryBudgetResetToDefault() { + _m.Called() +} + +// OnStreamCreationRetryBudgetUpdated provides a mock function with given fields: budget +func (_m *UnicastManagerMetrics) OnStreamCreationRetryBudgetUpdated(budget uint64) { + _m.Called(budget) +} + // OnStreamEstablished provides a mock function with given fields: duration, attempts func (_m *UnicastManagerMetrics) OnStreamEstablished(duration time.Duration, attempts int) { _m.Called(duration, attempts) } -type mockConstructorTestingTNewUnicastManagerMetrics interface { +// NewUnicastManagerMetrics creates a new instance of UnicastManagerMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUnicastManagerMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewUnicastManagerMetrics creates a new instance of UnicastManagerMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewUnicastManagerMetrics(t mockConstructorTestingTNewUnicastManagerMetrics) *UnicastManagerMetrics { +}) *UnicastManagerMetrics { mock := &UnicastManagerMetrics{} mock.Mock.Test(t) diff --git a/module/mock/verification_metrics.go b/module/mock/verification_metrics.go index 4b357a6b163..81a92ab6f6d 100644 --- a/module/mock/verification_metrics.go +++ b/module/mock/verification_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -9,12 +9,12 @@ type VerificationMetrics struct { mock.Mock } -// OnAssignedChunkProcessedAtAssigner provides a mock function with given fields: +// OnAssignedChunkProcessedAtAssigner provides a mock function with no fields func (_m *VerificationMetrics) OnAssignedChunkProcessedAtAssigner() { _m.Called() } -// OnAssignedChunkReceivedAtFetcher provides a mock function with given fields: +// OnAssignedChunkReceivedAtFetcher provides a mock function with no fields func (_m *VerificationMetrics) OnAssignedChunkReceivedAtFetcher() { _m.Called() } @@ -29,32 +29,32 @@ func (_m *VerificationMetrics) OnChunkConsumerJobDone(_a0 uint64) { _m.Called(_a0) } -// OnChunkDataPackArrivedAtFetcher provides a mock function with given fields: +// OnChunkDataPackArrivedAtFetcher provides a mock function with no fields func (_m *VerificationMetrics) OnChunkDataPackArrivedAtFetcher() { _m.Called() } -// OnChunkDataPackRequestDispatchedInNetworkByRequester provides a mock function with given fields: +// OnChunkDataPackRequestDispatchedInNetworkByRequester provides a mock function with no fields func (_m *VerificationMetrics) OnChunkDataPackRequestDispatchedInNetworkByRequester() { _m.Called() } -// OnChunkDataPackRequestReceivedByRequester provides a mock function with given fields: +// OnChunkDataPackRequestReceivedByRequester provides a mock function with no fields func (_m *VerificationMetrics) OnChunkDataPackRequestReceivedByRequester() { _m.Called() } -// OnChunkDataPackRequestSentByFetcher provides a mock function with given fields: +// OnChunkDataPackRequestSentByFetcher provides a mock function with no fields func (_m *VerificationMetrics) OnChunkDataPackRequestSentByFetcher() { _m.Called() } -// OnChunkDataPackResponseReceivedFromNetworkByRequester provides a mock function with given fields: +// OnChunkDataPackResponseReceivedFromNetworkByRequester provides a mock function with no fields func (_m *VerificationMetrics) OnChunkDataPackResponseReceivedFromNetworkByRequester() { _m.Called() } -// OnChunkDataPackSentToFetcher provides a mock function with given fields: +// OnChunkDataPackSentToFetcher provides a mock function with no fields func (_m *VerificationMetrics) OnChunkDataPackSentToFetcher() { _m.Called() } @@ -64,7 +64,7 @@ func (_m *VerificationMetrics) OnChunksAssignmentDoneAtAssigner(chunks int) { _m.Called(chunks) } -// OnExecutionResultReceivedAtAssignerEngine provides a mock function with given fields: +// OnExecutionResultReceivedAtAssignerEngine provides a mock function with no fields func (_m *VerificationMetrics) OnExecutionResultReceivedAtAssignerEngine() { _m.Called() } @@ -74,17 +74,17 @@ func (_m *VerificationMetrics) OnFinalizedBlockArrivedAtAssigner(height uint64) _m.Called(height) } -// OnResultApprovalDispatchedInNetworkByVerifier provides a mock function with given fields: +// OnResultApprovalDispatchedInNetworkByVerifier provides a mock function with no fields func (_m *VerificationMetrics) OnResultApprovalDispatchedInNetworkByVerifier() { _m.Called() } -// OnVerifiableChunkReceivedAtVerifierEngine provides a mock function with given fields: +// OnVerifiableChunkReceivedAtVerifierEngine provides a mock function with no fields func (_m *VerificationMetrics) OnVerifiableChunkReceivedAtVerifierEngine() { _m.Called() } -// OnVerifiableChunkSentToVerifier provides a mock function with given fields: +// OnVerifiableChunkSentToVerifier provides a mock function with no fields func (_m *VerificationMetrics) OnVerifiableChunkSentToVerifier() { _m.Called() } @@ -94,13 +94,12 
@@ func (_m *VerificationMetrics) SetMaxChunkDataPackAttemptsForNextUnsealedHeightA _m.Called(attempts) } -type mockConstructorTestingTNewVerificationMetrics interface { +// NewVerificationMetrics creates a new instance of VerificationMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVerificationMetrics(t interface { mock.TestingT Cleanup(func()) -} - -// NewVerificationMetrics creates a new instance of VerificationMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVerificationMetrics(t mockConstructorTestingTNewVerificationMetrics) *VerificationMetrics { +}) *VerificationMetrics { mock := &VerificationMetrics{} mock.Mock.Test(t) diff --git a/module/mock/wal_metrics.go b/module/mock/wal_metrics.go index bf26cbb86ef..6ff334241c5 100644 --- a/module/mock/wal_metrics.go +++ b/module/mock/wal_metrics.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -9,13 +9,17 @@ type WALMetrics struct { mock.Mock } -type mockConstructorTestingTNewWALMetrics interface { - mock.TestingT - Cleanup(func()) +// ExecutionCheckpointSize provides a mock function with given fields: bytes +func (_m *WALMetrics) ExecutionCheckpointSize(bytes uint64) { + _m.Called(bytes) } // NewWALMetrics creates a new instance of WALMetrics. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewWALMetrics(t mockConstructorTestingTNewWALMetrics) *WALMetrics { +// The first argument is typically a *testing.T value. +func NewWALMetrics(t interface { + mock.TestingT + Cleanup(func()) +}) *WALMetrics { mock := &WALMetrics{} mock.Mock.Test(t) diff --git a/module/mocks/network.go b/module/mocks/network.go deleted file mode 100644 index 3788efaf45a..00000000000 --- a/module/mocks/network.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/onflow/flow-go/module (interfaces: Local,Requester) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - crypto "github.com/onflow/flow-go/crypto" - hash "github.com/onflow/flow-go/crypto/hash" - flow "github.com/onflow/flow-go/model/flow" -) - -// MockLocal is a mock of Local interface. -type MockLocal struct { - ctrl *gomock.Controller - recorder *MockLocalMockRecorder -} - -// MockLocalMockRecorder is the mock recorder for MockLocal. -type MockLocalMockRecorder struct { - mock *MockLocal -} - -// NewMockLocal creates a new mock instance. -func NewMockLocal(ctrl *gomock.Controller) *MockLocal { - mock := &MockLocal{ctrl: ctrl} - mock.recorder = &MockLocalMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockLocal) EXPECT() *MockLocalMockRecorder { - return m.recorder -} - -// Address mocks base method. -func (m *MockLocal) Address() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Address") - ret0, _ := ret[0].(string) - return ret0 -} - -// Address indicates an expected call of Address. -func (mr *MockLocalMockRecorder) Address() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Address", reflect.TypeOf((*MockLocal)(nil).Address)) -} - -// NodeID mocks base method. 
-func (m *MockLocal) NodeID() flow.Identifier { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NodeID") - ret0, _ := ret[0].(flow.Identifier) - return ret0 -} - -// NodeID indicates an expected call of NodeID. -func (mr *MockLocalMockRecorder) NodeID() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockLocal)(nil).NodeID)) -} - -// NotMeFilter mocks base method. -func (m *MockLocal) NotMeFilter() flow.IdentityFilter { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NotMeFilter") - ret0, _ := ret[0].(flow.IdentityFilter) - return ret0 -} - -// NotMeFilter indicates an expected call of NotMeFilter. -func (mr *MockLocalMockRecorder) NotMeFilter() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotMeFilter", reflect.TypeOf((*MockLocal)(nil).NotMeFilter)) -} - -// Sign mocks base method. -func (m *MockLocal) Sign(arg0 []byte, arg1 hash.Hasher) (crypto.Signature, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Sign", arg0, arg1) - ret0, _ := ret[0].(crypto.Signature) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Sign indicates an expected call of Sign. -func (mr *MockLocalMockRecorder) Sign(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sign", reflect.TypeOf((*MockLocal)(nil).Sign), arg0, arg1) -} - -// SignFunc mocks base method. -func (m *MockLocal) SignFunc(arg0 []byte, arg1 hash.Hasher, arg2 func(crypto.PrivateKey, []byte, hash.Hasher) (crypto.Signature, error)) (crypto.Signature, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SignFunc", arg0, arg1, arg2) - ret0, _ := ret[0].(crypto.Signature) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SignFunc indicates an expected call of SignFunc. -func (mr *MockLocalMockRecorder) SignFunc(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignFunc", reflect.TypeOf((*MockLocal)(nil).SignFunc), arg0, arg1, arg2) -} - -// MockRequester is a mock of Requester interface. -type MockRequester struct { - ctrl *gomock.Controller - recorder *MockRequesterMockRecorder -} - -// MockRequesterMockRecorder is the mock recorder for MockRequester. -type MockRequesterMockRecorder struct { - mock *MockRequester -} - -// NewMockRequester creates a new mock instance. -func NewMockRequester(ctrl *gomock.Controller) *MockRequester { - mock := &MockRequester{ctrl: ctrl} - mock.recorder = &MockRequesterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockRequester) EXPECT() *MockRequesterMockRecorder { - return m.recorder -} - -// EntityByID mocks base method. -func (m *MockRequester) EntityByID(arg0 flow.Identifier, arg1 flow.IdentityFilter) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "EntityByID", arg0, arg1) -} - -// EntityByID indicates an expected call of EntityByID. -func (mr *MockRequesterMockRecorder) EntityByID(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EntityByID", reflect.TypeOf((*MockRequester)(nil).EntityByID), arg0, arg1) -} - -// Force mocks base method. -func (m *MockRequester) Force() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Force") -} - -// Force indicates an expected call of Force. 
-func (mr *MockRequesterMockRecorder) Force() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Force", reflect.TypeOf((*MockRequester)(nil).Force)) -} - -// Query mocks base method. -func (m *MockRequester) Query(arg0 flow.Identifier, arg1 flow.IdentityFilter) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Query", arg0, arg1) -} - -// Query indicates an expected call of Query. -func (mr *MockRequesterMockRecorder) Query(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockRequester)(nil).Query), arg0, arg1) -} diff --git a/module/profiler/profiler.go b/module/profiler/profiler.go index 0c1058f66da..6054bf77078 100644 --- a/module/profiler/profiler.go +++ b/module/profiler/profiler.go @@ -142,7 +142,8 @@ func (p *AutoProfiler) runOnce(d time.Duration) { {profileName: "goroutine", profileType: pb.ProfileType_THREADS, profileFunc: func(w io.Writer, _ time.Duration) error { return newProfileFunc("goroutine")(w) }}, {profileName: "heap", profileType: pb.ProfileType_HEAP, profileFunc: p.pprofHeap}, {profileName: "allocs", profileType: pb.ProfileType_HEAP_ALLOC, profileFunc: p.pprofAllocs}, - {profileName: "block", profileType: pb.ProfileType_CONTENTION, profileFunc: p.pprofBlock}, + // The block profiling is causing some crashes in the runtime, so we are disabling it for now. + // {profileName: "block", profileType: pb.ProfileType_CONTENTION, profileFunc: p.pprofBlock}, {profileName: "cpu", profileType: pb.ProfileType_WALL, profileFunc: p.pprofCpu}, } { path := filepath.Join(p.dir, fmt.Sprintf("%s-%s", prof.profileName, time.Now().Format(time.RFC3339))) @@ -311,6 +312,9 @@ func (p *AutoProfiler) pprofAllocs(w io.Writer, d time.Duration) (err error) { return diff.Write(w) } +// I'm keeping this for later use +// +//nolint:unused func (p *AutoProfiler) pprofBlock(w io.Writer, d time.Duration) error { runtime.SetBlockProfileRate(100) defer runtime.SetBlockProfileRate(0) diff --git a/module/profiler/profiler_test.go b/module/profiler/profiler_test.go index 9df4c409b23..f889e78fc8f 100644 --- a/module/profiler/profiler_test.go +++ b/module/profiler/profiler_test.go @@ -46,7 +46,7 @@ func TestProfiler(t *testing.T) { require.NoError(t, err) foundPtypes := make(map[string]bool) - for _, pType := range []string{"heap", "allocs", "goroutine", "cpu", "block"} { + for _, pType := range []string{"heap", "allocs", "goroutine", "cpu"} { foundPtypes[pType] = false } diff --git a/module/pruner/pruners/chunk_data_pack.go b/module/pruner/pruners/chunk_data_pack.go new file mode 100644 index 00000000000..71517a4522a --- /dev/null +++ b/module/pruner/pruners/chunk_data_pack.go @@ -0,0 +1,49 @@ +package pruners + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +type ChunkDataPackPruner struct { + chunkDataPacks storage.ChunkDataPacks + results storage.ExecutionResults +} + +func NewChunkDataPackPruner(chunkDataPacks storage.ChunkDataPacks, results storage.ExecutionResults) *ChunkDataPackPruner { + return &ChunkDataPackPruner{ + chunkDataPacks: chunkDataPacks, + results: results, + } +} + +func (p *ChunkDataPackPruner) PruneByBlockID(blockID flow.Identifier, batchWriter storage.ReaderBatchWriter) error { + result, err := p.results.ByBlockID(blockID) + + // result not found, then chunk data pack must not exist either + if errors.Is(err, storage.ErrNotFound) { + return nil + } + + if err != nil { + return 
fmt.Errorf("failed to get execution result by block ID: %w", err) + } + + for _, chunk := range result.Chunks { + chunkID := chunk.ID() + // remove chunk data pack + err := p.chunkDataPacks.BatchRemove(chunkID, batchWriter) + if errors.Is(err, storage.ErrNotFound) { + continue + } + + if err != nil { + return fmt.Errorf("could not remove chunk id %v for block id %v: %w", chunkID, blockID, err) + } + } + + return nil +} diff --git a/module/pruner/pruners/chunk_data_pack_test.go b/module/pruner/pruners/chunk_data_pack_test.go new file mode 100644 index 00000000000..f595ed30bcd --- /dev/null +++ b/module/pruner/pruners/chunk_data_pack_test.go @@ -0,0 +1,59 @@ +package pruners + +import ( + "errors" + "fmt" + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestChunkDataPackPruner(t *testing.T) { + + unittest.RunWithPebbleDB(t, func(pebbleDB *pebble.DB) { + lockManager := storage.NewTestingLockManager() + m := metrics.NewNoopCollector() + db := pebbleimpl.ToDB(pebbleDB) + results := store.NewExecutionResults(m, db) + transactions := store.NewTransactions(m, db) + collections := store.NewCollections(db, transactions) + byChunkIDCacheSize := uint(10) + chunks := store.NewChunkDataPacks(m, db, collections, byChunkIDCacheSize) + + // store the chunks + cdp1, result1 := unittest.ChunkDataPacksFixtureAndResult() + require.NoError(t, results.Store(result1)) + require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { + return chunks.StoreByChunkID(lctx, cdp1) + })) + + pruner := NewChunkDataPackPruner(chunks, results) + + // prune the chunks + require.NoError(t, db.WithReaderBatchWriter(func(w storage.ReaderBatchWriter) error { + return pruner.PruneByBlockID(result1.BlockID, w) + })) + + // verify they are pruned + _, err := chunks.ByChunkID(cdp1[0].ChunkID) + require.True(t, errors.Is(err, storage.ErrNotFound), fmt.Errorf("expected ErrNotFound but got %v", err)) + + // prune again should not return error + require.NoError(t, db.WithReaderBatchWriter(func(w storage.ReaderBatchWriter) error { + return pruner.PruneByBlockID(result1.BlockID, w) + })) + + // prune non-exist block should not return error + require.NoError(t, db.WithReaderBatchWriter(func(w storage.ReaderBatchWriter) error { + return pruner.PruneByBlockID(unittest.IdentifierFixture(), w) + })) + }) +} diff --git a/module/queue/concurrent_priority_queue.go b/module/queue/concurrent_priority_queue.go new file mode 100644 index 00000000000..9f2c53be39b --- /dev/null +++ b/module/queue/concurrent_priority_queue.go @@ -0,0 +1,75 @@ +package queue + +import ( + "container/heap" + "sync" + + "github.com/onflow/flow-go/engine" +) + +// ConcurrentPriorityQueue is a thread-safe priority queue that provides a channel-based notification +// mechanism when items are inserted. +// All methods are safe for concurrent access. +type ConcurrentPriorityQueue[T any] struct { + queue PriorityQueue[T] + smallerValuesFirst bool + notifier engine.Notifier + mu sync.RWMutex +} + +// NewConcurrentPriorityQueue creates a new instance of ConcurrentPriorityQueue. +// If smallerValuesFirst is true, inverts the priority so items with lower values take precedence. 
+func NewConcurrentPriorityQueue[T any](smallerValuesFirst bool) *ConcurrentPriorityQueue[T] { + return &ConcurrentPriorityQueue[T]{ + queue: PriorityQueue[T]{}, + smallerValuesFirst: smallerValuesFirst, + notifier: engine.NewNotifier(), + } +} + +// Len returns the number of items currently in the queue. +func (mq *ConcurrentPriorityQueue[T]) Len() int { + mq.mu.RLock() + defer mq.mu.RUnlock() + + return mq.queue.Len() +} + +// Push adds a new item to the queue with the specified priority. +// A notification is sent on the channel if it's not already full. +func (mq *ConcurrentPriorityQueue[T]) Push(item T, priority uint64) { + mq.mu.Lock() + defer mq.mu.Unlock() + + // if smaller values are higher priority, invert the priority value since the heap will always + // return the largest value first. + if mq.smallerValuesFirst { + priority = ^priority + } + + heap.Push(&mq.queue, NewPriorityQueueItem(item, priority)) + + mq.notifier.Notify() +} + +// Pop removes and immediately returns the highest priority item from the queue. +// If the queue is empty, false is returned. +// If multiple items have the same priority, the oldest one by insertion time is returned. +func (mq *ConcurrentPriorityQueue[T]) Pop() (T, bool) { + mq.mu.Lock() + defer mq.mu.Unlock() + + if mq.queue.Len() == 0 { + var nilT T + return nilT, false + } + + item := heap.Pop(&mq.queue).(*PriorityQueueItem[T]) + return item.Message(), true +} + +// Channel returns a signal channel that receives a signal when an item is inserted. +// This allows consumers to be notified of new items without polling. +func (mq *ConcurrentPriorityQueue[T]) Channel() <-chan struct{} { + return mq.notifier.Channel() +} diff --git a/module/queue/concurrent_priority_queue_test.go b/module/queue/concurrent_priority_queue_test.go new file mode 100644 index 00000000000..734295da80d --- /dev/null +++ b/module/queue/concurrent_priority_queue_test.go @@ -0,0 +1,538 @@ +package queue + +import ( + "context" + "fmt" + "math" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewConcurrentPriorityQueue tests the constructor +func TestNewConcurrentPriorityQueue(t *testing.T) { + t.Run("creates queue with larger values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + assert.NotNil(t, mq) + assert.NotNil(t, mq.queue) + assert.False(t, mq.smallerValuesFirst) + assert.Equal(t, 0, mq.Len()) + }) + + t.Run("creates queue with smaller values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](true) + + assert.NotNil(t, mq) + assert.NotNil(t, mq.queue) + assert.True(t, mq.smallerValuesFirst) + assert.Equal(t, 0, mq.Len()) + }) +} + +// TestConcurrentPriorityQueue_Len tests the Len method +func TestConcurrentPriorityQueue_Len(t *testing.T) { + t.Run("empty queue returns 0", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + assert.Equal(t, 0, mq.Len()) + }) + + t.Run("queue with items returns correct length", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("item1", 1) + mq.Push("item2", 2) + mq.Push("item3", 3) + + assert.Equal(t, 3, mq.Len()) + }) + + t.Run("length decreases after pop", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("item1", 1) + mq.Push("item2", 2) + + assert.Equal(t, 2, mq.Len()) + + _, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, 1, mq.Len()) + }) +} + +// TestConcurrentPriorityQueue_Push tests the Push method 
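Two design points in `ConcurrentPriorityQueue` above are easy to miss. First, smaller-values-first ordering is implemented by bitwise complement: `^priority` reverses the order of `uint64` values (`^uint64(0) == math.MaxUint64` and vice versa), so the underlying max-heap (`PriorityQueue`, a plain `heap.Interface` shown later in this diff) then surfaces the smallest original value first. Second, `engine.Notifier` coalesces signals, so one receive on `Channel()` may cover several pushes; a consumer must drain the queue on every wakeup, as the tests below also do. A hypothetical consumer loop under those assumptions:

```go
package example

import (
	"context"

	"github.com/onflow/flow-go/module/queue"
)

// consumeLoop drains the queue after every notifier signal. Because the
// notifier channel holds at most one pending signal, popping until the
// queue reports empty is required to avoid stranding items that arrived
// while a previous signal was being handled.
func consumeLoop(ctx context.Context, q *queue.ConcurrentPriorityQueue[string], handle func(string)) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-q.Channel():
			for {
				item, ok := q.Pop()
				if !ok {
					break // drained; wait for the next signal
				}
				handle(item)
			}
		}
	}
}
```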
+func TestConcurrentPriorityQueue_Push(t *testing.T) { + t.Run("push adds items with larger values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("low", 1) + mq.Push("high", 10) + mq.Push("medium", 5) + + assert.Equal(t, 3, mq.Len()) + + // Pop items and verify they come out in priority order + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "high", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "medium", item2) + + item3, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "low", item3) + + _, ok = mq.Pop() + assert.False(t, ok) + }) + + t.Run("push adds items with smaller values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](true) + + mq.Push("high", 10) + mq.Push("low", 1) + mq.Push("medium", 5) + + assert.Equal(t, 3, mq.Len()) + + // Pop items and verify they come out in priority order (smaller first) + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "low", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "medium", item2) + + item3, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "high", item3) + + _, ok = mq.Pop() + assert.False(t, ok) + }) + + t.Run("push with zero priority", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("zero", 0) + mq.Push("high", 100) + + // Zero priority should come last when larger values are first + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "high", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "zero", item2) + }) + + t.Run("push with zero priority and smaller values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](true) + + mq.Push("high", 100) + mq.Push("zero", 0) + + // Zero priority should come first when smaller values are first + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "zero", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "high", item2) + }) +} + +// TestConcurrentPriorityQueue_Pop tests the Pop method +func TestConcurrentPriorityQueue_Pop(t *testing.T) { + t.Run("pop on empty queue returns false", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + item, ok := mq.Pop() + assert.False(t, ok) + var zero string + assert.Equal(t, zero, item) + }) + + t.Run("pop returns items in priority order", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("item1", 1) + mq.Push("item3", 3) + mq.Push("item2", 2) + + // Should come out in descending priority order + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "item3", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "item2", item2) + + item3, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "item1", item3) + + _, ok = mq.Pop() + assert.False(t, ok) + }) + + t.Run("pop with equal priorities uses timestamp ordering", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + // Add items with same priority but different timestamps + mq.Push("first", 5) + time.Sleep(time.Millisecond) + mq.Push("second", 5) + time.Sleep(time.Millisecond) + mq.Push("third", 5) + + // Should come out in insertion order (oldest first) + item1, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "first", item1) + + item2, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "second", item2) + + item3, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "third", item3) + }) +} + +// TestConcurrentPriorityQueue_Channel tests the 
Channel method +func TestConcurrentPriorityQueue_Channel(t *testing.T) { + t.Run("channel receives notification on push", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + ch := mq.Channel() + + // Push an item + mq.Push("test", 1) + + // Should receive notification + select { + case <-ch: + // Success + case <-time.After(100 * time.Millisecond): + t.Fatal("did not receive notification within timeout") + } + }) + + t.Run("channel does not block on multiple pushes", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + ch := mq.Channel() + + // Push multiple items rapidly + for i := 0; i < 10; i++ { + mq.Push("test", uint64(i)) + } + + // Should receive at least one notification + select { + case <-ch: + // Success + case <-time.After(100 * time.Millisecond): + t.Fatal("did not receive notification within timeout") + } + + // Channel should be buffered and not block + assert.Equal(t, 10, mq.Len()) + }) + + t.Run("channel is buffered", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + ch := mq.Channel() + + // Push multiple items without reading from channel + for i := 0; i < 5; i++ { + mq.Push("test", uint64(i)) + } + + // Should not block + assert.Equal(t, 5, mq.Len()) + + // Should be able to read from channel + select { + case <-ch: + // Success + case <-time.After(100 * time.Millisecond): + t.Fatal("did not receive notification within timeout") + } + }) +} + +// TestConcurrentPriorityQueue_Concurrency tests thread safety +func TestConcurrentPriorityQueue_Concurrency(t *testing.T) { + t.Run("concurrent push operations", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[int](false) + numGoroutines := 1000 + var wg sync.WaitGroup + + // Start multiple goroutines pushing items + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + mq.Push(id, uint64(id)) + }(i) + } + + wg.Wait() + + // Verify all items were added + assert.Equal(t, numGoroutines, mq.Len()) + + // Verify items can be popped correctly + popped := make(map[int]bool) + for i := 0; i < numGoroutines; i++ { + item, ok := mq.Pop() + assert.True(t, ok) + assert.False(t, popped[item], "duplicate item popped: %d", item) + popped[item] = true + } + + assert.Equal(t, numGoroutines, len(popped)) + }) + + t.Run("concurrent push and pop operations", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[int](false) + numGoroutines := 1000 + var wg sync.WaitGroup + var mu sync.Mutex + popped := make(map[int]bool) + + // Start goroutines that push items + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + mq.Push(id, uint64(id)) + }(i) + } + + // Wait for all pushes to complete + wg.Wait() + + // Now start goroutines that pop items + var popWg sync.WaitGroup + for i := 0; i < numGoroutines/2; i++ { + popWg.Add(1) + go func() { + defer popWg.Done() + for { + item, ok := mq.Pop() + if !ok { + break + } + mu.Lock() + assert.False(t, popped[item], "duplicate item popped: %d", item) + popped[item] = true + mu.Unlock() + } + }() + } + + popWg.Wait() + + // Verify no duplicates and all items were processed + assert.Equal(t, numGoroutines, len(popped)) + }) + + t.Run("concurrent len operations", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[int](false) + numGoroutines := 1000 + var wg sync.WaitGroup + + // Start goroutines that push items + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + mq.Push(id, uint64(id)) + }(i) + } + + // Start goroutines that call 
Len + for i := 0; i < numGoroutines/2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < 100; j++ { + _ = mq.Len() + } + }() + } + + wg.Wait() + + // Verify final length + assert.Equal(t, numGoroutines, mq.Len()) + }) +} + +// TestConcurrentPriorityQueue_EdgeCases tests edge cases +func TestConcurrentPriorityQueue_EdgeCases(t *testing.T) { + t.Run("max uint64 priority with larger values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("normal", 1000) + mq.Push("max", math.MaxUint64) + + item, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "max", item) + }) + + t.Run("max uint64 priority with smaller values first", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](true) + + mq.Push("normal", 1000) + mq.Push("max", math.MaxUint64) + + item, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, "normal", item) // max priority becomes 0 after inversion + }) + + t.Run("empty queue after popping all items", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + mq.Push("item1", 1) + mq.Push("item2", 2) + + assert.Equal(t, 2, mq.Len()) + + _, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, 1, mq.Len()) + + _, ok = mq.Pop() + assert.True(t, ok) + assert.Equal(t, 0, mq.Len()) + + _, ok = mq.Pop() + assert.False(t, ok) + assert.Equal(t, 0, mq.Len()) + }) + + t.Run("channel notification after queue becomes empty", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + ch := mq.Channel() + + mq.Push("item", 1) + + // Read the notification + <-ch + + // Pop the item + _, ok := mq.Pop() + assert.True(t, ok) + + // Push another item + mq.Push("item2", 2) + + // Should receive another notification + select { + case <-ch: + // Success + case <-time.After(100 * time.Millisecond): + t.Fatal("did not receive notification within timeout") + } + }) +} + +// TestConcurrentPriorityQueue_Integration tests integration scenarios +func TestConcurrentPriorityQueue_Integration(t *testing.T) { + t.Run("mixed operations with different priorities", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](false) + + // Add items with various priorities + mq.Push("urgent", 100) + mq.Push("normal", 50) + mq.Push("low", 10) + mq.Push("critical", 200) + mq.Push("medium", 75) + + // Pop all items and verify order + expected := []string{"critical", "urgent", "medium", "normal", "low"} + for _, exp := range expected { + item, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, exp, item) + } + + _, ok := mq.Pop() + assert.False(t, ok) + }) + + t.Run("priority inversion with mixed operations", func(t *testing.T) { + mq := NewConcurrentPriorityQueue[string](true) + + // Add items with various priorities + mq.Push("urgent", 100) + mq.Push("normal", 50) + mq.Push("low", 10) + mq.Push("critical", 200) + mq.Push("medium", 75) + + // Pop all items and verify order (smaller values first) + expected := []string{"low", "normal", "medium", "urgent", "critical"} + for _, exp := range expected { + item, ok := mq.Pop() + assert.True(t, ok) + assert.Equal(t, exp, item) + } + + _, ok := mq.Pop() + assert.False(t, ok) + }) + + t.Run("queue processing using channel", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mq := NewConcurrentPriorityQueue[string](true) + + itemCount := 100 + go func() { + for i := range itemCount { + mq.Push(fmt.Sprintf("item-%d", i), uint64(i)) + } + }() + + unittest.RequireReturnsBefore(t, func() { + for i := 0; i < itemCount; { + select 
{ + case <-ctx.Done(): + return + case <-mq.Channel(): + } + + for { + message, ok := mq.Pop() + if !ok { + break + } + assert.Equal(t, fmt.Sprintf("item-%d", i), message) + i++ + } + } + }, time.Second, "did not receive all messages within timeout") + + // make sure the queue is empty + assert.Zero(t, mq.Len()) + }) +} diff --git a/module/queue/priority_queue.go b/module/queue/priority_queue.go new file mode 100644 index 00000000000..75a330e7b30 --- /dev/null +++ b/module/queue/priority_queue.go @@ -0,0 +1,107 @@ +package queue + +import ( + "container/heap" + "fmt" + "time" +) + +// PriorityQueueItem is a generic item in the priority queue. +// Each item contains a message, priority value, and metadata for queue management. +// PriorityQueueItems are immutable once created and safe for concurrent access. +type PriorityQueueItem[T any] struct { + // message is the actual item in the queue. + message T + + // priority is the priority of the item in the queue. + // Larger priority values are dequeued first. + priority uint64 + + // index is the index of the item in the heap. + // The index is required by update() and is maintained by the heap.Interface methods. + index int + + // timestamp to maintain insertions order for items with the same priority and for telemetry + timestamp time.Time +} + +// NewPriorityQueueItem creates a new PriorityQueueItem with the given message and priority. +func NewPriorityQueueItem[T any](message T, priority uint64) *PriorityQueueItem[T] { + return &PriorityQueueItem[T]{ + message: message, + priority: priority, + index: -1, // index is set when the item is pushed to the heap + timestamp: time.Now(), + } +} + +// Message returns the message stored in the item. +func (item *PriorityQueueItem[T]) Message() T { + return item.message +} + +var _ heap.Interface = (*PriorityQueue[any])(nil) + +// PriorityQueue implements heap.Interface and holds PriorityQueueItems. +// It provides a priority queue where items with larger priority values +// are dequeued first. For items with equal priority, the oldest item (by insertion time) +// is dequeued first. +// CAUTION: not concurrency safe! Caller must implement their own synchronization. +type PriorityQueue[T any] []*PriorityQueueItem[T] + +// Len returns the number of items in the priority queue. +// CAUTION: not concurrency safe! +func (pq PriorityQueue[T]) Len() int { return len(pq) } + +// Less determines the ordering of items in the priority queue. +// PriorityQueueItems with larger priority values come first. For items with equal priority, +// the oldest item (by insertion timestamp) comes first. +// Returns true if and only if item at index i should come before item at index j. +// CAUTION: not concurrency safe! +func (pq PriorityQueue[T]) Less(i, j int) bool { + // We want Pop to give us the highest, not lowest, priority so we use greater than here. + if pq[i].priority > pq[j].priority { + return true + } + if pq[i].priority < pq[j].priority { + return false + } + // if both items have the same priority, then pop the oldest + return pq[i].timestamp.Before(pq[j].timestamp) +} + +// Swap exchanges the items at the given indices and updates their heap indices. +// CAUTION: not concurrency safe! +func (pq PriorityQueue[T]) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +// Push adds an item to the priority queue. +// The item's index is automatically set to its position in the heap. +// The item must be of type `*PriorityQueueItem[T]` otherwise the method will panic. 
+// CAUTION: not concurrency safe! +func (pq *PriorityQueue[T]) Push(x any) { + n := len(*pq) + item, ok := x.(*PriorityQueueItem[T]) + if !ok { + panic(fmt.Sprintf("unexpected type added to priority queue: %T", x)) + } + item.index = n + *pq = append(*pq, item) +} + +// Pop removes and returns the highest priority item from the queue. +// The returned item will have the highest priority value, or if multiple items +// have the same priority, the oldest one by insertion time. +// CAUTION: not concurrency safe! +func (pq *PriorityQueue[T]) Pop() any { + old := *pq + n := len(old) + item := old[n-1] + old[n-1] = nil // avoid memory leak + item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} diff --git a/module/queue/priority_queue_test.go b/module/queue/priority_queue_test.go new file mode 100644 index 00000000000..c7c8b65d025 --- /dev/null +++ b/module/queue/priority_queue_test.go @@ -0,0 +1,337 @@ +package queue + +import ( + "container/heap" + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestPriorityQueueItem tests the PriorityQueueItem struct and its methods +func TestPriorityQueueItem(t *testing.T) { + t.Run("NewPriorityQueueItem creates item with correct values", func(t *testing.T) { + message := "test message" + priority := uint64(42) + + item := NewPriorityQueueItem(message, priority) + + assert.Equal(t, message, item.message) + assert.Equal(t, priority, item.priority) + assert.Equal(t, -1, item.index) + assert.WithinDuration(t, time.Now(), item.timestamp, time.Minute) + }) + + t.Run("Message returns the stored message", func(t *testing.T) { + message := "test message" + item := NewPriorityQueueItem(message, 1) + + result := item.Message() + + assert.Equal(t, message, result) + }) +} + +// TestPriorityQueue_Len tests the Len method +func TestPriorityQueue_Len(t *testing.T) { + t.Run("empty queue has length 0", func(t *testing.T) { + pq := PriorityQueue[string]{} + + assert.Equal(t, 0, pq.Len()) + }) + + t.Run("queue with items has correct length", func(t *testing.T) { + pq := PriorityQueue[string]{ + NewPriorityQueueItem("item1", 1), + NewPriorityQueueItem("item2", 2), + NewPriorityQueueItem("item3", 3), + } + + assert.Equal(t, 3, pq.Len()) + }) +} + +// TestPriorityQueue_Less tests the Less method for priority ordering +func TestPriorityQueue_Less(t *testing.T) { + t.Run("higher priority comes first", func(t *testing.T) { + pq := PriorityQueue[string]{ + NewPriorityQueueItem("low", 1), + NewPriorityQueueItem("high", 10), + } + + // high priority should be "less" (come first in heap) + assert.True(t, pq.Less(1, 0)) + assert.False(t, pq.Less(0, 1)) + }) + + t.Run("equal priority uses timestamp ordering", func(t *testing.T) { + now := time.Now() + item1 := &PriorityQueueItem[string]{ + message: "first", + priority: 5, + timestamp: now, + } + item2 := &PriorityQueueItem[string]{ + message: "second", + priority: 5, + timestamp: now.Add(time.Millisecond), + } + + pq := PriorityQueue[string]{item1, item2} + + // older timestamp should be "less" (come first in heap) + assert.True(t, pq.Less(0, 1)) + assert.False(t, pq.Less(1, 0)) + }) + + t.Run("same priority and timestamp", func(t *testing.T) { + now := time.Now() + item1 := &PriorityQueueItem[string]{ + message: "item1", + priority: 5, + timestamp: now, + } + item2 := &PriorityQueueItem[string]{ + message: "item2", + priority: 5, + timestamp: now, + } + + pq := PriorityQueue[string]{item1, item2} + + // Should be consistent (not less) + assert.False(t, 
pq.Less(0, 1)) + assert.False(t, pq.Less(1, 0)) + }) +} + +// TestPriorityQueue_Swap tests the Swap method +func TestPriorityQueue_Swap(t *testing.T) { + t.Run("swap exchanges items and updates indices", func(t *testing.T) { + item1 := NewPriorityQueueItem("item1", 1) + item2 := NewPriorityQueueItem("item2", 2) + + pq := PriorityQueue[string]{item1, item2} + + // Set initial indices + pq[0].index = 0 + pq[1].index = 1 + + // Swap items + pq.Swap(0, 1) + + // Check that items are swapped + assert.Equal(t, "item2", pq[0].message) + assert.Equal(t, "item1", pq[1].message) + + // Check that indices are updated + assert.Equal(t, 0, pq[0].index) + assert.Equal(t, 1, pq[1].index) + }) +} + +// TestPriorityQueue_Push tests the Push method +func TestPriorityQueue_Push(t *testing.T) { + t.Run("push adds item to queue", func(t *testing.T) { + pq := &PriorityQueue[string]{} + item := NewPriorityQueueItem("test", 5) + + pq.Push(item) + + assert.Equal(t, 1, pq.Len()) + assert.Equal(t, item, (*pq)[0]) + assert.Equal(t, 0, item.index) + }) + + t.Run("push sets correct index", func(t *testing.T) { + pq := &PriorityQueue[string]{} + item1 := NewPriorityQueueItem("item1", 1) + item2 := NewPriorityQueueItem("item2", 2) + + pq.Push(item1) + pq.Push(item2) + + assert.Equal(t, 0, item1.index) + assert.Equal(t, 1, item2.index) + }) + + t.Run("push panics on non-PriorityQueueItem", func(t *testing.T) { + pq := &PriorityQueue[string]{} + initialLen := pq.Len() + + defer func() { + r := recover() + require.Equal(t, "unexpected type added to priority queue: string", r) + assert.Equal(t, initialLen, pq.Len()) + }() + + pq.Push("not an item") + }) +} + +// TestPriorityQueue_Pop tests the Pop method +func TestPriorityQueue_Pop(t *testing.T) { + t.Run("pop removes and returns last item", func(t *testing.T) { + item1 := NewPriorityQueueItem("item1", 1) + item2 := NewPriorityQueueItem("item2", 2) + + pq := &PriorityQueue[string]{item1, item2} + initialLen := pq.Len() + + result := pq.Pop() + + assert.Equal(t, item2, result) + assert.Equal(t, initialLen-1, pq.Len()) + assert.Equal(t, -1, item2.index) + }) +} + +// TestPriorityQueue_HeapOperations tests the priority queue as a heap +func TestPriorityQueue_HeapOperations(t *testing.T) { + t.Run("heap operations maintain priority order", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + // Add items with different priorities + heap.Push(pq, NewPriorityQueueItem("low", 1)) + heap.Push(pq, NewPriorityQueueItem("high", 10)) + heap.Push(pq, NewPriorityQueueItem("medium", 5)) + + // Pop items and verify they come out in priority order + item1 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "high", item1.message) + assert.Equal(t, uint64(10), item1.priority) + + item2 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "medium", item2.message) + assert.Equal(t, uint64(5), item2.priority) + + item3 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "low", item3.message) + assert.Equal(t, uint64(1), item3.priority) + + assert.Equal(t, 0, pq.Len()) + }) + + t.Run("heap operations with equal priorities use timestamp", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + // Add items with same priority but different timestamps + item1 := NewPriorityQueueItem("first", 5) + time.Sleep(time.Millisecond) + item2 := NewPriorityQueueItem("second", 5) + time.Sleep(time.Millisecond) + item3 := NewPriorityQueueItem("third", 5) + + heap.Push(pq, item2) // Add in different order + heap.Push(pq, item3) + heap.Push(pq, 
item1) + + // Pop items and verify they come out in timestamp order (oldest first) + result1 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "first", result1.message) + + result2 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "second", result2.message) + + result3 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "third", result3.message) + }) + + t.Run("heap operations with mixed priorities and timestamps", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + // Add items with different priorities and timestamps + item1 := NewPriorityQueueItem("low1", 1) + time.Sleep(time.Millisecond) + item2 := NewPriorityQueueItem("low2", 1) + time.Sleep(time.Millisecond) + item3 := NewPriorityQueueItem("high1", 10) + time.Sleep(time.Millisecond) + item4 := NewPriorityQueueItem("high2", 10) + + heap.Push(pq, item4) + heap.Push(pq, item1) + heap.Push(pq, item3) + heap.Push(pq, item2) + + // Pop items and verify order + results := make([]string, 4) + for i := 0; i < 4; i++ { + item := heap.Pop(pq).(*PriorityQueueItem[string]) + results[i] = item.message + } + + // High priority items should come first, then low priority items + // Within same priority, older timestamps should come first + expected := []string{"high1", "high2", "low1", "low2"} + assert.Equal(t, expected, results) + }) +} + +// TestPriorityQueue_EdgeCases tests edge cases and error conditions +func TestPriorityQueue_EdgeCases(t *testing.T) { + t.Run("empty queue operations", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + assert.Equal(t, 0, pq.Len()) + + // Pop on empty queue should panic (heap behavior) + assert.Panics(t, func() { + heap.Pop(pq) + }) + }) + + t.Run("single item queue", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + item := NewPriorityQueueItem("single", 5) + heap.Push(pq, item) + + assert.Equal(t, 1, pq.Len()) + + result := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, item, result) + assert.Equal(t, 0, pq.Len()) + }) + + t.Run("zero priority items", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + item1 := NewPriorityQueueItem("zero1", 0) + time.Sleep(time.Millisecond) + item2 := NewPriorityQueueItem("zero2", 0) + + heap.Push(pq, item2) + heap.Push(pq, item1) + + // Should come out in timestamp order + result1 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "zero1", result1.message) + + result2 := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "zero2", result2.message) + }) + + t.Run("very high priority values", func(t *testing.T) { + pq := &PriorityQueue[string]{} + heap.Init(pq) + + item1 := NewPriorityQueueItem("normal", 1000) + item2 := NewPriorityQueueItem("very high", math.MaxUint64) + + heap.Push(pq, item1) + heap.Push(pq, item2) + + result := heap.Pop(pq).(*PriorityQueueItem[string]) + assert.Equal(t, "very high", result.message) + assert.Equal(t, uint64(math.MaxUint64), result.priority) + }) +} diff --git a/module/receipt_validator.go b/module/receipt_validator.go index 6a9d98840f1..f50fa00b98f 100644 --- a/module/receipt_validator.go +++ b/module/receipt_validator.go @@ -1,41 +1,51 @@ package module -import "github.com/onflow/flow-go/model/flow" +import ( + "github.com/onflow/flow-go/model/flow" +) // ReceiptValidator is an interface which is used for validating // receipts with respect to current protocol state. 
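To see how the ordering defined by Less plays out end to end, here is a small illustrative program (not part of the change itself); the import path follows the new file's location in this diff, and callers drive the queue through container/heap as the doc comments require:

package main

import (
	"container/heap"
	"fmt"

	"github.com/onflow/flow-go/module/queue"
)

func main() {
	pq := &queue.PriorityQueue[string]{}
	heap.Init(pq)

	// Push/Pop must go through the heap package so the heap invariant holds.
	heap.Push(pq, queue.NewPriorityQueueItem("low", 1))
	heap.Push(pq, queue.NewPriorityQueueItem("high", 10))
	heap.Push(pq, queue.NewPriorityQueueItem("high-later", 10)) // equal priority, newer timestamp

	for pq.Len() > 0 {
		item := heap.Pop(pq).(*queue.PriorityQueueItem[string])
		fmt.Println(item.Message()) // prints: high, high-later, low
	}
}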
type ReceiptValidator interface { - // Validate verifies that the ExecutionReceipt satisfies - // the following conditions: - // * is from Execution node with positive weight - // * has valid signature - // * chunks are in correct format - // * execution result has a valid parent and satisfies the subgraph check - // Returns nil if all checks passed successfully. + // Validate verifies that the ExecutionReceipt satisfies the following conditions: + // - is from Execution node with positive weight + // - has valid signature + // - chunks are in correct format + // - execution result has a valid parent and satisfies the subgraph check + // + // In order to validate a receipt, both the executed block and the parent result + // referenced in `receipt.ExecutionResult` must be known. We return nil if all checks + // pass successfully. + // + // Expected errors during normal operations: - // * engine.InvalidInputError - // if receipt violates protocol condition - // * engine.UnverifiableInputError - // if receipt's parent result is unknown - Validate(receipts *flow.ExecutionReceipt) error + // - engine.InvalidInputError if receipt violates protocol condition + // - module.UnknownResultError if the receipt's parent result is unknown + // - module.UnknownBlockError if the executed block is unknown + // + // All other errors are potential symptoms of critical internal failures, such as bugs or state corruption. + Validate(receipt *flow.ExecutionReceipt) error // ValidatePayload verifies the ExecutionReceipts and ExecutionResults // in the payload for compliance with the protocol: // Receipts: - // * are from Execution node with positive weight - // * have valid signature - // * chunks are in correct format - // * no duplicates in fork + // - are from Execution node with positive weight + // - have valid signature + // - chunks are in correct format + // - no duplicates in fork + // // Results: - // * have valid parents and satisfy the subgraph check - // * extend the execution tree, where the tree root is the latest - // finalized block and only results from this fork are included - // * no duplicates in fork + // - have valid parents and satisfy the subgraph check + // - extend the execution tree, where the tree root is the latest + // finalized block and only results from this fork are included + // - no duplicates in fork + // // Expected errors during normal operations: - // * engine.InvalidInputError - // if some receipts in the candidate block violate protocol condition - // * engine.UnverifiableInputError - // if for some of the receipts, their respective parent result is unknown + // - engine.InvalidInputError if some receipts in the candidate block violate protocol condition + // - module.UnknownBlockError if the candidate block's _parent_ is unknown + // + // All other errors are potential symptoms of critical internal failures, such as bugs or state corruption. + // Note that module.UnknownResultError is not possible; we have either an invalid candidate block + // (yields engine.InvalidInputError) or a missing parent block (yields module.UnknownBlockError). ValidatePayload(candidate *flow.Block) error } diff --git a/module/requester.go b/module/requester.go index dc5e1baa059..93b3f8a66f2 100644 --- a/module/requester.go +++ b/module/requester.go @@ -12,17 +12,30 @@ type Requester interface { // if no additional restrictions are required. Data integrity of response // will be checked upon arrival. This function should be used for requesting // entites by their IDs.
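To make the ReceiptValidator error contract above concrete, here is a hedged caller-side sketch; the sentinel-check helpers follow flow-go's usual Is*Error naming, and the surrounding wiring is hypothetical rather than taken from this change:

// handleReceipt sketches how a consumer might branch on Validate's documented sentinel errors.
func handleReceipt(validator module.ReceiptValidator, receipt *flow.ExecutionReceipt) error {
	err := validator.Validate(receipt)
	switch {
	case err == nil:
		return nil // receipt passed all checks; continue processing
	case engine.IsInvalidInputError(err):
		return nil // byzantine input: drop the receipt (and potentially penalize the sender)
	case module.IsUnknownBlockError(err):
		return nil // executed block not yet known: cache the receipt and retry later
	case module.IsUnknownResultError(err):
		return nil // parent result missing: wait for ancestor receipts first
	default:
		return fmt.Errorf("irrecoverable receipt validation failure: %w", err) // bug or state corruption
	}
}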
- EntityByID(entityID flow.Identifier, selector flow.IdentityFilter) + EntityByID(entityID flow.Identifier, selector flow.IdentityFilter[flow.Identity]) // Query will request data through the request engine backing the interface. // The additional selector will be applied to the subset // of valid providers for the data and allows finer-grained control // over which providers to request data from. Doesn't perform integrity check // can be used to get entities without knowing their ID. - Query(key flow.Identifier, selector flow.IdentityFilter) + Query(key flow.Identifier, selector flow.IdentityFilter[flow.Identity]) // Force will force the dispatcher to send all possible batches immediately. // It can be used in cases where responsiveness is of utmost importance, at // the cost of additional network messages. Force() } + +// NoopRequester is a no-op implementation of the Requester interface; all of its +// methods do nothing. It can be used where a Requester is required but requesting +// is not needed. +type NoopRequester struct{} + +func (n NoopRequester) EntityByID(entityID flow.Identifier, selector flow.IdentityFilter[flow.Identity]) { +} + +func (n NoopRequester) Query(key flow.Identifier, selector flow.IdentityFilter[flow.Identity]) {} + +func (n NoopRequester) Force() {} + +func (n NoopRequester) WithHandle(func(flow.Identifier, flow.Entity)) Requester { + return n +} diff --git a/module/signature/aggregation.go b/module/signature/aggregation.go index 99129c656dc..d56651d2221 100644 --- a/module/signature/aggregation.go +++ b/module/signature/aggregation.go @@ -1,14 +1,11 @@ -//go:build relic -// +build relic - package signature import ( "fmt" "sync" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" ) // SignatureAggregatorSameMessage aggregates BLS signatures of the same message from different signers. diff --git a/module/signature/aggregation_no_relic.go b/module/signature/aggregation_no_relic.go deleted file mode 100644 index 6b51c6f35a3..00000000000 --- a/module/signature/aggregation_no_relic.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build !relic -// +build !relic - -package signature - -import ( - "github.com/onflow/flow-go/crypto" -) - -const panic_relic = "function only supported with the relic build tag" - -// These functions are the non-relic versions of some public functions from the package. -// The functions are here to allow the build of flow-emulator, since the emulator is built -// without the "relic" build tag, and does not run the functions below.
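The NoopRequester added in module/requester.go above gives callers a do-nothing stand-in wherever a module.Requester is required, for example in tests or on nodes with requesting disabled. A hypothetical wiring sketch (identifiers are placeholders; the generic filter helper is the one used elsewhere in this diff):

var requester module.Requester = module.NoopRequester{}
requester.EntityByID(entityID, filter.HasNodeID[flow.Identity](originID)) // silently ignored
requester.Force()                                                         // no-op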
-type SignatureAggregatorSameMessage struct{} - -func NewSignatureAggregatorSameMessage( - message []byte, - dsTag string, - publicKeys []crypto.PublicKey, -) (*SignatureAggregatorSameMessage, error) { - panic(panic_relic) -} - -func (s *SignatureAggregatorSameMessage) Verify(signer int, sig crypto.Signature) (bool, error) { - panic(panic_relic) -} -func (s *SignatureAggregatorSameMessage) TrustedAdd(signer int, sig crypto.Signature) error { - panic(panic_relic) -} - -func (s *SignatureAggregatorSameMessage) Aggregate() ([]int, crypto.Signature, error) { - panic(panic_relic) -} diff --git a/module/signature/aggregation_test.go b/module/signature/aggregation_test.go index aacd0a89f06..565534e7b78 100644 --- a/module/signature/aggregation_test.go +++ b/module/signature/aggregation_test.go @@ -1,22 +1,23 @@ -//go:build relic -// +build relic - package signature import ( - "crypto/rand" - "errors" mrand "math/rand" "sort" "testing" "time" + "github.com/onflow/crypto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/crypto" ) +func getPRG(t *testing.T) *mrand.Rand { + random := time.Now().UnixNano() + t.Logf("rng seed is %d", random) + rng := mrand.New(mrand.NewSource(random)) + return rng +} + // Utility function that flips a point sign bit to negate the point // this is shortcut which works only for zcash BLS12-381 compressed serialization // that is currently supported by the flow crypto module @@ -25,7 +26,7 @@ func negatePoint(pointbytes []byte) { pointbytes[0] ^= 0x20 } -func createAggregationData(t *testing.T, signersNumber int) ( +func createAggregationData(t *testing.T, rand *mrand.Rand, signersNumber int) ( []byte, string, []crypto.Signature, []crypto.PublicKey, ) { // create message and tag @@ -54,7 +55,7 @@ func createAggregationData(t *testing.T, signersNumber int) ( } func TestAggregatorSameMessage(t *testing.T) { - + rand := getPRG(t) signersNum := 20 // constructor edge cases @@ -79,7 +80,7 @@ func TestAggregatorSameMessage(t *testing.T) { // Happy paths // all signatures are valid t.Run("happy path", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, signersNum) + msg, tag, sigs, pks := createAggregationData(t, rand, signersNum) aggregator, err := NewSignatureAggregatorSameMessage(msg, tag, pks) require.NoError(t, err) @@ -150,7 +151,7 @@ func TestAggregatorSameMessage(t *testing.T) { // Unhappy paths t.Run("invalid inputs", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, signersNum) + msg, tag, sigs, pks := createAggregationData(t, rand, signersNum) aggregator, err := NewSignatureAggregatorSameMessage(msg, tag, pks) require.NoError(t, err) // invalid indices for different methods @@ -184,7 +185,7 @@ func TestAggregatorSameMessage(t *testing.T) { }) t.Run("duplicate signers", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, signersNum) + msg, tag, sigs, pks := createAggregationData(t, rand, signersNum) aggregator, err := NewSignatureAggregatorSameMessage(msg, tag, pks) require.NoError(t, err) @@ -223,7 +224,7 @@ func TestAggregatorSameMessage(t *testing.T) { // 1: No signature has been added. 
t.Run("aggregate with no signatures", func(t *testing.T) { - msg, tag, _, pks := createAggregationData(t, 1) + msg, tag, _, pks := createAggregationData(t, rand, 1) aggregator, err := NewSignatureAggregatorSameMessage(msg, tag, pks) require.NoError(t, err) // Aggregation should error with sentinel InsufficientSignaturesError @@ -239,7 +240,7 @@ func TestAggregatorSameMessage(t *testing.T) { // 2.a. aggregated public key is not identity // 2.b. aggregated public key is identity t.Run("invalid signature serialization", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, 2) + msg, tag, sigs, pks := createAggregationData(t, rand, 2) invalidStructureSig := (crypto.Signature)([]byte{0, 0}) t.Run("with non-identity aggregated public key", func(t *testing.T) { @@ -305,7 +306,7 @@ func TestAggregatorSameMessage(t *testing.T) { // 3.a. aggregated public key is not identity // 3.b. aggregated public key is identity t.Run("correct serialization and invalid signature", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, 2) + msg, tag, sigs, pks := createAggregationData(t, rand, 2) t.Run("with non-identity aggregated public key", func(t *testing.T) { aggregator, err := NewSignatureAggregatorSameMessage(msg, tag, pks) @@ -365,8 +366,7 @@ func TestAggregatorSameMessage(t *testing.T) { // Aggregation should error with sentinel ErrIdentityPublicKey // aggregated public key is identity signers, agg, err := aggregator.Aggregate() - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrIdentityPublicKey)) + assert.ErrorIs(t, err, ErrIdentityPublicKey) assert.Nil(t, agg) assert.Nil(t, signers) }) @@ -374,7 +374,7 @@ func TestAggregatorSameMessage(t *testing.T) { // 4. All signatures are valid but aggregated key is identity t.Run("all valid signatures and identity aggregated key", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, 2) + msg, tag, sigs, pks := createAggregationData(t, rand, 2) // public key at index 1 is opposite of public key at index 0 (pks[1] = -pks[0]) // so that aggregation of pks[0] and pks[1] is identity @@ -405,17 +405,14 @@ func TestAggregatorSameMessage(t *testing.T) { // Aggregation should error with sentinel ErrIdentityPublicKey signers, agg, err := aggregator.Aggregate() - assert.Error(t, err) - assert.True(t, errors.Is(err, ErrIdentityPublicKey)) + assert.ErrorIs(t, err, ErrIdentityPublicKey) assert.Nil(t, agg) assert.Nil(t, signers) }) } func TestKeyAggregator(t *testing.T) { - r := time.Now().UnixNano() - mrand.Seed(r) - t.Logf("math rand seed is %d", r) + rand := getPRG(t) signersNum := 20 // create keys @@ -497,8 +494,8 @@ func TestKeyAggregator(t *testing.T) { rounds := 30 for i := 0; i < rounds; i++ { go func() { // test module concurrency - low := mrand.Intn(signersNum - 1) - high := low + 1 + mrand.Intn(signersNum-1-low) + low := rand.Intn(signersNum - 1) + high := low + 1 + rand.Intn(signersNum-1-low) var key, expectedKey crypto.PublicKey var err error key, err = aggregator.KeyAggregate(indices[low:high]) diff --git a/module/signature/checksum_test.go b/module/signature/checksum_test.go index 35a11408bca..635d86a6a59 100644 --- a/module/signature/checksum_test.go +++ b/module/signature/checksum_test.go @@ -1,7 +1,6 @@ package signature_test import ( - "errors" "testing" "github.com/stretchr/testify/require" @@ -50,11 +49,11 @@ func TestCheckSum(t *testing.T) { // is able to extract the same data as the encoder. 
func TestPrefixCheckSum(t *testing.T) { rapid.Check(t, func(t *rapid.T) { - committeeSize := rapid.IntRange(0, 300).Draw(t, "committeeSize").(int) + committeeSize := rapid.IntRange(0, 300).Draw(t, "committeeSize") committee := unittest.IdentifierListFixture(committeeSize) - data := rapid.IntRange(0, 200).Map(func(count int) []byte { + data := rapid.Map(rapid.IntRange(0, 200), func(count int) []byte { return unittest.RandomBytes(count) - }).Draw(t, "data").([]byte) + }).Draw(t, "data") extracted, err := msig.CompareAndExtract(committee, msig.PrefixCheckSum(committee, data)) require.NoError(t, err) require.Equal(t, data, extracted) @@ -68,13 +67,13 @@ func Test_InvalidCheckSum(t *testing.T) { t.Run("checksum too short", func(t *testing.T) { for i := 0; i < 4; i++ { _, _, err := msig.SplitCheckSum(unittest.RandomBytes(i)) - require.True(t, errors.Is(err, msig.ErrInvalidChecksum)) + require.ErrorIs(t, err, msig.ErrInvalidChecksum) } }) t.Run("mismatching checksum", func(t *testing.T) { committee := unittest.IdentifierListFixture(20) _, err := msig.CompareAndExtract(committee, unittest.RandomBytes(112)) - require.True(t, errors.Is(err, msig.ErrInvalidChecksum)) + require.ErrorIs(t, err, msig.ErrInvalidChecksum) }) } diff --git a/module/signature/signer_indices.go b/module/signature/signer_indices.go index 68e3c78f1d5..30bf3faadb8 100644 --- a/module/signature/signer_indices.go +++ b/module/signature/signer_indices.go @@ -127,9 +127,9 @@ func EncodeSignerToIndicesAndSigType( // Expected Error returns during normal operations: // - signature.IsInvalidSigTypesError if the given `sigType` does not encode a valid sequence of signature types func DecodeSigTypeToStakingAndBeaconSigners( - signers flow.IdentityList, + signers flow.IdentitySkeletonList, sigType []byte, -) (flow.IdentityList, flow.IdentityList, error) { +) (flow.IdentitySkeletonList, flow.IdentitySkeletonList, error) { numberSigners := len(signers) if err := validPadding(sigType, numberSigners); err != nil { if errors.Is(err, ErrIncompatibleBitVectorLength) || errors.Is(err, ErrIllegallyPaddedBitVector) { @@ -138,9 +138,9 @@ func DecodeSigTypeToStakingAndBeaconSigners( return nil, nil, fmt.Errorf("unexpected exception while checking padding of sigTypes: %w", err) } - // decode bits to Identities - stakingSigners := make(flow.IdentityList, 0, numberSigners) - beaconSigners := make(flow.IdentityList, 0, numberSigners) + // decode bits to IdentitySkeletonList + stakingSigners := make(flow.IdentitySkeletonList, 0, numberSigners) + beaconSigners := make(flow.IdentitySkeletonList, 0, numberSigners) for i, signer := range signers { if bitutils.ReadBit(sigType, i) == 0 { stakingSigners = append(stakingSigners, signer) @@ -156,6 +156,7 @@ func DecodeSigTypeToStakingAndBeaconSigners( // - The input `canonicalIdentifiers` must exhaustively list the set of authorized signers in their canonical order. // - The input `signerIDs` represents a set, i.e. it should not contain any duplicates. // - `signerIDs` must be a subset of `canonicalIdentifiers` +// - `signerIDs` can be in arbitrary order (canonical order _not required_) // // RETURN VALUE: // - `signerIndices` is a bit vector. Let signerIndices[i] denote the ith bit of `signerIndices`. @@ -278,18 +279,20 @@ func decodeSignerIndices( // Prerequisite: // - The input `canonicalIdentifiers` must exhaustively list the set of authorized signers in their canonical order. // +// The returned list of decoded identities is in canonical order. 
+// // Expected Error returns during normal operations: // * signature.InvalidSignerIndicesError if the given index vector `prefixed` does not encode a valid set of signers func DecodeSignerIndicesToIdentities( - canonicalIdentities flow.IdentityList, + canonicalIdentities flow.IdentitySkeletonList, prefixed []byte, -) (flow.IdentityList, error) { +) (flow.IdentitySkeletonList, error) { indices, err := decodeSignerIndices(canonicalIdentities.NodeIDs(), prefixed) if err != nil { return nil, err } - signers := make(flow.IdentityList, 0, len(indices)) + signers := make(flow.IdentitySkeletonList, 0, len(indices)) for _, index := range indices { signers = append(signers, canonicalIdentities[index]) } diff --git a/module/signature/signer_indices_test.go b/module/signature/signer_indices_test.go index c34daea4f37..2a10311e2a9 100644 --- a/module/signature/signer_indices_test.go +++ b/module/signature/signer_indices_test.go @@ -12,7 +12,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/flow/filter/id" - "github.com/onflow/flow-go/model/flow/order" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/utils/unittest" ) @@ -22,7 +21,7 @@ import ( // 2. for the decoding step, we offer an optimized convenience function to directly // decode to full identities: Indices --decode--> Identities func TestEncodeDecodeIdentities(t *testing.T) { - canonicalIdentities := unittest.IdentityListFixture(20) + canonicalIdentities := unittest.IdentityListFixture(20).Sort(flow.Canonical[flow.Identity]).ToSkeleton() canonicalIdentifiers := canonicalIdentities.NodeIDs() for s := 0; s < 20; s++ { for e := s; e < 20; e++ { @@ -105,14 +104,14 @@ func TestEncodeFail(t *testing.T) { func Test_EncodeSignerToIndicesAndSigType(t *testing.T) { rapid.Check(t, func(t *rapid.T) { // select total committee size, number of random beacon signers and number of staking signers - committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize").(int) - numStakingSigners := rapid.IntRange(0, committeeSize).Draw(t, "numStakingSigners").(int) - numRandomBeaconSigners := rapid.IntRange(0, committeeSize-numStakingSigners).Draw(t, "numRandomBeaconSigners").(int) + committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize") + numStakingSigners := rapid.IntRange(0, committeeSize).Draw(t, "numStakingSigners") + numRandomBeaconSigners := rapid.IntRange(0, committeeSize-numStakingSigners).Draw(t, "numRandomBeaconSigners") // create committee - committeeIdentities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + committeeIdentities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]) committee := committeeIdentities.NodeIDs() - stakingSigners, beaconSigners := sampleSigners(committee, numStakingSigners, numRandomBeaconSigners) + stakingSigners, beaconSigners := sampleSigners(t, committee, numStakingSigners, numRandomBeaconSigners) // encode prefixed, sigTypes, err := signature.EncodeSignerToIndicesAndSigType(committee, stakingSigners, beaconSigners) @@ -126,7 +125,7 @@ func Test_EncodeSignerToIndicesAndSigType(t *testing.T) { correctEncoding(t, signerIndices, committee, unorderedSigners) // check sigTypes - canSigners := committeeIdentities.Filter(filter.HasNodeID(unorderedSigners...)).NodeIDs() // generates list of signer IDs in canonical order + canSigners := 
committeeIdentities.Filter(filter.HasNodeID[flow.Identity](unorderedSigners...)).NodeIDs() // generates list of signer IDs in canonical order correctEncoding(t, sigTypes, canSigners, beaconSigners) }) } @@ -143,49 +142,50 @@ func Test_EncodeSignerToIndicesAndSigType(t *testing.T) { func Test_DecodeSigTypeToStakingAndBeaconSigners(t *testing.T) { rapid.Check(t, func(t *rapid.T) { // select total committee size, number of random beacon signers and number of staking signers - committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize").(int) - numStakingSigners := rapid.IntRange(0, committeeSize).Draw(t, "numStakingSigners").(int) - numRandomBeaconSigners := rapid.IntRange(0, committeeSize-numStakingSigners).Draw(t, "numRandomBeaconSigners").(int) + committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize") + numStakingSigners := rapid.IntRange(0, committeeSize).Draw(t, "numStakingSigners") + numRandomBeaconSigners := rapid.IntRange(0, committeeSize-numStakingSigners).Draw(t, "numRandomBeaconSigners") // create committee - committeeIdentities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + committeeIdentities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)). + Sort(flow.Canonical[flow.Identity]) committee := committeeIdentities.NodeIDs() - stakingSigners, beaconSigners := sampleSigners(committee, numStakingSigners, numRandomBeaconSigners) + stakingSigners, beaconSigners := sampleSigners(t, committee, numStakingSigners, numRandomBeaconSigners) // encode signerIndices, sigTypes, err := signature.EncodeSignerToIndicesAndSigType(committee, stakingSigners, beaconSigners) require.NoError(t, err) // decode - decSignerIdentites, err := signature.DecodeSignerIndicesToIdentities(committeeIdentities, signerIndices) + decSignerIdentites, err := signature.DecodeSignerIndicesToIdentities(committeeIdentities.ToSkeleton(), signerIndices) require.NoError(t, err) decStakingSigners, decBeaconSigners, err := signature.DecodeSigTypeToStakingAndBeaconSigners(decSignerIdentites, sigTypes) require.NoError(t, err) // verify; note that there is a slightly different convention between Filter and the decoding logic: // Filter returns nil for an empty list, while the decoding logic returns an instance of an empty slice - sigIdentities := committeeIdentities.Filter(filter.Or(filter.HasNodeID(stakingSigners...), filter.HasNodeID(beaconSigners...))) // signer identities in canonical order + sigIdentities := committeeIdentities.Filter( + filter.Or(filter.HasNodeID[flow.Identity](stakingSigners...), filter.HasNodeID[flow.Identity](beaconSigners...))).ToSkeleton() // signer identities in canonical order if len(stakingSigners)+len(decBeaconSigners) > 0 { require.Equal(t, sigIdentities, decSignerIdentites) } if len(stakingSigners) == 0 { require.Empty(t, decStakingSigners) } else { - require.Equal(t, committeeIdentities.Filter(filter.HasNodeID(stakingSigners...)), decStakingSigners) + require.Equal(t, committeeIdentities.Filter(filter.HasNodeID[flow.Identity](stakingSigners...)).ToSkeleton(), decStakingSigners) } if len(decBeaconSigners) == 0 { require.Empty(t, decBeaconSigners) } else { - require.Equal(t, committeeIdentities.Filter(filter.HasNodeID(beaconSigners...)), decBeaconSigners) + require.Equal(t, committeeIdentities.Filter(filter.HasNodeID[flow.Identity](beaconSigners...)).ToSkeleton(), decBeaconSigners) } }) } func Test_ValidPaddingErrIncompatibleBitVectorLength(t *testing.T) { - var signers flow.IdentityList 
var err error // if bits is multiply of 8, then there is no padding needed, any sig type can be decoded. - signers = unittest.IdentityListFixture(16) + signers := unittest.IdentityListFixture(16).ToSkeleton() // 16 bits needs 2 bytes, provided 2 bytes _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, unittest.RandomBytes(2)) @@ -202,7 +202,7 @@ func Test_ValidPaddingErrIncompatibleBitVectorLength(t *testing.T) { require.ErrorIs(t, err, signature.ErrIncompatibleBitVectorLength, "low-level error representing the failure should be ErrIncompatibleBitVectorLength") // if bits is not multiply of 8, then padding is needed - signers = unittest.IdentityListFixture(15) + signers = unittest.IdentityListFixture(15).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255), byte(254)}) require.NoError(t, err) @@ -218,30 +218,30 @@ func Test_ValidPaddingErrIncompatibleBitVectorLength(t *testing.T) { // if bits is not multiply of 8, // 1 byte more - signers = unittest.IdentityListFixture(0) + signers = unittest.IdentityListFixture(0).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255)}) require.True(t, signature.IsInvalidSigTypesError(err), "API-level error should be InvalidSigTypesError") require.ErrorIs(t, err, signature.ErrIncompatibleBitVectorLength, "low-level error representing the failure should be ErrIncompatibleBitVectorLength") // 1 byte more - signers = unittest.IdentityListFixture(1) + signers = unittest.IdentityListFixture(1).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(0), byte(0)}) require.True(t, signature.IsInvalidSigTypesError(err), "API-level error should be InvalidSigTypesError") require.ErrorIs(t, err, signature.ErrIncompatibleBitVectorLength, "low-level error representing the failure should be ErrIncompatibleBitVectorLength") // 1 byte less - signers = unittest.IdentityListFixture(7) + signers = unittest.IdentityListFixture(7).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{}) require.True(t, signature.IsInvalidSigTypesError(err), "API-level error should be InvalidSigTypesError") require.ErrorIs(t, err, signature.ErrIncompatibleBitVectorLength, "low-level error representing the failure should be ErrIncompatibleBitVectorLength") } func TestValidPaddingErrIllegallyPaddedBitVector(t *testing.T) { - var signers flow.IdentityList + var signers flow.IdentitySkeletonList var err error // if bits is multiply of 8, then there is no padding needed, any sig type can be decoded. 
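To make the padding rule concrete, a worked example matching the 15-signer case exercised in the padding tests above:

// With 15 signers the bit vector occupies two bytes (16 bits), so the 16th bit
// is padding and must be zero:
//
//	0xFF 0xFE = 11111111 11111110  -> decodes successfully (padding bit is 0)
//	0xFF 0xFF = 11111111 11111111  -> ErrIllegallyPaddedBitVector (padding bit is 1)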
for count := 1; count < 8; count++ { - signers = unittest.IdentityListFixture(count) + signers = unittest.IdentityListFixture(count).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255)}) // last bit should be 0, but 1 require.True(t, signature.IsInvalidSigTypesError(err), "API-level error should be InvalidSigTypesError") require.ErrorIs(t, err, signature.ErrIllegallyPaddedBitVector, "low-level error representing the failure should be ErrIllegallyPaddedBitVector") @@ -252,7 +252,7 @@ func TestValidPaddingErrIllegallyPaddedBitVector(t *testing.T) { } for count := 9; count < 16; count++ { - signers = unittest.IdentityListFixture(count) + signers = unittest.IdentityListFixture(count).ToSkeleton() _, _, err = signature.DecodeSigTypeToStakingAndBeaconSigners(signers, []byte{byte(255), byte(255)}) // last bit should be 0, but 1 require.True(t, signature.IsInvalidSigTypesError(err), "API-level error should be InvalidSigTypesError") require.ErrorIs(t, err, signature.ErrIllegallyPaddedBitVector, "low-level error representing the failure should be ErrIllegallyPaddedBitVector") @@ -270,13 +270,14 @@ func TestValidPaddingErrIllegallyPaddedBitVector(t *testing.T) { func Test_EncodeSignersToIndices(t *testing.T) { rapid.Check(t, func(t *rapid.T) { // select total committee size, number of random beacon signers and number of staking signers - committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize").(int) - numSigners := rapid.IntRange(0, committeeSize).Draw(t, "numSigners").(int) + committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize") + numSigners := rapid.IntRange(0, committeeSize).Draw(t, "numSigners") // create committee - identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]) committee := identities.NodeIDs() - signers := committee.Sample(uint(numSigners)) + signers, err := committee.Sample(uint(numSigners)) + require.NoError(t, err) // encode prefixed, err := signature.EncodeSignersToIndices(committee, signers) @@ -299,13 +300,14 @@ func Test_EncodeSignersToIndices(t *testing.T) { func Test_DecodeSignerIndicesToIdentifiers(t *testing.T) { rapid.Check(t, func(t *rapid.T) { // select total committee size, number of random beacon signers and number of staking signers - committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize").(int) - numSigners := rapid.IntRange(0, committeeSize).Draw(t, "numSigners").(int) + committeeSize := rapid.IntRange(1, 272).Draw(t, "committeeSize") + numSigners := rapid.IntRange(0, committeeSize).Draw(t, "numSigners") // create committee - identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) + identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]) committee := identities.NodeIDs() - signers := committee.Sample(uint(numSigners)) + signers, err := committee.Sample(uint(numSigners)) + require.NoError(t, err) sort.Sort(signers) // encode @@ -332,30 +334,33 @@ func Test_DecodeSignerIndicesToIdentifiers(t *testing.T) { const UpperBoundCommitteeSize = 272 func Test_DecodeSignerIndicesToIdentities(t *testing.T) { - rapid.Check(t, func(t *rapid.T) { // select total committee size, number of random beacon signers and number of staking signers - committeeSize := rapid.IntRange(1, 
UpperBoundCommitteeSize).Draw(t, "committeeSize").(int) - numSigners := rapid.IntRange(0, committeeSize).Draw(t, "numSigners").(int) + committeeSize := rapid.IntRange(1, UpperBoundCommitteeSize).Draw(t, "committeeSize") + numSigners := rapid.IntRange(0, committeeSize).Draw(t, "numSigners") // create committee - identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) - signers := identities.Sample(uint(numSigners)) + identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]) + fullSigners, err := identities.Sample(uint(numSigners)) + require.NoError(t, err) + signers := fullSigners.ToSkeleton() // encode signerIndices, err := signature.EncodeSignersToIndices(identities.NodeIDs(), signers.NodeIDs()) require.NoError(t, err) // decode and verify - decodedSigners, err := signature.DecodeSignerIndicesToIdentities(identities, signerIndices) + decodedSigners, err := signature.DecodeSignerIndicesToIdentities(identities.ToSkeleton(), signerIndices) require.NoError(t, err) - require.Equal(t, signers.Sort(order.Canonical), decodedSigners.Sort(order.Canonical)) + + require.Equal(t, signers.Sort(flow.Canonical[flow.IdentitySkeleton]), decodedSigners.Sort(flow.Canonical[flow.IdentitySkeleton])) }) } // sampleSigners takes `committee` and samples to _disjoint_ subsets // (`stakingSigners` and `randomBeaconSigners`) with the specified cardinality func sampleSigners( + t *rapid.T, committee flow.IdentifierList, numStakingSigners int, numRandomBeaconSigners int, @@ -364,9 +369,12 @@ func sampleSigners( panic(fmt.Sprintf("Cannot sample %d nodes out of a committee is size %d", numStakingSigners+numRandomBeaconSigners, len(committee))) } - stakingSigners = committee.Sample(uint(numStakingSigners)) + var err error + stakingSigners, err = committee.Sample(uint(numStakingSigners)) + require.NoError(t, err) remaining := committee.Filter(id.Not(id.In(stakingSigners...))) - randomBeaconSigners = remaining.Sample(uint(numRandomBeaconSigners)) + randomBeaconSigners, err = remaining.Sample(uint(numRandomBeaconSigners)) + require.NoError(t, err) return } diff --git a/module/signature/signing_tags.go b/module/signature/signing_tags.go index 0e60ef1cfc1..00d7e06903c 100644 --- a/module/signature/signing_tags.go +++ b/module/signature/signing_tags.go @@ -1,8 +1,8 @@ package signature import ( - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" ) // List of domain separation tags for protocol signatures. @@ -61,7 +61,7 @@ var ( // NewBLSHasher returns a hasher to be used for BLS signing and verifying // in the protocol and abstracts the hasher details from the protocol logic. // -// The hasher returned is the the expand-message step in the BLS hash-to-curve. +// The hasher returned is the expand-message step in the BLS hash-to-curve. // It uses a xof (extendable output function) based on KMAC128. It therefore has // 128-bytes outputs. 
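A brief usage sketch for NewBLSHasher, assuming the onflow/crypto signing API and a BLS private key supplied by the caller; the tag shown is one of the protocol tags defined in this package:

// signVote sketches domain-separated BLS signing with a protocol tag.
func signVote(sk crypto.PrivateKey, msg []byte) (crypto.Signature, error) {
	hasher := NewBLSHasher(ConsensusVoteTag)
	return sk.Sign(msg, hasher)
}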
func NewBLSHasher(tag string) hash.Hasher { diff --git a/module/signature/type_encoder.go b/module/signature/type_encoder.go index 11241c0fa57..cfcdd327ac4 100644 --- a/module/signature/type_encoder.go +++ b/module/signature/type_encoder.go @@ -3,7 +3,8 @@ package signature import ( "fmt" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/encoding" ) @@ -60,13 +61,14 @@ func EncodeDoubleSig(stakingSig crypto.Signature, beaconSig crypto.Signature) [] // if sigData is the size of a BLS signature, we interpret sigData entirely as staking signature // - nil, nil, ErrInvalidSignatureFormat if the sig type is invalid (covers nil or empty sigData) func DecodeDoubleSig(sigData []byte) (crypto.Signature, crypto.Signature, error) { - sigLength := len(sigData) - switch sigLength { - case SigLen: + sigLen := SigLen + + switch len(sigData) { + case sigLen: return sigData, nil, nil - case 2 * SigLen: - return sigData[:SigLen], sigData[SigLen:], nil + case 2 * sigLen: + return sigData[:sigLen], sigData[sigLen:], nil } - return nil, nil, fmt.Errorf("invalid sig data length %d: %w", sigLength, ErrInvalidSignatureFormat) + return nil, nil, fmt.Errorf("invalid sig data length %d: %w", len(sigData), ErrInvalidSignatureFormat) } diff --git a/module/signer.go b/module/signer.go index b02514c06ee..8438a00aa01 100644 --- a/module/signer.go +++ b/module/signer.go @@ -3,7 +3,7 @@ package module import ( "errors" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" ) var ( diff --git a/module/state_synchronization/execution_data_requester.go b/module/state_synchronization/execution_data_requester.go index 6e4c5c93b8a..35245834363 100644 --- a/module/state_synchronization/execution_data_requester.go +++ b/module/state_synchronization/execution_data_requester.go @@ -17,6 +17,8 @@ type ExecutionDataRequester interface { // OnBlockFinalized accepts block finalization notifications from the FollowerDistributor OnBlockFinalized(*model.Block) - // AddOnExecutionDataReceivedConsumer adds a callback to be called when a new ExecutionData is received - AddOnExecutionDataReceivedConsumer(fn OnExecutionDataReceivedConsumer) + // HighestConsecutiveHeight returns the highest consecutive block height for which ExecutionData + // has been received. + // This method must only be called after the component is Ready. If it is called early, an error is returned. + HighestConsecutiveHeight() (uint64, error) } diff --git a/module/state_synchronization/index_reporter.go b/module/state_synchronization/index_reporter.go new file mode 100644 index 00000000000..2498cbaa03c --- /dev/null +++ b/module/state_synchronization/index_reporter.go @@ -0,0 +1,9 @@ +package state_synchronization + +// IndexReporter provides information about the current state of the execution state indexer. +type IndexReporter interface { + // LowestIndexedHeight returns the lowest height indexed by the execution state indexer. + LowestIndexedHeight() (uint64, error) + // HighestIndexedHeight returns the highest height indexed by the execution state indexer. 
+ HighestIndexedHeight() (uint64, error) +} diff --git a/module/state_synchronization/indexer/collection_executed_metric.go b/module/state_synchronization/indexer/collection_executed_metric.go new file mode 100644 index 00000000000..ec1ac58b054 --- /dev/null +++ b/module/state_synchronization/indexer/collection_executed_metric.go @@ -0,0 +1,178 @@ +package indexer + +import ( + "errors" + "time" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/storage" +) + +var _ module.CollectionExecutedMetric = (*CollectionExecutedMetricImpl)(nil) + +// CollectionExecutedMetricImpl tracks metrics to measure how long it takes for transactions to reach each step in their lifecycle +type CollectionExecutedMetricImpl struct { + log zerolog.Logger // used to log relevant actions with context + + accessMetrics module.AccessMetrics + collectionsToMarkFinalized *stdmap.Times + collectionsToMarkExecuted *stdmap.Times + blocksToMarkExecuted *stdmap.Times + + collections storage.Collections + blocks storage.Blocks + + blockTransactions *stdmap.IdentifierMap // Map to track transactions for each block for sealed metrics +} + +// NewCollectionExecutedMetricImpl creates a new CollectionExecutedMetricImpl. +func NewCollectionExecutedMetricImpl( + log zerolog.Logger, + accessMetrics module.AccessMetrics, + collectionsToMarkFinalized *stdmap.Times, + collectionsToMarkExecuted *stdmap.Times, + blocksToMarkExecuted *stdmap.Times, + collections storage.Collections, + blocks storage.Blocks, + blockTransactions *stdmap.IdentifierMap, +) (*CollectionExecutedMetricImpl, error) { + return &CollectionExecutedMetricImpl{ + log: log, + accessMetrics: accessMetrics, + collectionsToMarkFinalized: collectionsToMarkFinalized, + collectionsToMarkExecuted: collectionsToMarkExecuted, + blocksToMarkExecuted: blocksToMarkExecuted, + collections: collections, + blocks: blocks, + blockTransactions: blockTransactions, + }, nil +} + +// CollectionFinalized tracks collections to mark finalized +func (c *CollectionExecutedMetricImpl) CollectionFinalized(light *flow.LightCollection) { + lightID := light.ID() + if ti, found := c.collectionsToMarkFinalized.Get(lightID); found { + + block, err := c.blocks.ByCollectionID(lightID) + if err != nil { + c.log.Warn().Err(err).Msg("could not find block by collection ID") + return + } + blockID := block.ID() + + for _, t := range light.Transactions { + c.accessMetrics.TransactionFinalized(t, ti) + + c.blockTransactions.Append(blockID, t) + } + c.collectionsToMarkFinalized.Remove(lightID) + } +} + +// CollectionExecuted tracks collections to mark executed +func (c *CollectionExecutedMetricImpl) CollectionExecuted(light *flow.LightCollection) { + if ti, found := c.collectionsToMarkExecuted.Get(light.ID()); found { + for _, t := range light.Transactions { + c.accessMetrics.TransactionExecuted(t, ti) + } + c.collectionsToMarkExecuted.Remove(light.ID()) + } +} + +// BlockFinalized tracks finalized metric for block +func (c *CollectionExecutedMetricImpl) BlockFinalized(block *flow.Block) { + // TODO: lookup actual finalization time by looking at the block finalizing `b` + now := time.Now().UTC() + blockID := block.ID() + + // mark all transactions as finalized + // TODO: sample to reduce performance overhead + for _, g := range block.Payload.Guarantees { + l, err := c.collections.LightByID(g.CollectionID) + if errors.Is(err, storage.ErrNotFound) {
c.collectionsToMarkFinalized.Add(g.CollectionID, now) + continue + } else if err != nil { + c.log.Warn().Err(err).Str("collection_id", g.CollectionID.String()). + Msg("could not track tx finalized metric: finalized collection not found locally") + continue + } + + for _, t := range l.Transactions { + c.accessMetrics.TransactionFinalized(t, now) + c.blockTransactions.Append(blockID, t) + } + } + + // Process block seals + for _, s := range block.Payload.Seals { + transactions, found := c.blockTransactions.Get(s.BlockID) + + if found { + for _, t := range transactions { + c.accessMetrics.TransactionSealed(t, now) + } + c.blockTransactions.Remove(s.BlockID) + } + } + + if ti, found := c.blocksToMarkExecuted.Get(blockID); found { + c.blockExecuted(block, ti) + c.accessMetrics.UpdateExecutionReceiptMaxHeight(block.Height) + c.blocksToMarkExecuted.Remove(blockID) + } +} + +// ExecutionReceiptReceived tracks execution receipt metrics +func (c *CollectionExecutedMetricImpl) ExecutionReceiptReceived(r *flow.ExecutionReceipt) { + // TODO add actual execution time to execution receipt? + now := time.Now().UTC() + + // retrieve the block + // TODO: consider using storage.Index.ByBlockID, the index contains collection id and seals ID + b, err := c.blocks.ByID(r.ExecutionResult.BlockID) + + if errors.Is(err, storage.ErrNotFound) { + c.blocksToMarkExecuted.Add(r.ExecutionResult.BlockID, now) + return + } + + if err != nil { + c.log.Warn().Err(err).Msg("could not track tx executed metric: executed block not found locally") + return + } + + c.accessMetrics.UpdateExecutionReceiptMaxHeight(b.Height) + + c.blockExecuted(b, now) +} + +// UpdateLastFullBlockHeight updates the last full block height metric. +func (c *CollectionExecutedMetricImpl) UpdateLastFullBlockHeight(height uint64) { + c.accessMetrics.UpdateLastFullBlockHeight(height) +} + +// blockExecuted tracks executed metric for block +func (c *CollectionExecutedMetricImpl) blockExecuted(block *flow.Block, ti time.Time) { + // mark all transactions as executed + // TODO: sample to reduce performance overhead + for _, g := range block.Payload.Guarantees { + l, err := c.collections.LightByID(g.CollectionID) + if errors.Is(err, storage.ErrNotFound) { + c.collectionsToMarkExecuted.Add(g.CollectionID, ti) + continue + } else if err != nil { + c.log.Warn().Err(err).Str("collection_id", g.CollectionID.String()). + Msg("could not track tx executed metric: executed collection not found locally") + continue + } + + for _, t := range l.Transactions { + c.accessMetrics.TransactionExecuted(t, ti) + } + } +} diff --git a/module/state_synchronization/indexer/in_memory_indexer.go b/module/state_synchronization/indexer/in_memory_indexer.go new file mode 100644 index 00000000000..c87b3e1eee3 --- /dev/null +++ b/module/state_synchronization/indexer/in_memory_indexer.go @@ -0,0 +1,202 @@ +package indexer + +import ( + "fmt" + "time" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" +) + +// <component_spec> +// InMemoryIndexer indexes block execution data for a single ExecutionResult into a mempool. It is +// designed to be used as part of the optimistic syncing processing pipeline, to index data for +// unsealed execution results, which is eventually persisted when the execution result is sealed. +// +// The data contained within the BlockExecutionData is verified by verification nodes as part of the
Once the execution result is sealed, the Access node can accept it as valid +// without further verification. However, with optimistic syncing, the Access node may index data +// for execution results that are not sealed. Since this data is not certified by the protocol, it +// must not be persisted to disk. It may be used by the Access node to serve Access API requests, +// with the understanding that it may be later determined to be invalid. +// +// The provided BlockExecutionData is received over the network and its hash is compared to the value +// included in an ExecutionResult within a certified block. This guarantees it is the same data that +// was produced by an execution node whose stake is at risk if the data is incorrect. It is not +// practical for an Access node to verify all data, but the indexer may perform opportunistic checks +// to ensure the data is generally consistent. +// +// Transaction error messages are received directly from execution nodes with no protocol guarantees. +// The node must validate that there is a one-to-one mapping between failed transactions and +// transaction error messages. +// Since the error messages are requested directly from the execution nodes, it's possible that they +// are delayed. To avoid blocking the indexing process if ENs are unresponsive, the processing pipeline +// may skip the call to `ValidateTxErrors()` if the error messages are not ready. In this case, the +// error messages may be validated and backfilled later. +// </component_spec> +// +// Safe for concurrent use. +type InMemoryIndexer struct { + log zerolog.Logger + block *flow.Block + executionResult *flow.ExecutionResult +} + +// IndexerData is the collection of data ingested by the indexer. +type IndexerData struct { + Events []flow.Event + Collections []*flow.Collection + Transactions []*flow.TransactionBody + Results []flow.LightTransactionResult + Registers []flow.RegisterEntry +} + +// NewInMemoryIndexer returns a new indexer that indexes block execution data and error messages for +// a single ExecutionResult. +// +// No error returns are expected during normal operations. +func NewInMemoryIndexer( + log zerolog.Logger, + block *flow.Block, + executionResult *flow.ExecutionResult, +) (*InMemoryIndexer, error) { + if block.ID() != executionResult.BlockID { + return nil, fmt.Errorf("block ID and execution result block ID must match") + } + + return &InMemoryIndexer{ + log: log.With(). + Str("component", "in_memory_indexer"). + Str("execution_data_id", executionResult.ExecutionDataID.String()). + Str("block_id", executionResult.BlockID.String()). + Logger(), + block: block, + executionResult: executionResult, + }, nil +} + +// IndexBlockData indexes all execution block data. +// +// The method is idempotent and does not modify the state of the indexer. +// +// All error returns are benign and side-effect free for the node. They indicate that the BlockExecutionData +// is inconsistent with the execution result and its block, which points to invalid data produced by +// an external node.
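A hypothetical sketch of how the pipeline described above might drive the indexer; the orchestration, variable names, and the readiness check are assumptions, not part of this change:

// indexUnsealed indexes unsealed execution data in memory; persistence happens only once sealed.
func indexUnsealed(
	log zerolog.Logger,
	block *flow.Block,
	result *flow.ExecutionResult,
	execData *execution_data.BlockExecutionData,
	txErrMsgs []flow.TransactionResultErrorMessage,
	errMsgsReady bool,
) (*IndexerData, error) {
	idx, err := NewInMemoryIndexer(log, block, result)
	if err != nil {
		return nil, err // block / execution result mismatch
	}
	data, err := idx.IndexBlockData(execData)
	if err != nil {
		return nil, err // benign: data inconsistent with the certified result
	}
	// Error-message validation may be deferred when execution nodes lag behind.
	if errMsgsReady {
		if err := ValidateTxErrors(data.Results, txErrMsgs); err != nil {
			return nil, err // benign: failed transactions and error messages disagree
		}
	}
	return data, nil
}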
+func (i *InMemoryIndexer) IndexBlockData(data *execution_data.BlockExecutionData) (*IndexerData, error) { + if data.BlockID != i.executionResult.BlockID { + return nil, fmt.Errorf("unexpected block execution data: expected block_id=%s, actual block_id=%s", i.executionResult.BlockID, data.BlockID) + } + + // sanity check: the execution data should contain data for all collections in the block, plus + // the system chunk + if len(data.ChunkExecutionDatas) != len(i.block.Payload.Guarantees)+1 { + return nil, fmt.Errorf("block execution data chunk count (%d) does not match number of block guarantees (%d) plus the system chunk", + len(data.ChunkExecutionDatas), len(i.block.Payload.Guarantees)) + } + + start := time.Now() + i.log.Debug().Msg("indexing block data") + + events := make([]flow.Event, 0) + results := make([]flow.LightTransactionResult, 0) + collections := make([]*flow.Collection, 0) + transactions := make([]*flow.TransactionBody, 0) + registerUpdates := make(map[ledger.Path]*ledger.Payload) + + for idx, chunk := range data.ChunkExecutionDatas { + events = append(events, chunk.Events...) + results = append(results, chunk.TransactionResults...) + + // do not index tx and collections from the system chunk since they can be generated on demand + if idx < len(data.ChunkExecutionDatas)-1 { + collections = append(collections, chunk.Collection) + transactions = append(transactions, chunk.Collection.Transactions...) + } + + // sanity check: there must be a one-to-one mapping between transactions and results + if len(chunk.Collection.Transactions) != len(chunk.TransactionResults) { + return nil, fmt.Errorf("number of transactions (%d) does not match number of results (%d)", + len(chunk.Collection.Transactions), len(chunk.TransactionResults)) + } + + // collect register updates + if chunk.TrieUpdate != nil { + // sanity check: there must be a one-to-one mapping between paths and payloads + if len(chunk.TrieUpdate.Paths) != len(chunk.TrieUpdate.Payloads) { + return nil, fmt.Errorf("number of ledger paths (%d) does not match number of ledger payloads (%d)", + len(chunk.TrieUpdate.Paths), len(chunk.TrieUpdate.Payloads)) + } + + // collect registers (last update for a path within the block is persisted) + for j, path := range chunk.TrieUpdate.Paths { + registerUpdates[path] = chunk.TrieUpdate.Payloads[j] + } + } + } + + // convert final payloads to register entries + registerEntries := make([]flow.RegisterEntry, 0, len(registerUpdates)) + for path, payload := range registerUpdates { + key, value, err := convert.PayloadToRegister(payload) + if err != nil { + return nil, fmt.Errorf("failed to convert payload to register entry (path: %s): %w", path.String(), err) + } + + registerEntries = append(registerEntries, flow.RegisterEntry{ + Key: key, + Value: value, + }) + } + + i.log.Debug(). + Dur("duration_ms", time.Since(start)). + Int("event_count", len(events)). + Int("register_count", len(registerEntries)). + Int("result_count", len(results)). + Int("collection_count", len(collections)). + Msg("indexed block data") + + return &IndexerData{ + Events: events, + Collections: collections, + Transactions: transactions, + Results: results, + Registers: registerEntries, + }, nil +} + +// ValidateTxErrors validates that the transaction results and error messages are consistent, and +// returns an error if they are not. +// +// All error returns are benign and side-effect free for the node.
They indicate that the transaction +// results and error messages are inconsistent, which points to invalid data produced by an external +// node. +func ValidateTxErrors(results []flow.LightTransactionResult, txResultErrMsgs []flow.TransactionResultErrorMessage) error { + txWithErrors := make(map[flow.Identifier]bool) + for _, txResult := range txResultErrMsgs { + txWithErrors[txResult.TransactionID] = true + } + + failedCount := 0 + for _, txResult := range results { + if !txResult.Failed { + continue + } + failedCount++ + + if !txWithErrors[txResult.TransactionID] { + return fmt.Errorf("transaction %s failed but no error message was provided", txResult.TransactionID) + } + } + + // make sure there are no extra error messages + if failedCount != len(txWithErrors) { + return fmt.Errorf("number of failed transactions (%d) does not match number of transaction error messages (%d)", failedCount, len(txWithErrors)) + } + + return nil +} diff --git a/module/state_synchronization/indexer/in_memory_indexer_test.go b/module/state_synchronization/indexer/in_memory_indexer_test.go new file mode 100644 index 00000000000..df95d944875 --- /dev/null +++ b/module/state_synchronization/indexer/in_memory_indexer_test.go @@ -0,0 +1,246 @@ +package indexer + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/utils/unittest" + "github.com/onflow/flow-go/utils/unittest/fixtures" +) + +type testFixture struct { + block *flow.Block + exeResult *flow.ExecutionResult + execData *execution_data.BlockExecutionData +} + +// generateFixture generates a test fixture for the indexer. The returned data has the following +// properties: +// - The block execution data contains collections for each of the block's guarantees, plus the system chunk +// - Each collection has 3 transactions +// - The first path in each trie update is the same, testing that the indexer will use the last value +// - Every 3rd transaction is failed +// - There are tx error messages for all failed transactions +func generateFixture(g *fixtures.GeneratorSuite) *testFixture { + collections := g.Collections().List(4, fixtures.Collection.WithTxCount(3)) + chunkExecutionDatas := make([]*execution_data.ChunkExecutionData, len(collections)) + guarantees := make([]*flow.CollectionGuarantee, len(collections)-1) + path := g.LedgerPaths().Fixture() + for i, collection := range collections { + chunkData := g.ChunkExecutionDatas().Fixture( + fixtures.ChunkExecutionData.WithCollection(collection), + ) + // use the same path for the first ledger payload in each chunk. the indexer should choose the + // last value in the register entry.
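+ // (Illustration of the expected last-write-wins behaviour: if chunk 1 writes path P -> A and +// chunk 2 writes P -> B, only the register entry with value B should be returned by the indexer.)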
+ chunkData.TrieUpdate.Paths[0] = path + chunkExecutionDatas[i] = chunkData + + if i < len(collections)-1 { + guarantees[i] = g.Guarantees().Fixture(fixtures.Guarantee.WithCollectionID(collection.ID())) + } + for txIndex := range chunkExecutionDatas[i].TransactionResults { + if txIndex%3 == 0 { + chunkExecutionDatas[i].TransactionResults[txIndex].Failed = true + } + } + } + + payload := g.Payloads().Fixture(fixtures.Payload.WithGuarantees(guarantees...)) + block := g.Blocks().Fixture(fixtures.Block.WithPayload(payload)) + + exeResult := g.ExecutionResults().Fixture(fixtures.ExecutionResult.WithBlock(block)) + execData := g.BlockExecutionDatas().Fixture( + fixtures.BlockExecutionData.WithBlockID(block.ID()), + fixtures.BlockExecutionData.WithChunkExecutionDatas(chunkExecutionDatas...), + ) + return &testFixture{ + block: block, + exeResult: exeResult, + execData: execData, + } +} + +func assertIndexerData(t *testing.T, indexerData *IndexerData, execData *execution_data.BlockExecutionData) { + expectedEvents := make([]flow.Event, 0) + expectedResults := make([]flow.LightTransactionResult, 0) + expectedCollections := make([]*flow.Collection, 0) + expectedTransactions := make([]*flow.TransactionBody, 0) + expectedRegisterEntries := make([]flow.RegisterEntry, 0) + + for i, chunk := range execData.ChunkExecutionDatas { + expectedEvents = append(expectedEvents, chunk.Events...) + expectedResults = append(expectedResults, chunk.TransactionResults...) + + if i < len(execData.ChunkExecutionDatas)-1 { + expectedCollections = append(expectedCollections, chunk.Collection) + expectedTransactions = append(expectedTransactions, chunk.Collection.Transactions...) + } + + for j, payload := range chunk.TrieUpdate.Payloads { + // the first payload of each trie update has the same path. only keep the last one. 
+ if j == 0 && i < len(execData.ChunkExecutionDatas)-1 { + continue + } + key, value, err := convert.PayloadToRegister(payload) + require.NoError(t, err) + + expectedRegisterEntries = append(expectedRegisterEntries, flow.RegisterEntry{ + Key: key, + Value: value, + }) + } + } + + assert.Equal(t, expectedEvents, indexerData.Events) + assert.Equal(t, expectedResults, indexerData.Results) + assert.Equal(t, expectedCollections, indexerData.Collections) + assert.Equal(t, expectedTransactions, indexerData.Transactions) + assert.ElementsMatch(t, expectedRegisterEntries, indexerData.Registers) // may not be in the same order +} + +func TestIndexBlockData(t *testing.T) { + g := fixtures.NewGeneratorSuite() + + t.Run("happy path", func(t *testing.T) { + data := generateFixture(g) + + indexer, err := NewInMemoryIndexer(unittest.Logger(), data.block, data.exeResult) + require.NoError(t, err) + + indexerData, err := indexer.IndexBlockData(data.execData) + require.NoError(t, err) + + assertIndexerData(t, indexerData, data.execData) + }) + + t.Run("mismatched blockID in constructor", func(t *testing.T) { + block := g.Blocks().Fixture() + execResult := g.ExecutionResults().Fixture() + + indexer, err := NewInMemoryIndexer(unittest.Logger(), block, execResult) + require.Nil(t, indexer) + require.ErrorContains(t, err, "block ID and execution result block ID must match") + }) + + t.Run("incorrect block ID", func(t *testing.T) { + data := generateFixture(g) + block := g.Blocks().Fixture() + execResult := g.ExecutionResults().Fixture(fixtures.ExecutionResult.WithBlock(block)) + + indexer, err := NewInMemoryIndexer(unittest.Logger(), block, execResult) + require.NoError(t, err) + + indexerData, err := indexer.IndexBlockData(data.execData) + require.Nil(t, indexerData) + require.ErrorContains(t, err, "unexpected block execution data: expected block_id") + }) + + t.Run("incorrect chunk count", func(t *testing.T) { + data := generateFixture(g) + data.execData.ChunkExecutionDatas = data.execData.ChunkExecutionDatas[:len(data.execData.ChunkExecutionDatas)-1] + + indexer, err := NewInMemoryIndexer(unittest.Logger(), data.block, data.exeResult) + require.NoError(t, err) + + indexerData, err := indexer.IndexBlockData(data.execData) + require.Nil(t, indexerData) + require.ErrorContains(t, err, "block execution data chunk (3) count does not match block guarantee (3) plus system chunk") + }) + + t.Run("mismatched transaction count", func(t *testing.T) { + data := generateFixture(g) + data.execData.ChunkExecutionDatas[0].TransactionResults = data.execData.ChunkExecutionDatas[0].TransactionResults[:len(data.execData.ChunkExecutionDatas[0].TransactionResults)-1] + + indexer, err := NewInMemoryIndexer(unittest.Logger(), data.block, data.exeResult) + require.NoError(t, err) + + indexerData, err := indexer.IndexBlockData(data.execData) + require.Nil(t, indexerData) + require.ErrorContains(t, err, "number of transactions (3) does not match number of results (2)") + }) + + t.Run("mismatched ledger path count", func(t *testing.T) { + data := generateFixture(g) + data.execData.ChunkExecutionDatas[0].TrieUpdate.Paths = data.execData.ChunkExecutionDatas[0].TrieUpdate.Paths[:len(data.execData.ChunkExecutionDatas[0].TrieUpdate.Paths)-1] + + indexer, err := NewInMemoryIndexer(unittest.Logger(), data.block, data.exeResult) + require.NoError(t, err) + + indexerData, err := indexer.IndexBlockData(data.execData) + require.Nil(t, indexerData) + require.ErrorContains(t, err, "number of ledger paths (1) does not match number of ledger payloads 
(2)") + }) + + t.Run("invalid register payload", func(t *testing.T) { + data := generateFixture(g) + + payload := &ledger.Payload{} + payloadJSON := `{"Key":{"KeyParts":[{"Type":3,"Value":"1c3c5064a9a381ff"},{"Type":2,"Value":"eef4f13ec229f5f7"}]},"Value":"5353ae707c"}` + err := payload.UnmarshalJSON([]byte(payloadJSON)) + require.NoError(t, err) + + data.execData.ChunkExecutionDatas[0].TrieUpdate.Payloads[1] = payload + + indexer, err := NewInMemoryIndexer(unittest.Logger(), data.block, data.exeResult) + require.NoError(t, err) + + indexerData, err := indexer.IndexBlockData(data.execData) + require.Nil(t, indexerData) + require.ErrorContains(t, err, "failed to convert payload to register entry") + }) +} + +func TestValidateTxErrors(t *testing.T) { + g := fixtures.NewGeneratorSuite() + + data := generateFixture(g) + + indexer, err := NewInMemoryIndexer(unittest.Logger(), data.block, data.exeResult) + require.NoError(t, err) + + indexerData, err := indexer.IndexBlockData(data.execData) + require.NoError(t, err) + + txErrMsgs := g.TransactionErrorMessages().ForTransactionResults(indexerData.Results) + + t.Run("happy path", func(t *testing.T) { + err = ValidateTxErrors(indexerData.Results, txErrMsgs) + require.NoError(t, err) + }) + + t.Run("missing tx error messages", func(t *testing.T) { + errMsg := txErrMsgs[len(txErrMsgs)-1] + txErrMsgs := txErrMsgs[:len(txErrMsgs)-1] + + err = ValidateTxErrors(indexerData.Results, txErrMsgs) + assert.ErrorContains(t, err, fmt.Sprintf("transaction %s failed but no error message was provided", errMsg.TransactionID)) + }) + + t.Run("mismatched tx error message count", func(t *testing.T) { + txErrMsgs := append(txErrMsgs, flow.TransactionResultErrorMessage{ + TransactionID: g.Identifiers().Fixture(), + Index: g.Random().Uint32(), + ErrorMessage: "test error", + ExecutorID: g.Identifiers().Fixture(), + }) + + err = ValidateTxErrors(indexerData.Results, txErrMsgs) + assert.ErrorContains(t, err, "number of failed transactions (4) does not match number of transaction error messages (5)") + }) + + t.Run("mismatched tx error message transaction ID", func(t *testing.T) { + txErrMsgs := make([]flow.TransactionResultErrorMessage, len(txErrMsgs)) + copy(txErrMsgs, txErrMsgs) + txErrMsgs[0].TransactionID = g.Identifiers().Fixture() + + err = ValidateTxErrors(indexerData.Results, txErrMsgs) + assert.ErrorContains(t, err, "failed but no error message was provided") + }) +} diff --git a/module/state_synchronization/indexer/indexer.go b/module/state_synchronization/indexer/indexer.go new file mode 100644 index 00000000000..db164a2c12d --- /dev/null +++ b/module/state_synchronization/indexer/indexer.go @@ -0,0 +1,229 @@ +package indexer + +import ( + "errors" + "fmt" + "time" + + "github.com/rs/zerolog" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/jobqueue" + "github.com/onflow/flow-go/module/state_synchronization" + "github.com/onflow/flow-go/module/state_synchronization/requester/jobs" + "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/storage" +) + +const ( + workersCount = 1 // how many workers will concurrently process the tasks in the jobqueue + searchAhead = 1 // how many block heights ahead of the current will be 
requested and tasked for jobqueue + + // fetchTimeout is the timeout for retrieving execution data from the datastore. + // This is required by the execution data reader, but in practice, this isn't needed + // here since the data is in a local db. + fetchTimeout = 30 * time.Second +) + +// ErrIndexNotInitialized is returned when the indexer is not initialized. +// +// This generally indicates that the index databases are still being initialized, and trying again +// later may succeed. +var ErrIndexNotInitialized = errors.New("index not initialized") + +var _ state_synchronization.IndexReporter = (*Indexer)(nil) +var _ execution_data.ProcessedHeightRecorder = (*Indexer)(nil) + +// Indexer handles ingestion of newly available execution data and uses the execution data indexer module +// to index it. +// Newly available data is processed by a jobqueue that uses the execution data reader to +// obtain new jobs. The worker also implements the `highestConsecutiveHeight` method which is used by the execution +// data reader, so that it doesn't surpass the highest sealed block height when fetching the data. +// The worker exposes a callback that is used by the upstream queues which download new execution data to +// notify that new data is available and kick off indexing. +type Indexer struct { + component.Component + execution_data.ProcessedHeightRecorder + + log zerolog.Logger + exeDataReader *jobs.ExecutionDataReader + exeDataNotifier engine.Notifier + blockIndexedNotifier engine.Notifier + // lastProcessedHeight is the last handled block height + lastProcessedHeight *atomic.Uint64 + indexer *IndexerCore + jobConsumer *jobqueue.ComponentConsumer + registers storage.RegisterIndex +} + +// NewIndexer creates a new execution data indexer worker. +func NewIndexer( + log zerolog.Logger, + initHeight uint64, + registers storage.RegisterIndex, + indexer *IndexerCore, + executionCache *cache.ExecutionDataCache, + executionDataLatestHeight func() (uint64, error), + processedHeightInitializer storage.ConsumerProgressInitializer, +) (*Indexer, error) { + r := &Indexer{ + log: log.With().Str("module", "execution_indexer").Logger(), + exeDataNotifier: engine.NewNotifier(), + blockIndexedNotifier: engine.NewNotifier(), + lastProcessedHeight: atomic.NewUint64(initHeight), + indexer: indexer, + registers: registers, + ProcessedHeightRecorder: execution_data.NewProcessedHeightRecorderManager(initHeight), + } + + r.exeDataReader = jobs.NewExecutionDataReader(executionCache, fetchTimeout, executionDataLatestHeight) + + // create a jobqueue that will process new available block execution data. The `exeDataNotifier` is used to + // signal new work, which is being triggered on the `OnExecutionData` handler. + jobConsumer, err := jobqueue.NewComponentConsumer( + r.log, + r.exeDataNotifier.Channel(), + processedHeightInitializer, + r.exeDataReader, + initHeight, + r.processExecutionData, + workersCount, + searchAhead, + ) + if err != nil { + return nil, fmt.Errorf("error creating execution data jobqueue: %w", err) + } + + r.jobConsumer = jobConsumer + + // SetPostNotifier will notify blockIndexedNotifier AFTER r.jobConsumer.LastProcessedIndex is updated.
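+ // Note: onBlockIndexed reads LastProcessedIndex when handling the notification, so the +// notification must only fire after the index has been updated; a pre-notifier could observe +// a stale height and miss OnBlockProcessed calls for the newly indexed blocks.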
+ r.jobConsumer.SetPostNotifier(func(module.JobID) { + r.blockIndexedNotifier.Notify() + }) + + cm := component.NewComponentManagerBuilder() + cm.AddWorker(r.runExecutionDataConsumer) + cm.AddWorker(r.processBlockIndexed) + r.Component = cm.Build() + + return r, nil +} + +// runExecutionDataConsumer runs the jobConsumer component. +// +// No errors are expected during normal operations. +func (i *Indexer) runExecutionDataConsumer(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + i.log.Info().Msg("starting execution data jobqueue") + i.jobConsumer.Start(ctx) + err := util.WaitClosed(ctx, i.jobConsumer.Ready()) + if err == nil { + ready() + } + + <-i.jobConsumer.Done() +} + +// processBlockIndexed is a worker that processes indexed blocks. +func (i *Indexer) processBlockIndexed( + ctx irrecoverable.SignalerContext, + ready component.ReadyFunc, +) { + ready() + + for { + select { + case <-ctx.Done(): + return + case <-i.blockIndexedNotifier.Channel(): + err := i.onBlockIndexed() + if err != nil { + ctx.Throw(err) + } + } + } +} + +// onBlockIndexed notifies ProcessedHeightRecorderManager that a new block has been indexed. +// +// Expected errors during normal operation: +// - storage.ErrNotFound: if no finalized block header is known at the given height +func (i *Indexer) onBlockIndexed() error { + lastProcessedHeight := i.lastProcessedHeight.Load() + highestIndexedHeight := i.jobConsumer.LastProcessedIndex() + + if lastProcessedHeight < highestIndexedHeight { + // we need a loop here because heights could otherwise be skipped; + // we must guarantee all heights are processed + for height := lastProcessedHeight + 1; height <= highestIndexedHeight; height++ { + header, err := i.indexer.headers.ByHeight(height) + if err != nil { + // if the execution data is available, the block must be locally finalized + i.log.Error().Err(err).Msgf("could not get header for height %d", height) + return fmt.Errorf("could not get header for height %d: %w", height, err) + } + + i.OnBlockProcessed(header.Height) + } + i.lastProcessedHeight.Store(highestIndexedHeight) + } + + return nil +} + +// Start the worker jobqueue to consume the available data. +func (i *Indexer) Start(ctx irrecoverable.SignalerContext) { + i.exeDataReader.AddContext(ctx) + i.Component.Start(ctx) +} + +// LowestIndexedHeight returns the lowest height indexed by the execution indexer. +func (i *Indexer) LowestIndexedHeight() (uint64, error) { + // TODO: use a separate value to track the lowest indexed height. We're using the registers db's + // value here to start because it's convenient. When pruning support is added, this will need to + // be updated. + return i.registers.FirstHeight(), nil +} + +// HighestIndexedHeight returns the highest height indexed by the execution indexer. +func (i *Indexer) HighestIndexedHeight() (uint64, error) { + select { + case <-i.jobConsumer.Ready(): + default: + // LastProcessedIndex is not meaningful until the component has completed startup + return 0, fmt.Errorf("HighestIndexedHeight must not be called before the component is ready") + } + + // The jobqueue maintains its own highest indexed height value, separate from the register db. + // Since jobs are only marked complete when ALL data is indexed, the lastProcessedIndex must + // be less than or equal to the register db's LatestHeight. + return i.jobConsumer.LastProcessedIndex(), nil +} + +// OnExecutionData is used to notify when new execution data is downloaded by the execution data requester jobqueue.
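+// The passed entity is ignored: the notifier only signals that new data may be available, and the +// jobqueue re-reads execution data by height via the execution data reader. engine.Notifier +// coalesces repeated notifications, so it is safe to call this for every downloaded entity.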
+func (i *Indexer) OnExecutionData(_ *execution_data.BlockExecutionDataEntity) { + i.exeDataNotifier.Notify() +} + +// processExecutionData is a worker method that is called by the jobqueue when processing a new job. +// The job data contains execution data which we provide to the execution indexer for indexing. +func (i *Indexer) processExecutionData(ctx irrecoverable.SignalerContext, job module.Job, done func()) { + entry, err := jobs.JobToBlockEntry(job) + if err != nil { + i.log.Error().Err(err).Str("job_id", string(job.ID())).Msg("error converting execution data job") + ctx.Throw(err) + } + + err = i.indexer.IndexBlockData(entry.ExecutionData) + if err != nil { + i.log.Error().Err(err).Str("job_id", string(job.ID())).Msg("error during execution data index processing job") + ctx.Throw(err) + } + + done() +} diff --git a/module/state_synchronization/indexer/indexer_core.go b/module/state_synchronization/indexer/indexer_core.go new file mode 100644 index 00000000000..ef768840a7a --- /dev/null +++ b/module/state_synchronization/indexer/indexer_core.go @@ -0,0 +1,369 @@ +package indexer + +import ( + "errors" + "fmt" + "time" + + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" + + "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/logging" +) + +// IndexerCore indexes the execution state. +type IndexerCore struct { + log zerolog.Logger + metrics module.ExecutionStateIndexerMetrics + + registers storage.RegisterIndex + headers storage.Headers + events storage.Events + collections storage.Collections + transactions storage.Transactions + results storage.LightTransactionResults + protocolDB storage.DB + + collectionExecutedMetric module.CollectionExecutedMetric + + derivedChainData *derived.DerivedChainData + serviceAddress flow.Address + lockManager lockctx.Manager +} + +// New creates an execution state indexer used to ingest block execution data and index it by height. +// The passed RegisterIndex storage must already be populated with the first and latest heights; otherwise +// the indexer won't be initialized, which ensures the storage has been bootstrapped first. +func New( + log zerolog.Logger, + metrics module.ExecutionStateIndexerMetrics, + protocolDB storage.DB, + registers storage.RegisterIndex, + headers storage.Headers, + events storage.Events, + collections storage.Collections, + transactions storage.Transactions, + results storage.LightTransactionResults, + chain flow.Chain, + derivedChainData *derived.DerivedChainData, + collectionExecutedMetric module.CollectionExecutedMetric, + lockManager lockctx.Manager, +) (*IndexerCore, error) { + log = log.With().Str("component", "execution_indexer").Logger() + metrics.InitializeLatestHeight(registers.LatestHeight()) + + log.Info(). + Uint64("first_height", registers.FirstHeight()). + Uint64("latest_height", registers.LatestHeight()).
+ Msg("indexer initialized") + + return &IndexerCore{ + log: log, + metrics: metrics, + protocolDB: protocolDB, + registers: registers, + headers: headers, + collections: collections, + transactions: transactions, + events: events, + results: results, + serviceAddress: chain.ServiceAddress(), + derivedChainData: derivedChainData, + + collectionExecutedMetric: collectionExecutedMetric, + lockManager: lockManager, + }, nil +} + +// RegisterValue retrieves register values by the register IDs at the provided block height. +// Even if the register wasn't indexed at the provided height, returns the highest height the register was indexed at. +// If a register is not found it will return a nil value and not an error. +// Expected errors: +// - storage.ErrHeightNotIndexed if the given height was not indexed yet or lower than the first indexed height. +func (c *IndexerCore) RegisterValue(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) { + value, err := c.registers.Get(ID, height) + if err != nil { + // only return an error if the error doesn't match the not found error, since we have + // to gracefully handle not found values and instead assign nil, that is because the script executor + // expects that behaviour + if errors.Is(err, storage.ErrNotFound) { + return nil, nil + } + + return nil, err + } + + return value, nil +} + +// IndexBlockData indexes all execution block data by height. +// This method shouldn't be used concurrently. +// Expected errors: +// - storage.ErrNotFound if the block for execution data was not found +func (c *IndexerCore) IndexBlockData(data *execution_data.BlockExecutionDataEntity) error { + header, err := c.headers.ByBlockID(data.BlockID) + if err != nil { + return fmt.Errorf("could not get the block by ID %s: %w", data.BlockID, err) + } + + lg := c.log.With(). + Hex("block_id", logging.ID(data.BlockID)). + Uint64("height", header.Height). + Logger() + + lg.Debug().Msgf("indexing new block") + + // the height we are indexing must be exactly one bigger or same as the latest height indexed from the storage + latest := c.registers.LatestHeight() + if header.Height != latest+1 && header.Height != latest { + return fmt.Errorf("must index block data with the next height %d, but got %d", latest+1, header.Height) + } + + // allow rerunning the indexer for same height since we are fetching height from register storage, but there are other storages + // for indexing resources which might fail to update the values, so this enables rerunning and reindexing those resources + if header.Height == latest { + lg.Warn().Msg("reindexing block data") + c.metrics.BlockReindexed() + } + + start := time.Now() + g := errgroup.Group{} + + var eventCount, resultCount, registerCount int + g.Go(func() error { + start := time.Now() + + events := make([]flow.Event, 0) + results := make([]flow.LightTransactionResult, 0) + for _, chunk := range data.ChunkExecutionDatas { + events = append(events, chunk.Events...) + results = append(results, chunk.TransactionResults...) 
+ } + + err := c.protocolDB.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := c.events.BatchStore(data.BlockID, []flow.EventsList{events}, rw) + if err != nil { + return fmt.Errorf("could not index events at height %d: %w", header.Height, err) + } + + err = c.results.BatchStore(data.BlockID, results, rw) + if err != nil { + return fmt.Errorf("could not index transaction results at height %d: %w", header.Height, err) + } + return nil + }) + + if err != nil { + return fmt.Errorf("could not commit block data: %w", err) + } + + eventCount = len(events) + resultCount = len(results) + + lg.Debug(). + Int("event_count", eventCount). + Int("result_count", resultCount). + Dur("duration_ms", time.Since(start)). + Msg("indexed events and results") + + return nil + }) + + g.Go(func() error { + start := time.Now() + + // index all collections except the system chunk + // Note: the access ingestion engine also indexes collections, starting when the block is + // finalized. This process can fall behind due to the node being offline, resource issues + // or network congestion. This indexer ensures that collections are never farther behind + // than the latest indexed block. Calling the collection handler with a collection that + // has already been indexed is a noop. + indexedCount := 0 + if len(data.ChunkExecutionDatas) > 0 { + for _, chunk := range data.ChunkExecutionDatas[0 : len(data.ChunkExecutionDatas)-1] { + err := c.indexCollection(chunk.Collection) + if err != nil { + return err + } + indexedCount++ + } + } + + lg.Debug(). + Int("collection_count", indexedCount). + Dur("duration_ms", time.Since(start)). + Msg("indexed collections") + + return nil + }) + + g.Go(func() error { + start := time.Now() + + // we iterate over all the registers and overwrite any existing register at the same path + // this will make sure if we have multiple register changes only the last change will get persisted + // if a block has two chunks: + // first chunk updates: { X: 1, Y: 2 } + // second chunk updates: { X: 2 } + // then we should persist only { X: 2, Y: 2 } + payloads := make(map[ledger.Path]*ledger.Payload) + events := make([]flow.Event, 0) + collections := make([]*flow.Collection, 0) + for _, chunk := range data.ChunkExecutionDatas { + events = append(events, chunk.Events...) + collections = append(collections, chunk.Collection) + update := chunk.TrieUpdate + if update != nil { + // this should never happen but we check anyway + if len(update.Paths) != len(update.Payloads) { + return fmt.Errorf("number of update paths (%d) does not match number of payloads (%d)", len(update.Paths), len(update.Payloads)) + } + + for i, path := range update.Paths { + payloads[path] = update.Payloads[i] + } + } + } + + err = c.indexRegisters(payloads, header.Height) + if err != nil { + return fmt.Errorf("could not index register payloads at height %d: %w", header.Height, err) + } + + err = c.updateProgramCache(header, events, collections) + if err != nil { + return fmt.Errorf("could not update program cache at height %d: %w", header.Height, err) + } + + registerCount = len(payloads) + + lg.Debug(). + Int("register_count", registerCount). + Dur("duration_ms", time.Since(start)). + Msg("indexed registers") + + return nil + }) + + err = g.Wait() + if err != nil { + return fmt.Errorf("failed to index block data at height %d: %w", header.Height, err) + } + + c.metrics.BlockIndexed(header.Height, time.Since(start), eventCount, registerCount, resultCount) + lg.Debug(). + Dur("duration_ms", time.Since(start)).
+ Msg("indexed block data") + + return nil +} + +func (c *IndexerCore) updateProgramCache(header *flow.Header, events []flow.Event, collections []*flow.Collection) error { + if c.derivedChainData == nil { + return nil + } + + derivedBlockData := c.derivedChainData.GetOrCreateDerivedBlockData( + header.ID(), + header.ParentID, + ) + + // get a list of all contracts that were updated in this block + updatedContracts, err := findContractUpdates(events) + if err != nil { + return fmt.Errorf("could not find contract updates for block %d: %w", header.Height, err) + } + + // invalidate cache entries for all modified programs + tx, err := derivedBlockData.NewDerivedTransactionData(0, 0) + if err != nil { + return fmt.Errorf("could not create derived transaction data for block %d: %w", header.Height, err) + } + + tx.AddInvalidator(&accessInvalidator{ + programs: &programInvalidator{ + invalidated: updatedContracts, + invalidateAll: hasAuthorizedTransaction(collections, c.serviceAddress), + }, + executionParameters: &executionParametersInvalidator{ + invalidateAll: hasAuthorizedTransaction(collections, c.serviceAddress), + }, + }) + + err = tx.Commit() + if err != nil { + return fmt.Errorf("could not commit derived transaction data for block %d: %w", header.Height, err) + } + + return nil +} + +func (c *IndexerCore) indexRegisters(registers map[ledger.Path]*ledger.Payload, height uint64) error { + regEntries := make(flow.RegisterEntries, 0, len(registers)) + + for _, payload := range registers { + k, err := payload.Key() + if err != nil { + return err + } + + id, err := convert.LedgerKeyToRegisterID(k) + if err != nil { + return err + } + + regEntries = append(regEntries, flow.RegisterEntry{ + Key: id, + Value: payload.Value(), + }) + } + + return c.registers.Store(regEntries, height) +} + +func (c *IndexerCore) indexCollection(collection *flow.Collection) error { + lctx := c.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertCollection) + if err != nil { + return fmt.Errorf("could not acquire lock for indexing collections: %w", err) + } + + err = IndexCollection(lctx, collection, c.collections, c.log, c.collectionExecutedMetric) + if err != nil { + return fmt.Errorf("could not handle collection") + } + return nil +} + +// IndexCollection handles the response of the collection request made earlier when a block was received. +// No errors expected during normal operations. 
+func IndexCollection( + lctx lockctx.Proof, + collection *flow.Collection, + collections storage.Collections, + logger zerolog.Logger, + collectionExecutedMetric module.CollectionExecutedMetric, +) error { + + // FIX: we can't index guarantees here, as we might have more than one block + // with the same collection as long as it is not finalized + + // store the collection, including constituent transactions, and index transactionID -> collectionID + light, err := collections.StoreAndIndexByTransaction(lctx, collection) + if err != nil { + return err + } + + collectionExecutedMetric.CollectionFinalized(light) + collectionExecutedMetric.CollectionExecuted(light) + return nil +} diff --git a/module/state_synchronization/indexer/indexer_core_test.go b/module/state_synchronization/indexer/indexer_core_test.go new file mode 100644 index 00000000000..a1ccd0c3639 --- /dev/null +++ b/module/state_synchronization/indexer/indexer_core_test.go @@ -0,0 +1,781 @@ +package indexer + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + mocks "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/module/metrics" + synctest "github.com/onflow/flow-go/module/state_synchronization/requester/unittest" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + pebbleStorage "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/utils/unittest" +) + +type indexCoreTest struct { + t *testing.T + indexer *IndexerCore + registers *storagemock.RegisterIndex + events *storagemock.Events + collection *flow.Collection + collections *storagemock.Collections + transactions *storagemock.Transactions + results *storagemock.LightTransactionResults + headers *storagemock.Headers + ctx context.Context + blocks []*flow.Block + data *execution_data.BlockExecutionDataEntity + lastHeightStore func(t *testing.T) uint64 + firstHeightStore func(t *testing.T) uint64 + registersStore func(t *testing.T, entries flow.RegisterEntries, height uint64) error + eventsStore func(t *testing.T, ID flow.Identifier, events []flow.EventsList) error + registersGet func(t *testing.T, IDs flow.RegisterID, height uint64) (flow.RegisterValue, error) +} + +func newIndexCoreTest( + t *testing.T, + blocks []*flow.Block, + exeData *execution_data.BlockExecutionDataEntity, +) *indexCoreTest { + + collection := unittest.CollectionFixture(0) + + return &indexCoreTest{ + t: t, + registers: storagemock.NewRegisterIndex(t), + events: storagemock.NewEvents(t), + collection: &collection, + results: storagemock.NewLightTransactionResults(t), + collections: storagemock.NewCollections(t), + transactions: storagemock.NewTransactions(t), + blocks: blocks, + ctx: context.Background(), + data: exeData, + headers: newBlockHeadersStorage(blocks).(*storagemock.Headers), // convert it back to mock type for tests, + } +} + +func (i *indexCoreTest) useDefaultBlockByHeight() *indexCoreTest { + i.headers. + On("BlockIDByHeight", mocks.AnythingOfType("uint64")). 
+ Return(func(height uint64) (flow.Identifier, error) { + for _, b := range i.blocks { + if b.Height == height { + return b.ID(), nil + } + } + return flow.ZeroID, fmt.Errorf("not found") + }) + + i.headers. + On("ByHeight", mocks.AnythingOfType("uint64")). + Return(func(height uint64) (*flow.Header, error) { + for _, b := range i.blocks { + if b.Height == height { + return b.ToHeader(), nil + } + } + return nil, fmt.Errorf("not found") + }) + + return i +} + +func (i *indexCoreTest) setLastHeight(f func(t *testing.T) uint64) *indexCoreTest { + i.registers. + On("LatestHeight"). + Return(func() uint64 { + return f(i.t) + }) + return i +} + +func (i *indexCoreTest) useDefaultHeights() *indexCoreTest { + i.registers. + On("FirstHeight"). + Return(func() uint64 { + return i.blocks[0].Height + }) + i.registers. + On("LatestHeight"). + Return(func() uint64 { + return i.blocks[len(i.blocks)-1].Height + }) + return i +} + +func (i *indexCoreTest) setStoreRegisters(f func(t *testing.T, entries flow.RegisterEntries, height uint64) error) *indexCoreTest { + i.registers. + On("Store", mock.AnythingOfType("flow.RegisterEntries"), mock.AnythingOfType("uint64")). + Return(func(entries flow.RegisterEntries, height uint64) error { + return f(i.t, entries, height) + }).Once() + return i +} + +func (i *indexCoreTest) setStoreEvents(f func(*testing.T, flow.Identifier, []flow.EventsList) error) *indexCoreTest { + i.events. + On("BatchStore", mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.EventsList"), mock.Anything). + Return(func(blockID flow.Identifier, events []flow.EventsList, batch storage.ReaderBatchWriter) error { + require.NotNil(i.t, batch) + return f(i.t, blockID, events) + }) + return i +} + +func (i *indexCoreTest) setStoreTransactionResults(f func(*testing.T, flow.Identifier, []flow.LightTransactionResult) error) *indexCoreTest { + i.results. + On("BatchStore", mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.LightTransactionResult"), mock.Anything). + Return(func(blockID flow.Identifier, results []flow.LightTransactionResult, batch storage.ReaderBatchWriter) error { + require.NotNil(i.t, batch) + return f(i.t, blockID, results) + }) + return i +} + +func (i *indexCoreTest) setGetRegisters(f func(t *testing.T, ID flow.RegisterID, height uint64) (flow.RegisterValue, error)) *indexCoreTest { + i.registers. + On("Get", mock.AnythingOfType("flow.RegisterID"), mock.AnythingOfType("uint64")). + Return(func(IDs flow.RegisterID, height uint64) (flow.RegisterValue, error) { + return f(i.t, IDs, height) + }) + return i +} + +func (i *indexCoreTest) useDefaultStorageMocks() *indexCoreTest { + + i.collections.On("StoreAndIndexByTransaction", mock.Anything, mock.AnythingOfType("*flow.Collection")).Return(&flow.LightCollection{}, nil).Maybe() + i.transactions.On("Store", mock.AnythingOfType("*flow.TransactionBody")).Return(nil).Maybe() + + return i +} + +func (i *indexCoreTest) useDefaultEvents() *indexCoreTest { + i.events. + On("BatchStore", mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.EventsList"), mock.Anything). + Return(nil) + return i +} + +func (i *indexCoreTest) useDefaultTransactionResults() *indexCoreTest { + i.results. + On("BatchStore", mock.AnythingOfType("flow.Identifier"), mock.AnythingOfType("[]flow.LightTransactionResult"), mock.Anything). 
+ Return(nil) + return i +} + +func (i *indexCoreTest) initIndexer() *indexCoreTest { + lockManager := storage.NewTestingLockManager() + pdb, dbDir := unittest.TempPebbleDB(i.t) + db := pebbleimpl.ToDB(pdb) + i.t.Cleanup(func() { + require.NoError(i.t, db.Close()) + require.NoError(i.t, os.RemoveAll(dbDir)) + }) + + i.useDefaultHeights() + + collectionsToMarkFinalized := stdmap.NewTimes(100) + collectionsToMarkExecuted := stdmap.NewTimes(100) + blocksToMarkExecuted := stdmap.NewTimes(100) + blockTransactions := stdmap.NewIdentifierMap(100) + + log := zerolog.New(os.Stdout) + blocks := storagemock.NewBlocks(i.t) + + collectionExecutedMetric, err := NewCollectionExecutedMetricImpl( + log, + metrics.NewNoopCollector(), + collectionsToMarkFinalized, + collectionsToMarkExecuted, + blocksToMarkExecuted, + i.collections, + blocks, + blockTransactions, + ) + require.NoError(i.t, err) + + derivedChainData, err := derived.NewDerivedChainData(derived.DefaultDerivedDataCacheSize) + require.NoError(i.t, err) + + indexer, err := New( + log, + metrics.NewNoopCollector(), + db, + i.registers, + i.headers, + i.events, + i.collections, + i.transactions, + i.results, + flow.Testnet.Chain(), + derivedChainData, + collectionExecutedMetric, + lockManager, + ) + require.NoError(i.t, err) + i.indexer = indexer + return i +} + +func (i *indexCoreTest) runIndexBlockData() error { + i.initIndexer() + return i.indexer.IndexBlockData(i.data) +} + +func (i *indexCoreTest) runGetRegister(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) { + i.initIndexer() + return i.indexer.RegisterValue(ID, height) +} + +func TestExecutionState_IndexBlockData(t *testing.T) { + blocks := unittest.BlockchainFixture(5) + block := blocks[len(blocks)-1] + collection := unittest.CollectionFixture(0) + + // this test makes sure indexing block data correctly calls store registers with the + // same entries we create in the block execution data fixture, and correctly converts the registers + t.Run("Index Single Chunk and Single Register", func(t *testing.T) { + trie := TrieUpdateRandomLedgerPayloadsFixture(t) + ed := &execution_data.BlockExecutionData{ + BlockID: block.ID(), + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + { + Collection: &collection, + TrieUpdate: trie, + }, + }, + } + execData := execution_data.NewBlockExecutionDataEntity(block.ID(), ed) + + err := newIndexCoreTest(t, blocks, execData). + initIndexer(). + useDefaultEvents(). + useDefaultTransactionResults(). + // make sure update registers match in length and are the same as the block data ledger payloads + setStoreRegisters(func(t *testing.T, entries flow.RegisterEntries, height uint64) error { + assert.Equal(t, height, block.Height) + assert.Len(t, trie.Payloads, entries.Len()) + + // make sure all the registers from the execution data have been stored and the values match + trieRegistersPayloadComparer(t, trie.Payloads, entries) + return nil + }). + runIndexBlockData() + + assert.NoError(t, err) + }) + + // this test makes sure that if we have multiple trie updates in a single block data + // and some of those trie updates are for the same register but have different values, + // we only update that register once with the latest value, so this makes sure merging of + // registers is done correctly.
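+ // For example, if trie update 1 writes path P -> A and trie update 2 writes P -> B within the +// same block, only the register entry with value B should be stored.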
+ t.Run("Index Multiple Chunks and Merge Same Register Updates", func(t *testing.T) { + tries := []*ledger.TrieUpdate{TrieUpdateRandomLedgerPayloadsFixture(t), TrieUpdateRandomLedgerPayloadsFixture(t)} + // make sure we have two register updates that are updating the same value, so we can check + // if the value from the second update is being persisted instead of first + tries[1].Paths[0] = tries[0].Paths[0] + testValue := tries[1].Payloads[0] + key, err := testValue.Key() + require.NoError(t, err) + testRegisterID, err := convert.LedgerKeyToRegisterID(key) + require.NoError(t, err) + + ed := &execution_data.BlockExecutionData{ + BlockID: block.ID(), + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + { + Collection: &collection, + TrieUpdate: tries[0], + }, + { + Collection: &collection, + TrieUpdate: tries[1], + }, + }, + } + execData := execution_data.NewBlockExecutionDataEntity(block.ID(), ed) + + testRegisterFound := false + err = newIndexCoreTest(t, blocks, execData). + initIndexer(). + useDefaultEvents(). + useDefaultStorageMocks(). + useDefaultTransactionResults(). + // make sure update registers match in length and are same as block data ledger payloads + setStoreRegisters(func(t *testing.T, entries flow.RegisterEntries, height uint64) error { + for _, entry := range entries { + if entry.Key.String() == testRegisterID.String() { + testRegisterFound = true + assert.True(t, testValue.Value().Equals(entry.Value)) + } + } + // we should make sure the register updates are equal to both payloads' length -1 since we don't + // duplicate the same register + assert.Equal(t, len(tries[0].Payloads)+len(tries[1].Payloads)-1, len(entries)) + return nil + }). + runIndexBlockData() + + assert.NoError(t, err) + assert.True(t, testRegisterFound) + }) + + t.Run("Index Events", func(t *testing.T) { + expectedEvents := unittest.EventsFixture(20) + ed := &execution_data.BlockExecutionData{ + BlockID: block.ID(), + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + // split events into 2 chunks + { + Collection: &collection, + Events: expectedEvents[:10], + }, + { + Collection: &collection, + Events: expectedEvents[10:], + }, + }, + } + execData := execution_data.NewBlockExecutionDataEntity(block.ID(), ed) + + err := newIndexCoreTest(t, blocks, execData). + initIndexer(). + useDefaultStorageMocks(). + // make sure all events are stored at once in order + setStoreEvents(func(t *testing.T, actualBlockID flow.Identifier, actualEvents []flow.EventsList) error { + assert.Equal(t, block.ID(), actualBlockID) + require.Len(t, actualEvents, 1) + require.Len(t, actualEvents[0], len(expectedEvents)) + for i, expected := range expectedEvents { + assert.Equal(t, expected, actualEvents[0][i]) + } + return nil + }). + // make sure an empty set of transaction results were stored + setStoreTransactionResults(func(t *testing.T, actualBlockID flow.Identifier, actualResults []flow.LightTransactionResult) error { + assert.Equal(t, block.ID(), actualBlockID) + require.Len(t, actualResults, 0) + return nil + }). + // make sure an empty set of register entries was stored + setStoreRegisters(func(t *testing.T, entries flow.RegisterEntries, height uint64) error { + assert.Equal(t, height, block.Height) + assert.Equal(t, 0, entries.Len()) + return nil + }). 
+ runIndexBlockData() + + assert.NoError(t, err) + }) + + t.Run("Index Tx Results", func(t *testing.T) { + expectedResults := unittest.LightTransactionResultsFixture(20) + ed := &execution_data.BlockExecutionData{ + BlockID: block.ID(), + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + // split events into 2 chunks + { + Collection: &collection, + TransactionResults: expectedResults[:10], + }, + { + Collection: &collection, + TransactionResults: expectedResults[10:], + }, + }, + } + execData := execution_data.NewBlockExecutionDataEntity(block.ID(), ed) + + err := newIndexCoreTest(t, blocks, execData). + initIndexer(). + useDefaultStorageMocks(). + // make sure an empty set of events were stored + setStoreEvents(func(t *testing.T, actualBlockID flow.Identifier, actualEvents []flow.EventsList) error { + assert.Equal(t, block.ID(), actualBlockID) + require.Len(t, actualEvents, 1) + require.Len(t, actualEvents[0], 0) + return nil + }). + // make sure all results are stored at once in order + setStoreTransactionResults(func(t *testing.T, actualBlockID flow.Identifier, actualResults []flow.LightTransactionResult) error { + assert.Equal(t, block.ID(), actualBlockID) + require.Len(t, actualResults, len(expectedResults)) + for i, expected := range expectedResults { + assert.Equal(t, expected, actualResults[i]) + } + return nil + }). + // make sure an empty set of register entries was stored + setStoreRegisters(func(t *testing.T, entries flow.RegisterEntries, height uint64) error { + assert.Equal(t, height, block.Height) + assert.Equal(t, 0, entries.Len()) + return nil + }). + runIndexBlockData() + + assert.NoError(t, err) + }) + + t.Run("Index Collections", func(t *testing.T) { + expectedCollections := unittest.CollectionListFixture(2) + ed := &execution_data.BlockExecutionData{ + BlockID: block.ID(), + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + {Collection: expectedCollections[0]}, + {Collection: expectedCollections[1]}, + }, + } + execData := execution_data.NewBlockExecutionDataEntity(block.ID(), ed) + err := newIndexCoreTest(t, blocks, execData). + initIndexer(). + useDefaultStorageMocks(). + // make sure an empty set of events were stored + setStoreEvents(func(t *testing.T, actualBlockID flow.Identifier, actualEvents []flow.EventsList) error { + assert.Equal(t, block.ID(), actualBlockID) + require.Len(t, actualEvents, 1) + require.Len(t, actualEvents[0], 0) + return nil + }). + // make sure an empty set of transaction results were stored + setStoreTransactionResults(func(t *testing.T, actualBlockID flow.Identifier, actualResults []flow.LightTransactionResult) error { + assert.Equal(t, block.ID(), actualBlockID) + require.Len(t, actualResults, 0) + return nil + }). + // make sure an empty set of register entries was stored + setStoreRegisters(func(t *testing.T, entries flow.RegisterEntries, height uint64) error { + assert.Equal(t, height, block.Height) + assert.Equal(t, 0, entries.Len()) + return nil + }). + runIndexBlockData() + + assert.NoError(t, err) + }) + + t.Run("Index AllTheThings", func(t *testing.T) { + expectedEvents := unittest.EventsFixture(20) + expectedResults := unittest.LightTransactionResultsFixture(20) + expectedCollections := unittest.CollectionListFixture(2) + expectedTries := []*ledger.TrieUpdate{TrieUpdateRandomLedgerPayloadsFixture(t), TrieUpdateRandomLedgerPayloadsFixture(t)} + expectedPayloads := make([]*ledger.Payload, 0) + for _, trie := range expectedTries { + expectedPayloads = append(expectedPayloads, trie.Payloads...) 
+ } + + ed := &execution_data.BlockExecutionData{ + BlockID: block.ID(), + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + { + Collection: expectedCollections[0], + Events: expectedEvents[:10], + TransactionResults: expectedResults[:10], + TrieUpdate: expectedTries[0], + }, + { + Collection: expectedCollections[1], + TransactionResults: expectedResults[10:], + Events: expectedEvents[10:], + TrieUpdate: expectedTries[1], + }, + }, + } + execData := execution_data.NewBlockExecutionDataEntity(block.ID(), ed) + err := newIndexCoreTest(t, blocks, execData). + initIndexer(). + useDefaultStorageMocks(). + // make sure all events are stored at once in order + setStoreEvents(func(t *testing.T, actualBlockID flow.Identifier, actualEvents []flow.EventsList) error { + assert.Equal(t, block.ID(), actualBlockID) + require.Len(t, actualEvents, 1) + require.Len(t, actualEvents[0], len(expectedEvents)) + for i, expected := range expectedEvents { + assert.Equal(t, expected, actualEvents[0][i]) + } + return nil + }). + // make sure all results are stored at once in order + setStoreTransactionResults(func(t *testing.T, actualBlockID flow.Identifier, actualResults []flow.LightTransactionResult) error { + assert.Equal(t, block.ID(), actualBlockID) + require.Len(t, actualResults, len(expectedResults)) + for i, expected := range expectedResults { + assert.Equal(t, expected, actualResults[i]) + } + return nil + }). + // make sure update registers match in length and are same as block data ledger payloads + setStoreRegisters(func(t *testing.T, entries flow.RegisterEntries, actualHeight uint64) error { + assert.Equal(t, actualHeight, block.Height) + assert.Equal(t, entries.Len(), len(expectedPayloads)) + + // make sure all the registers from the execution data have been stored as well the value matches + trieRegistersPayloadComparer(t, expectedPayloads, entries) + return nil + }). + runIndexBlockData() + + assert.NoError(t, err) + }) + + // this test makes sure we get correct error when we try to index block that is not + // within the range of indexed heights. + t.Run("Invalid Heights", func(t *testing.T) { + last := blocks[len(blocks)-1] + ed := &execution_data.BlockExecutionData{ + BlockID: last.ID(), + } + execData := execution_data.NewBlockExecutionDataEntity(last.ID(), ed) + latestHeight := blocks[len(blocks)-3].Height + + err := newIndexCoreTest(t, blocks, execData). + // return a height one smaller than the latest block in storage + setLastHeight(func(t *testing.T) uint64 { + return latestHeight + }). + runIndexBlockData() + + assert.EqualError(t, err, fmt.Sprintf("must index block data with the next height %d, but got %d", latestHeight+1, last.Height)) + }) + + // this test makes sure that if a block we try to index is not found in block storage + // we get correct error. + t.Run("Unknown block ID", func(t *testing.T) { + unknownBlock := unittest.BlockFixture() + ed := &execution_data.BlockExecutionData{ + BlockID: unknownBlock.ID(), + } + execData := execution_data.NewBlockExecutionDataEntity(unknownBlock.ID(), ed) + + err := newIndexCoreTest(t, blocks, execData).runIndexBlockData() + + assert.ErrorIs(t, err, storage.ErrNotFound) + }) + +} + +func TestExecutionState_RegisterValues(t *testing.T) { + t.Run("Get value for single register", func(t *testing.T) { + blocks := unittest.BlockchainFixture(5) + height := blocks[1].Height + id := flow.RegisterID{ + Owner: "1", + Key: "2", + } + val := flow.RegisterValue("0x1") + + values, err := newIndexCoreTest(t, blocks, nil). + initIndexer(). 
+ setGetRegisters(func(t *testing.T, ID flow.RegisterID, height uint64) (flow.RegisterValue, error) { + return val, nil + }). + runGetRegister(id, height) + + assert.NoError(t, err) + assert.Equal(t, values, val) + }) +} + +func newBlockHeadersStorage(blocks []*flow.Block) storage.Headers { + blocksByID := make(map[flow.Identifier]*flow.Block, 0) + for _, b := range blocks { + blocksByID[b.ID()] = b + } + + return synctest.MockBlockHeaderStorage(synctest.WithByID(blocksByID)) +} + +// trieRegistersPayloadComparer checks that trie payloads and register payloads are the same; used for testing. +func trieRegistersPayloadComparer(t *testing.T, triePayloads []*ledger.Payload, registerPayloads flow.RegisterEntries) { + assert.Equal(t, len(triePayloads), len(registerPayloads.Values()), "register lengths should be equal") + + // create a lookup map that matches flow register ID to index in the payloads slice + payloadRegID := make(map[flow.RegisterID]int) + for i, p := range triePayloads { + k, _ := p.Key() + regKey, _ := convert.LedgerKeyToRegisterID(k) + payloadRegID[regKey] = i + } + + for _, entry := range registerPayloads { + index, ok := payloadRegID[entry.Key] + assert.True(t, ok, fmt.Sprintf("register entry not found for key %s", entry.Key.String())) + val := triePayloads[index].Value() + assert.True(t, val.Equals(entry.Value), fmt.Sprintf("payload values are not the same: %s - %s", val, entry.Value)) + } +} + +func TestIndexerIntegration_StoreAndGet(t *testing.T) { + lockManager := storage.NewTestingLockManager() + regOwnerAddress := unittest.RandomAddressFixture() + regOwner := string(regOwnerAddress.Bytes()) + regKey := "code" + registerID := flow.NewRegisterID(regOwnerAddress, regKey) + + pdb, dbDir := unittest.TempPebbleDB(t) + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(dbDir)) + }) + + logger := zerolog.Nop() + metrics := metrics.NewNoopCollector() + + derivedChainData, err := derived.NewDerivedChainData(derived.DefaultDerivedDataCacheSize) + require.NoError(t, err) + + // this test makes sure index values for a single register are correctly updated and the last value is always returned + t.Run("Single Index Value Changes", func(t *testing.T) { + pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { + index, err := New( + logger, + module.ExecutionStateIndexerMetrics(metrics), + pebbleimpl.ToDB(pdb), + registers, + nil, + nil, + nil, + nil, + nil, + flow.Testnet.Chain(), + derivedChainData, + nil, + lockManager, + ) + require.NoError(t, err) + + values := [][]byte{[]byte("1"), []byte("1"), []byte("2"), []byte("3"), []byte("4")} + for i, val := range values { + testDesc := fmt.Sprintf("test iteration number %d failed with test value %s", i, val) + height := uint64(i + 1) + err := storeRegisterWithValue(index, height, regOwner, regKey, val) + require.NoError(t, err) + + results, err := index.RegisterValue(registerID, height) + require.NoError(t, err, testDesc) + assert.Equal(t, val, results) + } + }) + }) + + // this test makes sure that if a register is not found, the value returned is nil without an error, + // which is the behaviour the script executor requires + t.Run("Missing Register", func(t *testing.T) { + pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { + index, err := New( + logger, + module.ExecutionStateIndexerMetrics(metrics), + pebbleimpl.ToDB(pdb), + registers, + nil, + nil, + nil, + nil, + nil, + flow.Testnet.Chain(), + derivedChainData, + nil,
lockManager, + ) + require.NoError(t, err) + + value, err := index.RegisterValue(registerID, 0) + require.Nil(t, value) + assert.NoError(t, err) + }) + }) + + // this test makes sure that even if indexed values for a single register are requested with higher height + // the correct highest height indexed value is returned. + // e.g. we index A{h(1) -> X}, A{h(2) -> Y}, when we request h(4) we get value Y + t.Run("Single Index Value At Later Heights", func(t *testing.T) { + pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { + index, err := New( + logger, + module.ExecutionStateIndexerMetrics(metrics), + pebbleimpl.ToDB(pdb), + registers, + nil, + nil, + nil, + nil, + nil, + flow.Testnet.Chain(), + derivedChainData, + nil, + lockManager, + ) + require.NoError(t, err) + + storeValues := [][]byte{[]byte("1"), []byte("2")} + + require.NoError(t, storeRegisterWithValue(index, 1, regOwner, regKey, storeValues[0])) + + require.NoError(t, index.indexRegisters(nil, 2)) + + value, err := index.RegisterValue(registerID, uint64(2)) + require.NoError(t, err) + assert.Equal(t, storeValues[0], value) + + require.NoError(t, index.indexRegisters(nil, 3)) + + err = storeRegisterWithValue(index, 4, regOwner, regKey, storeValues[1]) + require.NoError(t, err) + + value, err = index.RegisterValue(registerID, uint64(4)) + require.NoError(t, err) + assert.Equal(t, storeValues[1], value) + + value, err = index.RegisterValue(registerID, uint64(3)) + require.NoError(t, err) + assert.Equal(t, storeValues[0], value) + }) + }) + + // this test makes sure we correctly handle weird payloads + t.Run("Empty and Nil Payloads", func(t *testing.T) { + pebbleStorage.RunWithRegistersStorageAtInitialHeights(t, 0, 0, func(registers *pebbleStorage.Registers) { + index, err := New( + logger, + module.ExecutionStateIndexerMetrics(metrics), + pebbleimpl.ToDB(pdb), + registers, + nil, + nil, + nil, + nil, + nil, + flow.Testnet.Chain(), + derivedChainData, + nil, + lockManager, + ) + require.NoError(t, err) + + require.NoError(t, index.indexRegisters(map[ledger.Path]*ledger.Payload{}, 1)) + require.NoError(t, index.indexRegisters(map[ledger.Path]*ledger.Payload{}, 1)) + require.NoError(t, index.indexRegisters(nil, 2)) + }) + }) +} + +// helper to store register at height and increment index range +func storeRegisterWithValue(indexer *IndexerCore, height uint64, owner string, key string, value []byte) error { + payload := LedgerPayloadFixture(owner, key, value) + return indexer.indexRegisters(map[ledger.Path]*ledger.Payload{ledger.DummyPath: payload}, height) +} diff --git a/module/state_synchronization/indexer/indexer_test.go b/module/state_synchronization/indexer/indexer_test.go new file mode 100644 index 00000000000..15048fa8b68 --- /dev/null +++ b/module/state_synchronization/indexer/indexer_test.go @@ -0,0 +1,263 @@ +package indexer + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + mocks "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" + "github.com/onflow/flow-go/module/irrecoverable" + mempool "github.com/onflow/flow-go/module/mempool/mock" + "github.com/onflow/flow-go/storage" + storagemock 
"github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +const testTimeout = 300 * time.Millisecond + +type indexerTest struct { + blocks []*flow.Block + progress *mockProgress + registers *storagemock.RegisterIndex + indexTest *indexCoreTest + worker *Indexer + executionData *mempool.Mempool[flow.Identifier, *execution_data.BlockExecutionDataEntity] + t *testing.T +} + +// newIndexerTest set up a jobqueue integration test with the worker. +// It will create blocks fixtures with the length provided as availableBlocks, and it will set heights already +// indexed to lastIndexedIndex value. Using run it should index all the remaining blocks up to all available blocks. +func newIndexerTest(t *testing.T, availableBlocks int, lastIndexedIndex int) *indexerTest { + blocks := unittest.BlockchainFixture(availableBlocks) + // we use 5th index as the latest indexed height, so we leave 5 more blocks to be indexed by the indexer in this test + lastIndexedHeight := blocks[lastIndexedIndex].Height + progress := newMockProgress() + err := progress.SetProcessedIndex(lastIndexedHeight) + require.NoError(t, err) + + registers := storagemock.NewRegisterIndex(t) + + indexerCoreTest := newIndexCoreTest(t, blocks, nil). + setLastHeight(func(t *testing.T) uint64 { + i, err := progress.ProcessedIndex() + require.NoError(t, err) + + return i + }). + useDefaultBlockByHeight(). + useDefaultEvents(). + useDefaultTransactionResults(). + initIndexer() + + executionData := &mempool.Mempool[flow.Identifier, *execution_data.BlockExecutionDataEntity]{} + exeCache := cache.NewExecutionDataCache( + mock.NewExecutionDataStore(t), + indexerCoreTest.indexer.headers, + nil, + nil, + executionData, + ) + + test := &indexerTest{ + t: t, + blocks: blocks, + progress: progress, + indexTest: indexerCoreTest, + executionData: executionData, + } + + test.worker, err = NewIndexer( + unittest.Logger(), + test.first().Height, + registers, + indexerCoreTest.indexer, + exeCache, + test.latestHeight, + &mockProgressInitializer{progress: progress}, + ) + require.NoError(t, err) + + return test +} + +func (w *indexerTest) setBlockDataGet(f func(ID flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool)) { + w.executionData. + On("Get", mocks.AnythingOfType("flow.Identifier")). 
+		Return(f)
+}
+
+func (w *indexerTest) latestHeight() (uint64, error) {
+	return w.last().Height, nil
+}
+
+func (w *indexerTest) last() *flow.Block {
+	return w.blocks[len(w.blocks)-1]
+}
+
+func (w *indexerTest) first() *flow.Block {
+	return w.blocks[0]
+}
+
+func (w *indexerTest) run(ctx irrecoverable.SignalerContext, reachHeight uint64, cancel context.CancelFunc) {
+	w.worker.Start(ctx)
+
+	unittest.RequireComponentsReadyBefore(w.t, testTimeout, w.worker)
+
+	w.worker.OnExecutionData(nil)
+
+	// wait for end to be reached
+	<-w.progress.WaitForIndex(reachHeight)
+	cancel()
+
+	unittest.RequireCloseBefore(w.t, w.worker.Done(), testTimeout, "timeout waiting for the consumer to be done")
+}
+
+type mockProgressInitializer struct {
+	progress *mockProgress
+}
+
+func (m *mockProgressInitializer) Initialize(defaultIndex uint64) (storage.ConsumerProgress, error) {
+	return m.progress, nil
+}
+
+var _ storage.ConsumerProgress = (*mockProgress)(nil)
+
+type mockProgress struct {
+	index     *atomic.Uint64
+	doneIndex *atomic.Uint64
+	// signal to mark that the progress reached an index set with WaitForIndex
+	doneChan chan struct{}
+}
+
+func newMockProgress() *mockProgress {
+	return &mockProgress{
+		index:     atomic.NewUint64(0),
+		doneIndex: atomic.NewUint64(0),
+		doneChan:  make(chan struct{}),
+	}
+}
+
+func (w *mockProgress) ProcessedIndex() (uint64, error) {
+	return w.index.Load(), nil
+}
+
+func (w *mockProgress) SetProcessedIndex(index uint64) error {
+	w.index.Store(index)
+
+	if index > 0 && index == w.doneIndex.Load() {
+		close(w.doneChan)
+	}
+
+	return nil
+}
+
+func (w *mockProgress) BatchSetProcessedIndex(_ uint64, _ storage.ReaderBatchWriter) error {
+	return fmt.Errorf("batch not supported")
+}
+
+func (w *mockProgress) InitProcessedIndex(index uint64) error {
+	w.index.Store(index)
+	return nil
+}
+
+// WaitForIndex returns a channel that is closed once the processed index reaches n,
+// letting the test know that the consumer reached that point
+func (w *mockProgress) WaitForIndex(n uint64) <-chan struct{} {
+	w.doneIndex.Store(n)
+	return w.doneChan
+}
+
+func TestIndexer_Success(t *testing.T) {
+	// use the block at index 5 as the latest indexed height, leaving the remaining blocks
+	// to be indexed by the indexer in this test
+	blocks := 10
+	lastIndexedIndex := 5
+	test := newIndexerTest(t, blocks, lastIndexedIndex)
+
+	test.setBlockDataGet(func(ID flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool) {
+		trie := TrieUpdateRandomLedgerPayloadsFixture(t)
+		collection := unittest.CollectionFixture(0)
+		ed := &execution_data.BlockExecutionData{
+			BlockID: ID,
+			ChunkExecutionDatas: []*execution_data.ChunkExecutionData{{
+				Collection: &collection,
+				TrieUpdate: trie,
+			}},
+		}
+
+		// this closure captures each returned block execution data, so that for every one of them
+		// we can verify that the registers passed to the store match what the execution data
+		// returned, and that the height is correct
+		test.indexTest.setStoreRegisters(func(t *testing.T, entries flow.RegisterEntries, height uint64) error {
+			var blockHeight uint64
+			for _, b := range test.blocks {
+				if b.ID() == ID {
+					blockHeight = b.Height
+				}
+			}
+
+			assert.Equal(t, blockHeight, height)
+			trieRegistersPayloadComparer(t, trie.Payloads, entries)
+			return nil
+		})
+
+		return execution_data.NewBlockExecutionDataEntity(ID, ed), true
+	})
+
+	signalerCtx, cancel := irrecoverable.NewMockSignalerContextWithCancel(t, context.Background())
+	lastHeight := test.blocks[len(test.blocks)-1].Height
+	test.run(signalerCtx, lastHeight, cancel)
+
+	// make sure store was called the correct number of times
+	test.indexTest.registers.AssertNumberOfCalls(t, "Store", blocks-lastIndexedIndex-1)
+}
+
+func TestIndexer_Failure(t *testing.T) {
+	// use the block at index 5 as the latest indexed height, leaving the remaining blocks
+	// to be indexed by the indexer in this test
+	blocks := 10
+	lastIndexedIndex := 5
+	test := newIndexerTest(t, blocks, lastIndexedIndex)
+
+	test.setBlockDataGet(func(ID flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool) {
+		trie := TrieUpdateRandomLedgerPayloadsFixture(t)
+		collection := unittest.CollectionFixture(0)
+		ed := &execution_data.BlockExecutionData{
+			BlockID: ID,
+			ChunkExecutionDatas: []*execution_data.ChunkExecutionData{{
+				Collection: &collection,
+				TrieUpdate: trie,
+			}},
+		}
+
+		// fail when trying to persist registers
+		test.indexTest.setStoreRegisters(func(t *testing.T, entries flow.RegisterEntries, height uint64) error {
+			return fmt.Errorf("error persisting data")
+		})
+
+		return execution_data.NewBlockExecutionDataEntity(ID, ed), true
+	})
+
+	// make sure the error returned is as expected
+	expectedErr := fmt.Errorf(
+		"failed to index block data at height %d: %w",
+		test.blocks[lastIndexedIndex].Height+1,
+		fmt.Errorf(
+			"could not index register payloads at height %d: %w", test.blocks[lastIndexedIndex].Height+1, fmt.Errorf("error persisting data")),
+	)
+
+	_, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContextExpectError(t, context.Background(), expectedErr)
+	lastHeight := test.blocks[lastIndexedIndex].Height
+	test.run(signalerCtx, lastHeight, cancel)
+
+	// make sure store was called the correct number of times
+	test.indexTest.registers.AssertNumberOfCalls(t, "Store", 1) // it fails after the first run
+}
diff --git a/module/state_synchronization/indexer/ledger_trie_updates_test_utils.go b/module/state_synchronization/indexer/ledger_trie_updates_test_utils.go
new file mode 100644
index 00000000000..8547c43645b
--- /dev/null
+++ b/module/state_synchronization/indexer/ledger_trie_updates_test_utils.go
@@ -0,0 +1,80 @@
+package indexer
+
+import (
+	"crypto/rand"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/pathfinder"
+	"github.com/onflow/flow-go/ledger/complete"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TrieUpdateRandomLedgerPayloadsFixture creates a test trie update with multiple test payloads
+// for use in testing register persistence functionality.
+func TrieUpdateRandomLedgerPayloadsFixture(t *testing.T) *ledger.TrieUpdate {
+	return TrieUpdateWithPayloadsFixture(
+		t,
+		[]*ledger.Payload{
+			LedgerRandomPayloadFixture(t),
+			LedgerRandomPayloadFixture(t),
+			LedgerRandomPayloadFixture(t),
+			LedgerRandomPayloadFixture(t),
+		})
+}
+
+// TrieUpdateWithPayloadsFixture creates a trie update from the provided payloads.
+// It extracts keys and values from payloads and constructs a proper ledger update
+// and trie update structure for testing purposes.
+func TrieUpdateWithPayloadsFixture(t *testing.T, payloads []*ledger.Payload) *ledger.TrieUpdate { + keys := make([]ledger.Key, 0) + values := make([]ledger.Value, 0) + for _, payload := range payloads { + key, err := payload.Key() + require.NoError(t, err) + keys = append(keys, key) + values = append(values, payload.Value()) + } + + update, err := ledger.NewUpdate(ledger.DummyState, keys, values) + require.NoError(t, err) + trie, err := pathfinder.UpdateToTrieUpdate(update, complete.DefaultPathFinderVersion) + require.NoError(t, err) + return trie +} + +// LedgerRandomPayloadFixture creates a single test payload with a random owner, key, and value +// for use in ledger and register testing scenarios. +func LedgerRandomPayloadFixture(t *testing.T) *ledger.Payload { + owner := unittest.RandomAddressFixture() + key := make([]byte, 8) + _, err := rand.Read(key) + require.NoError(t, err) + val := make([]byte, 8) + _, err = rand.Read(val) + require.NoError(t, err) + return LedgerPayloadFixture(owner.String(), fmt.Sprintf("%x", key), val) +} + +// LedgerPayloadFixture creates a ledger payload with the specified owner, key, and value. +// It constructs a proper ledger key with owner and key parts and returns a payload +// suitable for testing ledger operations. +func LedgerPayloadFixture(owner string, key string, value []byte) *ledger.Payload { + k := ledger.Key{ + KeyParts: []ledger.KeyPart{ + { + Type: ledger.KeyPartOwner, + Value: []byte(owner), + }, + { + Type: ledger.KeyPartKey, + Value: []byte(key), + }, + }, + } + + return ledger.NewPayload(k, value) +} diff --git a/module/state_synchronization/indexer/util.go b/module/state_synchronization/indexer/util.go new file mode 100644 index 00000000000..5526776716b --- /dev/null +++ b/module/state_synchronization/indexer/util.go @@ -0,0 +1,137 @@ +package indexer + +import ( + "fmt" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/ccf" + "github.com/onflow/cadence/stdlib" + + "github.com/onflow/flow-go/fvm/storage/derived" + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/model/flow" +) + +var ( + accountContractUpdated = flow.EventType(stdlib.AccountContractUpdatedEventType.ID()) +) + +// hasAuthorizedTransaction checks if the provided account was an authorizer in any of the transactions +// within the provided collections. +func hasAuthorizedTransaction(collections []*flow.Collection, address flow.Address) bool { + for _, collection := range collections { + for _, tx := range collection.Transactions { + for _, authorizer := range tx.Authorizers { + if authorizer == address { + return true + } + } + } + } + + return false +} + +// findContractUpdates returns a map of common.AddressLocation for all contracts updated within the +// provided events. +// No errors are expected during normal operation and indicate an invalid protocol event was encountered +func findContractUpdates(events []flow.Event) (map[common.Location]struct{}, error) { + invalidatedPrograms := make(map[common.Location]struct{}) + for _, event := range events { + if event.Type == accountContractUpdated { + location, err := parseAccountContractUpdated(&event) + if err != nil { + return nil, fmt.Errorf("could not parse account contract updated event: %w", err) + } + invalidatedPrograms[location] = struct{}{} + } + } + return invalidatedPrograms, nil +} + +// parseAccountContractUpdated parses an account contract updated event and returns the address location. 
+// No errors are expected during normal operation and indicate an invalid protocol event was encountered
+func parseAccountContractUpdated(event *flow.Event) (common.AddressLocation, error) {
+	payload, err := ccf.Decode(nil, event.Payload)
+	if err != nil {
+		return common.AddressLocation{}, fmt.Errorf("could not unmarshal event payload: %w", err)
+	}
+
+	cdcEvent, ok := payload.(cadence.Event)
+	if !ok {
+		return common.AddressLocation{}, fmt.Errorf("invalid event payload type: %T", payload)
+	}
+
+	fields := cadence.FieldsMappedByName(cdcEvent)
+
+	addressField := fields[stdlib.AccountEventAddressParameter.Identifier]
+	address, ok := addressField.(cadence.Address)
+	if !ok {
+		return common.AddressLocation{}, fmt.Errorf("invalid Cadence type for address field: %T", addressField)
+	}
+
+	contractNameField := fields[stdlib.AccountEventContractParameter.Identifier]
+	contractName, ok := contractNameField.(cadence.String)
+	if !ok {
+		return common.AddressLocation{}, fmt.Errorf(
+			"invalid Cadence type for contract name field: %T",
+			contractNameField,
+		)
+	}
+
+	return common.NewAddressLocation(
+		nil,
+		common.Address(address),
+		string(contractName),
+	), nil
+}
+
+var _ derived.TransactionInvalidator = (*accessInvalidator)(nil)
+
+// accessInvalidator is a derived.TransactionInvalidator that invalidates programs and meter param overrides.
+type accessInvalidator struct {
+	programs            *programInvalidator
+	executionParameters *executionParametersInvalidator
+}
+
+func (inv *accessInvalidator) ProgramInvalidator() derived.ProgramInvalidator {
+	return inv.programs
+}
+
+func (inv *accessInvalidator) ExecutionParametersInvalidator() derived.ExecutionParametersInvalidator {
+	return inv.executionParameters
+}
+
+var _ derived.ProgramInvalidator = (*programInvalidator)(nil)
+
+// programInvalidator is a derived.ProgramInvalidator that invalidates all programs or a specific set of programs.
+// this is used to invalidate all programs whose code was updated in a specific block.
+type programInvalidator struct {
+	invalidateAll bool
+	invalidated   map[common.Location]struct{}
+}
+
+func (inv *programInvalidator) ShouldInvalidateEntries() bool {
+	return inv.invalidateAll
+}
+
+func (inv *programInvalidator) ShouldInvalidateEntry(location common.AddressLocation, _ *derived.Program, _ *snapshot.ExecutionSnapshot) bool {
+	_, ok := inv.invalidated[location]
+	return inv.invalidateAll || ok
+}
+
+var _ derived.ExecutionParametersInvalidator = (*executionParametersInvalidator)(nil)
+
+// executionParametersInvalidator is a derived.ExecutionParametersInvalidator that invalidates meter param overrides and execution version.
+type executionParametersInvalidator struct { + invalidateAll bool +} + +func (inv *executionParametersInvalidator) ShouldInvalidateEntries() bool { + return inv.invalidateAll +} + +func (inv *executionParametersInvalidator) ShouldInvalidateEntry(_ struct{}, _ derived.StateExecutionParameters, _ *snapshot.ExecutionSnapshot) bool { + return inv.invalidateAll +} diff --git a/module/state_synchronization/indexer/util_test.go b/module/state_synchronization/indexer/util_test.go new file mode 100644 index 00000000000..664ef945fba --- /dev/null +++ b/module/state_synchronization/indexer/util_test.go @@ -0,0 +1,100 @@ +package indexer + +import ( + "testing" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/ccf" + "github.com/onflow/cadence/stdlib" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestFindContractUpdates tests the findContractUpdates function returns all contract updates +func TestFindContractUpdates(t *testing.T) { + t.Parallel() + + g := unittest.NewEventGenerator(unittest.EventGenerator.WithEncoding(entities.EventEncodingVersion_CCF_V0)) + + expectedAddress1 := unittest.RandomAddressFixture() + expected1 := common.NewAddressLocation(nil, common.Address(expectedAddress1), "TestContract1") + + expectedAddress2 := unittest.RandomAddressFixture() + expected2 := common.NewAddressLocation(nil, common.Address(expectedAddress2), "TestContract2") + + events := []flow.Event{ + g.New(), // random event + contractUpdatedFixture( + t, + expected1.Address, + expected1.Name, + ), + g.New(), // random event + g.New(), // random event + contractUpdatedFixture( + t, + expected2.Address, + expected2.Name, + ), + } + + updates, err := findContractUpdates(events) + require.NoError(t, err) + + assert.Len(t, updates, 2) + + _, ok := updates[expected1] + assert.Truef(t, ok, "could not find %s", expected1.ID()) + + _, ok = updates[expected2] + assert.Truef(t, ok, "could not find %s", expected2.ID()) +} + +func contractUpdatedFixture(t *testing.T, address common.Address, contractName string) flow.Event { + contractUpdateEventType := cadence.NewEventType( + stdlib.AccountContractAddedEventType.Location, + stdlib.AccountContractAddedEventType.QualifiedIdentifier(), + []cadence.Field{ + { + Identifier: "address", + Type: cadence.AddressType, + }, + { + Identifier: "codeHash", + Type: cadence.AddressType, // actually a byte slice, but we're ignoring it anyway + }, + { + Identifier: "contract", + Type: cadence.StringType, + }, + }, + nil, + ) + + contractString, err := cadence.NewString(contractName) + require.NoError(t, err) + + testEvent := cadence.NewEvent( + []cadence.Value{ + cadence.NewAddress(address), + cadence.NewAddress(flow.EmptyAddress), + contractString, + }).WithType(contractUpdateEventType) + + payload, err := ccf.Encode(testEvent) + require.NoError(t, err) + + return flow.Event{ + Type: accountContractUpdated, + TransactionID: unittest.IdentifierFixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: payload, + } +} diff --git a/module/state_synchronization/mock/execution_data_requester.go b/module/state_synchronization/mock/execution_data_requester.go index 139c8102c6a..5b521e8b97d 100644 --- a/module/state_synchronization/mock/execution_data_requester.go +++ b/module/state_synchronization/mock/execution_data_requester.go @@ -1,14 +1,12 @@ -// Code 
generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package state_synchronization +package mock import ( irrecoverable "github.com/onflow/flow-go/module/irrecoverable" mock "github.com/stretchr/testify/mock" model "github.com/onflow/flow-go/consensus/hotstuff/model" - - state_synchronization "github.com/onflow/flow-go/module/state_synchronization" ) // ExecutionDataRequester is an autogenerated mock type for the ExecutionDataRequester type @@ -16,15 +14,14 @@ type ExecutionDataRequester struct { mock.Mock } -// AddOnExecutionDataReceivedConsumer provides a mock function with given fields: fn -func (_m *ExecutionDataRequester) AddOnExecutionDataReceivedConsumer(fn state_synchronization.OnExecutionDataReceivedConsumer) { - _m.Called(fn) -} - -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *ExecutionDataRequester) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -37,15 +34,47 @@ func (_m *ExecutionDataRequester) Done() <-chan struct{} { return r0 } +// HighestConsecutiveHeight provides a mock function with no fields +func (_m *ExecutionDataRequester) HighestConsecutiveHeight() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestConsecutiveHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // OnBlockFinalized provides a mock function with given fields: _a0 func (_m *ExecutionDataRequester) OnBlockFinalized(_a0 *model.Block) { _m.Called(_a0) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *ExecutionDataRequester) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -63,13 +92,12 @@ func (_m *ExecutionDataRequester) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewExecutionDataRequester interface { +// NewExecutionDataRequester creates a new instance of ExecutionDataRequester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionDataRequester(t interface { mock.TestingT Cleanup(func()) -} - -// NewExecutionDataRequester creates a new instance of ExecutionDataRequester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewExecutionDataRequester(t mockConstructorTestingTNewExecutionDataRequester) *ExecutionDataRequester { +}) *ExecutionDataRequester { mock := &ExecutionDataRequester{} mock.Mock.Test(t) diff --git a/module/state_synchronization/mock/index_reporter.go b/module/state_synchronization/mock/index_reporter.go new file mode 100644 index 00000000000..bbf6138cd64 --- /dev/null +++ b/module/state_synchronization/mock/index_reporter.go @@ -0,0 +1,80 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import mock "github.com/stretchr/testify/mock" + +// IndexReporter is an autogenerated mock type for the IndexReporter type +type IndexReporter struct { + mock.Mock +} + +// HighestIndexedHeight provides a mock function with no fields +func (_m *IndexReporter) HighestIndexedHeight() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HighestIndexedHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LowestIndexedHeight provides a mock function with no fields +func (_m *IndexReporter) LowestIndexedHeight() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LowestIndexedHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewIndexReporter creates a new instance of IndexReporter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIndexReporter(t interface { + mock.TestingT + Cleanup(func()) +}) *IndexReporter { + mock := &IndexReporter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/state_synchronization/requester/distributer.go b/module/state_synchronization/requester/distributer.go index ded5ebb95a2..ac97fb07982 100644 --- a/module/state_synchronization/requester/distributer.go +++ b/module/state_synchronization/requester/distributer.go @@ -11,7 +11,7 @@ import ( // distributes them to subscribers type ExecutionDataDistributor struct { consumers []state_synchronization.OnExecutionDataReceivedConsumer - lock sync.Mutex + lock sync.RWMutex } func NewExecutionDataDistributor() *ExecutionDataDistributor { @@ -28,8 +28,8 @@ func (p *ExecutionDataDistributor) AddOnExecutionDataReceivedConsumer(consumer s // OnExecutionDataReceived is called when new execution data is received func (p *ExecutionDataDistributor) OnExecutionDataReceived(executionData *execution_data.BlockExecutionDataEntity) { - p.lock.Lock() - defer p.lock.Unlock() + p.lock.RLock() + defer p.lock.RUnlock() for _, consumer := range p.consumers { consumer(executionData) diff --git a/module/state_synchronization/requester/execution_data_requester.go b/module/state_synchronization/requester/execution_data_requester.go index b00b3052def..c6c04823f01 100644 --- a/module/state_synchronization/requester/execution_data_requester.go +++ b/module/state_synchronization/requester/execution_data_requester.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "sync" "time" "github.com/rs/zerolog" @@ -16,6 +15,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/jobqueue" 
"github.com/onflow/flow-go/module/state_synchronization" @@ -23,6 +23,7 @@ import ( "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/logging" ) // The ExecutionDataRequester downloads ExecutionData for sealed blocks from other participants in @@ -43,7 +44,7 @@ import ( // // The requester is made up of 3 subcomponents: // -// * OnFinalizedBlock: receives block finalized events from the finalization distributor and +// * OnBlockFinalized: receives block finalized events from the finalization distributor and // forwards them to the blockConsumer. // // * blockConsumer: is a jobqueue that receives block finalization events. On each event, @@ -60,7 +61,7 @@ import ( // consecutive height at least once. // // +------------------+ +---------------+ +----------------------+ -// -->| OnFinalizedBlock |----->| blockConsumer | +-->| notificationConsumer | +// -->| OnBlockFinalized |----->| blockConsumer | +-->| notificationConsumer | // +------------------+ +-------+-------+ | +-----------+----------+ // | | | // +------+------+ | +------+------+ @@ -115,7 +116,6 @@ type ExecutionDataConfig struct { type executionDataRequester struct { component.Component - cm *component.ComponentManager downloader execution_data.Downloader metrics module.ExecutionDataRequesterMetrics config ExecutionDataConfig @@ -123,8 +123,6 @@ type executionDataRequester struct { // Local db objects headers storage.Headers - results storage.ExecutionResults - seals storage.Seals executionDataReader *jobs.ExecutionDataReader @@ -135,10 +133,8 @@ type executionDataRequester struct { blockConsumer *jobqueue.ComponentConsumer notificationConsumer *jobqueue.ComponentConsumer - // List of callbacks to call when ExecutionData is successfully fetched for a block - consumers []state_synchronization.OnExecutionDataReceivedConsumer - - consumerMu sync.RWMutex + execDataCache *cache.ExecutionDataCache + distributor *ExecutionDataDistributor } var _ state_synchronization.ExecutionDataRequester = (*executionDataRequester)(nil) @@ -148,23 +144,23 @@ func New( log zerolog.Logger, edrMetrics module.ExecutionDataRequesterMetrics, downloader execution_data.Downloader, - processedHeight storage.ConsumerProgress, - processedNotifications storage.ConsumerProgress, + execDataCache *cache.ExecutionDataCache, + processedHeight storage.ConsumerProgressInitializer, + processedNotifications storage.ConsumerProgressInitializer, state protocol.State, headers storage.Headers, - results storage.ExecutionResults, - seals storage.Seals, cfg ExecutionDataConfig, -) state_synchronization.ExecutionDataRequester { + distributor *ExecutionDataDistributor, +) (state_synchronization.ExecutionDataRequester, error) { e := &executionDataRequester{ log: log.With().Str("component", "execution_data_requester").Logger(), downloader: downloader, + execDataCache: execDataCache, metrics: edrMetrics, headers: headers, - results: results, - seals: seals, config: cfg, finalizationNotifier: engine.NewNotifier(), + distributor: distributor, } executionDataNotifier := engine.NewNotifier() @@ -183,7 +179,7 @@ func New( // from `processedHeight + 1`. If the database is empty, rootHeight will be used to init the // last processed height. Once the execution data is fetched and stored, it notifies // `executionDataNotifier`. 
- e.blockConsumer = jobqueue.NewComponentConsumer( + blockConsumer, err := jobqueue.NewComponentConsumer( e.log.With().Str("module", "block_consumer").Logger(), e.finalizationNotifier.Channel(), // to listen to finalization events to find newly sealed blocks processedHeight, // read and persist the downloaded height @@ -193,26 +189,30 @@ func New( fetchWorkers, // the number of concurrent workers e.config.MaxSearchAhead, // max number of unsent notifications to allow before pausing new fetches ) + if err != nil { + return nil, fmt.Errorf("failed to create block consumer: %w", err) + } + e.blockConsumer = blockConsumer + // notifies notificationConsumer when new ExecutionData blobs are available // SetPostNotifier will notify executionDataNotifier AFTER e.blockConsumer.LastProcessedIndex is updated. // Even though it doesn't guarantee to notify for every height at least once, the notificationConsumer is - // able to guarantee to process every height at least once, because the notificationConsumer finds new job - // using executionDataReader which finds new height using e.blockConsumer.LastProcessedIndex + // able to guarantee to process every height at least once, because the notificationConsumer finds new jobs + // using executionDataReader which finds new heights using e.blockConsumer.LastProcessedIndex e.blockConsumer.SetPostNotifier(func(module.JobID) { executionDataNotifier.Notify() }) // jobqueue Jobs object tracks downloaded execution data by height. This is used by the // notificationConsumer to get downloaded execution data from storage. e.executionDataReader = jobs.NewExecutionDataReader( - e.downloader, - e.headers, - e.results, - e.seals, + e.execDataCache, e.config.FetchTimeout, // method to get highest consecutive height that has downloaded execution data. it is used // here by the notification job consumer to discover new jobs. // Note: we don't want to notify notificationConsumer for a block if it has not downloaded // execution data yet. - e.blockConsumer.LastProcessedIndex, + func() (uint64, error) { + return e.blockConsumer.LastProcessedIndex(), nil + }, ) // notificationConsumer consumes `OnExecutionDataFetched` events, and ensures its consumer @@ -226,7 +226,7 @@ func New( // `e.consumers`. // Note: the `e.consumers` will be guaranteed to receive at least one `OnExecutionDataFetched` event // for each sealed block in consecutive block height order. - e.notificationConsumer = jobqueue.NewComponentConsumer( + e.notificationConsumer, err = jobqueue.NewComponentConsumer( e.log.With().Str("module", "notification_consumer").Logger(), executionDataNotifier.Channel(), // listen for notifications from the block consumer processedNotifications, // read and persist the notified height @@ -236,15 +236,18 @@ func New( 1, // use a single worker to ensure notification is delivered in consecutive order 0, // search ahead limit controlled by worker count ) + if err != nil { + return nil, fmt.Errorf("failed to create notification consumer: %w", err) + } - builder := component.NewComponentManagerBuilder(). - AddWorker(e.runBlockConsumer). - AddWorker(e.runNotificationConsumer) + e.metrics.ExecutionDataFetchFinished(0, true, e.blockConsumer.LastProcessedIndex()) - e.cm = builder.Build() - e.Component = e.cm + e.Component = component.NewComponentManagerBuilder(). + AddWorker(e.runBlockConsumer). + AddWorker(e.runNotificationConsumer). 
+ Build() - return e + return e, nil } // OnBlockFinalized accepts block finalization notifications from the FollowerDistributor @@ -252,16 +255,18 @@ func (e *executionDataRequester) OnBlockFinalized(*model.Block) { e.finalizationNotifier.Notify() } -// AddOnExecutionDataReceivedConsumer adds a callback to be called when a new ExecutionData is received -// Callback Implementations must: -// - be concurrency safe -// - be non-blocking -// - handle repetition of the same events (with some processing overhead). -func (e *executionDataRequester) AddOnExecutionDataReceivedConsumer(fn state_synchronization.OnExecutionDataReceivedConsumer) { - e.consumerMu.Lock() - defer e.consumerMu.Unlock() +// HighestConsecutiveHeight returns the highest consecutive block height for which ExecutionData +// has been received. +// This method must only be called after the component is Ready. If it is called early, an error is returned. +func (e *executionDataRequester) HighestConsecutiveHeight() (uint64, error) { + select { + case <-e.blockConsumer.Ready(): + default: + // LastProcessedIndex is not meaningful until the component has completed startup + return 0, fmt.Errorf("HighestConsecutiveHeight must not be called before the component is ready") + } - e.consumers = append(e.consumers, fn) + return e.blockConsumer.LastProcessedIndex(), nil } // runBlockConsumer runs the blockConsumer component @@ -361,7 +366,7 @@ func (e *executionDataRequester) processSealedHeight(ctx irrecoverable.SignalerC }) } -func (e *executionDataRequester) processFetchRequest(ctx irrecoverable.SignalerContext, blockID flow.Identifier, height uint64, fetchTimeout time.Duration) error { +func (e *executionDataRequester) processFetchRequest(parentCtx irrecoverable.SignalerContext, blockID flow.Identifier, height uint64, fetchTimeout time.Duration) error { logger := e.log.With(). Str("block_id", blockID.String()). Uint64("height", height). @@ -369,26 +374,19 @@ func (e *executionDataRequester) processFetchRequest(ctx irrecoverable.SignalerC logger.Debug().Msg("processing fetch request") - seal, err := e.seals.FinalizedSealForBlock(blockID) - if err != nil { - ctx.Throw(fmt.Errorf("failed to get seal for block %s: %w", blockID, err)) - } - - result, err := e.results.ByID(seal.ResultID) - if err != nil { - ctx.Throw(fmt.Errorf("failed to lookup execution result for block %s: %w", blockID, err)) - } - - logger = logger.With().Str("execution_data_id", result.ExecutionDataID.String()).Logger() - start := time.Now() e.metrics.ExecutionDataFetchStarted() logger.Debug().Msg("downloading execution data") - _, err = e.fetchExecutionData(ctx, result.ExecutionDataID, fetchTimeout) + ctx, cancel := context.WithTimeout(parentCtx, fetchTimeout) + defer cancel() + + execData, err := e.execDataCache.ByBlockID(ctx, blockID) - e.metrics.ExecutionDataFetchFinished(time.Since(start), err == nil, height) + // use the last processed index to ensure the metrics reflect the highest _consecutive_ height. + // this makes it easier to see when downloading gets stuck at a height. + e.metrics.ExecutionDataFetchFinished(time.Since(start), err == nil, e.blockConsumer.LastProcessedIndex()) if isInvalidBlobError(err) { // This means an execution result was sealed with an invalid execution data id (invalid data). 
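The `HighestConsecutiveHeight` guard introduced above uses a non-blocking receive on the component's `Ready()` channel: if the channel has not been closed yet, the `default` branch fires immediately and the call is rejected instead of blocking. A minimal sketch of that pattern, with a hypothetical `consumer` type standing in for the block consumer:

```go
package example

import "fmt"

// consumer stands in for the jobqueue block consumer (hypothetical type):
// ready is closed once startup completes, and lastProcessedIndex is only
// meaningful after that point.
type consumer struct {
	ready              chan struct{}
	lastProcessedIndex uint64
}

// HighestHeight mirrors the guard above: the non-blocking select tells us
// whether startup has finished without ever waiting on the channel.
func (c *consumer) HighestHeight() (uint64, error) {
	select {
	case <-c.ready:
		// closed channel: startup completed, the index is safe to read
	default:
		return 0, fmt.Errorf("must not be called before the component is ready")
	}
	return c.lastProcessedIndex, nil
}
```

Closing the channel (rather than sending on it) is what makes this work: every subsequent receive on a closed channel succeeds immediately, so the guard is cheap and safe for concurrent callers.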
@@ -409,31 +407,16 @@ func (e *executionDataRequester) processFetchRequest(ctx irrecoverable.SignalerC if err != nil { logger.Error().Err(err).Msg("unexpected error fetching execution data") - ctx.Throw(err) + parentCtx.Throw(err) } - logger.Info().Msg("execution data fetched") + logger.Info(). + Hex("execution_data_id", logging.ID(execData.ExecutionDataID)). + Msg("execution data fetched") return nil } -// fetchExecutionData fetches the ExecutionData by its ID, and times out if fetchTimeout is exceeded -func (e *executionDataRequester) fetchExecutionData(signalerCtx irrecoverable.SignalerContext, executionDataID flow.Identifier, fetchTimeout time.Duration) (*execution_data.BlockExecutionData, error) { - ctx, cancel := context.WithTimeout(signalerCtx, fetchTimeout) - defer cancel() - - // Get the data from the network - // this is a blocking call, won't be unblocked until either hitting error (including timeout) or - // the data is received - executionData, err := e.downloader.Download(ctx, executionDataID) - - if err != nil { - return nil, err - } - - return executionData, nil -} - // Notification Worker Methods func (e *executionDataRequester) processNotificationJob(ctx irrecoverable.SignalerContext, job module.Job, jobComplete func()) { @@ -443,26 +426,16 @@ func (e *executionDataRequester) processNotificationJob(ctx irrecoverable.Signal ctx.Throw(fmt.Errorf("failed to convert job to entry: %w", err)) } - e.processNotification(ctx, entry.Height, entry.ExecutionData) - jobComplete() -} - -func (e *executionDataRequester) processNotification(ctx irrecoverable.SignalerContext, height uint64, executionData *execution_data.BlockExecutionDataEntity) { - e.log.Debug().Msgf("notifying for block %d", height) + e.log.Debug(). + Hex("block_id", logging.ID(entry.BlockID)). + Uint64("height", entry.Height). 
+ Msgf("notifying for block") // send notifications - e.notifyConsumers(executionData) - - e.metrics.NotificationSent(height) -} - -func (e *executionDataRequester) notifyConsumers(executionData *execution_data.BlockExecutionDataEntity) { - e.consumerMu.RLock() - defer e.consumerMu.RUnlock() + e.distributor.OnExecutionDataReceived(entry.ExecutionData) + jobComplete() - for _, fn := range e.consumers { - fn(executionData) - } + e.metrics.NotificationSent(entry.Height) } func isInvalidBlobError(err error) bool { diff --git a/module/state_synchronization/requester/execution_data_requester_test.go b/module/state_synchronization/requester/execution_data_requester_test.go index 747d9568afd..3684679d900 100644 --- a/module/state_synchronization/requester/execution_data_requester_test.go +++ b/module/state_synchronization/requester/execution_data_requester_test.go @@ -1,18 +1,16 @@ -package requester_test +package requester import ( "context" "fmt" "math/rand" - "os" "sync" "testing" "time" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -20,29 +18,33 @@ import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/consensus/hotstuff/notifications/pubsub" + "github.com/onflow/flow-go/engine/access/subscription" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" exedatamock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/state_synchronization" - "github.com/onflow/flow-go/module/state_synchronization/requester" synctest "github.com/onflow/flow-go/module/state_synchronization/requester/unittest" "github.com/onflow/flow-go/state/protocol" statemock "github.com/onflow/flow-go/state/protocol/mock" - bstorage "github.com/onflow/flow-go/storage/badger" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) type ExecutionDataRequesterSuite struct { suite.Suite - blobstore blobs.Blobstore - datastore datastore.Batching - db *badger.DB - downloader *exedatamock.Downloader + blobstore blobs.Blobstore + datastore datastore.Batching + db *pebble.DB + downloader *exedatamock.Downloader + distributor *ExecutionDataDistributor run edTestRun @@ -51,7 +53,6 @@ type ExecutionDataRequesterSuite struct { func TestExecutionDataRequesterSuite(t *testing.T) { t.Parallel() - rand.Seed(time.Now().UnixMilli()) suite.Run(t, new(ExecutionDataRequesterSuite)) } @@ -87,7 +88,7 @@ type edTestRun struct { type testExecutionDataCallback func(*execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) -func mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) *exedatamock.Downloader { +func MockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) *exedatamock.Downloader { downloader := new(exedatamock.Downloader) get := func(id flow.Identifier) (*execution_data.BlockExecutionData, error) { @@ -112,7 +113,7 @@ func 
mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) return ed.ExecutionData, nil } - downloader.On("Download", mock.Anything, mock.AnythingOfType("flow.Identifier")). + downloader.On("Get", mock.Anything, mock.AnythingOfType("flow.Identifier")). Return( func(ctx context.Context, id flow.Identifier) *execution_data.BlockExecutionData { ed, _ := get(id) @@ -137,7 +138,7 @@ func (suite *ExecutionDataRequesterSuite) mockProtocolState(blocksByHeight map[u state := new(statemock.State) suite.mockSnapshot = new(mockSnapshot) - suite.mockSnapshot.set(blocksByHeight[0].Header, nil) // genesis block + suite.mockSnapshot.set(blocksByHeight[0].ToHeader(), nil) // genesis block state.On("Sealed").Return(suite.mockSnapshot).Maybe() return state @@ -172,13 +173,13 @@ func (suite *ExecutionDataRequesterSuite) TestRequesterProcessesBlocks() { for _, run := range tests { suite.Run(run.name, func() { - unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) { + unittest.RunWithPebbleDB(suite.T(), func(db *pebble.DB) { suite.db = db suite.datastore = dssync.MutexWrap(datastore.NewMapDatastore()) suite.blobstore = blobs.NewBlobstore(suite.datastore) - testData := suite.generateTestData(run.blockCount, run.specialBlocks(run.blockCount)) + testData := generateTestData(suite.T(), suite.blobstore, run.blockCount, run.specialBlocks(run.blockCount)) edr, fd := suite.prepareRequesterTest(testData) fetchedExecutionData := suite.runRequesterTest(edr, fd, testData) @@ -196,12 +197,12 @@ func (suite *ExecutionDataRequesterSuite) TestRequesterResumesAfterRestart() { suite.datastore = dssync.MutexWrap(datastore.NewMapDatastore()) suite.blobstore = blobs.NewBlobstore(suite.datastore) - testData := suite.generateTestData(suite.run.blockCount, suite.run.specialBlocks(suite.run.blockCount)) + testData := generateTestData(suite.T(), suite.blobstore, suite.run.blockCount, suite.run.specialBlocks(suite.run.blockCount)) test := func(stopHeight, resumeHeight uint64) { testData.fetchedExecutionData = nil - unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) { + unittest.RunWithPebbleDB(suite.T(), func(db *pebble.DB) { suite.db = db // Process half of the blocks @@ -239,13 +240,13 @@ func (suite *ExecutionDataRequesterSuite) TestRequesterResumesAfterRestart() { // TestRequesterCatchesUp tests that the requester processes all heights when it starts with a // backlog of sealed blocks. func (suite *ExecutionDataRequesterSuite) TestRequesterCatchesUp() { - unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) { + unittest.RunWithPebbleDB(suite.T(), func(db *pebble.DB) { suite.db = db suite.datastore = dssync.MutexWrap(datastore.NewMapDatastore()) suite.blobstore = blobs.NewBlobstore(suite.datastore) - testData := suite.generateTestData(suite.run.blockCount, suite.run.specialBlocks(suite.run.blockCount)) + testData := generateTestData(suite.T(), suite.blobstore, suite.run.blockCount, suite.run.specialBlocks(suite.run.blockCount)) // start processing with all seals available edr, fd := suite.prepareRequesterTest(testData) @@ -261,7 +262,7 @@ func (suite *ExecutionDataRequesterSuite) TestRequesterCatchesUp() { // TestRequesterPausesAndResumes tests that the requester pauses when it downloads maxSearchAhead // blocks beyond the last processed block, and resumes when it catches up. 
 func (suite *ExecutionDataRequesterSuite) TestRequesterPausesAndResumes() {
-	unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) {
+	unittest.RunWithPebbleDB(suite.T(), func(db *pebble.DB) {
 		suite.db = db
 
 		pauseHeight := uint64(10)
@@ -271,12 +272,14 @@
 		// until the resume() is called.
 		generate, resume := generatePauseResume(pauseHeight)
 
-		testData := suite.generateTestData(suite.run.blockCount, generate(suite.run.blockCount))
+		testData := generateTestData(suite.T(), suite.blobstore, suite.run.blockCount, generate(suite.run.blockCount))
 		testData.maxSearchAhead = maxSearchAhead
 		testData.waitTimeout = time.Second * 10
 
-		// calculate the expected number of blocks that should be downloaded before resuming
-		expectedDownloads := maxSearchAhead + (pauseHeight-1)*2
+		// calculate the expected number of blocks that should be downloaded before resuming.
+		// the test should download all blocks up to pauseHeight, then maxSearchAhead blocks beyond.
+		// the pause block itself is excluded.
+		expectedDownloads := pauseHeight + maxSearchAhead - 1
 
 		edr, fd := suite.prepareRequesterTest(testData)
 		fetchedExecutionData := suite.runRequesterTestPauseResume(edr, fd, testData, int(expectedDownloads), resume)
@@ -290,7 +293,7 @@
 // TestRequesterHalts tests that the requester handles halting correctly when it encounters an
 // invalid block
 func (suite *ExecutionDataRequesterSuite) TestRequesterHalts() {
-	unittest.RunWithBadgerDB(suite.T(), func(db *badger.DB) {
+	unittest.RunWithPebbleDB(suite.T(), func(db *pebble.DB) {
 		suite.db = db
 
 		suite.run.blockCount = 10
@@ -299,7 +302,7 @@
 		// generate a block that will return a malformed blob error,
causing the requester to halt generate, expectedErr := generateBlocksWithHaltingError(suite.run.blockCount) - testData := suite.generateTestData(suite.run.blockCount, generate(suite.run.blockCount)) + testData := generateTestData(suite.T(), suite.blobstore, suite.run.blockCount, generate(suite.run.blockCount)) // start processing with all seals available edr, followerDistributor := suite.prepareRequesterTest(testData) @@ -386,9 +389,13 @@ func generatePauseResume(pauseHeight uint64) (specialBlockGenerator, func()) { } func (suite *ExecutionDataRequesterSuite) prepareRequesterTest(cfg *fetchTestRun) (state_synchronization.ExecutionDataRequester, *pubsub.FollowerDistributor) { + logger := unittest.Logger() + metricsCollector := metrics.NewNoopCollector() + headers := synctest.MockBlockHeaderStorage( synctest.WithByID(cfg.blocksByID), synctest.WithByHeight(cfg.blocksByHeight), + synctest.WithBlockIDByHeight(cfg.blocksByHeight), ) results := synctest.MockResultsStorage( synctest.WithResultByID(cfg.resultsByID), @@ -398,30 +405,35 @@ func (suite *ExecutionDataRequesterSuite) prepareRequesterTest(cfg *fetchTestRun ) state := suite.mockProtocolState(cfg.blocksByHeight) - suite.downloader = mockDownloader(cfg.executionDataEntries) + suite.downloader = MockDownloader(cfg.executionDataEntries) + suite.distributor = NewExecutionDataDistributor() + + heroCache := herocache.NewBlockExecutionData(subscription.DefaultCacheSize, logger, metricsCollector) + edCache := cache.NewExecutionDataCache(suite.downloader, headers, seals, results, heroCache) followerDistributor := pubsub.NewFollowerDistributor() - processedHeight := bstorage.NewConsumerProgress(suite.db, module.ConsumeProgressExecutionDataRequesterBlockHeight) - processedNotification := bstorage.NewConsumerProgress(suite.db, module.ConsumeProgressExecutionDataRequesterNotification) + processedHeight := store.NewConsumerProgress(pebbleimpl.ToDB(suite.db), module.ConsumeProgressExecutionDataRequesterBlockHeight) + processedNotification := store.NewConsumerProgress(pebbleimpl.ToDB(suite.db), module.ConsumeProgressExecutionDataRequesterNotification) - edr := requester.New( - zerolog.New(os.Stdout).With().Timestamp().Logger(), - metrics.NewNoopCollector(), + edr, err := New( + logger, + metricsCollector, suite.downloader, + edCache, processedHeight, processedNotification, state, headers, - results, - seals, - requester.ExecutionDataConfig{ + ExecutionDataConfig{ InitialBlockHeight: cfg.startHeight - 1, MaxSearchAhead: cfg.maxSearchAhead, FetchTimeout: cfg.fetchTimeout, RetryDelay: cfg.retryDelay, MaxRetryDelay: cfg.maxRetryDelay, }, + suite.distributor, ) + require.NoError(suite.T(), err) followerDistributor.AddOnBlockFinalizedConsumer(edr.OnBlockFinalized) @@ -439,7 +451,7 @@ func (suite *ExecutionDataRequesterSuite) runRequesterTestHalts(edr state_synchr fetchedExecutionData := cfg.FetchedExecutionData() // collect all execution data notifications - edr.AddOnExecutionDataReceivedConsumer(suite.consumeExecutionDataNotifications(cfg, func() { close(testDone) }, fetchedExecutionData)) + suite.distributor.AddOnExecutionDataReceivedConsumer(suite.consumeExecutionDataNotifications(cfg, func() { close(testDone) }, fetchedExecutionData)) edr.Start(signalerCtx) unittest.RequireCloseBefore(suite.T(), edr.Ready(), cfg.waitTimeout, "timed out waiting for requester to be ready") @@ -466,7 +478,7 @@ func (suite *ExecutionDataRequesterSuite) runRequesterTestPauseResume(edr state_ fetchedExecutionData := cfg.FetchedExecutionData() // collect all execution data 
notifications - edr.AddOnExecutionDataReceivedConsumer(suite.consumeExecutionDataNotifications(cfg, func() { close(testDone) }, fetchedExecutionData)) + suite.distributor.AddOnExecutionDataReceivedConsumer(suite.consumeExecutionDataNotifications(cfg, func() { close(testDone) }, fetchedExecutionData)) edr.Start(signalerCtx) unittest.RequireCloseBefore(suite.T(), edr.Ready(), cfg.waitTimeout, "timed out waiting for requester to be ready") @@ -478,7 +490,7 @@ func (suite *ExecutionDataRequesterSuite) runRequesterTestPauseResume(edr state_ unittest.RequireNeverClosedWithin(suite.T(), testDone, 500*time.Millisecond, "finished unexpectedly") // confirm the expected number of downloads were attempted - suite.downloader.AssertNumberOfCalls(suite.T(), "Download", expectedDownloads) + suite.downloader.AssertNumberOfCalls(suite.T(), "Get", expectedDownloads) suite.T().Log("Resuming") resume() @@ -504,7 +516,7 @@ func (suite *ExecutionDataRequesterSuite) runRequesterTest(edr state_synchroniza fetchedExecutionData := cfg.FetchedExecutionData() // collect all execution data notifications - edr.AddOnExecutionDataReceivedConsumer(suite.consumeExecutionDataNotifications(cfg, func() { close(testDone) }, fetchedExecutionData)) + suite.distributor.AddOnExecutionDataReceivedConsumer(suite.consumeExecutionDataNotifications(cfg, func() { close(testDone) }, fetchedExecutionData)) edr.Start(signalerCtx) unittest.RequireCloseBefore(suite.T(), edr.Ready(), cfg.waitTimeout, "timed out waiting for requester to be ready") @@ -530,7 +542,12 @@ func (suite *ExecutionDataRequesterSuite) consumeExecutionDataNotifications(cfg } fetchedExecutionData[ed.BlockID] = ed.BlockExecutionData - suite.T().Logf("notified of execution data for block %v height %d (%d/%d)", ed.BlockID, cfg.blocksByID[ed.BlockID].Header.Height, len(fetchedExecutionData), cfg.sealedCount) + if _, ok := cfg.blocksByID[ed.BlockID]; !ok { + suite.T().Errorf("unknown execution data for block %s", ed.BlockID) + return + } + + suite.T().Logf("notified of execution data for block %v height %d (%d/%d)", ed.BlockID, cfg.blocksByID[ed.BlockID].Height, len(fetchedExecutionData), cfg.sealedCount) if cfg.IsLastSeal(ed.BlockID) { done() @@ -542,11 +559,11 @@ func (suite *ExecutionDataRequesterSuite) finalizeBlocks(cfg *fetchTestRun, foll for i := cfg.StartHeight(); i <= cfg.endHeight; i++ { b := cfg.blocksByHeight[i] - suite.T().Log(">>>> Finalizing block", b.ID(), b.Header.Height) + suite.T().Log(">>>> Finalizing block", b.ID(), b.Height) if len(b.Payload.Seals) > 0 { seal := b.Payload.Seals[0] - sealedHeader := cfg.blocksByID[seal.BlockID].Header + sealedHeader := cfg.blocksByID[seal.BlockID].ToHeader() suite.mockSnapshot.set(sealedHeader, nil) suite.T().Log(">>>> Sealing block", sealedHeader.ID(), sealedHeader.Height) @@ -614,7 +631,7 @@ func (r *fetchTestRun) IsLastSeal(blockID flow.Identifier) bool { return lastSeal == r.blocksByID[blockID].ID() } -func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, specialHeightFuncs map[uint64]testExecutionDataCallback) *fetchTestRun { +func generateTestData(t *testing.T, blobstore blobs.Blobstore, blockCount int, specialHeightFuncs map[uint64]testExecutionDataCallback) *fetchTestRun { edsEntries := map[flow.Identifier]*testExecutionDataServiceEntry{} blocksByHeight := map[uint64]*flow.Block{} blocksByID := map[flow.Identifier]*flow.Block{} @@ -632,7 +649,7 @@ func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, speci endHeight := uint64(blockCount) - 1 // instantiate 
ExecutionDataService to generate correct CIDs - eds := execution_data.NewExecutionDataStore(suite.blobstore, execution_data.DefaultSerializer) + eds := execution_data.NewExecutionDataStore(blobstore, execution_data.DefaultSerializer) var previousBlock *flow.Block var previousResult *flow.ExecutionResult @@ -642,7 +659,7 @@ func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, speci if i >= firstSeal { sealedBlock := blocksByHeight[uint64(i-firstSeal+1)] seals = []*flow.Header{ - sealedBlock.Header, // block 0 doesn't get sealed (it's pre-sealed in the genesis state) + sealedBlock.ToHeader(), // block 0 doesn't get sealed (it's pre-sealed in the genesis state) } sealsByBlockID[sealedBlock.ID()] = unittest.Seal.Fixture( @@ -650,7 +667,7 @@ func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, speci unittest.Seal.WithResult(resultsByBlockID[sealedBlock.ID()]), ) - suite.T().Logf("block %d has seals for %d", i, seals[0].Height) + t.Logf("block %d has seals for %d", i, seals[0].Height) } height := uint64(i) @@ -658,8 +675,8 @@ func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, speci ed := unittest.BlockExecutionDataFixture(unittest.WithBlockExecutionDataBlockID(block.ID())) - cid, err := eds.AddExecutionData(context.Background(), ed) - require.NoError(suite.T(), err) + cid, err := eds.Add(context.Background(), ed) + require.NoError(t, err) result := buildResult(block, cid, previousResult) @@ -697,8 +714,8 @@ func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, speci executionDataIDByBlockID: executionDataIDByBlockID, waitTimeout: time.Second * 5, - maxSearchAhead: requester.DefaultMaxSearchAhead, - fetchTimeout: requester.DefaultFetchTimeout, + maxSearchAhead: DefaultMaxSearchAhead, + fetchTimeout: DefaultFetchTimeout, retryDelay: 1 * time.Millisecond, maxRetryDelay: 15 * time.Millisecond, } @@ -706,14 +723,14 @@ func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, speci func buildBlock(height uint64, parent *flow.Block, seals []*flow.Header) *flow.Block { if parent == nil { - return unittest.GenesisFixture() + return unittest.Block.Genesis(flow.Emulator) } if len(seals) == 0 { - return unittest.BlockWithParentFixture(parent.Header) + return unittest.BlockWithParentFixture(parent.ToHeader()) } - return unittest.BlockWithParentAndSeals(parent.Header, seals) + return unittest.BlockWithParentAndSeals(parent.ToHeader(), seals) } func buildResult(block *flow.Block, cid flow.Identifier, previousResult *flow.ExecutionResult) *flow.ExecutionResult { @@ -753,6 +770,8 @@ type mockSnapshot struct { mu sync.Mutex } +var _ protocol.Snapshot = &mockSnapshot{} + func (m *mockSnapshot) set(header *flow.Header, err error) { m.mu.Lock() defer m.mu.Unlock() @@ -770,17 +789,20 @@ func (m *mockSnapshot) Head() (*flow.Header, error) { // none of these are used in this test func (m *mockSnapshot) QuorumCertificate() (*flow.QuorumCertificate, error) { return nil, nil } -func (m *mockSnapshot) Identities(selector flow.IdentityFilter) (flow.IdentityList, error) { +func (m *mockSnapshot) Identities(selector flow.IdentityFilter[flow.Identity]) (flow.IdentityList, error) { return nil, nil } func (m *mockSnapshot) Identity(nodeID flow.Identifier) (*flow.Identity, error) { return nil, nil } func (m *mockSnapshot) SealedResult() (*flow.ExecutionResult, *flow.Seal, error) { return nil, nil, nil } -func (m *mockSnapshot) Commit() (flow.StateCommitment, error) { return flow.DummyStateCommitment, nil } -func (m 
*mockSnapshot) SealingSegment() (*flow.SealingSegment, error) { return nil, nil } -func (m *mockSnapshot) Descendants() ([]flow.Identifier, error) { return nil, nil } -func (m *mockSnapshot) RandomSource() ([]byte, error) { return nil, nil } -func (m *mockSnapshot) Phase() (flow.EpochPhase, error) { return flow.EpochPhaseUndefined, nil } -func (m *mockSnapshot) Epochs() protocol.EpochQuery { return nil } -func (m *mockSnapshot) Params() protocol.GlobalParams { return nil } +func (m *mockSnapshot) Commit() (flow.StateCommitment, error) { return flow.DummyStateCommitment, nil } +func (m *mockSnapshot) SealingSegment() (*flow.SealingSegment, error) { return nil, nil } +func (m *mockSnapshot) Descendants() ([]flow.Identifier, error) { return nil, nil } +func (m *mockSnapshot) RandomSource() ([]byte, error) { return nil, nil } +func (m *mockSnapshot) EpochPhase() (flow.EpochPhase, error) { return flow.EpochPhaseUndefined, nil } +func (m *mockSnapshot) Epochs() protocol.EpochQuery { return nil } +func (m *mockSnapshot) Params() protocol.GlobalParams { return nil } +func (m *mockSnapshot) EpochProtocolState() (protocol.EpochProtocolState, error) { return nil, nil } +func (m *mockSnapshot) ProtocolState() (protocol.KVStoreReader, error) { return nil, nil } +func (m *mockSnapshot) VersionBeacon() (*flow.SealedVersionBeacon, error) { return nil, nil } diff --git a/module/state_synchronization/requester/jobs/execution_data_reader.go b/module/state_synchronization/requester/jobs/execution_data_reader.go index eabd7178b21..721356ac7df 100644 --- a/module/state_synchronization/requester/jobs/execution_data_reader.go +++ b/module/state_synchronization/requester/jobs/execution_data_reader.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/storage" ) @@ -19,15 +20,14 @@ type BlockEntry struct { ExecutionData *execution_data.BlockExecutionDataEntity } +var _ module.Jobs = (*ExecutionDataReader)(nil) + // ExecutionDataReader provides an abstraction for consumers to read blocks as jobs. type ExecutionDataReader struct { - downloader execution_data.Downloader - headers storage.Headers - results storage.ExecutionResults - seals storage.Seals + store *cache.ExecutionDataCache - fetchTimeout time.Duration - highestAvailableHeight func() uint64 + fetchTimeout time.Duration + highestConsecutiveHeight func() (uint64, error) // TODO: refactor this to accept a context in AtIndex instead of storing it on the struct. // This requires also refactoring jobqueue.Consumer @@ -36,20 +36,14 @@ type ExecutionDataReader struct { // NewExecutionDataReader creates and returns an ExecutionDataReader.
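The hunk above collapses the reader's four direct dependencies (downloader, headers, results, seals) into a single ExecutionDataCache. A minimal sketch of how the pieces now fit together, using only constructors that appear elsewhere in this diff; the cache size, the timeout value, and the newReader helper itself are illustrative, not part of the change. The constructor changes follow below.

```go
package example

import (
	"time"

	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
	"github.com/onflow/flow-go/module/executiondatasync/execution_data/cache"
	"github.com/onflow/flow-go/module/mempool/herocache"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/module/state_synchronization/requester/jobs"
	"github.com/onflow/flow-go/storage"
)

// newReader wires a cache-backed ExecutionDataReader. The cache resolves
// height -> seal -> result -> execution data ID internally, which is why the
// reader no longer needs headers/seals/results itself.
func newReader(
	log zerolog.Logger,
	downloader execution_data.Downloader,
	headers storage.Headers,
	seals storage.Seals,
	results storage.ExecutionResults,
	highestConsecutiveHeight func() (uint64, error),
) *jobs.ExecutionDataReader {
	heroCache := herocache.NewBlockExecutionData(100, log, metrics.NewNoopCollector())
	execDataCache := cache.NewExecutionDataCache(downloader, headers, seals, results, heroCache)
	return jobs.NewExecutionDataReader(execDataCache, 30*time.Second, highestConsecutiveHeight)
}
```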
func NewExecutionDataReader( - downloader execution_data.Downloader, - headers storage.Headers, - results storage.ExecutionResults, - seals storage.Seals, + store *cache.ExecutionDataCache, fetchTimeout time.Duration, - highestAvailableHeight func() uint64, + highestConsecutiveHeight func() (uint64, error), ) *ExecutionDataReader { return &ExecutionDataReader{ - downloader: downloader, - headers: headers, - results: results, - seals: seals, - fetchTimeout: fetchTimeout, - highestAvailableHeight: highestAvailableHeight, + store: store, + fetchTimeout: fetchTimeout, + highestConsecutiveHeight: highestConsecutiveHeight, } } @@ -67,14 +61,22 @@ func (r *ExecutionDataReader) AtIndex(height uint64) (module.Job, error) { return nil, fmt.Errorf("execution data reader is not initialized") } - // height has not been downloaded, so height is not available yet - if height > r.highestAvailableHeight() { + // data for the requested height or a lower height, has not been downloaded yet. + highestHeight, err := r.highestConsecutiveHeight() + if err != nil { + return nil, fmt.Errorf("failed to get highest height: %w", err) + } + + if height > highestHeight { return nil, storage.ErrNotFound } - executionData, err := r.getExecutionData(r.ctx, height) + ctx, cancel := context.WithTimeout(r.ctx, r.fetchTimeout) + defer cancel() + + executionData, err := r.store.ByHeight(ctx, height) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get execution data for height %d: %w", height, err) } return BlockEntryToJob(&BlockEntry{ @@ -86,36 +88,5 @@ func (r *ExecutionDataReader) AtIndex(height uint64) (module.Job, error) { // Head returns the highest consecutive block height with downloaded execution data func (r *ExecutionDataReader) Head() (uint64, error) { - return r.highestAvailableHeight(), nil -} - -// getExecutionData returns the ExecutionData for the given block height. -// This is used by the execution data reader to get the ExecutionData for a block. 
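With the AtIndex change above, heights above the highest consecutive downloaded height report storage.ErrNotFound, which callers treat as "not yet available" rather than a failure. In the PR this is driven by jobqueue.Consumer (see the TODO above); a stripped-down, hypothetical polling loop showing the same contract (drainAvailable and process are illustrative names, not part of the change):

```go
package example

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/storage"
)

// drainAvailable processes every job that is already available, stopping
// (without error) at the first height that has not been downloaded yet.
func drainAvailable(reader module.Jobs, nextHeight uint64, process func(module.Job)) (uint64, error) {
	for {
		job, err := reader.AtIndex(nextHeight)
		if errors.Is(err, storage.ErrNotFound) {
			return nextHeight, nil // not downloaded yet; retry on a later pass
		}
		if err != nil {
			return nextHeight, fmt.Errorf("failed to get job at height %d: %w", nextHeight, err)
		}
		process(job)
		nextHeight++
	}
}
```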
-func (r *ExecutionDataReader) getExecutionData(signalCtx irrecoverable.SignalerContext, height uint64) (*execution_data.BlockExecutionDataEntity, error) { - header, err := r.headers.ByHeight(height) - if err != nil { - return nil, fmt.Errorf("failed to lookup header for height %d: %w", height, err) - } - - // get the ExecutionResultID for the block from the block's seal - seal, err := r.seals.FinalizedSealForBlock(header.ID()) - if err != nil { - return nil, fmt.Errorf("failed to lookup seal for block %s: %w", header.ID(), err) - } - - result, err := r.results.ByID(seal.ResultID) - if err != nil { - return nil, fmt.Errorf("failed to lookup execution result for block %s: %w", header.ID(), err) - } - - ctx, cancel := context.WithTimeout(signalCtx, r.fetchTimeout) - defer cancel() - - executionData, err := r.downloader.Download(ctx, result.ExecutionDataID) - - if err != nil { - return nil, fmt.Errorf("failed to get execution data for block %s: %w", header.ID(), err) - } - - return execution_data.NewBlockExecutionDataEntity(result.ExecutionDataID, executionData), nil + return r.highestConsecutiveHeight() } diff --git a/module/state_synchronization/requester/jobs/execution_data_reader_test.go b/module/state_synchronization/requester/jobs/execution_data_reader_test.go index 3306ac1ce84..74d3ee8e655 100644 --- a/module/state_synchronization/requester/jobs/execution_data_reader_test.go +++ b/module/state_synchronization/requester/jobs/execution_data_reader_test.go @@ -3,7 +3,6 @@ package jobs import ( "context" "errors" - "math/rand" "testing" "time" @@ -14,8 +13,11 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" exedatamock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/herocache" + "github.com/onflow/flow-go/module/metrics" synctest "github.com/onflow/flow-go/module/state_synchronization/requester/unittest" "github.com/onflow/flow-go/storage" storagemock "github.com/onflow/flow-go/storage/mock" @@ -42,7 +44,6 @@ type ExecutionDataReaderSuite struct { func TestExecutionDataReaderSuite(t *testing.T) { t.Parallel() - rand.Seed(time.Now().UnixMilli()) suite.Run(t, new(ExecutionDataReaderSuite)) } @@ -53,12 +54,12 @@ func (suite *ExecutionDataReaderSuite) SetupTest() { parent := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(1)) suite.block = unittest.BlockWithParentFixture(parent) suite.blocksByHeight = map[uint64]*flow.Block{ - suite.block.Header.Height: suite.block, + suite.block.Height: suite.block, } suite.executionData = unittest.BlockExecutionDataFixture(unittest.WithBlockExecutionDataBlockID(suite.block.ID())) - suite.highestAvailableHeight = func() uint64 { return suite.block.Header.Height + 1 } + suite.highestAvailableHeight = func() uint64 { return suite.block.Height + 1 } suite.reset() } @@ -74,7 +75,10 @@ func (suite *ExecutionDataReaderSuite) reset() { unittest.Seal.WithResult(result), ) - suite.headers = synctest.MockBlockHeaderStorage(synctest.WithByHeight(suite.blocksByHeight)) + suite.headers = synctest.MockBlockHeaderStorage( + synctest.WithByHeight(suite.blocksByHeight), + synctest.WithBlockIDByHeight(suite.blocksByHeight), + ) suite.results = synctest.MockResultsStorage( synctest.WithResultByID(map[flow.Identifier]*flow.ExecutionResult{ result.ID(): result, @@ -87,21 +91,23 @@ func (suite 
*ExecutionDataReaderSuite) reset() { ) suite.downloader = new(exedatamock.Downloader) + var executionDataCacheSize uint32 = 100 // Use local value to avoid cycle dependency on subscription package + + heroCache := herocache.NewBlockExecutionData(executionDataCacheSize, unittest.Logger(), metrics.NewNoopCollector()) + cache := cache.NewExecutionDataCache(suite.downloader, suite.headers, suite.seals, suite.results, heroCache) + suite.reader = NewExecutionDataReader( - suite.downloader, - suite.headers, - suite.results, - suite.seals, + cache, suite.fetchTimeout, - func() uint64 { - return suite.highestAvailableHeight() + func() (uint64, error) { + return suite.highestAvailableHeight(), nil }, ) } func (suite *ExecutionDataReaderSuite) TestAtIndex() { setExecutionDataGet := func(executionData *execution_data.BlockExecutionData, err error) { - suite.downloader.On("Download", mock.Anything, suite.executionDataID).Return( + suite.downloader.On("Get", mock.Anything, suite.executionDataID).Return( func(ctx context.Context, id flow.Identifier) *execution_data.BlockExecutionData { return executionData }, @@ -135,7 +141,7 @@ func (suite *ExecutionDataReaderSuite) TestAtIndex() { edEntity := execution_data.NewBlockExecutionDataEntity(suite.executionDataID, ed) - job, err := suite.reader.AtIndex(suite.block.Header.Height) + job, err := suite.reader.AtIndex(suite.block.Height) require.NoError(suite.T(), err) entry, err := JobToBlockEntry(job) @@ -152,7 +158,7 @@ func (suite *ExecutionDataReaderSuite) TestAtIndex() { expectedErr := errors.New("expected error: get failed") setExecutionDataGet(nil, expectedErr) - job, err := suite.reader.AtIndex(suite.block.Header.Height) + job, err := suite.reader.AtIndex(suite.block.Height) assert.Nil(suite.T(), job, "job should be nil") assert.ErrorIs(suite.T(), err, expectedErr) }) @@ -162,7 +168,7 @@ func (suite *ExecutionDataReaderSuite) TestAtIndex() { suite.reset() suite.runTest(func() { // search for an index that doesn't have a header in storage - job, err := suite.reader.AtIndex(suite.block.Header.Height + 1) + job, err := suite.reader.AtIndex(suite.block.Height + 1) assert.Nil(suite.T(), job, "job should be nil") assert.ErrorIs(suite.T(), err, storage.ErrNotFound) }) @@ -172,10 +178,10 @@ func (suite *ExecutionDataReaderSuite) TestAtIndex() { suite.reset() suite.runTest(func() { // add a new block without an execution result - newBlock := unittest.BlockWithParentFixture(suite.block.Header) - suite.blocksByHeight[newBlock.Header.Height] = newBlock + newBlock := unittest.BlockWithParentFixture(suite.block.ToHeader()) + suite.blocksByHeight[newBlock.Height] = newBlock - job, err := suite.reader.AtIndex(newBlock.Header.Height) + job, err := suite.reader.AtIndex(newBlock.Height) assert.Nil(suite.T(), job, "job should be nil") assert.ErrorIs(suite.T(), err, storage.ErrNotFound) }) diff --git a/module/state_synchronization/requester/mock/execution_data_requester.go b/module/state_synchronization/requester/mock/execution_data_requester.go new file mode 100644 index 00000000000..f4ca43a1ef1 --- /dev/null +++ b/module/state_synchronization/requester/mock/execution_data_requester.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + context "context" + + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + mock "github.com/stretchr/testify/mock" +) + +// ExecutionDataRequester is an autogenerated mock type for the ExecutionDataRequester type +type ExecutionDataRequester struct { + mock.Mock +} + +// RequestExecutionData provides a mock function with given fields: ctx +func (_m *ExecutionDataRequester) RequestExecutionData(ctx context.Context) (*execution_data.BlockExecutionData, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for RequestExecutionData") + } + + var r0 *execution_data.BlockExecutionData + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*execution_data.BlockExecutionData, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *execution_data.BlockExecutionData); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution_data.BlockExecutionData) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewExecutionDataRequester creates a new instance of ExecutionDataRequester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionDataRequester(t interface { + mock.TestingT + Cleanup(func()) +}) *ExecutionDataRequester { + mock := &ExecutionDataRequester{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/module/state_synchronization/requester/oneshot_execution_data_requester.go b/module/state_synchronization/requester/oneshot_execution_data_requester.go new file mode 100644 index 00000000000..b7059860099 --- /dev/null +++ b/module/state_synchronization/requester/oneshot_execution_data_requester.go @@ -0,0 +1,193 @@ +package requester + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/rs/zerolog" + "github.com/sethvargo/go-retry" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" +) + +// ExecutionDataRequester defines the interface for requesting execution data for a block. +type ExecutionDataRequester interface { + // RequestExecutionData requests execution data for a given block. + // + // Expected errors: + // - context.Canceled: if the provided context was canceled before completion + // All other errors are unexpected exceptions and may indicate invalid execution data was received. + RequestExecutionData(ctx context.Context) (*execution_data.BlockExecutionData, error) +} + +// OneshotExecutionDataConfig is a config for the oneshot execution data requester. +// It contains the retry settings for the execution data fetch. +type OneshotExecutionDataConfig struct { + // the initial timeout for fetching execution data from the db/network. The timeout is + // increased using an incremental backoff until FetchTimeout. + FetchTimeout time.Duration + // the max timeout for fetching execution data from the db/network. + MaxFetchTimeout time.Duration + // the initial delay used in the exponential backoff for failed execution data download + // retries. + RetryDelay time.Duration + // the max delay used in the exponential backoff for failed execution data download. 
+ MaxRetryDelay time.Duration +} + +var _ ExecutionDataRequester = (*OneshotExecutionDataRequester)(nil) + +// OneshotExecutionDataRequester is a component that requests execution data for a block. +// It uses a retry mechanism to retry the download if the execution data is not found. +type OneshotExecutionDataRequester struct { + log zerolog.Logger + metrics module.ExecutionDataRequesterMetrics + config OneshotExecutionDataConfig + execDataDownloader execution_data.Downloader + executionResult *flow.ExecutionResult + blockHeader *flow.Header +} + +// NewOneshotExecutionDataRequester creates a new OneshotExecutionDataRequester instance. +// It validates that the provided block header and execution result are consistent. +// +// Parameters: +// - log: Logger instance for the requester component +// - metrics: Metrics collector for execution data requester operations +// - execDataDownloader: Downloader used to fetch execution data from the network +// - executionResult: The execution result to request data for +// - blockHeader: The block header corresponding to the execution result +// - config: Configuration settings for the oneshot execution data requester +// +// No errors are expected during normal operations; any error likely indicates a bug or +// inconsistent state. +func NewOneshotExecutionDataRequester( + log zerolog.Logger, + metrics module.ExecutionDataRequesterMetrics, + execDataDownloader execution_data.Downloader, + executionResult *flow.ExecutionResult, + blockHeader *flow.Header, + config OneshotExecutionDataConfig, +) (*OneshotExecutionDataRequester, error) { + if blockHeader.ID() != executionResult.BlockID { + return nil, fmt.Errorf("block id and execution result mismatch") + } + + return &OneshotExecutionDataRequester{ + log: log.With().Str("component", "oneshot_execution_data_requester").Logger(), + metrics: metrics, + execDataDownloader: execDataDownloader, + executionResult: executionResult, + blockHeader: blockHeader, + config: config, + }, nil +} + +// RequestExecutionData requests execution data for a given block from the network. +// It performs the fetch using a retry mechanism with exponential backoff if the execution data is not found. +// Returns the execution data and any error encountered. +// +// Expected errors: +// - context.Canceled: if the provided context was canceled before completion +// All other errors are unexpected exceptions and may indicate invalid execution data was received. +func (r *OneshotExecutionDataRequester) RequestExecutionData( + ctx context.Context, +) (*execution_data.BlockExecutionData, error) { + backoff := retry.NewExponential(r.config.RetryDelay) + backoff = retry.WithCappedDuration(r.config.MaxRetryDelay, backoff) + backoff = retry.WithJitterPercent(15, backoff) + + // bitswap always waits for either all data to be received or a timeout, even if it encountered an error. + // use an incremental backoff for the timeout so we do faster initial retries, then allow for more + // time in case data is large or there is network congestion. + timeout := retry.NewExponential(r.config.FetchTimeout) + timeout = retry.WithCappedDuration(r.config.MaxFetchTimeout, timeout) + + attempt := 0 + lg := r.log.With(). + Str("block_id", r.executionResult.BlockID.String()). + Str("execution_data_id", r.executionResult.ExecutionDataID.String()). + Uint64("height", r.blockHeader.Height). + Logger() + + var execData *execution_data.BlockExecutionData + err := retry.Do(ctx, backoff, func(context.Context) error { + if attempt > 0 { + lg.Debug().
+ Uint64("attempt", uint64(attempt)). + Msgf("retrying download") + + r.metrics.FetchRetried() + } + attempt++ + + // download execution data for the block + fetchTimeout, _ := timeout.Next() + var err error + execData, err = r.processFetchRequest(ctx, fetchTimeout) + if isBlobNotFoundError(err) || errors.Is(err, context.DeadlineExceeded) { + return retry.RetryableError(err) + } + + if execution_data.IsMalformedDataError(err) || execution_data.IsBlobSizeLimitExceededError(err) { + // these errors indicate the execution data was received successfully and its hash matched + // the value in the ExecutionResult, however, the data was malformed or invalid. this means that + // an execution node produced an invalid execution data blob, and verification nodes approved it + lg.Error().Err(err). + Msg("received invalid execution data from network (potential slashing evidence?)") + } + + return err + }) + + if err != nil { + return nil, err + } + + return execData, nil +} + +// processFetchRequest performs the actual fetch of execution data for the given execution result. +// +// Expected errors during normal operations: +// - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore +// - MalformedDataError if some level of the blob tree cannot be properly deserialized +// - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size +// - context.DeadlineExceeded if fetching time exceeded fetchTimeout duration +// - context.Canceled if context was canceled during the request. +func (r *OneshotExecutionDataRequester) processFetchRequest( + parentCtx context.Context, + fetchTimeout time.Duration, +) (*execution_data.BlockExecutionData, error) { + height := r.blockHeader.Height + executionDataID := r.executionResult.ExecutionDataID + + lg := r.log.With(). + Str("block_id", r.executionResult.BlockID.String()). + Str("execution_data_id", executionDataID.String()). + Uint64("height", height). 
+ Logger() + + lg.Debug().Msg("processing fetch request") + + start := time.Now() + r.metrics.ExecutionDataFetchStarted() + lg.Debug().Msg("downloading execution data") + + ctx, cancel := context.WithTimeout(parentCtx, fetchTimeout) + defer cancel() + + execData, err := r.execDataDownloader.Get(ctx, executionDataID) + r.metrics.ExecutionDataFetchFinished(time.Since(start), err == nil, height) + if err != nil { + return nil, err + } + + lg.Info().Msg("execution data fetched") + + return execData, nil +} diff --git a/module/state_synchronization/requester/oneshot_execution_data_requester_test.go b/module/state_synchronization/requester/oneshot_execution_data_requester_test.go new file mode 100644 index 00000000000..6951c4d6368 --- /dev/null +++ b/module/state_synchronization/requester/oneshot_execution_data_requester_test.go @@ -0,0 +1,225 @@ +package requester + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + edmock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/utils/unittest" +) + +type OneshotExecutionDataRequesterSuite struct { + suite.Suite +} + +func TestRawExecutionDataRequesterSuite(t *testing.T) { + t.Parallel() + suite.Run(t, new(OneshotExecutionDataRequesterSuite)) +} + +func (suite *OneshotExecutionDataRequesterSuite) TestRequestExecutionData() { + logger := unittest.Logger() + metricsCollector := metrics.NewNoopCollector() + config := OneshotExecutionDataConfig{ + FetchTimeout: DefaultFetchTimeout, + MaxFetchTimeout: DefaultMaxFetchTimeout, + RetryDelay: DefaultRetryDelay, + MaxRetryDelay: DefaultMaxRetryDelay, + } + + suite.Run("Happy path. Raw setup", func() { + block := unittest.BlockFixture() + result := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + blockEd := unittest.BlockExecutionDataFixture(unittest.WithBlockExecutionDataBlockID(block.ID())) + + execDataDownloader := edmock.NewDownloader(suite.T()) + execDataDownloader. + On("Get", mock.Anything, mock.AnythingOfType("flow.Identifier")). + Return(blockEd, nil). + Once() + + requester, err := NewOneshotExecutionDataRequester(logger, metricsCollector, execDataDownloader, result, block.ToHeader(), config) + require.NoError(suite.T(), err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + execData, err := requester.RequestExecutionData(ctx) + require.NoError(suite.T(), err) + require.Equal(suite.T(), result.BlockID, execData.BlockID) + }) + + suite.Run("Happy path. 
Full storages setup", func() { + dataStore := dssync.MutexWrap(datastore.NewMapDatastore()) + blobstore := blobs.NewBlobstore(dataStore) + testData := generateTestData(suite.T(), blobstore, 5, map[uint64]testExecutionDataCallback{}) + execDataDownloader := MockDownloader(testData.executionDataEntries) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Test each sealed block + for blockID, expectedED := range testData.executionDataByID { + block := testData.blocksByID[blockID] + result := testData.resultsByBlockID[blockID] + + requester, err := NewOneshotExecutionDataRequester( + logger, + metricsCollector, + execDataDownloader, + result, + block.ToHeader(), + config, + ) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), requester) + + execData, err := requester.RequestExecutionData(ctx) + require.NoError(suite.T(), err) + require.Equal(suite.T(), execData.BlockID, result.BlockID) + require.Equal(suite.T(), expectedED.BlockID, execData.BlockID) + require.Equal(suite.T(), expectedED.ChunkExecutionDatas, execData.ChunkExecutionDatas) + } + }) +} + +func (suite *OneshotExecutionDataRequesterSuite) TestRequestExecution_ERCacheReturnsError() { + logger := unittest.Logger() + metricsCollector := metrics.NewNoopCollector() + config := OneshotExecutionDataConfig{ + FetchTimeout: DefaultFetchTimeout, + MaxFetchTimeout: DefaultMaxFetchTimeout, + RetryDelay: DefaultRetryDelay, + MaxRetryDelay: DefaultMaxRetryDelay, + } + + // Create a block and execution result + block := unittest.BlockFixture() + executionResult := unittest.ExecutionResultFixture(unittest.WithBlock(block)) + + suite.Run("blob not found error", func() { + // Mock downloader to return blob not found error first, then success + execDataDownloader := edmock.NewDownloader(suite.T()) + expectedError := &execution_data.BlobNotFoundError{} + execDataDownloader. + On("Get", mock.Anything, executionResult.ExecutionDataID). + Return(nil, expectedError). + Once() + + // Eventually return execution data + blockEd := unittest.BlockExecutionDataFixture(unittest.WithBlockExecutionDataBlockID(block.ID())) + execDataDownloader. + On("Get", mock.Anything, executionResult.ExecutionDataID). + Return(blockEd, nil). + Once() + + requester, err := NewOneshotExecutionDataRequester(logger, metricsCollector, execDataDownloader, executionResult, block.ToHeader(), config) + require.NoError(suite.T(), err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + execData, err := requester.RequestExecutionData(ctx) + require.NoError(suite.T(), err) + require.NotNil(suite.T(), execData) + require.Equal(suite.T(), block.ID(), execData.BlockID) + }) + + suite.Run("deadline exceeded error", func() { + // Mock downloader to return a deadline exceeded error + execDataDownloader := edmock.NewDownloader(suite.T()) + expectedError := context.DeadlineExceeded + execDataDownloader. + On("Get", mock.Anything, executionResult.ExecutionDataID). + Return(nil, expectedError).
+ Maybe() + + requester, err := NewOneshotExecutionDataRequester(logger, metricsCollector, execDataDownloader, executionResult, block.ToHeader(), config) + require.NoError(suite.T(), err) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + // Should retry until timeout + execData, err := requester.RequestExecutionData(ctx) + require.ErrorIs(suite.T(), err, expectedError) + require.Nil(suite.T(), execData) + }) + + suite.Run("malformed data error", func() { + // Mock downloader to return malformed data error + execDataDownloader := edmock.NewDownloader(suite.T()) + expectedError := execution_data.NewMalformedDataError(fmt.Errorf("malformed data")) + execDataDownloader. + On("Get", mock.Anything, executionResult.ExecutionDataID). + Return(nil, expectedError). + Once() + + requester, err := NewOneshotExecutionDataRequester(logger, metricsCollector, execDataDownloader, executionResult, block.ToHeader(), config) + require.NoError(suite.T(), err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + execData, err := requester.RequestExecutionData(ctx) + require.ErrorIs(suite.T(), err, expectedError) + require.Nil(suite.T(), execData) + }) + + suite.Run("blob size limit exceeded error", func() { + // Mock downloader to return blob size limit exceeded error + execDataDownloader := edmock.NewDownloader(suite.T()) + expectedError := &execution_data.BlobSizeLimitExceededError{} + execDataDownloader. + On("Get", mock.Anything, executionResult.ExecutionDataID). + Return(nil, expectedError). + Once() + + requester, err := NewOneshotExecutionDataRequester(logger, metricsCollector, execDataDownloader, executionResult, block.ToHeader(), config) + require.NoError(suite.T(), err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + execData, err := requester.RequestExecutionData(ctx) + require.ErrorIs(suite.T(), err, expectedError) + require.Nil(suite.T(), execData) + }) + + suite.Run("context canceled error", func() { + // Return context.DeadlineExceeded to trigger retry logic + execDataDownloader := edmock.NewDownloader(suite.T()) + execDataDownloader. + On("Get", mock.Anything, executionResult.ExecutionDataID). + Return(nil, context.DeadlineExceeded). + Once() + + // Eventually return context.Canceled to stop downloader's retry logic + expectedError := context.Canceled + execDataDownloader. + On("Get", mock.Anything, executionResult.ExecutionDataID). + Return(nil, expectedError).
+ Once() + + requester, err := NewOneshotExecutionDataRequester(logger, metricsCollector, execDataDownloader, executionResult, block.ToHeader(), config) + require.NoError(suite.T(), err) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + execData, err := requester.RequestExecutionData(ctx) + require.ErrorIs(suite.T(), err, expectedError) + require.Nil(suite.T(), execData) + }) +} diff --git a/module/state_synchronization/requester/unittest/unittest.go b/module/state_synchronization/requester/unittest/unittest.go index a5b6b010f03..fd350ffd444 100644 --- a/module/state_synchronization/requester/unittest/unittest.go +++ b/module/state_synchronization/requester/unittest/unittest.go @@ -5,14 +5,14 @@ import ( "fmt" "sync" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" "github.com/stretchr/testify/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/blobs" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" statemock "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/storage" storagemock "github.com/onflow/flow-go/storage/mock" @@ -118,7 +118,7 @@ func WithByHeight(blocksByHeight map[uint64]*flow.Block) BlockHeaderMockOptions if _, has := blocksByHeight[height]; !has { return nil } - return blocksByHeight[height].Header + return blocksByHeight[height].ToHeader() }, func(height uint64) error { if _, has := blocksByHeight[height]; !has { @@ -137,7 +137,7 @@ func WithByID(blocksByID map[flow.Identifier]*flow.Block) BlockHeaderMockOptions if _, has := blocksByID[blockID]; !has { return nil } - return blocksByID[blockID].Header + return blocksByID[blockID].ToHeader() }, func(blockID flow.Identifier) error { if _, has := blocksByID[blockID]; !has { @@ -149,6 +149,25 @@ func WithByID(blocksByID map[flow.Identifier]*flow.Block) BlockHeaderMockOptions } } +func WithBlockIDByHeight(blocksByHeight map[uint64]*flow.Block) BlockHeaderMockOptions { + return func(blocks *storagemock.Headers) { + blocks.On("BlockIDByHeight", mock.AnythingOfType("uint64")).Return( + func(height uint64) flow.Identifier { + if _, has := blocksByHeight[height]; !has { + return flow.ZeroID + } + return blocksByHeight[height].ID() + }, + func(height uint64) error { + if _, has := blocksByHeight[height]; !has { + return fmt.Errorf("block %d not found: %w", height, storage.ErrNotFound) + } + return nil + }, + ) + } +} + func MockBlockHeaderStorage(opts ...BlockHeaderMockOptions) *storagemock.Headers { headers := new(storagemock.Headers) diff --git a/module/synchronization.go b/module/synchronization.go index ec7e893699f..fe78ecb7fc0 100644 --- a/module/synchronization.go +++ b/module/synchronization.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package module import ( @@ -17,7 +15,7 @@ type BlockRequester interface { // RequestHeight indicates that the given block height should be queued for retrieval. 
RequestHeight(height uint64) - // Manually Prune requests + // Prune manually prunes requests Prune(final *flow.Header) } diff --git a/module/trace/constants.go b/module/trace/constants.go index f89e0588e1c..1241ce765a0 100644 --- a/module/trace/constants.go +++ b/module/trace/constants.go @@ -12,6 +12,7 @@ const ( ProtoStateMutatorExtendCheckGuarantees SpanName = "proto.state.mutator.extend.checkGuarantees" ProtoStateMutatorExtendCheckSeals SpanName = "proto.state.mutator.extend.checkSeals" ProtoStateMutatorExtendCheckReceipts SpanName = "proto.state.mutator.extend.checkReceipts" + ProtoStateMutatorEvolveProtocolState SpanName = "proto.state.mutator.extend.evolveProtocolState" ProtoStateMutatorExtendDBInsert SpanName = "proto.state.mutator.extend.dbInsert" // HeaderExtend @@ -152,43 +153,54 @@ const ( FVMTransactionStorageUsedCheck SpanName = "fvm.env.transactionStorageUsedCheck" FVMInvokeContractFunction SpanName = "fvm.invokeContractFunction" - FVMEnvValueExists SpanName = "fvm.env.valueExists" - FVMEnvGetValue SpanName = "fvm.env.getValue" - FVMEnvSetValue SpanName = "fvm.env.setValue" - FVMEnvAllocateStorageIndex SpanName = "fvm.env.allocateStorageIndex" - FVMEnvGetAccount SpanName = "fvm.env.getAccount" - FVMEnvGetStorageUsed SpanName = "fvm.env.getStorageUsed" - FVMEnvGetStorageCapacity SpanName = "fvm.env.getStorageCapacity" - FVMEnvGetAccountBalance SpanName = "fvm.env.getAccountBalance" - FVMEnvGetAccountAvailableBalance SpanName = "fvm.env.getAccountAvailableBalance" - FVMEnvResolveLocation SpanName = "fvm.env.resolveLocation" - FVMEnvGetCode SpanName = "fvm.env.getCode" - FVMEnvGetAccountContractNames SpanName = "fvm.env.getAccountContractNames" - FVMEnvGetOrLoadProgram SpanName = "fvm.env.getOrLoadCachedProgram" - FVMEnvProgramLog SpanName = "fvm.env.programLog" - FVMEnvEmitEvent SpanName = "fvm.env.emitEvent" - FVMEnvGenerateUUID SpanName = "fvm.env.generateUUID" - FVMEnvDecodeArgument SpanName = "fvm.env.decodeArgument" - FVMEnvHash SpanName = "fvm.env.Hash" - FVMEnvVerifySignature SpanName = "fvm.env.verifySignature" - FVMEnvValidatePublicKey SpanName = "fvm.env.validatePublicKey" - FVMEnvBLSVerifyPOP SpanName = "fvm.env.blsVerifyPOP" - FVMEnvBLSAggregateSignatures SpanName = "fvm.env.blsAggregateSignatures" - FVMEnvBLSAggregatePublicKeys SpanName = "fvm.env.blsAggregatePublicKeys" - FVMEnvGetCurrentBlockHeight SpanName = "fvm.env.getCurrentBlockHeight" - FVMEnvGetBlockAtHeight SpanName = "fvm.env.getBlockAtHeight" - FVMEnvUnsafeRandom SpanName = "fvm.env.unsafeRandom" - FVMEnvCreateAccount SpanName = "fvm.env.createAccount" - FVMEnvAddAccountKey SpanName = "fvm.env.addAccountKey" - FVMEnvAddEncodedAccountKey SpanName = "fvm.env.addEncodedAccountKey" - FVMEnvAccountKeysCount SpanName = "fvm.env.accountKeysCount" - FVMEnvGetAccountKey SpanName = "fvm.env.getAccountKey" - FVMEnvRevokeAccountKey SpanName = "fvm.env.revokeAccountKey" - FVMEnvRevokeEncodedAccountKey SpanName = "fvm.env.revokeEncodedAccountKey" - FVMEnvUpdateAccountContractCode SpanName = "fvm.env.updateAccountContractCode" - FVMEnvGetAccountContractCode SpanName = "fvm.env.getAccountContractCode" - FVMEnvRemoveAccountContractCode SpanName = "fvm.env.removeAccountContractCode" - FVMEnvGetSigningAccounts SpanName = "fvm.env.getSigningAccounts" + FVMEnvValueExists SpanName = "fvm.env.valueExists" + FVMEnvGetValue SpanName = "fvm.env.getValue" + FVMEnvSetValue SpanName = "fvm.env.setValue" + FVMEnvAllocateSlabIndex SpanName = "fvm.env.allocateSlabIndex" + FVMEnvGetAccount SpanName = "fvm.env.getAccount" + 
FVMEnvGetStorageUsed SpanName = "fvm.env.getStorageUsed" + FVMEnvGetStorageCapacity SpanName = "fvm.env.getStorageCapacity" + FVMEnvGetAccountBalance SpanName = "fvm.env.getAccountBalance" + FVMEnvGetAccountAvailableBalance SpanName = "fvm.env.getAccountAvailableBalance" + FVMEnvGetAccountKeys SpanName = "fvm.env.getAccountKeys" + FVMEnvResolveLocation SpanName = "fvm.env.resolveLocation" + FVMEnvGetCode SpanName = "fvm.env.getCode" + FVMEnvGetAccountContractNames SpanName = "fvm.env.getAccountContractNames" + FVMEnvGetOrLoadProgram SpanName = "fvm.env.getOrLoadCachedProgram" + FVMEnvProgramLog SpanName = "fvm.env.programLog" + FVMEnvEmitEvent SpanName = "fvm.env.emitEvent" + FVMEnvEncodeEvent SpanName = "fvm.env.encodeEvent" + FVMEnvGenerateUUID SpanName = "fvm.env.generateUUID" + FVMEnvGenerateAccountLocalID SpanName = "fvm.env.generateAccountLocalID" + FVMEnvDecodeArgument SpanName = "fvm.env.decodeArgument" + FVMEnvHash SpanName = "fvm.env.Hash" + FVMEnvVerifySignature SpanName = "fvm.env.verifySignature" + FVMEnvValidatePublicKey SpanName = "fvm.env.validatePublicKey" + FVMEnvBLSVerifyPOP SpanName = "fvm.env.blsVerifyPOP" + FVMEnvBLSAggregateSignatures SpanName = "fvm.env.blsAggregateSignatures" + FVMEnvBLSAggregatePublicKeys SpanName = "fvm.env.blsAggregatePublicKeys" + FVMEnvGetCurrentBlockHeight SpanName = "fvm.env.getCurrentBlockHeight" + FVMEnvGetBlockAtHeight SpanName = "fvm.env.getBlockAtHeight" + FVMEnvRandom SpanName = "fvm.env.unsafeRandom" + FVMEnvRandomSourceHistoryProvider SpanName = "fvm.env.randomSourceHistoryProvider" + FVMEnvCreateAccount SpanName = "fvm.env.createAccount" + FVMEnvAddAccountKey SpanName = "fvm.env.addAccountKey" + FVMEnvAccountKeysCount SpanName = "fvm.env.accountKeysCount" + FVMEnvGetAccountKey SpanName = "fvm.env.getAccountKey" + FVMEnvRevokeAccountKey SpanName = "fvm.env.revokeAccountKey" + FVMEnvUpdateAccountContractCode SpanName = "fvm.env.updateAccountContractCode" + FVMEnvGetAccountContractCode SpanName = "fvm.env.getAccountContractCode" + FVMEnvRemoveAccountContractCode SpanName = "fvm.env.removeAccountContractCode" + FVMEnvGetSigningAccounts SpanName = "fvm.env.getSigningAccounts" + + FVMEVMDeployCOA SpanName = "fvm.evm.deployCOA" + FVMEVMRun SpanName = "fvm.evm.run" + FVMEVMDryRun SpanName = "fvm.evm.dryRun" + FVMEVMBatchRun SpanName = "fvm.evm.batchRun" + FVMEVMDeposit SpanName = "fvm.evm.deposit" + FVMEVMWithdraw SpanName = "fvm.evm.withdraw" + FVMEVMDeploy SpanName = "fvm.evm.deploy" + FVMEVMCall SpanName = "fvm.evm.call" FVMCadenceTrace SpanName = "fvm.cadence.trace" ) diff --git a/module/trace/noop.go b/module/trace/noop.go index 683d5f2b782..f2797579d6b 100644 --- a/module/trace/noop.go +++ b/module/trace/noop.go @@ -63,6 +63,18 @@ func (t *NoopTracer) StartCollectionSpan( return NoopSpan, ctx } +func (t *NoopTracer) StartTransactionSpan( + ctx context.Context, + entityID flow.Identifier, + spanName SpanName, + opts ...trace.SpanStartOption, +) ( + trace.Span, + context.Context, +) { + return NoopSpan, ctx +} + func (t *NoopTracer) StartSpanFromContext( ctx context.Context, operationName SpanName, diff --git a/module/trace/trace.go b/module/trace/trace.go index c734f4a699b..ac2b3580bb9 100644 --- a/module/trace/trace.go +++ b/module/trace/trace.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/rs/zerolog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" @@ -41,7 +41,7 @@ type Tracer struct { tracer trace.Tracer shutdown 
func(context.Context) error log zerolog.Logger - spanCache *lru.Cache + spanCache *lru.Cache[flow.Identifier, trace.Span] chainID string sensitivity uint } @@ -55,6 +55,36 @@ func NewTracer( ) ( *Tracer, error, +) { + ctx := context.TODO() + // OTLP trace gRPC client initialization. Connection parameters for the exporter are extracted + // from environment variables. e.g.: `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`. + // + // For more information, see OpenTelemetry specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.12.0/specification/protocol/exporter.md + traceExporter, err := otlptracegrpc.New(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create trace exporter: %w", err) + } + + return NewTracerWithExporter( + log, + serviceName, + chainID, + sensitivity, + traceExporter, + ) +} + +func NewTracerWithExporter( + log zerolog.Logger, + serviceName string, + chainID string, + sensitivity uint, + traceExporter sdktrace.SpanExporter, +) ( + *Tracer, + error, ) { ctx := context.TODO() res, err := resource.New( @@ -68,16 +98,6 @@ func NewTracer( return nil, fmt.Errorf("failed to create resource: %w", err) } - // OLTP trace gRPC client initialization. Connection parameters for the exporter are extracted - // from environment variables. e.g.: `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`. - // - // For more information, see OpenTelemetry specification: - // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.12.0/specification/protocol/exporter.md - traceExporter, err := otlptracegrpc.New(ctx) - if err != nil { - return nil, fmt.Errorf("failed to create trace exporter: %w", err) - } - tracerProvider := sdktrace.NewTracerProvider( sdktrace.WithResource(res), sdktrace.WithBatcher(traceExporter), @@ -88,7 +108,7 @@ func NewTracer( log.Debug().Err(err).Msg("tracing error") })) - spanCache, err := lru.New(int(DefaultEntityCacheSize)) + spanCache, err := lru.New[flow.Identifier, trace.Span](int(DefaultEntityCacheSize)) if err != nil { return nil, err } @@ -157,8 +177,7 @@ func (t *Tracer) entityRootSpan( trace.Span, ) { if c, ok := t.spanCache.Get(entityID); ok { - span := c.(trace.Span) - return trace.ContextWithSpan(ctx, span), span + return trace.ContextWithSpan(ctx, c), c } traceID := (*trace.TraceID)(entityID[:16]) @@ -208,6 +227,18 @@ func (t *Tracer) StartCollectionSpan( return t.startEntitySpan(ctx, collectionID, EntityTypeCollection, spanName, opts...) } +func (t *Tracer) StartTransactionSpan( + ctx context.Context, + transactionID flow.Identifier, + spanName SpanName, + opts ...trace.SpanStartOption, +) ( + trace.Span, + context.Context, +) { + return t.startEntitySpan(ctx, transactionID, EntityTypeTransaction, spanName, opts...) +} + func (t *Tracer) StartSpanFromContext( ctx context.Context, operationName SpanName, diff --git a/module/trace/trace_test.go b/module/trace/trace_test.go index c98a632d4a9..f1011589930 100644 --- a/module/trace/trace_test.go +++ b/module/trace/trace_test.go @@ -2,7 +2,7 @@ package trace import ( "context" - "math/rand" + "crypto/rand" "testing" "github.com/rs/zerolog" diff --git a/module/tracer.go b/module/tracer.go index 62454a59477..d08fba6a3bc 100644 --- a/module/tracer.go +++ b/module/tracer.go @@ -35,7 +35,7 @@ type Tracer interface { context.Context, ) - // StartCollectionSpan starts an span for a collection, built as a child of + // StartCollectionSpan starts a span for a collection, built as a child of // rootSpan. It also returns the context including this span which can be // used for nested calls.
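One practical effect of the NewTracer/NewTracerWithExporter split above is that any sdktrace.SpanExporter can now be injected, e.g. in tests, instead of the env-configured OTLP gRPC exporter. A hypothetical wiring with the OpenTelemetry stdout exporter; the service name, chain ID, and sensitivity values are placeholders:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"

	"github.com/onflow/flow-go/module/trace"
)

func main() {
	log := zerolog.New(os.Stderr)

	// any sdktrace.SpanExporter works here; stdouttrace simply prints spans
	exporter, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
	if err != nil {
		log.Fatal().Err(err).Msg("failed to create exporter")
	}

	tracer, err := trace.NewTracerWithExporter(log, "example-service", "flow-localnet", 0, exporter)
	if err != nil {
		log.Fatal().Err(err).Msg("failed to create tracer")
	}
	_ = tracer
}
```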
StartCollectionSpan( @@ -48,6 +48,16 @@ type Tracer interface { context.Context, ) + StartTransactionSpan( + ctx context.Context, + transactionID flow.Identifier, + spanName trace.SpanName, + opts ...otelTrace.SpanStartOption, + ) ( + otelTrace.Span, + context.Context, + ) + StartSpanFromContext( ctx context.Context, operationName trace.SpanName, diff --git a/module/updatable_configs.go b/module/updatable_configs.go index ab69cf673f0..cb6a2c4a67c 100644 --- a/module/updatable_configs.go +++ b/module/updatable_configs.go @@ -28,3 +28,43 @@ type SealingConfigsSetter interface { // Returns ValidationError if the new value results in an invalid sealing config. SetRequiredApprovalsForSealingConstruction(newVal uint) error } + +// ReadonlySealingLagRateLimiterConfig is an interface for the actual updatable configs module, +// but only exposes its getter methods to return the config values without exposing +// its setter methods. +// ReadonlySealingLagRateLimiterConfig contains several configs: +// - MinSealingLag (updatable) +// - MaxSealingLag (updatable) +// - HalvingInterval (updatable) +// - MinCollectionSize (updatable) +type ReadonlySealingLagRateLimiterConfig interface { + // MinSealingLag is the minimum sealing lag that the rate limiter will allow. + MinSealingLag() uint + // MaxSealingLag is the maximum sealing lag that the rate limiter will allow. + MaxSealingLag() uint + // HalvingInterval is the interval in blocks in which the halving is applied. + HalvingInterval() uint + // MinCollectionSize is the minimum collection size that the rate limiter will allow. + MinCollectionSize() uint +} + +// SealingLagRateLimiterConfig is an interface that allows the caller to update the updatable configs. +type SealingLagRateLimiterConfig interface { + ReadonlySealingLagRateLimiterConfig + // SetMinSealingLag takes a new config value and updates the config + // if the new value is valid. + // Returns ValidationError if the new value results in an invalid config. + SetMinSealingLag(value uint) error + // SetMaxSealingLag takes a new config value and updates the config + // if the new value is valid. + // Returns ValidationError if the new value results in an invalid config. + SetMaxSealingLag(value uint) error + // SetHalvingInterval takes a new config value and updates the config + // if the new value is valid. + // Returns ValidationError if the new value results in an invalid config. + SetHalvingInterval(value uint) error + // SetMinCollectionSize takes a new config value and updates the config + // if the new value is valid. + // Returns ValidationError if the new value results in an invalid config. + SetMinCollectionSize(value uint) error +} diff --git a/module/updatable_configs/collection_configs.go b/module/updatable_configs/collection_configs.go new file mode 100644 index 00000000000..743e2bab471 --- /dev/null +++ b/module/updatable_configs/collection_configs.go @@ -0,0 +1,76 @@ +package updatable_configs + +import ( + "go.uber.org/atomic" + + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/updatable_configs/validation" +) + +type bySealingLagRateLimiterConfigs struct { + minSealingLag *atomic.Uint32 + maxSealingLag *atomic.Uint32 + halvingInterval *atomic.Uint32 + minCollectionSize *atomic.Uint32 +} + +var _ module.SealingLagRateLimiterConfig = (*bySealingLagRateLimiterConfigs)(nil) + +// DefaultBySealingLagRateLimiterConfigs returns a default config for collection throttling. +// It performs binary throttling once the sealing lag reaches max sealing lag.
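To make the getter/setter contract above concrete, a short hypothetical usage of the new config; the default values and the validation behavior come from the implementation in the hunks that follow:

```go
package main

import (
	"fmt"

	"github.com/onflow/flow-go/module/updatable_configs"
)

func main() {
	cfg := updatable_configs.DefaultBySealingLagRateLimiterConfigs()

	// defaults: min sealing lag 300, max sealing lag 600
	fmt.Println(cfg.MinSealingLag(), cfg.MaxSealingLag())

	// setters validate against the other knobs' current values
	if err := cfg.SetMinSealingLag(700); err != nil {
		fmt.Println(err) // rejected: minSealingLag (700) > maxSealingLag (600)
	}
	if err := cfg.SetHalvingInterval(0); err != nil {
		fmt.Println(err) // rejected: halving interval must be greater than zero
	}

	// raising the max first makes the larger min valid
	_ = cfg.SetMaxSealingLag(1200)
	_ = cfg.SetMinSealingLag(700)
}
```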
+func DefaultBySealingLagRateLimiterConfigs() module.SealingLagRateLimiterConfig { + // the default config results in binary throttling once the sealing lag reaches max sealing lag; before that, no throttling + // is applied. The 600-block value is chosen as it is roughly 5 minutes of blocks: + // 2 blocks / second * 60 seconds * 5 minutes = 600 blocks + return &bySealingLagRateLimiterConfigs{ + minSealingLag: atomic.NewUint32(300), + maxSealingLag: atomic.NewUint32(600), + halvingInterval: atomic.NewUint32(300), + minCollectionSize: atomic.NewUint32(0), + } +} + +func (c *bySealingLagRateLimiterConfigs) MinSealingLag() uint { + return uint(c.minSealingLag.Load()) +} + +func (c *bySealingLagRateLimiterConfigs) MaxSealingLag() uint { + return uint(c.maxSealingLag.Load()) +} + +func (c *bySealingLagRateLimiterConfigs) HalvingInterval() uint { + return uint(c.halvingInterval.Load()) +} + +func (c *bySealingLagRateLimiterConfigs) MinCollectionSize() uint { + return uint(c.minCollectionSize.Load()) +} + +func (c *bySealingLagRateLimiterConfigs) SetMinSealingLag(value uint) error { + if err := validation.ValidateMinMaxSealingLag(value, c.MaxSealingLag()); err != nil { + return err + } + c.minSealingLag.Store(uint32(value)) + return nil +} + +func (c *bySealingLagRateLimiterConfigs) SetMaxSealingLag(value uint) error { + if err := validation.ValidateMinMaxSealingLag(c.MinSealingLag(), value); err != nil { + return err + } + c.maxSealingLag.Store(uint32(value)) + return nil +} + +func (c *bySealingLagRateLimiterConfigs) SetHalvingInterval(value uint) error { + if err := validation.ValidateHalvingInterval(value); err != nil { + return err + } + c.halvingInterval.Store(uint32(value)) + return nil +} + +func (c *bySealingLagRateLimiterConfigs) SetMinCollectionSize(value uint) error { + c.minCollectionSize.Store(uint32(value)) + return nil +} diff --git a/module/updatable_configs/validation/collection_configs.go b/module/updatable_configs/validation/collection_configs.go new file mode 100644 index 00000000000..4cc056b0fab --- /dev/null +++ b/module/updatable_configs/validation/collection_configs.go @@ -0,0 +1,19 @@ +package validation + +import "fmt" + +// ValidateMinMaxSealingLag validates that the minimum sealing lag is not greater than the maximum sealing lag. +func ValidateMinMaxSealingLag(minSealingLag uint, maxSealingLag uint) error { + if minSealingLag > maxSealingLag { + return fmt.Errorf("invalid sealing lag parameters: minSealingLag (%v) > maxSealingLag (%v)", minSealingLag, maxSealingLag) + } + return nil +} + +// ValidateHalvingInterval validates that the halving interval is greater than zero.
+func ValidateHalvingInterval(halvingInterval uint) error { + if halvingInterval == 0 { + return fmt.Errorf("halving interval must be greater than zero") + } + return nil +} diff --git a/module/upstream/upstream_connector.go b/module/upstream/upstream_connector.go index 36eb362e4f2..db8843cd619 100644 --- a/module/upstream/upstream_connector.go +++ b/module/upstream/upstream_connector.go @@ -20,7 +20,7 @@ import ( // upstreamConnector tries to connect the unstaked AN with at least one of the configured bootstrap access nodes type upstreamConnector struct { lm *lifecycle.LifecycleManager - bootstrapIdentities flow.IdentityList + bootstrapIdentities flow.IdentitySkeletonList logger zerolog.Logger unstakedNode p2p.LibP2PNode cancel context.CancelFunc @@ -28,7 +28,7 @@ type upstreamConnector struct { maxRetries uint64 } -func NewUpstreamConnector(bootstrapIdentities flow.IdentityList, unstakedNode p2p.LibP2PNode, logger zerolog.Logger) *upstreamConnector { +func NewUpstreamConnector(bootstrapIdentities flow.IdentitySkeletonList, unstakedNode p2p.LibP2PNode, logger zerolog.Logger) *upstreamConnector { return &upstreamConnector{ lm: lifecycle.NewLifecycleManager(), bootstrapIdentities: bootstrapIdentities, @@ -86,7 +86,7 @@ func (connector *upstreamConnector) Ready() <-chan struct{} { } // connect is run to connect to a bootstrap peer -func (connector *upstreamConnector) connect(ctx context.Context, bootstrapPeer flow.Identity) error { +func (connector *upstreamConnector) connect(ctx context.Context, bootstrapPeer flow.IdentitySkeleton) error { select { // check for a cancelled/expired context @@ -102,7 +102,7 @@ func (connector *upstreamConnector) connect(ctx context.Context, bootstrapPeer f } // try and connect to the bootstrap server - return connector.unstakedNode.AddPeer(ctx, peerAddrInfo) + return connector.unstakedNode.ConnectToPeer(ctx, peerAddrInfo) } func (connector *upstreamConnector) Done() <-chan struct{} { diff --git a/module/util/folder.go b/module/util/folder.go new file mode 100644 index 00000000000..c168a58ce35 --- /dev/null +++ b/module/util/folder.go @@ -0,0 +1,32 @@ +package util + +import ( + "fmt" + "os" +) + +// IsEmptyOrNotExists returns true if the directory does not exist or is empty. +// It returns an error if there's an issue accessing the directory. +func IsEmptyOrNotExists(path string) (bool, error) { + // Check if the path exists + info, err := os.Stat(path) + if os.IsNotExist(err) { + // Directory does not exist + return true, nil + } + if err != nil { + return false, fmt.Errorf("error stating path %s: %w", path, err) + } + if !info.IsDir() { + return false, fmt.Errorf("path %s exists but is not a directory", path) + } + + // Read directory contents + files, err := os.ReadDir(path) + if err != nil { + return false, fmt.Errorf("error reading directory %s: %w", path, err) + } + + // If the directory has no entries, it's empty + return len(files) == 0, nil +} diff --git a/module/util/log.go b/module/util/log.go index 45807b9757d..560a90710ea 100644 --- a/module/util/log.go +++ b/module/util/log.go @@ -1,25 +1,198 @@ package util import ( + "sync" + "sync/atomic" + "time" + "github.com/rs/zerolog" ) -// LogProgress takes a total and return function such that when called with a 0-based index -// it prints the progress from 0% to 100% to indicate the index from 0 to (total - 1) has been -// processed.
-// useful to report the progress of processing the index from 0 to (total - 1) -func LogProgress(msg string, total int, logger *zerolog.Logger) func(currentIndex int) { - logThreshold := float64(0) - return func(currentIndex int) { +// LogProgressFunc is a function that can be called to add to the progress. +// The function can be called concurrently. addProgress is the amount to add to the progress. +// It may be any integer type; negative values are ignored. +type LogProgressFunc[T int | uint | int32 | uint32 | uint64 | int64] func(addProgress T) + +type LogProgressConfig[T int | uint | int32 | uint32 | uint64 | int64] struct { + // message is part of the messages that will be logged. + // The full template is: `%s progress %d/%d (%.1f%%) total time %s`. + message string + // total is the total value of progress expected. + // Once total has been added via the LogProgressFunc, the progress is considered to be 100%. + total T + // noDataLogDuration. If the last log line was more than this duration ago and a new data point is added, a new log line is logged. + // No line is logged if no data is received. The minimum resolution for noDataLogDuration is 1 millisecond. + noDataLogDuration time.Duration + // ticks is the number of increments to log at. If total is > 0 there will be at least 2 ticks: one at 0 and one at total. + // If you want to log every 10%, set ticks to 11 (one tick is at 0%). + // If the number of ticks is more than total, it will be set to total + 1. + ticks uint64 +} + +// DefaultLogProgressConfig returns a LogProgressConfig with default values. +// The default values will log every 10% and will log an additional line if new data is received +// after no data has been received for 1 minute. +func DefaultLogProgressConfig[T int | uint | int32 | uint32 | uint64 | int64]( + message string, + total T, +) LogProgressConfig[T] { + return NewLogProgressConfig[T]( + message, + total, + 60*time.Second, + 10, + ) +} + +// NewLogProgressConfig creates and returns a new LogProgressConfig with the specified message, total, duration, and ticks. +// The duration is clamped to a minimum of 1 millisecond. +// The number of ticks is the number of increments to log at. Logging at 0% is always done. +// If you want to log at 10% increments, set ticks to 10. +func NewLogProgressConfig[T int | uint | int32 | uint32 | uint64 | int64]( + message string, + total T, + noDataLogDuration time.Duration, + ticks uint64, +) LogProgressConfig[T] { + // sanitize total + if total < 0 { + total = 0 + } + + // add the tick at 0% + ticks = ticks + 1 + + // sanitize ticks + // number of ticks should be at most total + 1 + if uint64(total+1) < ticks { + ticks = uint64(total + 1) + } + + // sanitize noDataLogDuration + if noDataLogDuration < time.Millisecond { + noDataLogDuration = time.Millisecond + } + + return LogProgressConfig[T]{ + message: message, + total: total, + noDataLogDuration: noDataLogDuration, + ticks: ticks, + } +} + +// LogProgress takes a LogProgressConfig and returns a function that, when called, adds the given +// number to the progress and logs the progress at the configured increments, or when there has been +// a time gap between progress updates. +// The returned function can be called concurrently. +// An eta is also logged, but it assumes that the progress is linear.
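Before the implementation below, a usage sketch; the message and total are arbitrary, and the expected log lines follow from the tests at the end of this diff. The returned function is additive and safe for concurrent use, and with the default config it logs a line at every 10% plus a catch-up line when data arrives after a quiet minute:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/module/util"
)

func main() {
	log := zerolog.New(os.Stderr)

	// logs "indexing blocks progress 0/40 (0.0%) ...", then every 10%,
	// finishing with "... progress 40/40 (100.0%) total time ..."
	progress := util.LogProgress(log, util.DefaultLogProgressConfig("indexing blocks", 40))

	for i := 0; i < 40; i++ {
		// ... do one unit of work ...
		progress(1)
	}
}
```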
+func LogProgress[T int | uint | int32 | uint32 | uint64 | int64]( + log zerolog.Logger, + config LogProgressConfig[T], +) LogProgressFunc[T] { + + start := time.Now().UnixMilli() + var lastDataTime atomic.Int64 + lastDataTime.Store(start) + var currentIndex atomic.Uint64 + + // mutex to protect logProgress from concurrent calls + // mutex is technically only needed when the underlying io.Writer for the provided zerolog.Logger + // is not thread safe. However, we lock conservatively because we intend to call logProgress infrequently in normal + // usage anyway. + var mux sync.Mutex + + total := uint64(config.total) + logProgress := func(current uint64) { + mux.Lock() + defer mux.Unlock() + + elapsed := time.Since(time.UnixMilli(start)) + elapsedString := elapsed.Round(1 * time.Second).String() + + percentage := float64(100) - if total > 0 { - percentage = (float64(currentIndex+1) / float64(total)) * 100. // currentIndex+1 assuming zero based indexing + if config.total > 0 { + percentage = (float64(current) / float64(config.total)) * 100. + } + + etaString := "unknown" + if percentage > 0 { + eta := time.Duration(float64(elapsed) / percentage * (100 - percentage)) + if eta < 0 { + eta = 0 + } + etaString = eta.Round(1 * time.Second).String() + } + + if current < total { + log.Info().Msgf("%s progress %d/%d (%.1f%%) elapsed: %s, eta %s", config.message, current, config.total, percentage, elapsedString, etaString) + } else { + log.Info().Msgf("%s progress %d/%d (%.1f%%) total time %s", config.message, current, config.total, percentage, elapsedString) + } + } + + // log 0% progress + logProgress(0) + + // sanitize inputs and calculate increment + ticksIncludingZero := config.ticks + if ticksIncludingZero < 2 { + ticksIncludingZero = 2 + } + ticks := ticksIncludingZero - 1 + + increment := total / ticks + if increment == 0 { + increment = 1 + } + + // increment doesn't necessarily divide config.total + // Because we want 100% to mean 100%, we need to deduct this overflow from the current value + // before checking if it is a multiple of the increment. + incrementsOverflow := total % increment + noLogDurationMillis := config.noDataLogDuration.Milliseconds() + + return func(add T) { + if total == 0 { + return + } + if add < 0 { + return + } + diff := uint64(add) + now := time.Now().UnixMilli() + + // it can technically happen that current > total.
+		// the progress using the calculated increments
+		current := currentIndex.Add(diff)
+		lastTime := lastDataTime.Swap(now)
+
+		// if the diff went over one or more increments, log the progress for each increment
+		fromTick := uint64(0)
+		if current-diff >= incrementsOverflow {
+			fromTick = (current - diff - incrementsOverflow) / increment
+		}
+		toTick := uint64(0)
+		if current >= incrementsOverflow {
+			toTick = (current - incrementsOverflow) / increment
+		}
+
+		if fromTick == toTick && now-lastTime > noLogDurationMillis {
+			// no data for a while, log whatever we are at now
+			logProgress(current)
+			return
+		}
+
+		if toTick <= fromTick {
+			return
 		}

-		// report every 10 percent
-		if percentage >= logThreshold {
-			logger.Info().Msgf("%s progress: %v percent", msg, logThreshold)
-			logThreshold += 10
+		for t := fromTick; t < toTick; t++ {
+			// (t+1) because we want to log the progress for the increment reached
+			// not the increment past
+			current := increment*(t+1) + incrementsOverflow
+			logProgress(current)
 		}
 	}
 }
diff --git a/module/util/log_test.go b/module/util/log_test.go
index 9d1d4851dcd..da9f96fcace 100644
--- a/module/util/log_test.go
+++ b/module/util/log_test.go
@@ -2,58 +2,398 @@ package util

 import (
 	"bytes"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
 	"testing"
+	"time"

 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/require"
 )

 func TestLogProgress40(t *testing.T) {
+	t.Parallel()
+
 	buf := bytes.NewBufferString("")
 	lg := zerolog.New(buf)
 	total := 40
-	logger := LogProgress("test", total, &lg)
+	logger := LogProgress(
+		lg,
+		DefaultLogProgressConfig(
+			"test",
+			total,
+		),
+	)
+	for i := 0; i < total; i++ {
+		logger(1)
+	}
+
+	expectedLogs := []string{
+		`test progress 0/40 (0.0%)`,
+		`test progress 4/40 (10.0%)`,
+		`test progress 8/40 (20.0%)`,
+		`test progress 12/40 (30.0%)`,
+		`test progress 16/40 (40.0%)`,
+		`test progress 20/40 (50.0%)`,
+		`test progress 24/40 (60.0%)`,
+		`test progress 28/40 (70.0%)`,
+		`test progress 32/40 (80.0%)`,
+		`test progress 36/40 (90.0%)`,
+		`test progress 40/40 (100.0%)`,
+	}
+
+	for _, log := range expectedLogs {
+		require.Contains(t, buf.String(), log, total)
+	}
+}
+
+func TestLogProgress40By3(t *testing.T) {
+	t.Parallel()
+
+	buf := bytes.NewBufferString("")
+	lg := zerolog.New(buf)
+	total := 40
+	logger := LogProgress(
+		lg,
+		DefaultLogProgressConfig(
+			"test",
+			total,
+		),
+	)
+	for i := 0; i < total/3; i++ {
+		logger(3)
+	}
+	logger(1)
+
+	expectedLogs := []string{
+		`test progress 0/40 (0.0%)`,
+		`test progress 4/40 (10.0%)`,
+		`test progress 8/40 (20.0%)`,
+		`test progress 12/40 (30.0%)`,
+		`test progress 16/40 (40.0%)`,
+		`test progress 20/40 (50.0%)`,
+		`test progress 24/40 (60.0%)`,
+		`test progress 28/40 (70.0%)`,
+		`test progress 32/40 (80.0%)`,
+		`test progress 36/40 (90.0%)`,
+		`test progress 40/40 (100.0%)`,
+	}
+
+	for _, log := range expectedLogs {
+		require.Contains(t, buf.String(), log, total)
+	}
+}
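To make the increment arithmetic concrete before the next test: with total = 43 and the default 10 ticks (an extra tick at 0 is added internally), the increment is 43/10 = 4 and the overflow is 43 % 4 = 3, so after the 0% line progress is logged at 7, 11, 15, ..., 43 — exactly the values asserted below. An editorial sketch of the arithmetic, not part of the diff:

package main

import "fmt"

func main() {
	total, ticks := uint64(43), uint64(10)
	increment := total / ticks    // 43/10 = 4
	overflow := total % increment // 43%4 = 3
	for t := uint64(0); t < ticks; t++ {
		// progress values at which a line is logged (after the 0% line)
		fmt.Println(increment*(t+1) + overflow) // 7, 11, 15, ..., 43
	}
}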
+func TestLogProgress43(t *testing.T) {
+	t.Parallel()
+
+	buf := bytes.NewBufferString("")
+	lg := zerolog.New(buf)
+	total := 43
+	logger := LogProgress(
+		lg,
+		DefaultLogProgressConfig(
+			"test",
+			total,
+		),
+	)
 	for i := 0; i < total; i++ {
-		logger(i)
+		logger(1)
+	}
+
+	expectedLogs := []string{
+		`test progress 0/43 (0.0%)`,
+		`test progress 7/43`,
+		`test progress 11/43`,
+		`test progress 15/43`,
+		`test progress 19/43`,
+		`test progress 23/43`,
+		`test progress 27/43`,
+		`test progress 31/43`,
+		`test progress 35/43`,
+		`test progress 39/43`,
+		`test progress 43/43 (100.0%)`,
+	}
+
+	for _, log := range expectedLogs {
+		require.Contains(t, buf.String(), log, total)
+	}
+}
+
+func TestLogProgress43By3(t *testing.T) {
+	t.Parallel()
+
+	buf := bytes.NewBufferString("")
+	lg := zerolog.New(buf)
+	total := 43
+	logger := LogProgress(
+		lg,
+		DefaultLogProgressConfig(
+			"test",
+			total,
+		),
+	)
+	for i := 0; i < total/3; i++ {
+		logger(3)
+	}
+	logger(1)
+
+	expectedLogs := []string{
+		`test progress 0/43 (0.0%)`,
+		`test progress 7/43`,
+		`test progress 11/43`,
+		`test progress 15/43`,
+		`test progress 19/43`,
+		`test progress 23/43`,
+		`test progress 27/43`,
+		`test progress 31/43`,
+		`test progress 35/43`,
+		`test progress 39/43`,
+		`test progress 43/43 (100.0%)`,
+	}
+
+	for _, log := range expectedLogs {
+		require.Contains(t, buf.String(), log, total)
+	}
+}
+
+func TestLog100WhenOvershooting(t *testing.T) {
+	t.Parallel()
+
+	buf := bytes.NewBufferString("")
+	lg := zerolog.New(buf)
+	total := 100
+	logger := LogProgress(
+		lg,
+		DefaultLogProgressConfig(
+			"test",
+			total,
+		),
+	)
+	for i := 0; i < total/3+1; i++ {
+		logger(3)
+	}
+
+	expectedLogs := []string{
+		`test progress 0/100 (0.0%)`,
+		`test progress 100/100 (100.0%)`,
 	}

-	expectedLogs :=
-		`{"level":"info","message":"test progress: 0 percent"}
-{"level":"info","message":"test progress: 10 percent"}
-{"level":"info","message":"test progress: 20 percent"}
-{"level":"info","message":"test progress: 30 percent"}
-{"level":"info","message":"test progress: 40 percent"}
-{"level":"info","message":"test progress: 50 percent"}
-{"level":"info","message":"test progress: 60 percent"}
-{"level":"info","message":"test progress: 70 percent"}
-{"level":"info","message":"test progress: 80 percent"}
-{"level":"info","message":"test progress: 90 percent"}
-{"level":"info","message":"test progress: 100 percent"}
-`
-	require.Equal(t, expectedLogs, buf.String())
+	for _, log := range expectedLogs {
+		require.Contains(t, buf.String(), log, total)
+	}
+	lines := strings.Count(buf.String(), "\n")
+	// one line at 0% plus one for each 10% increment
+	require.Equal(t, 11, lines)
 }

 func TestLogProgress1000(t *testing.T) {
+	t.Parallel()
+
 	for total := 11; total < 1000; total++ {
 		buf := bytes.NewBufferString("")
 		lg := zerolog.New(buf)
-		logger := LogProgress("test", total, &lg)
+		logger := LogProgress(
+			lg,
+			DefaultLogProgressConfig(
+				"test",
+				total,
+			),
+		)
+
 		for i := 0; i < total; i++ {
-			logger(i)
+			logger(1)
+		}
+
+		expectedLogs := []string{
+			fmt.Sprintf(`test progress 0/%d`, total),
+			fmt.Sprintf(`test progress %d/%d (100.0%%)`, total, total),
+		}
+
+		for _, log := range expectedLogs {
+			require.Contains(t, buf.String(), log, total)
 		}
+	}
+}
+
+func TestLogProgressWhenTotalIs0(t *testing.T) {
+	t.Parallel()
+
+	buf := bytes.NewBufferString("")
+	lg := zerolog.New(buf)
+	logger := LogProgress(
+		lg,
+		DefaultLogProgressConfig(
+			"test",
+			0,
+		),
+	)
+
+	for i := 0; i < 10; i++ {
+		logger(1)
+	}
+
+	expectedLogs := []string{
+		fmt.Sprintf(`test progress %d/%d (100.0%%)`, 0, 0),
+	}
+
+	for _, log := range expectedLogs {
+		require.Contains(t, buf.String(), log, 0)
+	}
+
+	lines := strings.Count(buf.String(), "\n")
+	// log only once
+	require.Equal(t, 1, lines)
+}
+
+func TestLogProgressMoreTicksThanTotal(t *testing.T) {
+	t.Parallel()
+
+	buf := bytes.NewBufferString("")
+	lg := zerolog.New(buf)
+	logger := LogProgress(
+		lg,
+		DefaultLogProgressConfig(
+			"test",
+			5,
+		),
+	)
+
+	for i := 0; i < 5; i++ {
+		logger(1)
+	}
+
+	expectedLogs := []string{
+		fmt.Sprintf(`test progress %d/%d (100.0%%)`, 5, 5),
+	}
+
+	for _, log := range expectedLogs {
+		require.Contains(t, buf.String(), log, 0)
+	}
+
+	lines := strings.Count(buf.String(), "\n")
+	// one log line per tick; the number of ticks is capped at total + 1 = 6
+	require.Equal(t, 6, lines)
+}
+
+func TestLogProgressContinueLoggingAfter100(t *testing.T) {
+	t.Parallel()
+
+	buf := bytes.NewBufferString("")
+	lg := zerolog.New(buf)
+	logger := LogProgress(
+		lg,
+		DefaultLogProgressConfig(
+			"test",
+			100,
+		),
+	)
+
+	for i := 0; i < 15; i++ {
+		logger(10)
+	}
+
+	expectedLogs := []string{
+		fmt.Sprintf(`test progress %d/%d (100.0%%)`, 100, 100),
+		fmt.Sprintf(`test progress %d/%d (110.0%%)`, 110, 100),
+		fmt.Sprintf(`test progress %d/%d (150.0%%)`, 150, 100),
+	}
+
+	for _, log := range expectedLogs {
+		require.Contains(t, buf.String(), log, 0)
+	}
+
+	lines := strings.Count(buf.String(), "\n")
+	// one line at 0%, every 10% up to 100%, plus one per overshooting increment
+	require.Equal(t, 16, lines)
+}
+
+func TestLogProgressNoDataForAWhile(t *testing.T) {
+	t.Parallel()
+
+	total := 1000
+
+	buf := bytes.NewBufferString("")
+	lg := zerolog.New(buf)
+	logger := LogProgress(
+		lg,
+		NewLogProgressConfig[uint64](
+			"test",
+			uint64(total),
+			1*time.Millisecond,
+			10,
+		),
+	)
+
+	for i := 0; i < total; i++ {
+		// somewhere in the middle pause for a bit
+		if i == 13 {
+			<-time.After(3 * time.Millisecond)
+		}
+
+		logger(1)
+	}
+
+	expectedLogs := []string{
+		fmt.Sprintf(`test progress 0/%d`, total),
+		fmt.Sprintf(`test progress %d/%d (100.0%%)`, total, total),
+	}
+
+	for _, log := range expectedLogs {
+		require.Contains(t, buf.String(), log, total)
+	}
+	lines := strings.Count(buf.String(), "\n")
+	// one at 0% plus every 10%, + 1 for the "no data in a while" log
+	require.Equal(t, 12, lines)
+}
+
+func TestLogProgressMultipleGoroutines(t *testing.T) {
+	t.Parallel()
+
+	total := 1000
+
+	buf := bytes.NewBufferString("")
+	lg := zerolog.New(buf)
+	logger := LogProgress(
+		lg,
+		DefaultLogProgressConfig(
+			"test",
+			total,
+		),
+	)
+
+	wg := sync.WaitGroup{}
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for j := 0; j < 100; j++ {
+				logger(1)
+			}
+		}()
+	}
+
+	wg.Wait()
+
+	expectedLogs := []string{
+		fmt.Sprintf(`test progress 0/%d`, total),
+		fmt.Sprintf(`test progress %d/%d (100.0%%)`, total, total),
+	}
+
+	lines := strings.Count(buf.String(), "\n")
+	// one at 0% plus one for each 10% increment
+	require.Equal(t, 11, lines)
+
+	for _, log := range expectedLogs {
+		require.Contains(t, buf.String(), log, total)
+	}
+}

-	expectedLogs := `{"level":"info","message":"test progress: 0 percent"}
-{"level":"info","message":"test progress: 10 percent"}
-{"level":"info","message":"test progress: 20 percent"}
-{"level":"info","message":"test progress: 30 percent"}
-{"level":"info","message":"test progress: 40 percent"}
-{"level":"info","message":"test progress: 50 percent"}
-{"level":"info","message":"test progress: 60 percent"}
-{"level":"info","message":"test progress: 70 percent"}
-{"level":"info","message":"test progress: 80 percent"}
-{"level":"info","message":"test progress: 90 percent"}
-{"level":"info","message":"test progress: 100 percent"}
-`
-	require.Equal(t, expectedLogs, buf.String(), total)
+func BenchmarkLogProgress(b *testing.B) {
+	l := LogProgress(zerolog.New(io.Discard), DefaultLogProgressConfig("test", b.N))
+	for i := 0; i < b.N; i++ {
+		l(1)
 	}
 }
diff --git a/module/util/util.go b/module/util/util.go
index 1be65b3d9da..55a24fc19d1 100644
--- a/module/util/util.go
+++ b/module/util/util.go
@@ -2,6 +2,7 @@ package util

 import (
 	"context"
+	"math"
 	"reflect"

 	"github.com/onflow/flow-go/module"
@@ -185,3 +186,25 @@ func DetypeSlice[T any](typedSlice []T) []any {
 	}
 	return untypedSlice
 }
+
+// SampleN computes a percentage of the given number 'n', and returns the result as an unsigned integer.
+// If the calculated sample is greater than the provided 'max' value, it returns the ceil of 'max'.
+// If 'n' is less than or equal to 0, it returns 0.
+//
+// Parameters:
+// - n: The input number, used as the base to compute the percentage.
+// - max: The maximum value that the computed sample should not exceed.
+// - percentage: The percentage (in range 0.0 to 1.0) to be applied to 'n'.
+//
+// Returns:
+// - The computed sample as an unsigned integer, subject to the constraints above.
+func SampleN(n int, max, percentage float64) uint {
+	if n <= 0 {
+		return 0
+	}
+	sample := float64(n) * percentage
+	if sample > max {
+		sample = max
+	}
+	return uint(math.Ceil(sample))
+}
diff --git a/module/util/util_test.go b/module/util/util_test.go
index 7d3069573e3..8d0f42ed1ed 100644
--- a/module/util/util_test.go
+++ b/module/util/util_test.go
@@ -303,3 +303,38 @@ func TestDetypeSlice(t *testing.T) {
 		assert.Equal(t, slice[i], detyped[i].(int))
 	}
 }
+
+// TestSampleN contains a series of test cases to validate the behavior of the util.SampleN function.
+// The test cases cover different scenarios:
+//  1. "returns expected sample": Checks if the function returns the expected sample value when
+//     given a valid input.
+//  2. "returns max value when sample greater than max": Verifies that the function returns the
+//     maximum allowed value when the calculated sample exceeds the maximum limit.
+//  3. "returns 0 when n is less than or equal to 0": Asserts that the function returns 0 when
+//     the input 'n' is less than or equal to 0, which represents an invalid input.
+func TestSampleN(t *testing.T) {
+	t.Run("returns expected sample", func(t *testing.T) {
+		n := 8
+		max := 5.0
+		percentage := .5
+		sample := util.SampleN(n, max, percentage)
+		assert.Equal(t, uint(4), sample)
+	})
+	t.Run("returns max value when sample greater than max", func(t *testing.T) {
+		n := 20
+		max := 5.0
+		percentage := .5
+		sample := util.SampleN(n, max, percentage)
+		assert.Equal(t, uint(max), sample)
+	})
+	t.Run("returns 0 when n is less than or equal to 0", func(t *testing.T) {
+		n := 0
+		max := 5.0
+		percentage := .5
+		sample := util.SampleN(n, max, percentage)
+		assert.Equal(t, uint(0), sample, "sample returned should be 0 when n == 0")
+		n = -1
+		sample = util.SampleN(n, max, percentage)
+		assert.Equal(t, uint(0), sample, "sample returned should be 0 when n < 0")
+	})
+}
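To illustrate SampleN's clamp-then-ceil behavior concretely, here is an editorial sketch mirroring the test cases above (not part of the diff):

package main

import (
	"fmt"

	"github.com/onflow/flow-go/module/util"
)

func main() {
	fmt.Println(util.SampleN(8, 5.0, 0.5))  // 4: ceil(8 * 0.5), below max
	fmt.Println(util.SampleN(20, 5.0, 0.5)) // 5: 20 * 0.5 = 10 is clamped to max
	fmt.Println(util.SampleN(0, 5.0, 0.5))  // 0: non-positive n yields 0
}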
diff --git a/module/validation/common.go b/module/validation/common.go
index fda8ea42e9e..135c0b6efbb 100644
--- a/module/validation/common.go
+++ b/module/validation/common.go
@@ -1,18 +1,24 @@
 package validation

 import (
+	"errors"
 	"fmt"

 	"github.com/onflow/flow-go/engine"
 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/filter"
+	"github.com/onflow/flow-go/module"
+	protocolstate "github.com/onflow/flow-go/state"
 	"github.com/onflow/flow-go/state/protocol"
 )

 // identityForNode ensures that `nodeID` is an authorized member of the network
 // at the given block and returns the corresponding node's full identity.
 // Error returns:
-// - sentinel engine.InvalidInputError is nodeID is NOT an authorized member of the network
-// - generic error indicating a fatal internal problem
+// - engine.InvalidInputError if nodeID is NOT an authorized member of the network at the given block
+// - module.UnknownBlockError if blockID is not known to the protocol state
+//
+// All other errors are potential symptoms of critical internal failures, such as bugs or state corruption.
 func identityForNode(state protocol.State, blockID flow.Identifier, nodeID flow.Identifier) (*flow.Identity, error) {
 	// get the identity of the origin node
 	identity, err := state.AtBlockID(blockID).Identity(nodeID)
@@ -20,8 +26,10 @@ func identityForNode(state protocol.State, blockID flow.Identifier, nodeID flow.
 		if protocol.IsIdentityNotFound(err) {
 			return nil, engine.NewInvalidInputErrorf("unknown node identity: %w", err)
 		}
-		// unexpected exception
-		return nil, fmt.Errorf("failed to retrieve node identity: %w", err)
+		if errors.Is(err, protocolstate.ErrUnknownSnapshotReference) {
+			return nil, module.NewUnknownBlockError("block %v is unknown: %w", blockID, err)
+		}
+		return nil, fmt.Errorf("unexpected exception retrieving node identity: %w", err)
 	}

 	return identity, nil
@@ -30,10 +38,10 @@ func identityForNode(state protocol.State, blockID flow.Identifier, nodeID flow.
 // ensureNodeHasWeightAndRole checks whether, at the given block, `nodeID`
 // - has _positive_ weight
 // - and has the expected role
-// - and is not ejected
+// - is an active participant of the current epoch and not ejected (i.e. has `EpochParticipationStatusActive`)
 //
-// Returns the following errors:
-// - sentinel engine.InvalidInputError if any of the above-listed conditions are violated.
+// This function is side-effect free. The only possible error it returns is of type
+// - engine.InvalidInputError if any of the above-listed conditions are violated.
 //
 // Note: the method receives the identity as proof of its existence.
 // Therefore, we consider the case where the respective identity is unknown to the
@@ -43,16 +51,13 @@ func ensureNodeHasWeightAndRole(identity *flow.Identity, expectedRole flow.Role)
 	if identity.Role != expectedRole {
 		return engine.NewInvalidInputErrorf("expected node %x to have role %s but got %s", identity.NodeID, expectedRole, identity.Role)
 	}
-	// check if the identity has non-zero weight
-	if identity.Weight == 0 {
-		return engine.NewInvalidInputErrorf("node has zero weight (%x)", identity.NodeID)
+	if identity.InitialWeight == 0 {
+		return engine.NewInvalidInputErrorf("node %x has zero weight", identity.NodeID)
 	}
-
-	// check that node was not ejected
-	if identity.Ejected {
-		return engine.NewInvalidInputErrorf("node was ejected from network (%x)", identity.NodeID)
+	// check if the identity is a valid epoch participant (active in the current epoch and not ejected)
+	if !filter.IsValidCurrentEpochParticipant(identity) {
+		return engine.NewInvalidInputErrorf("node %x is not an active participant, instead has status: %s", identity.NodeID, identity.EpochParticipationStatus.String())
 	}
-
 	return nil
 }
diff --git a/module/validation/receipt_validator.go b/module/validation/receipt_validator.go
index dae906a982a..ad7aa0a9005 100644
--- a/module/validation/receipt_validator.go
+++ b/module/validation/receipt_validator.go
@@ -4,18 +4,20 @@ import (
 	"errors"
 	"fmt"

-	"github.com/onflow/flow-go/crypto/hash"
+	"github.com/onflow/crypto/hash"
+
 	"github.com/onflow/flow-go/engine"
 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/irrecoverable"
 	"github.com/onflow/flow-go/module/signature"
-	"github.com/onflow/flow-go/state"
 	"github.com/onflow/flow-go/state/fork"
 	"github.com/onflow/flow-go/state/protocol"
 	"github.com/onflow/flow-go/storage"
 )

 // receiptValidator holds all needed context for checking
-// receipt validity against current protocol state.
+// receipt validity against the current protocol state.
 type receiptValidator struct {
 	headers storage.Headers
 	seals   storage.Seals
@@ -25,12 +27,14 @@ type receiptValidator struct {
 	signatureHasher hash.Hasher
 }

+var _ module.ReceiptValidator = (*receiptValidator)(nil)
+
 func NewReceiptValidator(state protocol.State,
 	headers storage.Headers,
 	index storage.Index,
 	results storage.ExecutionResults,
 	seals storage.Seals,
-) *receiptValidator {
+) module.ReceiptValidator {
 	rv := &receiptValidator{
 		state:   state,
 		headers: headers,
@@ -39,80 +43,104 @@ func NewReceiptValidator(state protocol.State,
 		signatureHasher: signature.NewBLSHasher(signature.ExecutionReceiptTag),
 		seals:           seals,
 	}
-
 	return rv
 }

-func (v *receiptValidator) verifySignature(receipt *flow.ExecutionReceiptMeta, nodeIdentity *flow.Identity) error {
-	id := receipt.ID()
-	valid, err := nodeIdentity.StakingPubKey.Verify(receipt.ExecutorSignature, id[:], v.signatureHasher)
-	if err != nil {
-		return fmt.Errorf("failed to verify signature: %w", err)
+// verifySignature ensures that the given receipt has a valid signature from nodeIdentity.
+// Expected errors during normal operations:
+// - engine.InvalidInputError if the signature is invalid
+func (v *receiptValidator) verifySignature(receipt *flow.ExecutionReceiptStub, nodeIdentity *flow.Identity) error {
+	unsignedReceiptID := receipt.UnsignedExecutionReceiptStub.ID()
+	valid, err := nodeIdentity.StakingPubKey.Verify(receipt.ExecutorSignature, unsignedReceiptID[:], v.signatureHasher)
+	if err != nil { // Verify(..) returns (false,nil) for invalid signature. Any error indicates unexpected internal failure.
+		return irrecoverable.NewExceptionf("failed to verify signature: %w", err)
 	}
-
 	if !valid {
 		return engine.NewInvalidInputErrorf("invalid signature for (%x)", nodeIdentity.NodeID)
 	}
+	return nil
+}
+
+// verifyChunkServiceEvents enforces that the [flow.Chunk.ServiceEventCount] fields are protocol compliant:
+// The sum over all chunks must equal the number of elements in [flow.ExecutionResult.ServiceEvents]
+// Expected errors during normal operations:
+// - engine.InvalidInputError if the result has malformed chunks
+func (v *receiptValidator) verifyChunkServiceEvents(result *flow.ExecutionResult) error {
+	chunkServiceEventCountTotal := 0
+	for _, chunk := range result.Chunks {
+		chunkServiceEventCountTotal += int(chunk.ServiceEventCount)
+	}
+	if chunkServiceEventCountTotal != len(result.ServiceEvents) {
+		return engine.NewInvalidInputErrorf("invalid chunk format: service event count mismatch (%d != %d)",
+			chunkServiceEventCountTotal, len(result.ServiceEvents))
+	}
 	return nil
 }

+// verifyChunksFormat enforces that:
+// - chunks are indexed without any gaps starting from zero
+// - each chunk references the same blockID as the top-level execution result
+// - the execution result has the correct number of chunks in accordance with the number of collections in the executed block
+//
+// Expected errors during normal operations:
+// - engine.InvalidInputError if the result has malformed chunks
+// - module.UnknownBlockError when the executed block is unknown
 func (v *receiptValidator) verifyChunksFormat(result *flow.ExecutionResult) error {
 	for index, chunk := range result.Chunks.Items() {
 		if uint(index) != chunk.CollectionIndex {
 			return engine.NewInvalidInputErrorf("invalid CollectionIndex, expected %d got %d", index, chunk.CollectionIndex)
 		}
-
+		if uint64(index) != chunk.Index {
+			return engine.NewInvalidInputErrorf("invalid Chunk.Index, expected %d got %d", index, chunk.Index)
+		}
 		if chunk.BlockID != result.BlockID {
 			return engine.NewInvalidInputErrorf("invalid blockID, expected %v got %v", result.BlockID, chunk.BlockID)
 		}
 	}

-	// we create one chunk per collection, plus the
-	// system chunk. so we can check if the chunk number matches with the
-	// number of guarantees plus one; this will ensure the execution receipt
-	// cannot lie about having less chunks and having the remaining ones
-	// approved
-	requiredChunks := 1 // system chunk: must exist for block's ExecutionResult, even if block payload itself is empty
-
-	index, err := v.index.ByBlockID(result.BlockID)
+	// For a block containing k collections, the Flow protocol prescribes that a valid execution result
+	// must contain k+1 chunks. Specifically, we have one chunk per collection plus the system chunk.
+	// The system chunk must exist, even if block payload itself is empty.
+ index, err := v.index.ByBlockID(result.BlockID) // returns `storage.ErrNotFound` for unknown BlockID if err != nil { - // the mutator will always create payload index for a valid block - return fmt.Errorf("could not find payload index for executed block %v: %w", result.BlockID, err) + if errors.Is(err, storage.ErrNotFound) { + return module.NewUnknownBlockError("could not find payload index for executed block %v: %w", result.BlockID, err) + } + return irrecoverable.NewExceptionf("unexpected failure retrieving index for executed block %v: %w", result.BlockID, err) } - - requiredChunks += len(index.CollectionIDs) - + requiredChunks := 1 + len(index.GuaranteeIDs) // one chunk per collection + 1 system chunk if result.Chunks.Len() != requiredChunks { - return engine.NewInvalidInputErrorf("invalid number of chunks, expected %d got %d", - requiredChunks, result.Chunks.Len()) + return engine.NewInvalidInputErrorf("invalid number of chunks, expected %d got %d", requiredChunks, result.Chunks.Len()) } - return nil -} + if err := v.verifyChunkServiceEvents(result); err != nil { + return fmt.Errorf("invalid chunk service events: %w", err) + } -func (v *receiptValidator) fetchResult(resultID flow.Identifier) (*flow.ExecutionResult, error) { - prevResult, err := v.results.ByID(resultID) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - return nil, engine.NewUnverifiableInputError("cannot retrieve result: %v", resultID) + // We have at least one chunk, check chunk state consistency + chunks := result.Chunks.Items() + for i := range len(chunks) - 1 { + if chunks[i].EndState != chunks[i+1].StartState { + return engine.NewInvalidInputErrorf("chunk state mismatch at index %v, EndState %v but next StartState %v", i, chunks[i].EndState, chunks[i+1].StartState) } - return nil, err } - return prevResult, nil + return nil } // subgraphCheck enforces that result forms a valid sub-graph: -// Let R1 be a result that references block A, and R2 be R1's parent result. -// The execution results form a valid subgraph if and only if R2 references -// A's parent. +// Let R1 be a result that references block A, and R2 be R1's parent result. The +// execution results form a valid subgraph if and only if R2 references A's parent. 
+//
+// Expected errors during normal operations:
+// - engine.InvalidInputError if result does not form a valid sub-graph
+// - module.UnknownBlockError when the executed block is unknown
 func (v *receiptValidator) subgraphCheck(result *flow.ExecutionResult, prevResult *flow.ExecutionResult) error {
-	block, err := v.state.AtBlockID(result.BlockID).Head()
+	block, err := v.state.AtBlockID(result.BlockID).Head() // returns `storage.ErrNotFound` for unknown BlockID
 	if err != nil {
-		if errors.Is(err, state.ErrUnknownSnapshotReference) {
-			return engine.NewInvalidInputErrorf("no block found %v %w", result.BlockID, err)
+		if errors.Is(err, storage.ErrNotFound) {
+			return module.NewUnknownBlockError("executed block %v unknown: %w", result.BlockID, err)
 		}
-		return err
+		return irrecoverable.NewExceptionf("unexpected failure retrieving executed block %v: %w", result.BlockID, err)
 	}

 	// validating the PreviousResultID field
@@ -124,12 +152,13 @@ func (v *receiptValidator) subgraphCheck(result *flow.ExecutionResult, prevResul
 	if prevResult.BlockID != block.ParentID {
 		return engine.NewInvalidInputErrorf("invalid block for previous result %v", prevResult.BlockID)
 	}
-
 	return nil
 }

 // resultChainCheck enforces that the end state of the parent result
-// matches the current result's start state
+// matches the current result's start state.
+// This function is side-effect free. The only possible error it returns is of type
+// - engine.InvalidInputError if the starting state of the result is inconsistent with the previous result's end state
 func (v *receiptValidator) resultChainCheck(result *flow.ExecutionResult, prevResult *flow.ExecutionResult) error {
 	finalState, err := prevResult.FinalStateCommitment()
 	if err != nil {
@@ -146,38 +175,40 @@ func (v *receiptValidator) resultChainCheck(result *flow.ExecutionResult, prevRe
 	return nil
 }

-// Validate verifies that the ExecutionReceipt satisfies
-// the following conditions:
+// Validate verifies that the ExecutionReceipt satisfies the following conditions:
 // - is from Execution node with positive weight
 // - has valid signature
 // - chunks are in correct format
 // - execution result has a valid parent and satisfies the subgraph check
 //
-// Returns nil if all checks passed successfully.
+// In order to validate a receipt, both the executed block and the parent result
+// referenced in `receipt.ExecutionResult` must be known. We return nil if all checks
+// pass successfully.
+//
 // Expected errors during normal operations:
-// - engine.InvalidInputError
-//   if receipt violates protocol condition
-// - engine.UnverifiableInputError
-//   if receipt's parent result is unknown
+// - engine.InvalidInputError if receipt violates protocol rules
+// - module.UnknownBlockError if the executed block is unknown
+// - module.UnknownResultError if the receipt's parent result is unknown
+//
+// All other errors are potential symptoms of critical internal failures, such as bugs or state corruption.
 func (v *receiptValidator) Validate(receipt *flow.ExecutionReceipt) error {
-	// TODO: this can be optimized by checking if result was already stored and validated.
-	// This needs to be addressed later since many tests depend on this behavior.
-	prevResult, err := v.fetchResult(receipt.ExecutionResult.PreviousResultID)
-	if err != nil {
-		return fmt.Errorf("error fetching parent result of receipt %v: %w", receipt.ID(), err)
+	parentResult, err := v.results.ByID(receipt.ExecutionResult.PreviousResultID)
+	if err != nil { // we expect `storage.ErrNotFound` in case parent result is unknown; any other error is an unexpected, critical failure
+		if errors.Is(err, storage.ErrNotFound) {
+			return module.NewUnknownResultError("parent result %v unknown: %w", receipt.ExecutionResult.PreviousResultID, err)
+		}
+		return irrecoverable.NewExceptionf("unexpected exception fetching parent result %v: %w", receipt.ExecutionResult.PreviousResultID, err)
 	}

-	// first validate result to avoid signature check in in `validateReceipt` in case result is invalid.
-	err = v.validateResult(&receipt.ExecutionResult, prevResult)
+	// first validate result to avoid expensive signature check in `validateReceipt` in case result is invalid.
+	err = v.validateResult(&receipt.ExecutionResult, parentResult)
 	if err != nil {
-		return fmt.Errorf("could not validate single result %v at index: %w", receipt.ExecutionResult.ID(), err)
+		return fmt.Errorf("could not validate result %v: %w", receipt.ExecutionResult.ID(), err)
 	}

-	err = v.validateReceipt(receipt.Meta(), receipt.ExecutionResult.BlockID)
+	err = v.validateReceipt(receipt.Stub(), receipt.ExecutionResult.BlockID)
 	if err != nil {
-		// It's very important that we fail the whole validation if one of the receipts is invalid.
-		// It allows us to make assumptions as stated in previous comment.
-		return fmt.Errorf("could not validate single receipt %v: %w", receipt.ID(), err)
+		return fmt.Errorf("could not validate receipt %v: %w", receipt.ID(), err)
 	}

 	return nil
@@ -198,13 +229,27 @@ func (v *receiptValidator) Validate(receipt *flow.ExecutionReceipt) error {
 // - no duplicates in fork
 //
 // Expected errors during normal operations:
-// - engine.InvalidInputError
-//   if some receipts in the candidate block violate protocol condition
-// - engine.UnverifiableInputError
-//   if for some of the receipts, their respective parent result is unknown
+// - engine.InvalidInputError if some receipts in the candidate block violate protocol condition
+// - module.UnknownBlockError if the candidate block's _parent_ is unknown
+//
+// All other errors are potential symptoms of critical internal failures, such as bugs or state corruption.
+// Note that module.UnknownResultError is not possible; we have either an invalid candidate block
+// (yields engine.InvalidInputError) or a missing parent block (yields module.UnknownBlockError).
 func (v *receiptValidator) ValidatePayload(candidate *flow.Block) error {
-	header := candidate.Header
 	payload := candidate.Payload
+	parentID := candidate.ParentID
+
+	// As a prerequisite, we check that the candidate's parent block is known. Otherwise, we cannot validate it.
+	// This check is important to distinguish expected error cases from unexpected exceptions. By confirming
+	// that the protocol state knows the parent block, we guarantee that we can successfully traverse the
+	// candidate's ancestry below.
+	exists, err := v.headers.Exists(parentID)
+	if err != nil {
+		return irrecoverable.NewExceptionf("unexpected exception retrieving the candidate block's parent %v: %w", parentID, err)
+	}
+	if !exists {
+		return module.NewUnknownBlockError("cannot validate receipts in block, as its parent block %v is unknown", parentID)
+	}

 	// return if nothing to validate
 	if len(payload.Receipts) == 0 && len(payload.Results) == 0 {
@@ -213,9 +258,9 @@ func (v *receiptValidator) ValidatePayload(candidate *flow.Block) error {

 	// Get the latest sealed result on this fork and the corresponding block,
 	// whose result is sealed. This block is not necessarily finalized.
-	lastSeal, err := v.seals.HighestInFork(header.ParentID)
+	lastSeal, err := v.seals.HighestInFork(parentID)
 	if err != nil {
-		return fmt.Errorf("could not retrieve latest seal for fork with head %x: %w", header.ParentID, err)
+		return fmt.Errorf("could not retrieve latest seal for fork with head %x: %w", parentID, err)
 	}
 	latestSealedResult, err := v.results.ByID(lastSeal.ResultID)
 	if err != nil {
@@ -267,17 +312,29 @@ func (v *receiptValidator) ValidatePayload(candidate *flow.Block) error {
 		}
 		return nil
 	}
-	err = fork.TraverseForward(v.headers, header.ParentID, bookKeeper, fork.ExcludingBlock(lastSeal.BlockID))
+	err = fork.TraverseForward(v.headers, parentID, bookKeeper, fork.ExcludingBlock(lastSeal.BlockID))
 	if err != nil {
-		return fmt.Errorf("internal error while traversing the ancestor fork of unsealed blocks: %w", err)
+		// At the beginning, we checked that the candidate's parent exists in the protocol state, i.e. its
+		// ancestry is known and valid. Hence, any error here is a symptom of internal state corruption.
+		return irrecoverable.NewExceptionf("internal error while traversing the ancestor fork of unsealed blocks: %w", err)
 	}

-	// first validate all results that were included into payload
-	// if one of results is invalid we fail the whole check because it could be violating
-	// parent-children relationship
+	// tracks the number of receipts committing to each result.
+	// it's ok to only index receipts at this point, because we will perform
+	// all needed checks after we have validated all results.
+	receiptsByResult := payload.Receipts.GroupByResultID()
+
+	// Validate all results that are incorporated into the payload. If one is malformed, the entire block is invalid.
 	for i, result := range payload.Results {
 		resultID := result.ID()

+		// Every included result must be accompanied by a receipt with a corresponding `ResultID`, in the same block.
+		// If a result is included without a corresponding receipt, it cannot be attributed to any executor.
+		receiptsForResult := len(receiptsByResult.GetGroup(resultID))
+		if receiptsForResult == 0 {
+			return engine.NewInvalidInputErrorf("no receipts for result %v at index %d", resultID, i)
+		}
+
 		// check for duplicated results
 		if _, isDuplicate := executionTree[resultID]; isDuplicate {
 			return engine.NewInvalidInputErrorf("duplicate result %v at index %d", resultID, i)
@@ -293,11 +350,19 @@ func (v *receiptValidator) ValidatePayload(candidate *flow.Block) error {
 		if _, forBlockOnFork := forkBlocks[result.BlockID]; !forBlockOnFork {
 			return engine.NewInvalidInputErrorf("results %v at index %d is for block not on fork (%x)", resultID, i, result.BlockID)
 		}
+		// Reaching the following code implies that the executed block with ID `result.BlockID` is known to the protocol state, i.e. well formed.
 		// validate result
 		err = v.validateResult(result, prevResult)
 		if err != nil {
-			return fmt.Errorf("could not validate result %v at index %d: %w", resultID, i, err)
+			if engine.IsInvalidInputError(err) {
+				return fmt.Errorf("result %v at index %d is invalid: %w", resultID, i, err)
+			}
+			if module.IsUnknownBlockError(err) {
+				// Above, we checked that the result is for an ancestor of the candidate block. If this block or parts of it are not found, our state is corrupted.
+				return irrecoverable.NewExceptionf("the executed block or some of its parts were not found despite the block being already incorporated: %w", err)
+			}
+			return fmt.Errorf("unexpected exception while validating result %v at index %d: %w", resultID, i, err)
 		}
 		executionTree[resultID] = result
 	}
@@ -325,13 +390,25 @@ func (v *receiptValidator) ValidatePayload(candidate *flow.Block) error {

 		err = v.validateReceipt(receipt, result.BlockID)
 		if err != nil {
-			return fmt.Errorf("receipt %v at index %d failed validation: %w", receiptID, i, err)
+			if engine.IsInvalidInputError(err) {
+				return fmt.Errorf("receipt %v at index %d failed validation: %w", receiptID, i, err)
+			}
+			if module.IsUnknownBlockError(err) {
+				// Above, we checked that the result is for an ancestor of the candidate block. If this block or parts of it are not found, our state is corrupted.
+				return irrecoverable.NewExceptionf("the executed block or some of its parts were not found despite the block being already incorporated: %w", err)
+			}
+			return fmt.Errorf("unexpected exception validating receipt %v at index %d: %w", receiptID, i, err)
 		}
 	}

 	return nil
 }

+// validateResult validates that the given result is well-formed.
+// We do not check the validity of the resulting state commitment.
+// Expected errors during normal operations:
+// - engine.InvalidInputError if the result has malformed chunks
+// - module.UnknownBlockError if the executed block (result.BlockID) is not known to the protocol state
 func (v *receiptValidator) validateResult(result *flow.ExecutionResult, prevResult *flow.ExecutionResult) error {
 	err := v.verifyChunksFormat(result)
 	if err != nil {
@@ -351,14 +428,15 @@ func (v *receiptValidator) validateResult(result *flow.ExecutionResult, prevResu
 	return nil
 }

-func (v *receiptValidator) validateReceipt(receipt *flow.ExecutionReceiptMeta, blockID flow.Identifier) error {
-	identity, err := identityForNode(v.state, blockID, receipt.ExecutorID)
+// validateReceipt validates that the given `receipt` is a valid commitment from an Execution Node
+// to some result.
+// Error returns:
+// - engine.InvalidInputError if `receipt` is invalid
+// - module.UnknownBlockError if executedBlockID is unknown
+func (v *receiptValidator) validateReceipt(receipt *flow.ExecutionReceiptStub, executedBlockID flow.Identifier) error {
+	identity, err := identityForNode(v.state, executedBlockID, receipt.ExecutorID)
 	if err != nil {
-		return fmt.Errorf(
-			"failed to get executor identity %v at block %v: %w",
-			receipt.ExecutorID,
-			blockID,
-			err)
+		return fmt.Errorf("retrieving identity of node %v at block %v failed: %w", receipt.ExecutorID, executedBlockID, err)
 	}

 	err = ensureNodeHasWeightAndRole(identity, flow.RoleExecution)
diff --git a/module/validation/receipt_validator_test.go b/module/validation/receipt_validator_test.go
index ec6a31d9475..3c1dfd8d197 100644
--- a/module/validation/receipt_validator_test.go
+++ b/module/validation/receipt_validator_test.go
@@ -1,6 +1,8 @@
 package validation

 import (
+	"errors"
+	"math/rand"
 	"testing"

 	"github.com/stretchr/testify/mock"
@@ -10,7 +12,9 @@ import (
 	"github.com/onflow/flow-go/engine"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
-	fmock "github.com/onflow/flow-go/module/mock"
+	mock_module "github.com/onflow/flow-go/module/mock"
+	mock_protocol "github.com/onflow/flow-go/state/protocol/mock"
+	mock_storage "github.com/onflow/flow-go/storage/mock"
 	"github.com/onflow/flow-go/utils/unittest"
 )

@@ -22,14 +26,20 @@ type ReceiptValidationSuite struct {
 	unittest.BaseChainSuite

 	receiptValidator module.ReceiptValidator
-	publicKey        *fmock.PublicKey
+	publicKey        *mock_module.PublicKey
 }

 func (s *ReceiptValidationSuite) SetupTest() {
 	s.SetupChain()
-	s.publicKey = &fmock.PublicKey{}
+	s.publicKey = mock_module.NewPublicKey(s.T())
 	s.Identities[s.ExeID].StakingPubKey = s.publicKey
-	s.receiptValidator = NewReceiptValidator(s.State, s.HeadersDB, s.IndexDB, s.ResultsDB, s.SealsDB)
+	s.receiptValidator = NewReceiptValidator(
+		s.State,
+		s.HeadersDB,
+		s.IndexDB,
+		s.ResultsDB,
+		s.SealsDB,
+	)
 }

 // TestReceiptValid try submitting valid receipt
@@ -39,10 +49,10 @@ func (s *ReceiptValidationSuite) TestReceiptValid() {
 		unittest.WithResult(valSubgrph.Result))
 	s.AddSubgraphFixtureToMempools(valSubgrph)

-	receiptID := receipt.ID()
+	unsignedReceiptID := receipt.UnsignedExecutionReceipt.ID()
 	s.publicKey.On("Verify",
 		receipt.ExecutorSignature,
-		receiptID[:],
+		unsignedReceiptID[:],
 		mock.Anything,
 	).Return(true, nil).Once()

@@ -52,29 +62,27 @@
 }
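The error taxonomy documented above is the heart of this refactor. As an editorial sketch of the intended contract (the handleReceipt function is hypothetical and not part of this change; imports match those used throughout the validation package), a caller of Validate would branch on the sentinel errors roughly like this:

// handleReceipt is a hypothetical caller illustrating the documented contract.
func handleReceipt(validator module.ReceiptValidator, receipt *flow.ExecutionReceipt) error {
	err := validator.Validate(receipt)
	switch {
	case err == nil:
		return nil // receipt passed all checks
	case engine.IsInvalidInputError(err):
		return nil // byzantine input: drop the receipt, optionally penalize the sender
	case module.IsUnknownBlockError(err), module.IsUnknownResultError(err):
		return nil // missing ancestry: cache the receipt and retry once the parent arrives
	default:
		return fmt.Errorf("irrecoverable exception validating receipt: %w", err) // bug or state corruption
	}
}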
 // TestReceiptNoIdentity tests that we reject receipt with invalid `ExecutionResult.ExecutorID`
+// Note: for a receipt with a bad `ExecutorID`, we should never get to validating the signature,
+// because there is no valid identity from which we could retrieve a staking public key.
 func (s *ReceiptValidationSuite) TestReceiptNoIdentity() {
 	valSubgrph := s.ValidSubgraphFixture()
-	node := unittest.IdentityFixture()
-	mockPk := &fmock.PublicKey{}
+	node := unittest.IdentityFixture() // unknown Node
+	mockPk := mock_module.NewPublicKey(s.T())
 	node.StakingPubKey = mockPk
-	receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(node.NodeID),
-		unittest.WithResult(valSubgrph.Result))
+	receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(node.NodeID), unittest.WithResult(valSubgrph.Result))
 	s.AddSubgraphFixtureToMempools(valSubgrph)

-	receiptID := receipt.ID()
-	mockPk.On("Verify",
-		receiptID[:],
-		receipt.ExecutorSignature,
-		mock.Anything,
-	).Return(true, nil).Once()

 	err := s.receiptValidator.Validate(receipt)
 	s.Require().Error(err, "should reject invalid identity")
 	s.Assert().True(engine.IsInvalidInputError(err))
 }

-// TestReceiptFromZeroWeightNode tests that we reject receipt from node with zero weight
-func (s *ReceiptValidationSuite) TestReceiptFromZeroWeightNode() {
+// TestReceiptFromNonAuthorizedNode tests that we reject receipt from an execution node which is not authorized to participate:
+// - execution node is joining
+// - execution node is leaving
+// - execution node has zero initial weight.
+func (s *ReceiptValidationSuite) TestReceiptFromNonAuthorizedNode() {
 	valSubgrph := s.ValidSubgraphFixture()
 	receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID),
 		unittest.WithResult(valSubgrph.Result))
@@ -85,12 +93,31 @@
 		mock.Anything,
 		mock.Anything).Return(true, nil).Maybe() // call optional, as validator might check weight first

-	// replace weight with invalid one
-	s.Identities[s.ExeID].Weight = 0
+	s.Run("execution-node-leaving", func() {
+		// replace EN participation status
+		s.Identities[s.ExeID].EpochParticipationStatus = flow.EpochParticipationStatusLeaving

-	err := s.receiptValidator.Validate(receipt)
-	s.Require().Error(err, "should reject invalid weight")
-	s.Assert().True(engine.IsInvalidInputError(err))
+		err := s.receiptValidator.Validate(receipt)
+		s.Require().Error(err, "should reject receipt from node with leaving status")
+		s.Assert().True(engine.IsInvalidInputError(err))
+	})
+	s.Run("execution-node-joining", func() {
+		// replace EN participation status
+		s.Identities[s.ExeID].EpochParticipationStatus = flow.EpochParticipationStatusJoining
+
+		err := s.receiptValidator.Validate(receipt)
+		s.Require().Error(err, "should reject receipt from node with joining status")
+		s.Assert().True(engine.IsInvalidInputError(err))
+	})
+	s.Run("execution-node-zero-weight", func() {
+		// replace EN participation status and initial weight
+		s.Identities[s.ExeID].EpochParticipationStatus = flow.EpochParticipationStatusActive
+		s.Identities[s.ExeID].InitialWeight = 0
+
+		err := s.receiptValidator.Validate(receipt)
+		s.Require().Error(err, "should reject receipt from node with zero weight")
+		s.Assert().True(engine.IsInvalidInputError(err))
+	})
 }

 // TestReceiptInvalidRole tests that we reject receipt with invalid execution node role
@@ -152,6 +179,136 @@ func (s *ReceiptValidationSuite) TestReceiptTooFewChunks() {
 	s.Assert().True(engine.IsInvalidInputError(err))
 }

+// TestReceiptServiceEventCountMismatch tests that we reject any receipt where
+// the sum of service event counts specified by chunks is inconsistent with the
+// number of service events in the ExecutionResult.
+func (s *ReceiptValidationSuite) TestReceiptServiceEventCountMismatch() { + s.publicKey.On("Verify", + mock.Anything, + mock.Anything, + mock.Anything).Return(true, nil).Maybe() + + s.Run("result contains service events", func() { + valSubgrph := s.ValidSubgraphFixture() + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + result := &receipt.ExecutionResult + unittest.WithServiceEvents(10)(result) // also sets consistent ServiceEventCount fields for all chunks + s.AddSubgraphFixtureToMempools(valSubgrph) + + s.Run("compliant chunk list", func() { + err := s.receiptValidator.Validate(receipt) + s.Require().NoError(err) + }) + s.Run("chunk list has too large service event count", func() { + result.Chunks[rand.Intn(len(result.Chunks))].ServiceEventCount++ + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "should reject with invalid chunks") + s.Assert().True(engine.IsInvalidInputError(err)) + }) + s.Run("chunk list has too small service event count", func() { + for _, chunk := range result.Chunks { + if chunk.ServiceEventCount > 0 { + chunk.ServiceEventCount-- + } + } + result.Chunks[rand.Intn(len(result.Chunks))].ServiceEventCount++ + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "should reject with invalid chunks") + s.Assert().True(engine.IsInvalidInputError(err)) + }) + }) + + s.Run("result contains no service events", func() { + valSubgrph := s.ValidSubgraphFixture() + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + result := &receipt.ExecutionResult + s.AddSubgraphFixtureToMempools(valSubgrph) + + s.Run("compliant chunk list", func() { + err := s.receiptValidator.Validate(receipt) + s.Require().NoError(err) + }) + s.Run("chunk list has wrong sum of service event counts", func() { + result.Chunks[rand.Intn(len(result.Chunks))].ServiceEventCount++ + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "should reject with invalid chunks") + s.Assert().True(engine.IsInvalidInputError(err)) + }) + }) +} + +// TestReceiptForBlockWith0Collections tests handling of the edge case of a block that contains no +// collection guarantees: +// - A receipt must contain one chunk (system chunk) +// - receipts with zero or 2 chunks are rejected +func (s *ReceiptValidationSuite) TestReceiptForBlockWith0Collections() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + block, err := flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: valSubgrph.Block.HeaderBody, + Payload: unittest.PayloadFixture(), + }, + ) + s.Require().NoError(err) + valSubgrph.Block = block + s.Assert().Equal(0, len(valSubgrph.Block.Payload.Guarantees)) // sanity check that no collections in block + s.AddSubgraphFixtureToMempools(valSubgrph) + + // happy path receipt + receipt := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(s.ExeID), + unittest.WithResult(unittest.ExecutionResultFixture( + unittest.WithBlock(valSubgrph.Block), + unittest.WithPreviousResult(*valSubgrph.PreviousResult), + ))) + s.Assert().Equal(1, len(receipt.Chunks)) // sanity check that one chunk in result + + s.T().Run("valid case: 1 chunk", func(t *testing.T) { // confirm happy path receipt valid + err := s.receiptValidator.Validate(receipt) + s.Require().NoError(err) + }) + + s.T().Run("invalid: zero chunks", func(t *testing.T) { // missing system chunk + var r 
flow.ExecutionReceipt = *receipt // copy + r.Chunks = r.Chunks[0:0] + err := s.receiptValidator.Validate(&r) + s.Require().Error(err, "should reject with invalid chunks") + s.Assert().True(engine.IsInvalidInputError(err)) + }) + + s.T().Run("invalid: 2 chunks", func(t *testing.T) { // one too many chunks + var r flow.ExecutionReceipt = *receipt // copy + var extraChunk flow.Chunk = *r.Chunks[0] + extraChunk.Index = 1 + extraChunk.CollectionIndex = 1 + r.Chunks = append(r.Chunks, &extraChunk) + err := s.receiptValidator.Validate(&r) + s.Require().Error(err, "should reject with invalid chunks") + s.Assert().True(engine.IsInvalidInputError(err)) + }) +} + +// TestReceiptInconsistentChunkList tests that we reject receipts when the Start and End states +// within the chunk list are inconsistent (e.g. chunk[0].EndState != chunk[1].StartState). +func (s *ReceiptValidationSuite) TestReceiptInconsistentChunkList() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + chunks := valSubgrph.Result.Chunks + require.GreaterOrEqual(s.T(), chunks.Len(), 1) + // swap last chunk's start and end states + lastChunk := chunks[len(chunks)-1] + lastChunk.StartState, lastChunk.EndState = lastChunk.EndState, lastChunk.StartState + + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), + unittest.WithResult(valSubgrph.Result)) + s.AddSubgraphFixtureToMempools(valSubgrph) + + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "should reject with invalid chunks") + s.Assert().True(engine.IsInvalidInputError(err)) +} + // TestReceiptTooManyChunks tests that we reject receipt with more chunks than expected func (s *ReceiptValidationSuite) TestReceiptTooManyChunks() { valSubgrph := s.ValidSubgraphFixture() @@ -207,7 +364,9 @@ func (s *ReceiptValidationSuite) TestReceiptInvalidCollectionIndex() { s.Assert().True(engine.IsInvalidInputError(err)) } -// TestReceiptNoPreviousResult tests that we reject receipt with missing previous result +// TestReceiptNoPreviousResult tests that `Validate` rejects a receipt, whose parent result is unknown: +// - per API contract it should return a `module.UnknownResultError` +// - should _not_ be misinterpreted as an invalid receipt, i.e. 
should not receive an `engine.InvalidInputError`
 func (s *ReceiptValidationSuite) TestReceiptNoPreviousResult() {
 	valSubgrph := s.ValidSubgraphFixture()
 	// invalidate prev execution result, it will result in failing to lookup
@@ -224,24 +383,43 @@ func (s *ReceiptValidationSuite) TestReceiptNoPreviousResult() {

 	err := s.receiptValidator.Validate(receipt)
 	s.Require().Error(err, "should reject invalid receipt")
-	s.Assert().True(engine.IsUnverifiableInputError(err), err)
+	s.Assert().True(module.IsUnknownResultError(err), err)
+	s.Assert().False(engine.IsInvalidInputError(err), err)
 }

-// TestReceiptInvalidPreviousResult tests that we reject receipt with invalid previous result
-func (s *ReceiptValidationSuite) TestReceiptInvalidPreviousResult() {
-	valSubgrph := s.ValidSubgraphFixture()
-	receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID),
-		unittest.WithResult(valSubgrph.Result))
-	s.AddSubgraphFixtureToMempools(valSubgrph)
-
-	// invalidate prev execution result blockID, this should fail because
-	// prev result points to wrong block
-	valSubgrph.PreviousResult.BlockID = unittest.IdentifierFixture()
-
-	s.publicKey.On("Verify",
-		mock.Anything,
-		mock.Anything,
-		mock.Anything).Return(true, nil).Maybe()
+// TestInvalidSubgraph is part of verifying that we reject a receipt whose result
+// does not form a valid 'subgraph'. Formally, a subgraph is defined as
+//
+//	Result -----------------------------------> Block
+//	  |                                           |
+//	  |                                           v
+//	  |                                      ParentBlock
+//	  v
+//	PreviousResult ---> PreviousResult.BlockID
+//
+// with the validity requirement that PreviousResult.BlockID == ParentBlock.ID().
+//
+// In our test case, we assume that `PreviousResult` and `Block` are known, but
+// PreviousResult.BlockID ≠ ParentBlock.ID(). The compliance layer guarantees that new elements are added
+// to the blockchain graph if and only if they are protocol compliant. In other words, we are testing
+// a byzantine receipt that references known and valid entities, but they do not form a valid subgraph.
+// For example, it could be a result for a block in a different fork or an ancestor further in the past.
+func (s *ReceiptValidationSuite) TestInvalidSubgraph() {
+	s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe()
+
+	// add two independent sub-graphs, which is essentially two different forks
+	fork1 := s.ValidSubgraphFixture()
+	s.AddSubgraphFixtureToMempools(fork1)
+	fork2 := s.ValidSubgraphFixture()
+	s.AddSubgraphFixtureToMempools(fork2)
+
+	// Receipt is for block in fork1 but references a result in fork2 as parent
+	receipt := unittest.ExecutionReceiptFixture(
+		unittest.WithExecutorID(s.ExeID), // valid executor
+		unittest.WithResult(unittest.ExecutionResultFixture(
+			unittest.WithBlock(fork1.Block), // known executed block on fork 1
+			unittest.WithPreviousResult(*fork2.Result)), // known parent result
+		))

 	err := s.receiptValidator.Validate(receipt)
 	s.Require().Error(err, "should reject invalid previous result")
@@ -272,11 +450,12 @@ func (s *ReceiptValidationSuite) TestReceiptInvalidResultChain() {

 // TestMultiReceiptValidResultChain tests that multiple receipts and results
 // within one block payload are accepted, where the receipts are building on
-// top of each other (i.e. their results form a chain). 
Test case: +// - we have the chain in storage: G <- A <- B(A) <- C // - if a child block of C payload contains receipts and results for (B,C) // it should be accepted as valid +// +// Notation: B(A) means block B has receipt for A. func (s *ReceiptValidationSuite) TestMultiReceiptValidResultChain() { // assuming signatures are all good s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) @@ -289,10 +468,10 @@ func (s *ReceiptValidationSuite) TestMultiReceiptValidResultChain() { blockA, blockB, blockC := blocks[1], blocks[2], blocks[3] receiptA, receiptB, receiptC := receipts[1], receipts[2], receipts[3] - blockA.Payload.Receipts = []*flow.ExecutionReceiptMeta{} - blockB.Payload.Receipts = []*flow.ExecutionReceiptMeta{receiptA.Meta()} + blockA.Payload.Receipts = []*flow.ExecutionReceiptStub{} + blockB.Payload.Receipts = []*flow.ExecutionReceiptStub{receiptA.Stub()} blockB.Payload.Results = []*flow.ExecutionResult{&receiptA.ExecutionResult} - blockC.Payload.Receipts = []*flow.ExecutionReceiptMeta{} + blockC.Payload.Receipts = []*flow.ExecutionReceiptStub{} // update block header so that blocks are chained together unittest.ReconnectBlocksAndReceipts(blocks, receipts) // assuming all receipts are executed by the correct executor @@ -305,22 +484,25 @@ func (s *ReceiptValidationSuite) TestMultiReceiptValidResultChain() { } s.PersistedResults[result0.ID()] = result0 - candidate := unittest.BlockWithParentFixture(blockC.Header) - candidate.Payload = &flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receiptB.Meta(), receiptC.Meta()}, - Results: []*flow.ExecutionResult{&receiptB.ExecutionResult, &receiptC.ExecutionResult}, - } + candidate := unittest.BlockWithParentAndPayload( + blockC.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptB.Stub(), receiptC.Stub()}, + Results: []*flow.ExecutionResult{&receiptB.ExecutionResult, &receiptC.ExecutionResult}, + }, + ) err := s.receiptValidator.ValidatePayload(candidate) s.Require().NoError(err) } -// we have such chain in storage: G <- A <- B(A) <- C -// if a block payload contains (C,B_bad), they should be invalid +// TestMultiReceiptInvalidParent performs the following test: +// - we have the chain in storage: G <- A <- B(A) <- C +// and are receiving `candidate`, which is a child block of C +// - candidate should be invalid, if its payload contains (C,B_bad). +// +// Notation: B(A) means block B has receipt for A. 
 func (s *ReceiptValidationSuite) TestMultiReceiptInvalidParent() {
-	// assuming signatures are all good
-	s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
-
 	// G <- A <- B <- C
 	blocks, result0, seal := unittest.ChainFixture(4)
 	s.SealsIndex[blocks[0].ID()] = seal
@@ -330,10 +512,10 @@ func (s *ReceiptValidationSuite) TestMultiReceiptInvalidParent() {
 	receiptA := receipts[1]
 	receiptBInvalid := receipts[2]
 	receiptC := receipts[3]
-	blockA.Payload.Receipts = []*flow.ExecutionReceiptMeta{}
-	blockB.Payload.Receipts = []*flow.ExecutionReceiptMeta{receiptA.Meta()}
+	blockA.Payload.Receipts = []*flow.ExecutionReceiptStub{}
+	blockB.Payload.Receipts = []*flow.ExecutionReceiptStub{receiptA.Stub()}
 	blockB.Payload.Results = []*flow.ExecutionResult{&receiptA.ExecutionResult}
-	blockC.Payload.Receipts = []*flow.ExecutionReceiptMeta{}
+	blockC.Payload.Receipts = []*flow.ExecutionReceiptStub{}
 	// update block header so that blocks are chained together
 	unittest.ReconnectBlocksAndReceipts(blocks, receipts)
 	// assuming all receipts are executed by the correct executor
@@ -346,14 +528,18 @@ func (s *ReceiptValidationSuite) TestMultiReceiptInvalidParent() {
 	}
 	s.PersistedResults[result0.ID()] = result0

-	// make receipt B as bad
+	// receipt B is from an invalid node
+	// Note: for a receipt with a bad `ExecutorID`, we should never get to validating the signature,
+	// because there is no valid identity from which we could retrieve a staking public key.
 	receiptBInvalid.ExecutorID = unittest.IdentifierFixture()

-	candidate := unittest.BlockWithParentFixture(blockC.Header)
-	candidate.Payload = &flow.Payload{
-		Receipts: []*flow.ExecutionReceiptMeta{receiptBInvalid.Meta(), receiptC.Meta()},
-		Results:  []*flow.ExecutionResult{&receiptBInvalid.ExecutionResult, &receiptC.ExecutionResult},
-	}
+	candidate := unittest.BlockWithParentAndPayload(
+		blockC.ToHeader(),
+		flow.Payload{
+			Receipts: []*flow.ExecutionReceiptStub{receiptBInvalid.Stub(), receiptC.Stub()},
+			Results:  []*flow.ExecutionResult{&receiptBInvalid.ExecutionResult, &receiptC.ExecutionResult},
+		},
+	)

 	// receiptB and receiptC
 	err := s.receiptValidator.ValidatePayload(candidate)
@@ -369,8 +555,10 @@ func (s *ReceiptValidationSuite) TestValidationReceiptsForSealedBlock() {
 	s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil)

 	// create block2
-	block2 := unittest.BlockWithParentFixture(s.LatestSealedBlock.Header)
-	block2.SetPayload(flow.Payload{})
+	block2 := unittest.BlockWithParentAndPayload(
+		s.LatestSealedBlock.ToHeader(),
+		flow.Payload{},
+	)
 	s.Extend(block2)

 	block2Receipt := unittest.ExecutionReceiptFixture(unittest.WithResult(
@@ -380,21 +568,25 @@ func (s *ReceiptValidationSuite) TestValidationReceiptsForSealedBlock() {

 	// B1<--B2<--B3{R{B2)}<--B4{S(R(B2))}<--B5{R'(B2)}
 	// create block3 with a receipt for block2
-	block3 := unittest.BlockWithParentFixture(block2.Header)
-	block3.SetPayload(flow.Payload{
-		Receipts: []*flow.ExecutionReceiptMeta{block2Receipt.Meta()},
-		Results:  []*flow.ExecutionResult{&block2Receipt.ExecutionResult},
-	})
+	block3 := unittest.BlockWithParentAndPayload(
+		block2.ToHeader(),
+		flow.Payload{
+			Receipts: []*flow.ExecutionReceiptStub{block2Receipt.Stub()},
+			Results:  []*flow.ExecutionResult{&block2Receipt.ExecutionResult},
+		},
+	)
 	s.Extend(block3)

 	// create a seal for block2
 	seal2 := unittest.Seal.Fixture(unittest.Seal.WithResult(&block2Receipt.ExecutionResult))

 	// create block4 containing a seal for block2
-	block4 := 
unittest.BlockWithParentFixture(block3.Header) - block4.SetPayload(flow.Payload{ - Seals: []*flow.Seal{seal2}, - }) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{seal2}, + }, + ) s.Extend(block4) // insert another receipt for block 2, which is now the highest sealed @@ -403,11 +595,13 @@ func (s *ReceiptValidationSuite) TestValidationReceiptsForSealedBlock() { unittest.ExecutionResultFixture(unittest.WithBlock(block2), unittest.WithPreviousResult(*s.LatestExecutionResult))), unittest.WithExecutorID(s.ExeID)) - block5 := unittest.BlockWithParentFixture(block4.Header) - block5.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, - Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, - }) + block5 := unittest.BlockWithParentAndPayload( + block4.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receipt.Stub()}, + Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, + }, + ) err := s.receiptValidator.ValidatePayload(block5) require.Error(s.T(), err) @@ -420,11 +614,12 @@ func (s *ReceiptValidationSuite) TestValidationReceiptsForSealedBlock() { // insert another receipt for B2 but in a separate fork. The fact that // B2 is sealed on a separate fork should not cause the receipt to be // rejected - block6 := unittest.BlockWithParentFixture(block2.Header) - block6.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, - Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, - }) + block6 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receipt.Stub()}, + Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, + }) err = s.receiptValidator.ValidatePayload(block6) require.NoError(s.T(), err) } @@ -436,8 +631,10 @@ func (s *ReceiptValidationSuite) TestValidationReceiptForIncorporatedResult() { s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) // create block2 - block2 := unittest.BlockWithParentFixture(s.LatestSealedBlock.Header) - block2.SetPayload(flow.Payload{}) + block2 := unittest.BlockWithParentAndPayload( + s.LatestSealedBlock.ToHeader(), + flow.Payload{}, + ) s.Extend(block2) executionResult := unittest.ExecutionResultFixture(unittest.WithBlock(block2), @@ -449,11 +646,13 @@ func (s *ReceiptValidationSuite) TestValidationReceiptForIncorporatedResult() { // B1<--B2<--B3{R(B2)}<--B4{R'(B2)} // create block3 with a receipt for block2 - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{firstReceipt.Meta()}, - Results: []*flow.ExecutionResult{&firstReceipt.ExecutionResult}, - }) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{firstReceipt.Stub()}, + Results: []*flow.ExecutionResult{&firstReceipt.ExecutionResult}, + }, + ) s.Extend(block3) exe := unittest.IdentityFixture(unittest.WithRole(flow.RoleExecution)) @@ -465,11 +664,13 @@ func (s *ReceiptValidationSuite) TestValidationReceiptForIncorporatedResult() { secondReceipt := unittest.ExecutionReceiptFixture( unittest.WithResult(executionResult), unittest.WithExecutorID(exe.NodeID)) - block5 := unittest.BlockWithParentFixture(block3.Header) - block5.SetPayload(flow.Payload{ - // no results, only receipt - Receipts: []*flow.ExecutionReceiptMeta{secondReceipt.Meta()}, - }) + block5 := unittest.BlockWithParentAndPayload( + 
block3.ToHeader(), + flow.Payload{ + // no results, only receipt + Receipts: []*flow.ExecutionReceiptStub{secondReceipt.Stub()}, + }, + ) err := s.receiptValidator.ValidatePayload(block5) require.NoError(s.T(), err) @@ -481,18 +682,18 @@ func (s *ReceiptValidationSuite) TestValidationReceiptForIncorporatedResult() { // - we have the chain in storage: // . G <- A <- B // . ^- C(Result[A], ReceiptMeta[A]) -// here, block C contains the result _and_ the receipt Meta-data for block A +// here, block C contains the result _and_ the receipt Stub-data for block A // - now receive the new block X: G <- A <- B <- X(ReceiptMeta[A]) // Note that X only contains the receipt for A, but _not_ the result. // // Block X must be considered invalid, because confirming validity of // ReceiptMeta[A] requires information _not_ included in the fork. func (s *ReceiptValidationSuite) TestValidationReceiptWithoutIncorporatedResult() { - // assuming signatures are all good - s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) + // assuming signatures are all good (if we get to checking signatures) + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() // create block A - blockA := unittest.BlockWithParentFixture(s.LatestSealedBlock.Header) // for block G, we use the LatestSealedBlock + blockA := unittest.BlockWithParentFixture(s.LatestSealedBlock.ToHeader()) // for block G, we use the LatestSealedBlock s.Extend(blockA) // result for A; and receipt for A @@ -500,20 +701,23 @@ func (s *ReceiptValidationSuite) TestValidationReceiptWithoutIncorporatedResult( receiptA := unittest.ExecutionReceiptFixture(unittest.WithResult(resultA), unittest.WithExecutorID(s.ExeID)) // create block B and block C - blockB := unittest.BlockWithParentFixture(blockA.Header) - blockC := unittest.BlockWithParentFixture(blockA.Header) - blockC.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receiptA.Meta()}, - Results: []*flow.ExecutionResult{resultA}, - }) + blockB := unittest.BlockWithParentFixture(blockA.ToHeader()) + blockC := unittest.BlockWithParentAndPayload( + blockA.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptA.Stub()}, + Results: []*flow.ExecutionResult{resultA}, + }, + ) s.Extend(blockB) s.Extend(blockC) // create block X: - blockX := unittest.BlockWithParentFixture(blockB.Header) - blockX.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receiptA.Meta()}, - }) + blockX := unittest.BlockWithParentAndPayload( + blockB.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptA.Stub()}, + }) err := s.receiptValidator.ValidatePayload(blockX) require.Error(s.T(), err) @@ -555,19 +759,23 @@ func (s *ReceiptValidationSuite) TestPayloadWithExecutionFork() { receiptS2 := unittest.ExecutionReceiptFixture(unittest.WithResult(resultS2), unittest.WithExecutorID(s.ExeID)) // create block A, including results and receipts for it - blockA := unittest.BlockWithParentFixture(blockS.Header) - blockA.SetPayload(flow.Payload{ - Results: []*flow.ExecutionResult{resultS1, resultS2}, - Receipts: []*flow.ExecutionReceiptMeta{receiptS1.Meta(), receiptS2.Meta()}, - }) + blockA := unittest.BlockWithParentAndPayload( + blockS.ToHeader(), + flow.Payload{ + Results: []*flow.ExecutionResult{resultS1, resultS2}, + Receipts: []*flow.ExecutionReceiptStub{receiptS1.Stub(), receiptS2.Stub()}, + }, + ) s.Extend(blockA) // create block B - blockB := unittest.BlockWithParentFixture(blockA.Header) - sealResultS2 
:= unittest.Seal.Fixture(unittest.Seal.WithBlock(blockS.Header), unittest.Seal.WithResult(resultS2)) - blockB.SetPayload(flow.Payload{ - Seals: []*flow.Seal{sealResultS2}, - }) + sealResultS2 := unittest.Seal.Fixture(unittest.Seal.WithBlock(blockS.ToHeader()), unittest.Seal.WithResult(resultS2)) + blockB := unittest.BlockWithParentAndPayload( + blockA.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{sealResultS2}, + }, + ) s.Extend(blockB) // create Result[A]_1, Result[A]_2, Result[A]_3 and their receipts @@ -579,21 +787,25 @@ func (s *ReceiptValidationSuite) TestPayloadWithExecutionFork() { receiptA3 := unittest.ExecutionReceiptFixture(unittest.WithResult(resultA3), unittest.WithExecutorID(s.ExeID)) // SCENARIO (i): a block containing Result[A]_1 should fail validation - blockX := unittest.BlockWithParentFixture(blockB.Header) - blockX.SetPayload(flow.Payload{ - Results: []*flow.ExecutionResult{resultA1, resultA2, resultA3}, - Receipts: []*flow.ExecutionReceiptMeta{receiptA1.Meta(), receiptA2.Meta(), receiptA3.Meta()}, - }) + blockX := unittest.BlockWithParentAndPayload( + blockB.ToHeader(), + flow.Payload{ + Results: []*flow.ExecutionResult{resultA1, resultA2, resultA3}, + Receipts: []*flow.ExecutionReceiptStub{receiptA1.Stub(), receiptA2.Stub(), receiptA3.Stub()}, + }, + ) err := s.receiptValidator.ValidatePayload(blockX) require.Error(s.T(), err) require.True(s.T(), engine.IsInvalidInputError(err), err) // SCENARIO (ii): a block containing only results Result[A]_2 and Result[A]_3 should pass validation - blockX = unittest.BlockWithParentFixture(blockB.Header) - blockX.SetPayload(flow.Payload{ - Results: []*flow.ExecutionResult{resultA2, resultA3}, - Receipts: []*flow.ExecutionReceiptMeta{receiptA2.Meta(), receiptA3.Meta()}, - }) + blockX = unittest.BlockWithParentAndPayload( + blockB.ToHeader(), + flow.Payload{ + Results: []*flow.ExecutionResult{resultA2, resultA3}, + Receipts: []*flow.ExecutionReceiptStub{receiptA2.Stub(), receiptA3.Stub()}, + }, + ) err = s.receiptValidator.ValidatePayload(blockX) require.NoError(s.T(), err) } @@ -613,37 +825,43 @@ func (s *ReceiptValidationSuite) TestMultiLevelExecutionTree() { s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) // create block A, including result and receipt for it - blockA := unittest.BlockWithParentFixture(s.LatestSealedBlock.Header) + blockA := unittest.BlockWithParentFixture(s.LatestSealedBlock.ToHeader()) resultA := unittest.ExecutionResultFixture(unittest.WithBlock(blockA), unittest.WithPreviousResult(*s.LatestExecutionResult)) receiptA := unittest.ExecutionReceiptFixture(unittest.WithResult(resultA), unittest.WithExecutorID(s.ExeID)) s.Extend(blockA) // create block B, including result and receipt for it - blockB := unittest.BlockWithParentFixture(blockA.Header) - blockB.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receiptA.Meta()}, - Results: []*flow.ExecutionResult{resultA}, - }) + blockB := unittest.BlockWithParentAndPayload( + blockA.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptA.Stub()}, + Results: []*flow.ExecutionResult{resultA}, + }, + ) resultB := unittest.ExecutionResultFixture(unittest.WithBlock(blockB), unittest.WithPreviousResult(*resultA)) receiptB := unittest.ExecutionReceiptFixture(unittest.WithResult(resultB), unittest.WithExecutorID(s.ExeID)) s.Extend(blockB) // create block C, including result and receipt for it - blockC := unittest.BlockWithParentFixture(blockB.Header) - blockC.SetPayload(flow.Payload{ - Receipts: 
[]*flow.ExecutionReceiptMeta{receiptB.Meta()}, - Results: []*flow.ExecutionResult{resultB}, - }) + blockC := unittest.BlockWithParentAndPayload( + blockB.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptB.Stub()}, + Results: []*flow.ExecutionResult{resultB}, + }, + ) resultC := unittest.ExecutionResultFixture(unittest.WithBlock(blockC), unittest.WithPreviousResult(*resultB)) receiptC := unittest.ExecutionReceiptFixture(unittest.WithResult(resultC), unittest.WithExecutorID(s.ExeID)) s.Extend(blockC) // create block X: - blockX := unittest.BlockWithParentFixture(blockC.Header) - blockX.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receiptC.Meta()}, - Results: []*flow.ExecutionResult{resultC}, - }) + blockX := unittest.BlockWithParentAndPayload( + blockC.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptC.Stub()}, + Results: []*flow.ExecutionResult{resultC}, + }, + ) err := s.receiptValidator.ValidatePayload(blockX) require.NoError(s.T(), err) @@ -657,23 +875,26 @@ func (s *ReceiptValidationSuite) TestMultiLevelExecutionTree() { // +----B4{R(B3)} func (s *ReceiptValidationSuite) TestValidationReceiptsBlockNotOnFork() { // create block2 - block2 := unittest.BlockWithParentFixture(s.LatestFinalizedBlock.Header) + block2 := unittest.BlockWithParentFixture(s.LatestFinalizedBlock.ToHeader()) block2.Payload.Guarantees = nil - block2.Header.PayloadHash = block2.Payload.Hash() s.Extend(block2) // create block3 - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.Payload{}) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + flow.Payload{}, + ) s.Extend(block3) block3Receipt := unittest.ReceiptForBlockFixture(block3) - block4 := unittest.BlockWithParentFixture(block2.Header) - block4.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{block3Receipt.Meta()}, - Results: []*flow.ExecutionResult{&block3Receipt.ExecutionResult}, - }) + block4 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{block3Receipt.Stub()}, + Results: []*flow.ExecutionResult{&block3Receipt.ExecutionResult}, + }, + ) err := s.receiptValidator.ValidatePayload(block4) require.Error(s.T(), err) require.True(s.T(), engine.IsInvalidInputError(err), err) @@ -682,27 +903,32 @@ func (s *ReceiptValidationSuite) TestValidationReceiptsBlockNotOnFork() { // Test that Extend will refuse payloads that contain duplicate receipts, where // duplicates can be in another block on the fork, or within the payload. 
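// [Editor's aside — illustrative sketch, not part of the patch] The duplicate rule the
// following test pins down, reduced to a hypothetical helper. It assumes
// ExecutionReceiptStub keeps the ID() method of its predecessor ExecutionReceiptMeta,
// and that the IDs of receipts already incorporated on this fork were collected into seenOnFork.
func hasDuplicateReceipt(payload *flow.Payload, seenOnFork map[flow.Identifier]struct{}) bool {
	seenInPayload := make(map[flow.Identifier]struct{}, len(payload.Receipts))
	for _, stub := range payload.Receipts {
		id := stub.ID()
		if _, ok := seenOnFork[id]; ok {
			return true // duplicates a receipt from an ancestor block on this fork
		}
		if _, ok := seenInPayload[id]; ok {
			return true // duplicated within the same payload
		}
		seenInPayload[id] = struct{}{}
	}
	return false
}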
func (s *ReceiptValidationSuite) TestExtendReceiptsDuplicate() { - - block2 := unittest.BlockWithParentFixture(s.LatestFinalizedBlock.Header) - block2.SetPayload(flow.Payload{}) + block2 := unittest.BlockWithParentAndPayload( + s.LatestFinalizedBlock.ToHeader(), + flow.Payload{}, + ) s.Extend(block2) receipt := unittest.ReceiptForBlockFixture(block2) // B1 <- B2 <- B3{R(B2)} <- B4{R(B2)} s.T().Run("duplicate receipt in different block", func(t *testing.T) { - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, - Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, - }) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receipt.Stub()}, + Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, + }, + ) s.Extend(block3) - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, - Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, - }) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receipt.Stub()}, + Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, + }, + ) err := s.receiptValidator.ValidatePayload(block4) require.Error(t, err) require.True(t, engine.IsInvalidInputError(err), err) @@ -710,16 +936,18 @@ func (s *ReceiptValidationSuite) TestExtendReceiptsDuplicate() { // B1 <- B2 <- B3{R(B2), R(B2)} s.T().Run("duplicate receipt in same block", func(t *testing.T) { - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{ - receipt.Meta(), - receipt.Meta(), + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{ + receipt.Stub(), + receipt.Stub(), + }, + Results: []*flow.ExecutionResult{ + &receipt.ExecutionResult, + }, }, - Results: []*flow.ExecutionResult{ - &receipt.ExecutionResult, - }, - }) + ) err := s.receiptValidator.ValidatePayload(block3) require.Error(t, err) require.True(t, engine.IsInvalidInputError(err), err) @@ -729,19 +957,377 @@ func (s *ReceiptValidationSuite) TestExtendReceiptsDuplicate() { // `TestValidateReceiptAfterBootstrap` tests a special case where we try to produce a new block // after genesis with an empty payload. func (s *ReceiptValidationSuite) TestValidateReceiptAfterBootstrap() { + // Genesis block + blocks, result0, seal := unittest.ChainFixture(0) + require.Equal(s.T(), len(blocks), 1, "expected only creation of genesis block") + s.SealsIndex[blocks[0].ID()] = seal + s.Extend(blocks[0]) + s.PersistedResults[result0.ID()] = result0 + + candidate := unittest.BlockWithParentFixture(blocks[0].ToHeader()) + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().NoError(err) +} + +// TestValidateReceiptResultWithoutReceipt tests a case where a malicious leader incorporates a made-up execution result +// into their proposal. ReceiptValidator must ensure that each result included in the block is accompanied by +// at least one receipt in that same block. 
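// [Editor's aside — illustrative sketch, not part of the patch] The payload invariant
// behind the test below, as a hypothetical helper. It assumes ExecutionReceiptStub
// exposes a ResultID field, as its predecessor ExecutionReceiptMeta did.
func everyResultHasCommittingReceipt(payload *flow.Payload) bool {
	committed := make(map[flow.Identifier]struct{}, len(payload.Receipts))
	for _, stub := range payload.Receipts {
		committed[stub.ResultID] = struct{}{}
	}
	for _, result := range payload.Results {
		if _, ok := committed[result.ID()]; !ok {
			return false // a result without any committing receipt must be rejected as invalid input
		}
	}
	return true
}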
+func (s *ReceiptValidationSuite) TestValidateReceiptResultWithoutReceipt() { + // assuming signatures are all good (if we get to checking signatures) + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + + // G <- A <- B + blocks, result0, seal := unittest.ChainFixture(2) + s.SealsIndex[blocks[0].ID()] = seal + + receipts := unittest.ReceiptChainFor(blocks, result0) + blockA, blockB := blocks[1], blocks[2] + receiptA, receiptB := receipts[1], receipts[2] + + blockA.Payload.Receipts = []*flow.ExecutionReceiptStub{} + blockB.Payload.Receipts = []*flow.ExecutionReceiptStub{receiptA.Stub()} + blockB.Payload.Results = []*flow.ExecutionResult{&receiptA.ExecutionResult} + // update block header so that blocks are chained together + unittest.ReconnectBlocksAndReceipts(blocks, receipts) + // assuming all receipts are executed by the correct executor + for _, r := range receipts { + r.ExecutorID = s.ExeID + } + + for _, b := range blocks { + s.Extend(b) + } + s.PersistedResults[result0.ID()] = result0 + + candidate := unittest.BlockWithParentAndPayload( + blockB.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{}, + Results: []*flow.ExecutionResult{&receiptB.ExecutionResult}, + }, + ) + + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err) + s.Require().True(engine.IsInvalidInputError(err)) +} + +// TestValidateReceiptResultHasEnoughReceipts tests the happy path of a block proposal, where a leader +// includes multiple Execution Receipts that commit to the same result. In this case, the Flow protocol +// prescribes that +// - the Execution Result is only incorporated once +// - from each Receipt the `ExecutionReceiptStub` is to be included. +// +// The validator is expected to accept such payload as valid. 
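// [Editor's aside — illustrative sketch, not part of the patch] The payload shape this
// happy-path test constructs, condensed: k receipts from distinct executors committing
// to the same result; the result appears exactly once, while every receipt contributes
// its stub. Hypothetical helper, relying only on Stub() and ExecutionResult.ID() as
// already used in the patch itself.
func payloadFor(receipts []*flow.ExecutionReceipt) flow.Payload {
	stubs := make([]*flow.ExecutionReceiptStub, 0, len(receipts))
	resultsByID := make(map[flow.Identifier]*flow.ExecutionResult)
	for _, receipt := range receipts {
		stubs = append(stubs, receipt.Stub()) // one stub per receipt, even for a shared result
		resultsByID[receipt.ExecutionResult.ID()] = &receipt.ExecutionResult
	}
	results := make([]*flow.ExecutionResult, 0, len(resultsByID))
	for _, result := range resultsByID {
		results = append(results, result) // each distinct result is incorporated only once
	}
	return flow.Payload{Receipts: stubs, Results: results}
}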
+func (s *ReceiptValidationSuite) TestValidateReceiptResultHasEnoughReceipts() { + k := uint(5) // assuming signatures are all good s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil) - // G - blocks, result0, seal := unittest.ChainFixture(0) + // G <- A <- B + blocks, result0, seal := unittest.ChainFixture(2) s.SealsIndex[blocks[0].ID()] = seal + receipts := unittest.ReceiptChainFor(blocks, result0) + blockA, blockB := blocks[1], blocks[2] + receiptA, receiptB := receipts[1], receipts[2] + + blockA.Payload.Receipts = []*flow.ExecutionReceiptStub{} + blockB.Payload.Receipts = []*flow.ExecutionReceiptStub{receiptA.Stub()} + blockB.Payload.Results = []*flow.ExecutionResult{&receiptA.ExecutionResult} + // update block header so that blocks are chained together + unittest.ReconnectBlocksAndReceipts(blocks, receipts) + // assuming all receipts are executed by the correct executor + for _, r := range receipts { + r.ExecutorID = s.ExeID + } + for _, b := range blocks { s.Extend(b) } s.PersistedResults[result0.ID()] = result0 - candidate := unittest.BlockWithParentFixture(blocks[0].Header) + candidateReceipts := []*flow.ExecutionReceiptStub{receiptB.Stub()} + // add k-1 more receipts for the same execution result + for i := uint(1); i < k; i++ { + // use the base receipt and change the executor ID; we don't care about signatures, since we are not validating them + receipt := *receiptB.Stub() + // create a mock executor which submitted the receipt + executor := unittest.IdentityFixture(unittest.WithRole(flow.RoleExecution), unittest.WithStakingPubKey(s.publicKey)) + receipt.ExecutorID = executor.NodeID + // update local identity table so the receipt is considered valid + s.Identities[executor.NodeID] = executor + candidateReceipts = append(candidateReceipts, &receipt) + } + + candidate := unittest.BlockWithParentAndPayload( + blockB.ToHeader(), + flow.Payload{ + Receipts: candidateReceipts, + Results: []*flow.ExecutionResult{&receiptB.ExecutionResult}, + }, + ) + + err := s.receiptValidator.ValidatePayload(candidate) s.Require().NoError(err) } + +// TestReceiptNoBlock tests that the validator rejects a receipt whose executed block is unknown: +// - per API contract it should return a `module.UnknownBlockError` +// - should _not_ be misinterpreted as an invalid receipt, i.e. should not receive an `engine.InvalidInputError` +func (s *ReceiptValidationSuite) TestReceiptNoBlock() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + + // Initially, s.LatestExecutionResult points to the result for s.LatestSealedBlock. We construct the chain: + // LatestSealedBlock <-- unknownExecutedBlock <-- candidate(r) + // where `r` denotes an execution receipt for block `unknownExecutedBlock` + unknownExecutedBlock := unittest.BlockWithParentFixture(s.LatestSealedBlock.ToHeader()) + r := unittest.ExecutionReceiptFixture( + unittest.WithExecutorID(s.ExeID), // valid executor + unittest.WithResult(unittest.ExecutionResultFixture( + unittest.WithBlock(unknownExecutedBlock), + unittest.WithPreviousResult(*s.LatestExecutionResult)), // known parent result + )) // but the ID of the executed block is randomly chosen, i.e. 
unknown + + // attempting to validate receipt `r` should fail with a `module.UnknownBlockError` + err := s.receiptValidator.Validate(r) + s.Require().Error(err, "should reject invalid receipt") + s.Assert().True(module.IsUnknownBlockError(err), err) + s.Assert().False(engine.IsInvalidInputError(err), err) + + // attempting to validate a block whose payload contains receipt `r` should fail with a `module.UnknownBlockError` + candidate := unittest.BlockWithParentAndPayload( + unknownExecutedBlock.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(r)), + ) + err = s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "should reject invalid receipt") + s.Assert().True(module.IsUnknownBlockError(err), err) + s.Assert().False(engine.IsInvalidInputError(err), err) +} + +// TestException_HeadersExists tests that unexpected exceptions raised by the dependency +// `receiptValidator.headers.Exists(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_HeadersExists() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + candidate := unittest.BlockWithParentAndPayload( + valSubgrph.Block.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(receipt)), + ) + + // receiptValidator.headers yields exception on retrieving any block header + *s.HeadersDB = *mock_storage.NewHeaders(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + exception := errors.New("headers.ByBlockID() exception") + s.HeadersDB.On("Exists", mock.Anything).Return(false, exception) + + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) +} + +// TestException_HeadersByBlockID tests that unexpected exceptions raised by the dependency +// `receiptValidator.headers.ByBlockID(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_HeadersByBlockID() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + candidate := unittest.BlockWithParentAndPayload( + valSubgrph.Block.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(receipt)), + ) + + // receiptValidator.headers yields exception on retrieving any block header + exception := errors.New("headers.ByBlockID() exception") + *s.HeadersDB = *mock_storage.NewHeaders(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + s.HeadersDB.On("Exists", mock.Anything).Return(true, nil) + s.HeadersDB.On("ByBlockID", mock.Anything).Return(nil, exception) + + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + 
s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) +} + +// TestException_SealsHighestInFork tests that unexpected exceptions raised by the dependency +// `receiptValidator.seals.HighestInFork(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_SealsHighestInFork() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + candidate := unittest.BlockWithParentAndPayload( + valSubgrph.Block.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(receipt)), + ) + + // receiptValidator.seals yields exception on retrieving highest sealed block in fork up to candidate's parent + *s.SealsDB = *mock_storage.NewSeals(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + exception := errors.New("seals.HighestInFork(..) exception") + s.SealsDB.On("HighestInFork", candidate.ParentID).Return(nil, exception) + + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) +} + +// TestException_ProtocolStateHead tests that unexpected exceptions raised by the dependency +// `receiptValidator.state.AtBlockID() -> Snapshot.Head(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_ProtocolStateHead() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + + // receiptValidator.state yields exception on Block Header retrieval + *s.State = *mock_protocol.NewState(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + snapshot := mock_protocol.NewSnapshot(s.T()) + exception := errors.New("state.Head() exception") + snapshot.On("Head").Return(nil, exception) + s.State.On("AtBlockID", valSubgrph.Block.ID()).Return(snapshot) + + s.T().Run("Method Validate", func(t *testing.T) { + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "Validate should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) + + s.T().Run("Method ValidatePayload", func(t *testing.T) { + candidate := unittest.BlockWithParentAndPayload( + valSubgrph.Block.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(receipt)), + ) + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) +} + +// 
TestException_ProtocolStateIdentity tests that unexpected exceptions raised by the dependency +// `receiptValidator.state.AtBlockID() -> Snapshot.Identity(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_ProtocolStateIdentity() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + + // receiptValidator.state yields exception on Identity retrieval + *s.State = *mock_protocol.NewState(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + snapshot := mock_protocol.NewSnapshot(s.T()) + exception := errors.New("state.Identity() exception") + snapshot.On("Head").Return(valSubgrph.Block.ToHeader(), nil) + snapshot.On("Identity", mock.Anything).Return(nil, exception) + s.State.On("AtBlockID", valSubgrph.Block.ID()).Return(snapshot) + + s.T().Run("Method Validate", func(t *testing.T) { + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "Validate should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) + + s.T().Run("Method ValidatePayload", func(t *testing.T) { + candidate := unittest.BlockWithParentAndPayload( + valSubgrph.Block.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(receipt)), + ) + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) +} + +// TestException_IndexByBlockID tests that unexpected exceptions raised by the dependency +// `receiptValidator.index.ByBlockID(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_IndexByBlockID() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + + // receiptValidator.index yields exception on index retrieval + *s.IndexDB = *mock_storage.NewIndex(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + exception := errors.New("index.ByBlockID(..) 
exception") + s.IndexDB.On("ByBlockID", valSubgrph.Block.ID()).Return(nil, exception) + + s.T().Run("Method Validate", func(t *testing.T) { + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "Validate should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) + + s.T().Run("Method ValidatePayload", func(t *testing.T) { + candidate := unittest.BlockWithParentAndPayload( + valSubgrph.Block.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(receipt)), + ) + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) +} + +// TestException_ResultsByID tests that unexpected exceptions raised by the dependency +// `receiptValidator.results.ByID(..)` are escalated and not misinterpreted as +// `InvalidInputError` or `UnknownBlockError` or `UnknownResultError` +func (s *ReceiptValidationSuite) TestException_ResultsByID() { + s.publicKey.On("Verify", mock.Anything, mock.Anything, mock.Anything).Return(true, nil).Maybe() + valSubgrph := s.ValidSubgraphFixture() + s.AddSubgraphFixtureToMempools(valSubgrph) + receipt := unittest.ExecutionReceiptFixture(unittest.WithExecutorID(s.ExeID), unittest.WithResult(valSubgrph.Result)) + + // receiptValidator.results yields exception on ExecutionResult retrieval + *s.ResultsDB = *mock_storage.NewExecutionResults(s.T()) // receiptValidator has pointer to this field, which we override with a new state mock + exception := errors.New("results.ByID(..) exception") + s.ResultsDB.On("ByID", valSubgrph.Result.PreviousResultID).Return(nil, exception) + + s.T().Run("Method Validate", func(t *testing.T) { + err := s.receiptValidator.Validate(receipt) + s.Require().Error(err, "Validate should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) + + s.T().Run("Method ValidatePayload", func(t *testing.T) { + candidate := unittest.BlockWithParentAndPayload( + valSubgrph.Block.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(receipt)), + ) + err := s.receiptValidator.ValidatePayload(candidate) + s.Require().Error(err, "ValidatePayload should escalate exception") + s.Assert().False(engine.IsInvalidInputError(err), err) + s.Assert().False(module.IsUnknownBlockError(err), err) + s.Assert().False(module.IsUnknownResultError(err), err) + }) +} diff --git a/module/validation/seal_validator.go b/module/validation/seal_validator.go index 19649abf31f..c6c5341a97e 100644 --- a/module/validation/seal_validator.go +++ b/module/validation/seal_validator.go @@ -3,7 +3,8 @@ package validation import ( "fmt" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" @@ -54,11 +55,15 @@ func (s *sealValidator) verifySealSignature(aggregatedSignatures *flow.Aggregate chunk *flow.Chunk, executionResultID flow.Identifier) error { // TODO: replace implementation once proper aggregation is used for Verifiers' attestation signatures. 
- atst := flow.Attestation{ + atst, err := flow.NewAttestation(flow.UntrustedAttestation{ BlockID: chunk.BlockID, ExecutionResultID: executionResultID, ChunkIndex: chunk.Index, + }) + if err != nil { + return fmt.Errorf("could not build attestation: %w", err) } + atstID := atst.ID() for i, signature := range aggregatedSignatures.VerifierSignatures { @@ -99,9 +104,8 @@ func (s *sealValidator) verifySealSignature(aggregatedSignatures *flow.Aggregate // => Therefore, only seals whose results pass the sub-graph check will be // allowed. func (s *sealValidator) Validate(candidate *flow.Block) (*flow.Seal, error) { - header := candidate.Header payload := candidate.Payload - parentID := header.ParentID + parentID := candidate.ParentID // Get the latest seal in the fork that ends with the candidate's parent. // The protocol state saves this information for each block that has been @@ -130,7 +134,7 @@ func (s *sealValidator) Validate(candidate *flow.Block) (*flow.Seal, error) { byBlock[seal.BlockID] = seal } if len(payload.Seals) != len(byBlock) { - return nil, engine.NewInvalidInputError("multiple seals for the same block") + return nil, engine.NewInvalidInputErrorf("multiple seals for the same block") } // incorporatedResults collects execution results that are incorporated in unsealed @@ -168,7 +172,14 @@ func (s *sealValidator) Validate(candidate *flow.Block) (*flow.Seal, error) { if err != nil { return fmt.Errorf("internal error fetching result %v incorporated in block %v: %w", resultID, blockID, err) } - incorporatedResults[resultID] = flow.NewIncorporatedResult(blockID, result) + incorporatedResult, err := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{ + IncorporatedBlockID: blockID, + Result: result, + }) + if err != nil { + return fmt.Errorf("could not create incorporated result: %w", err) + } + incorporatedResults[resultID] = incorporatedResult } return nil } @@ -290,7 +301,11 @@ func (s *sealValidator) validateSeal(seal *flow.Seal, incorporatedResult *flow.I // only Verification Nodes that were assigned to the chunk are allowed to approve it for _, signerId := range chunkSigs.SignerIDs { - if !assignments.HasVerifier(chunk, signerId) { + b, err := assignments.HasVerifier(chunk.Index, signerId) + if err != nil { + return fmt.Errorf("getting verifiers for chunk %d failed: %w", chunk.Index, err) + } + if !b { return engine.NewInvalidInputErrorf("invalid signer id at chunk: %d", chunk.Index) } } diff --git a/module/validation/seal_validator_test.go b/module/validation/seal_validator_test.go index 5401fd61354..f88d98f4e21 100644 --- a/module/validation/seal_validator_test.go +++ b/module/validation/seal_validator_test.go @@ -66,10 +66,12 @@ func (s *SealValidationSuite) TestSeal_EnforceGap() { b1, _, _, _, sealB0 := s.generateBasicTestFork() // block includes seal for direct parent: - newBlock := unittest.BlockWithParentFixture(b1.Header) - newBlock.SetPayload(flow.Payload{ - Seals: []*flow.Seal{sealB0}, - }) + newBlock := unittest.BlockWithParentAndPayload( + b1.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{sealB0}, + }, + ) _, err := s.sealValidator.Validate(newBlock) s.Require().Error(err) @@ -132,7 +134,7 @@ func (s *SealValidationSuite) TestSealEmergencySeal_VerificationRequire0Approval // requiredApprovalsForSealConstruction = 2 // receive seal with 2 approvals => _not_ emergency sealed - newBlock := s.makeBlockSealingResult(b2.Header, &receipt.ExecutionResult, 2) + newBlock := s.makeBlockSealingResult(b2, &receipt.ExecutionResult, 2) metrics := 
&module.ConsensusMetrics{} metrics.On("EmergencySeal").Run(func(args mock.Arguments) { s.T().Errorf("happy path sealing should not be counted as emergency sealed") @@ -155,7 +157,7 @@ func (s *SealValidationSuite) TestSealEmergencySeal_VerificationRequire1Approval // requiredApprovalsForSealVerification = 1 // receive seal with 1 approval => emergency sealed - newBlock := s.makeBlockSealingResult(b2.Header, &receipt.ExecutionResult, 1) + newBlock := s.makeBlockSealingResult(b2, &receipt.ExecutionResult, 1) metrics := &module.ConsensusMetrics{} metrics.On("EmergencySeal").Once() s.sealValidator.metrics = metrics @@ -176,7 +178,7 @@ func (s *SealValidationSuite) TestSealEmergencySeal_VerificationRequire1Approval // requiredApprovalsForSealConstruction = 2 // requiredApprovalsForSealVerification = 1 // receive seal with 0 approval => invalid - newBlock := s.makeBlockSealingResult(b2.Header, &receipt.ExecutionResult, 0) + newBlock := s.makeBlockSealingResult(b2, &receipt.ExecutionResult, 0) metrics := &module.ConsensusMetrics{} metrics.On("EmergencySeal").Run(func(args mock.Arguments) { s.T().Errorf("invalid seal should not be counted as emergency sealed") @@ -200,7 +202,7 @@ func (s *SealValidationSuite) TestSealEmergencySeal_VerificationRequire0Approval // requiredApprovalsForSealVerification = 0 // receive seal with 1 approval => emergency sealed - newBlock := s.makeBlockSealingResult(b2.Header, &receipt.ExecutionResult, 1) + newBlock := s.makeBlockSealingResult(b2, &receipt.ExecutionResult, 1) metrics := &module.ConsensusMetrics{} metrics.On("EmergencySeal").Once() s.sealValidator.metrics = metrics @@ -221,7 +223,7 @@ func (s *SealValidationSuite) TestSealEmergencySeal_VerificationRequire0Approval // requiredApprovalsForSealConstruction = 2 // requiredApprovalsForSealVerification = 0 // receive seal with 0 approval => emergency sealed - newBlock := s.makeBlockSealingResult(b2.Header, &receipt.ExecutionResult, 0) + newBlock := s.makeBlockSealingResult(b2, &receipt.ExecutionResult, 0) metrics := &module.ConsensusMetrics{} metrics.On("EmergencySeal").Once() s.sealValidator.metrics = metrics @@ -305,7 +307,6 @@ func (s *SealValidationSuite) TestSealInvalidChunkAssignment() { // the validator handles this without error. 
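// [Editor's aside — illustrative sketch, not part of the patch] The decision rule the
// emergency-sealing tests above pin down, condensed into a hypothetical classifier
// (approval counts are per chunk; the two thresholds come from the test comments):
func classifySeal(approvals, requiredForConstruction, requiredForVerification int) string {
	switch {
	case approvals >= requiredForConstruction:
		return "regular seal" // happy path; the EmergencySeal metric must NOT fire
	case approvals >= requiredForVerification:
		return "emergency seal" // accepted, and ConsensusMetrics.EmergencySeal() fires once
	default:
		return "invalid" // rejected, satisfying engine.IsInvalidInputError
	}
}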
func (s *SealValidationSuite) TestHighestSeal() { // take finalized block and build a receipt for it - block3 := unittest.BlockWithParentFixture(s.LatestFinalizedBlock.Header) block2Receipt := unittest.ExecutionReceiptFixture( unittest.WithExecutorID(s.ExeID), unittest.WithResult(unittest.ExecutionResultFixture( @@ -313,10 +314,13 @@ func (s *SealValidationSuite) TestHighestSeal() { unittest.WithPreviousResult(*s.LatestExecutionResult), )), ) - block3.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{block2Receipt.Meta()}, - Results: []*flow.ExecutionResult{&block2Receipt.ExecutionResult}, - }) + block3 := unittest.BlockWithParentAndPayload( + s.LatestFinalizedBlock.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{block2Receipt.Stub()}, + Results: []*flow.ExecutionResult{&block2Receipt.ExecutionResult}, + }, + ) s.Extend(block3) // create and insert block4 containing a receipt for block3 @@ -327,26 +331,30 @@ func (s *SealValidationSuite) TestHighestSeal() { unittest.WithPreviousResult(block2Receipt.ExecutionResult), )), ) - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{block3Receipt.Meta()}, - Results: []*flow.ExecutionResult{&block3Receipt.ExecutionResult}, - }) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{block3Receipt.Stub()}, + Results: []*flow.ExecutionResult{&block3Receipt.ExecutionResult}, + }, + ) s.Extend(block4) // block B5 - block5 := unittest.BlockWithParentFixture(block4.Header) + block5 := unittest.BlockWithParentFixture(block4.ToHeader()) s.Extend(block5) // construct block B6 and its payload components, i.e. seals for block B2 and B3 seal2 := s.validSealForResult(&block2Receipt.ExecutionResult) seal3 := s.validSealForResult(&block3Receipt.ExecutionResult) - block6 := unittest.BlockWithParentFixture(block5.Header) - block6.SetPayload(flow.Payload{ - // placing seals in the reversed order to test - // Extend will pick the highest sealed block - Seals: []*flow.Seal{seal3, seal2}, - }) + block6 := unittest.BlockWithParentAndPayload( + block5.ToHeader(), + flow.Payload{ + // placing seals in the reversed order to test + // Extend will pick the highest sealed block + Seals: []*flow.Seal{seal3, seal2}, + }, + ) last, err := s.sealValidator.Validate(block6) require.NoError(s.T(), err) @@ -376,17 +384,23 @@ func (s *SealValidationSuite) TestHighestSeal() { // // In addition, we also run a valid test case to confirm the proper construction of the test func (s *SealValidationSuite) TestValidatePayload_SealsSkipBlock() { - - blocks := unittest.ChainFixtureFrom(4, s.LatestSealedBlock.Header) + blocks := unittest.ChainFixtureFrom(4, s.LatestSealedBlock.ToHeader()) // B3's payload contains results and receipts for B0, B1, B2 resultB0 := unittest.ExecutionResultFixture(unittest.WithBlock(blocks[0]), unittest.WithPreviousResult(*s.LatestExecutionResult)) receipts := unittest.ReceiptChainFor(blocks, resultB0) - blocks[3].SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receipts[0].Meta(), receipts[1].Meta(), receipts[2].Meta()}, - Results: []*flow.ExecutionResult{&receipts[0].ExecutionResult, &receipts[1].ExecutionResult, &receipts[2].ExecutionResult}, - }) - b4 := unittest.BlockWithParentFixture(blocks[3].Header) + var err error + blocks[3], err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: blocks[3].HeaderBody, + Payload: unittest.PayloadFixture( + 
unittest.WithReceipts(receipts[0], receipts[1], receipts[2]), + ), + }, + ) + require.NoError(s.T(), err) + + b4 := unittest.BlockWithParentFixture(blocks[3].ToHeader()) blocks = append(blocks, b4) for _, b := range blocks { @@ -399,10 +413,12 @@ func (s *SealValidationSuite) TestValidatePayload_SealsSkipBlock() { // S <- B0 <- B1 <- B2 <- B3{R(B0), R(B1), R(B2)} <- B4 <- X{Seal(R(B1))} s.T().Run("no seal for the immediately next unsealed block", func(t *testing.T) { - X := unittest.BlockWithParentFixture(b4.Header) - X.SetPayload(flow.Payload{ - Seals: []*flow.Seal{block1Seal}, - }) + X := unittest.BlockWithParentAndPayload( + b4.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{block1Seal}, + }, + ) _, err := s.sealValidator.Validate(X) require.Error(s.T(), err) @@ -411,10 +427,12 @@ func (s *SealValidationSuite) TestValidatePayload_SealsSkipBlock() { // S <- B0 <- B1 <- B2 <- B3{R(B0), R(B1), R(B2)} <- B4 <- X{Seal(R(B0)), Seal(R(B2))} s.T().Run("seals skip one of the following blocks", func(t *testing.T) { - X := unittest.BlockWithParentFixture(b4.Header) - X.SetPayload(flow.Payload{ - Seals: []*flow.Seal{block0Seal, block2Seal}, - }) + X := unittest.BlockWithParentAndPayload( + b4.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{block0Seal, block2Seal}, + }, + ) _, err := s.sealValidator.Validate(X) require.Error(s.T(), err) @@ -423,10 +441,12 @@ func (s *SealValidationSuite) TestValidatePayload_SealsSkipBlock() { // S <- B0 <- B1 <- B2 <- B3{R(B0), R(B1), R(B2)} <- B4 <- X{Seal(R(B0)), Seal(R(B1)), Seal(R(B2))} s.T().Run("valid test case", func(t *testing.T) { - X := unittest.BlockWithParentFixture(b4.Header) - X.SetPayload(flow.Payload{ - Seals: []*flow.Seal{block0Seal, block1Seal, block2Seal}, - }) + X := unittest.BlockWithParentAndPayload( + b4.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{block0Seal, block1Seal, block2Seal}, + }, + ) _, err := s.sealValidator.Validate(X) require.NoError(s.T(), err) @@ -459,19 +479,34 @@ func (s *SealValidationSuite) TestValidatePayload_SealsSkipBlock() { func (s *SealValidationSuite) TestValidatePayload_ExecutionDisconnected() { blocks := []*flow.Block{&s.LatestSealedBlock} // slice with elements [S, A, B, C, D] - blocks = append(blocks, unittest.ChainFixtureFrom(4, s.LatestSealedBlock.Header)...) + blocks = append(blocks, unittest.ChainFixtureFrom(4, s.LatestSealedBlock.ToHeader())...) receiptChain1 := unittest.ReceiptChainFor(blocks, unittest.ExecutionResultFixture()) // elements [Result[S]_1, Result[A]_1, Result[B]_1, ...] receiptChain2 := unittest.ReceiptChainFor(blocks, unittest.ExecutionResultFixture()) // elements [Result[S]_2, Result[A]_2, Result[B]_2, ...] 
- for i := 1; i <= 3; i++ { // set payload for blocks A, B, C - blocks[i].SetPayload(flow.Payload{ - Results: []*flow.ExecutionResult{&receiptChain1[i-1].ExecutionResult, &receiptChain2[i-1].ExecutionResult}, - Receipts: []*flow.ExecutionReceiptMeta{receiptChain1[i-1].Meta(), receiptChain2[i-1].Meta()}, - }) + var err error + for i := 1; i <= 3; i++ { + // set payload for blocks A, B, C + blocks[i], err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: blocks[i].HeaderBody, + Payload: unittest.PayloadFixture( + unittest.WithReceipts(receiptChain1[i-1], receiptChain2[i-1]), + ), + }, + ) + require.NoError(s.T(), err) } - blocks[4].SetPayload(flow.Payload{ - Seals: []*flow.Seal{unittest.Seal.Fixture(unittest.Seal.WithResult(&receiptChain1[0].ExecutionResult))}, - }) + + blocks[4], err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: blocks[4].HeaderBody, + Payload: unittest.PayloadFixture( + unittest.WithSeals(unittest.Seal.Fixture(unittest.Seal.WithResult(&receiptChain1[0].ExecutionResult))), + ), + }, + ) + require.NoError(s.T(), err) + for i := 0; i <= 4; i++ { // we need to run this several times, as we have _multiple_ execution chains. // In each iteration, we only manage to reconnect one additional height @@ -491,10 +526,12 @@ func (s *SealValidationSuite) TestValidatePayload_ExecutionDisconnected() { // S <- A{..} <- B{..} <- C{..} <- D{..} <- X{Seal for Result[A]_2} s.T().Run("seals in candidate block does not connect to latest sealed result of parent", func(t *testing.T) { - X := unittest.BlockWithParentFixture(blocks[4].Header) - X.SetPayload(flow.Payload{ - Seals: []*flow.Seal{sealA2}, - }) + X := unittest.BlockWithParentAndPayload( + blocks[4].ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{sealA2}, + }, + ) _, err := s.sealValidator.Validate(X) require.Error(s.T(), err) @@ -503,10 +540,12 @@ func (s *SealValidationSuite) TestValidatePayload_ExecutionDisconnected() { // S <- A{..} <- B{..} <- C{..} <- D{..} <- X{Seal for Result[A]_1; Seal for Result[B]_2} s.T().Run("sealed execution results within candidate block do not form a chain", func(t *testing.T) { - X := unittest.BlockWithParentFixture(blocks[4].Header) - X.SetPayload(flow.Payload{ - Seals: []*flow.Seal{sealA1, sealB2}, - }) + X := unittest.BlockWithParentAndPayload( + blocks[4].ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{sealA1, sealB2}, + }, + ) _, err := s.sealValidator.Validate(X) require.Error(s.T(), err) @@ -515,10 +554,12 @@ func (s *SealValidationSuite) TestValidatePayload_ExecutionDisconnected() { // S <- A{..} <- B{..} <- C{..} <- D{..} <- X{Seal for Result[A]_1; Seal for Result[B]_1} s.T().Run("valid test case", func(t *testing.T) { - X := unittest.BlockWithParentFixture(blocks[4].Header) - X.SetPayload(flow.Payload{ - Seals: []*flow.Seal{sealA1, sealB1}, - }) + X := unittest.BlockWithParentAndPayload( + blocks[4].ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{sealA1, sealB1}, + }, + ) _, err := s.sealValidator.Validate(X) require.NoError(s.T(), err) @@ -542,10 +583,12 @@ func (s *SealValidationSuite) TestExtendSealDuplicate() { s.Extend(b3) // insert B4 with a duplicate seal - b4 := unittest.BlockWithParentFixture(b3.Header) - b4.SetPayload(flow.Payload{ - Seals: []*flow.Seal{sealB1}, - }) + b4 := unittest.BlockWithParentAndPayload( + b3.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{sealB1}, + }, + ) // we expect an error because block 4 contains a seal that is // already contained in another block on the fork @@ -557,12 +600,18 @@ func (s *SealValidationSuite) 
TestExtendSealDuplicate() { // <- LatestSealedBlock <- B0 <- B1{ Result[B0], Receipt[B0] } <- B2 <- B3{S(R(B1)), S(R(B1))} s.T().Run("Duplicate seal in same payload", func(t *testing.T) { _, _, b3, _, sealB1 := s.generateBasicTestFork() - b3.SetPayload(flow.Payload{ - Seals: []*flow.Seal{sealB1, sealB1}, - }) + b3, err := flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: b3.HeaderBody, + Payload: unittest.PayloadFixture( + unittest.WithSeals(sealB1, sealB1), + ), + }, + ) + require.NoError(t, err) // we expect an error because block 3 contains duplicate seals within its payload - _, err := s.sealValidator.Validate(b3) + _, err = s.sealValidator.Validate(b3) require.Error(t, err) require.True(t, engine.IsInvalidInputError(err), err) }) @@ -580,8 +629,8 @@ func (s *SealValidationSuite) TestExtendSeal_NoIncorporatedResult() { // * the result for `LatestSealedBlock` is `LatestExecutionResult` (already initialized in test setup) // * as block B0, we use `LatestFinalizedBlock` (already initialized in test setup) // construct block B1 and B2 - b1 := unittest.BlockWithParentFixture(s.LatestFinalizedBlock.Header) - b2 := unittest.BlockWithParentFixture(b1.Header) + b1 := unittest.BlockWithParentFixture(s.LatestFinalizedBlock.ToHeader()) + b2 := unittest.BlockWithParentFixture(b1.ToHeader()) s.Extend(b1) s.Extend(b2) @@ -594,10 +643,12 @@ func (s *SealValidationSuite) TestExtendSeal_NoIncorporatedResult() { )), ) seal := unittest.Seal.Fixture(unittest.Seal.WithResult(&receipt.ExecutionResult)) - newBlock := unittest.BlockWithParentFixture(b2.Header) - newBlock.SetPayload(flow.Payload{ - Seals: []*flow.Seal{seal}, - }) + newBlock := unittest.BlockWithParentAndPayload( + b2.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{seal}, + }, + ) // we expect an error because there is no block on the fork that // contains a receipt committing to block1 @@ -621,14 +672,20 @@ func (s *SealValidationSuite) TestExtendSeal_DifferentIncorporatedResult() { unittest.WithPreviousResult(*s.LatestExecutionResult), ) seal := unittest.Seal.Fixture(unittest.Seal.WithResult(differentResult)) - newBlock.SetPayload(flow.Payload{ - Seals: []*flow.Seal{seal}, - }) + newBlock, err := flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: newBlock.HeaderBody, + Payload: unittest.PayloadFixture( + unittest.WithSeals(seal), + ), + }, + ) + require.NoError(s.T(), err) // Should fail because ER0a is different than ER0b, although they // reference the same block. Technically the fork does not contain an // IncorporatedResult for the result referenced by the proposed seal. 
- _, err := s.sealValidator.Validate(newBlock) + _, err = s.sealValidator.Validate(newBlock) s.Require().Error(err) s.Require().True(engine.IsInvalidInputError(err), err) } @@ -655,25 +712,29 @@ func (s *SealValidationSuite) TestExtendSeal_ResultIncorporatedOnDifferentFork() ) // construct block A1 - a1 := unittest.BlockWithParentFixture(s.LatestFinalizedBlock.Header) - a1.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, - Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, - }) + a1 := unittest.BlockWithParentAndPayload( + s.LatestFinalizedBlock.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receipt.Stub()}, + Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, + }, + ) s.Extend(a1) // construct block B1 and B2 - b1 := unittest.BlockWithParentFixture(s.LatestFinalizedBlock.Header) - b2 := unittest.BlockWithParentFixture(b1.Header) + b1 := unittest.BlockWithParentFixture(s.LatestFinalizedBlock.ToHeader()) + b2 := unittest.BlockWithParentFixture(b1.ToHeader()) s.Extend(b1) s.Extend(b2) // construct `newBlock` seal := s.validSealForResult(&receipt.ExecutionResult) - newBlock := unittest.BlockWithParentFixture(b2.Header) - newBlock.SetPayload(flow.Payload{ - Seals: []*flow.Seal{seal}, - }) + newBlock := unittest.BlockWithParentAndPayload( + b2.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{seal}, + }, + ) // we expect an error because there is no block on the fork that // contains a receipt committing to the seal's result @@ -690,9 +751,14 @@ func (s *SealValidationSuite) validSealForResult(result *flow.ExecutionResult) * assignment := s.Assignments[result.ID()] for _, chunk := range result.Chunks { aggregatedSigs := &seal.AggregatedApprovalSigs[chunk.Index] - assignedVerifiers := assignment.Verifiers(chunk) - aggregatedSigs.SignerIDs = assignedVerifiers[:] - aggregatedSigs.VerifierSignatures = unittest.SignaturesFixture(len(assignedVerifiers)) + assignedVerifiers, err := assignment.Verifiers(chunk.Index) + require.NoError(s.T(), err) + v := make([]flow.Identifier, 0, len(assignedVerifiers)) + for id := range assignedVerifiers { + v = append(v, id) + } + aggregatedSigs.SignerIDs = v + aggregatedSigs.VerifierSignatures = unittest.SignaturesFixture(len(v)) for i, aggregatedSig := range aggregatedSigs.VerifierSignatures { payload := flow.Attestation{ @@ -716,17 +782,19 @@ func (s *SealValidationSuite) validSealForResult(result *flow.ExecutionResult) * // a seal for `sealedResult`. For each chunk, the seal has aggregated approval signatures from // `numberApprovals` assigned verification Nodes. // Note: numberApprovals cannot be larger than the number of assigned verification nodes. 
-func (s *SealValidationSuite) makeBlockSealingResult(parentBlock *flow.Header, sealedResult *flow.ExecutionResult, numberApprovals int) *flow.Block { +func (s *SealValidationSuite) makeBlockSealingResult(parentBlock *flow.Block, sealedResult *flow.ExecutionResult, numberApprovals int) *flow.Block { seal := s.validSealForResult(sealedResult) for chunkIndex := 0; chunkIndex < len(seal.AggregatedApprovalSigs); chunkIndex++ { seal.AggregatedApprovalSigs[chunkIndex].SignerIDs = seal.AggregatedApprovalSigs[chunkIndex].SignerIDs[:numberApprovals] seal.AggregatedApprovalSigs[chunkIndex].VerifierSignatures = seal.AggregatedApprovalSigs[chunkIndex].VerifierSignatures[:numberApprovals] } - sealingBlock := unittest.BlockWithParentFixture(parentBlock) - sealingBlock.SetPayload(flow.Payload{ - Seals: []*flow.Seal{seal}, - }) + sealingBlock := unittest.BlockWithParentAndPayload( + parentBlock.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{seal}, + }, + ) return sealingBlock } @@ -751,20 +819,24 @@ func (s *SealValidationSuite) generateBasicTestFork() (*flow.Block, *flow.Block, ) // construct block B1 and B2 - b1 := unittest.BlockWithParentFixture(s.LatestFinalizedBlock.Header) - b1.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, - Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, - }) - b2 := unittest.BlockWithParentFixture(b1.Header) + b1 := unittest.BlockWithParentAndPayload( + s.LatestFinalizedBlock.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receipt.Stub()}, + Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, + }, + ) + b2 := unittest.BlockWithParentFixture(b1.ToHeader()) s.Extend(b1) s.Extend(b2) seal := s.validSealForResult(&receipt.ExecutionResult) - newBlock := unittest.BlockWithParentFixture(b2.Header) - newBlock.SetPayload(flow.Payload{ - Seals: []*flow.Seal{seal}, - }) + newBlock := unittest.BlockWithParentAndPayload( + b2.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{seal}, + }, + ) return b1, b2, newBlock, receipt, seal } diff --git a/network/README.MD b/network/README.MD new file mode 100644 index 00000000000..706a7655a6b --- /dev/null +++ b/network/README.MD @@ -0,0 +1,13 @@ + +# Networking Layer + +## Configuration +- [Resource Management](..%2Fconfig%2Fdocs%2FresourceManager.MD) + +## Protocols +- [Unicast (1:1 connections)](p2p%2Funicast%2FREADME.MD) + +## Security Protections +- [Application Layer Spam Prevention (ALSP)](alsp%2Freadme.md) +- [GossipSub Peer Scoring](p2p%2Fscoring%2FREADME.md) +- [GossipSub RPC Inspection](p2p%2Finspector%2FREADME.MD) \ No newline at end of file diff --git a/network/alsp.go b/network/alsp.go index 4df91d97b3e..2ed3fd938ca 100644 --- a/network/alsp.go +++ b/network/alsp.go @@ -2,6 +2,7 @@ package network import ( "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/network/channels" ) @@ -35,7 +36,7 @@ type MisbehaviorReport interface { Reason() Misbehavior // Penalty returns the penalty value of the misbehavior. - Penalty() int + Penalty() float64 } // MisbehaviorReportManager abstracts the semantics of handling misbehavior reports. @@ -43,6 +44,7 @@ type MisbehaviorReport interface { // The misbehavior report manager is responsible for penalizing the misbehaving node and disallow-listing the node // if the overall penalty of the misbehaving node drops below the disallow-listing threshold. 
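// [Editor's aside — illustrative sketch, not part of the patch] Why Penalty() moves from
// int to float64 in this hunk: ALSP accumulates fractional penalty deductions per node and
// compares the running total against a disallow-listing threshold. The threshold value
// below is made up for illustration; the real one lives in the ALSP configuration.
const exampleDisallowListingThreshold = -864.0 // hypothetical value

func applyReport(currentPenalty, reportPenalty float64) (newPenalty float64, disallowList bool) {
	newPenalty = currentPenalty + reportPenalty // report penalties are negative adjustments
	return newPenalty, newPenalty < exampleDisallowListingThreshold
}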
type MisbehaviorReportManager interface { + component.Component // HandleMisbehaviorReport handles the misbehavior report that is sent by the engine. // The implementation of this function should penalize the misbehaving node and report the node to be // disallow-listed if the overall penalty of the misbehaving node drops below the disallow-listing threshold. diff --git a/network/alsp/cache.go b/network/alsp/cache.go index 88bf5ce9ee0..9c2916e3f55 100644 --- a/network/alsp/cache.go +++ b/network/alsp/cache.go @@ -1,19 +1,25 @@ package alsp -import "github.com/onflow/flow-go/model/flow" +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/alsp/model" +) // SpamRecordCache is a cache of spam records for the ALSP module. // It is used to keep track of the spam records of the nodes that have been reported for spamming. type SpamRecordCache interface { - // Init initializes the spam record cache for the given origin id if it does not exist. - // Returns true if the record is initialized, false otherwise (i.e., the record already exists). - Init(originId flow.Identifier) bool - - // Adjust applies the given adjust function to the spam record of the given origin id. + // AdjustWithInit applies the given adjust function to the spam record of the given origin id. // Returns the Penalty value of the record after the adjustment. // It returns an error if the adjustFunc returns an error or if the record does not exist. - // Assuming that adjust is always called when the record exists, the error is irrecoverable and indicates a bug. - Adjust(originId flow.Identifier, adjustFunc RecordAdjustFunc) (float64, error) + // Note that if the record does not exist, the record is initialized and the + // adjust function is applied to the initialized record again. + // Args: + // - originId: the origin id of the spam record. + // - adjustFunc: the function that adjusts the spam record. + // Returns: + // - Penalty value of the record after the adjustment. + // - error any returned error should be considered as an irrecoverable error and indicates a bug. + AdjustWithInit(originId flow.Identifier, adjustFunc model.RecordAdjustFunc) (float64, error) // Identities returns the list of identities of the nodes that have a spam record in the cache. Identities() []flow.Identifier @@ -29,7 +35,7 @@ type SpamRecordCache interface { // Returns: // - the record and true if the record exists, nil and false otherwise. // Note that the returned record is a copy of the record in the cache (we do not want the caller to modify the record). - Get(originId flow.Identifier) (*ProtocolSpamRecord, bool) + Get(originId flow.Identifier) (*model.ProtocolSpamRecord, bool) // Size returns the number of records in the cache. Size() uint diff --git a/network/alsp/internal/cache.go b/network/alsp/internal/cache.go index 38ebd06c995..975e24dfdf8 100644 --- a/network/alsp/internal/cache.go +++ b/network/alsp/internal/cache.go @@ -11,14 +11,14 @@ import ( "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/network/alsp" + "github.com/onflow/flow-go/network/alsp/model" ) -var ErrSpamRecordNotFound = fmt.Errorf("spam record not found") - // SpamRecordCache is a cache that stores spam records at the protocol layer for ALSP. +// Stored protocol spam records are keyed by the origin id of the spam record. 
type SpamRecordCache struct { - recordFactory func(flow.Identifier) alsp.ProtocolSpamRecord // recordFactory is a factory function that creates a new spam record. - c *stdmap.Backend // c is the underlying cache. + *stdmap.Backend[flow.Identifier, *model.ProtocolSpamRecord] // c is the underlying cache. + recordFactory model.SpamRecordFactoryFunc // recordFactory is a factory function that creates a new spam record. } var _ alsp.SpamRecordCache = (*SpamRecordCache)(nil) @@ -35,79 +35,58 @@ var _ alsp.SpamRecordCache = (*SpamRecordCache)(nil) // expected to be small, we do not eject any records from the cache. The cache size must be large enough to hold all // the spam records of the authorized nodes. Also, this cache is keeping at most one record per origin id, so the // size of the cache must be at least the number of authorized nodes. -func NewSpamRecordCache(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics, recordFactory func(flow.Identifier) alsp.ProtocolSpamRecord) *SpamRecordCache { - backData := herocache.NewCache(sizeLimit, +func NewSpamRecordCache(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics, recordFactory model.SpamRecordFactoryFunc) *SpamRecordCache { + backData := herocache.NewCache[*model.ProtocolSpamRecord](sizeLimit, herocache.DefaultOversizeFactor, - // this cache is supposed to keep the spam record for the authorized (staked) nodes. Since the number of such nodes is - // expected to be small, we do not eject any records from the cache. The cache size must be large enough to hold all - // the spam records of the authorized nodes. Also, this cache is keeping at most one record per origin id, so the - // size of the cache must be at least the number of authorized nodes. - heropool.NoEjection, - logger.With().Str("mempool", "aslp=spam-records").Logger(), - collector) + heropool.LRUEjection, + logger.With().Str("mempool", "aslp-spam-records").Logger(), + collector, + ) return &SpamRecordCache{ recordFactory: recordFactory, - c: stdmap.NewBackend(stdmap.WithBackData(backData)), + Backend: stdmap.NewBackend(stdmap.WithMutableBackData[flow.Identifier, *model.ProtocolSpamRecord](backData)), } } -// Init initializes the spam record cache for the given origin id if it does not exist. -// Returns true if the record is initialized, false otherwise (i.e., the record already exists). -// Args: -// - originId: the origin id of the spam record. -// Returns: -// - true if the record is initialized, false otherwise (i.e., the record already exists). -// Note that if Init is called multiple times for the same origin id, the record is initialized only once, and the -// subsequent calls return false and do not change the record (i.e., the record is not re-initialized). -func (s *SpamRecordCache) Init(originId flow.Identifier) bool { - return s.c.Add(ProtocolSpamRecordEntity{s.recordFactory(originId)}) -} - -// Adjust applies the given adjust function to the spam record of the given origin id. +// AdjustWithInit applies the given adjust function to the spam record of the given origin id. // Returns the Penalty value of the record after the adjustment. // It returns an error if the adjustFunc returns an error or if the record does not exist. -// Assuming that adjust is always called when the record exists, the error is irrecoverable and indicates a bug. +// Note that if the record does not exist, the record is initialized and the +// adjust function is applied to the initialized record again. 
// Args: // - originId: the origin id of the spam record. // - adjustFunc: the function that adjusts the spam record. // Returns: // - Penalty value of the record after the adjustment. -// - error if the adjustFunc returns an error or if the record does not exist (ErrSpamRecordNotFound). Except the ErrSpamRecordNotFound, -// any other error should be treated as an irrecoverable error and indicates a bug. -// -// Note if Adjust is called under the assumption that the record exists, the ErrSpamRecordNotFound should be treated -// as an irrecoverable error and indicates a bug. -func (s *SpamRecordCache) Adjust(originId flow.Identifier, adjustFunc alsp.RecordAdjustFunc) (float64, error) { +// - error any returned error should be considered as an irrecoverable error and indicates a bug. +func (s *SpamRecordCache) AdjustWithInit(originId flow.Identifier, adjustFunc model.RecordAdjustFunc) (float64, error) { var rErr error - adjustedEntity, adjusted := s.c.Adjust(originId, func(entity flow.Entity) flow.Entity { - record, ok := entity.(ProtocolSpamRecordEntity) - if !ok { - // sanity check - // This should never happen, because the cache only contains ProtocolSpamRecordEntity entities. - panic(fmt.Sprintf("invalid entity type, expected ProtocolSpamRecordEntity type, got: %T", entity)) - } - + wrapAdjustFunc := func(record *model.ProtocolSpamRecord) *model.ProtocolSpamRecord { // Adjust the record. - adjustedRecord, err := adjustFunc(record.ProtocolSpamRecord) + adjustedRecord, err := adjustFunc(record) if err != nil { rErr = fmt.Errorf("adjust function failed: %w", err) - return entity // returns the original entity (reverse the adjustment). + return record // returns the original record (reverse the adjustment). } // Return the adjusted record. - return ProtocolSpamRecordEntity{adjustedRecord} - }) + return adjustedRecord + } + initFunc := func() *model.ProtocolSpamRecord { + return s.recordFactory(originId) + } + adjustedRecord, adjusted := s.Backend.AdjustWithInit(originId, wrapAdjustFunc, initFunc) if rErr != nil { return 0, fmt.Errorf("failed to adjust record: %w", rErr) } if !adjusted { - return 0, ErrSpamRecordNotFound + return 0, fmt.Errorf("adjustment failed for origin id %s", originId) } - return adjustedEntity.(ProtocolSpamRecordEntity).Penalty, nil + return adjustedRecord.Penalty, nil } // Get returns the spam record of the given origin id. @@ -117,44 +96,28 @@ func (s *SpamRecordCache) Adjust(originId flow.Identifier, adjustFunc alsp.Recor // Returns: // - the record and true if the record exists, nil and false otherwise. // Note that the returned record is a copy of the record in the cache (we do not want the caller to modify the record). -func (s *SpamRecordCache) Get(originId flow.Identifier) (*alsp.ProtocolSpamRecord, bool) { - entity, ok := s.c.ByID(originId) +func (s *SpamRecordCache) Get(originId flow.Identifier) (*model.ProtocolSpamRecord, bool) { + record, ok := s.Backend.Get(originId) if !ok { return nil, false } - record, ok := entity.(ProtocolSpamRecordEntity) - if !ok { - // sanity check - // This should never happen, because the cache only contains ProtocolSpamRecordEntity entities. - panic(fmt.Sprintf("invalid entity type, expected ProtocolSpamRecordEntity type, got: %T", entity)) - } - // return a copy of the record (we do not want the caller to modify the record). 
- return &alsp.ProtocolSpamRecord{ - OriginId: record.OriginId, - Decay: record.Decay, - CutoffCounter: record.CutoffCounter, - Penalty: record.Penalty, + return &model.ProtocolSpamRecord{ + OriginId: record.OriginId, + Decay: record.Decay, + CutoffCounter: record.CutoffCounter, + Penalty: record.Penalty, + DisallowListed: record.DisallowListed, }, true } // Identities returns the list of identities of the nodes that have a spam record in the cache. func (s *SpamRecordCache) Identities() []flow.Identifier { - return flow.GetIDs(s.c.All()) -} - -// Remove removes the spam record of the given origin id from the cache. -// Returns true if the record is removed, false otherwise (i.e., the record does not exist). -// Args: -// - originId: the origin id of the spam record. -// Returns: -// - true if the record is removed, false otherwise (i.e., the record does not exist). -func (s *SpamRecordCache) Remove(originId flow.Identifier) bool { - return s.c.Remove(originId) -} - -// Size returns the number of spam records in the cache. -func (s *SpamRecordCache) Size() uint { - return s.c.Size() + all := s.Backend.All() + identifiers := make(flow.IdentifierList, 0, len(all)) + for identifier := range all { + identifiers = append(identifiers, identifier) + } + return identifiers } diff --git a/network/alsp/internal/cache_entity.go b/network/alsp/internal/cache_entity.go deleted file mode 100644 index 3f3b5e250ad..00000000000 --- a/network/alsp/internal/cache_entity.go +++ /dev/null @@ -1,28 +0,0 @@ -package internal - -import ( - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network/alsp" -) - -// ProtocolSpamRecordEntity is an entity that represents a spam record. It is internally -// used by the SpamRecordCache to store the spam records in the cache. -// The identifier of this entity is the origin id of the spam record. This entails that the spam records -// are deduplicated by origin id. -type ProtocolSpamRecordEntity struct { - alsp.ProtocolSpamRecord -} - -var _ flow.Entity = (*ProtocolSpamRecordEntity)(nil) - -// ID returns the origin id of the spam record, which is used as the unique identifier of the entity for maintenance and -// deduplication purposes in the cache. -func (p ProtocolSpamRecordEntity) ID() flow.Identifier { - return p.OriginId -} - -// Checksum returns the origin id of the spam record, it does not have any purpose in the cache. -// It is implemented to satisfy the flow.Entity interface. 
-func (p ProtocolSpamRecordEntity) Checksum() flow.Identifier { - return p.OriginId -} diff --git a/network/alsp/internal/cache_test.go b/network/alsp/internal/cache_test.go index abd6d0ebcef..8a7e116e3bb 100644 --- a/network/alsp/internal/cache_test.go +++ b/network/alsp/internal/cache_test.go @@ -8,12 +8,11 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" - "go.uber.org/atomic" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/network/alsp" "github.com/onflow/flow-go/network/alsp/internal" + "github.com/onflow/flow-go/network/alsp/model" "github.com/onflow/flow-go/utils/unittest" ) @@ -24,7 +23,7 @@ func TestNewSpamRecordCache(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } @@ -39,8 +38,8 @@ // Returns: // - alsp.ProtocolSpamRecord, the created spam record. // Note that the returned spam record is not a valid spam record. It is used only for testing. -func protocolSpamRecordFixture(id flow.Identifier) alsp.ProtocolSpamRecord { - return alsp.ProtocolSpamRecord{ +func protocolSpamRecordFixture(id flow.Identifier) *model.ProtocolSpamRecord { + return &model.ProtocolSpamRecord{ OriginId: id, Decay: 1000, CutoffCounter: 0, @@ -48,16 +47,23 @@ } } -// TestSpamRecordCache_Init tests the Init method of the SpamRecordCache. -// It ensures that the method returns true when a new record is initialized -// and false when an existing record is initialized. +// TestSpamRecordCache_Adjust_Init tests that when the Adjust function is called +// on a record that does not exist in the cache, the record is initialized and +// the adjust function is applied to the initialized record. +func TestSpamRecordCache_Adjust_Init(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + + recordFactoryCalled := 0 + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { + require.Less(t, recordFactoryCalled, 2, "record factory must be called at most twice") + recordFactoryCalled++ return protocolSpamRecordFixture(id) } + adjustFuncIncrement := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + record.Penalty += 1 + return record, nil + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) @@ -66,47 +72,57 @@ originID1 := unittest.IdentifierFixture() originID2 := unittest.IdentifierFixture() - // test initializing a spam record for an origin ID that doesn't exist in the cache - initialized := cache.Init(originID1) - require.True(t, initialized, "expected record to be initialized") + // adjusting a spam record for an origin ID that does not exist in the cache should initialize the record.
+ initializedPenalty, err := cache.AdjustWithInit(originID1, adjustFuncIncrement) + require.NoError(t, err, "expected no error") + require.Equal(t, float64(1), initializedPenalty, "expected initialized penalty to be 1") + record1, ok := cache.Get(originID1) require.True(t, ok, "expected record to exist") require.NotNil(t, record1, "expected non-nil record") require.Equal(t, originID1, record1.OriginId, "expected record to have correct origin ID") + require.False(t, record1.DisallowListed, "expected record to not be disallow listed") require.Equal(t, cache.Size(), uint(1), "expected cache to have one record") - // test initializing a spam record for an origin ID that already exists in the cache - initialized = cache.Init(originID1) - require.False(t, initialized, "expected record not to be initialized") + // adjusting a spam record for an origin ID that already exists in the cache should not initialize the record, + // but should apply the adjust function to the existing record. + initializedPenalty, err = cache.AdjustWithInit(originID1, adjustFuncIncrement) + require.NoError(t, err, "expected no error") + require.Equal(t, float64(2), initializedPenalty, "expected initialized penalty to be 2") record1Again, ok := cache.Get(originID1) require.True(t, ok, "expected record to still exist") require.NotNil(t, record1Again, "expected non-nil record") require.Equal(t, originID1, record1Again.OriginId, "expected record to have correct origin ID") - require.Equal(t, record1, record1Again, "expected records to be the same") + require.False(t, record1Again.DisallowListed, "expected record not to be disallow listed") require.Equal(t, cache.Size(), uint(1), "expected cache to still have one record") - // test initializing a spam record for another origin ID - initialized = cache.Init(originID2) - require.True(t, initialized, "expected record to be initialized") + // adjusting a spam record for a different origin ID should initialize a new record; + // the record factory is invoked once per origin ID, upon first initialization. + initializedPenalty, err = cache.AdjustWithInit(originID2, adjustFuncIncrement) + require.NoError(t, err, "expected no error") + require.Equal(t, float64(1), initializedPenalty, "expected initialized penalty to be 1") record2, ok := cache.Get(originID2) require.True(t, ok, "expected record to exist") require.NotNil(t, record2, "expected non-nil record") require.Equal(t, originID2, record2.OriginId, "expected record to have correct origin ID") + require.False(t, record2.DisallowListed, "expected record not to be disallow listed") require.Equal(t, cache.Size(), uint(2), "expected cache to have two records") } -// TestSpamRecordCache_Adjust tests the Adjust method of the SpamRecordCache. +// TestSpamRecordCache_Adjust_Error tests the Adjust method of the SpamRecordCache. // The test covers the following scenarios: // 1. Adjusting a spam record for an existing origin ID. -// 2. Attempting to adjust a spam record for a non-existing origin ID. -// 3. Attempting to adjust a spam record with an adjustFunc that returns an error. -func TestSpamRecordCache_Adjust(t *testing.T) { +// 2. Attempting to adjust a spam record with an adjustFunc that returns an error.
+func TestSpamRecordCache_Adjust_Error(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) @@ -115,15 +131,19 @@ func TestSpamRecordCache_Adjust(t *testing.T) { originID2 := unittest.IdentifierFixture() // initialize spam records for originID1 and originID2 - require.True(t, cache.Init(originID1)) - require.True(t, cache.Init(originID2)) + penalty, err := cache.AdjustWithInit(originID1, adjustFnNoOp) + require.NoError(t, err, "expected no error") + require.Equal(t, 0.0, penalty, "expected penalty to be 0") + penalty, err = cache.AdjustWithInit(originID2, adjustFnNoOp) + require.NoError(t, err, "expected no error") + require.Equal(t, 0.0, penalty, "expected penalty to be 0") // test adjusting the spam record for an existing origin ID - adjustFunc := func(record alsp.ProtocolSpamRecord) (alsp.ProtocolSpamRecord, error) { + adjustFunc := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { record.Penalty -= 10 return record, nil } - penalty, err := cache.Adjust(originID1, adjustFunc) + penalty, err = cache.AdjustWithInit(originID1, adjustFunc) require.NoError(t, err) require.Equal(t, -10.0, penalty) @@ -132,16 +152,11 @@ func TestSpamRecordCache_Adjust(t *testing.T) { require.NotNil(t, record1) require.Equal(t, -10.0, record1.Penalty) - // test adjusting the spam record for a non-existing origin ID - originID3 := unittest.IdentifierFixture() - _, err = cache.Adjust(originID3, adjustFunc) - require.Error(t, err) - // test adjusting the spam record with an adjustFunc that returns an error - adjustFuncError := func(record alsp.ProtocolSpamRecord) (alsp.ProtocolSpamRecord, error) { + adjustFuncError := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { return record, errors.New("adjustment error") } - _, err = cache.Adjust(originID1, adjustFuncError) + _, err = cache.AdjustWithInit(originID1, adjustFuncError) require.Error(t, err) // even though the adjustFunc returned an error, the record should be intact. 
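The hunks above pin down the new `AdjustWithInit` contract: a missing record is created by the cache's record factory before the adjust function runs, and a failing adjust function leaves the stored record untouched. As a quick illustration (not part of this diff), here is a minimal sketch; the test name is hypothetical, it assumes the default factory returned by `model.SpamRecordFactory()` initializes the penalty to zero (as the assertions in these tests imply), and it would have to live beside `cache_test.go`, since Go `internal` packages are only importable from within `network/alsp`.

```go
package internal_test

import (
	"errors"
	"testing"

	"github.com/rs/zerolog"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/network/alsp/internal"
	"github.com/onflow/flow-go/network/alsp/model"
	"github.com/onflow/flow-go/utils/unittest"
)

// TestAdjustWithInitContract (hypothetical) demonstrates both halves of the contract.
func TestAdjustWithInitContract(t *testing.T) {
	cache := internal.NewSpamRecordCache(100, zerolog.Nop(), metrics.NewNoopCollector(), model.SpamRecordFactory())
	originID := unittest.IdentifierFixture()

	// Cache miss: the record is first initialized by the factory, then adjusted.
	penalty, err := cache.AdjustWithInit(originID, func(r *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) {
		r.Penalty -= 10
		return r, nil
	})
	require.NoError(t, err)
	require.Equal(t, -10.0, penalty)

	// Failing adjustFunc: the error propagates and the stored record is unchanged.
	_, err = cache.AdjustWithInit(originID, func(r *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) {
		return r, errors.New("adjustment error")
	})
	require.Error(t, err)
	record, ok := cache.Get(originID)
	require.True(t, ok)
	require.Equal(t, -10.0, record.Penalty)
}
```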
@@ -159,21 +174,27 @@ func TestSpamRecordCache_Identities(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) - // initialize spam records for a few origin IDs originID1 := unittest.IdentifierFixture() originID2 := unittest.IdentifierFixture() originID3 := unittest.IdentifierFixture() - require.True(t, cache.Init(originID1)) - require.True(t, cache.Init(originID2)) - require.True(t, cache.Init(originID3)) + // initialize spam records for a few origin IDs + _, err := cache.AdjustWithInit(originID1, adjustFnNoOp) + require.NoError(t, err) + _, err = cache.AdjustWithInit(originID2, adjustFnNoOp) + require.NoError(t, err) + _, err = cache.AdjustWithInit(originID3, adjustFnNoOp) + require.NoError(t, err) // check if the Identities method returns the correct set of origin IDs identities := cache.Identities() @@ -199,21 +220,27 @@ func TestSpamRecordCache_Remove(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) - // initialize spam records for a few origin IDs originID1 := unittest.IdentifierFixture() originID2 := unittest.IdentifierFixture() originID3 := unittest.IdentifierFixture() - require.True(t, cache.Init(originID1)) - require.True(t, cache.Init(originID2)) - require.True(t, cache.Init(originID3)) + // initialize spam records for a few origin IDs + _, err := cache.AdjustWithInit(originID1, adjustFnNoOp) + require.NoError(t, err) + _, err = cache.AdjustWithInit(originID2, adjustFnNoOp) + require.NoError(t, err) + _, err = cache.AdjustWithInit(originID3, adjustFnNoOp) + require.NoError(t, err) // remove originID1 and check if the record is removed require.True(t, cache.Remove(originID1)) @@ -240,29 +267,37 @@ func TestSpamRecordCache_EdgeCasesAndInvalidInputs(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) // 1. initializing a spam record multiple times originID1 := unittest.IdentifierFixture() - require.True(t, cache.Init(originID1)) - require.False(t, cache.Init(originID1)) + + _, err := cache.AdjustWithInit(originID1, adjustFnNoOp) + require.NoError(t, err) + _, err = cache.AdjustWithInit(originID1, adjustFnNoOp) + require.NoError(t, err) // 2. 
Test adjusting a non-existent spam record originID2 := unittest.IdentifierFixture() - _, err := cache.Adjust(originID2, func(record alsp.ProtocolSpamRecord) (alsp.ProtocolSpamRecord, error) { + initialPenalty, err := cache.AdjustWithInit(originID2, func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { record.Penalty -= 10 return record, nil }) - require.Error(t, err) + require.NoError(t, err) + require.Equal(t, float64(-10), initialPenalty) // 3. Test removing a spam record multiple times originID3 := unittest.IdentifierFixture() - require.True(t, cache.Init(originID3)) + _, err = cache.AdjustWithInit(originID3, adjustFnNoOp) + require.NoError(t, err) require.True(t, cache.Remove(originID3)) require.False(t, cache.Remove(originID3)) } @@ -275,9 +310,12 @@ func TestSpamRecordCache_ConcurrentInitialization(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) @@ -290,7 +328,9 @@ func TestSpamRecordCache_ConcurrentInitialization(t *testing.T) { for _, originID := range originIDs { go func(id flow.Identifier) { defer wg.Done() - cache.Init(id) + penalty, err := cache.AdjustWithInit(id, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) }(originID) } @@ -305,18 +345,24 @@ func TestSpamRecordCache_ConcurrentInitialization(t *testing.T) { } } -// TestSpamRecordCache_ConcurrentSameRecordInitialization tests the concurrent initialization of the same spam record. +// TestSpamRecordCache_ConcurrentSameRecordAdjust tests the concurrent adjust of the same spam record. // The test covers the following scenarios: -// 1. Multiple goroutines attempting to initialize the same spam record concurrently. -// 2. Only one goroutine successfully initializes the record, and others receive false on initialization. -// 3. The record is correctly initialized in the cache and can be retrieved using the Get method. -func TestSpamRecordCache_ConcurrentSameRecordInitialization(t *testing.T) { +// 1. Multiple goroutines attempting to adjust the same spam record concurrently. +// 2. Only one of the adjust operations succeeds on initializing the record. +// 3. The rest of the adjust operations only update the record (no initialization). 
+func TestSpamRecordCache_ConcurrentSameRecordAdjust(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFn := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + record.Penalty -= 1.0 + record.DisallowListed = true + record.Decay += 1.0 + return record, nil + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) @@ -327,27 +373,25 @@ var wg sync.WaitGroup wg.Add(concurrentAttempts) - successCount := atomic.Int32{} - for i := 0; i < concurrentAttempts; i++ { go func() { defer wg.Done() - initSuccess := cache.Init(originID) - if initSuccess { - successCount.Inc() - } + penalty, err := cache.AdjustWithInit(originID, adjustFn) + require.NoError(t, err) + require.Less(t, penalty, 0.0) // penalty should be negative }() } unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") - // ensure that only one goroutine successfully initialized the record - require.Equal(t, int32(1), successCount.Load()) - - // ensure that the record is correctly initialized in the cache + // ensure that the record is correctly initialized and adjusted in the cache + initDecay := model.SpamRecordFactory()(originID).Decay record, found := cache.Get(originID) require.True(t, found) require.NotNil(t, record) + require.Equal(t, concurrentAttempts*-1.0, record.Penalty) + require.Equal(t, initDecay+concurrentAttempts*1.0, record.Decay) + require.True(t, record.DisallowListed) require.Equal(t, originID, record.OriginId) } @@ -359,16 +403,21 @@ func TestSpamRecordCache_ConcurrentRemoval(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) originIDs := unittest.IdentifierListFixture(10) for _, originID := range originIDs { - cache.Init(originID) + penalty, err := cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) } var wg sync.WaitGroup @@ -403,22 +452,27 @@ func TestSpamRecordCache_ConcurrentUpdatesAndReads(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) originIDs := unittest.IdentifierListFixture(10) for _, originID := range originIDs { - cache.Init(originID) + penalty, err := cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) } var
wg sync.WaitGroup wg.Add(len(originIDs) * 2) - adjustFunc := func(record alsp.ProtocolSpamRecord) (alsp.ProtocolSpamRecord, error) { + adjustFunc := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { record.Penalty -= 1 return record, nil } @@ -427,7 +481,7 @@ func TestSpamRecordCache_ConcurrentUpdatesAndReads(t *testing.T) { // adjust spam records concurrently go func(id flow.Identifier) { defer wg.Done() - _, err := cache.Adjust(id, adjustFunc) + _, err := cache.AdjustWithInit(id, adjustFunc) require.NoError(t, err) }(originID) @@ -460,9 +514,12 @@ func TestSpamRecordCache_ConcurrentInitAndRemove(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) @@ -472,7 +529,9 @@ func TestSpamRecordCache_ConcurrentInitAndRemove(t *testing.T) { originIDsToRemove := originIDs[10:] for _, originID := range originIDsToRemove { - cache.Init(originID) + penalty, err := cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) } var wg sync.WaitGroup @@ -480,10 +539,13 @@ func TestSpamRecordCache_ConcurrentInitAndRemove(t *testing.T) { // initialize spam records concurrently for _, originID := range originIDsToAdd { - go func(id flow.Identifier) { + originID := originID // capture range variable + go func() { defer wg.Done() - cache.Init(id) - }(originID) + penalty, err := cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) + }() } // remove spam records concurrently @@ -519,9 +581,12 @@ func TestSpamRecordCache_ConcurrentInitRemoveAdjust(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) @@ -532,10 +597,12 @@ func TestSpamRecordCache_ConcurrentInitRemoveAdjust(t *testing.T) { originIDsToAdjust := originIDs[20:] for _, originID := range originIDsToRemove { - cache.Init(originID) + penalty, err := cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) } - adjustFunc := func(record alsp.ProtocolSpamRecord) (alsp.ProtocolSpamRecord, error) { + adjustFunc := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { record.Penalty -= 1 return record, nil } @@ -545,10 +612,13 @@ func TestSpamRecordCache_ConcurrentInitRemoveAdjust(t *testing.T) { // Initialize spam records concurrently for _, originID := range originIDsToAdd { - go func(id flow.Identifier) { + originID := originID // capture range variable + go func() { defer wg.Done() - cache.Init(id) - }(originID) + penalty, err := cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), 
penalty) + }() } // Remove spam records concurrently @@ -563,7 +633,7 @@ func TestSpamRecordCache_ConcurrentInitRemoveAdjust(t *testing.T) { for _, originID := range originIDsToAdjust { go func(id flow.Identifier) { defer wg.Done() - _, _ = cache.Adjust(id, adjustFunc) + _, _ = cache.AdjustWithInit(id, adjustFunc) }(originID) } @@ -582,9 +652,12 @@ func TestSpamRecordCache_ConcurrentInitRemoveAndAdjust(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) @@ -595,11 +668,15 @@ func TestSpamRecordCache_ConcurrentInitRemoveAndAdjust(t *testing.T) { originIDsToAdjust := originIDs[20:] for _, originID := range originIDsToRemove { - cache.Init(originID) + penalty, err := cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) } for _, originID := range originIDsToAdjust { - cache.Init(originID) + penalty, err := cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) } var wg sync.WaitGroup @@ -607,30 +684,35 @@ func TestSpamRecordCache_ConcurrentInitRemoveAndAdjust(t *testing.T) { // initialize spam records concurrently for _, originID := range originIDsToAdd { - go func(id flow.Identifier) { + originID := originID + go func() { defer wg.Done() - cache.Init(id) - }(originID) + penalty, err := cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) + }() } // remove spam records concurrently for _, originID := range originIDsToRemove { - go func(id flow.Identifier) { + originID := originID + go func() { defer wg.Done() - cache.Remove(id) - }(originID) + cache.Remove(originID) + }() } // adjust spam records concurrently for _, originID := range originIDsToAdjust { - go func(id flow.Identifier) { + originID := originID + go func() { defer wg.Done() - _, err := cache.Adjust(id, func(record alsp.ProtocolSpamRecord) (alsp.ProtocolSpamRecord, error) { + _, err := cache.AdjustWithInit(originID, func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { record.Penalty -= 1 return record, nil }) require.NoError(t, err) - }(originID) + }() } unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") @@ -666,9 +748,12 @@ func TestSpamRecordCache_ConcurrentIdentitiesAndOperations(t *testing.T) { sizeLimit := uint32(100) logger := zerolog.Nop() collector := metrics.NewNoopCollector() - recordFactory := func(id flow.Identifier) alsp.ProtocolSpamRecord { + recordFactory := func(id flow.Identifier) *model.ProtocolSpamRecord { return protocolSpamRecordFixture(id) } + adjustFnNoOp := func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + return record, nil // no-op + } cache := internal.NewSpamRecordCache(sizeLimit, logger, collector, recordFactory) require.NotNil(t, cache) @@ -678,7 +763,9 @@ func TestSpamRecordCache_ConcurrentIdentitiesAndOperations(t *testing.T) { originIDsToRemove := originIDs[10:20] for _, originID := range originIDsToRemove { - cache.Init(originID) + penalty, err := 
cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) } var wg sync.WaitGroup @@ -686,24 +773,28 @@ func TestSpamRecordCache_ConcurrentIdentitiesAndOperations(t *testing.T) { // initialize spam records concurrently for _, originID := range originIDsToAdd { - go func(id flow.Identifier) { + originID := originID + go func() { defer wg.Done() - require.True(t, cache.Init(id)) - retrieved, ok := cache.Get(id) + penalty, err := cache.AdjustWithInit(originID, adjustFnNoOp) + require.NoError(t, err) + require.Equal(t, float64(0), penalty) + retrieved, ok := cache.Get(originID) require.True(t, ok) require.NotNil(t, retrieved) - }(originID) + }() } // remove spam records concurrently for _, originID := range originIDsToRemove { - go func(id flow.Identifier) { + originID := originID + go func() { defer wg.Done() - require.True(t, cache.Remove(id)) - retrieved, ok := cache.Get(id) + require.True(t, cache.Remove(originID)) + retrieved, ok := cache.Get(originID) require.False(t, ok) require.Nil(t, retrieved) - }(originID) + }() } // call Identities method concurrently diff --git a/network/alsp/internal/reported_misbehavior_work.go b/network/alsp/internal/reported_misbehavior_work.go new file mode 100644 index 00000000000..c27c52b2225 --- /dev/null +++ b/network/alsp/internal/reported_misbehavior_work.go @@ -0,0 +1,36 @@ +package internal + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" +) + +const NonceSize = 8 + +// ReportedMisbehaviorWork is an internal data structure for "temporarily" storing misbehavior reports in the queue +// till they are processed by the worker. +type ReportedMisbehaviorWork struct { + // Channel is the channel that the misbehavior report is about. + Channel channels.Channel + + // OriginId is the ID of the peer that the misbehavior report is about. + OriginId flow.Identifier + + // Reason is the reason of the misbehavior. + Reason network.Misbehavior + + // Nonce is a random nonce value that is used to make the key of the struct unique in the queue even when + // the same misbehavior report is reported multiple times. This is needed as we expect the same misbehavior report + // to be reported multiple times when an attack persists for a while. We don't want to deduplicate the misbehavior + // reports in the queue as we want to penalize the misbehaving node for each report. + Nonce [NonceSize]byte + + // Penalty is the penalty value of the misbehavior. + // We use `rlp:"-"` to ignore this field when serializing the struct to RLP to determine the key of this struct + // when storing in the queue. Hence, the penalty value does "not" contribute to the key for storing in the queue. + // As RLP encoding does not support float64, we cannot use this field as the key of the + // struct. As we use a random nonce value for the key of the struct, we can be sure that we will not have a collision + // in the queue, and duplicate reports will be accepted with unique keys. 
+ Penalty float64 `rlp:"-"` +} diff --git a/network/alsp/manager.go b/network/alsp/manager.go deleted file mode 100644 index 151b8aff528..00000000000 --- a/network/alsp/manager.go +++ /dev/null @@ -1,48 +0,0 @@ -package alsp - -import ( - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/utils/logging" -) - -// MisbehaviorReportManager is responsible for handling misbehavior reports. -// The current version is at the minimum viable product stage and only logs the reports. -// TODO: the mature version should be able to handle the reports and take actions accordingly, i.e., penalize the misbehaving node -// -// and report the node to be disallow-listed if the overall penalty of the misbehaving node drops below the disallow-listing threshold. -type MisbehaviorReportManager struct { - logger zerolog.Logger - metrics module.AlspMetrics -} - -var _ network.MisbehaviorReportManager = (*MisbehaviorReportManager)(nil) - -// NewMisbehaviorReportManager creates a new instance of the MisbehaviorReportManager. -func NewMisbehaviorReportManager(logger zerolog.Logger, metrics module.AlspMetrics) *MisbehaviorReportManager { - return &MisbehaviorReportManager{ - logger: logger.With().Str("module", "misbehavior_report_manager").Logger(), - metrics: metrics, - } -} - -// HandleMisbehaviorReport is called upon a new misbehavior is reported. -// The current version is at the minimum viable product stage and only logs the reports. -// The implementation of this function should be thread-safe and non-blocking. -// TODO: the mature version should be able to handle the reports and take actions accordingly, i.e., penalize the misbehaving node -// -// and report the node to be disallow-listed if the overall penalty of the misbehaving node drops below the disallow-listing threshold. -func (m *MisbehaviorReportManager) HandleMisbehaviorReport(channel channels.Channel, report network.MisbehaviorReport) { - m.metrics.OnMisbehaviorReported(channel.String(), report.Reason().String()) - - m.logger.Debug(). - Str("channel", channel.String()). - Hex("misbehaving_id", logging.ID(report.OriginId())). - Str("reason", report.Reason().String()). - Msg("received misbehavior report") - - // TODO: handle the misbehavior report and take actions accordingly. -} diff --git a/network/alsp/manager/README.md b/network/alsp/manager/README.md new file mode 100644 index 00000000000..36bf3deda9d --- /dev/null +++ b/network/alsp/manager/README.md @@ -0,0 +1,84 @@ +# Application Layer Spam Prevention (ALSP) Manager +The implementation of the ALSP manager is available here: [manager.go](manager.go) +Note that this readme focuses primarily on the ALSP manager. For more details on the ALSP system, please refer to [readme.md](..%2Freadme.md). +--- +## Architectural Overview +### Reporting Misbehavior and Managing Node Penalties +The figure below illustrates the ALSP manager’s role in the reporting of misbehavior and the management of node penalties, as +well as the interactions between the ALSP manager and the `LibP2PNode`, `ConnectionGater`, and `PeerManager` components for +the disallow listing and allow listing processes. + +#### Reporting Misbehavior +When an engine detects misbehavior within a channel, +it must report this finding to the ALSP manager. +This is achieved by invoking the `ReportMisbehavior` method on the conduit corresponding to the engine.
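To make the reporting path concrete, here is a minimal, hypothetical engine-side sketch (it is not part of this change set). The `exampleEngine` type and its `onInvalidMessage` handler are illustrative only; the sketch relies on the conduit's `ReportMisbehavior` method described above, and assumes the `alsp` package exposes a `NewMisbehaviorReport` constructor and an `InvalidMessage` misbehavior reason.

```go
package example

import (
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/network"
	"github.com/onflow/flow-go/network/alsp"
)

// exampleEngine is an illustrative engine holding the conduit registered for its channel.
type exampleEngine struct {
	log zerolog.Logger
	con network.Conduit
}

// onInvalidMessage reports the sender of an invalid message to the ALSP manager
// by handing a misbehavior report to the engine's conduit.
func (e *exampleEngine) onInvalidMessage(originID flow.Identifier) {
	report, err := alsp.NewMisbehaviorReport(originID, alsp.InvalidMessage)
	if err != nil {
		// a construction error indicates a local bug, not remote misbehavior
		e.log.Fatal().Err(err).Msg("failed to construct misbehavior report")
		return
	}
	// the conduit forwards the report to the ALSP manager for this channel,
	// which queues it for asynchronous processing by its worker pool
	e.con.ReportMisbehavior(report)
}
```

Penalizing and disallow-listing then proceed inside the manager, as described in the following sections.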
+ +#### Managing Penalties +The ALSP manager is responsible for maintaining records of misbehavior reports associated with +remote nodes and for calculating their accumulated misbehavior penalties. +Should a node’s misbehavior penalty surpass a certain threshold +(referred to as `DisallowListingThreshold`), the ALSP manager initiates the disallow listing process. When a remote node is disallow-listed, +it is effectively isolated from the network by the `ConnectionGater` and `PeerManager` components, i.e., the existing +connections to that remote node are closed and new connection attempts are rejected. + +##### Disallow Listing Process +1. The ALSP manager communicates with the `LibP2PNode` by calling its `OnDisallowListNotification` method to indicate that a particular remote node has been disallow-listed. +2. In response, the `LibP2PNode` takes two important actions: + + a. It alerts the `PeerManager`, instructing it to sever the connection with the disallow-listed node. + b. It notifies the `ConnectionGater` to block any incoming or outgoing connections to and from the disallow-listed node. +This ensures that the disallow-listed node is effectively isolated from the local node's network. + +##### Penalty Decay and Allow Listing Process +The ALSP manager also includes a penalty decay mechanism, which gradually reduces the penalties of nodes at regular heartbeat intervals (one second by default). +Once a disallow-listed node's penalty decays back to zero, the node can be reintegrated into the network through the allow listing process. The allow-listing process involves allowing +the `ConnectionGater` to lift the block on the disallow-listed node and instructing the `PeerManager` to initiate an outbound connection with the allow-listed node. + +1. The ALSP manager calls the `OnAllowListNotification` method on the `LibP2PNode` to signify that a previously disallow-listed node is now allow-listed. +2. The `LibP2PNode` responds by: + + a. Instructing the `ConnectionGater` to lift the block, thereby permitting connections with the now allow-listed node. + b. Requesting the `PeerManager` to initiate an outbound connection with the allow-listed node. + +This series of actions allows the rehabilitated node to be reintegrated and to participate actively in the network once again. +![alsp-manager.png](alsp-manager.png) +--- + + + +## Developer Guidelines +The ALSP (Application Layer Spam Prevention) Manager handles application layer spamming misbehavior reports and penalizes misbehaving nodes. It also disallow-lists nodes whose penalties drop below a threshold. + + +- **Misbehavior Reports**: When a local engine detects a spamming misbehavior of a remote node, it sends a report to the ALSP manager by invoking the `ReportMisbehavior` method on the corresponding +conduit on which the misbehavior was detected; the conduit hands the report to the manager's `HandleMisbehaviorReport` method. The manager handles the report in a thread-safe and non-blocking manner, using worker pools. + +```go +func (m *MisbehaviorReportManager) HandleMisbehaviorReport(channel channels.Channel, report network.MisbehaviorReport) { + // Handle the report +} +``` + +- **Penalties**: Misbehaving nodes are penalized by the manager. +The manager keeps a cache of records with penalties for each node. +The penalties are decayed over time through periodic heartbeats. + +- **Disallow-listing**: Nodes whose penalties drop below a threshold are disallow-listed.
+ +- **Heartbeats**: Periodic heartbeats allow the manager to perform recurring tasks, such as decaying the penalties of misbehaving nodes. +```go +func (m *MisbehaviorReportManager) heartbeatLoop(ctx irrecoverable.SignalerContext, interval time.Duration) { + // Handle heartbeats +} +``` + +- **Disallow-list Notification Consumer**: the interface of the consumer of disallow-list notifications, which is +responsible for taking action when a node is disallow-listed, i.e., closing existing connections with the remote disallow-listed +node and blocking any incoming or outgoing connections to that node. The consumer is passed to the manager when it is created. +In the current implementation, the consumer is the node's `LibP2PNode` component. +```go +disallowListingConsumer network.DisallowListNotificationConsumer +``` + +### Configuration +The configuration includes settings like cache size, heartbeat intervals, and network type. \ No newline at end of file diff --git a/network/alsp/manager/alsp-manager.png b/network/alsp/manager/alsp-manager.png new file mode 100644 index 00000000000..97e111e532b Binary files /dev/null and b/network/alsp/manager/alsp-manager.png differ diff --git a/network/alsp/manager/manager.go b/network/alsp/manager/manager.go new file mode 100644 index 00000000000..6a7bf856411 --- /dev/null +++ b/network/alsp/manager/manager.go @@ -0,0 +1,511 @@ +package alspmgr + +import ( + crand "crypto/rand" + "errors" + "fmt" + "math" + "time" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/common/worker" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/queue" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/alsp" + "github.com/onflow/flow-go/network/alsp/internal" + "github.com/onflow/flow-go/network/alsp/model" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/utils/logging" +) + +const ( + // defaultMisbehaviorReportManagerWorkers is the default number of workers in the worker pool. + defaultMisbehaviorReportManagerWorkers = 2 +) + +var ( + // ErrSpamRecordCacheSizeNotSet is returned when the spam record cache size is not set, it is a fatal irrecoverable error, + // and the ALSP module cannot be initialized. + ErrSpamRecordCacheSizeNotSet = errors.New("spam record cache size is not set") + // ErrSpamReportQueueSizeNotSet is returned when the spam report queue size is not set, it is a fatal irrecoverable error, + // and the ALSP module cannot be initialized. + ErrSpamReportQueueSizeNotSet = errors.New("spam report queue size is not set") + // ErrHeartBeatIntervalNotSet is returned when the heartbeat interval is not set, it is a fatal irrecoverable error, + // and the ALSP module cannot be initialized. + ErrHeartBeatIntervalNotSet = errors.New("heartbeat interval is not set") +) + +type SpamRecordCacheFactory func(zerolog.Logger, uint32, module.HeroCacheMetrics) alsp.SpamRecordCache + +// SpamRecordDecayFunc is the function that calculates the decay of the spam record.
+type SpamRecordDecayFunc func(*model.ProtocolSpamRecord) float64 + +func defaultSpamRecordDecayFunc() SpamRecordDecayFunc { + return func(record *model.ProtocolSpamRecord) float64 { + return math.Min(record.Penalty+record.Decay, 0) + } +} + +// defaultSpamRecordCacheFactory is the default spam record cache factory. It creates a new spam record cache with the given parameter. +func defaultSpamRecordCacheFactory() SpamRecordCacheFactory { + return func(logger zerolog.Logger, size uint32, cacheMetrics module.HeroCacheMetrics) alsp.SpamRecordCache { + return internal.NewSpamRecordCache( + size, + logger.With().Str("component", "spam_record_cache").Logger(), + cacheMetrics, + model.SpamRecordFactory()) + } +} + +// MisbehaviorReportManager is responsible for handling misbehavior reports, i.e., penalizing the misbehaving node +// and report the node to be disallow-listed if the overall penalty of the misbehaving node drops below the disallow-listing threshold. +type MisbehaviorReportManager struct { + component.Component + logger zerolog.Logger + metrics module.AlspMetrics + // cacheFactory is the factory for creating the spam record cache. MisbehaviorReportManager is coming with a + // default factory that creates a new spam record cache with the given parameter. However, this factory can be + // overridden with a custom factory. + cacheFactory SpamRecordCacheFactory + // cache is the spam record cache that stores the spam records for the authorized nodes. It is initialized by + // invoking the cacheFactory. + cache alsp.SpamRecordCache + // disablePenalty indicates whether applying the penalty to the misbehaving node is disabled. + // When disabled, the ALSP module logs the misbehavior reports and updates the metrics, but does not apply the penalty. + // This is useful for managing production incidents. + // Note: under normal circumstances, the ALSP module should not be disabled. + disablePenalty bool + + // disallowListingConsumer is the consumer for the disallow-listing notifications. + // It is notified when a node is disallow-listed by this manager. + disallowListingConsumer network.DisallowListNotificationConsumer + + // workerPool is the worker pool for handling the misbehavior reports in a thread-safe and non-blocking manner. + workerPool *worker.Pool[internal.ReportedMisbehaviorWork] + + // decayFunc is the function that calculates the decay of the spam record. + decayFunc SpamRecordDecayFunc +} + +var _ network.MisbehaviorReportManager = (*MisbehaviorReportManager)(nil) + +type MisbehaviorReportManagerConfig struct { + Logger zerolog.Logger + // SpamRecordCacheSize is the size of the spam record cache that stores the spam records for the authorized nodes. + // It should be as big as the number of authorized nodes in Flow network. + // Recommendation: for small network sizes 10 * number of authorized nodes to ensure that the cache can hold all the spam records of the authorized nodes. + SpamRecordCacheSize uint32 + // SpamReportQueueSize is the size of the queue that stores the spam records to be processed by the worker pool. + SpamReportQueueSize uint32 + // AlspMetrics is the metrics instance for the alsp module (collecting spam reports). + AlspMetrics module.AlspMetrics + // HeroCacheMetricsFactory is the metrics factory for the HeroCache-related metrics. + // Having factory as part of the config allows to create the metrics locally in the module. 
+ HeroCacheMetricsFactory metrics.HeroCacheMetricsFactory + // DisablePenalty indicates whether applying the penalty to the misbehaving node is disabled. + // When disabled, the ALSP module logs the misbehavior reports and updates the metrics, but does not apply the penalty. + // This is useful for managing production incidents. + // Note: under normal circumstances, the ALSP module should not be disabled. + DisablePenalty bool + // NetworkType is the type of the network it is used to determine whether the ALSP module is utilized in the + // public (unstaked) or private (staked) network. + NetworkType network.NetworkingType + // HeartBeatInterval is the interval between the heartbeats. Heartbeat is a recurring event that is used to + // apply recurring actions, e.g., decay the penalty of the misbehaving nodes. + HeartBeatInterval time.Duration + Opts []MisbehaviorReportManagerOption +} + +// validate validates the MisbehaviorReportManagerConfig instance. It returns an error if the config is invalid. +// It only validates the numeric fields of the config that may yield a stealth error in the production. +// It does not validate the struct fields of the config against a nil value. +// Args: +// +// None. +// +// Returns: +// +// An error if the config is invalid. +func (c MisbehaviorReportManagerConfig) validate() error { + if c.SpamRecordCacheSize == 0 { + return ErrSpamRecordCacheSizeNotSet + } + if c.SpamReportQueueSize == 0 { + return ErrSpamReportQueueSizeNotSet + } + if c.HeartBeatInterval == 0 { + return ErrHeartBeatIntervalNotSet + } + return nil +} + +type MisbehaviorReportManagerOption func(*MisbehaviorReportManager) + +// NewMisbehaviorReportManager creates a new instance of the MisbehaviorReportManager. +// Args: +// cfg: the configuration for the MisbehaviorReportManager. +// consumer: the consumer for the disallow-listing notifications. When the manager decides to disallow-list a node, it notifies the consumer to +// perform the lower-level disallow-listing action at the networking layer. +// All connections to the disallow-listed node are closed and the node is removed from the overlay, and +// no further connections are established to the disallow-listed node, either inbound or outbound. +// +// Returns: +// +// A new instance of the MisbehaviorReportManager. +// An error if the config is invalid. The error is considered irrecoverable. 
+func NewMisbehaviorReportManager(cfg *MisbehaviorReportManagerConfig, consumer network.DisallowListNotificationConsumer) (*MisbehaviorReportManager, error) {
+	if err := cfg.validate(); err != nil {
+		return nil, fmt.Errorf("invalid configuration for MisbehaviorReportManager: %w", err)
+	}
+
+	lg := cfg.Logger.With().Str("module", "misbehavior_report_manager").Logger()
+	m := &MisbehaviorReportManager{
+		logger:                  lg,
+		metrics:                 cfg.AlspMetrics,
+		disablePenalty:          cfg.DisablePenalty,
+		disallowListingConsumer: consumer,
+		cacheFactory:            defaultSpamRecordCacheFactory(),
+		decayFunc:               defaultSpamRecordDecayFunc(),
+	}
+
+	store := queue.NewHeroStore(
+		cfg.SpamReportQueueSize,
+		lg.With().Str("component", "spam_record_queue").Logger(),
+		metrics.ApplicationLayerSpamRecordQueueMetricsFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkType))
+
+	m.workerPool = worker.NewWorkerPoolBuilder[internal.ReportedMisbehaviorWork](
+		cfg.Logger,
+		store,
+		m.processMisbehaviorReport).Build()
+
+	for _, opt := range cfg.Opts {
+		opt(m)
+	}
+
+	m.cache = m.cacheFactory(
+		lg,
+		cfg.SpamRecordCacheSize,
+		metrics.ApplicationLayerSpamRecordCacheMetricFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkType))
+
+	builder := component.NewComponentManagerBuilder()
+	builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+		ready()
+		m.heartbeatLoop(ctx, cfg.HeartBeatInterval) // blocking call
+	})
+	for i := 0; i < defaultMisbehaviorReportManagerWorkers; i++ {
+		builder.AddWorker(m.workerPool.WorkerLogic())
+	}
+
+	m.Component = builder.Build()
+
+	if m.disablePenalty {
+		m.logger.Warn().Msg("penalty mechanism of alsp is disabled")
+	}
+	return m, nil
+}
+
+// HandleMisbehaviorReport is called when a new misbehavior is reported.
+// The implementation of this function must be thread-safe and non-blocking.
+// Args:
+//
+//	channel: the channel on which the misbehavior is reported.
+//	report: the misbehavior report.
+//
+// Returns:
+//
+//	none.
+func (m *MisbehaviorReportManager) HandleMisbehaviorReport(channel channels.Channel, report network.MisbehaviorReport) {
+	lg := m.logger.With().
+		Str("channel", channel.String()).
+		Hex("misbehaving_id", logging.ID(report.OriginId())).
+		Str("reason", report.Reason().String()).
+		Float64("penalty", report.Penalty()).Logger()
+	lg.Trace().Msg("received misbehavior report")
+	m.metrics.OnMisbehaviorReported(channel.String(), report.Reason().String())
+
+	nonce := [internal.NonceSize]byte{}
+	nonceSize, err := crand.Read(nonce[:])
+	if err != nil {
+		// this should never happen, but if it does, we should not continue
+		lg.Fatal().Err(err).Msg("failed to generate nonce")
+		return
+	}
+	if nonceSize != internal.NonceSize {
+		// this should never happen, but if it does, we should not continue
+		lg.Fatal().Msgf("nonce size mismatch: expected %d, got %d", internal.NonceSize, nonceSize)
+		return
+	}
+
+	if ok := m.workerPool.Submit(internal.ReportedMisbehaviorWork{
+		Channel:  channel,
+		OriginId: report.OriginId(),
+		Reason:   report.Reason(),
+		Penalty:  report.Penalty(),
+		Nonce:    nonce,
+	}); !ok {
+		lg.Warn().Msg("discarding misbehavior report because either the queue is full or the misbehavior report is a duplicate")
+	}
+
+	lg.Debug().Msg("misbehavior report submitted")
+}
+
+// heartbeatLoop starts a ticker that fires at the given interval. It is a blocking function, and
+// should be called in a separate goroutine. It returns when the context is canceled. Heartbeats are recurring events that
+// are used to perform periodic tasks.
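+// On each tick, onHeartbeat (below) decays the penalties of all spam records once and emits the
+// corresponding disallow-listing / allow-listing notifications.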
+// Args:
+//
+//	ctx: the context.
+//	interval: the interval between two ticks.
+//
+// Returns:
+//
+//	none.
+func (m *MisbehaviorReportManager) heartbeatLoop(ctx irrecoverable.SignalerContext, interval time.Duration) {
+	ticker := time.NewTicker(interval)
+	m.logger.Info().Dur("interval", interval).Msg("starting heartbeat ticks")
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			m.logger.Debug().Msg("heartbeat ticks stopped")
+			return
+		case <-ticker.C:
+			m.logger.Trace().Msg("new heartbeat ticked")
+			if err := m.onHeartbeat(); err != nil {
+				// any error returned from onHeartbeat is considered irrecoverable.
+				ctx.Throw(fmt.Errorf("failed to perform heartbeat: %w", err))
+			}
+		}
+	}
+}
+
+// onHeartbeat is called on each heartbeat tick. It encapsulates the recurring tasks that should be performed
+// during a heartbeat, which currently include the decay of the spam records.
+// Args:
+//
+//	none.
+//
+// Returns:
+//
+//	error: if an error occurs, it is returned. No error is expected during normal operation. Any returned error must
+//	be considered irrecoverable.
+func (m *MisbehaviorReportManager) onHeartbeat() error {
+	allIds := m.cache.Identities()
+
+	for _, id := range allIds {
+		m.logger.Trace().Hex("identifier", logging.ID(id)).Msg("onHeartbeat - looping through spam records")
+		penalty, err := m.cache.AdjustWithInit(id, func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) {
+			if record.Penalty > 0 {
+				// sanity check; this should never happen.
+				return record, fmt.Errorf("illegal state: spam record %x has positive penalty %f", id, record.Penalty)
+			}
+			if record.Decay <= 0 {
+				// sanity check; this should never happen.
+				return record, fmt.Errorf("illegal state: spam record %x has non-positive decay %f", id, record.Decay)
+			}
+
+			// TODO: this can be done in batch but at this stage let's send individual notifications.
+			// (it requires enabling the batch mode end-to-end including the cache in network).
+			// as long as record.Penalty is NOT below model.DisallowListingThreshold,
+			// the node is considered allow-listed and can conduct inbound and outbound connections.
+			// Once it falls below model.DisallowListingThreshold, it needs to be disallow-listed.
+			if record.Penalty < model.DisallowListingThreshold && !record.DisallowListed {
+				// the cutoff counter keeps track of how many times the penalty has dropped below the threshold.
+				record.CutoffCounter++
+				record.DisallowListed = true
+				// adjusts the decay dynamically based on how many times the node was disallow-listed (cut off).
+				record.Decay = m.adjustDecayFunc(record.CutoffCounter)
+				m.logger.Warn().
+					Str("key", logging.KeySuspicious).
+					Hex("identifier", logging.ID(id)).
+					Float64("penalty", record.Penalty).
+					Uint64("cutoff_counter", record.CutoffCounter).
+					Float64("decay_speed", record.Decay).
+					Bool("disallow_listed", record.DisallowListed).
+					Msg("node penalty dropped below threshold, initiating disallow listing")
+				m.disallowListingConsumer.OnDisallowListNotification(&network.DisallowListingUpdate{
+					FlowIds: flow.IdentifierList{id},
+					Cause:   network.DisallowListedCauseAlsp, // sets the ALSP disallow listing cause on the node
+				})
+			}
+			// each time, we decay the penalty by the decay speed; the penalty is a negative number and the decay speed
+			// is a positive number, so the penalty moves closer to zero.
+			// We use math.Min() to make sure the penalty is never positive.
+			m.logger.Trace().
+				Hex("identifier", logging.ID(id)).
+				Uint64("cutoff_counter", record.CutoffCounter).
+				Bool("disallow_listed", record.DisallowListed).
+				Float64("penalty", record.Penalty).
+				Msg("heartbeat interval, pulled the spam record for decaying")
+			record.Penalty = m.decayFunc(record)
+			m.logger.Trace().
+				Hex("identifier", logging.ID(id)).
+				Uint64("cutoff_counter", record.CutoffCounter).
+				Bool("disallow_listed", record.DisallowListed).
+				Float64("penalty", record.Penalty).
+				Msg("heartbeat interval, spam record penalty adjusted by decay function")
+
+			// TODO: this can be done in batch but at this stage let's send individual notifications.
+			// (it requires enabling the batch mode end-to-end including the cache in network).
+			if record.Penalty == float64(0) && record.DisallowListed {
+				record.DisallowListed = false
+
+				m.logger.Info().
+					Hex("identifier", logging.ID(id)).
+					Uint64("cutoff_counter", record.CutoffCounter).
+					Float64("decay_speed", record.Decay).
+					Bool("disallow_listed", record.DisallowListed).
+					Msg("allow-listing a node that was disallow-listed")
+				// the penalty has fully decayed to zero and the node can be back on the allow list.
+				m.disallowListingConsumer.OnAllowListNotification(&network.AllowListingUpdate{
+					FlowIds: flow.IdentifierList{id},
+					Cause:   network.DisallowListedCauseAlsp, // clears the ALSP disallow listing cause from the node
+				})
+			}
+
+			m.logger.Trace().
+				Hex("identifier", logging.ID(id)).
+				Uint64("cutoff_counter", record.CutoffCounter).
+				Float64("decay_speed", record.Decay).
+				Bool("disallow_listed", record.DisallowListed).
+				Msg("spam record decayed successfully")
+			return record, nil
+		})
+
+		// any error here is fatal because it indicates a bug in the cache. All ids being iterated over are in the cache,
+		// and the adjust function above should not return an error unless there is a bug.
+		if err != nil {
+			return fmt.Errorf("failed to decay spam record %x: %w", id, err)
+		}
+
+		m.logger.Trace().
+			Hex("identifier", logging.ID(id)).
+			Float64("updated_penalty", penalty).
+			Msg("spam record decayed")
+	}
+
+	return nil
+}
+
+// processMisbehaviorReport is the worker function that processes the misbehavior reports.
+// It is called by the worker pool.
+// It applies the penalty to the misbehaving node and updates the spam record cache.
+// The implementation must be thread-safe so that it can be called concurrently.
+// Args:
+//
+//	report: the misbehavior report to be processed.
+//
+// Returns:
+//
+//	error: the error that occurred during the processing of the misbehavior report. The returned error is
+//	irrecoverable and the node should crash if it occurs (indicating a bug in the ALSP module).
+func (m *MisbehaviorReportManager) processMisbehaviorReport(report internal.ReportedMisbehaviorWork) error {
+	lg := m.logger.With().
+		Str("channel", report.Channel.String()).
+		Hex("misbehaving_id", logging.ID(report.OriginId)).
+		Str("reason", report.Reason.String()).
+		Float64("penalty", report.Penalty).Logger()
+
+	if m.disablePenalty {
+		// when the penalty mechanism is disabled, the misbehavior is logged and metrics are updated,
+		// but no further actions are taken.
+		lg.Trace().Msg("discarding misbehavior report because alsp penalty is disabled")
+		return nil
+	}
+
+	// Adjust will first try to apply the penalty to the spam record; if the record does not exist, the Adjust method will
+	// initialize a spam record for the peer first and then apply the penalty. In other words, Adjust uses an optimistic update by
+	// first assuming that the spam record exists and then initializing it if it does not exist.
+	// In this way, we avoid acquiring the lock twice per misbehavior report, reducing the contention on the lock and improving performance.
+	updatedPenalty, err := m.cache.AdjustWithInit(report.OriginId, func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) {
+		if report.Penalty > 0 {
+			// this should never happen, unless there is a bug in the misbehavior report handling logic.
+			// we should crash the node in this case to prevent further misbehavior reports from being lost and fix the bug.
+			// we return the error as it is considered a fatal error.
+			return record, fmt.Errorf("penalty value is positive, expected negative %f", report.Penalty)
+		}
+		// capture the record state before applying the report, so that the log fields below are accurate.
+		lg = lg.With().
+			Float64("penalty_before_update", record.Penalty).
+			Uint64("cutoff_counter", record.CutoffCounter).
+			Float64("decay_speed", record.Decay).
+			Bool("disallow_listed", record.DisallowListed).
+			Logger()
+		record.Penalty += report.Penalty // penalty value is negative. We add it to the current penalty.
+		return record, nil
+	})
+	if err != nil {
+		// this should never happen, unless there is a bug in the spam record cache implementation.
+		// we should crash the node in this case to prevent further misbehavior reports from being lost and fix the bug.
+		return fmt.Errorf("failed to apply penalty to the spam record: %w", err)
+	}
+	lg.Debug().Float64("updated_penalty", updatedPenalty).Msg("misbehavior report handled")
+	return nil
+}
+
+// adjustDecayFunc calculates the decay value of the spam record cache. This allows the decay to be different on subsequent disallow listings.
+// It returns the decay speed for the given cutoff counter.
+// The cutoff counter is the number of times that the node has been disallow-listed.
+// Args:
+//
+//	cutoffCounter: the number of times that the node has been disallow-listed, including the current time. Note that the cutoff counter
+//	must always be updated before calling this function.
+//
+// Returns:
+//
+//	float64: the decay speed for the given cutoff counter.
+func (m *MisbehaviorReportManager) adjustDecayFunc(cutoffCounter uint64) float64 {
+	// the switch below defines the decay speeds for different cutoff counters.
+	// The first cutoff does not reduce the decay speed (1000 -> 1000). However, the second, third,
+	// and fourth cutoffs each reduce the decay speed by 90% (1000 -> 100, 100 -> 10, 10 -> 1).
+	// All subsequent cutoffs after the fourth cutoff use the last decay speed (1).
+	// This is to prevent the decay speed from becoming too small and the spam record from taking too long to decay.
+	switch {
+	case cutoffCounter == 1:
+		return 1000
+	case cutoffCounter == 2:
+		return 100
+	case cutoffCounter == 3:
+		return 10
+	case cutoffCounter >= 4:
+		return 1
+	default:
+		panic(fmt.Sprintf("illegal state: cutoff counter must be positive, it should include the current time: %d", cutoffCounter))
+	}
+}
+
+// WithSpamRecordsCacheFactory sets the spam record cache factory for the MisbehaviorReportManager.
+// Args:
+//
+//	f: the spam record cache factory.
+//
+// Returns:
+//
+//	a MisbehaviorReportManagerOption that sets the spam record cache for the MisbehaviorReportManager.
+//
+// Note: this option is useful primarily for testing purposes. The default factory should be sufficient for production.
+func WithSpamRecordsCacheFactory(f SpamRecordCacheFactory) MisbehaviorReportManagerOption {
+	return func(m *MisbehaviorReportManager) {
+		m.cacheFactory = f
+	}
+}
+
+// WithDecayFunc sets the decay function for the MisbehaviorReportManager.
+// Useful for testing purposes to simulate the decay of the penalty without waiting for the actual decay.
+// Args:
+//
+//	f: the decay function.
+//
+// Returns:
+//
+//	a MisbehaviorReportManagerOption that sets the decay function for the MisbehaviorReportManager.
+//
+// Note: this option is useful primarily for testing purposes. The default decay function should be used for production.
+func WithDecayFunc(f SpamRecordDecayFunc) MisbehaviorReportManagerOption {
+	return func(m *MisbehaviorReportManager) {
+		m.decayFunc = f
+	}
+}
diff --git a/network/alsp/manager/manager_test.go b/network/alsp/manager/manager_test.go
new file mode 100644
index 00000000000..223a7a3f5fb
--- /dev/null
+++ b/network/alsp/manager/manager_test.go
@@ -0,0 +1,1819 @@
+package alspmgr_test
+
+import (
+	"context"
+	"math"
+	"math/rand"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/id"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/module/metrics"
+	mockmodule "github.com/onflow/flow-go/module/mock"
+	"github.com/onflow/flow-go/network"
+	"github.com/onflow/flow-go/network/alsp"
+	"github.com/onflow/flow-go/network/alsp/internal"
+	alspmgr "github.com/onflow/flow-go/network/alsp/manager"
+	mockalsp "github.com/onflow/flow-go/network/alsp/mock"
+	"github.com/onflow/flow-go/network/alsp/model"
+	"github.com/onflow/flow-go/network/channels"
+	"github.com/onflow/flow-go/network/internal/testutils"
+	mocknetwork "github.com/onflow/flow-go/network/mock"
+	"github.com/onflow/flow-go/network/p2p"
+	p2ptest "github.com/onflow/flow-go/network/p2p/test"
+	"github.com/onflow/flow-go/network/slashing"
+	"github.com/onflow/flow-go/network/underlay"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestNetworkPassesReportedMisbehavior tests the handling of reported misbehavior by the network.
+//
+// The test sets up a mock MisbehaviorReportManager and a conduitFactory with this manager.
+// It generates a single-node network with the conduitFactory and starts it.
+// It then uses a mock engine to register a channel with the network.
+// It prepares a set of misbehavior reports and reports them to the conduit on the test channel.
+// The test ensures that the MisbehaviorReportManager receives and handles all reported misbehavior
+// without any duplicate reports and within a specified time.
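+// Note: the mocked manager below also stubs Start/Ready/Done so that the network can drive the
+// component lifecycle of the manager as it would in production.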
+func TestNetworkPassesReportedMisbehavior(t *testing.T) {
+	misbehaviorReportManager := mocknetwork.NewMisbehaviorReportManager(t)
+	misbehaviorReportManager.On("Start", mock.Anything).Return().Once()
+
+	readyDoneChan := func() <-chan struct{} {
+		ch := make(chan struct{})
+		close(ch)
+		return ch
+	}()
+
+	sporkId := unittest.IdentifierFixture()
+	misbehaviorReportManager.On("Ready").Return(readyDoneChan).Once()
+	misbehaviorReportManager.On("Done").Return(readyDoneChan).Once()
+	ids, nodes := testutils.LibP2PNodeForNetworkFixture(t, sporkId, 1)
+	idProvider := id.NewFixedIdentityProvider(ids)
+	networkCfg := testutils.NetworkConfigFixture(t, *ids[0], idProvider, sporkId, nodes[0])
+	net, err := underlay.NewNetwork(networkCfg, underlay.WithAlspManager(misbehaviorReportManager))
+	require.NoError(t, err)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	testutils.StartNodesAndNetworks(signalerCtx, t, nodes, []network.EngineRegistry{net})
+	defer testutils.StopComponents[p2p.LibP2PNode](t, nodes, 100*time.Millisecond)
+	defer cancel()
+
+	e := mocknetwork.NewEngine(t)
+	con, err := net.Register(channels.TestNetworkChannel, e)
+	require.NoError(t, err)
+
+	reports := testutils.MisbehaviorReportsFixture(t, 10)
+	allReportsManaged := sync.WaitGroup{}
+	allReportsManaged.Add(len(reports))
+	var seenReports []network.MisbehaviorReport
+	misbehaviorReportManager.On("HandleMisbehaviorReport", channels.TestNetworkChannel, mock.Anything).Run(func(args mock.Arguments) {
+		report := args.Get(1).(network.MisbehaviorReport)
+		require.Contains(t, reports, report)                                         // ensures that the report is one of the reports we expect.
+		require.NotContainsf(t, seenReports, report, "duplicate report: %v", report) // ensures that we have not seen this report before.
+		seenReports = append(seenReports, report)                                    // adds the report to the list of seen reports.
+		allReportsManaged.Done()
+	}).Return(nil)
+
+	for _, report := range reports {
+		con.ReportMisbehavior(report) // reports the misbehavior
+	}
+
+	unittest.RequireReturnsBefore(t, allReportsManaged.Wait, 100*time.Millisecond, "did not receive all reports")
+}
+
+// TestHandleReportedMisbehavior_Cache_Integration tests the handling of reported misbehavior by the network,
+// with the ALSP manager's spam record cache exposed for inspection.
+//
+// The test configures the network to create the ALSP manager with a custom spam record cache factory.
+// It generates a single-node network and starts it.
+// It then uses a mock engine to register a channel with the network.
+// It prepares a set of misbehavior reports and reports them to the conduit on the test channel.
+// The test ensures that all misbehavior reports are handled and that the resulting penalties are
+// reflected in the spam record cache within a specified time.
+func TestHandleReportedMisbehavior_Cache_Integration(t *testing.T) {
+	cfg := managerCfgFixture(t)
+
+	// this test is assessing the integration of the ALSP manager with the network. As the ALSP manager is an attribute
+	// of the network, we need to configure the ALSP manager via the network configuration, and let the network create
+	// the ALSP manager.
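+	// the custom cache factory below captures the cache instance so that the test can inspect the
+	// spam records directly once the reports have been processed.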
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+
+	sporkId := unittest.IdentifierFixture()
+	ids, nodes := testutils.LibP2PNodeForNetworkFixture(t, sporkId, 1)
+	idProvider := id.NewFixedIdentityProvider(ids)
+	networkCfg := testutils.NetworkConfigFixture(t, *ids[0], idProvider, sporkId, nodes[0], underlay.WithAlspConfig(cfg))
+	net, err := underlay.NewNetwork(networkCfg)
+	require.NoError(t, err)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	testutils.StartNodesAndNetworks(signalerCtx, t, nodes, []network.EngineRegistry{net})
+	defer testutils.StopComponents[p2p.LibP2PNode](t, nodes, 100*time.Millisecond)
+	defer cancel()
+
+	e := mocknetwork.NewEngine(t)
+	con, err := net.Register(channels.TestNetworkChannel, e)
+	require.NoError(t, err)
+
+	// create a map of origin IDs to their respective misbehavior reports (10 peers, 5 reports each)
+	numPeers := 10
+	numReportsPerPeer := 5
+	peersReports := make(map[flow.Identifier][]network.MisbehaviorReport)
+
+	for i := 0; i < numPeers; i++ {
+		originID := unittest.IdentifierFixture()
+		reports := createRandomMisbehaviorReportsForOriginId(t, originID, numReportsPerPeer)
+		peersReports[originID] = reports
+	}
+
+	wg := sync.WaitGroup{}
+	for _, reports := range peersReports {
+		wg.Add(len(reports))
+		// reports the misbehavior
+		for _, report := range reports {
+			r := report // capture range variable
+			go func() {
+				defer wg.Done()
+
+				con.ReportMisbehavior(r)
+			}()
+		}
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed")
+
+	// check if the misbehavior reports have been processed by verifying that the Adjust method was called on the cache
+	require.Eventually(t, func() bool {
+		for originID, reports := range peersReports {
+			totalPenalty := float64(0)
+			for _, report := range reports {
+				totalPenalty += report.Penalty()
+			}
+
+			record, ok := cache.Get(originID)
+			if !ok {
+				return false
+			}
+			require.NotNil(t, record)
+
+			require.Equal(t, totalPenalty, record.Penalty)
+			// with only a few misbehavior reports, the cutoff counter should not be incremented.
+			require.Equal(t, uint64(0), record.CutoffCounter)
+			// with only a few misbehavior reports, the node should not be disallow-listed.
+			require.False(t, record.DisallowListed)
+			// the decay should be the default decay value.
+			require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+		}
+
+		return true
+	}, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior reports")
+}
+
+// TestHandleReportedMisbehavior_And_DisallowListing_Integration implements an end-to-end integration test for the
+// handling of reported misbehavior and disallow listing.
+//
+// The test sets up 3 nodes: one victim, one honest, and one (alleged) spammer.
+// Initially, the test ensures that all nodes are connected to each other.
+// Then, the test imitates the victim node reporting the spammer node for spamming.
+// The test generates enough spam reports to trigger the disallow-listing of the spammer node.
+// The test ensures that the victim node is disconnected from the spammer node.
+// The test ensures that despite attempts to connect, no inbound or outbound connections between the victim and
+// the disallow-listed spammer node are established.
+func TestHandleReportedMisbehavior_And_DisallowListing_Integration(t *testing.T) {
+	cfg := managerCfgFixture(t)
+
+	// this test is assessing the integration of the ALSP manager with the network. As the ALSP manager is an attribute
+	// of the network, we need to configure the ALSP manager via the network configuration, and let the network create
+	// the ALSP manager.
+	var victimSpamRecordCache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			victimSpamRecordCache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return victimSpamRecordCache
+		}),
+	}
+
+	sporkId := unittest.IdentifierFixture()
+	ids, nodes := testutils.LibP2PNodeForNetworkFixture(
+		t,
+		sporkId,
+		3,
+		p2ptest.WithPeerManagerEnabled(p2ptest.PeerManagerConfigFixture(), nil))
+
+	idProvider := id.NewFixedIdentityProvider(ids)
+	networkCfg := testutils.NetworkConfigFixture(t, *ids[0], idProvider, sporkId, nodes[0], underlay.WithAlspConfig(cfg))
+	victimNetwork, err := underlay.NewNetwork(networkCfg)
+	require.NoError(t, err)
+
+	// index of the victim node in the nodes slice.
+	victimIndex := 0
+	// index of the spammer node in the nodes slice (the node that will be reported for misbehavior and disallow-listed by the victim).
+	spammerIndex := 1
+	// other node (not victim and not spammer) that we have to ensure is not affected by the disallow-listing of the spammer.
+	honestIndex := 2
+
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	testutils.StartNodesAndNetworks(signalerCtx, t, nodes, []network.EngineRegistry{victimNetwork})
+	defer testutils.StopComponents[p2p.LibP2PNode](t, nodes, 100*time.Millisecond)
+	defer cancel()
+
+	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
+	// initially victim and spammer should be able to connect to each other.
+	p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes)
+
+	e := mocknetwork.NewEngine(t)
+	con, err := victimNetwork.Register(channels.TestNetworkChannel, e)
+	require.NoError(t, err)
+
+	// creates a misbehavior report for the spammer
+	report := misbehaviorReportFixtureWithPenalty(t, ids[spammerIndex].NodeID, model.DefaultPenaltyValue)
+
+	// simulates the victim node reporting the spammer node's misbehavior to the network 120 times.
+	// As each report carries the default penalty (0.01 * the disallow-listing penalty), the spammer should ideally
+	// be disallow-listed after 100 reports. We take 120 as a safe margin to ensure that
+	// the spammer is definitely disallow-listed.
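+	// arithmetic sanity check (derived from the comment above): 120 reports x (0.01 x disallow-listing
+	// threshold) per report drives the penalty to 1.2x the threshold, comfortably past the cutoff.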
+	reportCount := 120
+	wg := sync.WaitGroup{}
+	for i := 0; i < reportCount; i++ {
+		wg.Add(1)
+		// reports the misbehavior
+		r := report // capture range variable
+		go func() {
+			defer wg.Done()
+
+			con.ReportMisbehavior(r)
+		}()
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed")
+
+	// ensures that the spammer is disallow-listed by the victim
+	p2ptest.RequireEventuallyNotConnected(t, []p2p.LibP2PNode{nodes[victimIndex]}, []p2p.LibP2PNode{nodes[spammerIndex]}, 100*time.Millisecond, 5*time.Second)
+
+	// despite disallow-listing the spammer, ensure that (victim and honest) and (honest and spammer) are still connected.
+	p2ptest.RequireConnectedEventually(t, []p2p.LibP2PNode{nodes[spammerIndex], nodes[honestIndex]}, 1*time.Millisecond, 100*time.Millisecond)
+	p2ptest.RequireConnectedEventually(t, []p2p.LibP2PNode{nodes[honestIndex], nodes[victimIndex]}, 1*time.Millisecond, 100*time.Millisecond)
+
+	// while the spammer node is disallow-listed, it cannot connect to the victim node. Also, the victim node cannot directly dial and connect to the spammer node, unless
+	// it is allow-listed again.
+	p2ptest.RequireEventuallyNotConnected(t, []p2p.LibP2PNode{nodes[victimIndex]}, []p2p.LibP2PNode{nodes[spammerIndex]}, 100*time.Millisecond, 2*time.Second)
+}
+
+// TestHandleReportedMisbehavior_And_DisallowListing_RepeatOffender_Integration implements an end-to-end integration test for the
+// handling of repeated reported misbehavior and disallow listing.
+func TestHandleReportedMisbehavior_And_DisallowListing_RepeatOffender_Integration(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	sporkId := unittest.IdentifierFixture()
+	fastDecay := false
+	fastDecayFunc := func(record *model.ProtocolSpamRecord) float64 {
+		t.Logf("decayFunc called with record: %+v", record)
+		if fastDecay {
+			// decay to zero in a single heartbeat
+			t.Log("fastDecay is true, so decay to zero")
+			return 0
+		} else {
+			// decay as usual
+			t.Log("fastDecay is false, so decay as usual")
+			return math.Min(record.Penalty+record.Decay, 0)
+		}
+	}
+
+	// this test is assessing the integration of the ALSP manager with the network. As the ALSP manager is an attribute
+	// of the network, we need to configure the ALSP manager via the network configuration, and let the network create
+	// the ALSP manager.
+	var victimSpamRecordCache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			victimSpamRecordCache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return victimSpamRecordCache
+		}),
+		alspmgr.WithDecayFunc(fastDecayFunc),
+	}
+
+	ids, nodes := testutils.LibP2PNodeForNetworkFixture(t, sporkId, 3,
+		p2ptest.WithPeerManagerEnabled(p2ptest.PeerManagerConfigFixture(p2ptest.WithZeroJitterAndZeroBackoff(t)), nil))
+	idProvider := unittest.NewUpdatableIDProvider(ids)
+	networkCfg := testutils.NetworkConfigFixture(t, *ids[0], idProvider, sporkId, nodes[0], underlay.WithAlspConfig(cfg))
+
+	victimNetwork, err := underlay.NewNetwork(networkCfg)
+	require.NoError(t, err)
+
+	// index of the victim node in the nodes slice.
+	victimIndex := 0
+	// index of the spammer node in the nodes slice (the node that will be reported for misbehavior and disallow-listed by the victim).
+	spammerIndex := 1
+	// other node (not victim and not spammer) that we have to ensure is not affected by the disallow-listing of the spammer.
+	honestIndex := 2
+
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	testutils.StartNodesAndNetworks(signalerCtx, t, nodes, []network.EngineRegistry{victimNetwork})
+	defer testutils.StopComponents[p2p.LibP2PNode](t, nodes, 100*time.Millisecond)
+	defer cancel()
+
+	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
+	// initially victim and spammer should be able to connect to each other.
+	p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes)
+
+	e := mocknetwork.NewEngine(t)
+	con, err := victimNetwork.Register(channels.TestNetworkChannel, e)
+	require.NoError(t, err)
+
+	// creates a misbehavior report for the spammer
+	report := misbehaviorReportFixtureWithPenalty(t, ids[spammerIndex].NodeID, model.DefaultPenaltyValue)
+
+	expectedDecays := []float64{1000, 100, 10, 1, 1, 1} // list of expected decay values after each disallow listing
+
+	t.Log("resetting cutoff counter")
+	expectedCutoffCounter := uint64(0)
+
+	// keep misbehaving until the spammer is disallow-listed and check that the decay is as expected
+	for expectedDecayIndex := range expectedDecays {
+		t.Logf("starting iteration %d with expected decay value %f", expectedDecayIndex, expectedDecays[expectedDecayIndex])
+
+		// reset the decay function to the default
+		fastDecay = false
+
+		// simulates the victim node reporting the spammer node's misbehavior 120 times.
+		// As each report carries the default penalty, the spammer should ideally be disallow-listed after
+		// 100 reports (each having 0.01 * the disallow-listing penalty). We take 120 as a safe margin to ensure that
+		// the spammer is definitely disallow-listed.
+		reportCount := 120
+		wg := sync.WaitGroup{}
+		for reportCounter := 0; reportCounter < reportCount; reportCounter++ {
+			wg.Add(1)
+			// reports the misbehavior
+			r := report // capture range variable
+			go func() {
+				defer wg.Done()
+
+				con.ReportMisbehavior(r)
+			}()
+		}
+
+		unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed")
+
+		expectedCutoffCounter++ // the cutoff counter is expected to be incremented after each disallow listing
+
+		// ensures that the spammer is disallow-listed by the victim.
+		// while the spammer node is disallow-listed, it cannot connect to the victim node. Also, the victim node cannot directly dial and connect to the spammer node, unless
+		// it is allow-listed again.
+		p2ptest.RequireEventuallyNotConnected(t, []p2p.LibP2PNode{nodes[victimIndex]}, []p2p.LibP2PNode{nodes[spammerIndex]}, 100*time.Millisecond, 3*time.Second)
+
+		// ensures that the spammer is not disallow-listed by the honest node
+		p2ptest.RequireConnectedEventually(t, []p2p.LibP2PNode{nodes[honestIndex], nodes[spammerIndex]}, 1*time.Millisecond, 100*time.Millisecond)
+
+		// ensures that the spammer is disallow-listed for the expected amount of time
+		record, ok := victimSpamRecordCache.Get(ids[spammerIndex].NodeID)
+		require.True(t, ok)
+		require.NotNil(t, record)
+
+		// check the penalty of the spammer node, which should be below the disallow-listing threshold,
+		// i.e., the spammer's penalty should be more negative than the disallow-listing threshold, hence disallow-listed.
+		require.Less(t, record.Penalty, float64(model.DisallowListingThreshold))
+		require.Equal(t, expectedDecays[expectedDecayIndex], record.Decay)
+		// when a node is disallow-listed, it remains disallow-listed until its penalty decays back to zero.
+		require.True(t, record.DisallowListed)
+		require.Equal(t, expectedCutoffCounter, record.CutoffCounter)
+
+		penalty1 := record.Penalty
+
+		// wait for one heartbeat to be processed.
+		time.Sleep(1 * time.Second)
+
+		record, ok = victimSpamRecordCache.Get(ids[spammerIndex].NodeID)
+		require.True(t, ok)
+		require.NotNil(t, record)
+
+		// check the penalty of the spammer node, which should still be below the disallow-listing threshold,
+		// i.e., the spammer's penalty should be more negative than the disallow-listing threshold, hence disallow-listed.
+		require.Less(t, record.Penalty, float64(model.DisallowListingThreshold))
+		require.Equal(t, expectedDecays[expectedDecayIndex], record.Decay)
+		// when a node is disallow-listed, it remains disallow-listed until its penalty decays back to zero.
+		require.True(t, record.DisallowListed)
+		require.Equal(t, expectedCutoffCounter, record.CutoffCounter)
+		penalty2 := record.Penalty
+
+		// check that the penalty has decayed by the expected amount in one heartbeat
+		require.Equal(t, expectedDecays[expectedDecayIndex], penalty2-penalty1)
+
+		// decay the disallow-listing penalty of the spammer node to zero.
+		t.Log("about to decay the disallow-listing penalty of the spammer node to zero")
+		fastDecay = true
+		t.Log("decayed the disallow-listing penalty of the spammer node to zero")
+
+		// after serving the disallow-listing period, the spammer should be able to connect to the victim node again.
+		p2ptest.RequireConnectedEventually(t, []p2p.LibP2PNode{nodes[spammerIndex], nodes[victimIndex]}, 1*time.Millisecond, 3*time.Second)
+		t.Log("spammer node is able to connect to the victim node again")
+
+		// all the nodes should be able to connect to each other again.
+		p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes)
+
+		record, ok = victimSpamRecordCache.Get(ids[spammerIndex].NodeID)
+		require.True(t, ok)
+		require.NotNil(t, record)
+
+		require.Equal(t, float64(0), record.Penalty)
+		require.Equal(t, expectedDecays[expectedDecayIndex], record.Decay)
+		require.False(t, record.DisallowListed)
+		require.Equal(t, expectedCutoffCounter, record.CutoffCounter)
+
+		// go back to regular decay to prepare for the next set of misbehavior reports.
+		fastDecay = false
+		t.Log("about to report misbehavior again")
+	}
+}
+
+// TestHandleReportedMisbehavior_And_SlashingViolationsConsumer_Integration implements an end-to-end integration test for the
+// handling of reported misbehavior from the slashing.ViolationsConsumer.
+//
+// The test sets up one victim, one honest, and one (alleged) spammer node for each of the current slashing violations.
+// Initially, the test ensures that all nodes are connected to each other.
+// Then, the test imitates the slashing violations consumer on the victim node reporting misbehaviors for each slashing violation.
+// The test generates enough slashing violations to trigger the connection to each of the spamming nodes to be eventually pruned.
+// The test ensures that the victim node is disconnected from all spammer nodes.
+// The test ensures that despite attempts to connect, no inbound or outbound connections between the victim and
+// the pruned spammer nodes are established.
+func TestHandleReportedMisbehavior_And_SlashingViolationsConsumer_Integration(t *testing.T) {
+	sporkId := unittest.IdentifierFixture()
+
+	// create 1 victim node, 1 honest node and a node for each slashing violation
+	ids, nodes := testutils.LibP2PNodeForNetworkFixture(t, sporkId, 7) // creates 7 nodes (1 victim, 1 honest, 5 spammer nodes, one for each slashing violation).
+	idProvider := id.NewFixedIdentityProvider(ids)
+
+	// also a placeholder for the slashing violations consumer.
+	var violationsConsumer network.ViolationsConsumer
+	networkCfg := testutils.NetworkConfigFixture(
+		t,
+		*ids[0],
+		idProvider,
+		sporkId,
+		nodes[0],
+		underlay.WithAlspConfig(managerCfgFixture(t)),
+		underlay.WithSlashingViolationConsumerFactory(func(adapter network.ConduitAdapter) network.ViolationsConsumer {
+			violationsConsumer = slashing.NewSlashingViolationsConsumer(unittest.Logger(), metrics.NewNoopCollector(), adapter)
+			return violationsConsumer
+		}))
+	victimNetwork, err := underlay.NewNetwork(networkCfg)
+	require.NoError(t, err)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	testutils.StartNodesAndNetworks(signalerCtx, t, nodes, []network.EngineRegistry{victimNetwork})
+	defer testutils.StopComponents[p2p.LibP2PNode](t, nodes, 100*time.Millisecond)
+	defer cancel()
+
+	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
+	// initially victim and misbehaving nodes should be able to connect to each other.
+	p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes)
+
+	// each slashing violation func is mapped to a violation with the identity of one of the misbehaving nodes
+	// index of the victim node in the nodes slice.
+ victimIndex := 0 + honestNodeIndex := 1 + invalidMessageIndex := 2 + senderEjectedIndex := 3 + unauthorizedUnicastOnChannelIndex := 4 + unauthorizedPublishOnChannelIndex := 5 + unknownMsgTypeIndex := 6 + slashingViolationTestCases := []struct { + violationsConsumerFunc func(violation *network.Violation) + violation *network.Violation + }{ + {violationsConsumer.OnUnAuthorizedSenderError, &network.Violation{Identity: ids[invalidMessageIndex]}}, + {violationsConsumer.OnSenderEjectedError, &network.Violation{Identity: ids[senderEjectedIndex]}}, + {violationsConsumer.OnUnauthorizedUnicastOnChannel, &network.Violation{Identity: ids[unauthorizedUnicastOnChannelIndex]}}, + {violationsConsumer.OnUnauthorizedPublishOnChannel, &network.Violation{Identity: ids[unauthorizedPublishOnChannelIndex]}}, + {violationsConsumer.OnUnknownMsgTypeError, &network.Violation{Identity: ids[unknownMsgTypeIndex]}}, + } + + violationsWg := sync.WaitGroup{} + violationCount := 120 + for _, testCase := range slashingViolationTestCases { + for i := 0; i < violationCount; i++ { + testCase := testCase + violationsWg.Add(1) + go func() { + defer violationsWg.Done() + testCase.violationsConsumerFunc(testCase.violation) + }() + } + } + unittest.RequireReturnsBefore(t, violationsWg.Wait, 100*time.Millisecond, "slashing violations not reported in time") + + forEachMisbehavingNode := func(f func(i int)) { + for misbehavingNodeIndex := 2; misbehavingNodeIndex <= len(nodes)-1; misbehavingNodeIndex++ { + f(misbehavingNodeIndex) + } + } + + // ensures all misbehaving nodes are disconnected from the victim node + forEachMisbehavingNode(func(misbehavingNodeIndex int) { + p2ptest.RequireEventuallyNotConnected(t, []p2p.LibP2PNode{nodes[victimIndex]}, []p2p.LibP2PNode{nodes[misbehavingNodeIndex]}, 100*time.Millisecond, 2*time.Second) + }) + + // despite being disconnected from the victim node, misbehaving nodes and the honest node are still connected. + forEachMisbehavingNode(func(misbehavingNodeIndex int) { + p2ptest.RequireConnectedEventually(t, []p2p.LibP2PNode{nodes[honestNodeIndex], nodes[misbehavingNodeIndex]}, 1*time.Millisecond, 100*time.Millisecond) + }) + + // despite disconnecting misbehaving nodes, ensure that (victim and honest) are still connected. + p2ptest.RequireConnectedEventually(t, []p2p.LibP2PNode{nodes[honestNodeIndex], nodes[victimIndex]}, 1*time.Millisecond, 100*time.Millisecond) + + // while misbehaving nodes are disconnected, they cannot connect to the victim node. Also, the victim node cannot directly dial and connect to the misbehaving nodes until each node's peer score decays. + forEachMisbehavingNode(func(misbehavingNodeIndex int) { + p2ptest.EnsureNotConnectedBetweenGroups(t, ctx, []p2p.LibP2PNode{nodes[victimIndex]}, []p2p.LibP2PNode{nodes[misbehavingNodeIndex]}) + }) +} + +// TestMisbehaviorReportMetrics tests the recording of misbehavior report metrics. +// It checks that when a misbehavior report is received by the ALSP manager, the metrics are recorded. +// It fails the test if the metrics are not recorded or if they are recorded incorrectly. +func TestMisbehaviorReportMetrics(t *testing.T) { + cfg := managerCfgFixture(t) + + // this test is assessing the integration of the ALSP manager with the network. As the ALSP manager is an attribute + // of the network, we need to configure the ALSP manager via the network configuration, and let the network create + // the ALSP manager. 
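+	// the mocked AlspMetrics below lets the test intercept the OnMisbehaviorReported call that the
+	// ALSP manager is expected to make when the report arrives.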
+	alspMetrics := mockmodule.NewAlspMetrics(t)
+	cfg.AlspMetrics = alspMetrics
+
+	sporkId := unittest.IdentifierFixture()
+	ids, nodes := testutils.LibP2PNodeForNetworkFixture(t, sporkId, 1)
+	idProvider := id.NewFixedIdentityProvider(ids)
+
+	networkCfg := testutils.NetworkConfigFixture(t, *ids[0], idProvider, sporkId, nodes[0], underlay.WithAlspConfig(cfg))
+	net, err := underlay.NewNetwork(networkCfg)
+	require.NoError(t, err)
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	testutils.StartNodesAndNetworks(signalerCtx, t, nodes, []network.EngineRegistry{net})
+	defer testutils.StopComponents[p2p.LibP2PNode](t, nodes, 100*time.Millisecond)
+	defer cancel()
+
+	e := mocknetwork.NewEngine(t)
+	con, err := net.Register(channels.TestNetworkChannel, e)
+	require.NoError(t, err)
+
+	report := testutils.MisbehaviorReportFixture(t)
+
+	// this channel is used to signal that the metrics have been recorded by the ALSP manager correctly.
+	reported := make(chan struct{})
+
+	// ensures that the metrics are recorded when a misbehavior report is received.
+	alspMetrics.On("OnMisbehaviorReported", channels.TestNetworkChannel.String(), report.Reason().String()).Run(func(args mock.Arguments) {
+		close(reported)
+	}).Once()
+
+	con.ReportMisbehavior(report) // reports the misbehavior
+
+	unittest.RequireCloseBefore(t, reported, 100*time.Millisecond, "metrics for the misbehavior report were not recorded")
+}
+
+// TestReportCreation tests the creation of misbehavior reports using the alsp.NewMisbehaviorReport function.
+// It covers the creation of both valid and invalid misbehavior reports by setting different penalty amplification values.
+func TestReportCreation(t *testing.T) {
+
+	// creates a valid misbehavior report (i.e., amplification between 1 and 100)
+	report, err := alsp.NewMisbehaviorReport(
+		unittest.IdentifierFixture(),
+		testutils.MisbehaviorTypeFixture(t),
+		alsp.WithPenaltyAmplification(10))
+	require.NoError(t, err)
+	require.NotNil(t, report)
+
+	// creates a valid misbehavior report with default amplification.
+	report, err = alsp.NewMisbehaviorReport(
+		unittest.IdentifierFixture(),
+		testutils.MisbehaviorTypeFixture(t))
+	require.NoError(t, err)
+	require.NotNil(t, report)
+
+	// creates invalid misbehavior reports (i.e., amplification less than 1 or greater than 100)
+	report, err = alsp.NewMisbehaviorReport(
+		unittest.IdentifierFixture(),
+		testutils.MisbehaviorTypeFixture(t),
+		alsp.WithPenaltyAmplification(100*rand.Float64()-101))
+	require.Error(t, err)
+	require.Nil(t, report)
+
+	report, err = alsp.NewMisbehaviorReport(
+		unittest.IdentifierFixture(),
+		testutils.MisbehaviorTypeFixture(t),
+		alsp.WithPenaltyAmplification(100*rand.Float64()+101))
+	require.Error(t, err)
+	require.Nil(t, report)
+
+	// 0 is not a valid amplification
+	report, err = alsp.NewMisbehaviorReport(
+		unittest.IdentifierFixture(),
+		testutils.MisbehaviorTypeFixture(t),
+		alsp.WithPenaltyAmplification(0))
+	require.Error(t, err)
+	require.Nil(t, report)
+}
+
+// TestNewMisbehaviorReportManager tests the creation of a new ALSP manager.
+// It is a minimum viable test that ensures that a non-nil ALSP manager is created with an expected set of inputs.
+// In other words, variations of input values do not cause a nil ALSP manager to be created or a panic.
+func TestNewMisbehaviorReportManager(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+
+	t.Run("with default values", func(t *testing.T) {
+		m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+		require.NoError(t, err)
+		assert.NotNil(t, m)
+	})
+
+	t.Run("with a custom spam record cache", func(t *testing.T) {
+		m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+		require.NoError(t, err)
+		assert.NotNil(t, m)
+	})
+
+	t.Run("with ALSP module enabled", func(t *testing.T) {
+		m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+		require.NoError(t, err)
+		assert.NotNil(t, m)
+	})
+
+	t.Run("with ALSP module disabled", func(t *testing.T) {
+		m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+		require.NoError(t, err)
+		assert.NotNil(t, m)
+	})
+}
+
+// TestMisbehaviorReportManager_InitializationError tests the creation of a new ALSP manager with invalid inputs.
+// It is a minimum viable test that ensures that a nil ALSP manager is returned for an invalid set of inputs.
+func TestMisbehaviorReportManager_InitializationError(t *testing.T) {
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	t.Run("missing spam report queue size", func(t *testing.T) {
+		cfg := managerCfgFixture(t) // fresh config per subtest so that zeroed fields do not leak across subtests.
+		cfg.SpamReportQueueSize = 0
+		m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+		require.Error(t, err)
+		require.ErrorIs(t, err, alspmgr.ErrSpamReportQueueSizeNotSet)
+		assert.Nil(t, m)
+	})
+
+	t.Run("missing spam record cache size", func(t *testing.T) {
+		cfg := managerCfgFixture(t)
+		cfg.SpamRecordCacheSize = 0
+		m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+		require.Error(t, err)
+		require.ErrorIs(t, err, alspmgr.ErrSpamRecordCacheSizeNotSet)
+		assert.Nil(t, m)
+	})
+
+	t.Run("missing heartbeat interval", func(t *testing.T) {
+		cfg := managerCfgFixture(t)
+		cfg.HeartBeatInterval = 0
+		m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+		require.Error(t, err)
+		require.ErrorIs(t, err, alspmgr.ErrHeartBeatIntervalNotSet)
+		assert.Nil(t, m)
+	})
+}
+
+// TestHandleMisbehaviorReport_SinglePenaltyReport tests the handling of a single misbehavior report.
+// The test ensures that the misbehavior report is handled correctly and the penalty is applied to the peer in the cache.
+func TestHandleMisbehaviorReport_SinglePenaltyReport(t *testing.T) { + cfg := managerCfgFixture(t) + consumer := mocknetwork.NewDisallowListNotificationConsumer(t) + + // create a new MisbehaviorReportManager + var cache alsp.SpamRecordCache + cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{ + alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache { + cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory()) + return cache + }), + } + + m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer) + require.NoError(t, err) + + // start the ALSP manager + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop") + }() + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + m.Start(signalerCtx) + unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start") + + // create a mock misbehavior report with a negative penalty value + penalty := float64(-5) + report := mocknetwork.NewMisbehaviorReport(t) + report.On("OriginId").Return(unittest.IdentifierFixture()) + report.On("Reason").Return(alsp.InvalidMessage) + report.On("Penalty").Return(penalty) + + channel := channels.Channel("test-channel") + + // handle the misbehavior report + m.HandleMisbehaviorReport(channel, report) + + require.Eventually(t, func() bool { + // check if the misbehavior report has been processed by verifying that the Adjust method was called on the cache + record, ok := cache.Get(report.OriginId()) + if !ok { + return false + } + require.NotNil(t, record) + require.Equal(t, penalty, record.Penalty) + require.False(t, record.DisallowListed) // the peer should not be disallow listed yet + require.Equal(t, uint64(0), record.CutoffCounter) // with just reporting a misbehavior, the cutoff counter should not be incremented. + require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay) // the decay should be the default decay value. + + return true + }, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report") +} + +// TestHandleMisbehaviorReport_SinglePenaltyReport_PenaltyDisable tests the handling of a single misbehavior report when the penalty is disabled. +// The test ensures that the misbehavior is reported on metrics but the penalty is not applied to the peer in the cache. +func TestHandleMisbehaviorReport_SinglePenaltyReport_PenaltyDisable(t *testing.T) { + cfg := managerCfgFixture(t) + consumer := mocknetwork.NewDisallowListNotificationConsumer(t) + + cfg.DisablePenalty = true // disable penalty for misbehavior reports + alspMetrics := mockmodule.NewAlspMetrics(t) + cfg.AlspMetrics = alspMetrics + + // we use a mock cache but we do not expect any calls to the cache, since the penalty is disabled. 
+ var cache *mockalsp.SpamRecordCache + cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{ + alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache { + cache = mockalsp.NewSpamRecordCache(t) + return cache + }), + } + m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer) + require.NoError(t, err) + + // start the ALSP manager + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop") + }() + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + m.Start(signalerCtx) + unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start") + + // create a mock misbehavior report with a negative penalty value + penalty := float64(-5) + report := mocknetwork.NewMisbehaviorReport(t) + report.On("OriginId").Return(unittest.IdentifierFixture()) + report.On("Reason").Return(alsp.InvalidMessage) + report.On("Penalty").Return(penalty) + + channel := channels.Channel("test-channel") + + // this channel is used to signal that the metrics have been recorded by the ALSP manager correctly. + // even in case of a disabled penalty, the metrics should be recorded. + reported := make(chan struct{}) + + // ensures that the metrics are recorded when a misbehavior report is received. + alspMetrics.On("OnMisbehaviorReported", channel.String(), report.Reason().String()).Run(func(args mock.Arguments) { + close(reported) + }).Once() + + // handle the misbehavior report + m.HandleMisbehaviorReport(channel, report) + + unittest.RequireCloseBefore(t, reported, 100*time.Millisecond, "metrics for the misbehavior report were not recorded") + + // since the penalty is disabled, we do not expect any calls to the cache. + cache.AssertNotCalled(t, "Adjust", mock.Anything, mock.Anything) +} + +// TestHandleMisbehaviorReport_MultiplePenaltyReportsForSinglePeer_Sequentially tests the handling of multiple misbehavior reports for a single peer. +// Reports are coming in sequentially. +// The test ensures that each misbehavior report is handled correctly and the penalties are cumulatively applied to the peer in the cache. 
+func TestHandleMisbehaviorReport_MultiplePenaltyReportsForSinglePeer_Sequentially(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	// create a new MisbehaviorReportManager
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+	m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+	require.NoError(t, err)
+
+	// start the ALSP manager
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop")
+	}()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	m.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start")
+
+	// creates a list of mock misbehavior reports with negative penalty values for a single peer
+	originId := unittest.IdentifierFixture()
+	reports := createRandomMisbehaviorReportsForOriginId(t, originId, 5)
+
+	channel := channels.Channel("test-channel")
+
+	// handle the misbehavior reports
+	totalPenalty := float64(0)
+	for _, report := range reports {
+		totalPenalty += report.Penalty()
+		m.HandleMisbehaviorReport(channel, report)
+	}
+
+	require.Eventually(t, func() bool {
+		// check if the misbehavior report has been processed by verifying that the Adjust method was called on the cache
+		record, ok := cache.Get(originId)
+		if !ok {
+			return false
+		}
+		require.NotNil(t, record)
+
+		if totalPenalty != record.Penalty {
+			// all the misbehavior reports should be processed by now, so the penalty should be equal to the total penalty
+			return false
+		}
+		require.False(t, record.DisallowListed) // the peer should not be disallow-listed yet.
+		// with just a few misbehavior reports, the cutoff counter should not be incremented.
+		require.Equal(t, uint64(0), record.CutoffCounter)
+		// the decay should be the default decay value.
+		require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+
+		return true
+	}, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report")
+}
+
+// TestHandleMisbehaviorReport_MultiplePenaltyReportsForSinglePeer_Concurrently tests the handling of multiple misbehavior reports for a single peer.
+// Reports are coming in concurrently.
+// The test ensures that each misbehavior report is handled correctly and the penalties are cumulatively applied to the peer in the cache.
+func TestHandleMisbehaviorReport_MultiplePenaltyReportsForSinglePeer_Concurrently(t *testing.T) { + cfg := managerCfgFixture(t) + consumer := mocknetwork.NewDisallowListNotificationConsumer(t) + + var cache alsp.SpamRecordCache + cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{ + alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache { + cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory()) + return cache + }), + } + m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer) + require.NoError(t, err) + + // start the ALSP manager + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop") + }() + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + m.Start(signalerCtx) + unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start") + + // creates a list of mock misbehavior reports with negative penalty values for a single peer + originId := unittest.IdentifierFixture() + reports := createRandomMisbehaviorReportsForOriginId(t, originId, 5) + + channel := channels.Channel("test-channel") + + wg := sync.WaitGroup{} + wg.Add(len(reports)) + // handle the misbehavior reports + totalPenalty := float64(0) + for _, report := range reports { + r := report // capture range variable + totalPenalty += report.Penalty() + go func() { + defer wg.Done() + + m.HandleMisbehaviorReport(channel, r) + }() + } + + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed") + + require.Eventually(t, func() bool { + // check if the misbehavior report has been processed by verifying that the Adjust method was called on the cache + record, ok := cache.Get(originId) + if !ok { + return false + } + require.NotNil(t, record) + + if totalPenalty != record.Penalty { + // all the misbehavior reports should be processed by now, so the penalty should be equal to the total penalty + return false + } + require.False(t, record.DisallowListed) // the peer should not be disallow listed yet. + // with just reporting a few misbehavior reports, the cutoff counter should not be incremented. + require.Equal(t, uint64(0), record.CutoffCounter) + // the decay should be the default decay value. + require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay) + + return true + }, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report") +} + +// TestHandleMisbehaviorReport_SinglePenaltyReportsForMultiplePeers_Sequentially tests the handling of single misbehavior reports for multiple peers. +// Reports are coming in sequentially. +// The test ensures that each misbehavior report is handled correctly and the penalties are applied to the corresponding peers in the cache. 
+func TestHandleMisbehaviorReport_SinglePenaltyReportsForMultiplePeers_Sequentially(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+	m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+	require.NoError(t, err)
+
+	// start the ALSP manager
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop")
+	}()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	m.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start")
+
+	// creates a list of single misbehavior reports for multiple peers (10 peers)
+	numPeers := 10
+	reports := createRandomMisbehaviorReports(t, numPeers)
+
+	channel := channels.Channel("test-channel")
+
+	// handle the misbehavior reports
+	for _, report := range reports {
+		m.HandleMisbehaviorReport(channel, report)
+	}
+
+	// check if the misbehavior reports have been processed by verifying that the Adjust method was called on the cache
+	require.Eventually(t, func() bool {
+		for _, report := range reports {
+			originID := report.OriginId()
+			record, ok := cache.Get(originID)
+			if !ok {
+				return false
+			}
+			require.NotNil(t, record)
+			require.False(t, record.DisallowListed) // the peer should not be disallow listed yet.
+			require.Equal(t, report.Penalty(), record.Penalty)
+			// with just reporting a single misbehavior report, the cutoff counter should not be incremented.
+			require.Equal(t, uint64(0), record.CutoffCounter)
+			// the decay should be the default decay value.
+			require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+		}
+
+		return true
+	}, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report")
+}
+
+// TestHandleMisbehaviorReport_SinglePenaltyReportsForMultiplePeers_Concurrently tests the handling of single misbehavior reports for multiple peers.
+// Reports are coming in concurrently.
+// The test ensures that each misbehavior report is handled correctly and the penalties are applied to the corresponding peers in the cache.
+func TestHandleMisbehaviorReport_SinglePenaltyReportsForMultiplePeers_Concurrently(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+	m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+	require.NoError(t, err)
+
+	// start the ALSP manager
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop")
+	}()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	m.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start")
+
+	// creates a list of single misbehavior reports for multiple peers (10 peers)
+	numPeers := 10
+	reports := createRandomMisbehaviorReports(t, numPeers)
+
+	channel := channels.Channel("test-channel")
+
+	wg := sync.WaitGroup{}
+	wg.Add(len(reports))
+	// handle the misbehavior reports
+	for _, report := range reports {
+		r := report // capture range variable
+		go func() {
+			defer wg.Done()
+
+			m.HandleMisbehaviorReport(channel, r)
+		}()
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed")
+
+	// check if the misbehavior reports have been processed by verifying that the Adjust method was called on the cache
+	require.Eventually(t, func() bool {
+		for _, report := range reports {
+			originID := report.OriginId()
+			record, ok := cache.Get(originID)
+			if !ok {
+				return false
+			}
+			require.NotNil(t, record)
+			require.False(t, record.DisallowListed) // the peer should not be disallow listed yet.
+			require.Equal(t, report.Penalty(), record.Penalty)
+			// with just reporting a single misbehavior report, the cutoff counter should not be incremented.
+			require.Equal(t, uint64(0), record.CutoffCounter)
+			// the decay should be the default decay value.
+			require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+		}
+
+		return true
+	}, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report")
+}
+
+// TestHandleMisbehaviorReport_MultiplePenaltyReportsForMultiplePeers_Sequentially tests the handling of multiple misbehavior reports for multiple peers.
+// Reports are coming in sequentially.
+// The test ensures that each misbehavior report is handled correctly and the penalties are cumulatively applied to the corresponding peers in the cache.
+func TestHandleMisbehaviorReport_MultiplePenaltyReportsForMultiplePeers_Sequentially(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+	m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+	require.NoError(t, err)
+
+	// start the ALSP manager
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop")
+	}()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	m.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start")
+
+	// create a map of origin IDs to their respective misbehavior reports (10 peers, 5 reports each)
+	numPeers := 10
+	numReportsPerPeer := 5
+	peersReports := make(map[flow.Identifier][]network.MisbehaviorReport)
+
+	for i := 0; i < numPeers; i++ {
+		originID := unittest.IdentifierFixture()
+		reports := createRandomMisbehaviorReportsForOriginId(t, originID, numReportsPerPeer)
+		peersReports[originID] = reports
+	}
+
+	channel := channels.Channel("test-channel")
+
+	// handle the misbehavior reports sequentially
+	for _, reports := range peersReports {
+		for _, report := range reports {
+			m.HandleMisbehaviorReport(channel, report)
+		}
+	}
+
+	// check if the misbehavior reports have been processed by verifying that the Adjust method was called on the cache
+	require.Eventually(t, func() bool {
+		for originID, reports := range peersReports {
+			totalPenalty := float64(0)
+			for _, report := range reports {
+				totalPenalty += report.Penalty()
+			}
+
+			record, ok := cache.Get(originID)
+			if !ok {
+				return false
+			}
+			require.NotNil(t, record)
+			require.False(t, record.DisallowListed) // the peer should not be disallow listed yet.
+			require.Equal(t, totalPenalty, record.Penalty)
+			// with just reporting a few misbehavior reports, the cutoff counter should not be incremented.
+			require.Equal(t, uint64(0), record.CutoffCounter)
+			// the decay should be the default decay value.
+			require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+		}
+
+		return true
+	}, 2*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report")
+}
+
+// TestHandleMisbehaviorReport_MultiplePenaltyReportsForMultiplePeers_Concurrently tests the handling of multiple misbehavior reports for multiple peers.
+// Reports are coming in concurrently.
+// The test ensures that each misbehavior report is handled correctly and the penalties are cumulatively applied to the corresponding peers in the cache.
+func TestHandleMisbehaviorReport_MultiplePenaltyReportsForMultiplePeers_Concurrently(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+	m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+	require.NoError(t, err)
+
+	// start the ALSP manager
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop")
+	}()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	m.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start")
+
+	// create a map of origin IDs to their respective misbehavior reports (10 peers, 5 reports each)
+	numPeers := 10
+	numReportsPerPeer := 5
+	peersReports := make(map[flow.Identifier][]network.MisbehaviorReport)
+
+	for i := 0; i < numPeers; i++ {
+		originID := unittest.IdentifierFixture()
+		reports := createRandomMisbehaviorReportsForOriginId(t, originID, numReportsPerPeer)
+		peersReports[originID] = reports
+	}
+
+	channel := channels.Channel("test-channel")
+
+	wg := sync.WaitGroup{}
+	// handle the misbehavior reports concurrently
+	for _, reports := range peersReports {
+		wg.Add(len(reports))
+		for _, report := range reports {
+			r := report // capture range variable
+			go func() {
+				defer wg.Done()
+
+				m.HandleMisbehaviorReport(channel, r)
+			}()
+		}
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed")
+
+	// check if the misbehavior reports have been processed by verifying that the Adjust method was called on the cache
+	require.Eventually(t, func() bool {
+		for originID, reports := range peersReports {
+			totalPenalty := float64(0)
+			for _, report := range reports {
+				totalPenalty += report.Penalty()
+			}
+
+			record, ok := cache.Get(originID)
+			if !ok {
+				return false
+			}
+			require.NotNil(t, record)
+			require.False(t, record.DisallowListed) // the peer should not be disallow listed yet.
+			require.Equal(t, totalPenalty, record.Penalty)
+			// with just reporting a few misbehavior reports, the cutoff counter should not be incremented.
+			require.Equal(t, uint64(0), record.CutoffCounter)
+			// the decay should be the default decay value.
+			require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+		}
+
+		return true
+	}, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report")
+}
+
+// TestHandleMisbehaviorReport_DuplicateReportsForSinglePeer_Concurrently tests the handling of duplicate misbehavior reports for a single peer.
+// Reports are coming in concurrently.
+// The test ensures that each misbehavior report is handled correctly and the penalties are cumulatively applied to the peer in the cache; in
+// other words, the duplicate reports are not ignored. This is important because each misbehavior report is assumed to report a distinct
+// misbehavior, even if the reports arrive with the same description. This is similar to traffic tickets: each ticket uniquely identifies
+// a separate violation, even though the description of the violation is the same.
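+// As a worked example (illustrative numbers only): if the same report with penalty -5 is submitted 100 times, the
+// record accumulates 100 * -5 = -500, rather than the -5 that a deduplicating design would keep.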
+func TestHandleMisbehaviorReport_DuplicateReportsForSinglePeer_Concurrently(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+	m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+	require.NoError(t, err)
+
+	// start the ALSP manager
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop")
+	}()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	m.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start")
+
+	// creates a single misbehavior report
+	originId := unittest.IdentifierFixture()
+	report := misbehaviorReportFixture(t, originId)
+
+	channel := channels.Channel("test-channel")
+
+	times := 100 // number of times the duplicate misbehavior report is reported concurrently
+	wg := sync.WaitGroup{}
+	wg.Add(times)
+
+	// concurrently reports the same misbehavior report `times` times
+	for i := 0; i < times; i++ {
+		go func() {
+			defer wg.Done()
+
+			m.HandleMisbehaviorReport(channel, report)
+		}()
+	}
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed")
+
+	require.Eventually(t, func() bool {
+		// check if the misbehavior reports have been processed by verifying that the Adjust method was called on the cache
+		record, ok := cache.Get(originId)
+		if !ok {
+			return false
+		}
+		require.NotNil(t, record)
+
+		// eventually, the penalty should be the accumulated penalty of all the duplicate misbehavior reports.
+		if record.Penalty != report.Penalty()*float64(times) {
+			return false
+		}
+		require.False(t, record.DisallowListed) // the peer should not be disallow listed yet.
+		// with just reporting a few misbehavior reports, the cutoff counter should not be incremented.
+		require.Equal(t, uint64(0), record.CutoffCounter)
+		// the decay should be the default decay value.
+		require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+
+		return true
+	}, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report")
+}
+
+// TestDecayMisbehaviorPenalty_SingleHeartbeat tests the decay of the misbehavior penalty. The test ensures that the misbehavior penalty
+// is decayed after a single heartbeat. The test guarantees waiting for at least one heartbeat by waiting for the first decay to happen.
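+// As a sketch of the decay model assumed by this test (an arithmetic progression capped at zero, mirroring the
+// description in network/alsp/model/params.go; the helper name below is hypothetical):
+//
+//	// penalty after n heartbeats, given an initial penalty p0 (negative) and decay d (positive)
+//	penaltyAfter := func(p0 float64, d float64, n int) float64 {
+//		return math.Min(0, p0+float64(n)*d)
+//	}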
+func TestDecayMisbehaviorPenalty_SingleHeartbeat(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+	m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+	require.NoError(t, err)
+
+	// start the ALSP manager
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop")
+	}()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	m.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start")
+
+	// creates a single misbehavior report
+	originId := unittest.IdentifierFixture()
+	report := misbehaviorReportFixtureWithDefaultPenalty(t, originId)
+	require.Less(t, report.Penalty(), float64(0)) // ensure the penalty is negative
+
+	channel := channels.Channel("test-channel")
+
+	// number of times the duplicate misbehavior report is reported concurrently
+	times := 10
+	wg := sync.WaitGroup{}
+	wg.Add(times)
+
+	// concurrently reports the same misbehavior report `times` times
+	for i := 0; i < times; i++ {
+		go func() {
+			defer wg.Done()
+
+			m.HandleMisbehaviorReport(channel, report)
+		}()
+	}
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed")
+
+	// phase-1: eventually all the misbehavior reports should be processed.
+	penaltyBeforeDecay := float64(0)
+	require.Eventually(t, func() bool {
+		// check if the misbehavior reports have been processed by verifying that the Adjust method was called on the cache
+		record, ok := cache.Get(originId)
+		if !ok {
+			return false
+		}
+		require.NotNil(t, record)
+
+		// eventually, the penalty should be the accumulated penalty of all the duplicate misbehavior reports.
+		if record.Penalty != report.Penalty()*float64(times) {
+			return false
+		}
+		require.False(t, record.DisallowListed) // the peer should not be disallow listed yet.
+		// with just reporting a few misbehavior reports, the cutoff counter should not be incremented.
+		require.Equal(t, uint64(0), record.CutoffCounter)
+		// the decay should be the default decay value.
+		require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+
+		penaltyBeforeDecay = record.Penalty
+		return true
+	}, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report")
+
+	// phase-2: wait enough for at least one heartbeat to be processed.
+	time.Sleep(1 * time.Second)
+
+	// phase-3: check if the penalty was decayed for at least one heartbeat.
+	record, ok := cache.Get(originId)
+	require.True(t, ok) // the record should be in the cache
+	require.NotNil(t, record)
+
+	// with at least a single heartbeat, the penalty should be greater than the penalty before the decay.
+	require.Greater(t, record.Penalty, penaltyBeforeDecay)
+	// we waited for at most one heartbeat, so the decayed penalty should still be less than the value after 2 heartbeats.
+	require.Less(t, record.Penalty, penaltyBeforeDecay+2*record.Decay)
+	// with just reporting a few misbehavior reports, the cutoff counter should not be incremented.
+	require.Equal(t, uint64(0), record.CutoffCounter)
+	// the decay should be the default decay value.
+	require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+}
+
+// TestDecayMisbehaviorPenalty_MultipleHeartbeats tests the decay of the misbehavior penalty under multiple heartbeats.
+// The test ensures that the misbehavior penalty is decayed with a linear progression within multiple heartbeats.
+func TestDecayMisbehaviorPenalty_MultipleHeartbeats(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+	m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+	require.NoError(t, err)
+
+	// start the ALSP manager
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop")
+	}()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	m.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start")
+
+	// creates a single misbehavior report
+	originId := unittest.IdentifierFixture()
+	report := misbehaviorReportFixtureWithDefaultPenalty(t, originId)
+	require.Less(t, report.Penalty(), float64(0)) // ensure the penalty is negative
+
+	channel := channels.Channel("test-channel")
+
+	// number of times the duplicate misbehavior report is reported concurrently
+	times := 10
+	wg := sync.WaitGroup{}
+	wg.Add(times)
+
+	// concurrently reports the same misbehavior report `times` times
+	for i := 0; i < times; i++ {
+		go func() {
+			defer wg.Done()
+
+			m.HandleMisbehaviorReport(channel, report)
+		}()
+	}
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed")
+
+	// phase-1: eventually all the misbehavior reports should be processed.
+	penaltyBeforeDecay := float64(0)
+	require.Eventually(t, func() bool {
+		// check if the misbehavior reports have been processed by verifying that the Adjust method was called on the cache
+		record, ok := cache.Get(originId)
+		if !ok {
+			return false
+		}
+		require.NotNil(t, record)
+
+		// eventually, the penalty should be the accumulated penalty of all the duplicate misbehavior reports.
+		if record.Penalty != report.Penalty()*float64(times) {
+			return false
+		}
+		// with just reporting a few misbehavior reports, the cutoff counter should not be incremented.
+		require.Equal(t, uint64(0), record.CutoffCounter)
+		// the decay should be the default decay value.
+		require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+
+		penaltyBeforeDecay = record.Penalty
+		return true
+	}, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report")
+
+	// phase-2: wait for 3 heartbeats to be processed.
+	time.Sleep(3 * time.Second)
+
+	// phase-3: check if the penalty was decayed in a linear progression.
+	record, ok := cache.Get(originId)
+	require.True(t, ok) // the record should be in the cache
+	require.NotNil(t, record)
+
+	// with 3 heartbeats processed, the penalty should be greater than the penalty before the decay.
+	require.Greater(t, record.Penalty, penaltyBeforeDecay)
+	// with 3 heartbeats processed, the decayed penalty should be less than the value after 4 heartbeats.
+	require.Less(t, record.Penalty, penaltyBeforeDecay+4*record.Decay)
+	require.False(t, record.DisallowListed) // the peer should not be disallow listed yet.
+	// with just reporting a few misbehavior reports, the cutoff counter should not be incremented.
+	require.Equal(t, uint64(0), record.CutoffCounter)
+	// the decay should be the default decay value.
+	require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+}
+
+// TestDecayMisbehaviorPenalty_DecayToZero tests the decay of the misbehavior penalty all the way back to zero.
+// The test ensures that the penalty is decayed to exactly zero (and not beyond) once the decay value exceeds the remaining penalty.
+func TestDecayMisbehaviorPenalty_DecayToZero(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+	m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+	require.NoError(t, err)
+
+	// start the ALSP manager
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop")
+	}()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	m.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start")
+
+	// creates a single misbehavior report
+	originId := unittest.IdentifierFixture()
+	report := misbehaviorReportFixture(t, originId) // penalties are between -1 and -10
+	require.Less(t, report.Penalty(), float64(0))   // ensure the penalty is negative
+
+	channel := channels.Channel("test-channel")
+
+	// number of times the duplicate misbehavior report is reported concurrently
+	times := 10
+	wg := sync.WaitGroup{}
+	wg.Add(times)
+
+	// concurrently reports the same misbehavior report `times` times
+	for i := 0; i < times; i++ {
+		go func() {
+			defer wg.Done()
+
+			m.HandleMisbehaviorReport(channel, report)
+		}()
+	}
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed")
+
+	// phase-1: eventually all the misbehavior reports should be processed.
+	require.Eventually(t, func() bool {
+		// check if the misbehavior reports have been processed by verifying that the Adjust method was called on the cache
+		record, ok := cache.Get(originId)
+		if !ok {
+			return false
+		}
+		require.NotNil(t, record)
+
+		// eventually, the penalty should be the accumulated penalty of all the duplicate misbehavior reports.
+		if record.Penalty != report.Penalty()*float64(times) {
+			return false
+		}
+		// with just reporting a few misbehavior reports, the cutoff counter should not be incremented.
+		require.Equal(t, uint64(0), record.CutoffCounter)
+		// the decay should be the default decay value.
+ require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay) + + return true + }, 1*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report") + + // phase-2: default decay speed is 1000 and with 10 penalties in range of [-1, -10], the penalty should be decayed to zero in + // a single heartbeat. + time.Sleep(1 * time.Second) + + // phase-3: check if the penalty was decayed to zero. + record, ok := cache.Get(originId) + require.True(t, ok) // the record should be in the cache + require.NotNil(t, record) + + require.False(t, record.DisallowListed) // the peer should not be disallow listed. + // with a single heartbeat and decay speed of 1000, the penalty should be decayed to zero. + require.Equal(t, float64(0), record.Penalty) + // the decay should be the default decay value. + require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay) +} + +// TestDecayMisbehaviorPenalty_DecayToZero_AllowListing tests that when the misbehavior penalty of an already disallow-listed +// peer is decayed to zero, the peer is allow-listed back in the network, and its spam record cache is updated accordingly. +func TestDecayMisbehaviorPenalty_DecayToZero_AllowListing(t *testing.T) { + cfg := managerCfgFixture(t) + consumer := mocknetwork.NewDisallowListNotificationConsumer(t) + + var cache alsp.SpamRecordCache + cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{ + alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache { + cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory()) + return cache + }), + } + m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer) + require.NoError(t, err) + + // start the ALSP manager + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop") + }() + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + m.Start(signalerCtx) + unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start") + + // simulates a disallow-listed peer in cache. + originId := unittest.IdentifierFixture() + penalty, err := cache.AdjustWithInit(originId, func(record *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) { + record.Penalty = -10 // set the penalty to -10 to simulate that the penalty has already been decayed for a while. + record.CutoffCounter = 1 + record.DisallowListed = true + record.OriginId = originId + record.Decay = model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay + return record, nil + }) + require.NoError(t, err) + require.Equal(t, float64(-10), penalty) + + // sanity check + record, ok := cache.Get(originId) + require.True(t, ok) // the record should be in the cache + require.NotNil(t, record) + require.Equal(t, float64(-10), record.Penalty) + require.True(t, record.DisallowListed) + require.Equal(t, uint64(1), record.CutoffCounter) + require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay) + + // eventually, we expect the ALSP manager to emit an allow list notification to the network layer when the penalty is decayed to zero. 
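+	// Note: the expectation below is registered with testify's mock before the decay can reach zero; `.Once()` makes
+	// the mocked consumer fail the test at cleanup if the allow-list notification is emitted zero times or more than once.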
+	consumer.On("OnAllowListNotification", &network.AllowListingUpdate{
+		FlowIds: flow.IdentifierList{originId},
+		Cause:   network.DisallowListedCauseAlsp,
+	}).Return(nil).Once()
+
+	// wait for at most two heartbeats; default decay speed is 1000 and with a penalty of -10, the penalty should be decayed to zero in a single heartbeat.
+	require.Eventually(t, func() bool {
+		record, ok = cache.Get(originId)
+		if !ok {
+			t.Log("spam record not found in cache")
+			return false
+		}
+		if record.DisallowListed {
+			t.Logf("peer %s is still disallow-listed", originId)
+			return false // the peer should not be allow-listed yet.
+		}
+		if record.Penalty != float64(0) {
+			t.Log("penalty is not decayed to zero")
+			return false // the penalty should be decayed to zero.
+		}
+		if record.CutoffCounter != 1 {
+			t.Logf("cutoff counter is %d, expected 1", record.CutoffCounter)
+			return false // the cutoff counter should be incremented.
+		}
+		if record.Decay != model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay {
+			t.Logf("decay is %f, expected %f", record.Decay, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay)
+			return false // the decay should be the default decay value.
+		}
+
+		return true
+	}, 2*time.Second, 10*time.Millisecond, "penalty was not decayed to zero")
+}
+
+// TestDisallowListNotification tests the emission of the disallow list notification to the network layer when the misbehavior
+// penalty of a node is dropped below the disallow-listing threshold. The test ensures that the disallow list notification is
+// emitted to the network layer when the misbehavior penalty is dropped below the disallow-listing threshold and that the
+// cutoff counter of the spam record for the misbehaving node is incremented, indicating that the node has been disallow-listed once.
+func TestDisallowListNotification(t *testing.T) {
+	cfg := managerCfgFixture(t)
+	consumer := mocknetwork.NewDisallowListNotificationConsumer(t)
+
+	var cache alsp.SpamRecordCache
+	cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{
+		alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache {
+			cache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory())
+			return cache
+		}),
+	}
+	m, err := alspmgr.NewMisbehaviorReportManager(cfg, consumer)
+	require.NoError(t, err)
+
+	// start the ALSP manager
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		unittest.RequireCloseBefore(t, m.Done(), 100*time.Millisecond, "ALSP manager did not stop")
+	}()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	m.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, m.Ready(), 100*time.Millisecond, "ALSP manager did not start")
+
+	// creates a single misbehavior report
+	originId := unittest.IdentifierFixture()
+	report := misbehaviorReportFixtureWithDefaultPenalty(t, originId)
+	require.Less(t, report.Penalty(), float64(0)) // ensure the penalty is negative
+
+	channel := channels.Channel("test-channel")
+
+	// reporting the same misbehavior 120 times should result in a single disallow list notification, since each
+	// misbehavior report is reported with the same penalty of 0.01 * disallow-listing threshold. We go over the threshold
+	// to ensure that the disallow list notification is emitted only once.
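+	// Worked arithmetic (assuming the defaults in network/alsp/model/params.go): DisallowListingThreshold = -86400
+	// and DefaultPenaltyValue = 0.01 * -86400 = -864, so the cumulative penalty reaches the threshold at the 100th
+	// report (100 * -864 = -86400) and crosses it right after; 120 reports leave a comfortable margin.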
+	times := 120
+	wg := sync.WaitGroup{}
+	wg.Add(times)
+
+	// concurrently reports the same misbehavior report `times` times
+	for i := 0; i < times; i++ {
+		go func() {
+			defer wg.Done()
+
+			m.HandleMisbehaviorReport(channel, report)
+		}()
+	}
+
+	// at this point, we expect a single disallow list notification to be emitted to the network layer when all the misbehavior
+	// reports are processed by the ALSP manager (the notification is emitted at the next heartbeat).
+	consumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{
+		FlowIds: flow.IdentifierList{report.OriginId()},
+		Cause:   network.DisallowListedCauseAlsp,
+	}).Return().Once()
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "not all misbehavior reports have been processed")
+
+	require.Eventually(t, func() bool {
+		// check if the misbehavior reports have been processed by verifying that the Adjust method was called on the cache
+		record, ok := cache.Get(originId)
+		if !ok {
+			return false
+		}
+		require.NotNil(t, record)
+
+		// eventually, the penalty should be the accumulated penalty of all the duplicate misbehavior reports (with the default decay).
+		// the decay is added to the penalty as we allow for a single heartbeat before the disallow list notification is emitted.
+		if record.Penalty != report.Penalty()*float64(times)+record.Decay {
+			return false
+		}
+		require.True(t, record.DisallowListed) // the peer should be disallow-listed.
+		// the cutoff counter should be incremented since the penalty has dropped below the disallow-listing threshold.
+		require.Equal(t, uint64(1), record.CutoffCounter)
+		// the decay should be the default decay value.
+		require.Equal(t, model.SpamRecordFactory()(unittest.IdentifierFixture()).Decay, record.Decay)
+
+		return true
+	}, 2*time.Second, 10*time.Millisecond, "ALSP manager did not handle the misbehavior report")
+}
+
+// //////////////////////////// TEST HELPERS ///////////////////////////////////////////////////////////////////////////////
+// The following functions are helpers for the tests. It wasn't feasible to put them in a helper file in the alspmgr_test
+// package because that would break encapsulation of the ALSP manager and require making some fields exportable.
+// Putting them in the alspmgr package would cause an import cycle. Therefore, they are put in the internal test package here.
+
+// createRandomMisbehaviorReportsForOriginId creates a slice of random misbehavior reports for a single origin id.
+// Args:
+// - t: the testing.T instance
+// - originID: the origin id of the misbehavior reports
+// - numReports: the number of misbehavior reports to create
+// Returns:
+// - []network.MisbehaviorReport: the slice of misbehavior reports
+// Note: the penalty of the misbehavior reports is randomly chosen between -1 and -10.
+func createRandomMisbehaviorReportsForOriginId(t *testing.T, originID flow.Identifier, numReports int) []network.MisbehaviorReport {
+	reports := make([]network.MisbehaviorReport, numReports)
+
+	for i := 0; i < numReports; i++ {
+		reports[i] = misbehaviorReportFixture(t, originID)
+	}
+
+	return reports
+}
+
+// createRandomMisbehaviorReports creates a slice of random misbehavior reports.
+// Args:
+// - t: the testing.T instance
+// - numReports: the number of misbehavior reports to create
+// Returns:
+// - []network.MisbehaviorReport: the slice of misbehavior reports
+// Note: the penalty of the misbehavior reports is randomly chosen between -1 and -10.
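+// A hypothetical usage sketch (mirroring the tests above):
+//
+//	reports := createRandomMisbehaviorReports(t, 10)
+//	for _, r := range reports {
+//		m.HandleMisbehaviorReport(channels.Channel("test-channel"), r)
+//	}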
+func createRandomMisbehaviorReports(t *testing.T, numReports int) []network.MisbehaviorReport { + reports := make([]network.MisbehaviorReport, numReports) + + for i := 0; i < numReports; i++ { + reports[i] = misbehaviorReportFixture(t, unittest.IdentifierFixture()) + } + + return reports +} + +// managerCfgFixture creates a new MisbehaviorReportManagerConfig with default values for testing. +func managerCfgFixture(t *testing.T) *alspmgr.MisbehaviorReportManagerConfig { + c, err := config.DefaultConfig() + require.NoError(t, err) + return &alspmgr.MisbehaviorReportManagerConfig{ + Logger: unittest.Logger(), + SpamRecordCacheSize: c.NetworkConfig.AlspConfig.SpamRecordCacheSize, + SpamReportQueueSize: c.NetworkConfig.AlspConfig.SpamReportQueueSize, + HeartBeatInterval: c.NetworkConfig.AlspConfig.HearBeatInterval, + AlspMetrics: metrics.NewNoopCollector(), + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + } +} + +// misbehaviorReportFixture creates a mock misbehavior report for a single origin id. +// Args: +// - t: the testing.T instance +// - originID: the origin id of the misbehavior report +// Returns: +// - network.MisbehaviorReport: the misbehavior report +// Note: the penalty of the misbehavior report is randomly chosen between -1 and -10. +func misbehaviorReportFixture(t *testing.T, originID flow.Identifier) network.MisbehaviorReport { + return misbehaviorReportFixtureWithPenalty(t, originID, math.Min(-1, float64(-1-rand.Intn(10)))) +} + +func misbehaviorReportFixtureWithDefaultPenalty(t *testing.T, originID flow.Identifier) network.MisbehaviorReport { + return misbehaviorReportFixtureWithPenalty(t, originID, model.DefaultPenaltyValue) +} + +func misbehaviorReportFixtureWithPenalty(t *testing.T, originID flow.Identifier, penalty float64) network.MisbehaviorReport { + report := mocknetwork.NewMisbehaviorReport(t) + report.On("OriginId").Return(originID) + report.On("Reason").Return(alsp.AllMisbehaviorTypes()[rand.Intn(len(alsp.AllMisbehaviorTypes()))]) + report.On("Penalty").Return(penalty) + + return report +} diff --git a/network/alsp/manager_test.go b/network/alsp/manager_test.go deleted file mode 100644 index c22508d5059..00000000000 --- a/network/alsp/manager_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package alsp_test - -import ( - "context" - "math/rand" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/module/metrics" - mockmodule "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/alsp" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/internal/testutils" - "github.com/onflow/flow-go/network/mocknetwork" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/conduit" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestHandleReportedMisbehavior tests the handling of reported misbehavior by the network. -// -// The test sets up a mock MisbehaviorReportManager and a conduitFactory with this manager. -// It generates a single node network with the conduitFactory and starts it. -// It then uses a mock engine to register a channel with the network. -// It prepares a set of misbehavior reports and reports them to the conduit on the test channel. 
-// The test ensures that the MisbehaviorReportManager receives and handles all reported misbehavior -// without any duplicate reports and within a specified time. -func TestHandleReportedMisbehavior(t *testing.T) { - misbehaviorReportManger := mocknetwork.NewMisbehaviorReportManager(t) - conduitFactory := conduit.NewDefaultConduitFactory( - unittest.Logger(), - metrics.NewNoopCollector(), - conduit.WithMisbehaviorManager(misbehaviorReportManger)) - - ids, nodes, mws, _, _ := testutils.GenerateIDsAndMiddlewares( - t, - 1, - unittest.Logger(), - unittest.NetworkCodec(), - unittest.NetworkSlashingViolationsConsumer(unittest.Logger(), metrics.NewNoopCollector())) - sms := testutils.GenerateSubscriptionManagers(t, mws) - networks := testutils.GenerateNetworks( - t, - unittest.Logger(), - ids, - mws, - sms, - p2p.WithConduitFactory(conduitFactory)) - - ctx, cancel := context.WithCancel(context.Background()) - - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - testutils.StartNodesAndNetworks(signalerCtx, t, nodes, networks, 100*time.Millisecond) - defer testutils.StopComponents[p2p.LibP2PNode](t, nodes, 100*time.Millisecond) - defer cancel() - - e := mocknetwork.NewEngine(t) - con, err := networks[0].Register(channels.TestNetworkChannel, e) - require.NoError(t, err) - - reports := testutils.MisbehaviorReportsFixture(t, 10) - allReportsManaged := sync.WaitGroup{} - allReportsManaged.Add(len(reports)) - var seenReports []network.MisbehaviorReport - misbehaviorReportManger.On("HandleMisbehaviorReport", channels.TestNetworkChannel, mock.Anything).Run(func(args mock.Arguments) { - report := args.Get(1).(network.MisbehaviorReport) - require.Contains(t, reports, report) // ensures that the report is one of the reports we expect. - require.NotContainsf(t, seenReports, report, "duplicate report: %v", report) // ensures that we have not seen this report before. - seenReports = append(seenReports, report) // adds the report to the list of seen reports. - allReportsManaged.Done() - }).Return(nil) - - for _, report := range reports { - con.ReportMisbehavior(report) // reports the misbehavior - } - - unittest.RequireReturnsBefore(t, allReportsManaged.Wait, 100*time.Millisecond, "did not receive all reports") -} - -// TestMisbehaviorReportMetrics tests the recording of misbehavior report metrics. -// It checks that when a misbehavior report is received by the ALSP manager, the metrics are recorded. -// It fails the test if the metrics are not recorded or if they are recorded incorrectly. 
-func TestMisbehaviorReportMetrics(t *testing.T) { - alspMetrics := mockmodule.NewAlspMetrics(t) - conduitFactory := conduit.NewDefaultConduitFactory( - unittest.Logger(), - alspMetrics) - - ids, nodes, mws, _, _ := testutils.GenerateIDsAndMiddlewares( - t, - 1, - unittest.Logger(), - unittest.NetworkCodec(), - unittest.NetworkSlashingViolationsConsumer(unittest.Logger(), metrics.NewNoopCollector())) - sms := testutils.GenerateSubscriptionManagers(t, mws) - networks := testutils.GenerateNetworks( - t, - unittest.Logger(), - ids, - mws, - sms, - p2p.WithConduitFactory(conduitFactory)) - - ctx, cancel := context.WithCancel(context.Background()) - - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - testutils.StartNodesAndNetworks(signalerCtx, t, nodes, networks, 100*time.Millisecond) - defer testutils.StopComponents[p2p.LibP2PNode](t, nodes, 100*time.Millisecond) - defer cancel() - - e := mocknetwork.NewEngine(t) - con, err := networks[0].Register(channels.TestNetworkChannel, e) - require.NoError(t, err) - - report := testutils.MisbehaviorReportFixture(t) - - // this channel is used to signal that the metrics have been recorded by the ALSP manager correctly. - reported := make(chan struct{}) - - // ensures that the metrics are recorded when a misbehavior report is received. - alspMetrics.On("OnMisbehaviorReported", channels.TestNetworkChannel.String(), report.Reason().String()).Run(func(args mock.Arguments) { - close(reported) - }).Once() - - con.ReportMisbehavior(report) // reports the misbehavior - - unittest.RequireCloseBefore(t, reported, 100*time.Millisecond, "metrics for the misbehavior report were not recorded") -} - -// The TestReportCreation tests the creation of misbehavior reports using the alsp.NewMisbehaviorReport function. -// The function tests the creation of both valid and invalid misbehavior reports by setting different penalty amplification values. -func TestReportCreation(t *testing.T) { - - // creates a valid misbehavior report (i.e., amplification between 1 and 100) - report, err := alsp.NewMisbehaviorReport( - unittest.IdentifierFixture(), - testutils.MisbehaviorTypeFixture(t), - alsp.WithPenaltyAmplification(10)) - require.NoError(t, err) - require.NotNil(t, report) - - // creates a valid misbehavior report with default amplification. - report, err = alsp.NewMisbehaviorReport( - unittest.IdentifierFixture(), - testutils.MisbehaviorTypeFixture(t)) - require.NoError(t, err) - require.NotNil(t, report) - - // creates an in valid misbehavior report (i.e., amplification greater than 100 and less than 1) - report, err = alsp.NewMisbehaviorReport( - unittest.IdentifierFixture(), - testutils.MisbehaviorTypeFixture(t), - alsp.WithPenaltyAmplification(rand.Intn(100)-101)) - require.Error(t, err) - require.Nil(t, report) - - report, err = alsp.NewMisbehaviorReport( - unittest.IdentifierFixture(), - testutils.MisbehaviorTypeFixture(t), - alsp.WithPenaltyAmplification(rand.Int()+101)) - require.Error(t, err) - require.Nil(t, report) - - // 0 is not a valid amplification - report, err = alsp.NewMisbehaviorReport( - unittest.IdentifierFixture(), - testutils.MisbehaviorTypeFixture(t), - alsp.WithPenaltyAmplification(0)) - require.Error(t, err) - require.Nil(t, report) -} diff --git a/network/alsp/misbehavior.go b/network/alsp/misbehavior.go index 326b113cd8b..af4921cd06a 100644 --- a/network/alsp/misbehavior.go +++ b/network/alsp/misbehavior.go @@ -24,6 +24,25 @@ const ( // the message is not valid according to the engine's validation logic. 
The decision to consider a message invalid // is up to the engine. InvalidMessage network.Misbehavior = "misbehavior-invalid-message" + + // UnExpectedValidationError is a misbehavior that is reported when a validation error is encountered during message validation before the message + // is processed by an engine. + UnExpectedValidationError network.Misbehavior = "unexpected-validation-error" + + // UnknownMsgType is a misbehavior that is reported when a message of unknown type is received from a peer. + UnknownMsgType network.Misbehavior = "unknown-message-type" + + // SenderEjected is a misbehavior that is reported when a message is received from an ejected peer. + SenderEjected network.Misbehavior = "sender-ejected" + + // UnauthorizedUnicastOnChannel is a misbehavior that is reported when a message not authorized to be sent via unicast is received via unicast. + UnauthorizedUnicastOnChannel network.Misbehavior = "unauthorized-unicast-on-channel" + + // UnAuthorizedSender is a misbehavior that is reported when a message is sent by an unauthorized role. + UnAuthorizedSender network.Misbehavior = "unauthorized-sender" + + // UnauthorizedPublishOnChannel is a misbehavior that is reported when a message not authorized to be sent via pubsub is received via pubsub. + UnauthorizedPublishOnChannel network.Misbehavior = "unauthorized-pubsub-on-channel" ) func AllMisbehaviorTypes() []network.Misbehavior { @@ -33,5 +52,11 @@ func AllMisbehaviorTypes() []network.Misbehavior { RedundantMessage, UnsolicitedMessage, InvalidMessage, + UnExpectedValidationError, + UnknownMsgType, + SenderEjected, + UnauthorizedUnicastOnChannel, + UnauthorizedPublishOnChannel, + UnAuthorizedSender, } } diff --git a/network/alsp/mock/spam_record_cache.go b/network/alsp/mock/spam_record_cache.go new file mode 100644 index 00000000000..695611278d3 --- /dev/null +++ b/network/alsp/mock/spam_record_cache.go @@ -0,0 +1,143 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + model "github.com/onflow/flow-go/network/alsp/model" +) + +// SpamRecordCache is an autogenerated mock type for the SpamRecordCache type +type SpamRecordCache struct { + mock.Mock +} + +// AdjustWithInit provides a mock function with given fields: originId, adjustFunc +func (_m *SpamRecordCache) AdjustWithInit(originId flow.Identifier, adjustFunc model.RecordAdjustFunc) (float64, error) { + ret := _m.Called(originId, adjustFunc) + + if len(ret) == 0 { + panic("no return value specified for AdjustWithInit") + } + + var r0 float64 + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, model.RecordAdjustFunc) (float64, error)); ok { + return rf(originId, adjustFunc) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, model.RecordAdjustFunc) float64); ok { + r0 = rf(originId, adjustFunc) + } else { + r0 = ret.Get(0).(float64) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, model.RecordAdjustFunc) error); ok { + r1 = rf(originId, adjustFunc) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Get provides a mock function with given fields: originId +func (_m *SpamRecordCache) Get(originId flow.Identifier) (*model.ProtocolSpamRecord, bool) { + ret := _m.Called(originId) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *model.ProtocolSpamRecord + var r1 bool + if rf, ok := ret.Get(0).(func(flow.Identifier) (*model.ProtocolSpamRecord, bool)); ok { + return rf(originId) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *model.ProtocolSpamRecord); ok { + r0 = rf(originId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.ProtocolSpamRecord) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) bool); ok { + r1 = rf(originId) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// Identities provides a mock function with no fields +func (_m *SpamRecordCache) Identities() []flow.Identifier { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Identities") + } + + var r0 []flow.Identifier + if rf, ok := ret.Get(0).(func() []flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.Identifier) + } + } + + return r0 +} + +// Remove provides a mock function with given fields: originId +func (_m *SpamRecordCache) Remove(originId flow.Identifier) bool { + ret := _m.Called(originId) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { + r0 = rf(originId) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Size provides a mock function with no fields +func (_m *SpamRecordCache) Size() uint { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Size") + } + + var r0 uint + if rf, ok := ret.Get(0).(func() uint); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint) + } + + return r0 +} + +// NewSpamRecordCache creates a new instance of SpamRecordCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
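+// A minimal usage sketch (hypothetical, following the standard mockery pattern):
+//
+//	cache := mock.NewSpamRecordCache(t)
+//	cache.On("Size").Return(uint(0)).Once()
+//	_ = cache.Size() // the expectation is asserted automatically at test cleanup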
+func NewSpamRecordCache(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *SpamRecordCache {
+	mock := &SpamRecordCache{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/network/alsp/model/params.go b/network/alsp/model/params.go
new file mode 100644
index 00000000000..7d722b2d3c3
--- /dev/null
+++ b/network/alsp/model/params.go
@@ -0,0 +1,47 @@
+package model
+
+// To give a summary with the default values:
+// 1. The penalty of each misbehavior is 0.01 * DisallowListingThreshold = -864
+// 2. The penalty of each misbehavior is decayed by a decay value at each decay interval. The default decay value is 1000.
+// This means that by default if a node misbehaves 100 times in a second, it gets disallow-listed, and takes 86.4 seconds to recover.
+// We emphasize that the default penalty value can be amplified by the engine that reports the misbehavior.
+// 3. Each time a node is disallow-listed, its decay speed is decreased by 90%. This means that if a node is disallow-listed
+// for the first time, it takes 86.4 seconds to recover. If the node is disallow-listed for the second time, its decay
+// speed is decreased by 90% from 1000 to 100, and it takes around 15 minutes to recover. If the node is disallow-listed
+// for the third time, its decay speed is decreased by 90% from 100 to 10, and it takes around 2.5 hours to recover.
+// If the node is disallow-listed for the fourth time, its decay speed is decreased by 90% from 10 to 1, and it takes
+// around a day to recover. From this point on, the decay speed is 1, and it takes around a day to recover from each
+// disallow-listing.
+const (
+	// DisallowListingThreshold is the threshold for concluding that a node's behavior is malicious and disallow-listing the node.
+	// If the overall penalty of this node drops below this threshold, the node is reported to be disallow-listed by
+	// the networking layer, i.e., existing connections to the node are closed and the node is no longer allowed to connect till
+	// its penalty is decayed back to zero.
+	// maximum block-list period is 1 day
+	DisallowListingThreshold = -24 * 60 * 60 // (Don't change this value)
+
+	// DefaultPenaltyValue is the default penalty value for misbehaving nodes.
+	// By default, each reported infringement will be penalized by this value. However, the penalty can be amplified
+	// by the engine that reports the misbehavior. The penalty system is designed in a way that more than 100 misbehaviors/sec
+	// at the default penalty value will result in disallow-listing the node. By amplifying the penalty, the engine can
+	// decrease the number of misbehaviors/sec that will result in disallow-listing the node. For example, if the engine
+	// amplifies the penalty by 10, the number of misbehaviors/sec needed to disallow-list the node is 10 times smaller
+	// than with the default penalty value, i.e., the node will be disallow-listed after 10 misbehaviors/sec.
+	DefaultPenaltyValue = 0.01 * DisallowListingThreshold // (Don't change this value)
+
+	// InitialDecaySpeed is the initial decay speed of the penalty of a misbehaving node.
+	// The decay speed is applied on an arithmetic progression. The penalty value of the node is the first term of the
+	// progression and the decay speed is the common difference of the progression, i.e., p(n) = p(0) + n * d, where
+	// p(n) is the penalty value of the node after n decay intervals, p(0) is the initial penalty value of the node, and
+	// d is the decay speed. Decay intervals are set to 1 second (protocol invariant). Hence, with the initial decay speed
+	// of 1000, the penalty value of the node will be decreased by 1000 every second. This means that if a node misbehaves
+	// 100 times in a second, it gets disallow-listed, and takes 86.4 seconds to recover.
+	// In the mature implementation of the protocol, the decay speed of a node is decreased by 90% each time the node is
+	// disallow-listed. This means that if a node is disallow-listed for the first time, it takes 86.4 seconds to recover.
+	// If the node is disallow-listed for the second time, its decay speed is decreased by 90% from 1000 to 100, and it
+	// takes around 15 minutes to recover. If the node is disallow-listed for the third time, its decay speed is decreased
+	// by 90% from 100 to 10, and it takes around 2.5 hours to recover. If the node is disallow-listed for the fourth time,
+	// its decay speed is decreased by 90% from 10 to 1, and it takes around a day to recover. From this point on, the decay
+	// speed is 1, and it takes around a day to recover from each disallow-listing.
+	InitialDecaySpeed = 1000 // (Don't change this value)
+)
diff --git a/network/alsp/model/record.go b/network/alsp/model/record.go
new file mode 100644
index 00000000000..9aa9bf75d7b
--- /dev/null
+++ b/network/alsp/model/record.go
@@ -0,0 +1,60 @@
+package model
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// ProtocolSpamRecord is a record of a misbehaving node. It is used to keep track of the Penalty value of the node
+// and the number of times it has been slashed due to its Penalty value dropping below the disallow-listing threshold.
+type ProtocolSpamRecord struct {
+	// OriginId is the node id of the misbehaving node. It is assumed an authorized (i.e., staked) node at the
+	// time of the misbehavior report creation (otherwise, the networking layer should not have dispatched the
+	// message to the Flow protocol layer in the first place).
+	OriginId flow.Identifier
+
+	// Decay speed of Penalty for this misbehaving node. Each node may have a different Decay speed based on its behavior.
+	// Subsequent disallow-listings of the node will decrease its Decay speed, so it will take longer to be allow-listed.
+	Decay float64
+
+	// CutoffCounter is a counter that is used to determine how many times the connections to the node have been cut due to
+	// its Penalty value dropping below the disallow-listing threshold.
+	// Note that the cutoff connections are recovered after a certain amount of time.
+	CutoffCounter uint64
+
+	// DisallowListed indicates whether the node is currently disallow-listed or not. When a node is in the disallow-list,
+	// the existing connections to the node are cut and no new connections are allowed to be established, neither incoming
+	// nor outgoing.
+	DisallowListed bool
+
+	// total Penalty value of the misbehaving node. Should be a negative value.
+	Penalty float64
+}
+
+// RecordAdjustFunc is a function that is used to adjust the fields of a ProtocolSpamRecord.
+// The function is called with the current record and should return the adjusted record.
+// A returned error indicates that the adjustment is not applied, and the record should not be updated.
+// In a BFT setup, the returned error should be treated as a fatal error.
+type RecordAdjustFunc func(*ProtocolSpamRecord) (*ProtocolSpamRecord, error)
+
+// SpamRecordFactoryFunc is a function that creates a new protocol spam record with the given origin id and initial values.
diff --git a/network/alsp/model/record.go b/network/alsp/model/record.go
new file mode 100644
index 00000000000..9aa9bf75d7b
--- /dev/null
+++ b/network/alsp/model/record.go
@@ -0,0 +1,60 @@
+package model
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// ProtocolSpamRecord is a record of a misbehaving node. It is used to keep track of the Penalty value of the node
+// and the number of times it has been slashed due to its Penalty value dropping below the disallow-listing threshold.
+type ProtocolSpamRecord struct {
+	// OriginId is the node id of the misbehaving node. It is assumed to be an authorized (i.e., staked) node at the
+	// time of the misbehavior report creation (otherwise, the networking layer should not have dispatched the
+	// message to the Flow protocol layer in the first place).
+	OriginId flow.Identifier
+
+	// Decay speed of Penalty for this misbehaving node. Each node may have a different Decay speed based on its behavior.
+	// Subsequent disallow-listings of the node will decrease its Decay speed, so it will take longer to be allow-listed.
+	Decay float64
+
+	// CutoffCounter is a counter that is used to determine how many times the connections to the node have been cut due to
+	// its Penalty value dropping below the disallow-listing threshold.
+	// Note that the cutoff connections are recovered after a certain amount of time.
+	CutoffCounter uint64
+
+	// DisallowListed indicates whether the node is currently disallow-listed or not. When a node is in the disallow-list,
+	// the existing connections to the node are cut and no new connections are allowed to be established, neither incoming
+	// nor outgoing.
+	DisallowListed bool
+
+	// total Penalty value of the misbehaving node. Should be a negative value.
+	Penalty float64
+}
+
+// RecordAdjustFunc is a function that is used to adjust the fields of a ProtocolSpamRecord.
+// The function is called with the current record and should return the adjusted record.
+// A returned error indicates that the adjustment was not applied, and the record should not be updated.
+// In a BFT setup, the returned error should be treated as a fatal error.
+type RecordAdjustFunc func(*ProtocolSpamRecord) (*ProtocolSpamRecord, error)
+
+// SpamRecordFactoryFunc is a function that creates a new protocol spam record with the given origin id and initial values.
+// Args:
+// - originId: the origin id of the spam record.
+// Returns:
+// - ProtocolSpamRecord, the created record.
+type SpamRecordFactoryFunc func(flow.Identifier) *ProtocolSpamRecord
+
+// SpamRecordFactory returns the default factory function for creating a new protocol spam record.
+// Returns:
+// - SpamRecordFactoryFunc, the default factory function.
+// Note that the default factory function creates a new record with the initial values.
+func SpamRecordFactory() SpamRecordFactoryFunc {
+	return func(originId flow.Identifier) *ProtocolSpamRecord {
+		return &ProtocolSpamRecord{
+			OriginId:       originId,
+			Decay:          InitialDecaySpeed,
+			DisallowListed: false,
+			CutoffCounter:  uint64(0),
+			Penalty:        float64(0),
+		}
+	}
+}
diff --git a/network/alsp/params.go b/network/alsp/params.go
deleted file mode 100644
index f855ab5f6d9..00000000000
--- a/network/alsp/params.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package alsp
-
-// To give a summary with the default value:
-// 1. The penalty of each misbehavior is 0.01 * misbehaviorDisallowListingThreshold = -864
-// 2. The penalty of each misbehavior is decayed by a decay value at each decay interval. The default decay value is 1000.
-// This means that by default if a node misbehaves 100 times in a second, it gets disallow-listed, and takes 86.4 seconds to recover.
-// We emphasize on the default penalty value can be amplified by the engine that reports the misbehavior.
-// 3. Each time a node is disallow-listed, its decay speed is decreased by 90%. This means that if a node is disallow-listed
-// for the first time, it takes 86.4 seconds to recover. If the node is disallow-listed for the second time, its decay
-// speed is decreased by 90% from 1000 to 100, and it takes around 15 minutes to recover. If the node is disallow-listed
-// for the third time, its decay speed is decreased by 90% from 100 to 10, and it takes around 2.5 hours to recover.
-// If the node is disallow-listed for the fourth time, its decay speed is decreased by 90% from 10 to 1, and it takes
-// around a day to recover. From this point on, the decay speed is 1, and it takes around a day to recover from each
-// disallow-listing.
-const (
-	// misbehaviorDisallowListingThreshold is the threshold for concluding a node behavior is malicious and disallow-listing the node.
-	// If the overall penalty of this node drops below this threshold, the node is reported to be disallow-listed by
-	// the networking layer, i.e., existing connections to the node are closed and the node is no longer allowed to connect till
-	// its penalty is decayed back to zero.
-	// maximum block-list period is 1 day
-	misbehaviorDisallowListingThreshold = -24 * 60 * 60 // (Don't change this value)
-
-	// defaultPenaltyValue is the default penalty value for misbehaving nodes.
-	// By default, each reported infringement will be penalized by this value. However, the penalty can be amplified
-	// by the engine that reports the misbehavior. The penalty system is designed in a way that more than 100 misbehavior/sec
-	// at the default penalty value will result in disallow-listing the node. By amplifying the penalty, the engine can
-	// decrease the number of misbehavior/sec that will result in disallow-listing the node. For example, if the engine
-	// amplifies the penalty by 10, the number of misbehavior/sec that will result in disallow-listing the node will be
-	// 10 times less than the default penalty value and the node will be disallow-listed after 10 times more misbehavior/sec.
- defaultPenaltyValue = 0.01 * misbehaviorDisallowListingThreshold // (Don't change this value) - - // initialDecaySpeed is the initial decay speed of the penalty of a misbehaving node. - // The decay speed is applied on an arithmetic progression. The penalty value of the node is the first term of the - // progression and the decay speed is the common difference of the progression, i.e., p(n) = p(0) + n * d, where - // p(n) is the penalty value of the node after n decay intervals, p(0) is the initial penalty value of the node, and - // d is the decay speed. Decay intervals are set to 1 second (protocol invariant). Hence, with the initial decay speed - // of 1000, the penalty value of the node will be decreased by 1000 every second. This means that if a node misbehaves - // 100 times in a second, it gets disallow-listed, and takes 86.4 seconds to recover. - // In mature implementation of the protocol, the decay speed of a node is decreased by 90% each time the node is - // disallow-listed. This means that if a node is disallow-listed for the first time, it takes 86.4 seconds to recover. - // If the node is disallow-listed for the second time, its decay speed is decreased by 90% from 1000 to 100, and it - // takes around 15 minutes to recover. If the node is disallow-listed for the third time, its decay speed is decreased - // by 90% from 100 to 10, and it takes around 2.5 hours to recover. If the node is disallow-listed for the fourth time, - // its decay speed is decreased by 90% from 10 to 1, and it takes around a day to recover. From this point on, the decay - // speed is 1, and it takes around a day to recover from each disallow-listing. - initialDecaySpeed = 1000 // (Don't change this value) -) diff --git a/network/alsp/record.go b/network/alsp/record.go deleted file mode 100644 index 7db8e837055..00000000000 --- a/network/alsp/record.go +++ /dev/null @@ -1,51 +0,0 @@ -package alsp - -import ( - "fmt" - - "github.com/onflow/flow-go/model/flow" -) - -// ProtocolSpamRecord is a record of a misbehaving node. It is used to keep track of the Penalty value of the node -// and the number of times it has been slashed due to its Penalty value dropping below the disallow-listing threshold. -type ProtocolSpamRecord struct { - // OriginId is the node id of the misbehaving node. It is assumed an authorized (i.e., staked) node at the - // time of the misbehavior report creation (otherwise, the networking layer should not have dispatched the - // message to the Flow protocol layer in the first place). - OriginId flow.Identifier - - // Decay speed of Penalty for this misbehaving node. Each node may have a different Decay speed based on its behavior. - Decay float64 - - // CutoffCounter is a counter that is used to determine how many times the connections to the node has been cut due to - // its Penalty value dropping below the disallow-listing threshold. - // Note that the cutoff connections are recovered after a certain amount of time. - CutoffCounter uint64 - - // total Penalty value of the misbehaving node. Should be a negative value. - Penalty float64 -} - -// RecordAdjustFunc is a function that is used to adjust the fields of a ProtocolSpamRecord. -// The function is called with the current record and should return the adjusted record. -// Returned error indicates that the adjustment is not applied, and the record should not be updated. -// In BFT setup, the returned error should be treated as a fatal error. 
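// [Editor's illustration, not part of this diff] A sketch of a RecordAdjustFunc matching the
// new pointer-based signature introduced above: it applies one decay interval to a record,
// capping the penalty at zero. The function name decayOnce is made up for the example.
package example

import (
	"fmt"

	"github.com/onflow/flow-go/network/alsp/model"
)

// decayOnce moves the penalty one step along the arithmetic progression p(n+1) = p(n) + d.
var decayOnce model.RecordAdjustFunc = func(r *model.ProtocolSpamRecord) (*model.ProtocolSpamRecord, error) {
	if r == nil {
		return nil, fmt.Errorf("cannot adjust a nil spam record")
	}
	r.Penalty += r.Decay
	if r.Penalty > 0 {
		r.Penalty = 0 // penalties never become positive
	}
	return r, nil
}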
-type RecordAdjustFunc func(ProtocolSpamRecord) (ProtocolSpamRecord, error) - -// NewProtocolSpamRecord creates a new protocol spam record with the given origin id and Penalty value. -// The Decay speed of the record is set to the initial Decay speed. The CutoffCounter value is set to zero. -// The Penalty value should be a negative value. -// If the Penalty value is not a negative value, an error is returned. The error is irrecoverable and indicates a -// bug. -func NewProtocolSpamRecord(originId flow.Identifier, penalty float64) (*ProtocolSpamRecord, error) { - if penalty >= 0 { - return nil, fmt.Errorf("penalty value must be negative: %f", penalty) - } - - return &ProtocolSpamRecord{ - OriginId: originId, - Decay: initialDecaySpeed, - CutoffCounter: uint64(0), - Penalty: penalty, - }, nil -} diff --git a/network/alsp/report.go b/network/alsp/report.go index f980cb15929..8653b6c34f4 100644 --- a/network/alsp/report.go +++ b/network/alsp/report.go @@ -5,6 +5,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/alsp/model" ) // MisbehaviorReport is a report that is sent to the networking layer to penalize the misbehaving node. @@ -19,7 +20,7 @@ import ( type MisbehaviorReport struct { id flow.Identifier // the ID of the misbehaving node reason network.Misbehavior // the reason of the misbehavior - penalty int // the penalty value of the misbehavior + penalty float64 // the penalty value of the misbehavior } var _ network.MisbehaviorReport = (*MisbehaviorReport)(nil) @@ -32,10 +33,10 @@ type MisbehaviorReportOpt func(r *MisbehaviorReport) error // If the value is not in the range, an error is returned. // The returned error by this option indicates that the option is not applied. In BFT setup, the returned error // should be treated as a fatal error. -func WithPenaltyAmplification(v int) MisbehaviorReportOpt { +func WithPenaltyAmplification(v float64) MisbehaviorReportOpt { return func(r *MisbehaviorReport) error { if v <= 0 || v > 100 { - return fmt.Errorf("penalty value should be between 1-100: %d", v) + return fmt.Errorf("penalty value should be between 1-100: %v", v) } r.penalty *= v return nil @@ -53,7 +54,7 @@ func (r MisbehaviorReport) Reason() network.Misbehavior { } // Penalty returns the penalty value of the misbehavior. -func (r MisbehaviorReport) Penalty() int { +func (r MisbehaviorReport) Penalty() float64 { return r.penalty } @@ -66,7 +67,7 @@ func NewMisbehaviorReport(misbehavingId flow.Identifier, reason network.Misbehav m := &MisbehaviorReport{ id: misbehavingId, reason: reason, - penalty: defaultPenaltyValue, + penalty: model.DefaultPenaltyValue, } for _, opt := range opts { diff --git a/network/cache/rcvcache.go b/network/cache/rcvcache.go index be685ae670d..7228e1cef78 100644 --- a/network/cache/rcvcache.go +++ b/network/cache/rcvcache.go @@ -10,42 +10,30 @@ import ( "github.com/onflow/flow-go/module/mempool/stdmap" ) -// ReceiveCache implements an LRU cache of the received eventIDs that delivered to their engines +// ReceiveCache implements an LRU cache of the received eventIDs that delivered to their engines. +// Each key in this cache is the event ID represented as a flow.Identifier. 
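// [Editor's illustration, not part of this diff] The dedup contract of ReceiveCache.Add:
// the raw event ID bytes are hashed to a flow.Identifier key, and Add returns true only on
// first sight, which is what lets the networking layer drop duplicate deliveries.
package main

import (
	"fmt"

	netcache "github.com/onflow/flow-go/network/cache"
)

func main() {
	cache := netcache.NewReceiveCache(100) // size limit of 100 event IDs
	eventID := []byte("event-1")
	fmt.Println(cache.Add(eventID)) // true: unseen, deliver to engines
	fmt.Println(cache.Add(eventID)) // false: duplicate, drop
}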
type ReceiveCache struct { - c *stdmap.Backend -} - -// receiveCacheEntry represents an entry for the ReceiveCache -type receiveCacheEntry struct { - eventID flow.Identifier -} - -func (r receiveCacheEntry) ID() flow.Identifier { - return r.eventID -} - -func (r receiveCacheEntry) Checksum() flow.Identifier { - return r.eventID + *stdmap.Backend[flow.Identifier, struct{}] } // NewHeroReceiveCache returns a new HeroCache-based receive cache. -func NewHeroReceiveCache(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *ReceiveCache { - backData := herocache.NewCache(sizeLimit, +func NewHeroReceiveCache(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics, +) *ReceiveCache { + backData := herocache.NewCache[struct{}](sizeLimit, herocache.DefaultOversizeFactor, heropool.LRUEjection, // receive cache must be LRU. logger.With().Str("mempool", "receive-cache").Logger(), - collector) - backend := stdmap.NewBackend(stdmap.WithBackData(backData)) + collector, + ) + backend := stdmap.NewBackend(stdmap.WithMutableBackData[flow.Identifier, struct{}](backData)) return NewReceiveCache(uint(sizeLimit), func(cache *ReceiveCache) { - cache.c = backend + cache.Backend = backend }) } // NewReceiveCache creates and returns a new ReceiveCache func NewReceiveCache(sizeLimit uint, opts ...func(cache *ReceiveCache)) *ReceiveCache { - cache := &ReceiveCache{ - c: stdmap.NewBackend(stdmap.WithLimit(sizeLimit)), - } + cache := &ReceiveCache{stdmap.NewBackend(stdmap.WithLimit[flow.Identifier, struct{}](sizeLimit))} for _, opt := range opts { opt(cache) @@ -57,9 +45,5 @@ func NewReceiveCache(sizeLimit uint, opts ...func(cache *ReceiveCache)) *Receive // Add adds a new message to the cache if not already present. Returns true if the message is new and unseen, and false if message is duplicate, and // already has been seen by the node. func (r *ReceiveCache) Add(eventID []byte) bool { - return r.c.Add(receiveCacheEntry{eventID: flow.HashToID(eventID)}) // ignore eviction status -} - -func (r ReceiveCache) Size() uint { - return r.c.Size() + return r.Backend.Add(flow.HashToID(eventID), struct{}{}) // ignore eviction status } diff --git a/network/cache/rcvcache_test.go b/network/cache/rcvcache_test.go index 32551e1264f..36cc7b600bc 100644 --- a/network/cache/rcvcache_test.go +++ b/network/cache/rcvcache_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/message" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -14,7 +14,7 @@ import ( "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" "github.com/onflow/flow-go/module/metrics" netcache "github.com/onflow/flow-go/network/cache" @@ -43,20 +43,20 @@ func (r *ReceiveCacheTestSuite) SetupTest() { // TestSingleElementAdd adds a single element to the cache and verifies its existence. 
func (r *ReceiveCacheTestSuite) TestSingleElementAdd() { - eventID, err := network.EventId(channels.Channel("0"), []byte("event-1")) + eventID, err := message.EventId(channels.Channel("0"), []byte("event-1")) require.NoError(r.T(), err) assert.True(r.Suite.T(), r.c.Add(eventID)) assert.False(r.Suite.T(), r.c.Add(eventID)) // same channel but different event should be treated as unseen - eventID2, err := network.EventId(channels.Channel("0"), []byte("event-2")) + eventID2, err := message.EventId(channels.Channel("0"), []byte("event-2")) require.NoError(r.T(), err) assert.True(r.Suite.T(), r.c.Add(eventID2)) assert.False(r.Suite.T(), r.c.Add(eventID2)) // same event but different channels should be treated as unseen - eventID3, err := network.EventId(channels.Channel("1"), []byte("event-2")) + eventID3, err := message.EventId(channels.Channel("1"), []byte("event-2")) require.NoError(r.T(), err) assert.True(r.Suite.T(), r.c.Add(eventID3)) assert.False(r.Suite.T(), r.c.Add(eventID3)) @@ -64,7 +64,7 @@ func (r *ReceiveCacheTestSuite) TestSingleElementAdd() { // TestNoneExistence evaluates the correctness of cache operation against non-existing element func (r *ReceiveCacheTestSuite) TestNoneExistence() { - eventID, err := network.EventId(channels.Channel("1"), []byte("non-existing event")) + eventID, err := message.EventId(channels.Channel("1"), []byte("non-existing event")) require.NoError(r.T(), err) // adding new event to cache should return true @@ -76,7 +76,7 @@ func (r *ReceiveCacheTestSuite) TestMultipleElementAdd() { // creates and populates slice of 10 events eventIDs := make([]hash.Hash, 0) for i := 0; i < r.size; i++ { - eventID, err := network.EventId(channels.Channel("1"), []byte(fmt.Sprintf("event-%d", i))) + eventID, err := message.EventId(channels.Channel("1"), []byte(fmt.Sprintf("event-%d", i))) require.NoError(r.T(), err) eventIDs = append(eventIDs, eventID) @@ -114,7 +114,7 @@ func (r *ReceiveCacheTestSuite) TestLRU() { eventIDs := make([]hash.Hash, 0) total := r.size + 1 for i := 0; i < total; i++ { - eventID, err := network.EventId(channels.Channel("1"), []byte(fmt.Sprintf("event-%d", i))) + eventID, err := message.EventId(channels.Channel("1"), []byte(fmt.Sprintf("event-%d", i))) require.NoError(r.T(), err) eventIDs = append(eventIDs, eventID) diff --git a/network/channels/channel.go b/network/channels/channel.go index 07541ae6f8d..bbc3d24e868 100644 --- a/network/channels/channel.go +++ b/network/channels/channel.go @@ -2,9 +2,6 @@ package channels import ( "regexp" - "sort" - - "github.com/onflow/flow-go/model/flow" ) // Channel specifies a virtual and isolated communication medium. @@ -36,13 +33,6 @@ func (cl ChannelList) Swap(i, j int) { cl[i], cl[j] = cl[j], cl[i] } -// ID returns hash of the content of ChannelList. It first sorts the ChannelList and then takes its -// hash value. -func (cl ChannelList) ID() flow.Identifier { - sort.Sort(cl) - return flow.MakeID(cl) -} - // Contains returns true if the ChannelList contains the given channel. 
 func (cl ChannelList) Contains(channel Channel) bool {
 	for _, c := range cl {
diff --git a/network/channels/channels.go b/network/channels/channels.go
index b9394b12c64..5cd3790a665 100644
--- a/network/channels/channels.go
+++ b/network/channels/channels.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package channels
 
 import (
@@ -102,6 +100,7 @@ func PublicChannels() ChannelList {
 	return ChannelList{
 		PublicSyncCommittee,
 		PublicReceiveBlocks,
+		PublicExecutionDataService,
 	}
 }
 
@@ -154,9 +153,10 @@ const (
 	ProvideApprovalsByChunk = RequestApprovalsByChunk
 
 	// Public network channels
-	PublicPushBlocks    = Channel("public-push-blocks")
-	PublicReceiveBlocks = PublicPushBlocks
-	PublicSyncCommittee = Channel("public-sync-committee")
+	PublicPushBlocks           = Channel("public-push-blocks")
+	PublicReceiveBlocks        = PublicPushBlocks
+	PublicSyncCommittee        = Channel("public-sync-committee")
+	PublicExecutionDataService = Channel("public-execution-data-service")
 
 	// Execution data service
 	ExecutionDataService = Channel("execution-data-service")
@@ -277,13 +277,52 @@ func ChannelFromTopic(topic Topic) (Channel, bool) {
 	return "", false
 }
 
-// SporkIDFromTopic returns the spork ID from a topic.
-// All errors returned from this function can be considered benign.
-func SporkIDFromTopic(topic Topic) (flow.Identifier, error) {
+// sporkIdFromTopic returns the spork ID suffix of the given topic as a flow.Identifier.
+// A valid topic has a spork ID suffix:
+//
+//	channel/spork_id
+//
+// A generic error is returned if the spork ID is missing or cannot be converted to a flow.Identifier.
+func sporkIdFromTopic(topic Topic) (flow.Identifier, error) {
 	if index := strings.LastIndex(topic.String(), "/"); index != -1 {
-		return flow.HexStringToIdentifier(string(topic)[index+1:])
+		id, err := flow.HexStringToIdentifier(string(topic)[index+1:])
+		if err != nil {
+			return flow.Identifier{}, fmt.Errorf("failed to get spork ID from topic %s: %w", topic, err)
+		}
+
+		return id, nil
+	}
+	return flow.Identifier{}, fmt.Errorf("spork id missing from topic")
+}
+
+// sporkIdStrFromTopic returns the spork ID suffix of the given topic as a string.
+// A valid topic has a spork ID suffix:
+//
+//	channel/spork_id
+//
+// A generic error is returned if an error is encountered while deriving the spork ID from the topic.
+func sporkIdStrFromTopic(topic Topic) (string, error) {
+	sporkId, err := sporkIdFromTopic(topic)
+	if err != nil {
+		return "", err
+	}
+	return sporkId.String(), nil
+}
+
+// clusterIDStrFromTopic returns the cluster ID suffix, in flow.ChainID format, of the cluster-prefixed topic.
+// A valid cluster-prefixed channel includes the cluster prefix and cluster ID suffix:
+//
+//	sync-cluster/some_cluster_id
+//
+// A generic error is returned if the topic is malformed.
+func clusterIDStrFromTopic(topic Topic) (flow.ChainID, error) {
+	for prefix := range clusterChannelPrefixRoleMap {
+		if strings.HasPrefix(topic.String(), prefix) {
+			return flow.ChainID(strings.TrimPrefix(topic.String(), fmt.Sprintf("%s-", prefix))), nil
+		}
 	}
-	return flow.Identifier{}, fmt.Errorf("spork ID is missing")
+	return "", fmt.Errorf("failed to get cluster ID from topic %s", topic)
 }
 
 // ConsensusCluster returns a dynamic cluster consensus channel based on
@@ -298,33 +337,63 @@ func SyncCluster(clusterID flow.ChainID) Channel {
 	return Channel(fmt.Sprintf("%s-%s", SyncClusterPrefix, clusterID))
 }
 
-// IsValidFlowTopic ensures the topic is a valid Flow network topic.
-// A valid Topic has the following properties:
-// - A Channel can be derived from the Topic and that channel exists.
-// - The sporkID part of the Topic is equal to the current network sporkID.
-// All errors returned from this function can be considered benign.
-func IsValidFlowTopic(topic Topic, expectedSporkID flow.Identifier) error {
-	channel, ok := ChannelFromTopic(topic)
-	if !ok {
-		return fmt.Errorf("invalid topic: failed to get channel from topic")
-	}
-	err := IsValidFlowChannel(channel)
+// IsValidNonClusterFlowTopic ensures the topic is a valid Flow network topic and
+// ensures the sporkID part of the Topic is equal to the current network sporkID.
+// Expected errors:
+// - InvalidTopicErr if the topic is not a valid topic for the given spork.
+func IsValidNonClusterFlowTopic(topic Topic, expectedSporkID flow.Identifier) error {
+	sporkID, err := sporkIdStrFromTopic(topic)
 	if err != nil {
-		return fmt.Errorf("invalid topic: %w", err)
+		return NewInvalidTopicErr(topic, fmt.Errorf("failed to get spork ID from topic: %w", err))
 	}
 
-	if IsClusterChannel(channel) {
-		return nil
+	if sporkID != expectedSporkID.String() {
+		return NewInvalidTopicErr(topic, fmt.Errorf("spork ID mismatch: expected spork ID %s, actual spork ID %s", expectedSporkID, sporkID))
 	}
 
-	sporkID, err := SporkIDFromTopic(topic)
+	return isValidFlowTopic(topic)
+}
+
+// IsValidFlowClusterTopic ensures the topic is a valid Flow network topic and
+// ensures the cluster ID part of the Topic is equal to one of the provided active cluster IDs.
+// All errors returned from this function can be considered benign.
+// Expected errors:
+// - InvalidTopicErr if the topic is not a valid Flow topic or the cluster ID cannot be derived from the topic.
+// - UnknownClusterIDErr if the cluster ID from the topic is not in the activeClusterIDS list.
+func IsValidFlowClusterTopic(topic Topic, activeClusterIDS flow.ChainIDList) error {
+	err := isValidFlowTopic(topic)
 	if err != nil {
 		return err
 	}
-	if sporkID != expectedSporkID {
-		return fmt.Errorf("invalid topic: wrong spork ID %s the current spork ID is %s", sporkID, expectedSporkID)
+
+	clusterID, err := clusterIDStrFromTopic(topic)
+	if err != nil {
+		return NewInvalidTopicErr(topic, fmt.Errorf("failed to get cluster ID from topic: %w", err))
+	}
+
+	for _, activeClusterID := range activeClusterIDS {
+		if clusterID == activeClusterID {
+			return nil
+		}
 	}
+
+	return NewUnknownClusterIdErr(clusterID, activeClusterIDS)
+}
+
+// isValidFlowTopic ensures the topic is a valid Flow network topic.
+// A valid Topic has the following properties:
+// - A Channel can be derived from the Topic and that channel exists.
+// Expected errors:
+// - InvalidTopicErr if the topic is not a valid Flow topic.
+func isValidFlowTopic(topic Topic) error {
+	channel, ok := ChannelFromTopic(topic)
+	if !ok {
+		return NewInvalidTopicErr(topic, fmt.Errorf("invalid topic: failed to get channel from topic"))
+	}
+	err := IsValidFlowChannel(channel)
+	if err != nil {
+		return NewInvalidTopicErr(topic, fmt.Errorf("invalid topic: %w", err))
+	}
 	return nil
 }
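// [Editor's illustration, not part of this diff] How a caller might branch on the structured
// errors returned by the validators above; the handler name and log output are made up.
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/network/channels"
)

func handleClusterTopic(topic channels.Topic, activeClusterIDs flow.ChainIDList) {
	err := channels.IsValidFlowClusterTopic(topic, activeClusterIDs)
	switch {
	case err == nil:
		fmt.Println("valid cluster-prefixed topic")
	case channels.IsUnknownClusterIDErr(err):
		// benign: the cluster may simply not be active from this node's point of view
		fmt.Println("unknown cluster ID:", err)
	case channels.IsInvalidTopicErr(err):
		// malformed topic: a candidate for misbehavior reporting
		fmt.Println("invalid topic:", err)
	}
}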
diff --git a/network/channels/errors.go b/network/channels/errors.go
new file mode 100644
index 00000000000..3be8c826417
--- /dev/null
+++ b/network/channels/errors.go
@@ -0,0 +1,50 @@
+package channels
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// InvalidTopicErr is an error wrapper that indicates an error when checking if a Topic is a valid Flow Topic.
+type InvalidTopicErr struct {
+	topic Topic
+	err   error
+}
+
+func (e InvalidTopicErr) Error() string {
+	return fmt.Errorf("invalid topic %s: %w", e.topic, e.err).Error()
+}
+
+// NewInvalidTopicErr returns a new InvalidTopicErr.
+func NewInvalidTopicErr(topic Topic, err error) InvalidTopicErr {
+	return InvalidTopicErr{topic: topic, err: err}
+}
+
+// IsInvalidTopicErr returns true if an error is InvalidTopicErr.
+func IsInvalidTopicErr(err error) bool {
+	var e InvalidTopicErr
+	return errors.As(err, &e)
+}
+
+// UnknownClusterIDErr is an error wrapper that indicates an invalid topic with an unknown cluster ID prefix.
+type UnknownClusterIDErr struct {
+	clusterId        flow.ChainID
+	activeClusterIds flow.ChainIDList
+}
+
+func (e UnknownClusterIDErr) Error() string {
+	return fmt.Errorf("cluster ID %s not found in active cluster IDs list %s", e.clusterId, e.activeClusterIds).Error()
+}
+
+// NewUnknownClusterIdErr returns a new UnknownClusterIDErr.
+func NewUnknownClusterIdErr(clusterId flow.ChainID, activeClusterIds flow.ChainIDList) UnknownClusterIDErr {
+	return UnknownClusterIDErr{clusterId: clusterId, activeClusterIds: activeClusterIds}
+}
+
+// IsUnknownClusterIDErr returns true if an error is UnknownClusterIDErr.
+func IsUnknownClusterIDErr(err error) bool {
+	var e UnknownClusterIDErr
+	return errors.As(err, &e)
+}
diff --git a/network/channels/errors_test.go b/network/channels/errors_test.go
new file mode 100644
index 00000000000..56dd23cd09b
--- /dev/null
+++ b/network/channels/errors_test.go
@@ -0,0 +1,46 @@
+package channels
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// TestInvalidTopicErrRoundTrip ensures correct error formatting for InvalidTopicErr.
+func TestInvalidTopicErrRoundTrip(t *testing.T) {
+	topic := Topic("invalid-topic")
+	wrapErr := fmt.Errorf("this err should be wrapped with topic to add context")
+	err := NewInvalidTopicErr(topic, wrapErr)
+
+	// tests the error message formatting.
+	expectedErrMsg := fmt.Errorf("invalid topic %s: %w", topic, wrapErr).Error()
+	assert.Equal(t, expectedErrMsg, err.Error(), "the error message should be correctly formatted")
+
+	// tests the IsInvalidTopicErr function.
+	assert.True(t, IsInvalidTopicErr(err), "IsInvalidTopicErr should return true for InvalidTopicErr error")
+
+	// tests IsInvalidTopicErr with a different error type.
+	dummyErr := fmt.Errorf("dummy error")
+	assert.False(t, IsInvalidTopicErr(dummyErr), "IsInvalidTopicErr should return false for non-InvalidTopicErr error")
+}
+
+// TestUnknownClusterIDErrRoundTrip ensures correct error formatting for UnknownClusterIDErr.
+func TestUnknownClusterIDErrRoundTrip(t *testing.T) {
+	clusterId := flow.ChainID("cluster-id")
+	activeClusterIds := flow.ChainIDList{"active", "cluster", "ids"}
+	err := NewUnknownClusterIdErr(clusterId, activeClusterIds)
+
+	// tests the error message formatting.
+	expectedErrMsg := fmt.Errorf("cluster ID %s not found in active cluster IDs list %s", clusterId, activeClusterIds).Error()
+	assert.Equal(t, expectedErrMsg, err.Error(), "the error message should be correctly formatted")
+
+	// tests the IsUnknownClusterIDErr function.
+	assert.True(t, IsUnknownClusterIDErr(err), "IsUnknownClusterIDErr should return true for UnknownClusterIDErr error")
+
+	// tests IsUnknownClusterIDErr with a different error type.
+ dummyErr := fmt.Errorf("dummy error") + assert.False(t, IsUnknownClusterIDErr(dummyErr), "IsUnknownClusterIDErr should return false for non-UnknownClusterIDErr error") +} diff --git a/network/codec.go b/network/codec.go index a0c04b1f3cf..b1998a1b006 100644 --- a/network/codec.go +++ b/network/codec.go @@ -1,9 +1,9 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package network import ( "io" + + "github.com/onflow/flow-go/model/messages" ) // Codec provides factory functions for encoders and decoders. @@ -17,7 +17,7 @@ type Codec interface { // - codec.ErrInvalidEncoding if message encoding is invalid. // - codec.ErrUnknownMsgCode if message code byte does not match any of the configured message codes. // - codec.ErrMsgUnmarshal if the codec fails to unmarshal the data to the message type denoted by the message code. - Decode(data []byte) (interface{}, error) + Decode(data []byte) (messages.UntrustedMessage, error) } // Encoder encodes the given message into the underlying writer. @@ -31,5 +31,5 @@ type Encoder interface { // - codec.ErrUnknownMsgCode if message code byte does not match any of the configured message codes. // - codec.ErrMsgUnmarshal if the codec fails to unmarshal the data to the message type denoted by the message code. type Decoder interface { - Decode() (interface{}, error) + Decode() (messages.UntrustedMessage, error) } diff --git a/network/codec/cbor/cbor_behaviour_test.go b/network/codec/cbor/cbor_behaviour_test.go new file mode 100644 index 00000000000..1c9b8d6ef8c --- /dev/null +++ b/network/codec/cbor/cbor_behaviour_test.go @@ -0,0 +1,139 @@ +package cbor + +import ( + "testing" + + "github.com/fxamacker/cbor/v2" + "github.com/stretchr/testify/assert" + + cborcodec "github.com/onflow/flow-go/model/encoding/cbor" +) + +// The CBOR network codec uses the [cbor.ExtraDecErrorUnknownField] option, which +// causes decoding to return an error when decoding a message which contains an +// extra field, not present in the target (struct into which we are decoding). +// +// This test validates this behaviour. +func TestBehaviour_DecodeExtraField(t *testing.T) { + t.Run("decoding NON-ZERO VALUE of extra field not present in the target struct is forbidden", func(t *testing.T) { + type model1 struct { + A int + } + type model2 struct { + A int + B int + } + + m2 := model2{ + A: 100, + B: 200, + } + bz, err := cborcodec.EncMode.Marshal(m2) + assert.NoError(t, err) + + var m1 model1 + err = cborcodec.DefaultDecMode.Unmarshal(bz, &m1) + assert.Error(t, err) + target := &cbor.UnknownFieldError{} + assert.ErrorAs(t, err, &target) + }) + + t.Run("decoding ZERO VALUE of extra field not present in the target struct is forbidden", func(t *testing.T) { + type model1 struct { + A *int + } + type model2 struct { + A *int + B *int + } + + a := 100 + m2 := model2{ + A: &a, + // B has zero-value + } + bz, err := cborcodec.EncMode.Marshal(m2) + assert.NoError(t, err) + + var m1 model1 + err = cborcodec.DefaultDecMode.Unmarshal(bz, &m1) + assert.Error(t, err) + target := &cbor.UnknownFieldError{} + assert.ErrorAs(t, err, &target) + }) +} + +// The CBOR network codec uses the [cbor.ExtraDecErrorUnknownField] option, which +// causes decoding to return an error when decoding a message which contains an +// extra field, not present in the target (struct into which we are decoding). +// +// This test validates that, when decoding a message which OMITS a field present +// in the target, no error is returned. 
+//
+// This behaviour is very useful for backwards compatibility: for example, if we add
+// a new field B to a struct, nodes running the updated software can still decode
+// messages emitted by the old software - with the convention that in the decoded
+// message, field B has the zero-value.
+// However, note that the reverse (i.e. downwards compatibility) is not true *by default*.
+// Specifically, the old software cannot decode the new struct, even if field B has the
+// zero value, as demonstrated by the test [TestBehaviour_DecodeExtraField] above.
+//
+// Nevertheless, downwards compatibility can be improved with suitable conventions,
+// as demonstrated in the test [TestBehaviour_OmittingNewFieldForDownwardsCompatibility] below.
+func TestBehaviour_DecodeOmittedField(t *testing.T) {
+	type model1 struct {
+		A int
+	}
+	type model2 struct {
+		A int
+		B int
+	}
+
+	m1 := model1{
+		A: 100,
+	}
+	bz, err := cborcodec.EncMode.Marshal(m1)
+	assert.NoError(t, err)
+
+	var m2 model2
+	err = cborcodec.DefaultDecMode.Unmarshal(bz, &m2)
+	assert.NoError(t, err)
+	assert.Equal(t, m2.A, m1.A)
+	assert.Equal(t, m2.B, int(0))
+}
+
+// This test demonstrates a possible convention for improving downwards compatibility
+// when we want to add a new field to an existing struct. Let's say that the struct
+// `model1` describes the old data structure, to which we want to add a new integer
+// field `B`.
+// Note that the following pattern only works out of the box if field `B` is required
+// according to the new protocol convention. In other words, the new software can
+// differentiate between the old and the new data model based on whether field `B`
+// is present.
+// The important aspects are:
+//  1. Define field `B` as a pointer variable. Thereby, the new software can represent
+//     an old data model with `B` being nil, while the new data model always has `B` ≠ nil.
+//  2. In the new software, provide the cbor directive `cbor:",omitempty"`, which instructs
+//     cbor to omit the field entirely during the encoding step. Thereby, the new software
+//     reproduces the encoding of the old software when dealing with the old data model.
+func TestBehaviour_OmittingNewFieldForDownwardsCompatibility(t *testing.T) {
+	type model1 struct {
+		A int
+	}
+	type model2 struct {
+		A int
+		B *int `cbor:",omitempty"`
+	}
+
+	a := 100
+	m2 := model2{
+		A: a,
+	}
+	// m2.B is `nil`, which cbor will omit in the encoding step according to our directive "omitempty"
+	bz, err := cborcodec.EncMode.Marshal(m2)
+	assert.NoError(t, err)
+
+	var m1 model1
+	err = cborcodec.DefaultDecMode.Unmarshal(bz, &m1)
+	assert.NoError(t, err)
+}
diff --git a/network/codec/cbor/codec.go b/network/codec/cbor/codec.go
index fa5a6acb451..baadbb6d71c 100644
--- a/network/codec/cbor/codec.go
+++ b/network/codec/cbor/codec.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package cbor
 
 import (
@@ -7,21 +5,24 @@ import (
 	"fmt"
 	"io"
 
-	"github.com/fxamacker/cbor/v2"
-
 	cborcodec "github.com/onflow/flow-go/model/encoding/cbor"
+	"github.com/onflow/flow-go/model/messages"
 	"github.com/onflow/flow-go/network"
 	"github.com/onflow/flow-go/network/codec"
 	_ "github.com/onflow/flow-go/utils/binstat"
 )
 
-var defaultDecMode, _ = cbor.DecOptions{ExtraReturnErrors: cbor.ExtraDecErrorUnknownField}.DecMode()
-
 // Codec represents a CBOR codec for our network.
-type Codec struct {
-}
-
-// NewCodec creates a new CBOR codec.
+type Codec struct{}
+
+// NewCodec creates a new CBOR network codec.
+//
+// CAUTION: this encoding should only be used for encoding/decoding data within a node.
+// If used for decoding data that is shared between nodes, it makes the recipient VULNERABLE
+// to RESOURCE EXHAUSTION ATTACKS, where a byzantine sender could include garbage data in the
+// encoding, which would not be noticed by the recipient because the garbage data is dropped
+// at the decoding step - yet, it consumes the recipient's networking bandwidth.
 func NewCodec() *Codec {
 	c := &Codec{}
 	return c
@@ -35,7 +36,7 @@
 
 // NewDecoder creates a new CBOR decoder with the given underlying reader.
 func (c *Codec) NewDecoder(r io.Reader) network.Decoder {
-	dec := defaultDecMode.NewDecoder(r)
+	dec := cborcodec.DefaultDecMode.NewDecoder(r)
 	return &Decoder{dec: dec}
 }
 
@@ -88,7 +89,7 @@ func (c *Codec) Encode(v interface{}) ([]byte, error) {
 // - codec.ErrInvalidEncoding if message encoding is invalid.
 // - codec.ErrUnknownMsgCode if message code byte does not match any of the configured message codes.
 // - codec.ErrMsgUnmarshal if the codec fails to unmarshal the data to the message type denoted by the message code.
-func (c *Codec) Decode(data []byte) (interface{}, error) {
+func (c *Codec) Decode(data []byte) (messages.UntrustedMessage, error) {
 
 	msgCode, err := codec.MessageCodeFromPayload(data)
 	if err != nil {
@@ -106,7 +107,7 @@ func (c *Codec) Decode(data []byte) (interface{}, error) {
 	// unmarshal the payload
 	//bs2 := binstat.EnterTimeVal(fmt.Sprintf("%s%s%s:%d", binstat.BinNet, ":wire>4(cbor)", what, code), int64(len(data))) // e.g. ~3net:wire>4(cbor)CodeEntityRequest:23
-	err = defaultDecMode.Unmarshal(data[1:], msgInterface) // all but first byte
+	err = cborcodec.DefaultDecMode.Unmarshal(data[1:], msgInterface) // all but first byte
 	//binstat.Leave(bs2)
 	if err != nil {
 		return nil, codec.NewMsgUnmarshalErr(data[0], what, err)
diff --git a/network/codec/cbor/codec_test.go b/network/codec/cbor/codec_test.go
index 8d3c555b87d..df79856b0a3 100644
--- a/network/codec/cbor/codec_test.go
+++ b/network/codec/cbor/codec_test.go
@@ -6,31 +6,26 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
+	"github.com/onflow/flow-go/model/messages"
 	"github.com/onflow/flow-go/network/codec"
 	"github.com/onflow/flow-go/network/codec/cbor"
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
 func TestCodec_Decode(t *testing.T) {
-	t.Parallel()
-
 	c := cbor.NewCodec()
 
 	t.Run("decodes message successfully", func(t *testing.T) {
-		t.Parallel()
+		data := messages.Proposal(*unittest.ProposalFixture())
 
-		data := unittest.ProposalFixture()
-		encoded, err := c.Encode(data)
+		encoded, err := c.Encode(&data)
 		require.NoError(t, err)
 
 		decoded, err := c.Decode(encoded)
 		require.NoError(t, err)
-		require.Equal(t, data, decoded)
+		require.Equal(t, &data, decoded)
 	})
 
 	t.Run("returns error when data is empty", func(t *testing.T) {
-		t.Parallel()
-
 		decoded, err := c.Decode(nil)
 		assert.Nil(t, decoded)
 		assert.True(t, codec.IsErrInvalidEncoding(err))
@@ -41,8 +36,6 @@
 	})
 
 	t.Run("returns error when message code is invalid", func(t *testing.T) {
-		t.Parallel()
-
 		decoded, err := c.Decode([]byte{codec.CodeMin.Uint8()})
 		assert.Nil(t, decoded)
 		assert.True(t, codec.IsErrUnknownMsgCode(err))
@@ -61,18 +54,14 @@
 	})
 
 	t.Run("returns error when unmarshalling fails - empty", func(t *testing.T) {
-		t.Parallel()
-
 		decoded, err
:= c.Decode([]byte{codec.CodeBlockProposal.Uint8()}) assert.Nil(t, decoded) assert.True(t, codec.IsErrMsgUnmarshal(err)) }) t.Run("returns error when unmarshalling fails - wrong type", func(t *testing.T) { - t.Parallel() - - data := unittest.ProposalFixture() - encoded, err := c.Encode(data) + data := messages.Proposal(*unittest.ProposalFixture()) + encoded, err := c.Encode(&data) require.NoError(t, err) encoded[0] = codec.CodeCollectionGuarantee.Uint8() @@ -83,10 +72,8 @@ func TestCodec_Decode(t *testing.T) { }) t.Run("returns error when unmarshalling fails - corrupt", func(t *testing.T) { - t.Parallel() - - data := unittest.ProposalFixture() - encoded, err := c.Encode(data) + data := messages.Proposal(*unittest.ProposalFixture()) + encoded, err := c.Encode(&data) require.NoError(t, err) encoded[2] = 0x20 // corrupt payload diff --git a/network/codec/cbor/decoder.go b/network/codec/cbor/decoder.go index 77dd48bd82c..552b21ba7c4 100644 --- a/network/codec/cbor/decoder.go +++ b/network/codec/cbor/decoder.go @@ -1,10 +1,10 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package cbor import ( "github.com/fxamacker/cbor/v2" + cborcodec "github.com/onflow/flow-go/model/encoding/cbor" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/network/codec" _ "github.com/onflow/flow-go/utils/binstat" ) @@ -19,8 +19,7 @@ type Decoder struct { // - codec.ErrInvalidEncoding if message encoding is invalid. // - codec.ErrUnknownMsgCode if message code byte does not match any of the configured message codes. // - codec.ErrMsgUnmarshal if the codec fails to unmarshal the data to the message type denoted by the message code. -func (d *Decoder) Decode() (interface{}, error) { - +func (d *Decoder) Decode() (messages.UntrustedMessage, error) { // read from stream and extract code var data []byte //bs1 := binstat.EnterTime(binstat.BinNet + ":strm>1(cbor)iowriter2payload2envelope") @@ -42,7 +41,7 @@ func (d *Decoder) Decode() (interface{}, error) { // unmarshal the payload //bs2 := binstat.EnterTimeVal(fmt.Sprintf("%s%s%s:%d", binstat.BinNet, ":strm>2(cbor)", what, code), int64(len(data))) // e.g. 
~3net:strm>2(cbor)CodeEntityRequest:23 - err = defaultDecMode.Unmarshal(data[1:], msgInterface) // all but first byte + err = cborcodec.DefaultDecMode.Unmarshal(data[1:], msgInterface) // all but first byte //binstat.Leave(bs2) if err != nil { return nil, codec.NewMsgUnmarshalErr(data[0], what, err) diff --git a/network/codec/cbor/decoder_test.go b/network/codec/cbor/decoder_test.go index 1f71cb5e306..a5c293b4def 100644 --- a/network/codec/cbor/decoder_test.go +++ b/network/codec/cbor/decoder_test.go @@ -8,34 +8,29 @@ import ( "github.com/stretchr/testify/require" cborcodec "github.com/onflow/flow-go/model/encoding/cbor" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/network/codec" "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/utils/unittest" ) func TestDecoder_Decode(t *testing.T) { - t.Parallel() - c := cbor.NewCodec() - blockProposal := unittest.ProposalFixture() + blockProposal := messages.Proposal(*unittest.ProposalFixture()) t.Run("decodes message successfully", func(t *testing.T) { - t.Parallel() - var buf bytes.Buffer - err := c.NewEncoder(&buf).Encode(blockProposal) + err := c.NewEncoder(&buf).Encode(&blockProposal) require.NoError(t, err) decoded, err := c.NewDecoder(&buf).Decode() require.NoError(t, err) - require.Equal(t, blockProposal, decoded) + require.Equal(t, &blockProposal, decoded.(*messages.Proposal)) }) t.Run("returns error when data is empty", func(t *testing.T) { - t.Parallel() - var buf bytes.Buffer // empty buffer @@ -45,8 +40,6 @@ func TestDecoder_Decode(t *testing.T) { }) t.Run("returns error when data is empty - nil byte", func(t *testing.T) { - t.Parallel() - var buf bytes.Buffer // nil byte @@ -58,8 +51,6 @@ func TestDecoder_Decode(t *testing.T) { }) t.Run("returns error when data is empty - cbor nil", func(t *testing.T) { - t.Parallel() - var buf bytes.Buffer // explicit cbor encoding of nil @@ -72,8 +63,6 @@ func TestDecoder_Decode(t *testing.T) { }) t.Run("returns error when data is empty - cbor empty []byte", func(t *testing.T) { - t.Parallel() - var buf bytes.Buffer // explicit cbor encoding of an empty byte slice @@ -86,8 +75,6 @@ func TestDecoder_Decode(t *testing.T) { }) t.Run("returns error when message code is invalid", func(t *testing.T) { - t.Parallel() - var buf bytes.Buffer // the first byte is the message code, the remaining bytes are the message @@ -113,8 +100,6 @@ func TestDecoder_Decode(t *testing.T) { }) t.Run("returns error when unmarshalling fails - empty", func(t *testing.T) { - t.Parallel() - var buf bytes.Buffer err := cborcodec.NewCodec().NewEncoder(&buf).Encode([]byte{codec.CodeBlockProposal.Uint8()}) @@ -126,8 +111,6 @@ func TestDecoder_Decode(t *testing.T) { }) t.Run("returns error when unmarshalling fails - wrong type", func(t *testing.T) { - t.Parallel() - // first encode the message to bytes with an incorrect type var data bytes.Buffer _ = data.WriteByte(codec.CodeCollectionGuarantee.Uint8()) @@ -147,8 +130,6 @@ func TestDecoder_Decode(t *testing.T) { }) t.Run("returns error when unmarshalling fails - corrupt", func(t *testing.T) { - t.Parallel() - // first encode the message to bytes var data bytes.Buffer _ = data.WriteByte(codec.CodeBlockProposal.Uint8()) diff --git a/network/codec/cbor/encoder.go b/network/codec/cbor/encoder.go index c154ddebead..e7b14682403 100644 --- a/network/codec/cbor/encoder.go +++ b/network/codec/cbor/encoder.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package cbor import ( diff --git a/network/codec/codes.go 
b/network/codec/codes.go index 6a91576e17a..84d39397dc6 100644 --- a/network/codec/codes.go +++ b/network/codec/codes.go @@ -1,11 +1,8 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package codec import ( "fmt" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/model/messages" ) @@ -39,7 +36,7 @@ const ( // collections, guarantees & transactions CodeCollectionGuarantee - CodeTransaction + _ // DEPRECATED as of Mainnet 27; previously used for essentially an executed transaction CodeTransactionBody // core messages for execution & verification @@ -72,7 +69,7 @@ func MessageCodeFromInterface(v interface{}) (MessageCode, string, error) { s := what(v) switch v.(type) { // consensus - case *messages.BlockProposal: + case *messages.Proposal: return CodeBlockProposal, s, nil case *messages.BlockVote: return CodeBlockVote, s, nil @@ -80,7 +77,7 @@ func MessageCodeFromInterface(v interface{}) (MessageCode, string, error) { return CodeTimeoutObject, s, nil // cluster consensus - case *messages.ClusterBlockProposal: + case *messages.ClusterProposal: return CodeClusterBlockProposal, s, nil case *messages.ClusterBlockVote: return CodeClusterBlockVote, s, nil @@ -102,17 +99,15 @@ func MessageCodeFromInterface(v interface{}) (MessageCode, string, error) { return CodeBlockResponse, s, nil // collections, guarantees & transactions - case *flow.CollectionGuarantee: + case *messages.CollectionGuarantee: return CodeCollectionGuarantee, s, nil - case *flow.TransactionBody: + case *messages.TransactionBody: return CodeTransactionBody, s, nil - case *flow.Transaction: - return CodeTransaction, s, nil // core messages for execution & verification - case *flow.ExecutionReceipt: + case *messages.ExecutionReceipt: return CodeExecutionReceipt, s, nil - case *flow.ResultApproval: + case *messages.ResultApproval: return CodeResultApproval, s, nil // data exchange for execution of blocks @@ -150,11 +145,11 @@ func MessageCodeFromInterface(v interface{}) (MessageCode, string, error) { // of the message code represents. // Expected error returns during normal operations: // - ErrUnknownMsgCode if message code does not match any of the configured message codes above. 
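// [Editor's illustration, not part of this diff] A sketch of the envelope layout the network
// codec produces: one message-code byte followed by the CBOR payload. Only functions shown in
// this diff (Encode, MessageCodeFromPayload, InterfaceFromMessageCode, Decode) are used; the
// helper name inspectEnvelope is made up.
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/messages"
	"github.com/onflow/flow-go/network/codec"
	"github.com/onflow/flow-go/network/codec/cbor"
)

func inspectEnvelope(vote *messages.BlockVote) error {
	c := cbor.NewCodec()
	data, err := c.Encode(vote)
	if err != nil {
		return err
	}
	// the first byte is the message code; the registry maps it back to an empty target value
	msgCode, err := codec.MessageCodeFromPayload(data)
	if err != nil {
		return err
	}
	target, name, err := codec.InterfaceFromMessageCode(msgCode)
	if err != nil {
		return err
	}
	fmt.Printf("code=%v type=%s target=%T\n", msgCode, name, target)
	_, err = c.Decode(data) // decodes into a messages.UntrustedMessage
	return err
}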
-func InterfaceFromMessageCode(code MessageCode) (interface{}, string, error) { +func InterfaceFromMessageCode(code MessageCode) (messages.UntrustedMessage, string, error) { switch code { // consensus case CodeBlockProposal: - return &messages.BlockProposal{}, what(&messages.BlockProposal{}), nil + return &messages.Proposal{}, what(&messages.Proposal{}), nil case CodeBlockVote: return &messages.BlockVote{}, what(&messages.BlockVote{}), nil case CodeTimeoutObject: @@ -162,7 +157,7 @@ func InterfaceFromMessageCode(code MessageCode) (interface{}, string, error) { // cluster consensus case CodeClusterBlockProposal: - return &messages.ClusterBlockProposal{}, what(&messages.ClusterBlockProposal{}), nil + return &messages.ClusterProposal{}, what(&messages.ClusterProposal{}), nil case CodeClusterBlockVote: return &messages.ClusterBlockVote{}, what(&messages.ClusterBlockVote{}), nil case CodeClusterBlockResponse: @@ -182,19 +177,17 @@ func InterfaceFromMessageCode(code MessageCode) (interface{}, string, error) { case CodeBlockResponse: return &messages.BlockResponse{}, what(&messages.BlockResponse{}), nil - // collections, guarantees & transactions + // collection guarantees & transactions case CodeCollectionGuarantee: - return &flow.CollectionGuarantee{}, what(&flow.CollectionGuarantee{}), nil + return &messages.CollectionGuarantee{}, what(&messages.CollectionGuarantee{}), nil case CodeTransactionBody: - return &flow.TransactionBody{}, what(&flow.TransactionBody{}), nil - case CodeTransaction: - return &flow.Transaction{}, what(&flow.Transaction{}), nil + return &messages.TransactionBody{}, what(&messages.TransactionBody{}), nil // core messages for execution & verification case CodeExecutionReceipt: - return &flow.ExecutionReceipt{}, what(&flow.ExecutionReceipt{}), nil + return &messages.ExecutionReceipt{}, what(&messages.ExecutionReceipt{}), nil case CodeResultApproval: - return &flow.ResultApproval{}, what(&flow.ResultApproval{}), nil + return &messages.ResultApproval{}, what(&messages.ResultApproval{}), nil // data exchange for execution of blocks case CodeChunkDataRequest: diff --git a/network/codec/roundTripHeader_test.go b/network/codec/roundTripHeader_test.go index 382b7fd808b..1c33c320142 100644 --- a/network/codec/roundTripHeader_test.go +++ b/network/codec/roundTripHeader_test.go @@ -22,23 +22,24 @@ import ( // next developer who wants to add a new serialization format :-) func roundTripHeaderViaCodec(t *testing.T, codec network.Codec) { block := unittest.BlockFixture() - message := messages.NewBlockProposal(&block) - encoded, err := codec.Encode(message) + proposal := unittest.ProposalFromBlock(block) + message := messages.Proposal(*proposal) + encoded, err := codec.Encode(&message) assert.NoError(t, err) decodedInterface, err := codec.Decode(encoded) assert.NoError(t, err) - decoded := decodedInterface.(*messages.BlockProposal) - decodedBlock := decoded.Block.ToInternal() + decoded := decodedInterface.(*messages.Proposal) + decodedBlock := decoded.Block // compare LastViewTC separately, because it is a pointer field - if decodedBlock.Header.LastViewTC == nil { - assert.Equal(t, block.Header.LastViewTC, decodedBlock.Header.LastViewTC) + if decodedBlock.LastViewTC == nil { + assert.Equal(t, block.LastViewTC, decodedBlock.LastViewTC) } else { - assert.Equal(t, *block.Header.LastViewTC, *decodedBlock.Header.LastViewTC) + assert.Equal(t, *block.LastViewTC, *decodedBlock.LastViewTC) } // compare the rest of the header // manually set LastViewTC fields to be equal to pass the Header pointer 
comparison - decodedBlock.Header.LastViewTC = block.Header.LastViewTC - assert.Equal(t, *block.Header, *decodedBlock.Header) + decodedBlock.LastViewTC = block.LastViewTC + assert.Equal(t, *block.ToHeader(), *decodedBlock.ToHeader()) } func TestRoundTripHeaderViaCBOR(t *testing.T) { diff --git a/network/compressor/lz4Compressor.go b/network/compressor/lz4Compressor.go index 3d7365b36e3..cb313d4fd03 100644 --- a/network/compressor/lz4Compressor.go +++ b/network/compressor/lz4Compressor.go @@ -3,7 +3,7 @@ package compressor import ( "io" - "github.com/pierrec/lz4" + "github.com/pierrec/lz4/v4" "github.com/onflow/flow-go/network" ) diff --git a/network/conduit.go b/network/conduit.go index fa6e891e09a..5002eb9a291 100644 --- a/network/conduit.go +++ b/network/conduit.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package network import ( @@ -13,13 +11,13 @@ import ( // ConduitFactory is an interface type that is utilized by the Network to create conduits for the channels. type ConduitFactory interface { - // RegisterAdapter sets the Adapter component of the factory. - // The Adapter is a wrapper around the Network layer that only exposes the set of methods + // RegisterAdapter sets the ConduitAdapter component of the factory. + // The ConduitAdapter is a wrapper around the Network layer that only exposes the set of methods // that are needed by a conduit. - RegisterAdapter(Adapter) error + RegisterAdapter(ConduitAdapter) error // NewConduit creates a conduit on the specified channel. - // Prior to creating any conduit, the factory requires an Adapter to be registered with it. + // Prior to creating any conduit, the factory requires an ConduitAdapter to be registered with it. NewConduit(context.Context, channels.Channel) (Conduit, error) } diff --git a/network/converter/network.go b/network/converter/network.go index a30bb683d61..a6947f390e3 100644 --- a/network/converter/network.go +++ b/network/converter/network.go @@ -6,14 +6,14 @@ import ( ) type Network struct { - network.Network + network.EngineRegistry from channels.Channel to channels.Channel } -var _ network.Network = (*Network)(nil) +var _ network.EngineRegistry = (*Network)(nil) -func NewNetwork(net network.Network, from channels.Channel, to channels.Channel) *Network { +func NewNetwork(net network.EngineRegistry, from channels.Channel, to channels.Channel) *Network { return &Network{net, from, to} } @@ -25,5 +25,5 @@ func (n *Network) convert(channel channels.Channel) channels.Channel { } func (n *Network) Register(channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) { - return n.Network.Register(n.convert(channel), engine) + return n.EngineRegistry.Register(n.convert(channel), engine) } diff --git a/network/disallow.go b/network/disallow.go new file mode 100644 index 00000000000..feaa6d2b27b --- /dev/null +++ b/network/disallow.go @@ -0,0 +1,49 @@ +package network + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// DisallowListedCause is a type representing the cause of disallow listing. A remote node may be disallow-listed by the +// current node for a variety of reasons. This type is used to represent the reason for disallow-listing, so that if +// a node is disallow-listed for reasons X and Y, allow-listing it back for reason X does not automatically allow-list +// it for reason Y. 
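// [Editor's illustration, not part of this diff] A sketch of why disallow-listing causes are
// tracked independently: a node disallow-listed for two causes stays blocked until every
// cause is lifted. The causeSet type is made up for the example.
package example

import "github.com/onflow/flow-go/model/flow"

// causeSet maps a node ID to the set of causes it is currently disallow-listed for.
type causeSet map[flow.Identifier]map[string]struct{}

func (s causeSet) disallow(id flow.Identifier, cause string) {
	if s[id] == nil {
		s[id] = make(map[string]struct{})
	}
	s[id][cause] = struct{}{}
}

// allow removes a single cause; e.g. allow-listing for "disallow-listed-admin" does not
// unblock a node that is still disallow-listed for "disallow-listed-alsp".
func (s causeSet) allow(id flow.Identifier, cause string) {
	delete(s[id], cause)
}

// blocked reports whether any cause remains active for the node.
func (s causeSet) blocked(id flow.Identifier) bool {
	return len(s[id]) > 0
}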
+type DisallowListedCause string + +func (c DisallowListedCause) String() string { + return string(c) +} + +const ( + // DisallowListedCauseAdmin is the cause of disallow-listing a node by an admin command. + DisallowListedCauseAdmin DisallowListedCause = "disallow-listed-admin" + // DisallowListedCauseAlsp is the cause of disallow-listing a node by the ALSP (Application Layer Spam Prevention). + DisallowListedCauseAlsp DisallowListedCause = "disallow-listed-alsp" +) + +// DisallowListingUpdate is a notification of a new disallow list update, it contains a list of Flow identities that +// are now disallow listed for a specific reason. +type DisallowListingUpdate struct { + FlowIds flow.IdentifierList + Cause DisallowListedCause +} + +// AllowListingUpdate is a notification of a new allow list update, it contains a list of Flow identities that +// are now allow listed for a specific reason, i.e., their disallow list entry for that reason is removed. +type AllowListingUpdate struct { + FlowIds flow.IdentifierList + Cause DisallowListedCause +} + +// DisallowListNotificationConsumer is an interface for consuming disallow/allow list update notifications. +type DisallowListNotificationConsumer interface { + // OnDisallowListNotification is called when a new disallow list update notification is distributed. + // Any error on consuming an event must be handled internally. + // The implementation must be concurrency safe. + OnDisallowListNotification(*DisallowListingUpdate) + + // OnAllowListNotification is called when a new allow list update notification is distributed. + // Any error on consuming an event must be handled internally. + // The implementation must be concurrency safe. + OnAllowListNotification(*AllowListingUpdate) +} diff --git a/network/engine.go b/network/engine.go index a6c2fd6707a..0997894096d 100644 --- a/network/engine.go +++ b/network/engine.go @@ -1,4 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED package network import ( @@ -50,11 +49,12 @@ type MessageProcessor interface { // Implementations of Process should be non-blocking. In general, Process should // only queue the message internally by the engine for later async processing. // - // TODO: This function should not return an error. - // The networking layer's responsibility is fulfilled once it delivers a message to an engine. - // It does not possess the context required to handle errors that may arise during an engine's processing - // of the message, as error handling for message processing falls outside the domain of the networking layer. - // Consequently, it is reasonable to remove the error from the Process function's signature, - // since returning an error to the networking layer would not be useful in this context. + // TODO(BFT, #7620): This function should not return an error. The networking layer's responsibility is fulfilled + // once it delivers a message to an engine. It does not possess the context required to handle + // errors that may arise during an engine's processing of the message, as error handling for + // message processing falls outside the domain of the networking layer. + // + // Some of the current error returns signal Byzantine behavior, such as forged or malformed + // messages. These cases must be logged and routed to a dedicated violation reporting consumer. 
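// [Editor's illustration, not part of this diff] A MessageProcessor sketch that satisfies
// the non-blocking contract described above by only enqueueing messages for asynchronous
// processing. The queueingEngine type is made up for the example.
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/network/channels"
)

type queueingEngine struct {
	inbound chan interface{} // drained by a worker goroutine elsewhere
}

// Process never blocks: it either hands the message to the inbound queue or drops it.
func (e *queueingEngine) Process(channel channels.Channel, originID flow.Identifier, message interface{}) error {
	select {
	case e.inbound <- message:
		return nil
	default:
		// queue full: drop rather than block the networking layer
		return fmt.Errorf("inbound queue full, dropping message from %x on channel %s", originID, channel)
	}
}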
Process(channel channels.Channel, originID flow.Identifier, message interface{}) error } diff --git a/network/errors.go b/network/errors.go index f469165fe46..f9edc2d291a 100644 --- a/network/errors.go +++ b/network/errors.go @@ -5,6 +5,8 @@ import ( "fmt" "github.com/libp2p/go-libp2p/core/peer" + + p2plogging "github.com/onflow/flow-go/network/p2p/logging" ) var ( @@ -18,7 +20,7 @@ type ErrIllegalConnectionState struct { } func (e ErrIllegalConnectionState) Error() string { - return fmt.Sprintf("unexpected connection status to peer %s: received NotConnected status while connection list is not empty %d ", e.pid.String(), e.numOfConns) + return fmt.Sprintf("unexpected connection status to peer %s: received NotConnected status while connection list is not empty %d ", p2plogging.PeerId(e.pid), e.numOfConns) } // NewConnectionStatusErr returns a new ErrIllegalConnectionState. diff --git a/network/internal/p2pfixtures/fixtures.go b/network/internal/p2pfixtures/fixtures.go index 6d989d3ef06..fab3fb6228a 100644 --- a/network/internal/p2pfixtures/fixtures.go +++ b/network/internal/p2pfixtures/fixtures.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "net" "testing" @@ -13,29 +14,26 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/routing" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/config" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/metrics" + flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/p2putils" - "github.com/onflow/flow-go/network/internal/testutils" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" + p2pbuilder "github.com/onflow/flow-go/network/p2p/builder" + p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config" p2pdht "github.com/onflow/flow-go/network/p2p/dht" - "github.com/onflow/flow-go/network/p2p/keyutils" - "github.com/onflow/flow-go/network/p2p/p2pbuilder" - inspectorbuilder "github.com/onflow/flow-go/network/p2p/p2pbuilder/inspector" - "github.com/onflow/flow-go/network/p2p/tracer" - "github.com/onflow/flow-go/network/p2p/unicast" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/utils" "github.com/onflow/flow-go/utils/unittest" @@ -96,33 +94,37 @@ func WithSubscriptionFilter(filter pubsub.SubscriptionFilter) nodeOpt { } } +// TODO: this should be replaced by node fixture: https://github.com/onflow/flow-go/blob/master/network/p2p/test/fixtures.go func CreateNode(t *testing.T, networkKey crypto.PrivateKey, sporkID flow.Identifier, logger zerolog.Logger, nodeIds flow.IdentityList, opts ...nodeOpt) p2p.LibP2PNode { idProvider := id.NewFixedIdentityProvider(nodeIds) - - meshTracer := tracer.NewGossipSubMeshTracer( - logger, - metrics.NewNoopCollector(), - idProvider, - p2pbuilder.DefaultGossipSubConfig().LocalMeshLogInterval) - - rpcInspectorSuite, err := inspectorbuilder.NewGossipSubInspectorBuilder(logger, sporkID, inspectorbuilder.DefaultGossipSubRPCInspectorsConfig()).Build() + defaultFlowConfig, err := 
config.DefaultConfig()
 	require.NoError(t, err)
 
 	builder := p2pbuilder.NewNodeBuilder(
 		logger,
-		metrics.NewNoopCollector(),
+		&defaultFlowConfig.NetworkConfig.GossipSub,
+		&p2pbuilderconfig.MetricsConfig{
+			HeroCacheFactory: metrics.NewNoopHeroCacheMetricsFactory(),
+			Metrics:          metrics.NewNoopCollector(),
+		},
+		flownet.PrivateNetwork,
 		unittest.DefaultAddress,
 		networkKey,
 		sporkID,
-		p2pbuilder.DefaultResourceManagerConfig()).
+		idProvider,
+		&defaultFlowConfig.NetworkConfig.ResourceManager,
+		p2pbuilderconfig.PeerManagerDisableConfig(),
+		&p2p.DisallowListCacheConfig{
+			MaxSize: uint32(1000),
+			Metrics: metrics.NewNoopCollector(),
+		},
+		&p2pbuilderconfig.UnicastConfig{
+			Unicast: defaultFlowConfig.NetworkConfig.Unicast,
+		}).
 		SetRoutingSystem(func(c context.Context, h host.Host) (routing.Routing, error) {
 			return p2pdht.NewDHT(c, h, protocols.FlowDHTProtocolID(sporkID), zerolog.Nop(), metrics.NewNoopCollector())
 		}).
-		SetResourceManager(testutils.NewResourceManager(t)).
-		SetStreamCreationRetryInterval(unicast.DefaultRetryDelay).
-		SetGossipSubTracer(meshTracer).
-		SetGossipSubScoreTracerInterval(p2pbuilder.DefaultGossipSubConfig().ScoreTracerInterval).
-		SetGossipSubRpcInspectorSuite(rpcInspectorSuite)
+		SetResourceManager(&network.NullResourceManager{})
 
 	for _, opt := range opts {
 		opt(builder)
@@ -134,34 +136,38 @@ func CreateNode(t *testing.T, networkKey crypto.PrivateKey, sporkID flow.Identif
 	return libp2pNode
 }
 
-// PeerIdFixture creates a random and unique peer ID (libp2p node ID).
-func PeerIdFixture(t *testing.T) peer.ID {
-	key, err := generateNetworkingKey(unittest.IdentifierFixture())
-	require.NoError(t, err)
-
-	pubKey, err := keyutils.LibP2PPublicKeyFromFlow(key.PublicKey())
-	require.NoError(t, err)
-
-	peerID, err := peer.IDFromPublicKey(pubKey)
-	require.NoError(t, err)
-
-	return peerID
-}
+// SubMustEventuallyStopReceivingAnyMessage checks that the subscription eventually stops receiving any messages within the timeout given by the context.
+// This func uses the publish callback to continually publish messages to the subscription; this ensures that the subscription indeed stops receiving the messages.
+func SubMustEventuallyStopReceivingAnyMessage(t *testing.T, ctx context.Context, sub p2p.Subscription, publish func(t *testing.T)) {
+	done := make(chan struct{})
+	ticker := time.NewTicker(500 * time.Millisecond)
+	defer func() {
+		close(done)
+		ticker.Stop()
+	}()
 
-// generateNetworkingKey generates a Flow ECDSA key using the given seed
-func generateNetworkingKey(s flow.Identifier) (crypto.PrivateKey, error) {
-	seed := make([]byte, crypto.KeyGenSeedMinLen)
-	copy(seed, s[:])
-	return crypto.GeneratePrivateKey(crypto.ECDSASecp256k1, seed)
-}
+	go func() {
+		for {
+			select {
+			case <-done:
+				return
+			case <-ticker.C:
+				publish(t)
+			}
+		}
+	}()
 
-// PeerIdsFixture creates random and unique peer IDs (libp2p node IDs).
-func PeerIdsFixture(t *testing.T, n int) []peer.ID {
-	peerIDs := make([]peer.ID, n)
-	for i := 0; i < n; i++ {
-		peerIDs[i] = PeerIdFixture(t)
-	}
-	return peerIDs
+	// eventually we should stop receiving messages on the sub
+	require.Eventually(t, func() bool {
+		_, err := sub.Next(ctx)
+		return errors.Is(err, context.DeadlineExceeded)
+	}, 10*time.Second, 100*time.Millisecond)
+
+	// after we stop receiving messages on sub, we should continue to not receive messages
+	// despite messages continuing to be published
+	_, err := sub.Next(ctx)
+	require.Error(t, err)
+	require.ErrorIs(t, err, context.DeadlineExceeded)
 }
 
 // SubMustNeverReceiveAnyMessage checks that the subscription never receives any message within the given timeout by the context.
@@ -180,6 +186,13 @@ func SubMustNeverReceiveAnyMessage(t *testing.T, ctx context.Context, sub p2p.Su
 	unittest.RequireCloseBefore(t, timeouted, 10*time.Second, "timeout did not happen on receiving expected pubsub message")
 }
 
+// SubsMustEventuallyStopReceivingAnyMessage applies SubMustEventuallyStopReceivingAnyMessage to each subscription in subs.
+func SubsMustEventuallyStopReceivingAnyMessage(t *testing.T, ctx context.Context, subs []p2p.Subscription, send func(t *testing.T)) {
+	for _, sub := range subs {
+		SubMustEventuallyStopReceivingAnyMessage(t, ctx, sub, send)
+	}
+}
+
 // HasSubReceivedMessage checks that the subscription have received the given message within the given timeout by the context.
 // It returns true if the subscription has received the message, false otherwise.
 func HasSubReceivedMessage(t *testing.T, ctx context.Context, expectedMessage []byte, sub p2p.Subscription) bool {
@@ -219,7 +231,7 @@ func AddNodesToEachOthersPeerStore(t *testing.T, nodes []p2p.LibP2PNode, ids flo
 		if node == other {
 			continue
 		}
-		otherPInfo, err := utils.PeerAddressInfo(*ids[i])
+		otherPInfo, err := utils.PeerAddressInfo(ids[i].IdentitySkeleton)
 		require.NoError(t, err)
 		node.Host().Peerstore().AddAddrs(otherPInfo.ID, otherPInfo.Addrs, peerstore.AddressTTL)
 	}
@@ -233,13 +245,13 @@ func EnsureNotConnected(t *testing.T, ctx context.Context, from []p2p.LibP2PNode
 		if this == other {
 			require.Fail(t, "overlapping nodes in from and to lists")
 		}
-		thisId := this.Host().ID()
+		thisId := this.ID()
 		// we intentionally do not check the error here, with libp2p v0.24 connection gating at the "InterceptSecured" level
 		// does not cause the nodes to complain about the connection being rejected at the dialer side.
 		// Hence, we instead check for any trace of the connection being established in the receiver side.
-		_ = this.Host().Connect(ctx, other.Host().Peerstore().PeerInfo(other.Host().ID()))
+		_ = this.Host().Connect(ctx, other.Host().Peerstore().PeerInfo(other.ID()))
 		// ensures that other node has never received a connection from this node.
- require.Equal(t, other.Host().Network().Connectedness(thisId), network.NotConnected) + require.Equal(t, network.NotConnected, other.Host().Network().Connectedness(thisId)) require.Empty(t, other.Host().Network().ConnsToPeer(thisId)) } } @@ -258,14 +270,18 @@ func EnsureMessageExchangeOverUnicast(t *testing.T, ctx context.Context, nodes [ if this == other { continue } - s, err := this.CreateStream(ctx, other.Host().ID()) - require.NoError(t, err) - rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) - _, err = rw.WriteString(msg) + err := this.OpenAndWriteOnStream(ctx, other.ID(), t.Name(), func(stream network.Stream) error { + rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream)) + _, err := rw.WriteString(msg) + require.NoError(t, err) + + // Flush the stream + require.NoError(t, rw.Flush()) + + return nil + }) require.NoError(t, err) - // Flush the stream - require.NoError(t, rw.Flush()) } // wait for the message to be received by all other nodes @@ -304,8 +320,8 @@ func EnsureNoStreamCreation(t *testing.T, ctx context.Context, from []p2p.LibP2P // we intentionally do not check the error here, with libp2p v0.24 connection gating at the "InterceptSecured" level // does not cause the nodes to complain about the connection being rejected at the dialer side. // Hence, we instead check for any trace of the connection being established in the receiver side. - otherId := other.Host().ID() - thisId := this.Host().ID() + otherId := other.ID() + thisId := this.ID() // closes all connections from other node to this node in order to isolate the connection attempt. for _, conn := range other.Host().Network().ConnsToPeer(thisId) { @@ -313,9 +329,12 @@ func EnsureNoStreamCreation(t *testing.T, ctx context.Context, from []p2p.LibP2P } require.Empty(t, other.Host().Network().ConnsToPeer(thisId)) - _, err := this.CreateStream(ctx, otherId) + err := this.OpenAndWriteOnStream(ctx, otherId, t.Name(), func(stream network.Stream) error { + // no-op as the stream is never created. + return nil + }) // ensures that other node has never received a connection from this node. - require.Equal(t, other.Host().Network().Connectedness(thisId), network.NotConnected) + require.Equal(t, network.NotConnected, other.Host().Network().Connectedness(thisId)) // a stream is established on top of a connection, so if there is no connection, there should be no stream. require.Empty(t, other.Host().Network().ConnsToPeer(thisId)) // runs the error checkers if any. 
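The fixtures above migrate from the raw `CreateStream` API to the callback-style `OpenAndWriteOnStream`, which scopes the stream's lifetime to the writing logic: the node opens the stream, runs the callback, and handles teardown itself. A minimal sketch of the calling pattern, using the signature visible in these call sites (the `sendUnicastMessage` helper and its message framing are illustrative, not part of the diff):

```go
package example

import (
	"bufio"
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/onflow/flow-go/network/p2p"
)

// sendUnicastMessage illustrates the callback-style stream API: the node opens
// a stream to the target peer, invokes the writing logic, and tears the stream
// down when the callback returns, so the caller never holds a raw stream handle.
func sendUnicastMessage(ctx context.Context, node p2p.LibP2PNode, target peer.ID, msg string) error {
	return node.OpenAndWriteOnStream(ctx, target, "example-protection-tag", func(stream network.Stream) error {
		rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
		if _, err := rw.WriteString(msg); err != nil {
			return fmt.Errorf("could not write message: %w", err)
		}
		// flush so the bytes hit the wire before the stream is closed
		return rw.Flush()
	})
}
```

Compared with returning a stream from `CreateStream`, this inversion of control prevents leaked or half-closed streams in tests, since cleanup is centralized in the node implementation.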
@@ -335,9 +354,11 @@ func EnsureStreamCreation(t *testing.T, ctx context.Context, from []p2p.LibP2PNo
 			require.Fail(t, "node is in both from and to lists")
 		}
 		// stream creation should pass without error
-		s, err := this.CreateStream(ctx, other.Host().ID())
+		err := this.OpenAndWriteOnStream(ctx, other.ID(), t.Name(), func(stream network.Stream) error {
+			require.NotNil(t, stream)
+			return nil
+		})
 		require.NoError(t, err)
-		require.NotNil(t, s)
 		}
 	}
 }
diff --git a/network/internal/p2putils/utils.go b/network/internal/p2putils/utils.go
index 2415ca5b4c8..4a7d7b58cbe 100644
--- a/network/internal/p2putils/utils.go
+++ b/network/internal/p2putils/utils.go
@@ -4,16 +4,17 @@ import (
 	"fmt"
 	"net"
 
-	"github.com/libp2p/go-libp2p/core"
 	"github.com/libp2p/go-libp2p/core/crypto"
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/protocol"
 	"github.com/multiformats/go-multiaddr"
 	"github.com/rs/zerolog"
 
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/network/p2p/keyutils"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
 	"github.com/onflow/flow-go/network/p2p/unicast/protocols"
 )
 
@@ -31,9 +32,9 @@ func FlowStream(conn network.Conn) network.Stream {
 func StreamLogger(log zerolog.Logger, stream network.Stream) zerolog.Logger {
 	logger := log.With().
 		Str("protocol", string(stream.Protocol())).
-		Str("remote_peer", stream.Conn().RemotePeer().String()).
+		Str("remote_peer", p2plogging.PeerId(stream.Conn().RemotePeer())).
 		Str("remote_address", stream.Conn().RemoteMultiaddr().String()).
-		Str("local_peer", stream.Conn().LocalPeer().String()).
+		Str("local_peer", p2plogging.PeerId(stream.Conn().LocalPeer())).
 		Str("local_address", stream.Conn().LocalMultiaddr().String()).Logger()
 	return logger
 }
@@ -68,46 +69,96 @@ func ConnectednessToString(connectedness network.Connectedness) (string, bool) {
 }
 
-// FindOutboundStream finds an existing outbound stream to the target id if it exists by querying libp2p
-func FindOutboundStream(host host.Host, targetID peer.ID, protocol core.ProtocolID) (network.Stream, bool) {
-	streams := FilterStream(host, targetID, protocol, network.DirOutbound, false)
-	if len(streams) > 0 {
-		return streams[0], true
+// CountStream returns the total number of streams to the target peer that match the given filter options.
+func CountStream(host host.Host, targetID peer.ID, opts ...FilterOption) int {
+	streams := FilterStream(host, targetID, append(opts, All())...)
+	return len(streams)
+}
+
+// FilterOptions holds the filtering options used in FilterStream.
+type FilterOptions struct {
+	// dir specifies the direction of the streams to be filtered.
+	// The default value is network.DirUnknown, which considers both inbound and outbound streams.
+	dir network.Direction
+
+	// protocol specifies the protocol ID of the streams to be filtered.
+	// The default value ("*") considers streams of all protocol IDs.
+	protocol protocol.ID
+
+	// all specifies whether to return all matching streams or just the first matching stream.
+	// The default value is false, which returns just the first matching stream.
+	all bool
+}
+
+// FilterOption defines a function type that modifies FilterOptions.
+type FilterOption func(*FilterOptions)
+
+// Direction is a FilterOption for setting the direction of the streams to be filtered.
+func Direction(dir network.Direction) FilterOption {
+	return func(opts *FilterOptions) {
+		opts.dir = dir
 	}
-	return nil, false
 }
 
-// CountStream finds total number of outbound stream to the target id
-func CountStream(host host.Host, targetID peer.ID, protocol core.ProtocolID, dir network.Direction) int {
-	streams := FilterStream(host, targetID, protocol, dir, true)
-	return len(streams)
+// Protocol is a FilterOption for setting the protocol ID of the streams to be filtered.
+func Protocol(protocol protocol.ID) FilterOption {
+	return func(opts *FilterOptions) {
+		opts.protocol = protocol
+	}
 }
 
-// FilterStream finds one or all existing outbound streams to the target id if it exists.
-// if parameter all is true - all streams are found else the first stream found is returned
-func FilterStream(host host.Host, targetID peer.ID, protocol core.ProtocolID, dir network.Direction, all bool) []network.Stream {
+// All is a FilterOption for setting whether to return all matching streams or just the first matching stream.
+func All() FilterOption {
+	return func(opts *FilterOptions) {
+		opts.all = true
+	}
+}
+
+// FilterStream filters the streams to a target peer based on the provided options.
+// The default behavior is to consider all directions and protocols, and return just the first matching stream.
+// This behavior can be customized by providing FilterOption values.
+//
+// Usage:
+//
+// - To find all outbound streams to a target peer with a specific protocol ID:
+//   streams := FilterStream(host, targetID, Direction(network.DirOutbound), Protocol(myProtocolID), All())
+//
+// - To find the first inbound stream to a target peer, regardless of protocol ID:
+//   streams := FilterStream(host, targetID, Direction(network.DirInbound))
+//
+// host is the host from which to filter streams.
+// targetID is the ID of the target peer.
+// options is a variadic parameter that allows zero or more FilterOption values to be provided.
+//
+// It returns a slice of network.Stream values that match the filtering criteria.
+func FilterStream(host host.Host, targetID peer.ID, options ...FilterOption) []network.Stream {
 	var filteredStreams []network.Stream
+	const allProtocols = "*"
+	// default values
+	opts := FilterOptions{
+		dir:      network.DirUnknown, // by default, consider both inbound and outbound streams
+		protocol: allProtocols,       // by default, consider streams of all protocol IDs
+		all:      false,              // by default, return just the first matching stream
+	}
+
+	// apply provided options
+	for _, option := range options {
+		option(&opts)
+	}
 
-	// choose the connection only if it is connected
 	if host.Network().Connectedness(targetID) != network.Connected {
 		return filteredStreams
 	}
 
-	// get all connections
 	conns := host.Network().ConnsToPeer(targetID)
-
-	// find a connection which is in the connected state
 	for _, conn := range conns {
-
-		// get all streams
 		streams := conn.GetStreams()
 		for _, stream := range streams {
-
-			// choose a stream which is marked as outbound and is for the flow protocol
-			if stream.Stat().Direction == dir && stream.Protocol() == protocol {
+			if (opts.dir == network.DirUnknown || stream.Stat().Direction == opts.dir) &&
+				(opts.protocol == allProtocols || stream.Protocol() == opts.protocol) {
 				filteredStreams = append(filteredStreams, stream)
-				if !all {
+				if !opts.all {
 					return filteredStreams
 				}
 			}
@@ -117,7 +167,7 @@ func FilterStream(host host.Host, targetID peer.ID, protocol core.ProtocolID, di
 }
 
 // NetworkingInfo returns ip, port, libp2p public key of the identity.
-func NetworkingInfo(identity flow.Identity) (string, string, crypto.PubKey, error) { +func NetworkingInfo(identity flow.IdentitySkeleton) (string, string, crypto.PubKey, error) { // split the node address into ip and port ip, port, err := net.SplitHostPort(identity.Address) if err != nil { @@ -158,7 +208,7 @@ func IPPortFromMultiAddress(addrs ...multiaddr.Multiaddr) (string, string, error return "", "", err } - //there should only be one valid IPv4 address + // there should only be one valid IPv4 address return ipOrHostname, port, nil } return "", "", fmt.Errorf("ip address or hostname not found") diff --git a/network/internal/testutils/fixtures.go b/network/internal/testutils/fixtures.go index e4e1bd6ef1c..b2fff20abbb 100644 --- a/network/internal/testutils/fixtures.go +++ b/network/internal/testutils/fixtures.go @@ -22,7 +22,7 @@ func MisbehaviorReportFixture(t *testing.T) network.MisbehaviorReport { // pick a random misbehavior type misbehaviorType := alsp.AllMisbehaviorTypes()[rand.Intn(len(alsp.AllMisbehaviorTypes()))] - amplification := rand.Intn(100) + amplification := 100 * rand.Float64() report, err := alsp.NewMisbehaviorReport( unittest.IdentifierFixture(), misbehaviorType, diff --git a/network/internal/testutils/meshengine.go b/network/internal/testutils/meshengine.go index f792899410d..a8dafef6df7 100644 --- a/network/internal/testutils/meshengine.go +++ b/network/internal/testutils/meshengine.go @@ -26,7 +26,7 @@ type MeshEngine struct { mockcomponent.Component } -func NewMeshEngine(t *testing.T, net network.Network, cap int, channel channels.Channel) *MeshEngine { +func NewMeshEngine(t *testing.T, net network.EngineRegistry, cap int, channel channels.Channel) *MeshEngine { te := &MeshEngine{ t: t, Event: make(chan interface{}, cap), diff --git a/network/internal/testutils/testUtil.go b/network/internal/testutils/testUtil.go index e2facb58799..6a8a4331ee5 100644 --- a/network/internal/testutils/testUtil.go +++ b/network/internal/testutils/testUtil.go @@ -1,7 +1,6 @@ package testutils import ( - "context" "fmt" "reflect" "runtime" @@ -10,47 +9,34 @@ import ( "testing" "time" - dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p/core/connmgr" - "github.com/libp2p/go-libp2p/core/host" - p2pNetwork "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - pc "github.com/libp2p/go-libp2p/core/protocol" - "github.com/libp2p/go-libp2p/core/routing" "github.com/rs/zerolog" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/config" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/observable" "github.com/onflow/flow-go/network" + alspmgr "github.com/onflow/flow-go/network/alsp/manager" netcache "github.com/onflow/flow-go/network/cache" - "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/codec/cbor" + mocknetwork "github.com/onflow/flow-go/network/mock" + "github.com/onflow/flow-go/network/netconf" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/conduit" "github.com/onflow/flow-go/network/p2p/connection" - p2pdht "github.com/onflow/flow-go/network/p2p/dht" - 
"github.com/onflow/flow-go/network/p2p/middleware" - "github.com/onflow/flow-go/network/p2p/p2pbuilder" - inspectorbuilder "github.com/onflow/flow-go/network/p2p/p2pbuilder/inspector" - "github.com/onflow/flow-go/network/p2p/subscription" + p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/translator" - "github.com/onflow/flow-go/network/p2p/unicast" - "github.com/onflow/flow-go/network/p2p/unicast/protocols" - "github.com/onflow/flow-go/network/p2p/unicast/ratelimit" - "github.com/onflow/flow-go/network/slashing" + "github.com/onflow/flow-go/network/underlay" "github.com/onflow/flow-go/utils/unittest" ) -var sporkID = unittest.IdentifierFixture() - // RateLimitConsumer p2p.RateLimiterConsumer fixture that invokes a callback when rate limit event is consumed. type RateLimitConsumer struct { callback func(pid peer.ID, role, msgType, topic, reason string) // callback func that will be invoked on rate limit @@ -110,7 +96,7 @@ func (tw *TagWatchingConnManager) Unprotect(id peer.ID, tag string) bool { } // NewTagWatchingConnManager creates a new TagWatchingConnManager with the given config. It returns an error if the config is invalid. -func NewTagWatchingConnManager(log zerolog.Logger, metrics module.LibP2PConnectionMetrics, config *connection.ManagerConfig) (*TagWatchingConnManager, error) { +func NewTagWatchingConnManager(log zerolog.Logger, metrics module.LibP2PConnectionMetrics, config *netconf.ConnectionManager) (*TagWatchingConnManager, error) { cm, err := connection.NewConnManager(log, metrics, config) if err != nil { return nil, fmt.Errorf("could not create connection manager: %w", err) @@ -123,250 +109,122 @@ func NewTagWatchingConnManager(log zerolog.Logger, metrics module.LibP2PConnecti }, nil } -// GenerateIDs is a test helper that generate flow identities with a valid port and libp2p nodes. -func GenerateIDs(t *testing.T, logger zerolog.Logger, n int, opts ...func(*optsConfig)) (flow.IdentityList, - []p2p.LibP2PNode, - []observable.Observable) { - libP2PNodes := make([]p2p.LibP2PNode, n) - tagObservables := make([]observable.Observable, n) - - identities := unittest.IdentityListFixture(n, unittest.WithAllRoles()) - idProvider := NewUpdatableIDProvider(identities) - o := &optsConfig{ - peerUpdateInterval: connection.DefaultPeerUpdateInterval, - unicastRateLimiterDistributor: ratelimit.NewUnicastRateLimiterDistributor(), - connectionGater: NewConnectionGater(idProvider, func(p peer.ID) error { - return nil - }), - createStreamRetryInterval: unicast.DefaultRetryDelay, - } - for _, opt := range opts { - opt(o) - } - - for _, identity := range identities { - for _, idOpt := range o.idOpts { - idOpt(identity) - } - } - - // generates keys and address for the node - for i, identity := range identities { - // generate key - key, err := generateNetworkingKey(identity.NodeID) - require.NoError(t, err) - - var opts []nodeBuilderOption - - opts = append(opts, withDHT(o.dhtPrefix, o.dhtOpts...)) - opts = append(opts, withPeerManagerOptions(connection.PruningEnabled, o.peerUpdateInterval)) - opts = append(opts, withRateLimiterDistributor(o.unicastRateLimiterDistributor)) - opts = append(opts, withConnectionGater(o.connectionGater)) - opts = append(opts, withUnicastManagerOpts(o.createStreamRetryInterval)) - - libP2PNodes[i], tagObservables[i] = generateLibP2PNode(t, logger, key, opts...) 
-
-		_, port, err := libP2PNodes[i].GetIPPort()
-		require.NoError(t, err)
-
-		identities[i].Address = unittest.IPPort(port)
-		identities[i].NetworkPubKey = key.PublicKey()
-	}
-
-	return identities, libP2PNodes, tagObservables
-}
-
-// GenerateMiddlewares creates and initializes middleware instances for all the identities
-func GenerateMiddlewares(t *testing.T,
-	logger zerolog.Logger,
-	identities flow.IdentityList,
-	libP2PNodes []p2p.LibP2PNode,
-	codec network.Codec,
-	consumer slashing.ViolationsConsumer,
-	opts ...func(*optsConfig)) ([]network.Middleware, []*UpdatableIDProvider) {
-	mws := make([]network.Middleware, len(identities))
-	idProviders := make([]*UpdatableIDProvider, len(identities))
-	bitswapmet := metrics.NewNoopCollector()
-	o := &optsConfig{
-		peerUpdateInterval:  connection.DefaultPeerUpdateInterval,
-		unicastRateLimiters: ratelimit.NoopRateLimiters(),
-		networkMetrics:      metrics.NewNoopCollector(),
-		peerManagerFilters:  []p2p.PeerFilter{},
-	}
-
-	for _, opt := range opts {
-		opt(o)
-	}
-
-	total := len(identities)
-	for i := 0; i < total; i++ {
-		// casts libP2PNode instance to a local variable to avoid closure
-		node := libP2PNodes[i]
-		nodeId := identities[i].NodeID
-
-		idProviders[i] = NewUpdatableIDProvider(identities)
-
-		// creating middleware of nodes
-		mws[i] = middleware.NewMiddleware(
-			logger,
-			node,
-			nodeId,
-			bitswapmet,
-			sporkID,
-			middleware.DefaultUnicastTimeout,
-			translator.NewIdentityProviderIDTranslator(idProviders[i]),
-			codec,
-			consumer,
-			middleware.WithUnicastRateLimiters(o.unicastRateLimiters),
-			middleware.WithPeerManagerFilters(o.peerManagerFilters))
-	}
-	return mws, idProviders
-}
-
-// GenerateNetworks generates the network for the given middlewares
-func GenerateNetworks(t *testing.T,
-	log zerolog.Logger,
+// LibP2PNodeForNetworkFixture is a test helper that generates flow identities with a valid port and libp2p nodes.
+// Note that the LibP2PNode created by this fixture is meant to be used with a network component.
+// If you want to create a standalone LibP2PNode without a network component, please use p2ptest.NodeFixture.
+// Args:
+//
+//	t: testing.T - the test object
+//	sporkId: flow.Identifier - the spork id to use for the nodes
+//	n: int - number of nodes to create
+//
+// opts: []p2ptest.NodeFixtureParameterOption - options to configure the nodes
+// Returns:
+//
+//	flow.IdentityList - list of identities created for the nodes, one for each node.
+//
+// []p2p.LibP2PNode - list of libp2p nodes created.
+// TODO: several test cases only need a single node, consider encapsulating this function in a single node fixture.
+func LibP2PNodeForNetworkFixture(t *testing.T, sporkId flow.Identifier, n int, opts ...p2ptest.NodeFixtureParameterOption) (flow.IdentityList, []p2p.LibP2PNode) {
+	libP2PNodes := make([]p2p.LibP2PNode, 0)
+	identities := make(flow.IdentityList, 0)
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
+	opts = append(opts, p2ptest.WithUnicastHandlerFunc(nil))
+
+	for i := 0; i < n; i++ {
+		node, nodeId := p2ptest.NodeFixture(t,
+			sporkId,
+			t.Name(),
+			idProvider,
+			opts...)
+		libP2PNodes = append(libP2PNodes, node)
+		identities = append(identities, &nodeId)
+	}
+	idProvider.SetIdentities(identities)
+	return identities, libP2PNodes
+}
+
+// NetworksFixture generates the networks for the given libp2p nodes.
+func NetworksFixture(t *testing.T, + sporkId flow.Identifier, ids flow.IdentityList, - mws []network.Middleware, - sms []network.SubscriptionManager, - opts ...p2p.NetworkOptFunction) []network.Network { + libp2pNodes []p2p.LibP2PNode, + configOpts ...func(*underlay.NetworkConfig)) ([]*underlay.Network, []*unittest.UpdatableIDProvider) { + count := len(ids) - nets := make([]network.Network, 0) + nets := make([]*underlay.Network, 0) + idProviders := make([]*unittest.UpdatableIDProvider, 0) for i := 0; i < count; i++ { + idProvider := unittest.NewUpdatableIDProvider(ids) + params := NetworkConfigFixture(t, *ids[i], idProvider, sporkId, libp2pNodes[i]) - // creates and mocks me - me := &mock.Local{} - me.On("NodeID").Return(ids[i].NodeID) - me.On("NotMeFilter").Return(filter.Not(filter.HasNodeID(me.NodeID()))) - me.On("Address").Return(ids[i].Address) - - receiveCache := netcache.NewHeroReceiveCache(p2p.DefaultReceiveCacheSize, log, metrics.NewNoopCollector()) - - // create the network - net, err := p2p.NewNetwork(&p2p.NetworkParameters{ - Logger: log, - Codec: cbor.NewCodec(), - Me: me, - MiddlewareFactory: func() (network.Middleware, error) { return mws[i], nil }, - Topology: unittest.NetworkTopology(), - SubscriptionManager: sms[i], - Metrics: metrics.NewNoopCollector(), - IdentityProvider: id.NewFixedIdentityProvider(ids), - ReceiveCache: receiveCache, - Options: opts, - }) + for _, opt := range configOpts { + opt(params) + } + + net, err := underlay.NewNetwork(params) require.NoError(t, err) nets = append(nets, net) + idProviders = append(idProviders, idProvider) } - return nets -} - -// GenerateIDsAndMiddlewares returns nodeIDs, libp2pNodes, middlewares, and observables which can be subscirbed to in order to witness protect events from pubsub -func GenerateIDsAndMiddlewares(t *testing.T, - n int, - logger zerolog.Logger, - codec network.Codec, - consumer slashing.ViolationsConsumer, - opts ...func(*optsConfig)) (flow.IdentityList, []p2p.LibP2PNode, []network.Middleware, []observable.Observable, []*UpdatableIDProvider) { - - ids, libP2PNodes, protectObservables := GenerateIDs(t, logger, n, opts...) - mws, providers := GenerateMiddlewares(t, logger, ids, libP2PNodes, codec, consumer, opts...) 
- return ids, libP2PNodes, mws, protectObservables, providers -} - -type optsConfig struct { - idOpts []func(*flow.Identity) - dhtPrefix string - dhtOpts []dht.Option - unicastRateLimiters *ratelimit.RateLimiters - peerUpdateInterval time.Duration - networkMetrics module.NetworkMetrics - peerManagerFilters []p2p.PeerFilter - unicastRateLimiterDistributor p2p.UnicastRateLimiterDistributor - connectionGater connmgr.ConnectionGater - createStreamRetryInterval time.Duration -} - -func WithCreateStreamRetryInterval(delay time.Duration) func(*optsConfig) { - return func(o *optsConfig) { - o.createStreamRetryInterval = delay - } -} - -func WithUnicastRateLimiterDistributor(distributor p2p.UnicastRateLimiterDistributor) func(*optsConfig) { - return func(o *optsConfig) { - o.unicastRateLimiterDistributor = distributor - } + return nets, idProviders } -func WithIdentityOpts(idOpts ...func(*flow.Identity)) func(*optsConfig) { - return func(o *optsConfig) { - o.idOpts = idOpts - } -} - -func WithDHT(prefix string, dhtOpts ...dht.Option) func(*optsConfig) { - return func(o *optsConfig) { - o.dhtPrefix = prefix - o.dhtOpts = dhtOpts - } -} - -func WithPeerUpdateInterval(interval time.Duration) func(*optsConfig) { - return func(o *optsConfig) { - o.peerUpdateInterval = interval - } -} +func NetworkConfigFixture( + t *testing.T, + myId flow.Identity, + idProvider module.IdentityProvider, + sporkId flow.Identifier, + libp2pNode p2p.LibP2PNode, + opts ...underlay.NetworkConfigOption) *underlay.NetworkConfig { -func WithPeerManagerFilters(filters ...p2p.PeerFilter) func(*optsConfig) { - return func(o *optsConfig) { - o.peerManagerFilters = filters - } -} + me := mock.NewLocal(t) + me.On("NodeID").Return(myId.NodeID).Maybe() + me.On("NotMeFilter").Return(filter.Not(filter.HasNodeID[flow.Identity](me.NodeID()))).Maybe() + me.On("Address").Return(myId.Address).Maybe() -func WithUnicastRateLimiters(limiters *ratelimit.RateLimiters) func(*optsConfig) { - return func(o *optsConfig) { - o.unicastRateLimiters = limiters - } -} + defaultFlowConfig, err := config.DefaultConfig() + require.NoError(t, err) -func WithConnectionGater(connectionGater connmgr.ConnectionGater) func(*optsConfig) { - return func(o *optsConfig) { - o.connectionGater = connectionGater + receiveCache := netcache.NewHeroReceiveCache( + defaultFlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, + unittest.Logger(), + metrics.NewNoopCollector()) + params := &underlay.NetworkConfig{ + Logger: unittest.Logger(), + Codec: unittest.NetworkCodec(), + Libp2pNode: libp2pNode, + Me: me, + BitSwapMetrics: metrics.NewNoopCollector(), + Topology: unittest.NetworkTopology(), + Metrics: metrics.NewNoopCollector(), + IdentityProvider: idProvider, + ReceiveCache: receiveCache, + ConduitFactory: conduit.NewDefaultConduitFactory(), + SporkId: sporkId, + UnicastMessageTimeout: underlay.DefaultUnicastTimeout, + IdentityTranslator: translator.NewIdentityProviderIDTranslator(idProvider), + AlspCfg: &alspmgr.MisbehaviorReportManagerConfig{ + Logger: unittest.Logger(), + SpamRecordCacheSize: defaultFlowConfig.NetworkConfig.AlspConfig.SpamRecordCacheSize, + SpamReportQueueSize: defaultFlowConfig.NetworkConfig.AlspConfig.SpamReportQueueSize, + HeartBeatInterval: defaultFlowConfig.NetworkConfig.AlspConfig.HearBeatInterval, + AlspMetrics: metrics.NewNoopCollector(), + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + }, + SlashingViolationConsumerFactory: func(_ network.ConduitAdapter) network.ViolationsConsumer { + return 
mocknetwork.NewViolationsConsumer(t) + }, } -} -func WithNetworkMetrics(m module.NetworkMetrics) func(*optsConfig) { - return func(o *optsConfig) { - o.networkMetrics = m + for _, opt := range opts { + opt(params) } -} - -func GenerateIDsMiddlewaresNetworks(t *testing.T, - n int, - log zerolog.Logger, - codec network.Codec, - consumer slashing.ViolationsConsumer, - opts ...func(*optsConfig)) (flow.IdentityList, []p2p.LibP2PNode, []network.Middleware, []network.Network, []observable.Observable) { - ids, libp2pNodes, mws, observables, _ := GenerateIDsAndMiddlewares(t, n, log, codec, consumer, opts...) - sms := GenerateSubscriptionManagers(t, mws) - networks := GenerateNetworks(t, log, ids, mws, sms) - - return ids, libp2pNodes, mws, networks, observables -} -// GenerateEngines generates MeshEngines for the given networks -func GenerateEngines(t *testing.T, nets []network.Network) []*MeshEngine { - count := len(nets) - engs := make([]*MeshEngine, count) - for i, n := range nets { - eng := NewMeshEngine(t, n, 100, channels.TestNetworkChannel) - engs[i] = eng - } - return engs + return params } // StartNodesAndNetworks starts the provided networks and libp2p nodes, returning the irrecoverable error channel. @@ -378,11 +236,11 @@ func GenerateEngines(t *testing.T, nets []network.Network) []*MeshEngine { // - timeout: the timeout to use for waiting for the nodes and networks to start. // // This function fails the test if the nodes or networks do not start within the given timeout. -func StartNodesAndNetworks(ctx irrecoverable.SignalerContext, t *testing.T, nodes []p2p.LibP2PNode, nets []network.Network, timeout time.Duration) { - StartNetworks(ctx, t, nets, timeout) +func StartNodesAndNetworks(ctx irrecoverable.SignalerContext, t *testing.T, nodes []p2p.LibP2PNode, nets []network.EngineRegistry) { + StartNetworks(ctx, t, nets) // start up nodes and Peer managers - StartNodes(ctx, t, nodes, timeout) + StartNodes(ctx, t, nodes) } // StartNetworks starts the provided networks using the provided irrecoverable context @@ -393,23 +251,22 @@ func StartNodesAndNetworks(ctx irrecoverable.SignalerContext, t *testing.T, node // - duration: the timeout to use for waiting for the networks to start. // // This function fails the test if the networks do not start within the given timeout. 
-func StartNetworks(ctx irrecoverable.SignalerContext, t *testing.T, nets []network.Network, duration time.Duration) { - // start up networks (this will implicitly start middlewares) +func StartNetworks(ctx irrecoverable.SignalerContext, t *testing.T, nets []network.EngineRegistry) { for _, net := range nets { net.Start(ctx) - unittest.RequireComponentsReadyBefore(t, duration, net) + unittest.RequireComponentsReadyBefore(t, 5*time.Second, net) } } // StartNodes starts the provided nodes and their peer managers using the provided irrecoverable context -func StartNodes(ctx irrecoverable.SignalerContext, t *testing.T, nodes []p2p.LibP2PNode, duration time.Duration) { +func StartNodes(ctx irrecoverable.SignalerContext, t *testing.T, nodes []p2p.LibP2PNode) { for _, node := range nodes { node.Start(ctx) - unittest.RequireComponentsReadyBefore(t, duration, node) + unittest.RequireComponentsReadyBefore(t, 5*time.Second, node) pm := node.PeerManagerComponent() pm.Start(ctx) - unittest.RequireComponentsReadyBefore(t, duration, pm) + unittest.RequireComponentsReadyBefore(t, 5*time.Second, pm) } } @@ -424,77 +281,6 @@ func StopComponents[R module.ReadyDoneAware](t *testing.T, rda []R, duration tim unittest.RequireComponentsDoneBefore(t, duration, comps...) } -type nodeBuilderOption func(p2p.NodeBuilder) - -func withDHT(prefix string, dhtOpts ...dht.Option) nodeBuilderOption { - return func(nb p2p.NodeBuilder) { - nb.SetRoutingSystem(func(c context.Context, h host.Host) (routing.Routing, error) { - return p2pdht.NewDHT(c, h, pc.ID(protocols.FlowDHTProtocolIDPrefix+prefix), zerolog.Nop(), metrics.NewNoopCollector(), dhtOpts...) - }) - } -} - -func withPeerManagerOptions(connectionPruning bool, updateInterval time.Duration) nodeBuilderOption { - return func(nb p2p.NodeBuilder) { - nb.SetPeerManagerOptions(connectionPruning, updateInterval) - } -} - -func withRateLimiterDistributor(distributor p2p.UnicastRateLimiterDistributor) nodeBuilderOption { - return func(nb p2p.NodeBuilder) { - nb.SetRateLimiterDistributor(distributor) - } -} - -func withConnectionGater(connectionGater connmgr.ConnectionGater) nodeBuilderOption { - return func(nb p2p.NodeBuilder) { - nb.SetConnectionGater(connectionGater) - } -} - -func withUnicastManagerOpts(delay time.Duration) nodeBuilderOption { - return func(nb p2p.NodeBuilder) { - nb.SetStreamCreationRetryInterval(delay) - } -} - -// generateLibP2PNode generates a `LibP2PNode` on localhost using a port assigned by the OS -func generateLibP2PNode(t *testing.T, - logger zerolog.Logger, - key crypto.PrivateKey, - opts ...nodeBuilderOption) (p2p.LibP2PNode, observable.Observable) { - - noopMetrics := metrics.NewNoopCollector() - - // Inject some logic to be able to observe connections of this node - connManager, err := NewTagWatchingConnManager(logger, noopMetrics, connection.DefaultConnManagerConfig()) - require.NoError(t, err) - - rpcInspectorSuite, err := inspectorbuilder.NewGossipSubInspectorBuilder(logger, sporkID, inspectorbuilder.DefaultGossipSubRPCInspectorsConfig()).Build() - require.NoError(t, err) - - builder := p2pbuilder.NewNodeBuilder( - logger, - metrics.NewNoopCollector(), - unittest.DefaultAddress, - key, - sporkID, - p2pbuilder.DefaultResourceManagerConfig()). - SetConnectionManager(connManager). - SetResourceManager(NewResourceManager(t)). - SetStreamCreationRetryInterval(unicast.DefaultRetryDelay). 
- SetGossipSubRpcInspectorSuite(rpcInspectorSuite) - - for _, opt := range opts { - opt(builder) - } - - libP2PNode, err := builder.Build() - require.NoError(t, err) - - return libP2PNode, connManager -} - // OptionalSleep introduces a sleep to allow nodes to heartbeat and discover each other (only needed when using PubSub) func OptionalSleep(send ConduitSendWrapperFunc) { sendFuncName := runtime.FuncForPC(reflect.ValueOf(send).Pointer()).Name() @@ -503,24 +289,6 @@ func OptionalSleep(send ConduitSendWrapperFunc) { } } -// generateNetworkingKey generates a Flow ECDSA key using the given seed -func generateNetworkingKey(s flow.Identifier) (crypto.PrivateKey, error) { - seed := make([]byte, crypto.KeyGenSeedMinLen) - copy(seed, s[:]) - return crypto.GeneratePrivateKey(crypto.ECDSASecp256k1, seed) -} - -// GenerateSubscriptionManagers creates and returns a ChannelSubscriptionManager for each middleware object. -func GenerateSubscriptionManagers(t *testing.T, mws []network.Middleware) []network.SubscriptionManager { - require.NotEmpty(t, mws) - - sms := make([]network.SubscriptionManager, len(mws)) - for i, mw := range mws { - sms[i] = subscription.NewChannelSubscriptionManager(mw) - } - return sms -} - // NetworkPayloadFixture creates a blob of random bytes with the given size (in bytes) and returns it. // The primary goal of utilizing this helper function is to apply stress tests on the network layer by // sending large messages to transmit. @@ -558,27 +326,12 @@ func NetworkPayloadFixture(t *testing.T, size uint) []byte { return payload } -// NewResourceManager creates a new resource manager for testing with no limits. -func NewResourceManager(t *testing.T) p2pNetwork.ResourceManager { - return &p2pNetwork.NullResourceManager{} -} - -// NewConnectionGater creates a new connection gater for testing with given allow listing filter. -func NewConnectionGater(idProvider module.IdentityProvider, allowListFilter p2p.PeerFilter) connmgr.ConnectionGater { - filters := []p2p.PeerFilter{allowListFilter} - return connection.NewConnGater(unittest.Logger(), - idProvider, - connection.WithOnInterceptPeerDialFilters(filters), - connection.WithOnInterceptSecuredFilters(filters)) -} - // IsRateLimitedPeerFilter returns a p2p.PeerFilter that will return an error if the peer is rate limited. 
func IsRateLimitedPeerFilter(rateLimiter p2p.RateLimiter) p2p.PeerFilter { return func(p peer.ID) error { if rateLimiter.IsRateLimited(p) { return fmt.Errorf("peer is rate limited") } - return nil } } diff --git a/network/message/Makefile b/network/message/Makefile index 9c83ad1c9dd..a9612fc3564 100644 --- a/network/message/Makefile +++ b/network/message/Makefile @@ -1,4 +1,4 @@ -# To re-generate the the protobuf go code, install tools first: +# To re-generate the protobuf go code, install tools first: # ``` # cd flow-go # make install-tools diff --git a/network/message/authorization.go b/network/message/authorization.go index 9b16a90141f..7a7f33d518b 100644 --- a/network/message/authorization.go +++ b/network/message/authorization.go @@ -61,7 +61,7 @@ func initializeMessageAuthConfigsMap() { authorizationConfigs[BlockProposal] = MsgAuthConfig{ Name: BlockProposal, Type: func() interface{} { - return new(messages.BlockProposal) + return new(messages.Proposal) }, Config: map[channels.Channel]ChannelAuthConfig{ channels.ConsensusCommittee: { @@ -181,7 +181,7 @@ func initializeMessageAuthConfigsMap() { authorizationConfigs[ClusterBlockProposal] = MsgAuthConfig{ Name: ClusterBlockProposal, Type: func() interface{} { - return new(messages.ClusterBlockProposal) + return new(messages.ClusterProposal) }, Config: map[channels.Channel]ChannelAuthConfig{ channels.ConsensusClusterPrefix: { @@ -231,7 +231,7 @@ func initializeMessageAuthConfigsMap() { authorizationConfigs[CollectionGuarantee] = MsgAuthConfig{ Name: CollectionGuarantee, Type: func() interface{} { - return new(flow.CollectionGuarantee) + return new(messages.CollectionGuarantee) }, Config: map[channels.Channel]ChannelAuthConfig{ channels.PushGuarantees: { @@ -243,7 +243,7 @@ func initializeMessageAuthConfigsMap() { authorizationConfigs[TransactionBody] = MsgAuthConfig{ Name: TransactionBody, Type: func() interface{} { - return new(flow.TransactionBody) + return new(messages.TransactionBody) }, Config: map[channels.Channel]ChannelAuthConfig{ channels.PushTransactions: { @@ -257,7 +257,7 @@ func initializeMessageAuthConfigsMap() { authorizationConfigs[ExecutionReceipt] = MsgAuthConfig{ Name: ExecutionReceipt, Type: func() interface{} { - return new(flow.ExecutionReceipt) + return new(messages.ExecutionReceipt) }, Config: map[channels.Channel]ChannelAuthConfig{ channels.PushReceipts: { @@ -269,7 +269,7 @@ func initializeMessageAuthConfigsMap() { authorizationConfigs[ResultApproval] = MsgAuthConfig{ Name: ResultApproval, Type: func() interface{} { - return new(flow.ResultApproval) + return new(messages.ResultApproval) }, Config: map[channels.Channel]ChannelAuthConfig{ channels.PushApprovals: { @@ -405,7 +405,7 @@ func initializeMessageAuthConfigsMap() { func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { switch v.(type) { // consensus - case *messages.BlockProposal: + case *messages.Proposal: return authorizationConfigs[BlockProposal], nil case *messages.BlockVote: return authorizationConfigs[BlockVote], nil @@ -425,7 +425,7 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { return authorizationConfigs[BlockResponse], nil // cluster consensus - case *messages.ClusterBlockProposal: + case *messages.ClusterProposal: return authorizationConfigs[ClusterBlockProposal], nil case *messages.ClusterBlockVote: return authorizationConfigs[ClusterBlockVote], nil @@ -435,15 +435,15 @@ func GetMessageAuthConfig(v interface{}) (MsgAuthConfig, error) { return authorizationConfigs[ClusterBlockResponse], nil // collections, 
guarantees & transactions
-	case *flow.CollectionGuarantee:
+	case *messages.CollectionGuarantee:
 		return authorizationConfigs[CollectionGuarantee], nil
-	case *flow.TransactionBody:
+	case *messages.TransactionBody:
 		return authorizationConfigs[TransactionBody], nil
 
 	// core messages for execution & verification
-	case *flow.ExecutionReceipt:
+	case *messages.ExecutionReceipt:
 		return authorizationConfigs[ExecutionReceipt], nil
-	case *flow.ResultApproval:
+	case *messages.ResultApproval:
 		return authorizationConfigs[ResultApproval], nil
 
 	// data exchange for execution of blocks
diff --git a/network/message/gossipsub.go b/network/message/gossipsub.go
new file mode 100644
index 00000000000..ede1b09878e
--- /dev/null
+++ b/network/message/gossipsub.go
@@ -0,0 +1 @@
+package message
diff --git a/network/message/message.pb.go b/network/message/message.pb.go
index 9618db05eef..a9cb83c279f 100644
--- a/network/message/message.pb.go
+++ b/network/message/message.pb.go
@@ -5,10 +5,11 @@ package message
 
 import (
 	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
 	io "io"
 	math "math"
 	math_bits "math/bits"
+
+	proto "github.com/golang/protobuf/proto"
 )
 
 // Reference imports to suppress errors if they are not otherwise used.
diff --git a/network/message/message_scope.go b/network/message/message_scope.go
new file mode 100644
index 00000000000..0b93de7af19
--- /dev/null
+++ b/network/message/message_scope.go
@@ -0,0 +1,206 @@
+package message
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/onflow/crypto/hash"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/network/channels"
+)
+
+const (
+	// eventIDPackingPrefix is used as a salt to generate the payload hash for messages.
+	eventIDPackingPrefix = "libp2ppacking"
+)
+
+// EventId computes the event ID for a given channel and payload (i.e., the hash of the payload and channel).
+// All errors returned by this function are benign and should not cause the node to crash.
+// It errors if the hash function fails to hash the payload and channel.
+func EventId(channel channels.Channel, payload []byte) (hash.Hash, error) {
+	// use a hash with an engine-specific salt to get the payload hash
+	h := hash.NewSHA3_384()
+	_, err := h.Write([]byte(eventIDPackingPrefix + channel))
+	if err != nil {
+		return nil, fmt.Errorf("could not hash channel as salt: %w", err)
+	}
+
+	_, err = h.Write(payload)
+	if err != nil {
+		return nil, fmt.Errorf("could not hash event: %w", err)
+	}
+
+	return h.SumHash(), nil
+}
+
+// MessageType returns the type of the message payload.
+func MessageType(decodedPayload interface{}) string {
+	return strings.TrimLeft(fmt.Sprintf("%T", decodedPayload), "*")
+}
+
+// IncomingMessageScope captures the context around an incoming message that is received by the network layer.
+type IncomingMessageScope struct {
+	originId       flow.Identifier     // the origin node ID.
+	targetIds      flow.IdentifierList // the target node IDs (i.e., intended recipients).
+	eventId        hash.Hash           // hash of the payload and channel.
+	msg            *Message            // the raw message received.
+	decodedPayload interface{}         // decoded payload of the message.
+	protocol       ProtocolType        // the type of protocol used to receive the message.
+}
+
+// NewIncomingScope creates a new incoming message scope.
+// All errors returned by this function are benign and should not cause the node to crash, especially as it is not
+// safe to crash the node when receiving a message.
+// It errors if event id (i.e., hash of the payload and channel) cannot be computed, or if it fails to +// convert the target IDs from bytes slice to a flow.IdentifierList. +func NewIncomingScope(originId flow.Identifier, protocol ProtocolType, msg *Message, decodedPayload interface{}) (*IncomingMessageScope, error) { + eventId, err := EventId(channels.Channel(msg.ChannelID), msg.Payload) + if err != nil { + return nil, fmt.Errorf("could not compute event id: %w", err) + } + + targetIds, err := flow.ByteSlicesToIds(msg.TargetIDs) + if err != nil { + return nil, fmt.Errorf("could not convert target ids: %w", err) + } + return &IncomingMessageScope{ + eventId: eventId, + originId: originId, + msg: msg, + decodedPayload: decodedPayload, + protocol: protocol, + targetIds: targetIds, + }, nil +} + +func (m IncomingMessageScope) OriginId() flow.Identifier { + return m.originId +} + +func (m IncomingMessageScope) Proto() *Message { + return m.msg +} + +func (m IncomingMessageScope) DecodedPayload() interface{} { + return m.decodedPayload +} + +func (m IncomingMessageScope) Protocol() ProtocolType { + return m.protocol +} + +func (m IncomingMessageScope) Channel() channels.Channel { + return channels.Channel(m.msg.ChannelID) +} + +func (m IncomingMessageScope) Size() int { + return m.msg.Size() +} + +func (m IncomingMessageScope) TargetIDs() flow.IdentifierList { + return m.targetIds +} + +func (m IncomingMessageScope) EventID() []byte { + return m.eventId[:] +} + +func (m IncomingMessageScope) PayloadType() string { + return MessageType(m.decodedPayload) +} + +// OutgoingMessageScope captures the context around an outgoing message that is about to be sent. +type OutgoingMessageScope struct { + targetIds flow.IdentifierList // the target node IDs. + topic channels.Topic // the topic, i.e., channel-id/spork-id. + payload interface{} // the payload to be sent. + encoder func(interface{}) ([]byte, error) // the encoder to encode the payload. + msg *Message // raw proto message sent on wire. + protocol ProtocolType // the type of protocol used to send the message. +} + +// NewOutgoingScope creates a new outgoing message scope. +// All errors returned by this function are benign and should not cause the node to crash. +// It errors if the encoder fails to encode the payload into a protobuf message, or +// if the number of target IDs does not match the protocol type (i.e., unicast messages +// should have exactly one target ID, while pubsub messages should have at least one target ID). +func NewOutgoingScope( + targetIds flow.IdentifierList, + topic channels.Topic, + payload interface{}, + encoder func(interface{}) ([]byte, error), + protocolType ProtocolType) (*OutgoingMessageScope, error) { + scope := &OutgoingMessageScope{ + targetIds: targetIds, + topic: topic, + payload: payload, + encoder: encoder, + protocol: protocolType, + } + + if protocolType == ProtocolTypeUnicast { + // for unicast messages, we should have exactly one target. + if len(targetIds) != 1 { + return nil, fmt.Errorf("expected exactly one target id for unicast message, got: %d", len(targetIds)) + } + } + if protocolType == ProtocolTypePubSub { + // for pubsub messages, we should have at least one target. 
+ if len(targetIds) == 0 { + return nil, fmt.Errorf("expected at least one target id for pubsub message, got: %d", len(targetIds)) + } + } + + msg, err := scope.buildMessage() + if err != nil { + return nil, fmt.Errorf("could not build message: %w", err) + } + scope.msg = msg + return scope, nil +} + +func (o OutgoingMessageScope) TargetIds() flow.IdentifierList { + return o.targetIds +} + +func (o OutgoingMessageScope) Size() int { + return o.msg.Size() +} + +func (o OutgoingMessageScope) PayloadType() string { + return MessageType(o.payload) +} + +func (o OutgoingMessageScope) Topic() channels.Topic { + return o.topic +} + +// buildMessage builds the raw proto message to be sent on the wire. +func (o OutgoingMessageScope) buildMessage() (*Message, error) { + payload, err := o.encoder(o.payload) + if err != nil { + return nil, fmt.Errorf("could not encode payload: %w", err) + } + + emTargets := make([][]byte, 0) + for _, targetId := range o.targetIds { + tempID := targetId // avoid capturing loop variable + emTargets = append(emTargets, tempID[:]) + } + + channel, ok := channels.ChannelFromTopic(o.topic) + if !ok { + return nil, fmt.Errorf("could not convert topic to channel: %s", o.topic) + } + + return &Message{ + TargetIDs: emTargets, + ChannelID: channel.String(), + Payload: payload, + }, nil +} + +func (o OutgoingMessageScope) Proto() *Message { + return o.msg +} diff --git a/network/message/ping.pb.go b/network/message/ping.pb.go index 9f86dc7ee73..521e7293909 100644 --- a/network/message/ping.pb.go +++ b/network/message/ping.pb.go @@ -5,10 +5,11 @@ package message import ( fmt "fmt" - proto "github.com/golang/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/golang/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/network/message_scope.go b/network/message_scope.go index 3db13a1b2bd..4e4ded4b9cc 100644 --- a/network/message_scope.go +++ b/network/message_scope.go @@ -1,201 +1,57 @@ package network import ( - "fmt" - "strings" - - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" ) -const ( - // eventIDPackingPrefix is used as a salt to generate payload hash for messages. - eventIDPackingPrefix = "libp2ppacking" -) - -// IncomingMessageScope captures the context around an incoming message that is received by the network layer. -type IncomingMessageScope struct { - originId flow.Identifier // the origin node ID. - targetIds flow.IdentifierList // the target node IDs (i.e., intended recipients). - eventId hash.Hash // hash of the payload and channel. - msg *message.Message // the raw message received. - decodedPayload interface{} // decoded payload of the message. - protocol message.ProtocolType // the type of protocol used to receive the message. -} - -// NewIncomingScope creates a new incoming message scope. -// All errors returned by this function are benign and should not cause the node to crash, especially that it is not -// safe to crash the node when receiving a message. -// It errors if event id (i.e., hash of the payload and channel) cannot be computed, or if it fails to -// convert the target IDs from bytes slice to a flow.IdentifierList. 
-func NewIncomingScope(originId flow.Identifier, protocol message.ProtocolType, msg *message.Message, decodedPayload interface{}) (*IncomingMessageScope, error) { - eventId, err := EventId(channels.Channel(msg.ChannelID), msg.Payload) - if err != nil { - return nil, fmt.Errorf("could not compute event id: %w", err) - } - - targetIds, err := flow.ByteSlicesToIds(msg.TargetIDs) - if err != nil { - return nil, fmt.Errorf("could not convert target ids: %w", err) - } - return &IncomingMessageScope{ - eventId: eventId, - originId: originId, - msg: msg, - decodedPayload: decodedPayload, - protocol: protocol, - targetIds: targetIds, - }, nil -} - -func (m IncomingMessageScope) OriginId() flow.Identifier { - return m.originId -} - -func (m IncomingMessageScope) Proto() *message.Message { - return m.msg -} - -func (m IncomingMessageScope) DecodedPayload() interface{} { - return m.decodedPayload -} - -func (m IncomingMessageScope) Protocol() message.ProtocolType { - return m.protocol -} - -func (m IncomingMessageScope) Channel() channels.Channel { - return channels.Channel(m.msg.ChannelID) -} - -func (m IncomingMessageScope) Size() int { - return m.msg.Size() -} - -func (m IncomingMessageScope) TargetIDs() flow.IdentifierList { - return m.targetIds -} - -func (m IncomingMessageScope) EventID() []byte { - return m.eventId[:] -} - -func (m IncomingMessageScope) PayloadType() string { - return MessageType(m.decodedPayload) -} - -// OutgoingMessageScope captures the context around an outgoing message that is about to be sent. -type OutgoingMessageScope struct { - targetIds flow.IdentifierList // the target node IDs. - channelId channels.Channel // the channel ID. - payload interface{} // the payload to be sent. - encoder func(interface{}) ([]byte, error) // the encoder to encode the payload. - msg *message.Message // raw proto message sent on wire. - protocol message.ProtocolType // the type of protocol used to send the message. -} +// IncomingMessageScope defines the interface for incoming message scopes, i.e., self-contained messages that have been +// received on the wire and are ready to be processed. +type IncomingMessageScope interface { + // OriginId returns the origin node ID. + OriginId() flow.Identifier -// NewOutgoingScope creates a new outgoing message scope. -// All errors returned by this function are benign and should not cause the node to crash. -// It errors if the encoder fails to encode the payload into a protobuf message, or -// if the number of target IDs does not match the protocol type (i.e., unicast messages -// should have exactly one target ID, while pubsub messages should have at least one target ID). -func NewOutgoingScope( - targetIds flow.IdentifierList, - channelId channels.Channel, - payload interface{}, - encoder func(interface{}) ([]byte, error), - protocolType message.ProtocolType) (*OutgoingMessageScope, error) { - scope := &OutgoingMessageScope{ - targetIds: targetIds, - channelId: channelId, - payload: payload, - encoder: encoder, - protocol: protocolType, - } + // Proto returns the raw message received. + Proto() *message.Message - if protocolType == message.ProtocolTypeUnicast { - // for unicast messages, we should have exactly one target. - if len(targetIds) != 1 { - return nil, fmt.Errorf("expected exactly one target id for unicast message, got: %d", len(targetIds)) - } - } - if protocolType == message.ProtocolTypePubSub { - // for pubsub messages, we should have at least one target. 
- if len(targetIds) == 0 { - return nil, fmt.Errorf("expected at least one target id for pubsub message, got: %d", len(targetIds)) - } - } + // DecodedPayload returns the decoded payload of the message. + DecodedPayload() interface{} - msg, err := scope.buildMessage() - if err != nil { - return nil, fmt.Errorf("could not build message: %w", err) - } - scope.msg = msg - return scope, nil -} + // Protocol returns the type of protocol used to receive the message. + Protocol() message.ProtocolType -func (o OutgoingMessageScope) TargetIds() flow.IdentifierList { - return o.targetIds -} + // Channel returns the channel of the message. + Channel() channels.Channel -func (o OutgoingMessageScope) Size() int { - return o.msg.Size() -} + // Size returns the size of the message. + Size() int -func (o OutgoingMessageScope) PayloadType() string { - return MessageType(o.payload) -} + // TargetIDs returns the target node IDs, i.e., the intended recipients. + TargetIDs() flow.IdentifierList -func (o OutgoingMessageScope) Channel() channels.Channel { - return o.channelId -} + // EventID returns the hash of the payload and channel. + EventID() []byte -// buildMessage builds the raw proto message to be sent on the wire. -func (o OutgoingMessageScope) buildMessage() (*message.Message, error) { - payload, err := o.encoder(o.payload) - if err != nil { - return nil, fmt.Errorf("could not encode payload: %w", err) - } - - emTargets := make([][]byte, 0) - for _, targetId := range o.targetIds { - tempID := targetId // avoid capturing loop variable - emTargets = append(emTargets, tempID[:]) - } - - return &message.Message{ - TargetIDs: emTargets, - ChannelID: o.channelId.String(), - Payload: payload, - }, nil + // PayloadType returns the type of the decoded payload. + PayloadType() string } -func (o OutgoingMessageScope) Proto() *message.Message { - return o.msg -} +// OutgoingMessageScope defines the interface for building outgoing message scopes, i.e., self-contained messages +// that are ready to be sent on the wire. +type OutgoingMessageScope interface { + // TargetIds returns the target node IDs. + TargetIds() flow.IdentifierList -// EventId computes the event ID for a given channel and payload (i.e., the hash of the payload and channel). -// All errors returned by this function are benign and should not cause the node to crash. -// It errors if the hash function fails to hash the payload and channel. -func EventId(channel channels.Channel, payload []byte) (hash.Hash, error) { - // use a hash with an engine-specific salt to get the payload hash - h := hash.NewSHA3_384() - _, err := h.Write([]byte(eventIDPackingPrefix + channel)) - if err != nil { - return nil, fmt.Errorf("could not hash channel as salt: %w", err) - } + // Size returns the size of the message. + Size() int - _, err = h.Write(payload) - if err != nil { - return nil, fmt.Errorf("could not hash event: %w", err) - } + // PayloadType returns the type of the payload to be sent. + PayloadType() string - return h.SumHash(), nil -} + // Topic returns the topic, i.e., channel-id/spork-id. + Topic() channels.Topic -// MessageType returns the type of the message payload. -func MessageType(decodedPayload interface{}) string { - return strings.TrimLeft(fmt.Sprintf("%T", decodedPayload), "*") + // Proto returns the raw proto message sent on the wire. 
+ Proto() *message.Message } diff --git a/network/middleware.go b/network/middleware.go deleted file mode 100644 index 7bc600fbc8f..00000000000 --- a/network/middleware.go +++ /dev/null @@ -1,79 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package network - -import ( - "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/network/channels" -) - -// Middleware represents the middleware layer, which manages the connections to -// our direct neighbours on the network. It handles the creation & teardown of -// connections, as well as reading & writing to/from the connections. -type Middleware interface { - component.Component - - // SetOverlay sets the overlay used by the middleware. This must be called before the middleware can be Started. - SetOverlay(Overlay) - - // SendDirect sends msg on a 1-1 direct connection to the target ID. It models a guaranteed delivery asynchronous - // direct one-to-one connection on the underlying network. No intermediate node on the overlay is utilized - // as the router. - // - // Dispatch should be used whenever guaranteed delivery to a specific target is required. Otherwise, Publish is - // a more efficient candidate. - // All errors returned from this function can be considered benign. - SendDirect(msg *OutgoingMessageScope) error - - // Publish publishes a message on the channel. It models a distributed broadcast where the message is meant for all or - // a many nodes subscribing to the channel. It does not guarantee the delivery though, and operates on a best - // effort. - // All errors returned from this function can be considered benign. - Publish(msg *OutgoingMessageScope) error - - // Subscribe subscribes the middleware to a channel. - // No errors are expected during normal operation. - Subscribe(channel channels.Channel) error - - // Unsubscribe unsubscribes the middleware from a channel. - // All errors returned from this function can be considered benign. - Unsubscribe(channel channels.Channel) error - - // UpdateNodeAddresses fetches and updates the addresses of all the authorized participants - // in the Flow protocol. - UpdateNodeAddresses() - - // NewBlobService creates a new BlobService for the given channel. - NewBlobService(channel channels.Channel, store datastore.Batching, opts ...BlobServiceOption) BlobService - - // NewPingService creates a new PingService for the given ping protocol ID. - NewPingService(pingProtocol protocol.ID, provider PingInfoProvider) PingService - - IsConnected(nodeID flow.Identifier) (bool, error) -} - -// Overlay represents the interface that middleware uses to interact with the -// overlay network layer. -type Overlay interface { - // Topology returns an identity list of nodes which this node should be directly connected to as peers - Topology() flow.IdentityList - - // Identities returns a list of all Flow identities on the network - Identities() flow.IdentityList - - // Identity returns the Identity associated with the given peer ID, if it exists - Identity(peer.ID) (*flow.Identity, bool) - - Receive(*IncomingMessageScope) error -} - -// Connection represents an interface to read from & write to a connection. 
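The refactor above turns IncomingMessageScope and OutgoingMessageScope from concrete structs into interfaces, so callers can depend on the abstraction and tests can substitute the generated mocks introduced later in this diff. A minimal sketch of a consumer written against the new interface — the package name and logging helper are illustrative assumptions, not part of this change:

package example

import (
	"fmt"

	"github.com/onflow/flow-go/network"
)

// logIncoming depends only on network.IncomingMessageScope, so any
// implementation satisfies it, including the mock generated in this PR.
func logIncoming(msg network.IncomingMessageScope) {
	fmt.Printf("channel=%s origin=%x payload=%s size=%d\n",
		msg.Channel(), msg.OriginId(), msg.PayloadType(), msg.Size())
}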
-type Connection interface { - Send(msg interface{}) error - Receive() (interface{}, error) -} diff --git a/network/mocknetwork/basic_resolver.go b/network/mock/basic_resolver.go similarity index 84% rename from network/mocknetwork/basic_resolver.go rename to network/mock/basic_resolver.go index 9cf9f6bcbde..15c4ad5f3d6 100644 --- a/network/mocknetwork/basic_resolver.go +++ b/network/mock/basic_resolver.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mocknetwork +package mock import ( context "context" @@ -18,6 +18,10 @@ type BasicResolver struct { func (_m *BasicResolver) LookupIPAddr(_a0 context.Context, _a1 string) ([]net.IPAddr, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for LookupIPAddr") + } + var r0 []net.IPAddr var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) ([]net.IPAddr, error)); ok { @@ -44,6 +48,10 @@ func (_m *BasicResolver) LookupIPAddr(_a0 context.Context, _a1 string) ([]net.IP func (_m *BasicResolver) LookupTXT(_a0 context.Context, _a1 string) ([]string, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for LookupTXT") + } + var r0 []string var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) ([]string, error)); ok { @@ -66,13 +74,12 @@ func (_m *BasicResolver) LookupTXT(_a0 context.Context, _a1 string) ([]string, e return r0, r1 } -type mockConstructorTestingTNewBasicResolver interface { +// NewBasicResolver creates a new instance of BasicResolver. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBasicResolver(t interface { mock.TestingT Cleanup(func()) -} - -// NewBasicResolver creates a new instance of BasicResolver. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBasicResolver(t mockConstructorTestingTNewBasicResolver) *BasicResolver { +}) *BasicResolver { mock := &BasicResolver{} mock.Mock.Test(t) diff --git a/network/mock/blob_getter.go b/network/mock/blob_getter.go new file mode 100644 index 00000000000..e543cec17b5 --- /dev/null +++ b/network/mock/blob_getter.go @@ -0,0 +1,81 @@ +// Code generated by mockery. DO NOT EDIT. 
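The regenerated constructor pattern shown here recurs in every mock below: the named mockConstructorTestingT* interface is inlined, and cleanup still asserts expectations automatically. A hedged sketch of the resulting test ergonomics; the netmock alias, hostname, and IP address are assumptions for illustration:

package example_test

import (
	"context"
	"net"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	netmock "github.com/onflow/flow-go/network/mock"
)

func TestBasicResolver(t *testing.T) {
	// NewBasicResolver wires AssertExpectations into t.Cleanup,
	// so no manual defer is needed.
	resolver := netmock.NewBasicResolver(t)

	resolver.On("LookupIPAddr", mock.Anything, "example.onflow.org").
		Return([]net.IPAddr{{IP: net.ParseIP("192.0.2.1")}}, nil).
		Once()

	addrs, err := resolver.LookupIPAddr(context.Background(), "example.onflow.org")
	require.NoError(t, err)
	require.Len(t, addrs, 1)
}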
+ +package mock + +import ( + cid "github.com/ipfs/go-cid" + blobs "github.com/onflow/flow-go/module/blobs" + + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// BlobGetter is an autogenerated mock type for the BlobGetter type +type BlobGetter struct { + mock.Mock +} + +// GetBlob provides a mock function with given fields: ctx, c +func (_m *BlobGetter) GetBlob(ctx context.Context, c cid.Cid) (blobs.Blob, error) { + ret := _m.Called(ctx, c) + + if len(ret) == 0 { + panic("no return value specified for GetBlob") + } + + var r0 blobs.Blob + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (blobs.Blob, error)); ok { + return rf(ctx, c) + } + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) blobs.Blob); ok { + r0 = rf(ctx, c) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blobs.Blob) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { + r1 = rf(ctx, c) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetBlobs provides a mock function with given fields: ctx, ks +func (_m *BlobGetter) GetBlobs(ctx context.Context, ks []cid.Cid) <-chan blobs.Blob { + ret := _m.Called(ctx, ks) + + if len(ret) == 0 { + panic("no return value specified for GetBlobs") + } + + var r0 <-chan blobs.Blob + if rf, ok := ret.Get(0).(func(context.Context, []cid.Cid) <-chan blobs.Blob); ok { + r0 = rf(ctx, ks) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan blobs.Blob) + } + } + + return r0 +} + +// NewBlobGetter creates a new instance of BlobGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlobGetter(t interface { + mock.TestingT + Cleanup(func()) +}) *BlobGetter { + mock := &BlobGetter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/blob_service.go b/network/mock/blob_service.go new file mode 100644 index 00000000000..925d57933bb --- /dev/null +++ b/network/mock/blob_service.go @@ -0,0 +1,222 @@ +// Code generated by mockery. DO NOT EDIT. 
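Note the len(ret) == 0 guard that now opens every generated method with return values: an unstubbed call panics with a descriptive message instead of silently yielding zero values. A sketch of the behaviour, again assuming the netmock alias; the stubbed error text is illustrative:

package example_test

import (
	"context"
	"errors"
	"testing"

	"github.com/ipfs/go-cid"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	netmock "github.com/onflow/flow-go/network/mock"
)

func TestUnstubbedCallsFailFast(t *testing.T) {
	bg := netmock.NewBlobGetter(t)

	// Without this stub, bg.GetBlob(...) would panic with
	// "no return value specified for GetBlob" rather than return (nil, nil).
	bg.On("GetBlob", mock.Anything, mock.Anything).
		Return(nil, errors.New("blob not found"))

	_, err := bg.GetBlob(context.Background(), cid.Cid{})
	require.Error(t, err)
}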
+ +package mock + +import ( + cid "github.com/ipfs/go-cid" + blobs "github.com/onflow/flow-go/module/blobs" + + context "context" + + irrecoverable "github.com/onflow/flow-go/module/irrecoverable" + + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" +) + +// BlobService is an autogenerated mock type for the BlobService type +type BlobService struct { + mock.Mock +} + +// AddBlob provides a mock function with given fields: ctx, b +func (_m *BlobService) AddBlob(ctx context.Context, b blobs.Blob) error { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for AddBlob") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, blobs.Blob) error); ok { + r0 = rf(ctx, b) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddBlobs provides a mock function with given fields: ctx, bs +func (_m *BlobService) AddBlobs(ctx context.Context, bs []blobs.Blob) error { + ret := _m.Called(ctx, bs) + + if len(ret) == 0 { + panic("no return value specified for AddBlobs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, []blobs.Blob) error); ok { + r0 = rf(ctx, bs) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteBlob provides a mock function with given fields: ctx, c +func (_m *BlobService) DeleteBlob(ctx context.Context, c cid.Cid) error { + ret := _m.Called(ctx, c) + + if len(ret) == 0 { + panic("no return value specified for DeleteBlob") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) error); ok { + r0 = rf(ctx, c) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Done provides a mock function with no fields +func (_m *BlobService) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// GetBlob provides a mock function with given fields: ctx, c +func (_m *BlobService) GetBlob(ctx context.Context, c cid.Cid) (blobs.Blob, error) { + ret := _m.Called(ctx, c) + + if len(ret) == 0 { + panic("no return value specified for GetBlob") + } + + var r0 blobs.Blob + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (blobs.Blob, error)); ok { + return rf(ctx, c) + } + if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) blobs.Blob); ok { + r0 = rf(ctx, c) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(blobs.Blob) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { + r1 = rf(ctx, c) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetBlobs provides a mock function with given fields: ctx, ks +func (_m *BlobService) GetBlobs(ctx context.Context, ks []cid.Cid) <-chan blobs.Blob { + ret := _m.Called(ctx, ks) + + if len(ret) == 0 { + panic("no return value specified for GetBlobs") + } + + var r0 <-chan blobs.Blob + if rf, ok := ret.Get(0).(func(context.Context, []cid.Cid) <-chan blobs.Blob); ok { + r0 = rf(ctx, ks) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan blobs.Blob) + } + } + + return r0 +} + +// GetSession provides a mock function with given fields: ctx +func (_m *BlobService) GetSession(ctx context.Context) network.BlobGetter { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetSession") + } + + var r0 network.BlobGetter + if rf, ok := ret.Get(0).(func(context.Context) 
network.BlobGetter); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(network.BlobGetter) + } + } + + return r0 +} + +// Ready provides a mock function with no fields +func (_m *BlobService) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *BlobService) Start(_a0 irrecoverable.SignalerContext) { + _m.Called(_a0) +} + +// TriggerReprovide provides a mock function with given fields: ctx +func (_m *BlobService) TriggerReprovide(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for TriggerReprovide") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewBlobService creates a new instance of BlobService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlobService(t interface { + mock.TestingT + Cleanup(func()) +}) *BlobService { + mock := &BlobService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/codec.go b/network/mock/codec.go new file mode 100644 index 00000000000..1f4c6662a00 --- /dev/null +++ b/network/mock/codec.go @@ -0,0 +1,131 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + io "io" + + messages "github.com/onflow/flow-go/model/messages" + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" +) + +// Codec is an autogenerated mock type for the Codec type +type Codec struct { + mock.Mock +} + +// Decode provides a mock function with given fields: data +func (_m *Codec) Decode(data []byte) (messages.UntrustedMessage, error) { + ret := _m.Called(data) + + if len(ret) == 0 { + panic("no return value specified for Decode") + } + + var r0 messages.UntrustedMessage + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (messages.UntrustedMessage, error)); ok { + return rf(data) + } + if rf, ok := ret.Get(0).(func([]byte) messages.UntrustedMessage); ok { + r0 = rf(data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(messages.UntrustedMessage) + } + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Encode provides a mock function with given fields: v +func (_m *Codec) Encode(v interface{}) ([]byte, error) { + ret := _m.Called(v) + + if len(ret) == 0 { + panic("no return value specified for Encode") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(interface{}) ([]byte, error)); ok { + return rf(v) + } + if rf, ok := ret.Get(0).(func(interface{}) []byte); ok { + r0 = rf(v) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(interface{}) error); ok { + r1 = rf(v) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDecoder provides a mock function with given fields: r +func (_m *Codec) NewDecoder(r io.Reader) network.Decoder { + ret := _m.Called(r) + + if len(ret) == 0 { + panic("no return value specified for NewDecoder") + } + + var r0 
network.Decoder + if rf, ok := ret.Get(0).(func(io.Reader) network.Decoder); ok { + r0 = rf(r) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(network.Decoder) + } + } + + return r0 +} + +// NewEncoder provides a mock function with given fields: w +func (_m *Codec) NewEncoder(w io.Writer) network.Encoder { + ret := _m.Called(w) + + if len(ret) == 0 { + panic("no return value specified for NewEncoder") + } + + var r0 network.Encoder + if rf, ok := ret.Get(0).(func(io.Writer) network.Encoder); ok { + r0 = rf(w) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(network.Encoder) + } + } + + return r0 +} + +// NewCodec creates a new instance of Codec. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCodec(t interface { + mock.TestingT + Cleanup(func()) +}) *Codec { + mock := &Codec{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mocknetwork/compressor.go b/network/mock/compressor.go similarity index 84% rename from network/mocknetwork/compressor.go rename to network/mock/compressor.go index ad6f1cd716c..3392a78426e 100644 --- a/network/mocknetwork/compressor.go +++ b/network/mock/compressor.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mocknetwork +package mock import ( io "io" @@ -18,6 +18,10 @@ type Compressor struct { func (_m *Compressor) NewReader(_a0 io.Reader) (io.ReadCloser, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for NewReader") + } + var r0 io.ReadCloser var r1 error if rf, ok := ret.Get(0).(func(io.Reader) (io.ReadCloser, error)); ok { @@ -44,6 +48,10 @@ func (_m *Compressor) NewReader(_a0 io.Reader) (io.ReadCloser, error) { func (_m *Compressor) NewWriter(_a0 io.Writer) (network.WriteCloseFlusher, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for NewWriter") + } + var r0 network.WriteCloseFlusher var r1 error if rf, ok := ret.Get(0).(func(io.Writer) (network.WriteCloseFlusher, error)); ok { @@ -66,13 +74,12 @@ func (_m *Compressor) NewWriter(_a0 io.Writer) (network.WriteCloseFlusher, error return r0, r1 } -type mockConstructorTestingTNewCompressor interface { +// NewCompressor creates a new instance of Compressor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCompressor(t interface { mock.TestingT Cleanup(func()) -} - -// NewCompressor creates a new instance of Compressor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCompressor(t mockConstructorTestingTNewCompressor) *Compressor { +}) *Compressor { mock := &Compressor{} mock.Mock.Test(t) diff --git a/network/mocknetwork/conduit.go b/network/mock/conduit.go similarity index 82% rename from network/mocknetwork/conduit.go rename to network/mock/conduit.go index 06bb0f9f5f2..d9d82ffaea0 100644 --- a/network/mocknetwork/conduit.go +++ b/network/mock/conduit.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mocknetwork +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -14,10 +14,14 @@ type Conduit struct { mock.Mock } -// Close provides a mock function with given fields: +// Close provides a mock function with no fields func (_m *Conduit) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -39,6 +43,10 @@ func (_m *Conduit) Multicast(event interface{}, num uint, targetIDs ...flow.Iden _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for Multicast") + } + var r0 error if rf, ok := ret.Get(0).(func(interface{}, uint, ...flow.Identifier) error); ok { r0 = rf(event, num, targetIDs...) @@ -60,6 +68,10 @@ func (_m *Conduit) Publish(event interface{}, targetIDs ...flow.Identifier) erro _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for Publish") + } + var r0 error if rf, ok := ret.Get(0).(func(interface{}, ...flow.Identifier) error); ok { r0 = rf(event, targetIDs...) @@ -79,6 +91,10 @@ func (_m *Conduit) ReportMisbehavior(_a0 network.MisbehaviorReport) { func (_m *Conduit) Unicast(event interface{}, targetID flow.Identifier) error { ret := _m.Called(event, targetID) + if len(ret) == 0 { + panic("no return value specified for Unicast") + } + var r0 error if rf, ok := ret.Get(0).(func(interface{}, flow.Identifier) error); ok { r0 = rf(event, targetID) @@ -89,13 +105,12 @@ func (_m *Conduit) Unicast(event interface{}, targetID flow.Identifier) error { return r0 } -type mockConstructorTestingTNewConduit interface { +// NewConduit creates a new instance of Conduit. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConduit(t interface { mock.TestingT Cleanup(func()) -} - -// NewConduit creates a new instance of Conduit. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConduit(t mockConstructorTestingTNewConduit) *Conduit { +}) *Conduit { mock := &Conduit{} mock.Mock.Test(t) diff --git a/network/mock/conduit_adapter.go b/network/mock/conduit_adapter.go new file mode 100644 index 00000000000..b2ccff46d04 --- /dev/null +++ b/network/mock/conduit_adapter.go @@ -0,0 +1,122 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + channels "github.com/onflow/flow-go/network/channels" + + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" +) + +// ConduitAdapter is an autogenerated mock type for the ConduitAdapter type +type ConduitAdapter struct { + mock.Mock +} + +// MulticastOnChannel provides a mock function with given fields: _a0, _a1, _a2, _a3 +func (_m *ConduitAdapter) MulticastOnChannel(_a0 channels.Channel, _a1 interface{}, _a2 uint, _a3 ...flow.Identifier) error { + _va := make([]interface{}, len(_a3)) + for _i := range _a3 { + _va[_i] = _a3[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, _a1, _a2) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for MulticastOnChannel") + } + + var r0 error + if rf, ok := ret.Get(0).(func(channels.Channel, interface{}, uint, ...flow.Identifier) error); ok { + r0 = rf(_a0, _a1, _a2, _a3...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PublishOnChannel provides a mock function with given fields: _a0, _a1, _a2 +func (_m *ConduitAdapter) PublishOnChannel(_a0 channels.Channel, _a1 interface{}, _a2 ...flow.Identifier) error { + _va := make([]interface{}, len(_a2)) + for _i := range _a2 { + _va[_i] = _a2[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for PublishOnChannel") + } + + var r0 error + if rf, ok := ret.Get(0).(func(channels.Channel, interface{}, ...flow.Identifier) error); ok { + r0 = rf(_a0, _a1, _a2...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ReportMisbehaviorOnChannel provides a mock function with given fields: channel, report +func (_m *ConduitAdapter) ReportMisbehaviorOnChannel(channel channels.Channel, report network.MisbehaviorReport) { + _m.Called(channel, report) +} + +// UnRegisterChannel provides a mock function with given fields: channel +func (_m *ConduitAdapter) UnRegisterChannel(channel channels.Channel) error { + ret := _m.Called(channel) + + if len(ret) == 0 { + panic("no return value specified for UnRegisterChannel") + } + + var r0 error + if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { + r0 = rf(channel) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UnicastOnChannel provides a mock function with given fields: _a0, _a1, _a2 +func (_m *ConduitAdapter) UnicastOnChannel(_a0 channels.Channel, _a1 interface{}, _a2 flow.Identifier) error { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for UnicastOnChannel") + } + + var r0 error + if rf, ok := ret.Get(0).(func(channels.Channel, interface{}, flow.Identifier) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewConduitAdapter creates a new instance of ConduitAdapter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConduitAdapter(t interface { + mock.TestingT + Cleanup(func()) +}) *ConduitAdapter { + mock := &ConduitAdapter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mocknetwork/conduit_factory.go b/network/mock/conduit_factory.go similarity index 75% rename from network/mocknetwork/conduit_factory.go rename to network/mock/conduit_factory.go index abd1b8bdd6e..249b24b0d59 100644 --- a/network/mocknetwork/conduit_factory.go +++ b/network/mock/conduit_factory.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
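ConduitAdapter is the renamed network.Adapter (see the RegisterAdapter signature change in conduit_factory.go just below). A sketch of driving the new mock directly in a test; channels.TestNetworkChannel is assumed to exist, and the payload is illustrative:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/network/channels"
	netmock "github.com/onflow/flow-go/network/mock"
)

func TestPublishOnChannel(t *testing.T) {
	adapter := netmock.NewConduitAdapter(t)

	// Expect exactly one publish on the test channel with any payload; an
	// empty variadic target list models "all subscribers of the channel".
	adapter.On("PublishOnChannel", channels.TestNetworkChannel, mock.Anything).
		Return(nil).
		Once()

	require.NoError(t, adapter.PublishOnChannel(channels.TestNetworkChannel, "hello"))
}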
-package mocknetwork +package mock import ( context "context" @@ -21,6 +21,10 @@ type ConduitFactory struct { func (_m *ConduitFactory) NewConduit(_a0 context.Context, _a1 channels.Channel) (network.Conduit, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for NewConduit") + } + var r0 network.Conduit var r1 error if rf, ok := ret.Get(0).(func(context.Context, channels.Channel) (network.Conduit, error)); ok { @@ -44,11 +48,15 @@ func (_m *ConduitFactory) NewConduit(_a0 context.Context, _a1 channels.Channel) } // RegisterAdapter provides a mock function with given fields: _a0 -func (_m *ConduitFactory) RegisterAdapter(_a0 network.Adapter) error { +func (_m *ConduitFactory) RegisterAdapter(_a0 network.ConduitAdapter) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for RegisterAdapter") + } + var r0 error - if rf, ok := ret.Get(0).(func(network.Adapter) error); ok { + if rf, ok := ret.Get(0).(func(network.ConduitAdapter) error); ok { r0 = rf(_a0) } else { r0 = ret.Error(0) @@ -57,13 +65,12 @@ func (_m *ConduitFactory) RegisterAdapter(_a0 network.Adapter) error { return r0 } -type mockConstructorTestingTNewConduitFactory interface { +// NewConduitFactory creates a new instance of ConduitFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConduitFactory(t interface { mock.TestingT Cleanup(func()) -} - -// NewConduitFactory creates a new instance of ConduitFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConduitFactory(t mockConstructorTestingTNewConduitFactory) *ConduitFactory { +}) *ConduitFactory { mock := &ConduitFactory{} mock.Mock.Test(t) diff --git a/network/mock/connection.go b/network/mock/connection.go new file mode 100644 index 00000000000..d31100e1fea --- /dev/null +++ b/network/mock/connection.go @@ -0,0 +1,72 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// Connection is an autogenerated mock type for the Connection type +type Connection struct { + mock.Mock +} + +// Receive provides a mock function with no fields +func (_m *Connection) Receive() (interface{}, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Receive") + } + + var r0 interface{} + var r1 error + if rf, ok := ret.Get(0).(func() (interface{}, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Send provides a mock function with given fields: msg +func (_m *Connection) Send(msg interface{}) error { + ret := _m.Called(msg) + + if len(ret) == 0 { + panic("no return value specified for Send") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(msg) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewConnection creates a new instance of Connection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewConnection(t interface { + mock.TestingT + Cleanup(func()) +}) *Connection { + mock := &Connection{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/decoder.go b/network/mock/decoder.go new file mode 100644 index 00000000000..631f3977cff --- /dev/null +++ b/network/mock/decoder.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + messages "github.com/onflow/flow-go/model/messages" + mock "github.com/stretchr/testify/mock" +) + +// Decoder is an autogenerated mock type for the Decoder type +type Decoder struct { + mock.Mock +} + +// Decode provides a mock function with no fields +func (_m *Decoder) Decode() (messages.UntrustedMessage, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Decode") + } + + var r0 messages.UntrustedMessage + var r1 error + if rf, ok := ret.Get(0).(func() (messages.UntrustedMessage, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() messages.UntrustedMessage); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(messages.UntrustedMessage) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDecoder creates a new instance of Decoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDecoder(t interface { + mock.TestingT + Cleanup(func()) +}) *Decoder { + mock := &Decoder{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/disallow_list_notification_consumer.go b/network/mock/disallow_list_notification_consumer.go new file mode 100644 index 00000000000..4b983dfb817 --- /dev/null +++ b/network/mock/disallow_list_notification_consumer.go @@ -0,0 +1,37 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + network "github.com/onflow/flow-go/network" + mock "github.com/stretchr/testify/mock" +) + +// DisallowListNotificationConsumer is an autogenerated mock type for the DisallowListNotificationConsumer type +type DisallowListNotificationConsumer struct { + mock.Mock +} + +// OnAllowListNotification provides a mock function with given fields: _a0 +func (_m *DisallowListNotificationConsumer) OnAllowListNotification(_a0 *network.AllowListingUpdate) { + _m.Called(_a0) +} + +// OnDisallowListNotification provides a mock function with given fields: _a0 +func (_m *DisallowListNotificationConsumer) OnDisallowListNotification(_a0 *network.DisallowListingUpdate) { + _m.Called(_a0) +} + +// NewDisallowListNotificationConsumer creates a new instance of DisallowListNotificationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewDisallowListNotificationConsumer(t interface { + mock.TestingT + Cleanup(func()) +}) *DisallowListNotificationConsumer { + mock := &DisallowListNotificationConsumer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mocknetwork/encoder.go b/network/mock/encoder.go similarity index 75% rename from network/mocknetwork/encoder.go rename to network/mock/encoder.go index 41a260a7168..36600dfb412 100644 --- a/network/mocknetwork/encoder.go +++ b/network/mock/encoder.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mocknetwork +package mock import mock "github.com/stretchr/testify/mock" @@ -13,6 +13,10 @@ type Encoder struct { func (_m *Encoder) Encode(v interface{}) error { ret := _m.Called(v) + if len(ret) == 0 { + panic("no return value specified for Encode") + } + var r0 error if rf, ok := ret.Get(0).(func(interface{}) error); ok { r0 = rf(v) @@ -23,13 +27,12 @@ func (_m *Encoder) Encode(v interface{}) error { return r0 } -type mockConstructorTestingTNewEncoder interface { +// NewEncoder creates a new instance of Encoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEncoder(t interface { mock.TestingT Cleanup(func()) -} - -// NewEncoder creates a new instance of Encoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEncoder(t mockConstructorTestingTNewEncoder) *Encoder { +}) *Encoder { mock := &Encoder{} mock.Mock.Test(t) diff --git a/network/mock/engine.go b/network/mock/engine.go new file mode 100644 index 00000000000..a0c10674757 --- /dev/null +++ b/network/mock/engine.go @@ -0,0 +1,115 @@ +// Code generated by mockery. DO NOT EDIT. 
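OnAllowListNotification and OnDisallowListNotification return nothing, so their generated bodies only record the call; expectations still apply. A small sketch, assuming the netmock alias; the zero-value update struct is illustrative:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/onflow/flow-go/network"
	netmock "github.com/onflow/flow-go/network/mock"
)

func TestDisallowListNotification(t *testing.T) {
	consumer := netmock.NewDisallowListNotificationConsumer(t)

	// Void methods need no Return values; the expectation just records
	// that the call happened with a matching argument.
	consumer.On("OnDisallowListNotification", mock.Anything).Once()

	consumer.OnDisallowListNotification(&network.DisallowListingUpdate{})
}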
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + channels "github.com/onflow/flow-go/network/channels" + + mock "github.com/stretchr/testify/mock" +) + +// Engine is an autogenerated mock type for the Engine type +type Engine struct { + mock.Mock +} + +// Done provides a mock function with no fields +func (_m *Engine) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Process provides a mock function with given fields: channel, originID, event +func (_m *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { + ret := _m.Called(channel, originID, event) + + if len(ret) == 0 { + panic("no return value specified for Process") + } + + var r0 error + if rf, ok := ret.Get(0).(func(channels.Channel, flow.Identifier, interface{}) error); ok { + r0 = rf(channel, originID, event) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ProcessLocal provides a mock function with given fields: event +func (_m *Engine) ProcessLocal(event interface{}) error { + ret := _m.Called(event) + + if len(ret) == 0 { + panic("no return value specified for ProcessLocal") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(event) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Ready provides a mock function with no fields +func (_m *Engine) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Submit provides a mock function with given fields: channel, originID, event +func (_m *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { + _m.Called(channel, originID, event) +} + +// SubmitLocal provides a mock function with given fields: event +func (_m *Engine) SubmitLocal(event interface{}) { + _m.Called(event) +} + +// NewEngine creates a new instance of Engine. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEngine(t interface { + mock.TestingT + Cleanup(func()) +}) *Engine { + mock := &Engine{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/engine_registry.go b/network/mock/engine_registry.go new file mode 100644 index 00000000000..ef7c2378ec7 --- /dev/null +++ b/network/mock/engine_registry.go @@ -0,0 +1,177 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + datastore "github.com/ipfs/go-datastore" + channels "github.com/onflow/flow-go/network/channels" + + irrecoverable "github.com/onflow/flow-go/module/irrecoverable" + + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" + + protocol "github.com/libp2p/go-libp2p/core/protocol" +) + +// EngineRegistry is an autogenerated mock type for the EngineRegistry type +type EngineRegistry struct { + mock.Mock +} + +// Done provides a mock function with no fields +func (_m *EngineRegistry) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Ready provides a mock function with no fields +func (_m *EngineRegistry) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Register provides a mock function with given fields: channel, messageProcessor +func (_m *EngineRegistry) Register(channel channels.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { + ret := _m.Called(channel, messageProcessor) + + if len(ret) == 0 { + panic("no return value specified for Register") + } + + var r0 network.Conduit + var r1 error + if rf, ok := ret.Get(0).(func(channels.Channel, network.MessageProcessor) (network.Conduit, error)); ok { + return rf(channel, messageProcessor) + } + if rf, ok := ret.Get(0).(func(channels.Channel, network.MessageProcessor) network.Conduit); ok { + r0 = rf(channel, messageProcessor) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(network.Conduit) + } + } + + if rf, ok := ret.Get(1).(func(channels.Channel, network.MessageProcessor) error); ok { + r1 = rf(channel, messageProcessor) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RegisterBlobService provides a mock function with given fields: channel, store, opts +func (_m *EngineRegistry) RegisterBlobService(channel channels.Channel, store datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, channel, store) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for RegisterBlobService") + } + + var r0 network.BlobService + var r1 error + if rf, ok := ret.Get(0).(func(channels.Channel, datastore.Batching, ...network.BlobServiceOption) (network.BlobService, error)); ok { + return rf(channel, store, opts...) + } + if rf, ok := ret.Get(0).(func(channels.Channel, datastore.Batching, ...network.BlobServiceOption) network.BlobService); ok { + r0 = rf(channel, store, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(network.BlobService) + } + } + + if rf, ok := ret.Get(1).(func(channels.Channel, datastore.Batching, ...network.BlobServiceOption) error); ok { + r1 = rf(channel, store, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RegisterPingService provides a mock function with given fields: pingProtocolID, pingInfoProvider +func (_m *EngineRegistry) RegisterPingService(pingProtocolID protocol.ID, pingInfoProvider network.PingInfoProvider) (network.PingService, error) { + ret := _m.Called(pingProtocolID, pingInfoProvider) + + if len(ret) == 0 { + panic("no return value specified for RegisterPingService") + } + + var r0 network.PingService + var r1 error + if rf, ok := ret.Get(0).(func(protocol.ID, network.PingInfoProvider) (network.PingService, error)); ok { + return rf(pingProtocolID, pingInfoProvider) + } + if rf, ok := ret.Get(0).(func(protocol.ID, network.PingInfoProvider) network.PingService); ok { + r0 = rf(pingProtocolID, pingInfoProvider) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(network.PingService) + } + } + + if rf, ok := ret.Get(1).(func(protocol.ID, network.PingInfoProvider) error); ok { + r1 = rf(pingProtocolID, pingInfoProvider) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: _a0 +func (_m *EngineRegistry) Start(_a0 irrecoverable.SignalerContext) { + _m.Called(_a0) +} + +// NewEngineRegistry creates a new instance of EngineRegistry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEngineRegistry(t interface { + mock.TestingT + Cleanup(func()) +}) *EngineRegistry { + mock := &EngineRegistry{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/incoming_message_scope.go b/network/mock/incoming_message_scope.go new file mode 100644 index 00000000000..50a58fff6d8 --- /dev/null +++ b/network/mock/incoming_message_scope.go @@ -0,0 +1,203 @@ +// Code generated by mockery. DO NOT EDIT. 
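EngineRegistry appears to absorb the registration surface that previously sat alongside the deleted Middleware: engines register a MessageProcessor per channel and receive a Conduit back. A sketch of wiring the generated mocks together; channels.TestNetworkChannel is assumed:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/network/channels"
	netmock "github.com/onflow/flow-go/network/mock"
)

func TestEngineRegistration(t *testing.T) {
	registry := netmock.NewEngineRegistry(t)
	conduit := netmock.NewConduit(t)
	engine := netmock.NewEngine(t) // Process(...) satisfies network.MessageProcessor

	registry.On("Register", channels.TestNetworkChannel, engine).
		Return(conduit, nil).
		Once()

	c, err := registry.Register(channels.TestNetworkChannel, engine)
	require.NoError(t, err)
	require.Same(t, conduit, c)
}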
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + channels "github.com/onflow/flow-go/network/channels" + + message "github.com/onflow/flow-go/network/message" + + mock "github.com/stretchr/testify/mock" +) + +// IncomingMessageScope is an autogenerated mock type for the IncomingMessageScope type +type IncomingMessageScope struct { + mock.Mock +} + +// Channel provides a mock function with no fields +func (_m *IncomingMessageScope) Channel() channels.Channel { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Channel") + } + + var r0 channels.Channel + if rf, ok := ret.Get(0).(func() channels.Channel); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(channels.Channel) + } + + return r0 +} + +// DecodedPayload provides a mock function with no fields +func (_m *IncomingMessageScope) DecodedPayload() interface{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DecodedPayload") + } + + var r0 interface{} + if rf, ok := ret.Get(0).(func() interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} + +// EventID provides a mock function with no fields +func (_m *IncomingMessageScope) EventID() []byte { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EventID") + } + + var r0 []byte + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// OriginId provides a mock function with no fields +func (_m *IncomingMessageScope) OriginId() flow.Identifier { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OriginId") + } + + var r0 flow.Identifier + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + return r0 +} + +// PayloadType provides a mock function with no fields +func (_m *IncomingMessageScope) PayloadType() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PayloadType") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Proto provides a mock function with no fields +func (_m *IncomingMessageScope) Proto() *message.Message { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Proto") + } + + var r0 *message.Message + if rf, ok := ret.Get(0).(func() *message.Message); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*message.Message) + } + } + + return r0 +} + +// Protocol provides a mock function with no fields +func (_m *IncomingMessageScope) Protocol() message.ProtocolType { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Protocol") + } + + var r0 message.ProtocolType + if rf, ok := ret.Get(0).(func() message.ProtocolType); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(message.ProtocolType) + } + + return r0 +} + +// Size provides a mock function with no fields +func (_m *IncomingMessageScope) Size() int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Size") + } + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// TargetIDs provides a mock function with no fields +func (_m *IncomingMessageScope) TargetIDs() flow.IdentifierList { + ret := 
_m.Called() + + if len(ret) == 0 { + panic("no return value specified for TargetIDs") + } + + var r0 flow.IdentifierList + if rf, ok := ret.Get(0).(func() flow.IdentifierList); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.IdentifierList) + } + } + + return r0 +} + +// NewIncomingMessageScope creates a new instance of IncomingMessageScope. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIncomingMessageScope(t interface { + mock.TestingT + Cleanup(func()) +}) *IncomingMessageScope { + mock := &IncomingMessageScope{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mocknetwork/message_processor.go b/network/mock/message_processor.go similarity index 80% rename from network/mocknetwork/message_processor.go rename to network/mock/message_processor.go index fa9f3e34573..0c39c0912ff 100644 --- a/network/mocknetwork/message_processor.go +++ b/network/mock/message_processor.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mocknetwork +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -18,6 +18,10 @@ type MessageProcessor struct { func (_m *MessageProcessor) Process(channel channels.Channel, originID flow.Identifier, message interface{}) error { ret := _m.Called(channel, originID, message) + if len(ret) == 0 { + panic("no return value specified for Process") + } + var r0 error if rf, ok := ret.Get(0).(func(channels.Channel, flow.Identifier, interface{}) error); ok { r0 = rf(channel, originID, message) @@ -28,13 +32,12 @@ func (_m *MessageProcessor) Process(channel channels.Channel, originID flow.Iden return r0 } -type mockConstructorTestingTNewMessageProcessor interface { +// NewMessageProcessor creates a new instance of MessageProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMessageProcessor(t interface { mock.TestingT Cleanup(func()) -} - -// NewMessageProcessor creates a new instance of MessageProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMessageProcessor(t mockConstructorTestingTNewMessageProcessor) *MessageProcessor { +}) *MessageProcessor { mock := &MessageProcessor{} mock.Mock.Test(t) diff --git a/network/mock/message_queue.go b/network/mock/message_queue.go new file mode 100644 index 00000000000..ef6a21a1da0 --- /dev/null +++ b/network/mock/message_queue.go @@ -0,0 +1,80 @@ +// Code generated by mockery. DO NOT EDIT. 
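Because IncomingMessageScope is now an interface (message_scope.go above), a test can fabricate a fully controlled inbound message without touching a codec or the wire format. A sketch; the fixture helper, channel constant, and payload-type string are assumptions:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/network/channels"
	netmock "github.com/onflow/flow-go/network/mock"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestFabricatedIncomingMessage(t *testing.T) {
	msg := netmock.NewIncomingMessageScope(t)
	msg.On("Channel").Return(channels.TestNetworkChannel)
	msg.On("OriginId").Return(unittest.IdentifierFixture())
	msg.On("PayloadType").Return("messages.TestMessage")

	// msg now satisfies network.IncomingMessageScope for any consumer.
	require.Equal(t, channels.TestNetworkChannel, msg.Channel())
	require.Equal(t, "messages.TestMessage", msg.PayloadType())
	require.NotZero(t, msg.OriginId())
}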
+ +package mock + +import mock "github.com/stretchr/testify/mock" + +// MessageQueue is an autogenerated mock type for the MessageQueue type +type MessageQueue struct { + mock.Mock +} + +// Insert provides a mock function with given fields: message +func (_m *MessageQueue) Insert(message interface{}) error { + ret := _m.Called(message) + + if len(ret) == 0 { + panic("no return value specified for Insert") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(message) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Len provides a mock function with no fields +func (_m *MessageQueue) Len() int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Len") + } + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// Remove provides a mock function with no fields +func (_m *MessageQueue) Remove() interface{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 interface{} + if rf, ok := ret.Get(0).(func() interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} + +// NewMessageQueue creates a new instance of MessageQueue. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMessageQueue(t interface { + mock.TestingT + Cleanup(func()) +}) *MessageQueue { + mock := &MessageQueue{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mocknetwork/message_validator.go b/network/mock/message_validator.go similarity index 77% rename from network/mocknetwork/message_validator.go rename to network/mock/message_validator.go index f2c78f75d20..f0980e11102 100644 --- a/network/mocknetwork/message_validator.go +++ b/network/mock/message_validator.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mocknetwork +package mock import ( network "github.com/onflow/flow-go/network" @@ -16,6 +16,10 @@ type MessageValidator struct { func (_m *MessageValidator) Validate(msg network.IncomingMessageScope) bool { ret := _m.Called(msg) + if len(ret) == 0 { + panic("no return value specified for Validate") + } + var r0 bool if rf, ok := ret.Get(0).(func(network.IncomingMessageScope) bool); ok { r0 = rf(msg) @@ -26,13 +30,12 @@ func (_m *MessageValidator) Validate(msg network.IncomingMessageScope) bool { return r0 } -type mockConstructorTestingTNewMessageValidator interface { +// NewMessageValidator creates a new instance of MessageValidator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMessageValidator(t interface { mock.TestingT Cleanup(func()) -} - -// NewMessageValidator creates a new instance of MessageValidator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewMessageValidator(t mockConstructorTestingTNewMessageValidator) *MessageValidator { +}) *MessageValidator { mock := &MessageValidator{} mock.Mock.Test(t) diff --git a/network/mock/misbehavior_report.go b/network/mock/misbehavior_report.go new file mode 100644 index 00000000000..a0528feeb7f --- /dev/null +++ b/network/mock/misbehavior_report.go @@ -0,0 +1,85 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" +) + +// MisbehaviorReport is an autogenerated mock type for the MisbehaviorReport type +type MisbehaviorReport struct { + mock.Mock +} + +// OriginId provides a mock function with no fields +func (_m *MisbehaviorReport) OriginId() flow.Identifier { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for OriginId") + } + + var r0 flow.Identifier + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + return r0 +} + +// Penalty provides a mock function with no fields +func (_m *MisbehaviorReport) Penalty() float64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Penalty") + } + + var r0 float64 + if rf, ok := ret.Get(0).(func() float64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(float64) + } + + return r0 +} + +// Reason provides a mock function with no fields +func (_m *MisbehaviorReport) Reason() network.Misbehavior { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Reason") + } + + var r0 network.Misbehavior + if rf, ok := ret.Get(0).(func() network.Misbehavior); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(network.Misbehavior) + } + + return r0 +} + +// NewMisbehaviorReport creates a new instance of MisbehaviorReport. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMisbehaviorReport(t interface { + mock.TestingT + Cleanup(func()) +}) *MisbehaviorReport { + mock := &MisbehaviorReport{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/misbehavior_report_consumer.go b/network/mock/misbehavior_report_consumer.go new file mode 100644 index 00000000000..27c1bf3acf9 --- /dev/null +++ b/network/mock/misbehavior_report_consumer.go @@ -0,0 +1,34 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + channels "github.com/onflow/flow-go/network/channels" + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" +) + +// MisbehaviorReportConsumer is an autogenerated mock type for the MisbehaviorReportConsumer type +type MisbehaviorReportConsumer struct { + mock.Mock +} + +// ReportMisbehaviorOnChannel provides a mock function with given fields: channel, report +func (_m *MisbehaviorReportConsumer) ReportMisbehaviorOnChannel(channel channels.Channel, report network.MisbehaviorReport) { + _m.Called(channel, report) +} + +// NewMisbehaviorReportConsumer creates a new instance of MisbehaviorReportConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMisbehaviorReportConsumer(t interface { + mock.TestingT + Cleanup(func()) +}) *MisbehaviorReportConsumer { + mock := &MisbehaviorReportConsumer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/misbehavior_report_manager.go b/network/mock/misbehavior_report_manager.go new file mode 100644 index 00000000000..a3fd58c28d0 --- /dev/null +++ b/network/mock/misbehavior_report_manager.go @@ -0,0 +1,81 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + irrecoverable "github.com/onflow/flow-go/module/irrecoverable" + channels "github.com/onflow/flow-go/network/channels" + + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" +) + +// MisbehaviorReportManager is an autogenerated mock type for the MisbehaviorReportManager type +type MisbehaviorReportManager struct { + mock.Mock +} + +// Done provides a mock function with no fields +func (_m *MisbehaviorReportManager) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// HandleMisbehaviorReport provides a mock function with given fields: _a0, _a1 +func (_m *MisbehaviorReportManager) HandleMisbehaviorReport(_a0 channels.Channel, _a1 network.MisbehaviorReport) { + _m.Called(_a0, _a1) +} + +// Ready provides a mock function with no fields +func (_m *MisbehaviorReportManager) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *MisbehaviorReportManager) Start(_a0 irrecoverable.SignalerContext) { + _m.Called(_a0) +} + +// NewMisbehaviorReportManager creates a new instance of MisbehaviorReportManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMisbehaviorReportManager(t interface { + mock.TestingT + Cleanup(func()) +}) *MisbehaviorReportManager { + mock := &MisbehaviorReportManager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mocknetwork/misbehavior_reporter.go b/network/mock/misbehavior_reporter.go similarity index 75% rename from network/mocknetwork/misbehavior_reporter.go rename to network/mock/misbehavior_reporter.go index 101d7e32f90..b6690f35d2c 100644 --- a/network/mocknetwork/misbehavior_reporter.go +++ b/network/mock/misbehavior_reporter.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mocknetwork +package mock import ( network "github.com/onflow/flow-go/network" @@ -17,13 +17,12 @@ func (_m *MisbehaviorReporter) ReportMisbehavior(_a0 network.MisbehaviorReport) _m.Called(_a0) } -type mockConstructorTestingTNewMisbehaviorReporter interface { +// NewMisbehaviorReporter creates a new instance of MisbehaviorReporter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewMisbehaviorReporter(t interface { mock.TestingT Cleanup(func()) -} - -// NewMisbehaviorReporter creates a new instance of MisbehaviorReporter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMisbehaviorReporter(t mockConstructorTestingTNewMisbehaviorReporter) *MisbehaviorReporter { +}) *MisbehaviorReporter { mock := &MisbehaviorReporter{} mock.Mock.Test(t) diff --git a/network/mock/outgoing_message_scope.go b/network/mock/outgoing_message_scope.go new file mode 100644 index 00000000000..e888f89e761 --- /dev/null +++ b/network/mock/outgoing_message_scope.go @@ -0,0 +1,125 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + channels "github.com/onflow/flow-go/network/channels" + + message "github.com/onflow/flow-go/network/message" + + mock "github.com/stretchr/testify/mock" +) + +// OutgoingMessageScope is an autogenerated mock type for the OutgoingMessageScope type +type OutgoingMessageScope struct { + mock.Mock +} + +// PayloadType provides a mock function with no fields +func (_m *OutgoingMessageScope) PayloadType() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PayloadType") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Proto provides a mock function with no fields +func (_m *OutgoingMessageScope) Proto() *message.Message { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Proto") + } + + var r0 *message.Message + if rf, ok := ret.Get(0).(func() *message.Message); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*message.Message) + } + } + + return r0 +} + +// Size provides a mock function with no fields +func (_m *OutgoingMessageScope) Size() int { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Size") + } + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// TargetIds provides a mock function with no fields +func (_m *OutgoingMessageScope) TargetIds() flow.IdentifierList { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TargetIds") + } + + var r0 flow.IdentifierList + if rf, ok := ret.Get(0).(func() flow.IdentifierList); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.IdentifierList) + } + } + + return r0 +} + +// Topic provides a mock function with no fields +func (_m *OutgoingMessageScope) Topic() channels.Topic { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Topic") + } + + var r0 channels.Topic + if rf, ok := ret.Get(0).(func() channels.Topic); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(channels.Topic) + } + + return r0 +} + +// NewOutgoingMessageScope creates a new instance of OutgoingMessageScope. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewOutgoingMessageScope(t interface { + mock.TestingT + Cleanup(func()) +}) *OutgoingMessageScope { + mock := &OutgoingMessageScope{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/ping_info_provider.go b/network/mock/ping_info_provider.go new file mode 100644 index 00000000000..cb0d46c9750 --- /dev/null +++ b/network/mock/ping_info_provider.go @@ -0,0 +1,78 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// PingInfoProvider is an autogenerated mock type for the PingInfoProvider type +type PingInfoProvider struct { + mock.Mock +} + +// HotstuffView provides a mock function with no fields +func (_m *PingInfoProvider) HotstuffView() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for HotstuffView") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// SealedBlockHeight provides a mock function with no fields +func (_m *PingInfoProvider) SealedBlockHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SealedBlockHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// SoftwareVersion provides a mock function with no fields +func (_m *PingInfoProvider) SoftwareVersion() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SoftwareVersion") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// NewPingInfoProvider creates a new instance of PingInfoProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPingInfoProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *PingInfoProvider { + mock := &PingInfoProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mocknetwork/ping_service.go b/network/mock/ping_service.go similarity index 86% rename from network/mocknetwork/ping_service.go rename to network/mock/ping_service.go index 6ea49fe96a7..8e189e91d67 100644 --- a/network/mocknetwork/ping_service.go +++ b/network/mock/ping_service.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mocknetwork +package mock import ( context "context" @@ -22,6 +22,10 @@ type PingService struct { func (_m *PingService) Ping(ctx context.Context, peerID peer.ID) (message.PingResponse, time.Duration, error) { ret := _m.Called(ctx, peerID) + if len(ret) == 0 { + panic("no return value specified for Ping") + } + var r0 message.PingResponse var r1 time.Duration var r2 error @@ -49,13 +53,12 @@ func (_m *PingService) Ping(ctx context.Context, peerID peer.ID) (message.PingRe return r0, r1, r2 } -type mockConstructorTestingTNewPingService interface { +// NewPingService creates a new instance of PingService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPingService(t interface { mock.TestingT Cleanup(func()) -} - -// NewPingService creates a new instance of PingService. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPingService(t mockConstructorTestingTNewPingService) *PingService { +}) *PingService { mock := &PingService{} mock.Mock.Test(t) diff --git a/network/mocknetwork/subscription_manager.go b/network/mock/subscription_manager.go similarity index 81% rename from network/mocknetwork/subscription_manager.go rename to network/mock/subscription_manager.go index 3cc901de877..ed6a3c45f90 100644 --- a/network/mocknetwork/subscription_manager.go +++ b/network/mock/subscription_manager.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mocknetwork +package mock import ( channels "github.com/onflow/flow-go/network/channels" @@ -14,10 +14,14 @@ type SubscriptionManager struct { mock.Mock } -// Channels provides a mock function with given fields: +// Channels provides a mock function with no fields func (_m *SubscriptionManager) Channels() channels.ChannelList { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Channels") + } + var r0 channels.ChannelList if rf, ok := ret.Get(0).(func() channels.ChannelList); ok { r0 = rf() @@ -34,6 +38,10 @@ func (_m *SubscriptionManager) Channels() channels.ChannelList { func (_m *SubscriptionManager) GetEngine(channel channels.Channel) (network.MessageProcessor, error) { ret := _m.Called(channel) + if len(ret) == 0 { + panic("no return value specified for GetEngine") + } + var r0 network.MessageProcessor var r1 error if rf, ok := ret.Get(0).(func(channels.Channel) (network.MessageProcessor, error)); ok { @@ -60,6 +68,10 @@ func (_m *SubscriptionManager) GetEngine(channel channels.Channel) (network.Mess func (_m *SubscriptionManager) Register(channel channels.Channel, engine network.MessageProcessor) error { ret := _m.Called(channel, engine) + if len(ret) == 0 { + panic("no return value specified for Register") + } + var r0 error if rf, ok := ret.Get(0).(func(channels.Channel, network.MessageProcessor) error); ok { r0 = rf(channel, engine) @@ -74,6 +86,10 @@ func (_m *SubscriptionManager) Register(channel channels.Channel, engine network func (_m *SubscriptionManager) Unregister(channel channels.Channel) error { ret := _m.Called(channel) + if len(ret) == 0 { + panic("no return value specified for Unregister") + } + var r0 error if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { r0 = rf(channel) @@ -84,13 +100,12 @@ func (_m *SubscriptionManager) Unregister(channel channels.Channel) error { return r0 } -type mockConstructorTestingTNewSubscriptionManager interface { +// NewSubscriptionManager creates a new instance of SubscriptionManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSubscriptionManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewSubscriptionManager creates a new instance of SubscriptionManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewSubscriptionManager(t mockConstructorTestingTNewSubscriptionManager) *SubscriptionManager { +}) *SubscriptionManager { mock := &SubscriptionManager{} mock.Mock.Test(t) diff --git a/network/mocknetwork/topology.go b/network/mock/topology.go similarity index 79% rename from network/mocknetwork/topology.go rename to network/mock/topology.go index 04a0dec6f17..55bf8e15038 100644 --- a/network/mocknetwork/topology.go +++ b/network/mock/topology.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mocknetwork +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -16,6 +16,10 @@ type Topology struct { func (_m *Topology) Fanout(ids flow.IdentityList) flow.IdentityList { ret := _m.Called(ids) + if len(ret) == 0 { + panic("no return value specified for Fanout") + } + var r0 flow.IdentityList if rf, ok := ret.Get(0).(func(flow.IdentityList) flow.IdentityList); ok { r0 = rf(ids) @@ -28,13 +32,12 @@ func (_m *Topology) Fanout(ids flow.IdentityList) flow.IdentityList { return r0 } -type mockConstructorTestingTNewTopology interface { +// NewTopology creates a new instance of Topology. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTopology(t interface { mock.TestingT Cleanup(func()) -} - -// NewTopology creates a new instance of Topology. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTopology(t mockConstructorTestingTNewTopology) *Topology { +}) *Topology { mock := &Topology{} mock.Mock.Test(t) diff --git a/network/mock/underlay.go b/network/mock/underlay.go new file mode 100644 index 00000000000..9ea08e42128 --- /dev/null +++ b/network/mock/underlay.go @@ -0,0 +1,120 @@ +// Code generated by mockery. DO NOT EDIT. 
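The renames and constructor rewrites above follow one pattern across the package: files move from network/mocknetwork (package mocknetwork) to network/mock (package mock), and the named mockConstructorTestingTNewX helper interfaces are replaced by an inline interface parameter that a plain *testing.T already satisfies, so call sites only need the new import path. A minimal caller sketch, not part of this diff (the test file and names are hypothetical; the netmock alias is only to avoid shadowing testify's mock package):

package example_test

import (
	"testing"

	flow "github.com/onflow/flow-go/model/flow"
	netmock "github.com/onflow/flow-go/network/mock"
)

func TestTopologyFanout(t *testing.T) {
	// *testing.T satisfies the inline `interface { mock.TestingT; Cleanup(func()) }`.
	top := netmock.NewTopology(t)
	ids := flow.IdentityList{}

	// Stub the expected call; AssertExpectations runs automatically via
	// the t.Cleanup registered by the constructor, so no defer is needed.
	top.On("Fanout", ids).Return(ids)

	if got := top.Fanout(ids); len(got) != 0 {
		t.Fatalf("unexpected fanout: %v", got)
	}
}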
+ +package mock + +import ( + channels "github.com/onflow/flow-go/network/channels" + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" +) + +// Underlay is an autogenerated mock type for the Underlay type +type Underlay struct { + mock.Mock +} + +// Done provides a mock function with no fields +func (_m *Underlay) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// OnAllowListNotification provides a mock function with given fields: _a0 +func (_m *Underlay) OnAllowListNotification(_a0 *network.AllowListingUpdate) { + _m.Called(_a0) +} + +// OnDisallowListNotification provides a mock function with given fields: _a0 +func (_m *Underlay) OnDisallowListNotification(_a0 *network.DisallowListingUpdate) { + _m.Called(_a0) +} + +// Ready provides a mock function with no fields +func (_m *Underlay) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Subscribe provides a mock function with given fields: channel +func (_m *Underlay) Subscribe(channel channels.Channel) error { + ret := _m.Called(channel) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 error + if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { + r0 = rf(channel) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Unsubscribe provides a mock function with given fields: channel +func (_m *Underlay) Unsubscribe(channel channels.Channel) error { + ret := _m.Called(channel) + + if len(ret) == 0 { + panic("no return value specified for Unsubscribe") + } + + var r0 error + if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { + r0 = rf(channel) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateNodeAddresses provides a mock function with no fields +func (_m *Underlay) UpdateNodeAddresses() { + _m.Called() +} + +// NewUnderlay creates a new instance of Underlay. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUnderlay(t interface { + mock.TestingT + Cleanup(func()) +}) *Underlay { + mock := &Underlay{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/violations_consumer.go b/network/mock/violations_consumer.go new file mode 100644 index 00000000000..9a5b824d1b4 --- /dev/null +++ b/network/mock/violations_consumer.go @@ -0,0 +1,62 @@ +// Code generated by mockery. DO NOT EDIT. 
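The regenerated method bodies also gain a guard the old v2.21.4 output lacked: when a call matches an expectation that carries no Return(...) values, the mock now panics with an explicit "no return value specified for ..." message instead of failing deep inside testify's argument handling. A hedged sketch against the Underlay mock above (test and channel names are hypothetical):

package example_test

import (
	"testing"

	channels "github.com/onflow/flow-go/network/channels"
	netmock "github.com/onflow/flow-go/network/mock"
)

func TestUnderlaySubscribe(t *testing.T) {
	u := netmock.NewUnderlay(t)
	ch := channels.Channel("hypothetical-channel")

	// u.On("Subscribe", ch) // an expectation set without Return(...)
	// u.Subscribe(ch)       // would now panic: "no return value specified for Subscribe"

	u.On("Subscribe", ch).Return(nil) // stub with an explicit nil error
	if err := u.Subscribe(ch); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}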
+ +package mock + +import ( + network "github.com/onflow/flow-go/network" + mock "github.com/stretchr/testify/mock" +) + +// ViolationsConsumer is an autogenerated mock type for the ViolationsConsumer type +type ViolationsConsumer struct { + mock.Mock +} + +// OnInvalidMsgError provides a mock function with given fields: violation +func (_m *ViolationsConsumer) OnInvalidMsgError(violation *network.Violation) { + _m.Called(violation) +} + +// OnSenderEjectedError provides a mock function with given fields: violation +func (_m *ViolationsConsumer) OnSenderEjectedError(violation *network.Violation) { + _m.Called(violation) +} + +// OnUnAuthorizedSenderError provides a mock function with given fields: violation +func (_m *ViolationsConsumer) OnUnAuthorizedSenderError(violation *network.Violation) { + _m.Called(violation) +} + +// OnUnauthorizedPublishOnChannel provides a mock function with given fields: violation +func (_m *ViolationsConsumer) OnUnauthorizedPublishOnChannel(violation *network.Violation) { + _m.Called(violation) +} + +// OnUnauthorizedUnicastOnChannel provides a mock function with given fields: violation +func (_m *ViolationsConsumer) OnUnauthorizedUnicastOnChannel(violation *network.Violation) { + _m.Called(violation) +} + +// OnUnexpectedError provides a mock function with given fields: violation +func (_m *ViolationsConsumer) OnUnexpectedError(violation *network.Violation) { + _m.Called(violation) +} + +// OnUnknownMsgTypeError provides a mock function with given fields: violation +func (_m *ViolationsConsumer) OnUnknownMsgTypeError(violation *network.Violation) { + _m.Called(violation) +} + +// NewViolationsConsumer creates a new instance of ViolationsConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewViolationsConsumer(t interface { + mock.TestingT + Cleanup(func()) +}) *ViolationsConsumer { + mock := &ViolationsConsumer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mock/write_close_flusher.go b/network/mock/write_close_flusher.go new file mode 100644 index 00000000000..be3736694ea --- /dev/null +++ b/network/mock/write_close_flusher.go @@ -0,0 +1,88 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import mock "github.com/stretchr/testify/mock" + +// WriteCloseFlusher is an autogenerated mock type for the WriteCloseFlusher type +type WriteCloseFlusher struct { + mock.Mock +} + +// Close provides a mock function with no fields +func (_m *WriteCloseFlusher) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Flush provides a mock function with no fields +func (_m *WriteCloseFlusher) Flush() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Flush") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Write provides a mock function with given fields: p +func (_m *WriteCloseFlusher) Write(p []byte) (int, error) { + ret := _m.Called(p) + + if len(ret) == 0 { + panic("no return value specified for Write") + } + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (int, error)); ok { + return rf(p) + } + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(p) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewWriteCloseFlusher creates a new instance of WriteCloseFlusher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewWriteCloseFlusher(t interface { + mock.TestingT + Cleanup(func()) +}) *WriteCloseFlusher { + mock := &WriteCloseFlusher{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mocknetwork/adapter.go b/network/mocknetwork/adapter.go deleted file mode 100644 index 6cf0775432d..00000000000 --- a/network/mocknetwork/adapter.go +++ /dev/null @@ -1,100 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import ( - flow "github.com/onflow/flow-go/model/flow" - channels "github.com/onflow/flow-go/network/channels" - - mock "github.com/stretchr/testify/mock" -) - -// Adapter is an autogenerated mock type for the Adapter type -type Adapter struct { - mock.Mock -} - -// MulticastOnChannel provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *Adapter) MulticastOnChannel(_a0 channels.Channel, _a1 interface{}, _a2 uint, _a3 ...flow.Identifier) error { - _va := make([]interface{}, len(_a3)) - for _i := range _a3 { - _va[_i] = _a3[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1, _a2) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(channels.Channel, interface{}, uint, ...flow.Identifier) error); ok { - r0 = rf(_a0, _a1, _a2, _a3...) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// PublishOnChannel provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Adapter) PublishOnChannel(_a0 channels.Channel, _a1 interface{}, _a2 ...flow.Identifier) error { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(channels.Channel, interface{}, ...flow.Identifier) error); ok { - r0 = rf(_a0, _a1, _a2...) 
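As the WriteCloseFlusher.Write wrapper above shows, each generated method first checks whether the stored return value is a function with the method's full signature and, if so, invokes it; this lets a test compute results from the actual arguments rather than return fixed values. A sketch under that assumption (test name hypothetical):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	netmock "github.com/onflow/flow-go/network/mock"
)

func TestWriteComputedReturn(t *testing.T) {
	w := netmock.NewWriteCloseFlusher(t)

	// Passing a func([]byte) (int, error) to Return makes the generated
	// wrapper call it with the real argument at call time.
	w.On("Write", mock.Anything).Return(func(p []byte) (int, error) {
		return len(p), nil
	})
	w.On("Flush").Return(nil)

	n, err := w.Write([]byte("hello"))
	if err != nil || n != 5 {
		t.Fatalf("got n=%d err=%v", n, err)
	}
	if err := w.Flush(); err != nil {
		t.Fatalf("flush failed: %v", err)
	}
}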
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UnRegisterChannel provides a mock function with given fields: channel -func (_m *Adapter) UnRegisterChannel(channel channels.Channel) error { - ret := _m.Called(channel) - - var r0 error - if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { - r0 = rf(channel) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UnicastOnChannel provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Adapter) UnicastOnChannel(_a0 channels.Channel, _a1 interface{}, _a2 flow.Identifier) error { - ret := _m.Called(_a0, _a1, _a2) - - var r0 error - if rf, ok := ret.Get(0).(func(channels.Channel, interface{}, flow.Identifier) error); ok { - r0 = rf(_a0, _a1, _a2) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewAdapter interface { - mock.TestingT - Cleanup(func()) -} - -// NewAdapter creates a new instance of Adapter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAdapter(t mockConstructorTestingTNewAdapter) *Adapter { - mock := &Adapter{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/blob_getter.go b/network/mocknetwork/blob_getter.go deleted file mode 100644 index 1fa2c1e8f49..00000000000 --- a/network/mocknetwork/blob_getter.go +++ /dev/null @@ -1,74 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import ( - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - - context "context" - - mock "github.com/stretchr/testify/mock" -) - -// BlobGetter is an autogenerated mock type for the BlobGetter type -type BlobGetter struct { - mock.Mock -} - -// GetBlob provides a mock function with given fields: ctx, c -func (_m *BlobGetter) GetBlob(ctx context.Context, c cid.Cid) (blocks.Block, error) { - ret := _m.Called(ctx, c) - - var r0 blocks.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (blocks.Block, error)); ok { - return rf(ctx, c) - } - if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) blocks.Block); ok { - r0 = rf(ctx, c) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(blocks.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { - r1 = rf(ctx, c) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetBlobs provides a mock function with given fields: ctx, ks -func (_m *BlobGetter) GetBlobs(ctx context.Context, ks []cid.Cid) <-chan blocks.Block { - ret := _m.Called(ctx, ks) - - var r0 <-chan blocks.Block - if rf, ok := ret.Get(0).(func(context.Context, []cid.Cid) <-chan blocks.Block); ok { - r0 = rf(ctx, ks) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan blocks.Block) - } - } - - return r0 -} - -type mockConstructorTestingTNewBlobGetter interface { - mock.TestingT - Cleanup(func()) -} - -// NewBlobGetter creates a new instance of BlobGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlobGetter(t mockConstructorTestingTNewBlobGetter) *BlobGetter { - mock := &BlobGetter{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/blob_service.go b/network/mocknetwork/blob_service.go deleted file mode 100644 index acf392695c3..00000000000 --- a/network/mocknetwork/blob_service.go +++ /dev/null @@ -1,187 +0,0 @@ -// Code generated by mockery v2.21.4. 
DO NOT EDIT. - -package mocknetwork - -import ( - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - - context "context" - - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - - mock "github.com/stretchr/testify/mock" - - network "github.com/onflow/flow-go/network" -) - -// BlobService is an autogenerated mock type for the BlobService type -type BlobService struct { - mock.Mock -} - -// AddBlob provides a mock function with given fields: ctx, b -func (_m *BlobService) AddBlob(ctx context.Context, b blocks.Block) error { - ret := _m.Called(ctx, b) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, blocks.Block) error); ok { - r0 = rf(ctx, b) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AddBlobs provides a mock function with given fields: ctx, bs -func (_m *BlobService) AddBlobs(ctx context.Context, bs []blocks.Block) error { - ret := _m.Called(ctx, bs) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, []blocks.Block) error); ok { - r0 = rf(ctx, bs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteBlob provides a mock function with given fields: ctx, c -func (_m *BlobService) DeleteBlob(ctx context.Context, c cid.Cid) error { - ret := _m.Called(ctx, c) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) error); ok { - r0 = rf(ctx, c) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Done provides a mock function with given fields: -func (_m *BlobService) Done() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// GetBlob provides a mock function with given fields: ctx, c -func (_m *BlobService) GetBlob(ctx context.Context, c cid.Cid) (blocks.Block, error) { - ret := _m.Called(ctx, c) - - var r0 blocks.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (blocks.Block, error)); ok { - return rf(ctx, c) - } - if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) blocks.Block); ok { - r0 = rf(ctx, c) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(blocks.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, cid.Cid) error); ok { - r1 = rf(ctx, c) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetBlobs provides a mock function with given fields: ctx, ks -func (_m *BlobService) GetBlobs(ctx context.Context, ks []cid.Cid) <-chan blocks.Block { - ret := _m.Called(ctx, ks) - - var r0 <-chan blocks.Block - if rf, ok := ret.Get(0).(func(context.Context, []cid.Cid) <-chan blocks.Block); ok { - r0 = rf(ctx, ks) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan blocks.Block) - } - } - - return r0 -} - -// GetSession provides a mock function with given fields: ctx -func (_m *BlobService) GetSession(ctx context.Context) network.BlobGetter { - ret := _m.Called(ctx) - - var r0 network.BlobGetter - if rf, ok := ret.Get(0).(func(context.Context) network.BlobGetter); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(network.BlobGetter) - } - } - - return r0 -} - -// Ready provides a mock function with given fields: -func (_m *BlobService) Ready() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Start provides a mock 
function with given fields: _a0 -func (_m *BlobService) Start(_a0 irrecoverable.SignalerContext) { - _m.Called(_a0) -} - -// TriggerReprovide provides a mock function with given fields: ctx -func (_m *BlobService) TriggerReprovide(ctx context.Context) error { - ret := _m.Called(ctx) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewBlobService interface { - mock.TestingT - Cleanup(func()) -} - -// NewBlobService creates a new instance of BlobService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlobService(t mockConstructorTestingTNewBlobService) *BlobService { - mock := &BlobService{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/blob_service_option.go b/network/mocknetwork/blob_service_option.go deleted file mode 100644 index 7547090a254..00000000000 --- a/network/mocknetwork/blob_service_option.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import ( - network "github.com/onflow/flow-go/network" - mock "github.com/stretchr/testify/mock" -) - -// BlobServiceOption is an autogenerated mock type for the BlobServiceOption type -type BlobServiceOption struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0 -func (_m *BlobServiceOption) Execute(_a0 network.BlobService) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewBlobServiceOption interface { - mock.TestingT - Cleanup(func()) -} - -// NewBlobServiceOption creates a new instance of BlobServiceOption. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlobServiceOption(t mockConstructorTestingTNewBlobServiceOption) *BlobServiceOption { - mock := &BlobServiceOption{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/codec.go b/network/mocknetwork/codec.go deleted file mode 100644 index 3da3e34a5ba..00000000000 --- a/network/mocknetwork/codec.go +++ /dev/null @@ -1,114 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mocknetwork - -import ( - io "io" - - network "github.com/onflow/flow-go/network" - mock "github.com/stretchr/testify/mock" -) - -// Codec is an autogenerated mock type for the Codec type -type Codec struct { - mock.Mock -} - -// Decode provides a mock function with given fields: data -func (_m *Codec) Decode(data []byte) (interface{}, error) { - ret := _m.Called(data) - - var r0 interface{} - var r1 error - if rf, ok := ret.Get(0).(func([]byte) (interface{}, error)); ok { - return rf(data) - } - if rf, ok := ret.Get(0).(func([]byte) interface{}); ok { - r0 = rf(data) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(data) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Encode provides a mock function with given fields: v -func (_m *Codec) Encode(v interface{}) ([]byte, error) { - ret := _m.Called(v) - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(interface{}) ([]byte, error)); ok { - return rf(v) - } - if rf, ok := ret.Get(0).(func(interface{}) []byte); ok { - r0 = rf(v) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(interface{}) error); ok { - r1 = rf(v) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NewDecoder provides a mock function with given fields: r -func (_m *Codec) NewDecoder(r io.Reader) network.Decoder { - ret := _m.Called(r) - - var r0 network.Decoder - if rf, ok := ret.Get(0).(func(io.Reader) network.Decoder); ok { - r0 = rf(r) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(network.Decoder) - } - } - - return r0 -} - -// NewEncoder provides a mock function with given fields: w -func (_m *Codec) NewEncoder(w io.Writer) network.Encoder { - ret := _m.Called(w) - - var r0 network.Encoder - if rf, ok := ret.Get(0).(func(io.Writer) network.Encoder); ok { - r0 = rf(w) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(network.Encoder) - } - } - - return r0 -} - -type mockConstructorTestingTNewCodec interface { - mock.TestingT - Cleanup(func()) -} - -// NewCodec creates a new instance of Codec. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCodec(t mockConstructorTestingTNewCodec) *Codec { - mock := &Codec{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/connection.go b/network/mocknetwork/connection.go deleted file mode 100644 index 337d51fca93..00000000000 --- a/network/mocknetwork/connection.go +++ /dev/null @@ -1,65 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mocknetwork - -import mock "github.com/stretchr/testify/mock" - -// Connection is an autogenerated mock type for the Connection type -type Connection struct { - mock.Mock -} - -// Receive provides a mock function with given fields: -func (_m *Connection) Receive() (interface{}, error) { - ret := _m.Called() - - var r0 interface{} - var r1 error - if rf, ok := ret.Get(0).(func() (interface{}, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() interface{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Send provides a mock function with given fields: msg -func (_m *Connection) Send(msg interface{}) error { - ret := _m.Called(msg) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(msg) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewConnection interface { - mock.TestingT - Cleanup(func()) -} - -// NewConnection creates a new instance of Connection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConnection(t mockConstructorTestingTNewConnection) *Connection { - mock := &Connection{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/connector.go b/network/mocknetwork/connector.go deleted file mode 100644 index 7f6a50e317c..00000000000 --- a/network/mocknetwork/connector.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// Connector is an autogenerated mock type for the Connector type -type Connector struct { - mock.Mock -} - -// UpdatePeers provides a mock function with given fields: ctx, peerIDs -func (_m *Connector) UpdatePeers(ctx context.Context, peerIDs peer.IDSlice) { - _m.Called(ctx, peerIDs) -} - -type mockConstructorTestingTNewConnector interface { - mock.TestingT - Cleanup(func()) -} - -// NewConnector creates a new instance of Connector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConnector(t mockConstructorTestingTNewConnector) *Connector { - mock := &Connector{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/connector_host.go b/network/mocknetwork/connector_host.go deleted file mode 100644 index 51c7ac7b539..00000000000 --- a/network/mocknetwork/connector_host.go +++ /dev/null @@ -1,102 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mocknetwork - -import ( - network "github.com/libp2p/go-libp2p/core/network" - mock "github.com/stretchr/testify/mock" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// ConnectorHost is an autogenerated mock type for the ConnectorHost type -type ConnectorHost struct { - mock.Mock -} - -// ClosePeer provides a mock function with given fields: id -func (_m *ConnectorHost) ClosePeer(id peer.ID) error { - ret := _m.Called(id) - - var r0 error - if rf, ok := ret.Get(0).(func(peer.ID) error); ok { - r0 = rf(id) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Connections provides a mock function with given fields: -func (_m *ConnectorHost) Connections() []network.Conn { - ret := _m.Called() - - var r0 []network.Conn - if rf, ok := ret.Get(0).(func() []network.Conn); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]network.Conn) - } - } - - return r0 -} - -// ID provides a mock function with given fields: -func (_m *ConnectorHost) ID() peer.ID { - ret := _m.Called() - - var r0 peer.ID - if rf, ok := ret.Get(0).(func() peer.ID); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(peer.ID) - } - - return r0 -} - -// IsProtected provides a mock function with given fields: id -func (_m *ConnectorHost) IsProtected(id peer.ID) bool { - ret := _m.Called(id) - - var r0 bool - if rf, ok := ret.Get(0).(func(peer.ID) bool); ok { - r0 = rf(id) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// PeerInfo provides a mock function with given fields: id -func (_m *ConnectorHost) PeerInfo(id peer.ID) peer.AddrInfo { - ret := _m.Called(id) - - var r0 peer.AddrInfo - if rf, ok := ret.Get(0).(func(peer.ID) peer.AddrInfo); ok { - r0 = rf(id) - } else { - r0 = ret.Get(0).(peer.AddrInfo) - } - - return r0 -} - -type mockConstructorTestingTNewConnectorHost interface { - mock.TestingT - Cleanup(func()) -} - -// NewConnectorHost creates a new instance of ConnectorHost. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConnectorHost(t mockConstructorTestingTNewConnectorHost) *ConnectorHost { - mock := &ConnectorHost{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/decoder.go b/network/mocknetwork/decoder.go deleted file mode 100644 index 306fd9b3df1..00000000000 --- a/network/mocknetwork/decoder.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import mock "github.com/stretchr/testify/mock" - -// Decoder is an autogenerated mock type for the Decoder type -type Decoder struct { - mock.Mock -} - -// Decode provides a mock function with given fields: -func (_m *Decoder) Decode() (interface{}, error) { - ret := _m.Called() - - var r0 interface{} - var r1 error - if rf, ok := ret.Get(0).(func() (interface{}, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() interface{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewDecoder interface { - mock.TestingT - Cleanup(func()) -} - -// NewDecoder creates a new instance of Decoder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewDecoder(t mockConstructorTestingTNewDecoder) *Decoder { - mock := &Decoder{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/engine.go b/network/mocknetwork/engine.go deleted file mode 100644 index 47c82c8cb3d..00000000000 --- a/network/mocknetwork/engine.go +++ /dev/null @@ -1,100 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import ( - flow "github.com/onflow/flow-go/model/flow" - channels "github.com/onflow/flow-go/network/channels" - - mock "github.com/stretchr/testify/mock" -) - -// Engine is an autogenerated mock type for the Engine type -type Engine struct { - mock.Mock -} - -// Done provides a mock function with given fields: -func (_m *Engine) Done() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Process provides a mock function with given fields: channel, originID, event -func (_m *Engine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error { - ret := _m.Called(channel, originID, event) - - var r0 error - if rf, ok := ret.Get(0).(func(channels.Channel, flow.Identifier, interface{}) error); ok { - r0 = rf(channel, originID, event) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ProcessLocal provides a mock function with given fields: event -func (_m *Engine) ProcessLocal(event interface{}) error { - ret := _m.Called(event) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(event) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Ready provides a mock function with given fields: -func (_m *Engine) Ready() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Submit provides a mock function with given fields: channel, originID, event -func (_m *Engine) Submit(channel channels.Channel, originID flow.Identifier, event interface{}) { - _m.Called(channel, originID, event) -} - -// SubmitLocal provides a mock function with given fields: event -func (_m *Engine) SubmitLocal(event interface{}) { - _m.Called(event) -} - -type mockConstructorTestingTNewEngine interface { - mock.TestingT - Cleanup(func()) -} - -// NewEngine creates a new instance of Engine. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEngine(t mockConstructorTestingTNewEngine) *Engine { - mock := &Engine{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/message_queue.go b/network/mocknetwork/message_queue.go deleted file mode 100644 index 86ee98ec4cd..00000000000 --- a/network/mocknetwork/message_queue.go +++ /dev/null @@ -1,69 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mocknetwork - -import mock "github.com/stretchr/testify/mock" - -// MessageQueue is an autogenerated mock type for the MessageQueue type -type MessageQueue struct { - mock.Mock -} - -// Insert provides a mock function with given fields: message -func (_m *MessageQueue) Insert(message interface{}) error { - ret := _m.Called(message) - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(message) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Len provides a mock function with given fields: -func (_m *MessageQueue) Len() int { - ret := _m.Called() - - var r0 int - if rf, ok := ret.Get(0).(func() int); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(int) - } - - return r0 -} - -// Remove provides a mock function with given fields: -func (_m *MessageQueue) Remove() interface{} { - ret := _m.Called() - - var r0 interface{} - if rf, ok := ret.Get(0).(func() interface{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - return r0 -} - -type mockConstructorTestingTNewMessageQueue interface { - mock.TestingT - Cleanup(func()) -} - -// NewMessageQueue creates a new instance of MessageQueue. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMessageQueue(t mockConstructorTestingTNewMessageQueue) *MessageQueue { - mock := &MessageQueue{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/middleware.go b/network/mocknetwork/middleware.go deleted file mode 100644 index 457d8fd7360..00000000000 --- a/network/mocknetwork/middleware.go +++ /dev/null @@ -1,204 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import ( - datastore "github.com/ipfs/go-datastore" - channels "github.com/onflow/flow-go/network/channels" - - flow "github.com/onflow/flow-go/model/flow" - - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - - mock "github.com/stretchr/testify/mock" - - network "github.com/onflow/flow-go/network" - - protocol "github.com/libp2p/go-libp2p/core/protocol" -) - -// Middleware is an autogenerated mock type for the Middleware type -type Middleware struct { - mock.Mock -} - -// Done provides a mock function with given fields: -func (_m *Middleware) Done() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// IsConnected provides a mock function with given fields: nodeID -func (_m *Middleware) IsConnected(nodeID flow.Identifier) (bool, error) { - ret := _m.Called(nodeID) - - var r0 bool - var r1 error - if rf, ok := ret.Get(0).(func(flow.Identifier) (bool, error)); ok { - return rf(nodeID) - } - if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { - r0 = rf(nodeID) - } else { - r0 = ret.Get(0).(bool) - } - - if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { - r1 = rf(nodeID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NewBlobService provides a mock function with given fields: channel, store, opts -func (_m *Middleware) NewBlobService(channel channels.Channel, store datastore.Batching, opts ...network.BlobServiceOption) network.BlobService { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, channel, store) - _ca = append(_ca, 
_va...) - ret := _m.Called(_ca...) - - var r0 network.BlobService - if rf, ok := ret.Get(0).(func(channels.Channel, datastore.Batching, ...network.BlobServiceOption) network.BlobService); ok { - r0 = rf(channel, store, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(network.BlobService) - } - } - - return r0 -} - -// NewPingService provides a mock function with given fields: pingProtocol, provider -func (_m *Middleware) NewPingService(pingProtocol protocol.ID, provider network.PingInfoProvider) network.PingService { - ret := _m.Called(pingProtocol, provider) - - var r0 network.PingService - if rf, ok := ret.Get(0).(func(protocol.ID, network.PingInfoProvider) network.PingService); ok { - r0 = rf(pingProtocol, provider) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(network.PingService) - } - } - - return r0 -} - -// Publish provides a mock function with given fields: msg -func (_m *Middleware) Publish(msg *network.OutgoingMessageScope) error { - ret := _m.Called(msg) - - var r0 error - if rf, ok := ret.Get(0).(func(*network.OutgoingMessageScope) error); ok { - r0 = rf(msg) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Ready provides a mock function with given fields: -func (_m *Middleware) Ready() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// SendDirect provides a mock function with given fields: msg -func (_m *Middleware) SendDirect(msg *network.OutgoingMessageScope) error { - ret := _m.Called(msg) - - var r0 error - if rf, ok := ret.Get(0).(func(*network.OutgoingMessageScope) error); ok { - r0 = rf(msg) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetOverlay provides a mock function with given fields: _a0 -func (_m *Middleware) SetOverlay(_a0 network.Overlay) { - _m.Called(_a0) -} - -// Start provides a mock function with given fields: _a0 -func (_m *Middleware) Start(_a0 irrecoverable.SignalerContext) { - _m.Called(_a0) -} - -// Subscribe provides a mock function with given fields: channel -func (_m *Middleware) Subscribe(channel channels.Channel) error { - ret := _m.Called(channel) - - var r0 error - if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { - r0 = rf(channel) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Unsubscribe provides a mock function with given fields: channel -func (_m *Middleware) Unsubscribe(channel channels.Channel) error { - ret := _m.Called(channel) - - var r0 error - if rf, ok := ret.Get(0).(func(channels.Channel) error); ok { - r0 = rf(channel) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UpdateNodeAddresses provides a mock function with given fields: -func (_m *Middleware) UpdateNodeAddresses() { - _m.Called() -} - -type mockConstructorTestingTNewMiddleware interface { - mock.TestingT - Cleanup(func()) -} - -// NewMiddleware creates a new instance of Middleware. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewMiddleware(t mockConstructorTestingTNewMiddleware) *Middleware { - mock := &Middleware{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/misbehavior_report.go b/network/mocknetwork/misbehavior_report.go deleted file mode 100644 index 85527fd9ad3..00000000000 --- a/network/mocknetwork/misbehavior_report.go +++ /dev/null @@ -1,74 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" - - network "github.com/onflow/flow-go/network" -) - -// MisbehaviorReport is an autogenerated mock type for the MisbehaviorReport type -type MisbehaviorReport struct { - mock.Mock -} - -// OriginId provides a mock function with given fields: -func (_m *MisbehaviorReport) OriginId() flow.Identifier { - ret := _m.Called() - - var r0 flow.Identifier - if rf, ok := ret.Get(0).(func() flow.Identifier); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.Identifier) - } - } - - return r0 -} - -// Penalty provides a mock function with given fields: -func (_m *MisbehaviorReport) Penalty() int { - ret := _m.Called() - - var r0 int - if rf, ok := ret.Get(0).(func() int); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(int) - } - - return r0 -} - -// Reason provides a mock function with given fields: -func (_m *MisbehaviorReport) Reason() network.Misbehavior { - ret := _m.Called() - - var r0 network.Misbehavior - if rf, ok := ret.Get(0).(func() network.Misbehavior); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(network.Misbehavior) - } - - return r0 -} - -type mockConstructorTestingTNewMisbehaviorReport interface { - mock.TestingT - Cleanup(func()) -} - -// NewMisbehaviorReport creates a new instance of MisbehaviorReport. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMisbehaviorReport(t mockConstructorTestingTNewMisbehaviorReport) *MisbehaviorReport { - mock := &MisbehaviorReport{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/misbehavior_report_manager.go b/network/mocknetwork/misbehavior_report_manager.go deleted file mode 100644 index 74b4e66bcad..00000000000 --- a/network/mocknetwork/misbehavior_report_manager.go +++ /dev/null @@ -1,35 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import ( - channels "github.com/onflow/flow-go/network/channels" - mock "github.com/stretchr/testify/mock" - - network "github.com/onflow/flow-go/network" -) - -// MisbehaviorReportManager is an autogenerated mock type for the MisbehaviorReportManager type -type MisbehaviorReportManager struct { - mock.Mock -} - -// HandleMisbehaviorReport provides a mock function with given fields: _a0, _a1 -func (_m *MisbehaviorReportManager) HandleMisbehaviorReport(_a0 channels.Channel, _a1 network.MisbehaviorReport) { - _m.Called(_a0, _a1) -} - -type mockConstructorTestingTNewMisbehaviorReportManager interface { - mock.TestingT - Cleanup(func()) -} - -// NewMisbehaviorReportManager creates a new instance of MisbehaviorReportManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewMisbehaviorReportManager(t mockConstructorTestingTNewMisbehaviorReportManager) *MisbehaviorReportManager { - mock := &MisbehaviorReportManager{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/mock_network.go b/network/mocknetwork/mock_network.go deleted file mode 100644 index 413122da44b..00000000000 --- a/network/mocknetwork/mock_network.go +++ /dev/null @@ -1,129 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/onflow/flow-go/network (interfaces: Network) - -// Package mocknetwork is a generated GoMock package. -package mocknetwork - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - datastore "github.com/ipfs/go-datastore" - protocol "github.com/libp2p/go-libp2p/core/protocol" - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - network "github.com/onflow/flow-go/network" - channels "github.com/onflow/flow-go/network/channels" -) - -// MockNetwork is a mock of Network interface. -type MockNetwork struct { - ctrl *gomock.Controller - recorder *MockNetworkMockRecorder -} - -// MockNetworkMockRecorder is the mock recorder for MockNetwork. -type MockNetworkMockRecorder struct { - mock *MockNetwork -} - -// NewMockNetwork creates a new mock instance. -func NewMockNetwork(ctrl *gomock.Controller) *MockNetwork { - mock := &MockNetwork{ctrl: ctrl} - mock.recorder = &MockNetworkMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder { - return m.recorder -} - -// Done mocks base method. -func (m *MockNetwork) Done() <-chan struct{} { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Done") - ret0, _ := ret[0].(<-chan struct{}) - return ret0 -} - -// Done indicates an expected call of Done. -func (mr *MockNetworkMockRecorder) Done() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Done", reflect.TypeOf((*MockNetwork)(nil).Done)) -} - -// Ready mocks base method. -func (m *MockNetwork) Ready() <-chan struct{} { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Ready") - ret0, _ := ret[0].(<-chan struct{}) - return ret0 -} - -// Ready indicates an expected call of Ready. -func (mr *MockNetworkMockRecorder) Ready() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ready", reflect.TypeOf((*MockNetwork)(nil).Ready)) -} - -// Register mocks base method. -func (m *MockNetwork) Register(arg0 channels.Channel, arg1 network.MessageProcessor) (network.Conduit, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Register", arg0, arg1) - ret0, _ := ret[0].(network.Conduit) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Register indicates an expected call of Register. -func (mr *MockNetworkMockRecorder) Register(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockNetwork)(nil).Register), arg0, arg1) -} - -// RegisterBlobService mocks base method. -func (m *MockNetwork) RegisterBlobService(arg0 channels.Channel, arg1 datastore.Batching, arg2 ...network.BlobServiceOption) (network.BlobService, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "RegisterBlobService", varargs...) 
- ret0, _ := ret[0].(network.BlobService) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RegisterBlobService indicates an expected call of RegisterBlobService. -func (mr *MockNetworkMockRecorder) RegisterBlobService(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterBlobService", reflect.TypeOf((*MockNetwork)(nil).RegisterBlobService), varargs...) -} - -// RegisterPingService mocks base method. -func (m *MockNetwork) RegisterPingService(arg0 protocol.ID, arg1 network.PingInfoProvider) (network.PingService, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterPingService", arg0, arg1) - ret0, _ := ret[0].(network.PingService) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RegisterPingService indicates an expected call of RegisterPingService. -func (mr *MockNetworkMockRecorder) RegisterPingService(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPingService", reflect.TypeOf((*MockNetwork)(nil).RegisterPingService), arg0, arg1) -} - -// Start mocks base method. -func (m *MockNetwork) Start(arg0 irrecoverable.SignalerContext) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Start", arg0) -} - -// Start indicates an expected call of Start. -func (mr *MockNetworkMockRecorder) Start(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockNetwork)(nil).Start), arg0) -} diff --git a/network/mocknetwork/network.go b/network/mocknetwork/network.go deleted file mode 100644 index 95891793892..00000000000 --- a/network/mocknetwork/network.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
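The deleted mock_network.go above was the lone gomock-generated file in the package; everything else here is mockery output, so removing it standardizes the package on a single mocking framework. For contrast, a hedged sketch of the two calling styles (the gomock half quotes the now-deleted API; the mockery half reuses the Underlay mock from earlier in this diff):

package example_test

import (
	"testing"

	netmock "github.com/onflow/flow-go/network/mock"
)

// Previously, the gomock-based mock needed an explicit controller:
//
//	ctrl := gomock.NewController(t)
//	net := mocknetwork.NewMockNetwork(ctrl)
//	net.EXPECT().Ready().Return(ch)
//
// The mockery style wires expectation checking into t.Cleanup instead:
func TestReadySignal(t *testing.T) {
	ch := make(chan struct{})
	close(ch)

	u := netmock.NewUnderlay(t)
	// The conversion is needed because the generated code type-asserts
	// the stored value to the receive-only <-chan struct{}.
	u.On("Ready").Return((<-chan struct{})(ch))

	<-u.Ready() // the closed channel unblocks immediately
}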
- -package mocknetwork - -import ( - datastore "github.com/ipfs/go-datastore" - channels "github.com/onflow/flow-go/network/channels" - - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - - mock "github.com/stretchr/testify/mock" - - network "github.com/onflow/flow-go/network" - - protocol "github.com/libp2p/go-libp2p/core/protocol" -) - -// Network is an autogenerated mock type for the Network type -type Network struct { - mock.Mock -} - -// Done provides a mock function with given fields: -func (_m *Network) Done() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Ready provides a mock function with given fields: -func (_m *Network) Ready() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Register provides a mock function with given fields: channel, messageProcessor -func (_m *Network) Register(channel channels.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { - ret := _m.Called(channel, messageProcessor) - - var r0 network.Conduit - var r1 error - if rf, ok := ret.Get(0).(func(channels.Channel, network.MessageProcessor) (network.Conduit, error)); ok { - return rf(channel, messageProcessor) - } - if rf, ok := ret.Get(0).(func(channels.Channel, network.MessageProcessor) network.Conduit); ok { - r0 = rf(channel, messageProcessor) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(network.Conduit) - } - } - - if rf, ok := ret.Get(1).(func(channels.Channel, network.MessageProcessor) error); ok { - r1 = rf(channel, messageProcessor) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RegisterBlobService provides a mock function with given fields: channel, store, opts -func (_m *Network) RegisterBlobService(channel channels.Channel, store datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, channel, store) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 network.BlobService - var r1 error - if rf, ok := ret.Get(0).(func(channels.Channel, datastore.Batching, ...network.BlobServiceOption) (network.BlobService, error)); ok { - return rf(channel, store, opts...) - } - if rf, ok := ret.Get(0).(func(channels.Channel, datastore.Batching, ...network.BlobServiceOption) network.BlobService); ok { - r0 = rf(channel, store, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(network.BlobService) - } - } - - if rf, ok := ret.Get(1).(func(channels.Channel, datastore.Batching, ...network.BlobServiceOption) error); ok { - r1 = rf(channel, store, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RegisterPingService provides a mock function with given fields: pingProtocolID, pingInfoProvider -func (_m *Network) RegisterPingService(pingProtocolID protocol.ID, pingInfoProvider network.PingInfoProvider) (network.PingService, error) { - ret := _m.Called(pingProtocolID, pingInfoProvider) - - var r0 network.PingService - var r1 error - if rf, ok := ret.Get(0).(func(protocol.ID, network.PingInfoProvider) (network.PingService, error)); ok { - return rf(pingProtocolID, pingInfoProvider) - } - if rf, ok := ret.Get(0).(func(protocol.ID, network.PingInfoProvider) network.PingService); ok { - r0 = rf(pingProtocolID, pingInfoProvider) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(network.PingService) - } - } - - if rf, ok := ret.Get(1).(func(protocol.ID, network.PingInfoProvider) error); ok { - r1 = rf(pingProtocolID, pingInfoProvider) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Start provides a mock function with given fields: _a0 -func (_m *Network) Start(_a0 irrecoverable.SignalerContext) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewNetwork interface { - mock.TestingT - Cleanup(func()) -} - -// NewNetwork creates a new instance of Network. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewNetwork(t mockConstructorTestingTNewNetwork) *Network { - mock := &Network{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/overlay.go b/network/mocknetwork/overlay.go deleted file mode 100644 index e36869114c1..00000000000 --- a/network/mocknetwork/overlay.go +++ /dev/null @@ -1,104 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mocknetwork - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" - - network "github.com/onflow/flow-go/network" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// Overlay is an autogenerated mock type for the Overlay type -type Overlay struct { - mock.Mock -} - -// Identities provides a mock function with given fields: -func (_m *Overlay) Identities() flow.IdentityList { - ret := _m.Called() - - var r0 flow.IdentityList - if rf, ok := ret.Get(0).(func() flow.IdentityList); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.IdentityList) - } - } - - return r0 -} - -// Identity provides a mock function with given fields: _a0 -func (_m *Overlay) Identity(_a0 peer.ID) (*flow.Identity, bool) { - ret := _m.Called(_a0) - - var r0 *flow.Identity - var r1 bool - if rf, ok := ret.Get(0).(func(peer.ID) (*flow.Identity, bool)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(peer.ID) *flow.Identity); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Identity) - } - } - - if rf, ok := ret.Get(1).(func(peer.ID) bool); ok { - r1 = rf(_a0) - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} - -// Receive provides a mock function with given fields: _a0 -func (_m *Overlay) Receive(_a0 *network.IncomingMessageScope) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(*network.IncomingMessageScope) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Topology provides a mock function with given fields: -func (_m *Overlay) Topology() flow.IdentityList { - ret := _m.Called() - - var r0 flow.IdentityList - if rf, ok := ret.Get(0).(func() flow.IdentityList); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.IdentityList) - } - } - - return r0 -} - -type mockConstructorTestingTNewOverlay interface { - mock.TestingT - Cleanup(func()) -} - -// NewOverlay creates a new instance of Overlay. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewOverlay(t mockConstructorTestingTNewOverlay) *Overlay { - mock := &Overlay{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/ping_info_provider.go b/network/mocknetwork/ping_info_provider.go deleted file mode 100644 index 57479dc7b4c..00000000000 --- a/network/mocknetwork/ping_info_provider.go +++ /dev/null @@ -1,67 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mocknetwork - -import mock "github.com/stretchr/testify/mock" - -// PingInfoProvider is an autogenerated mock type for the PingInfoProvider type -type PingInfoProvider struct { - mock.Mock -} - -// HotstuffView provides a mock function with given fields: -func (_m *PingInfoProvider) HotstuffView() uint64 { - ret := _m.Called() - - var r0 uint64 - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - return r0 -} - -// SealedBlockHeight provides a mock function with given fields: -func (_m *PingInfoProvider) SealedBlockHeight() uint64 { - ret := _m.Called() - - var r0 uint64 - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - return r0 -} - -// SoftwareVersion provides a mock function with given fields: -func (_m *PingInfoProvider) SoftwareVersion() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -type mockConstructorTestingTNewPingInfoProvider interface { - mock.TestingT - Cleanup(func()) -} - -// NewPingInfoProvider creates a new instance of PingInfoProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPingInfoProvider(t mockConstructorTestingTNewPingInfoProvider) *PingInfoProvider { - mock := &PingInfoProvider{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/violations_consumer.go b/network/mocknetwork/violations_consumer.go deleted file mode 100644 index 9c6f252b095..00000000000 --- a/network/mocknetwork/violations_consumer.go +++ /dev/null @@ -1,58 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import ( - slashing "github.com/onflow/flow-go/network/slashing" - mock "github.com/stretchr/testify/mock" -) - -// ViolationsConsumer is an autogenerated mock type for the ViolationsConsumer type -type ViolationsConsumer struct { - mock.Mock -} - -// OnInvalidMsgError provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnInvalidMsgError(violation *slashing.Violation) { - _m.Called(violation) -} - -// OnSenderEjectedError provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnSenderEjectedError(violation *slashing.Violation) { - _m.Called(violation) -} - -// OnUnAuthorizedSenderError provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnUnAuthorizedSenderError(violation *slashing.Violation) { - _m.Called(violation) -} - -// OnUnauthorizedUnicastOnChannel provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnUnauthorizedUnicastOnChannel(violation *slashing.Violation) { - _m.Called(violation) -} - -// OnUnexpectedError provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnUnexpectedError(violation *slashing.Violation) { - _m.Called(violation) -} - -// OnUnknownMsgTypeError provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnUnknownMsgTypeError(violation *slashing.Violation) { - _m.Called(violation) -} - -type mockConstructorTestingTNewViolationsConsumer interface { - mock.TestingT - Cleanup(func()) -} - -// NewViolationsConsumer creates a new instance of ViolationsConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewViolationsConsumer(t mockConstructorTestingTNewViolationsConsumer) *ViolationsConsumer { - mock := &ViolationsConsumer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/mocknetwork/write_close_flusher.go b/network/mocknetwork/write_close_flusher.go deleted file mode 100644 index 1fc8dbe8cf4..00000000000 --- a/network/mocknetwork/write_close_flusher.go +++ /dev/null @@ -1,77 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mocknetwork - -import mock "github.com/stretchr/testify/mock" - -// WriteCloseFlusher is an autogenerated mock type for the WriteCloseFlusher type -type WriteCloseFlusher struct { - mock.Mock -} - -// Close provides a mock function with given fields: -func (_m *WriteCloseFlusher) Close() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Flush provides a mock function with given fields: -func (_m *WriteCloseFlusher) Flush() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Write provides a mock function with given fields: p -func (_m *WriteCloseFlusher) Write(p []byte) (int, error) { - ret := _m.Called(p) - - var r0 int - var r1 error - if rf, ok := ret.Get(0).(func([]byte) (int, error)); ok { - return rf(p) - } - if rf, ok := ret.Get(0).(func([]byte) int); ok { - r0 = rf(p) - } else { - r0 = ret.Get(0).(int) - } - - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(p) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewWriteCloseFlusher interface { - mock.TestingT - Cleanup(func()) -} - -// NewWriteCloseFlusher creates a new instance of WriteCloseFlusher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewWriteCloseFlusher(t mockConstructorTestingTNewWriteCloseFlusher) *WriteCloseFlusher { - mock := &WriteCloseFlusher{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/netconf/config.go b/network/netconf/config.go new file mode 100644 index 00000000000..a14933a0498 --- /dev/null +++ b/network/netconf/config.go @@ -0,0 +1,78 @@ +package netconf + +import ( + "time" + + p2pconfig "github.com/onflow/flow-go/network/p2p/config" +) + +const ( + gossipsubKey = "gossipsub" + unicastKey = "unicast" + connectionManagerKey = "connection-manager" +) + +// Config encapsulation of configuration structs for all components related to the Flow network. +type Config struct { + Unicast Unicast `mapstructure:"unicast"` + ResourceManager p2pconfig.ResourceManagerConfig `mapstructure:"libp2p-resource-manager"` + ConnectionManager ConnectionManager `mapstructure:"connection-manager"` + // GossipSub core gossipsub configuration. + GossipSub p2pconfig.GossipSubParameters `mapstructure:"gossipsub"` + AlspConfig `mapstructure:",squash"` + + // NetworkConnectionPruning determines whether connections to nodes + // that are not part of protocol state should be trimmed + // TODO: solely a fallback mechanism, can be removed upon reliable behavior in production. 
+	NetworkConnectionPruning bool `mapstructure:"networking-connection-pruning"`
+	// PreferredUnicastProtocols is the list of unicast protocols in preferred order.
+	PreferredUnicastProtocols []string `mapstructure:"preferred-unicast-protocols"`
+	NetworkReceivedMessageCacheSize uint32 `validate:"gt=0" mapstructure:"received-message-cache-size"`
+	PeerUpdateInterval time.Duration `validate:"gt=0s" mapstructure:"peerupdate-interval"`
+
+	DNSCacheTTL time.Duration `validate:"gt=0s" mapstructure:"dns-cache-ttl"`
+	// DisallowListNotificationCacheSize is the size of the queue for notifications about new peers in the disallow list.
+	DisallowListNotificationCacheSize uint32 `validate:"gt=0" mapstructure:"disallow-list-notification-cache-size"`
+}
+
+// AlspConfig is the config for the Application Layer Spam Prevention (ALSP) protocol.
+type AlspConfig struct {
+	// SpamRecordCacheSize is the size of the cache for spam records. There is at most one spam record per authorized (i.e., staked) node.
+	// Recommended size is 10 * number of authorized nodes to allow for churn.
+	SpamRecordCacheSize uint32 `mapstructure:"alsp-spam-record-cache-size"`
+
+	// SpamReportQueueSize is the size of the queue for spam records. The queue is used to store spam records
+	// temporarily until they are picked up by the workers. When the queue is full, new spam records are dropped.
+	// Recommended size is 100 * number of authorized nodes to allow for churn.
+	SpamReportQueueSize uint32 `mapstructure:"alsp-spam-report-queue-size"`
+
+	// DisablePenalty indicates whether applying the penalty to the misbehaving node is disabled.
+	// When disabled, the ALSP module logs the misbehavior reports and updates the metrics, but does not apply the penalty.
+	// This is useful for managing production incidents.
+	// Note: under normal circumstances, the ALSP module should not be disabled.
+	DisablePenalty bool `mapstructure:"alsp-disable-penalty"`
+
+	// HearBeatInterval is the interval between heartbeats sent by the ALSP module. The heartbeats are recurring
+	// events that are used to perform critical ALSP tasks, such as updating the spam records cache.
+	HearBeatInterval time.Duration `mapstructure:"alsp-heart-beat-interval"`
+
+	SyncEngine SyncEngineAlspConfig `mapstructure:",squash"`
+}
+
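An aside on the `,squash` tags above: they flatten a nested struct into its parent's key space, which is why ALSP keys such as `alsp-spam-record-cache-size` live at the top level of the network config instead of under a nested prefix. A minimal, self-contained sketch of that behavior using `github.com/mitchellh/mapstructure` directly; the `Outer`/`Inner` names are illustrative, not part of flow-go:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Inner plays the role of an embedded config (like AlspConfig inside Config).
type Inner struct {
	CacheSize uint32 `mapstructure:"alsp-spam-record-cache-size"`
}

// Outer embeds Inner with ",squash", so Inner's keys decode from Outer's level.
type Outer struct {
	Pruning bool `mapstructure:"networking-connection-pruning"`
	Inner   `mapstructure:",squash"`
}

func main() {
	// All keys live at one level; no nested prefix is needed because of ",squash".
	in := map[string]interface{}{
		"networking-connection-pruning": true,
		"alsp-spam-record-cache-size":   10000,
	}
	var out Outer
	if err := mapstructure.Decode(in, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Pruning:true Inner:{CacheSize:10000}}
}
```

This flattening is also consistent with the tests further below, which expect every network config key to sit directly under the `network.` prefix.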
+// SyncEngineAlspConfig is the ALSP config for the SyncEngine.
+type SyncEngineAlspConfig struct {
+	// BatchRequestBaseProb is the base probability in [0,1] used to compute the final probability of creating a
+	// misbehavior report for a BatchRequest message. It is called "base" because it is not the final probability:
+	// other factors contribute as well, so that larger batches yield a higher probability of a misbehavior report.
+	BatchRequestBaseProb float32 `validate:"gte=0,lte=1" mapstructure:"alsp-sync-engine-batch-request-base-prob"`
+
+	// RangeRequestBaseProb is the base probability in [0,1] used to compute the final probability of creating a
+	// misbehavior report for a RangeRequest message. It is called "base" because it is not the final probability:
+	// other factors contribute as well, so that larger ranges yield a higher probability of a misbehavior report.
+	RangeRequestBaseProb float32 `validate:"gte=0,lte=1" mapstructure:"alsp-sync-engine-range-request-base-prob"`
+
+	// SyncRequestProb is the probability in [0,1] of creating a misbehavior report for a SyncRequest message.
+	SyncRequestProb float32 `validate:"gte=0,lte=1" mapstructure:"alsp-sync-engine-sync-request-prob"`
+}
diff --git a/network/netconf/config_test.go b/network/netconf/config_test.go
new file mode 100644
index 00000000000..d6a062cdb2f
--- /dev/null
+++ b/network/netconf/config_test.go
@@ -0,0 +1,75 @@
+package netconf_test
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/spf13/viper"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/network/netconf"
+)
+
+// TestSetAliases ensures every network configuration key prefixed with "network" has an alias without the "network" prefix.
+func TestSetAliases(t *testing.T) {
+	c := viper.New()
+	for _, key := range netconf.AllFlagNames() {
+		c.Set(fmt.Sprintf("network.%s", key), "not aliased")
+		c.Set(key, "aliased")
+	}
+
+	// ensure network-prefixed keys do not yet point to the non-prefixed alias
+	for _, key := range c.AllKeys() {
+		parts := strings.Split(key, ".")
+		if len(parts) != 2 {
+			continue
+		}
+		require.NotEqual(t, c.GetString(parts[1]), c.GetString(key))
+	}
+
+	err := netconf.SetAliases(c)
+	require.NoError(t, err)
+
+	// ensure each network-prefixed key now points to the non-prefixed alias
+	for _, key := range c.AllKeys() {
+		parts := strings.Split(key, ".")
+		if len(parts) != 2 {
+			continue
+		}
+		require.Equal(t, c.GetString(parts[1]), c.GetString(key))
+	}
+}
+
+// TestCrossReferenceFlagsWithConfigs ensures that each flag is cross-referenced with the config file, i.e., that each
+// flag has a corresponding config key.
+func TestCrossReferenceFlagsWithConfigs(t *testing.T) {
+	// reads the default config file
+	c := config.RawViperConfig()
+	err := netconf.SetAliases(c)
+	require.NoError(t, err)
+}
+
+// TestCrossReferenceConfigsWithFlags ensures that each config is cross-referenced with the flags, i.e., that each config
+// key has a corresponding flag.
+func TestCrossReferenceConfigsWithFlags(t *testing.T) {
+	c := config.RawViperConfig()
+	// keeps all flag names
+	m := make(map[string]struct{})
+
+	// each flag name should correspond to exactly one key in our config store after it is loaded with the default config
+	for _, flagName := range netconf.AllFlagNames() {
+		m[flagName] = struct{}{}
+	}
+
+	for _, key := range c.AllKeys() {
+		s := strings.Split(key, ".")
+		flag := strings.Join(s[1:], "-")
+		if len(flag) == 0 {
+			continue
+		}
+		_, ok := m[flag]
+		require.Truef(t, ok, "config key %s does not have a corresponding flag", flag)
+	}
+}
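TestSetAliases above hinges on viper's aliasing: after SetAliases runs, a file-oriented key such as `network.dns-cache-ttl` and its flag-oriented spelling `dns-cache-ttl` must resolve to the same value. A minimal standalone sketch of that mechanism using `viper.RegisterAlias` directly; the wiring below illustrates the observable behavior the test checks and is not the SetAliases implementation:

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()

	// Values loaded from the config file live under the "network." prefix.
	v.Set("network.dns-cache-ttl", "5m")

	// Aliasing makes the flat flag-style key resolve to the prefixed key.
	v.RegisterAlias("dns-cache-ttl", "network.dns-cache-ttl")

	// Both spellings now return the same value, which is what TestSetAliases asserts.
	fmt.Println(v.GetString("network.dns-cache-ttl")) // 5m
	fmt.Println(v.GetString("dns-cache-ttl"))         // 5m
}
```

Presumably SetAliases registers one such alias per entry of AllFlagNames; the test only verifies the resulting key equality, not the wiring.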
diff --git a/network/netconf/connection_manager.go b/network/netconf/connection_manager.go
new file mode 100644
index 00000000000..2533e5c38fc
--- /dev/null
+++ b/network/netconf/connection_manager.go
@@ -0,0 +1,33 @@
+package netconf
+
+import "time"
+
+const (
+	highWatermarkKey = "high-watermark"
+	lowWatermarkKey  = "low-watermark"
+	silencePeriodKey = "silence-period"
+	gracePeriodKey   = "grace-period"
+)
+
+type ConnectionManager struct {
+	// HighWatermark and LowWatermark govern the number of connections maintained by the ConnManager.
+	// When the peer count exceeds the HighWatermark, peers are pruned (and
+	// their connections terminated) until LowWatermark peers remain. In other words, whenever the
+	// peer count is x > HighWatermark, the ConnManager will prune x - LowWatermark peers
+	// (e.g., with HighWatermark=500 and LowWatermark=450, a peer count of 520 leads to 70 peers being pruned).
+	// The pruning algorithm is as follows:
+	// 1. The ConnManager will not prune any peers that have been connected for less than GracePeriod.
+	// 2. The ConnManager will not prune any peers that are protected.
+	// 3. The ConnManager will sort the peers based on their number of streams and direction of connections, and
+	// prunes the peers with the least number of streams. If there are ties, the peer with the incoming connection
+	// will be pruned. If both peers have incoming connections, and there are still ties, one of the peers will be
+	// pruned at random.
+	// The algorithm implementation is in https://github.com/libp2p/go-libp2p/blob/master/p2p/net/connmgr/connmgr.go#L262-L318
+	HighWatermark int `mapstructure:"high-watermark"` // naming from libp2p
+	LowWatermark  int `mapstructure:"low-watermark"`  // naming from libp2p
+
+	// SilencePeriod is the interval at which the connection manager checks whether the number of peers
+	// exceeds the high watermark and, if so, prunes peers.
+	SilencePeriod time.Duration `mapstructure:"silence-period"` // naming from libp2p
+	// GracePeriod is the time to wait before a new connection is considered for pruning.
+	GracePeriod time.Duration `mapstructure:"grace-period"` // naming from libp2p
+}
diff --git a/network/netconf/flags.go b/network/netconf/flags.go
new file mode 100644
index 00000000000..db99cfcdaa6
--- /dev/null
+++ b/network/netconf/flags.go
@@ -0,0 +1,697 @@
+package netconf
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/spf13/pflag"
+	"github.com/spf13/viper"
+
+	p2pconfig "github.com/onflow/flow-go/network/p2p/config"
+)
+
+const (
+	// All constant strings are used for CLI flag names and corresponding keys for config values.
+	// network configuration
+	networkingConnectionPruning       = "networking-connection-pruning"
+	preferredUnicastsProtocols        = "preferred-unicast-protocols"
+	receivedMessageCacheSize          = "received-message-cache-size"
+	peerUpdateInterval                = "peerupdate-interval"
+	dnsCacheTTL                       = "dns-cache-ttl"
+	disallowListNotificationCacheSize = "disallow-list-notification-cache-size"
+	// resource manager config
+	rootResourceManagerPrefix  = "libp2p-resource-manager"
+	memoryLimitRatioPrefix     = "memory-limit-ratio"
+	fileDescriptorsRatioPrefix = "file-descriptors-ratio"
+	limitsOverridePrefix       = "limits-override"
+	systemScope                = "system"
+	transientScope             = "transient"
+	protocolScope              = "protocol"
+	peerScope                  = "peer"
+	peerProtocolScope          = "peer-protocol"
+	inboundStreamLimit         = "streams-inbound"
+	outboundStreamLimit        = "streams-outbound"
+	inboundConnectionLimit     = "connections-inbound"
+	outboundConnectionLimit    = "connections-outbound"
+	fileDescriptorsLimit       = "fd"
+	memoryLimitBytes           = "memory-bytes"
+
+	alspDisabled                       = "alsp-disable-penalty"
+	alspSpamRecordCacheSize            = "alsp-spam-record-cache-size"
+	alspSpamRecordQueueSize            = "alsp-spam-report-queue-size"
+	alspHearBeatInterval               = "alsp-heart-beat-interval"
+	alspSyncEngineBatchRequestBaseProb = "alsp-sync-engine-batch-request-base-prob"
+	alspSyncEngineRangeRequestBaseProb = "alsp-sync-engine-range-request-base-prob"
+	alspSyncEngineSyncRequestProb      = "alsp-sync-engine-sync-request-prob"
+)
+
+// AllFlagNames returns the names of all CLI flags for the Flow network configuration.
+func AllFlagNames() []string {
+	allFlags := []string{
+		networkingConnectionPruning,
+		preferredUnicastsProtocols,
+		receivedMessageCacheSize,
+		peerUpdateInterval,
+		BuildFlagName(unicastKey, MessageTimeoutKey),
+		BuildFlagName(unicastKey, unicastManagerKey, createStreamBackoffDelayKey),
+		BuildFlagName(unicastKey, unicastManagerKey, streamZeroRetryResetThresholdKey),
+		
BuildFlagName(unicastKey, unicastManagerKey, maxStreamCreationRetryAttemptTimesKey), + BuildFlagName(unicastKey, unicastManagerKey, configCacheSizeKey), + dnsCacheTTL, + disallowListNotificationCacheSize, + BuildFlagName(unicastKey, rateLimiterKey, messageRateLimitKey), + BuildFlagName(unicastKey, rateLimiterKey, BandwidthRateLimitKey), + BuildFlagName(unicastKey, rateLimiterKey, BandwidthBurstLimitKey), + BuildFlagName(unicastKey, rateLimiterKey, LockoutDurationKey), + BuildFlagName(unicastKey, rateLimiterKey, DryRunKey), + BuildFlagName(unicastKey, enableStreamProtectionKey), + BuildFlagName(rootResourceManagerPrefix, memoryLimitRatioPrefix), + BuildFlagName(rootResourceManagerPrefix, fileDescriptorsRatioPrefix), + BuildFlagName(connectionManagerKey, highWatermarkKey), + BuildFlagName(connectionManagerKey, lowWatermarkKey), + BuildFlagName(connectionManagerKey, silencePeriodKey), + BuildFlagName(connectionManagerKey, gracePeriodKey), + alspDisabled, + alspSpamRecordCacheSize, + alspSpamRecordQueueSize, + alspHearBeatInterval, + alspSyncEngineBatchRequestBaseProb, + alspSyncEngineRangeRequestBaseProb, + alspSyncEngineSyncRequestProb, + + BuildFlagName(gossipsubKey, p2pconfig.PeerScoringEnabledKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.LocalMeshLogIntervalKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.ScoreTracerIntervalKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.RPCSentTrackerCacheSizeKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.RPCSentTrackerQueueCacheSizeKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.RPCSentTrackerNumOfWorkersKey), + + BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.DuplicateMessageCacheTrackerKey, p2pconfig.DuplicateMessageCacheTrackerSizeKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.DuplicateMessageCacheTrackerKey, p2pconfig.DuplicateMessageCacheTrackerDecayKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.DuplicateMessageCacheTrackerKey, p2pconfig.SkipDecayThresholdKey), + + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.InspectionQueueConfigKey, p2pconfig.NumberOfWorkersKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.InspectionQueueConfigKey, p2pconfig.QueueSizeKey), + + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ClusterPrefixedMessageConfigKey, p2pconfig.TrackerCacheSizeKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ClusterPrefixedMessageConfigKey, p2pconfig.TrackerCacheDecayKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ClusterPrefixedMessageConfigKey, p2pconfig.HardThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.NotificationCacheSizeKey), + + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.DisabledKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.EnableKey, p2pconfig.GraftKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.EnableKey, p2pconfig.PruneKey), + BuildFlagName(gossipsubKey, 
p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.EnableKey, p2pconfig.IHaveKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.EnableKey, p2pconfig.IWantKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.EnableKey, p2pconfig.PublishKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.RejectUnstakedPeers), + + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.TruncationKey, p2pconfig.DisabledKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.TruncationKey, p2pconfig.EnableKey, p2pconfig.GraftKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.TruncationKey, p2pconfig.EnableKey, p2pconfig.PruneKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.TruncationKey, p2pconfig.EnableKey, p2pconfig.IHaveKey), + BuildFlagName(gossipsubKey, + p2pconfig.RpcInspectorKey, + p2pconfig.ValidationConfigKey, + p2pconfig.ProcessKey, + p2pconfig.TruncationKey, + p2pconfig.EnableKey, + p2pconfig.IHaveKey, + p2pconfig.MessageIDKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.TruncationKey, p2pconfig.EnableKey, p2pconfig.IWantConfigKey), + BuildFlagName(gossipsubKey, + p2pconfig.RpcInspectorKey, + p2pconfig.ValidationConfigKey, + p2pconfig.ProcessKey, + p2pconfig.TruncationKey, + p2pconfig.EnableKey, + p2pconfig.IWantKey, + p2pconfig.MessageIDKey), + + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IHaveConfigKey, p2pconfig.MessageCountThreshold), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IHaveConfigKey, p2pconfig.MessageIdCountThreshold), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IHaveConfigKey, p2pconfig.DuplicateTopicIdThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IHaveConfigKey, p2pconfig.DuplicateMessageIdThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IHaveConfigKey, p2pconfig.InvalidTopicIdThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.GraftPruneKey, p2pconfig.DuplicateTopicIdThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.GraftPruneKey, p2pconfig.InvalidTopicIdThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.GraftPruneKey, p2pconfig.MessageCountThreshold), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IWantConfigKey, p2pconfig.MessageCountThreshold), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IWantConfigKey, p2pconfig.MessageIdCountThreshold), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, 
p2pconfig.ValidationConfigKey, p2pconfig.IWantConfigKey, p2pconfig.CacheMissThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IWantConfigKey, p2pconfig.DuplicateMsgIDThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.PublishMessagesConfigKey, p2pconfig.MaxSampleSizeKey), + BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.PublishMessagesConfigKey, p2pconfig.MessageErrorThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.SubscriptionProviderKey, p2pconfig.UpdateIntervalKey), + BuildFlagName(gossipsubKey, p2pconfig.SubscriptionProviderKey, p2pconfig.CacheSizeKey), + + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.AppSpecificScoreWeightKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.DecayIntervalKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.DecayToZeroKey), + + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.SkipAtomicValidationKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.InvalidMessageDeliveriesWeightKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.InvalidMessageDeliveriesDecayKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.TimeInMeshQuantumKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.TopicWeightKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshMessageDeliveriesDecayKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshMessageDeliveriesCapKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshMessageDeliveryThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshDeliveriesWeightKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshMessageDeliveriesWindowKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshMessageDeliveryActivationKey), + + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.ThresholdsKey, p2pconfig.GossipThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.ThresholdsKey, p2pconfig.PublishKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.ThresholdsKey, p2pconfig.GraylistThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, 
p2pconfig.ThresholdsKey, p2pconfig.AcceptPXThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.ThresholdsKey, p2pconfig.OpportunisticGraftThresholdKey), + + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.BehaviourKey, p2pconfig.BehaviourPenaltyThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.BehaviourKey, p2pconfig.BehaviourPenaltyWeightKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.BehaviourKey, p2pconfig.BehaviourPenaltyDecayKey), + + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.MaxDebugLogsKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.MaxAppSpecificKey, p2pconfig.PenaltyKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.MinAppSpecificKey, p2pconfig.PenaltyKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.UnknownIdentityKey, p2pconfig.PenaltyKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.InvalidSubscriptionKey, p2pconfig.PenaltyKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.DuplicateMessageKey, p2pconfig.PenaltyKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.DuplicateMessageKey, p2pconfig.ThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.MaxAppSpecificKey, p2pconfig.RewardKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.StakedIdentityKey, p2pconfig.RewardKey), + + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.StartupSilenceDurationKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.AppSpecificScoreRegistryKey, p2pconfig.ScoreUpdateWorkerNumKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.AppSpecificScoreRegistryKey, p2pconfig.ScoreUpdateRequestQueueSizeKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.AppSpecificScoreRegistryKey, p2pconfig.InvalidControlMessageNotificationQueueSizeKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.AppSpecificScoreRegistryKey, p2pconfig.ScoreTTLKey), + + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.CacheSizeKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.PenaltyDecaySlowdownThresholdKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, 
p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.DecayRateReductionFactorKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.PenaltyDecayEvaluationPeriodKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.MinimumSpamPenaltyDecayFactorKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.MaximumSpamPenaltyDecayFactorKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.SkipDecayThresholdKey), + + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.GraftKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.PruneKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.IHaveKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.IWantKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.PublishKey), + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.ClusterPrefixedReductionFactorKey), + + BuildFlagName(gossipsubKey, p2pconfig.PeerGaterKey, p2pconfig.EnabledKey), + BuildFlagName(gossipsubKey, p2pconfig.PeerGaterKey, p2pconfig.SourceDecayKey), + BuildFlagName(gossipsubKey, p2pconfig.PeerGaterKey, p2pconfig.TopicDeliveryWeightsKey), + } + + for _, scope := range []string{systemScope, transientScope, protocolScope, peerScope, peerProtocolScope} { + for _, resource := range []string{inboundStreamLimit, + outboundStreamLimit, + inboundConnectionLimit, + outboundConnectionLimit, + fileDescriptorsLimit, + memoryLimitBytes} { + allFlags = append(allFlags, fmt.Sprintf("%s-%s-%s-%s", rootResourceManagerPrefix, limitsOverridePrefix, scope, resource)) + } + } + + return allFlags +} + +// InitializeNetworkFlags initializes all CLI flags for the Flow network configuration on the provided pflag set. +// Args: +// +// *pflag.FlagSet: the pflag set of the Flow node. 
+//	*Config: the default network config used to set default values on the flags
+func InitializeNetworkFlags(flags *pflag.FlagSet, config *Config) {
+	flags.Bool(networkingConnectionPruning, config.NetworkConnectionPruning, "enable pruning of connections to nodes that are no longer part of the protocol state")
+	flags.Duration(dnsCacheTTL, config.DNSCacheTTL, "time-to-live for dns cache")
+	flags.StringSlice(
+		preferredUnicastsProtocols, config.PreferredUnicastProtocols, "preferred unicast protocols in ascending order of preference")
+	flags.Uint32(receivedMessageCacheSize, config.NetworkReceivedMessageCacheSize, "incoming message cache size at networking layer")
+	flags.Uint32(
+		disallowListNotificationCacheSize,
+		config.DisallowListNotificationCacheSize,
+		"cache size for notification events from disallow list")
+	flags.Duration(peerUpdateInterval, config.PeerUpdateInterval, "how often to refresh the peer connections for the node")
+	flags.Duration(BuildFlagName(unicastKey, MessageTimeoutKey), config.Unicast.MessageTimeout, "how long a unicast transmission can take to complete")
+	flags.Duration(BuildFlagName(unicastKey, unicastManagerKey, createStreamBackoffDelayKey), config.Unicast.UnicastManager.CreateStreamBackoffDelay,
+		"initial backoff delay between failing to establish a connection with another node and retrying, "+
+			"this delay increases exponentially with the number of subsequent failures to establish a connection.")
+	flags.Uint64(BuildFlagName(unicastKey, unicastManagerKey, streamZeroRetryResetThresholdKey), config.Unicast.UnicastManager.StreamZeroRetryResetThreshold,
+		"reset stream creation retry budget from zero to the maximum after consecutive successful streams reach this threshold.")
+	flags.Uint64(BuildFlagName(unicastKey, unicastManagerKey, maxStreamCreationRetryAttemptTimesKey),
+		config.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes,
+		"max attempts to create a unicast stream.")
+	flags.Uint32(BuildFlagName(unicastKey, unicastManagerKey, configCacheSizeKey), config.Unicast.UnicastManager.ConfigCacheSize,
+		"cache size of the dial config cache, recommended to be big enough to accommodate all the nodes in the network.")
+
+	// unicast stream handler rate limits
+	flags.Int(BuildFlagName(unicastKey, rateLimiterKey, messageRateLimitKey), config.Unicast.RateLimiter.MessageRateLimit, "maximum number of unicast messages that a peer can send per second")
+	flags.Int(BuildFlagName(unicastKey, rateLimiterKey, BandwidthRateLimitKey), config.Unicast.RateLimiter.BandwidthRateLimit,
+		"bandwidth size in bytes a peer is allowed to send via unicast streams per second")
+	flags.Int(BuildFlagName(unicastKey, rateLimiterKey, BandwidthBurstLimitKey), config.Unicast.RateLimiter.BandwidthBurstLimit, "bandwidth size in bytes a peer is allowed to send at one time")
+	flags.Duration(BuildFlagName(unicastKey, rateLimiterKey, LockoutDurationKey), config.Unicast.RateLimiter.LockoutDuration,
+		"the duration a rate-limited peer must wait before it is allowed to successfully reconnect to the node")
+	flags.Bool(BuildFlagName(unicastKey, rateLimiterKey, DryRunKey), config.Unicast.RateLimiter.DryRun, "disable peer disconnects and connection gating when rate limiting peers")
+	flags.Bool(BuildFlagName(unicastKey, enableStreamProtectionKey),
+		config.Unicast.EnableStreamProtection,
+		"enable stream protection for unicast streams; when enabled, all connections that are being established or have already been established for unicast streams are protected")
+
+	LoadLibP2PResourceManagerFlags(flags, config)
+	
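+	// Note: the watermark flags below configure the libp2p connection manager described
+	// in connection_manager.go. Pruning only starts once the peer count exceeds the high
+	// watermark and then trims down to the low watermark, so the low watermark should be
+	// set below the high watermark for pruning to behave as documented.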
+	flags.Int(BuildFlagName(connectionManagerKey, lowWatermarkKey), config.ConnectionManager.LowWatermark, "low watermark for libp2p connection manager")
+	flags.Int(BuildFlagName(connectionManagerKey, highWatermarkKey), config.ConnectionManager.HighWatermark, "high watermark for libp2p connection manager")
+	flags.Duration(BuildFlagName(connectionManagerKey, gracePeriodKey), config.ConnectionManager.GracePeriod, "grace period for libp2p connection manager")
+	flags.Duration(BuildFlagName(connectionManagerKey, silencePeriodKey), config.ConnectionManager.SilencePeriod, "silence period for libp2p connection manager")
+	flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.PeerScoringEnabledKey), config.GossipSub.PeerScoringEnabled, "enable peer scoring on the pubsub network")
+	flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.LocalMeshLogIntervalKey),
+		config.GossipSub.RpcTracer.LocalMeshLogInterval,
+		"logging interval for local mesh in gossipsub tracer")
+	flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.ScoreTracerIntervalKey), config.GossipSub.RpcTracer.ScoreTracerInterval,
+		"logging interval for peer score tracer in gossipsub, set to 0 to disable")
+	flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.RPCSentTrackerCacheSizeKey), config.GossipSub.RpcTracer.RPCSentTrackerCacheSize,
+		"cache size of the rpc sent tracker used by the gossipsub mesh tracer.")
+	flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.RPCSentTrackerQueueCacheSizeKey), config.GossipSub.RpcTracer.RPCSentTrackerQueueCacheSize,
+		"cache size of the rpc sent tracker worker queue.")
+
+	flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.DuplicateMessageCacheTrackerKey, p2pconfig.DuplicateMessageCacheTrackerSizeKey),
+		config.GossipSub.RpcTracer.DuplicateMessageTrackerConfig.CacheSize,
+		"cache size of the gossipsub duplicate message tracker.")
+	flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.DuplicateMessageCacheTrackerKey, p2pconfig.DuplicateMessageCacheTrackerDecayKey),
+		config.GossipSub.RpcTracer.DuplicateMessageTrackerConfig.Decay,
+		"decay rate for the peer duplicate message counters.")
+	flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.DuplicateMessageCacheTrackerKey, p2pconfig.SkipDecayThresholdKey),
+		config.GossipSub.RpcTracer.DuplicateMessageTrackerConfig.SkipDecayThreshold,
+		"the duplicate message count threshold below which the penalty will not be decayed")
+
+	flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcTracerKey, p2pconfig.RPCSentTrackerNumOfWorkersKey), config.GossipSub.RpcTracer.RpcSentTrackerNumOfWorkers,
+		"number of workers for the rpc sent tracker worker pool.")
+	// gossipsub RPC control message validation limits used for validation configuration and rate limiting
+	flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.InspectionQueueConfigKey, p2pconfig.NumberOfWorkersKey),
+		config.GossipSub.RpcInspector.Validation.InspectionQueue.NumberOfWorkers,
+		"number of gossipsub RPC control message validation inspector component workers")
+	flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.InspectionQueueConfigKey, p2pconfig.QueueSizeKey),
+		config.GossipSub.RpcInspector.Validation.InspectionQueue.Size,
+		"queue size for gossipsub RPC validation inspector events worker pool queue.")
+	
flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ClusterPrefixedMessageConfigKey, p2pconfig.TrackerCacheSizeKey), + config.GossipSub.RpcInspector.Validation.ClusterPrefixedMessage.ControlMsgsReceivedCacheSize, + "cache size for gossipsub RPC validation inspector cluster prefix received tracker.") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ClusterPrefixedMessageConfigKey, p2pconfig.TrackerCacheDecayKey), + config.GossipSub.RpcInspector.Validation.ClusterPrefixedMessage.ControlMsgsReceivedCacheDecay, + "the decay value used to decay cluster prefix received topics received cached counters.") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ClusterPrefixedMessageConfigKey, p2pconfig.HardThresholdKey), + config.GossipSub.RpcInspector.Validation.ClusterPrefixedMessage.HardThreshold, + "the maximum number of cluster-prefixed control messages allowed to be processed when the active cluster id is unset or a mismatch is detected, exceeding this threshold will result in node penalization by gossipsub.") + // networking event notifications + flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.NotificationCacheSizeKey), config.GossipSub.RpcInspector.NotificationCacheSize, + "cache size for notification events from gossipsub rpc inspector") + // application layer spam prevention (alsp) protocol + flags.Bool(alspDisabled, config.AlspConfig.DisablePenalty, "disable the penalty mechanism of the alsp protocol. default value (recommended) is false") + flags.Uint32(alspSpamRecordCacheSize, config.AlspConfig.SpamRecordCacheSize, "size of spam record cache, recommended to be 10x the number of authorized nodes") + flags.Uint32(alspSpamRecordQueueSize, config.AlspConfig.SpamReportQueueSize, "size of spam report queue, recommended to be 100x the number of authorized nodes") + flags.Duration(alspHearBeatInterval, + config.AlspConfig.HearBeatInterval, + "interval between two consecutive heartbeat events at alsp, recommended to leave it as default unless you know what you are doing.") + flags.Float32(alspSyncEngineBatchRequestBaseProb, + config.AlspConfig.SyncEngine.BatchRequestBaseProb, + "base probability of creating a misbehavior report for a batch request message") + flags.Float32(alspSyncEngineRangeRequestBaseProb, + config.AlspConfig.SyncEngine.RangeRequestBaseProb, + "base probability of creating a misbehavior report for a range request message") + flags.Float32(alspSyncEngineSyncRequestProb, config.AlspConfig.SyncEngine.SyncRequestProb, "probability of creating a misbehavior report for a sync request message") + + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.DisabledKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Inspect.Disabled, + "disable rpc inspection for all control message types") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.EnableKey, p2pconfig.GraftKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Inspect.EnableGraft, + "disable graft control message inspection") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.EnableKey, 
p2pconfig.PruneKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Inspect.EnablePrune, + "disable prune control message inspection") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.EnableKey, p2pconfig.IHaveKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Inspect.EnableIHave, + "disable ihave control message inspection") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.EnableKey, p2pconfig.IWantKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Inspect.EnableIWant, + "disable iwant control message inspection") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.EnableKey, p2pconfig.PublishKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Inspect.EnablePublish, + "disable rpc publish message inspection") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.InspectionKey, p2pconfig.RejectUnstakedPeers), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Inspect.RejectUnstakedPeers, + "reject rpcs from unstaked peers") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.TruncationKey, p2pconfig.DisabledKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Truncate.Disabled, + "disable rpc truncation for all control message types") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.TruncationKey, p2pconfig.EnableKey, p2pconfig.GraftKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Truncate.EnableGraft, + "disable graft control message truncation") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.TruncationKey, p2pconfig.EnableKey, p2pconfig.PruneKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Truncate.EnablePrune, + "disable prune control message truncation") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.TruncationKey, p2pconfig.EnableKey, p2pconfig.IHaveKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Truncate.EnableIHave, + "disable ihave control message truncation") + flags.Bool(BuildFlagName(gossipsubKey, + p2pconfig.RpcInspectorKey, + p2pconfig.ValidationConfigKey, + p2pconfig.ProcessKey, + p2pconfig.TruncationKey, + p2pconfig.EnableKey, + p2pconfig.IHaveKey, + p2pconfig.MessageIDKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Truncate.EnableIHaveMessageIds, + "disable ihave message id truncation") + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.ProcessKey, p2pconfig.TruncationKey, p2pconfig.EnableKey, p2pconfig.IWantKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Truncate.EnableIWant, + "disable iwant control message truncation") + flags.Bool(BuildFlagName(gossipsubKey, + p2pconfig.RpcInspectorKey, + p2pconfig.ValidationConfigKey, + p2pconfig.ProcessKey, + p2pconfig.TruncationKey, + p2pconfig.EnableKey, + p2pconfig.IWantKey, + 
p2pconfig.MessageIDKey), + config.GossipSub.RpcInspector.Validation.InspectionProcess.Truncate.EnableIWantMessageIds, + "disable iwant message id truncation") + + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IHaveConfigKey, p2pconfig.MessageCountThreshold), + config.GossipSub.RpcInspector.Validation.IHave.MessageCountThreshold, + "threshold for the number of ihave control messages to accept on a single RPC message, if exceeded the RPC message will be sampled and truncated") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IHaveConfigKey, p2pconfig.MessageIdCountThreshold), + config.GossipSub.RpcInspector.Validation.IHave.MessageIdCountThreshold, + "threshold for the number of message ids on a single ihave control message to accept, if exceeded the RPC message ids will be sampled and truncated") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IHaveConfigKey, p2pconfig.DuplicateTopicIdThresholdKey), + config.GossipSub.RpcInspector.Validation.IHave.DuplicateTopicIdThreshold, + "the max allowed duplicate topic IDs across all ihave control messages in a single RPC message, if exceeded a misbehavior report will be created") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IHaveConfigKey, p2pconfig.DuplicateMessageIdThresholdKey), + config.GossipSub.RpcInspector.Validation.IHave.DuplicateMessageIdThreshold, + "the max allowed duplicate message IDs in a single ihave control message, if exceeded a misbehavior report will be created") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IHaveConfigKey, p2pconfig.InvalidTopicIdThresholdKey), + config.GossipSub.RpcInspector.Validation.IHave.InvalidTopicIdThreshold, + "the max allowed invalid topics in a single ihave control message, if exceeded a misbehavior report will be created", + ) + + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.GraftPruneKey, p2pconfig.MessageCountThreshold), + config.GossipSub.RpcInspector.Validation.GraftPrune.MessageCountThreshold, + "threshold for the number of graft or prune control messages to accept on a single RPC message, if exceeded the RPC message will be sampled and truncated") + flags.Uint(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IWantConfigKey, p2pconfig.MessageCountThreshold), + config.GossipSub.RpcInspector.Validation.IWant.MessageCountThreshold, + "threshold for the number of iwant control messages to accept on a single RPC message, if exceeded the RPC message will be sampled and truncated") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IWantConfigKey, p2pconfig.MessageIdCountThreshold), + config.GossipSub.RpcInspector.Validation.IWant.MessageIdCountThreshold, + "threshold for the number of message ids on a single iwant control message to accept, if exceeded the RPC message ids will be sampled and truncated") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IWantConfigKey, p2pconfig.CacheMissThresholdKey), + config.GossipSub.RpcInspector.Validation.IWant.CacheMissThreshold, + "max number of cache misses (untracked) allowed in a single iWant control message, if exceeded a misbehavior report 
will be created") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.IWantConfigKey, p2pconfig.DuplicateMsgIDThresholdKey), + config.GossipSub.RpcInspector.Validation.IWant.DuplicateMsgIdThreshold, + "max allowed duplicate message IDs in a single iWant control message, if exceeded a misbehavior report will be created") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.PublishMessagesConfigKey, p2pconfig.MaxSampleSizeKey), + config.GossipSub.RpcInspector.Validation.PublishMessages.MaxSampleSize, + "the max sample size for async validation of publish messages, if exceeded the message will be sampled for inspection, but is not truncated") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.PublishMessagesConfigKey, p2pconfig.MessageErrorThresholdKey), + config.GossipSub.RpcInspector.Validation.PublishMessages.ErrorThreshold, + "the max number of errors allowed in a (sampled) set of publish messages on a single rpc, if exceeded a misbehavior report will be created") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.GraftPruneKey, p2pconfig.DuplicateTopicIdThresholdKey), + config.GossipSub.RpcInspector.Validation.GraftPrune.DuplicateTopicIdThreshold, + "the max allowed duplicate topic IDs across all graft or prune control messages in a single RPC message, if exceeded a misbehavior report will be created") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.RpcInspectorKey, p2pconfig.ValidationConfigKey, p2pconfig.GraftPruneKey, p2pconfig.InvalidTopicIdThresholdKey), + config.GossipSub.RpcInspector.Validation.GraftPrune.InvalidTopicIdThreshold, + "the max allowed invalid topic across all graft or prune control messages in a single RPC message, if exceeded a misbehavior report will be created") + + flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.SubscriptionProviderKey, p2pconfig.UpdateIntervalKey), + config.GossipSub.SubscriptionProvider.UpdateInterval, + "interval for updating the list of subscribed topics for all peers in the gossipsub, recommended value is a few minutes") + flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.SubscriptionProviderKey, p2pconfig.CacheSizeKey), + config.GossipSub.SubscriptionProvider.CacheSize, + "size of the cache that keeps the list of topics each peer has subscribed to, recommended size is 10x the number of authorized nodes") + + flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.DecayIntervalKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.DecayInterval, + "interval at which the counters associated with a peer behavior in GossipSub system are decayed, recommended value is one minute") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.AppSpecificScoreWeightKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.AppSpecificScoreWeight, + "the weight for app-specific scores") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.DecayToZeroKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.DecayToZero, + "the maximum value below which a peer scoring counter is reset to zero") + + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, 
p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.SkipAtomicValidationKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.SkipAtomicValidation, + "the default value for the skip atomic validation flag for topics") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.InvalidMessageDeliveriesWeightKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.InvalidMessageDeliveriesWeight, + "this value is applied to the square of the number of invalid message deliveries on a topic") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.InvalidMessageDeliveriesDecayKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.InvalidMessageDeliveriesDecay, + "the decay factor used to decay the number of invalid message deliveries") + flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.TimeInMeshQuantumKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.TimeInMeshQuantum, + "the time in mesh quantum for the GossipSub scoring system") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.TopicWeightKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.TopicWeight, + "the weight of a topic in the GossipSub scoring system") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshMessageDeliveriesDecayKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.MeshMessageDeliveriesDecay, + "this is applied to the number of actual message deliveries in a topic mesh at each decay interval (i.e., DecayInterval)") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshMessageDeliveriesCapKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.MeshMessageDeliveriesCap, + "the maximum number of actual message deliveries in a topic mesh that is used to calculate the score of a peer in that topic mesh") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshMessageDeliveryThresholdKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.MeshMessageDeliveryThreshold, + "the threshold for the number of actual message deliveries in a topic mesh that is used to calculate the score of a peer in that topic mesh") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshDeliveriesWeightKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.MeshDeliveriesWeight, + "the weight for applying penalty when a peer is under-performing in a topic mesh") + flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshMessageDeliveriesWindowKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.MeshMessageDeliveriesWindow, + "the window size is the time interval during which we count a delivery of an already seen message towards the score of a peer in a topic mesh. The delivery is counted by GossipSub only if the previous sender of the message is different from the current sender")
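For orientation, the topic-parameter flags above feed directly into go-libp2p-pubsub's per-topic scoring structure. A minimal sketch of that structure with made-up values (illustrative only, not Flow's defaults, which live in the config file and are wired in via the flags above):

```go
package example

import (
	"time"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

// exampleTopicScoreParams shows which pubsub.TopicScoreParams fields the
// flags above map onto; all numbers are arbitrary placeholders.
func exampleTopicScoreParams() *pubsub.TopicScoreParams {
	return &pubsub.TopicScoreParams{
		SkipAtomicValidation: true,
		TopicWeight:          1.0,
		TimeInMeshQuantum:    time.Hour, // time-in-mesh is counted in units of this quantum

		// Invalid deliveries: the (negative) weight is applied to the SQUARE of
		// the counter, so the penalty grows quadratically with invalid messages.
		InvalidMessageDeliveriesWeight: -100,
		InvalidMessageDeliveriesDecay:  0.99,

		// Mesh deliveries: a peer delivering fewer than the threshold within the
		// window/activation constraints is penalized by the (negative) weight.
		MeshMessageDeliveriesWeight:     -10,
		MeshMessageDeliveriesDecay:      0.99,
		MeshMessageDeliveriesCap:        100,
		MeshMessageDeliveriesThreshold:  10,
		MeshMessageDeliveriesWindow:     10 * time.Millisecond,
		MeshMessageDeliveriesActivation: time.Minute,
	}
}
```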
+ flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.TopicKey, p2pconfig.MeshMessageDeliveryActivationKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.TopicParameters.MeshMessageDeliveryActivation, + "the time interval that we wait after a new peer joins a topic mesh before we start counting its actual message deliveries in that topic mesh") + + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.ThresholdsKey, p2pconfig.GossipThresholdKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Gossip, + "when a peer's penalty drops below this threshold, no gossip is emitted towards that peer and gossip from that peer is ignored") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.ThresholdsKey, p2pconfig.PublishKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Publish, + "when a peer's penalty drops below this threshold, self-published messages are not propagated towards this peer") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.ThresholdsKey, p2pconfig.GraylistThresholdKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.Graylist, + "when a peer's penalty drops below this threshold, the peer is graylisted, i.e., incoming RPCs from the peer are ignored") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.ThresholdsKey, p2pconfig.AcceptPXThresholdKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.AcceptPX, + "when a peer sends us PX information with a prune, we only accept it and connect to the supplied peers if the originating peer's penalty exceeds this threshold") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.ThresholdsKey, p2pconfig.OpportunisticGraftThresholdKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.Thresholds.OpportunisticGraft, + "when the median peer penalty in the mesh drops below this value, the node may select more peers with penalty above the median to opportunistically graft on the mesh") + + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.BehaviourKey, p2pconfig.BehaviourPenaltyThresholdKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyThreshold, + "the threshold beyond which the behavior of a peer is considered bad by GossipSub") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.BehaviourKey, p2pconfig.BehaviourPenaltyWeightKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyWeight, + "the weight for applying a penalty when a peer's misbehavior goes beyond the threshold")
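These threshold flags correspond to go-libp2p-pubsub's `PeerScoreThresholds`, where the first three are negative and ordered `GraylistThreshold < PublishThreshold < GossipThreshold < 0`. A minimal sketch with placeholder values (not Flow's defaults):

```go
package example

import pubsub "github.com/libp2p/go-libp2p-pubsub"

// Placeholder values for illustration; Flow's actual defaults are loaded from
// the default config and wired in via the flags above.
var exampleThresholds = pubsub.PeerScoreThresholds{
	GossipThreshold:             -100,  // below this: no gossip to/from the peer
	PublishThreshold:            -500,  // below this: self-published messages are not propagated to the peer
	GraylistThreshold:           -1000, // below this: the peer's RPCs are ignored entirely
	AcceptPXThreshold:           100,   // PX in prunes is only accepted from peers scoring above this
	OpportunisticGraftThreshold: 5,     // opportunistically graft when the mesh median drops below this
}
```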
+ flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.InternalKey, p2pconfig.BehaviourKey, p2pconfig.BehaviourPenaltyDecayKey), + config.GossipSub.ScoringParameters.PeerScoring.Internal.Behaviour.PenaltyDecay, + "the decay factor for the misbehavior counter of a peer. The misbehavior counter is incremented by GossipSub for iHave broken promises or GRAFT flooding attacks (i.e., each GRAFT received from a remote peer while that peer is on a PRUNE backoff)") + + flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.MaxDebugLogsKey), + config.GossipSub.ScoringParameters.PeerScoring.Protocol.MaxDebugLogs, + "the max number of debug/trace log events per second. Logs emitted above this threshold are dropped") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.MaxAppSpecificKey, p2pconfig.PenaltyKey), + config.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.MaxAppSpecificPenalty, + "the maximum penalty for severe offenses that we apply to a remote node score") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.MinAppSpecificKey, p2pconfig.PenaltyKey), + config.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.MinAppSpecificPenalty, + "the minimum penalty for severe offenses that we apply to a remote node score") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.UnknownIdentityKey, p2pconfig.PenaltyKey), + config.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.UnknownIdentityPenalty, + "the penalty for unknown identity. It is applied to the peer's score when the peer is not in the identity list") + flags.Float64(BuildFlagName(gossipsubKey, + p2pconfig.ScoreParamsKey, + p2pconfig.PeerScoringKey, + p2pconfig.ProtocolKey, + p2pconfig.AppSpecificKey, + p2pconfig.InvalidSubscriptionKey, + p2pconfig.PenaltyKey), + config.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.InvalidSubscriptionPenalty, + "the penalty for invalid subscription.
It is applied to the peer's score when the peer subscribes to a topic that it is not authorized to subscribe to") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.DuplicateMessageKey, p2pconfig.PenaltyKey), + config.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.DuplicateMessagePenalty, + "the penalty for duplicate messages detected by the gossipsub tracer for a peer") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.MaxAppSpecificKey, p2pconfig.RewardKey), + config.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.MaxAppSpecificReward, + "the reward for well-behaving staked peers") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.PeerScoringKey, p2pconfig.ProtocolKey, p2pconfig.AppSpecificKey, p2pconfig.StakedIdentityKey, p2pconfig.RewardKey), + config.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.StakedIdentityReward, + "the reward for staked peers") + flags.Float64(BuildFlagName(gossipsubKey, + p2pconfig.ScoreParamsKey, + p2pconfig.PeerScoringKey, + p2pconfig.ProtocolKey, + p2pconfig.AppSpecificKey, + p2pconfig.DuplicateMessageKey, + p2pconfig.ThresholdKey), + config.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.DuplicateMessageThreshold, + "the peer's duplicate message count threshold above which the peer will be penalized") + + flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.StartupSilenceDurationKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.StartupSilenceDuration, + "the duration of time, after node startup, during which the scoring registry remains inactive before penalizing nodes.") + flags.Int(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.AppSpecificScoreRegistryKey, p2pconfig.ScoreUpdateWorkerNumKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreUpdateWorkerNum, + "number of workers for the app specific score update worker pool") + flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.AppSpecificScoreRegistryKey, p2pconfig.ScoreUpdateRequestQueueSizeKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreUpdateRequestQueueSize, + "size of the app specific score update worker pool queue") + flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.AppSpecificScoreRegistryKey, p2pconfig.InvalidControlMessageNotificationQueueSizeKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.InvalidControlMessageNotificationQueueSize, + "size of the queue for invalid control message notifications processing") + flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.AppSpecificScoreRegistryKey, p2pconfig.ScoreTTLKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL, + "time to live for app specific scores; when expired a new request will be sent to the score update worker pool; until then the expired score will be used") + + flags.Uint32(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey,
p2pconfig.CacheSizeKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.CacheSize, + "size of the spam record cache, recommended size is 10x the number of authorized nodes") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.PenaltyDecaySlowdownThresholdKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.PenaltyDecaySlowdownThreshold, + fmt.Sprintf("the penalty level at which the decay rate is reduced by --%s", + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.DecayRateReductionFactorKey))) + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.DecayRateReductionFactorKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.DecayRateReductionFactor, + fmt.Sprintf("the value by which the decay rate is decreased every time the penalty drops below the value set by --%s", + BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.PenaltyDecaySlowdownThresholdKey))) + flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.PenaltyDecayEvaluationPeriodKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.PenaltyDecayEvaluationPeriod, + "the period after which the decay rate of a spam record may be adjusted again.") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.MinimumSpamPenaltyDecayFactorKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MinimumSpamPenaltyDecayFactor, + "the minimum rate at which the spam penalty value of a peer decays") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.MaximumSpamPenaltyDecayFactorKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor, + "the maximum rate at which the spam penalty value of a peer decays") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.SpamRecordCacheKey, p2pconfig.DecayKey, p2pconfig.SkipDecayThresholdKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.SkipDecayThreshold, + "when the negative penalty of a peer is above this value (i.e., close enough to zero), the decay function will not be called") + + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.GraftKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.GraftMisbehaviour, + "the penalty applied to the application specific penalty when a peer conducts a graft misbehaviour") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.PruneKey), +
config.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.PruneMisbehaviour, + "the penalty applied to the application specific penalty when a peer conducts a prune misbehaviour") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.IHaveKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.IHaveMisbehaviour, + "the penalty applied to the application specific penalty when a peer conducts an iHave misbehaviour") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.IWantKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.IWantMisbehaviour, + "the penalty applied to the application specific penalty when a peer conducts an iWant misbehaviour") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.PublishKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.PublishMisbehaviour, + "the penalty applied to the application specific penalty when a peer conducts an RPC publish message misbehaviour") + flags.Float64(BuildFlagName(gossipsubKey, p2pconfig.ScoreParamsKey, p2pconfig.ScoringRegistryKey, p2pconfig.MisbehaviourPenaltiesKey, p2pconfig.ClusterPrefixedReductionFactorKey), + config.GossipSub.ScoringParameters.ScoringRegistryParameters.MisbehaviourPenalties.ClusterPrefixedReductionFactor, + "the factor used to reduce the penalty for control message misbehaviours on cluster prefixed topics") + + flags.Bool(BuildFlagName(gossipsubKey, p2pconfig.PeerGaterKey, p2pconfig.EnabledKey), + config.GossipSub.PeerGaterEnabled, + "enable the libp2p peer gater") + flags.Duration(BuildFlagName(gossipsubKey, p2pconfig.PeerGaterKey, p2pconfig.SourceDecayKey), + config.GossipSub.PeerGaterSourceDecay, + "the per IP decay for all counters tracked by the peer gater for a peer") + flags.String(BuildFlagName(gossipsubKey, p2pconfig.PeerGaterKey, p2pconfig.TopicDeliveryWeightsKey), + config.GossipSub.PeerGaterTopicDeliveryWeightsOverride, + "topic delivery weights override, a comma-separated list with the format topic_1:2.2,topic_2:3.2,topic_3:1.7; these will be used to override the default topic weight of 1.0 for the specified topics.") +}
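To make the naming scheme above concrete: every flag name is just its config-key path joined with dashes, which is what lets SetAliases (further below) map each flag back to its dotted viper key. A minimal, self-contained sketch of the idea (the dotted key here is hypothetical, not copied from the config file):

```go
package main

import (
	"fmt"
	"strings"
)

// buildFlagName mirrors netconf.BuildFlagName: key segments joined with dashes.
func buildFlagName(keys ...string) string {
	return strings.Join(keys, "-")
}

func main() {
	// A flag name assembled from its key segments, as LoadNetworkConfigFlags does:
	flag := buildFlagName("gossipsub", "peer-gater", "enabled")
	fmt.Println(flag) // gossipsub-peer-gater-enabled

	// SetAliases works in the opposite direction: given a dotted viper key
	// (hypothetical example), it drops the top-level prefix and joins the rest
	// with dashes to recover the CLI flag name.
	fullKey := "network-config.gossipsub.peer-gater.enabled"
	segments := strings.Split(fullKey, ".")
	fmt.Println(strings.Join(segments[1:], "-")) // gossipsub-peer-gater-enabled
}
```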
+ +// LoadLibP2PResourceManagerFlags loads all CLI flags for the libp2p resource manager configuration on the provided pflag set. +// Args: +// *pflag.FlagSet: the pflag set of the Flow node. +// *Config: the default network config used to set default values on the flags +func LoadLibP2PResourceManagerFlags(flags *pflag.FlagSet, config *Config) { + flags.Float64(fmt.Sprintf("%s-%s", rootResourceManagerPrefix, fileDescriptorsRatioPrefix), + config.ResourceManager.FileDescriptorsRatio, + "ratio of available file descriptors to be used by libp2p (in (0,1])") + flags.Float64(fmt.Sprintf("%s-%s", rootResourceManagerPrefix, memoryLimitRatioPrefix), + config.ResourceManager.MemoryLimitRatio, + "ratio of available memory to be used by libp2p (in (0,1])") + loadLibP2PResourceManagerFlagsForScope(systemScope, flags, &config.ResourceManager.Override.System) + loadLibP2PResourceManagerFlagsForScope(transientScope, flags, &config.ResourceManager.Override.Transient) + loadLibP2PResourceManagerFlagsForScope(protocolScope, flags, &config.ResourceManager.Override.Protocol) + loadLibP2PResourceManagerFlagsForScope(peerScope, flags, &config.ResourceManager.Override.Peer) + loadLibP2PResourceManagerFlagsForScope(peerProtocolScope, flags, &config.ResourceManager.Override.PeerProtocol) +} + +// loadLibP2PResourceManagerFlagsForScope loads all CLI flags for the libp2p resource manager configuration on the provided pflag set for the specific scope. +// Args: +// *p2pconfig.ResourceScope: the resource scope to load flags for. +// *pflag.FlagSet: the pflag set of the Flow node. +// *p2pconfig.ResourceManagerOverrideLimit: the override limits used to set default values on the flags. +func loadLibP2PResourceManagerFlagsForScope(scope p2pconfig.ResourceScope, flags *pflag.FlagSet, override *p2pconfig.ResourceManagerOverrideLimit) { + flags.Int(fmt.Sprintf("%s-%s-%s-%s", rootResourceManagerPrefix, limitsOverridePrefix, scope, inboundStreamLimit), + override.StreamsInbound, + fmt.Sprintf("the limit on the number of inbound streams at %s scope, 0 means use the default value", scope)) + flags.Int(fmt.Sprintf("%s-%s-%s-%s", rootResourceManagerPrefix, limitsOverridePrefix, scope, outboundStreamLimit), + override.StreamsOutbound, + fmt.Sprintf("the limit on the number of outbound streams at %s scope, 0 means use the default value", scope)) + flags.Int(fmt.Sprintf("%s-%s-%s-%s", rootResourceManagerPrefix, limitsOverridePrefix, scope, inboundConnectionLimit), + override.ConnectionsInbound, + fmt.Sprintf("the limit on the number of inbound connections at %s scope, 0 means use the default value", scope)) + flags.Int(fmt.Sprintf("%s-%s-%s-%s", rootResourceManagerPrefix, limitsOverridePrefix, scope, outboundConnectionLimit), + override.ConnectionsOutbound, + fmt.Sprintf("the limit on the number of outbound connections at %s scope, 0 means use the default value", scope)) + flags.Int(fmt.Sprintf("%s-%s-%s-%s", rootResourceManagerPrefix, limitsOverridePrefix, scope, fileDescriptorsLimit), + override.FD, + fmt.Sprintf("the limit on the number of file descriptors at %s scope, 0 means use the default value", scope)) + flags.Int(fmt.Sprintf("%s-%s-%s-%s", rootResourceManagerPrefix, limitsOverridePrefix, scope, memoryLimitBytes), + override.Memory, + fmt.Sprintf("the limit on the amount of memory (bytes) at %s scope, 0 means use the default value", scope)) +}
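The scoped helper above stamps out the same six override flags once per resource scope. A sketch of the resulting naming pattern, using hypothetical stand-ins for the package's unexported prefix constants and scope strings (the real values live in this package):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// Hypothetical stand-ins for the unexported constants in netconf.
const (
	rootPrefix     = "libp2p-resource-manager"
	overridePrefix = "limits-override"
)

func main() {
	flags := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	for _, scope := range []string{"system", "transient", "protocol", "peer", "peer-protocol"} {
		// e.g. libp2p-resource-manager-limits-override-system-streams-inbound
		name := fmt.Sprintf("%s-%s-%s-%s", rootPrefix, overridePrefix, scope, "streams-inbound")
		flags.Int(name, 0, fmt.Sprintf("inbound stream limit at %s scope, 0 means use the default value", scope))
	}
	flags.VisitAll(func(f *pflag.Flag) { fmt.Println(f.Name) })
}
```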
+ +// SetAliases sets an alias for each CLI flag defined for network config overrides to its corresponding +// full key in the viper config store. This is required because in our config file all configuration values for the +// Flow network are stored one level down on the network-config property. When the default config is bootstrapped viper will +// store these values with the "network-config." prefix on the config key; because we do not want to use CLI flags like --network-config.networking-connection-pruning +// to override default values, we instead use clean flags like --networking-connection-pruning and create an alias from networking-connection-pruning -> network-config.networking-connection-pruning +// to ensure overrides happen as expected. +// Args: +// *viper.Viper: instance of the viper store to register network config aliases on. +// Returns: +// error: if a flag does not have a corresponding key in the viper store; all returned errors are fatal. +func SetAliases(conf *viper.Viper) error { + m := make(map[string]string) + // create map of key -> full pathkey + // ie: "networking-connection-pruning" -> "network-config.networking-connection-pruning" + for _, key := range conf.AllKeys() { + s := strings.Split(key, ".") + // Each networking config has the format of network-config.key1.key2.key3... in the config file + // which is translated to key1-key2-key3... in the CLI flags + // Hence, we map the CLI flag name to the full key in the config store + // TODO: all networking flags should also be prefixed with "network-config". Hence, this + // mapping should be from network-config.key1.key2.key3... to network-config-key1-key2-key3... + m[strings.Join(s[1:], "-")] = key + } + + // each flag name should correspond to exactly one key in our config store after it is loaded with the default config + for _, flagName := range AllFlagNames() { + fullKey, ok := m[flagName] + if !ok { + return fmt.Errorf("invalid network configuration: missing configuration key for flag name %s; check config file and cli flags", flagName) + } + + conf.RegisterAlias(fullKey, flagName) + } + return nil +} + +func BuildFlagName(keys ...string) string { + return strings.Join(keys, "-") +} diff --git a/network/netconf/flags_test.go b/network/netconf/flags_test.go new file mode 100644 index 00000000000..984dc68936e --- /dev/null +++ b/network/netconf/flags_test.go @@ -0,0 +1,51 @@ +package netconf_test + +import ( + "testing" + + "github.com/onflow/flow-go/network/netconf" +) + +// TestBuildFlagName tests the BuildFlagName function for various cases +func TestBuildFlagName(t *testing.T) { + tests := []struct { + name string + keys []string + expected string + }{ + { + name: "Single key", + keys: []string{"key1"}, + expected: "key1", + }, + { + name: "Two keys", + keys: []string{"key1", "key2"}, + expected: "key1-key2", + }, + { + name: "Multiple keys", + keys: []string{"key1", "key2", "key3"}, + expected: "key1-key2-key3", + }, + { + name: "No keys", + keys: []string{}, + expected: "", + }, + { + name: "Key with spaces", + keys: []string{"key 1", "key 2"}, + expected: "key 1-key 2", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := netconf.BuildFlagName(tt.keys...) + if result != tt.expected { + t.Errorf("BuildFlagName(%v) = %v, want %v", tt.keys, result, tt.expected) + } + }) + } +} diff --git a/network/netconf/unicast.go b/network/netconf/unicast.go new file mode 100644 index 00000000000..0dfd31f857f --- /dev/null +++ b/network/netconf/unicast.go @@ -0,0 +1,79 @@ +package netconf + +import "time" + +const ( + rateLimiterKey = "rate-limiter" + unicastManagerKey = "manager" + enableStreamProtectionKey = "enable-stream-protection" + MessageTimeoutKey = "message-timeout" +)
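The `mapstructure` tags on the structs just below are what viper uses (via github.com/mitchellh/mapstructure) to decode the parsed YAML config into these types. A minimal, standalone sketch of that decoding, with made-up values and a trimmed-down struct:

```go
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

// A trimmed-down stand-in for netconf.RateLimiter, for illustration only.
type rateLimiter struct {
	DryRun          bool          `mapstructure:"dry-run"`
	LockoutDuration time.Duration `mapstructure:"lockout-duration"`
}

func main() {
	// Values as they might arrive from the parsed config (made up here).
	raw := map[string]interface{}{
		"dry-run":          true,
		"lockout-duration": 10 * time.Second,
	}
	var rl rateLimiter
	if err := mapstructure.Decode(raw, &rl); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", rl) // {DryRun:true LockoutDuration:10s}
}
```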
+ +// Unicast configuration parameters for the unicast protocol. +type Unicast struct { + // RateLimiter configuration for all unicast rate limiters. + RateLimiter RateLimiter `mapstructure:"rate-limiter"` + + // UnicastManager configuration for the unicast manager. The unicast manager is responsible for establishing unicast streams. + UnicastManager UnicastManager `mapstructure:"manager"` + + // EnableStreamProtection enables stream protection for unicast streams. When enabled, all connections that are being established or + // have already been established for unicast streams are protected, meaning that they won't be closed by the connection manager. + // This is useful for preventing the connection manager from closing unicast streams that are being used by the application layer. + // However, it may interfere with the resource manager of libp2p, i.e., the connection manager may not be able to close connections + // that are not being used by the application layer even when the node is running out of resources for new connections. + EnableStreamProtection bool `mapstructure:"enable-stream-protection"` + + MessageTimeout time.Duration `validate:"gt=0s" mapstructure:"message-timeout"` +} + +const ( + DryRunKey = "dry-run" + LockoutDurationKey = "lockout-duration" + messageRateLimitKey = "message-rate-limit" + BandwidthRateLimitKey = "bandwidth-rate-limit" + BandwidthBurstLimitKey = "bandwidth-burst-limit" +) + +// RateLimiter unicast rate limiter configuration for the message and bandwidth rate limiters. +type RateLimiter struct { + // DryRun when set to true disables connection disconnects and gating when unicast rate limiters are configured. + DryRun bool `mapstructure:"dry-run"` + // LockoutDuration the number of seconds a peer will be forced to wait before being allowed to successfully reconnect to the node + // after being rate limited. + LockoutDuration time.Duration `validate:"gte=0" mapstructure:"lockout-duration"` + // MessageRateLimit the number of unicast messages a peer may send per second. + MessageRateLimit int `validate:"gte=0" mapstructure:"message-rate-limit"` + // BandwidthRateLimit bandwidth size in bytes a peer is allowed to send via unicast streams per second. + BandwidthRateLimit int `validate:"gte=0" mapstructure:"bandwidth-rate-limit"` + // BandwidthBurstLimit bandwidth size in bytes a peer is allowed to send via unicast streams at once. + BandwidthBurstLimit int `validate:"gte=0" mapstructure:"bandwidth-burst-limit"` +}
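MessageRateLimit and the bandwidth limits above are classic token-bucket parameters, and this change already depends on golang.org/x/time/rate elsewhere (see the blob service imports below). A sketch of how such a limit behaves, assuming nothing about the actual limiter wiring in flow-go:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// A bandwidth limiter refilling at 1 MiB/s with a 4 MiB burst, mirroring the
	// BandwidthRateLimit / BandwidthBurstLimit pair (values made up).
	limiter := rate.NewLimiter(rate.Limit(1<<20), 4<<20)

	msg := make([]byte, 2<<20) // a 2 MiB unicast payload
	if limiter.AllowN(time.Now(), len(msg)) {
		fmt.Println("within budget: deliver the message")
	} else {
		// In dry-run mode this would only be logged; otherwise the peer is
		// rate limited and locked out for LockoutDuration.
		fmt.Println("over budget: rate limit the peer")
	}
}
```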
+ +const ( + createStreamBackoffDelayKey = "create-stream-retry-delay" + streamZeroRetryResetThresholdKey = "stream-zero-retry-reset-threshold" + maxStreamCreationRetryAttemptTimesKey = "max-stream-creation-retry-attempt-times" + configCacheSizeKey = "dial-config-cache-size" +) + +// UnicastManager configuration for the unicast manager. The unicast manager is responsible for establishing unicast streams. +type UnicastManager struct { + // CreateStreamBackoffDelay initial delay used in the exponential backoff for create stream retries. + CreateStreamBackoffDelay time.Duration `validate:"gt=0s" mapstructure:"create-stream-retry-delay"` + // StreamZeroRetryResetThreshold is the threshold that determines when to reset the stream creation retry budget to the default value. + // + // For example, the default value of 100 means that if the stream creation retry budget drops to 0, it will be reset to the default value + // once the number of consecutive successful streams reaches 100. + // + // This is to prevent the retry budget from being reset too frequently, as the retry budget is used to gauge the reliability of the stream creation. + // When the stream creation retry budget is reset to the default value, it means that the stream creation is reliable enough to be trusted again. + // This parameter mandates when the stream creation is reliable enough to be trusted again; i.e., when the number of consecutive successful streams reaches this threshold. + // Note that the counter is reset to 0 when a stream creation fails, so a value of, for example, 100 means that the most recent + // 100 stream creations were all successful. + StreamZeroRetryResetThreshold uint64 `validate:"gt=0" mapstructure:"stream-zero-retry-reset-threshold"` + // MaxStreamCreationRetryAttemptTimes is the maximum number of attempts to be made to create a stream to a remote node over a direct unicast (1:1) connection before we give up. + MaxStreamCreationRetryAttemptTimes uint64 `validate:"gt=1" mapstructure:"max-stream-creation-retry-attempt-times"` + // ConfigCacheSize is the cache size of the dial config cache that keeps the individual dial config for each peer. + ConfigCacheSize uint32 `validate:"gt=0" mapstructure:"dial-config-cache-size"` +} diff --git a/network/network.go b/network/network.go index 50c84887b72..32b1a172d3c 100644 --- a/network/network.go +++ b/network/network.go @@ -5,15 +5,40 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/network/channels" ) -// Network represents the network layer of the node. It allows processes that -// work across the peer-to-peer network to register themselves as an engine with -// a unique engine ID. The returned conduit allows the process to communicate to -// the same engine on other nodes across the network in a network-agnostic way. -type Network interface { +// NetworkingType is the type of the Flow networking layer. It is used to differentiate between the public (i.e., unstaked) +// and private (i.e., staked) networks. +type NetworkingType uint8 + +func (t NetworkingType) String() string { + switch t { + case PrivateNetwork: + return "private" + case PublicNetwork: + return "public" + default: + return "unknown" + } +} + +const ( + // PrivateNetwork indicates the staked, private side of the Flow blockchain, which nodes can only join and leave + // by meeting a staking requirement. + PrivateNetwork NetworkingType = iota + 1 + // PublicNetwork indicates the unstaked, public side of the Flow blockchain, which nodes can join and leave at will + // with no staking requirement. + PublicNetwork +) + +// EngineRegistry is one of the networking layer interfaces in Flow (i.e., EngineRegistry, ConduitAdapter, and Underlay). It represents the interface that the networking layer +// offers to the Flow protocol layer, i.e., engines. It is responsible for creating conduits through which engines +// can send and receive messages to and from other engines on the network, as well as registering other services +// such as BlobService and PingService. +type EngineRegistry interface { component.Component // Register will subscribe to the channel with the given engine and // the engine will be notified with incoming messages on the channel.
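To see how the renamed EngineRegistry is consumed: an engine registers once on a channel and gets back a conduit for sending. A compressed sketch, assuming Register's signature (channel, MessageProcessor) -> (Conduit, error) and Conduit's Publish(event, targetIDs...) are unchanged by this PR:

```go
package example

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/network"
	"github.com/onflow/flow-go/network/channels"
)

// registerAndSend sketches engine-side usage of the EngineRegistry: register
// once on a channel, then use the returned Conduit to disseminate events.
// `proc` stands in for the engine's MessageProcessor implementation.
func registerAndSend(reg network.EngineRegistry, proc network.MessageProcessor, event interface{}, targets ...flow.Identifier) error {
	conduit, err := reg.Register(channels.PushBlocks, proc)
	if err != nil {
		return err // registration failures are fatal for the engine
	}
	// Publish sends the event to the same channel on the target nodes in a
	// network-agnostic way; Unicast/Multicast variants also exist on Conduit.
	return conduit.Publish(event, targets...)
}
```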
@@ -23,6 +48,7 @@ type Network interface { // RegisterBlobService registers a BlobService on the given channel, using the given datastore to retrieve values. // The returned BlobService can be used to request blocks from the network. + // RegisterBlobService starts the BlobService component using the network's context. // TODO: We should return a function that can be called to unregister / close the BlobService RegisterBlobService(channel channels.Channel, store datastore.Batching, opts ...BlobServiceOption) (BlobService, error) @@ -30,10 +56,11 @@ type Network interface { RegisterPingService(pingProtocolID protocol.ID, pingInfoProvider PingInfoProvider) (PingService, error) } -// Adapter is a wrapper around the Network implementation. It only exposes message dissemination functionalities. -// Adapter is meant to be utilized by the Conduit interface to send messages to the Network layer to be -// delivered to the remote targets. -type Adapter interface { +// ConduitAdapter is one of the networking layer interfaces in Flow (i.e., EngineRegistry, ConduitAdapter, and Underlay). It represents the interface that the networking layer +// offers to a single conduit, which enables the conduit to send different types of messages, i.e., unicast, multicast, +// and publish, to other conduits on the network. +type ConduitAdapter interface { + MisbehaviorReportConsumer // UnicastOnChannel sends the message in a reliable way to the given recipient. UnicastOnChannel(channels.Channel, interface{}, flow.Identifier) error @@ -48,3 +75,42 @@ type Adapter interface { // receive messages from that channel. UnRegisterChannel(channel channels.Channel) error } + +// Underlay is one of the networking layer interfaces in Flow (i.e., EngineRegistry, ConduitAdapter, and Underlay). It represents the interface that the networking layer +// offers to lower-level networking components such as libp2p. It is responsible for subscribing to and unsubscribing +// from channels, as well as updating the addresses of all the authorized participants in the Flow protocol. +type Underlay interface { + module.ReadyDoneAware + DisallowListNotificationConsumer + + // Subscribe subscribes the network Underlay to a channel. + // No errors are expected during normal operation. + Subscribe(channel channels.Channel) error + + // Unsubscribe unsubscribes the network Underlay from a channel. + // All errors returned from this function can be considered benign. + Unsubscribe(channel channels.Channel) error + + // UpdateNodeAddresses fetches and updates the addresses of all the authorized participants + // in the Flow protocol. + UpdateNodeAddresses() +} + +// Connection represents an interface to read from & write to a connection. +type Connection interface { + Send(msg interface{}) error + Receive() (interface{}, error) +} + +// MisbehaviorReportConsumer is the set of functions used to handle MisbehaviorReports disseminated by misbehavior reporters. +type MisbehaviorReportConsumer interface { + // ReportMisbehaviorOnChannel reports the misbehavior of a node on sending a message to the current node that appears + // valid based on the networking layer but is considered invalid by the current node based on the Flow protocol. + // The misbehavior report is sent to the current node's networking layer on the given channel to be processed. + // Args: + // - channel: The channel on which the misbehavior report is sent. + // - report: The misbehavior report to be sent.
+ // Returns: + // none + ReportMisbehaviorOnChannel(channel channels.Channel, report MisbehaviorReport) +} diff --git a/network/p2p/blob/blob_service.go b/network/p2p/blob/blob_service.go index 2febe968689..b4a1257b200 100644 --- a/network/p2p/blob/blob_service.go +++ b/network/p2p/blob/blob_service.go @@ -3,23 +3,24 @@ package blob import ( "context" "errors" + "fmt" "time" "github.com/hashicorp/go-multierror" + "github.com/ipfs/boxo/bitswap" + bsmsg "github.com/ipfs/boxo/bitswap/message" + bsnet "github.com/ipfs/boxo/bitswap/network" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/blockstore" + "github.com/ipfs/boxo/provider" blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - provider "github.com/ipfs/go-ipfs-provider" - "github.com/ipfs/go-ipfs-provider/simple" + ipld "github.com/ipfs/go-ipld-format" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/routing" - "github.com/onflow/go-bitswap" - bsmsg "github.com/onflow/go-bitswap/message" - bsnet "github.com/onflow/go-bitswap/network" "github.com/rs/zerolog" "golang.org/x/time/rate" @@ -30,9 +31,13 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" "github.com/onflow/flow-go/utils/logging" +) - ipld "github.com/ipfs/go-ipld-format" +const ( + // DefaultReprovideInterval is the default interval at which DHT provider entries are refreshed + DefaultReprovideInterval = 12 * time.Hour ) type blobService struct { @@ -40,7 +45,7 @@ type blobService struct { component.Component blockService blockservice.BlockService blockStore blockstore.Blockstore - reprovider provider.Reprovider + reprovider provider.System config *BlobServiceConfig } @@ -66,7 +71,14 @@ func WithBitswapOptions(opts ...bitswap.Option) network.BlobServiceOption { } } -// WithHashOnRead sets whether or not the blobstore will rehash the blob data on read +// WithParentBlobService configures the blob service to use the parent's blockstore +func WithParentBlobService(parent network.BlobService) network.BlobServiceOption { + return func(bs network.BlobService) { + bs.(*blobService).blockStore = parent.(*blobService).blockStore + } +} + +// WithHashOnRead sets whether the blobstore will rehash the blob data on read // When set, calls to GetBlob will fail with an error if the hash of the data in storage does not // match its CID func WithHashOnRead(enabled bool) network.BlobServiceOption { @@ -95,14 +107,22 @@ func NewBlobService( metrics module.BitswapMetrics, logger zerolog.Logger, opts ...network.BlobServiceOption, -) *blobService { +) (*blobService, error) { bsNetwork := bsnet.NewFromIpfsHost(host, r, bsnet.Prefix(protocol.ID(prefix))) + blockStore, err := blockstore.CachedBlockstore( + context.Background(), + blockstore.NewBlockstore(ds), + blockstore.DefaultCacheOpts(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create cached blockstore: %w", err) + } bs := &blobService{ prefix: prefix, config: &BlobServiceConfig{ - ReprovideInterval: 12 * time.Hour, + ReprovideInterval: DefaultReprovideInterval, }, - blockStore: blockstore.NewBlockstore(ds), + blockStore: blockStore, } for _, opt := range opts { @@ -141,11 +161,18 @@ func NewBlobService( } }). 
AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - bs.reprovider = simple.NewReprovider(ctx, bs.config.ReprovideInterval, r, simple.NewBlockstoreProvider(bs.blockStore)) + // New creates and starts the reprovider (non-blocking) + reprovider, err := provider.New(ds, + provider.Online(r), + provider.KeyProvider(provider.NewBlockstoreProvider(bs.blockStore)), + provider.ReproviderInterval(bs.config.ReprovideInterval), + ) + if err != nil { + ctx.Throw(fmt.Errorf("failed to start reprovider: %w", err)) + } + bs.reprovider = reprovider ready() - - bs.reprovider.Run() }). AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { ready() @@ -166,11 +193,11 @@ func NewBlobService( bs.Component = cm - return bs + return bs, nil } func (bs *blobService) TriggerReprovide(ctx context.Context) error { - return bs.reprovider.Trigger(ctx) + return bs.reprovider.Reprovide(ctx) } func (bs *blobService) GetBlob(ctx context.Context, c cid.Cid) (blobs.Blob, error) { @@ -262,7 +289,7 @@ func AuthorizedRequester( return func(peerID peer.ID, _ cid.Cid) bool { lg := logger.With(). Str("component", "blob_service"). - Str("peer_id", peerID.String()). + Str("peer_id", p2plogging.PeerId(peerID)). Logger() id, ok := identityProvider.ByPeerID(peerID) @@ -280,7 +307,7 @@ func AuthorizedRequester( Logger() // TODO: when execution data verification is enabled, add verification nodes here - if (id.Role != flow.RoleExecution && id.Role != flow.RoleAccess) || id.Ejected { + if (id.Role != flow.RoleExecution && id.Role != flow.RoleAccess) || id.IsEjected() { lg.Warn(). Bool(logging.KeySuspicious, true). Msg("rejecting request from peer: unauthorized") diff --git a/network/p2p/blob/blob_service_test.go b/network/p2p/blob/blob_service_test.go index 020d8842856..97189fc05ad 100644 --- a/network/p2p/blob/blob_service_test.go +++ b/network/p2p/blob/blob_service_test.go @@ -89,8 +89,8 @@ func TestAuthorizedRequester(t *testing.T) { assert.False(t, authorizer(sn1PeerID, cid.Cid{})) }) - an1.Ejected = true - en1.Ejected = true + an1.EpochParticipationStatus = flow.EpochParticipationStatusEjected + en1.EpochParticipationStatus = flow.EpochParticipationStatusEjected // AN1 is on allow list (not passed) but is ejected t.Run("always denies ejected AN", func(t *testing.T) { diff --git a/network/p2p/blob/rate_limit_test.go b/network/p2p/blob/rate_limit_test.go index 6881db0393d..90292334e9b 100644 --- a/network/p2p/blob/rate_limit_test.go +++ b/network/p2p/blob/rate_limit_test.go @@ -5,10 +5,10 @@ import ( "crypto/rand" "testing" + "github.com/ipfs/boxo/blockstore" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/network/p2p/builder.go b/network/p2p/builder.go index 6192eded6cb..59df7ad8e5f 100644 --- a/network/p2p/builder.go +++ b/network/p2p/builder.go @@ -2,31 +2,39 @@ package p2p import ( "context" - "time" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/routing" madns "github.com/multiformats/go-multiaddr-dns" "github.com/rs/zerolog" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" 
"github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" + p2pconfig "github.com/onflow/flow-go/network/p2p/config" ) -// LibP2PFactoryFunc is a factory function type for generating libp2p Node instances. -type LibP2PFactoryFunc func() (LibP2PNode, error) -type GossipSubFactoryFunc func(context.Context, zerolog.Logger, host.Host, PubSubAdapterConfig) (PubSubAdapter, error) -type CreateNodeFunc func(zerolog.Logger, host.Host, ProtocolPeerCache, PeerManager) LibP2PNode +type GossipSubFactoryFunc func(context.Context, zerolog.Logger, host.Host, PubSubAdapterConfig, CollectionClusterChangesConsumer) (PubSubAdapter, error) + +// NodeConstructor is a function that creates a new libp2p node. +// Args: +// - config: configuration for the node +// Returns: +// - LibP2PNode: new libp2p node +// - error: error if any, any returned error is irrecoverable. +type NodeConstructor func(config *NodeConfig) (LibP2PNode, error) type GossipSubAdapterConfigFunc func(*BasePubSubAdapterConfig) PubSubAdapterConfig // GossipSubBuilder provides a builder pattern for creating a GossipSub pubsub system. type GossipSubBuilder interface { - PeerScoringBuilder // SetHost sets the host of the builder. // If the host has already been set, a fatal error is logged. SetHost(host.Host) @@ -43,30 +51,31 @@ type GossipSubBuilder interface { // We expect the node to initialize with a default gossipsub config. Hence, this function overrides the default config. SetGossipSubConfigFunc(GossipSubAdapterConfigFunc) - // SetGossipSubPeerScoring sets the gossipsub peer scoring of the builder. - // If the gossipsub peer scoring flag has already been set, a fatal error is logged. - SetGossipSubPeerScoring(bool) - - // SetGossipSubScoreTracerInterval sets the gossipsub score tracer interval of the builder. - // If the gossipsub score tracer interval has already been set, a fatal error is logged. - SetGossipSubScoreTracerInterval(time.Duration) - - // SetGossipSubTracer sets the gossipsub tracer of the builder. - // If the gossipsub tracer has already been set, a fatal error is logged. - SetGossipSubTracer(PubSubTracer) - - // SetIDProvider sets the identity provider of the builder. - // If the identity provider has already been set, a fatal error is logged. - SetIDProvider(module.IdentityProvider) + // EnableGossipSubScoringWithOverride enables peer scoring for the GossipSub pubsub system with the given override. + // Any existing peer scoring config attribute that is set in the override will override the default peer scoring config. + // Anything that is left to nil or zero value in the override will be ignored and the default value will be used. + // Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing. + // Production Tip: use PeerScoringConfigNoOverride as the argument to this function to enable peer scoring without any override. + // Args: + // - PeerScoringConfigOverride: override for the peer scoring config- Recommended to use PeerScoringConfigNoOverride for production. + // Returns: + // none + EnableGossipSubScoringWithOverride(*PeerScoringConfigOverride) // SetRoutingSystem sets the routing system of the builder. // If the routing system has already been set, a fatal error is logged. SetRoutingSystem(routing.Routing) - // SetGossipSubRPCInspectorSuite sets the gossipsub rpc inspector suite of the builder. 
It contains the - inspector function that is injected into the gossipsub rpc layer, as well as the notification distributors that - are used to notify the app specific scoring mechanism of misbehaving peers. - SetGossipSubRPCInspectorSuite(GossipSubInspectorSuite) + // OverrideDefaultRpcInspectorFactory overrides the default RPC inspector suite factory of the builder. + // A default RPC inspector suite factory is provided by the node. This function overrides the default factory. + // The purpose of override is to allow the node to provide a custom RPC inspector suite factory for the sake of testing + // or experimentation. + // It is NOT recommended to override the default RPC inspector suite factory in production unless you know what you are doing. + OverrideDefaultRpcInspectorFactory(GossipSubRpcInspectorFactoryFunc) + + // OverrideDefaultValidateQueueSize overrides the default validate queue size of libp2p nodes. + // CAUTION: Be careful setting this to a larger number as it will change the backpressure behavior of the system. + OverrideDefaultValidateQueueSize(int) // Build creates a new GossipSub pubsub system. // It returns the newly created GossipSub pubsub system and any errors encountered during its creation. @@ -76,21 +85,36 @@ type GossipSubBuilder interface { // // Returns: // - PubSubAdapter: a GossipSub pubsub system for the libp2p node. - // - PeerScoreTracer: a peer score tracer for the GossipSub pubsub system (if enabled, otherwise nil). // - error: if an error occurs during the creation of the GossipSub pubsub system, it is returned. Otherwise, nil is returned. // Note that on the happy path, the returned error is nil. Any error returned is unexpected and should be handled as irrecoverable. - Build(irrecoverable.SignalerContext) (PubSubAdapter, PeerScoreTracer, error) + Build(irrecoverable.SignalerContext) (PubSubAdapter, error) } -type PeerScoringBuilder interface { - // SetTopicScoreParams sets the topic score parameters for the given topic. - // If the topic score parameters have already been set for the given topic, it is overwritten. - SetTopicScoreParams(topic channels.Topic, topicScoreParams *pubsub.TopicScoreParams) - - // SetAppSpecificScoreParams sets the application specific score parameters for the given topic. - // If the application specific score parameters have already been set for the given topic, it is overwritten. - SetAppSpecificScoreParams(func(peer.ID) float64) -} +// GossipSubRpcInspectorFactoryFunc is a function that creates a new RPC inspector. It is used to create +// an RPC inspector for the gossipsub protocol. The RPC inspectors are used to inspect and validate +// incoming RPC messages before they are processed by the gossipsub protocol. +// Args: +// - logger: logger to use +// - sporkID: spork ID of the node +// - cfg: configuration for the RPC inspectors +// - metrics: metrics to use for the RPC inspectors +// - heroCacheMetricsFactory: metrics factory for the hero cache +// - networkingType: networking type of the node, i.e., public or private +// - identityProvider: identity provider of the node +// Returns: +// - GossipSubRPCInspector: new RPC inspector suite +// - error: error if any, any returned error is irrecoverable.
+type GossipSubRpcInspectorFactoryFunc func( + zerolog.Logger, + flow.Identifier, + *p2pconfig.RpcInspectorParameters, + module.GossipSubMetrics, + metrics.HeroCacheMetricsFactory, + flownet.NetworkingType, + module.IdentityProvider, + func() TopicProvider, + GossipSubInvCtrlMsgNotifConsumer, +) (GossipSubRPCInspector, error) // NodeBuilder is a builder pattern for creating a libp2p Node instance. type NodeBuilder interface { @@ -98,29 +122,93 @@ type NodeBuilder interface { SetSubscriptionFilter(pubsub.SubscriptionFilter) NodeBuilder SetResourceManager(network.ResourceManager) NodeBuilder SetConnectionManager(connmgr.ConnManager) NodeBuilder - SetConnectionGater(connmgr.ConnectionGater) NodeBuilder + SetConnectionGater(ConnectionGater) NodeBuilder SetRoutingSystem(func(context.Context, host.Host) (routing.Routing, error)) NodeBuilder - SetPeerManagerOptions(bool, time.Duration) NodeBuilder - // EnableGossipSubPeerScoring enables peer scoring for the GossipSub pubsub system. - // Arguments: - // - module.IdentityProvider: the identity provider for the node (must be set before calling this method). - // - *PeerScoringConfig: the peer scoring configuration for the GossipSub pubsub system. If nil, the default configuration is used. - EnableGossipSubPeerScoring(module.IdentityProvider, *PeerScoringConfig) NodeBuilder - SetCreateNode(CreateNodeFunc) NodeBuilder - SetGossipSubFactory(GossipSubFactoryFunc, GossipSubAdapterConfigFunc) NodeBuilder - SetStreamCreationRetryInterval(time.Duration) NodeBuilder - SetRateLimiterDistributor(UnicastRateLimiterDistributor) NodeBuilder - SetGossipSubTracer(PubSubTracer) NodeBuilder - SetGossipSubScoreTracerInterval(time.Duration) NodeBuilder - SetGossipSubRpcInspectorSuite(GossipSubInspectorSuite) NodeBuilder + // OverrideGossipSubScoringConfig overrides the default peer scoring config for the GossipSub protocol. + // Note that it does not enable peer scoring. The peer scoring is enabled directly by setting the `peer-scoring-enabled` flag to true in `default-config.yaml`, or + // by setting the `gossipsub-peer-scoring-enabled` runtime flag to true. This function only overrides the default peer scoring config which takes effect + // only if the peer scoring is enabled (mostly for testing purposes). + // Any existing peer scoring config attribute that is set in the override will override the default peer scoring config. + // Anything that is left as nil or zero value in the override will be ignored and the default value will be used. + // Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing. + // Args: + // - PeerScoringConfigOverride: override for the peer scoring config. Recommended to use PeerScoringConfigNoOverride for production. + // Returns: + // none + OverrideGossipSubScoringConfig(*PeerScoringConfigOverride) NodeBuilder + + // OverrideNodeConstructor overrides the default node constructor, i.e., the function that creates a new libp2p node. + // The purpose of override is to allow the node to provide a custom node constructor for the sake of testing or experimentation. + // It is NOT recommended to override the default node constructor in production unless you know what you are doing. + // Args: + // - NodeConstructor: custom node constructor + // Returns: + // none + OverrideNodeConstructor(NodeConstructor) NodeBuilder + + // OverrideGossipSubFactory overrides the default gossipsub factory for the GossipSub protocol.
+ // The purpose of this override is to allow the node to provide a custom gossipsub factory for the sake of testing or experimentation.
+ // Note: it is not recommended to override the default gossipsub factory in production unless you know what you are doing.
+ // Args:
+ // - factory: custom gossipsub factory
+ // Returns:
+ // - NodeBuilder: the node builder
+ OverrideGossipSubFactory(GossipSubFactoryFunc, GossipSubAdapterConfigFunc) NodeBuilder
+
+ // OverrideDefaultRpcInspectorFactory overrides the default rpc inspector factory for the GossipSub protocol.
+ // The purpose of this override is to allow the node to provide a custom rpc inspector factory for the sake of testing or experimentation.
+ // Note: it is not recommended to override the default rpc inspector factory in production unless you know what you are doing.
+ // Args:
+ // - factory: custom rpc inspector factory
+ // Returns:
+ // - NodeBuilder: the node builder
+ OverrideDefaultRpcInspectorFactory(GossipSubRpcInspectorFactoryFunc) NodeBuilder
+
+ // OverrideDefaultValidateQueueSize overrides the default validate queue size of libp2p nodes.
+ // CAUTION: Be careful setting this to a larger number as it will change the backpressure behavior of the system.
+ OverrideDefaultValidateQueueSize(int) NodeBuilder
+
+ // Build creates a new libp2p node. It returns the newly created libp2p node and any errors encountered during its creation.
+ // Args:
+ // none
+ // Returns:
+ // - LibP2PNode: a new libp2p node
+ // - error: if an error occurs during the creation of the libp2p node, it is returned. Otherwise, nil is returned. Any error returned is unexpected and should be handled as irrecoverable.
 Build() (LibP2PNode, error)
 }

-// PeerScoringConfig is a configuration for peer scoring parameters for a GossipSub pubsub system.
-type PeerScoringConfig struct {
+// PeerScoringConfigOverride is a structure that is used to carry over the override values for peer scoring configuration.
+// Any attribute that is set in the override will override the default peer scoring config.
+// Typically, it is not recommended to override the default peer scoring config in production unless you know what you are doing.
+type PeerScoringConfigOverride struct {
 // TopicScoreParams is a map of topic score parameters for each topic.
+ // Override criteria: any topic (i.e., key in the map) will override the default topic score parameters for that topic and
+ // the corresponding value in the map will be used instead of the default value.
+ // If you don't want to override topic score params for a given topic, simply don't include that topic in the map.
+ // If the map is nil, the default topic score parameters are used for all topics.
 TopicScoreParams map[channels.Topic]*pubsub.TopicScoreParams
+ // AppSpecificScoreParams is a function that returns the application specific score parameters for a given peer.
+ // Override criteria: if the function is not nil, it will override the default application specific score parameters.
+ // If the function is nil, the default application specific score parameters are used.
 AppSpecificScoreParams func(peer.ID) float64
 }
+
+// NodeParameters holds the parameters that are used to configure the libp2p node.
+type NodeParameters struct {
+ EnableProtectedStreams bool `validate:"required"`
+}
+
+// NodeConfig is the configuration for the libp2p node; it contains the parameters as well as the essential components for setting up the node.
+// It is used to create a new libp2p node.
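+// As an illustrative sketch (field values are placeholders), a caller constructs it as:
+//
+//	cfg := &NodeConfig{
+//		Parameters:           &NodeParameters{EnableProtectedStreams: true},
+//		Logger:               logger,
+//		Host:                 h,
+//		DisallowListCacheCfg: disallowListCacheCfg,
+//	}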
+type NodeConfig struct { + Parameters *NodeParameters `validate:"required"` + // logger used to provide logging + Logger zerolog.Logger `validate:"required"` + // reference to the libp2p host (https://godoc.org/github.com/libp2p/go-libp2p/core/host) + Host host.Host `validate:"required"` + PeerManager PeerManager + DisallowListCacheCfg *DisallowListCacheConfig `validate:"required"` + ProtocolPeerCacheList []protocol.ID +} diff --git a/network/p2p/builder/config/config.go b/network/p2p/builder/config/config.go new file mode 100644 index 00000000000..63d819d0248 --- /dev/null +++ b/network/p2p/builder/config/config.go @@ -0,0 +1,43 @@ +package p2pbuilderconfig + +import ( + "time" + + "github.com/onflow/flow-go/network/netconf" + "github.com/onflow/flow-go/network/p2p" +) + +// UnicastConfig configuration parameters for the unicast protocol. +type UnicastConfig struct { + netconf.Unicast + + // RateLimiterDistributor distributor that distributes notifications whenever a peer is rate limited to all consumers. + RateLimiterDistributor p2p.UnicastRateLimiterDistributor +} + +// ConnectionGaterConfig configuration parameters for the connection gater. +type ConnectionGaterConfig struct { + // InterceptPeerDialFilters list of peer filters used to filter peers on outgoing connections in the InterceptPeerDial callback. + InterceptPeerDialFilters []p2p.PeerFilter + // InterceptSecuredFilters list of peer filters used to filter peers and accept or reject inbound connections in InterceptSecured callback. + InterceptSecuredFilters []p2p.PeerFilter +} + +// PeerManagerConfig configuration parameters for the peer manager. +type PeerManagerConfig struct { + // ConnectionPruning enables connection pruning in the connection manager. + ConnectionPruning bool + // UpdateInterval interval used by the libp2p node peer manager component to periodically request peer updates. + UpdateInterval time.Duration + // ConnectorFactory is a factory function to create a new connector. + ConnectorFactory p2p.ConnectorFactory +} + +// PeerManagerDisableConfig returns a configuration that disables the peer manager. +func PeerManagerDisableConfig() *PeerManagerConfig { + return &PeerManagerConfig{ + ConnectionPruning: false, + UpdateInterval: 0, + ConnectorFactory: nil, + } +} diff --git a/network/p2p/builder/config/metrics.go b/network/p2p/builder/config/metrics.go new file mode 100644 index 00000000000..510bd65e0bf --- /dev/null +++ b/network/p2p/builder/config/metrics.go @@ -0,0 +1,20 @@ +package p2pbuilderconfig + +import ( + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" +) + +// MetricsConfig is a wrapper around the metrics configuration for the libp2p node. +// It is used to pass the metrics configuration to the libp2p node builder. +type MetricsConfig struct { + // HeroCacheFactory is the factory for the HeroCache metrics. It is used to + // create a HeroCache metrics instance for each cache when needed. By passing + // the factory to the libp2p node builder, the libp2p node can create the + // HeroCache metrics instance for each cache internally, which reduces the + // number of arguments needed to be passed to the libp2p node builder. + HeroCacheFactory metrics.HeroCacheMetricsFactory + + // LibP2PMetrics is the metrics instance for the libp2p node. 
+ Metrics module.LibP2PMetrics +} diff --git a/network/p2p/builder/gossipsub/gossipSubBuilder.go b/network/p2p/builder/gossipsub/gossipSubBuilder.go new file mode 100644 index 00000000000..f4d642f9cd7 --- /dev/null +++ b/network/p2p/builder/gossipsub/gossipSubBuilder.go @@ -0,0 +1,380 @@ +package gossipsubbuilder + +import ( + "context" + "fmt" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/p2p" + p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config" + p2pconfig "github.com/onflow/flow-go/network/p2p/config" + "github.com/onflow/flow-go/network/p2p/inspector/validation" + p2pnode "github.com/onflow/flow-go/network/p2p/node" + "github.com/onflow/flow-go/network/p2p/scoring" + "github.com/onflow/flow-go/network/p2p/tracer" + "github.com/onflow/flow-go/network/p2p/utils" + "github.com/onflow/flow-go/utils/logging" +) + +// The Builder struct is used to configure and create a new GossipSub pubsub system. +type Builder struct { + networkType network.NetworkingType + sporkId flow.Identifier + logger zerolog.Logger + metricsCfg *p2pbuilderconfig.MetricsConfig + h host.Host + subscriptionFilter pubsub.SubscriptionFilter + gossipSubFactory p2p.GossipSubFactoryFunc + gossipSubConfigFunc p2p.GossipSubAdapterConfigFunc + rpcInspectorFactory p2p.GossipSubRpcInspectorFactoryFunc + // gossipSubTracer is a callback interface that is called by the gossipsub implementation upon + // certain events. Currently, we use it to log and observe the local mesh of the node. + gossipSubTracer p2p.PubSubTracer + scoreOptionConfig *scoring.ScoreOptionConfig + idProvider module.IdentityProvider + routingSystem routing.Routing + gossipSubCfg *p2pconfig.GossipSubParameters + validateQueueSize int +} + +var _ p2p.GossipSubBuilder = (*Builder)(nil) + +// SetHost sets the host of the builder. +// If the host has already been set, a fatal error is logged. +func (g *Builder) SetHost(h host.Host) { + if g.h != nil { + g.logger.Fatal().Msg("host has already been set") + return + } + g.h = h +} + +// OverrideDefaultRpcInspectorFactory overrides the default rpc inspector factory of the builder. +// If the rpc inspector factory has already been set, a warning is logged. +// Note: it is not recommended to override the default rpc inspector factory in production unless you know what you are doing. +// The purpose of this function is to allow for testing and development. +// Args: +// - factoryFunc: the factory function to override the default rpc inspector factory. +// Returns: +// none +func (g *Builder) OverrideDefaultRpcInspectorFactory(factoryFunc p2p.GossipSubRpcInspectorFactoryFunc) { + g.logger.Warn().Bool(logging.KeySuspicious, true).Msg("overriding default rpc inspector factory, not recommended for production") + g.rpcInspectorFactory = factoryFunc +} + +// SetSubscriptionFilter sets the subscription filter of the builder. +// If the subscription filter has already been set, a fatal error is logged. 
+func (g *Builder) SetSubscriptionFilter(subscriptionFilter pubsub.SubscriptionFilter) {
+ if g.subscriptionFilter != nil {
+ g.logger.Fatal().Msg("subscription filter has already been set")
+ }
+ g.subscriptionFilter = subscriptionFilter
+}
+
+// SetGossipSubFactory sets the gossipsub factory of the builder.
+// We expect the node to initialize with a default gossipsub factory. Hence, this function overrides the default factory.
+func (g *Builder) SetGossipSubFactory(gossipSubFactory p2p.GossipSubFactoryFunc) {
+ if g.gossipSubFactory != nil {
+ g.logger.Warn().Msg("gossipsub factory has already been set, overriding the previous factory.")
+ }
+ g.gossipSubFactory = gossipSubFactory
+}
+
+// SetGossipSubConfigFunc sets the gossipsub config function of the builder.
+// We expect the node to initialize with a default gossipsub config. Hence, this function overrides the default config.
+func (g *Builder) SetGossipSubConfigFunc(gossipSubConfigFunc p2p.GossipSubAdapterConfigFunc) {
+ if g.gossipSubConfigFunc != nil {
+ g.logger.Warn().Msg("gossipsub config function has already been set, overriding the previous config function.")
+ }
+ g.gossipSubConfigFunc = gossipSubConfigFunc
+}
+
+// EnableGossipSubScoringWithOverride enables peer scoring for the GossipSub pubsub system with the given override.
+// Any existing peer scoring config attribute that is set in the override will override the default peer scoring config.
+// Anything that is left to nil or zero value in the override will be ignored and the default value will be used.
+// Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing.
+// Production Tip: use PeerScoringConfigNoOverride as the argument to this function to enable peer scoring without any override.
+// Args:
+// - PeerScoringConfigOverride: override for the peer scoring config. It is recommended to use PeerScoringConfigNoOverride in production.
+// Returns:
+// none
+func (g *Builder) EnableGossipSubScoringWithOverride(override *p2p.PeerScoringConfigOverride) {
+ g.gossipSubCfg.PeerScoringEnabled = true // TODO: we should enable peer scoring by default.
+ if override == nil {
+ return
+ }
+ if override.AppSpecificScoreParams != nil {
+ g.logger.Warn().
+ Str(logging.KeyNetworkingSecurity, "true").
+ Msg("overriding app specific score params for gossipsub")
+ g.scoreOptionConfig.OverrideAppSpecificScoreFunction(override.AppSpecificScoreParams)
+ }
+ if override.TopicScoreParams != nil {
+ for topic, params := range override.TopicScoreParams {
+ topicLogger := utils.TopicScoreParamsLogger(g.logger, topic.String(), params)
+ topicLogger.Warn().
+ Str(logging.KeyNetworkingSecurity, "true").
+ Msg("overriding topic score params for gossipsub")
+ g.scoreOptionConfig.OverrideTopicScoreParams(topic, params)
+ }
+ }
+}
+
+// SetRoutingSystem sets the routing system of the builder.
+// If the routing system has already been set, a fatal error is logged.
+func (g *Builder) SetRoutingSystem(routingSystem routing.Routing) {
+ if g.routingSystem != nil {
+ g.logger.Fatal().Msg("routing system has already been set")
+ return
+ }
+ g.routingSystem = routingSystem
+}
+
+// OverrideDefaultValidateQueueSize sets the validate queue size to use for the libp2p pubsub system.
+// CAUTION: Be careful setting this to a larger number as it will change the backpressure behavior of the system.
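+// Illustrative sketch (the queue size below is a placeholder, not a recommended value):
+//
+//	g.OverrideDefaultValidateQueueSize(8192)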
+func (g *Builder) OverrideDefaultValidateQueueSize(size int) {
+ g.validateQueueSize = size
+}
+
+// NewGossipSubBuilder returns a new gossipsub builder.
+// Args:
+// - logger: the logger of the node.
+// - metricsCfg: the metrics config of the node.
+// - gossipSubCfg: the gossipsub configuration parameters of the node.
+// - networkType: the network type of the node.
+// - sporkId: the spork id of the node.
+// - idProvider: the identity provider of the node.
+// Returns:
+// - a new gossipsub builder.
+// Note: the builder is not thread-safe. It should only be used in the main thread.
+func NewGossipSubBuilder(
+ logger zerolog.Logger,
+ metricsCfg *p2pbuilderconfig.MetricsConfig,
+ gossipSubCfg *p2pconfig.GossipSubParameters,
+ networkType network.NetworkingType,
+ sporkId flow.Identifier,
+ idProvider module.IdentityProvider,
+) *Builder {
+ lg := logger.With().
+ Str("component", "gossipsub").
+ Str("network-type", networkType.String()).
+ Logger()
+
+ meshTracerCfg := &tracer.GossipSubMeshTracerConfig{
+ Logger: lg,
+ Metrics: metricsCfg.Metrics,
+ IDProvider: idProvider,
+ LoggerInterval: gossipSubCfg.RpcTracer.LocalMeshLogInterval,
+ RpcSentTracker: tracer.RpcSentTrackerConfig{
+ CacheSize: gossipSubCfg.RpcTracer.RPCSentTrackerCacheSize,
+ WorkerQueueCacheSize: gossipSubCfg.RpcTracer.RPCSentTrackerQueueCacheSize,
+ WorkerQueueNumber: gossipSubCfg.RpcTracer.RpcSentTrackerNumOfWorkers,
+ },
+ DuplicateMessageTrackerCacheConfig: gossipSubCfg.RpcTracer.DuplicateMessageTrackerConfig,
+ HeroCacheMetricsFactory: metricsCfg.HeroCacheFactory,
+ NetworkingType: networkType,
+ }
+ meshTracer := tracer.NewGossipSubMeshTracer(meshTracerCfg)
+
+ b := &Builder{
+ logger: lg,
+ metricsCfg: metricsCfg,
+ sporkId: sporkId,
+ networkType: networkType,
+ idProvider: idProvider,
+ gossipSubFactory: defaultGossipSubFactory(),
+ gossipSubConfigFunc: defaultGossipSubAdapterConfig(),
+ scoreOptionConfig: scoring.NewScoreOptionConfig(lg,
+ gossipSubCfg.ScoringParameters,
+ metricsCfg.HeroCacheFactory,
+ metricsCfg.Metrics,
+ idProvider,
+ meshTracer.DuplicateMessageCount,
+ networkType,
+ ),
+ gossipSubTracer: meshTracer,
+ gossipSubCfg: gossipSubCfg,
+ rpcInspectorFactory: defaultRpcInspectorFactory(meshTracer),
+ }
+
+ return b
+}
+
+// defaultRpcInspectorFactory returns the default rpc inspector factory function. It is used to create the default rpc inspector factory.
+// Note: always use the default rpc inspector factory function to create the rpc inspector factory (unless you know what you are doing).
+// Args:
+// - tracer: the tracer of the node.
+// Returns:
+// - a new rpc inspector factory function.
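+// The default factory wires the given tracer into a validation.ControlMsgValidationInspector
+// (see below); the tracer serves as the inspector's RPC-sent tracker.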
+func defaultRpcInspectorFactory(tracer p2p.PubSubTracer) p2p.GossipSubRpcInspectorFactoryFunc {
+ return func(logger zerolog.Logger,
+ sporkId flow.Identifier,
+ rpcInspectorConfig *p2pconfig.RpcInspectorParameters,
+ inspectorMetrics module.GossipSubMetrics,
+ heroCacheMetrics metrics.HeroCacheMetricsFactory,
+ networkingType network.NetworkingType,
+ idProvider module.IdentityProvider,
+ topicProvider func() p2p.TopicProvider,
+ notificationConsumer p2p.GossipSubInvCtrlMsgNotifConsumer) (p2p.GossipSubRPCInspector, error) {
+ return validation.NewControlMsgValidationInspector(&validation.InspectorParams{
+ Logger: logger.With().Str("component", "rpc-inspector").Logger(),
+ SporkID: sporkId,
+ Config: &rpcInspectorConfig.Validation,
+ HeroCacheMetricsFactory: heroCacheMetrics,
+ IdProvider: idProvider,
+ InspectorMetrics: inspectorMetrics,
+ RpcTracker: tracer,
+ NetworkingType: networkingType,
+ InvalidControlMessageNotificationConsumer: notificationConsumer,
+ TopicOracle: topicProvider,
+ })
+ }
+}
+
+// defaultGossipSubFactory returns the default gossipsub factory function. It is used to create the default gossipsub factory.
+// Note: always use the default gossipsub factory function to create the gossipsub factory (unless you know what you are doing).
+func defaultGossipSubFactory() p2p.GossipSubFactoryFunc {
+ return func(ctx context.Context, logger zerolog.Logger, h host.Host, cfg p2p.PubSubAdapterConfig, clusterChangeConsumer p2p.CollectionClusterChangesConsumer) (p2p.PubSubAdapter, error) {
+ return p2pnode.NewGossipSubAdapter(ctx, logger, h, cfg, clusterChangeConsumer)
+ }
+}
+
+// defaultGossipSubAdapterConfig returns the default gossipsub config function. It is used to create the default gossipsub config.
+// Note: always use the default gossipsub config function to create the gossipsub config (unless you know what you are doing).
+func defaultGossipSubAdapterConfig() p2p.GossipSubAdapterConfigFunc {
+ return func(cfg *p2p.BasePubSubAdapterConfig) p2p.PubSubAdapterConfig {
+ return p2pnode.NewGossipSubAdapterConfig(cfg)
+ }
+}
+
+// Build creates a new GossipSub pubsub system.
+// It returns the newly created GossipSub pubsub system and any errors encountered during its creation.
+// Arguments:
+// - ctx: the irrecoverable context of the node.
+//
+// Returns:
+// - p2p.PubSubAdapter: a GossipSub pubsub system for the libp2p node.
+// - error: if an error occurs during the creation of the GossipSub pubsub system, it is returned. Otherwise, nil is returned.
+// Note that on happy path, the returned error is nil. Any error returned is unexpected and should be handled as irrecoverable.
+func (g *Builder) Build(ctx irrecoverable.SignalerContext) (p2p.PubSubAdapter, error) {
+ // placeholder for the gossipsub pubsub system that will be created (so that it can be passed around even
+ // before it is created).
+ var gossipSub p2p.PubSubAdapter
+
+ gossipSubConfigs := g.gossipSubConfigFunc(&p2p.BasePubSubAdapterConfig{
+ MaxMessageSize: p2pnode.DefaultMaxPubSubMsgSize,
+ })
+ gossipSubConfigs.WithMessageIdFunction(utils.MessageID)
+
+ if g.gossipSubCfg.PeerGaterEnabled {
+ topicDeliveryWeights, err := g.gossipSubCfg.PeerGaterTopicDeliveryWeights()
+ if err != nil {
+ return nil, fmt.Errorf("failed to add peer gater option: %w", err)
+ }
+ gossipSubConfigs.WithPeerGater(topicDeliveryWeights, g.gossipSubCfg.PeerGaterSourceDecay)
+ }
+
+ if g.routingSystem != nil {
+ gossipSubConfigs.WithRoutingDiscovery(g.routingSystem)
+ }
+
+ if g.subscriptionFilter != nil {
+ gossipSubConfigs.WithSubscriptionFilter(g.subscriptionFilter)
+ }
+
+ // scoreOpt is the score option for the GossipSub pubsub system. It is a self-contained component that is used to carry over the
+ // peer scoring parameters (including the entire app-specific score function) and inject it into the GossipSub pubsub system at creation time.
+ var scoreOpt *scoring.ScoreOption
+ // scoreTracer is the peer score tracer for the GossipSub pubsub system. It is used to trace the peer scores.
+ // It is only created if peer scoring is enabled. Otherwise, it is nil.
+ var scoreTracer p2p.PeerScoreTracer
+ // consumer is the consumer of the invalid control message notifications; i.e., the component that should be notified when
+ // an RPC validation fails. This component is responsible for taking action on the notification. Currently, the score option
+ // is the consumer of the invalid control message notifications.
+ // When the peer scoring is disabled, the consumer is a no-op consumer.
+ var consumer p2p.GossipSubInvCtrlMsgNotifConsumer
+ // currently, peer scoring is not supported for public networks.
+ if g.gossipSubCfg.PeerScoringEnabled && g.networkType != network.PublicNetwork {
+ // wires the gossipsub score option to the subscription provider.
+ subscriptionProvider, err := scoring.NewSubscriptionProvider(&scoring.SubscriptionProviderConfig{
+ Logger: g.logger,
+ TopicProviderOracle: func() p2p.TopicProvider {
+ // gossipSub has not been created yet, hence instead of passing it directly, we pass a function that returns it.
+ // the cardinal assumption is this function is only invoked when the subscription provider is started, which is
+ // after the gossipsub is created.
+ return gossipSub
+ },
+ IdProvider: g.idProvider,
+ Params: &g.gossipSubCfg.SubscriptionProvider,
+ HeroCacheMetricsFactory: g.metricsCfg.HeroCacheFactory,
+ NetworkingType: g.networkType,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not create subscription provider: %w", err)
+ }
+ scoreOpt, err = scoring.NewScoreOption(g.scoreOptionConfig, subscriptionProvider)
+ if err != nil {
+ return nil, fmt.Errorf("could not create gossipsub score option: %w", err)
+ }
+ gossipSubConfigs.WithScoreOption(scoreOpt)
+ consumer = scoreOpt // the score option is the consumer of the invalid control message notifications.
+
+ if g.gossipSubCfg.RpcTracer.ScoreTracerInterval > 0 {
+ scoreTracer = tracer.NewGossipSubScoreTracer(g.logger, g.idProvider, g.metricsCfg.Metrics, g.gossipSubCfg.RpcTracer.ScoreTracerInterval)
+ gossipSubConfigs.WithScoreTracer(scoreTracer)
+ }
+ } else {
+ g.logger.Warn().
+ Str(logging.KeyNetworkingSecurity, "true").
+ Msg("gossipsub peer scoring is disabled, no-op consumer will be used for invalid control message notifications.")
+ consumer = scoring.NewNoopInvCtrlMsgNotifConsumer() // no-op consumer as peer scoring is disabled.
+ }
+
+ rpcValidationInspector, err := g.rpcInspectorFactory(
+ g.logger,
+ g.sporkId,
+ &g.gossipSubCfg.RpcInspector,
+ g.metricsCfg.Metrics,
+ g.metricsCfg.HeroCacheFactory,
+ g.networkType,
+ g.idProvider,
+ func() p2p.TopicProvider {
+ return gossipSub
+ },
+ consumer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create new rpc validation inspector: %w", err)
+ }
+ gossipSubConfigs.WithRpcInspector(rpcValidationInspector)
+
+ if g.gossipSubTracer != nil {
+ gossipSubConfigs.WithTracer(g.gossipSubTracer)
+ }
+
+ if g.validateQueueSize > 0 {
+ gossipSubConfigs.WithValidateQueueSize(g.validateQueueSize)
+ }
+
+ if g.h == nil {
+ return nil, fmt.Errorf("could not create gossipsub: host is nil")
+ }
+
+ gossipSub, err = g.gossipSubFactory(ctx, g.logger, g.h, gossipSubConfigs, rpcValidationInspector)
+ if err != nil {
+ return nil, fmt.Errorf("could not create gossipsub: %w", err)
+ }
+
+ return gossipSub, nil
+}
diff --git a/network/p2p/builder/libp2pNodeBuilder.go b/network/p2p/builder/libp2pNodeBuilder.go
new file mode 100644
index 00000000000..851b2c28a07
--- /dev/null
+++ b/network/p2p/builder/libp2pNodeBuilder.go
@@ -0,0 +1,513 @@
+package p2pbuilder
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/libp2p/go-libp2p"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ routinghelpers "github.com/libp2p/go-libp2p-routing-helpers"
+ "github.com/libp2p/go-libp2p/config"
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/routing"
+ "github.com/libp2p/go-libp2p/core/transport"
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ "github.com/multiformats/go-multiaddr"
+ madns "github.com/multiformats/go-multiaddr-dns"
+ fcrypto "github.com/onflow/crypto"
+ "github.com/rs/zerolog"
+
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/module"
+ "github.com/onflow/flow-go/module/component"
+ "github.com/onflow/flow-go/module/irrecoverable"
+ "github.com/onflow/flow-go/module/metrics"
+ flownet "github.com/onflow/flow-go/network"
+ "github.com/onflow/flow-go/network/netconf"
+ "github.com/onflow/flow-go/network/p2p"
+ p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config"
+ gossipsubbuilder "github.com/onflow/flow-go/network/p2p/builder/gossipsub"
+ p2pconfig "github.com/onflow/flow-go/network/p2p/config"
+ "github.com/onflow/flow-go/network/p2p/connection"
+ "github.com/onflow/flow-go/network/p2p/dht"
+ "github.com/onflow/flow-go/network/p2p/keyutils"
+ p2plogging "github.com/onflow/flow-go/network/p2p/logging"
+ p2pnode "github.com/onflow/flow-go/network/p2p/node"
+ "github.com/onflow/flow-go/network/p2p/subscription"
+ "github.com/onflow/flow-go/network/p2p/unicast"
+ unicastcache "github.com/onflow/flow-go/network/p2p/unicast/cache"
+ "github.com/onflow/flow-go/network/p2p/unicast/protocols"
+ "github.com/onflow/flow-go/network/p2p/unicast/stream"
+ "github.com/onflow/flow-go/network/p2p/utils"
+)
+
+type DhtSystemActivation bool
+
+const (
+ DhtSystemEnabled DhtSystemActivation = true
+ DhtSystemDisabled DhtSystemActivation = false
+)
+
+type LibP2PNodeBuilder struct {
+ gossipSubBuilder p2p.GossipSubBuilder
+ sporkId flow.Identifier
+ address string
+ networkKey fcrypto.PrivateKey
+ logger zerolog.Logger
+ metricsConfig *p2pbuilderconfig.MetricsConfig
+ basicResolver
madns.BasicResolver + + resourceManager network.ResourceManager + resourceManagerCfg *p2pconfig.ResourceManagerConfig + connManager connmgr.ConnManager + connGater p2p.ConnectionGater + routingFactory func(context.Context, host.Host) (routing.Routing, error) + peerManagerConfig *p2pbuilderconfig.PeerManagerConfig + createNode p2p.NodeConstructor + disallowListCacheCfg *p2p.DisallowListCacheConfig + unicastConfig *p2pbuilderconfig.UnicastConfig + networkingType flownet.NetworkingType // whether the node is running in private (staked) or public (unstaked) network + protocolPeerCacheList []protocol.ID +} + +func NewNodeBuilder( + logger zerolog.Logger, + gossipSubCfg *p2pconfig.GossipSubParameters, + metricsConfig *p2pbuilderconfig.MetricsConfig, + networkingType flownet.NetworkingType, + address string, + networkKey fcrypto.PrivateKey, + sporkId flow.Identifier, + idProvider module.IdentityProvider, + rCfg *p2pconfig.ResourceManagerConfig, + peerManagerConfig *p2pbuilderconfig.PeerManagerConfig, + disallowListCacheCfg *p2p.DisallowListCacheConfig, + unicastConfig *p2pbuilderconfig.UnicastConfig, +) *LibP2PNodeBuilder { + return &LibP2PNodeBuilder{ + logger: logger, + sporkId: sporkId, + address: address, + networkKey: networkKey, + createNode: func(cfg *p2p.NodeConfig) (p2p.LibP2PNode, error) { return p2pnode.NewNode(cfg) }, + metricsConfig: metricsConfig, + resourceManagerCfg: rCfg, + disallowListCacheCfg: disallowListCacheCfg, + networkingType: networkingType, + gossipSubBuilder: gossipsubbuilder.NewGossipSubBuilder(logger, + metricsConfig, + gossipSubCfg, + networkingType, + sporkId, + idProvider), + peerManagerConfig: peerManagerConfig, + unicastConfig: unicastConfig, + } +} + +var _ p2p.NodeBuilder = &LibP2PNodeBuilder{} + +// SetBasicResolver sets the DNS resolver for the node. +func (builder *LibP2PNodeBuilder) SetBasicResolver(br madns.BasicResolver) p2p.NodeBuilder { + builder.basicResolver = br + return builder +} + +// SetSubscriptionFilter sets the pubsub subscription filter for the node. +func (builder *LibP2PNodeBuilder) SetSubscriptionFilter(filter pubsub.SubscriptionFilter) p2p.NodeBuilder { + builder.gossipSubBuilder.SetSubscriptionFilter(filter) + return builder +} + +// SetResourceManager sets the resource manager for the node. +func (builder *LibP2PNodeBuilder) SetResourceManager(manager network.ResourceManager) p2p.NodeBuilder { + builder.resourceManager = manager + return builder +} + +// SetConnectionManager sets the connection manager for the node. +func (builder *LibP2PNodeBuilder) SetConnectionManager(manager connmgr.ConnManager) p2p.NodeBuilder { + builder.connManager = manager + return builder +} + +// SetConnectionGater sets the connection gater for the node. +func (builder *LibP2PNodeBuilder) SetConnectionGater(gater p2p.ConnectionGater) p2p.NodeBuilder { + builder.connGater = gater + return builder +} + +// SetRoutingSystem sets the routing system factory function. +func (builder *LibP2PNodeBuilder) SetRoutingSystem(f func(context.Context, host.Host) (routing.Routing, error)) p2p.NodeBuilder { + builder.routingFactory = f + return builder +} + +// OverrideDefaultValidateQueueSize sets the validate queue size to use for the libp2p pubsub system. +// CAUTION: Be careful setting this to a larger number as it will change the backpressure behavior of the system. 
+func (builder *LibP2PNodeBuilder) OverrideDefaultValidateQueueSize(size int) p2p.NodeBuilder {
+ builder.gossipSubBuilder.OverrideDefaultValidateQueueSize(size)
+ return builder
+}
+
+// SetProtocolPeerCacheList sets the protocols to track in the protocol peer cache.
+func (builder *LibP2PNodeBuilder) SetProtocolPeerCacheList(protocols ...protocol.ID) p2p.NodeBuilder {
+ builder.protocolPeerCacheList = protocols
+ return builder
+}
+
+// OverrideGossipSubFactory overrides the default gossipsub factory for the GossipSub protocol.
+// The purpose of this override is to allow the node to provide a custom gossipsub factory for the sake of testing or experimentation.
+// Note: it is not recommended to override the default gossipsub factory in production unless you know what you are doing.
+// Args:
+// - factory: custom gossipsub factory
+// Returns:
+// - NodeBuilder: the node builder
+func (builder *LibP2PNodeBuilder) OverrideGossipSubFactory(gf p2p.GossipSubFactoryFunc, cf p2p.GossipSubAdapterConfigFunc) p2p.NodeBuilder {
+ builder.gossipSubBuilder.SetGossipSubFactory(gf)
+ builder.gossipSubBuilder.SetGossipSubConfigFunc(cf)
+ return builder
+}
+
+// OverrideGossipSubScoringConfig overrides the default peer scoring config for the GossipSub protocol.
+// Note that it does not enable peer scoring. Peer scoring is enabled directly by setting the `peer-scoring-enabled` flag to true in `default-config.yaml`, or
+// by setting the `gossipsub-peer-scoring-enabled` runtime flag to true. This function only overrides the default peer scoring config, which takes effect
+// only if peer scoring is enabled (mostly for testing purposes).
+// Any existing peer scoring config attribute that is set in the override will override the default peer scoring config.
+// Anything that is left to nil or zero value in the override will be ignored and the default value will be used.
+// Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing.
+// Args:
+// - PeerScoringConfigOverride: override for the peer scoring config. It is recommended to use PeerScoringConfigNoOverride in production.
+// Returns:
+// none
+func (builder *LibP2PNodeBuilder) OverrideGossipSubScoringConfig(config *p2p.PeerScoringConfigOverride) p2p.NodeBuilder {
+ builder.gossipSubBuilder.EnableGossipSubScoringWithOverride(config)
+ return builder
+}
+
+// OverrideNodeConstructor overrides the default node constructor, i.e., the function that creates a new libp2p node.
+// The purpose of this override is to allow the node to provide a custom node constructor for the sake of testing or experimentation.
+// It is NOT recommended to override the default node constructor in production unless you know what you are doing.
+// Args:
+// - NodeConstructor: custom node constructor
+// Returns:
+// none
+func (builder *LibP2PNodeBuilder) OverrideNodeConstructor(f p2p.NodeConstructor) p2p.NodeBuilder {
+ builder.createNode = f
+ return builder
+}
+
+// OverrideDefaultRpcInspectorFactory overrides the default rpc inspector factory for the GossipSub protocol.
+// The purpose of this override is to allow the node to provide a custom rpc inspector factory for the sake of testing or experimentation.
+// Note: it is not recommended to override the default rpc inspector factory in production unless you know what you are doing.
+// Args: +// - factory: custom rpc inspector factory +// Returns: +// - NodeBuilder: the node builder +func (builder *LibP2PNodeBuilder) OverrideDefaultRpcInspectorFactory(factory p2p.GossipSubRpcInspectorFactoryFunc) p2p.NodeBuilder { + builder.gossipSubBuilder.OverrideDefaultRpcInspectorFactory(factory) + return builder +} + +// Build creates a new libp2p node using the configured options. +func (builder *LibP2PNodeBuilder) Build() (p2p.LibP2PNode, error) { + var opts []libp2p.Option + + if builder.basicResolver != nil { + resolver, err := madns.NewResolver(madns.WithDefaultResolver(builder.basicResolver)) + + if err != nil { + return nil, fmt.Errorf("could not create resolver: %w", err) + } + + opts = append(opts, libp2p.MultiaddrResolver(swarm.ResolverFromMaDNS{Resolver: resolver})) + } + + if builder.resourceManager != nil { + opts = append(opts, libp2p.ResourceManager(builder.resourceManager)) + builder.logger.Warn(). + Msg("libp2p resource manager is overridden by the node builder, metrics may not be available") + } else { + // scales the default limits by the allowed memory and file descriptors and applies the inbound connection and stream limits. + limits, err := BuildLibp2pResourceManagerLimits(builder.logger, builder.resourceManagerCfg) + if err != nil { + return nil, fmt.Errorf("could not build libp2p resource manager limits: %w", err) + } + mgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(*limits), rcmgr.WithMetrics(builder.metricsConfig.Metrics)) + if err != nil { + return nil, fmt.Errorf("could not create libp2p resource manager: %w", err) + } + + opts = append(opts, libp2p.ResourceManager(mgr)) + builder.logger.Info().Msgf("default libp2p resource manager is enabled with metrics, pubkey: %s", builder.networkKey.PublicKey()) + } + + if builder.connManager != nil { + opts = append(opts, libp2p.ConnectionManager(builder.connManager)) + } + + if builder.connGater != nil { + opts = append(opts, libp2p.ConnectionGater(builder.connGater)) + } + + h, err := DefaultLibP2PHost(builder.address, builder.networkKey, opts...) 
+ if err != nil {
+ return nil, err
+ }
+ builder.gossipSubBuilder.SetHost(h)
+ builder.logger = builder.logger.With().Str("local_peer_id", p2plogging.PeerId(h.ID())).Logger()
+
+ var peerManager p2p.PeerManager
+ if builder.peerManagerConfig.UpdateInterval > 0 {
+ connector, err := builder.peerManagerConfig.ConnectorFactory(h)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create libp2p connector: %w", err)
+ }
+ peerUpdater, err := connection.NewPeerUpdater(
+ &connection.PeerUpdaterConfig{
+ PruneConnections: builder.peerManagerConfig.ConnectionPruning,
+ Logger: builder.logger,
+ Host: connection.NewConnectorHost(h),
+ Connector: connector,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to create peer updater: %w", err)
+ }
+
+ peerManager = connection.NewPeerManager(builder.logger, builder.peerManagerConfig.UpdateInterval, peerUpdater)
+
+ if builder.unicastConfig.RateLimiterDistributor != nil {
+ builder.unicastConfig.RateLimiterDistributor.AddConsumer(peerManager)
+ }
+ }
+
+ node, err := builder.createNode(&p2p.NodeConfig{
+ Parameters: &p2p.NodeParameters{
+ EnableProtectedStreams: builder.unicastConfig.EnableStreamProtection,
+ },
+ Logger: builder.logger,
+ Host: h,
+ PeerManager: peerManager,
+ DisallowListCacheCfg: builder.disallowListCacheCfg,
+ ProtocolPeerCacheList: builder.protocolPeerCacheList,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not create libp2p node: %w", err)
+ }
+
+ if builder.connGater != nil {
+ builder.connGater.SetDisallowListOracle(node)
+ }
+
+ unicastManager, err := unicast.NewUnicastManager(&unicast.ManagerConfig{
+ Logger: builder.logger,
+ StreamFactory: stream.NewLibP2PStreamFactory(h),
+ SporkId: builder.sporkId,
+ Metrics: builder.metricsConfig.Metrics,
+ Parameters: &builder.unicastConfig.UnicastManager,
+ UnicastConfigCacheFactory: func(configFactory func() unicast.Config) unicast.ConfigCache {
+ return unicastcache.NewUnicastConfigCache(
+ builder.unicastConfig.UnicastManager.ConfigCacheSize,
+ builder.logger,
+ metrics.DialConfigCacheMetricFactory(builder.metricsConfig.HeroCacheFactory, builder.networkingType),
+ configFactory,
+ )
+ },
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not create unicast manager: %w", err)
+ }
+ node.SetUnicastManager(unicastManager)
+
+ cm := component.NewComponentManagerBuilder().
+ AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+ if builder.routingFactory != nil {
+ routingSystem, err := builder.routingFactory(ctx, h)
+ if err != nil {
+ ctx.Throw(fmt.Errorf("could not create routing system: %w", err))
+ }
+ if err := node.SetRouting(routingSystem); err != nil {
+ ctx.Throw(fmt.Errorf("could not set routing system: %w", err))
+ }
+ builder.gossipSubBuilder.SetRoutingSystem(routingSystem)
+ builder.logger.Debug().Msg("routing system created")
+ }
+ // gossipsub is created here, because it needs to be created during the node startup.
+ gossipSub, err := builder.gossipSubBuilder.Build(ctx)
+ if err != nil {
+ ctx.Throw(fmt.Errorf("could not create gossipsub: %w", err))
+ }
+ node.SetPubSub(gossipSub)
+ gossipSub.Start(ctx)
+ ready()
+
+ <-gossipSub.Done()
+ }).
+ AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+ // encapsulates shutdown logic for the libp2p node.
+ ready()
+ <-ctx.Done()
+ // we wait until the context is done, and then we stop the libp2p node.
+
+ err = node.Stop()
+ if err != nil {
+ // ignore context cancellation errors
+ if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+ ctx.Throw(fmt.Errorf("could not stop libp2p node: %w", err))
+ }
+ }
+ })
+
+ node.SetComponentManager(cm.Build())
+
+ return node, nil
+}
+
+// DefaultLibP2PHost returns a libp2p host initialized to listen on the given address, using the given private key, and
+// customized with the given options.
+func DefaultLibP2PHost(address string, key fcrypto.PrivateKey, options ...config.Option) (host.Host, error) {
+ defaultOptions, err := defaultLibP2POptions(address, key)
+ if err != nil {
+ return nil, err
+ }
+
+ allOptions := append(defaultOptions, options...)
+
+ // create the libp2p host
+ libP2PHost, err := libp2p.New(allOptions...)
+ if err != nil {
+ return nil, fmt.Errorf("could not create libp2p host: %w", err)
+ }
+
+ return libP2PHost, nil
+}
+
+// defaultLibP2POptions creates and returns the standard LibP2P host options that are used for the Flow Libp2p network
+func defaultLibP2POptions(address string, key fcrypto.PrivateKey) ([]config.Option, error) {
+
+ libp2pKey, err := keyutils.LibP2PPrivKeyFromFlow(key)
+ if err != nil {
+ return nil, fmt.Errorf("could not generate libp2p key: %w", err)
+ }
+
+ ip, port, err := net.SplitHostPort(address)
+ if err != nil {
+ return nil, fmt.Errorf("could not split node address %s: %w", address, err)
+ }
+
+ sourceMultiAddr, err := multiaddr.NewMultiaddr(utils.MultiAddressStr(ip, port))
+ if err != nil {
+ return nil, fmt.Errorf("failed to translate Flow address to Libp2p multiaddress: %w", err)
+ }
+
+ // create a transport which disables port reuse and web socket.
+ // Port reuse enables listening and dialing from the same TCP port (https://github.com/libp2p/go-reuseport).
+ // While this sounds great, it intermittently causes a 'broken pipe' error,
+ // as the 1-k discovery process and the 1-1 messaging both sometimes attempt to open a connection to the same target.
+ // As of now there is no requirement for client sockets to be on a well-known port, so port reuse is disabled altogether.
+ t := libp2p.Transport(func(u transport.Upgrader) (*tcp.TcpTransport, error) {
+ return tcp.NewTCPTransport(u, nil, nil, tcp.DisableReuseport())
+ })
+
+ // gather all the options for the libp2p node
+ options := []config.Option{
+ libp2p.ListenAddrs(sourceMultiAddr), // set the listen address
+ libp2p.Identity(libp2pKey), // pass in the networking key
+ t, // set the transport
+ }
+
+ return options, nil
+}
+
+// DefaultNodeBuilder returns a node builder.
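+// It wires up a connection manager, a connection gater seeded with the not-ejected peer filter,
+// a role-based subscription filter (for every role except "ghost"), and, for Access and Execution
+// nodes with the DHT system enabled, a DHT-based routing system.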
+func DefaultNodeBuilder( + logger zerolog.Logger, + address string, + networkingType flownet.NetworkingType, + flowKey fcrypto.PrivateKey, + sporkId flow.Identifier, + idProvider module.IdentityProvider, + metricsCfg *p2pbuilderconfig.MetricsConfig, + resolver madns.BasicResolver, + role string, + connGaterCfg *p2pbuilderconfig.ConnectionGaterConfig, + peerManagerCfg *p2pbuilderconfig.PeerManagerConfig, + gossipCfg *p2pconfig.GossipSubParameters, + rCfg *p2pconfig.ResourceManagerConfig, + uniCfg *p2pbuilderconfig.UnicastConfig, + connMgrConfig *netconf.ConnectionManager, + disallowListCacheCfg *p2p.DisallowListCacheConfig, + dhtSystemActivation DhtSystemActivation, +) (p2p.NodeBuilder, error) { + + connManager, err := connection.NewConnManager(logger, metricsCfg.Metrics, connMgrConfig) + if err != nil { + return nil, fmt.Errorf("could not create connection manager: %w", err) + } + + // set the default connection gater peer filters for both InterceptPeerDial and InterceptSecured callbacks + peerFilter := notEjectedPeerFilter(idProvider) + peerFilters := []p2p.PeerFilter{peerFilter} + + connGater := connection.NewConnGater( + logger, + idProvider, + connection.WithOnInterceptPeerDialFilters(append(peerFilters, connGaterCfg.InterceptPeerDialFilters...)), + connection.WithOnInterceptSecuredFilters(append(peerFilters, connGaterCfg.InterceptSecuredFilters...))) + + builder := NewNodeBuilder(logger, + gossipCfg, + metricsCfg, + networkingType, + address, + flowKey, + sporkId, + idProvider, + rCfg, peerManagerCfg, + disallowListCacheCfg, + uniCfg) + + builder. + SetBasicResolver(resolver). + SetConnectionManager(connManager). + SetConnectionGater(connGater) + + if role != "ghost" { + r, err := flow.ParseRole(role) + if err != nil { + return nil, fmt.Errorf("could not parse role: %w", err) + } + builder.SetSubscriptionFilter(subscription.NewRoleBasedFilter(r, idProvider)) + + builder.configureRoutingSystem(r, dhtSystemActivation) + } + + return builder, nil +} + +func (b *LibP2PNodeBuilder) configureRoutingSystem( + role flow.Role, + dhtSystemActivation DhtSystemActivation, +) { + if role != flow.RoleAccess && role != flow.RoleExecution { + return // routing only required for Access and Execution nodes + } + + if dhtSystemActivation == DhtSystemEnabled { + b.SetRoutingSystem(func(ctx context.Context, host host.Host) (routing.Routing, error) { + return dht.NewDHT(ctx, host, protocols.FlowDHTProtocolID(b.sporkId), b.logger, b.metricsConfig.Metrics, dht.AsServer()) + }) + } else { + // bitswap requires a content routing system. 
this returns a stub instead of a full DHT + b.SetRoutingSystem(func(ctx context.Context, host host.Host) (routing.Routing, error) { + return routinghelpers.Null{}, nil + }) + } +} diff --git a/network/p2p/p2pbuilder/libp2pscaler.go b/network/p2p/builder/libp2pscaler.go similarity index 100% rename from network/p2p/p2pbuilder/libp2pscaler.go rename to network/p2p/builder/libp2pscaler.go diff --git a/network/p2p/builder/libp2pscaler_test.go b/network/p2p/builder/libp2pscaler_test.go new file mode 100644 index 00000000000..4fd853dbd1c --- /dev/null +++ b/network/p2p/builder/libp2pscaler_test.go @@ -0,0 +1,210 @@ +package p2pbuilder + +import ( + "testing" + + "github.com/libp2p/go-libp2p" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" + "github.com/pbnjay/memory" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/config" + p2pconfig "github.com/onflow/flow-go/network/p2p/config" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestAllowedMemoryScale(t *testing.T) { + m := memory.TotalMemory() + require.True(t, m > 0) + + // scaling with factor of 1 should return the total memory. + s, err := allowedMemory(1) + require.NoError(t, err) + require.Equal(t, int64(m), s) + + // scaling with factor of 0 should return an error. + _, err = allowedMemory(0) + require.Error(t, err) + + // scaling with factor of -1 should return an error. + _, err = allowedMemory(-1) + require.Error(t, err) + + // scaling with factor of 2 should return an error. + _, err = allowedMemory(2) + require.Error(t, err) + + // scaling with factor of 0.5 should return half the total memory. + s, err = allowedMemory(0.5) + require.NoError(t, err) + require.Equal(t, int64(m/2), s) + + // scaling with factor of 0.1 should return 10% of the total memory. + s, err = allowedMemory(0.1) + require.NoError(t, err) + require.Equal(t, int64(m/10), s) + + // scaling with factor of 0.01 should return 1% of the total memory. + s, err = allowedMemory(0.01) + require.NoError(t, err) + require.Equal(t, int64(m/100), s) + + // scaling with factor of 0.001 should return 0.1% of the total memory. + s, err = allowedMemory(0.001) + require.NoError(t, err) + require.Equal(t, int64(m/1000), s) + + // scaling with factor of 0.0001 should return 0.01% of the total memory. + s, err = allowedMemory(0.0001) + require.NoError(t, err) + require.Equal(t, int64(m/10000), s) +} + +func TestAllowedFileDescriptorsScale(t *testing.T) { + // getting actual file descriptor limit. + fd, err := getNumFDs() + require.NoError(t, err) + require.True(t, fd > 0) + + // scaling with factor of 1 should return the total file descriptors. + s, err := allowedFileDescriptors(1) + require.NoError(t, err) + require.Equal(t, fd, s) + + // scaling with factor of 0 should return an error. + _, err = allowedFileDescriptors(0) + require.Error(t, err) + + // scaling with factor of -1 should return an error. + _, err = allowedFileDescriptors(-1) + require.Error(t, err) + + // scaling with factor of 2 should return an error. + _, err = allowedFileDescriptors(2) + require.Error(t, err) + + // scaling with factor of 0.5 should return half the total file descriptors. + s, err = allowedFileDescriptors(0.5) + require.NoError(t, err) + require.Equal(t, fd/2, s) + + // scaling with factor of 0.1 should return 10% of the total file descriptors. + s, err = allowedFileDescriptors(0.1) + require.NoError(t, err) + require.Equal(t, fd/10, s) + + // scaling with factor of 0.01 should return 1% of the total file descriptors. 
+ s, err = allowedFileDescriptors(0.01)
+ require.NoError(t, err)
+ require.Equal(t, fd/100, s)
+
+ // scaling with factor of 0.001 should return 0.1% of the total file descriptors.
+ s, err = allowedFileDescriptors(0.001)
+ require.NoError(t, err)
+ require.Equal(t, fd/1000, s)
+
+ // scaling with factor of 0.0001 should return 0.01% of the total file descriptors.
+ s, err = allowedFileDescriptors(0.0001)
+ require.NoError(t, err)
+ require.Equal(t, fd/10000, s)
+}
+
+// TestApplyResourceLimitOverride tests the ApplyResourceLimitOverride function. It tests the following cases:
+// 1. If the override limit is not set (i.e., left at zero), the original limit should be used.
+// 2. If the override limit is set, the override limit should be used.
+func TestApplyResourceLimitOverride(t *testing.T) {
+ cfg, err := config.DefaultConfig()
+ require.NoError(t, err)
+
+ mem, err := allowedMemory(cfg.NetworkConfig.ResourceManager.MemoryLimitRatio)
+ require.NoError(t, err)
+
+ fd, err := allowedFileDescriptors(cfg.NetworkConfig.ResourceManager.FileDescriptorsRatio)
+ require.NoError(t, err)
+ limits := rcmgr.DefaultLimits
+ libp2p.SetDefaultServiceLimits(&limits)
+ scaled := limits.Scale(mem, fd)
+
+ systemOverride := p2pconfig.ResourceManagerOverrideLimit{
+ StreamsInbound: 0, // should not be overridden.
+ StreamsOutbound: 456,
+ ConnectionsInbound: 789,
+ ConnectionsOutbound: 0, // should not be overridden.
+ FD: 4560,
+ Memory: 7890,
+ }
+
+ peerOverride := p2pconfig.ResourceManagerOverrideLimit{
+ StreamsInbound: 321,
+ StreamsOutbound: 0, // should not be overridden.
+ ConnectionsInbound: 987,
+ ConnectionsOutbound: 3210,
+ FD: 0, // should not be overridden.
+ Memory: 9870,
+ }
+
+ partial := rcmgr.PartialLimitConfig{}
+ partial.System = ApplyResourceLimitOverride(unittest.Logger(), p2pconfig.ResourceScopeSystem, scaled.ToPartialLimitConfig().System, systemOverride)
+ partial.PeerDefault = ApplyResourceLimitOverride(unittest.Logger(), p2pconfig.ResourceScopePeer, scaled.ToPartialLimitConfig().PeerDefault, peerOverride)
+
+ final := partial.Build(scaled).ToPartialLimitConfig()
+ require.Equal(t, 456, int(final.System.StreamsOutbound)) // should be overridden.
+ require.Equal(t, 789, int(final.System.ConnsInbound)) // should be overridden.
+ require.Equal(t, 4560, int(final.System.FD)) // should be overridden.
+ require.Equal(t, 7890, int(final.System.Memory)) // should be overridden.
+ require.Equal(t, scaled.ToPartialLimitConfig().System.StreamsInbound, final.System.StreamsInbound) // should NOT be overridden.
+ require.Equal(t, scaled.ToPartialLimitConfig().System.ConnsOutbound, final.System.ConnsOutbound) // should NOT be overridden.
+}
+
+// TestBuildLibp2pResourceManagerLimits tests the BuildLibp2pResourceManagerLimits function.
+// It creates a default configuration and an overridden configuration, and tests overriding the default configuration.
+func TestBuildLibp2pResourceManagerLimits(t *testing.T) {
+ cfg, err := config.DefaultConfig()
+ require.NoError(t, err)
+
+ // the default concrete limits are built from the default configuration.
+ defaultConcreteLimits, err := BuildLibp2pResourceManagerLimits(unittest.Logger(), &cfg.NetworkConfig.ResourceManager)
+ require.NoError(t, err)
+
+ // now the test creates random override configs for each scope, and re-builds the concrete limits.
+ cfg.NetworkConfig.ResourceManager.Override.System = unittest.LibP2PResourceLimitOverrideFixture()
+ cfg.NetworkConfig.ResourceManager.Override.Transient = unittest.LibP2PResourceLimitOverrideFixture()
+ cfg.NetworkConfig.ResourceManager.Override.Protocol = unittest.LibP2PResourceLimitOverrideFixture()
+ cfg.NetworkConfig.ResourceManager.Override.Peer = unittest.LibP2PResourceLimitOverrideFixture()
+ cfg.NetworkConfig.ResourceManager.Override.PeerProtocol = unittest.LibP2PResourceLimitOverrideFixture()
+ overriddenConcreteLimits, err := BuildLibp2pResourceManagerLimits(unittest.Logger(), &cfg.NetworkConfig.ResourceManager)
+ require.NoError(t, err)
+
+ // this function verifies that the limits are overridden correctly on each scope using the override config.
+ requireEqual := func(t *testing.T, override p2pconfig.ResourceManagerOverrideLimit, actual rcmgr.ResourceLimits) {
+ require.Equal(t, override.StreamsInbound, int(actual.StreamsInbound))
+ require.Equal(t, override.StreamsOutbound, int(actual.StreamsOutbound))
+ require.Equal(t, override.ConnectionsInbound, int(actual.ConnsInbound))
+ require.Equal(t, override.ConnectionsOutbound, int(actual.ConnsOutbound))
+ require.Equal(t, override.FD, int(actual.FD))
+ require.Equal(t, override.Memory, int(actual.Memory))
+ }
+
+ op := overriddenConcreteLimits.ToPartialLimitConfig()
+ requireEqual(t, cfg.NetworkConfig.ResourceManager.Override.System, op.System)
+ requireEqual(t, cfg.NetworkConfig.ResourceManager.Override.Transient, op.Transient)
+ requireEqual(t, cfg.NetworkConfig.ResourceManager.Override.Protocol, op.ProtocolDefault)
+ requireEqual(t, cfg.NetworkConfig.ResourceManager.Override.Peer, op.PeerDefault)
+ requireEqual(t, cfg.NetworkConfig.ResourceManager.Override.PeerProtocol, op.ProtocolPeerDefault)
+
+ // this function verifies that the default limits (before overriding) are not equal to the overridden limits.
+ requireNotEqual := func(t *testing.T, a rcmgr.ResourceLimits, b rcmgr.ResourceLimits) {
+ require.NotEqual(t, a.StreamsInbound, b.StreamsInbound)
+ require.NotEqual(t, a.StreamsOutbound, b.StreamsOutbound)
+ require.NotEqual(t, a.ConnsInbound, b.ConnsInbound)
+ require.NotEqual(t, a.ConnsOutbound, b.ConnsOutbound)
+ require.NotEqual(t, a.FD, b.FD)
+ require.NotEqual(t, a.Memory, b.Memory)
+ }
+ dp := defaultConcreteLimits.ToPartialLimitConfig()
+ requireNotEqual(t, dp.System, op.System)
+ requireNotEqual(t, dp.Transient, op.Transient)
+ requireNotEqual(t, dp.ProtocolDefault, op.ProtocolDefault)
+ requireNotEqual(t, dp.PeerDefault, op.PeerDefault)
+ requireNotEqual(t, dp.ProtocolPeerDefault, op.ProtocolPeerDefault)
+}
diff --git a/network/p2p/builder/resourceLimit.go b/network/p2p/builder/resourceLimit.go
new file mode 100644
index 00000000000..56a804d1258
--- /dev/null
+++ b/network/p2p/builder/resourceLimit.go
@@ -0,0 +1,106 @@
+package p2pbuilder
+
+import (
+ "fmt"
+
+ "github.com/libp2p/go-libp2p"
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ "github.com/rs/zerolog"
+
+ p2pconfig "github.com/onflow/flow-go/network/p2p/config"
+)
+
+// BuildLibp2pResourceManagerLimits builds the resource manager limits for the libp2p node.
+// Args:
+//
+// logger: logger to log the resource manager limits.
+// config: the resource manager configuration.
+//
+// Returns:
+//
+// - the resource manager limits.
+// - any error encountered, all returned errors are irrecoverable (we cannot continue without the resource manager limits).
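+//
+// Illustrative usage sketch (the config value comes from the node's network configuration):
+//
+//	limits, err := BuildLibp2pResourceManagerLimits(logger, &cfg.NetworkConfig.ResourceManager)
+//	if err != nil {
+//		return err
+//	}
+//	mgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(*limits))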
+func BuildLibp2pResourceManagerLimits(logger zerolog.Logger, config *p2pconfig.ResourceManagerConfig) (*rcmgr.ConcreteLimitConfig, error) { + defaultLimits := rcmgr.DefaultLimits + libp2p.SetDefaultServiceLimits(&defaultLimits) + + mem, err := allowedMemory(config.MemoryLimitRatio) + if err != nil { + return nil, fmt.Errorf("could not get allowed memory: %w", err) + } + fd, err := allowedFileDescriptors(config.FileDescriptorsRatio) + if err != nil { + return nil, fmt.Errorf("could not get allowed file descriptors: %w", err) + } + + scaled := defaultLimits.Scale(mem, fd) + scaledP := scaled.ToPartialLimitConfig() + override := rcmgr.PartialLimitConfig{} + override.System = ApplyResourceLimitOverride(logger, p2pconfig.ResourceScopeSystem, scaledP.System, config.Override.System) + override.Transient = ApplyResourceLimitOverride(logger, p2pconfig.ResourceScopeTransient, scaledP.Transient, config.Override.Transient) + override.ProtocolDefault = ApplyResourceLimitOverride(logger, p2pconfig.ResourceScopeProtocol, scaledP.ProtocolDefault, config.Override.Protocol) + override.PeerDefault = ApplyResourceLimitOverride(logger, p2pconfig.ResourceScopePeer, scaledP.PeerDefault, config.Override.Peer) + override.ProtocolPeerDefault = ApplyResourceLimitOverride(logger, p2pconfig.ResourceScopePeerProtocol, scaledP.ProtocolPeerDefault, config.Override.PeerProtocol) + + limits := override.Build(scaled) + logger.Info(). + Str("key", keyResourceManagerLimit). + Int64("allowed_memory", mem). + Int("allowed_file_descriptors", fd). + Msg("allowed memory and file descriptors are fetched from the system") + newLimitConfigLogger(logger.With().Str("key", keyResourceManagerLimit).Logger()).LogResourceManagerLimits(limits) + return &limits, nil +} + +// ApplyResourceLimitOverride applies the override limit to the original limit. +// For any attribute that is set in the override limit, the original limit will be overridden, otherwise +// the original limit will be used. +// Args: +// +// logger: logger to log the override limit. +// resourceScope: the scope of the resource, e.g., system, transient, protocol, peer, peer-protocol. +// original: the original limit. +// override: the override limit. +// +// Returns: +// +// the overridden limit. 
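+//
+// Illustrative sketch (values are placeholders): overriding only the inbound connection limit
+// leaves every other attribute of the original limit untouched.
+//
+//	override := p2pconfig.ResourceManagerOverrideLimit{ConnectionsInbound: 500}
+//	limits := ApplyResourceLimitOverride(logger, p2pconfig.ResourceScopeSystem, original, override)
+//	// limits.ConnsInbound is now 500; all other fields equal those of original.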
+func ApplyResourceLimitOverride(logger zerolog.Logger, + resourceScope p2pconfig.ResourceScope, + original rcmgr.ResourceLimits, + override p2pconfig.ResourceManagerOverrideLimit) rcmgr.ResourceLimits { + lg := logger.With().Logger() + + if override.StreamsInbound > 0 { + lg = lg.With().Int("streams-inbound-override", override.StreamsInbound).Int("streams-inbound-original", int(original.StreamsInbound)).Logger() + original.StreamsInbound = rcmgr.LimitVal(override.StreamsInbound) + } + + if override.StreamsOutbound > 0 { + lg = lg.With().Int("streams-outbound-override", override.StreamsOutbound).Int("streams-outbound-original", int(original.StreamsOutbound)).Logger() + original.StreamsOutbound = rcmgr.LimitVal(override.StreamsOutbound) + } + + if override.ConnectionsInbound > 0 { + lg = lg.With().Int("connections-inbound-override", override.ConnectionsInbound).Int("connections-inbound-original", int(original.ConnsInbound)).Logger() + original.ConnsInbound = rcmgr.LimitVal(override.ConnectionsInbound) + } + + if override.ConnectionsOutbound > 0 { + lg = lg.With().Int("connections-outbound-override", override.ConnectionsOutbound).Int("connections-outbound-original", int(original.ConnsOutbound)).Logger() + original.ConnsOutbound = rcmgr.LimitVal(override.ConnectionsOutbound) + } + + if override.FD > 0 { + lg = lg.With().Int("fd-override", override.FD).Int("fd-original", int(original.FD)).Logger() + original.FD = rcmgr.LimitVal(override.FD) + } + + if override.Memory > 0 { + lg = lg.With().Int("memory-override", override.Memory).Int("memory-original", int(original.Memory)).Logger() + original.Memory = rcmgr.LimitVal64(override.Memory) + } + + lg.Info().Str("scope", resourceScope.String()).Msg("scope resource limits examined for override") + return original +} diff --git a/network/p2p/builder/utils.go b/network/p2p/builder/utils.go new file mode 100644 index 00000000000..1a4f8a7bd80 --- /dev/null +++ b/network/p2p/builder/utils.go @@ -0,0 +1,125 @@ +package p2pbuilder + +import ( + "fmt" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network/p2p" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" +) + +const keyResourceManagerLimit = "libp2p_resource_manager_limit" + +// notEjectedPeerFilter returns a PeerFilter that will return an error if the peer is unknown or ejected. +func notEjectedPeerFilter(idProvider module.IdentityProvider) p2p.PeerFilter { + return func(p peer.ID) error { + if id, found := idProvider.ByPeerID(p); !found { + return fmt.Errorf("failed to get identity of unknown peer with peer id %s", p2plogging.PeerId(p)) + } else if id.IsEjected() { + return fmt.Errorf("peer %s with node id %s is ejected", p2plogging.PeerId(p), id.NodeID.String()) + } + + return nil + } +} + +type limitConfigLogger struct { + logger zerolog.Logger +} + +// newLimitConfigLogger creates a new limitConfigLogger. +func newLimitConfigLogger(logger zerolog.Logger) *limitConfigLogger { + return &limitConfigLogger{logger: logger} +} + +// withBaseLimit appends the base limit to the logger with the given prefix. +func (l *limitConfigLogger) withBaseLimit(prefix string, baseLimit rcmgr.ResourceLimits) zerolog.Logger { + return l.logger.With(). + Str("key", keyResourceManagerLimit). + Str(fmt.Sprintf("%s_streams", prefix), fmt.Sprintf("%v", baseLimit.Streams)). 
+ Str(fmt.Sprintf("%s_streams_inbound", prefix), fmt.Sprintf("%v", baseLimit.StreamsInbound)). + Str(fmt.Sprintf("%s_streams_outbound", prefix), fmt.Sprintf("%v,", baseLimit.StreamsOutbound)). + Str(fmt.Sprintf("%s_conns", prefix), fmt.Sprintf("%v", baseLimit.Conns)). + Str(fmt.Sprintf("%s_conns_inbound", prefix), fmt.Sprintf("%v", baseLimit.ConnsInbound)). + Str(fmt.Sprintf("%s_conns_outbound", prefix), fmt.Sprintf("%v", baseLimit.ConnsOutbound)). + Str(fmt.Sprintf("%s_file_descriptors", prefix), fmt.Sprintf("%v", baseLimit.FD)). + Str(fmt.Sprintf("%s_memory", prefix), fmt.Sprintf("%v", baseLimit.Memory)).Logger() +} + +func (l *limitConfigLogger) LogResourceManagerLimits(config rcmgr.ConcreteLimitConfig) { + // PartialLimit config is the same as ConcreteLimit config, but with the exported fields. + pCfg := config.ToPartialLimitConfig() + l.logGlobalResourceLimits(pCfg) + l.logServiceLimits(pCfg.Service) + l.logProtocolLimits(pCfg.Protocol) + l.logPeerLimits(pCfg.Peer) + l.logPeerProtocolLimits(pCfg.ProtocolPeer) +} + +func (l *limitConfigLogger) logGlobalResourceLimits(config rcmgr.PartialLimitConfig) { + lg := l.withBaseLimit("system", config.System) + lg.Info().Msg("system limits set") + + lg = l.withBaseLimit("transient", config.Transient) + lg.Info().Msg("transient limits set") + + lg = l.withBaseLimit("allowed_listed_system", config.AllowlistedSystem) + lg.Info().Msg("allowed listed system limits set") + + lg = l.withBaseLimit("allowed_lister_transient", config.AllowlistedTransient) + lg.Info().Msg("allowed listed transient limits set") + + lg = l.withBaseLimit("service_default", config.ServiceDefault) + lg.Info().Msg("service default limits set") + + lg = l.withBaseLimit("service_peer_default", config.ServicePeerDefault) + lg.Info().Msg("service peer default limits set") + + lg = l.withBaseLimit("protocol_default", config.ProtocolDefault) + lg.Info().Msg("protocol default limits set") + + lg = l.withBaseLimit("protocol_peer_default", config.ProtocolPeerDefault) + lg.Info().Msg("protocol peer default limits set") + + lg = l.withBaseLimit("peer_default", config.PeerDefault) + lg.Info().Msg("peer default limits set") + + lg = l.withBaseLimit("connections", config.Conn) + lg.Info().Msg("connection limits set") + + lg = l.withBaseLimit("streams", config.Stream) + lg.Info().Msg("stream limits set") +} + +func (l *limitConfigLogger) logServiceLimits(s map[string]rcmgr.ResourceLimits) { + for sName, sLimits := range s { + lg := l.withBaseLimit(fmt.Sprintf("service_%s", sName), sLimits) + lg.Info().Msg("service limits set") + } +} + +func (l *limitConfigLogger) logProtocolLimits(p map[protocol.ID]rcmgr.ResourceLimits) { + for pName, pLimits := range p { + lg := l.withBaseLimit(fmt.Sprintf("protocol_%s", pName), pLimits) + lg.Info().Msg("protocol limits set") + } +} + +func (l *limitConfigLogger) logPeerLimits(p map[peer.ID]rcmgr.ResourceLimits) { + for pId, pLimits := range p { + lg := l.withBaseLimit(fmt.Sprintf("peer_%s", p2plogging.PeerId(pId)), pLimits) + lg.Info().Msg("peer limits set") + } +} + +func (l *limitConfigLogger) logPeerProtocolLimits(p map[protocol.ID]rcmgr.ResourceLimits) { + for pName, pLimits := range p { + lg := l.withBaseLimit(fmt.Sprintf("protocol_peer_%s", pName), pLimits) + lg.Info().Msg("protocol peer limits set") + } +} diff --git a/network/p2p/cache.go b/network/p2p/cache.go index f764f1c6321..c6e3decb9eb 100644 --- a/network/p2p/cache.go +++ b/network/p2p/cache.go @@ -1,8 +1,12 @@ package p2p import ( + "time" + "github.com/libp2p/go-libp2p/core/peer" 
"github.com/libp2p/go-libp2p/core/protocol" + + "github.com/onflow/flow-go/model/flow" ) // ProtocolPeerCache is an interface that stores a mapping from protocol ID to peers who support that protocol. @@ -16,8 +20,8 @@ type ProtocolPeerCache interface { // RemoveProtocols removes the specified protocols for the given peer from the protocol cache. RemoveProtocols(peerID peer.ID, protocols []protocol.ID) - // GetPeers returns a copy of the set of peers that support the given protocol. - GetPeers(pid protocol.ID) map[peer.ID]struct{} + // GetPeers returns the set of peers that support the given protocol. + GetPeers(pid protocol.ID) peer.IDSlice } // UpdateFunction is a function that adjusts the GossipSub spam record of a peer. @@ -34,15 +38,6 @@ type UpdateFunction func(record GossipSubSpamRecord) GossipSubSpamRecord // // Implementation must be thread-safe. type GossipSubSpamRecordCache interface { - // Add adds the GossipSubSpamRecord of a peer to the cache. - // Args: - // - peerID: the peer ID of the peer in the GossipSub protocol. - // - record: the GossipSubSpamRecord of the peer. - // - // Returns: - // - bool: true if the record was added successfully, false otherwise. - Add(peerId peer.ID, record GossipSubSpamRecord) bool - // Get returns the GossipSubSpamRecord of a peer from the cache. // Args: // - peerID: the peer ID of the peer in the GossipSub protocol. @@ -52,14 +47,16 @@ type GossipSubSpamRecordCache interface { // - bool: true if the record was retrieved successfully, false otherwise. Get(peerID peer.ID) (*GossipSubSpamRecord, error, bool) - // Update updates the GossipSub spam penalty of a peer in the cache using the given adjust function. + // Adjust updates the GossipSub spam penalty of a peer in the cache. If the peer does not have a record in the cache, a new record is created. + // The order of the pre-processing functions is the same as the order in which they were added to the cache. // Args: // - peerID: the peer ID of the peer in the GossipSub protocol. - // - adjustFn: the adjust function to be applied to the record. + // - updateFn: the update function to be applied to the record. // Returns: // - *GossipSubSpamRecord: the updated record. // - error on failure to update the record. The returned error is irrecoverable and indicates an exception. - Update(peerID peer.ID, updateFunc UpdateFunction) (*GossipSubSpamRecord, error) + // Note that if any of the pre-processing functions returns an error, the record is reverted to its original state (prior to applying the update function). + Adjust(peerID peer.ID, updateFunc UpdateFunction) (*GossipSubSpamRecord, error) // Has returns true if the cache contains the GossipSubSpamRecord of the given peer. // Args: @@ -69,6 +66,33 @@ type GossipSubSpamRecordCache interface { Has(peerID peer.ID) bool } +// GossipSubApplicationSpecificScoreCache is a cache for storing the application specific score of peers. +// The application specific score of a peer is used to calculate the GossipSub score of the peer; it contains the spam penalty of the peer, staking score, and subscription penalty. +// Note that none of the application specific scores, spam penalties, staking scores, and subscription penalties are shared publicly with other peers. +// Rather they are solely used by the current peer to select the peers to which it will connect on a topic mesh. +// The cache is expected to have an eject policy to remove the least recently used record when the cache is full. +// Implementation must be thread-safe, but can be blocking. 
+type GossipSubApplicationSpecificScoreCache interface {
+	// Get returns the application specific score of a peer from the cache.
+	// Args:
+	// - peerID: the peer ID of the peer in the GossipSub protocol.
+	// Returns:
+	// - float64: the application specific score of the peer.
+	// - time.Time: the time at which the score was last updated.
+	// - bool: true if the score was retrieved successfully, false otherwise.
+	Get(peerID peer.ID) (float64, time.Time, bool)
+
+	// AdjustWithInit adds the application specific score of a peer to the cache.
+	// If the peer already has a score in the cache, the score is updated.
+	// Args:
+	// - peerID: the peer ID of the peer in the GossipSub protocol.
+	// - score: the application specific score of the peer.
+	// - time: the time at which the score was last updated.
+	// Returns:
+	// - error on failure to add the score. The returned error is irrecoverable and indicates an exception.
+	AdjustWithInit(peerID peer.ID, score float64, time time.Time) error
+}
+
 // GossipSubSpamRecord represents the spam record of a peer in the GossipSub protocol.
 // It acts as a penalty card for a peer in the GossipSub protocol that keeps the
 // spam penalty of the peer as well as its decay factor.
@@ -82,4 +106,17 @@ type GossipSubSpamRecord struct {
 	Decay float64
 	// Penalty is the application specific Penalty of the peer.
 	Penalty float64
+	// LastDecayAdjustment records the time of the most recent adjustment in the decay process for a spam record.
+	// At each interval, the system evaluates and potentially adjusts the decay rate, which affects how quickly a node's penalty diminishes.
+	// The decay process is multiplicative (newPenalty = decayRate * oldPenalty) and operates within a range of 0 to 1. At certain regular intervals, the decay adjustment is evaluated and if the node's penalty falls below the set threshold, the decay rate is modified by the reduction factor, such as 0.01. This modification incrementally increases the decay rate. For example, if the decay rate is `x`, adding the reduction factor results in a decay rate of `x + 0.01`, leading to a slower reduction in penalty. Thus, a higher decay rate actually slows down the recovery process, contrary to accelerating it.
+	// The LastDecayAdjustment timestamp is crucial in ensuring balanced and fair penalization, especially important during periods of high message traffic to prevent unintended rapid decay of penalties for malicious nodes.
+	LastDecayAdjustment time.Time
+}
+
+// MakeId is a helper function which converts a peer ID to a flow Identifier by taking the hash of the peer ID.
+// This is not a protocol-level conversion; it is only used internally by the mempool caches and MUST NOT be exposed outside the cache.
+// Returns:
+// - the hash of the peerID as a flow.Identifier.
+func MakeId(peerID peer.ID) flow.Identifier {
+	return flow.MakeID([]byte(peerID))
 }
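Since the caches below derive their keys with this helper, a small self-contained sketch may help; the local `makeId` mirror, the `main` wrapper, and the sample peer ID are illustrative assumptions, not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/onflow/flow-go/model/flow"
)

// makeId mirrors the p2p.MakeId helper introduced above: the cache key is
// the hash of the raw peer ID bytes, never the peer ID itself.
func makeId(peerID peer.ID) flow.Identifier {
	return flow.MakeID([]byte(peerID))
}

func main() {
	pid := peer.ID("example-peer-id") // illustrative peer ID
	// The same peer ID always maps to the same cache key.
	fmt.Println(makeId(pid) == makeId(pid)) // true
}
```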
diff --git a/network/p2p/cache/gossipsub_spam_records.go b/network/p2p/cache/gossipsub_spam_records.go
index 61251e28bcc..04712b9249f 100644
--- a/network/p2p/cache/gossipsub_spam_records.go
+++ b/network/p2p/cache/gossipsub_spam_records.go
@@ -13,15 +13,17 @@ import (
 	"github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
 	"github.com/onflow/flow-go/module/mempool/stdmap"
 	"github.com/onflow/flow-go/network/p2p"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
 )
 
 // GossipSubSpamRecordCache is a cache for storing the gossipsub spam records of peers. It is thread-safe.
 // The spam records of peers are used to calculate the application specific score, which is part of the GossipSub score of a peer.
 // Note that none of the spam records, application specific score, or GossipSub score are shared publicly with other peers.
 // Rather they are solely used by the current peer to select the peers to which it will connect on a topic mesh.
+// Stored gossipSubSpamRecords are keyed by the hash of the peerID.
 type GossipSubSpamRecordCache struct {
 	// the in-memory and thread-safe cache for storing the spam records of peers.
-	c *stdmap.Backend
+	c *stdmap.Backend[flow.Identifier, gossipSubSpamRecord]
 
 	// Optional: the pre-processors to be called upon reading or updating a record in the cache.
 	// The pre-processors are called in the order they are added to the cache.
@@ -29,6 +31,9 @@ type GossipSubSpamRecordCache struct {
 	// Primary use case is to perform decay operations on the record before reading or updating it. In this way, a
 	// record is only decayed when it is read or updated without the need to explicitly iterate over the cache.
 	preprocessFns []PreprocessorFunc
+
+	// initFn is a function that is called to initialize a new record in the cache.
+	initFn func() p2p.GossipSubSpamRecord
 }
 
 var _ p2p.GossipSubSpamRecordCache = (*GossipSubSpamRecordCache)(nil)
@@ -59,41 +64,26 @@ type PreprocessorFunc func(record p2p.GossipSubSpamRecord, lastUpdated time.Time
 // Returns:
 //
 //	*GossipSubSpamRecordCache: the newly created cache with a HeroCache-based backend.
-func NewGossipSubSpamRecordCache(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics, prFns ...PreprocessorFunc) *GossipSubSpamRecordCache {
-	backData := herocache.NewCache(sizeLimit,
+func NewGossipSubSpamRecordCache(sizeLimit uint32,
+	logger zerolog.Logger,
+	collector module.HeroCacheMetrics,
+	initFn func() p2p.GossipSubSpamRecord,
+	prFns ...PreprocessorFunc) *GossipSubSpamRecordCache {
+	backData := herocache.NewCache[gossipSubSpamRecord](
+		sizeLimit,
 		herocache.DefaultOversizeFactor,
-		// we should not evict any record from the cache,
-		// eviction will open the node to spam attacks by malicious peers to erase their application specific penalty.
-		heropool.NoEjection,
+		heropool.LRUEjection,
 		logger.With().Str("mempool", "gossipsub-app-Penalty-cache").Logger(),
-		collector)
+		collector,
+	)
 	return &GossipSubSpamRecordCache{
-		c:             stdmap.NewBackend(stdmap.WithBackData(backData)),
+		c:             stdmap.NewBackend(stdmap.WithMutableBackData[flow.Identifier, gossipSubSpamRecord](backData)),
 		preprocessFns: prFns,
+		initFn:        initFn,
 	}
 }
 
-// Add adds the GossipSubSpamRecord of a peer to the cache.
-// Args:
-// - peerID: the peer ID of the peer in the GossipSub protocol.
-// - record: the GossipSubSpamRecord of the peer.
-//
-// Returns:
-// - bool: true if the record was added successfully, false otherwise.
-// Note that a record is added successfully if the cache has enough space to store the record and no record exists for the peer in the cache.
-// In other words, the entries are deduplicated by the peer ID.
-func (a *GossipSubSpamRecordCache) Add(peerId peer.ID, record p2p.GossipSubSpamRecord) bool {
-	entityId := flow.HashToID([]byte(peerId)) // HeroCache uses hash of peer.ID as the unique identifier of the record.
-	return a.c.Add(gossipsubSpamRecordEntity{
-		entityId:            entityId,
-		peerID:              peerId,
-		lastUpdated:         time.Now(),
-		GossipSubSpamRecord: record,
-	})
-}
-
-// Update updates the GossipSub spam penalty of a peer in the cache.
It assumes that a record already exists for the peer in the cache. -// It first reads the record from the cache, applies the pre-processing functions to the record, and then applies the update function to the record. +// Adjust updates the GossipSub spam penalty of a peer in the cache. If the peer does not have a record in the cache, a new record is created. // The order of the pre-processing functions is the same as the order in which they were added to the cache. // Args: // - peerID: the peer ID of the peer in the GossipSub protocol. @@ -102,45 +92,44 @@ func (a *GossipSubSpamRecordCache) Add(peerId peer.ID, record p2p.GossipSubSpamR // - *GossipSubSpamRecord: the updated record. // - error on failure to update the record. The returned error is irrecoverable and indicates an exception. // Note that if any of the pre-processing functions returns an error, the record is reverted to its original state (prior to applying the update function). -func (a *GossipSubSpamRecordCache) Update(peerID peer.ID, updateFn p2p.UpdateFunction) (*p2p.GossipSubSpamRecord, error) { - // HeroCache uses flow.Identifier for keys, so reformat of the peer.ID - entityId := flow.HashToID([]byte(peerID)) - if !a.c.Has(entityId) { - return nil, fmt.Errorf("could not update spam records for peer %s, record not found", peerID.String()) - } - +func (a *GossipSubSpamRecordCache) Adjust(peerID peer.ID, updateFn p2p.UpdateFunction) (*p2p.GossipSubSpamRecord, error) { var err error - record, updated := a.c.Adjust(entityId, func(entry flow.Entity) flow.Entity { - e := entry.(gossipsubSpamRecordEntity) - - currentRecord := e.GossipSubSpamRecord + adjustFunc := func(gossipSubSpamRecordWrapper gossipSubSpamRecord) gossipSubSpamRecord { + currentRecord := gossipSubSpamRecordWrapper.GossipSubSpamRecord // apply the pre-processing functions to the record. for _, apply := range a.preprocessFns { - e.GossipSubSpamRecord, err = apply(e.GossipSubSpamRecord, e.lastUpdated) + gossipSubSpamRecordWrapper.GossipSubSpamRecord, err = apply(gossipSubSpamRecordWrapper.GossipSubSpamRecord, gossipSubSpamRecordWrapper.lastUpdated) if err != nil { - e.GossipSubSpamRecord = currentRecord - return e // return the original record if the pre-processing fails (atomic abort). + gossipSubSpamRecordWrapper.GossipSubSpamRecord = currentRecord + return gossipSubSpamRecordWrapper // return the original record if the pre-processing fails (atomic abort). } } // apply the update function to the record. - e.GossipSubSpamRecord = updateFn(e.GossipSubSpamRecord) + gossipSubSpamRecordWrapper.GossipSubSpamRecord = updateFn(gossipSubSpamRecordWrapper.GossipSubSpamRecord) - if e.GossipSubSpamRecord != currentRecord { - e.lastUpdated = time.Now() + if gossipSubSpamRecordWrapper.GossipSubSpamRecord != currentRecord { + gossipSubSpamRecordWrapper.lastUpdated = time.Now() } - return e - }) + return gossipSubSpamRecordWrapper + } + + initFunc := func() gossipSubSpamRecord { + return gossipSubSpamRecord{ + peerID: peerID, + GossipSubSpamRecord: a.initFn(), + } + } + + adjustedWrapper, adjusted := a.c.AdjustWithInit(p2p.MakeId(peerID), adjustFunc, initFunc) if err != nil { - return nil, fmt.Errorf("could not update spam records for peer %s, error: %w", peerID.String(), err) + return nil, fmt.Errorf("error while applying pre-processing functions to cache record for peer %s: %w", p2plogging.PeerId(peerID), err) } - if !updated { - // this happens when the underlying HeroCache fails to update the record. 
- return nil, fmt.Errorf("internal cache error for updating %s", peerID.String()) + if !adjusted { + return nil, fmt.Errorf("could not adjust cache record for peer %s", p2plogging.PeerId(peerID)) } - r := record.(gossipsubSpamRecordEntity).GossipSubSpamRecord - return &r, nil + return &adjustedWrapper.GossipSubSpamRecord, nil } // Has returns true if the spam record of a peer is found in the cache, false otherwise. @@ -149,8 +138,7 @@ func (a *GossipSubSpamRecordCache) Update(peerID peer.ID, updateFn p2p.UpdateFun // Returns: // - true if the gossipsub spam record of the peer is found in the cache, false otherwise. func (a *GossipSubSpamRecordCache) Has(peerID peer.ID) bool { - entityId := flow.HashToID([]byte(peerID)) // HeroCache uses hash of peer.ID as the unique identifier of the record. - return a.c.Has(entityId) + return a.c.Has(p2p.MakeId(peerID)) } // Get returns the spam record of a peer from the cache. @@ -164,62 +152,42 @@ func (a *GossipSubSpamRecordCache) Has(peerID peer.ID) bool { // the caller is advised to crash the node. // - true if the record is found in the cache, false otherwise. func (a *GossipSubSpamRecordCache) Get(peerID peer.ID) (*p2p.GossipSubSpamRecord, error, bool) { - entityId := flow.HashToID([]byte(peerID)) // HeroCache uses hash of peer.ID as the unique identifier of the record. - if !a.c.Has(entityId) { + key := p2p.MakeId(peerID) + if !a.c.Has(key) { return nil, nil, false } var err error - record, updated := a.c.Adjust(entityId, func(entry flow.Entity) flow.Entity { - e := entry.(gossipsubSpamRecordEntity) - - currentRecord := e.GossipSubSpamRecord + record, updated := a.c.Adjust(key, func(gossipSubSpamRecordWrapper gossipSubSpamRecord) gossipSubSpamRecord { + currentRecord := gossipSubSpamRecordWrapper.GossipSubSpamRecord for _, apply := range a.preprocessFns { - e.GossipSubSpamRecord, err = apply(e.GossipSubSpamRecord, e.lastUpdated) + gossipSubSpamRecordWrapper.GossipSubSpamRecord, err = apply(gossipSubSpamRecordWrapper.GossipSubSpamRecord, gossipSubSpamRecordWrapper.lastUpdated) if err != nil { - e.GossipSubSpamRecord = currentRecord - return e // return the original record if the pre-processing fails (atomic abort). + gossipSubSpamRecordWrapper.GossipSubSpamRecord = currentRecord + return gossipSubSpamRecordWrapper // return the original record if the pre-processing fails (atomic abort). } } - if e.GossipSubSpamRecord != currentRecord { - e.lastUpdated = time.Now() + if gossipSubSpamRecordWrapper.GossipSubSpamRecord != currentRecord { + gossipSubSpamRecordWrapper.lastUpdated = time.Now() } - return e + return gossipSubSpamRecordWrapper }) if err != nil { - return nil, fmt.Errorf("error while applying pre-processing functions to cache record for peer %s: %w", peerID.String(), err), false + return nil, fmt.Errorf("error while applying pre-processing functions to cache record for peer %s: %w", p2plogging.PeerId(peerID), err), false } if !updated { - return nil, fmt.Errorf("could not decay cache record for peer %s", peerID.String()), false + return nil, fmt.Errorf("could not decay cache record for peer %s", p2plogging.PeerId(peerID)), false } - r := record.(gossipsubSpamRecordEntity).GossipSubSpamRecord - return &r, nil, true + return &record.GossipSubSpamRecord, nil, true } -// GossipSubSpamRecord represents an Entity implementation GossipSubSpamRecord. +// gossipSubSpamRecord represents a wrapper around the p2p.GossipSubSpamRecord. // It is internally used by the HeroCache to store the GossipSubSpamRecord. 
-type gossipsubSpamRecordEntity struct { - entityId flow.Identifier // the ID of the record (used to identify the record in the cache). - // lastUpdated is the time at which the record was last updated. +type gossipSubSpamRecord struct { // the peer ID of the peer in the GossipSub protocol. - peerID peer.ID + peerID peer.ID + // lastUpdated is the time at which the record was last updated. lastUpdated time.Time p2p.GossipSubSpamRecord } - -// In order to use HeroCache, the gossipsubSpamRecordEntity must implement the flow.Entity interface. -var _ flow.Entity = (*gossipsubSpamRecordEntity)(nil) - -// ID returns the ID of the gossipsubSpamRecordEntity. As the ID is used to identify the record in the cache, it must be unique. -// Also, as the ID is used frequently in the cache, it is stored in the record to avoid recomputing it. -// ID is never exposed outside the cache. -func (a gossipsubSpamRecordEntity) ID() flow.Identifier { - return a.entityId -} - -// Checksum returns the same value as ID. Checksum is implemented to satisfy the flow.Entity interface. -// HeroCache does not use the checksum of the gossipsubSpamRecordEntity. -func (a gossipsubSpamRecordEntity) Checksum() flow.Identifier { - return a.entityId -} diff --git a/network/p2p/cache/gossipsub_spam_records_test.go b/network/p2p/cache/gossipsub_spam_records_test.go index 166776b93ba..bee5ecdc26e 100644 --- a/network/p2p/cache/gossipsub_spam_records_test.go +++ b/network/p2p/cache/gossipsub_spam_records_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/atomic" @@ -21,58 +20,65 @@ import ( // adding a new record to the cache. func TestGossipSubSpamRecordCache_Add(t *testing.T) { // create a new instance of GossipSubSpamRecordCache. - cache := netcache.NewGossipSubSpamRecordCache(100, unittest.Logger(), metrics.NewNoopCollector()) + cache := netcache.NewGossipSubSpamRecordCache(100, unittest.Logger(), metrics.NewNoopCollector(), func() p2p.GossipSubSpamRecord { + return p2p.GossipSubSpamRecord{ + Decay: 0, + Penalty: 0, + LastDecayAdjustment: time.Now(), + } + }) - // tests adding a new record to the cache. - require.True(t, cache.Add("peer0", p2p.GossipSubSpamRecord{ - Decay: 0.1, - Penalty: 0.5, - })) + adjustedEntity, err := cache.Adjust("peer0", func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { + record.Decay = 0.1 + record.Penalty = 0.5 - // tests updating an existing record in the cache. - require.False(t, cache.Add("peer0", p2p.GossipSubSpamRecord{ - Decay: 0.1, - Penalty: 0.5, - })) + return record + }) + require.NoError(t, err) + require.Equal(t, 0.1, adjustedEntity.Decay) + require.Equal(t, 0.5, adjustedEntity.Penalty) // makes the cache full. - for i := 1; i < 100; i++ { - require.True(t, cache.Add(peer.ID(fmt.Sprintf("peer%d", i)), p2p.GossipSubSpamRecord{ - Decay: 0.1, - Penalty: 0.5, - })) - } + for i := 1; i <= 100; i++ { + adjustedEntity, err := cache.Adjust(peer.ID(fmt.Sprintf("peer%d", i)), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { + record.Decay = 0.1 + record.Penalty = 0.5 - // adding a new record to the cache should fail. - require.False(t, cache.Add("peer101", p2p.GossipSubSpamRecord{ - Decay: 0.1, - Penalty: 0.5, - })) + return record + }) + + require.NoError(t, err) + require.Equal(t, 0.1, adjustedEntity.Decay) + } // retrieving an existing record should work. 
- for i := 0; i < 100; i++ { + for i := 1; i <= 100; i++ { record, err, ok := cache.Get(peer.ID(fmt.Sprintf("peer%d", i))) - require.True(t, ok) + require.True(t, ok, fmt.Sprintf("record for peer%d should exist", i)) require.NoError(t, err) require.Equal(t, 0.1, record.Decay) require.Equal(t, 0.5, record.Penalty) } - // yet attempting on adding an existing record should fail. - require.False(t, cache.Add("peer1", p2p.GossipSubSpamRecord{ - Decay: 0.2, - Penalty: 0.8, - })) + // since cache is LRU, the first record should be evicted. + _, err, ok := cache.Get("peer0") + require.False(t, ok) + require.NoError(t, err) } -// TestGossipSubSpamRecordCache_Concurrent_Add tests if the cache can be added and retrieved concurrently. -// It updates the cache with a number of records concurrently and then checks if the cache -// can retrieve all records. -func TestGossipSubSpamRecordCache_Concurrent_Add(t *testing.T) { - cache := netcache.NewGossipSubSpamRecordCache(200, unittest.Logger(), metrics.NewNoopCollector()) +// TestGossipSubSpamRecordCache_Concurrent_Adjust tests if the cache can be adjusted and retrieved concurrently. +// It adjusts the cache with a number of records concurrently and then checks if the cache can retrieve all records. +func TestGossipSubSpamRecordCache_Concurrent_Adjust(t *testing.T) { + cache := netcache.NewGossipSubSpamRecordCache(200, unittest.Logger(), metrics.NewNoopCollector(), func() p2p.GossipSubSpamRecord { + return p2p.GossipSubSpamRecord{ + Decay: 0, + Penalty: 0, + LastDecayAdjustment: time.Now(), + } + }) - // defines the number of records to update. + // defines the number of records to be adjusted. numRecords := 100 // uses a wait group to wait for all goroutines to finish. @@ -84,15 +90,20 @@ func TestGossipSubSpamRecordCache_Concurrent_Add(t *testing.T) { go func(num int) { defer wg.Done() peerID := fmt.Sprintf("peer%d", num) - added := cache.Add(peer.ID(peerID), p2p.GossipSubSpamRecord{ - Decay: 0.1 * float64(num), - Penalty: float64(num), + adjustedEntity, err := cache.Adjust(peer.ID(peerID), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { + record.Decay = 0.1 * float64(num) + record.Penalty = float64(num) + + return record }) - require.True(t, added) + + require.NoError(t, err) + require.Equal(t, 0.1*float64(num), adjustedEntity.Decay) + require.Equal(t, float64(num), adjustedEntity.Penalty) }(i) } - unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "could not update all records concurrently on time") + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "could not adjust all records concurrently on time") // checks if the cache can retrieve all records. for i := 0; i < numRecords; i++ { @@ -110,96 +121,51 @@ func TestGossipSubSpamRecordCache_Concurrent_Add(t *testing.T) { } } -// TestGossipSubSpamRecordCache_Update tests the Update method of the GossipSubSpamRecordCache. It tests if the cache can update -// the penalty of an existing record and fail to update the penalty of a non-existing record. -func TestGossipSubSpamRecordCache_Update(t *testing.T) { - cache := netcache.NewGossipSubSpamRecordCache(200, unittest.Logger(), metrics.NewNoopCollector()) +// TestGossipSubSpamRecordCache_Adjust tests the Adjust method of the GossipSubSpamRecordCache. It tests if the cache can adjust +// the penalty of an existing record and add a new record. 
+func TestGossipSubSpamRecordCache_Adjust(t *testing.T) { + cache := netcache.NewGossipSubSpamRecordCache(200, unittest.Logger(), metrics.NewNoopCollector(), func() p2p.GossipSubSpamRecord { + return p2p.GossipSubSpamRecord{ + Decay: 0, + Penalty: 0, + LastDecayAdjustment: time.Now(), + } + }) peerID := "peer1" - // tests updateing the penalty of an existing record. - require.True(t, cache.Add(peer.ID(peerID), p2p.GossipSubSpamRecord{ - Decay: 0.1, - Penalty: 0.5, - })) - record, err := cache.Update(peer.ID(peerID), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { + // test adjusting a non-existing record. + record, err := cache.Adjust(peer.ID(peerID), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { record.Penalty = 0.7 return record }) require.NoError(t, err) - require.Equal(t, 0.7, record.Penalty) // checks if the penalty is updateed correctly. + require.Equal(t, 0.7, record.Penalty) // checks if the penalty is adjusted correctly. - // tests updating the penalty of a non-existing record. - record, err = cache.Update(peer.ID("peer2"), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { - require.Fail(t, "the function should not be called for a non-existing record") + // test adjusting an existing record. + record, err = cache.Adjust(peer.ID(peerID), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { + record.Penalty = 0.8 return record }) - require.Error(t, err) - require.Nil(t, record) -} - -// TestGossipSubSpamRecordCache_Concurrent_Update tests if the cache can be updated concurrently. It updates the cache -// with a number of records concurrently and then checks if the cache can retrieve all records. -func TestGossipSubSpamRecordCache_Concurrent_Update(t *testing.T) { - cache := netcache.NewGossipSubSpamRecordCache(200, unittest.Logger(), metrics.NewNoopCollector()) - - // defines the number of records to update. - numRecords := 100 - - // adds all records to the cache, sequentially. - for i := 0; i < numRecords; i++ { - peerID := fmt.Sprintf("peer%d", i) - err := cache.Add(peer.ID(peerID), p2p.GossipSubSpamRecord{ - Decay: 0.1 * float64(i), - Penalty: float64(i), - }) - require.True(t, err) - } - - // uses a wait group to wait for all goroutines to finish. - var wg sync.WaitGroup - wg.Add(numRecords) - - // updates the records concurrently. - for i := 0; i < numRecords; i++ { - go func(num int) { - defer wg.Done() - peerID := fmt.Sprintf("peer%d", num) - _, err := cache.Update(peer.ID(peerID), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { - record.Penalty = 0.7 * float64(num) - record.Decay = 0.1 * float64(num) - return record - }) - require.NoError(t, err) - }(i) - } - - unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "could not update all records concurrently on time") - - // checks if the cache can retrieve all records. - for i := 0; i < numRecords; i++ { - peerID := fmt.Sprintf("peer%d", i) - record, err, found := cache.Get(peer.ID(peerID)) - require.True(t, found) - require.NoError(t, err) - - expectedPenalty := 0.7 * float64(i) - require.Equal(t, expectedPenalty, record.Penalty, - "Get() returned incorrect Penalty for record %s: expected %f, got %f", peerID, expectedPenalty, record.Penalty) - expectedDecay := 0.1 * float64(i) - require.Equal(t, expectedDecay, record.Decay, - "Get() returned incorrect Decay for record %s: expected %f, got %f", peerID, expectedDecay, record.Decay) - } + require.NoError(t, err) + require.Equal(t, 0.8, record.Penalty) // checks if the penalty is adjusted correctly. 
} -// TestGossipSubSpamRecordCache_Update_With_Preprocess tests Update method of the GossipSubSpamRecordCache when the cache +// TestGossipSubSpamRecordCache_Adjust_With_Preprocess tests Adjust method of the GossipSubSpamRecordCache when the cache // has preprocessor functions. -// It tests when the cache has preprocessor functions, all preprocessor functions are called prior to the update function. +// It tests when the cache has preprocessor functions, all preprocessor functions are called prior to the adjust function. // Also, it tests if the pre-processor functions are called in the order they are added. -func TestGossipSubSpamRecordCache_Update_With_Preprocess(t *testing.T) { +func TestGossipSubSpamRecordCache_Adjust_With_Preprocess(t *testing.T) { cache := netcache.NewGossipSubSpamRecordCache(200, unittest.Logger(), metrics.NewNoopCollector(), + func() p2p.GossipSubSpamRecord { + return p2p.GossipSubSpamRecord{ + Decay: 0, + Penalty: 0, + LastDecayAdjustment: time.Now(), + } + }, func(record p2p.GossipSubSpamRecord, lastUpdated time.Time) (p2p.GossipSubSpamRecord, error) { record.Penalty += 1.5 return record, nil @@ -209,14 +175,19 @@ func TestGossipSubSpamRecordCache_Update_With_Preprocess(t *testing.T) { }) peerID := "peer1" - // adds a record to the cache. - require.True(t, cache.Add(peer.ID(peerID), p2p.GossipSubSpamRecord{ - Decay: 0.1, - Penalty: 0.5, - })) + + // test adjusting a non-existing record. + record, err := cache.Adjust(peer.ID(peerID), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { + record.Penalty = 0.5 + record.Decay = 0.1 + return record + }) + require.NoError(t, err) + require.Equal(t, 0.5, record.Penalty) // checks if the penalty is adjusted correctly. + require.Equal(t, 0.1, record.Decay) // checks if the decay is adjusted correctly. // tests updating the penalty of an existing record. - record, err := cache.Update(peer.ID(peerID), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { + record, err = cache.Adjust(peer.ID(peerID), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { record.Penalty += 0.7 return record }) @@ -225,46 +196,61 @@ func TestGossipSubSpamRecordCache_Update_With_Preprocess(t *testing.T) { require.Equal(t, 0.1, record.Decay) // checks if the decay is not changed. } -// TestGossipSubSpamRecordCache_Update_Preprocess_Error tests the Update method of the GossipSubSpamRecordCache. -// It tests if any of the preprocessor functions returns an error, the update function effect +// TestGossipSubSpamRecordCache_Adjust_Preprocess_Error tests the Adjust method of the GossipSubSpamRecordCache. +// It tests if any of the preprocessor functions returns an error, the Adjust function effect // is reverted, and the error is returned. -func TestGossipSubSpamRecordCache_Update_Preprocess_Error(t *testing.T) { - secondPreprocessorCalled := false +func TestGossipSubSpamRecordCache_Adjust_Preprocess_Error(t *testing.T) { + secondPreprocessorCalled := 0 cache := netcache.NewGossipSubSpamRecordCache(200, unittest.Logger(), metrics.NewNoopCollector(), + func() p2p.GossipSubSpamRecord { + return p2p.GossipSubSpamRecord{ + Decay: 0, + Penalty: 0, + LastDecayAdjustment: time.Now(), + } + }, // the first preprocessor function does not return an error. func(record p2p.GossipSubSpamRecord, lastUpdated time.Time) (p2p.GossipSubSpamRecord, error) { return record, nil }, - // the second preprocessor function returns an error on the first call and nil on the second call onwards. 
+		// the second preprocessor function returns an error on the second call, and does not return an error on any other call.
+		// this means that adjustment should be successful on the first call, and should fail on the second call.
 		func(record p2p.GossipSubSpamRecord, lastUpdated time.Time) (p2p.GossipSubSpamRecord, error) {
-			if !secondPreprocessorCalled {
-				secondPreprocessorCalled = true
-				return record, fmt.Errorf("error")
+			secondPreprocessorCalled++
+			if secondPreprocessorCalled == 2 {
+				return record, fmt.Errorf("some error")
 			}
 			return record, nil
+
 		})
 
-	peerID := "peer1"
-	// adds a record to the cache.
-	require.True(t, cache.Add(peer.ID(peerID), p2p.GossipSubSpamRecord{
-		Decay:   0.1,
-		Penalty: 0.5,
-	}))
+	peerID := unittest.PeerIdFixture(t)
 
-	// tests updating the penalty of an existing record.
-	record, err := cache.Update(peer.ID(peerID), func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
+	// tests adjusting the penalty of a non-existing record; the record should be initialized and the penalty should be adjusted.
+	record, err := cache.Adjust(peerID, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
+		record.Penalty = 0.5
+		record.Decay = 0.1
+		return record
+	})
+	require.NoError(t, err)
+	require.NotNil(t, record)
+	require.Equal(t, 0.5, record.Penalty) // checks if the penalty is adjusted correctly.
+	require.Equal(t, 0.1, record.Decay)   // checks if the decay is adjusted correctly.
+
+	// tests adjusting the penalty of an existing record.
+	record, err = cache.Adjust(peerID, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
 		record.Penalty = 0.7
 		return record
 	})
-	// since the second preprocessor function returns an error, the update function effect should be reverted.
+	// since the second preprocessor function returns an error, the adjust function effect should be reverted.
 	// the error should be returned.
 	require.Error(t, err)
 	require.Nil(t, record) // the returned record is nil when the adjustment fails.
 
-	record, err, found := cache.Get(peer.ID(peerID))
+	record, err, found := cache.Get(peerID)
 	require.True(t, found)
 	require.NoError(t, err)
 	require.Equal(t, 0.5, record.Penalty) // checks if the penalty is not changed.
@@ -272,22 +258,33 @@ func TestGossipSubSpamRecordCache_Update_Preprocess_Error(t *testing.T) {
 }
 
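The by-value property that the next test (TestGossipSubSpamRecordCache_ByValue) relies on can be shown with a toy sketch; the plain map standing in for HeroCache and all names here are assumptions for illustration only:

```go
package main

import "fmt"

type record struct {
	penalty float64
}

func main() {
	// A plain map stands in for the cache: Go stores struct values by copy.
	cache := map[string]record{"peerA": {penalty: 0.5}}

	// Mutating the retrieved copy must not affect the stored record.
	r := cache["peerA"]
	r.penalty = 0.8

	fmt.Println(cache["peerA"].penalty) // 0.5: the stored copy is unchanged
}
```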
 // TestGossipSubSpamRecordCache_ByValue tests if the cache stores the GossipSubSpamRecord by value.
-// It updates the cache with a record and then modifies the record externally.
+// It adjusts the cache with a record and then modifies the record externally.
 // It then checks if the record in the cache is still the original record.
 // This is a desired behavior that is guaranteed by the underlying HeroCache library.
-// In other words, we don't desire the records to be externally mutable after they are added to the cache (unless by a subsequent call to Update).
+// In other words, we don't desire the records to be externally mutable after they are added to the cache (unless by a subsequent call to Adjust).
 func TestGossipSubSpamRecordCache_ByValue(t *testing.T) {
-	cache := netcache.NewGossipSubSpamRecordCache(200, unittest.Logger(), metrics.NewNoopCollector())
+	cache := netcache.NewGossipSubSpamRecordCache(200, unittest.Logger(), metrics.NewNoopCollector(), func() p2p.GossipSubSpamRecord {
+		return p2p.GossipSubSpamRecord{
+			Decay:               0,
+			Penalty:             0,
+			LastDecayAdjustment: time.Now(),
+		}
+	})
 
-	peerID := "peer1"
-	added := cache.Add(peer.ID(peerID), p2p.GossipSubSpamRecord{
-		Decay:   0.1,
-		Penalty: 0.5,
+	peerID := unittest.PeerIdFixture(t)
+	// adjusts a non-existing record, which should initialize the record.
+	record, err := cache.Adjust(peerID, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
+		record.Penalty = 0.5
+		record.Decay = 0.1
+		return record
 	})
-	require.True(t, added)
+	require.NoError(t, err)
+	require.NotNil(t, record)
+	require.Equal(t, 0.5, record.Penalty) // checks if the penalty is adjusted correctly.
+	require.Equal(t, 0.1, record.Decay)   // checks if the decay is adjusted correctly.
 
 	// get the record from the cache
-	record, err, found := cache.Get(peer.ID(peerID))
+	record, err, found := cache.Get(peerID)
 	require.True(t, found)
 	require.NoError(t, err)
 
@@ -296,7 +293,7 @@ func TestGossipSubSpamRecordCache_ByValue(t *testing.T) {
 	record.Penalty = 0.8
 
 	// get the record from the cache again
-	record, err, found = cache.Get(peer.ID(peerID))
+	record, err, found = cache.Get(peerID)
 	require.True(t, found)
 	require.NoError(t, err)
 
@@ -305,10 +302,16 @@ func TestGossipSubSpamRecordCache_ByValue(t *testing.T) {
 	require.Equal(t, 0.5, record.Penalty)
 }
 
-// TestGossipSubSpamRecordCache_Get_With_Preprocessors tests if the cache applies the preprocessors to the records
-// before returning them.
+// TestGossipSubSpamRecordCache_Get_With_Preprocessors tests if the cache applies the preprocessors to the records before returning them.
 func TestGossipSubSpamRecordCache_Get_With_Preprocessors(t *testing.T) {
 	cache := netcache.NewGossipSubSpamRecordCache(10, unittest.Logger(), metrics.NewNoopCollector(),
+		func() p2p.GossipSubSpamRecord {
+			return p2p.GossipSubSpamRecord{
+				Decay:               0,
+				Penalty:             0,
+				LastDecayAdjustment: time.Now(),
+			}
+		},
 		// first preprocessor: adds 1 to the penalty.
 		func(record p2p.GossipSubSpamRecord, lastUpdated time.Time) (p2p.GossipSubSpamRecord, error) {
 			record.Penalty++
@@ -321,22 +324,24 @@ func TestGossipSubSpamRecordCache_Get_With_Preprocessors(t *testing.T) {
 		},
 	)
 
-	record := p2p.GossipSubSpamRecord{
-		Decay:   0.5,
-		Penalty: 1,
-	}
-	added := cache.Add("peerA", record)
-	assert.True(t, added)
+	peerId := unittest.PeerIdFixture(t)
+	adjustedRecord, err := cache.Adjust(peerId, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
+		record.Penalty = 1
+		record.Decay = 0.5
+		return record
+	})
+	require.NoError(t, err)
+	require.Equal(t, 1.0, adjustedRecord.Penalty)
 
-	// verifies that the preprocessors were called and the record was updated accordingly.
-	cachedRecord, err, ok := cache.Get("peerA")
-	assert.NoError(t, err)
-	assert.True(t, ok)
+	// verifies that the preprocessors were called and the record was adjusted accordingly.
+	cachedRecord, err, ok := cache.Get(peerId)
+	require.NoError(t, err)
+	require.True(t, ok)
 
 	// expected penalty is 4: the first preprocessor adds 1 to the penalty and the second preprocessor multiplies the penalty by 2.
 	// (1 + 1) * 2 = 4
-	assert.Equal(t, 4.0, cachedRecord.Penalty) // penalty should be updated
-	assert.Equal(t, 0.5, cachedRecord.Decay)   // decay should not be modified
+	require.Equal(t, 4.0, cachedRecord.Penalty) // penalty should be adjusted
+	require.Equal(t, 0.5, cachedRecord.Decay)   // decay should not be modified
 }
 
 // TestGossipSubSpamRecordCache_Get_Preprocessor_Error tests if the cache returns an error if one of the preprocessors returns an error upon a Get.
@@ -349,15 +354,22 @@ func TestGossipSubSpamRecordCache_Get_Preprocessor_Error(t *testing.T) {
 	thirdPreprocessorCalledCount := 0
 	cache := netcache.NewGossipSubSpamRecordCache(10, unittest.Logger(), metrics.NewNoopCollector(),
+		func() p2p.GossipSubSpamRecord {
+			return p2p.GossipSubSpamRecord{
+				Decay:               0,
+				Penalty:             0,
+				LastDecayAdjustment: time.Now(),
+			}
+		},
 		// first preprocessor: adds 1 to the penalty.
 		func(record p2p.GossipSubSpamRecord, lastUpdated time.Time) (p2p.GossipSubSpamRecord, error) {
 			record.Penalty++
 			return record, nil
 		},
-		// second preprocessor: multiplies the penalty by 2 (this preprocessor returns an error on the second call)
+		// second preprocessor: multiplies the penalty by 2 (this preprocessor returns an error on the third call and onwards)
 		func(record p2p.GossipSubSpamRecord, lastUpdated time.Time) (p2p.GossipSubSpamRecord, error) {
 			secondPreprocessorCalledCount++
-			if secondPreprocessorCalledCount < 2 {
+			if secondPreprocessorCalledCount < 3 {
 				// on the first two calls, the preprocessor is successful
 				return record, nil
 			} else {
@@ -365,109 +377,153 @@ func TestGossipSubSpamRecordCache_Get_Preprocessor_Error(t *testing.T) {
 				return p2p.GossipSubSpamRecord{}, fmt.Errorf("error in preprocessor")
 			}
 		},
-		// since second preprocessor returns an error on the second call, the third preprocessor should not be called more than once..
+		// since the second preprocessor returns an error on the third call, the third preprocessor should not be called more than twice.
 		func(record p2p.GossipSubSpamRecord, lastUpdated time.Time) (p2p.GossipSubSpamRecord, error) {
 			thirdPreprocessorCalledCount++
-			require.Less(t, secondPreprocessorCalledCount, 2)
+			require.Less(t, secondPreprocessorCalledCount, 3)
 			return record, nil
 		},
 	)
 
-	record := p2p.GossipSubSpamRecord{
-		Decay:   0.5,
-		Penalty: 1,
-	}
-	added := cache.Add("peerA", record)
-	assert.True(t, added)
+	peerId := unittest.PeerIdFixture(t)
+	adjustedRecord, err := cache.Adjust(peerId, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
+		record.Penalty = 1
+		record.Decay = 0.5
+		return record
+	})
+	require.NoError(t, err)
+	require.Equal(t, 1.0, adjustedRecord.Penalty)
+	require.Equal(t, 0.5, adjustedRecord.Decay)
 
-	// verifies that the preprocessors were called and the penalty was updated accordingly.
-	cachedRecord, err, ok := cache.Get("peerA")
+	// verifies that the preprocessors were called and the penalty was adjusted accordingly.
+	cachedRecord, err, ok := cache.Get(peerId)
 	require.NoError(t, err)
-	assert.True(t, ok)
-	assert.Equal(t, 2.0, cachedRecord.Penalty) // penalty should be updated by the first preprocessor (1 + 1 = 2)
-	assert.Equal(t, 0.5, cachedRecord.Decay)
+	require.True(t, ok)
+	require.Equal(t, 2.0, cachedRecord.Penalty) // penalty should be adjusted by the first preprocessor (1 + 1 = 2)
+	require.Equal(t, 0.5, cachedRecord.Decay)
 
 	// query the cache again, which should trigger the second preprocessor to return an error.
- cachedRecord, err, ok = cache.Get("peerA") + cachedRecord, err, ok = cache.Get(peerId) require.Error(t, err) - assert.False(t, ok) - assert.Nil(t, cachedRecord) + require.False(t, ok) + require.Nil(t, cachedRecord) - // verifies that the third preprocessor was not called. - assert.Equal(t, 1, thirdPreprocessorCalledCount) - // verifies that the second preprocessor was called only twice (one success, and one failure). - assert.Equal(t, 2, secondPreprocessorCalledCount) + // verifies that the third preprocessor was called only twice (two success calls). + require.Equal(t, 2, thirdPreprocessorCalledCount) + // verifies that the second preprocessor was called three times (two success calls and one failure call). + require.Equal(t, 3, secondPreprocessorCalledCount) } // TestGossipSubSpamRecordCache_Get_Without_Preprocessors tests when no preprocessors are provided to the cache constructor // that the cache returns the original record without any modifications. func TestGossipSubSpamRecordCache_Get_Without_Preprocessors(t *testing.T) { - cache := netcache.NewGossipSubSpamRecordCache(10, unittest.Logger(), metrics.NewNoopCollector()) + cache := netcache.NewGossipSubSpamRecordCache(10, unittest.Logger(), metrics.NewNoopCollector(), func() p2p.GossipSubSpamRecord { + return p2p.GossipSubSpamRecord{ + Decay: 0, + Penalty: 0, + LastDecayAdjustment: time.Now(), + } + }) - record := p2p.GossipSubSpamRecord{ - Decay: 0.5, - Penalty: 1, - } - added := cache.Add("peerA", record) - assert.True(t, added) - - // verifies that no preprocessors were called and the record was not updated. - cachedRecord, err, ok := cache.Get("peerA") - assert.NoError(t, err) - assert.True(t, ok) - assert.Equal(t, 1.0, cachedRecord.Penalty) - assert.Equal(t, 0.5, cachedRecord.Decay) + peerId := unittest.PeerIdFixture(t) + adjustedRecord, err := cache.Adjust(peerId, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { + record.Penalty = 1 + record.Decay = 0.5 + return record + }) + require.NoError(t, err) + require.Equal(t, 1.0, adjustedRecord.Penalty) + require.Equal(t, 0.5, adjustedRecord.Decay) + + // verifies that no preprocessors were called and the record was not adjusted. + cachedRecord, err, ok := cache.Get(peerId) + require.NoError(t, err) + require.True(t, ok) + require.Equal(t, 1.0, cachedRecord.Penalty) + require.Equal(t, 0.5, cachedRecord.Decay) } -// TestGossipSubSpamRecordCache_Duplicate_Add_Sequential tests if the cache returns false when a duplicate record is added to the cache. +// TestGossipSubSpamRecordCache_Duplicate_Adjust_Sequential tests if the cache returns false when a duplicate record is added to the cache. // This test evaluates that the cache de-duplicates the records based on their peer id and not content, and hence // each peer id can only be added once to the cache. 
-// TestGossipSubSpamRecordCache_Duplicate_Add_Sequential tests if the cache returns false when a duplicate record is added to the cache.
+// TestGossipSubSpamRecordCache_Duplicate_Adjust_Sequential tests that repeated adjustments for the same peer id update a single record in place.
 // This test evaluates that the cache de-duplicates the records based on their peer id and not content, and hence
 // each peer id can only be added once to the cache.
-func TestGossipSubSpamRecordCache_Duplicate_Add_Sequential(t *testing.T) {
-	cache := netcache.NewGossipSubSpamRecordCache(10, unittest.Logger(), metrics.NewNoopCollector())
+func TestGossipSubSpamRecordCache_Duplicate_Adjust_Sequential(t *testing.T) {
+	cache := netcache.NewGossipSubSpamRecordCache(10, unittest.Logger(), metrics.NewNoopCollector(), func() p2p.GossipSubSpamRecord {
+		return p2p.GossipSubSpamRecord{
+			Decay:               0,
+			Penalty:             0,
+			LastDecayAdjustment: time.Now(),
+		}
+	})
 
-	record := p2p.GossipSubSpamRecord{
-		Decay:   0.5,
-		Penalty: 1,
-	}
-	added := cache.Add("peerA", record)
-	assert.True(t, added)
+	peerId := unittest.PeerIdFixture(t)
+	adjustedRecord, err := cache.Adjust(peerId, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
+		record.Penalty = 1
+		record.Decay = 0.5
+		return record
+	})
+	require.NoError(t, err)
+	require.Equal(t, 1.0, adjustedRecord.Penalty)
+	require.Equal(t, 0.5, adjustedRecord.Decay)
 
-	// verifies that the cache returns false when a duplicate record is added.
-	added = cache.Add("peerA", record)
-	assert.False(t, added)
+	// duplicate adjust should return the same record.
+	adjustedRecord, err = cache.Adjust(peerId, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
+		record.Penalty = 1
+		record.Decay = 0.5
+		return record
+	})
+	require.NoError(t, err)
+	require.Equal(t, 1.0, adjustedRecord.Penalty)
+	require.Equal(t, 0.5, adjustedRecord.Decay)
 
 	// verifies that the cache deduplicates the records based on their peer id and not content.
-	record.Penalty = 2
-	added = cache.Add("peerA", record)
-	assert.False(t, added)
+	adjustedRecord, err = cache.Adjust(peerId, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
+		record.Penalty = 3
+		record.Decay = 2
+		return record
+	})
+	require.NoError(t, err)
+	require.Equal(t, 3.0, adjustedRecord.Penalty)
+	require.Equal(t, 2.0, adjustedRecord.Decay)
 }
 
-// TestGossipSubSpamRecordCache_Duplicate_Add_Concurrent tests if the cache returns false when a duplicate record is added to the cache.
-// Test is the concurrent version of TestAppScoreCache_DuplicateAdd_Sequential.
-func TestGossipSubSpamRecordCache_Duplicate_Add_Concurrent(t *testing.T) {
-	cache := netcache.NewGossipSubSpamRecordCache(10, unittest.Logger(), metrics.NewNoopCollector())
+// TestGossipSubSpamRecordCache_Duplicate_Adjust_Concurrent tests that concurrent adjustments for the same peer id all succeed and leave a single record in the cache.
+// Test is the concurrent version of TestGossipSubSpamRecordCache_Duplicate_Adjust_Sequential.
+func TestGossipSubSpamRecordCache_Duplicate_Adjust_Concurrent(t *testing.T) {
+	cache := netcache.NewGossipSubSpamRecordCache(10, unittest.Logger(), metrics.NewNoopCollector(), func() p2p.GossipSubSpamRecord {
+		return p2p.GossipSubSpamRecord{
+			Decay:               0,
+			Penalty:             0,
+			LastDecayAdjustment: time.Now(),
+		}
+	})
 
 	successAdd := atomic.Int32{}
 	successAdd.Store(0)
 
 	record1 := p2p.GossipSubSpamRecord{
-		Decay:   0.5,
+		Decay:   1,
 		Penalty: 1,
 	}
 	record2 := p2p.GossipSubSpamRecord{
-		Decay:   0.5,
+		Decay:   1,
 		Penalty: 2,
 	}
 
 	wg := sync.WaitGroup{} // wait group to wait for all goroutines to finish.
 	wg.Add(2)
+	peerId := unittest.PeerIdFixture(t)
 	// adds a record to the cache concurrently.
-	add := func(record p2p.GossipSubSpamRecord) {
-		added := cache.Add("peerA", record)
-		if added {
-			successAdd.Inc()
-		}
+	add := func(newRecord p2p.GossipSubSpamRecord) {
+		_, err := cache.Adjust(peerId, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
+			record.Penalty = newRecord.Penalty
+			record.Decay = newRecord.Decay
+			record.LastDecayAdjustment = newRecord.LastDecayAdjustment
+			return record
+		})
+		require.NoError(t, err)
+		successAdd.Inc()
+
 		wg.Done()
 	}
 
@@ -476,6 +532,12 @@ func TestGossipSubSpamRecordCache_Duplicate_Add_Concurrent(t *testing.T) {
 
 	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "could not add records to the cache")
 
-	// verifies that only one of the records was added to the cache.
-	assert.Equal(t, int32(1), successAdd.Load())
+	// verifies that both adjustments were applied to the cache.
+	require.Equal(t, int32(2), successAdd.Load())
+
+	// verifies that the cached record matches one of the two concurrent adjustments.
+	cachedRecord, err, ok := cache.Get(peerId)
+	require.NoError(t, err)
+	require.True(t, ok)
+	require.True(t, cachedRecord.Penalty == 1 && cachedRecord.Decay == 1 || cachedRecord.Penalty == 2 && cachedRecord.Decay == 1)
 }
diff --git a/network/p2p/cache/node_blocklist_wrapper.go b/network/p2p/cache/node_blocklist_wrapper.go
deleted file mode 100644
index ae045ecff62..00000000000
--- a/network/p2p/cache/node_blocklist_wrapper.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package cache
-
-import (
-	"errors"
-	"fmt"
-	"sync"
-
-	"github.com/dgraph-io/badger/v2"
-	"github.com/libp2p/go-libp2p/core/peer"
-
-	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/module"
-	"github.com/onflow/flow-go/network/p2p"
-	"github.com/onflow/flow-go/storage"
-	"github.com/onflow/flow-go/storage/badger/operation"
-)
-
-// IdentifierSet represents a set of node IDs (operator-defined) whose communication should be blocked.
-type IdentifierSet map[flow.Identifier]struct{}
-
-// Contains returns true iff id ∈ s
-func (s IdentifierSet) Contains(id flow.Identifier) bool {
-	_, found := s[id]
-	return found
-}
-
-// NodeBlocklistWrapper is a wrapper for an `module.IdentityProvider` instance, where the
-// wrapper overrides the `Ejected` flag to true for all NodeIDs in a `blocklist`.
-// To avoid modifying the source of the identities, the wrapper creates shallow copies
-// of the identities (whenever necessary) and modifies the `Ejected` flag only in
-// the copy.
-// The `NodeBlocklistWrapper` internally represents the `blocklist` as a map, to enable
-// performant lookup. However, the exported API works with `flow.IdentifierList` for
-// blocklist, as this is a broadly supported data structure which lends itself better
-// to config or command-line inputs.
-type NodeBlocklistWrapper struct {
-	m  sync.RWMutex
-	db *badger.DB
-
-	identityProvider module.IdentityProvider
-	blocklist        IdentifierSet // `IdentifierSet` is a map, hence efficient O(1) lookup
-	distributor      p2p.DisallowListNotificationDistributor // distributor for the blocklist update notifications
-}
-
-var _ module.IdentityProvider = (*NodeBlocklistWrapper)(nil)
-
-// NewNodeBlocklistWrapper wraps the given `IdentityProvider`. The blocklist is
-// loaded from the database (or assumed to be empty if no database entry is present).
-func NewNodeBlocklistWrapper( - identityProvider module.IdentityProvider, - db *badger.DB, - distributor p2p.DisallowListNotificationDistributor) (*NodeBlocklistWrapper, error) { - - blocklist, err := retrieveBlocklist(db) - if err != nil { - return nil, fmt.Errorf("failed to read set of blocked node IDs from data base: %w", err) - } - - return &NodeBlocklistWrapper{ - db: db, - identityProvider: identityProvider, - blocklist: blocklist, - distributor: distributor, - }, nil -} - -// Update sets the wrapper's internal set of blocked nodes to `blocklist`. Empty list and `nil` -// (equivalent to empty list) are accepted inputs. To avoid legacy entries in the data base, this -// function purges the entire data base entry if `blocklist` is empty. -// This implementation is _eventually consistent_, where changes are written to the data base first -// and then (non-atomically!) the in-memory set of blocked nodes is updated. This strongly -// benefits performance and modularity. No errors are expected during normal operations. -func (w *NodeBlocklistWrapper) Update(blocklist flow.IdentifierList) error { - b := blocklist.Lookup() // converts slice to map - - w.m.Lock() - defer w.m.Unlock() - err := persistBlocklist(b, w.db) - if err != nil { - return fmt.Errorf("failed to persist set of blocked nodes to the data base: %w", err) - } - w.blocklist = b - err = w.distributor.DistributeBlockListNotification(blocklist) - - if err != nil { - return fmt.Errorf("failed to distribute blocklist update notification: %w", err) - } - - return nil -} - -// ClearBlocklist purges the set of blocked node IDs. Convenience function -// equivalent to w.Update(nil). No errors are expected during normal operations. -func (w *NodeBlocklistWrapper) ClearBlocklist() error { - return w.Update(nil) -} - -// GetBlocklist returns the set of blocked node IDs. -func (w *NodeBlocklistWrapper) GetBlocklist() flow.IdentifierList { - w.m.RLock() - defer w.m.RUnlock() - - identifiers := make(flow.IdentifierList, 0, len(w.blocklist)) - for i := range w.blocklist { - identifiers = append(identifiers, i) - } - return identifiers -} - -// Identities returns the full identities of _all_ nodes currently known to the -// protocol that pass the provided filter. Caution, this includes ejected nodes. -// Please check the `Ejected` flag in the returned identities (or provide a -// filter for removing ejected nodes). -func (w *NodeBlocklistWrapper) Identities(filter flow.IdentityFilter) flow.IdentityList { - identities := w.identityProvider.Identities(filter) - if len(identities) == 0 { - return identities - } - - // Iterate over all returned identities and set ejected flag to true. We - // copy both the return slice and identities of blocked nodes to avoid - // any possibility of accidentally modifying the wrapped IdentityProvider - idtx := make(flow.IdentityList, 0, len(identities)) - w.m.RLock() - for _, identity := range identities { - if w.blocklist.Contains(identity.NodeID) { - var i = *identity // shallow copy is sufficient, because `Ejected` flag is in top-level struct - i.Ejected = true - if filter(&i) { // we need to check the filter here again, because the filter might drop ejected nodes and we are modifying the ejected status here - idtx = append(idtx, &i) - } - } else { - idtx = append(idtx, identity) - } - } - w.m.RUnlock() - return idtx -} - -// ByNodeID returns the full identity for the node with the given Identifier, -// where Identifier is the way the protocol refers to the node. 
The function -// has the same semantics as a map lookup, where the boolean return value is -// true if and only if Identity has been found, i.e. `Identity` is not nil. -// Caution: function returns include ejected nodes. Please check the `Ejected` -// flag in the identity. -func (w *NodeBlocklistWrapper) ByNodeID(identifier flow.Identifier) (*flow.Identity, bool) { - identity, b := w.identityProvider.ByNodeID(identifier) - return w.setEjectedIfBlocked(identity), b -} - -// setEjectedIfBlocked checks whether the node with the given identity is on the `blocklist`. -// Shortcuts: -// - If the node's identity is nil, there is nothing to do because we don't generate identities here. -// - If the node is already ejected, we don't have to check the blocklist. -func (w *NodeBlocklistWrapper) setEjectedIfBlocked(identity *flow.Identity) *flow.Identity { - if identity == nil || identity.Ejected { - return identity - } - - w.m.RLock() - isBlocked := w.blocklist.Contains(identity.NodeID) - w.m.RUnlock() - if !isBlocked { - return identity - } - - // For blocked nodes, we want to return their `Identity` with the `Ejected` flag - // set to true. Caution: we need to copy the `Identity` before we override `Ejected`, as we - // would otherwise potentially change the wrapped IdentityProvider. - var i = *identity // shallow copy is sufficient, because `Ejected` flag is in top-level struct - i.Ejected = true - return &i -} - -// ByPeerID returns the full identity for the node with the given peer ID, -// peer.ID is the libp2p-level identifier of a Flow node. The function -// has the same semantics as a map lookup, where the boolean return value is -// true if and only if Identity has been found, i.e. `Identity` is not nil. -// Caution: function returns include ejected nodes. Please check the `Ejected` -// flag in the identity. -func (w *NodeBlocklistWrapper) ByPeerID(p peer.ID) (*flow.Identity, bool) { - identity, b := w.identityProvider.ByPeerID(p) - return w.setEjectedIfBlocked(identity), b -} - -// persistBlocklist writes the given blocklist to the database. To avoid legacy -// entries in the database, we prune the entire data base entry if `blocklist` is -// empty. No errors are expected during normal operations. -func persistBlocklist(blocklist IdentifierSet, db *badger.DB) error { - if len(blocklist) == 0 { - return db.Update(operation.PurgeBlocklist()) - } - return db.Update(operation.PersistBlocklist(blocklist)) -} - -// retrieveBlocklist reads the set of blocked nodes from the data base. -// In case no database entry exists, an empty set (nil map) is returned. -// No errors are expected during normal operations. 
-func retrieveBlocklist(db *badger.DB) (IdentifierSet, error) { - var blocklist map[flow.Identifier]struct{} - err := db.View(operation.RetrieveBlocklist(&blocklist)) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return nil, fmt.Errorf("unexpected error reading set of blocked nodes from data base: %w", err) - } - return blocklist, nil -} diff --git a/network/p2p/cache/node_blocklist_wrapper_test.go b/network/p2p/cache/node_blocklist_wrapper_test.go deleted file mode 100644 index cdc32b546f5..00000000000 --- a/network/p2p/cache/node_blocklist_wrapper_test.go +++ /dev/null @@ -1,390 +0,0 @@ -package cache_test - -import ( - "fmt" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "go.uber.org/atomic" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" - mocks "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/cache" - mockp2p "github.com/onflow/flow-go/network/p2p/mock" - "github.com/onflow/flow-go/utils/unittest" -) - -type NodeBlocklistWrapperTestSuite struct { - suite.Suite - DB *badger.DB - provider *mocks.IdentityProvider - - wrapper *cache.NodeBlocklistWrapper - distributor *mockp2p.DisallowListNotificationDistributor -} - -func (s *NodeBlocklistWrapperTestSuite) SetupTest() { - s.DB, _ = unittest.TempBadgerDB(s.T()) - s.provider = new(mocks.IdentityProvider) - - var err error - s.distributor = mockp2p.NewDisallowListNotificationDistributor(s.T()) - s.wrapper, err = cache.NewNodeBlocklistWrapper(s.provider, s.DB, s.distributor) - require.NoError(s.T(), err) -} - -func TestNodeBlocklistWrapperTestSuite(t *testing.T) { - suite.Run(t, new(NodeBlocklistWrapperTestSuite)) -} - -// TestHonestNode verifies: -// For nodes _not_ on the blocklist, the `cache.NodeBlocklistWrapper` should forward -// the identities from the wrapped `IdentityProvider` without modification. -func (s *NodeBlocklistWrapperTestSuite) TestHonestNode() { - s.Run("ByNodeID", func() { - identity := unittest.IdentityFixture() - s.provider.On("ByNodeID", identity.NodeID).Return(identity, true) - - i, found := s.wrapper.ByNodeID(identity.NodeID) - require.True(s.T(), found) - require.Equal(s.T(), i, identity) - }) - s.Run("ByPeerID", func() { - identity := unittest.IdentityFixture() - peerID := (peer.ID)("some_peer_ID") - s.provider.On("ByPeerID", peerID).Return(identity, true) - - i, found := s.wrapper.ByPeerID(peerID) - require.True(s.T(), found) - require.Equal(s.T(), i, identity) - }) - s.Run("Identities", func() { - identities := unittest.IdentityListFixture(11) - f := filter.In(identities[3:4]) - expectedFilteredIdentities := identities.Filter(f) - s.provider.On("Identities", mock.Anything).Return( - func(filter flow.IdentityFilter) flow.IdentityList { - return identities.Filter(filter) - }, - nil, - ) - require.Equal(s.T(), expectedFilteredIdentities, s.wrapper.Identities(f)) - }) -} - -// TestDenylistedNode tests proper handling of identities _on_ the blocklist: -// - For any identity `i` with `i.NodeID ∈ blocklist`, the returned identity -// should have `i.Ejected` set to `true` (irrespective of the `Ejected` -// flag's initial returned by the wrapped `IdentityProvider`). -// - The wrapper should _copy_ the identity and _not_ write into the wrapped -// IdentityProvider's memory. 
-// - For `IdentityProvider.ByNodeID` and `IdentityProvider.ByPeerID`: -// whether or not the wrapper modifies the `Ejected` flag should depend only -// in the NodeID of the returned identity, irrespective of the second return -// value (boolean). -// While returning (non-nil identity, false) is not a defined return value, -// we expect the wrapper to nevertheless handle this case to increase its -// generality. -func (s *NodeBlocklistWrapperTestSuite) TestDenylistedNode() { - blocklist := unittest.IdentityListFixture(11) - s.distributor.On("DistributeBlockListNotification", blocklist.NodeIDs()).Return(nil).Once() - err := s.wrapper.Update(blocklist.NodeIDs()) - require.NoError(s.T(), err) - - index := atomic.NewInt32(0) - for _, b := range []bool{true, false} { - expectedfound := b - - s.Run(fmt.Sprintf("IdentityProvider.ByNodeID returning (<non-nil identity>, %v)", expectedfound), func() { - originalIdentity := blocklist[index.Inc()] - s.provider.On("ByNodeID", originalIdentity.NodeID).Return(originalIdentity, expectedfound) - - var expectedIdentity = *originalIdentity // expected Identity is a copy of the original - expectedIdentity.Ejected = true // with the `Ejected` flag set to true - - i, found := s.wrapper.ByNodeID(originalIdentity.NodeID) - require.Equal(s.T(), expectedfound, found) - require.Equal(s.T(), &expectedIdentity, i) - - // check that originalIdentity returned by wrapped `IdentityProvider` is _not_ modified - require.False(s.T(), originalIdentity.Ejected) - }) - - s.Run(fmt.Sprintf("IdentityProvider.ByPeerID returning (<non-nil identity>, %v)", expectedfound), func() { - originalIdentity := blocklist[index.Inc()] - peerID := (peer.ID)(originalIdentity.NodeID.String()) - s.provider.On("ByPeerID", peerID).Return(originalIdentity, expectedfound) - - var expectedIdentity = *originalIdentity // expected Identity is a copy of the original - expectedIdentity.Ejected = true // with the `Ejected` flag set to true - - i, found := s.wrapper.ByPeerID(peerID) - require.Equal(s.T(), expectedfound, found) - require.Equal(s.T(), &expectedIdentity, i) - - // check that originalIdentity returned by `IdentityProvider` is _not_ modified by wrapper - require.False(s.T(), originalIdentity.Ejected) - }) - } - - s.Run("Identities", func() { - blocklistLookup := blocklist.Lookup() - honestIdentities := unittest.IdentityListFixture(8) - combinedIdentities := honestIdentities.Union(blocklist) - combinedIdentities = combinedIdentities.DeterministicShuffle(1234) - numIdentities := len(combinedIdentities) - - s.provider.On("Identities", mock.Anything).Return(combinedIdentities) - - noFilter := filter.Not(filter.In(nil)) - identities := s.wrapper.Identities(noFilter) - - require.Equal(s.T(), numIdentities, len(identities)) // expected number resulting identities have the - for _, i := range identities { - _, isBlocked := blocklistLookup[i.NodeID] - require.Equal(s.T(), isBlocked, i.Ejected) - } - - // check that original `combinedIdentities` returned by `IdentityProvider` are _not_ modified by wrapper - require.Equal(s.T(), numIdentities, len(combinedIdentities)) // length of list should not be modified by wrapper - for _, i := range combinedIdentities { - require.False(s.T(), i.Ejected) // Ejected flag should still have the original value (false here) - } - }) - - // this tests the edge case where the Identities func is invoked with the p2p.NotEjectedFilter. Block listed - // nodes are expected to be filtered from the identity list returned after setting the ejected field. 
- s.Run("Identities(p2p.NotEjectedFilter) should not return block listed nodes", func() { - blocklistLookup := blocklist.Lookup() - honestIdentities := unittest.IdentityListFixture(8) - combinedIdentities := honestIdentities.Union(blocklist) - combinedIdentities = combinedIdentities.DeterministicShuffle(1234) - numIdentities := len(combinedIdentities) - - s.provider.On("Identities", mock.Anything).Return(combinedIdentities) - - identities := s.wrapper.Identities(p2p.NotEjectedFilter) - - require.Equal(s.T(), len(honestIdentities), len(identities)) // expected only honest nodes to be returned - for _, i := range identities { - _, isBlocked := blocklistLookup[i.NodeID] - require.False(s.T(), isBlocked) - require.False(s.T(), i.Ejected) - } - - // check that original `combinedIdentities` returned by `IdentityProvider` are _not_ modified by wrapper - require.Equal(s.T(), numIdentities, len(combinedIdentities)) // length of list should not be modified by wrapper - for _, i := range combinedIdentities { - require.False(s.T(), i.Ejected) // Ejected flag should still have the original value (false here) - } - }) -} - -// TestUnknownNode verifies that the wrapper forwards nil identities -// irrespective of the boolean return values. -func (s *NodeBlocklistWrapperTestSuite) TestUnknownNode() { - for _, b := range []bool{true, false} { - s.Run(fmt.Sprintf("IdentityProvider.ByNodeID returning (nil, %v)", b), func() { - id := unittest.IdentifierFixture() - s.provider.On("ByNodeID", id).Return(nil, b) - - identity, found := s.wrapper.ByNodeID(id) - require.Equal(s.T(), b, found) - require.Nil(s.T(), identity) - }) - - s.Run(fmt.Sprintf("IdentityProvider.ByPeerID returning (nil, %v)", b), func() { - peerID := (peer.ID)(unittest.IdentifierFixture().String()) - s.provider.On("ByPeerID", peerID).Return(nil, b) - - identity, found := s.wrapper.ByPeerID(peerID) - require.Equal(s.T(), b, found) - require.Nil(s.T(), identity) - }) - } -} - -// TestBlocklistAddRemove checks that adding and subsequently removing a node from the blocklist -// it in combination a no-op. We test two scenarious -// - Node whose original `Identity` has `Ejected = false`: -// After adding the node to the blocklist and then removing it again, the `Ejected` should be false. -// - Node whose original `Identity` has `Ejected = true`: -// After adding the node to the blocklist and then removing it again, the `Ejected` should be still be true. 
-func (s *NodeBlocklistWrapperTestSuite) TestBlocklistAddRemove() { - for _, originalEjected := range []bool{true, false} { - s.Run(fmt.Sprintf("Add & remove node with Ejected = %v", originalEjected), func() { - originalIdentity := unittest.IdentityFixture() - originalIdentity.Ejected = originalEjected - peerID := (peer.ID)(originalIdentity.NodeID.String()) - s.provider.On("ByNodeID", originalIdentity.NodeID).Return(originalIdentity, true) - s.provider.On("ByPeerID", peerID).Return(originalIdentity, true) - - // step 1: before putting node on blocklist, - // an Identity with `Ejected` equal to the original value should be returned - i, found := s.wrapper.ByNodeID(originalIdentity.NodeID) - require.True(s.T(), found) - require.Equal(s.T(), originalEjected, i.Ejected) - - i, found = s.wrapper.ByPeerID(peerID) - require.True(s.T(), found) - require.Equal(s.T(), originalEjected, i.Ejected) - - // step 2: _after_ putting node on blocklist, - // an Identity with `Ejected` equal to `true` should be returned - s.distributor.On("DistributeBlockListNotification", flow.IdentifierList{originalIdentity.NodeID}).Return(nil).Once() - err := s.wrapper.Update(flow.IdentifierList{originalIdentity.NodeID}) - require.NoError(s.T(), err) - - i, found = s.wrapper.ByNodeID(originalIdentity.NodeID) - require.True(s.T(), found) - require.True(s.T(), i.Ejected) - - i, found = s.wrapper.ByPeerID(peerID) - require.True(s.T(), found) - require.True(s.T(), i.Ejected) - - // step 3: after removing the node from the blocklist, - // an Identity with `Ejected` equal to the original value should be returned - s.distributor.On("DistributeBlockListNotification", flow.IdentifierList{}).Return(nil).Once() - err = s.wrapper.Update(flow.IdentifierList{}) - require.NoError(s.T(), err) - - i, found = s.wrapper.ByNodeID(originalIdentity.NodeID) - require.True(s.T(), found) - require.Equal(s.T(), originalEjected, i.Ejected) - - i, found = s.wrapper.ByPeerID(peerID) - require.True(s.T(), found) - require.Equal(s.T(), originalEjected, i.Ejected) - }) - } -} - -// TestUpdate tests updating, clearing and retrieving the blocklist. -// This test verifies that the wrapper updates _its own internal state_ correctly. -// Note: -// conceptually, the blocklist is a set, i.e. not order dependent. -// The wrapper internally converts the list to a set and vice versa. Therefore -// the order is not preserved by `GetBlocklist`. Consequently, we compare -// map-based representations here. 
-func (s *NodeBlocklistWrapperTestSuite) TestUpdate() { - blocklist1 := unittest.IdentifierListFixture(8) - blocklist2 := unittest.IdentifierListFixture(11) - blocklist3 := unittest.IdentifierListFixture(5) - - s.distributor.On("DistributeBlockListNotification", blocklist1).Return(nil).Once() - err := s.wrapper.Update(blocklist1) - require.NoError(s.T(), err) - require.Equal(s.T(), blocklist1.Lookup(), s.wrapper.GetBlocklist().Lookup()) - - s.distributor.On("DistributeBlockListNotification", blocklist2).Return(nil).Once() - err = s.wrapper.Update(blocklist2) - require.NoError(s.T(), err) - require.Equal(s.T(), blocklist2.Lookup(), s.wrapper.GetBlocklist().Lookup()) - - s.distributor.On("DistributeBlockListNotification", (flow.IdentifierList)(nil)).Return(nil).Once() - err = s.wrapper.ClearBlocklist() - require.NoError(s.T(), err) - require.Empty(s.T(), s.wrapper.GetBlocklist()) - - s.distributor.On("DistributeBlockListNotification", blocklist3).Return(nil).Once() - err = s.wrapper.Update(blocklist3) - require.NoError(s.T(), err) - require.Equal(s.T(), blocklist3.Lookup(), s.wrapper.GetBlocklist().Lookup()) -} - -// TestDataBasePersist verifies database interactions of the wrapper with the data base. -// This test verifies that the blocklist updates are persisted across restarts. -// To decouple this test from the lower-level data base design, we proceed as follows: -// - We do data-base operation through the exported methods from `NodeBlocklistWrapper` -// - Then, we create a new `NodeBlocklistWrapper` backed by the same data base. Since it is a -// new wrapper, it must read its state from the data base. Hence, if the new wrapper returns -// the correct data, we have strong evidence that data-base interactions are correct. -// -// Note: The wrapper internally converts the list to a set and vice versa. Therefore -// the order is not preserved by `GetBlocklist`. Consequently, we compare -// map-based representations here. 
-func (s *NodeBlocklistWrapperTestSuite) TestDataBasePersist() { - blocklist := unittest.IdentifierListFixture(8) - blocklist2 := unittest.IdentifierListFixture(8) - - s.Run("Get blocklist from empty database", func() { - require.Empty(s.T(), s.wrapper.GetBlocklist()) - }) - - s.Run("Clear blocklist on empty database", func() { - s.distributor.On("DistributeBlockListNotification", (flow.IdentifierList)(nil)).Return(nil).Once() - err := s.wrapper.ClearBlocklist() // No-op as data base does not contain any block list - require.NoError(s.T(), err) - require.Empty(s.T(), s.wrapper.GetBlocklist()) - - // newly created wrapper should read `blocklist` from data base during initialization - w, err := cache.NewNodeBlocklistWrapper(s.provider, s.DB, s.distributor) - require.NoError(s.T(), err) - require.Empty(s.T(), w.GetBlocklist()) - }) - - s.Run("Update blocklist and init new wrapper from database", func() { - s.distributor.On("DistributeBlockListNotification", blocklist).Return(nil).Once() - err := s.wrapper.Update(blocklist) - require.NoError(s.T(), err) - - // newly created wrapper should read `blocklist` from data base during initialization - w, err := cache.NewNodeBlocklistWrapper(s.provider, s.DB, s.distributor) - require.NoError(s.T(), err) - require.Equal(s.T(), blocklist.Lookup(), w.GetBlocklist().Lookup()) - }) - - s.Run("Update and overwrite blocklist and then init new wrapper from database", func() { - s.distributor.On("DistributeBlockListNotification", blocklist).Return(nil).Once() - err := s.wrapper.Update(blocklist) - require.NoError(s.T(), err) - - s.distributor.On("DistributeBlockListNotification", blocklist2).Return(nil).Once() - err = s.wrapper.Update(blocklist2) - require.NoError(s.T(), err) - - // newly created wrapper should read initial state from data base - w, err := cache.NewNodeBlocklistWrapper(s.provider, s.DB, s.distributor) - require.NoError(s.T(), err) - require.Equal(s.T(), blocklist2.Lookup(), w.GetBlocklist().Lookup()) - }) - - s.Run("Update & clear & update and then init new wrapper from database", func() { - // set blocklist -> - // newly created wrapper should now read this list from data base during initialization - s.distributor.On("DistributeBlockListNotification", blocklist).Return(nil).Once() - err := s.wrapper.Update(blocklist) - require.NoError(s.T(), err) - - w0, err := cache.NewNodeBlocklistWrapper(s.provider, s.DB, s.distributor) - require.NoError(s.T(), err) - require.Equal(s.T(), blocklist.Lookup(), w0.GetBlocklist().Lookup()) - - // clear blocklist -> - // newly created wrapper should now read empty blocklist from data base during initialization - s.distributor.On("DistributeBlockListNotification", (flow.IdentifierList)(nil)).Return(nil).Once() - err = s.wrapper.ClearBlocklist() - require.NoError(s.T(), err) - - w1, err := cache.NewNodeBlocklistWrapper(s.provider, s.DB, s.distributor) - require.NoError(s.T(), err) - require.Empty(s.T(), w1.GetBlocklist()) - - // set blocklist2 -> - // newly created wrapper should now read this list from data base during initialization - s.distributor.On("DistributeBlockListNotification", blocklist2).Return(nil).Once() - err = s.wrapper.Update(blocklist2) - require.NoError(s.T(), err) - - w2, err := cache.NewNodeBlocklistWrapper(s.provider, s.DB, s.distributor) - require.NoError(s.T(), err) - require.Equal(s.T(), blocklist2.Lookup(), w2.GetBlocklist().Lookup()) - }) -} diff --git a/network/p2p/cache/node_disallow_list_wrapper.go b/network/p2p/cache/node_disallow_list_wrapper.go new file mode 100644 index 
00000000000..ac7edde9be5 --- /dev/null +++ b/network/p2p/cache/node_disallow_list_wrapper.go @@ -0,0 +1,202 @@ +package cache + +import ( + "fmt" + "sync" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/store" +) + +// IdentifierSet represents a set of node IDs (operator-defined) whose communication should be blocked. +type IdentifierSet map[flow.Identifier]struct{} + +// Contains returns true iff id ∈ s +func (s IdentifierSet) Contains(id flow.Identifier) bool { + _, found := s[id] + return found +} + +// NodeDisallowListWrapper is a wrapper for an `module.IdentityProvider` instance, where the +// wrapper overrides the `Ejected` flag to true for all NodeIDs in a `disallowList`. +// To avoid modifying the source of the identities, the wrapper creates shallow copies +// of the identities (whenever necessary) and modifies the `Ejected` flag only in +// the copy. +// The `NodeDisallowListWrapper` internally represents the `disallowList` as a map, to enable +// performant lookup. However, the exported API works with `flow.IdentifierList` for +// disallowList, as this is a broadly supported data structure which lends itself better +// to config or command-line inputs. +// When a node is disallow-listed, the networking layer connection to that node is closed and no +// incoming or outgoing connections are established with that node. +type NodeDisallowListWrapper struct { + m sync.RWMutex + nodeDisallowListStore storage.NodeDisallowList + + identityProvider module.IdentityProvider + disallowList IdentifierSet // `IdentifierSet` is a map, hence efficient O(1) lookup + + // updateConsumerOracle is called whenever the disallow-list is updated. + // Note that we do not use the `updateConsumer` directly due to the circular dependency between the + // networking layer Underlay interface (i.e., updateConsumer), and the wrapper (i.e., NodeDisallowListingWrapper). + // Underlay needs identity provider to be initialized, and identity provider needs this wrapper to be initialized. + // Hence, if we pass the updateConsumer by the interface value, it will be nil at the time of initialization. + // Instead, we use the oracle function to get the updateConsumer whenever we need it. + updateConsumerOracle func() network.DisallowListNotificationConsumer +} + +var _ module.IdentityProvider = (*NodeDisallowListWrapper)(nil) + +// NewNodeDisallowListWrapper wraps the given `IdentityProvider`. The disallow-list is +// loaded from the database (or assumed to be empty if no database entry is present). +func NewNodeDisallowListWrapper( + identityProvider module.IdentityProvider, + db storage.DB, + updateConsumerOracle func() network.DisallowListNotificationConsumer, +) (*NodeDisallowListWrapper, error) { + nodeDisallowListStore := store.NewNodeDisallowList(db) + + var disallowList map[flow.Identifier]struct{} + err := nodeDisallowListStore.Retrieve(&disallowList) + if err != nil { + return nil, fmt.Errorf("failed to read set of disallowed node IDs from data base: %w", err) + } + + return &NodeDisallowListWrapper{ + nodeDisallowListStore: nodeDisallowListStore, + identityProvider: identityProvider, + disallowList: disallowList, + updateConsumerOracle: updateConsumerOracle, + }, nil +} + +// Update sets the wrapper's internal set of blocked nodes to `disallowList`. Empty list and `nil` +// (equivalent to empty list) are accepted inputs. 
To avoid legacy entries in the database, this +// function purges the entire data base entry if `disallowList` is empty. +// This implementation is _eventually consistent_, where changes are written to the database first +// and then (non-atomically!) the in-memory set of blocked nodes is updated. This strongly +// benefits performance and modularity. No errors are expected during normal operations. +// +// Args: +// - disallowList: list of node IDs to be disallow-listed from the networking layer, i.e., the existing connections +// to these nodes will be closed and no new connections will be established (neither incoming nor outgoing). +// +// Returns: +// - error: if the update fails, e.g., due to a database error. Any returned error is irrecoverable and the caller +// should abort the process. +func (w *NodeDisallowListWrapper) Update(disallowList flow.IdentifierList) error { + b := disallowList.Lookup() // converts slice to map + + w.m.Lock() + defer w.m.Unlock() + err := w.nodeDisallowListStore.Store(b) + if err != nil { + return fmt.Errorf("failed to persist set of blocked nodes to the data base: %w", err) + } + w.disallowList = b + w.updateConsumerOracle().OnDisallowListNotification(&network.DisallowListingUpdate{ + FlowIds: disallowList, + Cause: network.DisallowListedCauseAdmin, + }) + + return nil +} + +// ClearDisallowList purges the set of blocked node IDs. Convenience function +// equivalent to w.Update(nil). No errors are expected during normal operations. +func (w *NodeDisallowListWrapper) ClearDisallowList() error { + return w.Update(nil) +} + +// GetDisallowList returns the set of blocked node IDs. +func (w *NodeDisallowListWrapper) GetDisallowList() flow.IdentifierList { + w.m.RLock() + defer w.m.RUnlock() + + identifiers := make(flow.IdentifierList, 0, len(w.disallowList)) + for i := range w.disallowList { + identifiers = append(identifiers, i) + } + return identifiers +} + +// Identities returns the full identities of _all_ nodes currently known to the +// protocol that pass the provided filter. Caution, this includes ejected nodes. +// Please check the `Ejected` flag in the returned identities (or provide a +// filter for removing ejected nodes). +func (w *NodeDisallowListWrapper) Identities(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { + identities := w.identityProvider.Identities(filter) + if len(identities) == 0 { + return identities + } + + // Iterate over all returned identities and set the `EpochParticipationStatus` to `flow.EpochParticipationStatusEjected`. + // We copy both the return slice and identities of blocked nodes to avoid + // any possibility of accidentally modifying the wrapped IdentityProvider + idtx := make(flow.IdentityList, 0, len(identities)) + w.m.RLock() + for _, identity := range identities { + if w.disallowList.Contains(identity.NodeID) { + var i = *identity // shallow copy is sufficient, because `EpochParticipationStatus` is a value type in DynamicIdentity which is also a value type. + i.EpochParticipationStatus = flow.EpochParticipationStatusEjected + if filter(&i) { // we need to check the filter here again, because the filter might drop ejected nodes and we are modifying the ejected status here + idtx = append(idtx, &i) + } + } else { + idtx = append(idtx, identity) + } + } + w.m.RUnlock() + return idtx +} + +// ByNodeID returns the full identity for the node with the given Identifier, +// where Identifier is the way the protocol refers to the node. 
The function +// has the same semantics as a map lookup, where the boolean return value is +// true if and only if Identity has been found, i.e. `Identity` is not nil. +// Caution: function returns include ejected nodes. Please check the `Ejected` +// flag in the identity. +func (w *NodeDisallowListWrapper) ByNodeID(identifier flow.Identifier) (*flow.Identity, bool) { + identity, b := w.identityProvider.ByNodeID(identifier) + return w.setEjectedIfBlocked(identity), b +} + +// setEjectedIfBlocked checks whether the node with the given identity is on the `disallowList`. +// Shortcuts: +// - If the node's identity is nil, there is nothing to do because we don't generate identities here. +// - If the node is already ejected, we don't have to check the disallowList. +func (w *NodeDisallowListWrapper) setEjectedIfBlocked(identity *flow.Identity) *flow.Identity { + if identity == nil || identity.IsEjected() { + return identity + } + + w.m.RLock() + isBlocked := w.disallowList.Contains(identity.NodeID) + w.m.RUnlock() + if !isBlocked { + return identity + } + + // For blocked nodes, we want to return their `Identity` with the `EpochParticipationStatus` + // set to `flow.EpochParticipationStatusEjected`. + // Caution: we need to copy the `Identity` before we override `EpochParticipationStatus`, as we + // would otherwise potentially change the wrapped IdentityProvider. + var i = *identity // shallow copy is sufficient, because `EpochParticipationStatus` is a value type in DynamicIdentity which is also a value type. + i.EpochParticipationStatus = flow.EpochParticipationStatusEjected + return &i +} + +// ByPeerID returns the full identity for the node with the given peer ID, +// peer.ID is the libp2p-level identifier of a Flow node. The function +// has the same semantics as a map lookup, where the boolean return value is +// true if and only if Identity has been found, i.e. `Identity` is not nil. +// Caution: function returns include ejected nodes. Please check the `Ejected` +// flag in the identity. 
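The `updateConsumerOracle` field documented in the struct above exists to break an initialization cycle: the consumer (the networking Underlay) is not yet constructed when this wrapper is built, so the wrapper holds a function that resolves the consumer at call time instead of capturing a nil interface value. A minimal sketch of that late-binding pattern, using hypothetical toy types rather than the real flow-go interfaces:

package main

import "fmt"

type Consumer interface{ OnUpdate(ids []string) }

type printConsumer struct{}

func (printConsumer) OnUpdate(ids []string) { fmt.Println("disallow-listed:", ids) }

type wrapper struct {
	// consumerOracle is resolved lazily, so the wrapper can be constructed
	// before the consumer exists.
	consumerOracle func() Consumer
}

func (w *wrapper) Update(ids []string) {
	// The consumer is fetched at call time; a value captured at construction
	// time would still be nil.
	w.consumerOracle().OnUpdate(ids)
}

func main() {
	var c Consumer // not yet initialized when the wrapper is built
	w := &wrapper{consumerOracle: func() Consumer { return c }}
	c = printConsumer{} // the consumer becomes available later
	w.Update([]string{"node-1"})
}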
+func (w *NodeDisallowListWrapper) ByPeerID(p peer.ID) (*flow.Identity, bool) {
+	identity, b := w.identityProvider.ByPeerID(p)
+	return w.setEjectedIfBlocked(identity), b
+}
diff --git a/network/p2p/cache/node_disallow_list_wrapper_test.go b/network/p2p/cache/node_disallow_list_wrapper_test.go
new file mode 100644
index 00000000000..3f6c62bd6d7
--- /dev/null
+++ b/network/p2p/cache/node_disallow_list_wrapper_test.go
@@ -0,0 +1,460 @@
+package cache_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/filter"
+	mocks "github.com/onflow/flow-go/module/mock"
+	"github.com/onflow/flow-go/network"
+	mocknetwork "github.com/onflow/flow-go/network/mock"
+	"github.com/onflow/flow-go/network/p2p/cache"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation/badgerimpl"
+	"github.com/onflow/flow-go/storage/operation/pebbleimpl"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+type NodeDisallowListWrapperTestSuite struct {
+	suite.Suite
+	DB       storage.DB
+	provider *mocks.IdentityProvider
+
+	wrapper        *cache.NodeDisallowListWrapper
+	updateConsumer *mocknetwork.DisallowListNotificationConsumer
+}
+
+func newNodeDisallowListWrapperTestSuite(db storage.DB) *NodeDisallowListWrapperTestSuite {
+	return &NodeDisallowListWrapperTestSuite{
+		DB: db,
+	}
+}
+
+func (s *NodeDisallowListWrapperTestSuite) SetupTest() {
+	s.provider = new(mocks.IdentityProvider)
+
+	var err error
+	s.updateConsumer = mocknetwork.NewDisallowListNotificationConsumer(s.T())
+	s.wrapper, err = cache.NewNodeDisallowListWrapper(s.provider, s.DB, func() network.DisallowListNotificationConsumer {
+		return s.updateConsumer
+	})
+	require.NoError(s.T(), err)
+}
+
+func TestNodeDisallowListWrapperWithBadgerTestSuite(t *testing.T) {
+	bdb, _ := unittest.TempBadgerDB(t)
+	suite.Run(t, newNodeDisallowListWrapperTestSuite(badgerimpl.ToDB(bdb)))
+}
+
+func TestNodeDisallowListWrapperWithPebbleTestSuite(t *testing.T) {
+	pdb, _ := unittest.TempPebbleDB(t)
+	suite.Run(t, newNodeDisallowListWrapperTestSuite(pebbleimpl.ToDB(pdb)))
+}
+
+// TestHonestNode verifies:
+// For nodes _not_ on the disallowList, the `cache.NodeDisallowListWrapper` should forward
+// the identities from the wrapped `IdentityProvider` without modification.
+func (s *NodeDisallowListWrapperTestSuite) TestHonestNode() {
+	s.Run("ByNodeID", func() {
+		identity := unittest.IdentityFixture()
+		s.provider.On("ByNodeID", identity.NodeID).Return(identity, true)
+
+		i, found := s.wrapper.ByNodeID(identity.NodeID)
+		require.True(s.T(), found)
+		require.Equal(s.T(), i, identity)
+	})
+	s.Run("ByPeerID", func() {
+		identity := unittest.IdentityFixture()
+		peerID := (peer.ID)("some_peer_ID")
+		s.provider.On("ByPeerID", peerID).Return(identity, true)
+
+		i, found := s.wrapper.ByPeerID(peerID)
+		require.True(s.T(), found)
+		require.Equal(s.T(), i, identity)
+	})
+	s.Run("Identities", func() {
+		identities := unittest.IdentityListFixture(11)
+		f := filter.In(identities[3:4])
+		expectedFilteredIdentities := identities.Filter(f)
+		s.provider.On("Identities", mock.Anything).Return(
+			func(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList {
+				return identities.Filter(filter)
+			},
+			nil,
+		)
+		require.Equal(s.T(), expectedFilteredIdentities, s.wrapper.Identities(f))
+	})
+}
+
+// TestDisallowListNode tests proper handling of identities _on_ the disallowList:
+// - For any identity `i` with `i.NodeID ∈ disallowList`, the returned identity
+// should have `i.Ejected` set to `true` (irrespective of the `Ejected`
+// flag's initial value returned by the wrapped `IdentityProvider`).
+// - The wrapper should _copy_ the identity and _not_ write into the wrapped
+// IdentityProvider's memory.
+// - For `IdentityProvider.ByNodeID` and `IdentityProvider.ByPeerID`:
+// whether or not the wrapper modifies the `Ejected` flag should depend only
+// on the NodeID of the returned identity, irrespective of the second return
+// value (boolean).
+// While returning (non-nil identity, false) is not a defined return value,
+// we expect the wrapper to nevertheless handle this case to increase its
+// generality.
+func (s *NodeDisallowListWrapperTestSuite) TestDisallowListNode() {
+	disallowlist := unittest.IdentityListFixture(11)
+	s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{
+		FlowIds: disallowlist.NodeIDs(),
+		Cause:   network.DisallowListedCauseAdmin,
+	}).Return().Once()
+	err := s.wrapper.Update(disallowlist.NodeIDs())
+	require.NoError(s.T(), err)
+
+	index := atomic.NewInt32(0)
+	for _, b := range []bool{true, false} {
+		expectedfound := b
+
+		s.Run(fmt.Sprintf("IdentityProvider.ByNodeID returning (<non-nil identity>, %v)", expectedfound), func() {
+			originalIdentity := disallowlist[index.Inc()]
+			s.provider.On("ByNodeID", originalIdentity.NodeID).Return(originalIdentity, expectedfound)
+
+			var expectedIdentity = *originalIdentity // expected Identity is a copy of the original
+			expectedIdentity.EpochParticipationStatus = flow.EpochParticipationStatusEjected // with the participation status set to ejected
+
+			i, found := s.wrapper.ByNodeID(originalIdentity.NodeID)
+			require.Equal(s.T(), expectedfound, found)
+			require.Equal(s.T(), &expectedIdentity, i)
+
+			// check that originalIdentity returned by wrapped `IdentityProvider` is _not_ modified
+			require.False(s.T(), originalIdentity.IsEjected())
+		})
+
+		s.Run(fmt.Sprintf("IdentityProvider.ByPeerID returning (<non-nil identity>, %v)", expectedfound), func() {
+			originalIdentity := disallowlist[index.Inc()]
+			peerID := (peer.ID)(originalIdentity.NodeID.String())
+			s.provider.On("ByPeerID", peerID).Return(originalIdentity, expectedfound)
+
+			var expectedIdentity = *originalIdentity // expected Identity is a copy of the original
+			expectedIdentity.EpochParticipationStatus = flow.EpochParticipationStatusEjected // with the participation status set to ejected
+
+			i, found := s.wrapper.ByPeerID(peerID)
+			require.Equal(s.T(), expectedfound, found)
+			require.Equal(s.T(), &expectedIdentity, i)
+
+			// check that originalIdentity returned by `IdentityProvider` is _not_ modified by wrapper
+			require.False(s.T(), originalIdentity.IsEjected())
+		})
+	}
+
+	s.Run("Identities", func() {
+		disallowlistLookup := disallowlist.Lookup()
+		honestIdentities := unittest.IdentityListFixture(8)
+		combinedIdentities := honestIdentities.Union(disallowlist)
+		combinedIdentities, err = combinedIdentities.Shuffle()
+		require.NoError(s.T(), err)
+		numIdentities := len(combinedIdentities)
+
+		s.provider.On("Identities", mock.Anything).Return(combinedIdentities)
+
+		noFilter := filter.Not(filter.In[flow.Identity](nil))
+		identities := s.wrapper.Identities(noFilter)
+
+		require.Equal(s.T(), numIdentities, len(identities)) // expected number of resulting identities
+		for _, i := range identities {
+			_, isBlocked := disallowlistLookup[i.NodeID]
+			require.Equal(s.T(), isBlocked, i.IsEjected())
+		}
+
+		// check that original `combinedIdentities` returned by `IdentityProvider` are _not_ modified by wrapper
+		require.Equal(s.T(), numIdentities, len(combinedIdentities)) // length of list should not be modified by wrapper
+		for _, i := range combinedIdentities {
+			require.False(s.T(), i.IsEjected()) // Ejected flag should still have the original value (false here)
+		}
+	})
+
+	// this tests the edge case where the Identities func is invoked with filter.NotEjectedFilter. Disallow-listed
+	// nodes are expected to be filtered from the identity list returned after setting the ejected field.
+ s.Run("Identities(p2p.NotEjectedFilter) should not return block listed nodes", func() { + disallowlistLookup := disallowlist.Lookup() + honestIdentities := unittest.IdentityListFixture(8) + combinedIdentities := honestIdentities.Union(disallowlist) + combinedIdentities, err = combinedIdentities.Shuffle() + require.NoError(s.T(), err) + numIdentities := len(combinedIdentities) + + s.provider.On("Identities", mock.Anything).Return(combinedIdentities) + + identities := s.wrapper.Identities(filter.NotEjectedFilter) + + require.Equal(s.T(), len(honestIdentities), len(identities)) // expected only honest nodes to be returned + for _, i := range identities { + _, isBlocked := disallowlistLookup[i.NodeID] + require.False(s.T(), isBlocked) + require.False(s.T(), i.IsEjected()) + } + + // check that original `combinedIdentities` returned by `IdentityProvider` are _not_ modified by wrapper + require.Equal(s.T(), numIdentities, len(combinedIdentities)) // length of list should not be modified by wrapper + for _, i := range combinedIdentities { + require.False(s.T(), i.IsEjected()) // Ejected flag should still have the original value (false here) + } + }) +} + +// TestUnknownNode verifies that the wrapper forwards nil identities +// irrespective of the boolean return values. +func (s *NodeDisallowListWrapperTestSuite) TestUnknownNode() { + for _, b := range []bool{true, false} { + s.Run(fmt.Sprintf("IdentityProvider.ByNodeID returning (nil, %v)", b), func() { + id := unittest.IdentifierFixture() + s.provider.On("ByNodeID", id).Return(nil, b) + + identity, found := s.wrapper.ByNodeID(id) + require.Equal(s.T(), b, found) + require.Nil(s.T(), identity) + }) + + s.Run(fmt.Sprintf("IdentityProvider.ByPeerID returning (nil, %v)", b), func() { + peerID := (peer.ID)(unittest.IdentifierFixture().String()) + s.provider.On("ByPeerID", peerID).Return(nil, b) + + identity, found := s.wrapper.ByPeerID(peerID) + require.Equal(s.T(), b, found) + require.Nil(s.T(), identity) + }) + } +} + +// TestDisallowListAddRemove checks that adding and subsequently removing a node from the disallowList +// it in combination a no-op. We test two scenarious +// - Node whose original `Identity` has `Ejected = false`: +// After adding the node to the disallowList and then removing it again, the `Ejected` should be false. +// - Node whose original `Identity` has `EpochParticipationStatus = flow.EpochParticipationStatusEjected`: +// After adding the node to the disallowList and then removing it again, the `Ejected` should be still be true. 
+func (s *NodeDisallowListWrapperTestSuite) TestDisallowListAddRemove() { + for _, originalParticipationStatus := range []flow.EpochParticipationStatus{flow.EpochParticipationStatusEjected, flow.EpochParticipationStatusActive} { + s.Run(fmt.Sprintf("Add & remove node with EpochParticipationStatus = %v", originalParticipationStatus), func() { + originalIdentity := unittest.IdentityFixture() + originalIdentity.EpochParticipationStatus = originalParticipationStatus + peerID := (peer.ID)(originalIdentity.NodeID.String()) + s.provider.On("ByNodeID", originalIdentity.NodeID).Return(originalIdentity, true) + s.provider.On("ByPeerID", peerID).Return(originalIdentity, true) + + // step 1: before putting node on disallowList, + // an Identity with `Ejected` equal to the original value should be returned + i, found := s.wrapper.ByNodeID(originalIdentity.NodeID) + require.True(s.T(), found) + require.Equal(s.T(), originalParticipationStatus, i.EpochParticipationStatus) + + i, found = s.wrapper.ByPeerID(peerID) + require.True(s.T(), found) + require.Equal(s.T(), originalParticipationStatus, i.EpochParticipationStatus) + + // step 2: _after_ putting node on disallowList, + // an Identity with `Ejected` equal to `true` should be returned + s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{ + FlowIds: flow.IdentifierList{originalIdentity.NodeID}, + Cause: network.DisallowListedCauseAdmin, + }).Return().Once() + err := s.wrapper.Update(flow.IdentifierList{originalIdentity.NodeID}) + require.NoError(s.T(), err) + + i, found = s.wrapper.ByNodeID(originalIdentity.NodeID) + require.True(s.T(), found) + require.True(s.T(), i.IsEjected()) + + i, found = s.wrapper.ByPeerID(peerID) + require.True(s.T(), found) + require.True(s.T(), i.IsEjected()) + + // step 3: after removing the node from the disallowList, + // an Identity with `Ejected` equal to the original value should be returned + s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{ + FlowIds: flow.IdentifierList{}, + Cause: network.DisallowListedCauseAdmin, + }).Return().Once() + err = s.wrapper.Update(flow.IdentifierList{}) + require.NoError(s.T(), err) + + i, found = s.wrapper.ByNodeID(originalIdentity.NodeID) + require.True(s.T(), found) + require.Equal(s.T(), originalParticipationStatus, i.EpochParticipationStatus) + + i, found = s.wrapper.ByPeerID(peerID) + require.True(s.T(), found) + require.Equal(s.T(), originalParticipationStatus, i.EpochParticipationStatus) + }) + } +} + +// TestUpdate tests updating, clearing and retrieving the disallowList. +// This test verifies that the wrapper updates _its own internal state_ correctly. +// Note: +// conceptually, the disallowList is a set, i.e. not order dependent. +// The wrapper internally converts the list to a set and vice versa. Therefore +// the order is not preserved by `GetDisallowList`. Consequently, we compare +// map-based representations here. 
+func (s *NodeDisallowListWrapperTestSuite) TestUpdate() {
+	disallowList1 := unittest.IdentifierListFixture(8)
+	disallowList2 := unittest.IdentifierListFixture(11)
+	disallowList3 := unittest.IdentifierListFixture(5)
+
+	s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{
+		FlowIds: disallowList1,
+		Cause:   network.DisallowListedCauseAdmin,
+	}).Return().Once()
+	err := s.wrapper.Update(disallowList1)
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), disallowList1.Lookup(), s.wrapper.GetDisallowList().Lookup())
+
+	s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{
+		FlowIds: disallowList2,
+		Cause:   network.DisallowListedCauseAdmin,
+	}).Return().Once()
+	err = s.wrapper.Update(disallowList2)
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), disallowList2.Lookup(), s.wrapper.GetDisallowList().Lookup())
+
+	s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{
+		FlowIds: nil,
+		Cause:   network.DisallowListedCauseAdmin,
+	}).Return().Once()
+	err = s.wrapper.ClearDisallowList()
+	require.NoError(s.T(), err)
+	require.Empty(s.T(), s.wrapper.GetDisallowList())
+
+	s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{
+		FlowIds: disallowList3,
+		Cause:   network.DisallowListedCauseAdmin,
+	}).Return().Once()
+	err = s.wrapper.Update(disallowList3)
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), disallowList3.Lookup(), s.wrapper.GetDisallowList().Lookup())
+}
+
+// TestDataBasePersist verifies the database interactions of the wrapper.
+// This test verifies that the disallowList updates are persisted across restarts.
+// To decouple this test from the lower-level database design, we proceed as follows:
+// - We do database operations through the exported methods from `NodeDisallowListWrapper`
+// - Then, we create a new `NodeDisallowListWrapper` backed by the same database. Since it is a
+//   new wrapper, it must read its state from the database. Hence, if the new wrapper returns
+//   the correct data, we have strong evidence that database interactions are correct.
+//
+// Note: The wrapper internally converts the list to a set and vice versa. Therefore
+// the order is not preserved by `GetDisallowList`. Consequently, we compare
+// map-based representations here.
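As the note above says, `GetDisallowList` does not preserve order, so the tests compare `Lookup()` results rather than slices. A tiny stand-alone sketch of this order-insensitive, set-based comparison (plain strings stand in for `flow.Identifier`):

package main

import (
	"fmt"
	"maps"
)

// lookup mirrors flow.IdentifierList.Lookup(): it converts an ordered list
// into a set, discarding order and duplicates.
func lookup(ids []string) map[string]struct{} {
	set := make(map[string]struct{}, len(ids))
	for _, id := range ids {
		set[id] = struct{}{}
	}
	return set
}

func main() {
	a := []string{"a", "b", "c"}
	b := []string{"c", "a", "b"}
	// Order-insensitive equality: the two lists represent the same set.
	fmt.Println(maps.Equal(lookup(a), lookup(b))) // true
}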
+func (s *NodeDisallowListWrapperTestSuite) TestDataBasePersist() { + disallowList1 := unittest.IdentifierListFixture(8) + disallowList2 := unittest.IdentifierListFixture(8) + + s.Run("Get disallowList from empty database", func() { + require.Empty(s.T(), s.wrapper.GetDisallowList()) + }) + + s.Run("Clear disallow-list on empty database", func() { + s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{ + FlowIds: nil, + Cause: network.DisallowListedCauseAdmin, + }).Return().Once() + err := s.wrapper.ClearDisallowList() // No-op as data base does not contain any block list + require.NoError(s.T(), err) + require.Empty(s.T(), s.wrapper.GetDisallowList()) + + // newly created wrapper should read `disallowList` from data base during initialization + w, err := cache.NewNodeDisallowListWrapper(s.provider, s.DB, func() network.DisallowListNotificationConsumer { + return s.updateConsumer + }) + require.NoError(s.T(), err) + require.Empty(s.T(), w.GetDisallowList()) + }) + + s.Run("Update disallowList and init new wrapper from database", func() { + s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{ + FlowIds: disallowList1, + Cause: network.DisallowListedCauseAdmin, + }).Return().Once() + err := s.wrapper.Update(disallowList1) + require.NoError(s.T(), err) + + // newly created wrapper should read `disallowList` from data base during initialization + w, err := cache.NewNodeDisallowListWrapper(s.provider, s.DB, func() network.DisallowListNotificationConsumer { + return s.updateConsumer + }) + require.NoError(s.T(), err) + require.Equal(s.T(), disallowList1.Lookup(), w.GetDisallowList().Lookup()) + }) + + s.Run("Update and overwrite disallowList and then init new wrapper from database", func() { + s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{ + FlowIds: disallowList1, + Cause: network.DisallowListedCauseAdmin, + }).Return().Once() + err := s.wrapper.Update(disallowList1) + require.NoError(s.T(), err) + + s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{ + FlowIds: disallowList2, + Cause: network.DisallowListedCauseAdmin, + }).Return().Once() + err = s.wrapper.Update(disallowList2) + require.NoError(s.T(), err) + + // newly created wrapper should read initial state from data base + w, err := cache.NewNodeDisallowListWrapper(s.provider, s.DB, func() network.DisallowListNotificationConsumer { + return s.updateConsumer + }) + require.NoError(s.T(), err) + require.Equal(s.T(), disallowList2.Lookup(), w.GetDisallowList().Lookup()) + }) + + s.Run("Update & clear & update and then init new wrapper from database", func() { + // set disallowList -> + // newly created wrapper should now read this list from data base during initialization + s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{ + FlowIds: disallowList1, + Cause: network.DisallowListedCauseAdmin, + }).Return().Once() + err := s.wrapper.Update(disallowList1) + require.NoError(s.T(), err) + + w0, err := cache.NewNodeDisallowListWrapper(s.provider, s.DB, func() network.DisallowListNotificationConsumer { + return s.updateConsumer + }) + require.NoError(s.T(), err) + require.Equal(s.T(), disallowList1.Lookup(), w0.GetDisallowList().Lookup()) + + // clear disallowList -> + // newly created wrapper should now read empty disallowList from data base during initialization + s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{ + FlowIds: nil, + Cause: 
network.DisallowListedCauseAdmin, + }).Return().Once() + err = s.wrapper.ClearDisallowList() + require.NoError(s.T(), err) + + w1, err := cache.NewNodeDisallowListWrapper(s.provider, s.DB, func() network.DisallowListNotificationConsumer { + return s.updateConsumer + }) + require.NoError(s.T(), err) + require.Empty(s.T(), w1.GetDisallowList()) + + // set disallowList2 -> + // newly created wrapper should now read this list from data base during initialization + s.updateConsumer.On("OnDisallowListNotification", &network.DisallowListingUpdate{ + FlowIds: disallowList2, + Cause: network.DisallowListedCauseAdmin, + }).Return().Once() + err = s.wrapper.Update(disallowList2) + require.NoError(s.T(), err) + + w2, err := cache.NewNodeDisallowListWrapper(s.provider, s.DB, func() network.DisallowListNotificationConsumer { + return s.updateConsumer + }) + require.NoError(s.T(), err) + require.Equal(s.T(), disallowList2.Lookup(), w2.GetDisallowList().Lookup()) + }) +} diff --git a/network/p2p/cache/protocol_state_provider.go b/network/p2p/cache/protocol_state_provider.go index cf3bba4e49a..846f78e1ad9 100644 --- a/network/p2p/cache/protocol_state_provider.go +++ b/network/p2p/cache/protocol_state_provider.go @@ -66,11 +66,10 @@ func NewProtocolStateIDCache( // of an Epoch transition that just occurred. Upon such notification, the internally-cached // Identity table of authorized network participants is updated. // -// TODO: per API contract, implementations of `EpochTransition` should be non-blocking +// TODO(EFM, #6123): per API contract, implementations of `EpochTransition` should be non-blocking // and virtually latency free. However, we run data base queries and acquire locks here, // which is undesired. func (p *ProtocolStateIDCache) EpochTransition(newEpochCounter uint64, header *flow.Header) { - p.logger.Info().Uint64("newEpochCounter", newEpochCounter).Msg("epoch transition") p.update(header.ID()) } @@ -78,11 +77,10 @@ func (p *ProtocolStateIDCache) EpochTransition(newEpochCounter uint64, header *f // that the EpochSetup Phase has just stared. Upon such notification, the internally-cached // Identity table of authorized network participants is updated. // -// TODO: per API contract, implementations of `EpochSetupPhaseStarted` should be non-blocking +// TODO(EFM, #6123): per API contract, implementations of `EpochSetupPhaseStarted` should be non-blocking // and virtually latency free. However, we run data base queries and acquire locks here, // which is undesired. func (p *ProtocolStateIDCache) EpochSetupPhaseStarted(currentEpochCounter uint64, header *flow.Header) { - p.logger.Info().Uint64("currentEpochCounter", currentEpochCounter).Msg("epoch setup phase started") p.update(header.ID()) } @@ -90,11 +88,10 @@ func (p *ProtocolStateIDCache) EpochSetupPhaseStarted(currentEpochCounter uint64 // that the EpochCommitted Phase has just stared. Upon such notification, the internally-cached // Identity table of authorized network participants is updated. // -// TODO: per API contract, implementations of `EpochCommittedPhaseStarted` should be non-blocking +// TODO(EFM, #6123): per API contract, implementations of `EpochCommittedPhaseStarted` should be non-blocking // and virtually latency free. However, we run data base queries and acquire locks here, // which is undesired. 
func (p *ProtocolStateIDCache) EpochCommittedPhaseStarted(currentEpochCounter uint64, header *flow.Header) { - p.logger.Info().Uint64("currentEpochCounter", currentEpochCounter).Msg("epoch committed phase started") p.update(header.ID()) } @@ -141,7 +138,7 @@ func (p *ProtocolStateIDCache) update(blockID flow.Identifier) { // protocol that pass the provided filter. Caution, this includes ejected nodes. // Please check the `Ejected` flag in the identities (or provide a filter for // removing ejected nodes). -func (p *ProtocolStateIDCache) Identities(filter flow.IdentityFilter) flow.IdentityList { +func (p *ProtocolStateIDCache) Identities(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { p.mu.RLock() defer p.mu.RUnlock() return p.identities.Filter(filter) diff --git a/network/p2p/cache/protocol_state_provider_test.go b/network/p2p/cache/protocol_state_provider_test.go index 4aa593ef2a3..65713767e9f 100644 --- a/network/p2p/cache/protocol_state_provider_test.go +++ b/network/p2p/cache/protocol_state_provider_test.go @@ -68,19 +68,19 @@ func (suite *ProtocolStateProviderTestSuite) triggerUpdate() { suite.participants = unittest.IdentityListFixture(5, unittest.WithAllRoles(), unittest.WithKeys) block := unittest.BlockFixture() - suite.head = block.Header + suite.head = block.ToHeader() // set up protocol snapshot mock snapshot := &mockprotocol.Snapshot{} snapshot.On("Identities", mock.Anything).Return( - func(filter flow.IdentityFilter) flow.IdentityList { + func(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { return suite.participants.Filter(filter) }, nil, ) snapshot.On("Identity", mock.Anything).Return(func(id flow.Identifier) *flow.Identity { for _, n := range suite.participants { - if n.ID() == id { + if n.NodeID == id { return n } } diff --git a/network/p2p/compressed/mockStream.go b/network/p2p/compressed/mockStream.go deleted file mode 100644 index afb2fee08c6..00000000000 --- a/network/p2p/compressed/mockStream.go +++ /dev/null @@ -1,86 +0,0 @@ -package compressed - -import ( - "io" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/protocol" -) - -// mockStream is a mocked libp2p stream that is implemented as a pipe with a reader and writer. -// Whatever is written on the stream is written by the writer on the pipe, which in turn makes -// it available for read by the reader. 
-type mockStream struct { - pw *io.PipeWriter - pr *io.PipeReader -} - -func newMockStream(pw *io.PipeWriter, pr *io.PipeReader) *mockStream { - return &mockStream{ - pw: pw, - pr: pr, - } -} - -func (m *mockStream) Read(p []byte) (int, error) { - n, err := m.pr.Read(p) - return n, err -} - -func (m *mockStream) Write(p []byte) (int, error) { - return m.pw.Write(p) -} - -func (m *mockStream) Close() error { - return multierror.Append(m.CloseRead(), m.CloseWrite()) -} - -func (m *mockStream) CloseRead() error { - return m.pr.Close() -} - -func (m *mockStream) CloseWrite() error { - return m.pw.Close() -} - -func (m *mockStream) Reset() error { - return nil -} - -func (m *mockStream) SetDeadline(_ time.Time) error { - return nil -} - -func (m *mockStream) SetReadDeadline(_ time.Time) error { - return nil -} - -func (m *mockStream) SetWriteDeadline(_ time.Time) error { - return nil -} - -func (m *mockStream) ID() string { - return "" -} - -func (m *mockStream) Protocol() protocol.ID { - return "" -} - -func (m *mockStream) SetProtocol(_ protocol.ID) error { - return nil -} - -func (m *mockStream) Stat() network.Stats { - return network.Stats{} -} - -func (m *mockStream) Conn() network.Conn { - return nil -} - -func (m *mockStream) Scope() network.StreamScope { - return nil -} diff --git a/network/p2p/conduit/conduit.go b/network/p2p/conduit/conduit.go index 7a5070edb68..76758e6d7be 100644 --- a/network/p2p/conduit/conduit.go +++ b/network/p2p/conduit/conduit.go @@ -4,72 +4,36 @@ import ( "context" "fmt" - "github.com/rs/zerolog" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/alsp" "github.com/onflow/flow-go/network/channels" ) -// DefaultConduitFactory is a wrapper around the network Adapter. +// DefaultConduitFactory is a wrapper around the network ConduitAdapter. // It directly passes the incoming messages to the corresponding methods of the -// network Adapter. +// network ConduitAdapter. type DefaultConduitFactory struct { - *component.ComponentManager - adapter network.Adapter - misbehaviorManager network.MisbehaviorReportManager + adapter network.ConduitAdapter } -// DefaultConduitFactoryOpt is a function that applies an option to the DefaultConduitFactory. -type DefaultConduitFactoryOpt func(*DefaultConduitFactory) - -// WithMisbehaviorManager overrides the misbehavior manager for the conduit factory. -func WithMisbehaviorManager(misbehaviorManager network.MisbehaviorReportManager) DefaultConduitFactoryOpt { - return func(d *DefaultConduitFactory) { - d.misbehaviorManager = misbehaviorManager - } -} +var _ network.ConduitFactory = (*DefaultConduitFactory)(nil) // NewDefaultConduitFactory creates a new DefaultConduitFactory, this is the default conduit factory used by the node. // Args: // -// logger: zerolog.Logger, the logger used by the conduit factory. -// metrics: module.AlspMetrics (an instance of module.NetworkMetrics can be used). -// opts: DefaultConduitFactoryOpt, optional arguments to override the default behavior of the conduit factory. +// none // // Returns: // -// *DefaultConduitFactory, the created conduit factory. 
-func NewDefaultConduitFactory(logger zerolog.Logger, metrics module.AlspMetrics, opts ...DefaultConduitFactoryOpt) *DefaultConduitFactory {
-	d := &DefaultConduitFactory{
-		misbehaviorManager: alsp.NewMisbehaviorReportManager(logger, metrics),
-	}
-
-	for _, apply := range opts {
-		apply(d)
-	}
-
-	// worker added so conduit factory doesn't immediately shut down when it's started
-	cm := component.NewComponentManagerBuilder().
-		AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
-			ready()
-
-			<-ctx.Done()
-		}).Build()
-
-	d.ComponentManager = cm
-
-	return d
+// a new instance of the DefaultConduitFactory.
+func NewDefaultConduitFactory() *DefaultConduitFactory {
+	return &DefaultConduitFactory{}
 }
 
-// RegisterAdapter sets the Adapter component of the factory.
-// The Adapter is a wrapper around the Network layer that only exposes the set of methods
+// RegisterAdapter sets the ConduitAdapter component of the factory.
+// The ConduitAdapter is a wrapper around the Network layer that only exposes the set of methods
 // that are needed by a conduit.
-func (d *DefaultConduitFactory) RegisterAdapter(adapter network.Adapter) error {
+func (d *DefaultConduitFactory) RegisterAdapter(adapter network.ConduitAdapter) error {
 	if d.adapter != nil {
 		return fmt.Errorf("could not register a new network adapter, one already exists")
 	}
@@ -80,7 +44,7 @@ func (d *DefaultConduitFactory) RegisterAdapter(adapter network.Adapter) error {
 }
 
 // NewConduit creates a conduit on the specified channel.
-// Prior to creating any conduit, the factory requires an Adapter to be registered with it.
+// Prior to creating any conduit, the factory requires a ConduitAdapter to be registered with it.
 func (d *DefaultConduitFactory) NewConduit(ctx context.Context, channel channels.Channel) (network.Conduit, error) {
 	if d.adapter == nil {
 		return nil, fmt.Errorf("could not create a new conduit, missing a registered network adapter")
 	}
@@ -89,11 +53,10 @@ func (d *DefaultConduitFactory) NewConduit(ctx context.Context, channel channels
 	child, cancel := context.WithCancel(ctx)
 
 	return &Conduit{
-		ctx:                child,
-		cancel:             cancel,
-		channel:            channel,
-		adapter:            d.adapter,
-		misbehaviorManager: d.misbehaviorManager,
+		ctx:     child,
+		cancel:  cancel,
+		channel: channel,
+		adapter: d.adapter,
 	}, nil
 }
 
@@ -101,11 +64,10 @@ func (d *DefaultConduitFactory) NewConduit(ctx context.Context, channel channels
 // sending messages within a single engine process. It sends all messages to
 // what can be considered a bus reserved for that specific engine.
 type Conduit struct {
-	ctx                context.Context
-	cancel             context.CancelFunc
-	channel            channels.Channel
-	adapter            network.Adapter
-	misbehaviorManager network.MisbehaviorReportManager
+	ctx     context.Context
+	cancel  context.CancelFunc
+	channel channels.Channel
+	adapter network.ConduitAdapter
 }
 
 var _ network.Conduit = (*Conduit)(nil)
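With the misbehavior manager removed from the conduit, reporting now flows through the registered adapter (the `ReportMisbehaviorOnChannel` call in the next hunk). A rough sketch of that delegation shape, using hypothetical stand-in types rather than the real network interfaces:

```go
package main

import "fmt"

// Stand-ins for channels.Channel and network.MisbehaviorReport.
type Channel string
type MisbehaviorReport struct{ Reason string }

// ConduitAdapter is a stand-in for the subset of network.ConduitAdapter
// that a conduit needs here: misbehavior reporting on a channel.
type ConduitAdapter interface {
	ReportMisbehaviorOnChannel(ch Channel, report MisbehaviorReport)
}

// Conduit holds only the adapter; it no longer owns a misbehavior manager.
type Conduit struct {
	channel Channel
	adapter ConduitAdapter
}

// ReportMisbehavior forwards the report to the networking layer.
func (c *Conduit) ReportMisbehavior(report MisbehaviorReport) {
	c.adapter.ReportMisbehaviorOnChannel(c.channel, report)
}

type loggingAdapter struct{}

func (loggingAdapter) ReportMisbehaviorOnChannel(ch Channel, r MisbehaviorReport) {
	fmt.Printf("misbehavior on %s: %s\n", ch, r.Reason)
}

func main() {
	c := &Conduit{channel: "consensus-committee", adapter: loggingAdapter{}}
	c.ReportMisbehavior(MisbehaviorReport{Reason: "invalid message"})
}
```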
@@ -145,7 +107,7 @@ func (c *Conduit) Multicast(event interface{}, num uint, targetIDs ...flow.Ident
 // The misbehavior is reported to the networking layer to penalize the misbehaving node.
 // The implementation must be thread-safe and non-blocking.
 func (c *Conduit) ReportMisbehavior(report network.MisbehaviorReport) {
-	c.misbehaviorManager.HandleMisbehaviorReport(c.channel, report)
+	c.adapter.ReportMisbehaviorOnChannel(c.channel, report)
 }
 
 func (c *Conduit) Close() error {
diff --git a/network/p2p/config/errors.go b/network/p2p/config/errors.go
new file mode 100644
index 00000000000..ff5f989fe21
--- /dev/null
+++ b/network/p2p/config/errors.go
@@ -0,0 +1,32 @@
+package p2pconfig
+
+import (
+	"errors"
+	"fmt"
+
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+)
+
+// InvalidLimitConfigError indicates the validation limit is < 0.
+type InvalidLimitConfigError struct {
+	err error
+}
+
+func (e InvalidLimitConfigError) Error() string {
+	return e.err.Error()
+}
+
+func (e InvalidLimitConfigError) Unwrap() error {
+	return e.err
+}
+
+// NewInvalidLimitConfigErr returns a new InvalidLimitConfigError.
+func NewInvalidLimitConfigErr(controlMsg p2pmsg.ControlMessageType, err error) InvalidLimitConfigError {
+	return InvalidLimitConfigError{fmt.Errorf("invalid rpc control message %s validation limit configuration: %w", controlMsg, err)}
+}
+
+// IsInvalidLimitConfigError returns whether an error is an InvalidLimitConfigError.
+func IsInvalidLimitConfigError(err error) bool {
+	var e InvalidLimitConfigError
+	return errors.As(err, &e)
+}
diff --git a/network/p2p/config/errors_test.go b/network/p2p/config/errors_test.go
new file mode 100644
index 00000000000..58db000e1be
--- /dev/null
+++ b/network/p2p/config/errors_test.go
@@ -0,0 +1,30 @@
+package p2pconfig
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+)
+
+// TestErrInvalidLimitConfigRoundTrip ensures correct error formatting for InvalidLimitConfigError.
+func TestErrInvalidLimitConfigRoundTrip(t *testing.T) {
+	controlMsg := p2pmsg.CtrlMsgGraft
+	limit := uint64(500)
+
+	e := fmt.Errorf("invalid rate limit value %d must be greater than 0", limit)
+	err := NewInvalidLimitConfigErr(controlMsg, e)
+
+	// tests the error message formatting.
+	expectedErrMsg := fmt.Errorf("invalid rpc control message %s validation limit configuration: %w", controlMsg, e).Error()
+	assert.Equal(t, expectedErrMsg, err.Error(), "the error message should be correctly formatted")
+
+	// tests the IsInvalidLimitConfigError function.
+	assert.True(t, IsInvalidLimitConfigError(err), "IsInvalidLimitConfigError should return true for an InvalidLimitConfigError")
+
+	// test IsInvalidLimitConfigError with a different error type.
+	dummyErr := fmt.Errorf("dummy error")
+	assert.False(t, IsInvalidLimitConfigError(dummyErr), "IsInvalidLimitConfigError should return false for a non-InvalidLimitConfigError error")
+}
diff --git a/network/p2p/config/gossipsub.go b/network/p2p/config/gossipsub.go
new file mode 100644
index 00000000000..a8ea16ce604
--- /dev/null
+++ b/network/p2p/config/gossipsub.go
@@ -0,0 +1,195 @@
+package p2pconfig
+
+import (
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ResourceManagerConfig is the resource manager configuration for the libp2p node.
+// The resource manager is used to limit the number of open connections and streams (as well as any other resources
+// used by libp2p) for each peer.
+type ResourceManagerConfig struct {
+	Override ResourceManagerOverrideScope `mapstructure:"limits-override"` // override limits for specific peers, protocols, etc.
+	MemoryLimitRatio     float64 `mapstructure:"memory-limit-ratio"`     // maximum allowed fraction of memory to be allocated by the libp2p resources in (0,1]
+	FileDescriptorsRatio float64 `mapstructure:"file-descriptors-ratio"` // maximum allowed fraction of file descriptors to be allocated by the libp2p resources in (0,1]
+}
+
+type ResourceManagerOverrideScope struct {
+	// System is the limit for the resource at the entire system.
+	// For a specific limit, the system-wide limit dictates the maximum allowed value across all peers and protocols at the entire node level.
+	System ResourceManagerOverrideLimit `mapstructure:"system"`
+
+	// Transient is the limit for the resource at the transient scope. Transient limits are used for resources that have not yet been fully established and are under negotiation.
+	Transient ResourceManagerOverrideLimit `mapstructure:"transient"`
+
+	// Protocol is the limit for the resource at the protocol scope, e.g., DHT, GossipSub, etc. It dictates the maximum allowed resource across all peers for that protocol.
+	Protocol ResourceManagerOverrideLimit `mapstructure:"protocol"`
+
+	// Peer is the limit for the resource at the peer scope. It dictates the maximum allowed resource for a specific peer.
+	Peer ResourceManagerOverrideLimit `mapstructure:"peer"`
+
+	// PeerProtocol is the limit for the resource for a pair of (peer, protocol), e.g., (peer1, DHT), (peer1, GossipSub), etc. It dictates the maximum allowed resource for a protocol and a peer.
+	PeerProtocol ResourceManagerOverrideLimit `mapstructure:"peer-protocol"`
+}
+
+// ResourceManagerOverrideLimit is the configuration for the resource manager override limit at a certain scope.
+// Any value that is not set will be ignored and the default value will be used.
+type ResourceManagerOverrideLimit struct {
+	// StreamsInbound is the max number of inbound streams allowed, at the resource scope. If not set, the default value will be used.
+	StreamsInbound int `validate:"gte=0" mapstructure:"streams-inbound"`
+
+	// StreamsOutbound is the max number of outbound streams allowed, at the resource scope.
+	StreamsOutbound int `validate:"gte=0" mapstructure:"streams-outbound"`
+
+	// ConnectionsInbound is the max number of inbound connections allowed, at the resource scope.
+	ConnectionsInbound int `validate:"gte=0" mapstructure:"connections-inbound"`
+
+	// ConnectionsOutbound is the max number of outbound connections allowed, at the resource scope.
+	ConnectionsOutbound int `validate:"gte=0" mapstructure:"connections-outbound"`
+
+	// FD is the max number of file descriptors allowed, at the resource scope.
+	FD int `validate:"gte=0" mapstructure:"fd"`
+
+	// Memory is the max amount of memory allowed (bytes), at the resource scope.
+	Memory int `validate:"gte=0" mapstructure:"memory-bytes"`
+}
+
+// GossipSubParameters keys.
+const (
+	RpcInspectorKey         = "rpc-inspector"
+	RpcTracerKey            = "rpc-tracer"
+	PeerScoringEnabledKey   = "peer-scoring-enabled"
+	ScoreParamsKey          = "scoring-parameters"
+	SubscriptionProviderKey = "subscription-provider"
+	PeerGaterKey            = "peer-gater"
+	SourceDecayKey          = "source-decay"
+	TopicDeliveryWeightsKey = "topic-delivery-weights-override"
+)
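The `validate` tags on these override limits follow the tag syntax of github.com/go-playground/validator. Assuming that library, a small sketch of checking a (hypothetical, locally mirrored) limit struct before use, where a zero value means "use the libp2p default":

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// OverrideLimit mirrors a few fields of ResourceManagerOverrideLimit with
// the same validate tags; the real struct lives in p2pconfig.
type OverrideLimit struct {
	StreamsInbound  int `validate:"gte=0"`
	StreamsOutbound int `validate:"gte=0"`
	Memory          int `validate:"gte=0"`
}

func main() {
	v := validator.New()

	ok := OverrideLimit{StreamsInbound: 1000}
	fmt.Println(v.Struct(ok)) // <nil>: unset fields stay at 0, which satisfies gte=0

	bad := OverrideLimit{StreamsInbound: -1}
	fmt.Println(v.Struct(bad) != nil) // true: negative limits are rejected
}
```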
+
+// GossipSubParameters is the configuration for the GossipSub pubsub implementation.
+type GossipSubParameters struct {
+	// RpcInspector is the configuration for all gossipsub RPC control message inspectors.
+	RpcInspector RpcInspectorParameters `mapstructure:"rpc-inspector"`
+
+	// RpcTracer is the configuration for the gossipsub tracer. The GossipSub tracer is used to trace the local mesh events and peer scores.
+	RpcTracer GossipSubTracerParameters `mapstructure:"rpc-tracer"`
+
+	// PeerScoringEnabled is whether to enable GossipSub peer scoring.
+	PeerScoringEnabled   bool                           `mapstructure:"peer-scoring-enabled"`
+	SubscriptionProvider SubscriptionProviderParameters `mapstructure:"subscription-provider"`
+	ScoringParameters    ScoringParameters              `mapstructure:"scoring-parameters"`
+
+	// PeerGaterEnabled enables the peer gater.
+	PeerGaterEnabled bool `mapstructure:"peer-gater-enabled"`
+	// PeerGaterSourceDecay is the per-IP decay for all counters tracked by the peer gater for a peer.
+	PeerGaterSourceDecay time.Duration `mapstructure:"peer-gater-source-decay"`
+	// PeerGaterTopicDeliveryWeightsOverride topic delivery weights that will override the default value for the specified channel.
+	// This is a comma separated list "channel:weight, channel2:weight, channel3:weight".
+	// e.g.: consensus-committee: 1.5, sync-committee: .75
+	PeerGaterTopicDeliveryWeightsOverride string `mapstructure:"peer-gater-topic-delivery-weights-override"`
+}
+
+const (
+	DecayIntervalKey = "decay-interval"
+)
+
+// ScoringParameters are the parameters for the score option.
+// Parameters are "numerical values" that are used to compute or build components that compute the score of a peer in the GossipSub system.
+type ScoringParameters struct {
+	PeerScoring               PeerScoringParameters     `validate:"required" mapstructure:"peer-scoring"`
+	ScoringRegistryParameters ScoringRegistryParameters `validate:"required" mapstructure:"scoring-registry"`
+}
+
+// PeerGaterTopicDeliveryWeights returns the topic delivery weights configured on this struct as a map[string]float64.
+// Note: when new topic delivery weights are added to the struct, this func should be updated.
+func (g *GossipSubParameters) PeerGaterTopicDeliveryWeights() (map[string]float64, error) {
+	m := make(map[string]float64)
+	for _, weightConfig := range strings.Split(g.PeerGaterTopicDeliveryWeightsOverride, ",") {
+		wc := strings.Split(weightConfig, ":")
+		f, err := strconv.ParseFloat(strings.TrimSpace(wc[1]), 64)
+		if err != nil {
+			return nil, err
+		}
+		m[strings.TrimSpace(wc[0])] = f
+	}
+
+	return m, nil
+}
+
+// SubscriptionProviderParameters keys.
+const (
+	UpdateIntervalKey = "update-interval"
+	CacheSizeKey      = "cache-size"
+)
+
+type SubscriptionProviderParameters struct {
+	// UpdateInterval is the interval for updating the list of topics the node has subscribed to, as well as the list of all
+	// peers subscribed to each of those topics. This is used to penalize peers that have an invalid subscription based on their role.
+	UpdateInterval time.Duration `validate:"gt=0s" mapstructure:"update-interval"`
+
+	// CacheSize is the size of the cache that keeps the list of peers subscribed to each topic as the local node.
+	// This is the local view of the current node towards the subscription status of other nodes in the system.
+	// The cache must be large enough to accommodate the maximum number of nodes in the system, otherwise the view of the local node will be incomplete
+	// due to cache eviction.
+	CacheSize uint32 `validate:"gt=0" mapstructure:"cache-size"`
+}
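Note that `PeerGaterTopicDeliveryWeights` above trusts its input: an empty override string, or an entry without a colon, would panic on the `wc[1]` index. A slightly more defensive variant of the same parsing, as a sketch rather than the shipped code:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseTopicDeliveryWeights parses "channel:weight, channel2:weight" lists,
// returning an error instead of panicking on malformed entries.
func parseTopicDeliveryWeights(override string) (map[string]float64, error) {
	m := make(map[string]float64)
	if strings.TrimSpace(override) == "" {
		return m, nil // nothing configured
	}
	for _, entry := range strings.Split(override, ",") {
		parts := strings.SplitN(entry, ":", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("malformed weight entry %q", entry)
		}
		w, err := strconv.ParseFloat(strings.TrimSpace(parts[1]), 64)
		if err != nil {
			return nil, fmt.Errorf("invalid weight in %q: %w", entry, err)
		}
		m[strings.TrimSpace(parts[0])] = w
	}
	return m, nil
}

func main() {
	w, err := parseTopicDeliveryWeights("consensus-committee: 1.5, sync-committee: .75")
	fmt.Println(w, err) // map[consensus-committee:1.5 sync-committee:0.75] <nil>
}
```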
+
+// GossipSubTracerParameters keys.
+const (
+	LocalMeshLogIntervalKey              = "local-mesh-logging-interval"
+	ScoreTracerIntervalKey               = "score-tracer-interval"
+	RPCSentTrackerCacheSizeKey           = "rpc-sent-tracker-cache-size"
+	RPCSentTrackerQueueCacheSizeKey      = "rpc-sent-tracker-queue-cache-size"
+	RPCSentTrackerNumOfWorkersKey        = "rpc-sent-tracker-workers"
+	DuplicateMessageCacheTrackerKey      = "duplicate-message-tracker"
+	DuplicateMessageCacheTrackerSizeKey  = "cache-size"
+	DuplicateMessageCacheTrackerDecayKey = "decay"
+)
+
+// GossipSubTracerParameters is the config for the gossipsub tracer. The GossipSub tracer is used to trace the local mesh events and peer scores.
+type GossipSubTracerParameters struct {
+	DuplicateMessageTrackerConfig DuplicateMessageTrackerConfig `validate:"required" mapstructure:"duplicate-message-tracker"`
+	// LocalMeshLogInterval is the interval at which the local mesh is logged.
+	LocalMeshLogInterval time.Duration `validate:"gt=0s" mapstructure:"local-mesh-logging-interval"`
+	// ScoreTracerInterval is the interval at which the score tracer logs the peer scores.
+	ScoreTracerInterval time.Duration `validate:"gt=0s" mapstructure:"score-tracer-interval"`
+	// RPCSentTrackerCacheSize cache size of the rpc sent tracker used by the gossipsub mesh tracer.
+	RPCSentTrackerCacheSize uint32 `validate:"gt=0" mapstructure:"rpc-sent-tracker-cache-size"`
+	// RPCSentTrackerQueueCacheSize cache size of the rpc sent tracker queue used for async tracking.
+	RPCSentTrackerQueueCacheSize uint32 `validate:"gt=0" mapstructure:"rpc-sent-tracker-queue-cache-size"`
+	// RpcSentTrackerNumOfWorkers number of workers for the rpc sent tracker worker pool.
+	RpcSentTrackerNumOfWorkers int `validate:"gt=0" mapstructure:"rpc-sent-tracker-workers"`
+}
+
+// DuplicateMessageTrackerConfig duplicate message cache config.
+type DuplicateMessageTrackerConfig struct {
+	// CacheSize cache size of the gossipsub duplicate message tracker.
+	CacheSize uint32 `validate:"gt=0" mapstructure:"cache-size"`
+	// Decay rate of decay for the peer duplicate message counters.
+	Decay float64 `validate:"gt=0,lt=1" mapstructure:"decay"`
+	// SkipDecayThreshold is the threshold below which the decay function is not called for a counter, i.e., counters at or below this value are no longer decayed.
+	SkipDecayThreshold float64 `validate:"gt=0,lt=1" mapstructure:"skip-decay-threshold"`
+}
+
+// ResourceScope is the scope of the resource, e.g., system, transient, protocol, peer, peer-protocol.
+type ResourceScope string
+
+func (r ResourceScope) String() string {
+	return string(r)
+}
+
+const (
+	// ResourceScopeSystem is the system scope; the system scope dictates the maximum allowed value across all peers and protocols at the entire node level.
+	ResourceScopeSystem ResourceScope = "system"
+	// ResourceScopeTransient is the transient scope; the transient scope is used for resources that have not yet been fully established and are under negotiation.
+	ResourceScopeTransient ResourceScope = "transient"
+	// ResourceScopeProtocol is the protocol scope; the protocol scope dictates the maximum allowed resource across all peers for that protocol.
+	ResourceScopeProtocol ResourceScope = "protocol"
+	// ResourceScopePeer is the peer scope; the peer scope dictates the maximum allowed resource for a specific peer.
+	ResourceScopePeer ResourceScope = "peer"
+	// ResourceScopePeerProtocol is the peer-protocol scope; the peer-protocol scope dictates the maximum allowed resource for a protocol and a peer.
+	ResourceScopePeerProtocol ResourceScope = "peer-protocol"
+)
diff --git a/network/p2p/config/gossipsub_rpc_inspectors.go b/network/p2p/config/gossipsub_rpc_inspectors.go
new file mode 100644
index 00000000000..4144ff2843c
--- /dev/null
+++ b/network/p2p/config/gossipsub_rpc_inspectors.go
@@ -0,0 +1,250 @@
+package p2pconfig
+
+// RpcInspectorParameters keys.
+const (
+	ValidationConfigKey      = "validation"
+	MetricsConfigKey         = "metrics"
+	NotificationCacheSizeKey = "notification-cache-size"
+)
+
+// RpcInspectorParameters contains the "numerical values" for the gossipsub RPC control message inspectors parameters.
+type RpcInspectorParameters struct {
+	// Validation is the control message validation inspector configuration and limits.
+	Validation RpcValidationInspector `mapstructure:"validation"`
+	// NotificationCacheSize size of the queue for notifications about invalid RPC messages.
+	NotificationCacheSize uint32 `mapstructure:"notification-cache-size"`
+}
+
+// RpcValidationInspectorParameters keys.
+const (
+	ProcessKey                      = "process"
+	ClusterPrefixedMessageConfigKey = "cluster-prefixed-messages"
+	IWantConfigKey                  = "iwant"
+	IHaveConfigKey                  = "ihave"
+	GraftPruneKey                   = "graft-and-prune"
+	PublishMessagesConfigKey        = "publish-messages"
+	InspectionQueueConfigKey        = "inspection-queue"
+)
+
+// RpcValidationInspector rpc control message validation inspector configuration.
+type RpcValidationInspector struct {
+	ClusterPrefixedMessage ClusterPrefixedMessageInspectionParameters `mapstructure:"cluster-prefixed-messages"`
+	IWant                  IWantRpcInspectionParameters               `mapstructure:"iwant"`
+	IHave                  IHaveRpcInspectionParameters               `mapstructure:"ihave"`
+	GraftPrune             GraftPruneRpcInspectionParameters          `mapstructure:"graft-and-prune"`
+	PublishMessages        PublishMessageInspectionParameters         `mapstructure:"publish-messages"`
+	InspectionQueue        InspectionQueueParameters                  `mapstructure:"inspection-queue"`
+	// InspectionProcess configuration that controls which aspects of rpc inspection are enabled and disabled during inspect message request processing.
+	InspectionProcess InspectionProcess `mapstructure:"process"`
+}
+
+// InspectionProcess configuration that controls which aspects of rpc inspection are enabled and disabled during inspect message request processing.
+type InspectionProcess struct {
+	Inspect  Inspect  `validate:"required" mapstructure:"inspection"`
+	Truncate Truncate `validate:"required" mapstructure:"truncation"`
+}
+
+const (
+	InspectionKey       = "inspection"
+	TruncationKey       = "truncation"
+	EnableKey           = "enable"
+	EnabledKey          = "enabled"
+	DisabledKey         = "disabled"
+	MessageIDKey        = "message-id"
+	RejectUnstakedPeers = "reject-unstaked-peers"
+)
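In the Inspect struct that follows, the Disabled flag is a global kill switch while the per-message-type flags gate individual inspections. A condensed sketch of how an inspector might consult this configuration (hypothetical helper, not the actual inspector code):

```go
package main

import "fmt"

// InspectToggles mirrors a subset of the Inspect struct fields below.
type InspectToggles struct {
	Disabled    bool
	EnableGraft bool
	EnablePrune bool
}

// shouldInspect applies the fail-safe first, then the per-type toggle.
func shouldInspect(cfg InspectToggles, msgType string) bool {
	if cfg.Disabled {
		return false // global fail-safe: skip all inspection, metrics, and logging
	}
	switch msgType {
	case "graft":
		return cfg.EnableGraft
	case "prune":
		return cfg.EnablePrune
	default:
		return false
	}
}

func main() {
	cfg := InspectToggles{EnableGraft: true}
	fmt.Println(shouldInspect(cfg, "graft")) // true
	cfg.Disabled = true
	fmt.Println(shouldInspect(cfg, "graft")) // false: the fail-safe wins
}
```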
+
+// Inspect configuration to enable/disable RPC inspection for a particular control message type.
+type Inspect struct {
+	// Disabled serves as a fail-safe mechanism to globally deactivate inspection logic. When this fail-safe is activated, it disables all
+	// aspects of the inspection logic, irrespective of individual configurations like inspection.enable-graft, inspection.enable-prune, etc.
+	// Consequently, all metrics collection and logging related to the rpc and inspection will also be disabled.
+	// It is important to note that activating this fail-safe results in a comprehensive deactivation of all inspection features.
+	// Please use this setting judiciously, considering its broad impact on the behavior of control message handling.
+	Disabled bool `mapstructure:"disabled"`
+	// EnableGraft enable graft control message inspection.
+	EnableGraft bool `mapstructure:"enable-graft"`
+	// EnablePrune enable prune control message inspection.
+	EnablePrune bool `mapstructure:"enable-prune"`
+	// EnableIHave enable iHave control message inspection.
+	EnableIHave bool `mapstructure:"enable-ihave"`
+	// EnableIWant enable iWant control message inspection.
+	EnableIWant bool `mapstructure:"enable-iwant"`
+	// EnablePublish enable publish message inspection.
+	EnablePublish bool `mapstructure:"enable-publish"`
+	// RejectUnstakedPeers when set to true, RPCs from unstaked peers will be rejected.
+	RejectUnstakedPeers bool `mapstructure:"reject-unstaked-peers"`
+}
+
+// Truncate configuration to enable/disable RPC truncation for a particular control message type.
+type Truncate struct {
+	// Disabled serves as a fail-safe mechanism to globally deactivate truncation logic. When this fail-safe is activated, it disables all
+	// aspects of the truncation logic, irrespective of individual configurations like truncation.enable-graft, truncation.enable-prune, etc.
+	// Consequently, all metrics collection and logging related to the rpc and inspection will also be disabled.
+	// It is important to note that activating this fail-safe results in a comprehensive deactivation of all truncation features.
+	// Please use this setting judiciously, considering its broad impact on the behavior of control message handling.
+	Disabled bool `mapstructure:"disabled"`
+	// EnableGraft enable graft control message truncation.
+	EnableGraft bool `mapstructure:"enable-graft"`
+	// EnablePrune enable prune control message truncation.
+	EnablePrune bool `mapstructure:"enable-prune"`
+	// EnableIHave enable iHave control message truncation.
+	EnableIHave bool `mapstructure:"enable-ihave"`
+	// EnableIHaveMessageIds enable iHave message id truncation.
+	EnableIHaveMessageIds bool `mapstructure:"enable-ihave-message-id"`
+	// EnableIWant enable iWant control message truncation.
+	EnableIWant bool `mapstructure:"enable-iwant"`
+	// EnableIWantMessageIds enable iWant message id truncation.
+	EnableIWantMessageIds bool `mapstructure:"enable-iwant-message-id"`
+}
+
+const (
+	QueueSizeKey = "queue-size"
+)
+
+// InspectionQueueParameters contains the "numerical values" for the control message validation inspector.
+// Incoming GossipSub RPCs are queued for async inspection by a worker pool. This worker pool is configured
+// by the parameters in this struct.
+// Each RPC has a number of "publish messages" accompanied by control messages.
+type InspectionQueueParameters struct {
+	// NumberOfWorkers number of worker pool workers.
+	NumberOfWorkers int `validate:"gte=1" mapstructure:"workers"`
+	// Size size of the queue used by the worker pool for the control message validation inspector.
+	Size uint32 `validate:"gte=100" mapstructure:"queue-size"`
+}
+
+const (
+	MaxSampleSizeKey         = "max-sample-size"
+	MessageErrorThresholdKey = "error-threshold"
+)
+
+// PublishMessageInspectionParameters contains the "numerical values" for the publish control message inspection.
+// Each RPC has a number of "publish messages" accompanied by control messages. This struct contains the limits
+// for the inspection of these publish messages.
+type PublishMessageInspectionParameters struct {
+	// MaxSampleSize is the maximum number of messages in a single RPC message that are randomly sampled for async inspection.
+	// When the size of a single RPC message exceeds this threshold, a random sample is taken for inspection, but the RPC message is not truncated.
+ MaxSampleSize int `validate:"gte=0" mapstructure:"max-sample-size"` + // ErrorThreshold the threshold at which an error will be returned if the number of invalid RPC messages exceeds this value. + ErrorThreshold int `validate:"gte=0" mapstructure:"error-threshold"` +} + +// GraftPruneRpcInspectionParameters contains the "numerical values" for the graft and prune control message inspection. +// Each RPC has a number of "publish messages" accompanied by control messages. This struct contains the limits +// for the inspection of these graft and prune control messages. +type GraftPruneRpcInspectionParameters struct { + // MessageCountThreshold is the maximum number of GRAFT or PRUNE messages in a single RPC message. + // When the total number of GRAFT or PRUNE messages in a single RPC message exceeds this threshold, + // a random sample of GRAFT or PRUNE messages will be taken and the RPC message will be truncated to this sample size. + MessageCountThreshold int `validate:"gte=0" mapstructure:"message-count-threshold"` + + // DuplicateTopicIdThreshold is the tolerance threshold for having duplicate topics in a single GRAFT or PRUNE message under inspection. + // Ideally, a GRAFT or PRUNE message should not have any duplicate topics, hence a topic ID is counted as a duplicate only if it is repeated more than once. + // When the total number of duplicate topic ids in a single GRAFT or PRUNE message exceeds this threshold, the inspection of message will fail. + DuplicateTopicIdThreshold int `validate:"gte=0" mapstructure:"duplicate-topic-id-threshold"` + + // InvalidTopicIdThreshold Maximum number of total invalid topic ids in a single GRAFT or PRUNE message, ideally this should be 0 but we allow for some tolerance + // to avoid penalizing peers that are not malicious but are misbehaving due to bugs or other issues. + InvalidTopicIdThreshold int `validate:"gte=0" mapstructure:"invalid-topic-id-threshold"` +} + +const ( + MessageCountThreshold = "message-count-threshold" + MessageIdCountThreshold = "message-id-count-threshold" + CacheMissThresholdKey = "cache-miss-threshold" + DuplicateMsgIDThresholdKey = "duplicate-message-id-threshold" + InvalidTopicIdThresholdKey = "invalid-topic-id-threshold" +) + +// IWantRpcInspectionParameters contains the "numerical values" for iwant rpc control inspection. +// Each RPC has a number of "publish messages" accompanied by control messages. This struct contains the limits +// for the inspection of the iwant control messages. +type IWantRpcInspectionParameters struct { + // MessageCountThreshold is the maximum allowed number of iWant messages in a single RPC message. + // Each iWant message represents the list of message ids. When the total number of iWant messages + // in a single RPC message exceeds this threshold, a random sample of iWant messages will be taken and the RPC message will be truncated to this sample size. + // The sample size is equal to the configured MessageCountThreshold. + MessageCountThreshold uint `validate:"gt=0" mapstructure:"message-count-threshold"` + // MessageIdCountThreshold is the maximum allowed number of message ids in a single iWant message. + // Each iWant message represents the list of message ids for a specific topic, and this parameter controls the maximum number of message ids + // that can be included in a single iWant message. 
When the total number of message ids in a single iWant message exceeds this threshold,
+	// a random sample of message ids will be taken and the iWant message will be truncated to this sample size.
+	// The sample size is equal to the configured MessageIdCountThreshold.
+	MessageIdCountThreshold int `validate:"gte=0" mapstructure:"message-id-count-threshold"`
+	// CacheMissThreshold is the threshold of tolerance for the total cache misses in all iWant messages in a single RPC message.
+	// An iWant message is considered a cache miss if it contains a message id that is not present in the local cache for iHave messages, i.e., the node
+	// does not have a record of an iHave message for this message id.
+	// When the total number of cache misses in all iWant messages in a single RPC message exceeds this threshold, the inspection of the message will fail, and
+	// a single misbehavior notification will be reported.
+	CacheMissThreshold int `validate:"gt=0" mapstructure:"cache-miss-threshold"`
+	// DuplicateMsgIdThreshold is the maximum allowed number of duplicate message ids in all iWant messages in a single RPC message.
+	// Each iWant message represents the list of message ids, and this parameter controls the maximum number of duplicate message ids
+	// that can be included in all iWant messages in a single RPC message. When the total number of duplicate message ids across all iWant messages in a single RPC message
+	// exceeds this threshold, a single misbehavior notification will be reported, and the inspection of the message will fail.
+	DuplicateMsgIdThreshold int `validate:"gt=0" mapstructure:"duplicate-message-id-threshold"`
+}
+
+const (
+	DuplicateTopicIdThresholdKey   = "duplicate-topic-id-threshold"
+	DuplicateMessageIdThresholdKey = "duplicate-message-id-threshold"
+)
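Several of these thresholds share the same truncation behavior: when a control message list exceeds the threshold, a uniformly random sample of exactly threshold size is kept. A minimal sketch of that sampling step, as a generic helper under the semantics described above:

```go
package main

import (
	"fmt"
	"math/rand"
)

// truncateSample keeps a uniformly random sample of at most `threshold`
// items, mirroring the "truncate to sample size" behavior of the inspectors.
func truncateSample[T any](items []T, threshold int) []T {
	if len(items) <= threshold {
		return items // under the threshold: nothing to truncate
	}
	rand.Shuffle(len(items), func(i, j int) {
		items[i], items[j] = items[j], items[i]
	})
	return items[:threshold]
}

func main() {
	ids := []string{"m1", "m2", "m3", "m4", "m5"}
	fmt.Println(truncateSample(ids, 3)) // any 3 of the 5 message ids
}
```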
+
+// IHaveRpcInspectionParameters contains the "numerical values" for ihave rpc control inspection.
+// Each RPC has a number of "publish messages" accompanied by control messages. This struct contains the limits
+// for the inspection of the ihave control messages.
+type IHaveRpcInspectionParameters struct {
+	// MessageCountThreshold is the maximum allowed number of iHave messages in a single RPC message.
+	// Each iHave message represents the list of message ids for a specific topic. When the total number of iHave messages
+	// in a single RPC message exceeds this threshold, a random sample of iHave messages will be taken and the RPC message will be truncated to this sample size.
+	// The sample size is equal to the configured MessageCountThreshold.
+	MessageCountThreshold int `validate:"gte=0" mapstructure:"message-count-threshold"`
+	// MessageIdCountThreshold is the maximum allowed number of message ids in a single iHave message.
+	// Each iHave message represents the list of message ids for a specific topic, and this parameter controls the maximum number of message ids
+	// that can be included in a single iHave message. When the total number of message ids in a single iHave message exceeds this threshold,
+	// a random sample of message ids will be taken and the iHave message will be truncated to this sample size.
+	// The sample size is equal to the configured MessageIdCountThreshold.
+	MessageIdCountThreshold int `validate:"gte=0" mapstructure:"message-id-count-threshold"`
+
+	// DuplicateTopicIdThreshold is the tolerance threshold for having duplicate topics in an iHave message under inspection.
+	// When the total number of duplicate topic ids in a single iHave message exceeds this threshold, the inspection of the message will fail.
+	// Note that a topic ID is counted as a duplicate only if it is repeated more than DuplicateTopicIdThreshold times.
+	DuplicateTopicIdThreshold int `validate:"gte=0" mapstructure:"duplicate-topic-id-threshold"`
+
+	// DuplicateMessageIdThreshold is the threshold of tolerance for having duplicate message IDs in a single iHave message under inspection.
+	// Ideally, an iHave message should not have any duplicate message IDs, hence a message id is considered duplicate when it is repeated more than once
+	// within the same iHave message. When the total number of duplicate message ids in a single iHave message exceeds this threshold, the inspection of the message will fail.
+	DuplicateMessageIdThreshold int `validate:"gte=0" mapstructure:"duplicate-message-id-threshold"`
+
+	// InvalidTopicIdThreshold is the maximum number of total invalid topic ids in a single iHave message; ideally this should be 0, but we allow for some tolerance
+	// to avoid penalizing peers that are not malicious but are misbehaving due to bugs or other issues.
+	InvalidTopicIdThreshold int `validate:"gte=0" mapstructure:"invalid-topic-id-threshold"`
+}
+
+const (
+	HardThresholdKey     = "hard-threshold"
+	TrackerCacheSizeKey  = "tracker-cache-size"
+	TrackerCacheDecayKey = "tracker-cache-decay"
+)
+
+// ClusterPrefixedMessageInspectionParameters contains the "numerical values" for cluster prefixed control message inspection.
+// Each RPC has a number of "publish messages" accompanied by control messages. This struct contains the limits for the inspection
+// of messages (publish messages and control messages) that belong to cluster prefixed topics.
+// Cluster-prefixed topics are topics that are prefixed with the cluster ID of the node that published the message.
+type ClusterPrefixedMessageInspectionParameters struct {
+	// HardThreshold is the upper bound on the amount of cluster prefixed control messages that will be processed
+	// before a node starts to get penalized. This allows LN nodes to process some cluster prefixed control messages during startup
+	// when the cluster ID's provider is set asynchronously. It also allows processing of some stale messages that may be sent by nodes
+	// that fall behind in the protocol. After the amount of cluster prefixed control messages processed exceeds this threshold the node
+	// will be pushed to the edge of the network mesh.
+	HardThreshold float64 `validate:"gte=0" mapstructure:"hard-threshold"`
+	// ControlMsgsReceivedCacheSize size of the cache used to track the number of cluster prefixed topics received by peers.
+	ControlMsgsReceivedCacheSize uint32 `validate:"gt=0" mapstructure:"tracker-cache-size"`
+	// ControlMsgsReceivedCacheDecay decay value used for the geometric decay of cache counters used to keep track of cluster prefixed topics received by peers.
+	ControlMsgsReceivedCacheDecay float64 `validate:"gt=0" mapstructure:"tracker-cache-decay"`
+}
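The duplicate-topic-id rule above counts a topic as a duplicate only for repeats beyond its first occurrence. A small sketch of counting duplicates against the tolerance threshold, under those assumed semantics:

```go
package main

import "fmt"

// exceedsDuplicateThreshold counts repeats beyond each topic's first
// occurrence and compares the total against the tolerance threshold.
func exceedsDuplicateThreshold(topicIDs []string, threshold int) bool {
	seen := make(map[string]int)
	duplicates := 0
	for _, topic := range topicIDs {
		seen[topic]++
		if seen[topic] > 1 {
			duplicates++ // only repeats after the first occurrence count
		}
	}
	return duplicates > threshold
}

func main() {
	ids := []string{"t1", "t1", "t1", "t2"}
	fmt.Println(exceedsDuplicateThreshold(ids, 1)) // true: 2 duplicates > threshold of 1
}
```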
+
+const (
+	NumberOfWorkersKey = "workers"
+)
diff --git a/network/p2p/config/peer_scoring.go b/network/p2p/config/peer_scoring.go
new file mode 100644
index 00000000000..45785b89b0a
--- /dev/null
+++ b/network/p2p/config/peer_scoring.go
@@ -0,0 +1,239 @@
+package p2pconfig
+
+import "time"
+
+const (
+	PeerScoringKey = "peer-scoring"
+	InternalKey    = "internal"
+	ProtocolKey    = "protocol"
+)
+
+// PeerScoringParameters encapsulates the parameters of the GossipSub scoring system.
+type PeerScoringParameters struct {
+	// Internal is the set of parameters of the GossipSub scoring system that are hosted by
+	// the GossipSub system itself, and are not exposed to the Flow protocol.
+	Internal InternalGossipSubScoreParams `validate:"required" mapstructure:"internal"`
+	// Protocol is the set of parameters of the peer scoring system that is hosted by the Flow protocol.
+	Protocol ProtocolLevelGossipSubScoreParams `validate:"required" mapstructure:"protocol"`
+}
+
+const (
+	AppSpecificScoreWeightKey = "app-specific-score-weight"
+	DecayToZeroKey            = "decay-to-zero"
+	BehaviourKey              = "behaviour"
+	TopicKey                  = "topic"
+	ThresholdsKey             = "thresholds"
+	ThresholdKey              = "threshold"
+)
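The DecayInterval and DecayToZero parameters defined below interact as a periodic geometric decay with a reset floor. A compact sketch of one decay tick under those semantics (values are examples only):

```go
package main

import (
	"fmt"
	"math"
)

// decayTick applies one geometric decay step to a scoring counter and
// resets it once it falls below the decay-to-zero floor.
func decayTick(counter, decay, decayToZero float64) float64 {
	counter *= decay
	if math.Abs(counter) < decayToZero {
		return 0 // the behavior has not been seen for long enough; reset
	}
	return counter
}

func main() {
	c := 0.5
	for i := 0; i < 10; i++ {
		c = decayTick(c, 0.5, 0.01) // decay factor and floor are example values
	}
	fmt.Println(c) // 0: decayed below 0.01 and was reset
}
```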
+
+type InternalGossipSubScoreParams struct {
+	// AppSpecificScoreWeight is the weight for app-specific scores. It is used to scale the app-specific
+	// scores to the same range as the other scores. At the current version, we don't distinguish between the app-specific
+	// scores and the other scores, so we set it to 1.
+	AppSpecificScoreWeight float64 `validate:"gt=0,lte=1" mapstructure:"app-specific-score-weight"`
+	// DecayInterval is the decay interval for the overall score of a peer at the GossipSub scoring
+	// system. We set it to 1 minute: not so short that a malicious node can quickly recover from a penalty,
+	// and not so long that a well-behaved node cannot recover from a penalty in reasonable time.
+	DecayInterval time.Duration `validate:"gte=1m" mapstructure:"decay-interval"`
+	// DecayToZero is the decay to zero for the overall score of a peer at the GossipSub scoring system.
+	// It defines the maximum value below which a peer scoring counter is reset to zero.
+	// This is to prevent the counter from decaying to a very small value.
+	// The value is 0.01, which means that a counter will be reset to zero if it decays to 0.01 or below.
+	// When a counter hits the DecayToZero threshold, it means that the peer did not exhibit the behavior
+	// for a long time, and we can reset the counter.
+	DecayToZero     float64                    `validate:"required" mapstructure:"decay-to-zero"`
+	TopicParameters TopicScoringParameters     `validate:"required" mapstructure:"topic"`
+	Thresholds      InternalScoringThresholds  `validate:"required" mapstructure:"thresholds"`
+	Behaviour       InternalScoringBehavioural `validate:"required" mapstructure:"behaviour"`
+}
+
+const (
+	MaxDebugLogsKey = "max-debug-logs"
+	AppSpecificKey  = "application-specific"
+)
+
+type ProtocolLevelGossipSubScoreParams struct {
+	MaxDebugLogs     uint32                             `validate:"lte=50" mapstructure:"max-debug-logs"`
+	AppSpecificScore ApplicationSpecificScoreParameters `validate:"required" mapstructure:"application-specific"`
+}
+
+const (
+	MaxAppSpecificKey      = "max-app-specific"
+	MinAppSpecificKey      = "min-app-specific"
+	UnknownIdentityKey     = "unknown-identity"
+	InvalidSubscriptionKey = "invalid-subscription"
+	StakedIdentityKey      = "staked-identity"
+	DuplicateMessageKey    = "duplicate-message"
+	RewardKey              = "reward"
+	PenaltyKey             = "penalty"
+)
+
+type ApplicationSpecificScoreParameters struct {
+	// MaxAppSpecificPenalty is the maximum penalty for severe offenses that we apply to a remote node score. The score
+	// mechanism of GossipSub in Flow is designed in a way that all other infractions are penalized with a fraction of
+	// this value. We have also set the other parameters such as DefaultGraylistThreshold, DefaultGossipThreshold and DefaultPublishThreshold to
+	// be a bit higher than this, i.e., MaxAppSpecificPenalty + 1. This ensures that a node with a score of MaxAppSpecificPenalty
+	// will be graylisted (i.e., all incoming and outgoing RPCs are rejected) and will not be able to publish or gossip any messages.
+	MaxAppSpecificPenalty float64 `validate:"lt=0" mapstructure:"max-app-specific-penalty"`
+	// MinAppSpecificPenalty is the minimum penalty for severe offenses that we apply to a remote node score.
+	MinAppSpecificPenalty float64 `validate:"lt=0" mapstructure:"min-app-specific-penalty"`
+	// UnknownIdentityPenalty is the penalty for unknown identity. It is applied to the peer's score when
+	// the peer is not in the identity list.
+	UnknownIdentityPenalty float64 `validate:"lt=0" mapstructure:"unknown-identity-penalty"`
+	// InvalidSubscriptionPenalty is the penalty for invalid subscription. It is applied to the peer's score when
+	// the peer subscribes to a topic that it is not authorized to subscribe to.
+	InvalidSubscriptionPenalty float64 `validate:"lt=0" mapstructure:"invalid-subscription-penalty"`
+	// DuplicateMessagePenalty is the penalty for duplicate messages detected by the gossipsub tracer for a peer.
+	// The penalty is multiplied by the current duplicate message count for a peer before it is applied to the application specific score.
+	DuplicateMessagePenalty float64 `validate:"lt=0" mapstructure:"duplicate-message-penalty"`
+	// DuplicateMessageThreshold the threshold at which the duplicate message count for a peer will result in the peer being penalized.
+	DuplicateMessageThreshold float64 `validate:"gt=0" mapstructure:"duplicate-message-threshold"`
+	// MaxAppSpecificReward is the reward for well-behaving staked peers. If a peer does not have
+	// any misbehavior record, e.g., invalid subscription, invalid message, etc., it will be rewarded with this score.
+	MaxAppSpecificReward float64 `validate:"gt=0" mapstructure:"max-app-specific-reward"`
+	// StakedIdentityReward is the reward for staked peers. It is applied to the peer's score when
+	// the peer does not have any misbehavior record, e.g., invalid subscription, invalid message, etc.
+	// The purpose is to reward the staked peers for their contribution to the network and prioritize them in neighbor selection.
+	StakedIdentityReward float64 `validate:"gt=0" mapstructure:"staked-identity-reward"`
+}
+
+const (
+	GossipThresholdKey             = "gossip"
+	GraylistThresholdKey           = "graylist"
+	AcceptPXThresholdKey           = "accept-px"
+	OpportunisticGraftThresholdKey = "opportunistic-graft"
+)
+
+// InternalScoringThresholds score option threshold configuration parameters.
+type InternalScoringThresholds struct {
+	// Gossip when a peer's penalty drops below this threshold,
+	// no gossip is emitted towards that peer and gossip from that peer is ignored.
+	Gossip float64 `validate:"lt=0" mapstructure:"gossip"`
+	// Publish when a peer's penalty drops below this threshold,
+	// self-published messages are not propagated towards this peer.
+	Publish float64 `validate:"lt=0" mapstructure:"publish"`
+	// Graylist when a peer's penalty drops below this threshold, the peer is graylisted, i.e.,
+	// incoming RPCs from the peer are ignored.
+	Graylist float64 `validate:"lt=0" mapstructure:"graylist"`
+	// AcceptPX when a peer sends us PX information with a prune, we only accept it and connect to the supplied
+	// peers if the originating peer's penalty exceeds this threshold.
+	AcceptPX float64 `validate:"gt=0" mapstructure:"accept-px"`
+	// OpportunisticGraft when the median peer penalty in the mesh drops below this value,
+	// the peer may select more peers with penalty above the median to opportunistically graft on the mesh.
+	OpportunisticGraft float64 `validate:"gt=0" mapstructure:"opportunistic-graft"`
+}
+
+const (
+	BehaviourPenaltyThresholdKey = "penalty-threshold"
+	BehaviourPenaltyWeightKey    = "penalty-weight"
+	BehaviourPenaltyDecayKey     = "penalty-decay"
+)
+
+// InternalScoringBehavioural score option behaviour configuration parameters.
+type InternalScoringBehavioural struct {
+	// PenaltyThreshold is the threshold at which the behavior of a peer is considered bad by GossipSub.
+	// Currently, the misbehavior is defined as advertising an iHave without responding to the iWants (iHave broken promises), as well as attempting
+	// on GRAFT when the peer is considered for a PRUNE backoff, i.e., the local peer does not allow the peer to join the local topic mesh
+	// for a while, and the remote peer keeps attempting on GRAFT (aka GRAFT flood).
+	// When the misbehavior counter of a peer goes beyond this threshold, the peer is penalized by BehaviorPenaltyWeight (see below) for the excess misbehavior.
+	//
+	// An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
+	// For iHave broken promises, the gossipsub scoring works as follows:
+	// It samples ONLY A SINGLE iHave out of the entire RPC.
+	// If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
+	//
+	// The counter is also decayed by (0.99) every decay interval (DecayInterval) i.e., every minute.
+	// Note that misbehaviors are counted by GossipSub across all topics (and this is different from the Application Layer Misbehaviors that we count through
+	// the ALSP system).
+	PenaltyThreshold float64 `validate:"gt=0" mapstructure:"penalty-threshold"`
+	// PenaltyWeight is the weight for applying penalty when a peer misbehavior goes beyond the threshold.
+	// Misbehavior of a peer at the gossipsub layer is defined as advertising an iHave without responding to the iWants (broken promises), as well as attempting
+	// on GRAFT when the peer is considered for a PRUNE backoff, i.e., the local peer does not allow the peer to join the local topic mesh.
+	// This is detected by the GossipSub scoring system, and the peer is penalized by BehaviorPenaltyWeight.
+	//
+	// An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
+	// For iHave broken promises, the gossipsub scoring works as follows:
+	// It samples ONLY A SINGLE iHave out of the entire RPC.
+	// If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
+	PenaltyWeight float64 `validate:"lt=0" mapstructure:"penalty-weight"`
+	// PenaltyDecay is the decay factor applied to the misbehavior counter of a peer (per decay interval). The misbehavior counter is
+	// incremented by GossipSub for iHave broken promises or the GRAFT flooding attacks (i.e., each GRAFT received from a remote peer while that peer is on a PRUNE backoff).
+	//
+	// An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message.
+	// For iHave broken promises, the gossipsub scoring works as follows:
+	// It samples ONLY A SINGLE iHave out of the entire RPC.
+	// If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1.
+	// This means that regardless of how many iHave broken promises an RPC contains, the misbehavior counter is incremented by 1.
+	// That is why we decay the misbehavior counter very slowly, as this counter indicates a severe misbehavior.
+	// The misbehavior counter is decayed per decay interval (i.e., DecayInterval = 1 minute) by GossipSub.
+	//
+	// Note that misbehaviors are counted by GossipSub across all topics (and this is different from the Application Layer Misbehaviors that we count through
+	// the ALSP system that is based on the engines' reports).
+	PenaltyDecay float64 `validate:"gt=0,lt=1" mapstructure:"penalty-decay"`
+}
+
+const (
+	SkipAtomicValidationKey           = "skip-atomic-validation"
+	InvalidMessageDeliveriesWeightKey = "invalid-message-deliveries-weight"
+	InvalidMessageDeliveriesDecayKey  = "invalid-message-deliveries-decay"
+	TimeInMeshQuantumKey              = "time-in-mesh-quantum"
+	TopicWeightKey                    = "topic-weight"
+	MeshMessageDeliveriesDecayKey     = "mesh-message-deliveries-decay"
+	MeshMessageDeliveriesCapKey       = "mesh-message-deliveries-cap"
+	MeshMessageDeliveryThresholdKey   = "mesh-message-deliveries-threshold"
+	MeshDeliveriesWeightKey           = "mesh-deliveries-weight"
+	MeshMessageDeliveriesWindowKey    = "mesh-message-deliveries-window"
+	MeshMessageDeliveryActivationKey  = "mesh-message-delivery-activation"
+)
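The threshold/weight pair above implements GossipSub's standard behaviour penalty: only the counter's excess over the threshold is penalized, quadratically. A worked sketch of that formula (example values, not the configured defaults):

```go
package main

import "fmt"

// behaviourPenalty returns the score contribution for a misbehavior counter,
// i.e., weight * excess^2, where excess is the amount above the threshold.
// The weight is negative, so the result is a penalty.
func behaviourPenalty(counter, threshold, weight float64) float64 {
	if counter <= threshold {
		return 0 // within tolerance: no penalty
	}
	excess := counter - threshold
	return weight * excess * excess
}

func main() {
	// Example values only; the real defaults live in the config files.
	fmt.Println(behaviourPenalty(8, 6, -0.01)) // -0.04
}
```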
By an invalid message, we mean a message that is not signed by the + // publisher, or a message that is not signed by the peer that sent it. + InvalidMessageDeliveriesWeight float64 `validate:"lt=0" mapstructure:"invalid-message-deliveries-weight"` + // InvalidMessageDeliveriesDecay decay factor used to decay the number of invalid message deliveries. + // The total number of invalid message deliveries is multiplied by this factor at each heartbeat interval to + // decay the number of invalid message deliveries, and prevent the peer from being disconnected if it stops + // sending invalid messages. + InvalidMessageDeliveriesDecay float64 `validate:"gt=0,lt=1" mapstructure:"invalid-message-deliveries-decay"` + // TimeInMeshQuantum is the time in mesh quantum for the GossipSub scoring system. It is used to gauge + // a discrete time interval for the time in mesh counter. + TimeInMeshQuantum time.Duration `validate:"gte=1h" mapstructure:"time-in-mesh-quantum"` + // Weight is the weight of a topic in the GossipSub scoring system. + // The overall score of a peer in a topic mesh is multiplied by the weight of the topic when calculating the overall score of the peer. + TopicWeight float64 `validate:"gt=0" mapstructure:"topic-weight"` + // MeshMessageDeliveriesDecay is applied to the number of actual message deliveries in a topic mesh + // at each decay interval (i.e., DecayInterval). + // It is used to decay the number of actual message deliveries, and prevents past message + // deliveries from affecting the current score of the peer. + MeshMessageDeliveriesDecay float64 `validate:"gt=0" mapstructure:"mesh-message-deliveries-decay"` + // MeshMessageDeliveriesCap is the maximum number of actual message deliveries in a topic + // mesh that is used to calculate the score of a peer in that topic mesh. + MeshMessageDeliveriesCap float64 `validate:"gt=0" mapstructure:"mesh-message-deliveries-cap"` + // MeshMessageDeliveryThreshold is the threshold for the number of actual message deliveries in a + // topic mesh that is used to calculate the score of a peer in that topic mesh. + // If the number of actual message deliveries in a topic mesh is less than this value, + // the peer will be penalized by square of the difference between the actual message deliveries and the threshold, + // i.e., -w * (actual - threshold)^2 where `actual` and `threshold` are the actual message deliveries and the + // threshold, respectively, and `w` is the weight (i.e., MeshMessageDeliveriesWeight). + MeshMessageDeliveryThreshold float64 `validate:"gt=0" mapstructure:"mesh-message-deliveries-threshold"` + // MeshDeliveriesWeight is the weight for applying penalty when a peer is under-performing in a topic mesh. + // Upon every decay interval, if the number of actual message deliveries is less than the topic mesh message deliveries threshold + // (i.e., MeshMessageDeliveriesThreshold), the peer will be penalized by square of the difference between the actual + // message deliveries and the threshold, multiplied by this weight, i.e., -w * (actual - threshold)^2 where w is the weight, and + // `actual` and `threshold` are the actual message deliveries and the threshold, respectively. + MeshDeliveriesWeight float64 `validate:"lt=0" mapstructure:"mesh-deliveries-weight"` + // MeshMessageDeliveriesWindow is the window size is time interval that we count a delivery of an already + // seen message towards the score of a peer in a topic mesh. 
The delivery is counted + // by GossipSub only if the previous sender of the message is different from the current sender. + MeshMessageDeliveriesWindow time.Duration `validate:"gte=1m" mapstructure:"mesh-message-deliveries-window"` + // MeshMessageDeliveryActivation is the time interval that we wait for a new peer that joins a topic mesh + // till start counting the number of actual message deliveries of that peer in that topic mesh. + MeshMessageDeliveryActivation time.Duration `validate:"gte=2m" mapstructure:"mesh-message-delivery-activation"` +} diff --git a/network/p2p/config/score_registry.go b/network/p2p/config/score_registry.go new file mode 100644 index 00000000000..ef35dc8bf77 --- /dev/null +++ b/network/p2p/config/score_registry.go @@ -0,0 +1,107 @@ +package p2pconfig + +import "time" + +const ( + SpamRecordCacheKey = "spam-record-cache" + ScoringRegistryKey = "scoring-registry" + AppSpecificScoreRegistryKey = "app-specific-score" + StartupSilenceDurationKey = "startup-silence-duration" +) + +type ScoringRegistryParameters struct { + // StartupSilenceDuration defines the duration of time, after the node startup, + // during which the scoring registry remains inactive before penalizing nodes. + // Throughout this startup silence period, the application-specific penalty + // for all nodes will be set to 0, and any invalid control message notifications + // will be ignored. + // + // This configuration allows nodes to stabilize and initialize before + // applying penalties or responding processing invalid control message notifications. + StartupSilenceDuration time.Duration `validate:"gt=10m" mapstructure:"startup-silence-duration"` + AppSpecificScore AppSpecificScoreParameters `validate:"required" mapstructure:"app-specific-score"` + SpamRecordCache SpamRecordCacheParameters `validate:"required" mapstructure:"spam-record-cache"` + MisbehaviourPenalties MisbehaviourPenalties `validate:"required" mapstructure:"misbehaviour-penalties"` +} + +const ( + ScoreUpdateWorkerNumKey = "score-update-worker-num" + ScoreUpdateRequestQueueSizeKey = "score-update-request-queue-size" + ScoreTTLKey = "score-ttl" + InvalidControlMessageNotificationQueueSizeKey = "invalid-control-message-notification-queue-size" +) + +// AppSpecificScoreParameters is the parameters for the GossipSubAppSpecificScoreRegistry. +// Parameters are "numerical values" that are used to compute or build components that compute or maintain the application specific score of peers. +type AppSpecificScoreParameters struct { + // ScoreUpdateWorkerNum is the number of workers in the worker pool for handling the application specific score update of peers in a non-blocking way. + ScoreUpdateWorkerNum int `validate:"gt=0" mapstructure:"score-update-worker-num"` + + // ScoreUpdateRequestQueueSize is the size of the worker pool for handling the application specific score update of peers in a non-blocking way. + ScoreUpdateRequestQueueSize uint32 `validate:"gt=0" mapstructure:"score-update-request-queue-size"` + + // InvalidControlMessageNotificationQueueSize is the size of the queue for handling invalid control message notifications in a non-blocking way. + InvalidControlMessageNotificationQueueSize uint32 `validate:"gt=0" mapstructure:"invalid-control-message-notification-queue-size"` + + // ScoreTTL is the time to live of the application specific score of a peer; the registry keeps a cached copy of the + // application specific score of a peer for this duration. 
+
+// AppSpecificScoreParameters is the parameters for the GossipSubAppSpecificScoreRegistry.
+// Parameters are "numerical values" that are used to compute or build components that compute or maintain the application specific score of peers.
+type AppSpecificScoreParameters struct {
+	// ScoreUpdateWorkerNum is the number of workers in the worker pool for handling the application specific score update of peers in a non-blocking way.
+	ScoreUpdateWorkerNum int `validate:"gt=0" mapstructure:"score-update-worker-num"`
+
+	// ScoreUpdateRequestQueueSize is the size of the worker pool for handling the application specific score update of peers in a non-blocking way.
+	ScoreUpdateRequestQueueSize uint32 `validate:"gt=0" mapstructure:"score-update-request-queue-size"`
+
+	// InvalidControlMessageNotificationQueueSize is the size of the queue for handling invalid control message notifications in a non-blocking way.
+	InvalidControlMessageNotificationQueueSize uint32 `validate:"gt=0" mapstructure:"invalid-control-message-notification-queue-size"`
+
+	// ScoreTTL is the time to live of the application specific score of a peer; the registry keeps a cached copy of the
+	// application specific score of a peer for this duration. When the duration expires, the application specific score
+	// of the peer is updated asynchronously. As long as the update is in progress, the cached copy of the application
+	// specific score of the peer is used even if it is expired.
+	ScoreTTL time.Duration `validate:"required" mapstructure:"score-ttl"`
+}
+
+const (
+	DecayKey = "decay"
+)
+
+type SpamRecordCacheParameters struct {
+	// CacheSize is the size of the cache used to store the spam records of peers.
+	// The spam records are used to penalize peers that send invalid messages.
+	CacheSize uint32               `validate:"gt=0" mapstructure:"cache-size"`
+	Decay     SpamRecordCacheDecay `validate:"required" mapstructure:"decay"`
+}
+
+const (
+	PenaltyDecaySlowdownThresholdKey = "penalty-decay-slowdown-threshold"
+	DecayRateReductionFactorKey      = "penalty-decay-rate-reduction-factor"
+	PenaltyDecayEvaluationPeriodKey  = "penalty-decay-evaluation-period"
+	MinimumSpamPenaltyDecayFactorKey = "minimum-spam-penalty-decay-factor"
+	MaximumSpamPenaltyDecayFactorKey = "maximum-spam-penalty-decay-factor"
+	SkipDecayThresholdKey            = "skip-decay-threshold"
+)
+
+type SpamRecordCacheDecay struct {
+	// PenaltyDecaySlowdownThreshold defines the penalty level at which the decay rate is reduced by `DecayRateReductionFactor` every time the penalty of a node falls below the threshold, thereby slowing down the decay process.
+	// This mechanism ensures that malicious nodes experience longer decay periods, while honest nodes benefit from quicker decay.
+	PenaltyDecaySlowdownThreshold float64 `validate:"lt=0" mapstructure:"penalty-decay-slowdown-threshold"`
+
+	// DecayRateReductionFactor defines the value by which the decay rate is decreased every time the penalty is below the PenaltyDecaySlowdownThreshold. A reduced decay rate extends the time it takes for penalties to diminish.
+	DecayRateReductionFactor float64 `validate:"gt=0,lt=1" mapstructure:"penalty-decay-rate-reduction-factor"`
+
+	// PenaltyDecayEvaluationPeriod defines the interval at which the decay for a spam record may be adjusted.
+ PenaltyDecayEvaluationPeriod time.Duration `validate:"gt=0" mapstructure:"penalty-decay-evaluation-period"` + + SkipDecayThreshold float64 `validate:"gt=-1,lt=0" mapstructure:"skip-decay-threshold"` + + MinimumSpamPenaltyDecayFactor float64 `validate:"gt=0,lte=1" mapstructure:"minimum-spam-penalty-decay-factor"` + MaximumSpamPenaltyDecayFactor float64 `validate:"gt=0,lte=1" mapstructure:"maximum-spam-penalty-decay-factor"` +} + +const ( + MisbehaviourPenaltiesKey = "misbehaviour-penalties" + GraftKey = "graft" + PruneKey = "prune" + IWantKey = "iwant" + IHaveKey = "ihave" + PublishKey = "publish" + ClusterPrefixedReductionFactorKey = "cluster-prefixed-reduction-factor" +) + +type MisbehaviourPenalties struct { + GraftMisbehaviour float64 `validate:"lt=0" mapstructure:"graft"` + PruneMisbehaviour float64 `validate:"lt=0" mapstructure:"prune"` + IHaveMisbehaviour float64 `validate:"lt=0" mapstructure:"ihave"` + IWantMisbehaviour float64 `validate:"lt=0" mapstructure:"iwant"` + PublishMisbehaviour float64 `validate:"lt=0" mapstructure:"publish"` + ClusterPrefixedReductionFactor float64 `validate:"gt=0,lte=1" mapstructure:"cluster-prefixed-reduction-factor"` +} diff --git a/network/p2p/connection/connManager.go b/network/p2p/connection/connManager.go index 9483da30d75..094c5ffa833 100644 --- a/network/p2p/connection/connManager.go +++ b/network/p2p/connection/connManager.go @@ -3,7 +3,6 @@ package connection import ( "context" "fmt" - "time" "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/network" @@ -12,35 +11,10 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network/netconf" "github.com/onflow/flow-go/network/p2p/connection/internal" ) -const ( - // defaultHighWatermark is the default value for the high watermark (i.e., max number of connections). - // We assume a complete topology graph with maximum of 500 nodes. - defaultHighWatermark = 500 - - // defaultLowWatermark is the default value for the low watermark (i.e., min number of connections). - // We assume a complete topology graph with minimum of 450 nodes. - defaultLowWatermark = 450 - - // defaultGracePeriod is the default value for the grace period (i.e., time to wait before pruning a new connection). - defaultGracePeriod = 1 * time.Minute - - // defaultSilencePeriod is the default value for the silence period (i.e., time to wait before start pruning connections). - defaultSilencePeriod = 10 * time.Second -) - -// DefaultConnManagerConfig returns the default configuration for the connection manager. -func DefaultConnManagerConfig() *ManagerConfig { - return &ManagerConfig{ - HighWatermark: defaultHighWatermark, - LowWatermark: defaultLowWatermark, - GracePeriod: defaultGracePeriod, - SilencePeriod: defaultSilencePeriod, - } -} - // ConnManager provides an implementation of Libp2p's ConnManager interface (https://pkg.go.dev/github.com/libp2p/go-libp2p/core/connmgr#ConnManager) // It is called back by libp2p when certain events occur such as opening/closing a stream, opening/closing connection etc. // Current implementation primarily acts as a wrapper around libp2p's BasicConnMgr (https://pkg.go.dev/github.com/libp2p/go-libp2p/p2p/net/connmgr#BasicConnMgr). @@ -53,33 +27,11 @@ type ConnManager struct { var _ connmgr.ConnManager = (*ConnManager)(nil) -type ManagerConfig struct { - // HighWatermark and LowWatermark govern the number of connections are maintained by the ConnManager. 
- // When the peer count exceeds the HighWatermark, as many peers will be pruned (and - // their connections terminated) until LowWatermark peers remain. In other words, whenever the - // peer count is x > HighWatermark, the ConnManager will prune x - LowWatermark peers. - // The pruning algorithm is as follows: - // 1. The ConnManager will not prune any peers that have been connected for less than GracePeriod. - // 2. The ConnManager will not prune any peers that are protected. - // 3. The ConnManager will sort the peers based on their number of streams and direction of connections, and - // prunes the peers with the least number of streams. If there are ties, the peer with the incoming connection - // will be pruned. If both peers have incoming connections, and there are still ties, one of the peers will be - // pruned at random. - // Algorithm implementation is in https://github.com/libp2p/go-libp2p/blob/master/p2p/net/connmgr/connmgr.go#L262-L318 - HighWatermark int // naming from libp2p - LowWatermark int // naming from libp2p - - // SilencePeriod is the time to wait before start pruning connections. - SilencePeriod time.Duration // naming from libp2p - // GracePeriod is the time to wait before pruning a new connection. - GracePeriod time.Duration // naming from libp2p -} - // NewConnManager creates a new connection manager. // It errors if creating the basic connection manager of libp2p fails. // The error is not benign, and we should crash the node if it happens. // It is a malpractice to start the node without connection manager. -func NewConnManager(logger zerolog.Logger, metric module.LibP2PConnectionMetrics, cfg *ManagerConfig) (*ConnManager, error) { +func NewConnManager(logger zerolog.Logger, metric module.LibP2PConnectionMetrics, cfg *netconf.ConnectionManager) (*ConnManager, error) { basic, err := libp2pconnmgr.NewConnManager( cfg.LowWatermark, cfg.HighWatermark, @@ -146,3 +98,7 @@ func (cm *ConnManager) TrimOpenConns(ctx context.Context) { func (cm *ConnManager) Close() error { return cm.basicConnMgr.Close() } + +func (cm *ConnManager) CheckLimit(l connmgr.GetConnLimiter) error { + return cm.basicConnMgr.CheckLimit(l) +} diff --git a/network/p2p/connection/connManager_test.go b/network/p2p/connection/connManager_test.go index 33808381de0..6b74e9c097c 100644 --- a/network/p2p/connection/connManager_test.go +++ b/network/p2p/connection/connManager_test.go @@ -11,13 +11,15 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/config" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network/internal/p2pfixtures" + "github.com/onflow/flow-go/network/netconf" "github.com/onflow/flow-go/network/p2p/connection" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/utils" - - "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" ) @@ -54,8 +56,10 @@ var isNotProtected = fun{ func TestConnectionManagerProtection(t *testing.T) { log := zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) + flowConfig, err := config.DefaultConfig() + require.NoError(t, err) noopMetrics := metrics.NewNoopCollector() - connManager, err := connection.NewConnManager(log, noopMetrics, connection.DefaultConnManagerConfig()) + connManager, err := connection.NewConnManager(log, noopMetrics, &flowConfig.NetworkConfig.ConnectionManager) require.NoError(t, err) testCases := [][]fun{ @@ 
-89,7 +93,7 @@ func testSequence(t *testing.T, sequence []fun, connMgr *connection.ConnManager) func generatePeerInfo(t *testing.T) peer.ID { key := p2pfixtures.NetworkingKeyFixtures(t) identity := unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("1.1.1.1:0")) - pInfo, err := utils.PeerAddressInfo(*identity) + pInfo, err := utils.PeerAddressInfo(identity.IdentitySkeleton) require.NoError(t, err) return pInfo.ID } @@ -102,7 +106,7 @@ func TestConnectionManager_Watermarking(t *testing.T) { signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) defer cancel() - cfg := &connection.ManagerConfig{ + cfg := &netconf.ConnectionManager{ HighWatermark: 4, // whenever the number of connections exceeds 4, connection manager prune connections. LowWatermark: 2, // connection manager prune connections until the number of connections is 2. GracePeriod: 500 * time.Millisecond, // extra connections will be pruned if they are older than a second (just for testing). @@ -113,23 +117,25 @@ func TestConnectionManager_Watermarking(t *testing.T) { metrics.NewNoopCollector(), cfg) require.NoError(t, err) - - thisNode, _ := p2ptest.NodeFixture( + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) + thisNode, identity := p2ptest.NodeFixture( t, sporkId, t.Name(), + idProvider, p2ptest.WithConnectionManager(thisConnMgr)) + idProvider.SetIdentities(flow.IdentityList{&identity}) - otherNodes, _ := p2ptest.NodesFixture(t, sporkId, t.Name(), 5) + otherNodes, _ := p2ptest.NodesFixture(t, sporkId, t.Name(), 5, idProvider) nodes := append(otherNodes, thisNode) - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) // connect this node to all other nodes. for _, otherNode := range otherNodes { - require.NoError(t, thisNode.Host().Connect(ctx, otherNode.Host().Peerstore().PeerInfo(otherNode.Host().ID()))) + require.NoError(t, thisNode.Host().Connect(ctx, otherNode.Host().Peerstore().PeerInfo(otherNode.ID()))) } // ensures this node is connected to all other nodes (based on the number of connections). @@ -147,8 +153,8 @@ func TestConnectionManager_Watermarking(t *testing.T) { // connects this node to one of the other nodes that is pruned by connection manager. for _, otherNode := range otherNodes { - if len(thisNode.Host().Network().ConnsToPeer(otherNode.Host().ID())) == 0 { - require.NoError(t, thisNode.Host().Connect(ctx, otherNode.Host().Peerstore().PeerInfo(otherNode.Host().ID()))) + if len(thisNode.Host().Network().ConnsToPeer(otherNode.ID())) == 0 { + require.NoError(t, thisNode.Host().Connect(ctx, otherNode.Host().Peerstore().PeerInfo(otherNode.ID()))) break // we only need to connect to one node. 
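Since DefaultConnManagerConfig is removed by this change, construction goes through the flattened flow config instead, as the updated test above shows. A minimal sketch of the new wiring; the watermark values are placeholders, not recommended defaults:

import (
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/config"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/network/p2p/connection"
)

// newDefaultConnManager sketches the post-refactor wiring: watermarks, grace and
// silence periods now come from flowConfig.NetworkConfig.ConnectionManager.
func newDefaultConnManager(logger zerolog.Logger) (*connection.ConnManager, error) {
	flowConfig, err := config.DefaultConfig()
	if err != nil {
		return nil, err
	}
	cmCfg := flowConfig.NetworkConfig.ConnectionManager
	cmCfg.HighWatermark = 500 // placeholder: prune once the peer count exceeds this
	cmCfg.LowWatermark = 450  // placeholder: prune until this many peers remain
	return connection.NewConnManager(logger, metrics.NewNoopCollector(), &cmCfg)
}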
} } diff --git a/network/p2p/connection/connection_gater.go b/network/p2p/connection/connection_gater.go index 2ee0df16331..140f92cb87b 100644 --- a/network/p2p/connection/connection_gater.go +++ b/network/p2p/connection/connection_gater.go @@ -1,9 +1,9 @@ package connection import ( + "fmt" "sync" - "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/control" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" @@ -12,10 +12,11 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" "github.com/onflow/flow-go/utils/logging" ) -var _ connmgr.ConnectionGater = (*ConnGater)(nil) +var _ p2p.ConnectionGater = (*ConnGater)(nil) // ConnGaterOption allow the connection gater to be configured with a list of PeerFilter funcs for a specific conn gater callback. // In the current implementation of the ConnGater the following callbacks can be configured with peer filters. @@ -44,6 +45,11 @@ type ConnGater struct { onInterceptPeerDialFilters []p2p.PeerFilter onInterceptSecuredFilters []p2p.PeerFilter + // disallowListOracle is consulted upon every incoming or outgoing connection attempt, and the connection is only + // allowed if the remote peer is not on the disallow list. + // A ConnGater must have a disallowListOracle set, and if one is not set the ConnGater will panic. + disallowListOracle p2p.DisallowListOracle + // identityProvider provides the identity of a node given its peer ID for logging purposes only. // It is not used for allowlisting or filtering. We use the onInterceptPeerDialFilters and onInterceptSecuredFilters // to determine if a node should be allowed to connect. @@ -66,7 +72,15 @@ func NewConnGater(log zerolog.Logger, identityProvider module.IdentityProvider, // InterceptPeerDial - a callback which allows or disallows outbound connection func (c *ConnGater) InterceptPeerDial(p peer.ID) bool { - lg := c.log.With().Str("peer_id", p.String()).Logger() + lg := c.log.With().Str("peer_id", p2plogging.PeerId(p)).Logger() + + disallowListCauses, disallowListed := c.disallowListOracle.IsDisallowListed(p) + if disallowListed { + lg.Warn(). + Str("disallow_list_causes", fmt.Sprintf("%v", disallowListCauses)). + Msg("outbound connection attempt to disallow listed peer is rejected") + return false + } if len(c.onInterceptPeerDialFilters) == 0 { lg.Warn(). @@ -95,7 +109,7 @@ func (c *ConnGater) InterceptPeerDial(p peer.ID) bool { return false } - lg.Info().Msg("outbound connection established") + lg.Debug().Msg("outbound connection established") return true } @@ -115,10 +129,18 @@ func (c *ConnGater) InterceptSecured(dir network.Direction, p peer.ID, addr netw switch dir { case network.DirInbound: lg := c.log.With(). - Str("peer_id", p.String()). + Str("peer_id", p2plogging.PeerId(p)). Str("remote_address", addr.RemoteMultiaddr().String()). Logger() + disallowListCauses, disallowListed := c.disallowListOracle.IsDisallowListed(p) + if disallowListed { + lg.Warn(). + Str("disallow_list_causes", fmt.Sprintf("%v", disallowListCauses)). 
+ Msg("inbound connection attempt to disallow listed peer is rejected") + return false + } + if len(c.onInterceptSecuredFilters) == 0 { lg.Warn().Msg("inbound connection established with no intercept secured filters") return true @@ -147,7 +169,7 @@ func (c *ConnGater) InterceptSecured(dir network.Direction, p peer.ID, addr netw return false } - lg.Info().Msg("inbound connection established") + lg.Debug().Msg("inbound connection established") return true default: // outbound connection should have been already blocked before this call @@ -169,3 +191,28 @@ func (c *ConnGater) peerIDPassesAllFilters(p peer.ID, filters []p2p.PeerFilter) return nil } + +// SetDisallowListOracle sets the disallow list oracle for the connection gater. +// If one is set, the oracle is consulted upon every incoming or outgoing connection attempt, and +// the connection is only allowed if the remote peer is not on the disallow list. +// In Flow blockchain, it is not optional to dismiss the disallow list oracle, and if one is not set +// the connection gater will panic. +// Also, it follows a dependency injection pattern and does not allow to set the disallow list oracle more than once, +// any subsequent calls to this method will result in a panic. +// Args: +// +// oracle: the disallow list oracle to set. +// +// Returns: +// +// none +// +// Panics: +// +// if the disallow list oracle is already set. +func (c *ConnGater) SetDisallowListOracle(oracle p2p.DisallowListOracle) { + if c.disallowListOracle != nil { + panic("disallow list oracle already set") + } + c.disallowListOracle = oracle +} diff --git a/network/p2p/connection/connection_gater_test.go b/network/p2p/connection/connection_gater_test.go index 88868624042..beb7cc218ac 100644 --- a/network/p2p/connection/connection_gater_test.go +++ b/network/p2p/connection/connection_gater_test.go @@ -13,15 +13,19 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/irrecoverable" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/p2pfixtures" - "github.com/onflow/flow-go/network/internal/testutils" "github.com/onflow/flow-go/network/p2p" + p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config" + "github.com/onflow/flow-go/network/p2p/connection" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" mockp2p "github.com/onflow/flow-go/network/p2p/mock" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/unicast/stream" + "github.com/onflow/flow-go/utils/concurrentmap" "github.com/onflow/flow-go/utils/unittest" ) @@ -33,36 +37,38 @@ func TestConnectionGating(t *testing.T) { sporkID := unittest.IdentifierFixture() idProvider := mockmodule.NewIdentityProvider(t) // create 2 nodes - node1Peers := unittest.NewProtectedMap[peer.ID, struct{}]() + node1Peers := concurrentmap.New[peer.ID, struct{}]() node1, node1Id := p2ptest.NodeFixture( t, sporkID, t.Name(), - p2ptest.WithConnectionGater(testutils.NewConnectionGater(idProvider, func(p peer.ID) error { + idProvider, + p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(p peer.ID) error { if !node1Peers.Has(p) { - return fmt.Errorf("id not found: %s", p.String()) + return fmt.Errorf("id not found: %s", p2plogging.PeerId(p)) } return nil }))) - idProvider.On("ByPeerID", node1.Host().ID()).Return(&node1Id, true).Maybe() + idProvider.On("ByPeerID", 
node1.ID()).Return(&node1Id, true).Maybe() - node2Peers := unittest.NewProtectedMap[peer.ID, struct{}]() + node2Peers := concurrentmap.New[peer.ID, struct{}]() node2, node2Id := p2ptest.NodeFixture( t, sporkID, t.Name(), - p2ptest.WithConnectionGater(testutils.NewConnectionGater(idProvider, func(p peer.ID) error { + idProvider, + p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(p peer.ID) error { if !node2Peers.Has(p) { - return fmt.Errorf("id not found: %s", p.String()) + return fmt.Errorf("id not found: %s", p2plogging.PeerId(p)) } return nil }))) - idProvider.On("ByPeerID", node2.Host().ID()).Return(&node2Id, true).Maybe() + idProvider.On("ByPeerID", node2.ID()).Return(&node2Id, true).Maybe() nodes := []p2p.LibP2PNode{node1, node2} ids := flow.IdentityList{&node1Id, &node2Id} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) p2pfixtures.AddNodesToEachOthersPeerStore(t, nodes, ids) @@ -70,7 +76,7 @@ func TestConnectionGating(t *testing.T) { // although nodes have each other addresses, they are not in the allow-lists of each other. // so they should not be able to connect to each other. p2pfixtures.EnsureNoStreamCreationBetweenGroups(t, ctx, []p2p.LibP2PNode{node1}, []p2p.LibP2PNode{node2}, func(t *testing.T, err error) { - require.True(t, stream.IsErrGaterDisallowedConnection(err)) + require.Truef(t, stream.IsErrGaterDisallowedConnection(err), "expected ErrGaterDisallowedConnection, got: %v", err) }) }) @@ -80,12 +86,12 @@ func TestConnectionGating(t *testing.T) { // the connection gater on the listening node is checking the allow-list upon accepting the connection. // add node2 to node1's allow list, but not the other way around. - node1Peers.Add(node2.Host().ID(), struct{}{}) + node1Peers.Add(node2.ID(), struct{}{}) // from node2 -> node1 should also NOT work, since node 1 is not in node2's allow list for dialing! p2pfixtures.EnsureNoStreamCreation(t, ctx, []p2p.LibP2PNode{node2}, []p2p.LibP2PNode{node1}, func(t *testing.T, err error) { // dialing node-1 by node-2 should fail locally at the connection gater of node-2. - require.True(t, stream.IsErrGaterDisallowedConnection(err)) + require.Truef(t, stream.IsErrGaterDisallowedConnection(err), "expected ErrGaterDisallowedConnection, got: %v", err) }) // now node2 should be able to connect to node1. @@ -95,8 +101,8 @@ func TestConnectionGating(t *testing.T) { t.Run("outbound connection to an approved node is allowed", func(t *testing.T) { // adding both nodes to each other's allow lists. - node1Peers.Add(node2.Host().ID(), struct{}{}) - node2Peers.Add(node1.Host().ID(), struct{}{}) + node1Peers.Add(node2.ID(), struct{}{}) + node2Peers.Add(node1.ID(), struct{}{}) // now both nodes should be able to connect to each other. p2ptest.EnsureStreamCreationInBothDirections(t, ctx, []p2p.LibP2PNode{node1, node2}) @@ -107,6 +113,7 @@ func TestConnectionGating(t *testing.T) { // The test directly mocks the underlying resource manager metrics of the libp2p native resource manager to ensure that the // expected set of resources are allocated for the connection upon establishment. 
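The disallow-list oracle added above is deliberately set-once. A rough wiring sketch follows; the exact NewConnGater parameter list beyond the logger and identity provider is not shown in this hunk, so treat the call as illustrative:

// wireGater sketches the intended lifecycle: construct the gater, inject the oracle
// exactly once before the node starts serving connections, and rely on the panic to
// surface wiring bugs at startup rather than at runtime.
func wireGater(logger zerolog.Logger, idProvider module.IdentityProvider, oracle p2p.DisallowListOracle) *connection.ConnGater {
	gater := connection.NewConnGater(logger, idProvider) // illustrative arity
	gater.SetDisallowListOracle(oracle)                  // mandatory before use
	// gater.SetDisallowListOracle(oracle)               // a second call would panic by design
	return gater
}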
func TestConnectionGating_ResourceAllocation_AllowListing(t *testing.T) { + unittest.SkipUnless(t, unittest.TEST_FLAKY, "flakey tests") ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) @@ -117,6 +124,7 @@ func TestConnectionGating_ResourceAllocation_AllowListing(t *testing.T) { t, sporkID, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleConsensus)) node2Metrics := mockmodule.NewNetworkMetrics(t) @@ -126,11 +134,11 @@ func TestConnectionGating_ResourceAllocation_AllowListing(t *testing.T) { // we expect the libp2p.identify service to be used to establish the connection. node2Metrics.On("AllowService", "libp2p.identify").Return() // we expect the node2 attaching node1 to the incoming connection. - node2Metrics.On("AllowPeer", node1.Host().ID()).Return() + node2Metrics.On("AllowPeer", node1.ID()).Return() // we expect node2 allocate memory for the incoming connection. node2Metrics.On("AllowMemory", mock.Anything) // we expect node2 to allow the stream to be created. - node2Metrics.On("AllowStream", node1.Host().ID(), mock.Anything) + node2Metrics.On("AllowStream", node1.ID(), mock.Anything) // we expect node2 to attach protocol to the created stream. node2Metrics.On("AllowProtocol", mock.Anything).Return() @@ -139,25 +147,33 @@ func TestConnectionGating_ResourceAllocation_AllowListing(t *testing.T) { node2Metrics.On("InboundConnections", mock.Anything).Return() node2Metrics.On("OutboundConnections", mock.Anything).Return() + // Libp2p control message validation metrics, these may or may not be called depending on the machine the test is running on and how long + // the nodes in the test run for. + node2Metrics.On("BlockingPreProcessingStarted", mock.Anything, mock.Anything).Maybe() + node2Metrics.On("BlockingPreProcessingFinished", mock.Anything, mock.Anything, mock.Anything).Maybe() + node2Metrics.On("AsyncProcessingStarted", mock.Anything).Maybe() + node2Metrics.On("AsyncProcessingFinished", mock.Anything, mock.Anything).Maybe() + // we create node2 with a connection gater that allows all connections and the mocked metrics collector. node2, node2Id := p2ptest.NodeFixture( t, sporkID, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithMetricsCollector(node2Metrics), // we use default resource manager rather than the test resource manager to ensure that the metrics are called. p2ptest.WithDefaultResourceManager(), - p2ptest.WithConnectionGater(testutils.NewConnectionGater(idProvider, func(p peer.ID) error { + p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(p peer.ID) error { return nil // allow all connections. 
}))) - idProvider.On("ByPeerID", node1.Host().ID()).Return(&node1Id, true).Maybe() - idProvider.On("ByPeerID", node2.Host().ID()).Return(&node2Id, true).Maybe() + idProvider.On("ByPeerID", node1.ID()).Return(&node1Id, true).Maybe() + idProvider.On("ByPeerID", node2.ID()).Return(&node2Id, true).Maybe() nodes := []p2p.LibP2PNode{node1, node2} ids := flow.IdentityList{&node1Id, &node2Id} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) p2pfixtures.AddNodesToEachOthersPeerStore(t, nodes, ids) @@ -179,6 +195,7 @@ func TestConnectionGating_ResourceAllocation_DisAllowListing(t *testing.T) { t, sporkID, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleConsensus)) node2Metrics := mockmodule.NewNetworkMetrics(t) @@ -187,20 +204,21 @@ func TestConnectionGating_ResourceAllocation_DisAllowListing(t *testing.T) { t, sporkID, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithMetricsCollector(node2Metrics), // we use default resource manager rather than the test resource manager to ensure that the metrics are called. p2ptest.WithDefaultResourceManager(), - p2ptest.WithConnectionGater(testutils.NewConnectionGater(idProvider, func(p peer.ID) error { + p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(p peer.ID) error { return fmt.Errorf("disallowed connection") // rejecting all connections. }))) - idProvider.On("ByPeerID", node1.Host().ID()).Return(&node1Id, true).Maybe() - idProvider.On("ByPeerID", node2.Host().ID()).Return(&node2Id, true).Maybe() + idProvider.On("ByPeerID", node1.ID()).Return(&node1Id, true).Maybe() + idProvider.On("ByPeerID", node2.ID()).Return(&node2Id, true).Maybe() nodes := []p2p.LibP2PNode{node1, node2} ids := flow.IdentityList{&node1Id, &node2Id} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) p2pfixtures.AddNodesToEachOthersPeerStore(t, nodes, ids) @@ -228,21 +246,27 @@ func TestConnectionGater_InterceptUpgrade(t *testing.T) { count := 5 nodes := make([]p2p.LibP2PNode, 0, count) inbounds := make([]chan string, 0, count) + identities := make(flow.IdentityList, 0, count) - disallowedPeerIds := unittest.NewProtectedMap[peer.ID, struct{}]() + disallowedPeerIds := concurrentmap.New[peer.ID, struct{}]() allPeerIds := make(peer.IDSlice, 0, count) - + idProvider := mockmodule.NewIdentityProvider(t) connectionGater := mockp2p.NewConnectionGater(t) for i := 0; i < count; i++ { handler, inbound := p2ptest.StreamHandlerFixture(t) - node, _ := p2ptest.NodeFixture( + node, id := p2ptest.NodeFixture( t, sporkId, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithDefaultStreamHandler(handler), // enable peer manager, with a 1-second refresh rate, and connection pruning enabled. 
- p2ptest.WithPeerManagerEnabled(true, 1*time.Second, func() peer.IDSlice { + p2ptest.WithPeerManagerEnabled(&p2pbuilderconfig.PeerManagerConfig{ + ConnectionPruning: true, + UpdateInterval: 1 * time.Second, + ConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(), + }, func() peer.IDSlice { list := make(peer.IDSlice, 0) for _, pid := range allPeerIds { if !disallowedPeerIds.Has(pid) { @@ -252,9 +276,10 @@ func TestConnectionGater_InterceptUpgrade(t *testing.T) { return list }), p2ptest.WithConnectionGater(connectionGater)) - + idProvider.On("ByPeerID", node.ID()).Return(&id, true).Maybe() nodes = append(nodes, node) - allPeerIds = append(allPeerIds, node.Host().ID()) + identities = append(identities, &id) + allPeerIds = append(allPeerIds, node.ID()) inbounds = append(inbounds, inbound) } @@ -272,11 +297,11 @@ func TestConnectionGater_InterceptUpgrade(t *testing.T) { connectionGater.On("InterceptAccept", mock.Anything).Return(true) // adds first node to disallowed list - disallowedPeerIds.Add(nodes[0].Host().ID(), struct{}{}) + disallowedPeerIds.Add(nodes[0].ID(), struct{}{}) // starts the nodes - p2ptest.StartNodes(t, signalerCtx, nodes, 1*time.Second) - defer p2ptest.StopNodes(t, nodes, cancel, 1*time.Second) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) // Checks that only an allowed REMOTE node can establish an upgradable connection. connectionGater.On("InterceptUpgraded", mock.Anything).Run(func(args mock.Arguments) { @@ -289,7 +314,7 @@ func TestConnectionGater_InterceptUpgrade(t *testing.T) { require.False(t, disallowedPeerIds.Has(remote)) }).Return(true, control.DisconnectReason(0)) - ensureCommunicationSilenceAmongGroups(t, ctx, sporkId, nodes[:1], nodes[1:]) + ensureCommunicationSilenceAmongGroups(t, ctx, sporkId, nodes[:1], identities[:1].NodeIDs(), nodes[1:], identities[1:].NodeIDs()) ensureCommunicationOverAllProtocols(t, ctx, sporkId, nodes[1:], inbounds[1:]) } @@ -308,7 +333,7 @@ func TestConnectionGater_Disallow_Integration(t *testing.T) { ids := flow.IdentityList{} inbounds := make([]chan string, 0, 5) - disallowedList := unittest.NewProtectedMap[*flow.Identity, struct{}]() + disallowedList := concurrentmap.New[*flow.Identity, struct{}]() for i := 0; i < count; i++ { handler, inbound := p2ptest.StreamHandlerFixture(t) @@ -316,10 +341,15 @@ func TestConnectionGater_Disallow_Integration(t *testing.T) { t, sporkId, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithDefaultStreamHandler(handler), // enable peer manager, with a 1-second refresh rate, and connection pruning enabled. 
- p2ptest.WithPeerManagerEnabled(true, 1*time.Second, func() peer.IDSlice { + p2ptest.WithPeerManagerEnabled(&p2pbuilderconfig.PeerManagerConfig{ + ConnectionPruning: true, + UpdateInterval: 1 * time.Second, + ConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(), + }, func() peer.IDSlice { list := make(peer.IDSlice, 0) for _, id := range ids { if disallowedList.Has(id) { @@ -333,7 +363,7 @@ func TestConnectionGater_Disallow_Integration(t *testing.T) { } return list }), - p2ptest.WithConnectionGater(testutils.NewConnectionGater(idProvider, func(pid peer.ID) error { + p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(pid peer.ID) error { return disallowedList.ForEach(func(id *flow.Identity, _ struct{}) error { bid, err := unittest.PeerIDFromFlowID(id) require.NoError(t, err) @@ -343,15 +373,15 @@ func TestConnectionGater_Disallow_Integration(t *testing.T) { return nil }) }))) - idProvider.On("ByPeerID", node.Host().ID()).Return(&id, true).Maybe() + idProvider.On("ByPeerID", node.ID()).Return(&id, true).Maybe() nodes = append(nodes, node) ids = append(ids, &id) inbounds = append(inbounds, inbound) } - p2ptest.StartNodes(t, signalerCtx, nodes, 1*time.Second) - defer p2ptest.StopNodes(t, nodes, cancel, 1*time.Second) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) @@ -363,35 +393,52 @@ func TestConnectionGater_Disallow_Integration(t *testing.T) { // let peer manager prune the connections to the disallow-listed node. time.Sleep(1 * time.Second) // ensures no connection, unicast, or pubsub going to or coming from the disallow-listed node. - ensureCommunicationSilenceAmongGroups(t, ctx, sporkId, nodes[:count-1], nodes[count-1:]) + ensureCommunicationSilenceAmongGroups(t, ctx, sporkId, nodes[:count-1], ids[:count-1].NodeIDs(), nodes[count-1:], ids[count-1:].NodeIDs()) // now we add another node (the second last node) to the disallowed list. disallowedList.Add(ids[len(ids)-2], struct{}{}) // let peer manager prune the connections to the disallow-listed node. time.Sleep(1 * time.Second) // ensures no connection, unicast, or pubsub going to and coming from the disallow-listed nodes. - ensureCommunicationSilenceAmongGroups(t, ctx, sporkId, nodes[:count-2], nodes[count-2:]) + ensureCommunicationSilenceAmongGroups(t, ctx, sporkId, nodes[:count-2], ids[:count-2].NodeIDs(), nodes[count-2:], ids[count-2:].NodeIDs()) // ensures that all nodes are other non-disallow-listed nodes can exchange messages over the pubsub and unicast. ensureCommunicationOverAllProtocols(t, ctx, sporkId, nodes[:count-2], inbounds[:count-2]) } // ensureCommunicationSilenceAmongGroups ensures no connection, unicast, or pubsub going to or coming from between the two groups of nodes. 
-func ensureCommunicationSilenceAmongGroups(t *testing.T, ctx context.Context, sporkId flow.Identifier, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode) { +func ensureCommunicationSilenceAmongGroups( + t *testing.T, + ctx context.Context, + sporkId flow.Identifier, + groupANodes []p2p.LibP2PNode, + groupAIdentifiers flow.IdentifierList, + groupBNodes []p2p.LibP2PNode, + groupBIdentifiers flow.IdentifierList) { // ensures no connection, unicast, or pubsub going to the disallow-listed nodes - p2ptest.EnsureNotConnectedBetweenGroups(t, ctx, groupA, groupB) - p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, groupA, groupB, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) - return unittest.ProposalFixture(), blockTopic - }) - p2pfixtures.EnsureNoStreamCreationBetweenGroups(t, ctx, groupA, groupB) + p2ptest.EnsureNotConnectedBetweenGroups(t, ctx, groupANodes, groupBNodes) + + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + p2ptest.EnsureNoPubsubExchangeBetweenGroups( + t, + ctx, + groupANodes, + groupAIdentifiers, + groupBNodes, + groupBIdentifiers, + blockTopic, + 1, + func() interface{} { + return (*messages.Proposal)(unittest.ProposalFixture()) + }) + p2pfixtures.EnsureNoStreamCreationBetweenGroups(t, ctx, groupANodes, groupBNodes) } // ensureCommunicationOverAllProtocols ensures that all nodes are connected to each other, and they can exchange messages over the pubsub and unicast. func ensureCommunicationOverAllProtocols(t *testing.T, ctx context.Context, sporkId flow.Identifier, nodes []p2p.LibP2PNode, inbounds []chan string) { - p2ptest.EnsureConnected(t, ctx, nodes) - p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) - return unittest.ProposalFixture(), blockTopic + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return (*messages.Proposal)(unittest.ProposalFixture()) }) p2pfixtures.EnsureMessageExchangeOverUnicast(t, ctx, nodes, inbounds, p2pfixtures.LongStringMessageFactoryFixture(t)) } diff --git a/network/p2p/connection/connector.go b/network/p2p/connection/connector.go index bfbba1e15d1..f551cff7c10 100644 --- a/network/p2p/connection/connector.go +++ b/network/p2p/connection/connector.go @@ -2,14 +2,13 @@ package connection import ( "context" - "fmt" "github.com/libp2p/go-libp2p/core/peer" - discoveryBackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff" "github.com/rs/zerolog" "github.com/onflow/flow-go/network/internal/p2putils" "github.com/onflow/flow-go/network/p2p" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" "github.com/onflow/flow-go/utils/logging" "github.com/onflow/flow-go/utils/rand" ) @@ -26,16 +25,16 @@ const ( PruningDisabled = false ) -// Libp2pConnector is a libp2p based Connector implementation to connect and disconnect from peers -type Libp2pConnector struct { - backoffConnector *discoveryBackoff.BackoffConnector +// PeerUpdater is a connector that connects to a list of peers and disconnects from any other connection that the libp2p node might have. +type PeerUpdater struct { + connector p2p.Connector host p2p.ConnectorHost log zerolog.Logger pruneConnections bool } -// ConnectorConfig is the configuration for the libp2p based connector. 
-type ConnectorConfig struct {
+// PeerUpdaterConfig is the configuration for the libp2p based connector.
+type PeerUpdaterConfig struct {
 	// PruneConnections is a boolean flag to enable pruning of connections to peers that are not part of the explicit update list.
 	PruneConnections bool
@@ -45,32 +44,23 @@ type ConnectorConfig struct {
 	// Host is the libp2p host to be used by the connector.
 	Host p2p.ConnectorHost

-	// BackoffConnectorFactory is a factory function to create a new BackoffConnector.
-	BackoffConnectorFactory func() (*discoveryBackoff.BackoffConnector, error)
+	// Connector is the connector used by the peer updater to connect to peers.
+	Connector p2p.Connector
 }

-var _ p2p.Connector = &Libp2pConnector{}
+var _ p2p.PeerUpdater = (*PeerUpdater)(nil)

-// NewLibp2pConnector creates a new libp2p based connector
+// NewPeerUpdater creates a new libp2p based connector
 // Args:
 // - cfg: configuration for the connector
 //
 // Returns:
-// - *Libp2pConnector: a new libp2p based connector
+// - *PeerUpdater: a new libp2p based connector
 // - error: an error if there is any error while creating the connector. The errors are irrecoverable and unexpected.
-func NewLibp2pConnector(cfg *ConnectorConfig) (*Libp2pConnector, error) {
-	connector, err := cfg.BackoffConnectorFactory()
-	if err != nil {
-		return nil, fmt.Errorf("failed to create libP2P connector: %w", err)
-	}
-
-	if err != nil {
-		return nil, fmt.Errorf("failed to create peer ID slice shuffler: %w", err)
-	}
-
-	libP2PConnector := &Libp2pConnector{
-		log: cfg.Logger,
-		backoffConnector: connector,
+func NewPeerUpdater(cfg *PeerUpdaterConfig) (*PeerUpdater, error) {
+	libP2PConnector := &PeerUpdater{
+		log: cfg.Logger.With().Str("component", "peer-updater").Logger(),
+		connector: cfg.Connector,
 		host: cfg.Host,
 		pruneConnections: cfg.PruneConnections,
 	}
@@ -80,7 +70,7 @@ func NewLibp2pConnector(cfg *ConnectorConfig) (*Libp2pConnector, error) {

 // UpdatePeers is the implementation of the Connector.UpdatePeers function. It connects to all of the ids and
 // disconnects from any other connection that the libp2p node might have.
-func (l *Libp2pConnector) UpdatePeers(ctx context.Context, peerIDs peer.IDSlice) {
+func (l *PeerUpdater) UpdatePeers(ctx context.Context, peerIDs peer.IDSlice) {
 	// connect to each of the peer.AddrInfo in pInfos
 	l.connectToPeers(ctx, peerIDs)
@@ -93,7 +83,7 @@
 }

 // connectToPeers connects each of the peer in pInfos
-func (l *Libp2pConnector) connectToPeers(ctx context.Context, peerIDs peer.IDSlice) {
+func (l *PeerUpdater) connectToPeers(ctx context.Context, peerIDs peer.IDSlice) {
 	// create a channel of peer.AddrInfo as expected by the connector
 	peerCh := make(chan peer.AddrInfo, len(peerIDs))
@@ -109,6 +99,10 @@
 	}

 	for _, peerID := range peerIDs {
+		if l.host.IsConnectedTo(peerID) {
+			l.log.Trace().Str("peer_id", p2plogging.PeerId(peerID)).Msg("already connected to peer, skipping connection")
+			continue
+		}
 		peerCh <- peer.AddrInfo{ID: peerID}
 	}
@@ -116,13 +110,13 @@
 	close(peerCh)

 	// ask the connector to connect to all the peers
-	l.backoffConnector.Connect(ctx, peerCh)
+	l.connector.Connect(ctx, peerCh)
 }

 // pruneAllConnectionsExcept trims all connections of the node from peers not part of peerIDs.
// A node would have created such extra connections earlier when the identity list may have been different, or
// it may have been target of such connections from node which have now been excluded.
-func (l *Libp2pConnector) pruneAllConnectionsExcept(peerIDs peer.IDSlice) {
+func (l *PeerUpdater) pruneAllConnectionsExcept(peerIDs peer.IDSlice) {
 	// convert the peerInfos to a peer.ID -> bool map
 	peersToKeep := make(map[peer.ID]bool, len(peerIDs))
 	for _, pid := range peerIDs {
@@ -152,6 +146,11 @@ func (l *Libp2pConnector) pruneAllConnectionsExcept(peerIDs peer.IDSlice) {
 		if flowStream != nil {
 			lg = lg.With().Str("flow_stream", string(flowStream.Protocol())).Logger()
 		}
+		for _, stream := range conn.GetStreams() {
+			if err := stream.Close(); err != nil {
+				lg.Warn().Err(err).Msg("failed to close stream when pruning connections")
+			}
+		}

 		// close the connection with the peer if it is not part of the current fanout
 		err := l.host.ClosePeer(peerID)
diff --git a/network/p2p/connection/connector_factory.go b/network/p2p/connection/connector_factory.go
index a5c8be29704..b00a1016e64 100644
--- a/network/p2p/connection/connector_factory.go
+++ b/network/p2p/connection/connector_factory.go
@@ -1,17 +1,23 @@
 package connection

 import (
+	"crypto/rand"
 	"fmt"
-	"math/rand"
 	"time"

 	"github.com/libp2p/go-libp2p/core/host"
 	discoveryBackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff"
+	"github.com/onflow/crypto/random"
+
+	"github.com/onflow/flow-go/network/p2p"
 )

 const (
 	// minBackoff is the minimum backoff duration for the backoff connector.
-	minBackoff = time.Second * 10
+	// We set it to 1 second because we want the LibP2PNode to be in charge of connection establishment, so it can disconnect
+	// and reconnect to peers as soon as it needs to. This is essential to ensure that the allow-listing and disallow-listing
+	// time intervals work as expected.
+	minBackoff = 1 * time.Second

 	// maxBackoff is the maximum backoff duration for the backoff connector. When the backoff duration reaches this value,
 	// it will not increase any further.
 	maxBackoff = time.Hour
@@ -21,7 +27,7 @@ const (
 	timeUnit = time.Second

 	// exponentialBackOffBase is the base for the exponential backoff. The backoff duration will be a multiple of the time unit
 	// multiplied by the exponential base raised to the exponential offset, i.e., exponentialBase^(timeUnit*attempt).
-	exponentialBackOffBase = 5.0
+	exponentialBackOffBase = 2.0

 	// exponentialBackOffOffset is the offset for the exponential backoff. It acts as a constant that is added result
 	// of the exponential base raised to the exponential offset, i.e., exponentialBase^(timeUnit*attempt) + exponentialBackOffOffset.
 	// This is used to ensure that the backoff duration is always greater than the time unit. We set this to 0 as we want the
@@ -32,9 +38,12 @@ const (

 // DefaultLibp2pBackoffConnectorFactory is a factory function to create a new BackoffConnector. It uses the default
 // values for the backoff connector.
// (https://github.com/libp2p/go-libp2p-pubsub/blob/master/discovery.go#L34) -func DefaultLibp2pBackoffConnectorFactory(host host.Host) func() (*discoveryBackoff.BackoffConnector, error) { - return func() (*discoveryBackoff.BackoffConnector, error) { - rngSrc := rand.NewSource(rand.Int63()) +func DefaultLibp2pBackoffConnectorFactory() p2p.ConnectorFactory { + return func(host host.Host) (p2p.Connector, error) { + rngSrc, err := newSource() + if err != nil { + return nil, fmt.Errorf("failed to generate a random source: %w", err) + } cacheSize := 100 dialTimeout := time.Minute * 2 @@ -54,3 +63,39 @@ func DefaultLibp2pBackoffConnectorFactory(host host.Host) func() (*discoveryBack return backoffConnector, nil } } + +// `source` implements math/rand.Source so it can be used +// by libp2p's `NewExponentialBackoff`. +// It is backed by a more secure randomness than math/rand's `NewSource`. +// `source` is only implemented to avoid using math/rand's `NewSource`. +type source struct { + prg random.Rand +} + +// Seed is not used by the backoff object from `NewExponentialBackoff` +func (src *source) Seed(seed int64) {} + +// Int63 is used by `NewExponentialBackoff` and is based on a crypto PRG +func (src *source) Int63() int64 { + return int64(src.prg.UintN(1 << 63)) +} + +// creates a source using a crypto PRG and secure random seed +// returned errors: +// - exception error if the system randomness fails (the system and other components would +// have many other issues if this happens) +// - exception error if the CSPRG (Chacha20) isn't initialized properly (should not happen in normal +// operations) +func newSource() (*source, error) { + seed := make([]byte, random.Chacha20SeedLen) + _, err := rand.Read(seed) // checking err only is enough + if err != nil { + return nil, fmt.Errorf("failed to generate a seed: %w", err) + } + prg, err := random.NewChacha20PRG(seed, nil) + if err != nil { + // should not happen in normal operations because `seed` has the correct length + return nil, fmt.Errorf("failed to generate a PRG: %w", err) + } + return &source{prg}, nil +} diff --git a/network/p2p/connection/connector_host.go b/network/p2p/connection/connector_host.go index 6af6ecc4777..04cfd56b28a 100644 --- a/network/p2p/connection/connector_host.go +++ b/network/p2p/connection/connector_host.go @@ -27,6 +27,18 @@ func (c *ConnectorHost) Connections() []network.Conn { return c.h.Network().Conns() } +// IsConnectedTo returns true if the given peer.ID is connected to the underlying host. +// Args: +// +// peerID: peer.ID for which the connection status is requested +// +// Returns: +// +// true if the given peer.ID is connected to the underlying host. +func (c *ConnectorHost) IsConnectedTo(peerID peer.ID) bool { + return c.h.Network().Connectedness(peerID) == network.Connected && len(c.h.Network().ConnsToPeer(peerID)) > 0 +} + // PeerInfo returns the peer.AddrInfo for the given peer.ID. 
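For context on how the crypto-backed source above is consumed, here is a sketch of building the backoff connector with it. The discoveryBackoff signatures are assumed from go-libp2p's p2p/discovery/backoff package, and the cache size and dial timeout mirror the values named in the factory:

// newBackoffConnector sketches how newSource feeds libp2p's exponential backoff:
// the *source satisfies math/rand.Source, so the backoff jitter draws from the
// Chacha20 PRG instead of math/rand's default generator.
func newBackoffConnector(h host.Host) (p2p.Connector, error) {
	rngSrc, err := newSource()
	if err != nil {
		return nil, fmt.Errorf("failed to generate a random source: %w", err)
	}
	backoffFactory := discoveryBackoff.NewExponentialBackoff(
		minBackoff, maxBackoff,
		discoveryBackoff.FullJitter,
		timeUnit, exponentialBackOffBase, exponentialBackOffOffset,
		rngSrc,
	)
	bc, err := discoveryBackoff.NewBackoffConnector(h, 100, 2*time.Minute, backoffFactory)
	if err != nil {
		return nil, fmt.Errorf("failed to create backoff connector: %w", err)
	}
	return bc, nil
}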
// Args: // diff --git a/network/p2p/connection/internal/loggerNotifiee.go b/network/p2p/connection/internal/loggerNotifiee.go index 9dc6fab9f75..26a38db4491 100644 --- a/network/p2p/connection/internal/loggerNotifiee.go +++ b/network/p2p/connection/internal/loggerNotifiee.go @@ -6,6 +6,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" ) type LoggerNotifiee struct { @@ -24,30 +25,30 @@ func NewLoggerNotifiee(logger zerolog.Logger, metrics module.LibP2PConnectionMet func (l *LoggerNotifiee) Listen(_ network.Network, multiaddr multiaddr.Multiaddr) { // just log the multiaddress on which we listen - l.logger.Info().Str("multiaddress", multiaddr.String()).Msg("listen started") + l.logger.Debug().Str("multiaddress", multiaddr.String()).Msg("listen started") } func (l *LoggerNotifiee) ListenClose(_ network.Network, multiaddr multiaddr.Multiaddr) { - l.logger.Info().Str("multiaddress", multiaddr.String()).Msg("listen stopped") + l.logger.Debug().Str("multiaddress", multiaddr.String()).Msg("listen stopped") } func (l *LoggerNotifiee) Connected(n network.Network, conn network.Conn) { l.updateConnectionMetric(n) lg := l.connectionUpdateLogger(n, conn) - lg.Info().Msg("connection established") + lg.Debug().Msg("connection established") } func (l *LoggerNotifiee) Disconnected(n network.Network, conn network.Conn) { l.updateConnectionMetric(n) lg := l.connectionUpdateLogger(n, conn) - lg.Warn().Msg("connection closed") + lg.Debug().Msg("connection closed") } func (l *LoggerNotifiee) connectionUpdateLogger(n network.Network, con network.Conn) zerolog.Logger { return l.logger.With(). - Str("remote_peer", con.RemotePeer().String()). + Str("remote_peer", p2plogging.PeerId(con.RemotePeer())). Str("remote_address", con.RemoteMultiaddr().String()). - Str("local_peer", con.LocalPeer().String()). + Str("local_peer", p2plogging.PeerId(con.LocalPeer())). Str("local_address", con.LocalMultiaddr().String()). Str("direction", con.Stat().Direction.String()). Int("total_connections", len(n.Conns())).Logger() diff --git a/network/p2p/connection/peerManager.go b/network/p2p/connection/peerManager.go index d82c5b779b6..737ef3ad0e6 100644 --- a/network/p2p/connection/peerManager.go +++ b/network/p2p/connection/peerManager.go @@ -3,7 +3,6 @@ package connection import ( "context" "fmt" - mrand "math/rand" "sync" "time" @@ -13,11 +12,14 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/p2p" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/rand" ) -// DefaultPeerUpdateInterval is default duration for which the peer manager waits in between attempts to update peer connections -var DefaultPeerUpdateInterval = 10 * time.Minute +// DefaultPeerUpdateInterval is default duration for which the peer manager waits in between attempts to update peer connections. +// We set it to 1 second to be aligned with the heartbeat intervals of libp2p, alsp, and gossipsub. 
+var DefaultPeerUpdateInterval = time.Second var _ p2p.PeerManager = (*PeerManager)(nil) var _ component.Component = (*PeerManager)(nil) @@ -30,7 +32,7 @@ type PeerManager struct { logger zerolog.Logger peersProvider p2p.PeersProvider // callback to retrieve list of peers to connect to peerRequestQ chan struct{} // a channel to queue a peer update request - connector p2p.Connector // connector to connect or disconnect from peers + connector p2p.PeerUpdater // connector to connect or disconnect from peers peerUpdateInterval time.Duration // interval the peer manager runs on peersProviderMu sync.RWMutex @@ -38,9 +40,9 @@ type PeerManager struct { // NewPeerManager creates a new peer manager which calls the peersProvider callback to get a list of peers to connect to // and it uses the connector to actually connect or disconnect from peers. -func NewPeerManager(logger zerolog.Logger, updateInterval time.Duration, connector p2p.Connector) *PeerManager { +func NewPeerManager(logger zerolog.Logger, updateInterval time.Duration, connector p2p.PeerUpdater) *PeerManager { pm := &PeerManager{ - logger: logger, + logger: logger.With().Str("component", "peer-manager").Logger(), connector: connector, peerRequestQ: make(chan struct{}, 1), peerUpdateInterval: updateInterval, @@ -85,7 +87,11 @@ func (pm *PeerManager) updateLoop(ctx irrecoverable.SignalerContext) { func (pm *PeerManager) periodicLoop(ctx irrecoverable.SignalerContext) { // add a random delay to initial launch to avoid synchronizing this // potentially expensive operation across the network - delay := time.Duration(mrand.Int63n(pm.peerUpdateInterval.Nanoseconds())) + r, err := rand.Uint64n(uint64(pm.peerUpdateInterval.Nanoseconds())) + if err != nil { + ctx.Throw(fmt.Errorf("unable to generate random interval: %w", err)) + } + delay := time.Duration(r) ticker := time.NewTicker(pm.peerUpdateInterval) defer ticker.Stop() @@ -160,7 +166,7 @@ func (pm *PeerManager) SetPeersProvider(peersProvider p2p.PeersProvider) { // is disconnected immediately after being rate limited. func (pm *PeerManager) OnRateLimitedPeer(pid peer.ID, role, msgType, topic, reason string) { pm.logger.Warn(). - Str("peer_id", pid.String()). + Str("peer_id", p2plogging.PeerId(pid)). Str("role", role). Str("message_type", msgType). Str("topic", topic). diff --git a/network/p2p/connection/peerManager_integration_test.go b/network/p2p/connection/peerManager_integration_test.go index 391dac3d840..28b6e6927c9 100644 --- a/network/p2p/connection/peerManager_integration_test.go +++ b/network/p2p/connection/peerManager_integration_test.go @@ -22,7 +22,7 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -// TestPeerManager_Integration tests the correctness of integration between PeerManager and Libp2pConnector over +// TestPeerManager_Integration tests the correctness of integration between PeerManager and PeerUpdater over // a fully connected topology. // PeerManager should be able to connect to all peers using the connector, and must also tear down the connection to // peers that are excluded from its identity provider. 
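The random initial delay in periodicLoop above generalizes to a small helper; a sketch using the same crypto-backed utils/rand call (the helper name is illustrative):

// jitteredFirstTick returns a channel that fires after a uniformly random delay in
// [0, interval), staggering the first (potentially expensive) peer update across the
// network so nodes do not all refresh their connections at the same instant.
func jitteredFirstTick(ctx irrecoverable.SignalerContext, interval time.Duration) <-chan time.Time {
	r, err := rand.Uint64n(uint64(interval.Nanoseconds()))
	if err != nil {
		ctx.Throw(fmt.Errorf("unable to generate random interval: %w", err))
	}
	return time.After(time.Duration(r))
}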
@@ -33,10 +33,11 @@ func TestPeerManager_Integration(t *testing.T) { signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) // create nodes - nodes, identities := p2ptest.NodesFixture(t, unittest.IdentifierFixture(), "test_peer_manager", count) - - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) + nodes, identities := p2ptest.NodesFixture(t, unittest.IdentifierFixture(), "test_peer_manager", count, idProvider) + idProvider.SetIdentities(identities) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) thisNode := nodes[0] topologyPeers := identities[1:] @@ -48,19 +49,21 @@ func TestPeerManager_Integration(t *testing.T) { thisNode.Host().Peerstore().SetAddrs(i.ID, i.Addrs, peerstore.PermanentAddrTTL) } + connector, err := connection.DefaultLibp2pBackoffConnectorFactory()(thisNode.Host()) + require.NoError(t, err) // setup - connector, err := connection.NewLibp2pConnector(&connection.ConnectorConfig{ - PruneConnections: connection.PruningEnabled, - Logger: unittest.Logger(), - Host: connection.NewConnectorHost(thisNode.Host()), - BackoffConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(thisNode.Host()), + peerUpdater, err := connection.NewPeerUpdater(&connection.PeerUpdaterConfig{ + PruneConnections: connection.PruningEnabled, + Logger: unittest.Logger(), + Host: connection.NewConnectorHost(thisNode.Host()), + Connector: connector, }) require.NoError(t, err) idTranslator, err := translator.NewFixedTableIdentityTranslator(identities) require.NoError(t, err) - peerManager := connection.NewPeerManager(unittest.Logger(), connection.DefaultPeerUpdateInterval, connector) + peerManager := connection.NewPeerManager(unittest.Logger(), connection.DefaultPeerUpdateInterval, peerUpdater) peerManager.SetPeersProvider(func() peer.IDSlice { // peerManager is furnished with a full topology that connects to all nodes // in the topologyPeers. diff --git a/network/p2p/connection/peerManager_test.go b/network/p2p/connection/peerManager_test.go index f2a9305c31b..b776143dc12 100644 --- a/network/p2p/connection/peerManager_test.go +++ b/network/p2p/connection/peerManager_test.go @@ -18,9 +18,9 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/internal/p2pfixtures" - "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p/connection" "github.com/onflow/flow-go/network/p2p/keyutils" + mockp2p "github.com/onflow/flow-go/network/p2p/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -60,8 +60,8 @@ func (suite *PeerManagerTestSuite) TestUpdatePeers() { pids := suite.generatePeerIDs(10) // create the connector mock to check ids requested for connect and disconnect - connector := new(mocknetwork.Connector) - connector.On("UpdatePeers", mock.Anything, mock.AnythingOfType("peer.IDSlice")). + peerUpdater := mockp2p.NewPeerUpdater(suite.T()) + peerUpdater.On("UpdatePeers", mock.Anything, mock.AnythingOfType("peer.IDSlice")). 
Run(func(args mock.Arguments) { idArg := args[1].(peer.IDSlice) assert.ElementsMatch(suite.T(), pids, idArg) @@ -69,7 +69,7 @@ func (suite *PeerManagerTestSuite) TestUpdatePeers() { Return(nil) // create the peer manager (but don't start it) - pm := connection.NewPeerManager(suite.log, connection.DefaultPeerUpdateInterval, connector) + pm := connection.NewPeerManager(suite.log, connection.DefaultPeerUpdateInterval, peerUpdater) pm.SetPeersProvider(func() peer.IDSlice { return pids }) @@ -77,7 +77,7 @@ func (suite *PeerManagerTestSuite) TestUpdatePeers() { // very first call to updatepeer suite.Run("updatePeers only connects to all peers the first time", func() { pm.ForceUpdatePeers(ctx) - connector.AssertNumberOfCalls(suite.T(), "UpdatePeers", 1) + peerUpdater.AssertNumberOfCalls(suite.T(), "UpdatePeers", 1) }) // a subsequent call to updatePeers should request a connector.UpdatePeers to existing ids and new ids @@ -87,7 +87,7 @@ func (suite *PeerManagerTestSuite) TestUpdatePeers() { pids = append(pids, newPIDs...) pm.ForceUpdatePeers(ctx) - connector.AssertNumberOfCalls(suite.T(), "UpdatePeers", 2) + peerUpdater.AssertNumberOfCalls(suite.T(), "UpdatePeers", 2) }) // when ids are only excluded, connector.UpdatePeers should be called @@ -96,7 +96,7 @@ func (suite *PeerManagerTestSuite) TestUpdatePeers() { pids = removeRandomElement(pids) pm.ForceUpdatePeers(ctx) - connector.AssertNumberOfCalls(suite.T(), "UpdatePeers", 3) + peerUpdater.AssertNumberOfCalls(suite.T(), "UpdatePeers", 3) }) // addition and deletion of ids should result in a call to connector.UpdatePeers @@ -111,7 +111,7 @@ func (suite *PeerManagerTestSuite) TestUpdatePeers() { pm.ForceUpdatePeers(ctx) - connector.AssertNumberOfCalls(suite.T(), "UpdatePeers", 4) + peerUpdater.AssertNumberOfCalls(suite.T(), "UpdatePeers", 4) }) } @@ -131,13 +131,13 @@ func (suite *PeerManagerTestSuite) TestPeriodicPeerUpdate() { // create some test ids pids := suite.generatePeerIDs(10) - connector := new(mocknetwork.Connector) + peerUpdater := mockp2p.NewPeerUpdater(suite.T()) wg := &sync.WaitGroup{} // keeps track of number of calls on `ConnectPeers` mu := &sync.Mutex{} // provides mutual exclusion on calls to `ConnectPeers` count := 0 times := 2 // we expect it to be called twice at least wg.Add(times) - connector.On("UpdatePeers", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + peerUpdater.On("UpdatePeers", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { mu.Lock() defer mu.Unlock() @@ -148,7 +148,7 @@ func (suite *PeerManagerTestSuite) TestPeriodicPeerUpdate() { }).Return(nil) peerUpdateInterval := 10 * time.Millisecond - pm := connection.NewPeerManager(suite.log, peerUpdateInterval, connector) + pm := connection.NewPeerManager(suite.log, peerUpdateInterval, peerUpdater) pm.SetPeersProvider(func() peer.IDSlice { return pids }) @@ -173,15 +173,15 @@ func (suite *PeerManagerTestSuite) TestOnDemandPeerUpdate() { // chooses peer interval rate deliberately long to capture on demand peer update peerUpdateInterval := time.Hour - // creates mock connector + // creates mock peerUpdater wg := &sync.WaitGroup{} // keeps track of number of calls on `ConnectPeers` mu := &sync.Mutex{} // provides mutual exclusion on calls to `ConnectPeers` count := 0 times := 2 // we expect it to be called twice overall wg.Add(1) // this accounts for one invocation, the other invocation is subsequent - connector := new(mocknetwork.Connector) + peerUpdater := mockp2p.NewPeerUpdater(suite.T()) // captures the first periodic update initiated after 
start to complete
-	connector.On("UpdatePeers", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+	peerUpdater.On("UpdatePeers", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
 		mu.Lock()
 		defer mu.Unlock()
@@ -191,7 +191,7 @@
 		}
 	}).Return(nil)

-	pm := connection.NewPeerManager(suite.log, peerUpdateInterval, connector)
+	pm := connection.NewPeerManager(suite.log, peerUpdateInterval, peerUpdater)
 	pm.SetPeersProvider(func() peer.IDSlice {
 		return pids
 	})
@@ -220,17 +220,17 @@
 	// create some test ids
 	pids := suite.generatePeerIDs(10)

-	connector := new(mocknetwork.Connector)
-	// connectPeerGate channel gates the return of the connector
+	peerUpdater := mockp2p.NewPeerUpdater(suite.T())
+	// connectPeerGate channel gates the return of the peerUpdater
 	connectPeerGate := make(chan time.Time)
 	defer close(connectPeerGate)

 	// choose the periodic interval as a high value so that periodic runs don't interfere with this test
 	peerUpdateInterval := time.Hour

-	connector.On("UpdatePeers", mock.Anything, mock.Anything).Return(nil).
+	peerUpdater.On("UpdatePeers", mock.Anything, mock.Anything).Return(nil).
 		WaitUntil(connectPeerGate) // blocks call for connectPeerGate channel

-	pm := connection.NewPeerManager(suite.log, peerUpdateInterval, connector)
+	pm := connection.NewPeerManager(suite.log, peerUpdateInterval, peerUpdater)
 	pm.SetPeersProvider(func() peer.IDSlice {
 		return pids
 	})
@@ -243,7 +243,7 @@
 	// assert that the first update started
 	assert.Eventually(suite.T(), func() bool {
-		return connector.AssertNumberOfCalls(suite.T(), "UpdatePeers", 1)
+		return len(peerUpdater.Calls) > 0 && peerUpdater.AssertNumberOfCalls(suite.T(), "UpdatePeers", 1)
 	}, 3*time.Second, 100*time.Millisecond)

 	// makes 10 concurrent request for peer update
@@ -255,6 +255,6 @@
 	// assert that only two calls to UpdatePeers were made (one by the periodic update and one by the on-demand update)
 	assert.Eventually(suite.T(), func() bool {
-		return connector.AssertNumberOfCalls(suite.T(), "UpdatePeers", 2)
+		return len(peerUpdater.Calls) > 1 && peerUpdater.AssertNumberOfCalls(suite.T(), "UpdatePeers", 2)
 	}, 10*time.Second, 100*time.Millisecond)
 }
diff --git a/network/p2p/connectionGater.go b/network/p2p/connectionGater.go
index d2732fbd713..212dea51102 100644
--- a/network/p2p/connectionGater.go
+++ b/network/p2p/connectionGater.go
@@ -1,23 +1,24 @@
 package p2p

-import (
-	"github.com/libp2p/go-libp2p/core/control"
-	"github.com/libp2p/go-libp2p/core/network"
-	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/multiformats/go-multiaddr"
-)
+import "github.com/libp2p/go-libp2p/core/connmgr"

-// ConnectionGater is a copy of the libp2p ConnectionGater interface:
-// https://github.com/libp2p/go-libp2p/blob/master/core/connmgr/gater.go#L54
-// We use it here to generate a mock for testing through testify mock.
+// ConnectionGater is the customized interface for the connection gater in the p2p package.
+// It acts as a wrapper around the libp2p connmgr.ConnectionGater interface and adds some custom methods.
type ConnectionGater interface {
-	InterceptPeerDial(p peer.ID) (allow bool)
+	connmgr.ConnectionGater

-	InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool)
-
-	InterceptAccept(network.ConnMultiaddrs) (allow bool)
-
-	InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool)
-
-	InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason)
+	// SetDisallowListOracle sets the disallow list oracle for the connection gater.
+	// If one is set, the oracle is consulted upon every incoming or outgoing connection attempt, and
+	// the connection is only allowed if the remote peer is not on the disallow list.
+	// In Flow, the disallow list oracle is mandatory: if one is not set,
+	// the connection gater will panic.
+	// Also, it follows a dependency injection pattern and does not allow setting the disallow list oracle more than once;
+	// any subsequent calls to this method will result in a panic.
+	// Args:
+	//	oracle: the disallow list oracle to set.
+	// Returns:
+	//	none
+	// Panics:
+	//	if the disallow list oracle is already set.
+	SetDisallowListOracle(oracle DisallowListOracle)
 }
diff --git a/network/p2p/connector.go b/network/p2p/connector.go
index 2bbf9f24dea..f9c5897352c 100644
--- a/network/p2p/connector.go
+++ b/network/p2p/connector.go
@@ -3,12 +3,14 @@ package p2p
 import (
 	"context"

+	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
 )

-// Connector connects to peer and disconnects from peer using the underlying networking library
-type Connector interface {
+// PeerUpdater connects to the given peer.IDs. It also disconnects from any other peers with which it may have
+// previously established a connection.
+type PeerUpdater interface {
 	// UpdatePeers connects to the given peer.IDs. It also disconnects from any other peers with which it may have
 	// previously established connection.
 	// UpdatePeers implementation should be idempotent such that multiple calls to connect to the same peer should not
@@ -16,6 +18,22 @@ type Connector interface {
 	UpdatePeers(ctx context.Context, peerIDs peer.IDSlice)
 }

+// Connector is an interface that allows connecting to a peer.ID.
+type Connector interface {
+	// Connect connects to the given peer.ID.
+	// Note that connection may be established asynchronously. Any error encountered while connecting to the peer.ID
+	// is benign and should not be returned. Also, Connect implementation should not cause any blocking or crash.
+	// Args:
+	//	ctx: context.Context to be used for the connection
+	//	peerChan: channel from which the connector receives the peer.AddrInfo of the peers to connect to.
+	// Returns:
+	//	none.
+	Connect(ctx context.Context, peerChan <-chan peer.AddrInfo)
+}
+
+// ConnectorFactory is a factory function to create a new Connector.
+type ConnectorFactory func(host host.Host) (Connector, error)
+
 type PeerFilter func(peer.ID) error

 // AllowAllPeerFilter returns a peer filter that does not do any filtering.
@@ -31,26 +49,33 @@ type ConnectorHost interface {
 	// Connections returns all the connections of the underlying host.
 	Connections() []network.Conn

+	// IsConnectedTo returns true if the given peer.ID is connected to the underlying host.
+	// Args:
+	//	peerID: peer.ID for which the connection status is requested
+	// Returns:
+	//	true if the given peer.ID is connected to the underlying host.
+	IsConnectedTo(peerId peer.ID) bool
+
+	// PeerInfo returns the peer.AddrInfo for the given peer.ID.
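To show the division of labor around the new Connector interface: a sketch of feeding it a batch of peers, mirroring PeerUpdater.connectToPeers from this diff; the dialAll helper is illustrative:

// dialAll drives a p2p.Connector: the caller owns the channel, enqueues the AddrInfos
// to dial (addresses are resolved from the host's peerstore), closes it, and Connect
// dials asynchronously without surfacing per-peer errors.
func dialAll(ctx context.Context, c p2p.Connector, peerIDs peer.IDSlice) {
	peerCh := make(chan peer.AddrInfo, len(peerIDs))
	for _, pid := range peerIDs {
		peerCh <- peer.AddrInfo{ID: pid}
	}
	close(peerCh)
	c.Connect(ctx, peerCh)
}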
// Args: // id: peer.ID for which the peer.AddrInfo is requested // Returns: // peer.AddrInfo for the given peer.ID - PeerInfo(id peer.ID) peer.AddrInfo + PeerInfo(peerId peer.ID) peer.AddrInfo // IsProtected returns true if the given peer.ID is protected from pruning. // Args: // id: peer.ID for which the protection status is requested // Returns: // true if the given peer.ID is protected from pruning - IsProtected(id peer.ID) bool + IsProtected(peerId peer.ID) bool // ClosePeer closes the connection to the given peer.ID. // Args: // id: peer.ID for which the connection is to be closed // Returns: // error if there is any error while closing the connection to the given peer.ID. All errors are benign. - ClosePeer(id peer.ID) error + ClosePeer(peerId peer.ID) error // ID returns the peer.ID of the underlying host. // Returns: diff --git a/network/p2p/consumer.go b/network/p2p/consumer.go deleted file mode 100644 index 099c735aca3..00000000000 --- a/network/p2p/consumer.go +++ /dev/null @@ -1,131 +0,0 @@ -package p2p - -import ( - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/component" -) - -// DisallowListConsumer consumes notifications from the cache.NodeBlocklistWrapper whenever the block list is updated. -// Implementations must: -// - be concurrency safe -// - be non-blocking -type DisallowListConsumer interface { - // OnNodeDisallowListUpdate notifications whenever the node block list is updated. - // Prerequisites: - // Implementation must be concurrency safe; Non-blocking; - // and must handle repetition of the same events (with some processing overhead). - OnNodeDisallowListUpdate(list flow.IdentifierList) -} - -// ControlMessageType is the type of control message, as defined in the libp2p pubsub spec. -type ControlMessageType string - -const ( - CtrlMsgIHave ControlMessageType = "IHAVE" - CtrlMsgIWant ControlMessageType = "IWANT" - CtrlMsgGraft ControlMessageType = "GRAFT" - CtrlMsgPrune ControlMessageType = "PRUNE" -) - -func (c ControlMessageType) String() string { - return string(c) -} - -// ControlMessageTypes returns list of all libp2p control message types. -func ControlMessageTypes() []ControlMessageType { - return []ControlMessageType{CtrlMsgIHave, CtrlMsgIWant, CtrlMsgGraft, CtrlMsgPrune} -} - -// DisallowListUpdateNotification is the event that is submitted to the distributor when the disallow list is updated. -type DisallowListUpdateNotification struct { - DisallowList flow.IdentifierList -} - -type DisallowListNotificationConsumer interface { - // OnDisallowListNotification is called when a new disallow list update notification is distributed. - // Any error on consuming event must handle internally. - // The implementation must be concurrency safe, but can be blocking. - OnDisallowListNotification(*DisallowListUpdateNotification) -} - -type DisallowListNotificationDistributor interface { - component.Component - // DistributeBlockListNotification distributes the event to all the consumers. - // Any error returned by the distributor is non-recoverable and will cause the node to crash. - // Implementation must be concurrency safe, and non-blocking. - DistributeBlockListNotification(list flow.IdentifierList) error - - // AddConsumer adds a consumer to the distributor. The consumer will be called the distributor distributes a new event. - // AddConsumer must be concurrency safe. Once a consumer is added, it must be called for all future events. 
- // There is no guarantee that the consumer will be called for events that were already received by the distributor. - AddConsumer(DisallowListNotificationConsumer) -} - -// GossipSubInspectorNotifDistributor is the interface for the distributor that distributes gossip sub inspector notifications. -// It is used to distribute notifications to the consumers in an asynchronous manner and non-blocking manner. -// The implementation should guarantee that all registered consumers are called upon distribution of a new event. -type GossipSubInspectorNotifDistributor interface { - component.Component - // DistributeInvalidControlMessageNotification distributes the event to all the consumers. - // Any error returned by the distributor is non-recoverable and will cause the node to crash. - // Implementation must be concurrency safe, and non-blocking. - Distribute(notification *InvCtrlMsgNotif) error - - // AddConsumer adds a consumer to the distributor. The consumer will be called the distributor distributes a new event. - // AddConsumer must be concurrency safe. Once a consumer is added, it must be called for all future events. - // There is no guarantee that the consumer will be called for events that were already received by the distributor. - AddConsumer(GossipSubInvCtrlMsgNotifConsumer) -} - -// InvCtrlMsgNotif is the notification sent to the consumer when an invalid control message is received. -// It models the information that is available to the consumer about a misbehaving peer. -type InvCtrlMsgNotif struct { - // PeerID is the ID of the peer that sent the invalid control message. - PeerID peer.ID - // MsgType is the type of control message that was received. - MsgType ControlMessageType - // Count is the number of invalid control messages received from the peer that is reported in this notification. - Count uint64 - // Err any error associated with the invalid control message. - Err error -} - -// NewInvalidControlMessageNotification returns a new *InvCtrlMsgNotif -func NewInvalidControlMessageNotification(peerID peer.ID, msgType ControlMessageType, count uint64, err error) *InvCtrlMsgNotif { - return &InvCtrlMsgNotif{ - PeerID: peerID, - MsgType: msgType, - Count: count, - Err: err, - } -} - -// GossipSubInvCtrlMsgNotifConsumer is the interface for the consumer that consumes gossip sub inspector notifications. -// It is used to consume notifications in an asynchronous manner. -// The implementation must be concurrency safe, but can be blocking. This is due to the fact that the consumer is called -// asynchronously by the distributor. -type GossipSubInvCtrlMsgNotifConsumer interface { - // OnInvalidControlMessageNotification is called when a new invalid control message notification is distributed. - // Any error on consuming event must handle internally. - // The implementation must be concurrency safe, but can be blocking. - OnInvalidControlMessageNotification(*InvCtrlMsgNotif) -} - -// GossipSubInspectorSuite is the interface for the GossipSub inspector suite. -// It encapsulates the rpc inspectors and the notification distributors. -type GossipSubInspectorSuite interface { - component.Component - // InspectFunc returns the inspect function that is used to inspect the gossipsub rpc messages. - // This function follows a dependency injection pattern, where the inspect function is injected into the gossipsu, and - // is called whenever a gossipsub rpc message is received. 
-	InspectFunc() func(peer.ID, *pubsub.RPC) error
-
-	// AddInvalidCtrlMsgNotificationConsumer adds a consumer to the invalid control message notification distributor.
-	// This consumer is notified when a misbehaving peer regarding gossipsub control messages is detected. This follows a pub/sub
-	// pattern where the consumer is notified when a new notification is published.
-	// A consumer is only notified once for each notification, and only receives notifications that were published after it was added.
-	AddInvCtrlMsgNotifConsumer(GossipSubInvCtrlMsgNotifConsumer)
-}
diff --git a/network/p2p/consumers.go b/network/p2p/consumers.go
new file mode 100644
index 00000000000..21e4bd4dfe3
--- /dev/null
+++ b/network/p2p/consumers.go
@@ -0,0 +1,78 @@
+package p2p
+
+import (
+	"github.com/libp2p/go-libp2p/core/peer"
+
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+)
+
+// CtrlMsgTopicType represents the type of the topic within a control message.
+type CtrlMsgTopicType uint64
+
+const (
+	// CtrlMsgNonClusterTopicType represents a non-cluster-prefixed topic.
+	CtrlMsgNonClusterTopicType CtrlMsgTopicType = iota
+	// CtrlMsgTopicTypeClusterPrefixed represents a cluster-prefixed topic.
+	CtrlMsgTopicTypeClusterPrefixed
+)
+
+func (t CtrlMsgTopicType) String() string {
+	switch t {
+	case CtrlMsgNonClusterTopicType:
+		return "non-cluster-prefixed"
+	case CtrlMsgTopicTypeClusterPrefixed:
+		return "cluster-prefixed"
+	default:
+		return "unknown"
+	}
+}
+
+// InvCtrlMsgNotif is the notification sent to the consumer when an invalid control message is received.
+// It models the information that is available to the consumer about a misbehaving peer.
+type InvCtrlMsgNotif struct {
+	// PeerID is the ID of the peer that sent the invalid control message.
+	PeerID peer.ID
+	// Error is the error that occurred during validation.
+	Error error
+	// MsgType is the control message type.
+	MsgType p2pmsg.ControlMessageType
+	// Count is the number of errors.
+	Count uint64
+	// TopicType reports whether the error occurred on a cluster-prefixed topic within the control message.
+	// Notifications must be explicitly marked as cluster-prefixed or not because the penalty applied to the GossipSub score
+	// for an error on a cluster-prefixed topic is more lenient than the penalty applied to a non-cluster-prefixed topic.
+	// This distinction ensures that nodes engaged in cluster-prefixed topic communication are not penalized too harshly,
+	// as such communication is vital to the progress of the chain.
+	TopicType CtrlMsgTopicType
+}
+
+// NewInvalidControlMessageNotification returns a new *InvCtrlMsgNotif
+// Args:
+// - peerID: peer id of the offender.
+// - ctlMsgType: the control message type of the rpc message that caused the error.
+// - err: the error that occurred.
+// - count: the number of occurrences of the error.
+//
+// Returns:
+// - *InvCtrlMsgNotif: invalid control message notification.
+func NewInvalidControlMessageNotification(peerID peer.ID, ctlMsgType p2pmsg.ControlMessageType, err error, count uint64, topicType CtrlMsgTopicType) *InvCtrlMsgNotif {
+	return &InvCtrlMsgNotif{
+		PeerID:    peerID,
+		Error:     err,
+		MsgType:   ctlMsgType,
+		Count:     count,
+		TopicType: topicType,
+	}
+}
+
+// GossipSubInvCtrlMsgNotifConsumer is the interface for the consumer that consumes gossipsub inspector notifications.
+// It is used to consume notifications in an asynchronous manner.
+// The implementation must be concurrency safe and non-blocking, as the consumer is called
+// asynchronously by the distributor.
+type GossipSubInvCtrlMsgNotifConsumer interface {
+	// OnInvalidControlMessageNotification is called when a new invalid control message notification is distributed.
+	// Any error encountered while consuming the event must be handled internally.
+	// The implementation must be concurrency safe and non-blocking.
+	// Note: there is no real-time guarantee on processing the notification.
+	OnInvalidControlMessageNotification(*InvCtrlMsgNotif)
+}
diff --git a/network/p2p/dht/dht.go b/network/p2p/dht/dht.go
index 459a84ea21a..e4104397160 100644
--- a/network/p2p/dht/dht.go
+++ b/network/p2p/dht/dht.go
@@ -10,6 +10,7 @@ import (
 	"github.com/rs/zerolog"
 
 	"github.com/onflow/flow-go/module"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
 )
 
 // This produces a new IPFS DHT
@@ -33,12 +34,12 @@ func NewDHT(ctx context.Context, host host.Host, prefix protocol.ID, logger zero
 	peerAddedCb := routingTable.PeerAdded
 	routingTable.PeerRemoved = func(pid peer.ID) {
 		peerRemovedCb(pid)
-		dhtLogger.Debug().Str("peer_id", pid.String()).Msg("peer removed from routing table")
+		dhtLogger.Debug().Str("peer_id", p2plogging.PeerId(pid)).Msg("peer removed from routing table")
 		metrics.RoutingTablePeerRemoved()
 	}
 	routingTable.PeerAdded = func(pid peer.ID) {
 		peerAddedCb(pid)
-		dhtLogger.Debug().Str("peer_id", pid.String()).Msg("peer added to routing table")
+		dhtLogger.Debug().Str("peer_id", p2plogging.PeerId(pid)).Msg("peer added to routing table")
 		metrics.RoutingTablePeerAdded()
 	}
 
diff --git a/network/p2p/dht/dht_test.go b/network/p2p/dht/dht_test.go
index bc0cc970fd9..ef23ccb9901 100644
--- a/network/p2p/dht/dht_test.go
+++ b/network/p2p/dht/dht_test.go
@@ -11,8 +11,10 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
+	"github.com/onflow/flow-go/model/flow"
 	libp2pmsg "github.com/onflow/flow-go/model/libp2p/message"
 	"github.com/onflow/flow-go/module/irrecoverable"
+	mockmodule "github.com/onflow/flow-go/module/mock"
 	"github.com/onflow/flow-go/network/channels"
 	"github.com/onflow/flow-go/network/message"
 	"github.com/onflow/flow-go/network/p2p"
@@ -35,17 +37,33 @@ func TestFindPeerWithDHT(t *testing.T) {
 	golog.SetAllLoggers(golog.LevelFatal) // change this to Debug if libp2p logs are needed
 
 	sporkId := unittest.IdentifierFixture()
-	dhtServerNodes, _ := p2ptest.NodesFixture(t, sporkId, "dht_test", 2, p2ptest.WithDHTOptions(dht.AsServer()))
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
+	dhtServerNodes, serverIDs := p2ptest.NodesFixture(
+		t,
+		sporkId,
+		"dht_test",
+		2,
+		idProvider,
+		p2ptest.WithRole(flow.RoleExecution),
+		p2ptest.WithDHTOptions(dht.AsServer()))
 	require.Len(t, dhtServerNodes, 2)
 
-	dhtClientNodes, _ := p2ptest.NodesFixture(t, sporkId, "dht_test", count-2, p2ptest.WithDHTOptions(dht.AsClient()))
+	dhtClientNodes, clientIDs := p2ptest.NodesFixture(
+		t,
+		sporkId,
+		"dht_test",
+		count-2,
+		idProvider,
+		p2ptest.WithRole(flow.RoleExecution),
+		p2ptest.WithDHTOptions(dht.AsClient()))
 
 	nodes := append(dhtServerNodes, dhtClientNodes...)
- p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + idProvider.SetIdentities(append(serverIDs, clientIDs...)) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) getDhtServerAddr := func(i uint) peer.AddrInfo { - return peer.AddrInfo{ID: dhtServerNodes[i].Host().ID(), Addrs: dhtServerNodes[i].Host().Addrs()} + return peer.AddrInfo{ID: dhtServerNodes[i].ID(), Addrs: dhtServerNodes[i].Host().Addrs()} } // connect even numbered clients to the first DHT server, and odd number clients to the second @@ -55,37 +73,45 @@ func TestFindPeerWithDHT(t *testing.T) { } // wait for clients to connect to DHT servers and update their routing tables - require.Eventually(t, func() bool { - for i, clientNode := range dhtClientNodes { - if clientNode.RoutingTable().Find(getDhtServerAddr(uint(i%2)).ID) == "" { - return false + require.Eventually( + t, func() bool { + for i, clientNode := range dhtClientNodes { + if clientNode.RoutingTable().Find(getDhtServerAddr(uint(i%2)).ID) == "" { + return false + } } - } - return true - }, time.Second*5, ticksForAssertEventually, "nodes failed to connect") + return true + }, time.Second*5, ticksForAssertEventually, "nodes failed to connect") // connect the two DHT servers to each other err := dhtServerNodes[0].Host().Connect(ctx, getDhtServerAddr(1)) require.NoError(t, err) // wait for the first server to connect to the second and update its routing table - require.Eventually(t, func() bool { - return dhtServerNodes[0].RoutingTable().Find(getDhtServerAddr(1).ID) != "" - }, time.Second*5, ticksForAssertEventually, "dht servers failed to connect") + require.Eventually( + t, func() bool { + return dhtServerNodes[0].RoutingTable().Find(getDhtServerAddr(1).ID) != "" + }, time.Second*5, ticksForAssertEventually, "dht servers failed to connect") // check that all even numbered clients can create streams with all odd numbered clients for i := 0; i < len(dhtClientNodes); i += 2 { for j := 1; j < len(dhtClientNodes); j += 2 { // client i should not yet know the address of client j, but we clear any addresses // here just in case. - dhtClientNodes[i].Host().Peerstore().ClearAddrs(dhtClientNodes[j].Host().ID()) + dhtClientNodes[i].Host().Peerstore().ClearAddrs(dhtClientNodes[j].ID()) // Try to create a stream from client i to client j. This should resort to a DHT // lookup since client i does not know client j's address. 
- unittest.RequireReturnsBefore(t, func() { - _, err = dhtClientNodes[i].CreateStream(ctx, dhtClientNodes[j].Host().ID()) - require.NoError(t, err) - }, 1*time.Second, "could not create stream on time") + unittest.RequireReturnsBefore( + t, func() { + err = dhtClientNodes[i].OpenAndWriteOnStream( + ctx, dhtClientNodes[j].ID(), t.Name(), func(stream network.Stream) error { + // do nothing + require.NotNil(t, stream) + return nil + }) + require.NoError(t, err) + }, 1*time.Second, "could not create stream on time") } } } @@ -98,7 +124,6 @@ func TestPubSubWithDHTDiscovery(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - topic := channels.Topic("/flow/" + unittest.IdentifierFixture().String()) count := 5 golog.SetAllLoggers(golog.LevelFatal) // change this to Debug if libp2p logs are needed @@ -118,23 +143,44 @@ func TestPubSubWithDHTDiscovery(t *testing.T) { // N4 N5 N4-----N5 sporkId := unittest.IdentifierFixture() + topic := channels.TopicFromChannel(channels.TestNetworkChannel, sporkId) + idProvider := mockmodule.NewIdentityProvider(t) // create one node running the DHT Server (mimicking the staked AN) - dhtServerNodes, _ := p2ptest.NodesFixture(t, sporkId, "dht_test", 1, p2ptest.WithDHTOptions(dht.AsServer())) + dhtServerNodes, serverIDs := p2ptest.NodesFixture( + t, + sporkId, + "dht_test", + 1, + idProvider, + p2ptest.WithRole(flow.RoleExecution), + p2ptest.WithDHTOptions(dht.AsServer())) require.Len(t, dhtServerNodes, 1) dhtServerNode := dhtServerNodes[0] // crate other nodes running the DHT Client (mimicking the unstaked ANs) - dhtClientNodes, _ := p2ptest.NodesFixture(t, sporkId, "dht_test", count-1, p2ptest.WithDHTOptions(dht.AsClient())) - + dhtClientNodes, clientIDs := p2ptest.NodesFixture( + t, + sporkId, + "dht_test", + count-1, + idProvider, + p2ptest.WithRole(flow.RoleExecution), + p2ptest.WithDHTOptions(dht.AsClient())) + + ids := append(serverIDs, clientIDs...) nodes := append(dhtServerNodes, dhtClientNodes...) 
- p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + for i, node := range nodes { + idProvider.On("ByPeerID", node.ID()).Return(&ids[i], true).Maybe() + + } + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) // Step 2: Connect all nodes running a DHT client to the node running the DHT server // This has to be done before subscribing to any topic, otherwise the node gives up on advertising // its topics of interest and becomes undiscoverable by other nodes // (see: https://github.com/libp2p/go-libp2p-pubsub/issues/442) - dhtServerAddr := peer.AddrInfo{ID: dhtServerNode.Host().ID(), Addrs: dhtServerNode.Host().Addrs()} + dhtServerAddr := peer.AddrInfo{ID: dhtServerNode.ID(), Addrs: dhtServerNode.Host().Addrs()} for _, clientNode := range dhtClientNodes { err := clientNode.Host().Connect(ctx, dhtServerAddr) require.NoError(t, err) @@ -145,18 +191,15 @@ func TestPubSubWithDHTDiscovery(t *testing.T) { // hence expect count and not count - 1 messages to be received (one by each node, including the sender) ch := make(chan peer.ID, count) - codec := unittest.NetworkCodec() - - payload, _ := codec.Encode(&libp2pmsg.TestMessage{}) - msg := &message.Message{ - Payload: payload, - } - - data, err := msg.Marshal() + messageScope, err := message.NewOutgoingScope( + ids.NodeIDs(), + topic, + &libp2pmsg.TestMessage{}, + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) require.NoError(t, err) logger := unittest.Logger() - topicValidator := flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter()) for _, n := range nodes { s, err := n.Subscribe(topic, topicValidator) @@ -166,16 +209,16 @@ func TestPubSubWithDHTDiscovery(t *testing.T) { msg, err := s.Next(ctx) require.NoError(t, err) require.NotNil(t, msg) - assert.Equal(t, data, msg.Data) + assert.Equal(t, messageScope.Proto().Payload, msg.Data) ch <- nodeID - }(s, n.Host().ID()) + }(s, n.ID()) } // fullyConnectedGraph checks that each node is directly connected to all the other nodes fullyConnectedGraph := func() bool { for i := 0; i < len(nodes); i++ { for j := i + 1; j < len(nodes); j++ { - if nodes[i].Host().Network().Connectedness(nodes[j].Host().ID()) == network.NotConnected { + if nodes[i].Host().Network().Connectedness(nodes[j].ID()) == network.NotConnected { return false } } @@ -186,7 +229,7 @@ func TestPubSubWithDHTDiscovery(t *testing.T) { require.Eventually(t, fullyConnectedGraph, time.Second*5, ticksForAssertEventually, "nodes failed to discover each other") // Step 4: publish a message to the topic - require.NoError(t, dhtServerNode.Publish(ctx, topic, data)) + require.NoError(t, dhtServerNode.Publish(ctx, messageScope)) // Step 5: By now, all peers would have been discovered and the message should have been successfully published // A hash set to keep track of the nodes who received the message @@ -200,8 +243,8 @@ loop: case <-time.After(3 * time.Second): var missing peer.IDSlice for _, n := range nodes { - if _, found := recv[n.Host().ID()]; !found { - missing = append(missing, n.Host().ID()) + if _, found := recv[n.ID()]; !found { + missing = append(missing, n.ID()) } } assert.Failf(t, "messages not received by some nodes", "%+v", missing) @@ -211,6 +254,6 @@ loop: // Step 6: unsubscribes all nodes from the topic for _, n := range nodes { - assert.NoError(t, n.UnSubscribe(topic)) + assert.NoError(t, n.Unsubscribe(topic)) } } diff --git a/network/p2p/disallowListCache.go b/network/p2p/disallowListCache.go 
new file mode 100644 index 00000000000..b153084b6cf --- /dev/null +++ b/network/p2p/disallowListCache.go @@ -0,0 +1,51 @@ +package p2p + +import ( + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network" +) + +// DisallowListCache is an interface for a cache that keeps the list of disallow-listed peers. +// It is designed to present a centralized interface for keeping track of disallow-listed peers for different reasons. +type DisallowListCache interface { + // IsDisallowListed determines whether the given peer is disallow-listed for any reason. + // Args: + // - peerID: the peer to check. + // Returns: + // - []network.DisallowListedCause: the list of causes for which the given peer is disallow-listed. If the peer is not disallow-listed for any reason, + // a nil slice is returned. + // - bool: true if the peer is disallow-listed for any reason, false otherwise. + IsDisallowListed(peerID peer.ID) ([]network.DisallowListedCause, bool) + + // DisallowFor disallow-lists a peer for a cause. + // Args: + // - peerID: the peerID of the peer to be disallow-listed. + // - cause: the cause for disallow-listing the peer. + // Returns: + // - the list of causes for which the peer is disallow-listed. + // - error if the operation fails, error is irrecoverable. + DisallowFor(peerID peer.ID, cause network.DisallowListedCause) ([]network.DisallowListedCause, error) + + // AllowFor removes a cause from the disallow list cache entity for the peerID. + // Args: + // - peerID: the peerID of the peer to be allow-listed. + // - cause: the cause for allow-listing the peer. + // Returns: + // - the list of causes for which the peer is disallow-listed. If the peer is not disallow-listed for any reason, + // an empty list is returned. + AllowFor(peerID peer.ID, cause network.DisallowListedCause) []network.DisallowListedCause +} + +// DisallowListCacheConfig is the configuration for the disallow-list cache. +// The disallow-list cache is used to temporarily disallow-list peers. +type DisallowListCacheConfig struct { + // MaxSize is the maximum number of peers that can be disallow-listed at any given time. + // When the cache is full, no further new peers can be disallow-listed. + // Recommended size is 100 * number of staked nodes. + MaxSize uint32 + + // Metrics is the HeroCache metrics collector to be used for the disallow-list cache. + Metrics module.HeroCacheMetrics +} diff --git a/network/p2p/distributor/disallow_list.go b/network/p2p/distributor/disallow_list.go deleted file mode 100644 index 848baa925bb..00000000000 --- a/network/p2p/distributor/disallow_list.go +++ /dev/null @@ -1,114 +0,0 @@ -package distributor - -import ( - "sync" - - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/common/worker" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/mempool/queue" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/network/p2p" -) - -const ( - // DefaultDisallowListNotificationQueueCacheSize is the default size of the disallow list notification queue. - DefaultDisallowListNotificationQueueCacheSize = 100 -) - -// DisallowListNotificationDistributor is a component that distributes disallow list updates to registered consumers in an -// asynchronous, fan-out manner. It is thread-safe and can be used concurrently from multiple goroutines. 
-type DisallowListNotificationDistributor struct { - component.Component - cm *component.ComponentManager - logger zerolog.Logger - - consumerLock sync.RWMutex // protects the consumer field from concurrent updates - consumers []p2p.DisallowListNotificationConsumer - workerPool *worker.Pool[*p2p.DisallowListUpdateNotification] -} - -var _ p2p.DisallowListNotificationDistributor = (*DisallowListNotificationDistributor)(nil) - -// DefaultDisallowListNotificationDistributor creates a new disallow list notification distributor with default configuration. -func DefaultDisallowListNotificationDistributor(logger zerolog.Logger, opts ...queue.HeroStoreConfigOption) *DisallowListNotificationDistributor { - cfg := &queue.HeroStoreConfig{ - SizeLimit: DefaultDisallowListNotificationQueueCacheSize, - Collector: metrics.NewNoopCollector(), - } - - for _, opt := range opts { - opt(cfg) - } - - store := queue.NewHeroStore(cfg.SizeLimit, logger, cfg.Collector) - return NewDisallowListConsumer(logger, store) -} - -// NewDisallowListConsumer creates a new disallow list notification distributor. -// It takes a message store as a parameter, which is used to store the events that are distributed to the consumers. -// The message store is used to ensure that DistributeBlockListNotification is non-blocking. -func NewDisallowListConsumer(logger zerolog.Logger, store engine.MessageStore) *DisallowListNotificationDistributor { - lg := logger.With().Str("component", "node_disallow_distributor").Logger() - - d := &DisallowListNotificationDistributor{ - logger: lg, - } - - pool := worker.NewWorkerPoolBuilder[*p2p.DisallowListUpdateNotification]( - lg, - store, - d.distribute).Build() - - d.workerPool = pool - - cm := component.NewComponentManagerBuilder() - cm.AddWorker(d.workerPool.WorkerLogic()) - - d.cm = cm.Build() - d.Component = d.cm - - return d -} - -// distribute is called by the workers to process the event. It calls the OnDisallowListNotification method on all registered -// consumers. -// It does not return an error because the event is already in the store, so it will be retried. -func (d *DisallowListNotificationDistributor) distribute(notification *p2p.DisallowListUpdateNotification) error { - d.consumerLock.RLock() - defer d.consumerLock.RUnlock() - - for _, consumer := range d.consumers { - consumer.OnDisallowListNotification(notification) - } - - return nil -} - -// AddConsumer adds a consumer to the distributor. The consumer will be called the distributor distributes a new event. -// AddConsumer must be concurrency safe. Once a consumer is added, it must be called for all future events. -// There is no guarantee that the consumer will be called for events that were already received by the distributor. -func (d *DisallowListNotificationDistributor) AddConsumer(consumer p2p.DisallowListNotificationConsumer) { - d.consumerLock.Lock() - defer d.consumerLock.Unlock() - - d.consumers = append(d.consumers, consumer) -} - -// DistributeBlockListNotification distributes the event to all the consumers. -// Implementation is non-blocking, it submits the event to the worker pool and returns immediately. -// The event will be distributed to the consumers in the order it was submitted but asynchronously. -// If the worker pool is full, the event will be dropped and a warning will be logged. -// This implementation returns no error. 
-func (d *DisallowListNotificationDistributor) DistributeBlockListNotification(disallowList flow.IdentifierList) error { - ok := d.workerPool.Submit(&p2p.DisallowListUpdateNotification{DisallowList: disallowList}) - if !ok { - // we use a queue to buffer the events, so this may happen if the queue is full or the event is duplicate. In this case, we log a warning. - d.logger.Warn().Msg("node disallow list update notification queue is full or the event is duplicate, dropping event") - } - - return nil -} diff --git a/network/p2p/distributor/disallow_list_test.go b/network/p2p/distributor/disallow_list_test.go deleted file mode 100644 index 39cf9532f46..00000000000 --- a/network/p2p/distributor/disallow_list_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package distributor_test - -import ( - "context" - "math/rand" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/distributor" - mockp2p "github.com/onflow/flow-go/network/p2p/mock" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestDisallowListNotificationDistributor tests the disallow list notification distributor by adding two consumers to the -// notification distributor component and sending a random set of notifications to the notification component. The test -// verifies that the consumers receive the notifications and that each consumer sees each notification only once. -func TestDisallowListNotificationDistributor(t *testing.T) { - d := distributor.DefaultDisallowListNotificationDistributor(unittest.Logger()) - - c1 := mockp2p.NewDisallowListNotificationConsumer(t) - c2 := mockp2p.NewDisallowListNotificationConsumer(t) - - d.AddConsumer(c1) - d.AddConsumer(c2) - - tt := disallowListUpdateNotificationsFixture(50) - - c1Done := sync.WaitGroup{} - c1Done.Add(len(tt)) - c1Seen := unittest.NewProtectedMap[flow.Identifier, struct{}]() - c1.On("OnDisallowListNotification", mock.Anything).Run(func(args mock.Arguments) { - n, ok := args.Get(0).(*p2p.DisallowListUpdateNotification) - require.True(t, ok) - - require.Contains(t, tt, n) - - // ensure consumer see each peer once - hash := flow.MerkleRoot(n.DisallowList...) - require.False(t, c1Seen.Has(hash)) - c1Seen.Add(hash, struct{}{}) - - c1Done.Done() - }).Return() - - c2Done := sync.WaitGroup{} - c2Done.Add(len(tt)) - c2Seen := unittest.NewProtectedMap[flow.Identifier, struct{}]() - c2.On("OnDisallowListNotification", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - n, ok := args.Get(0).(*p2p.DisallowListUpdateNotification) - require.True(t, ok) - - require.Contains(t, tt, n) - - // ensure consumer see each peer once - hash := flow.MerkleRoot(n.DisallowList...) 
- require.False(t, c2Seen.Has(hash)) - c2Seen.Add(hash, struct{}{}) - - c2Done.Done() - }).Return() - - cancelCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) - d.Start(ctx) - - unittest.RequireCloseBefore(t, d.Ready(), 100*time.Millisecond, "could not start distributor") - - for i := 0; i < len(tt); i++ { - go func(i int) { - require.NoError(t, d.DistributeBlockListNotification(tt[i].DisallowList)) - }(i) - } - - unittest.RequireReturnsBefore(t, c1Done.Wait, 1*time.Second, "events are not received by consumer 1") - unittest.RequireReturnsBefore(t, c2Done.Wait, 1*time.Second, "events are not received by consumer 2") - cancel() - unittest.RequireCloseBefore(t, d.Done(), 100*time.Millisecond, "could not stop distributor") -} - -func disallowListUpdateNotificationsFixture(n int) []*p2p.DisallowListUpdateNotification { - tt := make([]*p2p.DisallowListUpdateNotification, n) - for i := 0; i < n; i++ { - tt[i] = disallowListUpdateNotificationFixture() - } - return tt -} - -func disallowListUpdateNotificationFixture() *p2p.DisallowListUpdateNotification { - return &p2p.DisallowListUpdateNotification{ - DisallowList: unittest.IdentifierListFixture(rand.Int()%100 + 1), - } -} diff --git a/network/p2p/distributor/gossipsub_inspector.go b/network/p2p/distributor/gossipsub_inspector.go deleted file mode 100644 index 9c9eec28c61..00000000000 --- a/network/p2p/distributor/gossipsub_inspector.go +++ /dev/null @@ -1,114 +0,0 @@ -package distributor - -import ( - "sync" - - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/common/worker" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/mempool/queue" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/network/p2p" -) - -const ( - // DefaultGossipSubInspectorNotificationQueueCacheSize is the default cache size for the gossipsub rpc inspector notification queue. - DefaultGossipSubInspectorNotificationQueueCacheSize = 10_000 - // defaultGossipSubInspectorNotificationQueueWorkerCount is the default number of workers that will process the gossipsub rpc inspector notifications. - defaultGossipSubInspectorNotificationQueueWorkerCount = 1 -) - -var _ p2p.GossipSubInspectorNotifDistributor = (*GossipSubInspectorNotifDistributor)(nil) - -// GossipSubInspectorNotifDistributor is a component that distributes gossipsub rpc inspector notifications to -// registered consumers in a non-blocking manner and asynchronously. It is thread-safe and can be used concurrently from -// multiple goroutines. The distribution is done by a worker pool. The worker pool is configured with a queue that has a -// fixed size. If the queue is full, the notification is discarded. The queue size and the number of workers can be -// configured. -type GossipSubInspectorNotifDistributor struct { - component.Component - cm *component.ComponentManager - logger zerolog.Logger - - workerPool *worker.Pool[*p2p.InvCtrlMsgNotif] - consumerLock sync.RWMutex // protects the consumer field from concurrent updates - consumers []p2p.GossipSubInvCtrlMsgNotifConsumer -} - -// DefaultGossipSubInspectorNotificationDistributor returns a new GossipSubInspectorNotifDistributor component with the default configuration. 
-func DefaultGossipSubInspectorNotificationDistributor(logger zerolog.Logger, opts ...queue.HeroStoreConfigOption) *GossipSubInspectorNotifDistributor { - cfg := &queue.HeroStoreConfig{ - SizeLimit: DefaultGossipSubInspectorNotificationQueueCacheSize, - Collector: metrics.NewNoopCollector(), - } - - for _, opt := range opts { - opt(cfg) - } - - store := queue.NewHeroStore(cfg.SizeLimit, logger, cfg.Collector) - return NewGossipSubInspectorNotificationDistributor(logger, store) -} - -// NewGossipSubInspectorNotificationDistributor returns a new GossipSubInspectorNotifDistributor component. -// It takes a message store to store the notifications in memory and process them asynchronously. -func NewGossipSubInspectorNotificationDistributor(log zerolog.Logger, store engine.MessageStore) *GossipSubInspectorNotifDistributor { - lg := log.With().Str("component", "gossipsub_rpc_inspector_distributor").Logger() - - d := &GossipSubInspectorNotifDistributor{ - logger: lg, - } - - pool := worker.NewWorkerPoolBuilder[*p2p.InvCtrlMsgNotif](lg, store, d.distribute).Build() - d.workerPool = pool - - cm := component.NewComponentManagerBuilder() - - for i := 0; i < defaultGossipSubInspectorNotificationQueueWorkerCount; i++ { - cm.AddWorker(pool.WorkerLogic()) - } - - d.cm = cm.Build() - d.Component = d.cm - - return d -} - -// Distribute distributes the gossipsub rpc inspector notification to all registered consumers. -// The distribution is done asynchronously and non-blocking. The notification is added to a queue and processed by a worker pool. -// DistributeEvent in this implementation does not return an error, but it logs a warning if the queue is full. -func (g *GossipSubInspectorNotifDistributor) Distribute(notification *p2p.InvCtrlMsgNotif) error { - if ok := g.workerPool.Submit(notification); !ok { - // we use a queue with a fixed size, so this can happen when queue is full or when the notification is duplicate. - g.logger.Warn().Msg("gossipsub rpc inspector notification queue is full or notification is duplicate, discarding notification") - } - return nil -} - -// AddConsumer adds a consumer to the distributor. The consumer will be called when distributor distributes a new event. -// AddConsumer must be concurrency safe. Once a consumer is added, it must be called for all future events. -// There is no guarantee that the consumer will be called for events that were already received by the distributor. -func (g *GossipSubInspectorNotifDistributor) AddConsumer(consumer p2p.GossipSubInvCtrlMsgNotifConsumer) { - g.consumerLock.Lock() - defer g.consumerLock.Unlock() - - g.consumers = append(g.consumers, consumer) -} - -// distribute calls the ConsumeEvent method of all registered consumers. It is called by the workers of the worker pool. -// It is concurrency safe and can be called concurrently by multiple workers. However, the consumers may be blocking -// on the ConsumeEvent method. 
-func (g *GossipSubInspectorNotifDistributor) distribute(notification *p2p.InvCtrlMsgNotif) error { - g.consumerLock.RLock() - defer g.consumerLock.RUnlock() - - g.logger.Trace().Msg("distributing gossipsub rpc inspector notification") - for _, consumer := range g.consumers { - consumer.OnInvalidControlMessageNotification(notification) - } - g.logger.Trace().Msg("gossipsub rpc inspector notification distributed") - - return nil -} diff --git a/network/p2p/distributor/gossipsub_inspector_test.go b/network/p2p/distributor/gossipsub_inspector_test.go deleted file mode 100644 index e5e94af36ce..00000000000 --- a/network/p2p/distributor/gossipsub_inspector_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package distributor_test - -import ( - "context" - "math/rand" - "sync" - "testing" - "time" - - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/distributor" - mockp2p "github.com/onflow/flow-go/network/p2p/mock" - p2ptest "github.com/onflow/flow-go/network/p2p/test" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestGossipSubInspectorNotification tests the GossipSub inspector notification by adding two consumers to the -// notification distributor component and sending a random set of notifications to the notification component. The test -// verifies that the consumers receive the notifications. -func TestGossipSubInspectorNotification(t *testing.T) { - g := distributor.DefaultGossipSubInspectorNotificationDistributor(unittest.Logger()) - - c1 := mockp2p.NewGossipSubInvalidControlMessageNotificationConsumer(t) - c2 := mockp2p.NewGossipSubInvalidControlMessageNotificationConsumer(t) - - g.AddConsumer(c1) - g.AddConsumer(c2) - - tt := invalidControlMessageNotificationListFixture(t, 100) - - c1Done := sync.WaitGroup{} - c1Done.Add(len(tt)) - c1Seen := unittest.NewProtectedMap[peer.ID, struct{}]() - c1.On("OnInvalidControlMessageNotification", mock.Anything).Run(func(args mock.Arguments) { - notification, ok := args.Get(0).(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - - require.Contains(t, tt, notification) - - // ensure consumer see each peer once - require.False(t, c1Seen.Has(notification.PeerID)) - c1Seen.Add(notification.PeerID, struct{}{}) - - c1Done.Done() - }).Return() - - c2Done := sync.WaitGroup{} - c2Done.Add(len(tt)) - c2Seen := unittest.NewProtectedMap[peer.ID, struct{}]() - c2.On("OnInvalidControlMessageNotification", mock.Anything).Run(func(args mock.Arguments) { - notification, ok := args.Get(0).(*p2p.InvCtrlMsgNotif) - require.True(t, ok) - - require.Contains(t, tt, notification) - // ensure consumer see each peer once - require.False(t, c2Seen.Has(notification.PeerID)) - c2Seen.Add(notification.PeerID, struct{}{}) - - c2Done.Done() - }).Return() - - cancelCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) - g.Start(ctx) - - unittest.RequireCloseBefore(t, g.Ready(), 100*time.Millisecond, "could not start distributor") - - for i := 0; i < len(tt); i++ { - go func(i int) { - require.NoError(t, g.Distribute(tt[i])) - }(i) - } - - unittest.RequireReturnsBefore(t, c1Done.Wait, 1*time.Second, "events are not received by consumer 1") - unittest.RequireReturnsBefore(t, c2Done.Wait, 1*time.Second, "events are not received by consumer 2") - cancel() - unittest.RequireCloseBefore(t, g.Done(), 100*time.Millisecond, "could 
not stop distributor") -} - -func invalidControlMessageNotificationListFixture(t *testing.T, n int) []*p2p.InvCtrlMsgNotif { - list := make([]*p2p.InvCtrlMsgNotif, n) - for i := 0; i < n; i++ { - list[i] = invalidControlMessageNotificationFixture(t) - } - return list -} - -func invalidControlMessageNotificationFixture(t *testing.T) *p2p.InvCtrlMsgNotif { - return &p2p.InvCtrlMsgNotif{ - PeerID: p2ptest.PeerIdFixture(t), - MsgType: []p2p.ControlMessageType{p2p.CtrlMsgGraft, p2p.CtrlMsgPrune, p2p.CtrlMsgIHave, p2p.CtrlMsgIWant}[rand.Intn(4)], - Count: rand.Uint64(), - } -} diff --git a/network/p2p/dns/resolver_test.go b/network/p2p/dns/resolver_test.go index 3991887dd14..a69089a6bc2 100644 --- a/network/p2p/dns/resolver_test.go +++ b/network/p2p/dns/resolver_test.go @@ -14,7 +14,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/utils/unittest" testnetwork "github.com/onflow/flow-go/utils/unittest/network" ) @@ -40,7 +40,7 @@ func TestResolver_HappyPath(t *testing.T) { cancelCtx, cancel := context.WithCancel(context.Background()) defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) + ctx := irrecoverable.NewMockSignalerContext(t, cancelCtx) resolver.Start(ctx) unittest.RequireCloseBefore(t, resolver.Ready(), 100*time.Millisecond, "could not start dns resolver on time") @@ -79,7 +79,7 @@ func TestResolver_CacheExpiry(t *testing.T) { cancelCtx, cancel := context.WithCancel(context.Background()) defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) + ctx := irrecoverable.NewMockSignalerContext(t, cancelCtx) resolver.Start(ctx) unittest.RequireCloseBefore(t, resolver.Ready(), 100*time.Millisecond, "could not start dns resolver on time") @@ -125,7 +125,7 @@ func TestResolver_Error(t *testing.T) { cancelCtx, cancel := context.WithCancel(context.Background()) defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) + ctx := irrecoverable.NewMockSignalerContext(t, cancelCtx) resolver.Start(ctx) unittest.RequireCloseBefore(t, resolver.Ready(), 100*time.Millisecond, "could not start dns resolver on time") @@ -171,7 +171,7 @@ func TestResolver_Expired_Invalidated(t *testing.T) { cancelCtx, cancel := context.WithCancel(context.Background()) defer cancel() - ctx, _ := irrecoverable.WithSignaler(cancelCtx) + ctx := irrecoverable.NewMockSignalerContext(t, cancelCtx) resolver.Start(ctx) unittest.RequireCloseBefore(t, resolver.Ready(), 100*time.Millisecond, "could not start dns resolver on time") diff --git a/network/p2p/inspector/README.MD b/network/p2p/inspector/README.MD new file mode 100644 index 00000000000..df09b36d3e8 --- /dev/null +++ b/network/p2p/inspector/README.MD @@ -0,0 +1,208 @@ +# Control Message Validation Inspector Overview + +## Component Overview +The Control Message Validation Inspector (`ControlMsgValidationInspector`) is an injectable component responsible for asynchronous inspection of incoming GossipSub RPC. +It is entirely developed and maintained at Flow blockchain codebase and is injected into the GossipSub protocol of libp2p at the startup of the node. +All incoming RPC messages are passed through this inspection to ensure their validity and compliance with the Flow protocol semantics. + +The inspector performs two primary functions: +1. 
**RPC truncation (blocking)**: It truncates the size of incoming RPC messages, if needed, to prevent excessive resource consumption. This is done by sampling the messages and reducing their size to a configurable threshold.
+2. **RPC inspection (aka validation) (non-blocking)**: It inspects (aka validates) the truncated or original RPC messages for compliance with the Flow protocol semantics. This includes validation of message structure, topic, sender, and other relevant attributes.
+
+The figure below shows the high-level overview of the Control Message Validation Inspector and its interaction with the GossipSub protocol and the Flow node.
+The blue box represents the GossipSub protocol, which is responsible for handling the pub-sub messaging system and is an external dependency of the Flow node.
+The green boxes represent various components of the Flow node's networking layer that are involved in the inspection and processing of incoming RPC messages.
+The steps that are marked with an asterisk (*) are performed concurrently, while the rest are performed sequentially.
+As shown in this figure, an incoming RPC message is passed by GossipSub to the Control Message Validation Inspector, which then performs the blocking truncation process and queues the RPC for the asynchronous, non-blocking inspection process.
+As soon as the RPC is queued for inspection, it is also passed to the GossipSub protocol for further processing. The results of the inspection are used for internal metrics, logging, and feedback to the GossipSub scoring system.
+Once GossipSub processes the RPC, it passes the message to the libp2p node component of the networking layer of the Flow node, which then processes the message and sends it to the rest of the Flow node for further processing.
+Note that the validation process is non-blocking; hence, even a malformed RPC is allowed to proceed through the GossipSub protocol to the Flow node.
+However, based on the result of the asynchronous inspection, the message may be scored negatively, and the sender may be penalized in the peer scoring system.
+The rationale behind this is that, post truncation, as long as the RPC size is within the configured limits, a single (or a few) non-compliant RPCs do not drastically affect the system's health; hence, the RPCs are allowed to proceed for further processing.
+What matters is the persistent behavior of the sender, and the sender's reputation and future message propagation are _eventually_ affected based on the inspection results.
+![rpc-inspection-process.png](rpc-inspection-process.png)
+## What is an RPC?
+RPC stands for Remote Procedure Call. In the context of GossipSub, it is a message that is sent from one peer to another peer over the GossipSub protocol.
+The message is sent in the form of a protobuf message and is used to communicate information about the state of the network, such as topic membership, message propagation, and other relevant information.
+It encapsulates various types of messages and commands that peers exchange to implement the GossipSub protocol, a pub-sub (publish-subscribe) messaging system.
+Remember that the purpose of GossipSub is to efficiently disseminate messages to interested subscribers in the network without requiring a central broker or server.
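+
+Every such RPC reaches the Control Message Validation Inspector through a hook that is injected into GossipSub at node startup. The following is an illustrative sketch of that wiring, not the actual flow-go builder code; it assumes go-libp2p-pubsub's `WithAppSpecificRpcInspector` option, and the `inspect` body is a placeholder for the truncate-then-enqueue logic described above.
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/libp2p/go-libp2p"
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+func main() {
+	// create a bare libp2p host; a real node configures keys, transports, etc.
+	host, err := libp2p.New()
+	if err != nil {
+		panic(err)
+	}
+
+	// inspect is invoked for every incoming RPC; returning a non-nil error
+	// rejects the RPC outright, while returning nil lets GossipSub process it.
+	inspect := func(from peer.ID, rpc *pubsub.RPC) error {
+		// blocking truncation and non-blocking enqueue for validation go here
+		return nil
+	}
+
+	_, err = pubsub.NewGossipSub(context.Background(), host,
+		pubsub.WithAppSpecificRpcInspector(inspect))
+	if err != nil {
+		panic(err)
+	}
+}
+```
+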
+Here is what an RPC message looks like in the context of GossipSub:
+```go
+type RPC struct {
+	Subscriptions        []*RPC_SubOpts  `protobuf:"bytes,1,rep,name=subscriptions" json:"subscriptions,omitempty"`
+	Publish              []*Message      `protobuf:"bytes,2,rep,name=publish" json:"publish,omitempty"`
+	Control              *ControlMessage `protobuf:"bytes,3,opt,name=control" json:"control,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+```
+
+Here's a breakdown of the components within the GossipSub `RPC` struct:
+1. **Subscriptions (`[]*RPC_SubOpts`)**: This field contains a list of subscription options (`RPC_SubOpts`).
+   Each `RPC_SubOpts` represents a peer's intent to subscribe or unsubscribe from a topic.
+   This allows peers to dynamically adjust their interest in various topics and manage their subscription list.
+2. **Publish (`[]*Message`)**: The `Publish` field contains a list of messages that the peer wishes to publish (or gossip) to the network.
+   Each `Message` is intended for a specific topic, and peers subscribing to that topic should receive the message.
+   This field is essential for the dissemination of information and data across the network.
+3. **Control (`*ControlMessage`)**: The `Control` field holds a control message, which contains various types of control information required for the operation of the GossipSub protocol.
+   This can include information about grafting (joining a mesh for a topic), pruning (leaving a mesh),
+   and other control signals related to the maintenance and optimization of the pub-sub network.
+   The control messages play a crucial role in the mesh overlay maintenance, ensuring efficient and reliable message propagation.
+4. **XXX Fields**: These fields (`XXX_NoUnkeyedLiteral`, `XXX_unrecognized`, and `XXX_sizecache`) are generated by the protobuf compiler and are not directly used by the GossipSub protocol.
+   They are used internally by the protobuf library for various purposes like caching and ensuring correct marshalling and unmarshalling of the protobuf data.
+
+### Closer Look at the Control Message
+In GossipSub, a Control Message is a part of the `RPC` structure and plays a crucial role in maintaining and optimizing the network.
+It contains several fields, each corresponding to different types of control information.
+The primary purpose of these control messages is to manage the mesh overlay that underpins the GossipSub protocol,
+ensuring efficient and reliable message propagation.
+
+At the core, the control messages are used to maintain the mesh overlay for each topic, allowing peers to join and leave the mesh as their interests and network connectivity change.
+The control messages include the following types:
+
+1. **IHAVE (`[]*ControlIHave`)**: The `IHAVE` messages are used to advertise to peers that the sender has certain messages.
+   This is part of the message propagation mechanism.
+   When a peer receives an `IHAVE` message and is interested in the advertised messages (because it doesn't have them yet),
+   it can request those messages from the sender using an `IWANT` message.
+
+2. **IWANT (`[]*ControlIWant`)**: The `IWANT` messages are requests sent to peers to ask for specific messages previously
+   advertised in an `IHAVE` message.
+   This mechanism ensures that messages propagate through the network,
+   reaching interested subscribers even if they are not directly connected to the message's original publisher.
+
+3. 
**GRAFT (`[]*ControlGraft`)**: The `GRAFT` messages are used to express the sender's intention to join the mesh for a specific topic.
+   In GossipSub, each peer maintains a local mesh network for each topic it is interested in.
+   Each local mesh is a subset of the peers in the network that are interested in the same topic. The complete mesh for a topic is formed by the union of all local meshes, which must be connected to ensure efficient message propagation
+   (the peer scoring ensures that the mesh is well-connected and that peers are not overloaded with messages).
+   Sending a `GRAFT` message is a way to join the local mesh of a peer, indicating that the sender wants to receive and forward messages for the specific topic.
+
+4. **PRUNE (`[]*ControlPrune`)**: Conversely, `PRUNE` messages are sent when a peer wants to leave the local mesh for a specific topic.
+   This could be because the peer is no longer interested in the topic or is optimizing its network connections.
+   Upon receiving a `PRUNE` message, peers will remove the sender from their mesh for the specific topic.
+
+```go
+type ControlMessage struct {
+	Ihave                []*ControlIHave `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"`
+	Iwant                []*ControlIWant `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"`
+	Graft                []*ControlGraft `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"`
+	Prune                []*ControlPrune `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+```
+
+## Why is RPC Inspection Necessary?
+In the context of the Flow blockchain, RPC inspection is necessary for the following reasons:
+1. **Security**: The inspection process mitigates potential security risks such as spamming, message replay attacks, or malicious content dissemination, and provides complementary feedback to the internal GossipSub scoring system.
+
+2. **Resource Management**: By validating and potentially truncating incoming RPC messages, the system manages its computational and memory resources more effectively.
+   This prevents resource exhaustion attacks where an adversary might attempt to overwhelm the system by sending a large volume of non-compliant or oversized messages.
+
+3. **Metrics and Monitoring**: The inspection process provides valuable insights into the network's health and performance.
+   By monitoring the incoming RPC messages, the system can collect metrics and statistics about message propagation, topic membership, and other relevant network attributes.
+
+## RPC Truncation (Blocking)
+The Control Message Validation Inspector is responsible for truncating the size of incoming RPC messages to prevent excessive resource consumption. This is done by sampling the messages and reducing their size to a configurable threshold.
+The truncation process is done entirely in a blocking manner, i.e., it is performed at the entry point of GossipSub through an injected interceptor, and the incoming RPC messages are modified before they are further processed by the GossipSub protocol.
+The truncation process is applied to different components of the RPC message, specifically the control message types (`GRAFT`, `PRUNE`, `IHAVE`, `IWANT`) and their respective message IDs.
+Truncation is triggered if the count of messages or message IDs exceeds certain configured thresholds, ensuring that the system resources are not overwhelmed.
+When the number of messages or message IDs exceeds the threshold, a random sample of messages or message IDs is selected, and the rest are discarded.
+
+### Message vs Message ID Truncation
+In the context of GossipSub RPC inspection, there is a subtle distinction between the count of messages and the count of message IDs:
+
+1. **Count of Messages:**
+   - This refers to the number of control messages (like `GRAFT`, `PRUNE`, `IHAVE`, `IWANT`) that are part of the `ControlMessage` structure within an RPC message, i.e., the size of the `Graft`, `Prune`, `Ihave`, and `Iwant` slice fields.
+   - Each control message type serves a different purpose in the GossipSub protocol (e.g., `GRAFT` for joining a mesh for a topic, `PRUNE` for leaving a mesh).
+   - When we talk about the "count of messages," we're referring to how many individual control messages of each type are included in the RPC.
+   - Truncation based on the count of messages ensures that the number of control messages of each type doesn't exceed a configured threshold, preventing overwhelming the receiving peer with too many control instructions at once.
+
+2. **Count of Message IDs:**
+   - This refers to the number of unique identifiers for actual published messages that are being referenced within control messages like `IHAVE` and `IWANT`.
+   - `IHAVE` messages contain IDs of messages that the sender has and is announcing to peers. `IWANT` messages contain IDs of messages that the sender wants from peers.
+   - Each _individual_ `IHAVE` or `IWANT` control message can reference multiple message IDs. The "count of message IDs" is the total number of such IDs contained within each `IHAVE` or `IWANT` control message.
+   - Truncation based on the count of message IDs ensures that each `IHAVE` or `IWANT` control message doesn't reference an excessively large number of messages. This prevents a scenario where a peer might be asked to process an overwhelming number of message requests at once, which could lead to resource exhaustion.
+
+## RPC Validation (Non-Blocking)
+The Control Message Validation Inspector is also responsible for inspecting the truncated or original RPC messages for compliance with the Flow protocol semantics.
+The inspection process is done post truncation and is entirely non-blocking, i.e., it does not prevent the further processing of the RPC messages by the GossipSub protocol.
+In other words, the RPC messages are passed through after truncation for further processing by the GossipSub protocol, regardless of whether they pass the inspection or not.
+At the same time, each incoming RPC message is queued for asynchronous inspection, and the results of the inspection are used for internal metrics, logging, and feedback to the GossipSub scoring system.
+This means that even a non-compliant RPC message is allowed to proceed through the GossipSub protocol to the Flow node. However, based on the result of the asynchronous inspection,
+the message may be scored negatively, and the sender may be penalized in the peer scoring system. Hence, its future messages may be de-prioritized or ignored by the GossipSub protocol.
+This follows the principle that, post truncation, as long as the RPC size is within the configured limits, a single (or a few) non-compliant RPCs do not drastically affect the system's health;
+hence, the RPCs are allowed to proceed for further processing. However, the sender's reputation and future message propagation are affected based on the inspection results. A minimal sketch of this truncate-then-validate pattern is shown below.
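+
+The sketch below is illustrative and self-contained, not the flow-go implementation; the `rpc` type, the `maxMessageIDs` threshold, the queue size, and the `notifyScoring` callback are all assumed names for the purpose of the example. The blocking phase keeps a random sample of message IDs; the non-blocking phase enqueues the RPC and returns immediately, while a worker reports validation failures to the scoring layer.
+
+```go
+package main
+
+import (
+	"fmt"
+	"math/rand"
+	"time"
+)
+
+// rpc is a simplified stand-in for a GossipSub RPC: only the IHAVE message
+// IDs, which is the part this sketch truncates and validates.
+type rpc struct{ iHaveMessageIDs []string }
+
+type inspector struct {
+	maxMessageIDs int         // illustrative truncation threshold
+	queue         chan *rpc   // fixed-size queue for async validation
+	notifyScoring func(error) // stand-in for the scoring feedback path
+}
+
+// Inspect mirrors the two-phase pattern: truncate synchronously, then hand
+// off for asynchronous validation and return without waiting for a verdict.
+func (i *inspector) Inspect(r *rpc) error {
+	// Phase 1 (blocking): keep a random sample of at most maxMessageIDs IDs.
+	if len(r.iHaveMessageIDs) > i.maxMessageIDs {
+		rand.Shuffle(len(r.iHaveMessageIDs), func(a, b int) {
+			r.iHaveMessageIDs[a], r.iHaveMessageIDs[b] = r.iHaveMessageIDs[b], r.iHaveMessageIDs[a]
+		})
+		r.iHaveMessageIDs = r.iHaveMessageIDs[:i.maxMessageIDs]
+	}
+	// Phase 2 (non-blocking): enqueue for async validation; if the queue is
+	// full, the RPC skips inspection rather than blocking GossipSub.
+	select {
+	case i.queue <- r:
+	default:
+	}
+	return nil // the RPC always proceeds; verdicts arrive via scoring later
+}
+
+// worker drains the queue and feeds validation failures back to scoring.
+func (i *inspector) worker() {
+	for r := range i.queue {
+		if err := validate(r); err != nil {
+			i.notifyScoring(err)
+		}
+	}
+}
+
+// validate stands in for the sequential GRAFT/PRUNE/IWANT/IHAVE checks;
+// here it only flags duplicate message IDs within the same RPC.
+func validate(r *rpc) error {
+	seen := make(map[string]struct{}, len(r.iHaveMessageIDs))
+	for _, id := range r.iHaveMessageIDs {
+		if _, dup := seen[id]; dup {
+			return fmt.Errorf("duplicate message ID: %s", id)
+		}
+		seen[id] = struct{}{}
+	}
+	return nil
+}
+
+func main() {
+	ins := &inspector{
+		maxMessageIDs: 2,
+		queue:         make(chan *rpc, 16),
+		notifyScoring: func(err error) { fmt.Println("penalize sender:", err) },
+	}
+	go ins.worker()
+	_ = ins.Inspect(&rpc{iHaveMessageIDs: []string{"a", "b", "c"}}) // truncated to 2 IDs
+	_ = ins.Inspect(&rpc{iHaveMessageIDs: []string{"x", "x"}})      // duplicate triggers a penalty
+	time.Sleep(100 * time.Millisecond)                              // crude wait for the async worker in this demo
+}
+```
+
+Dropping an RPC from inspection when the queue is full, instead of blocking, mirrors the design principle above: the GossipSub processing path is never stalled by the inspector.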
+
+The queued RPCs are picked up by a pool of worker threads, and the inspection is performed in parallel to the GossipSub protocol's processing of the RPC messages.
+Each RPC message is inspected for the following attributes sequentially, and once a non-compliance is detected, the inspection process is terminated with a failure result. A failure result
+will cause an _invalid control message notification_ (`p2p.InvCtrlMsgNotif`) to be sent to the `GossipSubAppSpecificScoreRegistry`, which will then be used for penalizing the sender in the peer scoring system.
+The `GossipSubAppSpecificScoreRegistry` is a Flow-level component that decides on part of each individual peer's score based on the peer's Flow-specific behavior.
+It directly provides feedback to the GossipSub protocol for scoring the peers.
+
+The [order of inspections for a single RPC](https://github.com/onflow/flow-go/blob/master/network/p2p/inspector/validation/control_message_validation_inspector.go#L270-L323) is as follows. Note that in the
+descriptions below, when we say an RPC is flagged as invalid or the inspection process is terminated with a failure result, we mean that an _invalid control message notification_ is sent to the `GossipSubAppSpecificScoreRegistry`, which
+will then be used for penalizing the sender in the peer scoring system.
+1. `GRAFT` messages validation: Each RPC contains one or more `GRAFT` messages. Each `GRAFT` message contains a topic ID indicating the mesh the peer wants to join.
+   The validation process involves iterating through each `GRAFT` message received in the (potentially truncated) RPC.
+   For each `GRAFT` message, the topic ID is validated to ensure it corresponds to a valid and recognized topic within the Flow network.
+   Topic validation might involve checking if the topic is known, if it's within the scope of the peer's interests or subscriptions, and if it aligns with the network's current configuration (e.g., checking against the active spork ID).
+   If the topic is cluster-prefixed, additional validations ensure that the topic is part of the active cluster IDs.
+   If (even one) topic ID is invalid or unrecognized, the `GRAFT` message is flagged as invalid, and the inspection process is terminated with a failure result.
+   In the future, we may relax this condition to allow for a certain number of invalid topics, but for now, a single invalid topic results in a failure.
+   The inspection process also keeps track of the topics seen in the `GRAFT` messages of the same RPC.
+   If a topic is repeated (i.e., if there are duplicate topics in the `GRAFT` messages of the same RPC), this is usually a sign of a protocol violation or misbehavior.
+   The validation process counts these duplicates and, if the number exceeds a certain threshold, it flags the RPC message as invalid and terminates the inspection process with a failure result.
+   Note that all `GRAFT` messages in the same (potentially truncated) RPC are validated together, without any sampling, as the number of `GRAFT` messages is usually assumed to be small, and validating
+   them is not assumed to be resource-intensive.
+2. `PRUNE` messages validation: Similar to `GRAFT`s, each RPC contains one or more `PRUNE` messages. Each `PRUNE` message contains a topic ID indicating the mesh the peer wants to leave.
+   The validation process involves iterating through each `PRUNE` message received in the (potentially truncated) RPC.
+   For each `PRUNE` message, the topic ID is validated to ensure it corresponds to a valid and recognized topic within the Flow network.
+   Topic validation might involve checking if the topic is known, if it's within the scope of the peer's interests or subscriptions, and if it aligns with the network's current configuration (e.g., checking against the active spork ID).
+   If the topic is cluster-prefixed, additional validations ensure that the topic is part of the active cluster IDs.
+   If (even one) topic ID is invalid or unrecognized, the `PRUNE` message is flagged as invalid, and the inspection process is terminated with a failure result.
+   In the future, we may relax this condition to allow for a certain number of invalid topics, but for now, a single invalid topic results in a failure.
+   The inspection process also keeps track of the topics seen in the `PRUNE` messages of the same RPC.
+   If a topic is repeated (i.e., if there are duplicate topics in the `PRUNE` messages of the same RPC), this is usually a sign of a protocol violation or misbehavior.
+   The validation process counts these duplicates and, if the number exceeds a certain threshold, it flags the RPC message as invalid and terminates the inspection process with a failure result.
+   Note that all `PRUNE` messages in the same (potentially truncated) RPC are validated together, without any sampling, as the number of `PRUNE` messages is usually assumed to be small, and validating
+   them is not assumed to be resource-intensive.
+3. `IWANT` messages validation: Each RPC contains one or more `IWANT` messages. Each `IWANT` message contains a list of message IDs that the sender wants from the receiver as the result of an `IHAVE` message.
+   The validation process involves iterating through each `IWANT` message received in the (potentially truncated) RPC.
+   For each `IWANT` message, the message IDs are validated to ensure they correspond to message IDs that were recently advertised by the sender in an `IHAVE` message.
+   We define an `IWANT` cache miss as the event that an `IWANT` message ID does not correspond to a recently advertised `IHAVE` message ID.
+   When the number of `IWANT` cache misses exceeds a certain threshold, the `IWANT` message is flagged as invalid, and the inspection process is terminated with a failure result.
+   The inspection process also keeps track of the message IDs seen in the `IWANT` messages of the same RPC.
+   If a message ID is repeated (i.e., if there are duplicate message IDs in the `IWANT` messages of the same RPC), this is usually a sign of a protocol violation or misbehavior.
+   The validation process counts these duplicates and, if the number exceeds a certain threshold, it flags the RPC message as invalid and terminates the inspection process with a failure result.
+   Note that all `IWANT` messages in the same (potentially truncated) RPC are validated together, without any sampling, as the number of `IWANT` messages is usually assumed to be small, and validating
+   them is not assumed to be resource-intensive.
+4. `IHAVE` messages validation: Each RPC contains one or more `IHAVE` messages. Each `IHAVE` message contains a list of message IDs that the sender has and is advertising to the receiver.
+   The validation process involves iterating through each `IHAVE` message received in the (potentially truncated) RPC.
+   Each `IHAVE` message is composed of a topic ID as well as the list of message IDs advertised for that topic.
+   Each topic ID is validated to ensure it corresponds to a valid and recognized topic within the Flow network.
+   Topic validation might involve checking if the topic is known, if it's within the scope of the peer's interests or subscriptions, and if it aligns with the network's current configuration (e.g., checking against the active spork ID).
+   If the topic is cluster-prefixed, additional validations ensure that the topic is part of the active cluster IDs.
+   If (even one) topic ID is invalid or unrecognized, the `IHAVE` message is flagged as invalid, and the inspection process is terminated with a failure result.
+   The inspection process also keeps track of the topics seen in the `IHAVE` messages of the same RPC. When a topic is repeated (i.e., if there are duplicate topics in the `IHAVE` messages of the same RPC), this is usually a sign of a protocol violation or misbehavior.
+   The validation process counts these duplicates and, if the number exceeds a certain threshold, it flags the RPC message as invalid and terminates the inspection process with a failure result.
+   The message IDs advertised in the `IHAVE` messages are also validated to ensure there are no duplicates. When a message ID is repeated (i.e., if there are duplicate message IDs in the `IHAVE` messages of the same RPC), this is usually a sign of a protocol violation or misbehavior.
+   The validation process counts these duplicates and, if the number exceeds a certain threshold, it flags the RPC message as invalid and terminates the inspection process with a failure result.
+   Note that all `IHAVE` messages in the same (potentially truncated) RPC are validated together, without any sampling, as the number of `IHAVE` messages is usually assumed to be small, and validating
+   them is not assumed to be resource-intensive.
+5. `Publish` messages validation: Each RPC contains a list of `Publish` messages that are intended to be gossiped to the network.
+   The validation process involves iterating through each `Publish` message received in the (potentially truncated) RPC.
+   To validate the `Publish` messages of an RPC, the inspector samples a subset of the `Publish` messages and validates them for compliance with the Flow protocol semantics (see the sketch after this list).
+   This is done to avoid adding excessive computational overhead to the inspection process, as the number of `Publish` messages in an RPC can be large, and validating each message can be resource-intensive.
+   The validation of each `Publish` message involves several steps: (1) whether the sender is a valid (staked) Flow node,
+   (2) whether the topic ID is valid based on the Flow protocol semantics, and (3) whether the local peer has a valid subscription to the topic.
+   Failure in any of these steps results in a validation error for the `Publish` message.
+   However, a validation error for a single `Publish` message does not cause the inspection process to terminate with a failure result for the entire RPC.
+   Rather, the inspection process continues to validate the rest of the sampled `Publish` messages.
+   Once the entire sample is validated, the inspection process is terminated with a success result if the number of validation errors is within a certain threshold.
+   Otherwise, when the number of validation errors exceeds the threshold, the inspection process is terminated with a failure result, which
+   will cause an _invalid control message notification_ to be sent to the `GossipSubAppSpecificScoreRegistry`, which will then be used for penalizing the sender in the peer scoring system.
+   As this is the last step in the inspection process, when an RPC reaches this step, it means that the RPC has passed all the previous inspections and is only being validated for the `Publish` messages.
+   Hence, the result of this step is used to determine the final result of the inspection process.
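+
+A minimal sketch of this threshold-based sample validation (illustrative only; the types, helper names, and thresholds below are hypothetical, not the flow-go API):
+
+```go
+package inspector
+
+import (
+	"fmt"
+	"math/rand"
+)
+
+type publishMsg struct {
+	sender string // must map to a staked Flow node
+	topic  string // must be valid and locally subscribed
+}
+
+// validateOne stands in for the per-message checks: staked sender,
+// valid topic ID, and a valid local subscription to the topic.
+func validateOne(m publishMsg) error { return nil }
+
+// validatePublishSample validates a random sample of the publish messages
+// and fails the RPC only when the error count exceeds the threshold.
+func validatePublishSample(msgs []publishMsg, sampleSize, errThreshold int) error {
+	if len(msgs) > sampleSize {
+		rand.Shuffle(len(msgs), func(i, j int) { msgs[i], msgs[j] = msgs[j], msgs[i] })
+		msgs = msgs[:sampleSize]
+	}
+	errCount := 0
+	for _, m := range msgs {
+		if err := validateOne(m); err != nil {
+			errCount++ // a single invalid message does not fail the whole RPC
+		}
+	}
+	if errCount > errThreshold {
+		return fmt.Errorf("%d publish validation errors exceed threshold %d", errCount, errThreshold)
+	}
+	return nil
+}
+```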
\ No newline at end of file
diff --git a/network/p2p/inspector/control_message_metrics.go b/network/p2p/inspector/control_message_metrics.go
deleted file mode 100644
index 9047d0f9484..00000000000
--- a/network/p2p/inspector/control_message_metrics.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package inspector
-
-import (
-	"fmt"
-
-	pubsub "github.com/libp2p/go-libp2p-pubsub"
-	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/rs/zerolog"
-
-	"github.com/onflow/flow-go/engine/common/worker"
-	"github.com/onflow/flow-go/module/component"
-	"github.com/onflow/flow-go/module/mempool/queue"
-	"github.com/onflow/flow-go/module/metrics"
-	"github.com/onflow/flow-go/network/p2p"
-	"github.com/onflow/flow-go/network/p2p/inspector/internal"
-)
-
-const (
-	// DefaultControlMsgMetricsInspectorNumberOfWorkers default number of workers for the inspector component.
-	DefaultControlMsgMetricsInspectorNumberOfWorkers = 1
-	// DefaultControlMsgMetricsInspectorQueueCacheSize is the default size of the message queue.
-	DefaultControlMsgMetricsInspectorQueueCacheSize = 100
-	// rpcInspectorComponentName the rpc inspector component name.
-	rpcInspectorComponentName = "gossipsub_rpc_metrics_observer_inspector"
-)
-
-// ObserveRPCMetricsRequest represents a request to capture metrics for the provided RPC
-type ObserveRPCMetricsRequest struct {
-	// Nonce adds random value so that when msg req is stored on hero store a unique ID can be created from the struct fields.
-	Nonce []byte
-	// From the sender of the RPC.
-	From peer.ID
-	// rpc the rpc message.
-	rpc *pubsub.RPC
-}
-
-// ControlMsgMetricsInspector a GossipSub RPC inspector that will observe incoming RPC's and collect metrics related to control messages.
-type ControlMsgMetricsInspector struct {
-	component.Component
-	logger zerolog.Logger
-	// NumberOfWorkers number of component workers.
-	NumberOfWorkers int
-	// workerPool queue that stores *ObserveRPCMetricsRequest that will be processed by component workers.
-	workerPool *worker.Pool[*ObserveRPCMetricsRequest]
-	metrics p2p.GossipSubControlMetricsObserver
-}
-
-var _ p2p.GossipSubRPCInspector = (*ControlMsgMetricsInspector)(nil)
-
-// Inspect submits a request to the worker pool to observe metrics for the rpc.
-// All errors returned from this function can be considered benign.
-func (c *ControlMsgMetricsInspector) Inspect(from peer.ID, rpc *pubsub.RPC) error {
-	nonce, err := internal.Nonce()
-	if err != nil {
-		return fmt.Errorf("failed to get observe rpc metrics request nonce: %w", err)
-	}
-	c.workerPool.Submit(&ObserveRPCMetricsRequest{Nonce: nonce, From: from, rpc: rpc})
-	return nil
-}
-
-// ObserveRPC collects metrics for the rpc.
-// No error is ever returned from this func.
-func (c *ControlMsgMetricsInspector) ObserveRPC(req *ObserveRPCMetricsRequest) error {
-	c.metrics.ObserveRPC(req.From, req.rpc)
-	return nil
-}
-
-// Name returns the name of the rpc inspector.
-func (c *ControlMsgMetricsInspector) Name() string {
-	return rpcInspectorComponentName
-}
-
-// NewControlMsgMetricsInspector returns a new *ControlMsgMetricsInspector
-func NewControlMsgMetricsInspector(logger zerolog.Logger, metricsObserver p2p.GossipSubControlMetricsObserver, numberOfWorkers int, heroStoreOpts ...queue.HeroStoreConfigOption) *ControlMsgMetricsInspector {
-	lg := logger.With().Str("component", "gossip_sub_rpc_metrics_observer_inspector").Logger()
-	c := &ControlMsgMetricsInspector{
-		logger:          lg,
-		NumberOfWorkers: numberOfWorkers,
-		metrics:         metricsObserver,
-	}
-
-	cfg := &queue.HeroStoreConfig{
-		SizeLimit: DefaultControlMsgMetricsInspectorQueueCacheSize,
-		Collector: metrics.NewNoopCollector(),
-	}
-
-	for _, opt := range heroStoreOpts {
-		opt(cfg)
-	}
-	store := queue.NewHeroStore(cfg.SizeLimit, logger, cfg.Collector)
-	pool := worker.NewWorkerPoolBuilder[*ObserveRPCMetricsRequest](c.logger, store, c.ObserveRPC).Build()
-	c.workerPool = pool
-
-	builder := component.NewComponentManagerBuilder()
-	for i := 0; i < c.NumberOfWorkers; i++ {
-		builder.AddWorker(pool.WorkerLogic())
-	}
-	c.Component = builder.Build()
-
-	return c
-}
diff --git a/network/p2p/inspector/internal/cache/cache.go b/network/p2p/inspector/internal/cache/cache.go
new file mode 100644
index 00000000000..6cfddff9d39
--- /dev/null
+++ b/network/p2p/inspector/internal/cache/cache.go
@@ -0,0 +1,196 @@
+package cache
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata"
+	"github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
+	"github.com/onflow/flow-go/module/mempool/stdmap"
+	"github.com/onflow/flow-go/network/p2p"
+	"github.com/onflow/flow-go/network/p2p/scoring"
+)
+
+type recordFactory func(nodeID flow.Identifier) *ClusterPrefixedMessagesReceivedRecord
+
+type RecordCacheConfig struct {
+	sizeLimit uint32
+	logger    zerolog.Logger
+	collector module.HeroCacheMetrics
+	// recordDecay decay factor used by the cache to perform geometric decay on gauge values.
+	recordDecay float64
+}
+
+// RecordCache is a cache that stores ClusterPrefixedMessagesReceivedRecord by peer node ID. Each record
+// contains a float64 Gauge field that indicates the current approximate number of cluster prefixed control messages that were allowed to bypass
+// validation due to some error that prevented the message from being validated.
+// The Gauge field is decayed over time back to 0. This ensures that nodes that fall
+// behind in the protocol can catch up.
+type RecordCache struct {
+	// recordFactory is a factory function that creates a new ClusterPrefixedMessagesReceivedRecord.
+	recordFactory recordFactory
+	// c is the underlying cache.
+	c *stdmap.Backend[flow.Identifier, *ClusterPrefixedMessagesReceivedRecord]
+	// decayFunc decay func used by the cache to perform decay on gauges.
+	decayFunc decayFunc
+}
+
+// NewRecordCache creates a new *RecordCache.
+// Args:
+// - config: record cache config.
+// - recordFactory: a factory function that creates a new spam record.
+// Returns:
+// - *RecordCache, the created cache.
+// Note that this cache is supposed to keep the cluster prefix control messages received record for the authorized (staked) nodes. Since the number of such nodes is
+// expected to be small, we do not eject any records from the cache. The cache size must be large enough to hold all
+// the records of the authorized nodes. Also, this cache keeps at most one record per peer id, so the
+// size of the cache must be at least the number of authorized nodes.
+func NewRecordCache(config *RecordCacheConfig, recordFactory recordFactory) (*RecordCache, error) {
+	backData := herocache.NewCache[*ClusterPrefixedMessagesReceivedRecord](
+		config.sizeLimit,
+		herocache.DefaultOversizeFactor,
+		heropool.LRUEjection,
+		config.logger.With().Str("mempool", "gossipsub-cluster-prefix-control-messages-received-records").Logger(),
+		config.collector,
+	)
+	return &RecordCache{
+		recordFactory: recordFactory,
+		decayFunc:     defaultDecayFunction(config.recordDecay),
+		c:             stdmap.NewBackend(stdmap.WithMutableBackData[flow.Identifier, *ClusterPrefixedMessagesReceivedRecord](backData)),
+	}, nil
+}
+
+// ReceivedClusterPrefixedMessage applies an adjustment that increments the number of cluster prefixed control messages received by a peer.
+// Returns the number of cluster prefix control messages received after the adjustment. The record is initialized before
+// the adjustment func is applied that will increment the Gauge.
+// Args:
+// - pid: the peer ID of the sender of the control message.
+// Returns:
+// - The cluster prefix control messages received gauge value after the adjustment.
+// - exception only in cases of internal data inconsistency or bugs. No errors are expected.
+func (r *RecordCache) ReceivedClusterPrefixedMessage(pid peer.ID) (float64, error) {
+	var err error
+	adjustFunc := func(record *ClusterPrefixedMessagesReceivedRecord) *ClusterPrefixedMessagesReceivedRecord {
+		record, err = r.decayAdjustment(record) // first decay the record
+		if err != nil {
+			return record
+		}
+		return r.incrementAdjustment(record) // then increment the record
+	}
+	nodeID := p2p.MakeId(pid)
+	adjustedRecord, adjusted := r.c.AdjustWithInit(nodeID, adjustFunc, func() *ClusterPrefixedMessagesReceivedRecord {
+		return r.recordFactory(nodeID)
+	})
+
+	if err != nil {
+		return 0, fmt.Errorf("unexpected error while applying decay and increment adjustments for peer %s: %w", pid, err)
+	}
+
+	if !adjusted {
+		return 0, fmt.Errorf("adjustment failed for peer %s", pid)
+	}
+
+	return adjustedRecord.Gauge, nil
+}
+
+// GetWithInit returns the current number of cluster prefixed control messages received from a peer.
+// The record is initialized before the count is returned.
+// Before the control messages received gauge value is returned, it is decayed using the configured decay function.
+// Returns the decayed gauge value and true if the record exists, 0 and false otherwise.
+// Args:
+// - pid: the peer ID of the sender of the control message.
+// Returns:
+// - The cluster prefixed control messages received gauge value after the decay and true if the record exists, 0 and false otherwise.
+// No errors are expected during normal operation.
+func (r *RecordCache) GetWithInit(pid peer.ID) (float64, bool, error) { + var err error + adjustLogic := func(record *ClusterPrefixedMessagesReceivedRecord) *ClusterPrefixedMessagesReceivedRecord { + // perform decay on gauge value + record, err = r.decayAdjustment(record) + return record + } + nodeID := p2p.MakeId(pid) + adjustedRecord, adjusted := r.c.AdjustWithInit(nodeID, adjustLogic, func() *ClusterPrefixedMessagesReceivedRecord { + return r.recordFactory(nodeID) + }) + if err != nil { + return 0, false, fmt.Errorf("unexpected error while applying decay adjustment for peer %s: %w", pid, err) + } + if !adjusted { + return 0, false, fmt.Errorf("decay adjustment failed for peer %s", pid) + } + + return adjustedRecord.Gauge, true, nil +} + +// Remove removes the record of the given peer id from the cache. +// Returns true if the record is removed, false otherwise (i.e., the record does not exist). +// Args: +// - pid: the peer ID of the sender of the control message. +// Returns: +// - true if the record is removed, false otherwise (i.e., the record does not exist). +func (r *RecordCache) Remove(pid peer.ID) bool { + return r.c.Remove(p2p.MakeId(pid)) +} + +// NodeIDs returns the list of identities of the nodes that have a spam record in the cache. +func (r *RecordCache) NodeIDs() flow.IdentifierList { + all := r.c.All() + + nodeIDs := make(flow.IdentifierList, 0, len(all)) + for nodeID := range all { + nodeIDs = append(nodeIDs, nodeID) + } + return nodeIDs +} + +// Size returns the number of records in the cache. +func (r *RecordCache) Size() uint { + return r.c.Size() +} + +func (r *RecordCache) incrementAdjustment(record *ClusterPrefixedMessagesReceivedRecord) *ClusterPrefixedMessagesReceivedRecord { + record.Gauge++ + record.lastUpdated = time.Now() + // Return the adjusted record. + return record +} + +// All errors returned from this function are unexpected and irrecoverable. +func (r *RecordCache) decayAdjustment(record *ClusterPrefixedMessagesReceivedRecord) (*ClusterPrefixedMessagesReceivedRecord, error) { + var err error + record, err = r.decayFunc(record) + if err != nil { + return record, err + } + record.lastUpdated = time.Now() + // Return the adjusted record. + return record, nil +} + +// decayFunc the callback used to apply a decay method to the record. +// All errors returned from this callback are unexpected and irrecoverable. +type decayFunc func(record *ClusterPrefixedMessagesReceivedRecord) (*ClusterPrefixedMessagesReceivedRecord, error) + +// defaultDecayFunction is the default decay function that is used to decay the cluster prefixed control message received gauge of a peer. +// All errors returned are unexpected and irrecoverable. 
+func defaultDecayFunction(decay float64) decayFunc {
+	return func(record *ClusterPrefixedMessagesReceivedRecord) (*ClusterPrefixedMessagesReceivedRecord, error) {
+		received := record.Gauge
+		if received == 0 {
+			return record, nil
+		}
+
+		decayedVal, err := scoring.GeometricDecay(received, decay, record.lastUpdated)
+		if err != nil {
+			return record, fmt.Errorf("could not decay cluster prefixed control messages received gauge: %w", err)
+		}
+		record.Gauge = decayedVal
+		return record, nil
+	}
+}
diff --git a/network/p2p/inspector/internal/cache/cache_test.go b/network/p2p/inspector/internal/cache/cache_test.go
new file mode 100644
index 00000000000..a9ccfeb5ed9
--- /dev/null
+++ b/network/p2p/inspector/internal/cache/cache_test.go
@@ -0,0 +1,510 @@
+package cache
+
+import (
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/network/p2p"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+const defaultDecay = 0.99
+
+// TestRecordCache_Init tests the record initialization logic of the RecordCache.
+// It ensures that a record is initialized on the first access for a node ID,
+// and that subsequent accesses return the already-initialized record.
+func TestRecordCache_Init(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	peerID1 := unittest.PeerIdFixture(t)
+	peerID2 := unittest.PeerIdFixture(t)
+
+	// test initializing a record for a node ID that doesn't exist in the cache
+	gauge, ok, err := cache.GetWithInit(peerID1)
+	require.NoError(t, err)
+	require.True(t, ok, "expected record to exist")
+	require.Zerof(t, gauge, "expected gauge to be 0")
+	require.Equal(t, uint(1), cache.Size(), "expected cache to have one additional record")
+
+	// test initializing a record for a node ID that already exists in the cache
+	gaugeAgain, ok, err := cache.GetWithInit(peerID1)
+	require.NoError(t, err)
+	require.True(t, ok, "expected record to still exist")
+	require.Zerof(t, gaugeAgain, "expected same gauge to be 0")
+	require.Equal(t, gauge, gaugeAgain, "expected records to be the same")
+	require.Equal(t, uint(1), cache.Size(), "expected cache to still have one additional record")
+
+	// test initializing a record for another node ID
+	gauge2, ok, err := cache.GetWithInit(peerID2)
+	require.NoError(t, err)
+	require.True(t, ok, "expected record to exist")
+	require.Zerof(t, gauge2, "expected second gauge to be 0")
+	require.Equal(t, uint(2), cache.Size(), "expected cache to have two additional records")
+}
+
+// TestRecordCache_ConcurrentInit tests the concurrent initialization of records.
+// The test covers the following scenarios:
+// 1. Multiple goroutines initializing records for different node IDs.
+// 2. Ensuring that all records are correctly initialized.
+func TestRecordCache_ConcurrentInit(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	pids := unittest.PeerIdFixtures(t, 10)
+
+	var wg sync.WaitGroup
+	wg.Add(len(pids))
+
+	for _, pid := range pids {
+		go func(id peer.ID) {
+			defer wg.Done()
+			gauge, found, err := cache.GetWithInit(id)
+			require.NoError(t, err)
+			require.True(t, found)
+			require.Zerof(t, gauge, "expected all gauge values to be initialized to 0")
+		}(pid)
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+}
+
+// TestRecordCache_ConcurrentSameRecordInit tests the concurrent initialization of the same record.
+// The test covers the following scenarios:
+// 1. Multiple goroutines attempting to initialize the same record concurrently.
+// 2. The record is initialized at most once; all goroutines observe the same zero-valued record.
+// 3. The record is correctly initialized in the cache and can be retrieved using the GetWithInit method.
+func TestRecordCache_ConcurrentSameRecordInit(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	nodeID := unittest.PeerIdFixture(t)
+	const concurrentAttempts = 10
+
+	var wg sync.WaitGroup
+	wg.Add(concurrentAttempts)
+
+	for i := 0; i < concurrentAttempts; i++ {
+		go func() {
+			defer wg.Done()
+			gauge, found, err := cache.GetWithInit(nodeID)
+			require.NoError(t, err)
+			require.True(t, found)
+			require.Zero(t, gauge)
+		}()
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+
+	// ensure that the record was initialized only once
+	require.Equal(t, uint(1), cache.Size())
+}
+
+// TestRecordCache_ReceivedClusterPrefixedMessage tests the ReceivedClusterPrefixedMessage method of the RecordCache.
+// The test covers the following scenarios:
+// 1. Updating a record gauge for an existing node ID.
+// 2. Attempting to update a record gauge for a non-existing node ID should not result in error. ReceivedClusterPrefixedMessage should always attempt to initialize the gauge.
+// 3. Multiple updates on the same record only initialize the record once.
+func TestRecordCache_ReceivedClusterPrefixedMessage(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	peerID1 := unittest.PeerIdFixture(t)
+	peerID2 := unittest.PeerIdFixture(t)
+
+	gauge, err := cache.ReceivedClusterPrefixedMessage(peerID1)
+	require.NoError(t, err)
+	require.Equal(t, float64(1), gauge)
+
+	// get will apply a slight decay, resulting
+	// in a gauge value less than the current gauge of 1 but greater than 0.9
+	currentGauge, ok, err := cache.GetWithInit(peerID1)
+	require.NoError(t, err)
+	require.True(t, ok)
+	require.LessOrEqual(t, currentGauge, gauge)
+	require.Greater(t, currentGauge, 0.9)
+
+	_, ok, err = cache.GetWithInit(peerID2)
+	require.NoError(t, err)
+	require.True(t, ok)
+
+	// test adjusting the spam record for a non-existing node ID
+	peerID3 := unittest.PeerIdFixture(t)
+	gauge3, err := cache.ReceivedClusterPrefixedMessage(peerID3)
+	require.NoError(t, err)
+	require.Equal(t, float64(1), gauge3)
+
+	// when updated the value should be incremented from 1 -> 2 and slightly decayed, resulting
+	// in a gauge value less than 2 but greater than 1.9
+	gauge3, err = cache.ReceivedClusterPrefixedMessage(peerID3)
+	require.NoError(t, err)
+	require.LessOrEqual(t, gauge3, 2.0)
+	require.Greater(t, gauge3, 1.9)
+}
+
+// TestRecordCache_Decay ensures that a gauge in the record cache is eventually decayed back to 0 after some time.
+func TestRecordCache_Decay(t *testing.T) {
+	cache := cacheFixture(t, 100, 0.09, zerolog.Nop(), metrics.NewNoopCollector())
+
+	peerID1 := unittest.PeerIdFixture(t)
+
+	// initialize a spam record for peerID1
+	gauge, err := cache.ReceivedClusterPrefixedMessage(peerID1)
+	require.Equal(t, float64(1), gauge)
+	require.NoError(t, err)
+	gauge, ok, err := cache.GetWithInit(peerID1)
+	require.True(t, ok)
+	require.NoError(t, err)
+	// gauge should have decayed slightly
+	require.True(t, gauge < float64(1))
+
+	time.Sleep(time.Second)
+
+	gauge, ok, err = cache.GetWithInit(peerID1)
+	require.True(t, ok)
+	require.NoError(t, err)
+	// gauge should have decayed further, approaching 0
+	require.Less(t, gauge, 0.1)
+}
+
+// TestRecordCache_Identities tests the NodeIDs method of the RecordCache.
+// The test covers the following scenarios:
+// 1. Initializing the cache with multiple records.
+// 2. Checking if the NodeIDs method returns the correct set of node IDs.
+func TestRecordCache_Identities(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	// initialize spam records for a few node IDs
+	peerID1 := unittest.PeerIdFixture(t)
+	peerID2 := unittest.PeerIdFixture(t)
+	peerID3 := unittest.PeerIdFixture(t)
+
+	_, ok, err := cache.GetWithInit(peerID1)
+	require.NoError(t, err)
+	require.True(t, ok)
+	_, ok, err = cache.GetWithInit(peerID2)
+	require.NoError(t, err)
+	require.True(t, ok)
+	_, ok, err = cache.GetWithInit(peerID3)
+	require.NoError(t, err)
+	require.True(t, ok)
+
+	// check if the NodeIDs method returns the correct set of node IDs
+	identities := cache.NodeIDs()
+	require.Equal(t, 3, len(identities))
+	require.ElementsMatch(t, identities, []flow.Identifier{p2p.MakeId(peerID1), p2p.MakeId(peerID2), p2p.MakeId(peerID3)})
+}
+
+// TestRecordCache_Remove tests the Remove method of the RecordCache.
+// The test covers the following scenarios:
+// 1. Initializing the cache with multiple records.
+// 2. Removing a record and checking if it is removed correctly.
+// 3. Ensuring the other records are still in the cache after removal.
+// 4. Attempting to remove a non-existent node ID.
+func TestRecordCache_Remove(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	// initialize spam records for a few node IDs
+	peerID1 := unittest.PeerIdFixture(t)
+	peerID2 := unittest.PeerIdFixture(t)
+	peerID3 := unittest.PeerIdFixture(t)
+
+	_, ok, err := cache.GetWithInit(peerID1)
+	require.NoError(t, err)
+	require.True(t, ok)
+	_, ok, err = cache.GetWithInit(peerID2)
+	require.NoError(t, err)
+	require.True(t, ok)
+	_, ok, err = cache.GetWithInit(peerID3)
+	require.NoError(t, err)
+	require.True(t, ok)
+
+	numOfIds := uint(3)
+	require.Equal(t, numOfIds, cache.Size(), fmt.Sprintf("expected size of the cache to be %d", numOfIds))
+	// remove peerID1 and check if the record is removed
+	require.True(t, cache.Remove(peerID1))
+	require.NotContains(t, cache.NodeIDs(), p2p.MakeId(peerID1))
+
+	// check if the other node IDs are still in the cache
+	_, exists, err := cache.GetWithInit(peerID2)
+	require.NoError(t, err)
+	require.True(t, exists)
+	_, exists, err = cache.GetWithInit(peerID3)
+	require.NoError(t, err)
+	require.True(t, exists)
+
+	// attempt to remove a non-existent node ID
+	peerID4 := unittest.PeerIdFixture(t)
+	require.False(t, cache.Remove(peerID4))
+}
+
+// TestRecordCache_ConcurrentRemove tests the concurrent removal of records for different node IDs.
+// The test covers the following scenarios:
+// 1. Multiple goroutines removing records for different node IDs concurrently.
+// 2. The records are correctly removed from the cache.
+func TestRecordCache_ConcurrentRemove(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	peerIds := unittest.PeerIdFixtures(t, 10)
+	for _, pid := range peerIds {
+		_, ok, err := cache.GetWithInit(pid)
+		require.NoError(t, err)
+		require.True(t, ok)
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(len(peerIds))
+
+	for _, pid := range peerIds {
+		go func(id peer.ID) {
+			defer wg.Done()
+			removed := cache.Remove(id)
+			require.True(t, removed)
+			require.NotContains(t, cache.NodeIDs(), p2p.MakeId(id))
+		}(pid)
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+
+	require.Equal(t, uint(0), cache.Size())
+}
+
+// TestRecordCache_ConcurrentUpdatesAndReads tests the concurrent adjustments and reads of records for different
+// node IDs. The test covers the following scenarios:
+// 1. Multiple goroutines adjusting records for different node IDs concurrently.
+// 2. Multiple goroutines getting records for different node IDs concurrently.
+// 3. The adjusted records are correctly updated in the cache.
+func TestRecordCache_ConcurrentUpdatesAndReads(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	peerIds := unittest.PeerIdFixtures(t, 10)
+	for _, pid := range peerIds {
+		_, ok, err := cache.GetWithInit(pid)
+		require.NoError(t, err)
+		require.True(t, ok)
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(len(peerIds) * 2)
+
+	for _, pid := range peerIds {
+		// adjust spam records concurrently
+		go func(id peer.ID) {
+			defer wg.Done()
+			_, err := cache.ReceivedClusterPrefixedMessage(id)
+			require.NoError(t, err)
+		}(pid)
+
+		// get spam records concurrently
+		go func(id peer.ID) {
+			defer wg.Done()
+			_, found, err := cache.GetWithInit(id)
+			require.NoError(t, err)
+			require.True(t, found)
+		}(pid)
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+
+	// ensure that the records are correctly updated in the cache
+	for _, pid := range peerIds {
+		gauge, found, err := cache.GetWithInit(pid)
+		require.NoError(t, err)
+		require.True(t, found)
+		// slight decay will result in 0.9 < gauge < 1
+		require.LessOrEqual(t, gauge, 1.0)
+		require.Greater(t, gauge, 0.9)
+	}
+}
+
+// TestRecordCache_ConcurrentInitAndRemove tests the concurrent initialization and removal of records for different
+// node IDs. The test covers the following scenarios:
+// 1. Multiple goroutines initializing records for different node IDs concurrently.
+// 2. Multiple goroutines removing records for different node IDs concurrently.
+// 3. The initialized records are correctly added to the cache.
+// 4. The removed records are correctly removed from the cache.
+func TestRecordCache_ConcurrentInitAndRemove(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	peerIds := unittest.PeerIdFixtures(t, 20)
+	peerIdsToAdd := peerIds[:10]
+	peerIdsToRemove := peerIds[10:]
+
+	for _, pid := range peerIdsToRemove {
+		_, ok, err := cache.GetWithInit(pid)
+		require.NoError(t, err)
+		require.True(t, ok)
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(len(peerIds))
+
+	// initialize spam records concurrently
+	for _, pid := range peerIdsToAdd {
+		go func(id peer.ID) {
+			defer wg.Done()
+			_, ok, err := cache.GetWithInit(id)
+			require.NoError(t, err)
+			require.True(t, ok)
+		}(pid)
+	}
+
+	// remove spam records concurrently
+	for _, pid := range peerIdsToRemove {
+		go func(id peer.ID) {
+			defer wg.Done()
+			cache.Remove(id)
+			require.NotContains(t, cache.NodeIDs(), p2p.MakeId(id))
+		}(pid)
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+
+	// ensure that the initialized records are correctly added to the cache
+	// and removed records are correctly removed from the cache
+	expectedIds := make([]flow.Identifier, len(peerIdsToAdd))
+	for i, pid := range peerIdsToAdd {
+		expectedIds[i] = p2p.MakeId(pid)
+	}
+	require.ElementsMatch(t, expectedIds, cache.NodeIDs())
+}
+
+// TestRecordCache_ConcurrentInitRemoveUpdate tests the concurrent initialization, removal, and adjustment of
+// records for different node IDs. The test covers the following scenarios:
+// 1. Multiple goroutines initializing records for different node IDs concurrently.
+// 2. Multiple goroutines removing records for different node IDs concurrently.
+// 3. Multiple goroutines adjusting records for different node IDs concurrently.
+func TestRecordCache_ConcurrentInitRemoveUpdate(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	peerIds := unittest.PeerIdFixtures(t, 30)
+	peerIdsToAdd := peerIds[:10]
+	peerIdsToRemove := peerIds[10:20]
+	peerIdsToAdjust := peerIds[20:]
+
+	for _, pid := range peerIdsToRemove {
+		_, ok, err := cache.GetWithInit(pid)
+		require.NoError(t, err)
+		require.True(t, ok)
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(len(peerIds))
+
+	// Initialize spam records concurrently
+	for _, pid := range peerIdsToAdd {
+		go func(id peer.ID) {
+			defer wg.Done()
+			_, ok, err := cache.GetWithInit(id)
+			require.NoError(t, err)
+			require.True(t, ok)
+		}(pid)
+	}
+
+	// Remove spam records concurrently
+	for _, pid := range peerIdsToRemove {
+		go func(id peer.ID) {
+			defer wg.Done()
+			cache.Remove(id)
+			require.NotContains(t, cache.NodeIDs(), p2p.MakeId(id))
+		}(pid)
+	}
+
+	// Adjust spam records concurrently
+	for _, pid := range peerIdsToAdjust {
+		go func(id peer.ID) {
+			defer wg.Done()
+			_, _ = cache.ReceivedClusterPrefixedMessage(id)
+		}(pid)
+	}
+
+	expectedPeerIds := append(peerIdsToAdd, peerIdsToAdjust...)
+	expectedIds := make([]flow.Identifier, len(expectedPeerIds))
+	for i, pid := range expectedPeerIds {
+		expectedIds[i] = p2p.MakeId(pid)
+	}
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+	require.ElementsMatch(t, expectedIds, cache.NodeIDs())
+}
+
+// TestRecordCache_EdgeCasesAndInvalidInputs tests edge cases of concurrent RecordCache usage.
+// The test covers the following scenarios:
+// 1. Concurrently initializing records.
+// 2. Concurrently removing records.
+// 3. Concurrently listing node IDs while the cache is being modified.
+func TestRecordCache_EdgeCasesAndInvalidInputs(t *testing.T) {
+	cache := cacheFixture(t, 100, defaultDecay, zerolog.Nop(), metrics.NewNoopCollector())
+
+	peerIds := unittest.PeerIdFixtures(t, 20)
+	peerIdsToAdd := peerIds[:10]
+	peerIdsToRemove := peerIds[10:20]
+
+	for _, pid := range peerIdsToRemove {
+		_, ok, err := cache.GetWithInit(pid)
+		require.NoError(t, err)
+		require.True(t, ok)
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(len(peerIds) + 10)
+
+	// initialize spam records concurrently
+	for _, pid := range peerIdsToAdd {
+		go func(id peer.ID) {
+			defer wg.Done()
+			retrieved, ok, err := cache.GetWithInit(id)
+			require.NoError(t, err)
+			require.True(t, ok)
+			require.Zero(t, retrieved)
+		}(pid)
+	}
+
+	// remove spam records concurrently
+	for _, pid := range peerIdsToRemove {
+		go func(id peer.ID) {
+			defer wg.Done()
+			require.True(t, cache.Remove(id))
+			require.NotContains(t, cache.NodeIDs(), p2p.MakeId(id))
+		}(pid)
+	}
+
+	expectedIds := make([]flow.Identifier, len(peerIds))
+	for i, pid := range peerIds {
+		expectedIds[i] = p2p.MakeId(pid)
+	}
+	// call NodeIDs method concurrently
+	for i := 0; i < 10; i++ {
+		go func() {
+			defer wg.Done()
+			ids := cache.NodeIDs()
+			// the number of returned IDs should be less than or equal to the number of node IDs
+			require.True(t, len(ids) <= len(peerIds))
+			// the returned IDs should be a subset of the node IDs
+			for _, id := range ids {
+				require.Contains(t, expectedIds, id)
+			}
+		}()
+	}
+	unittest.RequireReturnsBefore(t, wg.Wait, 1*time.Second, "timed out waiting for goroutines to finish")
+}
+
+// cacheFixture returns a new *RecordCache.
+func cacheFixture(t *testing.T, sizeLimit uint32, recordDecay float64, logger zerolog.Logger, collector module.HeroCacheMetrics) *RecordCache {
+	config := &RecordCacheConfig{
+		sizeLimit:   sizeLimit,
+		logger:      logger,
+		collector:   collector,
+		recordDecay: recordDecay,
+	}
+	r, err := NewRecordCache(config, NewClusterPrefixedMessagesReceivedRecord)
+	require.NoError(t, err)
+	require.NotNil(t, r)
+	// expect cache to be empty
+	require.Equalf(t, uint(0), r.Size(), "cache size must be 0")
+	return r
+}
diff --git a/network/p2p/inspector/internal/cache/cluster_prefixed_received_tracker.go b/network/p2p/inspector/internal/cache/cluster_prefixed_received_tracker.go
new file mode 100644
index 00000000000..7bf156d0cae
--- /dev/null
+++ b/network/p2p/inspector/internal/cache/cluster_prefixed_received_tracker.go
@@ -0,0 +1,65 @@
+package cache
+
+import (
+	"fmt"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/rs/zerolog"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+)
+
+// ClusterPrefixedMessagesReceivedTracker struct that keeps track of the amount of cluster prefixed control messages received by a peer.
+type ClusterPrefixedMessagesReceivedTracker struct {
+	cache *RecordCache
+	// activeClusterIds atomic pointer that stores the current active cluster IDs. This ensures safe concurrent access to the activeClusterIds internal flow.ChainIDList.
+	activeClusterIds *atomic.Pointer[flow.ChainIDList]
+}
+
+// NewClusterPrefixedMessagesReceivedTracker returns a new *ClusterPrefixedMessagesReceivedTracker.
+func NewClusterPrefixedMessagesReceivedTracker(logger zerolog.Logger, sizeLimit uint32, clusterPrefixedCacheCollector module.HeroCacheMetrics, decay float64) (*ClusterPrefixedMessagesReceivedTracker,
+	error) {
+	config := &RecordCacheConfig{
+		sizeLimit:   sizeLimit,
+		logger:      logger,
+		collector:   clusterPrefixedCacheCollector,
+		recordDecay: decay,
+	}
+	recordCache, err := NewRecordCache(config, NewClusterPrefixedMessagesReceivedRecord)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create new record cache: %w", err)
+	}
+	return &ClusterPrefixedMessagesReceivedTracker{cache: recordCache, activeClusterIds: atomic.NewPointer[flow.ChainIDList](&flow.ChainIDList{})}, nil
+}
+
+// Inc increments the cluster prefixed control messages received Gauge for the peer.
+// All errors returned from this func are unexpected and irrecoverable.
+func (c *ClusterPrefixedMessagesReceivedTracker) Inc(pid peer.ID) (float64, error) {
+	count, err := c.cache.ReceivedClusterPrefixedMessage(pid)
+	if err != nil {
+		return 0, fmt.Errorf("failed to increment cluster prefixed received tracker gauge value for peer %s: %w", pid, err)
+	}
+	return count, nil
+}
+
+// Load loads the current number of cluster prefixed control messages received by a peer.
+// All errors returned from this func are unexpected and irrecoverable.
+func (c *ClusterPrefixedMessagesReceivedTracker) Load(pid peer.ID) (float64, error) {
+	count, _, err := c.cache.GetWithInit(pid)
+	if err != nil {
+		return 0, fmt.Errorf("failed to get cluster prefixed received tracker gauge value for peer %s: %w", pid, err)
+	}
+	return count, nil
+}
+
+// StoreActiveClusterIds stores the active cluster Ids in an internal atomic pointer.
+func (c *ClusterPrefixedMessagesReceivedTracker) StoreActiveClusterIds(clusterIdList flow.ChainIDList) {
+	c.activeClusterIds.Store(&clusterIdList)
+}
+
+// GetActiveClusterIds gets the active cluster Ids from the internal atomic pointer.
+func (c *ClusterPrefixedMessagesReceivedTracker) GetActiveClusterIds() flow.ChainIDList {
+	return *c.activeClusterIds.Load()
+}
diff --git a/network/p2p/inspector/internal/cache/record.go b/network/p2p/inspector/internal/cache/record.go
new file mode 100644
index 00000000000..7c93817aa94
--- /dev/null
+++ b/network/p2p/inspector/internal/cache/record.go
@@ -0,0 +1,26 @@
+package cache
+
+import (
+	"time"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// ClusterPrefixedMessagesReceivedRecord cache record that keeps track of the amount of cluster prefixed control messages received from a peer.
+// This struct is keyed by the node ID of the sender for deduplication.
+type ClusterPrefixedMessagesReceivedRecord struct {
+	// NodeID the node ID of the sender.
+	NodeID flow.Identifier
+	// Gauge represents the approximate amount of cluster prefixed messages received by a peer, this
+	// value is decayed back to 0 after some time.
+	Gauge       float64
+	lastUpdated time.Time
+}
+
+func NewClusterPrefixedMessagesReceivedRecord(nodeID flow.Identifier) *ClusterPrefixedMessagesReceivedRecord {
+	return &ClusterPrefixedMessagesReceivedRecord{
+		NodeID:      nodeID,
+		Gauge:       0.0,
+		lastUpdated: time.Now(),
+	}
+}
diff --git a/network/p2p/inspector/internal/cache/tracker_test.go b/network/p2p/inspector/internal/cache/tracker_test.go
new file mode 100644
index 00000000000..ce89a2ec631
--- /dev/null
+++ b/network/p2p/inspector/internal/cache/tracker_test.go
@@ -0,0 +1,142 @@
+package cache
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestClusterPrefixedMessagesReceivedTracker_Inc ensures cluster prefixed received tracker increments a cluster prefixed control messages received gauge value correctly.
+func TestClusterPrefixedMessagesReceivedTracker_Inc(t *testing.T) {
+	tracker := mockTracker(t)
+	id := unittest.PeerIdFixture(t)
+	n := float64(5)
+	prevGauge := 0.0
+	for i := float64(1); i <= n; i++ {
+		gauge, err := tracker.Inc(id)
+		require.NoError(t, err)
+		// on each increment the current gauge value should
+		// always be greater than the previous gauge value but
+		// slightly less than i due to the decay.
+		require.LessOrEqual(t, gauge, i)
+		require.Greater(t, gauge, prevGauge)
+		prevGauge = gauge
+	}
+}
+
+// TestClusterPrefixedMessagesReceivedTracker_IncConcurrent ensures cluster prefixed received tracker increments a cluster prefixed control messages received gauge value correctly concurrently.
+func TestClusterPrefixedMessagesReceivedTracker_IncConcurrent(t *testing.T) {
+	tracker := mockTracker(t)
+	n := float64(5)
+	id := unittest.PeerIdFixture(t)
+	var wg sync.WaitGroup
+	wg.Add(5)
+	for i := float64(0); i < n; i++ {
+		go func() {
+			defer wg.Done()
+			_, err := tracker.Inc(id)
+			require.NoError(t, err)
+		}()
+	}
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+	// after each decay is applied the gauge value result should be slightly less than n
+	gaugeVal, err := tracker.Load(id)
+	require.NoError(t, err)
+	require.InDelta(t, n, gaugeVal, .2)
+}
+
+// TestClusterPrefixedMessagesReceivedTracker_ConcurrentIncAndLoad ensures cluster prefixed received tracker increments/loads the cluster prefixed control messages received gauge value correctly concurrently.
+func TestClusterPrefixedMessagesReceivedTracker_ConcurrentIncAndLoad(t *testing.T) { + tracker := mockTracker(t) + n := float64(5) + id := unittest.PeerIdFixture(t) + var wg sync.WaitGroup + wg.Add(10) + + go func() { + for i := float64(0); i < n; i++ { + go func() { + defer wg.Done() + _, err := tracker.Inc(id) + require.NoError(t, err) + }() + } + }() + + // slight sleep so that each goroutine does not start at the same exact time + time.Sleep(500 * time.Millisecond) + go func() { + for i := float64(0); i < n; i++ { + go func() { + defer wg.Done() + gaugeVal, err := tracker.Load(id) + require.NoError(t, err) + require.Greater(t, gaugeVal, float64(0)) + require.LessOrEqual(t, gaugeVal, n) + }() + } + }() + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + gaugeVal, err := tracker.Load(id) + require.NoError(t, err) + // after each decay is applied the gauge value result should be slightly less than n + require.InDelta(t, n, gaugeVal, .2) +} + +func TestClusterPrefixedMessagesReceivedTracker_StoreAndGetActiveClusterIds(t *testing.T) { + tracker := mockTracker(t) + activeClusterIds := []flow.ChainIDList{chainIDListFixture(), chainIDListFixture(), chainIDListFixture()} + for _, chainIDList := range activeClusterIds { + tracker.StoreActiveClusterIds(chainIDList) + actualChainIdList := tracker.GetActiveClusterIds() + require.Equal(t, chainIDList, actualChainIdList) + } +} + +func TestClusterPrefixedMessagesReceivedTracker_StoreAndGetActiveClusterIdsConcurrent(t *testing.T) { + tracker := mockTracker(t) + activeClusterIds := []flow.ChainIDList{chainIDListFixture(), chainIDListFixture(), chainIDListFixture()} + expectedLen := len(activeClusterIds[0]) + var wg sync.WaitGroup + wg.Add(len(activeClusterIds)) + for _, chainIDList := range activeClusterIds { + go func(ids flow.ChainIDList) { + defer wg.Done() + tracker.StoreActiveClusterIds(ids) + actualChainIdList := tracker.GetActiveClusterIds() + require.NotNil(t, actualChainIdList) + require.Equal(t, expectedLen, len(actualChainIdList)) // each fixture is of the same len + }(chainIDList) + } + + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + + actualChainIdList := tracker.GetActiveClusterIds() + require.NotNil(t, actualChainIdList) + require.Equal(t, expectedLen, len(actualChainIdList)) // each fixture is of the same len +} + +func mockTracker(t *testing.T) *ClusterPrefixedMessagesReceivedTracker { + logger := zerolog.Nop() + sizeLimit := uint32(100) + collector := metrics.NewNoopCollector() + decay := defaultDecay + tracker, err := NewClusterPrefixedMessagesReceivedTracker(logger, sizeLimit, collector, decay) + require.NoError(t, err) + return tracker +} + +func chainIDListFixture() flow.ChainIDList { + return flow.ChainIDList{ + flow.ChainID(unittest.IdentifierFixture().String()), + flow.ChainID(unittest.IdentifierFixture().String()), + flow.ChainID(unittest.IdentifierFixture().String()), + flow.ChainID(unittest.IdentifierFixture().String()), + } +} diff --git a/network/p2p/inspector/internal/ratelimit/control_message_rate_limiter.go b/network/p2p/inspector/internal/ratelimit/control_message_rate_limiter.go index 6a43c87ff96..e3d135383e5 100644 --- a/network/p2p/inspector/internal/ratelimit/control_message_rate_limiter.go +++ b/network/p2p/inspector/internal/ratelimit/control_message_rate_limiter.go @@ -4,9 +4,11 @@ import ( "time" "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" 
"golang.org/x/time/rate" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/unicast/ratelimit" "github.com/onflow/flow-go/network/p2p/utils/ratelimiter" ) @@ -19,7 +21,13 @@ var _ p2p.BasicRateLimiter = (*ControlMessageRateLimiter)(nil) // NewControlMessageRateLimiter returns a new ControlMessageRateLimiter. The cleanup loop will be started in a // separate goroutine and should be stopped by calling Close. -func NewControlMessageRateLimiter(limit rate.Limit, burst int) p2p.BasicRateLimiter { +func NewControlMessageRateLimiter(logger zerolog.Logger, limit rate.Limit, burst int) p2p.BasicRateLimiter { + if limit == 0 { + logger.Warn().Msg("control message rate limit set to 0 using noop rate limiter") + // setup noop rate limiter if rate limiting is disabled + return ratelimit.NewNoopRateLimiter() + } + // NOTE: we use a lockout duration of 0 because we only need to expose the basic functionality of the // rate limiter and not the lockout feature. lockoutDuration := time.Duration(0) diff --git a/network/p2p/inspector/rpc-inspection-process.png b/network/p2p/inspector/rpc-inspection-process.png new file mode 100644 index 00000000000..eac93e112c2 Binary files /dev/null and b/network/p2p/inspector/rpc-inspection-process.png differ diff --git a/network/p2p/inspector/validation/control_message_validation.go b/network/p2p/inspector/validation/control_message_validation.go deleted file mode 100644 index a837bdff68d..00000000000 --- a/network/p2p/inspector/validation/control_message_validation.go +++ /dev/null @@ -1,327 +0,0 @@ -package validation - -import ( - "fmt" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/engine/common/worker" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/module/mempool/queue" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/inspector/internal" - "github.com/onflow/flow-go/utils/logging" -) - -const ( - // DefaultNumberOfWorkers default number of workers for the inspector component. - DefaultNumberOfWorkers = 5 - // DefaultControlMsgValidationInspectorQueueCacheSize is the default size of the inspect message queue. - DefaultControlMsgValidationInspectorQueueCacheSize = 100 - // rpcInspectorComponentName the rpc inspector component name. - rpcInspectorComponentName = "gossipsub_rpc_validation_inspector" -) - -// InspectMsgRequest represents a short digest of an RPC control message. It is used for further message inspection by component workers. -type InspectMsgRequest struct { - // Nonce adds random value so that when msg req is stored on hero store a unique ID can be created from the struct fields. - Nonce []byte - // Peer sender of the message. - Peer peer.ID - // CtrlMsg the control message that will be inspected. - ctrlMsg *pubsub_pb.ControlMessage - validationConfig *CtrlMsgValidationConfig -} - -// ControlMsgValidationInspectorConfig validation configuration for each type of RPC control message. -type ControlMsgValidationInspectorConfig struct { - // NumberOfWorkers number of component workers to start for processing RPC messages. - NumberOfWorkers int - // InspectMsgStoreOpts options used to configure the underlying herocache message store. 
- InspectMsgStoreOpts []queue.HeroStoreConfigOption - // GraftValidationCfg validation configuration for GRAFT control messages. - GraftValidationCfg *CtrlMsgValidationConfig - // PruneValidationCfg validation configuration for PRUNE control messages. - PruneValidationCfg *CtrlMsgValidationConfig -} - -// getCtrlMsgValidationConfig returns the CtrlMsgValidationConfig for the specified p2p.ControlMessageType. -func (conf *ControlMsgValidationInspectorConfig) getCtrlMsgValidationConfig(controlMsg p2p.ControlMessageType) (*CtrlMsgValidationConfig, bool) { - switch controlMsg { - case p2p.CtrlMsgGraft: - return conf.GraftValidationCfg, true - case p2p.CtrlMsgPrune: - return conf.PruneValidationCfg, true - default: - return nil, false - } -} - -// allCtrlMsgValidationConfig returns all control message validation configs in a list. -func (conf *ControlMsgValidationInspectorConfig) allCtrlMsgValidationConfig() CtrlMsgValidationConfigs { - return CtrlMsgValidationConfigs{conf.GraftValidationCfg, conf.PruneValidationCfg} -} - -// ControlMsgValidationInspector RPC message inspector that inspects control messages and performs some validation on them, -// when some validation rule is broken feedback is given via the Peer scoring notifier. -type ControlMsgValidationInspector struct { - component.Component - logger zerolog.Logger - sporkID flow.Identifier - // config control message validation configurations. - config *ControlMsgValidationInspectorConfig - // distributor used to disseminate invalid RPC message notifications. - distributor p2p.GossipSubInspectorNotifDistributor - // workerPool queue that stores *InspectMsgRequest that will be processed by component workers. - workerPool *worker.Pool[*InspectMsgRequest] -} - -var _ component.Component = (*ControlMsgValidationInspector)(nil) -var _ p2p.GossipSubRPCInspector = (*ControlMsgValidationInspector)(nil) - -// NewInspectMsgRequest returns a new *InspectMsgRequest. 
-func NewInspectMsgRequest(from peer.ID, validationConfig *CtrlMsgValidationConfig, ctrlMsg *pubsub_pb.ControlMessage) (*InspectMsgRequest, error) { - nonce, err := internal.Nonce() - if err != nil { - return nil, fmt.Errorf("failed to get inspect message request nonce: %w", err) - } - return &InspectMsgRequest{Nonce: nonce, Peer: from, validationConfig: validationConfig, ctrlMsg: ctrlMsg}, nil -} - -// NewControlMsgValidationInspector returns new ControlMsgValidationInspector -func NewControlMsgValidationInspector( - logger zerolog.Logger, - sporkID flow.Identifier, - config *ControlMsgValidationInspectorConfig, - distributor p2p.GossipSubInspectorNotifDistributor, -) *ControlMsgValidationInspector { - lg := logger.With().Str("component", "gossip_sub_rpc_validation_inspector").Logger() - c := &ControlMsgValidationInspector{ - logger: lg, - sporkID: sporkID, - config: config, - distributor: distributor, - } - - cfg := &queue.HeroStoreConfig{ - SizeLimit: DefaultControlMsgValidationInspectorQueueCacheSize, - Collector: metrics.NewNoopCollector(), - } - - for _, opt := range config.InspectMsgStoreOpts { - opt(cfg) - } - - store := queue.NewHeroStore(cfg.SizeLimit, logger, cfg.Collector) - pool := worker.NewWorkerPoolBuilder[*InspectMsgRequest](lg, store, c.processInspectMsgReq).Build() - - c.workerPool = pool - - builder := component.NewComponentManagerBuilder() - builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - distributor.Start(ctx) - select { - case <-ctx.Done(): - case <-distributor.Ready(): - ready() - } - <-distributor.Done() - }) - // start rate limiters cleanup loop in workers - for _, conf := range c.config.allCtrlMsgValidationConfig() { - validationConfig := conf - builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - ready() - validationConfig.RateLimiter.Start(ctx) - }) - } - for i := 0; i < c.config.NumberOfWorkers; i++ { - builder.AddWorker(pool.WorkerLogic()) - } - c.Component = builder.Build() - return c -} - -// Inspect inspects the rpc received and returns an error if any validation rule is broken. -// For each control message type an initial inspection is done synchronously to check the amount -// of messages in the control message. Further inspection is done asynchronously to check rate limits -// and validate topic IDS each control message if initial validation is passed. -// All errors returned from this function can be considered benign. -// errors returned: -// -// ErrDiscardThreshold - if the message count for the control message type exceeds the discard threshold. -func (c *ControlMsgValidationInspector) Inspect(from peer.ID, rpc *pubsub.RPC) error { - control := rpc.GetControl() - for _, ctrlMsgType := range p2p.ControlMessageTypes() { - lg := c.logger.With(). - Str("peer_id", from.String()). - Str("ctrl_msg_type", string(ctrlMsgType)).Logger() - validationConfig, ok := c.config.getCtrlMsgValidationConfig(ctrlMsgType) - if !ok { - lg.Trace().Msg("validation configuration for control type does not exists skipping") - continue - } - - // mandatory blocking pre-processing of RPC to check discard threshold. - err := c.blockingPreprocessingRpc(from, validationConfig, control) - if err != nil { - lg.Error(). - Err(err). - Str("peer_id", from.String()). - Str("ctrl_msg_type", string(ctrlMsgType)). 
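Both the removed inspector here and its replacement later in this diff share the same shape: the synchronous Inspect callback only enqueues a request, and a fixed pool of workers drains the queue. A channel-based sketch under that assumption (flow-go's worker.Pool is backed by a HeroStore rather than a plain channel):

```go
package main

import (
	"fmt"
	"sync"
)

type inspectRequest struct{ peer string }

func main() {
	queue := make(chan inspectRequest, 100) // bounded, like the hero store's size limit
	var wg sync.WaitGroup

	const numberOfWorkers = 5 // mirrors DefaultNumberOfWorkers above
	for i := 0; i < numberOfWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for req := range queue {
				// stand-in for processInspectMsgReq: deep validation happens here
				fmt.Println("inspecting rpc from", req.peer)
			}
		}()
	}

	// the synchronous Inspect path just submits and returns immediately
	queue <- inspectRequest{peer: "peerA"}
	queue <- inspectRequest{peer: "peerB"}
	close(queue)
	wg.Wait()
}
```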
- Msg("could not pre-process rpc, aborting") - return fmt.Errorf("could not pre-process rpc, aborting: %w", err) - } - - // queue further async inspection - req, err := NewInspectMsgRequest(from, validationConfig, control) - if err != nil { - lg.Error(). - Err(err). - Str("peer_id", from.String()). - Str("ctrl_msg_type", string(ctrlMsgType)). - Msg("failed to get inspect message request") - return fmt.Errorf("failed to get inspect message request: %w", err) - } - c.workerPool.Submit(req) - } - - return nil -} - -// Name returns the name of the rpc inspector. -func (c *ControlMsgValidationInspector) Name() string { - return rpcInspectorComponentName -} - -// blockingPreprocessingRpc ensures the RPC control message count does not exceed the configured discard threshold. -func (c *ControlMsgValidationInspector) blockingPreprocessingRpc(from peer.ID, validationConfig *CtrlMsgValidationConfig, controlMessage *pubsub_pb.ControlMessage) error { - lg := c.logger.With(). - Str("peer_id", from.String()). - Str("ctrl_msg_type", string(validationConfig.ControlMsg)).Logger() - - count := c.getCtrlMsgCount(validationConfig.ControlMsg, controlMessage) - // if Count greater than discard threshold drop message and penalize - if count > validationConfig.DiscardThreshold { - discardThresholdErr := NewDiscardThresholdErr(validationConfig.ControlMsg, count, validationConfig.DiscardThreshold) - lg.Warn(). - Err(discardThresholdErr). - Uint64("ctrl_msg_count", count). - Uint64("upper_threshold", discardThresholdErr.discardThreshold). - Bool(logging.KeySuspicious, true). - Msg("rejecting rpc control message") - err := c.distributor.Distribute(p2p.NewInvalidControlMessageNotification(from, validationConfig.ControlMsg, count, discardThresholdErr)) - if err != nil { - lg.Error(). - Err(err). - Bool(logging.KeySuspicious, true). - Msg("failed to distribute invalid control message notification") - return err - } - return discardThresholdErr - } - - return nil -} - -// processInspectMsgReq func used by component workers to perform further inspection of control messages that will check if the messages are rate limited -// and ensure all topic IDS are valid when the amount of messages is above the configured safety threshold. -func (c *ControlMsgValidationInspector) processInspectMsgReq(req *InspectMsgRequest) error { - count := c.getCtrlMsgCount(req.validationConfig.ControlMsg, req.ctrlMsg) - lg := c.logger.With(). - Str("peer_id", req.Peer.String()). - Str("ctrl_msg_type", string(req.validationConfig.ControlMsg)). - Uint64("ctrl_msg_count", count).Logger() - var validationErr error - switch { - case !req.validationConfig.RateLimiter.Allow(req.Peer, int(count)): // check if Peer RPC messages are rate limited - validationErr = NewRateLimitedControlMsgErr(req.validationConfig.ControlMsg) - case count > req.validationConfig.SafetyThreshold: // check if Peer RPC messages Count greater than safety threshold further inspect each message individually - validationErr = c.validateTopics(req.validationConfig.ControlMsg, req.ctrlMsg) - default: - lg.Trace(). - Uint64("upper_threshold", req.validationConfig.DiscardThreshold). - Uint64("safety_threshold", req.validationConfig.SafetyThreshold). - Msg(fmt.Sprintf("control message %s inspection passed %d is below configured safety threshold", req.validationConfig.ControlMsg, count)) - return nil - } - if validationErr != nil { - lg.Error(). - Err(validationErr). - Bool(logging.KeySuspicious, true). 
- Msg("rpc control message async inspection failed") - err := c.distributor.Distribute(p2p.NewInvalidControlMessageNotification(req.Peer, req.validationConfig.ControlMsg, count, validationErr)) - if err != nil { - lg.Error(). - Err(err). - Bool(logging.KeySuspicious, true). - Msg("failed to distribute invalid control message notification") - } - } - return nil -} - -// getCtrlMsgCount returns the amount of specified control message type in the rpc ControlMessage. -func (c *ControlMsgValidationInspector) getCtrlMsgCount(ctrlMsgType p2p.ControlMessageType, ctrlMsg *pubsub_pb.ControlMessage) uint64 { - switch ctrlMsgType { - case p2p.CtrlMsgGraft: - return uint64(len(ctrlMsg.GetGraft())) - case p2p.CtrlMsgPrune: - return uint64(len(ctrlMsg.GetPrune())) - default: - return 0 - } -} - -// validateTopics ensures all topics in the specified control message are valid flow topic/channel and no duplicate topics exist. -// All errors returned from this function can be considered benign. -func (c *ControlMsgValidationInspector) validateTopics(ctrlMsgType p2p.ControlMessageType, ctrlMsg *pubsub_pb.ControlMessage) error { - seen := make(map[channels.Topic]struct{}) - validateTopic := func(topic channels.Topic) error { - if _, ok := seen[topic]; ok { - return NewIDuplicateTopicErr(topic) - } - seen[topic] = struct{}{} - err := c.validateTopic(topic) - if err != nil { - return err - } - return nil - } - switch ctrlMsgType { - case p2p.CtrlMsgGraft: - for _, graft := range ctrlMsg.GetGraft() { - topic := channels.Topic(graft.GetTopicID()) - err := validateTopic(topic) - if err != nil { - return err - } - } - case p2p.CtrlMsgPrune: - for _, prune := range ctrlMsg.GetPrune() { - topic := channels.Topic(prune.GetTopicID()) - err := validateTopic(topic) - if err != nil { - return err - } - } - } - return nil -} - -// validateTopic the topic is a valid flow topic/channel. -// All errors returned from this function can be considered benign. -func (c *ControlMsgValidationInspector) validateTopic(topic channels.Topic) error { - err := channels.IsValidFlowTopic(topic, c.sporkID) - if err != nil { - return NewInvalidTopicErr(topic, err) - } - return nil -} diff --git a/network/p2p/inspector/validation/control_message_validation_config.go b/network/p2p/inspector/validation/control_message_validation_config.go deleted file mode 100644 index 61162207f4e..00000000000 --- a/network/p2p/inspector/validation/control_message_validation_config.go +++ /dev/null @@ -1,95 +0,0 @@ -package validation - -import ( - "golang.org/x/time/rate" - - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/inspector/internal/ratelimit" -) - -const ( - // DiscardThresholdMapKey key used to set the discard threshold config limit. - DiscardThresholdMapKey = "discardthreshold" - // SafetyThresholdMapKey key used to set the safety threshold config limit. - SafetyThresholdMapKey = "safetythreshold" - // RateLimitMapKey key used to set the rate limit config limit. - RateLimitMapKey = "ratelimit" - - // DefaultGraftDiscardThreshold upper bound for graft messages, RPC control messages with a count - // above the discard threshold are automatically discarded. - DefaultGraftDiscardThreshold = 30 - // DefaultGraftSafetyThreshold a lower bound for graft messages, RPC control messages with a message count - // lower than the safety threshold bypass validation. - DefaultGraftSafetyThreshold = .5 * DefaultGraftDiscardThreshold - // DefaultGraftRateLimit the rate limit for graft control messages. 
- // Currently, the default rate limit is equal to the discard threshold amount. - // This will result in a rate limit of 30 grafts/sec. - DefaultGraftRateLimit = DefaultGraftDiscardThreshold - - // DefaultPruneDiscardThreshold upper bound for prune messages, RPC control messages with a count - // above the discard threshold are automatically discarded. - DefaultPruneDiscardThreshold = 30 - // DefaultPruneSafetyThreshold a lower bound for prune messages, RPC control messages with a message count - // lower than the safety threshold bypass validation. - DefaultPruneSafetyThreshold = .5 * DefaultPruneDiscardThreshold - // DefaultPruneRateLimit the rate limit for prune control messages. - // Currently, the default rate limit is equal to the discard threshold amount. - // This will result in a rate limit of 30 prunes/sec. - DefaultPruneRateLimit = DefaultPruneDiscardThreshold -) - -// CtrlMsgValidationLimits limits used to construct control message validation configuration. -type CtrlMsgValidationLimits map[string]int - -func (c CtrlMsgValidationLimits) DiscardThreshold() uint64 { - return uint64(c[DiscardThresholdMapKey]) -} - -func (c CtrlMsgValidationLimits) SafetyThreshold() uint64 { - return uint64(c[SafetyThresholdMapKey]) -} - -func (c CtrlMsgValidationLimits) RateLimit() int { - return c[RateLimitMapKey] -} - -// CtrlMsgValidationConfigs list of *CtrlMsgValidationConfig -type CtrlMsgValidationConfigs []*CtrlMsgValidationConfig - -// CtrlMsgValidationConfig configuration values for upper, lower threshold and rate limit. -type CtrlMsgValidationConfig struct { - // ControlMsg the type of RPC control message. - ControlMsg p2p.ControlMessageType - // DiscardThreshold indicates the hard limit for size of the RPC control message - // any RPC messages with size > DiscardThreshold should be dropped. - DiscardThreshold uint64 - // SafetyThreshold lower limit for the size of the RPC control message, any RPC messages - // with a size < SafetyThreshold can skip validation step to avoid resource wasting. - SafetyThreshold uint64 - - // RateLimiter basic limiter without lockout duration. - RateLimiter p2p.BasicRateLimiter -} - -// NewCtrlMsgValidationConfig ensures each config limit value is greater than 0 before returning a new CtrlMsgValidationConfig. -// errors returned: -// -// ErrValidationLimit - if any of the validation limits provided are less than 0. This error is non-recoverable -// and the node should crash if this error is encountered. 
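NewCtrlMsgValidationConfig below guards these map-backed limits before constructing a config. A self-contained sketch of the same accessor-plus-guard pattern, with the third guard checking the safety threshold; all names here are illustrative rather than the removed package's API:

```go
package main

import "fmt"

const (
	discardThresholdKey = "discardthreshold"
	safetyThresholdKey  = "safetythreshold"
	rateLimitKey        = "ratelimit"
)

type validationLimits map[string]int

func (l validationLimits) discardThreshold() uint64 { return uint64(l[discardThresholdKey]) }
func (l validationLimits) safetyThreshold() uint64  { return uint64(l[safetyThresholdKey]) }
func (l validationLimits) rateLimit() int           { return l[rateLimitKey] }

// validate mirrors the guard clauses: every limit must be strictly positive.
func (l validationLimits) validate() error {
	switch {
	case l.rateLimit() <= 0:
		return fmt.Errorf("invalid %s: %d", rateLimitKey, l.rateLimit())
	case l.discardThreshold() <= 0:
		return fmt.Errorf("invalid %s: %d", discardThresholdKey, l.discardThreshold())
	case l.safetyThreshold() <= 0:
		return fmt.Errorf("invalid %s: %d", safetyThresholdKey, l.safetyThreshold())
	}
	return nil
}

func main() {
	limits := validationLimits{discardThresholdKey: 30, safetyThresholdKey: 15, rateLimitKey: 30}
	fmt.Println(limits.validate()) // <nil>
}
```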
-func NewCtrlMsgValidationConfig(controlMsg p2p.ControlMessageType, cfgLimitValues CtrlMsgValidationLimits) (*CtrlMsgValidationConfig, error) { - switch { - case cfgLimitValues.RateLimit() <= 0: - return nil, NewInvalidLimitConfigErr(controlMsg, RateLimitMapKey, uint64(cfgLimitValues.RateLimit())) - case cfgLimitValues.DiscardThreshold() <= 0: - return nil, NewInvalidLimitConfigErr(controlMsg, DiscardThresholdMapKey, cfgLimitValues.DiscardThreshold()) - case cfgLimitValues.RateLimit() <= 0: - return nil, NewInvalidLimitConfigErr(controlMsg, SafetyThresholdMapKey, cfgLimitValues.SafetyThreshold()) - default: - return &CtrlMsgValidationConfig{ - ControlMsg: controlMsg, - DiscardThreshold: cfgLimitValues.DiscardThreshold(), - SafetyThreshold: cfgLimitValues.SafetyThreshold(), - RateLimiter: ratelimit.NewControlMessageRateLimiter(rate.Limit(cfgLimitValues.RateLimit()), cfgLimitValues.RateLimit()), - }, nil - } -} diff --git a/network/p2p/inspector/validation/control_message_validation_inspector.go b/network/p2p/inspector/validation/control_message_validation_inspector.go new file mode 100644 index 00000000000..e88495b28fb --- /dev/null +++ b/network/p2p/inspector/validation/control_message_validation_inspector.go @@ -0,0 +1,1116 @@ +package validation + +import ( + "fmt" + "time" + + "github.com/go-playground/validator/v10" + "github.com/hashicorp/go-multierror" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/common/worker" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/queue" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/p2p" + p2pconfig "github.com/onflow/flow-go/network/p2p/config" + "github.com/onflow/flow-go/network/p2p/inspector/internal/cache" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" + p2pmsg "github.com/onflow/flow-go/network/p2p/message" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/events" + "github.com/onflow/flow-go/utils/logging" + flowrand "github.com/onflow/flow-go/utils/rand" +) + +const ( + RPCInspectionDisabledWarning = "rpc inspection disabled for all control message types, skipping inspection" + GraftInspectionDisabledWarning = "rpc graft inspection disabled skipping" + PruneInspectionDisabledWarning = "rpc prune inspection disabled skipping" + IWantInspectionDisabledWarning = "rpc iwant inspection disabled skipping" + IHaveInspectionDisabledWarning = "rpc ihave inspection disabled skipping" + PublishInspectionDisabledWarning = "rpc publish message inspection disabled skipping" + + RPCTruncationDisabledWarning = "rpc truncation disabled for all control message types, skipping truncation" + GraftTruncationDisabledWarning = "rpc graft truncation disabled skipping" + PruneTruncationDisabledWarning = "rpc prune truncation disabled skipping" + IHaveTruncationDisabledWarning = "rpc ihave truncation disabled skipping" + IHaveMessageIDTruncationDisabledWarning = "ihave message ids truncation disabled skipping" + IWantTruncationDisabledWarning = "rpc iwant truncation disabled skipping" + IWantMessageIDTruncationDisabledWarning = "iwant message ids truncation disabled skipping" + 
+ // rpcInspectorComponentName the rpc inspector component name. + rpcInspectorComponentName = "gossipsub_rpc_validation_inspector" +) + +// ControlMsgValidationInspector RPC message inspector that inspects control messages and performs some validation on them, +// when some validation rule is broken feedback is given via the Peer scoring notifier. +type ControlMsgValidationInspector struct { + component.Component + events.Noop + ctx irrecoverable.SignalerContext + logger zerolog.Logger + sporkID flow.Identifier + metrics module.GossipSubRpcValidationInspectorMetrics + // config control message validation configurations. + config *p2pconfig.RpcValidationInspector + // workerPool queue that stores *InspectRPCRequest that will be processed by component workers. + workerPool *worker.Pool[*InspectRPCRequest] + // tracker is a map that associates the hash of a peer's ID with the + // number of cluster-prefix topic control messages received from that peer. It helps in tracking + // and managing the rate of incoming control messages from each peer, ensuring that the system + // stays performant and resilient against potential spam or abuse. + // The counter is incremented in the following scenarios: + // 1. The cluster prefix topic is received while the inspector waits for the cluster IDs provider to be set (this can happen during the startup or epoch transitions). + // 2. The node sends a cluster prefix topic where the cluster prefix does not match any of the active cluster IDs. + // In such cases, the inspector will allow a configured number of these messages from the corresponding peer. + tracker *cache.ClusterPrefixedMessagesReceivedTracker + idProvider module.IdentityProvider + rpcTracker p2p.RpcControlTracking + // networkingType indicates public or private network, rpc publish messages are inspected for unstaked senders when running the private network. + networkingType network.NetworkingType + // topicOracle callback used to retrieve the current subscribed topics of the libp2p node. + topicOracle func() p2p.TopicProvider + // notificationConsumer the consumer that will be notified when a misbehavior is detected upon inspection of an RPC. + // For each RPC, at most one notification is sent to the consumer. + // Each notification acts as a penalty to the peer's score. + notificationConsumer p2p.GossipSubInvCtrlMsgNotifConsumer +} + +type InspectorParams struct { + // Logger the logger used by the inspector. + Logger zerolog.Logger `validate:"required"` + // SporkID the current spork ID. + SporkID flow.Identifier `validate:"required"` + // Config inspector configuration. + Config *p2pconfig.RpcValidationInspector `validate:"required"` + // HeroCacheMetricsFactory the metrics factory. + HeroCacheMetricsFactory metrics.HeroCacheMetricsFactory `validate:"required"` + // IdProvider identity provider is used to get the flow identifier for a peer. + IdProvider module.IdentityProvider `validate:"required"` + // InspectorMetrics metrics for the validation inspector. + InspectorMetrics module.GossipSubRpcValidationInspectorMetrics `validate:"required"` + // RpcTracker tracker used to track iHave RPC's sent and last size. + RpcTracker p2p.RpcControlTracking `validate:"required"` + // NetworkingType the networking type of the node. + NetworkingType network.NetworkingType `validate:"required"` + // TopicOracle callback used to retrieve the current subscribed topics of the libp2p node. + // It is set as a callback to avoid circular dependencies between the topic oracle and the inspector. 
+	TopicOracle func() p2p.TopicProvider `validate:"required"`
+
+	// InvalidControlMessageNotificationConsumer the consumer that will be notified when a misbehavior is detected upon inspection of an RPC.
+	// For each RPC, at most one notification is sent to the consumer.
+	// Each notification acts as a penalty to the peer's score.
+	InvalidControlMessageNotificationConsumer p2p.GossipSubInvCtrlMsgNotifConsumer `validate:"required"`
+}
+
+var _ component.Component = (*ControlMsgValidationInspector)(nil)
+var _ p2p.GossipSubRPCInspector = (*ControlMsgValidationInspector)(nil)
+var _ protocol.Consumer = (*ControlMsgValidationInspector)(nil)
+
+// NewControlMsgValidationInspector returns a new ControlMsgValidationInspector.
+// Args:
+// - *InspectorParams: params used to create the inspector.
+//
+// Returns:
+// - *ControlMsgValidationInspector: a new control message validation inspector.
+// - error: an error if there is any error while creating the inspector. All errors are irrecoverable and unexpected.
+func NewControlMsgValidationInspector(params *InspectorParams) (*ControlMsgValidationInspector, error) {
+	err := validator.New().Struct(params)
+	if err != nil {
+		return nil, fmt.Errorf("inspector params validation failed: %w", err)
+	}
+	lg := params.Logger.With().Str("component", "gossip_sub_rpc_validation_inspector").Logger()
+
+	inspectMsgQueueCacheCollector := metrics.GossipSubRPCInspectorQueueMetricFactory(params.HeroCacheMetricsFactory, params.NetworkingType)
+	clusterPrefixedCacheCollector := metrics.GossipSubRPCInspectorClusterPrefixedCacheMetricFactory(params.HeroCacheMetricsFactory, params.NetworkingType)
+
+	clusterPrefixedTracker, err := cache.NewClusterPrefixedMessagesReceivedTracker(params.Logger,
+		params.Config.ClusterPrefixedMessage.ControlMsgsReceivedCacheSize,
+		clusterPrefixedCacheCollector,
+		params.Config.ClusterPrefixedMessage.ControlMsgsReceivedCacheDecay)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create cluster prefix topics received tracker: %w", err)
+	}
+
+	if params.Config.PublishMessages.MaxSampleSize < params.Config.PublishMessages.ErrorThreshold {
+		return nil, fmt.Errorf("rpc message max sample size must be greater than or equal to rpc message error threshold, got %d and %d respectively",
+			params.Config.PublishMessages.MaxSampleSize,
+			params.Config.PublishMessages.ErrorThreshold)
+	}
+
+	c := &ControlMsgValidationInspector{
+		logger:               lg,
+		sporkID:              params.SporkID,
+		config:               params.Config,
+		tracker:              clusterPrefixedTracker,
+		rpcTracker:           params.RpcTracker,
+		idProvider:           params.IdProvider,
+		metrics:              params.InspectorMetrics,
+		networkingType:       params.NetworkingType,
+		topicOracle:          params.TopicOracle,
+		notificationConsumer: params.InvalidControlMessageNotificationConsumer,
+	}
+
+	store := queue.NewHeroStore(params.Config.InspectionQueue.Size, params.Logger, inspectMsgQueueCacheCollector)
+
+	pool := worker.NewWorkerPoolBuilder[*InspectRPCRequest](lg, store, c.processInspectRPCReq).Build()
+
+	c.workerPool = pool
+
+	builder := component.NewComponentManagerBuilder()
+	for i := 0; i < c.config.InspectionQueue.NumberOfWorkers; i++ {
+		builder.AddWorker(pool.WorkerLogic())
+	}
+	c.Component = builder.Build()
+	return c, nil
+}
+
+func (c *ControlMsgValidationInspector) Start(parent irrecoverable.SignalerContext) {
+	if c.topicOracle == nil {
+		parent.Throw(fmt.Errorf("control message validation inspector topic oracle not set"))
+	}
+	// capture the signaler context so logAndThrowError can throw irrecoverable errors on it
+	c.ctx = parent
+	c.Component.Start(parent)
+}
+
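NewControlMsgValidationInspector above validates its entire params struct in one shot via go-playground/validator struct tags, so every missing dependency is reported at construction time rather than as a nil-pointer panic later. A reduced sketch of that pattern; the field names here are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// params mirrors the InspectorParams pattern: dependencies are declared
// required via struct tags and validated once in the constructor.
type params struct {
	SporkID string      `validate:"required"`
	Config  *struct{}   `validate:"required"`
	Tracker interface{} `validate:"required"`
}

func newInspector(p *params) error {
	if err := validator.New().Struct(p); err != nil {
		// err enumerates every field that failed, e.g.
		// "Field validation for 'Config' failed on the 'required' tag"
		return fmt.Errorf("inspector params validation failed: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(newInspector(&params{SporkID: "spork-1"})) // fails on Config and Tracker
}
```

This is also what the test at the end of this diff asserts on: the error string names each field that failed its `required` tag.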
+// Name returns the name of the rpc inspector.
+func (c *ControlMsgValidationInspector) Name() string {
+	return rpcInspectorComponentName
+}
+
+// ActiveClustersChanged consumes cluster ID update protocol events.
+func (c *ControlMsgValidationInspector) ActiveClustersChanged(clusterIDList flow.ChainIDList) {
+	c.tracker.StoreActiveClusterIds(clusterIDList)
+}
+
+// Inspect is called by gossipsub upon reception of an rpc from a remote node.
+// It creates a new InspectRPCRequest for the RPC to be inspected asynchronously by the worker pool.
+// Args:
+// - from: the sender.
+// - rpc: the control message RPC.
+//
+// Returns:
+// - error: if a new inspect rpc request cannot be created, all errors returned are considered irrecoverable.
+func (c *ControlMsgValidationInspector) Inspect(from peer.ID, rpc *pubsub.RPC) error {
+	if c.config.InspectionProcess.Inspect.Disabled {
+		c.logger.
+			Trace().
+			Str("peer_id", p2plogging.PeerId(from)).
+			Bool(logging.KeyNetworkingSecurity, true).
+			Msg(RPCInspectionDisabledWarning)
+		return nil
+	}
+
+	// check peer identity when running a private network
+	// sanity check: rpc inspection should be disabled on public networks
+	if c.networkingType == network.PrivateNetwork && c.config.InspectionProcess.Inspect.RejectUnstakedPeers {
+		_, err := c.checkSenderIdentity(from)
+		if err != nil {
+			c.notificationConsumer.OnInvalidControlMessageNotification(p2p.NewInvalidControlMessageNotification(from, p2pmsg.CtrlMsgRPC, err, 1, p2p.CtrlMsgNonClusterTopicType))
+			c.logger.
+				Error().
+				Err(err).
+				Str("peer_id", p2plogging.PeerId(from)).
+				Bool(logging.KeyNetworkingSecurity, true).
+				Msg("rpc received from unstaked peer")
+			c.metrics.OnInvalidControlMessageNotificationSent()
+			c.metrics.OnRpcRejectedFromUnknownSender()
+			return err
+		}
+	}
+
+	// first, truncate the rpc to the configured max sample size, if needed
+	c.truncateRPC(from, rpc)
+
+	// second, queue further async inspection
+	req, err := NewInspectRPCRequest(from, rpc)
+	if err != nil {
+		c.logger.Error().
+			Err(err).
+			Bool(logging.KeyNetworkingSecurity, true).
+			Str("peer_id", p2plogging.PeerId(from)).
+			Msg("failed to get inspect RPC request")
+		return fmt.Errorf("failed to get inspect RPC request: %w", err)
+	}
+	c.workerPool.Submit(req)
+	return nil
+}
+
+// updateMetrics updates the metrics for the received RPC.
+// Args:
+// - from: the sender.
+// - rpc: the control message RPC.
+func (c *ControlMsgValidationInspector) updateMetrics(from peer.ID, rpc *pubsub.RPC) {
+	includedMessages := len(rpc.GetPublish())
+	iHaveCount, iWantCount, graftCount, pruneCount := 0, 0, 0, 0
+	ctl := rpc.GetControl()
+	if ctl != nil {
+		iHaveCount = len(ctl.GetIhave())
+		iWantCount = len(ctl.GetIwant())
+		graftCount = len(ctl.GetGraft())
+		pruneCount = len(ctl.GetPrune())
+	}
+	c.metrics.OnIncomingRpcReceived(iHaveCount, iWantCount, graftCount, pruneCount, includedMessages)
+	if c.logger.GetLevel() > zerolog.TraceLevel {
+		return // skip logging if trace level is not enabled
+	}
+	c.logger.Trace().
+		Str("peer_id", p2plogging.PeerId(from)).
+		Int("iHaveCount", iHaveCount).
+		Int("iWantCount", iWantCount).
+		Int("graftCount", graftCount).
+		Int("pruneCount", pruneCount).
+		Int("included_message_count", includedMessages).
+		Msg("received rpc with control messages")
+}
+
+// processInspectRPCReq is used by component workers to perform further inspection of RPC control messages, validating
+// that all control message types in the RPC are valid.
+// Args:
+// - req: the inspect rpc request.
+// +// Returns: +// - error: no error is expected to be returned from this func as they are logged and distributed in invalid control message notifications. +func (c *ControlMsgValidationInspector) processInspectRPCReq(req *InspectRPCRequest) error { + c.updateMetrics(req.Peer, req.rpc) + c.metrics.AsyncProcessingStarted() + start := time.Now() + defer func() { + c.metrics.AsyncProcessingFinished(time.Since(start)) + }() + + activeClusterIDS := c.tracker.GetActiveClusterIds() + for _, ctrlMsgType := range p2pmsg.ControlMessageTypes() { + switch ctrlMsgType { + case p2pmsg.CtrlMsgGraft: + err, topicType := c.inspectGraftMessages(req.Peer, req.rpc.GetControl().GetGraft(), activeClusterIDS) + if err != nil { + c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgGraft, err, 1, topicType) + return nil + } + case p2pmsg.CtrlMsgPrune: + err, topicType := c.inspectPruneMessages(req.Peer, req.rpc.GetControl().GetPrune(), activeClusterIDS) + if err != nil { + c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgPrune, err, 1, topicType) + return nil + } + case p2pmsg.CtrlMsgIWant: + err := c.inspectIWantMessages(req.Peer, req.rpc.GetControl().GetIwant()) + if err != nil { + c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgIWant, err, 1, p2p.CtrlMsgNonClusterTopicType) + return nil + } + case p2pmsg.CtrlMsgIHave: + err, topicType := c.inspectIHaveMessages(req.Peer, req.rpc.GetControl().GetIhave(), activeClusterIDS) + if err != nil { + c.logAndDistributeAsyncInspectErrs(req, p2pmsg.CtrlMsgIHave, err, 1, topicType) + return nil + } + } + } + + // inspect rpc publish messages after all control message validation has passed + err, errCount := c.inspectRpcPublishMessages(req.Peer, req.rpc.GetPublish(), activeClusterIDS) + if err != nil { + c.logAndDistributeAsyncInspectErrs(req, p2pmsg.RpcPublishMessage, err, errCount, p2p.CtrlMsgNonClusterTopicType) + return nil + } + + return nil +} + +// checkSenderIdentity checks the identity of the peer with pid and ensures they are not unstaked, or ejected. +// This check is only required on private networks. +// Args: +// - pid : the peer ID. +// +// Returns: +// - error: sender is unknown or the identity is ejected. +// +// All errors returned from this function can be considered benign. +func (c *ControlMsgValidationInspector) checkSenderIdentity(pid peer.ID) (*flow.Identity, error) { + id, ok := c.idProvider.ByPeerID(pid) + if !ok { + return nil, NewUnstakedPeerErr(pid) + } + + if id.IsEjected() { + return nil, NewEjectedPeerErr(pid) + } + + return id, nil +} + +// inspectGraftMessages performs topic validation on all grafts in the control message using the provided validateTopic func while tracking duplicates. +// Args: +// - from: peer ID of the sender. +// - grafts: the list of grafts to inspect. +// - activeClusterIDS: the list of active cluster ids. +// Returns: +// - DuplicateTopicErr: if there are any duplicate topics in the list of grafts +// - error: if any error occurs while sampling or validating topics, all returned errors are benign and should not cause the node to crash. +// - bool: true if an error is returned and the topic that failed validation was a cluster prefixed topic, false otherwise. +func (c *ControlMsgValidationInspector) inspectGraftMessages(from peer.ID, grafts []*pubsub_pb.ControlGraft, activeClusterIDS flow.ChainIDList) (error, p2p.CtrlMsgTopicType) { + if !c.config.InspectionProcess.Inspect.EnableGraft { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). 
+			Msg(GraftInspectionDisabledWarning)
+		return nil, p2p.CtrlMsgNonClusterTopicType
+	}
+
+	duplicateTopicTracker := make(duplicateStrTracker)
+	totalDuplicateTopicIds := 0
+	totalInvalidTopicIdErrs := 0
+	defer func() {
+		// regardless of inspection result, update metrics
+		c.metrics.OnGraftMessageInspected(totalDuplicateTopicIds, totalInvalidTopicIdErrs)
+	}()
+
+	for _, graft := range grafts {
+		topic := channels.Topic(graft.GetTopicID())
+		if duplicateTopicTracker.track(topic.String()) > 1 {
+			// ideally, a GRAFT message should not have any duplicate topics, hence a topic ID is counted as a duplicate only if it is repeated more than once.
+			totalDuplicateTopicIds++
+			// check if the total number of duplicates exceeds the configured threshold.
+			if totalDuplicateTopicIds > c.config.GraftPrune.DuplicateTopicIdThreshold {
+				c.metrics.OnGraftDuplicateTopicIdsExceedThreshold()
+				return NewDuplicateTopicIDThresholdExceeded(totalDuplicateTopicIds, len(grafts), c.config.GraftPrune.DuplicateTopicIdThreshold), p2p.CtrlMsgNonClusterTopicType
+			}
+		}
+		err, ctrlMsgType := c.validateTopic(from, topic, activeClusterIDS)
+		if err != nil {
+			totalInvalidTopicIdErrs++
+			c.metrics.OnInvalidTopicIdDetectedForControlMessage(p2pmsg.CtrlMsgGraft)
+			if totalInvalidTopicIdErrs > c.config.GraftPrune.InvalidTopicIdThreshold {
+				return NewInvalidTopicIDThresholdExceeded(totalInvalidTopicIdErrs, c.config.GraftPrune.InvalidTopicIdThreshold), ctrlMsgType
+			}
+		}
+	}
+	return nil, p2p.CtrlMsgNonClusterTopicType
+}
+
+// inspectPruneMessages performs topic validation on all prunes in the control message using the provided validateTopic func while tracking duplicates.
+// Args:
+// - from: peer ID of the sender.
+// - prunes: the list of prunes to inspect.
+// - activeClusterIDS: the list of active cluster ids.
+// Returns:
+// - DuplicateTopicErr: if there are any duplicate topics found in the list of prunes.
+// - error: if any error occurs while sampling or validating topics, all returned errors are benign and should not cause the node to crash.
+// - p2p.CtrlMsgTopicType: the type of the topic that failed validation, indicating whether it was a cluster-prefixed topic.
+func (c *ControlMsgValidationInspector) inspectPruneMessages(from peer.ID, prunes []*pubsub_pb.ControlPrune, activeClusterIDS flow.ChainIDList) (error, p2p.CtrlMsgTopicType) {
+	if !c.config.InspectionProcess.Inspect.EnablePrune {
+		c.logger.
+			Trace().
+			Str("peer_id", p2plogging.PeerId(from)).
+			Bool(logging.KeyNetworkingSecurity, true).
+			Msg(PruneInspectionDisabledWarning)
+		return nil, p2p.CtrlMsgNonClusterTopicType
+	}
+	tracker := make(duplicateStrTracker)
+	totalDuplicateTopicIds := 0
+	totalInvalidTopicIdErrs := 0
+	defer func() {
+		// regardless of inspection result, update metrics
+		c.metrics.OnPruneMessageInspected(totalDuplicateTopicIds, totalInvalidTopicIdErrs)
+	}()
+	for _, prune := range prunes {
+		topic := channels.Topic(prune.GetTopicID())
+		if tracker.track(topic.String()) > 1 {
+			// ideally, a PRUNE message should not have any duplicate topics, hence a topic ID is counted as a duplicate only if it is repeated more than once.
+			totalDuplicateTopicIds++
+			// check if the total number of duplicates exceeds the configured threshold.
+ if totalDuplicateTopicIds > c.config.GraftPrune.DuplicateTopicIdThreshold { + c.metrics.OnPruneDuplicateTopicIdsExceedThreshold() + return NewDuplicateTopicIDThresholdExceeded(totalDuplicateTopicIds, len(prunes), c.config.GraftPrune.DuplicateTopicIdThreshold), p2p.CtrlMsgNonClusterTopicType + } + } + err, ctrlMsgType := c.validateTopic(from, topic, activeClusterIDS) + if err != nil { + totalInvalidTopicIdErrs++ + c.metrics.OnInvalidTopicIdDetectedForControlMessage(p2pmsg.CtrlMsgPrune) + if totalInvalidTopicIdErrs > c.config.GraftPrune.InvalidTopicIdThreshold { + return NewInvalidTopicIDThresholdExceeded(totalInvalidTopicIdErrs, c.config.GraftPrune.InvalidTopicIdThreshold), ctrlMsgType + } + } + } + return nil, p2p.CtrlMsgNonClusterTopicType +} + +// inspectIHaveMessages performs topic validation on all ihaves in the control message using the provided validateTopic func while tracking duplicates. +// Args: +// - from: peer ID of the sender. +// - iHaves: the list of iHaves to inspect. +// - activeClusterIDS: the list of active cluster ids. +// Returns: +// - DuplicateTopicErr: if there are any duplicate topics found in the list of iHaves +// or any duplicate message ids found inside a single iHave. +// - error: if any error occurs while sampling or validating topics, all returned errors are benign and should not cause the node to crash. +// - bool: true if an error is returned and the topic that failed validation was a cluster prefixed topic, false otherwise. +func (c *ControlMsgValidationInspector) inspectIHaveMessages(from peer.ID, ihaves []*pubsub_pb.ControlIHave, activeClusterIDS flow.ChainIDList) (error, p2p.CtrlMsgTopicType) { + if !c.config.InspectionProcess.Inspect.EnableIHave { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). + Msg(IHaveInspectionDisabledWarning) + return nil, p2p.CtrlMsgNonClusterTopicType + } + + if len(ihaves) == 0 { + return nil, p2p.CtrlMsgNonClusterTopicType + } + lg := c.logger.With(). + Str("peer_id", p2plogging.PeerId(from)). + Int("sample_size", len(ihaves)). + Int("max_sample_size", c.config.IHave.MessageCountThreshold). + Logger() + duplicateTopicTracker := make(duplicateStrTracker) + duplicateMessageIDTracker := make(duplicateStrTracker) + totalMessageIds := 0 + totalDuplicateTopicIds := 0 + totalDuplicateMessageIds := 0 + totalInvalidTopicIdErrs := 0 + defer func() { + // regardless of inspection result, update metrics + c.metrics.OnIHaveMessagesInspected(totalDuplicateTopicIds, totalDuplicateMessageIds, totalInvalidTopicIdErrs) + }() + for _, ihave := range ihaves { + messageIds := ihave.GetMessageIDs() + topic := ihave.GetTopicID() + totalMessageIds += len(messageIds) + + // first check if the topic is valid, fail fast if it is not + err, ctrlMsgType := c.validateTopic(from, channels.Topic(topic), activeClusterIDS) + if err != nil { + totalInvalidTopicIdErrs++ + c.metrics.OnInvalidTopicIdDetectedForControlMessage(p2pmsg.CtrlMsgIHave) + if totalInvalidTopicIdErrs > c.config.IHave.InvalidTopicIdThreshold { + return NewInvalidTopicIDThresholdExceeded(totalInvalidTopicIdErrs, c.config.IHave.InvalidTopicIdThreshold), ctrlMsgType + } + } + + // then track the topic ensuring it is not beyond a duplicate threshold. 
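The duplicateStrTracker used throughout these graft, prune, and iHave loops is not shown in this diff; judging from its usage, it behaves like a counting set whose track method returns how many times a key has been seen so far. A sketch under that assumption:

```go
package main

import "fmt"

type duplicateTracker map[string]int

// track records one occurrence of s and returns the running count,
// so a return value greater than 1 means s is a duplicate.
func (d duplicateTracker) track(s string) int {
	d[s]++
	return d[s]
}

func main() {
	tracker := make(duplicateTracker)
	threshold := 1 // e.g. a DuplicateTopicIdThreshold-style limit
	duplicates := 0
	for _, topic := range []string{"a", "b", "a", "a"} {
		if tracker.track(topic) > 1 {
			duplicates++
			if duplicates > threshold {
				fmt.Printf("duplicate topic threshold exceeded: %d > %d\n", duplicates, threshold)
				return
			}
		}
	}
}
```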
+ if duplicateTopicTracker.track(topic) > 1 { + totalDuplicateTopicIds++ + // the topic is duplicated, check if the total number of duplicates exceeds the configured threshold + if totalDuplicateTopicIds > c.config.IHave.DuplicateTopicIdThreshold { + c.metrics.OnIHaveDuplicateTopicIdsExceedThreshold() + return NewDuplicateTopicIDThresholdExceeded(totalDuplicateTopicIds, len(ihaves), c.config.IHave.DuplicateTopicIdThreshold), p2p.CtrlMsgNonClusterTopicType + } + } + + for _, messageID := range messageIds { + if duplicateMessageIDTracker.track(messageID) > 1 { + totalDuplicateMessageIds++ + // the message is duplicated, check if the total number of duplicates exceeds the configured threshold + if totalDuplicateMessageIds > c.config.IHave.DuplicateMessageIdThreshold { + c.metrics.OnIHaveDuplicateMessageIdsExceedThreshold() + return NewDuplicateMessageIDErr(messageID, totalDuplicateMessageIds, p2pmsg.CtrlMsgIHave), p2p.CtrlMsgNonClusterTopicType + } + } + } + } + lg.Debug(). + Int("total_message_ids", totalMessageIds). + Int("total_duplicate_topic_ids", totalDuplicateTopicIds). + Int("total_duplicate_message_ids", totalDuplicateMessageIds). + Msg("ihave control message validation complete") + return nil, p2p.CtrlMsgNonClusterTopicType +} + +// inspectIWantMessages inspects RPC iWant control messages. This func will sample the iWants and perform validation on each iWant in the sample. +// Ensuring that the following are true: +// - Each iWant corresponds to an iHave that was sent. +// - Each topic in the iWant sample is a valid topic. +// If the number of iWants that do not have a corresponding iHave exceed the configured threshold an error is returned. +// Args: +// - from: peer ID of the sender. +// - iWant: the list of iWant control messages. +// Returns: +// - DuplicateTopicErr: if there are any duplicate message ids found in any of the iWants. +// - IWantCacheMissThresholdErr: if the rate of cache misses exceeds the configured allowed threshold. +func (c *ControlMsgValidationInspector) inspectIWantMessages(from peer.ID, iWants []*pubsub_pb.ControlIWant) error { + if !c.config.InspectionProcess.Inspect.EnableIWant { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). + Msg(IWantInspectionDisabledWarning) + return nil + } + + if len(iWants) == 0 { + return nil + } + lastHighest := c.rpcTracker.LastHighestIHaveRPCSize() + lg := c.logger.With(). + Str("peer_id", p2plogging.PeerId(from)). + Uint("max_sample_size", c.config.IWant.MessageCountThreshold). + Int64("last_highest_ihave_rpc_size", lastHighest). + Logger() + duplicateMsgIdTracker := make(duplicateStrTracker) + cacheMisses := 0 + duplicateMessageIds := 0 + defer func() { + // regardless of inspection result, update metrics + c.metrics.OnIWantMessagesInspected(duplicateMessageIds, cacheMisses) + }() + + lg = lg.With(). + Int("iwant_msg_count", len(iWants)). + Int("cache_misses_threshold", c.config.IWant.CacheMissThreshold). 
+ Int("duplicates_threshold", c.config.IWant.DuplicateMsgIdThreshold).Logger() + + lg.Trace().Msg("validating sample of message ids from iwant control message") + + totalMessageIds := 0 + for _, iWant := range iWants { + messageIds := iWant.GetMessageIDs() + messageIDCount := uint(len(messageIds)) + for _, messageID := range messageIds { + // check duplicate allowed threshold + if duplicateMsgIdTracker.track(messageID) > 1 { + // ideally, an iWant message should not have any duplicate message IDs, hence a message id is considered duplicate when it is repeated more than once. + duplicateMessageIds++ + if duplicateMessageIds > c.config.IWant.DuplicateMsgIdThreshold { + c.metrics.OnIWantDuplicateMessageIdsExceedThreshold() + return NewIWantDuplicateMsgIDThresholdErr(duplicateMessageIds, messageIDCount, c.config.IWant.DuplicateMsgIdThreshold) + } + } + // check cache miss threshold + if !c.rpcTracker.WasIHaveRPCSent(messageID) { + cacheMisses++ + if cacheMisses > c.config.IWant.CacheMissThreshold { + c.metrics.OnIWantCacheMissMessageIdsExceedThreshold() + return NewIWantCacheMissThresholdErr(cacheMisses, messageIDCount, c.config.IWant.CacheMissThreshold) + } + } + duplicateMsgIdTracker.track(messageID) + totalMessageIds++ + } + } + + lg.Debug(). + Int("total_message_ids", totalMessageIds). + Int("cache_misses", cacheMisses). + Int("total_duplicate_message_ids", duplicateMessageIds). + Msg("iwant control message validation complete") + + return nil +} + +// inspectRpcPublishMessages inspects a sample of the RPC gossip messages and performs topic validation that ensures the following: +// - Topics are known flow topics. +// - Topics are valid flow topics. +// - Topics are in the nodes subscribe topics list. +// If more than half the topics in the sample contain invalid topics an error will be returned. +// Args: +// - from: peer ID of the sender. +// - messages: rpc publish messages. +// - activeClusterIDS: the list of active cluster ids. +// Returns: +// - InvalidRpcPublishMessagesErr: if the amount of invalid messages exceeds the configured RPCMessageErrorThreshold. +// - int: the number of invalid pubsub messages +func (c *ControlMsgValidationInspector) inspectRpcPublishMessages(from peer.ID, messages []*pubsub_pb.Message, activeClusterIDS flow.ChainIDList) (error, uint64) { + if !c.config.InspectionProcess.Inspect.EnablePublish { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). 
+ Msg(PublishInspectionDisabledWarning) + return nil, 0 + } + totalMessages := len(messages) + if totalMessages == 0 { + return nil, 0 + } + + sampleSize := c.config.PublishMessages.MaxSampleSize + if sampleSize > totalMessages { + sampleSize = totalMessages + } + c.performSample(p2pmsg.RpcPublishMessage, uint(totalMessages), uint(sampleSize), func(i, j uint) { + messages[i], messages[j] = messages[j], messages[i] + }) + + subscribedTopics := c.topicOracle().GetTopics() + hasSubscription := func(topic string) bool { + for _, subscribedTopic := range subscribedTopics { + if topic == subscribedTopic { + return true + } + } + return false + } + var errs *multierror.Error + invalidTopicIdsCount := 0 + invalidSubscriptionsCount := 0 + invalidSendersCount := 0 + defer func() { + // regardless of inspection result, update metrics + errCnt := 0 + if errs != nil { + errCnt = errs.Len() + } + c.metrics.OnPublishMessageInspected(errCnt, invalidTopicIdsCount, invalidSubscriptionsCount, invalidSendersCount) + }() + + idCheckCache := make(map[peer.ID]error) + for _, message := range messages[:sampleSize] { + topic := channels.Topic(message.GetTopic()) + // The boolean value returned when validating a topic, indicating whether the topic is cluster-prefixed or not, is intentionally ignored. + // This is because we have already set a threshold for errors allowed on publish messages. Reducing the penalty further based on + // cluster prefix status is unnecessary when the error threshold is exceeded. + err, _ := c.validateTopic(from, topic, activeClusterIDS) + if err != nil { + // we can skip checking for subscription of topic that failed validation and continue + invalidTopicIdsCount++ + errs = multierror.Append(errs, err) + continue + } + + if !hasSubscription(topic.String()) { + invalidSubscriptionsCount++ + errs = multierror.Append(errs, fmt.Errorf("subscription for topic %s not found", topic)) + continue + } + + if c.networkingType == network.PrivateNetwork { + pid, err := peer.IDFromBytes(message.GetFrom()) + if err != nil { + invalidSendersCount++ + errs = multierror.Append(errs, fmt.Errorf("failed to get peer ID from bytes: %w", err)) + continue + } + + if idCheckErr, ok := idCheckCache[pid]; ok { + if idCheckErr != nil { + errs = multierror.Append(errs, idCheckErr) + continue + } + } + + _, idErr := c.checkSenderIdentity(pid) + if idErr != nil { + invalidSendersCount++ + errs = multierror.Append(errs, idErr) + idCheckCache[pid] = idErr + continue + } + + idCheckCache[pid] = nil + } + } + // return an error when we exceed the error threshold + if errs != nil && errs.Len() > c.config.PublishMessages.ErrorThreshold { + c.metrics.OnPublishMessagesInspectionErrorExceedsThreshold() + return NewInvalidRpcPublishMessagesErr(errs.ErrorOrNil(), errs.Len()), uint64(errs.Len()) + } + + return nil, 0 +} + +// truncateRPC truncates the RPC by truncating each control message type using the configured max sample size values. +// Args: +// - from: peer ID of the sender. +// - rpc: the pubsub RPC. +func (c *ControlMsgValidationInspector) truncateRPC(from peer.ID, rpc *pubsub.RPC) { + if c.config.InspectionProcess.Truncate.Disabled { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). 
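inspectRpcPublishMessages above tolerates individual bad messages and only fails once the aggregated error count crosses ErrorThreshold, accumulating failures with hashicorp/go-multierror. A sketch of that aggregation; isValid and the threshold value are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func inspectMessages(topics []string, isValid func(string) bool, errorThreshold int) error {
	var errs *multierror.Error
	for _, topic := range topics {
		if !isValid(topic) {
			errs = multierror.Append(errs, fmt.Errorf("invalid topic: %s", topic))
		}
	}
	// only report once the aggregated count crosses the threshold
	if errs != nil && errs.Len() > errorThreshold {
		return fmt.Errorf("publish message inspection failed (%d errors): %w", errs.Len(), errs.ErrorOrNil())
	}
	return nil // tolerated: error count at or below threshold
}

func main() {
	valid := func(t string) bool { return t == "flow/topic" }
	fmt.Println(inspectMessages([]string{"flow/topic", "bad1", "bad2"}, valid, 1))
}
```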
+ Msg(RPCTruncationDisabledWarning) + return + } + + for _, ctlMsgType := range p2pmsg.ControlMessageTypes() { + switch ctlMsgType { + case p2pmsg.CtrlMsgGraft: + c.truncateGraftMessages(from, rpc) + case p2pmsg.CtrlMsgPrune: + c.truncatePruneMessages(from, rpc) + case p2pmsg.CtrlMsgIHave: + c.truncateIHaveMessages(from, rpc) + c.truncateIHaveMessageIds(from, rpc) + case p2pmsg.CtrlMsgIWant: + c.truncateIWantMessages(from, rpc) + c.truncateIWantMessageIds(from, rpc) + default: + // sanity check this should never happen + c.logAndThrowError(fmt.Errorf("unknown control message type encountered during RPC truncation")) + } + } +} + +// truncateGraftMessages truncates the Graft control messages in the RPC. If the total number of Grafts in the RPC exceeds the configured +// GraftPruneMessageMaxSampleSize the list of Grafts will be truncated. +// Args: +// - rpc: the rpc message to truncate. +func (c *ControlMsgValidationInspector) truncateGraftMessages(from peer.ID, rpc *pubsub.RPC) { + if !c.config.InspectionProcess.Truncate.EnableGraft { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). + Msg(GraftTruncationDisabledWarning) + return + } + + grafts := rpc.GetControl().GetGraft() + originalGraftSize := len(grafts) + if originalGraftSize <= c.config.GraftPrune.MessageCountThreshold { + return // nothing to truncate + } + + // truncate grafts and update metrics + sampleSize := c.config.GraftPrune.MessageCountThreshold + c.performSample(p2pmsg.CtrlMsgGraft, uint(originalGraftSize), uint(sampleSize), func(i, j uint) { + grafts[i], grafts[j] = grafts[j], grafts[i] + }) + rpc.Control.Graft = grafts[:sampleSize] + c.metrics.OnControlMessagesTruncated(p2pmsg.CtrlMsgGraft, originalGraftSize-len(rpc.Control.Graft)) +} + +// truncatePruneMessages truncates the Prune control messages in the RPC. If the total number of Prunes in the RPC exceeds the configured +// GraftPruneMessageMaxSampleSize the list of Prunes will be truncated. +// Args: +// - rpc: the rpc message to truncate. +func (c *ControlMsgValidationInspector) truncatePruneMessages(from peer.ID, rpc *pubsub.RPC) { + if !c.config.InspectionProcess.Truncate.EnablePrune { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). + Msg(PruneTruncationDisabledWarning) + return + } + + prunes := rpc.GetControl().GetPrune() + originalPruneSize := len(prunes) + if originalPruneSize <= c.config.GraftPrune.MessageCountThreshold { + return // nothing to truncate + } + + sampleSize := c.config.GraftPrune.MessageCountThreshold + c.performSample(p2pmsg.CtrlMsgPrune, uint(originalPruneSize), uint(sampleSize), func(i, j uint) { + prunes[i], prunes[j] = prunes[j], prunes[i] + }) + rpc.Control.Prune = prunes[:sampleSize] + c.metrics.OnControlMessagesTruncated(p2pmsg.CtrlMsgPrune, originalPruneSize-len(rpc.Control.Prune)) +} + +// truncateIHaveMessages truncates the iHaves control messages in the RPC. If the total number of iHaves in the RPC exceeds the configured +// MessageCountThreshold the list of iHaves will be truncated. +// Args: +// - rpc: the rpc message to truncate. +func (c *ControlMsgValidationInspector) truncateIHaveMessages(from peer.ID, rpc *pubsub.RPC) { + if !c.config.InspectionProcess.Truncate.EnableIHave { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). 
+ Msg(IHaveTruncationDisabledWarning) + return + } + + ihaves := rpc.GetControl().GetIhave() + originalIHaveCount := len(ihaves) + if originalIHaveCount == 0 { + return + } + + if originalIHaveCount > c.config.IHave.MessageCountThreshold { + // truncate ihaves and update metrics + sampleSize := c.config.IHave.MessageCountThreshold + if sampleSize > originalIHaveCount { + sampleSize = originalIHaveCount + } + c.performSample(p2pmsg.CtrlMsgIHave, uint(originalIHaveCount), uint(sampleSize), func(i, j uint) { + ihaves[i], ihaves[j] = ihaves[j], ihaves[i] + }) + rpc.Control.Ihave = ihaves[:sampleSize] + c.metrics.OnControlMessagesTruncated(p2pmsg.CtrlMsgIHave, originalIHaveCount-len(rpc.Control.Ihave)) + } +} + +// truncateIHaveMessageIds truncates the message ids for each iHave control message in the RPC. If the total number of message ids in a single iHave exceeds the configured +// MessageIdCountThreshold the list of message ids will be truncated. Before message ids are truncated the iHave control messages should have been truncated themselves. +// Args: +// - rpc: the rpc message to truncate. +func (c *ControlMsgValidationInspector) truncateIHaveMessageIds(from peer.ID, rpc *pubsub.RPC) { + if !c.config.InspectionProcess.Truncate.EnableIHaveMessageIds { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). + Msg(IHaveMessageIDTruncationDisabledWarning) + return + } + + for _, ihave := range rpc.GetControl().GetIhave() { + messageIDs := ihave.GetMessageIDs() + originalMessageIdCount := len(messageIDs) + if originalMessageIdCount == 0 { + continue // nothing to truncate; skip + } + + if originalMessageIdCount > c.config.IHave.MessageIdCountThreshold { + sampleSize := c.config.IHave.MessageIdCountThreshold + if sampleSize > originalMessageIdCount { + sampleSize = originalMessageIdCount + } + c.performSample(p2pmsg.CtrlMsgIHave, uint(originalMessageIdCount), uint(sampleSize), func(i, j uint) { + messageIDs[i], messageIDs[j] = messageIDs[j], messageIDs[i] + }) + ihave.MessageIDs = messageIDs[:sampleSize] + c.metrics.OnIHaveControlMessageIdsTruncated(originalMessageIdCount - len(ihave.MessageIDs)) + } + c.metrics.OnIHaveMessageIDsReceived(ihave.GetTopicID(), len(ihave.MessageIDs)) + } +} + +// truncateIWantMessages truncates the iWant control messages in the RPC. If the total number of iWants in the RPC exceeds the configured +// MessageCountThreshold the list of iWants will be truncated. +// Args: +// - rpc: the rpc message to truncate. +func (c *ControlMsgValidationInspector) truncateIWantMessages(from peer.ID, rpc *pubsub.RPC) { + if !c.config.InspectionProcess.Truncate.EnableIWant { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). 
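Each truncate helper above keeps a uniformly random subset rather than the first N entries, so a sender cannot control which portion of an oversized control message survives truncation. A sketch of truncation by partial shuffle; flow-go uses utils/rand.Samples via performSample, while this illustration uses math/rand:

```go
package main

import (
	"fmt"
	"math/rand"
)

// sampleAndTruncate randomly fills the first sampleSize positions of items
// by swapping with the untouched suffix (a partial Fisher-Yates shuffle),
// then truncates the slice to that prefix.
func sampleAndTruncate[T any](items []T, sampleSize int) []T {
	if sampleSize >= len(items) {
		return items // nothing to truncate
	}
	for i := 0; i < sampleSize; i++ {
		j := i + rand.Intn(len(items)-i) // pick uniformly from the remaining suffix
		items[i], items[j] = items[j], items[i]
	}
	return items[:sampleSize]
}

func main() {
	grafts := []string{"t1", "t2", "t3", "t4", "t5"}
	fmt.Println(sampleAndTruncate(grafts, 3)) // a random 3-element sample
}
```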
+ Msg(IWantTruncationDisabledWarning) + return + } + + iWants := rpc.GetControl().GetIwant() + originalIWantCount := uint(len(iWants)) + if originalIWantCount == 0 { + return + } + + if originalIWantCount > c.config.IWant.MessageCountThreshold { + // truncate iWants and update metrics + sampleSize := c.config.IWant.MessageCountThreshold + if sampleSize > originalIWantCount { + sampleSize = originalIWantCount + } + c.performSample(p2pmsg.CtrlMsgIWant, originalIWantCount, sampleSize, func(i, j uint) { + iWants[i], iWants[j] = iWants[j], iWants[i] + }) + rpc.Control.Iwant = iWants[:sampleSize] + c.metrics.OnControlMessagesTruncated(p2pmsg.CtrlMsgIWant, int(originalIWantCount)-len(rpc.Control.Iwant)) + } +} + +// truncateIWantMessageIds truncates the message ids for each iWant control message in the RPC. If the total number of message ids in a single iWant exceeds the configured +// MessageIdCountThreshold the list of message ids will be truncated. Before message ids are truncated the iWant control messages should have been truncated themselves. +// Args: +// - rpc: the rpc message to truncate. +func (c *ControlMsgValidationInspector) truncateIWantMessageIds(from peer.ID, rpc *pubsub.RPC) { + if !c.config.InspectionProcess.Truncate.EnableIWantMessageIds { + c.logger. + Trace(). + Str("peer_id", p2plogging.PeerId(from)). + Bool(logging.KeyNetworkingSecurity, true). + Msg(IWantMessageIDTruncationDisabledWarning) + return + } + + lastHighest := c.rpcTracker.LastHighestIHaveRPCSize() + lg := c.logger.With(). + Str("peer_id", p2plogging.PeerId(from)). + Uint("max_sample_size", c.config.IWant.MessageCountThreshold). + Int64("last_highest_ihave_rpc_size", lastHighest). + Logger() + + sampleSize := int(10 * lastHighest) + if sampleSize == 0 || sampleSize > c.config.IWant.MessageIdCountThreshold { + // invalid or 0 sample size is suspicious + lg.Debug().Str(logging.KeySuspicious, "true").Msg("zero or invalid sample size, using default max sample size") + sampleSize = c.config.IWant.MessageIdCountThreshold + } + for _, iWant := range rpc.GetControl().GetIwant() { + messageIDs := iWant.GetMessageIDs() + totalMessageIdCount := len(messageIDs) + if totalMessageIdCount == 0 { + continue // nothing to truncate; skip + } + + if totalMessageIdCount > sampleSize { + c.performSample(p2pmsg.CtrlMsgIWant, uint(totalMessageIdCount), uint(sampleSize), func(i, j uint) { + messageIDs[i], messageIDs[j] = messageIDs[j], messageIDs[i] + }) + iWant.MessageIDs = messageIDs[:sampleSize] + c.metrics.OnIWantControlMessageIdsTruncated(totalMessageIdCount - len(iWant.MessageIDs)) + } + c.metrics.OnIWantMessageIDsReceived(len(iWant.MessageIDs)) + } +} + +// performSample performs sampling on the specified control message that will randomize +// the items in the control message slice up to index sampleSize-1. Any error encountered during sampling is considered +// irrecoverable and will cause the node to crash. +func (c *ControlMsgValidationInspector) performSample(ctrlMsg p2pmsg.ControlMessageType, totalSize, sampleSize uint, swap func(i, j uint)) { + err := flowrand.Samples(totalSize, sampleSize, swap) + if err != nil { + c.logAndThrowError(fmt.Errorf("failed to get random sample of %s control messages: %w", ctrlMsg, err)) + } +} + +// validateTopic ensures the topic is a valid flow topic/channel. +// Expected error returns during normal operations: +// - channels.InvalidTopicErr: if topic is invalid. +// - ErrActiveClusterIdsNotSet: if the cluster ID provider is not set. 
+// - channels.UnknownClusterIDErr: if the topic contains a cluster ID prefix that is not in the active cluster IDs list. +// +// This func returns an exception in case of unexpected bug or state corruption if cluster prefixed topic validation +// fails due to unexpected error returned when getting the active cluster IDS. +func (c *ControlMsgValidationInspector) validateTopic(from peer.ID, topic channels.Topic, activeClusterIds flow.ChainIDList) (error, p2p.CtrlMsgTopicType) { + channel, ok := channels.ChannelFromTopic(topic) + if !ok { + return channels.NewInvalidTopicErr(topic, fmt.Errorf("failed to get channel from topic")), p2p.CtrlMsgNonClusterTopicType + } + // handle cluster prefixed topics + if channels.IsClusterChannel(channel) { + return c.validateClusterPrefixedTopic(from, topic, activeClusterIds), p2p.CtrlMsgTopicTypeClusterPrefixed + } + + // non cluster prefixed topic validation + err := channels.IsValidNonClusterFlowTopic(topic, c.sporkID) + if err != nil { + return err, p2p.CtrlMsgNonClusterTopicType + } + return nil, p2p.CtrlMsgNonClusterTopicType +} + +// validateClusterPrefixedTopic validates cluster prefixed topics. +// Expected error returns during normal operations: +// - ErrActiveClusterIdsNotSet: if the cluster ID provider is not set. +// - channels.InvalidTopicErr: if topic is invalid. +// - channels.UnknownClusterIDErr: if the topic contains a cluster ID prefix that is not in the active cluster IDs list. +// +// In the case where an ErrActiveClusterIdsNotSet or UnknownClusterIDErr is encountered and the cluster prefixed topic received +// tracker for the peer is less than or equal to the configured HardThreshold an error will only be logged and not returned. +// At the point where the hard threshold is crossed the error will be returned and the sender will start to be penalized. +// Any errors encountered while incrementing or loading the cluster prefixed control message gauge for a peer will result in an irrecoverable error being thrown, these +// errors are unexpected and irrecoverable indicating a bug. +func (c *ControlMsgValidationInspector) validateClusterPrefixedTopic(from peer.ID, topic channels.Topic, activeClusterIds flow.ChainIDList) error { + lg := c.logger.With(). + Str("from", p2plogging.PeerId(from)). + Logger() + + if len(activeClusterIds) == 0 { + // cluster IDs have not been updated yet + _, incErr := c.tracker.Inc(from) + if incErr != nil { + // irrecoverable error encountered + c.logAndThrowError(fmt.Errorf("error encountered while incrementing the cluster prefixed control message gauge %s: %w", from, incErr)) + } + + // if the amount of messages received is below our hard threshold log the error and return nil. + if ok := c.checkClusterPrefixHardThreshold(from); ok { + lg.Warn(). + Str("topic", topic.String()). + Msg("failed to validate cluster prefixed control message with cluster pre-fixed topic active cluster ids not set") + return nil + } + + return NewActiveClusterIdsNotSetErr(topic) + } + + err := channels.IsValidFlowClusterTopic(topic, activeClusterIds) + if err != nil { + if channels.IsUnknownClusterIDErr(err) { + // unknown cluster ID error could indicate that a node has fallen + // behind and needs to catchup increment to topics received cache. + _, incErr := c.tracker.Inc(from) + if incErr != nil { + c.logAndThrowError(fmt.Errorf("error encountered while incrementing the cluster prefixed control message gauge %s: %w", from, err)) + } + // if the amount of messages received is below our hard threshold log the error and return nil. 
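The cluster-prefixed path here gives peers a grace allowance: unknown or not-yet-set cluster ids increment a per-peer gauge, and validation errors only count against the peer once the gauge passes HardThreshold, which covers honest nodes lagging behind an epoch transition. A compact sketch with illustrative names:

```go
package main

import "fmt"

type clusterPrefixGuard struct {
	gauge         map[string]float64
	hardThreshold float64
}

// allow increments the peer's gauge and reports whether the peer is still
// within its grace allowance (gauge at or below the hard threshold).
func (g *clusterPrefixGuard) allow(peerID string) bool {
	g.gauge[peerID]++
	return g.gauge[peerID] <= g.hardThreshold
}

func main() {
	guard := &clusterPrefixGuard{gauge: map[string]float64{}, hardThreshold: 2}
	for i := 0; i < 4; i++ {
		fmt.Println(guard.allow("peerA")) // true, true, false, false
	}
}
```

In the real tracker the gauge also decays over time, so a peer that catches up stops being penalized.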
+			if c.checkClusterPrefixHardThreshold(from) {
+				lg.Warn().
+					Err(err).
+					Str("topic", topic.String()).
+					Msg("processing unknown cluster prefixed topic received below cluster prefixed discard threshold; peer may be behind in the protocol")
+				return nil
+			}
+		}
+		return err
+	}
+
+	return nil
+}
+
+// checkClusterPrefixHardThreshold returns true if the cluster prefix received tracker count is less than or equal to
+// the configured HardThreshold, false otherwise.
+// If any error is encountered while loading from the tracker this func will throw an error on the signaler context; these
+// errors are unexpected and irrecoverable, indicating a bug.
+func (c *ControlMsgValidationInspector) checkClusterPrefixHardThreshold(pid peer.ID) bool {
+	gauge, err := c.tracker.Load(pid)
+	if err != nil {
+		// irrecoverable error encountered
+		c.logAndThrowError(fmt.Errorf("cluster prefixed control message gauge during hard threshold check failed for peer %s: %w", pid, err))
+	}
+	return gauge <= c.config.ClusterPrefixedMessage.HardThreshold
+}
+
+// logAndDistributeAsyncInspectErrs logs the provided error and attempts to disseminate an invalid control message validation notification for the error.
+// Args:
+// - req: inspect rpc request that failed validation.
+// - ctlMsgType: the control message type of the rpc message that caused the error.
+// - err: the error that occurred.
+// - count: the number of occurrences of the error.
+// - topicType: indicates whether the error occurred on a cluster prefixed topic.
+func (c *ControlMsgValidationInspector) logAndDistributeAsyncInspectErrs(req *InspectRPCRequest, ctlMsgType p2pmsg.ControlMessageType, err error, count uint64, topicType p2p.CtrlMsgTopicType) {
+	lg := c.logger.With().
+		Err(err).
+		Str("control_message_type", ctlMsgType.String()).
+		Bool(logging.KeySuspicious, true).
+		Bool(logging.KeyNetworkingSecurity, true).
+		Str("topic_type", topicType.String()).
+		Uint64("error_count", count).
+		Str("peer_id", p2plogging.PeerId(req.Peer)).
+		Logger()
+
+	switch {
+	case IsErrActiveClusterIDsNotSet(err):
+		c.metrics.OnActiveClusterIDsNotSetErr()
+		lg.Warn().Msg("active cluster ids not set")
+	case IsErrUnstakedPeer(err):
+		c.metrics.OnUnstakedPeerInspectionFailed()
+		lg.Warn().Msg("control message received from unstaked peer")
+	default:
+		c.notificationConsumer.OnInvalidControlMessageNotification(p2p.NewInvalidControlMessageNotification(req.Peer, ctlMsgType, err, count, topicType))
+		lg.Error().Msg("rpc control message async inspection failed, notification sent")
+		c.metrics.OnInvalidControlMessageNotificationSent()
+	}
+}
+
+// logAndThrowError logs and throws irrecoverable errors on the context.
+// Args:
+// - err: the error encountered.
+func (c *ControlMsgValidationInspector) logAndThrowError(err error) {
+	c.logger.Error().
+		Err(err).
+		Bool(logging.KeySuspicious, true).
+		Bool(logging.KeyNetworkingSecurity, true).
+ Msg("unexpected irrecoverable error encountered") + c.ctx.Throw(err) +} diff --git a/network/p2p/inspector/validation/control_message_validation_inspector_test.go b/network/p2p/inspector/validation/control_message_validation_inspector_test.go new file mode 100644 index 00000000000..8258f598f0f --- /dev/null +++ b/network/p2p/inspector/validation/control_message_validation_inspector_test.go @@ -0,0 +1,1838 @@ +package validation_test + +import ( + "context" + "fmt" + "io" + "math/rand" + "sync" + "testing" + "time" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + + "github.com/onflow/flow-go/config" + "github.com/onflow/flow-go/engine/common/worker" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + mockmodule "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/inspector/validation" + p2pmsg "github.com/onflow/flow-go/network/p2p/message" + mockp2p "github.com/onflow/flow-go/network/p2p/mock" + p2ptest "github.com/onflow/flow-go/network/p2p/test" + randutils "github.com/onflow/flow-go/utils/rand" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestNewControlMsgValidationInspector(t *testing.T) { + t.Run("should create validation inspector without error", func(t *testing.T) { + sporkID := unittest.IdentifierFixture() + flowConfig, err := config.DefaultConfig() + require.NoError(t, err, "failed to get default flow config") + consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t) + idProvider := mockmodule.NewIdentityProvider(t) + topicProvider := p2ptest.NewUpdatableTopicProviderFixture() + inspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: unittest.Logger(), + SporkID: sporkID, + Config: &flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation, + IdProvider: idProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + InspectorMetrics: metrics.NewNoopCollector(), + RpcTracker: mockp2p.NewRpcControlTracking(t), + NetworkingType: network.PublicNetwork, + InvalidControlMessageNotificationConsumer: consumer, + TopicOracle: func() p2p.TopicProvider { + return topicProvider + }, + }) + require.NoError(t, err) + require.NotNil(t, inspector) + }) + t.Run("should return error if any of the params are nil", func(t *testing.T) { + inspector, err := validation.NewControlMsgValidationInspector(&validation.InspectorParams{ + Logger: unittest.Logger(), + SporkID: unittest.IdentifierFixture(), + Config: nil, + IdProvider: nil, + HeroCacheMetricsFactory: nil, + InspectorMetrics: nil, + RpcTracker: nil, + TopicOracle: nil, + InvalidControlMessageNotificationConsumer: nil, + }) + require.Nil(t, inspector) + require.Error(t, err) + s := err.Error() + require.Contains(t, s, "validation for 'Config' failed on the 'required'") + require.Contains(t, s, "validation for 'InvalidControlMessageNotificationConsumer' failed on the 'required'") + require.Contains(t, s, "validation for 'IdProvider' failed on the 'required'") + require.Contains(t, s, "validation for 'HeroCacheMetricsFactory' failed on the 'required'") + require.Contains(t, s, "validation for 'InspectorMetrics' 
failed on the 'required'") + require.Contains(t, s, "validation for 'RpcTracker' failed on the 'required'") + require.Contains(t, s, "validation for 'NetworkingType' failed on the 'required'") + require.Contains(t, s, "validation for 'TopicOracle' failed on the 'required'") + }) +} + +// TestControlMessageValidationInspector_TruncateRPC verifies the expected truncation behavior of RPC control messages. +// Message truncation for each control message type occurs when the count of control +// messages exceeds the configured maximum sample size for that control message type. +func TestControlMessageValidationInspector_truncateRPC(t *testing.T) { + t.Run("graft truncation", func(t *testing.T) { + graftPruneMessageMaxSampleSize := 1000 + inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config.GraftPrune.MessageCountThreshold = graftPruneMessageMaxSampleSize + }) + // topic validation is ignored set any topic oracle + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Maybe() + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe() + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + // topic validation not performed so we can use random strings + graftsGreaterThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithGrafts(unittest.P2PRPCGraftFixtures(unittest.IdentifierListFixture(2000).Strings()...)...)) + require.Greater(t, len(graftsGreaterThanMaxSampleSize.GetControl().GetGraft()), graftPruneMessageMaxSampleSize) + graftsLessThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithGrafts(unittest.P2PRPCGraftFixtures(unittest.IdentifierListFixture(50).Strings()...)...)) + require.Less(t, len(graftsLessThanMaxSampleSize.GetControl().GetGraft()), graftPruneMessageMaxSampleSize) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Twice() + require.NoError(t, inspector.Inspect(from, graftsGreaterThanMaxSampleSize)) + require.NoError(t, inspector.Inspect(from, graftsLessThanMaxSampleSize)) + require.Eventually(t, func() bool { + // rpc with grafts greater than configured max sample size should be truncated to GraftPruneMessageMaxSampleSize + shouldBeTruncated := len(graftsGreaterThanMaxSampleSize.GetControl().GetGraft()) == graftPruneMessageMaxSampleSize + // rpc with grafts less than GraftPruneMessageMaxSampleSize should not be truncated + shouldNotBeTruncated := len(graftsLessThanMaxSampleSize.GetControl().GetGraft()) == 50 + return shouldBeTruncated && shouldNotBeTruncated + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") + }) + + t.Run("prune truncation", func(t *testing.T) { + graftPruneMessageMaxSampleSize := 1000 + inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config.GraftPrune.MessageCountThreshold = graftPruneMessageMaxSampleSize + }) + // topic validation is ignored set any topic oracle + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe() + consumer.On("OnInvalidControlMessageNotification", 
mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Maybe() + + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + // topic validation not performed, so we can use random strings + prunesGreaterThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithPrunes(unittest.P2PRPCPruneFixtures(unittest.IdentifierListFixture(2000).Strings()...)...)) + require.Greater(t, len(prunesGreaterThanMaxSampleSize.GetControl().GetPrune()), graftPruneMessageMaxSampleSize) + prunesLessThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithPrunes(unittest.P2PRPCPruneFixtures(unittest.IdentifierListFixture(50).Strings()...)...)) + require.Less(t, len(prunesLessThanMaxSampleSize.GetControl().GetPrune()), graftPruneMessageMaxSampleSize) + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Twice() + require.NoError(t, inspector.Inspect(from, prunesGreaterThanMaxSampleSize)) + require.NoError(t, inspector.Inspect(from, prunesLessThanMaxSampleSize)) + require.Eventually(t, func() bool { + // rpc with prunes greater than configured max sample size should be truncated to GraftPruneMessageMaxSampleSize + shouldBeTruncated := len(prunesGreaterThanMaxSampleSize.GetControl().GetPrune()) == graftPruneMessageMaxSampleSize + // rpc with prunes less than GraftPruneMessageMaxSampleSize should not be truncated + shouldNotBeTruncated := len(prunesLessThanMaxSampleSize.GetControl().GetPrune()) == 50 + return shouldBeTruncated && shouldNotBeTruncated + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") + }) + + t.Run("ihave message id truncation", func(t *testing.T) { + maxSampleSize := 1000 + inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config.IHave.MessageCountThreshold = maxSampleSize + }) + // topic validation is ignored set any topic oracle + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe() + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Maybe() + + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + // topic validation not performed so we can use random strings + iHavesGreaterThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithIHaves(unittest.P2PRPCIHaveFixtures(2000, unittest.IdentifierListFixture(2000).Strings()...)...)) + require.Greater(t, len(iHavesGreaterThanMaxSampleSize.GetControl().GetIhave()), maxSampleSize) + iHavesLessThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithIHaves(unittest.P2PRPCIHaveFixtures(200, unittest.IdentifierListFixture(50).Strings()...)...)) + require.Less(t, len(iHavesLessThanMaxSampleSize.GetControl().GetIhave()), maxSampleSize) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Twice() + require.NoError(t, inspector.Inspect(from, iHavesGreaterThanMaxSampleSize)) + require.NoError(t, inspector.Inspect(from, iHavesLessThanMaxSampleSize)) + require.Eventually(t, func() bool { + // rpc with iHaves greater than configured max sample size should be truncated to MessageCountThreshold + shouldBeTruncated := len(iHavesGreaterThanMaxSampleSize.GetControl().GetIhave()) == maxSampleSize + // rpc with iHaves less than 
MessageCountThreshold should not be truncated + shouldNotBeTruncated := len(iHavesLessThanMaxSampleSize.GetControl().GetIhave()) == 50 + return shouldBeTruncated && shouldNotBeTruncated + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") + }) + + t.Run("ihave message ids truncation", func(t *testing.T) { + maxMessageIDSampleSize := 1000 + inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config.IHave.MessageIdCountThreshold = maxMessageIDSampleSize + }) + // topic validation is ignored set any topic oracle + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe() + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Maybe() + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + // topic validation not performed so we can use random strings + iHavesGreaterThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithIHaves(unittest.P2PRPCIHaveFixtures(2000, unittest.IdentifierListFixture(10).Strings()...)...)) + iHavesLessThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithIHaves(unittest.P2PRPCIHaveFixtures(50, unittest.IdentifierListFixture(10).Strings()...)...)) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Twice() + require.NoError(t, inspector.Inspect(from, iHavesGreaterThanMaxSampleSize)) + require.NoError(t, inspector.Inspect(from, iHavesLessThanMaxSampleSize)) + require.Eventually(t, func() bool { + for _, iHave := range iHavesGreaterThanMaxSampleSize.GetControl().GetIhave() { + // rpc with iHaves message ids greater than configured max sample size should be truncated to MessageCountThreshold + if len(iHave.GetMessageIDs()) != maxMessageIDSampleSize { + return false + } + } + for _, iHave := range iHavesLessThanMaxSampleSize.GetControl().GetIhave() { + // rpc with iHaves message ids less than MessageCountThreshold should not be truncated + if len(iHave.GetMessageIDs()) != 50 { + return false + } + } + return true + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") + }) + + t.Run("iwant message truncation", func(t *testing.T) { + maxSampleSize := uint(100) + inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config.IWant.MessageCountThreshold = maxSampleSize + }) + // topic validation is ignored set any topic oracle + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe() + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Maybe() + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + iWantsGreaterThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithIWants(unittest.P2PRPCIWantFixtures(200, 200)...)) + require.Greater(t, uint(len(iWantsGreaterThanMaxSampleSize.GetControl().GetIwant())), maxSampleSize) + iWantsLessThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithIWants(unittest.P2PRPCIWantFixtures(50, 200)...)) + require.Less(t, 
uint(len(iWantsLessThanMaxSampleSize.GetControl().GetIwant())), maxSampleSize) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Twice() + require.NoError(t, inspector.Inspect(from, iWantsGreaterThanMaxSampleSize)) + require.NoError(t, inspector.Inspect(from, iWantsLessThanMaxSampleSize)) + require.Eventually(t, func() bool { + // rpc with iWants greater than configured max sample size should be truncated to MessageCountThreshold + shouldBeTruncated := len(iWantsGreaterThanMaxSampleSize.GetControl().GetIwant()) == int(maxSampleSize) + // rpc with iWants less than MessageCountThreshold should not be truncated + shouldNotBeTruncated := len(iWantsLessThanMaxSampleSize.GetControl().GetIwant()) == 50 + return shouldBeTruncated && shouldNotBeTruncated + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") + }) + + t.Run("iwant message id truncation", func(t *testing.T) { + maxMessageIDSampleSize := 1000 + inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config.IWant.MessageIdCountThreshold = maxMessageIDSampleSize + }) + // topic validation is ignored set any topic oracle + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe() + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Maybe() + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + iWantsGreaterThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithIWants(unittest.P2PRPCIWantFixtures(10, 2000)...)) + iWantsLessThanMaxSampleSize := unittest.P2PRPCFixture(unittest.WithIWants(unittest.P2PRPCIWantFixtures(10, 50)...)) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Twice() + require.NoError(t, inspector.Inspect(from, iWantsGreaterThanMaxSampleSize)) + require.NoError(t, inspector.Inspect(from, iWantsLessThanMaxSampleSize)) + require.Eventually(t, func() bool { + for _, iWant := range iWantsGreaterThanMaxSampleSize.GetControl().GetIwant() { + // rpc with iWants message ids greater than configured max sample size should be truncated to MessageCountThreshold + if len(iWant.GetMessageIDs()) != maxMessageIDSampleSize { + return false + } + } + for _, iWant := range iWantsLessThanMaxSampleSize.GetControl().GetIwant() { + // rpc with iWants less than MessageCountThreshold should not be truncated + if len(iWant.GetMessageIDs()) != 50 { + return false + } + } + return true + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") + }) +} + +// TestControlMessageInspection_ValidRpc ensures inspector does not disseminate invalid control message notifications for a valid RPC. 
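+// The RPC under inspection carries grafts, prunes, iHaves, iWants, and pubsub messages, all on valid spork-suffixed
+// topics; the WasIHaveRPCSent mock additionally asserts that every message id the inspector looks up belongs to one of
+// the RPC's iWant messages.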
+func TestControlMessageInspection_ValidRpc(t *testing.T) { + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog) + inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Logger = logger + }) + defer consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification") + + topics := []string{ + fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID), + fmt.Sprintf("%s/%s", channels.PushBlocks, sporkID), + fmt.Sprintf("%s/%s", channels.SyncCommittee, sporkID), + fmt.Sprintf("%s/%s", channels.RequestChunks, sporkID), + } + // avoid unknown topics errors + topicProviderOracle.UpdateTopics(topics) + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + grafts := unittest.P2PRPCGraftFixtures(topics...) + prunes := unittest.P2PRPCPruneFixtures(topics...) + ihaves := unittest.P2PRPCIHaveFixtures(50, topics...) + iwants := unittest.P2PRPCIWantFixtures(2, 50) + pubsubMsgs := unittest.GossipSubMessageFixtures(10, topics[0]) + + rpc := unittest.P2PRPCFixture( + unittest.WithGrafts(grafts...), + unittest.WithPrunes(prunes...), + unittest.WithIHaves(ihaves...), + unittest.WithIWants(iwants...), + unittest.WithPubsubMessages(pubsubMsgs...)) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Run(func(args mock.Arguments) { + id, ok := args[0].(string) + require.True(t, ok) + for _, iwant := range iwants { + for _, messageID := range iwant.GetMessageIDs() { + if id == messageID { + return + } + } + } + require.Fail(t, "message id not found in iwant messages") + }) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once() + require.NoError(t, inspector.Inspect(from, rpc)) + require.Eventually(t, func() bool { + return logCounter.Load() == 1 + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") +} + +// TestGraftInspection_InvalidTopic_BelowThreshold ensures inspector does not disseminate an invalid control message notification for +// graft messages when the invalid topic id count does not exceed the configured threshold. 
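+// Each of the three inspected RPCs carries exactly InvalidTopicIdThreshold invalid topics of a single kind (unknown,
+// malformed, or wrong spork id), so no single RPC crosses the threshold and no notification is expected.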
+func TestGraftInspection_InvalidTopic_BelowThreshold(t *testing.T) { + c, err := config.DefaultConfig() + require.NoError(t, err) + cfg := &c.NetworkConfig.GossipSub.RpcInspector.Validation + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog) + inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Logger = logger + }) + + var unknownTopicGrafts []*pubsub_pb.ControlGraft + var malformedTopicGrafts []*pubsub_pb.ControlGraft + var invalidSporkIDTopicGrafts []*pubsub_pb.ControlGraft + var allTopics []string + for i := 0; i < cfg.GraftPrune.InvalidTopicIdThreshold; i++ { + // create unknown topic + unknownTopic, malformedTopic, invalidSporkIDTopic := invalidTopics(t, sporkID) + allTopics = append(allTopics, unknownTopic, malformedTopic, invalidSporkIDTopic) + unknownTopicGrafts = append(unknownTopicGrafts, unittest.P2PRPCGraftFixture(&unknownTopic)) + malformedTopicGrafts = append(malformedTopicGrafts, unittest.P2PRPCGraftFixture(&malformedTopic)) + invalidSporkIDTopicGrafts = append(invalidSporkIDTopicGrafts, unittest.P2PRPCGraftFixture(&invalidSporkIDTopic)) + } + // avoid unknown topics errors + topicProviderOracle.UpdateTopics(allTopics) + unknownTopicReq := unittest.P2PRPCFixture(unittest.WithGrafts(unknownTopicGrafts...)) + malformedTopicReq := unittest.P2PRPCFixture(unittest.WithGrafts(malformedTopicGrafts...)) + invalidSporkIDTopicReq := unittest.P2PRPCFixture(unittest.WithGrafts(invalidSporkIDTopicGrafts...)) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Times(3) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")) + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + require.NoError(t, inspector.Inspect(from, unknownTopicReq)) + require.NoError(t, inspector.Inspect(from, malformedTopicReq)) + require.NoError(t, inspector.Inspect(from, invalidSporkIDTopicReq)) + require.Eventually(t, func() bool { + return logCounter.Load() == 3 + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") +} + +// TestGraftInspection_InvalidTopic_AboveThreshold ensures inspector disseminates an invalid control message notification for +// graft messages when the invalid topic id count exceeds the configured threshold. 
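+// Each of the three inspected RPCs carries InvalidTopicIdThreshold+1 invalid topics of a single kind, one past the
+// limit, so one notification is expected per RPC (three in total).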
+func TestGraftInspection_InvalidTopic_AboveThreshold(t *testing.T) { + c, err := config.DefaultConfig() + require.NoError(t, err) + cfg := &c.NetworkConfig.GossipSub.RpcInspector.Validation + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog) + inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config = cfg + params.Logger = logger + }) + + var unknownTopicGrafts []*pubsub_pb.ControlGraft + var malformedTopicGrafts []*pubsub_pb.ControlGraft + var invalidSporkIDTopicGrafts []*pubsub_pb.ControlGraft + var allTopics []string + for i := 0; i < cfg.GraftPrune.InvalidTopicIdThreshold+1; i++ { + // create unknown topic + unknownTopic, malformedTopic, invalidSporkIDTopic := invalidTopics(t, sporkID) + allTopics = append(allTopics, unknownTopic, malformedTopic, invalidSporkIDTopic) + unknownTopicGrafts = append(unknownTopicGrafts, unittest.P2PRPCGraftFixture(&unknownTopic)) + malformedTopicGrafts = append(malformedTopicGrafts, unittest.P2PRPCGraftFixture(&malformedTopic)) + invalidSporkIDTopicGrafts = append(invalidSporkIDTopicGrafts, unittest.P2PRPCGraftFixture(&invalidSporkIDTopic)) + } + + // avoid unknown topics errors + topicProviderOracle.UpdateTopics(allTopics) + unknownTopicReq := unittest.P2PRPCFixture(unittest.WithGrafts(unknownTopicGrafts...)) + malformedTopicReq := unittest.P2PRPCFixture(unittest.WithGrafts(malformedTopicGrafts...)) + invalidSporkIDTopicReq := unittest.P2PRPCFixture(unittest.WithGrafts(invalidSporkIDTopicGrafts...)) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Times(3) + checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgGraft, validation.IsInvalidTopicIDThresholdExceeded, p2p.CtrlMsgNonClusterTopicType) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(3).Run(checkNotification) + + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + require.NoError(t, inspector.Inspect(from, unknownTopicReq)) + require.NoError(t, inspector.Inspect(from, malformedTopicReq)) + require.NoError(t, inspector.Inspect(from, invalidSporkIDTopicReq)) + require.Eventually(t, func() bool { + return logCounter.Load() == 3 + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") +} + +// TestGraftInspection_DuplicateTopicIds_BelowThreshold ensures inspector does not disseminate invalid control message notifications +// for a valid RPC with duplicate graft topic ids below the threshold. 
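+// Note that only repeats after the first occurrence of a topic id count as duplicates, so an RPC with exactly
+// DuplicateTopicIdThreshold grafts of the same topic stays below the threshold (see the above-threshold variant below).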
+func TestGraftInspection_DuplicateTopicIds_BelowThreshold(t *testing.T) { + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog) + inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Logger = logger + }) + duplicateTopic := fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID) + // avoid unknown topics errors + topicProviderOracle.UpdateTopics([]string{duplicateTopic}) + var grafts []*pubsub_pb.ControlGraft + cfg, err := config.DefaultConfig() + require.NoError(t, err) + for i := 0; i < cfg.NetworkConfig.GossipSub.RpcInspector.Validation.GraftPrune.DuplicateTopicIdThreshold; i++ { + grafts = append(grafts, unittest.P2PRPCGraftFixture(&duplicateTopic)) + } + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once() + rpc := unittest.P2PRPCFixture(unittest.WithGrafts(grafts...)) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + // no notification should be disseminated for valid messages as long as the number of duplicates is below the threshold + consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")) + + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 100*time.Millisecond, inspector) + + require.NoError(t, inspector.Inspect(from, rpc)) + require.Eventually(t, func() bool { + return logCounter.Load() == 1 + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") +} + +func TestGraftInspection_DuplicateTopicIds_AboveThreshold(t *testing.T) { + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog) + inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Logger = logger + }) + duplicateTopic := fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID) + // avoid unknown topics errors + topicProviderOracle.UpdateTopics([]string{duplicateTopic}) + var grafts []*pubsub_pb.ControlGraft + cfg, err := config.DefaultConfig() + require.NoError(t, err) + for i := 0; i < cfg.NetworkConfig.GossipSub.RpcInspector.Validation.GraftPrune.DuplicateTopicIdThreshold+2; i++ { + grafts = append(grafts, unittest.P2PRPCGraftFixture(&duplicateTopic)) + } + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once() + rpc := unittest.P2PRPCFixture(unittest.WithGrafts(grafts...)) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(func(args mock.Arguments) { + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "expected p2p.CtrlMsgNonClusterTopicType notification type, no RPC with cluster prefixed topic sent in this test") + require.Equal(t, from, notification.PeerID) + require.Equal(t, p2pmsg.CtrlMsgGraft, notification.MsgType) + require.True(t, validation.IsDuplicateTopicIDThresholdExceeded(notification.Error)) + }) + + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 100*time.Millisecond, 
inspector)
+
+	require.NoError(t, inspector.Inspect(from, rpc))
+	require.Eventually(t, func() bool {
+		return logCounter.Load() == 1
+	}, time.Second, 500*time.Millisecond)
+	cancel()
+	unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestPruneInspection_InvalidTopic_BelowThreshold ensures inspector does not disseminate an invalid control message notification for
+// prune messages when the invalid topic id count does not exceed the configured threshold.
+func TestPruneInspection_InvalidTopic_BelowThreshold(t *testing.T) {
+	c, err := config.DefaultConfig()
+	require.NoError(t, err)
+	cfg := &c.NetworkConfig.GossipSub.RpcInspector.Validation
+	inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+		params.Config = cfg
+	})
+
+	var unknownTopicPrunes []*pubsub_pb.ControlPrune
+	var malformedTopicPrunes []*pubsub_pb.ControlPrune
+	var invalidSporkIDTopicPrunes []*pubsub_pb.ControlPrune
+	var allTopics []string
+	for i := 0; i < cfg.GraftPrune.InvalidTopicIdThreshold; i++ {
+		// create unknown topic
+		unknownTopic, malformedTopic, invalidSporkIDTopic := invalidTopics(t, sporkID)
+		allTopics = append(allTopics, unknownTopic, malformedTopic, invalidSporkIDTopic)
+		unknownTopicPrunes = append(unknownTopicPrunes, unittest.P2PRPCPruneFixture(&unknownTopic))
+		malformedTopicPrunes = append(malformedTopicPrunes, unittest.P2PRPCPruneFixture(&malformedTopic))
+		invalidSporkIDTopicPrunes = append(invalidSporkIDTopicPrunes, unittest.P2PRPCPruneFixture(&invalidSporkIDTopic))
+	}
+
+	// avoid unknown topics errors
+	topicProviderOracle.UpdateTopics(allTopics)
+	unknownTopicReq := unittest.P2PRPCFixture(unittest.WithPrunes(unknownTopicPrunes...))
+	malformedTopicReq := unittest.P2PRPCFixture(unittest.WithPrunes(malformedTopicPrunes...))
+	invalidSporkIDTopicReq := unittest.P2PRPCFixture(unittest.WithPrunes(invalidSporkIDTopicPrunes...))
+
+	from := unittest.PeerIdFixture(t)
+	idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Times(3)
+	rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+	// no notification should be disseminated for valid messages as long as the number of invalid topic ids is below the threshold
+	consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif"))
+	inspector.Start(signalerCtx)
+	unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+	require.NoError(t, inspector.Inspect(from, unknownTopicReq))
+	require.NoError(t, inspector.Inspect(from, malformedTopicReq))
+	require.NoError(t, inspector.Inspect(from, invalidSporkIDTopicReq))
+
+	// sleep for 2 seconds to ensure the RPCs are processed
+	time.Sleep(2 * time.Second)
+	cancel()
+	unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestPruneInspection_InvalidTopic_AboveThreshold ensures inspector disseminates an invalid control message notification for
+// prune messages when the invalid topic id count exceeds the configured threshold.
+func TestPruneInspection_InvalidTopic_AboveThreshold(t *testing.T) {
+	c, err := config.DefaultConfig()
+	require.NoError(t, err)
+	cfg := &c.NetworkConfig.GossipSub.RpcInspector.Validation
+	inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+		params.Config = cfg
+	})
+
+	var unknownTopicPrunes []*pubsub_pb.ControlPrune
+	var malformedTopicPrunes []*pubsub_pb.ControlPrune
+	var invalidSporkIDTopicPrunes []*pubsub_pb.ControlPrune
+	var allTopics []string
+	for i := 0; i < cfg.GraftPrune.InvalidTopicIdThreshold+1; i++ {
+		// create unknown topic
+		unknownTopic, malformedTopic, invalidSporkIDTopic := invalidTopics(t, sporkID)
+		allTopics = append(allTopics, unknownTopic, malformedTopic, invalidSporkIDTopic)
+		unknownTopicPrunes = append(unknownTopicPrunes, unittest.P2PRPCPruneFixture(&unknownTopic))
+		malformedTopicPrunes = append(malformedTopicPrunes, unittest.P2PRPCPruneFixture(&malformedTopic))
+		invalidSporkIDTopicPrunes = append(invalidSporkIDTopicPrunes, unittest.P2PRPCPruneFixture(&invalidSporkIDTopic))
+	}
+
+	// avoid unknown topics errors
+	topicProviderOracle.UpdateTopics(allTopics)
+	unknownTopicReq := unittest.P2PRPCFixture(unittest.WithPrunes(unknownTopicPrunes...))
+	malformedTopicReq := unittest.P2PRPCFixture(unittest.WithPrunes(malformedTopicPrunes...))
+	invalidSporkIDTopicReq := unittest.P2PRPCFixture(unittest.WithPrunes(invalidSporkIDTopicPrunes...))
+
+	from := unittest.PeerIdFixture(t)
+	idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Times(3)
+	checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgPrune, validation.IsInvalidTopicIDThresholdExceeded, p2p.CtrlMsgNonClusterTopicType)
+	rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+	consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(3).Run(checkNotification)
+
+	inspector.Start(signalerCtx)
+	unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+	require.NoError(t, inspector.Inspect(from, unknownTopicReq))
+	require.NoError(t, inspector.Inspect(from, malformedTopicReq))
+	require.NoError(t, inspector.Inspect(from, invalidSporkIDTopicReq))
+
+	// sleep for 1 second to ensure the RPCs are processed
+	time.Sleep(time.Second)
+	cancel()
+	unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestPruneInspection_DuplicateTopicIds_AboveThreshold ensures inspector disseminates an invalid control message notification for
+// prune messages when the number of duplicate topic ids is above the threshold.
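+// The RPC carries DuplicateTopicIdThreshold+2 prunes of the same topic, i.e. threshold+1 duplicates after the
+// uncounted first occurrence, which is enough to trigger exactly one notification. For example, with a hypothetical
+// DuplicateTopicIdThreshold of 3, the RPC would carry 5 prunes of one topic, yielding 4 counted duplicates.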
+func TestPruneInspection_DuplicateTopicIds_AboveThreshold(t *testing.T) { + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog) + inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Logger = logger + }) + duplicateTopic := fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID) + // avoid unknown topics errors + topicProviderOracle.UpdateTopics([]string{duplicateTopic}) + var prunes []*pubsub_pb.ControlPrune + cfg, err := config.DefaultConfig() + require.NoError(t, err) + // we need threshold + 1 to trigger the invalid control message notification; as the first duplicate topic id is not counted + for i := 0; i < cfg.NetworkConfig.GossipSub.RpcInspector.Validation.GraftPrune.DuplicateTopicIdThreshold+2; i++ { + prunes = append(prunes, unittest.P2PRPCPruneFixture(&duplicateTopic)) + } + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once() + rpc := unittest.P2PRPCFixture(unittest.WithPrunes(prunes...)) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(func(args mock.Arguments) { + notification, ok := args[0].(*p2p.InvCtrlMsgNotif) + require.True(t, ok) + require.Equal(t, notification.TopicType, p2p.CtrlMsgNonClusterTopicType, "expected p2p.CtrlMsgNonClusterTopicType notification type, no RPC with cluster prefixed topic sent in this test") + require.Equal(t, from, notification.PeerID) + require.Equal(t, p2pmsg.CtrlMsgPrune, notification.MsgType) + require.True(t, validation.IsDuplicateTopicIDThresholdExceeded(notification.Error)) + }) + + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 100*time.Millisecond, inspector) + + require.NoError(t, inspector.Inspect(from, rpc)) + require.Eventually(t, func() bool { + return logCounter.Load() == 1 + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") +} + +// TestPruneInspection_DuplicateTopicIds_BelowThreshold ensures inspector does not disseminate invalid control message notifications +// for a valid RPC with duplicate prune topic ids below the threshold. 
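+// This is the counterpart of the test above: with exactly DuplicateTopicIdThreshold prunes of the same topic the
+// duplicate count stays below the threshold and no notification is expected.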
+func TestPruneInspection_DuplicateTopicIds_BelowThreshold(t *testing.T) {
+	logCounter := atomic.NewInt64(0)
+	logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+	inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+		params.Logger = logger
+	})
+	duplicateTopic := fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID)
+	// avoid unknown topics errors
+	topicProviderOracle.UpdateTopics([]string{duplicateTopic})
+	var prunes []*pubsub_pb.ControlPrune
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	for i := 0; i < cfg.NetworkConfig.GossipSub.RpcInspector.Validation.GraftPrune.DuplicateTopicIdThreshold; i++ {
+		prunes = append(prunes, unittest.P2PRPCPruneFixture(&duplicateTopic))
+	}
+	from := unittest.PeerIdFixture(t)
+	idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+	rpc := unittest.P2PRPCFixture(unittest.WithPrunes(prunes...))
+	rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+
+	// no notification should be disseminated for valid messages as long as the number of duplicates is below the threshold
+	consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif"))
+
+	inspector.Start(signalerCtx)
+	unittest.RequireComponentsReadyBefore(t, 100*time.Millisecond, inspector)
+
+	require.NoError(t, inspector.Inspect(from, rpc))
+	require.Eventually(t, func() bool {
+		return logCounter.Load() == 1
+	}, time.Second, 500*time.Millisecond)
+	cancel()
+	unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestIHaveInspection_InvalidTopic_AboveThreshold ensures inspector disseminates an invalid control message notification for
+// ihave messages when the invalid topic id count exceeds the configured threshold.
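+// Unlike the graft/prune variants, each iHave message here also advertises 5 message ids; only the topic ids are
+// under test, the message ids simply make the fixtures well-formed.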
+func TestIHaveInspection_InvalidTopic_AboveThreshold(t *testing.T) { + c, err := config.DefaultConfig() + require.NoError(t, err) + cfg := &c.NetworkConfig.GossipSub.RpcInspector.Validation + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog) + inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config = cfg + params.Logger = logger + }) + + var unknownTopicIHaves []*pubsub_pb.ControlIHave + var malformedTopicIHaves []*pubsub_pb.ControlIHave + var invalidSporkIDTopicIHaves []*pubsub_pb.ControlIHave + var allTopics []string + for i := 0; i < cfg.GraftPrune.InvalidTopicIdThreshold+1; i++ { + // create unknown topic + unknownTopic, malformedTopic, invalidSporkIDTopic := invalidTopics(t, sporkID) + allTopics = append(allTopics, unknownTopic, malformedTopic, invalidSporkIDTopic) + unknownTopicIHaves = append(unknownTopicIHaves, unittest.P2PRPCIHaveFixture(&unknownTopic, unittest.IdentifierListFixture(5).Strings()...)) + malformedTopicIHaves = append(malformedTopicIHaves, unittest.P2PRPCIHaveFixture(&malformedTopic, unittest.IdentifierListFixture(5).Strings()...)) + invalidSporkIDTopicIHaves = append(invalidSporkIDTopicIHaves, unittest.P2PRPCIHaveFixture(&invalidSporkIDTopic, unittest.IdentifierListFixture(5).Strings()...)) + } + + // avoid unknown topics errors + topicProviderOracle.UpdateTopics(allTopics) + unknownTopicReq := unittest.P2PRPCFixture(unittest.WithIHaves(unknownTopicIHaves...)) + malformedTopicReq := unittest.P2PRPCFixture(unittest.WithIHaves(malformedTopicIHaves...)) + invalidSporkIDTopicReq := unittest.P2PRPCFixture(unittest.WithIHaves(invalidSporkIDTopicIHaves...)) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Times(3) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgIHave, validation.IsInvalidTopicIDThresholdExceeded, p2p.CtrlMsgNonClusterTopicType) + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Times(3).Run(checkNotification) + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + require.NoError(t, inspector.Inspect(from, unknownTopicReq)) + require.NoError(t, inspector.Inspect(from, malformedTopicReq)) + require.NoError(t, inspector.Inspect(from, invalidSporkIDTopicReq)) + require.Eventually(t, func() bool { + return logCounter.Load() == 3 + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") +} + +// TestIHaveInspection_InvalidTopic_BelowThreshold ensures inspector does not disseminate an invalid control message notification for +// ihave messages when the invalid topic id count does not exceed the configured threshold. 
+func TestIHaveInspection_InvalidTopic_BelowThreshold(t *testing.T) { + c, err := config.DefaultConfig() + require.NoError(t, err) + cfg := &c.NetworkConfig.GossipSub.RpcInspector.Validation + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog) + inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config = cfg + params.Logger = logger + }) + + var unknownTopicIHaves []*pubsub_pb.ControlIHave + var malformedTopicIHaves []*pubsub_pb.ControlIHave + var invalidSporkIDTopicIHaves []*pubsub_pb.ControlIHave + var allTopics []string + for i := 0; i < cfg.GraftPrune.InvalidTopicIdThreshold; i++ { + // create unknown topic + unknownTopic, malformedTopic, invalidSporkIDTopic := invalidTopics(t, sporkID) + allTopics = append(allTopics, unknownTopic, malformedTopic, invalidSporkIDTopic) + unknownTopicIHaves = append(unknownTopicIHaves, unittest.P2PRPCIHaveFixture(&unknownTopic, unittest.IdentifierListFixture(5).Strings()...)) + malformedTopicIHaves = append(malformedTopicIHaves, unittest.P2PRPCIHaveFixture(&malformedTopic, unittest.IdentifierListFixture(5).Strings()...)) + invalidSporkIDTopicIHaves = append(invalidSporkIDTopicIHaves, unittest.P2PRPCIHaveFixture(&invalidSporkIDTopic, unittest.IdentifierListFixture(5).Strings()...)) + } + + // avoid unknown topics errors + topicProviderOracle.UpdateTopics(allTopics) + unknownTopicReq := unittest.P2PRPCFixture(unittest.WithIHaves(unknownTopicIHaves...)) + malformedTopicReq := unittest.P2PRPCFixture(unittest.WithIHaves(malformedTopicIHaves...)) + invalidSporkIDTopicReq := unittest.P2PRPCFixture(unittest.WithIHaves(invalidSporkIDTopicIHaves...)) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Times(3) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + // no notification should be disseminated for valid messages as long as the number of invalid topic ids is below the threshold + consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")) + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + require.NoError(t, inspector.Inspect(from, unknownTopicReq)) + require.NoError(t, inspector.Inspect(from, malformedTopicReq)) + require.NoError(t, inspector.Inspect(from, invalidSporkIDTopicReq)) + require.Eventually(t, func() bool { + return logCounter.Load() == 3 + }, time.Second, 500*time.Millisecond) + + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") +} + +// TestIHaveInspection_DuplicateTopicIds_BelowThreshold ensures inspector does not disseminate an invalid control message notification for +// iHave messages when duplicate topic ids are below allowed threshold. 
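+// Duplicates are counted across the iHave messages of a single RPC: every iHave in the fixture reuses the same valid
+// topic id but advertises distinct message ids, so only the topic id is duplicated.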
+func TestIHaveInspection_DuplicateTopicIds_BelowThreshold(t *testing.T) {
+	logCounter := atomic.NewInt64(0)
+	logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+	inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+		params.Logger = logger
+	})
+	validTopic := fmt.Sprintf("%s/%s", channels.PushBlocks.String(), sporkID)
+	// avoid unknown topics errors
+	topicProviderOracle.UpdateTopics([]string{validTopic})
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	validTopicIHave := unittest.P2PRPCIHaveFixture(&validTopic, unittest.IdentifierListFixture(5).Strings()...)
+	ihaves := []*pubsub_pb.ControlIHave{validTopicIHave}
+	// duplicate the valid topic id on other iHave messages but with different message ids
+	for i := 0; i < cfg.NetworkConfig.GossipSub.RpcInspector.Validation.IHave.DuplicateTopicIdThreshold-1; i++ {
+		ihaves = append(ihaves, unittest.P2PRPCIHaveFixture(&validTopic, unittest.IdentifierListFixture(5).Strings()...))
+	}
+	// creates an RPC with duplicate topic ids but different message ids
+	duplicateMsgIDRpc := unittest.P2PRPCFixture(unittest.WithIHaves(ihaves...))
+	from := unittest.PeerIdFixture(t)
+	idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+	rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+
+	// no notification should be disseminated for valid messages as long as the number of duplicates is below the threshold
+	consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif"))
+	inspector.Start(signalerCtx)
+	unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+	require.NoError(t, inspector.Inspect(from, duplicateMsgIDRpc))
+	require.Eventually(t, func() bool {
+		return logCounter.Load() == 1
+	}, time.Second, 500*time.Millisecond)
+	cancel()
+	unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestIHaveInspection_DuplicateTopicIds_AboveThreshold ensures inspector disseminates an invalid control message notification for
+// iHave messages when duplicate topic ids are above allowed threshold.
+func TestIHaveInspection_DuplicateTopicIds_AboveThreshold(t *testing.T) {
+	logCounter := atomic.NewInt64(0)
+	logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+	inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+		params.Logger = logger
+	})
+	validTopic := fmt.Sprintf("%s/%s", channels.PushBlocks.String(), sporkID)
+	// avoid unknown topics errors
+	topicProviderOracle.UpdateTopics([]string{validTopic})
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	validTopicIHave := unittest.P2PRPCIHaveFixture(&validTopic, unittest.IdentifierListFixture(5).Strings()...)
+	ihaves := []*pubsub_pb.ControlIHave{validTopicIHave}
+	// duplicate the valid topic id on other iHave messages, with different message ids, beyond the threshold
+	for i := 0; i < cfg.NetworkConfig.GossipSub.RpcInspector.Validation.IHave.DuplicateTopicIdThreshold+2; i++ {
+		ihaves = append(ihaves, unittest.P2PRPCIHaveFixture(&validTopic, unittest.IdentifierListFixture(5).Strings()...))
+	}
+	// creates an RPC with duplicate topic ids but different message ids
+	duplicateMsgIDRpc := unittest.P2PRPCFixture(unittest.WithIHaves(ihaves...))
+	from := unittest.PeerIdFixture(t)
+	idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+	rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+
+	// one notification should be disseminated for invalid messages when the number of duplicates exceeds the threshold
+	checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgIHave, validation.IsDuplicateTopicIDThresholdExceeded, p2p.CtrlMsgNonClusterTopicType)
+	consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification)
+
+	inspector.Start(signalerCtx)
+	unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+	require.NoError(t, inspector.Inspect(from, duplicateMsgIDRpc))
+	require.Eventually(t, func() bool {
+		return logCounter.Load() == 1
+	}, time.Second, 500*time.Millisecond)
+	cancel()
+	unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestIHaveInspection_DuplicateMessageIds_BelowThreshold ensures inspector does not disseminate an invalid control message notification for
+// iHave messages when duplicate message ids are below allowed threshold.
+func TestIHaveInspection_DuplicateMessageIds_BelowThreshold(t *testing.T) {
+	logCounter := atomic.NewInt64(0)
+	logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+	inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+		params.Logger = logger
+	})
+	validTopic := fmt.Sprintf("%s/%s", channels.PushBlocks.String(), sporkID)
+	// avoid unknown topics errors
+	topicProviderOracle.UpdateTopics([]string{validTopic})
+	duplicateMsgID := unittest.IdentifierFixture()
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	msgIds := flow.IdentifierList{}
+	// includes as many duplicates as allowed by the threshold
+	for i := 0; i < cfg.NetworkConfig.GossipSub.RpcInspector.Validation.IHave.DuplicateMessageIdThreshold; i++ {
+		msgIds = append(msgIds, duplicateMsgID)
+	}
+	duplicateMsgIDIHave := unittest.P2PRPCIHaveFixture(&validTopic, append(msgIds, unittest.IdentifierListFixture(5)...).Strings()...)
+	duplicateMsgIDRpc := unittest.P2PRPCFixture(unittest.WithIHaves(duplicateMsgIDIHave))
+	from := unittest.PeerIdFixture(t)
+	idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+	rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+
+	// no notification should be disseminated for valid messages as long as the number of duplicates is below the threshold
+	consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif"))
+	inspector.Start(signalerCtx)
+	unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+	require.NoError(t, inspector.Inspect(from, duplicateMsgIDRpc))
+	require.Eventually(t, func() bool {
+		return logCounter.Load() == 1
+	}, time.Second, 500*time.Millisecond)
+	cancel()
+	unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestIHaveInspection_DuplicateMessageIds_AboveThreshold ensures inspector disseminates an invalid control message notification for
+// iHave messages when duplicate message ids are above allowed threshold.
+func TestIHaveInspection_DuplicateMessageIds_AboveThreshold(t *testing.T) {
+	logCounter := atomic.NewInt64(0)
+	logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+	inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+		params.Logger = logger
+	})
+	validTopic := fmt.Sprintf("%s/%s", channels.PushBlocks.String(), sporkID)
+	// avoid unknown topics errors
+	topicProviderOracle.UpdateTopics([]string{validTopic})
+	duplicateMsgID := unittest.IdentifierFixture()
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	msgIds := flow.IdentifierList{}
+	// include enough duplicates to exceed the threshold
+	for i := 0; i < cfg.NetworkConfig.GossipSub.RpcInspector.Validation.IHave.DuplicateMessageIdThreshold+2; i++ {
+		msgIds = append(msgIds, duplicateMsgID)
+	}
+	duplicateMsgIDIHave := unittest.P2PRPCIHaveFixture(&validTopic, append(msgIds, unittest.IdentifierListFixture(5)...).Strings()...)
+	duplicateMsgIDRpc := unittest.P2PRPCFixture(unittest.WithIHaves(duplicateMsgIDIHave))
+	from := unittest.PeerIdFixture(t)
+	idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+	rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+
+	// one notification should be disseminated for invalid messages when the number of duplicates exceeds the threshold
+	checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgIHave, validation.IsDuplicateMessageIDErr, p2p.CtrlMsgNonClusterTopicType)
+	consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification)
+	inspector.Start(signalerCtx)
+	unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+	require.NoError(t, inspector.Inspect(from, duplicateMsgIDRpc))
+	require.Eventually(t, func() bool {
+		return logCounter.Load() == 1
+	}, time.Second, 500*time.Millisecond)
+	cancel()
+	unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestIWantInspection_DuplicateMessageIds_BelowThreshold ensures inspector does not disseminate an invalid control message notification for
+// iWant messages when duplicate message ids are below allowed threshold.
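+// The fixture iWant repeats a single message id DuplicateMsgIdThreshold-2 times and pads it with 5 unique ids,
+// keeping the duplicate count safely under the threshold.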
+func TestIWantInspection_DuplicateMessageIds_BelowThreshold(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+ inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ params.Logger = logger
+ })
+ // oracle must be set even though iWant messages do not have topic IDs
+ duplicateMsgID := unittest.IdentifierFixture()
+ duplicates := flow.IdentifierList{}
+ cfg, err := config.DefaultConfig()
+ require.NoError(t, err)
+ // include two fewer duplicates than the threshold allows
+ for i := 0; i < int(cfg.NetworkConfig.GossipSub.RpcInspector.Validation.IWant.DuplicateMsgIdThreshold)-2; i++ {
+ duplicates = append(duplicates, duplicateMsgID)
+ }
+ msgIds := append(duplicates, unittest.IdentifierListFixture(5)...).Strings()
+ duplicateMsgIDIWant := unittest.P2PRPCIWantFixture(msgIds...)
+
+ duplicateMsgIDRpc := unittest.P2PRPCFixture(unittest.WithIWants(duplicateMsgIDIWant))
+
+ from := unittest.PeerIdFixture(t)
+ idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+ // no notification should be disseminated for valid messages as long as the number of duplicates is below the threshold
+ consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif"))
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Run(func(args mock.Arguments) {
+ id, ok := args[0].(string)
+ require.True(t, ok)
+ require.Contains(t, msgIds, id)
+ }).Maybe() // if the iWant message id count does not exceed the cache miss check size, this method is not called; this test does not depend on it either way
+
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+ require.NoError(t, inspector.Inspect(from, duplicateMsgIDRpc))
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestIWantInspection_DuplicateMessageIds_AboveThreshold ensures the inspector disseminates invalid control message notifications for iWant messages when duplicate message ids exceed the allowed threshold.
+func TestIWantInspection_DuplicateMessageIds_AboveThreshold(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+ inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ params.Logger = logger
+ })
+ // oracle must be set even though iWant messages do not have topic IDs
+ duplicateMsgID := unittest.IdentifierFixture()
+ duplicates := flow.IdentifierList{}
+ cfg, err := config.DefaultConfig()
+ require.NoError(t, err)
+ // include more duplicates than the threshold allows
+ for i := 0; i < int(cfg.NetworkConfig.GossipSub.RpcInspector.Validation.IWant.DuplicateMsgIdThreshold)+2; i++ {
+ duplicates = append(duplicates, duplicateMsgID)
+ }
+ msgIds := append(duplicates, unittest.IdentifierListFixture(5)...).Strings()
+ duplicateMsgIDIWant := unittest.P2PRPCIWantFixture(msgIds...)
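+ // the iWant above carries DuplicateMsgIdThreshold+2 copies of duplicateMsgID plus 5 unique ids, pushing the duplicate count past the threshold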
+
+ duplicateMsgIDRpc := unittest.P2PRPCFixture(unittest.WithIWants(duplicateMsgIDIWant))
+
+ from := unittest.PeerIdFixture(t)
+ idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+ checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgIWant, validation.IsIWantDuplicateMsgIDThresholdErr, p2p.CtrlMsgNonClusterTopicType)
+ consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification)
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Run(func(args mock.Arguments) {
+ id, ok := args[0].(string)
+ require.True(t, ok)
+ require.Contains(t, msgIds, id)
+ }).Maybe() // if the iWant message id count does not exceed the cache miss check size, this method is not called; this test does not depend on it either way
+
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+ require.NoError(t, inspector.Inspect(from, duplicateMsgIDRpc))
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestIWantInspection_CacheMiss_AboveThreshold ensures the inspector disseminates invalid control message notifications for iWant messages when cache misses exceed the allowed threshold.
+func TestIWantInspection_CacheMiss_AboveThreshold(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+ inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ // set a high cache miss threshold so a notification is only disseminated once it is exceeded
+ params.Config.IWant.CacheMissThreshold = 900
+ params.Logger = logger
+ })
+ // 10 iwant messages, each with 100 message ids; total of 1000 message ids, which when imitated as cache misses should trigger notification dissemination.
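+ // with CacheMissThreshold set to 900, the miss count first exceeds the threshold on the 901st checked id; the wait group below therefore expects exactly 901 cache miss checks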
+ inspectMsgRpc := unittest.P2PRPCFixture(unittest.WithIWants(unittest.P2PRPCIWantFixtures(10, 100)...))
+
+ from := unittest.PeerIdFixture(t)
+ idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+ checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgIWant, validation.IsIWantCacheMissThresholdErr, p2p.CtrlMsgNonClusterTopicType)
+ consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification)
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ // return false each time to imitate cache misses; a notification is forced once the miss count exceeds the configured threshold of 900
+ allIwantsChecked := sync.WaitGroup{}
+ allIwantsChecked.Add(901) // 901 message ids are expected to be checked before the miss count exceeds the threshold
+ rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(false).Run(func(args mock.Arguments) {
+ defer allIwantsChecked.Done()
+
+ id, ok := args[0].(string)
+ require.True(t, ok)
+ found := false
+ for _, iwant := range inspectMsgRpc.GetControl().GetIwant() {
+ for _, messageID := range iwant.GetMessageIDs() {
+ if id == messageID {
+ found = true
+ }
+ }
+ }
+ require.True(t, found)
+ })
+
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+ require.NoError(t, inspector.Inspect(from, inspectMsgRpc))
+ unittest.RequireReturnsBefore(t, allIwantsChecked.Wait, 1*time.Second, "all iwant messages should be checked for cache misses")
+
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestIWantInspection_CacheMiss_BelowThreshold ensures the inspector does not disseminate an invalid control message notification for iWant messages when cache misses are below the allowed threshold.
+func TestIWantInspection_CacheMiss_BelowThreshold(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+ inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ // set the cache miss threshold one above the number of message ids sent so no notification is disseminated in this test
+ params.Config.IWant.CacheMissThreshold = 99
+ params.Logger = logger
+ })
+ // the notification consumer must never be called in this test
+ defer consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification")
+
+ msgIds := unittest.IdentifierListFixture(98).Strings() // one less than cache miss threshold
+ inspectMsgRpc := unittest.P2PRPCFixture(unittest.WithIWants(unittest.P2PRPCIWantFixture(msgIds...)))
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+
+ allIwantsChecked := sync.WaitGroup{}
+ allIwantsChecked.Add(len(msgIds))
+ // returns false each time to imitate cache misses; however, since the number of cache misses is below the threshold, no notification should be disseminated.
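+ // (98 message ids against a threshold of 99: even if every id misses the cache, the count stays below the threshold)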
+ rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(false).Run(func(args mock.Arguments) { + defer allIwantsChecked.Done() + id, ok := args[0].(string) + require.True(t, ok) + require.Contains(t, msgIds, id) + }) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once() + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + require.NoError(t, inspector.Inspect(from, inspectMsgRpc)) + unittest.RequireReturnsBefore(t, allIwantsChecked.Wait, 1*time.Second, "all iwant messages should be checked for cache misses") + + require.Eventually(t, func() bool { + return logCounter.Load() == 1 + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") +} + +// TestControlMessageInspection_ExceedingErrThreshold ensures inspector disseminates invalid control message notifications for RPCs that exceed the configured error threshold. +func TestPublishMessageInspection_ExceedingErrThreshold(t *testing.T) { + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog) + errThreshold := 500 + inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config.PublishMessages.ErrorThreshold = errThreshold + params.Logger = logger + }) + // create unknown topic + unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", unittest.IdentifierFixture(), sporkID)).String() + // create malformed topic + malformedTopic := channels.Topic("!@#$%^&**((").String() + // a topics spork ID is considered invalid if it does not match the current spork ID + invalidSporkIDTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, unittest.IdentifierFixture())).String() + publisher := unittest.PeerIdFixture(t) + // create 10 normal messages + pubsubMsgs := unittest.GossipSubMessageFixtures(50, fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID), p2ptest.WithFrom(publisher)) + // add 550 invalid messages to force notification dissemination + invalidMessageFixtures := []*pubsub_pb.Message{ + {Topic: &unknownTopic, From: []byte(publisher)}, + {Topic: &malformedTopic, From: []byte(publisher)}, + {Topic: &invalidSporkIDTopic, From: []byte(publisher)}, + } + for i := 0; i < errThreshold+1; i++ { + pubsubMsgs = append(pubsubMsgs, invalidMessageFixtures[rand.Intn(len(invalidMessageFixtures))]) + } + rpc := unittest.P2PRPCFixture(unittest.WithPubsubMessages(pubsubMsgs...)) + topics := make([]string, len(pubsubMsgs)) + for i, msg := range pubsubMsgs { + topics[i] = *msg.Topic + } + + // set topic oracle to return list of topics to avoid hasSubscription errors and force topic validation + topicProviderOracle.UpdateTopics(topics) + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true) + idProvider.On("ByPeerID", publisher).Return(nil, false) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + + checkNotification := checkNotificationFunc(t, from, p2pmsg.RpcPublishMessage, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType) + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) + + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) 
+
+ require.NoError(t, inspector.Inspect(from, rpc))
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestPublishMessageInspection_MissingSubscription ensures the inspector disseminates invalid control message notifications for published messages on topics the peer is not subscribed to.
+func TestPublishMessageInspection_MissingSubscription(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+ errThreshold := 500
+ inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ params.Config.PublishMessages.ErrorThreshold = errThreshold
+ params.Logger = logger
+ })
+ publisher := unittest.PeerIdFixture(t)
+ pubsubMsgs := unittest.GossipSubMessageFixtures(errThreshold+1, fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID), p2ptest.WithFrom(publisher))
+ from := unittest.PeerIdFixture(t)
+ idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true)
+ rpc := unittest.P2PRPCFixture(unittest.WithPubsubMessages(pubsubMsgs...))
+ checkNotification := checkNotificationFunc(t, from, p2pmsg.RpcPublishMessage, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType)
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification)
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+ require.NoError(t, inspector.Inspect(from, rpc))
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestPublishMessageInspection_MissingTopic ensures the inspector disseminates invalid control message notifications for published messages with missing topics.
+func TestPublishMessageInspection_MissingTopic(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+ errThreshold := 500
+ inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ // errThreshold+1 invalid pubsub messages will force notification dissemination
+ params.Config.PublishMessages.ErrorThreshold = errThreshold
+ params.Logger = logger
+ })
+ publisher := unittest.PeerIdFixture(t)
+ pubsubMsgs := unittest.GossipSubMessageFixtures(errThreshold+1, "", p2ptest.WithFrom(publisher))
+ rpc := unittest.P2PRPCFixture(unittest.WithPubsubMessages(pubsubMsgs...))
+ for _, msg := range pubsubMsgs {
+ msg.Topic = nil
+ }
+ from := unittest.PeerIdFixture(t)
+ idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true)
+ checkNotification := checkNotificationFunc(t, from, p2pmsg.RpcPublishMessage, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType)
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification)
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+ require.NoError(t, inspector.Inspect(from, rpc))
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestRpcInspectionDeactivatedOnPublicNetwork ensures the inspector does not inspect RPCs on public networks.
+func TestRpcInspectionDeactivatedOnPublicNetwork(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+ inspector, signalerCtx, cancel, _, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+ params.Logger = logger
+ params.NetworkingType = network.PublicNetwork
+ })
+ from := unittest.PeerIdFixture(t)
+ defer idProvider.AssertNotCalled(t, "ByPeerID", from)
+ topic := fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID)
+ topicProviderOracle.UpdateTopics([]string{topic})
+ pubsubMsgs := unittest.GossipSubMessageFixtures(10, topic, unittest.WithFrom(from))
+ rpc := unittest.P2PRPCFixture(unittest.WithPubsubMessages(pubsubMsgs...))
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ require.NoError(t, inspector.Inspect(from, rpc))
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestInspection_Unstaked_Peer ensures the inspector disseminates invalid control message notifications for RPCs from unstaked peers when running on a private network.
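+// Inspect is expected to return an error immediately, and exactly one notification carrying an unstaked-peer error should be disseminated.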
+func TestInspection_Unstaked_Peer(t *testing.T) {
+ inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+ // override the inspector and params, run the inspector in private mode
+ params.NetworkingType = network.PrivateNetwork
+ })
+ unstakedPeer := unittest.PeerIdFixture(t)
+ topic := fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID)
+ topicProviderOracle.UpdateTopics([]string{topic})
+ idProvider.On("ByPeerID", unstakedPeer).Return(nil, false).Once()
+ checkNotification := checkNotificationFunc(t, unstakedPeer, p2pmsg.CtrlMsgRPC, validation.IsErrUnstakedPeer, p2p.CtrlMsgNonClusterTopicType)
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification)
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+ require.Error(t, inspector.Inspect(unstakedPeer, unittest.P2PRPCFixture()))
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestPublishMessageInspection_Unstaked_From ensures the inspector disseminates invalid control message notifications for published messages from unstaked peers.
+func TestPublishMessageInspection_Unstaked_From(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+ inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+ // override the inspector and params, run the inspector in private mode
+ params.NetworkingType = network.PrivateNetwork
+ params.Logger = logger
+ })
+ from := unittest.PeerIdFixture(t)
+ unstakedPeer := unittest.PeerIdFixture(t)
+ topic := fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID)
+ topicProviderOracle.UpdateTopics([]string{topic})
+ // default RpcMessageErrorThreshold is 500, 501 messages should trigger a notification
+ pubsubMsgs := unittest.GossipSubMessageFixtures(501, topic, unittest.WithFrom(unstakedPeer))
+ idProvider.On("ByPeerID", unstakedPeer).Return(nil, false)
+ idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true)
+ rpc := unittest.P2PRPCFixture(unittest.WithPubsubMessages(pubsubMsgs...))
+ checkNotification := checkNotificationFunc(t, from, p2pmsg.RpcPublishMessage, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType)
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification)
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+ require.NoError(t, inspector.Inspect(from, rpc))
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestPublishMessageInspection_Ejected_From ensures the inspector disseminates invalid control message notifications for published messages from ejected peers.
+func TestPublishMessageInspection_Ejected_From(t *testing.T) { + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog) + inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) { + // override the inspector and params, run the inspector in private mode + params.NetworkingType = network.PrivateNetwork + params.Logger = logger + }) + + from := unittest.PeerIdFixture(t) + id := unittest.IdentityFixture() + + ejectedNode := unittest.PeerIdFixture(t) + ejectedId := unittest.IdentityFixture() + ejectedId.EpochParticipationStatus = flow.EpochParticipationStatusEjected + + topic := fmt.Sprintf("%s/%s", channels.TestNetworkChannel, sporkID) + topicProviderOracle.UpdateTopics([]string{topic}) + pubsubMsgs := unittest.GossipSubMessageFixtures(501, topic, unittest.WithFrom(ejectedNode)) + idProvider.On("ByPeerID", ejectedNode).Return(ejectedId, true) + idProvider.On("ByPeerID", from).Return(id, true) + + rpc := unittest.P2PRPCFixture(unittest.WithPubsubMessages(pubsubMsgs...)) + checkNotification := checkNotificationFunc(t, from, p2pmsg.RpcPublishMessage, validation.IsInvalidRpcPublishMessagesErr, p2p.CtrlMsgNonClusterTopicType) + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification) + inspector.Start(signalerCtx) + unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector) + + require.NoError(t, inspector.Inspect(from, rpc)) + require.Eventually(t, func() bool { + return logCounter.Load() == 1 + }, time.Second, 500*time.Millisecond) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") +} + +// TestNewControlMsgValidationInspector_validateClusterPrefixedTopic ensures cluster prefixed topics are validated as expected. 
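+// Three scenarios are covered: a topic prefixed with an active cluster ID, unknown cluster IDs below the hard threshold, and unknown cluster IDs beyond it.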
+func TestNewControlMsgValidationInspector_validateClusterPrefixedTopic(t *testing.T) {
+ t.Run("validateClusterPrefixedTopic should not return an error for valid cluster prefixed topics", func(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+
+ inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+ params.Logger = logger
+ })
+ defer consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification")
+ clusterID := flow.ChainID(unittest.IdentifierFixture().String())
+ clusterPrefixedTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.SyncCluster(clusterID), sporkID)).String()
+ topicProviderOracle.UpdateTopics([]string{clusterPrefixedTopic})
+ from := unittest.PeerIdFixture(t)
+ idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+ inspectMsgRpc := unittest.P2PRPCFixture(unittest.WithGrafts(unittest.P2PRPCGraftFixture(&clusterPrefixedTopic)))
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ inspector.ActiveClustersChanged(flow.ChainIDList{clusterID, flow.ChainID(unittest.IdentifierFixture().String()), flow.ChainID(unittest.IdentifierFixture().String())})
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+ require.NoError(t, inspector.Inspect(from, inspectMsgRpc))
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+ })
+
+ t.Run("validateClusterPrefixedTopic should not return error if cluster prefixed hard threshold not exceeded for unknown cluster ids", func(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+ inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ // set the hard threshold to a small number to ensure that a single unknown cluster prefixed topic does not cause a notification to be disseminated
+ params.Config.ClusterPrefixedMessage.HardThreshold = 2
+ params.Logger = logger
+ })
+ defer consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification")
+ clusterID := flow.ChainID(unittest.IdentifierFixture().String())
+ clusterPrefixedTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.SyncCluster(clusterID), sporkID)).String()
+ from := unittest.PeerIdFixture(t)
+ inspectMsgRpc := unittest.P2PRPCFixture(unittest.WithGrafts(unittest.P2PRPCGraftFixture(&clusterPrefixedTopic)))
+ id := unittest.IdentityFixture()
+ idProvider.On("ByPeerID", from).Return(id, true).Once()
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+ require.NoError(t, inspector.Inspect(from, inspectMsgRpc))
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+ })
+
+ t.Run("validateClusterPrefixedTopic should return error if cluster prefixed hard threshold exceeded for unknown cluster ids", func(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel,
worker.QueuedItemProcessedLog)
+ inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle := inspectorFixture(t, func(params *validation.InspectorParams) {
+ // the 11th RPC with an unknown cluster prefixed topic pushes the count past the hard threshold and should trigger a notification
+ params.Config.ClusterPrefixedMessage.HardThreshold = 10
+ params.Config.GraftPrune.InvalidTopicIdThreshold = 0
+ params.Logger = logger
+ })
+ clusterID := flow.ChainID(unittest.IdentifierFixture().String())
+ clusterPrefixedTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.SyncCluster(clusterID), sporkID)).String()
+ topicProviderOracle.UpdateTopics([]string{clusterPrefixedTopic})
+ from := unittest.PeerIdFixture(t)
+ identity := unittest.IdentityFixture()
+ idProvider.On("ByPeerID", from).Return(identity, true).Times(11)
+ checkNotification := checkNotificationFunc(t, from, p2pmsg.CtrlMsgGraft, validation.IsInvalidTopicIDThresholdExceeded, p2p.CtrlMsgTopicTypeClusterPrefixed)
+ inspectMsgRpc := unittest.P2PRPCFixture(unittest.WithGrafts(unittest.P2PRPCGraftFixture(&clusterPrefixedTopic)))
+ inspector.ActiveClustersChanged(flow.ChainIDList{flow.ChainID(unittest.IdentifierFixture().String())})
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(checkNotification)
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+
+ for i := 0; i < 11; i++ {
+ require.NoError(t, inspector.Inspect(from, inspectMsgRpc))
+ }
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 11
+ }, time.Second, 100*time.Millisecond)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+ })
+}
+
+// TestControlMessageValidationInspector_ActiveClustersChanged validates the expected update of the active cluster IDs list.
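+// After the update, grafts for topics prefixed with each of the newly activated cluster IDs must pass inspection without any notification being disseminated.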
+func TestControlMessageValidationInspector_ActiveClustersChanged(t *testing.T) {
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+
+ inspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ params.Logger = logger
+ })
+ defer consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification")
+ identity := unittest.IdentityFixture()
+ idProvider.On("ByPeerID", mock.AnythingOfType("peer.ID")).Return(identity, true).Times(5)
+ activeClusterIds := make(flow.ChainIDList, 0)
+ for _, id := range unittest.IdentifierListFixture(5) {
+ activeClusterIds = append(activeClusterIds, flow.ChainID(id.String()))
+ }
+ inspector.ActiveClustersChanged(activeClusterIds)
+ inspector.Start(signalerCtx)
+ unittest.RequireComponentsReadyBefore(t, 1*time.Second, inspector)
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ from := unittest.PeerIdFixture(t)
+ for _, id := range activeClusterIds {
+ topic := channels.Topic(fmt.Sprintf("%s/%s", channels.SyncCluster(id), sporkID)).String()
+ rpc := unittest.P2PRPCFixture(unittest.WithGrafts(unittest.P2PRPCGraftFixture(&topic)))
+ require.NoError(t, inspector.Inspect(from, rpc))
+ }
+ // wait until every RPC has been processed
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == int64(len(activeClusterIds))
+ }, time.Second, 500*time.Millisecond)
+
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+}
+
+// TestControlMessageValidationInspector_TruncationConfigToggle ensures that RPCs are not truncated when truncation is disabled through the config.
+func TestControlMessageValidationInspector_TruncationConfigToggle(t *testing.T) {
+ t.Run("should not perform truncation when disabled is set to true", func(t *testing.T) {
+ numOfMsgs := 5000
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, validation.RPCTruncationDisabledWarning, worker.QueuedItemProcessedLog)
+ inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ params.Config.GraftPrune.MessageCountThreshold = numOfMsgs
+ params.Logger = logger
+ // disable truncation for all control message types
+ params.Config.InspectionProcess.Truncate.Disabled = true
+ })
+
+ // topic validation is not exercised here, so any topic oracle will do
+ consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Maybe()
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe()
+ inspector.Start(signalerCtx)
+
+ rpc := unittest.P2PRPCFixture(
+ unittest.WithGrafts(unittest.P2PRPCGraftFixtures(unittest.IdentifierListFixture(numOfMsgs).Strings()...)...),
+ unittest.WithPrunes(unittest.P2PRPCPruneFixtures(unittest.IdentifierListFixture(numOfMsgs).Strings()...)...),
+ unittest.WithIHaves(unittest.P2PRPCIHaveFixtures(numOfMsgs, unittest.IdentifierListFixture(numOfMsgs).Strings()...)...),
+ unittest.WithIWants(unittest.P2PRPCIWantFixtures(numOfMsgs, numOfMsgs)...),
+ )
+
+ from := unittest.PeerIdFixture(t)
+ idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+ require.NoError(t, inspector.Inspect(from, rpc))
+
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 2
+ }, time.Second,
500*time.Millisecond)
+
+ // ensure truncation not performed
+ require.Len(t, rpc.GetControl().GetGraft(), numOfMsgs)
+ require.Len(t, rpc.GetControl().GetPrune(), numOfMsgs)
+ require.Len(t, rpc.GetControl().GetIhave(), numOfMsgs)
+ ensureMessageIdsLen(t, p2pmsg.CtrlMsgIHave, rpc, numOfMsgs)
+ require.Len(t, rpc.GetControl().GetIwant(), numOfMsgs)
+ ensureMessageIdsLen(t, p2pmsg.CtrlMsgIWant, rpc, numOfMsgs)
+
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+ })
+
+ t.Run("should not perform truncation when disabled for each individual control message type directly", func(t *testing.T) {
+ numOfMsgs := 5000
+ expectedLogStrs := []string{
+ validation.GraftTruncationDisabledWarning,
+ validation.PruneTruncationDisabledWarning,
+ validation.IHaveTruncationDisabledWarning,
+ validation.IHaveMessageIDTruncationDisabledWarning,
+ validation.IWantTruncationDisabledWarning,
+ validation.IWantMessageIDTruncationDisabledWarning,
+ worker.QueuedItemProcessedLog,
+ }
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, expectedLogStrs...)
+ inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ params.Config.GraftPrune.MessageCountThreshold = numOfMsgs
+ params.Logger = logger
+ // disable truncation for all control message types individually
+ params.Config.InspectionProcess.Truncate.EnableGraft = false
+ params.Config.InspectionProcess.Truncate.EnablePrune = false
+ params.Config.InspectionProcess.Truncate.EnableIHave = false
+ params.Config.InspectionProcess.Truncate.EnableIHaveMessageIds = false
+ params.Config.InspectionProcess.Truncate.EnableIWant = false
+ params.Config.InspectionProcess.Truncate.EnableIWantMessageIds = false
+ })
+
+ // topic validation is not exercised here, so any topic oracle will do
+ consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Maybe()
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe()
+ inspector.Start(signalerCtx)
+
+ rpc := unittest.P2PRPCFixture(
+ unittest.WithGrafts(unittest.P2PRPCGraftFixtures(unittest.IdentifierListFixture(numOfMsgs).Strings()...)...),
+ unittest.WithPrunes(unittest.P2PRPCPruneFixtures(unittest.IdentifierListFixture(numOfMsgs).Strings()...)...),
+ unittest.WithIHaves(unittest.P2PRPCIHaveFixtures(numOfMsgs, unittest.IdentifierListFixture(numOfMsgs).Strings()...)...),
+ unittest.WithIWants(unittest.P2PRPCIWantFixtures(numOfMsgs, numOfMsgs)...),
+ )
+
+ from := unittest.PeerIdFixture(t)
+ idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once()
+ require.NoError(t, inspector.Inspect(from, rpc))
+
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == int64(len(expectedLogStrs))
+ }, time.Second, 500*time.Millisecond)
+
+ // ensure truncation not performed
+ require.Len(t, rpc.GetControl().GetGraft(), numOfMsgs)
+ require.Len(t, rpc.GetControl().GetPrune(), numOfMsgs)
+ require.Len(t, rpc.GetControl().GetIhave(), numOfMsgs)
+ ensureMessageIdsLen(t, p2pmsg.CtrlMsgIHave, rpc, numOfMsgs)
+ require.Len(t, rpc.GetControl().GetIwant(), numOfMsgs)
+ ensureMessageIdsLen(t, p2pmsg.CtrlMsgIWant, rpc, numOfMsgs)
+
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+ })
+}
+
+// TestControlMessageValidationInspector_InspectionConfigToggle ensures that
RPCs are not inspected when inspection is disabled through the config.
+func TestControlMessageValidationInspector_InspectionConfigToggle(t *testing.T) {
+ t.Run("should not perform inspection when disabled is set to true", func(t *testing.T) {
+ numOfMsgs := 5000
+ logCounter := atomic.NewInt64(0)
+ logger := hookedLogger(logCounter, zerolog.TraceLevel, validation.RPCInspectionDisabledWarning)
+ inspector, signalerCtx, cancel, consumer, rpcTracker, _, _, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ params.Logger = logger
+ // disable inspection for all control message types
+ params.Config.InspectionProcess.Inspect.Disabled = true
+ })
+
+ // notification consumer should never be called when inspection is disabled
+ defer consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification")
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe()
+ inspector.Start(signalerCtx)
+
+ rpc := unittest.P2PRPCFixture(
+ unittest.WithGrafts(unittest.P2PRPCGraftFixtures(unittest.IdentifierListFixture(numOfMsgs).Strings()...)...),
+ unittest.WithPrunes(unittest.P2PRPCPruneFixtures(unittest.IdentifierListFixture(numOfMsgs).Strings()...)...),
+ unittest.WithIHaves(unittest.P2PRPCIHaveFixtures(numOfMsgs, unittest.IdentifierListFixture(numOfMsgs).Strings()...)...),
+ unittest.WithIWants(unittest.P2PRPCIWantFixtures(numOfMsgs, numOfMsgs)...),
+ )
+
+ from := unittest.PeerIdFixture(t)
+ require.NoError(t, inspector.Inspect(from, rpc))
+
+ require.Eventually(t, func() bool {
+ return logCounter.Load() == 1
+ }, time.Second, 500*time.Millisecond)
+
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+ })
+
+ t.Run("should not check identity when reject-unstaked-peers is false", func(t *testing.T) {
+ inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ // do not reject RPCs from unstaked peers
+ params.Config.InspectionProcess.Inspect.RejectUnstakedPeers = false
+ })
+
+ // notification consumer should never be called when unstaked peers are not rejected
+ defer consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification")
+ rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe()
+ rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe()
+
+ from := unittest.PeerIdFixture(t)
+
+ defer idProvider.AssertNotCalled(t, "ByPeerID", from)
+ inspector.Start(signalerCtx)
+
+ require.NoError(t, inspector.Inspect(from, unittest.P2PRPCFixture()))
+
+ time.Sleep(time.Second)
+ cancel()
+ unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop")
+ })
+
+ t.Run("should check identity when reject-unstaked-peers is true", func(t *testing.T) {
+ inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) {
+ // reject RPCs from unstaked peers
+ params.Config.InspectionProcess.Inspect.RejectUnstakedPeers = true
+ })
+
+ // notification consumer should be called exactly once with an unstaked peer error
+ consumer.On("OnInvalidControlMessageNotification", mock.AnythingOfType("*p2p.InvCtrlMsgNotif")).Return(nil).Once().Run(func(args mock.Arguments) {
+ notification, ok := args.Get(0).(*p2p.InvCtrlMsgNotif)
+ require.True(t, ok)
+ require.True(t, validation.IsErrUnstakedPeer(notification.Error))
+ })
+
rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe() + + from := unittest.PeerIdFixture(t) + + idProvider.On("ByPeerID", from).Return(nil, false).Once() + inspector.Start(signalerCtx) + + require.Error(t, inspector.Inspect(from, unittest.P2PRPCFixture())) + + time.Sleep(time.Second) + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") + }) + + t.Run("should not perform inspection when disabled for each individual control message type directly", func(t *testing.T) { + numOfMsgs := 5000 + expectedLogStrs := []string{ + validation.GraftInspectionDisabledWarning, + validation.PruneInspectionDisabledWarning, + validation.IHaveInspectionDisabledWarning, + validation.IWantInspectionDisabledWarning, + validation.PublishInspectionDisabledWarning, + worker.QueuedItemProcessedLog, + } + logCounter := atomic.NewInt64(0) + logger := hookedLogger(logCounter, zerolog.TraceLevel, expectedLogStrs...) + inspector, signalerCtx, cancel, consumer, rpcTracker, _, idProvider, _ := inspectorFixture(t, func(params *validation.InspectorParams) { + params.Config.GraftPrune.MessageCountThreshold = numOfMsgs + params.Logger = logger + // disable inspection for all control message types individually + params.Config.InspectionProcess.Inspect.EnableGraft = false + params.Config.InspectionProcess.Inspect.EnablePrune = false + params.Config.InspectionProcess.Inspect.EnableIHave = false + params.Config.InspectionProcess.Inspect.EnableIWant = false + params.Config.InspectionProcess.Inspect.EnablePublish = false + }) + + // notification consumer should never be called when inspection is disabled + defer consumer.AssertNotCalled(t, "OnInvalidControlMessageNotification") + rpcTracker.On("LastHighestIHaveRPCSize").Return(int64(100)).Maybe() + rpcTracker.On("WasIHaveRPCSent", mock.AnythingOfType("string")).Return(true).Maybe() + inspector.Start(signalerCtx) + + topic, err := randutils.GenerateRandomString(100) + require.NoError(t, err) + + rpc := unittest.P2PRPCFixture( + unittest.WithGrafts(unittest.P2PRPCGraftFixtures(unittest.IdentifierListFixture(numOfMsgs).Strings()...)...), + unittest.WithPrunes(unittest.P2PRPCPruneFixtures(unittest.IdentifierListFixture(numOfMsgs).Strings()...)...), + unittest.WithIHaves(unittest.P2PRPCIHaveFixtures(numOfMsgs, unittest.IdentifierListFixture(numOfMsgs).Strings()...)...), + unittest.WithIWants(unittest.P2PRPCIWantFixtures(numOfMsgs, numOfMsgs)...), + unittest.WithPubsubMessages(unittest.GossipSubMessageFixtures(numOfMsgs, topic, unittest.WithFrom(unittest.PeerIdFixture(t)))...), + ) + + from := unittest.PeerIdFixture(t) + idProvider.On("ByPeerID", from).Return(unittest.IdentityFixture(), true).Once() + require.NoError(t, inspector.Inspect(from, rpc)) + + require.Eventually(t, func() bool { + return logCounter.Load() == int64(len(expectedLogStrs)) + }, time.Second, 500*time.Millisecond) + + cancel() + unittest.RequireCloseBefore(t, inspector.Done(), 5*time.Second, "inspector did not stop") + }) +} + +// invalidTopics returns 3 invalid topics. 
+// - unknown topic
+// - malformed topic
+// - topic with invalid spork ID
+func invalidTopics(t *testing.T, sporkID flow.Identifier) (string, string, string) {
+ // create unknown topic
+ unknownTopic := channels.Topic(fmt.Sprintf("%s/%s", unittest.IdentifierFixture(), sporkID)).String()
+ // create malformed topic
+ malformedTopic, err := randutils.GenerateRandomString(100)
+ require.NoError(t, err)
+ // a topic's spork ID is considered invalid if it does not match the current spork ID
+ invalidSporkIDTopic := channels.Topic(fmt.Sprintf("%s/%s", channels.PushBlocks, unittest.IdentifierFixture())).String()
+ return unknownTopic, malformedTopic, invalidSporkIDTopic
+}
+
+// checkNotificationFunc returns a util func used to ensure the disseminated invalid control message notification contains the expected information.
+func checkNotificationFunc(t *testing.T,
+ expectedPeerID peer.ID,
+ expectedMsgType p2pmsg.ControlMessageType,
+ isExpectedErr func(err error) bool,
+ topicType p2p.CtrlMsgTopicType) func(args mock.Arguments) {
+ return func(args mock.Arguments) {
+ notification, ok := args[0].(*p2p.InvCtrlMsgNotif)
+ require.True(t, ok)
+ require.Equal(t, topicType, notification.TopicType)
+ require.Equal(t, expectedPeerID, notification.PeerID)
+ require.Equal(t, expectedMsgType, notification.MsgType)
+ require.True(t, isExpectedErr(notification.Error))
+ }
+}
+
+func inspectorFixture(t *testing.T, opts ...func(params *validation.InspectorParams)) (*validation.ControlMsgValidationInspector,
+ *irrecoverable.MockSignalerContext,
+ context.CancelFunc, *mockp2p.GossipSubInvCtrlMsgNotifConsumer,
+ *mockp2p.RpcControlTracking,
+ flow.Identifier,
+ *mockmodule.IdentityProvider,
+ *p2ptest.UpdatableTopicProviderFixture) {
+
+ sporkID := unittest.IdentifierFixture()
+ flowConfig, err := config.DefaultConfig()
+ require.NoError(t, err)
+
+ consumer := mockp2p.NewGossipSubInvCtrlMsgNotifConsumer(t)
+ idProvider := mockmodule.NewIdentityProvider(t)
+ rpcTracker := mockp2p.NewRpcControlTracking(t)
+ topicProviderOracle := p2ptest.NewUpdatableTopicProviderFixture()
+ params := &validation.InspectorParams{
+ Logger: unittest.Logger(),
+ SporkID: sporkID,
+ Config: &flowConfig.NetworkConfig.GossipSub.RpcInspector.Validation,
+ IdProvider: idProvider,
+ HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(),
+ InspectorMetrics: metrics.NewNoopCollector(),
+ RpcTracker: rpcTracker,
+ InvalidControlMessageNotificationConsumer: consumer,
+ NetworkingType: network.PrivateNetwork,
+ TopicOracle: func() p2p.TopicProvider {
+ return topicProviderOracle
+ },
+ }
+ for _, opt := range opts {
+ opt(params)
+ }
+ validationInspector, err := validation.NewControlMsgValidationInspector(params)
+ require.NoError(t, err, "failed to create control message validation inspector fixture")
+ ctx, cancel := context.WithCancel(context.Background())
+ signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+ return validationInspector, signalerCtx, cancel, consumer, rpcTracker, sporkID, idProvider, topicProviderOracle
+}
+
+// hookedLogger returns a logger that counts how many of the expected log messages are emitted at the expected log level.
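+// It is used by the tests above roughly as follows (illustrative sketch, not an additional API):
+//
+//	counter := atomic.NewInt64(0)
+//	logger := hookedLogger(counter, zerolog.TraceLevel, worker.QueuedItemProcessedLog)
+//	// run the component under test with this logger, then wait until
+//	// counter.Load() reaches the number of expected log lines.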
+func hookedLogger(counter *atomic.Int64, expectedLogLevel zerolog.Level, expectedLogs ...string) zerolog.Logger { + hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { + if level == expectedLogLevel { + for _, s := range expectedLogs { + if message == s { + counter.Inc() + } + } + } + }) + return zerolog.New(io.Discard).Level(expectedLogLevel).Hook(hook) +} + +// ensureMessageIdsLen ensures RPC IHave and IWant message ids are the expected len. +func ensureMessageIdsLen(t *testing.T, msgType p2pmsg.ControlMessageType, rpc *pubsub.RPC, expectedLen int) { + switch msgType { + case p2pmsg.CtrlMsgIHave: + for _, ihave := range rpc.GetControl().GetIhave() { + require.Len(t, ihave.GetMessageIDs(), expectedLen) + } + case p2pmsg.CtrlMsgIWant: + for _, iwant := range rpc.GetControl().GetIwant() { + require.Len(t, iwant.GetMessageIDs(), expectedLen) + } + default: + require.Fail(t, "control message type provided does not contain message ids expected ihave or iwant") + } +} diff --git a/network/p2p/inspector/validation/errors.go b/network/p2p/inspector/validation/errors.go index ab1cb4be11e..b343e556b23 100644 --- a/network/p2p/inspector/validation/errors.go +++ b/network/p2p/inspector/validation/errors.go @@ -4,117 +4,237 @@ import ( "errors" "fmt" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/p2p" + p2pmsg "github.com/onflow/flow-go/network/p2p/message" ) -// ErrDiscardThreshold indicates that the amount of RPC messages received exceeds discard threshold. -type ErrDiscardThreshold struct { - // controlMsg the control message type. - controlMsg p2p.ControlMessageType - // amount the amount of control messages. - amount uint64 - // discardThreshold configured discard threshold. - discardThreshold uint64 +// IWantDuplicateMsgIDThresholdErr indicates that the amount of duplicate message ids exceeds the allowed threshold. +type IWantDuplicateMsgIDThresholdErr struct { + duplicates int + sampleSize uint + threshold int } -func (e ErrDiscardThreshold) Error() string { - return fmt.Sprintf("number of %s messges received exceeds the configured discard threshold: received %d discard threshold %d", e.controlMsg, e.amount, e.discardThreshold) +func (e IWantDuplicateMsgIDThresholdErr) Error() string { + return fmt.Sprintf("%d/%d iWant duplicate message ids exceeds the allowed threshold: %d", e.duplicates, e.sampleSize, e.threshold) } -// NewDiscardThresholdErr returns a new ErrDiscardThreshold. -func NewDiscardThresholdErr(controlMsg p2p.ControlMessageType, amount, discardThreshold uint64) ErrDiscardThreshold { - return ErrDiscardThreshold{controlMsg: controlMsg, amount: amount, discardThreshold: discardThreshold} +// NewIWantDuplicateMsgIDThresholdErr returns a new IWantDuplicateMsgIDThresholdErr. +func NewIWantDuplicateMsgIDThresholdErr(duplicates int, sampleSize uint, threshold int) IWantDuplicateMsgIDThresholdErr { + return IWantDuplicateMsgIDThresholdErr{duplicates, sampleSize, threshold} } -// IsErrDiscardThreshold returns true if an error is ErrDiscardThreshold -func IsErrDiscardThreshold(err error) bool { - var e ErrDiscardThreshold +// IsIWantDuplicateMsgIDThresholdErr returns true if an error is IWantDuplicateMsgIDThresholdErr +func IsIWantDuplicateMsgIDThresholdErr(err error) bool { + var e IWantDuplicateMsgIDThresholdErr return errors.As(err, &e) } -// ErrInvalidLimitConfig indicates the validation limit is < 0. 
-type ErrInvalidLimitConfig struct {
- // controlMsg the control message type.
- controlMsg p2p.ControlMessageType
- // limit the value of the configuration limit.
- limit uint64
- // limitStr the string representation of the config limit.
- limitStr string
+// IWantCacheMissThresholdErr indicates that the amount of cache misses exceeds the allowed threshold.
+type IWantCacheMissThresholdErr struct {
+ cacheMissCount int // total iwant cache misses
+ sampleSize uint
+ threshold int
 }
 
-func (e ErrInvalidLimitConfig) Error() string {
- return fmt.Sprintf("invalid rpc control message %s validation limit %s configuration value must be greater than 0:%d", e.controlMsg, e.limitStr, e.limit)
+func (e IWantCacheMissThresholdErr) Error() string {
+ return fmt.Sprintf("%d/%d iWant cache misses exceeds the allowed threshold: %d", e.cacheMissCount, e.sampleSize, e.threshold)
 }
 
-// NewInvalidLimitConfigErr returns a new ErrValidationLimit.
-func NewInvalidLimitConfigErr(controlMsg p2p.ControlMessageType, limitStr string, limit uint64) ErrInvalidLimitConfig {
- return ErrInvalidLimitConfig{controlMsg: controlMsg, limit: limit, limitStr: limitStr}
+// NewIWantCacheMissThresholdErr returns a new IWantCacheMissThresholdErr.
+func NewIWantCacheMissThresholdErr(cacheMissCount int, sampleSize uint, threshold int) IWantCacheMissThresholdErr {
+ return IWantCacheMissThresholdErr{cacheMissCount, sampleSize, threshold}
 }
 
-// IsErrInvalidLimitConfig returns whether an error is ErrInvalidLimitConfig
-func IsErrInvalidLimitConfig(err error) bool {
- var e ErrInvalidLimitConfig
+// IsIWantCacheMissThresholdErr returns true if an error is IWantCacheMissThresholdErr
+func IsIWantCacheMissThresholdErr(err error) bool {
+ var e IWantCacheMissThresholdErr
 return errors.As(err, &e)
 }
 
-// ErrRateLimitedControlMsg indicates the specified RPC control message is rate limited for the specified peer.
-type ErrRateLimitedControlMsg struct {
- controlMsg p2p.ControlMessageType
+// DuplicateTopicErr indicates that a duplicate topic ID has been detected in a control message (duplicate message IDs are tracked separately by DuplicateMessageIDErr).
+type DuplicateTopicErr struct {
+ topic string // the topic that is duplicated
+ count int // the number of times the topic has been duplicated
+ msgType p2pmsg.ControlMessageType // the control message type that the topic was found in
 }
 
-func (e ErrRateLimitedControlMsg) Error() string {
- return fmt.Sprintf("control message %s is rate limited for peer", e.controlMsg)
+func (e DuplicateTopicErr) Error() string {
+ return fmt.Sprintf("duplicate topic found in %s control message type: %s", e.msgType, e.topic)
 }
 
-// NewRateLimitedControlMsgErr returns a new ErrValidationLimit.
-func NewRateLimitedControlMsgErr(controlMsg p2p.ControlMessageType) ErrRateLimitedControlMsg {
- return ErrRateLimitedControlMsg{controlMsg: controlMsg}
+// NewDuplicateTopicErr returns a new DuplicateTopicErr.
+// Args:
+//
+// topic: the topic that is duplicated
+// count: the number of times the topic has been duplicated
+// msgType: the control message type that the topic was found in
+//
+// Returns:
+//
+// A new DuplicateTopicErr.
+func NewDuplicateTopicErr(topic string, count int, msgType p2pmsg.ControlMessageType) DuplicateTopicErr {
+ return DuplicateTopicErr{topic, count, msgType}
 }
 
-// IsErrRateLimitedControlMsg returns whether an error is ErrRateLimitedControlMsg
-func IsErrRateLimitedControlMsg(err error) bool {
- var e ErrRateLimitedControlMsg
+// IsDuplicateTopicErr returns true if an error is DuplicateTopicErr.
+func IsDuplicateTopicErr(err error) bool {
+ var e DuplicateTopicErr
 return errors.As(err, &e)
 }
 
-// ErrInvalidTopic error wrapper that indicates an error when checking if a Topic is a valid Flow Topic.
-type ErrInvalidTopic struct {
- topic channels.Topic
- err error
+// DuplicateMessageIDErr error that indicates a duplicate message ID has been detected in an iHave or iWant control message.
+type DuplicateMessageIDErr struct {
+ id string // id of the message that is duplicated
+ count int // the number of times the message ID has been duplicated
+ msgType p2pmsg.ControlMessageType // the control message type that the message ID was found in
 }
 
-func (e ErrInvalidTopic) Error() string {
- return fmt.Errorf("invalid topic %s: %w", e.topic, e.err).Error()
+func (e DuplicateMessageIDErr) Error() string {
+ return fmt.Sprintf("duplicate message ID found in %s control message type: %s", e.msgType, e.id)
 }
 
-// NewInvalidTopicErr returns a new ErrMalformedTopic
-func NewInvalidTopicErr(topic channels.Topic, err error) ErrInvalidTopic {
- return ErrInvalidTopic{topic: topic, err: err}
+// NewDuplicateMessageIDErr returns a new DuplicateMessageIDErr.
+// Args:
+//
+// id: id of the message that is duplicated
+// count: the number of times the message ID has been duplicated
+// msgType: the control message type that the message ID was found in.
+func NewDuplicateMessageIDErr(id string, count int, msgType p2pmsg.ControlMessageType) DuplicateMessageIDErr {
+ return DuplicateMessageIDErr{id, count, msgType}
 }
 
-// IsErrInvalidTopic returns true if an error is ErrInvalidTopic
-func IsErrInvalidTopic(err error) bool {
- var e ErrInvalidTopic
+// IsDuplicateMessageIDErr returns true if an error is DuplicateMessageIDErr.
+func IsDuplicateMessageIDErr(err error) bool {
+ var e DuplicateMessageIDErr
 return errors.As(err, &e)
 }
 
-// ErrDuplicateTopic error that indicates a duplicate topic in control message has been detected.
-type ErrDuplicateTopic struct {
+// ErrActiveClusterIdsNotSet error that indicates a cluster prefixed control message has been received but the cluster IDs have not been set yet.
+type ErrActiveClusterIdsNotSet struct {
 topic channels.Topic
 }
 
-func (e ErrDuplicateTopic) Error() string {
- return fmt.Errorf("duplicate topic %s", e.topic).Error()
+func (e ErrActiveClusterIdsNotSet) Error() string {
+ return fmt.Errorf("failed to validate cluster prefixed topic %s no active cluster IDs set", e.topic).Error()
+}
+
+// NewActiveClusterIdsNotSetErr returns a new ErrActiveClusterIdsNotSet.
+func NewActiveClusterIdsNotSetErr(topic channels.Topic) ErrActiveClusterIdsNotSet {
+ return ErrActiveClusterIdsNotSet{topic: topic}
+}
+
+// IsErrActiveClusterIDsNotSet returns true if an error is ErrActiveClusterIdsNotSet.
+func IsErrActiveClusterIDsNotSet(err error) bool {
+ var e ErrActiveClusterIdsNotSet
+ return errors.As(err, &e)
+}
+
+// ErrUnstakedPeer error that indicates a control message has been received from an unstaked peer.
+type ErrUnstakedPeer struct {
+ pid peer.ID
+}
+
+func (e ErrUnstakedPeer) Error() string {
+ return fmt.Sprintf("unstaked peer: %s", e.pid)
+}
+
+// NewUnstakedPeerErr returns a new ErrUnstakedPeer.
+func NewUnstakedPeerErr(pid peer.ID) ErrUnstakedPeer {
+ return ErrUnstakedPeer{pid: pid}
+}
+
+// IsErrUnstakedPeer returns true if an error is ErrUnstakedPeer.
+func IsErrUnstakedPeer(err error) bool {
+ var e ErrUnstakedPeer
+ return errors.As(err, &e)
+}
+
+// ErrEjectedPeer error that indicates a control message has been received from an ejected peer.
+type ErrEjectedPeer struct {
+ pid peer.ID
+}
+
+func (e ErrEjectedPeer) Error() string {
+ return fmt.Sprintf("ejected peer: %s", e.pid)
+}
+
+// NewEjectedPeerErr returns a new ErrEjectedPeer.
+func NewEjectedPeerErr(pid peer.ID) ErrEjectedPeer {
+ return ErrEjectedPeer{pid: pid}
+}
+
+// IsErrEjectedPeer returns true if an error is ErrEjectedPeer.
+func IsErrEjectedPeer(err error) bool {
+ var e ErrEjectedPeer
+ return errors.As(err, &e)
+}
+
+// InvalidRpcPublishMessagesErr error indicates that rpc publish message validation failed.
+type InvalidRpcPublishMessagesErr struct {
+ // err the original error returned by the calling func.
+ err error
+ // count the number of times this err was encountered.
+ count int
+}
+
+func (e InvalidRpcPublishMessagesErr) Error() string {
+ return fmt.Errorf("rpc publish messages validation failed %d error(s) encountered: %w", e.count, e.err).Error()
+}
+
+// NewInvalidRpcPublishMessagesErr returns a new InvalidRpcPublishMessagesErr.
+func NewInvalidRpcPublishMessagesErr(err error, count int) InvalidRpcPublishMessagesErr {
+ return InvalidRpcPublishMessagesErr{err: err, count: count}
+}
+
+// IsInvalidRpcPublishMessagesErr returns true if an error is InvalidRpcPublishMessagesErr.
+func IsInvalidRpcPublishMessagesErr(err error) bool {
+ var e InvalidRpcPublishMessagesErr
+ return errors.As(err, &e)
+}
+
+// DuplicateTopicIDThresholdExceeded indicates that the number of duplicate topic IDs exceeds the allowed threshold.
+type DuplicateTopicIDThresholdExceeded struct {
+ duplicates int
+ sampleSize int
+ threshold int
+}
+
+func (e DuplicateTopicIDThresholdExceeded) Error() string {
+ return fmt.Sprintf("%d/%d duplicate topic IDs exceed the allowed threshold: %d", e.duplicates, e.sampleSize, e.threshold)
+}
+
+// NewDuplicateTopicIDThresholdExceeded returns a new DuplicateTopicIDThresholdExceeded error.
+func NewDuplicateTopicIDThresholdExceeded(duplicates int, sampleSize int, threshold int) DuplicateTopicIDThresholdExceeded {
+ return DuplicateTopicIDThresholdExceeded{duplicates, sampleSize, threshold}
+}
+
+// IsDuplicateTopicIDThresholdExceeded returns true if an error is DuplicateTopicIDThresholdExceeded
+func IsDuplicateTopicIDThresholdExceeded(err error) bool {
+ var e DuplicateTopicIDThresholdExceeded
+ return errors.As(err, &e)
+}
+
+// InvalidTopicIDThresholdExceeded indicates that the number of invalid topic IDs exceeds the allowed threshold.
+type InvalidTopicIDThresholdExceeded struct {
+ invalidCount int
+ threshold int
+}
+
+func (e InvalidTopicIDThresholdExceeded) Error() string {
+ return fmt.Sprintf("%d invalid topic IDs exceed the allowed threshold: %d", e.invalidCount, e.threshold)
}
 
+// NewInvalidTopicIDThresholdExceeded returns a new InvalidTopicIDThresholdExceeded error.
+func NewInvalidTopicIDThresholdExceeded(invalidCount, threshold int) InvalidTopicIDThresholdExceeded {
+	return InvalidTopicIDThresholdExceeded{invalidCount, threshold}
 }
 
-// IsErrDuplicateTopic returns true if an error is ErrDuplicateTopic
-func IsErrDuplicateTopic(err error) bool {
-	var e ErrDuplicateTopic
+// IsInvalidTopicIDThresholdExceeded returns true if an error is InvalidTopicIDThresholdExceeded.
+func IsInvalidTopicIDThresholdExceeded(err error) bool {
+	var e InvalidTopicIDThresholdExceeded
 	return errors.As(err, &e)
 }
diff --git a/network/p2p/inspector/validation/errors_test.go b/network/p2p/inspector/validation/errors_test.go
new file mode 100644
index 00000000000..cc56ca52fde
--- /dev/null
+++ b/network/p2p/inspector/validation/errors_test.go
@@ -0,0 +1,131 @@
+package validation
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/onflow/flow-go/network/channels"
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+)
+
+// TestErrActiveClusterIDsNotSetRoundTrip ensures correct error formatting for ErrActiveClusterIdsNotSet.
+func TestErrActiveClusterIDsNotSetRoundTrip(t *testing.T) {
+	topic := channels.Topic("test-topic")
+	err := NewActiveClusterIdsNotSetErr(topic)
+
+	// tests the error message formatting.
+	expectedErrMsg := fmt.Errorf("failed to validate cluster prefixed topic %s: no active cluster IDs set", topic).Error()
+	assert.Equal(t, expectedErrMsg, err.Error(), "the error message should be correctly formatted")
+
+	// tests the IsErrActiveClusterIDsNotSet function.
+	assert.True(t, IsErrActiveClusterIDsNotSet(err), "IsErrActiveClusterIDsNotSet should return true for ErrActiveClusterIdsNotSet error")
+
+	// test IsErrActiveClusterIDsNotSet with a different error type.
+	dummyErr := fmt.Errorf("dummy error")
+	assert.False(t, IsErrActiveClusterIDsNotSet(dummyErr), "IsErrActiveClusterIDsNotSet should return false for non-ErrActiveClusterIdsNotSet error")
+}
+
+// TestDuplicateTopicErrRoundTrip ensures correct error formatting for DuplicateTopicErr.
+func TestDuplicateTopicErrRoundTrip(t *testing.T) {
+	expectedErrorMsg := fmt.Sprintf("duplicate topic found in %s control message type: %s", p2pmsg.CtrlMsgGraft, channels.TestNetworkChannel)
+	err := NewDuplicateTopicErr(channels.TestNetworkChannel.String(), 1, p2pmsg.CtrlMsgGraft)
+	assert.Equal(t, expectedErrorMsg, err.Error(), "the error message should be correctly formatted")
+	// tests the IsDuplicateTopicErr function.
+	assert.True(t, IsDuplicateTopicErr(err), "IsDuplicateTopicErr should return true for DuplicateTopicErr error")
+	// test IsDuplicateTopicErr with a different error type.
+	dummyErr := fmt.Errorf("dummy error")
+	assert.False(t, IsDuplicateTopicErr(dummyErr), "IsDuplicateTopicErr should return false for non-DuplicateTopicErr error")
+}
+
+// TestDuplicateMessageIDErrRoundTrip ensures correct error formatting for DuplicateMessageIDErr.
+func TestDuplicateMessageIDErrRoundTrip(t *testing.T) {
+	msgID := "flow-1804flkjnafo"
+	expectedErrMsg1 := fmt.Sprintf("duplicate message ID found in %s control message type: %s", p2pmsg.CtrlMsgIHave, msgID)
+	expectedErrMsg2 := fmt.Sprintf("duplicate message ID found in %s control message type: %s", p2pmsg.CtrlMsgIWant, msgID)
+	err := NewDuplicateMessageIDErr(msgID, 1, p2pmsg.CtrlMsgIHave)
+	assert.Equal(t, expectedErrMsg1, err.Error(), "the error message should be correctly formatted")
+	// tests the IsDuplicateMessageIDErr function.
+	assert.True(t, IsDuplicateMessageIDErr(err), "IsDuplicateMessageIDErr should return true for DuplicateMessageIDErr error")
+	err = NewDuplicateMessageIDErr(msgID, 1, p2pmsg.CtrlMsgIWant)
+	assert.Equal(t, expectedErrMsg2, err.Error(), "the error message should be correctly formatted")
+	// tests the IsDuplicateMessageIDErr function.
+	assert.True(t, IsDuplicateMessageIDErr(err), "IsDuplicateMessageIDErr should return true for DuplicateMessageIDErr error")
+	// test IsDuplicateMessageIDErr with a different error type.
+	dummyErr := fmt.Errorf("dummy error")
+	assert.False(t, IsDuplicateMessageIDErr(dummyErr), "IsDuplicateMessageIDErr should return false for non-DuplicateMessageIDErr error")
+}
+
+// TestIWantCacheMissThresholdErrRoundTrip ensures correct error formatting for IWantCacheMissThresholdErr.
+func TestIWantCacheMissThresholdErrRoundTrip(t *testing.T) {
+	err := NewIWantCacheMissThresholdErr(5, 10, 5)
+
+	// tests the error message formatting.
+	expectedErrMsg := "5/10 iWant cache misses exceeds the allowed threshold: 5"
+	assert.Equal(t, expectedErrMsg, err.Error(), "the error message should be correctly formatted")
+
+	// tests the IsIWantCacheMissThresholdErr function.
+	assert.True(t, IsIWantCacheMissThresholdErr(err), "IsIWantCacheMissThresholdErr should return true for IWantCacheMissThresholdErr error")
+
+	// test IsIWantCacheMissThresholdErr with a different error type.
+	dummyErr := fmt.Errorf("dummy error")
+	assert.False(t, IsIWantCacheMissThresholdErr(dummyErr), "IsIWantCacheMissThresholdErr should return false for non-IWantCacheMissThresholdErr error")
+}
+
+// TestIWantDuplicateMsgIDThresholdErrRoundTrip ensures correct error formatting for IWantDuplicateMsgIDThresholdErr.
+func TestIWantDuplicateMsgIDThresholdErrRoundTrip(t *testing.T) {
+	err := NewIWantDuplicateMsgIDThresholdErr(5, 10, 5)
+
+	// tests the error message formatting.
+	expectedErrMsg := "5/10 iWant duplicate message ids exceeds the allowed threshold: 5"
+	assert.Equal(t, expectedErrMsg, err.Error(), "the error message should be correctly formatted")
+
+	// tests the IsIWantDuplicateMsgIDThresholdErr function.
+	assert.True(t, IsIWantDuplicateMsgIDThresholdErr(err), "IsIWantDuplicateMsgIDThresholdErr should return true for IWantDuplicateMsgIDThresholdErr error")
+
+	// test IsIWantDuplicateMsgIDThresholdErr with a different error type.
+	dummyErr := fmt.Errorf("dummy error")
+	assert.False(t, IsIWantDuplicateMsgIDThresholdErr(dummyErr), "IsIWantDuplicateMsgIDThresholdErr should return false for non-IWantDuplicateMsgIDThresholdErr error")
+}
+
+// TestInvalidRpcPublishMessagesErrRoundTrip ensures correct error formatting for InvalidRpcPublishMessagesErr.
+func TestInvalidRpcPublishMessagesErrRoundTrip(t *testing.T) {
+	wrappedErr := fmt.Errorf("invalid topic")
+	err := NewInvalidRpcPublishMessagesErr(wrappedErr, 1)
+
+	// tests the error message formatting.
+	expectedErrMsg := "rpc publish messages validation failed 1 error(s) encountered: invalid topic"
+	assert.Equal(t, expectedErrMsg, err.Error(), "the error message should be correctly formatted")
+
+	// tests the IsInvalidRpcPublishMessagesErr function.
+	assert.True(t, IsInvalidRpcPublishMessagesErr(err), "IsInvalidRpcPublishMessagesErr should return true for InvalidRpcPublishMessagesErr error")
+
+	// test IsInvalidRpcPublishMessagesErr with a different error type.
+	dummyErr := fmt.Errorf("dummy error")
+	assert.False(t, IsInvalidRpcPublishMessagesErr(dummyErr), "IsInvalidRpcPublishMessagesErr should return false for non-InvalidRpcPublishMessagesErr error")
+}
+
+// TestDuplicateTopicIDThresholdExceededRoundTrip ensures correct error formatting for DuplicateTopicIDThresholdExceeded.
+func TestDuplicateTopicIDThresholdExceededRoundTrip(t *testing.T) {
+	expectedErrorMsg := "3/5 duplicate topic IDs exceed the allowed threshold: 2"
+	err := NewDuplicateTopicIDThresholdExceeded(3, 5, 2)
+	assert.Equal(t, expectedErrorMsg, err.Error(), "the error message should be correctly formatted")
+	// tests the IsDuplicateTopicIDThresholdExceeded function.
+	assert.True(t, IsDuplicateTopicIDThresholdExceeded(err), "IsDuplicateTopicIDThresholdExceeded should return true for DuplicateTopicIDThresholdExceeded error")
+	// test IsDuplicateTopicIDThresholdExceeded with a different error type.
+	dummyErr := fmt.Errorf("dummy error")
+	assert.False(t, IsDuplicateTopicIDThresholdExceeded(dummyErr), "IsDuplicateTopicIDThresholdExceeded should return false for non-DuplicateTopicIDThresholdExceeded error")
+}
+
+// TestInvalidTopicIDThresholdExceededRoundTrip ensures correct error formatting for InvalidTopicIDThresholdExceeded.
+func TestInvalidTopicIDThresholdExceededRoundTrip(t *testing.T) {
+	expectedErrorMsg := "8 invalid topic IDs exceed the allowed threshold: 5"
+	err := NewInvalidTopicIDThresholdExceeded(8, 5)
+	assert.Equal(t, expectedErrorMsg, err.Error(), "the error message should be correctly formatted")
+	// tests the IsInvalidTopicIDThresholdExceeded function.
+	assert.True(t, IsInvalidTopicIDThresholdExceeded(err), "IsInvalidTopicIDThresholdExceeded should return true for InvalidTopicIDThresholdExceeded error")
+	// test IsInvalidTopicIDThresholdExceeded with a different error type.
+	dummyErr := fmt.Errorf("dummy error")
+	assert.False(t, IsInvalidTopicIDThresholdExceeded(dummyErr), "IsInvalidTopicIDThresholdExceeded should return false for non-InvalidTopicIDThresholdExceeded error")
+}
diff --git a/network/p2p/inspector/validation/inspect_message_request.go b/network/p2p/inspector/validation/inspect_message_request.go
new file mode 100644
index 00000000000..f0367e5d216
--- /dev/null
+++ b/network/p2p/inspector/validation/inspect_message_request.go
@@ -0,0 +1,28 @@
+package validation
+
+import (
+	"fmt"
+
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	"github.com/libp2p/go-libp2p/core/peer"
+
+	"github.com/onflow/flow-go/network/p2p/inspector/internal"
+)
+
+// InspectRPCRequest represents a short digest of an RPC control message. It is used for further message inspection by component workers.
+type InspectRPCRequest struct {
+	// Nonce adds a random value so that a unique ID can be created from the struct fields when the request is stored in the hero store.
+	Nonce []byte
+	// Peer sender of the message.
+	Peer peer.ID
+	rpc  *pubsub.RPC
+}
+
+// NewInspectRPCRequest returns a new *InspectRPCRequest.
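+//
+// A minimal usage sketch (the surrounding inspector and its worker queue are
+// assumed here for illustration):
+//
+//	req, err := NewInspectRPCRequest(from, rpc)
+//	if err != nil {
+//		return fmt.Errorf("failed to create inspect rpc request: %w", err)
+//	}
+//	// hand req off to the worker pool for asynchronous inspection.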
+func NewInspectRPCRequest(from peer.ID, rpc *pubsub.RPC) (*InspectRPCRequest, error) {
+	nonce, err := internal.Nonce()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get inspect message request nonce: %w", err)
+	}
+	return &InspectRPCRequest{Nonce: nonce, Peer: from, rpc: rpc}, nil
+}
diff --git a/network/p2p/inspector/validation/utils.go b/network/p2p/inspector/validation/utils.go
new file mode 100644
index 00000000000..d84bdd0cbf2
--- /dev/null
+++ b/network/p2p/inspector/validation/utils.go
@@ -0,0 +1,24 @@
+package validation
+
+// duplicateStrTracker is a map of strings to the number of times they have been tracked.
+// It is a non-concurrent map, so it should only be used in a single goroutine.
+// It is used to track duplicate strings.
+type duplicateStrTracker map[string]int
+
+// track stores the string and returns the number of times it has been tracked.
+// If the string has not been tracked before, it is stored with a count of 1.
+// If the string has been tracked before, the count is incremented.
+// Args:
+//
+//	s: the string to track
+//
+// Returns:
+// The number of times this string has been tracked, e.g., 1 if it is the first time, 2 if it is the second time, etc.
+func (d duplicateStrTracker) track(s string) int {
+	if _, ok := d[s]; !ok {
+		d[s] = 0
+	}
+	d[s]++
+
+	return d[s]
+}
diff --git a/network/p2p/inspector/validation/utils_test.go b/network/p2p/inspector/validation/utils_test.go
new file mode 100644
index 00000000000..81f0ffca936
--- /dev/null
+++ b/network/p2p/inspector/validation/utils_test.go
@@ -0,0 +1,22 @@
+package validation
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestDuplicateStringTracker tests the duplicateStrTracker.track function.
+func TestDuplicateStringTracker(t *testing.T) {
+	tracker := make(duplicateStrTracker)
+	require.Equal(t, 1, tracker.track("test1"))
+	require.Equal(t, 2, tracker.track("test1"))
+
+	// tracking a new string, 3 times
+	require.Equal(t, 1, tracker.track("test2"))
+	require.Equal(t, 2, tracker.track("test2"))
+	require.Equal(t, 3, tracker.track("test2"))
+
+	// tracking an empty string
+	require.Equal(t, 1, tracker.track(""))
+}
diff --git a/network/p2p/keyutils/keyTranslator.go b/network/p2p/keyutils/keyTranslator.go
index 3dd3eab8088..2e29faee401 100644
--- a/network/p2p/keyutils/keyTranslator.go
+++ b/network/p2p/keyutils/keyTranslator.go
@@ -10,9 +10,7 @@ import (
 	lcrypto "github.com/libp2p/go-libp2p/core/crypto"
 	lcrypto_pb "github.com/libp2p/go-libp2p/core/crypto/pb"
 	"github.com/libp2p/go-libp2p/core/peer"
-
-	"github.com/onflow/flow-go/crypto"
-	fcrypto "github.com/onflow/flow-go/crypto"
+	"github.com/onflow/crypto"
 )
 
 // This module is meant to help libp2p <-> flow public key conversions
@@ -46,7 +44,7 @@ func setPubKey(c elliptic.Curve, x *big.Int, y *big.Int) *goecdsa.PublicKey {
 
 // These utility functions convert a Flow crypto key to a LibP2P key (Flow --> LibP2P)
 
 // PeerIDFromFlowPublicKey converts a Flow public key to a LibP2P peer ID.
-func PeerIDFromFlowPublicKey(networkPubKey fcrypto.PublicKey) (pid peer.ID, err error) { +func PeerIDFromFlowPublicKey(networkPubKey crypto.PublicKey) (pid peer.ID, err error) { pk, err := LibP2PPublicKeyFromFlow(networkPubKey) if err != nil { err = fmt.Errorf("failed to convert Flow key to LibP2P key: %w", err) @@ -63,7 +61,7 @@ func PeerIDFromFlowPublicKey(networkPubKey fcrypto.PublicKey) (pid peer.ID, err } // LibP2PPrivKeyFromFlow converts a Flow private key to a LibP2P Private key -func LibP2PPrivKeyFromFlow(fpk fcrypto.PrivateKey) (lcrypto.PrivKey, error) { +func LibP2PPrivKeyFromFlow(fpk crypto.PrivateKey) (lcrypto.PrivKey, error) { // get the signature algorithm keyType, err := keyType(fpk.Algorithm()) if err != nil { @@ -94,7 +92,7 @@ func LibP2PPrivKeyFromFlow(fpk fcrypto.PrivateKey) (lcrypto.PrivKey, error) { } // LibP2PPublicKeyFromFlow converts a Flow public key to a LibP2P public key -func LibP2PPublicKeyFromFlow(fpk fcrypto.PublicKey) (lcrypto.PubKey, error) { +func LibP2PPublicKeyFromFlow(fpk crypto.PublicKey) (lcrypto.PubKey, error) { keyType, err := keyType(fpk.Algorithm()) if err != nil { return nil, err @@ -130,7 +128,7 @@ func LibP2PPublicKeyFromFlow(fpk fcrypto.PublicKey) (lcrypto.PubKey, error) { // This converts some libp2p PubKeys to a flow PublicKey // - the supported key types are ECDSA P-256 and ECDSA Secp256k1 public keys, // - libp2p also supports RSA and Ed25519 keys, which Flow doesn't, their conversion will return an error. -func FlowPublicKeyFromLibP2P(lpk lcrypto.PubKey) (fcrypto.PublicKey, error) { +func FlowPublicKeyFromLibP2P(lpk lcrypto.PubKey) (crypto.PublicKey, error) { switch ktype := lpk.Type(); ktype { case lcrypto_pb.KeyType_ECDSA: @@ -178,11 +176,11 @@ func FlowPublicKeyFromLibP2P(lpk lcrypto.PubKey) (fcrypto.PublicKey, error) { } // keyType translates Flow signing algorithm constants to the corresponding LibP2P constants -func keyType(sa fcrypto.SigningAlgorithm) (lcrypto_pb.KeyType, error) { +func keyType(sa crypto.SigningAlgorithm) (lcrypto_pb.KeyType, error) { switch sa { - case fcrypto.ECDSAP256: + case crypto.ECDSAP256: return lcrypto_pb.KeyType_ECDSA, nil - case fcrypto.ECDSASecp256k1: + case crypto.ECDSASecp256k1: return lcrypto_pb.KeyType_Secp256k1, nil default: return -1, lcrypto.ErrBadKeyType diff --git a/network/p2p/keyutils/keyTranslator_test.go b/network/p2p/keyutils/keyTranslator_test.go index e8cc10599a5..4f630b1ffb4 100644 --- a/network/p2p/keyutils/keyTranslator_test.go +++ b/network/p2p/keyutils/keyTranslator_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - fcrypto "github.com/onflow/flow-go/crypto" + fcrypto "github.com/onflow/crypto" ) // KeyTranslatorTestSuite tests key conversion from Flow keys to LibP2P keys diff --git a/network/p2p/libp2pNode.go b/network/p2p/libp2pNode.go index 1a7a87bd03d..e38342aacb7 100644 --- a/network/p2p/libp2pNode.go +++ b/network/p2p/libp2pNode.go @@ -10,53 +10,57 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/routing" + "github.com/onflow/flow-go/engine/collection" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/network" + flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p/unicast/protocols" ) -// LibP2PNode represents a flow libp2p node. 
It provides the network layer with the necessary interface to
-// control the underlying libp2p node. It is essentially the flow wrapper around the libp2p node, and allows
-// us to define different types of libp2p nodes that can operate in different ways by overriding these methods.
-// TODO: this interface is highly coupled with the current implementation of the libp2p node. We should
-//
-//	consider refactoring it to be more generic and less coupled with the current implementation.
-//	https://github.com/dapperlabs/flow-go/issues/6575
-type LibP2PNode interface {
-	module.ReadyDoneAware
-	Subscriptions
-	// PeerConnections connection status information per peer.
-	PeerConnections
-	// PeerScore exposes the peer score API.
-	PeerScore
+// CoreP2P service management capabilities
+type CoreP2P interface {
 	// Start the libp2p node.
 	Start(ctx irrecoverable.SignalerContext)
 	// Stop terminates the libp2p node.
 	Stop() error
-	// AddPeer adds a peer to this node by adding it to this node's peerstore and connecting to it.
-	AddPeer(ctx context.Context, peerInfo peer.AddrInfo) error
+	// GetIPPort returns the IP and Port the libp2p node is listening on.
+	GetIPPort() (string, string, error)
+	// Host returns pointer to host object of node.
+	Host() host.Host
+	// SetComponentManager sets the component manager for the node.
+	// SetComponentManager may be called at most once.
+	SetComponentManager(cm *component.ComponentManager)
+}
+
+// PeerManagement set of node traits related to its lifecycle and metadata retrieval
+type PeerManagement interface {
+	// ConnectToPeer connects to the peer with the given peer address information.
+	// This method is used to connect to a peer that is not in the peer store.
+	ConnectToPeer(ctx context.Context, peerInfo peer.AddrInfo) error
 	// RemovePeer closes the connection with the peer.
 	RemovePeer(peerID peer.ID) error
+	// ListPeers returns the list of peer IDs for peers subscribed to the topic.
+	ListPeers(topic string) []peer.ID
 	// GetPeersForProtocol returns slice peer IDs for the specified protocol ID.
 	GetPeersForProtocol(pid protocol.ID) peer.IDSlice
-	// CreateStream returns an existing stream connected to the peer if it exists, or creates a new stream with it.
-	CreateStream(ctx context.Context, peerID peer.ID) (libp2pnet.Stream, error)
 	// GetIPPort returns the IP and Port the libp2p node is listening on.
 	GetIPPort() (string, string, error)
 	// RoutingTable returns the node routing table
 	RoutingTable() *kbucket.RoutingTable
-	// ListPeers returns list of peer IDs for peers subscribed to the topic.
-	ListPeers(topic string) []peer.ID
 	// Subscribe subscribes the node to the given topic and returns the subscription
 	Subscribe(topic channels.Topic, topicValidator TopicValidatorFunc) (Subscription, error)
-	// UnSubscribe cancels the subscriber and closes the topic.
-	UnSubscribe(topic channels.Topic) error
+	// Unsubscribe cancels the subscriber and closes the topic corresponding to the given channel.
+	Unsubscribe(topic channels.Topic) error
 	// Publish publishes the given payload on the topic.
-	Publish(ctx context.Context, topic channels.Topic, data []byte) error
+	Publish(ctx context.Context, messageScope network.OutgoingMessageScope) error
 	// Host returns pointer to host object of node.
 	Host() host.Host
+	// ID returns the peer.ID of the node, which is the unique identifier of the node at the libp2p level.
+	// For other libp2p nodes, the current node is identified by this ID.
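+	//
+	// A typical use is tagging log lines with the node's own identifier
+	// (sketch; the logger wiring is assumed here for illustration):
+	//
+	//	logger.Info().Str("peer_id", node.ID().String()).Msg("libp2p node started")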
+ ID() peer.ID // WithDefaultUnicastProtocol overrides the default handler of the unicast manager and registers all preferred protocols. WithDefaultUnicastProtocol(defaultHandler libp2pnet.StreamHandler, preferred []protocols.ProtocolName) error // WithPeersProvider sets the PeersProvider for the peer manager. @@ -66,17 +70,102 @@ type LibP2PNode interface { PeerManagerComponent() component.Component // RequestPeerUpdate requests an update to the peer connections of this node using the peer manager. RequestPeerUpdate() +} + +// Routable set of node routing capabilities +type Routable interface { + // RoutingTable returns the node routing table + RoutingTable() *kbucket.RoutingTable // SetRouting sets the node's routing implementation. // SetRouting may be called at most once. - SetRouting(r routing.Routing) + // Returns: + // - error: An error, if any occurred during the process; any returned error is irrecoverable. + SetRouting(r routing.Routing) error // Routing returns node routing object. Routing() routing.Routing +} + +// UnicastManagement abstracts the unicast management capabilities of the node. +type UnicastManagement interface { + // OpenAndWriteOnStream opens a new stream to a peer with a protection tag. The protection tag can be used to ensure + // that the connection to the peer is maintained for a particular purpose. The stream is opened to the given peerID + // and writingLogic is executed on the stream. The created stream does not need to be reused and can be inexpensively + // created for each send. Moreover, the stream creation does not incur a round-trip time as the stream negotiation happens + // on an existing connection. + // + // Args: + // - ctx: The context used to control the stream's lifecycle. + // - peerID: The ID of the peer to open the stream to. + // - protectionTag: A tag that protects the connection and ensures that the connection manager keeps it alive, and + // won't prune the connection while the tag is active. + // - writingLogic: A callback function that contains the logic for writing to the stream. It allows an external caller to + // write to the stream without having to worry about the stream creation and management. + // + // Returns: + // error: An error, if any occurred during the process. This includes failure in creating the stream, setting the write + // deadline, executing the writing logic, resetting the stream if the writing logic fails, or closing the stream. + // All returned errors during this process can be considered benign. + OpenAndWriteOnStream(ctx context.Context, peerID peer.ID, protectionTag string, writingLogic func(stream libp2pnet.Stream) error) error + // WithDefaultUnicastProtocol overrides the default handler of the unicast manager and registers all preferred protocols. + WithDefaultUnicastProtocol(defaultHandler libp2pnet.StreamHandler, preferred []protocols.ProtocolName) error +} + +// PubSub publish subscribe features for node +type PubSub interface { + // Subscribe subscribes the node to the given topic and returns the subscription + Subscribe(topic channels.Topic, topicValidator TopicValidatorFunc) (Subscription, error) + // Unsubscribe cancels the subscriber and closes the topic. + Unsubscribe(topic channels.Topic) error + // Publish publishes the given payload on the topic. + Publish(ctx context.Context, messageScope flownet.OutgoingMessageScope) error // SetPubSub sets the node's pubsub implementation. // SetPubSub may be called at most once. 
SetPubSub(ps PubSubAdapter) - // SetComponentManager sets the component manager for the node. - // SetComponentManager may be called at most once. - SetComponentManager(cm *component.ComponentManager) + + // GetLocalMeshPeers returns the list of peers in the local mesh for the given topic. + // Args: + // - topic: the topic. + // Returns: + // - []peer.ID: the list of peers in the local mesh for the given topic. + GetLocalMeshPeers(topic channels.Topic) []peer.ID +} + +// LibP2PNode represents a Flow libp2p node. It provides the network layer with the necessary interface to +// control the underlying libp2p node. It is essentially the Flow wrapper around the libp2p node, and allows +// us to define different types of libp2p nodes that can operate in different ways by overriding these methods. +type LibP2PNode interface { + module.ReadyDoneAware + Subscriptions + // PeerConnections connection status information per peer. + PeerConnections + // PeerScore exposes the peer score API. + PeerScore + // DisallowListNotificationConsumer exposes the disallow list notification consumer API for the node so that + // it will be notified when a new disallow list update is distributed. + DisallowListNotificationConsumer + // CollectionClusterChangesConsumer is the interface for consuming the events of changes in the collection cluster. + // This is used to notify the node of changes in the collection cluster. + // LibP2PNode implements this interface and consumes the events to be notified of changes in the clustering channels. + // The clustering channels are used by the collection nodes of a cluster to communicate with each other. + // As the cluster (and hence their cluster channels) of collection nodes changes over time (per epoch) the node needs to be notified of these changes. + CollectionClusterChangesConsumer + // DisallowListOracle exposes the disallow list oracle API for external consumers to query about the disallow list. + DisallowListOracle + + // CoreP2P service management capabilities + CoreP2P + + // PeerManagement current peer management functions + PeerManagement + + // Routable routing related features + Routable + + // PubSub publish subscribe features for node + PubSub + + // UnicastManagement node stream management + UnicastManagement } // Subscriptions set of funcs related to current subscription info of a node. @@ -87,16 +176,21 @@ type Subscriptions interface { SetUnicastManager(uniMgr UnicastManager) } +// CollectionClusterChangesConsumer is the interface for consuming the events of changes in the collection cluster. +// This is used to notify the node of changes in the collection cluster. +// LibP2PNode implements this interface and consumes the events to be notified of changes in the clustering channels. +// The clustering channels are used by the collection nodes of a cluster to communicate with each other. +// As the cluster (and hence their cluster channels) of collection nodes changes over time (per epoch) the node needs to be notified of these changes. +type CollectionClusterChangesConsumer interface { + collection.ClusterEvents +} + // PeerScore is the interface for the peer score module. It is used to expose the peer score to other // components of the node. It is also used to set the peer score exposer implementation. type PeerScore interface { - // SetPeerScoreExposer sets the node's peer score exposer implementation. - // SetPeerScoreExposer may be called at most once. 
It is an irrecoverable error to call this
-	// method if the node's peer score exposer has already been set.
-	SetPeerScoreExposer(e PeerScoreExposer)
 	// PeerScoreExposer returns the node's peer score exposer implementation.
-	// If the node's peer score exposer has not been set, the second return value will be false.
-	PeerScoreExposer() (PeerScoreExposer, bool)
+	PeerScoreExposer() PeerScoreExposer
 }
 
 // PeerConnections subset of funcs related to underlying libp2p host connections.
@@ -109,3 +203,38 @@ type PeerConnections interface {
 	// to the peer is not empty. This indicates a bug within libp2p.
 	IsConnected(peerID peer.ID) (bool, error)
 }
+
+// DisallowListNotificationConsumer is an interface for consuming disallow/allow list update notifications.
+type DisallowListNotificationConsumer interface {
+	// OnDisallowListNotification is called when a new disallow list update notification is distributed.
+	// Any error encountered while consuming the event must be handled internally.
+	// The implementation must be concurrency safe.
+	// Args:
+	// id: peer ID of the peer being disallow-listed.
+	// cause: cause of the peer being disallow-listed (only this cause is added to the peer's disallow-listed causes).
+	// Returns:
+	// none
+	OnDisallowListNotification(id peer.ID, cause network.DisallowListedCause)
+
+	// OnAllowListNotification is called when a new allow list update notification is distributed.
+	// Any error encountered while consuming the event must be handled internally.
+	// The implementation must be concurrency safe.
+	// Args:
+	// id: peer ID of the peer being allow-listed.
+	// cause: cause of the peer being allow-listed (only this cause is removed from the peer's disallow-listed causes).
+	// Returns:
+	// none
+	OnAllowListNotification(id peer.ID, cause network.DisallowListedCause)
+}
+
+// DisallowListOracle is an interface for querying disallow-listed peers.
+type DisallowListOracle interface {
+	// IsDisallowListed determines whether the given peer is disallow-listed for any reason.
+	// Args:
+	// - peerID: the peer to check.
+	// Returns:
+	// - []network.DisallowListedCause: the list of causes for which the given peer is disallow-listed. If the peer is not disallow-listed for any reason,
+	// a nil slice is returned.
+	// - bool: true if the peer is disallow-listed for any reason, false otherwise.
+	IsDisallowListed(peerId peer.ID) ([]network.DisallowListedCause, bool)
+}
diff --git a/network/p2p/logging/internal/peerIdCache.go b/network/p2p/logging/internal/peerIdCache.go
new file mode 100644
index 00000000000..e5bf8c538cc
--- /dev/null
+++ b/network/p2p/logging/internal/peerIdCache.go
@@ -0,0 +1,56 @@
+package internal
+
+import (
+	"fmt"
+
+	lru "github.com/hashicorp/golang-lru/v2"
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+type PeerIdCache struct {
+	// TODO: Note that we use lru.Cache as there is an inherent import cycle when using the HeroCache.
+	// Moving forward we should consider moving the HeroCache to a separate repository and transition
+	// to using it here.
+	// This PeerIdCache is used extensively across the codebase, so any minor import cycle will cause
+	// a lot of trouble.
+	peerCache *lru.Cache[peer.ID, string]
+}
+
+func NewPeerIdCache(size int) (*PeerIdCache, error) {
+	c, err := lru.New[peer.ID, string](size)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create peer id cache: %w", err)
+	}
+	return &PeerIdCache{
+		peerCache: c,
+	}, nil
+}
+
+// PeerIdString returns the base58 encoded peer id string. It looks up the peer id in a cache to avoid
+// expensive base58 encoding, and caches the result for future use in case of a cache miss.
+// It is safe to call this method concurrently.
+func (p *PeerIdCache) PeerIdString(pid peer.ID) string {
+	pidStr, ok := p.peerCache.Get(pid)
+	if ok {
+		return pidStr
+	}
+
+	pidStr0 := pid.String()
+	p.peerCache.Add(pid, pidStr0)
+	return pidStr0
+}
+
+// Size returns the number of entries in the cache; it is mainly used for testing.
+func (p *PeerIdCache) Size() int {
+	return p.peerCache.Len()
+}
+
+// ByPeerId returns the base58 encoded peer id string by directly looking up the peer id in the cache. It is only
+// used for testing; since this is an internal package, it is not exposed to the outside world.
+func (p *PeerIdCache) ByPeerId(pid peer.ID) (string, bool) {
+	pidStr, ok := p.peerCache.Get(pid)
+	if ok {
+		return pidStr, true
+	}
+	return "", false
+}
diff --git a/network/p2p/logging/internal/peerIdCache_test.go b/network/p2p/logging/internal/peerIdCache_test.go
new file mode 100644
index 00000000000..8e03efa7fbb
--- /dev/null
+++ b/network/p2p/logging/internal/peerIdCache_test.go
@@ -0,0 +1,106 @@
+package internal_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/onflow/flow-go/network/p2p/logging/internal"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestNewPeerIdCache tests the basic functionality of the peer ID cache. It ensures that the cache
+// is created successfully.
+func TestNewPeerIdCache(t *testing.T) {
+	cacheSize := 100
+	cache, err := internal.NewPeerIdCache(cacheSize)
+	assert.NoError(t, err)
+	assert.NotNil(t, cache)
+}
+
+// TestPeerIdCache_PeerIdString tests the basic functionality of the peer ID cache. It ensures that the cache
+// returns the same string as the peer.ID.String() method.
+func TestPeerIdCache_PeerIdString(t *testing.T) {
+	cacheSize := 100
+	cache, err := internal.NewPeerIdCache(cacheSize)
+	assert.NoError(t, err)
+
+	t.Run("existing peer ID", func(t *testing.T) {
+		pid := unittest.PeerIdFixture(t)
+		pidStr := cache.PeerIdString(pid)
+		assert.NotEmpty(t, pidStr)
+		assert.Equal(t, pid.String(), pidStr)
+
+		gotPidStr, ok := cache.ByPeerId(pid)
+		assert.True(t, ok, "expected pid to be in the cache")
+		assert.Equal(t, pid.String(), gotPidStr)
+	})
+
+	t.Run("non-existing peer ID", func(t *testing.T) {
+		pid1 := unittest.PeerIdFixture(t)
+		pid2 := unittest.PeerIdFixture(t)
+
+		cache.PeerIdString(pid1)
+		pidStr := cache.PeerIdString(pid2)
+		assert.NotEmpty(t, pidStr)
+		assert.Equal(t, pid2.String(), pidStr)
+
+		gotPidStr, ok := cache.ByPeerId(pid2)
+		assert.True(t, ok, "expected pid to be in the cache")
+		assert.Equal(t, pid2.String(), gotPidStr)
+
+		gotPidStr, ok = cache.ByPeerId(pid1)
+		assert.True(t, ok, "expected pid to be in the cache")
+		assert.Equal(t, pid1.String(), gotPidStr)
+	})
+}
+
+// TestPeerIdCache_EjectionScenarios tests the eviction logic of the peer ID cache. It ensures that the cache
+// evicts the least recently used peer ID when the cache is full.
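+//
+// Condensed shape of the scenario exercised below (cache size 3):
+//
+//	cache.PeerIdString(pid1) // size 1
+//	cache.PeerIdString(pid2) // size 2
+//	cache.PeerIdString(pid3) // size 3 (full)
+//	cache.PeerIdString(pid4) // pid4 is added; the least recently used entry (pid1) is evicted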
+func TestPeerIdCache_EjectionScenarios(t *testing.T) {
+	cacheSize := 3
+	cache, err := internal.NewPeerIdCache(cacheSize)
+	assert.NoError(t, err)
+	assert.Equal(t, 0, cache.Size())
+
+	// add peer IDs to fill the cache
+	pid1 := unittest.PeerIdFixture(t)
+	pid2 := unittest.PeerIdFixture(t)
+	pid3 := unittest.PeerIdFixture(t)
+
+	cache.PeerIdString(pid1)
+	assert.Equal(t, 1, cache.Size())
+	cache.PeerIdString(pid2)
+	assert.Equal(t, 2, cache.Size())
+	cache.PeerIdString(pid3)
+	assert.Equal(t, 3, cache.Size())
+
+	// check that all peer IDs are in the cache
+	assert.Equal(t, pid1.String(), cache.PeerIdString(pid1))
+	assert.Equal(t, pid2.String(), cache.PeerIdString(pid2))
+	assert.Equal(t, pid3.String(), cache.PeerIdString(pid3))
+	assert.Equal(t, 3, cache.Size())
+
+	// add a new peer ID
+	pid4 := unittest.PeerIdFixture(t)
+	cache.PeerIdString(pid4)
+	assert.Equal(t, 3, cache.Size())
+
+	// check that pid1 is now the one that has been evicted
+	gotId1Str, ok := cache.ByPeerId(pid1)
+	assert.False(t, ok, "expected pid1 to be evicted")
+	assert.Equal(t, "", gotId1Str)
+
+	// confirm other peer IDs are still in the cache
+	gotId2Str, ok := cache.ByPeerId(pid2)
+	assert.True(t, ok, "expected pid2 to be in the cache")
+	assert.Equal(t, pid2.String(), gotId2Str)
+
+	gotId3Str, ok := cache.ByPeerId(pid3)
+	assert.True(t, ok, "expected pid3 to be in the cache")
+	assert.Equal(t, pid3.String(), gotId3Str)
+
+	gotId4Str, ok := cache.ByPeerId(pid4)
+	assert.True(t, ok, "expected pid4 to be in the cache")
+	assert.Equal(t, pid4.String(), gotId4Str)
+}
diff --git a/network/p2p/logging/logging.go b/network/p2p/logging/logging.go
new file mode 100644
index 00000000000..2ab4345aa6b
--- /dev/null
+++ b/network/p2p/logging/logging.go
@@ -0,0 +1,27 @@
+package p2plogging
+
+import (
+	"github.com/libp2p/go-libp2p/core/peer"
+
+	"github.com/onflow/flow-go/network/p2p/logging/internal"
+)
+
+// peerIdCache is a global cache of peer ids; it is used to avoid repeated, expensive base58 encoding of peer ids.
+var peerIdCache *internal.PeerIdCache
+
+// init runs when the package is initialized, before any other code in the
+// package executes, so that the peer id cache is ready to use.
+func init() {
+	cache, err := internal.NewPeerIdCache(10_000)
+	if err != nil {
+		panic(err)
+	}
+	peerIdCache = cache
+}
+
+// PeerId is a logger helper that returns the base58 encoded peer id string. It looks up the peer id in a cache to avoid
+// expensive base58 encoding, and caches the result for future use in case of a cache miss.
+func PeerId(pid peer.ID) string {
+	return peerIdCache.PeerIdString(pid)
+}
diff --git a/network/p2p/logging/logging_test.go b/network/p2p/logging/logging_test.go
new file mode 100644
index 00000000000..1dc80e3af4d
--- /dev/null
+++ b/network/p2p/logging/logging_test.go
@@ -0,0 +1,52 @@
+package p2plogging_test
+
+import (
+	"testing"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/stretchr/testify/require"
+
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestPeerIdLogging checks the end-to-end functionality of the PeerId logger helper.
+// It ensures that the PeerId logger helper returns the same string as the peer.ID.String() method.
+func TestPeerIdLogging(t *testing.T) {
+	pid := unittest.PeerIdFixture(t)
+	pidStr := p2plogging.PeerId(pid)
+	require.Equal(t, pid.String(), pidStr)
+}
+
+// BenchmarkPeerIdString benchmarks the peer.ID.String() method.
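+// The benchmark is gated behind unittest.SkipBenchmarkUnless; a local run would
+// look roughly like the following (the exact environment variable consulted by
+// the gate is an assumption here):
+//
+//	BENCHMARK_EXPERIMENT=1 go test -run=NONE -bench=BenchmarkPeerId ./network/p2p/logging/...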
+func BenchmarkPeerIdString(b *testing.B) { + unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id string benchmarking, set environment variable to enable") + + count := 100 + pids := make([]peer.ID, 0, count) + for i := 0; i < count; i++ { + pids = append(pids, unittest.PeerIdFixture(b)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = pids[i%count].String() + } +} + +// BenchmarkPeerIdLogging benchmarks the PeerId logger helper, which is expected to be faster than the peer.ID.String() method, +// as it caches the base58 encoded peer ID strings. +func BenchmarkPeerIdLogging(b *testing.B) { + unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id logging benchmarking, set environment variable to enable") + + count := 100 + pids := make([]peer.ID, 0, count) + for i := 0; i < count; i++ { + pids = append(pids, unittest.PeerIdFixture(b)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = p2plogging.PeerId(pids[i%count]) + } +} diff --git a/network/p2p/message/types.go b/network/p2p/message/types.go new file mode 100644 index 00000000000..baab4384253 --- /dev/null +++ b/network/p2p/message/types.go @@ -0,0 +1,22 @@ +package p2pmsg + +// ControlMessageType is the type of control message, as defined in the libp2p pubsub spec. +type ControlMessageType string + +func (c ControlMessageType) String() string { + return string(c) +} + +const ( + CtrlMsgRPC ControlMessageType = "RPC" + CtrlMsgIHave ControlMessageType = "IHAVE" + CtrlMsgIWant ControlMessageType = "IWANT" + CtrlMsgGraft ControlMessageType = "GRAFT" + CtrlMsgPrune ControlMessageType = "PRUNE" + RpcPublishMessage ControlMessageType = "RpcPublishMessage" +) + +// ControlMessageTypes returns list of all libp2p control message types. +func ControlMessageTypes() []ControlMessageType { + return []ControlMessageType{CtrlMsgIHave, CtrlMsgIWant, CtrlMsgGraft, CtrlMsgPrune} +} diff --git a/network/p2p/middleware/middleware.go b/network/p2p/middleware/middleware.go deleted file mode 100644 index 58e15638943..00000000000 --- a/network/p2p/middleware/middleware.go +++ /dev/null @@ -1,868 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package middleware - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "sync" - "time" - - ggio "github.com/gogo/protobuf/io" - "github.com/ipfs/go-datastore" - libp2pnetwork "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/peerstore" - "github.com/libp2p/go-libp2p/core/protocol" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/codec" - "github.com/onflow/flow-go/network/internal/p2putils" - "github.com/onflow/flow-go/network/message" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/blob" - "github.com/onflow/flow-go/network/p2p/p2pnode" - "github.com/onflow/flow-go/network/p2p/ping" - "github.com/onflow/flow-go/network/p2p/unicast/protocols" - "github.com/onflow/flow-go/network/p2p/unicast/ratelimit" - "github.com/onflow/flow-go/network/p2p/utils" - "github.com/onflow/flow-go/network/slashing" - "github.com/onflow/flow-go/network/validator" - flowpubsub "github.com/onflow/flow-go/network/validator/pubsub" - _ "github.com/onflow/flow-go/utils/binstat" - 
"github.com/onflow/flow-go/utils/logging" -) - -const ( - _ = iota - _ = 1 << (10 * iota) - mb - gb -) - -const ( - // DefaultMaxUnicastMsgSize defines maximum message size in unicast mode for most messages - DefaultMaxUnicastMsgSize = 10 * mb // 10 mb - - // LargeMsgMaxUnicastMsgSize defines maximum message size in unicast mode for large messages - LargeMsgMaxUnicastMsgSize = gb // 1 gb - - // DefaultUnicastTimeout is the default maximum time to wait for a default unicast request to complete - // assuming at least a 1mb/sec connection - DefaultUnicastTimeout = 5 * time.Second - - // LargeMsgUnicastTimeout is the maximum time to wait for a unicast request to complete for large message size - LargeMsgUnicastTimeout = 1000 * time.Second -) - -var ( - _ network.Middleware = (*Middleware)(nil) - _ p2p.DisallowListNotificationConsumer = (*Middleware)(nil) - - // ErrUnicastMsgWithoutSub error is provided to the slashing violations consumer in the case where - // the middleware receives a message via unicast but does not have a corresponding subscription for - // the channel in that message. - ErrUnicastMsgWithoutSub = errors.New("middleware does not have subscription for the channel ID indicated in the unicast message received") -) - -// Middleware handles the input & output on the direct connections we have to -// our neighbours on the peer-to-peer network. -type Middleware struct { - sync.Mutex - ctx context.Context - log zerolog.Logger - ov network.Overlay - // TODO: using a waitgroup here doesn't actually guarantee that we'll wait for all - // goroutines to exit, because new goroutines could be started after we've already - // returned from wg.Wait(). We need to solve this the right way using ComponentManager - // and worker routines. - wg sync.WaitGroup - libP2PNode p2p.LibP2PNode - preferredUnicasts []protocols.ProtocolName - me flow.Identifier - bitswapMetrics module.BitswapMetrics - rootBlockID flow.Identifier - validators []network.MessageValidator - peerManagerFilters []p2p.PeerFilter - unicastMessageTimeout time.Duration - idTranslator p2p.IDTranslator - previousProtocolStatePeers []peer.AddrInfo - codec network.Codec - slashingViolationsConsumer slashing.ViolationsConsumer - unicastRateLimiters *ratelimit.RateLimiters - authorizedSenderValidator *validator.AuthorizedSenderValidator - component.Component -} - -type MiddlewareOption func(*Middleware) - -func WithMessageValidators(validators ...network.MessageValidator) MiddlewareOption { - return func(mw *Middleware) { - mw.validators = validators - } -} - -func WithPreferredUnicastProtocols(unicasts []protocols.ProtocolName) MiddlewareOption { - return func(mw *Middleware) { - mw.preferredUnicasts = unicasts - } -} - -// WithPeerManagerFilters sets a list of p2p.PeerFilter funcs that are used to -// filter out peers provided by the peer manager PeersProvider. -func WithPeerManagerFilters(peerManagerFilters []p2p.PeerFilter) MiddlewareOption { - return func(mw *Middleware) { - mw.peerManagerFilters = peerManagerFilters - } -} - -// WithUnicastRateLimiters sets the unicast rate limiters. 
-func WithUnicastRateLimiters(rateLimiters *ratelimit.RateLimiters) MiddlewareOption { - return func(mw *Middleware) { - mw.unicastRateLimiters = rateLimiters - } -} - -// NewMiddleware creates a new middleware instance -// libP2PNodeFactory is the factory used to create a LibP2PNode -// flowID is this node's Flow ID -// metrics is the interface to report network related metrics -// unicastMessageTimeout is the timeout used for unicast messages -// connectionGating if set to True, restricts this node to only talk to other nodes which are part of the identity list -// validators are the set of the different message validators that each inbound messages is passed through -// During normal operations any error returned by Middleware.start is considered to be catastrophic -// and will be thrown by the irrecoverable.SignalerContext causing the node to crash. -func NewMiddleware( - log zerolog.Logger, - libP2PNode p2p.LibP2PNode, - flowID flow.Identifier, - bitswapMet module.BitswapMetrics, - rootBlockID flow.Identifier, - unicastMessageTimeout time.Duration, - idTranslator p2p.IDTranslator, - codec network.Codec, - slashingViolationsConsumer slashing.ViolationsConsumer, - opts ...MiddlewareOption) *Middleware { - - if unicastMessageTimeout <= 0 { - unicastMessageTimeout = DefaultUnicastTimeout - } - - // create the node entity and inject dependencies & config - mw := &Middleware{ - log: log, - me: flowID, - libP2PNode: libP2PNode, - bitswapMetrics: bitswapMet, - rootBlockID: rootBlockID, - validators: DefaultValidators(log, flowID), - unicastMessageTimeout: unicastMessageTimeout, - idTranslator: idTranslator, - codec: codec, - slashingViolationsConsumer: slashingViolationsConsumer, - unicastRateLimiters: ratelimit.NoopRateLimiters(), - } - - for _, opt := range opts { - opt(mw) - } - - builder := component.NewComponentManagerBuilder() - for _, limiter := range mw.unicastRateLimiters.Limiters() { - rateLimiter := limiter - builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - ready() - rateLimiter.Start(ctx) - <-rateLimiter.Ready() - ready() - <-rateLimiter.Done() - }) - } - builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - // TODO: refactor to avoid storing ctx altogether - mw.ctx = ctx - - if err := mw.start(ctx); err != nil { - ctx.Throw(err) - } - - ready() - - <-ctx.Done() - mw.log.Info().Str("component", "middleware").Msg("stopping subroutines") - - // wait for the readConnection and readSubscription routines to stop - mw.wg.Wait() - - mw.log.Info().Str("component", "middleware").Msg("stopped subroutines") - }) - - mw.Component = builder.Build() - return mw -} - -func DefaultValidators(log zerolog.Logger, flowID flow.Identifier) []network.MessageValidator { - return []network.MessageValidator{ - validator.ValidateNotSender(flowID), // validator to filter out messages sent by this node itself - validator.ValidateTarget(log, flowID), // validator to filter out messages not intended for this node - } -} - -// isProtocolParticipant returns a PeerFilter that returns true if a peer is a staked node. 
-func (m *Middleware) isProtocolParticipant() p2p.PeerFilter { - return func(p peer.ID) error { - if _, ok := m.ov.Identity(p); !ok { - return fmt.Errorf("failed to get identity of unknown peer with peer id %s", p.String()) - } - return nil - } -} - -func (m *Middleware) NewBlobService(channel channels.Channel, ds datastore.Batching, opts ...network.BlobServiceOption) network.BlobService { - return blob.NewBlobService(m.libP2PNode.Host(), m.libP2PNode.Routing(), channel.String(), ds, m.bitswapMetrics, m.log, opts...) -} - -func (m *Middleware) NewPingService(pingProtocol protocol.ID, provider network.PingInfoProvider) network.PingService { - return ping.NewPingService(m.libP2PNode.Host(), pingProtocol, m.log, provider) -} - -func (m *Middleware) peerIDs(flowIDs flow.IdentifierList) peer.IDSlice { - result := make([]peer.ID, 0, len(flowIDs)) - - for _, fid := range flowIDs { - pid, err := m.idTranslator.GetPeerID(fid) - if err != nil { - // We probably don't need to fail the entire function here, since the other - // translations may still succeed - m.log.Err(err).Str("flowID", fid.String()).Msg("failed to translate to peer ID") - continue - } - - result = append(result, pid) - } - - return result -} - -// Me returns the flow identifier of this middleware -func (m *Middleware) Me() flow.Identifier { - return m.me -} - -// GetIPPort returns the ip address and port number associated with the middleware -// All errors returned from this function can be considered benign. -func (m *Middleware) GetIPPort() (string, string, error) { - ipOrHostname, port, err := m.libP2PNode.GetIPPort() - if err != nil { - return "", "", fmt.Errorf("failed to get ip and port from libP2P node: %w", err) - } - - return ipOrHostname, port, nil -} - -func (m *Middleware) UpdateNodeAddresses() { - m.log.Info().Msg("Updating protocol state node addresses") - - ids := m.ov.Identities() - newInfos, invalid := utils.PeerInfosFromIDs(ids) - - for id, err := range invalid { - m.log.Err(err).Str("node_id", id.String()).Msg("failed to extract peer info from identity") - } - - m.Lock() - defer m.Unlock() - - // set old addresses to expire - for _, oldInfo := range m.previousProtocolStatePeers { - m.libP2PNode.Host().Peerstore().SetAddrs(oldInfo.ID, oldInfo.Addrs, peerstore.TempAddrTTL) - } - - for _, info := range newInfos { - m.libP2PNode.Host().Peerstore().SetAddrs(info.ID, info.Addrs, peerstore.PermanentAddrTTL) - } - - m.previousProtocolStatePeers = newInfos -} - -func (m *Middleware) SetOverlay(ov network.Overlay) { - m.ov = ov -} - -// start will start the middleware. -// No errors are expected during normal operation. -func (m *Middleware) start(ctx context.Context) error { - if m.ov == nil { - return fmt.Errorf("could not start middleware: overlay must be configured by calling SetOverlay before middleware can be started") - } - - m.authorizedSenderValidator = validator.NewAuthorizedSenderValidator(m.log, m.slashingViolationsConsumer, m.ov.Identity) - - err := m.libP2PNode.WithDefaultUnicastProtocol(m.handleIncomingStream, m.preferredUnicasts) - if err != nil { - return fmt.Errorf("could not register preferred unicast protocols on libp2p node: %w", err) - } - - m.UpdateNodeAddresses() - - m.libP2PNode.WithPeersProvider(m.topologyPeers) - - return nil -} - -// topologyPeers callback used by the peer manager to get the list of peer ID's -// which this node should be directly connected to as peers. The peer ID list -// returned will be filtered through any configured m.peerManagerFilters. 
If the -// underlying libp2p node has a peer manager configured this func will be used as the -// peers provider. -func (m *Middleware) topologyPeers() peer.IDSlice { - peerIDs := make([]peer.ID, 0) - for _, id := range m.peerIDs(m.ov.Topology().NodeIDs()) { - peerAllowed := true - for _, filter := range m.peerManagerFilters { - if err := filter(id); err != nil { - m.log.Debug(). - Err(err). - Str("peer_id", id.String()). - Msg("filtering topology peer") - - peerAllowed = false - break - } - } - - if peerAllowed { - peerIDs = append(peerIDs, id) - } - } - - return peerIDs -} - -// OnDisallowListNotification is called when a new disallow list update notification is distributed. -// It disconnects from all peers in the disallow list. -func (m *Middleware) OnDisallowListNotification(notification *p2p.DisallowListUpdateNotification) { - for _, pid := range m.peerIDs(notification.DisallowList) { - err := m.libP2PNode.RemovePeer(pid) - if err != nil { - m.log.Error().Err(err).Str("peer_id", pid.String()).Msg("failed to disconnect from blocklisted peer") - } - } -} - -// SendDirect sends msg on a 1-1 direct connection to the target ID. It models a guaranteed delivery asynchronous -// direct one-to-one connection on the underlying network. No intermediate node on the overlay is utilized -// as the router. -// -// Dispatch should be used whenever guaranteed delivery to a specific target is required. Otherwise, Publish is -// a more efficient candidate. -// -// The following benign errors can be returned: -// - the peer ID for the target node ID cannot be found. -// - the msg size was too large. -// - failed to send message to peer. -// -// All errors returned from this function can be considered benign. -func (m *Middleware) SendDirect(msg *network.OutgoingMessageScope) error { - // since it is a unicast, we only need to get the first peer ID. - peerID, err := m.idTranslator.GetPeerID(msg.TargetIds()[0]) - if err != nil { - return fmt.Errorf("could not find peer id for target id: %w", err) - } - - maxMsgSize := unicastMaxMsgSize(msg.PayloadType()) - if msg.Size() > maxMsgSize { - // message size goes beyond maximum size that the serializer can handle. - // proceeding with this message results in closing the connection by the target side, and - // delivery failure. - return fmt.Errorf("message size %d exceeds configured max message size %d", msg.Size(), maxMsgSize) - } - - maxTimeout := m.unicastMaxMsgDuration(msg.PayloadType()) - - // pass in a context with timeout to make the unicast call fail fast - ctx, cancel := context.WithTimeout(m.ctx, maxTimeout) - defer cancel() - - // protect the underlying connection from being inadvertently pruned by the peer manager while the stream and - // connection creation is being attempted, and remove it from protected list once stream created. - tag := fmt.Sprintf("%v:%v", msg.Channel(), msg.PayloadType()) - m.libP2PNode.Host().ConnManager().Protect(peerID, tag) - defer m.libP2PNode.Host().ConnManager().Unprotect(peerID, tag) - - // create new stream - // streams don't need to be reused and are fairly inexpensive to be created for each send. 
- // A stream creation does NOT incur an RTT as stream negotiation happens as part of the first message - // sent out the receiver - stream, err := m.libP2PNode.CreateStream(ctx, peerID) - if err != nil { - return fmt.Errorf("failed to create stream for %s: %w", msg.TargetIds()[0], err) - } - - success := false - - defer func() { - if success { - // close the stream immediately - err = stream.Close() - if err != nil { - err = fmt.Errorf("failed to close the stream for %s: %w", msg.TargetIds()[0], err) - } - } else { - resetErr := stream.Reset() - if resetErr != nil { - m.log.Err(resetErr).Msg("failed to reset stream") - } - } - }() - - deadline, _ := ctx.Deadline() - err = stream.SetWriteDeadline(deadline) - if err != nil { - return fmt.Errorf("failed to set write deadline for stream: %w", err) - } - - // create a gogo protobuf writer - bufw := bufio.NewWriter(stream) - writer := ggio.NewDelimitedWriter(bufw) - - err = writer.WriteMsg(msg.Proto()) - if err != nil { - return fmt.Errorf("failed to send message to %s: %w", msg.TargetIds()[0], err) - } - - // flush the stream - err = bufw.Flush() - if err != nil { - return fmt.Errorf("failed to flush stream for %s: %w", msg.TargetIds()[0], err) - } - - success = true - - return nil -} - -// handleIncomingStream handles an incoming stream from a remote peer -// it is a callback that gets called for each incoming stream by libp2p with a new stream object -func (m *Middleware) handleIncomingStream(s libp2pnetwork.Stream) { - // qualify the logger with local and remote address - log := p2putils.StreamLogger(m.log, s) - - log.Info().Msg("incoming stream received") - - success := false - - remotePeer := s.Conn().RemotePeer() - - defer func() { - if success { - err := s.Close() - if err != nil { - log.Err(err).Msg("failed to close stream") - } - } else { - err := s.Reset() - if err != nil { - log.Err(err).Msg("failed to reset stream") - } - } - }() - - // check if peer is currently rate limited before continuing to process stream. - if m.unicastRateLimiters.MessageRateLimiter.IsRateLimited(remotePeer) || m.unicastRateLimiters.BandWidthRateLimiter.IsRateLimited(remotePeer) { - log.Debug(). - Bool(logging.KeySuspicious, true). - Msg("dropping unicast stream from rate limited peer") - return - } - - // TODO: We need to allow per-topic timeouts and message size limits. - // This allows us to configure higher limits for topics on which we expect - // to receive large messages (e.g. Chunk Data Packs), and use the normal - // limits for other topics. In order to enable this, we will need to register - // a separate stream handler for each topic. 
- ctx, cancel := context.WithTimeout(m.ctx, LargeMsgUnicastTimeout) - defer cancel() - - deadline, _ := ctx.Deadline() - - err := s.SetReadDeadline(deadline) - if err != nil { - log.Err(err).Msg("failed to set read deadline for stream") - return - } - - // create the reader - r := ggio.NewDelimitedReader(s, LargeMsgMaxUnicastMsgSize) - for { - if ctx.Err() != nil { - return - } - - // Note: message fields must not be trusted until explicitly validated - var msg message.Message - // read the next message (blocking call) - err = r.ReadMsg(&msg) - if err != nil { - if err == io.EOF { - break - } - - m.log.Err(err).Msg("failed to read message") - return - } - - channel := channels.Channel(msg.ChannelID) - topic := channels.TopicFromChannel(channel, m.rootBlockID) - - // ignore messages if node does not have subscription to topic - if !m.libP2PNode.HasSubscription(topic) { - violation := &slashing.Violation{ - Identity: nil, PeerID: remotePeer.String(), Channel: channel, Protocol: message.ProtocolTypeUnicast, - } - - msgCode, err := codec.MessageCodeFromPayload(msg.Payload) - if err != nil { - violation.Err = err - m.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) - return - } - - // msg type is not guaranteed to be correct since it is set by the client - _, what, err := codec.InterfaceFromMessageCode(msgCode) - if err != nil { - violation.Err = err - m.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) - return - } - - violation.MsgType = what - violation.Err = ErrUnicastMsgWithoutSub - m.slashingViolationsConsumer.OnUnauthorizedUnicastOnChannel(violation) - return - } - - // check if unicast messages have reached rate limit before processing next message - if !m.unicastRateLimiters.MessageAllowed(remotePeer) { - return - } - - // check if we can get a role for logging and metrics label if this is not a public channel - role := "" - if !channels.IsPublicChannel(channels.Channel(msg.ChannelID)) { - if identity, ok := m.ov.Identity(remotePeer); ok { - role = identity.Role.String() - } - } - - // check unicast bandwidth rate limiter for peer - if !m.unicastRateLimiters.BandwidthAllowed( - remotePeer, - role, - msg.Size(), - network.MessageType(msg.Payload), - channels.Topic(msg.ChannelID)) { - return - } - - m.wg.Add(1) - go func() { - defer m.wg.Done() - m.processUnicastStreamMessage(remotePeer, &msg) - }() - } - - success = true -} - -// Subscribe subscribes the middleware to a channel. -// No errors are expected during normal operation. -func (m *Middleware) Subscribe(channel channels.Channel) error { - - topic := channels.TopicFromChannel(channel, m.rootBlockID) - - var peerFilter p2p.PeerFilter - var validators []validator.PubSubMessageValidator - if channels.IsPublicChannel(channel) { - // NOTE: for public channels the callback used to check if a node is staked will - // return true for every node. - peerFilter = p2p.AllowAllPeerFilter() - } else { - // for channels used by the staked nodes, add the topic validator to filter out messages from non-staked nodes - validators = append(validators, m.authorizedSenderValidator.PubSubMessageValidator(channel)) - - // NOTE: For non-public channels the libP2P node topic validator will reject - // messages from unstaked nodes. - peerFilter = m.isProtocolParticipant() - } - - topicValidator := flowpubsub.TopicValidator(m.log, peerFilter, validators...) 
- s, err := m.libP2PNode.Subscribe(topic, topicValidator)
- if err != nil {
- return fmt.Errorf("could not subscribe to topic (%s): %w", topic, err)
- }
-
- // create a new readSubscription with the context of the middleware
- rs := newReadSubscription(s, m.processPubSubMessages, m.log)
- m.wg.Add(1)
-
- // kick off the receive loop to continuously receive messages
- go func() {
- defer m.wg.Done()
- rs.receiveLoop(m.ctx)
- }()
-
- // update peers to add some nodes interested in the same topic as direct peers
- m.libP2PNode.RequestPeerUpdate()
-
- return nil
-}
-
-// processPubSubMessages processes messages received from the pubsub subscription.
-func (m *Middleware) processPubSubMessages(msg *message.Message, peerID peer.ID) {
- m.processAuthenticatedMessage(msg, peerID, message.ProtocolTypePubSub)
-}
-
-// Unsubscribe unsubscribes the middleware from a channel.
-// The following benign errors are expected during normal operations from libP2P:
-// - the libP2P node fails to unsubscribe from the topic created from the provided channel.
-//
-// All errors returned from this function can be considered benign.
-func (m *Middleware) Unsubscribe(channel channels.Channel) error {
- topic := channels.TopicFromChannel(channel, m.rootBlockID)
- err := m.libP2PNode.UnSubscribe(topic)
- if err != nil {
- return fmt.Errorf("failed to unsubscribe from channel (%s): %w", channel, err)
- }
-
- // update peers to remove nodes subscribed to channel
- m.libP2PNode.RequestPeerUpdate()
-
- return nil
-}
-
-// processUnicastStreamMessage will decode, perform authorized sender validation and process a message
-// sent via unicast stream. This func should be invoked in a separate goroutine to avoid creating a message decoding bottleneck.
-func (m *Middleware) processUnicastStreamMessage(remotePeer peer.ID, msg *message.Message) {
- channel := channels.Channel(msg.ChannelID)
-
- // TODO: once we've implemented per-topic message size limits per the TODO above,
- // we can remove this check
- maxSize, err := unicastMaxMsgSizeByCode(msg.Payload)
- if err != nil {
- m.slashingViolationsConsumer.OnUnknownMsgTypeError(&slashing.Violation{
- Identity: nil, PeerID: remotePeer.String(), MsgType: "", Channel: channel, Protocol: message.ProtocolTypeUnicast, Err: err,
- })
- return
- }
- if msg.Size() > maxSize {
- // message size exceeded
- m.log.Error().
- Str("peer_id", remotePeer.String()).
- Str("channel", msg.ChannelID).
- Int("max_size", maxSize).
- Int("size", msg.Size()).
- Bool(logging.KeySuspicious, true).
- Msg("received message exceeded permissible message maxSize")
- return
- }
-
- // if the message channel is not public, perform authorized sender validation
- if !channels.IsPublicChannel(channel) {
- messageType, err := m.authorizedSenderValidator.Validate(remotePeer, msg.Payload, channel, message.ProtocolTypeUnicast)
- if err != nil {
- m.log.
- Error().
- Err(err).
- Str("peer_id", remotePeer.String()).
- Str("type", messageType).
- Str("channel", msg.ChannelID).
- Msg("unicast authorized sender validation failed")
- return
- }
- }
- m.processAuthenticatedMessage(msg, remotePeer, message.ProtocolTypeUnicast)
-}
-
-// processAuthenticatedMessage processes a message and a source (indicated by its peer ID) and eventually passes it to the overlay.
-// In particular, it populates the `OriginID` field of the message with a Flow ID translated from this source.
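processAuthenticatedMessage, whose body follows, triages decode failures: attributable protocol violations are routed to the slashing consumer, while any unexpected error takes a catch-all path instead of crashing on attacker-controlled input. A hedged sketch of that classification pattern; the sentinel errors here stand in for the codec package's typed errors and IsErr* helpers:

package sketch

import "errors"

// Stand-ins for the codec package's typed errors.
var (
	errUnknownMsgCode  = errors.New("unknown message code")
	errUnmarshal       = errors.New("failed to unmarshal message")
	errInvalidEncoding = errors.New("invalid encoding")
)

type verdict int

const (
	accept verdict = iota
	reportUnknownType
	reportInvalidMsg
	reportUnexpected
)

// triageDecodeError mirrors the switch in processAuthenticatedMessage:
// known, attributable failures map to dedicated violation handlers, and
// any other non-nil error is treated as an internal bug without panicking,
// since crashing on external input would create a DoS vector.
func triageDecodeError(err error) verdict {
	switch {
	case err == nil:
		return accept
	case errors.Is(err, errUnknownMsgCode):
		return reportUnknownType
	case errors.Is(err, errUnmarshal), errors.Is(err, errInvalidEncoding):
		return reportInvalidMsg
	default:
		return reportUnexpected
	}
}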
-func (m *Middleware) processAuthenticatedMessage(msg *message.Message, peerID peer.ID, protocol message.ProtocolType) {
- originId, err := m.idTranslator.GetFlowID(peerID)
- if err != nil {
- // this error should never happen. By the time the message gets here, the peer should be
- // authenticated, which means it must be known.
- m.log.Error().
- Err(err).
- Str("peer_id", peerID.String()).
- Bool(logging.KeySuspicious, true).
- Msg("dropped message from unknown peer")
- return
- }
-
- channel := channels.Channel(msg.ChannelID)
- decodedMsgPayload, err := m.codec.Decode(msg.Payload)
- switch {
- case codec.IsErrUnknownMsgCode(err):
- // slash peer if message contains unknown message code byte
- violation := &slashing.Violation{
- PeerID: peerID.String(), OriginID: originId, Channel: channel, Protocol: protocol, Err: err,
- }
- m.slashingViolationsConsumer.OnUnknownMsgTypeError(violation)
- return
- case codec.IsErrMsgUnmarshal(err) || codec.IsErrInvalidEncoding(err):
- // slash if peer sent a message that could not be unmarshalled into the message type denoted by the message code byte
- violation := &slashing.Violation{
- PeerID: peerID.String(), OriginID: originId, Channel: channel, Protocol: protocol, Err: err,
- }
- m.slashingViolationsConsumer.OnInvalidMsgError(violation)
- return
- case err != nil:
- // this condition should never happen and indicates there's a bug
- // don't crash as a result of external inputs since that creates a DoS vector
- // collect slashing data because this could potentially lead to slashing
- err = fmt.Errorf("unexpected error during message validation: %w", err)
- violation := &slashing.Violation{
- PeerID: peerID.String(), OriginID: originId, Channel: channel, Protocol: protocol, Err: err,
- }
- m.slashingViolationsConsumer.OnUnexpectedError(violation)
- return
- }
-
- scope, err := network.NewIncomingScope(originId, protocol, msg, decodedMsgPayload)
- if err != nil {
- m.log.Error().
- Err(err).
- Str("peer_id", peerID.String()).
- Str("origin_id", originId.String()).
- Msg("could not create incoming message scope")
- return
- }
-
- m.processMessage(scope)
-}
-
-// processMessage processes a message and eventually passes it to the overlay
-func (m *Middleware) processMessage(scope *network.IncomingMessageScope) {
-
- logger := m.log.With().
- Str("channel", scope.Channel().String()).
- Str("type", scope.Protocol().String()).
- Int("msg_size", scope.Size()).
- Hex("origin_id", logging.ID(scope.OriginId())).
- Logger()
-
- // run through all the message validators
- for _, v := range m.validators {
- // if any one fails, stop message propagation
- if !v.Validate(*scope) {
- logger.Debug().Msg("new message filtered by message validators")
- return
- }
- }
-
- logger.Debug().Msg("processing new message")
-
- // if validation passed, send the message to the overlay
- err := m.ov.Receive(scope)
- if err != nil {
- m.log.Error().Err(err).Msg("could not deliver payload")
- }
-}
-
-// Publish publishes a message on the channel. It models a distributed broadcast where the message is meant for all or
-// many nodes subscribing to the channel. It does not guarantee delivery, though, and operates on a best-effort
-// basis.
-// The following benign errors are expected during normal operations:
-// - the msg cannot be marshalled.
-// - the msg size exceeds DefaultMaxPubSubMsgSize.
-// - the libP2P node fails to publish the message.
-//
-// All errors returned from this function can be considered benign.
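Publish, implemented below, must reject oversized payloads itself because libp2p pubsub silently drops any message above its configured maximum, which would otherwise leave the caller unaware that the message is undeliverable. A minimal sketch of that guard; the 1 MiB constant is illustrative and stands in for p2pnode.DefaultMaxPubSubMsgSize:

package sketch

import "fmt"

// maxPubSubMsgSize stands in for the node's configured pubsub limit.
const maxPubSubMsgSize = 1 << 20 // 1 MiB, illustrative only

// guardPublishSize returns an error for payloads that pubsub would drop
// silently, so the sender learns the message is undeliverable.
func guardPublishSize(data []byte) error {
	if len(data) > maxPubSubMsgSize {
		return fmt.Errorf("message size %d exceeds configured max message size %d", len(data), maxPubSubMsgSize)
	}
	return nil
}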
-func (m *Middleware) Publish(msg *network.OutgoingMessageScope) error { - m.log.Debug(). - Str("channel", msg.Channel().String()). - Interface("msg", msg.Proto()). - Str("type", msg.PayloadType()). - Int("msg_size", msg.Size()). - Msg("publishing new message") - - // convert the message to bytes to be put on the wire. - data, err := msg.Proto().Marshal() - if err != nil { - return fmt.Errorf("failed to marshal the message: %w", err) - } - - msgSize := len(data) - if msgSize > p2pnode.DefaultMaxPubSubMsgSize { - // libp2p pubsub will silently drop the message if its size is greater than the configured pubsub max message size - // hence return an error as this message is undeliverable - return fmt.Errorf("message size %d exceeds configured max message size %d", msgSize, p2pnode.DefaultMaxPubSubMsgSize) - } - - topic := channels.TopicFromChannel(msg.Channel(), m.rootBlockID) - - // publish the bytes on the topic - err = m.libP2PNode.Publish(m.ctx, topic, data) - if err != nil { - return fmt.Errorf("failed to publish the message: %w", err) - } - - return nil -} - -// IsConnected returns true if this node is connected to the node with id nodeID. -// All errors returned from this function can be considered benign. -func (m *Middleware) IsConnected(nodeID flow.Identifier) (bool, error) { - peerID, err := m.idTranslator.GetPeerID(nodeID) - if err != nil { - return false, fmt.Errorf("could not find peer id for target id: %w", err) - } - return m.libP2PNode.IsConnected(peerID) -} - -// unicastMaxMsgSize returns the max permissible size for a unicast message -func unicastMaxMsgSize(messageType string) int { - switch messageType { - case "messages.ChunkDataResponse": - return LargeMsgMaxUnicastMsgSize - default: - return DefaultMaxUnicastMsgSize - } -} - -// unicastMaxMsgSizeByCode returns the max permissible size for a unicast message code -func unicastMaxMsgSizeByCode(payload []byte) (int, error) { - msgCode, err := codec.MessageCodeFromPayload(payload) - if err != nil { - return 0, err - } - _, messageType, err := codec.InterfaceFromMessageCode(msgCode) - if err != nil { - return 0, err - } - - maxSize := unicastMaxMsgSize(messageType) - return maxSize, nil -} - -// unicastMaxMsgDuration returns the max duration to allow for a unicast send to complete -func (m *Middleware) unicastMaxMsgDuration(messageType string) time.Duration { - switch messageType { - case "messages.ChunkDataResponse": - if LargeMsgUnicastTimeout > m.unicastMessageTimeout { - return LargeMsgUnicastTimeout - } - return m.unicastMessageTimeout - default: - return m.unicastMessageTimeout - } -} diff --git a/network/p2p/mock/adjust_function.go b/network/p2p/mock/adjust_function.go deleted file mode 100644 index 675dddb2efd..00000000000 --- a/network/p2p/mock/adjust_function.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mockp2p - -import ( - p2p "github.com/onflow/flow-go/network/p2p" - mock "github.com/stretchr/testify/mock" -) - -// AdjustFunction is an autogenerated mock type for the AdjustFunction type -type AdjustFunction struct { - mock.Mock -} - -// Execute provides a mock function with given fields: record -func (_m *AdjustFunction) Execute(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { - ret := _m.Called(record) - - var r0 p2p.GossipSubSpamRecord - if rf, ok := ret.Get(0).(func(p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord); ok { - r0 = rf(record) - } else { - r0 = ret.Get(0).(p2p.GossipSubSpamRecord) - } - - return r0 -} - -type mockConstructorTestingTNewAdjustFunction interface { - mock.TestingT - Cleanup(func()) -} - -// NewAdjustFunction creates a new instance of AdjustFunction. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewAdjustFunction(t mockConstructorTestingTNewAdjustFunction) *AdjustFunction { - mock := &AdjustFunction{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/basic_gossip_sub_rpc_inspector.go b/network/p2p/mock/basic_gossip_sub_rpc_inspector.go deleted file mode 100644 index c6c261e75e1..00000000000 --- a/network/p2p/mock/basic_gossip_sub_rpc_inspector.go +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - mock "github.com/stretchr/testify/mock" - - peer "github.com/libp2p/go-libp2p/core/peer" - - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -// BasicGossipSubRPCInspector is an autogenerated mock type for the BasicGossipSubRPCInspector type -type BasicGossipSubRPCInspector struct { - mock.Mock -} - -// Inspect provides a mock function with given fields: _a0, _a1 -func (_m *BasicGossipSubRPCInspector) Inspect(_a0 peer.ID, _a1 *pubsub.RPC) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(peer.ID, *pubsub.RPC) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewBasicGossipSubRPCInspector interface { - mock.TestingT - Cleanup(func()) -} - -// NewBasicGossipSubRPCInspector creates a new instance of BasicGossipSubRPCInspector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBasicGossipSubRPCInspector(t mockConstructorTestingTNewBasicGossipSubRPCInspector) *BasicGossipSubRPCInspector { - mock := &BasicGossipSubRPCInspector{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/basic_rate_limiter.go b/network/p2p/mock/basic_rate_limiter.go index d76bc2956e1..19330e4c1ff 100644 --- a/network/p2p/mock/basic_rate_limiter.go +++ b/network/p2p/mock/basic_rate_limiter.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
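The remainder of this diff is mock regeneration: the mockery-generated files move from package mockp2p to package mock, drop the generator version pin from their headers, and (as the BasicRateLimiter hunk below shows) gain a guard that panics when a mocked method is called without a configured return value. For context, a hedged sketch of how such a generated mock is typically driven in a test; the import alias and the peer ID literal are illustrative:

package sketch_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	mockp2p "github.com/onflow/flow-go/network/p2p/mock"
)

func TestRateLimiterDenies(t *testing.T) {
	// The constructor registers a cleanup hook that asserts all expectations.
	limiter := mockp2p.NewBasicRateLimiter(t)

	// Without this On(...) the regenerated mock would panic with
	// "no return value specified for Allow" instead of returning false.
	limiter.On("Allow", mock.Anything, mock.Anything).Return(false)

	if limiter.Allow("peer-1", 1024) {
		t.Fatal("expected the mocked limiter to deny the message")
	}
}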
-package mockp2p +package mock import ( irrecoverable "github.com/onflow/flow-go/module/irrecoverable" @@ -18,6 +18,10 @@ type BasicRateLimiter struct { func (_m *BasicRateLimiter) Allow(peerID peer.ID, msgSize int) bool { ret := _m.Called(peerID, msgSize) + if len(ret) == 0 { + panic("no return value specified for Allow") + } + var r0 bool if rf, ok := ret.Get(0).(func(peer.ID, int) bool); ok { r0 = rf(peerID, msgSize) @@ -28,10 +32,14 @@ func (_m *BasicRateLimiter) Allow(peerID peer.ID, msgSize int) bool { return r0 } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *BasicRateLimiter) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -44,10 +52,14 @@ func (_m *BasicRateLimiter) Done() <-chan struct{} { return r0 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *BasicRateLimiter) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -65,13 +77,12 @@ func (_m *BasicRateLimiter) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewBasicRateLimiter interface { +// NewBasicRateLimiter creates a new instance of BasicRateLimiter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBasicRateLimiter(t interface { mock.TestingT Cleanup(func()) -} - -// NewBasicRateLimiter creates a new instance of BasicRateLimiter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBasicRateLimiter(t mockConstructorTestingTNewBasicRateLimiter) *BasicRateLimiter { +}) *BasicRateLimiter { mock := &BasicRateLimiter{} mock.Mock.Test(t) diff --git a/network/p2p/mock/collection_cluster_changes_consumer.go b/network/p2p/mock/collection_cluster_changes_consumer.go new file mode 100644 index 00000000000..0476289512f --- /dev/null +++ b/network/p2p/mock/collection_cluster_changes_consumer.go @@ -0,0 +1,32 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// CollectionClusterChangesConsumer is an autogenerated mock type for the CollectionClusterChangesConsumer type +type CollectionClusterChangesConsumer struct { + mock.Mock +} + +// ActiveClustersChanged provides a mock function with given fields: _a0 +func (_m *CollectionClusterChangesConsumer) ActiveClustersChanged(_a0 flow.ChainIDList) { + _m.Called(_a0) +} + +// NewCollectionClusterChangesConsumer creates a new instance of CollectionClusterChangesConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewCollectionClusterChangesConsumer(t interface { + mock.TestingT + Cleanup(func()) +}) *CollectionClusterChangesConsumer { + mock := &CollectionClusterChangesConsumer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/connection_gater.go b/network/p2p/mock/connection_gater.go index d5943e8efa9..a033fe68fe7 100644 --- a/network/p2p/mock/connection_gater.go +++ b/network/p2p/mock/connection_gater.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( control "github.com/libp2p/go-libp2p/core/control" @@ -10,6 +10,8 @@ import ( network "github.com/libp2p/go-libp2p/core/network" + p2p "github.com/onflow/flow-go/network/p2p" + peer "github.com/libp2p/go-libp2p/core/peer" ) @@ -22,6 +24,10 @@ type ConnectionGater struct { func (_m *ConnectionGater) InterceptAccept(_a0 network.ConnMultiaddrs) bool { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for InterceptAccept") + } + var r0 bool if rf, ok := ret.Get(0).(func(network.ConnMultiaddrs) bool); ok { r0 = rf(_a0) @@ -36,6 +42,10 @@ func (_m *ConnectionGater) InterceptAccept(_a0 network.ConnMultiaddrs) bool { func (_m *ConnectionGater) InterceptAddrDial(_a0 peer.ID, _a1 multiaddr.Multiaddr) bool { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for InterceptAddrDial") + } + var r0 bool if rf, ok := ret.Get(0).(func(peer.ID, multiaddr.Multiaddr) bool); ok { r0 = rf(_a0, _a1) @@ -50,6 +60,10 @@ func (_m *ConnectionGater) InterceptAddrDial(_a0 peer.ID, _a1 multiaddr.Multiadd func (_m *ConnectionGater) InterceptPeerDial(p peer.ID) bool { ret := _m.Called(p) + if len(ret) == 0 { + panic("no return value specified for InterceptPeerDial") + } + var r0 bool if rf, ok := ret.Get(0).(func(peer.ID) bool); ok { r0 = rf(p) @@ -64,6 +78,10 @@ func (_m *ConnectionGater) InterceptPeerDial(p peer.ID) bool { func (_m *ConnectionGater) InterceptSecured(_a0 network.Direction, _a1 peer.ID, _a2 network.ConnMultiaddrs) bool { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for InterceptSecured") + } + var r0 bool if rf, ok := ret.Get(0).(func(network.Direction, peer.ID, network.ConnMultiaddrs) bool); ok { r0 = rf(_a0, _a1, _a2) @@ -78,6 +96,10 @@ func (_m *ConnectionGater) InterceptSecured(_a0 network.Direction, _a1 peer.ID, func (_m *ConnectionGater) InterceptUpgraded(_a0 network.Conn) (bool, control.DisconnectReason) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for InterceptUpgraded") + } + var r0 bool var r1 control.DisconnectReason if rf, ok := ret.Get(0).(func(network.Conn) (bool, control.DisconnectReason)); ok { @@ -98,13 +120,17 @@ func (_m *ConnectionGater) InterceptUpgraded(_a0 network.Conn) (bool, control.Di return r0, r1 } -type mockConstructorTestingTNewConnectionGater interface { - mock.TestingT - Cleanup(func()) +// SetDisallowListOracle provides a mock function with given fields: oracle +func (_m *ConnectionGater) SetDisallowListOracle(oracle p2p.DisallowListOracle) { + _m.Called(oracle) } // NewConnectionGater creates a new instance of ConnectionGater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConnectionGater(t mockConstructorTestingTNewConnectionGater) *ConnectionGater { +// The first argument is typically a *testing.T value. 
+func NewConnectionGater(t interface { + mock.TestingT + Cleanup(func()) +}) *ConnectionGater { mock := &ConnectionGater{} mock.Mock.Test(t) diff --git a/network/p2p/mock/connector.go b/network/p2p/mock/connector.go index d1e6733cbab..e5fcd5e2047 100644 --- a/network/p2p/mock/connector.go +++ b/network/p2p/mock/connector.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( context "context" @@ -15,18 +15,17 @@ type Connector struct { mock.Mock } -// UpdatePeers provides a mock function with given fields: ctx, peerIDs -func (_m *Connector) UpdatePeers(ctx context.Context, peerIDs peer.IDSlice) { - _m.Called(ctx, peerIDs) +// Connect provides a mock function with given fields: ctx, peerChan +func (_m *Connector) Connect(ctx context.Context, peerChan <-chan peer.AddrInfo) { + _m.Called(ctx, peerChan) } -type mockConstructorTestingTNewConnector interface { +// NewConnector creates a new instance of Connector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConnector(t interface { mock.TestingT Cleanup(func()) -} - -// NewConnector creates a new instance of Connector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConnector(t mockConstructorTestingTNewConnector) *Connector { +}) *Connector { mock := &Connector{} mock.Mock.Test(t) diff --git a/network/p2p/mock/connector_host.go b/network/p2p/mock/connector_host.go index 549c013db28..90615a861e7 100644 --- a/network/p2p/mock/connector_host.go +++ b/network/p2p/mock/connector_host.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
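The Connector mock above reflects an interface change from UpdatePeers(ctx, peer.IDSlice) to Connect(ctx, <-chan peer.AddrInfo): peers to dial now arrive on a channel rather than as a slice. A hedged sketch of a consumer loop matching that shape; drainAndDial and the injected dial function are illustrative, not the production connector:

package sketch

import (
	"context"

	"github.com/libp2p/go-libp2p/core/peer"
)

// drainAndDial consumes peer addresses until the channel closes or the
// context is cancelled; dial stands in for the host's connect call.
func drainAndDial(ctx context.Context, peerChan <-chan peer.AddrInfo, dial func(context.Context, peer.AddrInfo) error) {
	for {
		select {
		case <-ctx.Done():
			return
		case info, ok := <-peerChan:
			if !ok {
				return
			}
			// Best effort: a failed dial should not stop the loop.
			_ = dial(ctx, info)
		}
	}
}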
-package mockp2p +package mock import ( network "github.com/libp2p/go-libp2p/core/network" @@ -14,13 +14,17 @@ type ConnectorHost struct { mock.Mock } -// ClosePeer provides a mock function with given fields: id -func (_m *ConnectorHost) ClosePeer(id peer.ID) error { - ret := _m.Called(id) +// ClosePeer provides a mock function with given fields: peerId +func (_m *ConnectorHost) ClosePeer(peerId peer.ID) error { + ret := _m.Called(peerId) + + if len(ret) == 0 { + panic("no return value specified for ClosePeer") + } var r0 error if rf, ok := ret.Get(0).(func(peer.ID) error); ok { - r0 = rf(id) + r0 = rf(peerId) } else { r0 = ret.Error(0) } @@ -28,10 +32,14 @@ func (_m *ConnectorHost) ClosePeer(id peer.ID) error { return r0 } -// Connections provides a mock function with given fields: +// Connections provides a mock function with no fields func (_m *ConnectorHost) Connections() []network.Conn { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Connections") + } + var r0 []network.Conn if rf, ok := ret.Get(0).(func() []network.Conn); ok { r0 = rf() @@ -44,10 +52,14 @@ func (_m *ConnectorHost) Connections() []network.Conn { return r0 } -// ID provides a mock function with given fields: +// ID provides a mock function with no fields func (_m *ConnectorHost) ID() peer.ID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ID") + } + var r0 peer.ID if rf, ok := ret.Get(0).(func() peer.ID); ok { r0 = rf() @@ -58,13 +70,35 @@ func (_m *ConnectorHost) ID() peer.ID { return r0 } -// IsProtected provides a mock function with given fields: id -func (_m *ConnectorHost) IsProtected(id peer.ID) bool { - ret := _m.Called(id) +// IsConnectedTo provides a mock function with given fields: peerId +func (_m *ConnectorHost) IsConnectedTo(peerId peer.ID) bool { + ret := _m.Called(peerId) + + if len(ret) == 0 { + panic("no return value specified for IsConnectedTo") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(peer.ID) bool); ok { + r0 = rf(peerId) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// IsProtected provides a mock function with given fields: peerId +func (_m *ConnectorHost) IsProtected(peerId peer.ID) bool { + ret := _m.Called(peerId) + + if len(ret) == 0 { + panic("no return value specified for IsProtected") + } var r0 bool if rf, ok := ret.Get(0).(func(peer.ID) bool); ok { - r0 = rf(id) + r0 = rf(peerId) } else { r0 = ret.Get(0).(bool) } @@ -72,13 +106,17 @@ func (_m *ConnectorHost) IsProtected(id peer.ID) bool { return r0 } -// PeerInfo provides a mock function with given fields: id -func (_m *ConnectorHost) PeerInfo(id peer.ID) peer.AddrInfo { - ret := _m.Called(id) +// PeerInfo provides a mock function with given fields: peerId +func (_m *ConnectorHost) PeerInfo(peerId peer.ID) peer.AddrInfo { + ret := _m.Called(peerId) + + if len(ret) == 0 { + panic("no return value specified for PeerInfo") + } var r0 peer.AddrInfo if rf, ok := ret.Get(0).(func(peer.ID) peer.AddrInfo); ok { - r0 = rf(id) + r0 = rf(peerId) } else { r0 = ret.Get(0).(peer.AddrInfo) } @@ -86,13 +124,12 @@ func (_m *ConnectorHost) PeerInfo(id peer.ID) peer.AddrInfo { return r0 } -type mockConstructorTestingTNewConnectorHost interface { +// NewConnectorHost creates a new instance of ConnectorHost. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewConnectorHost(t interface { mock.TestingT Cleanup(func()) -} - -// NewConnectorHost creates a new instance of ConnectorHost. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConnectorHost(t mockConstructorTestingTNewConnectorHost) *ConnectorHost { +}) *ConnectorHost { mock := &ConnectorHost{} mock.Mock.Test(t) diff --git a/network/p2p/mock/core_p2_p.go b/network/p2p/mock/core_p2_p.go new file mode 100644 index 00000000000..e2fe3c123b6 --- /dev/null +++ b/network/p2p/mock/core_p2_p.go @@ -0,0 +1,114 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + host "github.com/libp2p/go-libp2p/core/host" + component "github.com/onflow/flow-go/module/component" + + irrecoverable "github.com/onflow/flow-go/module/irrecoverable" + + mock "github.com/stretchr/testify/mock" +) + +// CoreP2P is an autogenerated mock type for the CoreP2P type +type CoreP2P struct { + mock.Mock +} + +// GetIPPort provides a mock function with no fields +func (_m *CoreP2P) GetIPPort() (string, string, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetIPPort") + } + + var r0 string + var r1 string + var r2 error + if rf, ok := ret.Get(0).(func() (string, string, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func() string); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(string) + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Host provides a mock function with no fields +func (_m *CoreP2P) Host() host.Host { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Host") + } + + var r0 host.Host + if rf, ok := ret.Get(0).(func() host.Host); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(host.Host) + } + } + + return r0 +} + +// SetComponentManager provides a mock function with given fields: cm +func (_m *CoreP2P) SetComponentManager(cm *component.ComponentManager) { + _m.Called(cm) +} + +// Start provides a mock function with given fields: ctx +func (_m *CoreP2P) Start(ctx irrecoverable.SignalerContext) { + _m.Called(ctx) +} + +// Stop provides a mock function with no fields +func (_m *CoreP2P) Stop() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Stop") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewCoreP2P creates a new instance of CoreP2P. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCoreP2P(t interface { + mock.TestingT + Cleanup(func()) +}) *CoreP2P { + mock := &CoreP2P{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/create_node_func.go b/network/p2p/mock/create_node_func.go deleted file mode 100644 index 3169c71cb1e..00000000000 --- a/network/p2p/mock/create_node_func.go +++ /dev/null @@ -1,48 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mockp2p - -import ( - host "github.com/libp2p/go-libp2p/core/host" - mock "github.com/stretchr/testify/mock" - - p2p "github.com/onflow/flow-go/network/p2p" - - zerolog "github.com/rs/zerolog" -) - -// CreateNodeFunc is an autogenerated mock type for the CreateNodeFunc type -type CreateNodeFunc struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *CreateNodeFunc) Execute(_a0 zerolog.Logger, _a1 host.Host, _a2 p2p.ProtocolPeerCache, _a3 p2p.PeerManager) p2p.LibP2PNode { - ret := _m.Called(_a0, _a1, _a2, _a3) - - var r0 p2p.LibP2PNode - if rf, ok := ret.Get(0).(func(zerolog.Logger, host.Host, p2p.ProtocolPeerCache, p2p.PeerManager) p2p.LibP2PNode); ok { - r0 = rf(_a0, _a1, _a2, _a3) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.LibP2PNode) - } - } - - return r0 -} - -type mockConstructorTestingTNewCreateNodeFunc interface { - mock.TestingT - Cleanup(func()) -} - -// NewCreateNodeFunc creates a new instance of CreateNodeFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCreateNodeFunc(t mockConstructorTestingTNewCreateNodeFunc) *CreateNodeFunc { - mock := &CreateNodeFunc{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/disallow_list_cache.go b/network/p2p/mock/disallow_list_cache.go new file mode 100644 index 00000000000..23e75b88f9e --- /dev/null +++ b/network/p2p/mock/disallow_list_cache.go @@ -0,0 +1,109 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + network "github.com/onflow/flow-go/network" + mock "github.com/stretchr/testify/mock" + + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// DisallowListCache is an autogenerated mock type for the DisallowListCache type +type DisallowListCache struct { + mock.Mock +} + +// AllowFor provides a mock function with given fields: peerID, cause +func (_m *DisallowListCache) AllowFor(peerID peer.ID, cause network.DisallowListedCause) []network.DisallowListedCause { + ret := _m.Called(peerID, cause) + + if len(ret) == 0 { + panic("no return value specified for AllowFor") + } + + var r0 []network.DisallowListedCause + if rf, ok := ret.Get(0).(func(peer.ID, network.DisallowListedCause) []network.DisallowListedCause); ok { + r0 = rf(peerID, cause) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]network.DisallowListedCause) + } + } + + return r0 +} + +// DisallowFor provides a mock function with given fields: peerID, cause +func (_m *DisallowListCache) DisallowFor(peerID peer.ID, cause network.DisallowListedCause) ([]network.DisallowListedCause, error) { + ret := _m.Called(peerID, cause) + + if len(ret) == 0 { + panic("no return value specified for DisallowFor") + } + + var r0 []network.DisallowListedCause + var r1 error + if rf, ok := ret.Get(0).(func(peer.ID, network.DisallowListedCause) ([]network.DisallowListedCause, error)); ok { + return rf(peerID, cause) + } + if rf, ok := ret.Get(0).(func(peer.ID, network.DisallowListedCause) []network.DisallowListedCause); ok { + r0 = rf(peerID, cause) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]network.DisallowListedCause) + } + } + + if rf, ok := ret.Get(1).(func(peer.ID, network.DisallowListedCause) error); ok { + r1 = rf(peerID, cause) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsDisallowListed provides a mock function with given fields: peerID +func (_m *DisallowListCache) IsDisallowListed(peerID peer.ID) 
([]network.DisallowListedCause, bool) { + ret := _m.Called(peerID) + + if len(ret) == 0 { + panic("no return value specified for IsDisallowListed") + } + + var r0 []network.DisallowListedCause + var r1 bool + if rf, ok := ret.Get(0).(func(peer.ID) ([]network.DisallowListedCause, bool)); ok { + return rf(peerID) + } + if rf, ok := ret.Get(0).(func(peer.ID) []network.DisallowListedCause); ok { + r0 = rf(peerID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]network.DisallowListedCause) + } + } + + if rf, ok := ret.Get(1).(func(peer.ID) bool); ok { + r1 = rf(peerID) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// NewDisallowListCache creates a new instance of DisallowListCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDisallowListCache(t interface { + mock.TestingT + Cleanup(func()) +}) *DisallowListCache { + mock := &DisallowListCache{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/disallow_list_consumer.go b/network/p2p/mock/disallow_list_consumer.go deleted file mode 100644 index 2800a5aa909..00000000000 --- a/network/p2p/mock/disallow_list_consumer.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" -) - -// DisallowListConsumer is an autogenerated mock type for the DisallowListConsumer type -type DisallowListConsumer struct { - mock.Mock -} - -// OnNodeDisallowListUpdate provides a mock function with given fields: list -func (_m *DisallowListConsumer) OnNodeDisallowListUpdate(list flow.IdentifierList) { - _m.Called(list) -} - -type mockConstructorTestingTNewDisallowListConsumer interface { - mock.TestingT - Cleanup(func()) -} - -// NewDisallowListConsumer creates a new instance of DisallowListConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDisallowListConsumer(t mockConstructorTestingTNewDisallowListConsumer) *DisallowListConsumer { - mock := &DisallowListConsumer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/disallow_list_notification_consumer.go b/network/p2p/mock/disallow_list_notification_consumer.go index 7df8437ddcf..7f7f7103def 100644 --- a/network/p2p/mock/disallow_list_notification_consumer.go +++ b/network/p2p/mock/disallow_list_notification_consumer.go @@ -1,10 +1,12 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
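The DisallowListCache interface mocked above implies cause-set semantics: DisallowFor adds a cause for a peer, AllowFor removes one, and both return the causes still in force, so a peer stays disallow-listed until its last cause is cleared. A hedged sketch of a cache with those semantics, modeling causes as strings and keying peers by ID string; the production cache lives elsewhere and additionally handles sizing and concurrency:

package sketch

type cause string

// disallowListCache maps each peer to the set of causes for which it is
// currently disallow-listed.
type disallowListCache struct {
	causes map[string]map[cause]struct{} // peer ID -> active causes
}

func newDisallowListCache() *disallowListCache {
	return &disallowListCache{causes: make(map[string]map[cause]struct{})}
}

// disallowFor records a cause against the peer and returns all causes now in force.
func (c *disallowListCache) disallowFor(peerID string, why cause) []cause {
	set, ok := c.causes[peerID]
	if !ok {
		set = make(map[cause]struct{})
		c.causes[peerID] = set
	}
	set[why] = struct{}{}
	return c.active(peerID)
}

// allowFor clears one cause; the peer stays listed while any cause remains.
func (c *disallowListCache) allowFor(peerID string, why cause) []cause {
	delete(c.causes[peerID], why)
	return c.active(peerID)
}

// isDisallowListed reports the active causes and whether any exist.
func (c *disallowListCache) isDisallowListed(peerID string) ([]cause, bool) {
	active := c.active(peerID)
	return active, len(active) > 0
}

func (c *disallowListCache) active(peerID string) []cause {
	out := make([]cause, 0, len(c.causes[peerID]))
	for why := range c.causes[peerID] {
		out = append(out, why)
	}
	return out
}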
-package mockp2p +package mock import ( - p2p "github.com/onflow/flow-go/network/p2p" + network "github.com/onflow/flow-go/network" mock "github.com/stretchr/testify/mock" + + peer "github.com/libp2p/go-libp2p/core/peer" ) // DisallowListNotificationConsumer is an autogenerated mock type for the DisallowListNotificationConsumer type @@ -12,18 +14,22 @@ type DisallowListNotificationConsumer struct { mock.Mock } -// OnDisallowListNotification provides a mock function with given fields: _a0 -func (_m *DisallowListNotificationConsumer) OnDisallowListNotification(_a0 *p2p.DisallowListUpdateNotification) { - _m.Called(_a0) +// OnAllowListNotification provides a mock function with given fields: id, cause +func (_m *DisallowListNotificationConsumer) OnAllowListNotification(id peer.ID, cause network.DisallowListedCause) { + _m.Called(id, cause) } -type mockConstructorTestingTNewDisallowListNotificationConsumer interface { - mock.TestingT - Cleanup(func()) +// OnDisallowListNotification provides a mock function with given fields: id, cause +func (_m *DisallowListNotificationConsumer) OnDisallowListNotification(id peer.ID, cause network.DisallowListedCause) { + _m.Called(id, cause) } // NewDisallowListNotificationConsumer creates a new instance of DisallowListNotificationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDisallowListNotificationConsumer(t mockConstructorTestingTNewDisallowListNotificationConsumer) *DisallowListNotificationConsumer { +// The first argument is typically a *testing.T value. +func NewDisallowListNotificationConsumer(t interface { + mock.TestingT + Cleanup(func()) +}) *DisallowListNotificationConsumer { mock := &DisallowListNotificationConsumer{} mock.Mock.Test(t) diff --git a/network/p2p/mock/disallow_list_notification_distributor.go b/network/p2p/mock/disallow_list_notification_distributor.go deleted file mode 100644 index 82419cb87e1..00000000000 --- a/network/p2p/mock/disallow_list_notification_distributor.go +++ /dev/null @@ -1,88 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mockp2p - -import ( - flow "github.com/onflow/flow-go/model/flow" - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - - mock "github.com/stretchr/testify/mock" - - p2p "github.com/onflow/flow-go/network/p2p" -) - -// DisallowListNotificationDistributor is an autogenerated mock type for the DisallowListNotificationDistributor type -type DisallowListNotificationDistributor struct { - mock.Mock -} - -// AddConsumer provides a mock function with given fields: _a0 -func (_m *DisallowListNotificationDistributor) AddConsumer(_a0 p2p.DisallowListNotificationConsumer) { - _m.Called(_a0) -} - -// DistributeBlockListNotification provides a mock function with given fields: list -func (_m *DisallowListNotificationDistributor) DistributeBlockListNotification(list flow.IdentifierList) error { - ret := _m.Called(list) - - var r0 error - if rf, ok := ret.Get(0).(func(flow.IdentifierList) error); ok { - r0 = rf(list) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Done provides a mock function with given fields: -func (_m *DisallowListNotificationDistributor) Done() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Ready provides a mock function with given fields: -func (_m *DisallowListNotificationDistributor) Ready() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Start provides a mock function with given fields: _a0 -func (_m *DisallowListNotificationDistributor) Start(_a0 irrecoverable.SignalerContext) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewDisallowListNotificationDistributor interface { - mock.TestingT - Cleanup(func()) -} - -// NewDisallowListNotificationDistributor creates a new instance of DisallowListNotificationDistributor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDisallowListNotificationDistributor(t mockConstructorTestingTNewDisallowListNotificationDistributor) *DisallowListNotificationDistributor { - mock := &DisallowListNotificationDistributor{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/disallow_list_oracle.go b/network/p2p/mock/disallow_list_oracle.go new file mode 100644 index 00000000000..760569cc7cf --- /dev/null +++ b/network/p2p/mock/disallow_list_oracle.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + network "github.com/onflow/flow-go/network" + mock "github.com/stretchr/testify/mock" + + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// DisallowListOracle is an autogenerated mock type for the DisallowListOracle type +type DisallowListOracle struct { + mock.Mock +} + +// IsDisallowListed provides a mock function with given fields: peerId +func (_m *DisallowListOracle) IsDisallowListed(peerId peer.ID) ([]network.DisallowListedCause, bool) { + ret := _m.Called(peerId) + + if len(ret) == 0 { + panic("no return value specified for IsDisallowListed") + } + + var r0 []network.DisallowListedCause + var r1 bool + if rf, ok := ret.Get(0).(func(peer.ID) ([]network.DisallowListedCause, bool)); ok { + return rf(peerId) + } + if rf, ok := ret.Get(0).(func(peer.ID) []network.DisallowListedCause); ok { + r0 = rf(peerId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]network.DisallowListedCause) + } + } + + if rf, ok := ret.Get(1).(func(peer.ID) bool); ok { + r1 = rf(peerId) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// NewDisallowListOracle creates a new instance of DisallowListOracle. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDisallowListOracle(t interface { + mock.TestingT + Cleanup(func()) +}) *DisallowListOracle { + mock := &DisallowListOracle{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/get_time_now.go b/network/p2p/mock/get_time_now.go deleted file mode 100644 index b7088a4b3ed..00000000000 --- a/network/p2p/mock/get_time_now.go +++ /dev/null @@ -1,43 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - mock "github.com/stretchr/testify/mock" - - time "time" -) - -// GetTimeNow is an autogenerated mock type for the GetTimeNow type -type GetTimeNow struct { - mock.Mock -} - -// Execute provides a mock function with given fields: -func (_m *GetTimeNow) Execute() time.Time { - ret := _m.Called() - - var r0 time.Time - if rf, ok := ret.Get(0).(func() time.Time); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(time.Time) - } - - return r0 -} - -type mockConstructorTestingTNewGetTimeNow interface { - mock.TestingT - Cleanup(func()) -} - -// NewGetTimeNow creates a new instance of GetTimeNow. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGetTimeNow(t mockConstructorTestingTNewGetTimeNow) *GetTimeNow { - mock := &GetTimeNow{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/gossip_sub_adapter_config_func.go b/network/p2p/mock/gossip_sub_adapter_config_func.go deleted file mode 100644 index c207b692350..00000000000 --- a/network/p2p/mock/gossip_sub_adapter_config_func.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mockp2p - -import ( - p2p "github.com/onflow/flow-go/network/p2p" - mock "github.com/stretchr/testify/mock" -) - -// GossipSubAdapterConfigFunc is an autogenerated mock type for the GossipSubAdapterConfigFunc type -type GossipSubAdapterConfigFunc struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0 -func (_m *GossipSubAdapterConfigFunc) Execute(_a0 *p2p.BasePubSubAdapterConfig) p2p.PubSubAdapterConfig { - ret := _m.Called(_a0) - - var r0 p2p.PubSubAdapterConfig - if rf, ok := ret.Get(0).(func(*p2p.BasePubSubAdapterConfig) p2p.PubSubAdapterConfig); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.PubSubAdapterConfig) - } - } - - return r0 -} - -type mockConstructorTestingTNewGossipSubAdapterConfigFunc interface { - mock.TestingT - Cleanup(func()) -} - -// NewGossipSubAdapterConfigFunc creates a new instance of GossipSubAdapterConfigFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubAdapterConfigFunc(t mockConstructorTestingTNewGossipSubAdapterConfigFunc) *GossipSubAdapterConfigFunc { - mock := &GossipSubAdapterConfigFunc{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/gossip_sub_app_specific_rpc_inspector.go b/network/p2p/mock/gossip_sub_app_specific_rpc_inspector.go deleted file mode 100644 index a3e95b1e712..00000000000 --- a/network/p2p/mock/gossip_sub_app_specific_rpc_inspector.go +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - mock "github.com/stretchr/testify/mock" - - peer "github.com/libp2p/go-libp2p/core/peer" - - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -// GossipSubAppSpecificRpcInspector is an autogenerated mock type for the GossipSubAppSpecificRpcInspector type -type GossipSubAppSpecificRpcInspector struct { - mock.Mock -} - -// Inspect provides a mock function with given fields: _a0, _a1 -func (_m *GossipSubAppSpecificRpcInspector) Inspect(_a0 peer.ID, _a1 *pubsub.RPC) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(peer.ID, *pubsub.RPC) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewGossipSubAppSpecificRpcInspector interface { - mock.TestingT - Cleanup(func()) -} - -// NewGossipSubAppSpecificRpcInspector creates a new instance of GossipSubAppSpecificRpcInspector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubAppSpecificRpcInspector(t mockConstructorTestingTNewGossipSubAppSpecificRpcInspector) *GossipSubAppSpecificRpcInspector { - mock := &GossipSubAppSpecificRpcInspector{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/gossip_sub_application_specific_score_cache.go b/network/p2p/mock/gossip_sub_application_specific_score_cache.go new file mode 100644 index 00000000000..fc8a2481c57 --- /dev/null +++ b/network/p2p/mock/gossip_sub_application_specific_score_cache.go @@ -0,0 +1,83 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + mock "github.com/stretchr/testify/mock" + + peer "github.com/libp2p/go-libp2p/core/peer" + + time "time" +) + +// GossipSubApplicationSpecificScoreCache is an autogenerated mock type for the GossipSubApplicationSpecificScoreCache type +type GossipSubApplicationSpecificScoreCache struct { + mock.Mock +} + +// AdjustWithInit provides a mock function with given fields: peerID, score, _a2 +func (_m *GossipSubApplicationSpecificScoreCache) AdjustWithInit(peerID peer.ID, score float64, _a2 time.Time) error { + ret := _m.Called(peerID, score, _a2) + + if len(ret) == 0 { + panic("no return value specified for AdjustWithInit") + } + + var r0 error + if rf, ok := ret.Get(0).(func(peer.ID, float64, time.Time) error); ok { + r0 = rf(peerID, score, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Get provides a mock function with given fields: peerID +func (_m *GossipSubApplicationSpecificScoreCache) Get(peerID peer.ID) (float64, time.Time, bool) { + ret := _m.Called(peerID) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 float64 + var r1 time.Time + var r2 bool + if rf, ok := ret.Get(0).(func(peer.ID) (float64, time.Time, bool)); ok { + return rf(peerID) + } + if rf, ok := ret.Get(0).(func(peer.ID) float64); ok { + r0 = rf(peerID) + } else { + r0 = ret.Get(0).(float64) + } + + if rf, ok := ret.Get(1).(func(peer.ID) time.Time); ok { + r1 = rf(peerID) + } else { + r1 = ret.Get(1).(time.Time) + } + + if rf, ok := ret.Get(2).(func(peer.ID) bool); ok { + r2 = rf(peerID) + } else { + r2 = ret.Get(2).(bool) + } + + return r0, r1, r2 +} + +// NewGossipSubApplicationSpecificScoreCache creates a new instance of GossipSubApplicationSpecificScoreCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGossipSubApplicationSpecificScoreCache(t interface { + mock.TestingT + Cleanup(func()) +}) *GossipSubApplicationSpecificScoreCache { + mock := &GossipSubApplicationSpecificScoreCache{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/gossip_sub_builder.go b/network/p2p/mock/gossip_sub_builder.go index e01ff021e0d..cd1b3004c20 100644 --- a/network/p2p/mock/gossip_sub_builder.go +++ b/network/p2p/mock/gossip_sub_builder.go @@ -1,26 +1,18 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
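The new GossipSubApplicationSpecificScoreCache mocked above pairs each score with the time it was recorded, so callers can treat stale entries as due for recomputation. A hedged sketch of a cache satisfying that shape, string-keyed and mutex-guarded for brevity; the production implementation lives elsewhere in the codebase:

package sketch

import (
	"sync"
	"time"
)

type scoreEntry struct {
	score     float64
	updatedAt time.Time
}

// appScoreCache stores the latest application-specific score per peer
// together with the time it was recorded.
type appScoreCache struct {
	mu      sync.RWMutex
	entries map[string]scoreEntry // keyed by peer ID string
}

func newAppScoreCache() *appScoreCache {
	return &appScoreCache{entries: make(map[string]scoreEntry)}
}

// AdjustWithInit inserts or overwrites the peer's score and update time.
func (c *appScoreCache) AdjustWithInit(peerID string, score float64, at time.Time) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[peerID] = scoreEntry{score: score, updatedAt: at}
	return nil
}

// Get returns the cached score, when it was last updated, and whether an
// entry exists; callers can compare updatedAt against a TTL to decide
// whether the score needs recomputing.
func (c *appScoreCache) Get(peerID string) (float64, time.Time, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	e, ok := c.entries[peerID]
	return e.score, e.updatedAt, ok
}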
-package mockp2p +package mock import ( host "github.com/libp2p/go-libp2p/core/host" - channels "github.com/onflow/flow-go/network/channels" - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" mock "github.com/stretchr/testify/mock" - module "github.com/onflow/flow-go/module" - p2p "github.com/onflow/flow-go/network/p2p" - peer "github.com/libp2p/go-libp2p/core/peer" - pubsub "github.com/libp2p/go-libp2p-pubsub" routing "github.com/libp2p/go-libp2p/core/routing" - - time "time" ) // GossipSubBuilder is an autogenerated mock type for the GossipSubBuilder type @@ -29,13 +21,16 @@ type GossipSubBuilder struct { } // Build provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) Build(_a0 irrecoverable.SignalerContext) (p2p.PubSubAdapter, p2p.PeerScoreTracer, error) { +func (_m *GossipSubBuilder) Build(_a0 irrecoverable.SignalerContext) (p2p.PubSubAdapter, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Build") + } + var r0 p2p.PubSubAdapter - var r1 p2p.PeerScoreTracer - var r2 error - if rf, ok := ret.Get(0).(func(irrecoverable.SignalerContext) (p2p.PubSubAdapter, p2p.PeerScoreTracer, error)); ok { + var r1 error + if rf, ok := ret.Get(0).(func(irrecoverable.SignalerContext) (p2p.PubSubAdapter, error)); ok { return rf(_a0) } if rf, ok := ret.Get(0).(func(irrecoverable.SignalerContext) p2p.PubSubAdapter); ok { @@ -46,55 +41,37 @@ func (_m *GossipSubBuilder) Build(_a0 irrecoverable.SignalerContext) (p2p.PubSub } } - if rf, ok := ret.Get(1).(func(irrecoverable.SignalerContext) p2p.PeerScoreTracer); ok { + if rf, ok := ret.Get(1).(func(irrecoverable.SignalerContext) error); ok { r1 = rf(_a0) } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(p2p.PeerScoreTracer) - } + r1 = ret.Error(1) } - if rf, ok := ret.Get(2).(func(irrecoverable.SignalerContext) error); ok { - r2 = rf(_a0) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// SetAppSpecificScoreParams provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) SetAppSpecificScoreParams(_a0 func(peer.ID) float64) { - _m.Called(_a0) + return r0, r1 } -// SetGossipSubConfigFunc provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) SetGossipSubConfigFunc(_a0 p2p.GossipSubAdapterConfigFunc) { +// EnableGossipSubScoringWithOverride provides a mock function with given fields: _a0 +func (_m *GossipSubBuilder) EnableGossipSubScoringWithOverride(_a0 *p2p.PeerScoringConfigOverride) { _m.Called(_a0) } -// SetGossipSubFactory provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) SetGossipSubFactory(_a0 p2p.GossipSubFactoryFunc) { +// OverrideDefaultRpcInspectorFactory provides a mock function with given fields: _a0 +func (_m *GossipSubBuilder) OverrideDefaultRpcInspectorFactory(_a0 p2p.GossipSubRpcInspectorFactoryFunc) { _m.Called(_a0) } -// SetGossipSubPeerScoring provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) SetGossipSubPeerScoring(_a0 bool) { +// OverrideDefaultValidateQueueSize provides a mock function with given fields: _a0 +func (_m *GossipSubBuilder) OverrideDefaultValidateQueueSize(_a0 int) { _m.Called(_a0) } -// SetGossipSubRPCInspectorSuite provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) SetGossipSubRPCInspectorSuite(_a0 p2p.GossipSubInspectorSuite) { - _m.Called(_a0) -} - -// SetGossipSubScoreTracerInterval provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) SetGossipSubScoreTracerInterval(_a0 time.Duration) { 
+// SetGossipSubConfigFunc provides a mock function with given fields: _a0 +func (_m *GossipSubBuilder) SetGossipSubConfigFunc(_a0 p2p.GossipSubAdapterConfigFunc) { _m.Called(_a0) } -// SetGossipSubTracer provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) SetGossipSubTracer(_a0 p2p.PubSubTracer) { +// SetGossipSubFactory provides a mock function with given fields: _a0 +func (_m *GossipSubBuilder) SetGossipSubFactory(_a0 p2p.GossipSubFactoryFunc) { _m.Called(_a0) } @@ -103,11 +80,6 @@ func (_m *GossipSubBuilder) SetHost(_a0 host.Host) { _m.Called(_a0) } -// SetIDProvider provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) SetIDProvider(_a0 module.IdentityProvider) { - _m.Called(_a0) -} - // SetRoutingSystem provides a mock function with given fields: _a0 func (_m *GossipSubBuilder) SetRoutingSystem(_a0 routing.Routing) { _m.Called(_a0) @@ -118,18 +90,12 @@ func (_m *GossipSubBuilder) SetSubscriptionFilter(_a0 pubsub.SubscriptionFilter) _m.Called(_a0) } -// SetTopicScoreParams provides a mock function with given fields: topic, topicScoreParams -func (_m *GossipSubBuilder) SetTopicScoreParams(topic channels.Topic, topicScoreParams *pubsub.TopicScoreParams) { - _m.Called(topic, topicScoreParams) -} - -type mockConstructorTestingTNewGossipSubBuilder interface { +// NewGossipSubBuilder creates a new instance of GossipSubBuilder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGossipSubBuilder(t interface { mock.TestingT Cleanup(func()) -} - -// NewGossipSubBuilder creates a new instance of GossipSubBuilder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubBuilder(t mockConstructorTestingTNewGossipSubBuilder) *GossipSubBuilder { +}) *GossipSubBuilder { mock := &GossipSubBuilder{} mock.Mock.Test(t) diff --git a/network/p2p/mock/gossip_sub_control_metrics_observer.go b/network/p2p/mock/gossip_sub_control_metrics_observer.go deleted file mode 100644 index 333bb990c6c..00000000000 --- a/network/p2p/mock/gossip_sub_control_metrics_observer.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - mock "github.com/stretchr/testify/mock" - - peer "github.com/libp2p/go-libp2p/core/peer" - - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -// GossipSubControlMetricsObserver is an autogenerated mock type for the GossipSubControlMetricsObserver type -type GossipSubControlMetricsObserver struct { - mock.Mock -} - -// ObserveRPC provides a mock function with given fields: _a0, _a1 -func (_m *GossipSubControlMetricsObserver) ObserveRPC(_a0 peer.ID, _a1 *pubsub.RPC) { - _m.Called(_a0, _a1) -} - -type mockConstructorTestingTNewGossipSubControlMetricsObserver interface { - mock.TestingT - Cleanup(func()) -} - -// NewGossipSubControlMetricsObserver creates a new instance of GossipSubControlMetricsObserver. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewGossipSubControlMetricsObserver(t mockConstructorTestingTNewGossipSubControlMetricsObserver) *GossipSubControlMetricsObserver { - mock := &GossipSubControlMetricsObserver{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/gossip_sub_factory_func.go b/network/p2p/mock/gossip_sub_factory_func.go deleted file mode 100644 index 06cd0346c8c..00000000000 --- a/network/p2p/mock/gossip_sub_factory_func.go +++ /dev/null @@ -1,60 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - context "context" - - host "github.com/libp2p/go-libp2p/core/host" - mock "github.com/stretchr/testify/mock" - - p2p "github.com/onflow/flow-go/network/p2p" - - zerolog "github.com/rs/zerolog" -) - -// GossipSubFactoryFunc is an autogenerated mock type for the GossipSubFactoryFunc type -type GossipSubFactoryFunc struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *GossipSubFactoryFunc) Execute(_a0 context.Context, _a1 zerolog.Logger, _a2 host.Host, _a3 p2p.PubSubAdapterConfig) (p2p.PubSubAdapter, error) { - ret := _m.Called(_a0, _a1, _a2, _a3) - - var r0 p2p.PubSubAdapter - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, zerolog.Logger, host.Host, p2p.PubSubAdapterConfig) (p2p.PubSubAdapter, error)); ok { - return rf(_a0, _a1, _a2, _a3) - } - if rf, ok := ret.Get(0).(func(context.Context, zerolog.Logger, host.Host, p2p.PubSubAdapterConfig) p2p.PubSubAdapter); ok { - r0 = rf(_a0, _a1, _a2, _a3) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.PubSubAdapter) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, zerolog.Logger, host.Host, p2p.PubSubAdapterConfig) error); ok { - r1 = rf(_a0, _a1, _a2, _a3) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewGossipSubFactoryFunc interface { - mock.TestingT - Cleanup(func()) -} - -// NewGossipSubFactoryFunc creates a new instance of GossipSubFactoryFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubFactoryFunc(t mockConstructorTestingTNewGossipSubFactoryFunc) *GossipSubFactoryFunc { - mock := &GossipSubFactoryFunc{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/gossip_sub_inspector_notif_distributor.go b/network/p2p/mock/gossip_sub_inspector_notif_distributor.go deleted file mode 100644 index b378c9fac2b..00000000000 --- a/network/p2p/mock/gossip_sub_inspector_notif_distributor.go +++ /dev/null @@ -1,86 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mockp2p - -import ( - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - mock "github.com/stretchr/testify/mock" - - p2p "github.com/onflow/flow-go/network/p2p" -) - -// GossipSubInspectorNotifDistributor is an autogenerated mock type for the GossipSubInspectorNotifDistributor type -type GossipSubInspectorNotifDistributor struct { - mock.Mock -} - -// AddConsumer provides a mock function with given fields: _a0 -func (_m *GossipSubInspectorNotifDistributor) AddConsumer(_a0 p2p.GossipSubInvCtrlMsgNotifConsumer) { - _m.Called(_a0) -} - -// Distribute provides a mock function with given fields: notification -func (_m *GossipSubInspectorNotifDistributor) Distribute(notification *p2p.InvCtrlMsgNotif) error { - ret := _m.Called(notification) - - var r0 error - if rf, ok := ret.Get(0).(func(*p2p.InvCtrlMsgNotif) error); ok { - r0 = rf(notification) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Done provides a mock function with given fields: -func (_m *GossipSubInspectorNotifDistributor) Done() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Ready provides a mock function with given fields: -func (_m *GossipSubInspectorNotifDistributor) Ready() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Start provides a mock function with given fields: _a0 -func (_m *GossipSubInspectorNotifDistributor) Start(_a0 irrecoverable.SignalerContext) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewGossipSubInspectorNotifDistributor interface { - mock.TestingT - Cleanup(func()) -} - -// NewGossipSubInspectorNotifDistributor creates a new instance of GossipSubInspectorNotifDistributor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubInspectorNotifDistributor(t mockConstructorTestingTNewGossipSubInspectorNotifDistributor) *GossipSubInspectorNotifDistributor { - mock := &GossipSubInspectorNotifDistributor{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/gossip_sub_inspector_notification_distributor.go b/network/p2p/mock/gossip_sub_inspector_notification_distributor.go deleted file mode 100644 index 757cd8fa363..00000000000 --- a/network/p2p/mock/gossip_sub_inspector_notification_distributor.go +++ /dev/null @@ -1,86 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mockp2p - -import ( - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - mock "github.com/stretchr/testify/mock" - - p2p "github.com/onflow/flow-go/network/p2p" -) - -// GossipSubInspectorNotificationDistributor is an autogenerated mock type for the GossipSubInspectorNotificationDistributor type -type GossipSubInspectorNotificationDistributor struct { - mock.Mock -} - -// AddConsumer provides a mock function with given fields: _a0 -func (_m *GossipSubInspectorNotificationDistributor) AddConsumer(_a0 p2p.GossipSubInvCtrlMsgNotifConsumer) { - _m.Called(_a0) -} - -// DistributeInvalidControlMessageNotification provides a mock function with given fields: notification -func (_m *GossipSubInspectorNotificationDistributor) Distribute(notification *p2p.InvCtrlMsgNotif) error { - ret := _m.Called(notification) - - var r0 error - if rf, ok := ret.Get(0).(func(*p2p.InvCtrlMsgNotif) error); ok { - r0 = rf(notification) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Done provides a mock function with given fields: -func (_m *GossipSubInspectorNotificationDistributor) Done() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Ready provides a mock function with given fields: -func (_m *GossipSubInspectorNotificationDistributor) Ready() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Start provides a mock function with given fields: _a0 -func (_m *GossipSubInspectorNotificationDistributor) Start(_a0 irrecoverable.SignalerContext) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewGossipSubInspectorNotificationDistributor interface { - mock.TestingT - Cleanup(func()) -} - -// NewGossipSubInspectorNotificationDistributor creates a new instance of GossipSubInspectorNotificationDistributor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubInspectorNotificationDistributor(t mockConstructorTestingTNewGossipSubInspectorNotificationDistributor) *GossipSubInspectorNotificationDistributor { - mock := &GossipSubInspectorNotificationDistributor{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/gossip_sub_inspector_suite.go b/network/p2p/mock/gossip_sub_inspector_suite.go deleted file mode 100644 index 873dfca39cf..00000000000 --- a/network/p2p/mock/gossip_sub_inspector_suite.go +++ /dev/null @@ -1,92 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mockp2p - -import ( - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - mock "github.com/stretchr/testify/mock" - - p2p "github.com/onflow/flow-go/network/p2p" - - peer "github.com/libp2p/go-libp2p/core/peer" - - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -// GossipSubInspectorSuite is an autogenerated mock type for the GossipSubInspectorSuite type -type GossipSubInspectorSuite struct { - mock.Mock -} - -// AddInvCtrlMsgNotifConsumer provides a mock function with given fields: _a0 -func (_m *GossipSubInspectorSuite) AddInvCtrlMsgNotifConsumer(_a0 p2p.GossipSubInvCtrlMsgNotifConsumer) { - _m.Called(_a0) -} - -// Done provides a mock function with given fields: -func (_m *GossipSubInspectorSuite) Done() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// InspectFunc provides a mock function with given fields: -func (_m *GossipSubInspectorSuite) InspectFunc() func(peer.ID, *pubsub.RPC) error { - ret := _m.Called() - - var r0 func(peer.ID, *pubsub.RPC) error - if rf, ok := ret.Get(0).(func() func(peer.ID, *pubsub.RPC) error); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(peer.ID, *pubsub.RPC) error) - } - } - - return r0 -} - -// Ready provides a mock function with given fields: -func (_m *GossipSubInspectorSuite) Ready() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Start provides a mock function with given fields: _a0 -func (_m *GossipSubInspectorSuite) Start(_a0 irrecoverable.SignalerContext) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewGossipSubInspectorSuite interface { - mock.TestingT - Cleanup(func()) -} - -// NewGossipSubInspectorSuite creates a new instance of GossipSubInspectorSuite. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubInspectorSuite(t mockConstructorTestingTNewGossipSubInspectorSuite) *GossipSubInspectorSuite { - mock := &GossipSubInspectorSuite{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/gossip_sub_inv_ctrl_msg_notif_consumer.go b/network/p2p/mock/gossip_sub_inv_ctrl_msg_notif_consumer.go index 56de1ef6093..ffb33f60fc8 100644 --- a/network/p2p/mock/gossip_sub_inv_ctrl_msg_notif_consumer.go +++ b/network/p2p/mock/gossip_sub_inv_ctrl_msg_notif_consumer.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( p2p "github.com/onflow/flow-go/network/p2p" @@ -17,13 +17,12 @@ func (_m *GossipSubInvCtrlMsgNotifConsumer) OnInvalidControlMessageNotification( _m.Called(_a0) } -type mockConstructorTestingTNewGossipSubInvCtrlMsgNotifConsumer interface { +// NewGossipSubInvCtrlMsgNotifConsumer creates a new instance of GossipSubInvCtrlMsgNotifConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewGossipSubInvCtrlMsgNotifConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewGossipSubInvCtrlMsgNotifConsumer creates a new instance of GossipSubInvCtrlMsgNotifConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubInvCtrlMsgNotifConsumer(t mockConstructorTestingTNewGossipSubInvCtrlMsgNotifConsumer) *GossipSubInvCtrlMsgNotifConsumer { +}) *GossipSubInvCtrlMsgNotifConsumer { mock := &GossipSubInvCtrlMsgNotifConsumer{} mock.Mock.Test(t) diff --git a/network/p2p/mock/gossip_sub_invalid_control_message_notification_consumer.go b/network/p2p/mock/gossip_sub_invalid_control_message_notification_consumer.go deleted file mode 100644 index 8df3aae5870..00000000000 --- a/network/p2p/mock/gossip_sub_invalid_control_message_notification_consumer.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - p2p "github.com/onflow/flow-go/network/p2p" - mock "github.com/stretchr/testify/mock" -) - -// GossipSubInvalidControlMessageNotificationConsumer is an autogenerated mock type for the GossipSubInvalidControlMessageNotificationConsumer type -type GossipSubInvalidControlMessageNotificationConsumer struct { - mock.Mock -} - -// OnInvalidControlMessageNotification provides a mock function with given fields: _a0 -func (_m *GossipSubInvalidControlMessageNotificationConsumer) OnInvalidControlMessageNotification(_a0 *p2p.InvCtrlMsgNotif) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewGossipSubInvalidControlMessageNotificationConsumer interface { - mock.TestingT - Cleanup(func()) -} - -// NewGossipSubInvalidControlMessageNotificationConsumer creates a new instance of GossipSubInvalidControlMessageNotificationConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubInvalidControlMessageNotificationConsumer(t mockConstructorTestingTNewGossipSubInvalidControlMessageNotificationConsumer) *GossipSubInvalidControlMessageNotificationConsumer { - mock := &GossipSubInvalidControlMessageNotificationConsumer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/gossip_sub_rpc_inspector.go b/network/p2p/mock/gossip_sub_rpc_inspector.go index fa7453b5bc2..df010a8c657 100644 --- a/network/p2p/mock/gossip_sub_rpc_inspector.go +++ b/network/p2p/mock/gossip_sub_rpc_inspector.go @@ -1,9 +1,11 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
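Note (editorial sketch, not part of the generated diff): the constructor change above drops the named mockConstructorTestingT* helper interfaces in favor of an inline anonymous interface, so callers hand the constructor a *testing.T directly and expectation assertion is registered via t.Cleanup. A minimal usage sketch, assuming the regenerated package is imported under an alias; the test name, alias, and notification value are illustrative:

package p2p_test

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/onflow/flow-go/network/p2p"
	p2pmock "github.com/onflow/flow-go/network/p2p/mock"
)

func TestNotifConsumerMock(t *testing.T) {
	// The constructor wires up a t.Cleanup hook that calls AssertExpectations,
	// so no explicit assertion is needed at the end of the test.
	consumer := p2pmock.NewGossipSubInvCtrlMsgNotifConsumer(t)
	consumer.On("OnInvalidControlMessageNotification", mock.Anything).Once()
	consumer.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{})
}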
-package mockp2p +package mock import ( + flow "github.com/onflow/flow-go/model/flow" irrecoverable "github.com/onflow/flow-go/module/irrecoverable" + mock "github.com/stretchr/testify/mock" peer "github.com/libp2p/go-libp2p/core/peer" @@ -16,10 +18,19 @@ type GossipSubRPCInspector struct { mock.Mock } -// Done provides a mock function with given fields: +// ActiveClustersChanged provides a mock function with given fields: _a0 +func (_m *GossipSubRPCInspector) ActiveClustersChanged(_a0 flow.ChainIDList) { + _m.Called(_a0) +} + +// Done provides a mock function with no fields func (_m *GossipSubRPCInspector) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -36,6 +47,10 @@ func (_m *GossipSubRPCInspector) Done() <-chan struct{} { func (_m *GossipSubRPCInspector) Inspect(_a0 peer.ID, _a1 *pubsub.RPC) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Inspect") + } + var r0 error if rf, ok := ret.Get(0).(func(peer.ID, *pubsub.RPC) error); ok { r0 = rf(_a0, _a1) @@ -46,10 +61,14 @@ func (_m *GossipSubRPCInspector) Inspect(_a0 peer.ID, _a1 *pubsub.RPC) error { return r0 } -// Name provides a mock function with given fields: +// Name provides a mock function with no fields func (_m *GossipSubRPCInspector) Name() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Name") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -60,10 +79,14 @@ func (_m *GossipSubRPCInspector) Name() string { return r0 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *GossipSubRPCInspector) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -81,13 +104,12 @@ func (_m *GossipSubRPCInspector) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewGossipSubRPCInspector interface { +// NewGossipSubRPCInspector creates a new instance of GossipSubRPCInspector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGossipSubRPCInspector(t interface { mock.TestingT Cleanup(func()) -} - -// NewGossipSubRPCInspector creates a new instance of GossipSubRPCInspector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGossipSubRPCInspector(t mockConstructorTestingTNewGossipSubRPCInspector) *GossipSubRPCInspector { +}) *GossipSubRPCInspector { mock := &GossipSubRPCInspector{} mock.Mock.Test(t) diff --git a/network/p2p/mock/gossip_sub_spam_record_cache.go b/network/p2p/mock/gossip_sub_spam_record_cache.go index 35e674fdffb..7431f1eda1e 100644 --- a/network/p2p/mock/gossip_sub_spam_record_cache.go +++ b/network/p2p/mock/gossip_sub_spam_record_cache.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
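Note (editorial sketch): the regenerated GossipSubRPCInspector mock above adds a len(ret) == 0 guard to every value-returning method, so an unconfigured call now panics (e.g. "no return value specified for Inspect") instead of silently returning zero values. A sketch of the resulting test pattern; identifiers are illustrative:

package p2p_test

import (
	"testing"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/mock"

	p2pmock "github.com/onflow/flow-go/network/p2p/mock"
)

func TestRPCInspectorMock(t *testing.T) {
	inspector := p2pmock.NewGossipSubRPCInspector(t)

	// Without this expectation, the call below would panic rather than
	// return a zero-valued error.
	inspector.On("Inspect", mock.Anything, mock.Anything).Return(nil)

	if err := inspector.Inspect(peer.ID("illustrative-peer"), nil); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}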
-package mockp2p +package mock import ( p2p "github.com/onflow/flow-go/network/p2p" @@ -14,24 +14,44 @@ type GossipSubSpamRecordCache struct { mock.Mock } -// Add provides a mock function with given fields: peerId, record -func (_m *GossipSubSpamRecordCache) Add(peerId peer.ID, record p2p.GossipSubSpamRecord) bool { - ret := _m.Called(peerId, record) +// Adjust provides a mock function with given fields: peerID, updateFunc +func (_m *GossipSubSpamRecordCache) Adjust(peerID peer.ID, updateFunc p2p.UpdateFunction) (*p2p.GossipSubSpamRecord, error) { + ret := _m.Called(peerID, updateFunc) - var r0 bool - if rf, ok := ret.Get(0).(func(peer.ID, p2p.GossipSubSpamRecord) bool); ok { - r0 = rf(peerId, record) + if len(ret) == 0 { + panic("no return value specified for Adjust") + } + + var r0 *p2p.GossipSubSpamRecord + var r1 error + if rf, ok := ret.Get(0).(func(peer.ID, p2p.UpdateFunction) (*p2p.GossipSubSpamRecord, error)); ok { + return rf(peerID, updateFunc) + } + if rf, ok := ret.Get(0).(func(peer.ID, p2p.UpdateFunction) *p2p.GossipSubSpamRecord); ok { + r0 = rf(peerID, updateFunc) } else { - r0 = ret.Get(0).(bool) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*p2p.GossipSubSpamRecord) + } } - return r0 + if rf, ok := ret.Get(1).(func(peer.ID, p2p.UpdateFunction) error); ok { + r1 = rf(peerID, updateFunc) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // Get provides a mock function with given fields: peerID func (_m *GossipSubSpamRecordCache) Get(peerID peer.ID) (*p2p.GossipSubSpamRecord, error, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 *p2p.GossipSubSpamRecord var r1 error var r2 bool @@ -65,6 +85,10 @@ func (_m *GossipSubSpamRecordCache) Get(peerID peer.ID) (*p2p.GossipSubSpamRecor func (_m *GossipSubSpamRecordCache) Has(peerID peer.ID) bool { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for Has") + } + var r0 bool if rf, ok := ret.Get(0).(func(peer.ID) bool); ok { r0 = rf(peerID) @@ -75,39 +99,12 @@ func (_m *GossipSubSpamRecordCache) Has(peerID peer.ID) bool { return r0 } -// Update provides a mock function with given fields: peerID, updateFunc -func (_m *GossipSubSpamRecordCache) Update(peerID peer.ID, updateFunc p2p.UpdateFunction) (*p2p.GossipSubSpamRecord, error) { - ret := _m.Called(peerID, updateFunc) - - var r0 *p2p.GossipSubSpamRecord - var r1 error - if rf, ok := ret.Get(0).(func(peer.ID, p2p.UpdateFunction) (*p2p.GossipSubSpamRecord, error)); ok { - return rf(peerID, updateFunc) - } - if rf, ok := ret.Get(0).(func(peer.ID, p2p.UpdateFunction) *p2p.GossipSubSpamRecord); ok { - r0 = rf(peerID, updateFunc) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*p2p.GossipSubSpamRecord) - } - } - - if rf, ok := ret.Get(1).(func(peer.ID, p2p.UpdateFunction) error); ok { - r1 = rf(peerID, updateFunc) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewGossipSubSpamRecordCache interface { +// NewGossipSubSpamRecordCache creates a new instance of GossipSubSpamRecordCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGossipSubSpamRecordCache(t interface { mock.TestingT Cleanup(func()) -} - -// NewGossipSubSpamRecordCache creates a new instance of GossipSubSpamRecordCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
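Note (editorial sketch): the cache diff above folds the old Add/Update pair into a single Adjust method that takes a p2p.UpdateFunction and returns the adjusted record. A sketch of stubbing it, with nil standing in for a real update function so no assumption about UpdateFunction's exact signature is needed; values are illustrative:

package p2p_test

import (
	"testing"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/mock"

	"github.com/onflow/flow-go/network/p2p"
	p2pmock "github.com/onflow/flow-go/network/p2p/mock"
)

func TestSpamRecordCacheMock(t *testing.T) {
	cache := p2pmock.NewGossipSubSpamRecordCache(t)

	adjusted := &p2p.GossipSubSpamRecord{} // zero-valued record, purely for illustration
	cache.On("Adjust", mock.Anything, mock.Anything).Return(adjusted, nil)

	// nil stands in for a real update function in this stubbed call.
	record, err := cache.Adjust(peer.ID("illustrative-peer"), nil)
	if err != nil || record == nil {
		t.Fatalf("unexpected result: %v, %v", record, err)
	}
}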
-func NewGossipSubSpamRecordCache(t mockConstructorTestingTNewGossipSubSpamRecordCache) *GossipSubSpamRecordCache { +}) *GossipSubSpamRecordCache { mock := &GossipSubSpamRecordCache{} mock.Mock.Test(t) diff --git a/network/p2p/mock/id_translator.go b/network/p2p/mock/id_translator.go index 6bf13761fe1..f4d536b9f55 100644 --- a/network/p2p/mock/id_translator.go +++ b/network/p2p/mock/id_translator.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( flow "github.com/onflow/flow-go/model/flow" @@ -18,6 +18,10 @@ type IDTranslator struct { func (_m *IDTranslator) GetFlowID(_a0 peer.ID) (flow.Identifier, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for GetFlowID") + } + var r0 flow.Identifier var r1 error if rf, ok := ret.Get(0).(func(peer.ID) (flow.Identifier, error)); ok { @@ -44,6 +48,10 @@ func (_m *IDTranslator) GetFlowID(_a0 peer.ID) (flow.Identifier, error) { func (_m *IDTranslator) GetPeerID(_a0 flow.Identifier) (peer.ID, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for GetPeerID") + } + var r0 peer.ID var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (peer.ID, error)); ok { @@ -64,13 +72,12 @@ func (_m *IDTranslator) GetPeerID(_a0 flow.Identifier) (peer.ID, error) { return r0, r1 } -type mockConstructorTestingTNewIDTranslator interface { +// NewIDTranslator creates a new instance of IDTranslator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIDTranslator(t interface { mock.TestingT Cleanup(func()) -} - -// NewIDTranslator creates a new instance of IDTranslator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewIDTranslator(t mockConstructorTestingTNewIDTranslator) *IDTranslator { +}) *IDTranslator { mock := &IDTranslator{} mock.Mock.Test(t) diff --git a/network/p2p/mock/lib_p2_p_factory_func.go b/network/p2p/mock/lib_p2_p_factory_func.go deleted file mode 100644 index cde65cd1e35..00000000000 --- a/network/p2p/mock/lib_p2_p_factory_func.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - p2p "github.com/onflow/flow-go/network/p2p" - mock "github.com/stretchr/testify/mock" -) - -// LibP2PFactoryFunc is an autogenerated mock type for the LibP2PFactoryFunc type -type LibP2PFactoryFunc struct { - mock.Mock -} - -// Execute provides a mock function with given fields: -func (_m *LibP2PFactoryFunc) Execute() (p2p.LibP2PNode, error) { - ret := _m.Called() - - var r0 p2p.LibP2PNode - var r1 error - if rf, ok := ret.Get(0).(func() (p2p.LibP2PNode, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() p2p.LibP2PNode); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.LibP2PNode) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewLibP2PFactoryFunc interface { - mock.TestingT - Cleanup(func()) -} - -// NewLibP2PFactoryFunc creates a new instance of LibP2PFactoryFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewLibP2PFactoryFunc(t mockConstructorTestingTNewLibP2PFactoryFunc) *LibP2PFactoryFunc { - mock := &LibP2PFactoryFunc{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/lib_p2_p_node.go b/network/p2p/mock/lib_p2_p_node.go index 326b2280eca..98b497bc0e8 100644 --- a/network/p2p/mock/lib_p2_p_node.go +++ b/network/p2p/mock/lib_p2_p_node.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( component "github.com/onflow/flow-go/module/component" @@ -8,6 +8,10 @@ import ( context "context" + corenetwork "github.com/libp2p/go-libp2p/core/network" + + flow "github.com/onflow/flow-go/model/flow" + host "github.com/libp2p/go-libp2p/core/host" irrecoverable "github.com/onflow/flow-go/module/irrecoverable" @@ -16,7 +20,7 @@ import ( mock "github.com/stretchr/testify/mock" - network "github.com/libp2p/go-libp2p/core/network" + network "github.com/onflow/flow-go/network" p2p "github.com/onflow/flow-go/network/p2p" @@ -34,10 +38,19 @@ type LibP2PNode struct { mock.Mock } -// AddPeer provides a mock function with given fields: ctx, peerInfo -func (_m *LibP2PNode) AddPeer(ctx context.Context, peerInfo peer.AddrInfo) error { +// ActiveClustersChanged provides a mock function with given fields: _a0 +func (_m *LibP2PNode) ActiveClustersChanged(_a0 flow.ChainIDList) { + _m.Called(_a0) +} + +// ConnectToPeer provides a mock function with given fields: ctx, peerInfo +func (_m *LibP2PNode) ConnectToPeer(ctx context.Context, peerInfo peer.AddrInfo) error { ret := _m.Called(ctx, peerInfo) + if len(ret) == 0 { + panic("no return value specified for ConnectToPeer") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, peer.AddrInfo) error); ok { r0 = rf(ctx, peerInfo) @@ -48,36 +61,14 @@ func (_m *LibP2PNode) AddPeer(ctx context.Context, peerInfo peer.AddrInfo) error return r0 } -// CreateStream provides a mock function with given fields: ctx, peerID -func (_m *LibP2PNode) CreateStream(ctx context.Context, peerID peer.ID) (network.Stream, error) { - ret := _m.Called(ctx, peerID) - - var r0 network.Stream - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, peer.ID) (network.Stream, error)); ok { - return rf(ctx, peerID) - } - if rf, ok := ret.Get(0).(func(context.Context, peer.ID) network.Stream); ok { - r0 = rf(ctx, peerID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(network.Stream) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, peer.ID) error); ok { - r1 = rf(ctx, peerID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *LibP2PNode) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -90,10 +81,14 @@ func (_m *LibP2PNode) Done() <-chan struct{} { return r0 } -// GetIPPort provides a mock function with given fields: +// GetIPPort provides a mock function with no fields func (_m *LibP2PNode) GetIPPort() (string, string, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetIPPort") + } + var r0 string var r1 string var r2 error @@ -121,10 +116,34 @@ func (_m *LibP2PNode) GetIPPort() (string, string, error) { return r0, r1, r2 } +// GetLocalMeshPeers provides a mock function with given 
fields: topic +func (_m *LibP2PNode) GetLocalMeshPeers(topic channels.Topic) []peer.ID { + ret := _m.Called(topic) + + if len(ret) == 0 { + panic("no return value specified for GetLocalMeshPeers") + } + + var r0 []peer.ID + if rf, ok := ret.Get(0).(func(channels.Topic) []peer.ID); ok { + r0 = rf(topic) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]peer.ID) + } + } + + return r0 +} + // GetPeersForProtocol provides a mock function with given fields: pid func (_m *LibP2PNode) GetPeersForProtocol(pid protocol.ID) peer.IDSlice { ret := _m.Called(pid) + if len(ret) == 0 { + panic("no return value specified for GetPeersForProtocol") + } + var r0 peer.IDSlice if rf, ok := ret.Get(0).(func(protocol.ID) peer.IDSlice); ok { r0 = rf(pid) @@ -141,6 +160,10 @@ func (_m *LibP2PNode) GetPeersForProtocol(pid protocol.ID) peer.IDSlice { func (_m *LibP2PNode) HasSubscription(topic channels.Topic) bool { ret := _m.Called(topic) + if len(ret) == 0 { + panic("no return value specified for HasSubscription") + } + var r0 bool if rf, ok := ret.Get(0).(func(channels.Topic) bool); ok { r0 = rf(topic) @@ -151,10 +174,14 @@ func (_m *LibP2PNode) HasSubscription(topic channels.Topic) bool { return r0 } -// Host provides a mock function with given fields: +// Host provides a mock function with no fields func (_m *LibP2PNode) Host() host.Host { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Host") + } + var r0 host.Host if rf, ok := ret.Get(0).(func() host.Host); ok { r0 = rf() @@ -167,10 +194,32 @@ func (_m *LibP2PNode) Host() host.Host { return r0 } +// ID provides a mock function with no fields +func (_m *LibP2PNode) ID() peer.ID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 peer.ID + if rf, ok := ret.Get(0).(func() peer.ID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(peer.ID) + } + + return r0 +} + // IsConnected provides a mock function with given fields: peerID func (_m *LibP2PNode) IsConnected(peerID peer.ID) (bool, error) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for IsConnected") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(peer.ID) (bool, error)); ok { @@ -191,10 +240,44 @@ func (_m *LibP2PNode) IsConnected(peerID peer.ID) (bool, error) { return r0, r1 } +// IsDisallowListed provides a mock function with given fields: peerId +func (_m *LibP2PNode) IsDisallowListed(peerId peer.ID) ([]network.DisallowListedCause, bool) { + ret := _m.Called(peerId) + + if len(ret) == 0 { + panic("no return value specified for IsDisallowListed") + } + + var r0 []network.DisallowListedCause + var r1 bool + if rf, ok := ret.Get(0).(func(peer.ID) ([]network.DisallowListedCause, bool)); ok { + return rf(peerId) + } + if rf, ok := ret.Get(0).(func(peer.ID) []network.DisallowListedCause); ok { + r0 = rf(peerId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]network.DisallowListedCause) + } + } + + if rf, ok := ret.Get(1).(func(peer.ID) bool); ok { + r1 = rf(peerId) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + // ListPeers provides a mock function with given fields: topic func (_m *LibP2PNode) ListPeers(topic string) []peer.ID { ret := _m.Called(topic) + if len(ret) == 0 { + panic("no return value specified for ListPeers") + } + var r0 []peer.ID if rf, ok := ret.Get(0).(func(string) []peer.ID); ok { r0 = rf(topic) @@ -207,10 +290,42 @@ func (_m *LibP2PNode) ListPeers(topic string) []peer.ID { return r0 } -// PeerManagerComponent provides a mock 
function with given fields: +// OnAllowListNotification provides a mock function with given fields: id, cause +func (_m *LibP2PNode) OnAllowListNotification(id peer.ID, cause network.DisallowListedCause) { + _m.Called(id, cause) +} + +// OnDisallowListNotification provides a mock function with given fields: id, cause +func (_m *LibP2PNode) OnDisallowListNotification(id peer.ID, cause network.DisallowListedCause) { + _m.Called(id, cause) +} + +// OpenAndWriteOnStream provides a mock function with given fields: ctx, peerID, protectionTag, writingLogic +func (_m *LibP2PNode) OpenAndWriteOnStream(ctx context.Context, peerID peer.ID, protectionTag string, writingLogic func(corenetwork.Stream) error) error { + ret := _m.Called(ctx, peerID, protectionTag, writingLogic) + + if len(ret) == 0 { + panic("no return value specified for OpenAndWriteOnStream") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, peer.ID, string, func(corenetwork.Stream) error) error); ok { + r0 = rf(ctx, peerID, protectionTag, writingLogic) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PeerManagerComponent provides a mock function with no fields func (_m *LibP2PNode) PeerManagerComponent() component.Component { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for PeerManagerComponent") + } + var r0 component.Component if rf, ok := ret.Get(0).(func() component.Component); ok { r0 = rf() @@ -223,15 +338,15 @@ func (_m *LibP2PNode) PeerManagerComponent() component.Component { return r0 } -// PeerScoreExposer provides a mock function with given fields: -func (_m *LibP2PNode) PeerScoreExposer() (p2p.PeerScoreExposer, bool) { +// PeerScoreExposer provides a mock function with no fields +func (_m *LibP2PNode) PeerScoreExposer() p2p.PeerScoreExposer { ret := _m.Called() - var r0 p2p.PeerScoreExposer - var r1 bool - if rf, ok := ret.Get(0).(func() (p2p.PeerScoreExposer, bool)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for PeerScoreExposer") } + + var r0 p2p.PeerScoreExposer if rf, ok := ret.Get(0).(func() p2p.PeerScoreExposer); ok { r0 = rf() } else { @@ -240,22 +355,20 @@ func (_m *LibP2PNode) PeerScoreExposer() (p2p.PeerScoreExposer, bool) { } } - if rf, ok := ret.Get(1).(func() bool); ok { - r1 = rf() - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 + return r0 } -// Publish provides a mock function with given fields: ctx, topic, data -func (_m *LibP2PNode) Publish(ctx context.Context, topic channels.Topic, data []byte) error { - ret := _m.Called(ctx, topic, data) +// Publish provides a mock function with given fields: ctx, messageScope +func (_m *LibP2PNode) Publish(ctx context.Context, messageScope network.OutgoingMessageScope) error { + ret := _m.Called(ctx, messageScope) + + if len(ret) == 0 { + panic("no return value specified for Publish") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, channels.Topic, []byte) error); ok { - r0 = rf(ctx, topic, data) + if rf, ok := ret.Get(0).(func(context.Context, network.OutgoingMessageScope) error); ok { + r0 = rf(ctx, messageScope) } else { r0 = ret.Error(0) } @@ -263,10 +376,14 @@ func (_m *LibP2PNode) Publish(ctx context.Context, topic channels.Topic, data [] return r0 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *LibP2PNode) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() 
<-chan struct{}); ok { r0 = rf() @@ -283,6 +400,10 @@ func (_m *LibP2PNode) Ready() <-chan struct{} { func (_m *LibP2PNode) RemovePeer(peerID peer.ID) error { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for RemovePeer") + } + var r0 error if rf, ok := ret.Get(0).(func(peer.ID) error); ok { r0 = rf(peerID) @@ -293,15 +414,19 @@ func (_m *LibP2PNode) RemovePeer(peerID peer.ID) error { return r0 } -// RequestPeerUpdate provides a mock function with given fields: +// RequestPeerUpdate provides a mock function with no fields func (_m *LibP2PNode) RequestPeerUpdate() { _m.Called() } -// Routing provides a mock function with given fields: +// Routing provides a mock function with no fields func (_m *LibP2PNode) Routing() routing.Routing { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Routing") + } + var r0 routing.Routing if rf, ok := ret.Get(0).(func() routing.Routing); ok { r0 = rf() @@ -314,10 +439,14 @@ func (_m *LibP2PNode) Routing() routing.Routing { return r0 } -// RoutingTable provides a mock function with given fields: +// RoutingTable provides a mock function with no fields func (_m *LibP2PNode) RoutingTable() *kbucket.RoutingTable { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RoutingTable") + } + var r0 *kbucket.RoutingTable if rf, ok := ret.Get(0).(func() *kbucket.RoutingTable); ok { r0 = rf() @@ -335,19 +464,27 @@ func (_m *LibP2PNode) SetComponentManager(cm *component.ComponentManager) { _m.Called(cm) } -// SetPeerScoreExposer provides a mock function with given fields: e -func (_m *LibP2PNode) SetPeerScoreExposer(e p2p.PeerScoreExposer) { - _m.Called(e) -} - // SetPubSub provides a mock function with given fields: ps func (_m *LibP2PNode) SetPubSub(ps p2p.PubSubAdapter) { _m.Called(ps) } // SetRouting provides a mock function with given fields: r -func (_m *LibP2PNode) SetRouting(r routing.Routing) { - _m.Called(r) +func (_m *LibP2PNode) SetRouting(r routing.Routing) error { + ret := _m.Called(r) + + if len(ret) == 0 { + panic("no return value specified for SetRouting") + } + + var r0 error + if rf, ok := ret.Get(0).(func(routing.Routing) error); ok { + r0 = rf(r) + } else { + r0 = ret.Error(0) + } + + return r0 } // SetUnicastManager provides a mock function with given fields: uniMgr @@ -360,10 +497,14 @@ func (_m *LibP2PNode) Start(ctx irrecoverable.SignalerContext) { _m.Called(ctx) } -// Stop provides a mock function with given fields: +// Stop provides a mock function with no fields func (_m *LibP2PNode) Stop() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Stop") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -378,6 +519,10 @@ func (_m *LibP2PNode) Stop() error { func (_m *LibP2PNode) Subscribe(topic channels.Topic, topicValidator p2p.TopicValidatorFunc) (p2p.Subscription, error) { ret := _m.Called(topic, topicValidator) + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + var r0 p2p.Subscription var r1 error if rf, ok := ret.Get(0).(func(channels.Topic, p2p.TopicValidatorFunc) (p2p.Subscription, error)); ok { @@ -400,10 +545,14 @@ func (_m *LibP2PNode) Subscribe(topic channels.Topic, topicValidator p2p.TopicVa return r0, r1 } -// UnSubscribe provides a mock function with given fields: topic -func (_m *LibP2PNode) UnSubscribe(topic channels.Topic) error { +// Unsubscribe provides a mock function with given fields: topic +func (_m *LibP2PNode) Unsubscribe(topic channels.Topic) 
error { ret := _m.Called(topic) + if len(ret) == 0 { + panic("no return value specified for Unsubscribe") + } + var r0 error if rf, ok := ret.Get(0).(func(channels.Topic) error); ok { r0 = rf(topic) @@ -415,11 +564,15 @@ func (_m *LibP2PNode) UnSubscribe(topic channels.Topic) error { } // WithDefaultUnicastProtocol provides a mock function with given fields: defaultHandler, preferred -func (_m *LibP2PNode) WithDefaultUnicastProtocol(defaultHandler network.StreamHandler, preferred []protocols.ProtocolName) error { +func (_m *LibP2PNode) WithDefaultUnicastProtocol(defaultHandler corenetwork.StreamHandler, preferred []protocols.ProtocolName) error { ret := _m.Called(defaultHandler, preferred) + if len(ret) == 0 { + panic("no return value specified for WithDefaultUnicastProtocol") + } + var r0 error - if rf, ok := ret.Get(0).(func(network.StreamHandler, []protocols.ProtocolName) error); ok { + if rf, ok := ret.Get(0).(func(corenetwork.StreamHandler, []protocols.ProtocolName) error); ok { r0 = rf(defaultHandler, preferred) } else { r0 = ret.Error(0) @@ -433,13 +586,12 @@ func (_m *LibP2PNode) WithPeersProvider(peersProvider p2p.PeersProvider) { _m.Called(peersProvider) } -type mockConstructorTestingTNewLibP2PNode interface { +// NewLibP2PNode creates a new instance of LibP2PNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLibP2PNode(t interface { mock.TestingT Cleanup(func()) -} - -// NewLibP2PNode creates a new instance of LibP2PNode. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewLibP2PNode(t mockConstructorTestingTNewLibP2PNode) *LibP2PNode { +}) *LibP2PNode { mock := &LibP2PNode{} mock.Mock.Test(t) diff --git a/network/p2p/mock/network_opt_function.go b/network/p2p/mock/network_opt_function.go deleted file mode 100644 index 50048811456..00000000000 --- a/network/p2p/mock/network_opt_function.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - p2p "github.com/onflow/flow-go/network/p2p" - mock "github.com/stretchr/testify/mock" -) - -// NetworkOptFunction is an autogenerated mock type for the NetworkOptFunction type -type NetworkOptFunction struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0 -func (_m *NetworkOptFunction) Execute(_a0 *p2p.Network) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewNetworkOptFunction interface { - mock.TestingT - Cleanup(func()) -} - -// NewNetworkOptFunction creates a new instance of NetworkOptFunction. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewNetworkOptFunction(t mockConstructorTestingTNewNetworkOptFunction) *NetworkOptFunction { - mock := &NetworkOptFunction{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/node_block_list_consumer.go b/network/p2p/mock/node_block_list_consumer.go deleted file mode 100644 index 41a5b05751d..00000000000 --- a/network/p2p/mock/node_block_list_consumer.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
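Note (editorial sketch): the LibP2PNode mock above reflects several interface renames (AddPeer becomes ConnectToPeer, UnSubscribe becomes Unsubscribe, CreateStream is replaced by OpenAndWriteOnStream) and SetRouting now returns an error. A short sketch of stubbing the renamed surface; the topic and peer values are illustrative:

package p2p_test

import (
	"context"
	"testing"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/stretchr/testify/mock"

	"github.com/onflow/flow-go/network/channels"
	p2pmock "github.com/onflow/flow-go/network/p2p/mock"
)

func TestLibP2PNodeMock(t *testing.T) {
	node := p2pmock.NewLibP2PNode(t)

	node.On("ConnectToPeer", mock.Anything, mock.Anything).Return(nil)
	node.On("Unsubscribe", mock.Anything).Return(nil)
	node.On("SetRouting", mock.Anything).Return(nil) // SetRouting now returns an error

	_ = node.ConnectToPeer(context.Background(), peer.AddrInfo{})
	_ = node.Unsubscribe(channels.Topic("illustrative-topic"))
	_ = node.SetRouting(nil)
}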
- -package mockp2p - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" -) - -// NodeBlockListConsumer is an autogenerated mock type for the NodeBlockListConsumer type -type NodeBlockListConsumer struct { - mock.Mock -} - -// OnNodeBlockListUpdate provides a mock function with given fields: list -func (_m *NodeBlockListConsumer) OnNodeDisallowListUpdate(list flow.IdentifierList) { - _m.Called(list) -} - -type mockConstructorTestingTNewNodeBlockListConsumer interface { - mock.TestingT - Cleanup(func()) -} - -// NewNodeBlockListConsumer creates a new instance of NodeBlockListConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewNodeBlockListConsumer(t mockConstructorTestingTNewNodeBlockListConsumer) *NodeBlockListConsumer { - mock := &NodeBlockListConsumer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/node_builder.go b/network/p2p/mock/node_builder.go index 70184e2ecaf..a8c1595ee39 100644 --- a/network/p2p/mock/node_builder.go +++ b/network/p2p/mock/node_builder.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( context "context" @@ -13,8 +13,6 @@ import ( mock "github.com/stretchr/testify/mock" - module "github.com/onflow/flow-go/module" - network "github.com/libp2p/go-libp2p/core/network" p2p "github.com/onflow/flow-go/network/p2p" @@ -22,8 +20,6 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" routing "github.com/libp2p/go-libp2p/core/routing" - - time "time" ) // NodeBuilder is an autogenerated mock type for the NodeBuilder type @@ -31,10 +27,14 @@ type NodeBuilder struct { mock.Mock } -// Build provides a mock function with given fields: +// Build provides a mock function with no fields func (_m *NodeBuilder) Build() (p2p.LibP2PNode, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Build") + } + var r0 p2p.LibP2PNode var r1 error if rf, ok := ret.Get(0).(func() (p2p.LibP2PNode, error)); ok { @@ -57,44 +57,16 @@ func (_m *NodeBuilder) Build() (p2p.LibP2PNode, error) { return r0, r1 } -// EnableGossipSubPeerScoring provides a mock function with given fields: _a0, _a1 -func (_m *NodeBuilder) EnableGossipSubPeerScoring(_a0 module.IdentityProvider, _a1 *p2p.PeerScoringConfig) p2p.NodeBuilder { - ret := _m.Called(_a0, _a1) - - var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(module.IdentityProvider, *p2p.PeerScoringConfig) p2p.NodeBuilder); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.NodeBuilder) - } - } - - return r0 -} - -// SetBasicResolver provides a mock function with given fields: _a0 -func (_m *NodeBuilder) SetBasicResolver(_a0 madns.BasicResolver) p2p.NodeBuilder { +// OverrideDefaultRpcInspectorFactory provides a mock function with given fields: _a0 +func (_m *NodeBuilder) OverrideDefaultRpcInspectorFactory(_a0 p2p.GossipSubRpcInspectorFactoryFunc) p2p.NodeBuilder { ret := _m.Called(_a0) - var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(madns.BasicResolver) p2p.NodeBuilder); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.NodeBuilder) - } + if len(ret) == 0 { + panic("no return value specified for OverrideDefaultRpcInspectorFactory") } - return r0 -} - -// SetConnectionGater provides a mock function with given fields: _a0 -func (_m *NodeBuilder) 
SetConnectionGater(_a0 connmgr.ConnectionGater) p2p.NodeBuilder { - ret := _m.Called(_a0) - var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(connmgr.ConnectionGater) p2p.NodeBuilder); ok { + if rf, ok := ret.Get(0).(func(p2p.GossipSubRpcInspectorFactoryFunc) p2p.NodeBuilder); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { @@ -105,28 +77,16 @@ func (_m *NodeBuilder) SetConnectionGater(_a0 connmgr.ConnectionGater) p2p.NodeB return r0 } -// SetConnectionManager provides a mock function with given fields: _a0 -func (_m *NodeBuilder) SetConnectionManager(_a0 connmgr.ConnManager) p2p.NodeBuilder { +// OverrideDefaultValidateQueueSize provides a mock function with given fields: _a0 +func (_m *NodeBuilder) OverrideDefaultValidateQueueSize(_a0 int) p2p.NodeBuilder { ret := _m.Called(_a0) - var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(connmgr.ConnManager) p2p.NodeBuilder); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.NodeBuilder) - } + if len(ret) == 0 { + panic("no return value specified for OverrideDefaultValidateQueueSize") } - return r0 -} - -// SetCreateNode provides a mock function with given fields: _a0 -func (_m *NodeBuilder) SetCreateNode(_a0 p2p.CreateNodeFunc) p2p.NodeBuilder { - ret := _m.Called(_a0) - var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(p2p.CreateNodeFunc) p2p.NodeBuilder); ok { + if rf, ok := ret.Get(0).(func(int) p2p.NodeBuilder); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { @@ -137,10 +97,14 @@ func (_m *NodeBuilder) SetCreateNode(_a0 p2p.CreateNodeFunc) p2p.NodeBuilder { return r0 } -// SetGossipSubFactory provides a mock function with given fields: _a0, _a1 -func (_m *NodeBuilder) SetGossipSubFactory(_a0 p2p.GossipSubFactoryFunc, _a1 p2p.GossipSubAdapterConfigFunc) p2p.NodeBuilder { +// OverrideGossipSubFactory provides a mock function with given fields: _a0, _a1 +func (_m *NodeBuilder) OverrideGossipSubFactory(_a0 p2p.GossipSubFactoryFunc, _a1 p2p.GossipSubAdapterConfigFunc) p2p.NodeBuilder { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for OverrideGossipSubFactory") + } + var r0 p2p.NodeBuilder if rf, ok := ret.Get(0).(func(p2p.GossipSubFactoryFunc, p2p.GossipSubAdapterConfigFunc) p2p.NodeBuilder); ok { r0 = rf(_a0, _a1) @@ -153,12 +117,16 @@ func (_m *NodeBuilder) SetGossipSubFactory(_a0 p2p.GossipSubFactoryFunc, _a1 p2p return r0 } -// SetGossipSubRpcInspectorSuite provides a mock function with given fields: _a0 -func (_m *NodeBuilder) SetGossipSubRpcInspectorSuite(_a0 p2p.GossipSubInspectorSuite) p2p.NodeBuilder { +// OverrideGossipSubScoringConfig provides a mock function with given fields: _a0 +func (_m *NodeBuilder) OverrideGossipSubScoringConfig(_a0 *p2p.PeerScoringConfigOverride) p2p.NodeBuilder { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for OverrideGossipSubScoringConfig") + } + var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(p2p.GossipSubInspectorSuite) p2p.NodeBuilder); ok { + if rf, ok := ret.Get(0).(func(*p2p.PeerScoringConfigOverride) p2p.NodeBuilder); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { @@ -169,12 +137,16 @@ func (_m *NodeBuilder) SetGossipSubRpcInspectorSuite(_a0 p2p.GossipSubInspectorS return r0 } -// SetGossipSubScoreTracerInterval provides a mock function with given fields: _a0 -func (_m *NodeBuilder) SetGossipSubScoreTracerInterval(_a0 time.Duration) p2p.NodeBuilder { +// OverrideNodeConstructor provides a mock function with given fields: _a0 +func (_m *NodeBuilder) 
OverrideNodeConstructor(_a0 p2p.NodeConstructor) p2p.NodeBuilder { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for OverrideNodeConstructor") + } + var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(time.Duration) p2p.NodeBuilder); ok { + if rf, ok := ret.Get(0).(func(p2p.NodeConstructor) p2p.NodeBuilder); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { @@ -185,12 +157,16 @@ func (_m *NodeBuilder) SetGossipSubScoreTracerInterval(_a0 time.Duration) p2p.No return r0 } -// SetGossipSubTracer provides a mock function with given fields: _a0 -func (_m *NodeBuilder) SetGossipSubTracer(_a0 p2p.PubSubTracer) p2p.NodeBuilder { +// SetBasicResolver provides a mock function with given fields: _a0 +func (_m *NodeBuilder) SetBasicResolver(_a0 madns.BasicResolver) p2p.NodeBuilder { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for SetBasicResolver") + } + var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(p2p.PubSubTracer) p2p.NodeBuilder); ok { + if rf, ok := ret.Get(0).(func(madns.BasicResolver) p2p.NodeBuilder); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { @@ -201,13 +177,17 @@ func (_m *NodeBuilder) SetGossipSubTracer(_a0 p2p.PubSubTracer) p2p.NodeBuilder return r0 } -// SetPeerManagerOptions provides a mock function with given fields: _a0, _a1 -func (_m *NodeBuilder) SetPeerManagerOptions(_a0 bool, _a1 time.Duration) p2p.NodeBuilder { - ret := _m.Called(_a0, _a1) +// SetConnectionGater provides a mock function with given fields: _a0 +func (_m *NodeBuilder) SetConnectionGater(_a0 p2p.ConnectionGater) p2p.NodeBuilder { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SetConnectionGater") + } var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(bool, time.Duration) p2p.NodeBuilder); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(p2p.ConnectionGater) p2p.NodeBuilder); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(p2p.NodeBuilder) @@ -217,12 +197,16 @@ func (_m *NodeBuilder) SetPeerManagerOptions(_a0 bool, _a1 time.Duration) p2p.No return r0 } -// SetRateLimiterDistributor provides a mock function with given fields: _a0 -func (_m *NodeBuilder) SetRateLimiterDistributor(_a0 p2p.UnicastRateLimiterDistributor) p2p.NodeBuilder { +// SetConnectionManager provides a mock function with given fields: _a0 +func (_m *NodeBuilder) SetConnectionManager(_a0 connmgr.ConnManager) p2p.NodeBuilder { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for SetConnectionManager") + } + var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(p2p.UnicastRateLimiterDistributor) p2p.NodeBuilder); ok { + if rf, ok := ret.Get(0).(func(connmgr.ConnManager) p2p.NodeBuilder); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { @@ -237,6 +221,10 @@ func (_m *NodeBuilder) SetRateLimiterDistributor(_a0 p2p.UnicastRateLimiterDistr func (_m *NodeBuilder) SetResourceManager(_a0 network.ResourceManager) p2p.NodeBuilder { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for SetResourceManager") + } + var r0 p2p.NodeBuilder if rf, ok := ret.Get(0).(func(network.ResourceManager) p2p.NodeBuilder); ok { r0 = rf(_a0) @@ -253,24 +241,12 @@ func (_m *NodeBuilder) SetResourceManager(_a0 network.ResourceManager) p2p.NodeB func (_m *NodeBuilder) SetRoutingSystem(_a0 func(context.Context, host.Host) (routing.Routing, error)) p2p.NodeBuilder { ret := _m.Called(_a0) - var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(func(context.Context, 
host.Host) (routing.Routing, error)) p2p.NodeBuilder); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.NodeBuilder) - } + if len(ret) == 0 { + panic("no return value specified for SetRoutingSystem") } - return r0 -} - -// SetStreamCreationRetryInterval provides a mock function with given fields: _a0 -func (_m *NodeBuilder) SetStreamCreationRetryInterval(_a0 time.Duration) p2p.NodeBuilder { - ret := _m.Called(_a0) - var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(time.Duration) p2p.NodeBuilder); ok { + if rf, ok := ret.Get(0).(func(func(context.Context, host.Host) (routing.Routing, error)) p2p.NodeBuilder); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { @@ -285,6 +261,10 @@ func (_m *NodeBuilder) SetStreamCreationRetryInterval(_a0 time.Duration) p2p.Nod func (_m *NodeBuilder) SetSubscriptionFilter(_a0 pubsub.SubscriptionFilter) p2p.NodeBuilder { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for SetSubscriptionFilter") + } + var r0 p2p.NodeBuilder if rf, ok := ret.Get(0).(func(pubsub.SubscriptionFilter) p2p.NodeBuilder); ok { r0 = rf(_a0) @@ -297,13 +277,12 @@ func (_m *NodeBuilder) SetSubscriptionFilter(_a0 pubsub.SubscriptionFilter) p2p. return r0 } -type mockConstructorTestingTNewNodeBuilder interface { +// NewNodeBuilder creates a new instance of NodeBuilder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNodeBuilder(t interface { mock.TestingT Cleanup(func()) -} - -// NewNodeBuilder creates a new instance of NodeBuilder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewNodeBuilder(t mockConstructorTestingTNewNodeBuilder) *NodeBuilder { +}) *NodeBuilder { mock := &NodeBuilder{} mock.Mock.Test(t) diff --git a/network/p2p/mock/peer_connections.go b/network/p2p/mock/peer_connections.go index 0ce59963b84..cd7b4348ecf 100644 --- a/network/p2p/mock/peer_connections.go +++ b/network/p2p/mock/peer_connections.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( mock "github.com/stretchr/testify/mock" @@ -17,6 +17,10 @@ type PeerConnections struct { func (_m *PeerConnections) IsConnected(peerID peer.ID) (bool, error) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for IsConnected") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(peer.ID) (bool, error)); ok { @@ -37,13 +41,12 @@ func (_m *PeerConnections) IsConnected(peerID peer.ID) (bool, error) { return r0, r1 } -type mockConstructorTestingTNewPeerConnections interface { +// NewPeerConnections creates a new instance of PeerConnections. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPeerConnections(t interface { mock.TestingT Cleanup(func()) -} - -// NewPeerConnections creates a new instance of PeerConnections. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewPeerConnections(t mockConstructorTestingTNewPeerConnections) *PeerConnections { +}) *PeerConnections { mock := &PeerConnections{} mock.Mock.Test(t) diff --git a/network/p2p/mock/peer_filter.go b/network/p2p/mock/peer_filter.go deleted file mode 100644 index 52f6dbd139f..00000000000 --- a/network/p2p/mock/peer_filter.go +++ /dev/null @@ -1,43 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - mock "github.com/stretchr/testify/mock" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// PeerFilter is an autogenerated mock type for the PeerFilter type -type PeerFilter struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0 -func (_m *PeerFilter) Execute(_a0 peer.ID) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(peer.ID) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewPeerFilter interface { - mock.TestingT - Cleanup(func()) -} - -// NewPeerFilter creates a new instance of PeerFilter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPeerFilter(t mockConstructorTestingTNewPeerFilter) *PeerFilter { - mock := &PeerFilter{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/peer_management.go b/network/p2p/mock/peer_management.go new file mode 100644 index 00000000000..0149b5028af --- /dev/null +++ b/network/p2p/mock/peer_management.go @@ -0,0 +1,330 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + component "github.com/onflow/flow-go/module/component" + channels "github.com/onflow/flow-go/network/channels" + + context "context" + + corenetwork "github.com/libp2p/go-libp2p/core/network" + + host "github.com/libp2p/go-libp2p/core/host" + + kbucket "github.com/libp2p/go-libp2p-kbucket" + + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" + + p2p "github.com/onflow/flow-go/network/p2p" + + peer "github.com/libp2p/go-libp2p/core/peer" + + protocol "github.com/libp2p/go-libp2p/core/protocol" + + protocols "github.com/onflow/flow-go/network/p2p/unicast/protocols" +) + +// PeerManagement is an autogenerated mock type for the PeerManagement type +type PeerManagement struct { + mock.Mock +} + +// ConnectToPeer provides a mock function with given fields: ctx, peerInfo +func (_m *PeerManagement) ConnectToPeer(ctx context.Context, peerInfo peer.AddrInfo) error { + ret := _m.Called(ctx, peerInfo) + + if len(ret) == 0 { + panic("no return value specified for ConnectToPeer") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, peer.AddrInfo) error); ok { + r0 = rf(ctx, peerInfo) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetIPPort provides a mock function with no fields +func (_m *PeerManagement) GetIPPort() (string, string, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetIPPort") + } + + var r0 string + var r1 string + var r2 error + if rf, ok := ret.Get(0).(func() (string, string, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func() string); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(string) + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// 
GetPeersForProtocol provides a mock function with given fields: pid +func (_m *PeerManagement) GetPeersForProtocol(pid protocol.ID) peer.IDSlice { + ret := _m.Called(pid) + + if len(ret) == 0 { + panic("no return value specified for GetPeersForProtocol") + } + + var r0 peer.IDSlice + if rf, ok := ret.Get(0).(func(protocol.ID) peer.IDSlice); ok { + r0 = rf(pid) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(peer.IDSlice) + } + } + + return r0 +} + +// Host provides a mock function with no fields +func (_m *PeerManagement) Host() host.Host { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Host") + } + + var r0 host.Host + if rf, ok := ret.Get(0).(func() host.Host); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(host.Host) + } + } + + return r0 +} + +// ID provides a mock function with no fields +func (_m *PeerManagement) ID() peer.ID { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 peer.ID + if rf, ok := ret.Get(0).(func() peer.ID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(peer.ID) + } + + return r0 +} + +// ListPeers provides a mock function with given fields: topic +func (_m *PeerManagement) ListPeers(topic string) []peer.ID { + ret := _m.Called(topic) + + if len(ret) == 0 { + panic("no return value specified for ListPeers") + } + + var r0 []peer.ID + if rf, ok := ret.Get(0).(func(string) []peer.ID); ok { + r0 = rf(topic) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]peer.ID) + } + } + + return r0 +} + +// PeerManagerComponent provides a mock function with no fields +func (_m *PeerManagement) PeerManagerComponent() component.Component { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PeerManagerComponent") + } + + var r0 component.Component + if rf, ok := ret.Get(0).(func() component.Component); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(component.Component) + } + } + + return r0 +} + +// Publish provides a mock function with given fields: ctx, messageScope +func (_m *PeerManagement) Publish(ctx context.Context, messageScope network.OutgoingMessageScope) error { + ret := _m.Called(ctx, messageScope) + + if len(ret) == 0 { + panic("no return value specified for Publish") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, network.OutgoingMessageScope) error); ok { + r0 = rf(ctx, messageScope) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RemovePeer provides a mock function with given fields: peerID +func (_m *PeerManagement) RemovePeer(peerID peer.ID) error { + ret := _m.Called(peerID) + + if len(ret) == 0 { + panic("no return value specified for RemovePeer") + } + + var r0 error + if rf, ok := ret.Get(0).(func(peer.ID) error); ok { + r0 = rf(peerID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RequestPeerUpdate provides a mock function with no fields +func (_m *PeerManagement) RequestPeerUpdate() { + _m.Called() +} + +// RoutingTable provides a mock function with no fields +func (_m *PeerManagement) RoutingTable() *kbucket.RoutingTable { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RoutingTable") + } + + var r0 *kbucket.RoutingTable + if rf, ok := ret.Get(0).(func() *kbucket.RoutingTable); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*kbucket.RoutingTable) + } + } + + return r0 +} + +// Subscribe provides a mock function with given fields: topic, topicValidator +func (_m 
*PeerManagement) Subscribe(topic channels.Topic, topicValidator p2p.TopicValidatorFunc) (p2p.Subscription, error) { + ret := _m.Called(topic, topicValidator) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 p2p.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(channels.Topic, p2p.TopicValidatorFunc) (p2p.Subscription, error)); ok { + return rf(topic, topicValidator) + } + if rf, ok := ret.Get(0).(func(channels.Topic, p2p.TopicValidatorFunc) p2p.Subscription); ok { + r0 = rf(topic, topicValidator) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(p2p.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(channels.Topic, p2p.TopicValidatorFunc) error); ok { + r1 = rf(topic, topicValidator) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Unsubscribe provides a mock function with given fields: topic +func (_m *PeerManagement) Unsubscribe(topic channels.Topic) error { + ret := _m.Called(topic) + + if len(ret) == 0 { + panic("no return value specified for Unsubscribe") + } + + var r0 error + if rf, ok := ret.Get(0).(func(channels.Topic) error); ok { + r0 = rf(topic) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WithDefaultUnicastProtocol provides a mock function with given fields: defaultHandler, preferred +func (_m *PeerManagement) WithDefaultUnicastProtocol(defaultHandler corenetwork.StreamHandler, preferred []protocols.ProtocolName) error { + ret := _m.Called(defaultHandler, preferred) + + if len(ret) == 0 { + panic("no return value specified for WithDefaultUnicastProtocol") + } + + var r0 error + if rf, ok := ret.Get(0).(func(corenetwork.StreamHandler, []protocols.ProtocolName) error); ok { + r0 = rf(defaultHandler, preferred) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WithPeersProvider provides a mock function with given fields: peersProvider +func (_m *PeerManagement) WithPeersProvider(peersProvider p2p.PeersProvider) { + _m.Called(peersProvider) +} + +// NewPeerManagement creates a new instance of PeerManagement. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPeerManagement(t interface { + mock.TestingT + Cleanup(func()) +}) *PeerManagement { + mock := &PeerManagement{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/peer_manager.go b/network/p2p/mock/peer_manager.go index 6200b1d837c..df48e1c28e0 100644 --- a/network/p2p/mock/peer_manager.go +++ b/network/p2p/mock/peer_manager.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mockp2p +package mock import ( context "context" @@ -18,10 +18,14 @@ type PeerManager struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *PeerManager) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -44,10 +48,14 @@ func (_m *PeerManager) OnRateLimitedPeer(pid peer.ID, role string, msgType strin _m.Called(pid, role, msgType, topic, reason) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *PeerManager) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -60,7 +68,7 @@ func (_m *PeerManager) Ready() <-chan struct{} { return r0 } -// RequestPeerUpdate provides a mock function with given fields: +// RequestPeerUpdate provides a mock function with no fields func (_m *PeerManager) RequestPeerUpdate() { _m.Called() } @@ -75,13 +83,12 @@ func (_m *PeerManager) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewPeerManager interface { +// NewPeerManager creates a new instance of PeerManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPeerManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewPeerManager creates a new instance of PeerManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPeerManager(t mockConstructorTestingTNewPeerManager) *PeerManager { +}) *PeerManager { mock := &PeerManager{} mock.Mock.Test(t) diff --git a/network/p2p/mock/peer_manager_factory_func.go b/network/p2p/mock/peer_manager_factory_func.go deleted file mode 100644 index 189c9b3e282..00000000000 --- a/network/p2p/mock/peer_manager_factory_func.go +++ /dev/null @@ -1,58 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. 
- -package mockp2p - -import ( - host "github.com/libp2p/go-libp2p/core/host" - mock "github.com/stretchr/testify/mock" - - p2p "github.com/onflow/flow-go/network/p2p" - - zerolog "github.com/rs/zerolog" -) - -// PeerManagerFactoryFunc is an autogenerated mock type for the PeerManagerFactoryFunc type -type PeerManagerFactoryFunc struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0, peersProvider, logger -func (_m *PeerManagerFactoryFunc) Execute(_a0 host.Host, peersProvider p2p.PeersProvider, logger zerolog.Logger) (p2p.PeerManager, error) { - ret := _m.Called(_a0, peersProvider, logger) - - var r0 p2p.PeerManager - var r1 error - if rf, ok := ret.Get(0).(func(host.Host, p2p.PeersProvider, zerolog.Logger) (p2p.PeerManager, error)); ok { - return rf(_a0, peersProvider, logger) - } - if rf, ok := ret.Get(0).(func(host.Host, p2p.PeersProvider, zerolog.Logger) p2p.PeerManager); ok { - r0 = rf(_a0, peersProvider, logger) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.PeerManager) - } - } - - if rf, ok := ret.Get(1).(func(host.Host, p2p.PeersProvider, zerolog.Logger) error); ok { - r1 = rf(_a0, peersProvider, logger) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewPeerManagerFactoryFunc interface { - mock.TestingT - Cleanup(func()) -} - -// NewPeerManagerFactoryFunc creates a new instance of PeerManagerFactoryFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPeerManagerFactoryFunc(t mockConstructorTestingTNewPeerManagerFactoryFunc) *PeerManagerFactoryFunc { - mock := &PeerManagerFactoryFunc{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/peer_score.go b/network/p2p/mock/peer_score.go index 374d03d6749..d8fd9585bb0 100644 --- a/network/p2p/mock/peer_score.go +++ b/network/p2p/mock/peer_score.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( p2p "github.com/onflow/flow-go/network/p2p" @@ -12,15 +12,15 @@ type PeerScore struct { mock.Mock } -// PeerScoreExposer provides a mock function with given fields: -func (_m *PeerScore) PeerScoreExposer() (p2p.PeerScoreExposer, bool) { +// PeerScoreExposer provides a mock function with no fields +func (_m *PeerScore) PeerScoreExposer() p2p.PeerScoreExposer { ret := _m.Called() - var r0 p2p.PeerScoreExposer - var r1 bool - if rf, ok := ret.Get(0).(func() (p2p.PeerScoreExposer, bool)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for PeerScoreExposer") } + + var r0 p2p.PeerScoreExposer if rf, ok := ret.Get(0).(func() p2p.PeerScoreExposer); ok { r0 = rf() } else { @@ -29,27 +29,15 @@ func (_m *PeerScore) PeerScoreExposer() (p2p.PeerScoreExposer, bool) { } } - if rf, ok := ret.Get(1).(func() bool); ok { - r1 = rf() - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 + return r0 } -// SetPeerScoreExposer provides a mock function with given fields: e -func (_m *PeerScore) SetPeerScoreExposer(e p2p.PeerScoreExposer) { - _m.Called(e) -} - -type mockConstructorTestingTNewPeerScore interface { +// NewPeerScore creates a new instance of PeerScore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewPeerScore(t interface { mock.TestingT Cleanup(func()) -} - -// NewPeerScore creates a new instance of PeerScore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPeerScore(t mockConstructorTestingTNewPeerScore) *PeerScore { +}) *PeerScore { mock := &PeerScore{} mock.Mock.Test(t) diff --git a/network/p2p/mock/peer_score_exposer.go b/network/p2p/mock/peer_score_exposer.go index 53dc52e5367..a0d58bf7ff3 100644 --- a/network/p2p/mock/peer_score_exposer.go +++ b/network/p2p/mock/peer_score_exposer.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( p2p "github.com/onflow/flow-go/network/p2p" @@ -18,6 +18,10 @@ type PeerScoreExposer struct { func (_m *PeerScoreExposer) GetAppScore(peerID peer.ID) (float64, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for GetAppScore") + } + var r0 float64 var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (float64, bool)); ok { @@ -42,6 +46,10 @@ func (_m *PeerScoreExposer) GetAppScore(peerID peer.ID) (float64, bool) { func (_m *PeerScoreExposer) GetBehaviourPenalty(peerID peer.ID) (float64, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for GetBehaviourPenalty") + } + var r0 float64 var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (float64, bool)); ok { @@ -66,6 +74,10 @@ func (_m *PeerScoreExposer) GetBehaviourPenalty(peerID peer.ID) (float64, bool) func (_m *PeerScoreExposer) GetIPColocationFactor(peerID peer.ID) (float64, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for GetIPColocationFactor") + } + var r0 float64 var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (float64, bool)); ok { @@ -90,6 +102,10 @@ func (_m *PeerScoreExposer) GetIPColocationFactor(peerID peer.ID) (float64, bool func (_m *PeerScoreExposer) GetScore(peerID peer.ID) (float64, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for GetScore") + } + var r0 float64 var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (float64, bool)); ok { @@ -114,6 +130,10 @@ func (_m *PeerScoreExposer) GetScore(peerID peer.ID) (float64, bool) { func (_m *PeerScoreExposer) GetTopicScores(peerID peer.ID) (map[string]p2p.TopicScoreSnapshot, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for GetTopicScores") + } + var r0 map[string]p2p.TopicScoreSnapshot var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (map[string]p2p.TopicScoreSnapshot, bool)); ok { @@ -136,13 +156,12 @@ func (_m *PeerScoreExposer) GetTopicScores(peerID peer.ID) (map[string]p2p.Topic return r0, r1 } -type mockConstructorTestingTNewPeerScoreExposer interface { +// NewPeerScoreExposer creates a new instance of PeerScoreExposer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPeerScoreExposer(t interface { mock.TestingT Cleanup(func()) -} - -// NewPeerScoreExposer creates a new instance of PeerScoreExposer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewPeerScoreExposer(t mockConstructorTestingTNewPeerScoreExposer) *PeerScoreExposer { +}) *PeerScoreExposer { mock := &PeerScoreExposer{} mock.Mock.Test(t) diff --git a/network/p2p/mock/peer_score_tracer.go b/network/p2p/mock/peer_score_tracer.go index 88791c7656b..45b57b1c93f 100644 --- a/network/p2p/mock/peer_score_tracer.go +++ b/network/p2p/mock/peer_score_tracer.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( irrecoverable "github.com/onflow/flow-go/module/irrecoverable" @@ -18,10 +18,14 @@ type PeerScoreTracer struct { mock.Mock } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *PeerScoreTracer) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -38,6 +42,10 @@ func (_m *PeerScoreTracer) Done() <-chan struct{} { func (_m *PeerScoreTracer) GetAppScore(peerID peer.ID) (float64, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for GetAppScore") + } + var r0 float64 var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (float64, bool)); ok { @@ -62,6 +70,10 @@ func (_m *PeerScoreTracer) GetAppScore(peerID peer.ID) (float64, bool) { func (_m *PeerScoreTracer) GetBehaviourPenalty(peerID peer.ID) (float64, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for GetBehaviourPenalty") + } + var r0 float64 var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (float64, bool)); ok { @@ -86,6 +98,10 @@ func (_m *PeerScoreTracer) GetBehaviourPenalty(peerID peer.ID) (float64, bool) { func (_m *PeerScoreTracer) GetIPColocationFactor(peerID peer.ID) (float64, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for GetIPColocationFactor") + } + var r0 float64 var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (float64, bool)); ok { @@ -110,6 +126,10 @@ func (_m *PeerScoreTracer) GetIPColocationFactor(peerID peer.ID) (float64, bool) func (_m *PeerScoreTracer) GetScore(peerID peer.ID) (float64, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for GetScore") + } + var r0 float64 var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (float64, bool)); ok { @@ -134,6 +154,10 @@ func (_m *PeerScoreTracer) GetScore(peerID peer.ID) (float64, bool) { func (_m *PeerScoreTracer) GetTopicScores(peerID peer.ID) (map[string]p2p.TopicScoreSnapshot, bool) { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for GetTopicScores") + } + var r0 map[string]p2p.TopicScoreSnapshot var r1 bool if rf, ok := ret.Get(0).(func(peer.ID) (map[string]p2p.TopicScoreSnapshot, bool)); ok { @@ -156,10 +180,14 @@ func (_m *PeerScoreTracer) GetTopicScores(peerID peer.ID) (map[string]p2p.TopicS return r0, r1 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *PeerScoreTracer) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -177,10 +205,14 @@ func (_m *PeerScoreTracer) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -// UpdateInterval provides a mock function with given fields: +// UpdateInterval provides a 
mock function with no fields func (_m *PeerScoreTracer) UpdateInterval() time.Duration { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for UpdateInterval") + } + var r0 time.Duration if rf, ok := ret.Get(0).(func() time.Duration); ok { r0 = rf() @@ -196,13 +228,12 @@ func (_m *PeerScoreTracer) UpdatePeerScoreSnapshots(_a0 map[peer.ID]*p2p.PeerSco _m.Called(_a0) } -type mockConstructorTestingTNewPeerScoreTracer interface { +// NewPeerScoreTracer creates a new instance of PeerScoreTracer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPeerScoreTracer(t interface { mock.TestingT Cleanup(func()) -} - -// NewPeerScoreTracer creates a new instance of PeerScoreTracer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPeerScoreTracer(t mockConstructorTestingTNewPeerScoreTracer) *PeerScoreTracer { +}) *PeerScoreTracer { mock := &PeerScoreTracer{} mock.Mock.Test(t) diff --git a/network/p2p/mock/peer_scoring_builder.go b/network/p2p/mock/peer_scoring_builder.go deleted file mode 100644 index 51a7e2c68fb..00000000000 --- a/network/p2p/mock/peer_scoring_builder.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - channels "github.com/onflow/flow-go/network/channels" - mock "github.com/stretchr/testify/mock" - - peer "github.com/libp2p/go-libp2p/core/peer" - - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -// PeerScoringBuilder is an autogenerated mock type for the PeerScoringBuilder type -type PeerScoringBuilder struct { - mock.Mock -} - -// SetAppSpecificScoreParams provides a mock function with given fields: _a0 -func (_m *PeerScoringBuilder) SetAppSpecificScoreParams(_a0 func(peer.ID) float64) { - _m.Called(_a0) -} - -// SetTopicScoreParams provides a mock function with given fields: topic, topicScoreParams -func (_m *PeerScoringBuilder) SetTopicScoreParams(topic channels.Topic, topicScoreParams *pubsub.TopicScoreParams) { - _m.Called(topic, topicScoreParams) -} - -type mockConstructorTestingTNewPeerScoringBuilder interface { - mock.TestingT - Cleanup(func()) -} - -// NewPeerScoringBuilder creates a new instance of PeerScoringBuilder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPeerScoringBuilder(t mockConstructorTestingTNewPeerScoringBuilder) *PeerScoringBuilder { - mock := &PeerScoringBuilder{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/peer_updater.go b/network/p2p/mock/peer_updater.go new file mode 100644 index 00000000000..43c5fe69c5c --- /dev/null +++ b/network/p2p/mock/peer_updater.go @@ -0,0 +1,35 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// PeerUpdater is an autogenerated mock type for the PeerUpdater type +type PeerUpdater struct { + mock.Mock +} + +// UpdatePeers provides a mock function with given fields: ctx, peerIDs +func (_m *PeerUpdater) UpdatePeers(ctx context.Context, peerIDs peer.IDSlice) { + _m.Called(ctx, peerIDs) +} + +// NewPeerUpdater creates a new instance of PeerUpdater. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewPeerUpdater(t interface { + mock.TestingT + Cleanup(func()) +}) *PeerUpdater { + mock := &PeerUpdater{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/peers_provider.go b/network/p2p/mock/peers_provider.go deleted file mode 100644 index ac94b23d7dc..00000000000 --- a/network/p2p/mock/peers_provider.go +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - mock "github.com/stretchr/testify/mock" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// PeersProvider is an autogenerated mock type for the PeersProvider type -type PeersProvider struct { - mock.Mock -} - -// Execute provides a mock function with given fields: -func (_m *PeersProvider) Execute() peer.IDSlice { - ret := _m.Called() - - var r0 peer.IDSlice - if rf, ok := ret.Get(0).(func() peer.IDSlice); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(peer.IDSlice) - } - } - - return r0 -} - -type mockConstructorTestingTNewPeersProvider interface { - mock.TestingT - Cleanup(func()) -} - -// NewPeersProvider creates a new instance of PeersProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPeersProvider(t mockConstructorTestingTNewPeersProvider) *PeersProvider { - mock := &PeersProvider{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/protocol_peer_cache.go b/network/p2p/mock/protocol_peer_cache.go index 80d21ff3814..93b43754656 100644 --- a/network/p2p/mock/protocol_peer_cache.go +++ b/network/p2p/mock/protocol_peer_cache.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( mock "github.com/stretchr/testify/mock" @@ -21,15 +21,19 @@ func (_m *ProtocolPeerCache) AddProtocols(peerID peer.ID, protocols []protocol.I } // GetPeers provides a mock function with given fields: pid -func (_m *ProtocolPeerCache) GetPeers(pid protocol.ID) map[peer.ID]struct{} { +func (_m *ProtocolPeerCache) GetPeers(pid protocol.ID) peer.IDSlice { ret := _m.Called(pid) - var r0 map[peer.ID]struct{} - if rf, ok := ret.Get(0).(func(protocol.ID) map[peer.ID]struct{}); ok { + if len(ret) == 0 { + panic("no return value specified for GetPeers") + } + + var r0 peer.IDSlice + if rf, ok := ret.Get(0).(func(protocol.ID) peer.IDSlice); ok { r0 = rf(pid) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(map[peer.ID]struct{}) + r0 = ret.Get(0).(peer.IDSlice) } } @@ -46,13 +50,12 @@ func (_m *ProtocolPeerCache) RemoveProtocols(peerID peer.ID, protocols []protoco _m.Called(peerID, protocols) } -type mockConstructorTestingTNewProtocolPeerCache interface { +// NewProtocolPeerCache creates a new instance of ProtocolPeerCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProtocolPeerCache(t interface { mock.TestingT Cleanup(func()) -} - -// NewProtocolPeerCache creates a new instance of ProtocolPeerCache. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewProtocolPeerCache(t mockConstructorTestingTNewProtocolPeerCache) *ProtocolPeerCache { +}) *ProtocolPeerCache { mock := &ProtocolPeerCache{} mock.Mock.Test(t) diff --git a/network/p2p/mock/pub_sub.go b/network/p2p/mock/pub_sub.go new file mode 100644 index 00000000000..02b5054a609 --- /dev/null +++ b/network/p2p/mock/pub_sub.go @@ -0,0 +1,127 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + channels "github.com/onflow/flow-go/network/channels" + + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" + + p2p "github.com/onflow/flow-go/network/p2p" + + peer "github.com/libp2p/go-libp2p/core/peer" +) + +// PubSub is an autogenerated mock type for the PubSub type +type PubSub struct { + mock.Mock +} + +// GetLocalMeshPeers provides a mock function with given fields: topic +func (_m *PubSub) GetLocalMeshPeers(topic channels.Topic) []peer.ID { + ret := _m.Called(topic) + + if len(ret) == 0 { + panic("no return value specified for GetLocalMeshPeers") + } + + var r0 []peer.ID + if rf, ok := ret.Get(0).(func(channels.Topic) []peer.ID); ok { + r0 = rf(topic) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]peer.ID) + } + } + + return r0 +} + +// Publish provides a mock function with given fields: ctx, messageScope +func (_m *PubSub) Publish(ctx context.Context, messageScope network.OutgoingMessageScope) error { + ret := _m.Called(ctx, messageScope) + + if len(ret) == 0 { + panic("no return value specified for Publish") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, network.OutgoingMessageScope) error); ok { + r0 = rf(ctx, messageScope) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetPubSub provides a mock function with given fields: ps +func (_m *PubSub) SetPubSub(ps p2p.PubSubAdapter) { + _m.Called(ps) +} + +// Subscribe provides a mock function with given fields: topic, topicValidator +func (_m *PubSub) Subscribe(topic channels.Topic, topicValidator p2p.TopicValidatorFunc) (p2p.Subscription, error) { + ret := _m.Called(topic, topicValidator) + + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + + var r0 p2p.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(channels.Topic, p2p.TopicValidatorFunc) (p2p.Subscription, error)); ok { + return rf(topic, topicValidator) + } + if rf, ok := ret.Get(0).(func(channels.Topic, p2p.TopicValidatorFunc) p2p.Subscription); ok { + r0 = rf(topic, topicValidator) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(p2p.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(channels.Topic, p2p.TopicValidatorFunc) error); ok { + r1 = rf(topic, topicValidator) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Unsubscribe provides a mock function with given fields: topic +func (_m *PubSub) Unsubscribe(topic channels.Topic) error { + ret := _m.Called(topic) + + if len(ret) == 0 { + panic("no return value specified for Unsubscribe") + } + + var r0 error + if rf, ok := ret.Get(0).(func(channels.Topic) error); ok { + r0 = rf(topic) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewPubSub creates a new instance of PubSub. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewPubSub(t interface { + mock.TestingT + Cleanup(func()) +}) *PubSub { + mock := &PubSub{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/pub_sub_adapter.go b/network/p2p/mock/pub_sub_adapter.go index d8f2cf533a2..113eae03b06 100644 --- a/network/p2p/mock/pub_sub_adapter.go +++ b/network/p2p/mock/pub_sub_adapter.go @@ -1,9 +1,13 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( + flow "github.com/onflow/flow-go/model/flow" + channels "github.com/onflow/flow-go/network/channels" + irrecoverable "github.com/onflow/flow-go/module/irrecoverable" + mock "github.com/stretchr/testify/mock" p2p "github.com/onflow/flow-go/network/p2p" @@ -16,10 +20,19 @@ type PubSubAdapter struct { mock.Mock } -// Done provides a mock function with given fields: +// ActiveClustersChanged provides a mock function with given fields: _a0 +func (_m *PubSubAdapter) ActiveClustersChanged(_a0 flow.ChainIDList) { + _m.Called(_a0) +} + +// Done provides a mock function with no fields func (_m *PubSubAdapter) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -32,10 +45,34 @@ func (_m *PubSubAdapter) Done() <-chan struct{} { return r0 } -// GetTopics provides a mock function with given fields: +// GetLocalMeshPeers provides a mock function with given fields: topic +func (_m *PubSubAdapter) GetLocalMeshPeers(topic channels.Topic) []peer.ID { + ret := _m.Called(topic) + + if len(ret) == 0 { + panic("no return value specified for GetLocalMeshPeers") + } + + var r0 []peer.ID + if rf, ok := ret.Get(0).(func(channels.Topic) []peer.ID); ok { + r0 = rf(topic) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]peer.ID) + } + } + + return r0 +} + +// GetTopics provides a mock function with no fields func (_m *PubSubAdapter) GetTopics() []string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetTopics") + } + var r0 []string if rf, ok := ret.Get(0).(func() []string); ok { r0 = rf() @@ -52,6 +89,10 @@ func (_m *PubSubAdapter) GetTopics() []string { func (_m *PubSubAdapter) Join(topic string) (p2p.Topic, error) { ret := _m.Called(topic) + if len(ret) == 0 { + panic("no return value specified for Join") + } + var r0 p2p.Topic var r1 error if rf, ok := ret.Get(0).(func(string) (p2p.Topic, error)); ok { @@ -78,6 +119,10 @@ func (_m *PubSubAdapter) Join(topic string) (p2p.Topic, error) { func (_m *PubSubAdapter) ListPeers(topic string) []peer.ID { ret := _m.Called(topic) + if len(ret) == 0 { + panic("no return value specified for ListPeers") + } + var r0 []peer.ID if rf, ok := ret.Get(0).(func(string) []peer.ID); ok { r0 = rf(topic) @@ -90,10 +135,34 @@ func (_m *PubSubAdapter) ListPeers(topic string) []peer.ID { return r0 } -// Ready provides a mock function with given fields: +// PeerScoreExposer provides a mock function with no fields +func (_m *PubSubAdapter) PeerScoreExposer() p2p.PeerScoreExposer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PeerScoreExposer") + } + + var r0 p2p.PeerScoreExposer + if rf, ok := ret.Get(0).(func() p2p.PeerScoreExposer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(p2p.PeerScoreExposer) + } + } + + return r0 +} + +// Ready provides a mock function with no fields func (_m 
*PubSubAdapter) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -110,6 +179,10 @@ func (_m *PubSubAdapter) Ready() <-chan struct{} { func (_m *PubSubAdapter) RegisterTopicValidator(topic string, topicValidator p2p.TopicValidatorFunc) error { ret := _m.Called(topic, topicValidator) + if len(ret) == 0 { + panic("no return value specified for RegisterTopicValidator") + } + var r0 error if rf, ok := ret.Get(0).(func(string, p2p.TopicValidatorFunc) error); ok { r0 = rf(topic, topicValidator) @@ -129,6 +202,10 @@ func (_m *PubSubAdapter) Start(_a0 irrecoverable.SignalerContext) { func (_m *PubSubAdapter) UnregisterTopicValidator(topic string) error { ret := _m.Called(topic) + if len(ret) == 0 { + panic("no return value specified for UnregisterTopicValidator") + } + var r0 error if rf, ok := ret.Get(0).(func(string) error); ok { r0 = rf(topic) @@ -139,13 +216,12 @@ func (_m *PubSubAdapter) UnregisterTopicValidator(topic string) error { return r0 } -type mockConstructorTestingTNewPubSubAdapter interface { +// NewPubSubAdapter creates a new instance of PubSubAdapter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPubSubAdapter(t interface { mock.TestingT Cleanup(func()) -} - -// NewPubSubAdapter creates a new instance of PubSubAdapter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPubSubAdapter(t mockConstructorTestingTNewPubSubAdapter) *PubSubAdapter { +}) *PubSubAdapter { mock := &PubSubAdapter{} mock.Mock.Test(t) diff --git a/network/p2p/mock/pub_sub_adapter_config.go b/network/p2p/mock/pub_sub_adapter_config.go index 113ef45a163..1c7974c66ef 100644 --- a/network/p2p/mock/pub_sub_adapter_config.go +++ b/network/p2p/mock/pub_sub_adapter_config.go @@ -1,12 +1,14 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mockp2p +package mock import ( p2p "github.com/onflow/flow-go/network/p2p" mock "github.com/stretchr/testify/mock" routing "github.com/libp2p/go-libp2p/core/routing" + + time "time" ) // PubSubAdapterConfig is an autogenerated mock type for the PubSubAdapterConfig type @@ -14,21 +16,26 @@ type PubSubAdapterConfig struct { mock.Mock } -// WithInspectorSuite provides a mock function with given fields: _a0 -func (_m *PubSubAdapterConfig) WithInspectorSuite(_a0 p2p.GossipSubInspectorSuite) { - _m.Called(_a0) -} - // WithMessageIdFunction provides a mock function with given fields: f func (_m *PubSubAdapterConfig) WithMessageIdFunction(f func([]byte) string) { _m.Called(f) } +// WithPeerGater provides a mock function with given fields: topicDeliveryWeights, sourceDecay +func (_m *PubSubAdapterConfig) WithPeerGater(topicDeliveryWeights map[string]float64, sourceDecay time.Duration) { + _m.Called(topicDeliveryWeights, sourceDecay) +} + // WithRoutingDiscovery provides a mock function with given fields: _a0 func (_m *PubSubAdapterConfig) WithRoutingDiscovery(_a0 routing.ContentRouting) { _m.Called(_a0) } +// WithRpcInspector provides a mock function with given fields: _a0 +func (_m *PubSubAdapterConfig) WithRpcInspector(_a0 p2p.GossipSubRPCInspector) { + _m.Called(_a0) +} + // WithScoreOption provides a mock function with given fields: _a0 func (_m *PubSubAdapterConfig) WithScoreOption(_a0 p2p.ScoreOptionBuilder) { _m.Called(_a0) @@ -49,13 +56,17 @@ func (_m *PubSubAdapterConfig) WithTracer(t p2p.PubSubTracer) { _m.Called(t) } -type mockConstructorTestingTNewPubSubAdapterConfig interface { - mock.TestingT - Cleanup(func()) +// WithValidateQueueSize provides a mock function with given fields: _a0 +func (_m *PubSubAdapterConfig) WithValidateQueueSize(_a0 int) { + _m.Called(_a0) } // NewPubSubAdapterConfig creates a new instance of PubSubAdapterConfig. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPubSubAdapterConfig(t mockConstructorTestingTNewPubSubAdapterConfig) *PubSubAdapterConfig { +// The first argument is typically a *testing.T value. +func NewPubSubAdapterConfig(t interface { + mock.TestingT + Cleanup(func()) +}) *PubSubAdapterConfig { mock := &PubSubAdapterConfig{} mock.Mock.Test(t) diff --git a/network/p2p/mock/pub_sub_tracer.go b/network/p2p/mock/pub_sub_tracer.go index c243118110d..d4c05ba73d7 100644 --- a/network/p2p/mock/pub_sub_tracer.go +++ b/network/p2p/mock/pub_sub_tracer.go @@ -1,9 +1,11 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mockp2p +package mock import ( irrecoverable "github.com/onflow/flow-go/module/irrecoverable" + channels "github.com/onflow/flow-go/network/channels" + mock "github.com/stretchr/testify/mock" peer "github.com/libp2p/go-libp2p/core/peer" @@ -28,10 +30,14 @@ func (_m *PubSubTracer) DeliverMessage(msg *pubsub.Message) { _m.Called(msg) } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *PubSubTracer) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -54,6 +60,44 @@ func (_m *PubSubTracer) DuplicateMessage(msg *pubsub.Message) { _m.Called(msg) } +// DuplicateMessageCount provides a mock function with given fields: _a0 +func (_m *PubSubTracer) DuplicateMessageCount(_a0 peer.ID) float64 { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for DuplicateMessageCount") + } + + var r0 float64 + if rf, ok := ret.Get(0).(func(peer.ID) float64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(float64) + } + + return r0 +} + +// GetLocalMeshPeers provides a mock function with given fields: topic +func (_m *PubSubTracer) GetLocalMeshPeers(topic channels.Topic) []peer.ID { + ret := _m.Called(topic) + + if len(ret) == 0 { + panic("no return value specified for GetLocalMeshPeers") + } + + var r0 []peer.ID + if rf, ok := ret.Get(0).(func(channels.Topic) []peer.ID); ok { + r0 = rf(topic) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]peer.ID) + } + } + + return r0 +} + // Graft provides a mock function with given fields: p, topic func (_m *PubSubTracer) Graft(p peer.ID, topic string) { _m.Called(p, topic) @@ -64,6 +108,24 @@ func (_m *PubSubTracer) Join(topic string) { _m.Called(topic) } +// LastHighestIHaveRPCSize provides a mock function with no fields +func (_m *PubSubTracer) LastHighestIHaveRPCSize() int64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LastHighestIHaveRPCSize") + } + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + // Leave provides a mock function with given fields: topic func (_m *PubSubTracer) Leave(topic string) { _m.Called(topic) @@ -74,10 +136,14 @@ func (_m *PubSubTracer) Prune(p peer.ID, topic string) { _m.Called(p, topic) } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *PubSubTracer) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -130,13 +196,30 @@ func (_m *PubSubTracer) ValidateMessage(msg *pubsub.Message) { _m.Called(msg) } -type mockConstructorTestingTNewPubSubTracer interface { - mock.TestingT - Cleanup(func()) +// WasIHaveRPCSent provides a mock function with given fields: messageID +func (_m *PubSubTracer) WasIHaveRPCSent(messageID string) bool { + ret := _m.Called(messageID) + + if len(ret) == 0 { + panic("no return value specified for WasIHaveRPCSent") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(string) bool); ok { + r0 = rf(messageID) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 } // NewPubSubTracer creates a new instance of PubSubTracer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPubSubTracer(t mockConstructorTestingTNewPubSubTracer) *PubSubTracer { +// The first argument is typically a *testing.T value. +func NewPubSubTracer(t interface { + mock.TestingT + Cleanup(func()) +}) *PubSubTracer { mock := &PubSubTracer{} mock.Mock.Test(t) diff --git a/network/p2p/mock/rate_limiter.go b/network/p2p/mock/rate_limiter.go index 1c1e6c98c73..2d6f8f27b44 100644 --- a/network/p2p/mock/rate_limiter.go +++ b/network/p2p/mock/rate_limiter.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( irrecoverable "github.com/onflow/flow-go/module/irrecoverable" @@ -18,6 +18,10 @@ type RateLimiter struct { func (_m *RateLimiter) Allow(peerID peer.ID, msgSize int) bool { ret := _m.Called(peerID, msgSize) + if len(ret) == 0 { + panic("no return value specified for Allow") + } + var r0 bool if rf, ok := ret.Get(0).(func(peer.ID, int) bool); ok { r0 = rf(peerID, msgSize) @@ -28,10 +32,14 @@ func (_m *RateLimiter) Allow(peerID peer.ID, msgSize int) bool { return r0 } -// Done provides a mock function with given fields: +// Done provides a mock function with no fields func (_m *RateLimiter) Done() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Done") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -48,6 +56,10 @@ func (_m *RateLimiter) Done() <-chan struct{} { func (_m *RateLimiter) IsRateLimited(peerID peer.ID) bool { ret := _m.Called(peerID) + if len(ret) == 0 { + panic("no return value specified for IsRateLimited") + } + var r0 bool if rf, ok := ret.Get(0).(func(peer.ID) bool); ok { r0 = rf(peerID) @@ -58,10 +70,14 @@ func (_m *RateLimiter) IsRateLimited(peerID peer.ID) bool { return r0 } -// Ready provides a mock function with given fields: +// Ready provides a mock function with no fields func (_m *RateLimiter) Ready() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ready") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -79,13 +95,12 @@ func (_m *RateLimiter) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) } -type mockConstructorTestingTNewRateLimiter interface { +// NewRateLimiter creates a new instance of RateLimiter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRateLimiter(t interface { mock.TestingT Cleanup(func()) -} - -// NewRateLimiter creates a new instance of RateLimiter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRateLimiter(t mockConstructorTestingTNewRateLimiter) *RateLimiter { +}) *RateLimiter { mock := &RateLimiter{} mock.Mock.Test(t) diff --git a/network/p2p/mock/rate_limiter_consumer.go b/network/p2p/mock/rate_limiter_consumer.go index 3385f180319..45ef6eb6f29 100644 --- a/network/p2p/mock/rate_limiter_consumer.go +++ b/network/p2p/mock/rate_limiter_consumer.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mockp2p +package mock import ( mock "github.com/stretchr/testify/mock" @@ -18,13 +18,12 @@ func (_m *RateLimiterConsumer) OnRateLimitedPeer(pid peer.ID, role string, msgTy _m.Called(pid, role, msgType, topic, reason) } -type mockConstructorTestingTNewRateLimiterConsumer interface { +// NewRateLimiterConsumer creates a new instance of RateLimiterConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRateLimiterConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewRateLimiterConsumer creates a new instance of RateLimiterConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRateLimiterConsumer(t mockConstructorTestingTNewRateLimiterConsumer) *RateLimiterConsumer { +}) *RateLimiterConsumer { mock := &RateLimiterConsumer{} mock.Mock.Test(t) diff --git a/network/p2p/mock/rate_limiter_opt.go b/network/p2p/mock/rate_limiter_opt.go deleted file mode 100644 index 04df105091c..00000000000 --- a/network/p2p/mock/rate_limiter_opt.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - p2p "github.com/onflow/flow-go/network/p2p" - mock "github.com/stretchr/testify/mock" -) - -// RateLimiterOpt is an autogenerated mock type for the RateLimiterOpt type -type RateLimiterOpt struct { - mock.Mock -} - -// Execute provides a mock function with given fields: limiter -func (_m *RateLimiterOpt) Execute(limiter p2p.RateLimiter) { - _m.Called(limiter) -} - -type mockConstructorTestingTNewRateLimiterOpt interface { - mock.TestingT - Cleanup(func()) -} - -// NewRateLimiterOpt creates a new instance of RateLimiterOpt. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRateLimiterOpt(t mockConstructorTestingTNewRateLimiterOpt) *RateLimiterOpt { - mock := &RateLimiterOpt{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/routable.go b/network/p2p/mock/routable.go new file mode 100644 index 00000000000..46a86b93e19 --- /dev/null +++ b/network/p2p/mock/routable.go @@ -0,0 +1,87 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + kbucket "github.com/libp2p/go-libp2p-kbucket" + mock "github.com/stretchr/testify/mock" + + routing "github.com/libp2p/go-libp2p/core/routing" +) + +// Routable is an autogenerated mock type for the Routable type +type Routable struct { + mock.Mock +} + +// Routing provides a mock function with no fields +func (_m *Routable) Routing() routing.Routing { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Routing") + } + + var r0 routing.Routing + if rf, ok := ret.Get(0).(func() routing.Routing); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(routing.Routing) + } + } + + return r0 +} + +// RoutingTable provides a mock function with no fields +func (_m *Routable) RoutingTable() *kbucket.RoutingTable { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RoutingTable") + } + + var r0 *kbucket.RoutingTable + if rf, ok := ret.Get(0).(func() *kbucket.RoutingTable); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*kbucket.RoutingTable) + } + } + + return r0 +} + +// SetRouting provides a mock function with given fields: r +func (_m *Routable) SetRouting(r routing.Routing) error { + ret := _m.Called(r) + + if len(ret) == 0 { + panic("no return value specified for SetRouting") + } + + var r0 error + if rf, ok := ret.Get(0).(func(routing.Routing) error); ok { + r0 = rf(r) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewRoutable creates a new instance of Routable. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRoutable(t interface { + mock.TestingT + Cleanup(func()) +}) *Routable { + mock := &Routable{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/rpc_control_tracking.go b/network/p2p/mock/rpc_control_tracking.go new file mode 100644 index 00000000000..91c8c9a1676 --- /dev/null +++ b/network/p2p/mock/rpc_control_tracking.go @@ -0,0 +1,60 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// RpcControlTracking is an autogenerated mock type for the RpcControlTracking type +type RpcControlTracking struct { + mock.Mock +} + +// LastHighestIHaveRPCSize provides a mock function with no fields +func (_m *RpcControlTracking) LastHighestIHaveRPCSize() int64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LastHighestIHaveRPCSize") + } + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// WasIHaveRPCSent provides a mock function with given fields: messageID +func (_m *RpcControlTracking) WasIHaveRPCSent(messageID string) bool { + ret := _m.Called(messageID) + + if len(ret) == 0 { + panic("no return value specified for WasIHaveRPCSent") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(string) bool); ok { + r0 = rf(messageID) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NewRpcControlTracking creates a new instance of RpcControlTracking. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewRpcControlTracking(t interface { + mock.TestingT + Cleanup(func()) +}) *RpcControlTracking { + mock := &RpcControlTracking{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/score_option.go b/network/p2p/mock/score_option.go deleted file mode 100644 index 476154d5a2b..00000000000 --- a/network/p2p/mock/score_option.go +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by mockery v2.13.1. DO NOT EDIT. - -package mockp2p - -import ( - mock "github.com/stretchr/testify/mock" - - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -// ScoreOption is an autogenerated mock type for the ScoreOption type -type ScoreOption struct { - mock.Mock -} - -// BuildFlowPubSubScoreOption provides a mock function with given fields: -func (_m *ScoreOption) BuildFlowPubSubScoreOption() pubsub.Option { - ret := _m.Called() - - var r0 pubsub.Option - if rf, ok := ret.Get(0).(func() pubsub.Option); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(pubsub.Option) - } - } - - return r0 -} - -type mockConstructorTestingTNewScoreOption interface { - mock.TestingT - Cleanup(func()) -} - -// NewScoreOption creates a new instance of ScoreOption. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewScoreOption(t mockConstructorTestingTNewScoreOption) *ScoreOption { - mock := &ScoreOption{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/score_option_builder.go b/network/p2p/mock/score_option_builder.go index eabe096b50a..e37c7de9210 100644 --- a/network/p2p/mock/score_option_builder.go +++ b/network/p2p/mock/score_option_builder.go @@ -1,8 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mockp2p +package mock import ( + irrecoverable "github.com/onflow/flow-go/module/irrecoverable" mock "github.com/stretchr/testify/mock" pubsub "github.com/libp2p/go-libp2p-pubsub" @@ -13,29 +14,109 @@ type ScoreOptionBuilder struct { mock.Mock } -// BuildFlowPubSubScoreOption provides a mock function with given fields: -func (_m *ScoreOptionBuilder) BuildFlowPubSubScoreOption() pubsub.Option { +// BuildFlowPubSubScoreOption provides a mock function with no fields +func (_m *ScoreOptionBuilder) BuildFlowPubSubScoreOption() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds) { ret := _m.Called() - var r0 pubsub.Option - if rf, ok := ret.Get(0).(func() pubsub.Option); ok { + if len(ret) == 0 { + panic("no return value specified for BuildFlowPubSubScoreOption") + } + + var r0 *pubsub.PeerScoreParams + var r1 *pubsub.PeerScoreThresholds + if rf, ok := ret.Get(0).(func() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *pubsub.PeerScoreParams); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pubsub.PeerScoreParams) + } + } + + if rf, ok := ret.Get(1).(func() *pubsub.PeerScoreThresholds); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*pubsub.PeerScoreThresholds) + } + } + + return r0, r1 +} + +// Done provides a mock function with no fields +func (_m *ScoreOptionBuilder) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(pubsub.Option) + r0 = ret.Get(0).(<-chan struct{}) } } return r0 } -type mockConstructorTestingTNewScoreOptionBuilder interface { - mock.TestingT - Cleanup(func()) +// Ready provides a mock function with no fields +func (_m *ScoreOptionBuilder) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *ScoreOptionBuilder) Start(_a0 irrecoverable.SignalerContext) { + _m.Called(_a0) +} + +// TopicScoreParams provides a mock function with given fields: _a0 +func (_m *ScoreOptionBuilder) TopicScoreParams(_a0 *pubsub.Topic) *pubsub.TopicScoreParams { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for TopicScoreParams") + } + + var r0 *pubsub.TopicScoreParams + if rf, ok := ret.Get(0).(func(*pubsub.Topic) *pubsub.TopicScoreParams); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pubsub.TopicScoreParams) + } + } + + return r0 } // NewScoreOptionBuilder creates a new instance of ScoreOptionBuilder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewScoreOptionBuilder(t mockConstructorTestingTNewScoreOptionBuilder) *ScoreOptionBuilder { +// The first argument is typically a *testing.T value. 
+func NewScoreOptionBuilder(t interface { + mock.TestingT + Cleanup(func()) +}) *ScoreOptionBuilder { mock := &ScoreOptionBuilder{} mock.Mock.Test(t) diff --git a/network/p2p/mock/stream_factory.go b/network/p2p/mock/stream_factory.go new file mode 100644 index 00000000000..2d98bb6e7f3 --- /dev/null +++ b/network/p2p/mock/stream_factory.go @@ -0,0 +1,68 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + network "github.com/libp2p/go-libp2p/core/network" + mock "github.com/stretchr/testify/mock" + + peer "github.com/libp2p/go-libp2p/core/peer" + + protocol "github.com/libp2p/go-libp2p/core/protocol" +) + +// StreamFactory is an autogenerated mock type for the StreamFactory type +type StreamFactory struct { + mock.Mock +} + +// NewStream provides a mock function with given fields: _a0, _a1, _a2 +func (_m *StreamFactory) NewStream(_a0 context.Context, _a1 peer.ID, _a2 protocol.ID) (network.Stream, error) { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for NewStream") + } + + var r0 network.Stream + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, peer.ID, protocol.ID) (network.Stream, error)); ok { + return rf(_a0, _a1, _a2) + } + if rf, ok := ret.Get(0).(func(context.Context, peer.ID, protocol.ID) network.Stream); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(network.Stream) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, peer.ID, protocol.ID) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetStreamHandler provides a mock function with given fields: _a0, _a1 +func (_m *StreamFactory) SetStreamHandler(_a0 protocol.ID, _a1 network.StreamHandler) { + _m.Called(_a0, _a1) +} + +// NewStreamFactory creates a new instance of StreamFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStreamFactory(t interface { + mock.TestingT + Cleanup(func()) +}) *StreamFactory { + mock := &StreamFactory{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/subscription.go b/network/p2p/mock/subscription.go index a54d673b661..3eeeec294ac 100644 --- a/network/p2p/mock/subscription.go +++ b/network/p2p/mock/subscription.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mockp2p +package mock import ( context "context" @@ -15,7 +15,7 @@ type Subscription struct { mock.Mock } -// Cancel provides a mock function with given fields: +// Cancel provides a mock function with no fields func (_m *Subscription) Cancel() { _m.Called() } @@ -24,6 +24,10 @@ func (_m *Subscription) Cancel() { func (_m *Subscription) Next(_a0 context.Context) (*pubsub.Message, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Next") + } + var r0 *pubsub.Message var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*pubsub.Message, error)); ok { @@ -46,10 +50,14 @@ func (_m *Subscription) Next(_a0 context.Context) (*pubsub.Message, error) { return r0, r1 } -// Topic provides a mock function with given fields: +// Topic provides a mock function with no fields func (_m *Subscription) Topic() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Topic") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -60,13 +68,12 @@ func (_m *Subscription) Topic() string { return r0 } -type mockConstructorTestingTNewSubscription interface { +// NewSubscription creates a new instance of Subscription. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSubscription(t interface { mock.TestingT Cleanup(func()) -} - -// NewSubscription creates a new instance of Subscription. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSubscription(t mockConstructorTestingTNewSubscription) *Subscription { +}) *Subscription { mock := &Subscription{} mock.Mock.Test(t) diff --git a/network/p2p/mock/subscription_filter.go b/network/p2p/mock/subscription_filter.go index 6f66b9ec75b..6a4a467c98d 100644 --- a/network/p2p/mock/subscription_filter.go +++ b/network/p2p/mock/subscription_filter.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( mock "github.com/stretchr/testify/mock" @@ -19,6 +19,10 @@ type SubscriptionFilter struct { func (_m *SubscriptionFilter) CanSubscribe(_a0 string) bool { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for CanSubscribe") + } + var r0 bool if rf, ok := ret.Get(0).(func(string) bool); ok { r0 = rf(_a0) @@ -33,6 +37,10 @@ func (_m *SubscriptionFilter) CanSubscribe(_a0 string) bool { func (_m *SubscriptionFilter) FilterIncomingSubscriptions(_a0 peer.ID, _a1 []*pubsub_pb.RPC_SubOpts) ([]*pubsub_pb.RPC_SubOpts, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for FilterIncomingSubscriptions") + } + var r0 []*pubsub_pb.RPC_SubOpts var r1 error if rf, ok := ret.Get(0).(func(peer.ID, []*pubsub_pb.RPC_SubOpts) ([]*pubsub_pb.RPC_SubOpts, error)); ok { @@ -55,13 +63,12 @@ func (_m *SubscriptionFilter) FilterIncomingSubscriptions(_a0 peer.ID, _a1 []*pu return r0, r1 } -type mockConstructorTestingTNewSubscriptionFilter interface { +// NewSubscriptionFilter creates a new instance of SubscriptionFilter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSubscriptionFilter(t interface { mock.TestingT Cleanup(func()) -} - -// NewSubscriptionFilter creates a new instance of SubscriptionFilter. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSubscriptionFilter(t mockConstructorTestingTNewSubscriptionFilter) *SubscriptionFilter { +}) *SubscriptionFilter { mock := &SubscriptionFilter{} mock.Mock.Test(t) diff --git a/network/p2p/mock/subscription_provider.go b/network/p2p/mock/subscription_provider.go index bc119c00f02..f1d20e23a0e 100644 --- a/network/p2p/mock/subscription_provider.go +++ b/network/p2p/mock/subscription_provider.go @@ -1,8 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( + irrecoverable "github.com/onflow/flow-go/module/irrecoverable" mock "github.com/stretchr/testify/mock" peer "github.com/libp2p/go-libp2p/core/peer" @@ -13,10 +14,34 @@ type SubscriptionProvider struct { mock.Mock } +// Done provides a mock function with no fields +func (_m *SubscriptionProvider) Done() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + // GetSubscribedTopics provides a mock function with given fields: pid func (_m *SubscriptionProvider) GetSubscribedTopics(pid peer.ID) []string { ret := _m.Called(pid) + if len(ret) == 0 { + panic("no return value specified for GetSubscribedTopics") + } + var r0 []string if rf, ok := ret.Get(0).(func(peer.ID) []string); ok { r0 = rf(pid) @@ -29,13 +54,37 @@ func (_m *SubscriptionProvider) GetSubscribedTopics(pid peer.ID) []string { return r0 } -type mockConstructorTestingTNewSubscriptionProvider interface { - mock.TestingT - Cleanup(func()) +// Ready provides a mock function with no fields +func (_m *SubscriptionProvider) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *SubscriptionProvider) Start(_a0 irrecoverable.SignalerContext) { + _m.Called(_a0) } // NewSubscriptionProvider creates a new instance of SubscriptionProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSubscriptionProvider(t mockConstructorTestingTNewSubscriptionProvider) *SubscriptionProvider { +// The first argument is typically a *testing.T value. +func NewSubscriptionProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *SubscriptionProvider { mock := &SubscriptionProvider{} mock.Mock.Test(t) diff --git a/network/p2p/mock/subscription_validator.go b/network/p2p/mock/subscription_validator.go index b7f71843639..5de8728e573 100644 --- a/network/p2p/mock/subscription_validator.go +++ b/network/p2p/mock/subscription_validator.go @@ -1,12 +1,12 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
-package mockp2p +package mock import ( flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" + irrecoverable "github.com/onflow/flow-go/module/irrecoverable" - p2p "github.com/onflow/flow-go/network/p2p" + mock "github.com/stretchr/testify/mock" peer "github.com/libp2p/go-libp2p/core/peer" ) @@ -20,6 +20,10 @@ type SubscriptionValidator struct { func (_m *SubscriptionValidator) CheckSubscribedToAllowedTopics(pid peer.ID, role flow.Role) error { ret := _m.Called(pid, role) + if len(ret) == 0 { + panic("no return value specified for CheckSubscribedToAllowedTopics") + } + var r0 error if rf, ok := ret.Get(0).(func(peer.ID, flow.Role) error); ok { r0 = rf(pid, role) @@ -30,27 +34,57 @@ func (_m *SubscriptionValidator) CheckSubscribedToAllowedTopics(pid peer.ID, rol return r0 } -// RegisterSubscriptionProvider provides a mock function with given fields: provider -func (_m *SubscriptionValidator) RegisterSubscriptionProvider(provider p2p.SubscriptionProvider) error { - ret := _m.Called(provider) +// Done provides a mock function with no fields +func (_m *SubscriptionValidator) Done() <-chan struct{} { + ret := _m.Called() - var r0 error - if rf, ok := ret.Get(0).(func(p2p.SubscriptionProvider) error); ok { - r0 = rf(provider) + if len(ret) == 0 { + panic("no return value specified for Done") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } } return r0 } -type mockConstructorTestingTNewSubscriptionValidator interface { - mock.TestingT - Cleanup(func()) +// Ready provides a mock function with no fields +func (_m *SubscriptionValidator) Ready() <-chan struct{} { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Ready") + } + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Start provides a mock function with given fields: _a0 +func (_m *SubscriptionValidator) Start(_a0 irrecoverable.SignalerContext) { + _m.Called(_a0) } // NewSubscriptionValidator creates a new instance of SubscriptionValidator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSubscriptionValidator(t mockConstructorTestingTNewSubscriptionValidator) *SubscriptionValidator { +// The first argument is typically a *testing.T value. +func NewSubscriptionValidator(t interface { + mock.TestingT + Cleanup(func()) +}) *SubscriptionValidator { mock := &SubscriptionValidator{} mock.Mock.Test(t) diff --git a/network/p2p/mock/subscriptions.go b/network/p2p/mock/subscriptions.go index 910b17cd676..6e409ebb967 100644 --- a/network/p2p/mock/subscriptions.go +++ b/network/p2p/mock/subscriptions.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
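// Editorial sketch, not part of the patch: the SubscriptionProvider and SubscriptionValidator
// mocks above gained Start/Ready/Done because the underlying interfaces now embed the component
// lifecycle. A minimal sketch of stubbing that lifecycle in a test; the test name and the
// "mockp2p" alias are assumptions, while irrecoverable.NewMockSignalerContext is the helper the
// disallow-listing test below also uses.
package mock_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/onflow/flow-go/module/irrecoverable"
	mockp2p "github.com/onflow/flow-go/network/p2p/mock"
)

func TestSubscriptionProviderLifecycle(t *testing.T) {
	provider := mockp2p.NewSubscriptionProvider(t)

	closed := make(chan struct{})
	close(closed) // an already-closed channel makes Ready()/Done() return immediately

	// The generated code type-asserts ret.Get(0).(<-chan struct{}), so the stubbed
	// value must be stored as the receive-only channel type, hence the conversions.
	provider.On("Ready").Return((<-chan struct{})(closed))
	provider.On("Done").Return((<-chan struct{})(closed))
	provider.On("Start", mock.Anything).Return()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)

	provider.Start(signalerCtx)
	<-provider.Ready()
	<-provider.Done()
}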
-package mockp2p +package mock import ( channels "github.com/onflow/flow-go/network/channels" @@ -18,6 +18,10 @@ type Subscriptions struct { func (_m *Subscriptions) HasSubscription(topic channels.Topic) bool { ret := _m.Called(topic) + if len(ret) == 0 { + panic("no return value specified for HasSubscription") + } + var r0 bool if rf, ok := ret.Get(0).(func(channels.Topic) bool); ok { r0 = rf(topic) @@ -33,13 +37,12 @@ func (_m *Subscriptions) SetUnicastManager(uniMgr p2p.UnicastManager) { _m.Called(uniMgr) } -type mockConstructorTestingTNewSubscriptions interface { +// NewSubscriptions creates a new instance of Subscriptions. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSubscriptions(t interface { mock.TestingT Cleanup(func()) -} - -// NewSubscriptions creates a new instance of Subscriptions. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSubscriptions(t mockConstructorTestingTNewSubscriptions) *Subscriptions { +}) *Subscriptions { mock := &Subscriptions{} mock.Mock.Test(t) diff --git a/network/p2p/mock/topic.go b/network/p2p/mock/topic.go index 58602ec7fcc..5b0c3e11d31 100644 --- a/network/p2p/mock/topic.go +++ b/network/p2p/mock/topic.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( context "context" @@ -14,10 +14,14 @@ type Topic struct { mock.Mock } -// Close provides a mock function with given fields: +// Close provides a mock function with no fields func (_m *Topic) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -32,6 +36,10 @@ func (_m *Topic) Close() error { func (_m *Topic) Publish(_a0 context.Context, _a1 []byte) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Publish") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, []byte) error); ok { r0 = rf(_a0, _a1) @@ -42,10 +50,14 @@ func (_m *Topic) Publish(_a0 context.Context, _a1 []byte) error { return r0 } -// String provides a mock function with given fields: +// String provides a mock function with no fields func (_m *Topic) String() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for String") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -56,10 +68,14 @@ func (_m *Topic) String() string { return r0 } -// Subscribe provides a mock function with given fields: +// Subscribe provides a mock function with no fields func (_m *Topic) Subscribe() (p2p.Subscription, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + var r0 p2p.Subscription var r1 error if rf, ok := ret.Get(0).(func() (p2p.Subscription, error)); ok { @@ -82,13 +98,12 @@ func (_m *Topic) Subscribe() (p2p.Subscription, error) { return r0, r1 } -type mockConstructorTestingTNewTopic interface { +// NewTopic creates a new instance of Topic. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTopic(t interface { mock.TestingT Cleanup(func()) -} - -// NewTopic creates a new instance of Topic. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTopic(t mockConstructorTestingTNewTopic) *Topic { +}) *Topic { mock := &Topic{} mock.Mock.Test(t) diff --git a/network/p2p/mock/topic_provider.go b/network/p2p/mock/topic_provider.go index 690eb7428e3..062fa689ae1 100644 --- a/network/p2p/mock/topic_provider.go +++ b/network/p2p/mock/topic_provider.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( mock "github.com/stretchr/testify/mock" @@ -13,10 +13,14 @@ type TopicProvider struct { mock.Mock } -// GetTopics provides a mock function with given fields: +// GetTopics provides a mock function with no fields func (_m *TopicProvider) GetTopics() []string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetTopics") + } + var r0 []string if rf, ok := ret.Get(0).(func() []string); ok { r0 = rf() @@ -33,6 +37,10 @@ func (_m *TopicProvider) GetTopics() []string { func (_m *TopicProvider) ListPeers(topic string) []peer.ID { ret := _m.Called(topic) + if len(ret) == 0 { + panic("no return value specified for ListPeers") + } + var r0 []peer.ID if rf, ok := ret.Get(0).(func(string) []peer.ID); ok { r0 = rf(topic) @@ -45,13 +53,12 @@ func (_m *TopicProvider) ListPeers(topic string) []peer.ID { return r0 } -type mockConstructorTestingTNewTopicProvider interface { +// NewTopicProvider creates a new instance of TopicProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTopicProvider(t interface { mock.TestingT Cleanup(func()) -} - -// NewTopicProvider creates a new instance of TopicProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTopicProvider(t mockConstructorTestingTNewTopicProvider) *TopicProvider { +}) *TopicProvider { mock := &TopicProvider{} mock.Mock.Test(t) diff --git a/network/p2p/mock/topic_validator_func.go b/network/p2p/mock/topic_validator_func.go deleted file mode 100644 index b059355db8a..00000000000 --- a/network/p2p/mock/topic_validator_func.go +++ /dev/null @@ -1,48 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - context "context" - - p2p "github.com/onflow/flow-go/network/p2p" - mock "github.com/stretchr/testify/mock" - - peer "github.com/libp2p/go-libp2p/core/peer" - - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -// TopicValidatorFunc is an autogenerated mock type for the TopicValidatorFunc type -type TopicValidatorFunc struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0, _a1, _a2 -func (_m *TopicValidatorFunc) Execute(_a0 context.Context, _a1 peer.ID, _a2 *pubsub.Message) p2p.ValidationResult { - ret := _m.Called(_a0, _a1, _a2) - - var r0 p2p.ValidationResult - if rf, ok := ret.Get(0).(func(context.Context, peer.ID, *pubsub.Message) p2p.ValidationResult); ok { - r0 = rf(_a0, _a1, _a2) - } else { - r0 = ret.Get(0).(p2p.ValidationResult) - } - - return r0 -} - -type mockConstructorTestingTNewTopicValidatorFunc interface { - mock.TestingT - Cleanup(func()) -} - -// NewTopicValidatorFunc creates a new instance of TopicValidatorFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewTopicValidatorFunc(t mockConstructorTestingTNewTopicValidatorFunc) *TopicValidatorFunc { - mock := &TopicValidatorFunc{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/unicast_management.go b/network/p2p/mock/unicast_management.go new file mode 100644 index 00000000000..8935c9e0584 --- /dev/null +++ b/network/p2p/mock/unicast_management.go @@ -0,0 +1,69 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + context "context" + + network "github.com/libp2p/go-libp2p/core/network" + mock "github.com/stretchr/testify/mock" + + peer "github.com/libp2p/go-libp2p/core/peer" + + protocols "github.com/onflow/flow-go/network/p2p/unicast/protocols" +) + +// UnicastManagement is an autogenerated mock type for the UnicastManagement type +type UnicastManagement struct { + mock.Mock +} + +// OpenAndWriteOnStream provides a mock function with given fields: ctx, peerID, protectionTag, writingLogic +func (_m *UnicastManagement) OpenAndWriteOnStream(ctx context.Context, peerID peer.ID, protectionTag string, writingLogic func(network.Stream) error) error { + ret := _m.Called(ctx, peerID, protectionTag, writingLogic) + + if len(ret) == 0 { + panic("no return value specified for OpenAndWriteOnStream") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, peer.ID, string, func(network.Stream) error) error); ok { + r0 = rf(ctx, peerID, protectionTag, writingLogic) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WithDefaultUnicastProtocol provides a mock function with given fields: defaultHandler, preferred +func (_m *UnicastManagement) WithDefaultUnicastProtocol(defaultHandler network.StreamHandler, preferred []protocols.ProtocolName) error { + ret := _m.Called(defaultHandler, preferred) + + if len(ret) == 0 { + panic("no return value specified for WithDefaultUnicastProtocol") + } + + var r0 error + if rf, ok := ret.Get(0).(func(network.StreamHandler, []protocols.ProtocolName) error); ok { + r0 = rf(defaultHandler, preferred) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewUnicastManagement creates a new instance of UnicastManagement. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUnicastManagement(t interface { + mock.TestingT + Cleanup(func()) +}) *UnicastManagement { + mock := &UnicastManagement{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/p2p/mock/unicast_manager.go b/network/p2p/mock/unicast_manager.go index 212f678ccc9..4ae43d3b500 100644 --- a/network/p2p/mock/unicast_manager.go +++ b/network/p2p/mock/unicast_manager.go @@ -1,14 +1,12 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
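// Editorial sketch, not part of the patch: the new UnicastManagement mock above takes the
// stream-writing logic as a callback parameter, so a stub that should exercise that logic
// must invoke it from Run(...) rather than only returning a value. The test name is an
// assumption, and the nil stream is a placeholder for a real stream fixture.
package mock_test

import (
	"context"
	"testing"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	mockp2p "github.com/onflow/flow-go/network/p2p/mock"
)

func TestOpenAndWriteOnStreamMock(t *testing.T) {
	um := mockp2p.NewUnicastManagement(t)

	um.On("OpenAndWriteOnStream", mock.Anything, mock.Anything, "protection-tag", mock.Anything).
		Run(func(args mock.Arguments) {
			// Recover the caller-supplied writing logic and run it so the test
			// actually exercises it; args.Get(3) is the fourth call argument.
			writingLogic := args.Get(3).(func(network.Stream) error)
			require.NoError(t, writingLogic(nil)) // placeholder stream
		}).
		Return(nil)

	err := um.OpenAndWriteOnStream(context.Background(), "peer-id", "protection-tag",
		func(s network.Stream) error {
			return nil // real logic would write to s
		})
	require.NoError(t, err)
}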
-package mockp2p +package mock import ( context "context" - multiaddr "github.com/multiformats/go-multiaddr" - mock "github.com/stretchr/testify/mock" - network "github.com/libp2p/go-libp2p/core/network" + mock "github.com/stretchr/testify/mock" peer "github.com/libp2p/go-libp2p/core/peer" @@ -20,45 +18,44 @@ type UnicastManager struct { mock.Mock } -// CreateStream provides a mock function with given fields: ctx, peerID, maxAttempts -func (_m *UnicastManager) CreateStream(ctx context.Context, peerID peer.ID, maxAttempts int) (network.Stream, []multiaddr.Multiaddr, error) { - ret := _m.Called(ctx, peerID, maxAttempts) +// CreateStream provides a mock function with given fields: ctx, peerID +func (_m *UnicastManager) CreateStream(ctx context.Context, peerID peer.ID) (network.Stream, error) { + ret := _m.Called(ctx, peerID) + + if len(ret) == 0 { + panic("no return value specified for CreateStream") + } var r0 network.Stream - var r1 []multiaddr.Multiaddr - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, peer.ID, int) (network.Stream, []multiaddr.Multiaddr, error)); ok { - return rf(ctx, peerID, maxAttempts) + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, peer.ID) (network.Stream, error)); ok { + return rf(ctx, peerID) } - if rf, ok := ret.Get(0).(func(context.Context, peer.ID, int) network.Stream); ok { - r0 = rf(ctx, peerID, maxAttempts) + if rf, ok := ret.Get(0).(func(context.Context, peer.ID) network.Stream); ok { + r0 = rf(ctx, peerID) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(network.Stream) } } - if rf, ok := ret.Get(1).(func(context.Context, peer.ID, int) []multiaddr.Multiaddr); ok { - r1 = rf(ctx, peerID, maxAttempts) + if rf, ok := ret.Get(1).(func(context.Context, peer.ID) error); ok { + r1 = rf(ctx, peerID) } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).([]multiaddr.Multiaddr) - } + r1 = ret.Error(1) } - if rf, ok := ret.Get(2).(func(context.Context, peer.ID, int) error); ok { - r2 = rf(ctx, peerID, maxAttempts) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 + return r0, r1 } // Register provides a mock function with given fields: unicast func (_m *UnicastManager) Register(unicast protocols.ProtocolName) error { ret := _m.Called(unicast) + if len(ret) == 0 { + panic("no return value specified for Register") + } + var r0 error if rf, ok := ret.Get(0).(func(protocols.ProtocolName) error); ok { r0 = rf(unicast) @@ -69,18 +66,17 @@ func (_m *UnicastManager) Register(unicast protocols.ProtocolName) error { return r0 } -// WithDefaultHandler provides a mock function with given fields: defaultHandler -func (_m *UnicastManager) WithDefaultHandler(defaultHandler network.StreamHandler) { +// SetDefaultHandler provides a mock function with given fields: defaultHandler +func (_m *UnicastManager) SetDefaultHandler(defaultHandler network.StreamHandler) { _m.Called(defaultHandler) } -type mockConstructorTestingTNewUnicastManager interface { +// NewUnicastManager creates a new instance of UnicastManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUnicastManager(t interface { mock.TestingT Cleanup(func()) -} - -// NewUnicastManager creates a new instance of UnicastManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewUnicastManager(t mockConstructorTestingTNewUnicastManager) *UnicastManager { +}) *UnicastManager { mock := &UnicastManager{} mock.Mock.Test(t) diff --git a/network/p2p/mock/unicast_manager_factory_func.go b/network/p2p/mock/unicast_manager_factory_func.go deleted file mode 100644 index fc529ca22d9..00000000000 --- a/network/p2p/mock/unicast_manager_factory_func.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" - - module "github.com/onflow/flow-go/module" - - p2p "github.com/onflow/flow-go/network/p2p" - - stream "github.com/onflow/flow-go/network/p2p/unicast/stream" - - time "time" - - zerolog "github.com/rs/zerolog" -) - -// UnicastManagerFactoryFunc is an autogenerated mock type for the UnicastManagerFactoryFunc type -type UnicastManagerFactoryFunc struct { - mock.Mock -} - -// Execute provides a mock function with given fields: logger, streamFactory, sporkId, createStreamRetryDelay, connStatus, metrics -func (_m *UnicastManagerFactoryFunc) Execute(logger zerolog.Logger, streamFactory stream.Factory, sporkId flow.Identifier, createStreamRetryDelay time.Duration, connStatus p2p.PeerConnections, metrics module.UnicastManagerMetrics) p2p.UnicastManager { - ret := _m.Called(logger, streamFactory, sporkId, createStreamRetryDelay, connStatus, metrics) - - var r0 p2p.UnicastManager - if rf, ok := ret.Get(0).(func(zerolog.Logger, stream.Factory, flow.Identifier, time.Duration, p2p.PeerConnections, module.UnicastManagerMetrics) p2p.UnicastManager); ok { - r0 = rf(logger, streamFactory, sporkId, createStreamRetryDelay, connStatus, metrics) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(p2p.UnicastManager) - } - } - - return r0 -} - -type mockConstructorTestingTNewUnicastManagerFactoryFunc interface { - mock.TestingT - Cleanup(func()) -} - -// NewUnicastManagerFactoryFunc creates a new instance of UnicastManagerFactoryFunc. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewUnicastManagerFactoryFunc(t mockConstructorTestingTNewUnicastManagerFactoryFunc) *UnicastManagerFactoryFunc { - mock := &UnicastManagerFactoryFunc{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/mock/unicast_rate_limiter_distributor.go b/network/p2p/mock/unicast_rate_limiter_distributor.go index 0bdceb2b72d..88a587b06aa 100644 --- a/network/p2p/mock/unicast_rate_limiter_distributor.go +++ b/network/p2p/mock/unicast_rate_limiter_distributor.go @@ -1,6 +1,6 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. -package mockp2p +package mock import ( p2p "github.com/onflow/flow-go/network/p2p" @@ -24,13 +24,12 @@ func (_m *UnicastRateLimiterDistributor) OnRateLimitedPeer(pid peer.ID, role str _m.Called(pid, role, msgType, topic, reason) } -type mockConstructorTestingTNewUnicastRateLimiterDistributor interface { +// NewUnicastRateLimiterDistributor creates a new instance of UnicastRateLimiterDistributor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUnicastRateLimiterDistributor(t interface { mock.TestingT Cleanup(func()) -} - -// NewUnicastRateLimiterDistributor creates a new instance of UnicastRateLimiterDistributor. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewUnicastRateLimiterDistributor(t mockConstructorTestingTNewUnicastRateLimiterDistributor) *UnicastRateLimiterDistributor { +}) *UnicastRateLimiterDistributor { mock := &UnicastRateLimiterDistributor{} mock.Mock.Test(t) diff --git a/network/p2p/mock/update_function.go b/network/p2p/mock/update_function.go deleted file mode 100644 index 1b1b98ed66b..00000000000 --- a/network/p2p/mock/update_function.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mockp2p - -import ( - p2p "github.com/onflow/flow-go/network/p2p" - mock "github.com/stretchr/testify/mock" -) - -// UpdateFunction is an autogenerated mock type for the UpdateFunction type -type UpdateFunction struct { - mock.Mock -} - -// Execute provides a mock function with given fields: record -func (_m *UpdateFunction) Execute(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { - ret := _m.Called(record) - - var r0 p2p.GossipSubSpamRecord - if rf, ok := ret.Get(0).(func(p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord); ok { - r0 = rf(record) - } else { - r0 = ret.Get(0).(p2p.GossipSubSpamRecord) - } - - return r0 -} - -type mockConstructorTestingTNewUpdateFunction interface { - mock.TestingT - Cleanup(func()) -} - -// NewUpdateFunction creates a new instance of UpdateFunction. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewUpdateFunction(t mockConstructorTestingTNewUpdateFunction) *UpdateFunction { - mock := &UpdateFunction{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/network/p2p/network.go b/network/p2p/network.go deleted file mode 100644 index a0159aefb5c..00000000000 --- a/network/p2p/network.go +++ /dev/null @@ -1,509 +0,0 @@ -package p2p - -import ( - "errors" - "fmt" - "sync" - "time" - - "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network" - netcache "github.com/onflow/flow-go/network/cache" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/message" - "github.com/onflow/flow-go/network/p2p/conduit" - "github.com/onflow/flow-go/network/queue" - _ "github.com/onflow/flow-go/utils/binstat" - "github.com/onflow/flow-go/utils/logging" -) - -const ( - // DefaultReceiveCacheSize represents size of receive cache that keeps hash of incoming messages - // for sake of deduplication. - DefaultReceiveCacheSize = 10e4 -) - -// NotEjectedFilter is an identity filter that, when applied to the identity -// table at a given snapshot, returns all nodes that we should communicate with -// over the networking layer. -// -// NOTE: The protocol state includes nodes from the previous/next epoch that should -// be included in network communication. We omit any nodes that have been ejected. 
-var NotEjectedFilter = filter.Not(filter.Ejected) - -type NetworkOptFunction func(*Network) - -func WithConduitFactory(f network.ConduitFactory) NetworkOptFunction { - return func(n *Network) { - n.conduitFactory = f - } -} - -// Network represents the overlay network of our peer-to-peer network, including -// the protocols for handshakes, authentication, gossiping and heartbeats. -type Network struct { - sync.RWMutex - *component.ComponentManager - identityProvider module.IdentityProvider - logger zerolog.Logger - codec network.Codec - me module.Local - mw network.Middleware - metrics module.NetworkCoreMetrics - receiveCache *netcache.ReceiveCache // used to deduplicate incoming messages - queue network.MessageQueue - subscriptionManager network.SubscriptionManager // used to keep track of subscribed channels - conduitFactory network.ConduitFactory - topology network.Topology - registerEngineRequests chan *registerEngineRequest - registerBlobServiceRequests chan *registerBlobServiceRequest -} - -var _ network.Network = &Network{} -var _ network.Overlay = &Network{} - -type registerEngineRequest struct { - channel channels.Channel - messageProcessor network.MessageProcessor - respChan chan *registerEngineResp -} - -type registerEngineResp struct { - conduit network.Conduit - err error -} - -type registerBlobServiceRequest struct { - channel channels.Channel - ds datastore.Batching - opts []network.BlobServiceOption - respChan chan *registerBlobServiceResp -} - -type registerBlobServiceResp struct { - blobService network.BlobService - err error -} - -var ErrNetworkShutdown = errors.New("network has already shutdown") - -type NetworkParameters struct { - Logger zerolog.Logger - Codec network.Codec - Me module.Local - MiddlewareFactory func() (network.Middleware, error) - Topology network.Topology - SubscriptionManager network.SubscriptionManager - Metrics module.NetworkCoreMetrics - IdentityProvider module.IdentityProvider - ReceiveCache *netcache.ReceiveCache - Options []NetworkOptFunction -} - -var _ network.Network = (*Network)(nil) - -// NewNetwork creates a new naive overlay network, using the given middleware to -// communicate to direct peers, using the given codec for serialization, and -// using the given state & cache interfaces to track volatile information. -// csize determines the size of the cache dedicated to keep track of received messages -func NewNetwork(param *NetworkParameters) (*Network, error) { - - mw, err := param.MiddlewareFactory() - if err != nil { - return nil, fmt.Errorf("could not create middleware: %w", err) - } - - n := &Network{ - logger: param.Logger, - codec: param.Codec, - me: param.Me, - mw: mw, - receiveCache: param.ReceiveCache, - topology: param.Topology, - metrics: param.Metrics, - subscriptionManager: param.SubscriptionManager, - identityProvider: param.IdentityProvider, - conduitFactory: conduit.NewDefaultConduitFactory(param.Logger, param.Metrics), - registerEngineRequests: make(chan *registerEngineRequest), - registerBlobServiceRequests: make(chan *registerBlobServiceRequest), - } - - for _, opt := range param.Options { - opt(n) - } - - n.mw.SetOverlay(n) - - if err := n.conduitFactory.RegisterAdapter(n); err != nil { - return nil, fmt.Errorf("could not register network adapter: %w", err) - } - - n.ComponentManager = component.NewComponentManagerBuilder(). - AddWorker(n.runMiddleware). - AddWorker(n.processRegisterEngineRequests). 
- AddWorker(n.processRegisterBlobServiceRequests).Build() - - return n, nil -} - -func (n *Network) processRegisterEngineRequests(parent irrecoverable.SignalerContext, ready component.ReadyFunc) { - <-n.mw.Ready() - ready() - - for { - select { - case req := <-n.registerEngineRequests: - conduit, err := n.handleRegisterEngineRequest(parent, req.channel, req.messageProcessor) - resp := ®isterEngineResp{ - conduit: conduit, - err: err, - } - - select { - case <-parent.Done(): - return - case req.respChan <- resp: - } - case <-parent.Done(): - return - } - } -} - -func (n *Network) processRegisterBlobServiceRequests(parent irrecoverable.SignalerContext, ready component.ReadyFunc) { - <-n.mw.Ready() - ready() - - for { - select { - case req := <-n.registerBlobServiceRequests: - blobService, err := n.handleRegisterBlobServiceRequest(parent, req.channel, req.ds, req.opts) - resp := ®isterBlobServiceResp{ - blobService: blobService, - err: err, - } - - select { - case <-parent.Done(): - return - case req.respChan <- resp: - } - case <-parent.Done(): - return - } - } -} - -func (n *Network) runMiddleware(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - // setup the message queue - // create priority queue - n.queue = queue.NewMessageQueue(ctx, queue.GetEventPriority, n.metrics) - - // create workers to read from the queue and call queueSubmitFunc - queue.CreateQueueWorkers(ctx, queue.DefaultNumWorkers, n.queue, n.queueSubmitFunc) - - n.mw.Start(ctx) - <-n.mw.Ready() - - ready() - - <-n.mw.Done() -} - -func (n *Network) handleRegisterEngineRequest(parent irrecoverable.SignalerContext, channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) { - if !channels.ChannelExists(channel) { - return nil, fmt.Errorf("unknown channel: %s, should be registered in topic map", channel) - } - - err := n.subscriptionManager.Register(channel, engine) - if err != nil { - return nil, fmt.Errorf("failed to register engine for channel %s: %w", channel, err) - } - - n.logger.Info(). - Str("channel_id", channel.String()). - Msg("channel successfully registered") - - // create the conduit - newConduit, err := n.conduitFactory.NewConduit(parent, channel) - if err != nil { - return nil, fmt.Errorf("could not create conduit using factory: %w", err) - } - - return newConduit, nil -} - -func (n *Network) handleRegisterBlobServiceRequest(parent irrecoverable.SignalerContext, channel channels.Channel, ds datastore.Batching, opts []network.BlobServiceOption) (network.BlobService, error) { - bs := n.mw.NewBlobService(channel, ds, opts...) - - // start the blob service using the network's context - bs.Start(parent) - - return bs, nil -} - -// Register will register the given engine with the given unique engine engineID, -// returning a conduit to directly submit messages to the message bus of the -// engine. 
-func (n *Network) Register(channel channels.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { - respChan := make(chan *registerEngineResp) - - select { - case <-n.ComponentManager.ShutdownSignal(): - return nil, ErrNetworkShutdown - case n.registerEngineRequests <- ®isterEngineRequest{ - channel: channel, - messageProcessor: messageProcessor, - respChan: respChan, - }: - select { - case <-n.ComponentManager.ShutdownSignal(): - return nil, ErrNetworkShutdown - case resp := <-respChan: - return resp.conduit, resp.err - } - } -} - -func (n *Network) RegisterPingService(pingProtocol protocol.ID, provider network.PingInfoProvider) (network.PingService, error) { - select { - case <-n.ComponentManager.ShutdownSignal(): - return nil, ErrNetworkShutdown - default: - return n.mw.NewPingService(pingProtocol, provider), nil - } -} - -// RegisterBlobService registers a BlobService on the given channel. -// The returned BlobService can be used to request blobs from the network. -func (n *Network) RegisterBlobService(channel channels.Channel, ds datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { - respChan := make(chan *registerBlobServiceResp) - - select { - case <-n.ComponentManager.ShutdownSignal(): - return nil, ErrNetworkShutdown - case n.registerBlobServiceRequests <- ®isterBlobServiceRequest{ - channel: channel, - ds: ds, - opts: opts, - respChan: respChan, - }: - select { - case <-n.ComponentManager.ShutdownSignal(): - return nil, ErrNetworkShutdown - case resp := <-respChan: - return resp.blobService, resp.err - } - } -} - -// UnRegisterChannel unregisters the engine for the specified channel. The engine will no longer be able to send or -// receive messages from that channel. -func (n *Network) UnRegisterChannel(channel channels.Channel) error { - err := n.subscriptionManager.Unregister(channel) - if err != nil { - return fmt.Errorf("failed to unregister engine for channel %s: %w", channel, err) - } - return nil -} - -func (n *Network) Identities() flow.IdentityList { - return n.identityProvider.Identities(NotEjectedFilter) -} - -func (n *Network) Identity(pid peer.ID) (*flow.Identity, bool) { - return n.identityProvider.ByPeerID(pid) -} - -func (n *Network) Receive(msg *network.IncomingMessageScope) error { - n.metrics.InboundMessageReceived(msg.Size(), msg.Channel().String(), msg.Protocol().String(), msg.PayloadType()) - - err := n.processNetworkMessage(msg) - if err != nil { - return fmt.Errorf("could not process message: %w", err) - } - return nil -} - -func (n *Network) processNetworkMessage(msg *network.IncomingMessageScope) error { - // checks the cache for deduplication and adds the message if not already present - if !n.receiveCache.Add(msg.EventID()) { - // drops duplicate message - n.logger.Debug(). - Hex("sender_id", logging.ID(msg.OriginId())). - Hex("event_id", msg.EventID()). - Str("channel", msg.Channel().String()). - Msg("dropping message due to duplication") - - n.metrics.DuplicateInboundMessagesDropped(msg.Channel().String(), msg.Protocol().String(), msg.PayloadType()) - - return nil - } - - // create queue message - qm := queue.QMessage{ - Payload: msg.DecodedPayload(), - Size: msg.Size(), - Target: msg.Channel(), - SenderID: msg.OriginId(), - } - - // insert the message in the queue - err := n.queue.Insert(qm) - if err != nil { - return fmt.Errorf("failed to insert message in queue: %w", err) - } - - return nil -} - -// UnicastOnChannel sends the message in a reliable way to the given recipient. 
-// It uses 1-1 direct messaging over the underlying network to deliver the message. -// It returns an error if unicasting fails. -func (n *Network) UnicastOnChannel(channel channels.Channel, payload interface{}, targetID flow.Identifier) error { - if targetID == n.me.NodeID() { - n.logger.Debug().Msg("network skips self unicasting") - return nil - } - - msg, err := network.NewOutgoingScope( - flow.IdentifierList{targetID}, - channel, - payload, - n.codec.Encode, - message.ProtocolTypeUnicast) - if err != nil { - return fmt.Errorf("could not generate outgoing message scope for unicast: %w", err) - } - - n.metrics.UnicastMessageSendingStarted(msg.Channel().String()) - defer n.metrics.UnicastMessageSendingCompleted(msg.Channel().String()) - err = n.mw.SendDirect(msg) - if err != nil { - return fmt.Errorf("failed to send message to %x: %w", targetID, err) - } - - n.metrics.OutboundMessageSent(msg.Size(), msg.Channel().String(), message.ProtocolTypeUnicast.String(), msg.PayloadType()) - - return nil -} - -// PublishOnChannel sends the message in an unreliable way to the given recipients. -// In this context, unreliable means that the message is published over a libp2p pub-sub -// channel and can be read by any node subscribed to that channel. -// The selector could be used to optimize or restrict delivery. -func (n *Network) PublishOnChannel(channel channels.Channel, message interface{}, targetIDs ...flow.Identifier) error { - filteredIDs := flow.IdentifierList(targetIDs).Filter(n.removeSelfFilter()) - - if len(filteredIDs) == 0 { - return network.EmptyTargetList - } - - err := n.sendOnChannel(channel, message, filteredIDs) - - if err != nil { - return fmt.Errorf("failed to publish on channel %s: %w", channel, err) - } - - return nil -} - -// MulticastOnChannel unreliably sends the specified event over the channel to randomly selected 'num' number of recipients -// selected from the specified targetIDs. -func (n *Network) MulticastOnChannel(channel channels.Channel, message interface{}, num uint, targetIDs ...flow.Identifier) error { - selectedIDs := flow.IdentifierList(targetIDs).Filter(n.removeSelfFilter()).Sample(num) - - if len(selectedIDs) == 0 { - return network.EmptyTargetList - } - - err := n.sendOnChannel(channel, message, selectedIDs) - - // publishes the message to the selected targets - if err != nil { - return fmt.Errorf("failed to multicast on channel %s: %w", channel, err) - } - - return nil -} - -// removeSelfFilter removes the flow.Identifier of this node if present, from the list of nodes -func (n *Network) removeSelfFilter() flow.IdentifierFilter { - return func(id flow.Identifier) bool { - return id != n.me.NodeID() - } -} - -// sendOnChannel sends the message on channel to targets. -func (n *Network) sendOnChannel(channel channels.Channel, msg interface{}, targetIDs []flow.Identifier) error { - n.logger.Debug(). - Interface("message", msg). - Str("channel", channel.String()). - Str("target_ids", fmt.Sprintf("%v", targetIDs)). - Msg("sending new message on channel") - - // generate network message (encoding) based on list of recipients - scope, err := network.NewOutgoingScope(targetIDs, channel, msg, n.codec.Encode, message.ProtocolTypePubSub) - if err != nil { - return fmt.Errorf("failed to generate outgoing message scope %s: %w", channel, err) - } - - // publish the message through the channel, however, the message - // is only restricted to targetIDs (if they subscribed to channel). 
- err = n.mw.Publish(scope) - if err != nil { - return fmt.Errorf("failed to send message on channel %s: %w", channel, err) - } - - n.metrics.OutboundMessageSent(scope.Size(), scope.Channel().String(), message.ProtocolTypePubSub.String(), scope.PayloadType()) - - return nil -} - -// queueSubmitFunc submits the message to the engine synchronously. It is the callback for the queue worker -// when it gets a message from the queue -func (n *Network) queueSubmitFunc(message interface{}) { - qm := message.(queue.QMessage) - - logger := n.logger.With(). - Str("channel_id", qm.Target.String()). - Str("sender_id", qm.SenderID.String()). - Logger() - - eng, err := n.subscriptionManager.GetEngine(qm.Target) - if err != nil { - // This means the message was received on a channel that the node has not registered an - // engine for. This may be because the message was received during startup and the node - // hasn't subscribed to the channel yet, or there is a bug. - logger.Err(err).Msg("failed to submit message") - return - } - - logger.Debug().Msg("submitting message to engine") - - n.metrics.MessageProcessingStarted(qm.Target.String()) - - // submits the message to the engine synchronously and - // tracks its processing time. - startTimestamp := time.Now() - - err = eng.Process(qm.Target, qm.SenderID, qm.Payload) - if err != nil { - logger.Err(err).Msg("failed to process message") - } - - n.metrics.MessageProcessingFinished(qm.Target.String(), time.Since(startTimestamp)) -} - -func (n *Network) Topology() flow.IdentityList { - return n.topology.Fanout(n.Identities()) -} diff --git a/network/p2p/node/disallow_listing_test.go b/network/p2p/node/disallow_listing_test.go new file mode 100644 index 00000000000..2e9faf43ce3 --- /dev/null +++ b/network/p2p/node/disallow_listing_test.go @@ -0,0 +1,100 @@ +package p2pnode_test + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + mockmodule "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/p2p" + p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config" + "github.com/onflow/flow-go/network/p2p/connection" + p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestDisconnectingFromDisallowListedNode ensures that: +// (1) the node disconnects from a disallow listed node while the node is connected to other (allow listed) nodes. +// (2) new inbound or outbound connections to and from disallow-listed nodes are rejected. +// (3) When a disallow-listed node is allow-listed again, the node reconnects to it. +func TestDisconnectingFromDisallowListedNode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + sporkID := unittest.IdentifierFixture() + idProvider := mockmodule.NewIdentityProvider(t) + + peerIDSlice := peer.IDSlice{} + // node 1 is the node that will be disallow-listing another node (node 2). 
+ node1, identity1 := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithPeerManagerEnabled(&p2pbuilderconfig.PeerManagerConfig{ + ConnectionPruning: true, + UpdateInterval: connection.DefaultPeerUpdateInterval, + ConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(), + }, + func() peer.IDSlice { + return peerIDSlice + }), + p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(p peer.ID) error { + // allow all the connections, except for the ones that are disallow-listed, which are determined when + // this connection gater object queries the disallow listing oracle that will be provided to it by + // the libp2p node. So, here, we don't need to do anything except just enabling the connection gater. + return nil + }))) + idProvider.On("ByPeerID", node1.ID()).Return(&identity1, true).Maybe() + peerIDSlice = append(peerIDSlice, node1.ID()) + + // node 2 is the node that will be disallow-listed by node 1. + node2, identity2 := p2ptest.NodeFixture(t, sporkID, t.Name(), idProvider) + idProvider.On("ByPeerID", node2.ID()).Return(&identity2, true).Maybe() + peerIDSlice = append(peerIDSlice, node2.ID()) + + // node 3 is the node that will be connected to node 1 (to ensure that node 1 is still able to connect to other nodes + // after disallow-listing node 2). + node3, identity3 := p2ptest.NodeFixture(t, sporkID, t.Name(), idProvider) + idProvider.On("ByPeerID", node3.ID()).Return(&identity3, true).Maybe() + peerIDSlice = append(peerIDSlice, node3.ID()) + + nodes := []p2p.LibP2PNode{node1, node2, node3} + ids := flow.IdentityList{&identity1, &identity2, &identity3} + + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) + + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + + // initially all nodes should be connected to each other. + p2ptest.RequireConnectedEventually(t, nodes, 100*time.Millisecond, 2*time.Second) + + // phase-1: node 1 disallow-lists node 2. + node1.OnDisallowListNotification(node2.ID(), network.DisallowListedCauseAlsp) + + // eventually node 1 should be disconnected from node 2 while other nodes should remain connected. + // we choose a timeout of 2 seconds because peer manager updates peers every 1 second. + p2ptest.RequireEventuallyNotConnected(t, []p2p.LibP2PNode{node1}, []p2p.LibP2PNode{node2}, 100*time.Millisecond, 2*time.Second) + + // but nodes 1 and 3 should remain connected as well as nodes 2 and 3. + // we choose a short timeout because we expect the nodes to remain connected. + p2ptest.RequireConnectedEventually(t, []p2p.LibP2PNode{node1, node3}, 1*time.Millisecond, 100*time.Millisecond) + p2ptest.RequireConnectedEventually(t, []p2p.LibP2PNode{node2, node3}, 1*time.Millisecond, 100*time.Millisecond) + + // while node 2 is disallow-listed, it cannot connect to node 1. Also, node 1 cannot directly dial and connect to node 2, unless + // it is allow-listed again. + p2ptest.EnsureNotConnectedBetweenGroups(t, ctx, []p2p.LibP2PNode{node1}, []p2p.LibP2PNode{node2}) + + // phase-2: now we allow-list node 2 again. + node1.OnAllowListNotification(node2.ID(), network.DisallowListedCauseAlsp) + + // eventually node 1 should be connected to node 2 again, hence all nodes should be connected to each other. + // we choose a timeout of 5 seconds because peer manager updates peers every 1 second and we need to wait for + // any potential random backoffs to expire (min 1 second).
+ p2ptest.RequireConnectedEventually(t, nodes, 100*time.Millisecond, 5*time.Second) +} diff --git a/network/p2p/node/gossipSubAdapter.go b/network/p2p/node/gossipSubAdapter.go new file mode 100644 index 00000000000..ee678c5625a --- /dev/null +++ b/network/p2p/node/gossipSubAdapter.go @@ -0,0 +1,252 @@ +package p2pnode + +import ( + "context" + "fmt" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/utils" + "github.com/onflow/flow-go/utils/logging" +) + +// GossipSubAdapter is a wrapper around the libp2p GossipSub implementation +// that implements the PubSubAdapter interface for the Flow network. +type GossipSubAdapter struct { + component.Component + gossipSub *pubsub.PubSub + // topicScoreParamFunc is a function that returns the topic score params for a given topic. + // If no function is provided, the node will join the topic with no scoring params. As the + // node will not be able to score other peers in the topic, it may be vulnerable to routing + // attacks on the topic that may also affect the overall function of the node. + // It is not recommended to use this adapter without a topicScoreParamFunc; in mature + // implementations of the Flow network, the topicScoreParamFunc must be a required parameter. + topicScoreParamFunc func(topic *pubsub.Topic) *pubsub.TopicScoreParams + logger zerolog.Logger + peerScoreExposer p2p.PeerScoreExposer + localMeshTracer p2p.PubSubTracer + // clusterChangeConsumer is a callback that is invoked when the set of active clusters of collection nodes changes. + // This callback is implemented by the rpc inspector suite of the GossipSubAdapter, and consumes the cluster changes + // to update the rpc inspector state of the recent topics (i.e., channels). + clusterChangeConsumer p2p.CollectionClusterChangesConsumer +} + +var _ p2p.PubSubAdapter = (*GossipSubAdapter)(nil) + +func NewGossipSubAdapter( + ctx context.Context, + logger zerolog.Logger, + h host.Host, + cfg p2p.PubSubAdapterConfig, + clusterChangeConsumer p2p.CollectionClusterChangesConsumer, +) (p2p.PubSubAdapter, error) { + gossipSubConfig, ok := cfg.(*GossipSubAdapterConfig) + if !ok { + return nil, fmt.Errorf("invalid gossipsub config type: %T", cfg) + } + + gossipSub, err := pubsub.NewGossipSub(ctx, h, gossipSubConfig.Build()...)
+ if err != nil { + return nil, err + } + + builder := component.NewComponentManagerBuilder() + + a := &GossipSubAdapter{ + gossipSub: gossipSub, + logger: logger.With().Str("component", "gossipsub-adapter").Logger(), + clusterChangeConsumer: clusterChangeConsumer, + } + + topicScoreParamFunc, ok := gossipSubConfig.TopicScoreParamFunc() + if ok { + a.topicScoreParamFunc = topicScoreParamFunc + } else { + a.logger.Warn().Msg("no topic score param func provided") + } + + if scoreTracer := gossipSubConfig.ScoreTracer(); scoreTracer != nil { + builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + a.logger.Info().Msg("starting score tracer") + scoreTracer.Start(ctx) + select { + case <-ctx.Done(): + a.logger.Warn().Msg("aborting score tracer startup due to context done") + case <-scoreTracer.Ready(): + a.logger.Info().Msg("score tracer is ready") + } + ready() + + <-ctx.Done() + a.logger.Info().Msg("stopping score tracer") + <-scoreTracer.Done() + a.logger.Info().Msg("score tracer stopped") + }) + a.peerScoreExposer = scoreTracer + } + + if tracer := gossipSubConfig.PubSubTracer(); tracer != nil { + builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + a.logger.Info().Msg("starting pubsub tracer") + tracer.Start(ctx) + select { + case <-ctx.Done(): + a.logger.Warn().Msg("aborting pubsub tracer startup due to context done") + case <-tracer.Ready(): + a.logger.Info().Msg("pubsub tracer is ready") + } + ready() + + <-ctx.Done() + a.logger.Info().Msg("stopping pubsub tracer") + <-tracer.Done() + a.logger.Info().Msg("pubsub tracer stopped") + }) + a.localMeshTracer = tracer + } + + if inspectorSuite := gossipSubConfig.RpcInspectorComponent(); inspectorSuite != nil { + builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + a.logger.Info().Msg("starting inspector suite") + inspectorSuite.Start(ctx) + select { + case <-ctx.Done(): + a.logger.Warn().Msg("aborting inspector suite startup due to context done") + case <-inspectorSuite.Ready(): + a.logger.Info().Msg("inspector suite is ready") + } + ready() + + <-ctx.Done() + a.logger.Info().Msg("stopping inspector suite") + <-inspectorSuite.Done() + a.logger.Info().Msg("inspector suite stopped") + }) + } + + if scoringComponent := gossipSubConfig.ScoringComponent(); scoringComponent != nil { + builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + a.logger.Info().Msg("starting gossipsub scoring component") + scoringComponent.Start(ctx) + select { + case <-ctx.Done(): + a.logger.Warn().Msg("aborting gossipsub scoring component startup due to context done") + case <-scoringComponent.Ready(): + a.logger.Info().Msg("gossipsub scoring component is ready") + } + ready() + + <-ctx.Done() + a.logger.Info().Msg("stopping gossipsub scoring component") + <-scoringComponent.Done() + a.logger.Info().Msg("gossipsub scoring component stopped") + }) + } + + a.Component = builder.Build() + + return a, nil +} + +func (g *GossipSubAdapter) RegisterTopicValidator(topic string, topicValidator p2p.TopicValidatorFunc) error { + // wrap the topic validator function into a libp2p topic validator function.
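+ // The mapping below is one-to-one between Flow's p2p.ValidationResult and libp2p's + // pubsub.ValidationResult (accept / ignore / reject). Note that zerolog's Fatal() exits + // the process, so the Warn()+ValidationReject fallback after the switch is normally + // unreachable; it satisfies the compiler and guards against a logger configured not to + // exit on Fatal.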
+ var v pubsub.ValidatorEx = func(ctx context.Context, from peer.ID, message *pubsub.Message) pubsub.ValidationResult { + switch result := topicValidator(ctx, from, message); result { + case p2p.ValidationAccept: + return pubsub.ValidationAccept + case p2p.ValidationIgnore: + return pubsub.ValidationIgnore + case p2p.ValidationReject: + return pubsub.ValidationReject + default: + // should never happen, indicates a bug in the topic validator + g.logger.Fatal().Msgf("invalid validation result: %v", result) + } + // should never happen, indicates a bug in the topic validator, but we need to return something + g.logger.Warn(). + Bool(logging.KeySuspicious, true). + Msg("invalid validation result, returning reject") + return pubsub.ValidationReject + } + + return g.gossipSub.RegisterTopicValidator(topic, v, pubsub.WithValidatorInline(true)) +} + +func (g *GossipSubAdapter) UnregisterTopicValidator(topic string) error { + return g.gossipSub.UnregisterTopicValidator(topic) +} + +func (g *GossipSubAdapter) Join(topic string) (p2p.Topic, error) { + t, err := g.gossipSub.Join(topic) + if err != nil { + return nil, fmt.Errorf("could not join topic %s: %w", topic, err) + } + + if g.topicScoreParamFunc != nil { + topicParams := g.topicScoreParamFunc(t) + err = t.SetScoreParams(topicParams) + if err != nil { + return nil, fmt.Errorf("could not set score params for topic %s: %w", topic, err) + } + topicParamsLogger := utils.TopicScoreParamsLogger(g.logger, topic, topicParams) + topicParamsLogger.Info().Msg("joined topic with score params set") + } else { + g.logger.Warn(). + Bool(logging.KeyNetworkingSecurity, true). + Str("topic", topic). + Msg("joining topic without score params, this is not recommended from a security perspective") + } + return NewGossipSubTopic(t), nil +} + +func (g *GossipSubAdapter) GetTopics() []string { + return g.gossipSub.GetTopics() +} + +func (g *GossipSubAdapter) ListPeers(topic string) []peer.ID { + return g.gossipSub.ListPeers(topic) +} + +// GetLocalMeshPeers returns the list of peers in the local mesh for the given topic. +// Args: +// - topic: the topic. +// Returns: +// - []peer.ID: the list of peers in the local mesh for the given topic. +func (g *GossipSubAdapter) GetLocalMeshPeers(topic channels.Topic) []peer.ID { + return g.localMeshTracer.GetLocalMeshPeers(topic) +} + +// PeerScoreExposer returns the peer score exposer for the gossipsub adapter. The exposer is a read-only interface +// for querying peer scores and returns the local scoring table of the underlying gossipsub node. +// The exposer is only available if the gossipsub adapter was configured with a score tracer. +// If the gossipsub adapter was not configured with a score tracer, the exposer will be nil. +// Args: +// - None +// Returns: +// - p2p.PeerScoreExposer: the peer score exposer for the gossipsub adapter; nil if no score tracer was configured. +func (g *GossipSubAdapter) PeerScoreExposer() p2p.PeerScoreExposer { + return g.peerScoreExposer +} + +// ActiveClustersChanged is called when the set of active clusters of collection nodes changes. +// GossipSubAdapter implements this method to forward the call to the clusterChangeConsumer (rpc inspector), +// which will then update the cluster state of the rpc inspector.
+// Args: +// - lst: the list of active clusters +// Returns: +// - void +func (g *GossipSubAdapter) ActiveClustersChanged(lst flow.ChainIDList) { + g.clusterChangeConsumer.ActiveClustersChanged(lst) +} diff --git a/network/p2p/node/gossipSubAdapterConfig.go b/network/p2p/node/gossipSubAdapterConfig.go new file mode 100644 index 00000000000..e47ebace309 --- /dev/null +++ b/network/p2p/node/gossipSubAdapterConfig.go @@ -0,0 +1,242 @@ +package p2pnode + +import ( + "time" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + pb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + discoveryrouting "github.com/libp2p/go-libp2p/p2p/discovery/routing" + + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/network/p2p" +) + +// GossipSubAdapterConfig is a wrapper around libp2p pubsub options that +// implements the PubSubAdapterConfig interface for the Flow network. +type GossipSubAdapterConfig struct { + options []pubsub.Option + scoreTracer p2p.PeerScoreTracer + scoreOption p2p.ScoreOptionBuilder + pubsubTracer p2p.PubSubTracer + inspector p2p.GossipSubRPCInspector // currently only used to manage the lifecycle. +} + +var _ p2p.PubSubAdapterConfig = (*GossipSubAdapterConfig)(nil) + +// NewGossipSubAdapterConfig creates a new GossipSubAdapterConfig with the default options. +// Args: +// - base: the base pubsub adapter config +// +// Returns: +// - a new GossipSubAdapterConfig +func NewGossipSubAdapterConfig(base *p2p.BasePubSubAdapterConfig) *GossipSubAdapterConfig { + return &GossipSubAdapterConfig{ + options: defaultPubsubOptions(base), + } +} + +// WithRoutingDiscovery adds a routing discovery option to the config. +// Args: +// - routing: the routing discovery to use +// +// Returns: +// - None +func (g *GossipSubAdapterConfig) WithRoutingDiscovery(routing routing.ContentRouting) { + g.options = append(g.options, pubsub.WithDiscovery(discoveryrouting.NewRoutingDiscovery(routing))) +} + +// WithSubscriptionFilter adds a subscription filter option to the config. +// Args: +// - filter: the subscription filter to use +// +// Returns: +// - None +func (g *GossipSubAdapterConfig) WithSubscriptionFilter(filter p2p.SubscriptionFilter) { + g.options = append(g.options, pubsub.WithSubscriptionFilter(filter)) +} + +// WithScoreOption adds a score option to the config. +// Args: +// - option: the score option to use +// Returns: +// - None
func (g *GossipSubAdapterConfig) WithScoreOption(option p2p.ScoreOptionBuilder) { + params, thresholds := option.BuildFlowPubSubScoreOption() + g.scoreOption = option + g.options = append(g.options, pubsub.WithPeerScore(params, thresholds)) +} + +// WithMessageIdFunction adds a message ID function option to the config. +// Args: +// - f: the message ID function to use +// Returns: +// - None
func (g *GossipSubAdapterConfig) WithMessageIdFunction(f func([]byte) string) { + g.options = append(g.options, pubsub.WithMessageIdFn(func(pmsg *pb.Message) string { + return f(pmsg.Data) + })) +} + +// WithRpcInspector adds an RPC inspector option to the config. +// Args: +// - inspector: the RPC inspector to use +// Returns: +// - None
func (g *GossipSubAdapterConfig) WithRpcInspector(inspector p2p.GossipSubRPCInspector) { + g.options = append(g.options, pubsub.WithAppSpecificRpcInspector(inspector.Inspect)) + g.inspector = inspector +} + +// WithTracer adds a tracer option to the config.
+// Args:
+// - tracer: the tracer to use
+// Returns:
+// - None
+func (g *GossipSubAdapterConfig) WithTracer(tracer p2p.PubSubTracer) {
+	g.pubsubTracer = tracer
+	g.options = append(g.options, pubsub.WithRawTracer(tracer))
+}
+
+// WithPeerGater adds a peer gater option to the config.
+// Args:
+// - topicDeliveryWeights: the per-topic delivery weights to use
+// - sourceDecay: the decay interval from which the peer gater's source decay parameter is derived
+// Returns:
+// - None
+func (g *GossipSubAdapterConfig) WithPeerGater(topicDeliveryWeights map[string]float64, sourceDecay time.Duration) {
+	peerGaterParams := pubsub.NewPeerGaterParams(pubsub.DefaultPeerGaterThreshold, pubsub.DefaultPeerGaterGlobalDecay, pubsub.ScoreParameterDecay(sourceDecay)).WithTopicDeliveryWeights(topicDeliveryWeights)
+	g.options = append(g.options, pubsub.WithPeerGater(peerGaterParams))
+}
+
+// WithValidateQueueSize overrides the default validation queue size (32) with the given size.
+// CAUTION: Be careful setting this to a larger number, as it will change the backpressure behavior of the system.
+func (g *GossipSubAdapterConfig) WithValidateQueueSize(size int) {
+	g.options = append(g.options, pubsub.WithValidateQueueSize(size))
+}
+
+// ScoreTracer returns the tracer for the peer score.
+// Args:
+// - None
+//
+// Returns:
+// - p2p.PeerScoreTracer: the tracer for the peer score.
+func (g *GossipSubAdapterConfig) ScoreTracer() p2p.PeerScoreTracer {
+	return g.scoreTracer
+}
+
+// PubSubTracer returns the tracer for the pubsub.
+// Args:
+// - None
+// Returns:
+// - p2p.PubSubTracer: the tracer for the pubsub.
+func (g *GossipSubAdapterConfig) PubSubTracer() p2p.PubSubTracer {
+	return g.pubsubTracer
+}
+
+// ScoringComponent returns the score option builder as a component, so that the
+// PubSubAdapter can manage its lifecycle.
+func (g *GossipSubAdapterConfig) ScoringComponent() component.Component {
+	return g.scoreOption
+}
+
+// RpcInspectorComponent returns the component that manages the lifecycle of the inspector suite.
+// This is used to start and stop the inspector suite by the PubSubAdapter.
+// Args:
+// - None
+//
+// Returns:
+// - component.Component: the component that manages the lifecycle of the inspector suite.
+func (g *GossipSubAdapterConfig) RpcInspectorComponent() component.Component {
+	return g.inspector
+}
+
+// WithScoreTracer sets the tracer for the peer score.
+// Args:
+// - tracer: the tracer for the peer score.
+//
+// Returns:
+// - None
+func (g *GossipSubAdapterConfig) WithScoreTracer(tracer p2p.PeerScoreTracer) {
+	g.scoreTracer = tracer
+	g.options = append(g.options, pubsub.WithPeerScoreInspect(func(snapshot map[peer.ID]*pubsub.PeerScoreSnapshot) {
+		tracer.UpdatePeerScoreSnapshots(convertPeerScoreSnapshots(snapshot))
+	}, tracer.UpdateInterval()))
+}
+
+// convertPeerScoreSnapshots converts a libp2p pubsub peer score snapshot to a Flow peer score snapshot.
+// Args:
+// - snapshot: the libp2p pubsub peer score snapshot.
+//
+// Returns:
+// - map[peer.ID]*p2p.PeerScoreSnapshot: the Flow peer score snapshot.
+func convertPeerScoreSnapshots(snapshot map[peer.ID]*pubsub.PeerScoreSnapshot) map[peer.ID]*p2p.PeerScoreSnapshot {
+	newSnapshot := make(map[peer.ID]*p2p.PeerScoreSnapshot)
+	for id, snap := range snapshot {
+		newSnapshot[id] = &p2p.PeerScoreSnapshot{
+			Topics:             convertTopicScoreSnapshot(snap.Topics),
+			Score:              snap.Score,
+			AppSpecificScore:   snap.AppSpecificScore,
+			BehaviourPenalty:   snap.BehaviourPenalty,
+			IPColocationFactor: snap.IPColocationFactor,
+		}
+	}
+	return newSnapshot
+}
+
+// convertTopicScoreSnapshot converts a libp2p pubsub topic score snapshot to a Flow topic score snapshot.
+// Args:
+// - snapshot: the libp2p pubsub topic score snapshot.
+// +// Returns: +// - map[string]*p2p.TopicScoreSnapshot: the Flow topic score snapshot. +func convertTopicScoreSnapshot(snapshot map[string]*pubsub.TopicScoreSnapshot) map[string]*p2p.TopicScoreSnapshot { + newSnapshot := make(map[string]*p2p.TopicScoreSnapshot) + for topic, snap := range snapshot { + newSnapshot[topic] = &p2p.TopicScoreSnapshot{ + TimeInMesh: snap.TimeInMesh, + FirstMessageDeliveries: snap.FirstMessageDeliveries, + MeshMessageDeliveries: snap.MeshMessageDeliveries, + InvalidMessageDeliveries: snap.InvalidMessageDeliveries, + } + } + + return newSnapshot +} + +// TopicScoreParamFunc returns the topic score param function. This function is used to get the topic score params for a topic. +// The topic score params are used to set the topic parameters in GossipSub at the time of joining the topic. +// Args: +// - None +// +// Returns: +// - func(topic *pubsub.Topic) *pubsub.TopicScoreParams: the topic score param function if set, nil otherwise. +// - bool: true if the topic score param function is set, false otherwise. +func (g *GossipSubAdapterConfig) TopicScoreParamFunc() (func(topic *pubsub.Topic) *pubsub.TopicScoreParams, bool) { + if g.scoreOption != nil { + return func(topic *pubsub.Topic) *pubsub.TopicScoreParams { + return g.scoreOption.TopicScoreParams(topic) + }, true + } + + return nil, false +} + +// Build returns the libp2p pubsub options. +// Args: +// - None +// +// Returns: +// - []pubsub.Option: the libp2p pubsub options. +// +// Build is idempotent. +func (g *GossipSubAdapterConfig) Build() []pubsub.Option { + return g.options +} + +// defaultPubsubOptions returns the default libp2p pubsub options. These options are used by the Flow network to create a libp2p pubsub. +func defaultPubsubOptions(base *p2p.BasePubSubAdapterConfig) []pubsub.Option { + return []pubsub.Option{ + pubsub.WithMessageSigning(true), + pubsub.WithStrictSignatureVerification(true), + pubsub.WithMaxMessageSize(base.MaxMessageSize), + } +} diff --git a/network/p2p/p2pnode/gossipSubTopic.go b/network/p2p/node/gossipSubTopic.go similarity index 100% rename from network/p2p/p2pnode/gossipSubTopic.go rename to network/p2p/node/gossipSubTopic.go diff --git a/network/p2p/node/internal/cache.go b/network/p2p/node/internal/cache.go new file mode 100644 index 00000000000..b3c05ca4df0 --- /dev/null +++ b/network/p2p/node/internal/cache.go @@ -0,0 +1,119 @@ +package internal + +import ( + "fmt" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" + "golang.org/x/exp/maps" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" + "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" + "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/p2p" +) + +// DisallowListCache is the disallow-list cache. It is used to keep track of the disallow-listed peers and the reasons for it. +// Stored disallow-list causes are keyed by the hash of the peerID. +type DisallowListCache struct { + c *stdmap.Backend[flow.Identifier, map[network.DisallowListedCause]struct{}] +} + +// NewDisallowListCache creates a new disallow-list cache. The cache is backed by a stdmap.Backend. +// Args: +// - sizeLimit: the size limit of the cache, i.e., the maximum number of records that the cache can hold, recommended size is 100 * number of authorized nodes. +// - logger: the logger used by the cache. 
+// - collector: the metrics collector used by the cache.
+// Returns:
+// - *DisallowListCache: the created cache.
+func NewDisallowListCache(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *DisallowListCache {
+	backData := herocache.NewCache[map[network.DisallowListedCause]struct{}](sizeLimit,
+		herocache.DefaultOversizeFactor,
+		heropool.LRUEjection,
+		logger.With().Str("mempool", "disallow-list-records").Logger(),
+		collector)
+
+	return &DisallowListCache{
+		c: stdmap.NewBackend(stdmap.WithMutableBackData[flow.Identifier, map[network.DisallowListedCause]struct{}](backData)),
+	}
+}
+
+// IsDisallowListed determines whether the given peer is disallow-listed for any reason.
+// Args:
+// - peerID: the peer to check.
+// Returns:
+// - []network.DisallowListedCause: the list of causes for which the given peer is disallow-listed. If the peer is not disallow-listed for any reason,
+//   a nil slice is returned.
+// - bool: true if the peer is disallow-listed for any reason, false otherwise.
func (d *DisallowListCache) IsDisallowListed(peerID peer.ID) ([]network.DisallowListedCause, bool) {
+	causes, exists := d.c.Get(p2p.MakeId(peerID))
+	if !exists {
+		return nil, false
+	}
+
+	if len(causes) == 0 {
+		return nil, false
+	}
+
+	return maps.Keys(causes), true
+}
+
+// DisallowFor disallow-lists a peer for a cause.
+// Args:
+// - peerID: the peerID of the peer to be disallow-listed.
+// - cause: the cause for disallow-listing the peer.
+// Returns:
+// - []network.DisallowListedCause: the list of causes for which the peer is disallow-listed.
+// - error: if the operation fails, the error is irrecoverable.
+func (d *DisallowListCache) DisallowFor(peerID peer.ID, cause network.DisallowListedCause) ([]network.DisallowListedCause, error) {
+	initLogic := func() map[network.DisallowListedCause]struct{} {
+		return make(map[network.DisallowListedCause]struct{})
+	}
+
+	adjustLogic := func(causes map[network.DisallowListedCause]struct{}) map[network.DisallowListedCause]struct{} {
+		causes[cause] = struct{}{}
+		return causes
+	}
+	adjustedCauses, adjusted := d.c.AdjustWithInit(p2p.MakeId(peerID), adjustLogic, initLogic)
+	if !adjusted {
+		return nil, fmt.Errorf("failed to disallow list peer %s for cause %s", peerID, cause)
+	}
+
+	// returning a deep copy of causes (to avoid being mutated externally).
+	updatedCauses := make([]network.DisallowListedCause, 0, len(adjustedCauses))
+	for c := range adjustedCauses {
+		updatedCauses = append(updatedCauses, c)
+	}
+
+	return updatedCauses, nil
+}
+
+// AllowFor removes a cause from the disallow-listed causes of the given peerID.
+// Args:
+// - peerID: the peerID of the peer to be allow-listed.
+// - cause: the cause for allow-listing the peer.
+// Returns:
+// - the list of causes for which the peer remains disallow-listed. If the peerID is not found
+//   in the cache, an empty (non-nil) slice is returned; this is a benign condition.
+func (d *DisallowListCache) AllowFor(peerID peer.ID, cause network.DisallowListedCause) []network.DisallowListedCause {
+	adjustedCauses, adjusted := d.c.Adjust(p2p.MakeId(peerID), func(causes map[network.DisallowListedCause]struct{}) map[network.DisallowListedCause]struct{} {
+		delete(causes, cause)
+		return causes
+	})
+
+	if !adjusted {
+		// if the entity is not found in the cache, we return an empty list.
+		// we don't return a nil to be consistent with the case that entity is found but the list of causes is empty.
+ return make([]network.DisallowListedCause, 0) + } + + // returning a deep copy of causes (to avoid being mutated externally). + causes := make([]network.DisallowListedCause, 0, len(adjustedCauses)) + for c := range adjustedCauses { + causes = append(causes, c) + } + return causes +} diff --git a/network/p2p/node/internal/cache_test.go b/network/p2p/node/internal/cache_test.go new file mode 100644 index 00000000000..b5fc0533034 --- /dev/null +++ b/network/p2p/node/internal/cache_test.go @@ -0,0 +1,355 @@ +package internal_test + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/p2p/node/internal" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewDisallowListCache tests the NewDisallowListCache function. It verifies that the returned disallowListCache +// is not nil. +func TestNewDisallowListCache(t *testing.T) { + disallowListCache := internal.NewDisallowListCache(uint32(100), unittest.Logger(), metrics.NewNoopCollector()) + + // Verify that the new disallowListCache is not nil + assert.NotNil(t, disallowListCache) +} + +// TestDisallowFor_SinglePeer tests the DisallowFor function for a single peer. It verifies that the peerID is +// disallow-listed for the given cause and that the cause is returned when the peerID is disallow-listed again. +func TestDisallowFor_SinglePeer(t *testing.T) { + disallowListCache := internal.NewDisallowListCache(uint32(100), unittest.Logger(), metrics.NewNoopCollector()) + require.NotNil(t, disallowListCache) + + // disallowing a peerID for a cause when the peerID doesn't exist in the cache + causes, err := disallowListCache.DisallowFor(peer.ID("peer1"), network.DisallowListedCauseAdmin) + require.NoError(t, err) + require.Len(t, causes, 1) + require.Contains(t, causes, network.DisallowListedCauseAdmin) + + // disallowing a peerID for a cause when the peerID already exists in the cache + causes, err = disallowListCache.DisallowFor(peer.ID("peer1"), network.DisallowListedCauseAlsp) + require.NoError(t, err) + require.Len(t, causes, 2) + require.ElementsMatch(t, causes, []network.DisallowListedCause{network.DisallowListedCauseAdmin, network.DisallowListedCauseAlsp}) + + // disallowing a peerID for a duplicate cause + causes, err = disallowListCache.DisallowFor(peer.ID("peer1"), network.DisallowListedCauseAdmin) + require.NoError(t, err) + require.Len(t, causes, 2) + require.ElementsMatch(t, causes, []network.DisallowListedCause{network.DisallowListedCauseAdmin, network.DisallowListedCauseAlsp}) +} + +// TestDisallowFor_MultiplePeers tests the DisallowFor function for multiple peers. It verifies that the peerIDs are +// disallow-listed for the given cause and that the cause is returned when the peerIDs are disallow-listed again. 
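+//
+// The accumulate-and-dedup semantics under test, as a compact sketch (cache and pid
+// stand in for the fixtures below):
+//
+//	causes, _ := cache.DisallowFor(pid, network.DisallowListedCauseAdmin) // -> [Admin]
+//	causes, _ = cache.DisallowFor(pid, network.DisallowListedCauseAlsp)   // -> [Admin, Alsp]
+//	causes, _ = cache.DisallowFor(pid, network.DisallowListedCauseAdmin)  // -> still [Admin, Alsp]; causes form a set
+//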
+func TestDisallowFor_MultiplePeers(t *testing.T) {
+	disallowListCache := internal.NewDisallowListCache(uint32(100), unittest.Logger(), metrics.NewNoopCollector())
+	require.NotNil(t, disallowListCache)
+
+	for i := 0; i <= 10; i++ {
+		// disallowing a peerID for a cause when the peerID doesn't exist in the cache
+		causes, err := disallowListCache.DisallowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAdmin)
+		require.NoError(t, err)
+		require.Len(t, causes, 1)
+		require.Contains(t, causes, network.DisallowListedCauseAdmin)
+	}
+
+	for i := 0; i <= 10; i++ {
+		// disallowing a peerID for a cause when the peerID already exists in the cache
+		causes, err := disallowListCache.DisallowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAlsp)
+		require.NoError(t, err)
+		require.Len(t, causes, 2)
+		require.ElementsMatch(t, causes, []network.DisallowListedCause{network.DisallowListedCauseAdmin, network.DisallowListedCauseAlsp})
+	}
+
+	for i := 0; i <= 10; i++ {
+		// getting the disallow-listed causes for a peerID
+		causes, disallowListed := disallowListCache.IsDisallowListed(peer.ID(fmt.Sprintf("peer-%d", i)))
+		require.True(t, disallowListed)
+		require.Len(t, causes, 2)
+		require.ElementsMatch(t, causes, []network.DisallowListedCause{network.DisallowListedCauseAdmin, network.DisallowListedCauseAlsp})
+	}
+}
+
+// TestAllowFor_SinglePeer is a unit test function to verify the behavior of DisallowListCache for a single peer.
+// The test checks the following functionalities in sequence:
+// 1. Allowing a peerID for a cause when the peerID doesn't exist in the cache.
+// 2. Disallowing the peerID for a cause when the peerID doesn't exist in the cache.
+// 3. Getting the disallow-listed causes for the peerID.
+// 4. Allowing a peerID for a cause when the peerID already exists in the cache.
+// 5. Getting the disallow-listed causes for the peerID.
+// 6. Disallowing the peerID for a cause.
+// 7. Allowing the peerID for a different cause than the one it is disallowed for, when the peerID already exists in the cache.
+// 8. Disallowing the peerID for another cause.
+// 9. Allowing the peerID for the first cause.
+// 10. Allowing the peerID for the second cause.
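+//
+// The core disallow/allow round trip, sketched (cache and pid stand in for the fixtures below):
+//
+//	_, _ = cache.DisallowFor(pid, network.DisallowListedCauseAdmin) // pid is now disallow-listed for Admin
+//	left := cache.AllowFor(pid, network.DisallowListedCauseAdmin)   // Admin removed; left is empty
+//	_, listed := cache.IsDisallowListed(pid)                        // listed == false
+//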
+func TestAllowFor_SinglePeer(t *testing.T) {
+	disallowListCache := internal.NewDisallowListCache(uint32(100), unittest.Logger(), metrics.NewNoopCollector())
+	require.NotNil(t, disallowListCache)
+	peerID := peer.ID("peer1")
+
+	// allowing the peerID for a cause when the peerID doesn't exist in the cache
+	causes := disallowListCache.AllowFor(peerID, network.DisallowListedCauseAdmin)
+	require.Len(t, causes, 0)
+
+	// disallowing the peerID for a cause when the peerID doesn't exist in the cache
+	causes, err := disallowListCache.DisallowFor(peerID, network.DisallowListedCauseAdmin)
+	require.NoError(t, err)
+	require.Len(t, causes, 1)
+	require.Contains(t, causes, network.DisallowListedCauseAdmin)
+
+	// getting the disallow-listed causes for the peerID
+	causes, disallowListed := disallowListCache.IsDisallowListed(peerID)
+	require.True(t, disallowListed)
+	require.Len(t, causes, 1)
+	require.Contains(t, causes, network.DisallowListedCauseAdmin)
+
+	// allowing a peerID for a cause when the peerID already exists in the cache
+	causes = disallowListCache.AllowFor(peerID, network.DisallowListedCauseAdmin)
+	require.Len(t, causes, 0)
+
+	// getting the disallow-listed causes for the peerID
+	causes, disallowListed = disallowListCache.IsDisallowListed(peerID)
+	require.False(t, disallowListed)
+	require.Len(t, causes, 0)
+
+	// disallowing the peerID for a cause
+	causes, err = disallowListCache.DisallowFor(peerID, network.DisallowListedCauseAdmin)
+	require.NoError(t, err)
+	require.Len(t, causes, 1)
+
+	// allowing the peerID for a different cause than the one it is disallowed for
+	causes = disallowListCache.AllowFor(peerID, network.DisallowListedCauseAlsp)
+	require.Len(t, causes, 1)
+	require.Contains(t, causes, network.DisallowListedCauseAdmin) // the peerID is still disallow-listed for the previous cause
+
+	// disallowing the peerID for another cause
+	causes, err = disallowListCache.DisallowFor(peerID, network.DisallowListedCauseAlsp)
+	require.NoError(t, err)
+	require.Len(t, causes, 2)
+	require.ElementsMatch(t, causes, []network.DisallowListedCause{network.DisallowListedCauseAdmin, network.DisallowListedCauseAlsp})
+
+	// allowing the peerID for the first cause
+	causes = disallowListCache.AllowFor(peerID, network.DisallowListedCauseAdmin)
+	require.Len(t, causes, 1)
+	require.Contains(t, causes, network.DisallowListedCauseAlsp) // the peerID is still disallow-listed for the remaining cause
+
+	// allowing the peerID for the second cause
+	causes = disallowListCache.AllowFor(peerID, network.DisallowListedCauseAlsp)
+	require.Len(t, causes, 0)
+}
+
+// TestAllowFor_MultiplePeers_Sequentially is a unit test function to test the behavior of DisallowListCache with multiple peers.
+// The test checks the following functionalities in sequence:
+// 1. Allowing a peerID for a cause when the peerID doesn't exist in the cache.
+// 2. Disallowing peers for a cause.
+// 3. Getting the disallow-listed causes for a peerID.
+// 4. Allowing the peer IDs for a cause different than the one they are disallow-listed for.
+// 5. Disallowing the peer IDs for a different cause.
+// 6. Allowing the peer IDs for the first cause.
+// 7. Allowing the peer IDs for the second cause.
+func TestAllowFor_MultiplePeers_Sequentially(t *testing.T) { + disallowListCache := internal.NewDisallowListCache(uint32(100), unittest.Logger(), metrics.NewNoopCollector()) + require.NotNil(t, disallowListCache) + + for i := 0; i <= 10; i++ { + // allowing a peerID for a cause when the peerID doesn't exist in the cache + causes := disallowListCache.AllowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAdmin) + require.Len(t, causes, 0) + } + + for i := 0; i <= 10; i++ { + // disallowing peers for a cause + causes, err := disallowListCache.DisallowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAlsp) + require.NoError(t, err) + require.Len(t, causes, 1) + require.Contains(t, causes, network.DisallowListedCauseAlsp) + } + + for i := 0; i <= 10; i++ { + // getting the disallow-listed causes for a peerID + causes, disallowListed := disallowListCache.IsDisallowListed(peer.ID(fmt.Sprintf("peer-%d", i))) + require.True(t, disallowListed) + require.Len(t, causes, 1) + require.Contains(t, causes, network.DisallowListedCauseAlsp) + } + + for i := 0; i <= 10; i++ { + // allowing the peer ids for a cause different than the one they are disallow-listed for + causes := disallowListCache.AllowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAdmin) + require.Len(t, causes, 1) + require.Contains(t, causes, network.DisallowListedCauseAlsp) + } + + for i := 0; i <= 10; i++ { + // disallowing the peer ids for a different cause + causes, err := disallowListCache.DisallowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAdmin) + require.NoError(t, err) + require.Len(t, causes, 2) + require.ElementsMatch(t, causes, []network.DisallowListedCause{network.DisallowListedCauseAdmin, network.DisallowListedCauseAlsp}) + } + + for i := 0; i <= 10; i++ { + // allowing the peer ids for the first cause + causes := disallowListCache.AllowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAdmin) + require.Len(t, causes, 1) + require.Contains(t, causes, network.DisallowListedCauseAlsp) + } + + for i := 0; i <= 10; i++ { + // allowing the peer ids for the second cause + causes := disallowListCache.AllowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAlsp) + require.Len(t, causes, 0) + } +} + +// TestAllowFor_MultiplePeers_Concurrently is a unit test function that verifies the behavior of DisallowListCache +// when multiple peerIDs are added and managed concurrently. This test is designed to confirm that DisallowListCache +// works as expected under concurrent access, an important aspect for a system dealing with multiple connections. +// +// The test runs multiple goroutines simultaneously, each handling a different peerID and performs the following +// operations in the sequence: +// 1. Allowing a peerID for a cause when the peerID doesn't exist in the cache. +// 2. Disallowing peers for a cause. +// 3. Getting the disallow-listed causes for a peerID. +// 4. Allowing the peer ids for a cause different than the one they are disallow-listed for. +// 5. Disallowing the peer ids for a different cause. +// 6. Allowing the peer ids for the first cause. +// 7. Allowing the peer ids for the second cause. +// 8. Getting the disallow-listed causes for a peerID. +// 9. Allowing a peerID for a cause when the peerID doesn't exist in the cache for a new set of peers. 
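+//
+// Every phase below fans out one goroutine per peer and waits for the whole phase to
+// finish before starting the next, following this pattern (sketch of the loop used throughout):
+//
+//	var wg sync.WaitGroup
+//	for i := 0; i <= 10; i++ {
+//		wg.Add(1)
+//		go func(i int) {
+//			defer wg.Done()
+//			// a single cache operation plus its assertions
+//		}(i)
+//	}
+//	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+//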
+func TestAllowFor_MultiplePeers_Concurrently(t *testing.T) { + disallowListCache := internal.NewDisallowListCache(uint32(100), unittest.Logger(), metrics.NewNoopCollector()) + require.NotNil(t, disallowListCache) + + var wg sync.WaitGroup + for i := 0; i <= 10; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + // allowing a peerID for a cause when the peerID doesn't exist in the cache + causes := disallowListCache.AllowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAdmin) + require.Len(t, causes, 0) + }(i) + } + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + + for i := 0; i <= 10; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + // disallowing peers for a cause + causes, err := disallowListCache.DisallowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAlsp) + require.NoError(t, err) + require.Len(t, causes, 1) + require.Contains(t, causes, network.DisallowListedCauseAlsp) + }(i) + } + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + + for i := 0; i <= 10; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + // getting the disallow-listed causes for a peerID + causes, disallowListed := disallowListCache.IsDisallowListed(peer.ID(fmt.Sprintf("peer-%d", i))) + require.Len(t, causes, 1) + require.True(t, disallowListed) + require.Contains(t, causes, network.DisallowListedCauseAlsp) + }(i) + } + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + + for i := 0; i <= 10; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + // allowing the peer ids for a cause different than the one they are disallow-listed for + causes := disallowListCache.AllowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAdmin) + require.Len(t, causes, 1) + require.Contains(t, causes, network.DisallowListedCauseAlsp) + }(i) + } + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + + for i := 0; i <= 10; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + // disallowing the peer ids for a different cause + causes, err := disallowListCache.DisallowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAdmin) + require.NoError(t, err) + require.Len(t, causes, 2) + require.ElementsMatch(t, causes, []network.DisallowListedCause{network.DisallowListedCauseAdmin, network.DisallowListedCauseAlsp}) + }(i) + } + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + + for i := 0; i <= 10; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + // allowing the peer ids for the first cause + causes := disallowListCache.AllowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAdmin) + require.Len(t, causes, 1) + require.Contains(t, causes, network.DisallowListedCauseAlsp) + }(i) + } + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + + for i := 0; i <= 10; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + // allowing the peer ids for the second cause + causes := disallowListCache.AllowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAlsp) + require.Len(t, causes, 0) + }(i) + } + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + + for i := 0; i <= 10; i++ { + wg.Add(1) 
+		go func(i int) {
+			defer wg.Done()
+
+			// getting the disallow-listed causes for a peerID
+			causes, disallowListed := disallowListCache.IsDisallowListed(peer.ID(fmt.Sprintf("peer-%d", i)))
+			require.False(t, disallowListed)
+			require.Len(t, causes, 0)
+		}(i)
+	}
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+
+	for i := 11; i <= 20; i++ {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+
+			// allowing a peerID for a cause when the peerID doesn't exist in the cache
+			causes := disallowListCache.AllowFor(peer.ID(fmt.Sprintf("peer-%d", i)), network.DisallowListedCauseAdmin)
+			require.Len(t, causes, 0)
+		}(i)
+	}
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+}
diff --git a/network/p2p/node/internal/protocolPeerCache.go b/network/p2p/node/internal/protocolPeerCache.go
new file mode 100644
index 00000000000..81af4a06538
--- /dev/null
+++ b/network/p2p/node/internal/protocolPeerCache.go
@@ -0,0 +1,116 @@
+package internal
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/libp2p/go-libp2p/core/event"
+	"github.com/libp2p/go-libp2p/core/host"
+	libp2pnet "github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/protocol"
+	"github.com/rs/zerolog"
+	"golang.org/x/exp/maps"
+
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
+)
+
+// ProtocolPeerCache stores a mapping from protocol ID to the peers who support that protocol.
+type ProtocolPeerCache struct {
+	protocolPeers map[protocol.ID]map[peer.ID]struct{}
+	sync.RWMutex
+}
+
+// NewProtocolPeerCache creates a new ProtocolPeerCache instance using the given host and supported protocols.
+// Only protocols passed in the protocols list will be tracked.
+func NewProtocolPeerCache(logger zerolog.Logger, h host.Host, protocols []protocol.ID) (*ProtocolPeerCache, error) {
+	protocolPeers := make(map[protocol.ID]map[peer.ID]struct{})
+	for _, pid := range protocols {
+		protocolPeers[pid] = make(map[peer.ID]struct{})
+	}
+	p := &ProtocolPeerCache{protocolPeers: protocolPeers}
+
+	// If no protocols are passed, this is a no-op cache
+	if len(protocols) == 0 {
+		return p, nil
+	}
+
+	sub, err := h.EventBus().
+ Subscribe([]interface{}{new(event.EvtPeerIdentificationCompleted), new(event.EvtPeerProtocolsUpdated)}) + if err != nil { + return nil, fmt.Errorf("could not subscribe to peer protocol update events: %w", err) + } + + h.Network().Notify(&libp2pnet.NotifyBundle{ + DisconnectedF: func(n libp2pnet.Network, c libp2pnet.Conn) { + peer := c.RemotePeer() + if len(n.ConnsToPeer(peer)) == 0 { + p.RemovePeer(peer) + } + }, + }) + go p.consumeSubscription(logger, h, sub) + + return p, nil +} + +func (p *ProtocolPeerCache) RemovePeer(peerID peer.ID) { + p.Lock() + defer p.Unlock() + for _, peers := range p.protocolPeers { + delete(peers, peerID) + } +} + +func (p *ProtocolPeerCache) AddProtocols(peerID peer.ID, protocols []protocol.ID) { + p.Lock() + defer p.Unlock() + for _, pid := range protocols { + if peers, ok := p.protocolPeers[pid]; ok { + peers[peerID] = struct{}{} + } + } +} + +func (p *ProtocolPeerCache) RemoveProtocols(peerID peer.ID, protocols []protocol.ID) { + p.Lock() + defer p.Unlock() + for _, pid := range protocols { + if peers, ok := p.protocolPeers[pid]; ok { + delete(peers, peerID) + } + } +} + +func (p *ProtocolPeerCache) GetPeers(pid protocol.ID) peer.IDSlice { + p.RLock() + defer p.RUnlock() + + peers, ok := p.protocolPeers[pid] + if !ok { + return peer.IDSlice{} + } + + return maps.Keys(peers) +} + +func (p *ProtocolPeerCache) consumeSubscription(logger zerolog.Logger, h host.Host, sub event.Subscription) { + defer sub.Close() + logger.Debug().Msg("starting peer protocol event subscription loop") + for e := range sub.Out() { + logger.Debug().Interface("event", e).Msg("received new peer protocol event") + switch evt := e.(type) { + case event.EvtPeerIdentificationCompleted: + protocols, err := h.Peerstore().GetProtocols(evt.Peer) + if err != nil { + logger.Err(err).Str("peer_id", p2plogging.PeerId(evt.Peer)).Msg("failed to get protocols for peer") + continue + } + p.AddProtocols(evt.Peer, protocols) + case event.EvtPeerProtocolsUpdated: + p.AddProtocols(evt.Peer, evt.Added) + p.RemoveProtocols(evt.Peer, evt.Removed) + } + } + logger.Debug().Msg("exiting peer protocol event subscription loop") +} diff --git a/network/p2p/node/internal/protocolPeerCache_test.go b/network/p2p/node/internal/protocolPeerCache_test.go new file mode 100644 index 00000000000..cc6d4202996 --- /dev/null +++ b/network/p2p/node/internal/protocolPeerCache_test.go @@ -0,0 +1,82 @@ +package internal_test + +import ( + "context" + "slices" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/onflow/crypto" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + p2pbuilder "github.com/onflow/flow-go/network/p2p/builder" + "github.com/onflow/flow-go/network/p2p/node/internal" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestProtocolPeerCache(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + p1 := protocol.ID("p1") + p2 := protocol.ID("p2") + p3 := protocol.ID("p3") + + // create three hosts, and a pcache for the first + // the cache supports all 3 + h1, err := p2pbuilder.DefaultLibP2PHost(unittest.DefaultAddress, unittest.KeyFixture(crypto.ECDSASecp256k1)) + require.NoError(t, err) + pcache, err := internal.NewProtocolPeerCache(zerolog.Nop(), h1, []protocol.ID{p1, p2, p3}) + require.NoError(t, err) + h2, err := p2pbuilder.DefaultLibP2PHost(unittest.DefaultAddress, 
unittest.KeyFixture(crypto.ECDSASecp256k1)) + require.NoError(t, err) + h3, err := p2pbuilder.DefaultLibP2PHost(unittest.DefaultAddress, unittest.KeyFixture(crypto.ECDSASecp256k1)) + require.NoError(t, err) + + // register each host on a separate protocol + noopHandler := func(s network.Stream) {} + h1.SetStreamHandler(p1, noopHandler) + h2.SetStreamHandler(p2, noopHandler) + h3.SetStreamHandler(p3, noopHandler) + + // connect the hosts to each other + require.NoError(t, h1.Connect(ctx, *host.InfoFromHost(h2))) + require.NoError(t, h1.Connect(ctx, *host.InfoFromHost(h3))) + require.NoError(t, h2.Connect(ctx, *host.InfoFromHost(h3))) + + // check that h1's pcache reflects the protocols supported by h2 and h3 + assert.Eventually(t, func() bool { + peers2 := pcache.GetPeers(p2) + peers3 := pcache.GetPeers(p3) + ok2 := slices.Contains(peers2, h2.ID()) + ok3 := slices.Contains(peers3, h3.ID()) + return len(peers2) == 1 && len(peers3) == 1 && ok2 && ok3 + }, 3*time.Second, 50*time.Millisecond) + + // remove h2's support for p2 + h2.RemoveStreamHandler(p2) + + // check that h1's pcache reflects the change + assert.Eventually(t, func() bool { + return len(pcache.GetPeers(p2)) == 0 + }, 3*time.Second, 50*time.Millisecond) + + // add support for p4 on h2 and h3 + // note: pcache does NOT support p4 and should not cache it + p4 := protocol.ID("p4") + h2.SetStreamHandler(p4, noopHandler) + h3.SetStreamHandler(p4, noopHandler) + + // check that h1's pcache never contains p4 + assert.Never(t, func() bool { + peers4 := pcache.GetPeers(p4) + ok2 := slices.Contains(peers4, h2.ID()) + ok3 := slices.Contains(peers4, h3.ID()) + return len(peers4) == 2 && ok2 && ok3 + }, 1*time.Second, 50*time.Millisecond) +} diff --git a/network/p2p/node/libp2pNode.go b/network/p2p/node/libp2pNode.go new file mode 100644 index 00000000000..05069106f86 --- /dev/null +++ b/network/p2p/node/libp2pNode.go @@ -0,0 +1,673 @@ +// Package p2pnode encapsulates the libp2p library +package p2pnode + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/go-playground/validator/v10" + "github.com/hashicorp/go-multierror" + dht "github.com/libp2p/go-libp2p-kad-dht" + kbucket "github.com/libp2p/go-libp2p-kbucket" + "github.com/libp2p/go-libp2p/core/host" + libp2pnet "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + flownet "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/internal/p2putils" + "github.com/onflow/flow-go/network/p2p" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" + nodeinternal "github.com/onflow/flow-go/network/p2p/node/internal" + "github.com/onflow/flow-go/network/p2p/unicast/protocols" + "github.com/onflow/flow-go/utils/logging" +) + +const ( + _ = iota + _ = 1 << (10 * iota) + mb +) + +const ( + // DefaultMaxPubSubMsgSize defines the maximum message size in publish and multicast modes + DefaultMaxPubSubMsgSize = 5 * mb // 5 mb + + // timeout for FindPeer queries to the routing system + // TODO: is this a sensible value? + findPeerQueryTimeout = 10 * time.Second +) + +var _ p2p.LibP2PNode = (*Node)(nil) + +// Node is a wrapper around the LibP2P host. 
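+// It implements p2p.LibP2PNode, composing unicast stream management, pubsub, routing,
+// and peer management behind a single component. A rough lifecycle sketch (the p2p
+// builder packages are the authoritative way to construct a node; config fields elided):
+//
+//	node, err := NewNode(&p2p.NodeConfig{ /* host, logger, caches, ... */ })
+//	if err != nil {
+//		// irrecoverable: the node cannot be used
+//	}
+//	node.Start(signalerCtx)
+//	defer func() { _ = node.Stop() }()
+//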
+type Node struct { + component.Component + sync.RWMutex + uniMgr p2p.UnicastManager + host host.Host // reference to the libp2p host (https://godoc.org/github.com/libp2p/go-libp2p/core/host) + pubSub p2p.PubSubAdapter + logger zerolog.Logger // used to provide logging + topics map[channels.Topic]p2p.Topic // map of a topic string to an actual topic instance + subs map[channels.Topic]p2p.Subscription // map of a topic string to an actual subscription + routing routing.Routing + pCache p2p.ProtocolPeerCache + peerManager p2p.PeerManager + // Cache of temporary disallow-listed peers, when a peer is disallow-listed, the connections to that peer + // are closed and further connections are not allowed till the peer is removed from the disallow-list. + disallowListedCache p2p.DisallowListCache + parameters *p2p.NodeParameters +} + +// NewNode creates a new libp2p node and sets its parameters. +// Args: +// - cfg: The configuration for the libp2p node. +// +// Returns: +// - *Node: The created libp2p node. +// +// - error: An error, if any occurred during the process. This includes failure in creating the node. The returned error is irrecoverable, and the node cannot be used. +func NewNode(cfg *p2p.NodeConfig) (*Node, error) { + err := validator.New().Struct(cfg) + if err != nil { + return nil, fmt.Errorf("invalid config: %w", err) + } + + pCache, err := nodeinternal.NewProtocolPeerCache(cfg.Logger, cfg.Host, cfg.ProtocolPeerCacheList) + if err != nil { + return nil, fmt.Errorf("failed to create protocol peer cache: %w", err) + } + + return &Node{ + host: cfg.Host, + logger: cfg.Logger.With().Str("component", "libp2p-node").Logger(), + topics: make(map[channels.Topic]p2p.Topic), + subs: make(map[channels.Topic]p2p.Subscription), + pCache: pCache, + peerManager: cfg.PeerManager, + parameters: cfg.Parameters, + disallowListedCache: nodeinternal.NewDisallowListCache( + cfg.DisallowListCacheCfg.MaxSize, + cfg.Logger.With().Str("module", "disallow-list-cache").Logger(), + cfg.DisallowListCacheCfg.Metrics, + ), + }, nil +} + +func (n *Node) Start(ctx irrecoverable.SignalerContext) { + n.Component.Start(ctx) +} + +// Stop terminates the libp2p node. +// All errors returned from this function can be considered benign. 
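+// Shutdown proceeds in stages: all topic subscriptions are cancelled, the libp2p host
+// and its peer store are closed, and the node then waits briefly for all listen
+// addresses to be released before returning.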
+func (n *Node) Stop() error {
+	var result error
+
+	n.logger.Debug().Msg("unsubscribing from all topics")
+	for t := range n.topics {
+		err := n.unsubscribeTopic(t)
+		// context cancelled errors are expected while unsubscribing from topics during shutdown
+		if err != nil && !errors.Is(err, context.Canceled) {
+			result = multierror.Append(result, err)
+		}
+	}
+
+	n.logger.Debug().Msg("stopping libp2p node")
+	if err := n.host.Close(); err != nil {
+		result = multierror.Append(result, err)
+	}
+
+	n.logger.Debug().Msg("closing peer store")
+	// to prevent peerstore routine leak (https://github.com/libp2p/go-libp2p/issues/718)
+	if err := n.host.Peerstore().Close(); err != nil {
+		n.logger.Debug().Err(err).Msg("closing peer store")
+		result = multierror.Append(result, err)
+	}
+
+	if result != nil {
+		return result
+	}
+
+	addrs := len(n.host.Network().ListenAddresses())
+	ticker := time.NewTicker(time.Millisecond * 2)
+	defer ticker.Stop()
+	timeout := time.After(time.Second)
+	for addrs > 0 {
+		// wait for all listen addresses to have been removed
+		select {
+		case <-timeout:
+			n.logger.Error().Int("num_listen_addrs", addrs).Msg("listen addresses still open")
+			return nil
+		case <-ticker.C:
+			addrs = len(n.host.Network().ListenAddresses())
+		}
+	}
+
+	n.logger.Debug().Msg("libp2p node stopped successfully")
+
+	return nil
+}
+
+// ConnectToPeer adds a peer to this node by adding it to this node's peerstore and connecting to it.
+// All errors returned from this function can be considered benign.
+func (n *Node) ConnectToPeer(ctx context.Context, peerInfo peer.AddrInfo) error {
+	return n.host.Connect(ctx, peerInfo)
+}
+
+// RemovePeer closes the connection with the peer.
+// All errors returned from this function can be considered benign.
+func (n *Node) RemovePeer(peerID peer.ID) error {
+	err := n.host.Network().ClosePeer(peerID)
+	if err != nil {
+		return fmt.Errorf("failed to remove peer %s: %w", peerID, err)
+	}
+	// logging with suspicious level as we only expect to disconnect from a peer if it is not part of the
+	// protocol state.
+	n.logger.Warn().
+		Str("peer_id", p2plogging.PeerId(peerID)).
+		Bool(logging.KeySuspicious, true).
+		Msg("disconnected from peer")
+
+	return nil
+}
+
+// GetPeersForProtocol returns a slice of peer IDs for the specified protocol ID.
+func (n *Node) GetPeersForProtocol(pid protocol.ID) peer.IDSlice {
+	return n.pCache.GetPeers(pid)
+}
+
+// OpenAndWriteOnStream opens a new stream to a peer. The stream is opened to the given peerID
+// and writingLogic is executed on the stream. The created stream does not need to be reused and can be inexpensively
+// created for each send. Moreover, the stream creation does not incur a round-trip time as the stream negotiation happens
+// on an existing connection.
+//
+// Args:
+// - ctx: The context used to control the stream's lifecycle.
+// - peerID: The ID of the peer to open the stream to.
+// - protectionTag: A tag that protects the connection and ensures that the connection manager keeps it alive, and
+//   won't prune the connection while the tag is active.
+// - writingLogic: A callback function that contains the logic for writing to the stream. It allows an external caller to
+//   write to the stream without having to worry about the stream creation and management.
+//
+// Returns:
+// error: An error, if any occurred during the process. This includes failure in creating the stream, setting the write
+// deadline, executing the writing logic, resetting the stream if the writing logic fails, or closing the stream.
+// All returned errors during this process can be considered benign. +func (n *Node) OpenAndWriteOnStream(ctx context.Context, peerID peer.ID, protectionTag string, writingLogic func(stream libp2pnet.Stream) error) error { + lg := n.logger.With().Str("remote_peer_id", p2plogging.PeerId(peerID)).Logger() + if n.parameters.EnableProtectedStreams { + n.host.ConnManager().Protect(peerID, protectionTag) + defer n.host.ConnManager().Unprotect(peerID, protectionTag) + lg = lg.With().Str("protection_tag", protectionTag).Logger() + lg.Trace().Msg("attempting to create protected stream") + } + + // streams don't need to be reused and are fairly inexpensive to be created for each send. + // A stream creation does NOT incur an RTT as stream negotiation happens on an existing connection. + s, err := n.createStream(ctx, peerID) + if err != nil { + return fmt.Errorf("failed to create stream for %s: %w", peerID, err) + } + lg.Trace().Msg("successfully created stream") + + deadline, _ := ctx.Deadline() + err = s.SetWriteDeadline(deadline) + if err != nil { + return fmt.Errorf("failed to set write deadline for stream: %w", err) + } + lg.Trace().Msg("successfully set write deadline on stream") + + err = writingLogic(s) + if err != nil { + // reset the stream to ensure that the next stream creation is not affected by the error. + resetErr := s.Reset() + if resetErr != nil { + n.logger.Error(). + Str("target_peer_id", p2plogging.PeerId(peerID)). + Err(resetErr). + Msg("failed to reset stream") + } + + return fmt.Errorf("writing logic failed for %s: %w", peerID, err) + } + lg.Trace().Msg("successfully wrote on stream") + + // close the stream immediately + err = s.Close() + if err != nil { + return fmt.Errorf("failed to close the stream for %s: %w", peerID, err) + } + lg.Trace().Msg("successfully closed stream") + + return nil +} + +// createStream creates a new stream to the given peer. +// Args: +// - ctx: The context used to control the stream's lifecycle. +// - peerID: The ID of the peer to open the stream to. +// +// Returns: +// - libp2pnet.Stream: The created stream. +// - error: An error, if any occurred during the process. This includes failure in creating the stream. All returned +// errors during this process can be considered benign. +func (n *Node) createStream(ctx context.Context, peerID peer.ID) (libp2pnet.Stream, error) { + lg := n.logger.With().Str("peer_id", p2plogging.PeerId(peerID)).Logger() + + // If we do not currently have any addresses for the given peer, stream creation will almost + // certainly fail. If this Node was configured with a routing system, we can try to use it to + // look up the address of the peer. + if len(n.host.Peerstore().Addrs(peerID)) == 0 && n.routing != nil { + lg.Debug().Msg("address not found in peer store, searching for peer in routing system") + + var err error + func() { + timedCtx, cancel := context.WithTimeout(ctx, findPeerQueryTimeout) + defer cancel() + // try to find the peer using the routing system + _, err = n.routing.FindPeer(timedCtx, peerID) + }() + + if err != nil { + lg.Warn().Err(err).Msg("address not found in both peer store and routing system") + } else { + lg.Debug().Msg("address not found in peer store, but found in routing system search") + } + } + + stream, err := n.uniMgr.CreateStream(ctx, peerID) + if err != nil { + return nil, flownet.NewPeerUnreachableError(fmt.Errorf("could not create stream peer_id: %s: %w", peerID, err)) + } + + lg.Info(). + Str("networking_protocol_id", string(stream.Protocol())). 
+ Msg("stream successfully created to remote peer") + return stream, nil +} + +// ID returns the peer.ID of the node, which is the unique identifier of the node at the libp2p level. +// For other libp2p nodes, the current node is identified by this ID. +func (n *Node) ID() peer.ID { + return n.host.ID() +} + +// GetIPPort returns the IP and Port the libp2p node is listening on. +// All errors returned from this function can be considered benign. +func (n *Node) GetIPPort() (string, string, error) { + return p2putils.IPPortFromMultiAddress(n.host.Network().ListenAddresses()...) +} + +// RoutingTable returns the node routing table +func (n *Node) RoutingTable() *kbucket.RoutingTable { + return n.routing.(*dht.IpfsDHT).RoutingTable() +} + +// ListPeers returns list of peer IDs for peers subscribed to the topic. +func (n *Node) ListPeers(topic string) []peer.ID { + return n.pubSub.ListPeers(topic) +} + +// Subscribe subscribes the node to the given topic and returns the subscription +// All errors returned from this function can be considered benign. +func (n *Node) Subscribe(topic channels.Topic, topicValidator p2p.TopicValidatorFunc) (p2p.Subscription, error) { + n.Lock() + defer n.Unlock() + + // Check if the topic has been already created and is in the cache + n.pubSub.GetTopics() + tp, found := n.topics[topic] + var err error + if !found { + if err := n.pubSub.RegisterTopicValidator(topic.String(), topicValidator); err != nil { + n.logger.Err(err).Str("topic", topic.String()).Msg("failed to register topic validator, aborting subscription") + return nil, fmt.Errorf("failed to register topic validator: %w", err) + } + + tp, err = n.pubSub.Join(topic.String()) + if err != nil { + if err := n.pubSub.UnregisterTopicValidator(topic.String()); err != nil { + n.logger.Err(err).Str("topic", topic.String()).Msg("failed to unregister topic validator") + } + + return nil, fmt.Errorf("could not join topic (%s): %w", topic, err) + } + + n.topics[topic] = tp + } + + // Create a new subscription + s, err := tp.Subscribe() + if err != nil { + return s, fmt.Errorf("could not subscribe to topic (%s): %w", topic, err) + } + + // Add the subscription to the cache + n.subs[topic] = s + + n.logger.Debug(). + Str("topic", topic.String()). + Msg("subscribed to topic") + return s, err +} + +// Unsubscribe cancels the subscriber and closes the topic. +// Args: +// topic: topic to unsubscribe from. +// Returns: +// error: error if any, which means unsubscribe failed. +// All errors returned from this function can be considered benign. +func (n *Node) Unsubscribe(topic channels.Topic) error { + err := n.unsubscribeTopic(topic) + if err != nil { + return fmt.Errorf("failed to unsubscribe from topic: %w", err) + } + + n.RequestPeerUpdate() + + return nil +} + +// unsubscribeTopic cancels the subscriber and closes the topic. +// All errors returned from this function can be considered benign. +// Args: +// +// topic: topic to unsubscribe from +// +// Returns: +// error: error if any. 
+func (n *Node) unsubscribeTopic(topic channels.Topic) error {
+	n.Lock()
+	defer n.Unlock()
+
+	// Remove the Subscriber from the cache
+	if s, found := n.subs[topic]; found {
+		s.Cancel()
+		n.subs[topic] = nil
+		delete(n.subs, topic)
+	}
+
+	tp, found := n.topics[topic]
+	if !found {
+		return fmt.Errorf("could not find topic (%s)", topic)
+	}
+
+	if err := n.pubSub.UnregisterTopicValidator(topic.String()); err != nil {
+		return fmt.Errorf("failed to unregister topic validator: %w", err)
+	}
+
+	// attempt to close the topic
+	err := tp.Close()
+	if err != nil {
+		return fmt.Errorf("could not close topic (%s): %w", topic, err)
+	}
+	n.topics[topic] = nil
+	delete(n.topics, topic)
+
+	n.logger.Debug().
+		Str("topic", topic.String()).
+		Msg("unsubscribed from topic")
+
+	return nil
+}
+
+// Publish publishes the given payload on the topic.
+// All errors returned from this function can be considered benign.
+func (n *Node) Publish(ctx context.Context, messageScope flownet.OutgoingMessageScope) error {
+	lg := n.logger.With().
+		Str("topic", messageScope.Topic().String()).
+		Interface("proto_message", messageScope.Proto()).
+		Str("payload_type", messageScope.PayloadType()).
+		Int("message_size", messageScope.Size()).Logger()
+	lg.Debug().Msg("received message to publish")
+
+	// convert the message to bytes to be put on the wire.
+	data, err := messageScope.Proto().Marshal()
+	if err != nil {
+		return fmt.Errorf("failed to marshal the message: %w", err)
+	}
+
+	msgSize := len(data)
+	if msgSize > DefaultMaxPubSubMsgSize {
+		// libp2p pubsub will silently drop the message if its size is greater than the configured pubsub max message size,
+		// hence return an error as this message is undeliverable
+		return fmt.Errorf("message size %d exceeds configured max message size %d", msgSize, DefaultMaxPubSubMsgSize)
+	}
+
+	ps, found := n.topics[messageScope.Topic()]
+	if !found {
+		return fmt.Errorf("could not find topic (%s)", messageScope.Topic())
+	}
+	err = ps.Publish(ctx, data)
+	if err != nil {
+		return fmt.Errorf("could not publish to topic (%s): %w", messageScope.Topic(), err)
+	}
+
+	lg.Debug().Msg("published message to topic")
+	return nil
+}
+
+// HasSubscription returns true if the node currently has an active subscription to the topic.
+func (n *Node) HasSubscription(topic channels.Topic) bool {
+	n.RLock()
+	defer n.RUnlock()
+	_, ok := n.subs[topic]
+	return ok
+}
+
+// Host returns a pointer to the host object of the node.
+func (n *Node) Host() host.Host {
+	return n.host
+}
+
+// WithDefaultUnicastProtocol overrides the default handler of the unicast manager and registers all preferred protocols.
+func (n *Node) WithDefaultUnicastProtocol(defaultHandler libp2pnet.StreamHandler, preferred []protocols.ProtocolName) error {
+	n.uniMgr.SetDefaultHandler(defaultHandler)
+	for _, p := range preferred {
+		err := n.uniMgr.Register(p)
+		if err != nil {
+			return fmt.Errorf("could not register unicast protocols: %w", err)
+		}
+	}
+
+	return nil
+}
+
+// WithPeersProvider sets the PeersProvider for the peer manager.
+// If a peer manager factory is set, this method will set the peer manager's PeersProvider.
+func (n *Node) WithPeersProvider(peersProvider p2p.PeersProvider) {
+	// TODO: chore: we should not allow overriding the peers provider if one is already set.
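+	// The provider installed below wraps the caller-supplied provider and filters out
+	// every peer that is currently disallow-listed, so the peer manager never selects
+	// such peers when (re)establishing connections.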
+	if n.peerManager != nil {
+		n.peerManager.SetPeersProvider(
+			func() peer.IDSlice {
+				authorizedPeersIds := peersProvider()
+				allowListedPeerIds := peer.IDSlice{} // subset of authorizedPeersIds that are not disallowed
+				for _, peerId := range authorizedPeersIds {
+					// exclude the disallowed peers from the authorized peers list
+					causes, disallowListed := n.disallowListedCache.IsDisallowListed(peerId)
+					if disallowListed {
+						n.logger.Warn().
+							Str("peer_id", p2plogging.PeerId(peerId)).
+							Str("causes", fmt.Sprintf("%v", causes)).
+							Msg("peer is disallowed for a cause, removing from authorized peers of peer manager")
+
+						// exclude the peer from the authorized peers list
+						continue
+					}
+					allowListedPeerIds = append(allowListedPeerIds, peerId)
+				}
+
+				return allowListedPeerIds
+			},
+		)
+	}
+}
+
+// PeerManagerComponent returns the component interface of the peer manager.
+func (n *Node) PeerManagerComponent() component.Component {
+	return n.peerManager
+}
+
+// RequestPeerUpdate requests an update to the peer connections of this node using the peer manager.
+func (n *Node) RequestPeerUpdate() {
+	if n.peerManager != nil {
+		n.peerManager.RequestPeerUpdate()
+	}
+}
+
+// IsConnected returns true if the given peer is a direct peer of this node, else false.
+// Peers are considered not connected if the underlying libp2p host reports the
+// peers as not connected and there are no connections in the connection list.
+// error returns:
+//   - network.ErrIllegalConnectionState if the underlying libp2p host reports connectedness as NotConnected but the connections list
+//     to the peer is not empty. This would normally indicate a bug within libp2p. Although network.ErrIllegalConnectionState indicates
+//     a bug in libp2p, there is a small chance this error is returned due to a race condition between the time we check
+//     Connectedness and ConnsToPeer: a connection could be established after we check Connectedness but right before we check ConnsToPeer.
+func (n *Node) IsConnected(peerID peer.ID) (bool, error) {
+	isConnected := n.host.Network().Connectedness(peerID)
+	numOfConns := len(n.host.Network().ConnsToPeer(peerID))
+	if isConnected == libp2pnet.NotConnected && numOfConns > 0 {
+		return true, flownet.NewConnectionStatusErr(peerID, numOfConns)
+	}
+	return isConnected == libp2pnet.Connected && numOfConns > 0, nil
+}
+
+// SetRouting sets the node's routing implementation.
+// SetRouting may be called at most once.
+func (n *Node) SetRouting(r routing.Routing) error {
+	if n.routing != nil {
+		// we should not allow overriding the routing implementation if one is already set; return an error to the caller.
+		return fmt.Errorf("routing already set")
+	}
+
+	n.routing = r
+	return nil
+}
+
+// Routing returns the node's routing implementation.
+func (n *Node) Routing() routing.Routing {
+	return n.routing
+}
+
+// PeerScoreExposer returns the node's peer score exposer implementation.
+// If the node was not configured with a peer score tracer, the returned exposer will be nil.
+func (n *Node) PeerScoreExposer() p2p.PeerScoreExposer {
+	return n.pubSub.PeerScoreExposer()
+}
+
+// SetPubSub sets the node's pubsub implementation.
+// SetPubSub may be called at most once.
+func (n *Node) SetPubSub(ps p2p.PubSubAdapter) {
+	if n.pubSub != nil {
+		n.logger.Fatal().Msg("pubSub already set")
+	}
+
+	n.pubSub = ps
+}
+
+// GetLocalMeshPeers returns the list of peers in the local mesh for the given topic.
+// Args:
+// - topic: the topic.
+// Returns:
+// - []peer.ID: the list of peers in the local mesh for the given topic.
+func (n *Node) GetLocalMeshPeers(topic channels.Topic) []peer.ID {
+	return n.pubSub.GetLocalMeshPeers(topic)
+}
+
+// SetComponentManager sets the component manager for the node.
+// SetComponentManager may be called at most once.
+func (n *Node) SetComponentManager(cm *component.ComponentManager) {
+	if n.Component != nil {
+		n.logger.Fatal().Msg("component already set")
+	}
+
+	n.Component = cm
+}
+
+// SetUnicastManager sets the unicast manager for the node.
+// SetUnicastManager may be called at most once.
+func (n *Node) SetUnicastManager(uniMgr p2p.UnicastManager) {
+	if n.uniMgr != nil {
+		n.logger.Fatal().Msg("unicast manager already set")
+	}
+	n.uniMgr = uniMgr
+}
+
+// OnDisallowListNotification is called when a new disallow list update notification is distributed.
+// Any error encountered while consuming the event must be handled internally.
+// The implementation must be concurrency safe.
+// Args:
+// - peerId: peer ID of the peer being disallow-listed.
+// - cause: cause of the peer being disallow-listed (only this cause is added to the peer's disallow-listed causes).
+// Returns:
+// - None
+func (n *Node) OnDisallowListNotification(peerId peer.ID, cause flownet.DisallowListedCause) {
+	causes, err := n.disallowListedCache.DisallowFor(peerId, cause)
+	if err != nil {
+		// returned error is fatal.
+		n.logger.Fatal().Err(err).Str("peer_id", p2plogging.PeerId(peerId)).Msg("failed to add peer to disallow list")
+	}
+
+	// TODO: this code should further be refactored to also log the Flow id.
+	n.logger.Warn().
+		Str("peer_id", p2plogging.PeerId(peerId)).
+		Str("notification_cause", cause.String()).
+		Str("causes", fmt.Sprintf("%v", causes)).
+		Msg("peer added to disallow list cache")
+}
+
+// OnAllowListNotification is called when a new allow list update notification is distributed.
+// Any error encountered while consuming the event must be handled internally.
+// The implementation must be concurrency safe.
+// Args:
+// - peerId: peer ID of the peer being allow-listed.
+// - cause: cause of the peer being allow-listed (only this cause is removed from the peer's disallow-listed causes).
+// Returns:
+// - None
+func (n *Node) OnAllowListNotification(peerId peer.ID, cause flownet.DisallowListedCause) {
+	remainingCauses := n.disallowListedCache.AllowFor(peerId, cause)
+
+	n.logger.Debug().
+		Str("peer_id", p2plogging.PeerId(peerId)).
+		Str("cause", cause.String()).
+		Str("remaining_causes", fmt.Sprintf("%v", remainingCauses)).
+		Msg("peer is allow-listed for cause")
+}
+
+// IsDisallowListed determines whether the given peer is disallow-listed for any reason.
+// Args:
+// - peerId: the peer to check.
+// Returns:
+// - []network.DisallowListedCause: the list of causes for which the given peer is disallow-listed. If the peer is not disallow-listed for any reason,
+//   a nil slice is returned.
+// - bool: true if the peer is disallow-listed for any reason, false otherwise.
+func (n *Node) IsDisallowListed(peerId peer.ID) ([]flownet.DisallowListedCause, bool) {
+	return n.disallowListedCache.IsDisallowListed(peerId)
+}
+
+// ActiveClustersChanged is called when the active clusters list of the collection clusters has changed.
+// The LibP2PNode implementation directly calls the ActiveClustersChanged method of the pubsub implementation, as
+// the pubsub implementation is responsible for the actual handling of the event.
+// Args:
+// - list: the new active clusters list.
+// Returns:
+// - none
+func (n *Node) ActiveClustersChanged(list flow.ChainIDList) {
+	n.pubSub.ActiveClustersChanged(list)
+}
diff --git a/network/p2p/node/libp2pNode_test.go b/network/p2p/node/libp2pNode_test.go
new file mode 100644
index 00000000000..04c25e4694a
--- /dev/null
+++ b/network/p2p/node/libp2pNode_test.go
@@ -0,0 +1,344 @@
+package p2pnode_test
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/peerstore"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	mockmodule "github.com/onflow/flow-go/module/mock"
+	"github.com/onflow/flow-go/network/channels"
+	"github.com/onflow/flow-go/network/internal/p2pfixtures"
+	"github.com/onflow/flow-go/network/internal/p2putils"
+	"github.com/onflow/flow-go/network/p2p"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
+	p2ptest "github.com/onflow/flow-go/network/p2p/test"
+	"github.com/onflow/flow-go/network/p2p/utils"
+	validator "github.com/onflow/flow-go/network/validator/pubsub"
+	"github.com/onflow/flow-go/utils/concurrentmap"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestMultiAddress evaluates correct translations from
+// dns and ip4 to libp2p multi-address
+func TestMultiAddress(t *testing.T) {
+	key := p2pfixtures.NetworkingKeyFixtures(t)
+
+	tt := []struct {
+		identity     *flow.Identity
+		multiaddress string
+	}{
+		{
+			// ip4 test case
+			identity:     unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("172.16.254.1:72")),
+			multiaddress: "/ip4/172.16.254.1/tcp/72",
+		},
+		{
+			// dns test case
+			identity:     unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("consensus:2222")),
+			multiaddress: "/dns4/consensus/tcp/2222",
+		},
+		{
+			// dns test case
+			identity:     unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("flow.com:3333")),
+			multiaddress: "/dns4/flow.com/tcp/3333",
+		},
+	}
+
+	for _, tc := range tt {
+		ip, port, _, err := p2putils.NetworkingInfo(tc.identity.IdentitySkeleton)
+		require.NoError(t, err)
+
+		actualAddress := utils.MultiAddressStr(ip, port)
+		assert.Equal(t, tc.multiaddress, actualAddress, "incorrect multi-address translation")
+	}
+}
+
+// TestSingleNodeLifeCycle evaluates the correct lifecycle transition of a node from start to stop
+func TestSingleNodeLifeCycle(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	idProvider := mockmodule.NewIdentityProvider(t)
+	node, _ := p2ptest.NodeFixture(t, unittest.IdentifierFixture(), "test_single_node_life_cycle", idProvider)
+
+	node.Start(signalerCtx)
+	unittest.RequireComponentsReadyBefore(t, 100*time.Millisecond, node)
+
+	cancel()
+	unittest.RequireComponentsDoneBefore(t, 100*time.Millisecond, node)
+}
+
+// TestGetPeerInfo evaluates the deterministic translation between a node's address and
+// its libp2p info. It generates an address, and checks whether repeated translations
+// yield the same info or not.
+func TestGetPeerInfo(t *testing.T) { + for i := 0; i < 10; i++ { + key := p2pfixtures.NetworkingKeyFixtures(t) + + // creates node-i identity + identity := unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("1.1.1.1:0")) + + // translates node-i address into info + info, err := utils.PeerAddressInfo(identity.IdentitySkeleton) + require.NoError(t, err) + + // repeats the translation for node-i + for j := 0; j < 10; j++ { + rinfo, err := utils.PeerAddressInfo(identity.IdentitySkeleton) + require.NoError(t, err) + assert.Equal(t, rinfo.String(), info.String(), "inconsistent id generated") + } + } +} + +// TestAddPeers checks if nodes can be added as peers to a given node +func TestAddPeers(t *testing.T) { + count := 3 + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) + + nodes, identities := p2ptest.NodesFixture(t, unittest.IdentifierFixture(), "test_add_peers", count, idProvider) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) + + // add the remaining nodes to the first node as its set of peers + for _, identity := range identities[1:] { + peerInfo, err := utils.PeerAddressInfo(identity.IdentitySkeleton) + require.NoError(t, err) + require.NoError(t, nodes[0].ConnectToPeer(ctx, peerInfo)) + } + + // Checks if both of the other nodes have been added as peers to the first node + assert.Len(t, nodes[0].Host().Network().Peers(), count-1) +} + +// TestRemovePeers checks if nodes can be removed as peers from a given node +func TestRemovePeers(t *testing.T) { + count := 3 + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) + // create nodes + nodes, identities := p2ptest.NodesFixture(t, unittest.IdentifierFixture(), "test_remove_peers", count, idProvider) + peerInfos, errs := utils.PeerInfosFromIDs(identities) + assert.Len(t, errs, 0) + + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) + + // add nodes two and three to the first node as its peers + for _, pInfo := range peerInfos[1:] { + require.NoError(t, nodes[0].ConnectToPeer(ctx, pInfo)) + } + + // check if all other nodes have been added as peers to the first node + assert.Len(t, nodes[0].Host().Network().Peers(), count-1) + + // disconnect from each peer and assert that the connection no longer exists + for _, pInfo := range peerInfos[1:] { + require.NoError(t, nodes[0].RemovePeer(pInfo.ID)) + assert.Equal(t, network.NotConnected, nodes[0].Host().Network().Connectedness(pInfo.ID)) + } +} + +func TestConnGater(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + sporkID := unittest.IdentifierFixture() + idProvider := mockmodule.NewIdentityProvider(t) + + node1Peers := concurrentmap.New[peer.ID, struct{}]() + node1, identity1 := p2ptest.NodeFixture(t, sporkID, t.Name(), idProvider, p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(pid peer.ID) error { + if !node1Peers.Has(pid) { + return fmt.Errorf("peer id not found: %s", p2plogging.PeerId(pid)) + } + return nil + }))) + idProvider.On("ByPeerID", node1.ID()).Return(&identity1, true).Maybe() + + p2ptest.StartNode(t, signalerCtx, node1) + defer p2ptest.StopNode(t, node1, cancel) + + node1Info, err := 
utils.PeerAddressInfo(identity1.IdentitySkeleton)
+	assert.NoError(t, err)
+
+	node2Peers := concurrentmap.New[peer.ID, struct{}]()
+	node2, identity2 := p2ptest.NodeFixture(t, sporkID, t.Name(), idProvider, p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(pid peer.ID) error {
+		if !node2Peers.Has(pid) {
+			return fmt.Errorf("id not found: %s", p2plogging.PeerId(pid))
+		}
+		return nil
+	})))
+	idProvider.On("ByPeerID", node2.ID()).Return(&identity2, true).Maybe()
+
+	p2ptest.StartNode(t, signalerCtx, node2)
+	defer p2ptest.StopNode(t, node2, cancel)
+
+	node2Info, err := utils.PeerAddressInfo(identity2.IdentitySkeleton)
+	assert.NoError(t, err)
+
+	node1.Host().Peerstore().AddAddrs(node2Info.ID, node2Info.Addrs, peerstore.PermanentAddrTTL)
+	node2.Host().Peerstore().AddAddrs(node1Info.ID, node1Info.Addrs, peerstore.PermanentAddrTTL)
+
+	err = node1.OpenAndWriteOnStream(ctx, node2Info.ID, t.Name(), func(stream network.Stream) error {
+		// no-op, as the connection should not be possible
+		return nil
+	})
+	require.ErrorContains(t, err, "target node is not on the approved list of nodes")
+
+	err = node2.OpenAndWriteOnStream(ctx, node1Info.ID, t.Name(), func(stream network.Stream) error {
+		// no-op, as the connection should not be possible
+		return nil
+	})
+	require.ErrorContains(t, err, "target node is not on the approved list of nodes")
+
+	node1Peers.Add(node2Info.ID, struct{}{})
+	err = node1.OpenAndWriteOnStream(ctx, node2Info.ID, t.Name(), func(stream network.Stream) error {
+		// no-op; node2's gater still rejects node1, so the connection should not be possible
+		return nil
+	})
+	require.Error(t, err)
+
+	node2Peers.Add(node1Info.ID, struct{}{})
+	err = node1.OpenAndWriteOnStream(ctx, node2Info.ID, t.Name(), func(stream network.Stream) error {
+		// no-op; both gaters now allow the peers, so stream creation should succeed
+		return nil
+	})
+	require.NoError(t, err)
+}
+
+// TestNode_HasSubscription checks that HasSubscription returns true once a node has subscribed to a topic.
+func TestNode_HasSubscription(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
+	sporkID := unittest.IdentifierFixture()
+	node, _ := p2ptest.NodeFixture(t, sporkID, "test_has_subscription", idProvider)
+
+	p2ptest.StartNode(t, signalerCtx, node)
+	defer p2ptest.StopNode(t, node, cancel)
+
+	logger := unittest.Logger()
+
+	topicValidator := validator.TopicValidator(logger, func(id peer.ID) error {
+		return nil
+	})
+
+	// create test topic
+	topic := channels.TopicFromChannel(channels.TestNetworkChannel, unittest.IdentifierFixture())
+	_, err := node.Subscribe(topic, topicValidator)
+	require.NoError(t, err)
+
+	require.True(t, node.HasSubscription(topic))
+
+	// create topic with no subscription
+	topic = channels.TopicFromChannel(channels.ConsensusCommittee, unittest.IdentifierFixture())
+	require.False(t, node.HasSubscription(topic))
+}
+
+// TestCreateStream_SinglePairwiseConnection ensures that despite the number of concurrent streams created from peer -> peer, only a single
+// connection will ever be created between two peers on initial peer dialing and subsequent streams will reuse that connection.
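+// The single-connection behavior relies on libp2p's swarm reusing an existing connection
+// whenever a new stream to the same peer is requested; only when no usable connection
+// exists does stream creation trigger a fresh dial (an assumption about libp2p's
+// connection-reuse semantics, stated here for context).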
+func TestCreateStream_SinglePairwiseConnection(t *testing.T) {
+	sporkId := unittest.IdentifierFixture()
+	nodeCount := 3
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
+	nodes, ids := p2ptest.NodesFixture(t, sporkId, "test_create_stream_single_pairwise_connection", nodeCount, idProvider, p2ptest.WithDefaultResourceManager())
+	idProvider.SetIdentities(ids)
+
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)
+
+	numOfStreamsPerNode := 100 // create a large number of streams per node per connection to ensure the resource manager does not cause starvation of resources
+	expectedTotalNumOfStreams := 600
+
+	// create a number of streams concurrently between each pair of nodes
+	streamChan := make(chan network.Stream, expectedTotalNumOfStreams)
+	createConcurrentStreams(t, ctx, nodes, ids, numOfStreamsPerNode, streamChan, expectedTotalNumOfStreams)
+
+	// ensure only a single connection exists between all nodes
+	ensureSinglePairwiseConnection(t, nodes)
+	close(streamChan)
+}
+
+// createConcurrentStreams attempts to create n streams concurrently between each combination of node pairs.
+func createConcurrentStreams(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, ids flow.IdentityList, n int, streams chan network.Stream, expectedTotalNumOfStreams int) {
+	ctx, cancel := context.WithCancel(ctx)
+	// cancel called below to shutdown all streams
+
+	streamHandler := func(stream network.Stream) error {
+		streams <- stream
+
+		// wait for the done signal to close the stream
+		<-ctx.Done()
+		return nil
+	}
+
+	var wg sync.WaitGroup
+	for _, this := range nodes {
+		for i, other := range nodes {
+			if this == other {
+				continue
+			}
+
+			pInfo, err := utils.PeerAddressInfo(ids[i].IdentitySkeleton)
+			require.NoError(t, err)
+			this.Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)
+
+			for j := 0; j < n; j++ {
+				wg.Add(1)
+				go func(sender p2p.LibP2PNode) {
+					defer wg.Done()
+					err := sender.OpenAndWriteOnStream(ctx, pInfo.ID, t.Name(), streamHandler)
+					require.NoError(t, err)
+				}(this)
+			}
+		}
+		// brief sleep to prevent the sender and receiver from dialing each other at the same time from separate goroutines,
+		// which would result in 2 connections (one created by each node); this can happen because we call CreateStream concurrently.
+		time.Sleep(500 * time.Millisecond)
+	}
+
+	// wait until all streams are created
+	require.Eventually(t, func() bool {
+		return len(streams) >= expectedTotalNumOfStreams
+	}, 3*time.Second, 10*time.Millisecond, "could not create streams on time")
+
+	require.Len(t,
+		streams,
+		expectedTotalNumOfStreams,
+		fmt.Sprintf("expected %d total streams created, got %d", expectedTotalNumOfStreams, len(streams)))
+
+	// cancel the context to trigger streams to shutdown
+	cancel()
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 1*time.Second, "could not shutdown streams on time")
+}
+
+// ensureSinglePairwiseConnection ensures each node in the list has exactly one connection to every other node in the list.
+func ensureSinglePairwiseConnection(t *testing.T, nodes []p2p.LibP2PNode) {
+	for _, this := range nodes {
+		for _, other := range nodes {
+			if this == other {
+				continue
+			}
+			require.Len(t, this.Host().Network().ConnsToPeer(other.ID()), 1)
+		}
+	}
+}
diff --git a/network/p2p/node/libp2pStream_test.go b/network/p2p/node/libp2pStream_test.go
new file mode 100644
index 00000000000..9b8e8453014
--- /dev/null
+++ b/network/p2p/node/libp2pStream_test.go
@@ -0,0 +1,591 @@
+package p2pnode_test
+
+import (
+	"bufio"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"regexp"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core"
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peerstore"
+	"github.com/libp2p/go-libp2p/p2p/net/swarm"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	mockmodule "github.com/onflow/flow-go/module/mock"
+	"github.com/onflow/flow-go/network/internal/p2pfixtures"
+	"github.com/onflow/flow-go/network/internal/p2putils"
+	"github.com/onflow/flow-go/network/p2p"
+	p2ptest "github.com/onflow/flow-go/network/p2p/test"
+	"github.com/onflow/flow-go/network/p2p/unicast"
+	"github.com/onflow/flow-go/network/p2p/unicast/protocols"
+	"github.com/onflow/flow-go/network/p2p/utils"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestStreamClosing tests 1-1 communication with streams closed using libp2p handler.FullClose
+func TestStreamClosing(t *testing.T) {
+	count := 10
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	var msgRegex = regexp.MustCompile("^hello[0-9]")
+
+	handler, streamCloseWG := mockStreamHandlerForMessages(t, ctx, count, msgRegex)
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
+	// Creates nodes
+	nodes, identities := p2ptest.NodesFixture(t, unittest.IdentifierFixture(), "test_stream_closing", 2, idProvider, p2ptest.WithDefaultStreamHandler(handler))
+	idProvider.SetIdentities(identities)
+
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)
+
+	nodeInfo1, err := utils.PeerAddressInfo(identities[1].IdentitySkeleton)
+	require.NoError(t, err)
+
+	senderWG := sync.WaitGroup{}
+	senderWG.Add(count)
+	for i := 0; i < count; i++ {
+		go func(i int) {
+			// Create stream from node 1 to node 2 (reuse if one already exists)
+			nodes[0].Host().Peerstore().AddAddrs(nodeInfo1.ID, nodeInfo1.Addrs, peerstore.AddressTTL)
+			err := nodes[0].OpenAndWriteOnStream(ctx, nodeInfo1.ID, t.Name(), func(s network.Stream) error {
+				w := bufio.NewWriter(s)
+
+				// Send message from node 1 to 2
+				msg := fmt.Sprintf("hello%d\n", i)
+				_, err := w.WriteString(msg)
+				assert.NoError(t, err)
+
+				// Flush the stream
+				require.NoError(t, w.Flush())
+
+				// returning will close the stream
+				return nil
+			})
+			require.NoError(t, err)
+
+			senderWG.Done()
+		}(i)
+	}
+
+	// wait for stream to be closed
+	unittest.RequireReturnsBefore(t, senderWG.Wait, 3*time.Second, "could not send messages on time")
+	unittest.RequireReturnsBefore(t, streamCloseWG.Wait, 3*time.Second, "could not close stream at receiver side")
+}
+
+// mockStreamHandlerForMessages creates a stream handler that expects receiving `msgCount` unique messages that match the input regexp.
+// The returned wait group is released when all messages are completely received and the associated streams are closed.
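+// Each message is expected to be newline-terminated; the handler reads messages line by
+// line and treats io.EOF as the sender having closed its side, at which point the
+// receiver closes the stream and releases one unit of the wait group.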
+func mockStreamHandlerForMessages(t *testing.T, ctx context.Context, msgCount int, msgRegexp *regexp.Regexp) (network.StreamHandler, *sync.WaitGroup) {
+	streamCloseWG := &sync.WaitGroup{}
+	streamCloseWG.Add(msgCount)
+
+	h := func(s network.Stream) {
+		go func(s network.Stream) {
+			rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s))
+			for {
+				str, err := rw.ReadString('\n')
+				if err != nil {
+					if errors.Is(err, io.EOF) {
+						err := s.Close()
+						require.NoError(t, err)
+
+						streamCloseWG.Done()
+						return
+					}
+					require.Fail(t, fmt.Sprintf("received error %v", err))
+					err = s.Reset()
+					require.NoError(t, err)
+					return
+				}
+				select {
+				case <-ctx.Done():
+					return
+				default:
+					require.True(t, msgRegexp.MatchString(str), str)
+				}
+			}
+		}(s)
+	}
+	return h, streamCloseWG
+}
+
+// TestCreateStream_WithDefaultUnicast evaluates correctness of creating default (tcp) unicast streams between two libp2p nodes.
+func TestCreateStream_WithDefaultUnicast(t *testing.T) {
+	sporkId := unittest.IdentifierFixture()
+	testCreateStream(t,
+		sporkId,
+		nil, // sends nil as preferred unicast so that nodes run on default plain tcp streams.
+		protocols.FlowProtocolID(sporkId))
+}
+
+// TestCreateStream_WithPreferredGzipUnicast evaluates correctness of creating gzip-compressed tcp unicast streams between two libp2p nodes.
+func TestCreateStream_WithPreferredGzipUnicast(t *testing.T) {
+	sporkId := unittest.IdentifierFixture()
+	testCreateStream(t,
+		sporkId,
+		[]protocols.ProtocolName{protocols.GzipCompressionUnicast},
+		protocols.FlowGzipProtocolId(sporkId))
+}
+
+// testCreateStream checks that a new stream of the "preferred" type is created each time CreateStream is called, and that an existing stream is not
+// reused. The "preferred" stream type is the one with the largest index in the `unicasts` list.
+// To check that the streams are of the "preferred" type, it evaluates the protocol id of each established stream against the input `protocolID`.
+func testCreateStream(t *testing.T, sporkId flow.Identifier, unicasts []protocols.ProtocolName, protocolID core.ProtocolID) {
+	count := 2
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
+	nodes, identities := p2ptest.NodesFixture(t, sporkId, "test_create_stream", count, idProvider, p2ptest.WithPreferredUnicasts(unicasts))
+	idProvider.SetIdentities(identities)
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+
+	id2 := identities[1]
+
+	// Assert that there is no outbound stream to the target yet
+	require.Equal(t, 0, p2putils.CountStream(nodes[0].Host(), nodes[1].ID(), p2putils.Protocol(protocolID), p2putils.Direction(network.DirOutbound)))
+
+	// Now attempt to create 100 outbound streams to the same destination by calling CreateStream
+	streamCount := 100
+	var streams []network.Stream
+	var streamsMu sync.Mutex // guards streams, which is appended to from multiple goroutines
+	allStreamsClosedWg := sync.WaitGroup{}
+	for i := 0; i < streamCount; i++ {
+		allStreamsClosedWg.Add(1)
+		pInfo, err := utils.PeerAddressInfo(id2.IdentitySkeleton)
+		require.NoError(t, err)
+		nodes[0].Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)
+		go func() {
+			err := nodes[0].OpenAndWriteOnStream(ctx, pInfo.ID, t.Name(), func(stream network.Stream) error {
+				require.NotNil(t, stream)
+				streamsMu.Lock()
+				streams = append(streams, stream)
+				streamsMu.Unlock()
+				// when we return from this function, the stream will be closed, but we need to keep it open for the test;
+				// hence we wait for the context to be done
+				<-ctx.Done()
+				allStreamsClosedWg.Done()
+				return nil
+			})
+			if err != nil {
+				// we tolerate errors that stem from closing the stream, since the test itself closes the streams.
+				require.Contains(t, err.Error(), "failed to close the stream")
+			}
+		}()
+	}
+
+	require.Eventually(t, func() bool {
+		return streamCount == p2putils.CountStream(nodes[0].Host(), nodes[1].ID(), p2putils.Protocol(protocolID), p2putils.Direction(network.DirOutbound))
+	}, 5*time.Second, 100*time.Millisecond, "could not create streams on time")
+
+	// checks that the number of connections is 1 despite the number of streams; i.e., all streams are created on the same connection
+	require.Len(t, nodes[0].Host().Network().Conns(), 1)
+
+	// we don't use defer here: the moment we stop the nodes, the streams are closed, and we want to assess the number of streams first
+	p2ptest.StopNodes(t, nodes, cancel)
+
+	// wait for all streams to be closed
+	unittest.RequireReturnsBefore(t, allStreamsClosedWg.Wait, 1*time.Second, "could not close streams on time")
+}
+
+// TestCreateStream_FallBack checks that two libp2p nodes with conflicting supported unicast protocols fall back
+// to the default (tcp) unicast protocol during their negotiation.
+// To do this, a node with preferred gzip-compressed tcp unicast tries creating a stream to another node that only
+// supports default plain tcp unicast. The test evaluates that the unicast streams established between the two nodes
+// are of the default plain tcp type.
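+// The fallback is driven by protocol negotiation: the dialer proposes its protocol ids in
+// order of preference and the two sides settle on the first protocol the listener also
+// supports, which here is the plain tcp protocol (an assumption about the underlying
+// multistream-select negotiation).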
+func TestCreateStream_FallBack(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	// Creates two nodes: one with preferred gzip, and the other with the default protocol
+	sporkId := unittest.IdentifierFixture()
+	idProvider := mockmodule.NewIdentityProvider(t)
+	thisNode, thisID := p2ptest.NodeFixture(t,
+		sporkId,
+		t.Name(),
+		idProvider,
+		p2ptest.WithPreferredUnicasts([]protocols.ProtocolName{protocols.GzipCompressionUnicast}))
+	otherNode, otherId := p2ptest.NodeFixture(t,
+		sporkId,
+		t.Name(),
+		idProvider)
+	identities := []flow.Identity{thisID, otherId}
+	nodes := []p2p.LibP2PNode{thisNode, otherNode}
+	for i, node := range nodes {
+		idProvider.On("ByPeerID", node.ID()).Return(&identities[i], true).Maybe()
+	}
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+
+	// Assert that there is no outbound stream to the target yet (neither default nor preferred)
+	defaultProtocolId := protocols.FlowProtocolID(sporkId)
+	preferredProtocolId := protocols.FlowGzipProtocolId(sporkId)
+	require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.ID(), p2putils.Protocol(defaultProtocolId), p2putils.Direction(network.DirOutbound)))
+	require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.ID(), p2putils.Protocol(preferredProtocolId), p2putils.Direction(network.DirOutbound)))
+
+	// Now attempt to create 10 outbound streams to the same destination by calling CreateStream
+	streamCount := 10
+	var streams []network.Stream
+	var streamsMu sync.Mutex // guards streams, which is appended to from multiple goroutines
+	allStreamsClosedWg := sync.WaitGroup{}
+	for i := 0; i < streamCount; i++ {
+		allStreamsClosedWg.Add(1)
+		pInfo, err := utils.PeerAddressInfo(otherId.IdentitySkeleton)
+		require.NoError(t, err)
+		thisNode.Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)
+
+		// a new stream must be created
+		go func() {
+			// the returned error is deliberately not asserted; closing the streams at test teardown may surface benign errors
+			_ = thisNode.OpenAndWriteOnStream(ctx, pInfo.ID, t.Name(), func(stream network.Stream) error {
+				require.NotNil(t, stream)
+				streamsMu.Lock()
+				streams = append(streams, stream)
+				streamsMu.Unlock()
+
+				// when we return from this function, the stream will be closed, but we need to keep it open for the test;
+				// hence we wait for the context to be done
+				<-ctx.Done()
+				allStreamsClosedWg.Done()
+				return nil
+			})
+		}()
+	}
+
+	// wait for the streams to be created on the default protocol id.
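+	// (the gzip-preferring dialer is expected to have negotiated down to the plain protocol,
+	// so all streams should be counted under defaultProtocolId)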
+ require.Eventually(t, func() bool { + return streamCount == p2putils.CountStream(nodes[0].Host(), nodes[1].ID(), p2putils.Protocol(defaultProtocolId), p2putils.Direction(network.DirOutbound)) + }, 5*time.Second, 100*time.Millisecond, "could not create streams on time") + + // no stream must be created on the preferred protocol id + require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.ID(), p2putils.Protocol(preferredProtocolId), p2putils.Direction(network.DirOutbound))) + + // checks that the number of connections is 1 despite the number of streams; i.e., all streams are created on the same connection + require.Len(t, nodes[0].Host().Network().Conns(), 1) + + // we don't use defer as the moment we stop the nodes, the streams will be closed, and we want to assess the number of streams + p2ptest.StopNodes(t, nodes, cancel) + + // wait for all streams to be closed + unittest.RequireReturnsBefore(t, allStreamsClosedWg.Wait, 1*time.Second, "could not close streams on time") +} + +// TestCreateStreamIsConcurrencySafe tests that the CreateStream is concurrency safe +func TestCreateStreamIsConcurrencySafe(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) + // create two nodes + nodes, identities := p2ptest.NodesFixture(t, + unittest.IdentifierFixture(), + t.Name(), + 2, + idProvider) + require.Len(t, identities, 2) + idProvider.SetIdentities(flow.IdentityList{identities[0], identities[1]}) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) + + nodeInfo1, err := utils.PeerAddressInfo(identities[1].IdentitySkeleton) + require.NoError(t, err) + + wg := sync.WaitGroup{} + + // create a gate which gates the call to CreateStream for all concurrent go routines + gate := make(chan struct{}) + + createStream := func() { + <-gate + nodes[0].Host().Peerstore().AddAddrs(nodeInfo1.ID, nodeInfo1.Addrs, peerstore.AddressTTL) + err := nodes[0].OpenAndWriteOnStream(ctx, nodeInfo1.ID, t.Name(), func(stream network.Stream) error { + // no-op stream writer, we just check that the stream was created + return nil + }) + require.NoError(t, err) // assert that stream was successfully created + wg.Done() + } + + // kick off 10 concurrent calls to CreateStream + for i := 0; i < 10; i++ { + wg.Add(1) + go createStream() + } + // open the gate by closing the channel + close(gate) + + // no call should block + unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) +} + +// TestNoBackoffWhenCreatingStream checks that backoff is not enabled between attempts to connect to a remote peer +// for one-to-one direct communication. 
+func TestNoBackoffWhenCreatingStream(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// set up per-node contexts so they can be stopped independently
+	ctx1, cancel1 := context.WithCancel(ctx)
+	signalerCtx1 := irrecoverable.NewMockSignalerContext(t, ctx1)
+
+	ctx2, cancel2 := context.WithCancel(ctx)
+	signalerCtx2 := irrecoverable.NewMockSignalerContext(t, ctx2)
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
+	count := 2
+	// Creates nodes
+	nodes, identities := p2ptest.NodesFixture(t,
+		unittest.IdentifierFixture(),
+		t.Name(),
+		count,
+		idProvider)
+	node1 := nodes[0]
+	node2 := nodes[1]
+	idProvider.SetIdentities(flow.IdentityList{identities[0], identities[1]})
+	p2ptest.StartNode(t, signalerCtx1, node1)
+	p2ptest.StartNode(t, signalerCtx2, node2)
+
+	// stop node 2 immediately
+	p2ptest.StopNode(t, node2, cancel2)
+	defer p2ptest.StopNode(t, node1, cancel1)
+
+	id2 := identities[1]
+	pInfo, err := utils.PeerAddressInfo(id2.IdentitySkeleton)
+	require.NoError(t, err)
+	nodes[0].Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+
+	maxTimeToWait := time.Duration(cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes) * unicast.MaxRetryJitter * time.Millisecond
+
+	// we need to add some buffer time so that RequireReturnsBefore waits slightly longer than maxTimeToWait to avoid
+	// a race condition
+	someGraceTime := 100 * time.Millisecond
+	totalWaitTime := maxTimeToWait + someGraceTime
+
+	// each CreateStream() call may try to connect up to MaxDialRetryAttemptTimes (3) times.
+
+	// there are 2 scenarios that we need to account for:
+	//
+	// 1. machines where a timeout occurs on the first connection attempt - this can be due to local firewall rules or other processes running on the machine.
+	//    In this case, we need to create a scenario where a backoff would normally have occurred. This is why we initiate a second connection attempt.
+	//    Libp2p remembers the peer we are trying to connect to between CreateStream() calls and would have initiated a backoff if backoff weren't turned off.
+	//    The second CreateStream() call will make a second connection attempt MaxDialRetryAttemptTimes times, and that should never result in a backoff error.
+	//
+	// 2. machines where a timeout does NOT occur on the first connection attempt - this is the case on CI machines and some local dev machines without a firewall / too many other processes.
+	//    In this case, there will be MaxDialRetryAttemptTimes (3) connection attempts on the first CreateStream() call and MaxDialRetryAttemptTimes (3) attempts on the second CreateStream() call.
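+	//
+	// in both scenarios the invariant under test is the same: no attempt may fail with swarm.ErrDialBackoff,
+	// since backoff between dial attempts is expected to be disabled for one-to-one connections.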
+
+	// make two separate stream creation attempts and assert that no connection backoff happened
+	for i := 0; i < 2; i++ {
+
+		// limit the maximum amount of time to wait for a connection to be established by using a context that times out
+		ctx, cancel := context.WithTimeout(ctx, maxTimeToWait)
+
+		unittest.RequireReturnsBefore(t, func() {
+			err = node1.OpenAndWriteOnStream(ctx, pInfo.ID, t.Name(), func(stream network.Stream) error {
+				// do nothing, this is a no-op stream writer; we just check that the stream was created
+				return nil
+			})
+			require.Error(t, err)
+		}, totalWaitTime, fmt.Sprintf("create stream did not error within %s", totalWaitTime.String()))
+		require.NotContainsf(t, err.Error(), swarm.ErrDialBackoff.Error(), "swarm dialer unexpectedly did a back off for a one-to-one connection")
+		cancel()
+	}
+}
+
+// TestUnicastOverStream_WithPlainStream checks that two nodes can send and receive unicast messages on libp2p plain streams.
+func TestUnicastOverStream_WithPlainStream(t *testing.T) {
+	testUnicastOverStream(t)
+}
+
+// TestUnicastOverStream_WithGzipStreamCompression checks that two nodes can send and receive unicast messages on gzip-compressed streams
+// when both nodes have gzip stream compression enabled.
+func TestUnicastOverStream_WithGzipStreamCompression(t *testing.T) {
+	testUnicastOverStream(t, p2ptest.WithPreferredUnicasts([]protocols.ProtocolName{protocols.GzipCompressionUnicast}))
+}
+
+// testUnicastOverStream sends a message from node 1 to node 2 and then from node 2 to node 1 over a unicast stream.
+func testUnicastOverStream(t *testing.T, opts ...p2ptest.NodeFixtureParameterOption) {
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	// Creates nodes
+	sporkId := unittest.IdentifierFixture()
+	idProvider := mockmodule.NewIdentityProvider(t)
+	streamHandler1, inbound1 := p2ptest.StreamHandlerFixture(t)
+	node1, id1 := p2ptest.NodeFixture(t,
+		sporkId,
+		t.Name(),
+		idProvider,
+		append(opts, p2ptest.WithDefaultStreamHandler(streamHandler1))...)
+
+	streamHandler2, inbound2 := p2ptest.StreamHandlerFixture(t)
+	node2, id2 := p2ptest.NodeFixture(t,
+		sporkId,
+		t.Name(),
+		idProvider,
+		append(opts, p2ptest.WithDefaultStreamHandler(streamHandler2))...)
+	ids := flow.IdentityList{&id1, &id2}
+	nodes := []p2p.LibP2PNode{node1, node2}
+	for i, node := range nodes {
+		idProvider.On("ByPeerID", node.ID()).Return(ids[i], true).Maybe()
+	}
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)
+
+	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
+
+	p2pfixtures.EnsureMessageExchangeOverUnicast(t,
+		ctx,
+		nodes,
+		[]chan string{inbound1, inbound2},
+		p2pfixtures.LongStringMessageFactoryFixture(t))
+}
+
+// TestUnicastOverStream_Fallback checks that two nodes with asymmetric sets of preferred unicast protocols can create streams and
+// send and receive unicasts. Despite the asymmetry, the nodes must fall back to the libp2p plain stream during negotiation.
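+// As in the stream-creation fallback test above, negotiation settles on the plain protocol
+// because it is the only unicast protocol common to both nodes' supported sets.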
+func TestUnicastOverStream_Fallback(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + // Creates nodes + // node1: supports only plain unicast protocol + // node2: supports plain and gzip + sporkId := unittest.IdentifierFixture() + idProvider := mockmodule.NewIdentityProvider(t) + streamHandler1, inbound1 := p2ptest.StreamHandlerFixture(t) + node1, id1 := p2ptest.NodeFixture(t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithDefaultStreamHandler(streamHandler1)) + + streamHandler2, inbound2 := p2ptest.StreamHandlerFixture(t) + node2, id2 := p2ptest.NodeFixture(t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithDefaultStreamHandler(streamHandler2), + p2ptest.WithPreferredUnicasts([]protocols.ProtocolName{protocols.GzipCompressionUnicast})) + + ids := flow.IdentityList{&id1, &id2} + nodes := []p2p.LibP2PNode{node1, node2} + for i, node := range nodes { + idProvider.On("ByPeerID", node.ID()).Return(ids[i], true).Maybe() + + } + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) + + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + p2pfixtures.EnsureMessageExchangeOverUnicast( + t, + ctx, + nodes, + []chan string{inbound1, inbound2}, p2pfixtures.LongStringMessageFactoryFixture(t)) +} + +// TestCreateStreamTimeoutWithUnresponsiveNode tests that the CreateStream call does not block longer than the +// timeout interval +func TestCreateStreamTimeoutWithUnresponsiveNode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) + // creates a regular node + nodes, identities := p2ptest.NodesFixture(t, + unittest.IdentifierFixture(), + t.Name(), + 1, + idProvider) + require.Len(t, identities, 1) + idProvider.SetIdentities(identities) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) + + // create a silent node which never replies + listener, silentNodeId := p2pfixtures.SilentNodeFixture(t) + defer func() { + require.NoError(t, listener.Close()) + }() + + silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId.IdentitySkeleton) + require.NoError(t, err) + + timeout := 1 * time.Second + tctx, tcancel := context.WithTimeout(ctx, timeout) + defer tcancel() + + // attempt to create a stream from node 1 to node 2 and assert that it fails after timeout + grace := 100 * time.Millisecond + unittest.AssertReturnsBefore(t, + func() { + nodes[0].Host().Peerstore().AddAddrs(silentNodeInfo.ID, silentNodeInfo.Addrs, peerstore.AddressTTL) + err = nodes[0].OpenAndWriteOnStream(tctx, silentNodeInfo.ID, t.Name(), func(stream network.Stream) error { + // do nothing, this is a no-op stream writer, we just check that the stream was created + return nil + }) + require.Error(t, err) + }, timeout+grace) +} + +// TestCreateStreamIsConcurrent tests that CreateStream calls can be made concurrently such that one blocked call +// does not block another concurrent call. 
+func TestCreateStreamIsConcurrent(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
+	// create two regular nodes
+	goodNodes, goodNodeIds := p2ptest.NodesFixture(t,
+		unittest.IdentifierFixture(),
+		t.Name(),
+		2,
+		idProvider)
+	require.Len(t, goodNodeIds, 2)
+	idProvider.SetIdentities(goodNodeIds)
+	p2ptest.StartNodes(t, signalerCtx, goodNodes)
+	defer p2ptest.StopNodes(t, goodNodes, cancel)
+
+	goodNodeInfo1, err := utils.PeerAddressInfo(goodNodeIds[1].IdentitySkeleton)
+	require.NoError(t, err)
+
+	// create a silent node which never replies
+	listener, silentNodeId := p2pfixtures.SilentNodeFixture(t)
+	defer func() {
+		require.NoError(t, listener.Close())
+	}()
+	silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId.IdentitySkeleton)
+	require.NoError(t, err)
+
+	// attempts to create a stream to the unresponsive node and makes sure that the stream creation is blocked
+	blockedCallCh := unittest.RequireNeverReturnBefore(t,
+		func() {
+			goodNodes[0].Host().Peerstore().AddAddrs(silentNodeInfo.ID, silentNodeInfo.Addrs, peerstore.AddressTTL)
+			// the subsequent call will be blocked
+			_ = goodNodes[0].OpenAndWriteOnStream(ctx, silentNodeInfo.ID, t.Name(), func(stream network.Stream) error {
+				// do nothing, the stream creation will be blocked so this should never be called
+				require.Fail(t, "this should never be called")
+				return nil
+			})
+		}, 1*time.Second, "CreateStream attempt to the unresponsive peer did not block")
+
+	// requires that the same peer can still connect to the other regular peer without being blocked
+	unittest.RequireReturnsBefore(t,
+		func() {
+			goodNodes[0].Host().Peerstore().AddAddrs(goodNodeInfo1.ID, goodNodeInfo1.Addrs, peerstore.AddressTTL)
+			err := goodNodes[0].OpenAndWriteOnStream(ctx, goodNodeInfo1.ID, t.Name(), func(stream network.Stream) error {
+				// do nothing, this is a no-op stream writer; we just check that the stream was created
+				return nil
+			})
+			require.NoError(t, err)
+		}, 1*time.Second, "creating stream to a responsive node failed while concurrently blocked on unresponsive node")
+
+	// requires that the CreateStream call to the unresponsive node remained blocked while we attempted the CreateStream to the
+	// good address
+	unittest.RequireNeverClosedWithin(t,
+		blockedCallCh,
+		1*time.Millisecond,
+		"CreateStream attempt to the unresponsive peer did not block after connecting to good node")
+}
diff --git a/network/p2p/p2pnode/libp2pUtils_test.go b/network/p2p/node/libp2pUtils_test.go
similarity index 94%
rename from network/p2p/p2pnode/libp2pUtils_test.go
rename to network/p2p/node/libp2pUtils_test.go
index 7d4d676c66d..c7d0d52274b 100644
--- a/network/p2p/p2pnode/libp2pUtils_test.go
+++ b/network/p2p/node/libp2pUtils_test.go
@@ -6,16 +6,14 @@ import (
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/multiformats/go-multiaddr"
+	"github.com/onflow/crypto"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
-	"github.com/onflow/flow-go/network/p2p/utils"
-
-	"github.com/onflow/flow-go/crypto"
-
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/network/p2p/keyutils"
+	"github.com/onflow/flow-go/network/p2p/utils"
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
@@ -31,7 +29,7 @@ func TestLibP2PUtilsTestSuite(t *testing.T) {
 func (ts *LibP2PUtilsTestSuite) TestPeerInfoFromID() {
 	ids, exceptedPeerInfos := idsAndPeerInfos(ts.T())
 	for i, id := range ids {
-
actualAddrInfo, err := utils.PeerAddressInfo(*id)
+		actualAddrInfo, err := utils.PeerAddressInfo(id.IdentitySkeleton)
 		assert.NoError(ts.T(), err)
 		assert.Equal(ts.T(), exceptedPeerInfos[i].String(), actualAddrInfo.String())
 	}
@@ -85,6 +83,6 @@ func BenchmarkPeerInfoFromID(b *testing.B) {
 	id.Address = "1.1.1.1:3569"
 	b.StartTimer()
 	for n := 0; n < b.N; n++ {
-		_, _ = utils.PeerAddressInfo(*id)
+		_, _ = utils.PeerAddressInfo(id.IdentitySkeleton)
 	}
 }
diff --git a/network/p2p/node/resourceManager_test.go b/network/p2p/node/resourceManager_test.go
new file mode 100644
index 00000000000..b68624fd604
--- /dev/null
+++ b/network/p2p/node/resourceManager_test.go
@@ -0,0 +1,430 @@
+package p2pnode_test
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p"
+	"github.com/libp2p/go-libp2p/core/network"
+	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	mockmodule "github.com/onflow/flow-go/module/mock"
+	"github.com/onflow/flow-go/network/internal/p2putils"
+	"github.com/onflow/flow-go/network/p2p"
+	p2ptest "github.com/onflow/flow-go/network/p2p/test"
+	"github.com/onflow/flow-go/network/p2p/unicast/protocols"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestCreateStream_InboundConnResourceLimit ensures that setting the resource limit config for
+// PeerDefaultLimits.ConnsInbound restricts the number of inbound connections created from a peer to the configured value.
+// NOTE: If this test becomes flaky, it indicates a violation of the single inbound connection guarantee.
+// In such cases the test should not be quarantined but requires immediate resolution.
+func TestCreateStream_InboundConnResourceLimit(t *testing.T) {
+	idProvider := mockmodule.NewIdentityProvider(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	cfg.NetworkConfig.Unicast.UnicastManager.CreateStreamBackoffDelay = 10 * time.Millisecond
+
+	sporkID := unittest.IdentifierFixture()
+
+	sender, id1 := p2ptest.NodeFixture(
+		t,
+		sporkID,
+		t.Name(),
+		idProvider,
+		p2ptest.WithDefaultResourceManager(),
+		p2ptest.OverrideFlowConfig(cfg))
+
+	receiver, id2 := p2ptest.NodeFixture(
+		t,
+		sporkID,
+		t.Name(),
+		idProvider,
+		p2ptest.WithDefaultResourceManager(),
+		p2ptest.OverrideFlowConfig(cfg))
+
+	idProvider.On("ByPeerID", sender.ID()).Return(&id1, true).Maybe()
+	idProvider.On("ByPeerID", receiver.ID()).Return(&id2, true).Maybe()
+
+	p2ptest.StartNodes(t, signalerCtx, []p2p.LibP2PNode{sender, receiver})
+	defer p2ptest.StopNodes(t, []p2p.LibP2PNode{sender, receiver}, cancel)
+
+	p2ptest.LetNodesDiscoverEachOther(t, signalerCtx, []p2p.LibP2PNode{sender, receiver}, flow.IdentityList{&id1, &id2})
+
+	var allStreamsCreated sync.WaitGroup
+	// at this point both nodes have discovered each other and we can now create an
+	// arbitrary number of streams from sender -> receiver. This will force libp2p
+	// to create multiple streams concurrently and attempt to reuse the single pairwise
+	// connection. If more than one connection is established while creating the concurrent
+	// streams, this indicates a bug in the libp2p PeerBaseLimitConnsInbound limit.
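+	// each goroutine below dials the receiver (a no-op once the first connection exists) and
+	// opens one stream; all streams are expected to multiplex over that single pairwise connection.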
+ defaultProtocolID := protocols.FlowProtocolID(sporkID) + expectedNumOfStreams := int64(50) + for i := int64(0); i < expectedNumOfStreams; i++ { + allStreamsCreated.Add(1) + go func() { + defer allStreamsCreated.Done() + require.NoError(t, sender.Host().Connect(ctx, receiver.Host().Peerstore().PeerInfo(receiver.ID()))) + _, err := sender.Host().NewStream(ctx, receiver.ID(), defaultProtocolID) + require.NoError(t, err) + }() + } + + unittest.RequireReturnsBefore(t, allStreamsCreated.Wait, 2*time.Second, "could not create streams on time") + require.Len(t, receiver.Host().Network().ConnsToPeer(sender.ID()), 1) + actualNumOfStreams := p2putils.CountStream(sender.Host(), receiver.ID(), p2putils.Protocol(defaultProtocolID), p2putils.Direction(network.DirOutbound)) + require.Equal(t, + expectedNumOfStreams, + int64(actualNumOfStreams), + fmt.Sprintf("expected to create %d number of streams got %d", expectedNumOfStreams, actualNumOfStreams)) +} + +type testPeerLimitConfig struct { + // nodeCount is the number of nodes in the test. + nodeCount int + + // maxInboundPeerStream is the maximum number of inbound streams from a single peer to the receiver. + maxInboundPeerStream int + + // maxInboundStreamProtocol is the maximum number of inbound streams at the receiver using a specific protocol; it accumulates all streams from all senders. + maxInboundStreamProtocol int + + // maxInboundStreamPeerProtocol is the maximum number of inbound streams at the receiver from a single peer using a specific protocol. + maxInboundStreamPeerProtocol int + + // maxInboundStreamTransient is the maximum number of inbound transient streams at the receiver; it accumulates all streams from all senders across all protocols. + // transient streams are those that are not associated fully with a peer and protocol. + maxInboundStreamTransient int + + // maxInboundStreamSystem is the maximum number of inbound streams at the receiver; it accumulates all streams from all senders across all protocols. + maxInboundStreamSystem int + + // unknownProtocol when set to true will cause senders to use an unknown protocol ID when creating streams. + unknownProtocol bool +} + +// maxLimit returns the maximum limit across all limits. +func (t testPeerLimitConfig) maxLimit() int { + max := 0 + if t.maxInboundPeerStream > max && t.maxInboundPeerStream != math.MaxInt { + max = t.maxInboundPeerStream + } + if t.maxInboundStreamProtocol > max && t.maxInboundStreamProtocol != math.MaxInt { + max = t.maxInboundStreamProtocol + } + if t.maxInboundStreamPeerProtocol > max && t.maxInboundStreamPeerProtocol != math.MaxInt { + max = t.maxInboundStreamPeerProtocol + } + if t.maxInboundStreamTransient > max && t.maxInboundStreamTransient != math.MaxInt { + max = t.maxInboundStreamTransient + } + if t.maxInboundStreamSystem > max && t.maxInboundStreamSystem != math.MaxInt { + max = t.maxInboundStreamSystem + } + return max +} + +// baseCreateStreamInboundStreamResourceLimitConfig returns a testPeerLimitConfig with default values. 
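+// All limits default to 100 with 10 nodes, so each test below overrides exactly the limit
+// whose enforcement it exercises.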
+func baseCreateStreamInboundStreamResourceLimitConfig() *testPeerLimitConfig { + return &testPeerLimitConfig{ + nodeCount: 10, + maxInboundPeerStream: 100, + maxInboundStreamProtocol: 100, + maxInboundStreamPeerProtocol: 100, + maxInboundStreamTransient: 100, + maxInboundStreamSystem: 100, + } +} + +func TestCreateStream_DefaultConfig(t *testing.T) { + testCreateStreamInboundStreamResourceLimits(t, baseCreateStreamInboundStreamResourceLimitConfig()) +} + +func TestCreateStream_MinPeerLimit(t *testing.T) { + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundPeerStream = 1 + testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_MaxPeerLimit(t *testing.T) { + + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundPeerStream = math.MaxInt + testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_MinProtocolLimit(t *testing.T) { + // max inbound protocol is not preserved; can be partially due to count stream not counting inbound streams on a protocol + unittest.SkipUnless(t, unittest.TEST_TODO, "broken test") + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundStreamProtocol = 1 + testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_MaxProtocolLimit(t *testing.T) { + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundStreamProtocol = math.MaxInt + testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_MinPeerProtocolLimit(t *testing.T) { + // max inbound stream peer protocol is not preserved; can be partially due to count stream not counting inbound streams on a protocol + unittest.SkipUnless(t, unittest.TEST_TODO, "broken test") + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundStreamPeerProtocol = 1 + testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_MaxPeerProtocolLimit(t *testing.T) { + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundStreamPeerProtocol = math.MaxInt + testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_MinTransientLimit(t *testing.T) { + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundStreamTransient = 1 + testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_MaxTransientLimit(t *testing.T) { + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundStreamTransient = math.MaxInt + testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_MinSystemLimit(t *testing.T) { + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundStreamSystem = 1 + testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_MaxSystemLimit(t *testing.T) { + // max inbound stream protocol is not preserved; can be partially due to count stream not counting inbound streams on a protocol + unittest.SkipUnless(t, unittest.TEST_TODO, "broken test") + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundStreamSystem = math.MaxInt + testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_DefaultConfigWithUnknownProtocol(t *testing.T) { + // limits are not enforced when using an unknown protocol ID + unittest.SkipUnless(t, unittest.TEST_TODO, "broken test") + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.unknownProtocol = true + testCreateStreamInboundStreamResourceLimits(t, base) +} + 
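+// exampleFixedInboundStreamLimiter is a minimal sketch (not exercised by any test here) of how a
+// fixed inbound-stream cap can be composed with libp2p's resource manager, using the same rcmgr
+// APIs that testCreateStreamInboundStreamResourceLimits uses below. The function name and the
+// single system-level limit are illustrative assumptions, not part of the test suite.
+func exampleFixedInboundStreamLimiter(maxInboundStreams int) (network.ResourceManager, error) {
+	limits := rcmgr.DefaultLimits
+	libp2p.SetDefaultServiceLimits(&limits)
+	base := limits.Scale(0, 0) // no memory/fd-proportional scaling; keep the base limits
+	partial := rcmgr.PartialLimitConfig{
+		System: rcmgr.ResourceLimits{
+			// cap the total number of inbound streams across all peers and protocols
+			StreamsInbound: rcmgr.LimitVal(maxInboundStreams),
+		},
+	}
+	// a fixed limiter applies the built limits as-is, with no runtime scaling
+	return rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(partial.Build(base)))
+}
+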
+func TestCreateStream_PeerLimitLessThanPeerProtocolLimit(t *testing.T) {
+	// the case where peer-level limit is lower than the peer-protocol-level limit.
+	base := baseCreateStreamInboundStreamResourceLimitConfig()
+	base.maxInboundPeerStream = 5          // each peer can only create 5 streams.
+	base.maxInboundStreamPeerProtocol = 10 // each peer can create 10 streams on a specific protocol (but should still be limited by the peer-level limit).
+	testCreateStreamInboundStreamResourceLimits(t, base)
+}
+
+func TestCreateStream_PeerLimitGreaterThanPeerProtocolLimit(t *testing.T) {
+	// the case where peer-level limit is higher than the peer-protocol-level limit.
+	// max inbound stream peer protocol is not preserved; can be partially due to count stream not counting inbound streams on a protocol
+	unittest.SkipUnless(t, unittest.TEST_TODO, "broken test")
+	base := baseCreateStreamInboundStreamResourceLimitConfig()
+	base.maxInboundPeerStream = 10        // each peer can create 10 streams.
+	base.maxInboundStreamPeerProtocol = 5 // each peer can create 5 streams on a specific protocol.
+	base.maxInboundStreamProtocol = 100   // overall limit is 100 streams on a specific protocol (across all peers).
+	base.maxInboundStreamTransient = 1000 // overall limit is 1000 transient streams.
+	base.maxInboundStreamSystem = 1000    // overall limit is 1000 system-wide streams.
+	testCreateStreamInboundStreamResourceLimits(t, base)
+}
+
+func TestCreateStream_ProtocolLimitLessThanPeerProtocolLimit(t *testing.T) {
+	// max inbound stream peer protocol is not preserved; can be partially due to count stream not counting inbound streams on a protocol
+	unittest.SkipUnless(t, unittest.TEST_TODO, "broken test")
+	// the case where protocol-level limit is lower than the peer-protocol-level limit.
+	base := baseCreateStreamInboundStreamResourceLimitConfig()
+	base.maxInboundStreamProtocol = 5      // each peer can create 5 streams on a specific protocol.
+	base.maxInboundStreamPeerProtocol = 10 // each peer can create 10 streams on a specific protocol (but should still be limited by the protocol-level limit).
+	testCreateStreamInboundStreamResourceLimits(t, base)
+}
+
+func TestCreateStream_ProtocolLimitGreaterThanPeerProtocolLimit(t *testing.T) {
+	// TODO: with the libp2p upgrade to v0.32.2, this test is failing: the peer-protocol limit is not being enforced;
+	// instead, the protocol limit is. This test expects each peer not to be allowed more than 5 streams on a specific protocol.
+	// However, the maximum number of streams on a specific protocol (and specific peer) is being enforced instead.
+	// A quick investigation shows that it may be due to the way libp2p treats our unicast protocol (it is not a limit-enforcing protocol).
+	// But further investigation is required to confirm this.
+	unittest.SkipUnless(t, unittest.TEST_TODO, "broken test")
+	// the case where protocol-level limit is higher than the peer-protocol-level limit.
+	base := baseCreateStreamInboundStreamResourceLimitConfig()
+	base.maxInboundStreamProtocol = 10     // overall limit is 10 streams on a specific protocol (across all peers).
+	base.maxInboundStreamPeerProtocol = 5  // each peer can create 5 streams on a specific protocol.
+	base.maxInboundStreamTransient = 1000  // overall limit is 1000 transient streams.
+	base.maxInboundStreamSystem = 1000     // overall limit is 1000 system-wide streams.
+ testCreateStreamInboundStreamResourceLimits(t, base) +} + +func TestCreateStream_TransientLimitLessThanPeerProtocolLimit(t *testing.T) { + // the case where transient-level limit is lower than the peer-protocol-level limit. + base := baseCreateStreamInboundStreamResourceLimitConfig() + base.maxInboundStreamTransient = 5 // overall limit is 5 transient streams (across all peers). + base.maxInboundStreamPeerProtocol = 10 // each peer can create 10 streams on a specific protocol (but should still be limited by the transient-level limit). + testCreateStreamInboundStreamResourceLimits(t, base) +} + +// testCreateStreamInboundStreamResourceLimits tests the inbound stream limits for a given testPeerLimitConfig. It creates +// a number of senders and a single receiver. The receiver will have a resource manager with the given limits. +// The senders will have a resource manager with infinite limits to ensure that they can create as many streams as they want. +// The test will create a number of streams from each sender to the receiver. The test will then check that the limits are +// being enforced correctly. +// The number of streams is determined by the maxLimit() of the testPeerLimitConfig, which is the maximum limit across all limits (peer, protocol, transient, system), excluding +// the math.MaxInt limits. +func testCreateStreamInboundStreamResourceLimits(t *testing.T, cfg *testPeerLimitConfig) { + idProvider := mockmodule.NewIdentityProvider(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + sporkID := unittest.IdentifierFixture() + + flowCfg, err := config.DefaultConfig() + require.NoError(t, err) + flowCfg.NetworkConfig.Unicast.UnicastManager.CreateStreamBackoffDelay = 10 * time.Millisecond + + // sender nodes will have infinite stream limit to ensure that they can create as many streams as they want. + resourceManagerSnd, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.InfiniteLimits)) + require.NoError(t, err) + senders, senderIds := p2ptest.NodesFixture(t, + sporkID, + t.Name(), cfg.nodeCount, + idProvider, + p2ptest.WithResourceManager(resourceManagerSnd), + p2ptest.OverrideFlowConfig(flowCfg)) + + // receiver node will run with default limits and no scaling. 
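+	// Scale(0, 0) below keeps the base limits without adding memory- or fd-proportional headroom,
+	// so the explicit per-scope values in the partial config are what get enforced
+	// (an assumption about rcmgr's scaling semantics).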
+ limits := rcmgr.DefaultLimits + libp2p.SetDefaultServiceLimits(&limits) + l := limits.Scale(0, 0) + partial := rcmgr.PartialLimitConfig{ + System: rcmgr.ResourceLimits{ + StreamsInbound: rcmgr.LimitVal(cfg.maxInboundStreamSystem), + ConnsInbound: rcmgr.LimitVal(cfg.nodeCount), + }, + Transient: rcmgr.ResourceLimits{ + ConnsInbound: rcmgr.LimitVal(cfg.nodeCount), + StreamsInbound: rcmgr.LimitVal(cfg.maxInboundStreamTransient), + }, + ProtocolDefault: rcmgr.ResourceLimits{ + StreamsInbound: rcmgr.LimitVal(cfg.maxInboundStreamProtocol), + }, + ProtocolPeerDefault: rcmgr.ResourceLimits{ + StreamsInbound: rcmgr.LimitVal(cfg.maxInboundStreamPeerProtocol), + }, + PeerDefault: rcmgr.ResourceLimits{ + StreamsInbound: rcmgr.LimitVal(cfg.maxInboundPeerStream), + }, + Conn: rcmgr.ResourceLimits{ + StreamsInbound: rcmgr.LimitVal(cfg.maxInboundPeerStream), + }, + Stream: rcmgr.ResourceLimits{ + StreamsInbound: rcmgr.LimitVal(cfg.maxInboundPeerStream), + }, + } + l = partial.Build(l) + resourceManagerRcv, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(l)) + require.NoError(t, err) + receiver, id2 := p2ptest.NodeFixture(t, + sporkID, + t.Name(), + idProvider, + p2ptest.WithResourceManager(resourceManagerRcv), + p2ptest.OverrideFlowConfig(flowCfg)) + + for i, sender := range senders { + idProvider.On("ByPeerID", sender.ID()).Return(senderIds[i], true).Maybe() + } + idProvider.On("ByPeerID", receiver.ID()).Return(&id2, true).Maybe() + + nodes := append(senders, receiver) + ids := append(senderIds, &id2) + + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) + + p2ptest.LetNodesDiscoverEachOther(t, signalerCtx, nodes, ids) + + var allStreamsCreated sync.WaitGroup + + protocolID := protocols.FlowProtocolID(sporkID) + if cfg.unknownProtocol { + protocolID = protocols.FlowProtocolID(unittest.IdentifierFixture()) + } + + loadLimit := cfg.maxLimit() + require.Greaterf(t, loadLimit, 0, "test limit must be greater than 0; got %d", loadLimit) + + streamListMu := sync.Mutex{} // mutex to protect the streamsList. + streamsList := make([]network.Stream, 0) // list of all streams created to avoid garbage collection. + for sIndex := range senders { + for i := 0; i < loadLimit; i++ { + allStreamsCreated.Add(1) + go func(sIndex int) { + defer allStreamsCreated.Done() + sender := senders[sIndex] + s, err := sender.Host().NewStream(ctx, receiver.ID(), protocolID) + if err != nil { + // we don't care about the error here; as we are trying to break a limit; so we expect some of the stream creations to fail. + return + } + + require.NotNil(t, s) + streamListMu.Lock() + streamsList = append(streamsList, s) + streamListMu.Unlock() + }(sIndex) + } + } + + unittest.RequireReturnsBefore(t, allStreamsCreated.Wait, 2*time.Second, "could not create streams on time") + + // transient sanity-check + require.NoError(t, resourceManagerRcv.ViewTransient(func(scope network.ResourceScope) error { + // number of in-transient streams must be less than or equal to the max transient limit + require.LessOrEqual(t, int64(scope.Stat().NumStreamsInbound), int64(cfg.maxInboundStreamTransient)) + + // number of in-transient streams must be less than or equal the total number of streams created. 
+ require.LessOrEqual(t, int64(scope.Stat().NumStreamsInbound), int64(len(streamsList)))
+ return nil
+ }))
+
+ // system-wide limit sanity-check
+ require.NoError(t, resourceManagerRcv.ViewSystem(func(scope network.ResourceScope) error {
+ require.LessOrEqual(t, int64(scope.Stat().NumStreamsInbound), int64(cfg.maxInboundStreamSystem), "system-wide limit is not being enforced")
+ return nil
+ }))
+
+ totalInboundStreams := 0
+ for _, sender := range senders {
+ actualNumOfStreams := p2putils.CountStream(receiver.Host(), sender.ID(), p2putils.Direction(network.DirInbound))
+ // number of inbound streams must be less than or equal to the peer-level limit for each sender.
+ require.LessOrEqual(t, int64(actualNumOfStreams), int64(cfg.maxInboundPeerStream))
+ require.LessOrEqual(t, int64(actualNumOfStreams), int64(cfg.maxInboundStreamPeerProtocol))
+ totalInboundStreams += actualNumOfStreams
+ }
+ // sanity check; the total number of inbound streams must be less than or equal to the system-wide limit.
+ // TODO: this should be a strict equality check, but it currently falls short; to be raised with the libp2p community.
+ // Failing at this line means the system-wide limit is not being enforced.
+ require.LessOrEqual(t, totalInboundStreams, cfg.maxInboundStreamSystem)
+ // sanity check; the total number of inbound streams must be less than or equal to the protocol-level limit.
+ require.LessOrEqual(t, totalInboundStreams, cfg.maxInboundStreamProtocol)
+}
diff --git a/network/p2p/p2pbuilder/config/config.go b/network/p2p/p2pbuilder/config/config.go
deleted file mode 100644
index a950a6b2fb1..00000000000
--- a/network/p2p/p2pbuilder/config/config.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package p2pconfig
-
-import (
- "time"
-
- "github.com/onflow/flow-go/network/p2p"
-)
-
-// UnicastConfig configuration parameters for the unicast manager.
-type UnicastConfig struct {
- // StreamRetryInterval is the initial delay between failing to establish a connection with another node and retrying. This
- // delay increases exponentially (exponential backoff) with the number of subsequent failures to establish a connection.
- StreamRetryInterval time.Duration
- // RateLimiterDistributor distributor that distributes notifications whenever a peer is rate limited to all consumers.
- RateLimiterDistributor p2p.UnicastRateLimiterDistributor
-}
-
-// ConnectionGaterConfig configuration parameters for the connection gater.
-type ConnectionGaterConfig struct {
- // InterceptPeerDialFilters list of peer filters used to filter peers on outgoing connections in the InterceptPeerDial callback.
- InterceptPeerDialFilters []p2p.PeerFilter
- // InterceptSecuredFilters list of peer filters used to filter peers and accept or reject inbound connections in InterceptSecured callback.
- InterceptSecuredFilters []p2p.PeerFilter
-}
-
-// PeerManagerConfig configuration parameters for the peer manager.
-type PeerManagerConfig struct {
- // ConnectionPruning enables connection pruning in the connection manager.
- ConnectionPruning bool
- // UpdateInterval interval used by the libp2p node peer manager component to periodically request peer updates.
- UpdateInterval time.Duration -} diff --git a/network/p2p/p2pbuilder/config/metrics.go b/network/p2p/p2pbuilder/config/metrics.go deleted file mode 100644 index 1283035e5a6..00000000000 --- a/network/p2p/p2pbuilder/config/metrics.go +++ /dev/null @@ -1,20 +0,0 @@ -package p2pconfig - -import ( - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" -) - -// MetricsConfig is a wrapper around the metrics configuration for the libp2p node. -// It is used to pass the metrics configuration to the libp2p node builder. -type MetricsConfig struct { - // HeroCacheFactory is the factory for the HeroCache metrics. It is used to - // create a HeroCache metrics instance for each cache when needed. By passing - // the factory to the libp2p node builder, the libp2p node can create the - // HeroCache metrics instance for each cache internally, which reduces the - // number of arguments needed to be passed to the libp2p node builder. - HeroCacheFactory metrics.HeroCacheMetricsFactory - - // LibP2PMetrics is the metrics instance for the libp2p node. - Metrics module.LibP2PMetrics -} diff --git a/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go b/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go deleted file mode 100644 index e4422c31c70..00000000000 --- a/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go +++ /dev/null @@ -1,243 +0,0 @@ -package gossipsubbuilder - -import ( - "context" - "fmt" - "time" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/routing" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2pnode" - "github.com/onflow/flow-go/network/p2p/scoring" - "github.com/onflow/flow-go/network/p2p/tracer" - "github.com/onflow/flow-go/network/p2p/utils" -) - -// The Builder struct is used to configure and create a new GossipSub pubsub system. -type Builder struct { - logger zerolog.Logger - metrics module.GossipSubMetrics - h host.Host - subscriptionFilter pubsub.SubscriptionFilter - gossipSubFactory p2p.GossipSubFactoryFunc - gossipSubConfigFunc p2p.GossipSubAdapterConfigFunc - gossipSubPeerScoring bool // whether to enable gossipsub peer scoring - gossipSubScoreTracerInterval time.Duration // the interval at which the gossipsub score tracer logs the peer scores. - // gossipSubTracer is a callback interface that is called by the gossipsub implementation upon - // certain events. Currently, we use it to log and observe the local mesh of the node. - gossipSubTracer p2p.PubSubTracer - scoreOptionConfig *scoring.ScoreOptionConfig - idProvider module.IdentityProvider - routingSystem routing.Routing - rpcInspectorSuite p2p.GossipSubInspectorSuite -} - -var _ p2p.GossipSubBuilder = (*Builder)(nil) - -// SetHost sets the host of the builder. -// If the host has already been set, a fatal error is logged. -func (g *Builder) SetHost(h host.Host) { - if g.h != nil { - g.logger.Fatal().Msg("host has already been set") - return - } - g.h = h -} - -// SetSubscriptionFilter sets the subscription filter of the builder. -// If the subscription filter has already been set, a fatal error is logged. 
-func (g *Builder) SetSubscriptionFilter(subscriptionFilter pubsub.SubscriptionFilter) { - if g.subscriptionFilter != nil { - g.logger.Fatal().Msg("subscription filter has already been set") - } - g.subscriptionFilter = subscriptionFilter -} - -// SetGossipSubFactory sets the gossipsub factory of the builder. -// We expect the node to initialize with a default gossipsub factory. Hence, this function overrides the default config. -func (g *Builder) SetGossipSubFactory(gossipSubFactory p2p.GossipSubFactoryFunc) { - if g.gossipSubFactory != nil { - g.logger.Warn().Msg("gossipsub factory has already been set, overriding the previous factory.") - } - g.gossipSubFactory = gossipSubFactory -} - -// SetGossipSubConfigFunc sets the gossipsub config function of the builder. -// We expect the node to initialize with a default gossipsub config. Hence, this function overrides the default config. -func (g *Builder) SetGossipSubConfigFunc(gossipSubConfigFunc p2p.GossipSubAdapterConfigFunc) { - if g.gossipSubConfigFunc != nil { - g.logger.Warn().Msg("gossipsub config function has already been set, overriding the previous config function.") - } - g.gossipSubConfigFunc = gossipSubConfigFunc -} - -// SetGossipSubPeerScoring sets the gossipsub peer scoring of the builder. -// If the gossipsub peer scoring flag has already been set, a fatal error is logged. -func (g *Builder) SetGossipSubPeerScoring(gossipSubPeerScoring bool) { - if g.gossipSubPeerScoring { - g.logger.Fatal().Msg("gossipsub peer scoring has already been set") - return - } - g.gossipSubPeerScoring = gossipSubPeerScoring -} - -// SetGossipSubScoreTracerInterval sets the gossipsub score tracer interval of the builder. -// If the gossipsub score tracer interval has already been set, a fatal error is logged. -func (g *Builder) SetGossipSubScoreTracerInterval(gossipSubScoreTracerInterval time.Duration) { - if g.gossipSubScoreTracerInterval != time.Duration(0) { - g.logger.Fatal().Msg("gossipsub score tracer interval has already been set") - return - } - g.gossipSubScoreTracerInterval = gossipSubScoreTracerInterval -} - -// SetGossipSubTracer sets the gossipsub tracer of the builder. -// If the gossipsub tracer has already been set, a fatal error is logged. -func (g *Builder) SetGossipSubTracer(gossipSubTracer p2p.PubSubTracer) { - if g.gossipSubTracer != nil { - g.logger.Fatal().Msg("gossipsub tracer has already been set") - return - } - g.gossipSubTracer = gossipSubTracer -} - -// SetIDProvider sets the identity provider of the builder. -// If the identity provider has already been set, a fatal error is logged. -func (g *Builder) SetIDProvider(idProvider module.IdentityProvider) { - if g.idProvider != nil { - g.logger.Fatal().Msg("id provider has already been set") - return - } - - g.idProvider = idProvider - g.scoreOptionConfig.SetProvider(idProvider) -} - -// SetRoutingSystem sets the routing system of the builder. -// If the routing system has already been set, a fatal error is logged. 
-func (g *Builder) SetRoutingSystem(routingSystem routing.Routing) { - if g.routingSystem != nil { - g.logger.Fatal().Msg("routing system has already been set") - return - } - g.routingSystem = routingSystem -} - -func (g *Builder) SetTopicScoreParams(topic channels.Topic, topicScoreParams *pubsub.TopicScoreParams) { - g.scoreOptionConfig.SetTopicScoreParams(topic, topicScoreParams) -} - -func (g *Builder) SetAppSpecificScoreParams(f func(peer.ID) float64) { - g.scoreOptionConfig.SetAppSpecificScoreFunction(f) -} - -// SetGossipSubRPCInspectorSuite sets the gossipsub rpc inspector suite of the builder. It contains the -// inspector function that is injected into the gossipsub rpc layer, as well as the notification distributors that -// are used to notify the app specific scoring mechanism of misbehaving peers.. -func (g *Builder) SetGossipSubRPCInspectorSuite(inspectorSuite p2p.GossipSubInspectorSuite) { - g.rpcInspectorSuite = inspectorSuite -} - -func NewGossipSubBuilder(logger zerolog.Logger, metrics module.GossipSubMetrics) *Builder { - lg := logger.With().Str("component", "gossipsub").Logger() - return &Builder{ - logger: lg, - metrics: metrics, - gossipSubFactory: defaultGossipSubFactory(), - gossipSubConfigFunc: defaultGossipSubAdapterConfig(), - scoreOptionConfig: scoring.NewScoreOptionConfig(lg), - } -} - -func defaultGossipSubFactory() p2p.GossipSubFactoryFunc { - return func(ctx context.Context, logger zerolog.Logger, h host.Host, cfg p2p.PubSubAdapterConfig) (p2p.PubSubAdapter, error) { - return p2pnode.NewGossipSubAdapter(ctx, logger, h, cfg) - } -} - -func defaultGossipSubAdapterConfig() p2p.GossipSubAdapterConfigFunc { - return func(cfg *p2p.BasePubSubAdapterConfig) p2p.PubSubAdapterConfig { - return p2pnode.NewGossipSubAdapterConfig(cfg) - } -} - -// Build creates a new GossipSub pubsub system. -// It returns the newly created GossipSub pubsub system and any errors encountered during its creation. -// Arguments: -// - ctx: the irrecoverable context of the node. -// -// Returns: -// - p2p.PubSubAdapter: a GossipSub pubsub system for the libp2p node. -// - p2p.PeerScoreTracer: a peer score tracer for the GossipSub pubsub system (if enabled, otherwise nil). -// - error: if an error occurs during the creation of the GossipSub pubsub system, it is returned. Otherwise, nil is returned. -// Note that on happy path, the returned error is nil. Any error returned is unexpected and should be handled as irrecoverable. 
-func (g *Builder) Build(ctx irrecoverable.SignalerContext) (p2p.PubSubAdapter, p2p.PeerScoreTracer, error) { - gossipSubConfigs := g.gossipSubConfigFunc(&p2p.BasePubSubAdapterConfig{ - MaxMessageSize: p2pnode.DefaultMaxPubSubMsgSize, - }) - gossipSubConfigs.WithMessageIdFunction(utils.MessageID) - - if g.routingSystem == nil { - return nil, nil, fmt.Errorf("could not create gossipsub: routing system is nil") - } - gossipSubConfigs.WithRoutingDiscovery(g.routingSystem) - - if g.subscriptionFilter != nil { - gossipSubConfigs.WithSubscriptionFilter(g.subscriptionFilter) - } - - if g.rpcInspectorSuite != nil { - gossipSubConfigs.WithInspectorSuite(g.rpcInspectorSuite) - } - - var scoreOpt *scoring.ScoreOption - var scoreTracer p2p.PeerScoreTracer - if g.gossipSubPeerScoring { - if g.rpcInspectorSuite != nil { - g.scoreOptionConfig.SetRegisterNotificationConsumerFunc(g.rpcInspectorSuite.AddInvCtrlMsgNotifConsumer) - } - - scoreOpt = scoring.NewScoreOption(g.scoreOptionConfig) - gossipSubConfigs.WithScoreOption(scoreOpt) - - if g.gossipSubScoreTracerInterval > 0 { - scoreTracer = tracer.NewGossipSubScoreTracer( - g.logger, - g.idProvider, - g.metrics, - g.gossipSubScoreTracerInterval) - gossipSubConfigs.WithScoreTracer(scoreTracer) - } - - } - - if g.gossipSubTracer != nil { - gossipSubConfigs.WithTracer(g.gossipSubTracer) - } - - if g.h == nil { - return nil, nil, fmt.Errorf("could not create gossipsub: host is nil") - } - - gossipSub, err := g.gossipSubFactory(ctx, g.logger, g.h, gossipSubConfigs) - if err != nil { - return nil, nil, fmt.Errorf("could not create gossipsub: %w", err) - } - - if scoreOpt != nil { - err := scoreOpt.SetSubscriptionProvider(scoring.NewSubscriptionProvider(g.logger, gossipSub)) - if err != nil { - return nil, nil, fmt.Errorf("could not set subscription provider: %w", err) - } - } - - return gossipSub, scoreTracer, nil -} diff --git a/network/p2p/p2pbuilder/inspector/rpc_inspector_builder.go b/network/p2p/p2pbuilder/inspector/rpc_inspector_builder.go deleted file mode 100644 index 817a8e41924..00000000000 --- a/network/p2p/p2pbuilder/inspector/rpc_inspector_builder.go +++ /dev/null @@ -1,204 +0,0 @@ -package inspector - -import ( - "fmt" - - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/mempool/queue" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/distributor" - "github.com/onflow/flow-go/network/p2p/inspector" - "github.com/onflow/flow-go/network/p2p/inspector/validation" - p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" - "github.com/onflow/flow-go/network/p2p/p2pbuilder/inspector/suite" - "github.com/onflow/flow-go/network/p2p/p2pnode" -) - -// GossipSubRPCValidationInspectorConfigs validation limits used for gossipsub RPC control message inspection. -type GossipSubRPCValidationInspectorConfigs struct { - // NumberOfWorkers number of worker pool workers. - NumberOfWorkers int - // CacheSize size of the queue used by worker pool for the control message validation inspector. - CacheSize uint32 - // GraftLimits GRAFT control message validation limits. - GraftLimits map[string]int - // PruneLimits PRUNE control message validation limits. - PruneLimits map[string]int -} - -// GossipSubRPCMetricsInspectorConfigs rpc metrics observer inspector configuration. -type GossipSubRPCMetricsInspectorConfigs struct { - // NumberOfWorkers number of worker pool workers. 
- NumberOfWorkers int - // CacheSize size of the queue used by worker pool for the control message metrics inspector. - CacheSize uint32 -} - -// GossipSubRPCInspectorsConfig encompasses configuration related to gossipsub RPC message inspectors. -type GossipSubRPCInspectorsConfig struct { - // GossipSubRPCInspectorNotificationCacheSize size of the queue for notifications about invalid RPC messages. - GossipSubRPCInspectorNotificationCacheSize uint32 - // ValidationInspectorConfigs control message validation inspector validation configuration and limits. - ValidationInspectorConfigs *GossipSubRPCValidationInspectorConfigs - // MetricsInspectorConfigs control message metrics inspector configuration. - MetricsInspectorConfigs *GossipSubRPCMetricsInspectorConfigs -} - -func DefaultGossipSubRPCInspectorsConfig() *GossipSubRPCInspectorsConfig { - return &GossipSubRPCInspectorsConfig{ - GossipSubRPCInspectorNotificationCacheSize: distributor.DefaultGossipSubInspectorNotificationQueueCacheSize, - ValidationInspectorConfigs: &GossipSubRPCValidationInspectorConfigs{ - NumberOfWorkers: validation.DefaultNumberOfWorkers, - CacheSize: validation.DefaultControlMsgValidationInspectorQueueCacheSize, - GraftLimits: map[string]int{ - validation.DiscardThresholdMapKey: validation.DefaultGraftDiscardThreshold, - validation.SafetyThresholdMapKey: validation.DefaultGraftSafetyThreshold, - validation.RateLimitMapKey: validation.DefaultGraftRateLimit, - }, - PruneLimits: map[string]int{ - validation.DiscardThresholdMapKey: validation.DefaultPruneDiscardThreshold, - validation.SafetyThresholdMapKey: validation.DefaultPruneSafetyThreshold, - validation.RateLimitMapKey: validation.DefaultPruneRateLimit, - }, - }, - MetricsInspectorConfigs: &GossipSubRPCMetricsInspectorConfigs{ - NumberOfWorkers: inspector.DefaultControlMsgMetricsInspectorNumberOfWorkers, - CacheSize: inspector.DefaultControlMsgMetricsInspectorQueueCacheSize, - }, - } -} - -// GossipSubInspectorBuilder builder that constructs all rpc inspectors used by gossip sub. The following -// rpc inspectors are created with this builder. -// - validation inspector: performs validation on all control messages. -// - metrics inspector: observes metrics for each rpc message received. -type GossipSubInspectorBuilder struct { - logger zerolog.Logger - sporkID flow.Identifier - inspectorsConfig *GossipSubRPCInspectorsConfig - metricsCfg *p2pconfig.MetricsConfig - publicNetwork bool -} - -// NewGossipSubInspectorBuilder returns new *GossipSubInspectorBuilder. -func NewGossipSubInspectorBuilder(logger zerolog.Logger, sporkID flow.Identifier, inspectorsConfig *GossipSubRPCInspectorsConfig) *GossipSubInspectorBuilder { - return &GossipSubInspectorBuilder{ - logger: logger, - sporkID: sporkID, - inspectorsConfig: inspectorsConfig, - metricsCfg: &p2pconfig.MetricsConfig{ - Metrics: metrics.NewNoopCollector(), - HeroCacheFactory: metrics.NewNoopHeroCacheMetricsFactory(), - }, - publicNetwork: p2p.PublicNetwork, - } -} - -// SetMetrics sets the network metrics and registry. -func (b *GossipSubInspectorBuilder) SetMetrics(metricsCfg *p2pconfig.MetricsConfig) *GossipSubInspectorBuilder { - b.metricsCfg = metricsCfg - return b -} - -// SetPublicNetwork used to differentiate between libp2p nodes used for public vs private networks. -// Currently, there are different metrics collectors for public vs private networks. 
-func (b *GossipSubInspectorBuilder) SetPublicNetwork(public bool) *GossipSubInspectorBuilder { - b.publicNetwork = public - return b -} - -// buildGossipSubMetricsInspector builds the gossipsub rpc metrics inspector. -func (b *GossipSubInspectorBuilder) buildGossipSubMetricsInspector() p2p.GossipSubRPCInspector { - gossipSubMetrics := p2pnode.NewGossipSubControlMessageMetrics(b.metricsCfg.Metrics, b.logger) - metricsInspector := inspector.NewControlMsgMetricsInspector( - b.logger, - gossipSubMetrics, - b.inspectorsConfig.MetricsInspectorConfigs.NumberOfWorkers, - []queue.HeroStoreConfigOption{ - queue.WithHeroStoreSizeLimit(b.inspectorsConfig.MetricsInspectorConfigs.CacheSize), - queue.WithHeroStoreCollector(metrics.GossipSubRPCMetricsObserverInspectorQueueMetricFactory(b.metricsCfg.HeroCacheFactory, b.publicNetwork)), - }...) - return metricsInspector -} - -// validationInspectorConfig returns a new inspector.ControlMsgValidationInspectorConfig using configuration provided by the node builder. -func (b *GossipSubInspectorBuilder) validationInspectorConfig(validationConfigs *GossipSubRPCValidationInspectorConfigs) (*validation.ControlMsgValidationInspectorConfig, error) { - // setup rpc validation configuration for each control message type - graftValidationCfg, err := validation.NewCtrlMsgValidationConfig(p2p.CtrlMsgGraft, validationConfigs.GraftLimits) - if err != nil { - return nil, fmt.Errorf("failed to create gossupsub RPC validation configuration: %w", err) - } - pruneValidationCfg, err := validation.NewCtrlMsgValidationConfig(p2p.CtrlMsgPrune, validationConfigs.PruneLimits) - if err != nil { - return nil, fmt.Errorf("failed to create gossupsub RPC validation configuration: %w", err) - } - - // setup gossip sub RPC control message inspector config - controlMsgRPCInspectorCfg := &validation.ControlMsgValidationInspectorConfig{ - NumberOfWorkers: validationConfigs.NumberOfWorkers, - InspectMsgStoreOpts: []queue.HeroStoreConfigOption{ - queue.WithHeroStoreSizeLimit(validationConfigs.CacheSize), - queue.WithHeroStoreCollector(metrics.GossipSubRPCInspectorQueueMetricFactory(b.metricsCfg.HeroCacheFactory, b.publicNetwork))}, - GraftValidationCfg: graftValidationCfg, - PruneValidationCfg: pruneValidationCfg, - } - return controlMsgRPCInspectorCfg, nil -} - -// buildGossipSubValidationInspector builds the gossipsub rpc validation inspector. -func (b *GossipSubInspectorBuilder) buildGossipSubValidationInspector() (p2p.GossipSubRPCInspector, *distributor.GossipSubInspectorNotifDistributor, error) { - controlMsgRPCInspectorCfg, err := b.validationInspectorConfig(b.inspectorsConfig.ValidationInspectorConfigs) - if err != nil { - return nil, nil, fmt.Errorf("failed to create gossipsub rpc inspector config: %w", err) - } - - notificationDistributor := distributor.DefaultGossipSubInspectorNotificationDistributor( - b.logger, - []queue.HeroStoreConfigOption{ - queue.WithHeroStoreSizeLimit(b.inspectorsConfig.GossipSubRPCInspectorNotificationCacheSize), - queue.WithHeroStoreCollector(metrics.RpcInspectorNotificationQueueMetricFactory(b.metricsCfg.HeroCacheFactory, b.publicNetwork))}...) - - rpcValidationInspector := validation.NewControlMsgValidationInspector( - b.logger, - b.sporkID, - controlMsgRPCInspectorCfg, - notificationDistributor, - ) - return rpcValidationInspector, notificationDistributor, nil -} - -// Build builds the rpc inspectors used by gossipsub. -// Any returned error from this func indicates a problem setting up rpc inspectors. 
-// In libp2p node setup, the returned error should be treated as a fatal error. -func (b *GossipSubInspectorBuilder) Build() (p2p.GossipSubInspectorSuite, error) { - metricsInspector := b.buildGossipSubMetricsInspector() - validationInspector, notificationDistributor, err := b.buildGossipSubValidationInspector() - if err != nil { - return nil, err - } - return suite.NewGossipSubInspectorSuite([]p2p.GossipSubRPCInspector{metricsInspector, validationInspector}, notificationDistributor), nil -} - -// DefaultRPCValidationConfig returns default RPC control message inspector config. -func DefaultRPCValidationConfig(opts ...queue.HeroStoreConfigOption) *validation.ControlMsgValidationInspectorConfig { - graftCfg, _ := validation.NewCtrlMsgValidationConfig(p2p.CtrlMsgGraft, validation.CtrlMsgValidationLimits{ - validation.DiscardThresholdMapKey: validation.DefaultGraftDiscardThreshold, - validation.SafetyThresholdMapKey: validation.DefaultGraftSafetyThreshold, - validation.RateLimitMapKey: validation.DefaultGraftRateLimit, - }) - pruneCfg, _ := validation.NewCtrlMsgValidationConfig(p2p.CtrlMsgPrune, validation.CtrlMsgValidationLimits{ - validation.DiscardThresholdMapKey: validation.DefaultPruneDiscardThreshold, - validation.SafetyThresholdMapKey: validation.DefaultPruneSafetyThreshold, - validation.RateLimitMapKey: validation.DefaultPruneRateLimit, - }) - - return &validation.ControlMsgValidationInspectorConfig{ - NumberOfWorkers: validation.DefaultNumberOfWorkers, - InspectMsgStoreOpts: opts, - GraftValidationCfg: graftCfg, - PruneValidationCfg: pruneCfg, - } -} diff --git a/network/p2p/p2pbuilder/inspector/suite/aggregate.go b/network/p2p/p2pbuilder/inspector/suite/aggregate.go deleted file mode 100644 index d3370b76bad..00000000000 --- a/network/p2p/p2pbuilder/inspector/suite/aggregate.go +++ /dev/null @@ -1,34 +0,0 @@ -package suite - -import ( - "github.com/hashicorp/go-multierror" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/onflow/flow-go/network/p2p" -) - -// AggregateRPCInspector gossip sub RPC inspector that combines multiple RPC inspectors into a single inspector. Each -// individual inspector will be invoked synchronously. -type AggregateRPCInspector struct { - inspectors []p2p.GossipSubRPCInspector -} - -// NewAggregateRPCInspector returns new aggregate RPC inspector. -func NewAggregateRPCInspector(inspectors ...p2p.GossipSubRPCInspector) *AggregateRPCInspector { - return &AggregateRPCInspector{ - inspectors: inspectors, - } -} - -// Inspect func with the p2p.GossipSubAppSpecificRpcInspector func signature that will invoke all the configured inspectors. 
-func (a *AggregateRPCInspector) Inspect(peerID peer.ID, rpc *pubsub.RPC) error { - var errs *multierror.Error - for _, inspector := range a.inspectors { - err := inspector.Inspect(peerID, rpc) - if err != nil { - errs = multierror.Append(errs, err) - } - } - return errs.ErrorOrNil() -} diff --git a/network/p2p/p2pbuilder/inspector/suite/suite.go b/network/p2p/p2pbuilder/inspector/suite/suite.go deleted file mode 100644 index b25a3999c1c..00000000000 --- a/network/p2p/p2pbuilder/inspector/suite/suite.go +++ /dev/null @@ -1,71 +0,0 @@ -package suite - -import ( - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network/p2p" -) - -// GossipSubInspectorSuite encapsulates what is exposed to the libp2p node regarding the gossipsub RPC inspectors as -// well as their notification distributors. -type GossipSubInspectorSuite struct { - component.Component - aggregatedInspector *AggregateRPCInspector - ctrlMsgInspectDistributor p2p.GossipSubInspectorNotifDistributor -} - -// NewGossipSubInspectorSuite creates a new GossipSubInspectorSuite. -// The suite is composed of the aggregated inspector, which is used to inspect the gossipsub rpc messages, and the -// control message notification distributor, which is used to notify consumers when a misbehaving peer regarding gossipsub -// control messages is detected. -// The suite is also a component, which is used to start and stop the rpc inspectors. -// Args: -// - inspectors: the rpc inspectors that are used to inspect the gossipsub rpc messages. -// - ctrlMsgInspectDistributor: the notification distributor that is used to notify consumers when a misbehaving peer -// -// regarding gossipsub control messages is detected. -// Returns: -// - the new GossipSubInspectorSuite. -func NewGossipSubInspectorSuite(inspectors []p2p.GossipSubRPCInspector, ctrlMsgInspectDistributor p2p.GossipSubInspectorNotifDistributor) *GossipSubInspectorSuite { - s := &GossipSubInspectorSuite{ - ctrlMsgInspectDistributor: ctrlMsgInspectDistributor, - aggregatedInspector: NewAggregateRPCInspector(inspectors...), - } - - builder := component.NewComponentManagerBuilder() - for _, inspector := range inspectors { - inspector := inspector // capture loop variable - builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - inspector.Start(ctx) - - select { - case <-ctx.Done(): - case <-inspector.Ready(): - ready() - } - - <-inspector.Done() - }) - } - - s.Component = builder.Build() - return s -} - -// InspectFunc returns the inspect function that is used to inspect the gossipsub rpc messages. -// This function follows a dependency injection pattern, where the inspect function is injected into the gossipsu, and -// is called whenever a gossipsub rpc message is received. -func (s *GossipSubInspectorSuite) InspectFunc() func(peer.ID, *pubsub.RPC) error { - return s.aggregatedInspector.Inspect -} - -// AddInvalidCtrlMsgNotificationConsumer adds a consumer to the invalid control message notification distributor. -// This consumer is notified when a misbehaving peer regarding gossipsub control messages is detected. This follows a pub/sub -// pattern where the consumer is notified when a new notification is published. -// A consumer is only notified once for each notification, and only receives notifications that were published after it was added. 
-func (s *GossipSubInspectorSuite) AddInvCtrlMsgNotifConsumer(c p2p.GossipSubInvCtrlMsgNotifConsumer) { - s.ctrlMsgInspectDistributor.AddConsumer(c) -} diff --git a/network/p2p/p2pbuilder/libp2pNodeBuilder.go b/network/p2p/p2pbuilder/libp2pNodeBuilder.go deleted file mode 100644 index a2c035cb2f2..00000000000 --- a/network/p2p/p2pbuilder/libp2pNodeBuilder.go +++ /dev/null @@ -1,595 +0,0 @@ -package p2pbuilder - -import ( - "context" - "errors" - "fmt" - "net" - "time" - - "github.com/libp2p/go-libp2p" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/config" - "github.com/libp2p/go-libp2p/core/connmgr" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/routing" - "github.com/libp2p/go-libp2p/core/transport" - rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" - "github.com/libp2p/go-libp2p/p2p/transport/tcp" - "github.com/multiformats/go-multiaddr" - madns "github.com/multiformats/go-multiaddr-dns" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/connection" - "github.com/onflow/flow-go/network/p2p/dht" - p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" - "github.com/onflow/flow-go/network/p2p/p2pbuilder/inspector" - "github.com/onflow/flow-go/network/p2p/p2pnode" - "github.com/onflow/flow-go/network/p2p/subscription" - "github.com/onflow/flow-go/network/p2p/tracer" - "github.com/onflow/flow-go/network/p2p/unicast/protocols" - "github.com/onflow/flow-go/network/p2p/unicast/stream" - "github.com/onflow/flow-go/network/p2p/utils" - - fcrypto "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network/p2p/keyutils" - gossipsubbuilder "github.com/onflow/flow-go/network/p2p/p2pbuilder/gossipsub" - "github.com/onflow/flow-go/network/p2p/unicast" -) - -const ( - // defaultMemoryLimitRatio flow default - defaultMemoryLimitRatio = 0.2 - // defaultFileDescriptorsRatio libp2p default - defaultFileDescriptorsRatio = 0.5 - // defaultPeerBaseLimitConnsInbound default value for libp2p PeerBaseLimitConnsInbound. This limit - // restricts the amount of inbound connections from a peer to 1, forcing libp2p to reuse the connection. - // Without this limit peers can end up in a state where there exists n number of connections per peer which - // can lead to resource exhaustion of the libp2p node. - defaultPeerBaseLimitConnsInbound = 1 - - // defaultPeerScoringEnabled is the default value for enabling peer scoring. - defaultPeerScoringEnabled = true // enable peer scoring by default on node builder - - // defaultMeshTracerLoggingInterval is the default interval at which the mesh tracer logs the mesh - // topology. This is used for debugging and forensics purposes. - // Note that we purposefully choose this logging interval high enough to avoid spamming the logs. Moreover, the - // mesh updates will be logged individually and separately. The logging interval is only used to log the mesh - // topology as a whole specially when there are no updates to the mesh topology for a long time. - defaultMeshTracerLoggingInterval = 1 * time.Minute - - // defaultGossipSubScoreTracerInterval is the default interval at which the gossipsub score tracer logs the peer scores. - // This is used for debugging and forensics purposes. 
- // Note that we purposefully choose this logging interval high enough to avoid spamming the logs. - defaultGossipSubScoreTracerInterval = 1 * time.Minute -) - -// DefaultGossipSubConfig returns the default configuration for the gossipsub protocol. -func DefaultGossipSubConfig() *GossipSubConfig { - return &GossipSubConfig{ - PeerScoring: defaultPeerScoringEnabled, - LocalMeshLogInterval: defaultMeshTracerLoggingInterval, - ScoreTracerInterval: defaultGossipSubScoreTracerInterval, - RpcInspector: inspector.DefaultGossipSubRPCInspectorsConfig(), - } -} - -// LibP2PFactoryFunc is a factory function type for generating libp2p Node instances. -type LibP2PFactoryFunc func() (p2p.LibP2PNode, error) -type GossipSubFactoryFunc func(context.Context, zerolog.Logger, host.Host, p2p.PubSubAdapterConfig) (p2p.PubSubAdapter, error) -type CreateNodeFunc func(logger zerolog.Logger, - host host.Host, - pCache *p2pnode.ProtocolPeerCache, - peerManager *connection.PeerManager) p2p.LibP2PNode -type GossipSubAdapterConfigFunc func(*p2p.BasePubSubAdapterConfig) p2p.PubSubAdapterConfig - -// DefaultLibP2PNodeFactory returns a LibP2PFactoryFunc which generates the libp2p host initialized with the -// default options for the host, the pubsub and the ping service. -func DefaultLibP2PNodeFactory(log zerolog.Logger, - address string, - flowKey fcrypto.PrivateKey, - sporkId flow.Identifier, - idProvider module.IdentityProvider, - metricsCfg *p2pconfig.MetricsConfig, - resolver madns.BasicResolver, - role string, - connGaterCfg *p2pconfig.ConnectionGaterConfig, - peerManagerCfg *p2pconfig.PeerManagerConfig, - gossipCfg *GossipSubConfig, - rCfg *ResourceManagerConfig, - uniCfg *p2pconfig.UnicastConfig, -) p2p.LibP2PFactoryFunc { - return func() (p2p.LibP2PNode, error) { - builder, err := DefaultNodeBuilder(log, - address, - flowKey, - sporkId, - idProvider, - metricsCfg, - resolver, - role, - connGaterCfg, - peerManagerCfg, - gossipCfg, - rCfg, - uniCfg) - - if err != nil { - return nil, fmt.Errorf("could not create node builder: %w", err) - } - - return builder.Build() - } -} - -// ResourceManagerConfig returns the resource manager configuration for the libp2p node. -// The resource manager is used to limit the number of open connections and streams (as well as any other resources -// used by libp2p) for each peer. -type ResourceManagerConfig struct { - MemoryLimitRatio float64 // maximum allowed fraction of memory to be allocated by the libp2p resources in (0,1] - FileDescriptorsRatio float64 // maximum allowed fraction of file descriptors to be allocated by the libp2p resources in (0,1] - PeerBaseLimitConnsInbound int // the maximum amount of allowed inbound connections per peer -} - -// GossipSubConfig is the configuration for the GossipSub pubsub implementation. -type GossipSubConfig struct { - // LocalMeshLogInterval is the interval at which the local mesh is logged. - LocalMeshLogInterval time.Duration - // ScoreTracerInterval is the interval at which the score tracer logs the peer scores. - ScoreTracerInterval time.Duration - // PeerScoring is whether to enable GossipSub peer scoring. - PeerScoring bool - // RpcInspector configuration for all gossipsub RPC control message inspectors. 
- RpcInspector *inspector.GossipSubRPCInspectorsConfig -} - -func DefaultResourceManagerConfig() *ResourceManagerConfig { - return &ResourceManagerConfig{ - MemoryLimitRatio: defaultMemoryLimitRatio, - FileDescriptorsRatio: defaultFileDescriptorsRatio, - PeerBaseLimitConnsInbound: defaultPeerBaseLimitConnsInbound, - } -} - -type LibP2PNodeBuilder struct { - gossipSubBuilder p2p.GossipSubBuilder - sporkID flow.Identifier - addr string - networkKey fcrypto.PrivateKey - logger zerolog.Logger - metrics module.LibP2PMetrics - basicResolver madns.BasicResolver - - resourceManager network.ResourceManager - resourceManagerCfg *ResourceManagerConfig - connManager connmgr.ConnManager - connGater connmgr.ConnectionGater - routingFactory func(context.Context, host.Host) (routing.Routing, error) - peerManagerEnablePruning bool - peerManagerUpdateInterval time.Duration - createNode p2p.CreateNodeFunc - createStreamRetryInterval time.Duration - rateLimiterDistributor p2p.UnicastRateLimiterDistributor - gossipSubTracer p2p.PubSubTracer -} - -func NewNodeBuilder(logger zerolog.Logger, - metrics module.LibP2PMetrics, - addr string, - networkKey fcrypto.PrivateKey, - sporkID flow.Identifier, - rCfg *ResourceManagerConfig) *LibP2PNodeBuilder { - return &LibP2PNodeBuilder{ - logger: logger, - sporkID: sporkID, - addr: addr, - networkKey: networkKey, - createNode: DefaultCreateNodeFunc, - metrics: metrics, - resourceManagerCfg: rCfg, - gossipSubBuilder: gossipsubbuilder.NewGossipSubBuilder(logger, metrics), - } -} - -// SetBasicResolver sets the DNS resolver for the node. -func (builder *LibP2PNodeBuilder) SetBasicResolver(br madns.BasicResolver) p2p.NodeBuilder { - builder.basicResolver = br - return builder -} - -// SetSubscriptionFilter sets the pubsub subscription filter for the node. -func (builder *LibP2PNodeBuilder) SetSubscriptionFilter(filter pubsub.SubscriptionFilter) p2p.NodeBuilder { - builder.gossipSubBuilder.SetSubscriptionFilter(filter) - return builder -} - -// SetResourceManager sets the resource manager for the node. -func (builder *LibP2PNodeBuilder) SetResourceManager(manager network.ResourceManager) p2p.NodeBuilder { - builder.resourceManager = manager - return builder -} - -// SetConnectionManager sets the connection manager for the node. -func (builder *LibP2PNodeBuilder) SetConnectionManager(manager connmgr.ConnManager) p2p.NodeBuilder { - builder.connManager = manager - return builder -} - -// SetConnectionGater sets the connection gater for the node. -func (builder *LibP2PNodeBuilder) SetConnectionGater(gater connmgr.ConnectionGater) p2p.NodeBuilder { - builder.connGater = gater - return builder -} - -// SetRoutingSystem sets the routing system factory function. -func (builder *LibP2PNodeBuilder) SetRoutingSystem(f func(context.Context, host.Host) (routing.Routing, error)) p2p.NodeBuilder { - builder.routingFactory = f - return builder -} - -func (builder *LibP2PNodeBuilder) SetGossipSubFactory(gf p2p.GossipSubFactoryFunc, cf p2p.GossipSubAdapterConfigFunc) p2p.NodeBuilder { - builder.gossipSubBuilder.SetGossipSubFactory(gf) - builder.gossipSubBuilder.SetGossipSubConfigFunc(cf) - return builder -} - -// EnableGossipSubPeerScoring enables peer scoring for the GossipSub pubsub system. -// Arguments: -// - module.IdentityProvider: the identity provider for the node (must be set before calling this method). -// - *PeerScoringConfig: the peer scoring configuration for the GossipSub pubsub system. If nil, the default configuration is used. 
-func (builder *LibP2PNodeBuilder) EnableGossipSubPeerScoring(provider module.IdentityProvider, config *p2p.PeerScoringConfig) p2p.NodeBuilder { - builder.gossipSubBuilder.SetGossipSubPeerScoring(true) - builder.gossipSubBuilder.SetIDProvider(provider) - if config != nil { - if config.AppSpecificScoreParams != nil { - builder.gossipSubBuilder.SetAppSpecificScoreParams(config.AppSpecificScoreParams) - } - if config.TopicScoreParams != nil { - for topic, params := range config.TopicScoreParams { - builder.gossipSubBuilder.SetTopicScoreParams(topic, params) - } - } - } - - return builder -} - -// SetPeerManagerOptions sets the peer manager options. -func (builder *LibP2PNodeBuilder) SetPeerManagerOptions(connectionPruning bool, updateInterval time.Duration) p2p.NodeBuilder { - builder.peerManagerEnablePruning = connectionPruning - builder.peerManagerUpdateInterval = updateInterval - return builder -} - -func (builder *LibP2PNodeBuilder) SetGossipSubTracer(tracer p2p.PubSubTracer) p2p.NodeBuilder { - builder.gossipSubBuilder.SetGossipSubTracer(tracer) - builder.gossipSubTracer = tracer - return builder -} - -func (builder *LibP2PNodeBuilder) SetCreateNode(f p2p.CreateNodeFunc) p2p.NodeBuilder { - builder.createNode = f - return builder -} - -func (builder *LibP2PNodeBuilder) SetRateLimiterDistributor(distributor p2p.UnicastRateLimiterDistributor) p2p.NodeBuilder { - builder.rateLimiterDistributor = distributor - return builder -} - -func (builder *LibP2PNodeBuilder) SetStreamCreationRetryInterval(createStreamRetryInterval time.Duration) p2p.NodeBuilder { - builder.createStreamRetryInterval = createStreamRetryInterval - return builder -} - -func (builder *LibP2PNodeBuilder) SetGossipSubScoreTracerInterval(interval time.Duration) p2p.NodeBuilder { - builder.gossipSubBuilder.SetGossipSubScoreTracerInterval(interval) - return builder -} - -func (builder *LibP2PNodeBuilder) SetGossipSubRpcInspectorSuite(inspectorSuite p2p.GossipSubInspectorSuite) p2p.NodeBuilder { - builder.gossipSubBuilder.SetGossipSubRPCInspectorSuite(inspectorSuite) - return builder -} - -// buildRouting creates a new routing system factory for a libp2p node using the provided host. -// It returns the newly created routing system and any errors encountered during its creation. -// -// Arguments: -// - ctx: a context.Context object used to manage the lifecycle of the node. -// - h: a libp2p host.Host object used to initialize the routing system. -// -// Returns: -// - routing.Routing: a routing system for the libp2p node. -// - error: if an error occurs during the creation of the routing system, it is returned. Otherwise, nil is returned. -// Note that on happy path, the returned error is nil. Any non-nil error indicates that the routing system could not be created -// and is non-recoverable. In case of an error the node should be stopped. -func (builder *LibP2PNodeBuilder) buildRouting(ctx context.Context, h host.Host) (routing.Routing, error) { - routingSystem, err := builder.routingFactory(ctx, h) - if err != nil { - return nil, fmt.Errorf("could not create libp2p node routing system: %w", err) - } - return routingSystem, nil -} - -// Build creates a new libp2p node using the configured options. 
-func (builder *LibP2PNodeBuilder) Build() (p2p.LibP2PNode, error) { - if builder.routingFactory == nil { - return nil, errors.New("routing system factory is not set") - } - - var opts []libp2p.Option - - if builder.basicResolver != nil { - resolver, err := madns.NewResolver(madns.WithDefaultResolver(builder.basicResolver)) - - if err != nil { - return nil, fmt.Errorf("could not create resolver: %w", err) - } - - opts = append(opts, libp2p.MultiaddrResolver(resolver)) - } - - if builder.resourceManager != nil { - opts = append(opts, libp2p.ResourceManager(builder.resourceManager)) - builder.logger.Warn(). - Msg("libp2p resource manager is overridden by the node builder, metrics may not be available") - } else { - // setting up default resource manager, by hooking in the resource manager metrics reporter. - limits := rcmgr.DefaultLimits - - libp2p.SetDefaultServiceLimits(&limits) - - mem, err := allowedMemory(builder.resourceManagerCfg.MemoryLimitRatio) - if err != nil { - return nil, fmt.Errorf("could not get allowed memory: %w", err) - } - fd, err := allowedFileDescriptors(builder.resourceManagerCfg.FileDescriptorsRatio) - if err != nil { - return nil, fmt.Errorf("could not get allowed file descriptors: %w", err) - } - limits.PeerBaseLimit.ConnsInbound = builder.resourceManagerCfg.PeerBaseLimitConnsInbound - l := limits.Scale(mem, fd) - mgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(l), rcmgr.WithMetrics(builder.metrics)) - if err != nil { - return nil, fmt.Errorf("could not create libp2p resource manager: %w", err) - } - builder.logger.Info(). - Str("key", keyResourceManagerLimit). - Int64("allowed_memory", mem). - Int("allowed_file_descriptors", fd). - Msg("allowed memory and file descriptors are fetched from the system") - newLimitConfigLogger(builder.logger).logResourceManagerLimits(l) - - opts = append(opts, libp2p.ResourceManager(mgr)) - builder.logger.Info().Msg("libp2p resource manager is set to default with metrics") - } - - if builder.connManager != nil { - opts = append(opts, libp2p.ConnectionManager(builder.connManager)) - } - - if builder.connGater != nil { - opts = append(opts, libp2p.ConnectionGater(builder.connGater)) - } - - h, err := DefaultLibP2PHost(builder.addr, builder.networkKey, opts...) - if err != nil { - return nil, err - } - builder.gossipSubBuilder.SetHost(h) - - pCache, err := p2pnode.NewProtocolPeerCache(builder.logger, h) - if err != nil { - return nil, err - } - - var peerManager p2p.PeerManager - if builder.peerManagerUpdateInterval > 0 { - connector, err := connection.NewLibp2pConnector(&connection.ConnectorConfig{ - PruneConnections: builder.peerManagerEnablePruning, - Logger: builder.logger, - Host: connection.NewConnectorHost(h), - BackoffConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(h), - }) - if err != nil { - return nil, fmt.Errorf("failed to create libp2p connector: %w", err) - } - - peerManager = connection.NewPeerManager(builder.logger, builder.peerManagerUpdateInterval, connector) - - if builder.rateLimiterDistributor != nil { - builder.rateLimiterDistributor.AddConsumer(peerManager) - } - } - - node := builder.createNode(builder.logger, h, pCache, peerManager) - - unicastManager := unicast.NewUnicastManager(builder.logger, - stream.NewLibP2PStreamFactory(h), - builder.sporkID, - builder.createStreamRetryInterval, - node, - builder.metrics) - node.SetUnicastManager(unicastManager) - - cm := component.NewComponentManagerBuilder(). 
- AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - // routing system is created here, because it needs to be created during the node startup. - routingSystem, err := builder.buildRouting(ctx, h) - if err != nil { - ctx.Throw(fmt.Errorf("could not create routing system: %w", err)) - } - node.SetRouting(routingSystem) - builder.gossipSubBuilder.SetRoutingSystem(routingSystem) - - // gossipsub is created here, because it needs to be created during the node startup. - gossipSub, scoreTracer, err := builder.gossipSubBuilder.Build(ctx) - if err != nil { - ctx.Throw(fmt.Errorf("could not create gossipsub: %w", err)) - } - if scoreTracer != nil { - node.SetPeerScoreExposer(scoreTracer) - } - node.SetPubSub(gossipSub) - gossipSub.Start(ctx) - ready() - - <-gossipSub.Done() - }). - AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - // encapsulates shutdown logic for the libp2p node. - ready() - <-ctx.Done() - // we wait till the context is done, and then we stop the libp2p node. - - err = node.Stop() - if err != nil { - // ignore context cancellation errors - if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { - ctx.Throw(fmt.Errorf("could not stop libp2p node: %w", err)) - } - } - }) - - node.SetComponentManager(cm.Build()) - - return node, nil -} - -// DefaultLibP2PHost returns a libp2p host initialized to listen on the given address and using the given private key and -// customized with options -func DefaultLibP2PHost(address string, key fcrypto.PrivateKey, options ...config.Option) (host.Host, error) { - defaultOptions, err := defaultLibP2POptions(address, key) - if err != nil { - return nil, err - } - - allOptions := append(defaultOptions, options...) - - // create the libp2p host - libP2PHost, err := libp2p.New(allOptions...) - if err != nil { - return nil, fmt.Errorf("could not create libp2p host: %w", err) - } - - return libP2PHost, nil -} - -// defaultLibP2POptions creates and returns the standard LibP2P host options that are used for the Flow Libp2p network -func defaultLibP2POptions(address string, key fcrypto.PrivateKey) ([]config.Option, error) { - - libp2pKey, err := keyutils.LibP2PPrivKeyFromFlow(key) - if err != nil { - return nil, fmt.Errorf("could not generate libp2p key: %w", err) - } - - ip, port, err := net.SplitHostPort(address) - if err != nil { - return nil, fmt.Errorf("could not split node address %s:%w", address, err) - } - - sourceMultiAddr, err := multiaddr.NewMultiaddr(utils.MultiAddressStr(ip, port)) - if err != nil { - return nil, fmt.Errorf("failed to translate Flow address to Libp2p multiaddress: %w", err) - } - - // create a transport which disables port reuse and web socket. - // Port reuse enables listening and dialing from the same TCP port (https://github.com/libp2p/go-reuseport) - // While this sounds great, it intermittently causes a 'broken pipe' error - // as the 1-k discovery process and the 1-1 messaging both sometimes attempt to open connection to the same target - // As of now there is no requirement of client sockets to be a well-known port, so disabling port reuse all together. 
- t := libp2p.Transport(func(u transport.Upgrader) (*tcp.TcpTransport, error) { - return tcp.NewTCPTransport(u, nil, tcp.DisableReuseport()) - }) - - // gather all the options for the libp2p node - options := []config.Option{ - libp2p.ListenAddrs(sourceMultiAddr), // set the listen address - libp2p.Identity(libp2pKey), // pass in the networking key - t, // set the transport - } - - return options, nil -} - -// DefaultCreateNodeFunc returns new libP2P node. -func DefaultCreateNodeFunc(logger zerolog.Logger, - host host.Host, - pCache p2p.ProtocolPeerCache, - peerManager p2p.PeerManager) p2p.LibP2PNode { - return p2pnode.NewNode(logger, host, pCache, peerManager) -} - -// DefaultNodeBuilder returns a node builder. -func DefaultNodeBuilder(log zerolog.Logger, - address string, - flowKey fcrypto.PrivateKey, - sporkId flow.Identifier, - idProvider module.IdentityProvider, - metricsCfg *p2pconfig.MetricsConfig, - resolver madns.BasicResolver, - role string, - connGaterCfg *p2pconfig.ConnectionGaterConfig, - peerManagerCfg *p2pconfig.PeerManagerConfig, - gossipCfg *GossipSubConfig, - rCfg *ResourceManagerConfig, - uniCfg *p2pconfig.UnicastConfig) (p2p.NodeBuilder, error) { - - connManager, err := connection.NewConnManager(log, metricsCfg.Metrics, connection.DefaultConnManagerConfig()) - if err != nil { - return nil, fmt.Errorf("could not create connection manager: %w", err) - } - - // set the default connection gater peer filters for both InterceptPeerDial and InterceptSecured callbacks - peerFilter := notEjectedPeerFilter(idProvider) - peerFilters := []p2p.PeerFilter{peerFilter} - - connGater := connection.NewConnGater(log, - idProvider, - connection.WithOnInterceptPeerDialFilters(append(peerFilters, connGaterCfg.InterceptPeerDialFilters...)), - connection.WithOnInterceptSecuredFilters(append(peerFilters, connGaterCfg.InterceptSecuredFilters...))) - - rpcInspectorSuite, err := inspector.NewGossipSubInspectorBuilder(log, sporkId, gossipCfg.RpcInspector). - SetPublicNetwork(p2p.PrivateNetwork). - SetMetrics(metricsCfg). - Build() - if err != nil { - return nil, fmt.Errorf("failed to create gossipsub rpc inspectors for default libp2p node: %w", err) - } - - builder := NewNodeBuilder(log, metricsCfg.Metrics, address, flowKey, sporkId, rCfg). - SetBasicResolver(resolver). - SetConnectionManager(connManager). - SetConnectionGater(connGater). - SetRoutingSystem(func(ctx context.Context, host host.Host) (routing.Routing, error) { - return dht.NewDHT(ctx, host, protocols.FlowDHTProtocolID(sporkId), log, metricsCfg.Metrics, dht.AsServer()) - }). - SetPeerManagerOptions(peerManagerCfg.ConnectionPruning, peerManagerCfg.UpdateInterval). - SetStreamCreationRetryInterval(uniCfg.StreamRetryInterval). - SetCreateNode(DefaultCreateNodeFunc). - SetRateLimiterDistributor(uniCfg.RateLimiterDistributor). - SetGossipSubRpcInspectorSuite(rpcInspectorSuite) - - if gossipCfg.PeerScoring { - // currently, we only enable peer scoring with default parameters. So, we set the score parameters to nil. 
- builder.EnableGossipSubPeerScoring(idProvider, nil) - } - - meshTracer := tracer.NewGossipSubMeshTracer(log, metricsCfg.Metrics, idProvider, gossipCfg.LocalMeshLogInterval) - builder.SetGossipSubTracer(meshTracer) - builder.SetGossipSubScoreTracerInterval(gossipCfg.ScoreTracerInterval) - - if role != "ghost" { - r, _ := flow.ParseRole(role) - builder.SetSubscriptionFilter(subscription.NewRoleBasedFilter(r, idProvider)) - } - - return builder, nil -} diff --git a/network/p2p/p2pbuilder/libp2pscaler_test.go b/network/p2p/p2pbuilder/libp2pscaler_test.go deleted file mode 100644 index 789554866d0..00000000000 --- a/network/p2p/p2pbuilder/libp2pscaler_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package p2pbuilder - -import ( - "testing" - - "github.com/pbnjay/memory" - "github.com/stretchr/testify/require" -) - -func TestAllowedMemoryScale(t *testing.T) { - m := memory.TotalMemory() - require.True(t, m > 0) - - // scaling with factor of 1 should return the total memory. - s, err := allowedMemory(1) - require.NoError(t, err) - require.Equal(t, int64(m), s) - - // scaling with factor of 0 should return an error. - _, err = allowedMemory(0) - require.Error(t, err) - - // scaling with factor of -1 should return an error. - _, err = allowedMemory(-1) - require.Error(t, err) - - // scaling with factor of 2 should return an error. - _, err = allowedMemory(2) - require.Error(t, err) - - // scaling with factor of 0.5 should return half the total memory. - s, err = allowedMemory(0.5) - require.NoError(t, err) - require.Equal(t, int64(m/2), s) - - // scaling with factor of 0.1 should return 10% of the total memory. - s, err = allowedMemory(0.1) - require.NoError(t, err) - require.Equal(t, int64(m/10), s) - - // scaling with factor of 0.01 should return 1% of the total memory. - s, err = allowedMemory(0.01) - require.NoError(t, err) - require.Equal(t, int64(m/100), s) - - // scaling with factor of 0.001 should return 0.1% of the total memory. - s, err = allowedMemory(0.001) - require.NoError(t, err) - require.Equal(t, int64(m/1000), s) - - // scaling with factor of 0.0001 should return 0.01% of the total memory. - s, err = allowedMemory(0.0001) - require.NoError(t, err) - require.Equal(t, int64(m/10000), s) -} - -func TestAllowedFileDescriptorsScale(t *testing.T) { - // getting actual file descriptor limit. - fd, err := getNumFDs() - require.NoError(t, err) - require.True(t, fd > 0) - - // scaling with factor of 1 should return the total file descriptors. - s, err := allowedFileDescriptors(1) - require.NoError(t, err) - require.Equal(t, fd, s) - - // scaling with factor of 0 should return an error. - _, err = allowedFileDescriptors(0) - require.Error(t, err) - - // scaling with factor of -1 should return an error. - _, err = allowedFileDescriptors(-1) - require.Error(t, err) - - // scaling with factor of 2 should return an error. - _, err = allowedFileDescriptors(2) - require.Error(t, err) - - // scaling with factor of 0.5 should return half the total file descriptors. - s, err = allowedFileDescriptors(0.5) - require.NoError(t, err) - require.Equal(t, fd/2, s) - - // scaling with factor of 0.1 should return 10% of the total file descriptors. - s, err = allowedFileDescriptors(0.1) - require.NoError(t, err) - require.Equal(t, fd/10, s) - - // scaling with factor of 0.01 should return 1% of the total file descriptors. - s, err = allowedFileDescriptors(0.01) - require.NoError(t, err) - require.Equal(t, fd/100, s) - - // scaling with factor of 0.001 should return 0.1% of the total file descriptors. 
- s, err = allowedFileDescriptors(0.001) - require.NoError(t, err) - require.Equal(t, fd/1000, s) - - // scaling with factor of 0.0001 should return 0.01% of the total file descriptors. - s, err = allowedFileDescriptors(0.0001) - require.NoError(t, err) - require.Equal(t, fd/10000, s) -} diff --git a/network/p2p/p2pbuilder/utils.go b/network/p2p/p2pbuilder/utils.go deleted file mode 100644 index 29b4d143698..00000000000 --- a/network/p2p/p2pbuilder/utils.go +++ /dev/null @@ -1,122 +0,0 @@ -package p2pbuilder - -import ( - "fmt" - - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/network/p2p" -) - -const keyResourceManagerLimit = "libp2p_resource_manager_limit" - -// notEjectedPeerFilter returns a PeerFilter that will return an error if the peer is unknown or ejected. -func notEjectedPeerFilter(idProvider module.IdentityProvider) p2p.PeerFilter { - return func(p peer.ID) error { - if id, found := idProvider.ByPeerID(p); !found { - return fmt.Errorf("failed to get identity of unknown peer with peer id %s", p.String()) - } else if id.Ejected { - return fmt.Errorf("peer %s with node id %s is ejected", p.String(), id.NodeID.String()) - } - - return nil - } -} - -type limitConfigLogger struct { - logger zerolog.Logger -} - -// newLimitConfigLogger creates a new limitConfigLogger. -func newLimitConfigLogger(logger zerolog.Logger) *limitConfigLogger { - return &limitConfigLogger{logger: logger} -} - -// withBaseLimit appends the base limit to the logger with the given prefix. -func (l *limitConfigLogger) withBaseLimit(prefix string, baseLimit rcmgr.BaseLimit) zerolog.Logger { - return l.logger.With(). - Str("key", keyResourceManagerLimit). - Int(fmt.Sprintf("%s_streams", prefix), baseLimit.Streams). - Int(fmt.Sprintf("%s_streams_inbound", prefix), baseLimit.StreamsInbound). - Int(fmt.Sprintf("%s_streams_outbound", prefix), baseLimit.StreamsOutbound). - Int(fmt.Sprintf("%s_conns", prefix), baseLimit.Conns). - Int(fmt.Sprintf("%s_conns_inbound", prefix), baseLimit.ConnsInbound). - Int(fmt.Sprintf("%s_conns_outbound", prefix), baseLimit.ConnsOutbound). - Int(fmt.Sprintf("%s_file_descriptors", prefix), baseLimit.FD). 
- Int64(fmt.Sprintf("%s_memory", prefix), baseLimit.Memory).Logger() -} - -func (l *limitConfigLogger) logResourceManagerLimits(config rcmgr.LimitConfig) { - l.logGlobalResourceLimits(config) - l.logServiceLimits(config.Service) - l.logProtocolLimits(config.Protocol) - l.logPeerLimits(config.Peer) - l.logPeerProtocolLimits(config.ProtocolPeer) -} - -func (l *limitConfigLogger) logGlobalResourceLimits(config rcmgr.LimitConfig) { - lg := l.withBaseLimit("system", config.System) - lg.Info().Msg("system limits set") - - lg = l.withBaseLimit("transient", config.Transient) - lg.Info().Msg("transient limits set") - - lg = l.withBaseLimit("allowed_listed_system", config.AllowlistedSystem) - lg.Info().Msg("allowed listed system limits set") - - lg = l.withBaseLimit("allowed_lister_transient", config.AllowlistedTransient) - lg.Info().Msg("allowed listed transient limits set") - - lg = l.withBaseLimit("service_default", config.ServiceDefault) - lg.Info().Msg("service default limits set") - - lg = l.withBaseLimit("service_peer_default", config.ServicePeerDefault) - lg.Info().Msg("service peer default limits set") - - lg = l.withBaseLimit("protocol_default", config.ProtocolDefault) - lg.Info().Msg("protocol default limits set") - - lg = l.withBaseLimit("protocol_peer_default", config.ProtocolPeerDefault) - lg.Info().Msg("protocol peer default limits set") - - lg = l.withBaseLimit("peer_default", config.PeerDefault) - lg.Info().Msg("peer default limits set") - - lg = l.withBaseLimit("connections", config.Conn) - lg.Info().Msg("connection limits set") - - lg = l.withBaseLimit("streams", config.Stream) - lg.Info().Msg("stream limits set") -} - -func (l *limitConfigLogger) logServiceLimits(s map[string]rcmgr.BaseLimit) { - for sName, sLimits := range s { - lg := l.withBaseLimit(fmt.Sprintf("service_%s", sName), sLimits) - lg.Info().Msg("service limits set") - } -} - -func (l *limitConfigLogger) logProtocolLimits(p map[protocol.ID]rcmgr.BaseLimit) { - for pName, pLimits := range p { - lg := l.withBaseLimit(fmt.Sprintf("protocol_%s", pName), pLimits) - lg.Info().Msg("protocol limits set") - } -} - -func (l *limitConfigLogger) logPeerLimits(p map[peer.ID]rcmgr.BaseLimit) { - for pId, pLimits := range p { - lg := l.withBaseLimit(fmt.Sprintf("peer_%s", pId.String()), pLimits) - lg.Info().Msg("peer limits set") - } -} - -func (l *limitConfigLogger) logPeerProtocolLimits(p map[protocol.ID]rcmgr.BaseLimit) { - for pName, pLimits := range p { - lg := l.withBaseLimit(fmt.Sprintf("protocol_peer_%s", pName), pLimits) - lg.Info().Msg("protocol peer limits set") - } -} diff --git a/network/p2p/p2pnode/gossipSubAdapter.go b/network/p2p/p2pnode/gossipSubAdapter.go deleted file mode 100644 index ab72db379f9..00000000000 --- a/network/p2p/p2pnode/gossipSubAdapter.go +++ /dev/null @@ -1,136 +0,0 @@ -package p2pnode - -import ( - "context" - "fmt" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/utils/logging" -) - -// GossipSubAdapter is a wrapper around the libp2p GossipSub implementation -// that implements the PubSubAdapter interface for the Flow network. 
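The withBaseLimit helper above folds every dimension of a BaseLimit into prefixed fields on a single zerolog context, so each scope ("system", "transient", and so on) is emitted as one queryable log line. The same pattern in a runnable miniature, with plain ints standing in for rcmgr.BaseLimit fields (the GossipSubAdapter type declaration continues below):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

// logLimit emits one structured log line per limit scope; the prefix keys the
// fields so "system_streams" and "transient_streams" can be queried separately.
func logLimit(log zerolog.Logger, prefix string, streams, conns, fd int, mem int64) {
	log.With().
		Str("key", "libp2p_resource_manager_limit").
		Int(prefix+"_streams", streams).
		Int(prefix+"_conns", conns).
		Int(prefix+"_file_descriptors", fd).
		Int64(prefix+"_memory", mem).
		Logger().
		Info().Msg("limits set")
}

func main() {
	log := zerolog.New(os.Stdout)
	logLimit(log, "system", 1024, 256, 512, 1<<30)
	logLimit(log, "transient", 128, 32, 64, 1<<28)
}
```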
-type GossipSubAdapter struct { - component.Component - gossipSub *pubsub.PubSub - logger zerolog.Logger -} - -var _ p2p.PubSubAdapter = (*GossipSubAdapter)(nil) - -func NewGossipSubAdapter(ctx context.Context, logger zerolog.Logger, h host.Host, cfg p2p.PubSubAdapterConfig) (p2p.PubSubAdapter, error) { - gossipSubConfig, ok := cfg.(*GossipSubAdapterConfig) - if !ok { - return nil, fmt.Errorf("invalid gossipsub config type: %T", cfg) - } - - gossipSub, err := pubsub.NewGossipSub(ctx, h, gossipSubConfig.Build()...) - if err != nil { - return nil, err - } - - builder := component.NewComponentManagerBuilder() - - a := &GossipSubAdapter{ - gossipSub: gossipSub, - logger: logger, - } - - if scoreTracer := gossipSubConfig.ScoreTracer(); scoreTracer != nil { - builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - ready() - a.logger.Debug().Str("component", "gossipsub_score_tracer").Msg("starting score tracer") - scoreTracer.Start(ctx) - a.logger.Debug().Str("component", "gossipsub_score_tracer").Msg("score tracer started") - - <-scoreTracer.Done() - a.logger.Debug().Str("component", "gossipsub_score_tracer").Msg("score tracer stopped") - }) - } - - if tracer := gossipSubConfig.PubSubTracer(); tracer != nil { - builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - ready() - a.logger.Debug().Str("component", "gossipsub_tracer").Msg("starting tracer") - tracer.Start(ctx) - a.logger.Debug().Str("component", "gossipsub_tracer").Msg("tracer started") - - <-tracer.Done() - a.logger.Debug().Str("component", "gossipsub_tracer").Msg("tracer stopped") - }) - } - - if inspectorSuite := gossipSubConfig.InspectorSuiteComponent(); inspectorSuite != nil { - builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - a.logger.Debug().Str("component", "gossipsub_inspector_suite").Msg("starting inspector suite") - inspectorSuite.Start(ctx) - a.logger.Debug().Str("component", "gossipsub_inspector_suite").Msg("inspector suite started") - - select { - case <-ctx.Done(): - a.logger.Debug().Str("component", "gossipsub_inspector_suite").Msg("inspector suite context done") - case <-inspectorSuite.Ready(): - ready() - a.logger.Debug().Str("component", "gossipsub_inspector_suite").Msg("inspector suite ready") - } - - <-inspectorSuite.Done() - a.logger.Debug().Str("component", "gossipsub_inspector_suite").Msg("inspector suite stopped") - }) - } - - a.Component = builder.Build() - - return a, nil -} - -func (g *GossipSubAdapter) RegisterTopicValidator(topic string, topicValidator p2p.TopicValidatorFunc) error { - // wrap the topic validator function into a libp2p topic validator function. - var v pubsub.ValidatorEx = func(ctx context.Context, from peer.ID, message *pubsub.Message) pubsub.ValidationResult { - switch result := topicValidator(ctx, from, message); result { - case p2p.ValidationAccept: - return pubsub.ValidationAccept - case p2p.ValidationIgnore: - return pubsub.ValidationIgnore - case p2p.ValidationReject: - return pubsub.ValidationReject - default: - // should never happen, indicates a bug in the topic validator - g.logger.Fatal().Msgf("invalid validation result: %v", result) - } - // should never happen, indicates a bug in the topic validator, but we need to return something - g.logger.Warn(). - Bool(logging.KeySuspicious, true). 
- Msg("invalid validation result, returning reject") - return pubsub.ValidationReject - } - - return g.gossipSub.RegisterTopicValidator(topic, v, pubsub.WithValidatorInline(true)) -} - -func (g *GossipSubAdapter) UnregisterTopicValidator(topic string) error { - return g.gossipSub.UnregisterTopicValidator(topic) -} - -func (g *GossipSubAdapter) Join(topic string) (p2p.Topic, error) { - t, err := g.gossipSub.Join(topic) - if err != nil { - return nil, fmt.Errorf("could not join topic %s: %w", topic, err) - } - return NewGossipSubTopic(t), nil -} - -func (g *GossipSubAdapter) GetTopics() []string { - return g.gossipSub.GetTopics() -} - -func (g *GossipSubAdapter) ListPeers(topic string) []peer.ID { - return g.gossipSub.ListPeers(topic) -} diff --git a/network/p2p/p2pnode/gossipSubAdapterConfig.go b/network/p2p/p2pnode/gossipSubAdapterConfig.go deleted file mode 100644 index c5fafd20dbe..00000000000 --- a/network/p2p/p2pnode/gossipSubAdapterConfig.go +++ /dev/null @@ -1,199 +0,0 @@ -package p2pnode - -import ( - pubsub "github.com/libp2p/go-libp2p-pubsub" - pb "github.com/libp2p/go-libp2p-pubsub/pb" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/routing" - discoveryrouting "github.com/libp2p/go-libp2p/p2p/discovery/routing" - - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/network/p2p" -) - -// GossipSubAdapterConfig is a wrapper around libp2p pubsub options that -// implements the PubSubAdapterConfig interface for the Flow network. -type GossipSubAdapterConfig struct { - options []pubsub.Option - scoreTracer p2p.PeerScoreTracer - pubsubTracer p2p.PubSubTracer - inspectorSuite p2p.GossipSubInspectorSuite // currently only used to manage the lifecycle. -} - -var _ p2p.PubSubAdapterConfig = (*GossipSubAdapterConfig)(nil) - -// NewGossipSubAdapterConfig creates a new GossipSubAdapterConfig with the default options. -// Args: -// - base: the base pubsub adapter config -// -// Returns: -// - a new GossipSubAdapterConfig -func NewGossipSubAdapterConfig(base *p2p.BasePubSubAdapterConfig) *GossipSubAdapterConfig { - return &GossipSubAdapterConfig{ - options: defaultPubsubOptions(base), - } -} - -// WithRoutingDiscovery adds a routing discovery option to the config. -// Args: -// - routing: the routing discovery to use -// -// Returns: -// -None -func (g *GossipSubAdapterConfig) WithRoutingDiscovery(routing routing.ContentRouting) { - g.options = append(g.options, pubsub.WithDiscovery(discoveryrouting.NewRoutingDiscovery(routing))) -} - -// WithSubscriptionFilter adds a subscription filter option to the config. -// Args: -// - filter: the subscription filter to use -// -// Returns: -// -None -func (g *GossipSubAdapterConfig) WithSubscriptionFilter(filter p2p.SubscriptionFilter) { - g.options = append(g.options, pubsub.WithSubscriptionFilter(filter)) -} - -// WithScoreOption adds a score option to the config. -// Args: -// - option: the score option to use -// Returns: -// -None -func (g *GossipSubAdapterConfig) WithScoreOption(option p2p.ScoreOptionBuilder) { - g.options = append(g.options, option.BuildFlowPubSubScoreOption()) -} - -// WithMessageIdFunction adds a message ID function option to the config. -// Args: -// - f: the message ID function to use -// Returns: -// -None -func (g *GossipSubAdapterConfig) WithMessageIdFunction(f func([]byte) string) { - g.options = append(g.options, pubsub.WithMessageIdFn(func(pmsg *pb.Message) string { - return f(pmsg.Data) - })) -} - -// WithInspectorSuite adds an inspector suite option to the config. 
-// Args: -// - suite: the inspector suite to use -// Returns: -// -None -func (g *GossipSubAdapterConfig) WithInspectorSuite(suite p2p.GossipSubInspectorSuite) { - g.options = append(g.options, pubsub.WithAppSpecificRpcInspector(suite.InspectFunc())) - g.inspectorSuite = suite -} - -// WithTracer adds a tracer option to the config. -// Args: -// - tracer: the tracer to use -// Returns: -// -None -func (g *GossipSubAdapterConfig) WithTracer(tracer p2p.PubSubTracer) { - g.pubsubTracer = tracer - g.options = append(g.options, pubsub.WithRawTracer(tracer)) -} - -// ScoreTracer returns the tracer for the peer score. -// Args: -// - None -// -// Returns: -// - p2p.PeerScoreTracer: the tracer for the peer score. -func (g *GossipSubAdapterConfig) ScoreTracer() p2p.PeerScoreTracer { - return g.scoreTracer -} - -// PubSubTracer returns the tracer for the pubsub. -// Args: -// - None -// Returns: -// - p2p.PubSubTracer: the tracer for the pubsub. -func (g *GossipSubAdapterConfig) PubSubTracer() p2p.PubSubTracer { - return g.pubsubTracer -} - -// InspectorSuiteComponent returns the component that manages the lifecycle of the inspector suite. -// This is used to start and stop the inspector suite by the PubSubAdapter. -// Args: -// - None -// -// Returns: -// - component.Component: the component that manages the lifecycle of the inspector suite. -func (g *GossipSubAdapterConfig) InspectorSuiteComponent() component.Component { - return g.inspectorSuite -} - -// WithScoreTracer sets the tracer for the peer score. -// Args: -// - tracer: the tracer for the peer score. -// -// Returns: -// - None -func (g *GossipSubAdapterConfig) WithScoreTracer(tracer p2p.PeerScoreTracer) { - g.scoreTracer = tracer - g.options = append(g.options, pubsub.WithPeerScoreInspect(func(snapshot map[peer.ID]*pubsub.PeerScoreSnapshot) { - tracer.UpdatePeerScoreSnapshots(convertPeerScoreSnapshots(snapshot)) - }, tracer.UpdateInterval())) -} - -// convertPeerScoreSnapshots converts a libp2p pubsub peer score snapshot to a Flow peer score snapshot. -// Args: -// - snapshot: the libp2p pubsub peer score snapshot. -// -// Returns: -// - map[peer.ID]*p2p.PeerScoreSnapshot: the Flow peer score snapshot. -func convertPeerScoreSnapshots(snapshot map[peer.ID]*pubsub.PeerScoreSnapshot) map[peer.ID]*p2p.PeerScoreSnapshot { - newSnapshot := make(map[peer.ID]*p2p.PeerScoreSnapshot) - for id, snap := range snapshot { - newSnapshot[id] = &p2p.PeerScoreSnapshot{ - Topics: convertTopicScoreSnapshot(snap.Topics), - Score: snap.Score, - AppSpecificScore: snap.AppSpecificScore, - BehaviourPenalty: snap.BehaviourPenalty, - IPColocationFactor: snap.IPColocationFactor, - } - } - return newSnapshot -} - -// convertTopicScoreSnapshot converts a libp2p pubsub topic score snapshot to a Flow topic score snapshot. -// Args: -// - snapshot: the libp2p pubsub topic score snapshot. -// -// Returns: -// - map[string]*p2p.TopicScoreSnapshot: the Flow topic score snapshot. -func convertTopicScoreSnapshot(snapshot map[string]*pubsub.TopicScoreSnapshot) map[string]*p2p.TopicScoreSnapshot { - newSnapshot := make(map[string]*p2p.TopicScoreSnapshot) - for topic, snap := range snapshot { - newSnapshot[topic] = &p2p.TopicScoreSnapshot{ - TimeInMesh: snap.TimeInMesh, - FirstMessageDeliveries: snap.FirstMessageDeliveries, - MeshMessageDeliveries: snap.MeshMessageDeliveries, - InvalidMessageDeliveries: snap.InvalidMessageDeliveries, - } - } - - return newSnapshot -} - -// Build returns the libp2p pubsub options. 
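Throughout this config type, each With* method appends to an options slice that Build (below) hands to the router in one shot. The accumulate-then-build shape, reduced to a runnable miniature in which Option and router are stand-ins for pubsub.Option and the gossipsub router:

```go
package main

import "fmt"

// Option mutates the router under construction, mirroring pubsub.Option.
type Option func(r *router)

type router struct {
	maxMsgSize int
	signing    bool
}

// configBuilder accumulates options; nothing is applied until build().
type configBuilder struct{ options []Option }

func (b *configBuilder) with(o Option) { b.options = append(b.options, o) }

func (b *configBuilder) build() *router {
	r := &router{}
	for _, o := range b.options {
		o(r) // apply in registration order, like pubsub.NewGossipSub(ctx, h, opts...)
	}
	return r
}

func main() {
	b := &configBuilder{}
	b.with(func(r *router) { r.maxMsgSize = 5 << 20 })
	b.with(func(r *router) { r.signing = true })
	fmt.Printf("%+v\n", *b.build()) // {maxMsgSize:5242880 signing:true}
}
```

The append-only slice is also why Build is documented as idempotent: it only returns the accumulated slice and never mutates it.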
-// Args: -// - None -// -// Returns: -// - []pubsub.Option: the libp2p pubsub options. -// -// Build is idempotent. -func (g *GossipSubAdapterConfig) Build() []pubsub.Option { - return g.options -} - -// defaultPubsubOptions returns the default libp2p pubsub options. These options are used by the Flow network to create a libp2p pubsub. -func defaultPubsubOptions(base *p2p.BasePubSubAdapterConfig) []pubsub.Option { - return []pubsub.Option{ - pubsub.WithMessageSigning(true), - pubsub.WithStrictSignatureVerification(true), - pubsub.WithMaxMessageSize(base.MaxMessageSize), - } -} diff --git a/network/p2p/p2pnode/gossipsubMetrics.go b/network/p2p/p2pnode/gossipsubMetrics.go deleted file mode 100644 index 37cf96f6a82..00000000000 --- a/network/p2p/p2pnode/gossipsubMetrics.go +++ /dev/null @@ -1,57 +0,0 @@ -package p2pnode - -import ( - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/network/p2p" -) - -// GossipSubControlMessageMetrics is a metrics and observability wrapper component for the incoming RPCs to a -// GossipSub router. It records metrics on the number of control messages received in each RPC. -type GossipSubControlMessageMetrics struct { - metrics module.GossipSubRouterMetrics - logger zerolog.Logger -} - -var _ p2p.GossipSubControlMetricsObserver = (*GossipSubControlMessageMetrics)(nil) - -func NewGossipSubControlMessageMetrics(metrics module.GossipSubRouterMetrics, logger zerolog.Logger) *GossipSubControlMessageMetrics { - return &GossipSubControlMessageMetrics{ - logger: logger.With().Str("module", "gossipsub-control-message-metrics").Logger(), - metrics: metrics, - } -} - -// ObserveRPC is invoked to record metrics on incoming RPC messages. -func (o *GossipSubControlMessageMetrics) ObserveRPC(from peer.ID, rpc *pubsub.RPC) { - lg := o.logger.With().Str("peer_id", from.String()).Logger() - includedMessages := len(rpc.GetPublish()) - - ctl := rpc.GetControl() - if ctl == nil && includedMessages == 0 { - lg.Trace().Msg("received rpc with no control message and no publish messages") - return - } - - iHaveCount := len(ctl.GetIhave()) - iWantCount := len(ctl.GetIwant()) - graftCount := len(ctl.GetGraft()) - pruneCount := len(ctl.GetPrune()) - - lg.Trace(). - Int("iHaveCount", iHaveCount). - Int("iWantCount", iWantCount). - Int("graftCount", graftCount). - Int("pruneCount", pruneCount). - Int("included_message_count", includedMessages). 
- Msg("received rpc with control messages") - - o.metrics.OnIHaveReceived(iHaveCount) - o.metrics.OnIWantReceived(iWantCount) - o.metrics.OnGraftReceived(graftCount) - o.metrics.OnPruneReceived(pruneCount) - o.metrics.OnPublishedGossipMessagesReceived(includedMessages) -} diff --git a/network/p2p/p2pnode/libp2pNode.go b/network/p2p/p2pnode/libp2pNode.go deleted file mode 100644 index 977a5b393d3..00000000000 --- a/network/p2p/p2pnode/libp2pNode.go +++ /dev/null @@ -1,446 +0,0 @@ -// Package p2pnode encapsulates the libp2p library -package p2pnode - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "github.com/hashicorp/go-multierror" - dht "github.com/libp2p/go-libp2p-kad-dht" - kbucket "github.com/libp2p/go-libp2p-kbucket" - "github.com/libp2p/go-libp2p/core/host" - libp2pnet "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - "github.com/libp2p/go-libp2p/core/routing" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/irrecoverable" - flownet "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/internal/p2putils" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/unicast/protocols" - "github.com/onflow/flow-go/utils/logging" -) - -const ( - _ = iota - _ = 1 << (10 * iota) - mb -) - -const ( - // MaxConnectAttempt is the maximum number of attempts to be made to connect to a remote node for 1-1 direct communication - MaxConnectAttempt = 3 - - // DefaultMaxPubSubMsgSize defines the maximum message size in publish and multicast modes - DefaultMaxPubSubMsgSize = 5 * mb // 5 mb - - // timeout for FindPeer queries to the routing system - // TODO: is this a sensible value? - findPeerQueryTimeout = 10 * time.Second -) - -// Node is a wrapper around the LibP2P host. -type Node struct { - component.Component - sync.RWMutex - uniMgr p2p.UnicastManager - host host.Host // reference to the libp2p host (https://godoc.org/github.com/libp2p/go-libp2p/core/host) - pubSub p2p.PubSubAdapter - logger zerolog.Logger // used to provide logging - topics map[channels.Topic]p2p.Topic // map of a topic string to an actual topic instance - subs map[channels.Topic]p2p.Subscription // map of a topic string to an actual subscription - routing routing.Routing - pCache p2p.ProtocolPeerCache - peerManager p2p.PeerManager - peerScoreExposer p2p.PeerScoreExposer -} - -// NewNode creates a new libp2p node and sets its parameters. -func NewNode( - logger zerolog.Logger, - host host.Host, - pCache p2p.ProtocolPeerCache, - peerManager p2p.PeerManager, -) *Node { - return &Node{ - host: host, - logger: logger.With().Str("component", "libp2p-node").Logger(), - topics: make(map[channels.Topic]p2p.Topic), - subs: make(map[channels.Topic]p2p.Subscription), - pCache: pCache, - peerManager: peerManager, - } -} - -var _ component.Component = (*Node)(nil) - -func (n *Node) Start(ctx irrecoverable.SignalerContext) { - n.Component.Start(ctx) -} - -// Stop terminates the libp2p node. -// All errors returned from this function can be considered benign. 
-func (n *Node) Stop() error { - var result error - - n.logger.Debug().Msg("unsubscribing from all topics") - for t := range n.topics { - err := n.UnSubscribe(t) - // context cancelled errors are expected while unsubscribing from topics during shutdown - if err != nil && !errors.Is(err, context.Canceled) { - result = multierror.Append(result, err) - } - } - - n.logger.Debug().Msg("stopping libp2p node") - if err := n.host.Close(); err != nil { - result = multierror.Append(result, err) - } - - n.logger.Debug().Msg("closing peer store") - // to prevent peerstore routine leak (https://github.com/libp2p/go-libp2p/issues/718) - if err := n.host.Peerstore().Close(); err != nil { - n.logger.Debug().Err(err).Msg("closing peer store") - result = multierror.Append(result, err) - } - - if result != nil { - return result - } - - addrs := len(n.host.Network().ListenAddresses()) - ticker := time.NewTicker(time.Millisecond * 2) - defer ticker.Stop() - timeout := time.After(time.Second) - for addrs > 0 { - // wait for all listen addresses to have been removed - select { - case <-timeout: - n.logger.Error().Int("port", addrs).Msg("listen addresses still open") - return nil - case <-ticker.C: - addrs = len(n.host.Network().ListenAddresses()) - } - } - - n.logger.Debug().Msg("libp2p node stopped successfully") - - return nil -} - -// AddPeer adds a peer to this node by adding it to this node's peerstore and connecting to it. -// All errors returned from this function can be considered benign. -func (n *Node) AddPeer(ctx context.Context, peerInfo peer.AddrInfo) error { - return n.host.Connect(ctx, peerInfo) -} - -// RemovePeer closes the connection with the peer. -// All errors returned from this function can be considered benign. -func (n *Node) RemovePeer(peerID peer.ID) error { - err := n.host.Network().ClosePeer(peerID) - if err != nil { - return fmt.Errorf("failed to remove peer %s: %w", peerID, err) - } - // logging with suspicious level as we only expect to disconnect from a peer if it is not part of the - // protocol state. - n.logger.Warn(). - Str("peer_id", peerID.String()). - Bool(logging.KeySuspicious, true). - Msg("disconnected from peer") - - return nil -} - -// GetPeersForProtocol returns slice peer IDs for the specified protocol ID. -func (n *Node) GetPeersForProtocol(pid protocol.ID) peer.IDSlice { - pMap := n.pCache.GetPeers(pid) - peers := make(peer.IDSlice, 0, len(pMap)) - for p := range pMap { - peers = append(peers, p) - } - return peers -} - -// CreateStream returns an existing stream connected to the peer if it exists, or creates a new stream with it. -// All errors returned from this function can be considered benign. -func (n *Node) CreateStream(ctx context.Context, peerID peer.ID) (libp2pnet.Stream, error) { - lg := n.logger.With().Str("peer_id", peerID.String()).Logger() - - // If we do not currently have any addresses for the given peer, stream creation will almost - // certainly fail. If this Node was configured with a routing system, we can try to use it to - // look up the address of the peer. 
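Stop above deliberately keeps tearing the node down even when one step fails, aggregating failures with go-multierror instead of returning early. The same aggregate-and-continue shutdown shape in isolation:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

// stopAll runs every teardown step, collecting failures rather than aborting,
// so a failed peer-store close never prevents the host from being closed.
func stopAll(steps []func() error) error {
	var result error
	for _, step := range steps {
		if err := step(); err != nil {
			result = multierror.Append(result, err)
		}
	}
	return result // nil only if every step succeeded
}

func main() {
	err := stopAll([]func() error{
		func() error { return nil },
		func() error { return errors.New("close peer store: boom") },
		func() error { return errors.New("close host: boom") },
	})
	fmt.Println(err) // both failures reported; neither aborted the teardown
}
```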
- if len(n.host.Peerstore().Addrs(peerID)) == 0 && n.routing != nil { - lg.Info().Msg("address not found in peer store, searching for peer in routing system") - - var err error - func() { - timedCtx, cancel := context.WithTimeout(ctx, findPeerQueryTimeout) - defer cancel() - // try to find the peer using the routing system - _, err = n.routing.FindPeer(timedCtx, peerID) - }() - - if err != nil { - lg.Warn().Err(err).Msg("address not found in both peer store and routing system") - } else { - lg.Debug().Msg("address not found in peer store, but found in routing system search") - } - } - - stream, dialAddrs, err := n.uniMgr.CreateStream(ctx, peerID, MaxConnectAttempt) - if err != nil { - return nil, flownet.NewPeerUnreachableError(fmt.Errorf("could not create stream (peer_id: %s, dialing address(s): %v): %w", peerID, - dialAddrs, err)) - } - - lg.Info(). - Str("networking_protocol_id", string(stream.Protocol())). - Str("dial_address", fmt.Sprintf("%v", dialAddrs)). - Msg("stream successfully created to remote peer") - return stream, nil -} - -// GetIPPort returns the IP and Port the libp2p node is listening on. -// All errors returned from this function can be considered benign. -func (n *Node) GetIPPort() (string, string, error) { - return p2putils.IPPortFromMultiAddress(n.host.Network().ListenAddresses()...) -} - -// RoutingTable returns the node routing table -func (n *Node) RoutingTable() *kbucket.RoutingTable { - return n.routing.(*dht.IpfsDHT).RoutingTable() -} - -// ListPeers returns list of peer IDs for peers subscribed to the topic. -func (n *Node) ListPeers(topic string) []peer.ID { - return n.pubSub.ListPeers(topic) -} - -// Subscribe subscribes the node to the given topic and returns the subscription -// All errors returned from this function can be considered benign. -func (n *Node) Subscribe(topic channels.Topic, topicValidator p2p.TopicValidatorFunc) (p2p.Subscription, error) { - n.Lock() - defer n.Unlock() - - // Check if the topic has been already created and is in the cache - n.pubSub.GetTopics() - tp, found := n.topics[topic] - var err error - if !found { - if err := n.pubSub.RegisterTopicValidator(topic.String(), topicValidator); err != nil { - n.logger.Err(err).Str("topic", topic.String()).Msg("failed to register topic validator, aborting subscription") - return nil, fmt.Errorf("failed to register topic validator: %w", err) - } - - tp, err = n.pubSub.Join(topic.String()) - if err != nil { - if err := n.pubSub.UnregisterTopicValidator(topic.String()); err != nil { - n.logger.Err(err).Str("topic", topic.String()).Msg("failed to unregister topic validator") - } - - return nil, fmt.Errorf("could not join topic (%s): %w", topic, err) - } - - n.topics[topic] = tp - } - - // Create a new subscription - s, err := tp.Subscribe() - if err != nil { - return s, fmt.Errorf("could not subscribe to topic (%s): %w", topic, err) - } - - // Add the subscription to the cache - n.subs[topic] = s - - n.logger.Debug(). - Str("topic", topic.String()). - Msg("subscribed to topic") - return s, err -} - -// UnSubscribe cancels the subscriber and closes the topic. -// All errors returned from this function can be considered benign. 
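Subscribe above joins each topic at most once and caches the handle; UnSubscribe below undoes both steps. The join-once guard, reduced to a runnable miniature in which joinFn stands in for pubsub's Join:

```go
package main

import (
	"fmt"
	"sync"
)

// topicCache joins a topic only on first use and returns the cached handle
// afterwards, under a mutex, mirroring Node.Subscribe's topics map.
type topicCache struct {
	mu     sync.Mutex
	topics map[string]string
	joins  int
}

func (c *topicCache) join(name string, joinFn func(string) string) string {
	c.mu.Lock()
	defer c.mu.Unlock()
	if t, ok := c.topics[name]; ok {
		return t // cache hit: reuse the existing topic handle
	}
	c.joins++
	t := joinFn(name)
	c.topics[name] = t
	return t
}

func main() {
	c := &topicCache{topics: map[string]string{}}
	join := func(n string) string { return "handle:" + n }
	c.join("consensus", join)
	c.join("consensus", join)                // second call is a cache hit
	fmt.Println("joins performed:", c.joins) // 1
}
```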
-func (n *Node) UnSubscribe(topic channels.Topic) error {
- n.Lock()
- defer n.Unlock()
- // Remove the subscriber from the cache
- if s, found := n.subs[topic]; found {
- s.Cancel()
- n.subs[topic] = nil
- delete(n.subs, topic)
- }
-
- tp, found := n.topics[topic]
- if !found {
- err := fmt.Errorf("could not find topic (%s)", topic)
- return err
- }
-
- if err := n.pubSub.UnregisterTopicValidator(topic.String()); err != nil {
- n.logger.Err(err).Str("topic", topic.String()).Msg("failed to unregister topic validator")
- }
-
- // attempt to close the topic
- err := tp.Close()
- if err != nil {
- err = fmt.Errorf("could not close topic (%s): %w", topic, err)
- return err
- }
- n.topics[topic] = nil
- delete(n.topics, topic)
-
- n.logger.Debug().
- Str("topic", topic.String()).
- Msg("unsubscribed from topic")
- return err
-}
-
-// Publish publishes the given payload on the topic.
-// All errors returned from this function can be considered benign.
-func (n *Node) Publish(ctx context.Context, topic channels.Topic, data []byte) error {
- ps, found := n.topics[topic]
- if !found {
- return fmt.Errorf("could not find topic (%s)", topic)
- }
- err := ps.Publish(ctx, data)
- if err != nil {
- return fmt.Errorf("could not publish to topic (%s): %w", topic, err)
- }
- return nil
-}
-
-// HasSubscription returns true if the node currently has an active subscription to the topic.
-func (n *Node) HasSubscription(topic channels.Topic) bool {
- n.RLock()
- defer n.RUnlock()
- _, ok := n.subs[topic]
- return ok
-}
-
-// Host returns a pointer to the node's libp2p host.
-func (n *Node) Host() host.Host {
- return n.host
-}
-
-// WithDefaultUnicastProtocol overrides the default handler of the unicast manager and registers all preferred protocols.
-func (n *Node) WithDefaultUnicastProtocol(defaultHandler libp2pnet.StreamHandler, preferred []protocols.ProtocolName) error {
- n.uniMgr.WithDefaultHandler(defaultHandler)
- for _, p := range preferred {
- err := n.uniMgr.Register(p)
- if err != nil {
- return fmt.Errorf("could not register unicast protocols: %w", err)
- }
- }
-
- return nil
-}
-
-// WithPeersProvider sets the PeersProvider for the peer manager.
-// If a peer manager factory is set, this method will set the peer manager's PeersProvider.
-func (n *Node) WithPeersProvider(peersProvider p2p.PeersProvider) {
- if n.peerManager != nil {
- n.peerManager.SetPeersProvider(peersProvider)
- }
-}
-
-// PeerManagerComponent returns the component interface of the peer manager.
-func (n *Node) PeerManagerComponent() component.Component {
- return n.peerManager
-}
-
-// RequestPeerUpdate requests an update to the peer connections of this node using the peer manager.
-func (n *Node) RequestPeerUpdate() {
- if n.peerManager != nil {
- n.peerManager.RequestPeerUpdate()
- }
-}
-
-// IsConnected returns true if the given peer is a direct peer of this node, else false.
-// Peers are considered not connected if the underlying libp2p host reports the
-// peers as not connected and there are no connections in the connection list.
-// error returns:
-// - network.ErrIllegalConnectionState if the underlying libp2p host reports connectedness as NotConnected but the connections list
-// to the peer is not empty. This would normally indicate a bug within libp2p. Although network.ErrIllegalConnectionState usually indicates a bug in libp2p,
-// there is a small chance that this error is returned due to a race condition between the time we check Connectedness and ConnsToPeer:
-// a connection could be established after we check Connectedness but right before we check ConnsToPeer.
-func (n *Node) IsConnected(peerID peer.ID) (bool, error) {
- isConnected := n.host.Network().Connectedness(peerID)
- numOfConns := len(n.host.Network().ConnsToPeer(peerID))
- if isConnected == libp2pnet.NotConnected && numOfConns > 0 {
- return true, flownet.NewConnectionStatusErr(peerID, numOfConns)
- }
- return isConnected == libp2pnet.Connected && numOfConns > 0, nil
-}
-
-// SetRouting sets the node's routing implementation.
-// SetRouting may be called at most once.
-func (n *Node) SetRouting(r routing.Routing) {
- if n.routing != nil {
- n.logger.Fatal().Msg("routing already set")
- }
-
- n.routing = r
-}
-
-// Routing returns the node's routing implementation.
-func (n *Node) Routing() routing.Routing {
- return n.routing
-}
-
-// SetPeerScoreExposer sets the node's peer score exposer implementation.
-// SetPeerScoreExposer may be called at most once. It is an irrecoverable error to call this
-// method if the node's peer score exposer has already been set.
-func (n *Node) SetPeerScoreExposer(e p2p.PeerScoreExposer) {
- if n.peerScoreExposer != nil {
- n.logger.Fatal().Msg("peer score exposer already set")
- }
-
- n.peerScoreExposer = e
-}
-
-// PeerScoreExposer returns the node's peer score exposer implementation.
-// If the node's peer score exposer has not been set, the second return value will be false.
-func (n *Node) PeerScoreExposer() (p2p.PeerScoreExposer, bool) {
- if n.peerScoreExposer == nil {
- return nil, false
- }
-
- return n.peerScoreExposer, true
-}
-
-// SetPubSub sets the node's pubsub implementation.
-// SetPubSub may be called at most once.
-func (n *Node) SetPubSub(ps p2p.PubSubAdapter) {
- if n.pubSub != nil {
- n.logger.Fatal().Msg("pubSub already set")
- }
-
- n.pubSub = ps
-}
-
-// SetComponentManager sets the component manager for the node.
-// SetComponentManager may be called at most once.
-func (n *Node) SetComponentManager(cm *component.ComponentManager) {
- if n.Component != nil {
- n.logger.Fatal().Msg("component already set")
- }
-
- n.Component = cm
-}
-
-// SetUnicastManager sets the unicast manager for the node.
-// SetUnicastManager may be called at most once.
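As the comment above says, all of these Set* methods enforce a set-at-most-once rule by fataling when the field is already populated. A tiny stand-alone version of that guard, returning an error instead of logging fatally (names are illustrative):

```go
package main

import "fmt"

// node holds a dependency that may be injected exactly once.
type node struct{ routing *string }

// setRouting rejects a second assignment, mirroring the nil-check-then-fatal
// pattern in the deleted Set* methods.
func (n *node) setRouting(r string) error {
	if n.routing != nil {
		return fmt.Errorf("routing already set")
	}
	n.routing = &r
	return nil
}

func main() {
	n := &node{}
	fmt.Println(n.setRouting("dht"))  // <nil>
	fmt.Println(n.setRouting("dht2")) // routing already set
}
```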
-func (n *Node) SetUnicastManager(uniMgr p2p.UnicastManager) { - if n.uniMgr != nil { - n.logger.Fatal().Msg("unicast manager already set") - } - n.uniMgr = uniMgr -} diff --git a/network/p2p/p2pnode/libp2pNode_test.go b/network/p2p/p2pnode/libp2pNode_test.go deleted file mode 100644 index 3d97096a22a..00000000000 --- a/network/p2p/p2pnode/libp2pNode_test.go +++ /dev/null @@ -1,486 +0,0 @@ -package p2pnode_test - -import ( - "context" - "fmt" - "os" - "strings" - "sync" - "testing" - "time" - - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/peerstore" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/irrecoverable" - mockmodule "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/internal/p2pfixtures" - "github.com/onflow/flow-go/network/internal/p2putils" - "github.com/onflow/flow-go/network/internal/testutils" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2pnode" - p2ptest "github.com/onflow/flow-go/network/p2p/test" - "github.com/onflow/flow-go/network/p2p/unicast/protocols" - "github.com/onflow/flow-go/network/p2p/utils" - validator "github.com/onflow/flow-go/network/validator/pubsub" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestMultiAddress evaluates correct translations from -// dns and ip4 to libp2p multi-address -func TestMultiAddress(t *testing.T) { - key := p2pfixtures.NetworkingKeyFixtures(t) - - tt := []struct { - identity *flow.Identity - multiaddress string - }{ - { // ip4 test case - identity: unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("172.16.254.1:72")), - multiaddress: "/ip4/172.16.254.1/tcp/72", - }, - { // dns test case - identity: unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("consensus:2222")), - multiaddress: "/dns4/consensus/tcp/2222", - }, - { // dns test case - identity: unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("flow.com:3333")), - multiaddress: "/dns4/flow.com/tcp/3333", - }, - } - - for _, tc := range tt { - ip, port, _, err := p2putils.NetworkingInfo(*tc.identity) - require.NoError(t, err) - - actualAddress := utils.MultiAddressStr(ip, port) - assert.Equal(t, tc.multiaddress, actualAddress, "incorrect multi-address translation") - } - -} - -// TestSingleNodeLifeCycle evaluates correct lifecycle translation from start to stop the node -func TestSingleNodeLifeCycle(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - node, _ := p2ptest.NodeFixture( - t, - unittest.IdentifierFixture(), - "test_single_node_life_cycle", - ) - - node.Start(signalerCtx) - unittest.RequireComponentsReadyBefore(t, 100*time.Millisecond, node) - - cancel() - unittest.RequireComponentsDoneBefore(t, 100*time.Millisecond, node) -} - -// TestGetPeerInfo evaluates the deterministic translation between the nodes address and -// their libp2p info. It generates an address, and checks whether repeated translations -// yields the same info or not. 
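TestMultiAddress above fixes the expected host:port to multiaddress mapping: IPv4 literals become /ip4/&lt;ip&gt;/tcp/&lt;port&gt; and names become /dns4/&lt;name&gt;/tcp/&lt;port&gt;. A sketch of that mapping, matching the test's table; the real helpers live in p2putils and utils and are not shown in this diff:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// multiAddressStr maps "host:port" to a libp2p multiaddress string:
// IPv4 hosts use /ip4, everything else is treated as a DNS name.
func multiAddressStr(address string) (string, error) {
	host, port, err := net.SplitHostPort(address)
	if err != nil {
		return "", err
	}
	if ip := net.ParseIP(host); ip != nil && ip.To4() != nil {
		return fmt.Sprintf("/ip4/%s/tcp/%s", host, port), nil
	}
	if strings.Contains(host, ":") {
		return "", fmt.Errorf("ipv6 not handled in this sketch: %s", host)
	}
	return fmt.Sprintf("/dns4/%s/tcp/%s", host, port), nil
}

func main() {
	for _, a := range []string{"172.16.254.1:72", "consensus:2222", "flow.com:3333"} {
		m, _ := multiAddressStr(a)
		fmt.Println(a, "->", m) // matches the expectations in TestMultiAddress
	}
}
```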
-func TestGetPeerInfo(t *testing.T) { - for i := 0; i < 10; i++ { - key := p2pfixtures.NetworkingKeyFixtures(t) - - // creates node-i identity - identity := unittest.IdentityFixture(unittest.WithNetworkingKey(key.PublicKey()), unittest.WithAddress("1.1.1.1:0")) - - // translates node-i address into info - info, err := utils.PeerAddressInfo(*identity) - require.NoError(t, err) - - // repeats the translation for node-i - for j := 0; j < 10; j++ { - rinfo, err := utils.PeerAddressInfo(*identity) - require.NoError(t, err) - assert.Equal(t, rinfo.String(), info.String(), "inconsistent id generated") - } - } -} - -// TestAddPeers checks if nodes can be added as peers to a given node -func TestAddPeers(t *testing.T) { - count := 3 - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - // create nodes - nodes, identities := p2ptest.NodesFixture(t, unittest.IdentifierFixture(), "test_add_peers", count) - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - - // add the remaining nodes to the first node as its set of peers - for _, identity := range identities[1:] { - peerInfo, err := utils.PeerAddressInfo(*identity) - require.NoError(t, err) - require.NoError(t, nodes[0].AddPeer(ctx, peerInfo)) - } - - // Checks if both of the other nodes have been added as peers to the first node - assert.Len(t, nodes[0].Host().Network().Peers(), count-1) -} - -// TestRemovePeers checks if nodes can be removed as peers from a given node -func TestRemovePeers(t *testing.T) { - count := 3 - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - // create nodes - nodes, identities := p2ptest.NodesFixture(t, unittest.IdentifierFixture(), "test_remove_peers", count) - peerInfos, errs := utils.PeerInfosFromIDs(identities) - assert.Len(t, errs, 0) - - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - - // add nodes two and three to the first node as its peers - for _, pInfo := range peerInfos[1:] { - require.NoError(t, nodes[0].AddPeer(ctx, pInfo)) - } - - // check if all other nodes have been added as peers to the first node - assert.Len(t, nodes[0].Host().Network().Peers(), count-1) - - // disconnect from each peer and assert that the connection no longer exists - for _, pInfo := range peerInfos[1:] { - require.NoError(t, nodes[0].RemovePeer(pInfo.ID)) - assert.Equal(t, network.NotConnected, nodes[0].Host().Network().Connectedness(pInfo.ID)) - } -} - -func TestConnGater(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - sporkID := unittest.IdentifierFixture() - idProvider := mockmodule.NewIdentityProvider(t) - - node1Peers := unittest.NewProtectedMap[peer.ID, struct{}]() - node1, identity1 := p2ptest.NodeFixture( - t, - sporkID, - t.Name(), - p2ptest.WithConnectionGater(testutils.NewConnectionGater(idProvider, func(pid peer.ID) error { - if !node1Peers.Has(pid) { - return fmt.Errorf("peer id not found: %s", pid.String()) - } - return nil - }))) - idProvider.On("ByPeerID", node1.Host().ID()).Return(&identity1, true).Maybe() - - p2ptest.StartNode(t, signalerCtx, node1, 100*time.Millisecond) - defer p2ptest.StopNode(t, node1, cancel, 100*time.Millisecond) - - node1Info, err := utils.PeerAddressInfo(identity1) - assert.NoError(t, err) - - 
node2Peers := unittest.NewProtectedMap[peer.ID, struct{}]() - node2, identity2 := p2ptest.NodeFixture( - t, - sporkID, t.Name(), - p2ptest.WithConnectionGater(testutils.NewConnectionGater(idProvider, func(pid peer.ID) error { - if !node2Peers.Has(pid) { - return fmt.Errorf("id not found: %s", pid.String()) - } - return nil - }))) - idProvider.On("ByPeerID", node2.Host().ID()).Return(&identity2, - - true).Maybe() - - p2ptest.StartNode(t, signalerCtx, node2, 100*time.Millisecond) - defer p2ptest.StopNode(t, node2, cancel, 100*time.Millisecond) - - node2Info, err := utils.PeerAddressInfo(identity2) - assert.NoError(t, err) - - node1.Host().Peerstore().AddAddrs(node2Info.ID, node2Info.Addrs, peerstore.PermanentAddrTTL) - node2.Host().Peerstore().AddAddrs(node1Info.ID, node1Info.Addrs, peerstore.PermanentAddrTTL) - - _, err = node1.CreateStream(ctx, node2Info.ID) - assert.Error(t, err, "connection should not be possible") - - _, err = node2.CreateStream(ctx, node1Info.ID) - assert.Error(t, err, "connection should not be possible") - - node1Peers.Add(node2Info.ID, struct{}{}) - _, err = node1.CreateStream(ctx, node2Info.ID) - assert.Error(t, err, "connection should not be possible") - - node2Peers.Add(node1Info.ID, struct{}{}) - _, err = node1.CreateStream(ctx, node2Info.ID) - assert.NoError(t, err, "connection should not be blocked") -} - -// TestNode_HasSubscription checks that when a node subscribes to a topic HasSubscription should return true. -func TestNode_HasSubscription(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - sporkID := unittest.IdentifierFixture() - node, _ := p2ptest.NodeFixture(t, sporkID, "test_has_subscription") - - p2ptest.StartNode(t, signalerCtx, node, 100*time.Millisecond) - defer p2ptest.StopNode(t, node, cancel, 100*time.Millisecond) - - logger := unittest.Logger() - - topicValidator := validator.TopicValidator(logger, func(id peer.ID) error { - return nil - }) - - // create test topic - topic := channels.TopicFromChannel(channels.TestNetworkChannel, unittest.IdentifierFixture()) - _, err := node.Subscribe(topic, topicValidator) - require.NoError(t, err) - - require.True(t, node.HasSubscription(topic)) - - // create topic with no subscription - topic = channels.TopicFromChannel(channels.ConsensusCommittee, unittest.IdentifierFixture()) - require.False(t, node.HasSubscription(topic)) -} - -// TestCreateStream_SinglePairwiseConnection ensures that despite the number of concurrent streams created from peer -> peer, only a single -// connection will ever be created between two peers on initial peer dialing and subsequent streams will reuse that connection. 
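TestConnGater above drives an allow-list connection gater: a dial succeeds only once each side has admitted the other's peer ID. The core of such a gater with string peer IDs for brevity; interceptSecured merely echoes the name of libp2p's ConnectionGater hook, and this is not the production gater:

```go
package main

import (
	"fmt"
	"sync"
)

// allowList admits only explicitly added peers, like the per-node
// ProtectedMap in the test above.
type allowList struct {
	mu    sync.RWMutex
	peers map[string]struct{}
}

func (a *allowList) add(pid string) {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.peers[pid] = struct{}{}
}

// interceptSecured reports whether a connection from pid may proceed.
func (a *allowList) interceptSecured(pid string) bool {
	a.mu.RLock()
	defer a.mu.RUnlock()
	_, ok := a.peers[pid]
	return ok
}

func main() {
	g := &allowList{peers: map[string]struct{}{}}
	fmt.Println(g.interceptSecured("node2")) // false: blocked, like the first CreateStream
	g.add("node2")
	fmt.Println(g.interceptSecured("node2")) // true: allowed once admitted
}
```

Note that in the test, admitting only one side is not enough: both gaters must pass before a stream can be created.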
-func TestCreateStream_SinglePairwiseConnection(t *testing.T) { - sporkId := unittest.IdentifierFixture() - nodeCount := 3 - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - nodes, ids := p2ptest.NodesFixture(t, - sporkId, - "test_create_stream_single_pairwise_connection", - nodeCount, - p2ptest.WithDefaultResourceManager()) - - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - - ctxWithTimeout, cancel := context.WithTimeout(ctx, 3*time.Second) - defer cancel() - done := make(chan struct{}) - numOfStreamsPerNode := 100 // create large number of streams per node per connection to ensure the resource manager does not cause starvation of resources - expectedTotalNumOfStreams := 600 - - // create a number of streams concurrently between each node - streams := make(chan network.Stream, expectedTotalNumOfStreams) - - go createConcurrentStreams(t, ctxWithTimeout, nodes, ids, numOfStreamsPerNode, streams, done) - unittest.RequireCloseBefore(t, done, 5*time.Second, "could not create streams on time") - require.Len(t, streams, expectedTotalNumOfStreams, fmt.Sprintf("expected %d total number of streams created got %d", expectedTotalNumOfStreams, len(streams))) - - // ensure only a single connection exists between all nodes - ensureSinglePairwiseConnection(t, nodes) - close(streams) - for s := range streams { - _ = s.Close() - } -} - -// TestCreateStream_SinglePeerDial ensures that the unicast manager only attempts to dial a peer once, retries dialing a peer the expected max amount of times when an -// error is encountered and retries creating the stream the expected max amount of times when unicast.ErrDialInProgress is encountered. -func TestCreateStream_SinglePeerDial(t *testing.T) { - createStreamRetries := atomic.NewInt64(0) - dialPeerRetries := atomic.NewInt64(0) - hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.WarnLevel { - switch { - case strings.Contains(message, "retrying create stream, dial to peer in progress"): - createStreamRetries.Inc() - case strings.Contains(message, "retrying peer dialing"): - dialPeerRetries.Inc() - } - } - }) - logger := zerolog.New(os.Stdout).Level(zerolog.InfoLevel).Hook(hook) - idProvider := mockmodule.NewIdentityProvider(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - sporkID := unittest.IdentifierFixture() - - // mock metrics we expected only a single call to CreateStream to initiate the dialing to the peer, which will result in 3 failed attempts - // the next call to CreateStream will encounter a DialInProgress error which will result in 3 failed attempts - m := mockmodule.NewNetworkMetrics(t) - m.On("OnPeerDialFailure", mock.Anything, 3).Once() - m.On("OnStreamCreationFailure", mock.Anything, mock.Anything).Twice().Run(func(args mock.Arguments) { - attempts := args.Get(1).(int) - // We expect OnCreateStream to be called twice: once in each separate call to CreateStream. The first call that initializes - // the peer dialing should not attempt to retry CreateStream because all peer dialing attempts will be made which will not - // return the DialInProgress err that kicks off the CreateStream retries so we expect attempts to be 1 in this case. 
In the - // second call to CreateStream we expect all 3 attempts to be made as we wait for the DialInProgress to complete, in this case - // we expect attempts to be 3. Thus we only expect this method to be called twice with either 1 or 3 attempts. - require.False(t, attempts != 1 && attempts != 3, fmt.Sprintf("expected either 1 or 3 attempts got %d", attempts)) - }) - - sender, id1 := p2ptest.NodeFixture( - t, - sporkID, - t.Name(), - p2ptest.WithConnectionGater(testutils.NewConnectionGater(idProvider, func(pid peer.ID) error { - // avoid connection gating outbound messages on sender - return nil - })), - // add very small delay so that when the sender attempts to create multiple streams - // the func fails fast before the first routine can finish the peer dialing retries - // this prevents us from making another call to dial peer - p2ptest.WithCreateStreamRetryDelay(10*time.Millisecond), - p2ptest.WithLogger(logger), - p2ptest.WithMetricsCollector(m)) - - receiver, id2 := p2ptest.NodeFixture( - t, - sporkID, - t.Name(), - p2ptest.WithConnectionGater(testutils.NewConnectionGater(idProvider, func(pid peer.ID) error { - // connection gate all incoming connections forcing the senders unicast manager to perform retries - return fmt.Errorf("gate keep") - })), - p2ptest.WithCreateStreamRetryDelay(10*time.Millisecond), - p2ptest.WithLogger(logger)) - - idProvider.On("ByPeerID", sender.Host().ID()).Return(&id1, true).Maybe() - idProvider.On("ByPeerID", receiver.Host().ID()).Return(&id2, true).Maybe() - - p2ptest.StartNodes(t, signalerCtx, []p2p.LibP2PNode{sender, receiver}, 100*time.Millisecond) - defer p2ptest.StopNodes(t, []p2p.LibP2PNode{sender, receiver}, cancel, 100*time.Millisecond) - - var wg sync.WaitGroup - wg.Add(2) - // attempt to create two concurrent streams - go func() { - defer wg.Done() - _, err := sender.CreateStream(ctx, receiver.Host().ID()) - require.Error(t, err) - }() - go func() { - defer wg.Done() - _, err := sender.CreateStream(ctx, receiver.Host().ID()) - require.Error(t, err) - }() - - unittest.RequireReturnsBefore(t, wg.Wait, 3*time.Second, "cannot create streams on time") - - // we expect a single routine to start attempting to dial thus the number of retries - // before failure should be at most p2pnode.MaxConnectAttempt - expectedNumOfDialRetries := int64(p2pnode.MaxConnectAttempt) - // we expect the second routine to retry creating a stream p2pnode.MaxConnectAttempt when dialing is in progress - expectedCreateStreamRetries := int64(p2pnode.MaxConnectAttempt) - require.Equal(t, expectedNumOfDialRetries, dialPeerRetries.Load(), fmt.Sprintf("expected %d dial peer retries got %d", expectedNumOfDialRetries, dialPeerRetries.Load())) - require.Equal(t, expectedCreateStreamRetries, createStreamRetries.Load(), fmt.Sprintf("expected %d dial peer retries got %d", expectedCreateStreamRetries, createStreamRetries.Load())) -} - -// TestCreateStream_InboundConnResourceLimit ensures that the setting the resource limit config for -// PeerDefaultLimits.ConnsInbound restricts the number of inbound connections created from a peer to the configured value. -// NOTE: If this test becomes flaky, it indicates a violation of the single inbound connection guarantee. -// In such cases the test should not be quarantined but requires immediate resolution. 
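TestCreateStream_SinglePeerDial above counts retries by hooking the logger rather than instrumenting the code under test, a useful trick when retry loops are internal to a component. The observation mechanism in isolation:

```go
package main

import (
	"os"
	"strings"

	"github.com/rs/zerolog"
	"go.uber.org/atomic"
)

func main() {
	// count every warn-level "retrying ..." log line emitted by the code under test
	retries := atomic.NewInt64(0)
	hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) {
		if level == zerolog.WarnLevel && strings.Contains(message, "retrying") {
			retries.Inc()
		}
	})
	logger := zerolog.New(os.Stdout).Hook(hook)

	// stand-in for the component's own logging during retries
	logger.Warn().Msg("retrying peer dialing")
	logger.Warn().Msg("retrying peer dialing")
	logger.Info().Msg("connected")

	logger.Info().Int64("observed_retries", retries.Load()).Msg("done") // 2
}
```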
-func TestCreateStream_InboundConnResourceLimit(t *testing.T) {
- idProvider := mockmodule.NewIdentityProvider(t)
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
-
- sporkID := unittest.IdentifierFixture()
-
- sender, id1 := p2ptest.NodeFixture(
- t,
- sporkID,
- t.Name(),
- p2ptest.WithDefaultResourceManager(),
- p2ptest.WithCreateStreamRetryDelay(10*time.Millisecond))
-
- receiver, id2 := p2ptest.NodeFixture(
- t,
- sporkID,
- t.Name(),
- p2ptest.WithDefaultResourceManager(),
- p2ptest.WithCreateStreamRetryDelay(10*time.Millisecond))
-
- idProvider.On("ByPeerID", sender.Host().ID()).Return(&id1, true).Maybe()
- idProvider.On("ByPeerID", receiver.Host().ID()).Return(&id2, true).Maybe()
-
- p2ptest.StartNodes(t, signalerCtx, []p2p.LibP2PNode{sender, receiver}, 100*time.Millisecond)
- defer p2ptest.StopNodes(t, []p2p.LibP2PNode{sender, receiver}, cancel, 100*time.Millisecond)
-
- p2ptest.LetNodesDiscoverEachOther(t, signalerCtx, []p2p.LibP2PNode{sender, receiver}, flow.IdentityList{&id1, &id2})
-
- var allStreamsCreated sync.WaitGroup
- // at this point both nodes have discovered each other and we can now create an
- // arbitrary number of streams from sender -> receiver. This will force libp2p
- // to create multiple streams concurrently and attempt to reuse the single pairwise
- // connection. If more than one connection is established while creating the concurrent
- // streams, this indicates a bug in the libp2p PeerBaseLimitConnsInbound limit.
- defaultProtocolID := protocols.FlowProtocolID(sporkID)
- expectedNumOfStreams := int64(50)
- for i := int64(0); i < expectedNumOfStreams; i++ {
- allStreamsCreated.Add(1)
- go func() {
- defer allStreamsCreated.Done()
- _, err := sender.Host().NewStream(ctx, receiver.Host().ID(), defaultProtocolID)
- require.NoError(t, err)
- }()
- }
-
- unittest.RequireReturnsBefore(t, allStreamsCreated.Wait, 2*time.Second, "could not create streams on time")
- require.Len(t, receiver.Host().Network().ConnsToPeer(sender.Host().ID()), 1)
- actualNumOfStreams := p2putils.CountStream(sender.Host(), receiver.Host().ID(), defaultProtocolID, network.DirOutbound)
- require.Equal(t, expectedNumOfStreams, int64(actualNumOfStreams), fmt.Sprintf("expected to create %d streams, got %d", expectedNumOfStreams, actualNumOfStreams))
-}
-
-// createConcurrentStreams attempts to create n streams concurrently between each combination of node pairs.
-func createConcurrentStreams(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, ids flow.IdentityList, n int, streams chan network.Stream, done chan struct{}) {
- defer close(done)
- var wg sync.WaitGroup
- for _, this := range nodes {
- for i, other := range nodes {
- if this == other {
- continue
- }
-
- pInfo, err := utils.PeerAddressInfo(*ids[i])
- require.NoError(t, err)
- this.Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL)
-
- for j := 0; j < n; j++ {
- wg.Add(1)
- go func(sender p2p.LibP2PNode) {
- defer wg.Done()
- s, err := sender.CreateStream(ctx, pInfo.ID)
- require.NoError(t, err)
- streams <- s
- }(this)
- }
- }
- // brief sleep to prevent the sender and receiver from dialing each other at the same time in separate
- // goroutines, which would result in 2 connections (one created by each node); this can happen because we call CreateStream concurrently.
- time.Sleep(500 * time.Millisecond) - } - wg.Wait() -} - -// ensureSinglePairwiseConnection ensure each node in the list has exactly one connection to every other node in the list. -func ensureSinglePairwiseConnection(t *testing.T, nodes []p2p.LibP2PNode) { - for _, this := range nodes { - for _, other := range nodes { - if this == other { - continue - } - require.Len(t, this.Host().Network().ConnsToPeer(other.Host().ID()), 1) - } - } -} diff --git a/network/p2p/p2pnode/libp2pStream_test.go b/network/p2p/p2pnode/libp2pStream_test.go deleted file mode 100644 index fb184d58ecc..00000000000 --- a/network/p2p/p2pnode/libp2pStream_test.go +++ /dev/null @@ -1,554 +0,0 @@ -package p2pnode_test - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "regexp" - "sync" - "testing" - "time" - - "github.com/onflow/flow-go/network/p2p" - p2ptest "github.com/onflow/flow-go/network/p2p/test" - - "github.com/libp2p/go-libp2p/core" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peerstore" - "github.com/libp2p/go-libp2p/p2p/net/swarm" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network/internal/p2pfixtures" - "github.com/onflow/flow-go/network/internal/p2putils" - "github.com/onflow/flow-go/network/p2p/p2pnode" - "github.com/onflow/flow-go/network/p2p/unicast" - "github.com/onflow/flow-go/network/p2p/unicast/protocols" - "github.com/onflow/flow-go/network/p2p/utils" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestStreamClosing tests 1-1 communication with streams closed using libp2p2 handler.FullClose -func TestStreamClosing(t *testing.T) { - count := 10 - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - var msgRegex = regexp.MustCompile("^hello[0-9]") - - handler, streamCloseWG := mockStreamHandlerForMessages(t, ctx, count, msgRegex) - - // Creates nodes - nodes, identities := p2ptest.NodesFixture(t, - unittest.IdentifierFixture(), - "test_stream_closing", - 2, - p2ptest.WithDefaultStreamHandler(handler)) - - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - - nodeInfo1, err := utils.PeerAddressInfo(*identities[1]) - require.NoError(t, err) - - senderWG := sync.WaitGroup{} - senderWG.Add(count) - for i := 0; i < count; i++ { - go func(i int) { - // Create stream from node 1 to node 2 (reuse if one already exists) - nodes[0].Host().Peerstore().AddAddrs(nodeInfo1.ID, nodeInfo1.Addrs, peerstore.AddressTTL) - s, err := nodes[0].CreateStream(ctx, nodeInfo1.ID) - assert.NoError(t, err) - w := bufio.NewWriter(s) - - // Send message from node 1 to 2 - msg := fmt.Sprintf("hello%d\n", i) - _, err = w.WriteString(msg) - assert.NoError(t, err) - - // Flush the stream - assert.NoError(t, w.Flush()) - - // close the stream - err = s.Close() - require.NoError(t, err) - - senderWG.Done() - }(i) - } - - // wait for stream to be closed - unittest.RequireReturnsBefore(t, senderWG.Wait, 3*time.Second, "could not send messages on time") - unittest.RequireReturnsBefore(t, streamCloseWG.Wait, 3*time.Second, "could not close stream at receiver side") -} - -// mockStreamHandlerForMessages creates a stream handler that expects receiving `msgCount` unique messages that match the input regexp. 
-// The returned wait group will be unlocked when all messages are completely received and associated streams are closed. -func mockStreamHandlerForMessages(t *testing.T, ctx context.Context, msgCount int, msgRegexp *regexp.Regexp) (network.StreamHandler, *sync.WaitGroup) { - streamCloseWG := &sync.WaitGroup{} - streamCloseWG.Add(msgCount) - - h := func(s network.Stream) { - go func(s network.Stream) { - rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) - for { - str, err := rw.ReadString('\n') - if err != nil { - if errors.Is(err, io.EOF) { - err := s.Close() - require.NoError(t, err) - - streamCloseWG.Done() - return - } - require.Fail(t, fmt.Sprintf("received error %v", err)) - err = s.Reset() - require.NoError(t, err) - return - } - select { - case <-ctx.Done(): - return - default: - require.True(t, msgRegexp.MatchString(str), str) - } - } - }(s) - - } - return h, streamCloseWG -} - -// TestCreateStream_WithDefaultUnicast evaluates correctness of creating default (tcp) unicast streams between two libp2p nodes. -func TestCreateStream_WithDefaultUnicast(t *testing.T) { - sporkId := unittest.IdentifierFixture() - testCreateStream(t, - sporkId, - nil, // sends nil as preferred unicast so that nodes run on default plain tcp streams. - protocols.FlowProtocolID(sporkId)) -} - -// TestCreateStream_WithPreferredGzipUnicast evaluates correctness of creating gzip-compressed tcp unicast streams between two libp2p nodes. -func TestCreateStream_WithPreferredGzipUnicast(t *testing.T) { - sporkId := unittest.IdentifierFixture() - testCreateStream(t, - sporkId, - []protocols.ProtocolName{protocols.GzipCompressionUnicast}, - protocols.FlowGzipProtocolId(sporkId)) -} - -// testCreateStreams checks if a new streams of "preferred" type is created each time when CreateStream is called and an existing stream is not -// reused. The "preferred" stream type is the one with the largest index in `unicasts` list. -// To check that the streams are of "preferred" type, it evaluates the protocol id of established stream against the input `protocolID`. 
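mockStreamHandlerForMessages above reads newline-framed messages until EOF and then closes its end of the stream. The same framing logic over an in-memory net.Pipe, with no libp2p dependency:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
)

func main() {
	a, b := net.Pipe() // synchronous in-memory connection pair

	done := make(chan struct{})
	go func() { // receiver side, like the mocked stream handler
		defer close(done)
		r := bufio.NewReader(b)
		for {
			line, err := r.ReadString('\n')
			if err == io.EOF {
				return // sender closed: the normal termination path
			}
			if err != nil {
				fmt.Println("reset:", err) // analogous to s.Reset() on unexpected errors
				return
			}
			fmt.Print("received: ", line)
		}
	}()

	// sender side: newline-delimited messages, flushed, then closed
	w := bufio.NewWriter(a)
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "hello%d\n", i)
	}
	w.Flush()
	a.Close() // receiver observes io.EOF, mirroring s.Close() in the test
	<-done
}
```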
-func testCreateStream(t *testing.T, sporkId flow.Identifier, unicasts []protocols.ProtocolName, protocolID core.ProtocolID) { - count := 2 - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - nodes, identities := p2ptest.NodesFixture(t, - sporkId, - "test_create_stream", - count, - p2ptest.WithPreferredUnicasts(unicasts)) - - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - - id2 := identities[1] - - // Assert that there is no outbound stream to the target yet - require.Equal(t, 0, p2putils.CountStream(nodes[0].Host(), nodes[1].Host().ID(), protocolID, network.DirOutbound)) - - // Now attempt to create another 100 outbound stream to the same destination by calling CreateStream - streamCount := 100 - var streams []network.Stream - for i := 0; i < streamCount; i++ { - pInfo, err := utils.PeerAddressInfo(*id2) - require.NoError(t, err) - nodes[0].Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL) - anotherStream, err := nodes[0].CreateStream(ctx, pInfo.ID) - // Assert that a stream was returned without error - require.NoError(t, err) - require.NotNil(t, anotherStream) - // assert that the stream count within libp2p incremented (a new stream was created) - require.Equal(t, i+1, p2putils.CountStream(nodes[0].Host(), nodes[1].Host().ID(), protocolID, network.DirOutbound)) - // assert that the same connection is reused - require.Len(t, nodes[0].Host().Network().Conns(), 1) - streams = append(streams, anotherStream) - } - - // reverse loop to close all the streams - for i := streamCount - 1; i >= 0; i-- { - s := streams[i] - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - err := s.Close() - assert.NoError(t, err) - wg.Done() - }() - unittest.RequireReturnsBefore(t, wg.Wait, 1*time.Second, "could not close streams on time") - // assert that the stream count within libp2p decremented - require.Equal(t, i, p2putils.CountStream(nodes[0].Host(), nodes[1].Host().ID(), protocolID, network.DirOutbound)) - } -} - -// TestCreateStream_FallBack checks two libp2p nodes with conflicting supported unicast protocols fall back -// to default (tcp) unicast protocol during their negotiation. -// To do this, a node with preferred gzip-compressed tcp unicast tries creating stream to another node that only -// supports default plain tcp unicast. The test evaluates that the unicast stream established between two nodes -// are of type default plain tcp. 
-func TestCreateStream_FallBack(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - // Creates two nodes: one with preferred gzip, and other one with default protocol - sporkId := unittest.IdentifierFixture() - thisNode, _ := p2ptest.NodeFixture(t, - sporkId, - "test_create_stream_fallback", - p2ptest.WithPreferredUnicasts([]protocols.ProtocolName{protocols.GzipCompressionUnicast})) - otherNode, otherId := p2ptest.NodeFixture(t, sporkId, "test_create_stream_fallback") - - nodes := []p2p.LibP2PNode{thisNode, otherNode} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - - // Assert that there is no outbound stream to the target yet (neither default nor preferred) - defaultProtocolId := protocols.FlowProtocolID(sporkId) - preferredProtocolId := protocols.FlowGzipProtocolId(sporkId) - require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), defaultProtocolId, network.DirOutbound)) - require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), preferredProtocolId, network.DirOutbound)) - - // Now attempt to create another 100 outbound stream to the same destination by calling CreateStream - streamCount := 100 - var streams []network.Stream - for i := 0; i < streamCount; i++ { - pInfo, err := utils.PeerAddressInfo(otherId) - require.NoError(t, err) - thisNode.Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL) - - // a new stream must be created - anotherStream, err := thisNode.CreateStream(ctx, pInfo.ID) - require.NoError(t, err) - require.NotNil(t, anotherStream) - - // number of default-protocol streams must be incremented, while preferred ones must be zero, since the other node - // only supports default ones. - require.Equal(t, i+1, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), defaultProtocolId, network.DirOutbound)) - require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), preferredProtocolId, network.DirOutbound)) - - // assert that the same connection is reused - require.Len(t, thisNode.Host().Network().Conns(), 1) - streams = append(streams, anotherStream) - } - - // reverse loop to close all the streams - for i := streamCount - 1; i >= 0; i-- { - s := streams[i] - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - err := s.Close() - assert.NoError(t, err) - wg.Done() - }() - unittest.RequireReturnsBefore(t, wg.Wait, 1*time.Second, "could not close streams on time") - - // number of default-protocol streams must be decremented, while preferred ones must be zero, since the other node - // only supports default ones. 
- require.Equal(t, i, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), defaultProtocolId, network.DirOutbound)) - require.Equal(t, 0, p2putils.CountStream(thisNode.Host(), otherNode.Host().ID(), preferredProtocolId, network.DirOutbound)) - } -} - -// TestCreateStreamIsConcurrencySafe tests that the CreateStream is concurrency safe -func TestCreateStreamIsConcurrencySafe(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - // create two nodes - nodes, identities := p2ptest.NodesFixture(t, unittest.IdentifierFixture(), "test_create_stream_is_concurrency_safe", 2) - require.Len(t, identities, 2) - - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - - nodeInfo1, err := utils.PeerAddressInfo(*identities[1]) - require.NoError(t, err) - - wg := sync.WaitGroup{} - - // create a gate which gates the call to CreateStream for all concurrent go routines - gate := make(chan struct{}) - - createStream := func() { - <-gate - nodes[0].Host().Peerstore().AddAddrs(nodeInfo1.ID, nodeInfo1.Addrs, peerstore.AddressTTL) - _, err := nodes[0].CreateStream(ctx, nodeInfo1.ID) - assert.NoError(t, err) // assert that stream was successfully created - wg.Done() - } - - // kick off 10 concurrent calls to CreateStream - for i := 0; i < 10; i++ { - wg.Add(1) - go createStream() - } - // open the gate by closing the channel - close(gate) - - // no call should block - unittest.AssertReturnsBefore(t, wg.Wait, 10*time.Second) -} - -// TestNoBackoffWhenCreatingStream checks that backoff is not enabled between attempts to connect to a remote peer -// for one-to-one direct communication. -func TestNoBackoffWhenCreatingStream(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // setup per node contexts so they can be stopped independently - ctx1, cancel1 := context.WithCancel(ctx) - signalerCtx1 := irrecoverable.NewMockSignalerContext(t, ctx1) - - ctx2, cancel2 := context.WithCancel(ctx) - signalerCtx2 := irrecoverable.NewMockSignalerContext(t, ctx2) - - count := 2 - // Creates nodes - nodes, identities := p2ptest.NodesFixture(t, - unittest.IdentifierFixture(), - "test_no_backoff_when_create_stream", - count, - ) - node1 := nodes[0] - node2 := nodes[1] - - p2ptest.StartNode(t, signalerCtx1, node1, 100*time.Millisecond) - p2ptest.StartNode(t, signalerCtx2, node2, 100*time.Millisecond) - - // stop node 2 immediately - p2ptest.StopNode(t, node2, cancel2, 100*time.Millisecond) - defer p2ptest.StopNode(t, node1, cancel1, 100*time.Millisecond) - - id2 := identities[1] - pInfo, err := utils.PeerAddressInfo(*id2) - require.NoError(t, err) - nodes[0].Host().Peerstore().AddAddrs(pInfo.ID, pInfo.Addrs, peerstore.AddressTTL) - maxTimeToWait := p2pnode.MaxConnectAttempt * unicast.MaxRetryJitter * time.Millisecond - - // need to add some buffer time so that RequireReturnsBefore waits slightly longer than maxTimeToWait to avoid - // a race condition - someGraceTime := 100 * time.Millisecond - totalWaitTime := maxTimeToWait + someGraceTime - - //each CreateStream() call may try to connect up to MaxConnectAttempt (3) times. - - //there are 2 scenarios that we need to account for: - // - //1. machines where a timeout occurs on the first connection attempt - this can be due to local firewall rules or other processes running on the machine. 
-	//    In this case, we need to create a scenario where a backoff would have normally occurred. This is why we initiate a second connection attempt.
-	//    Libp2p remembers the peer we are trying to connect to between CreateStream() calls and would have initiated a backoff if backoff wasn't turned off.
-	//    The second CreateStream() call will make a second connection attempt MaxConnectAttempt times and that should never result in a backoff error.
-	//
-	//2. machines where a timeout does NOT occur on the first connection attempt - this is on CI machines and some local dev machines without a firewall / too many other processes.
-	//    In this case, there will be MaxConnectAttempt (3) connection attempts on the first CreateStream() call and MaxConnectAttempt (3) attempts on the second CreateStream() call.
-
-	// make two separate stream creation attempts and assert that no connection backoff happened
-	for i := 0; i < 2; i++ {
-
-		// limit the maximum amount of time to wait for a connection to be established by using a context that times out
-		ctx, cancel := context.WithTimeout(ctx, maxTimeToWait)
-
-		unittest.RequireReturnsBefore(t, func() {
-			_, err = node1.CreateStream(ctx, pInfo.ID)
-		}, totalWaitTime, fmt.Sprintf("create stream did not error within %s", totalWaitTime.String()))
-		require.Error(t, err)
-		require.NotContainsf(t, err.Error(), swarm.ErrDialBackoff.Error(), "swarm dialer unexpectedly did a back off for a one-to-one connection")
-		cancel()
-	}
-}
-
-// TestUnicastOverStream_WithPlainStream checks two nodes can send and receive unicast messages on libp2p plain streams.
-func TestUnicastOverStream_WithPlainStream(t *testing.T) {
-	testUnicastOverStream(t)
-}
-
-// TestUnicastOverStream_WithGzipStreamCompression checks two nodes can send and receive unicast messages on gzip compressed streams
-// when both nodes have gzip stream compression enabled.
-func TestUnicastOverStream_WithGzipStreamCompression(t *testing.T) {
-	testUnicastOverStream(t, p2ptest.WithPreferredUnicasts([]protocols.ProtocolName{protocols.GzipCompressionUnicast}))
-}
-
-// testUnicastOverStream sends a message from node 1 to node 2 and then from node 2 to node 1 over a unicast stream.
-func testUnicastOverStream(t *testing.T, opts ...p2ptest.NodeFixtureParameterOption) {
-	ctx, cancel := context.WithCancel(context.Background())
-	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
-
-	// Creates nodes
-	sporkId := unittest.IdentifierFixture()
-
-	streamHandler1, inbound1 := p2ptest.StreamHandlerFixture(t)
-	node1, id1 := p2ptest.NodeFixture(
-		t,
-		sporkId,
-		t.Name(),
-		append(opts, p2ptest.WithDefaultStreamHandler(streamHandler1))...)
-
-	streamHandler2, inbound2 := p2ptest.StreamHandlerFixture(t)
-	node2, id2 := p2ptest.NodeFixture(
-		t,
-		sporkId,
-		t.Name(),
-		append(opts, p2ptest.WithDefaultStreamHandler(streamHandler2))...)
-
-	nodes := []p2p.LibP2PNode{node1, node2}
-	ids := flow.IdentityList{&id1, &id2}
-	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
-	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)
-
-	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
-
-	p2pfixtures.EnsureMessageExchangeOverUnicast(
-		t,
-		ctx,
-		nodes,
-		[]chan string{inbound1, inbound2},
-		p2pfixtures.LongStringMessageFactoryFixture(t))
-}
-
-// TestUnicastOverStream_Fallback checks two nodes with asymmetric sets of preferred unicast protocols can create streams and
-// send and receive unicasts. Despite the asymmetry, the nodes must fall back to the libp2p plain stream during negotiation.
-func TestUnicastOverStream_Fallback(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - // Creates nodes - // node1: supports only plain unicast protocol - // node2: supports plain and gzip - sporkId := unittest.IdentifierFixture() - - streamHandler1, inbound1 := p2ptest.StreamHandlerFixture(t) - node1, id1 := p2ptest.NodeFixture( - t, - sporkId, - t.Name(), - p2ptest.WithDefaultStreamHandler(streamHandler1), - ) - - streamHandler2, inbound2 := p2ptest.StreamHandlerFixture(t) - node2, id2 := p2ptest.NodeFixture( - t, - sporkId, - t.Name(), - p2ptest.WithDefaultStreamHandler(streamHandler2), - p2ptest.WithPreferredUnicasts([]protocols.ProtocolName{protocols.GzipCompressionUnicast}), - ) - - nodes := []p2p.LibP2PNode{node1, node2} - ids := flow.IdentityList{&id1, &id2} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - - p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) - p2pfixtures.EnsureMessageExchangeOverUnicast(t, ctx, nodes, []chan string{inbound1, inbound2}, p2pfixtures.LongStringMessageFactoryFixture(t)) -} - -// TestCreateStreamTimeoutWithUnresponsiveNode tests that the CreateStream call does not block longer than the -// timeout interval -func TestCreateStreamTimeoutWithUnresponsiveNode(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - // creates a regular node - nodes, identities := p2ptest.NodesFixture(t, - unittest.IdentifierFixture(), - "test_create_stream_timeout_with_unresponsive_node", - 1, - ) - require.Len(t, identities, 1) - - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) - - // create a silent node which never replies - listener, silentNodeId := p2pfixtures.SilentNodeFixture(t) - defer func() { - require.NoError(t, listener.Close()) - }() - - silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId) - require.NoError(t, err) - - timeout := 1 * time.Second - tctx, tcancel := context.WithTimeout(ctx, timeout) - defer tcancel() - - // attempt to create a stream from node 1 to node 2 and assert that it fails after timeout - grace := 100 * time.Millisecond - unittest.AssertReturnsBefore(t, - func() { - nodes[0].Host().Peerstore().AddAddrs(silentNodeInfo.ID, silentNodeInfo.Addrs, peerstore.AddressTTL) - _, err = nodes[0].CreateStream(tctx, silentNodeInfo.ID) - }, - timeout+grace) - assert.Error(t, err) -} - -// TestCreateStreamIsConcurrent tests that CreateStream calls can be made concurrently such that one blocked call -// does not block another concurrent call. 
-func TestCreateStreamIsConcurrent(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - // create two regular node - goodNodes, goodNodeIds := p2ptest.NodesFixture(t, - unittest.IdentifierFixture(), - "test_create_stream_is_concurrent", - 2, - ) - require.Len(t, goodNodeIds, 2) - - p2ptest.StartNodes(t, signalerCtx, goodNodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, goodNodes, cancel, 100*time.Millisecond) - - goodNodeInfo1, err := utils.PeerAddressInfo(*goodNodeIds[1]) - require.NoError(t, err) - - // create a silent node which never replies - listener, silentNodeId := p2pfixtures.SilentNodeFixture(t) - defer func() { - require.NoError(t, listener.Close()) - }() - silentNodeInfo, err := utils.PeerAddressInfo(silentNodeId) - require.NoError(t, err) - - // creates a stream to unresponsive node and makes sure that the stream creation is blocked - blockedCallCh := unittest.RequireNeverReturnBefore(t, - func() { - goodNodes[0].Host().Peerstore().AddAddrs(silentNodeInfo.ID, silentNodeInfo.Addrs, peerstore.AddressTTL) - _, _ = goodNodes[0].CreateStream(ctx, silentNodeInfo.ID) // this call will block - }, - 1*time.Second, - "CreateStream attempt to the unresponsive peer did not block") - - // requires same peer can still connect to the other regular peer without being blocked - unittest.RequireReturnsBefore(t, - func() { - goodNodes[0].Host().Peerstore().AddAddrs(goodNodeInfo1.ID, goodNodeInfo1.Addrs, peerstore.AddressTTL) - _, err := goodNodes[0].CreateStream(ctx, goodNodeInfo1.ID) - require.NoError(t, err) - }, - 1*time.Second, "creating stream to a responsive node failed while concurrently blocked on unresponsive node") - - // requires the CreateStream call to the unresponsive node was blocked while we attempted the CreateStream to the - // good address - unittest.RequireNeverClosedWithin(t, blockedCallCh, 1*time.Millisecond, - "CreateStream attempt to the unresponsive peer did not block after connecting to good node") - -} diff --git a/network/p2p/p2pnode/protocolPeerCache.go b/network/p2p/p2pnode/protocolPeerCache.go deleted file mode 100644 index 41fc42ef4d8..00000000000 --- a/network/p2p/p2pnode/protocolPeerCache.go +++ /dev/null @@ -1,108 +0,0 @@ -package p2pnode - -import ( - "fmt" - "sync" - - "github.com/libp2p/go-libp2p/core/event" - "github.com/libp2p/go-libp2p/core/host" - libp2pnet "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - "github.com/rs/zerolog" -) - -// ProtocolPeerCache store a mapping from protocol ID to peers who support that protocol -type ProtocolPeerCache struct { - protocolPeers map[protocol.ID]map[peer.ID]struct{} - sync.RWMutex -} - -func NewProtocolPeerCache(logger zerolog.Logger, h host.Host) (*ProtocolPeerCache, error) { - sub, err := h.EventBus(). 
- Subscribe([]interface{}{new(event.EvtPeerIdentificationCompleted), new(event.EvtPeerProtocolsUpdated)}) - if err != nil { - return nil, fmt.Errorf("could not subscribe to peer protocol update events: %w", err) - } - p := &ProtocolPeerCache{protocolPeers: make(map[protocol.ID]map[peer.ID]struct{})} - h.Network().Notify(&libp2pnet.NotifyBundle{ - DisconnectedF: func(n libp2pnet.Network, c libp2pnet.Conn) { - peer := c.RemotePeer() - if len(n.ConnsToPeer(peer)) == 0 { - p.RemovePeer(peer) - } - }, - }) - go p.consumeSubscription(logger, h, sub) - - return p, nil -} - -func (p *ProtocolPeerCache) RemovePeer(peerID peer.ID) { - p.Lock() - defer p.Unlock() - for pid, peers := range p.protocolPeers { - delete(peers, peerID) - if len(peers) == 0 { - delete(p.protocolPeers, pid) - } - } -} - -func (p *ProtocolPeerCache) AddProtocols(peerID peer.ID, protocols []protocol.ID) { - p.Lock() - defer p.Unlock() - for _, pid := range protocols { - peers, ok := p.protocolPeers[pid] - if !ok { - peers = make(map[peer.ID]struct{}) - p.protocolPeers[pid] = peers - } - peers[peerID] = struct{}{} - } -} - -func (p *ProtocolPeerCache) RemoveProtocols(peerID peer.ID, protocols []protocol.ID) { - p.Lock() - defer p.Unlock() - for _, pid := range protocols { - peers := p.protocolPeers[pid] - delete(peers, peerID) - if len(peers) == 0 { - delete(p.protocolPeers, pid) - } - } -} - -func (p *ProtocolPeerCache) GetPeers(pid protocol.ID) map[peer.ID]struct{} { - p.RLock() - defer p.RUnlock() - - // it is not safe to return a reference to the map, so we make a copy - peersCopy := make(map[peer.ID]struct{}, len(p.protocolPeers[pid])) - for peerID := range p.protocolPeers[pid] { - peersCopy[peerID] = struct{}{} - } - return peersCopy -} - -func (p *ProtocolPeerCache) consumeSubscription(logger zerolog.Logger, h host.Host, sub event.Subscription) { - defer sub.Close() - logger.Debug().Msg("starting peer protocol event subscription loop") - for e := range sub.Out() { - logger.Debug().Interface("event", e).Msg("received new peer protocol event") - switch evt := e.(type) { - case event.EvtPeerIdentificationCompleted: - protocols, err := h.Peerstore().GetProtocols(evt.Peer) - if err != nil { - logger.Err(err).Str("peer", evt.Peer.String()).Msg("failed to get protocols for peer") - continue - } - p.AddProtocols(evt.Peer, protocol.ConvertFromStrings(protocols)) - case event.EvtPeerProtocolsUpdated: - p.AddProtocols(evt.Peer, evt.Added) - p.RemoveProtocols(evt.Peer, evt.Removed) - } - } - logger.Debug().Msg("exiting peer protocol event subscription loop") -} diff --git a/network/p2p/p2pnode/protocolPeerCache_test.go b/network/p2p/p2pnode/protocolPeerCache_test.go deleted file mode 100644 index cc15d6cfc87..00000000000 --- a/network/p2p/p2pnode/protocolPeerCache_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package p2pnode_test - -import ( - "context" - "testing" - "time" - - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/protocol" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - fcrypto "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/network/p2p/p2pbuilder" - "github.com/onflow/flow-go/network/p2p/p2pnode" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestProtocolPeerCache(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // create three hosts, and a pcache for the first - h1, err := 
p2pbuilder.DefaultLibP2PHost(unittest.DefaultAddress, unittest.KeyFixture(fcrypto.ECDSASecp256k1)) - require.NoError(t, err) - pcache, err := p2pnode.NewProtocolPeerCache(zerolog.Nop(), h1) - require.NoError(t, err) - h2, err := p2pbuilder.DefaultLibP2PHost(unittest.DefaultAddress, unittest.KeyFixture(fcrypto.ECDSASecp256k1)) - require.NoError(t, err) - h3, err := p2pbuilder.DefaultLibP2PHost(unittest.DefaultAddress, unittest.KeyFixture(fcrypto.ECDSASecp256k1)) - require.NoError(t, err) - - // register each host on a separate protocol - p1 := protocol.ID("p1") - p2 := protocol.ID("p2") - p3 := protocol.ID("p3") - noopHandler := func(s network.Stream) {} - h1.SetStreamHandler(p1, noopHandler) - h2.SetStreamHandler(p2, noopHandler) - h3.SetStreamHandler(p3, noopHandler) - - // connect the hosts to each other - require.NoError(t, h1.Connect(ctx, *host.InfoFromHost(h2))) - require.NoError(t, h1.Connect(ctx, *host.InfoFromHost(h3))) - require.NoError(t, h2.Connect(ctx, *host.InfoFromHost(h3))) - - // check that h1's pcache reflects the protocols supported by h2 and h3 - assert.Eventually(t, func() bool { - peers2 := pcache.GetPeers(p2) - peers3 := pcache.GetPeers(p3) - _, ok2 := peers2[h2.ID()] - _, ok3 := peers3[h3.ID()] - return len(peers2) == 1 && len(peers3) == 1 && ok2 && ok3 - }, 3*time.Second, 50*time.Millisecond) - - // remove h2's support for p2 - h2.RemoveStreamHandler(p2) - - // check that h1's pcache reflects the change - assert.Eventually(t, func() bool { - return len(pcache.GetPeers(p2)) == 0 - }, 3*time.Second, 50*time.Millisecond) - - // add support for p4 on h2 and h3 - p4 := protocol.ID("p4") - h2.SetStreamHandler(p4, noopHandler) - h3.SetStreamHandler(p4, noopHandler) - - // check that h1's pcache reflects the change - assert.Eventually(t, func() bool { - peers4 := pcache.GetPeers(p4) - _, ok2 := peers4[h2.ID()] - _, ok3 := peers4[h3.ID()] - return len(peers4) == 2 && ok2 && ok3 - }, 3*time.Second, 50*time.Millisecond) -} diff --git a/network/p2p/pubsub.go b/network/p2p/pubsub.go index 8634f90c36f..c62cd114216 100644 --- a/network/p2p/pubsub.go +++ b/network/p2p/pubsub.go @@ -10,19 +10,14 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" + "github.com/onflow/flow-go/engine/collection" "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/network/channels" ) type ValidationResult int const ( - // PublicNetwork indicates that the unstaked public-side of the Flow blockchain that nodes can join and leave at will - // with no staking requirement. - PublicNetwork = true - // PrivateNetwork indicates that the staked private-side of the Flow blockchain that nodes can only join and leave - // with a staking requirement. - PrivateNetwork = false - ValidationAccept ValidationResult = iota ValidationIgnore ValidationReject @@ -33,6 +28,12 @@ type TopicValidatorFunc func(context.Context, peer.ID, *pubsub.Message) Validati // PubSubAdapter is the abstraction of the underlying pubsub logic that is used by the Flow network. type PubSubAdapter interface { component.Component + // CollectionClusterChangesConsumer is the interface for consuming the events of changes in the collection cluster. + // This is used to notify the node of changes in the collection cluster. + // PubSubAdapter implements this interface and consumes the events to be notified of changes in the clustering channels. + // The clustering channels are used by the collection nodes of a cluster to communicate with each other. 
+	// As the cluster (and hence the cluster channels) of collection nodes changes over time (per epoch), the node needs to be notified of these changes.
+	CollectionClusterChangesConsumer
 	// RegisterTopicValidator registers a validator for topic.
 	RegisterTopicValidator(topic string, topicValidator TopicValidatorFunc) error
@@ -53,6 +54,23 @@ type PubSubAdapter interface {
 	// For example, if current peer has subscribed to topics A and B, then ListPeers only return
 	// subscribed peers for topics A and B, and querying for topic C will return an empty list.
 	ListPeers(topic string) []peer.ID
+
+	// GetLocalMeshPeers returns the list of peers in the local mesh for the given topic.
+	// Args:
+	// - topic: the topic.
+	// Returns:
+	// - []peer.ID: the list of peers in the local mesh for the given topic.
+	GetLocalMeshPeers(topic channels.Topic) []peer.ID
+
+	// PeerScoreExposer returns the peer score exposer for the gossipsub adapter. The exposer is a read-only interface
+	// for querying peer scores and returns the local scoring table of the underlying gossipsub node.
+	// The exposer is only available if the gossipsub adapter was configured with a score tracer.
+	// If the gossipsub adapter was not configured with a score tracer, the exposer will be nil.
+	// Args:
+	// None.
+	// Returns:
+	// The peer score exposer for the gossipsub adapter.
+	PeerScoreExposer() PeerScoreExposer
 }
 
 // PubSubAdapterConfig abstracts the configuration for the underlying pubsub implementation.
@@ -65,19 +83,20 @@ type PubSubAdapterConfig interface {
 	// WithScoreTracer sets the tracer for the underlying pubsub score implementation.
 	// This is used to expose the local scoring table of the GossipSub node to its higher level components.
 	WithScoreTracer(tracer PeerScoreTracer)
-	WithInspectorSuite(GossipSubInspectorSuite)
-}
-
-// GossipSubControlMetricsObserver funcs used to observe gossipsub related metrics.
-type GossipSubControlMetricsObserver interface {
-	ObserveRPC(peer.ID, *pubsub.RPC)
+	WithRpcInspector(GossipSubRPCInspector)
+	WithPeerGater(topicDeliveryWeights map[string]float64, sourceDecay time.Duration)
+	WithValidateQueueSize(int)
 }
 
-// GossipSubRPCInspector app specific RPC inspector used to inspect and validate incoming RPC messages before they are processed by libp2p.
+// GossipSubRPCInspector abstracts the general behavior of an app specific RPC inspector specifically
+// used to inspect and validate incoming RPC messages. It is used to implement custom message validation logic. It is injected into
+// the GossipSubRouter and runs on every incoming RPC message before the message is processed by libp2p. If the message
+// is invalid, the RPC will be dropped.
// Implementations must:
//   - be concurrency safe
//   - be non-blocking
 type GossipSubRPCInspector interface {
+	collection.ClusterEvents
 	component.Component
 
 	// Name returns the name of the rpc inspector.
@@ -106,8 +125,12 @@ type Topic interface {
 
 // ScoreOptionBuilder abstracts the configuration for the underlying pubsub score implementation.
 type ScoreOptionBuilder interface {
+	component.Component
 	// BuildFlowPubSubScoreOption builds the pubsub score options as pubsub.Option for the Flow network.
-	BuildFlowPubSubScoreOption() pubsub.Option
+	BuildFlowPubSubScoreOption() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds)
+	// TopicScoreParams returns the topic score params for the given topic.
+	// If the topic score params for the given topic do not exist, it will return the default topic score params.
+ TopicScoreParams(*pubsub.Topic) *pubsub.TopicScoreParams } // Subscription is the abstraction of the underlying pubsub subscription that is used by the Flow network. @@ -145,6 +168,29 @@ type SubscriptionFilter interface { type PubSubTracer interface { component.Component pubsub.RawTracer + RpcControlTracking + // DuplicateMessageCount returns the current duplicate message count for the peer. + // Args: + // - peer.ID: the peer ID. + // Returns: + // - float64: duplicate message count. + DuplicateMessageCount(peer.ID) float64 + // GetLocalMeshPeers returns the list of peers in the mesh for the given topic. + // Args: + // - topic: the topic. + // Returns: + // - []peer.ID: the list of peers in the mesh for the given topic. + GetLocalMeshPeers(topic channels.Topic) []peer.ID +} + +// RpcControlTracking is the abstraction of the underlying libp2p control message tracker used to track message ids advertised by the iHave control messages. +// This collection of methods can ensure an iWant control message for a message-id corresponds to a broadcast iHave message id. Implementations +// must be non-blocking and concurrency safe. +type RpcControlTracking interface { + // LastHighestIHaveRPCSize returns the last highest size of iHaves sent in a rpc. + LastHighestIHaveRPCSize() int64 + // WasIHaveRPCSent checks if an iHave control message with the provided message ID was sent. + WasIHaveRPCSent(messageID string) bool } // PeerScoreSnapshot is a snapshot of the overall peer score at a given time. @@ -188,24 +234,26 @@ func (p PeerScoreSnapshot) IsWarning() bool { // Check overall score. switch { - case p.Score < 0: + case p.Score < -1: // If the overall score is negative, the peer is in warning state, it means that the peer is suspected to be // misbehaving at the GossipSub level. return true // Check app-specific score. - case p.AppSpecificScore < 0: + case p.AppSpecificScore < -1: // If the app specific score is negative, the peer is in warning state, it means that the peer behaves in a way // that is not allowed by the Flow protocol. return true // Check IP colocation factor. - case p.IPColocationFactor > 0: + case p.IPColocationFactor > 5: // If the IP colocation factor is positive, the peer is in warning state, it means that the peer is running on the - // same IP as another peer and is suspected to be a sybil node. + // same IP as another peer and is suspected to be a sybil node. For now, we set it to a high value to make sure + // that peers from the same operator are not marked as sybil nodes. + // TODO: this should be revisited once the collocation penalty is enabled. return true // Check behaviour penalty. - case p.BehaviourPenalty > 0: + case p.BehaviourPenalty > 20: // If the behaviour penalty is positive, the peer is in warning state, it means that the peer is suspected to be - // misbehaving at the GossipSub level, e.g. sending too many duplicate messages. + // misbehaving at the GossipSub level, e.g. sending too many duplicate messages. Setting it to 20 to reduce the noise; 20 is twice the threshold (defaultBehaviourPenaltyThreshold). return true // If none of the conditions are met, return false. default: diff --git a/network/p2p/scoring/README.md b/network/p2p/scoring/README.md index a965d324052..dda1cd7cb0b 100644 --- a/network/p2p/scoring/README.md +++ b/network/p2p/scoring/README.md @@ -73,6 +73,173 @@ scoreOption := NewScoreOption(config) 5. `AcceptPXThreshold`: The threshold above which a peer's score will result in accepting PX information with a prune from that peer. 
PX stands for "Peer Exchange" in the context of libp2p's gossipsub protocol. When a peer sends a PRUNE control message to another peer, it can include a list of other peers as PX information. The purpose of this is to help the pruned peer find new peers to replace the ones that have been pruned from its mesh. When a node receives a PRUNE message containing PX information, it can decide whether to connect to the suggested peers based on its own criteria. In this package, the `DefaultAcceptPXThreshold` is used to determine if the originating peer's penalty score is good enough to accept the PX information. If the originating peer's penalty score exceeds the threshold, the node will consider connecting to the suggested peers. 6. `OpportunisticGraftThreshold`: The threshold below which the median peer score in the mesh may result in selecting more peers with a higher score for opportunistic grafting. +### Flow Specific Scoring Parameters and Thresholds +# GossipSub Scoring Parameters Explained +1. `DefaultAppSpecificScoreWeight = 1`: This is the default weight for application-specific scoring. It basically tells us how important the application-specific score is in comparison to other scores. +2. `MaxAppSpecificPenalty = -100` and `MinAppSpecificPenalty = -1`: These values define the range for application-specific penalties. A peer can have a maximum penalty of -100 and a minimum penalty of -1. +3. `MaxAppSpecificReward = 100`: This is the maximum reward a peer can earn for good behavior. +4. `DefaultStakedIdentityReward = MaxAppSpecificReward`: This reward is given to peers that contribute positively to the network (i.e., no misbehavior). It’s to encourage them and prioritize them in neighbor selection. +5. `DefaultUnknownIdentityPenalty = MaxAppSpecificPenalty`: This penalty is given to a peer if it's not in the identity list. It's to discourage anonymity. +6. `DefaultInvalidSubscriptionPenalty = MaxAppSpecificPenalty`: This penalty is for peers that subscribe to topics they are not authorized to subscribe to. +7. `DefaultGossipThreshold = -99`: If a peer's penalty goes below this threshold, the peer is ignored for gossip. It means no gossip is sent to or received from that peer. +8. `DefaultPublishThreshold = -99`: If a peer's penalty goes below this threshold, self-published messages will not be sent to this peer. +9. `DefaultGraylistThreshold = -99`: If a peer's penalty goes below this threshold, it is graylisted. This means all incoming messages from this peer are ignored. +10. `DefaultAcceptPXThreshold = 99`: This is a threshold for accepting peers. If a peer sends information and its score is above this threshold, the information is accepted. +11. `DefaultOpportunisticGraftThreshold = MaxAppSpecificReward + 1`: This value is used to selectively connect to new peers if the median score of the current peers drops below this threshold. +12. `defaultScoreCacheSize = 1000`: Sets the default size of the cache used to store the application-specific penalty of peers. +13. `defaultDecayInterval = 1 * time.Minute`: Sets the default interval at which the score of a peer will be decayed. +14. `defaultDecayToZero = 0.01`: This is a threshold below which a decayed score is reset to zero. It prevents the score from decaying to a very small value. +15. `defaultTopicTimeInMeshQuantum` is a parameter in the GossipSub scoring system that represents a fixed time interval used to count the amount of time a peer stays in a topic mesh. 
+16. `defaultTopicInvalidMessageDeliveriesWeight` is set to -1.0 and is used to penalize peers that send invalid messages by applying it to the square of the number of such messages. A message is considered invalid if it is not properly signed. A peer will be disconnected if it sends around 14 invalid messages within a gossipsub heartbeat interval.
+17. `defaultTopicInvalidMessageDeliveriesDecay` is a decay factor set to 0.99. It is used to reduce the number of invalid message deliveries counted against a peer by 1% at each heartbeat interval. This prevents the peer from being disconnected if it stops sending invalid messages. The heartbeat interval in the gossipsub scoring system is set to 1 minute by default.
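+
+To make the numeric relationships above concrete, the following sketch restates them as Go constants. It is an illustration of the relationships described in this list, not a copy of the actual flow-go source:
+```go
+package scoring
+
+import "time"
+
+const (
+	// Extremes of the app-specific score range.
+	MaxAppSpecificPenalty = float64(-100)
+	MaxAppSpecificReward  = float64(100)
+
+	// Rewards and penalties expressed relative to the extremes.
+	DefaultStakedIdentityReward       = MaxAppSpecificReward  // well-behaved staked peers
+	DefaultUnknownIdentityPenalty     = MaxAppSpecificPenalty // peers not in the identity list
+	DefaultInvalidSubscriptionPenalty = MaxAppSpecificPenalty // unauthorized topic subscriptions
+
+	// A peer below -99 is effectively cut off: no gossip is exchanged with it,
+	// no self-published messages are sent to it, and its inbound messages are ignored.
+	DefaultGossipThreshold   = float64(-99)
+	DefaultPublishThreshold  = float64(-99)
+	DefaultGraylistThreshold = float64(-99)
+
+	// Decay bookkeeping.
+	defaultDecayInterval = 1 * time.Minute // how often scores decay
+	defaultDecayToZero   = 0.01            // magnitude below which a decayed score resets to zero
+)
+```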
+
+## GossipSub Message Delivery Scoring
+This section provides an overview of the GossipSub message delivery scoring mechanism used in the Flow network.
+It is designed to maintain an efficient, secure and stable peer-to-peer network by scoring each peer based on its message delivery performance.
+The system ensures the reliability of message propagation by scoring peers, which discourages malicious behaviors and enhances overall network performance.
+
+### Comprehensive System Overview
+The GossipSub message delivery scoring mechanism used in the Flow network is an integral component of its P2P communication model.
+It is designed to monitor and incentivize appropriate network behaviors by attributing scores to peers based on their message delivery performance.
+This scoring system is fundamental to ensuring that messages are reliably propagated across the network, creating a robust P2P communication infrastructure.
+
+The scoring system is per topic: it tracks the efficiency of peers in delivering messages in each specific topic they participate in.
+These per-topic scores then contribute to an overall score for each peer, providing a comprehensive view of a peer's effectiveness within the network.
+In GossipSub, a crucial aspect of a peer's responsibility is to relay messages effectively to other nodes in the network.
+The role of the scoring mechanism is to objectively assess a peer's efficiency in delivering these messages.
+It takes several factors into account to determine the effectiveness of the peers:
+
+1. **Message Delivery Rate** - A peer's ability to deliver messages quickly is a vital metric. Slow delivery could lead to network lags and inefficiency.
+2. **Message Delivery Volume** - A peer's capacity to deliver a large number of messages accurately and consistently.
+3. **Continuity of Performance** - The scoring mechanism tracks not only the rate and volume of the messages but also the consistency of a peer's performance over time.
+4. **Prevention of Malicious Behaviors** - The scoring system also helps in mitigating potential network attacks such as spamming and message replay attacks.
+
+The system utilizes several parameters to maintain and adjust the scores of the peers (a sketch of the resulting penalty computation appears below):
+- `defaultTopicMeshMessageDeliveriesDecay` (value: 0.5): This parameter dictates how rapidly a peer's message delivery count decays with time. A value of 0.5 indicates a 50% decay at each decay interval. This mechanism ensures that past performance does not disproportionately impact the current score of the peer.
+- `defaultTopicMeshMessageDeliveriesCap` (value: 1000): This parameter sets an upper limit on the number of message deliveries that can contribute to the score of a peer in a topic. With a cap set at 1000, it prevents the score from being overly influenced by large volumes of message deliveries, providing a balanced assessment of peer performance.
+- `defaultTopicMeshMessageDeliveryThreshold` (value: 0.1 * `defaultTopicMeshMessageDeliveriesCap` = 100): This threshold serves to identify under-performing peers. If a peer's message delivery count is below this threshold in a topic, the peer's score is penalized. This encourages peers to maintain a minimum level of performance.
+- `defaultTopicMeshMessageDeliveriesWeight` (value: -0.05 * `MaxAppSpecificReward` / (`defaultTopicMeshMessageDeliveryThreshold` ^ 2) = -5 * 10^-4): This weight is applied when penalizing under-performing peers. The penalty is proportional to the square of the difference between the actual message deliveries and the threshold, multiplied by this weight.
+- `defaultMeshMessageDeliveriesWindow` (value: `defaultDecayInterval` = 1 minute): This parameter defines the time window within which a message delivery is counted towards the score. This window is set to the decay interval, preventing replay attacks and counting only unique message deliveries.
+- `defaultMeshMessageDeliveriesActivation` (value: 2 * `defaultDecayInterval` = 2 minutes): This time interval is the grace period before the scoring system starts tracking a new peer's performance. It accounts for the time it takes for a new peer to fully integrate into the network.
+
+By continually updating and adjusting the scores of peers based on these parameters, the GossipSub message delivery scoring mechanism ensures a robust, efficient, and secure P2P network.
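+
+As a concrete (and purely illustrative) sketch of the formula above, with a hypothetical function name:
+```go
+// meshDeliveryPenalty computes the per-topic under-delivery penalty:
+// deliveries are capped, and only a shortfall below the threshold is
+// penalized, quadratically, with a negative weight.
+func meshDeliveryPenalty(actual, threshold, capacity, weight float64) float64 {
+	if actual > capacity {
+		actual = capacity // deliveries beyond the cap earn no extra credit
+	}
+	if actual >= threshold {
+		return 0 // meeting the threshold incurs no penalty
+	}
+	deficit := threshold - actual
+	return weight * deficit * deficit // weight < 0, so the result is a penalty
+}
+```
+With the defaults above (threshold 100, weight -5 * 10^-4), a peer delivering only 50 messages in a topic accrues -5 * 10^-4 * 50^2 = -1.25 for that topic, the situation of Scenario 2 below.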
+
+### Examples
+
+#### Scenario 1: Peer A Delivers Messages Within Cap and Above Threshold
+Let's assume a Peer A that consistently delivers 500 messages per decay interval. This is within the `defaultTopicMeshMessageDeliveriesCap` (1000) and above the `defaultTopicMeshMessageDeliveryThreshold` (100).
+As Peer A's deliveries are above the threshold and within the cap, its score will not be penalized. Instead, it will be maintained, promoting healthy network participation.
+
+#### Scenario 2: Peer B Delivers Messages Below Threshold
+Now, assume Peer B delivers 50 messages per decay interval, below the `defaultTopicMeshMessageDeliveryThreshold` (100).
+In this case, the score of Peer B will be penalized because its delivery rate is below the threshold. The penalty is calculated as `-|w| * (actual - threshold)^2`, where `w` is the weight (`defaultTopicMeshMessageDeliveriesWeight`), `actual` is the actual messages delivered (50), and `threshold` is the delivery threshold (100).
+
+#### Scenario 3: Peer C Delivers Messages Exceeding the Cap
+Consider Peer C, which delivers 1500 messages per decay interval, exceeding the `defaultTopicMeshMessageDeliveriesCap` (1000).
+In this case, even though Peer C is highly active, its score will not increase further once it hits the cap (1000). This is to avoid overemphasis on high delivery counts, which could skew the scoring system.
+
+#### Scenario 4: Peer D Joins a Topic Mesh
+When a new Peer D joins a topic mesh, it is given a grace period of `defaultMeshMessageDeliveriesActivation` (2 decay intervals) before its message delivery performance is tracked. This grace period allows the peer to set up and begin receiving messages from the network.
+Remember, the parameters and scenarios described here aim to maintain a stable, efficient, and secure peer-to-peer network by carefully tracking and scoring each peer's message delivery performance.
+
+#### Scenario 5: Message Delivery Decay
+To better understand how the message delivery decay (`defaultTopicMeshMessageDeliveriesDecay`) works in the GossipSub protocol, let's examine a hypothetical scenario.
+Let's say we have a peer named `Peer A` who is actively participating in `Topic X`. `Peer A` has successfully delivered 800 messages in `Topic X` over a given time period.
+**Initial State**: At this point, `Peer A`'s message delivery count for `Topic X` is 800. Now, the decay interval elapses without `Peer A` delivering any new messages in `Topic X`.
+**After One Decay Interval**: Given that our `defaultTopicMeshMessageDeliveriesDecay` value is 0.5, after one decay interval, `Peer A`'s message delivery count for `Topic X` will decay by 50%. Therefore, `Peer A`'s count is now:
+
+    800 (previous message count) * 0.5 (decay factor) = 400
+
+**After Two Decay Intervals**: If `Peer A` still hasn't delivered any new messages in `Topic X` during the next decay interval, the decay is applied again, further reducing the message delivery count:
+
+    400 (current message count) * 0.5 (decay factor) = 200
+
+This process continues at every decay interval, halving `Peer A`'s message delivery count for `Topic X` until `Peer A` delivers new messages in `Topic X` or the count reaches zero.
+This decay process ensures that a peer cannot rest on its past deliveries; it must continually contribute to the network to maintain its score.
+It helps maintain a lively and dynamic network environment, incentivizing constant active participation from all peers. A small sketch of this halving loop follows Scenario 6 below.
+
+#### Scenario 6: Replay Attack
+The `defaultMeshMessageDeliveriesWindow` and `defaultMeshMessageDeliveriesActivation` parameters play a crucial role in preventing replay attacks in the GossipSub protocol. Let's illustrate this with an example.
+Consider a scenario where we have three peers: `Peer A`, `Peer B`, and `Peer C`. All three peers are active participants in `Topic X`.
+**Initial State**: At Time = 0: `Peer A` generates and broadcasts a new message `M` in `Topic X`. `Peer B` and `Peer C` receive this message from `Peer A` and update their message caches accordingly.
+**After a Few Seconds**: At Time = 30 seconds: `Peer B`, with malicious intent, tries to rebroadcast the same message `M` back into `Topic X`.
+Given that our `defaultMeshMessageDeliveriesWindow` value is equal to the decay interval (let's assume 1 minute), `Peer C` would have seen the original message `M` from `Peer A` less than one minute ago.
+This is within the `defaultMeshMessageDeliveriesWindow`. Because `Peer A` (the original sender) is different from `Peer B` (the current sender), this delivery will be counted towards `Peer B`'s message delivery score in `Topic X`.
+**After One Minute**: At Time = 61 seconds: `Peer B` tries to rebroadcast the same message `M` again.
+Now, more than a minute has passed since `Peer C` first saw the message `M` from `Peer A`. This is outside the `defaultMeshMessageDeliveriesWindow`.
+Therefore, the message `M` from `Peer B` will not count towards `Peer B`'s message delivery score in `Topic X`, and `Peer B` still needs to fill up its threshold of message deliveries in order not to be penalized for under-performing.
+This effectively discourages replays of messages older than the `defaultMeshMessageDeliveriesWindow`.
+This mechanism, combined with other parameters, helps maintain the security and efficiency of the network by discouraging harmful behaviors such as message replay attacks.
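+
+The halving walked through in Scenario 5, as a tiny illustrative Go loop (hypothetical function name):
+```go
+// decayedDeliveryCount applies the per-interval decay: with decay = 0.5,
+// a count of 800 becomes 400 after one interval and 200 after two.
+func decayedDeliveryCount(count, decay float64, intervals int) float64 {
+	for i := 0; i < intervals; i++ {
+		count *= decay
+	}
+	return count
+}
+```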
+
+## Mitigating iHave Broken Promises Attacks in GossipSub Protocol
+### What is an iHave Broken Promise Attack?
+In the GossipSub protocol, peers gossip information about new messages to a subset of random peers (outside their local mesh) in the form of an "iHave" message, which tells the receiving peer what messages the sender has.
+The receiving peer then replies with an "iWant" message, requesting the messages it doesn't have. Note that for the peers in the local mesh, the actual new messages are sent instead of an "iHave" message (i.e., eager push). However, the
+iHave-iWant protocol is a complementary mechanism to ensure that the information is disseminated to the entire network in a timely manner (i.e., lazy pull).
+
+An "iHave Broken Promise" attack occurs when a peer advertises many "iHave"s for messages but doesn't respond to the "iWant" requests for those messages.
+This not only hinders the effective dissemination of information but can also strain the network with redundant requests. Hence, we classify it as spam behavior mounting a DoS attack on the network.
+
+### Detecting iHave Broken Promise Attacks
+Detecting iHave Broken Promise Attacks is done by GossipSub itself. On each incoming RPC from a remote node, the local GossipSub node checks whether the RPC contains an iHave message. It then samples one (and only one) iHave message
+randomly out of the entire set of iHave messages piggybacked on the incoming RPC. If the sampled iHave promise is not followed by delivery of the actual message, the local GossipSub node considers this an iHave broken promise and
+increases the behavior penalty counter for that remote node. Hence, incrementing the behavior penalty counter for a remote peer is done per RPC containing at least one iHave broken promise and not per iHave message.
+Note that the behavior penalty counter also keeps track of GRAFT flood attacks, which a remote peer mounts by sending many GRAFTs while it is on a PRUNE backoff by the local node. Mitigating iHave broken promise attacks also
+mitigates GRAFT flood attacks.
+
+### Configuring GossipSub Parameters
+In order to mitigate iHave broken promise attacks, GossipSub expects the application layer (i.e., the Flow protocol) to properly configure the relevant scoring parameters, notably:
+
+- `BehaviourPenaltyThreshold` is set to `defaultBehaviourPenaltyThreshold`, i.e., `10`.
+- `BehaviourPenaltyWeight` is set to `defaultBehaviourPenaltyWeight`, i.e., `0.01 * MaxAppSpecificPenalty`.
+- `BehaviourPenaltyDecay` is set to `defaultBehaviourPenaltyDecay`, i.e., `0.99`.
+
+#### 1. `defaultBehaviourPenaltyThreshold`
+This parameter sets the threshold for when the behavior of a peer is considered bad. Misbehavior is defined as advertising an iHave without responding to the iWants (iHave broken promises), or attempting to GRAFT while the peer is on a PRUNE backoff by the local node.
+If a remote peer sends an RPC that advertises at least one iHave for a message but doesn't respond to the iWant requests for that message within the next `3 seconds`, the peer misbehavior counter is incremented by `1`. This threshold is set to `10`, meaning that we tolerate at most 10 such RPCs containing iHave broken promises.
+After this, the peer is penalized for every excess RPC containing iHave broken promises. The counter decays by `0.99` every decay interval (`defaultDecayInterval`), i.e., every minute.
+
+#### 2. `defaultBehaviourPenaltyWeight`
+This is the weight applied as a penalty when a peer's misbehavior goes beyond the `defaultBehaviourPenaltyThreshold`.
+The penalty is applied to the square of the difference between the misbehavior counter and the threshold, i.e., -|w| * (misbehavior counter - threshold)^2, where `|w|` is the absolute value of the `defaultBehaviourPenaltyWeight`.
+Note that `defaultBehaviourPenaltyWeight` is a negative value, meaning that the penalty is applied in the opposite direction of the misbehavior counter. For the sake of illustration, we use the notation `-|w|` to denote that a negative penalty is applied.
+We set `defaultBehaviourPenaltyWeight` to `0.01 * MaxAppSpecificPenalty`, meaning a peer misbehaving `10` times more than the threshold (i.e., a counter of `10 + 10`) will lose its entire `MaxAppSpecificReward`, which is a reward given to all staked nodes in the Flow blockchain.
+This also means that a peer misbehaving `sqrt(2) * 10` times more than the threshold will cause the peer score to drop below the `MaxAppSpecificPenalty`, which is also below the `GraylistThreshold`, and the peer will be graylisted (i.e., all incoming and outgoing GossipSub RPCs from and to that peer will be rejected).
+This means the peer is temporarily disconnected from the network, preventing it from causing further harm. The penalty computation is sketched in the code block below.
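+
+A minimal sketch of that computation (illustrative; the function name is hypothetical, the constants are the defaults above):
+```go
+// behaviourPenalty applies -|w| * (counter - threshold)^2 once the
+// misbehavior counter exceeds the threshold; at or below it, no penalty.
+func behaviourPenalty(counter float64) float64 {
+	const threshold = 10.0       // defaultBehaviourPenaltyThreshold
+	const weight = 0.01 * -100.0 // defaultBehaviourPenaltyWeight = 0.01 * MaxAppSpecificPenalty
+	if counter <= threshold {
+		return 0
+	}
+	excess := counter - threshold
+	return weight * excess * excess
+}
+```
+For example, behaviourPenalty(20) = -100, exactly cancelling `MaxAppSpecificReward`, while behaviourPenalty(10 + sqrt(2)*10) ≈ -200, below the `GraylistThreshold` of -99.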
+
+#### 3. `defaultBehaviourPenaltyDecay`
+This is the decay factor for the misbehavior counter of a peer. The counter is multiplied by the `defaultBehaviourPenaltyDecay` parameter (`0.99`) per decay interval, which is currently every 1 minute.
+This parameter helps to gradually reduce the effect of past misbehaviors and provides a chance for penalized nodes to rejoin the network. A very slow decay rate can help identify and isolate persistent offenders, while also allowing potentially honest nodes that had transient issues to regain their standing in the network.
+The duration a peer remains graylisted is governed by the choice of `defaultBehaviourPenaltyWeight` and the decay parameters.
+Based on the given configuration, a peer which has misbehaved on `sqrt(2) * 10` RPCs more than the threshold will get graylisted (disconnected at the GossipSub level).
+With the decay interval set to 1 minute and a decay value of 0.99, such a graylisted peer would be expected to be reconnected after roughly 527 minutes.
+This is calculated by solving for `x` in the equation `(0.99)^x * (sqrt(2) * 10)^2 * MaxAppSpecificPenalty > GraylistThreshold`.
+Simplifying, we find `x` to be approximately `527` decay intervals, or roughly `527` minutes.
+This is the estimated time it would take for a severely misbehaving peer to have its penalty decayed enough to exceed the `GraylistThreshold` and thus be reconnected to the network.
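+
+The same calculation done numerically (illustrative only; constants are the defaults above):
+```go
+// decayIntervalsUntilReconnect iterates the decayed penalty from the
+// equation above until it rises back above the graylist threshold.
+func decayIntervalsUntilReconnect() int {
+	penalty := 200.0 * -100.0 // (sqrt(2)*10)^2 * MaxAppSpecificPenalty
+	x := 0
+	for penalty <= -99 { // GraylistThreshold
+		penalty *= 0.99 // defaultBehaviourPenaltyDecay, applied once per minute
+		x++
+	}
+	return x // ≈ 529 here; rounding in the prose puts it at roughly 527
+}
+```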
+
+### Example Scenarios
+**Scenario 1: Misbehaving Below Threshold**
+In this scenario, consider peer `B`, which has recently joined the network and is taking part in GossipSub.
+This peer advertises to peer `A` many `iHave` messages over an RPC, but when peer `A` requests these messages with `iWant`s, peer `B` fails to deliver them within 3 seconds.
+This action constitutes an _iHave broken promise_ for a single RPC, and peer `A` increases the local behavior penalty counter of peer `B` by 1.
+If peer `B` commits this misbehavior infrequently, such that the total number of these RPCs does not exceed the `defaultBehaviourPenaltyThreshold` (set to 10 in our configuration),
+the misbehavior counter for this peer will increment by 1 for each RPC, but no additional penalty will be applied.
+The misbehavior counter decays by a factor of `defaultBehaviourPenaltyDecay` (0.99) every minute, allowing the peer to recover from these minor infractions without significant disruption.
+
+**Scenario 2: Misbehaving Above Threshold But Below Graylisting**
+Now consider that peer `B` frequently sends RPCs advertising many `iHaves` to peer `A` but fails to deliver the promised messages.
+If the number of these misbehaviors exceeds our threshold (10 in our configuration), peer `B` is penalized by the local GossipSub mechanism of peer `A`.
+The amount of the penalty is determined by the `defaultBehaviourPenaltyWeight` (set to 0.01 * MaxAppSpecificPenalty) applied to the square of the difference between the misbehavior counter and the threshold.
+This penalty will progressively affect the peer's score, deteriorating its reputation in the local GossipSub scoring system of node `A`, but does not yet result in disconnection or graylisting.
+The peer has a chance to amend its behavior before crossing into graylisting territory by ceasing its misbehavior and letting the penalty decay.
+When peer `B` has a deteriorated score at node `A`, it will be less likely to be selected by node `A` as a local mesh peer (i.e., to directly receive new messages from node `A`), and is deprived of the opportunity to receive new messages earlier through node `A`.
+
+**Scenario 3: Graylisting**
+Now assume that peer `B` has been continually misbehaving, with RPCs including iHave broken promises exceeding the threshold by `sqrt(2) * 10`.
+At this point, the peer's score drops below the `GraylistThreshold` due to the `defaultBehaviourPenaltyWeight` applied to the excess misbehavior.
+The peer is then graylisted by peer `A`, i.e., peer `A` rejects all RPCs to and from peer `B` at the GossipSub level.
+In our configuration, peer `B` will stay disconnected for at least `527` decay intervals, or approximately `527` minutes.
+This gives a strong disincentive for the peer to continue this behavior and also gives it time to recover and eventually be reconnected to the network.
+
 ## Customization
 The scoring mechanism can be easily customized to suit the needs of the Flow network. This includes changing the scoring parameters, thresholds, and the scoring function itself.
 You can customize the scoring parameters and thresholds by using the various setter methods provided in the `ScoreOptionConfig` object. Additionally, you can provide a custom app-specific scoring function through the `SetAppSpecificScoreFunction` method.
@@ -84,7 +251,6 @@
 Example of setting custom app-specific scoring function:
 ```go
 config.SetAppSpecificScoreFunction(customAppSpecificScoreFunction)
 ```
-
 ## Peer Scoring System Integration
 The peer scoring system is integrated with the GossipSub protocol through the `ScoreOption` configuration option.
 This option is passed to the GossipSub at the time of initialization.
@@ -92,4 +258,38 @@
 ```go
 flowPubSubOption := scoreOption.BuildFlowPubSubScoreOption()
 gossipSubOption := scoreOption.BuildGossipSubScoreOption()
+```
+
+# Caching Application Specific Score
+![app-specific-score-cache.png](app-specific-score-cache.png)
+The application-specific score of a peer is part of its overall score in the GossipSub protocol. In contrast to the rest of the peer's GossipSub score, which is computed internally by the GossipSub protocol, the application-specific score of a peer is computed externally by the application, i.e., the Flow protocol-level semantics.
+As the figure above illustrates, GossipSub's peer scoring mechanism invokes the application-specific scoring function on a peer id upon receiving a gossip message from that peer.
+This means that the application-specific score of a peer is computed every time a gossip message is received from that peer.
+This can be computationally expensive, especially when the network is large and the number of gossip messages is high.
+As shown by the figure above, each time the application-specific score of a peer is computed, it is computed from scratch by computing the spam penalty, staking score, and subscription penalty. Each of these computations involves a cache lookup and a computation.
+Hence, a single computation of the application-specific score of a peer involves 3 cache lookups and 3 computations.
+As the application-specific score of a peer is not expected to change frequently, we can cache the score of a peer and reuse it for a certain period of time.
+By caching the application-specific score of a peer, we reduce the number of cache lookups and computations from 3 to 1 per score request, a 66% reduction in the computational overhead of the scoring mechanism.
+The caching mechanism is implemented in the `GossipSubAppSpecificScoreRegistry` struct. Each time the application-specific score of a peer is requested by the GossipSub protocol, the registry checks whether the score of the peer is cached. If it is, the cached score is returned. Otherwise, a score of zero is returned, and a request for the application-specific score of the peer is queued to the `appScoreUpdateWorkerPool` to be computed asynchronously. Once computed, the score is stored in the `appScoreCache`.
+Each score record in the cache is associated with a TTL (time-to-live) value, which is the duration for which the score is valid.
+When the retrieved score is expired, the expired score is still returned to the GossipSub protocol, but the score is refreshed asynchronously in the background by submitting a request to the `appScoreUpdateWorkerPool`. A sketch of this read path follows.
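+
+A minimal sketch of that read path, with hypothetical helpers standing in for the actual registry internals:
+```go
+// scoreRecord is a cached app-specific score plus its last update time.
+type scoreRecord struct {
+	score     float64
+	updatedAt time.Time
+}
+
+// cachedAppSpecificScore serves a cached score if present, returns zero and
+// queues an async computation on a miss, and serves the stale score while
+// queueing a background refresh once the TTL has elapsed.
+func cachedAppSpecificScore(records map[peer.ID]scoreRecord, ttl time.Duration, queueUpdate func(peer.ID), id peer.ID) float64 {
+	rec, ok := records[id]
+	if !ok {
+		queueUpdate(id) // stand-in for submitting to appScoreUpdateWorkerPool
+		return 0
+	}
+	if time.Since(rec.updatedAt) > ttl {
+		queueUpdate(id) // expired: serve the stale score, refresh in the background
+	}
+	return rec.score
+}
+```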
+      score-update-request-queue-size: 10_000
+      # score ttl is the time to live for the app specific score. Once the score expires, a new request is sent to the app specific score provider to update the score.
+      # Until the score is updated, the previous score is used.
+      score-ttl: 1m
+```
\ No newline at end of file
diff --git a/network/p2p/scoring/app-specific-score-cache.png b/network/p2p/scoring/app-specific-score-cache.png
new file mode 100644
index 00000000000..fe5b24dcbe4
Binary files /dev/null and b/network/p2p/scoring/app-specific-score-cache.png differ
diff --git a/network/p2p/scoring/app_score_test.go b/network/p2p/scoring/app_score_test.go
index 52dee463e84..1a5b728ca04 100644
--- a/network/p2p/scoring/app_score_test.go
+++ b/network/p2p/scoring/app_score_test.go
@@ -9,14 +9,17 @@ import (
 	mocktestify "github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"

+	"github.com/onflow/flow-go/config"
 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/messages"
 	"github.com/onflow/flow-go/module/id"
 	"github.com/onflow/flow-go/module/irrecoverable"
 	"github.com/onflow/flow-go/module/mock"
 	"github.com/onflow/flow-go/network/channels"
 	"github.com/onflow/flow-go/network/internal/p2pfixtures"
+	"github.com/onflow/flow-go/network/message"
 	"github.com/onflow/flow-go/network/p2p"
-	"github.com/onflow/flow-go/network/p2p/scoring"
+	p2pconfig "github.com/onflow/flow-go/network/p2p/config"
 	p2ptest "github.com/onflow/flow-go/network/p2p/test"
 	flowpubsub "github.com/onflow/flow-go/network/validator/pubsub"
 	"github.com/onflow/flow-go/utils/unittest"
@@ -33,14 +36,14 @@ func TestFullGossipSubConnectivity(t *testing.T) {
 	// two groups of non-access nodes and one group of access nodes.
 	groupOneNodes, groupOneIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 5,
-		p2ptest.WithRole(flow.RoleConsensus),
-		p2ptest.WithPeerScoringEnabled(idProvider))
+		idProvider,
+		p2ptest.WithRole(flow.RoleConsensus))
 	groupTwoNodes, groupTwoIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 5,
-		p2ptest.WithRole(flow.RoleCollection),
-		p2ptest.WithPeerScoringEnabled(idProvider))
+		idProvider,
+		p2ptest.WithRole(flow.RoleCollection))
 	accessNodeGroup, accessNodeIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 5,
-		p2ptest.WithRole(flow.RoleAccess),
-		p2ptest.WithPeerScoringEnabled(idProvider))
+		idProvider,
+		p2ptest.WithRole(flow.RoleAccess))

 	ids := append(append(groupOneIds, groupTwoIds...), accessNodeIds...)
 	nodes := append(append(groupOneNodes, groupTwoNodes...), accessNodeGroup...)
@@ -54,8 +57,8 @@ func TestFullGossipSubConnectivity(t *testing.T) {
 		_, ok := provider.ByPeerID(peerId)
 		return ok
 	})
-	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
-	defer p2ptest.StopNodes(t, nodes, cancel, 2*time.Second)
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)

 	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
@@ -90,82 +93,63 @@ func TestFullGossipSubConnectivity(t *testing.T) {
 	// checks end-to-end message delivery works
 	// each node sends a distinct message to all and checks that all nodes receive it.
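+	// Each message below is wrapped in an outgoing message scope (message.NewOutgoingScope), which binds the
+	// target node IDs, the topic, the payload, the codec's encode function, and the pubsub protocol type before publishing.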
 	for _, node := range nodes {
-		proposalMsg := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channels.PushBlocks)
-		require.NoError(t, node.Publish(ctx, blockTopic, proposalMsg))
+		outgoingMessageScope, err := message.NewOutgoingScope(
+			ids.NodeIDs(),
+			channels.TopicFromChannel(channels.PushBlocks, sporkId),
+			(*messages.Proposal)(unittest.ProposalFixture()),
+			unittest.NetworkCodec().Encode,
+			message.ProtocolTypePubSub)
+		require.NoError(t, err)
+		require.NoError(t, node.Publish(ctx, outgoingMessageScope))

 		// checks that the message is received by all nodes.
 		ctx1s, cancel1s := context.WithTimeout(ctx, 5*time.Second)
-		p2pfixtures.SubsMustReceiveMessage(t, ctx1s, proposalMsg, groupOneSubs)
-		p2pfixtures.SubsMustReceiveMessage(t, ctx1s, proposalMsg, accessNodeSubs)
-		p2pfixtures.SubsMustReceiveMessage(t, ctx1s, proposalMsg, groupTwoSubs)
+		expectedReceivedData, err := outgoingMessageScope.Proto().Marshal()
+		require.NoError(t, err)
+		p2pfixtures.SubsMustReceiveMessage(t, ctx1s, expectedReceivedData, groupOneSubs)
+		p2pfixtures.SubsMustReceiveMessage(t, ctx1s, expectedReceivedData, accessNodeSubs)
+		p2pfixtures.SubsMustReceiveMessage(t, ctx1s, expectedReceivedData, groupTwoSubs)
 		cancel1s()
 	}
 }

-// TestFullGossipSubConnectivityAmongHonestNodesWithMaliciousMajority is part two of testing pushing access nodes to the edges of the network.
+// TestFullGossipSubConnectivityAmongHonestNodesWithMaliciousMajority tests pushing access nodes to the edges of the network.
 // This test proves that if access nodes are PUSHED to the edge of the network, even their malicious majority cannot partition
 // the network of honest nodes.
+// The scenario tests whether two honest nodes end up in each other's topic mesh on GossipSub
+// when the network topology is a complete graph (i.e., full topology) and a malicious majority of access nodes is present.
+// The honest nodes (i.e., non-Access nodes) are enabled with peer scoring.
 func TestFullGossipSubConnectivityAmongHonestNodesWithMaliciousMajority(t *testing.T) {
 	// Note: if this test is ever flaky, this means a bug in our scoring system. Please escalate to the team instead of skipping.
-	total := 10
-	for i := 0; i < total; i++ {
-		if !testGossipSubMessageDeliveryUnderNetworkPartition(t, true) {
-			// even one failure should not happen, as it means that malicious majority can partition the network
-			// with our peer scoring parameters.
-			require.Fail(t, "honest nodes could not exchange message on GossipSub")
-		}
-	}
-}
-
-// TestNetworkPartitionWithNoHonestPeerScoringInFullTopology is part one of testing pushing access nodes to the edges of the network.
-// This test proves that if access nodes are NOT pushed to the edge of network, a malicious majority of them can
-// partition the network by disconnecting honest nodes from each other even when the network topology is a complete graph (i.e., full topology).
-func TestNetworkPartitionWithNoHonestPeerScoringInFullTopology(t *testing.T) {
-	unittest.SkipUnless(t, unittest.TEST_FLAKY, "to be fixed later")
-	total := 100
-	for i := 0; i < total; i++ {
-		// false means no honest peer scoring.
- if !testGossipSubMessageDeliveryUnderNetworkPartition(t, false) { - return // partition is successful - } - } - require.Fail(t, "expected at least one network partition") -} - -// testGossipSubMessageDeliveryUnderNetworkPartition tests that whether two honest nodes can exchange messages on GossipSub -// when the network topology is a complete graph (i.e., full topology) and a malicious majority of access nodes are present. -// If honestPeerScoring is true, then the honest nodes are enabled with peer scoring. -// A true return value means that the two honest nodes can exchange messages. -// A false return value means that the two honest nodes cannot exchange messages within the given timeout. -func testGossipSubMessageDeliveryUnderNetworkPartition(t *testing.T, honestPeerScoring bool) bool { ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) sporkId := unittest.IdentifierFixture() idProvider := mock.NewIdentityProvider(t) - // two (honest) consensus nodes - opts := []p2ptest.NodeFixtureParameterOption{p2ptest.WithRole(flow.RoleConsensus)} - if honestPeerScoring { - opts = append(opts, p2ptest.WithPeerScoringEnabled(idProvider)) - } - con1Node, con1Id := p2ptest.NodeFixture(t, sporkId, t.Name(), opts...) - con2Node, con2Id := p2ptest.NodeFixture(t, sporkId, t.Name(), opts...) + defaultConfig, err := config.DefaultConfig() + require.NoError(t, err) + + // override the default config to make the mesh tracer log more frequently + defaultConfig.NetworkConfig.GossipSub.RpcTracer.LocalMeshLogInterval = time.Second + + con1Node, con1Id := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.OverrideFlowConfig(defaultConfig)) + con2Node, con2Id := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.OverrideFlowConfig(defaultConfig)) // create > 2 * 12 malicious access nodes // 12 is the maximum size of default GossipSub mesh. - // We want to make sure that it is unlikely for honest nodes to be in the same mesh (hence messages from - // one honest node to the other is routed through the malicious nodes). + // We want to make sure that it is unlikely for honest nodes to be in the same mesh without peer scoring. accessNodeGroup, accessNodeIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 30, + idProvider, p2ptest.WithRole(flow.RoleAccess), - p2ptest.WithPeerScoringEnabled(idProvider), // overrides the default peer scoring parameters to mute GossipSub traffic from/to honest nodes. - p2ptest.WithPeerScoreParamsOption(&p2p.PeerScoringConfig{ - AppSpecificScoreParams: maliciousAppSpecificScore(flow.IdentityList{&con1Id, &con2Id}), - })) + p2ptest.EnablePeerScoringWithOverride(&p2p.PeerScoringConfigOverride{ + AppSpecificScoreParams: maliciousAppSpecificScore(flow.IdentityList{&con1Id, &con2Id}, defaultConfig.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol), + }), + ) allNodes := append([]p2p.LibP2PNode{con1Node, con2Node}, accessNodeGroup...) - allIds := append([]*flow.Identity{&con1Id, &con2Id}, accessNodeIds...) + allIds := append(flow.IdentityList{&con1Id, &con2Id}, accessNodeIds...) 
provider := id.NewFixedIdentityProvider(allIds) idProvider.On("ByPeerID", mocktestify.Anything).Return( @@ -177,56 +161,78 @@ func testGossipSubMessageDeliveryUnderNetworkPartition(t *testing.T, honestPeerS return ok }).Maybe() - p2ptest.StartNodes(t, signalerCtx, allNodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, allNodes, cancel, 2*time.Second) + p2ptest.StartNodes(t, signalerCtx, allNodes) + defer p2ptest.StopNodes(t, allNodes, cancel) blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) - logger := unittest.Logger() - // all nodes subscribe to block topic (common topic among all roles) - _, err := con1Node.Subscribe(blockTopic, flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter())) + _, err = con1Node.Subscribe(blockTopic, flowpubsub.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())) require.NoError(t, err) - con2Sub, err := con2Node.Subscribe(blockTopic, flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter())) + _, err = con2Node.Subscribe(blockTopic, flowpubsub.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())) require.NoError(t, err) // access node group accessNodeSubs := make([]p2p.Subscription, len(accessNodeGroup)) for i, node := range accessNodeGroup { - sub, err := node.Subscribe(blockTopic, flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter())) - require.NoError(t, err) + sub, err := node.Subscribe(blockTopic, flowpubsub.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())) + require.NoError(t, err, "access node %d failed to subscribe to block topic", i) accessNodeSubs[i] = sub } // let nodes reside on a full topology, hence no partition is caused by the topology. p2ptest.LetNodesDiscoverEachOther(t, ctx, allNodes, allIds) - proposalMsg := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channels.PushBlocks) - require.NoError(t, con1Node.Publish(ctx, blockTopic, proposalMsg)) - - // we check that whether within a one-second window the message is received by the other honest consensus node. - // the one-second window is important because it triggers the heartbeat of the con1Node to perform a lazy pull (iHave). - // And con1Node may randomly choose con2Node as the peer to perform the lazy pull. - // However, under a network partition con2Node is not in the mesh of con1Node, and hence is deprived of the eager push from con1Node. - // - // If no honest peer scoring is enabled, then con1Node and con2Node are less-likely to be in the same mesh, and hence the message is not delivered. - // If honest peer scoring is enabled, then con1Node and con2Node are certainly in the same mesh, and hence the message is delivered. 
-	ctx1s, cancel1s := context.WithTimeout(ctx, 1*time.Second)
-	defer cancel1s()
-	return p2pfixtures.HasSubReceivedMessage(t, ctx1s, proposalMsg, con2Sub)
+	// checks whether con1 and con2 are in each other's mesh
+	tick := time.Second        // interval at which the meshes are checked
+	timeout := 5 * time.Second // deadline for both honest nodes to appear in each other's mesh
+
+	ticker := time.NewTicker(tick)
+	defer ticker.Stop()
+	timeoutCh := time.After(timeout)
+
+	con1HasCon2 := false // denotes whether con1 has con2 in its mesh
+	con2HasCon1 := false // denotes whether con2 has con1 in its mesh
+	for {
+		select {
+		case <-ticker.C:
+			con1BlockTopicPeers := con1Node.GetLocalMeshPeers(blockTopic)
+			for _, p := range con1BlockTopicPeers {
+				if p == con2Node.ID() {
+					con1HasCon2 = true
+					break // con1 has con2 in its mesh, break out of the current loop
+				}
+			}
+
+			con2BlockTopicPeers := con2Node.GetLocalMeshPeers(blockTopic)
+			for _, p := range con2BlockTopicPeers {
+				if p == con1Node.ID() {
+					con2HasCon1 = true
+					break // con2 has con1 in its mesh, break out of the current loop
+				}
+			}
+
+			if con2HasCon1 && con1HasCon2 {
+				return
+			}
+
+		case <-timeoutCh:
+			require.Fail(t, "timed out waiting for con1 and con2 to appear in each other's mesh; honest nodes are not in each other's topic mesh on GossipSub")
+		}
+	}
 }

 // maliciousAppSpecificScore returns a malicious app specific penalty function that rewards the malicious node and
 // punishes the honest nodes.
-func maliciousAppSpecificScore(honestIds flow.IdentityList) func(peer.ID) float64 {
+func maliciousAppSpecificScore(honestIds flow.IdentityList, optionCfg p2pconfig.ProtocolLevelGossipSubScoreParams) func(peer.ID) float64 {
 	honestIdProvider := id.NewFixedIdentityProvider(honestIds)
 	return func(p peer.ID) float64 {
 		_, isHonest := honestIdProvider.ByPeerID(p)
 		if isHonest {
-			return scoring.MaxAppSpecificPenalty
+			return optionCfg.AppSpecificScore.MaxAppSpecificPenalty
 		}

-		return scoring.MaxAppSpecificReward
+		return optionCfg.AppSpecificScore.MaxAppSpecificReward
 	}
 }
diff --git a/network/p2p/scoring/decay_test.go b/network/p2p/scoring/decay_test.go
index 28ff6dabe7f..643d22fba83 100644
--- a/network/p2p/scoring/decay_test.go
+++ b/network/p2p/scoring/decay_test.go
@@ -8,6 +8,7 @@ import (
 	"github.com/stretchr/testify/assert"

+	"github.com/onflow/flow-go/config"
 	"github.com/onflow/flow-go/network/p2p"
 	"github.com/onflow/flow-go/network/p2p/scoring"
 )
@@ -116,13 +117,14 @@ func TestGeometricDecay(t *testing.T) {
 			wantErr: fmt.Errorf("last updated time cannot be in the future"),
 		},
 	}
+
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			got, err := scoring.GeometricDecay(tt.args.penalty, tt.args.decay, tt.args.lastUpdated)
 			if tt.wantErr != nil {
 				assert.Errorf(t, err, tt.wantErr.Error())
 			}
-			assert.Less(t, math.Abs(got-tt.want), 1e-3)
+			assert.LessOrEqual(t, truncateFloat(math.Abs(got-tt.want), 3), 1e-2)
 		})
 	}
 }
@@ -136,6 +138,9 @@
 // 4. penalty is negative and below the skipDecayThreshold and lastUpdated is too recent. In this case, the penalty should not be decayed.
 // 5. penalty is negative and below the skipDecayThreshold and lastUpdated is too old. In this case, the penalty should be decayed.
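+// 6. penalty is negative and below the slowerDecayPenaltyThreshold and the decay has not been adjusted since initialization. In this case, the decay should be adjusted (slowed down).
+// 7. penalty is negative and below the slowerDecayPenaltyThreshold but LastDecayAdjustment is too recent. In this case, the decay should not be adjusted.
+// 8. penalty is negative and below the slowerDecayPenaltyThreshold and LastDecayAdjustment is older than the decay adjustment evaluation period. In this case, the decay should be adjusted.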
func TestDefaultDecayFunction(t *testing.T) { + flowConfig, err := config.DefaultConfig() + assert.NoError(t, err) + type args struct { record p2p.GossipSubSpamRecord lastUpdated time.Time @@ -180,8 +185,9 @@ func TestDefaultDecayFunction(t *testing.T) { }, want: want{ record: p2p.GossipSubSpamRecord{ - Penalty: 0, // penalty is set to 0 - Decay: 0.8, + Penalty: 0, // penalty is set to 0 + Decay: 0.8, + LastDecayAdjustment: time.Time{}, }, }, }, @@ -198,8 +204,9 @@ func TestDefaultDecayFunction(t *testing.T) { }, want: want{ record: p2p.GossipSubSpamRecord{ - Penalty: 0, // penalty is set to 0 - Decay: 0.8, + Penalty: 0, // penalty is set to 0 + Decay: 0.8, + LastDecayAdjustment: time.Time{}, }, }, }, @@ -238,15 +245,84 @@ func TestDefaultDecayFunction(t *testing.T) { }, }, }, + { + // 6. penalty is negative and below slowerDecayPenaltyThreshold record decay should be adjusted. The `LastDecayAdjustment` has not been updated since initialization. + name: "penalty is negative and below slowerDecayPenaltyThreshold record decay should be adjusted", + args: args{ + record: p2p.GossipSubSpamRecord{ + Penalty: -100, + Decay: 0.8, + }, + lastUpdated: time.Now(), + }, + want: want{ + record: p2p.GossipSubSpamRecord{ + Penalty: -100, + Decay: 0.81, + }, + }, + }, + { + // 7. penalty is negative and below slowerDecayPenaltyThreshold but record.LastDecayAdjustment is too recent. In this case the decay should not be adjusted. + name: "penalty is negative and below slowerDecayPenaltyThreshold record decay should not be adjusted", + args: args{ + record: p2p.GossipSubSpamRecord{ + Penalty: -100, + Decay: 0.9, + LastDecayAdjustment: time.Now().Add(10 * time.Second), + }, + lastUpdated: time.Now(), + }, + want: want{ + record: p2p.GossipSubSpamRecord{ + Penalty: -100, + Decay: 0.9, + }, + }, + }, + { + // 8. penalty is negative and below slowerDecayPenaltyThreshold; and LastDecayAdjustment time passed the decay adjust interval. record decay should be adjusted. + name: "penalty is negative and below slowerDecayPenaltyThreshold and LastDecayAdjustment time passed the decay adjust interval. 
Record decay should be adjusted", + args: args{ + record: p2p.GossipSubSpamRecord{ + Penalty: -100, + Decay: 0.8, + LastDecayAdjustment: time.Now().Add(-flowConfig.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.PenaltyDecayEvaluationPeriod), + }, + lastUpdated: time.Now(), + }, + want: want{ + record: p2p.GossipSubSpamRecord{ + Penalty: -100, + Decay: 0.81, + }, + }, + }, } + scoringRegistryConfig := flowConfig.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters + decayFunc := scoring.DefaultDecayFunction(scoringRegistryConfig.SpamRecordCache.Decay) - decayFunc := scoring.DefaultDecayFunction() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := decayFunc(tt.args.record, tt.args.lastUpdated) assert.NoError(t, err) - assert.Less(t, math.Abs(got.Penalty-tt.want.record.Penalty), 10e-3) - assert.Equal(t, got.Decay, tt.want.record.Decay) + tolerance := 0.01 // 1% tolerance + expectedPenalty := tt.want.record.Penalty + + // ensure expectedPenalty is not zero to avoid division by zero + if expectedPenalty != 0 { + normalizedDifference := math.Abs(got.Penalty-expectedPenalty) / math.Abs(expectedPenalty) + assert.Less(t, normalizedDifference, tolerance) + } else { + // handles the case where expectedPenalty is zero + assert.Less(t, math.Abs(got.Penalty), tolerance) + } + assert.Equal(t, tt.want.record.Decay, got.Decay) }) } } + +func truncateFloat(number float64, decimalPlaces int) float64 { + pow := math.Pow(10, float64(decimalPlaces)) + return float64(int(number*pow)) / pow +} diff --git a/network/p2p/scoring/internal/appSpecificScoreCache.go b/network/p2p/scoring/internal/appSpecificScoreCache.go new file mode 100644 index 00000000000..fba7b1c7abb --- /dev/null +++ b/network/p2p/scoring/internal/appSpecificScoreCache.go @@ -0,0 +1,92 @@ +package internal + +import ( + "fmt" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" + "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" + "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/network/p2p" +) + +// AppSpecificScoreCache is a cache that stores the application specific score of peers by the hash of the peerID. +// The application specific score of a peer is used to calculate the GossipSub score of the peer. +// Note that the application specific score and the GossipSub score are solely used by the current peer to select the peers +// to which it will connect on a topic mesh. +type AppSpecificScoreCache struct { + c *stdmap.Backend[flow.Identifier, *appSpecificScoreRecord] +} + +var _ p2p.GossipSubApplicationSpecificScoreCache = (*AppSpecificScoreCache)(nil) + +// NewAppSpecificScoreCache creates a new application specific score cache with the given size limit. +// The cache has an LRU eviction policy. +// Args: +// - sizeLimit: the size limit of the cache. +// - logger: the logger to use for logging. +// - collector: the metrics collector to use for collecting metrics. +// Returns: +// - *AppSpecificScoreCache: the created cache. 
+func NewAppSpecificScoreCache(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *AppSpecificScoreCache { + backData := herocache.NewCache[*appSpecificScoreRecord]( + sizeLimit, + herocache.DefaultOversizeFactor, + heropool.LRUEjection, + logger.With().Str("mempool", "gossipsub-app-specific-score-cache").Logger(), + collector, + ) + + return &AppSpecificScoreCache{ + c: stdmap.NewBackend(stdmap.WithMutableBackData[flow.Identifier, *appSpecificScoreRecord](backData)), + } +} + +// Get returns the application specific score of a peer from the cache. +// Args: +// - peerID: the peer ID of the peer in the GossipSub protocol. +// Returns: +// - float64: the application specific score of the peer. +// - time.Time: the time at which the score was last updated. +// - bool: true if the score was retrieved successfully, false otherwise. +func (a *AppSpecificScoreCache) Get(peerID peer.ID) (float64, time.Time, bool) { + record, ok := a.c.Get(p2p.MakeId(peerID)) + if !ok { + return 0, time.Time{}, false + } + return record.Score, record.LastUpdated, true +} + +// AdjustWithInit adds the application specific score of a peer to the cache. +// If the peer already has a score in the cache, the score is updated. +// Args: +// - peerID: the peer ID of the peer in the GossipSub protocol. +// - score: the application specific score of the peer. +// - time: the time at which the score was last updated. +// Returns: +// - error on failure to add the score. The returned error is irrecoverable and indicates an exception. +func (a *AppSpecificScoreCache) AdjustWithInit(peerID peer.ID, score float64, time time.Time) error { + initLogic := func() *appSpecificScoreRecord { + return &appSpecificScoreRecord{ + PeerID: peerID, + Score: score, + LastUpdated: time, + } + } + adjustLogic := func(record *appSpecificScoreRecord) *appSpecificScoreRecord { + record.Score = score + record.LastUpdated = time + return record + } + _, adjusted := a.c.AdjustWithInit(p2p.MakeId(peerID), adjustLogic, initLogic) + if !adjusted { + return fmt.Errorf("failed to adjust app specific score for peer %s", peerID) + } + + return nil +} diff --git a/network/p2p/scoring/internal/appSpecificScoreCache_test.go b/network/p2p/scoring/internal/appSpecificScoreCache_test.go new file mode 100644 index 00000000000..bea5f355833 --- /dev/null +++ b/network/p2p/scoring/internal/appSpecificScoreCache_test.go @@ -0,0 +1,166 @@ +package internal_test + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network/p2p/scoring/internal" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestAppSpecificScoreCache tests the functionality of AppSpecificScoreCache; +// specifically, it tests the Add and Get methods. +// It does not test the eviction policy of the cache. 
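+// Note: scores are written to the cache via AdjustWithInit (which initializes the record on first use and
+// overwrites it afterwards) and read back via Get.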
+func TestAppSpecificScoreCache(t *testing.T) { + cache := internal.NewAppSpecificScoreCache(10, unittest.Logger(), metrics.NewNoopCollector()) + require.NotNil(t, cache, "failed to create AppSpecificScoreCache") + + peerID := unittest.PeerIdFixture(t) + score := 5.0 + updateTime := time.Now() + + err := cache.AdjustWithInit(peerID, score, updateTime) + require.Nil(t, err, "failed to add score to cache") + + // retrieve score from cache + retrievedScore, lastUpdated, found := cache.Get(peerID) + require.True(t, found, "failed to find score in cache") + require.Equal(t, score, retrievedScore, "retrieved score does not match expected") + require.Equal(t, updateTime, lastUpdated, "retrieved update time does not match expected") + + // test cache update + newScore := 10.0 + err = cache.AdjustWithInit(peerID, newScore, updateTime.Add(time.Minute)) + require.Nil(t, err, "Failed to update score in cache") + + // retrieve updated score + updatedScore, updatedTime, found := cache.Get(peerID) + require.True(t, found, "failed to find updated score in cache") + require.Equal(t, newScore, updatedScore, "updated score does not match expected") + require.Equal(t, updateTime.Add(time.Minute), updatedTime, "updated time does not match expected") +} + +// TestAppSpecificScoreCache_Concurrent_Add_Get_Update tests the concurrent functionality of AppSpecificScoreCache; +// specifically, it tests the Add and Get methods under concurrent access. +func TestAppSpecificScoreCache_Concurrent_Add_Get_Update(t *testing.T) { + cache := internal.NewAppSpecificScoreCache(10, unittest.Logger(), metrics.NewNoopCollector()) + require.NotNil(t, cache, "failed to create AppSpecificScoreCache") + + peerId1 := unittest.PeerIdFixture(t) + score1 := 5.0 + lastUpdated1 := time.Now() + + peerId2 := unittest.PeerIdFixture(t) + score2 := 10.0 + lastUpdated2 := time.Now().Add(time.Minute) + + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + defer wg.Done() + err := cache.AdjustWithInit(peerId1, score1, lastUpdated1) + require.Nil(t, err, "failed to add score1 to cache") + }() + + go func() { + defer wg.Done() + err := cache.AdjustWithInit(peerId2, score2, lastUpdated2) + require.Nil(t, err, "failed to add score2 to cache") + }() + + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "failed to add scores to cache") + + // retrieve scores concurrently + wg.Add(2) + go func() { + defer wg.Done() + retrievedScore, lastUpdated, found := cache.Get(peerId1) + require.True(t, found, "failed to find score1 in cache") + require.Equal(t, score1, retrievedScore, "retrieved score1 does not match expected") + require.Equal(t, lastUpdated1, lastUpdated, "retrieved update time1 does not match expected") + }() + + go func() { + defer wg.Done() + retrievedScore, lastUpdated, found := cache.Get(peerId2) + require.True(t, found, "failed to find score2 in cache") + require.Equal(t, score2, retrievedScore, "retrieved score2 does not match expected") + require.Equal(t, lastUpdated2, lastUpdated, "retrieved update time2 does not match expected") + }() + + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "failed to retrieve scores from cache") + + // test cache update + newScore1 := 15.0 + newScore2 := 20.0 + lastUpdated1 = time.Now().Add(time.Minute) + lastUpdated2 = time.Now().Add(time.Minute) + + wg.Add(2) + go func() { + defer wg.Done() + err := cache.AdjustWithInit(peerId1, newScore1, lastUpdated1) + require.Nil(t, err, "failed to update score1 in cache") + }() + + go func() { + defer wg.Done() + err := 
cache.AdjustWithInit(peerId2, newScore2, lastUpdated2) + require.Nil(t, err, "failed to update score2 in cache") + }() + + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "failed to update scores in cache") + + // retrieve updated scores concurrently + wg.Add(2) + + go func() { + defer wg.Done() + updatedScore, updatedTime, found := cache.Get(peerId1) + require.True(t, found, "failed to find updated score1 in cache") + require.Equal(t, newScore1, updatedScore, "updated score1 does not match expected") + require.Equal(t, lastUpdated1, updatedTime, "updated time1 does not match expected") + }() + + go func() { + defer wg.Done() + updatedScore, updatedTime, found := cache.Get(peerId2) + require.True(t, found, "failed to find updated score2 in cache") + require.Equal(t, newScore2, updatedScore, "updated score2 does not match expected") + require.Equal(t, lastUpdated2, updatedTime, "updated time2 does not match expected") + }() + + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "failed to retrieve updated scores from cache") +} + +// TestAppSpecificScoreCache_Eviction tests the eviction policy of AppSpecificScoreCache; +// specifically, it tests that the cache evicts the least recently used record when the cache is full. +func TestAppSpecificScoreCache_Eviction(t *testing.T) { + cache := internal.NewAppSpecificScoreCache(10, unittest.Logger(), metrics.NewNoopCollector()) + require.NotNil(t, cache, "failed to create AppSpecificScoreCache") + + peerIds := unittest.PeerIdFixtures(t, 11) + scores := []float64{5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, -1, -2, -3, -4} + require.Equal(t, len(peerIds), len(scores), "peer ids and scores must have the same length") + + // add scores to cache + for i := 0; i < len(peerIds); i++ { + err := cache.AdjustWithInit(peerIds[i], scores[i], time.Now()) + require.Nil(t, err, "failed to add score to cache") + } + + // retrieve scores from cache; the first score should have been evicted + for i := 1; i < len(peerIds); i++ { + retrievedScore, _, found := cache.Get(peerIds[i]) + require.True(t, found, "failed to find score in cache") + require.Equal(t, scores[i], retrievedScore, "retrieved score does not match expected") + } + + // the first score should not be in the cache + _, _, found := cache.Get(peerIds[0]) + require.False(t, found, "score should not be in cache") +} diff --git a/network/p2p/scoring/internal/appSpecificScoreRecord.go b/network/p2p/scoring/internal/appSpecificScoreRecord.go new file mode 100644 index 00000000000..2002cd9169f --- /dev/null +++ b/network/p2p/scoring/internal/appSpecificScoreRecord.go @@ -0,0 +1,19 @@ +package internal + +import ( + "time" + + "github.com/libp2p/go-libp2p/core/peer" +) + +// appSpecificScoreRecord represents the application specific score of a peer. +type appSpecificScoreRecord struct { + // PeerID is the peer ID of the peer in the GossipSub protocol. + PeerID peer.ID + + // Score is the application specific score of the peer. + Score float64 + + // LastUpdated is the last time the score was updated. 
+	LastUpdated time.Time
+}
diff --git a/network/p2p/scoring/internal/subscriptionCache.go b/network/p2p/scoring/internal/subscriptionCache.go
new file mode 100644
index 00000000000..94f64a8594f
--- /dev/null
+++ b/network/p2p/scoring/internal/subscriptionCache.go
@@ -0,0 +1,145 @@
+package internal
+
+import (
+	"fmt"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/rs/zerolog"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata"
+	"github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
+	"github.com/onflow/flow-go/module/mempool/stdmap"
+	"github.com/onflow/flow-go/network/p2p"
+)
+
+// SubscriptionRecordCache manages the subscription records of peers in a network.
+// It uses a currentCycle counter to track the update cycles of the cache, ensuring the relevance of subscription data.
+// Stored subscription records are keyed by the hash of the peerID.
+type SubscriptionRecordCache struct {
+	c *stdmap.Backend[flow.Identifier, *SubscriptionRecord]
+
+	// currentCycle is an atomic counter used to track the update cycles of the subscription cache.
+	// It plays a critical role in maintaining the cache's data relevance and coherence.
+	// Each increment of currentCycle represents a new update cycle, signifying the cache's transition to a new state
+	// where only the most recent and relevant subscriptions are maintained. This design choice ensures that the cache
+	// does not retain stale or outdated subscription information, thereby reflecting the dynamic nature of peer
+	// subscriptions in the network. It is incremented every time the subscription cache is updated, either with new
+	// topic subscriptions or other update operations.
+	// The currentCycle is incremented atomically and externally by calling the MoveToNextUpdateCycle() function.
+	// This is called by the module that uses the subscription provider cache, signaling that whatever updates it has
+	// made to the cache so far can be considered out-of-date, and the new updates to the cache records should
+	// overwrite the old ones.
+	currentCycle atomic.Uint64
+}
+
+// NewSubscriptionRecordCache creates a new subscription cache with the given size limit.
+// Args:
+// - sizeLimit: the size limit of the cache.
+// - logger: the logger to use for logging.
+// - collector: the metrics collector to use for collecting metrics.
+func NewSubscriptionRecordCache(sizeLimit uint32,
+	logger zerolog.Logger,
+	collector module.HeroCacheMetrics) *SubscriptionRecordCache {
+	backData := herocache.NewCache[*SubscriptionRecord](
+		sizeLimit,
+		herocache.DefaultOversizeFactor,
+		heropool.LRUEjection,
+		logger.With().Str("mempool", "subscription-records").Logger(),
+		collector,
+	)
+
+	return &SubscriptionRecordCache{
+		c:            stdmap.NewBackend(stdmap.WithMutableBackData[flow.Identifier, *SubscriptionRecord](backData)),
+		currentCycle: *atomic.NewUint64(0),
+	}
+}
+
+// GetSubscribedTopics returns the list of topics a peer is subscribed to.
+// Returns:
+// - []string: the list of topics the peer is subscribed to.
+// - bool: true if there is a record for the peer, false otherwise.
+func (s *SubscriptionRecordCache) GetSubscribedTopics(pid peer.ID) ([]string, bool) {
+	record, ok := s.c.Get(p2p.MakeId(pid))
+	if !ok {
+		return nil, false
+	}
+	return record.Topics, true
+}
+
+// MoveToNextUpdateCycle moves the subscription cache to the next update cycle.
+// A new update cycle is started when the subscription cache is first created, and then every time the subscription cache
+// is updated. The update cycle is used to keep track of the last time the subscription cache was updated. It is used to
+// implement a notion of time in the subscription cache.
+// When the update cycle is moved forward, it means that all the updates made to the subscription cache so far are
+// considered out-of-date, and the new updates to the cache records should overwrite the old ones.
+// The expected behavior is that the update cycle is moved forward by the module that uses the subscription provider once
+// per update of the "entire" cache (and not per update of a single record).
+// In other words, assume a cache with 3 records: A, B, and C. If the module updates record A, then record B, and then
+// record C, the module should move the update cycle forward only once, after updating record C, and then update records A,
+// B, and C again. If the module moves the update cycle forward after updating record A, then again after updating
+// record B, and then again after updating record C, the cache will be in an inconsistent state.
+// Returns:
+// - uint64: the current update cycle.
+func (s *SubscriptionRecordCache) MoveToNextUpdateCycle() uint64 {
+	s.currentCycle.Inc()
+	return s.currentCycle.Load()
+}
+
+// AddWithInitTopicForPeer appends a topic to the list of topics a peer is subscribed to. If the peer is not subscribed to any
+// topics yet, a new record is created.
+// If the last update cycle is older than the current cycle, the list of topics for the peer is first cleared, and then
+// the topic is added to the list. This is to ensure that the list of topics for a peer is always up to date.
+// Args:
+// - pid: the peer id of the peer.
+// - topic: the topic to add.
+// Returns:
+// - []string: the list of topics the peer is subscribed to after the update.
+// - error: an error if the update failed; any returned error is an irrecoverable error and indicates a bug or misconfiguration.
+// Implementation must be thread-safe.
+func (s *SubscriptionRecordCache) AddWithInitTopicForPeer(pid peer.ID, topic string) ([]string, error) {
+	initLogic := func() *SubscriptionRecord {
+		return &SubscriptionRecord{
+			PeerID:           pid,
+			Topics:           make([]string, 0),
+			LastUpdatedCycle: s.currentCycle.Load(),
+		}
+	}
+	var rErr error
+	adjustLogic := func(record *SubscriptionRecord) *SubscriptionRecord {
+		currentCycle := s.currentCycle.Load()
+		if record.LastUpdatedCycle > currentCycle {
+			// sanity check
+			// This should never happen, because the update cycle must be moved forward before adding a topic.
+			panic(fmt.Sprintf("invalid last updated cycle, expected <= %d, got: %d", currentCycle, record.LastUpdatedCycle))
+		}
+		if record.LastUpdatedCycle < currentCycle {
+			// This record was not updated in the current cycle, so we can wipe its topics list (topic list is only
+			// valid for the current cycle).
+			record.Topics = make([]string, 0)
+		}
+		// check if the topic already exists; if it does, we do not need to update the record.
+		for _, t := range record.Topics {
+			if t == topic {
+				// topic already exists
+				return record
+			}
+		}
+		record.LastUpdatedCycle = currentCycle
+		record.Topics = append(record.Topics, topic)
+
+		// Return the adjusted record.
+ return record + } + adjustedRecord, adjusted := s.c.AdjustWithInit(p2p.MakeId(pid), adjustLogic, initLogic) + if rErr != nil { + return nil, fmt.Errorf("failed to adjust record with error: %w", rErr) + } + if !adjusted { + return nil, fmt.Errorf("failed to adjust record, entity not found") + } + + return adjustedRecord.Topics, nil +} diff --git a/network/p2p/scoring/internal/subscriptionCache_test.go b/network/p2p/scoring/internal/subscriptionCache_test.go new file mode 100644 index 00000000000..54b88707702 --- /dev/null +++ b/network/p2p/scoring/internal/subscriptionCache_test.go @@ -0,0 +1,319 @@ +package internal_test + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/p2p/scoring/internal" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewSubscriptionRecordCache tests that NewSubscriptionRecordCache returns a valid cache. +func TestNewSubscriptionRecordCache(t *testing.T) { + sizeLimit := uint32(100) + + cache := internal.NewSubscriptionRecordCache( + sizeLimit, + unittest.Logger(), + metrics.NewSubscriptionRecordCacheMetricsFactory(metrics.NewNoopHeroCacheMetricsFactory(), network.PrivateNetwork)) + + require.NotNil(t, cache, "cache should not be nil") + require.IsType(t, &internal.SubscriptionRecordCache{}, cache, "cache should be of type *SubscriptionRecordCache") +} + +// TestSubscriptionCache_GetSubscribedTopics tests the retrieval of subscribed topics for a peer. +func TestSubscriptionCache_GetSubscribedTopics(t *testing.T) { + sizeLimit := uint32(100) + cache := internal.NewSubscriptionRecordCache( + sizeLimit, + unittest.Logger(), + metrics.NewSubscriptionRecordCacheMetricsFactory(metrics.NewNoopHeroCacheMetricsFactory(), network.PrivateNetwork)) + + // create a dummy peer ID + peerID := unittest.PeerIdFixture(t) + + // case when the peer has a subscription + topics := []string{"topic1", "topic2"} + updatedTopics, err := cache.AddWithInitTopicForPeer(peerID, topics[0]) + require.NoError(t, err, "adding topic 1 should not produce an error") + require.Equal(t, topics[:1], updatedTopics, "updated topics should match the added topic") + updatedTopics, err = cache.AddWithInitTopicForPeer(peerID, topics[1]) + require.NoError(t, err, "adding topic 2 should not produce an error") + require.Equal(t, topics, updatedTopics, "updated topics should match the added topic") + + retrievedTopics, found := cache.GetSubscribedTopics(peerID) + require.True(t, found, "peer should be found") + require.ElementsMatch(t, topics, retrievedTopics, "retrieved topics should match the added topics") + + // case when the peer does not have a subscription + nonExistentPeerID := unittest.PeerIdFixture(t) + retrievedTopics, found = cache.GetSubscribedTopics(nonExistentPeerID) + require.False(t, found, "non-existent peer should not be found") + require.Nil(t, retrievedTopics, "retrieved topics for non-existent peer should be nil") +} + +// TestSubscriptionCache_MoveToNextUpdateCycle tests the increment of update cycles in SubscriptionRecordCache. +// The first increment should set the cycle to 1, and the second increment should set the cycle to 2. 
+func TestSubscriptionCache_MoveToNextUpdateCycle(t *testing.T) { + sizeLimit := uint32(100) + cache := internal.NewSubscriptionRecordCache( + sizeLimit, + unittest.Logger(), + metrics.NewSubscriptionRecordCacheMetricsFactory(metrics.NewNoopHeroCacheMetricsFactory(), network.PrivateNetwork)) + + // initial cycle should be 0, so first increment sets it to 1 + firstCycle := cache.MoveToNextUpdateCycle() + require.Equal(t, uint64(1), firstCycle, "first cycle should be 1 after first increment") + + // increment cycle again and verify it's now 2 + secondCycle := cache.MoveToNextUpdateCycle() + require.Equal(t, uint64(2), secondCycle, "second cycle should be 2 after second increment") +} + +// TestSubscriptionCache_TestAddTopicForPeer tests adding a topic for a peer. +func TestSubscriptionCache_TestAddTopicForPeer(t *testing.T) { + sizeLimit := uint32(100) + cache := internal.NewSubscriptionRecordCache( + sizeLimit, + unittest.Logger(), + metrics.NewSubscriptionRecordCacheMetricsFactory(metrics.NewNoopHeroCacheMetricsFactory(), network.PrivateNetwork)) + + // case when adding a topic to an existing peer + existingPeerID := unittest.PeerIdFixture(t) + firstTopic := "topic1" + secondTopic := "topic2" + + // add first topic to the existing peer + _, err := cache.AddWithInitTopicForPeer(existingPeerID, firstTopic) + require.NoError(t, err, "adding first topic to existing peer should not produce an error") + + // add second topic to the same peer + updatedTopics, err := cache.AddWithInitTopicForPeer(existingPeerID, secondTopic) + require.NoError(t, err, "adding second topic to existing peer should not produce an error") + require.ElementsMatch(t, []string{firstTopic, secondTopic}, updatedTopics, "updated topics should match the added topics") + + // case when adding a topic to a new peer + newPeerID := unittest.PeerIdFixture(t) + newTopic := "newTopic" + + // add a topic to the new peer + updatedTopics, err = cache.AddWithInitTopicForPeer(newPeerID, newTopic) + require.NoError(t, err, "adding topic to new peer should not produce an error") + require.Equal(t, []string{newTopic}, updatedTopics, "updated topics for new peer should match the added topic") + + // sanity check that the topics for existing peer are still the same + retrievedTopics, found := cache.GetSubscribedTopics(existingPeerID) + require.True(t, found, "existing peer should be found") + require.ElementsMatch(t, []string{firstTopic, secondTopic}, retrievedTopics, "retrieved topics should match the added topics") +} + +// TestSubscriptionCache_DuplicateTopics tests adding a duplicate topic for a peer. The duplicate topic should not be added. 
+func TestSubscriptionCache_DuplicateTopics(t *testing.T) { + sizeLimit := uint32(100) + cache := internal.NewSubscriptionRecordCache( + sizeLimit, + unittest.Logger(), + metrics.NewSubscriptionRecordCacheMetricsFactory(metrics.NewNoopHeroCacheMetricsFactory(), network.PrivateNetwork)) + + peerID := unittest.PeerIdFixture(t) + topic := "topic1" + + // add first topic to the existing peer + _, err := cache.AddWithInitTopicForPeer(peerID, topic) + require.NoError(t, err, "adding first topic to existing peer should not produce an error") + + // add second topic to the same peer + updatedTopics, err := cache.AddWithInitTopicForPeer(peerID, topic) + require.NoError(t, err, "adding duplicate topic to existing peer should not produce an error") + require.Equal(t, []string{topic}, updatedTopics, "duplicate topic should not be added") +} + +// TestSubscriptionCache_MoveUpdateCycle tests that (1) within one update cycle, "AddWithInitTopicForPeer" calls append the topics to the list of +// subscribed topics for peer, (2) as long as there is no "AddWithInitTopicForPeer" call, moving to the next update cycle +// does not change the subscribed topics for a peer, and (3) calling "AddWithInitTopicForPeer" after moving to the next update +// cycle clears the subscribed topics for a peer and adds the new topic. +func TestSubscriptionCache_MoveUpdateCycle(t *testing.T) { + sizeLimit := uint32(100) + cache := internal.NewSubscriptionRecordCache( + sizeLimit, + unittest.Logger(), + metrics.NewSubscriptionRecordCacheMetricsFactory(metrics.NewNoopHeroCacheMetricsFactory(), network.PrivateNetwork)) + + peerID := unittest.PeerIdFixture(t) + topic1 := "topic1" + topic2 := "topic2" + topic3 := "topic3" + topic4 := "topic4" + + // adds topic1, topic2, and topic3 to the peer + topics, err := cache.AddWithInitTopicForPeer(peerID, topic1) + require.NoError(t, err, "adding first topic to existing peer should not produce an error") + require.Equal(t, []string{topic1}, topics, "updated topics should match the added topic") + topics, err = cache.AddWithInitTopicForPeer(peerID, topic2) + require.NoError(t, err, "adding second topic to existing peer should not produce an error") + require.Equal(t, []string{topic1, topic2}, topics, "updated topics should match the added topics") + topics, err = cache.AddWithInitTopicForPeer(peerID, topic3) + require.NoError(t, err, "adding third topic to existing peer should not produce an error") + require.Equal(t, []string{topic1, topic2, topic3}, topics, "updated topics should match the added topics") + + // move to next update cycle + cache.MoveToNextUpdateCycle() + topics, found := cache.GetSubscribedTopics(peerID) + require.True(t, found, "existing peer should be found") + require.ElementsMatch(t, []string{topic1, topic2, topic3}, topics, "retrieved topics should match the added topics") + + // add topic4 to the peer; since we moved to the next update cycle, the topics for the peer should be cleared + // and topic4 should be the only topic for the peer + topics, err = cache.AddWithInitTopicForPeer(peerID, topic4) + require.NoError(t, err, "adding fourth topic to existing peer should not produce an error") + require.Equal(t, []string{topic4}, topics, "updated topics should match the added topic") + + // move to next update cycle + cache.MoveToNextUpdateCycle() + + // since we did not add any topic to the peer, the topics for the peer should be the same as before + topics, found = cache.GetSubscribedTopics(peerID) + require.True(t, found, "existing peer should be found") + 
require.ElementsMatch(t, []string{topic4}, topics, "retrieved topics should match the added topics") +} + +// TestSubscriptionCache_MoveUpdateCycleWithDifferentPeers tests that moving to the next update cycle does not affect the subscribed +// topics for other peers. +func TestSubscriptionCache_MoveUpdateCycleWithDifferentPeers(t *testing.T) { + sizeLimit := uint32(100) + cache := internal.NewSubscriptionRecordCache( + sizeLimit, + unittest.Logger(), + metrics.NewSubscriptionRecordCacheMetricsFactory(metrics.NewNoopHeroCacheMetricsFactory(), network.PrivateNetwork)) + + peer1 := unittest.PeerIdFixture(t) + peer2 := unittest.PeerIdFixture(t) + topic1 := "topic1" + topic2 := "topic2" + + // add topic1 to peer1 + topics, err := cache.AddWithInitTopicForPeer(peer1, topic1) + require.NoError(t, err, "adding first topic to peer1 should not produce an error") + require.Equal(t, []string{topic1}, topics, "updated topics should match the added topic") + + // add topic2 to peer2 + topics, err = cache.AddWithInitTopicForPeer(peer2, topic2) + require.NoError(t, err, "adding first topic to peer2 should not produce an error") + require.Equal(t, []string{topic2}, topics, "updated topics should match the added topic") + + // move to next update cycle + cache.MoveToNextUpdateCycle() + + // since we did not add any topic to the peers, the topics for the peers should be the same as before + topics, found := cache.GetSubscribedTopics(peer1) + require.True(t, found, "peer1 should be found") + require.ElementsMatch(t, []string{topic1}, topics, "retrieved topics should match the added topics") + + topics, found = cache.GetSubscribedTopics(peer2) + require.True(t, found, "peer2 should be found") + require.ElementsMatch(t, []string{topic2}, topics, "retrieved topics should match the added topics") + + // now add topic2 to peer1; it should overwrite the previous topics for peer1, but not affect the topics for peer2 + topics, err = cache.AddWithInitTopicForPeer(peer1, topic2) + require.NoError(t, err, "adding second topic to peer1 should not produce an error") + require.Equal(t, []string{topic2}, topics, "updated topics should match the added topic") + + topics, found = cache.GetSubscribedTopics(peer2) + require.True(t, found, "peer2 should be found") + require.ElementsMatch(t, []string{topic2}, topics, "retrieved topics should match the added topics") +} + +// TestSubscriptionCache_ConcurrentUpdate tests subscription cache update in a concurrent environment. 
+func TestSubscriptionCache_ConcurrentUpdate(t *testing.T) {
+	sizeLimit := uint32(100)
+	cache := internal.NewSubscriptionRecordCache(
+		sizeLimit,
+		unittest.Logger(),
+		metrics.NewSubscriptionRecordCacheMetricsFactory(metrics.NewNoopHeroCacheMetricsFactory(), network.PrivateNetwork))
+
+	peerIds := unittest.PeerIdFixtures(t, 100)
+	topics := []string{"topic1", "topic2", "topic3"}
+
+	allUpdatesDone := sync.WaitGroup{}
+	for _, pid := range peerIds {
+		for _, topic := range topics {
+			pid := pid
+			topic := topic
+			allUpdatesDone.Add(1)
+			go func() {
+				defer allUpdatesDone.Done()
+				_, err := cache.AddWithInitTopicForPeer(pid, topic)
+				require.NoError(t, err, "adding topic to peer should not produce an error")
+			}()
+		}
+	}
+
+	unittest.RequireReturnsBefore(t, allUpdatesDone.Wait, 1*time.Second, "all updates did not finish in time")
+
+	// verify that all peers have all topics; concurrently
+	allTopicsVerified := sync.WaitGroup{}
+	for _, pid := range peerIds {
+		pid := pid
+		allTopicsVerified.Add(1)
+		go func() {
+			defer allTopicsVerified.Done()
+			subscribedTopics, found := cache.GetSubscribedTopics(pid)
+			require.True(t, found, "peer should be found")
+			require.ElementsMatch(t, topics, subscribedTopics, "retrieved topics should match the added topics")
+		}()
+	}
+
+	unittest.RequireReturnsBefore(t, allTopicsVerified.Wait, 1*time.Second, "all topics were not verified in time")
+}
+
+// TestSubscriptionCache_TestSizeLimit tests that the cache evicts the least recently used peer when the cache size limit is reached.
+func TestSubscriptionCache_TestSizeLimit(t *testing.T) {
+	sizeLimit := uint32(100)
+	cache := internal.NewSubscriptionRecordCache(
+		sizeLimit,
+		unittest.Logger(),
+		metrics.NewSubscriptionRecordCacheMetricsFactory(metrics.NewNoopHeroCacheMetricsFactory(), network.PrivateNetwork))
+
+	peerIds := unittest.PeerIdFixtures(t, 100)
+	topics := []string{"topic1", "topic2", "topic3"}
+
+	// add topics to peers
+	for _, pid := range peerIds {
+		for _, topic := range topics {
+			_, err := cache.AddWithInitTopicForPeer(pid, topic)
+			require.NoError(t, err, "adding topic to peer should not produce an error")
+		}
+	}
+
+	// verify that all peers have all topics
+	for _, pid := range peerIds {
+		subscribedTopics, found := cache.GetSubscribedTopics(pid)
+		require.True(t, found, "peer should be found")
+		require.ElementsMatch(t, topics, subscribedTopics, "retrieved topics should match the added topics")
+	}
+
+	// add one more peer and verify that the first peer is evicted
+	newPeerID := unittest.PeerIdFixture(t)
+	_, err := cache.AddWithInitTopicForPeer(newPeerID, topics[0])
+	require.NoError(t, err, "adding topic to peer should not produce an error")
+
+	_, found := cache.GetSubscribedTopics(peerIds[0])
+	require.False(t, found, "peer should not be found")
+
+	// verify that all other peers still have all topics
+	for _, pid := range peerIds[1:] {
+		subscribedTopics, found := cache.GetSubscribedTopics(pid)
+		require.True(t, found, "peer should be found")
+		require.ElementsMatch(t, topics, subscribedTopics, "retrieved topics should match the added topics")
+	}
+
+	// verify that the new peer has the topic
+	newPeerTopics, found := cache.GetSubscribedTopics(newPeerID)
+	require.True(t, found, "peer should be found")
+	require.ElementsMatch(t, []string{topics[0]}, newPeerTopics, "retrieved topics should match the added topic")
+}
diff --git a/network/p2p/scoring/internal/subscriptionRecord.go b/network/p2p/scoring/internal/subscriptionRecord.go
new file mode 100644
index 00000000000..a056980af63
--- /dev/null
+++ b/network/p2p/scoring/internal/subscriptionRecord.go
@@ -0,0
+1,19 @@ +package internal + +import ( + "github.com/libp2p/go-libp2p/core/peer" +) + +// SubscriptionRecord represents the list of topics a peer is subscribed to. +// It is internally used by the SubscriptionRecordCache to store the subscription records in the cache. +type SubscriptionRecord struct { + // PeerID is the peer id of the peer that is the owner of the subscription. + PeerID peer.ID + + // Topics is the list of topics the peer is subscribed to. + Topics []string + + // LastUpdatedCycle is the last cycle counter value that this record was updated. + // This is used to clean up old records' topics upon update. + LastUpdatedCycle uint64 +} diff --git a/network/p2p/scoring/noopConsumer.go b/network/p2p/scoring/noopConsumer.go new file mode 100644 index 00000000000..b3eaa95ee8e --- /dev/null +++ b/network/p2p/scoring/noopConsumer.go @@ -0,0 +1,19 @@ +package scoring + +import "github.com/onflow/flow-go/network/p2p" + +// NoopInvCtrlMsgNotifConsumer is a no-op implementation of the p2p.GossipSubInvCtrlMsgNotifConsumer interface. +// It is used to consume invalid control message notifications from the GossipSub pubsub system and take no action. +// It is mainly used for cases when the peer scoring system is disabled. +type NoopInvCtrlMsgNotifConsumer struct { +} + +func NewNoopInvCtrlMsgNotifConsumer() *NoopInvCtrlMsgNotifConsumer { + return &NoopInvCtrlMsgNotifConsumer{} +} + +var _ p2p.GossipSubInvCtrlMsgNotifConsumer = (*NoopInvCtrlMsgNotifConsumer)(nil) + +func (n NoopInvCtrlMsgNotifConsumer) OnInvalidControlMessageNotification(_ *p2p.InvCtrlMsgNotif) { + // no-op +} diff --git a/network/p2p/scoring/registry.go b/network/p2p/scoring/registry.go index 15c67d55b33..0c3fc4eeb6f 100644 --- a/network/p2p/scoring/registry.go +++ b/network/p2p/scoring/registry.go @@ -2,69 +2,36 @@ package scoring import ( "fmt" + "math" "time" + "github.com/go-playground/validator/v10" "github.com/libp2p/go-libp2p/core/peer" "github.com/rs/zerolog" + "go.uber.org/atomic" + "github.com/onflow/flow-go/engine/common/worker" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/queue" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" netcache "github.com/onflow/flow-go/network/p2p/cache" + p2pconfig "github.com/onflow/flow-go/network/p2p/config" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" + p2pmsg "github.com/onflow/flow-go/network/p2p/message" "github.com/onflow/flow-go/utils/logging" ) const ( - // skipDecayThreshold is the threshold for which when the negative penalty is above this value, the decay function will not be called. - // instead, the penalty will be set to 0. This is to prevent the penalty from keeping a small negative value for a long time. - skipDecayThreshold = -0.1 - // defaultDecay is the default decay value for the application specific penalty. - // this value is used when no custom decay value is provided, and decays the penalty by 1% every second. - // assume: - // penalty = -100 (the maximum application specific penalty is -100) - // skipDecayThreshold = -0.1 - // it takes around 459 seconds for the penalty to decay to reach greater than -0.1 and turn into 0. - // x * 0.99 ^ n > -0.1 (assuming negative x). - // 0.99 ^ n > -0.1 / x - // Now we can take the logarithm of both sides (with any base, but let's use base 10 for simplicity). 
- // log( 0.99 ^ n ) < log( 0.1 / x ) - // Using the properties of logarithms, we can bring down the exponent: - // n * log( 0.99 ) < log( -0.1 / x ) - // And finally, we can solve for n: - // n > log( -0.1 / x ) / log( 0.99 ) - // We can plug in x = -100: - // n > log( -0.1 / -100 ) / log( 0.99 ) - // n > log( 0.001 ) / log( 0.99 ) - // n > -3 / log( 0.99 ) - // n > 458.22 - defaultDecay = 0.99 // default decay value for the application specific penalty. - // graftMisbehaviourPenalty is the penalty applied to the application specific penalty when a peer conducts a graft misbehaviour. - graftMisbehaviourPenalty = -10 - // pruneMisbehaviourPenalty is the penalty applied to the application specific penalty when a peer conducts a prune misbehaviour. - pruneMisbehaviourPenalty = -10 - // iHaveMisbehaviourPenalty is the penalty applied to the application specific penalty when a peer conducts a iHave misbehaviour. - iHaveMisbehaviourPenalty = -10 - // iWantMisbehaviourPenalty is the penalty applied to the application specific penalty when a peer conducts a iWant misbehaviour. - iWantMisbehaviourPenalty = -10 + // NotificationSilencedMsg log messages for silenced notifications + NotificationSilencedMsg = "ignoring invalid control message notification for peer during silence period" ) -// GossipSubCtrlMsgPenaltyValue is the penalty value for each control message type. -type GossipSubCtrlMsgPenaltyValue struct { - Graft float64 // penalty value for an individual graft message misbehaviour. - Prune float64 // penalty value for an individual prune message misbehaviour. - IHave float64 // penalty value for an individual iHave message misbehaviour. - IWant float64 // penalty value for an individual iWant message misbehaviour. -} - -// DefaultGossipSubCtrlMsgPenaltyValue returns the default penalty value for each control message type. -func DefaultGossipSubCtrlMsgPenaltyValue() GossipSubCtrlMsgPenaltyValue { - return GossipSubCtrlMsgPenaltyValue{ - Graft: graftMisbehaviourPenalty, - Prune: pruneMisbehaviourPenalty, - IHave: iHaveMisbehaviourPenalty, - IWant: iWantMisbehaviourPenalty, - } -} +type SpamRecordInitFunc func() p2p.GossipSubSpamRecord // GossipSubAppSpecificScoreRegistry is the registry for the application specific score of peers in the GossipSub protocol. // The application specific score is part of the overall score of a peer, and is used to determine the peer's score based @@ -74,121 +41,306 @@ func DefaultGossipSubCtrlMsgPenaltyValue() GossipSubCtrlMsgPenaltyValue { // Similar to the GossipSub score, the application specific score is meant to be private to the local peer, and is not // shared with other peers in the network. type GossipSubAppSpecificScoreRegistry struct { + component.Component logger zerolog.Logger idProvider module.IdentityProvider + // spamScoreCache currently only holds the control message misbehaviour penalty (spam related penalty). spamScoreCache p2p.GossipSubSpamRecordCache - penalty GossipSubCtrlMsgPenaltyValue - // initial application specific penalty record, used to initialize the penalty cache entry. - init func() p2p.GossipSubSpamRecord + + penalty p2pconfig.MisbehaviourPenalties + + // getDuplicateMessageCount callback used to get a gauge of the number of duplicate messages detected for each peer. 
+	getDuplicateMessageCount func(id peer.ID) float64
+	validator                p2p.SubscriptionValidator
+
+	// scoreTTL is the time to live of the application specific score of a peer; the registry keeps a cached copy of the
+	// application specific score of a peer for this duration. When the duration expires, the application specific score
+	// of the peer is updated asynchronously. As long as the update is in progress, the cached copy of the application
+	// specific score of the peer is used even if it is expired.
+	scoreTTL time.Duration
+
+	// appScoreCache is a cache that stores the application specific score of peers.
+	appScoreCache p2p.GossipSubApplicationSpecificScoreCache
+
+	// appScoreUpdateWorkerPool is the worker pool for handling the application specific score update of peers in a non-blocking way.
+	appScoreUpdateWorkerPool  *worker.Pool[peer.ID]
+	invCtrlMsgNotifWorkerPool *worker.Pool[*p2p.InvCtrlMsgNotif]
+
+	appSpecificScoreParams    p2pconfig.ApplicationSpecificScoreParameters
+	duplicateMessageThreshold float64
+	collector                 module.GossipSubScoringRegistryMetrics
+
+	// silencePeriodDuration is the duration of the startup silence period, during which nodes are not penalized.
+	silencePeriodDuration time.Duration
+	// silencePeriodStartTime is the time at which the silence period begins; it is set when the registry is started by the node.
+	silencePeriodStartTime time.Time
+	// silencePeriodElapsed is an atomic flag indicating whether the silence period is over.
+	silencePeriodElapsed *atomic.Bool
 }
 
 // GossipSubAppSpecificScoreRegistryConfig is the configuration for the GossipSubAppSpecificScoreRegistry.
-// The configuration is used to initialize the registry.
+// It bundles the parameters and collaborating components that are needed to build and maintain the application specific score of peers.
 type GossipSubAppSpecificScoreRegistryConfig struct {
-	Logger zerolog.Logger
+	Parameters p2pconfig.AppSpecificScoreParameters `validate:"required"`
+
+	Logger zerolog.Logger `validate:"required"`
 
 	// Validator is the subscription validator used to validate the subscriptions of peers, and determine if a peer is
 	// authorized to subscribe to a topic.
-	Validator p2p.SubscriptionValidator
+	Validator p2p.SubscriptionValidator `validate:"required"`
 
 	// Penalty encapsulates the penalty unit for each control message type misbehaviour.
-	Penalty GossipSubCtrlMsgPenaltyValue
+	Penalty p2pconfig.MisbehaviourPenalties `validate:"required"`
 
 	// IdProvider is the identity provider used to translate peer ids at the networking layer to Flow identifiers (if
 	// an authorized peer is found).
-	IdProvider module.IdentityProvider
+	IdProvider module.IdentityProvider `validate:"required"`
 
-	// Init is a factory function that returns a new GossipSubSpamRecord. It is used to initialize the spam record of
-	// a peer when the peer is first observed by the local peer.
-	Init func() p2p.GossipSubSpamRecord
+	// GetDuplicateMessageCount callback used to get a gauge of the number of duplicate messages detected for each peer.
+	GetDuplicateMessageCount func(id peer.ID) float64
 
-	// CacheFactory is a factory function that returns a new GossipSubSpamRecordCache. It is used to initialize the spamScoreCache.
+	// SpamRecordCacheFactory is a factory function that returns a new GossipSubSpamRecordCache. It is used to initialize the spamScoreCache.
 	// The cache is used to store the application specific penalty of peers.
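+	// For illustration only (editor's sketch, not part of this change; newSpamRecordCache is a
+	// hypothetical helper standing in for the concrete cache constructor wired up elsewhere in this PR):
+	//
+	//	cfg.SpamRecordCacheFactory = func() p2p.GossipSubSpamRecordCache {
+	//		return newSpamRecordCache() // must return a fresh, empty cache on every call
+	//	}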
- CacheFactory func() p2p.GossipSubSpamRecordCache + SpamRecordCacheFactory func() p2p.GossipSubSpamRecordCache `validate:"required"` + + // AppScoreCacheFactory is a factory function that returns a new GossipSubApplicationSpecificScoreCache. It is used to initialize the appScoreCache. + // The cache is used to store the application specific score of peers. + AppScoreCacheFactory func() p2p.GossipSubApplicationSpecificScoreCache `validate:"required"` + + HeroCacheMetricsFactory metrics.HeroCacheMetricsFactory `validate:"required"` + + NetworkingType network.NetworkingType `validate:"required"` + + // ScoringRegistryStartupSilenceDuration defines the duration of time, after the node startup, + // during which the scoring registry remains inactive before penalizing nodes. + ScoringRegistryStartupSilenceDuration time.Duration + + AppSpecificScoreParams p2pconfig.ApplicationSpecificScoreParameters `validate:"required"` + + DuplicateMessageThreshold float64 `validate:"gt=0"` + + Collector module.GossipSubScoringRegistryMetrics `validate:"required"` } // NewGossipSubAppSpecificScoreRegistry returns a new GossipSubAppSpecificScoreRegistry. // Args: // -// config: the configuration for the registry. +// config: the config for the registry. // // Returns: // // a new GossipSubAppSpecificScoreRegistry. -func NewGossipSubAppSpecificScoreRegistry(config *GossipSubAppSpecificScoreRegistryConfig) *GossipSubAppSpecificScoreRegistry { +// +// error: if the configuration is invalid, an error is returned; any returned error is an irrecoverable error and indicates a bug or misconfiguration. +func NewGossipSubAppSpecificScoreRegistry(config *GossipSubAppSpecificScoreRegistryConfig) (*GossipSubAppSpecificScoreRegistry, error) { + if err := validator.New().Struct(config); err != nil { + return nil, fmt.Errorf("invalid config: %w", err) + } + + lg := config.Logger.With().Str("module", "app_score_registry").Logger() + reg := &GossipSubAppSpecificScoreRegistry{ - logger: config.Logger.With().Str("module", "app_score_registry").Logger(), - spamScoreCache: config.CacheFactory(), - penalty: config.Penalty, - init: config.Init, - validator: config.Validator, - idProvider: config.IdProvider, + logger: config.Logger.With().Str("module", "app_score_registry").Logger(), + getDuplicateMessageCount: config.GetDuplicateMessageCount, + spamScoreCache: config.SpamRecordCacheFactory(), + appScoreCache: config.AppScoreCacheFactory(), + penalty: config.Penalty, + validator: config.Validator, + idProvider: config.IdProvider, + scoreTTL: config.Parameters.ScoreTTL, + silencePeriodDuration: config.ScoringRegistryStartupSilenceDuration, + silencePeriodElapsed: atomic.NewBool(false), + appSpecificScoreParams: config.AppSpecificScoreParams, + duplicateMessageThreshold: config.DuplicateMessageThreshold, + collector: config.Collector, + } + + appSpecificScore := queue.NewHeroStore(config.Parameters.ScoreUpdateRequestQueueSize, + lg.With().Str("component", "app_specific_score_update").Logger(), + metrics.GossipSubAppSpecificScoreUpdateQueueMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType)) + reg.appScoreUpdateWorkerPool = worker.NewWorkerPoolBuilder[peer.ID](lg.With().Str("component", "app_specific_score_update_worker_pool").Logger(), appSpecificScore, + reg.processAppSpecificScoreUpdateWork).Build() + + invalidCtrlMsgNotificationStore := queue.NewHeroStore(config.Parameters.InvalidControlMessageNotificationQueueSize, + lg.With().Str("component", "invalid_control_message_notification_queue").Logger(), + 
metrics.RpcInspectorNotificationQueueMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType),
+		queue.WithMessageKeyFactory(queue.IdentifierOfMessageWithNonce))
+	reg.invCtrlMsgNotifWorkerPool = worker.NewWorkerPoolBuilder[*p2p.InvCtrlMsgNotif](lg, invalidCtrlMsgNotificationStore, reg.handleMisbehaviourReport).Build()
+
+	builder := component.NewComponentManagerBuilder()
+	builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+		reg.logger.Info().Msg("starting subscription validator")
+		reg.validator.Start(ctx)
+		select {
+		case <-ctx.Done():
+			reg.logger.Warn().Msg("aborting subscription validator startup, context cancelled")
+		case <-reg.validator.Ready():
+			reg.logger.Info().Msg("subscription validator started")
+			ready()
+			reg.logger.Info().Msg("subscription validator is ready")
+		}
+		<-ctx.Done()
+		reg.logger.Info().Msg("stopping subscription validator")
+		<-reg.validator.Done()
+		reg.logger.Info().Msg("subscription validator stopped")
+	}).AddWorker(func(parent irrecoverable.SignalerContext, ready component.ReadyFunc) {
+		if !reg.silencePeriodStartTime.IsZero() {
+			parent.Throw(fmt.Errorf("gossipsub scoring registry started more than once"))
+		}
+		reg.silencePeriodStartTime = time.Now()
+		ready()
+	}).AddWorker(reg.invCtrlMsgNotifWorkerPool.WorkerLogic()) // we must NOT have more than one worker for processing notifications; handling notifications is NOT idempotent.
+
+	for i := 0; i < config.Parameters.ScoreUpdateWorkerNum; i++ {
+		builder.AddWorker(reg.appScoreUpdateWorkerPool.WorkerLogic())
 	}
 
-	return reg
+	reg.Component = builder.Build()
+
+	return reg, nil
 }
 
 var _ p2p.GossipSubInvCtrlMsgNotifConsumer = (*GossipSubAppSpecificScoreRegistry)(nil)
 
-// AppSpecificScoreFunc returns the application specific penalty function that is called by the GossipSub protocol to determine the application specific penalty of a peer.
+// AppSpecificScoreFunc returns the application specific score function that is called by the GossipSub protocol to determine the application specific score of a peer.
+// The application specific score is part of the overall score of a peer, and is used to rank the peer based on its behavior.
+// This function reads the application specific score of a peer from the cache; if the score is not found in the cache, it is computed and added to the cache.
+// Likewise, if the cached score has expired, it is recomputed asynchronously and the cache is refreshed.
+// Returns:
+// - func(peer.ID) float64: the application specific score function.
+// The implementation must be thread-safe.
 func (r *GossipSubAppSpecificScoreRegistry) AppSpecificScoreFunc() func(peer.ID) float64 {
 	return func(pid peer.ID) float64 {
-		appSpecificScore := float64(0)
+		lg := r.logger.With().Str("remote_peer_id", p2plogging.PeerId(pid)).Logger()
 
-		lg := r.logger.With().Str("peer_id", pid.String()).Logger()
-		// (1) spam penalty: the penalty is applied to the application specific penalty when a peer conducts a spamming misbehaviour.
-		spamRecord, err, spamRecordExists := r.spamScoreCache.Get(pid)
-		if err != nil {
-			// the error is considered fatal as it means the cache is not working properly.
-			// we should not continue with the execution as it may lead to routing attack vulnerability.
-			r.logger.Fatal().Str("peer_id", pid.String()).Err(err).Msg("could not get application specific penalty for peer")
-			return appSpecificScore // unreachable, but added to avoid proceeding with the execution if log level is changed.
+		// during the startup silence period, avoid penalizing nodes.
+		if !r.afterSilencePeriod() {
+			lg.Trace().Msg("returning 0 app specific score penalty for node during silence period")
+			return 0
 		}
 
-		if spamRecordExists {
-			lg = lg.With().Float64("spam_penalty", spamRecord.Penalty).Logger()
-			appSpecificScore += spamRecord.Penalty
+		appSpecificScore, lastUpdated, ok := r.appScoreCache.Get(pid)
+		switch {
+		case !ok:
+			// record not found in the cache, or expired; submit a worker to update it.
+			submitted := r.appScoreUpdateWorkerPool.Submit(pid)
+			lg.Trace().
+				Bool("worker_submitted", submitted).
+				Msg("application specific score not found in cache, submitting worker to update it")
+			return 0 // in the meantime, return 0, which is a neutral score.
+		case time.Since(lastUpdated) > r.scoreTTL:
+			// record found in the cache, but expired; submit a worker to update it.
+			submitted := r.appScoreUpdateWorkerPool.Submit(pid)
+			lg.Trace().
+				Bool("worker_submitted", submitted).
+				Float64("app_specific_score", appSpecificScore).
+				Dur("score_ttl", r.scoreTTL).
+				Msg("application specific score expired, submitting worker to update it")
+			return appSpecificScore // in the meantime, return the expired score.
+		default:
+			// record found in the cache.
+			r.logger.Trace().
+				Float64("app_specific_score", appSpecificScore).
+				Msg("application specific score found in cache")
+			return appSpecificScore
 		}
+	}
+}
 
-		// (2) staking score: for staked peers, a default positive reward is applied only if the peer has no penalty on spamming and subscription.
-		// for unknown peers a negative penalty is applied.
-		stakingScore, flowId, role := r.stakingScore(pid)
-		if stakingScore < 0 {
-			lg = lg.With().Float64("staking_penalty", stakingScore).Logger()
-			// staking penalty is applied right away.
-			appSpecificScore += stakingScore
-		}
+// computeAppSpecificScore computes the application specific score of a peer.
+// The application specific score is computed based on the spam penalty, staking score, subscription penalty, and duplicate messages penalty.
+// The spam penalty is the penalty applied to the application specific score when a peer conducts a spamming misbehaviour.
+// The staking score is the reward/penalty applied to the application specific score when a peer is staked/unstaked.
+// The subscription penalty is the penalty applied to the application specific score when a peer is subscribed to a topic that it is not allowed to subscribe to based on its role.
+// The duplicate messages penalty is applied when the number of duplicate messages detected for a peer exceeds the configured threshold.
+// Args:
+// - pid: the peer ID of the peer in the GossipSub protocol.
+// Returns:
+// - float64: the application specific score of the peer.
+func (r *GossipSubAppSpecificScoreRegistry) computeAppSpecificScore(pid peer.ID) float64 {
+	appSpecificScore := float64(0)
 
-		if stakingScore >= 0 {
-			// (3) subscription penalty: the subscription penalty is applied to the application specific penalty when a
-			// peer is subscribed to a topic that it is not allowed to subscribe to based on its role.
-			// Note: subscription penalty can be considered only for staked peers, for non-staked peers, we cannot
-			// determine the role of the peer.
-			subscriptionPenalty := r.subscriptionPenalty(pid, flowId, role)
-			lg = lg.With().Float64("subscription_penalty", subscriptionPenalty).Logger()
-			if subscriptionPenalty < 0 {
-				appSpecificScore += subscriptionPenalty
-			}
-		}
+	lg := r.logger.With().Str("peer_id", p2plogging.PeerId(pid)).Logger()
+	// (1) spam penalty: the penalty is applied to the application specific penalty when a peer conducts a spamming misbehaviour.
+	spamRecord, err, spamRecordExists := r.spamScoreCache.Get(pid)
+	if err != nil {
+		// the error is considered fatal as it means the cache is not working properly.
+		// we should not continue with the execution as it may lead to routing attack vulnerability.
+		r.logger.Fatal().Str("peer_id", p2plogging.PeerId(pid)).Err(err).Msg("could not get application specific penalty for peer")
+		return appSpecificScore // unreachable, but added to avoid proceeding with the execution if log level is changed.
+	}
 
-		// (4) staking reward: for staked peers, a default positive reward is applied only if the peer has no penalty on spamming and subscription.
-		if stakingScore > 0 && appSpecificScore == float64(0) {
-			lg = lg.With().Float64("staking_reward", stakingScore).Logger()
-			appSpecificScore += stakingScore
+	if spamRecordExists {
+		lg = lg.With().Float64("spam_penalty", spamRecord.Penalty).Logger()
+		appSpecificScore += spamRecord.Penalty
+	}
+
+	// (2) staking score: for staked peers, a default positive reward is applied only if the peer has no penalty on spamming and subscription.
+	// for unknown peers a negative penalty is applied.
+	stakingScore, flowId, role := r.stakingScore(pid)
+	if stakingScore < 0 {
+		lg = lg.With().Float64("staking_penalty", stakingScore).Logger()
+		// staking penalty is applied right away.
+		appSpecificScore += stakingScore
+	}
+
+	if stakingScore >= 0 {
+		// (3) subscription penalty: the subscription penalty is applied to the application specific penalty when a
+		// peer is subscribed to a topic that it is not allowed to subscribe to based on its role.
+		// Note: subscription penalty can be considered only for staked peers, for non-staked peers, we cannot
+		// determine the role of the peer.
+		subscriptionPenalty := r.subscriptionPenalty(pid, flowId, role)
+		lg = lg.With().Float64("subscription_penalty", subscriptionPenalty).Logger()
+		if subscriptionPenalty < 0 {
+			appSpecificScore += subscriptionPenalty
 		}
+	}
 
-		lg.Trace().
-			Float64("total_app_specific_score", appSpecificScore).
-			Msg("application specific penalty computed")
+	// (4) duplicate messages penalty: the duplicate messages penalty is applied to the application specific penalty as long
+	// as the number of duplicate messages detected for a peer is greater than 0. This counter is decayed over time, thus sustained
+	// good behavior should eventually bring the applied duplicate messages penalty back to 0.
+	duplicateMessagesPenalty := r.duplicateMessagesPenalty(pid)
+	if duplicateMessagesPenalty < 0 {
+		lg = lg.With().Float64("duplicate_messages_penalty", duplicateMessagesPenalty).Logger()
+		appSpecificScore += duplicateMessagesPenalty
+	}
+
+	// (5) staking reward: for staked peers, a default positive reward is applied only if the peer has no penalty on spamming and subscription.
+	if stakingScore > 0 && appSpecificScore == float64(0) {
+		lg = lg.With().Float64("staking_reward", stakingScore).Logger()
+		appSpecificScore += stakingScore
+	}
+
+	lg.Trace().
+		Float64("total_app_specific_score", appSpecificScore).
+		Msg("application specific score computed")
+	return appSpecificScore
+}
 
-		return appSpecificScore
+// processAppSpecificScoreUpdateWork is the worker function that is called by the worker pool to update the application specific score of a peer.
+// The function is called in a non-blocking way, and the worker pool is used to limit the number of concurrent executions of the function.
+// Args:
+// - pid: the peer ID of the peer in the GossipSub protocol.
+// Returns:
+// - error: an error if the update failed; any returned error is an irrecoverable error and indicates a bug or misconfiguration.
+func (r *GossipSubAppSpecificScoreRegistry) processAppSpecificScoreUpdateWork(p peer.ID) error {
+	appSpecificScore := r.computeAppSpecificScore(p)
+	err := r.appScoreCache.AdjustWithInit(p, appSpecificScore, time.Now())
+	if err != nil {
+		// the error is considered fatal as it means the cache is not working properly.
+		return fmt.Errorf("could not add application specific score %f for peer to cache: %w", appSpecificScore, err)
 	}
+	r.logger.Trace().
+		Str("remote_peer_id", p2plogging.PeerId(p)).
+		Float64("app_specific_score", appSpecificScore).
+		Msg("application specific score computed and cache updated")
+	return nil
 }
 
 func (r *GossipSubAppSpecificScoreRegistry) stakingScore(pid peer.ID) (float64, flow.Identifier, flow.Role) {
-	lg := r.logger.With().Str("peer_id", pid.String()).Logger()
+	lg := r.logger.With().Str("peer_id", p2plogging.PeerId(pid)).Logger()
 
 	// checks if peer has a valid Flow protocol identity.
 	flowId, err := HasValidFlowIdentity(r.idProvider, pid)
@@ -197,7 +349,7 @@ func (r *GossipSubAppSpecificScoreRegistry) stakingScore(pid peer.ID) (float64,
 			Err(err).
 			Bool(logging.KeySuspicious, true).
 			Msg("invalid peer identity, penalizing peer")
-		return DefaultUnknownIdentityPenalty, flow.Identifier{}, 0
+		return r.appSpecificScoreParams.UnknownIdentityPenalty, flow.Identifier{}, 0
 	}
 
 	lg = lg.With().
@@ -210,79 +362,140 @@ func (r *GossipSubAppSpecificScoreRegistry) stakingScore(pid peer.ID) (float64,
 	if flowId.Role == flow.RoleAccess {
 		lg.Trace().
 			Msg("pushing access node to edge by penalizing with minimum penalty value")
-		return MinAppSpecificPenalty, flowId.NodeID, flowId.Role
+		return r.appSpecificScoreParams.MinAppSpecificPenalty, flowId.NodeID, flowId.Role
 	}
 
 	lg.Trace().
 		Msg("rewarding well-behaved non-access node peer with maximum reward value")
-	return DefaultStakedIdentityReward, flowId.NodeID, flowId.Role
+	return r.appSpecificScoreParams.StakedIdentityReward, flowId.NodeID, flowId.Role
 }
 
 func (r *GossipSubAppSpecificScoreRegistry) subscriptionPenalty(pid peer.ID, flowId flow.Identifier, role flow.Role) float64 {
 	// checks if peer has any subscription violation.
 	if err := r.validator.CheckSubscribedToAllowedTopics(pid, role); err != nil {
-		r.logger.Err(err).
-			Str("peer_id", pid.String()).
+		r.logger.Warn().
+			Err(err).
+			Str("peer_id", p2plogging.PeerId(pid)).
 			Hex("flow_id", logging.ID(flowId)).
 			Bool(logging.KeySuspicious, true).
 			Msg("invalid subscription detected, penalizing peer")
-		return DefaultInvalidSubscriptionPenalty
+		return r.appSpecificScoreParams.InvalidSubscriptionPenalty
 	}
 
 	return 0
 }
 
+// duplicateMessagesPenalty returns the duplicate message penalty for a peer. A penalty is only returned if the duplicate
+// message count for a peer exceeds the configured duplicate message threshold. A penalty is applied for the amount of duplicate
+// messages above that threshold.
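+// As a worked illustration (editor's sketch with hypothetical numbers, not defaults from this change):
+// with a threshold of 100, a per-message penalty unit of -0.5, and an observed count of 150, the
+// resulting penalty is (150 - 100) * -0.5 = -25, clipped at MaxAppSpecificPenalty:
+//
+//	overage := count - threshold         // 150 - 100 = 50
+//	penalty := overage * penaltyUnit     // 50 * -0.5 = -25
+//	if penalty < maxAppSpecificPenalty { // clip at the maximum (most negative) penalty
+//		penalty = maxAppSpecificPenalty
+//	}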
+func (r *GossipSubAppSpecificScoreRegistry) duplicateMessagesPenalty(pid peer.ID) float64 {
+	duplicateMessageCount, duplicateMessagePenalty := 0.0, 0.0
+	defer func() {
+		r.collector.DuplicateMessagesCounts(duplicateMessageCount)
+		r.collector.DuplicateMessagePenalties(duplicateMessagePenalty)
+	}()
+
+	duplicateMessageCount = r.getDuplicateMessageCount(pid)
+	if duplicateMessageCount > r.duplicateMessageThreshold {
+		duplicateMessagePenalty = (duplicateMessageCount - r.duplicateMessageThreshold) * r.appSpecificScoreParams.DuplicateMessagePenalty
+		if duplicateMessagePenalty < r.appSpecificScoreParams.MaxAppSpecificPenalty {
+			return r.appSpecificScoreParams.MaxAppSpecificPenalty
+		}
+	}
+	return duplicateMessagePenalty
+}
+
 // OnInvalidControlMessageNotification is called when a new invalid control message notification is distributed.
 // Any error on consuming event must handle internally.
 // The implementation must be concurrency safe, but can be blocking.
+// Note: there is no real-time guarantee on processing the notification.
 func (r *GossipSubAppSpecificScoreRegistry) OnInvalidControlMessageNotification(notification *p2p.InvCtrlMsgNotif) {
-	// we use mutex to ensure the method is concurrency safe.
+	lg := r.logger.With().Str("peer_id", p2plogging.PeerId(notification.PeerID)).Logger()
+	if ok := r.invCtrlMsgNotifWorkerPool.Submit(notification); !ok {
+		// we use a queue with a fixed size, so this can happen when the queue is full or when the notification is a duplicate.
+		// TODO: we have to add a metric for this case.
+		// TODO: we should not have deduplication for this case; as we need to penalize the peer for each misbehaviour, we need to add a nonce to the notification.
+		lg.Warn().Msg("gossipsub rpc inspector notification queue is full or notification is duplicate, discarding notification")
+		return
+	}
+	lg.Trace().Msg("gossipsub rpc inspector notification submitted to the queue")
+}
+
+// handleMisbehaviourReport is the worker function that is called by the worker pool to handle the misbehaviour report of a peer.
+// The function is called in a non-blocking way, and the worker pool is used to limit the number of concurrent executions of the function.
+// Args:
+// - notification: the notification of the misbehaviour report of a peer.
+// Returns:
+// - error: an error if the update failed; any returned error is an irrecoverable error and indicates a bug or misconfiguration.
+func (r *GossipSubAppSpecificScoreRegistry) handleMisbehaviourReport(notification *p2p.InvCtrlMsgNotif) error {
+	// notifications are processed sequentially by the worker pool's single worker, which keeps this method concurrency safe.
 	lg := r.logger.With().
-		Str("peer_id", notification.PeerID.String()).
+		Err(notification.Error).
 		Str("misbehavior_type", notification.MsgType.String()).Logger()
 
-	// try initializing the application specific penalty for the peer if it is not yet initialized.
-	// this is done to avoid the case where the peer is not yet cached and the application specific penalty is not yet initialized.
-	// initialization is successful only if the peer is not yet cached.
- initialized := r.spamScoreCache.Add(notification.PeerID, r.init()) - if initialized { - lg.Trace().Str("peer_id", notification.PeerID.String()).Msg("application specific penalty initialized for peer") + // during startup silence period avoid penalizing nodes, ignore all notifications + if !r.afterSilencePeriod() { + lg.Trace().Msg("ignoring invalid control message notification for peer during silence period") + return nil } - record, err := r.spamScoreCache.Update(notification.PeerID, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { + record, err := r.spamScoreCache.Adjust(notification.PeerID, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { + penalty := 0.0 switch notification.MsgType { - case p2p.CtrlMsgGraft: - record.Penalty += r.penalty.Graft - case p2p.CtrlMsgPrune: - record.Penalty += r.penalty.Prune - case p2p.CtrlMsgIHave: - record.Penalty += r.penalty.IHave - case p2p.CtrlMsgIWant: - record.Penalty += r.penalty.IWant + case p2pmsg.CtrlMsgGraft: + penalty += r.penalty.GraftMisbehaviour + case p2pmsg.CtrlMsgPrune: + penalty += r.penalty.PruneMisbehaviour + case p2pmsg.CtrlMsgIHave: + penalty += r.penalty.IHaveMisbehaviour + case p2pmsg.CtrlMsgIWant: + penalty += r.penalty.IWantMisbehaviour + case p2pmsg.RpcPublishMessage: + penalty += r.penalty.PublishMisbehaviour + case p2pmsg.CtrlMsgRPC: + penalty += r.penalty.PublishMisbehaviour default: // the error is considered fatal as it means that we have an unsupported misbehaviour type, we should crash the node to prevent routing attack vulnerability. lg.Fatal().Str("misbehavior_type", notification.MsgType.String()).Msg("unknown misbehaviour type") } + // reduce penalty for cluster prefixed topics allowing nodes that are potentially behind to catch up + if notification.TopicType == p2p.CtrlMsgTopicTypeClusterPrefixed { + penalty *= r.penalty.ClusterPrefixedReductionFactor + } + + record.Penalty += penalty + return record }) - if err != nil { // any returned error from adjust is non-recoverable and fatal, we crash the node. lg.Fatal().Err(err).Msg("could not adjust application specific penalty for peer") } lg.Debug(). - Float64("app_specific_score", record.Penalty). + Float64("spam_record_penalty", record.Penalty). Msg("applied misbehaviour penalty and updated application specific penalty") + + return nil +} + +// afterSilencePeriod returns true if registry silence period is over, false otherwise. +func (r *GossipSubAppSpecificScoreRegistry) afterSilencePeriod() bool { + if !r.silencePeriodElapsed.Load() { + if time.Since(r.silencePeriodStartTime) > r.silencePeriodDuration { + r.silencePeriodElapsed.Store(true) + return true + } + return false + } + return true } // DefaultDecayFunction is the default decay function that is used to decay the application specific penalty of a peer. // It is used if no decay function is provided in the configuration. // It decays the application specific penalty of a peer if it is negative. 
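// As a concrete illustration (editor's note, assuming a decay factor of 0.99 and a skip-decay
// threshold of -0.1): the penalty shrinks geometrically, penalty_n = penalty_0 * 0.99^n, so a
// penalty of -100 needs n > ln(0.001)/ln(0.99) ≈ 688 applications before |penalty| < 0.1, at
// which point the preprocessor snaps the penalty to exactly 0 instead of letting it linger near zero.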
-func DefaultDecayFunction() netcache.PreprocessorFunc {
+func DefaultDecayFunction(cfg p2pconfig.SpamRecordCacheDecay) netcache.PreprocessorFunc {
 	return func(record p2p.GossipSubSpamRecord, lastUpdated time.Time) (p2p.GossipSubSpamRecord, error) {
 		if record.Penalty >= 0 {
 			// no need to decay the penalty if it is positive, the reason is currently the app specific penalty
@@ -291,9 +504,11 @@ func DefaultDecayFunction() netcache.PreprocessorFunc {
 			return record, nil
 		}
 
-		if record.Penalty > skipDecayThreshold {
+		if record.Penalty > cfg.SkipDecayThreshold {
 			// penalty is negative but greater than the threshold, we set it to 0.
 			record.Penalty = 0
+			record.Decay = cfg.MaximumSpamPenaltyDecayFactor
+			record.LastDecayAdjustment = time.Time{}
 			return record, nil
 		}
 
@@ -303,16 +518,27 @@ func DefaultDecayFunction() netcache.PreprocessorFunc {
 			return record, fmt.Errorf("could not decay application specific penalty: %w", err)
 		}
 		record.Penalty = penalty
+
+		if record.Penalty <= cfg.PenaltyDecaySlowdownThreshold {
+			if time.Since(record.LastDecayAdjustment) > cfg.PenaltyDecayEvaluationPeriod || record.LastDecayAdjustment.IsZero() {
+				// slows down the decay, capping the decay factor at MinimumSpamPenaltyDecayFactor (the slowest permitted decay speed).
+				record.Decay = math.Min(record.Decay+cfg.DecayRateReductionFactor, cfg.MinimumSpamPenaltyDecayFactor)
+				record.LastDecayAdjustment = time.Now()
+			}
+		}
 		return record, nil
 	}
 }
 
-// InitAppScoreRecordState initializes the gossipsub spam record state for a peer.
+// InitAppScoreRecordStateFunc returns a callback that initializes the gossipsub spam record state for a peer.
 // Returns:
-// - a gossipsub spam record with the default decay value and 0 penalty.
-func InitAppScoreRecordState() p2p.GossipSubSpamRecord {
-	return p2p.GossipSubSpamRecord{
-		Decay:   defaultDecay,
-		Penalty: 0,
+// - a func that returns a gossipsub spam record with the default decay value and 0 penalty.
+func InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor float64) func() p2p.GossipSubSpamRecord {
+	return func() p2p.GossipSubSpamRecord {
+		return p2p.GossipSubSpamRecord{
+			Decay:               maximumSpamPenaltyDecayFactor,
+			Penalty:             0,
+			LastDecayAdjustment: time.Now(),
+		}
 	}
 }
diff --git a/network/p2p/scoring/registry_test.go b/network/p2p/scoring/registry_test.go
index 186ce7bf6bc..7b8bb9fe3d2 100644
--- a/network/p2p/scoring/registry_test.go
+++ b/network/p2p/scoring/registry_test.go
@@ -1,289 +1,775 @@
 package scoring_test
 
 import (
+	"context"
 	"fmt"
+	"io"
 	"math"
+	"sync"
 	"testing"
 	"time"
 
 	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
 	testifymock "github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
 
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
 	"github.com/onflow/flow-go/module/metrics"
 	"github.com/onflow/flow-go/module/mock"
+	"github.com/onflow/flow-go/network"
 	"github.com/onflow/flow-go/network/p2p"
 	netcache "github.com/onflow/flow-go/network/p2p/cache"
+	p2pconfig "github.com/onflow/flow-go/network/p2p/config"
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
 	mockp2p "github.com/onflow/flow-go/network/p2p/mock"
 	"github.com/onflow/flow-go/network/p2p/scoring"
+	"github.com/onflow/flow-go/network/p2p/scoring/internal"
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
-// TestNoPenaltyRecord tests that if there is no penalty record for a peer id, the app specific score should be the max
-// app specific reward.
This is the default reward for a staked peer that has valid subscriptions and has not been -// penalized. -func TestNoPenaltyRecord(t *testing.T) { +// TestScoreRegistry_FreshStart tests the app specific score computation of the node when there is no spam record for the peer id upon fresh start of the registry. +// It tests the state that a staked peer with a valid role and valid subscriptions has no spam records; hence it should "eventually" be rewarded with the default reward +// for its GossipSub app specific score. The "eventually" comes from the fact that the app specific score is updated asynchronously in the cache, and the cache is +// updated when the app specific score function is called by GossipSub. +func TestScoreRegistry_FreshStart(t *testing.T) { peerID := peer.ID("peer-1") - reg, spamRecords := newGossipSubAppSpecificScoreRegistry( - t, - withStakedIdentity(peerID), + + cfg, err := config.DefaultConfig() + require.NoError(t, err) + // refresh cached app-specific score every 100 milliseconds to speed up the test. + cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond + + maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor + reg, spamRecords, appScoreCache := newGossipSubAppSpecificScoreRegistry(t, + cfg.NetworkConfig.GossipSub.ScoringParameters, + scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor), + withStakedIdentities(peerID), withValidSubscriptions(peerID)) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + reg.Start(signalerCtx) + unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "registry did not start in time") - // initially, the spamRecords should not have the peer id. - assert.False(t, spamRecords.Has(peerID)) + defer stopRegistry(t, cancel, reg) - score := reg.AppSpecificScoreFunc()(peerID) - // since the peer id does not have a spam record, the app specific score should be the max app specific reward, which - // is the default reward for a staked peer that has valid subscriptions. - assert.Equal(t, scoring.MaxAppSpecificReward, score) + // initially, the spamRecords should not have the peer id, and there should be no app-specific score in the cache. + require.False(t, spamRecords.Has(peerID)) + score, updated, exists := appScoreCache.Get(peerID) // get the score from the cache. + require.False(t, exists) + require.Equal(t, time.Time{}, updated) + require.Equal(t, float64(0), score) + + maxAppSpecificReward := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.MaxAppSpecificReward + + queryTime := time.Now() + require.Eventually(t, func() bool { + // calling the app specific score function when there is no app specific score in the cache should eventually update the cache. + score := reg.AppSpecificScoreFunc()(peerID) + // since the peer id does not have a spam record, the app specific score should be the max app specific reward, which + // is the default reward for a staked peer that has valid subscriptions. + return score == maxAppSpecificReward + }, 5*time.Second, 100*time.Millisecond) // still the spamRecords should not have the peer id (as there is no spam record for the peer id). - assert.False(t, spamRecords.Has(peerID)) + require.False(t, spamRecords.Has(peerID)) + + // however, the app specific score should be updated in the cache. 
+ score, updated, exists = appScoreCache.Get(peerID) // get the score from the cache. + require.True(t, exists) + require.True(t, updated.After(queryTime)) + require.Equal(t, maxAppSpecificReward, score) + + // stop the registry. + cancel() + unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry") } -// TestPeerWithSpamRecord tests the app specific penalty computation of the node when there is a spam record for the peer id. -// It tests the state that a staked peer with a valid role and valid subscriptions has spam records. -// Since the peer has spam records, it should be deprived of the default reward for its staked role, and only have the -// penalty value as the app specific score. -func TestPeerWithSpamRecord(t *testing.T) { +// TestScoreRegistry_PeerWithSpamRecord is a test suite designed to assess the app-specific penalty computation +// in a scenario where a peer with a staked identity and valid subscriptions has a spam record. The suite runs multiple +// sub-tests, each targeting a specific type of control message (graft, prune, ihave, iwant, RpcPublishMessage). The focus +// is on the impact of spam records on the app-specific score, specifically how such records negate the default reward +// a staked peer would otherwise receive, leaving only the penalty as the app-specific score. This testing reflects the +// asynchronous nature of app-specific score updates in GossipSub's cache. +func TestScoreRegistry_PeerWithSpamRecord(t *testing.T) { t.Run("graft", func(t *testing.T) { - testPeerWithSpamRecord(t, p2p.CtrlMsgGraft, penaltyValueFixtures().Graft) + testScoreRegistryPeerWithSpamRecord(t, p2pmsg.CtrlMsgGraft, penaltyValueFixtures().GraftMisbehaviour) }) t.Run("prune", func(t *testing.T) { - testPeerWithSpamRecord(t, p2p.CtrlMsgPrune, penaltyValueFixtures().Prune) + testScoreRegistryPeerWithSpamRecord(t, p2pmsg.CtrlMsgPrune, penaltyValueFixtures().PruneMisbehaviour) }) t.Run("ihave", func(t *testing.T) { - testPeerWithSpamRecord(t, p2p.CtrlMsgIHave, penaltyValueFixtures().IHave) + testScoreRegistryPeerWithSpamRecord(t, p2pmsg.CtrlMsgIHave, penaltyValueFixtures().IHaveMisbehaviour) }) t.Run("iwant", func(t *testing.T) { - testPeerWithSpamRecord(t, p2p.CtrlMsgIWant, penaltyValueFixtures().IWant) + testScoreRegistryPeerWithSpamRecord(t, p2pmsg.CtrlMsgIWant, penaltyValueFixtures().IWantMisbehaviour) + }) + t.Run("RpcPublishMessage", func(t *testing.T) { + testScoreRegistryPeerWithSpamRecord(t, p2pmsg.RpcPublishMessage, penaltyValueFixtures().PublishMisbehaviour) }) } -func testPeerWithSpamRecord(t *testing.T, messageType p2p.ControlMessageType, expectedPenalty float64) { +// testScoreRegistryPeerWithSpamRecord conducts an individual test within the TestScoreRegistry_PeerWithSpamRecord suite. +// It evaluates the ScoreRegistry's handling of a staked peer with valid subscriptions when a spam record is present for +// the peer ID. The function simulates the process of starting the registry, recording a misbehavior, and then verifying the +// updates to the spam records and app-specific score cache based on the type of control message received. +// Parameters: +// - t *testing.T: The test context. +// - messageType p2pmsg.ControlMessageType: The type of control message being tested. +// - expectedPenalty float64: The expected penalty value for the given control message type. 
+// This function specifically tests how the ScoreRegistry updates a peer's app-specific score in response to spam records,
+// emphasizing the removal of the default reward for staked peers with valid roles and focusing on the asynchronous update
+// mechanism of the app-specific score in the cache.
+func testScoreRegistryPeerWithSpamRecord(t *testing.T, messageType p2pmsg.ControlMessageType, expectedPenalty float64) {
 	peerID := peer.ID("peer-1")
-	reg, spamRecords := newGossipSubAppSpecificScoreRegistry(
-		t,
-		withStakedIdentity(peerID),
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// refresh cached app-specific score every 10 milliseconds to speed up the test.
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 10 * time.Millisecond
+
+	maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor
+	reg, spamRecords, appScoreCache := newGossipSubAppSpecificScoreRegistry(t,
+		cfg.NetworkConfig.GossipSub.ScoringParameters,
+		scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor),
+		withStakedIdentities(peerID),
 		withValidSubscriptions(peerID))
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	reg.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "registry did not start in time")
 
-	// initially, the spamRecords should not have the peer id.
-	assert.False(t, spamRecords.Has(peerID))
+	defer stopRegistry(t, cancel, reg)
 
-	// since the peer id does not have a spam record, the app specific score should be the max app specific reward, which
-	// is the default reward for a staked peer that has valid subscriptions.
-	score := reg.AppSpecificScoreFunc()(peerID)
-	assert.Equal(t, scoring.MaxAppSpecificReward, score)
+	// initially, the spamRecords should not have the peer id; also the app specific score record should not be in the cache.
+	require.False(t, spamRecords.Has(peerID))
+	score, updated, exists := appScoreCache.Get(peerID) // get the score from the cache.
+	require.False(t, exists)
+	require.Equal(t, time.Time{}, updated)
+	require.Equal(t, float64(0), score)
+
+	scoreOptParameters := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore
+
+	// eventually, the app specific score should be updated in the cache.
+	require.Eventually(t, func() bool {
+		// calling the app specific score function when there is no app specific score in the cache should eventually update the cache.
+		score := reg.AppSpecificScoreFunc()(peerID)
+		// since the peer id does not have a spam record, the app specific score should be the max app specific reward, which
+		// is the default reward for a staked peer that has valid subscriptions.
+		return scoreOptParameters.MaxAppSpecificReward == score
+	}, 5*time.Second, 100*time.Millisecond)
 
 	// report a misbehavior for the peer id.
 	reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
 		PeerID:  peerID,
 		MsgType: messageType,
-		Count:   1,
 	})
 
-	// the penalty should now be updated in the spamRecords
-	record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords.
-	assert.True(t, ok)
-	assert.NoError(t, err)
-	assert.Less(t, math.Abs(expectedPenalty-record.Penalty), 10e-3)        // penalty should be updated to -10.
-	assert.Equal(t, scoring.InitAppScoreRecordState().Decay, record.Decay) // decay should be initialized to the initial state.
- - // this peer has a spam record, with no subscription penalty. Hence, the app specific score should only be the spam penalty, - // and the peer should be deprived of the default reward for its valid staked role. - score = reg.AppSpecificScoreFunc()(peerID) - assert.Less(t, math.Abs(expectedPenalty-score), 10e-3) + queryTime := time.Now() + require.Eventually(t, func() bool { + // the notification is processed asynchronously, and the penalty should eventually be updated in the spamRecords + record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords. + if !ok { + return false + } + require.NoError(t, err) + if !unittest.AreNumericallyClose(expectedPenalty, record.Penalty, 10e-2) { + return false + } + require.Equal(t, scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor)().Decay, record.Decay) // decay should be initialized to the initial state. + + // eventually, the app specific score should be updated in the cache. + // this peer has a spam record, with no subscription penalty. Hence, the app specific score should only be the spam penalty, + // and the peer should be deprived of the default reward for its valid staked role. + // As the app specific score in the cache and spam penalty in the spamRecords are updated at different times, we account for 5% error. + return unittest.AreNumericallyClose(expectedPenalty, reg.AppSpecificScoreFunc()(peerID), 0.05) + }, 5*time.Second, 10*time.Millisecond) + + // the app specific score should now be updated in the cache. + score, updated, exists = appScoreCache.Get(peerID) // get the score from the cache. + require.True(t, exists) + require.True(t, updated.After(queryTime)) + require.True(t, unittest.AreNumericallyClose(expectedPenalty, score, 0.1)) // account for maximum 10% error due to decays and asynchrony. + + // stop the registry. + cancel() + unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry") } -func TestSpamRecord_With_UnknownIdentity(t *testing.T) { +// TestScoreRegistry_SpamRecordWithUnknownIdentity is a test suite for verifying the behavior of the ScoreRegistry +// when handling spam records associated with unknown identities. It tests various scenarios based on different control +// message types, including graft, prune, ihave, iwant, and RpcPublishMessage. Each sub-test validates the app-specific +// penalty computation and updates to the score registry when a peer with an unknown identity sends these control messages. 
+func TestScoreRegistry_SpamRecordWithUnknownIdentity(t *testing.T) { t.Run("graft", func(t *testing.T) { - testSpamRecordWithUnknownIdentity(t, p2p.CtrlMsgGraft, penaltyValueFixtures().Graft) + testScoreRegistrySpamRecordWithUnknownIdentity(t, p2pmsg.CtrlMsgGraft, penaltyValueFixtures().GraftMisbehaviour) }) t.Run("prune", func(t *testing.T) { - testSpamRecordWithUnknownIdentity(t, p2p.CtrlMsgPrune, penaltyValueFixtures().Prune) + testScoreRegistrySpamRecordWithUnknownIdentity(t, p2pmsg.CtrlMsgPrune, penaltyValueFixtures().PruneMisbehaviour) }) t.Run("ihave", func(t *testing.T) { - testSpamRecordWithUnknownIdentity(t, p2p.CtrlMsgIHave, penaltyValueFixtures().IHave) + testScoreRegistrySpamRecordWithUnknownIdentity(t, p2pmsg.CtrlMsgIHave, penaltyValueFixtures().IHaveMisbehaviour) }) t.Run("iwant", func(t *testing.T) { - testSpamRecordWithUnknownIdentity(t, p2p.CtrlMsgIWant, penaltyValueFixtures().IWant) + testScoreRegistrySpamRecordWithUnknownIdentity(t, p2pmsg.CtrlMsgIWant, penaltyValueFixtures().IWantMisbehaviour) + }) + t.Run("RpcPublishMessage", func(t *testing.T) { + testScoreRegistrySpamRecordWithUnknownIdentity(t, p2pmsg.RpcPublishMessage, penaltyValueFixtures().PublishMisbehaviour) }) } -// testSpamRecordWithUnknownIdentity tests the app specific penalty computation of the node when there is a spam record for the peer id and -// the peer id has an unknown identity. -func testSpamRecordWithUnknownIdentity(t *testing.T, messageType p2p.ControlMessageType, expectedPenalty float64) { +// testScoreRegistrySpamRecordWithUnknownIdentity tests the app-specific penalty computation of the node when there +// is a spam record for a peer ID with an unknown identity. It examines the functionality of the GossipSubAppSpecificScoreRegistry +// under various conditions, including the initialization state, spam record creation, and the impact of different control message types. +// Parameters: +// - t *testing.T: The testing context. +// - messageType p2pmsg.ControlMessageType: The type of control message being tested. +// - expectedPenalty float64: The expected penalty value for the given control message type. +// The function simulates the process of starting the registry, reporting a misbehavior for the peer ID, and verifying the +// updates to the spam records and app-specific score cache. It ensures that the penalties are correctly computed and applied +// based on the given control message type and the state of the peer ID (unknown identity and spam record presence). +func testScoreRegistrySpamRecordWithUnknownIdentity(t *testing.T, messageType p2pmsg.ControlMessageType, expectedPenalty float64) { peerID := peer.ID("peer-1") - reg, spamRecords := newGossipSubAppSpecificScoreRegistry( - t, + cfg, err := config.DefaultConfig() + require.NoError(t, err) + // refresh cached app-specific score every 100 milliseconds to speed up the test. 
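+	// (Editor's note on the TTL mechanism: shortening ScoreTTL only shortens how long a cached score is
+	// served before an asynchronous refresh is scheduled; reads within the TTL return the cached value,
+	// and the first read after expiry returns the stale score while a worker recomputes it.)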
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond
+	maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor
+	reg, spamRecords, appScoreCache := newGossipSubAppSpecificScoreRegistry(t,
+		cfg.NetworkConfig.GossipSub.ScoringParameters,
+		scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor),
 		withUnknownIdentity(peerID),
 		withValidSubscriptions(peerID))
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	reg.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "registry did not start in time")
 
-	// initially, the spamRecords should not have the peer id.
-	assert.False(t, spamRecords.Has(peerID))
+	defer stopRegistry(t, cancel, reg)
 
-	// peer does not have spam record, but has an unknown identity. Hence, the app specific score should be the staking penalty.
-	score := reg.AppSpecificScoreFunc()(peerID)
-	require.Equal(t, scoring.DefaultUnknownIdentityPenalty, score)
+	// initially, the spamRecords should not have the peer id; also the app specific score record should not be in the cache.
+	require.False(t, spamRecords.Has(peerID))
+	score, updated, exists := appScoreCache.Get(peerID) // get the score from the cache.
+	require.False(t, exists)
+	require.Equal(t, time.Time{}, updated)
+	require.Equal(t, float64(0), score)
 
+	scoreOptParameters := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore
+
+	// eventually the app specific score should be updated in the cache to the penalty value for unknown identity.
+	require.Eventually(t, func() bool {
+		// calling the app specific score function when there is no app specific score in the cache should eventually update the cache.
+		score := reg.AppSpecificScoreFunc()(peerID)
+		// peer does not have spam record, but has an unknown identity. Hence, the app specific score should be the staking penalty.
+		return scoreOptParameters.UnknownIdentityPenalty == score
+	}, 5*time.Second, 100*time.Millisecond)
 
 	// report a misbehavior for the peer id.
 	reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
 		PeerID:  peerID,
 		MsgType: messageType,
-		Count:   1,
 	})
 
-	// the penalty should now be updated.
-	record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords.
-	assert.True(t, ok)
-	assert.NoError(t, err)
-	assert.Less(t, math.Abs(expectedPenalty-record.Penalty), 10e-3)        // penalty should be updated to -10, we account for decay.
-	assert.Equal(t, scoring.InitAppScoreRecordState().Decay, record.Decay) // decay should be initialized to the initial state.
+	queryTime := time.Now()
+	require.Eventually(t, func() bool {
+		// the notification is processed asynchronously, and the penalty should eventually be updated in the spamRecords
+		record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords.
+		if !ok {
+			return false
+		}
+		require.NoError(t, err)
+		if !unittest.AreNumericallyClose(expectedPenalty, record.Penalty, 10e-2) {
+			return false
+		}
+		require.Equal(t, scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor)().Decay, record.Decay) // decay should be initialized to the initial state.
+
+		// eventually, the app specific score should be updated in the cache.
+		// the peer has spam record as well as an unknown identity.
Hence, the app specific score should be the spam penalty
+		// and the staking penalty.
+		// As the app specific score in the cache and spam penalty in the spamRecords are updated at different times, we account for 5% error.
+		return unittest.AreNumericallyClose(expectedPenalty+scoreOptParameters.UnknownIdentityPenalty, reg.AppSpecificScoreFunc()(peerID), 0.05)
+	}, 5*time.Second, 100*time.Millisecond)
 
-	// the peer has spam record as well as an unknown identity. Hence, the app specific score should be the spam penalty
-	// and the staking penalty.
-	score = reg.AppSpecificScoreFunc()(peerID)
-	assert.Less(t, math.Abs(expectedPenalty+scoring.DefaultUnknownIdentityPenalty-score), 10e-3)
+	// the app specific score should now be updated in the cache.
+	score, updated, exists = appScoreCache.Get(peerID) // get the score from the cache.
+	require.True(t, exists)
+	require.True(t, updated.After(queryTime))
+	unittest.RequireNumericallyClose(t, expectedPenalty+scoreOptParameters.UnknownIdentityPenalty, score, 0.1) // account for maximum 10% error due to decays and asynchrony.
+
+	// stop the registry.
+	cancel()
+	unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry")
 }
 
-func TestSpamRecord_With_SubscriptionPenalty(t *testing.T) {
+// TestScoreRegistry_SpamRecordWithSubscriptionPenalty is a test suite for verifying the behavior of the ScoreRegistry
+// in handling spam records associated with invalid subscriptions. It encompasses a series of sub-tests, each focusing on
+// a different control message type: graft, prune, ihave, iwant, and RpcPublishMessage. These sub-tests are designed to
+// validate the appropriate application of penalties in the ScoreRegistry when a peer with an invalid subscription is involved
+// in spam activities, as indicated by these control messages.
+func TestScoreRegistry_SpamRecordWithSubscriptionPenalty(t *testing.T) {
 	t.Run("graft", func(t *testing.T) {
-		testSpamRecordWithSubscriptionPenalty(t, p2p.CtrlMsgGraft, penaltyValueFixtures().Graft)
+		testScoreRegistrySpamRecordWithSubscriptionPenalty(t, p2pmsg.CtrlMsgGraft, penaltyValueFixtures().GraftMisbehaviour)
 	})
 	t.Run("prune", func(t *testing.T) {
-		testSpamRecordWithSubscriptionPenalty(t, p2p.CtrlMsgPrune, penaltyValueFixtures().Prune)
+		testScoreRegistrySpamRecordWithSubscriptionPenalty(t, p2pmsg.CtrlMsgPrune, penaltyValueFixtures().PruneMisbehaviour)
 	})
 	t.Run("ihave", func(t *testing.T) {
-		testSpamRecordWithSubscriptionPenalty(t, p2p.CtrlMsgIHave, penaltyValueFixtures().IHave)
+		testScoreRegistrySpamRecordWithSubscriptionPenalty(t, p2pmsg.CtrlMsgIHave, penaltyValueFixtures().IHaveMisbehaviour)
 	})
 	t.Run("iwant", func(t *testing.T) {
-		testSpamRecordWithSubscriptionPenalty(t, p2p.CtrlMsgIWant, penaltyValueFixtures().IWant)
+		testScoreRegistrySpamRecordWithSubscriptionPenalty(t, p2pmsg.CtrlMsgIWant, penaltyValueFixtures().IWantMisbehaviour)
+	})
+	t.Run("RpcPublishMessage", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithSubscriptionPenalty(t, p2pmsg.RpcPublishMessage, penaltyValueFixtures().PublishMisbehaviour)
 	})
 }
 
-// testSpamRecordWithUnknownIdentity tests the app specific penalty computation of the node when there is a spam record for the peer id and
-// the peer id has an invalid subscription as well.
-func testSpamRecordWithSubscriptionPenalty(t *testing.T, messageType p2p.ControlMessageType, expectedPenalty float64) { +// testScoreRegistrySpamRecordWithSubscriptionPenalty tests the application-specific penalty computation in the ScoreRegistry +// when a spam record exists for a peer ID that also has an invalid subscription. The function simulates the process of +// initializing the registry, handling spam records, and updating penalties based on various control message types. +// Parameters: +// - t *testing.T: The testing context. +// - messageType p2pmsg.ControlMessageType: The type of control message being tested. +// - expectedPenalty float64: The expected penalty value for the given control message type. +// The function focuses on evaluating the registry's response to spam activities (as represented by control messages) from a +// peer with invalid subscriptions. It verifies that penalties are accurately computed and applied, taking into account both +// the spam record and the invalid subscription status of the peer. +func testScoreRegistrySpamRecordWithSubscriptionPenalty(t *testing.T, messageType p2pmsg.ControlMessageType, expectedPenalty float64) { peerID := peer.ID("peer-1") - reg, spamRecords := newGossipSubAppSpecificScoreRegistry( - t, - withStakedIdentity(peerID), + cfg, err := config.DefaultConfig() + require.NoError(t, err) + // refresh cached app-specific score every 100 milliseconds to speed up the test. + cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond + maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor + reg, spamRecords, appScoreCache := newGossipSubAppSpecificScoreRegistry(t, + cfg.NetworkConfig.GossipSub.ScoringParameters, + scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor), + withStakedIdentities(peerID), withInvalidSubscriptions(peerID)) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + reg.Start(signalerCtx) + unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "registry did not start in time") + + defer stopRegistry(t, cancel, reg) + + // initially, the spamRecords should not have the peer id; also the app specific score record should not be in the cache. + require.False(t, spamRecords.Has(peerID)) + score, updated, exists := appScoreCache.Get(peerID) // get the score from the cache. + require.False(t, exists) + require.Equal(t, time.Time{}, updated) + require.Equal(t, float64(0), score) - // initially, the spamRecords should not have the peer id. - assert.False(t, spamRecords.Has(peerID)) + scoreOptParameters := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore // peer does not have spam record, but has invalid subscription. Hence, the app specific score should be subscription penalty. - score := reg.AppSpecificScoreFunc()(peerID) - require.Equal(t, scoring.DefaultInvalidSubscriptionPenalty, score) + // eventually the app specific score should be updated in the cache to the penalty value for subscription penalty. + require.Eventually(t, func() bool { + // calling the app specific score function when there is no app specific score in the cache should eventually update the cache. + score := reg.AppSpecificScoreFunc()(peerID) + // peer does not have spam record, but has an invalid subscription penalty. 
+		return scoreOptParameters.InvalidSubscriptionPenalty == score
+	}, 5*time.Second, 100*time.Millisecond)
 
 	// report a misbehavior for the peer id.
 	reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
 		PeerID:  peerID,
 		MsgType: messageType,
-		Count:   1,
 	})
 
-	// the penalty should now be updated.
-	record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords.
-	assert.True(t, ok)
-	assert.NoError(t, err)
-	assert.Less(t, math.Abs(expectedPenalty-record.Penalty), 10e-3)
-	assert.Equal(t, scoring.InitAppScoreRecordState().Decay, record.Decay) // decay should be initialized to the initial state.
+	queryTime := time.Now()
+	require.Eventually(t, func() bool {
+		// the notification is processed asynchronously, and the penalty should eventually be updated in the spamRecords
+		record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords.
+		if !ok {
+			return false
+		}
+		require.NoError(t, err)
+		if !unittest.AreNumericallyClose(expectedPenalty, record.Penalty, 10e-2) {
+			return false
+		}
+		require.Equal(t, scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor)().Decay, record.Decay) // decay should be initialized to the initial state.
+
+		// eventually, the app specific score should be updated in the cache.
+		// the peer has a spam record as well as an invalid subscription. Hence, the app specific score should be the spam penalty
+		// plus the subscription penalty.
+		// As the app specific score in the cache and spam penalty in the spamRecords are updated at different times, we account for 5% error.
+		return unittest.AreNumericallyClose(expectedPenalty+scoreOptParameters.InvalidSubscriptionPenalty, reg.AppSpecificScoreFunc()(peerID), 0.05)
+	}, 5*time.Second, 100*time.Millisecond)
 
-	// the peer has spam record as well as an unknown identity. Hence, the app specific score should be the spam penalty
-	// and the staking penalty.
-	score = reg.AppSpecificScoreFunc()(peerID)
-	assert.Less(t, math.Abs(expectedPenalty+scoring.DefaultInvalidSubscriptionPenalty-score), 10e-3)
+	// the app specific score should now be updated in the cache.
+	score, updated, exists = appScoreCache.Get(peerID) // get the score from the cache.
+	require.True(t, exists)
+	require.True(t, updated.After(queryTime))
+	unittest.RequireNumericallyClose(t, expectedPenalty+scoreOptParameters.InvalidSubscriptionPenalty, score, 0.1) // account for maximum 10% error due to decays and asynchrony.
+
+	// stop the registry.
+	cancel()
+	unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry")
+}
+
+// TestScoreRegistry_SpamRecordWithDuplicateMessagesPenalty is a test suite for verifying the behavior of the ScoreRegistry
+// in handling spam records when the duplicate messages penalty is applied. It encompasses a series of sub-tests, each focusing on
+// a different control message type: graft, prune, ihave, iwant, and RpcPublishMessage. These sub-tests are designed to
+// validate the appropriate application of penalties in the ScoreRegistry when a peer has sent duplicate messages.
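+// For orientation (editor's sketch with hypothetical values): each sub-test stubs the duplicate-message
+// counter and expects the app-specific score to converge to roughly
+//
+//	expectedScore ≈ ctrlMsgPenalty + duplicateMessagesCount*duplicateMessagePenalty
+//
+// e.g. with a hypothetical per-duplicate penalty of -0.001, 10000 duplicates would contribute -10, on top
+// of the control-message misbehaviour penalty applied by the notification.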
+func TestScoreRegistry_SpamRecordWithDuplicateMessagesPenalty(t *testing.T) {
+	t.Run("graft", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithDuplicateMessagesPenalty(t, p2pmsg.CtrlMsgGraft, penaltyValueFixtures().GraftMisbehaviour)
+	})
+	t.Run("prune", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithDuplicateMessagesPenalty(t, p2pmsg.CtrlMsgPrune, penaltyValueFixtures().PruneMisbehaviour)
+	})
+	t.Run("ihave", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithDuplicateMessagesPenalty(t, p2pmsg.CtrlMsgIHave, penaltyValueFixtures().IHaveMisbehaviour)
+	})
+	t.Run("iwant", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithDuplicateMessagesPenalty(t, p2pmsg.CtrlMsgIWant, penaltyValueFixtures().IWantMisbehaviour)
+	})
+	t.Run("RpcPublishMessage", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithDuplicateMessagesPenalty(t, p2pmsg.RpcPublishMessage, penaltyValueFixtures().PublishMisbehaviour)
+	})
+}
+
+// testScoreRegistrySpamRecordWithDuplicateMessagesPenalty conducts an individual test within the TestScoreRegistry_SpamRecordWithDuplicateMessagesPenalty suite.
+// It evaluates the ScoreRegistry's handling of a staked peer with valid subscriptions when a spam record is present for
+// the peer ID and the peer has sent duplicate messages. The function simulates the process of starting the registry, recording a misbehavior, receiving duplicate messages tracked via
+// the mesh tracer duplicate messages tracker, and then verifying the expected app specific score.
+// Parameters:
+// - t *testing.T: The test context.
+// - messageType p2pmsg.ControlMessageType: The type of control message being tested.
+// - expectedPenalty float64: The expected penalty value for the given control message type.
+// The function focuses on evaluating the registry's response to spam activities (as represented by control messages) from a
+// peer that has sent duplicate messages. It verifies that penalties are accurately computed and applied, taking into account both
+// the spam record and the duplicate messages penalty.
+func testScoreRegistrySpamRecordWithDuplicateMessagesPenalty(t *testing.T, messageType p2pmsg.ControlMessageType, expectedPenalty float64) {
+	peerID := unittest.PeerIdFixture(t)
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// refresh cached app-specific score every 10 milliseconds to speed up the test.
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 10 * time.Millisecond
+	duplicateMessageThreshold := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.DuplicateMessageThreshold
+	duplicateMessagePenalty := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.DuplicateMessagePenalty
+	maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor
+	duplicateMessagesCount := 10000.0
+	reg, spamRecords, appScoreCache := newGossipSubAppSpecificScoreRegistry(t, cfg.NetworkConfig.GossipSub.ScoringParameters,
+		scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor),
+		withStakedIdentities(peerID),
+		withValidSubscriptions(peerID),
+		func(registryConfig *scoring.GossipSubAppSpecificScoreRegistryConfig) {
+			registryConfig.GetDuplicateMessageCount = func(_ peer.ID) float64 {
+				// we add the duplicate message threshold so that penalization is triggered
+				return duplicateMessagesCount + duplicateMessageThreshold
+			}
+		})
+
+	// starts the registry.
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	reg.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "failed to start GossipSubAppSpecificScoreRegistry")
+
+	// initially, the spamRecords should not have the peer id; also the app specific score record should not be in the cache.
+	require.False(t, spamRecords.Has(peerID))
+	score, updated, exists := appScoreCache.Get(peerID) // get the score from the cache.
+	require.False(t, exists)
+	require.Equal(t, time.Time{}, updated)
+	require.Equal(t, float64(0), score)
+
+	expectedDuplicateMessagesPenalty := duplicateMessagesCount * duplicateMessagePenalty
+	// eventually, the app specific score should be updated in the cache.
+	require.Eventually(t, func() bool {
+		// calling the app specific score function when there is no app specific score in the cache should eventually update the cache.
+		score := reg.AppSpecificScoreFunc()(peerID)
+		// since the peer has no other penalties, the score is eventually expected to equal the expected penalty for 10000 duplicate messages
+		return score == expectedDuplicateMessagesPenalty
+	}, 5*time.Second, 100*time.Millisecond)
+
+	// report a misbehavior for the peer id.
+	reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
+		PeerID:  peerID,
+		MsgType: messageType,
+	})
+
+	queryTime := time.Now()
+	require.Eventually(t, func() bool {
+		// the notification is processed asynchronously, and the penalty should eventually be updated in the spamRecords
+		record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords.
+		if !ok {
+			return false
+		}
+		require.NoError(t, err)
+		if !unittest.AreNumericallyClose(expectedPenalty, record.Penalty, 10e-2) {
+			return false
+		}
+		require.Equal(t, scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor)().Decay, record.Decay) // decay should be initialized to the initial state.
+
+		// eventually, the app specific score should be updated in the cache.
+		// As the app specific score in the cache and spam penalty in the spamRecords are updated at different times, we account for 5% error.
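+		// (assumption, for readability: AreNumericallyClose is treated here as a relative-error check,
+		// roughly |expected-actual| within the given fraction of |expected|; hence "5% error" for 0.05.)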
+		return unittest.AreNumericallyClose(expectedPenalty+expectedDuplicateMessagesPenalty, reg.AppSpecificScoreFunc()(peerID), 0.05)
+	}, 5*time.Second, 100*time.Millisecond)
+
+	// the app specific score should now be updated in the cache.
+	score, updated, exists = appScoreCache.Get(peerID) // get the score from the cache.
+	require.True(t, exists)
+	require.True(t, updated.After(queryTime))
+	unittest.RequireNumericallyClose(t, expectedPenalty+expectedDuplicateMessagesPenalty, score, 0.1) // account for maximum 10% error due to decays and asynchrony.
+
+	// stop the registry.
+	cancel()
+	unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry")
+}
+
+// TestScoreRegistry_SpamRecordWithoutDuplicateMessagesPenalty is a test suite for verifying the behavior of the ScoreRegistry
+// in handling spam records when duplicate messages exist but do not exceed scoring.DefaultDuplicateMessageThreshold, in which case
+// no duplicate messages penalty is applied. It encompasses a series of sub-tests, each focusing on a different control message
+// type: graft, prune, ihave, iwant, and RpcPublishMessage. These sub-tests are designed to
+// validate the appropriate application of penalties in the ScoreRegistry when a peer has sent duplicate messages.
+func TestScoreRegistry_SpamRecordWithoutDuplicateMessagesPenalty(t *testing.T) {
+	t.Run("graft", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithoutDuplicateMessagesPenalty(t, p2pmsg.CtrlMsgGraft, penaltyValueFixtures().GraftMisbehaviour)
+	})
+	t.Run("prune", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithoutDuplicateMessagesPenalty(t, p2pmsg.CtrlMsgPrune, penaltyValueFixtures().PruneMisbehaviour)
+	})
+	t.Run("ihave", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithoutDuplicateMessagesPenalty(t, p2pmsg.CtrlMsgIHave, penaltyValueFixtures().IHaveMisbehaviour)
+	})
+	t.Run("iwant", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithoutDuplicateMessagesPenalty(t, p2pmsg.CtrlMsgIWant, penaltyValueFixtures().IWantMisbehaviour)
+	})
+	t.Run("RpcPublishMessage", func(t *testing.T) {
+		testScoreRegistrySpamRecordWithoutDuplicateMessagesPenalty(t, p2pmsg.RpcPublishMessage, penaltyValueFixtures().PublishMisbehaviour)
+	})
+}
+
+// testScoreRegistrySpamRecordWithoutDuplicateMessagesPenalty conducts an individual test within the TestScoreRegistry_SpamRecordWithoutDuplicateMessagesPenalty suite.
+// It evaluates the ScoreRegistry's handling of a staked peer with valid subscriptions when a spam record is present for
+// the peer ID and the peer has sent some duplicate messages. The function simulates the process of starting the registry, recording a misbehavior, receiving duplicate messages tracked via
+// the mesh tracer duplicate messages tracker, and then verifying the expected app specific score.
+// Parameters:
+// - t *testing.T: The test context.
+// - messageType p2pmsg.ControlMessageType: The type of control message being tested.
+// - expectedPenalty float64: The expected penalty value for the given control message type (from the spam record only).
+// The function focuses on evaluating the registry's response to spam activities (as represented by control messages) from a
+// peer that has sent duplicate messages. It verifies that the duplicate messages penalty is not applied if the duplicate message count for a peer
+// does not exceed scoring.DefaultDuplicateMessageThreshold.
+func testScoreRegistrySpamRecordWithoutDuplicateMessagesPenalty(t *testing.T, messageType p2pmsg.ControlMessageType, expectedPenalty float64) {
+	peerID := unittest.PeerIdFixture(t)
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// refresh cached app-specific score every 10 milliseconds to speed up the test.
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 10 * time.Millisecond
+	duplicateMessageThreshold := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.DuplicateMessageThreshold
+	maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor
+	reg, spamRecords, appScoreCache := newGossipSubAppSpecificScoreRegistry(t, cfg.NetworkConfig.GossipSub.ScoringParameters,
+		scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor),
+		withStakedIdentities(peerID),
+		withValidSubscriptions(peerID),
+		func(registryConfig *scoring.GossipSubAppSpecificScoreRegistryConfig) {
+			registryConfig.GetDuplicateMessageCount = func(_ peer.ID) float64 {
+				// duplicate message count never exceeds scoring.DefaultDuplicateMessageThreshold so a penalty should never be applied
+				return duplicateMessageThreshold - 1
+			}
+		})
+
+	// starts the registry.
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	reg.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "failed to start GossipSubAppSpecificScoreRegistry")
+
+	// initially, the spamRecords should not have the peer id; also the app specific score record should not be in the cache.
+	require.False(t, spamRecords.Has(peerID))
+	score, updated, exists := appScoreCache.Get(peerID) // get the score from the cache.
+	require.False(t, exists)
+	require.Equal(t, time.Time{}, updated)
+	require.Equal(t, float64(0), score)
+
+	// the initial score will be 0; subsequent calls to get the app specific score
+	// should reward the peer with the max app specific reward for not having any spam record, staking, or subscription penalties
+	require.Eventually(t, func() bool {
+		// calling the app specific score function when there is no app specific score in the cache should eventually update the cache.
+		score := reg.AppSpecificScoreFunc()(peerID)
+		// since the peer has no other penalties, the score is eventually expected to be the max app specific reward
+		return score == cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.MaxAppSpecificReward
+	}, 1*time.Second, 10*time.Millisecond)
+
+	// the app specific score should not be affected by the duplicate messages count
+	require.Never(t, func() bool {
+		score := reg.AppSpecificScoreFunc()(peerID)
+		return score != cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.MaxAppSpecificReward
+	}, 5*time.Second, 10*time.Millisecond)
+
+	// report a misbehavior for the peer id.
+	reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
+		PeerID:  peerID,
+		MsgType: messageType,
+	})
+
+	require.Eventually(t, func() bool {
+		// the notification is processed asynchronously, and the penalty should eventually be updated in the spamRecords
+		record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords.
+ if !ok { + return false + } + require.NoError(t, err) + if !unittest.AreNumericallyClose(expectedPenalty, record.Penalty, 10e-2) { + return false + } + require.Equal(t, scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor)().Decay, record.Decay) // decay should be initialized to the initial state. + + return true + }, 5*time.Second, 10*time.Millisecond) + + queryTime := time.Now() + // eventually, the app specific score should be updated in the cache. + require.Eventually(t, func() bool { + score := reg.AppSpecificScoreFunc()(peerID) + return unittest.AreNumericallyClose(expectedPenalty, score, 0.2) + }, 5*time.Second, 10*time.Millisecond) + + // the app specific score should now be updated in the cache. + score, updated, exists = appScoreCache.Get(peerID) // get the score from the cache. + require.True(t, exists) + require.True(t, updated.After(queryTime)) + unittest.RequireNumericallyClose(t, expectedPenalty, score, 0.01) + + // stop the registry. + cancel() + unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry") } // TestSpamPenaltyDecaysInCache tests that the spam penalty records decay over time in the cache. -func TestSpamPenaltyDecaysInCache(t *testing.T) { +func TestScoreRegistry_SpamPenaltyDecaysInCache(t *testing.T) { peerID := peer.ID("peer-1") - reg, _ := newGossipSubAppSpecificScoreRegistry(t, - withStakedIdentity(peerID), + cfg, err := config.DefaultConfig() + require.NoError(t, err) + // refresh cached app-specific score every 100 milliseconds to speed up the test. + cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond + maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor + reg, _, _ := newGossipSubAppSpecificScoreRegistry(t, + cfg.NetworkConfig.GossipSub.ScoringParameters, + scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor), + withStakedIdentities(peerID), withValidSubscriptions(peerID)) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + reg.Start(signalerCtx) + unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "registry did not start in time") + + defer stopRegistry(t, cancel, reg) // report a misbehavior for the peer id. reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, - MsgType: p2p.CtrlMsgPrune, - Count: 1, + MsgType: p2pmsg.CtrlMsgPrune, + }) + + time.Sleep(1 * time.Second) // wait for the penalty to decay. + + reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ + PeerID: peerID, + MsgType: p2pmsg.CtrlMsgGraft, }) time.Sleep(1 * time.Second) // wait for the penalty to decay. reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, - MsgType: p2p.CtrlMsgGraft, - Count: 1, + MsgType: p2pmsg.CtrlMsgIHave, }) time.Sleep(1 * time.Second) // wait for the penalty to decay. reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, - MsgType: p2p.CtrlMsgIHave, - Count: 1, + MsgType: p2pmsg.CtrlMsgIWant, }) time.Sleep(1 * time.Second) // wait for the penalty to decay. reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, - MsgType: p2p.CtrlMsgIWant, - Count: 1, + MsgType: p2pmsg.RpcPublishMessage, }) time.Sleep(1 * time.Second) // wait for the penalty to decay. 
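+	// (descriptive note: each notification above adds its control-message penalty to the peer's spam
+	// record, and the record is decayed multiplicatively, penalty *= record.Decay, roughly once per
+	// second; hence earlier penalties have accumulated more decay by this point.)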
-	// when the app specific penalty function is called for the first time, the decay functionality should be kicked in
-	// the cache, and the penalty should be updated. Note that since the penalty values are negative, the default staked identity
-	// reward is not applied. Hence, the penalty is only comprised of the penalties.
-	score := reg.AppSpecificScoreFunc()(peerID)
 
 	// the upper bound is the sum of the penalties without decay.
-	scoreUpperBound := penaltyValueFixtures().Prune +
-		penaltyValueFixtures().Graft +
-		penaltyValueFixtures().IHave +
-		penaltyValueFixtures().IWant
+	scoreUpperBound := penaltyValueFixtures().PruneMisbehaviour +
+		penaltyValueFixtures().GraftMisbehaviour +
+		penaltyValueFixtures().IHaveMisbehaviour +
+		penaltyValueFixtures().IWantMisbehaviour +
+		penaltyValueFixtures().PublishMisbehaviour
 	// the lower bound is the sum of the penalties with decay assuming the decay is applied 4 times to the sum of the penalties.
-	// in reality, the decay is applied 4 times to the first penalty, then 3 times to the second penalty, and so on.
+	// in reality, the decay is applied 5 times to the first penalty, then 4 times to the second penalty, and so on.
-	scoreLowerBound := scoreUpperBound * math.Pow(scoring.InitAppScoreRecordState().Decay, 4)
+	r := scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor)()
+	scoreLowerBound := scoreUpperBound * math.Pow(r.Decay, 4)
 
-	// with decay, the penalty should be between the upper and lower bounds.
-	assert.Greater(t, score, scoreUpperBound)
-	assert.Less(t, score, scoreLowerBound)
+	// eventually, the app specific score should be updated in the cache.
+	require.Eventually(t, func() bool {
+		// when the app specific penalty function is called for the first time, the decay functionality should kick in,
+		// and the penalty in the cache should be updated. Note that since the penalty values are negative, the default staked identity
+		// reward is not applied. Hence, the score consists solely of the accumulated penalties.
+		score := reg.AppSpecificScoreFunc()(peerID)
+		// with decay, the penalty should be between the upper and lower bounds.
+		return score > scoreUpperBound && score < scoreLowerBound
+	}, 5*time.Second, 100*time.Millisecond)
+
+	// stop the registry.
+	cancel()
+	unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry")
 }
 
 // TestSpamPenaltyDecayToZero tests that the spam penalty decays to zero over time, and when the spam penalty of
 // a peer is set back to zero, its app specific penalty is also reset to the initial state.
-func TestSpamPenaltyDecayToZero(t *testing.T) {
+func TestScoreRegistry_SpamPenaltyDecayToZero(t *testing.T) {
 	peerID := peer.ID("peer-1")
-	reg, spamRecords := newGossipSubAppSpecificScoreRegistry(
-		t,
-		withStakedIdentity(peerID),
-		withValidSubscriptions(peerID),
-		withInitFunction(func() p2p.GossipSubSpamRecord {
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// refresh cached app-specific score every 100 milliseconds to speed up the test.
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond
+
+	reg, spamRecords, _ := newGossipSubAppSpecificScoreRegistry(t,
+		cfg.NetworkConfig.GossipSub.ScoringParameters,
+		func() p2p.GossipSubSpamRecord {
 			return p2p.GossipSubSpamRecord{
 				Decay:   0.02, // we choose a small decay value to speed up the test.
 				Penalty: 0,
 			}
-		}))
+		},
+		withStakedIdentities(peerID),
+		withValidSubscriptions(peerID))
+
+	// starts the registry.
+ ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + reg.Start(signalerCtx) + unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "registry did not start in time") + + defer stopRegistry(t, cancel, reg) + + scoreOptParameters := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore // report a misbehavior for the peer id. reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{ PeerID: peerID, - MsgType: p2p.CtrlMsgGraft, - Count: 1, + MsgType: p2pmsg.CtrlMsgGraft, }) // decays happen every second, so we wait for 1 second to make sure the penalty is updated. time.Sleep(1 * time.Second) // the penalty should now be updated, it should be still negative but greater than the penalty value (due to decay). - score := reg.AppSpecificScoreFunc()(peerID) - require.Less(t, score, float64(0)) // the penalty should be less than zero. - require.Greater(t, score, penaltyValueFixtures().Graft) // the penalty should be less than the penalty value due to decay. + require.Eventually(t, func() bool { + score := reg.AppSpecificScoreFunc()(peerID) + // the penalty should be less than zero and greater than the penalty value (due to decay). + return score < 0 && score > penaltyValueFixtures().GraftMisbehaviour + }, 5*time.Second, 100*time.Millisecond) require.Eventually(t, func() bool { // the spam penalty should eventually decay to zero. @@ -293,7 +779,7 @@ func TestSpamPenaltyDecayToZero(t *testing.T) { require.Eventually(t, func() bool { // when the spam penalty is decayed to zero, the app specific penalty of the node should reset back to default staking reward. - return reg.AppSpecificScoreFunc()(peerID) == scoring.DefaultStakedIdentityReward + return reg.AppSpecificScoreFunc()(peerID) == scoreOptParameters.StakedIdentityReward }, 5*time.Second, 100*time.Millisecond) // the penalty should now be zero. @@ -301,42 +787,66 @@ func TestSpamPenaltyDecayToZero(t *testing.T) { assert.True(t, ok) assert.NoError(t, err) assert.Equal(t, 0.0, record.Penalty) // penalty should be zero. + + // stop the registry. + cancel() + unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry") } // TestPersistingUnknownIdentityPenalty tests that even though the spam penalty is decayed to zero, the unknown identity penalty // is persisted. This is because the unknown identity penalty is not decayed. -func TestPersistingUnknownIdentityPenalty(t *testing.T) { +func TestScoreRegistry_PersistingUnknownIdentityPenalty(t *testing.T) { peerID := peer.ID("peer-1") - reg, spamRecords := newGossipSubAppSpecificScoreRegistry( - t, - withUnknownIdentity(peerID), // the peer id has an unknown identity. - withValidSubscriptions(peerID), - withInitFunction(func() p2p.GossipSubSpamRecord { + + cfg, err := config.DefaultConfig() + require.NoError(t, err) + // refresh cached app-specific score every 100 milliseconds to speed up the test. + cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond + + reg, spamRecords, _ := newGossipSubAppSpecificScoreRegistry(t, + cfg.NetworkConfig.GossipSub.ScoringParameters, + func() p2p.GossipSubSpamRecord { return p2p.GossipSubSpamRecord{ Decay: 0.02, // we choose a small decay value to speed up the test. Penalty: 0, } - })) + }, + withUnknownIdentity(peerID), // the peer id has an unknown identity. + withValidSubscriptions(peerID)) + + // starts the registry. 
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	reg.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "registry did not start in time")
+
+	defer stopRegistry(t, cancel, reg)
+
+	scoreOptParameters := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore
 
 	// initially, the app specific score should be the default unknown identity penalty.
-	require.Equal(t, scoring.DefaultUnknownIdentityPenalty, reg.AppSpecificScoreFunc()(peerID))
+	require.Eventually(t, func() bool {
+		score := reg.AppSpecificScoreFunc()(peerID)
+		return score == scoreOptParameters.UnknownIdentityPenalty
+	}, 5*time.Second, 100*time.Millisecond)
 
 	// report a misbehavior for the peer id.
 	reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
 		PeerID:  peerID,
-		MsgType: p2p.CtrlMsgGraft,
-		Count:   1,
+		MsgType: p2pmsg.CtrlMsgGraft,
 	})
 
-	// with reported spam, the app specific score should be the default unknown identity + the spam penalty.
-	require.Less(t, math.Abs(scoring.DefaultUnknownIdentityPenalty+penaltyValueFixtures().Graft-reg.AppSpecificScoreFunc()(peerID)), 10e-3)
-
 	// decays happen every second, so we wait for 1 second to make sure the penalty is updated.
 	time.Sleep(1 * time.Second)
+
+	// the penalty should now be updated, it should still be negative but greater than the penalty value (due to decay).
-	score := reg.AppSpecificScoreFunc()(peerID)
-	require.Less(t, score, float64(0))                                                            // the penalty should be less than zero.
-	require.Greater(t, score, penaltyValueFixtures().Graft+scoring.DefaultUnknownIdentityPenalty) // the penalty should be less than the penalty value due to decay.
+	require.Eventually(t, func() bool {
+		score := reg.AppSpecificScoreFunc()(peerID)
+		// Ideally, the score would be the sum of the unknown identity penalty and the graft penalty; however,
+		// due to the exponential decay of the spam penalty and the asynchronous update of the app specific score, the score should be in the open range
+		// (scoreOptParameters.UnknownIdentityPenalty+penaltyValueFixtures().GraftMisbehaviour, scoreOptParameters.UnknownIdentityPenalty).
+		return score < scoreOptParameters.UnknownIdentityPenalty && score > scoreOptParameters.UnknownIdentityPenalty+penaltyValueFixtures().GraftMisbehaviour
+	}, 5*time.Second, 100*time.Millisecond)
 
 	require.Eventually(t, func() bool {
 		// the spam penalty should eventually decay to zero.
@@ -346,7 +856,7 @@ func TestPersistingUnknownIdentityPenalty(t *testing.T) {
 
 	require.Eventually(t, func() bool {
 		// when the spam penalty is decayed to zero, the app specific penalty of the node should only contain the unknown identity penalty.
-		return reg.AppSpecificScoreFunc()(peerID) == scoring.DefaultUnknownIdentityPenalty
+		return reg.AppSpecificScoreFunc()(peerID) == scoreOptParameters.UnknownIdentityPenalty
 	}, 5*time.Second, 100*time.Millisecond)
 
 	// the spam penalty should now be zero in spamRecords.
@@ -354,42 +864,61 @@ func TestPersistingUnknownIdentityPenalty(t *testing.T) {
 	assert.True(t, ok)
 	assert.NoError(t, err)
 	assert.Equal(t, 0.0, record.Penalty) // penalty should be zero.
+
+	// stop the registry.
+	cancel()
+	unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry")
 }
 
 // TestPersistingInvalidSubscriptionPenalty tests that even though the spam penalty is decayed to zero, the invalid subscription penalty
 // is persisted. This is because the invalid subscription penalty is not decayed.
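+//
+// For intuition, a hypothetical single decay tick (exposition only, invented names) showing why the flat
+// penalty persists while the spam penalty vanishes:
+//
+//	spamPenalty *= record.Decay                        // decaying term; 0.02 here, so it shrinks to ~0 fast
+//	score := spamPenalty + invalidSubscriptionPenalty  // the flat subscription term is never decayed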
-func TestPersistingInvalidSubscriptionPenalty(t *testing.T) {
+func TestScoreRegistry_PersistingInvalidSubscriptionPenalty(t *testing.T) {
 	peerID := peer.ID("peer-1")
-	reg, spamRecords := newGossipSubAppSpecificScoreRegistry(
-		t,
-		withStakedIdentity(peerID),
-		withInvalidSubscriptions(peerID), // the peer id has an invalid subscription.
-		withInitFunction(func() p2p.GossipSubSpamRecord {
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// refresh cached app-specific score every 100 milliseconds to speed up the test.
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond
+
+	reg, spamRecords, _ := newGossipSubAppSpecificScoreRegistry(t,
+		cfg.NetworkConfig.GossipSub.ScoringParameters,
+		func() p2p.GossipSubSpamRecord {
 			return p2p.GossipSubSpamRecord{
 				Decay:   0.02, // we choose a small decay value to speed up the test.
 				Penalty: 0,
 			}
-		}))
+		},
+		withStakedIdentities(peerID),
+		withInvalidSubscriptions(peerID)) // the peer id has an invalid subscription
+
+	// starts the registry.
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	reg.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "failed to start GossipSubAppSpecificScoreRegistry")
+
+	scoreOptParameters := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore
 
 	// initially, the app specific score should be the default invalid subscription penalty.
-	require.Equal(t, scoring.DefaultUnknownIdentityPenalty, reg.AppSpecificScoreFunc()(peerID))
+	require.Eventually(t, func() bool {
+		score := reg.AppSpecificScoreFunc()(peerID)
+		return score == scoreOptParameters.InvalidSubscriptionPenalty
+	}, 5*time.Second, 100*time.Millisecond)
 
 	// report a misbehavior for the peer id.
 	reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
 		PeerID:  peerID,
-		MsgType: p2p.CtrlMsgGraft,
-		Count:   1,
+		MsgType: p2pmsg.CtrlMsgGraft,
 	})
 
 	// with reported spam, the app specific score should be the default invalid subscription penalty + the spam penalty.
-	require.Less(t, math.Abs(scoring.DefaultInvalidSubscriptionPenalty+penaltyValueFixtures().Graft-reg.AppSpecificScoreFunc()(peerID)), 10e-3)
-
-	// decays happen every second, so we wait for 1 second to make sure the penalty is updated.
-	time.Sleep(1 * time.Second)
-	// the penalty should now be updated, it should be still negative but greater than the penalty value (due to decay).
-	score := reg.AppSpecificScoreFunc()(peerID)
-	require.Less(t, score, float64(0))                                                                // the penalty should be less than zero.
-	require.Greater(t, score, penaltyValueFixtures().Graft+scoring.DefaultInvalidSubscriptionPenalty) // the penalty should be less than the penalty value due to decay.
+	require.Eventually(t, func() bool {
+		score := reg.AppSpecificScoreFunc()(peerID)
+		// Ideally, the score would be the sum of the invalid subscription penalty and the graft penalty; however,
+		// due to the exponential decay of the spam penalty and the asynchronous update of the app specific score, the score should be in the open range
+		// (scoreOptParameters.InvalidSubscriptionPenalty+penaltyValueFixtures().GraftMisbehaviour, scoreOptParameters.InvalidSubscriptionPenalty).
+		return score < scoreOptParameters.InvalidSubscriptionPenalty && score > scoreOptParameters.InvalidSubscriptionPenalty+penaltyValueFixtures().GraftMisbehaviour
+	}, 5*time.Second, 100*time.Millisecond)
 
 	require.Eventually(t, func() bool {
 		// the spam penalty should eventually decay to zero.
@@ -399,7 +928,7 @@ func TestPersistingInvalidSubscriptionPenalty(t *testing.T) {
 
 	require.Eventually(t, func() bool {
 		// when the spam penalty is decayed to zero, the app specific penalty of the node should only contain the default invalid subscription penalty.
-		return reg.AppSpecificScoreFunc()(peerID) == scoring.DefaultUnknownIdentityPenalty
+		return reg.AppSpecificScoreFunc()(peerID) == scoreOptParameters.InvalidSubscriptionPenalty
 	}, 5*time.Second, 100*time.Millisecond)
 
 	// the spam penalty should now be zero in spamRecords.
@@ -407,21 +936,359 @@ func TestPersistingInvalidSubscriptionPenalty(t *testing.T) {
 	assert.True(t, ok)
 	assert.NoError(t, err)
 	assert.Equal(t, 0.0, record.Penalty) // penalty should be zero.
+
+	// stop the registry.
+	cancel()
+	unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry")
+}
+
+// TestScoreRegistry_TestSpamRecordDecayAdjustment ensures that the spam record decay is adjusted each time a peer's score reaches the scoring.IncreaseDecayThreshold:
+// sustained misbehavior eventually drives the spam record decay to the minimum decay speed (.99), after which sustained good behavior
+// resets the decay to the max decay speed (.8).
+func TestScoreRegistry_TestSpamRecordDecayAdjustment(t *testing.T) {
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// refresh cached app-specific score every 100 milliseconds to speed up the test.
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond
+	// increase configured DecayRateReductionFactor so that the decay time is increased faster
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.DecayRateReductionFactor = .1
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.PenaltyDecayEvaluationPeriod = time.Second
+	maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor
+	peer1 := unittest.PeerIdFixture(t)
+	peer2 := unittest.PeerIdFixture(t)
+	reg, spamRecords, _ := newGossipSubAppSpecificScoreRegistry(t,
+		cfg.NetworkConfig.GossipSub.ScoringParameters,
+		scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor),
+		withStakedIdentities(peer1, peer2),
+		withValidSubscriptions(peer1, peer2))
+
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	reg.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "failed to start GossipSubAppSpecificScoreRegistry")
+
+	// initially, the spamRecords should not have the peer ids.
+	assert.False(t, spamRecords.Has(peer1))
+	assert.False(t, spamRecords.Has(peer2))
+
+	scoreOptParameters := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore
+	scoringRegistryParameters := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters
+	// since both peers do not have a spam record, their app specific score should be the max app specific reward, which
+	// is the default reward for a staked peer that has valid subscriptions.
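+	// (descriptive note, roughly: while a peer keeps misbehaving and its penalty keeps crossing the
+	// decay-adjustment threshold, each PenaltyDecayEvaluationPeriod the record's decay factor is moved
+	// by DecayRateReductionFactor toward the minimum decay speed, so penalties linger longer; once the
+	// penalty fully recovers to zero, the decay factor is reset to the maximum decay speed.)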
+	require.Eventually(t, func() bool {
+		// since neither peer has misbehaved yet, both should be at the max app specific reward.
+		return scoreOptParameters.MaxAppSpecificReward == reg.AppSpecificScoreFunc()(peer1) && scoreOptParameters.MaxAppSpecificReward == reg.AppSpecificScoreFunc()(peer2)
+	}, 5*time.Second, 100*time.Millisecond)
+
+	// simulate sustained malicious activity from peer1, eventually the decay speed
+	// for a spam record should be reduced to the MinimumSpamPenaltyDecayFactor
+	prevDecay := scoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor
+	tolerance := 0.1
+
+	require.Eventually(t, func() bool {
+		reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
+			PeerID:  peer1,
+			MsgType: p2pmsg.CtrlMsgPrune,
+		})
+
+		// the spam penalty should eventually be updated in the spamRecords
+		record, err, ok := spamRecords.Get(peer1)
+		require.NoError(t, err)
+		if !ok {
+			return false
+		}
+		if math.Abs(prevDecay-record.Decay) > tolerance {
+			return false
+		}
+		prevDecay = record.Decay
+		return record.Decay == scoringRegistryParameters.SpamRecordCache.Decay.MinimumSpamPenaltyDecayFactor
+	}, 5*time.Second, 500*time.Millisecond)
+
+	// initialize a spam record for peer2
+	reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
+		PeerID:  peer2,
+		MsgType: p2pmsg.CtrlMsgPrune,
+	})
+
+	// eventually the spam record should appear in the cache
+	require.Eventually(t, func() bool {
+		_, err, ok := spamRecords.Get(peer2)
+		require.NoError(t, err)
+		return ok
+	}, 5*time.Second, 10*time.Millisecond)
+
+	// reduce the penalty and set Decay to scoring.MinimumSpamPenaltyDecayFactor
+	record, err := spamRecords.Adjust(peer2, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord {
+		record.Penalty = -.1
+		record.Decay = scoringRegistryParameters.SpamRecordCache.Decay.MinimumSpamPenaltyDecayFactor
+		return record
+	})
+	require.NoError(t, err)
+	require.True(t, record.Decay == scoringRegistryParameters.SpamRecordCache.Decay.MinimumSpamPenaltyDecayFactor)
+	require.True(t, record.Penalty == -.1)
+	// simulate sustained good behavior from peer 2: each time the spam record is read from the cache
+	// via the Get method, the record penalty is decayed until it is eventually reset to 0;
+	// once the penalty crosses the skipDecayThreshold, the record decay is reset to scoringRegistryParameters.MaximumSpamPenaltyDecayFactor
+	require.Eventually(t, func() bool {
+		record, err, ok := spamRecords.Get(peer2)
+		require.NoError(t, err)
+		require.True(t, ok)
+		return record.Decay == scoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor &&
+			record.Penalty == 0 &&
+			record.LastDecayAdjustment.IsZero()
+	}, 5*time.Second, time.Second)
+
+	// ensure decay can be reduced again after recovery for peerID 2
+	require.Eventually(t, func() bool {
+		reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
+			PeerID:  peer2,
+			MsgType: p2pmsg.CtrlMsgPrune,
+		})
+		// the spam penalty should eventually be updated in the spamRecords
+		record, err, ok := spamRecords.Get(peer2)
+		require.NoError(t, err)
+		if !ok {
+			return false
+		}
+		return record.Decay == scoringRegistryParameters.SpamRecordCache.Decay.MinimumSpamPenaltyDecayFactor
+	}, 5*time.Second, 500*time.Millisecond)
+
+	// stop the registry.
+	cancel()
+	unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry")
+}
+
+// TestPeerSpamPenaltyClusterPrefixed evaluates the application-specific penalty calculation for a node when a spam record is present
+// for cluster-prefixed topics. In the case of an invalid control message notification marked as cluster-prefixed,
+// the application-specific penalty should be reduced by the default reduction factor. This test verifies the accurate computation
+// of the application-specific score under these conditions.
+func TestPeerSpamPenaltyClusterPrefixed(t *testing.T) {
+	ctlMsgTypes := p2pmsg.ControlMessageTypes()
+	peerIds := unittest.PeerIdFixtures(t, len(ctlMsgTypes))
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// refresh cached app-specific score every 100 milliseconds to speed up the test.
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 100 * time.Millisecond
+	maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor
+	reg, spamRecords, _ := newGossipSubAppSpecificScoreRegistry(t,
+		cfg.NetworkConfig.GossipSub.ScoringParameters,
+		scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor),
+		withStakedIdentities(peerIds...),
+		withValidSubscriptions(peerIds...))
+
+	// starts the registry.
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	reg.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "failed to start GossipSubAppSpecificScoreRegistry")
+
+	scoreOptParameters := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore
+
+	for _, peerID := range peerIds {
+		// initially, the spamRecords should not have the peer id.
+		assert.False(t, spamRecords.Has(peerID))
+		// since the peer id does not have a spam record, the app specific score should (eventually, due to caching) be the max app specific reward, which
+		// is the default reward for a staked peer that has valid subscriptions.
+		require.Eventually(t, func() bool {
+			// calling the app specific score function when there is no app specific score in the cache should eventually update the cache.
+			score := reg.AppSpecificScoreFunc()(peerID)
+			return score == scoreOptParameters.MaxAppSpecificReward
+		}, 5*time.Second, 100*time.Millisecond)
+
+	}
+
+	// Report two misbehaviors for each peer ID, concurrently (see the arithmetic note below):
+	// 1. With IsClusterPrefixed set to false, ensuring the penalty applied to the application-specific score is not reduced.
+	// 2. With IsClusterPrefixed set to true, reducing the penalty added to the overall app-specific score by the default reduction factor.
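+	// (illustrative arithmetic: one full-weight report plus one cluster-prefixed report scaled by the
+	// reduction factor yields expected = basePenalty + basePenalty*reductionFactor = basePenalty*(1+reductionFactor),
+	// which is exactly what expectedPenalty computes below.)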
+	for i, ctlMsgType := range ctlMsgTypes {
+		peerID := peerIds[i]
+		var wg sync.WaitGroup
+		wg.Add(2)
+		go func() {
+			defer wg.Done()
+			reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
+				PeerID:    peerID,
+				MsgType:   ctlMsgType,
+				TopicType: p2p.CtrlMsgNonClusterTopicType,
+			})
+		}()
+		go func() {
+			defer wg.Done()
+			reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
+				PeerID:    peerID,
+				MsgType:   ctlMsgType,
+				TopicType: p2p.CtrlMsgTopicTypeClusterPrefixed,
+			})
+		}()
+		unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+
+		// expected penalty should be penaltyValueFixture(ctlMsgType) * (1 + clusterReductionFactor)
+		expectedPenalty := penaltyValueFixture(ctlMsgType) * (1 + penaltyValueFixtures().ClusterPrefixedReductionFactor)
+
+		require.Eventually(t, func() bool {
+			// the notification is processed asynchronously, and the penalty should eventually be updated in the spamRecords
+			record, err, ok := spamRecords.Get(peerID) // get the record from the spamRecords.
+			if !ok {
+				return false
+			}
+			require.NoError(t, err)
+			if !unittest.AreNumericallyClose(expectedPenalty, record.Penalty, 10e-2) {
+				return false
+			}
+			require.Equal(t, scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor)().Decay, record.Decay) // decay should be initialized to the initial state.
+			return true
+		}, 5*time.Second, 100*time.Millisecond)
+
+		// this peer has a spam record, with no subscription penalty. Hence, the app specific score should only be the spam penalty,
+		// and the peer should be deprived of the default reward for its valid staked role.
+		score := reg.AppSpecificScoreFunc()(peerID)
+		tolerance := 0.02 // 2% relative tolerance
+		if expectedPenalty == 0 {
+			assert.Less(t, math.Abs(score), tolerance)
+		} else {
+			assert.Less(t, math.Abs(expectedPenalty-score)/expectedPenalty, tolerance)
+		}
+	}
+
+	// stop the registry.
+	cancel()
+	unittest.RequireCloseBefore(t, reg.Done(), 1*time.Second, "failed to stop GossipSubAppSpecificScoreRegistry")
 }
 
-// withStakedIdentity returns a function that sets the identity provider to return an staked identity for the given peer id.
+// TestScoringRegistrySilencePeriod ensures that the scoring registry does not penalize nodes during the silence period, and
+// starts to penalize nodes only after the silence period is over.
+func TestScoringRegistrySilencePeriod(t *testing.T) {
+	unittest.SkipUnless(t, unittest.TEST_FLAKY, "flaky tests")
+	peerID := unittest.PeerIdFixture(t)
+	silenceDuration := 5 * time.Second
+	silencedNotificationLogs := atomic.NewInt32(0)
+	hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) {
+		if level == zerolog.TraceLevel {
+			if message == scoring.NotificationSilencedMsg {
+				silencedNotificationLogs.Inc()
+			}
+		}
+	})
+	logger := zerolog.New(io.Discard).Level(zerolog.TraceLevel).Hook(hook)
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	// refresh cached app-specific score every 10 milliseconds to speed up the test.
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 10 * time.Millisecond
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor = .99
+	maximumSpamPenaltyDecayFactor := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor
+	reg, spamRecords, _ := newGossipSubAppSpecificScoreRegistry(t,
+		cfg.NetworkConfig.GossipSub.ScoringParameters,
+		scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor),
+		withUnknownIdentity(peerID),
+		withInvalidSubscriptions(peerID),
+		func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) {
+			// we set the scoring registry silence duration to 5 seconds;
+			// the peer is not expected to be penalized while the silence period lasts.
+			// after that, an invalid control message notification is processed and the peer
+			// should be penalized
+			cfg.ScoringRegistryStartupSilenceDuration = silenceDuration
+			// hooked logger will capture the number of logs related to ignored notifications
+			cfg.Logger = logger
+		})
+
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+	defer stopRegistry(t, cancel, reg)
+	// capture approximate registry start time
+	reg.Start(signalerCtx)
+	unittest.RequireCloseBefore(t, reg.Ready(), 1*time.Second, "registry did not start in time")
+
+	registryStartTime := time.Now()
+	expectedNumOfSilencedNotif := 0
+	// while we are in the silence period all notifications should be ignored and the
+	// invalid subscription penalty should not be applied to the app specific score
+	// we ensure we stay within the silence duration by iterating only up until 1 second
+	// before the silence period is over
+	for time.Since(registryStartTime) < (silenceDuration - time.Second) {
+		// report a misbehavior for the peer id.
+		reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
+			PeerID:  peerID,
+			MsgType: p2pmsg.CtrlMsgGraft,
+		})
+		expectedNumOfSilencedNotif++
+		// spam records should not be created during the silence period
+		_, err, ok := spamRecords.Get(peerID)
+		assert.False(t, ok)
+		assert.NoError(t, err)
+		// during the silence period, the app specific score should remain zero: no penalty is applied yet.
+		require.Equal(t, float64(0), reg.AppSpecificScoreFunc()(peerID))
+	}
+
+	invalidSubscriptionPenalty := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.InvalidSubscriptionPenalty
+
+	require.Eventually(t, func() bool {
+		// we expect to have logged a trace-level message for every silenced notification.
+		require.Equal(t, int32(expectedNumOfSilencedNotif), silencedNotificationLogs.Load())
+		// after the silence period the invalid subscription penalty should be applied to the app specific score
+		return invalidSubscriptionPenalty == reg.AppSpecificScoreFunc()(peerID)
+	}, 2*time.Second, 200*time.Millisecond)
+
+	// after the silence period the peer has a spam record as well as an unknown identity. Hence, the spam penalty
+	// now contributes to the app specific score in addition to the identity and subscription penalties.
+	reg.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
+		PeerID:  peerID,
+		MsgType: p2pmsg.CtrlMsgGraft,
+	})
+
+	require.Eventually(t, func() bool {
+		return spamRecords.Has(peerID)
+	}, time.Second, 100*time.Millisecond)
+
+	// the penalty should now be applied and spam records created.
+ record, err, ok := spamRecords.Get(peerID) + assert.True(t, ok) + assert.NoError(t, err) + expectedPenalty := penaltyValueFixtures().GraftMisbehaviour + unittest.RequireNumericallyClose(t, expectedPenalty, record.Penalty, 10e-3) + assert.Equal(t, scoring.InitAppScoreRecordStateFunc(maximumSpamPenaltyDecayFactor)().Decay, record.Decay) // decay should be initialized to the initial state. +} + +// withStakedIdentities returns a function that sets the identity provider to return staked identities for the given peer ids. // It is used for testing purposes, and causes the given peer id to benefit from the staked identity reward in GossipSub. -func withStakedIdentity(peerId peer.ID) func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) { +func withStakedIdentities(peerIds ...peer.ID) func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) { return func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) { - cfg.IdProvider.(*mock.IdentityProvider).On("ByPeerID", peerId).Return(unittest.IdentityFixture(), true).Maybe() + cfg.IdProvider.(*mock.IdentityProvider).On("ByPeerID", testifymock.AnythingOfType("peer.ID")). + Return(func(pid peer.ID) *flow.Identity { + for _, peerID := range peerIds { + if peerID == pid { + return unittest.IdentityFixture() + } + } + return nil + }, func(pid peer.ID) bool { + for _, peerID := range peerIds { + if peerID == pid { + return true + } + } + return false + }).Maybe() } } -// withValidSubscriptions returns a function that sets the subscription validator to return nil for the given peer id. +// withValidSubscriptions returns a function that sets the subscription validator to return nil for the given peer ids. // It is used for testing purposes and causes the given peer id to never be penalized for subscribing to invalid topics. -func withValidSubscriptions(peer peer.ID) func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) { +func withValidSubscriptions(peerIds ...peer.ID) func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) { return func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) { - cfg.Validator.(*mockp2p.SubscriptionValidator).On("CheckSubscribedToAllowedTopics", peer, testifymock.Anything).Return(nil).Maybe() + cfg.Validator.(*mockp2p.SubscriptionValidator). + On("CheckSubscribedToAllowedTopics", testifymock.AnythingOfType("peer.ID"), testifymock.Anything). + Return(func(pid peer.ID, _ flow.Role) error { + for _, peerID := range peerIds { + if peerID == pid { + return nil + } + } + return fmt.Errorf("invalid subscriptions") + }).Maybe() } } @@ -437,44 +1304,123 @@ func withUnknownIdentity(peer peer.ID) func(cfg *scoring.GossipSubAppSpecificSco // It is used for testing purposes and causes the given peer id to be penalized for subscribing to invalid topics. 
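+// Note: withStakedIdentities and withValidSubscriptions above lean on testify's support for computed
+// return values (passing functions to Return, which the generated mocks evaluate against the actual
+// call arguments), so one stub can answer differently per peer ID; this helper instead matches a
+// single concrete peer ID.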
func withInvalidSubscriptions(peer peer.ID) func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) { return func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) { - cfg.Validator.(*mockp2p.SubscriptionValidator).On("CheckSubscribedToAllowedTopics", peer, testifymock.Anything).Return(fmt.Errorf("invalid subscriptions")).Maybe() + cfg.Validator.(*mockp2p.SubscriptionValidator).On("CheckSubscribedToAllowedTopics", + peer, + testifymock.Anything).Return(fmt.Errorf("invalid subscriptions")).Maybe() } } -func withInitFunction(initFunction func() p2p.GossipSubSpamRecord) func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) { - return func(cfg *scoring.GossipSubAppSpecificScoreRegistryConfig) { - cfg.Init = initFunction +// newGossipSubAppSpecificScoreRegistry creates a new instance of GossipSubAppSpecificScoreRegistry along with its associated +// GossipSubSpamRecordCache and AppSpecificScoreCache. This function is primarily used in testing scenarios to set up a controlled +// environment for evaluating the behavior of the GossipSub scoring mechanism. +// +// The function accepts a variable number of options to configure the GossipSubAppSpecificScoreRegistryConfig, allowing for +// customization of the registry's behavior in tests. These options can modify various aspects of the configuration, such as +// penalty values, identity providers, validators, and caching mechanisms. +// +// Parameters: +// - t *testing.T: The test context, used for asserting the absence of errors during the setup. +// - params p2pconfig.ScoringParameters: The scoring parameters used to configure the registry. +// - initFunction scoring.SpamRecordInitFunc: The function used to initialize the spam records. +// - opts ...func(*scoring.GossipSubAppSpecificScoreRegistryConfig): A variadic set of functions that modify the registry's configuration. +// +// Returns: +// - *scoring.GossipSubAppSpecificScoreRegistry: The configured GossipSub application-specific score registry. +// - *netcache.GossipSubSpamRecordCache: The cache used for storing spam records. +// - *internal.AppSpecificScoreCache: The cache for storing application-specific scores. +// +// This function initializes and configures the scoring registry with default and test-specific settings. It sets up a spam record cache +// and an application-specific score cache with predefined sizes and functionalities. The function also configures the scoring parameters +// with test-specific values, particularly modifying the ScoreTTL value for the purpose of the tests. The creation and configuration of +// the GossipSubAppSpecificScoreRegistry are validated to ensure no errors occur during the process. 
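+//
+// A typical invocation, mirroring the tests above (arguments abbreviated):
+//
+//	cfg, _ := config.DefaultConfig()
+//	decay := cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor
+//	reg, spamRecords, appScoreCache := newGossipSubAppSpecificScoreRegistry(t,
+//		cfg.NetworkConfig.GossipSub.ScoringParameters,
+//		scoring.InitAppScoreRecordStateFunc(decay),
+//		withStakedIdentities(peerID),
+//		withValidSubscriptions(peerID))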
+func newGossipSubAppSpecificScoreRegistry(t *testing.T, + params p2pconfig.ScoringParameters, + initFunction scoring.SpamRecordInitFunc, + opts ...func(*scoring.GossipSubAppSpecificScoreRegistryConfig)) (*scoring.GossipSubAppSpecificScoreRegistry, + *netcache.GossipSubSpamRecordCache, + *internal.AppSpecificScoreCache) { + cache := netcache.NewGossipSubSpamRecordCache(100, + unittest.Logger(), + metrics.NewNoopCollector(), + initFunction, + scoring.DefaultDecayFunction(params.ScoringRegistryParameters.SpamRecordCache.Decay)) + appSpecificScoreCache := internal.NewAppSpecificScoreCache(100, unittest.Logger(), metrics.NewNoopCollector()) + + validator := mockp2p.NewSubscriptionValidator(t) + validator.On("Start", testifymock.Anything).Return().Maybe() + done := make(chan struct{}) + close(done) + f := func() <-chan struct{} { + return done } -} - -// newGossipSubAppSpecificScoreRegistry returns a new instance of GossipSubAppSpecificScoreRegistry with default values -// for the testing purposes. -func newGossipSubAppSpecificScoreRegistry(t *testing.T, opts ...func(*scoring.GossipSubAppSpecificScoreRegistryConfig)) (*scoring.GossipSubAppSpecificScoreRegistry, *netcache.GossipSubSpamRecordCache) { - cache := netcache.NewGossipSubSpamRecordCache(100, unittest.Logger(), metrics.NewNoopCollector(), scoring.DefaultDecayFunction()) + validator.On("Ready").Return(f()).Maybe() + validator.On("Done").Return(f()).Maybe() cfg := &scoring.GossipSubAppSpecificScoreRegistryConfig{ Logger: unittest.Logger(), - Init: scoring.InitAppScoreRecordState, Penalty: penaltyValueFixtures(), IdProvider: mock.NewIdentityProvider(t), - Validator: mockp2p.NewSubscriptionValidator(t), - CacheFactory: func() p2p.GossipSubSpamRecordCache { + Validator: validator, + AppScoreCacheFactory: func() p2p.GossipSubApplicationSpecificScoreCache { + return appSpecificScoreCache + }, + SpamRecordCacheFactory: func() p2p.GossipSubSpamRecordCache { return cache }, + GetDuplicateMessageCount: func(id peer.ID) float64 { + return 0 + }, + Parameters: params.ScoringRegistryParameters.AppSpecificScore, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + NetworkingType: network.PrivateNetwork, + AppSpecificScoreParams: params.PeerScoring.Protocol.AppSpecificScore, + DuplicateMessageThreshold: params.PeerScoring.Protocol.AppSpecificScore.DuplicateMessageThreshold, + Collector: metrics.NewNoopCollector(), + ScoringRegistryStartupSilenceDuration: 0, // turn off silence period by default } for _, opt := range opts { opt(cfg) } - return scoring.NewGossipSubAppSpecificScoreRegistry(cfg), cache + + reg, err := scoring.NewGossipSubAppSpecificScoreRegistry(cfg) + require.NoError(t, err, "failed to create GossipSubAppSpecificScoreRegistry") + + return reg, cache, appSpecificScoreCache } // penaltyValueFixtures returns a set of penalty values for testing purposes. // The values are not realistic. The important thing is that they are different from each other. This is to make sure // that the tests are not passing because of the default values. 
-func penaltyValueFixtures() scoring.GossipSubCtrlMsgPenaltyValue { - return scoring.GossipSubCtrlMsgPenaltyValue{ - Graft: -100, - Prune: -50, - IHave: -20, - IWant: -10, +func penaltyValueFixtures() p2pconfig.MisbehaviourPenalties { + return p2pconfig.MisbehaviourPenalties{ + GraftMisbehaviour: -100, + PruneMisbehaviour: -50, + IHaveMisbehaviour: -20, + IWantMisbehaviour: -10, + ClusterPrefixedReductionFactor: .5, + PublishMisbehaviour: -10, + } +} + +// penaltyValueFixture returns the set penalty of the provided control message type returned from the fixture func penaltyValueFixtures. +func penaltyValueFixture(msgType p2pmsg.ControlMessageType) float64 { + penaltyValues := penaltyValueFixtures() + switch msgType { + case p2pmsg.CtrlMsgGraft: + return penaltyValues.GraftMisbehaviour + case p2pmsg.CtrlMsgPrune: + return penaltyValues.PruneMisbehaviour + case p2pmsg.CtrlMsgIHave: + return penaltyValues.IHaveMisbehaviour + case p2pmsg.CtrlMsgIWant: + return penaltyValues.IWantMisbehaviour + case p2pmsg.RpcPublishMessage: + return penaltyValues.PublishMisbehaviour + default: + return penaltyValues.ClusterPrefixedReductionFactor } } + +func stopRegistry(t *testing.T, cancel context.CancelFunc, registry *scoring.GossipSubAppSpecificScoreRegistry) { + cancel() + unittest.RequireCloseBefore(t, registry.Done(), 5*time.Second, "registry did not stop") +} diff --git a/network/p2p/scoring/score_option.go b/network/p2p/scoring/score_option.go index c6bf52a21be..3136478176b 100644 --- a/network/p2p/scoring/score_option.go +++ b/network/p2p/scoring/score_option.go @@ -1,6 +1,7 @@ package scoring import ( + "fmt" "time" pubsub "github.com/libp2p/go-libp2p-pubsub" @@ -8,184 +9,93 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" netcache "github.com/onflow/flow-go/network/p2p/cache" + p2pconfig "github.com/onflow/flow-go/network/p2p/config" + "github.com/onflow/flow-go/network/p2p/scoring/internal" + "github.com/onflow/flow-go/network/p2p/utils" "github.com/onflow/flow-go/utils/logging" ) -const ( - DefaultAppSpecificScoreWeight = 1 - MaxAppSpecificPenalty = float64(-100) - MinAppSpecificPenalty = -1 - MaxAppSpecificReward = float64(100) - - // DefaultStakedIdentityReward is the default reward for staking peers. It is applied to the peer's score when - // the peer does not have any misbehavior record, e.g., invalid subscription, invalid message, etc. - // The purpose is to reward the staking peers for their contribution to the network and prioritize them in neighbor selection. - DefaultStakedIdentityReward = MaxAppSpecificReward - - // DefaultUnknownIdentityPenalty is the default penalty for unknown identity. It is applied to the peer's score when - // the peer is not in the identity list. - DefaultUnknownIdentityPenalty = MaxAppSpecificPenalty - - // DefaultInvalidSubscriptionPenalty is the default penalty for invalid subscription. It is applied to the peer's score when - // the peer subscribes to a topic that it is not authorized to subscribe to. - DefaultInvalidSubscriptionPenalty = MaxAppSpecificPenalty - - // DefaultGossipThreshold when a peer's penalty drops below this threshold, - // no gossip is emitted towards that peer and gossip from that peer is ignored. 
- // - // Validation Constraint: GossipThreshold >= PublishThreshold && GossipThreshold < 0 - // - // How we use it: - // As current max penalty is -100, we set the threshold to -99 so that all gossips - // to and from peers with penalty -100 are ignored. - DefaultGossipThreshold = -99 - - // DefaultPublishThreshold when a peer's penalty drops below this threshold, - // self-published messages are not propagated towards this peer. - // - // Validation Constraint: - // PublishThreshold >= GraylistThreshold && PublishThreshold <= GossipThreshold && PublishThreshold < 0. - // - // How we use it: - // As current max penalty is -100, we set the threshold to -99 so that all penalized peers are deprived of - // receiving any published messages. - DefaultPublishThreshold = -99 - - // DefaultGraylistThreshold when a peer's penalty drops below this threshold, the peer is graylisted, i.e., - // incoming RPCs from the peer are ignored. - // - // Validation Constraint: - // GraylistThreshold =< PublishThreshold && GraylistThreshold =< GossipThreshold && GraylistThreshold < 0 - // - // How we use it: - // As current max penalty is -100, we set the threshold to -99 so that all penalized peers are graylisted. - DefaultGraylistThreshold = -99 - - // DefaultAcceptPXThreshold when a peer sends us PX information with a prune, we only accept it and connect to the supplied - // peers if the originating peer's penalty exceeds this threshold. - // - // Validation Constraint: must be non-negative. - // - // How we use it: - // As current max reward is 100, we set the threshold to 99 so that we only receive supplied peers from - // well-behaved peers. - DefaultAcceptPXThreshold = 99 - - // DefaultOpportunisticGraftThreshold when the median peer penalty in the mesh drops below this value, - // the peer may select more peers with penalty above the median to opportunistically graft on the mesh. - // - // Validation Constraint: must be non-negative. - // - // How we use it: - // We set it to the MaxAppSpecificReward + 1 so that we only opportunistically graft peers that are not access nodes (i.e., with MinAppSpecificPenalty), - // or penalized peers (i.e., with MaxAppSpecificPenalty). - DefaultOpportunisticGraftThreshold = MaxAppSpecificReward + 1 - - // MaxDebugLogs sets the max number of debug/trace log events per second. Logs emitted above - // this threshold are dropped. - MaxDebugLogs = 50 - - // defaultScoreCacheSize is the default size of the cache used to store the app specific penalty of peers. - defaultScoreCacheSize = 1000 - - // defaultDecayInterval is the default decay interval for the overall score of a peer at the GossipSub scoring - // system. It is the interval over which we decay the effect of past behavior. So that the effect of past behavior - // is not permanent. - defaultDecayInterval = 1 * time.Hour - - // defaultDecayToZero is the default decay to zero for the overall score of a peer at the GossipSub scoring system. - // It defines the maximum value below which a peer scoring counter is reset to zero. - // This is to prevent the counter from decaying to a very small value. - // The default value is 0.01, which means that a counter will be reset to zero if it decays to 0.01. - // When a counter hits the DecayToZero threshold, it means that the peer did not exhibit the behavior - // for a long time, and we can reset the counter. - defaultDecayToZero = 0.01 -) - // ScoreOption is a functional option for configuring the peer scoring system. +// TODO: rename it to ScoreManager. 
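+//
+// ScoreOption embeds component.Component: starting the option starts the underlying
+// app-specific score registry, and shutting it down stops the registry (see the component
+// worker wired up at the end of NewScoreOption in this diff).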
 type ScoreOption struct {
+	component.Component
 	logger zerolog.Logger

-	peerScoreParams     *pubsub.PeerScoreParams
-	peerThresholdParams *pubsub.PeerScoreThresholds
-	validator           p2p.SubscriptionValidator
-	appScoreFunc        func(peer.ID) float64
+	peerScoreParams         *pubsub.PeerScoreParams
+	peerThresholdParams     *pubsub.PeerScoreThresholds
+	defaultTopicScoreParams *pubsub.TopicScoreParams
+	validator               p2p.SubscriptionValidator
+	appScoreFunc            func(peer.ID) float64
+	appScoreRegistry        *GossipSubAppSpecificScoreRegistry
 }

 type ScoreOptionConfig struct {
-	logger                           zerolog.Logger
-	provider                         module.IdentityProvider
-	cacheSize                        uint32
-	cacheMetrics                     module.HeroCacheMetrics
-	appScoreFunc                     func(peer.ID) float64
-	topicParams                      []func(map[string]*pubsub.TopicScoreParams)
-	registerNotificationConsumerFunc func(p2p.GossipSubInvCtrlMsgNotifConsumer)
+	logger                          zerolog.Logger
+	params                          p2pconfig.ScoringParameters
+	provider                        module.IdentityProvider
+	heroCacheMetricsFactory         metrics.HeroCacheMetricsFactory
+	appScoreFunc                    func(peer.ID) float64
+	topicParams                     []func(map[string]*pubsub.TopicScoreParams)
+	getDuplicateMessageCount        func(id peer.ID) float64
+	scoringRegistryMetricsCollector module.GossipSubScoringRegistryMetrics
+	networkingType                  network.NetworkingType
 }

-func NewScoreOptionConfig(logger zerolog.Logger) *ScoreOptionConfig {
+// NewScoreOptionConfig creates a new configuration for the GossipSub peer scoring option.
+// Args:
+// - logger: the logger to use.
+// - params: the scoring parameters.
+// - hcMetricsFactory: HeroCache metrics factory to create metrics for the scoring-related caches.
+// - scoringRegistryMetricsCollector: the metrics collector for the scoring registry.
+// - idProvider: the identity provider to use.
+// - getDuplicateMessageCount: callback that returns the duplicate message count of a peer.
+// - networkingType: the networking type to use, public or private.
+// Returns:
+// - a new configuration for the GossipSub peer scoring option.
+func NewScoreOptionConfig(logger zerolog.Logger,
+	params p2pconfig.ScoringParameters,
+	hcMetricsFactory metrics.HeroCacheMetricsFactory,
+	scoringRegistryMetricsCollector module.GossipSubScoringRegistryMetrics,
+	idProvider module.IdentityProvider,
+	getDuplicateMessageCount func(id peer.ID) float64,
+	networkingType network.NetworkingType) *ScoreOptionConfig {
 	return &ScoreOptionConfig{
-		logger:       logger,
-		cacheSize:    defaultScoreCacheSize,
-		cacheMetrics: metrics.NewNoopCollector(), // no metrics by default
-		topicParams:  make([]func(map[string]*pubsub.TopicScoreParams), 0),
+		logger:                          logger.With().Str("module", "pubsub_score_option").Logger(),
+		provider:                        idProvider,
+		params:                          params,
+		heroCacheMetricsFactory:         hcMetricsFactory,
+		topicParams:                     make([]func(map[string]*pubsub.TopicScoreParams), 0),
+		networkingType:                  networkingType,
+		getDuplicateMessageCount:        getDuplicateMessageCount,
+		scoringRegistryMetricsCollector: scoringRegistryMetricsCollector,
 	}
 }

-// SetProvider sets the identity provider for the penalty option.
-// It is used to retrieve the identity of a peer when calculating the app specific penalty.
-// If the provider is not set, the penalty registry will crash. This is a required field.
-// It is safe to call this method multiple times, the last call will be used.
-func (c *ScoreOptionConfig) SetProvider(provider module.IdentityProvider) {
-	c.provider = provider
-}
-
-// SetCacheSize sets the size of the cache used to store the app specific penalty of peers.
-// If the cache size is not set, the default value will be used.
-// It is safe to call this method multiple times, the last call will be used.
-func (c *ScoreOptionConfig) SetCacheSize(size uint32) { - c.cacheSize = size -} - -// SetCacheMetrics sets the cache metrics collector for the penalty option. -// It is used to collect metrics for the app specific penalty cache. If the cache metrics collector is not set, -// a no-op collector will be used. -// It is safe to call this method multiple times, the last call will be used. -func (c *ScoreOptionConfig) SetCacheMetrics(metrics module.HeroCacheMetrics) { - c.cacheMetrics = metrics -} - -// SetAppSpecificScoreFunction sets the app specific penalty function for the penalty option. +// OverrideAppSpecificScoreFunction sets the app specific penalty function for the penalty option. // It is used to calculate the app specific penalty of a peer. // If the app specific penalty function is not set, the default one is used. // Note that it is always safer to use the default one, unless you know what you are doing. // It is safe to call this method multiple times, the last call will be used. -func (c *ScoreOptionConfig) SetAppSpecificScoreFunction(appSpecificScoreFunction func(peer.ID) float64) { +func (c *ScoreOptionConfig) OverrideAppSpecificScoreFunction(appSpecificScoreFunction func(peer.ID) float64) { c.appScoreFunc = appSpecificScoreFunction } -// SetTopicScoreParams adds the topic penalty parameters to the peer penalty parameters. -// It is used to configure the topic penalty parameters for the pubsub system. -// If there is already a topic penalty parameter for the given topic, the last call will be used. -func (c *ScoreOptionConfig) SetTopicScoreParams(topic channels.Topic, topicScoreParams *pubsub.TopicScoreParams) { +// OverrideTopicScoreParams overrides the topic score parameters for the given topic. +// It is used to override the default topic score parameters for a specific topic. +// If the topic score parameters are not set, the default ones will be used. +func (c *ScoreOptionConfig) OverrideTopicScoreParams(topic channels.Topic, topicScoreParams *pubsub.TopicScoreParams) { c.topicParams = append(c.topicParams, func(topics map[string]*pubsub.TopicScoreParams) { topics[topic.String()] = topicScoreParams }) } -// SetRegisterNotificationConsumerFunc sets the function to register the notification consumer for the penalty option. -// ScoreOption uses this function to register the notification consumer for the pubsub system so that it can receive -// notifications of invalid control messages. -func (c *ScoreOptionConfig) SetRegisterNotificationConsumerFunc(f func(p2p.GossipSubInvCtrlMsgNotifConsumer)) { - c.registerNotificationConsumerFunc = f -} - // NewScoreOption creates a new penalty option with the given configuration. -func NewScoreOption(cfg *ScoreOptionConfig) *ScoreOption { - throttledSampler := logging.BurstSampler(MaxDebugLogs, time.Second) +func NewScoreOption(cfg *ScoreOptionConfig, provider p2p.SubscriptionProvider) (*ScoreOption, error) { + throttledSampler := logging.BurstSampler(cfg.params.PeerScoring.Protocol.MaxDebugLogs, time.Second) logger := cfg.logger.With(). Str("module", "pubsub_score_option"). Logger(). 
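With this refactor, NewScoreOption can fail (it returns an error) and BuildFlowPubSubScoreOption no longer wraps the result in a pubsub.Option; it now hands back the raw *pubsub.PeerScoreParams and *pubsub.PeerScoreThresholds (see the hunks below). The following is a minimal sketch of how a caller might wire the returned values into go-libp2p-pubsub; the helper name and the surrounding host/context plumbing are illustrative assumptions, not part of this diff:

	import (
		"context"
		"fmt"

		pubsub "github.com/libp2p/go-libp2p-pubsub"
		"github.com/libp2p/go-libp2p/core/host"

		"github.com/onflow/flow-go/network/p2p"
		"github.com/onflow/flow-go/network/p2p/scoring"
	)

	// buildGossipSubWithScoring is a hypothetical helper showing the new wiring.
	func buildGossipSubWithScoring(ctx context.Context, h host.Host, cfg *scoring.ScoreOptionConfig, sp p2p.SubscriptionProvider) (*pubsub.PubSub, error) {
		scoreOpt, err := scoring.NewScoreOption(cfg, sp)
		if err != nil {
			return nil, fmt.Errorf("could not create score option: %w", err)
		}
		// the score option hands back raw params/thresholds; GossipSub consumes them
		// via the standard peer-score option.
		params, thresholds := scoreOpt.BuildFlowPubSubScoreOption()
		return pubsub.NewGossipSub(ctx, h, pubsub.WithPeerScore(params, thresholds))
	}

Note that the ScoreOption is itself a component, so its Start must still be invoked (by the node's component lifecycle) for the app-specific score registry's background workers to run.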
@@ -193,34 +103,105 @@ func NewScoreOption(cfg *ScoreOptionConfig) *ScoreOption { TraceSampler: throttledSampler, DebugSampler: throttledSampler, }) - validator := NewSubscriptionValidator() - scoreRegistry := NewGossipSubAppSpecificScoreRegistry(&GossipSubAppSpecificScoreRegistryConfig{ - Logger: logger, - Penalty: DefaultGossipSubCtrlMsgPenaltyValue(), - Validator: validator, - Init: InitAppScoreRecordState, - IdProvider: cfg.provider, - CacheFactory: func() p2p.GossipSubSpamRecordCache { - return netcache.NewGossipSubSpamRecordCache(cfg.cacheSize, cfg.logger, cfg.cacheMetrics, DefaultDecayFunction()) + + validator := NewSubscriptionValidator(cfg.logger, provider) + scoreRegistry, err := NewGossipSubAppSpecificScoreRegistry(&GossipSubAppSpecificScoreRegistryConfig{ + Logger: logger, + Penalty: cfg.params.ScoringRegistryParameters.MisbehaviourPenalties, + Validator: validator, + IdProvider: cfg.provider, + HeroCacheMetricsFactory: cfg.heroCacheMetricsFactory, + AppScoreCacheFactory: func() p2p.GossipSubApplicationSpecificScoreCache { + collector := metrics.NewGossipSubApplicationSpecificScoreCacheMetrics(cfg.heroCacheMetricsFactory, cfg.networkingType) + return internal.NewAppSpecificScoreCache(cfg.params.ScoringRegistryParameters.SpamRecordCache.CacheSize, cfg.logger, collector) + }, + SpamRecordCacheFactory: func() p2p.GossipSubSpamRecordCache { + collector := metrics.GossipSubSpamRecordCacheMetricsFactory(cfg.heroCacheMetricsFactory, cfg.networkingType) + return netcache.NewGossipSubSpamRecordCache(cfg.params.ScoringRegistryParameters.SpamRecordCache.CacheSize, cfg.logger, collector, + InitAppScoreRecordStateFunc(cfg.params.ScoringRegistryParameters.SpamRecordCache.Decay.MaximumSpamPenaltyDecayFactor), + DefaultDecayFunction(cfg.params.ScoringRegistryParameters.SpamRecordCache.Decay)) }, + GetDuplicateMessageCount: func(id peer.ID) float64 { + return cfg.getDuplicateMessageCount(id) + }, + Parameters: cfg.params.ScoringRegistryParameters.AppSpecificScore, + NetworkingType: cfg.networkingType, + AppSpecificScoreParams: cfg.params.PeerScoring.Protocol.AppSpecificScore, + DuplicateMessageThreshold: cfg.params.PeerScoring.Protocol.AppSpecificScore.DuplicateMessageThreshold, + Collector: cfg.scoringRegistryMetricsCollector, }) + if err != nil { + return nil, fmt.Errorf("failed to create gossipsub app specific score registry: %w", err) + } + s := &ScoreOption{ - logger: logger, - validator: validator, - peerScoreParams: defaultPeerScoreParams(), + logger: logger, + validator: validator, + peerScoreParams: &pubsub.PeerScoreParams{ + Topics: make(map[string]*pubsub.TopicScoreParams), + // we don't set all the parameters, so we skip the atomic validation. + // atomic validation fails initialization if any parameter is not set. + SkipAtomicValidation: cfg.params.PeerScoring.Internal.TopicParameters.SkipAtomicValidation, + // DecayInterval is the interval over which we decay the effect of past behavior, so that + // a good or bad behavior will not have a permanent effect on the penalty. It is also the interval + // that GossipSub uses to refresh the scores of all peers. + DecayInterval: cfg.params.PeerScoring.Internal.DecayInterval, + // DecayToZero defines the maximum value below which a peer scoring counter is reset to zero. + // This is to prevent the counter from decaying to a very small value. + // When a counter hits the DecayToZero threshold, it means that the peer did not exhibit the behavior + // for a long time, and we can reset the counter. 
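+			// Illustrative arithmetic (hypothetical values, not the configured defaults): with a
+			// decay factor of 0.9 applied once per DecayInterval, a counter of 1.0 falls below a
+			// DecayToZero of 0.01 after 44 intervals (0.9^44 ≈ 0.0097) and is then reset to zero.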
+ DecayToZero: cfg.params.PeerScoring.Internal.DecayToZero, + // AppSpecificWeight is the weight of the application specific penalty. + AppSpecificWeight: cfg.params.PeerScoring.Internal.AppSpecificScoreWeight, + // PenaltyThreshold is the threshold above which a peer is penalized for GossipSub-level misbehaviors. + BehaviourPenaltyThreshold: cfg.params.PeerScoring.Internal.Behaviour.PenaltyThreshold, + // PenaltyWeight is the weight of the GossipSub-level penalty. + BehaviourPenaltyWeight: cfg.params.PeerScoring.Internal.Behaviour.PenaltyWeight, + // PenaltyDecay is the decay of the GossipSub-level penalty (applied every decay interval). + BehaviourPenaltyDecay: cfg.params.PeerScoring.Internal.Behaviour.PenaltyDecay, + }, + peerThresholdParams: &pubsub.PeerScoreThresholds{ + GossipThreshold: cfg.params.PeerScoring.Internal.Thresholds.Gossip, + PublishThreshold: cfg.params.PeerScoring.Internal.Thresholds.Publish, + GraylistThreshold: cfg.params.PeerScoring.Internal.Thresholds.Graylist, + AcceptPXThreshold: cfg.params.PeerScoring.Internal.Thresholds.AcceptPX, + OpportunisticGraftThreshold: cfg.params.PeerScoring.Internal.Thresholds.OpportunisticGraft, + }, + defaultTopicScoreParams: &pubsub.TopicScoreParams{ + TopicWeight: cfg.params.PeerScoring.Internal.TopicParameters.TopicWeight, + SkipAtomicValidation: cfg.params.PeerScoring.Internal.TopicParameters.SkipAtomicValidation, + InvalidMessageDeliveriesWeight: cfg.params.PeerScoring.Internal.TopicParameters.InvalidMessageDeliveriesWeight, + InvalidMessageDeliveriesDecay: cfg.params.PeerScoring.Internal.TopicParameters.InvalidMessageDeliveriesDecay, + TimeInMeshQuantum: cfg.params.PeerScoring.Internal.TopicParameters.TimeInMeshQuantum, + MeshMessageDeliveriesWeight: cfg.params.PeerScoring.Internal.TopicParameters.MeshDeliveriesWeight, + MeshMessageDeliveriesDecay: cfg.params.PeerScoring.Internal.TopicParameters.MeshMessageDeliveriesDecay, + MeshMessageDeliveriesCap: cfg.params.PeerScoring.Internal.TopicParameters.MeshMessageDeliveriesCap, + MeshMessageDeliveriesThreshold: cfg.params.PeerScoring.Internal.TopicParameters.MeshMessageDeliveryThreshold, + MeshMessageDeliveriesWindow: cfg.params.PeerScoring.Internal.TopicParameters.MeshMessageDeliveriesWindow, + MeshMessageDeliveriesActivation: cfg.params.PeerScoring.Internal.TopicParameters.MeshMessageDeliveryActivation, + }, + appScoreFunc: scoreRegistry.AppSpecificScoreFunc(), + appScoreRegistry: scoreRegistry, } // set the app specific penalty function for the penalty option // if the app specific penalty function is not set, use the default one - if cfg.appScoreFunc == nil { - s.appScoreFunc = scoreRegistry.AppSpecificScoreFunc() - } else { + if cfg.appScoreFunc != nil { s.appScoreFunc = cfg.appScoreFunc + s.logger. + Warn(). + Str(logging.KeyNetworkingSecurity, "true"). + Msg("app specific score function is overridden, should never happen in production") } - // registers the score registry as the consumer of the invalid control message notifications - if cfg.registerNotificationConsumerFunc != nil { - cfg.registerNotificationConsumerFunc(scoreRegistry) + if cfg.params.PeerScoring.Internal.DecayInterval > 0 && cfg.params.PeerScoring.Internal.DecayInterval != s.peerScoreParams.DecayInterval { + // overrides the default decay interval if the decay interval is set. + s.peerScoreParams.DecayInterval = cfg.params.PeerScoring.Internal.DecayInterval + s.logger. + Warn(). + Str(logging.KeyNetworkingSecurity, "true"). + Dur("decay_interval_ms", cfg.params.PeerScoring.Internal.DecayInterval). 
+ Msg("decay interval is overridden, should never happen in production") } s.peerScoreParams.AppSpecificScore = s.appScoreFunc @@ -230,72 +211,65 @@ func NewScoreOption(cfg *ScoreOptionConfig) *ScoreOption { topicParams(s.peerScoreParams.Topics) } - return s + s.Component = component.NewComponentManagerBuilder().AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + s.logger.Info().Msg("starting score registry") + scoreRegistry.Start(ctx) + select { + case <-ctx.Done(): + s.logger.Warn().Msg("stopping score registry; context done") + case <-scoreRegistry.Ready(): + s.logger.Info().Msg("score registry started") + ready() + s.logger.Info().Msg("score registry ready") + } + + <-ctx.Done() + s.logger.Info().Msg("stopping score registry") + <-scoreRegistry.Done() + s.logger.Info().Msg("score registry stopped") + }).Build() + + return s, nil } -func (s *ScoreOption) SetSubscriptionProvider(provider *SubscriptionProvider) error { - return s.validator.RegisterSubscriptionProvider(provider) -} - -func (s *ScoreOption) BuildFlowPubSubScoreOption() pubsub.Option { - s.preparePeerScoreThresholds() - +func (s *ScoreOption) BuildFlowPubSubScoreOption() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds) { s.logger.Info(). Float64("gossip_threshold", s.peerThresholdParams.GossipThreshold). Float64("publish_threshold", s.peerThresholdParams.PublishThreshold). Float64("graylist_threshold", s.peerThresholdParams.GraylistThreshold). Float64("accept_px_threshold", s.peerThresholdParams.AcceptPXThreshold). Float64("opportunistic_graft_threshold", s.peerThresholdParams.OpportunisticGraftThreshold). - Msg("peer penalty thresholds configured") + Msg("pubsub score thresholds are set") - return pubsub.WithPeerScore( - s.peerScoreParams, - s.peerThresholdParams, - ) -} - -func (s *ScoreOption) preparePeerScoreThresholds() { - s.peerThresholdParams = &pubsub.PeerScoreThresholds{ - GossipThreshold: DefaultGossipThreshold, - PublishThreshold: DefaultPublishThreshold, - GraylistThreshold: DefaultGraylistThreshold, - AcceptPXThreshold: DefaultAcceptPXThreshold, - OpportunisticGraftThreshold: DefaultOpportunisticGraftThreshold, + for topic, topicParams := range s.peerScoreParams.Topics { + topicScoreParamLogger := utils.TopicScoreParamsLogger(s.logger, topic, topicParams) + topicScoreParamLogger.Info(). + Msg("pubsub score topic parameters are set for topic") } + + return s.peerScoreParams, s.peerThresholdParams } -func defaultPeerScoreParams() *pubsub.PeerScoreParams { - return &pubsub.PeerScoreParams{ - Topics: make(map[string]*pubsub.TopicScoreParams), - // we don't set all the parameters, so we skip the atomic validation. - // atomic validation fails initialization if any parameter is not set. - SkipAtomicValidation: true, - // DecayInterval is the interval over which we decay the effect of past behavior. So that - // a good or bad behavior will not have a permanent effect on the penalty. - DecayInterval: defaultDecayInterval, - // DecayToZero defines the maximum value below which a peer scoring counter is reset to zero. - // This is to prevent the counter from decaying to a very small value. - // When a counter hits the DecayToZero threshold, it means that the peer did not exhibit the behavior - // for a long time, and we can reset the counter. - DecayToZero: defaultDecayToZero, - // AppSpecificWeight is the weight of the application specific penalty. - AppSpecificWeight: DefaultAppSpecificScoreWeight, +// TopicScoreParams returns the topic score parameters for the given topic. 
If the topic
+// score parameters are not set, it returns the default topic score parameters.
+// The custom topic parameters are set at the initialization of the score option.
+// Args:
+// - topic: the topic for which the score parameters are requested.
+// Returns:
+// - the topic score parameters for the given topic, or the default topic score parameters if
+// the topic score parameters are not set.
+func (s *ScoreOption) TopicScoreParams(topic *pubsub.Topic) *pubsub.TopicScoreParams {
+	params, exists := s.peerScoreParams.Topics[topic.String()]
+	if !exists {
+		return s.defaultTopicScoreParams
 	}
+	return params
 }

-func (s *ScoreOption) BuildGossipSubScoreOption() pubsub.Option {
-	s.preparePeerScoreThresholds()
-
-	s.logger.Info().
-		Float64("gossip_threshold", s.peerThresholdParams.GossipThreshold).
-		Float64("publish_threshold", s.peerThresholdParams.PublishThreshold).
-		Float64("graylist_threshold", s.peerThresholdParams.GraylistThreshold).
-		Float64("accept_px_threshold", s.peerThresholdParams.AcceptPXThreshold).
-		Float64("opportunistic_graft_threshold", s.peerThresholdParams.OpportunisticGraftThreshold).
-		Msg("peer penalty thresholds configured")
-
-	return pubsub.WithPeerScore(
-		s.peerScoreParams,
-		s.peerThresholdParams,
-	)
+// OnInvalidControlMessageNotification is called when a new invalid control message notification is distributed.
+// Any error encountered while consuming the event must be handled internally.
+// The implementation must be concurrency safe and non-blocking.
+// Note: there is no real-time guarantee on processing the notification.
+func (s *ScoreOption) OnInvalidControlMessageNotification(notif *p2p.InvCtrlMsgNotif) {
+	s.appScoreRegistry.OnInvalidControlMessageNotification(notif)
 }
diff --git a/network/p2p/scoring/scoring_test.go b/network/p2p/scoring/scoring_test.go
index 613cb0d3b30..fb85db3e06a 100644
--- a/network/p2p/scoring/scoring_test.go
+++ b/network/p2p/scoring/scoring_test.go
@@ -7,71 +7,29 @@ import (
 	"testing"
 	"time"

-	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/rs/zerolog"
 	mocktestify "github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"

+	"github.com/onflow/flow-go/config"
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/module/component"
+	"github.com/onflow/flow-go/model/messages"
+	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/module/id"
 	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/module/metrics"
 	"github.com/onflow/flow-go/module/mock"
+	flownet "github.com/onflow/flow-go/network"
 	"github.com/onflow/flow-go/network/channels"
 	"github.com/onflow/flow-go/network/p2p"
+	p2pconfig "github.com/onflow/flow-go/network/p2p/config"
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+	mockp2p "github.com/onflow/flow-go/network/p2p/mock"
 	p2ptest "github.com/onflow/flow-go/network/p2p/test"
 	"github.com/onflow/flow-go/utils/unittest"
 )

-// mockInspectorSuite is a mock implementation of the GossipSubInspectorSuite interface.
-// It is used to test the impact of invalid control messages on the scoring and connectivity of nodes in a network.
-type mockInspectorSuite struct {
-	component.Component
-	t        *testing.T
-	consumer p2p.GossipSubInvCtrlMsgNotifConsumer
-}
-
-// ensures that mockInspectorSuite implements the GossipSubInspectorSuite interface.
-var _ p2p.GossipSubInspectorSuite = (*mockInspectorSuite)(nil)
-
-// newMockInspectorSuite creates a new mockInspectorSuite.
-// Args:
-// - t: the test object used for assertions.
-// Returns:
-// - a new mockInspectorSuite.
-func newMockInspectorSuite(t *testing.T) *mockInspectorSuite {
-	i := &mockInspectorSuite{
-		t: t,
-	}
-
-	builder := component.NewComponentManagerBuilder()
-	builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
-		ready()
-		<-ctx.Done()
-	})
-
-	i.Component = builder.Build()
-	return i
-}
-
-// InspectFunc returns a function that is called when a node receives a control message.
-// In this mock implementation, the function does nothing.
-func (m *mockInspectorSuite) InspectFunc() func(peer.ID, *pubsub.RPC) error {
-	return nil
-}
-
-// AddInvCtrlMsgNotifConsumer adds a consumer for invalid control message notifications.
-// In this mock implementation, the consumer is stored in the mockInspectorSuite, and is used to simulate the reception of invalid control messages.
-// Args:
-// - c: the consumer to add.
-// Returns:
-// - nil.
-// Note: this function will fail the test if the consumer is already set.
-func (m *mockInspectorSuite) AddInvCtrlMsgNotifConsumer(c p2p.GossipSubInvCtrlMsgNotifConsumer) {
-	require.Nil(m.t, m.consumer)
-	m.consumer = c
-}
-
 // TestInvalidCtrlMsgScoringIntegration tests the impact of invalid control messages on the scoring and connectivity of nodes in a network.
 // It creates a network of 2 nodes, and sends a set of control messages with invalid topic IDs to one of the nodes.
 // It then checks that the node receiving the invalid control messages decreases its score for the peer spamming the invalid messages, and
@@ -83,24 +41,58 @@ func TestInvalidCtrlMsgScoringIntegration(t *testing.T) {
 	sporkId := unittest.IdentifierFixture()
 	idProvider := mock.NewIdentityProvider(t)

-	inspectorSuite1 := newMockInspectorSuite(t)
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+
+	cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 10 * time.Millisecond // speed up the test
+
+	var notificationConsumer p2p.GossipSubInvCtrlMsgNotifConsumer
+	inspector := mockp2p.NewGossipSubRPCInspector(t)
+	inspector.On("Inspect", mocktestify.Anything, mocktestify.Anything).Return(nil) // no-op for the inspector
+	inspector.On("ActiveClustersChanged", mocktestify.Anything).Return().Maybe()    // no-op for the inspector
+	inspector.On("Start", mocktestify.Anything).Return(nil)                         // no-op for the inspector
+
+	// mocking the Ready and Done channels to be closed
+	done := make(chan struct{})
+	close(done)
+	f := func() <-chan struct{} {
+		return done
+	}
+	inspector.On("Ready").Return(f()) // no-op for the inspector
+	inspector.On("Done").Return(f())  // no-op for the inspector

 	node1, id1 := p2ptest.NodeFixture(
 		t,
 		sporkId,
 		t.Name(),
+		idProvider,
 		p2ptest.WithRole(flow.RoleConsensus),
-		p2ptest.WithPeerScoringEnabled(idProvider),
-		p2ptest.WithGossipSubRpcInspectorSuite(inspectorSuite1))
+		p2ptest.OverrideFlowConfig(cfg),
+		p2ptest.OverrideGossipSubRpcInspectorFactory(func(logger zerolog.Logger,
+			_ flow.Identifier,
+			_ *p2pconfig.RpcInspectorParameters,
+			_ module.GossipSubMetrics,
+			_ metrics.HeroCacheMetricsFactory,
+			_ flownet.NetworkingType,
+			_ module.IdentityProvider,
+			_ func() p2p.TopicProvider,
+			consumer p2p.GossipSubInvCtrlMsgNotifConsumer) (p2p.GossipSubRPCInspector, error) {
+			// capture the notification consumer so the test can inject invalid control message notifications below
+			notificationConsumer = consumer
+			return inspector, nil
+		}))

 	node2, id2 := p2ptest.NodeFixture(
 		t,
 		sporkId,
 		t.Name(),
+		idProvider,
 		p2ptest.WithRole(flow.RoleConsensus),
-		p2ptest.WithPeerScoringEnabled(idProvider))
+		p2ptest.OverrideFlowConfig(cfg))

 	ids := flow.IdentityList{&id1, &id2}
 	nodes := []p2p.LibP2PNode{node1, node2}

+	// suppressing the "peers provider not set" error
+	p2ptest.RegisterPeerProviders(t, nodes)
+
 	provider := id.NewFixedIdentityProvider(ids)
 	idProvider.On("ByPeerID", mocktestify.Anything).Return(
@@ -111,30 +103,39 @@ func TestInvalidCtrlMsgScoringIntegration(t *testing.T) {
 		_, ok := provider.ByPeerID(peerId)
 		return ok
 	})
-	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
-	defer p2ptest.StopNodes(t, nodes, cancel, 2*time.Second)
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)

 	p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids)
-
-	// checks end-to-end message delivery works on GossipSub
-	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) {
-		blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
-		return unittest.ProposalFixture(), blockTopic
+	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
+	// checks end-to-end message delivery works on GossipSub.
+	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
+		return (*messages.Proposal)(unittest.ProposalFixture())
 	})

-	// now simulates node2 spamming node1 with invalid gossipsub control messages.
-	for i := 0; i < 30; i++ {
-		inspectorSuite1.consumer.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
-			PeerID:  node2.Host().ID(),
-			MsgType: p2p.ControlMessageTypes()[rand.Intn(len(p2p.ControlMessageTypes()))],
-			Count:   1,
-			Err:     fmt.Errorf("invalid control message"),
+	// simulates node2 spamming node1 with invalid gossipsub control messages until node2 gets disallow-listed.
+	// since the decay starts below .99 and is incremented by only .01 per step by default, a large number of
+	// spam notifications is needed before the node gets disallow-listed
+	for i := 0; i < 750; i++ {
+		notificationConsumer.OnInvalidControlMessageNotification(&p2p.InvCtrlMsgNotif{
+			PeerID:  node2.ID(),
+			MsgType: p2pmsg.ControlMessageTypes()[rand.Intn(len(p2pmsg.ControlMessageTypes()))],
+			Error:   fmt.Errorf("invalid control message"),
 		})
 	}

+	time.Sleep(1 * time.Second) // wait for the app-specific score to be updated in the cache (the cached score is only refreshed once ScoreTTL has elapsed)
+
-	// checks no GossipSub message exchange should no longer happen between node1 and node2.
+	// checks that GossipSub message exchange no longer happens between node1 and node2.
-	p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, []p2p.LibP2PNode{node1}, []p2p.LibP2PNode{node2}, func() (interface{}, channels.Topic) {
-		blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
-		return unittest.ProposalFixture(), blockTopic
-	})
+	p2ptest.EnsureNoPubsubExchangeBetweenGroups(
+		t,
+		ctx,
+		[]p2p.LibP2PNode{node1},
+		flow.IdentifierList{id1.NodeID},
+		[]p2p.LibP2PNode{node2},
+		flow.IdentifierList{id2.NodeID},
+		blockTopic,
+		1,
+		func() interface{} {
+			return (*messages.Proposal)(unittest.ProposalFixture())
+		})
 }
diff --git a/network/p2p/scoring/subscriptionCache.go b/network/p2p/scoring/subscriptionCache.go
new file mode 100644
index 00000000000..a58ab79db5c
--- /dev/null
+++ b/network/p2p/scoring/subscriptionCache.go
@@ -0,0 +1,35 @@
+package scoring
+
+import "github.com/libp2p/go-libp2p/core/peer"
+
+// SubscriptionCache implements an in-memory cache that keeps track of the topics a peer is subscribed to.
+// The cache is designed to be used in update cycles, i.e., at every regular interval of time, the cache is updated for
+// all peers.
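+// A typical cycle, as implemented by the subscription provider in this diff (a sketch of the call pattern,
+// not exact code):
+//
+//	cycle := cache.MoveToNextUpdateCycle()     // invalidate records written in previous cycles
+//	for each topic, for each authorized peer:
+//	    cache.AddWithInitTopicForPeer(pid, t)  // repopulate this cycle's subscriptions
+//
+// Readers then call cache.GetSubscribedTopics(pid) to observe the freshest known subscriptions.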
+type SubscriptionCache interface {
+	// GetSubscribedTopics returns the list of topics a peer is subscribed to.
+	// Returns:
+	// - []string: the list of topics the peer is subscribed to.
+	// - bool: true if there is a record for the peer, false otherwise.
+	GetSubscribedTopics(pid peer.ID) ([]string, bool)
+
+	// MoveToNextUpdateCycle moves the subscription cache to the next update cycle.
+	// A new update cycle is started when the subscription cache is first created, and then every time the subscription cache
+	// is updated. The update cycle is used to keep track of the last time the subscription cache was updated. It is used to
+	// implement a notion of time in the subscription cache.
+	// Returns:
+	// - uint64: the current update cycle.
+	MoveToNextUpdateCycle() uint64
+
+	// AddWithInitTopicForPeer appends a topic to the list of topics a peer is subscribed to. If the peer is not subscribed to any
+	// topics yet, a new record is created.
+	// If the last update cycle is older than the current cycle, the list of topics for the peer is first cleared, and then
+	// the topic is added to the list. This is to ensure that the list of topics for a peer is always up to date.
+	// Args:
+	// - pid: the peer id of the peer.
+	// - topic: the topic to add.
+	// Returns:
+	// - []string: the list of topics the peer is subscribed to after the update.
+	// - error: an error if the update failed; any returned error is an irrecoverable error and indicates a bug or misconfiguration.
+	// Implementation must be thread-safe.
+	AddWithInitTopicForPeer(pid peer.ID, topic string) ([]string, error)
+}
diff --git a/network/p2p/scoring/subscription_provider.go b/network/p2p/scoring/subscription_provider.go
index 23aea760de1..2bfd43bb870 100644
--- a/network/p2p/scoring/subscription_provider.go
+++ b/network/p2p/scoring/subscription_provider.go
@@ -1,123 +1,162 @@
 package scoring

 import (
-	"sync"
+	"fmt"
+	"time"

+	"github.com/go-playground/validator/v10"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/rs/zerolog"
 	"go.uber.org/atomic"

+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/component"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/network"
 	"github.com/onflow/flow-go/network/p2p"
+	p2pconfig "github.com/onflow/flow-go/network/p2p/config"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
+	"github.com/onflow/flow-go/network/p2p/scoring/internal"
+	"github.com/onflow/flow-go/utils/logging"
 )

 // SubscriptionProvider provides a list of topics a peer is subscribed to.
 type SubscriptionProvider struct {
-	logger zerolog.Logger
-	tp     p2p.TopicProvider
+	component.Component
+	logger              zerolog.Logger
+	topicProviderOracle func() p2p.TopicProvider

-	// allTopics is a list of all topics in the pubsub network
 	// TODO: we should add an expiry time to this cache and clean up the cache periodically
 	// to avoid leakage of stale topics.
-	peersByTopic         sync.Map // map[topic]peers
-	peersByTopicUpdating sync.Map // whether a goroutine is already updating the list of peers for a topic
+	cache SubscriptionCache
+
+	// idProvider translates the peer ids to flow ids.
+	idProvider module.IdentityProvider

 	// allTopics is a list of all topics in the pubsub network that this node is subscribed to.
-	allTopicsLock   sync.RWMutex // protects allTopics
-	allTopics       []string     // list of all topics in the pubsub network that this node has subscribed to.
- allTopicsUpdate atomic.Bool // whether a goroutine is already updating the list of topics. + allTopicsUpdate atomic.Bool // whether a goroutine is already updating the list of topics + allTopicsUpdateInterval time.Duration // the interval for updating the list of topics in the pubsub network that this node has subscribed to. } -func NewSubscriptionProvider(logger zerolog.Logger, tp p2p.TopicProvider) *SubscriptionProvider { - return &SubscriptionProvider{ - logger: logger.With().Str("module", "subscription_provider").Logger(), - tp: tp, - allTopics: make([]string, 0), - } +type SubscriptionProviderConfig struct { + Logger zerolog.Logger `validate:"required"` + TopicProviderOracle func() p2p.TopicProvider `validate:"required"` + IdProvider module.IdentityProvider `validate:"required"` + HeroCacheMetricsFactory metrics.HeroCacheMetricsFactory `validate:"required"` + Params *p2pconfig.SubscriptionProviderParameters `validate:"required"` + NetworkingType network.NetworkingType `validate:"required"` } -// GetSubscribedTopics returns all the subscriptions of a peer within the pubsub network. -// Note that the current node can only see peer subscriptions to topics that it has also subscribed to -// e.g., if current node has subscribed to topics A and B, and peer1 has subscribed to topics A, B, and C, -// then GetSubscribedTopics(peer1) will return A and B. Since this node has not subscribed to topic C, -// it will not be able to query for other peers subscribed to topic C. -func (s *SubscriptionProvider) GetSubscribedTopics(pid peer.ID) []string { - topics := s.getAllTopics() +var _ p2p.SubscriptionProvider = (*SubscriptionProvider)(nil) - // finds the topics that this peer is subscribed to. - subscriptions := make([]string, 0) - for _, topic := range topics { - peers := s.getPeersByTopic(topic) - for _, p := range peers { - if p == pid { - subscriptions = append(subscriptions, topic) - } - } +func NewSubscriptionProvider(cfg *SubscriptionProviderConfig) (*SubscriptionProvider, error) { + if err := validator.New().Struct(cfg); err != nil { + return nil, fmt.Errorf("invalid subscription provider config: %w", err) } - return subscriptions -} + cacheMetrics := metrics.NewSubscriptionRecordCacheMetricsFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkingType) + cache := internal.NewSubscriptionRecordCache(cfg.Params.CacheSize, cfg.Logger, cacheMetrics) -// getAllTopics returns all the topics in the pubsub network that this node (peer) has subscribed to. -// Note that this method always returns the cached version of the subscribed topics while querying the -// pubsub network for the list of topics in a goroutine. Hence, the first call to this method always returns an empty -// list. -func (s *SubscriptionProvider) getAllTopics() []string { - go func() { - // TODO: refactor this to a component manager worker once we have a startable libp2p node. - if updateInProgress := s.allTopicsUpdate.CompareAndSwap(false, true); updateInProgress { - // another goroutine is already updating the list of topics - return - } + p := &SubscriptionProvider{ + logger: cfg.Logger.With().Str("module", "subscription_provider").Logger(), + topicProviderOracle: cfg.TopicProviderOracle, + allTopicsUpdateInterval: cfg.Params.UpdateInterval, + idProvider: cfg.IdProvider, + cache: cache, + } + + builder := component.NewComponentManagerBuilder() + p.Component = builder.AddWorker( + func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + p.logger.Debug(). 
+			Float64("update_interval_seconds", cfg.Params.UpdateInterval.Seconds()).
+			Msg("subscription provider started; starting update topics loop")
+		p.updateTopicsLoop(ctx)

-		allTopics := s.tp.GetTopics()
-		s.atomicUpdateAllTopics(allTopics)
+		<-ctx.Done()
+		p.logger.Debug().Msg("subscription provider stopped; stopping update topics loop")
+	}).Build()

-		// remove the update flag
-		s.allTopicsUpdate.Store(false)
+	return p, nil
+}

-		s.logger.Trace().Msgf("all topics updated: %v", allTopics)
-	}()
+func (s *SubscriptionProvider) updateTopicsLoop(ctx irrecoverable.SignalerContext) {
+	ticker := time.NewTicker(s.allTopicsUpdateInterval)
+	defer ticker.Stop()

-	s.allTopicsLock.RLock()
-	defer s.allTopicsLock.RUnlock()
-	return s.allTopics
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			if err := s.updateTopics(); err != nil {
+				ctx.Throw(fmt.Errorf("update loop failed: %w", err))
+				return
+			}
+		}
+	}
 }

-// getPeersByTopic returns all the peers subscribed to a topic.
-// Note that this method always returns the cached version of the subscribed peers while querying the
-// pubsub network for the list of topics in a goroutine. Hence, the first call to this method always returns an empty
-// list.
-// As this method is injected into GossipSub, it is vital that it never block the caller, otherwise it causes a
-// deadlock on the GossipSub.
-// Also note that, this peer itself should be subscribed to the topic, otherwise, it cannot find the list of peers
-// subscribed to the topic in the pubsub network due to an inherent limitation of GossipSub.
-func (s *SubscriptionProvider) getPeersByTopic(topic string) []peer.ID {
-	go func() {
-		// TODO: refactor this to a component manager worker once we have a startable libp2p node.
-		if _, updateInProgress := s.peersByTopicUpdating.LoadOrStore(topic, true); updateInProgress {
-			// another goroutine is already updating the list of peers for this topic
-			return
-		}
+// updateTopics queries the pubsub network for the list of topics this node is subscribed to, and refreshes the
+// subscription cache for every authorized peer on those topics. It is invoked periodically by updateTopicsLoop.
+// Returns:
+// - error on failure to update the cache. The returned error is irrecoverable and indicates an exception.
+func (s *SubscriptionProvider) updateTopics() error {
+	if updateInProgress := s.allTopicsUpdate.CompareAndSwap(false, true); updateInProgress {
+		// another goroutine is already updating the list of topics
+		s.logger.Trace().Msg("skipping topic update; another update is already in progress")
+		return nil
+	}

-		subscribedPeers := s.tp.ListPeers(topic)
-		s.peersByTopic.Store(topic, subscribedPeers)
+	// start of critical section; protected by updateInProgress atomic flag
+	allTopics := s.topicProviderOracle().GetTopics()
+	s.logger.Trace().Msgf("all topics updated: %v", allTopics)

-		// remove the update flag
-		s.peersByTopicUpdating.Delete(topic)
+	// increments the update cycle of the cache, so that the previous cache entries are invalidated upon a read or write.
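+	// Example (hypothetical cycle numbers): if this call moves the cache from cycle 6 to cycle 7, a peer
+	// record last written in cycle 6 becomes stale; its topic list is cleared on the next
+	// AddWithInitTopicForPeer call, so only subscriptions observed during cycle 7 survive.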
+	s.cache.MoveToNextUpdateCycle()
+	for _, topic := range allTopics {
+		peers := s.topicProviderOracle().ListPeers(topic)

-		s.logger.Trace().Str("topic", topic).Msgf("peers by topic updated: %v", subscribedPeers)
-	}()
+		for _, p := range peers {
+			if _, authorized := s.idProvider.ByPeerID(p); !authorized {
+				// peer is not authorized (i.e., not staked); hence it does not have a valid role in the network, and
+				// we skip the topic update for this peer (also avoiding sybil attacks on the cache).
+				s.logger.Debug().
+					Str("remote_peer_id", p2plogging.PeerId(p)).
+					Bool(logging.KeyNetworkingSecurity, true).
+					Msg("skipping topic update for unauthorized peer")
+				continue
+			}

-	peerId, ok := s.peersByTopic.Load(topic)
-	if !ok {
-		return make([]peer.ID, 0)
+			updatedTopics, err := s.cache.AddWithInitTopicForPeer(p, topic)
+			if err != nil {
+				// this is an irrecoverable error; hence, we crash the node.
+				return fmt.Errorf("failed to update topics for peer %s: %w", p, err)
+			}
+			s.logger.Debug().
+				Str("remote_peer_id", p2plogging.PeerId(p)).
+				Strs("updated_topics", updatedTopics).
+				Msg("updated topics for peer")
+		}
 	}
-	return peerId.([]peer.ID)
+
+	// remove the update flag; end of critical section
+	s.allTopicsUpdate.Store(false)
+	return nil
 }

-// atomicUpdateAllTopics updates the list of all topics in the pubsub network that this node has subscribed to.
-func (s *SubscriptionProvider) atomicUpdateAllTopics(allTopics []string) {
-	s.allTopicsLock.Lock()
-	s.allTopics = allTopics
-	s.allTopicsLock.Unlock()
+// GetSubscribedTopics returns all the subscriptions of a peer within the pubsub network.
+func (s *SubscriptionProvider) GetSubscribedTopics(pid peer.ID) []string {
+	topics, ok := s.cache.GetSubscribedTopics(pid)
+	if !ok {
+		s.logger.Trace().Str("peer_id", p2plogging.PeerId(pid)).Msg("no topics found for peer")
+		return nil
+	}
+	return topics
 }
diff --git a/network/p2p/scoring/subscription_provider_test.go b/network/p2p/scoring/subscription_provider_test.go
index 25d4be455c8..84f5aeb6896 100644
--- a/network/p2p/scoring/subscription_provider_test.go
+++ b/network/p2p/scoring/subscription_provider_test.go
@@ -1,13 +1,22 @@
 package scoring_test

 import (
+	"context"
 	"testing"
 	"time"

 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/stretchr/testify/assert"
+	mockery "github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"

-	"github.com/onflow/flow-go/network/internal/p2pfixtures"
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/module/mock"
+	"github.com/onflow/flow-go/network"
+	"github.com/onflow/flow-go/network/p2p"
 	mockp2p "github.com/onflow/flow-go/network/p2p/mock"
 	"github.com/onflow/flow-go/network/p2p/scoring"
 	"github.com/onflow/flow-go/utils/slices"
@@ -18,20 +27,48 @@ import (
 // list of topics a peer is subscribed to.
func TestSubscriptionProvider_GetSubscribedTopics(t *testing.T) { tp := mockp2p.NewTopicProvider(t) - sp := scoring.NewSubscriptionProvider(unittest.Logger(), tp) + cfg, err := config.DefaultConfig() + require.NoError(t, err) + idProvider := mock.NewIdentityProvider(t) + + // set a low update interval to speed up the test + cfg.NetworkConfig.GossipSub.SubscriptionProvider.UpdateInterval = 100 * time.Millisecond + + sp, err := scoring.NewSubscriptionProvider(&scoring.SubscriptionProviderConfig{ + Logger: unittest.Logger(), + TopicProviderOracle: func() p2p.TopicProvider { + return tp + }, + Params: &cfg.NetworkConfig.GossipSub.SubscriptionProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + IdProvider: idProvider, + NetworkingType: network.PrivateNetwork, + }) + require.NoError(t, err) tp.On("GetTopics").Return([]string{"topic1", "topic2", "topic3"}).Maybe() - peer1 := p2pfixtures.PeerIdFixture(t) - peer2 := p2pfixtures.PeerIdFixture(t) - peer3 := p2pfixtures.PeerIdFixture(t) + peer1 := unittest.PeerIdFixture(t) + peer2 := unittest.PeerIdFixture(t) + peer3 := unittest.PeerIdFixture(t) + + idProvider.On("ByPeerID", mockery.Anything).Return(unittest.IdentityFixture(), true).Maybe() // mock peers 1 and 2 subscribed to topic 1 (along with other random peers) - tp.On("ListPeers", "topic1").Return(append([]peer.ID{peer1, peer2}, p2pfixtures.PeerIdsFixture(t, 10)...)) + tp.On("ListPeers", "topic1").Return(append([]peer.ID{peer1, peer2}, unittest.PeerIdFixtures(t, 10)...)) // mock peers 2 and 3 subscribed to topic 2 (along with other random peers) - tp.On("ListPeers", "topic2").Return(append([]peer.ID{peer2, peer3}, p2pfixtures.PeerIdsFixture(t, 10)...)) + tp.On("ListPeers", "topic2").Return(append([]peer.ID{peer2, peer3}, unittest.PeerIdFixtures(t, 10)...)) // mock peers 1 and 3 subscribed to topic 3 (along with other random peers) - tp.On("ListPeers", "topic3").Return(append([]peer.ID{peer1, peer3}, p2pfixtures.PeerIdsFixture(t, 10)...)) + tp.On("ListPeers", "topic3").Return(append([]peer.ID{peer1, peer3}, unittest.PeerIdFixtures(t, 10)...)) + + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + unittest.RequireCloseBefore(t, sp.Done(), 1*time.Second, "subscription provider did not stop in time") + }() + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + sp.Start(signalerCtx) + unittest.RequireCloseBefore(t, sp.Ready(), 1*time.Second, "subscription provider did not start in time") // As the calls to the TopicProvider are asynchronous, we need to wait for the goroutines to finish. assert.Eventually(t, func() bool { @@ -46,3 +83,77 @@ func TestSubscriptionProvider_GetSubscribedTopics(t *testing.T) { return slices.AreStringSlicesEqual([]string{"topic2", "topic3"}, sp.GetSubscribedTopics(peer3)) }, 1*time.Second, 100*time.Millisecond) } + +// TestSubscriptionProvider_GetSubscribedTopics_SkippingUnknownPeers tests that the SubscriptionProvider skips +// unknown peers when returning the list of topics a peer is subscribed to. In other words, if a peer is unknown, +// the SubscriptionProvider should not keep track of its subscriptions. 
+func TestSubscriptionProvider_GetSubscribedTopics_SkippingUnknownPeers(t *testing.T) { + tp := mockp2p.NewTopicProvider(t) + cfg, err := config.DefaultConfig() + require.NoError(t, err) + idProvider := mock.NewIdentityProvider(t) + + // set a low update interval to speed up the test + cfg.NetworkConfig.GossipSub.SubscriptionProvider.UpdateInterval = 100 * time.Millisecond + + sp, err := scoring.NewSubscriptionProvider(&scoring.SubscriptionProviderConfig{ + Logger: unittest.Logger(), + TopicProviderOracle: func() p2p.TopicProvider { + return tp + }, + Params: &cfg.NetworkConfig.GossipSub.SubscriptionProvider, + HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), + IdProvider: idProvider, + NetworkingType: network.PrivateNetwork, + }) + require.NoError(t, err) + + tp.On("GetTopics").Return([]string{"topic1", "topic2", "topic3"}).Maybe() + + peer1 := unittest.PeerIdFixture(t) + peer2 := unittest.PeerIdFixture(t) + peer3 := unittest.PeerIdFixture(t) + + // mock peers 1 and 2 as a known peer; peer 3 as an unknown peer + idProvider.On("ByPeerID", mockery.Anything). + Return(func(pid peer.ID) *flow.Identity { + if pid == peer1 || pid == peer2 { + return unittest.IdentityFixture() + } + return nil + }, func(pid peer.ID) bool { + if pid == peer1 || pid == peer2 { + return true + } + return false + }).Maybe() + + // mock peers 1 and 2 subscribed to topic 1 (along with other random peers) + tp.On("ListPeers", "topic1").Return(append([]peer.ID{peer1, peer2}, unittest.PeerIdFixtures(t, 10)...)) + // mock peers 2 and 3 subscribed to topic 2 (along with other random peers) + tp.On("ListPeers", "topic2").Return(append([]peer.ID{peer2, peer3}, unittest.PeerIdFixtures(t, 10)...)) + // mock peers 1 and 3 subscribed to topic 3 (along with other random peers) + tp.On("ListPeers", "topic3").Return(append([]peer.ID{peer1, peer3}, unittest.PeerIdFixtures(t, 10)...)) + + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + unittest.RequireCloseBefore(t, sp.Done(), 1*time.Second, "subscription provider did not stop in time") + }() + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + sp.Start(signalerCtx) + unittest.RequireCloseBefore(t, sp.Ready(), 1*time.Second, "subscription provider did not start in time") + + // As the calls to the TopicProvider are asynchronous, we need to wait for the goroutines to finish. 
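+	// (with the 100 ms update interval configured above and the 1 s Eventually window below, roughly ten cache refresh cycles can run before the assertions time out)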
+	// peer 1 should eventually be subscribed to topic 1 and topic 3, while peer 3 should not have any subscription record since it is unknown
+	assert.Eventually(t, func() bool {
+		return slices.AreStringSlicesEqual([]string{"topic1", "topic3"}, sp.GetSubscribedTopics(peer1)) &&
+			slices.AreStringSlicesEqual([]string{}, sp.GetSubscribedTopics(peer3))
+	}, 1*time.Second, 100*time.Millisecond)
+
+	// peer 2 should eventually be subscribed to topic 1 and topic 2, while peer 3 should not have any subscription record since it is unknown
+	assert.Eventually(t, func() bool {
+		return slices.AreStringSlicesEqual([]string{"topic1", "topic2"}, sp.GetSubscribedTopics(peer2)) &&
+			slices.AreStringSlicesEqual([]string{}, sp.GetSubscribedTopics(peer3))
+	}, 1*time.Second, 100*time.Millisecond)
+}
diff --git a/network/p2p/scoring/subscription_validator.go b/network/p2p/scoring/subscription_validator.go
index fbffe27752a..94a08c0e19c 100644
--- a/network/p2p/scoring/subscription_validator.go
+++ b/network/p2p/scoring/subscription_validator.go
@@ -1,47 +1,55 @@
 package scoring

 import (
-	"fmt"
-
 	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/rs/zerolog"

 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/component"
+	"github.com/onflow/flow-go/module/irrecoverable"
 	"github.com/onflow/flow-go/network/p2p"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
 	p2putils "github.com/onflow/flow-go/network/p2p/utils"
 )

 // SubscriptionValidator validates that a peer is subscribed to topics that it is allowed to subscribe to.
 // It is used to penalize peers that subscribe to topics that they are not allowed to subscribe to in GossipSub.
 type SubscriptionValidator struct {
+	component.Component
+	logger               zerolog.Logger
 	subscriptionProvider p2p.SubscriptionProvider
 }

-func NewSubscriptionValidator() *SubscriptionValidator {
-	return &SubscriptionValidator{}
-}
+func NewSubscriptionValidator(logger zerolog.Logger, provider p2p.SubscriptionProvider) *SubscriptionValidator {
+	v := &SubscriptionValidator{
+		logger:               logger.With().Str("component", "subscription_validator").Logger(),
+		subscriptionProvider: provider,
+	}

-var _ p2p.SubscriptionValidator = (*SubscriptionValidator)(nil)
+	v.Component = component.NewComponentManagerBuilder().
+		AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+			logger.Debug().Msg("starting subscription validator")
+			v.subscriptionProvider.Start(ctx)
+			select {
+			case <-ctx.Done():
+				logger.Debug().Msg("subscription validator is stopping")
+			case <-v.subscriptionProvider.Ready():
+				logger.Debug().Msg("subscription validator started")
+				ready()
+				logger.Debug().Msg("subscription validator is ready")
+			}

-// RegisterSubscriptionProvider registers the subscription provider with the subscription validator.
-// This follows a dependency injection pattern.
-// Args:
-//
-//	provider: the subscription provider
-//
-// Returns:
-//
-//	error: if the subscription provider is nil, an error is returned. The error is irrecoverable, i.e.,
-//	it indicates an illegal state in the execution of the code. We expect this error only when there is a bug in the code.
-//	Such errors should lead to a crash of the node.
-func (v *SubscriptionValidator) RegisterSubscriptionProvider(provider p2p.SubscriptionProvider) error { - if v.subscriptionProvider != nil { - return fmt.Errorf("subscription provider already registered") - } - v.subscriptionProvider = provider + <-ctx.Done() + logger.Debug().Msg("subscription validator is stopping") + <-v.subscriptionProvider.Done() + logger.Debug().Msg("subscription validator stopped") + }).Build() - return nil + return v } +var _ p2p.SubscriptionValidator = (*SubscriptionValidator)(nil) + // CheckSubscribedToAllowedTopics checks if a peer is subscribed to topics that it is allowed to subscribe to. // Args: // @@ -53,7 +61,10 @@ func (v *SubscriptionValidator) RegisterSubscriptionProvider(provider p2p.Subscr // The error is benign, i.e., it does not indicate an illegal state in the execution of the code. We expect this error // when there are malicious peers in the network. But such errors should not lead to a crash of the node. func (v *SubscriptionValidator) CheckSubscribedToAllowedTopics(pid peer.ID, role flow.Role) error { + lg := v.logger.With().Str("remote_peer_id", p2plogging.PeerId(pid)).Logger() + topics := v.subscriptionProvider.GetSubscribedTopics(pid) + lg.Trace().Strs("topics", topics).Msg("checking subscription for remote peer id") for _, topic := range topics { if !p2putils.AllowedSubscription(role, topic) { @@ -61,5 +72,6 @@ func (v *SubscriptionValidator) CheckSubscribedToAllowedTopics(pid peer.ID, role } } + lg.Trace().Msg("subscription is valid") return nil } diff --git a/network/p2p/scoring/subscription_validator_test.go b/network/p2p/scoring/subscription_validator_test.go index 05349f7dea4..1716d4e7daf 100644 --- a/network/p2p/scoring/subscription_validator_test.go +++ b/network/p2p/scoring/subscription_validator_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/onflow/flow-go/config" + "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" p2ptest "github.com/onflow/flow-go/network/p2p/test" flowpubsub "github.com/onflow/flow-go/network/validator/pubsub" @@ -31,12 +33,10 @@ import ( // any topic, the subscription validator returns no error. func TestSubscriptionValidator_NoSubscribedTopic(t *testing.T) { sp := mockp2p.NewSubscriptionProvider(t) - - sv := scoring.NewSubscriptionValidator() - require.NoError(t, sv.RegisterSubscriptionProvider(sp)) + sv := scoring.NewSubscriptionValidator(unittest.Logger(), sp) // mocks peer 1 not subscribed to any topic. - peer1 := p2pfixtures.PeerIdFixture(t) + peer1 := unittest.PeerIdFixture(t) sp.On("GetSubscribedTopics", peer1).Return([]string{}) // as peer 1 has not subscribed to any topic, the subscription validator should return no error regardless of the @@ -50,11 +50,10 @@ func TestSubscriptionValidator_NoSubscribedTopic(t *testing.T) { // topic, the subscription validator returns an error. func TestSubscriptionValidator_UnknownChannel(t *testing.T) { sp := mockp2p.NewSubscriptionProvider(t) - sv := scoring.NewSubscriptionValidator() - require.NoError(t, sv.RegisterSubscriptionProvider(sp)) + sv := scoring.NewSubscriptionValidator(unittest.Logger(), sp) // mocks peer 1 not subscribed to an unknown topic. 
- peer1 := p2pfixtures.PeerIdFixture(t) + peer1 := unittest.PeerIdFixture(t) sp.On("GetSubscribedTopics", peer1).Return([]string{"unknown-topic-1", "unknown-topic-2"}) // as peer 1 has subscribed to unknown topics, the subscription validator should return an error @@ -70,11 +69,10 @@ func TestSubscriptionValidator_UnknownChannel(t *testing.T) { // topics based on its Flow protocol role, the subscription validator returns no error. func TestSubscriptionValidator_ValidSubscriptions(t *testing.T) { sp := mockp2p.NewSubscriptionProvider(t) - sv := scoring.NewSubscriptionValidator() - require.NoError(t, sv.RegisterSubscriptionProvider(sp)) + sv := scoring.NewSubscriptionValidator(unittest.Logger(), sp) for _, role := range flow.Roles() { - peerId := p2pfixtures.PeerIdFixture(t) + peerId := unittest.PeerIdFixture(t) // allowed channels for the role excluding the test channels. allowedChannels := channels.ChannelsByRole(role).ExcludePattern(regexp.MustCompile("^(test).*")) sporkID := unittest.IdentifierFixture() @@ -101,8 +99,7 @@ func TestSubscriptionValidator_ValidSubscriptions(t *testing.T) { // is no longer true. func TestSubscriptionValidator_SubscribeToAllTopics(t *testing.T) { sp := mockp2p.NewSubscriptionProvider(t) - sv := scoring.NewSubscriptionValidator() - require.NoError(t, sv.RegisterSubscriptionProvider(sp)) + sv := scoring.NewSubscriptionValidator(unittest.Logger(), sp) allChannels := channels.Channels().ExcludePattern(regexp.MustCompile("^(test).*")) sporkID := unittest.IdentifierFixture() @@ -112,7 +109,7 @@ func TestSubscriptionValidator_SubscribeToAllTopics(t *testing.T) { } for _, role := range flow.Roles() { - peerId := p2pfixtures.PeerIdFixture(t) + peerId := unittest.PeerIdFixture(t) sp.On("GetSubscribedTopics", peerId).Return(allTopics) err := sv.CheckSubscribedToAllowedTopics(peerId, role) require.Error(t, err, role) @@ -124,11 +121,10 @@ func TestSubscriptionValidator_SubscribeToAllTopics(t *testing.T) { // topics based on its Flow protocol role, the subscription validator returns an error. func TestSubscriptionValidator_InvalidSubscriptions(t *testing.T) { sp := mockp2p.NewSubscriptionProvider(t) - sv := scoring.NewSubscriptionValidator() - require.NoError(t, sv.RegisterSubscriptionProvider(sp)) + sv := scoring.NewSubscriptionValidator(unittest.Logger(), sp) for _, role := range flow.Roles() { - peerId := p2pfixtures.PeerIdFixture(t) + peerId := unittest.PeerIdFixture(t) unauthorizedChannels := channels.Channels(). // all channels ExcludeChannels(channels.ChannelsByRole(role)). // excluding the channels for the role ExcludePattern(regexp.MustCompile("^(test).*")) // excluding the test channels. @@ -168,29 +164,49 @@ func TestSubscriptionValidator_InvalidSubscriptions(t *testing.T) { // 4. Verification node also publishes a chunk request on the RequestChunks channel. // 5. Test checks that consensus node does not receive the chunk request while the other verification node does. 
func TestSubscriptionValidator_Integration(t *testing.T) { + unittest.SkipUnless(t, unittest.TEST_FLAKY, "flaky tests") ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + cfg, err := config.DefaultConfig() + require.NoError(t, err) + // set a low update interval to speed up the test + cfg.NetworkConfig.GossipSub.SubscriptionProvider.UpdateInterval = 10 * time.Millisecond + cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 10 * time.Millisecond + // the score tracer interval is set to 500 milliseconds to speed up the test; it must be shorter than the heartbeat interval (1 second) of gossipsub to catch the score updates in time. + cfg.NetworkConfig.GossipSub.RpcTracer.ScoreTracerInterval = 500 * time.Millisecond + sporkId := unittest.IdentifierFixture() idProvider := mock.NewIdentityProvider(t) // one consensus node. conNode, conId := p2ptest.NodeFixture(t, sporkId, t.Name(), + idProvider, p2ptest.WithLogger(unittest.Logger()), - p2ptest.WithPeerScoringEnabled(idProvider), + p2ptest.OverrideFlowConfig(cfg), p2ptest.WithRole(flow.RoleConsensus)) // two verification nodes. verNode1, verId1 := p2ptest.NodeFixture(t, sporkId, t.Name(), + idProvider, p2ptest.WithLogger(unittest.Logger()), - p2ptest.WithPeerScoringEnabled(idProvider), + p2ptest.OverrideFlowConfig(cfg), p2ptest.WithRole(flow.RoleVerification)) verNode2, verId2 := p2ptest.NodeFixture(t, sporkId, t.Name(), + idProvider, p2ptest.WithLogger(unittest.Logger()), - p2ptest.WithPeerScoringEnabled(idProvider), + p2ptest.OverrideFlowConfig(cfg), p2ptest.WithRole(flow.RoleVerification)) + // suppress peer provider error + peerProvider := func() peer.IDSlice { + return []peer.ID{conNode.ID(), verNode1.ID(), verNode2.ID()} + } + verNode1.WithPeersProvider(peerProvider) + verNode2.WithPeersProvider(peerProvider) + conNode.WithPeersProvider(peerProvider) + ids := flow.IdentityList{&conId, &verId1, &verId2} nodes := []p2p.LibP2PNode{conNode, verNode1, verNode2} @@ -204,8 +220,8 @@ func TestSubscriptionValidator_Integration(t *testing.T) { return ok }) - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) @@ -234,47 +250,81 @@ func TestSubscriptionValidator_Integration(t *testing.T) { // let the subscriptions be established time.Sleep(2 * time.Second) - proposalMsg := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channels.PushBlocks) - // consensus node publishes a proposal - require.NoError(t, conNode.Publish(ctx, blockTopic, proposalMsg)) + outgoingMessageScope, err := message.NewOutgoingScope( + ids.NodeIDs(), + channels.TopicFromChannel(channels.PushBlocks, sporkId), + (*messages.Proposal)(unittest.ProposalFixture()), + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + require.NoError(t, conNode.Publish(ctx, outgoingMessageScope)) // checks that the message is received by all nodes.
ctx1s, cancel1s := context.WithTimeout(ctx, 1*time.Second) defer cancel1s() - p2pfixtures.SubsMustReceiveMessage(t, ctx1s, proposalMsg, []p2p.Subscription{conSub, ver1SubBlocks, ver2SubBlocks}) + + expectedReceivedData, err := outgoingMessageScope.Proto().Marshal() + require.NoError(t, err) + + p2pfixtures.SubsMustReceiveMessage(t, ctx1s, expectedReceivedData, []p2p.Subscription{conSub, ver1SubBlocks, ver2SubBlocks}) // now consensus node is doing something very bad! // it is subscribing to a channel that it is not supposed to subscribe to. conSubChunks, err := conNode.Subscribe(channels.TopicFromChannel(channels.RequestChunks, sporkId), topicValidator) require.NoError(t, err) - // let's wait for a bit to subscription propagate. - time.Sleep(5 * time.Second) + invalidSubscriptionPenalty := cfg.NetworkConfig.GossipSub.ScoringParameters.PeerScoring.Protocol.AppSpecificScore.InvalidSubscriptionPenalty + require.Eventually(t, func() bool { + score, ok := verNode1.PeerScoreExposer().GetScore(conNode.ID()) + return score == invalidSubscriptionPenalty && ok + }, 5*time.Second, 200*time.Millisecond) + require.Eventually(t, func() bool { + score, ok := verNode2.PeerScoreExposer().GetScore(conNode.ID()) + return score == invalidSubscriptionPenalty && ok + }, 5*time.Second, 200*time.Millisecond) // consensus node publishes another proposal, but this time, it should not reach verification node. // since upon an unauthorized subscription, verification node should have slashed consensus node on // the GossipSub scoring protocol. - proposalMsg = p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channels.PushBlocks) - // publishes a message to the topic. - require.NoError(t, conNode.Publish(ctx, blockTopic, proposalMsg)) + outgoingMessageScope, err = message.NewOutgoingScope( + ids.NodeIDs(), + channels.TopicFromChannel(channels.PushBlocks, sporkId), + (*messages.Proposal)(unittest.ProposalFixture()), + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) ctx5s, cancel5s := context.WithTimeout(ctx, 5*time.Second) defer cancel5s() - p2pfixtures.SubsMustNeverReceiveAnyMessage(t, ctx5s, []p2p.Subscription{ver1SubBlocks, ver2SubBlocks}) + p2pfixtures.SubsMustEventuallyStopReceivingAnyMessage(t, ctx5s, []p2p.Subscription{ver1SubBlocks, ver2SubBlocks}, func(t *testing.T) { + require.NoError(t, conNode.Publish(ctx, outgoingMessageScope)) + }) // moreover, a verification node publishing a message to the request chunk topic should not reach consensus node. // however, both verification nodes should receive the message. 
- chunkDataPackRequestMsg := p2pfixtures.MustEncodeEvent(t, &messages.ChunkDataRequest{ - ChunkID: unittest.IdentifierFixture(), - Nonce: rand.Uint64(), - }, channels.RequestChunks) - require.NoError(t, verNode1.Publish(ctx, channels.TopicFromChannel(channels.RequestChunks, sporkId), chunkDataPackRequestMsg)) + outgoingMessageScope, err = message.NewOutgoingScope( + ids.NodeIDs(), + channels.TopicFromChannel(channels.RequestChunks, sporkId), + &messages.ChunkDataRequest{ + ChunkID: unittest.IdentifierFixture(), + Nonce: rand.Uint64(), + }, + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + + require.NoError(t, verNode1.Publish(ctx, outgoingMessageScope)) ctx1s, cancel1s = context.WithTimeout(ctx, 1*time.Second) defer cancel1s() - p2pfixtures.SubsMustReceiveMessage(t, ctx1s, chunkDataPackRequestMsg, []p2p.Subscription{ver1SubChunks, ver2SubChunks}) + expectedReceivedData, err = outgoingMessageScope.Proto().Marshal() + require.NoError(t, err) + + p2pfixtures.SubsMustReceiveMessage(t, ctx1s, expectedReceivedData, []p2p.Subscription{ver1SubChunks, ver2SubChunks}) ctx5s, cancel5s = context.WithTimeout(ctx, 5*time.Second) defer cancel5s() - p2pfixtures.SubsMustNeverReceiveAnyMessage(t, ctx5s, []p2p.Subscription{conSubChunks}) + p2pfixtures.SubsMustEventuallyStopReceivingAnyMessage(t, ctx5s, []p2p.Subscription{conSubChunks}, func(t *testing.T) { + require.NoError(t, verNode1.Publish(ctx, outgoingMessageScope)) + }) } diff --git a/network/p2p/scoring/utils.go b/network/p2p/scoring/utils.go index a1358f72f56..53d0ff0e620 100644 --- a/network/p2p/scoring/utils.go +++ b/network/p2p/scoring/utils.go @@ -14,7 +14,7 @@ func HasValidFlowIdentity(idProvider module.IdentityProvider, pid peer.ID) (*flo return nil, NewInvalidPeerIDError(pid, PeerIdStatusUnknown) } - if flowId.Ejected { + if flowId.IsEjected() { return nil, NewInvalidPeerIDError(pid, PeerIdStatusEjected) } diff --git a/network/p2p/scoring/utils_test.go b/network/p2p/scoring/utils_test.go index 5a458e1a730..3ddfdb09e97 100644 --- a/network/p2p/scoring/utils_test.go +++ b/network/p2p/scoring/utils_test.go @@ -5,15 +5,15 @@ import ( "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/network/internal/p2pfixtures" "github.com/onflow/flow-go/network/p2p/scoring" "github.com/onflow/flow-go/utils/unittest" ) // TestHasValidIdentity_Unknown tests that when a peer has an unknown identity, the HasValidIdentity returns InvalidPeerIDError func TestHasValidIdentity_Unknown(t *testing.T) { - peerId := p2pfixtures.PeerIdFixture(t) + peerId := unittest.PeerIdFixture(t) idProvider := mock.NewIdentityProvider(t) idProvider.On("ByPeerID", peerId).Return(nil, false) @@ -29,8 +29,8 @@ func TestHasValidIdentity_Ejected(t *testing.T) { idProvider := mock.NewIdentityProvider(t) ejectedIdentity := unittest.IdentityFixture() - ejectedIdentity.Ejected = true - peerId := p2pfixtures.PeerIdFixture(t) + ejectedIdentity.EpochParticipationStatus = flow.EpochParticipationStatusEjected + peerId := unittest.PeerIdFixture(t) idProvider.On("ByPeerID", peerId).Return(ejectedIdentity, true) identity, err := scoring.HasValidFlowIdentity(idProvider, peerId) @@ -45,7 +45,7 @@ func TestHasValidIdentity_Valid(t *testing.T) { idProvider := mock.NewIdentityProvider(t) trueID := unittest.IdentityFixture() - peerId := p2pfixtures.PeerIdFixture(t) + peerId := unittest.PeerIdFixture(t) idProvider.On("ByPeerID", peerId).Return(trueID, true) identity, err := 
scoring.HasValidFlowIdentity(idProvider, peerId) diff --git a/network/p2p/stream.go b/network/p2p/stream.go new file mode 100644 index 00000000000..a012ef8926c --- /dev/null +++ b/network/p2p/stream.go @@ -0,0 +1,19 @@ +package p2p + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +// StreamFactory is a wrapper around libp2p host.Host to provide abstraction and encapsulation for unicast stream manager so that +// it can create libp2p streams with finer granularity. +type StreamFactory interface { + SetStreamHandler(protocol.ID, network.StreamHandler) + // NewStream creates a new stream on the libp2p host. + // Expected errors during normal operations: + // - ErrProtocolNotSupported this indicates remote node is running on a different spork. + NewStream(context.Context, peer.ID, protocol.ID) (network.Stream, error) +} diff --git a/network/p2p/subscription.go b/network/p2p/subscription.go index 9d4a117d0bc..99212b566d1 100644 --- a/network/p2p/subscription.go +++ b/network/p2p/subscription.go @@ -7,10 +7,12 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/component" ) // SubscriptionProvider provides a list of topics a peer is subscribed to. type SubscriptionProvider interface { + component.Component // GetSubscribedTopics returns all the subscriptions of a peer within the pubsub network. // Note that the current peer must be subscribed to the topic for it to the same topics in order // to query for other peers, e.g., if current peer has subscribed to topics A and B, and peer1 @@ -22,9 +24,7 @@ type SubscriptionProvider interface { // SubscriptionValidator validates the subscription of a peer to a topic. // It is used to ensure that a peer is only subscribed to topics that it is allowed to subscribe to. type SubscriptionValidator interface { - // RegisterSubscriptionProvider registers the subscription provider with the subscription validator. - // If there is a subscription provider already registered, it will be replaced by the new one. - RegisterSubscriptionProvider(provider SubscriptionProvider) error + component.Component // CheckSubscribedToAllowedTopics checks if a peer is subscribed to topics that it is allowed to subscribe to. // Args: // pid: the peer ID of the peer to check diff --git a/network/p2p/subscription/subscriptionManager.go b/network/p2p/subscription/subscriptionManager.go index 72aba58d1b9..b4342a94226 100644 --- a/network/p2p/subscription/subscriptionManager.go +++ b/network/p2p/subscription/subscriptionManager.go @@ -11,15 +11,20 @@ import ( // ChannelSubscriptionManager manages subscriptions of engines running on the node to channels. // Each channel should be taken by at most a single engine. type ChannelSubscriptionManager struct { - mu sync.RWMutex - engines map[channels.Channel]network.MessageProcessor - mw network.Middleware + mu sync.RWMutex + engines map[channels.Channel]network.MessageProcessor + networkUnderlay network.Underlay // the Underlay interface of the network layer } -func NewChannelSubscriptionManager(mw network.Middleware) *ChannelSubscriptionManager { +// NewChannelSubscriptionManager creates a new subscription manager. +// Args: +// - networkUnderlay: the Underlay interface of the network layer. +// Returns: +// - a new subscription manager. 
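+// A minimal usage sketch (the concrete Underlay implementation is an assumption;
+// in tests it is typically a mock):
+//
+//	sm := subscription.NewChannelSubscriptionManager(underlay)
+//	if err := sm.Register(channels.PushBlocks, engine); err != nil {
+//		return fmt.Errorf("could not register engine: %w", err)
+//	}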
+func NewChannelSubscriptionManager(underlay network.Underlay) *ChannelSubscriptionManager { return &ChannelSubscriptionManager{ - engines: make(map[channels.Channel]network.MessageProcessor), - mw: mw, + engines: make(map[channels.Channel]network.MessageProcessor), + networkUnderlay: underlay, } } @@ -34,8 +39,9 @@ func (sm *ChannelSubscriptionManager) Register(channel channels.Channel, engine return fmt.Errorf("subscriptionManager: channel already registered: %s", channel) } - // registers the channel with the middleware to let middleware start receiving messages - err := sm.mw.Subscribe(channel) + // registers the channel with the networkUnderlay to let networkUnderlay start receiving messages + // TODO: subscribe function should be replaced by a better abstraction of the network. + err := sm.networkUnderlay.Subscribe(channel) if err != nil { return fmt.Errorf("subscriptionManager: failed to subscribe to channel %s: %w", channel, err) } @@ -58,9 +64,9 @@ func (sm *ChannelSubscriptionManager) Unregister(channel channels.Channel) error return nil } - err := sm.mw.Unsubscribe(channel) + err := sm.networkUnderlay.Unsubscribe(channel) if err != nil { - return fmt.Errorf("subscriptionManager: failed to unregister from channel %s", channel) + return fmt.Errorf("subscriptionManager: failed to unregister from channel %s: %w", channel, err) } delete(sm.engines, channel) diff --git a/network/p2p/subscription/subscription_filter_test.go b/network/p2p/subscription/subscription_filter_test.go index 0c3d1d8b88c..da6429e3d7c 100644 --- a/network/p2p/subscription/subscription_filter_test.go +++ b/network/p2p/subscription/subscription_filter_test.go @@ -16,6 +16,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/p2pfixtures" + "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/subscription" p2ptest "github.com/onflow/flow-go/network/p2p/test" @@ -41,8 +42,8 @@ func TestFilterSubscribe(t *testing.T) { unstakedKey := unittest.NetworkingPrivKeyFixture() unstakedNode := p2pfixtures.CreateNode(t, unstakedKey, sporkId, zerolog.Nop(), ids) - require.NoError(t, node1.AddPeer(context.TODO(), *host.InfoFromHost(node2.Host()))) - require.NoError(t, node1.AddPeer(context.TODO(), *host.InfoFromHost(unstakedNode.Host()))) + require.NoError(t, node1.ConnectToPeer(context.TODO(), *host.InfoFromHost(node2.Host()))) + require.NoError(t, node1.ConnectToPeer(context.TODO(), *host.InfoFromHost(unstakedNode.Host()))) badTopic := channels.TopicFromChannel(channels.SyncCommittee, sporkId) @@ -67,7 +68,7 @@ func TestFilterSubscribe(t *testing.T) { // check that node1 and node2 don't accept unstakedNode as a peer require.Never(t, func() bool { for _, pid := range node1.ListPeers(badTopic.String()) { - if pid == unstakedNode.Host().ID() { + if pid == unstakedNode.ID() { return true } } @@ -78,16 +79,26 @@ func TestFilterSubscribe(t *testing.T) { wg.Add(2) testPublish := func(wg *sync.WaitGroup, from p2p.LibP2PNode, sub p2p.Subscription) { - data := []byte("hello") - err := from.Publish(context.TODO(), badTopic, data) + outgoingMessageScope, err := message.NewOutgoingScope( + ids.NodeIDs(), + channels.TopicFromChannel(channels.SyncCommittee, sporkId), + []byte("hello"), + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + + err = from.Publish(context.TODO(), outgoingMessageScope) require.NoError(t, err) ctx, cancel := 
context.WithTimeout(context.Background(), time.Second) msg, err := sub.Next(ctx) cancel() require.NoError(t, err) - require.Equal(t, msg.Data, data) + + expectedReceivedData, err := outgoingMessageScope.Proto().Marshal() + require.NoError(t, err) + require.Equal(t, msg.Data, expectedReceivedData) ctx, cancel = context.WithTimeout(context.Background(), time.Second) _, err = unstakedSub.Next(ctx) @@ -122,8 +133,8 @@ func TestCanSubscribe(t *testing.T) { flow.IdentityList{identity}, p2pfixtures.WithSubscriptionFilter(subscriptionFilter(identity, flow.IdentityList{identity}))) - p2ptest.StartNode(t, signalerCtx, collectionNode, 100*time.Millisecond) - defer p2ptest.StopNode(t, collectionNode, cancel, 1*time.Second) + p2ptest.StartNode(t, signalerCtx, collectionNode) + defer p2ptest.StopNode(t, collectionNode, cancel) logger := unittest.Logger() topicValidator := flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter()) diff --git a/network/p2p/test/fixtures.go b/network/p2p/test/fixtures.go index eabb7634fa8..88f3c7667cd 100644 --- a/network/p2p/test/fixtures.go +++ b/network/p2p/test/fixtures.go @@ -3,42 +3,67 @@ package p2ptest import ( "bufio" "context" - "crypto/rand" + crand "math/rand" + "sync" "testing" "time" dht "github.com/libp2p/go-libp2p-kad-dht" + pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/routing" - mh "github.com/multiformats/go-multihash" + discoveryBackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/require" + "golang.org/x/exp/rand" - "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/config" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" + flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/p2pfixtures" - "github.com/onflow/flow-go/network/internal/testutils" + "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" + p2pbuilder "github.com/onflow/flow-go/network/p2p/builder" + p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config" "github.com/onflow/flow-go/network/p2p/connection" p2pdht "github.com/onflow/flow-go/network/p2p/dht" - "github.com/onflow/flow-go/network/p2p/p2pbuilder" - inspectorbuilder "github.com/onflow/flow-go/network/p2p/p2pbuilder/inspector" - "github.com/onflow/flow-go/network/p2p/unicast" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/utils" validator "github.com/onflow/flow-go/network/validator/pubsub" "github.com/onflow/flow-go/utils/logging" + randutils "github.com/onflow/flow-go/utils/rand" "github.com/onflow/flow-go/utils/unittest" ) +const ( + // libp2pNodeStartupTimeout is the timeout for starting a libp2p node in tests. Note that the + // timeout has been selected to be large enough to allow for the node to start up on a CI even when + // the test is run in parallel with other tests. Hence, no further increase of the timeout is + // expected to be necessary. Any failure to start a node within this timeout is likely to be + // caused by a bug in the code. 
+ libp2pNodeStartupTimeout = 10 * time.Second + // libp2pNodeShutdownTimeout is the timeout for stopping a libp2p node in tests. Note that the + // timeout has been selected to be large enough to allow for the node to shut down on a CI even when + // the test is run in parallel with other tests. Hence, no further increase of the timeout is + // expected to be necessary. Any failure to stop a node within this timeout is likely to be + // caused by a bug in the code. + libp2pNodeShutdownTimeout = 10 * time.Second + + // topicIDFixtureLen is the length of the topic ID fixture for testing. + topicIDFixtureLen = 10 + // messageIDFixtureLen is the length of the message ID fixture for testing. + messageIDFixtureLen = 10 +) + // NetworkingKeyFixtures is a test helper that generates an ECDSA flow key pair. func NetworkingKeyFixtures(t *testing.T) crypto.PrivateKey { seed := unittest.SeedFixture(48) @@ -53,66 +78,93 @@ func NodeFixture( t *testing.T, sporkID flow.Identifier, dhtPrefix string, + idProvider module.IdentityProvider, opts ...NodeFixtureParameterOption, ) (p2p.LibP2PNode, flow.Identity) { - logger := unittest.Logger().Level(zerolog.ErrorLevel) - - rpcInspectorSuite, err := inspectorbuilder.NewGossipSubInspectorBuilder( - logger, - sporkID, - inspectorbuilder.DefaultGossipSubRPCInspectorsConfig()). - Build() + defaultFlowConfig, err := config.DefaultConfig() require.NoError(t, err) + require.NotNil(t, idProvider) + connectionGater := NewConnectionGater(idProvider, func(p peer.ID) error { + return nil + }) + require.NotNil(t, connectionGater) parameters := &NodeFixtureParameters{ - HandlerFunc: func(network.Stream) {}, - Unicasts: nil, - Key: NetworkingKeyFixtures(t), - Address: unittest.DefaultAddress, - Logger: logger, - Role: flow.RoleCollection, - CreateStreamRetryDelay: unicast.DefaultRetryDelay, - Metrics: metrics.NewNoopCollector(), - ResourceManager: testutils.NewResourceManager(t), - GossipSubPeerScoreTracerInterval: 0, // disabled by default - GossipSubRPCInspector: rpcInspectorSuite, + NetworkingType: flownet.PrivateNetwork, + HandlerFunc: func(network.Stream) {}, + Unicasts: nil, + Key: NetworkingKeyFixtures(t), + Address: unittest.DefaultAddress, + Logger: unittest.Logger().Level(zerolog.WarnLevel), + Role: flow.RoleCollection, + IdProvider: idProvider, + MetricsCfg: &p2pbuilderconfig.MetricsConfig{ + HeroCacheFactory: metrics.NewNoopHeroCacheMetricsFactory(), + Metrics: metrics.NewNoopCollector(), + }, + ResourceManager: &network.NullResourceManager{}, + ConnGater: connectionGater, + PeerManagerConfig: PeerManagerConfigFixture(), + FlowConfig: defaultFlowConfig, } for _, opt := range opts { opt(parameters) } - identity := unittest.IdentityFixture( - unittest.WithNetworkingKey(parameters.Key.PublicKey()), + identity := unittest.IdentityFixture(unittest.WithNetworkingKey(parameters.Key.PublicKey()), unittest.WithAddress(parameters.Address), unittest.WithRole(parameters.Role)) - logger = parameters.Logger.With().Hex("node_id", logging.ID(identity.NodeID)).Logger() + logger := parameters.Logger.With().Hex("node_id", logging.ID(identity.NodeID)).Logger() - connManager, err := connection.NewConnManager(logger, parameters.Metrics, connection.DefaultConnManagerConfig()) + connManager, err := connection.NewConnManager(logger, parameters.MetricsCfg.Metrics, &parameters.FlowConfig.NetworkConfig.ConnectionManager) require.NoError(t, err) builder := p2pbuilder.NewNodeBuilder( logger, - parameters.Metrics, + &parameters.FlowConfig.NetworkConfig.GossipSub, +
parameters.MetricsCfg, + parameters.NetworkingType, parameters.Address, parameters.Key, sporkID, - p2pbuilder.DefaultResourceManagerConfig()). + parameters.IdProvider, + &parameters.FlowConfig.NetworkConfig.ResourceManager, + parameters.PeerManagerConfig, + &p2p.DisallowListCacheConfig{ + MaxSize: uint32(1000), + Metrics: metrics.NewNoopCollector(), + }, + &p2pbuilderconfig.UnicastConfig{ + Unicast: parameters.FlowConfig.NetworkConfig.Unicast, + RateLimiterDistributor: parameters.UnicastRateLimiterDistributor, + }). SetConnectionManager(connManager). - SetRoutingSystem(func(c context.Context, h host.Host) (routing.Routing, error) { - return p2pdht.NewDHT(c, h, + SetResourceManager(parameters.ResourceManager) + + if parameters.DhtOptions != nil && (parameters.Role != flow.RoleAccess && parameters.Role != flow.RoleExecution) { + require.Fail(t, "DHT should not be enabled for non-access and non-execution nodes") + } + + if parameters.Role == flow.RoleAccess || parameters.Role == flow.RoleExecution { + // Only access and execution nodes need to run DHT; + // Access nodes and execution nodes need DHT to run a blob service. + // Moreover, access nodes run a DHT to let un-staked (public) access nodes find each other on the public network. + builder.SetRoutingSystem(func(ctx context.Context, host host.Host) (routing.Routing, error) { + return p2pdht.NewDHT(ctx, + host, protocol.ID(protocols.FlowDHTProtocolIDPrefix+sporkID.String()+"/"+dhtPrefix), logger, - parameters.Metrics, - parameters.DhtOptions..., - ) - }). - SetCreateNode(p2pbuilder.DefaultCreateNodeFunc). - SetStreamCreationRetryInterval(parameters.CreateStreamRetryDelay). - SetResourceManager(parameters.ResourceManager). - SetGossipSubRpcInspectorSuite(parameters.GossipSubRPCInspector) + parameters.MetricsCfg.Metrics, + parameters.DhtOptions...)
+ }) + } + + if parameters.GossipSubRpcInspectorFactory != nil { + builder.OverrideDefaultRpcInspectorFactory(parameters.GossipSubRpcInspectorFactory) + } if parameters.ResourceManager != nil { builder.SetResourceManager(parameters.ResourceManager) @@ -123,33 +175,28 @@ func NodeFixture( } if parameters.PeerScoringEnabled { - builder.EnableGossipSubPeerScoring(parameters.IdProvider, parameters.PeerScoreConfig) - } - - if parameters.UpdateInterval != 0 { - require.NotNil(t, parameters.PeerProvider) - builder.SetPeerManagerOptions(parameters.ConnectionPruning, parameters.UpdateInterval) + builder.OverrideGossipSubScoringConfig(parameters.PeerScoringConfigOverride) } if parameters.GossipSubFactory != nil && parameters.GossipSubConfig != nil { - builder.SetGossipSubFactory(parameters.GossipSubFactory, parameters.GossipSubConfig) + builder.OverrideGossipSubFactory(parameters.GossipSubFactory, parameters.GossipSubConfig) } if parameters.ConnManager != nil { builder.SetConnectionManager(parameters.ConnManager) } - if parameters.PubSubTracer != nil { - builder.SetGossipSubTracer(parameters.PubSubTracer) + if parameters.ValidateQueueSize > 0 { + builder.OverrideDefaultValidateQueueSize(parameters.ValidateQueueSize) } - builder.SetGossipSubScoreTracerInterval(parameters.GossipSubPeerScoreTracerInterval) - n, err := builder.Build() require.NoError(t, err) - err = n.WithDefaultUnicastProtocol(parameters.HandlerFunc, parameters.Unicasts) - require.NoError(t, err) + if parameters.HandlerFunc != nil { + err = n.WithDefaultUnicastProtocol(parameters.HandlerFunc, parameters.Unicasts) + require.NoError(t, err) + } // get the actual IP and port that have been assigned by the subsystem ip, port, err := n.GetIPPort() @@ -163,56 +210,87 @@ func NodeFixture( return n, *identity } +// RegisterPeerProviders registers the peer provider for all the nodes in the input slice. +// All node ids are registered as the peers provider for all the nodes. +// This means that every node will be connected to every other node by the peer manager. +// This is useful for suppressing the "peer provider not set" verbose warning logs in tests scenarios where +// it is desirable to have all nodes connected to each other. +// Args: +// - t: testing.T- the test object; not used, but included in the signature to defensively prevent misuse of the test utility in production. +// - nodes: nodes to register the peer provider for, each node will be connected to all other nodes. 
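+// A typical call site (sketch; fixtures as defined in this package):
+//
+//	nodes, _ := NodesFixture(t, sporkID, t.Name(), 3, idProvider)
+//	StartNodes(t, signalerCtx, nodes)
+//	defer StopNodes(t, nodes, cancel)
+//	RegisterPeerProviders(t, nodes)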
+func RegisterPeerProviders(_ *testing.T, nodes []p2p.LibP2PNode) { + ids := peer.IDSlice{} + for _, node := range nodes { + ids = append(ids, node.ID()) + } + for _, node := range nodes { + node.WithPeersProvider(func() peer.IDSlice { + return ids + }) + } +} + type NodeFixtureParameterOption func(*NodeFixtureParameters) type NodeFixtureParameters struct { - HandlerFunc network.StreamHandler - Unicasts []protocols.ProtocolName - Key crypto.PrivateKey - Address string - DhtOptions []dht.Option - Role flow.Role - Logger zerolog.Logger - PeerScoringEnabled bool - IdProvider module.IdentityProvider - PeerScoreConfig *p2p.PeerScoringConfig - ConnectionPruning bool // peer manager parameter - UpdateInterval time.Duration // peer manager parameter - PeerProvider p2p.PeersProvider // peer manager parameter - ConnGater connmgr.ConnectionGater - ConnManager connmgr.ConnManager - GossipSubFactory p2p.GossipSubFactoryFunc - GossipSubConfig p2p.GossipSubAdapterConfigFunc - Metrics module.LibP2PMetrics - ResourceManager network.ResourceManager - PubSubTracer p2p.PubSubTracer - GossipSubPeerScoreTracerInterval time.Duration // intervals at which the peer score is updated and logged. - CreateStreamRetryDelay time.Duration - GossipSubRPCInspector p2p.GossipSubInspectorSuite -} - -func WithGossipSubRpcInspectorSuite(inspectorSuite p2p.GossipSubInspectorSuite) NodeFixtureParameterOption { + HandlerFunc network.StreamHandler + NetworkingType flownet.NetworkingType + Unicasts []protocols.ProtocolName + Key crypto.PrivateKey + Address string + DhtOptions []dht.Option + Role flow.Role + Logger zerolog.Logger + PeerScoringEnabled bool + IdProvider module.IdentityProvider + PeerScoringConfigOverride *p2p.PeerScoringConfigOverride + PeerManagerConfig *p2pbuilderconfig.PeerManagerConfig + PeerProvider p2p.PeersProvider // peer manager parameter + ConnGater p2p.ConnectionGater + ConnManager connmgr.ConnManager + GossipSubFactory p2p.GossipSubFactoryFunc + GossipSubConfig p2p.GossipSubAdapterConfigFunc + MetricsCfg *p2pbuilderconfig.MetricsConfig + ResourceManager network.ResourceManager + GossipSubRpcInspectorFactory p2p.GossipSubRpcInspectorFactoryFunc + FlowConfig *config.FlowConfig + UnicastRateLimiterDistributor p2p.UnicastRateLimiterDistributor + ValidateQueueSize int +} + +func WithUnicastRateLimitDistributor(distributor p2p.UnicastRateLimiterDistributor) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { - p.GossipSubRPCInspector = inspectorSuite + p.UnicastRateLimiterDistributor = distributor } } -func WithCreateStreamRetryDelay(delay time.Duration) NodeFixtureParameterOption { +func OverrideGossipSubRpcInspectorFactory(factory p2p.GossipSubRpcInspectorFactoryFunc) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { - p.CreateStreamRetryDelay = delay + p.GossipSubRpcInspectorFactory = factory } } -func WithPeerScoringEnabled(idProvider module.IdentityProvider) NodeFixtureParameterOption { +func OverrideFlowConfig(cfg *config.FlowConfig) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { - p.PeerScoringEnabled = true - p.IdProvider = idProvider + p.FlowConfig = cfg } } -func WithGossipSubTracer(tracer p2p.PubSubTracer) NodeFixtureParameterOption { +// EnablePeerScoringWithOverride enables peer scoring for the GossipSub pubsub system with the given override. +// Any existing peer scoring config attribute that is set in the override will override the default peer scoring config. 
+// Anything that is left to nil or zero value in the override will be ignored and the default value will be used. +// Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing. +// Default Use Tip: use p2p.PeerScoringConfigNoOverride as the argument to this function to enable peer scoring without any override. +// Args: +// - PeerScoringConfigOverride: override for the peer scoring config- Recommended to use p2p.PeerScoringConfigNoOverride for production or when +// you don't want to override the default peer scoring config. +// +// Returns: +// - NodeFixtureParameterOption: a function that can be passed to the NodeFixture function to enable peer scoring. +func EnablePeerScoringWithOverride(override *p2p.PeerScoringConfigOverride) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { - p.PubSubTracer = tracer + p.PeerScoringEnabled = true + p.PeerScoringConfigOverride = override } } @@ -222,10 +300,9 @@ func WithDefaultStreamHandler(handler network.StreamHandler) NodeFixtureParamete } } -func WithPeerManagerEnabled(connectionPruning bool, updateInterval time.Duration, peerProvider p2p.PeersProvider) NodeFixtureParameterOption { +func WithPeerManagerEnabled(cfg *p2pbuilderconfig.PeerManagerConfig, peerProvider p2p.PeersProvider) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { - p.ConnectionPruning = connectionPruning - p.UpdateInterval = updateInterval + p.PeerManagerConfig = cfg p.PeerProvider = peerProvider } } @@ -254,7 +331,7 @@ func WithDHTOptions(opts ...dht.Option) NodeFixtureParameterOption { } } -func WithConnectionGater(connGater connmgr.ConnectionGater) NodeFixtureParameterOption { +func WithConnectionGater(connGater p2p.ConnectionGater) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { p.ConnGater = connGater } @@ -272,9 +349,9 @@ func WithRole(role flow.Role) NodeFixtureParameterOption { } } -func WithPeerScoreParamsOption(cfg *p2p.PeerScoringConfig) NodeFixtureParameterOption { +func WithPeerScoreParamsOption(cfg *p2p.PeerScoringConfigOverride) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { - p.PeerScoreConfig = cfg + p.PeerScoringConfigOverride = cfg } } @@ -286,35 +363,85 @@ func WithLogger(logger zerolog.Logger) NodeFixtureParameterOption { func WithMetricsCollector(metrics module.NetworkMetrics) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { - p.Metrics = metrics + p.MetricsCfg.Metrics = metrics } } -func WithPeerScoreTracerInterval(interval time.Duration) NodeFixtureParameterOption { +// WithDefaultResourceManager sets the resource manager to nil, which will cause the node to use the default resource manager. +// Otherwise, it uses the resource manager provided by the test (the infinite resource manager). +func WithDefaultResourceManager() NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { - p.GossipSubPeerScoreTracerInterval = interval + p.ResourceManager = nil } } -// WithDefaultResourceManager sets the resource manager to nil, which will cause the node to use the default resource manager. +// WithResourceManager sets the resource manager to the provided resource manager. // Otherwise, it uses the resource manager provided by the test (the infinite resource manager). 
-func WithDefaultResourceManager() NodeFixtureParameterOption { +func WithResourceManager(resourceManager network.ResourceManager) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { - p.ResourceManager = nil + p.ResourceManager = resourceManager + } +} + +func WithUnicastHandlerFunc(handler network.StreamHandler) NodeFixtureParameterOption { + return func(p *NodeFixtureParameters) { + p.HandlerFunc = handler + } +} + +// WithValidateQueueSize sets the size of the validation queue for the node. +// Use this to set a higher value to prevent message loss during tests +func WithValidateQueueSize(size int) NodeFixtureParameterOption { + return func(p *NodeFixtureParameters) { + p.ValidateQueueSize = size + } +} + +// PeerManagerConfigFixture is a test fixture that sets the default config for the peer manager. +func PeerManagerConfigFixture(opts ...func(*p2pbuilderconfig.PeerManagerConfig)) *p2pbuilderconfig.PeerManagerConfig { + cfg := &p2pbuilderconfig.PeerManagerConfig{ + ConnectionPruning: true, + UpdateInterval: 1 * time.Second, + ConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(), + } + for _, opt := range opts { + opt(cfg) + } + return cfg +} + +// WithZeroJitterAndZeroBackoff is a test fixture that sets the default config for the peer manager. +// It uses a backoff connector with zero jitter and zero backoff. +func WithZeroJitterAndZeroBackoff(t *testing.T) func(*p2pbuilderconfig.PeerManagerConfig) { + return func(cfg *p2pbuilderconfig.PeerManagerConfig) { + cfg.ConnectorFactory = func(host host.Host) (p2p.Connector, error) { + cacheSize := 100 + dialTimeout := time.Minute * 2 + backoff := discoveryBackoff.NewExponentialBackoff(1*time.Second, 1*time.Hour, func(_, _, _ time.Duration, _ *crand.Rand) time.Duration { + return 0 // no jitter + }, time.Second, 1, 0, crand.NewSource(crand.Int63())) + backoffConnector, err := discoveryBackoff.NewBackoffConnector(host, cacheSize, dialTimeout, backoff) + require.NoError(t, err) + return backoffConnector, nil + } } } // NodesFixture is a test fixture that creates a number of libp2p nodes with the given callback function for stream handling. // It returns the nodes and their identities. -func NodesFixture(t *testing.T, sporkID flow.Identifier, dhtPrefix string, count int, opts ...NodeFixtureParameterOption) ([]p2p.LibP2PNode, - flow.IdentityList) { +func NodesFixture(t *testing.T, + sporkID flow.Identifier, + dhtPrefix string, + count int, + idProvider module.IdentityProvider, + opts ...NodeFixtureParameterOption) ([]p2p.LibP2PNode, flow.IdentityList) { var nodes []p2p.LibP2PNode // creating nodes var identities flow.IdentityList for i := 0; i < count; i++ { // create a node on localhost with a random port assigned by the OS - node, identity := NodeFixture(t, sporkID, dhtPrefix, opts...) + node, identity := NodeFixture(t, sporkID, dhtPrefix, idProvider, opts...) 
nodes = append(nodes, node) identities = append(identities, &identity) } @@ -324,7 +451,7 @@ func NodesFixture(t *testing.T, sporkID flow.Identifier, dhtPrefix string, count // StartNodes starts all nodes in the input slice using the provided context, timing out if nodes are // not all Ready() before duration expires -func StartNodes(t *testing.T, ctx irrecoverable.SignalerContext, nodes []p2p.LibP2PNode, timeout time.Duration) { +func StartNodes(t *testing.T, ctx irrecoverable.SignalerContext, nodes []p2p.LibP2PNode) { rdas := make([]module.ReadyDoneAware, 0, len(nodes)) for _, node := range nodes { node.Start(ctx) @@ -336,30 +463,48 @@ func StartNodes(t *testing.T, ctx irrecoverable.SignalerContext, nodes []p2p.Lib rdas = append(rdas, peerManager) } } - unittest.RequireComponentsReadyBefore(t, timeout, rdas...) + for _, r := range rdas { + // Any failure to start a node within this timeout is likely to be caused by a bug in the code. + unittest.RequireComponentsReadyBefore(t, libp2pNodeStartupTimeout, r) + } } // StartNode starts a single node using the provided context, timing out if nodes are not all Ready() -// before duration expires +// before the startup timeout (i.e., libp2pNodeStartupTimeout) expires. +// Args: +// - t: testing.T- the test object. +// - ctx: context to use. +// - node: node to start. +func StartNode(t *testing.T, ctx irrecoverable.SignalerContext, node p2p.LibP2PNode) { node.Start(ctx) - unittest.RequireComponentsReadyBefore(t, timeout, node) + // Any failure to start a node within this timeout is likely to be caused by a bug in the code. + unittest.RequireComponentsReadyBefore(t, libp2pNodeStartupTimeout, node) } // StopNodes stops all nodes in the input slice using the provided cancel func, timing out if nodes are -// not all Done() before duration expires -func StopNodes(t *testing.T, nodes []p2p.LibP2PNode, cancel context.CancelFunc, timeout time.Duration) { +// not all Done() before the shutdown timeout (i.e., libp2pNodeShutdownTimeout) expires. +// Args: +// - t: testing.T- the test object. +// - nodes: nodes to stop. +// - cancel: cancel func; the function first cancels the context and then waits for the nodes to be done. +func StopNodes(t *testing.T, nodes []p2p.LibP2PNode, cancel context.CancelFunc) { cancel() for _, node := range nodes { - unittest.RequireComponentsDoneBefore(t, timeout, node) + // Any failure to stop a node within this timeout is likely to be caused by a bug in the code. + unittest.RequireComponentsDoneBefore(t, libp2pNodeShutdownTimeout, node) } } // StopNode stops a single node using the provided cancel func, timing out if nodes are not all Done() -// before duration expires -func StopNode(t *testing.T, node p2p.LibP2PNode, cancel context.CancelFunc, timeout time.Duration) { +// before the shutdown timeout (i.e., libp2pNodeShutdownTimeout) expires. +// Args: +// - t: testing.T- the test object. +// - node: node to stop. +// - cancel: cancel func; the function first cancels the context and then waits for the nodes to be done. +func StopNode(t *testing.T, node p2p.LibP2PNode, cancel context.CancelFunc) { cancel() - unittest.RequireComponentsDoneBefore(t, timeout, node) + // Any failure to stop a node within this timeout is likely to be caused by a bug in the code. + unittest.RequireComponentsDoneBefore(t, libp2pNodeShutdownTimeout, node) } // StreamHandlerFixture returns a stream handler that writes the received message to the given channel.
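// Taken together, the helpers above reduce the typical start/stop harness of a
// libp2p test to the following pattern (a sketch mirroring the call sites updated in this diff):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
//	p2ptest.StartNodes(t, signalerCtx, nodes)
//	defer p2ptest.StopNodes(t, nodes, cancel)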
@@ -381,27 +526,78 @@ func LetNodesDiscoverEachOther(t *testing.T, ctx context.Context, nodes []p2p.Li if node == other { continue } - otherPInfo, err := utils.PeerAddressInfo(*ids[i]) + otherPInfo, err := utils.PeerAddressInfo(ids[i].IdentitySkeleton) require.NoError(t, err) - require.NoError(t, node.AddPeer(ctx, otherPInfo)) + require.NoError(t, node.ConnectToPeer(ctx, otherPInfo)) } } } -// EnsureConnected ensures that the given nodes are connected to each other. +// TryConnectionAndEnsureConnected tries connecting nodes to each other and ensures that the given nodes are connected to each other. // It fails the test if any of the nodes is not connected to any other node. -func EnsureConnected(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode) { +func TryConnectionAndEnsureConnected(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode) { for _, node := range nodes { for _, other := range nodes { if node == other { continue } - require.NoError(t, node.Host().Connect(ctx, other.Host().Peerstore().PeerInfo(other.Host().ID()))) - require.Equal(t, node.Host().Network().Connectedness(other.Host().ID()), network.Connected) + require.NoError(t, node.Host().Connect(ctx, other.Host().Peerstore().PeerInfo(other.ID()))) + // the other node should be connected to this node + require.Equal(t, node.Host().Network().Connectedness(other.ID()), network.Connected) + // at least one connection should be established + require.True(t, len(node.Host().Network().ConnsToPeer(other.ID())) > 0) } } } +// RequireConnectedEventually ensures eventually that the given nodes are already connected to each other. +// It fails the test if any of the nodes is not connected to any other node. +// Args: +// - nodes: the nodes to check +// - tick: the tick duration +// - timeout: the timeout duration +func RequireConnectedEventually(t *testing.T, nodes []p2p.LibP2PNode, tick time.Duration, timeout time.Duration) { + require.Eventually(t, func() bool { + for _, node := range nodes { + for _, other := range nodes { + if node == other { + continue + } + if node.Host().Network().Connectedness(other.ID()) != network.Connected { + return false + } + if len(node.Host().Network().ConnsToPeer(other.ID())) == 0 { + return false + } + } + } + return true + }, timeout, tick) +} + +// RequireEventuallyNotConnected ensures eventually that the given groups of nodes are not connected to each other. +// It fails the test if any of the nodes from groupA is connected to any of the nodes from groupB. +// Args: +// - groupA: the first group of nodes +// - groupB: the second group of nodes +// - tick: the tick duration +// - timeout: the timeout duration +func RequireEventuallyNotConnected(t *testing.T, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode, tick time.Duration, timeout time.Duration) { + require.Eventually(t, func() bool { + for _, node := range groupA { + for _, other := range groupB { + if node.Host().Network().Connectedness(other.ID()) == network.Connected { + return false + } + if len(node.Host().Network().ConnsToPeer(other.ID())) > 0 { + return false + } + } + } + return true + }, timeout, tick) +} + // EnsureStreamCreationInBothDirections ensure that between each pair of nodes in the given list, a stream is created in both directions. 
func EnsureStreamCreationInBothDirections(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode) { for _, this := range nodes { @@ -410,25 +606,32 @@ func EnsureStreamCreationInBothDirections(t *testing.T, ctx context.Context, nod continue } // stream creation should pass without error - s, err := this.CreateStream(ctx, other.Host().ID()) + err := this.OpenAndWriteOnStream(ctx, other.ID(), t.Name(), func(stream network.Stream) error { + // do nothing + require.NotNil(t, stream) + return nil + }) require.NoError(t, err) - require.NotNil(t, s) + } } } // EnsurePubsubMessageExchange ensures that the given connected nodes exchange the given message on the given channel through pubsub. -// Note: EnsureConnected() must be called to connect all nodes before calling this function. -func EnsurePubsubMessageExchange(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, messageFactory func() (interface{}, channels.Topic)) { - _, topic := messageFactory() - +// Args: +// - nodes: the nodes to exchange messages +// - ctx: the context- the test will fail if the context expires. +// - topic: the topic to exchange messages on +// - count: the number of messages to exchange from each node. +// - messageFactory: a function that creates a unique message to be published by the node. +// The function should return a different message each time it is called. +// +// Note-1: this function assumes a timeout of 5 seconds for each message to be received. +// Note-2: TryConnectionAndEnsureConnected() must be called to connect all nodes before calling this function. +func EnsurePubsubMessageExchange(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, topic channels.Topic, count int, messageFactory func() interface{}) { subs := make([]p2p.Subscription, len(nodes)) for i, node := range nodes { - ps, err := node.Subscribe( - topic, - validator.TopicValidator( - unittest.Logger(), - unittest.AllowAllPeerFilter())) + ps, err := node.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())) require.NoError(t, err) subs[i] = ps } @@ -436,33 +639,74 @@ func EnsurePubsubMessageExchange(t *testing.T, ctx context.Context, nodes []p2p. // let subscriptions propagate time.Sleep(1 * time.Second) - channel, ok := channels.ChannelFromTopic(topic) - require.True(t, ok) - for _, node := range nodes { - // creates a unique message to be published by the node - msg, _ := messageFactory() - data := p2pfixtures.MustEncodeEvent(t, msg, channel) - require.NoError(t, node.Publish(ctx, topic, data)) + for i := 0; i < count; i++ { + // creates a unique message to be published by the node + payload := messageFactory() + outgoingMessageScope, err := message.NewOutgoingScope(flow.IdentifierList{unittest.IdentifierFixture()}, + topic, + payload, + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + require.NoError(t, node.Publish(ctx, outgoingMessageScope)) - // wait for the message to be received by all nodes - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - p2pfixtures.SubsMustReceiveMessage(t, ctx, data, subs) - cancel() + // wait for the message to be received by all nodes + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + expectedReceivedData, err := outgoingMessageScope.Proto().Marshal() + require.NoError(t, err) + p2pfixtures.SubsMustReceiveMessage(t, ctx, expectedReceivedData, subs) + cancel() + } } } -// PeerIdFixture returns a random peer ID for testing. -// peer ID is the identifier of a node on the libp2p network. 
-func PeerIdFixture(t *testing.T) peer.ID { - buf := make([]byte, 16) - n, err := rand.Read(buf) +// EnsurePubsubMessageExchangeFromNode ensures that the sender node exchanges messages on the given topic through pubsub with the receiver node. +// Args: +// - ctx: the context; the test will fail if the context expires. +// - sender: the node that sends the message to the other node. +// - receiverNode: the node that receives the message from the other node. +// - receiverIdentifier: the identifier of the receiver node. +// - topic: the topic to exchange messages on. +// - count: the number of messages to exchange from `sender` to `receiver`. +// - messageFactory: a function that creates a unique message to be published by the node. +func EnsurePubsubMessageExchangeFromNode(t *testing.T, + ctx context.Context, + sender p2p.LibP2PNode, + receiverNode p2p.LibP2PNode, + receiverIdentifier flow.Identifier, + topic channels.Topic, + count int, + messageFactory func() interface{}) { + _, err := sender.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())) require.NoError(t, err) - require.Equal(t, 16, n) - h, err := mh.Sum(buf, mh.SHA2_256, -1) + + toSub, err := receiverNode.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())) require.NoError(t, err) - return peer.ID(h) + // let subscriptions propagate + time.Sleep(1 * time.Second) + + for i := 0; i < count; i++ { + // creates a unique message to be published by the node + payload := messageFactory() + outgoingMessageScope, err := message.NewOutgoingScope(flow.IdentifierList{receiverIdentifier}, + topic, + payload, + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + require.NoError(t, sender.Publish(ctx, outgoingMessageScope)) + + // wait for the message to be received by the receiver node + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + expectedReceivedData, err := outgoingMessageScope.Proto().Marshal() + require.NoError(t, err) + p2pfixtures.SubsMustReceiveMessage(t, ctx, expectedReceivedData, []p2p.Subscription{toSub}) + cancel() + } } // EnsureNotConnectedBetweenGroups ensures no connection exists between the given groups of nodes. @@ -474,13 +718,23 @@ func EnsureNotConnectedBetweenGroups(t *testing.T, ctx context.Context, groupA [ } // EnsureNoPubsubMessageExchange ensures that no pubsub message is exchanged "from" the given nodes "to" the given nodes. -func EnsureNoPubsubMessageExchange(t *testing.T, ctx context.Context, from []p2p.LibP2PNode, to []p2p.LibP2PNode, messageFactory func() (interface{}, channels.Topic)) { - _, topic := messageFactory() - +// Args: +// - from: the nodes that send messages to the other group but their message must not be received by the other group. +// +// - to: the nodes that are the target of the messages sent by the other group ("from") but must not receive any message from them. +// - topic: the topic to exchange messages on. +// - count: the number of messages to exchange from each node. +// - messageFactory: a function that creates a unique message to be published by the node.
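+//
+// Sketch of a typical call (group names hypothetical): no message published by the
+// `unstaked` nodes may be received by the `staked` nodes on blockTopic:
+//
+//	EnsureNoPubsubMessageExchange(t, ctx, unstaked, staked, stakedIDs, blockTopic, 1,
+//		func() interface{} { return unittest.ProposalFixture() })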
+func EnsureNoPubsubMessageExchange(t *testing.T, + ctx context.Context, + from []p2p.LibP2PNode, + to []p2p.LibP2PNode, + toIdentifiers flow.IdentifierList, + topic channels.Topic, + count int, + messageFactory func() interface{}) { subs := make([]p2p.Subscription, len(to)) - tv := validator.TopicValidator( - unittest.Logger(), - unittest.AllowAllPeerFilter()) + tv := validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter()) var err error for _, node := range from { _, err = node.Subscribe(topic, tv) @@ -496,27 +750,56 @@ func EnsureNoPubsubMessageExchange(t *testing.T, ctx context.Context, from []p2p // let subscriptions propagate time.Sleep(1 * time.Second) + wg := &sync.WaitGroup{} for _, node := range from { - // creates a unique message to be published by the node. - msg, _ := messageFactory() - channel, ok := channels.ChannelFromTopic(topic) - require.True(t, ok) - data := p2pfixtures.MustEncodeEvent(t, msg, channel) - - // ensure the message is NOT received by any of the nodes. - require.NoError(t, node.Publish(ctx, topic, data)) - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - p2pfixtures.SubsMustNeverReceiveAnyMessage(t, ctx, subs) - cancel() + node := node // capture range variable + for i := 0; i < count; i++ { + wg.Add(1) + go func() { + // creates a unique message to be published by the node. + + payload := messageFactory() + outgoingMessageScope, err := message.NewOutgoingScope(toIdentifiers, topic, payload, unittest.NetworkCodec().Encode, message.ProtocolTypePubSub) + require.NoError(t, err) + require.NoError(t, node.Publish(ctx, outgoingMessageScope)) + + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + p2pfixtures.SubsMustNeverReceiveAnyMessage(t, ctx, subs) + cancel() + wg.Done() + }() + } } + + // we wait for 5 seconds at most for the messages to be exchanged, hence we wait for a total of 6 seconds here to ensure + // that the goroutines are done in a timely manner. + unittest.RequireReturnsBefore(t, wg.Wait, 6*time.Second, "timed out waiting for messages to be exchanged") } // EnsureNoPubsubExchangeBetweenGroups ensures that no pubsub message is exchanged between the given groups of nodes. -func EnsureNoPubsubExchangeBetweenGroups(t *testing.T, ctx context.Context, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode, messageFactory func() (interface{}, channels.Topic)) { +// Args: +// - t: *testing.T instance +// - ctx: context.Context instance +// - groupANodes: first group of nodes- no message should be exchanged from any node of this group to the other group. +// - groupAIdentifiers: identifiers of the nodes in the first group. +// - groupBNodes: second group of nodes- no message should be exchanged from any node of this group to the other group. +// - groupBIdentifiers: identifiers of the nodes in the second group. +// - topic: pubsub topic- no message should be exchanged on this topic. +// - count: number of messages to be exchanged- no message should be exchanged. +// - messageFactory: function to create a unique message to be published by the node. 
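+//
+// Sketch of a typical call (hypothetical groups): assert pubsub isolation in both directions:
+//
+//	EnsureNoPubsubExchangeBetweenGroups(t, ctx,
+//		groupA, groupAIDs,
+//		groupB, groupBIDs,
+//		blockTopic, 1,
+//		func() interface{} { return unittest.ProposalFixture() })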
+func EnsureNoPubsubExchangeBetweenGroups(t *testing.T, + ctx context.Context, + groupANodes []p2p.LibP2PNode, + groupAIdentifiers flow.IdentifierList, + groupBNodes []p2p.LibP2PNode, + groupBIdentifiers flow.IdentifierList, + topic channels.Topic, + count int, + messageFactory func() interface{}) { // ensure no message exchange from group A to group B - EnsureNoPubsubMessageExchange(t, ctx, groupA, groupB, messageFactory) + EnsureNoPubsubMessageExchange(t, ctx, groupANodes, groupBNodes, groupBIdentifiers, topic, count, messageFactory) // ensure no message exchange from group B to group A - EnsureNoPubsubMessageExchange(t, ctx, groupB, groupA, messageFactory) + EnsureNoPubsubMessageExchange(t, ctx, groupBNodes, groupANodes, groupAIdentifiers, topic, count, messageFactory) } // PeerIdSliceFixture returns a slice of random peer IDs for testing. @@ -529,7 +812,263 @@ func EnsureNoPubsubExchangeBetweenGroups(t *testing.T, ctx context.Context, grou func PeerIdSliceFixture(t *testing.T, n int) peer.IDSlice { ids := make([]peer.ID, n) for i := 0; i < n; i++ { - ids[i] = PeerIdFixture(t) + ids[i] = unittest.PeerIdFixture(t) } return ids } + +// NewConnectionGater creates a new connection gater for testing with the given allow-listing filter. +func NewConnectionGater(idProvider module.IdentityProvider, allowListFilter p2p.PeerFilter) p2p.ConnectionGater { + filters := []p2p.PeerFilter{allowListFilter} + return connection.NewConnGater(unittest.Logger(), idProvider, connection.WithOnInterceptPeerDialFilters(filters), connection.WithOnInterceptSecuredFilters(filters)) +} + +// GossipSubRpcFixtures returns a slice of random GossipSub RPC messages for testing. +// Args: +// - t: *testing.T instance +// - count: number of RPC messages to generate +// Returns: +// - []*pb.RPC: slice of RPC messages. +// Note: every parameter that is not explicitly set defaults to 10. This function suits applications that need to generate a large number of RPC messages with +// filled random data. For finer control over the generated data, use GossipSubRpcFixture. +func GossipSubRpcFixtures(t *testing.T, count int) []*pb.RPC { + c := 10 + rpcs := make([]*pb.RPC, 0) + for i := 0; i < count; i++ { + rpcs = append(rpcs, + GossipSubRpcFixture(t, + c, + WithPrune(c, GossipSubTopicIdFixture()), + WithGraft(c, GossipSubTopicIdFixture()), + WithIHave(c, c, GossipSubTopicIdFixture()), + WithIWant(c, c))) + } + return rpcs +} + +// GossipSubRpcFixture returns a random GossipSub RPC message. An RPC message is the GossipSub-level message that is exchanged between nodes. +// It contains individual messages, subscriptions, and control messages. +// Args: +// - t: *testing.T instance +// - msgCnt: number of messages to generate +// - opts: options to customize control messages (not having an option means no control message). +// Returns: +// - *pb.RPC: a random GossipSub RPC message +// Note: the message is not signed.
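+//
+// Example (sketch): an RPC carrying 5 random messages plus one GRAFT and one PRUNE
+// on random topic IDs:
+//
+//	rpc := GossipSubRpcFixture(t, 5,
+//		WithGraft(1, GossipSubTopicIdFixture()),
+//		WithPrune(1, GossipSubTopicIdFixture()))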
+func GossipSubRpcFixture(t *testing.T, msgCnt int, opts ...GossipSubCtrlOption) *pb.RPC { + rand.Seed(uint64(time.Now().UnixNano())) + + // creates a random number of Subscriptions + numSubscriptions := 10 + topicIdSize := 10 + subscriptions := make([]*pb.RPC_SubOpts, numSubscriptions) + for i := 0; i < numSubscriptions; i++ { + subscribe := rand.Intn(2) == 1 + topicID, err := randutils.GenerateRandomString(topicIdSize) + require.NoError(t, err) + subscriptions[i] = &pb.RPC_SubOpts{ + Subscribe: &subscribe, + Topicid: &topicID, + } + } + + // generates random messages + messages := make([]*pb.Message, msgCnt) + for i := 0; i < msgCnt; i++ { + messages[i] = GossipSubMessageFixture(t) + } + + // Create a Control Message + controlMessages := GossipSubCtrlFixture(opts...) + + // Create the RPC + rpc := &pb.RPC{ + Subscriptions: subscriptions, + Publish: messages, + Control: controlMessages, + } + + return rpc +} + +type GossipSubCtrlOption func(*pb.ControlMessage) + +// GossipSubCtrlFixture returns a ControlMessage with the given options. +func GossipSubCtrlFixture(opts ...GossipSubCtrlOption) *pb.ControlMessage { + msg := &pb.ControlMessage{} + for _, opt := range opts { + opt(msg) + } + return msg +} + +// WithIHave adds iHave control messages of the given size and number to the control message. +func WithIHave(msgCount, msgIDCount int, topicId string) GossipSubCtrlOption { + return func(msg *pb.ControlMessage) { + iHaves := make([]*pb.ControlIHave, msgCount) + for i := 0; i < msgCount; i++ { + iHaves[i] = &pb.ControlIHave{ + TopicID: &topicId, + MessageIDs: GossipSubMessageIdsFixture(msgIDCount), + } + } + msg.Ihave = iHaves + } +} + +// WithIHaveMessageIDs adds iHave control messages with the given message IDs to the control message. +func WithIHaveMessageIDs(msgIDs []string, topicId string) GossipSubCtrlOption { + return func(msg *pb.ControlMessage) { + msg.Ihave = []*pb.ControlIHave{ + { + TopicID: &topicId, + MessageIDs: msgIDs, + }, + } + } +} + +// WithIWant adds iWant control messages of the given size and number to the control message. +// The message IDs are generated randomly. +// Args: +// +// msgCount: number of iWant messages to add. +// msgIdsPerIWant: number of message IDs to add to each iWant message. +// +// Returns: +// A GossipSubCtrlOption that adds iWant messages to the control message. +// Example: WithIWant(2, 3) will add 2 iWant messages, each with 3 message IDs. +func WithIWant(iWantCount int, msgIdsPerIWant int) GossipSubCtrlOption { + return func(msg *pb.ControlMessage) { + iWants := make([]*pb.ControlIWant, iWantCount) + for i := 0; i < iWantCount; i++ { + iWants[i] = &pb.ControlIWant{ + MessageIDs: GossipSubMessageIdsFixture(msgIdsPerIWant), + } + } + msg.Iwant = iWants + } +} + +// WithGraft adds GRAFT control messages with given topicID to the control message. +func WithGraft(msgCount int, topicId string) GossipSubCtrlOption { + return func(msg *pb.ControlMessage) { + grafts := make([]*pb.ControlGraft, msgCount) + for i := 0; i < msgCount; i++ { + grafts[i] = &pb.ControlGraft{ + TopicID: &topicId, + } + } + msg.Graft = grafts + } +} + +// WithGrafts adds a GRAFT control message with each given topicID to the control message. 
+func WithGrafts(topicIds ...string) GossipSubCtrlOption { + return func(msg *pb.ControlMessage) { + grafts := make([]*pb.ControlGraft, len(topicIds)) + for i, topic := range topicIds { + grafts[i] = &pb.ControlGraft{ + TopicID: &topic, + } + } + msg.Graft = grafts + } +} + +// WithPrune adds PRUNE control messages with given topicID to the control message. +func WithPrune(msgCount int, topicId string) GossipSubCtrlOption { + return func(msg *pb.ControlMessage) { + prunes := make([]*pb.ControlPrune, msgCount) + for i := 0; i < msgCount; i++ { + prunes[i] = &pb.ControlPrune{ + TopicID: &topicId, + } + } + msg.Prune = prunes + } +} + +// WithPrunes adds a PRUNE control message with each given topicID to the control message. +func WithPrunes(topicIds ...string) GossipSubCtrlOption { + return func(msg *pb.ControlMessage) { + prunes := make([]*pb.ControlPrune, len(topicIds)) + for i, topic := range topicIds { + prunes[i] = &pb.ControlPrune{ + TopicID: &topic, + } + } + msg.Prune = prunes + } +} + +// gossipSubMessageIdFixture returns a random gossipSub message ID. +func gossipSubMessageIdFixture() string { + // TODO: messageID length should be a parameter. + return unittest.GenerateRandomStringWithLen(messageIDFixtureLen) +} + +// GossipSubTopicIdFixture returns a random gossipSub topic ID. +func GossipSubTopicIdFixture() string { + // TODO: topicID length should be a parameter. + return unittest.GenerateRandomStringWithLen(topicIDFixtureLen) +} + +// GossipSubMessageIdsFixture returns a slice of random gossipSub message IDs of the given size. +func GossipSubMessageIdsFixture(count int) []string { + msgIds := make([]string, count) + for i := 0; i < count; i++ { + msgIds[i] = gossipSubMessageIdFixture() + } + return msgIds +} + +// GossipSubMessageFixture returns a random gossipSub message; this contains a single pubsub message that is exchanged between nodes. +// The message is generated randomly. +// Args: +// - t: *testing.T instance +// Returns: +// - *pb.Message: a random gossipSub message +// Note: the message is not signed. +func GossipSubMessageFixture(t *testing.T) *pb.Message { + byteSize := 100 + topic, err := randutils.GenerateRandomString(byteSize) + require.NoError(t, err) + return &pb.Message{ + From: unittest.RandomBytes(byteSize), + Data: unittest.RandomBytes(byteSize), + Seqno: unittest.RandomBytes(byteSize), + Topic: &topic, + Signature: unittest.RandomBytes(byteSize), + Key: unittest.RandomBytes(byteSize), + } +} + +// UpdatableTopicProviderFixture is a mock implementation of the TopicProvider interface. 
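+// A typical use in a test looks like this (illustrative sketch; somePeerId stands in for any peer.ID):
+//
+//	provider := NewUpdatableTopicProviderFixture()
+//	provider.UpdateTopics([]string{"topic-a"})
+//	provider.UpdateSubscriptions("topic-a", []peer.ID{somePeerId})
+//	peers := provider.ListPeers("topic-a") // returns []peer.ID{somePeerId}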
+type UpdatableTopicProviderFixture struct { + topics []string + subscriptions map[string][]peer.ID +} + +func NewUpdatableTopicProviderFixture() *UpdatableTopicProviderFixture { + return &UpdatableTopicProviderFixture{ + topics: []string{}, + subscriptions: map[string][]peer.ID{}, + } +} + +func (m *UpdatableTopicProviderFixture) GetTopics() []string { + return m.topics +} + +func (m *UpdatableTopicProviderFixture) ListPeers(topic string) []peer.ID { + return m.subscriptions[topic] +} + +func (m *UpdatableTopicProviderFixture) UpdateTopics(topics []string) { + m.topics = topics +} + +func (m *UpdatableTopicProviderFixture) UpdateSubscriptions(topic string, peers []peer.ID) { + m.subscriptions[topic] = peers +} diff --git a/network/p2p/test/message.go b/network/p2p/test/message.go new file mode 100644 index 00000000000..4bb0efea1f1 --- /dev/null +++ b/network/p2p/test/message.go @@ -0,0 +1,68 @@ +package p2ptest + +import ( + "testing" + + pb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/utils/rand" + "github.com/onflow/flow-go/utils/unittest" +) + +// WithFrom is a test helper that returns a function that sets the from field of a pubsub message to the given peer id. +func WithFrom(from peer.ID) func(*pb.Message) { + return func(m *pb.Message) { + m.From = []byte(from) + } +} + +// WithTopic is a test helper that returns a function that sets the topic of a pubsub message to the given topic. +func WithTopic(topic string) func(*pb.Message) { + return func(m *pb.Message) { + m.Topic = &topic + } +} + +// WithoutSignature is a test helper that returns a function that sets the signature of a pubsub message to nil, effectively removing the signature. +func WithoutSignature() func(*pb.Message) { + return func(m *pb.Message) { + m.Signature = nil + } +} + +// WithoutSignerId is a test helper that returns a function that sets the from field of a pubsub message to nil, effectively removing the signer id. +func WithoutSignerId() func(*pb.Message) { + return func(m *pb.Message) { + m.From = nil + } +} + +// PubsubMessageFixture is a test helper that returns a random pubsub message with the given options applied. +// If no options are provided, the message will be random. +// Args: +// +// t: testing.T +// +// opt: variadic list of options to apply to the message +// Returns: +// *pb.Message: pubsub message +func PubsubMessageFixture(t *testing.T, opts ...func(*pb.Message)) *pb.Message { + topic, err := rand.GenerateRandomString(10) + require.NoError(t, err) + + m := &pb.Message{ + Data: unittest.RandomByteSlice(t, 100), + Topic: &topic, + Signature: unittest.RandomByteSlice(t, 100), + From: unittest.RandomByteSlice(t, 100), + Seqno: unittest.RandomByteSlice(t, 100), + } + + for _, opt := range opts { + opt(m) + } + + return m +} diff --git a/network/p2p/test/mockStream.go b/network/p2p/test/mockStream.go new file mode 100644 index 00000000000..bfc7113cb2e --- /dev/null +++ b/network/p2p/test/mockStream.go @@ -0,0 +1,86 @@ +package p2ptest + +import ( + "io" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" +) + +// MockStream is a mocked libp2p stream that is implemented as a pipe with a reader and writer. +// Whatever is written on the stream is written by the writer on the pipe, which in turn makes +// it available for read by the reader. 
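+// A minimal wiring sketch (illustrative, standard library only):
+//
+//	pr, pw := io.Pipe()
+//	s := NewMockStream(pw, pr)
+//	go func() { _, _ = s.Write([]byte("ping")) }()
+//	buf := make([]byte, 4)
+//	_, _ = s.Read(buf) // reads back "ping" through the pipe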
+type MockStream struct {
+	pw *io.PipeWriter
+	pr *io.PipeReader
+}
+
+func NewMockStream(pw *io.PipeWriter, pr *io.PipeReader) *MockStream {
+	return &MockStream{
+		pw: pw,
+		pr: pr,
+	}
+}
+
+func (m *MockStream) Read(p []byte) (int, error) {
+	n, err := m.pr.Read(p)
+	return n, err
+}
+
+func (m *MockStream) Write(p []byte) (int, error) {
+	return m.pw.Write(p)
+}
+
+func (m *MockStream) Close() error {
+	return multierror.Append(m.CloseRead(), m.CloseWrite())
+}
+
+func (m *MockStream) CloseRead() error {
+	return m.pr.Close()
+}
+
+func (m *MockStream) CloseWrite() error {
+	return m.pw.Close()
+}
+
+func (m *MockStream) Reset() error {
+	return nil
+}
+
+func (m *MockStream) SetDeadline(_ time.Time) error {
+	return nil
+}
+
+func (m *MockStream) SetReadDeadline(_ time.Time) error {
+	return nil
+}
+
+func (m *MockStream) SetWriteDeadline(_ time.Time) error {
+	return nil
+}
+
+func (m *MockStream) ID() string {
+	return ""
+}
+
+func (m *MockStream) Protocol() protocol.ID {
+	return ""
+}
+
+func (m *MockStream) SetProtocol(_ protocol.ID) error {
+	return nil
+}
+
+func (m *MockStream) Stat() network.Stats {
+	return network.Stats{}
+}
+
+func (m *MockStream) Conn() network.Conn {
+	return nil
+}
+
+func (m *MockStream) Scope() network.StreamScope {
+	return nil
+}
diff --git a/network/p2p/test/sporking_test.go b/network/p2p/test/sporking_test.go
index 1fa099013f3..2d0d8e9586e 100644
--- a/network/p2p/test/sporking_test.go
+++ b/network/p2p/test/sporking_test.go
@@ -5,16 +5,17 @@ import (
 	"testing"
 	"time"
 
-	"github.com/onflow/flow-go/model/flow"
-	libp2pmessage "github.com/onflow/flow-go/model/libp2p/message"
-	"github.com/onflow/flow-go/network"
-	"github.com/onflow/flow-go/network/message"
-
+	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/peerstore"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/model/flow"
+	libp2pmessage "github.com/onflow/flow-go/model/libp2p/message"
+	"github.com/onflow/flow-go/network/message"
+	"github.com/onflow/flow-go/network/p2p"
 	p2ptest "github.com/onflow/flow-go/network/p2p/test"
@@ -40,7 +41,7 @@ import (
 // TestCrosstalkPreventionOnNetworkKeyChange tests that a node from the old chain cannot talk to a node in the new chain
 // if its network key is updated while the libp2p protocol ID remains the same
 func TestCrosstalkPreventionOnNetworkKeyChange(t *testing.T) {
-	unittest.SkipUnless(t, unittest.TEST_FLAKY, "flaky test - passing in Flaky Test Monitor but keeps failing in CI and keeps blocking many PRs")
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -60,34 +61,43 @@ func TestCrosstalkPreventionOnNetworkKeyChange(t *testing.T) {
 	node1, id1 := p2ptest.NodeFixture(t,
 		sporkId,
 		"test_crosstalk_prevention_on_network_key_change",
+		idProvider,
 		p2ptest.WithNetworkingPrivateKey(node1key),
 	)
+	idProvider.SetIdentities(flow.IdentityList{&id1})
 
-	p2ptest.StartNode(t, signalerCtx1, node1, 100*time.Millisecond)
-	defer p2ptest.StopNode(t, node1, cancel1, 100*time.Millisecond)
+	p2ptest.StartNode(t, signalerCtx1, node1)
+	defer p2ptest.StopNode(t, node1, cancel1)
 
 	t.Logf(" %s node started on %s", id1.NodeID.String(), id1.Address)
-	t.Logf("libp2p ID for %s: %s", id1.NodeID.String(), node1.Host().ID())
+	t.Logf("libp2p ID for %s: %s", id1.NodeID.String(), node1.ID())
 
 	// create and start node 2 on localhost
and random port node2key := p2ptest.NetworkingKeyFixtures(t) node2, id2 := p2ptest.NodeFixture(t, sporkId, "test_crosstalk_prevention_on_network_key_change", + idProvider, p2ptest.WithNetworkingPrivateKey(node2key), ) - p2ptest.StartNode(t, signalerCtx2, node2, 100*time.Millisecond) + idProvider.SetIdentities(flow.IdentityList{&id1, &id2}) + + p2ptest.StartNode(t, signalerCtx2, node2) - peerInfo2, err := utils.PeerAddressInfo(id2) + peerInfo2, err := utils.PeerAddressInfo(id2.IdentitySkeleton) require.NoError(t, err) // create stream from node 1 to node 2 - testOneToOneMessagingSucceeds(t, node1, peerInfo2) + node1.Host().Peerstore().AddAddrs(peerInfo2.ID, peerInfo2.Addrs, peerstore.AddressTTL) + err = node1.OpenAndWriteOnStream(context.Background(), peerInfo2.ID, t.Name(), func(stream network.Stream) error { + require.NotNil(t, stream) + return nil + }) + require.NoError(t, err) // Simulate a hard-spoon: node1 is on the old chain, but node2 is moved from the old chain to the new chain - // stop node 2 and start it again with a different networking key but on the same IP and port - p2ptest.StopNode(t, node2, cancel2, 100*time.Millisecond) + p2ptest.StopNode(t, node2, cancel2) // start node2 with the same name, ip and port but with the new key node2keyNew := p2pfixtures.NetworkingKeyFixtures(t) @@ -95,12 +105,14 @@ func TestCrosstalkPreventionOnNetworkKeyChange(t *testing.T) { node2, id2New := p2ptest.NodeFixture(t, sporkId, "test_crosstalk_prevention_on_network_key_change", + idProvider, p2ptest.WithNetworkingPrivateKey(node2keyNew), p2ptest.WithNetworkingAddress(id2.Address), ) + idProvider.SetIdentities(flow.IdentityList{&id1, &id2New}) - p2ptest.StartNode(t, signalerCtx2a, node2, 100*time.Millisecond) - defer p2ptest.StopNode(t, node2, cancel2a, 100*time.Millisecond) + p2ptest.StartNode(t, signalerCtx2a, node2) + defer p2ptest.StopNode(t, node2, cancel2a) // make sure the node2 indeed came up on the old ip and port assert.Equal(t, id2New.Address, id2.Address) @@ -114,6 +126,7 @@ func TestCrosstalkPreventionOnNetworkKeyChange(t *testing.T) { // TestOneToOneCrosstalkPrevention tests that a node from the old chain cannot talk directly to a node in the new chain // if the Flow libp2p protocol ID is updated while the network keys are kept the same. 
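+// Conceptually (illustrative sketch): the one-to-one protocol ID embeds the spork (root block) ID, so
+// nodes on different sporks negotiate different protocol IDs and stream setup fails. Something like
+//
+//	protocolID := protocols.FlowProtocolID(sporkId) // assumed helper; yields a spork-scoped protocol.ID
+//
+// produces distinct IDs for distinct spork IDs, which is what breaks cross-spork streams in this test.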
 func TestOneToOneCrosstalkPrevention(t *testing.T) {
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -129,35 +142,43 @@ func TestOneToOneCrosstalkPrevention(t *testing.T) {
 	sporkId1 := unittest.IdentifierFixture()
 
 	// create and start node 1 on localhost and random port
-	node1, id1 := p2ptest.NodeFixture(t, sporkId1, "test_one_to_one_crosstalk_prevention")
+	node1, id1 := p2ptest.NodeFixture(t, sporkId1, "test_one_to_one_crosstalk_prevention", idProvider)
 
-	p2ptest.StartNode(t, signalerCtx1, node1, 100*time.Millisecond)
-	defer p2ptest.StopNode(t, node1, cancel1, 100*time.Millisecond)
+	p2ptest.StartNode(t, signalerCtx1, node1)
+	defer p2ptest.StopNode(t, node1, cancel1)
 
-	peerInfo1, err := utils.PeerAddressInfo(id1)
+	peerInfo1, err := utils.PeerAddressInfo(id1.IdentitySkeleton)
 	require.NoError(t, err)
 
 	// create and start node 2 on localhost and random port
-	node2, id2 := p2ptest.NodeFixture(t, sporkId1, "test_one_to_one_crosstalk_prevention")
+	node2, id2 := p2ptest.NodeFixture(t, sporkId1, "test_one_to_one_crosstalk_prevention", idProvider)
 
-	p2ptest.StartNode(t, signalerCtx2, node2, 100*time.Millisecond)
+	idProvider.SetIdentities(flow.IdentityList{&id1, &id2})
+	p2ptest.StartNode(t, signalerCtx2, node2)
 
-	// create stream from node 2 to node 1
-	testOneToOneMessagingSucceeds(t, node2, peerInfo1)
+	// create stream from node 2 to node 1
+	node2.Host().Peerstore().AddAddrs(peerInfo1.ID, peerInfo1.Addrs, peerstore.AddressTTL)
+	err = node2.OpenAndWriteOnStream(context.Background(), peerInfo1.ID, t.Name(), func(stream network.Stream) error {
+		assert.NotNil(t, stream)
+		return nil
+	})
+	require.NoError(t, err)
 
 	// Simulate a hard-spoon: node1 is on the old chain, but node2 is moved from the old chain to the new chain
 	// stop node 2 and start it again with a different libp2p protocol id to listen for
-	p2ptest.StopNode(t, node2, cancel2, time.Second)
+	p2ptest.StopNode(t, node2, cancel2)
 
 	// start node2 with the same address and root key but different root block id
 	node2, id2New := p2ptest.NodeFixture(t,
 		unittest.IdentifierFixture(), // update the flow root id for node 2. node1 is still listening on the old protocol
 		"test_one_to_one_crosstalk_prevention",
+		idProvider,
 		p2ptest.WithNetworkingAddress(id2.Address),
 	)
+	idProvider.SetIdentities(flow.IdentityList{&id1, &id2New})
 
-	p2ptest.StartNode(t, signalerCtx2a, node2, 100*time.Millisecond)
-	defer p2ptest.StopNode(t, node2, cancel2a, 100*time.Millisecond)
+	p2ptest.StartNode(t, signalerCtx2a, node2)
+	defer p2ptest.StopNode(t, node2, cancel2a)
 
 	// make sure the node2 indeed came up on the old ip and port
 	assert.Equal(t, id2New.Address, id2.Address)
@@ -170,6 +191,7 @@ func TestOneToOneCrosstalkPrevention(t *testing.T) {
 
 // TestOneToKCrosstalkPrevention tests that a node from the old chain cannot talk to a node in the new chain via PubSub
 // if the channel is updated while the network keys are kept the same.
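+// The spork-scoped topic derivation it relies on (illustrative):
+//
+//	topicBeforeSpork := channels.TopicFromChannel(channels.TestNetworkChannel, previousSporkId)
+//	topicAfterSpork := channels.TopicFromChannel(channels.TestNetworkChannel, unittest.IdentifierFixture())
+//	// the two topics differ, so messages published on one are never delivered on the other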
func TestOneToKCrosstalkPrevention(t *testing.T) { + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -183,24 +205,35 @@ func TestOneToKCrosstalkPrevention(t *testing.T) { previousSporkId := unittest.IdentifierFixture() // create and start node 1 on localhost and random port - node1, _ := p2ptest.NodeFixture(t, + cfg, err := config.DefaultConfig() + require.NoError(t, err) + // cross-talk prevention is intrinsically tied to how we encode topics, peer scoring adds another layer of protection by preventing unknown identifiers + // from joining the mesh. As this test simulates the scenario where a node is moved from the old chain to the new chain, we disable peer scoring + // to allow the node to join the mesh on the new chain, otherwise the node will be disconnected from the mesh due to peer scoring penalty for unknown identifiers. + cfg.NetworkConfig.GossipSub.PeerScoringEnabled = false + cfg.NetworkConfig.GossipSub.RpcInspector.Validation.InspectionProcess.Inspect.RejectUnstakedPeers = false + node1, id1 := p2ptest.NodeFixture(t, previousSporkId, "test_one_to_k_crosstalk_prevention", + idProvider, + p2ptest.OverrideFlowConfig(cfg), ) - p2ptest.StartNode(t, signalerCtx1, node1, 100*time.Millisecond) - defer p2ptest.StopNode(t, node1, cancel1, 100*time.Millisecond) + p2ptest.StartNode(t, signalerCtx1, node1) + defer p2ptest.StopNode(t, node1, cancel1) + idProvider.SetIdentities(flow.IdentityList{&id1}) // create and start node 2 on localhost and random port with the same root block ID node2, id2 := p2ptest.NodeFixture(t, previousSporkId, "test_one_to_k_crosstalk_prevention", + idProvider, ) - p2ptest.StartNode(t, signalerCtx2, node2, 100*time.Millisecond) - defer p2ptest.StopNode(t, node2, cancel2, 100*time.Millisecond) + p2ptest.StartNode(t, signalerCtx2, node2) + defer p2ptest.StopNode(t, node2, cancel2) - pInfo2, err := utils.PeerAddressInfo(id2) + pInfo2, err := utils.PeerAddressInfo(id2.IdentitySkeleton) require.NoError(t, err) // spork topic is derived by suffixing the channel with the root block ID @@ -216,61 +249,16 @@ func TestOneToKCrosstalkPrevention(t *testing.T) { require.NoError(t, err) // add node 2 as a peer of node 1 - err = node1.AddPeer(ctx, pInfo2) + err = node1.ConnectToPeer(ctx, pInfo2) require.NoError(t, err) // let the two nodes form the mesh time.Sleep(time.Second) // assert that node 1 can successfully send a message to node 2 via PubSub - testOneToKMessagingSucceeds(ctx, t, node1, sub2, topicBeforeSpork) - - // new root id after spork - rootIDAfterSpork := unittest.IdentifierFixture() - - // topic after the spork - topicAfterSpork := channels.TopicFromChannel(channels.TestNetworkChannel, rootIDAfterSpork) - - // mimic that node1 is now part of the new spork while node2 remains on the old spork - // by unsubscribing node1 from 'topicBeforeSpork' and subscribing it to 'topicAfterSpork' - // and keeping node2 subscribed to topic 'topicBeforeSpork' - err = node1.UnSubscribe(topicBeforeSpork) - require.NoError(t, err) - _, err = node1.Subscribe(topicAfterSpork, topicValidator) - require.NoError(t, err) - - // assert that node 1 can no longer send a message to node 2 via PubSub - testOneToKMessagingFails(ctx, t, node1, sub2, topicAfterSpork) -} - -func testOneToOneMessagingSucceeds(t *testing.T, sourceNode p2p.LibP2PNode, peerInfo peer.AddrInfo) { - // create stream from node 1 to node 2 - sourceNode.Host().Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, peerstore.AddressTTL) - 
s, err := sourceNode.CreateStream(context.Background(), peerInfo.ID)
-	// assert that stream creation succeeded
-	require.NoError(t, err)
-	assert.NotNil(t, s)
-}
-
-func testOneToOneMessagingFails(t *testing.T, sourceNode p2p.LibP2PNode, peerInfo peer.AddrInfo) {
-	// create stream from source node to destination address
-	sourceNode.Host().Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, peerstore.AddressTTL)
-	_, err := sourceNode.CreateStream(context.Background(), peerInfo.ID)
-	// assert that stream creation failed
-	assert.Error(t, err)
-	// assert that it failed with the expected error
-	assert.Regexp(t, ".*failed to negotiate security protocol.*|.*protocol not supported.*", err)
-}
-
-func testOneToKMessagingSucceeds(ctx context.Context,
-	t *testing.T,
-	sourceNode p2p.LibP2PNode,
-	dstnSub p2p.Subscription,
-	topic channels.Topic) {
-
-	sentMsg, err := network.NewOutgoingScope(
+	outgoingMessageScope, err := message.NewOutgoingScope(
 		flow.IdentifierList{unittest.IdentifierFixture()},
-		channels.TestNetworkChannel,
+		topicBeforeSpork,
 		&libp2pmessage.TestMessage{
 			Text: string("hello"),
 		},
@@ -278,32 +266,40 @@ func testOneToKMessagingSucceeds(ctx context.Context,
 		message.ProtocolTypePubSub)
 	require.NoError(t, err)
 
-	sentData, err := sentMsg.Proto().Marshal()
+	expectedReceivedData, err := outgoingMessageScope.Proto().Marshal()
 	require.NoError(t, err)
 
 	// send a 1-k message from source node to destination node
-	err = sourceNode.Publish(ctx, topic, sentData)
+	err = node1.Publish(ctx, outgoingMessageScope)
 	require.NoError(t, err)
 
 	// assert that the message is received by the destination node
 	unittest.AssertReturnsBefore(t, func() {
-		msg, err := dstnSub.Next(ctx)
+		msg, err := sub2.Next(ctx)
 		require.NoError(t, err)
-		assert.Equal(t, sentData, msg.Data)
+		assert.Equal(t, expectedReceivedData, msg.Data)
 	},
 		// libp2p heartbeats every second, so at most the message should take 1 second
 		2*time.Second)
-}
-
-func testOneToKMessagingFails(ctx context.Context,
-	t *testing.T,
-	sourceNode p2p.LibP2PNode,
-	dstnSub p2p.Subscription,
-	topic channels.Topic) {
+	// new root id after spork
+	rootIDAfterSpork := unittest.IdentifierFixture()
 
-	sentMsg, err := network.NewOutgoingScope(
-		flow.IdentifierList{unittest.IdentifierFixture()},
-		channels.TestNetworkChannel,
+	// topic after the spork
+	topicAfterSpork := channels.TopicFromChannel(channels.TestNetworkChannel, rootIDAfterSpork)
+
+	// mimic that node1 is now part of the new spork while node2 remains on the old spork
+	// by unsubscribing node1 from 'topicBeforeSpork' and subscribing it to 'topicAfterSpork'
+	// and keeping node2 subscribed to topic 'topicBeforeSpork'
+	err = node1.Unsubscribe(topicBeforeSpork)
+	require.NoError(t, err)
+	_, err = node1.Subscribe(topicAfterSpork, topicValidator)
+	require.NoError(t, err)
+
+	// assert that node 1 can no longer send a message to node 2 via PubSub
+	outgoingMessageScope, err = message.NewOutgoingScope(
+		flow.IdentifierList{id2.NodeID},
+		topicAfterSpork,
 		&libp2pmessage.TestMessage{
 			Text: string("hello"),
 		},
@@ -311,18 +307,31 @@ func testOneToKMessagingFails(ctx context.Context,
 		message.ProtocolTypePubSub)
 	require.NoError(t, err)
 
-	sentData, err := sentMsg.Proto().Marshal()
-	require.NoError(t, err)
-
 	// send a 1-k message from source node to destination node
-	err = sourceNode.Publish(ctx, topic, sentData)
+	err = node1.Publish(ctx, outgoingMessageScope)
 	require.NoError(t, err)
 
 	// assert that the message is never received by the destination node
 	_ = unittest.RequireNeverReturnBefore(t, func() {
-		_, _ = dstnSub.Next(ctx)
+		_, _ = sub2.Next(ctx)
 	},
 		// libp2p heartbeats every second, so at most the message should take 1 second
 		2*time.Second, "nodes on different sporks were able to communicate")
 }
+
+func testOneToOneMessagingFails(t *testing.T, sourceNode p2p.LibP2PNode, peerInfo peer.AddrInfo) {
+	// create stream from source node to destination address
+	sourceNode.Host().Peerstore().AddAddrs(peerInfo.ID, peerInfo.Addrs, peerstore.AddressTTL)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	err := sourceNode.OpenAndWriteOnStream(ctx, peerInfo.ID, t.Name(), func(stream network.Stream) error {
+		// this callback should never be called
+		assert.Fail(t, "stream creation should have failed")
+		return nil
+	})
+	// assert that stream creation failed
+	require.Error(t, err)
+	// assert that it failed with the expected error
+	assert.Regexp(t, ".*failed to negotiate security protocol.*|.*protocols not supported.*", err)
+}
diff --git a/network/p2p/test/topic_validator_test.go b/network/p2p/test/topic_validator_test.go
index 18229bd2e81..ac200851eb1 100644
--- a/network/p2p/test/topic_validator_test.go
+++ b/network/p2p/test/topic_validator_test.go
@@ -7,23 +7,28 @@ import (
 	"testing"
 	"time"
 
-	"github.com/onflow/flow-go/network/p2p"
-	p2ptest "github.com/onflow/flow-go/network/p2p/test"
-
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/stretchr/testify/require"
 
-	"github.com/onflow/flow-go/network/p2p/utils"
-
-	"github.com/onflow/flow-go/network/p2p/translator"
+	"github.com/stretchr/testify/mock"
 
+	"github.com/onflow/flow-go/config"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/model/messages"
 	"github.com/onflow/flow-go/module/irrecoverable"
 	"github.com/onflow/flow-go/module/metrics"
+	mockmodule "github.com/onflow/flow-go/module/mock"
+	"github.com/onflow/flow-go/network"
+	"github.com/onflow/flow-go/network/alsp"
 	"github.com/onflow/flow-go/network/channels"
 	"github.com/onflow/flow-go/network/internal/p2pfixtures"
 	"github.com/onflow/flow-go/network/message"
+	mocknetwork "github.com/onflow/flow-go/network/mock"
+	"github.com/onflow/flow-go/network/p2p"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
+	p2ptest "github.com/onflow/flow-go/network/p2p/test"
+	"github.com/onflow/flow-go/network/p2p/translator"
+	"github.com/onflow/flow-go/network/p2p/utils"
 	"github.com/onflow/flow-go/network/slashing"
 	"github.com/onflow/flow-go/network/validator"
 	flowpubsub "github.com/onflow/flow-go/network/validator/pubsub"
@@ -34,32 +39,33 @@ import (
 func TestTopicValidator_Unstaked(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
-
+	idProvider := mockmodule.NewIdentityProvider(t)
 	// create a hooked logger
 	logger, hook := unittest.HookedLogger()
 
 	sporkId := unittest.IdentifierFixture()
 
-	sn1, identity1 := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
-	sn2, identity2 := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
-
+	sn1, identity1 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
+	sn2, identity2 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
+	idProvider.On("ByPeerID", sn1.ID()).Return(&identity1, true).Maybe()
+	idProvider.On("ByPeerID", sn2.ID()).Return(&identity2, true).Maybe()
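+	// the two expectations above let the mocked identity provider resolve each node's peer ID
+	// back to its Flow identity; .Maybe() keeps each expectation optional, so the test does not
+	// fail if a particular code path never performs the lookup.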
 	nodes := []p2p.LibP2PNode{sn1, sn2}
-	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
-	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)
 
 	channel := channels.ConsensusCommittee
 	topic := channels.TopicFromChannel(channel, sporkId)
 
-	//NOTE: identity2 is not in the ids list simulating an un-staked node
+	// NOTE: identity2 is not in the ids list simulating an un-staked node
 	ids := flow.IdentityList{&identity1}
-	translator, err := translator.NewFixedTableIdentityTranslator(ids)
+	translatorFixture, err := translator.NewFixedTableIdentityTranslator(ids)
 	require.NoError(t, err)
 
 	// peer filter used by the topic validator to check if node is staked
 	isStaked := func(pid peer.ID) error {
-		fid, err := translator.GetFlowID(pid)
+		fid, err := translatorFixture.GetFlowID(pid)
 		if err != nil {
-			return fmt.Errorf("could not translate the peer_id %s to a Flow identifier: %w", pid.String(), err)
+			return fmt.Errorf("could not translate the peer_id %s to a Flow identifier: %w", p2plogging.PeerId(pid), err)
 		}
 
 		if _, ok := ids.ByNodeID(fid); !ok {
@@ -69,12 +75,12 @@ func TestTopicValidator_Unstaked(t *testing.T) {
 		return nil
 	}
 
-	pInfo2, err := utils.PeerAddressInfo(identity2)
+	pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton)
 	require.NoError(t, err)
 
 	// node1 is connected to node2
 	// sn1 <-> sn2
-	require.NoError(t, sn1.AddPeer(ctx, pInfo2))
+	require.NoError(t, sn1.ConnectToPeer(ctx, pInfo2))
 
 	// sn1 will subscribe with the isStaked callback that should force the TopicValidator to drop the message received from sn2
 	sub1, err := sn1.Subscribe(topic, flowpubsub.TopicValidator(logger, isStaked))
@@ -89,10 +95,16 @@ func TestTopicValidator_Unstaked(t *testing.T) {
 	timedCtx, cancel5s := context.WithTimeout(ctx, 5*time.Second)
 	defer cancel5s()
 
-	// create a dummy block proposal to publish from our SN node
-	data1 := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channel)
-	err = sn2.Publish(timedCtx, topic, data1)
+	outgoingMessageScope1, err := message.NewOutgoingScope(
+		flow.IdentifierList{identity1.NodeID, identity2.NodeID},
+		topic,
+		(*messages.Proposal)(unittest.ProposalFixture()),
+		unittest.NetworkCodec().Encode,
+		message.ProtocolTypePubSub)
+	require.NoError(t, err)
+
+	err = sn2.Publish(timedCtx, outgoingMessageScope1)
 	require.NoError(t, err)
 
 	// sn1 should not receive message from sn2 because sn2 is unstaked
@@ -108,27 +120,28 @@ func TestTopicValidator_Unstaked(t *testing.T) {
 func TestTopicValidator_PublicChannel(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
-
+	idProvider := mockmodule.NewIdentityProvider(t)
 	sporkId := unittest.IdentifierFixture()
 	logger := unittest.Logger()
 
-	sn1, _ := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
-	sn2, identity2 := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
-
+	sn1, identity1 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
+	sn2, identity2 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
+	idProvider.On("ByPeerID", sn1.ID()).Return(&identity1, true).Maybe()
+	idProvider.On("ByPeerID", sn2.ID()).Return(&identity2, true).Maybe()
 	nodes := []p2p.LibP2PNode{sn1, sn2}
-
p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) // unauthenticated messages should not be dropped on public channels channel := channels.PublicSyncCommittee topic := channels.TopicFromChannel(channel, sporkId) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2 // sn1 <-> sn2 - require.NoError(t, sn1.AddPeer(ctx, pInfo2)) + require.NoError(t, sn1.ConnectToPeer(ctx, pInfo2)) // sn1 & sn2 will subscribe with unauthenticated callback to allow it to send and receive unauthenticated messages sub1, err := sn1.Subscribe(topic, flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter())) @@ -141,10 +154,16 @@ func TestTopicValidator_PublicChannel(t *testing.T) { timedCtx, cancel5s := context.WithTimeout(ctx, 5*time.Second) defer cancel5s() - // create a dummy sync request to publish from our SN node - data1 := p2pfixtures.MustEncodeEvent(t, &messages.SyncRequest{Nonce: 0, Height: 0}, channel) - err = sn2.Publish(timedCtx, topic, data1) + outgoingMessageScope1, err := message.NewOutgoingScope( + flow.IdentifierList{identity1.NodeID, identity2.NodeID}, + topic, + &messages.SyncRequest{Nonce: 0, Height: 0}, + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + + err = sn2.Publish(timedCtx, outgoingMessageScope1) require.NoError(t, err) var wg sync.WaitGroup @@ -153,11 +172,14 @@ func TestTopicValidator_PublicChannel(t *testing.T) { timedCtx, cancel1s := context.WithTimeout(ctx, time.Second) defer cancel1s() + expectedReceivedData, err := outgoingMessageScope1.Proto().Marshal() + require.NoError(t, err) + // sn1 gets the message - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data1, sub1) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData, sub1) // sn2 also gets the message (as part of the libp2p loopback of published topic messages) - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data1, sub2) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData, sub2) unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") } @@ -166,28 +188,29 @@ func TestTopicValidator_PublicChannel(t *testing.T) { func TestTopicValidator_TopicMismatch(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - + idProvider := mockmodule.NewIdentityProvider(t) // create a hooked logger logger, hook := unittest.HookedLogger() sporkId := unittest.IdentifierFixture() - sn1, _ := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger)) - sn2, identity2 := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger)) - + sn1, identity1 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger)) + sn2, identity2 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger)) + idProvider.On("ByPeerID", sn1.ID()).Return(&identity1, true).Maybe() + idProvider.On("ByPeerID", sn2.ID()).Return(&identity2, true).Maybe() nodes := []p2p.LibP2PNode{sn1, sn2} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, 
cancel, 100*time.Millisecond)
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)
 
 	channel := channels.ConsensusCommittee
 	topic := channels.TopicFromChannel(channel, sporkId)
 
-	pInfo2, err := utils.PeerAddressInfo(identity2)
+	pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton)
 	require.NoError(t, err)
 
 	// node1 is connected to node2
 	// sn1 <-> sn2
-	require.NoError(t, sn1.AddPeer(ctx, pInfo2))
+	require.NoError(t, sn1.ConnectToPeer(ctx, pInfo2))
 
 	// sn1 will subscribe with an unauthenticated callback to allow processing of the message after the authorization check
 	_, err = sn1.Subscribe(topic, flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter()))
@@ -202,11 +225,20 @@ func TestTopicValidator_TopicMismatch(t *testing.T) {
 	timedCtx, cancel5s := context.WithTimeout(ctx, 5*time.Second)
 	defer cancel5s()
+
+	// create a dummy block proposal to publish from our SN node
-	data1 := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channels.Channel("invalid-channel"))
+	outgoingMessageScope1, err := message.NewOutgoingScope(
+		flow.IdentifierList{identity1.NodeID, identity2.NodeID},
+		topic,
+		(*messages.Proposal)(unittest.ProposalFixture()),
+		unittest.NetworkCodec().Encode,
+		message.ProtocolTypePubSub)
+	require.NoError(t, err)
 
-	err = sn2.Publish(timedCtx, topic, data1)
+	// intentionally overriding the channel id to be different from the topic
+	outgoingMessageScope1.Proto().ChannelID = channels.PublicSyncCommittee.String()
+	err = sn2.Publish(timedCtx, outgoingMessageScope1)
 
 	// publish fails because the channel validation fails
 	require.Error(t, err)
@@ -218,27 +250,28 @@ func TestTopicValidator_InvalidTopic(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
-
+	idProvider := mockmodule.NewIdentityProvider(t)
 	// create a hooked logger
 	logger, hook := unittest.HookedLogger()
 
 	sporkId := unittest.IdentifierFixture()
 
-	sn1, _ := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
-	sn2, identity2 := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
-
+	sn1, identity1 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
+	sn2, identity2 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.WithLogger(logger))
+	idProvider.On("ByPeerID", sn1.ID()).Return(&identity1, true).Maybe()
+	idProvider.On("ByPeerID", sn2.ID()).Return(&identity2, true).Maybe()
 	nodes := []p2p.LibP2PNode{sn1, sn2}
-	p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond)
-	defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond)
+	p2ptest.StartNodes(t, signalerCtx, nodes)
+	defer p2ptest.StopNodes(t, nodes, cancel)
 
 	topic := channels.Topic("invalid-topic")
 
-	pInfo2, err := utils.PeerAddressInfo(identity2)
+	pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton)
 	require.NoError(t, err)
 
 	// node1 is connected to node2
 	// sn1 <-> sn2
-	require.NoError(t, sn1.AddPeer(ctx, pInfo2))
+	require.NoError(t, sn1.ConnectToPeer(ctx, pInfo2))
 
 	// sn1 will subscribe with an unauthenticated callback to allow processing of the message after the authorization check
 	_, err = sn1.Subscribe(topic, flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter()))
@@ -253,14 +286,30 @@ func
TestTopicValidator_InvalidTopic(t *testing.T) { timedCtx, cancel5s := context.WithTimeout(ctx, 5*time.Second) defer cancel5s() - // create a dummy block proposal to publish from our SN node - data1 := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channels.PushBlocks) - err = sn2.Publish(timedCtx, topic, data1) + // invalid topic is malformed, hence it cannot be used to create a message scope, as it faces an error. + // Hence, we create a dummy block proposal message scope to publish on a legit topic, and then override + // the topic in the next step to a malformed topic. + dummyMessageScope, err := message.NewOutgoingScope( + flow.IdentifierList{identity1.NodeID, identity2.NodeID}, + channels.TopicFromChannel(channels.PushBlocks, sporkId), + (*messages.Proposal)(unittest.ProposalFixture()), + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + + // overrides the topic to be an invalid topic + corruptOutgoingMessageScope := mocknetwork.NewOutgoingMessageScope(t) + corruptOutgoingMessageScope.On("Topic").Return(topic) + corruptOutgoingMessageScope.On("Proto").Return(dummyMessageScope.Proto()) + corruptOutgoingMessageScope.On("PayloadType").Return(dummyMessageScope.PayloadType()) + corruptOutgoingMessageScope.On("Size").Return(dummyMessageScope.Size()) + + // create a dummy block proposal to publish from our SN node + err = sn2.Publish(timedCtx, corruptOutgoingMessageScope) // publish fails because the topic conversion fails require.Error(t, err) - // ensure the correct error is contained in the logged error require.Contains(t, hook.Logs(), "could not convert topic to channel") } @@ -269,31 +318,42 @@ func TestTopicValidator_InvalidTopic(t *testing.T) { func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - - // create a hooked logger - logger, hook := unittest.HookedLogger() + idProvider := mockmodule.NewIdentityProvider(t) + logger := unittest.Logger() sporkId := unittest.IdentifierFixture() - sn1, identity1 := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleConsensus)) - sn2, identity2 := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleConsensus)) - an1, identity3 := p2ptest.NodeFixture(t, sporkId, t.Name(), p2ptest.WithRole(flow.RoleAccess)) - + sn1, identity1 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus)) + sn2, identity2 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus)) + an1, identity3 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithRole(flow.RoleAccess)) + idProvider.On("ByPeerID", sn1.ID()).Return(&identity1, true).Maybe() + idProvider.On("ByPeerID", sn2.ID()).Return(&identity2, true).Maybe() + idProvider.On("ByPeerID", an1.ID()).Return(&identity3, true).Maybe() nodes := []p2p.LibP2PNode{sn1, sn2, an1} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) channel := channels.ConsensusCommittee topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2, &identity3} - translator, err := translator.NewFixedTableIdentityTranslator(ids) + translatorFixture, err := translator.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - violationsConsumer := 
slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector()) + violation := &network.Violation{ + Identity: &identity3, + PeerID: p2plogging.PeerId(an1.ID()), + OriginID: identity3.NodeID, + MsgType: "*messages.Proposal", + Channel: channel, + Protocol: message.ProtocolTypePubSub, + Err: message.ErrUnauthorizedRole, + } + violationsConsumer := mocknetwork.NewViolationsConsumer(t) + violationsConsumer.On("OnUnAuthorizedSenderError", violation).Once().Return(nil) getIdentity := func(pid peer.ID) (*flow.Identity, bool) { - fid, err := translator.GetFlowID(pid) + fid, err := translatorFixture.GetFlowID(pid) if err != nil { return &flow.Identity{}, false } @@ -303,16 +363,16 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { authorizedSenderValidator := validator.NewAuthorizedSenderValidator(logger, violationsConsumer, getIdentity) pubsubMessageValidator := authorizedSenderValidator.PubSubMessageValidator(channel) - pInfo1, err := utils.PeerAddressInfo(identity1) + pInfo1, err := utils.PeerAddressInfo(identity1.IdentitySkeleton) require.NoError(t, err) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2, and the an1 is connected to node1 // an1 <-> sn1 <-> sn2 - require.NoError(t, sn1.AddPeer(ctx, pInfo2)) - require.NoError(t, an1.AddPeer(ctx, pInfo1)) + require.NoError(t, sn1.ConnectToPeer(ctx, pInfo2)) + require.NoError(t, an1.ConnectToPeer(ctx, pInfo1)) // sn1 and sn2 subscribe to the topic with the topic validator sub1, err := sn1.Subscribe(topic, flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter(), pubsubMessageValidator)) @@ -327,34 +387,52 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { timedCtx, cancel5s := context.WithTimeout(ctx, 60*time.Second) defer cancel5s() - // create a dummy block proposal to publish from our SN node - data1 := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channel) // sn2 publishes the block proposal, sn1 and an1 should receive the message because // SN nodes are authorized to send block proposals - err = sn2.Publish(timedCtx, topic, data1) + // create a dummy block proposal to publish from our SN node + outgoingMessageScope1, err := message.NewOutgoingScope( + flow.IdentifierList{identity1.NodeID, identity2.NodeID}, + topic, + (*messages.Proposal)(unittest.ProposalFixture()), + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + err = sn2.Publish(timedCtx, outgoingMessageScope1) + require.NoError(t, err) + + expectedReceivedData1, err := outgoingMessageScope1.Proto().Marshal() require.NoError(t, err) // sn1 gets the message - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data1, sub1) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData1, sub1) // sn2 also gets the message (as part of the libp2p loopback of published topic messages) - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data1, sub2) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData1, sub2) // an1 also gets the message - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data1, sub3) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData1, sub3) timedCtx, cancel2s := context.WithTimeout(ctx, 2*time.Second) defer cancel2s() - data2 := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channel) // the access node now publishes the block proposal message, AN are not authorized to publish block proposals // the message 
should be rejected by the topic validator on sn1 - err = an1.Publish(timedCtx, topic, data2) + outgoingMessageScope2, err := message.NewOutgoingScope( + flow.IdentifierList{identity1.NodeID, identity2.NodeID}, + topic, + (*messages.Proposal)(unittest.ProposalFixture()), + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + err = an1.Publish(timedCtx, outgoingMessageScope2) + require.NoError(t, err) + + expectedReceivedData2, err := outgoingMessageScope2.Proto().Marshal() require.NoError(t, err) // an1 receives its own message - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data2, sub3) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData2, sub3) var wg sync.WaitGroup @@ -369,39 +447,41 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { p2pfixtures.SubMustNeverReceiveAnyMessage(t, timedCtx, sub2) unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") - - // ensure the correct error is contained in the logged error - require.Contains(t, hook.Logs(), message.ErrUnauthorizedRole.Error()) } // TestAuthorizedSenderValidator_Authorized tests that the authorized sender validator rejects messages being sent on the wrong channel func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - + idProvider := mockmodule.NewIdentityProvider(t) // create a hooked logger logger, hook := unittest.HookedLogger() sporkId := unittest.IdentifierFixture() - sn1, identity1 := p2ptest.NodeFixture(t, sporkId, "consensus_1", p2ptest.WithRole(flow.RoleConsensus)) - sn2, identity2 := p2ptest.NodeFixture(t, sporkId, "consensus_2", p2ptest.WithRole(flow.RoleConsensus)) - + sn1, identity1 := p2ptest.NodeFixture(t, sporkId, "consensus_1", idProvider, p2ptest.WithRole(flow.RoleConsensus)) + sn2, identity2 := p2ptest.NodeFixture(t, sporkId, "consensus_2", idProvider, p2ptest.WithRole(flow.RoleConsensus)) + idProvider.On("ByPeerID", sn1.ID()).Return(&identity1, true).Maybe() + idProvider.On("ByPeerID", sn2.ID()).Return(&identity2, true).Maybe() nodes := []p2p.LibP2PNode{sn1, sn2} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) - // try to publish BlockProposal on invalid SyncCommittee channel + // try to publish UntrustedProposal on invalid SyncCommittee channel channel := channels.SyncCommittee topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2} - translator, err := translator.NewFixedTableIdentityTranslator(ids) + translatorFixture, err := translator.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector()) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(identity2.NodeID, alsp.UnAuthorizedSender) + require.NoError(t, err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(t) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channel, expectedMisbehaviorReport).Once() + violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector(), misbehaviorReportConsumer) getIdentity := func(pid peer.ID) (*flow.Identity, bool) { - fid, err := translator.GetFlowID(pid) + fid, err := translatorFixture.GetFlowID(pid) if 
err != nil { return &flow.Identity{}, false } @@ -411,12 +491,12 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { authorizedSenderValidator := validator.NewAuthorizedSenderValidator(logger, violationsConsumer, getIdentity) pubsubMessageValidator := authorizedSenderValidator.PubSubMessageValidator(channel) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2 // sn1 <-> sn2 - require.NoError(t, sn1.AddPeer(ctx, pInfo2)) + require.NoError(t, sn1.ConnectToPeer(ctx, pInfo2)) // sn1 subscribe to the topic with the topic validator, while sn2 will subscribe without the topic validator to allow sn2 to publish unauthorized messages sub1, err := sn1.Subscribe(topic, flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter(), pubsubMessageValidator)) @@ -429,11 +509,17 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { timedCtx, cancel5s := context.WithTimeout(ctx, 5*time.Second) defer cancel5s() - // create a dummy block proposal to publish from our SN node - data1 := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channel) + // create a dummy block proposal to publish from our SN node // sn2 publishes the block proposal on the sync committee channel - err = sn2.Publish(timedCtx, topic, data1) + outgoingMessageScope1, err := message.NewOutgoingScope( + flow.IdentifierList{identity1.NodeID, identity2.NodeID}, + topic, + (*messages.Proposal)(unittest.ProposalFixture()), + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + err = sn2.Publish(timedCtx, outgoingMessageScope1) require.NoError(t, err) // sn1 should not receive message from sn2 @@ -449,30 +535,40 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { func TestAuthorizedSenderValidator_Ejected(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - + idProvider := mockmodule.NewIdentityProvider(t) // create a hooked logger logger, hook := unittest.HookedLogger() sporkId := unittest.IdentifierFixture() - sn1, identity1 := p2ptest.NodeFixture(t, sporkId, "consensus_1", p2ptest.WithRole(flow.RoleConsensus)) - sn2, identity2 := p2ptest.NodeFixture(t, sporkId, "consensus_2", p2ptest.WithRole(flow.RoleConsensus)) - an1, identity3 := p2ptest.NodeFixture(t, sporkId, "access_1", p2ptest.WithRole(flow.RoleAccess)) - + cfg, err := config.DefaultConfig() + require.NoError(t, err) + // turn off unstaked peer rejection so that nodes can connect + cfg.NetworkConfig.GossipSub.RpcInspector.Validation.InspectionProcess.Inspect.RejectUnstakedPeers = false + sn1, identity1 := p2ptest.NodeFixture(t, sporkId, "consensus_1", idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.OverrideFlowConfig(cfg)) + sn2, identity2 := p2ptest.NodeFixture(t, sporkId, "consensus_2", idProvider, p2ptest.WithRole(flow.RoleConsensus), p2ptest.OverrideFlowConfig(cfg)) + an1, identity3 := p2ptest.NodeFixture(t, sporkId, "access_1", idProvider, p2ptest.WithRole(flow.RoleAccess), p2ptest.OverrideFlowConfig(cfg)) + idProvider.On("ByPeerID", sn1.ID()).Return(&identity1, true).Maybe() + idProvider.On("ByPeerID", sn2.ID()).Return(&identity2, true).Maybe() + idProvider.On("ByPeerID", an1.ID()).Return(&identity3, true).Maybe() nodes := []p2p.LibP2PNode{sn1, sn2, an1} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + 
p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) channel := channels.ConsensusCommittee topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2, &identity3} - translator, err := translator.NewFixedTableIdentityTranslator(ids) + translatorFixture, err := translator.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector()) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(identity2.NodeID, alsp.SenderEjected) + require.NoError(t, err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(t) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channel, expectedMisbehaviorReport).Once() + violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector(), misbehaviorReportConsumer) getIdentity := func(pid peer.ID) (*flow.Identity, bool) { - fid, err := translator.GetFlowID(pid) + fid, err := translatorFixture.GetFlowID(pid) if err != nil { return &flow.Identity{}, false } @@ -482,16 +578,16 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { authorizedSenderValidator := validator.NewAuthorizedSenderValidator(logger, violationsConsumer, getIdentity) pubsubMessageValidator := authorizedSenderValidator.PubSubMessageValidator(channel) - pInfo1, err := utils.PeerAddressInfo(identity1) + pInfo1, err := utils.PeerAddressInfo(identity1.IdentitySkeleton) require.NoError(t, err) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // node1 is connected to node2, and the an1 is connected to node1 // an1 <-> sn1 <-> sn2 - require.NoError(t, sn1.AddPeer(ctx, pInfo2)) - require.NoError(t, an1.AddPeer(ctx, pInfo1)) + require.NoError(t, sn1.ConnectToPeer(ctx, pInfo2)) + require.NoError(t, an1.ConnectToPeer(ctx, pInfo1)) // sn1 subscribe to the topic with the topic validator, while sn2 will subscribe without the topic validator to allow sn2 to publish unauthorized messages sub1, err := sn1.Subscribe(topic, flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter(), pubsubMessageValidator)) @@ -506,29 +602,46 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { timedCtx, cancel5s := context.WithTimeout(ctx, 5*time.Second) defer cancel5s() - // create a dummy block proposal to publish from our SN node - data1 := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channel) // sn2 publishes the block proposal, sn1 and an1 should receive the message because // SN nodes are authorized to send block proposals - err = sn2.Publish(timedCtx, topic, data1) + // create a dummy block proposal to publish from our SN node + outgoingMessageScope1, err := message.NewOutgoingScope( + flow.IdentifierList{identity1.NodeID, identity2.NodeID}, + topic, + (*messages.Proposal)(unittest.ProposalFixture()), + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + err = sn2.Publish(timedCtx, outgoingMessageScope1) + require.NoError(t, err) + + expectedReceivedData1, err := outgoingMessageScope1.Proto().Marshal() require.NoError(t, err) // sn1 gets the message - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data1, sub1) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData1, sub1) // sn2 also gets the message (as part of the libp2p loopback of published topic messages) - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data1, 
sub2) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData1, sub2) // an1 also gets the message - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data1, sub3) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData1, sub3) // "eject" sn2 to ensure messages published by ejected nodes get rejected - identity2.Ejected = true - data3 := p2pfixtures.MustEncodeEvent(t, unittest.ProposalFixture(), channel) + identity2.EpochParticipationStatus = flow.EpochParticipationStatusEjected + + outgoingMessageScope3, err := message.NewOutgoingScope( + flow.IdentifierList{identity1.NodeID, identity2.NodeID}, + topic, + (*messages.Proposal)(unittest.ProposalFixture()), + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) + timedCtx, cancel2s := context.WithTimeout(ctx, time.Second) defer cancel2s() - err = sn2.Publish(timedCtx, topic, data3) + err = sn2.Publish(timedCtx, outgoingMessageScope3) require.NoError(t, err) // sn1 should not receive rejected message from ejected sn2 @@ -544,28 +657,32 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) - + idProvider := mockmodule.NewIdentityProvider(t) sporkId := unittest.IdentifierFixture() - ln1, identity1 := p2ptest.NodeFixture(t, sporkId, "collection_1", p2ptest.WithRole(flow.RoleCollection)) - ln2, identity2 := p2ptest.NodeFixture(t, sporkId, "collection_2", p2ptest.WithRole(flow.RoleCollection)) - ln3, identity3 := p2ptest.NodeFixture(t, sporkId, "collection_3", p2ptest.WithRole(flow.RoleCollection)) - + ln1, identity1 := p2ptest.NodeFixture(t, sporkId, "collection_1", idProvider, p2ptest.WithRole(flow.RoleCollection)) + ln2, identity2 := p2ptest.NodeFixture(t, sporkId, "collection_2", idProvider, p2ptest.WithRole(flow.RoleCollection)) + ln3, identity3 := p2ptest.NodeFixture(t, sporkId, "collection_3", idProvider, p2ptest.WithRole(flow.RoleCollection)) + idProvider.On("ByPeerID", ln1.ID()).Return(&identity1, true).Maybe() + idProvider.On("ByPeerID", ln2.ID()).Return(&identity2, true).Maybe() + idProvider.On("ByPeerID", ln3.ID()).Return(&identity3, true).Maybe() nodes := []p2p.LibP2PNode{ln1, ln2, ln3} - p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) - defer p2ptest.StopNodes(t, nodes, cancel, 100*time.Millisecond) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) channel := channels.SyncCluster(flow.Testnet) topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2, &identity3} - translator, err := translator.NewFixedTableIdentityTranslator(ids) + translatorFixture, err := translator.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) logger := unittest.Logger() - violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector()) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(t) + defer misbehaviorReportConsumer.AssertNotCalled(t, "ReportMisbehaviorOnChannel", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("*alsp.MisbehaviorReport")) + violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector(), misbehaviorReportConsumer) getIdentity := func(pid peer.ID) (*flow.Identity, bool) { - fid, err := translator.GetFlowID(pid) + fid, err := translatorFixture.GetFlowID(pid) if err != nil { return 
&flow.Identity{}, false } @@ -575,15 +692,15 @@ func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { authorizedSenderValidator := validator.NewAuthorizedSenderValidator(logger, violationsConsumer, getIdentity) pubsubMessageValidator := authorizedSenderValidator.PubSubMessageValidator(channel) - pInfo1, err := utils.PeerAddressInfo(identity1) + pInfo1, err := utils.PeerAddressInfo(identity1.IdentitySkeleton) require.NoError(t, err) - pInfo2, err := utils.PeerAddressInfo(identity2) + pInfo2, err := utils.PeerAddressInfo(identity2.IdentitySkeleton) require.NoError(t, err) // ln3 <-> ln1 <-> ln2 - require.NoError(t, ln1.AddPeer(ctx, pInfo2)) - require.NoError(t, ln3.AddPeer(ctx, pInfo1)) + require.NoError(t, ln1.ConnectToPeer(ctx, pInfo2)) + require.NoError(t, ln3.ConnectToPeer(ctx, pInfo1)) sub1, err := ln1.Subscribe(topic, flowpubsub.TopicValidator(logger, unittest.AllowAllPeerFilter(), pubsubMessageValidator)) require.NoError(t, err) @@ -597,19 +714,29 @@ func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { timedCtx, cancel5s := context.WithTimeout(ctx, 5*time.Second) defer cancel5s() + // create a dummy sync request to publish from our LN node - data := p2pfixtures.MustEncodeEvent(t, &messages.RangeRequest{}, channel) + outgoingMessageScope1, err := message.NewOutgoingScope( + flow.IdentifierList{identity1.NodeID, identity2.NodeID}, + topic, + &messages.RangeRequest{}, + unittest.NetworkCodec().Encode, + message.ProtocolTypePubSub) + require.NoError(t, err) // ln2 publishes the sync request on the cluster channel - err = ln2.Publish(timedCtx, topic, data) + err = ln2.Publish(timedCtx, outgoingMessageScope1) + require.NoError(t, err) + + expectedReceivedData1, err := outgoingMessageScope1.Proto().Marshal() require.NoError(t, err) // ln1 gets the message - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data, sub1) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData1, sub1) // ln2 also gets the message (as part of the libp2p loopback of published topic messages) - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data, sub2) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData1, sub2) // ln3 also gets the message - p2pfixtures.SubMustReceiveMessage(t, timedCtx, data, sub3) + p2pfixtures.SubMustReceiveMessage(t, timedCtx, expectedReceivedData1, sub3) } diff --git a/network/p2p/tracer/gossipSubMeshTracer.go b/network/p2p/tracer/gossipSubMeshTracer.go index 7cd4dd2b692..a25f4e717d6 100644 --- a/network/p2p/tracer/gossipSubMeshTracer.go +++ b/network/p2p/tracer/gossipSubMeshTracer.go @@ -8,12 +8,19 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" "github.com/rs/zerolog" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" + p2pconfig "github.com/onflow/flow-go/network/p2p/config" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" + "github.com/onflow/flow-go/network/p2p/tracer/internal" "github.com/onflow/flow-go/utils/logging" ) @@ -23,6 +30,12 @@ const ( // MeshLogIntervalWarnMsg is the message logged by the tracer every logInterval if there are unknown peers in the mesh.
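A note on the test pattern used in both hunks above: instead of publishing pre-encoded bytes, the reworked tests build an outgoing message scope and derive the expected wire payload from the scope itself. A minimal sketch of that pattern, reusing the exact call shape shown in the tests; the helper name expectedPubsubPayload is hypothetical and the positional signature of message.NewOutgoingScope is assumed from its usage above, not quoted from the API:

```go
// expectedPubsubPayload builds an outgoing scope the same way the tests above do and
// returns the marshaled protobuf that subscribers are expected to receive.
func expectedPubsubPayload(t *testing.T, targets flow.IdentifierList, topic channels.Topic, event interface{}) []byte {
	scope, err := message.NewOutgoingScope(
		targets,                        // identifiers of the intended recipients
		topic,                          // pubsub topic the message is published on
		event,                          // the event payload, e.g. &messages.RangeRequest{}
		unittest.NetworkCodec().Encode, // codec used to encode the event
		message.ProtocolTypePubSub)     // protocol type: pubsub (vs. unicast)
	require.NoError(t, err)

	payload, err := scope.Proto().Marshal() // the bytes actually put on the wire
	require.NoError(t, err)
	return payload
}
```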
MeshLogIntervalWarnMsg = "unknown peers in topic mesh peers of local node since last heartbeat" + + // defaultLastHighestIHaveRPCSizeResetInterval is the interval that we reset the tracker of max ihave size sent back + // to a default. We use ihave message max size to determine the health of requested iwants from remote peers. However, + // we don't desire an ihave size anomaly to persist forever, hence, we reset it back to a default every minute. + // The choice of the interval to be a minute is in harmony with the GossipSub decay interval. + defaultLastHighestIHaveRPCSizeResetInterval = time.Minute ) // The GossipSubMeshTracer is a GossipSub pubsub.RawTracer that is designed to track the local @@ -35,31 +48,70 @@ const ( // Additionally, it allows users to configure the logging interval. type GossipSubMeshTracer struct { component.Component - pubsub.RawTracer - - topicMeshMu sync.RWMutex // to protect topicMeshMap - topicMeshMap map[string]map[peer.ID]struct{} // map of local mesh peers by topic. - logger zerolog.Logger - idProvider module.IdentityProvider - loggerInterval time.Duration - metrics module.GossipSubLocalMeshMetrics + topicMeshMu sync.RWMutex // to protect topicMeshMap + topicMeshMap map[string]map[peer.ID]struct{} // map of local mesh peers by topic. + logger zerolog.Logger + idProvider module.IdentityProvider + loggerInterval time.Duration + metrics module.LocalGossipSubRouterMetrics + rpcSentTracker *internal.RPCSentTracker + duplicateMessageTrackerCache *internal.DuplicateMessageTrackerCache } var _ p2p.PubSubTracer = (*GossipSubMeshTracer)(nil) -func NewGossipSubMeshTracer( - logger zerolog.Logger, - metrics module.GossipSubLocalMeshMetrics, - idProvider module.IdentityProvider, - loggerInterval time.Duration) *GossipSubMeshTracer { +type RpcSentTrackerConfig struct { + CacheSize uint32 `validate:"gt=0"` + WorkerQueueCacheSize uint32 `validate:"gt=0"` + WorkerQueueNumber int `validate:"gt=0"` +} + +type DuplicateMessageTrackerCacheConfig struct { + CacheSize uint32 `validate:"gt=0"` + Decay float64 `validate:"gt=0"` +} + +type GossipSubMeshTracerConfig struct { + network.NetworkingType `validate:"required"` + metrics.HeroCacheMetricsFactory `validate:"required"` + Logger zerolog.Logger `validate:"required"` + Metrics module.LocalGossipSubRouterMetrics `validate:"required"` + IDProvider module.IdentityProvider `validate:"required"` + LoggerInterval time.Duration `validate:"required"` + DuplicateMessageTrackerCacheConfig p2pconfig.DuplicateMessageTrackerConfig `validate:"required"` + RpcSentTracker RpcSentTrackerConfig `validate:"required"` +} + +// NewGossipSubMeshTracer creates a new *GossipSubMeshTracer. +// Args: +// - *GossipSubMeshTracerConfig: the mesh tracer config. +// Returns: +// - *GossipSubMeshTracer: new mesh tracer.
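To make the wiring of the new config struct concrete, here is a hypothetical construction before the constructor itself. The field values are illustrative only, and the noop metrics factory and networking-type constant are assumptions for a test setup, not recommended defaults:

```go
// Sketch: assembling a GossipSubMeshTracerConfig. Values are illustrative.
cfg := &GossipSubMeshTracerConfig{
	NetworkingType:          network.PrivateNetwork,                   // assumed networking-type constant
	HeroCacheMetricsFactory: metrics.NewNoopHeroCacheMetricsFactory(), // assumed noop factory for tests
	Logger:                  logger,
	Metrics:                 metrics.NewNoopCollector(),
	IDProvider:              idProvider,
	LoggerInterval:          time.Minute,
	DuplicateMessageTrackerCacheConfig: p2pconfig.DuplicateMessageTrackerConfig{
		CacheSize:          10_000, // illustrative size
		Decay:              0.99,   // illustrative decay factor
		SkipDecayThreshold: 0.1,    // illustrative threshold
	},
	RpcSentTracker: RpcSentTrackerConfig{
		CacheSize:            10_000,
		WorkerQueueCacheSize: 1_000,
		WorkerQueueNumber:    1,
	},
}
meshTracer := NewGossipSubMeshTracer(cfg)
```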
+func NewGossipSubMeshTracer(config *GossipSubMeshTracerConfig) *GossipSubMeshTracer { + lg := config.Logger.With().Str("component", "gossipsub_topology_tracer").Logger() + rpcSentTracker := internal.NewRPCSentTracker(&internal.RPCSentTrackerConfig{ + Logger: lg, + RPCSentCacheSize: config.RpcSentTracker.CacheSize, + RPCSentCacheCollector: metrics.GossipSubRPCSentTrackerMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType), + WorkerQueueCacheCollector: metrics.GossipSubRPCSentTrackerQueueMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType), + WorkerQueueCacheSize: config.RpcSentTracker.WorkerQueueCacheSize, + NumOfWorkers: config.RpcSentTracker.WorkerQueueNumber, + LastHighestIhavesSentResetInterval: defaultLastHighestIHaveRPCSizeResetInterval, + }) g := &GossipSubMeshTracer{ - RawTracer: NewGossipSubNoopTracer(), topicMeshMap: make(map[string]map[peer.ID]struct{}), - idProvider: idProvider, - metrics: metrics, - logger: logger.With().Str("component", "gossip_sub_topology_tracer").Logger(), - loggerInterval: loggerInterval, + idProvider: config.IDProvider, + metrics: config.Metrics, + logger: lg, + loggerInterval: config.LoggerInterval, + rpcSentTracker: rpcSentTracker, + duplicateMessageTrackerCache: internal.NewDuplicateMessageTrackerCache( + config.DuplicateMessageTrackerCacheConfig.CacheSize, + config.DuplicateMessageTrackerCacheConfig.Decay, + config.DuplicateMessageTrackerCacheConfig.SkipDecayThreshold, + config.Logger, + metrics.GossipSubDuplicateMessageTrackerCacheMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType), + ), } g.Component = component.NewComponentManagerBuilder(). @@ -67,29 +119,43 @@ func NewGossipSubMeshTracer( ready() g.logLoop(ctx) }). + AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + lg.Debug().Msg("starting rpc sent tracker") + g.rpcSentTracker.Start(ctx) + lg.Debug().Msg("rpc sent tracker started") + + <-g.rpcSentTracker.Done() + lg.Debug().Msg("rpc sent tracker stopped") + }). Build() return g } -// GetMeshPeers returns the local mesh peers for the given topic. -func (t *GossipSubMeshTracer) GetMeshPeers(topic string) []peer.ID { +// GetLocalMeshPeers returns the local mesh peers for the given topic. +// Args: +// - topic: the topic. +// Returns: +// - []peer.ID: the local mesh peers for the given topic. +func (t *GossipSubMeshTracer) GetLocalMeshPeers(topic channels.Topic) []peer.ID { t.topicMeshMu.RLock() defer t.topicMeshMu.RUnlock() - peers := make([]peer.ID, 0, len(t.topicMeshMap[topic])) - for p := range t.topicMeshMap[topic] { + peers := make([]peer.ID, 0, len(t.topicMeshMap[topic.String()])) + for p := range t.topicMeshMap[topic.String()] { peers = append(peers, p) } return peers } -// Graft is called when a peer is added to a topic mesh. The tracer uses this to track the mesh peers. +// Graft is called by GossipSub when a peer is added to a topic mesh. The tracer uses this to track the mesh peers. 
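The constructor above also illustrates the component-worker pattern used throughout flow-go: a sub-component (here the RPC sent tracker) is started inside a worker, and the worker blocks until the sub-component signals it is done. Reduced to its skeleton, with subComponent standing in for any startable component such as the rpcSentTracker above:

```go
// Sketch of the worker pattern from the constructor above: signal readiness,
// start the sub-component with the signaler context, then block until it stops.
cm := component.NewComponentManagerBuilder().
	AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
		ready()                 // mark this worker ready immediately
		subComponent.Start(ctx) // run the sub-component under the same lifecycle
		<-subComponent.Done()   // hold the worker open until the sub-component shuts down
	}).
	Build()
```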
func (t *GossipSubMeshTracer) Graft(p peer.ID, topic string) { + t.metrics.OnPeerGraftTopic(topic) t.topicMeshMu.Lock() defer t.topicMeshMu.Unlock() - lg := t.logger.With().Str("topic", topic).Str("peer_id", p.String()).Logger() + lg := t.logger.With().Str("topic", topic).Str("peer_id", p2plogging.PeerId(p)).Logger() if _, ok := t.topicMeshMap[topic]; !ok { t.topicMeshMap[topic] = make(map[peer.ID]struct{}) @@ -108,15 +174,16 @@ func (t *GossipSubMeshTracer) Graft(p peer.ID, topic string) { return } - lg.Info().Hex("flow_id", logging.ID(id.NodeID)).Str("role", id.Role.String()).Msg("grafted peer") + lg.Debug().Hex("flow_id", logging.ID(id.NodeID)).Str("role", id.Role.String()).Msg("grafted peer") } -// Prune is called when a peer is removed from a topic mesh. The tracer uses this to track the mesh peers. +// Prune is called by GossipSub when a peer is removed from a topic mesh. The tracer uses this to track the mesh peers. func (t *GossipSubMeshTracer) Prune(p peer.ID, topic string) { + t.metrics.OnPeerPruneTopic(topic) t.topicMeshMu.Lock() defer t.topicMeshMu.Unlock() - lg := t.logger.With().Str("topic", topic).Str("peer_id", p.String()).Logger() + lg := t.logger.With().Str("topic", topic).Str("peer_id", p2plogging.PeerId(p)).Logger() if _, ok := t.topicMeshMap[topic]; !ok { return @@ -136,7 +203,302 @@ func (t *GossipSubMeshTracer) Prune(p peer.ID, topic string) { return } - lg.Info().Hex("flow_id", logging.ID(id.NodeID)).Str("role", id.Role.String()).Msg("pruned peer") + lg.Debug().Hex("flow_id", logging.ID(id.NodeID)).Str("role", id.Role.String()).Msg("pruned peer") +} + +// SendRPC is called by GossipSub when an RPC is sent. Currently, the GossipSubMeshTracer tracks iHave RPC messages that have been sent. +// This function can be updated to track other control messages in the future as required. +func (t *GossipSubMeshTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) { + err := t.rpcSentTracker.Track(rpc) + if err != nil { + t.logger.Err(err).Bool(logging.KeyNetworkingSecurity, true).Msg("failed to track sent pubsub rpc") + } + + msgCount, ihaveCount, iwantCount, graftCount, pruneCount := 0, 0, 0, 0, 0 + if rpc.Control != nil { + ihaveCount = len(rpc.Control.Ihave) + iwantCount = len(rpc.Control.Iwant) + graftCount = len(rpc.Control.Graft) + pruneCount = len(rpc.Control.Prune) + } + msgCount = len(rpc.Publish) + if t.logger.GetLevel() == zerolog.TraceLevel { + t.logger.Trace(). + Str("remote_peer_id", p2plogging.PeerId(p)). + Int("subscription_option_count", len(rpc.Subscriptions)). + Int("publish_message_count", msgCount). + Int("ihave_size", ihaveCount). + Int("iwant_size", iwantCount). + Int("graft_size", graftCount). + Int("prune_size", pruneCount). + Msg("sent pubsub rpc") + } + + t.metrics.OnRpcSent(msgCount, ihaveCount, iwantCount, graftCount, pruneCount) +} + +// AddPeer is called by GossipSub as a callback when a peer is added to the local node on a protocol, i.e., the local node is connected to the peer on a protocol. +// The peer may or may not be subscribed to any topic. +func (t *GossipSubMeshTracer) AddPeer(p peer.ID, proto protocol.ID) { + if t.logger.GetLevel() == zerolog.TraceLevel { + t.logger.Trace(). + Str("remote_peer_id", p2plogging.PeerId(p)). + Str("protocol", string(proto)).
+ Msg("peer added") + } + t.metrics.OnPeerAddedToProtocol(string(proto)) +} + +// RemovePeer is called by GossipSub as a callback when a peer is removed from the local node, +// i.e., the local node is no longer connected to the peer. +func (t *GossipSubMeshTracer) RemovePeer(p peer.ID) { + t.metrics.OnPeerRemovedFromProtocol() + if t.logger.GetLevel() == zerolog.TraceLevel { + t.logger.Trace(). + Str("local_peer_id", p2plogging.PeerId(p)). + Msg("peer removed") + } +} + +// Join is called by GossipSub as a callback when the local node joins a topic. +func (t *GossipSubMeshTracer) Join(topic string) { + t.metrics.OnLocalPeerJoinedTopic() + if t.logger.GetLevel() == zerolog.TraceLevel { + t.logger.Trace(). + Str("topic", topic). + Msg("local peer joined topic") + } +} + +// Leave is called by GossipSub as a callback when the local node leaves a topic. +func (t *GossipSubMeshTracer) Leave(topic string) { + t.metrics.OnLocalPeerLeftTopic() + if t.logger.GetLevel() == zerolog.TraceLevel { + t.logger.Trace(). + Str("topic", topic). + Msg("local peer left topic") + } +} + +// ValidateMessage is called by GossipSub as a callback when a message is received by the local node and entered the validation phase. +// As the result of the validation, the message may be rejected or passed to the application (i.e., Flow protocol). +func (t *GossipSubMeshTracer) ValidateMessage(msg *pubsub.Message) { + size := len(msg.Data) + t.metrics.OnMessageEnteredValidation(size) + + if t.logger.GetLevel() > zerolog.TraceLevel { + return // return fast if we are not logging at trace level + } + + lg := t.logger.With().Logger() + if msg.Topic != nil { + lg = lg.With().Str("topic", *msg.Topic).Logger() + } + from, err := peer.IDFromBytes(msg.From) + if err == nil { + lg = lg.With().Str("remote_peer_id", p2plogging.PeerId(from)).Logger() + } + + lg.Trace(). + Str("received_from", p2plogging.PeerId(msg.ReceivedFrom)). + Int("message_size", size). + Msg("received pubsub message entered validation phase") +} + +// DeliverMessage is called by GossipSub as a callback when the local node delivers a message to all subscribers of the topic. +func (t *GossipSubMeshTracer) DeliverMessage(msg *pubsub.Message) { + size := len(msg.Data) + t.metrics.OnMessageDeliveredToAllSubscribers(size) + + if t.logger.GetLevel() > zerolog.TraceLevel { + return // return fast if we are not logging at trace level + } + + lg := t.logger.With().Logger() + if msg.Topic != nil { + lg = lg.With().Str("topic", *msg.Topic).Logger() + } + from, err := peer.IDFromBytes(msg.From) + if err == nil { + lg = lg.With().Str("remote_peer_id", p2plogging.PeerId(from)).Logger() + } + + lg.Trace(). + Str("received_from", p2plogging.PeerId(msg.ReceivedFrom)). + Int("message_size", len(msg.Data)). + Msg("delivered pubsub message to all subscribers") +} + +// RejectMessage is called by GossipSub as a callback when a message is rejected by the local node. +// The message may be rejected for a variety of reasons, but the most common reason is that the message is invalid with respect to signature. +// Any message that arrives at the local node should contain the peer id of the source (i.e., the peer that created the message), the +// networking public key of the source, and the signature of the message. The local node uses this information to verify the message. +// If any of the information is missing or invalid, the message is rejected. 
+func (t *GossipSubMeshTracer) RejectMessage(msg *pubsub.Message, reason string) { + size := len(msg.Data) + t.metrics.OnMessageRejected(size, reason) + + if t.logger.GetLevel() > zerolog.TraceLevel { + return // return fast if we are not logging at trace level + } + + lg := t.logger.With().Logger() + if msg.Topic != nil { + lg = lg.With().Str("topic", *msg.Topic).Logger() + } + from, err := peer.IDFromBytes(msg.From) + if err == nil { + lg = lg.With().Str("remote_peer_id", p2plogging.PeerId(from)).Logger() + } + + lg.Trace(). + Str("received_from", p2plogging.PeerId(msg.ReceivedFrom)). + Int("message_size", size). + Msg("rejected pubsub message") +} + +// DuplicateMessage is called by GossipSub as a callback when a duplicate message is received by the local node. +func (t *GossipSubMeshTracer) DuplicateMessage(msg *pubsub.Message) { + size := len(msg.Data) + t.metrics.OnMessageDuplicate(size) + + if t.logger.GetLevel() > zerolog.TraceLevel { + return // return fast if we are not logging at trace level + } + + lg := t.logger.With().Logger() + if msg.Topic != nil { + lg = lg.With().Str("topic", *msg.Topic).Logger() + } + from, err := peer.IDFromBytes(msg.From) + if err == nil { + lg = lg.With().Str("remote_peer_id", p2plogging.PeerId(from)).Logger() + } + + count, err := t.duplicateMessageTrackerCache.DuplicateMessageReceived(msg.ReceivedFrom) + if err != nil { + t.logger.Fatal(). + Err(err). + Bool(logging.KeyNetworkingSecurity, true). + Msg("failed to increment gossipsub duplicate message tracker count for peer") + return + } + + lg.Trace(). + Str("received_from", p2plogging.PeerId(msg.ReceivedFrom)). + Int("message_size", size). + Float64("duplicate_message_count", count). + Msg("received duplicate pubsub message") +} + +// ThrottlePeer is called by GossipSub when a peer is throttled by the local node, i.e., the local node is not accepting any +// pubsub message from the peer but may still accept control messages. +func (t *GossipSubMeshTracer) ThrottlePeer(p peer.ID) { + t.logger.Warn(). + Bool(logging.KeyNetworkingSecurity, true). + Str("remote_peer_id", p2plogging.PeerId(p)). + Msg("throttled peer; no longer accepting pubsub messages from peer, but may still accept control messages") + t.metrics.OnPeerThrottled() +} + +// RecvRPC is called by GossipSub as a callback when an inbound RPC message is received by the local node, +// note that the RPC already passed the RPC inspection, hence its statistics may be different from the RPC inspector metrics, as +// the RPC inspector metrics are updated before the RPC inspection, and the RPC may have gone through truncation or rejection. +// This callback tracks the RPC messages as they are completely received by the local GossipSub router. +func (t *GossipSubMeshTracer) RecvRPC(rpc *pubsub.RPC) { + msgCount, ihaveCount, iwantCount, graftCount, pruneCount := 0, 0, 0, 0, 0 + if rpc.Control != nil { + ihaveCount = len(rpc.Control.Ihave) + iwantCount = len(rpc.Control.Iwant) + graftCount = len(rpc.Control.Graft) + pruneCount = len(rpc.Control.Prune) + } + msgCount = len(rpc.Publish) + t.metrics.OnRpcReceived(msgCount, ihaveCount, iwantCount, graftCount, pruneCount) + if t.logger.GetLevel() == zerolog.TraceLevel { + t.logger.Trace(). + Int("subscription_option_count", len(rpc.Subscriptions)). + Int("publish_message_count", msgCount). + Int("ihave_size", ihaveCount). + Int("iwant_size", iwantCount). + Int("graft_size", graftCount). + Int("prune_size", pruneCount).
+ Msg("received pubsub rpc") + } +} + +// DropRPC is called by GossipSub as a callback when an outbound RPC message is dropped by the local node, typically because the local node +// outbound message queue is full; or the RPC is big and the local node cannot fragment it. +func (t *GossipSubMeshTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) { + msgCount, ihaveCount, iwantCount, graftCount, pruneCount := 0, 0, 0, 0, 0 + if rpc.Control != nil { + ihaveCount = len(rpc.Control.Ihave) + iwantCount = len(rpc.Control.Iwant) + graftCount = len(rpc.Control.Graft) + pruneCount = len(rpc.Control.Prune) + } + msgCount = len(rpc.Publish) + t.metrics.OnRpcReceived(msgCount, ihaveCount, iwantCount, graftCount, pruneCount) + if t.logger.GetLevel() == zerolog.TraceLevel { + t.logger.Warn(). + Bool(logging.KeyNetworkingSecurity, true). + Str("remote_peer_id", p2plogging.PeerId(p)). + Int("subscription_option_count", len(rpc.Subscriptions)). + Int("publish_message_count", msgCount). + Int("ihave_size", ihaveCount). + Int("iwant_size", iwantCount). + Int("graft_size", graftCount). + Int("prune_size", pruneCount). + Msg("outbound rpc dropped") + } + t.metrics.OnOutboundRpcDropped() +} + +// UndeliverableMessage is called by GossipSub as a callback when a message is dropped by the local node, typically because the local node +// outbound message queue is full; or the message is big and the local node cannot fragment it. +func (t *GossipSubMeshTracer) UndeliverableMessage(msg *pubsub.Message) { + t.logger.Warn(). + Bool(logging.KeyNetworkingSecurity, true). + Str("topic", *msg.Topic). + Str("remote_peer_id", p2plogging.PeerId(msg.ReceivedFrom)). + Int("message_size", len(msg.Data)). + Msg("undeliverable pubsub message") + t.metrics.OnUndeliveredMessage() +} + +// WasIHaveRPCSent returns true if an iHave control message for the messageID was sent, otherwise false. +func (t *GossipSubMeshTracer) WasIHaveRPCSent(messageID string) bool { + return t.rpcSentTracker.WasIHaveRPCSent(messageID) +} + +// LastHighestIHaveRPCSize returns the last highest RPC iHave message sent. +func (t *GossipSubMeshTracer) LastHighestIHaveRPCSize() int64 { + return t.rpcSentTracker.LastHighestIHaveRPCSize() +} + +// DuplicateMessageCount returns the current duplicate message count for the peer. +func (t *GossipSubMeshTracer) DuplicateMessageCount(peerID peer.ID) float64 { + count, found, err := t.duplicateMessageTrackerCache.GetWithInit(peerID) + if err != nil { + t.logger.Fatal(). + Err(err). + Bool(logging.KeyNetworkingSecurity, true). + Str("peer_id", p2plogging.PeerId(peerID)). + Msg("failed to get duplicate message count for peer") + return 0 + } + if !found { + t.logger.Fatal(). + Err(err). + Bool(logging.KeyNetworkingSecurity, true). + Str("peer_id", peerID.String()). + Msg("failed to initialize duplicate message count for peer during get with init") + return 0 + } + return count } // logLoop logs the mesh peers of the local node for each topic at a regular interval. 
@@ -179,11 +541,11 @@ func (t *GossipSubMeshTracer) logPeers() { if !exists { shouldWarn = true - topicPeers = topicPeers.Str(strconv.Itoa(peerIndex), fmt.Sprintf("pid=%s, flow_id=unknown, role=unknown", p.String())) + topicPeers = topicPeers.Str(strconv.Itoa(peerIndex), fmt.Sprintf("pid=%s, flow_id=unknown, role=unknown", p2plogging.PeerId(p))) continue } - topicPeers = topicPeers.Str(strconv.Itoa(peerIndex), fmt.Sprintf("pid=%s, flow_id=%x, role=%s", p.String(), id.NodeID, id.Role.String())) + topicPeers = topicPeers.Str(strconv.Itoa(peerIndex), fmt.Sprintf("pid=%s, flow_id=%x, role=%s", p2plogging.PeerId(p), id.NodeID, id.Role.String())) } lg := t.logger.With(). @@ -198,6 +560,6 @@ func (t *GossipSubMeshTracer) logPeers() { Msg(MeshLogIntervalWarnMsg) continue } - lg.Info().Msg(MeshLogIntervalMsg) + lg.Debug().Msg(MeshLogIntervalMsg) } } diff --git a/network/p2p/tracer/gossipSubMeshTracer_test.go b/network/p2p/tracer/gossipSubMeshTracer_test.go index 0659885f929..40a5fe76d58 100644 --- a/network/p2p/tracer/gossipSubMeshTracer_test.go +++ b/network/p2p/tracer/gossipSubMeshTracer_test.go @@ -2,7 +2,7 @@ package tracer_test import ( "context" - "os" + "io" "testing" "time" @@ -11,8 +11,10 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/atomic" + "github.com/onflow/flow-go/config" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" @@ -29,21 +31,25 @@ import ( // One of the nodes is running with an unknown peer id, for which the identity provider is mocked to return an error, and // the mesh tracer should log a warning message. func TestGossipSubMeshTracer(t *testing.T) { + defaultConfig, err := config.DefaultConfig() + require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) sporkId := unittest.IdentifierFixture() idProvider := mockmodule.NewIdentityProvider(t) defer cancel() - topic1 := channels.TopicFromChannel(channels.PushBlocks, sporkId) - topic2 := channels.TopicFromChannel(channels.PushReceipts, sporkId) + channel1 := channels.PushBlocks + topic1 := channels.TopicFromChannel(channel1, sporkId) + channel2 := channels.PushReceipts + topic2 := channels.TopicFromChannel(channel2, sporkId) loggerCycle := atomic.NewInt32(0) warnLoggerCycle := atomic.NewInt32(0) // logger hook to count the number of times the meshTracer logs at the interval specified. hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.InfoLevel { + if level == zerolog.DebugLevel { if message == tracer.MeshLogIntervalMsg { loggerCycle.Inc() } @@ -55,57 +61,72 @@ func TestGossipSubMeshTracer(t *testing.T) { } } }) - logger := zerolog.New(os.Stdout).Level(zerolog.InfoLevel).Hook(hook) + logger := zerolog.New(io.Discard).Level(zerolog.DebugLevel).Hook(hook) // creates one node with a gossipsub mesh meshTracer, and the other nodes without a gossipsub mesh meshTracer. // we only need one node with a meshTracer to test the meshTracer. // meshTracer logs at 1 second intervals for sake of testing. - collector := mockmodule.NewGossipSubLocalMeshMetrics(t) - meshTracer := tracer.NewGossipSubMeshTracer(logger, collector, idProvider, 1*time.Second)
+ collector := newLocalMeshTracerMetricsCollector(t) + // set the meshTracer to log at 1 second intervals for sake of testing. + defaultConfig.NetworkConfig.GossipSub.RpcTracer.LocalMeshLogInterval = 1 * time.Second + // disables peer scoring for sake of testing; so that unknown peers are not penalized and could be detected by the meshTracer. + defaultConfig.NetworkConfig.GossipSub.PeerScoringEnabled = false + // disables rejection of RPCs from unstaked peers so that unknown peers could be detected by the meshTracer. + defaultConfig.NetworkConfig.GossipSub.RpcInspector.Validation.InspectionProcess.Inspect.RejectUnstakedPeers = false tracerNode, tracerId := p2ptest.NodeFixture( t, sporkId, t.Name(), - p2ptest.WithGossipSubTracer(meshTracer), + idProvider, + p2ptest.WithLogger(logger), + p2ptest.OverrideFlowConfig(defaultConfig), + p2ptest.WithMetricsCollector(collector), p2ptest.WithRole(flow.RoleConsensus)) - idProvider.On("ByPeerID", tracerNode.Host().ID()).Return(&tracerId, true).Maybe() + idProvider.On("ByPeerID", tracerNode.ID()).Return(&tracerId, true).Maybe() otherNode1, otherId1 := p2ptest.NodeFixture( t, sporkId, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleConsensus)) - idProvider.On("ByPeerID", otherNode1.Host().ID()).Return(&otherId1, true).Maybe() + idProvider.On("ByPeerID", otherNode1.ID()).Return(&otherId1, true).Maybe() otherNode2, otherId2 := p2ptest.NodeFixture( t, sporkId, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleConsensus)) - idProvider.On("ByPeerID", otherNode2.Host().ID()).Return(&otherId2, true).Maybe() + idProvider.On("ByPeerID", otherNode2.ID()).Return(&otherId2, true).Maybe() // create a node that does not have a valid flow identity to test whether mesh tracer logs a warning. unknownNode, unknownId := p2ptest.NodeFixture( t, sporkId, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleConsensus)) - idProvider.On("ByPeerID", unknownNode.Host().ID()).Return(nil, false).Maybe() + idProvider.On("ByPeerID", unknownNode.ID()).Return(nil, false).Maybe() nodes := []p2p.LibP2PNode{tracerNode, otherNode1, otherNode2, unknownNode} ids := flow.IdentityList{&tracerId, &otherId1, &otherId2, &unknownId} - p2ptest.StartNodes(t, signalerCtx, nodes, 1*time.Second) - defer p2ptest.StopNodes(t, nodes, cancel, 1*time.Second) + p2ptest.RegisterPeerProviders(t, nodes) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) // all nodes subscribe to topic1 // for topic 1 expect the meshTracer to be notified of the local mesh size being 1, 2, and 3 (when unknownNode, otherNode1, and otherNode2 join the mesh). - collector.On("OnLocalMeshSizeUpdated", topic1.String(), 1).Twice() // 1 for the first subscription, 1 for the first leave - collector.On("OnLocalMeshSizeUpdated", topic1.String(), 2).Twice() // 1 for the second subscription, 1 for the second leave - collector.On("OnLocalMeshSizeUpdated", topic1.String(), 3).Once() // 3 for the third subscription. + collector.l.On("OnLocalMeshSizeUpdated", topic1.String(), 1).Twice() // 1 for the first subscription, 1 for the first leave + collector.l.On("OnLocalMeshSizeUpdated", topic1.String(), 2).Twice() // 1 for the second subscription, 1 for the second leave + collector.l.On("OnLocalMeshSizeUpdated", topic1.String(), 3).Once() // 3 for the third subscription.
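For readers unfamiliar with the counting trick this test relies on: a zerolog hook fires on every emitted event, so matching on the level and message string gives a cheap way to count how many logging cycles the tracer has completed. The idea in isolation:

```go
// Sketch: counting specific log lines with a zerolog hook and an atomic counter.
cycles := atomic.NewInt32(0)
hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) {
	if level == zerolog.DebugLevel && message == tracer.MeshLogIntervalMsg {
		cycles.Inc() // one tracer logging cycle observed
	}
})
logger := zerolog.New(io.Discard).Level(zerolog.DebugLevel).Hook(hook)
```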
for _, node := range nodes { _, err := node.Subscribe( @@ -118,7 +139,7 @@ func TestGossipSubMeshTracer(t *testing.T) { // the tracerNode and otherNode1 subscribe to topic2 // for topic 2 expect the meshTracer to be notified of the local mesh size being 1 (when otherNode1 joins the mesh). - collector.On("OnLocalMeshSizeUpdated", topic2.String(), 1).Once() + collector.l.On("OnLocalMeshSizeUpdated", topic2.String(), 1).Once() for _, node := range []p2p.LibP2PNode{tracerNode, otherNode1} { _, err := node.Subscribe( @@ -132,15 +153,15 @@ func TestGossipSubMeshTracer(t *testing.T) { // eventually, the meshTracer should have the other nodes in its mesh. assert.Eventually(t, func() bool { topic1MeshSize := 0 - for _, peer := range meshTracer.GetMeshPeers(topic1.String()) { - if peer == otherNode1.Host().ID() || peer == otherNode2.Host().ID() { + for _, peerId := range tracerNode.GetLocalMeshPeers(topic1) { + if peerId == otherNode1.ID() || peerId == otherNode2.ID() { topic1MeshSize++ } } topic2MeshSize := 0 - for _, peer := range meshTracer.GetMeshPeers(topic2.String()) { - if peer == otherNode1.Host().ID() { + for _, peerId := range tracerNode.GetLocalMeshPeers(topic2) { + if peerId == otherNode1.ID() { topic2MeshSize++ } } @@ -154,28 +175,46 @@ func TestGossipSubMeshTracer(t *testing.T) { }, 2*time.Second, 10*time.Millisecond) // expect the meshTracer to be notified of the local mesh size being 0 (when all nodes leave the mesh). - collector.On("OnLocalMeshSizeUpdated", topic1.String(), 0).Once() + collector.l.On("OnLocalMeshSizeUpdated", topic1.String(), 0).Once() // all nodes except the tracerNode unsubscribe from the topic1, which triggers sending a PRUNE to the tracerNode for each unsubscription. // We expect the tracerNode to remove the otherNode1, otherNode2, and unknownNode from its mesh. - require.NoError(t, otherNode1.UnSubscribe(topic1)) - require.NoError(t, otherNode2.UnSubscribe(topic1)) - require.NoError(t, unknownNode.UnSubscribe(topic1)) + require.NoError(t, otherNode1.Unsubscribe(topic1)) + require.NoError(t, otherNode2.Unsubscribe(topic1)) + require.NoError(t, unknownNode.Unsubscribe(topic1)) assert.Eventually(t, func() bool { // eventually, the tracerNode should not have the other node in its mesh for topic1. - for _, peer := range meshTracer.GetMeshPeers(topic1.String()) { - if peer == otherNode1.Host().ID() || peer == otherNode2.Host().ID() || peer == unknownNode.Host().ID() { + for _, peerId := range tracerNode.GetLocalMeshPeers(topic1) { + if peerId == otherNode1.ID() || peerId == otherNode2.ID() || peerId == unknownNode.ID() { return false } } // but the tracerNode should still have the otherNode1 in its mesh for topic2. - for _, peer := range meshTracer.GetMeshPeers(topic2.String()) { - if peer != otherNode1.Host().ID() { + for _, peerId := range tracerNode.GetLocalMeshPeers(topic2) { + if peerId != otherNode1.ID() { return false } } return true }, 2*time.Second, 10*time.Millisecond) } + +// localMeshTracerMetricsCollector is a test collector that mocks LocalGossipSubRouterMetrics while acting as a NoopCollector for all other metrics.
+type localMeshTracerMetricsCollector struct { + *metrics.NoopCollector + l *mockmodule.LocalGossipSubRouterMetrics +} + +func newLocalMeshTracerMetricsCollector(t *testing.T) *localMeshTracerMetricsCollector { + return &localMeshTracerMetricsCollector{ + l: mockmodule.NewLocalGossipSubRouterMetrics(t), + NoopCollector: metrics.NewNoopCollector(), + } +} + +func (c *localMeshTracerMetricsCollector) OnLocalMeshSizeUpdated(topic string, size int) { + // calls the mock method to assert the metrics. + c.l.OnLocalMeshSizeUpdated(topic, size) +} diff --git a/network/p2p/tracer/gossipSubScoreTracer.go b/network/p2p/tracer/gossipSubScoreTracer.go index facdc8bd182..8fc58dc81db 100644 --- a/network/p2p/tracer/gossipSubScoreTracer.go +++ b/network/p2p/tracer/gossipSubScoreTracer.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" "github.com/onflow/flow-go/utils/logging" ) @@ -225,7 +226,7 @@ func (g *GossipSubScoreTracer) logPeerScore(peerID peer.ID) bool { } lg = lg.With(). - Str("peer_id", peerID.String()). + Str("peer_id", p2plogging.PeerId(peerID)). Float64("overall_score", snapshot.Score). Float64("app_specific_score", snapshot.AppSpecificScore). Float64("ip_colocation_factor", snapshot.IPColocationFactor). @@ -255,6 +256,6 @@ func (g *GossipSubScoreTracer) logPeerScore(peerID peer.ID) bool { return true } - lg.Info().Msg(PeerScoreLogMessage) + lg.Debug().Msg(PeerScoreLogMessage) return false } diff --git a/network/p2p/tracer/gossipSubScoreTracer_test.go b/network/p2p/tracer/gossipSubScoreTracer_test.go index 233e3604b6d..04dc3627e3a 100644 --- a/network/p2p/tracer/gossipSubScoreTracer_test.go +++ b/network/p2p/tracer/gossipSubScoreTracer_test.go @@ -2,7 +2,7 @@ package tracer_test import ( "context" - "os" + "io" "testing" "time" @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/atomic" + "github.com/onflow/flow-go/config" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/irrecoverable" @@ -55,13 +56,13 @@ func TestGossipSubScoreTracer(t *testing.T) { // 1. Creates a logger hook to count the number of times the score logs at the interval specified. hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) { - if level == zerolog.InfoLevel { + if level == zerolog.DebugLevel { if message == tracer.PeerScoreLogMessage { loggerCycle.Inc() } } }) - logger := zerolog.New(os.Stdout).Level(zerolog.InfoLevel).Hook(hook) + logger := zerolog.New(io.Discard).Level(zerolog.DebugLevel).Hook(hook) // sets some fixed scores for the nodes for sake of testing based on their roles. consensusScore := float64(87) @@ -72,19 +73,26 @@ func TestGossipSubScoreTracer(t *testing.T) { topic1 := channels.TopicFromChannel(channels.PushBlocks, sporkId) // 3. Creates three nodes with different roles and sets their roles as consensus, access, and tracer, respectively. 
+ cfg, err := config.DefaultConfig() + require.NoError(t, err) + // tracer will update the score and local mesh every 1 second (for testing purposes) + cfg.NetworkConfig.GossipSub.RpcTracer.LocalMeshLogInterval = 1 * time.Second + cfg.NetworkConfig.GossipSub.RpcTracer.ScoreTracerInterval = 1 * time.Second + // the libp2p node updates the subscription list as well as the app-specific score every 10 milliseconds (for testing purposes) + cfg.NetworkConfig.GossipSub.SubscriptionProvider.UpdateInterval = 10 * time.Millisecond + cfg.NetworkConfig.GossipSub.ScoringParameters.ScoringRegistryParameters.AppSpecificScore.ScoreTTL = 10 * time.Millisecond tracerNode, tracerId := p2ptest.NodeFixture( t, sporkId, t.Name(), + idProvider, p2ptest.WithMetricsCollector(&mockPeerScoreMetrics{ NoopCollector: metrics.NoopCollector{}, c: scoreMetrics, }), p2ptest.WithLogger(logger), - p2ptest.WithPeerScoreTracerInterval(1*time.Second), // set the peer score log interval to 1 second for sake of testing. - p2ptest.WithPeerScoringEnabled(idProvider), // enable peer scoring for sake of testing. - // 4. Sets some fixed scores for the nodes for the sake of testing based on their roles. - p2ptest.WithPeerScoreParamsOption(&p2p.PeerScoringConfig{ + p2ptest.OverrideFlowConfig(cfg), + p2ptest.EnablePeerScoringWithOverride(&p2p.PeerScoringConfigOverride{ AppSpecificScoreParams: func(pid peer.ID) float64 { id, ok := idProvider.ByPeerID(pid) require.True(t, ok) @@ -124,28 +132,30 @@ func TestGossipSubScoreTracer(t *testing.T) { }), p2ptest.WithRole(flow.RoleConsensus)) - idProvider.On("ByPeerID", tracerNode.Host().ID()).Return(&tracerId, true).Maybe() + idProvider.On("ByPeerID", tracerNode.ID()).Return(&tracerId, true).Maybe() consensusNode, consensusId := p2ptest.NodeFixture( t, sporkId, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleConsensus)) - idProvider.On("ByPeerID", consensusNode.Host().ID()).Return(&consensusId, true).Maybe() + idProvider.On("ByPeerID", consensusNode.ID()).Return(&consensusId, true).Maybe() accessNode, accessId := p2ptest.NodeFixture( t, sporkId, t.Name(), + idProvider, p2ptest.WithRole(flow.RoleAccess)) - idProvider.On("ByPeerID", accessNode.Host().ID()).Return(&accessId, true).Maybe() + idProvider.On("ByPeerID", accessNode.ID()).Return(&accessId, true).Maybe() nodes := []p2p.LibP2PNode{tracerNode, consensusNode, accessNode} ids := flow.IdentityList{&tracerId, &consensusId, &accessId} // 5. Starts the nodes and lets them discover each other. - p2ptest.StartNodes(t, signalerCtx, nodes, 1*time.Second) - defer p2ptest.StopNodes(t, nodes, cancel, 1*time.Second) + p2ptest.StartNodes(t, signalerCtx, nodes) + defer p2ptest.StopNodes(t, nodes, cancel) p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) @@ -162,7 +172,7 @@ func TestGossipSubScoreTracer(t *testing.T) { scoreMetrics.On("SetWarningStateCount", uint(0)).Return() // 6. Subscribes the nodes to a common topic. - _, err := tracerNode.Subscribe( + _, err = tracerNode.Subscribe( topic1, validator.TopicValidator( unittest.Logger(), @@ -185,55 +195,53 @@ func TestGossipSubScoreTracer(t *testing.T) { // 7. Expects the tracer node to have the correct app scores, a non-zero score, an existing behaviour score, an existing // IP score, and an existing mesh score. - assert.Eventually(t, func() bool { + require.Eventually(t, func() bool { // we expect the tracerNode to have the consensusNodes and accessNodes with the correct app scores. 
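The override above is what pins deterministic scores for the assertions later in the test. Stripped of the test plumbing, the role-based app-specific score function looks like this; consensusScore and accessScore are the fixed values defined earlier in the test:

```go
// Sketch: a role-based app-specific score override, as configured above.
override := &p2p.PeerScoringConfigOverride{
	AppSpecificScoreParams: func(pid peer.ID) float64 {
		id, ok := idProvider.ByPeerID(pid)
		if !ok {
			return 0 // unknown peers receive no app-specific score in this sketch
		}
		switch id.Role {
		case flow.RoleConsensus:
			return consensusScore
		case flow.RoleAccess:
			return accessScore
		default:
			return 0
		}
	},
}
```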
- exposer, ok := tracerNode.PeerScoreExposer() - require.True(t, ok) - - score, ok := exposer.GetAppScore(consensusNode.Host().ID()) + exposer := tracerNode.PeerScoreExposer() + score, ok := exposer.GetAppScore(consensusNode.ID()) if !ok || score != consensusScore { return false } - score, ok = exposer.GetAppScore(accessNode.Host().ID()) + score, ok = exposer.GetAppScore(accessNode.ID()) if !ok || score != accessScore { return false } // we expect the tracerNode to have the consensusNodes and accessNodes with a non-zero score. - score, ok = exposer.GetScore(consensusNode.Host().ID()) + score, ok = exposer.GetScore(consensusNode.ID()) if !ok || score == 0 { return false } - score, ok = exposer.GetScore(accessNode.Host().ID()) + score, ok = exposer.GetScore(accessNode.ID()) if !ok || score == 0 { return false } // we expect the tracerNode to have the consensusNodes and accessNodes with an existing behaviour score and ip score. - _, ok = exposer.GetBehaviourPenalty(consensusNode.Host().ID()) + _, ok = exposer.GetBehaviourPenalty(consensusNode.ID()) if !ok { return false } - _, ok = exposer.GetIPColocationFactor(consensusNode.Host().ID()) + _, ok = exposer.GetIPColocationFactor(consensusNode.ID()) if !ok { return false } - _, ok = exposer.GetBehaviourPenalty(accessNode.Host().ID()) + _, ok = exposer.GetBehaviourPenalty(accessNode.ID()) if !ok { return false } - _, ok = exposer.GetIPColocationFactor(accessNode.Host().ID()) + _, ok = exposer.GetIPColocationFactor(accessNode.ID()) if !ok { return false } // we expect the tracerNode to have the consensusNodes and accessNodes with an existing mesh score. - consensusMeshScores, ok := exposer.GetTopicScores(consensusNode.Host().ID()) + consensusMeshScores, ok := exposer.GetTopicScores(consensusNode.ID()) if !ok { return false } @@ -242,7 +250,7 @@ func TestGossipSubScoreTracer(t *testing.T) { return false } - accessMeshScore, ok := exposer.GetTopicScores(accessNode.Host().ID()) + accessMeshScore, ok := exposer.GetTopicScores(accessNode.ID()) if !ok { return false } diff --git a/network/p2p/tracer/internal/duplicate_msgs_counter_cache.go b/network/p2p/tracer/internal/duplicate_msgs_counter_cache.go new file mode 100644 index 00000000000..0287fb06980 --- /dev/null +++ b/network/p2p/tracer/internal/duplicate_msgs_counter_cache.go @@ -0,0 +1,150 @@ +package internal + +import ( + "fmt" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" + "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" + "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/scoring" +) + +// DuplicateMessageTrackerCache is a cache used to store the current count of duplicate messages detected +// from a peer. This count is utilized to calculate a penalty for duplicate messages, which is then applied +// to the peer's application-specific score. The duplicate message tracker decays over time to prevent perpetual +// penalization of a peer. +// Stored duplicate messages counters are keyed by the hash of the peerID. +type DuplicateMessageTrackerCache struct { + // the in-memory and thread-safe cache for storing the spam records of peers. 
+ c *stdmap.Backend[flow.Identifier, *duplicateMessagesCounter] + decay float64 + // skipDecayThreshold is the threshold below which the counter is reset to 0 rather than decayed further + skipDecayThreshold float64 +} + +// NewDuplicateMessageTrackerCache returns a new HeroCache-based duplicate message counter cache. +// Args: +// +// sizeLimit: the maximum number of entries that can be stored in the cache. +// decay: the record decay. +// skipDecayThreshold: the threshold below which a counter is reset to 0 instead of being decayed. +// logger: the logger to be used by the cache. +// collector: the metrics collector to be used by the cache. +// +// Returns: +// - *DuplicateMessageTrackerCache: the newly created cache with a HeroCache-based backend. +func NewDuplicateMessageTrackerCache(sizeLimit uint32, decay, skipDecayThreshold float64, logger zerolog.Logger, collector module.HeroCacheMetrics) *DuplicateMessageTrackerCache { + backData := herocache.NewCache[*duplicateMessagesCounter]( + sizeLimit, + herocache.DefaultOversizeFactor, + heropool.LRUEjection, + logger.With().Str("mempool", "gossipsub-duplicate-message-counter-cache").Logger(), + collector, + ) + return &DuplicateMessageTrackerCache{ + decay: decay, + skipDecayThreshold: skipDecayThreshold, + c: stdmap.NewBackend(stdmap.WithMutableBackData[flow.Identifier, *duplicateMessagesCounter](backData)), + } +} + +// DuplicateMessageReceived applies an adjustment that increments the number of duplicate messages received by a peer. +// Returns the number of duplicate messages received after the adjustment. The record is initialized before +// the adjustment func is applied that will increment the counter value. +// - exception only in cases of internal data inconsistency or bugs. No errors are expected. +func (d *DuplicateMessageTrackerCache) DuplicateMessageReceived(peerID peer.ID) (float64, error) { + var err error + adjustFunc := func(counter *duplicateMessagesCounter) *duplicateMessagesCounter { + counter, err = d.decayAdjustment(counter) // first decay the record + if err != nil { + return counter + } + return d.incrementAdjustment(counter) // then increment the record + } + + adjustedCounter, adjusted := d.c.AdjustWithInit(p2p.MakeId(peerID), adjustFunc, func() *duplicateMessagesCounter { + return newDuplicateMessagesCounter() + }) + + if err != nil { + return 0, fmt.Errorf("unexpected error while applying decay and increment adjustments for peer %s: %w", peerID, err) + } + + if !adjusted { + return 0, fmt.Errorf("adjustment failed for peer %s", peerID) + } + + return adjustedCounter.Value, nil +} + +// GetWithInit returns the current number of duplicate messages received from a peer. +// The record is initialized before the count is returned. +// Before the counter value is returned it is decayed using the configured decay function. +// Returns the counter value and true if the record exists, 0 and false otherwise. +// Args: +// - peerID: peerID of the remote peer. +// Returns: +// - The duplicate messages counter value after the decay and true if the record exists, 0 and false otherwise. +// No errors are expected during normal operation, all errors returned are considered irrecoverable.
+func (d *DuplicateMessageTrackerCache) GetWithInit(peerID peer.ID) (float64, bool, error) { + var err error + adjustLogic := func(counter *duplicateMessagesCounter) *duplicateMessagesCounter { + // perform decay on gauge value + counter, err = d.decayAdjustment(counter) + return counter + } + + adjustedCounter, adjusted := d.c.AdjustWithInit(p2p.MakeId(peerID), adjustLogic, func() *duplicateMessagesCounter { + return newDuplicateMessagesCounter() + }) + if err != nil { + return 0, false, fmt.Errorf("unexpected error while applying decay adjustment for peer %s: %w", peerID, err) + } + if !adjusted { + return 0, false, fmt.Errorf("decay adjustment failed for peer %s", peerID) + } + + return adjustedCounter.Value, true, nil +} + +// incrementAdjustment performs a cache adjustment that increments the gauge for the duplicateMessagesCounter +func (d *DuplicateMessageTrackerCache) incrementAdjustment(counter *duplicateMessagesCounter) *duplicateMessagesCounter { + counter.Value++ + counter.lastUpdated = time.Now() + // Return the adjusted counter. + return counter +} + +// decayAdjustment performs geometric decay on the duplicate message counter gauge of a peer. This ensures a peer is not penalized forever. +// All errors returned from this function are unexpected and irrecoverable. +func (d *DuplicateMessageTrackerCache) decayAdjustment(counter *duplicateMessagesCounter) (*duplicateMessagesCounter, error) { + duplicateMessages := counter.Value + if duplicateMessages == 0 { + return counter, nil + } + + if duplicateMessages < d.skipDecayThreshold { + counter.Value = 0 + return counter, nil + } + + decayedVal, err := scoring.GeometricDecay(duplicateMessages, d.decay, counter.lastUpdated) + if err != nil { + return counter, fmt.Errorf("could not decay duplicate message counter: %w", err) + } + + if decayedVal > duplicateMessages { + return counter, fmt.Errorf("unexpected decay value %f for duplicate message counter gauge %f", decayedVal, duplicateMessages) + } + + counter.Value = decayedVal + counter.lastUpdated = time.Now() + // Return the adjusted counter. + return counter, nil +} diff --git a/network/p2p/tracer/internal/duplicate_msgs_counter_cache_test.go b/network/p2p/tracer/internal/duplicate_msgs_counter_cache_test.go new file mode 100644 index 00000000000..6e53d89ec1f --- /dev/null +++ b/network/p2p/tracer/internal/duplicate_msgs_counter_cache_test.go @@ -0,0 +1,229 @@ +package internal + +import ( + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/utils/unittest" +) + +const defaultDecay = .99 +const defaultSkipDecayThreshold = 0.1 + +// TestDuplicateMessageTrackerCache_Init tests the GetWithInit method of the DuplicateMessageTrackerCache. +// It ensures that a record is initialized on first access, +// and that subsequent accesses return the same record.
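decayAdjustment above delegates the arithmetic to scoring.GeometricDecay. Assuming that function implements plain exponential decay over the elapsed wall-clock time since the last update (an assumption about its semantics, not a quote of the scoring package), the computation is equivalent to:

```go
// geometricDecaySketch mirrors the assumed behavior of scoring.GeometricDecay:
// value * decay^(seconds elapsed since lastUpdated). With decay = 0.99, a counter
// of 1.0 falls below the 0.1 skip threshold after roughly 230 seconds.
func geometricDecaySketch(value, decay float64, lastUpdated time.Time) float64 {
	elapsed := time.Since(lastUpdated).Seconds()
	return value * math.Pow(decay, elapsed)
}
```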
+func TestDuplicateMessageTrackerCache_Init(t *testing.T) { + cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector()) + + peerID1 := unittest.PeerIdFixture(t) + peerID2 := unittest.PeerIdFixture(t) + + // test initializing a record for a node ID that doesn't exist in the cache + gauge, ok, err := cache.GetWithInit(peerID1) + require.NoError(t, err) + require.True(t, ok, "expected record to exist") + require.Zerof(t, gauge, "expected gauge to be 0") + require.Equal(t, uint(1), cache.c.Size(), "expected cache to have one additional record") + + // test initializing a record for a node ID that already exists in the cache + gaugeAgain, ok, err := cache.GetWithInit(peerID1) + require.NoError(t, err) + require.True(t, ok, "expected record to still exist") + require.Zerof(t, gaugeAgain, "expected same gauge to be 0") + require.Equal(t, gauge, gaugeAgain, "expected records to be the same") + require.Equal(t, uint(1), cache.c.Size(), "expected cache to still have one additional record") + + // test initializing a record for another node ID + gauge2, ok, err := cache.GetWithInit(peerID2) + require.NoError(t, err) + require.True(t, ok, "expected record to exist") + require.Zerof(t, gauge2, "expected second gauge to be 0") + require.Equal(t, uint(2), cache.c.Size(), "expected cache to have two additional records") +} + +// TestDuplicateMessageTrackerCache_ConcurrentInit tests the concurrent initialization of records. +// The test covers the following scenarios: +// 1. Multiple goroutines initializing records for different node IDs. +// 2. Ensuring that all records are correctly initialized. +func TestDuplicateMessageTrackerCache_ConcurrentInit(t *testing.T) { + cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector()) + + peerIDs := unittest.PeerIdFixtures(t, 10) + + var wg sync.WaitGroup + wg.Add(len(peerIDs)) + + for _, peerID := range peerIDs { + go func(id peer.ID) { + defer wg.Done() + gauge, found, err := cache.GetWithInit(id) + require.NoError(t, err) + require.True(t, found) + require.Zerof(t, gauge, "expected all gauge values to be initialized to 0") + }(peerID) + } + + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") +} + +// TestDuplicateMessageTrackerCache_ConcurrentSameRecordInit tests the concurrent initialization of the same record. +// The test covers the following scenarios: +// 1. Multiple goroutines attempting to initialize the same record concurrently. +// 2. Only one goroutine successfully initializes the record, and others receive false on initialization. +// 3. The record is correctly initialized in the cache and can be retrieved using the GetWithInit method.
+func TestDuplicateMessageTrackerCache_ConcurrentSameRecordInit(t *testing.T) { + cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector()) + + peerID := unittest.PeerIdFixture(t) + const concurrentAttempts = 10 + + var wg sync.WaitGroup + wg.Add(concurrentAttempts) + + for i := 0; i < concurrentAttempts; i++ { + go func() { + defer wg.Done() + gauge, found, err := cache.GetWithInit(peerID) + require.NoError(t, err) + require.True(t, found) + require.Zero(t, gauge) + }() + } + + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + + // ensure that only one goroutine successfully initialized the record + require.Equal(t, uint(1), cache.c.Size()) +} + +// TestDuplicateMessageTrackerCache_DuplicateMessageReceived tests the DuplicateMessageReceived method of the DuplicateMessageTrackerCache. +// The test covers the following scenarios: +// 1. Updating a record gauge for an existing peer ID. +// 2. Attempting to update a record gauge for a non-existing peer ID should not result in error. DuplicateMessageReceived should always attempt to initialize the gauge. +// 3. Multiple updates on the same record only initialize the record once. +func TestDuplicateMessageTrackerCache_DuplicateMessageReceived(t *testing.T) { + cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector()) + + peerID1 := unittest.PeerIdFixture(t) + peerID2 := unittest.PeerIdFixture(t) + + gauge, err := cache.DuplicateMessageReceived(peerID1) + require.NoError(t, err) + require.Equal(t, float64(1), gauge) + + // get will apply a slight decay, resulting + // in a gauge value less than 1 but greater than 0.9 + currentGauge, ok, err := cache.GetWithInit(peerID1) + require.NoError(t, err) + require.True(t, ok) + require.LessOrEqual(t, currentGauge, gauge) + require.Greater(t, currentGauge, 0.9) + + _, ok, err = cache.GetWithInit(peerID2) + require.NoError(t, err) + require.True(t, ok) + + // test adjusting the duplicate message counter for a non-existing node ID + peerID3 := unittest.PeerIdFixture(t) + gauge3, err := cache.DuplicateMessageReceived(peerID3) + require.NoError(t, err) + require.Equal(t, float64(1), gauge3) + + // when updated the value should be incremented from 1 -> 2 and slightly decayed resulting + // in a gauge value less than 2 but greater than 1.9 + gauge3, err = cache.DuplicateMessageReceived(peerID3) + require.NoError(t, err) + require.LessOrEqual(t, gauge3, 2.0) + require.Greater(t, gauge3, 1.9) +} + +// TestDuplicateMessageTrackerCache_ConcurrentDuplicateMessageReceived tests the concurrent adjustments and reads of records for different +// node IDs. The test covers the following scenarios: +// 1. Multiple goroutines adjusting records for different peer IDs concurrently. +// 2. Multiple goroutines getting records for different peer IDs concurrently. +// 3. The adjusted records are correctly updated in the cache. +// 4. Ensure records are decayed as expected.
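One behavior these tests rely on implicitly is the skip-decay threshold: once decay drags a counter below skipDecayThreshold, decayAdjustment resets it straight to zero instead of letting it shrink asymptotically forever. A tiny numeric illustration under the same per-second decay assumption as the sketch above:

```go
// With decay 0.99 per second and threshold 0.1, a counter of 1.0 decays for
// about 230 seconds, dips under 0.1, and the next adjustment zeroes it outright.
counter := 1.0
for seconds := 0; counter >= defaultSkipDecayThreshold; seconds++ {
	counter *= defaultDecay // one second of decay per iteration (sketch)
}
counter = 0 // decayAdjustment snaps sub-threshold counters to exactly zero
```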
+func TestDuplicateMessageTrackerCache_ConcurrentDuplicateMessageReceived(t *testing.T) { + cache := duplicateMessageTrackerCacheFixture(t, 100, defaultDecay, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector()) + + peerIDs := unittest.PeerIdFixtures(t, 10) + for _, peerID := range peerIDs { + _, ok, err := cache.GetWithInit(peerID) + require.NoError(t, err) + require.True(t, ok) + } + + var wg sync.WaitGroup + wg.Add(len(peerIDs) * 2) + + for _, peerID := range peerIDs { + // adjust duplicate message counters concurrently + go func(id peer.ID) { + defer wg.Done() + _, err := cache.DuplicateMessageReceived(id) + require.NoError(t, err) + }(peerID) + + // get duplicate message counters concurrently + go func(id peer.ID) { + defer wg.Done() + _, found, err := cache.GetWithInit(id) + require.NoError(t, err) + require.True(t, found) + }(peerID) + } + + unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish") + + // ensure that the records are correctly updated in the cache + for _, nodeID := range peerIDs { + gauge, found, err := cache.GetWithInit(nodeID) + require.NoError(t, err) + require.True(t, found) + // slight decay will result in 0.9 < gauge < 1 + require.LessOrEqual(t, gauge, 1.0) + require.Greater(t, gauge, 0.9) + } +} + +// TestDuplicateMessageTrackerCache_Decay ensures that a counter value in the record cache is eventually decayed back to 0 after some time. +func TestDuplicateMessageTrackerCache_Decay(t *testing.T) { + cache := duplicateMessageTrackerCacheFixture(t, 100, 0.09, defaultSkipDecayThreshold, zerolog.Nop(), metrics.NewNoopCollector()) + + peerID := unittest.PeerIdFixture(t) + + // initialize the duplicate message counter for peerID + gauge, err := cache.DuplicateMessageReceived(peerID) + require.Equal(t, float64(1), gauge) + require.NoError(t, err) + gauge, ok, err := cache.GetWithInit(peerID) + require.True(t, ok) + require.NoError(t, err) + // gauge should have been decayed slightly + require.True(t, gauge < float64(1)) + + time.Sleep(time.Second) + + gauge, ok, err = cache.GetWithInit(peerID) + require.True(t, ok) + require.NoError(t, err) + // gauge should have been decayed further, now close to 0 + require.Less(t, gauge, 0.1) +} + +// duplicateMessageTrackerCacheFixture returns a new *DuplicateMessageTrackerCache. +func duplicateMessageTrackerCacheFixture(t *testing.T, sizeLimit uint32, decay, skipDecayThreshold float64, logger zerolog.Logger, collector module.HeroCacheMetrics) *DuplicateMessageTrackerCache { + r := NewDuplicateMessageTrackerCache(sizeLimit, decay, skipDecayThreshold, logger, collector) + // expect cache to be empty + require.Equalf(t, uint(0), r.c.Size(), "cache size must be 0") + require.NotNil(t, r) + return r +} diff --git a/network/p2p/tracer/internal/duplicate_msgs_counter_entity.go b/network/p2p/tracer/internal/duplicate_msgs_counter_entity.go new file mode 100644 index 00000000000..a2024987857 --- /dev/null +++ b/network/p2p/tracer/internal/duplicate_msgs_counter_entity.go @@ -0,0 +1,19 @@ +package internal + +import ( + "time" +) + +// duplicateMessagesCounter is a cache record that keeps track of the number of duplicate messages received from a peer. +type duplicateMessagesCounter struct { + // Value the number of duplicate messages.
+	Value       float64
+	lastUpdated time.Time
+}
+
+func newDuplicateMessagesCounter() *duplicateMessagesCounter {
+	return &duplicateMessagesCounter{
+		Value:       0.0,
+		lastUpdated: time.Now(),
+	}
+}
diff --git a/network/p2p/tracer/internal/rpc_sent_cache.go b/network/p2p/tracer/internal/rpc_sent_cache.go
new file mode 100644
index 00000000000..25bd15eb727
--- /dev/null
+++ b/network/p2p/tracer/internal/rpc_sent_cache.go
@@ -0,0 +1,82 @@
+package internal
+
+import (
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata"
+	"github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
+	"github.com/onflow/flow-go/module/mempool/stdmap"
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+)
+
+// rpcCtrlMsgSentCacheConfig is the configuration for the rpc sent cache.
+type rpcCtrlMsgSentCacheConfig struct {
+	logger    zerolog.Logger
+	sizeLimit uint32
+	collector module.HeroCacheMetrics
+}
+
+// rpcSentCache is a cache that stores the ControlMessageType of RPC control messages sent from the local node,
+// keyed by an ID derived from the message ID and the control message type.
+type rpcSentCache struct {
+	// c is the underlying cache.
+	c *stdmap.Backend[flow.Identifier, p2pmsg.ControlMessageType]
+}
+
+// newRPCSentCache creates a new *rpcSentCache.
+// Args:
+// - config: record cache config.
+// Returns:
+// - *rpcSentCache: the created cache.
+// Note that this cache is intended to track control messages sent by the local node;
+// it stores a ControlMessageType using a rpcSentID, which uniquely identifies the message being tracked.
+func newRPCSentCache(config *rpcCtrlMsgSentCacheConfig) *rpcSentCache {
+	backData := herocache.NewCache[p2pmsg.ControlMessageType](
+		config.sizeLimit,
+		herocache.DefaultOversizeFactor,
+		heropool.LRUEjection,
+		config.logger.With().Str("mempool", "gossipsub-rpc-control-messages-sent").Logger(),
+		config.collector,
+	)
+	return &rpcSentCache{
+		c: stdmap.NewBackend(stdmap.WithMutableBackData[flow.Identifier, p2pmsg.ControlMessageType](backData)),
+	}
+}
+
+// add initializes the record for the given key (rpcSentID) if it does not exist.
+// Returns true if the record is initialized, false otherwise (i.e.: the record already exists).
+// Args:
+// - messageId: the message ID.
+// - controlMsgType: the rpc control message type.
+// Returns:
+// - bool: true if the record is initialized, false otherwise (i.e.: the record already exists).
+// Note that if add is called multiple times for the same key (rpcSentID), the record is initialized only once, and the
+// subsequent calls return false and do not change the record (i.e.: the record is not re-initialized).
+func (r *rpcSentCache) add(messageId string, controlMsgType p2pmsg.ControlMessageType) bool {
+	return r.c.Add(r.rpcSentID(messageId, controlMsgType), controlMsgType)
+}
+
+// has checks if the RPC message has been cached, indicating it has been sent.
+// Args:
+// - messageId: the message ID.
+// - controlMsgType: the rpc control message type.
+// Returns:
+// - bool: true if the RPC has been cached, indicating it was sent from the local node.
+func (r *rpcSentCache) has(messageId string, controlMsgType p2pmsg.ControlMessageType) bool {
+	return r.c.Has(r.rpcSentID(messageId, controlMsgType))
+}
+
+// size returns the number of records in the cache.
+func (r *rpcSentCache) size() uint {
+	return r.c.Size()
+}
+
+// rpcSentID creates an ID from the messageID and control message type.
+// Args:
+// - messageId: the message ID.
+// - controlMsgType: the rpc control message type.
+func (r *rpcSentCache) rpcSentID(messageId string, controlMsgType p2pmsg.ControlMessageType) flow.Identifier {
+	return flow.MakeIDFromFingerPrint([]byte(messageId + string(controlMsgType)))
+}
diff --git a/network/p2p/tracer/internal/rpc_sent_cache_test.go b/network/p2p/tracer/internal/rpc_sent_cache_test.go
new file mode 100644
index 00000000000..91cdeda6df3
--- /dev/null
+++ b/network/p2p/tracer/internal/rpc_sent_cache_test.go
@@ -0,0 +1,118 @@
+package internal
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestCache_Add tests the add method of the rpcSentCache.
+// It ensures that the method returns true when a new record is added
+// and false when the record already exists.
+func TestCache_Add(t *testing.T) {
+	cache := rpcSentCacheFixture(t, 100, zerolog.Nop(), metrics.NewNoopCollector())
+	controlMsgType := p2pmsg.CtrlMsgIHave
+	messageID1 := unittest.IdentifierFixture().String()
+	messageID2 := unittest.IdentifierFixture().String()
+
+	// test initializing a record for an ID that doesn't exist in the cache
+	initialized := cache.add(messageID1, controlMsgType)
+	require.True(t, initialized, "expected record to be initialized")
+	require.True(t, cache.has(messageID1, controlMsgType), "expected record to exist")
+
+	// test initializing a record for an ID that already exists in the cache
+	initialized = cache.add(messageID1, controlMsgType)
+	require.False(t, initialized, "expected record not to be initialized")
+	require.True(t, cache.has(messageID1, controlMsgType), "expected record to exist")
+
+	// test initializing a record for another ID
+	initialized = cache.add(messageID2, controlMsgType)
+	require.True(t, initialized, "expected record to be initialized")
+	require.True(t, cache.has(messageID2, controlMsgType), "expected record to exist")
+}
+
+// TestCache_ConcurrentAdd tests the concurrent addition of records.
+// The test covers the following scenarios:
+// 1. Multiple goroutines adding records for different ids.
+// 2. Ensuring that all records are correctly added.
+func TestCache_ConcurrentAdd(t *testing.T) {
+	cache := rpcSentCacheFixture(t, 100, zerolog.Nop(), metrics.NewNoopCollector())
+	controlMsgType := p2pmsg.CtrlMsgIHave
+	messageIds := unittest.IdentifierListFixture(10)
+
+	var wg sync.WaitGroup
+	wg.Add(len(messageIds))
+
+	for _, id := range messageIds {
+		go func(id flow.Identifier) {
+			defer wg.Done()
+			cache.add(id.String(), controlMsgType)
+		}(id)
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+
+	// ensure that all records are correctly initialized
+	for _, id := range messageIds {
+		require.True(t, cache.has(id.String(), controlMsgType))
+	}
+}
+
+// TestCache_ConcurrentSameRecordAdd tests the concurrent addition of the same record.
+// The test covers the following scenarios:
+// 1. Multiple goroutines attempting to add the same record concurrently.
+// 2. Only one goroutine successfully adds the record, and others receive false on addition.
+// 3. The record is correctly added to the cache and can be queried using the has method.
+func TestCache_ConcurrentSameRecordAdd(t *testing.T) {
+	cache := rpcSentCacheFixture(t, 100, zerolog.Nop(), metrics.NewNoopCollector())
+	controlMsgType := p2pmsg.CtrlMsgIHave
+	messageID := unittest.IdentifierFixture().String()
+	const concurrentAttempts = 10
+
+	var wg sync.WaitGroup
+	wg.Add(concurrentAttempts)
+
+	successGauge := atomic.Int32{}
+
+	for i := 0; i < concurrentAttempts; i++ {
+		go func() {
+			defer wg.Done()
+			initSuccess := cache.add(messageID, controlMsgType)
+			if initSuccess {
+				successGauge.Inc()
+			}
+		}()
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+
+	// ensure that only one goroutine successfully initialized the record
+	require.Equal(t, int32(1), successGauge.Load())
+
+	// ensure that the record is correctly initialized in the cache
+	require.True(t, cache.has(messageID, controlMsgType))
+}
+
+// rpcSentCacheFixture returns a new *rpcSentCache.
+func rpcSentCacheFixture(t *testing.T, sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *rpcSentCache {
+	config := &rpcCtrlMsgSentCacheConfig{
+		sizeLimit: sizeLimit,
+		logger:    logger,
+		collector: collector,
+	}
+	r := newRPCSentCache(config)
+	require.NotNil(t, r)
+	// expect cache to be empty
+	require.Equalf(t, uint(0), r.size(), "cache size must be 0")
+	return r
+}
diff --git a/network/p2p/tracer/internal/rpc_sent_tracker.go b/network/p2p/tracer/internal/rpc_sent_tracker.go
new file mode 100644
index 00000000000..fb0e766fea7
--- /dev/null
+++ b/network/p2p/tracer/internal/rpc_sent_tracker.go
@@ -0,0 +1,189 @@
+package internal
+
+import (
+	"crypto/rand"
+	"fmt"
+	"sync"
+	"time"
+
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	pb "github.com/libp2p/go-libp2p-pubsub/pb"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine/common/worker"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/component"
+	"github.com/onflow/flow-go/module/mempool/queue"
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+)
+
+const (
+	iHaveRPCTrackedLog = "ihave rpc tracked successfully"
+)
+
+// trackableRPC is an internal data structure for temporarily storing *pubsub.RPC sent in the queue before they are processed
+// by the *RPCSentTracker.
+type trackableRPC struct {
+	// Nonce prevents deduplication in the hero store
+	Nonce []byte
+	rpc   *pubsub.RPC
+}
+
+// lastHighestIHaveRPCSize tracks the last highest rpc control message size and the time stamp at which it was last updated.
+type lastHighestIHaveRPCSize struct {
+	sync.RWMutex
+	lastSize   int64
+	lastUpdate time.Time
+}
+
+// RPCSentTracker tracks RPC messages and the size of the last largest iHave rpc control message sent.
+type RPCSentTracker struct {
+	component.Component
+	*lastHighestIHaveRPCSize
+	logger                               zerolog.Logger
+	cache                                *rpcSentCache
+	workerPool                           *worker.Pool[trackableRPC]
+	lastHighestIHaveRPCSizeResetInterval time.Duration
+}
+
+// RPCSentTrackerConfig is the configuration for the RPCSentTracker.
+type RPCSentTrackerConfig struct {
+	Logger zerolog.Logger
+	// RPCSentCacheSize size of the *rpcSentCache cache.
+	RPCSentCacheSize uint32
+	// RPCSentCacheCollector metrics collector for the *rpcSentCache cache.
+	RPCSentCacheCollector module.HeroCacheMetrics
+	// WorkerQueueCacheCollector metrics collector for the worker pool.
+	WorkerQueueCacheCollector module.HeroCacheMetrics
+	// WorkerQueueCacheSize the size of the worker pool's hero store cache.
+	WorkerQueueCacheSize uint32
+	// NumOfWorkers number of workers in the worker pool.
+	NumOfWorkers int
+	// LastHighestIhavesSentResetInterval the refresh interval used to reset the lastHighestIHaveRPCSize.
+	LastHighestIhavesSentResetInterval time.Duration
}
+
+// NewRPCSentTracker returns a new *RPCSentTracker.
+func NewRPCSentTracker(config *RPCSentTrackerConfig) *RPCSentTracker {
+	cacheConfig := &rpcCtrlMsgSentCacheConfig{
+		sizeLimit: config.RPCSentCacheSize,
+		logger:    config.Logger,
+		collector: config.RPCSentCacheCollector,
+	}
+
+	store := queue.NewHeroStore(
+		config.WorkerQueueCacheSize,
+		config.Logger,
+		config.WorkerQueueCacheCollector)
+
+	tracker := &RPCSentTracker{
+		logger:                               config.Logger.With().Str("component", "rpc_sent_tracker").Logger(),
+		lastHighestIHaveRPCSize:              &lastHighestIHaveRPCSize{sync.RWMutex{}, 0, time.Now()},
+		cache:                                newRPCSentCache(cacheConfig),
+		lastHighestIHaveRPCSizeResetInterval: config.LastHighestIhavesSentResetInterval,
+	}
+	tracker.workerPool = worker.NewWorkerPoolBuilder[trackableRPC](
+		config.Logger,
+		store,
+		tracker.rpcSentWorkerLogic).Build()
+
+	builder := component.NewComponentManagerBuilder()
+	for i := 0; i < config.NumOfWorkers; i++ {
+		builder.AddWorker(tracker.workerPool.WorkerLogic())
+	}
+	tracker.Component = builder.Build()
+	return tracker
+}
+
+// Track submits the control message to the worker queue for async tracking.
+// Args:
+// - *pubsub.RPC: the rpc sent.
+// All errors returned from this function can be considered benign.
+func (t *RPCSentTracker) Track(rpc *pubsub.RPC) error {
+	n, err := nonce()
+	if err != nil {
+		return fmt.Errorf("failed to get track rpc work nonce: %w", err)
+	}
+
+	if ok := t.workerPool.Submit(trackableRPC{Nonce: n, rpc: rpc}); !ok {
+		return fmt.Errorf("failed to track RPC: could not submit work to worker pool")
+	}
+	return nil
+}
+
+// rpcSentWorkerLogic tracks control messages sent in *pubsub.RPC.
+func (t *RPCSentTracker) rpcSentWorkerLogic(work trackableRPC) error {
+	switch {
+	case len(work.rpc.GetControl().GetIhave()) > 0:
+		iHave := work.rpc.GetControl().GetIhave()
+		numOfMessageIdsTracked := t.iHaveRPCSent(iHave)
+		lastHighestIHaveCount := t.updateLastHighestIHaveRPCSize(int64(numOfMessageIdsTracked))
+		t.logger.Debug().
+			Int("num_of_ihaves", len(iHave)).
+			Int("num_of_message_ids", numOfMessageIdsTracked).
+			Int64("last_highest_ihave_count", lastHighestIHaveCount).
+			Msg(iHaveRPCTrackedLog)
+	}
+	return nil
+}
+
+// updateLastHighestIHaveRPCSize updates the last highest size if the provided size is larger than the current last highest, or if the reset interval has passed.
+// Args:
+// - size: size that was cached.
+// Returns:
+// - int64: the last highest size.
+func (t *RPCSentTracker) updateLastHighestIHaveRPCSize(size int64) int64 {
+	t.Lock()
+	defer t.Unlock()
+	if t.lastSize < size || time.Since(t.lastUpdate) > t.lastHighestIHaveRPCSizeResetInterval {
+		// the last highest ihave RPC size is updated if the new size is larger than the current size, or if the time elapsed since the last update surpasses the reset interval.
+		t.lastSize = size
+		t.lastUpdate = time.Now()
+	}
+	return t.lastSize
+}
+
+// iHaveRPCSent caches an entry for each message ID included in each rpc iHave control message.
+// Args:
+// - []*pb.ControlIHave: list of iHave control messages.
+// Returns:
+// - int: the number of message ids cached by the tracker.
+func (t *RPCSentTracker) iHaveRPCSent(iHaves []*pb.ControlIHave) int {
+	controlMsgType := p2pmsg.CtrlMsgIHave
+	messageIDCount := 0
+	for _, iHave := range iHaves {
+		messageIDCount += len(iHave.GetMessageIDs())
+		for _, messageID := range iHave.GetMessageIDs() {
+			t.cache.add(messageID, controlMsgType)
+		}
+	}
+	return messageIDCount
+}
+
+// WasIHaveRPCSent checks if an iHave control message with the provided message ID was sent.
+// Args:
+// - messageID: the message ID of the iHave RPC.
+// Returns:
+// - bool: true if the iHave rpc with the provided message ID was sent.
+func (t *RPCSentTracker) WasIHaveRPCSent(messageID string) bool {
+	return t.cache.has(messageID, p2pmsg.CtrlMsgIHave)
+}
+
+// LastHighestIHaveRPCSize returns the last highest size of iHaves sent in an rpc.
+// Returns:
+// - int64: the last highest size.
+func (t *RPCSentTracker) LastHighestIHaveRPCSize() int64 {
+	t.RLock()
+	defer t.RUnlock()
+	return t.lastSize
+}
+
+// nonce returns a random byte slice that is used to make entries stored in herocache unique.
+func nonce() ([]byte, error) {
+	b := make([]byte, 16)
+	_, err := rand.Read(b)
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
diff --git a/network/p2p/tracer/internal/rpc_sent_tracker_test.go b/network/p2p/tracer/internal/rpc_sent_tracker_test.go
new file mode 100644
index 00000000000..938a998cf46
--- /dev/null
+++ b/network/p2p/tracer/internal/rpc_sent_tracker_test.go
@@ -0,0 +1,259 @@
+package internal
+
+import (
+	"context"
+	"os"
+	"testing"
+	"time"
+
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	pb "github.com/libp2p/go-libp2p-pubsub/pb"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestNewRPCSentTracker ensures *RPCSentTracker is created as expected.
+func TestNewRPCSentTracker(t *testing.T) {
+	tracker := mockTracker(t, time.Minute)
+	require.NotNil(t, tracker)
+}
+
+// TestRPCSentTracker_IHave ensures *RPCSentTracker tracks sent iHave control messages as expected.
+func TestRPCSentTracker_IHave(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	tracker := mockTracker(t, time.Minute)
+	require.NotNil(t, tracker)
+
+	tracker.Start(signalerCtx)
+	defer func() {
+		cancel()
+		unittest.RequireComponentsDoneBefore(t, time.Second, tracker)
+	}()
+
+	t.Run("WasIHaveRPCSent should return false for iHave message Id that has not been tracked", func(t *testing.T) {
+		require.False(t, tracker.WasIHaveRPCSent("message_id"))
+	})
+
+	t.Run("WasIHaveRPCSent should return true for iHave message after it is tracked with iHaveRPCSent", func(t *testing.T) {
+		numOfMsgIds := 100
+		testCases := []struct {
+			messageIDS []string
+		}{
+			{unittest.IdentifierListFixture(numOfMsgIds).Strings()},
+			{unittest.IdentifierListFixture(numOfMsgIds).Strings()},
+			{unittest.IdentifierListFixture(numOfMsgIds).Strings()},
+			{unittest.IdentifierListFixture(numOfMsgIds).Strings()},
+		}
+		iHaves := make([]*pb.ControlIHave, len(testCases))
+		for i, testCase := range testCases {
+			testCase := testCase
+			iHaves[i] = &pb.ControlIHave{
+				MessageIDs: testCase.messageIDS,
+			}
+		}
+		rpc := rpcFixture(withIhaves(iHaves))
+		require.NoError(t, tracker.Track(rpc))
+
+		// eventually we should have tracked numOfMsgIds per test case
+		require.Eventually(t, func() bool {
+			return tracker.cache.size() == uint(len(testCases)*numOfMsgIds)
+		}, time.Second, 100*time.Millisecond)
+
+		for _, testCase := range testCases {
+			for _, messageID := range testCase.messageIDS {
+				require.True(t, tracker.WasIHaveRPCSent(messageID))
+			}
+		}
+	})
+}
+
+// TestRPCSentTracker_DuplicateMessageID ensures the worker pool of the RPC tracker processes requests with the same message ID but different nonces.
+func TestRPCSentTracker_DuplicateMessageID(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	processedWorkLogs := atomic.NewInt64(0)
+	hook := zerolog.HookFunc(func(e *zerolog.Event, level zerolog.Level, message string) {
+		if level == zerolog.DebugLevel {
+			if message == iHaveRPCTrackedLog {
+				processedWorkLogs.Inc()
+			}
+		}
+	})
+	logger := zerolog.New(os.Stdout).Level(zerolog.DebugLevel).Hook(hook)
+
+	tracker := mockTracker(t, time.Minute)
+	require.NotNil(t, tracker)
+	tracker.logger = logger
+	tracker.Start(signalerCtx)
+	defer func() {
+		cancel()
+		unittest.RequireComponentsDoneBefore(t, time.Second, tracker)
+	}()
+
+	messageID := unittest.IdentifierFixture().String()
+	rpc := rpcFixture(withIhaves([]*pb.ControlIHave{{
+		MessageIDs: []string{messageID},
+	}}))
+	// track duplicate RPCs; each will be processed by a worker
+	require.NoError(t, tracker.Track(rpc))
+	require.NoError(t, tracker.Track(rpc))
+
+	// eventually we should have processed both RPCs
+	require.Eventually(t, func() bool {
+		return processedWorkLogs.Load() == 2
+	}, time.Second, 100*time.Millisecond)
+}
+
+// TestRPCSentTracker_ConcurrentTracking ensures that all message IDs in RPCs are tracked as expected when tracked concurrently.
+func TestRPCSentTracker_ConcurrentTracking(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	tracker := mockTracker(t, time.Minute)
+	require.NotNil(t, tracker)
+
+	tracker.Start(signalerCtx)
+	defer func() {
+		cancel()
+		unittest.RequireComponentsDoneBefore(t, time.Second, tracker)
+	}()
+
+	numOfMsgIds := 100
+	numOfRPCs := 100
+	rpcs := make([]*pubsub.RPC, numOfRPCs)
+	for i := 0; i < numOfRPCs; i++ {
+		i := i
+		go func() {
+			rpc := rpcFixture(withIhaves([]*pb.ControlIHave{{MessageIDs: unittest.IdentifierListFixture(numOfMsgIds).Strings()}}))
+			require.NoError(t, tracker.Track(rpc))
+			rpcs[i] = rpc
+		}()
+	}
+
+	// eventually we should have tracked numOfMsgIds per RPC
+	require.Eventually(t, func() bool {
+		return tracker.cache.size() == uint(numOfRPCs*numOfMsgIds)
+	}, time.Second, 100*time.Millisecond)
+
+	for _, rpc := range rpcs {
+		ihaves := rpc.GetControl().GetIhave()
+		for _, messageID := range ihaves[0].GetMessageIDs() {
+			require.True(t, tracker.WasIHaveRPCSent(messageID))
+		}
+	}
+}
+
+// TestRPCSentTracker_LastHighestIHaveRPCSize ensures *RPCSentTracker tracks the last largest iHave size as expected.
+func TestRPCSentTracker_LastHighestIHaveRPCSize(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx)
+
+	tracker := mockTracker(t, 3*time.Second)
+	require.NotNil(t, tracker)
+
+	tracker.Start(signalerCtx)
+	defer func() {
+		cancel()
+		unittest.RequireComponentsDoneBefore(t, time.Second, tracker)
+	}()
+
+	expectedLastHighestSize := 1000
+	// adding a single message ID to each iHave enables us to track the expected cache size by the number of iHaves.
+	numOfMessageIds := 1
+	testCases := []struct {
+		rpcFixture  *pubsub.RPC
+		numOfIhaves int
+	}{
+		{rpcFixture(withIhaves(mockIHaveFixture(10, numOfMessageIds))), 10},
+		{rpcFixture(withIhaves(mockIHaveFixture(100, numOfMessageIds))), 100},
+		{rpcFixture(withIhaves(mockIHaveFixture(expectedLastHighestSize, numOfMessageIds))), expectedLastHighestSize},
+		{rpcFixture(withIhaves(mockIHaveFixture(999, numOfMessageIds))), 999},
+		{rpcFixture(withIhaves(mockIHaveFixture(23, numOfMessageIds))), 23},
+	}
+
+	expectedCacheSize := 0
+	for _, testCase := range testCases {
+		require.NoError(t, tracker.Track(testCase.rpcFixture))
+		expectedCacheSize += testCase.numOfIhaves
+	}
+
+	// eventually we should have tracked one message ID per iHave sent
+	require.Eventually(t, func() bool {
+		return tracker.cache.size() == uint(expectedCacheSize)
+	}, time.Second, 100*time.Millisecond)
+
+	require.Equal(t, int64(expectedLastHighestSize), tracker.LastHighestIHaveRPCSize())
+
+	// after sending an even larger RPC, lastHighestIHaveRPCSize should be updated to the new largest size
+	largeIhave := 50000
+	require.NoError(t, tracker.Track(rpcFixture(withIhaves(mockIHaveFixture(largeIhave, numOfMessageIds)))))
+	require.Eventually(t, func() bool {
+		return tracker.LastHighestIHaveRPCSize() == int64(largeIhave)
+	}, 1*time.Second, 100*time.Millisecond)
+
+	// we expect lastHighestIHaveRPCSize to be set to the current rpc size being tracked if it hasn't been updated since the configured lastHighestIHaveRPCSizeResetInterval
+	expectedEventualLastHighest := 8
+	require.Eventually(t, func() bool {
+		require.NoError(t, tracker.Track(rpcFixture(withIhaves(mockIHaveFixture(expectedEventualLastHighest, numOfMessageIds)))))
+		return tracker.LastHighestIHaveRPCSize() == int64(expectedEventualLastHighest)
+	}, 4*time.Second, 100*time.Millisecond)
+}
+
+// mockIHaveFixture generates a list of n iHaves. Each iHave is created with m random message ids.
+func mockIHaveFixture(n, m int) []*pb.ControlIHave {
+	iHaves := make([]*pb.ControlIHave, n)
+	for i := 0; i < n; i++ {
+		// topic does not have to be a valid flow topic, for testing purposes we can use a random string
+		topic := unittest.IdentifierFixture().String()
+		iHaves[i] = &pb.ControlIHave{
+			TopicID:    &topic,
+			MessageIDs: unittest.IdentifierListFixture(m).Strings(),
+		}
+	}
+	return iHaves
+}
+
+func mockTracker(t *testing.T, lastHighestIhavesSentResetInterval time.Duration) *RPCSentTracker {
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+	tracker := NewRPCSentTracker(&RPCSentTrackerConfig{
+		Logger:                             zerolog.Nop(),
+		RPCSentCacheSize:                   cfg.NetworkConfig.GossipSub.RpcTracer.RPCSentTrackerCacheSize,
+		RPCSentCacheCollector:              metrics.NewNoopCollector(),
+		WorkerQueueCacheCollector:          metrics.NewNoopCollector(),
+		WorkerQueueCacheSize:               cfg.NetworkConfig.GossipSub.RpcTracer.RPCSentTrackerQueueCacheSize,
+		NumOfWorkers:                       1,
+		LastHighestIhavesSentResetInterval: lastHighestIhavesSentResetInterval,
+	})
+	return tracker
+}
+
+type rpcFixtureOpt func(*pubsub.RPC)
+
+func withIhaves(iHave []*pb.ControlIHave) rpcFixtureOpt {
+	return func(rpc *pubsub.RPC) {
+		rpc.Control.Ihave = iHave
+	}
+}
+
+func rpcFixture(opts ...rpcFixtureOpt) *pubsub.RPC {
+	rpc := &pubsub.RPC{
+		RPC: pb.RPC{
+			Control: &pb.ControlMessage{},
+		},
+	}
+	for _, opt := range opts {
+		opt(rpc)
+	}
+	return rpc
+}
diff --git a/network/p2p/translator/identity_provider_translator.go b/network/p2p/translator/identity_provider_translator.go
index c2bee0170a3..ddd1f41c004 100644
--- a/network/p2p/translator/identity_provider_translator.go
+++ b/network/p2p/translator/identity_provider_translator.go
@@ -10,6 +10,7 @@ import (
 	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/network/p2p"
 	"github.com/onflow/flow-go/network/p2p/keyutils"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
 )
 
 // IdentityProviderIDTranslator implements an `p2p.IDTranslator` which provides ID
@@ -31,13 +32,13 @@ func (t *IdentityProviderIDTranslator) GetFlowID(p peer.ID) (flow.Identifier, er
 	}
 	ids := t.idProvider.Identities(filter.HasNetworkingKey(flowKey))
 	if len(ids) == 0 {
-		return flow.ZeroID, fmt.Errorf("could not find identity corresponding to peer id %v", p.String())
+		return flow.ZeroID, fmt.Errorf("could not find identity corresponding to peer id %v", p2plogging.PeerId(p))
 	}
 	return ids[0].NodeID, nil
 }
 
 func (t *IdentityProviderIDTranslator) GetPeerID(n flow.Identifier) (peer.ID, error) {
-	ids := t.idProvider.Identities(filter.HasNodeID(n))
+	ids := t.idProvider.Identities(filter.HasNodeID[flow.Identity](n))
 	if len(ids) == 0 {
 		return "", fmt.Errorf("could not find identity with id %v", n.String())
 	}
diff --git a/network/p2p/translator/unstaked_translator.go b/network/p2p/translator/unstaked_translator.go
index a1386ba6119..99bda4cb6a8 100644
--- a/network/p2p/translator/unstaked_translator.go
+++ b/network/p2p/translator/unstaked_translator.go
@@ -29,7 +29,7 @@ func NewPublicNetworkIDTranslator() *PublicNetworkIDTranslator {
 
 var _ p2p.IDTranslator = (*PublicNetworkIDTranslator)(nil)
 
 // GetPeerID returns the peer ID for the given Flow ID.
-// TODO: implement BFT-compliant error handling -> https://github.com/onflow/flow-go/blob/master/CodingConventions.md
+// TODO: implement BFT-compliant error handling -> https://github.com/onflow/flow-go/blob/master/docs/CodingConventions.md
 func (t *PublicNetworkIDTranslator) GetPeerID(flowID flow.Identifier) (peer.ID, error) {
 	data := append([]byte{0x02}, flowID[:]...)
 
@@ -48,11 +48,11 @@ func (t *PublicNetworkIDTranslator) GetPeerID(flowID flow.Identifier) (peer.ID,
 }
 
 // GetFlowID returns the Flow ID for the given peer ID.
-// TODO: implement BFT-compliant error handling -> https://github.com/onflow/flow-go/blob/master/CodingConventions.md
+// TODO: implement BFT-compliant error handling -> https://github.com/onflow/flow-go/blob/master/docs/CodingConventions.md
 func (t *PublicNetworkIDTranslator) GetFlowID(peerID peer.ID) (flow.Identifier, error) {
 	pk, err := peerID.ExtractPublicKey()
 	if err != nil {
-		return flow.ZeroID, fmt.Errorf("cannot generate an unstaked FlowID for peerID %v: corresponding libp2p key is not extractible from PeerID", peerID)
+		return flow.ZeroID, fmt.Errorf("cannot generate an unstaked FlowID for peerID %v: corresponding libp2p key is not extractible from PeerID: %w", peerID, err)
 	}
 
 	if pk.Type() != crypto_pb.KeyType_Secp256k1 {
diff --git a/network/p2p/translator/unstaked_translator_test.go b/network/p2p/translator/unstaked_translator_test.go
index 939e2eb2441..d8ef5f82137 100644
--- a/network/p2p/translator/unstaked_translator_test.go
+++ b/network/p2p/translator/unstaked_translator_test.go
@@ -5,13 +5,11 @@ import (
 	"testing"
 
 	"github.com/libp2p/go-libp2p/core/peer"
+	fcrypto "github.com/onflow/crypto"
 	"github.com/stretchr/testify/require"
 
-	"github.com/onflow/flow-go/network/p2p/translator"
-
-	fcrypto "github.com/onflow/flow-go/crypto"
-
 	"github.com/onflow/flow-go/network/p2p/keyutils"
+	"github.com/onflow/flow-go/network/p2p/translator"
 )
 
 // For these test, refer to https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md for libp2p
diff --git a/network/p2p/unicast/README.MD b/network/p2p/unicast/README.MD
new file mode 100644
index 00000000000..47829c6c80f
--- /dev/null
+++ b/network/p2p/unicast/README.MD
@@ -0,0 +1,125 @@
+# Unicast Manager
+
+## Overview
+In the Flow blockchain, nodes communicate with each other in three different ways: `unicast`, `multicast`, and `publish`.
+`multicast` and `publish` are handled by the pubsub (GossipSub) protocol.
+`unicast` is a protocol that is used to send messages over direct (one-to-one) connections to remote nodes.
+Each `unicast` message is sent through a single-use, one-time stream. One can see a stream as a virtual protocol
+that expands the base direct connection into a full-duplex communication channel.
+The figure below illustrates the notion of a direct connection and streams between nodes A and B. The direct
+connection is established between the nodes, and the nodes can then open multiple streams over the connection.
+The streams are shown with dashed green lines, while the direct connection is illustrated by the blue lines that
+encapsulate the streams.
+![streams.png](streams.png)
+
+The `unicast` `Manager` is responsible for _establishing_ streams between nodes when they need to communicate
+over the `unicast` protocol. When the manager receives a `CreateStream` invocation, it will try to establish a stream to the
+remote `peer` whose identifier is provided in the invocation (`peer.ID`). The manager extends the libp2p
The manager is expanding the libp2p +functionalities, hence, it operates on the notion of the `peer` (rather than Flow node), and `peer.ID` rather +than `flow.Identifier`. It is the responsibility of the caller to provide the correct `peer.ID` of the remote +node. + +The `UnicastManager` relies on the underlying libp2p node to establish the connection to the remote peer. Once the underlying +libp2p node receives a stream creation request from the `UnicastManager`, it will try to establish a connection to the remote peer if +there is no existing connection to the peer. Otherwise, it will pick and re-use the best existing connection to the remote peer. +Hence, the `UnicastManager` does not (and should not) care about the connection establishment, and rather relies on the underlying +libp2p node to establish the connection. The `UnicastManager` only cares about the stream creation, and will return an error +if the underlying libp2p node fails to establish a connection to the remote peer. + + +A stream is a one-time communication channel, i.e., it is assumed to be closed +by the caller once the message is sent. The caller (i.e., the Flow node) does not necessarily re-use a stream, and the +`Manager` creates one stream per request (i.e., `CreateStream` invocation), which is typically a single message. + +Note: the limit of number of streams and connections between nodes is set throught eh libp2p resource manager limits (see `config/default-config.yml`): + +Note: `pubsub` protocol also establishes connections between nodes to exchange gossip messages with each other. +The connection type is the same between `pubsub` and `unicast` protocols, as they both consult the underlying LibP2P node to +establish the connection. However, the level of reliability, life-cycle, and other aspects of the connections are different +between the two protocols. For example, `pubsub` requires some _number_ of connections to some _number_ of peers, which in most cases +is regardless of their identity. However, `unicast` requires a connection to a specific peer, and the connection is assumed +to be persistent. Hence, both these protocols have their own notion of connection management; the `unicast` `Manager` is responsible +for establishing connections when `unicast` protocol needs to send a message to a remote peer, while the `PeerManager` is responsible +for establishing connections when `pubsub`. These two work in isolation and independent of each other to satisfy different requirements. + +The `PeerManager` regularly checks the health of the connections and closes the connections to the peers that are not part of the Flow +protocol state. One the other hand, the `unicast` `Manager` only establishes a connection if there is no existing connection to the remote +peer. Currently, Flow nodes operate on a full mesh topology, meaning that every node is connected to every other node through `PeerManager`. +The `PeerManager` starts connecting to every remote node of the Flow protocol upon startup, and then maintains the connections unless the node +is disallow-listed or ejected by the protocol state. Accordingly, it is a rare event that a node does not have a connection to another node. +Also, that is the reason behind the `unicast` `Manager` not closing the connection after the stream is closed. The `unicast` `Manager` assumes +that the connection is persistent and will be kept open by the `PeerManager`. 
+
+## Backoff and Retry Attempts
+The flowchart below explains the abstract logic of the `UnicastManager` when it receives a `CreateStream` invocation.
+On the happy path, the `UnicastManager` successfully opens a stream to the peer.
+However, there can be cases where the remote peer is unreliable for stream creation, or where the remote peer acts
+maliciously and does not respond to stream creation requests. In order to distinguish between the case where the remote peer
+is unreliable and the case where the remote peer is malicious, the `UnicastManager` uses a backoff and retry mechanism.
+
+![retry.png](retry.png)
+
+### Addressing Unreliable Remote Peer
+To address an unreliable remote peer, upon an unsuccessful attempt to establish a stream, the `UnicastManager` will wait for a certain
+amount of time before it tries again (i.e., the backoff mechanism), and will retry a certain number of times before it gives up (i.e., the retry mechanism).
+The backoff and retry parameters are configurable through runtime flags.
+If all backoff and retry attempts fail, the `UnicastManager` will return an error to the caller. The caller can then decide to retry the request or not.
+By default, the `UnicastManager` retries each stream creation attempt 3 times. The backoff intervals for dialing and stream creation are initialized to 1 second and progress
+exponentially with a factor of 2, i.e., the `i-th` retry attempt is made after `t * 2^(i-1)`, where `t` is the backoff interval.
+For example, if the backoff interval is 1s, the first attempt is made right away, the first retry is made after `1s * 2^(1-1) = 1s`, the second retry is made
+after `1s * 2^(2-1) = 2s`, and so on.
+
+These parameters are configured using the `config/default-config.yml` file:
+```yaml
+  # Unicast create stream retry delay is initial delay used in the exponential backoff for create stream retries
+  unicast-create-stream-retry-delay: 1s
+```
+
+### Addressing Malicious Remote Peer
+The backoff and retry mechanism is used to address cases where the remote peer is unreliable.
+However, there can be cases where the remote peer is malicious and does not respond to stream creation requests.
+Such cases may cause the `UnicastManager` to wait for a long time before it gives up, resulting in resource exhaustion and a slow-down of stream creation.
+To mitigate such cases, the `UnicastManager` uses a retry budget for stream creation. The retry budgets are initialized
+using the `config/default-config.yml` file:
+```yaml
+  # The maximum number of retry attempts for creating a unicast stream to a remote peer before giving up. If it is set to 3 for example, it means that if a peer fails to create
+  # retry a unicast stream to a remote peer 3 times, the peer will give up and will not retry creating a unicast stream to that remote peer.
+  # When it is set to zero it means that the peer will not retry creating a unicast stream to a remote peer if it fails.
+  unicast-max-stream-creation-retry-attempt-times: 3
+```
+
+As shown in the above snippet, the stream creation retry budget is set to 3 by default for every remote peer.
+Each time the `UnicastManager` is invoked on `CreateStream` to `pid` (`peer.ID`), it loads the retry budget for `pid` from the unicast config cache.
+If no unicast config record exists for `pid`, one is created with the default retry budget. The `UnicastManager` then uses the retry budget to decide whether to retry the stream creation attempt or not.
+If the retry budget for stream creation is exhausted, the `UnicastManager`
+will not retry the stream creation attempt and returns an error to the caller. The caller can then decide to retry the request or not.
+Note that even when the retry budget is exhausted, the `UnicastManager` will try the stream creation attempt once, though it will not retry the attempt if it fails.
+
+#### Penalizing Malicious Remote Peer
+Each time the `UnicastManager` fails to create a stream to a remote peer and exhausts the retry budget, it penalizes the remote peer as follows:
+- If the `UnicastManager` exhausts the retry budget for stream creation, it decrements the stream creation retry budget for the remote peer.
+- If the retry budget reaches zero, the `UnicastManager` will only attempt once to create a stream to the remote peer and will not retry the attempt; instead, it returns an error to the caller.
+- Once the budget reaches zero, the `UnicastManager` does not decrement the budget any further.
+
+**Note:** the `UnicastManager` is part of the networking layer of the Flow node, which is a lower-order component than
+the Flow protocol engines that call the `UnicastManager` to send messages to remote peers. Hence, the `UnicastManager` _must not_ outsmart
+the Flow protocol engines in deciding whether to _create a stream_ in the first place. This means that the `UnicastManager` will attempt
+to create a stream even to peers with zero retry budgets. However, the `UnicastManager` does not retry attempts for peers with zero budgets, and instead
+returns an error immediately upon a failure. It is the responsibility of the Flow protocol engines to decide whether
+to send a message to a remote peer or not after a certain number of failures.
+
+#### Restoring Retry Budgets
+
+The `UnicastManager` may reset the stream creation budget for a remote peer _from zero to the default value_ in the following case:
+
+- **Restoring Stream Creation Retry Budget**: To restore the stream creation budget from zero to the default value, the `UnicastManager` keeps track of the _consecutive_
+  successful streams created to the remote peer. Every time a stream is created successfully, the `UnicastManager` increments a counter for the remote peer. The counter is
+  reset to zero upon the _first failure_ to create a stream to the remote peer. If the counter reaches a certain threshold, the `UnicastManager` will reset the stream creation
+  budget for the remote peer to the default value. The threshold is configurable through the `config/default-config.yml` file:
+  ```yaml
+  # The minimum number of consecutive successful streams to reset the unicast stream creation retry budget from zero to the maximum default. If it is set to 100 for example, it
+  # means that if a peer has 100 consecutive successful streams to the remote peer, and the remote peer has a zero stream creation budget,
+  # the unicast stream creation retry budget for that remote peer will be reset to the maximum default.
+  unicast-stream-zero-retry-reset-threshold: 100
+  ```
+  Reaching the threshold means that the remote peer is reliable enough to regain the default retry budget for stream creation.
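+
+As an illustration of the backoff schedule and the budget-restoration rule described above, consider the
+following sketch. It is not the flow-go implementation; the helper names and signatures are hypothetical,
+and only the configuration defaults quoted from `config/default-config.yml` above are real:
+```go
+package example
+
+import "time"
+
+// backoffDelay computes the delay before the i-th retry (1-based) under the
+// exponential backoff described above: t * 2^(i-1), where t is the initial
+// delay (unicast-create-stream-retry-delay, 1s by default).
+func backoffDelay(initial time.Duration, retry int) time.Duration {
+	return initial * time.Duration(1<<(retry-1))
+}
+
+// restoreBudgetIfReliable sketches the restoration rule: once a peer with a zero
+// stream creation retry budget accumulates `threshold` consecutive successful
+// streams (unicast-stream-zero-retry-reset-threshold, 100 by default), its budget
+// is reset to the default (unicast-max-stream-creation-retry-attempt-times, 3).
+func restoreBudgetIfReliable(budget, consecutiveSuccesses, threshold, defaultBudget uint64) uint64 {
+	if budget == 0 && consecutiveSuccesses >= threshold {
+		return defaultBudget
+	}
+	return budget
+}
+```
+For example, with the default 1s initial delay, `backoffDelay(time.Second, 1)` yields 1s, `backoffDelay(time.Second, 2)` yields 2s, and `backoffDelay(time.Second, 3)` yields 4s.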
diff --git a/network/p2p/unicast/cache/unicastConfigCache.go b/network/p2p/unicast/cache/unicastConfigCache.go
new file mode 100644
index 00000000000..c52205555ec
--- /dev/null
+++ b/network/p2p/unicast/cache/unicastConfigCache.go
@@ -0,0 +1,143 @@
+package unicastcache
+
+import (
+	"fmt"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/mempool"
+	herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata"
+	"github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
+	"github.com/onflow/flow-go/module/mempool/stdmap"
+	"github.com/onflow/flow-go/network/p2p"
+	"github.com/onflow/flow-go/network/p2p/unicast"
+)
+
+// UnicastConfigCache is a cache that stores the unicast configs for all types of nodes.
+// Stored configs are keyed by the hash of the peerID.
+type UnicastConfigCache struct {
+	peerCache  *stdmap.Backend[flow.Identifier, unicast.Config]
+	cfgFactory func() unicast.Config // factory function that creates a new unicast config.
+}
+
+var _ unicast.ConfigCache = (*UnicastConfigCache)(nil)
+
+// NewUnicastConfigCache creates a new UnicastConfigCache.
+// Args:
+// - size: the maximum number of unicast configs that the cache can hold.
+// - logger: the logger used by the cache.
+// - collector: the metrics collector used by the cache.
+// - cfgFactory: a factory function that creates a new unicast config.
+// Returns:
+// - *UnicastConfigCache, the created cache.
+// Note that the cache is supposed to keep the unicast config for all types of nodes. Since the number of such nodes is
+// expected to be small, size must be large enough to hold all the unicast configs of the authorized nodes.
+// To avoid any crash-failure, the cache is configured to eject the least recently used configs when the cache is full.
+// Hence, we recommend setting the size to a large value to minimize ejections.
+func NewUnicastConfigCache(
+	size uint32,
+	logger zerolog.Logger,
+	collector module.HeroCacheMetrics,
+	cfgFactory func() unicast.Config,
+) *UnicastConfigCache {
+	return &UnicastConfigCache{
+		peerCache: stdmap.NewBackend(
+			stdmap.WithMutableBackData[flow.Identifier, unicast.Config](
+				herocache.NewCache[unicast.Config](
+					size,
+					herocache.DefaultOversizeFactor,
+					heropool.LRUEjection,
+					logger.With().Str("module", "unicast-config-cache").Logger(),
+					collector,
+				),
+			),
+		),
+		cfgFactory: cfgFactory,
+	}
+}
+
+// AdjustWithInit applies the given adjust function to the unicast config of the given peer ID, and stores the adjusted config in the cache.
+// It returns an error if the adjustFunc returns an error.
+// Note that if AdjustWithInit is called when the config does not exist, the config is initialized first and the
+// adjust function is then applied to the initialized config. In this case, the adjust function should not return an error.
+// Args:
+// - peerID: the peer id of the unicast config.
+// - adjustFunc: the function that adjusts the unicast config.
+// Returns:
+// - error: any returned error should be considered as an irrecoverable error and indicates a bug.
+func (d *UnicastConfigCache) AdjustWithInit(peerID peer.ID, adjustFunc unicast.UnicastConfigAdjustFunc) (*unicast.Config, error) {
+	var rErr error
+	// wraps the external adjust function to adjust the unicast config.
+	wrapAdjustFunc := func(config unicast.Config) unicast.Config {
+		// adjust the unicast config.
+ adjustedCfg, err := adjustFunc(config) + if err != nil { + rErr = fmt.Errorf("adjust function failed: %w", err) + return config // returns the original config (reverse the adjustment). + } + + // Return the adjusted config. + return adjustedCfg + } + + initFunc := func() unicast.Config { + return d.cfgFactory() + } + + adjustedConfig, adjusted := d.peerCache.AdjustWithInit(p2p.MakeId(peerID), wrapAdjustFunc, initFunc) + if rErr != nil { + return nil, fmt.Errorf("adjust operation aborted with an error: %w", rErr) + } + + if !adjusted { + return nil, fmt.Errorf("adjust operation aborted, unicast config was not adjusted") + } + + return &unicast.Config{ + StreamCreationRetryAttemptBudget: adjustedConfig.StreamCreationRetryAttemptBudget, + ConsecutiveSuccessfulStream: adjustedConfig.ConsecutiveSuccessfulStream, + }, nil +} + +// GetWithInit returns the unicast config for the given peer id. If the config does not exist, it creates a new config +// using the factory function and stores it in the cache. +// Args: +// - peerID: the peer id of the unicast config. +// Returns: +// - *Config, the unicast config for the given peer id. +// - error if the factory function returns an error. Any error should be treated as an irrecoverable error and indicates a bug. +func (d *UnicastConfigCache) GetWithInit(peerID peer.ID) (*unicast.Config, error) { + // ensuring that the init-and-get operation is atomic. + key := p2p.MakeId(peerID) + + var config unicast.Config + err := d.peerCache.Run(func(backData mempool.BackData[flow.Identifier, unicast.Config]) error { + val, ok := backData.Get(key) + if ok { + config = val + return nil + } + + config = d.cfgFactory() + backData.Add(key, config) + + return nil + }) + if err != nil { + return nil, fmt.Errorf("run operation aborted with an error: %w", err) + } + + // return a copy of the config (we do not want the caller to modify the config). + return &unicast.Config{ + StreamCreationRetryAttemptBudget: config.StreamCreationRetryAttemptBudget, + ConsecutiveSuccessfulStream: config.ConsecutiveSuccessfulStream, + }, nil +} + +// Size returns the number of unicast configs in the cache. +func (d *UnicastConfigCache) Size() uint { + return d.peerCache.Size() +} diff --git a/network/p2p/unicast/cache/unicastConfigCache_test.go b/network/p2p/unicast/cache/unicastConfigCache_test.go new file mode 100644 index 00000000000..23d83f1a354 --- /dev/null +++ b/network/p2p/unicast/cache/unicastConfigCache_test.go @@ -0,0 +1,260 @@ +package unicastcache_test + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/network/p2p/unicast" + unicastcache "github.com/onflow/flow-go/network/p2p/unicast/cache" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewUnicastConfigCache tests the creation of a new UnicastConfigCache. +// It asserts that the cache is created and its size is 0. +func TestNewUnicastConfigCache(t *testing.T) { + sizeLimit := uint32(100) + logger := zerolog.Nop() + collector := metrics.NewNoopCollector() + cache := unicastcache.NewUnicastConfigCache(sizeLimit, logger, collector, unicastConfigFixture) + require.NotNil(t, cache) + require.Equalf(t, uint(0), cache.Size(), "cache size must be 0") +} + +// unicastConfigFixture returns a unicast config fixture. +// The unicast config is initialized with the default values. 
+func unicastConfigFixture() unicast.Config {
+	return unicast.Config{
+		StreamCreationRetryAttemptBudget: 3,
+	}
+}
+
+// TestUnicastConfigCache_Adjust_Init tests the AdjustWithInit method of the UnicastConfigCache. It asserts that the unicast config is initialized, adjusted,
+// and stored in the cache.
+func TestUnicastConfigCache_Adjust_Init(t *testing.T) {
+	sizeLimit := uint32(100)
+	logger := zerolog.Nop()
+	collector := metrics.NewNoopCollector()
+
+	unicastFactoryCalled := 0
+	unicastConfigFactory := func() unicast.Config {
+		require.Less(t, unicastFactoryCalled, 2, "unicast config factory must be called at most twice")
+		unicastFactoryCalled++
+		return unicastConfigFixture()
+	}
+	adjustFuncIncrement := func(cfg unicast.Config) (unicast.Config, error) {
+		cfg.StreamCreationRetryAttemptBudget++
+		return cfg, nil
+	}
+
+	cache := unicastcache.NewUnicastConfigCache(sizeLimit, logger, collector, unicastConfigFactory)
+	require.NotNil(t, cache)
+	require.Zerof(t, cache.Size(), "cache size must be 0")
+
+	peerID1 := unittest.PeerIdFixture(t)
+	peerID2 := unittest.PeerIdFixture(t)
+
+	// Initializing the unicast config for peerID1 through GetWithInit.
+	// unicast config for peerID1 does not exist in the cache, so it must be initialized when using GetWithInit.
+	cfg, err := cache.GetWithInit(peerID1)
+	require.NoError(t, err)
+	require.NotNil(t, cfg, "unicast config must not be nil")
+	require.Equal(t, unicastConfigFixture(), *cfg, "unicast config must be initialized with the default values")
+	require.Equal(t, uint(1), cache.Size(), "cache size must be 1")
+
+	// Initializing and adjusting the unicast config for peerID2 through AdjustWithInit.
+	// unicast config for peerID2 does not exist in the cache, so it must be initialized when using AdjustWithInit.
+	cfg, err = cache.AdjustWithInit(peerID2, adjustFuncIncrement)
+	require.NoError(t, err)
+	// adjusting a non-existing unicast config must initialize the config.
+	require.Equal(t, uint(2), cache.Size(), "cache size must be 2")
+	require.Equal(t, cfg.StreamCreationRetryAttemptBudget, unicastConfigFixture().StreamCreationRetryAttemptBudget+1, "stream backoff must be 2")
+
+	// Retrieving the unicast config of peerID2 through GetWithInit.
+	// retrieve the unicast config for peerID2 and assert that it was initialized with the default values and the adjust function has been applied.
+	cfg, err = cache.GetWithInit(peerID2)
+	require.NoError(t, err, "unicast config must exist in the cache")
+	require.NotNil(t, cfg, "unicast config must not be nil")
+	// retrieving an existing unicast config must not change the cache size.
+	require.Equal(t, uint(2), cache.Size(), "cache size must be 2")
+	// config should be the same as the one returned by AdjustWithInit.
+	require.Equal(t, cfg.StreamCreationRetryAttemptBudget, unicastConfigFixture().StreamCreationRetryAttemptBudget+1, "stream backoff must be 2")
+
+	// Adjusting the unicast config of peerID1 through AdjustWithInit.
+	// unicast config for peerID1 already exists in the cache, so it must be adjusted when using AdjustWithInit.
+	cfg, err = cache.AdjustWithInit(peerID1, adjustFuncIncrement)
+	require.NoError(t, err)
+	// adjusting an existing unicast config must not change the cache size.
+	require.Equal(t, uint(2), cache.Size(), "cache size must be 2")
+	require.Equal(t, cfg.StreamCreationRetryAttemptBudget, unicastConfigFixture().StreamCreationRetryAttemptBudget+1, "stream backoff must be 2")
+
+	// Recurring adjustment of the unicast config of peerID1 through AdjustWithInit.
+	// unicast config for peerID1 already exists in the cache, so it must be adjusted when using AdjustWithInit.
+	cfg, err = cache.AdjustWithInit(peerID1, adjustFuncIncrement)
+	require.NoError(t, err)
+	// adjusting an existing unicast config must not change the cache size.
+	require.Equal(t, uint(2), cache.Size(), "cache size must be 2")
+	require.Equal(t, cfg.StreamCreationRetryAttemptBudget, unicastConfigFixture().StreamCreationRetryAttemptBudget+2, "stream backoff must be 3")
+}
+
+// TestUnicastConfigCache_Concurrent_Adjust tests the AdjustWithInit method of the UnicastConfigCache. It asserts that the unicast config is adjusted
+// and stored in the cache as expected under concurrent adjustments.
+func TestUnicastConfigCache_Concurrent_Adjust(t *testing.T) {
+	sizeLimit := uint32(100)
+	logger := zerolog.Nop()
+	collector := metrics.NewNoopCollector()
+
+	cache := unicastcache.NewUnicastConfigCache(sizeLimit, logger, collector, func() unicast.Config {
+		return unicast.Config{} // empty unicast config
+	})
+	require.NotNil(t, cache)
+	require.Zerof(t, cache.Size(), "cache size must be 0")
+
+	peerIds := make([]peer.ID, sizeLimit)
+	for i := 0; i < int(sizeLimit); i++ {
+		peerId := unittest.PeerIdFixture(t)
+		require.NotContainsf(t, peerIds, peerId, "peer id must be unique")
+		peerIds[i] = peerId
+	}
+
+	wg := sync.WaitGroup{}
+	for i := 0; i < int(sizeLimit); i++ {
+		// adjusts the unicast config of the i-th peer i+1 times, concurrently.
+		for j := 0; j < i+1; j++ {
+			wg.Add(1)
+			go func(peerId peer.ID) {
+				defer wg.Done()
+				_, err := cache.AdjustWithInit(peerId, func(cfg unicast.Config) (unicast.Config, error) {
+					cfg.StreamCreationRetryAttemptBudget++
+					return cfg, nil
+				})
+				require.NoError(t, err)
+			}(peerIds[i])
+		}
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, time.Second*1, "adjustments must be done on time")
+
+	// assert that the cache size is equal to the size limit.
+	require.Equal(t, uint(sizeLimit), cache.Size(), "cache size must be equal to the size limit")
+
+	// assert, concurrently, that the unicast config of the i-th peer was adjusted i+1 times.
+	for i := 0; i < int(sizeLimit); i++ {
+		wg.Add(1)
+		go func(j int) {
+			defer wg.Done()
+
+			peerID := peerIds[j]
+			cfg, err := cache.GetWithInit(peerID)
+			require.NoError(t, err)
+			require.Equal(t,
+				uint64(j+1),
+				cfg.StreamCreationRetryAttemptBudget,
+				fmt.Sprintf("peerId %s unicast backoff must be adjusted %d times got: %d", peerID, j+1, cfg.StreamCreationRetryAttemptBudget))
+		}(i)
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, time.Second*1, "retrievals must be done on time")
+}
+
+// TestConcurrent_Adjust_And_Get_Is_Safe tests that concurrent adjustments and retrievals are safe and do not cause an error even if they cause eviction. The test stress tests the cache
+// with 2 * SizeLimit concurrent operations (SizeLimit times concurrent adjustments and SizeLimit times concurrent retrievals).
+// It asserts that the cache size is equal to the size limit, and the unicast config for each peer is adjusted and retrieved correctly.
+func TestConcurrent_Adjust_And_Get_Is_Safe(t *testing.T) {
+	sizeLimit := uint32(100)
+	logger := zerolog.Nop()
+	collector := metrics.NewNoopCollector()
+
+	cache := unicastcache.NewUnicastConfigCache(sizeLimit, logger, collector, unicastConfigFixture)
+	require.NotNil(t, cache)
+	require.Zerof(t, cache.Size(), "cache size must be 0")
+
+	wg := sync.WaitGroup{}
+	for i := 0; i < int(sizeLimit); i++ {
+		// concurrently adjusts the unicast configs.
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			peerId := unittest.PeerIdFixture(t)
+			updatedConfig, err := cache.AdjustWithInit(peerId, func(cfg unicast.Config) (unicast.Config, error) {
+				cfg.StreamCreationRetryAttemptBudget = 2 // some random adjustment
+				cfg.ConsecutiveSuccessfulStream = 3      // some random adjustment
+				return cfg, nil
+			})
+			require.NoError(t, err) // concurrent adjustment must not fail.
+			require.Equal(t, uint64(2), updatedConfig.StreamCreationRetryAttemptBudget)
+			require.Equal(t, uint64(3), updatedConfig.ConsecutiveSuccessfulStream)
+		}()
+	}
+
+	// concurrently retrieve the unicast configs of fresh peer ids; each retrieval initializes a new config with the default values.
+	for i := 0; i < int(sizeLimit); i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			peerId := unittest.PeerIdFixture(t)
+			cfg, err := cache.GetWithInit(peerId)
+			require.NoError(t, err) // concurrent retrieval must not fail.
+			require.Equal(t, unicastConfigFixture().StreamCreationRetryAttemptBudget, cfg.StreamCreationRetryAttemptBudget)
+			require.Equal(t, uint64(0), cfg.ConsecutiveSuccessfulStream)
+		}()
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, time.Second*1, "all operations must be done on time")
+
+	// cache was stress-tested with 2 * SizeLimit concurrent operations. Nevertheless, the cache size must be equal to the size limit due to LRU eviction.
+	require.Equal(t, uint(sizeLimit), cache.Size(), "cache size must be equal to the size limit")
+}
+
+// TestUnicastConfigCache_LRU_Eviction tests that the cache evicts the least recently used unicast config when the cache size reaches the size limit.
+func TestUnicastConfigCache_LRU_Eviction(t *testing.T) {
+	sizeLimit := uint32(100)
+	logger := zerolog.Nop()
+	collector := metrics.NewNoopCollector()
+
+	cache := unicastcache.NewUnicastConfigCache(sizeLimit, logger, collector, unicastConfigFixture)
+	require.NotNil(t, cache)
+	require.Zerof(t, cache.Size(), "cache size must be 0")
+
+	peerIds := make([]peer.ID, sizeLimit+1)
+	for i := 0; i < int(sizeLimit+1); i++ {
+		peerId := unittest.PeerIdFixture(t)
+		require.NotContainsf(t, peerIds, peerId, "peer id must be unique")
+		peerIds[i] = peerId
+	}
+	for i := 0; i < int(sizeLimit+1); i++ {
+		updatedConfig, err := cache.AdjustWithInit(peerIds[i], func(cfg unicast.Config) (unicast.Config, error) {
+			cfg.StreamCreationRetryAttemptBudget = 2 // some random adjustment
+			cfg.ConsecutiveSuccessfulStream = 3      // some random adjustment
+			return cfg, nil
+		})
+		require.NoError(t, err) // adjustment must not fail.
+		require.Equal(t, uint64(2), updatedConfig.StreamCreationRetryAttemptBudget)
+		require.Equal(t, uint64(3), updatedConfig.ConsecutiveSuccessfulStream)
+	}
+
+	// except the first peer id, all other peer ids should stay intact in the cache.
+	for i := 1; i < int(sizeLimit+1); i++ {
+		cfg, err := cache.GetWithInit(peerIds[i])
+		require.NoError(t, err)
+		require.Equal(t, uint64(2), cfg.StreamCreationRetryAttemptBudget)
+		require.Equal(t, uint64(3), cfg.ConsecutiveSuccessfulStream)
+	}
+
+	require.Equal(t, uint(sizeLimit), cache.Size(), "cache size must be equal to the size limit")
+
+	// querying the first peer id should return a fresh unicast config,
+	// since it should be evicted due to LRU eviction, and then initialized with the default values.
+	cfg, err := cache.GetWithInit(peerIds[0])
+	require.NoError(t, err)
+	require.Equal(t, unicastConfigFixture().StreamCreationRetryAttemptBudget, cfg.StreamCreationRetryAttemptBudget)
+	require.Equal(t, uint64(0), cfg.ConsecutiveSuccessfulStream)
+
+	require.Equal(t, uint(sizeLimit), cache.Size(), "cache size must be equal to the size limit")
+}
diff --git a/network/p2p/unicast/dialConfig.go b/network/p2p/unicast/dialConfig.go
new file mode 100644
index 00000000000..e88c4fd7554
--- /dev/null
+++ b/network/p2p/unicast/dialConfig.go
@@ -0,0 +1,13 @@
+package unicast
+
+// Config is a struct that represents the dial config for a peer.
+type Config struct {
+	StreamCreationRetryAttemptBudget uint64 // number of times we have to try to open a stream to the peer before we give up.
+	ConsecutiveSuccessfulStream      uint64 // consecutive number of successful streams to the peer since the last time stream creation failed.
+}
+
+// UnicastConfigAdjustFunc is a function that is used to adjust the fields of a Config.
+// The function is called with the current config and should return the adjusted record.
+// A returned error indicates that the adjustment is not applied, and the config should not be updated.
+// In a BFT setup, the returned error should be treated as a fatal error.
+type UnicastConfigAdjustFunc func(Config) (Config, error)
diff --git a/network/p2p/unicast/dialConfigCache.go b/network/p2p/unicast/dialConfigCache.go
new file mode 100644
index 00000000000..9696e3dcc14
--- /dev/null
+++ b/network/p2p/unicast/dialConfigCache.go
@@ -0,0 +1,30 @@
+package unicast
+
+import (
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+// ConfigCache is a thread-safe cache for dial configs. It is used by the unicast service to store
+// the dial configs for peers.
+type ConfigCache interface {
+	// GetWithInit returns the dial config for the given peer id. If the config does not exist, it creates a new config
+	// using the factory function and stores it in the cache.
+	// Args:
+	// - peerID: the peer id of the dial config.
+	// Returns:
+	// - *Config, the dial config for the given peer id.
+	// - error if the factory function returns an error. Any error should be treated as an irrecoverable error and indicates a bug.
+	GetWithInit(peerID peer.ID) (*Config, error)
+
+	// AdjustWithInit adjusts the dial config for the given peer id using the given adjustFunc,
+	// initializing the config via the factory function if it does not yet exist in the cache.
+	// It returns an error if the adjustFunc returns an error.
+	// Args:
+	// - peerID: the peer id of the dial config.
+	// - adjustFunc: the function that adjusts the dial config.
+	// Returns:
+	// - error if the adjustFunc returns an error. Any error should be treated as an irrecoverable error and indicates a bug.
+	AdjustWithInit(peerID peer.ID, adjustFunc UnicastConfigAdjustFunc) (*Config, error)
+
+	// Size returns the number of dial configs in the cache.
+	Size() uint
+}
diff --git a/network/p2p/unicast/errors.go b/network/p2p/unicast/errors.go
index 85690508e91..99bb8bdeaed 100644
--- a/network/p2p/unicast/errors.go
+++ b/network/p2p/unicast/errors.go
@@ -3,30 +3,8 @@ package unicast
 import (
 	"errors"
 	"fmt"
-
-	"github.com/libp2p/go-libp2p/core/peer"
 )
 
-// ErrDialInProgress indicates that the libp2p node is currently dialing the peer.
-type ErrDialInProgress struct {
-	pid peer.ID
-}
-
-func (e ErrDialInProgress) Error() string {
-	return fmt.Sprintf("dialing to peer %s already in progress", e.pid.String())
-}
-
-// NewDialInProgressErr returns a new ErrDialInProgress.
-func NewDialInProgressErr(pid peer.ID) ErrDialInProgress {
-	return ErrDialInProgress{pid: pid}
-}
-
-// IsErrDialInProgress returns whether an error is ErrDialInProgress
-func IsErrDialInProgress(err error) bool {
-	var e ErrDialInProgress
-	return errors.As(err, &e)
-}
-
 // ErrMaxRetries indicates retries completed with max retries without a successful attempt.
 type ErrMaxRetries struct {
 	attempts uint64
diff --git a/network/p2p/unicast/manager.go b/network/p2p/unicast/manager.go
index f45c2ce7bcd..e51c3d0fc42 100644
--- a/network/p2p/unicast/manager.go
+++ b/network/p2p/unicast/manager.go
@@ -2,84 +2,121 @@ package unicast
 
 import (
 	"context"
+	"errors"
 	"fmt"
-	"sync"
 	"time"
 
+	"github.com/go-playground/validator/v10"
 	"github.com/hashicorp/go-multierror"
 	libp2pnet "github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/protocol"
-	"github.com/multiformats/go-multiaddr"
+	"github.com/libp2p/go-libp2p/p2p/net/swarm"
 	"github.com/rs/zerolog"
 	"github.com/sethvargo/go-retry"
 
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/network/p2p"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
 	"github.com/onflow/flow-go/network/p2p/unicast/protocols"
 	"github.com/onflow/flow-go/network/p2p/unicast/stream"
+	"github.com/onflow/flow-go/utils/logging"
 )
 
 const (
 	// MaxRetryJitter is the maximum number of milliseconds to wait between attempts for a 1-1 direct connection
	MaxRetryJitter = 5
-
-	// DefaultRetryDelay Initial delay between failing to establish a connection with another node and retrying. This delay
-	// increases exponentially (exponential backoff) with the number of subsequent failures to establish a connection.
-	DefaultRetryDelay = 1 * time.Second
 )
 
 var (
 	_ p2p.UnicastManager = (*Manager)(nil)
 )
 
+// DialConfigCacheFactory is a factory function that creates the ConfigCache used by the manager to store the per-peer
+// unicast configs; it receives the factory that produces the default config for a newly seen peer.
+type DialConfigCacheFactory func(configFactory func() Config) ConfigCache
+
 // Manager manages libp2p stream negotiation and creation, which is utilized for unicast dispatches.
 type Manager struct {
-	logger                 zerolog.Logger
-	streamFactory          stream.Factory
-	protocols              []protocols.Protocol
-	defaultHandler         libp2pnet.StreamHandler
-	sporkId                flow.Identifier
-	connStatus             p2p.PeerConnections
-	peerDialing            sync.Map
-	createStreamRetryDelay time.Duration
-	metrics                module.UnicastManagerMetrics
+	logger         zerolog.Logger
+	streamFactory  p2p.StreamFactory
+	protocols      []protocols.Protocol
+	defaultHandler libp2pnet.StreamHandler
+	sporkId        flow.Identifier
+	metrics        module.UnicastManagerMetrics
+
+	// createStreamBackoffDelay is the delay between each stream creation retry attempt.
+	// The manager retries stream creation with a constant backoff (plus a small jitter), and this parameter is the
+	// delay between each retry attempt.
+	createStreamBackoffDelay time.Duration
+
+	// dialConfigCache is a cache to store the dial config for each peer.
+	// TODO: encapsulation can be further improved by wrapping the dialConfigCache together with the dial config adjustment logic into a single struct.
+	dialConfigCache ConfigCache
+
+	// streamZeroBackoffResetThreshold is the threshold that determines when to reset the stream creation backoff budget to the default value.
+	//
+	// For example, the default value of 100 means that if the stream creation backoff budget has been decreased to 0, it will be reset to the default value
+	// when the number of consecutive successful streams reaches 100.
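+	// The counter of consecutive successful streams is tracked per peer in Config.ConsecutiveSuccessfulStream.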
+	//
+	// This is to prevent the backoff budget from being reset too frequently, as the backoff budget is used to gauge the reliability of the stream creation.
+	// When the stream creation backoff budget is reset to the default value, it means that the stream creation is reliable enough to be trusted again.
+	// This parameter defines that threshold; i.e., the stream creation is considered reliable enough to be trusted again once the number of consecutive successful streams reaches it.
+	// Note that the counter is reset to 0 when the stream creation fails, so a value of, for example, 100 means that the stream creation is reliable enough that the most recent
+	// 100 stream creations were all successful.
+	streamZeroBackoffResetThreshold uint64
+
+	// maxStreamCreationAttemptTimes is the maximum number of attempts to be made to create a stream to a remote node over a direct unicast (1:1) connection before we give up.
+	maxStreamCreationAttemptTimes uint64
 }
 
-func NewUnicastManager(logger zerolog.Logger,
-	streamFactory stream.Factory,
-	sporkId flow.Identifier,
-	createStreamRetryDelay time.Duration,
-	connStatus p2p.PeerConnections,
-	metrics module.UnicastManagerMetrics,
-) *Manager {
-	return &Manager{
-		logger:                 logger.With().Str("module", "unicast-manager").Logger(),
-		streamFactory:          streamFactory,
-		sporkId:                sporkId,
-		connStatus:             connStatus,
-		peerDialing:            sync.Map{},
-		createStreamRetryDelay: createStreamRetryDelay,
-		metrics:                metrics,
+// NewUnicastManager creates a new unicast manager.
+// Args:
+// - cfg: configuration for the unicast manager.
+//
+// Returns:
+// - a new unicast manager.
+// - an error if the configuration is invalid; any error is irrecoverable.
+func NewUnicastManager(cfg *ManagerConfig) (*Manager, error) {
+	if err := validator.New().Struct(cfg); err != nil {
+		return nil, fmt.Errorf("invalid unicast manager config: %w", err)
+	}
+
+	m := &Manager{
+		logger: cfg.Logger.With().Str("module", "unicast-manager").Logger(),
+		dialConfigCache: cfg.UnicastConfigCacheFactory(func() Config {
+			return Config{
+				StreamCreationRetryAttemptBudget: cfg.Parameters.MaxStreamCreationRetryAttemptTimes,
+			}
+		}),
+		streamFactory:                   cfg.StreamFactory,
+		sporkId:                         cfg.SporkId,
+		metrics:                         cfg.Metrics,
+		createStreamBackoffDelay:        cfg.Parameters.CreateStreamBackoffDelay,
+		streamZeroBackoffResetThreshold: cfg.Parameters.StreamZeroRetryResetThreshold,
+		maxStreamCreationAttemptTimes:   cfg.Parameters.MaxStreamCreationRetryAttemptTimes,
 	}
+
+	m.logger.Info().
+		Hex("spork_id", logging.ID(cfg.SporkId)).
+		Dur("create_stream_backoff_delay", cfg.Parameters.CreateStreamBackoffDelay).
+		Uint64("stream_zero_backoff_reset_threshold", cfg.Parameters.StreamZeroRetryResetThreshold).
+		Msg("unicast manager created")
+
+	return m, nil
 }
 
-// WithDefaultHandler sets the default stream handler for this unicast manager. The default handler is utilized
+// SetDefaultHandler sets the default stream handler for this unicast manager. The default handler is utilized
 // as the core handler for other unicast protocols, e.g., compressions.
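+// SetDefaultHandler must be called exactly once, and before any unicast protocol is registered; it panics otherwise.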
-func (m *Manager) WithDefaultHandler(defaultHandler libp2pnet.StreamHandler) {
+func (m *Manager) SetDefaultHandler(defaultHandler libp2pnet.StreamHandler) {
 	defaultProtocolID := protocols.FlowProtocolID(m.sporkId)
-	m.defaultHandler = defaultHandler
-
 	if len(m.protocols) > 0 {
 		panic("default handler must be set only once before any unicast registration")
 	}
 
+	m.defaultHandler = defaultHandler
+
 	m.protocols = []protocols.Protocol{
-		&PlainStream{
-			protocolId: defaultProtocolID,
-			handler:    defaultHandler,
-		},
+		stream.NewPlainStream(defaultHandler, defaultProtocolID),
 	}
 
 	m.streamFactory.SetStreamHandler(defaultProtocolID, defaultHandler)
@@ -104,196 +141,141 @@ func (m *Manager) Register(protocol protocols.ProtocolName) error {
 }
 
 // CreateStream tries establishing a libp2p stream to the remote peer id. It tries creating streams in the descending order of preference until
-// it either creates a successful stream or runs out of options. Creating stream on each protocol is tried at most `maxAttempts`, and then falls
-// back to the less preferred one.
-func (m *Manager) CreateStream(ctx context.Context, peerID peer.ID, maxAttempts int) (libp2pnet.Stream, []multiaddr.Multiaddr, error) {
+// it either creates a successful stream or runs out of options.
+// Args:
+// - ctx: context for the stream creation.
+// - peerID: peer ID of the remote peer.
+//
+// Returns:
+// - a new libp2p stream.
+// - error if the stream creation fails; the error is benign and can be retried.
+func (m *Manager) CreateStream(ctx context.Context, peerID peer.ID) (libp2pnet.Stream, error) {
 	var errs error
+	dialCfg, err := m.getDialConfig(peerID)
+	if err != nil {
+		// TODO: technically, it would be better to return an error here, but the error must be irrecoverable, and we cannot
+		// guarantee a clear distinction between recoverable and irrecoverable errors at the moment with CreateStream.
+		// We have to revisit this once we have studied the error handling paths in the unicast manager.
+		m.logger.Fatal().
+			Err(err).
+			Bool(logging.KeyNetworkingSecurity, true).
+			Str("peer_id", p2plogging.PeerId(peerID)).
+			Msg("failed to retrieve dial config for peer id")
+	}
+
+	m.logger.Debug().
+		Str("peer_id", p2plogging.PeerId(peerID)).
+		Str("dial_config", fmt.Sprintf("%+v", dialCfg)).
+		Msg("dial config for the peer retrieved")
+
 	for i := len(m.protocols) - 1; i >= 0; i-- {
-		s, addrs, err := m.tryCreateStream(ctx, peerID, uint64(maxAttempts), m.protocols[i])
+		s, err := m.createStream(ctx, peerID, m.protocols[i], dialCfg)
 		if err != nil {
 			errs = multierror.Append(errs, err)
 			continue
 		}
 		// return first successful stream
-		return s, addrs, nil
+		return s, nil
 	}
 
-	return nil, nil, fmt.Errorf("could not create stream on any available unicast protocol: %w", errs)
-}
-
-// tryCreateStream will retry createStream with the configured exponential backoff delay and maxAttempts.
-// During retries, each error encountered is aggregated in a multierror. If max attempts are made before a
-// stream can be successfully the multierror will be returned. During stream creation when IsErrDialInProgress
-// is encountered during retries this would indicate that no connection to the peer exists yet.
-// In this case we will retry creating the stream with a backoff until a connection is established.
-func (m *Manager) tryCreateStream(ctx context.Context, peerID peer.ID, maxAttempts uint64, protocol protocols.Protocol) (libp2pnet.Stream, []multiaddr.Multiaddr, error) {
-	var err error
-	var s libp2pnet.Stream
-	var addrs []multiaddr.Multiaddr // address on which we dial peerID
-
-	// configure back off retry delay values
-	backoff := retry.NewExponential(m.createStreamRetryDelay)
-	// https://github.com/sethvargo/go-retry#maxretries retries counter starts at zero and library will make last attempt
-	// when retries == maxAttempts causing 1 more func invocation than expected.
-	maxRetries := maxAttempts - 1
-	backoff = retry.WithMaxRetries(maxRetries, backoff)
-
-	attempts := 0
-	// retryable func will attempt to create the stream and only retry if dialing the peer is in progress
-	f := func(context.Context) error {
-		attempts++
-		s, addrs, err = m.createStream(ctx, peerID, maxAttempts, protocol)
-		if err != nil {
-			if IsErrDialInProgress(err) {
-				m.logger.Warn().
-					Err(err).
-					Str("peer_id", peerID.String()).
-					Int("attempt", attempts).
-					Uint64("max_attempts", maxAttempts).
-					Msg("retrying create stream, dial to peer in progress")
-				return retry.RetryableError(err)
-			}
-			return err
-		}
-
-		return nil
-	}
-	start := time.Now()
-	err = retry.Do(ctx, backoff, f)
-	duration := time.Since(start)
+	updatedCfg, err := m.adjustUnsuccessfulStreamAttempt(peerID)
 	if err != nil {
-		m.metrics.OnStreamCreationFailure(duration, attempts)
-		return nil, nil, err
+		// TODO: technically, it would be better to return an error here, but the error must be irrecoverable, and we cannot
+		// guarantee a clear distinction between recoverable and irrecoverable errors at the moment with CreateStream.
+		// We have to revisit this once we have studied the error handling paths in the unicast manager.
+		m.logger.Fatal().
+			Err(err).
+			Bool(logging.KeyNetworkingSecurity, true).
+			Str("peer_id", p2plogging.PeerId(peerID)).
+			Msg("failed to adjust dial config for peer id")
 	}
 
-	m.metrics.OnStreamCreated(duration, attempts)
-	return s, addrs, nil
-}
-
-// createStream creates a stream to the peerID with the provided protocol.
-func (m *Manager) createStream(ctx context.Context, peerID peer.ID, maxAttempts uint64, protocol protocols.Protocol) (libp2pnet.Stream, []multiaddr.Multiaddr, error) {
-	s, addrs, err := m.rawStreamWithProtocol(ctx, protocol.ProtocolId(), peerID, maxAttempts)
-	if err != nil {
-		return nil, nil, err
-	}
+	m.logger.Warn().
+		Err(errs).
+		Bool(logging.KeySuspicious, true).
+		Str("peer_id", p2plogging.PeerId(peerID)).
+		Str("dial_config", fmt.Sprintf("%+v", updatedCfg)).
+		Msg("failed to create stream to peer id, dial config adjusted")
 
-	s, err = protocol.UpgradeRawStream(s)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return s, addrs, nil
+	return nil, fmt.Errorf("could not create stream on any available unicast protocol: %w", errs)
 }
 
-// rawStreamWithProtocol creates a stream raw libp2p stream on specified protocol.
-//
-// Note: a raw stream must be upgraded by the given unicast protocol id.
+// createStream attempts to establish a new stream with a peer using the specified protocol. It retries with a
+// jittered constant backoff, making at most dialCfg.StreamCreationRetryAttemptBudget retries in addition to the
+// first attempt. If the stream cannot be established after the maximum number of attempts, it returns a compiled
+// multierror of all encountered errors.
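+// Note that stream creation may now dial the peer implicitly: the manual dialing step (and its dial-in-progress
+// bookkeeping) has been removed, and libp2p dials as part of NewStream when no connection exists.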
// -// It makes at most `maxAttempts` to create a stream with the peer. -// This was put in as a fix for #2416. PubSub and 1-1 communication compete with each other when trying to connect to -// remote nodes and once in a while NewStream returns an error 'both yamux endpoints are clients'. +// The function increments the Config's ConsecutiveSuccessfulStream count upon success. In the case of +// adjustment errors in Config, a fatal error is logged indicating an issue that requires attention. +// Metrics are collected to monitor the duration and number of attempts for stream creation. // -// Note that in case an existing TCP connection underneath to `peerID` exists, that connection is utilized for creating a new stream. -// The multiaddr.Multiaddr return value represents the addresses of `peerID` we dial while trying to create a stream to it, the -// multiaddr is only returned when a peer is initially dialed. -// Expected errors during normal operations: -// - ErrDialInProgress if no connection to the peer exists and there is already a dial in progress to the peer. If a dial to -// the peer is already in progress the caller needs to wait until it is completed, a peer should be dialed only once. +// Arguments: +// - ctx: Context to control the lifecycle of the stream creation. +// - peerID: The ID of the peer with which the stream is to be established. +// - protocol: The specific protocol used for the stream. +// - dialCfg: Configuration parameters for dialing and stream creation, including retry logic. // -// Unexpected errors during normal operations: -// - network.ErrIllegalConnectionState indicates bug in libpp2p when checking IsConnected status of peer. -func (m *Manager) rawStreamWithProtocol(ctx context.Context, - protocolID protocol.ID, - peerID peer.ID, - maxAttempts uint64, -) (libp2pnet.Stream, []multiaddr.Multiaddr, error) { - isConnected, err := m.connStatus.IsConnected(peerID) - if err != nil { - return nil, nil, err - } - - // check connection status and attempt to dial the peer if dialing is not in progress - if !isConnected { - // return error if we can't start dialing - if m.dialingInProgress(peerID) { - return nil, nil, NewDialInProgressErr(peerID) - } - defer m.dialingComplete(peerID) - dialAddr, err := m.dialPeer(ctx, peerID, maxAttempts) - if err != nil { - return nil, dialAddr, err - } - } +// Returns: +// - libp2pnet.Stream: The successfully created stream, or nil if the stream creation fails. +// - error: An aggregated multierror of all encountered errors during stream creation, or nil if successful; any returned error is benign and can be retried. +func (m *Manager) createStream(ctx context.Context, peerID peer.ID, protocol protocols.Protocol, dialCfg *Config) (libp2pnet.Stream, error) { + var err error + var s libp2pnet.Stream - // at this point dialing should have completed, we are already connected we can attempt to create the stream - s, err := m.rawStream(ctx, peerID, protocolID, maxAttempts) + s, err = m.createStreamWithRetry(ctx, peerID, protocol.ProtocolId(), dialCfg) if err != nil { - return nil, nil, err + return nil, fmt.Errorf("failed to create a stream to peer: %w", err) } - return s, nil, nil -} - -// dialPeer dial peer with retries. 
-// Expected errors during normal operations:
-//   - ErrMaxRetries if retry attempts are exhausted
-func (m *Manager) dialPeer(ctx context.Context, peerID peer.ID, maxAttempts uint64) ([]multiaddr.Multiaddr, error) {
-	// aggregated retryable errors that occur during retries, errs will be returned
-	// if retry context times out or maxAttempts have been made before a successful retry occurs
-	var errs error
-	var dialAddr []multiaddr.Multiaddr
-	dialAttempts := 0
-	backoff := m.retryBackoff(maxAttempts)
-	f := func(context.Context) error {
-		dialAttempts++
-		select {
-		case <-ctx.Done():
-			return fmt.Errorf("context done before stream could be created (retry attempt: %d, errors: %w)", dialAttempts, errs)
-		default:
-		}
-		// libp2p internally uses swarm dial - https://github.com/libp2p/go-libp2p-swarm/blob/master/swarm_dial.go
-		// to connect to a peer. Swarm dial adds a back off each time it fails connecting to a peer. While this is
-		// the desired behaviour for pub-sub (1-k style of communication) for 1-1 style we want to retry the connection
-		// immediately without backing off and fail-fast.
-		// Hence, explicitly cancel the dial back off (if any) and try connecting again
-
-		// cancel the dial back off (if any), since we want to connect immediately
-		dialAddr = m.streamFactory.DialAddress(peerID)
-		m.streamFactory.ClearBackoff(peerID)
-		err := m.streamFactory.Connect(ctx, peer.AddrInfo{ID: peerID})
-		if err != nil {
-			// if the connection was rejected due to invalid node id or
-			// if the connection was rejected due to connection gating skip the re-attempt
-			if stream.IsErrSecurityProtocolNegotiationFailed(err) || stream.IsErrGaterDisallowedConnection(err) {
-				return multierror.Append(errs, err)
-			}
-			m.logger.Warn().
-				Err(err).
-				Str("peer_id", peerID.String()).
-				Int("attempt", dialAttempts).
-				Uint64("max_attempts", maxAttempts).
-				Msg("retrying peer dialing")
-			return retry.RetryableError(multierror.Append(errs, err))
-		}
-		return nil
+	s, err = protocol.UpgradeRawStream(s)
+	if err != nil {
+		return nil, fmt.Errorf("failed to upgrade raw stream: %w", err)
 	}
-	start := time.Now()
-	err := retry.Do(ctx, backoff, f)
-	duration := time.Since(start)
+	updatedConfig, err := m.dialConfigCache.AdjustWithInit(peerID, func(config Config) (Config, error) {
+		config.ConsecutiveSuccessfulStream++ // increase consecutive successful stream count.
+		return config, nil
+	})
 	if err != nil {
-		m.metrics.OnPeerDialFailure(duration, dialAttempts)
-		return dialAddr, m.retryFailedError(uint64(dialAttempts), maxAttempts, fmt.Errorf("failed to dial peer: %w", err))
+		// This is not a retryable connection error; it is a fatal error.
+		// TODO: technically, it would be better to return an error here, but the error must be irrecoverable, and we cannot
+		// guarantee a clear distinction between recoverable and irrecoverable errors at the moment with CreateStream.
+		// We have to revisit this once we have studied the error handling paths in the unicast manager.
+		m.logger.Fatal().
+			Err(err).
+			Bool(logging.KeyNetworkingSecurity, true).
+			Str("peer_id", p2plogging.PeerId(peerID)).
+			Msg("failed to adjust dial config for peer id")
 	}
-	m.metrics.OnPeerDialed(duration, dialAttempts)
-	return dialAddr, nil
+	m.logger.Debug().
+		Str("peer_id", p2plogging.PeerId(peerID)).
+		Str("updated_dial_config", fmt.Sprintf("%+v", updatedConfig)).
+		Msg("stream created successfully")
+	return s, nil
 }
 
-// rawStream creates a stream to peer with retries.
-// Expected errors during normal operations:
-//   - ErrMaxRetries if retry attempts are exhausted
-func (m *Manager) rawStream(ctx context.Context, peerID peer.ID, protocolID protocol.ID, maxAttempts uint64) (libp2pnet.Stream, error) {
+// createStreamWithRetry attempts to create a new stream to the specified peer using the given protocolID.
+// It retries the stream creation with a jittered constant backoff, making at most
+// dialCfg.StreamCreationRetryAttemptBudget retries in addition to the first attempt.
+//
+// Expected errors:
+// - If the context expires before stream creation, it returns a context-related error with the number of attempts.
+// - If the protocol ID is not supported, the peer has no known addresses, security protocol negotiation fails, or the
+//   connection gater disallows the connection, no retries are attempted and the error is returned immediately.
+//
+// Metrics are collected to monitor the duration and attempts of the stream creation process.
+//
+// Arguments:
+// - ctx: Context to control the lifecycle of the stream creation.
+// - peerID: The ID of the peer with which the stream is to be established.
+// - protocolID: The identifier for the protocol used for the stream.
+// - dialCfg: Configuration parameters for dialing, including the retry attempt budget.
+//
+// Returns:
+// - libp2pnet.Stream: The successfully created stream, or nil if an error occurs.
+// - error: An error encountered during the stream creation, or nil if the stream is successfully established.
+func (m *Manager) createStreamWithRetry(ctx context.Context, peerID peer.ID, protocolID protocol.ID, dialCfg *Config) (libp2pnet.Stream, error) {
 	// aggregated retryable errors that occur during retries, errs will be returned
 	// if retry context times out or maxAttempts have been made before a successful retry occurs
 	var errs error
@@ -308,14 +290,14 @@ func (m *Manager) rawStream(ctx context.Context, peerID peer.ID, protocolID prot
 	}
 
 	var err error
-	// add libp2p context value NoDial to prevent the underlying host from dialingComplete the peer while creating the stream
-	// we've already ensured that a connection already exists.
-	ctx = libp2pnet.WithNoDial(ctx, "application ensured connection to peer exists")
 	// creates stream using stream factory
 	s, err = m.streamFactory.NewStream(ctx, peerID, protocolID)
 	if err != nil {
-		// if the stream creation failed due to invalid protocol id, skip the re-attempt
-		if stream.IsErrProtocolNotSupported(err) {
+		// if the stream creation failed due to invalid protocol id or no address, skip the re-attempt
+		if stream.IsErrProtocolNotSupported(err) ||
+			errors.Is(err, swarm.ErrNoAddresses) ||
+			stream.IsErrSecurityProtocolNegotiationFailed(err) ||
+			stream.IsErrGaterDisallowedConnection(err) {
 			return err
 		}
 		return retry.RetryableError(multierror.Append(errs, err))
@@ -324,44 +306,102 @@ func (m *Manager) rawStream(ctx context.Context, peerID peer.ID, protocolID prot
 	}
 
 	start := time.Now()
-	err := retry.Do(ctx, m.retryBackoff(maxAttempts), f)
+	err := retry.Do(ctx, retryBackoff(dialCfg.StreamCreationRetryAttemptBudget, m.createStreamBackoffDelay), f)
 	duration := time.Since(start)
 	if err != nil {
 		m.metrics.OnEstablishStreamFailure(duration, attempts)
-		return nil, m.retryFailedError(uint64(attempts), maxAttempts, fmt.Errorf("failed to create a stream to peer: %w", err))
+		return nil, retryFailedError(uint64(attempts), dialCfg.StreamCreationRetryAttemptBudget, fmt.Errorf("failed to create a stream to peer: %w", err))
 	}
 
 	m.metrics.OnStreamEstablished(duration, attempts)
 	return s, nil
 }
 
-// retryBackoff returns an exponential retry with jitter and max attempts.
-func (m *Manager) retryBackoff(maxAttempts uint64) retry.Backoff {
+// retryBackoff creates and returns a constant-interval retry backoff with jitter and the given maximum number of retries.
+// Note that the backoff always makes one initial attempt. Hence, the total number of attempts is maxRetries + 1.
+// Args:
+// - maxRetries: maximum number of retries (in addition to the first attempt).
+// - retryInterval: the constant interval between retry attempts.
+// Returns:
+// - a retry backoff object that makes a maximum of maxRetries + 1 attempts.
+func retryBackoff(maxRetries uint64, retryInterval time.Duration) retry.Backoff {
 	// create backoff
-	backoff := retry.NewConstant(time.Second)
+	backoff := retry.NewConstant(retryInterval)
 	// add a MaxRetryJitter*time.Millisecond jitter to our backoff to ensure that this node and the target node don't attempt to reconnect at the same time
 	backoff = retry.WithJitter(MaxRetryJitter*time.Millisecond, backoff)
+
 	// https://github.com/sethvargo/go-retry#maxretries retries counter starts at zero and library will make last attempt
-	// when retries == maxAttempts causing 1 more func invocation than expected.
-	maxRetries := maxAttempts - 1
+	// when retries == maxRetries. Hence, the total number of invocations is maxRetries + 1.
 	backoff = retry.WithMaxRetries(maxRetries, backoff)
 	return backoff
 }
 
 // retryFailedError wraps the given error in a ErrMaxRetries if maxAttempts were made.
-func (m *Manager) retryFailedError(dialAttempts, maxAttempts uint64, err error) error {
+func retryFailedError(dialAttempts, maxAttempts uint64, err error) error {
 	if dialAttempts == maxAttempts {
 		return NewMaxRetriesErr(dialAttempts, err)
 	}
 	return err
 }
 
-// dialingInProgress sets the value for peerID key in our map if it does not already exist.
-func (m *Manager) dialingInProgress(peerID peer.ID) bool {
-	_, loaded := m.peerDialing.LoadOrStore(peerID, struct{}{})
-	return loaded
+// getDialConfig gets the dial config for the given peer id.
+// It also adjusts the dial config if necessary: it resets the stream creation backoff budget to the default value
+// once the budget has reached zero and the number of consecutive successful streams reaches the reset threshold.
+// Args:
+// - peerID: peer id of the remote peer.
+//
+// Returns:
+// - dial config for the given peer id.
+// - error if the dial config cannot be retrieved or adjusted; any error is irrecoverable and indicates a fatal error.
+func (m *Manager) getDialConfig(peerID peer.ID) (*Config, error) {
+	dialCfg, err := m.dialConfigCache.GetWithInit(peerID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get or init dial config for peer id: %w", err)
+	}
+
+	if dialCfg.StreamCreationRetryAttemptBudget == uint64(0) && dialCfg.ConsecutiveSuccessfulStream >= m.streamZeroBackoffResetThreshold {
+		// reset the stream creation backoff budget to the default value if the number of consecutive successful streams reaches the threshold,
+		// as the stream creation is reliable enough to be trusted again.
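+		// note: the metrics are updated inside the adjust function so that they report the value that is actually stored in the cache.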
+		dialCfg, err = m.dialConfigCache.AdjustWithInit(peerID, func(config Config) (Config, error) {
+			config.StreamCreationRetryAttemptBudget = m.maxStreamCreationAttemptTimes
+			m.metrics.OnStreamCreationRetryBudgetUpdated(config.StreamCreationRetryAttemptBudget)
+			m.metrics.OnStreamCreationRetryBudgetResetToDefault()
+			return config, nil
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to adjust dial config for peer id (resetting stream creation attempt budget): %w", err)
+		}
+	}
+	return dialCfg, nil
 }
 
-// dialingComplete removes peerDialing value for peerID indicating dialing to peerID no longer in progress.
-func (m *Manager) dialingComplete(peerID peer.ID) {
-	m.peerDialing.Delete(peerID)
+// adjustUnsuccessfulStreamAttempt adjusts the dial config for the given peer id when stream creation fails.
+// It resets the consecutive successful stream counter to zero and, if there is any retry budget left,
+// decrements the stream creation backoff budget by one.
+// Args:
+// - peerID: peer id of the remote peer.
+//
+// Returns:
+// - the updated dial config for the given peer id.
+// - error if the dial config cannot be adjusted; any error is irrecoverable and indicates a fatal error.
+func (m *Manager) adjustUnsuccessfulStreamAttempt(peerID peer.ID) (*Config, error) {
+	updatedCfg, err := m.dialConfigCache.AdjustWithInit(peerID, func(config Config) (Config, error) {
+		// consecutive successful stream count is reset to 0 if we fail to create a stream or connection to the peer.
+		config.ConsecutiveSuccessfulStream = 0
+
+		// reaching this point means stream creation failed, hence we decrement the remaining stream retry budget
+		// so that the next attempt to create a stream operates under a stricter dial config.
+		if config.StreamCreationRetryAttemptBudget > 0 {
+			config.StreamCreationRetryAttemptBudget--
+			m.metrics.OnStreamCreationRetryBudgetUpdated(config.StreamCreationRetryAttemptBudget)
+		}
+
+		return config, nil
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("failed to adjust dial config for peer id: %w", err)
+	}
+
+	return updatedCfg, nil
 }
diff --git a/network/p2p/unicast/manager_config.go b/network/p2p/unicast/manager_config.go
new file mode 100644
index 00000000000..ea40c4f97bb
--- /dev/null
+++ b/network/p2p/unicast/manager_config.go
@@ -0,0 +1,22 @@
+package unicast
+
+import (
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/network/netconf"
+	"github.com/onflow/flow-go/network/p2p"
+)
+
+type ManagerConfig struct {
+	Logger        zerolog.Logger               `validate:"required"`
+	StreamFactory p2p.StreamFactory            `validate:"required"`
+	SporkId       flow.Identifier              `validate:"required"`
+	Metrics       module.UnicastManagerMetrics `validate:"required"`
+
+	Parameters *netconf.UnicastManager `validate:"required"`
+
+	// UnicastConfigCacheFactory is a factory function to create a new dial config cache.
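+	// It is called once by NewUnicastManager, with a factory that produces the default unicast config for a new peer.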
+ UnicastConfigCacheFactory DialConfigCacheFactory `validate:"required"` +} diff --git a/network/p2p/unicast/manager_test.go b/network/p2p/unicast/manager_test.go new file mode 100644 index 00000000000..1ab85e16cd8 --- /dev/null +++ b/network/p2p/unicast/manager_test.go @@ -0,0 +1,506 @@ +package unicast_test + +import ( + "context" + "fmt" + "testing" + + libp2pnet "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/p2p/net/swarm" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/config" + "github.com/onflow/flow-go/module/metrics" + mockp2p "github.com/onflow/flow-go/network/p2p/mock" + p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/network/p2p/unicast" + unicastcache "github.com/onflow/flow-go/network/p2p/unicast/cache" + "github.com/onflow/flow-go/network/p2p/unicast/stream" + "github.com/onflow/flow-go/utils/unittest" +) + +func unicastManagerFixture(t *testing.T) (*unicast.Manager, *mockp2p.StreamFactory, unicast.ConfigCache) { + streamFactory := mockp2p.NewStreamFactory(t) + streamFactory.On("SetStreamHandler", mock.AnythingOfType("protocol.ID"), mock.AnythingOfType("network.StreamHandler")).Return().Once() + + cfg, err := config.DefaultConfig() + require.NoError(t, err) + + unicastConfigCache := unicastcache.NewUnicastConfigCache(cfg.NetworkConfig.Unicast.UnicastManager.ConfigCacheSize, + unittest.Logger(), + metrics.NewNoopCollector(), + func() unicast.Config { + return unicast.Config{ + StreamCreationRetryAttemptBudget: cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes, + } + }) + + mgr, err := unicast.NewUnicastManager(&unicast.ManagerConfig{ + Logger: unittest.Logger(), + StreamFactory: streamFactory, + SporkId: unittest.IdentifierFixture(), + Metrics: metrics.NewNoopCollector(), + Parameters: &cfg.NetworkConfig.Unicast.UnicastManager, + UnicastConfigCacheFactory: func(func() unicast.Config) unicast.ConfigCache { + return unicastConfigCache + }, + }) + require.NoError(t, err) + mgr.SetDefaultHandler(func(libp2pnet.Stream) {}) // no-op handler, we don't care about the handler for this test + + return mgr, streamFactory, unicastConfigCache +} + +// TestManagerConfigValidation tests the validation of the unicast manager config. +// It tests that the config is valid when all the required fields are provided. 
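+// It also asserts that construction fails when any required field is missing or nil.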
+func TestManagerConfigValidation(t *testing.T) {
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+
+	validConfig := unicast.ManagerConfig{
+		Logger:        unittest.Logger(),
+		StreamFactory: mockp2p.NewStreamFactory(t),
+		SporkId:       unittest.IdentifierFixture(),
+		Parameters:    &cfg.NetworkConfig.Unicast.UnicastManager,
+		Metrics:       metrics.NewNoopCollector(),
+		UnicastConfigCacheFactory: func(func() unicast.Config) unicast.ConfigCache {
+			return unicastcache.NewUnicastConfigCache(cfg.NetworkConfig.Unicast.UnicastManager.ConfigCacheSize,
+				unittest.Logger(),
+				metrics.NewNoopCollector(),
+				func() unicast.Config {
+					return unicast.Config{
+						StreamCreationRetryAttemptBudget: cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes,
+					}
+				})
+		},
+	}
+
+	t.Run("Valid Config", func(t *testing.T) {
+		mgr, err := unicast.NewUnicastManager(&validConfig)
+		require.NoError(t, err)
+		require.NotNil(t, mgr)
+	})
+
+	t.Run("Missing Fields", func(t *testing.T) {
+		cfg := &unicast.ManagerConfig{}
+		mgr, err := unicast.NewUnicastManager(cfg)
+		require.Error(t, err)
+		require.Nil(t, mgr)
+	})
+
+	t.Run("Nil Parameters", func(t *testing.T) {
+		cfg := validConfig
+		cfg.Parameters = nil
+		mgr, err := unicast.NewUnicastManager(&cfg)
+		require.Error(t, err)
+		require.Nil(t, mgr)
+	})
+
+	t.Run("Invalid UnicastConfigCacheFactory", func(t *testing.T) {
+		cfg := validConfig
+		cfg.UnicastConfigCacheFactory = nil
+		mgr, err := unicast.NewUnicastManager(&cfg)
+		require.Error(t, err)
+		require.Nil(t, mgr)
+	})
+
+	t.Run("Missing StreamFactory", func(t *testing.T) {
+		cfg := validConfig
+		cfg.StreamFactory = nil
+		mgr, err := unicast.NewUnicastManager(&cfg)
+		require.Error(t, err)
+		require.Nil(t, mgr)
+	})
+
+	t.Run("Missing Metrics", func(t *testing.T) {
+		cfg := validConfig
+		cfg.Metrics = nil
+		mgr, err := unicast.NewUnicastManager(&cfg)
+		require.Error(t, err)
+		require.Nil(t, mgr)
+	})
+}
+
+// TestUnicastManager_SuccessfulStream tests that when CreateStream is successful on the first attempt for stream creation,
+// it updates the consecutive successful stream counter.
+func TestUnicastManager_SuccessfulStream(t *testing.T) {
+	peerID := unittest.PeerIdFixture(t)
+	mgr, streamFactory, configCache := unicastManagerFixture(t)
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+
+	streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything).Return(&p2ptest.MockStream{}, nil).Once()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	s, err := mgr.CreateStream(ctx, peerID)
+	require.NoError(t, err)
+	require.NotNil(t, s)
+
+	// The unicast config must reflect the successful stream creation.
+	unicastCfg, err := configCache.GetWithInit(peerID)
+	require.NoError(t, err)
+	require.Equal(t, cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes, unicastCfg.StreamCreationRetryAttemptBudget) // stream backoff budget must remain intact.
+	require.Equal(t, uint64(1), unicastCfg.ConsecutiveSuccessfulStream)                                                                        // consecutive successful stream must be incremented.
+}
+
+// TestUnicastManager_StreamBackoff tests the backoff mechanism of the unicast manager for stream creation.
+// It tests the situation that CreateStream is called but the stream creation fails.
+// It tests that it tries to create a stream some number of times (MaxStreamCreationRetryAttemptTimes + 1 attempts) before giving up.
+// It also checks the consecutive successful stream counter is reset when the stream creation fails.
+func TestUnicastManager_StreamBackoff(t *testing.T) {
+	peerID := unittest.PeerIdFixture(t)
+	mgr, streamFactory, configCache := unicastManagerFixture(t)
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+
+	// mocks that it attempts to create a stream some number of times, before giving up.
+	streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything).
+		Return(nil, fmt.Errorf("some error")).
+		Times(int(cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes + 1))
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	s, err := mgr.CreateStream(ctx, peerID)
+	require.Error(t, err)
+	require.Nil(t, s)
+
+	// The unicast config must be updated with the backoff budget decremented.
+	unicastCfg, err := configCache.GetWithInit(peerID)
+	require.NoError(t, err)
+	// stream backoff budget must be decremented by 1 since all budget is used up.
+	require.Equal(t, cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes-1, unicastCfg.StreamCreationRetryAttemptBudget)
+	// consecutive successful stream must be reset to zero, since the stream creation failed.
+	require.Equal(t, uint64(0), unicastCfg.ConsecutiveSuccessfulStream)
+}
+
+// TestUnicastManager_StreamFactory_StreamBackoff tests the backoff mechanism of the unicast manager for stream creation.
+// It tests that when there is a connection but no stream, the manager tries to create a stream some number of times
+// (MaxStreamCreationRetryAttemptTimes + 1 attempts) before giving up.
+func TestUnicastManager_StreamFactory_StreamBackoff(t *testing.T) {
+	mgr, streamFactory, unicastConfigCache := unicastManagerFixture(t)
+	peerID := unittest.PeerIdFixture(t)
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+
+	// mocks that it attempts to create a stream some number of times, before giving up.
+	streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything).
+		Return(nil, fmt.Errorf("some error")).
+		Times(int(cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes + 1))
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	s, err := mgr.CreateStream(ctx, peerID)
+	require.Error(t, err)
+	require.Nil(t, s)
+
+	// The unicast config must be updated with the stream backoff budget decremented.
+	unicastCfg, err := unicastConfigCache.GetWithInit(peerID)
+	require.NoError(t, err)
+	// stream backoff budget must be decremented by 1.
+	require.Equal(t, cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes-1, unicastCfg.StreamCreationRetryAttemptBudget)
+	// consecutive successful stream must be zero as we have not created a successful stream yet.
+	require.Equal(t, uint64(0), unicastCfg.ConsecutiveSuccessfulStream)
+}
+
+// TestUnicastManager_Stream_ConsecutiveStreamCreation_Increment tests that when stream creation is successful,
+// it increments the consecutive successful stream counter in the unicast config.
+func TestUnicastManager_Stream_ConsecutiveStreamCreation_Increment(t *testing.T) {
+	mgr, streamFactory, unicastConfigCache := unicastManagerFixture(t)
+	peerID := unittest.PeerIdFixture(t)
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+
+	// total times we successfully create a stream to the peer.
+	totalSuccessAttempts := 10
+
+	// mocks that it attempts to create a stream 10 times, and each time it succeeds.
+	streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything).Return(&p2ptest.MockStream{}, nil).Times(totalSuccessAttempts)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	for i := 0; i < totalSuccessAttempts; i++ {
+		s, err := mgr.CreateStream(ctx, peerID)
+		require.NoError(t, err)
+		require.NotNil(t, s)
+
+		// The unicast config must reflect the successful stream creation.
+		unicastCfg, err := unicastConfigCache.GetWithInit(peerID)
+		require.NoError(t, err)
+		// stream backoff budget must be intact (all stream creation attempts are successful).
+		require.Equal(t, cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes, unicastCfg.StreamCreationRetryAttemptBudget)
+		// consecutive successful stream must be incremented.
+		require.Equal(t, uint64(i+1), unicastCfg.ConsecutiveSuccessfulStream)
+	}
+}
+
+// TestUnicastManager_Stream_ConsecutiveStreamCreation_Reset tests that when the stream creation fails, it resets
+// the consecutive successful stream counter in the unicast config.
+func TestUnicastManager_Stream_ConsecutiveStreamCreation_Reset(t *testing.T) {
+	mgr, streamFactory, unicastConfigCache := unicastManagerFixture(t)
+	peerID := unittest.PeerIdFixture(t)
+
+	// mocks that it attempts to create a stream once and fails.
+	streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything).
+		Return(nil, fmt.Errorf("some error")).
+		Once()
+
+	adjustedUnicastConfig, err := unicastConfigCache.AdjustWithInit(peerID, func(unicastConfig unicast.Config) (unicast.Config, error) {
+		// sets the consecutive successful stream to 5, meaning that the last 5 stream creation attempts were successful.
+		unicastConfig.ConsecutiveSuccessfulStream = 5
+		// sets the stream backoff budget to 0, meaning that the stream backoff budget is exhausted.
+		unicastConfig.StreamCreationRetryAttemptBudget = 0
+
+		return unicastConfig, nil
+	})
+	require.NoError(t, err)
+	require.Equal(t, uint64(5), adjustedUnicastConfig.ConsecutiveSuccessfulStream)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	s, err := mgr.CreateStream(ctx, peerID)
+	require.Error(t, err)
+	require.Nil(t, s)
+
+	// The unicast config must reflect the failed stream creation attempt.
+	unicastCfg, err := unicastConfigCache.GetWithInit(peerID)
+	require.NoError(t, err)
+
+	// stream backoff budget must be intact (we can't decrement it below 0).
+	require.Equal(t, uint64(0), unicastCfg.StreamCreationRetryAttemptBudget)
+	// consecutive successful stream must be reset to 0.
+	require.Equal(t, uint64(0), unicastCfg.ConsecutiveSuccessfulStream)
+}
+
+// TestUnicastManager_StreamFactory_ErrProtocolNotSupported tests that when there is a protocol not supported error, it does not retry creating a stream.
+func TestUnicastManager_StreamFactory_ErrProtocolNotSupported(t *testing.T) {
+	mgr, streamFactory, _ := unicastManagerFixture(t)
+	peerID := unittest.PeerIdFixture(t)
+
+	// mocks that upon creating a stream, it returns a protocol not supported error; the mock is set to Once, meaning that it won't retry stream creation again.
+	streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything).
+		Return(nil, stream.NewProtocolNotSupportedErr(peerID, protocol.ID("protocol-1"), fmt.Errorf("some error"))).
+ Once() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s, err := mgr.CreateStream(ctx, peerID) + require.Error(t, err) + require.Nil(t, s) +} + +// TestUnicastManager_StreamFactory_ErrNoAddresses tests that when stream creation returns a no addresses error, +// it does not retry stream creation again and returns an error immediately. +func TestUnicastManager_StreamFactory_ErrNoAddresses(t *testing.T) { + mgr, streamFactory, unicastConfigCache := unicastManagerFixture(t) + + cfg, err := config.DefaultConfig() + require.NoError(t, err) + + peerID := unittest.PeerIdFixture(t) + + // mocks that stream creation returns a no addresses error, and the mock is set to once, meaning that it won't retry stream creation again. + streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything). + Return(nil, fmt.Errorf("some error to ensure wrapping works fine: %w", swarm.ErrNoAddresses)). + Once() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s, err := mgr.CreateStream(ctx, peerID) + require.Error(t, err) + require.Nil(t, s) + + unicastCfg, err := unicastConfigCache.GetWithInit(peerID) + require.NoError(t, err) + + // stream backoff budget must be reduced by 1 due to failed stream creation. + require.Equal(t, cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes-1, unicastCfg.StreamCreationRetryAttemptBudget) + // consecutive successful stream must be set to zero. + require.Equal(t, uint64(0), unicastCfg.ConsecutiveSuccessfulStream) +} + +// TestUnicastManager_Stream_ErrSecurityProtocolNegotiationFailed tests that when there is a security protocol negotiation error, it does not retry stream creation. +func TestUnicastManager_Stream_ErrSecurityProtocolNegotiationFailed(t *testing.T) { + mgr, streamFactory, unicastConfigCache := unicastManagerFixture(t) + + cfg, err := config.DefaultConfig() + require.NoError(t, err) + + peerID := unittest.PeerIdFixture(t) + + // mocks that stream creation returns a security protocol negotiation error, and the mock is set to once, meaning that it won't retry stream creation. + streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything). + Return(nil, stream.NewSecurityProtocolNegotiationErr(peerID, fmt.Errorf("some error"))). + Once() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s, err := mgr.CreateStream(ctx, peerID) + require.Error(t, err) + require.Nil(t, s) + + unicastCfg, err := unicastConfigCache.GetWithInit(peerID) + require.NoError(t, err) + // stream retry budget must be decremented by 1 (since we didn't have a successful stream creation, the budget is decremented). + require.Equal(t, cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes-1, unicastCfg.StreamCreationRetryAttemptBudget) + // consecutive successful stream must be set to zero. + require.Equal(t, uint64(0), unicastCfg.ConsecutiveSuccessfulStream) +} + +// TestUnicastManager_StreamFactory_ErrGaterDisallowedConnection tests that when there is a connection-gater disallow listing error, it does not retry stream creation. +func TestUnicastManager_StreamFactory_ErrGaterDisallowedConnection(t *testing.T) { + mgr, streamFactory, unicastConfigCache := unicastManagerFixture(t) + peerID := unittest.PeerIdFixture(t) + + cfg, err := config.DefaultConfig() + require.NoError(t, err) + + // mocks that stream creation to the peer returns a connection gater disallow-listing, and the mock is set to once, meaning that it won't retry stream creation. 
+	streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything).
+		Return(nil, stream.NewGaterDisallowedConnectionErr(fmt.Errorf("some error"))).
+		Once()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	s, err := mgr.CreateStream(ctx, peerID)
+	require.Error(t, err)
+	require.Nil(t, s)
+
+	unicastCfg, err := unicastConfigCache.GetWithInit(peerID)
+	require.NoError(t, err)
+	// stream backoff budget must be reduced by 1 due to failed stream creation.
+	require.Equal(t, cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes-1, unicastCfg.StreamCreationRetryAttemptBudget)
+	// consecutive successful stream must be set to zero.
+	require.Equal(t, uint64(0), unicastCfg.ConsecutiveSuccessfulStream)
+}
+
+// TestUnicastManager_Stream_BackoffBudgetDecremented tests that every time the unicast manager gives up on creating a stream (after retries),
+// it decrements the backoff budget for the remote peer.
+func TestUnicastManager_Stream_BackoffBudgetDecremented(t *testing.T) {
+	mgr, streamFactory, unicastConfigCache := unicastManagerFixture(t)
+	peerID := unittest.PeerIdFixture(t)
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+
+	// totalAttempts is the total number of times that the unicast manager calls NewStream on the stream factory to create a stream to the peer.
+	// Note that it already assumes that the connection is established, so it does not try to connect to the peer.
+	// Let's consider x = MaxStreamCreationRetryAttemptTimes + 1. Then the test calls CreateStream x times. With the dynamically decreasing budget,
+	// the first CreateStream call will try NewStream x times, the second CreateStream call will try NewStream x-1 times,
+	// and so on. So the total number of NewStream calls is x + (x-1) + (x-2) + ... + 1 = x(x+1)/2.
+	maxStreamRetryBudget := cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes
+	maxStreamAttempt := maxStreamRetryBudget + 1 // 1 attempt + retry times
+	totalAttempts := maxStreamAttempt * (maxStreamAttempt + 1) / 2
+
+	streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything).
+		Return(nil, fmt.Errorf("some error")).
+		Times(int(totalAttempts))
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for i := 0; i < int(maxStreamRetryBudget); i++ {
+		s, err := mgr.CreateStream(ctx, peerID)
+		require.Error(t, err)
+		require.Nil(t, s)
+
+		unicastCfg, err := unicastConfigCache.GetWithInit(peerID)
+		require.NoError(t, err)
+
+		if i == int(maxStreamRetryBudget)-1 {
+			require.Equal(t, uint64(0), unicastCfg.StreamCreationRetryAttemptBudget)
+		} else {
+			require.Equal(t, maxStreamRetryBudget-uint64(i)-1, unicastCfg.StreamCreationRetryAttemptBudget)
+		}
+	}
+	// At this time the stream backoff budget must be 0.
+	unicastCfg, err := unicastConfigCache.GetWithInit(peerID)
+	require.NoError(t, err)
+	require.Equal(t, uint64(0), unicastCfg.StreamCreationRetryAttemptBudget)
+
+	// After all the backoff budget is used up, it should stay at 0.
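+	// this final CreateStream call makes exactly one NewStream attempt (1 attempt + 0 retries), consuming the last of the mocked calls.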
+	s, err := mgr.CreateStream(ctx, peerID)
+	require.Error(t, err)
+	require.Nil(t, s)
+
+	unicastCfg, err = unicastConfigCache.GetWithInit(peerID)
+	require.NoError(t, err)
+	require.Equal(t, uint64(0), unicastCfg.StreamCreationRetryAttemptBudget)
+}
+
+// TestUnicastManager_Stream_BackoffBudgetResetToDefault tests that when the stream retry attempt budget is zero, and the consecutive successful stream counter is above the reset threshold,
+// it resets the stream retry attempt budget to the default value and increments the consecutive successful stream counter.
+func TestUnicastManager_Stream_BackoffBudgetResetToDefault(t *testing.T) {
+	mgr, streamFactory, unicastConfigCache := unicastManagerFixture(t)
+	peerID := unittest.PeerIdFixture(t)
+
+	cfg, err := config.DefaultConfig()
+	require.NoError(t, err)
+
+	// mocks that it attempts to create a stream once and succeeds.
+	streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything).Return(&p2ptest.MockStream{}, nil).Once()
+
+	// update the unicast config of the peer to have a zero stream backoff budget but a consecutive successful stream counter above the reset threshold.
+	adjustedCfg, err := unicastConfigCache.AdjustWithInit(peerID, func(unicastConfig unicast.Config) (unicast.Config, error) {
+		unicastConfig.StreamCreationRetryAttemptBudget = 0
+		unicastConfig.ConsecutiveSuccessfulStream = cfg.NetworkConfig.Unicast.UnicastManager.StreamZeroRetryResetThreshold + 1
+		return unicastConfig, nil
+	})
+	require.NoError(t, err)
+	require.Equal(t, uint64(0), adjustedCfg.StreamCreationRetryAttemptBudget)
+	require.Equal(t, cfg.NetworkConfig.Unicast.UnicastManager.StreamZeroRetryResetThreshold+1, adjustedCfg.ConsecutiveSuccessfulStream)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	s, err := mgr.CreateStream(ctx, peerID)
+	require.NoError(t, err)
+	require.NotNil(t, s)
+
+	unicastCfg, err := unicastConfigCache.GetWithInit(peerID)
+	require.NoError(t, err)
+	// stream backoff budget must reset to default.
+	require.Equal(t, cfg.NetworkConfig.Unicast.UnicastManager.MaxStreamCreationRetryAttemptTimes, unicastCfg.StreamCreationRetryAttemptBudget)
+	// consecutive successful stream must increment by 1 (it was threshold + 1 before).
+	require.Equal(t, cfg.NetworkConfig.Unicast.UnicastManager.StreamZeroRetryResetThreshold+1+1, unicastCfg.ConsecutiveSuccessfulStream)
+}
+
+// TestUnicastManager_Stream_NoBackoff_When_Budget_Is_Zero tests that when the stream backoff budget is zero and the consecutive successful stream counter is not above the
+// zero reset threshold, the unicast manager does not back off if the stream creation attempt fails.
+func TestUnicastManager_Stream_NoBackoff_When_Budget_Is_Zero(t *testing.T) {
+	mgr, streamFactory, unicastConfigCache := unicastManagerFixture(t)
+	peerID := unittest.PeerIdFixture(t)
+
+	// mocks that it attempts to create a stream once and fails, and does not retry.
+	streamFactory.On("NewStream", mock.Anything, peerID, mock.Anything).Return(nil, fmt.Errorf("some error")).Once()
+
+	adjustedCfg, err := unicastConfigCache.AdjustWithInit(peerID, func(unicastConfig unicast.Config) (unicast.Config, error) {
+		unicastConfig.ConsecutiveSuccessfulStream = 2     // set the consecutive successful stream to 2, which is below the reset threshold.
+		unicastConfig.StreamCreationRetryAttemptBudget = 0 // set the stream backoff budget to 0, meaning that the stream backoff budget is exhausted.
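+		// with a zero budget and a counter below the reset threshold, the manager must neither retry nor reset the budget.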
+ return unicastConfig, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(0), adjustedCfg.StreamCreationRetryAttemptBudget) + require.Equal(t, uint64(2), adjustedCfg.ConsecutiveSuccessfulStream) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s, err := mgr.CreateStream(ctx, peerID) + require.Error(t, err) + require.Nil(t, s) + + unicastCfg, err := unicastConfigCache.GetWithInit(peerID) + require.NoError(t, err) + require.Equal(t, uint64(0), unicastCfg.StreamCreationRetryAttemptBudget) // stream backoff budget must remain zero. + require.Equal(t, uint64(0), unicastCfg.ConsecutiveSuccessfulStream) // consecutive successful stream must be set to zero. +} diff --git a/network/p2p/unicast/plain.go b/network/p2p/unicast/plain.go deleted file mode 100644 index b1c4c827586..00000000000 --- a/network/p2p/unicast/plain.go +++ /dev/null @@ -1,25 +0,0 @@ -package unicast - -import ( - libp2pnet "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/protocol" -) - -// PlainStream is a stream factory that reflects the same input stream without any modification. -type PlainStream struct { - handler libp2pnet.StreamHandler - protocolId protocol.ID -} - -// UpgradeRawStream implements protocol interface and returns the input stream without any modification. -func (p PlainStream) UpgradeRawStream(s libp2pnet.Stream) (libp2pnet.Stream, error) { - return s, nil -} - -func (p PlainStream) Handler(s libp2pnet.Stream) { - p.handler(s) -} - -func (p PlainStream) ProtocolId() protocol.ID { - return p.protocolId -} diff --git a/network/p2p/unicast/protocols/gzip.go b/network/p2p/unicast/protocols/gzip.go index 3aca00d5e0e..d2b28823f3e 100644 --- a/network/p2p/unicast/protocols/gzip.go +++ b/network/p2p/unicast/protocols/gzip.go @@ -7,7 +7,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network/compressor" - "github.com/onflow/flow-go/network/p2p/compressed" + "github.com/onflow/flow-go/network/p2p/unicast/protocols/internal" ) const GzipCompressionUnicast = ProtocolName("gzip-compression") @@ -33,7 +33,7 @@ func NewGzipCompressedUnicast(logger zerolog.Logger, sporkId flow.Identifier, de // UpgradeRawStream wraps gzip compression and decompression around the plain libp2p stream. func (g GzipStream) UpgradeRawStream(s libp2pnet.Stream) (libp2pnet.Stream, error) { - return compressed.NewCompressedStream(s, compressor.GzipStreamCompressor{}) + return internal.NewCompressedStream(s, compressor.GzipStreamCompressor{}) } func (g GzipStream) Handler(s libp2pnet.Stream) { diff --git a/network/p2p/compressed/compressedStream.go b/network/p2p/unicast/protocols/internal/compressedStream.go similarity index 78% rename from network/p2p/compressed/compressedStream.go rename to network/p2p/unicast/protocols/internal/compressedStream.go index d0c59cab4c7..9338908da80 100644 --- a/network/p2p/compressed/compressedStream.go +++ b/network/p2p/unicast/protocols/internal/compressedStream.go @@ -1,4 +1,4 @@ -package compressed +package internal import ( "fmt" @@ -11,10 +11,10 @@ import ( flownet "github.com/onflow/flow-go/network" ) -// compressedStream is an internal networking layer data structure, +// CompressedStream is an internal networking layer data structure, // which implements a compression mechanism as a wrapper around a native // libp2p stream. 
-type compressedStream struct { +type CompressedStream struct { network.Stream writeLock sync.Mutex @@ -26,8 +26,8 @@ type compressedStream struct { } // NewCompressedStream creates a compressed stream with gzip as default compressor. -func NewCompressedStream(s network.Stream, compressor flownet.Compressor) (*compressedStream, error) { - c := &compressedStream{ +func NewCompressedStream(s network.Stream, compressor flownet.Compressor) (*CompressedStream, error) { + c := &CompressedStream{ Stream: s, compressor: compressor, } @@ -42,7 +42,7 @@ func NewCompressedStream(s network.Stream, compressor flownet.Compressor) (*comp return c, nil } -func (c *compressedStream) Write(b []byte) (int, error) { +func (c *CompressedStream) Write(b []byte) (int, error) { c.writeLock.Lock() defer c.writeLock.Unlock() @@ -51,7 +51,7 @@ func (c *compressedStream) Write(b []byte) (int, error) { return n, multierr.Combine(err, c.w.Flush()) } -func (c *compressedStream) Read(b []byte) (int, error) { +func (c *CompressedStream) Read(b []byte) (int, error) { c.readLock.Lock() defer c.readLock.Unlock() @@ -71,7 +71,7 @@ func (c *compressedStream) Read(b []byte) (int, error) { return n, err } -func (c *compressedStream) Close() error { +func (c *CompressedStream) Close() error { c.writeLock.Lock() defer c.writeLock.Unlock() diff --git a/network/p2p/compressed/compressedStream_test.go b/network/p2p/unicast/protocols/internal/compressedStream_test.go similarity index 83% rename from network/p2p/compressed/compressedStream_test.go rename to network/p2p/unicast/protocols/internal/compressedStream_test.go index 615ac937701..e4ebee9b547 100644 --- a/network/p2p/compressed/compressedStream_test.go +++ b/network/p2p/unicast/protocols/internal/compressedStream_test.go @@ -1,4 +1,4 @@ -package compressed +package internal_test import ( "io" @@ -9,6 +9,8 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/network/compressor" + p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/network/p2p/unicast/protocols/internal" "github.com/onflow/flow-go/utils/unittest" ) @@ -98,25 +100,25 @@ func TestUnhappyPath(t *testing.T) { // newStreamPair is a test helper that creates a pair of compressed streams a and b such that // a reads what b writes and b reads what a writes. -func newStreamPair() (*mockStream, *mockStream) { +func newStreamPair() (*p2ptest.MockStream, *p2ptest.MockStream) { ra, wb := io.Pipe() rb, wa := io.Pipe() - sa := newMockStream(wa, ra) - sb := newMockStream(wb, rb) + sa := p2ptest.NewMockStream(wa, ra) + sb := p2ptest.NewMockStream(wb, rb) return sa, sb } // newCompressedStreamPair is a test helper that creates a pair of compressed streams a and b such that // a reads what b writes and b reads what a writes. 
-func newCompressedStreamPair(t *testing.T) (*compressedStream, *mockStream, *compressedStream, *mockStream) { +func newCompressedStreamPair(t *testing.T) (*internal.CompressedStream, *p2ptest.MockStream, *internal.CompressedStream, *p2ptest.MockStream) { sa, sb := newStreamPair() - mca, err := NewCompressedStream(sa, compressor.GzipStreamCompressor{}) + mca, err := internal.NewCompressedStream(sa, compressor.GzipStreamCompressor{}) require.NoError(t, err) - mcb, err := NewCompressedStream(sb, compressor.GzipStreamCompressor{}) + mcb, err := internal.NewCompressedStream(sb, compressor.GzipStreamCompressor{}) require.NoError(t, err) return mca, sa, mcb, sb diff --git a/network/p2p/unicast/ratelimit/bandwidth_rate_limiter_test.go b/network/p2p/unicast/ratelimit/bandwidth_rate_limiter_test.go index 16df3b62f78..1bcb28a46cc 100644 --- a/network/p2p/unicast/ratelimit/bandwidth_rate_limiter_test.go +++ b/network/p2p/unicast/ratelimit/bandwidth_rate_limiter_test.go @@ -6,7 +6,6 @@ import ( "github.com/onflow/flow-go/model/flow" libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/message" "github.com/stretchr/testify/require" @@ -37,9 +36,10 @@ func TestBandWidthRateLimiter_Allow(t *testing.T) { b[i] = byte('X') } - msg, err := network.NewOutgoingScope( + sporkId := unittest.IdentifierFixture() + msg, err := message.NewOutgoingScope( flow.IdentifierList{unittest.IdentifierFixture()}, - channels.TestNetworkChannel, + channels.TopicFromChannel(channels.TestNetworkChannel, sporkId), &libp2pmessage.TestMessage{ Text: string(b), }, @@ -73,7 +73,7 @@ func TestBandWidthRateLimiter_IsRateLimited(t *testing.T) { burst := 1000 // setup bandwidth rate limiter - bandwidthRateLimiter := NewBandWidthRateLimiter(limit, burst, 1) + bandwidthRateLimiter := NewBandWidthRateLimiter(limit, burst, time.Second) // for the duration of a simulated second we will send 3 messages. Each message is about // 400 bytes, the 3rd message will put our limiter over the 1000 byte limit at 1200 bytes. Thus @@ -90,9 +90,10 @@ func TestBandWidthRateLimiter_IsRateLimited(t *testing.T) { require.False(t, bandwidthRateLimiter.IsRateLimited(peerID)) - msg, err := network.NewOutgoingScope( + sporkId := unittest.IdentifierFixture() + msg, err := message.NewOutgoingScope( flow.IdentifierList{unittest.IdentifierFixture()}, - channels.TestNetworkChannel, + channels.TopicFromChannel(channels.TestNetworkChannel, sporkId), &libp2pmessage.TestMessage{ Text: string(b), }, diff --git a/network/p2p/unicast/retry.png b/network/p2p/unicast/retry.png new file mode 100644 index 00000000000..c86edb3ce5f Binary files /dev/null and b/network/p2p/unicast/retry.png differ diff --git a/network/p2p/unicast/stream/errors.go b/network/p2p/unicast/stream/errors.go index dc3f5250edd..844617b27e3 100644 --- a/network/p2p/unicast/stream/errors.go +++ b/network/p2p/unicast/stream/errors.go @@ -6,6 +6,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" + + p2plogging "github.com/onflow/flow-go/network/p2p/logging" ) // ErrSecurityProtocolNegotiationFailed indicates security protocol negotiation failed during the stream factory connect attempt. 
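The IsErr* predicates touched in errors.go follow a convention used throughout this networking layer: benign failures are modeled as small error structs paired with errors.As-based checks, so callers can classify an error even after it has been wrapped further up the call stack. A minimal, self-contained sketch of that convention; every name in it is hypothetical and only illustrates the shape:

```go
package main

import (
	"errors"
	"fmt"
)

// ErrHandshakeFailed is a hypothetical error type (not part of this diff) that
// follows the same pattern as ErrSecurityProtocolNegotiationFailed: a small
// struct wrapping a cause, paired with an errors.As-based predicate.
type ErrHandshakeFailed struct {
	peer string
	err  error
}

func (e ErrHandshakeFailed) Error() string {
	return fmt.Sprintf("handshake with peer %s failed: %v", e.peer, e.err)
}

// IsErrHandshakeFailed mirrors the IsErr* predicates in errors.go: callers can
// detect the error kind even through additional layers of wrapping.
func IsErrHandshakeFailed(err error) bool {
	var e ErrHandshakeFailed
	return errors.As(err, &e)
}

func main() {
	base := ErrHandshakeFailed{peer: "peer-1", err: errors.New("bad certificate")}
	wrapped := fmt.Errorf("dial failed: %w", base)
	fmt.Println(IsErrHandshakeFailed(wrapped)) // prints: true
}
```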
@@ -15,12 +17,7 @@ type ErrSecurityProtocolNegotiationFailed struct {
 }
 
 func (e ErrSecurityProtocolNegotiationFailed) Error() string {
-	return fmt.Errorf("failed to dial remote peer %s in stream factory invalid node ID: %w", e.pid.String(), e.err).Error()
-}
-
-// NewSecurityProtocolNegotiationErr returns a new ErrSecurityProtocolNegotiationFailed.
-func NewSecurityProtocolNegotiationErr(pid peer.ID, err error) ErrSecurityProtocolNegotiationFailed {
-	return ErrSecurityProtocolNegotiationFailed{pid: pid, err: err}
+	return fmt.Errorf("failed to dial remote peer %s in stream factory invalid node ID: %w", p2plogging.PeerId(e.pid), e.err).Error()
 }
 
 // IsErrSecurityProtocolNegotiationFailed returns whether an error is ErrSecurityProtocolNegotiationFailed.
@@ -29,20 +26,28 @@ func IsErrSecurityProtocolNegotiationFailed(err error) bool {
 	return errors.As(err, &e)
 }
 
+// NewSecurityProtocolNegotiationErr returns a new ErrSecurityProtocolNegotiationFailed.
+func NewSecurityProtocolNegotiationErr(pid peer.ID, err error) ErrSecurityProtocolNegotiationFailed {
+	return ErrSecurityProtocolNegotiationFailed{pid: pid, err: err}
+}
+
 // ErrProtocolNotSupported indicates node is running on a different spork.
 type ErrProtocolNotSupported struct {
-	peerID      peer.ID
-	protocolIDS []protocol.ID
-	err         error
+	peerID     peer.ID
+	protocolID protocol.ID
+	err        error
 }
 
 func (e ErrProtocolNotSupported) Error() string {
-	return fmt.Errorf("failed to dial remote peer %s remote node is running on a different spork: %w, protocol attempted: %s", e.peerID.String(), e.err, e.protocolIDS).Error()
+	return fmt.Errorf("failed to dial remote peer %s remote node is running on a different spork: %w, protocol attempted: %s",
+		p2plogging.PeerId(e.peerID),
+		e.err,
+		e.protocolID).Error()
 }
 
-// NewProtocolNotSupportedErr returns a new ErrSecurityProtocolNegotiationFailed.
-func NewProtocolNotSupportedErr(peerID peer.ID, protocolIDS []protocol.ID, err error) ErrProtocolNotSupported {
-	return ErrProtocolNotSupported{peerID: peerID, protocolIDS: protocolIDS, err: err}
+// NewProtocolNotSupportedErr returns a new ErrProtocolNotSupported.
+func NewProtocolNotSupportedErr(peerID peer.ID, protocolID protocol.ID, err error) ErrProtocolNotSupported {
+	return ErrProtocolNotSupported{peerID: peerID, protocolID: protocolID, err: err}
 }
 
 // IsErrProtocolNotSupported returns whether an error is ErrProtocolNotSupported.
diff --git a/network/p2p/unicast/stream/factory.go b/network/p2p/unicast/stream/factory.go
index 6213981e4ec..8336836d3a7 100644
--- a/network/p2p/unicast/stream/factory.go
+++ b/network/p2p/unicast/stream/factory.go
@@ -3,6 +3,7 @@ package stream
 import (
 	"context"
 	"errors"
+	"fmt"
 	"strings"
 
 	"github.com/libp2p/go-libp2p/core/host"
@@ -10,7 +11,8 @@ import (
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/protocol"
 	"github.com/libp2p/go-libp2p/p2p/net/swarm"
-	"github.com/multiformats/go-multiaddr"
+
+	"github.com/onflow/flow-go/network/p2p"
 )
 
 const (
@@ -18,27 +20,13 @@ const (
 	protocolNotSupportedStr = "protocol not supported"
 )
 
-// Factory is a wrapper around libp2p host.Host to provide abstraction and encapsulation for unicast stream manager so that
-// it can create libp2p streams with finer granularity.
-type Factory interface {
-	SetStreamHandler(protocol.ID, network.StreamHandler)
-	DialAddress(peer.ID) []multiaddr.Multiaddr
-	ClearBackoff(peer.ID)
-	// Connect connects host to peer with peerID.
-	// Expected errors during normal operations:
-	// - NewSecurityProtocolNegotiationErr this indicates there was an issue upgrading the connection.
- Connect(context.Context, peer.AddrInfo) error - // NewStream creates a new stream on the libp2p host. - // Expected errors during normal operations: - // - ErrProtocolNotSupported this indicates remote node is running on a different spork. - NewStream(context.Context, peer.ID, ...protocol.ID) (network.Stream, error) -} - type LibP2PStreamFactory struct { host host.Host } -func NewLibP2PStreamFactory(h host.Host) Factory { +var _ p2p.StreamFactory = (*LibP2PStreamFactory)(nil) + +func NewLibP2PStreamFactory(h host.Host) p2p.StreamFactory { return &LibP2PStreamFactory{host: h} } @@ -46,43 +34,59 @@ func (l *LibP2PStreamFactory) SetStreamHandler(pid protocol.ID, handler network. l.host.SetStreamHandler(pid, handler) } -func (l *LibP2PStreamFactory) DialAddress(p peer.ID) []multiaddr.Multiaddr { - return l.host.Peerstore().Addrs(p) -} - -func (l *LibP2PStreamFactory) ClearBackoff(p peer.ID) { - if swm, ok := l.host.Network().(*swarm.Swarm); ok { - swm.Backoff().Clear(p) - } -} - -// Connect connects host to peer with peerAddrInfo. -// Expected errors during normal operations: +// NewStream establishes a new stream with the given peer using the provided protocol.ID on the libp2p host. +// This function is a critical part of the network communication, facilitating the creation of a dedicated +// bidirectional channel (stream) between two nodes in the network. +// If there exists no connection between the two nodes, the function attempts to establish one before creating the stream. +// If there are multiple connections between the two nodes, the function selects the best one (based on libp2p internal criteria) to create the stream. +// +// Usage: +// The function is intended to be used when there is a need to initiate a direct communication stream with a peer. +// It is typically invoked in scenarios where a node wants to send a message or start a series of messages to another +// node using a specific protocol. The protocol ID is used to ensure that both nodes communicate over the same +// protocol, which defines the structure and semantics of the communication. +// +// Expected errors: +// During normal operation, the function may encounter specific expected errors, which are handled as follows: +// +// - ErrProtocolNotSupported: This error occurs when the remote node does not support the specified protocol ID, +// which may indicate that the remote node is running a different version of the software or a different spork. +// The error contains details about the peer ID and the unsupported protocol, and it is generated when the +// underlying error message indicates a protocol mismatch. This is a critical error as it signifies that the +// two nodes cannot communicate using the requested protocol, and it must be handled by either retrying with +// a different protocol ID or by performing some form of negotiation or fallback. +// // - ErrSecurityProtocolNegotiationFailed this indicates there was an issue upgrading the connection. -func (l *LibP2PStreamFactory) Connect(ctx context.Context, peerAddrInfo peer.AddrInfo) error { - err := l.host.Connect(ctx, peerAddrInfo) +// +// - ErrGaterDisallowedConnection this indicates the connection was disallowed by the gater. +// +// - Any other error returned by the libp2p host: This error indicates that the stream creation failed due to +// some unexpected error, which may be caused by a variety of reasons. This is NOT a critical error, and it +// can be handled by retrying the stream creation or by performing some other action. 
Crashing the node upon this
+// error is NOT recommended.
+//
+// Arguments:
+// - ctx: A context.Context that governs the lifetime of the stream creation. It can be used to cancel the
+// operation or to set deadlines.
+// - p: The peer.ID of the target node with which the stream is to be established.
+// - pid: The protocol.ID that specifies the communication protocol to be used for the stream.
+//
+// Returns:
+// - network.Stream: The successfully created stream, ready for reading and writing, or nil if an error occurs.
+// - error: An error encountered during stream creation, wrapped in a contextually appropriate error type when necessary,
+// or nil if the operation is successful.
+func (l *LibP2PStreamFactory) NewStream(ctx context.Context, p peer.ID, pid protocol.ID) (network.Stream, error) {
+	s, err := l.host.NewStream(ctx, p, pid)
 	switch {
 	case err == nil:
-		return nil
+		return s, nil
+	case strings.Contains(err.Error(), protocolNotSupportedStr):
+		return nil, NewProtocolNotSupportedErr(p, pid, err)
 	case strings.Contains(err.Error(), protocolNegotiationFailedStr):
-		return NewSecurityProtocolNegotiationErr(peerAddrInfo.ID, err)
+		return nil, NewSecurityProtocolNegotiationErr(p, err)
 	case errors.Is(err, swarm.ErrGaterDisallowedConnection):
-		return NewGaterDisallowedConnectionErr(err)
+		return nil, NewGaterDisallowedConnectionErr(err)
 	default:
-		return err
-	}
-}
-
-// NewStream creates a new stream on the libp2p host.
-// Expected errors during normal operations:
-// - ErrProtocolNotSupported this indicates remote node is running on a different spork.
-func (l *LibP2PStreamFactory) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) {
-	s, err := l.host.NewStream(ctx, p, pids...)
-	if err != nil {
-		if strings.Contains(err.Error(), protocolNotSupportedStr) {
-			return nil, NewProtocolNotSupportedErr(p, pids, err)
-		}
-		return nil, err
+		return nil, fmt.Errorf("failed to create stream: %w", err)
 	}
-	return s, err
 }
diff --git a/network/p2p/unicast/stream/plain.go b/network/p2p/unicast/stream/plain.go
new file mode 100644
index 00000000000..edf974e15d4
--- /dev/null
+++ b/network/p2p/unicast/stream/plain.go
@@ -0,0 +1,38 @@
+package stream
+
+import (
+	libp2pnet "github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// PlainStream is a stream factory that reflects the same input stream without any modification.
+type PlainStream struct {
+	handler    libp2pnet.StreamHandler
+	protocolId protocol.ID
+}
+
+// NewPlainStream creates a new PlainStream.
+// Args:
+// - handler: the stream handler that handles the input stream.
+// - protocolId: the protocol id of the stream.
+// Returns:
+// - PlainStream instance.
+func NewPlainStream(handler libp2pnet.StreamHandler, protocolId protocol.ID) PlainStream {
+	return PlainStream{
+		handler:    handler,
+		protocolId: protocolId,
+	}
+}
+
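To make the error contract documented on NewStream concrete, here is a hedged sketch of how a caller might branch on the promised error classes. The openStream helper is hypothetical; only the p2p.StreamFactory interface and the stream.IsErr* predicates come from this diff.

```go
package example

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"

	"github.com/onflow/flow-go/network/p2p"
	"github.com/onflow/flow-go/network/p2p/unicast/stream"
)

// openStream is a hypothetical caller of the factory above.
func openStream(ctx context.Context, factory p2p.StreamFactory, p peer.ID, pid protocol.ID) (network.Stream, error) {
	s, err := factory.NewStream(ctx, p, pid)
	switch {
	case err == nil:
		return s, nil
	case stream.IsErrProtocolNotSupported(err):
		// The remote peer likely runs a different spork or protocol version;
		// retrying with the same protocol ID will not help.
		return nil, fmt.Errorf("peer %s does not support protocol %s: %w", p, pid, err)
	case stream.IsErrSecurityProtocolNegotiationFailed(err):
		// Connection upgrade failed; benign, surface to the caller.
		return nil, err
	default:
		// Non-critical per the contract above: retry or drop, never crash the node.
		return nil, err
	}
}
```

+// UpgradeRawStream implements the protocol interface and returns the input stream without any modification.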
+func (p PlainStream) UpgradeRawStream(s libp2pnet.Stream) (libp2pnet.Stream, error) { + return s, nil +} + +func (p PlainStream) Handler(s libp2pnet.Stream) { + p.handler(s) +} + +func (p PlainStream) ProtocolId() protocol.ID { + return p.protocolId +} diff --git a/network/p2p/unicast/streams.png b/network/p2p/unicast/streams.png new file mode 100644 index 00000000000..b07934c1d08 Binary files /dev/null and b/network/p2p/unicast/streams.png differ diff --git a/network/p2p/unicast_manager.go b/network/p2p/unicast_manager.go index 0a106b538f8..fbd31dc8327 100644 --- a/network/p2p/unicast_manager.go +++ b/network/p2p/unicast_manager.go @@ -5,7 +5,6 @@ import ( libp2pnet "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - "github.com/multiformats/go-multiaddr" "github.com/onflow/flow-go/network/p2p/unicast/protocols" ) @@ -14,7 +13,7 @@ import ( type UnicastManager interface { // WithDefaultHandler sets the default stream handler for this unicast manager. The default handler is utilized // as the core handler for other unicast protocols, e.g., compressions. - WithDefaultHandler(defaultHandler libp2pnet.StreamHandler) + SetDefaultHandler(defaultHandler libp2pnet.StreamHandler) // Register registers given protocol name as preferred unicast. Each invocation of register prioritizes the current protocol // over previously registered ones. // All errors returned from this function can be considered benign. @@ -23,5 +22,5 @@ type UnicastManager interface { // it either creates a successful stream or runs out of options. Creating stream on each protocol is tried at most `maxAttempts`, and then falls // back to the less preferred one. // All errors returned from this function can be considered benign. - CreateStream(ctx context.Context, peerID peer.ID, maxAttempts int) (libp2pnet.Stream, []multiaddr.Multiaddr, error) + CreateStream(ctx context.Context, peerID peer.ID) (libp2pnet.Stream, error) } diff --git a/network/p2p/utils/logger.go b/network/p2p/utils/logger.go new file mode 100644 index 00000000000..b535d567ccd --- /dev/null +++ b/network/p2p/utils/logger.go @@ -0,0 +1,33 @@ +package utils + +import ( + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/rs/zerolog" +) + +// TopicScoreParamsLogger is a helper function that returns a logger with the topic score params added as fields. +// Args: +// logger: zerolog.Logger - logger to add fields to +// topicName: string - name of the topic +// params: pubsub.TopicScoreParams - topic score params +func TopicScoreParamsLogger(logger zerolog.Logger, topicName string, topicParams *pubsub.TopicScoreParams) zerolog.Logger { + return logger.With().Str("topic", topicName). + Bool("atomic_validation", topicParams.SkipAtomicValidation). + Float64("topic_weight", topicParams.TopicWeight). + Float64("time_in_mesh_weight", topicParams.TimeInMeshWeight). + Dur("time_in_mesh_quantum", topicParams.TimeInMeshQuantum). + Float64("time_in_mesh_cap", topicParams.TimeInMeshCap). + Float64("first_message_deliveries_weight", topicParams.FirstMessageDeliveriesWeight). + Float64("first_message_deliveries_decay", topicParams.FirstMessageDeliveriesDecay). + Float64("first_message_deliveries_cap", topicParams.FirstMessageDeliveriesCap). + Float64("mesh_message_deliveries_weight", topicParams.MeshMessageDeliveriesWeight). + Float64("mesh_message_deliveries_decay", topicParams.MeshMessageDeliveriesDecay). + Float64("mesh_message_deliveries_cap", topicParams.MeshMessageDeliveriesCap). 
+ Float64("mesh_message_deliveries_threshold", topicParams.MeshMessageDeliveriesThreshold). + Dur("mesh_message_deliveries_window", topicParams.MeshMessageDeliveriesWindow). + Dur("mesh_message_deliveries_activation", topicParams.MeshMessageDeliveriesActivation). + Float64("mesh_failure_penalty_weight", topicParams.MeshFailurePenaltyWeight). + Float64("mesh_failure_penalty_decay", topicParams.MeshFailurePenaltyDecay). + Float64("invalid_message_deliveries_weight", topicParams.InvalidMessageDeliveriesWeight). + Float64("invalid_message_deliveries_decay", topicParams.InvalidMessageDeliveriesDecay).Logger() +} diff --git a/network/p2p/utils/p2putils.go b/network/p2p/utils/p2putils.go index 552aa5c99a6..524eb8aae1e 100644 --- a/network/p2p/utils/p2putils.go +++ b/network/p2p/utils/p2putils.go @@ -6,8 +6,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" + "github.com/onflow/crypto/hash" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/p2putils" @@ -19,7 +19,7 @@ import ( // flow.Identity ---> peer.AddrInfo // |-- Address ---> |-- []multiaddr.Multiaddr // |-- NetworkPublicKey ---> |-- ID -func PeerAddressInfo(identity flow.Identity) (peer.AddrInfo, error) { +func PeerAddressInfo(identity flow.IdentitySkeleton) (peer.AddrInfo, error) { ip, port, key, err := p2putils.NetworkingInfo(identity) if err != nil { return peer.AddrInfo{}, fmt.Errorf("could not translate identity to networking info %s: %w", identity.NodeID.String(), err) @@ -39,14 +39,14 @@ func PeerAddressInfo(identity flow.Identity) (peer.AddrInfo, error) { return pInfo, err } -// PeerInfosFromIDs converts the given flow.Identities to peer.AddrInfo. +// PeerInfosFromIDs converts the given flow.Identity to peer.AddrInfo. // For each identity, if the conversion succeeds, the peer.AddrInfo is included in the result else it is // included in the error map with the corresponding error func PeerInfosFromIDs(ids flow.IdentityList) ([]peer.AddrInfo, map[flow.Identifier]error) { validIDs := make([]peer.AddrInfo, 0, len(ids)) invalidIDs := make(map[flow.Identifier]error) for _, id := range ids { - peerInfo, err := PeerAddressInfo(*id) + peerInfo, err := PeerAddressInfo(id.IdentitySkeleton) if err != nil { invalidIDs[id.NodeID] = err continue diff --git a/network/p2p/utils/ratelimiter/internal/rate_limiter_map_test.go b/network/p2p/utils/ratelimiter/internal/rate_limiter_map_test.go index 5c6a8a0b1d6..79bbe0fad6a 100644 --- a/network/p2p/utils/ratelimiter/internal/rate_limiter_map_test.go +++ b/network/p2p/utils/ratelimiter/internal/rate_limiter_map_test.go @@ -15,7 +15,6 @@ import ( // TestLimiterMap_get checks true is returned for stored items and false for missing items. func TestLimiterMap_get(t *testing.T) { - t.Parallel() m := internal.NewLimiterMap(time.Second, time.Second) peerID := peer.ID("id") m.Store(peerID, rate.NewLimiter(0, 0)) @@ -28,7 +27,6 @@ func TestLimiterMap_get(t *testing.T) { // TestLimiterMap_remove checks the map removes keys as expected. func TestLimiterMap_remove(t *testing.T) { - t.Parallel() m := internal.NewLimiterMap(time.Second, time.Second) peerID := peer.ID("id") m.Store(peerID, rate.NewLimiter(0, 0)) @@ -43,8 +41,6 @@ func TestLimiterMap_remove(t *testing.T) { // TestLimiterMap_cleanup checks the map removes expired keys as expected. 
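A short usage sketch for the TopicScoreParamsLogger helper defined above. The parameter values below are placeholders for illustration, not recommended GossipSub settings:

```go
package main

import (
	"os"
	"time"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/network/p2p/utils"
)

func main() {
	logger := zerolog.New(os.Stderr)

	// Placeholder score params; real values come from the node's GossipSub config.
	params := &pubsub.TopicScoreParams{
		TopicWeight:       0.1,
		TimeInMeshWeight:  0.01,
		TimeInMeshQuantum: time.Second,
		TimeInMeshCap:     10,
	}

	// Every line logged through topicLogger now carries the score params as fields.
	topicLogger := utils.TopicScoreParamsLogger(logger, "example-topic", params)
	topicLogger.Info().Msg("topic score params registered")
}
```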
func TestLimiterMap_cleanup(t *testing.T) { - t.Parallel() - // set fake ttl to 10 minutes ttl := 10 * time.Minute @@ -92,8 +88,6 @@ func TestLimiterMap_cleanup(t *testing.T) { // TestLimiterMap_cleanupLoopCtxCanceled checks that the Cleanup loop runs when ctx is canceled before cleanup loop exits. func TestLimiterMap_cleanupLoopCtxCanceled(t *testing.T) { - t.Parallel() - // set fake ttl to 10 minutes ttl := 10 * time.Minute diff --git a/network/p2p/utils/ratelimiter/rate_limiter.go b/network/p2p/utils/ratelimiter/rate_limiter.go index 46ddc456db4..fa29ef0d5b4 100644 --- a/network/p2p/utils/ratelimiter/rate_limiter.go +++ b/network/p2p/utils/ratelimiter/rate_limiter.go @@ -39,7 +39,7 @@ func NewRateLimiter(limit rate.Limit, burst int, lockoutDuration time.Duration, limiterMap: internal.NewLimiterMap(rateLimiterTTL, cleanUpTickInterval), limit: limit, burst: burst, - rateLimitLockoutDuration: lockoutDuration * time.Second, + rateLimitLockoutDuration: lockoutDuration, } for _, opt := range opts { diff --git a/network/p2p/utils/ratelimiter/rate_limiter_test.go b/network/p2p/utils/ratelimiter/rate_limiter_test.go index 6b45857ae52..8864011263d 100644 --- a/network/p2p/utils/ratelimiter/rate_limiter_test.go +++ b/network/p2p/utils/ratelimiter/rate_limiter_test.go @@ -23,7 +23,7 @@ func TestRateLimiter_Allow(t *testing.T) { require.NoError(t, err) // setup rate limiter - rateLimiter := NewRateLimiter(limit, burst, 1) + rateLimiter := NewRateLimiter(limit, burst, time.Second) require.True(t, rateLimiter.Allow(peerID, 0)) @@ -49,7 +49,7 @@ func TestRateLimiter_IsRateLimited(t *testing.T) { require.NoError(t, err) // setup rate limiter - rateLimiter := NewRateLimiter(limit, burst, 1) + rateLimiter := NewRateLimiter(limit, burst, time.Second) require.False(t, rateLimiter.IsRateLimited(peerID)) require.True(t, rateLimiter.Allow(peerID, 0)) diff --git a/network/proxy/network.go b/network/proxy/network.go index 57ce6d2f965..6fb270bd30e 100644 --- a/network/proxy/network.go +++ b/network/proxy/network.go @@ -7,13 +7,13 @@ import ( ) type ProxyNetwork struct { - network.Network + network.EngineRegistry targetNodeID flow.Identifier } // NewProxyNetwork creates a new proxy network. All messages sent on this network are // sent only to the node identified by the given target ID. -func NewProxyNetwork(net network.Network, targetNodeID flow.Identifier) *ProxyNetwork { +func NewProxyNetwork(net network.EngineRegistry, targetNodeID flow.Identifier) *ProxyNetwork { return &ProxyNetwork{ net, targetNodeID, @@ -22,7 +22,7 @@ func NewProxyNetwork(net network.Network, targetNodeID flow.Identifier) *ProxyNe // Register registers an engine with the proxy network. 
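The lockout change above (NewRateLimiter callers now pass time.Second rather than the bare integer 1, and the constructor no longer multiplies by time.Second) removes a double-scaling hazard: once the parameter is a real time.Duration, multiplying it by time.Second again inflates it by a factor of one billion. A standalone illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	lockout := 3 * time.Second // the caller already supplies a duration

	// Old pattern: the constructor multiplied the parameter by time.Second.
	// With a real duration as input this yields 3e9 ns * 1e9, roughly 95 years.
	doubleScaled := lockout * time.Second

	fmt.Println(lockout)      // 3s
	fmt.Println(doubleScaled) // astronomically large, clearly wrong
}
```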
func (n *ProxyNetwork) Register(channel channels.Channel, engine network.Engine) (network.Conduit, error) { - con, err := n.Network.Register(channel, engine) + con, err := n.EngineRegistry.Register(channel, engine) if err != nil { return nil, err diff --git a/network/proxy/network_test.go b/network/proxy/network_test.go index d3452fb88c8..c9ec7dc06c0 100644 --- a/network/proxy/network_test.go +++ b/network/proxy/network_test.go @@ -9,7 +9,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/network/proxy" "github.com/onflow/flow-go/utils/unittest" ) @@ -24,7 +24,7 @@ func getEvent() interface{} { type Suite struct { suite.Suite - net network.Network + net network.EngineRegistry targetNodeID flow.Identifier proxyNet *proxy.ProxyNetwork con *mocknetwork.Conduit @@ -36,7 +36,7 @@ func TestProxyNetwork(t *testing.T) { } func (suite *Suite) SetupTest() { - net := new(mocknetwork.Network) + net := new(mocknetwork.EngineRegistry) suite.net = net suite.con = new(mocknetwork.Conduit) suite.targetNodeID = unittest.IdentifierFixture() diff --git a/network/queue/eventPriority.go b/network/queue/eventPriority.go index b61b233c73e..ad64278aa87 100644 --- a/network/queue/eventPriority.go +++ b/network/queue/eventPriority.go @@ -40,7 +40,7 @@ func GetEventPriority(message interface{}) (Priority, error) { func getPriorityByType(message interface{}) Priority { switch message.(type) { // consensus - case *messages.BlockProposal: + case *messages.Proposal: return HighPriority case *messages.BlockVote: return HighPriority @@ -57,8 +57,8 @@ func getPriorityByType(message interface{}) Priority { case *messages.BlockResponse: return HighPriority - // cluster consensus - case *messages.ClusterBlockProposal: + // cluster consensus (effectively collections) + case *messages.ClusterProposal: return HighPriority case *messages.ClusterBlockVote: return HighPriority @@ -66,17 +66,15 @@ func getPriorityByType(message interface{}) Priority { return HighPriority // collections, guarantees & transactions - case *flow.CollectionGuarantee: + case *messages.CollectionGuarantee: return HighPriority - case *flow.TransactionBody: - return HighPriority - case *flow.Transaction: + case *messages.TransactionBody: return HighPriority // core messages for execution & verification - case *flow.ExecutionReceipt: + case *messages.ExecutionReceipt: return HighPriority - case *flow.ResultApproval: + case *messages.ResultApproval: return HighPriority // data exchange for execution of blocks diff --git a/network/queue/messageQueue_test.go b/network/queue/messageQueue_test.go index 159ce7506cb..5fd7cf86839 100644 --- a/network/queue/messageQueue_test.go +++ b/network/queue/messageQueue_test.go @@ -217,7 +217,7 @@ func createMessages(messageCnt int, priorityFunc queue.MessagePriorityFunc) map[ } func randomPriority(_ interface{}) (queue.Priority, error) { - rand.Seed(time.Now().UnixNano()) + p := rand.Intn(int(queue.HighPriority-queue.LowPriority+1)) + int(queue.LowPriority) return queue.Priority(p), nil } diff --git a/network/relay/network.go b/network/relay/network.go index 347dbed3069..4e65acbc318 100644 --- a/network/relay/network.go +++ b/network/relay/network.go @@ -14,17 +14,17 @@ import ( ) type RelayNetwork struct { - originNet network.Network - destinationNet network.Network + originNet network.EngineRegistry + 
destinationNet network.EngineRegistry logger zerolog.Logger channels map[channels.Channel]channels.Channel } -var _ network.Network = (*RelayNetwork)(nil) +var _ network.EngineRegistry = (*RelayNetwork)(nil) func NewRelayNetwork( - originNetwork network.Network, - destinationNetwork network.Network, + originNetwork network.EngineRegistry, + destinationNetwork network.EngineRegistry, logger zerolog.Logger, channels map[channels.Channel]channels.Channel, ) *RelayNetwork { diff --git a/network/relay/relayer.go b/network/relay/relayer.go index 01ddb48f9ef..682c026b3c4 100644 --- a/network/relay/relayer.go +++ b/network/relay/relayer.go @@ -6,6 +6,7 @@ import ( "golang.org/x/sync/errgroup" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" ) @@ -25,7 +26,7 @@ func (n *noopProcessor) Process(channel channels.Channel, originID flow.Identifi var _ network.MessageProcessor = (*Relayer)(nil) -func NewRelayer(destinationNetwork network.Network, channel channels.Channel, processor network.MessageProcessor) (*Relayer, error) { +func NewRelayer(destinationNetwork network.EngineRegistry, channel channels.Channel, processor network.MessageProcessor) (*Relayer, error) { conduit, err := destinationNetwork.Register(channel, &noopProcessor{}) if err != nil { @@ -51,7 +52,11 @@ func (r *Relayer) Process(channel channels.Channel, originID flow.Identifier, ev }) g.Go(func() error { - if err := r.destinationConduit.Publish(event, flow.ZeroID); err != nil { + msg, err := messages.InternalToMessage(event) + if err != nil { + return fmt.Errorf("failed to convert event to message: %v", err) + } + if err := r.destinationConduit.Publish(msg, flow.ZeroID); err != nil { return fmt.Errorf("failed to relay message to network: %w", err) } diff --git a/network/slashing/consumer.go b/network/slashing/consumer.go index aaac28fccc5..3ba8d656c21 100644 --- a/network/slashing/consumer.go +++ b/network/slashing/consumer.go @@ -7,35 +7,34 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/alsp" "github.com/onflow/flow-go/utils/logging" ) const ( - unknown = "unknown" - unExpectedValidationError = "unexpected_validation_error" - unAuthorizedSenderViolation = "unauthorized_sender" - unknownMsgTypeViolation = "unknown_message_type" - invalidMsgViolation = "invalid_message" - senderEjectedViolation = "sender_ejected" - unauthorizedUnicastOnChannel = "unauthorized_unicast_on_channel" + unknown = "unknown" ) // Consumer is a struct that logs a message for any slashable offenses. // This struct will be updated in the future when slashing is implemented. type Consumer struct { - log zerolog.Logger - metrics module.NetworkSecurityMetrics + log zerolog.Logger + metrics module.NetworkSecurityMetrics + misbehaviorReportConsumer network.MisbehaviorReportConsumer } // NewSlashingViolationsConsumer returns a new Consumer. 
-func NewSlashingViolationsConsumer(log zerolog.Logger, metrics module.NetworkSecurityMetrics) *Consumer {
+func NewSlashingViolationsConsumer(log zerolog.Logger, metrics module.NetworkSecurityMetrics, misbehaviorReportConsumer network.MisbehaviorReportConsumer) *Consumer {
 	return &Consumer{
-		log:     log.With().Str("module", "network_slashing_consumer").Logger(),
-		metrics: metrics,
+		log:                       log.With().Str("module", "network_slashing_consumer").Logger(),
+		metrics:                   metrics,
+		misbehaviorReportConsumer: misbehaviorReportConsumer,
 	}
 }
 
-func (c *Consumer) logOffense(networkOffense string, violation *Violation) {
+// logOffense logs the slashing violation with details.
+func (c *Consumer) logOffense(misbehavior network.Misbehavior, violation *network.Violation) {
 	// if violation fails before the message is decoded the violation.MsgType will be unknown
 	if len(violation.MsgType) == 0 {
 		violation.MsgType = unknown
@@ -51,7 +50,7 @@ func (c *Consumer) logOffense(networkOffense string, violation *Violation) {
 
 	e := c.log.Error().
 		Str("peer_id", violation.PeerID).
-		Str("networking_offense", networkOffense).
+		Str("misbehavior", misbehavior.String()).
 		Str("message_type", violation.MsgType).
 		Str("channel", violation.Channel.String()).
 		Str("protocol", violation.Protocol.String()).
@@ -62,37 +61,77 @@
 	e.Msg(fmt.Sprintf("potential slashable offense: %s", violation.Err))
 
 	// capture unauthorized message count metric
-	c.metrics.OnUnauthorizedMessage(role, violation.MsgType, violation.Channel.String(), networkOffense)
+	c.metrics.OnUnauthorizedMessage(role, violation.MsgType, violation.Channel.String(), misbehavior.String())
 }
 
-// OnUnAuthorizedSenderError logs an error for unauthorized sender error.
-func (c *Consumer) OnUnAuthorizedSenderError(violation *Violation) {
-	c.logOffense(unAuthorizedSenderViolation, violation)
+// reportMisbehavior reports the slashing violation to the alsp misbehavior report manager. When the violation identity
+// is nil, the misbehavior occurred on a public network or the identity of the sender is unknown; in either case
+// reporting is skipped.
+// Args:
+// - misbehavior: the network misbehavior.
+// - violation: the slashing violation.
+// Any error encountered while creating the misbehavior report is considered irrecoverable and will result in a fatal log.
+func (c *Consumer) reportMisbehavior(misbehavior network.Misbehavior, violation *network.Violation) {
+	if violation.Identity == nil {
+		c.log.Debug().
+			Bool(logging.KeySuspicious, true).
+			Str("peerID", violation.PeerID).
+			Msg("violation identity unknown (or public), skipping misbehavior reporting")
+		c.metrics.OnViolationReportSkipped()
+		return
+	}
+	report, err := alsp.NewMisbehaviorReport(violation.Identity.NodeID, misbehavior)
+	if err != nil {
+		// failing to create the misbehavior report is unlikely. If an error is encountered while
+		// creating the misbehavior report it indicates a bug and processing cannot proceed.
+		c.log.Fatal().
+			Err(err).
+			Str("peerID", violation.PeerID).
+			Msg("failed to create misbehavior report")
+	}
+	c.misbehaviorReportConsumer.ReportMisbehaviorOnChannel(violation.Channel, report)
+}
+
+// OnUnAuthorizedSenderError logs an error for unauthorized sender error and reports a misbehavior to alsp misbehavior report manager.
+func (c *Consumer) OnUnAuthorizedSenderError(violation *network.Violation) {
+	c.logOffense(alsp.UnAuthorizedSender, violation)
+	c.reportMisbehavior(alsp.UnAuthorizedSender, violation)
 }
 
-// OnUnknownMsgTypeError logs an error for unknown message type error.
-func (c *Consumer) OnUnknownMsgTypeError(violation *Violation) {
-	c.logOffense(unknownMsgTypeViolation, violation)
+// OnUnknownMsgTypeError logs an error for unknown message type error and reports a misbehavior to alsp misbehavior report manager.
+func (c *Consumer) OnUnknownMsgTypeError(violation *network.Violation) {
+	c.logOffense(alsp.UnknownMsgType, violation)
+	c.reportMisbehavior(alsp.UnknownMsgType, violation)
 }
 
 // OnInvalidMsgError logs an error for messages that contained payloads that could not
-// be unmarshalled into the message type denoted by message code byte.
-func (c *Consumer) OnInvalidMsgError(violation *Violation) {
-	c.logOffense(invalidMsgViolation, violation)
+// be unmarshalled into the message type denoted by message code byte and reports a misbehavior to alsp misbehavior report manager.
+func (c *Consumer) OnInvalidMsgError(violation *network.Violation) {
+	c.logOffense(alsp.InvalidMessage, violation)
+	c.reportMisbehavior(alsp.InvalidMessage, violation)
+}
+
+// OnSenderEjectedError logs an error for sender ejected error and reports a misbehavior to alsp misbehavior report manager.
+func (c *Consumer) OnSenderEjectedError(violation *network.Violation) {
+	c.logOffense(alsp.SenderEjected, violation)
+	c.reportMisbehavior(alsp.SenderEjected, violation)
 }
 
-// OnSenderEjectedError logs an error for sender ejected error.
-func (c *Consumer) OnSenderEjectedError(violation *Violation) {
-	c.logOffense(senderEjectedViolation, violation)
+// OnUnauthorizedUnicastOnChannel logs an error for messages unauthorized to be sent via unicast and reports a misbehavior to alsp misbehavior report manager.
+func (c *Consumer) OnUnauthorizedUnicastOnChannel(violation *network.Violation) {
+	c.logOffense(alsp.UnauthorizedUnicastOnChannel, violation)
+	c.reportMisbehavior(alsp.UnauthorizedUnicastOnChannel, violation)
 }
 
-// OnUnauthorizedUnicastOnChannel logs an error for messages unauthorized to be sent via unicast.
-func (c *Consumer) OnUnauthorizedUnicastOnChannel(violation *Violation) {
-	c.logOffense(unauthorizedUnicastOnChannel, violation)
+// OnUnauthorizedPublishOnChannel logs an error for messages unauthorized to be sent via pubsub and reports a misbehavior to alsp misbehavior report manager.
+func (c *Consumer) OnUnauthorizedPublishOnChannel(violation *network.Violation) {
+	c.logOffense(alsp.UnauthorizedPublishOnChannel, violation)
+	c.reportMisbehavior(alsp.UnauthorizedPublishOnChannel, violation)
 }
 
 // OnUnexpectedError logs an error for unexpected errors. This indicates message validation
-// has failed for an unknown reason and could potentially be n slashable offense.
-func (c *Consumer) OnUnexpectedError(violation *Violation) {
-	c.logOffense(unExpectedValidationError, violation)
+// has failed for an unknown reason and could potentially be a slashable offense; a misbehavior is reported to alsp misbehavior report manager.
+func (c *Consumer) OnUnexpectedError(violation *network.Violation) { + c.logOffense(alsp.UnExpectedValidationError, violation) + c.reportMisbehavior(alsp.UnExpectedValidationError, violation) } diff --git a/network/slashing/violations_consumer.go b/network/slashing/violations_consumer.go deleted file mode 100644 index cf1f8ea7d85..00000000000 --- a/network/slashing/violations_consumer.go +++ /dev/null @@ -1,38 +0,0 @@ -package slashing - -import ( - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/message" -) - -type ViolationsConsumer interface { - // OnUnAuthorizedSenderError logs an error for unauthorized sender error - OnUnAuthorizedSenderError(violation *Violation) - - // OnUnknownMsgTypeError logs an error for unknown message type error - OnUnknownMsgTypeError(violation *Violation) - - // OnInvalidMsgError logs an error for messages that contained payloads that could not - // be unmarshalled into the message type denoted by message code byte. - OnInvalidMsgError(violation *Violation) - - // OnSenderEjectedError logs an error for sender ejected error - OnSenderEjectedError(violation *Violation) - - // OnUnauthorizedUnicastOnChannel logs an error for messages unauthorized to be sent via unicast - OnUnauthorizedUnicastOnChannel(violation *Violation) - - // OnUnexpectedError logs an error for unknown errors - OnUnexpectedError(violation *Violation) -} - -type Violation struct { - Identity *flow.Identity - PeerID string - OriginID flow.Identifier - MsgType string - Channel channels.Channel - Protocol message.ProtocolType - Err error -} diff --git a/network/stub/buffer.go b/network/stub/buffer.go index ecee0388e00..d5bd3bc6832 100644 --- a/network/stub/buffer.go +++ b/network/stub/buffer.go @@ -4,6 +4,7 @@ import ( "sync" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/network/channels" ) @@ -12,7 +13,7 @@ type PendingMessage struct { // The sender node id From flow.Identifier Channel channels.Channel - Event interface{} + Event messages.UntrustedMessage // The id of the receiver nodes TargetIDs []flow.Identifier } diff --git a/network/stub/hash.go b/network/stub/hash.go index fd13f0906f8..4730e4b096d 100644 --- a/network/stub/hash.go +++ b/network/stub/hash.go @@ -4,7 +4,8 @@ import ( "encoding/hex" "fmt" - "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/crypto/hash" + "github.com/onflow/flow-go/model/encoding/json" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network/channels" diff --git a/network/stub/network.go b/network/stub/network.go index 8bdb1056312..65468296e14 100644 --- a/network/stub/network.go +++ b/network/stub/network.go @@ -12,12 +12,11 @@ import ( "github.com/stretchr/testify/require" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/network/p2p/conduit" - "github.com/onflow/flow-go/utils/unittest" ) // Network is a mocked Network layer made for testing engine's behavior. @@ -26,7 +25,7 @@ import ( // When an engine is attached on a Network instance, the mocked Network delivers // all engine's events to others using an in-memory delivery mechanism. 
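Pulling the consumer changes together: construction now takes a network.MisbehaviorReportConsumer, and each violation handler both logs the offense and reports it to ALSP. A hedged sketch of the wiring; the function, argument names, and field values are illustrative, while the constructor signature and Violation fields are taken from this diff:

```go
package example

import (
	"fmt"

	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/network"
	"github.com/onflow/flow-go/network/channels"
	"github.com/onflow/flow-go/network/slashing"
)

// wireSlashingConsumer shows the new construction path. All arguments are assumed
// to be supplied by the node builder; the names here are illustrative.
func wireSlashingConsumer(
	log zerolog.Logger,
	metrics module.NetworkSecurityMetrics,
	reportConsumer network.MisbehaviorReportConsumer,
	senderIdentity *flow.Identity,
) {
	consumer := slashing.NewSlashingViolationsConsumer(log, metrics, reportConsumer)

	// A violation with a known identity is both logged and reported to ALSP.
	// With Identity == nil it would only be logged and counted as skipped.
	consumer.OnInvalidMsgError(&network.Violation{
		Identity: senderIdentity,
		PeerID:   "peer-id", // placeholder peer ID string
		Channel:  channels.TestNetworkChannel,
		MsgType:  "TestMessage",
		Err:      fmt.Errorf("payload could not be decoded"),
	})
}
```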
type Network struct { - mocknetwork.Network + mocknetwork.EngineRegistry ctx context.Context sync.Mutex myId flow.Identifier // used to represent information of the attached node. @@ -43,6 +42,9 @@ func WithConduitFactory(factory network.ConduitFactory) func(*Network) { } } +var _ network.EngineRegistry = (*Network)(nil) +var _ network.ConduitAdapter = (*Network)(nil) + // NewNetwork create a mocked Network. // The committee has the identity of the node already, so only `committee` is needed // in order for a mock hub to find each other. @@ -54,7 +56,7 @@ func NewNetwork(t testing.TB, myId flow.Identifier, hub *Hub, opts ...func(*Netw engines: make(map[channels.Channel]network.MessageProcessor), seenEventIDs: make(map[string]struct{}), qCD: make(chan struct{}), - conduitFactory: conduit.NewDefaultConduitFactory(unittest.Logger(), metrics.NewNoopCollector()), + conduitFactory: conduit.NewDefaultConduitFactory(), } for _, opt := range opts { @@ -82,8 +84,6 @@ func NewNetwork(t testing.TB, myId flow.Identifier, hub *Hub, opts ...func(*Netw return net } -var _ network.Network = (*Network)(nil) - // GetID returns the identity of the attached node. func (n *Network) GetID() flow.Identifier { return n.myId @@ -119,10 +119,14 @@ func (n *Network) UnRegisterChannel(channel channels.Channel) error { // submit is called when the attached Engine to the channel is sending an event to an // Engine attached to the same channel on another node or nodes. func (n *Network) submit(channel channels.Channel, event interface{}, targetIDs ...flow.Identifier) error { + e, ok := event.(messages.UntrustedMessage) + if !ok { + return fmt.Errorf("invalid message type: expected messages.UntrustedMessage, got %T", event) + } m := &PendingMessage{ From: n.GetID(), Channel: channel, - Event: event, + Event: e, TargetIDs: targetIDs, } @@ -131,13 +135,17 @@ func (n *Network) submit(channel channels.Channel, event interface{}, targetIDs return nil } -// unicast is called when the attached Engine to the channel is sending an event to a single target +// UnicastOnChannel is called when the attached Engine to the channel is sending an event to a single target // Engine attached to the same channel on another node. func (n *Network) UnicastOnChannel(channel channels.Channel, event interface{}, targetID flow.Identifier) error { + msg, ok := event.(messages.UntrustedMessage) + if !ok { + return fmt.Errorf("invalid message type: expected messages.UntrustedMessage, got %T", event) + } m := &PendingMessage{ From: n.GetID(), Channel: channel, - Event: event, + Event: msg, TargetIDs: []flow.Identifier{targetID}, } @@ -161,7 +169,11 @@ func (n *Network) PublishOnChannel(channel channels.Channel, event interface{}, // Engines attached to the same channel on other nodes. The targeted nodes are selected based on the selector. // In this test helper implementation, multicast uses submit method under the hood. func (n *Network) MulticastOnChannel(channel channels.Channel, event interface{}, num uint, targetIDs ...flow.Identifier) error { - targetIDs = flow.Sample(num, targetIDs...) + var err error + targetIDs, err = flow.Sample(num, targetIDs...) + if err != nil { + return fmt.Errorf("sampling failed: %w", err) + } return n.submit(channel, event, targetIDs...) 
 }
@@ -261,15 +273,20 @@ func (n *Network) processWithEngine(syncOnProcess bool, key string, m *PendingMe
 		return fmt.Errorf("could find engine ID: %v", m.Channel)
 	}
 
+	internal, err := m.Event.ToInternal()
+	if err != nil {
+		return fmt.Errorf("could not convert message %T to internal: %v", m.Event, err)
+	}
+
 	if syncOnProcess {
 		// sender and receiver are synced over processing the message
-		if err := receiverEngine.Process(m.Channel, m.From, m.Event); err != nil {
-			return fmt.Errorf("receiver engine failed to process event (%v): %w", m.Event, err)
+		if err := receiverEngine.Process(m.Channel, m.From, internal); err != nil {
+			return fmt.Errorf("receiver engine failed to process event (%v): %w", internal, err)
 		}
 	} else {
 		// sender and receiver are synced over delivery of message
 		go func() {
-			_ = receiverEngine.Process(m.Channel, m.From, m.Event)
+			_ = receiverEngine.Process(m.Channel, m.From, internal)
 		}()
 	}
 	return nil
@@ -306,3 +323,7 @@ func (n *Network) StartConDev(updateInterval time.Duration, recursive bool) {
 func (n *Network) StopConDev() {
 	close(n.qCD)
 }
+
+func (n *Network) ReportMisbehaviorOnChannel(_ channels.Channel, _ network.MisbehaviorReport) {
+	// no-op for stub network.
+}
diff --git a/network/test/cohort1/meshengine_test.go b/network/test/cohort1/meshengine_test.go
new file mode 100644
index 00000000000..0ee19c0dde3
--- /dev/null
+++ b/network/test/cohort1/meshengine_test.go
@@ -0,0 +1,530 @@
+package cohort1
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/ipfs/go-log"
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/filter"
+	"github.com/onflow/flow-go/model/libp2p/message"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/module/observable"
+	"github.com/onflow/flow-go/network"
+	"github.com/onflow/flow-go/network/channels"
+	"github.com/onflow/flow-go/network/internal/testutils"
+	"github.com/onflow/flow-go/network/p2p"
+	p2pnode "github.com/onflow/flow-go/network/p2p/node"
+	p2ptest "github.com/onflow/flow-go/network/p2p/test"
+	"github.com/onflow/flow-go/network/underlay"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// MeshEngineTestSuite evaluates the message delivery functionality for the overlay
+// of engines over a complete graph.
+type MeshEngineTestSuite struct {
+	suite.Suite
+	testutils.ConduitWrapper                    // used as a wrapper around conduit methods
+	networks    []*underlay.Network             // used to keep track of the networks
+	libp2pNodes []p2p.LibP2PNode                // used to keep track of the libp2p nodes
+	ids         flow.IdentityList               // used to keep track of the identifiers associated with networks
+	obs         chan string                     // used to keep track of Protect events tagged by pubsub messages
+	cancel      context.CancelFunc
+}
+
+// TestMeshNetTestSuite runs all tests in this test suite.
+func TestMeshNetTestSuite(t *testing.T) {
+	suite.Run(t, new(MeshEngineTestSuite))
+}
+
+// SetupTest is executed prior to each test in this test suite. It creates and initializes
+// a set of network instances, sets up connection managers, nodes, identities, observables, etc.
+// This setup ensures that all necessary configurations are in place before running the tests.
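The stub Network above now rejects events that do not implement messages.UntrustedMessage and converts accepted ones via ToInternal before handing them to the receiving engine. The exact interface signature is not shown in this diff; assuming it is ToInternal() (any, error), a hypothetical message type would satisfy it like this:

```go
package example

import "fmt"

// PingRequest is a hypothetical wire-format message (not part of this diff).
type PingRequest struct {
	Nonce uint64
}

// Ping is its hypothetical trusted internal counterpart.
type Ping struct {
	Nonce uint64
}

// ToInternal validates the untrusted wire form and returns the internal
// representation that engines receive in Process; invalid payloads are
// rejected here, before they ever reach an engine.
func (p *PingRequest) ToInternal() (any, error) {
	if p.Nonce == 0 {
		return nil, fmt.Errorf("nonce must be non-zero")
	}
	return &Ping{Nonce: p.Nonce}, nil
}
```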
+func (suite *MeshEngineTestSuite) SetupTest() { + // defines total number of nodes in our network (minimum 3 needed to use 1-k messaging) + const count = 10 + logger := zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) + log.SetAllLoggers(log.LevelError) + + // set up a channel to receive pubsub tags from connManagers of the nodes + peerChannel := make(chan string) + + // Tag Observables Usage Explanation: + // The tagsObserver is used to observe connections tagged by pubsub messages. This is instrumental in understanding + // the connectivity between different peers and verifying the formation of the mesh within this test suite. + // Issues: + // - Deviation from Production Code: The usage of tag observables here may not reflect the behavior in the production environment. + // - Mask Issues in the Production Environment: The observables tied to testing might lead to behaviors or errors that are + // masked or not evident within the actual production code. + // TODO: Evaluate the necessity of tag observables in this test and consider addressing the deviation from production + // code and potential mask issues. Evaluate the possibility of removing this part eventually. + ob := tagsObserver{ + tags: peerChannel, + log: logger, + } + + ctx, cancel := context.WithCancel(context.Background()) + suite.cancel = cancel + + signalerCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx) + + sporkId := unittest.IdentifierFixture() + libP2PNodes := make([]p2p.LibP2PNode, 0) + identities := make(flow.IdentityList, 0) + tagObservables := make([]observable.Observable, 0) + idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{}) + defaultFlowConfig, err := config.DefaultConfig() + require.NoError(suite.T(), err) + opts := []p2ptest.NodeFixtureParameterOption{p2ptest.WithUnicastHandlerFunc(nil)} + + for i := 0; i < count; i++ { + connManager, err := testutils.NewTagWatchingConnManager( + unittest.Logger(), + metrics.NewNoopCollector(), + &defaultFlowConfig.NetworkConfig.ConnectionManager) + require.NoError(suite.T(), err) + + opts = append(opts, p2ptest.WithConnectionManager(connManager)) + node, nodeId := p2ptest.NodeFixture(suite.T(), + sporkId, + suite.T().Name(), + idProvider, + opts...) + libP2PNodes = append(libP2PNodes, node) + identities = append(identities, &nodeId) + tagObservables = append(tagObservables, connManager) + } + idProvider.SetIdentities(identities) + + suite.libp2pNodes = libP2PNodes + suite.ids = identities + + suite.networks, _ = testutils.NetworksFixture(suite.T(), sporkId, suite.ids, suite.libp2pNodes) + // starts the nodes and networks + testutils.StartNodes(signalerCtx, suite.T(), suite.libp2pNodes) + for _, net := range suite.networks { + testutils.StartNetworks(signalerCtx, suite.T(), []network.EngineRegistry{net}) + unittest.RequireComponentsReadyBefore(suite.T(), 1*time.Second, net) + } + + for _, observableConnMgr := range tagObservables { + observableConnMgr.Subscribe(&ob) + } + suite.obs = peerChannel +} + +// TearDownTest closes the networks within a specified timeout +func (suite *MeshEngineTestSuite) TearDownTest() { + suite.cancel() + testutils.StopComponents(suite.T(), suite.networks, 3*time.Second) + testutils.StopComponents(suite.T(), suite.libp2pNodes, 3*time.Second) +} + +// TestAllToAll_Publish evaluates the network of mesh engines against allToAllScenario scenario. +// Network instances during this test use their Publish method to disseminate messages. 
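Each Test* method below runs one scenario under a different dissemination mode by passing one of the suite's send wrappers (Publish, Multicast, Unicast). A minimal sketch of that wrapper pattern, with signatures inferred from their usage in this file (the real definitions live in testutils and may differ):

```go
package example

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/network"
)

// ConduitSendWrapperFunc abstracts over the Conduit send methods so a single
// scenario body can be exercised under each dissemination mode.
type ConduitSendWrapperFunc func(event interface{}, con network.Conduit, targetIDs ...flow.Identifier) error

func publishWrapper(event interface{}, con network.Conduit, targetIDs ...flow.Identifier) error {
	return con.Publish(event, targetIDs...)
}

func unicastWrapper(event interface{}, con network.Conduit, targetIDs ...flow.Identifier) error {
	// Unicast addresses one peer at a time, so fan out over the target list.
	for _, id := range targetIDs {
		if err := con.Unicast(event, id); err != nil {
			return err
		}
	}
	return nil
}
```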
+func (suite *MeshEngineTestSuite) TestAllToAll_Publish() {
+	suite.allToAllScenario(suite.Publish)
+}
+
+// TestAllToAll_Multicast evaluates the network of mesh engines against allToAllScenario scenario.
+// Network instances during this test use their Multicast method to disseminate messages.
+func (suite *MeshEngineTestSuite) TestAllToAll_Multicast() {
+	suite.allToAllScenario(suite.Multicast)
+}
+
+// TestAllToAll_Unicast evaluates the network of mesh engines against allToAllScenario scenario.
+// Network instances during this test use their Unicast method to disseminate messages.
+func (suite *MeshEngineTestSuite) TestAllToAll_Unicast() {
+	suite.allToAllScenario(suite.Unicast)
+}
+
+// TestTargetedValidators_Unicast tests if only the intended recipients in a 1-k messaging actually receive the message.
+// The messages are disseminated through the Unicast method of conduits.
+func (suite *MeshEngineTestSuite) TestTargetedValidators_Unicast() {
+	suite.targetValidatorScenario(suite.Unicast)
+}
+
+// TestTargetedValidators_Multicast tests if only the intended recipients in a 1-k messaging actually receive the
+// message.
+// The messages are disseminated through the Multicast method of conduits.
+func (suite *MeshEngineTestSuite) TestTargetedValidators_Multicast() {
+	suite.targetValidatorScenario(suite.Multicast)
+}
+
+// TestTargetedValidators_Publish tests if only the intended recipients in a 1-k messaging actually receive the message.
+// The messages are disseminated through the Publish method of conduits.
+func (suite *MeshEngineTestSuite) TestTargetedValidators_Publish() {
+	suite.targetValidatorScenario(suite.Publish)
+}
+
+// TestMaxMessageSize_Unicast evaluates the messageSizeScenario scenario using
+// the Unicast method of conduits.
+func (suite *MeshEngineTestSuite) TestMaxMessageSize_Unicast() {
+	suite.messageSizeScenario(suite.Unicast, underlay.DefaultMaxUnicastMsgSize)
+}
+
+// TestMaxMessageSize_Multicast evaluates the messageSizeScenario scenario using
+// the Multicast method of conduits.
+func (suite *MeshEngineTestSuite) TestMaxMessageSize_Multicast() {
+	suite.messageSizeScenario(suite.Multicast, p2pnode.DefaultMaxPubSubMsgSize)
+}
+
+// TestMaxMessageSize_Publish evaluates the messageSizeScenario scenario using the
+// Publish method of conduits.
+func (suite *MeshEngineTestSuite) TestMaxMessageSize_Publish() {
+	suite.messageSizeScenario(suite.Publish, p2pnode.DefaultMaxPubSubMsgSize)
+}
+
+// TestUnregister_Publish tests that an engine cannot send any message using Publish
+// or receive any messages after the conduit is closed
+func (suite *MeshEngineTestSuite) TestUnregister_Publish() {
+	suite.conduitCloseScenario(suite.Publish)
+}
+
+// TestUnregister_Multicast tests that an engine cannot send any message using Multicast
+// or receive any messages after the conduit is closed
+func (suite *MeshEngineTestSuite) TestUnregister_Multicast() {
+	suite.conduitCloseScenario(suite.Multicast)
+}
+
+// TestUnregister_Unicast tests that an engine cannot send any message using Unicast
+// or receive any messages after the conduit is closed
+func (suite *MeshEngineTestSuite) TestUnregister_Unicast() {
+	suite.conduitCloseScenario(suite.Unicast)
+}
+
+// allToAllScenario creates a complete mesh of the engines, where each engine x sends a
+// "hello from node x" to other engines. It then evaluates the correctness of message
+// delivery as well as the content of the messages. This scenario tests the capability of
+// the engines to communicate in a fully connected graph, ensuring both the reachability
+// of messages and the integrity of their contents.
+func (suite *MeshEngineTestSuite) allToAllScenario(send testutils.ConduitSendWrapperFunc) {
+	// allows nodes to find each other in case of Multicast and Publish
+	testutils.OptionalSleep(send)
+
+	// creating engines
+	count := len(suite.networks)
+	engs := make([]*testutils.MeshEngine, 0)
+	wg := sync.WaitGroup{}
+
+	// logs[i][j] keeps the message that node i sends to node j
+	logs := make(map[int][]string)
+	for i := range suite.networks {
+		eng := testutils.NewMeshEngine(suite.Suite.T(), suite.networks[i], count-1, channels.TestNetworkChannel)
+		engs = append(engs, eng)
+		logs[i] = make([]string, 0)
+	}
+
+	// allow nodes to heartbeat and discover each other
+	// each node will register ~D protect messages, where D is the default out-degree
+	for i := 0; i < pubsub.GossipSubD*count; i++ {
+		select {
+		case <-suite.obs:
+		case <-time.After(8 * time.Second):
+			assert.FailNow(suite.T(), "could not receive pubsub tag indicating mesh formed")
+		}
+	}
+
+	// each node broadcasts a message to all others
+	for i := range suite.networks {
+		event := &message.TestMessage{
+			Text: fmt.Sprintf("hello from node %v", i),
+		}
+
+		// others keeps the identifier of all nodes except ith node
+		others := suite.ids.Filter(filter.Not(filter.HasNodeID[flow.Identity](suite.ids[i].NodeID))).NodeIDs()
+		require.NoError(suite.Suite.T(), send(event, engs[i].Con, others...))
+		wg.Add(count - 1)
+	}
+
+	// fires a goroutine for each engine that listens to incoming messages
+	for i := range suite.networks {
+		go func(e *testutils.MeshEngine) {
+			for x := 0; x < count-1; x++ {
+				<-e.Received
+				wg.Done()
+			}
+		}(engs[i])
+	}
+
+	unittest.AssertReturnsBefore(suite.Suite.T(), wg.Wait, 30*time.Second)
+
+	// evaluates that all messages are received
+	for index, e := range engs {
+		// confirms the number of received messages at each node
+		if len(e.Event) != (count - 1) {
+			assert.Fail(suite.Suite.T(),
+				fmt.Sprintf("Message reception mismatch at node %v. Expected: %v, Got: %v", index, count-1, len(e.Event)))
+		}
+
+		for i := 0; i < count-1; i++ {
+			assertChannelReceived(suite.T(), e, channels.TestNetworkChannel)
+		}
+
+		// extracts the sender IDs from the received messages
+		receivedIndices, err := extractSenderID(count, e.Event, "hello from node")
+		require.NoError(suite.Suite.T(), err)
+
+		for j := 0; j < count; j++ {
+			// evaluates self-gossip
+			if j == index {
+				assert.False(suite.Suite.T(), (receivedIndices)[index], fmt.Sprintf("self gossiped for node %v detected", index))
+			}
+			// evaluates content
+			if !(receivedIndices)[j] {
+				assert.False(suite.Suite.T(), (receivedIndices)[index],
+					fmt.Sprintf("Message not found in node #%v's messages. Expected: Message from node %v. Got: No message", index, j))
+			}
+		}
+	}
+}
+
+// targetValidatorScenario sends a single message from the last node to the first half of the nodes
+// based on the identifiers list.
+// It then verifies that only the intended recipients receive the message.
+// Message dissemination is done using the send wrapper of conduit.
+func (suite *MeshEngineTestSuite) targetValidatorScenario(send testutils.ConduitSendWrapperFunc) {
+	// creating engines
+	count := len(suite.networks)
+	engs := make([]*testutils.MeshEngine, 0)
+	wg := sync.WaitGroup{}
+
+	for i := range suite.networks {
+		eng := testutils.NewMeshEngine(suite.Suite.T(), suite.networks[i], count-1, channels.TestNetworkChannel)
+		engs = append(engs, eng)
+	}
+
+	// allow nodes to heartbeat and discover each other
+	// each node will register ~D protect messages, where D is the default out-degree
+	for i := 0; i < pubsub.GossipSubD*count; i++ {
+		select {
+		case <-suite.obs:
+		case <-time.After(2 * time.Second):
+			assert.FailNow(suite.T(), "could not receive pubsub tag indicating mesh formed")
+		}
+	}
+
+	// creates a target list containing the first half of the nodes
+	allIds := suite.ids.NodeIDs()
+	var targets []flow.Identifier
+	for i := 0; i < len(allIds)/2; i++ {
+		targets = append(targets, allIds[i])
+	}
+
+	// the last node broadcasts a message to all targets
+	event := &message.TestMessage{
+		Text: "hello from node 0",
+	}
+	require.NoError(suite.Suite.T(), send(event, engs[len(engs)-1].Con, targets...))
+
+	// fires a goroutine for each target engine to listen for the incoming message
+	for i := 0; i < len(allIds)/2; i++ {
+		wg.Add(1)
+		go func(e *testutils.MeshEngine) {
+			<-e.Received
+			wg.Done()
+		}(engs[i])
+	}
+
+	unittest.AssertReturnsBefore(suite.T(), wg.Wait, 10*time.Second)
+
+	// verifies that only the targeted nodes received the message
+	for index, e := range engs {
+		if index < len(engs)/2 {
+			assert.Len(suite.Suite.T(), e.Event, 1, fmt.Sprintf("message not received %v", index))
+			assertChannelReceived(suite.T(), e, channels.TestNetworkChannel)
+		} else {
+			assert.Len(suite.Suite.T(), e.Event, 0, fmt.Sprintf("message received when none was expected %v", index))
+		}
+	}
+}
+
+// messageSizeScenario provides a scenario to check if a message of maximum permissible size can be sent
+// successfully.
+// It broadcasts a message from the first node to all the nodes in the identifiers list using the send wrapper function.
+func (suite *MeshEngineTestSuite) messageSizeScenario(send testutils.ConduitSendWrapperFunc, size uint) {
+	// creating engines
+	count := len(suite.networks)
+	engs := make([]*testutils.MeshEngine, 0)
+	wg := sync.WaitGroup{}
+
+	for i := range suite.networks {
+		eng := testutils.NewMeshEngine(suite.Suite.T(), suite.networks[i], count-1, channels.TestNetworkChannel)
+		engs = append(engs, eng)
+	}
+
+	// allow nodes to heartbeat and discover each other
+	// each node will register ~D protect messages per mesh setup, where D is the default out-degree
+	for i := 0; i < pubsub.GossipSubD*count; i++ {
+		select {
+		case <-suite.obs:
+		case <-time.After(8 * time.Second):
+			assert.FailNow(suite.T(), "could not receive pubsub tag indicating mesh formed")
+		}
+	}
+	// others keeps the identifiers of all nodes except the sender.
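+	// (the sender is engs[0]; it disseminates the max-size payload to every other engine)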
+	others := suite.ids.Filter(filter.Not(filter.HasNodeID[flow.Identity](suite.ids[0].NodeID))).NodeIDs()
+
+	// generates and sends an event of custom size to the network
+	payload := testutils.NetworkPayloadFixture(suite.T(), size)
+	event := &message.TestMessage{
+		Text: string(payload),
+	}
+
+	require.NoError(suite.T(), send(event, engs[0].Con, others...))
+
+	// fires a goroutine for each engine (except the sender) to listen for the incoming message
+	for _, eng := range engs[1:] {
+		wg.Add(1)
+		go func(e *testutils.MeshEngine) {
+			<-e.Received
+			wg.Done()
+		}(eng)
+	}
+
+	unittest.AssertReturnsBefore(suite.Suite.T(), wg.Wait, 30*time.Second)
+
+	// evaluates that all messages are received
+	for index, e := range engs[1:] {
+		assert.Len(suite.Suite.T(), e.Event, 1, "message not received by engine %d", index+1)
+		assertChannelReceived(suite.T(), e, channels.TestNetworkChannel)
+	}
+}
+
+// conduitCloseScenario tests that after a Conduit is closed, an engine can neither send nor receive messages on that channel.
+func (suite *MeshEngineTestSuite) conduitCloseScenario(send testutils.ConduitSendWrapperFunc) {
+
+	testutils.OptionalSleep(send)
+
+	// creating engines
+	count := len(suite.networks)
+	engs := make([]*testutils.MeshEngine, 0)
+	wg := sync.WaitGroup{}
+
+	for i := range suite.networks {
+		eng := testutils.NewMeshEngine(suite.Suite.T(), suite.networks[i], count-1, channels.TestNetworkChannel)
+		engs = append(engs, eng)
+	}
+
+	// allow nodes to heartbeat and discover each other
+	// each node will register ~D protect messages, where D is the default out-degree
+	for i := 0; i < pubsub.GossipSubD*count; i++ {
+		select {
+		case <-suite.obs:
+		case <-time.After(2 * time.Second):
+			assert.FailNow(suite.T(), "could not receive pubsub tag indicating mesh formed")
+		}
+	}
+
+	// unregisters a random engine from the test topic by calling Close on its conduit
+	unregisterIndex := rand.Intn(count)
+	err := engs[unregisterIndex].Con.Close()
+	assert.NoError(suite.T(), err)
+
+	// waits long enough for the peer manager to unsubscribe the node from the topic;
+	// while libp2p is unsubscribing the node, the topology gets unstable
+	// and connections to the node may be refused (although very unlikely).
+	time.Sleep(2 * time.Second)
+
+	// each node attempts to broadcast a message to all others
+	for i := range suite.networks {
+		event := &message.TestMessage{
+			Text: fmt.Sprintf("hello from node %v", i),
+		}
+
+		// others keeps the identifiers of all nodes except the i-th node and the node that unregistered from the topic;
+		// nodes without a valid topic registration for a channel will reject messages on that channel via unicast.
+		others := suite.ids.Filter(filter.Not(filter.HasNodeID[flow.Identity](suite.ids[i].NodeID, suite.ids[unregisterIndex].NodeID))).NodeIDs()
+
+		if i == unregisterIndex {
+			// asserts that the unsubscribed engine cannot send on that channel
+			require.Error(suite.Suite.T(), send(event, engs[i].Con, others...))
+			continue
+		}
+
+		require.NoError(suite.Suite.T(), send(event, engs[i].Con, others...))
+	}
+
+	// fires a goroutine to listen for incoming messages for each engine except the one that unregistered
+	for i := range suite.networks {
+		if i == unregisterIndex {
+			continue
+		}
+		wg.Add(1)
+		go func(e *testutils.MeshEngine) {
+			expectedMsgCnt := count - 2 // count less self and unsubscribed engine
+			for x := 0; x < expectedMsgCnt; x++ {
+				<-e.Received
+			}
+			wg.Done()
+		}(engs[i])
+	}
+
+	// asserts that every engine except the unsubscribed one received the message
+	unittest.AssertReturnsBefore(suite.Suite.T(), wg.Wait, 2*time.Second)
+
+	// asserts that the unregistered engine did not receive the message
+	unregisteredEng := engs[unregisterIndex]
+	assert.Emptyf(suite.T(), unregisteredEng.Received, "unregistered engine received the topic message")
+}
+
+// assertChannelReceived asserts that the given channel was received on the given engine.
+func assertChannelReceived(t *testing.T, e *testutils.MeshEngine, channel channels.Channel) {
+	unittest.AssertReturnsBefore(t, func() {
+		assert.Equal(t, channel, <-e.Channel)
+	}, 100*time.Millisecond)
+}
+
+// extractSenderID returns a bool slice whose index i is set to true if there is a message from node i in the provided messages.
+// enginesNum is the number of engines.
+// events is the channel of received events.
+// expectedMsgTxt is the common prefix among all the messages that we expect to receive; for example,
+// we expect to receive "hello from node x" in this test, so expectedMsgTxt is "hello from node".
+func extractSenderID(enginesNum int, events chan interface{}, expectedMsgTxt string) ([]bool, error) {
+	indices := make([]bool, enginesNum)
+	expectedMsgSize := len(expectedMsgTxt)
+	for i := 0; i < enginesNum-1; i++ {
+		var event interface{}
+		select {
+		case event = <-events:
+		default:
+			continue
+		}
+		echo, ok := event.(*flow.TestMessage)
+		if !ok {
+			return nil, fmt.Errorf("unexpected event type: %T", event)
+		}
+		msg := echo.Text
+		if len(msg) < expectedMsgSize {
+			return nil, fmt.Errorf("invalid message format")
+		}
+		senderIndex := msg[expectedMsgSize:]
+		senderIndex = strings.TrimLeft(senderIndex, " ")
+		nodeID, err := strconv.Atoi(senderIndex)
+		if err != nil {
+			return nil, fmt.Errorf("could not extract the node id from: %v", msg)
+		}
+
+		if indices[nodeID] {
+			return nil, fmt.Errorf("duplicate message reception: %v", msg)
+		}
+
+		if msg == fmt.Sprintf("%s %v", expectedMsgTxt, nodeID) {
+			indices[nodeID] = true
+		}
+	}
+	return indices, nil
+}
diff --git a/network/test/cohort1/network_test.go b/network/test/cohort1/network_test.go
new file mode 100644
index 00000000000..7934c3fa203
--- /dev/null
+++ b/network/test/cohort1/network_test.go
@@ -0,0 +1,970 @@
+package cohort1
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"regexp"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/p2p/net/swarm"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/assert"
+	mockery "github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"go.uber.org/atomic"
+	"golang.org/x/time/rate"
+
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/model/flow/filter" + libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" + "github.com/onflow/flow-go/model/messages" + "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/observable" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/internal/p2pfixtures" + "github.com/onflow/flow-go/network/internal/testutils" + "github.com/onflow/flow-go/network/message" + mocknetwork "github.com/onflow/flow-go/network/mock" + "github.com/onflow/flow-go/network/p2p" + p2pnode "github.com/onflow/flow-go/network/p2p/node" + p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/network/p2p/unicast/ratelimit" + "github.com/onflow/flow-go/network/p2p/utils/ratelimiter" + "github.com/onflow/flow-go/network/underlay" + "github.com/onflow/flow-go/utils/concurrentmap" + "github.com/onflow/flow-go/utils/unittest" +) + +// libp2p emits a call to `Protect` with a topic-specific tag upon establishing each peering connection in a GossipSub mesh, see: +// https://github.com/libp2p/go-libp2p-pubsub/blob/master/tag_tracer.go +// One way to make sure such a mesh has formed, asynchronously, in unit tests, is to wait for libp2p.GossipSub such calls, +// and that's what we do with tagsObserver. +// Usage: +// The tagsObserver struct observes the OnNext, OnError, and OnComplete events related to peer tags. +// A channel 'tags' is used to communicate these tags, and the observer is subscribed to the observable connection manager. +// Advantages: +// Using tag observables helps understand the connectivity between different peers, +// and can be valuable in testing scenarios where network connectivity is critical. +// Issues: +// - Deviation from Production Code: This tag observation might be unique to the test environment, +// and therefore not reflect the behavior of the production code. +// - Mask Issues in the Production Environment: The observables are tied to testing and might +// lead to behaviors or errors that are masked or not evident within the actual production environment. +// +// TODO: Evaluate the necessity of tag observables in this test. Consider addressing the deviation from +// production code and potential mask issues in the production environment. Evaluate the possibility +// of removing this part eventually. +type tagsObserver struct { + tags chan string + log zerolog.Logger +} + +func (co *tagsObserver) OnNext(peertag interface{}) { + pt, ok := peertag.(testutils.PeerTag) + + if ok { + co.tags <- fmt.Sprintf("peer: %v tag: %v", pt.Peer, pt.Tag) + } + +} +func (co *tagsObserver) OnError(err error) { + co.log.Error().Err(err).Msg("Tags Observer closed on an error") + close(co.tags) +} +func (co *tagsObserver) OnComplete() { + close(co.tags) +} + +// TODO: eventually this should be moved to the p2pnet package. 
+type NetworkTestSuite struct {
+	suite.Suite
+	sync.RWMutex
+	size        int // used to determine number of networks under test
+	libP2PNodes []p2p.LibP2PNode
+	networks    []*underlay.Network
+	obs         chan string // used to keep track of Protect events tagged by pubsub messages
+	ids         []*flow.Identity
+	metrics     *metrics.NoopCollector // no-op performance monitoring simulation
+	logger      zerolog.Logger
+	providers   []*unittest.UpdatableIDProvider
+	sporkId     flow.Identifier
+	mwCancel    context.CancelFunc
+	mwCtx       irrecoverable.SignalerContext
+}
+
+// TestNetworkTestSuite runs all the test methods in this test suite.
+func TestNetworkTestSuite(t *testing.T) {
+	// should not run in parallel, some tests are stateful.
+	suite.Run(t, new(NetworkTestSuite))
+}
+
+// SetupTest initializes the test setup prior to each test.
+func (suite *NetworkTestSuite) SetupTest() {
+	suite.logger = unittest.Logger()
+
+	suite.size = 2 // operates on two networks
+	suite.metrics = metrics.NewNoopCollector()
+
+	// create and start the networks and inject a connection observer
+	peerChannel := make(chan string)
+	ob := tagsObserver{
+		tags: peerChannel,
+		log:  suite.logger,
+	}
+
+	suite.sporkId = unittest.IdentifierFixture()
+
+	libP2PNodes := make([]p2p.LibP2PNode, 0)
+	identities := make(flow.IdentityList, 0)
+	tagObservables := make([]observable.Observable, 0)
+	idProvider := unittest.NewUpdatableIDProvider(flow.IdentityList{})
+	defaultFlowConfig, err := config.DefaultConfig()
+	require.NoError(suite.T(), err)
+	defaultFlowConfig.NetworkConfig.Unicast.UnicastManager.CreateStreamBackoffDelay = 1 * time.Millisecond
+
+	opts := []p2ptest.NodeFixtureParameterOption{p2ptest.WithUnicastHandlerFunc(nil)}
+
+	for i := 0; i < suite.size; i++ {
+		connManager, err := testutils.NewTagWatchingConnManager(
+			unittest.Logger(),
+			metrics.NewNoopCollector(),
+			&defaultFlowConfig.NetworkConfig.ConnectionManager)
+		require.NoError(suite.T(), err)
+
+		opts = append(opts,
+			p2ptest.WithConnectionManager(connManager),
+			p2ptest.WithRole(flow.RoleExecution),
+			p2ptest.OverrideFlowConfig(defaultFlowConfig)) // to suppress exponential backoff
+		node, nodeId := p2ptest.NodeFixture(suite.T(),
+			suite.sporkId,
+			suite.T().Name(),
+			idProvider,
+			opts...)
+ libP2PNodes = append(libP2PNodes, node) + identities = append(identities, &nodeId) + tagObservables = append(tagObservables, connManager) + } + idProvider.SetIdentities(identities) + + suite.ids = identities + suite.libP2PNodes = libP2PNodes + + suite.networks, suite.providers = testutils.NetworksFixture(suite.T(), suite.sporkId, suite.ids, suite.libP2PNodes) + for _, observableConnMgr := range tagObservables { + observableConnMgr.Subscribe(&ob) + } + suite.obs = peerChannel + + require.Len(suite.Suite.T(), tagObservables, suite.size) + require.Len(suite.Suite.T(), suite.ids, suite.size) + + ctx, cancel := context.WithCancel(context.Background()) + suite.mwCancel = cancel + + suite.mwCtx = irrecoverable.NewMockSignalerContext(suite.T(), ctx) + + testutils.StartNodes(suite.mwCtx, suite.T(), suite.libP2PNodes) + + for i, net := range suite.networks { + unittest.RequireComponentsReadyBefore(suite.T(), 1*time.Second, libP2PNodes[i]) + net.Start(suite.mwCtx) + unittest.RequireComponentsReadyBefore(suite.T(), 1*time.Second, net) + } +} + +func (suite *NetworkTestSuite) TearDownTest() { + suite.mwCancel() + + testutils.StopComponents(suite.T(), suite.networks, 1*time.Second) + testutils.StopComponents(suite.T(), suite.libP2PNodes, 1*time.Second) + suite.libP2PNodes = nil + suite.ids = nil + suite.size = 0 +} + +// TestUpdateNodeAddresses tests that the UpdateNodeAddresses method correctly updates +// the addresses of the staked network participants. +func (suite *NetworkTestSuite) TestUpdateNodeAddresses() { + ctx, cancel := context.WithCancel(suite.mwCtx) + irrecoverableCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx) + + // create a new staked identity + ids, libP2PNodes := testutils.LibP2PNodeForNetworkFixture(suite.T(), suite.sporkId, 1) + idProvider := unittest.NewUpdatableIDProvider(append(suite.ids, ids...)) + networkCfg := testutils.NetworkConfigFixture( + suite.T(), + *ids[0], + idProvider, + suite.sporkId, + libP2PNodes[0]) + newNet, err := underlay.NewNetwork(networkCfg) + require.NoError(suite.T(), err) + require.Len(suite.T(), ids, 1) + newId := ids[0] + + // start up nodes and peer managers + testutils.StartNodes(irrecoverableCtx, suite.T(), libP2PNodes) + defer testutils.StopComponents(suite.T(), libP2PNodes, 2*time.Second) + + newNet.Start(irrecoverableCtx) + defer testutils.StopComponents(suite.T(), []network.EngineRegistry{newNet}, 2*time.Second) + unittest.RequireComponentsReadyBefore(suite.T(), 1*time.Second, newNet) + + idList := flow.IdentityList(append(suite.ids, newId)) + + // needed to enable ID translation + suite.providers[0].SetIdentities(idList) + + message := &libp2pmessage.TestMessage{ + Text: "TestUpdateNodeAddresses", + } + + senderID := suite.ids[0].NodeID + senderMessageProcessor := mocknetwork.NewMessageProcessor(suite.T()) + receiverMessageProcessor := mocknetwork.NewMessageProcessor(suite.T()) + internal, err := message.ToInternal() + require.NoError(suite.T(), err) + receiverMessageProcessor. + On("Process", channels.TestNetworkChannel, senderID, internal). + Return(nil). 
+		Maybe() // we may not actually process this message depending on how fast the test runs
+
+	con, err := suite.networks[0].Register(channels.TestNetworkChannel, senderMessageProcessor)
+	require.NoError(suite.T(), err)
+	_, err = newNet.Register(channels.TestNetworkChannel, receiverMessageProcessor)
+	require.NoError(suite.T(), err)
+
+	// unicast should fail to send because no address is known yet for the new identity
+	err = con.Unicast(message, newId.NodeID)
+	require.True(suite.T(), strings.Contains(err.Error(), swarm.ErrNoAddresses.Error()))
+
+	// update the addresses
+	suite.networks[0].UpdateNodeAddresses()
+
+	// now the message should send successfully
+	err = con.Unicast(message, newId.NodeID)
+	require.NoError(suite.T(), err)
+
+	cancel()
+}
+
+func (suite *NetworkTestSuite) TestUnicastRateLimit_Messages() {
+	unittest.SkipUnless(suite.T(), unittest.TEST_FLAKY, "flaky")
+	// the limiter limit will be set to 5 events/sec; the 6th event per interval will be rate limited
+	limit := rate.Limit(5)
+
+	// burst per interval
+	burst := 5
+
+	for _, net := range suite.networks {
+		require.NoError(suite.T(), net.Subscribe(channels.TestNetworkChannel))
+	}
+
+	messageRateLimiter := ratelimiter.NewRateLimiter(limit, burst, 3*time.Second)
+
+	// we only expect messages from the first network in the test suite
+	expectedPID, err := unittest.PeerIDFromFlowID(suite.ids[0])
+	require.NoError(suite.T(), err)
+
+	// the onRateLimit callback is used to keep track of how many times rate limiting occurs.
+	rateLimits := atomic.NewUint64(0)
+
+	onRateLimit := func(peerID peer.ID, role, msgType, topic, reason string) {
+		require.Equal(suite.T(), reason, ratelimit.ReasonMessageCount.String())
+		require.Equal(suite.T(), expectedPID, peerID)
+		// update hook calls
+		rateLimits.Inc()
+	}
+
+	// set up a rate limit distributor that will be used to track the number of rate limits via the onRateLimit callback.
+	consumer := testutils.NewRateLimiterConsumer(onRateLimit)
+	distributor := ratelimit.NewUnicastRateLimiterDistributor()
+	distributor.AddConsumer(consumer)
+
+	opts := []ratelimit.RateLimitersOption{ratelimit.WithMessageRateLimiter(messageRateLimiter), ratelimit.WithNotifier(distributor), ratelimit.WithDisabledRateLimiting(false)}
+	rateLimiters := ratelimit.NewRateLimiters(opts...)
+
+	defaultFlowConfig, err := config.DefaultConfig()
+	require.NoError(suite.T(), err)
+	defaultFlowConfig.NetworkConfig.Unicast.UnicastManager.CreateStreamBackoffDelay = 1 * time.Millisecond
+
+	idProvider := unittest.NewUpdatableIDProvider(suite.ids)
+	ids, libP2PNodes := testutils.LibP2PNodeForNetworkFixture(suite.T(),
+		suite.sporkId,
+		1,
+		p2ptest.WithUnicastRateLimitDistributor(distributor),
+		p2ptest.OverrideFlowConfig(defaultFlowConfig), // to suppress exponential backoff
+		p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(pid peer.ID) error {
+			if messageRateLimiter.IsRateLimited(pid) {
+				return fmt.Errorf("rate-limited peer")
+			}
+			return nil
+		})))
+	idProvider.SetIdentities(append(suite.ids, ids...))
+
+	netCfg := testutils.NetworkConfigFixture(
+		suite.T(),
+		*ids[0],
+		idProvider,
+		suite.sporkId,
+		libP2PNodes[0])
+	newNet, err := underlay.NewNetwork(
+		netCfg,
+		underlay.WithUnicastRateLimiters(rateLimiters),
+		underlay.WithPeerManagerFilters(testutils.IsRateLimitedPeerFilter(messageRateLimiter)))
+	require.NoError(suite.T(), err)
+
+	require.Len(suite.T(), ids, 1)
+	newId := ids[0]
+	idList := flow.IdentityList(append(suite.ids, newId))
+
+	suite.providers[0].SetIdentities(idList)
+
+	ctx, cancel := context.WithCancel(suite.mwCtx)
+	irrecoverableCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx)
+	testutils.StartNodes(irrecoverableCtx, suite.T(), libP2PNodes)
+	defer testutils.StopComponents(suite.T(), libP2PNodes, 1*time.Second)
+	testutils.StartNetworks(irrecoverableCtx, suite.T(), []network.EngineRegistry{newNet})
+
+	calls := atomic.NewUint64(0)
+	ch := make(chan struct{})
+	// registers an engine on the new network
+	newEngine := &mocknetwork.MessageProcessor{}
+	_, err = newNet.Register(channels.TestNetworkChannel, newEngine)
+	require.NoError(suite.T(), err)
+	newEngine.On("Process", channels.TestNetworkChannel, suite.ids[0].NodeID, mockery.Anything).Run(func(args mockery.Arguments) {
+		// close ch exactly once, on the 5th processed message, to avoid a double-close panic
+		if calls.Inc() == 5 {
+			close(ch)
+		}
+	}).Return(nil)
+
+	// needed to enable ID translation
+	suite.providers[0].SetIdentities(idList)
+
+	// update the addresses
+	suite.networks[0].UpdateNodeAddresses()
+
+	// add our sender node as a direct peer to our receiving node; this allows us to ensure
+	// that connections to peers that are rate limited are completely pruned. IsConnected will
+	// return true only if the node is a direct peer of the other; after rate limiting, this direct
+	// peer should be removed by the peer manager.
+	p2ptest.LetNodesDiscoverEachOther(suite.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0], suite.libP2PNodes[0]}, flow.IdentityList{ids[0], suite.ids[0]})
+	p2ptest.TryConnectionAndEnsureConnected(suite.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0], suite.libP2PNodes[0]})
+
+	con0, err := suite.networks[0].Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{})
+	require.NoError(suite.T(), err)
+
+	// with the rate limit configured to 5 msg/sec we send 10 messages at once and expect the rate limiter
+	// to be invoked at least once. We send 10 messages due to flakiness caused by asynchronous stream handling.
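+	// Note: only a lower bound on rate-limit callbacks is asserted at the end of the test
+	// (rateLimits > 0), since the exact count depends on stream timing.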
+	for i := 0; i < 10; i++ {
+		err = con0.Unicast(&libp2pmessage.TestMessage{
+			Text: fmt.Sprintf("hello-%d", i),
+		}, newId.NodeID)
+		require.NoError(suite.T(), err)
+	}
+	// wait for all rate limits before shutting down the network
+	unittest.RequireCloseBefore(suite.T(), ch, 100*time.Millisecond, "rate limit test ch was not closed on time")
+
+	// sleep for 2 seconds to allow the connection pruner to prune connections
+	time.Sleep(2 * time.Second)
+
+	// ensure the connection to the rate-limited peer is pruned
+	p2ptest.EnsureNotConnectedBetweenGroups(suite.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0]}, []p2p.LibP2PNode{suite.libP2PNodes[0]})
+	p2pfixtures.EnsureNoStreamCreationBetweenGroups(suite.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0]}, []p2p.LibP2PNode{suite.libP2PNodes[0]})
+
+	// eventually the rate-limited node should be able to reconnect and send messages
+	require.Eventually(suite.T(), func() bool {
+		err = con0.Unicast(&libp2pmessage.TestMessage{
+			Text: "hello",
+		}, newId.NodeID)
+		return err == nil
+	}, 3*time.Second, 100*time.Millisecond)
+
+	// shutdown our network so that each message can be processed
+	cancel()
+	unittest.RequireCloseBefore(suite.T(), libP2PNodes[0].Done(), 100*time.Millisecond, "could not stop libp2p node on time")
+	unittest.RequireCloseBefore(suite.T(), newNet.Done(), 100*time.Millisecond, "could not stop network on time")
+
+	// expect our rate-limited peer callback to be invoked at least once
+	require.True(suite.T(), rateLimits.Load() > 0)
+}
+
+func (suite *NetworkTestSuite) TestUnicastRateLimit_Bandwidth() {
+	// the limiter limit will be set to 1000 bytes/sec
+	limit := rate.Limit(1000)
+
+	// burst per interval
+	burst := 1000
+
+	// we only expect messages from the first network in the test suite
+	expectedPID, err := unittest.PeerIDFromFlowID(suite.ids[0])
+	require.NoError(suite.T(), err)
+
+	// setup bandwidth rate limiter
+	bandwidthRateLimiter := ratelimit.NewBandWidthRateLimiter(limit, burst, 4*time.Second)
+
+	// the onRateLimit callback keeps track of how many times rate limiting occurs
+	// and closes ch on the first rate limit.
+	ch := make(chan struct{})
+	rateLimits := atomic.NewUint64(0)
+	onRateLimit := func(peerID peer.ID, role, msgType, topic, reason string) {
+		require.Equal(suite.T(), reason, ratelimit.ReasonBandwidth.String())
+		require.Equal(suite.T(), expectedPID, peerID)
+		// update hook calls
+		rateLimits.Inc()
+		close(ch)
+	}
+
+	consumer := testutils.NewRateLimiterConsumer(onRateLimit)
+	distributor := ratelimit.NewUnicastRateLimiterDistributor()
+	distributor.AddConsumer(consumer)
+	opts := []ratelimit.RateLimitersOption{ratelimit.WithBandwidthRateLimiter(bandwidthRateLimiter), ratelimit.WithNotifier(distributor), ratelimit.WithDisabledRateLimiting(false)}
+	rateLimiters := ratelimit.NewRateLimiters(opts...)
+
+	defaultFlowConfig, err := config.DefaultConfig()
+	require.NoError(suite.T(), err)
+	defaultFlowConfig.NetworkConfig.Unicast.UnicastManager.CreateStreamBackoffDelay = 1 * time.Millisecond
+
+	idProvider := unittest.NewUpdatableIDProvider(suite.ids)
+	// create a new staked identity
+	ids, libP2PNodes := testutils.LibP2PNodeForNetworkFixture(suite.T(),
+		suite.sporkId,
+		1,
+		p2ptest.WithUnicastRateLimitDistributor(distributor),
+		p2ptest.OverrideFlowConfig(defaultFlowConfig), // to suppress exponential backoff
+		p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(pid peer.ID) error {
+			// the connection gater will refuse connections from rate-limited nodes
+			if bandwidthRateLimiter.IsRateLimited(pid) {
+				return fmt.Errorf("rate-limited peer")
+			}
+
+			return nil
+		})))
+	idProvider.SetIdentities(append(suite.ids, ids...))
+	suite.providers[0].SetIdentities(append(suite.ids, ids...))
+
+	netCfg := testutils.NetworkConfigFixture(
+		suite.T(),
+		*ids[0],
+		idProvider,
+		suite.sporkId,
+		libP2PNodes[0])
+	newNet, err := underlay.NewNetwork(
+		netCfg,
+		underlay.WithUnicastRateLimiters(rateLimiters),
+		underlay.WithPeerManagerFilters(testutils.IsRateLimitedPeerFilter(bandwidthRateLimiter)))
+	require.NoError(suite.T(), err)
+	require.Len(suite.T(), ids, 1)
+	newId := ids[0]
+
+	ctx, cancel := context.WithCancel(suite.mwCtx)
+	irrecoverableCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx)
+
+	testutils.StartNodes(irrecoverableCtx, suite.T(), libP2PNodes)
+	defer testutils.StopComponents(suite.T(), libP2PNodes, 1*time.Second)
+
+	testutils.StartNetworks(irrecoverableCtx, suite.T(), []network.EngineRegistry{newNet})
+	unittest.RequireComponentsReadyBefore(suite.T(), 1*time.Second, newNet)
+
+	// registers an engine on the new network so that it can receive messages on the TestNetworkChannel
+	newEngine := mocknetwork.NewMessageProcessor(suite.T())
+	_, err = newNet.Register(channels.TestNetworkChannel, newEngine)
+	require.NoError(suite.T(), err)
+
+	callCount := counters.NewMonotonicCounter(0)
+	newEngine.On("Process", channels.TestNetworkChannel, suite.ids[0].NodeID, mockery.Anything).Run(func(args mockery.Arguments) {
+		_ = callCount.Increment()
+	}).Return(nil)
+
+	idList := flow.IdentityList(append(suite.ids, newId))
+
+	// needed to enable ID translation
+	suite.providers[0].SetIdentities(idList)
+
+	// update the addresses
+	suite.networks[0].UpdateNodeAddresses()
+
+	// add our sender node as a direct peer to our receiving node; this allows us to ensure
+	// that connections to peers that are rate limited are completely pruned. IsConnected will
+	// return true only if the node is a direct peer of the other; after rate limiting, this direct
+	// peer should be removed by the peer manager.
+	p2ptest.LetNodesDiscoverEachOther(suite.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0], suite.libP2PNodes[0]}, flow.IdentityList{ids[0], suite.ids[0]})
+
+	// creates a message of roughly 400 bytes (300 repeated-letter payload bytes + ~100 bytes of message info)
+	generate := func(letter rune) string {
+		b := make([]byte, 300)
+		for i := range b {
+			b[i] = byte(letter)
+		}
+		return string(b)
+	}
+
+	// send 3 messages at once with a size of ~400 bytes each. The third message will be rate limited
+	// as it exceeds our allowed bandwidth of 1000 bytes.
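+	// (with a burst budget of 1000 bytes, the first two ~400-byte messages fit; the third exceeds the remaining budget)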
+	con0, err := suite.networks[0].Register(channels.TestNetworkChannel, mocknetwork.NewMessageProcessor(suite.T()))
+	require.NoError(suite.T(), err)
+
+	err = con0.Unicast(&libp2pmessage.TestMessage{
+		Text: generate('A'),
+	}, newId.NodeID)
+	require.NoError(suite.T(), err)
+
+	err = con0.Unicast(&libp2pmessage.TestMessage{
+		Text: generate('B'),
+	}, newId.NodeID)
+	require.NoError(suite.T(), err)
+
+	// this message will be rate limited. The remote node will reset the stream, so depending on how
+	// quickly the send happens, we may get an error from attempting to close a reset stream
+	err = con0.Unicast(&libp2pmessage.TestMessage{
+		Text: generate('C'),
+	}, newId.NodeID)
+	if err != nil {
+		require.Contains(suite.T(), err.Error(), "stream reset")
+	}
+
+	// wait for all rate limits before shutting down the network
+	unittest.RequireCloseBefore(suite.T(), ch, 100*time.Millisecond, "rate limit test ch was not closed on time")
+
+	// the remote node should have received the first 2 messages
+	assert.Equal(suite.T(), uint64(2), callCount.Value())
+
+	// sleep for 1 second to allow the connection pruner to prune connections
+	time.Sleep(1 * time.Second)
+
+	// ensure the connection to the rate-limited peer is pruned
+	p2ptest.EnsureNotConnectedBetweenGroups(suite.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0]}, []p2p.LibP2PNode{suite.libP2PNodes[0]})
+	p2pfixtures.EnsureNoStreamCreationBetweenGroups(suite.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0]}, []p2p.LibP2PNode{suite.libP2PNodes[0]})
+
+	// eventually the rate-limited node should be able to reconnect and send messages
+	require.Eventually(suite.T(), func() bool {
+		err = con0.Unicast(&libp2pmessage.TestMessage{
+			Text: "",
+		}, newId.NodeID)
+		return err == nil
+	}, 5*time.Second, 100*time.Millisecond)
+
+	require.Eventually(suite.T(), func() bool {
+		return callCount.Value() == 3
+	}, 1*time.Second, 100*time.Millisecond)
+
+	// shutdown our network so that each message can be processed
+	cancel()
+	unittest.RequireComponentsDoneBefore(suite.T(), 5*time.Second, newNet)
+
+	// expect our rate-limited peer callback to be invoked exactly once
+	require.Equal(suite.T(), uint64(1), rateLimits.Load())
+}
+
+func (suite *NetworkTestSuite) createOverlay(provider *unittest.UpdatableIDProvider) *mocknetwork.Underlay {
+	overlay := &mocknetwork.Underlay{}
+	overlay.On("Identities").Maybe().Return(func() flow.IdentityList {
+		return provider.Identities(filter.Any)
+	})
+	overlay.On("Topology").Maybe().Return(func() flow.IdentityList {
+		return provider.Identities(filter.Any)
+	}, nil)
+	// this test does not exercise the topic validator (e.g., against spoofing),
+	// so we always return a valid identity. We only care about the node role for the test TestMaxMessageSize_Unicast,
+	// where ENs are the only nodes authorized to send chunk data responses.
+	identityOpts := unittest.WithRole(flow.RoleExecution)
+	overlay.On("Identity", mockery.AnythingOfType("peer.ID")).Maybe().Return(unittest.IdentityFixture(identityOpts), true)
+	return overlay
+}
+
+// TestPing sends a message from the first network of the test suite to the last one and checks that the
+// last network receives the message and that the message is correctly decoded.
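+// The message is sent via Unicast, and the mocked receiver engine verifies the channel,
+// the origin ID, and the payload of the delivered message.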
+func (suite *NetworkTestSuite) TestPing() {
+	receiveWG := sync.WaitGroup{}
+	receiveWG.Add(1)
+
+	senderNodeIndex := 0
+	targetNodeIndex := suite.size - 1
+
+	expectedPayload := "TestPingContentReception"
+	// mocks a target engine on the last node of the test suite that will receive the message on the test channel.
+	targetEngine := &mocknetwork.MessageProcessor{}
+	_, err := suite.networks[targetNodeIndex].Register(channels.TestNetworkChannel, targetEngine)
+	require.NoError(suite.T(), err)
+	// the target engine must receive the message once with the expected payload
+	targetEngine.On("Process", mockery.Anything, mockery.Anything, mockery.Anything).
+		Run(func(args mockery.Arguments) {
+			receiveWG.Done()
+
+			msgChannel, ok := args[0].(channels.Channel)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), channels.TestNetworkChannel, msgChannel) // channel
+
+			msgOriginID, ok := args[1].(flow.Identifier)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), suite.ids[senderNodeIndex].NodeID, msgOriginID) // sender id
+
+			msgPayload, ok := args[2].(*flow.TestMessage)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), expectedPayload, msgPayload.Text) // payload
+		}).Return(nil).Once()
+
+	// sends a direct message from the first node to the last node
+	con0, err := suite.networks[senderNodeIndex].Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{})
+	require.NoError(suite.T(), err)
+	err = con0.Unicast(&libp2pmessage.TestMessage{
+		Text: expectedPayload,
+	}, suite.ids[targetNodeIndex].NodeID)
+	require.NoError(suite.Suite.T(), err)
+
+	unittest.RequireReturnsBefore(suite.T(), receiveWG.Wait, 1*time.Second, "did not receive message")
+}
+
+// TestMultiPing_TwoPings sends two messages concurrently from the first network of the test suite to the last one,
+// and checks that the last network receives the messages and that the messages are correctly decoded.
+func (suite *NetworkTestSuite) TestMultiPing_TwoPings() {
+	suite.MultiPing(2)
+}
+
+// TestMultiPing_FourPings sends four messages concurrently from the first network of the test suite to the last one,
+// and checks that the last network receives the messages and that the messages are correctly decoded.
+func (suite *NetworkTestSuite) TestMultiPing_FourPings() {
+	suite.MultiPing(4)
+}
+
+// TestMultiPing_EightPings sends eight messages concurrently from the first network of the test suite to the last one,
+// and checks that the last network receives the messages and that the messages are correctly decoded.
+func (suite *NetworkTestSuite) TestMultiPing_EightPings() {
+	suite.MultiPing(8)
+}
+
+// MultiPing sends count-many distinct messages concurrently from the first network of the test suite to the last one.
+// It verifies that the content of each message is received correctly and that each message is received by the
+// last network exactly once.
+func (suite *NetworkTestSuite) MultiPing(count int) {
+	receiveWG := sync.WaitGroup{}
+	sendWG := sync.WaitGroup{}
+	senderNodeIndex := 0
+	targetNodeIndex := suite.size - 1
+
+	receivedPayloads := concurrentmap.New[string, struct{}]() // keep track of unique payloads received.
+
+	// regex to extract the payload from the message
+	regex := regexp.MustCompile(`^hello from: \d`)
+
+	// creates a conduit on the sender to send messages to the target on the test channel.
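+	// (the sender registers a dummy MessageProcessor; only the returned conduit is used)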
+	con0, err := suite.networks[senderNodeIndex].Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{})
+	require.NoError(suite.T(), err)
+
+	// mocks a target engine on the last node of the test suite that will receive the message on the test channel.
+	targetEngine := &mocknetwork.MessageProcessor{}
+	_, err = suite.networks[targetNodeIndex].Register(channels.TestNetworkChannel, targetEngine)
+	require.NoError(suite.T(), err)
+	targetEngine.On("Process", mockery.Anything, mockery.Anything, mockery.Anything).
+		Run(func(args mockery.Arguments) {
+			receiveWG.Done()
+
+			msgChannel, ok := args[0].(channels.Channel)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), channels.TestNetworkChannel, msgChannel) // channel
+
+			msgOriginID, ok := args[1].(flow.Identifier)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), suite.ids[senderNodeIndex].NodeID, msgOriginID) // sender id
+
+			msgPayload, ok := args[2].(*flow.TestMessage)
+			require.True(suite.T(), ok)
+			// payload
+			require.True(suite.T(), regex.MatchString(msgPayload.Text))
+			require.False(suite.T(), receivedPayloads.Has(msgPayload.Text)) // payload must be unique
+			receivedPayloads.Add(msgPayload.Text, struct{}{})
+		}).Return(nil)
+
+	for i := 0; i < count; i++ {
+		receiveWG.Add(1)
+		sendWG.Add(1)
+
+		expectedPayloadText := fmt.Sprintf("hello from: %d", i)
+		go func() {
+			// sends a direct message from the sender node to the target node;
+			// each payload is sent exactly once, matching the uniqueness assertion above
+			err := con0.Unicast(&libp2pmessage.TestMessage{
+				Text: expectedPayloadText,
+			}, suite.ids[targetNodeIndex].NodeID)
+			require.NoError(suite.Suite.T(), err)
+
+			sendWG.Done()
+		}()
+	}
+
+	unittest.RequireReturnsBefore(suite.T(), sendWG.Wait, 1*time.Second, "could not send unicasts on time")
+	unittest.RequireReturnsBefore(suite.T(), receiveWG.Wait, 1*time.Second, "could not receive unicasts on time")
+}
+
+// TestEcho sends an echo message from the first network to the last network;
+// the last network echoes back the message. The test evaluates the correctness
+// of the message reception as well as its content.
+func (suite *NetworkTestSuite) TestEcho() {
+	wg := sync.WaitGroup{}
+	var err error
+
+	wg.Add(2)
+	first := 0
+	last := suite.size - 1
+	// mocks target engines on the first and last nodes of the test suite that will receive the message on the test channel.
+	targetEngine1 := &mocknetwork.MessageProcessor{}
+	con1, err := suite.networks[first].Register(channels.TestNetworkChannel, targetEngine1)
+	require.NoError(suite.T(), err)
+	targetEngine2 := &mocknetwork.MessageProcessor{}
+	con2, err := suite.networks[last].Register(channels.TestNetworkChannel, targetEngine2)
+	require.NoError(suite.T(), err)
+
+	// message sent from the first node to the last node.
+	expectedSendMsg := "TestEcho"
+	// reply from the last node to the first node.
+	expectedReplyMsg := "TestEcho response"
+
+	// mocks the target engine on the last node of the test suite that will receive the message on the test channel, and
+	// echoes the reply message back to the sender.
+	targetEngine2.On("Process", mockery.Anything, mockery.Anything, mockery.Anything).
+		Run(func(args mockery.Arguments) {
+			wg.Done()
+
+			msgChannel, ok := args[0].(channels.Channel)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), channels.TestNetworkChannel, msgChannel) // channel
+
+			msgOriginID, ok := args[1].(flow.Identifier)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), suite.ids[first].NodeID, msgOriginID) // sender id
+
+			msgPayload, ok := args[2].(*flow.TestMessage)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), expectedSendMsg, msgPayload.Text) // payload
+
+			// echoes the reply message back to the sender
+			require.NoError(suite.T(), con2.Unicast(&libp2pmessage.TestMessage{
+				Text: expectedReplyMsg,
+			}, suite.ids[first].NodeID))
+		}).Return(nil).Once()
+
+	// mocks the target engine on the first node of the test suite that will receive the echoed reply on the test channel.
+	targetEngine1.On("Process", mockery.Anything, mockery.Anything, mockery.Anything).
+		Run(func(args mockery.Arguments) {
+			wg.Done()
+
+			msgChannel, ok := args[0].(channels.Channel)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), channels.TestNetworkChannel, msgChannel) // channel
+
+			msgOriginID, ok := args[1].(flow.Identifier)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), suite.ids[last].NodeID, msgOriginID) // sender id
+
+			msgPayload, ok := args[2].(*flow.TestMessage)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), expectedReplyMsg, msgPayload.Text) // payload
+		}).Return(nil)
+
+	// sends a direct message from the first node to the last node
+	require.NoError(suite.T(), con1.Unicast(&libp2pmessage.TestMessage{
+		Text: expectedSendMsg,
+	}, suite.ids[last].NodeID))
+
+	unittest.RequireReturnsBefore(suite.T(), wg.Wait, 5*time.Second, "could not receive unicast on time")
+}
+
+// TestMaxMessageSize_Unicast evaluates that invoking the Unicast method of the network on a message
+// whose size is beyond the permissible unicast message size returns an error.
+func (suite *NetworkTestSuite) TestMaxMessageSize_Unicast() {
+	first := 0
+	last := suite.size - 1
+
+	// creates a network payload beyond the maximum message size
+	// Note: networkPayloadFixture considers 1000 bytes as the overhead of the encoded message,
+	// so the generated payload is 1000 bytes below the maximum unicast message size.
+	// We hence add 1000 bytes to the input of the network payload fixture to make
+	// sure that the payload is beyond the permissible size.
+	payload := testutils.NetworkPayloadFixture(suite.T(), uint(underlay.DefaultMaxUnicastMsgSize)+1000)
+	event := &libp2pmessage.TestMessage{
+		Text: string(payload),
+	}
+
+	// sends a direct message from the first node to the last node
+	con0, err := suite.networks[first].Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{})
+	require.NoError(suite.T(), err)
+	require.Error(suite.T(), con0.Unicast(event, suite.ids[last].NodeID))
+}
+
+// TestLargeMessageSize_SendDirect asserts that a ChunkDataResponse is treated as a large message and can be unicasted
+// successfully even though its size is greater than the default message size.
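+// Chunk data responses are classified as large messages and are therefore subject to a
+// higher unicast size limit (see TestChunkDataPackMaxMessageSize below).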
+func (suite *NetworkTestSuite) TestLargeMessageSize_SendDirect() {
+	sourceIndex := 0
+	targetIndex := suite.size - 1
+	targetId := suite.ids[targetIndex].NodeID
+
+	// creates a network payload with a size greater than the default max size using a known large message type
+	targetSize := uint64(underlay.DefaultMaxUnicastMsgSize) + 1000
+	event := unittest.ChunkDataResponseMsgFixture(unittest.IdentifierFixture(), unittest.WithApproximateSize(targetSize))
+
+	// expect one message to be received by the target
+	ch := make(chan struct{})
+	// mocks a target engine on the last node of the test suite that will receive the message on the provide-chunks channel.
+	targetEngine := &mocknetwork.MessageProcessor{}
+	_, err := suite.networks[targetIndex].Register(channels.ProvideChunks, targetEngine)
+	require.NoError(suite.T(), err)
+	targetEngine.On("Process", mockery.Anything, mockery.Anything, mockery.Anything).
+		Run(func(args mockery.Arguments) {
+			defer close(ch)
+
+			msgChannel, ok := args[0].(channels.Channel)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), channels.ProvideChunks, msgChannel) // channel
+
+			msgOriginID, ok := args[1].(flow.Identifier)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), suite.ids[sourceIndex].NodeID, msgOriginID) // sender id
+
+			msgPayload, ok := args[2].(*flow.ChunkDataResponse)
+			require.True(suite.T(), ok)
+
+			internal, err := event.ToInternal()
+			require.NoError(suite.T(), err)
+
+			require.Equal(suite.T(), internal, msgPayload) // payload
+		}).Return(nil).Once()
+
+	// sends a direct message from the source node to the target node
+	con0, err := suite.networks[sourceIndex].Register(channels.ProvideChunks, &mocknetwork.MessageProcessor{})
+	require.NoError(suite.T(), err)
+	require.NoError(suite.T(), con0.Unicast(event, targetId))
+
+	// check message reception on the target
+	unittest.RequireCloseBefore(suite.T(), ch, 5*time.Second, "source node failed to send large message to target")
+}
+
+// TestMaxMessageSize_Publish evaluates that invoking the Publish method of the network on a message
+// whose size is beyond the permissible publish message size returns an error.
+func (suite *NetworkTestSuite) TestMaxMessageSize_Publish() {
+	firstIndex := 0
+	lastIndex := suite.size - 1
+	lastNodeId := suite.ids[lastIndex].NodeID
+
+	// creates a network payload beyond the maximum message size
+	// Note: networkPayloadFixture considers 1000 bytes as the overhead of the encoded message,
+	// so the generated payload is 1000 bytes below the maximum publish message size.
+	// We hence add 1000 bytes to the input of the network payload fixture to make
+	// sure that the payload is beyond the permissible size.
+	payload := testutils.NetworkPayloadFixture(suite.T(), uint(p2pnode.DefaultMaxPubSubMsgSize)+1000)
+	event := &libp2pmessage.TestMessage{
+		Text: string(payload),
+	}
+	con0, err := suite.networks[firstIndex].Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{})
+	require.NoError(suite.T(), err)
+
+	err = con0.Publish(event, lastNodeId)
+	require.Error(suite.Suite.T(), err)
+	require.ErrorContains(suite.T(), err, "exceeds configured max message size")
+}
+
+// TestUnsubscribe tests that an engine can unsubscribe from a topic it was earlier subscribed to and stop receiving
+// messages.
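+// The first message ("hello1") must be delivered before the conduit is closed; a second
+// message ("hello2") published afterwards must never reach the unsubscribed engine.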
+func (suite *NetworkTestSuite) TestUnsubscribe() {
+	senderIndex := 0
+	targetIndex := suite.size - 1
+	targetId := suite.ids[targetIndex].NodeID
+
+	msgRcvd := make(chan struct{}, 2)
+	msgRcvdFun := func() {
+		<-msgRcvd
+	}
+
+	targetEngine := &mocknetwork.MessageProcessor{}
+	con2, err := suite.networks[targetIndex].Register(channels.TestNetworkChannel, targetEngine)
+	require.NoError(suite.T(), err)
+	targetEngine.On("Process", mockery.Anything, mockery.Anything, mockery.Anything).
+		Run(func(args mockery.Arguments) {
+			msgChannel, ok := args[0].(channels.Channel)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), channels.TestNetworkChannel, msgChannel) // channel
+
+			msgOriginID, ok := args[1].(flow.Identifier)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), suite.ids[senderIndex].NodeID, msgOriginID) // sender id
+
+			msgPayload, ok := args[2].(*flow.TestMessage)
+			require.True(suite.T(), ok)
+			require.Equal(suite.T(), "hello1", msgPayload.Text) // payload; we only expect "hello1", which was sent before the unsubscribe.
+			msgRcvd <- struct{}{}
+		}).Return(nil)
+
+	// first test that when both nodes are subscribed to the channel, the target node receives the message
+	con1, err := suite.networks[senderIndex].Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{})
+	require.NoError(suite.T(), err)
+
+	// set up waiting for suite.size pubsub tags indicating a mesh has formed
+	for i := 0; i < suite.size; i++ {
+		select {
+		case <-suite.obs:
+		case <-time.After(2 * time.Second):
+			assert.FailNow(suite.T(), "could not receive pubsub tag indicating mesh formed")
+		}
+	}
+
+	err = con1.Publish(&libp2pmessage.TestMessage{
+		Text: "hello1",
+	}, targetId)
+	require.NoError(suite.T(), err)
+
+	unittest.RequireReturnsBefore(suite.T(), msgRcvdFun, 3*time.Second, "message not received")
+
+	// now unsubscribe the target node from the channel
+	require.NoError(suite.T(), con2.Close())
+
+	// now publish a new message from the first node
+	err = con1.Publish(&libp2pmessage.TestMessage{
+		Text: "hello2",
+	}, targetId)
+	assert.NoError(suite.T(), err)
+
+	// assert that the new message is not received by the target node
+	unittest.RequireNeverReturnBefore(suite.T(), msgRcvdFun, 2*time.Second, "message received unexpectedly")
+}
+
+// TestChunkDataPackMaxMessageSize tests that the max message size for a chunk data pack response is set to the large message size.
+func TestChunkDataPackMaxMessageSize(t *testing.T) {
+	// creates an outgoing chunk data pack response message (imitating an EN sending a chunk data pack response to a VN).
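+	// (the scope carries the encoded payload; UnicastMaxMsgSizeByCode below selects the size limit based on the payload's message code)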
+ msg, err := message.NewOutgoingScope( + flow.IdentifierList{unittest.IdentifierFixture()}, + channels.TopicFromChannel(channels.ProvideChunks, unittest.IdentifierFixture()), + &messages.ChunkDataResponse{ + ChunkDataPack: flow.UntrustedChunkDataPack(*unittest.ChunkDataPackFixture(unittest.IdentifierFixture())), + Nonce: rand.Uint64(), + }, + unittest.NetworkCodec().Encode, + message.ProtocolTypeUnicast) + require.NoError(t, err) + + // get the max message size for the message + size, err := underlay.UnicastMaxMsgSizeByCode(msg.Proto().Payload) + require.NoError(t, err) + require.Equal(t, underlay.LargeMsgMaxUnicastMsgSize, size) +} diff --git a/network/test/blob_service_test.go b/network/test/cohort2/blob_service_test.go similarity index 75% rename from network/test/blob_service_test.go rename to network/test/cohort2/blob_service_test.go index bcce039fa35..ed31131d0cc 100644 --- a/network/test/blob_service_test.go +++ b/network/test/cohort2/blob_service_test.go @@ -1,22 +1,24 @@ -package test +package cohort2 import ( "context" "fmt" - "os" "testing" "time" + "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/rs/zerolog" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.uber.org/atomic" + p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config" + "github.com/onflow/flow-go/network/p2p/connection" "github.com/onflow/flow-go/network/p2p/dht" - + p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/network/underlay" "github.com/onflow/flow-go/utils/unittest" "github.com/onflow/flow-go/model/flow" @@ -26,7 +28,6 @@ import ( "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/testutils" - "github.com/onflow/flow-go/network/mocknetwork" ) // conditionalTopology is a topology that behaves like the underlying topology when the condition is true, @@ -50,7 +51,7 @@ type BlobServiceTestSuite struct { suite.Suite cancel context.CancelFunc - networks []network.Network + networks []*underlay.Network blobServices []network.BlobService datastores []datastore.Batching blobCids []cid.Cid @@ -58,7 +59,6 @@ type BlobServiceTestSuite struct { } func TestBlobService(t *testing.T) { - t.Parallel() suite.Run(t, new(BlobServiceTestSuite)) } @@ -71,8 +71,6 @@ func (suite *BlobServiceTestSuite) putBlob(ds datastore.Batching, blob blobs.Blo func (suite *BlobServiceTestSuite) SetupTest() { suite.numNodes = 3 - logger := zerolog.New(os.Stdout) - // Bitswap listens to connect events but doesn't iterate over existing connections, and fixing this without // race conditions is tricky given the way the code is architected. As a result, libP2P hosts must first listen // on Bitswap before connecting to each other, otherwise their Bitswap requests may never reach each other. 
@@ -84,22 +82,31 @@ func (suite *BlobServiceTestSuite) SetupTest() { signalerCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx) - ids, nodes, mws, networks, _ := testutils.GenerateIDsMiddlewaresNetworks( + sporkId := unittest.IdentifierFixture() + ids, nodes := testutils.LibP2PNodeForNetworkFixture( suite.T(), + sporkId, suite.numNodes, - logger, - unittest.NetworkCodec(), - mocknetwork.NewViolationsConsumer(suite.T()), - testutils.WithDHT("blob_service_test", dht.AsServer()), - testutils.WithPeerUpdateInterval(time.Second), - ) - suite.networks = networks - - testutils.StartNodesAndNetworks(signalerCtx, suite.T(), nodes, networks, 100*time.Millisecond) + p2ptest.WithRole(flow.RoleExecution), + p2ptest.WithDHTOptions(dht.AsServer()), + p2ptest.WithPeerManagerEnabled( + &p2pbuilderconfig.PeerManagerConfig{ + UpdateInterval: 1 * time.Second, + ConnectionPruning: true, + ConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(), + }, nil)) + + suite.networks, _ = testutils.NetworksFixture(suite.T(), sporkId, ids, nodes) + // starts the nodes and networks + testutils.StartNodes(signalerCtx, suite.T(), nodes) + for _, net := range suite.networks { + testutils.StartNetworks(signalerCtx, suite.T(), []network.EngineRegistry{net}) + unittest.RequireComponentsReadyBefore(suite.T(), 1*time.Second, net) + } blobExchangeChannel := channels.Channel("blob-exchange") - for i, net := range networks { + for i, net := range suite.networks { ds := sync.MutexWrap(datastore.NewMapDatastore()) suite.datastores = append(suite.datastores, ds) blob := blobs.NewBlob([]byte(fmt.Sprintf("foo%v", i))) @@ -107,24 +114,25 @@ func (suite *BlobServiceTestSuite) SetupTest() { suite.putBlob(ds, blob) blobService, err := net.RegisterBlobService(blobExchangeChannel, ds) suite.Require().NoError(err) - <-blobService.Ready() + unittest.RequireCloseBefore(suite.T(), blobService.Ready(), 100*time.Millisecond, "blob service not ready") suite.blobServices = append(suite.blobServices, blobService) } // let nodes connect to each other only after they are all listening on Bitswap topologyActive.Store(true) - suite.Require().Eventually(func() bool { - for i, mw := range mws { - for j := i + 1; j < suite.numNodes; j++ { - connected, err := mw.IsConnected(ids[j].NodeID) - suite.Require().NoError(err) - if !connected { - return false + suite.Require().Eventually( + func() bool { + for i, libp2pNode := range nodes { + for j := i + 1; j < suite.numNodes; j++ { + connected, err := libp2pNode.IsConnected(nodes[j].ID()) + require.NoError(suite.T(), err) + if !connected { + return false + } } } - } - return true - }, 3*time.Second, 100*time.Millisecond) + return true + }, 3*time.Second, 100*time.Millisecond) } func (suite *BlobServiceTestSuite) TearDownTest() { @@ -213,18 +221,19 @@ func (suite *BlobServiceTestSuite) TestHas() { } // check that blobs are not received until Has is called by the server - suite.Require().Never(func() bool { - for _, blobChan := range blobChans { - select { - case _, ok := <-blobChan: - if ok { - return true + suite.Require().Never( + func() bool { + for _, blobChan := range blobChans { + select { + case _, ok := <-blobChan: + if ok { + return true + } + default: } - default: } - } - return false - }, time.Second, 100*time.Millisecond) + return false + }, time.Second, 100*time.Millisecond) for i, bex := range suite.blobServices { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) diff --git a/network/test/echoengine.go b/network/test/cohort2/echoengine.go similarity index 95% 
rename from network/test/echoengine.go rename to network/test/cohort2/echoengine.go index d9fdf1cbeac..1ad06064c53 100644 --- a/network/test/echoengine.go +++ b/network/test/cohort2/echoengine.go @@ -1,4 +1,4 @@ -package test +package cohort2 import ( "fmt" @@ -34,7 +34,7 @@ type EchoEngine struct { mockcomponent.Component } -func NewEchoEngine(t *testing.T, net network.Network, cap int, channel channels.Channel, echo bool, send testutils.ConduitSendWrapperFunc) *EchoEngine { +func NewEchoEngine(t *testing.T, net network.EngineRegistry, cap int, channel channels.Channel, echo bool, send testutils.ConduitSendWrapperFunc) *EchoEngine { te := &EchoEngine{ t: t, echomsg: "this is an echo", @@ -89,7 +89,7 @@ func (te *EchoEngine) Process(channel channels.Channel, originID flow.Identifier te.received <- struct{}{} // asserting event as string - lip2pEvent, ok := (event).(*message.TestMessage) + lip2pEvent, ok := (event).(*flow.TestMessage) require.True(te.t, ok, "could not assert event as TestMessage") // checks for duplication diff --git a/network/test/echoengine_test.go b/network/test/cohort2/echoengine_test.go similarity index 84% rename from network/test/echoengine_test.go rename to network/test/cohort2/echoengine_test.go index d04c1a6007c..c8dcde26d08 100644 --- a/network/test/echoengine_test.go +++ b/network/test/cohort2/echoengine_test.go @@ -1,18 +1,14 @@ -package test +package cohort2 import ( "context" "fmt" - "os" "strings" "sync" "testing" "time" - "github.com/onflow/flow-go/network/p2p" - "github.com/ipfs/go-log" - "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -23,32 +19,32 @@ import ( "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/testutils" - "github.com/onflow/flow-go/network/mocknetwork" + "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/underlay" "github.com/onflow/flow-go/utils/unittest" ) -// EchoEngineTestSuite tests the correctness of the entire pipeline of network -> middleware -> libp2p +// EchoEngineTestSuite tests the correctness of the entire pipeline of network -> libp2p // protocol stack. It creates two instances of a stubengine, connects them through network, and sends a // single message from one engine to the other one through different scenarios. type EchoEngineTestSuite struct { suite.Suite - testutils.ConduitWrapper // used as a wrapper around conduit methods - nets []network.Network // used to keep track of the networks - mws []network.Middleware // used to keep track of the middlewares - ids flow.IdentityList // used to keep track of the identifiers associated with networks + testutils.ConduitWrapper // used as a wrapper around conduit methods + networks []*underlay.Network // used to keep track of the networks + libp2pNodes []p2p.LibP2PNode // used to keep track of the libp2p nodes + ids flow.IdentityList // used to keep track of the identifiers associated with networks cancel context.CancelFunc } // Some tests are skipped in short mode to speedup the build. 
-// TestStubEngineTestSuite runs all the test methods in this test suit -func TestStubEngineTestSuite(t *testing.T) { +// TestEchoEngineTestSuite runs all the test methods in this test suit +func TestEchoEngineTestSuite(t *testing.T) { suite.Run(t, new(EchoEngineTestSuite)) } func (suite *EchoEngineTestSuite) SetupTest() { const count = 2 - logger := zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) log.SetAllLoggers(log.LevelError) ctx, cancel := context.WithCancel(context.Background()) @@ -58,39 +54,41 @@ func (suite *EchoEngineTestSuite) SetupTest() { // both nodes should be of the same role to get connected on epidemic dissemination var nodes []p2p.LibP2PNode - suite.ids, nodes, suite.mws, suite.nets, _ = testutils.GenerateIDsMiddlewaresNetworks( - suite.T(), - count, - logger, - unittest.NetworkCodec(), - mocknetwork.NewViolationsConsumer(suite.T()), - ) - - testutils.StartNodesAndNetworks(signalerCtx, suite.T(), nodes, suite.nets, 100*time.Millisecond) + sporkId := unittest.IdentifierFixture() + + suite.ids, nodes = testutils.LibP2PNodeForNetworkFixture(suite.T(), sporkId, count) + suite.libp2pNodes = nodes + suite.networks, _ = testutils.NetworksFixture(suite.T(), sporkId, suite.ids, nodes) + // starts the nodes and networks + testutils.StartNodes(signalerCtx, suite.T(), nodes) + for _, net := range suite.networks { + testutils.StartNetworks(signalerCtx, suite.T(), []network.EngineRegistry{net}) + unittest.RequireComponentsReadyBefore(suite.T(), 1*time.Second, net) + } } // TearDownTest closes the networks within a specified timeout func (suite *EchoEngineTestSuite) TearDownTest() { suite.cancel() - testutils.StopComponents(suite.T(), suite.nets, 3*time.Second) - testutils.StopComponents(suite.T(), suite.mws, 3*time.Second) + testutils.StopComponents(suite.T(), suite.networks, 3*time.Second) + testutils.StopComponents(suite.T(), suite.libp2pNodes, 3*time.Second) } // TestUnknownChannel evaluates that registering an engine with an unknown channel returns an error. // All channels should be registered as topics in engine.topicMap. func (suite *EchoEngineTestSuite) TestUnknownChannel() { - e := NewEchoEngine(suite.T(), suite.nets[0], 1, channels.TestNetworkChannel, false, suite.Unicast) - _, err := suite.nets[0].Register("unknown-channel-id", e) + e := NewEchoEngine(suite.T(), suite.networks[0], 1, channels.TestNetworkChannel, false, suite.Unicast) + _, err := suite.networks[0].Register("unknown-channel-id", e) require.Error(suite.T(), err) } // TestClusterChannel evaluates that registering a cluster channel is done without any error. func (suite *EchoEngineTestSuite) TestClusterChannel() { - e := NewEchoEngine(suite.T(), suite.nets[0], 1, channels.TestNetworkChannel, false, suite.Unicast) + e := NewEchoEngine(suite.T(), suite.networks[0], 1, channels.TestNetworkChannel, false, suite.Unicast) // creates a cluster channel clusterChannel := channels.SyncCluster(flow.Testnet) // registers engine with cluster channel - _, err := suite.nets[0].Register(clusterChannel, e) + _, err := suite.networks[0].Register(clusterChannel, e) // registering cluster channel should not cause an error require.NoError(suite.T(), err) } @@ -98,11 +96,11 @@ func (suite *EchoEngineTestSuite) TestClusterChannel() { // TestDuplicateChannel evaluates that registering an engine with duplicate channel returns an error. 
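Before the duplicate-channel test itself, a condensed sketch of the Register contract that TestUnknownChannel, TestClusterChannel, and TestDuplicateChannel collectively pin down. The helper name registerOnce is illustrative; EngineRegistry, Conduit, and MessageProcessor are the flow-go network interfaces this diff migrates to:

package cohort2

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/network"
	"github.com/onflow/flow-go/network/channels"
)

// registerOnce registers an engine on the test channel and verifies that a
// second registration on the same channel is rejected.
func registerOnce(t *testing.T, net network.EngineRegistry, eng network.MessageProcessor) network.Conduit {
	con, err := net.Register(channels.TestNetworkChannel, eng)
	require.NoError(t, err)

	// the channel is now taken; re-registering must fail
	_, err = net.Register(channels.TestNetworkChannel, eng)
	require.Error(t, err)

	return con
}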
func (suite *EchoEngineTestSuite) TestDuplicateChannel() { // creates an echo engine, which registers it on test network channel - e := NewEchoEngine(suite.T(), suite.nets[0], 1, channels.TestNetworkChannel, false, suite.Unicast) + e := NewEchoEngine(suite.T(), suite.networks[0], 1, channels.TestNetworkChannel, false, suite.Unicast) // attempts to register the same engine again on test network channel which // should cause an error - _, err := suite.nets[0].Register(channels.TestNetworkChannel, e) + _, err := suite.networks[0].Register(channels.TestNetworkChannel, e) require.Error(suite.T(), err) } @@ -161,6 +159,7 @@ func (suite *EchoEngineTestSuite) TestDuplicateMessageSequential_Multicast() { // on deduplicating the received messages via Publish method of nodes' Conduits. // Messages are delivered to the receiver in parallel via the Publish method of Conduits. func (suite *EchoEngineTestSuite) TestDuplicateMessageParallel_Publish() { + unittest.SkipUnless(suite.T(), unittest.TEST_LONG_RUNNING, "covered by TestDuplicateMessageParallel_Multicast") suite.duplicateMessageParallel(suite.Publish) } @@ -209,10 +208,10 @@ func (suite *EchoEngineTestSuite) duplicateMessageSequential(send testutils.Cond rcvID := 1 // registers engines in the network // sender's engine - sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, channels.TestNetworkChannel, false, send) + sender := NewEchoEngine(suite.Suite.T(), suite.networks[sndID], 10, channels.TestNetworkChannel, false, send) // receiver's engine - receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, channels.TestNetworkChannel, false, send) + receiver := NewEchoEngine(suite.Suite.T(), suite.networks[rcvID], 10, channels.TestNetworkChannel, false, send) // allow nodes to heartbeat and discover each other if using PubSub testutils.OptionalSleep(send) @@ -244,10 +243,10 @@ func (suite *EchoEngineTestSuite) duplicateMessageParallel(send testutils.Condui rcvID := 1 // registers engines in the network // sender's engine - sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, channels.TestNetworkChannel, false, send) + sender := NewEchoEngine(suite.Suite.T(), suite.networks[sndID], 10, channels.TestNetworkChannel, false, send) // receiver's engine - receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, channels.TestNetworkChannel, false, send) + receiver := NewEchoEngine(suite.Suite.T(), suite.networks[rcvID], 10, channels.TestNetworkChannel, false, send) // allow nodes to heartbeat and discover each other testutils.OptionalSleep(send) @@ -267,7 +266,10 @@ func (suite *EchoEngineTestSuite) duplicateMessageParallel(send testutils.Condui }() } unittest.RequireReturnsBefore(suite.T(), wg.Wait, 3*time.Second, "could not send message on time") - time.Sleep(1 * time.Second) + + require.Eventually(suite.T(), func() bool { + return len(receiver.seen) > 0 + }, 3*time.Second, 500*time.Millisecond) // receiver should only see the message once, and the rest should be dropped due to // duplication @@ -291,18 +293,18 @@ func (suite *EchoEngineTestSuite) duplicateMessageDifferentChan(send testutils.C // registers engines in the network // first type // sender'suite engine - sender1 := NewEchoEngine(suite.Suite.T(), suite.nets[sndNode], 10, channel1, false, send) + sender1 := NewEchoEngine(suite.Suite.T(), suite.networks[sndNode], 10, channel1, false, send) // receiver's engine - receiver1 := NewEchoEngine(suite.Suite.T(), suite.nets[rcvNode], 10, channel1, false, send) + receiver1 := NewEchoEngine(suite.Suite.T(), 
suite.networks[rcvNode], 10, channel1, false, send) // second type // registers engines in the network // sender'suite engine - sender2 := NewEchoEngine(suite.Suite.T(), suite.nets[sndNode], 10, channel2, false, send) + sender2 := NewEchoEngine(suite.Suite.T(), suite.networks[sndNode], 10, channel2, false, send) // receiver's engine - receiver2 := NewEchoEngine(suite.Suite.T(), suite.nets[rcvNode], 10, channel2, false, send) + receiver2 := NewEchoEngine(suite.Suite.T(), suite.networks[rcvNode], 10, channel2, false, send) // allow nodes to heartbeat and discover each other testutils.OptionalSleep(send) @@ -350,10 +352,10 @@ func (suite *EchoEngineTestSuite) singleMessage(echo bool, send testutils.Condui // registers engines in the network // sender's engine - sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, channels.TestNetworkChannel, echo, send) + sender := NewEchoEngine(suite.Suite.T(), suite.networks[sndID], 10, channels.TestNetworkChannel, echo, send) // receiver's engine - receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, channels.TestNetworkChannel, echo, send) + receiver := NewEchoEngine(suite.Suite.T(), suite.networks[rcvID], 10, channels.TestNetworkChannel, echo, send) // allow nodes to heartbeat and discover each other testutils.OptionalSleep(send) @@ -415,10 +417,10 @@ func (suite *EchoEngineTestSuite) multiMessageSync(echo bool, count int, send te rcvID := 1 // registers engines in the network // sender's engine - sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, channels.TestNetworkChannel, echo, send) + sender := NewEchoEngine(suite.Suite.T(), suite.networks[sndID], 10, channels.TestNetworkChannel, echo, send) // receiver's engine - receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, channels.TestNetworkChannel, echo, send) + receiver := NewEchoEngine(suite.Suite.T(), suite.networks[rcvID], 10, channels.TestNetworkChannel, echo, send) // allow nodes to heartbeat and discover each other testutils.OptionalSleep(send) @@ -486,10 +488,10 @@ func (suite *EchoEngineTestSuite) multiMessageAsync(echo bool, count int, send t // registers engines in the network // sender's engine - sender := NewEchoEngine(suite.Suite.T(), suite.nets[sndID], 10, channels.TestNetworkChannel, echo, send) + sender := NewEchoEngine(suite.Suite.T(), suite.networks[sndID], 10, channels.TestNetworkChannel, echo, send) // receiver's engine - receiver := NewEchoEngine(suite.Suite.T(), suite.nets[rcvID], 10, channels.TestNetworkChannel, echo, send) + receiver := NewEchoEngine(suite.Suite.T(), suite.networks[rcvID], 10, channels.TestNetworkChannel, echo, send) // allow nodes to heartbeat and discover each other testutils.OptionalSleep(send) @@ -523,7 +525,7 @@ func (suite *EchoEngineTestSuite) multiMessageAsync(echo bool, count int, send t unittest.AssertReturnsBefore(suite.T(), func() { // evaluates proper reception of event // casts the received event at the receiver side - rcvEvent, ok := (<-receiver.event).(*message.TestMessage) + rcvEvent, ok := (<-receiver.event).(*flow.TestMessage) // evaluates correctness of casting require.True(suite.T(), ok) @@ -561,7 +563,7 @@ func (suite *EchoEngineTestSuite) multiMessageAsync(echo bool, count int, send t unittest.AssertReturnsBefore(suite.T(), func() { // evaluates proper reception of event // casts the received event at the receiver side - rcvEvent, ok := (<-sender.event).(*message.TestMessage) + rcvEvent, ok := (<-sender.event).(*flow.TestMessage) // evaluates correctness of casting require.True(suite.T(), 
ok) // evaluates content of received echo message @@ -591,7 +593,7 @@ func assertMessageReceived(t *testing.T, e *EchoEngine, m *message.TestMessage, unittest.AssertReturnsBefore(t, func() { // evaluates proper reception of event // casts the received event at the receiver side - rcvEvent, ok := (<-e.event).(*message.TestMessage) + rcvEvent, ok := (<-e.event).(*flow.TestMessage) // evaluates correctness of casting require.True(t, ok) // evaluates content of received message diff --git a/network/test/epochtransition_test.go b/network/test/cohort2/epochtransition_test.go similarity index 82% rename from network/test/epochtransition_test.go rename to network/test/cohort2/epochtransition_test.go index 8b7c0a655bd..e5a22bfd80c 100644 --- a/network/test/epochtransition_test.go +++ b/network/test/cohort2/epochtransition_test.go @@ -1,4 +1,4 @@ -package test +package cohort2 import ( "context" @@ -23,8 +23,10 @@ import ( "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/testutils" - "github.com/onflow/flow-go/network/mocknetwork" + "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/underlay" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -45,13 +47,13 @@ type MutableIdentityTableSuite struct { cancels []context.CancelFunc } -// testNode encapsulates the node state which includes its identity, middleware, network, +// testNode encapsulates the node state which includes its identity, libp2p node, network, // mesh engine and the id refresher type testNode struct { - id *flow.Identity - mw network.Middleware - net network.Network - engine *testutils.MeshEngine + id *flow.Identity + libp2pNode p2p.LibP2PNode + network *underlay.Network + engine *testutils.MeshEngine } // testNodeList encapsulates a list of test node and @@ -110,16 +112,26 @@ func (t *testNodeList) engines() []*testutils.MeshEngine { return engs } -func (t *testNodeList) networks() []network.Network { +func (t *testNodeList) networks() []network.EngineRegistry { t.RLock() defer t.RUnlock() - nets := make([]network.Network, len(t.nodes)) + nets := make([]network.EngineRegistry, len(t.nodes)) for i, node := range t.nodes { - nets[i] = node.net + nets[i] = node.network } return nets } +func (t *testNodeList) libp2pNodes() []p2p.LibP2PNode { + t.RLock() + defer t.RUnlock() + nodes := make([]p2p.LibP2PNode, len(t.nodes)) + for i, node := range t.nodes { + nodes[i] = node.libp2pNode + } + return nodes +} + func TestMutableIdentityTable(t *testing.T) { unittest.SkipUnless(t, unittest.TEST_TODO, "broken test") suite.Run(t, new(MutableIdentityTableSuite)) @@ -128,14 +140,14 @@ func TestMutableIdentityTable(t *testing.T) { // signalIdentityChanged update IDs for all the current set of nodes (simulating an epoch) func (suite *MutableIdentityTableSuite) signalIdentityChanged() { for _, n := range suite.testNodes.nodes { - n.mw.UpdateNodeAddresses() + n.network.UpdateNodeAddresses() } } func (suite *MutableIdentityTableSuite) SetupTest() { suite.testNodes = newTestNodeList() suite.removedTestNodes = newTestNodeList() - rand.Seed(time.Now().UnixNano()) + nodeCount := 10 suite.logger = zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) log.SetAllLoggers(log.LevelError) @@ -165,13 +177,13 @@ func (suite *MutableIdentityTableSuite) setupStateMock() { suite.state = new(mockprotocol.State) 
suite.snapshot = new(mockprotocol.Snapshot) suite.snapshot.On("Head").Return(&final, nil) - suite.snapshot.On("Phase").Return(flow.EpochPhaseCommitted, nil) + suite.snapshot.On("EpochPhase").Return(flow.EpochPhaseCommitted, nil) // return all the current list of ids for the state.Final.Identities call made by the network suite.snapshot.On("Identities", mock.Anything).Return( - func(flow.IdentityFilter) flow.IdentityList { + func(flow.IdentityFilter[flow.Identity]) flow.IdentityList { return suite.testNodes.ids() }, - func(flow.IdentityFilter) error { return nil }) + func(flow.IdentityFilter[flow.Identity]) error { return nil }) suite.state.On("Final").Return(suite.snapshot, nil) } @@ -179,29 +191,32 @@ func (suite *MutableIdentityTableSuite) setupStateMock() { func (suite *MutableIdentityTableSuite) addNodes(count int) { ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx) - - // create the ids, middlewares and networks - ids, nodes, mws, nets, _ := testutils.GenerateIDsMiddlewaresNetworks( - suite.T(), - count, - suite.logger, - unittest.NetworkCodec(), - mocknetwork.NewViolationsConsumer(suite.T()), - ) + sporkId := unittest.IdentifierFixture() + ids, nodes := testutils.LibP2PNodeForNetworkFixture(suite.T(), sporkId, count) + nets, _ := testutils.NetworksFixture(suite.T(), sporkId, ids, nodes) suite.cancels = append(suite.cancels, cancel) - testutils.StartNodesAndNetworks(signalerCtx, suite.T(), nodes, nets, 100*time.Millisecond) + // starts the nodes and networks + testutils.StartNodes(signalerCtx, suite.T(), nodes) + for _, net := range nets { + testutils.StartNetworks(signalerCtx, suite.T(), []network.EngineRegistry{net}) + unittest.RequireComponentsReadyBefore(suite.T(), 1*time.Second, net) + } // create the engines for the new nodes - engines := testutils.GenerateEngines(suite.T(), nets) + engines := make([]*testutils.MeshEngine, count) + for i, n := range nets { + eng := testutils.NewMeshEngine(suite.T(), n, 100, channels.TestNetworkChannel) + engines[i] = eng + } // create the test engines for i := 0; i < count; i++ { node := testNode{ - id: ids[i], - mw: mws[i], - net: nets[i], - engine: engines[i], + id: ids[i], + libp2pNode: nodes[i], + network: nets[i], + engine: engines[i], } suite.testNodes.append(node) } @@ -224,7 +239,6 @@ func (suite *MutableIdentityTableSuite) TestNewNodeAdded() { newNode, err := suite.testNodes.lastAdded() require.NoError(suite.T(), err) newID := newNode.id - newMiddleware := newNode.mw suite.logger.Debug(). Str("new_node", newID.NodeID.String()). @@ -238,7 +252,7 @@ func (suite *MutableIdentityTableSuite) TestNewNodeAdded() { // check if the new node has sufficient connections with the existing nodes // if it does, then it has been inducted successfully in the network - suite.assertConnected(newMiddleware, ids.Filter(filter.Not(filter.HasNodeID(newID.NodeID)))) + suite.assertConnected(newNode.libp2pNode, suite.testNodes.libp2pNodes()) // check that all the engines on this new epoch can talk to each other using any of the three networking primitives suite.assertNetworkPrimitives(ids, engs, nil, nil) @@ -248,11 +262,9 @@ func (suite *MutableIdentityTableSuite) TestNewNodeAdded() { // list (ie. as a result of an ejection or transition into an epoch where that node // has un-staked) then it cannot connect to the network. 
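A note on the setupStateMock hunk above: passing functions to .Return only works because mockery-generated mocks probe the recorded return value for a function of the matching signature before falling back to a static value, which is what lets Identities reflect the live node set as nodes are added and removed. A self-contained sketch of that mechanism, with all names illustrative:

package cohort2

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// registry is a hand-rolled stand-in for a mockery-generated mock.
type registry struct{ mock.Mock }

// Members mirrors the probe pattern mockery emits for every mocked method:
// if the recorded return value is a function, call it now; otherwise use the
// recorded value as-is.
func (r *registry) Members() []string {
	ret := r.Called()
	if rf, ok := ret.Get(0).(func() []string); ok {
		return rf()
	}
	return ret.Get(0).([]string)
}

// TestDynamicReturn shows why setupStateMock passes funcs to .Return: the
// value is recomputed on every call, so mutations made after the mock was
// armed are still observed.
func TestDynamicReturn(t *testing.T) {
	members := []string{"a"}
	r := &registry{}
	r.On("Members").Return(func() []string { return members })

	members = append(members, "b") // mutate after arming the mock
	require.Equal(t, []string{"a", "b"}, r.Members())
}

The same trick is why suite.testNodes.ids() above is re-evaluated on every Identities call instead of being frozen at mock-setup time.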
func (suite *MutableIdentityTableSuite) TestNodeRemoved() { - // removed a node removedNode := suite.removeNode() removedID := removedNode.id - removedMiddleware := removedNode.mw removedEngine := removedNode.engine // update IDs for all the remaining nodes @@ -263,7 +275,7 @@ func (suite *MutableIdentityTableSuite) TestNodeRemoved() { remainingEngs := suite.testNodes.engines() // assert that the removed node has no connections with any of the other nodes - suite.assertDisconnected(removedMiddleware, remainingIDs) + suite.assertDisconnected(removedNode.libp2pNode, suite.testNodes.libp2pNodes()) // check that all remaining engines can still talk to each other while the ones removed can't // using any of the three networking primitives @@ -282,15 +294,12 @@ func (suite *MutableIdentityTableSuite) TestNodesAddedAndRemoved() { // remove a node removedNode := suite.removeNode() removedID := removedNode.id - removedMiddleware := removedNode.mw removedEngine := removedNode.engine // add a node suite.addNodes(1) newNode, err := suite.testNodes.lastAdded() require.NoError(suite.T(), err) - newID := newNode.id - newMiddleware := newNode.mw // update all current nodes suite.signalIdentityChanged() @@ -299,10 +308,10 @@ func (suite *MutableIdentityTableSuite) TestNodesAddedAndRemoved() { remainingEngs := suite.testNodes.engines() // check if the new node has sufficient connections with the existing nodes - suite.assertConnected(newMiddleware, remainingIDs.Filter(filter.Not(filter.HasNodeID(newID.NodeID)))) + suite.assertConnected(newNode.libp2pNode, suite.testNodes.libp2pNodes()) // assert that the removed node has no connections with any of the other nodes - suite.assertDisconnected(removedMiddleware, remainingIDs) + suite.assertDisconnected(removedNode.libp2pNode, suite.testNodes.libp2pNodes()) // check that all remaining engines can still talk to each other while the ones removed can't // using any of the three networking primitives @@ -313,15 +322,19 @@ func (suite *MutableIdentityTableSuite) TestNodesAddedAndRemoved() { suite.assertNetworkPrimitives(remainingIDs, remainingEngs, removedIDs, removedEngines) } -// assertConnected checks that the middleware of a node is directly connected +// assertConnected checks that a libp2p node is directly connected // to at least half of the other nodes. -func (suite *MutableIdentityTableSuite) assertConnected(mw network.Middleware, ids flow.IdentityList) { +func (suite *MutableIdentityTableSuite) assertConnected(thisNode p2p.LibP2PNode, allNodes []p2p.LibP2PNode) { t := suite.T() - threshold := len(ids) / 2 + threshold := len(allNodes) / 2 require.Eventuallyf(t, func() bool { connections := 0 - for _, id := range ids { - connected, err := mw.IsConnected(id.NodeID) + for _, node := range allNodes { + if node == thisNode { + // we don't want to check if a node is connected to itself + continue + } + connected, err := thisNode.IsConnected(node.ID()) require.NoError(t, err) if connected { connections++ @@ -335,13 +348,13 @@ func (suite *MutableIdentityTableSuite) assertConnected(mw network.Middleware, i }, 5*time.Second, 100*time.Millisecond, "node is not connected to enough nodes") } -// assertDisconnected checks that the middleware of a node is not connected to any of the other nodes specified in the -// ids list -func (suite *MutableIdentityTableSuite) assertDisconnected(mw network.Middleware, ids flow.IdentityList) { +// assertDisconnected checks that a libp2p node is not connected to any of the other nodes specified in the +// ids list. 
+func (suite *MutableIdentityTableSuite) assertDisconnected(thisNode p2p.LibP2PNode, allNodes []p2p.LibP2PNode) {
 	t := suite.T()
 	require.Eventuallyf(t, func() bool {
-		for _, id := range ids {
-			connected, err := mw.IsConnected(id.NodeID)
+		for _, node := range allNodes {
+			connected, err := thisNode.IsConnected(node.ID())
 			require.NoError(t, err)
 			if connected {
 				return false
@@ -384,7 +397,7 @@ func (suite *MutableIdentityTableSuite) exchangeMessages(
 
 	for i, allowedEng := range allowedEngs {
 		fromID := allowedIDs[i].NodeID
-		targetIDs := allowedIDs.Filter(filter.Not(filter.HasNodeID(allowedIDs[i].NodeID)))
+		targetIDs := allowedIDs.Filter(filter.Not(filter.HasNodeID[flow.Identity](allowedIDs[i].NodeID)))
 
 		err := suite.sendMessage(fromID, allowedEng, targetIDs, send)
 		require.NoError(suite.T(), err)
diff --git a/network/test/cohort2/unicast_authorization_test.go b/network/test/cohort2/unicast_authorization_test.go
new file mode 100644
index 00000000000..c3f55e54738
--- /dev/null
+++ b/network/test/cohort2/unicast_authorization_test.go
@@ -0,0 +1,552 @@
+package cohort2
+
+import (
+	"context"
+	"io"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/rs/zerolog"
+	mockery "github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/onflow/flow-go/model/flow"
+	libp2pmessage "github.com/onflow/flow-go/model/libp2p/message"
+	"github.com/onflow/flow-go/model/messages"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/network"
+	"github.com/onflow/flow-go/network/channels"
+	"github.com/onflow/flow-go/network/codec"
+	"github.com/onflow/flow-go/network/internal/testutils"
+	"github.com/onflow/flow-go/network/message"
+	mocknetwork "github.com/onflow/flow-go/network/mock"
+	"github.com/onflow/flow-go/network/p2p"
+	p2plogging "github.com/onflow/flow-go/network/p2p/logging"
+	"github.com/onflow/flow-go/network/underlay"
+	"github.com/onflow/flow-go/network/validator"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// UnicastAuthorizationTestSuite tests that messages sent via unicast that are unauthenticated or unauthorized are correctly rejected. Each test in the suite
+// uses 2 networks, a sender and a receiver. A mock slashing violations consumer is used to assert that the messages were rejected. Networks and the cancel func
+// are set during each test run inside the test and removed after each test run in the TearDownTest callback.
+type UnicastAuthorizationTestSuite struct {
+	suite.Suite
+	channelCloseDuration time.Duration
+	logger zerolog.Logger
+
+	codec *overridableMessageEncoder
+
+	libP2PNodes []p2p.LibP2PNode
+	// senderNetwork is the networking layer instance that will be used to send the message.
+	senderNetwork network.EngineRegistry
+	// senderID is the identity of the node sending the message
+	senderID *flow.Identity
+	// receiverNetwork is the networking layer instance that will be used to receive the message.
+	receiverNetwork network.EngineRegistry
+	// receiverID is the identity of the node receiving the message
+	receiverID *flow.Identity
+	// providers are the id providers generated at the beginning of a test run
+	providers []*unittest.UpdatableIDProvider
+	// cancel is the cancel func from the context that was used to start the networks in a test run
+	cancel context.CancelFunc
+	sporkId flow.Identifier
+	// waitCh is the channel used to wait for the networks to perform authorization and invoke the slashing
+	// violations consumer before making mock assertions and cleaning up resources
+	waitCh chan struct{}
+}
+
+// TestUnicastAuthorizationTestSuite runs all the test methods in this test suite
+func TestUnicastAuthorizationTestSuite(t *testing.T) {
+	suite.Run(t, new(UnicastAuthorizationTestSuite))
+}
+
+func (u *UnicastAuthorizationTestSuite) SetupTest() {
+	u.logger = unittest.Logger()
+	u.channelCloseDuration = 100 * time.Millisecond
+	// this ch will allow us to wait until the expected method call happens before shutting down networks.
+	u.waitCh = make(chan struct{})
+}
+
+func (u *UnicastAuthorizationTestSuite) TearDownTest() {
+	u.stopNetworksAndLibp2pNodes()
+}
+
+// setupNetworks will set up the sender and receiver networks with the given slashing violations consumer.
+func (u *UnicastAuthorizationTestSuite) setupNetworks(slashingViolationsConsumer network.ViolationsConsumer) {
+	u.sporkId = unittest.IdentifierFixture()
+	ids, libP2PNodes := testutils.LibP2PNodeForNetworkFixture(u.T(), u.sporkId, 2)
+	u.codec = newOverridableMessageEncoder(unittest.NetworkCodec())
+	nets, providers := testutils.NetworksFixture(
+		u.T(),
+		u.sporkId,
+		ids,
+		libP2PNodes,
+		underlay.WithCodec(u.codec),
+		underlay.WithSlashingViolationConsumerFactory(func(_ network.ConduitAdapter) network.ViolationsConsumer {
+			return slashingViolationsConsumer
+		}))
+	require.Len(u.T(), ids, 2)
+	require.Len(u.T(), providers, 2)
+	require.Len(u.T(), nets, 2)
+
+	u.senderNetwork = nets[0]
+	u.receiverNetwork = nets[1]
+	u.senderID = ids[0]
+	u.receiverID = ids[1]
+	u.providers = providers
+	u.libP2PNodes = libP2PNodes
+}
+
+// startNetworksAndLibp2pNodes will start both sender and receiver networks with an irrecoverable signaler context and set the context cancel func.
+func (u *UnicastAuthorizationTestSuite) startNetworksAndLibp2pNodes() {
+	ctx, cancel := context.WithCancel(context.Background())
+	sigCtx := irrecoverable.NewMockSignalerContext(u.T(), ctx)
+
+	testutils.StartNodes(sigCtx, u.T(), u.libP2PNodes)
+	testutils.StartNetworks(sigCtx, u.T(), []network.EngineRegistry{u.senderNetwork, u.receiverNetwork})
+	unittest.RequireComponentsReadyBefore(u.T(), 1*time.Second, u.senderNetwork, u.receiverNetwork)
+
+	u.cancel = cancel
+}
+
+// stopNetworksAndLibp2pNodes will stop all networks and libp2p nodes and wait for them to stop.
+func (u *UnicastAuthorizationTestSuite) stopNetworksAndLibp2pNodes() {
+	u.cancel() // cancel context to stop libp2p nodes.
+
+	testutils.StopComponents(u.T(), []network.EngineRegistry{u.senderNetwork, u.receiverNetwork}, 1*time.Second)
+	unittest.RequireComponentsDoneBefore(u.T(), 1*time.Second, u.senderNetwork, u.receiverNetwork)
+}
+
+// TestUnicastAuthorization_UnstakedPeer tests that messages sent via unicast by an unstaked peer are correctly rejected.
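Every test in this new file follows the same arm-and-wait shape: build the exact Violation the consumer must observe, arm the mock so its Run hook closes waitCh, start the networks, send a single unicast, and block on waitCh via RequireCloseBefore. A hedged distillation of that shape; the helper expectViolation and its callback parameter are illustrative:

package cohort2

import (
	mockery "github.com/stretchr/testify/mock"

	"github.com/onflow/flow-go/network"
	mocknetwork "github.com/onflow/flow-go/network/mock"
)

// expectViolation arms the mock consumer so that the single expected callback
// (e.g. "OnUnAuthorizedSenderError") closes done exactly once; the caller then
// blocks on done with a deadline instead of sleeping.
func expectViolation(consumer *mocknetwork.ViolationsConsumer, callback string, expected *network.Violation) <-chan struct{} {
	done := make(chan struct{})
	consumer.On(callback, expected).
		Return(nil).
		Once().
		Run(func(_ mockery.Arguments) { close(done) })
	return done
}

Closing a channel from the mock's Run hook keeps the tests event-driven: they return as soon as the violation is reported and fail fast if it never is.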
+func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnstakedPeer() {
+	slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T())
+	u.setupNetworks(slashingViolationsConsumer)
+
+	expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID)
+	require.NoError(u.T(), err)
+
+	var nilID *flow.Identity
+	expectedViolation := &network.Violation{
+		Identity: nilID, // because the peer will be unverified this identity will be nil
+		PeerID: p2plogging.PeerId(expectedSenderPeerID),
+		MsgType: "", // message will not be decoded before OnSenderEjectedError is logged, we won't log message type
+		Channel: channels.TestNetworkChannel, // message will not be decoded before OnSenderEjectedError is logged, we won't log peer ID
+		Protocol: message.ProtocolTypeUnicast,
+		Err: validator.ErrIdentityUnverified,
+	}
+	slashingViolationsConsumer.On("OnUnAuthorizedSenderError", expectedViolation).Return(nil).Once().Run(func(args mockery.Arguments) {
+		close(u.waitCh)
+	})
+
+	u.startNetworksAndLibp2pNodes()
+
+	// overriding the identity provider of the receiver node to return an empty identity list so that the
+	// sender node looks unstaked to the receiver's networking layer, which hence reports an UnAuthorizedSenderError upon receiving a message
+	// from the sender node
+	u.providers[1].SetIdentities(nil)
+
+	_, err = u.receiverNetwork.Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{})
+	require.NoError(u.T(), err)
+
+	senderCon, err := u.senderNetwork.Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{})
+	require.NoError(u.T(), err)
+
+	// send message via unicast
+	err = senderCon.Unicast(&libp2pmessage.TestMessage{
+		Text: string("hello"),
+	}, u.receiverID.NodeID)
+	require.NoError(u.T(), err)
+
+	// wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens
+	unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time")
+}
+
+// TestUnicastAuthorization_EjectedPeer tests that messages sent via unicast by an ejected peer are correctly rejected.
+func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_EjectedPeer() {
+	slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T())
+	u.setupNetworks(slashingViolationsConsumer)
+	// NOTE: set up an ejected identity
+	u.senderID.EpochParticipationStatus = flow.EpochParticipationStatusEjected
+
+	// overriding the identity provider of the receiver node to return the ejected identity so that the
+	// sender node looks ejected to the receiver's networking layer, which hence reports a SenderEjectedError upon receiving a message
+	// from the sender node
+	u.providers[1].SetIdentities(flow.IdentityList{u.senderID})
+
+	expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID)
+	require.NoError(u.T(), err)
+
+	expectedViolation := &network.Violation{
+		Identity: u.senderID, // we expect this method to be called with the ejected identity
+		OriginID: u.senderID.NodeID,
+		PeerID: p2plogging.PeerId(expectedSenderPeerID),
+		MsgType: "", // message will not be decoded before OnSenderEjectedError is logged, we won't log message type
+		Channel: channels.TestNetworkChannel, // message will not be decoded before OnSenderEjectedError is logged, we won't log peer ID
+		Protocol: message.ProtocolTypeUnicast,
+		Err: validator.ErrSenderEjected,
+	}
+	slashingViolationsConsumer.On("OnSenderEjectedError", expectedViolation).
+		Return(nil).Once().Run(func(args mockery.Arguments) {
+		close(u.waitCh)
+	})
+
+	u.startNetworksAndLibp2pNodes()
+
+	_, err = u.receiverNetwork.Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{})
+	require.NoError(u.T(), err)
+
+	senderCon, err := u.senderNetwork.Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{})
+	require.NoError(u.T(), err)
+
+	// send message via unicast
+	err = senderCon.Unicast(&libp2pmessage.TestMessage{
+		Text: string("hello"),
+	}, u.receiverID.NodeID)
+	require.NoError(u.T(), err)
+
+	// wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens
+	unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time")
+}
+
+// TestUnicastAuthorization_UnauthorizedPeer tests that messages sent via unicast by an unauthorized peer are correctly rejected.
+func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnauthorizedPeer() {
+	slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T())
+	u.setupNetworks(slashingViolationsConsumer)
+
+	expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID)
+	require.NoError(u.T(), err)
+
+	expectedViolation := &network.Violation{
+		Identity: u.senderID,
+		OriginID: u.senderID.NodeID,
+		PeerID: p2plogging.PeerId(expectedSenderPeerID),
+		MsgType: "*message.TestMessage",
+		Channel: channels.ConsensusCommittee,
+		Protocol: message.ProtocolTypeUnicast,
+		Err: message.ErrUnauthorizedMessageOnChannel,
+	}
+
+	slashingViolationsConsumer.On("OnUnAuthorizedSenderError", expectedViolation).
+		Return(nil).Once().Run(func(args mockery.Arguments) {
+		close(u.waitCh)
+	})
+
+	u.startNetworksAndLibp2pNodes()
+
+	_, err = u.receiverNetwork.Register(channels.ConsensusCommittee, &mocknetwork.MessageProcessor{})
+	require.NoError(u.T(), err)
+
+	senderCon, err := u.senderNetwork.Register(channels.ConsensusCommittee, &mocknetwork.MessageProcessor{})
+	require.NoError(u.T(), err)
+
+	// send message via unicast; a test message must only be unicasted on the TestNetworkChannel, not on the ConsensusCommittee channel,
+	// so we expect an unauthorized sender error
+	err = senderCon.Unicast(&libp2pmessage.TestMessage{
+		Text: string("hello"),
+	}, u.receiverID.NodeID)
+	require.NoError(u.T(), err)
+
+	// wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens
+	unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time")
+}
+
+// TestUnicastAuthorization_UnknownMsgCode tests that messages sent via unicast with an unknown message code are correctly rejected.
+func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnknownMsgCode() {
+	slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T())
+	u.setupNetworks(slashingViolationsConsumer)
+
+	expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID)
+	require.NoError(u.T(), err)
+
+	invalidMessageCode := codec.MessageCode(byte('X'))
+	// register a custom encoder that encodes the message with an invalid message code when encoding a string.
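+	// (the leading byte of an envelope produced by the default codec carries the
+	// message-code discriminator, so overwriting e[0] below yields a payload whose
+	// code maps to no registered message type; the receiver's decoder is then
+	// expected to surface an UnknownMsgCodeErr)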
+ u.codec.RegisterEncoder(reflect.TypeOf(""), func(v interface{}) ([]byte, error) { + e, err := unittest.NetworkCodec().Encode(&libp2pmessage.TestMessage{ + Text: v.(string), + }) + require.NoError(u.T(), err) + // manipulate message code byte + invalidMessageCode := codec.MessageCode(byte('X')) + e[0] = invalidMessageCode.Uint8() + return e, nil + }) + + var nilID *flow.Identity + expectedViolation := &network.Violation{ + Identity: nilID, + PeerID: p2plogging.PeerId(expectedSenderPeerID), + MsgType: "", + Channel: channels.TestNetworkChannel, + Protocol: message.ProtocolTypeUnicast, + Err: codec.NewUnknownMsgCodeErr(invalidMessageCode), + } + + slashingViolationsConsumer.On("OnUnknownMsgTypeError", expectedViolation). + Return(nil).Once().Run(func(args mockery.Arguments) { + close(u.waitCh) + }) + + u.startNetworksAndLibp2pNodes() + + _, err = u.receiverNetwork.Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{}) + require.NoError(u.T(), err) + + senderCon, err := u.senderNetwork.Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{}) + require.NoError(u.T(), err) + + // send message via unicast + err = senderCon.Unicast("hello!", u.receiverID.NodeID) + require.NoError(u.T(), err) + + // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens + unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") +} + +// TestUnicastAuthorization_WrongMsgCode tests that messages sent via unicast with a message code that does not match the underlying message type are correctly rejected. +func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_WrongMsgCode() { + slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) + u.setupNetworks(slashingViolationsConsumer) + + expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) + require.NoError(u.T(), err) + + modifiedMessageCode := codec.CodeDKGMessage + // register a custom encoder that overrides the message code when encoding a TestMessage. + u.codec.RegisterEncoder(reflect.TypeOf(&libp2pmessage.TestMessage{}), func(v interface{}) ([]byte, error) { + e, err := unittest.NetworkCodec().Encode(v) + require.NoError(u.T(), err) + e[0] = modifiedMessageCode.Uint8() + return e, nil + }) + + expectedViolation := &network.Violation{ + Identity: u.senderID, + OriginID: u.senderID.NodeID, + PeerID: p2plogging.PeerId(expectedSenderPeerID), + MsgType: "*messages.DKGMessage", + Channel: channels.TestNetworkChannel, + Protocol: message.ProtocolTypeUnicast, + Err: message.ErrUnauthorizedMessageOnChannel, + } + + slashingViolationsConsumer.On("OnUnAuthorizedSenderError", expectedViolation). 
+ Return(nil).Once().Run(func(args mockery.Arguments) { + close(u.waitCh) + }) + + u.startNetworksAndLibp2pNodes() + + _, err = u.receiverNetwork.Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{}) + require.NoError(u.T(), err) + + senderCon, err := u.senderNetwork.Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{}) + require.NoError(u.T(), err) + + // send message via unicast + err = senderCon.Unicast(&libp2pmessage.TestMessage{ + Text: string("hello"), + }, u.receiverID.NodeID) + require.NoError(u.T(), err) + + // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens + unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") +} + +// TestUnicastAuthorization_PublicChannel tests that messages sent via unicast on a public channel are not rejected for any reason. +func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_PublicChannel() { + slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) + u.setupNetworks(slashingViolationsConsumer) + u.startNetworksAndLibp2pNodes() + + msg := &flow.TestMessage{ + Text: string("hello"), + } + + // mock a message processor that will receive the message. + receiverEngine := &mocknetwork.MessageProcessor{} + receiverEngine.On("Process", channels.PublicPushBlocks, u.senderID.NodeID, msg).Run( + func(args mockery.Arguments) { + close(u.waitCh) + }).Return(nil).Once() + _, err := u.receiverNetwork.Register(channels.PublicPushBlocks, receiverEngine) + require.NoError(u.T(), err) + + senderCon, err := u.senderNetwork.Register(channels.PublicPushBlocks, &mocknetwork.MessageProcessor{}) + require.NoError(u.T(), err) + + // send message via unicast + err = senderCon.Unicast(&libp2pmessage.TestMessage{ + Text: string("hello"), + }, u.receiverID.NodeID) + require.NoError(u.T(), err) + + // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens + unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") +} + +// TestUnicastAuthorization_UnauthorizedUnicastOnChannel tests that messages sent via unicast that are not authorized for unicast are rejected. +func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnauthorizedUnicastOnChannel() { + slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) + u.setupNetworks(slashingViolationsConsumer) + + // set sender id role to RoleConsensus to avoid unauthorized sender validation error + u.senderID.Role = flow.RoleConsensus + + expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) + require.NoError(u.T(), err) + + expectedViolation := &network.Violation{ + Identity: u.senderID, + OriginID: u.senderID.NodeID, + PeerID: p2plogging.PeerId(expectedSenderPeerID), + MsgType: "*messages.Proposal", + Channel: channels.ConsensusCommittee, + Protocol: message.ProtocolTypeUnicast, + Err: message.ErrUnauthorizedUnicastOnChannel, + } + + slashingViolationsConsumer.On("OnUnauthorizedUnicastOnChannel", expectedViolation). 
+ Return(nil).Once().Run(func(args mockery.Arguments) { + close(u.waitCh) + }) + + u.startNetworksAndLibp2pNodes() + + _, err = u.receiverNetwork.Register(channels.ConsensusCommittee, &mocknetwork.MessageProcessor{}) + require.NoError(u.T(), err) + + senderCon, err := u.senderNetwork.Register(channels.ConsensusCommittee, &mocknetwork.MessageProcessor{}) + require.NoError(u.T(), err) + + // messages.Proposal is not authorized to be sent via unicast over the ConsensusCommittee channel + payload := messages.Proposal(*unittest.ProposalFixture()) + // send message via unicast + err = senderCon.Unicast(&payload, u.receiverID.NodeID) + require.NoError(u.T(), err) + + // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens + unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") +} + +// TestUnicastAuthorization_ReceiverHasNoSubscription tests that messages sent via unicast are rejected on the receiver end if the receiver does not have a subscription +// to the channel of the message. +func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_ReceiverHasNoSubscription() { + slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) + u.setupNetworks(slashingViolationsConsumer) + + expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) + require.NoError(u.T(), err) + + expectedViolation := &network.Violation{ + Identity: nil, + PeerID: p2plogging.PeerId(expectedSenderPeerID), + MsgType: "*message.TestMessage", + Channel: channels.TestNetworkChannel, + Protocol: message.ProtocolTypeUnicast, + Err: underlay.ErrUnicastMsgWithoutSub, + } + + slashingViolationsConsumer.On("OnUnauthorizedUnicastOnChannel", expectedViolation). + Return(nil).Once().Run(func(args mockery.Arguments) { + close(u.waitCh) + }) + + u.startNetworksAndLibp2pNodes() + + senderCon, err := u.senderNetwork.Register(channels.TestNetworkChannel, &mocknetwork.MessageProcessor{}) + require.NoError(u.T(), err) + + // send message via unicast + err = senderCon.Unicast(&libp2pmessage.TestMessage{ + Text: string("hello"), + }, u.receiverID.NodeID) + require.NoError(u.T(), err) + + // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens + unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") +} + +// TestUnicastAuthorization_ReceiverHasSubscription tests that messages sent via unicast are processed on the receiver end if the receiver does have a subscription +// to the channel of the message. +func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_ReceiverHasSubscription() { + slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) + u.setupNetworks(slashingViolationsConsumer) + u.startNetworksAndLibp2pNodes() + + msg := &messages.EntityRequest{ + EntityIDs: unittest.IdentifierListFixture(10), + } + + internal, err := msg.ToInternal() + require.NoError(u.T(), err) + + // both sender and receiver must have an authorized role to send and receive messages on the ConsensusCommittee channel. 
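+	// (the exchange below actually runs on channels.RequestReceiptsByBlockID; the
+	// roles are chosen so that the sender is authorized to send and the receiver
+	// is authorized to receive on that channel)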
+ u.senderID.Role = flow.RoleConsensus + u.receiverID.Role = flow.RoleExecution + + receiverEngine := &mocknetwork.MessageProcessor{} + receiverEngine.On("Process", channels.RequestReceiptsByBlockID, u.senderID.NodeID, internal).Run( + func(args mockery.Arguments) { + close(u.waitCh) + }).Return(nil).Once() + _, err = u.receiverNetwork.Register(channels.RequestReceiptsByBlockID, receiverEngine) + require.NoError(u.T(), err) + + senderCon, err := u.senderNetwork.Register(channels.RequestReceiptsByBlockID, &mocknetwork.MessageProcessor{}) + require.NoError(u.T(), err) + + // send message via unicast + err = senderCon.Unicast(msg, u.receiverID.NodeID) + require.NoError(u.T(), err) + + // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens + unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") +} + +// overridableMessageEncoder is a codec that allows to override the encoder for a specific type only for sake of testing. +// We specifically use this to override the encoder for the TestMessage type to encode it with an invalid message code. +type overridableMessageEncoder struct { + codec network.Codec + specificEncoder map[reflect.Type]func(interface{}) ([]byte, error) +} + +var _ network.Codec = (*overridableMessageEncoder)(nil) + +func newOverridableMessageEncoder(codec network.Codec) *overridableMessageEncoder { + return &overridableMessageEncoder{ + codec: codec, + specificEncoder: make(map[reflect.Type]func(interface{}) ([]byte, error)), + } +} + +// RegisterEncoder registers an encoder for a specific type, overriding the default encoder for that type. +func (u *overridableMessageEncoder) RegisterEncoder(t reflect.Type, encoder func(interface{}) ([]byte, error)) { + u.specificEncoder[t] = encoder +} + +// NewEncoder creates a new encoder. +func (u *overridableMessageEncoder) NewEncoder(w io.Writer) network.Encoder { + return u.codec.NewEncoder(w) +} + +// NewDecoder creates a new decoder. +func (u *overridableMessageEncoder) NewDecoder(r io.Reader) network.Decoder { + return u.codec.NewDecoder(r) +} + +// Encode encodes a value into a byte slice. If a specific encoder is registered for the type of the value, it will be used. +// Otherwise, the default encoder will be used. +func (u *overridableMessageEncoder) Encode(v interface{}) ([]byte, error) { + if encoder, ok := u.specificEncoder[reflect.TypeOf(v)]; ok { + return encoder(v) + } + return u.codec.Encode(v) +} + +// Decode decodes a byte slice into a value. It uses the default decoder. 
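+// Decoding is left to the default decoder here: these tests only need to
+// corrupt what the sender emits, while the receiver must run the real decoder
+// so that the validation paths under test are actually exercised.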
+func (u *overridableMessageEncoder) Decode(data []byte) (messages.UntrustedMessage, error) { + return u.codec.Decode(data) +} diff --git a/network/test/meshengine_test.go b/network/test/meshengine_test.go deleted file mode 100644 index 221bba44bc2..00000000000 --- a/network/test/meshengine_test.go +++ /dev/null @@ -1,491 +0,0 @@ -package test - -import ( - "context" - "fmt" - "math/rand" - "os" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/onflow/flow-go/network/p2p" - - "github.com/ipfs/go-log" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/network/mocknetwork" - "github.com/onflow/flow-go/network/p2p/middleware" - "github.com/onflow/flow-go/network/p2p/p2pnode" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/libp2p/message" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/module/observable" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/internal/testutils" - "github.com/onflow/flow-go/utils/unittest" -) - -// MeshEngineTestSuite evaluates the message delivery functionality for the overlay -// of engines over a complete graph -type MeshEngineTestSuite struct { - suite.Suite - testutils.ConduitWrapper // used as a wrapper around conduit methods - nets []network.Network // used to keep track of the networks - mws []network.Middleware // used to keep track of the middlewares - ids flow.IdentityList // used to keep track of the identifiers associated with networks - obs chan string // used to keep track of Protect events tagged by pubsub messages - cancel context.CancelFunc -} - -// TestMeshNetTestSuite runs all tests in this test suit -func TestMeshNetTestSuite(t *testing.T) { - suite.Run(t, new(MeshEngineTestSuite)) -} - -// SetupTest is executed prior to each test in this test suit -// it creates and initializes a set of network instances -func (suite *MeshEngineTestSuite) SetupTest() { - // defines total number of nodes in our network (minimum 3 needed to use 1-k messaging) - const count = 10 - logger := zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) - log.SetAllLoggers(log.LevelError) - - // set up a channel to receive pubsub tags from connManagers of the nodes - var obs []observable.Observable - peerChannel := make(chan string) - ob := tagsObserver{ - tags: peerChannel, - log: logger, - } - - ctx, cancel := context.WithCancel(context.Background()) - suite.cancel = cancel - - signalerCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx) - - var nodes []p2p.LibP2PNode - suite.ids, nodes, suite.mws, suite.nets, obs = testutils.GenerateIDsMiddlewaresNetworks( - suite.T(), - count, - logger, - unittest.NetworkCodec(), - mocknetwork.NewViolationsConsumer(suite.T()), - testutils.WithIdentityOpts(unittest.WithAllRoles()), - ) - - testutils.StartNodesAndNetworks(signalerCtx, suite.T(), nodes, suite.nets, 100*time.Millisecond) - - for _, observableConnMgr := range obs { - observableConnMgr.Subscribe(&ob) - } - suite.obs = peerChannel -} - -// TearDownTest closes the networks within a specified timeout -func (suite *MeshEngineTestSuite) TearDownTest() { - suite.cancel() - testutils.StopComponents(suite.T(), suite.nets, 3*time.Second) - testutils.StopComponents(suite.T(), suite.mws, 3*time.Second) -} - -// 
TestAllToAll_Publish evaluates the network of mesh engines against allToAllScenario scenario. -// Network instances during this test use their Publish method to disseminate messages. -func (suite *MeshEngineTestSuite) TestAllToAll_Publish() { - suite.allToAllScenario(suite.Publish) -} - -// TestAllToAll_Multicast evaluates the network of mesh engines against allToAllScenario scenario. -// Network instances during this test use their Multicast method to disseminate messages. -func (suite *MeshEngineTestSuite) TestAllToAll_Multicast() { - suite.allToAllScenario(suite.Multicast) -} - -// TestAllToAll_Unicast evaluates the network of mesh engines against allToAllScenario scenario. -// Network instances during this test use their Unicast method to disseminate messages. -func (suite *MeshEngineTestSuite) TestAllToAll_Unicast() { - suite.allToAllScenario(suite.Unicast) -} - -// TestTargetedValidators_Unicast tests if only the intended recipients in a 1-k messaging actually receive the message. -// The messages are disseminated through the Unicast method of conduits. -func (suite *MeshEngineTestSuite) TestTargetedValidators_Unicast() { - suite.targetValidatorScenario(suite.Unicast) -} - -// TestTargetedValidators_Multicast tests if only the intended recipients in a 1-k messaging actually receive the -// message. -// The messages are disseminated through the Multicast method of conduits. -func (suite *MeshEngineTestSuite) TestTargetedValidators_Multicast() { - suite.targetValidatorScenario(suite.Multicast) -} - -// TestTargetedValidators_Publish tests if only the intended recipients in a 1-k messaging actually receive the message. -// The messages are disseminated through the Multicast method of conduits. -func (suite *MeshEngineTestSuite) TestTargetedValidators_Publish() { - suite.targetValidatorScenario(suite.Publish) -} - -// TestMaxMessageSize_Unicast evaluates the messageSizeScenario scenario using -// the Unicast method of conduits. -func (suite *MeshEngineTestSuite) TestMaxMessageSize_Unicast() { - suite.messageSizeScenario(suite.Unicast, middleware.DefaultMaxUnicastMsgSize) -} - -// TestMaxMessageSize_Multicast evaluates the messageSizeScenario scenario using -// the Multicast method of conduits. -func (suite *MeshEngineTestSuite) TestMaxMessageSize_Multicast() { - suite.messageSizeScenario(suite.Multicast, p2pnode.DefaultMaxPubSubMsgSize) -} - -// TestMaxMessageSize_Publish evaluates the messageSizeScenario scenario using the -// Publish method of conduits. 
-func (suite *MeshEngineTestSuite) TestMaxMessageSize_Publish() { - suite.messageSizeScenario(suite.Publish, p2pnode.DefaultMaxPubSubMsgSize) -} - -// TestUnregister_Publish tests that an engine cannot send any message using Publish -// or receive any messages after the conduit is closed -func (suite *MeshEngineTestSuite) TestUnregister_Publish() { - suite.conduitCloseScenario(suite.Publish) -} - -// TestUnregister_Publish tests that an engine cannot send any message using Multicast -// or receive any messages after the conduit is closed -func (suite *MeshEngineTestSuite) TestUnregister_Multicast() { - suite.conduitCloseScenario(suite.Multicast) -} - -// TestUnregister_Publish tests that an engine cannot send any message using Unicast -// or receive any messages after the conduit is closed -func (suite *MeshEngineTestSuite) TestUnregister_Unicast() { - suite.conduitCloseScenario(suite.Unicast) -} - -// allToAllScenario creates a complete mesh of the engines -// each engine x then sends a "hello from node x" to other engines -// it evaluates the correctness of message delivery as well as content of the message -func (suite *MeshEngineTestSuite) allToAllScenario(send testutils.ConduitSendWrapperFunc) { - // allows nodes to find each other in case of Mulitcast and Publish - testutils.OptionalSleep(send) - - // creating engines - count := len(suite.nets) - engs := make([]*testutils.MeshEngine, 0) - wg := sync.WaitGroup{} - - // logs[i][j] keeps the message that node i sends to node j - logs := make(map[int][]string) - for i := range suite.nets { - eng := testutils.NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, channels.TestNetworkChannel) - engs = append(engs, eng) - logs[i] = make([]string, 0) - } - - // allow nodes to heartbeat and discover each other - // each node will register ~D protect messages, where D is the default out-degree - for i := 0; i < pubsub.GossipSubD*count; i++ { - select { - case <-suite.obs: - case <-time.After(8 * time.Second): - assert.FailNow(suite.T(), "could not receive pubsub tag indicating mesh formed") - } - } - - // Each node broadcasting a message to all others - for i := range suite.nets { - event := &message.TestMessage{ - Text: fmt.Sprintf("hello from node %v", i), - } - - // others keeps the identifier of all nodes except ith node - others := suite.ids.Filter(filter.Not(filter.HasNodeID(suite.ids[i].NodeID))).NodeIDs() - require.NoError(suite.Suite.T(), send(event, engs[i].Con, others...)) - wg.Add(count - 1) - } - - // fires a goroutine for each engine that listens to incoming messages - for i := range suite.nets { - go func(e *testutils.MeshEngine) { - for x := 0; x < count-1; x++ { - <-e.Received - wg.Done() - } - }(engs[i]) - } - - unittest.AssertReturnsBefore(suite.Suite.T(), wg.Wait, 30*time.Second) - - // evaluates that all messages are received - for index, e := range engs { - // confirms the number of received messages at each node - if len(e.Event) != (count - 1) { - assert.Fail(suite.Suite.T(), - fmt.Sprintf("Message reception mismatch at node %v. 
Expected: %v, Got: %v", index, count-1, len(e.Event))) - } - - for i := 0; i < count-1; i++ { - assertChannelReceived(suite.T(), e, channels.TestNetworkChannel) - } - - // extracts failed messages - receivedIndices, err := extractSenderID(count, e.Event, "hello from node") - require.NoError(suite.Suite.T(), err) - - for j := 0; j < count; j++ { - // evaluates self-gossip - if j == index { - assert.False(suite.Suite.T(), (receivedIndices)[index], fmt.Sprintf("self gossiped for node %v detected", index)) - } - // evaluates content - if !(receivedIndices)[j] { - assert.False(suite.Suite.T(), (receivedIndices)[index], - fmt.Sprintf("Message not found in node #%v's messages. Expected: Message from node %v. Got: No message", index, j)) - } - } - } -} - -// targetValidatorScenario sends a single message from last node to the first half of the nodes -// based on identifiers list. -// It then verifies that only the intended recipients receive the message. -// Message dissemination is done using the send wrapper of conduit. -func (suite *MeshEngineTestSuite) targetValidatorScenario(send testutils.ConduitSendWrapperFunc) { - // creating engines - count := len(suite.nets) - engs := make([]*testutils.MeshEngine, 0) - wg := sync.WaitGroup{} - - for i := range suite.nets { - eng := testutils.NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, channels.TestNetworkChannel) - engs = append(engs, eng) - } - - // allow nodes to heartbeat and discover each other - // each node will register ~D protect messages, where D is the default out-degree - for i := 0; i < pubsub.GossipSubD*count; i++ { - select { - case <-suite.obs: - case <-time.After(2 * time.Second): - assert.FailNow(suite.T(), "could not receive pubsub tag indicating mesh formed") - } - } - - // choose half of the nodes as target - allIds := suite.ids.NodeIDs() - var targets []flow.Identifier - // create a target list of half of the nodes - for i := 0; i < len(allIds)/2; i++ { - targets = append(targets, allIds[i]) - } - - // node 0 broadcasting a message to all targets - event := &message.TestMessage{ - Text: "hello from node 0", - } - require.NoError(suite.Suite.T(), send(event, engs[len(engs)-1].Con, targets...)) - - // fires a goroutine for all engines to listens for the incoming message - for i := 0; i < len(allIds)/2; i++ { - wg.Add(1) - go func(e *testutils.MeshEngine) { - <-e.Received - wg.Done() - }(engs[i]) - } - - unittest.AssertReturnsBefore(suite.T(), wg.Wait, 10*time.Second) - - // evaluates that all messages are received - for index, e := range engs { - if index < len(engs)/2 { - assert.Len(suite.Suite.T(), e.Event, 1, fmt.Sprintf("message not received %v", index)) - assertChannelReceived(suite.T(), e, channels.TestNetworkChannel) - } else { - assert.Len(suite.Suite.T(), e.Event, 0, fmt.Sprintf("message received when none was expected %v", index)) - } - } -} - -// messageSizeScenario provides a scenario to check if a message of maximum permissible size can be sent -// successfully. -// It broadcasts a message from the first node to all the nodes in the identifiers list using send wrapper function. 
-func (suite *MeshEngineTestSuite) messageSizeScenario(send testutils.ConduitSendWrapperFunc, size uint) {
-	// creating engines
-	count := len(suite.nets)
-	engs := make([]*testutils.MeshEngine, 0)
-	wg := sync.WaitGroup{}
-
-	for i := range suite.nets {
-		eng := testutils.NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, channels.TestNetworkChannel)
-		engs = append(engs, eng)
-	}
-
-	// allow nodes to heartbeat and discover each other
-	// each node will register ~D protect messages per mesh setup, where D is the default out-degree
-	for i := 0; i < pubsub.GossipSubD*count; i++ {
-		select {
-		case <-suite.obs:
-		case <-time.After(8 * time.Second):
-			assert.FailNow(suite.T(), "could not receive pubsub tag indicating mesh formed")
-		}
-	}
-	// others keeps the identifier of all nodes except the node that is the sender.
-	others := suite.ids.Filter(filter.Not(filter.HasNodeID(suite.ids[0].NodeID))).NodeIDs()
-
-	// generates and sends an event of custom size to the network
-	payload := testutils.NetworkPayloadFixture(suite.T(), size)
-	event := &message.TestMessage{
-		Text: string(payload),
-	}
-
-	require.NoError(suite.T(), send(event, engs[0].Con, others...))
-
-	// fires a goroutine for all engines (except sender) to listen for the incoming message
-	for _, eng := range engs[1:] {
-		wg.Add(1)
-		go func(e *testutils.MeshEngine) {
-			<-e.Received
-			wg.Done()
-		}(eng)
-	}
-
-	unittest.AssertReturnsBefore(suite.Suite.T(), wg.Wait, 30*time.Second)
-
-	// evaluates that all messages are received
-	for index, e := range engs[1:] {
-		assert.Len(suite.Suite.T(), e.Event, 1, "message not received by engine %d", index+1)
-		assertChannelReceived(suite.T(), e, channels.TestNetworkChannel)
-	}
-}
-
-// conduitCloseScenario tests that, after a Conduit is closed, an engine can neither send nor receive a message on that channel.
-func (suite *MeshEngineTestSuite) conduitCloseScenario(send testutils.ConduitSendWrapperFunc) {
-
-	testutils.OptionalSleep(send)
-
-	// creating engines
-	count := len(suite.nets)
-	engs := make([]*testutils.MeshEngine, 0)
-	wg := sync.WaitGroup{}
-
-	for i := range suite.nets {
-		eng := testutils.NewMeshEngine(suite.Suite.T(), suite.nets[i], count-1, channels.TestNetworkChannel)
-		engs = append(engs, eng)
-	}
-
-	// allow nodes to heartbeat and discover each other
-	// each node will register ~D protect messages, where D is the default out-degree
-	for i := 0; i < pubsub.GossipSubD*count; i++ {
-		select {
-		case <-suite.obs:
-		case <-time.After(2 * time.Second):
-			assert.FailNow(suite.T(), "could not receive pubsub tag indicating mesh formed")
-		}
-	}
-
-	// unregister a random engine from the test topic by calling close on its conduit
-	unregisterIndex := rand.Intn(count)
-	err := engs[unregisterIndex].Con.Close()
-	assert.NoError(suite.T(), err)
-
-	// waits long enough for the peer manager to unsubscribe the node from the topic;
-	// while libp2p is unsubscribing the node, the topology gets unstable
-	// and connections to the node may be refused (although very unlikely).
-	time.Sleep(2 * time.Second)
-
-	// each node attempts to broadcast a message to all others
-	for i := range suite.nets {
-		event := &message.TestMessage{
-			Text: fmt.Sprintf("hello from node %v", i),
-		}
-
-		// others keeps the identifier of all nodes except the ith node and the node that unregistered from the topic.
-		// nodes without valid topic registration for a channel will reject messages on that channel via unicast.
-		others := suite.ids.Filter(filter.Not(filter.HasNodeID(suite.ids[i].NodeID, suite.ids[unregisterIndex].NodeID))).NodeIDs()
-
-		if i == unregisterIndex {
-			// assert that unsubscribed engine cannot publish on that topic
-			require.Error(suite.Suite.T(), send(event, engs[i].Con, others...))
-			continue
-		}
-
-		require.NoError(suite.Suite.T(), send(event, engs[i].Con, others...))
-	}
-
-	// fire a goroutine to listen for incoming messages for each engine except for the one which unregistered
-	for i := range suite.nets {
-		if i == unregisterIndex {
-			continue
-		}
-		wg.Add(1)
-		go func(e *testutils.MeshEngine) {
-			expectedMsgCnt := count - 2 // count less self and unsubscribed engine
-			for x := 0; x < expectedMsgCnt; x++ {
-				<-e.Received
-			}
-			wg.Done()
-		}(engs[i])
-	}
-
-	// assert everyone except the unsubscribed engine received the message
-	unittest.AssertReturnsBefore(suite.Suite.T(), wg.Wait, 2*time.Second)
-
-	// assert that the unregistered engine did not receive the message
-	unregisteredEng := engs[unregisterIndex]
-	assert.Emptyf(suite.T(), unregisteredEng.Received, "unregistered engine received the topic message")
-}
-
-// assertChannelReceived asserts that the given channel was received on the given engine
-func assertChannelReceived(t *testing.T, e *testutils.MeshEngine, channel channels.Channel) {
-	unittest.AssertReturnsBefore(t, func() {
-		assert.Equal(t, channel, <-e.Channel)
-	}, 100*time.Millisecond)
-}
-
-// extractSenderID returns a bool array with the index i true if there is a message from node i in the provided messages.
-// enginesNum is the number of engines
-// events is the channel of received events
-// expectedMsgTxt is the common prefix among all the messages that we expect to receive, for example
-// we expect to receive "hello from node x" in this test, and then expectedMsgTxt is "hello from node"
-func extractSenderID(enginesNum int, events chan interface{}, expectedMsgTxt string) ([]bool, error) {
-	indices := make([]bool, enginesNum)
-	expectedMsgSize := len(expectedMsgTxt)
-	for i := 0; i < enginesNum-1; i++ {
-		var event interface{}
-		select {
-		case event = <-events:
-		default:
-			continue
-		}
-		echo := event.(*message.TestMessage)
-		msg := echo.Text
-		if len(msg) < expectedMsgSize {
-			return nil, fmt.Errorf("invalid message format")
-		}
-		senderIndex := msg[expectedMsgSize:]
-		senderIndex = strings.TrimLeft(senderIndex, " ")
-		nodeID, err := strconv.Atoi(senderIndex)
-		if err != nil {
-			return nil, fmt.Errorf("could not extract the node id from: %v", msg)
-		}
-
-		if indices[nodeID] {
-			return nil, fmt.Errorf("duplicate message reception: %v", msg)
-		}
-
-		if msg == fmt.Sprintf("%s %v", expectedMsgTxt, nodeID) {
-			indices[nodeID] = true
-		}
-	}
-	return indices, nil
-}
diff --git a/network/test/middleware_test.go b/network/test/middleware_test.go
deleted file mode 100644
index 3fe9ecc042f..00000000000
--- a/network/test/middleware_test.go
+++ /dev/null
@@ -1,938 +0,0 @@
-package test
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"regexp"
-	"strings"
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/libp2p/go-libp2p/p2p/net/swarm"
-	"github.com/rs/zerolog"
-	"github.com/stretchr/testify/assert"
-	mockery "github.com/stretchr/testify/mock"
-	"github.com/stretchr/testify/require"
-	"github.com/stretchr/testify/suite"
-	"go.uber.org/atomic"
-	"golang.org/x/time/rate"
-
-	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/model/flow/filter"
-	libp2pmessage "github.com/onflow/flow-go/model/libp2p/message"
-	"github.com/onflow/flow-go/module/irrecoverable"
-	"github.com/onflow/flow-go/module/metrics"
-	"github.com/onflow/flow-go/module/observable"
-	"github.com/onflow/flow-go/network"
-	"github.com/onflow/flow-go/network/channels"
-	"github.com/onflow/flow-go/network/internal/p2pfixtures"
-	"github.com/onflow/flow-go/network/internal/testutils"
-	"github.com/onflow/flow-go/network/message"
-	"github.com/onflow/flow-go/network/mocknetwork"
-	"github.com/onflow/flow-go/network/p2p"
-	"github.com/onflow/flow-go/network/p2p/middleware"
-	"github.com/onflow/flow-go/network/p2p/p2pnode"
-	p2ptest "github.com/onflow/flow-go/network/p2p/test"
-	"github.com/onflow/flow-go/network/p2p/unicast/ratelimit"
-	"github.com/onflow/flow-go/network/p2p/utils/ratelimiter"
-	"github.com/onflow/flow-go/network/slashing"
-	"github.com/onflow/flow-go/utils/unittest"
-)
-
-const testChannel = channels.TestNetworkChannel
-
-// libp2p emits a call to `Protect` with a topic-specific tag upon establishing each peering connection in a GossipSub mesh, see:
-// https://github.com/libp2p/go-libp2p-pubsub/blob/master/tag_tracer.go
-// One way to make sure such a mesh has formed, asynchronously, in unit tests, is to wait for pubsub.GossipSubD-many such calls,
-// and that's what we do with tagsObserver.
-type tagsObserver struct {
-	tags chan string
-	log  zerolog.Logger
-}
-
-func (co *tagsObserver) OnNext(peertag interface{}) {
-	pt, ok := peertag.(testutils.PeerTag)
-
-	if ok {
-		co.tags <- fmt.Sprintf("peer: %v tag: %v", pt.Peer, pt.Tag)
-	}
-
-}
-func (co *tagsObserver) OnError(err error) {
-	co.log.Error().Err(err).Msg("Tags Observer closed on an error")
-	close(co.tags)
-}
-func (co *tagsObserver) OnComplete() {
-	close(co.tags)
-}
-
-type MiddlewareTestSuite struct {
-	suite.Suite
-	sync.RWMutex
-	size      int // used to determine number of middlewares under test
-	nodes     []p2p.LibP2PNode
-	mws       []network.Middleware // used to keep track of middlewares under test
-	ov        []*mocknetwork.Overlay
-	obs       chan string // used to keep track of Protect events tagged by pubsub messages
-	ids       []*flow.Identity
-	metrics   *metrics.NoopCollector // no-op performance monitoring simulation
-	logger    zerolog.Logger
-	providers []*testutils.UpdatableIDProvider
-
-	mwCancel context.CancelFunc
-	mwCtx    irrecoverable.SignalerContext
-
-	slashingViolationsConsumer slashing.ViolationsConsumer
-}
-
-// TestMiddlewareTestSuite runs all the test methods in this test suite
-func TestMiddlewareTestSuite(t *testing.T) {
-	t.Parallel()
-	suite.Run(t, new(MiddlewareTestSuite))
-}
-
-// SetupTest initiates the test setups prior to each test
-func (m *MiddlewareTestSuite) SetupTest() {
-	m.logger = unittest.Logger()
-
-	m.size = 2 // operates on two middlewares
-	m.metrics = metrics.NewNoopCollector()
-
-	// create and start the middlewares and inject a connection observer
-	var obs []observable.Observable
-	peerChannel := make(chan string)
-	ob := tagsObserver{
-		tags: peerChannel,
-		log:  m.logger,
-	}
-
-	m.slashingViolationsConsumer = mocknetwork.NewViolationsConsumer(m.T())
-
-	m.ids, m.nodes, m.mws, obs, m.providers = testutils.GenerateIDsAndMiddlewares(m.T(),
-		m.size,
-		m.logger,
-		unittest.NetworkCodec(),
-		m.slashingViolationsConsumer)
-
-	for _, observableConnMgr := range obs {
-		observableConnMgr.Subscribe(&ob)
-	}
-	m.obs = peerChannel
-
-	require.Len(m.Suite.T(), obs, m.size)
-	require.Len(m.Suite.T(), m.ids, m.size)
-	require.Len(m.Suite.T(), m.mws, m.size)
-
-	// create the mock overlays
-	for i := 0; i < m.size; i++ {
-		m.ov = append(m.ov, 
m.createOverlay(m.providers[i])) - } - - ctx, cancel := context.WithCancel(context.Background()) - m.mwCancel = cancel - - m.mwCtx = irrecoverable.NewMockSignalerContext(m.T(), ctx) - - testutils.StartNodes(m.mwCtx, m.T(), m.nodes, 100*time.Millisecond) - - for i, mw := range m.mws { - mw.SetOverlay(m.ov[i]) - mw.Start(m.mwCtx) - unittest.RequireComponentsReadyBefore(m.T(), 100*time.Millisecond, mw) - require.NoError(m.T(), mw.Subscribe(testChannel)) - } -} - -func (m *MiddlewareTestSuite) TearDownTest() { - m.mwCancel() - - testutils.StopComponents(m.T(), m.mws, 100*time.Millisecond) - testutils.StopComponents(m.T(), m.nodes, 100*time.Millisecond) - - m.mws = nil - m.nodes = nil - m.ov = nil - m.ids = nil - m.size = 0 -} - -// TestUpdateNodeAddresses tests that the UpdateNodeAddresses method correctly updates -// the addresses of the staked network participants. -func (m *MiddlewareTestSuite) TestUpdateNodeAddresses() { - ctx, cancel := context.WithCancel(m.mwCtx) - irrecoverableCtx := irrecoverable.NewMockSignalerContext(m.T(), ctx) - - // create a new staked identity - ids, libP2PNodes, _ := testutils.GenerateIDs(m.T(), m.logger, 1) - - mws, providers := testutils.GenerateMiddlewares(m.T(), m.logger, ids, libP2PNodes, unittest.NetworkCodec(), m.slashingViolationsConsumer) - require.Len(m.T(), ids, 1) - require.Len(m.T(), providers, 1) - require.Len(m.T(), mws, 1) - newId := ids[0] - newMw := mws[0] - - overlay := m.createOverlay(providers[0]) - overlay.On("Receive", m.ids[0].NodeID, mockery.AnythingOfType("*message.Message")).Return(nil) - newMw.SetOverlay(overlay) - - // start up nodes and peer managers - testutils.StartNodes(irrecoverableCtx, m.T(), libP2PNodes, 100*time.Millisecond) - defer testutils.StopComponents(m.T(), libP2PNodes, 100*time.Millisecond) - - newMw.Start(irrecoverableCtx) - defer testutils.StopComponents(m.T(), mws, 100*time.Millisecond) - unittest.RequireComponentsReadyBefore(m.T(), 100*time.Millisecond, newMw) - - idList := flow.IdentityList(append(m.ids, newId)) - - // needed to enable ID translation - m.providers[0].SetIdentities(idList) - - outMsg, err := network.NewOutgoingScope( - flow.IdentifierList{newId.NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: "TestUpdateNodeAddresses", - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(m.T(), err) - // message should fail to send because no address is known yet - // for the new identity - err = m.mws[0].SendDirect(outMsg) - require.True(m.T(), strings.Contains(err.Error(), swarm.ErrNoAddresses.Error())) - - // update the addresses - m.mws[0].UpdateNodeAddresses() - - // now the message should send successfully - err = m.mws[0].SendDirect(outMsg) - require.NoError(m.T(), err) - - cancel() - unittest.RequireComponentsReadyBefore(m.T(), 100*time.Millisecond, newMw) -} - -func (m *MiddlewareTestSuite) TestUnicastRateLimit_Messages() { - // limiter limit will be set to 5 events/sec the 6th event per interval will be rate limited - limit := rate.Limit(5) - - // burst per interval - burst := 5 - - messageRateLimiter := ratelimiter.NewRateLimiter(limit, burst, 3) - - // we only expect messages from the first middleware on the test suite - expectedPID, err := unittest.PeerIDFromFlowID(m.ids[0]) - require.NoError(m.T(), err) - - // the onRateLimit call back we will use to keep track of how many times a rate limit happens. 
-	rateLimits := atomic.NewUint64(0)
-
-	onRateLimit := func(peerID peer.ID, role, msgType, topic, reason string) {
-		require.Equal(m.T(), reason, ratelimit.ReasonMessageCount.String())
-		require.Equal(m.T(), expectedPID, peerID)
-		// update hook calls
-		rateLimits.Inc()
-	}
-
-	// setup rate limit distributor that will be used to track the number of rate limits via the onRateLimit callback.
-	consumer := testutils.NewRateLimiterConsumer(onRateLimit)
-	distributor := ratelimit.NewUnicastRateLimiterDistributor()
-	distributor.AddConsumer(consumer)
-
-	opts := []ratelimit.RateLimitersOption{ratelimit.WithMessageRateLimiter(messageRateLimiter), ratelimit.WithNotifier(distributor), ratelimit.WithDisabledRateLimiting(false)}
-	rateLimiters := ratelimit.NewRateLimiters(opts...)
-
-	idProvider := testutils.NewUpdatableIDProvider(m.ids)
-	// create a new staked identity
-	connGater := testutils.NewConnectionGater(idProvider, func(pid peer.ID) error {
-		if messageRateLimiter.IsRateLimited(pid) {
-			return fmt.Errorf("rate-limited peer")
-		}
-		return nil
-	})
-	ids, libP2PNodes, _ := testutils.GenerateIDs(m.T(),
-		m.logger,
-		1,
-		testutils.WithUnicastRateLimiterDistributor(distributor),
-		testutils.WithConnectionGater(connGater))
-	idProvider.SetIdentities(append(m.ids, ids...))
-
-	// create middleware
-	mws, providers := testutils.GenerateMiddlewares(m.T(),
-		m.logger,
-		ids,
-		libP2PNodes,
-		unittest.NetworkCodec(),
-		m.slashingViolationsConsumer,
-		testutils.WithUnicastRateLimiters(rateLimiters),
-		testutils.WithPeerManagerFilters(testutils.IsRateLimitedPeerFilter(messageRateLimiter)))
-
-	require.Len(m.T(), ids, 1)
-	require.Len(m.T(), providers, 1)
-	require.Len(m.T(), mws, 1)
-	newId := ids[0]
-	newMw := mws[0]
-	idList := flow.IdentityList(append(m.ids, newId))
-
-	providers[0].SetIdentities(idList)
-
-	overlay := m.createOverlay(providers[0])
-
-	calls := atomic.NewUint64(0)
-	ch := make(chan struct{})
-	overlay.On("Receive", mockery.AnythingOfType("*network.IncomingMessageScope")).Return(nil).Run(func(args mockery.Arguments) {
-		calls.Inc()
-		if calls.Load() >= 5 {
-			close(ch)
-		}
-	})
-
-	newMw.SetOverlay(overlay)
-
-	ctx, cancel := context.WithCancel(m.mwCtx)
-	irrecoverableCtx := irrecoverable.NewMockSignalerContext(m.T(), ctx)
-
-	testutils.StartNodes(irrecoverableCtx, m.T(), libP2PNodes, 100*time.Millisecond)
-	defer testutils.StopComponents(m.T(), libP2PNodes, 100*time.Millisecond)
-
-	newMw.Start(irrecoverableCtx)
-	unittest.RequireComponentsReadyBefore(m.T(), 100*time.Millisecond, newMw)
-
-	require.NoError(m.T(), newMw.Subscribe(testChannel))
-
-	// needed to enable ID translation
-	m.providers[0].SetIdentities(idList)
-
-	// update the addresses
-	m.mws[0].UpdateNodeAddresses()
-
-	// add our sender node as a direct peer to our receiving node; this allows us to ensure
-	// that connections to rate-limited peers are completely pruned. IsConnected will
-	// return true only if the node is a direct peer of the other; after rate limiting, this direct
-	// peer should be removed by the peer manager.
-	p2ptest.LetNodesDiscoverEachOther(m.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0], m.nodes[0]}, flow.IdentityList{ids[0], m.ids[0]})
-	p2ptest.EnsureConnected(m.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0], m.nodes[0]})
-
-	// with the rate limit configured to 5 msg/sec we send 10 messages at once and expect the rate limiter
-	// to be invoked at least once. We send 10 messages due to flakiness caused by asynchronous stream handling.
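	// As a worked example of the configuration above (a sketch, not code from this
	// test): with limit=5 and burst=5, golang.org/x/time/rate's token bucket admits
	// five immediate sends and throttles the sixth until tokens refill at 5/sec:
	//
	//	lim := rate.NewLimiter(rate.Limit(5), 5)
	//	allowed := 0
	//	for i := 0; i < 10; i++ {
	//		if lim.Allow() {
	//			allowed++
	//		}
	//	}
	//	// allowed == 5: the burst is spent and effectively no time has passed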
- for i := 0; i < 10; i++ { - msg, err := network.NewOutgoingScope( - flow.IdentifierList{newId.NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: fmt.Sprintf("hello-%d", i), - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(m.T(), err) - err = m.mws[0].SendDirect(msg) - require.NoError(m.T(), err) - } - // wait for all rate limits before shutting down middleware - unittest.RequireCloseBefore(m.T(), ch, 100*time.Millisecond, "could not stop rate limit test ch on time") - - // sleep for 1 seconds to allow connection pruner to prune connections - time.Sleep(1 * time.Second) - - // ensure connection to rate limited peer is pruned - p2ptest.EnsureNotConnectedBetweenGroups(m.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0]}, []p2p.LibP2PNode{m.nodes[0]}) - p2pfixtures.EnsureNoStreamCreationBetweenGroups(m.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0]}, []p2p.LibP2PNode{m.nodes[0]}) - - // eventually the rate limited node should be able to reconnect and send messages - require.Eventually(m.T(), func() bool { - msg, err := network.NewOutgoingScope( - flow.IdentifierList{newId.NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: "hello", - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(m.T(), err) - return m.mws[0].SendDirect(msg) == nil - }, 3*time.Second, 100*time.Millisecond) - - // shutdown our middleware so that each message can be processed - cancel() - unittest.RequireCloseBefore(m.T(), libP2PNodes[0].Done(), 100*time.Millisecond, "could not stop libp2p node on time") - unittest.RequireCloseBefore(m.T(), newMw.Done(), 100*time.Millisecond, "could not stop middleware on time") - - // expect our rate limited peer callback to be invoked once - require.True(m.T(), rateLimits.Load() > 0) -} - -func (m *MiddlewareTestSuite) TestUnicastRateLimit_Bandwidth() { - //limiter limit will be set up to 1000 bytes/sec - limit := rate.Limit(1000) - - //burst per interval - burst := 1000 - - // we only expect messages from the first middleware on the test suite - expectedPID, err := unittest.PeerIDFromFlowID(m.ids[0]) - require.NoError(m.T(), err) - - // setup bandwidth rate limiter - bandwidthRateLimiter := ratelimit.NewBandWidthRateLimiter(limit, burst, 4) - - // the onRateLimit call back we will use to keep track of how many times a rate limit happens - // after 5 rate limits we will close ch. - ch := make(chan struct{}) - rateLimits := atomic.NewUint64(0) - onRateLimit := func(peerID peer.ID, role, msgType, topic, reason string) { - require.Equal(m.T(), reason, ratelimit.ReasonBandwidth.String()) - - // we only expect messages from the first middleware on the test suite - require.NoError(m.T(), err) - require.Equal(m.T(), expectedPID, peerID) - // update hook calls - rateLimits.Inc() - close(ch) - } - - consumer := testutils.NewRateLimiterConsumer(onRateLimit) - distributor := ratelimit.NewUnicastRateLimiterDistributor() - distributor.AddConsumer(consumer) - opts := []ratelimit.RateLimitersOption{ratelimit.WithBandwidthRateLimiter(bandwidthRateLimiter), ratelimit.WithNotifier(distributor), ratelimit.WithDisabledRateLimiting(false)} - rateLimiters := ratelimit.NewRateLimiters(opts...) 
- - idProvider := testutils.NewUpdatableIDProvider(m.ids) - // create connection gater, connection gater will refuse connections from rate limited nodes - connGater := testutils.NewConnectionGater(idProvider, func(pid peer.ID) error { - if bandwidthRateLimiter.IsRateLimited(pid) { - return fmt.Errorf("rate-limited peer") - } - - return nil - }) - // create a new staked identity - ids, libP2PNodes, _ := testutils.GenerateIDs(m.T(), - m.logger, - 1, - testutils.WithUnicastRateLimiterDistributor(distributor), - testutils.WithConnectionGater(connGater)) - idProvider.SetIdentities(append(m.ids, ids...)) - - // create middleware - mws, providers := testutils.GenerateMiddlewares(m.T(), - m.logger, - ids, - libP2PNodes, - unittest.NetworkCodec(), - m.slashingViolationsConsumer, - testutils.WithUnicastRateLimiters(rateLimiters), - testutils.WithPeerManagerFilters(testutils.IsRateLimitedPeerFilter(bandwidthRateLimiter))) - require.Len(m.T(), ids, 1) - require.Len(m.T(), providers, 1) - require.Len(m.T(), mws, 1) - newId := ids[0] - newMw := mws[0] - overlay := m.createOverlay(providers[0]) - overlay.On("Receive", m.ids[0].NodeID, mockery.AnythingOfType("*message.Message")).Return(nil) - - newMw.SetOverlay(overlay) - - ctx, cancel := context.WithCancel(m.mwCtx) - irrecoverableCtx := irrecoverable.NewMockSignalerContext(m.T(), ctx) - - testutils.StartNodes(irrecoverableCtx, m.T(), libP2PNodes, 100*time.Millisecond) - defer testutils.StopComponents(m.T(), libP2PNodes, 100*time.Millisecond) - - newMw.Start(irrecoverableCtx) - unittest.RequireComponentsReadyBefore(m.T(), 100*time.Millisecond, newMw) - - require.NoError(m.T(), newMw.Subscribe(testChannel)) - - idList := flow.IdentityList(append(m.ids, newId)) - - // needed to enable ID translation - m.providers[0].SetIdentities(idList) - - // create message with about 400bytes (300 random bytes + 100bytes message info) - b := make([]byte, 300) - for i := range b { - b[i] = byte('X') - } - - msg, err := network.NewOutgoingScope( - flow.IdentifierList{newId.NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: string(b), - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(m.T(), err) - - // update the addresses - m.mws[0].UpdateNodeAddresses() - - // add our sender node as a direct peer to our receiving node, this allows us to ensure - // that connections to peers that are rate limited are completely prune. IsConnected will - // return true only if the node is a direct peer of the other, after rate limiting this direct - // peer should be removed by the peer manager. - p2ptest.LetNodesDiscoverEachOther(m.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0], m.nodes[0]}, flow.IdentityList{ids[0], m.ids[0]}) - - // send 3 messages at once with a size of 400 bytes each. The third message will be rate limited - // as it is more than our allowed bandwidth of 1000 bytes. 
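	// The arithmetic behind the comment above, as a sketch (ignoring token refill
	// during the sends, which is negligible at this timescale): with a bucket of
	// 1000 tokens and ~400 tokens consumed per message, the first two sends leave
	// ~200 tokens, so the third cannot be admitted:
	//
	//	lim := rate.NewLimiter(rate.Limit(1000), 1000) // tokens are bytes here
	//	lim.AllowN(time.Now(), 400)                    // true, ~600 left
	//	lim.AllowN(time.Now(), 400)                    // true, ~200 left
	//	lim.AllowN(time.Now(), 400)                    // false, rate limited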
- for i := 0; i < 3; i++ { - err = m.mws[0].SendDirect(msg) - require.NoError(m.T(), err) - } - - // wait for all rate limits before shutting down middleware - unittest.RequireCloseBefore(m.T(), ch, 100*time.Millisecond, "could not stop on rate limit test ch on time") - - // sleep for 1 seconds to allow connection pruner to prune connections - time.Sleep(1 * time.Second) - - // ensure connection to rate limited peer is pruned - p2ptest.EnsureNotConnectedBetweenGroups(m.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0]}, []p2p.LibP2PNode{m.nodes[0]}) - p2pfixtures.EnsureNoStreamCreationBetweenGroups(m.T(), ctx, []p2p.LibP2PNode{libP2PNodes[0]}, []p2p.LibP2PNode{m.nodes[0]}) - - // eventually the rate limited node should be able to reconnect and send messages - require.Eventually(m.T(), func() bool { - msg, err = network.NewOutgoingScope( - flow.IdentifierList{newId.NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: "", - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(m.T(), err) - return m.mws[0].SendDirect(msg) == nil - }, 3*time.Second, 100*time.Millisecond) - - // shutdown our middleware so that each message can be processed - cancel() - unittest.RequireComponentsDoneBefore(m.T(), 100*time.Millisecond, newMw) - - // expect our rate limited peer callback to be invoked once - require.Equal(m.T(), uint64(1), rateLimits.Load()) -} - -func (m *MiddlewareTestSuite) createOverlay(provider *testutils.UpdatableIDProvider) *mocknetwork.Overlay { - overlay := &mocknetwork.Overlay{} - overlay.On("Identities").Maybe().Return(func() flow.IdentityList { - return provider.Identities(filter.Any) - }) - overlay.On("Topology").Maybe().Return(func() flow.IdentityList { - return provider.Identities(filter.Any) - }, nil) - // this test is not testing the topic validator, especially in spoofing, - // so we always return a valid identity. We only care about the node role for the test TestMaxMessageSize_SendDirect - // where EN are the only node authorized to send chunk data response. - identityOpts := unittest.WithRole(flow.RoleExecution) - overlay.On("Identity", mockery.AnythingOfType("peer.ID")).Maybe().Return(unittest.IdentityFixture(identityOpts), true) - return overlay -} - -// TestMultiPing tests the middleware against type of received payload -// of distinct messages that are sent concurrently from a node to another -func (m *MiddlewareTestSuite) TestMultiPing() { - // one distinct message - m.MultiPing(1) - - // two distinct messages - m.MultiPing(2) - - // 10 distinct messages - m.MultiPing(10) -} - -// TestPing sends a message from the first middleware of the test suit to the last one and checks that the -// last middleware receives the message and that the message is correctly decoded. -func (m *MiddlewareTestSuite) TestPing() { - receiveWG := sync.WaitGroup{} - receiveWG.Add(1) - // extracts sender id based on the mock option - var err error - - // mocks Overlay.Receive for middleware.Overlay.Receive(*nodeID, payload) - firstNodeIndex := 0 - lastNodeIndex := m.size - 1 - - expectedPayload := "TestPingContentReception" - msg, err := network.NewOutgoingScope( - flow.IdentifierList{m.ids[lastNodeIndex].NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: expectedPayload, - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(m.T(), err) - - m.ov[lastNodeIndex].On("Receive", mockery.Anything).Return(nil).Once(). 
- Run(func(args mockery.Arguments) { - receiveWG.Done() - - msg, ok := args[0].(*network.IncomingMessageScope) - require.True(m.T(), ok) - - require.Equal(m.T(), testChannel, msg.Channel()) // channel - require.Equal(m.T(), m.ids[firstNodeIndex].NodeID, msg.OriginId()) // sender id - require.Equal(m.T(), m.ids[lastNodeIndex].NodeID, msg.TargetIDs()[0]) // target id - require.Equal(m.T(), message.ProtocolTypeUnicast, msg.Protocol()) // protocol - require.Equal(m.T(), expectedPayload, msg.DecodedPayload().(*libp2pmessage.TestMessage).Text) // payload - }) - - // sends a direct message from first node to the last node - err = m.mws[firstNodeIndex].SendDirect(msg) - require.NoError(m.Suite.T(), err) - - unittest.RequireReturnsBefore(m.T(), receiveWG.Wait, 1000*time.Millisecond, "did not receive message") - - // evaluates the mock calls - for i := 1; i < m.size; i++ { - m.ov[i].AssertExpectations(m.T()) - } - -} - -// MultiPing sends count-many distinct messages concurrently from the first middleware of the test suit to the last one. -// It evaluates the correctness of reception of the content of the messages. Each message must be received by the -// last middleware of the test suit exactly once. -func (m *MiddlewareTestSuite) MultiPing(count int) { - receiveWG := sync.WaitGroup{} - sendWG := sync.WaitGroup{} - // extracts sender id based on the mock option - // mocks Overlay.Receive for middleware.Overlay.Receive(*nodeID, payload) - firstNodeIndex := 0 - lastNodeIndex := m.size - 1 - - receivedPayloads := unittest.NewProtectedMap[string, struct{}]() // keep track of unique payloads received. - - // regex to extract the payload from the message - regex := regexp.MustCompile(`^hello from: \d`) - - for i := 0; i < count; i++ { - receiveWG.Add(1) - sendWG.Add(1) - - expectedPayloadText := fmt.Sprintf("hello from: %d", i) - msg, err := network.NewOutgoingScope( - flow.IdentifierList{m.ids[lastNodeIndex].NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: expectedPayloadText, - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(m.T(), err) - - m.ov[lastNodeIndex].On("Receive", mockery.Anything).Return(nil).Once(). 
-			Run(func(args mockery.Arguments) {
-				receiveWG.Done()
-
-				msg, ok := args[0].(*network.IncomingMessageScope)
-				require.True(m.T(), ok)
-
-				require.Equal(m.T(), testChannel, msg.Channel())                      // channel
-				require.Equal(m.T(), m.ids[firstNodeIndex].NodeID, msg.OriginId())    // sender id
-				require.Equal(m.T(), m.ids[lastNodeIndex].NodeID, msg.TargetIDs()[0]) // target id
-				require.Equal(m.T(), message.ProtocolTypeUnicast, msg.Protocol())     // protocol
-
-				// payload
-				decodedPayload := msg.DecodedPayload().(*libp2pmessage.TestMessage).Text
-				require.True(m.T(), regex.MatchString(decodedPayload))
-				require.False(m.T(), receivedPayloads.Has(decodedPayload)) // payload must be unique
-				receivedPayloads.Add(decodedPayload, struct{}{})
-			})
-		go func() {
-			// sends a direct message from the first node to the last node
-			err := m.mws[firstNodeIndex].SendDirect(msg)
-			require.NoError(m.Suite.T(), err)
-
-			sendWG.Done()
-		}()
-	}
-
-	unittest.RequireReturnsBefore(m.T(), sendWG.Wait, 1*time.Second, "could not send unicasts on time")
-	unittest.RequireReturnsBefore(m.T(), receiveWG.Wait, 1*time.Second, "could not receive unicasts on time")
-
-	// evaluates the mock calls
-	for i := 1; i < m.size; i++ {
-		m.ov[i].AssertExpectations(m.T())
-	}
-}
-
-// TestEcho sends an echo message from the first middleware to the last middleware;
-// the last middleware echoes back the message. The test evaluates the correctness
-// of the message reception as well as its content.
-func (m *MiddlewareTestSuite) TestEcho() {
-	wg := sync.WaitGroup{}
-	// extracts sender id based on the mock option
-	var err error
-
-	wg.Add(2)
-	// mocks Overlay.Receive for middleware.Overlay.Receive(*nodeID, payload)
-	first := 0
-	last := m.size - 1
-	firstNode := m.ids[first].NodeID
-	lastNode := m.ids[last].NodeID
-
-	// message sent from the first node to the last node.
-	expectedSendMsg := "TestEcho"
-	sendMsg, err := network.NewOutgoingScope(
-		flow.IdentifierList{lastNode},
-		testChannel,
-		&libp2pmessage.TestMessage{
-			Text: expectedSendMsg,
-		},
-		unittest.NetworkCodec().Encode,
-		message.ProtocolTypeUnicast)
-	require.NoError(m.T(), err)
-
-	// reply from the last node to the first node.
-	expectedReplyMsg := "TestEcho response"
-	replyMsg, err := network.NewOutgoingScope(
-		flow.IdentifierList{firstNode},
-		testChannel,
-		&libp2pmessage.TestMessage{
-			Text: expectedReplyMsg,
-		},
-		unittest.NetworkCodec().Encode,
-		message.ProtocolTypeUnicast)
-	require.NoError(m.T(), err)
-
-	// last node
-	m.ov[last].On("Receive", mockery.Anything).Return(nil).Once().
-		Run(func(args mockery.Arguments) {
-			wg.Done()
-
-			// sanity checks the message content.
-			msg, ok := args[0].(*network.IncomingMessageScope)
-			require.True(m.T(), ok)
-
-			require.Equal(m.T(), testChannel, msg.Channel())                                              // channel
-			require.Equal(m.T(), m.ids[first].NodeID, msg.OriginId())                                     // sender id
-			require.Equal(m.T(), lastNode, msg.TargetIDs()[0])                                            // target id
-			require.Equal(m.T(), message.ProtocolTypeUnicast, msg.Protocol())                             // protocol
-			require.Equal(m.T(), expectedSendMsg, msg.DecodedPayload().(*libp2pmessage.TestMessage).Text) // payload
-			// event id
-			eventId, err := network.EventId(msg.Channel(), msg.Proto().Payload)
-			require.NoError(m.T(), err)
-			require.True(m.T(), bytes.Equal(eventId, msg.EventID()))
-
-			// echoes the same message back to the sender
-			err = m.mws[last].SendDirect(replyMsg)
-			assert.NoError(m.T(), err)
-		})
-
-	// first node
-	m.ov[first].On("Receive", mockery.Anything).Return(nil).Once().
-		Run(func(args mockery.Arguments) {
-			wg.Done()
-			// sanity checks the message content.
-			msg, ok := args[0].(*network.IncomingMessageScope)
-			require.True(m.T(), ok)
-
-			require.Equal(m.T(), testChannel, msg.Channel())                                               // channel
-			require.Equal(m.T(), m.ids[last].NodeID, msg.OriginId())                                       // sender id
-			require.Equal(m.T(), firstNode, msg.TargetIDs()[0])                                            // target id
-			require.Equal(m.T(), message.ProtocolTypeUnicast, msg.Protocol())                              // protocol
-			require.Equal(m.T(), expectedReplyMsg, msg.DecodedPayload().(*libp2pmessage.TestMessage).Text) // payload
-			// event id
-			eventId, err := network.EventId(msg.Channel(), msg.Proto().Payload)
-			require.NoError(m.T(), err)
-			require.True(m.T(), bytes.Equal(eventId, msg.EventID()))
-		})
-
-	// sends a direct message from the first node to the last node
-	err = m.mws[first].SendDirect(sendMsg)
-	require.NoError(m.Suite.T(), err)
-
-	unittest.RequireReturnsBefore(m.T(), wg.Wait, 100*time.Second, "could not receive unicast on time")
-
-	// evaluates the mock calls
-	for i := 1; i < m.size; i++ {
-		m.ov[i].AssertExpectations(m.T())
-	}
-}
-
-// TestMaxMessageSize_SendDirect evaluates that invoking the SendDirect method of the middleware with a message
-// size beyond the permissible unicast message size returns an error.
-func (m *MiddlewareTestSuite) TestMaxMessageSize_SendDirect() {
-	first := 0
-	last := m.size - 1
-	lastNode := m.ids[last].NodeID
-
-	// creates a network payload beyond the maximum message size
-	// Note: networkPayloadFixture considers 1000 bytes as the overhead of the encoded message,
-	// so the generated payload is 1000 bytes below the maximum unicast message size.
-	// We hence add 1000 bytes to the input of the network payload fixture to make
-	// sure that the payload is beyond the permissible size.
-	payload := testutils.NetworkPayloadFixture(m.T(), uint(middleware.DefaultMaxUnicastMsgSize)+1000)
-	event := &libp2pmessage.TestMessage{
-		Text: string(payload),
-	}
-
-	msg, err := network.NewOutgoingScope(
-		flow.IdentifierList{lastNode},
-		testChannel,
-		event,
-		unittest.NetworkCodec().Encode,
-		message.ProtocolTypeUnicast)
-	require.NoError(m.T(), err)
-
-	// sends a direct message from the first node to the last node
-	err = m.mws[first].SendDirect(msg)
-	require.Error(m.Suite.T(), err)
-}
-
-// TestLargeMessageSize_SendDirect asserts that a ChunkDataResponse is treated as a large message and can be unicasted
-// successfully even though its size is greater than the default message size.
-func (m *MiddlewareTestSuite) TestLargeMessageSize_SendDirect() {
-	sourceIndex := 0
-	targetIndex := m.size - 1
-	targetNode := m.ids[targetIndex].NodeID
-	targetMW := m.mws[targetIndex]
-
-	// subscribe to channels.ProvideChunks so that the message is not dropped
-	require.NoError(m.T(), targetMW.Subscribe(channels.ProvideChunks))
-
-	// creates a network payload with a size greater than the default max size using a known large message type
-	targetSize := uint64(middleware.DefaultMaxUnicastMsgSize) + 1000
-	event := unittest.ChunkDataResponseMsgFixture(unittest.IdentifierFixture(), unittest.WithApproximateSize(targetSize))
-
-	msg, err := network.NewOutgoingScope(
-		flow.IdentifierList{targetNode},
-		channels.ProvideChunks,
-		event,
-		unittest.NetworkCodec().Encode,
-		message.ProtocolTypeUnicast)
-	require.NoError(m.T(), err)
-
-	// expect one message to be received by the target
-	ch := make(chan struct{})
-	m.ov[targetIndex].On("Receive", mockery.Anything).Return(nil).Once().
-		Run(func(args mockery.Arguments) {
-			msg, ok := args[0].(*network.IncomingMessageScope)
-			require.True(m.T(), ok)
-
-			require.Equal(m.T(), channels.ProvideChunks, msg.Channel())
-			require.Equal(m.T(), m.ids[sourceIndex].NodeID, msg.OriginId())
-			require.Equal(m.T(), targetNode, msg.TargetIDs()[0])
-			require.Equal(m.T(), message.ProtocolTypeUnicast, msg.Protocol())
-
-			eventId, err := network.EventId(msg.Channel(), msg.Proto().Payload)
-			require.NoError(m.T(), err)
-			require.True(m.T(), bytes.Equal(eventId, msg.EventID()))
-			close(ch)
-		})
-
-	// sends a direct message from the source node to the target node
-	err = m.mws[sourceIndex].SendDirect(msg)
-	// SendDirect should not error since this is a known large message
-	require.NoError(m.Suite.T(), err)
-
-	// check message reception on target
-	unittest.RequireCloseBefore(m.T(), ch, 60*time.Second, "source node failed to send large message to target")
-
-	m.ov[targetIndex].AssertExpectations(m.T())
-}
-
-// TestMaxMessageSize_Publish evaluates that invoking the Publish method of the middleware with a message
-// size beyond the permissible publish message size returns an error.
-func (m *MiddlewareTestSuite) TestMaxMessageSize_Publish() {
-	first := 0
-	last := m.size - 1
-	lastNode := m.ids[last].NodeID
-
-	// creates a network payload beyond the maximum message size
-	// Note: networkPayloadFixture considers 1000 bytes as the overhead of the encoded message,
-	// so the generated payload is 1000 bytes below the maximum publish message size.
-	// We hence add 1000 bytes to the input of the network payload fixture to make
-	// sure that the payload is beyond the permissible size.
-	payload := testutils.NetworkPayloadFixture(m.T(), uint(p2pnode.DefaultMaxPubSubMsgSize)+1000)
-	event := &libp2pmessage.TestMessage{
-		Text: string(payload),
-	}
-	msg, err := network.NewOutgoingScope(
-		flow.IdentifierList{lastNode},
-		testChannel,
-		event,
-		unittest.NetworkCodec().Encode,
-		message.ProtocolTypePubSub)
-	require.NoError(m.T(), err)
-
-	// publishes the message from the first node to the last node
-	err = m.mws[first].Publish(msg)
-	require.Error(m.Suite.T(), err)
-}
-
-// TestUnsubscribe tests that an engine can unsubscribe from a topic it was earlier subscribed to and stop receiving
-// messages.
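// The "must not receive" half of that assertion is the interesting part; a minimal
// self-contained sketch of such a negative check (the helper name and signature are
// illustrative assumptions, not this repository's unittest helpers):
//
//	func requireNeverReceives(t *testing.T, ch <-chan struct{}, window time.Duration) {
//		select {
//		case <-ch:
//			t.Fatal("received a message that should have been dropped after Unsubscribe")
//		case <-time.After(window):
//			// window elapsed with no delivery: the unsubscribe took effect
//		}
//	}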
-func (m *MiddlewareTestSuite) TestUnsubscribe() { - first := 0 - last := m.size - 1 - firstNode := m.ids[first].NodeID - lastNode := m.ids[last].NodeID - - // set up waiting for m.size pubsub tags indicating a mesh has formed - for i := 0; i < m.size; i++ { - select { - case <-m.obs: - case <-time.After(2 * time.Second): - assert.FailNow(m.T(), "could not receive pubsub tag indicating mesh formed") - } - } - - msgRcvd := make(chan struct{}, 2) - msgRcvdFun := func() { - <-msgRcvd - } - - message1, err := network.NewOutgoingScope( - flow.IdentifierList{lastNode}, - testChannel, - &libp2pmessage.TestMessage{ - Text: string("hello1"), - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(m.T(), err) - - m.ov[last].On("Receive", mockery.Anything).Return(nil).Run(func(args mockery.Arguments) { - msg, ok := args[0].(*network.IncomingMessageScope) - require.True(m.T(), ok) - require.Equal(m.T(), firstNode, msg.OriginId()) - msgRcvd <- struct{}{} - }) - - // first test that when both nodes are subscribed to the channel, the target node receives the message - err = m.mws[first].Publish(message1) - assert.NoError(m.T(), err) - - unittest.RequireReturnsBefore(m.T(), msgRcvdFun, 2*time.Second, "message not received") - - // now unsubscribe the target node from the channel - err = m.mws[last].Unsubscribe(testChannel) - assert.NoError(m.T(), err) - - // create and send a new message on the channel from the origin node - message2, err := network.NewOutgoingScope( - flow.IdentifierList{lastNode}, - testChannel, - &libp2pmessage.TestMessage{ - Text: string("hello2"), - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(m.T(), err) - - err = m.mws[first].Publish(message2) - assert.NoError(m.T(), err) - - // assert that the new message is not received by the target node - unittest.RequireNeverReturnBefore(m.T(), msgRcvdFun, 2*time.Second, "message received unexpectedly") -} diff --git a/network/test/unicast_authorization_test.go b/network/test/unicast_authorization_test.go deleted file mode 100644 index 6fe4d0b8b58..00000000000 --- a/network/test/unicast_authorization_test.go +++ /dev/null @@ -1,670 +0,0 @@ -package test - -import ( - "context" - "testing" - "time" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/mock" - mockery "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" - libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" - "github.com/onflow/flow-go/model/messages" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/codec" - "github.com/onflow/flow-go/network/internal/testutils" - "github.com/onflow/flow-go/network/message" - "github.com/onflow/flow-go/network/mocknetwork" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/middleware" - "github.com/onflow/flow-go/network/slashing" - "github.com/onflow/flow-go/network/validator" - "github.com/onflow/flow-go/utils/unittest" -) - -// UnicastAuthorizationTestSuite tests that messages sent via unicast that are unauthenticated or unauthorized are correctly rejected. Each test on the test suite -// uses 2 middlewares, a sender and receiver. A mock slashing violation's consumer is used to assert the messages were rejected. 
Middleware and the cancel func
-// are set during each test run inside the test and removed after each test run in the TearDownTest callback.
-type UnicastAuthorizationTestSuite struct {
-	suite.Suite
-	channelCloseDuration time.Duration
-	logger               zerolog.Logger
-
-	libP2PNodes []p2p.LibP2PNode
-	// senderMW is the mw that will be sending the message
-	senderMW network.Middleware
-	// senderID the identity on the mw sending the message
-	senderID *flow.Identity
-	// receiverMW is the mw that will be receiving the message
-	receiverMW network.Middleware
-	// receiverID the identity on the mw receiving the message
-	receiverID *flow.Identity
-	// providers id providers generated at beginning of a test run
-	providers []*testutils.UpdatableIDProvider
-	// cancel is the cancel func from the context that was used to start the middlewares in a test run
-	cancel context.CancelFunc
-	// waitCh is the channel used to wait for the middleware to perform authorization and invoke the slashing
-	// violations consumer before making mock assertions and cleaning up resources
-	waitCh chan struct{}
-}
-
-// TestUnicastAuthorizationTestSuite runs all the test methods in this test suite
-func TestUnicastAuthorizationTestSuite(t *testing.T) {
-	t.Parallel()
-	suite.Run(t, new(UnicastAuthorizationTestSuite))
-}
-
-func (u *UnicastAuthorizationTestSuite) SetupTest() {
-	u.logger = unittest.Logger()
-	u.channelCloseDuration = 100 * time.Millisecond
-	// this ch will allow us to wait until the expected method call happens before shutting down middleware
-	u.waitCh = make(chan struct{})
-}
-
-func (u *UnicastAuthorizationTestSuite) TearDownTest() {
-	u.stopMiddlewares()
-}
-
-// setupMiddlewaresAndProviders will set up 2 middlewares that will be used as a sender and receiver in each suite test.
-func (u *UnicastAuthorizationTestSuite) setupMiddlewaresAndProviders(slashingViolationsConsumer slashing.ViolationsConsumer) {
-	ids, libP2PNodes, _ := testutils.GenerateIDs(u.T(), u.logger, 2)
-	mws, providers := testutils.GenerateMiddlewares(u.T(), u.logger, ids, libP2PNodes, unittest.NetworkCodec(), slashingViolationsConsumer)
-	require.Len(u.T(), ids, 2)
-	require.Len(u.T(), providers, 2)
-	require.Len(u.T(), mws, 2)
-
-	u.senderID = ids[0]
-	u.senderMW = mws[0]
-	u.receiverID = ids[1]
-	u.receiverMW = mws[1]
-	u.providers = providers
-	u.libP2PNodes = libP2PNodes
-}
-
-// startMiddlewares will start both sender and receiver middlewares with an irrecoverable signaler context and set the context cancel func.
-func (u *UnicastAuthorizationTestSuite) startMiddlewares(overlay *mocknetwork.Overlay) {
-	ctx, cancel := context.WithCancel(context.Background())
-	sigCtx, _ := irrecoverable.WithSignaler(ctx)
-
-	testutils.StartNodes(sigCtx, u.T(), u.libP2PNodes, 100*time.Millisecond)
-
-	u.senderMW.SetOverlay(overlay)
-	u.senderMW.Start(sigCtx)
-
-	u.receiverMW.SetOverlay(overlay)
-	u.receiverMW.Start(sigCtx)
-
-	unittest.RequireComponentsReadyBefore(u.T(), 100*time.Millisecond, u.senderMW, u.receiverMW)
-
-	u.cancel = cancel
-}
-
-// stopMiddlewares will stop all middlewares.
-func (u *UnicastAuthorizationTestSuite) stopMiddlewares() {
-	u.cancel()
-	unittest.RequireCloseBefore(u.T(), u.senderMW.Done(), u.channelCloseDuration, "could not stop middleware on time")
-	unittest.RequireCloseBefore(u.T(), u.receiverMW.Done(), u.channelCloseDuration, "could not stop middleware on time")
-}
-
-// TestUnicastAuthorization_UnstakedPeer tests that messages sent via unicast by an unstaked peer are correctly rejected.
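// Each test below follows the same synchronization pattern: the mocked violations
// consumer closes u.waitCh from its Run hook, and the test blocks on waitCh before
// asserting and tearing down. A reduced sketch of that handshake (the names here
// are illustrative, standing in for the mockery wiring):
//
//	waitCh := make(chan struct{})
//	onViolation := func() { close(waitCh) } // installed as the mock's Run callback
//	go onViolation()                        // in the tests this fires inside message processing
//	select {
//	case <-waitCh:
//		// violation observed; safe to assert and shut down
//	case <-time.After(100 * time.Millisecond):
//		// the tests fail here via unittest.RequireCloseBefore
//	}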
-func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnstakedPeer() {
-	// setup mock slashing violations consumer and middlewares
-	slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T())
-	u.setupMiddlewaresAndProviders(slashingViolationsConsumer)
-
-	expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID)
-	require.NoError(u.T(), err)
-
-	var nilID *flow.Identity
-	expectedViolation := &slashing.Violation{
-		Identity: nilID, // because the peer will be unverified this identity will be nil
-		PeerID:   expectedSenderPeerID.String(),
-		MsgType:  "",                          // the message is not decoded before the violation is logged, so the message type is unknown
-		Channel:  channels.TestNetworkChannel, // the channel the message was sent on
-		Protocol: message.ProtocolTypeUnicast,
-		Err:      validator.ErrIdentityUnverified,
-	}
-	slashingViolationsConsumer.On(
-		"OnUnAuthorizedSenderError",
-		expectedViolation,
-	).Once().Run(func(args mockery.Arguments) {
-		close(u.waitCh)
-	})
-
-	overlay := mocknetwork.NewOverlay(u.T())
-	overlay.On("Identities").Maybe().Return(func() flow.IdentityList {
-		return u.providers[0].Identities(filter.Any)
-	})
-	overlay.On("Topology").Maybe().Return(func() flow.IdentityList {
-		return u.providers[0].Identities(filter.Any)
-	}, nil)
-
-	// NOTE: return (nil, false) simulating an unstaked node
-	overlay.On("Identity", mock.AnythingOfType("peer.ID")).Return(nil, false)
-	// message will be rejected so assert overlay never receives it
-	defer overlay.AssertNotCalled(u.T(), "Receive", mockery.Anything)
-
-	u.startMiddlewares(overlay)
-
-	require.NoError(u.T(), u.receiverMW.Subscribe(testChannel))
-	require.NoError(u.T(), u.senderMW.Subscribe(testChannel))
-
-	msg, err := network.NewOutgoingScope(
-		flow.IdentifierList{u.receiverID.NodeID},
-		testChannel,
-		&libp2pmessage.TestMessage{
-			Text: string("hello"),
-		},
-		unittest.NetworkCodec().Encode,
-		message.ProtocolTypeUnicast)
-	require.NoError(u.T(), err)
-
-	// send message via unicast
-	err = u.senderMW.SendDirect(msg)
-	require.NoError(u.T(), err)
-
-	// wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens
-	unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could not close ch on time")
-}
-
-// TestUnicastAuthorization_EjectedPeer tests that messages sent via unicast by an ejected peer are correctly rejected.
-func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_EjectedPeer() { - // setup mock slashing violations consumer and middlewares - slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) - u.setupMiddlewaresAndProviders(slashingViolationsConsumer) - //NOTE: setup ejected identity - u.senderID.Ejected = true - - expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) - require.NoError(u.T(), err) - - expectedViolation := &slashing.Violation{ - Identity: u.senderID, // we expect this method to be called with the ejected identity - PeerID: expectedSenderPeerID.String(), - MsgType: "", // message will not be decoded before OnSenderEjectedError is logged, we won't log message type - Channel: channels.TestNetworkChannel, // message will not be decoded before OnSenderEjectedError is logged, we won't log peer ID - Protocol: message.ProtocolTypeUnicast, - Err: validator.ErrSenderEjected, - } - slashingViolationsConsumer.On( - "OnSenderEjectedError", - expectedViolation, - ).Once().Run(func(args mockery.Arguments) { - close(u.waitCh) - }) - - overlay := mocknetwork.NewOverlay(u.T()) - overlay.On("Identities").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }) - overlay.On("Topology").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }, nil) - //NOTE: return ejected identity causing validation to fail - overlay.On("Identity", mock.AnythingOfType("peer.ID")).Return(u.senderID, true) - // message will be rejected so assert overlay never receives it - defer overlay.AssertNotCalled(u.T(), "Receive", mockery.Anything) - - u.startMiddlewares(overlay) - - require.NoError(u.T(), u.receiverMW.Subscribe(testChannel)) - require.NoError(u.T(), u.senderMW.Subscribe(testChannel)) - - msg, err := network.NewOutgoingScope( - flow.IdentifierList{u.receiverID.NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: string("hello"), - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(u.T(), err) - - // send message via unicast - err = u.senderMW.SendDirect(msg) - require.NoError(u.T(), err) - - // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens - unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") -} - -// TestUnicastAuthorization_UnauthorizedPeer tests that messages sent via unicast by an unauthorized peer is correctly rejected. 
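// Conceptually, the rejection exercised below comes from a channel allow-list keyed
// by message type and required role; a toy version of that check (the table and the
// string keys are illustrative, not this codebase's actual policy):
//
//	func authorizedOnChannel(channel, msgType, role string) bool {
//		// hypothetical policy: only consensus nodes may send block proposals
//		// on the consensus committee channel
//		policy := map[string]map[string]string{
//			"consensus-committee": {"*messages.BlockProposal": "consensus"},
//		}
//		requiredRole, ok := policy[channel][msgType]
//		return ok && requiredRole == role
//	}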
-func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnauthorizedPeer() { - // setup mock slashing violations consumer and middlewares - slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) - u.setupMiddlewaresAndProviders(slashingViolationsConsumer) - - expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) - require.NoError(u.T(), err) - - expectedViolation := &slashing.Violation{ - Identity: u.senderID, - PeerID: expectedSenderPeerID.String(), - MsgType: "*message.TestMessage", - Channel: channels.ConsensusCommittee, - Protocol: message.ProtocolTypeUnicast, - Err: message.ErrUnauthorizedMessageOnChannel, - } - - slashingViolationsConsumer.On( - "OnUnAuthorizedSenderError", - expectedViolation, - ).Once().Run(func(args mockery.Arguments) { - close(u.waitCh) - }) - - overlay := mocknetwork.NewOverlay(u.T()) - overlay.On("Identities").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }) - overlay.On("Topology").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }, nil) - overlay.On("Identity", mock.AnythingOfType("peer.ID")).Return(u.senderID, true) - // message will be rejected so assert overlay never receives it - defer overlay.AssertNotCalled(u.T(), "Receive", mockery.Anything) - - u.startMiddlewares(overlay) - - channel := channels.ConsensusCommittee - require.NoError(u.T(), u.receiverMW.Subscribe(channel)) - require.NoError(u.T(), u.senderMW.Subscribe(channel)) - - msg, err := network.NewOutgoingScope( - flow.IdentifierList{u.receiverID.NodeID}, - channel, - &libp2pmessage.TestMessage{ - Text: string("hello"), - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(u.T(), err) - - // send message via unicast - err = u.senderMW.SendDirect(msg) - require.NoError(u.T(), err) - - // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens - unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") -} - -// TestUnicastAuthorization_UnknownMsgCode tests that messages sent via unicast with an unknown message code is correctly rejected. 
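// The custom encoder used in the next two tests works because the codec's wire
// format leads with a single message-code byte followed by the encoded payload, so
// overwriting byte 0 produces a structurally valid envelope carrying a bogus code.
// A toy decoder showing why that first byte alone drives rejection (the code values
// are illustrative, not the codec's real assignments):
//
//	func decodeEnvelope(b []byte) (string, error) {
//		if len(b) == 0 {
//			return "", fmt.Errorf("empty message")
//		}
//		switch b[0] {
//		case 0x01:
//			return "TestMessage", nil
//		case 0x02:
//			return "DKGMessage", nil
//		default:
//			return "", fmt.Errorf("unknown message code: %x", b[0])
//		}
//	}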
-func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnknownMsgCode() { - // setup mock slashing violations consumer and middlewares - slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) - u.setupMiddlewaresAndProviders(slashingViolationsConsumer) - - expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) - require.NoError(u.T(), err) - - invalidMessageCode := codec.MessageCode(byte('X')) - - var nilID *flow.Identity - expectedViolation := &slashing.Violation{ - Identity: nilID, - PeerID: expectedSenderPeerID.String(), - MsgType: "", - Channel: channels.TestNetworkChannel, - Protocol: message.ProtocolTypeUnicast, - Err: codec.NewUnknownMsgCodeErr(invalidMessageCode), - } - - slashingViolationsConsumer.On( - "OnUnknownMsgTypeError", - expectedViolation, - ).Once().Run(func(args mockery.Arguments) { - close(u.waitCh) - }) - - overlay := mocknetwork.NewOverlay(u.T()) - overlay.On("Identities").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }) - overlay.On("Topology").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }, nil) - overlay.On("Identity", mock.AnythingOfType("peer.ID")).Return(u.senderID, true) - - // message will be rejected so assert overlay never receives it - defer overlay.AssertNotCalled(u.T(), "Receive", u.senderID.NodeID, mock.AnythingOfType("*message.Message")) - - u.startMiddlewares(overlay) - - require.NoError(u.T(), u.receiverMW.Subscribe(testChannel)) - require.NoError(u.T(), u.senderMW.Subscribe(testChannel)) - - msg, err := network.NewOutgoingScope( - flow.IdentifierList{u.receiverID.NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: "hello", - }, - // we use a custom encoder that encodes the message with an invalid message code. - func(msg interface{}) ([]byte, error) { - e, err := unittest.NetworkCodec().Encode(msg) - require.NoError(u.T(), err) - // manipulate message code byte - e[0] = invalidMessageCode.Uint8() - return e, nil - }, - message.ProtocolTypeUnicast) - require.NoError(u.T(), err) - - // send message via unicast - err = u.senderMW.SendDirect(msg) - require.NoError(u.T(), err) - - // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens - unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") -} - -// TestUnicastAuthorization_WrongMsgCode tests that messages sent via unicast with a message code that does not match the underlying message type are correctly rejected. 
-func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_WrongMsgCode() { - // setup mock slashing violations consumer and middlewares - slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) - u.setupMiddlewaresAndProviders(slashingViolationsConsumer) - - expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) - require.NoError(u.T(), err) - - modifiedMessageCode := codec.CodeDKGMessage - - expectedViolation := &slashing.Violation{ - Identity: u.senderID, - PeerID: expectedSenderPeerID.String(), - MsgType: "*messages.DKGMessage", - Channel: channels.TestNetworkChannel, - Protocol: message.ProtocolTypeUnicast, - Err: message.ErrUnauthorizedMessageOnChannel, - } - - slashingViolationsConsumer.On( - "OnUnAuthorizedSenderError", - expectedViolation, - ).Once().Run(func(args mockery.Arguments) { - close(u.waitCh) - }) - - overlay := mocknetwork.NewOverlay(u.T()) - overlay.On("Identities").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }) - overlay.On("Topology").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }, nil) - overlay.On("Identity", expectedSenderPeerID).Return(u.senderID, true) - - // message will be rejected so assert overlay never receives it - defer overlay.AssertNotCalled(u.T(), "Receive", u.senderID.NodeID, mock.AnythingOfType("*message.Message")) - - u.startMiddlewares(overlay) - - require.NoError(u.T(), u.receiverMW.Subscribe(testChannel)) - require.NoError(u.T(), u.senderMW.Subscribe(testChannel)) - - msg, err := network.NewOutgoingScope( - flow.IdentifierList{u.receiverID.NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: "hello", - }, - // we use a custom encoder that encodes the message with an invalid message code. - func(msg interface{}) ([]byte, error) { - e, err := unittest.NetworkCodec().Encode(msg) - require.NoError(u.T(), err) - // manipulate message code byte - e[0] = modifiedMessageCode.Uint8() - return e, nil - }, - message.ProtocolTypeUnicast) - require.NoError(u.T(), err) - - // send message via unicast - err = u.senderMW.SendDirect(msg) - require.NoError(u.T(), err) - - // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens - unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") -} - -// TestUnicastAuthorization_PublicChannel tests that messages sent via unicast on a public channel are not rejected for any reason. 
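// The distinction exercised below is that public channels skip the sender
// authorization applied everywhere else, so identities that would fail the
// staked/role checks can still deliver on them. A toy predicate (the channel
// name and the set are illustrative assumptions):
//
//	func requiresAuthorization(channel string) bool {
//		publicChannels := map[string]bool{
//			"test-network": true, // e.g. a public test channel
//		}
//		return !publicChannels[channel]
//	}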
-func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_PublicChannel() { - // setup mock slashing violations consumer and middlewares - slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) - u.setupMiddlewaresAndProviders(slashingViolationsConsumer) - - expectedPayload := "hello" - msg, err := network.NewOutgoingScope( - flow.IdentifierList{u.receiverID.NodeID}, - testChannel, - &libp2pmessage.TestMessage{ - Text: expectedPayload, - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(u.T(), err) - - overlay := mocknetwork.NewOverlay(u.T()) - overlay.On("Identities").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }) - overlay.On("Topology").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }, nil) - overlay.On("Identity", mock.AnythingOfType("peer.ID")).Return(u.senderID, true) - - // we should receive the message on our overlay, at this point close the waitCh - overlay.On("Receive", mockery.Anything).Return(nil). - Once(). - Run(func(args mockery.Arguments) { - close(u.waitCh) - - msg, ok := args[0].(*network.IncomingMessageScope) - require.True(u.T(), ok) - - require.Equal(u.T(), testChannel, msg.Channel()) // channel - require.Equal(u.T(), u.senderID.NodeID, msg.OriginId()) // sender id - require.Equal(u.T(), u.receiverID.NodeID, msg.TargetIDs()[0]) // target id - require.Equal(u.T(), message.ProtocolTypeUnicast, msg.Protocol()) // protocol - require.Equal(u.T(), expectedPayload, msg.DecodedPayload().(*libp2pmessage.TestMessage).Text) // payload - }) - - u.startMiddlewares(overlay) - - require.NoError(u.T(), u.receiverMW.Subscribe(testChannel)) - require.NoError(u.T(), u.senderMW.Subscribe(testChannel)) - - // send message via unicast - err = u.senderMW.SendDirect(msg) - require.NoError(u.T(), err) - - // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens - unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") -} - -// TestUnicastAuthorization_UnauthorizedUnicastOnChannel tests that messages sent via unicast that are not authorized for unicast are rejected. 
-func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnauthorizedUnicastOnChannel() { - // setup mock slashing violations consumer and middlewares - slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) - u.setupMiddlewaresAndProviders(slashingViolationsConsumer) - - // set sender id role to RoleConsensus to avoid unauthorized sender validation error - u.senderID.Role = flow.RoleConsensus - - expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) - require.NoError(u.T(), err) - - expectedViolation := &slashing.Violation{ - Identity: u.senderID, - PeerID: expectedSenderPeerID.String(), - MsgType: "*messages.BlockProposal", - Channel: channels.ConsensusCommittee, - Protocol: message.ProtocolTypeUnicast, - Err: message.ErrUnauthorizedUnicastOnChannel, - } - - slashingViolationsConsumer.On( - "OnUnauthorizedUnicastOnChannel", - expectedViolation, - ).Once().Run(func(args mockery.Arguments) { - close(u.waitCh) - }) - - overlay := mocknetwork.NewOverlay(u.T()) - overlay.On("Identities").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }) - overlay.On("Topology").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }, nil) - overlay.On("Identity", expectedSenderPeerID).Return(u.senderID, true) - - // message will be rejected so assert overlay never receives it - defer overlay.AssertNotCalled(u.T(), "Receive", u.senderID.NodeID, mock.AnythingOfType("*message.Message")) - - u.startMiddlewares(overlay) - - channel := channels.ConsensusCommittee - require.NoError(u.T(), u.receiverMW.Subscribe(channel)) - require.NoError(u.T(), u.senderMW.Subscribe(channel)) - - // messages.BlockProposal is not authorized to be sent via unicast over the ConsensusCommittee channel - payload := unittest.ProposalFixture() - - msg, err := network.NewOutgoingScope( - flow.IdentifierList{u.receiverID.NodeID}, - channel, - payload, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(u.T(), err) - - // send message via unicast - err = u.senderMW.SendDirect(msg) - require.NoError(u.T(), err) - - // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens - unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") -} - -// TestUnicastAuthorization_ReceiverHasNoSubscription tests that messages sent via unicast are rejected on the receiver end if the receiver does not have a subscription -// to the channel of the message. 
-func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_ReceiverHasNoSubscription() { - // setup mock slashing violations consumer and middlewares - slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) - u.setupMiddlewaresAndProviders(slashingViolationsConsumer) - - expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) - require.NoError(u.T(), err) - - expectedViolation := &slashing.Violation{ - Identity: nil, - PeerID: expectedSenderPeerID.String(), - MsgType: "*message.TestMessage", - Channel: channels.TestNetworkChannel, - Protocol: message.ProtocolTypeUnicast, - Err: middleware.ErrUnicastMsgWithoutSub, - } - - slashingViolationsConsumer.On( - "OnUnauthorizedUnicastOnChannel", - expectedViolation, - ).Once().Run(func(args mockery.Arguments) { - close(u.waitCh) - }) - - overlay := mocknetwork.NewOverlay(u.T()) - overlay.On("Identities").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }) - overlay.On("Topology").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }, nil) - - // message will be rejected so assert overlay never receives it - defer overlay.AssertNotCalled(u.T(), "Receive", u.senderID.NodeID, mock.AnythingOfType("*message.Message")) - - u.startMiddlewares(overlay) - - channel := channels.TestNetworkChannel - - msg, err := network.NewOutgoingScope( - flow.IdentifierList{u.receiverID.NodeID}, - channel, - &libp2pmessage.TestMessage{ - Text: "TestUnicastAuthorization_ReceiverHasNoSubscription", - }, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(u.T(), err) - - // send message via unicast - err = u.senderMW.SendDirect(msg) - require.NoError(u.T(), err) - - // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens - unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") -} - -// TestUnicastAuthorization_ReceiverHasSubscription tests that messages sent via unicast are processed on the receiver end if the receiver does have a subscription -// to the channel of the message. -func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_ReceiverHasSubscription() { - // setup mock slashing violations consumer and middlewares - slashingViolationsConsumer := mocknetwork.NewViolationsConsumer(u.T()) - u.setupMiddlewaresAndProviders(slashingViolationsConsumer) - channel := channels.RequestReceiptsByBlockID - - msg, err := network.NewOutgoingScope( - flow.IdentifierList{u.receiverID.NodeID}, - channel, - &messages.EntityRequest{}, - unittest.NetworkCodec().Encode, - message.ProtocolTypeUnicast) - require.NoError(u.T(), err) - - u.senderID.Role = flow.RoleConsensus - u.receiverID.Role = flow.RoleExecution - - overlay := mocknetwork.NewOverlay(u.T()) - overlay.On("Identities").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }) - overlay.On("Topology").Maybe().Return(func() flow.IdentityList { - return u.providers[0].Identities(filter.Any) - }, nil) - overlay.On("Identity", mock.AnythingOfType("peer.ID")).Return(u.senderID, true) - - // we should receive the message on our overlay, at this point close the waitCh - overlay.On("Receive", mockery.Anything).Return(nil). - Once(). 
- Run(func(args mockery.Arguments) { - close(u.waitCh) - - msg, ok := args[0].(*network.IncomingMessageScope) - require.True(u.T(), ok) - - require.Equal(u.T(), channel, msg.Channel()) // channel - require.Equal(u.T(), u.senderID.NodeID, msg.OriginId()) // sender id - require.Equal(u.T(), u.receiverID.NodeID, msg.TargetIDs()[0]) // target id - require.Equal(u.T(), message.ProtocolTypeUnicast, msg.Protocol()) // protocol - }) - - u.startMiddlewares(overlay) - - require.NoError(u.T(), u.receiverMW.Subscribe(channel)) - require.NoError(u.T(), u.senderMW.Subscribe(channel)) - - // send message via unicast - err = u.senderMW.SendDirect(msg) - require.NoError(u.T(), err) - - // wait for slashing violations consumer mock to invoke run func and close ch if expected method call happens - unittest.RequireCloseBefore(u.T(), u.waitCh, u.channelCloseDuration, "could close ch on time") -} diff --git a/network/p2p/middleware/readSubscription.go b/network/underlay/internal/readSubscription.go similarity index 79% rename from network/p2p/middleware/readSubscription.go rename to network/underlay/internal/readSubscription.go index 442fb152453..3e0a63e6dd3 100644 --- a/network/p2p/middleware/readSubscription.go +++ b/network/underlay/internal/readSubscription.go @@ -1,4 +1,4 @@ -package middleware +package internal import ( "context" @@ -19,17 +19,17 @@ import ( // ReadSubscriptionCallBackFunction the callback called when a new message is received on the read subscription type ReadSubscriptionCallBackFunction func(msg *message.Message, peerID peer.ID) -// readSubscription reads the messages coming in on the subscription and calls the given callback until +// ReadSubscription reads the messages coming in on the subscription and calls the given callback until // the context of the subscription is cancelled. -type readSubscription struct { +type ReadSubscription struct { log zerolog.Logger sub p2p.Subscription callback ReadSubscriptionCallBackFunction } -// newReadSubscription reads the messages coming in on the subscription -func newReadSubscription(sub p2p.Subscription, callback ReadSubscriptionCallBackFunction, log zerolog.Logger) *readSubscription { - r := readSubscription{ +// NewReadSubscription reads the messages coming in on the subscription +func NewReadSubscription(sub p2p.Subscription, callback ReadSubscriptionCallBackFunction, log zerolog.Logger) *ReadSubscription { + r := ReadSubscription{ log: log.With().Str("channel", sub.Topic()).Logger(), sub: sub, callback: callback, @@ -38,9 +38,9 @@ func newReadSubscription(sub p2p.Subscription, callback ReadSubscriptionCallBack return &r } -// receiveLoop must be run in a goroutine. It continuously receives +// ReceiveLoop must be run in a goroutine. 
It continuously receives // messages for the topic and calls the callback synchronously -func (r *readSubscription) receiveLoop(ctx context.Context) { +func (r *ReadSubscription) ReceiveLoop(ctx context.Context) { defer r.log.Debug().Msg("exiting receive routine") for { @@ -48,7 +48,7 @@ func (r *readSubscription) receiveLoop(ctx context.Context) { rawMsg, err := r.sub.Next(ctx) if err != nil { - // middleware may have cancelled the context + // network may have cancelled the context if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return } diff --git a/network/underlay/network.go b/network/underlay/network.go new file mode 100644 index 00000000000..ba4bfd82331 --- /dev/null +++ b/network/underlay/network.go @@ -0,0 +1,1280 @@ +package underlay + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "sync" + "time" + + ggio "github.com/gogo/protobuf/io" + "github.com/ipfs/go-datastore" + libp2pnet "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/network" + alspmgr "github.com/onflow/flow-go/network/alsp/manager" + netcache "github.com/onflow/flow-go/network/cache" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/codec" + "github.com/onflow/flow-go/network/internal/p2putils" + "github.com/onflow/flow-go/network/message" + "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/blob" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" + "github.com/onflow/flow-go/network/p2p/ping" + "github.com/onflow/flow-go/network/p2p/subscription" + "github.com/onflow/flow-go/network/p2p/unicast/protocols" + "github.com/onflow/flow-go/network/p2p/unicast/ratelimit" + "github.com/onflow/flow-go/network/p2p/utils" + "github.com/onflow/flow-go/network/queue" + "github.com/onflow/flow-go/network/underlay/internal" + "github.com/onflow/flow-go/network/validator" + flowpubsub "github.com/onflow/flow-go/network/validator/pubsub" + _ "github.com/onflow/flow-go/utils/binstat" + "github.com/onflow/flow-go/utils/logging" +) + +const ( + _ = iota + _ = 1 << (10 * iota) + mb + gb +) + +const ( + // DefaultMaxUnicastMsgSize defines maximum message size in unicast mode for most messages + DefaultMaxUnicastMsgSize = 10 * mb // 10 mb + + // LargeMsgMaxUnicastMsgSize defines maximum message size in unicast mode for large messages + LargeMsgMaxUnicastMsgSize = gb // 1 gb + + // DefaultUnicastTimeout is the default maximum time to wait for a default unicast request to complete + // assuming at least a 1mb/sec connection + DefaultUnicastTimeout = 5 * time.Second + + // LargeMsgUnicastTimeout is the maximum time to wait for a unicast request to complete for large message size + LargeMsgUnicastTimeout = 1000 * time.Second +) + +var ( + // ErrUnicastMsgWithoutSub error is provided to the slashing violations consumer in the case where + // the network receives a message via unicast but does not have a corresponding subscription for + // the channel in that message. 
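// Aside: the size constants above use Go's bit-shift iota pattern, where each
// constant in the block repeats the expression 1 << (10 * iota), so every step
// is 1024 times the previous one. A standalone sketch of the same pattern
// (kb is a hypothetical extra step, not part of this file):
//
//	const (
//		_  = iota             // skip zero
//		kb = 1 << (10 * iota) // 1 << 10 == 1024
//		mb                    // 1 << 20 == 1048576
//		gb                    // 1 << 30 == 1073741824
//	)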
+	ErrUnicastMsgWithoutSub = errors.New("networking layer does not have subscription for the channel ID indicated in the unicast message received")
+)
+
+// Network serves as the comprehensive networking layer that integrates three interfaces within Flow: Underlay, EngineRegistry, and ConduitAdapter.
+// It is responsible for creating conduits through which engines can send and receive messages to and from other engines on the network, as well as registering other services
+// such as BlobService and PingService. It also provides a set of APIs that can be used to send messages to other nodes on the network.
+// Network is also responsible for managing the topology of the network, i.e., the set of nodes that are connected to each other.
+type Network struct {
+	// TODO: using a waitgroup here doesn't actually guarantee that we'll wait for all
+	// goroutines to exit, because new goroutines could be started after we've already
+	// returned from wg.Wait(). We need to solve this the right way using ComponentManager
+	// and worker routines.
+	wg sync.WaitGroup
+	*component.ComponentManager
+	ctx                         context.Context
+	sporkId                     flow.Identifier
+	identityProvider            module.IdentityProvider
+	identityTranslator          p2p.IDTranslator
+	logger                      zerolog.Logger
+	codec                       network.Codec
+	me                          module.Local
+	metrics                     module.NetworkCoreMetrics
+	receiveCache                *netcache.ReceiveCache // used to deduplicate incoming messages
+	queue                       network.MessageQueue
+	subscriptionManager         network.SubscriptionManager // used to keep track of subscribed channels
+	conduitFactory              network.ConduitFactory
+	topology                    network.Topology
+	registerEngineRequests      chan *registerEngineRequest
+	registerBlobServiceRequests chan *registerBlobServiceRequest
+	misbehaviorReportManager    network.MisbehaviorReportManager
+	unicastMessageTimeout       time.Duration
+	libP2PNode                  p2p.LibP2PNode
+	bitswapMetrics              module.BitswapMetrics
+	peerUpdateLock              sync.Mutex // protects the peer update process
+	previousProtocolStatePeers  []peer.AddrInfo
+	slashingViolationsConsumer  network.ViolationsConsumer
+	peerManagerFilters          []p2p.PeerFilter
+	unicastRateLimiters         *ratelimit.RateLimiters
+	validators                  []network.MessageValidator
+	authorizedSenderValidator   *validator.AuthorizedSenderValidator
+	preferredUnicasts           []protocols.ProtocolName
+}
+
+var _ network.EngineRegistry = &Network{}
+var _ network.Underlay = &Network{}
+var _ network.ConduitAdapter = &Network{}
+
+type registerEngineRequest struct {
+	channel          channels.Channel
+	messageProcessor network.MessageProcessor
+	respChan         chan *registerEngineResp
+}
+
+type registerEngineResp struct {
+	conduit network.Conduit
+	err     error
+}
+
+type registerBlobServiceRequest struct {
+	channel  channels.Channel
+	ds       datastore.Batching
+	opts     []network.BlobServiceOption
+	respChan chan *registerBlobServiceResp
+}
+
+type registerBlobServiceResp struct {
+	blobService network.BlobService
+	err     error
+}
+
+var ErrNetworkShutdown = errors.New("network has already shutdown")
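// Aside: the TODO on Network.wg above flags a real hazard with sync.WaitGroup:
// every Add must happen before the corresponding Wait, or Wait can return
// while work is still being spawned. A hypothetical, simplified illustration
// (shutdownRace is not part of this package):
func shutdownRace(wg *sync.WaitGroup, newWork <-chan func()) {
	go func() {
		for job := range newWork {
			wg.Add(1) // racy: may execute after wg.Wait() below has returned
			go func(job func()) {
				defer wg.Done()
				job()
			}(job)
		}
	}()
	wg.Wait() // does not account for goroutines added after it returns
}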
+
+// NetworkConfig is a configuration struct for the network. It contains all the
+// necessary components to create a new network.
+type NetworkConfig struct {
+	Logger                           zerolog.Logger
+	Codec                            network.Codec
+	Me                               module.Local
+	Topology                         network.Topology
+	Metrics                          module.NetworkCoreMetrics
+	IdentityProvider                 module.IdentityProvider
+	IdentityTranslator               p2p.IDTranslator
+	ReceiveCache                     *netcache.ReceiveCache
+	ConduitFactory                   network.ConduitFactory
+	AlspCfg                          *alspmgr.MisbehaviorReportManagerConfig
+	SporkId                          flow.Identifier
+	UnicastMessageTimeout            time.Duration
+	Libp2pNode                       p2p.LibP2PNode
+	BitSwapMetrics                   module.BitswapMetrics
+	SlashingViolationConsumerFactory func(network.ConduitAdapter) network.ViolationsConsumer
+}
+
+// Validate validates the configuration, and sets default values for any missing fields.
+func (cfg *NetworkConfig) Validate() {
+	if cfg.UnicastMessageTimeout <= 0 {
+		cfg.UnicastMessageTimeout = DefaultUnicastTimeout
+	}
+}
+
+// NetworkConfigOption is a function that can be used to override network config parameters.
+type NetworkConfigOption func(*NetworkConfig)
+
+// WithAlspConfig overrides the default misbehavior report manager config. It is mostly used for testing purposes.
+// Note: do not override the default misbehavior report manager config in production unless you know what you are doing.
+// Args:
+// cfg: misbehavior report manager config
+// Returns:
+// NetworkConfigOption: network param option
+func WithAlspConfig(cfg *alspmgr.MisbehaviorReportManagerConfig) NetworkConfigOption {
+	return func(params *NetworkConfig) {
+		params.AlspCfg = cfg
+	}
+}
+
+// WithCodec overrides the default codec (i.e., encoder and decoder). It is mostly used for testing purposes.
+// Note: do not override the default codec in production unless you know what you are doing.
+func WithCodec(codec network.Codec) NetworkConfigOption {
+	return func(params *NetworkConfig) {
+		params.Codec = codec
+	}
+}
+
+func WithSlashingViolationConsumerFactory(factory func(adapter network.ConduitAdapter) network.ViolationsConsumer) NetworkConfigOption {
+	return func(params *NetworkConfig) {
+		params.SlashingViolationConsumerFactory = factory
+	}
+}
+
+// NetworkOption is a function that can be used to override network attributes.
+// It is mostly used for testing purposes.
+// Note: do not override network attributes in production unless you know what you are doing.
+type NetworkOption func(*Network)
+
+// WithAlspManager sets the misbehavior report manager for the network. It overrides the default
+// misbehavior report manager that is created from the config.
+// Note that this option is mostly used for testing purposes, do not use it in production unless you
+// know what you are doing.
+//
+// Args:
+//
+//	mgr: misbehavior report manager
+//
+// Returns:
+//
+//	NetworkOption: network option
+func WithAlspManager(mgr network.MisbehaviorReportManager) NetworkOption {
+	return func(n *Network) {
+		n.misbehaviorReportManager = mgr
+	}
+}
+
+// WithPeerManagerFilters sets the peer manager filters for the network. It overrides the default
+// peer manager filters that are created from the config.
+func WithPeerManagerFilters(filters ...p2p.PeerFilter) NetworkOption {
+	return func(n *Network) {
+		n.peerManagerFilters = filters
+	}
+}
+
+// WithUnicastRateLimiters sets the unicast rate limiters for the network. It overrides the default
+// unicast rate limiters that are created from the config.
+func WithUnicastRateLimiters(limiters *ratelimit.RateLimiters) NetworkOption {
+	return func(n *Network) {
+		n.unicastRateLimiters = limiters
+	}
+}
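// Aside: NetworkConfigOption and NetworkOption above follow Go's functional
// options pattern: defaults are set first, then each option mutates the target
// in place. A minimal self-contained sketch (Config, WithTimeout and New are
// hypothetical stand-ins, not part of this package):
type Config struct{ Timeout time.Duration }

type Option func(*Config)

func WithTimeout(d time.Duration) Option {
	return func(c *Config) { c.Timeout = d }
}

func New(opts ...Option) *Config {
	cfg := &Config{Timeout: 5 * time.Second} // defaults first
	for _, opt := range opts {
		opt(cfg) // options applied last override the defaults
	}
	return cfg
}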
+
+// WithPreferredUnicastProtocols sets the preferred unicast protocols for the network. It overrides the
+// default preferred unicast protocols.
+func WithPreferredUnicastProtocols(protocols ...protocols.ProtocolName) NetworkOption {
+	return func(n *Network) {
+		n.preferredUnicasts = protocols
+	}
+}
+
+// WithMessageValidators sets the message validators for the network. It overrides the default
+// message validators.
+func WithMessageValidators(validators ...network.MessageValidator) NetworkOption {
+	return func(n *Network) {
+		n.validators = validators
+	}
+}
+
+// NewNetwork creates a new network with the given configuration.
+// Args:
+// param: network configuration
+// opts: network options
+// Returns:
+// Network: a new network
+func NewNetwork(param *NetworkConfig, opts ...NetworkOption) (*Network, error) {
+	param.Validate()
+
+	n := &Network{
+		logger:                      param.Logger.With().Str("component", "network").Logger(),
+		codec:                       param.Codec,
+		me:                          param.Me,
+		receiveCache:                param.ReceiveCache,
+		topology:                    param.Topology,
+		metrics:                     param.Metrics,
+		bitswapMetrics:              param.BitSwapMetrics,
+		identityProvider:            param.IdentityProvider,
+		conduitFactory:              param.ConduitFactory,
+		registerEngineRequests:      make(chan *registerEngineRequest),
+		registerBlobServiceRequests: make(chan *registerBlobServiceRequest),
+		sporkId:                     param.SporkId,
+		identityTranslator:          param.IdentityTranslator,
+		unicastMessageTimeout:       param.UnicastMessageTimeout,
+		libP2PNode:                  param.Libp2pNode,
+		unicastRateLimiters:         ratelimit.NoopRateLimiters(),
+		validators:                  DefaultValidators(param.Logger.With().Str("component", "network-validators").Logger(), param.Me.NodeID()),
+	}
+
+	n.subscriptionManager = subscription.NewChannelSubscriptionManager(n)
+
+	misbehaviorMngr, err := alspmgr.NewMisbehaviorReportManager(param.AlspCfg, n)
+	if err != nil {
+		return nil, fmt.Errorf("could not create misbehavior report manager: %w", err)
+	}
+	n.misbehaviorReportManager = misbehaviorMngr
+
+	for _, opt := range opts {
+		opt(n)
+	}
+
+	if err := n.conduitFactory.RegisterAdapter(n); err != nil {
+		return nil, fmt.Errorf("could not register network adapter: %w", err)
+	}
+
+	builder := component.NewComponentManagerBuilder()
+	builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+		n.logger.Debug().Msg("starting misbehavior manager")
+		n.misbehaviorReportManager.Start(ctx)
+
+		select {
+		case <-n.misbehaviorReportManager.Ready():
+			n.logger.Debug().Msg("misbehavior manager is ready")
+			ready()
+		case <-ctx.Done():
+			// fall through to the shutdown handling below for a graceful exit.
+		}
+
+		<-ctx.Done()
+		n.logger.Debug().Msg("stopping misbehavior manager")
+		<-n.misbehaviorReportManager.Done()
+		n.logger.Debug().Msg("misbehavior manager stopped")
+	})
+	builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+		n.logger.Debug().Msg("setting up network context")
+		n.ctx = ctx
+
+		ready()
+
+		<-ctx.Done()
+		n.logger.Debug().Msg("network context is done")
+	})
+
+	for _, limiter := range n.unicastRateLimiters.Limiters() {
+		rateLimiter := limiter
+		builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+			rateLimiter.Start(ctx)
+			<-rateLimiter.Ready()
+			// signal readiness only once the rate limiter itself is ready
+			ready()
+			<-rateLimiter.Done()
+		})
+	}
+
+	builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
+		// creation of slashing violations consumer should be postponed till here where the network
+		// is started and the overlay is set.
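		// Aside: every AddWorker callback above follows the same lifecycle
		// contract. A minimal sketch of that shape (exampleWorker is
		// hypothetical, simplified):
		//
		//	func exampleWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) {
		//		// start owned sub-components here ...
		//		ready()      // signal readiness exactly once
		//		<-ctx.Done() // block until shutdown is requested
		//		// drain / wait for owned sub-components here ...
		//	}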
+ n.slashingViolationsConsumer = param.SlashingViolationConsumerFactory(n) + + n.authorizedSenderValidator = validator.NewAuthorizedSenderValidator( + n.logger, + n.slashingViolationsConsumer, + n.Identity) + + err := n.libP2PNode.WithDefaultUnicastProtocol(n.handleIncomingStream, n.preferredUnicasts) + if err != nil { + ctx.Throw(fmt.Errorf("could not register preferred unicast protocols on libp2p node: %w", err)) + } + + n.UpdateNodeAddresses() + n.libP2PNode.WithPeersProvider(n.authorizedPeers) + + ready() + + <-ctx.Done() + n.logger.Info().Str("component", "network").Msg("stopping subroutines, blocking on read connection loops to end") + + // wait for the readConnection and readSubscription routines to stop + n.wg.Wait() + n.logger.Info().Str("component", "network").Msg("stopped subroutines") + }) + + builder.AddWorker(n.createInboundMessageQueue) + builder.AddWorker(n.processRegisterEngineRequests) + builder.AddWorker(n.processRegisterBlobServiceRequests) + + n.ComponentManager = builder.Build() + return n, nil +} + +func (n *Network) processRegisterEngineRequests(parent irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + + // we need to wait for the libp2p node to be ready before we can register engines + n.logger.Debug().Msg("waiting for libp2p node to be ready") + <-n.libP2PNode.Ready() + n.logger.Debug().Msg("libp2p node is ready") + + for { + select { + case req := <-n.registerEngineRequests: + conduit, err := n.handleRegisterEngineRequest(parent, req.channel, req.messageProcessor) + resp := ®isterEngineResp{ + conduit: conduit, + err: err, + } + + select { + case <-parent.Done(): + return + case req.respChan <- resp: + } + case <-parent.Done(): + return + } + } +} + +func (n *Network) processRegisterBlobServiceRequests(parent irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + + n.logger.Debug().Msg("waiting for libp2p node to be ready") + <-n.libP2PNode.Ready() + n.logger.Debug().Msg("libp2p node is ready") + + for { + select { + case req := <-n.registerBlobServiceRequests: + blobService, err := n.handleRegisterBlobServiceRequest(parent, req.channel, req.ds, req.opts) + resp := ®isterBlobServiceResp{ + blobService: blobService, + err: err, + } + + select { + case <-parent.Done(): + return + case req.respChan <- resp: + } + case <-parent.Done(): + return + } + } +} + +// createInboundMessageQueue creates the queue that will be used to process incoming messages. +func (n *Network) createInboundMessageQueue(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + n.queue = queue.NewMessageQueue(ctx, queue.GetEventPriority, n.metrics) + queue.CreateQueueWorkers(ctx, queue.DefaultNumWorkers, n.queue, n.queueSubmitFunc) + + ready() +} + +func (n *Network) handleRegisterEngineRequest(parent irrecoverable.SignalerContext, channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) { + if !channels.ChannelExists(channel) { + return nil, fmt.Errorf("unknown channel: %s, should be registered in topic map", channel) + } + + err := n.subscriptionManager.Register(channel, engine) + if err != nil { + return nil, fmt.Errorf("failed to register engine for channel %s: %w", channel, err) + } + + n.logger.Info(). + Str("channel_id", channel.String()). 
+ Msg("channel successfully registered") + + // create the conduit + newConduit, err := n.conduitFactory.NewConduit(parent, channel) + if err != nil { + return nil, fmt.Errorf("could not create conduit using factory: %w", err) + } + + return newConduit, nil +} + +func (n *Network) handleRegisterBlobServiceRequest( + parent irrecoverable.SignalerContext, + channel channels.Channel, + ds datastore.Batching, + opts []network.BlobServiceOption, +) (network.BlobService, error) { + bs, err := blob.NewBlobService(n.libP2PNode.Host(), n.libP2PNode.Routing(), channel.String(), ds, n.bitswapMetrics, n.logger, opts...) + if err != nil { + return nil, fmt.Errorf("could not create blob service: %w", err) + } + + // start the blob service using the network's context + bs.Start(parent) + + return bs, nil +} + +// Register will register the given engine with the given unique engine engineID, +// returning a conduit to directly submit messages to the message bus of the +// engine. +func (n *Network) Register(channel channels.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { + respChan := make(chan *registerEngineResp) + + select { + case <-n.ComponentManager.ShutdownSignal(): + return nil, ErrNetworkShutdown + case n.registerEngineRequests <- ®isterEngineRequest{ + channel: channel, + messageProcessor: messageProcessor, + respChan: respChan, + }: + select { + case <-n.ComponentManager.ShutdownSignal(): + return nil, ErrNetworkShutdown + case resp := <-respChan: + return resp.conduit, resp.err + } + } +} + +func (n *Network) RegisterPingService(pingProtocol protocol.ID, provider network.PingInfoProvider) (network.PingService, error) { + select { + case <-n.ComponentManager.ShutdownSignal(): + return nil, ErrNetworkShutdown + default: + return ping.NewPingService(n.libP2PNode.Host(), pingProtocol, n.logger, provider), nil + } +} + +// RegisterBlobService registers a BlobService on the given channel. +// The returned BlobService can be used to request blobs from the network. +func (n *Network) RegisterBlobService(channel channels.Channel, ds datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { + respChan := make(chan *registerBlobServiceResp) + + select { + case <-n.ComponentManager.ShutdownSignal(): + return nil, ErrNetworkShutdown + case n.registerBlobServiceRequests <- ®isterBlobServiceRequest{ + channel: channel, + ds: ds, + opts: opts, + respChan: respChan, + }: + select { + case <-n.ComponentManager.ShutdownSignal(): + return nil, ErrNetworkShutdown + case resp := <-respChan: + return resp.blobService, resp.err + } + } +} + +// UnRegisterChannel unregisters the engine for the specified channel. The engine will no longer be able to send or +// receive messages from that channel. 
+func (n *Network) UnRegisterChannel(channel channels.Channel) error { + err := n.subscriptionManager.Unregister(channel) + if err != nil { + return fmt.Errorf("failed to unregister engine for channel %s: %w", channel, err) + } + return nil +} + +func (n *Network) Identities() flow.IdentityList { + return n.identityProvider.Identities(filter.NotEjectedFilter) +} + +func (n *Network) Identity(pid peer.ID) (*flow.Identity, bool) { + return n.identityProvider.ByPeerID(pid) +} + +func (n *Network) Receive(msg network.IncomingMessageScope) error { + n.metrics.InboundMessageReceived(msg.Size(), msg.Channel().String(), msg.Protocol().String(), msg.PayloadType()) + + err := n.processNetworkMessage(msg) + if err != nil { + return fmt.Errorf("could not process message: %w", err) + } + return nil +} + +func (n *Network) processNetworkMessage(msg network.IncomingMessageScope) error { + // checks the cache for deduplication and adds the message if not already present + if !n.receiveCache.Add(msg.EventID()) { + // drops duplicate message + n.logger.Debug(). + Hex("sender_id", logging.ID(msg.OriginId())). + Hex("event_id", msg.EventID()). + Str("channel", msg.Channel().String()). + Msg("dropping message due to duplication") + + n.metrics.DuplicateInboundMessagesDropped(msg.Channel().String(), msg.Protocol().String(), msg.PayloadType()) + + return nil + } + + // create queue message + qm := queue.QMessage{ + Payload: msg.DecodedPayload(), + Size: msg.Size(), + Target: msg.Channel(), + SenderID: msg.OriginId(), + } + + // insert the message in the queue + err := n.queue.Insert(qm) + if err != nil { + return fmt.Errorf("failed to insert message in queue: %w", err) + } + + return nil +} + +// UnicastOnChannel sends the message in a reliable way to the given recipient. +// It uses 1-1 direct messaging over the underlying network to deliver the message. +// It returns an error if unicasting fails. +func (n *Network) UnicastOnChannel(channel channels.Channel, payload interface{}, targetID flow.Identifier) error { + if targetID == n.me.NodeID() { + n.logger.Debug().Msg("network skips self unicasting") + return nil + } + + msg, err := message.NewOutgoingScope( + flow.IdentifierList{targetID}, + channels.TopicFromChannel(channel, n.sporkId), + payload, + n.codec.Encode, + message.ProtocolTypeUnicast) + if err != nil { + return fmt.Errorf("could not generate outgoing message scope for unicast: %w", err) + } + + n.metrics.UnicastMessageSendingStarted(channel.String()) + defer n.metrics.UnicastMessageSendingCompleted(channel.String()) + + // since it is a unicast, we only need to get the first peer ID. + peerID, err := n.identityTranslator.GetPeerID(msg.TargetIds()[0]) + if err != nil { + return fmt.Errorf("could not find peer id for target id: %w", err) + } + + maxMsgSize := unicastMaxMsgSize(msg.PayloadType()) + if msg.Size() > maxMsgSize { + // message size goes beyond maximum size that the serializer can handle. + // proceeding with this message results in closing the connection by the target side, and + // delivery failure. 
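	// Aside: the receiveCache.Add call in processNetworkMessage above is a
	// test-and-set that returns false for already-seen event IDs. A
	// hypothetical minimal equivalent (the real ReceiveCache is a bounded
	// cache, not an unbounded map):
	//
	//	type dedupCache struct {
	//		mu   sync.Mutex
	//		seen map[string]struct{}
	//	}
	//
	//	func (c *dedupCache) Add(eventID []byte) bool {
	//		c.mu.Lock()
	//		defer c.mu.Unlock()
	//		key := string(eventID)
	//		if _, ok := c.seen[key]; ok {
	//			return false // duplicate, drop the message
	//		}
	//		c.seen[key] = struct{}{}
	//		return true // first sighting, process the message
	//	}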
+ return fmt.Errorf("message size %d exceeds configured max message size %d", msg.Size(), maxMsgSize) + } + + maxTimeout := n.unicastMaxMsgDuration(msg.PayloadType()) + + // pass in a context with timeout to make the unicast call fail fast + ctx, cancel := context.WithTimeout(n.ctx, maxTimeout) + defer cancel() + + // protect the underlying connection from being inadvertently pruned by the peer manager while the stream and + // connection creation is being attempted, and remove it from protected list once stream created. + channel, ok := channels.ChannelFromTopic(msg.Topic()) + if !ok { + return fmt.Errorf("could not find channel for topic %s", msg.Topic()) + } + streamProtectionTag := fmt.Sprintf("%v:%v", channel, msg.PayloadType()) + + err = n.libP2PNode.OpenAndWriteOnStream(ctx, peerID, streamProtectionTag, func(stream libp2pnet.Stream) error { + bufw := bufio.NewWriter(stream) + writer := ggio.NewDelimitedWriter(bufw) + + err = writer.WriteMsg(msg.Proto()) + if err != nil { + return fmt.Errorf("failed to send message to target id %x with peer id %s: %w", msg.TargetIds()[0], peerID, err) + } + + // flush the stream + err = bufw.Flush() + if err != nil { + return fmt.Errorf("failed to flush stream for target id %x with peer id %s: %w", msg.TargetIds()[0], peerID, err) + } + + return nil + }) + if err != nil { + return fmt.Errorf("failed to send message to %x: %w", targetID, err) + } + + n.metrics.OutboundMessageSent(msg.Size(), channel.String(), message.ProtocolTypeUnicast.String(), msg.PayloadType()) + return nil +} + +// PublishOnChannel sends the message in an unreliable way to the given recipients. +// In this context, unreliable means that the message is published over a libp2p pub-sub +// channel and can be read by any node subscribed to that channel. +// The selector could be used to optimize or restrict delivery. +func (n *Network) PublishOnChannel(channel channels.Channel, message interface{}, targetIDs ...flow.Identifier) error { + filteredIDs := flow.IdentifierList(targetIDs).Filter(n.removeSelfFilter()) + + if len(filteredIDs) == 0 { + return network.EmptyTargetList + } + + err := n.sendOnChannel(channel, message, filteredIDs) + + if err != nil { + return fmt.Errorf("failed to publish on channel %s: %w", channel, err) + } + + return nil +} + +// MulticastOnChannel unreliably sends the specified event over the channel to randomly selected 'num' number of recipients +// selected from the specified targetIDs. +func (n *Network) MulticastOnChannel(channel channels.Channel, message interface{}, num uint, targetIDs ...flow.Identifier) error { + selectedIDs, err := flow.IdentifierList(targetIDs).Filter(n.removeSelfFilter()).Sample(num) + if err != nil { + return fmt.Errorf("sampling failed: %w", err) + } + + if len(selectedIDs) == 0 { + return network.EmptyTargetList + } + + err = n.sendOnChannel(channel, message, selectedIDs) + + // publishes the message to the selected targets + if err != nil { + return fmt.Errorf("failed to multicast on channel %s: %w", channel, err) + } + + return nil +} + +// removeSelfFilter removes the flow.Identifier of this node if present, from the list of nodes +func (n *Network) removeSelfFilter() flow.IdentifierFilter { + return func(id flow.Identifier) bool { + return id != n.me.NodeID() + } +} + +// sendOnChannel sends the message on channel to targets. +func (n *Network) sendOnChannel(channel channels.Channel, msg interface{}, targetIDs []flow.Identifier) error { + n.logger.Debug(). + Interface("message", msg). + Str("channel", channel.String()). 
+ Str("target_ids", fmt.Sprintf("%v", targetIDs)). + Msg("sending new message on channel") + + // generate network message (encoding) based on list of recipients + scope, err := message.NewOutgoingScope( + targetIDs, + channels.TopicFromChannel(channel, n.sporkId), + msg, + n.codec.Encode, + message.ProtocolTypePubSub) + if err != nil { + return fmt.Errorf("failed to generate outgoing message scope %s: %w", channel, err) + } + + // publish the message through the channel, however, the message + // is only restricted to targetIDs (if they subscribed to channel). + err = n.libP2PNode.Publish(n.ctx, scope) + if err != nil { + return fmt.Errorf("failed to send message on channel %s: %w", channel, err) + } + + n.metrics.OutboundMessageSent(scope.Size(), channel.String(), message.ProtocolTypePubSub.String(), scope.PayloadType()) + + return nil +} + +// queueSubmitFunc submits the message to the engine synchronously. It is the callback for the queue worker +// when it gets a message from the queue +func (n *Network) queueSubmitFunc(message interface{}) { + qm := message.(queue.QMessage) + + logger := n.logger.With(). + Str("channel_id", qm.Target.String()). + Str("sender_id", qm.SenderID.String()). + Logger() + + eng, err := n.subscriptionManager.GetEngine(qm.Target) + if err != nil { + // This means the message was received on a channel that the node has not registered an + // engine for. This may be because the message was received during startup and the node + // hasn't subscribed to the channel yet, or there is a bug. + logger.Err(err).Msg("failed to submit message") + return + } + + logger.Debug().Msg("submitting message to engine") + + n.metrics.MessageProcessingStarted(qm.Target.String()) + + // submits the message to the engine synchronously and + // tracks its processing time. + startTimestamp := time.Now() + + err = eng.Process(qm.Target, qm.SenderID, qm.Payload) + if err != nil { + logger.Err(err).Msg("failed to process message") + } + + n.metrics.MessageProcessingFinished(qm.Target.String(), time.Since(startTimestamp)) +} + +func (n *Network) Topology() flow.IdentityList { + return n.topology.Fanout(n.Identities()) +} + +// ReportMisbehaviorOnChannel reports the misbehavior of a node on sending a message to the current node that appears +// valid based on the networking layer but is considered invalid by the current node based on the Flow protocol. +// The misbehavior report is sent to the current node's networking layer on the given channel to be processed. +// Args: +// - channel: The channel on which the misbehavior report is sent. +// - report: The misbehavior report to be sent. +// Returns: +// none +func (n *Network) ReportMisbehaviorOnChannel(channel channels.Channel, report network.MisbehaviorReport) { + n.misbehaviorReportManager.HandleMisbehaviorReport(channel, report) +} + +func DefaultValidators(log zerolog.Logger, flowID flow.Identifier) []network.MessageValidator { + return []network.MessageValidator{ + validator.ValidateNotSender(flowID), // validator to filter out messages sent by this node itself + validator.ValidateTarget(log, flowID), // validator to filter out messages not intended for this node + } +} + +// isProtocolParticipant returns a PeerFilter that returns true if a peer is a staked (i.e., authorized) node. 
+func (n *Network) isProtocolParticipant() p2p.PeerFilter {
+	return func(p peer.ID) error {
+		if _, ok := n.Identity(p); !ok {
+			return fmt.Errorf("failed to get identity of unknown peer with peer id %s", p2plogging.PeerId(p))
+		}
+		return nil
+	}
+}
+
+func (n *Network) peerIDs(flowIDs flow.IdentifierList) peer.IDSlice {
+	result := make([]peer.ID, 0, len(flowIDs))
+
+	for _, fid := range flowIDs {
+		pid, err := n.identityTranslator.GetPeerID(fid)
+		if err != nil {
+			// We probably don't need to fail the entire function here, since the other
+			// translations may still succeed
+			n.logger.
+				Err(err).
+				Str(logging.KeySuspicious, "true").
+				Hex("node_id", logging.ID(fid)).
+				Msg("failed to translate to peer ID")
+			continue
+		}
+
+		result = append(result, pid)
+	}
+
+	return result
+}
+
+func (n *Network) UpdateNodeAddresses() {
+	n.logger.Info().Msg("updating protocol state node addresses")
+
+	ids := n.Identities()
+	newInfos, invalid := utils.PeerInfosFromIDs(ids)
+
+	for id, err := range invalid {
+		n.logger.
+			Err(err).
+			Bool(logging.KeySuspicious, true).
+			Hex("node_id", logging.ID(id)).
+			Msg("failed to extract peer info from identity")
+	}
+
+	n.peerUpdateLock.Lock()
+	defer n.peerUpdateLock.Unlock()
+
+	// set old addresses to expire
+	for _, oldInfo := range n.previousProtocolStatePeers {
+		n.libP2PNode.Host().Peerstore().SetAddrs(oldInfo.ID, oldInfo.Addrs, peerstore.TempAddrTTL)
+	}
+
+	for _, info := range newInfos {
+		n.libP2PNode.Host().Peerstore().SetAddrs(info.ID, info.Addrs, peerstore.PermanentAddrTTL)
+	}
+
+	n.previousProtocolStatePeers = newInfos
+}
+
+// authorizedPeers is a peer manager callback used by the underlying libp2p node. It returns the set of peers that are
+// allowed to connect to this node (and that this node is allowed to connect to). This function is called by the peer
+// manager and connection gater components of libp2p.
+//
+// Args:
+// none
+// Returns:
+// - peer.IDSlice: a list of peer IDs that are allowed to connect to this node (and that this node can connect to). Any peer
+//   not in this list is assumed to be disconnected from this node (if connected) and not allowed to connect to this node.
+//   This is the guarantee that the underlying libp2p node implementation makes.
+func (n *Network) authorizedPeers() peer.IDSlice {
+	peerIDs := make([]peer.ID, 0)
+	for _, id := range n.peerIDs(n.Topology().NodeIDs()) {
+		peerAllowed := true
+		for _, filter := range n.peerManagerFilters {
+			if err := filter(id); err != nil {
+				n.logger.Debug().
+					Err(err).
+					Str("peer_id", p2plogging.PeerId(id)).
+					Msg("filtering topology peer")
+
+				peerAllowed = false
+				break
+			}
+		}
+
+		if peerAllowed {
+			peerIDs = append(peerIDs, id)
+		}
+	}
+
+	return peerIDs
+}
+
+func (n *Network) OnDisallowListNotification(notification *network.DisallowListingUpdate) {
+	for _, pid := range n.peerIDs(notification.FlowIds) {
+		n.libP2PNode.OnDisallowListNotification(pid, notification.Cause)
+	}
+}
+
+func (n *Network) OnAllowListNotification(notification *network.AllowListingUpdate) {
+	for _, pid := range n.peerIDs(notification.FlowIds) {
+		n.libP2PNode.OnAllowListNotification(pid, notification.Cause)
+	}
+}
+
+// handleIncomingStream handles an incoming stream from a remote peer.
+// It is a callback that gets called for each incoming stream by libp2p with a new stream object.
+// TODO: this should be eventually moved to libp2p node.
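// Aside: authorizedPeers above keeps a peer only if every configured filter
// accepts it; the first rejecting filter wins. The same predicate-composition
// shape in isolation (allowed is a hypothetical helper):
//
//	func allowed(pid peer.ID, filters ...p2p.PeerFilter) bool {
//		for _, filter := range filters {
//			if err := filter(pid); err != nil {
//				return false
//			}
//		}
//		return true
//	}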
+func (n *Network) handleIncomingStream(s libp2pnet.Stream) { + // qualify the logger with local and remote address + log := p2putils.StreamLogger(n.logger, s) + + log.Debug().Msg("incoming stream received") + + success := false + + remotePeer := s.Conn().RemotePeer() + + defer func() { + if success { + err := s.Close() + if err != nil { + log.Err(err).Msg("failed to close stream") + } + } else { + err := s.Reset() + if err != nil { + log.Err(err).Msg("failed to reset stream") + } + } + }() + + // check if peer is currently rate limited before continuing to process stream. + if n.unicastRateLimiters.MessageRateLimiter.IsRateLimited(remotePeer) || n.unicastRateLimiters.BandWidthRateLimiter.IsRateLimited(remotePeer) { + log.Debug(). + Bool(logging.KeySuspicious, true). + Msg("dropping unicast stream from rate limited peer") + return + } + + // TODO: We need to allow per-topic timeouts and message size limits. + // This allows us to configure higher limits for topics on which we expect + // to receive large messages (e.g. Chunk Data Packs), and use the normal + // limits for other topics. In order to enable this, we will need to register + // a separate stream handler for each topic. + ctx, cancel := context.WithTimeout(n.ctx, LargeMsgUnicastTimeout) + defer cancel() + + deadline, _ := ctx.Deadline() + + err := s.SetReadDeadline(deadline) + if err != nil { + log.Err(err).Msg("failed to set read deadline for stream") + return + } + + // create the reader + r := ggio.NewDelimitedReader(s, LargeMsgMaxUnicastMsgSize) + for { + if ctx.Err() != nil { + return + } + + // Note: message fields must not be trusted until explicitly validated + var msg message.Message + // read the next message (blocking call) + err = r.ReadMsg(&msg) + if err != nil { + if err == io.EOF { + break + } + + n.logger.Err(err).Msg("failed to read message") + return + } + + channel := channels.Channel(msg.ChannelID) + topic := channels.TopicFromChannel(channel, n.sporkId) + + // ignore messages if node does not have subscription to topic + if !n.libP2PNode.HasSubscription(topic) { + violation := &network.Violation{ + Identity: nil, PeerID: p2plogging.PeerId(remotePeer), Channel: channel, Protocol: message.ProtocolTypeUnicast, + } + + msgCode, err := codec.MessageCodeFromPayload(msg.Payload) + if err != nil { + violation.Err = err + n.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) + return + } + + // msg type is not guaranteed to be correct since it is set by the client + _, what, err := codec.InterfaceFromMessageCode(msgCode) + if err != nil { + violation.Err = err + n.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) + return + } + + violation.MsgType = what + violation.Err = ErrUnicastMsgWithoutSub + n.slashingViolationsConsumer.OnUnauthorizedUnicastOnChannel(violation) + return + } + + // check if unicast messages have reached rate limit before processing next message + if !n.unicastRateLimiters.MessageAllowed(remotePeer) { + return + } + + // check if we can get a role for logging and metrics label if this is not a public channel + role := "" + if !channels.IsPublicChannel(channels.Channel(msg.ChannelID)) { + if identity, ok := n.Identity(remotePeer); ok { + role = identity.Role.String() + } + } + + // check unicast bandwidth rate limiter for peer + if !n.unicastRateLimiters.BandwidthAllowed( + remotePeer, + role, + msg.Size(), + message.MessageType(msg.Payload), + channels.Topic(msg.ChannelID)) { + return + } + + n.wg.Add(1) + go func() { + defer n.wg.Done() + 
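		// Aside: the rate-limit checks above gate stream processing per
		// remote peer. A hypothetical, single-goroutine sketch of such a
		// limiter using golang.org/x/time/rate (the real RateLimiters track
		// message and bandwidth budgets separately and are concurrency-safe):
		//
		//	type peerLimiter struct {
		//		limiters map[peer.ID]*rate.Limiter
		//	}
		//
		//	func (p *peerLimiter) allow(pid peer.ID) bool {
		//		l, ok := p.limiters[pid]
		//		if !ok {
		//			l = rate.NewLimiter(rate.Limit(10), 20) // 10 msgs/sec, burst of 20
		//			p.limiters[pid] = l
		//		}
		//		return l.Allow()
		//	}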
n.processUnicastStreamMessage(remotePeer, &msg) + }() + } + + success = true +} + +// Subscribe subscribes the network to a channel. +// No errors are expected during normal operation. +func (n *Network) Subscribe(channel channels.Channel) error { + topic := channels.TopicFromChannel(channel, n.sporkId) + + var peerFilter p2p.PeerFilter + var validators []validator.PubSubMessageValidator + if channels.IsPublicChannel(channel) { + // NOTE: for public channels the callback used to check if a node is staked will + // return true for every node. + peerFilter = p2p.AllowAllPeerFilter() + } else { + // for channels used by the staked nodes, add the topic validator to filter out messages from non-staked nodes + validators = append(validators, n.authorizedSenderValidator.PubSubMessageValidator(channel)) + + // NOTE: For non-public channels the libP2P node topic validator will reject + // messages from unstaked nodes. + peerFilter = n.isProtocolParticipant() + } + + topicValidator := flowpubsub.TopicValidator(n.logger, peerFilter, validators...) + s, err := n.libP2PNode.Subscribe(topic, topicValidator) + if err != nil { + return fmt.Errorf("could not subscribe to topic (%s): %w", topic, err) + } + + // create a new readSubscription with the context of the network + rs := internal.NewReadSubscription(s, n.processPubSubMessages, n.logger) + n.wg.Add(1) + + // kick off the receive loop to continuously receive messages + go func() { + defer n.wg.Done() + rs.ReceiveLoop(n.ctx) + }() + + // update peers to add some nodes interested in the same topic as direct peers + n.libP2PNode.RequestPeerUpdate() + + return nil +} + +// processPubSubMessages processes messages received from the pubsub subscription. +func (n *Network) processPubSubMessages(msg *message.Message, peerID peer.ID) { + n.processAuthenticatedMessage(msg, peerID, message.ProtocolTypePubSub) +} + +// Unsubscribe unsubscribes the network from a channel. +// The following benign errors are expected during normal operations from libP2P: +// - the libP2P node fails to unsubscribe to the topic created from the provided channel. +// +// All errors returned from this function can be considered benign. +func (n *Network) Unsubscribe(channel channels.Channel) error { + topic := channels.TopicFromChannel(channel, n.sporkId) + return n.libP2PNode.Unsubscribe(topic) +} + +// processUnicastStreamMessage will decode, perform authorized sender validation and process a message +// sent via unicast stream. This func should be invoked in a separate goroutine to avoid creating a message decoding bottleneck. +func (n *Network) processUnicastStreamMessage(remotePeer peer.ID, msg *message.Message) { + channel := channels.Channel(msg.ChannelID) + + // TODO: once we've implemented per topic message size limits per the TODO above, + // we can remove this check + maxSize, err := UnicastMaxMsgSizeByCode(msg.Payload) + if err != nil { + n.slashingViolationsConsumer.OnUnknownMsgTypeError(&network.Violation{ + Identity: nil, PeerID: p2plogging.PeerId(remotePeer), MsgType: "", Channel: channel, Protocol: message.ProtocolTypeUnicast, Err: err, + }) + return + } + if msg.Size() > maxSize { + // message size exceeded + n.logger.Error(). + Str("peer_id", p2plogging.PeerId(remotePeer)). + Str("channel", msg.ChannelID). + Int("max_size", maxSize). + Int("size", msg.Size()). + Bool(logging.KeySuspicious, true). 
+ Msg("received message exceeded permissible message maxSize") + return + } + + // if message channel is not public perform authorized sender validation + if !channels.IsPublicChannel(channel) { + messageType, err := n.authorizedSenderValidator.Validate(remotePeer, msg.Payload, channel, message.ProtocolTypeUnicast) + if err != nil { + n.logger. + Error(). + Err(err). + Str("peer_id", p2plogging.PeerId(remotePeer)). + Str("type", messageType). + Str("channel", msg.ChannelID). + Msg("unicast authorized sender validation failed") + return + } + } + n.processAuthenticatedMessage(msg, remotePeer, message.ProtocolTypeUnicast) +} + +// processAuthenticatedMessage processes a message and a source (indicated by its peer ID) and eventually passes it to the overlay +// In particular, it populates the `OriginID` field of the message with a Flow ID translated from this source. +func (n *Network) processAuthenticatedMessage(msg *message.Message, peerID peer.ID, protocol message.ProtocolType) { + originId, err := n.identityTranslator.GetFlowID(peerID) + if err != nil { + // this error should never happen. by the time the message gets here, the peer should be + // authenticated which means it must be known + n.logger.Error(). + Err(err). + Str("peer_id", p2plogging.PeerId(peerID)). + Bool(logging.KeySuspicious, true). + Msg("dropped message from unknown peer") + return + } + + channel := channels.Channel(msg.ChannelID) + decodedMsgPayload, err := n.codec.Decode(msg.Payload) + switch { + case codec.IsErrUnknownMsgCode(err): + // slash peer if message contains unknown message code byte + violation := &network.Violation{ + PeerID: p2plogging.PeerId(peerID), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, + } + n.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) + return + case codec.IsErrMsgUnmarshal(err) || codec.IsErrInvalidEncoding(err): + // slash if peer sent a message that could not be marshalled into the message type denoted by the message code byte + violation := &network.Violation{ + PeerID: p2plogging.PeerId(peerID), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, + } + n.slashingViolationsConsumer.OnInvalidMsgError(violation) + return + case err != nil: + // this condition should never happen and indicates there's a bug + // don't crash as a result of external inputs since that creates a DoS vector + // collect slashing data because this could potentially lead to slashing + err = fmt.Errorf("unexpected error during message validation: %w", err) + violation := &network.Violation{ + PeerID: p2plogging.PeerId(peerID), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, + } + n.slashingViolationsConsumer.OnUnexpectedError(violation) + return + } + + internalMsg, err := decodedMsgPayload.ToInternal() + if err != nil { + err = fmt.Errorf("failed to convert message to internal: %w", err) + violation := &network.Violation{ + PeerID: p2plogging.PeerId(peerID), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, + } + n.slashingViolationsConsumer.OnInvalidMsgError(violation) + return + } + scope, err := message.NewIncomingScope(originId, protocol, msg, internalMsg) + if err != nil { + n.logger.Error(). + Err(err). + Str("peer_id", p2plogging.PeerId(peerID)). + Str("origin_id", originId.String()). 
+ Msg("could not create incoming message scope") + return + } + + n.processMessage(scope) +} + +// processMessage processes a message and eventually passes it to the overlay +func (n *Network) processMessage(scope network.IncomingMessageScope) { + logger := n.logger.With(). + Str("channel", scope.Channel().String()). + Str("type", scope.Protocol().String()). + Int("msg_size", scope.Size()). + Hex("origin_id", logging.ID(scope.OriginId())). + Logger() + + // run through all the message validators + for _, v := range n.validators { + // if any one fails, stop message propagation + if !v.Validate(scope) { + logger.Debug().Msg("new message filtered by message validators") + return + } + } + + logger.Debug().Msg("processing new message") + + // if validation passed, send the message to the overlay + err := n.Receive(scope) + if err != nil { + n.logger.Error().Err(err).Msg("could not deliver payload") + } +} + +// UnicastMaxMsgSizeByCode returns the max permissible size for a unicast message code +func UnicastMaxMsgSizeByCode(payload []byte) (int, error) { + msgCode, err := codec.MessageCodeFromPayload(payload) + if err != nil { + return 0, err + } + _, messageType, err := codec.InterfaceFromMessageCode(msgCode) + if err != nil { + return 0, err + } + + maxSize := unicastMaxMsgSize(messageType) + return maxSize, nil +} + +// unicastMaxMsgSize returns the max permissible size for a unicast message +func unicastMaxMsgSize(messageType string) int { + switch messageType { + case "*messages.ChunkDataResponse", "messages.ChunkDataResponse": + return LargeMsgMaxUnicastMsgSize + default: + return DefaultMaxUnicastMsgSize + } +} + +// unicastMaxMsgDuration returns the max duration to allow for a unicast send to complete +func (n *Network) unicastMaxMsgDuration(messageType string) time.Duration { + switch messageType { + case "*messages.ChunkDataResponse", "messages.ChunkDataResponse": + if LargeMsgUnicastTimeout > n.unicastMessageTimeout { + return LargeMsgUnicastTimeout + } + return n.unicastMessageTimeout + default: + return n.unicastMessageTimeout + } +} diff --git a/network/underlay/noop.go b/network/underlay/noop.go new file mode 100644 index 00000000000..8273ded7026 --- /dev/null +++ b/network/underlay/noop.go @@ -0,0 +1,51 @@ +package underlay + +import ( + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" +) + +type NoopConduit struct{} + +var _ network.Conduit = (*NoopConduit)(nil) + +func (n *NoopConduit) ReportMisbehavior(network.MisbehaviorReport) {} + +func (n *NoopConduit) Publish(event interface{}, targetIDs ...flow.Identifier) error { + return nil +} + +func (n *NoopConduit) Unicast(event interface{}, targetID flow.Identifier) error { + return nil +} + +func (n *NoopConduit) Multicast(event interface{}, num uint, targetIDs ...flow.Identifier) error { + return nil +} + +func (n *NoopConduit) Close() error { + return nil +} + +type NoopEngineRegister struct { + module.NoopComponent +} + +func (n NoopEngineRegister) Register(channel channels.Channel, messageProcessor network.MessageProcessor) (network.Conduit, error) { + return &NoopConduit{}, nil +} + +func (n NoopEngineRegister) RegisterBlobService(channel channels.Channel, store datastore.Batching, opts ...network.BlobServiceOption) (network.BlobService, error) { + return nil, nil +} + +func (n NoopEngineRegister) RegisterPingService(pingProtocolID 
protocol.ID, pingInfoProvider network.PingInfoProvider) (network.PingService, error) { + return nil, nil +} + +var _ network.EngineRegistry = (*NoopEngineRegister)(nil) diff --git a/network/validator.go b/network/validator.go index 0d40b9290c5..07d5c90daa2 100644 --- a/network/validator.go +++ b/network/validator.go @@ -1,6 +1,6 @@ package network -// MessageValidator validates the incoming message. Message validation happens in the middleware right before it is +// MessageValidator validates the incoming message. Message validation happens in the network right before it is // delivered to the overlay. type MessageValidator interface { // Validate validates the message and returns true if the message is to be retained and false if it needs to be dropped diff --git a/network/validator/authorized_sender_validator.go b/network/validator/authorized_sender_validator.go index 0af21b45e39..d4300e06e03 100644 --- a/network/validator/authorized_sender_validator.go +++ b/network/validator/authorized_sender_validator.go @@ -8,11 +8,12 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/codec" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/slashing" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" ) var ( @@ -25,12 +26,12 @@ type GetIdentityFunc func(peer.ID) (*flow.Identity, bool) // AuthorizedSenderValidator performs message authorization validation. type AuthorizedSenderValidator struct { log zerolog.Logger - slashingViolationsConsumer slashing.ViolationsConsumer + slashingViolationsConsumer network.ViolationsConsumer getIdentity GetIdentityFunc } // NewAuthorizedSenderValidator returns a new AuthorizedSenderValidator -func NewAuthorizedSenderValidator(log zerolog.Logger, slashingViolationsConsumer slashing.ViolationsConsumer, getIdentity GetIdentityFunc) *AuthorizedSenderValidator { +func NewAuthorizedSenderValidator(log zerolog.Logger, slashingViolationsConsumer network.ViolationsConsumer, getIdentity GetIdentityFunc) *AuthorizedSenderValidator { return &AuthorizedSenderValidator{ log: log.With().Str("component", "authorized_sender_validator").Logger(), slashingViolationsConsumer: slashingViolationsConsumer, @@ -61,14 +62,14 @@ func (av *AuthorizedSenderValidator) Validate(from peer.ID, payload []byte, chan // something terrible went wrong.
identity, ok := av.getIdentity(from) if !ok { - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), Channel: channel, Protocol: protocol, Err: ErrIdentityUnverified} + violation := &network.Violation{PeerID: p2plogging.PeerId(from), Channel: channel, Protocol: protocol, Err: ErrIdentityUnverified} av.slashingViolationsConsumer.OnUnAuthorizedSenderError(violation) return "", ErrIdentityUnverified } msgCode, err := codec.MessageCodeFromPayload(payload) if err != nil { - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) return "", err } @@ -77,28 +78,32 @@ func (av *AuthorizedSenderValidator) Validate(from peer.ID, payload []byte, chan switch { case err == nil: return msgType, nil - case message.IsUnknownMsgTypeErr(err): - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + case message.IsUnknownMsgTypeErr(err) || codec.IsErrUnknownMsgCode(err): + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) return msgType, err case errors.Is(err, message.ErrUnauthorizedMessageOnChannel) || errors.Is(err, message.ErrUnauthorizedRole): - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnAuthorizedSenderError(violation) return msgType, err case errors.Is(err, ErrSenderEjected): - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnSenderEjectedError(violation) return msgType, err case errors.Is(err, message.ErrUnauthorizedUnicastOnChannel): - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnauthorizedUnicastOnChannel(violation) return msgType, err + case errors.Is(err, message.ErrUnauthorizedPublishOnChannel): + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + av.slashingViolationsConsumer.OnUnauthorizedPublishOnChannel(violation) + return msgType, err default: // this condition should never happen and indicates there's a bug // don't crash as a result of external inputs since that creates a DoS vector // collect slashing data because this could potentially lead to slashing err = 
fmt.Errorf("unexpected error during message validation: %w", err) - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnexpectedError(violation) return msgType, err } @@ -117,7 +122,7 @@ func (av *AuthorizedSenderValidator) Validate(from peer.ID, payload []byte, chan // - message.ErrUnauthorizedMessageOnChannel if msg is not authorized to be sent on channel // - message.ErrUnauthorizedRole if sender role is not authorized to send msg func (av *AuthorizedSenderValidator) isAuthorizedSender(identity *flow.Identity, channel channels.Channel, msgCode codec.MessageCode, protocol message.ProtocolType) (string, error) { - if identity.Ejected { + if identity.IsEjected() { return "", ErrSenderEjected } diff --git a/network/validator/authorized_sender_validator_test.go b/network/validator/authorized_sender_validator_test.go index 966ae5ba127..58ade33594c 100644 --- a/network/validator/authorized_sender_validator_test.go +++ b/network/validator/authorized_sender_validator_test.go @@ -6,16 +6,20 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" + libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/alsp" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/codec" "github.com/onflow/flow-go/network/message" + mocknetwork "github.com/onflow/flow-go/network/mock" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/utils/unittest" @@ -43,7 +47,7 @@ type TestAuthorizedSenderValidatorSuite struct { unauthorizedUnicastOnChannel []TestCase authorizedUnicastOnChannel []TestCase log zerolog.Logger - slashingViolationsConsumer slashing.ViolationsConsumer + slashingViolationsConsumer network.ViolationsConsumer allMsgConfigs []message.MsgAuthConfig codec network.Codec } @@ -54,7 +58,6 @@ func (s *TestAuthorizedSenderValidatorSuite) SetupTest() { s.initializeInvalidMessageOnChannelTestCases() s.initializeUnicastOnChannelTestCases() s.log = unittest.Logger() - s.slashingViolationsConsumer = slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector()) s.codec = unittest.NetworkCodec() } @@ -64,37 +67,64 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSen for _, c := range s.authorizedSenderTestCases { str := fmt.Sprintf("role (%s) should be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) - + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + defer misbehaviorReportConsumer.AssertNotCalled(s.T(), "ReportMisbehaviorOnChannel", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("*alsp.MisbehaviorReport")) + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + 
authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) + validateUnicast := authorizedSenderValidator.Validate + validatePubsub := authorizedSenderValidator.PubSubMessageValidator(c.Channel) pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - + switch { // ensure according to the message auth config, if a message is authorized to be sent via unicast it - // is accepted or rejected. - msgType, err := authorizedSenderValidator.Validate(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypeUnicast) - if c.Protocols.Contains(message.ProtocolTypeUnicast) { + // is accepted. + case c.Protocols.Contains(message.ProtocolTypeUnicast): + msgType, err := validateUnicast(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypeUnicast) + if c.Protocols.Contains(message.ProtocolTypeUnicast) { + require.NoError(s.T(), err) + require.Equal(s.T(), c.MessageStr, msgType) + } + // ensure according to the message auth config, if a message is authorized to be sent via pubsub it + // is accepted. + case c.Protocols.Contains(message.ProtocolTypePubSub): + payload, err := s.codec.Encode(c.Message) require.NoError(s.T(), err) - require.Equal(s.T(), c.MessageStr, msgType) - } else { - require.ErrorIs(s.T(), err, message.ErrUnauthorizedUnicastOnChannel) - require.Equal(s.T(), c.MessageStr, msgType) - } - - payload, err := s.codec.Encode(c.Message) - require.NoError(s.T(), err) - m := &message.Message{ - ChannelID: c.Channel.String(), - Payload: payload, - } - validatePubsub := authorizedSenderValidator.PubSubMessageValidator(c.Channel) - pubsubResult := validatePubsub(pid, m) - if !c.Protocols.Contains(message.ProtocolTypePubSub) { - require.Equal(s.T(), p2p.ValidationReject, pubsubResult) - } else { + m := &message.Message{ + ChannelID: c.Channel.String(), + Payload: payload, + } + pubsubResult := validatePubsub(pid, m) require.Equal(s.T(), p2p.ValidationAccept, pubsubResult) + default: + s.T().Fatal("authconfig does not contain any protocols") } }) } + + s.Run("test messages should be allowed to be sent via both protocols unicast/pubsub on test channel", func() { + identity, _ := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleCollection)) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + defer misbehaviorReportConsumer.AssertNotCalled(s.T(), "ReportMisbehaviorOnChannel", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("*alsp.MisbehaviorReport")) + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + getIdentityFunc := s.getIdentity(identity) + pid, err := unittest.PeerIDFromFlowID(identity) + require.NoError(s.T(), err) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, getIdentityFunc) + + msgType, err := authorizedSenderValidator.Validate(pid, []byte{codec.CodeEcho.Uint8()}, channels.TestNetworkChannel, message.ProtocolTypeUnicast) + require.NoError(s.T(), err) + require.Equal(s.T(), "*message.TestMessage", msgType) + + payload, err := s.codec.Encode(&libp2pmessage.TestMessage{}) + require.NoError(s.T(), err) + m := &message.Message{ + ChannelID: channels.TestNetworkChannel.String(), + Payload: payload, + } + validatePubsub := authorizedSenderValidator.PubSubMessageValidator(channels.TestNetworkChannel) + pubsubResult := validatePubsub(pid, m) + require.Equal(s.T(), p2p.ValidationAccept, pubsubResult) + }) } // 
TestValidatorCallback_UnAuthorizedSender checks that AuthorizedSenderValidator.Validate returns p2p.ValidationReject @@ -105,8 +135,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedS s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(c.Identity.NodeID, alsp.UnAuthorizedSender) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", c.Channel, expectedMisbehaviorReport).Once() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) payload, err := s.codec.Encode(c.Message) require.NoError(s.T(), err) @@ -129,8 +163,10 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedUni s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + defer misbehaviorReportConsumer.AssertNotCalled(s.T(), "ReportMisbehaviorOnChannel", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("*alsp.MisbehaviorReport")) + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) msgType, err := authorizedSenderValidator.Validate(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypeUnicast) require.NoError(s.T(), err) @@ -147,8 +183,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedU s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(c.Identity.NodeID, alsp.UnauthorizedUnicastOnChannel) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", c.Channel, expectedMisbehaviorReport).Once() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) msgType, err := authorizedSenderValidator.Validate(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypeUnicast) require.ErrorIs(s.T(), err, message.ErrUnauthorizedUnicastOnChannel) @@ -165,8 +205,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedM s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(c.Identity.NodeID, alsp.UnAuthorizedSender) + require.NoError(s.T(), err) + misbehaviorReportConsumer :=
mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", c.Channel, expectedMisbehaviorReport).Twice() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) msgType, err := authorizedSenderValidator.Validate(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypeUnicast) require.ErrorIs(s.T(), err, message.ErrUnauthorizedMessageOnChannel) @@ -195,15 +239,27 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefix pid, err := unittest.PeerIDFromFlowID(identity) require.NoError(s.T(), err) - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, getIdentityFunc) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(identity.NodeID, alsp.UnauthorizedUnicastOnChannel) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channels.SyncCluster(clusterID), expectedMisbehaviorReport).Once() + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channels.ConsensusCluster(clusterID), expectedMisbehaviorReport).Once() + + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, getIdentityFunc) - // ensure ClusterBlockProposal not allowed to be sent on channel via unicast - msgType, err := authorizedSenderValidator.Validate(pid, []byte{codec.CodeClusterBlockProposal.Uint8()}, channels.ConsensusCluster(clusterID), message.ProtocolTypeUnicast) + // validate collection sync cluster SyncRequest is not allowed to be sent on channel via unicast + msgType, err := authorizedSenderValidator.Validate(pid, []byte{codec.CodeSyncRequest.Uint8()}, channels.SyncCluster(clusterID), message.ProtocolTypeUnicast) require.ErrorIs(s.T(), err, message.ErrUnauthorizedUnicastOnChannel) - require.Equal(s.T(), "*messages.ClusterBlockProposal", msgType) + require.Equal(s.T(), "*messages.SyncRequest", msgType) - // ensure ClusterBlockProposal is allowed to be sent via pubsub by authorized sender - payload, err := s.codec.Encode(&messages.ClusterBlockProposal{}) + // ensure messages.ClusterProposal not allowed to be sent on channel via unicast + msgType, err = authorizedSenderValidator.Validate(pid, []byte{codec.CodeClusterBlockProposal.Uint8()}, channels.ConsensusCluster(clusterID), message.ProtocolTypeUnicast) + require.ErrorIs(s.T(), err, message.ErrUnauthorizedUnicastOnChannel) + require.Equal(s.T(), "*messages.ClusterProposal", msgType) + + // ensure messages.ClusterProposal is allowed to be sent via pubsub by authorized sender + payload, err := s.codec.Encode(&messages.ClusterProposal{}) require.NoError(s.T(), err) m := &message.Message{ ChannelID: channels.ConsensusCluster(clusterID).String(), @@ -213,11 +269,6 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefix pubsubResult := validateCollConsensusPubsub(pid, m) require.Equal(s.T(), p2p.ValidationAccept, pubsubResult) - // validate collection sync cluster SyncRequest is not allowed to be sent on channel via unicast - msgType, err = authorizedSenderValidator.Validate(pid, []byte{codec.CodeSyncRequest.Uint8()}, channels.SyncCluster(clusterID), 
message.ProtocolTypeUnicast) - require.ErrorIs(s.T(), err, message.ErrUnauthorizedUnicastOnChannel) - require.Equal(s.T(), "*messages.SyncRequest", msgType) - // ensure SyncRequest is allowed to be sent via pubsub by authorized sender payload, err = s.codec.Encode(&messages.SyncRequest{}) require.NoError(s.T(), err) @@ -234,12 +285,17 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefix func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFailure() { s.Run("sender is ejected", func() { identity, _ := unittest.IdentityWithNetworkingKeyFixture() - identity.Ejected = true + identity.EpochParticipationStatus = flow.EpochParticipationStatusEjected getIdentityFunc := s.getIdentity(identity) pid, err := unittest.PeerIDFromFlowID(identity) require.NoError(s.T(), err) - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, getIdentityFunc) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(identity.NodeID, alsp.SenderEjected) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channels.SyncCommittee, expectedMisbehaviorReport).Twice() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, getIdentityFunc) msgType, err := authorizedSenderValidator.Validate(pid, []byte{codec.CodeSyncRequest.Uint8()}, channels.SyncCommittee, message.ProtocolTypeUnicast) require.ErrorIs(s.T(), err, ErrSenderEjected) @@ -263,7 +319,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai pid, err := unittest.PeerIDFromFlowID(identity) require.NoError(s.T(), err) - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, getIdentityFunc) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(identity.NodeID, alsp.UnknownMsgType) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channels.ConsensusCommittee, expectedMisbehaviorReport).Twice() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, getIdentityFunc) validatePubsub := authorizedSenderValidator.PubSubMessageValidator(channels.ConsensusCommittee) // unknown message types are rejected @@ -271,7 +332,7 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai require.True(s.T(), codec.IsErrUnknownMsgCode(err)) require.Equal(s.T(), "", msgType) - payload, err := s.codec.Encode(&messages.BlockProposal{}) + payload, err := s.codec.Encode(&messages.Proposal{}) require.NoError(s.T(), err) payload[0] = byte('x') netMsg := &message.Message{ @@ -291,7 +352,11 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai pid, err := unittest.PeerIDFromFlowID(identity) require.NoError(s.T(), err) - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, getIdentityFunc) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + // we cannot penalize a peer if its identity is not known; in this case we don't expect any misbehavior to be
reported + defer misbehaviorReportConsumer.AssertNotCalled(s.T(), "ReportMisbehaviorOnChannel", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("*alsp.MisbehaviorReport")) + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, getIdentityFunc) msgType, err := authorizedSenderValidator.Validate(pid, []byte{codec.CodeSyncRequest.Uint8()}, channels.SyncCommittee, message.ProtocolTypeUnicast) require.ErrorIs(s.T(), err, ErrIdentityUnverified) @@ -314,17 +379,21 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnauthorizedP for _, c := range s.authorizedUnicastOnChannel { str := fmt.Sprintf("message type (%s) is not authorized to be sent via libp2p publish", c.MessageStr) s.Run(str, func() { + // skip test message check + if c.MessageStr == "*message.TestMessage" { + return + } pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(c.Identity.NodeID, alsp.UnauthorizedPublishOnChannel) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", c.Channel, expectedMisbehaviorReport).Once() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) msgType, err := authorizedSenderValidator.Validate(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypePubSub) - if c.MessageStr == "*message.TestMessage" { - require.NoError(s.T(), err) - } else { - require.ErrorIs(s.T(), err, message.ErrUnauthorizedPublishOnChannel) - require.Equal(s.T(), c.MessageStr, msgType) - } + require.ErrorIs(s.T(), err, message.ErrUnauthorizedPublishOnChannel) + require.Equal(s.T(), c.MessageStr, msgType) }) } } diff --git a/network/validator/pubsub/topic_validator.go b/network/validator/pubsub/topic_validator.go index 954c5f6b401..c55689468d8 100644 --- a/network/validator/pubsub/topic_validator.go +++ b/network/validator/pubsub/topic_validator.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" + p2plogging "github.com/onflow/flow-go/network/p2p/logging" "github.com/onflow/flow-go/network/validator" _ "github.com/onflow/flow-go/utils/binstat" "github.com/onflow/flow-go/utils/logging" @@ -72,9 +73,9 @@ func TopicValidator(log zerolog.Logger, peerFilter func(peer.ID) error, validato return func(ctx context.Context, receivedFrom peer.ID, rawMsg *pubsub.Message) p2p.ValidationResult { var msg message.Message // convert the incoming raw message payload to Message type - //bs := binstat.EnterTimeVal(binstat.BinNet+":wire>1protobuf2message", int64(len(rawMsg.Data))) + // bs := binstat.EnterTimeVal(binstat.BinNet+":wire>1protobuf2message", int64(len(rawMsg.Data))) err := msg.Unmarshal(rawMsg.Data) - //binstat.Leave(bs) + // binstat.Leave(bs) if err != nil { return p2p.ValidationReject } @@ -85,7 +86,7 @@ func TopicValidator(log zerolog.Logger, peerFilter func(peer.ID) error, validato } lg := log.With(). - Str("peer_id", from.String()).
+ Str("peer_id", p2plogging.PeerId(from)). Str("topic", rawMsg.GetTopic()). Int("raw_msg_size", len(rawMsg.Data)). Int("msg_size", msg.Size()). diff --git a/network/validator/target_validator.go b/network/validator/target_validator.go index 5a9b1ab73f9..d02901b166e 100644 --- a/network/validator/target_validator.go +++ b/network/validator/target_validator.go @@ -35,7 +35,8 @@ func (tv *TargetValidator) Validate(msg network.IncomingMessageScope) bool { } } tv.log.Debug(). - Hex("target", logging.ID(tv.target)). + Hex("message_target_id", logging.ID(tv.target)). + Hex("local_node_id", logging.ID(tv.target)). Hex("event_id", msg.EventID()). Msg("message not intended for target") return false diff --git a/network/violations_consumer.go b/network/violations_consumer.go new file mode 100644 index 00000000000..6c3de412c77 --- /dev/null +++ b/network/violations_consumer.go @@ -0,0 +1,44 @@ +package network + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/message" +) + +// ViolationsConsumer logs reported slashing violation errors and reports those violations as misbehavior's to the ALSP +// misbehavior report manager. Any errors encountered while reporting the misbehavior are considered irrecoverable and +// will result in a fatal level log. +type ViolationsConsumer interface { + // OnUnAuthorizedSenderError logs an error for unauthorized sender error. + OnUnAuthorizedSenderError(violation *Violation) + + // OnUnknownMsgTypeError logs an error for unknown message type error. + OnUnknownMsgTypeError(violation *Violation) + + // OnInvalidMsgError logs an error for messages that contained payloads that could not + // be unmarshalled into the message type denoted by message code byte. + OnInvalidMsgError(violation *Violation) + + // OnSenderEjectedError logs an error for sender ejected error. + OnSenderEjectedError(violation *Violation) + + // OnUnauthorizedUnicastOnChannel logs an error for messages unauthorized to be sent via unicast. + OnUnauthorizedUnicastOnChannel(violation *Violation) + + // OnUnauthorizedPublishOnChannel logs an error for messages unauthorized to be sent via pubsub. + OnUnauthorizedPublishOnChannel(violation *Violation) + + // OnUnexpectedError logs an error for unknown errors. + OnUnexpectedError(violation *Violation) +} + +type Violation struct { + Identity *flow.Identity + PeerID string + OriginID flow.Identifier + MsgType string + Channel channels.Channel + Protocol message.ProtocolType + Err error +} diff --git a/scripts/update-cadence.sh b/scripts/update-cadence.sh new file mode 100755 index 00000000000..509f29ba582 --- /dev/null +++ b/scripts/update-cadence.sh @@ -0,0 +1,12 @@ +#!/bin/sh +# +# This script updates all cadence dependencies to a new version. +# Specify the desired version as the only argument when running the script: +# ./scripts/update-cadence.sh v1.2.3 + +go get github.com/onflow/cadence@$1 +cd integration +go get github.com/onflow/cadence@$1 +cd ../insecure/ +go get github.com/onflow/cadence@$1 +cd .. 
diff --git a/scripts/update-core-contracts.sh b/scripts/update-core-contracts.sh index eb11bf433ab..a2a294d0270 100755 --- a/scripts/update-core-contracts.sh +++ b/scripts/update-core-contracts.sh @@ -9,4 +9,7 @@ go get github.com/onflow/flow-core-contracts/lib/go/templates@$1 cd integration go get github.com/onflow/flow-core-contracts/lib/go/contracts@$1 go get github.com/onflow/flow-core-contracts/lib/go/templates@$1 +cd ../insecure/ +go get github.com/onflow/flow-core-contracts/lib/go/contracts@$1 +go get github.com/onflow/flow-core-contracts/lib/go/templates@$1 cd .. diff --git a/state/cluster/badger/mutator.go b/state/cluster/badger/mutator.go index f4797ee3034..13f27738f9f 100644 --- a/state/cluster/badger/mutator.go +++ b/state/cluster/badger/mutator.go @@ -6,7 +6,7 @@ import ( "fmt" "math" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" @@ -17,25 +17,26 @@ import ( clusterstate "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/state/fork" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage/operation" ) type MutableState struct { *State - tracer module.Tracer - headers storage.Headers - payloads storage.ClusterPayloads + lockManager lockctx.Manager + tracer module.Tracer + headers storage.Headers + payloads storage.ClusterPayloads } var _ clusterstate.MutableState = (*MutableState)(nil) -func NewMutableState(state *State, tracer module.Tracer, headers storage.Headers, payloads storage.ClusterPayloads) (*MutableState, error) { +func NewMutableState(state *State, lockManager lockctx.Manager, tracer module.Tracer, headers storage.Headers, payloads storage.ClusterPayloads) (*MutableState, error) { mutableState := &MutableState{ - State: state, - tracer: tracer, - headers: headers, - payloads: payloads, + State: state, + lockManager: lockManager, + tracer: tracer, + headers: headers, + payloads: payloads, } return mutableState, nil } @@ -57,36 +58,32 @@ func (m *MutableState) getExtendCtx(candidate *cluster.Block) (extendContext, er var ctx extendContext ctx.candidate = candidate - err := m.State.db.View(func(tx *badger.Txn) error { - // get the latest finalized cluster block and latest finalized consensus height - ctx.finalizedClusterBlock = new(flow.Header) - err := procedure.RetrieveLatestFinalizedClusterHeader(candidate.Header.ChainID, ctx.finalizedClusterBlock)(tx) - if err != nil { - return fmt.Errorf("could not retrieve finalized cluster head: %w", err) - } - err = operation.RetrieveFinalizedHeight(&ctx.finalizedConsensusHeight)(tx) - if err != nil { - return fmt.Errorf("could not retrieve finalized height on consensus chain: %w", err) - } + r := m.State.db.Reader() + // get the latest finalized cluster block and latest finalized consensus height + ctx.finalizedClusterBlock = new(flow.Header) + err := operation.RetrieveLatestFinalizedClusterHeader(r, candidate.ChainID, ctx.finalizedClusterBlock) + if err != nil { + return extendContext{}, fmt.Errorf("could not retrieve finalized cluster head: %w", err) + } + err = operation.RetrieveFinalizedHeight(r, &ctx.finalizedConsensusHeight) + if err != nil { + return extendContext{}, fmt.Errorf("could not retrieve finalized height on consensus chain: %w", err) + } - err = operation.RetrieveEpochFirstHeight(m.State.epoch, &ctx.epochFirstHeight)(tx) - if err != nil { - return fmt.Errorf("could 
not get operating epoch first height: %w", err) - } - err = operation.RetrieveEpochLastHeight(m.State.epoch, &ctx.epochLastHeight)(tx) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - ctx.epochHasEnded = false - return nil - } - return fmt.Errorf("unexpected failure to retrieve final height of operating epoch: %w", err) - } - ctx.epochHasEnded = true - return nil - }) + err = operation.RetrieveEpochFirstHeight(r, m.State.epoch, &ctx.epochFirstHeight) if err != nil { - return extendContext{}, fmt.Errorf("could not read required state information for Extend checks: %w", err) + return extendContext{}, fmt.Errorf("could not get operating epoch first height: %w", err) } + err = operation.RetrieveEpochLastHeight(r, m.State.epoch, &ctx.epochLastHeight) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + ctx.epochHasEnded = false + return ctx, nil + } + return extendContext{}, fmt.Errorf("unexpected failure to retrieve final height of operating epoch: %w", err) + } + ctx.epochHasEnded = true + return ctx, nil } @@ -98,19 +95,20 @@ func (m *MutableState) getExtendCtx(candidate *cluster.Block) (extendContext, er // - state.OutdatedExtensionError if the candidate block is outdated (e.g. orphaned) // - state.UnverifiableExtensionError if the reference block is _not_ a known finalized block // - state.InvalidExtensionError if the candidate block is invalid -func (m *MutableState) Extend(candidate *cluster.Block) error { +func (m *MutableState) Extend(proposal *cluster.Proposal) error { + candidate := proposal.Block parentSpan, ctx := m.tracer.StartCollectionSpan(context.Background(), candidate.ID(), trace.COLClusterStateMutatorExtend) defer parentSpan.End() span, _ := m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendCheckHeader) - err := m.checkHeaderValidity(candidate) + err := m.checkHeaderValidity(&candidate) span.End() if err != nil { return fmt.Errorf("error checking header validity: %w", err) } span, _ = m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendGetExtendCtx) - extendCtx, err := m.getExtendCtx(candidate) + extendCtx, err := m.getExtendCtx(&candidate) span.End() if err != nil { return fmt.Errorf("error getting extend context data: %w", err) @@ -130,19 +128,29 @@ func (m *MutableState) Extend(candidate *cluster.Block) error { return fmt.Errorf("error checking reference block: %w", err) } + lctx := m.lockManager.NewContext() + err = lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) + if err != nil { + return fmt.Errorf("could not acquire lock for inserting cluster block: %w", err) + } + defer lctx.Release() + span, _ = m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendCheckTransactionsValid) - err = m.checkPayloadTransactions(extendCtx) + err = m.checkPayloadTransactions(lctx, extendCtx) span.End() if err != nil { return fmt.Errorf("error checking payload transactions: %w", err) } span, _ = m.tracer.StartSpanFromContext(ctx, trace.COLClusterStateMutatorExtendDBInsert) - err = operation.RetryOnConflict(m.State.db.Update, procedure.InsertClusterBlock(candidate)) + err = m.State.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterBlock(lctx, rw, proposal) + }) span.End() if err != nil { return fmt.Errorf("could not insert cluster block: %w", err) } + return nil } @@ -151,29 +159,27 @@ func (m *MutableState) Extend(candidate *cluster.Block) error { // Expected error returns: // - state.InvalidExtensionError if the candidate header is invalid func (m
*MutableState) checkHeaderValidity(candidate *cluster.Block) error { - header := candidate.Header - // check chain ID - if header.ChainID != m.State.clusterID { - return state.NewInvalidExtensionErrorf("new block chain ID (%s) does not match configured (%s)", header.ChainID, m.State.clusterID) + if candidate.ChainID != m.State.clusterID { + return state.NewInvalidExtensionErrorf("new block chain ID (%s) does not match configured (%s)", candidate.ChainID, m.State.clusterID) } // get the header of the parent of the new block - parent, err := m.headers.ByBlockID(header.ParentID) + parent, err := m.headers.ByBlockID(candidate.ParentID) if err != nil { return irrecoverable.NewExceptionf("could not retrieve latest finalized header: %w", err) } // extending block must have correct parent view - if header.ParentView != parent.View { + if candidate.ParentView != parent.View { return state.NewInvalidExtensionErrorf("candidate built with inconsistent parent view (candidate: %d, parent %d)", - header.ParentView, parent.View) + candidate.ParentView, parent.View) } // the extending block must increase height by 1 from parent - if header.Height != parent.Height+1 { + if candidate.Height != parent.Height+1 { return state.NewInvalidExtensionErrorf("extending block height (%d) must be parent height + 1 (%d)", - header.Height, parent.Height) + candidate.Height, parent.Height) } return nil } @@ -181,19 +187,18 @@ func (m *MutableState) checkHeaderValidity(candidate *cluster.Block) error { // checkConnectsToFinalizedState validates that the candidate block connects to // the latest finalized state (i.e., is not extending an orphaned fork). // Expected error returns: -// - state.UnverifiableExtensionError if the candidate extends an orphaned fork +// - state.OutdatedExtensionError if the candidate extends an orphaned fork func (m *MutableState) checkConnectsToFinalizedState(ctx extendContext) error { - header := ctx.candidate.Header + parentID := ctx.candidate.ParentID finalizedID := ctx.finalizedClusterBlock.ID() finalizedHeight := ctx.finalizedClusterBlock.Height // start with the extending block's parent - parentID := header.ParentID for parentID != finalizedID { // get the parent of current block ancestor, err := m.headers.ByBlockID(parentID) if err != nil { - return irrecoverable.NewExceptionf("could not get parent which must be known (%x): %w", header.ParentID, err) + return irrecoverable.NewExceptionf("could not get parent which must be known (%x): %w", parentID, err) } // if its height is below current boundary, the block does not connect @@ -266,7 +271,7 @@ func (m *MutableState) checkPayloadReferenceBlock(ctx extendContext) error { // Expected error returns: // - state.InvalidExtensionError if the reference block is invalid for use. // - state.UnverifiableExtensionError if the reference block is unknown. -func (m *MutableState) checkPayloadTransactions(ctx extendContext) error { +func (m *MutableState) checkPayloadTransactions(lctx lockctx.Proof, ctx extendContext) error { block := ctx.candidate payload := block.Payload @@ -334,7 +339,11 @@ func (m *MutableState) checkPayloadTransactions(ctx extendContext) error { } // second, check for duplicate transactions in the finalized ancestry - duplicateTxIDs, err = m.checkDupeTransactionsInFinalizedAncestry(txLookup, minRefHeight, maxRefHeight) + // CAUTION: Finalization might progress while we are running this logic. However, finalization is not guaranteed to
Hence, we might apply the transaction de-duplication logic + // against blocks that do not belong to our fork. If we erroneously find a duplicated transaction, based on a block + // that is not part of our fork, we would be raising an invalid slashing challenge, which would get this node slashed. + duplicateTxIDs, err = m.checkDupeTransactionsInFinalizedAncestry(lctx, txLookup, minRefHeight, maxRefHeight) if err != nil { return fmt.Errorf("could not check for duplicate txs in finalized ancestry: %w", err) } @@ -348,9 +357,8 @@ func (m *MutableState) checkPayloadTransactions(ctx extendContext) error { // checkDupeTransactionsInUnfinalizedAncestry checks for duplicate transactions in the un-finalized // ancestry of the given block, and returns a list of all duplicates if there are any. func (m *MutableState) checkDupeTransactionsInUnfinalizedAncestry(block *cluster.Block, includedTransactions map[flow.Identifier]struct{}, finalHeight uint64) ([]flow.Identifier, error) { - var duplicateTxIDs []flow.Identifier - err := fork.TraverseBackward(m.headers, block.Header.ParentID, func(ancestor *flow.Header) error { + err := fork.TraverseBackward(m.headers, block.ParentID, func(ancestor *flow.Header) error { payload, err := m.payloads.ByBlockID(ancestor.ID()) if err != nil { return fmt.Errorf("could not retrieve ancestor payload: %w", err) @@ -371,7 +379,7 @@ func (m *MutableState) checkDupeTransactionsInUnfinalizedAncestry(block *cluster // checkDupeTransactionsInFinalizedAncestry checks for duplicate transactions in the finalized // ancestry, and returns a list of all duplicates if there are any. -func (m *MutableState) checkDupeTransactionsInFinalizedAncestry(includedTransactions map[flow.Identifier]struct{}, minRefHeight, maxRefHeight uint64) ([]flow.Identifier, error) { +func (m *MutableState) checkDupeTransactionsInFinalizedAncestry(lctx lockctx.Proof, includedTransactions map[flow.Identifier]struct{}, minRefHeight, maxRefHeight uint64) ([]flow.Identifier, error) { var duplicatedTxIDs []flow.Identifier // Let E be the global transaction expiry constant, measured in blocks. For each @@ -385,8 +393,8 @@ func (m *MutableState) checkDupeTransactionsInFinalizedAncestry(includedTransact // Boundary conditions: // 1. C's reference block height is equal to the lowest reference block height of // all its constituent transactions. Hence, for collection C to potentially contain T, it must satisfy c <= t. - // 2. For T to be eligible for inclusion in collection C, _none_ of the transactions within C are allowed - // to be expired w.r.t. C's reference block. Hence, for collection C to potentially contain T, it must satisfy t < c + E. + // 2. For T to be eligible for inclusion in collection C, _none_ of the transactions within C are allowed to be + // expired w.r.t. C's reference block. Hence, for collection C to potentially contain T, it must satisfy t < c + E. // // Therefore, for collection C to potentially contain transaction T, it must satisfy t - E < c <= t. // In other words, we only need to inspect collections with reference block height c ∈ (t-E, t]. 
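To make the window arithmetic above concrete, here is a standalone sketch of the height-range computation, including the unsigned-underflow clamp that appears as `start = 0 // overflow check` in the surrounding code. The function name dupeInspectionWindow and the explicit expiry parameter are invented for illustration; the constant E from the comment above is passed in explicitly rather than read from wherever the real code obtains it.

package example

// dupeInspectionWindow computes the closed interval [start, end] of reference
// block heights that must be checked for duplicate transactions, given the
// min/max reference heights of the candidate payload's transactions and the
// global transaction expiry constant E. It mirrors the reasoning above: a
// collection C with reference height c can contain transaction T with
// reference height t only if t-E < c <= t.
func dupeInspectionWindow(minRefHeight, maxRefHeight, expiry uint64) (start, end uint64) {
	start = minRefHeight - expiry + 1
	if start > minRefHeight {
		start = 0 // unsigned underflow: expiry exceeds minRefHeight+1, so the window starts at genesis
	}
	end = maxRefHeight
	return start, end
}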
@@ -400,7 +408,7 @@ func (m *MutableState) checkDupeTransactionsInFinalizedAncestry(includedTransact start = 0 // overflow check } end := maxRefHeight - err := m.db.View(operation.LookupClusterBlocksByReferenceHeightRange(start, end, &clusterBlockIDs)) + err := operation.LookupClusterBlocksByReferenceHeightRange(lctx, m.db.Reader(), start, end, &clusterBlockIDs) if err != nil { return nil, fmt.Errorf("could not lookup finalized cluster blocks by reference height range [%d,%d]: %w", start, end, err) } diff --git a/state/cluster/badger/mutator_test.go b/state/cluster/badger/mutator_test.go index 280db39a055..5c4ca83e0a9 100644 --- a/state/cluster/badger/mutator_test.go +++ b/state/cluster/badger/mutator_test.go @@ -7,9 +7,8 @@ import ( "math/rand" "os" "testing" - "time" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -25,26 +24,30 @@ import ( pbadger "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" protocolutil "github.com/onflow/flow-go/state/protocol/util" - storage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/storage/util" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) type MutatorSuite struct { suite.Suite - db *badger.DB - dbdir string + db storage.DB + dbdir string + lockManager lockctx.Manager genesis *model.Block chainID flow.ChainID epochCounter uint64 // protocol state for reference blocks for transactions - protoState protocol.FollowerState - protoGenesis *flow.Header + protoState protocol.FollowerState + mutableProtocolState protocol.MutableProtocolState + protoGenesis *flow.Block state cluster.MutableState } @@ -53,55 +56,86 @@ type MutatorSuite struct { func (suite *MutatorSuite) SetupTest() { var err error - // seed the RNG - rand.Seed(time.Now().UnixNano()) - - suite.genesis = model.Genesis() - suite.chainID = suite.genesis.Header.ChainID + suite.genesis, err = unittest.ClusterBlock.Genesis() + require.NoError(suite.T(), err) + suite.chainID = suite.genesis.ChainID suite.dbdir = unittest.TempDir(suite.T()) - suite.db = unittest.BadgerDB(suite.T(), suite.dbdir) + pdb := unittest.PebbleDB(suite.T(), suite.dbdir) + suite.db = pebbleimpl.ToDB(pdb) + suite.lockManager = storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := util.StorageLayer(suite.T(), suite.db) - colPayloads := storage.NewClusterPayloads(metrics, suite.db) + all := store.InitAll(metrics, suite.db) + colPayloads := store.NewClusterPayloads(metrics, suite.db) // just bootstrap with a genesis block, we'll use this as reference genesis, result, seal := unittest.BootstrapFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) + // ensure we don't enter a new epoch for tests that build many blocks - result.ServiceEvents[0].Event.(*flow.EpochSetup).FinalView = genesis.Header.View + 100_000 + result.ServiceEvents[0].Event.(*flow.EpochSetup).FinalView 
= genesis.View + 100_000 + seal.ResultID = result.ID() qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(genesis.ID())) - rootSnapshot, err := inmem.SnapshotFromBootstrapState(genesis, result, seal, qc) + safetyParams, err := protocol.DefaultEpochSafetyParams(genesis.ChainID) require.NoError(suite.T(), err) - suite.epochCounter = rootSnapshot.Encodable().Epochs.Current.Counter + minEpochStateEntry, err := inmem.EpochProtocolStateFromServiceEvents( + result.ServiceEvents[0].Event.(*flow.EpochSetup), + result.ServiceEvents[1].Event.(*flow.EpochCommit), + ) + require.NoError(suite.T(), err) + rootProtocolState, err := kvstore.NewDefaultKVStore( + safetyParams.FinalizationSafetyThreshold, + safetyParams.EpochExtensionViewCount, + minEpochStateEntry.ID(), + ) + require.NoError(suite.T(), err) + genesis.Payload.ProtocolStateID = rootProtocolState.ID() + rootSnapshot, err := unittest.SnapshotFromBootstrapState(genesis, result, seal, qc) + require.NoError(suite.T(), err) + suite.epochCounter = rootSnapshot.Encodable().SealingSegment.LatestProtocolStateEntry().EpochEntry.EpochCounter() - suite.protoGenesis = genesis.Header + suite.protoGenesis = genesis state, err := pbadger.Bootstrap( metrics, suite.db, + suite.lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) require.NoError(suite.T(), err) - suite.protoState, err = pbadger.NewFollowerState(log, tracer, events.NewNoop(), state, all.Index, all.Payloads, protocolutil.MockBlockTimer()) + suite.protoState, err = pbadger.NewFollowerState( + log, tracer, events.NewNoop(), state, all.Index, all.Payloads, protocolutil.MockBlockTimer(), + ) require.NoError(suite.T(), err) + suite.mutableProtocolState = protocol_state.NewMutableProtocolState( + log, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, + state.Params(), + all.Headers, + all.Results, + all.EpochSetups, + all.EpochCommits, + ) + clusterStateRoot, err := NewStateRoot(suite.genesis, unittest.QuorumCertificateFixture(), suite.epochCounter) suite.NoError(err) - clusterState, err := Bootstrap(suite.db, clusterStateRoot) + clusterState, err := Bootstrap(suite.db, suite.lockManager, clusterStateRoot) suite.Assert().Nil(err) - suite.state, err = NewMutableState(clusterState, tracer, all.Headers, colPayloads) + suite.state, err = NewMutableState(clusterState, suite.lockManager, tracer, all.Headers, colPayloads) suite.Assert().Nil(err) } @@ -131,35 +165,50 @@ func (suite *MutatorSuite) Payload(transactions ...*flow.TransactionBody) model. minRefID = refBlock.ID() } } - return model.PayloadFromTransactions(minRefID, transactions...) + + // avoid a nil transaction list + if len(transactions) == 0 { + transactions = []*flow.TransactionBody{} + } + + payload, err := model.NewPayload( + model.UntrustedPayload{ + ReferenceBlockID: minRefID, + Collection: flow.Collection{Transactions: transactions}, + }, + ) + suite.Assert().NoError(err) + + return *payload } -// BlockWithParent returns a valid block with the given parent. -func (suite *MutatorSuite) BlockWithParent(parent *model.Block) model.Block { - block := unittest.ClusterBlockWithParent(parent) - payload := suite.Payload() - block.SetPayload(payload) - return block +// ProposalWithParentAndPayload returns a valid block proposal with the given parent and the given payload.
+func (suite *MutatorSuite) ProposalWithParentAndPayload(parent *model.Block, payload model.Payload) model.Proposal { + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + unittest.ClusterBlock.WithPayload(payload), + ) + return *unittest.ClusterProposalFromBlock(block) } -// Block returns a valid cluster block with genesis as parent. -func (suite *MutatorSuite) Block() model.Block { - return suite.BlockWithParent(suite.genesis) +// Proposal returns a valid cluster block proposal with genesis as parent. +func (suite *MutatorSuite) Proposal() model.Proposal { + return suite.ProposalWithParentAndPayload(suite.genesis, suite.Payload()) } func (suite *MutatorSuite) FinalizeBlock(block model.Block) { - err := suite.db.Update(func(tx *badger.Txn) error { - var refBlock flow.Header - err := operation.RetrieveHeader(block.Payload.ReferenceBlockID, &refBlock)(tx) - if err != nil { - return err - } - err = procedure.FinalizeClusterBlock(block.ID())(tx) - if err != nil { - return err - } - err = operation.IndexClusterBlockByReferenceHeight(refBlock.Height, block.ID())(tx) - return err + var refBlock flow.Header + err := operation.RetrieveHeader(suite.db.Reader(), block.Payload.ReferenceBlockID, &refBlock) + suite.Require().Nil(err) + + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := operation.FinalizeClusterBlock(lctx, rw, block.ID()) + if err != nil { + return err + } + return operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), refBlock.Height, block.ID()) + }) }) suite.Assert().NoError(err) } @@ -178,21 +227,14 @@ func TestMutator(t *testing.T) { } func (suite *MutatorSuite) TestBootstrap_InvalidHeight() { - suite.genesis.Header.Height = 1 + suite.genesis.Height = 1 _, err := NewStateRoot(suite.genesis, unittest.QuorumCertificateFixture(), suite.epochCounter) suite.Assert().Error(err) } func (suite *MutatorSuite) TestBootstrap_InvalidParentHash() { - suite.genesis.Header.ParentID = unittest.IdentifierFixture() - - _, err := NewStateRoot(suite.genesis, unittest.QuorumCertificateFixture(), suite.epochCounter) - suite.Assert().Error(err) -} - -func (suite *MutatorSuite) TestBootstrap_InvalidPayloadHash() { - suite.genesis.Header.PayloadHash = unittest.IdentifierFixture() + suite.genesis.ParentID = unittest.IdentifierFixture() _, err := NewStateRoot(suite.genesis, unittest.QuorumCertificateFixture(), suite.epochCounter) suite.Assert().Error(err) @@ -200,72 +242,81 @@ func (suite *MutatorSuite) TestBootstrap_InvalidPayloadHash() { func (suite *MutatorSuite) TestBootstrap_InvalidPayload() { // this is invalid because genesis collection should be empty - suite.genesis.Payload = unittest.ClusterPayloadFixture(2) + suite.genesis.Payload = *unittest.ClusterPayloadFixture(2) _, err := NewStateRoot(suite.genesis, unittest.QuorumCertificateFixture(), suite.epochCounter) suite.Assert().Error(err) } +// TestBootstrap_Successful verifies that basic information is successfully persisted during bootstrapping. +// 1. The collector's root block was inserted and indexed. Specifically: +// - The collection contained in the root block can be retrieved by its ID. +// - The transactions contained in the root block's collection can be looked up by the root block's ID. +// - The root block's header can be retrieved by its ID. +// (The payload, i.e. the collection, we already retrieved above.) 
+// - The root block can be looked up by its height. +// - The latest finalized cluster block height should be the height of the root block. func (suite *MutatorSuite) TestBootstrap_Successful() { - err := suite.db.View(func(tx *badger.Txn) error { - - // should insert collection - var collection flow.LightCollection - err := operation.RetrieveCollection(suite.genesis.Payload.Collection.ID(), &collection)(tx) + err := (func(r storage.Reader) error { + // Bootstrapping should have inserted the collection contained in the root block + collection := new(flow.LightCollection) + err := operation.RetrieveCollection(r, suite.genesis.Payload.Collection.ID(), collection) suite.Assert().Nil(err) suite.Assert().Equal(suite.genesis.Payload.Collection.Light(), collection) - // should index collection - collection = flow.LightCollection{} // reset the collection - err = operation.LookupCollectionPayload(suite.genesis.ID(), &collection.Transactions)(tx) + // Bootstrapping should have indexed the transactions contained in the collector's root block. + var txIDs []flow.Identifier // transaction IDs indexed under the root block's ID + err = operation.LookupCollectionPayload(r, suite.genesis.ID(), &txIDs) suite.Assert().Nil(err) suite.Assert().Equal(suite.genesis.Payload.Collection.Light().Transactions, txIDs) - // should insert header + // Bootstrapping should have inserted the collector's root block header var header flow.Header - err = operation.RetrieveHeader(suite.genesis.ID(), &header)(tx) + err = operation.RetrieveHeader(r, suite.genesis.ID(), &header) suite.Assert().Nil(err) - suite.Assert().Equal(suite.genesis.Header.ID(), header.ID()) + suite.Assert().Equal(suite.genesis.ToHeader().ID(), header.ID()) - // should insert block height -> ID lookup + // Bootstrapping should have indexed the root block's ID by the root block's height. var blockID flow.Identifier - err = operation.LookupClusterBlockHeight(suite.genesis.Header.ChainID, suite.genesis.Header.Height, &blockID)(tx) + err = operation.LookupClusterBlockHeight(r, suite.genesis.ChainID, suite.genesis.Height, &blockID) suite.Assert().Nil(err) suite.Assert().Equal(suite.genesis.ID(), blockID) - // should insert boundary - var boundary uint64 - err = operation.RetrieveClusterFinalizedHeight(suite.genesis.Header.ChainID, &boundary)(tx) + // Bootstrapping should have recorded the root block's height as the latest finalized cluster block height.
+ var latestFinalizedClusterBlockHeight uint64 + err = operation.RetrieveClusterFinalizedHeight(r, suite.genesis.ChainID, &latestFinalizedClusterBlockHeight) suite.Assert().Nil(err) - suite.Assert().Equal(suite.genesis.Header.Height, boundary) + suite.Assert().Equal(suite.genesis.Height, latestFinalizedClusterBlockHeight) return nil - }) + })(suite.db.Reader()) suite.Assert().Nil(err) } func (suite *MutatorSuite) TestExtend_WithoutBootstrap() { - block := unittest.ClusterBlockWithParent(suite.genesis) - err := suite.state.Extend(&block) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(suite.genesis), + ) + err := suite.state.Extend(unittest.ClusterProposalFromBlock(block)) suite.Assert().Error(err) } func (suite *MutatorSuite) TestExtend_InvalidChainID() { - block := suite.Block() + proposal := suite.Proposal() // change the chain ID - block.Header.ChainID = flow.ChainID(fmt.Sprintf("%s-invalid", block.Header.ChainID)) + proposal.Block.ChainID = flow.ChainID(fmt.Sprintf("%s-invalid", proposal.Block.ChainID)) - err := suite.state.Extend(&block) + err := suite.state.Extend(&proposal) suite.Assert().Error(err) suite.Assert().True(state.IsInvalidExtensionError(err)) } func (suite *MutatorSuite) TestExtend_InvalidBlockHeight() { - block := suite.Block() + proposal := suite.Proposal() // change the block height - block.Header.Height = block.Header.Height - 1 + proposal.Block.Height = proposal.Block.Height + 1 - err := suite.state.Extend(&block) + err := suite.state.Extend(&proposal) suite.Assert().Error(err) suite.Assert().True(state.IsInvalidExtensionError(err)) } @@ -273,92 +324,100 @@ func (suite *MutatorSuite) TestExtend_InvalidBlockHeight() { // TestExtend_InvalidParentView tests if mutator rejects block with invalid ParentView. ParentView must be consistent // with view of block referred by ParentID. 
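// As an illustrative aside (not part of the patch): the consistency rule described in the
// comment above amounts to a check along the following lines, where `headers` and
// `candidate` are hypothetical stand-ins for the mutator's actual inputs:
//
//	parent, err := headers.ByBlockID(candidate.ParentID)
//	if err != nil {
//		return fmt.Errorf("could not retrieve parent %x: %w", candidate.ParentID, err)
//	}
//	if candidate.ParentView != parent.View {
//		return state.NewInvalidExtensionErrorf(
//			"candidate's ParentView (%d) is inconsistent with the view of its parent (%d)",
//			candidate.ParentView, parent.View)
//	}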
func (suite *MutatorSuite) TestExtend_InvalidParentView() { - block := suite.Block() - // change the block parent view - block.Header.ParentView-- + tx1 := suite.Tx() + tx2 := suite.Tx() + + proposal1 := suite.ProposalWithParentAndPayload(suite.genesis, suite.Payload(&tx1)) + + err := suite.state.Extend(&proposal1) + suite.Assert().Nil(err) + + suite.FinalizeBlock(proposal1.Block) + suite.Assert().Nil(err) - err := suite.state.Extend(&block) + proposal2 := suite.ProposalWithParentAndPayload(&proposal1.Block, suite.Payload(&tx2)) + // change the block ParentView + proposal2.Block.ParentView-- + + err = suite.state.Extend(&proposal2) suite.Assert().Error(err) suite.Assert().True(state.IsInvalidExtensionError(err)) } func (suite *MutatorSuite) TestExtend_DuplicateTxInPayload() { - block := suite.Block() // add the same transaction to a payload twice tx := suite.Tx() - payload := suite.Payload(&tx, &tx) - block.SetPayload(payload) + proposal := suite.ProposalWithParentAndPayload(suite.genesis, suite.Payload(&tx, &tx)) // should fail to extend block with invalid payload - err := suite.state.Extend(&block) + err := suite.state.Extend(&proposal) suite.Assert().Error(err) suite.Assert().True(state.IsInvalidExtensionError(err)) } func (suite *MutatorSuite) TestExtend_OnParentOfFinalized() { // build one block on top of genesis - block1 := suite.Block() - err := suite.state.Extend(&block1) + proposal1 := suite.Proposal() + err := suite.state.Extend(&proposal1) suite.Assert().Nil(err) // finalize the block - suite.FinalizeBlock(block1) + suite.FinalizeBlock(proposal1.Block) // insert another block on top of genesis // since we have already finalized block 1, this is invalid - block2 := suite.Block() + proposal2 := suite.Proposal() // try to extend with the invalid block - err = suite.state.Extend(&block2) + err = suite.state.Extend(&proposal2) suite.Assert().Error(err) suite.Assert().True(state.IsOutdatedExtensionError(err)) } func (suite *MutatorSuite) TestExtend_Success() { - block := suite.Block() - err := suite.state.Extend(&block) + proposal := suite.Proposal() + err := suite.state.Extend(&proposal) suite.Assert().Nil(err) // should be able to retrieve the block + r := suite.db.Reader() var extended model.Block - err = suite.db.View(procedure.RetrieveClusterBlock(block.ID(), &extended)) + err = operation.RetrieveClusterBlock(r, proposal.Block.ID(), &extended) suite.Assert().Nil(err) - suite.Assert().Equal(*block.Payload, *extended.Payload) + suite.Assert().Equal(proposal.Block.Payload, extended.Payload) // the block should be indexed by its parent var childIDs flow.IdentifierList - err = suite.db.View(procedure.LookupBlockChildren(suite.genesis.ID(), &childIDs)) + err = operation.RetrieveBlockChildren(r, suite.genesis.ID(), &childIDs) suite.Assert().Nil(err) suite.Require().Len(childIDs, 1) - suite.Assert().Equal(block.ID(), childIDs[0]) + suite.Assert().Equal(proposal.Block.ID(), childIDs[0]) } func (suite *MutatorSuite) TestExtend_WithEmptyCollection() { - block := suite.Block() // set an empty collection as the payload - block.SetPayload(suite.Payload()) - err := suite.state.Extend(&block) + proposal := suite.Proposal() + err := suite.state.Extend(&proposal) suite.Assert().Nil(err) } // an unknown reference block is unverifiable func (suite *MutatorSuite) TestExtend_WithNonExistentReferenceBlock() { suite.Run("empty collection", func() { - block := suite.Block() - block.Payload.ReferenceBlockID = unittest.IdentifierFixture() - block.SetPayload(*block.Payload) - err := 
suite.state.Extend(&block) + payload := suite.Payload() + payload.ReferenceBlockID = unittest.IdentifierFixture() + proposal := suite.ProposalWithParentAndPayload(suite.genesis, payload) + err := suite.state.Extend(&proposal) suite.Assert().Error(err) suite.Assert().True(state.IsUnverifiableExtensionError(err)) }) suite.Run("non-empty collection", func() { - block := suite.Block() tx := suite.Tx() payload := suite.Payload(&tx) // set a random reference block ID payload.ReferenceBlockID = unittest.IdentifierFixture() - block.SetPayload(payload) - err := suite.state.Extend(&block) + proposal := suite.ProposalWithParentAndPayload(suite.genesis, payload) + err := suite.state.Extend(&proposal) suite.Assert().Error(err) suite.Assert().True(state.IsUnverifiableExtensionError(err)) }) @@ -370,31 +429,26 @@ func (suite *MutatorSuite) TestExtend_WithExpiredReferenceBlock() { // the collection to be expired parent := suite.protoGenesis for i := 0; i < flow.DefaultTransactionExpiry+1; i++ { - next := unittest.BlockWithParentFixture(parent) - next.Payload.Guarantees = nil - next.SetPayload(*next.Payload) - err := suite.protoState.ExtendCertified(context.Background(), next, unittest.CertifyBlock(next.Header)) + next := unittest.BlockWithParentProtocolState(parent) + err := suite.protoState.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(next)) suite.Require().Nil(err) err = suite.protoState.Finalize(context.Background(), next.ID()) suite.Require().Nil(err) - parent = next.Header + parent = next } - block := suite.Block() // set genesis as reference block - block.SetPayload(model.EmptyPayload(suite.protoGenesis.ID())) - err := suite.state.Extend(&block) + proposal := suite.ProposalWithParentAndPayload(suite.genesis, *model.NewEmptyPayload(suite.protoGenesis.ID())) + err := suite.state.Extend(&proposal) suite.Assert().Nil(err) } func (suite *MutatorSuite) TestExtend_WithReferenceBlockFromClusterChain() { // TODO skipping as this isn't implemented yet unittest.SkipUnless(suite.T(), unittest.TEST_TODO, "skipping as this isn't implemented yet") - - block := suite.Block() // set genesis from cluster chain as reference block - block.SetPayload(model.EmptyPayload(suite.genesis.ID())) - err := suite.state.Extend(&block) + proposal := suite.ProposalWithParentAndPayload(suite.genesis, *model.NewEmptyPayload(suite.genesis.ID())) + err := suite.state.Extend(&proposal) suite.Assert().Error(err) } @@ -402,16 +456,15 @@ func (suite *MutatorSuite) TestExtend_WithReferenceBlockFromClusterChain() { // using a reference block in a different epoch than the cluster's epoch. 
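// As an illustrative aside (not part of the patch): the rule exercised by the test below.
// A cluster committee is scoped to a single epoch, so a candidate's reference block must
// fall into the cluster's operating epoch; `refEpoch` and `clusterEpoch` are hypothetical
// values resolved from the protocol state:
//
//	if refEpoch != clusterEpoch {
//		return state.NewInvalidExtensionErrorf(
//			"reference block is in epoch %d, but cluster operates in epoch %d",
//			refEpoch, clusterEpoch)
//	}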
func (suite *MutatorSuite) TestExtend_WithReferenceBlockFromDifferentEpoch() { // build and complete the current epoch, then use a reference block from next epoch - eb := unittest.NewEpochBuilder(suite.T(), suite.protoState) + eb := unittest.NewEpochBuilder(suite.T(), suite.mutableProtocolState, suite.protoState) eb.BuildEpoch().CompleteEpoch() heights, ok := eb.EpochHeights(1) require.True(suite.T(), ok) nextEpochHeader, err := suite.protoState.AtHeight(heights.FinalHeight() + 1).Head() require.NoError(suite.T(), err) - block := suite.Block() - block.SetPayload(model.EmptyPayload(nextEpochHeader.ID())) - err = suite.state.Extend(&block) + proposal := suite.ProposalWithParentAndPayload(suite.genesis, *model.NewEmptyPayload(nextEpochHeader.ID())) + err = suite.state.Extend(&proposal) suite.Assert().Error(err) suite.Assert().True(state.IsInvalidExtensionError(err)) } @@ -421,15 +474,12 @@ func (suite *MutatorSuite) TestExtend_WithReferenceBlockFromDifferentEpoch() { // should be considered an unverifiable extension. It's possible that this reference // block has been finalized, we just haven't processed it yet. func (suite *MutatorSuite) TestExtend_WithUnfinalizedReferenceBlock() { - unfinalized := unittest.BlockWithParentFixture(suite.protoGenesis) - unfinalized.Payload.Guarantees = nil - unfinalized.SetPayload(*unfinalized.Payload) - err := suite.protoState.ExtendCertified(context.Background(), unfinalized, unittest.CertifyBlock(unfinalized.Header)) + unfinalized := unittest.BlockWithParentProtocolState(suite.protoGenesis) + err := suite.protoState.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(unfinalized)) suite.Require().NoError(err) - block := suite.Block() - block.SetPayload(model.EmptyPayload(unfinalized.ID())) - err = suite.state.Extend(&block) + proposal := suite.ProposalWithParentAndPayload(suite.genesis, *model.NewEmptyPayload(unfinalized.ID())) + err = suite.state.Extend(&proposal) suite.Assert().Error(err) suite.Assert().True(state.IsUnverifiableExtensionError(err)) } @@ -440,23 +490,21 @@ func (suite *MutatorSuite) TestExtend_WithUnfinalizedReferenceBlock() { // to only use finalized blocks as reference, the proposer knowingly generated an invalid func (suite *MutatorSuite) TestExtend_WithOrphanedReferenceBlock() { // create a block extending genesis which is not finalized - orphaned := unittest.BlockWithParentFixture(suite.protoGenesis) - err := suite.protoState.ExtendCertified(context.Background(), orphaned, unittest.CertifyBlock(orphaned.Header)) + orphaned := unittest.BlockWithParentProtocolState(suite.protoGenesis) + err := suite.protoState.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(orphaned)) suite.Require().NoError(err) // create a block extending genesis (conflicting with previous) which is finalized - finalized := unittest.BlockWithParentFixture(suite.protoGenesis) + finalized := unittest.BlockWithParentProtocolState(suite.protoGenesis) finalized.Payload.Guarantees = nil - finalized.SetPayload(*finalized.Payload) - err = suite.protoState.ExtendCertified(context.Background(), finalized, unittest.CertifyBlock(finalized.Header)) + err = suite.protoState.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(finalized)) suite.Require().NoError(err) err = suite.protoState.Finalize(context.Background(), finalized.ID()) suite.Require().NoError(err) // test referencing the orphaned block - block := suite.Block() - block.SetPayload(model.EmptyPayload(orphaned.ID())) - err = suite.state.Extend(&block) + proposal := 
suite.ProposalWithParentAndPayload(suite.genesis, *model.NewEmptyPayload(orphaned.ID())) + err = suite.state.Extend(&proposal) suite.Assert().Error(err) suite.Assert().True(state.IsInvalidExtensionError(err)) } @@ -465,21 +513,17 @@ func (suite *MutatorSuite) TestExtend_UnfinalizedBlockWithDupeTx() { tx1 := suite.Tx() // create a block extending genesis containing tx1 - block1 := suite.Block() - payload1 := suite.Payload(&tx1) - block1.SetPayload(payload1) + proposal1 := suite.ProposalWithParentAndPayload(suite.genesis, suite.Payload(&tx1)) // should be able to extend block 1 - err := suite.state.Extend(&block1) + err := suite.state.Extend(&proposal1) suite.Assert().Nil(err) // create a block building on block1 ALSO containing tx1 - block2 := suite.BlockWithParent(&block1) - payload2 := suite.Payload(&tx1) - block2.SetPayload(payload2) + proposal2 := suite.ProposalWithParentAndPayload(&proposal1.Block, suite.Payload(&tx1)) // should be unable to extend block 2, as it contains a dupe transaction - err = suite.state.Extend(&block2) + err = suite.state.Extend(&proposal2) suite.Assert().Error(err) suite.Assert().True(state.IsInvalidExtensionError(err)) } @@ -488,25 +532,23 @@ func (suite *MutatorSuite) TestExtend_FinalizedBlockWithDupeTx() { tx1 := suite.Tx() // create a block extending genesis containing tx1 - block1 := suite.Block() - payload1 := suite.Payload(&tx1) - block1.SetPayload(payload1) + proposal1 := suite.ProposalWithParentAndPayload(suite.genesis, suite.Payload(&tx1)) // should be able to extend block 1 - err := suite.state.Extend(&block1) + err := suite.state.Extend(&proposal1) suite.Assert().Nil(err) // should be able to finalize block 1 - suite.FinalizeBlock(block1) + suite.FinalizeBlock(proposal1.Block) suite.Assert().Nil(err) // create a block building on block1 ALSO containing tx1 - block2 := suite.BlockWithParent(&block1) - payload2 := suite.Payload(&tx1) - block2.SetPayload(payload2) - + block2 := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(&proposal1.Block), + unittest.ClusterBlock.WithPayload(suite.Payload(&tx1)), + ) // should be unable to extend block 2, as it contains a dupe transaction - err = suite.state.Extend(&block2) + err = suite.state.Extend(unittest.ClusterProposalFromBlock(block2)) suite.Assert().Error(err) suite.Assert().True(state.IsInvalidExtensionError(err)) } @@ -515,22 +557,18 @@ func (suite *MutatorSuite) TestExtend_ConflictingForkWithDupeTx() { tx1 := suite.Tx() // create a block extending genesis containing tx1 - block1 := suite.Block() - payload1 := suite.Payload(&tx1) - block1.SetPayload(payload1) + proposal1 := suite.ProposalWithParentAndPayload(suite.genesis, suite.Payload(&tx1)) // should be able to extend block 1 - err := suite.state.Extend(&block1) + err := suite.state.Extend(&proposal1) suite.Assert().Nil(err) // create a block ALSO extending genesis ALSO containing tx1 - block2 := suite.Block() - payload2 := suite.Payload(&tx1) - block2.SetPayload(payload2) + proposal2 := suite.ProposalWithParentAndPayload(suite.genesis, suite.Payload(&tx1)) // should be able to extend block2 // although it conflicts with block1, it is on a different fork - err = suite.state.Extend(&block2) + err = suite.state.Extend(&proposal2) suite.Assert().Nil(err) } @@ -543,7 +581,7 @@ func (suite *MutatorSuite) TestExtend_LargeHistory() { refID := final.ID() // keep track of the head of the chain - head := *suite.genesis + head := suite.genesis // keep track of transactions in orphaned forks (eligible for inclusion in future block) var 
invalidatedTransactions []*flow.TransactionBody @@ -567,34 +605,35 @@ func (suite *MutatorSuite) TestExtend_LargeHistory() { // by default, build on the head - if we are building a // conflicting fork, build on the parent of the head - parent := head + parent := *head if conflicting { - err = suite.db.View(procedure.RetrieveClusterBlock(parent.Header.ParentID, &parent)) + err = operation.RetrieveClusterBlock(suite.db.Reader(), parent.ParentID, &parent) assert.NoError(t, err) // add the transaction to the invalidated list invalidatedTransactions = append(invalidatedTransactions, &tx) - } else if head.Header.Height < 50 { + } else if head.Height < 50 { oldTransactions = append(oldTransactions, &tx) } // create a block containing the transaction - block := unittest.ClusterBlockWithParent(&head) - payload := suite.Payload(&tx) - block.SetPayload(payload) - err = suite.state.Extend(&block) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(head), + unittest.ClusterBlock.WithPayload(suite.Payload(&tx)), + ) + err = suite.state.Extend(unittest.ClusterProposalFromBlock(block)) assert.NoError(t, err) // reset the valid head if we aren't building a conflicting fork if !conflicting { head = block - suite.FinalizeBlock(block) + suite.FinalizeBlock(*block) assert.NoError(t, err) } // stop building blocks once we've built a history which exceeds the transaction // expiry length - this tests that deduplication works properly against old blocks // which nevertheless have a potentially conflicting reference block - if head.Header.Height > flow.DefaultTransactionExpiry+100 { + if head.Height > flow.DefaultTransactionExpiry+100 { break } } @@ -602,18 +641,20 @@ func (suite *MutatorSuite) TestExtend_LargeHistory() { t.Log("conflicting: ", len(invalidatedTransactions)) t.Run("should be able to extend with transactions in orphaned forks", func(t *testing.T) { - block := unittest.ClusterBlockWithParent(&head) - payload := suite.Payload(invalidatedTransactions...) - block.SetPayload(payload) - err = suite.state.Extend(&block) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(head), + unittest.ClusterBlock.WithPayload(suite.Payload(invalidatedTransactions...)), + ) + err = suite.state.Extend(unittest.ClusterProposalFromBlock(block)) assert.NoError(t, err) }) t.Run("should be unable to extend with conflicting transactions within reference height range of extending block", func(t *testing.T) { - block := unittest.ClusterBlockWithParent(&head) - payload := suite.Payload(oldTransactions...) 
- block.SetPayload(payload) - err = suite.state.Extend(&block) + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(head), + unittest.ClusterBlock.WithPayload(suite.Payload(oldTransactions...)), + ) + err = suite.state.Extend(unittest.ClusterProposalFromBlock(block)) assert.Error(t, err) suite.Assert().True(state.IsInvalidExtensionError(err)) }) diff --git a/state/cluster/badger/params.go b/state/cluster/badger/params.go index ab557f2a7f2..afdbb7ff129 100644 --- a/state/cluster/badger/params.go +++ b/state/cluster/badger/params.go @@ -8,6 +8,6 @@ type Params struct { state *State } -func (p *Params) ChainID() (flow.ChainID, error) { - return p.state.clusterID, nil +func (p *Params) ChainID() flow.ChainID { + return p.state.clusterID } diff --git a/state/cluster/badger/snapshot.go b/state/cluster/badger/snapshot.go index 7823f700163..6dd8dad653e 100644 --- a/state/cluster/badger/snapshot.go +++ b/state/cluster/badger/snapshot.go @@ -1,94 +1,100 @@ package badger import ( + "errors" "fmt" - "github.com/dgraph-io/badger/v2" - "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/module/irrecoverable" + clusterState "github.com/onflow/flow-go/state/cluster" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" ) -// Snapshot represents a snapshot of chain state anchored at a particular -// reference block. +// Snapshot pertains to a specific fork of the collector cluster consensus. Specifically, +// it references one block denoted as the `Head`. This Snapshot type is for collector +// clusters, so we are referencing a cluster block, aka collection, here. +// +// This implementation must be used for KNOWN reference BLOCKs only. type Snapshot struct { - err error state *State blockID flow.Identifier } -func (s *Snapshot) Collection() (*flow.Collection, error) { - if s.err != nil { - return nil, s.err - } - - var collection flow.Collection - err := s.state.db.View(func(tx *badger.Txn) error { +var _ clusterState.Snapshot = (*Snapshot)(nil) - // get the header for this snapshot - var header flow.Header - err := s.head(&header)(tx) - if err != nil { - return fmt.Errorf("failed to get snapshot header: %w", err) - } - - // get the payload - var payload cluster.Payload - err = procedure.RetrieveClusterPayload(header.ID(), &payload)(tx) - if err != nil { - return fmt.Errorf("failed to get snapshot payload: %w", err) - } - - // set the collection - collection = payload.Collection +// newSnapshot instantiates a new snapshot for the given collection ID. +// CAUTION: This constructor must be called for KNOWN blocks. +// For unknown blocks, please use `invalid.NewSnapshot` or `invalid.NewSnapshotf`. +func newSnapshot(state *State, blockID flow.Identifier) *Snapshot { + return &Snapshot{ + state: state, + blockID: blockID, + } +} - return nil - }) +// Collection returns the collection designated as the reference for this +// snapshot. Technically, this is a portion of the payload of a cluster block. +// +// By contract of the constructor, the blockID must correspond to a known collection in the database. +// No error returns are expected during normal operation. 
+func (s *Snapshot) Collection() (*flow.Collection, error) {
+	// get the payload
+	var payload cluster.Payload
+	err := operation.RetrieveClusterPayload(s.state.db.Reader(), s.blockID, &payload)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get snapshot payload: %w", err)
+	}

-	return &collection, err
+	collection := payload.Collection
+	return &collection, nil
 }

+// Head returns the header of the collection that is designated as the reference for
+// this snapshot.
+//
+// By contract of the constructor, the blockID must correspond to a known collection in the database.
+// No error returns are expected during normal operation.
 func (s *Snapshot) Head() (*flow.Header, error) {
-	if s.err != nil {
-		return nil, s.err
-	}
-
 	var head flow.Header
-	err := s.state.db.View(func(tx *badger.Txn) error {
-		return s.head(&head)(tx)
-	})
+	err := operation.RetrieveHeader(s.state.db.Reader(), s.blockID, &head)
+	if err != nil {
+		// `storage.ErrNotFound` is the only error that the storage layer may return other than exceptions.
+		// In the context of this call, `s.blockID` should correspond to a known block, so receiving a
+		// `storage.ErrNotFound` is an exception here.
+		return nil, irrecoverable.NewExceptionf("could not retrieve header for block (%s): %w", s.blockID, err)
+	}
 	return &head, err
 }

+// Pending returns the IDs of all collections descending from the snapshot's head collection.
+// The result is ordered such that parents are included before their children. While only valid
+// descendants will be returned, note that the descendants may not be finalized yet.
+// By contract of the constructor, the blockID must correspond to a known collection in the database.
+// No error returns are expected during normal operation.
 func (s *Snapshot) Pending() ([]flow.Identifier, error) {
-	if s.err != nil {
-		return nil, s.err
-	}
 	return s.pending(s.blockID)
 }

-// head finds the header referenced by the snapshot.
-func (s *Snapshot) head(head *flow.Header) func(*badger.Txn) error {
-	return func(tx *badger.Txn) error {
-
-		// get the snapshot header
-		err := operation.RetrieveHeader(s.blockID, head)(tx)
-		if err != nil {
-			return fmt.Errorf("could not retrieve header for block (%s): %w", s.blockID, err)
-		}
-
-		return nil
-	}
-}
-
+// pending returns a slice with all blocks descending from the given blockID (children, grandchildren, etc).
+// CAUTION: this function behaves correctly only for known blocks, which should always be the case as
+// required by the constructor.
+// No error returns are expected during normal operation.
 func (s *Snapshot) pending(blockID flow.Identifier) ([]flow.Identifier, error) {
-
 	var pendingIDs flow.IdentifierList
-	err := s.state.db.View(procedure.LookupBlockChildren(blockID, &pendingIDs))
+	err := operation.RetrieveBlockChildren(s.state.db.Reader(), blockID, &pendingIDs)
 	if err != nil {
-		return nil, fmt.Errorf("could not get pending children: %w", err)
+		if !errors.Is(err, storage.ErrNotFound) {
+			return nil, fmt.Errorf("could not get pending block %v: %w", blockID, err)
+		}
+
+		// The low-level storage returns `storage.ErrNotFound` in two cases:
+		// 1. the block/collection is unknown
+		// 2. the block/collection is known but no children have been indexed yet
+		// By contract of the constructor, the blockID must correspond to a known collection in the
+		// database, so only case 2 is possible here, and we just return an empty list.
} for _, pendingID := range pendingIDs { diff --git a/state/cluster/badger/snapshot_test.go b/state/cluster/badger/snapshot_test.go index 7964f3a1f1b..76db04a71f5 100644 --- a/state/cluster/badger/snapshot_test.go +++ b/state/cluster/badger/snapshot_test.go @@ -2,13 +2,12 @@ package badger import ( "math" - "math/rand" "os" "testing" - "time" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" model "github.com/onflow/flow-go/model/cluster" @@ -18,17 +17,19 @@ import ( "github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/state/protocol" pbadger "github.com/onflow/flow-go/state/protocol/badger" - storage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/storage/util" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) type SnapshotSuite struct { suite.Suite - db *badger.DB - dbdir string + + db storage.DB + dbdir string + lockManager lockctx.Manager genesis *model.Block chainID flow.ChainID @@ -43,35 +44,37 @@ type SnapshotSuite struct { func (suite *SnapshotSuite) SetupTest() { var err error - // seed the RNG - rand.Seed(time.Now().UnixNano()) - - suite.genesis = model.Genesis() - suite.chainID = suite.genesis.Header.ChainID + suite.genesis, err = unittest.ClusterBlock.Genesis() + require.NoError(suite.T(), err) + suite.chainID = suite.genesis.ChainID suite.dbdir = unittest.TempDir(suite.T()) - suite.db = unittest.BadgerDB(suite.T(), suite.dbdir) + pdb := unittest.PebbleDB(suite.T(), suite.dbdir) + suite.db = pebbleimpl.ToDB(pdb) + suite.lockManager = storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() - all := util.StorageLayer(suite.T(), suite.db) - colPayloads := storage.NewClusterPayloads(metrics, suite.db) + all := store.InitAll(metrics, suite.db) + colPayloads := store.NewClusterPayloads(metrics, suite.db) root := unittest.RootSnapshotFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) - suite.epochCounter = root.Encodable().Epochs.Current.Counter + suite.epochCounter = root.Encodable().SealingSegment.LatestProtocolStateEntry().EpochEntry.EpochCounter() suite.protoState, err = pbadger.Bootstrap( metrics, suite.db, + suite.lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, root, ) @@ -79,9 +82,9 @@ func (suite *SnapshotSuite) SetupTest() { clusterStateRoot, err := NewStateRoot(suite.genesis, unittest.QuorumCertificateFixture(), suite.epochCounter) suite.Require().NoError(err) - clusterState, err := Bootstrap(suite.db, clusterStateRoot) + clusterState, err := Bootstrap(suite.db, suite.lockManager, clusterStateRoot) suite.Require().NoError(err) - suite.state, err = NewMutableState(clusterState, tracer, all.Headers, colPayloads) + suite.state, err = NewMutableState(clusterState, suite.lockManager, tracer, all.Headers, colPayloads) suite.Require().NoError(err) } @@ -111,25 +114,44 @@ func (suite *SnapshotSuite) Payload(transactions ...*flow.TransactionBody) model minRefID = refBlock.ID() } } - 
return model.PayloadFromTransactions(minRefID, transactions...) + + // avoid a nil transaction list to match empty (but non-nil) list returned by snapshot query + if len(transactions) == 0 { + transactions = []*flow.TransactionBody{} + } + + payload, err := model.NewPayload( + model.UntrustedPayload{ + ReferenceBlockID: minRefID, + Collection: flow.Collection{Transactions: transactions}, + }, + ) + suite.Assert().NoError(err) + + return *payload } -// BlockWithParent returns a valid block with the given parent. -func (suite *SnapshotSuite) BlockWithParent(parent *model.Block) model.Block { - block := unittest.ClusterBlockWithParent(parent) - payload := suite.Payload() - block.SetPayload(payload) - return block +// ProposalWithParentAndPayload returns a valid block proposal with the given parent and payload. +func (suite *SnapshotSuite) ProposalWithParentAndPayload(parent *model.Block, payload model.Payload) model.Proposal { + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + unittest.ClusterBlock.WithPayload(payload), + ) + return *unittest.ClusterProposalFromBlock(block) } -// Block returns a valid cluster block with genesis as parent. -func (suite *SnapshotSuite) Block() model.Block { - return suite.BlockWithParent(suite.genesis) +// Proposal returns a valid cluster block proposal with genesis as parent. +func (suite *SnapshotSuite) Proposal() model.Proposal { + return suite.ProposalWithParentAndPayload(suite.genesis, suite.Payload()) } -func (suite *SnapshotSuite) InsertBlock(block model.Block) { - err := suite.db.Update(procedure.InsertClusterBlock(&block)) - suite.Assert().Nil(err) +func (suite *SnapshotSuite) InsertBlock(proposal model.Proposal) { + err := unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterBlock(lctx, rw, &proposal) + }) + }) + suite.Require().NoError(err) } // InsertSubtree recursively inserts chain state as a subtree of the parent @@ -141,9 +163,9 @@ func (suite *SnapshotSuite) InsertSubtree(parent model.Block, depth, fanout int) } for i := 0; i < fanout; i++ { - block := suite.BlockWithParent(&parent) - suite.InsertBlock(block) - suite.InsertSubtree(block, depth-1, fanout) + proposal := suite.ProposalWithParentAndPayload(&parent, suite.Payload()) + suite.InsertBlock(proposal) + suite.InsertSubtree(proposal.Block, depth-1, fanout) } } @@ -171,65 +193,68 @@ func (suite *SnapshotSuite) TestAtBlockID() { // ensure collection is correct coll, err := snapshot.Collection() - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, &suite.genesis.Payload.Collection, coll) // ensure head is correct head, err := snapshot.Head() - assert.Nil(t, err) - assert.Equal(t, suite.genesis.ID(), head.ID()) + assert.NoError(t, err) + assert.Equal(t, suite.genesis.ToHeader().ID(), head.ID()) } func (suite *SnapshotSuite) TestEmptyCollection() { t := suite.T() // create a block with an empty collection - block := suite.BlockWithParent(suite.genesis) - block.SetPayload(model.EmptyPayload(flow.ZeroID)) - suite.InsertBlock(block) + proposal := suite.ProposalWithParentAndPayload(suite.genesis, *model.NewEmptyPayload(flow.ZeroID)) + suite.InsertBlock(proposal) - snapshot := suite.state.AtBlockID(block.ID()) + snapshot := suite.state.AtBlockID(proposal.Block.ID()) // ensure collection is correct coll, err := snapshot.Collection() - assert.Nil(t, err) - assert.Equal(t, 
&block.Payload.Collection, coll) + assert.NoError(t, err) + assert.Equal(t, &proposal.Block.Payload.Collection, coll) } func (suite *SnapshotSuite) TestFinalizedBlock() { t := suite.T() // create a new finalized block on genesis (height=1) - finalizedBlock1 := suite.Block() - err := suite.state.Extend(&finalizedBlock1) - assert.Nil(t, err) + finalizedProposal1 := suite.Proposal() + err := suite.state.Extend(&finalizedProposal1) + assert.NoError(t, err) // create an un-finalized block on genesis (height=1) - unFinalizedBlock1 := suite.Block() - err = suite.state.Extend(&unFinalizedBlock1) - assert.Nil(t, err) + unFinalizedProposal1 := suite.Proposal() + err = suite.state.Extend(&unFinalizedProposal1) + assert.NoError(t, err) // create a second un-finalized on top of the finalized block (height=2) - unFinalizedBlock2 := suite.BlockWithParent(&finalizedBlock1) - err = suite.state.Extend(&unFinalizedBlock2) - assert.Nil(t, err) + unFinalizedProposal2 := suite.ProposalWithParentAndPayload(&finalizedProposal1.Block, suite.Payload()) + err = suite.state.Extend(&unFinalizedProposal2) + assert.NoError(t, err) // finalize the block - err = suite.db.Update(procedure.FinalizeClusterBlock(finalizedBlock1.ID())) - assert.Nil(t, err) + err = unittest.WithLock(suite.T(), suite.lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return suite.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.FinalizeClusterBlock(lctx, rw, finalizedProposal1.Block.ID()) + }) + }) + suite.Require().NoError(err) - // get the final snapshot, should map to finalizedBlock1 + // get the final snapshot, should map to finalizedProposal1 snapshot := suite.state.Final() // ensure collection is correct coll, err := snapshot.Collection() - assert.Nil(t, err) - assert.Equal(t, &finalizedBlock1.Payload.Collection, coll) + assert.NoError(t, err) + assert.Equal(t, &finalizedProposal1.Block.Payload.Collection, coll) // ensure head is correct head, err := snapshot.Head() - assert.Nil(t, err) - assert.Equal(t, finalizedBlock1.ID(), head.ID()) + assert.NoError(t, err) + assert.Equal(t, finalizedProposal1.Block.ToHeader().ID(), head.ID()) } // test that no pending blocks are returned when there are none @@ -251,9 +276,9 @@ func (suite *SnapshotSuite) TestPending_WithPendingBlocks() { parent := suite.genesis pendings := make([]flow.Identifier, 0, 10) for i := 0; i < 10; i++ { - next := suite.BlockWithParent(parent) + next := suite.ProposalWithParentAndPayload(parent, suite.Payload()) suite.InsertBlock(next) - pendings = append(pendings, next.ID()) + pendings = append(pendings, next.Block.ID()) } pending, err := suite.state.Final().Pending() @@ -282,21 +307,19 @@ func (suite *SnapshotSuite) TestPending_Grandchildren() { for _, blockID := range pending { var header flow.Header - err := suite.db.View(operation.RetrieveHeader(blockID, &header)) + err := operation.RetrieveHeader(suite.db.Reader(), blockID, &header) suite.Require().Nil(err) // we must have already seen the parent _, seen := parents[header.ParentID] - suite.Assert().True(seen, "pending list contained child (%x) before parent (%x)", header.ID(), header.ParentID) + suite.Assert().True(seen, "pending list contained child (%x) before parent (%x)", blockID, header.ParentID) // mark this block as seen - parents[header.ID()] = struct{}{} + parents[blockID] = struct{}{} } } func (suite *SnapshotSuite) TestParams_ChainID() { - - chainID, err := suite.state.Params().ChainID() - suite.Require().Nil(err) - 
suite.Assert().Equal(suite.genesis.Header.ChainID, chainID) + chainID := suite.state.Params().ChainID() + suite.Assert().Equal(suite.genesis.ChainID, chainID) } diff --git a/state/cluster/badger/state.go b/state/cluster/badger/state.go index f088328823e..10562e2ae7e 100644 --- a/state/cluster/badger/state.go +++ b/state/cluster/badger/state.go @@ -4,27 +4,45 @@ import ( "errors" "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/consensus/hotstuff" + clustermodel "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/state/cluster" + "github.com/onflow/flow-go/state/cluster/invalid" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage/operation" ) type State struct { - db *badger.DB + db storage.DB clusterID flow.ChainID // the chain ID for the cluster epoch uint64 // the operating epoch for the cluster } +var _ cluster.State = (*State)(nil) + // Bootstrap initializes the persistent cluster state with a genesis block. // The genesis block must have height 0, a parent hash of 32 zero bytes, // and an empty collection as payload. -func Bootstrap(db *badger.DB, stateRoot *StateRoot) (*State, error) { +func Bootstrap(db storage.DB, lockManager lockctx.Manager, stateRoot *StateRoot) (*State, error) { + lctx := lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertOrFinalizeClusterBlock) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock `storage.LockInsertOrFinalizeClusterBlock` for inserting cluster block: %w", err) + } + err = lctx.AcquireLock(storage.LockInsertSafetyData) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock `storage.LockInsertSafetyData` for inserting safety data: %w", err) + } + err = lctx.AcquireLock(storage.LockInsertLivenessData) + if err != nil { + return nil, fmt.Errorf("failed to acquire lock `storage.LockInsertLivenessData` for inserting liveness data: %w", err) + } isBootstrapped, err := IsBootstrapped(db, stateRoot.ClusterID()) if err != nil { return nil, fmt.Errorf("failed to determine whether database contains bootstrapped state: %w", err) @@ -36,42 +54,51 @@ func Bootstrap(db *badger.DB, stateRoot *StateRoot) (*State, error) { genesis := stateRoot.Block() rootQC := stateRoot.QC() + // bootstrap cluster state - err = operation.RetryOnConflict(state.db.Update, func(tx *badger.Txn) error { - chainID := genesis.Header.ChainID - // insert the block - err := procedure.InsertClusterBlock(genesis)(tx) + err = state.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + chainID := genesis.ChainID + // insert the block - by protocol convention, the genesis block does not have a proposer signature, which must be handled by the implementation + proposal, err := clustermodel.NewRootProposal( + clustermodel.UntrustedProposal{ + Block: *genesis, + ProposerSigData: nil, + }, + ) + if err != nil { + return fmt.Errorf("could not build root cluster proposal: %w", err) + } + err = operation.InsertClusterBlock(lctx, rw, proposal) if err != nil { return fmt.Errorf("could not insert genesis block: %w", err) } // insert block height -> ID mapping - err = operation.IndexClusterBlockHeight(chainID, genesis.Header.Height, genesis.ID())(tx) + err = operation.IndexClusterBlockHeight(lctx, rw, chainID, 
genesis.Height, genesis.ID()) if err != nil { return fmt.Errorf("failed to map genesis block height to block: %w", err) } // insert boundary - err = operation.InsertClusterFinalizedHeight(chainID, genesis.Header.Height)(tx) - // insert started view for hotstuff + err = operation.BootstrapClusterFinalizedHeight(lctx, rw, chainID, genesis.Height) if err != nil { return fmt.Errorf("could not insert genesis boundary: %w", err) } safetyData := &hotstuff.SafetyData{ - LockedOneChainView: genesis.Header.View, - HighestAcknowledgedView: genesis.Header.View, + LockedOneChainView: genesis.View, + HighestAcknowledgedView: genesis.View, } livenessData := &hotstuff.LivenessData{ - CurrentView: genesis.Header.View + 1, + CurrentView: genesis.View + 1, // starting view for hotstuff NewestQC: rootQC, } // insert safety data - err = operation.InsertSafetyData(chainID, safetyData)(tx) + err = operation.UpsertSafetyData(lctx, rw, chainID, safetyData) if err != nil { return fmt.Errorf("could not insert safety data: %w", err) } // insert liveness data - err = operation.InsertLivenessData(chainID, livenessData)(tx) + err = operation.UpsertLivenessData(lctx, rw, chainID, livenessData) if err != nil { return fmt.Errorf("could not insert liveness data: %w", err) } @@ -85,7 +112,7 @@ func Bootstrap(db *badger.DB, stateRoot *StateRoot) (*State, error) { return state, nil } -func OpenState(db *badger.DB, _ module.Tracer, _ storage.Headers, _ storage.ClusterPayloads, clusterID flow.ChainID, epoch uint64) (*State, error) { +func OpenState(db storage.DB, _ module.Tracer, _ storage.Headers, _ storage.ClusterPayloads, clusterID flow.ChainID, epoch uint64) (*State, error) { isBootstrapped, err := IsBootstrapped(db, clusterID) if err != nil { return nil, fmt.Errorf("failed to determine whether database contains bootstrapped state: %w", err) @@ -97,7 +124,7 @@ func OpenState(db *badger.DB, _ module.Tracer, _ storage.Headers, _ storage.Clus return state, nil } -func newState(db *badger.DB, clusterID flow.ChainID, epoch uint64) *State { +func newState(db storage.DB, clusterID flow.ChainID, epoch uint64) *State { state := &State{ db: db, clusterID: clusterID, @@ -114,47 +141,44 @@ func (s *State) Params() cluster.Params { } func (s *State) Final() cluster.Snapshot { - // get the finalized block ID - var blockID flow.Identifier - err := s.db.View(func(tx *badger.Txn) error { - var boundary uint64 - err := operation.RetrieveClusterFinalizedHeight(s.clusterID, &boundary)(tx) - if err != nil { - return fmt.Errorf("could not retrieve finalized boundary: %w", err) - } - - err = operation.LookupClusterBlockHeight(s.clusterID, boundary, &blockID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve finalized ID: %w", err) - } - - return nil - }) + // get height of latest finalized collection and then the ID of the collection with the corresponding height + r := s.db.Reader() + var latestFinalizedClusterHeight uint64 + err := operation.RetrieveClusterFinalizedHeight(r, s.clusterID, &latestFinalizedClusterHeight) if err != nil { - return &Snapshot{ - err: err, - } + return invalid.NewSnapshotf("could not retrieve finalized boundary: %w", err) } - snapshot := &Snapshot{ - state: s, - blockID: blockID, + var blockID flow.Identifier + err = operation.LookupClusterBlockHeight(r, s.clusterID, latestFinalizedClusterHeight, &blockID) + if err != nil { + return invalid.NewSnapshotf("could not retrieve finalized ID: %w", err) } - return snapshot + + return newSnapshot(s, blockID) } +// AtBlockID returns the snapshot of the persistent 
cluster at the given +// block ID. It is available for any block that was introduced into the +// cluster state, and can thus represent an ambiguous state that was or +// will never be finalized. +// If the block is unknown, it returns an invalid snapshot, which returns +// state.ErrUnknownSnapshotReference for all methods func (s *State) AtBlockID(blockID flow.Identifier) cluster.Snapshot { - snapshot := &Snapshot{ - state: s, - blockID: blockID, + exists, err := operation.BlockExists(s.db.Reader(), blockID) + if err != nil { + return invalid.NewSnapshotf("could not check existence of reference block: %w", err) + } + if !exists { + return invalid.NewSnapshotf("unknown block %x: %w", blockID, state.ErrUnknownSnapshotReference) } - return snapshot + return newSnapshot(s, blockID) } // IsBootstrapped returns whether the database contains a bootstrapped state. -func IsBootstrapped(db *badger.DB, clusterID flow.ChainID) (bool, error) { +func IsBootstrapped(db storage.DB, clusterID flow.ChainID) (bool, error) { var finalized uint64 - err := db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &finalized)) + err := operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterID, &finalized) if errors.Is(err, storage.ErrNotFound) { return false, nil } diff --git a/state/cluster/badger/state_root.go b/state/cluster/badger/state_root.go index 50f15d0a373..2d9fb206175 100644 --- a/state/cluster/badger/state_root.go +++ b/state/cluster/badger/state_root.go @@ -28,17 +28,12 @@ func NewStateRoot(genesis *cluster.Block, qc *flow.QuorumCertificate, epoch uint func validateClusterGenesis(genesis *cluster.Block) error { // check height of genesis block - if genesis.Header.Height != 0 { - return fmt.Errorf("height of genesis cluster block should be 0 (got %d)", genesis.Header.Height) + if genesis.Height != 0 { + return fmt.Errorf("height of genesis cluster block should be 0 (got %d)", genesis.Height) } // check header parent ID - if genesis.Header.ParentID != flow.ZeroID { - return fmt.Errorf("genesis parent ID must be zero hash (got %x)", genesis.Header.ParentID) - } - - // check payload integrity - if genesis.Header.PayloadHash != genesis.Payload.Hash() { - return fmt.Errorf("computed payload hash does not match header") + if genesis.ParentID != flow.ZeroID { + return fmt.Errorf("genesis parent ID must be zero hash (got %x)", genesis.ParentID) } // check payload @@ -51,7 +46,7 @@ func validateClusterGenesis(genesis *cluster.Block) error { } func (s StateRoot) ClusterID() flow.ChainID { - return s.block.Header.ChainID + return s.block.ChainID } func (s StateRoot) Block() *cluster.Block { diff --git a/state/cluster/badger/state_test.go b/state/cluster/badger/state_test.go new file mode 100644 index 00000000000..63c5ceb6f21 --- /dev/null +++ b/state/cluster/badger/state_test.go @@ -0,0 +1,90 @@ +package badger + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestUnknownSnapshotReference verifies that AtBlockID returns a snapshot that +// returns state.ErrUnknownSnapshotReference for all methods when given an unknown block ID. 
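// As an illustrative aside (not part of the patch): the error contract verified by the test
// below allows callers to branch on the sentinel error; `clusterState` and `blockID` are
// hypothetical caller-side values:
//
//	head, err := clusterState.AtBlockID(blockID).Head()
//	if errors.Is(err, state.ErrUnknownSnapshotReference) {
//		// benign: the referenced block is simply not known to this node (yet)
//	} else if err != nil {
//		// all other errors are unexpected and should be escalated as exceptions
//	}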
+func TestUnknownSnapshotReference(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + + // Setup + genesis, err := unittest.ClusterBlock.Genesis() + require.NoError(t, err) + + root := unittest.RootSnapshotFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) + epochCounter := root.Encodable().SealingSegment.LatestProtocolStateEntry().EpochEntry.EpochCounter() + + clusterStateRoot, err := NewStateRoot(genesis, unittest.QuorumCertificateFixture(), epochCounter) + require.NoError(t, err) + clusterState, err := Bootstrap(db, lockManager, clusterStateRoot) + require.NoError(t, err) + + // Test + unknownBlockID := unittest.IdentifierFixture() + snapshot := clusterState.AtBlockID(unknownBlockID) + + // Verify that Collection() returns state.ErrUnknownSnapshotReference + _, err = snapshot.Collection() + assert.Error(t, err) + assert.ErrorIs(t, err, state.ErrUnknownSnapshotReference) + + // Verify that Head() returns state.ErrUnknownSnapshotReference + _, err = snapshot.Head() + assert.Error(t, err) + assert.ErrorIs(t, err, state.ErrUnknownSnapshotReference) + + // Verify that Pending() returns state.ErrUnknownSnapshotReference + _, err = snapshot.Pending() + assert.Error(t, err) + assert.ErrorIs(t, err, state.ErrUnknownSnapshotReference) + }) +} + +// TestValidSnapshotReference verifies that AtBlockID returns a working snapshot +// when given a valid block ID (the genesis block). +func TestValidSnapshotReference(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + + // Setup + genesis, err := unittest.ClusterBlock.Genesis() + require.NoError(t, err) + + root := unittest.RootSnapshotFixture(unittest.IdentityListFixture(5, unittest.WithAllRoles())) + epochCounter := root.Encodable().SealingSegment.LatestProtocolStateEntry().EpochEntry.EpochCounter() + + clusterStateRoot, err := NewStateRoot(genesis, unittest.QuorumCertificateFixture(), epochCounter) + require.NoError(t, err) + clusterState, err := Bootstrap(db, lockManager, clusterStateRoot) + require.NoError(t, err) + + // Test with valid block ID (genesis block) + snapshot := clusterState.AtBlockID(genesis.ID()) + + // Verify that Collection() works correctly + collection, err := snapshot.Collection() + assert.NoError(t, err) + assert.Equal(t, &genesis.Payload.Collection, collection) + + // Verify that Head() works correctly + head, err := snapshot.Head() + assert.NoError(t, err) + assert.Equal(t, genesis.ToHeader().ID(), head.ID()) + + // Verify that Pending() works correctly (should return empty list for genesis) + pending, err := snapshot.Pending() + assert.NoError(t, err) + assert.Empty(t, pending) + }) +} diff --git a/state/cluster/invalid/snapshot.go b/state/cluster/invalid/snapshot.go new file mode 100644 index 00000000000..02ccb6503ae --- /dev/null +++ b/state/cluster/invalid/snapshot.go @@ -0,0 +1,47 @@ +package invalid + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state" + "github.com/onflow/flow-go/state/cluster" +) + +// Snapshot represents a snapshot that does not exist or could not be queried. +type Snapshot struct { + err error +} + +// NewSnapshot returns a new invalid snapshot, containing an error describing why the +// snapshot could not be retrieved. 
The following are typical +// errors resulting in the construction of an invalid Snapshot: +// - state.ErrUnknownSnapshotReference if the reference point for the snapshot +// (height or block ID) does not resolve to a queriable block in the state. +// - generic error in case of unexpected state inconsistencies or bugs +func NewSnapshot(err error) *Snapshot { + if errors.Is(err, state.ErrUnknownSnapshotReference) { + return &Snapshot{err: err} + } + return &Snapshot{fmt.Errorf("critical unexpected error querying snapshot: %w", err)} +} + +var _ cluster.Snapshot = (*Snapshot)(nil) + +// NewSnapshotf is NewSnapshot with ergonomic error formatting. +func NewSnapshotf(msg string, args ...interface{}) *Snapshot { + return NewSnapshot(fmt.Errorf(msg, args...)) +} + +func (u *Snapshot) Collection() (*flow.Collection, error) { + return nil, u.err +} + +func (u *Snapshot) Head() (*flow.Header, error) { + return nil, u.err +} + +func (u *Snapshot) Pending() ([]flow.Identifier, error) { + return nil, u.err +} diff --git a/state/cluster/mock/mutable_state.go b/state/cluster/mock/mutable_state.go index 372fdc7503b..e0994a444b5 100644 --- a/state/cluster/mock/mutable_state.go +++ b/state/cluster/mock/mutable_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -20,6 +20,10 @@ type MutableState struct { func (_m *MutableState) AtBlockID(blockID flow.Identifier) cluster.Snapshot { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for AtBlockID") + } + var r0 cluster.Snapshot if rf, ok := ret.Get(0).(func(flow.Identifier) cluster.Snapshot); ok { r0 = rf(blockID) @@ -32,13 +36,17 @@ func (_m *MutableState) AtBlockID(blockID flow.Identifier) cluster.Snapshot { return r0 } -// Extend provides a mock function with given fields: candidate -func (_m *MutableState) Extend(candidate *modelcluster.Block) error { - ret := _m.Called(candidate) +// Extend provides a mock function with given fields: proposal +func (_m *MutableState) Extend(proposal *modelcluster.Proposal) error { + ret := _m.Called(proposal) + + if len(ret) == 0 { + panic("no return value specified for Extend") + } var r0 error - if rf, ok := ret.Get(0).(func(*modelcluster.Block) error); ok { - r0 = rf(candidate) + if rf, ok := ret.Get(0).(func(*modelcluster.Proposal) error); ok { + r0 = rf(proposal) } else { r0 = ret.Error(0) } @@ -46,10 +54,14 @@ func (_m *MutableState) Extend(candidate *modelcluster.Block) error { return r0 } -// Final provides a mock function with given fields: +// Final provides a mock function with no fields func (_m *MutableState) Final() cluster.Snapshot { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Final") + } + var r0 cluster.Snapshot if rf, ok := ret.Get(0).(func() cluster.Snapshot); ok { r0 = rf() @@ -62,10 +74,14 @@ func (_m *MutableState) Final() cluster.Snapshot { return r0 } -// Params provides a mock function with given fields: +// Params provides a mock function with no fields func (_m *MutableState) Params() cluster.Params { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Params") + } + var r0 cluster.Params if rf, ok := ret.Get(0).(func() cluster.Params); ok { r0 = rf() @@ -78,13 +94,12 @@ func (_m *MutableState) Params() cluster.Params { return r0 } -type mockConstructorTestingTNewMutableState interface { +// NewMutableState creates a new instance of MutableState. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMutableState(t interface { mock.TestingT Cleanup(func()) -} - -// NewMutableState creates a new instance of MutableState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMutableState(t mockConstructorTestingTNewMutableState) *MutableState { +}) *MutableState { mock := &MutableState{} mock.Mock.Test(t) diff --git a/state/cluster/mock/params.go b/state/cluster/mock/params.go index 7d499e305e0..94be4347973 100644 --- a/state/cluster/mock/params.go +++ b/state/cluster/mock/params.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -12,37 +12,30 @@ type Params struct { mock.Mock } -// ChainID provides a mock function with given fields: -func (_m *Params) ChainID() (flow.ChainID, error) { +// ChainID provides a mock function with no fields +func (_m *Params) ChainID() flow.ChainID { ret := _m.Called() - var r0 flow.ChainID - var r1 error - if rf, ok := ret.Get(0).(func() (flow.ChainID, error)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for ChainID") } + + var r0 flow.ChainID if rf, ok := ret.Get(0).(func() flow.ChainID); ok { r0 = rf() } else { r0 = ret.Get(0).(flow.ChainID) } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } -type mockConstructorTestingTNewParams interface { +// NewParams creates a new instance of Params. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewParams(t interface { mock.TestingT Cleanup(func()) -} - -// NewParams creates a new instance of Params. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewParams(t mockConstructorTestingTNewParams) *Params { +}) *Params { mock := &Params{} mock.Mock.Test(t) diff --git a/state/cluster/mock/snapshot.go b/state/cluster/mock/snapshot.go index 21507885fb7..08938de8e26 100644 --- a/state/cluster/mock/snapshot.go +++ b/state/cluster/mock/snapshot.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -12,10 +12,14 @@ type Snapshot struct { mock.Mock } -// Collection provides a mock function with given fields: +// Collection provides a mock function with no fields func (_m *Snapshot) Collection() (*flow.Collection, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Collection") + } + var r0 *flow.Collection var r1 error if rf, ok := ret.Get(0).(func() (*flow.Collection, error)); ok { @@ -38,10 +42,14 @@ func (_m *Snapshot) Collection() (*flow.Collection, error) { return r0, r1 } -// Head provides a mock function with given fields: +// Head provides a mock function with no fields func (_m *Snapshot) Head() (*flow.Header, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Head") + } + var r0 *flow.Header var r1 error if rf, ok := ret.Get(0).(func() (*flow.Header, error)); ok { @@ -64,10 +72,14 @@ func (_m *Snapshot) Head() (*flow.Header, error) { return r0, r1 } -// Pending provides a mock function with given fields: +// Pending provides a mock function with no fields func (_m *Snapshot) Pending() ([]flow.Identifier, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Pending") + } + var r0 []flow.Identifier var r1 error if rf, ok := ret.Get(0).(func() ([]flow.Identifier, error)); ok { @@ -90,13 +102,12 @@ func (_m *Snapshot) Pending() ([]flow.Identifier, error) { return r0, r1 } -type mockConstructorTestingTNewSnapshot interface { +// NewSnapshot creates a new instance of Snapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSnapshot(t interface { mock.TestingT Cleanup(func()) -} - -// NewSnapshot creates a new instance of Snapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSnapshot(t mockConstructorTestingTNewSnapshot) *Snapshot { +}) *Snapshot { mock := &Snapshot{} mock.Mock.Test(t) diff --git a/state/cluster/mock/state.go b/state/cluster/mock/state.go index 35089d555f6..3f0a103d16c 100644 --- a/state/cluster/mock/state.go +++ b/state/cluster/mock/state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -18,6 +18,10 @@ type State struct { func (_m *State) AtBlockID(blockID flow.Identifier) cluster.Snapshot { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for AtBlockID") + } + var r0 cluster.Snapshot if rf, ok := ret.Get(0).(func(flow.Identifier) cluster.Snapshot); ok { r0 = rf(blockID) @@ -30,10 +34,14 @@ func (_m *State) AtBlockID(blockID flow.Identifier) cluster.Snapshot { return r0 } -// Final provides a mock function with given fields: +// Final provides a mock function with no fields func (_m *State) Final() cluster.Snapshot { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Final") + } + var r0 cluster.Snapshot if rf, ok := ret.Get(0).(func() cluster.Snapshot); ok { r0 = rf() @@ -46,10 +54,14 @@ func (_m *State) Final() cluster.Snapshot { return r0 } -// Params provides a mock function with given fields: +// Params provides a mock function with no fields func (_m *State) Params() cluster.Params { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Params") + } + var r0 cluster.Params if rf, ok := ret.Get(0).(func() cluster.Params); ok { r0 = rf() @@ -62,13 +74,12 @@ func (_m *State) Params() cluster.Params { return r0 } -type mockConstructorTestingTNewState interface { +// NewState creates a new instance of State. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewState(t interface { mock.TestingT Cleanup(func()) -} - -// NewState creates a new instance of State. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewState(t mockConstructorTestingTNewState) *State { +}) *State { mock := &State{} mock.Mock.Test(t) diff --git a/state/cluster/params.go b/state/cluster/params.go index 78581809922..9df9c44840b 100644 --- a/state/cluster/params.go +++ b/state/cluster/params.go @@ -6,7 +6,6 @@ import ( // Params contains constant information about this cluster state. type Params interface { - // ChainID returns the chain ID for this cluster. - ChainID() (flow.ChainID, error) + ChainID() flow.ChainID } diff --git a/state/cluster/root_block.go b/state/cluster/root_block.go index 073c8e84322..c43b9cd8167 100644 --- a/state/cluster/root_block.go +++ b/state/cluster/root_block.go @@ -13,30 +13,41 @@ func CanonicalClusterID(epoch uint64, participants flow.IdentifierList) flow.Cha return flow.ChainID(fmt.Sprintf("cluster-%d-%s", epoch, participants.ID())) } -// these globals are filled by the static initializer -var rootBlockPayload = cluster.EmptyPayload(flow.ZeroID) -var rootBlockPayloadHash = rootBlockPayload.Hash() - // CanonicalRootBlock returns the canonical root block for the given // cluster in the given epoch. 
It contains an empty collection referencing flow.ZeroID. -func CanonicalRootBlock(epoch uint64, participants flow.IdentityList) *cluster.Block { +func CanonicalRootBlock(epoch uint64, participants flow.IdentitySkeletonList) (*cluster.Block, error) { chainID := CanonicalClusterID(epoch, participants.NodeIDs()) - - header := &flow.Header{ + rootHeaderBody, err := flow.NewRootHeaderBody(flow.UntrustedHeaderBody{ ChainID: chainID, ParentID: flow.ZeroID, Height: 0, - PayloadHash: rootBlockPayloadHash, - Timestamp: flow.GenesisTime, + Timestamp: uint64(flow.GenesisTime.UnixMilli()), View: 0, + ParentView: 0, ParentVoterIndices: nil, ParentVoterSigData: nil, ProposerID: flow.ZeroID, - ProposerSigData: nil, + }) + if err != nil { + return nil, fmt.Errorf("failed to create root header body: %w", err) } - return &cluster.Block{ - Header: header, - Payload: &rootBlockPayload, + rootBlockPayload, err := cluster.NewRootPayload( + cluster.UntrustedPayload(*cluster.NewEmptyPayload(flow.ZeroID)), + ) + if err != nil { + return nil, fmt.Errorf("failed to create root cluster payload: %w", err) } + + block, err := cluster.NewRootBlock( + cluster.UntrustedBlock{ + HeaderBody: *rootHeaderBody, + Payload: *rootBlockPayload, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create root cluster block: %w", err) + } + + return block, nil } diff --git a/state/cluster/snapshot.go b/state/cluster/snapshot.go index c69b73844eb..32f5539f44e 100644 --- a/state/cluster/snapshot.go +++ b/state/cluster/snapshot.go @@ -4,23 +4,33 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// Snapshot represents an immutable snapshot at a specific point in the cluster -// state history. +// Snapshot pertains to a specific fork of the collector cluster consensus. Specifically, +// it references one block denoted as the `Head`. This Snapshot type is for collector +// clusters, so we are referencing a cluster block, aka collection, here. type Snapshot interface { - // Collection returns the collection generated in this step of the cluster - // state history. + // Collection returns the collection designated as the reference for this + // snapshot. Technically, this is a portion of the payload of a cluster block. + // + // Expected error returns during normal operations: + // - If the snapshot is for an unknown collection [state.ErrUnknownSnapshotReference] Collection() (*flow.Collection, error) - // Head returns the latest block at the selected point of the cluster state - // history. If the snapshot was selected by block ID, returns the header - // with that block ID. If the snapshot was selected as final, returns the - // latest finalized block. + // Head returns the header of the collection that is designated as the reference for + // this snapshot. Technically, this is the header of a [cluster.Block]. + // + // Expected error returns during normal operations: + // - If the snapshot is for an unknown collection [state.ErrUnknownSnapshotReference] Head() (*flow.Header, error) - // Pending returns all children IDs for the snapshot head, which thus were - // potential extensions of the protocol state at this snapshot. The result - // is ordered such that parents are included before their children. These - // are NOT guaranteed to have been validated by HotStuff. + // Pending returns the IDs of *all* collections descending from the snapshot's head collection. + // The result is ordered such that parents are included before their children.
While only valid + // descendants will be returned, note that the descendants may not be finalized yet. + // + // CAUTION: the list of descendants is constructed for each call via database reads, + // and may be expensive to compute, especially if the reference collection is older. + // + // Expected error returns during normal operations: + // - If the snapshot is for an unknown collection [state.ErrUnknownSnapshotReference] Pending() ([]flow.Identifier, error) } diff --git a/state/cluster/state.go b/state/cluster/state.go index ea01f7f908d..d613ced8cfa 100644 --- a/state/cluster/state.go +++ b/state/cluster/state.go @@ -22,8 +22,10 @@ type State interface { // AtBlockID returns the snapshot of the persistent cluster at the given // block ID. It is available for any block that was introduced into the - // the cluster state, and can thus represent an ambiguous state that was or + // cluster state, and can thus represent an ambiguous state that was or // will never be finalized. + // If the block is unknown, it returns an invalid snapshot, which returns + // state.ErrUnknownSnapshotReference for all methods AtBlockID(blockID flow.Identifier) Snapshot } @@ -39,5 +41,5 @@ type MutableState interface { // - state.OutdatedExtensionError if the candidate block is outdated (e.g. orphaned) // - state.UnverifiableExtensionError if the reference block is _not_ a known finalized block // - state.InvalidExtensionError if the candidate block is invalid - Extend(candidate *cluster.Block) error + Extend(proposal *cluster.Proposal) error } diff --git a/state/errors.go b/state/errors.go index d6997435df3..495aed5a4fe 100644 --- a/state/errors.go +++ b/state/errors.go @@ -19,10 +19,6 @@ type InvalidExtensionError struct { error } -func NewInvalidExtensionError(msg string) error { - return NewInvalidExtensionErrorf(msg) -} - func NewInvalidExtensionErrorf(msg string, args ...interface{}) error { return InvalidExtensionError{ error: fmt.Errorf(msg, args...), @@ -46,10 +42,6 @@ type OutdatedExtensionError struct { error } -func NewOutdatedExtensionError(msg string) error { - return NewOutdatedExtensionErrorf(msg) -} - func NewOutdatedExtensionErrorf(msg string, args ...interface{}) error { return OutdatedExtensionError{ error: fmt.Errorf(msg, args...), diff --git a/state/fork/Readme.md b/state/fork/Readme.md index 09938f46bf0..3054d2d4bd9 100644 --- a/state/fork/Readme.md +++ b/state/fork/Readme.md @@ -9,7 +9,7 @@ The traversal the walks `head <--> lowestBlock` (in either direction). There are a variety of ways to precisely specify `head` and `lowestBlock`: * At least one block, `head` or `lowestBlock`, must be specified by its ID to unambiguously identify the fork that should be traversed. - * The other block an either be specified by ID or height. + * The other block can either be specified by ID or height. * If both `head` and `lowestBlock` are specified by their ID, they must both be on the same fork. diff --git a/state/fork/terminal.go b/state/fork/terminal.go index d550aa36d33..b9fa8c95742 100644 --- a/state/fork/terminal.go +++ b/state/fork/terminal.go @@ -12,7 +12,7 @@ import ( // - the `head` of the fork that should be traversed // - the `lowestBlock` in that fork, which should be included in the traversal // -// The traversal the walks `head <--> lowestBlock` (in either direction). +// The traversal algorithm walks `head <--> lowestBlock` (in either direction). 
// // There are a variety of ways to precisely specify `head` and `lowestBlock`: // - At least one block, `head` or `lowestBlock`, must be specified by its ID diff --git a/state/fork/traversal.go b/state/fork/traversal.go index 18fdcdcbc36..e7d0ce61b85 100644 --- a/state/fork/traversal.go +++ b/state/fork/traversal.go @@ -40,9 +40,10 @@ func TraverseBackward(headers storage.Headers, forkHead flow.Identifier, visitor } // TraverseForward traverses the given fork (specified by block ID `forkHead`) -// in the order of increasing height. The `terminal` defines when the traversal +// in the order of increasing height. The `terminal` defines where the traversal // begins. The `visitor` callback is called for each block in this segment. -func TraverseForward(headers storage.Headers, +func TraverseForward( + headers storage.Headers, forkHead flow.Identifier, visitor onVisitBlock, terminal Terminal, @@ -107,9 +108,11 @@ func unsafeTraverse(headers storage.Headers, block *flow.Header, visitor onVisit return block, nil } - block, err = headers.ByBlockID(block.ParentID) + parent, err := headers.ByBlockID(block.ParentID) if err != nil { - return nil, fmt.Errorf("failed to revtrieve block header %x: %w", block.ParentID, err) + return nil, fmt.Errorf("failed to retrieve block header (id=%x height=%d): %w", block.ParentID, block.Height-1, err) } + + block = parent } } diff --git a/state/protocol/badger/mutator.go b/state/protocol/badger/mutator.go index db2284512be..07bc9eaceb2 100644 --- a/state/protocol/badger/mutator.go +++ b/state/protocol/badger/mutator.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package badger import ( @@ -7,7 +5,7 @@ import ( "errors" "fmt" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" @@ -18,10 +16,10 @@ import ( "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/state/protocol" + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/deferred" + "github.com/onflow/flow-go/storage/operation" ) // FollowerState implements a lighter version of a mutable protocol state. @@ -37,12 +35,13 @@ import ( type FollowerState struct { *State - index storage.Index - payloads storage.Payloads - tracer module.Tracer - logger zerolog.Logger - consumer protocol.Consumer - blockTimer protocol.BlockTimer + index storage.Index + payloads storage.Payloads + tracer module.Tracer + logger zerolog.Logger + consumer protocol.Consumer + blockTimer protocol.BlockTimer + protocolState protocol.MutableProtocolState } var _ protocol.FollowerState = (*FollowerState)(nil) @@ -51,6 +50,7 @@ var _ protocol.FollowerState = (*FollowerState)(nil) // state with a new block, by checking the _entire_ block payload. 
type ParticipantState struct { *FollowerState + receiptValidator module.ReceiptValidator sealValidator module.SealValidator } @@ -76,6 +76,16 @@ func NewFollowerState( logger: logger, consumer: consumer, blockTimer: blockTimer, + protocolState: protocol_state.NewMutableProtocolState( + logger, + state.epochProtocolStateEntriesDB, + state.protocolKVStoreSnapshotsDB, + state.params, + state.headers, + state.results, + state.epoch.setups, + state.epoch.commits, + ), } return followerState, nil } @@ -116,18 +126,42 @@ func NewFullConsensusState( // ExtendCertified extends the protocol state of a CONSENSUS FOLLOWER. While it checks // the validity of the header, it does _not_ check the validity of the payload. -// Instead, the consensus follower relies on the consensus participants to -// validate the full payload. Payload validity can be proved by a valid quorum certificate. -// Certifying QC must match candidate block: +// Instead, the consensus follower relies on the consensus participants to validate the +// full payload. Payload validity must be proven by a valid quorum certificate +// (field `CertifiedBlock.CertifyingQC`). The certifying QC must match candidate block: // // candidate.View == certifyingQC.View && candidate.ID() == certifyingQC.BlockID // -// Caution: -// - This function expects that `certifyingQC` has been validated. -// - The parent block must already be stored. +// CAUTION: +// - This function expects that `certified.CertifyingQC` has been validated. (otherwise, the state will be corrupted) +// - The PARENT block must already have been INGESTED. +// - Attempts to extend the state with the _same block concurrently_ are not allowed. +// (will not corrupt the state, but may lead to an exception) +// +// Per convention, the protocol state requires that the candidate's parent has already been ingested. +// Other than that, all valid extensions are accepted. Even if we have enough information to determine that +// a candidate block is already orphaned (e.g. its view is below the latest finalized view), it is important +// to accept it nevertheless to avoid spamming vulnerabilities. If a block is orphaned, consensus rules +// guarantee that there exists only a limited number of descendants which cannot increase anymore. So there +// is only a finite (generally small) amount of work to do accepting orphaned blocks and all their descendants. +// However, if we were to drop orphaned blocks, e.g. block X of the orphaned fork X <- Y <- Z, we might not +// have enough information to reject blocks Y, Z later if we receive them. We would re-request X, then +// determine it is orphaned and drop it, attempt to ingest Y, re-request the unknown parent X, and repeat +// potentially very often. +// +// To ensure that all ancestors of a candidate block are correct and known to the FollowerState, some external +// ordering and queuing of incoming blocks is generally necessary (responsibility of Compliance Layer). Once a block +// is successfully ingested, repeated extension requests with this block are no-ops. This is convenient for the +// Compliance Layer after a crash, so it doesn't have to worry about which blocks have already been ingested before +// the crash. However, while running it is very easy for the Compliance Layer to avoid concurrent extension requests +// with the same block. Hence, for simplicity, the FollowerState may reject such requests with an exception. // // No errors are expected during normal operations.
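The invariant quoted in the doc comment above, `candidate.View == certifyingQC.View && candidate.ID() == certifyingQC.BlockID`, can also be checked cheaply by the caller before handing a certified block to `ExtendCertified`. A hypothetical pre-flight helper mirroring the method's internal sanity check (the helper itself is not part of this diff, only the field names are):

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

// sanityCheckCertified is an illustrative helper; ExtendCertified performs
// the equivalent check internally and errors if it fails.
func sanityCheckCertified(certified *flow.CertifiedBlock) error {
	candidate := &certified.Proposal.Block
	qc := certified.CertifyingQC
	// the QC must be for exactly this block: same view and same block ID
	if qc.View != candidate.View || qc.BlockID != candidate.ID() {
		return fmt.Errorf("qc (view=%d, block=%x) does not certify candidate (view=%d, block=%x)",
			qc.View, qc.BlockID, candidate.View, candidate.ID())
	}
	return nil
}
```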
-func (m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Block, certifyingQC *flow.QuorumCertificate) error { +// - In case of concurrent calls with the same `candidate` block, ExtendCertified may return a [storage.ErrAlreadyExists] +// or it may gracefully return. At the moment, ExtendCertified should be considered as NOT CONCURRENCY-SAFE. +func (m *FollowerState) ExtendCertified(ctx context.Context, certified *flow.CertifiedBlock) error { + candidate := &certified.Proposal.Block + certifyingQC := certified.CertifyingQC span, ctx := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorHeaderExtend) defer span.End() @@ -139,15 +173,16 @@ func (m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Blo } // sanity check if certifyingQC actually certifies candidate block - if certifyingQC.View != candidate.Header.View { - return fmt.Errorf("qc doesn't certify candidate block, expect %d view, got %d", candidate.Header.View, certifyingQC.View) + if certifyingQC.View != candidate.View { + return fmt.Errorf("qc doesn't certify candidate block, expect %d view, got %d", candidate.View, certifyingQC.View) } if certifyingQC.BlockID != blockID { return fmt.Errorf("qc doesn't certify candidate block, expect %x blockID, got %x", blockID, certifyingQC.BlockID) } + deferredBlockPersist := deferred.NewDeferredBlockPersist() // check if the block header is a valid extension of parent block - err = m.headerExtend(candidate) + err = m.headerExtend(ctx, certified.Proposal, certifyingQC, deferredBlockPersist) if err != nil { // since we have a QC for this block, it cannot be an invalid extension return fmt.Errorf("unexpected invalid block (id=%x) with certifying qc (id=%x): %s", @@ -155,43 +190,97 @@ func (m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Blo } // find the last seal at the parent block - last, err := m.lastSealed(candidate) + latestSeal, err := m.lastSealed(candidate) if err != nil { - return fmt.Errorf("payload seal(s) not compliant with chain state: %w", err) + return fmt.Errorf("failed to determine the latest sealed block in fork: %w", err) } + deferredBlockPersist.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return operation.IndexLatestSealAtBlock(lctx, rw.Writer(), blockID, latestSeal.ID()) + }) - // insert the block, certifying QC and index the last seal for the block - err = m.insert(ctx, candidate, certifyingQC, last) + // TODO: we might not need the deferred db updates, because the candidate passed into + // the Extend method has already been fully constructed. + // evolve protocol state and verify consistency with commitment included in payload + err = m.evolveProtocolState(ctx, candidate, deferredBlockPersist) if err != nil { - return fmt.Errorf("failed to insert the block: %w", err) + return fmt.Errorf("evolving protocol state failed: %w", err) } - return nil + lctx := m.lockManager.NewContext() + defer lctx.Release() + err = lctx.AcquireLock(storage.LockInsertBlock) + if err != nil { + return err + } + + // Execute the deferred database operations as one atomic transaction and emit scheduled notifications on success. + // The `candidate` block _must be valid_ (otherwise, the state will be corrupted)! + // + // Note: The following database write is not concurrency-safe at the moment. If a candidate block is + // identified as a duplicate by `checkBlockAlreadyProcessed` in the beginning, `Extend` behaves as a no-op and + // gracefully returns.
However, if two concurrent `Extend` calls with the same block pass the initial check + for duplicates, both will eventually attempt to commit their deferred database operations. As documented + in `headerExtend`, its deferred operations will abort the write batch with [storage.ErrAlreadyExists]. + In this edge case of two concurrent calls with the same `candidate` block, `Extend` does not behave as + an idempotent operation. + return m.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return deferredBlockPersist.Execute(lctx, blockID, rw) + }) } // Extend extends the protocol state of a CONSENSUS PARTICIPANT. It checks // the validity of the _entire block_ (header and full payload). +// +// CAUTION: +// - per convention, the protocol state requires that the candidate's +// PARENT has already been INGESTED. Otherwise, an exception is returned. +// - Attempts to extend the state with the _same block concurrently_ are not allowed. +// (will not corrupt the state, but may lead to an exception) +// - We reject orphaned blocks with [state.OutdatedExtensionError]! +// This is more performant, but requires careful handling by the calling code. Specifically, +// the caller should not just drop orphaned blocks from the cache to avoid wasteful re-requests. +// If we were to entirely forget orphaned blocks, e.g. block X of the orphaned fork X ← Y ← Z, +// we might not have enough information to reject blocks Y, Z later if we receive them. We would +// re-request X, then determine it is orphaned and drop it, attempt to ingest Y, re-request the +// unknown parent X, and repeat potentially very often. +// +// To ensure that all ancestors of a candidate block are correct and known to the Protocol State, some external +// ordering and queuing of incoming blocks is generally necessary (responsibility of Compliance Layer). Once a block +// is successfully ingested, repeated extension requests with this block are no-ops. This is convenient for the +// Compliance Layer after a crash, so it doesn't have to worry about which blocks have already been ingested before +// the crash. However, while running it is very easy for the Compliance Layer to avoid concurrent extension requests +// with the same block. Hence, for simplicity, the Protocol State may reject such requests with an exception. +// +// // Expected errors during normal operations: -// - state.OutdatedExtensionError if the candidate block is outdated (e.g. orphaned) -// - state.InvalidExtensionError if the candidate block is invalid -func (m *ParticipantState) Extend(ctx context.Context, candidate *flow.Block) error { +// - [state.OutdatedExtensionError] if the candidate block is orphaned +// - [state.InvalidExtensionError] if the candidate block is invalid +// - In case of concurrent calls with the same `candidate` block, `Extend` may return a [storage.ErrAlreadyExists] +// or it may gracefully return. At the moment, `Extend` should be considered as NOT CONCURRENCY-SAFE.
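To make the error contract above concrete, a compliance-layer caller might branch on the documented sentinels roughly as follows. This is an illustrative sketch, not code from this diff: the function and component names are assumptions, and it presumes the sentinel-check helpers `state.IsInvalidExtensionError` and `state.IsOutdatedExtensionError` (the latter is used the same way inside this file):

```go
package example

import (
	"context"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state"
	"github.com/onflow/flow-go/state/protocol"
)

func ingestProposal(ctx context.Context, mutableState protocol.ParticipantState, proposal *flow.Proposal) error {
	err := mutableState.Extend(ctx, proposal)
	switch {
	case err == nil:
		return nil // proposal ingested successfully
	case state.IsInvalidExtensionError(err):
		// invalid proposal: drop it (and potentially flag the sender)
		return nil
	case state.IsOutdatedExtensionError(err):
		// orphaned proposal: per the CAUTION above, keep it cached rather
		// than forgetting it, to avoid repeated re-requests of its ancestors
		return nil
	default:
		// anything else is an exception for the calling component
		return fmt.Errorf("unexpected exception extending protocol state: %w", err)
	}
}
```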
+func (m *ParticipantState) Extend(ctx context.Context, candidateProposal *flow.Proposal) error { span, ctx := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorExtend) defer span.End() + candidate := &candidateProposal.Block // check if candidate block has been already processed - isDuplicate, err := m.checkBlockAlreadyProcessed(candidate.ID()) + blockID := candidate.ID() + isDuplicate, err := m.checkBlockAlreadyProcessed(blockID) if err != nil || isDuplicate { return err } + deferredBlockPersist := deferred.NewDeferredBlockPersist() + // check if the block header is a valid extension of parent block - err = m.headerExtend(candidate) + err = m.headerExtend(ctx, candidateProposal, nil, deferredBlockPersist) if err != nil { return fmt.Errorf("header not compliant with chain state: %w", err) } - // check if the block header is a valid extension of the finalized state - err = m.checkOutdatedExtension(candidate.Header) + // The following function rejects the input block with a [state.OutdatedExtensionError] if and only if + // the block is orphaned or already finalized. If the block were already finalized, it would have been + // detected as already processed by the check above. Hence, `candidate` being orphaned is the only + // possible case to receive a [state.OutdatedExtensionError] here. + err = m.checkOutdatedExtension(candidate.HeaderBody) if err != nil { if state.IsOutdatedExtensionError(err) { return fmt.Errorf("candidate block is an outdated extension: %w", err) } @@ -212,56 +301,97 @@ func (m *ParticipantState) Extend(ctx context.Context, candidate *flow.Block) er } // check if the seals in the payload are a valid extension of the finalized state - lastSeal, err := m.sealExtend(ctx, candidate) + _, err = m.sealExtend(ctx, candidate, deferredBlockPersist) if err != nil { return fmt.Errorf("payload seal(s) not compliant with chain state: %w", err) } - // insert the block and index the last seal for the block - err = m.insert(ctx, candidate, nil, lastSeal) + // evolve protocol state and verify consistency with commitment included in payload + err = m.evolveProtocolState(ctx, candidate, deferredBlockPersist) if err != nil { - return fmt.Errorf("failed to insert the block: %w", err) + return fmt.Errorf("evolving protocol state failed: %w", err) } - return nil + lctx := m.lockManager.NewContext() + defer lctx.Release() + err = lctx.AcquireLock(storage.LockInsertBlock) + if err != nil { + return err + } + + // Execute the deferred database operations and emit scheduled notifications on success. + // The `candidate` block _must be valid_ (otherwise, the state will be corrupted)! + // + // Note: The following database write is not concurrency-safe at the moment. If a candidate block is + // identified as a duplicate by `checkBlockAlreadyProcessed` in the beginning, `Extend` behaves as a no-op and + // gracefully returns. However, if two concurrent `Extend` calls with the same block pass the initial check + // for duplicates, both will eventually attempt to commit their deferred database operations. As documented + // in `headerExtend`, its deferred operations will abort the write batch with [storage.ErrAlreadyExists]. + // In this edge case of two concurrent calls with the same `candidate` block, `Extend` does not behave as + // an idempotent operation.
+ return m.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return deferredBlockPersist.Execute(lctx, blockID, rw) + }) } // headerExtend verifies the validity of the block header (excluding verification of the -// consensus rules). Specifically, we check that the block connects to the last finalized block. +// consensus rules). Specifically, we check that +// 1. candidate header is consistent with its parent: +// - ChainID is identical +// - height increases by 1 +// - ParentView stated by the candidate block equals the parent's actual view +// 2. candidate's block time conforms to protocol rules +// 3. If a `certifyingQC` is given (can be nil), we sanity-check that it certifies the candidate block +// +// If all checks pass, this method queues the following operations to persist the candidate block and +// schedules `BlockProcessable` notification to be emitted in order of increasing height: +// +// 4a. store QC embedded into the candidate block and emit `BlockProcessable` notification for the parent +// 4b. store candidate block and populate corresponding indices: +// - store candidate block's proposer signature along with the block (needed to re-create an authenticated proposal) +// - index it as a child of its parent (needed for recovery to traverse unfinalized blocks) +// 4c. if we are given a certifyingQC, store it and queue a `BlockProcessable` notification for the candidate block +// +// If `headerExtend` is called by `ParticipantState.Extend` (full consensus participant) then `certifyingQC` will be nil, +// but the block payload will be validated and proposer signature will be present. If `headerExtend` is called by +// `FollowerState.Extend` (consensus follower), then `certifyingQC` must not be nil, which proves payload validity. +// +// If the candidate block has already been ingested, the deferred database operations returned by this function call +// will error with the benign sentinel [storage.ErrAlreadyExists], aborting the database transaction (without corrupting +// the protocol state). +// // Expected errors during normal operations: // - state.InvalidExtensionError if the candidate block is invalid -func (m *FollowerState) headerExtend(candidate *flow.Block) error { - // FIRST: We do some initial cheap sanity checks, like checking the payload - // hash is consistent - - header := candidate.Header - payload := candidate.Payload - if payload.Hash() != header.PayloadHash { - return state.NewInvalidExtensionError("payload integrity check failed") - } - - // SECOND: Next, we can check whether the block is a valid descendant of the - // parent. It should have the same chain ID and a height that is one bigger. +func (m *FollowerState) headerExtend(ctx context.Context, candidate *flow.Proposal, certifyingQC *flow.QuorumCertificate, deferredBlockPersist *deferred.DeferredBlockPersist) error { + span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorExtendCheckHeader) + defer span.End() + blockID := candidate.Block.ID() + headerBody := candidate.Block.HeaderBody - parent, err := m.headers.ByBlockID(header.ParentID) + // STEP 1: check whether the candidate (i) connects to the known block tree and + // (ii) has the same chain ID as its parent and a height incremented by 1. + parent, err := m.headers.ByBlockID(headerBody.ParentID) // (i) connects to the known block tree if err != nil { - return state.NewInvalidExtensionErrorf("could not retrieve parent: %s", err) + // The only sentinel error that can happen here is `storage.ErrNotFound`.
However, by convention the + // protocol state must be extended in a parent-first order. This block's parent being unknown breaks + // with this API contract and results in an exception. + return irrecoverable.NewExceptionf("could not retrieve the candidate's parent block %v: %w", headerBody.ParentID, err) } - if header.ChainID != parent.ChainID { + if headerBody.ChainID != parent.ChainID { return state.NewInvalidExtensionErrorf("candidate built for invalid chain (candidate: %s, parent: %s)", - header.ChainID, parent.ChainID) + headerBody.ChainID, parent.ChainID) } - if header.ParentView != parent.View { + if headerBody.ParentView != parent.View { return state.NewInvalidExtensionErrorf("candidate built with inconsistent parent view (candidate: %d, parent %d)", - header.ParentView, parent.View) + headerBody.ParentView, parent.View) } - if header.Height != parent.Height+1 { + if headerBody.Height != parent.Height+1 { return state.NewInvalidExtensionErrorf("candidate built with invalid height (candidate: %d, parent: %d)", - header.Height, parent.Height) + headerBody.Height, parent.Height) } - // check validity of block timestamp using parent's timestamp - err = m.blockTimer.Validate(parent.Timestamp, candidate.Header.Timestamp) + // STEP 2: check validity of block timestamp using parent's timestamp + err = m.blockTimer.Validate(parent.Timestamp, headerBody.Timestamp) if err != nil { if protocol.IsInvalidBlockTimestampError(err) { return state.NewInvalidExtensionErrorf("candidate contains invalid timestamp: %w", err) } @@ -269,6 +399,74 @@ func (m *FollowerState) headerExtend(candidate *flow.Block) error { return fmt.Errorf("validating block's time stamp failed with unexpected error: %w", err) } + // STEP 3: if a certifying QC is given (can be nil), sanity-check that it actually certifies the candidate block + if certifyingQC != nil { + if certifyingQC.View != headerBody.View { + return fmt.Errorf("qc doesn't certify candidate block, expect %d view, got %d", headerBody.View, certifyingQC.View) + } + if certifyingQC.BlockID != blockID { + return fmt.Errorf("qc doesn't certify candidate block, expect %x blockID, got %x", blockID, certifyingQC.BlockID) + } + } + + // STEP 4: + qc := candidate.Block.ParentQC() + deferredBlockPersist.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + // STEP 4a: Store QC for parent block and emit `BlockProcessable` notification if and only if + // - the QC for the parent has not been stored before (otherwise, we already emitted the notification) and + // - the parent block's height is larger than the finalized root height (the root block is already considered processed) + // Thereby, we reduce duplicated `BlockProcessable` notifications. + err = m.qcs.BatchStore(lctx, rw, qc) + if err != nil { + // [storage.ErrAlreadyExists] guarantees that 4a has already been executed for the parent.
+ if !errors.Is(err, storage.ErrAlreadyExists) { + return fmt.Errorf("could not store incorporated qc: %w", err) + } + } else { // no error entails that 4a has never been executed for the parent block + // add parent to index of certified blocks: + err := operation.IndexCertifiedBlockByView(lctx, rw, parent.View, qc.BlockID) + if err != nil { + return fmt.Errorf("could not index certified block by view %v: %w", parent.View, err) + } + + // trigger BlockProcessable for parent block above root height + if parent.Height > m.finalizedRootHeight { + storage.OnCommitSucceed(rw, func() { + m.consumer.BlockProcessable(parent, qc) + }) + } + } + + // STEP 4b: Store candidate block and index it as a child of its parent (needed for recovery to traverse unfinalized blocks) + err = m.blocks.BatchStore(lctx, rw, candidate) // insert the block into the database AND cache + if err != nil { + return fmt.Errorf("could not store candidate block: %w", err) + } + err = operation.IndexNewBlock(lctx, rw, blockID, headerBody.ParentID) + if err != nil { + return fmt.Errorf("could not index new block: %w", err) + } + + // STEP 4c: if we are given a certifyingQC, store it and queue a `BlockProcessable` notification for the candidate block + if certifyingQC != nil { + err = m.qcs.BatchStore(lctx, rw, certifyingQC) + if err != nil { + return fmt.Errorf("could not store certifying qc: %w", err) + } + + // add candidate to index of certified blocks: + err := operation.IndexCertifiedBlockByView(lctx, rw, headerBody.View, blockID) + if err != nil { + return fmt.Errorf("could not index certified block by view %v: %w", headerBody.View, err) + } + + storage.OnCommitSucceed(rw, func() { // queue a BlockProcessable event for candidate block, since it is certified + m.consumer.BlockProcessable(candidate.Block.ToHeader(), certifyingQC) + }) + } + return nil + }) + return nil } @@ -289,19 +487,46 @@ func (m *FollowerState) checkBlockAlreadyProcessed(blockID flow.Identifier) (boo return true, nil } -// checkOutdatedExtension checks whether given block is -// valid in the context of the entire state. For this, the block needs to -// directly connect, through its ancestors, to the last finalized block. +// checkOutdatedExtension rejects blocks that are either orphaned or already finalized, in which cases +// the sentinel [state.OutdatedExtensionError] is returned. Per convention, the ancestor blocks +// for any ingested block must be known (otherwise, we return an exception). +// +// APPROACH: +// Starting with `block`s parent, we walk the fork backwards in order of decreasing height. Eventually, +// we will reach a finalized block (this is always true, because a node starts with the genesis block +// or a root block that is known to be finalized and only accepts blocks that descend from this block). +// Let H denote the *latest* finalized height (in the implementation below called `finalizedHeight`). +// +// For `block.Height` > H, there are two cases: +// 1. When walking the fork backward, we reach the *latest* finalized block. Hence, `block` +// descends from the latest finalized block, i.e. it is not orphaned (yet). +// 2. We encounter a block at height H that is different from the latest finalized block. +// Therefore, our fork contains a block at height H that conflicts with the latest +// finalized block. Hence, `block` is orphaned. +// Example: +// A (Finalized) ← B (Finalized) ← C (Finalized) ← D ← E ← F +// ↖ G ↖ H ↖ I +// Block G is outdated, because its ancestry does not include C (latest finalized). 
+// Block H and I are not outdated, because they do have C as an ancestor. +// +// For `block.Height` ≤ H: +// - We emphasize that the traversal starts with `block`'s *parent*. Hence, the first block we +// visit when traversing the fork is at height `block.Height - 1` < H. Also in this case, our +// traversal reaches height H or below, _without_ encountering the latest finalized block. +// +// In summary, in the context of this function, we define a `block` to be OUTDATED if and only if +// `block` is orphaned or already finalized. +// // Expected errors during normal operations: -// - state.OutdatedExtensionError if the candidate block is outdated (e.g. orphaned) -func (m *ParticipantState) checkOutdatedExtension(header *flow.Header) error { - var finalizedHeight uint64 - err := m.db.View(operation.RetrieveFinalizedHeight(&finalizedHeight)) +// - [state.OutdatedExtensionError] if the candidate block is orphaned or finalized +func (m *ParticipantState) checkOutdatedExtension(header flow.HeaderBody) error { + var latestFinalizedHeight uint64 + err := operation.RetrieveFinalizedHeight(m.db.Reader(), &latestFinalizedHeight) if err != nil { return fmt.Errorf("could not retrieve finalized height: %w", err) } var finalID flow.Identifier - err = m.db.View(operation.LookupBlockHeight(finalizedHeight, &finalID)) + err = operation.LookupBlockHeight(m.db.Reader(), latestFinalizedHeight, &finalID) if err != nil { return fmt.Errorf("could not lookup finalized block: %w", err) } @@ -310,19 +535,13 @@ func (m *ParticipantState) checkOutdatedExtension(header *flow.Header) error { for ancestorID != finalID { ancestor, err := m.headers.ByBlockID(ancestorID) if err != nil { - return fmt.Errorf("could not retrieve ancestor (%x): %w", ancestorID, err) + return irrecoverable.NewExceptionf("could not retrieve ancestor %x: %w", ancestorID, err) } - if ancestor.Height < finalizedHeight { - // this happens when the candidate block is on a fork that does not include all the - // finalized blocks. - // for instance: - // A (Finalized) <- B (Finalized) <- C (Finalized) <- D <- E <- F - // ^- G ^- H ^- I - // block G is not a valid block, because it does not have C (which has been finalized) as an ancestor - // block H and I are valid, because they do have C as an ancestor + if ancestor.Height < latestFinalizedHeight { + // Candidate block is on a fork that does not include the latest finalized block. return state.NewOutdatedExtensionErrorf( "candidate block (height: %d) conflicts with finalized state (ancestor: %d final: %d)", - header.Height, ancestor.Height, finalizedHeight) + header.Height, ancestor.Height, latestFinalizedHeight) } ancestorID = ancestor.ParentID } @@ -332,39 +551,41 @@ func (m *ParticipantState) checkOutdatedExtension(header *flow.Header) error { // guaranteeExtend verifies the validity of the collection guarantees that are // included in the block. Specifically, we check for expired collections and // duplicated collections (also including ancestor blocks). 
+// Expected errors during normal operations: +// - state.InvalidExtensionError if the candidate block contains invalid collection guarantees func (m *ParticipantState) guaranteeExtend(ctx context.Context, candidate *flow.Block) error { - span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorExtendCheckGuarantees) defer span.End() - header := candidate.Header + headerBody := candidate.HeaderBody payload := candidate.Payload // we only look as far back for duplicates as the transaction expiry limit; // if a guarantee was included before that, we will disqualify it on the // basis of the reference block anyway - limit := header.Height - flow.DefaultTransactionExpiry - if limit > header.Height { // overflow check + limit := headerBody.Height - flow.DefaultTransactionExpiry + if limit > headerBody.Height { // overflow check limit = 0 } - if limit < m.sporkRootBlockHeight { - limit = m.sporkRootBlockHeight + sporkRootBlockHeight := m.sporkRootBlock.Height + if limit < sporkRootBlockHeight { + limit = sporkRootBlockHeight } // build a list of all previously used guarantees on this part of the chain - ancestorID := header.ParentID + ancestorID := headerBody.ParentID lookup := make(map[flow.Identifier]struct{}) for { ancestor, err := m.headers.ByBlockID(ancestorID) if err != nil { - return fmt.Errorf("could not retrieve ancestor header (%x): %w", ancestorID, err) + return fmt.Errorf("could not retrieve ancestor headerBody (%x): %w", ancestorID, err) } index, err := m.index.ByBlockID(ancestorID) if err != nil { return fmt.Errorf("could not retrieve ancestor index (%x): %w", ancestorID, err) } - for _, collID := range index.CollectionIDs { - lookup[collID] = struct{}{} + for _, guaranteeID := range index.GuaranteeIDs { + lookup[guaranteeID] = struct{}{} } if ancestor.Height <= limit { break @@ -411,10 +632,11 @@ func (m *ParticipantState) guaranteeExtend(ctx context.Context, candidate *flow. return nil } -// sealExtend checks the compliance of the payload seals. Returns last seal that form a chain for -// candidate block. -func (m *ParticipantState) sealExtend(ctx context.Context, candidate *flow.Block) (*flow.Seal, error) { - +// sealExtend checks the compliance of the payload seals. It queues a deferred database +// operation for indexing the latest seal as of the candidate block and returns the latest seal. +// Expected errors during normal operations: +// - state.InvalidExtensionError if the candidate block has invalid seals +func (m *ParticipantState) sealExtend(ctx context.Context, candidate *flow.Block, deferredBlockPersist *deferred.DeferredBlockPersist) (*flow.Seal, error) { span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorExtendCheckSeals) defer span.End() @@ -423,6 +645,10 @@ func (m *ParticipantState) sealExtend(ctx context.Context, candidate *flow.Block return nil, state.NewInvalidExtensionErrorf("seal validation error: %w", err) } + deferredBlockPersist.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return operation.IndexLatestSealAtBlock(lctx, rw.Writer(), blockID, lastSeal.ID()) + }) + return lastSeal, nil } @@ -433,158 +659,87 @@ func (m *ParticipantState) sealExtend(ctx context.Context, candidate *flow.Block // - No seal has been included for the respective block in this particular fork // // We require the receipts to be sorted by block height (within a payload). 
+// +// Expected errors during normal operations: +// - state.InvalidExtensionError if the candidate block contains invalid receipts func (m *ParticipantState) receiptExtend(ctx context.Context, candidate *flow.Block) error { - span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorExtendCheckReceipts) defer span.End() err := m.receiptValidator.ValidatePayload(candidate) if err != nil { - // TODO: this might be not an error, potentially it can be solved by requesting more data and processing this receipt again - if errors.Is(err, storage.ErrNotFound) { - return state.NewInvalidExtensionErrorf("some entities referenced by receipts are missing: %w", err) - } if engine.IsInvalidInputError(err) { return state.NewInvalidExtensionErrorf("payload includes invalid receipts: %w", err) } + if module.IsUnknownBlockError(err) { + // By convention, the protocol state must be extended in a parent-first order. This block's parent + // being unknown breaks with this API contract and results in an exception. + return irrecoverable.NewExceptionf("internal state corruption detected when validating receipts in candidate block %v: %w", candidate.ID(), err) + } return fmt.Errorf("unexpected payload validation error %w", err) } - return nil } // lastSealed returns the highest sealed block from the fork with head `candidate`. +// // For instance, here is the chain state: block 100 is the head, block 97 is finalized, // and 95 is the last sealed block at the state of block 100. // 95 (sealed) <- 96 <- 97 (finalized) <- 98 <- 99 <- 100 // Now, if block 101 is extending block 100, and its payload has a seal for 96, then it will -// be the last sealed for block 101. +// be the last sealed as of block 101. The result is independent of finalization. // No errors are expected during normal operation. -func (m *FollowerState) lastSealed(candidate *flow.Block) (*flow.Seal, error) { - header := candidate.Header +func (m *FollowerState) lastSealed(candidate *flow.Block) (latestSeal *flow.Seal, err error) { payload := candidate.Payload - // getting the last sealed block - last, err := m.seals.HighestInFork(header.ParentID) - if err != nil { - return nil, fmt.Errorf("could not retrieve parent seal (%x): %w", header.ParentID, err) - } - - // if the payload of the block has no seals, then the last seal is the seal for the highest block + // If the candidate blocks' payload has no seals, the latest seal in this fork remains unchanged, i.e. latest seal as of the + // parent is also the latest seal as of the candidate block. Otherwise, we take the latest seal included in the candidate block. + // Note that seals might not be ordered in the block. if len(payload.Seals) == 0 { - return last, nil - } - - ordered, err := protocol.OrderedSeals(payload, m.headers) - if err != nil { - // all errors are unexpected - differentiation is for clearer error messages - if errors.Is(err, storage.ErrNotFound) { - return nil, fmt.Errorf("ordering seals: candidate payload contains seals for unknown block: %s", err.Error()) - } - if errors.Is(err, protocol.ErrDiscontinuousSeals) || errors.Is(err, protocol.ErrMultipleSealsForSameHeight) { - return nil, fmt.Errorf("ordering seals: candidate payload contains invalid seal set: %s", err.Error()) - } - return nil, fmt.Errorf("unexpected error ordering seals: %w", err) - } - return ordered[len(ordered)-1], nil -} - -// insert stores the candidate block in the database. -// The `candidate` block _must be valid_ (otherwise, the state will be corrupted). 
-// dbUpdates contains other database operations which must be applied atomically -// with inserting the block. -// Caller is responsible for ensuring block validity. -// If insert is called from Extend(by consensus participant) then certifyingQC will be nil but the block payload will be validated. -// If insert is called from ExtendCertified(by consensus follower) then certifyingQC must be not nil which proves payload validity. -// No errors are expected during normal operations. -func (m *FollowerState) insert(ctx context.Context, candidate *flow.Block, certifyingQC *flow.QuorumCertificate, last *flow.Seal) error { - span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorExtendDBInsert) - defer span.End() - - blockID := candidate.ID() - parentID := candidate.Header.ParentID - latestSealID := last.ID() - - parent, err := m.headers.ByBlockID(parentID) - if err != nil { - return fmt.Errorf("could not retrieve block header for %x: %w", parentID, err) - } - - // apply any state changes from service events sealed by this block's parent - dbUpdates, err := m.handleEpochServiceEvents(candidate) - if err != nil { - return fmt.Errorf("could not process service events: %w", err) - } - - qc := candidate.Header.QuorumCertificate() - - var events []func() - - // Both the header itself and its payload are in compliance with the protocol state. - // We can now store the candidate block, as well as adding its final seal - // to the seal index and initializing its children index. - err = operation.RetryOnConflictTx(m.db, transaction.Update, func(tx *transaction.Tx) error { - // insert the block into the database AND cache - err := m.blocks.StoreTx(candidate)(tx) + latestSeal, err = m.seals.HighestInFork(candidate.ParentID) if err != nil { - return fmt.Errorf("could not store candidate block: %w", err) + return nil, fmt.Errorf("could not retrieve parent seal (%x): %w", candidate.ParentID, err) } - - err = m.qcs.StoreTx(qc)(tx) + } else { + ordered, err := protocol.OrderedSeals(payload.Seals, m.headers) if err != nil { - if !errors.Is(err, storage.ErrAlreadyExists) { - return fmt.Errorf("could not store incorporated qc: %w", err) - } - } else { - // trigger BlockProcessable for parent blocks above root height - if parent.Height > m.rootHeight { - events = append(events, func() { - m.consumer.BlockProcessable(parent, qc) - }) + // all errors are unexpected - differentiation is for clearer error messages + if errors.Is(err, storage.ErrNotFound) { + return nil, irrecoverable.NewExceptionf("ordering seals: candidate payload contains seals for unknown block: %w", err) } - } - - if certifyingQC != nil { - err = m.qcs.StoreTx(certifyingQC)(tx) - if err != nil { - return fmt.Errorf("could not store certifying qc: %w", err) + if errors.Is(err, protocol.ErrDiscontinuousSeals) || errors.Is(err, protocol.ErrMultipleSealsForSameHeight) { + return nil, irrecoverable.NewExceptionf("ordering seals: candidate payload contains invalid seal set: %w", err) } - - // trigger BlockProcessable for candidate block if it's certified - events = append(events, func() { - m.consumer.BlockProcessable(candidate.Header, certifyingQC) - }) - } - - // index the latest sealed block in this fork - err = transaction.WithTx(operation.IndexLatestSealAtBlock(blockID, latestSealID))(tx) - if err != nil { - return fmt.Errorf("could not index candidate seal: %w", err) + return nil, fmt.Errorf("unexpected error ordering seals: %w", err) } + latestSeal = ordered[len(ordered)-1] + } - // index the child block for recovery - err = 
transaction.WithTx(procedure.IndexNewBlock(blockID, candidate.Header.ParentID))(tx) - if err != nil { - return fmt.Errorf("could not index new block: %w", err) - } + return latestSeal, nil +} - // apply any optional DB operations from service events - for _, apply := range dbUpdates { - err := apply(tx) - if err != nil { - return fmt.Errorf("could not apply operation: %w", err) - } - } +// evolveProtocolState +// - instantiates a Protocol State EvolvingState from the parent block's state +// - applies any state-changing service events sealed by this block +// - verifies that the resulting protocol state is consistent with the commitment in the block +// +// Expected errors during normal operations: +// - state.InvalidExtensionError if the Protocol State commitment in the candidate block does +// not match the Protocol State we constructed locally +func (m *FollowerState) evolveProtocolState(ctx context.Context, candidate *flow.Block, deferredBlockPersist *deferred.DeferredBlockPersist) error { + span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorEvolveProtocolState) + defer span.End() - return nil - }) + // Evolve the Protocol State starting from the parent block's state. Information that may change the state is: + // the candidate block's view and Service Events from execution results sealed in the candidate block. + updatedStateID, err := m.protocolState.EvolveState(deferredBlockPersist, candidate.ParentID, candidate.View, candidate.Payload.Seals) if err != nil { - return fmt.Errorf("could not execute state extension: %w", err) + return fmt.Errorf("evolving protocol state failed: %w", err) } - // execute scheduled events - for _, event := range events { - event() + // verify Protocol State commitment in the candidate block matches the locally-constructed value + if updatedStateID != candidate.Payload.ProtocolStateID { + return state.NewInvalidExtensionErrorf("invalid protocol state commitment %x in block, which should be %x", candidate.Payload.ProtocolStateID, updatedStateID) } return nil @@ -595,6 +750,12 @@ func (m *FollowerState) insert(ctx context.Context, candidate *flow.Block, certi // Hence, the parent of `blockID` has to be the last finalized block. // No errors are expected during normal operations. func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) error { + lctx := m.lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockFinalizeBlock) + if err != nil { + return err + } // preliminaries: start tracer and retrieve full block span, _ := m.tracer.StartSpanFromContext(ctx, trace.ProtoStateMutatorFinalize) @@ -603,11 +764,11 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e if err != nil { return fmt.Errorf("could not retrieve full block that should be finalized: %w", err) } - header := block.Header + header := block.ToHeader() // keep track of metrics updates and protocol events to emit: - // * metrics are updated after a successful database update - // * protocol events are emitted atomically with the database update + // - metrics are updated after a successful database update + // - protocol events are emitted atomically with the database update var metrics []func() var events []func() @@ -615,12 +776,12 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e // this must be the case, as the `Finalize` method only finalizes one block // at a time and hence the parent of `blockID` must already be finalized. 
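Stepping back for a moment: both extension paths above share one persistence shape, where each validation phase queues its writes on a `DeferredBlockPersist` and a single locked write batch commits them atomically at the end. A condensed sketch of that pattern follows; the function name and parameters are illustrative, and the `storage.DB` and `lockctx.Manager` types are assumptions standing in for the fields used by `FollowerState`:

```go
package example

import (
	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/deferred"
	"github.com/onflow/flow-go/storage/operation"
)

// persistCandidate sketches the queue-then-commit pattern; the orchestration
// around it is illustrative, not code from this diff.
func persistCandidate(
	db storage.DB, // assumed interface behind m.db
	lockManager lockctx.Manager, // assumed interface behind m.lockManager
	blockID flow.Identifier,
	latestSealID flow.Identifier,
) error {
	pending := deferred.NewDeferredBlockPersist()

	// validation phases queue their writes instead of performing them directly:
	pending.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error {
		return operation.IndexLatestSealAtBlock(lctx, rw.Writer(), blockID, latestSealID)
	})

	// only after all checks pass: acquire the lock and commit atomically
	lctx := lockManager.NewContext()
	defer lctx.Release()
	if err := lctx.AcquireLock(storage.LockInsertBlock); err != nil {
		return err
	}
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return pending.Execute(lctx, blockID, rw)
	})
}
```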
var finalized uint64 - err = m.db.View(operation.RetrieveFinalizedHeight(&finalized)) + err = operation.RetrieveFinalizedHeight(m.db.Reader(), &finalized) if err != nil { return fmt.Errorf("could not retrieve finalized height: %w", err) } var finalID flow.Identifier - err = m.db.View(operation.LookupBlockHeight(finalized, &finalID)) + err = operation.LookupBlockHeight(m.db.Reader(), finalized, &finalID) if err != nil { return fmt.Errorf("could not retrieve final header: %w", err) } @@ -641,57 +802,28 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e // We update metrics and emit protocol events for epoch state changes when // the block corresponding to the state change is finalized - epochStatus, err := m.epoch.statuses.ByBlockID(blockID) + parentEpochState, err := m.protocolState.EpochStateAtBlockID(block.ParentID) if err != nil { - return fmt.Errorf("could not retrieve epoch state: %w", err) + return fmt.Errorf("could not retrieve parent protocol state snapshot: %w", err) } - currentEpochSetup, err := m.epoch.setups.ByID(epochStatus.CurrentEpoch.SetupID) + finalizingEpochState, err := m.protocolState.EpochStateAtBlockID(blockID) if err != nil { - return fmt.Errorf("could not retrieve setup event for current epoch: %w", err) - } - epochFallbackTriggered, err := m.isEpochEmergencyFallbackTriggered() - if err != nil { - return fmt.Errorf("could not check persisted epoch emergency fallback flag: %w", err) - } - - // if epoch fallback was not previously triggered, check whether this block triggers it - if !epochFallbackTriggered { - epochFallbackTriggered, err = m.epochFallbackTriggeredByFinalizedBlock(header, epochStatus, currentEpochSetup) - if err != nil { - return fmt.Errorf("could not check whether finalized block triggers epoch fallback: %w", err) - } - if epochFallbackTriggered { - // emit the protocol event only the first time epoch fallback is triggered - events = append(events, m.consumer.EpochEmergencyFallbackTriggered) - metrics = append(metrics, m.metrics.EpochEmergencyFallbackTriggered) - } + return fmt.Errorf("could not retrieve protocol state snapshot: %w", err) } + currentEpochSetup := finalizingEpochState.EpochSetup() - isFirstBlockOfEpoch, err := m.isFirstBlockOfEpoch(header, currentEpochSetup) + // Determine metric updates and protocol events related to epoch phase changes and epoch transitions. + epochPhaseMetrics, epochPhaseEvents, err := m.epochMetricsAndEventsOnBlockFinalized(parentEpochState, finalizingEpochState, header) if err != nil { - return fmt.Errorf("could not check if block is first of epoch: %w", err) + return fmt.Errorf("could not determine epoch phase metrics/events for finalized block: %w", err) } + metrics = append(metrics, epochPhaseMetrics...) + events = append(events, epochPhaseEvents...) - // Determine metric updates and protocol events related to epoch phase - // changes and epoch transitions. - // If epoch emergency fallback is triggered, the current epoch continues until - // the next spork - so skip these updates. - if !epochFallbackTriggered { - epochPhaseMetrics, epochPhaseEvents, err := m.epochPhaseMetricsAndEventsOnBlockFinalized(block, epochStatus) - if err != nil { - return fmt.Errorf("could not determine epoch phase metrics/events for finalized block: %w", err) - } - metrics = append(metrics, epochPhaseMetrics...) - events = append(events, epochPhaseEvents...) 
- - if isFirstBlockOfEpoch { - epochTransitionMetrics, epochTransitionEvents := m.epochTransitionMetricsAndEventsOnBlockFinalized(header, currentEpochSetup) - if err != nil { - return fmt.Errorf("could not determine epoch transition metrics/events for finalized block: %w", err) - } - metrics = append(metrics, epochTransitionMetrics...) - events = append(events, epochTransitionEvents...) - } + // Extract and validate version beacon events from the block seals. + versionBeacons, err := m.versionBeaconOnBlockFinalized(block) + if err != nil { + return fmt.Errorf("cannot process version beacon: %w", err) } // Persist updates in database @@ -701,27 +833,22 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e // This value could actually stay the same if it has no seals in // its payload, in which case the parent's seal is the same. // * set the epoch fallback flag, if it is triggered - err = operation.RetryOnConflict(m.db.Update, func(tx *badger.Txn) error { - err = operation.IndexBlockHeight(header.Height, blockID)(tx) + err = m.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err = operation.IndexFinalizedBlockByHeight(lctx, rw, header.Height, blockID) if err != nil { return fmt.Errorf("could not insert number mapping: %w", err) } - err = operation.UpdateFinalizedHeight(header.Height)(tx) + err = operation.UpsertFinalizedHeight(lctx, rw.Writer(), header.Height) if err != nil { return fmt.Errorf("could not update finalized height: %w", err) } - err = operation.UpdateSealedHeight(sealed.Height)(tx) + err = operation.UpsertSealedHeight(lctx, rw.Writer(), sealed.Height) if err != nil { return fmt.Errorf("could not update sealed height: %w", err) } - if epochFallbackTriggered { - err = operation.SetEpochEmergencyFallbackTriggered(blockID)(tx) - if err != nil { - return fmt.Errorf("could not set epoch fallback flag: %w", err) - } - } - if isFirstBlockOfEpoch && !epochFallbackTriggered { - err = operation.InsertEpochFirstHeight(currentEpochSetup.Counter, header.Height)(tx) + + if isFirstBlockOfEpoch(parentEpochState, finalizingEpochState) { + err = operation.InsertEpochFirstHeight(lctx, rw, currentEpochSetup.Counter, header.Height) if err != nil { return fmt.Errorf("could not insert epoch first block height: %w", err) } @@ -731,12 +858,21 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e // guarantees that only a single, continuous execution fork is sealed. Here, we index for // each block ID the ID of its _finalized_ seal. for _, seal := range block.Payload.Seals { - err = operation.IndexFinalizedSealByBlockID(seal.BlockID, seal.ID())(tx) + err = operation.IndexFinalizedSealByBlockID(rw.Writer(), seal.BlockID, seal.ID()) if err != nil { return fmt.Errorf("could not index the seal by the sealed block ID: %w", err) } } + if len(versionBeacons) > 0 { + // only index the last version beacon as that is the relevant one. + // TODO: The other version beacons can be used for validation. 
+			err := operation.IndexVersionBeaconByHeight(rw.Writer(), versionBeacons[len(versionBeacons)-1])
+			if err != nil {
+				return fmt.Errorf("could not index version beacon at height (%d): %w", header.Height, err)
+			}
+		}
+
 		return nil
 	})
 	if err != nil {
@@ -744,10 +880,12 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e
 	}

 	// update the cache
-	m.State.cachedFinal.Store(&cachedHeader{blockID, header})
-	if len(block.Payload.Seals) > 0 {
-		m.State.cachedSealed.Store(&cachedHeader{lastSeal.BlockID, sealed})
-	}
+	m.State.cachedLatest.Store(&cachedLatest{
+		finalizedID:     blockID,
+		finalizedHeader: header,
+		sealedID:        lastSeal.BlockID,
+		sealedHeader:    sealed,
+	})

 	// Emit protocol events after database transaction succeeds. Event delivery is guaranteed,
 	// _except_ in case of a crash. Hence, when recovering from a crash, consumers need to deduce
@@ -768,6 +906,11 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e
 		}
 		m.metrics.BlockSealed(sealedBlock)
 	}
+	protocolSnapshot, err := m.protocolKVStoreSnapshotsDB.ByID(block.Payload.ProtocolStateID)
+	if err != nil {
+		return fmt.Errorf("could not retrieve protocol snapshot for block (%x): %w", blockID, err)
+	}
+	m.metrics.ProtocolStateVersion(protocolSnapshot.Version)

 	// apply all queued metrics
 	for _, updateMetric := range metrics {
@@ -777,355 +920,167 @@ func (m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) e
 	return nil
 }

-// epochFallbackTriggeredByFinalizedBlock checks whether finalizing the input block
-// would trigger epoch emergency fallback mode. In particular, we trigger epoch
-// fallback mode while finalizing block B in either of the following cases:
-// 1. B is the head of a fork in which epoch fallback was tentatively triggered,
-//    due to incorporating an invalid service event.
-// 2. (a) B is the first finalized block with view greater than or equal to the epoch
-//    commitment deadline for the current epoch AND
-//    (b) the next epoch has not been committed as of B.
-//
-// This function should only be called when epoch fallback *has not already been triggered*.
-// See protocol.Params for more details on the epoch commitment deadline.
-//
-// No errors are expected during normal operation.
-func (m *FollowerState) epochFallbackTriggeredByFinalizedBlock(block *flow.Header, epochStatus *flow.EpochStatus, currentEpochSetup *flow.EpochSetup) (bool, error) {
-	// 1. Epoch fallback is tentatively triggered on this fork
-	if epochStatus.InvalidServiceEventIncorporated {
-		return true, nil
-	}
-
-	// 2.(a) determine whether block B is past the epoch commitment deadline
-	safetyThreshold, err := m.Params().EpochCommitSafetyThreshold()
-	if err != nil {
-		return false, fmt.Errorf("could not get epoch commit safety threshold: %w", err)
-	}
-	blockExceedsDeadline := block.View+safetyThreshold >= currentEpochSetup.FinalView
-
-	// 2.(b) determine whether the next epoch is committed w.r.t. block B
-	currentEpochPhase, err := epochStatus.Phase()
-	if err != nil {
-		return false, fmt.Errorf("could not get current epoch phase: %w", err)
-	}
-	isNextEpochCommitted := currentEpochPhase == flow.EpochPhaseCommitted
-
-	blockTriggersEpochFallback := blockExceedsDeadline && !isNextEpochCommitted
-	return blockTriggersEpochFallback, nil
-}
-
-// isFirstBlockOfEpoch returns true if the given block is the first block of a new epoch.
-// We accept the EpochSetup event for the current epoch (w.r.t. input block B) which contains
-// the FirstView for the epoch (denoted W).
By construction, B.View >= W. -// Definition: B is the first block of the epoch if and only if B.parent.View < W -// +// isFirstBlockOfEpoch returns true if the given block is the first block of a new epoch +// by comparing the block's Protocol State Snapshot to that of its parent. // NOTE: There can be multiple (un-finalized) blocks that qualify as the first block of epoch N. -// No errors are expected during normal operation. -func (m *FollowerState) isFirstBlockOfEpoch(block *flow.Header, currentEpochSetup *flow.EpochSetup) (bool, error) { - currentEpochFirstView := currentEpochSetup.FirstView - // sanity check: B.View >= W - if block.View < currentEpochFirstView { - return false, irrecoverable.NewExceptionf("data inconsistency: block (id=%x, view=%d) is below its epoch first view %d", block.ID(), block.View, currentEpochFirstView) - } - - parent, err := m.headers.ByBlockID(block.ParentID) - if err != nil { - return false, irrecoverable.NewExceptionf("could not retrieve parent (id=%s): %w", block.ParentID, err) - } - - return parent.View < currentEpochFirstView, nil +func isFirstBlockOfEpoch(parentEpochState, blockEpochState protocol.EpochProtocolState) bool { + return parentEpochState.Epoch() < blockEpochState.Epoch() } -// epochTransitionMetricsAndEventsOnBlockFinalized determines metrics to update -// and protocol events to emit for blocks which are the first block of a new epoch. -// Protocol events and updating metrics happen once when we finalize the _first_ -// block of the new Epoch (same convention as for Epoch-Phase-Changes). +// epochMetricsAndEventsOnBlockFinalized determines metrics to update and protocol +// events to emit upon finalizing a block. +// - We notify about an epoch transition when the first block of the new epoch is finalized +// - We notify about an epoch phase transition when the first block within the new epoch phase is finalized // -// NOTE: This function must only be called when input `block` is the first block -// of the epoch denoted by `currentEpochSetup`. -func (m *FollowerState) epochTransitionMetricsAndEventsOnBlockFinalized(block *flow.Header, currentEpochSetup *flow.EpochSetup) ( - metrics []func(), - events []func(), -) { - - events = append(events, func() { m.consumer.EpochTransition(currentEpochSetup.Counter, block) }) - // set current epoch counter corresponding to new epoch - metrics = append(metrics, func() { m.metrics.CurrentEpochCounter(currentEpochSetup.Counter) }) - // denote the most recent epoch transition height - metrics = append(metrics, func() { m.metrics.EpochTransitionHeight(block.Height) }) - // set epoch phase - since we are starting a new epoch we begin in the staking phase - metrics = append(metrics, func() { m.metrics.CurrentEpochPhase(flow.EpochPhaseStaking) }) - // set current epoch view values - metrics = append( - metrics, - func() { m.metrics.CurrentEpochFinalView(currentEpochSetup.FinalView) }, - func() { m.metrics.CurrentDKGPhase1FinalView(currentEpochSetup.DKGPhase1FinalView) }, - func() { m.metrics.CurrentDKGPhase2FinalView(currentEpochSetup.DKGPhase2FinalView) }, - func() { m.metrics.CurrentDKGPhase3FinalView(currentEpochSetup.DKGPhase3FinalView) }, - ) - - return -} - -// epochPhaseMetricsAndEventsOnBlockFinalized determines metrics to update and protocol -// events to emit. Service Events embedded into an execution result take effect, when the -// execution result's _seal is finalized_ (i.e. when the block holding a seal for the -// result is finalized). See also handleEpochServiceEvents for further details. 
Example: -// -// Convention: -// -// A <-- ... <-- C(Seal_A) -// -// Suppose an EpochSetup service event is emitted during execution of block A. C seals A, therefore -// we apply the metrics/events when C is finalized. The first block of the EpochSetup -// phase is block C. -// -// This function should only be called when epoch fallback *has not already been triggered*. +// This method must be called for each finalized block. // No errors are expected during normal operation. -func (m *FollowerState) epochPhaseMetricsAndEventsOnBlockFinalized(block *flow.Block, epochStatus *flow.EpochStatus) ( +func (m *FollowerState) epochMetricsAndEventsOnBlockFinalized(parentEpochState, finalizedEpochState protocol.EpochProtocolState, finalized *flow.Header) ( metrics []func(), events []func(), err error, ) { - - // block payload may not specify seals in order, so order them by block height before processing - orderedSeals, err := protocol.OrderedSeals(block.Payload, m.headers) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - return nil, nil, fmt.Errorf("ordering seals: parent payload contains seals for unknown block: %s", err.Error()) + parentEpochCounter := parentEpochState.Epoch() + childEpochCounter := finalizedEpochState.Epoch() + parentEpochPhase := parentEpochState.EpochPhase() + childEpochPhase := finalizedEpochState.EpochPhase() + + // Check for entering or exiting EFM + if !parentEpochState.EpochFallbackTriggered() && finalizedEpochState.EpochFallbackTriggered() { + // this block triggers EFM + events = append(events, func() { + m.consumer.EpochFallbackModeTriggered(childEpochCounter, finalized) + }) + metrics = append(metrics, m.metrics.EpochFallbackModeTriggered) + } + if parentEpochState.EpochFallbackTriggered() && !finalizedEpochState.EpochFallbackTriggered() { + // this block exits EFM + events = append(events, func() { + m.consumer.EpochFallbackModeExited(childEpochCounter, finalized) + }) + metrics = append(metrics, m.metrics.EpochFallbackModeExited) + } + + // Check for a new epoch extension + if len(finalizedEpochState.EpochExtensions()) > len(parentEpochState.EpochExtensions()) { + // We expect at most one additional epoch extension per block, but tolerate more here + for i := len(parentEpochState.EpochExtensions()); i < len(finalizedEpochState.EpochExtensions()); i++ { + finalizedExtension := finalizedEpochState.EpochExtensions()[i] + events = append(events, func() { m.consumer.EpochExtended(childEpochCounter, finalized, finalizedExtension) }) + metrics = append(metrics, func() { m.metrics.CurrentEpochFinalView(finalizedExtension.FinalView) }) } - return nil, nil, fmt.Errorf("unexpected error ordering seals: %w", err) - } - - // track service event driven metrics and protocol events that should be emitted - for _, seal := range orderedSeals { - result, err := m.results.ByID(seal.ResultID) - if err != nil { - return nil, nil, fmt.Errorf("could not retrieve result (id=%x) for seal (id=%x): %w", seal.ResultID, seal.ID(), err) - } - for _, event := range result.ServiceEvents { - switch ev := event.Event.(type) { - case *flow.EpochSetup: - // update current epoch phase - events = append(events, func() { m.metrics.CurrentEpochPhase(flow.EpochPhaseSetup) }) - // track epoch phase transition (staking->setup) - events = append(events, func() { m.consumer.EpochSetupPhaseStarted(ev.Counter-1, block.Header) }) - case *flow.EpochCommit: - // update current epoch phase - events = append(events, func() { m.metrics.CurrentEpochPhase(flow.EpochPhaseCommitted) }) - // track epoch phase 
transition (setup->committed) - events = append(events, func() { m.consumer.EpochCommittedPhaseStarted(ev.Counter-1, block.Header) }) - // track final view of committed epoch - nextEpochSetup, err := m.epoch.setups.ByID(epochStatus.NextEpoch.SetupID) - if err != nil { - return nil, nil, fmt.Errorf("could not retrieve setup event for next epoch: %w", err) - } - events = append(events, func() { m.metrics.CommittedEpochFinalView(nextEpochSetup.FinalView) }) - case *flow.VersionBeacon: - // do nothing for now - default: - return nil, nil, fmt.Errorf("invalid service event type in payload (%T)", event) - } - } - } - - return -} - -// epochStatus computes the EpochStatus for the given block *before* applying -// any service event state changes which come into effect with this block. -// -// Specifically, we must determine whether block is the first block of a new -// epoch in its respective fork. We do this by comparing the block's view to -// the Epoch data from its parent. If the block's view is _larger_ than the -// final View of the parent's epoch, the block starts a new Epoch. -// -// Possible outcomes: -// 1. Block is in same Epoch as parent (block.View < epoch.FinalView) -// -> the parent's EpochStatus.CurrentEpoch also applies for the current block -// 2. Block enters the next Epoch (block.View ≥ epoch.FinalView) -// a) HAPPY PATH: Epoch fallback is not triggered, we enter the next epoch: -// -> the parent's EpochStatus.NextEpoch is the current block's EpochStatus.CurrentEpoch -// b) FALLBACK PATH: Epoch fallback is triggered, we continue the current epoch: -// -> the parent's EpochStatus.CurrentEpoch also applies for the current block -// -// As the parent was a valid extension of the chain, by induction, the parent -// satisfies all consistency requirements of the protocol. -// -// Returns the EpochStatus for the input block. 
-// No error returns are expected under normal operations -func (m *FollowerState) epochStatus(block *flow.Header, epochFallbackTriggered bool) (*flow.EpochStatus, error) { - parentStatus, err := m.epoch.statuses.ByBlockID(block.ParentID) - if err != nil { - return nil, fmt.Errorf("could not retrieve epoch state for parent: %w", err) - } - parentSetup, err := m.epoch.setups.ByID(parentStatus.CurrentEpoch.SetupID) - if err != nil { - return nil, fmt.Errorf("could not retrieve EpochSetup event for parent: %w", err) - } - - // Case 1 or 2b (still in parent block's epoch or epoch fallback triggered): - if block.View <= parentSetup.FinalView || epochFallbackTriggered { - // IMPORTANT: copy the status to avoid modifying the parent status in the cache - return parentStatus.Copy(), nil - } - - // Case 2a (first block of new epoch): - // sanity check: parent's epoch Preparation should be completed and have EpochSetup and EpochCommit events - if parentStatus.NextEpoch.SetupID == flow.ZeroID { - return nil, fmt.Errorf("missing setup event for starting next epoch") } - if parentStatus.NextEpoch.CommitID == flow.ZeroID { - return nil, fmt.Errorf("missing commit event for starting next epoch") - } - epochStatus, err := flow.NewEpochStatus( - parentStatus.CurrentEpoch.SetupID, parentStatus.CurrentEpoch.CommitID, - parentStatus.NextEpoch.SetupID, parentStatus.NextEpoch.CommitID, - flow.ZeroID, flow.ZeroID, - ) - return epochStatus, err + // Different epoch counter - handle epoch transition and phase transition Committed->Staking + if parentEpochCounter != childEpochCounter { + childEpochSetup := finalizedEpochState.EpochSetup() + events = append(events, func() { m.consumer.EpochTransition(childEpochSetup.Counter, finalized) }) + // set current epoch counter corresponding to new epoch + metrics = append(metrics, func() { m.metrics.CurrentEpochCounter(childEpochSetup.Counter) }) + // denote the most recent epoch transition height + metrics = append(metrics, func() { m.metrics.EpochTransitionHeight(finalized.Height) }) + // set epoch phase + metrics = append(metrics, func() { m.metrics.CurrentEpochPhase(childEpochPhase) }) + // set current epoch view values + metrics = append( + metrics, + // Since we have just started a new epoch, there cannot be any extensions yet. + // Therefore, it is safe to directly use EpochSetup.FinalView here (epoch extensions are handled above). + func() { m.metrics.CurrentEpochFinalView(childEpochSetup.FinalView) }, + func() { + m.metrics.CurrentDKGPhaseViews(childEpochSetup.DKGPhase1FinalView, childEpochSetup.DKGPhase2FinalView, childEpochSetup.DKGPhase3FinalView) + }, + ) + return + } + + // Same epoch phase -> nothing to do + if parentEpochPhase == childEpochPhase { + return + } + + // Update the phase metric when any phase change occurs + events = append(events, func() { m.metrics.CurrentEpochPhase(childEpochPhase) }) + + // Handle phase transition Staking->Setup. `finalized` is first block in Setup phase. + if parentEpochPhase == flow.EpochPhaseStaking && childEpochPhase == flow.EpochPhaseSetup { + events = append(events, func() { m.consumer.EpochSetupPhaseStarted(childEpochCounter, finalized) }) + return + } + // Handle phase transition Setup/Fallback->Committed phase. `finalized` is first block in Committed phase. 
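+	// (A Fallback->Committed transition within the same epoch occurs when the protocol recovers from EFM.)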
+ if (parentEpochPhase == flow.EpochPhaseSetup || parentEpochPhase == flow.EpochPhaseFallback) && childEpochPhase == flow.EpochPhaseCommitted { + events = append(events, func() { m.consumer.EpochCommittedPhaseStarted(childEpochCounter, finalized) }) + return + } + // Handle phase transition Staking/Setup->Fallback phase + // NOTE: we can have the phase transition Committed->Fallback, but only across an epoch boundary (handled above) + if (parentEpochPhase == flow.EpochPhaseStaking || parentEpochPhase == flow.EpochPhaseSetup) && childEpochPhase == flow.EpochPhaseFallback { + // This conditional exists to capture this final set of valid phase transitions, to allow sanity check below + // In the future we could add a protocol event here for transition into the Fallback phase, if any consumers need this. + return + } + + return nil, nil, fmt.Errorf("sanity check failed: invalid subsequent [epoch-phase] [%d-%s]->[%d-%s]", + parentEpochCounter, parentEpochPhase, childEpochCounter, childEpochPhase) } -// handleEpochServiceEvents handles applying state changes which occur as a result -// of service events being included in a block payload: -// - inserting incorporated service events -// - updating EpochStatus for the candidate block -// -// Consider a chain where a service event is emitted during execution of block A. -// Block B contains a receipt for A. Block C contains a seal for block A. -// -// A <- .. <- B(RA) <- .. <- C(SA) -// -// Service events are included within execution results, which are stored -// opaquely as part of the block payload in block B. We only validate and insert -// the typed service event to storage once we process C, the block containing the -// seal for block A. This is because we rely on the sealing subsystem to validate -// correctness of the service event before processing it. -// Consequently, any change to the protocol state introduced by a service event -// emitted during execution of block A would only become visible when querying -// C or its descendants. -// -// This method will only apply service-event-induced state changes when the -// input block has the form of block C (ie. contains a seal for a block in -// which a service event was emitted). -// -// Return values: -// - dbUpdates - If the service events are valid, or there are no service events, -// this method returns a slice of Badger operations to apply while storing the block. -// This includes an operation to index the epoch status for every block, and -// operations to insert service events for blocks that include them. -// -// No errors are expected during normal operation. -func (m *FollowerState) handleEpochServiceEvents(candidate *flow.Block) (dbUpdates []func(*transaction.Tx) error, err error) { - epochFallbackTriggered, err := m.isEpochEmergencyFallbackTriggered() - if err != nil { - return nil, fmt.Errorf("could not retrieve epoch fallback status: %w", err) - } - epochStatus, err := m.epochStatus(candidate.Header, epochFallbackTriggered) - if err != nil { - return nil, fmt.Errorf("could not determine epoch status for candidate block: %w", err) - } - activeSetup, err := m.epoch.setups.ByID(epochStatus.CurrentEpoch.SetupID) - if err != nil { - return nil, fmt.Errorf("could not retrieve current epoch setup event: %w", err) - } - - // always persist the candidate's epoch status - // note: We are scheduling the operation to store the Epoch status using the _pointer_ variable `epochStatus`. - // The struct `epochStatus` points to will still be modified below. 
- blockID := candidate.ID() - dbUpdates = append(dbUpdates, m.epoch.statuses.StoreTx(blockID, epochStatus)) - - // never process service events after epoch fallback is triggered - if epochStatus.InvalidServiceEventIncorporated || epochFallbackTriggered { - return dbUpdates, nil - } - - // We apply service events from blocks which are sealed by this candidate block. - // The block's payload might contain epoch preparation service events for the next - // epoch. In this case, we need to update the tentative protocol state. - // We need to validate whether all information is available in the protocol - // state to go to the next epoch when needed. In cases where there is a bug - // in the smart contract, it could be that this happens too late and the - // chain finalization should halt. - - // block payload may not specify seals in order, so order them by block height before processing - orderedSeals, err := protocol.OrderedSeals(candidate.Payload, m.headers) +// versionBeaconOnBlockFinalized extracts and returns the VersionBeacons from the +// finalized block's seals. +// This could return multiple VersionBeacons if the parent block contains multiple Seals. +// The version beacons will be returned in the ascending height order of the seals. +// Technically only the last VersionBeacon is relevant. +func (m *FollowerState) versionBeaconOnBlockFinalized( + finalized *flow.Block, +) ([]*flow.SealedVersionBeacon, error) { + var versionBeacons []*flow.SealedVersionBeacon + + seals, err := protocol.OrderedSeals(finalized.Payload.Seals, m.headers) if err != nil { if errors.Is(err, storage.ErrNotFound) { - return nil, fmt.Errorf("ordering seals: parent payload contains seals for unknown block: %s", err.Error()) + return nil, fmt.Errorf( + "ordering seals: parent payload contains"+ + " seals for unknown block: %w", err) } return nil, fmt.Errorf("unexpected error ordering seals: %w", err) } - for _, seal := range orderedSeals { + + for _, seal := range seals { result, err := m.results.ByID(seal.ResultID) if err != nil { - return nil, fmt.Errorf("could not get result (id=%x) for seal (id=%x): %w", seal.ResultID, seal.ID(), err) + return nil, fmt.Errorf( + "could not retrieve result (id=%x) for seal (id=%x): %w", + seal.ResultID, + seal.ID(), + err) } - for _, event := range result.ServiceEvents { - switch ev := event.Event.(type) { - case *flow.EpochSetup: - // validate the service event - err := isValidExtendingEpochSetup(ev, activeSetup, epochStatus) - if err != nil { - if protocol.IsInvalidServiceEventError(err) { - // we have observed an invalid service event, which triggers epoch fallback mode - epochStatus.InvalidServiceEventIncorporated = true - return dbUpdates, nil - } - return nil, fmt.Errorf("unexpected error validating EpochSetup service event: %w", err) - } - - // prevents multiple setup events for same Epoch (including multiple setup events in payload of same block) - epochStatus.NextEpoch.SetupID = ev.ID() - - // we'll insert the setup event when we insert the block - dbUpdates = append(dbUpdates, m.epoch.setups.StoreTx(ev)) - - case *flow.EpochCommit: - // if we receive an EpochCommit event, we must have already observed an EpochSetup event - // => otherwise, we have observed an EpochCommit without corresponding EpochSetup, which triggers epoch fallback mode - if epochStatus.NextEpoch.SetupID == flow.ZeroID { - epochStatus.InvalidServiceEventIncorporated = true - return dbUpdates, nil - } - - // if we have observed an EpochSetup event, we must be able to retrieve it from the database - 
// => otherwise, this is a symptom of bug or data corruption since this component sets the SetupID field - extendingSetup, err := m.epoch.setups.ByID(epochStatus.NextEpoch.SetupID) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - return nil, irrecoverable.NewExceptionf("could not retrieve EpochSetup (id=%x) stored in EpochStatus for block %x: %w", - epochStatus.NextEpoch.SetupID, blockID, err) - } - return nil, fmt.Errorf("unexpected error retrieving next epoch setup: %w", err) - } - - // validate the service event - err = isValidExtendingEpochCommit(ev, extendingSetup, activeSetup, epochStatus) - if err != nil { - if protocol.IsInvalidServiceEventError(err) { - // we have observed an invalid service event, which triggers epoch fallback mode - epochStatus.InvalidServiceEventIncorporated = true - return dbUpdates, nil - } - return nil, fmt.Errorf("unexpected error validating EpochCommit service event: %w", err) - } - - // prevents multiple setup events for same Epoch (including multiple setup events in payload of same block) - epochStatus.NextEpoch.CommitID = ev.ID() - - // we'll insert the commit event when we insert the block - dbUpdates = append(dbUpdates, m.epoch.commits.StoreTx(ev)) - case *flow.VersionBeacon: - // do nothing for now - default: - return nil, fmt.Errorf("invalid service event type (type_name=%s, go_type=%T)", event.Type, ev) + ev, ok := event.Event.(*flow.VersionBeacon) + + if !ok { + // skip other service event types. + // validation if this is a known service event type is done elsewhere. + continue + } + + err := ev.Validate() + if err != nil { + m.logger.Warn(). + Err(err). + Str("block_id", finalized.ID().String()). + Interface("event", ev). + Msg("invalid VersionBeacon service event") + continue } + + // The version beacon only becomes actionable/valid/active once the block + // containing the version beacon has been sealed. That is why we set the + // Seal height to the current block height. 
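+			// In other words, SealHeight records the height at which the beacon
+			// became actionable.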
+ versionBeacons = append(versionBeacons, &flow.SealedVersionBeacon{ + VersionBeacon: ev, + SealHeight: finalized.Height, + }) } } - return + + return versionBeacons, nil } diff --git a/state/protocol/badger/mutator_test.go b/state/protocol/badger/mutator_test.go index 1b80664790f..47055894268 100644 --- a/state/protocol/badger/mutator_test.go +++ b/state/protocol/badger/mutator_test.go @@ -1,27 +1,26 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package badger_test import ( "context" "errors" + "fmt" "math/rand" "sync" "testing" - "time" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" + "github.com/onflow/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/flow/order" + "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" + mmetrics "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/module/trace" @@ -31,47 +30,44 @@ import ( "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/state/protocol/inmem" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/state/protocol/util" "github.com/onflow/flow-go/storage" - stoerr "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - storeutil "github.com/onflow/flow-go/storage/util" + "github.com/onflow/flow-go/storage/deferred" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - var participants = unittest.IdentityListFixture(5, unittest.WithAllRoles()) func TestBootstrapValid(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithBootstrapState(t, rootSnapshot, func(db *badger.DB, state *protocol.State) { + util.RunWithBootstrapState(t, rootSnapshot, func(db storage.DB, state *protocol.State) { var finalized uint64 - err := db.View(operation.RetrieveFinalizedHeight(&finalized)) + err := operation.RetrieveFinalizedHeight(db.Reader(), &finalized) require.NoError(t, err) var sealed uint64 - err = db.View(operation.RetrieveSealedHeight(&sealed)) + err = operation.RetrieveSealedHeight(db.Reader(), &sealed) require.NoError(t, err) var genesisID flow.Identifier - err = db.View(operation.LookupBlockHeight(0, &genesisID)) + err = operation.LookupBlockHeight(db.Reader(), 0, &genesisID) require.NoError(t, err) var header flow.Header - err = db.View(operation.RetrieveHeader(genesisID, &header)) + err = operation.RetrieveHeader(db.Reader(), genesisID, &header) require.NoError(t, err) var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(genesisID, &sealID)) + err = operation.LookupLatestSealAtBlock(db.Reader(), genesisID, &sealID) require.NoError(t, err) _, seal, err := rootSnapshot.SealedResult() require.NoError(t, err) - err = db.View(operation.RetrieveSeal(sealID, seal)) + err = operation.RetrieveSeal(db.Reader(), sealID, seal) 
require.NoError(t, err) block, err := rootSnapshot.Head() @@ -88,11 +84,13 @@ func TestBootstrapValid(t *testing.T) { // * BlockFinalized is emitted when the block is finalized // * BlockProcessable is emitted when a block's child is inserted func TestExtendValid(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() + db := pebbleimpl.ToDB(pdb) log := zerolog.Nop() - all := storeutil.StorageLayer(t, db) + all := store.InitAll(metrics, db) distributor := events.NewDistributor() consumer := mockprotocol.NewConsumer(t) @@ -100,20 +98,22 @@ func TestExtendValid(t *testing.T) { block, result, seal := unittest.BootstrapFixture(participants) qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(block.ID())) - rootSnapshot, err := inmem.SnapshotFromBootstrapState(block, result, seal, qc) + rootSnapshot, err := unittest.SnapshotFromBootstrapState(block, result, seal, qc) require.NoError(t, err) state, err := protocol.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) @@ -133,31 +133,41 @@ func TestExtendValid(t *testing.T) { require.NoError(t, err) // insert block1 on top of the root block - block1 := unittest.BlockWithParentFixture(block.Header) - err = fullState.Extend(context.Background(), block1) + block1 := unittest.BlockWithParentProtocolState(block) + err = fullState.Extend(context.Background(), unittest.ProposalFromBlock(block1)) require.NoError(t, err) // we should not emit BlockProcessable for the root block - consumer.AssertNotCalled(t, "BlockProcessable", block.Header) + consumer.AssertNotCalled(t, "BlockProcessable", block.ToHeader(), mock.Anything) t.Run("BlockFinalized event should be emitted when block1 is finalized", func(t *testing.T) { - consumer.On("BlockFinalized", block1.Header).Once() + consumer.On("BlockFinalized", block1.ToHeader()).Once() err := fullState.Finalize(context.Background(), block1.ID()) require.NoError(t, err) }) t.Run("BlockProcessable event should be emitted when any child of block1 is inserted", func(t *testing.T) { - block2 := unittest.BlockWithParentFixture(block1.Header) - consumer.On("BlockProcessable", block1.Header, mock.Anything).Once() - err := fullState.Extend(context.Background(), block2) + block2 := unittest.BlockWithParentProtocolState(block1) + consumer.On("BlockProcessable", block1.ToHeader(), mock.Anything).Once() + err := fullState.Extend(context.Background(), unittest.ProposalFromBlock(block2)) require.NoError(t, err) + + // verify that block1's view is indexed as certified, because it has a child (block2) + var indexedID flow.Identifier + require.NoError(t, operation.LookupCertifiedBlockByView(db.Reader(), block1.View, &indexedID)) + require.Equal(t, block1.ID(), indexedID) + + // verify that block2's view is not indexed as certified, because it has no children + err = operation.LookupCertifiedBlockByView(db.Reader(), block2.View, &indexedID) + require.ErrorIs(t, err, storage.ErrNotFound) }) }) } func TestSealedIndex(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { + rootProtocolStateID := 
getRootProtocolStateID(t, rootSnapshot) + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { rootHeader, err := rootSnapshot.Head() require.NoError(t, err) @@ -168,58 +178,72 @@ func TestSealedIndex(t *testing.T) { // when B7 is finalized, can find seals for B2, B3 // block 1 - b1 := unittest.BlockWithParentFixture(rootHeader) - b1.SetPayload(flow.EmptyPayload()) - err = state.Extend(context.Background(), b1) + b1 := unittest.BlockWithParentAndPayload( + rootHeader, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b1)) require.NoError(t, err) // block 2(result B1) b1Receipt := unittest.ReceiptForBlockFixture(b1) - b2 := unittest.BlockWithParentFixture(b1.Header) - b2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(b1Receipt))) - err = state.Extend(context.Background(), b2) + b2 := unittest.BlockWithParentAndPayload( + b1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(b1Receipt), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b2)) require.NoError(t, err) // block 3 - b3 := unittest.BlockWithParentFixture(b2.Header) - b3.SetPayload(flow.EmptyPayload()) - err = state.Extend(context.Background(), b3) + b3 := unittest.BlockWithParentProtocolState(b2) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b3)) require.NoError(t, err) // block 4 (resultB2, resultB3) b2Receipt := unittest.ReceiptForBlockFixture(b2) b3Receipt := unittest.ReceiptForBlockFixture(b3) - b4 := unittest.BlockWithParentFixture(b3.Header) - b4.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{b2Receipt.Meta(), b3Receipt.Meta()}, - Results: []*flow.ExecutionResult{&b2Receipt.ExecutionResult, &b3Receipt.ExecutionResult}, - }) - err = state.Extend(context.Background(), b4) + b4 := unittest.BlockWithParentAndPayload( + b3.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{b2Receipt.Stub(), b3Receipt.Stub()}, + Results: []*flow.ExecutionResult{&b2Receipt.ExecutionResult, &b3Receipt.ExecutionResult}, + ProtocolStateID: rootProtocolStateID, + }, + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b4)) require.NoError(t, err) // block 5 (sealB1) b1Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(&b1Receipt.ExecutionResult)) - b5 := unittest.BlockWithParentFixture(b4.Header) - b5.SetPayload(flow.Payload{ - Seals: []*flow.Seal{b1Seal}, - }) - err = state.Extend(context.Background(), b5) + b5 := unittest.BlockWithParentAndPayload( + b4.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{b1Seal}, + ProtocolStateID: rootProtocolStateID, + }, + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b5)) require.NoError(t, err) // block 6 (sealB2, sealB3) b2Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(&b2Receipt.ExecutionResult)) b3Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(&b3Receipt.ExecutionResult)) - b6 := unittest.BlockWithParentFixture(b5.Header) - b6.SetPayload(flow.Payload{ - Seals: []*flow.Seal{b2Seal, b3Seal}, - }) - err = state.Extend(context.Background(), b6) + b6 := unittest.BlockWithParentAndPayload( + b5.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{b2Seal, b3Seal}, + ProtocolStateID: rootProtocolStateID, + }, + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b6)) require.NoError(t, err) // block 7 - b7 := 
unittest.BlockWithParentFixture(b6.Header) - b7.SetPayload(flow.EmptyPayload()) - err = state.Extend(context.Background(), b7) + b7 := unittest.BlockWithParentProtocolState(b6) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b7)) require.NoError(t, err) // finalizing b1 - b4 @@ -234,7 +258,7 @@ func TestSealedIndex(t *testing.T) { require.NoError(t, err) metrics := metrics.NewNoopCollector() - seals := bstorage.NewSeals(metrics, db) + seals := store.NewSeals(metrics, db) // can only find seal for G _, err = seals.FinalizedSealForBlock(rootHeader.ID()) @@ -274,9 +298,190 @@ func TestSealedIndex(t *testing.T) { } +func TestVersionBeaconIndex(t *testing.T) { + rootSnapshot := unittest.RootSnapshotFixture(participants) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { + rootHeader, err := rootSnapshot.Head() + require.NoError(t, err) + + // build a chain: + // G <- B1 <- B2 (resultB1(vb1)) <- B3 <- B4 (resultB2(vb2), resultB3(vb3)) <- B5 (sealB1) <- B6 (sealB2, sealB3) + // up until and including finalization of B5 there should be no VBs indexed + // when B5 is finalized, index VB1 + // when B6 is finalized, we can index VB2 and VB3, but (only) the last one should be indexed by seal height + + // block 1 + b1 := unittest.BlockWithParentAndPayload( + rootHeader, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b1)) + require.NoError(t, err) + + vb1 := unittest.VersionBeaconFixture( + unittest.WithBoundaries( + flow.VersionBoundary{ + BlockHeight: rootHeader.Height, + Version: "0.21.37", + }, + flow.VersionBoundary{ + BlockHeight: rootHeader.Height + 100, + Version: "0.21.38", + }, + ), + ) + vb2 := unittest.VersionBeaconFixture( + unittest.WithBoundaries( + flow.VersionBoundary{ + BlockHeight: rootHeader.Height, + Version: "0.21.37", + }, + flow.VersionBoundary{ + BlockHeight: rootHeader.Height + 101, + Version: "0.21.38", + }, + flow.VersionBoundary{ + BlockHeight: rootHeader.Height + 201, + Version: "0.21.39", + }, + ), + ) + vb3 := unittest.VersionBeaconFixture( + unittest.WithBoundaries( + flow.VersionBoundary{ + BlockHeight: rootHeader.Height, + Version: "0.21.37", + }, + flow.VersionBoundary{ + BlockHeight: rootHeader.Height + 99, + Version: "0.21.38", + }, + flow.VersionBoundary{ + BlockHeight: rootHeader.Height + 199, + Version: "0.21.39", + }, + flow.VersionBoundary{ + BlockHeight: rootHeader.Height + 299, + Version: "0.21.40", + }, + ), + ) + + b1Receipt := unittest.ReceiptForBlockFixture(b1) + b1Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{vb1.ServiceEvent()} + b2 := unittest.BlockWithParentAndPayload( + b1.ToHeader(), + unittest.PayloadFixture(unittest.WithReceipts(b1Receipt), unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b2)) + require.NoError(t, err) + + // block 3 + b3 := unittest.BlockWithParentProtocolState(b2) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b3)) + require.NoError(t, err) + + // block 4 (resultB2, resultB3) + b2Receipt := unittest.ReceiptForBlockFixture(b2) + b2Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{vb2.ServiceEvent()} + + b3Receipt := unittest.ReceiptForBlockFixture(b3) + b3Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{vb3.ServiceEvent()} + + b4 := 
unittest.BlockWithParentAndPayload( + b3.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{b2Receipt.Stub(), b3Receipt.Stub()}, + Results: []*flow.ExecutionResult{&b2Receipt.ExecutionResult, &b3Receipt.ExecutionResult}, + ProtocolStateID: rootProtocolStateID, + }, + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b4)) + require.NoError(t, err) + + // block 5 (sealB1) + b1Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(&b1Receipt.ExecutionResult)) + b5 := unittest.BlockWithParentAndPayload( + b4.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{b1Seal}, + ProtocolStateID: rootProtocolStateID, + }, + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b5)) + require.NoError(t, err) + + // block 6 (sealB2, sealB3) + b2Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(&b2Receipt.ExecutionResult)) + b3Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(&b3Receipt.ExecutionResult)) + b6 := unittest.BlockWithParentAndPayload( + b5.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{b2Seal, b3Seal}, + ProtocolStateID: rootProtocolStateID, + }, + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(b6)) + require.NoError(t, err) + + versionBeacons := store.NewVersionBeacons(db) + + // No VB can be found before finalizing anything + vb, err := versionBeacons.Highest(b6.Height) + require.NoError(t, err) + require.Nil(t, vb) + + // finalizing b1 - b5 + err = state.Finalize(context.Background(), b1.ID()) + require.NoError(t, err) + err = state.Finalize(context.Background(), b2.ID()) + require.NoError(t, err) + err = state.Finalize(context.Background(), b3.ID()) + require.NoError(t, err) + err = state.Finalize(context.Background(), b4.ID()) + require.NoError(t, err) + + // No VB can be found after finalizing B4 + vb, err = versionBeacons.Highest(b6.Height) + require.NoError(t, err) + require.Nil(t, vb) + + // once B5 is finalized, B1 and VB1 are sealed, hence index should now find it + err = state.Finalize(context.Background(), b5.ID()) + require.NoError(t, err) + + versionBeacon, err := versionBeacons.Highest(b6.Height) + require.NoError(t, err) + require.Equal(t, + &flow.SealedVersionBeacon{ + VersionBeacon: vb1, + SealHeight: b5.Height, + }, + versionBeacon, + ) + + // finalizing B6 should index events sealed by B6, so VB2 and VB3 + // while we don't expect multiple VBs in one block, we index newest, so last one emitted - VB3 + err = state.Finalize(context.Background(), b6.ID()) + require.NoError(t, err) + + versionBeacon, err = versionBeacons.Highest(b6.Height) + require.NoError(t, err) + require.Equal(t, + &flow.SealedVersionBeacon{ + VersionBeacon: vb3, + SealHeight: b6.Height, + }, + versionBeacon, + ) + }) +} + func TestExtendSealedBoundary(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { head, err := rootSnapshot.Head() require.NoError(t, err) _, seal, err := rootSnapshot.SealedResult() @@ -286,28 +491,36 @@ func TestExtendSealedBoundary(t *testing.T) { require.Equal(t, seal.FinalState, finalCommit, "original commit should be root commit") // Create a first block on top of the snapshot - block1 := unittest.BlockWithParentFixture(head) - block1.SetPayload(flow.EmptyPayload()) - err = 
state.Extend(context.Background(), block1) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block1)) require.NoError(t, err) // Add a second block containing a receipt committing to the first block block1Receipt := unittest.ReceiptForBlockFixture(block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{block1Receipt.Meta()}, - Results: []*flow.ExecutionResult{&block1Receipt.ExecutionResult}, - }) - err = state.Extend(context.Background(), block2) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{block1Receipt.Stub()}, + Results: []*flow.ExecutionResult{&block1Receipt.ExecutionResult}, + ProtocolStateID: rootProtocolStateID, + }, + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2)) require.NoError(t, err) // Add a third block containing a seal for the first block block1Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(&block1Receipt.ExecutionResult)) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.Payload{ - Seals: []*flow.Seal{block1Seal}, - }) - err = state.Extend(context.Background(), block3) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{block1Seal}, + ProtocolStateID: rootProtocolStateID, + }, + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block3)) require.NoError(t, err) finalCommit, err = state.Final().Commit() @@ -337,93 +550,110 @@ func TestExtendSealedBoundary(t *testing.T) { }) } +// TestExtendMissingParent tests the behaviour when attempting to extend the protocol state by a block +// whose parent is unknown. Per convention, the protocol state requires that the candidate's +// parent has already been ingested. Otherwise, an exception is returned. 
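+// The test below asserts that the resulting error is neither an InvalidExtensionError
+// nor an OutdatedExtensionError, i.e. it is treated as an exception.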
func TestExtendMissingParent(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { - extend := unittest.BlockFixture() - extend.Payload.Guarantees = nil - extend.Payload.Seals = nil - extend.Header.Height = 2 - extend.Header.View = 2 - extend.Header.ParentID = unittest.BlockFixture().ID() - extend.Header.PayloadHash = extend.Payload.Hash() + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { + extend := unittest.BlockFixture( + unittest.Block.WithHeight(2), + unittest.Block.WithView(2), + unittest.Block.WithParentView(1), + ) - err := state.Extend(context.Background(), &extend) + err := state.Extend(context.Background(), unittest.ProposalFromBlock(extend)) require.Error(t, err) - require.True(t, st.IsInvalidExtensionError(err), err) + require.False(t, st.IsInvalidExtensionError(err), err) + require.False(t, st.IsOutdatedExtensionError(err), err) - // verify seal not indexed + // verify seal that was contained in candidate block is not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(extend.ID(), &sealID)) + err = operation.LookupLatestSealAtBlock(db.Reader(), extend.ID(), &sealID) require.Error(t, err) - require.ErrorIs(t, err, stoerr.ErrNotFound) + require.ErrorIs(t, err, storage.ErrNotFound) }) } +// TestExtendHeightTooSmall tests the behaviour when attempting to extend the protocol state by a block +// whose height is not larger than its parent's height. The protocol mandates that the candidate's +// height is exactly one larger than its parent's height. Otherwise, an exception should be returned. func TestExtendHeightTooSmall(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { - head, err := rootSnapshot.Head() - require.NoError(t, err) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + head, err := rootSnapshot.Head() + require.NoError(t, err) - extend := unittest.BlockFixture() - extend.SetPayload(flow.EmptyPayload()) - extend.Header.Height = 1 - extend.Header.View = 1 - extend.Header.ParentID = head.ID() - extend.Header.ParentView = head.View + // we create the following to descendants of head: + // head <- blockB <- blockC + // where blockB and blockC have exactly the same height + emptyPayload := unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)) + blockB := unittest.BlockFixture( // creates child with increased height and view (protocol compliant) + unittest.Block.WithParent(head.ID(), head.View, head.Height), + unittest.Block.WithPayload(emptyPayload)) - err = state.Extend(context.Background(), &extend) - require.NoError(t, err) + blockC := unittest.BlockFixture( // creates child with height identical to parent (protocol violation) but increased view (protocol compliant) + unittest.Block.WithParent(blockB.ID(), blockB.View, blockB.Height), + unittest.Block.WithHeight(blockB.Height), + unittest.Block.WithPayload(emptyPayload)) - // create another block with the same height and view, that is coming after - extend.Header.ParentID = extend.Header.ID() - extend.Header.Height = 1 - extend.Header.View = 2 + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, chainState *protocol.ParticipantState) { + require.NoError(t, chainState.Extend(context.Background(), unittest.ProposalFromBlock(blockB))) - 
err = state.Extend(context.Background(), &extend) + err = chainState.Extend(context.Background(), unittest.ProposalFromBlock(blockC)) require.Error(t, err) + require.True(t, st.IsInvalidExtensionError(err)) - // verify seal not indexed + // Whenever the state ingests a block, it indexes the latest seal as of this block. + // Therefore, we can use this as a check to confirm that blockB was successfully ingested, + // but the information from blockC was not. var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(extend.ID(), &sealID)) - require.Error(t, err) - require.ErrorIs(t, err, stoerr.ErrNotFound) + // latest seal for blockB should be found, as blockB was successfully ingested: + require.NoError(t, operation.LookupLatestSealAtBlock(db.Reader(), blockB.ID(), &sealID)) + // latest seal for blockC should NOT be found, because extending the state with blockC errored: + require.ErrorIs(t, + operation.LookupLatestSealAtBlock(db.Reader(), blockC.ID(), &sealID), + storage.ErrNotFound) }) } func TestExtendHeightTooLarge(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { head, err := rootSnapshot.Head() require.NoError(t, err) - block := unittest.BlockWithParentFixture(head) - block.SetPayload(flow.EmptyPayload()) + block := unittest.BlockWithParentAndPayload( + head, + *flow.NewEmptyPayload(), + ) // set an invalid height - block.Header.Height = head.Height + 2 + block.Height = head.Height + 2 - err = state.Extend(context.Background(), block) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.Error(t, err) }) } -// TestExtendInconsistentParentView tests if mutator rejects block with invalid ParentView. ParentView must be consistent +// TestExtendInconsistentParentView tests if mutableState rejects block with invalid ParentView. ParentView must be consistent // with view of block referred by ParentID. 
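+// A mismatch between ParentView and the parent's actual view must be rejected as an InvalidExtensionError.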
func TestExtendInconsistentParentView(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { head, err := rootSnapshot.Head() require.NoError(t, err) - block := unittest.BlockWithParentFixture(head) - block.SetPayload(flow.EmptyPayload()) + block := unittest.BlockWithParentAndPayload( + head, + *flow.NewEmptyPayload(), + ) // set an invalid parent view - block.Header.ParentView++ + block.ParentView++ + block.View++ - err = state.Extend(context.Background(), block) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.Error(t, err) require.True(t, st.IsInvalidExtensionError(err)) }) @@ -431,165 +661,204 @@ func TestExtendInconsistentParentView(t *testing.T) { func TestExtendBlockNotConnected(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { head, err := rootSnapshot.Head() require.NoError(t, err) // add 2 blocks, the second finalizing/sealing the state of the first - extend := unittest.BlockWithParentFixture(head) - extend.SetPayload(flow.EmptyPayload()) + extend := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) - err = state.Extend(context.Background(), extend) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(extend)) require.NoError(t, err) err = state.Finalize(context.Background(), extend.ID()) require.NoError(t, err) // create a fork at view/height 1 and try to connect it to root - extend.Header.Timestamp = extend.Header.Timestamp.Add(time.Second) - extend.Header.ParentID = head.ID() + extend.Timestamp += 1000 // shift time stamp forward by 1 second = 1000ms + extend.ParentID = head.ID() - err = state.Extend(context.Background(), extend) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(extend)) require.Error(t, err) // verify seal not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(extend.ID(), &sealID)) + err = operation.LookupLatestSealAtBlock(db.Reader(), extend.ID(), &sealID) require.Error(t, err) - require.ErrorIs(t, err, stoerr.ErrNotFound) + require.ErrorIs(t, err, storage.ErrNotFound) }) } func TestExtendInvalidChainID(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { head, err := rootSnapshot.Head() require.NoError(t, err) - block := unittest.BlockWithParentFixture(head) - block.SetPayload(flow.EmptyPayload()) + block := unittest.BlockWithParentAndPayload( + head, + *flow.NewEmptyPayload(), + ) // use an invalid chain ID - block.Header.ChainID = head.ChainID + "-invalid" + block.ChainID = head.ChainID + "-invalid" - err = state.Extend(context.Background(), block) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.Error(t, err) require.True(t, st.IsInvalidExtensionError(err), err) }) } +// 
TestExtendReceiptsNotSorted tests the case where receipts are included in a block payload +// not sorted by height. Previously, this constraint was required (unordered receipts resulted +// in an error). Now, any ordering of receipts should be accepted by the EvolvingState. func TestExtendReceiptsNotSorted(t *testing.T) { - // TODO: this test needs to be updated: - // We don't require the receipts to be sorted by height anymore - // We could require an "parent first" ordering, which is less strict than - // a full ordering by height - unittest.SkipUnless(t, unittest.TEST_TODO, "needs update") - rootSnapshot := unittest.RootSnapshotFixture(participants) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) head, err := rootSnapshot.Head() require.NoError(t, err) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { // create block2 and block3 - block2 := unittest.BlockWithParentFixture(head) - block2.Payload.Guarantees = nil - block2.Header.PayloadHash = block2.Payload.Hash() - err := state.Extend(context.Background(), block2) + block2 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err := state.Extend(context.Background(), unittest.ProposalFromBlock(block2)) require.NoError(t, err) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.Payload.Guarantees = nil - block3.Header.PayloadHash = block3.Payload.Hash() - err = state.Extend(context.Background(), block3) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block3)) require.NoError(t, err) receiptA := unittest.ReceiptForBlockFixture(block3) receiptB := unittest.ReceiptForBlockFixture(block2) // insert a block with payload receipts not sorted by block height. 
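 		// Under the updated convention (see doc comment above), Extend should accept this ordering.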
- block4 := unittest.BlockWithParentFixture(block3.Header) - block4.Payload = &flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receiptA.Meta(), receiptB.Meta()}, - Results: []*flow.ExecutionResult{&receiptA.ExecutionResult, &receiptB.ExecutionResult}, - } - block4.Header.PayloadHash = block4.Payload.Hash() - err = state.Extend(context.Background(), block4) - require.Error(t, err) - require.True(t, st.IsInvalidExtensionError(err), err) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + unittest.PayloadFixture( + unittest.WithProtocolStateID(rootProtocolStateID), + unittest.WithReceipts(receiptA, receiptB), + ), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block4)) + require.NoError(t, err) }) } func TestExtendReceiptsInvalid(t *testing.T) { validator := mockmodule.NewReceiptValidator(t) - rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolStateAndValidator(t, rootSnapshot, validator, func(db *badger.DB, state *protocol.ParticipantState) { + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFullProtocolStateAndValidator(t, rootSnapshot, validator, func(db storage.DB, state *protocol.ParticipantState) { head, err := rootSnapshot.Head() require.NoError(t, err) - validator.On("ValidatePayload", mock.Anything).Return(nil).Once() + // create block2 and block3 as descendants of head + block2 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + receipt := unittest.ReceiptForBlockFixture(block2) // receipt for block 2 + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receipt.Stub()}, + Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, + ProtocolStateID: rootProtocolStateID, + }, + ) - // create block2 and block3 - block2 := unittest.BlockWithParentFixture(head) - block2.SetPayload(flow.EmptyPayload()) - err = state.Extend(context.Background(), block2) + // validator accepts block 2 + validator.On("ValidatePayload", block2).Return(nil).Once() + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2)) require.NoError(t, err) - // Add a receipt for block 2 - receipt := unittest.ExecutionReceiptFixture() + // but receipt for block 2 is invalid, which the ParticipantState should reject with an InvalidExtensionError + validator.On("ValidatePayload", block3).Return(engine.NewInvalidInputErrorf("")).Once() + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block3)) + require.Error(t, err) + require.True(t, st.IsInvalidExtensionError(err), err) + }) +} - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, - Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, - }) +// TestOnReceiptValidatorExceptions tests that ParticipantState escalates unexpected errors and exceptions +// returned by the ReceiptValidator. We expect that such errors are *not* interpreted as the block being invalid. 
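+// This includes module.UnknownBlockError, which indicates that the candidate's parent has not been ingested yet.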
+func TestOnReceiptValidatorExceptions(t *testing.T) {
+   validator := mockmodule.NewReceiptValidator(t)
+
+   rootSnapshot := unittest.RootSnapshotFixture(participants)
+   util.RunWithFullProtocolStateAndValidator(t, rootSnapshot, validator, func(db storage.DB, state *protocol.ParticipantState) {
+       head, err := rootSnapshot.Head()
+       require.NoError(t, err)
+       block := unittest.BlockWithParentFixture(head)

-       // force the receipt validator to refuse this payload
-       validator.On("ValidatePayload", block3).Return(engine.NewInvalidInputError("")).Once()
+       // Check that _unexpected_ failure causes the error to be escalated and is *not* interpreted as an invalid block.
+       validator.On("ValidatePayload", block).Return(fmt.Errorf("")).Once()
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block))
+       require.Error(t, err)
+       require.False(t, st.IsInvalidExtensionError(err), err)

-       err = state.Extend(context.Background(), block3)
+       // Check that an `UnknownBlockError` causes the error to be escalated and is *not* interpreted as an invalid receipt.
+       // Reasoning: per convention, the ParticipantState requires that the candidate's parent has already been ingested.
+       // Otherwise, an exception is returned. The `ReceiptValidator.ValidatePayload(..)` returning an `UnknownBlockError`
+       // indicates exactly this situation, where the parent block is unknown.
+       validator.On("ValidatePayload", block).Return(module.NewUnknownBlockError("")).Once()
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block))
        require.Error(t, err)
-       require.True(t, st.IsInvalidExtensionError(err), err)
+       require.False(t, st.IsInvalidExtensionError(err), err)
    })
 }
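Aside: the two tests above lean on a sentinel-error convention — benign, expected failures are wrapped in a dedicated error type that callers test for, while everything else is escalated as an exception. A minimal self-contained sketch of that pattern, with simplified types that are not flow-go's actual st package implementation:

package main

import (
	"errors"
	"fmt"
)

// InvalidExtensionError marks a candidate block as invalid (a benign, expected failure mode).
type InvalidExtensionError struct{ msg string }

func (e InvalidExtensionError) Error() string { return e.msg }

func NewInvalidExtensionErrorf(format string, args ...any) error {
	return InvalidExtensionError{msg: fmt.Sprintf(format, args...)}
}

// IsInvalidExtensionError mirrors the st.IsInvalidExtensionError check used in the tests.
func IsInvalidExtensionError(err error) bool {
	var target InvalidExtensionError
	return errors.As(err, &target)
}

// extend sketches the classification: an invalid payload yields the sentinel type,
// while any other failure (I/O error, unknown parent, ...) is returned unwrapped
// and must be escalated by the caller as an exception.
func extend(payloadValid bool) error {
	if !payloadValid {
		return NewInvalidExtensionErrorf("payload rejected by receipt validator")
	}
	return nil
}

func main() {
	fmt.Println(IsInvalidExtensionError(extend(false)))         // true: benign, block is invalid
	fmt.Println(IsInvalidExtensionError(fmt.Errorf("disk IO"))) // false: exception, escalate
}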
 func TestExtendReceiptsValid(t *testing.T) {
    rootSnapshot := unittest.RootSnapshotFixture(participants)
-   util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
+   rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot)
+   util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) {
        head, err := rootSnapshot.Head()
        require.NoError(t, err)
-       block2 := unittest.BlockWithParentFixture(head)
-       block2.SetPayload(flow.EmptyPayload())
-       err = state.Extend(context.Background(), block2)
+       block2 := unittest.BlockWithParentAndPayload(
+           head,
+           unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2))
        require.NoError(t, err)

-       block3 := unittest.BlockWithParentFixture(block2.Header)
-       block3.SetPayload(flow.EmptyPayload())
-       err = state.Extend(context.Background(), block3)
+       block3 := unittest.BlockWithParentProtocolState(block2)
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block3))
        require.NoError(t, err)

-       block4 := unittest.BlockWithParentFixture(block3.Header)
-       block4.SetPayload(flow.EmptyPayload())
-       err = state.Extend(context.Background(), block4)
+       block4 := unittest.BlockWithParentProtocolState(block3)
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block4))
        require.NoError(t, err)

        receipt3a := unittest.ReceiptForBlockFixture(block3)
        receipt3b := unittest.ReceiptForBlockFixture(block3)
        receipt3c := unittest.ReceiptForBlockFixture(block4)

-       block5 := unittest.BlockWithParentFixture(block4.Header)
-       block5.SetPayload(flow.Payload{
-           Receipts: []*flow.ExecutionReceiptMeta{
-               receipt3a.Meta(),
-               receipt3b.Meta(),
-               receipt3c.Meta(),
+       block5 := unittest.BlockWithParentAndPayload(
+           block4.ToHeader(),
+           flow.Payload{
+               Receipts: []*flow.ExecutionReceiptStub{
+                   receipt3a.Stub(),
+                   receipt3b.Stub(),
+                   receipt3c.Stub(),
+               },
+               Results: []*flow.ExecutionResult{
+                   &receipt3a.ExecutionResult,
+                   &receipt3b.ExecutionResult,
+                   &receipt3c.ExecutionResult,
+               },
+               ProtocolStateID: rootProtocolStateID,
            },
-           Results: []*flow.ExecutionResult{
-               &receipt3a.ExecutionResult,
-               &receipt3b.ExecutionResult,
-               &receipt3c.ExecutionResult,
-           },
-       })
-       err = state.Extend(context.Background(), block5)
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block5))
        require.NoError(t, err)
    })
 }

@@ -619,8 +888,10 @@ func TestExtendEpochTransitionValid(t *testing.T) {
    consumer.On("BlockFinalized", mock.Anything)
    consumer.On("BlockProcessable", mock.Anything, mock.Anything)
    rootSnapshot := unittest.RootSnapshotFixture(participants)
-
-   unittest.RunWithBadgerDB(t, func(db *badger.DB) {
+   rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot)
+   unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) {
+       lockManager := storage.NewTestingLockManager()
+       db := pebbleimpl.ToDB(pdb)

        // set up state and mock ComplianceMetrics object
        metrics := mockmodule.NewComplianceMetrics(t)
@@ -628,41 +899,41 @@ func TestExtendEpochTransitionValid(t *testing.T) {
        metrics.On("SealedHeight", mock.Anything)
        metrics.On("FinalizedHeight", mock.Anything)
        metrics.On("BlockFinalized", mock.Anything)
+       metrics.On("ProtocolStateVersion", mock.Anything)

        // expect epoch metric calls on bootstrap
-       initialCurrentEpoch := rootSnapshot.Epochs().Current()
-       counter, err := initialCurrentEpoch.Counter()
+       initialCurrentEpoch, err := rootSnapshot.Epochs().Current()
        require.NoError(t, err)
-       finalView, err := initialCurrentEpoch.FinalView()
-       require.NoError(t, err)
-       initialPhase, err := rootSnapshot.Phase()
+       counter := initialCurrentEpoch.Counter()
+       finalView := initialCurrentEpoch.FinalView()
+       initialPhase, err := rootSnapshot.EpochPhase()
        require.NoError(t, err)
+
        metrics.On("CurrentEpochCounter", counter).Once()
        metrics.On("CurrentEpochPhase", initialPhase).Once()
-       metrics.On("CommittedEpochFinalView", finalView).Once()
        metrics.On("CurrentEpochFinalView", finalView).Once()
-       dkgPhase1FinalView, dkgPhase2FinalView, dkgPhase3FinalView, err := realprotocol.DKGPhaseViews(initialCurrentEpoch)
-       require.NoError(t, err)
-       metrics.On("CurrentDKGPhase1FinalView", dkgPhase1FinalView).Once()
-       metrics.On("CurrentDKGPhase2FinalView", dkgPhase2FinalView).Once()
-       metrics.On("CurrentDKGPhase3FinalView", dkgPhase3FinalView).Once()
+       metrics.On("CurrentDKGPhaseViews",
+           initialCurrentEpoch.DKGPhase1FinalView(),
+           initialCurrentEpoch.DKGPhase2FinalView(),
+           initialCurrentEpoch.DKGPhase3FinalView()).Once()
        tracer := trace.NewNoopTracer()
        log := zerolog.Nop()
-       all := storeutil.StorageLayer(t, db)
+       all := store.InitAll(mmetrics.NewNoopCollector(), db)
        protoState, err := protocol.Bootstrap(
            metrics,
            db,
+           lockManager,
            all.Headers,
            all.Seals,
            all.Results,
            all.Blocks,
            all.QuorumCertificates,
-           all.Setups,
+           all.EpochSetups,
            all.EpochCommits,
-           all.Statuses,
+           all.EpochProtocolStateEntries,
+           all.ProtocolKVStore,
            all.VersionBeacons,
            rootSnapshot,
        )
@@ -682,20 +953,36 @@ func TestExtendEpochTransitionValid(t *testing.T) {
        )
        require.NoError(t, err)

+       mutableState := protocol_state.NewMutableProtocolState(
+           log,
+           all.EpochProtocolStateEntries,
+           all.ProtocolKVStore,
+           state.Params(),
+           all.Headers,
+           all.Results,
+           all.EpochSetups,
+           all.EpochCommits,
+       )
+       expectedStateIdCalculator := calculateExpectedStateId(t, mutableState)
+
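Aside: the expectedStateIdCalculator closure constructed above (via calculateExpectedStateId, defined elsewhere in this diff) recomputes the protocol state ID that a valid child block must commit to in its payload. A toy, self-contained sketch of this commit-and-verify pattern — hypothetical types, deliberately not the flow-go API:

package main

import (
	"crypto/sha256"
	"fmt"
)

// toy protocol state: the only dynamic field is the epoch counter
type protocolState struct{ epochCounter uint64 }

// ID is a deterministic commitment to the state's contents.
func (s protocolState) ID() [32]byte {
	return sha256.Sum256([]byte(fmt.Sprintf("state/%d", s.epochCounter)))
}

// evolve deterministically derives the child state from the parent state
// and the candidate block's inputs (here: whether it seals an epoch transition).
func evolve(parent protocolState, sealsEpochTransition bool) protocolState {
	if sealsEpochTransition {
		return protocolState{epochCounter: parent.epochCounter + 1}
	}
	return parent
}

// validate mirrors the test pattern: recompute the expected post-state ID
// and compare it with the ID the proposer committed to in its payload.
func validate(parent protocolState, sealsEpochTransition bool, committedID [32]byte) error {
	expected := evolve(parent, sealsEpochTransition).ID()
	if expected != committedID {
		return fmt.Errorf("payload commits to state %x, expected %x", committedID, expected)
	}
	return nil
}

func main() {
	parent := protocolState{epochCounter: 7}
	honest := evolve(parent, true).ID()
	fmt.Println(validate(parent, true, honest) == nil) // true: commitment matches
	fmt.Println(validate(parent, false, honest))       // mismatch error
}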
        head, err := rootSnapshot.Head()
        require.NoError(t, err)
        result, _, err := rootSnapshot.SealedResult()
        require.NoError(t, err)
+       _, err = state.AtBlockID(head.ID()).Epochs().Current()
+       require.NoError(t, err)

        // we should begin the epoch in the staking phase
-       phase, err := state.AtBlockID(head.ID()).Phase()
+       phase, err := state.AtBlockID(head.ID()).EpochPhase()
        assert.NoError(t, err)
        require.Equal(t, flow.EpochPhaseStaking, phase)

        // add a block for the first seal to reference
-       block1 := unittest.BlockWithParentFixture(head)
-       block1.SetPayload(flow.EmptyPayload())
-       err = state.Extend(context.Background(), block1)
+       block1 := unittest.BlockWithParentAndPayload(
+           head,
+           unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block1))
        require.NoError(t, err)
        err = state.Finalize(context.Background(), block1.ID())
        require.NoError(t, err)
@@ -705,7 +992,7 @@ func TestExtendEpochTransitionValid(t *testing.T) {

        // add a participant for the next epoch
        epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification))
-       epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.Canonical)
+       epoch2Participants := append(participants, epoch2NewParticipant).Sort(flow.Canonical[flow.Identity]).ToSkeleton()

        // create the epoch setup event for the second epoch
        epoch2Setup := unittest.EpochSetupFixture(
@@ -714,70 +1001,73 @@ func TestExtendEpochTransitionValid(t *testing.T) {
            unittest.WithFinalView(epoch1FinalView+1000),
            unittest.WithFirstView(epoch1FinalView+1),
        )

-       // create a receipt for block 1 containing the EpochSetup event
-       receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1)
-       receipt1.ExecutionResult.ServiceEvents = []flow.ServiceEvent{epoch2Setup.ServiceEvent()}
-       seal1.ResultID = receipt1.ExecutionResult.ID()
+       receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1, epoch2Setup.ServiceEvent())

        // add a second block with the receipt for block 1
-       block2 := unittest.BlockWithParentFixture(block1.Header)
-       block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1)))
+       block2 := unittest.BlockWithParentAndPayload(
+           block1.ToHeader(),
+           unittest.PayloadFixture(
+               unittest.WithReceipts(receipt1),
+               unittest.WithProtocolStateID(block1.Payload.ProtocolStateID),
+           ),
+       )

-       err = state.Extend(context.Background(), block2)
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2))
        require.NoError(t, err)
        err = state.Finalize(context.Background(), block2.ID())
        require.NoError(t, err)

        // block 3 contains the seal for block 1
-       block3 := unittest.BlockWithParentFixture(block2.Header)
-       block3.SetPayload(flow.Payload{
-           Seals: []*flow.Seal{seal1},
-       })
-
+       seals := []*flow.Seal{seal1}
+       block3View := block2.View + 1
+       block3 := unittest.BlockFixture(
+           unittest.Block.WithParent(block2.ID(), block2.View, block2.Height),
+           unittest.Block.WithPayload(
+               flow.Payload{
+                   Seals:           seals,
+                   ProtocolStateID: expectedStateIdCalculator(block2.ID(), block3View, seals),
+               }),
+       )
        // insert the block sealing the EpochSetup event
-       err = state.Extend(context.Background(), block3)
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block3))
        require.NoError(t, err)

        // now that the setup event has been emitted, we should be in the setup phase
-       phase, err = state.AtBlockID(block3.ID()).Phase()
+       phase, err = state.AtBlockID(block3.ID()).EpochPhase()
        assert.NoError(t, err)
        require.Equal(t, flow.EpochPhaseSetup, phase)
        // we should NOT be able to query epoch 2 wrt blocks before 3
        for _, blockID := range []flow.Identifier{block1.ID(), block2.ID()} {
-           _, err = state.AtBlockID(blockID).Epochs().Next().InitialIdentities()
-           require.Error(t, err)
-           _, err = state.AtBlockID(blockID).Epochs().Next().Clustering()
+           _, err = state.AtBlockID(blockID).Epochs().NextUnsafe()
            require.Error(t, err)
        }

-       // we should be able to query epoch 2 wrt block 3
-       _, err = state.AtBlockID(block3.ID()).Epochs().Next().InitialIdentities()
-       assert.NoError(t, err)
-       _, err = state.AtBlockID(block3.ID()).Epochs().Next().Clustering()
+       // we should be able to query epoch 2 as a TentativeEpoch wrt block 3
+       _, err = state.AtBlockID(block3.ID()).Epochs().NextUnsafe()
        assert.NoError(t, err)

-       // only setup event is finalized, not commit, so shouldn't be able to get certain info
-       _, err = state.AtBlockID(block3.ID()).Epochs().Next().DKG()
+       // only setup event is finalized, not commit, so shouldn't be able to read a CommittedEpoch
+       _, err = state.AtBlockID(block3.ID()).Epochs().NextCommitted()
        require.Error(t, err)

        // insert B4
-       block4 := unittest.BlockWithParentFixture(block3.Header)
-       err = state.Extend(context.Background(), block4)
+       block4 := unittest.BlockWithParentProtocolState(block3)
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block4))
        require.NoError(t, err)

-       consumer.On("EpochSetupPhaseStarted", epoch2Setup.Counter-1, block3.Header).Once()
+       consumer.On("EpochSetupPhaseStarted", epoch2Setup.Counter-1, block3.ToHeader()).Once()
        metrics.On("CurrentEpochPhase", flow.EpochPhaseSetup).Once()

        // finalize block 3, so we can finalize subsequent blocks
        // ensure an epoch phase transition when we finalize block 3
        err = state.Finalize(context.Background(), block3.ID())
        require.NoError(t, err)
-       consumer.AssertCalled(t, "EpochSetupPhaseStarted", epoch2Setup.Counter-1, block3.Header)
+       consumer.AssertCalled(t, "EpochSetupPhaseStarted", epoch2Setup.Counter-1, block3.ToHeader())
        metrics.AssertCalled(t, "CurrentEpochPhase", flow.EpochPhaseSetup)

        // now that the setup event has been emitted, we should be in the setup phase
-       phase, err = state.AtBlockID(block3.ID()).Phase()
+       phase, err = state.AtBlockID(block3.ID()).EpochPhase()
        require.NoError(t, err)
        require.Equal(t, flow.EpochPhaseSetup, phase)

@@ -788,131 +1078,137 @@ func TestExtendEpochTransitionValid(t *testing.T) {
        epoch2Commit := unittest.EpochCommitFixture(
            unittest.CommitWithCounter(epoch2Setup.Counter),
            unittest.WithClusterQCsFromAssignments(epoch2Setup.Assignments),
-           unittest.WithDKGFromParticipants(epoch2Participants),
+           unittest.WithDKGFromParticipants(epoch2Participants.ToSkeleton()),
        )

-       // create receipt and seal for block 2
        // the receipt for block 2 contains the EpochCommit event
-       receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2)
-       receipt2.ExecutionResult.ServiceEvents = []flow.ServiceEvent{epoch2Commit.ServiceEvent()}
-       seal2.ResultID = receipt2.ExecutionResult.ID()
+       receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2, epoch2Commit.ServiceEvent())

        // block 5 contains the receipt for block 2
-       block5 := unittest.BlockWithParentFixture(block4.Header)
-       block5.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt2)))
+       block5 := unittest.BlockWithParentAndPayload(
+           block4.ToHeader(),
+           unittest.PayloadFixture(
+               unittest.WithReceipts(receipt2),
+               unittest.WithProtocolStateID(block4.Payload.ProtocolStateID),
+           ),
+       )

-       err = state.Extend(context.Background(), block5)
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block5))
        require.NoError(t, err)
        err = state.Finalize(context.Background(), block5.ID())
        require.NoError(t, err)

        // block 6 contains the seal for block 2
-       block6 := unittest.BlockWithParentFixture(block5.Header)
-       block6.SetPayload(flow.Payload{
-           Seals: []*flow.Seal{seal2},
-       })
-
-       err = state.Extend(context.Background(), block6)
+       seals = []*flow.Seal{seal2}
+       block6View := block5.View + 1
+       block6 := unittest.BlockFixture(
+           unittest.Block.WithParent(block5.ID(), block5.View, block5.Height),
+           unittest.Block.WithPayload(
+               flow.Payload{
+                   Seals:           seals,
+                   ProtocolStateID: expectedStateIdCalculator(block5.ID(), block6View, seals),
+               }),
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block6))
        require.NoError(t, err)

        // we should NOT be able to query epoch 2 commit info wrt blocks before 6
        for _, blockID := range []flow.Identifier{block4.ID(), block5.ID()} {
-           _, err = state.AtBlockID(blockID).Epochs().Next().DKG()
+           _, err = state.AtBlockID(blockID).Epochs().NextCommitted()
            require.Error(t, err)
        }

-       // now epoch 2 is fully ready, we can query anything we want about it wrt block 6 (or later)
-       _, err = state.AtBlockID(block6.ID()).Epochs().Next().InitialIdentities()
+       // now epoch 2 is committed, we can query anything we want about it wrt block 6 (or later)
+       _, err = state.AtBlockID(block6.ID()).Epochs().NextCommitted()
        require.NoError(t, err)
-       _, err = state.AtBlockID(block6.ID()).Epochs().Next().Clustering()
-       require.NoError(t, err)
-       _, err = state.AtBlockID(block6.ID()).Epochs().Next().DKG()
-       assert.NoError(t, err)

        // now that the commit event has been emitted, we should be in the committed phase
-       phase, err = state.AtBlockID(block6.ID()).Phase()
+       phase, err = state.AtBlockID(block6.ID()).EpochPhase()
        assert.NoError(t, err)
        require.Equal(t, flow.EpochPhaseCommitted, phase)
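Aside: this diff consistently splits the old Epochs().Next() accessor into NextUnsafe() (a tentative epoch, readable once the setup event is sealed) and NextCommitted() (readable only once the commit event is sealed). A rough sketch of the two-accessor shape these assertions imply — simplified hypothetical interfaces, not flow-go's actual definitions:

package main

import (
	"errors"
	"fmt"
)

type phase int

const (
	staking phase = iota
	setup
	committed
)

var errNotYetAvailable = errors.New("epoch data not yet available at this reference block")

// tentativeEpoch exposes only data fixed by the EpochSetup event.
type tentativeEpoch struct{ counter uint64 }

// committedEpoch additionally exposes data fixed by the EpochCommit event (DKG, cluster QCs, ...).
type committedEpoch struct{ tentativeEpoch }

type epochQuery struct{ currentPhase phase }

// NextUnsafe succeeds once the next epoch's setup event is sealed.
func (q epochQuery) NextUnsafe() (*tentativeEpoch, error) {
	if q.currentPhase < setup {
		return nil, errNotYetAvailable
	}
	return &tentativeEpoch{counter: 2}, nil
}

// NextCommitted succeeds only once the next epoch is fully committed.
func (q epochQuery) NextCommitted() (*committedEpoch, error) {
	if q.currentPhase < committed {
		return nil, errNotYetAvailable
	}
	return &committedEpoch{tentativeEpoch{counter: 2}}, nil
}

func main() {
	q := epochQuery{currentPhase: setup}
	_, err1 := q.NextUnsafe()    // nil: setup event is sealed
	_, err2 := q.NextCommitted() // error: commit event not yet sealed
	fmt.Println(err1, err2)
}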
-       // block 7 has the final view of the epoch, insert it, finalized after finalizing block 6
-       block7 := unittest.BlockWithParentFixture(block6.Header)
-       block7.SetPayload(flow.EmptyPayload())
-       block7.Header.View = epoch1FinalView
-       err = state.Extend(context.Background(), block7)
+       // block 7 has the final view of the epoch; insert it now, it is finalized after block 6
+       block7 := unittest.BlockWithParentProtocolState(block6)
+       block7.View = epoch1FinalView
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block7))
        require.NoError(t, err)

        // expect epoch phase transition once we finalize block 6
-       consumer.On("EpochCommittedPhaseStarted", epoch2Setup.Counter-1, block6.Header).Once()
-       // expect committed final view to be updated, since we are committing epoch 2
-       metrics.On("CommittedEpochFinalView", epoch2Setup.FinalView).Once()
+       consumer.On("EpochCommittedPhaseStarted", epoch2Setup.Counter-1, block6.ToHeader()).Once()
        metrics.On("CurrentEpochPhase", flow.EpochPhaseCommitted).Once()
        err = state.Finalize(context.Background(), block6.ID())
        require.NoError(t, err)
-       consumer.AssertCalled(t, "EpochCommittedPhaseStarted", epoch2Setup.Counter-1, block6.Header)
-       metrics.AssertCalled(t, "CommittedEpochFinalView", epoch2Setup.FinalView)
+       consumer.AssertCalled(t, "EpochCommittedPhaseStarted", epoch2Setup.Counter-1, block6.ToHeader())
        metrics.AssertCalled(t, "CurrentEpochPhase", flow.EpochPhaseCommitted)

        // we should still be in epoch 1
-       epochCounter, err := state.AtBlockID(block4.ID()).Epochs().Current().Counter()
+       block4epoch, err := state.AtBlockID(block4.ID()).Epochs().Current()
        require.NoError(t, err)
-       require.Equal(t, epoch1Setup.Counter, epochCounter)
+       require.Equal(t, epoch1Setup.Counter, block4epoch.Counter())

        err = state.Finalize(context.Background(), block7.ID())
        require.NoError(t, err)

        // we should still be in epoch 1, since epochs are inclusive of final view
-       epochCounter, err = state.AtBlockID(block7.ID()).Epochs().Current().Counter()
+       block7epoch, err := state.AtBlockID(block7.ID()).Epochs().Current()
        require.NoError(t, err)
-       require.Equal(t, epoch1Setup.Counter, epochCounter)
+       require.Equal(t, epoch1Setup.Counter, block7epoch.Counter())

        // block 8 has a view > final view of epoch 1, it will be considered the first block of epoch 2
-       block8 := unittest.BlockWithParentFixture(block7.Header)
-       block8.SetPayload(flow.EmptyPayload())
        // we should handle views that aren't exactly the first valid view of the epoch
-       block8.Header.View = epoch1FinalView + uint64(1+rand.Intn(10))
-
-       err = state.Extend(context.Background(), block8)
+       block8View := epoch1FinalView + uint64(1+rand.Intn(10))
+       block8 := unittest.BlockFixture(
+           unittest.Block.WithParent(block7.ID(), block7.View, block7.Height),
+           unittest.Block.WithView(block8View),
+           unittest.Block.WithPayload(
+               flow.Payload{
+                   ProtocolStateID: expectedStateIdCalculator(block7.ID(), block8View, nil),
+               }),
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block8))
        require.NoError(t, err)

        // now, at long last, we are in epoch 2
-       epochCounter, err = state.AtBlockID(block8.ID()).Epochs().Current().Counter()
+       block8epoch, err := state.AtBlockID(block8.ID()).Epochs().Current()
        require.NoError(t, err)
-       require.Equal(t, epoch2Setup.Counter, epochCounter)
+       require.Equal(t, epoch2Setup.Counter, block8epoch.Counter())

        // we should begin epoch 2 in staking phase
-       // how that the commit event has been emitted, we should be in the committed phase
-       phase, err = state.AtBlockID(block8.ID()).Phase()
+       // now that we have entered view range of epoch 2, should be in staking phase
+       phase, err = state.AtBlockID(block8.ID()).EpochPhase()
        assert.NoError(t, err)
        require.Equal(t, flow.EpochPhaseStaking, phase)

-       // expect epoch transition once we finalize block 9
-       consumer.On("EpochTransition", epoch2Setup.Counter, block8.Header).Once()
-       metrics.On("EpochTransitionHeight", block8.Header.Height).Once()
+       // expect epoch transition once we finalize block 8
+       consumer.On("EpochTransition", epoch2Setup.Counter, block8.ToHeader()).Once()
+       metrics.On("EpochTransitionHeight", block8.Height).Once()
        metrics.On("CurrentEpochCounter", epoch2Setup.Counter).Once()
        metrics.On("CurrentEpochPhase", flow.EpochPhaseStaking).Once()
        metrics.On("CurrentEpochFinalView", epoch2Setup.FinalView).Once()
-       metrics.On("CurrentDKGPhase1FinalView", epoch2Setup.DKGPhase1FinalView).Once()
-       metrics.On("CurrentDKGPhase2FinalView", epoch2Setup.DKGPhase2FinalView).Once()
-       metrics.On("CurrentDKGPhase3FinalView", epoch2Setup.DKGPhase3FinalView).Once()
+       metrics.On("CurrentDKGPhaseViews", epoch2Setup.DKGPhase1FinalView, epoch2Setup.DKGPhase2FinalView, epoch2Setup.DKGPhase3FinalView).Once()

-       // before block 9 is finalized, the epoch 1-2 boundary is unknown
-       _, err = state.AtBlockID(block8.ID()).Epochs().Current().FinalHeight()
-       assert.ErrorIs(t, err, realprotocol.ErrEpochTransitionNotFinalized)
-       _, err = state.AtBlockID(block8.ID()).Epochs().Current().FirstHeight()
-       assert.ErrorIs(t, err, realprotocol.ErrEpochTransitionNotFinalized)
+       // before block 8 is finalized, the epoch 1-2 boundary is unknown
+       _, err = block8epoch.FinalHeight()
+       assert.ErrorIs(t, err, realprotocol.ErrUnknownEpochBoundary)
+       _, err = block8epoch.FirstHeight()
+       assert.ErrorIs(t, err, realprotocol.ErrUnknownEpochBoundary)

        err = state.Finalize(context.Background(), block8.ID())
        require.NoError(t, err)

        // once block 8 is finalized, epoch 2 has unambiguously begun - the epoch 1-2 boundary is known
-       epoch1FinalHeight, err := state.AtBlockID(block8.ID()).Epochs().Previous().FinalHeight()
+       block8previous, err := state.AtBlockID(block8.ID()).Epochs().Previous()
+       require.NoError(t, err)
+       epoch1FinalHeight, err := block8previous.FinalHeight()
+       require.NoError(t, err)
+       assert.Equal(t, block7.Height, epoch1FinalHeight)
+       block8epoch, err = state.AtBlockID(block8.ID()).Epochs().Current()
        require.NoError(t, err)
-       assert.Equal(t, block7.Header.Height, epoch1FinalHeight)
-       epoch2FirstHeight, err := state.AtBlockID(block8.ID()).Epochs().Current().FirstHeight()
+       epoch2FirstHeight, err := block8epoch.FirstHeight()
        require.NoError(t, err)
-       assert.Equal(t, block8.Header.Height, epoch2FirstHeight)
+       assert.Equal(t, block8.Height, epoch2FirstHeight)
    })
 }

@@ -923,40 +1219,60 @@ func TestExtendEpochTransitionValid(t *testing.T) {
 // ROOT <--+
 //         \--B2<--B4(R2)<--B6(S2)<--B8
 func TestExtendConflictingEpochEvents(t *testing.T) {
-   rootSnapshot := unittest.RootSnapshotFixture(participants)
-   util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
+   // add more collectors so that we can have multiple distinct cluster assignments
+   extraCollectors := unittest.IdentityListFixture(2, func(identity *flow.Identity) {
+       identity.Role = flow.RoleCollection
+   })
+   rootSnapshot := unittest.RootSnapshotFixture(append(participants, extraCollectors...))
+   rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot)
+   util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
+       expectedStateIdCalculator := calculateExpectedStateId(t, mutableState)
        head, err := rootSnapshot.Head()
        require.NoError(t, err)
        result, _, err := rootSnapshot.SealedResult()
        require.NoError(t, err)

+       // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where
+       // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view.
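Aside: the unique-view bookkeeping described in the comment above is driven by a nextUnusedViewSince helper, which the block fixtures below call but which is defined elsewhere in this PR. One plausible shape for that helper, shown purely for orientation (the real implementation may differ):

// nextUnusedViewSince returns the smallest view strictly greater than parentView
// that has not yet been assigned to any block, and records it as used.
// This keeps the two forks conflict-free: no two blocks ever share a view.
func nextUnusedViewSince(parentView uint64, usedViews map[uint64]struct{}) uint64 {
	view := parentView + 1
	for {
		if _, taken := usedViews[view]; !taken {
			usedViews[view] = struct{}{}
			return view
		}
		view++
	}
}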
+       usedViews := make(map[uint64]struct{})
+       usedViews[head.View] = struct{}{}
+
        // add two conflicting blocks for each service event to reference
-       block1 := unittest.BlockWithParentFixture(head)
-       block1.SetPayload(flow.EmptyPayload())
-       err = state.Extend(context.Background(), block1)
+       block1 := unittest.BlockWithParentAndPayloadAndUniqueView(
+           head,
+           unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+           usedViews,
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block1))
        require.NoError(t, err)

-       block2 := unittest.BlockWithParentFixture(head)
-       block2.SetPayload(flow.EmptyPayload())
-       err = state.Extend(context.Background(), block2)
+       block2 := unittest.BlockWithParentAndPayloadAndUniqueView(
+           head,
+           unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+           usedViews,
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2))
        require.NoError(t, err)

        rootSetup := result.ServiceEvents[0].Event.(*flow.EpochSetup)

-       // create two conflicting epoch setup events for the next epoch (final view differs)
+       // create two conflicting epoch setup events for the next epoch (clustering differs)
        nextEpochSetup1 := unittest.EpochSetupFixture(
            unittest.WithParticipants(rootSetup.Participants),
            unittest.SetupWithCounter(rootSetup.Counter+1),
            unittest.WithFinalView(rootSetup.FinalView+1000),
            unittest.WithFirstView(rootSetup.FinalView+1),
        )
+       nextEpochSetup1.Assignments = unittest.ClusterAssignment(1, rootSetup.Participants)
        nextEpochSetup2 := unittest.EpochSetupFixture(
            unittest.WithParticipants(rootSetup.Participants),
            unittest.SetupWithCounter(rootSetup.Counter+1),
-           unittest.WithFinalView(rootSetup.FinalView+2000), // final view differs
+           unittest.WithFinalView(rootSetup.FinalView+1000),
            unittest.WithFirstView(rootSetup.FinalView+1),
        )
+       nextEpochSetup2.Assignments = unittest.ClusterAssignment(2, rootSetup.Participants)
+       assert.NotEqual(t, nextEpochSetup1.Assignments, nextEpochSetup2.Assignments)

        // add blocks containing receipts for block1 and block2 (necessary for sealing)
        // block 1 receipt contains nextEpochSetup1
@@ -964,12 +1280,16 @@ func TestExtendConflictingEpochEvents(t *testing.T) {
        block1Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{nextEpochSetup1.ServiceEvent()}

        // add block 1 receipt to block 3 payload
-       block3 := unittest.BlockWithParentFixture(block1.Header)
-       block3.SetPayload(flow.Payload{
-           Receipts: []*flow.ExecutionReceiptMeta{block1Receipt.Meta()},
-           Results:  []*flow.ExecutionResult{&block1Receipt.ExecutionResult},
-       })
-       err = state.Extend(context.Background(), block3)
+       block3 := unittest.BlockWithParentAndPayloadAndUniqueView(
+           block1.ToHeader(),
+           flow.Payload{
+               Receipts:        []*flow.ExecutionReceiptStub{block1Receipt.Stub()},
+               Results:         []*flow.ExecutionResult{&block1Receipt.ExecutionResult},
+               ProtocolStateID: block1.Payload.ProtocolStateID,
+           },
+           usedViews,
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block3))
        require.NoError(t, err)

        // block 2 receipt contains nextEpochSetup2
@@ -977,54 +1297,78 @@ func TestExtendConflictingEpochEvents(t *testing.T) {
        block2Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{nextEpochSetup2.ServiceEvent()}

        // add block 2 receipt to block 4 payload
-       block4 := unittest.BlockWithParentFixture(block2.Header)
-       block4.SetPayload(flow.Payload{
-           Receipts: []*flow.ExecutionReceiptMeta{block2Receipt.Meta()},
-           Results:  []*flow.ExecutionResult{&block2Receipt.ExecutionResult},
-       })
-       err = state.Extend(context.Background(), block4)
+       block4 := unittest.BlockWithParentAndPayloadAndUniqueView(
+           block2.ToHeader(),
+           flow.Payload{
+               Receipts:        []*flow.ExecutionReceiptStub{block2Receipt.Stub()},
+               Results:         []*flow.ExecutionResult{&block2Receipt.ExecutionResult},
+               ProtocolStateID: block2.Payload.ProtocolStateID,
+           },
+           usedViews,
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block4))
        require.NoError(t, err)

        // seal for block 1
-       seal1 := unittest.Seal.Fixture(unittest.Seal.WithResult(&block1Receipt.ExecutionResult))
+       seals1 := []*flow.Seal{unittest.Seal.Fixture(unittest.Seal.WithResult(&block1Receipt.ExecutionResult))}

        // seal for block 2
-       seal2 := unittest.Seal.Fixture(unittest.Seal.WithResult(&block2Receipt.ExecutionResult))
+       seals2 := []*flow.Seal{unittest.Seal.Fixture(unittest.Seal.WithResult(&block2Receipt.ExecutionResult))}

        // block 5 builds on block 3, contains seal for block 1
-       block5 := unittest.BlockWithParentFixture(block3.Header)
-       block5.SetPayload(flow.Payload{
-           Seals: []*flow.Seal{seal1},
-       })
-       err = state.Extend(context.Background(), block5)
+       block5View := nextUnusedViewSince(block3.View, usedViews)
+       block5 := unittest.BlockFixture(
+           unittest.Block.WithParent(block3.ID(), block3.View, block3.Height),
+           unittest.Block.WithView(block5View),
+           unittest.Block.WithPayload(
+               flow.Payload{
+                   Seals:           seals1,
+                   ProtocolStateID: expectedStateIdCalculator(block3.ID(), block5View, seals1),
+               }),
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block5))
        require.NoError(t, err)

        // block 6 builds on block 4, contains seal for block 2
-       block6 := unittest.BlockWithParentFixture(block4.Header)
-       block6.SetPayload(flow.Payload{
-           Seals: []*flow.Seal{seal2},
-       })
-       err = state.Extend(context.Background(), block6)
+       block6View := nextUnusedViewSince(block4.View, usedViews)
+       block6 := unittest.BlockFixture(
+           unittest.Block.WithParent(block4.ID(), block4.View, block4.Height),
+           unittest.Block.WithView(block6View),
+           unittest.Block.WithPayload(
+               flow.Payload{
+                   Seals:           seals2,
+                   ProtocolStateID: expectedStateIdCalculator(block4.ID(), block6View, seals2),
+               }),
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block6))
        require.NoError(t, err)

-       // block 7 builds on block 5, contains QC for block 7
-       block7 := unittest.BlockWithParentFixture(block5.Header)
-       err = state.Extend(context.Background(), block7)
+       // block 7 builds on block 5, contains QC for block 5
+       block7 := unittest.BlockWithParentProtocolStateAndUniqueView(block5, usedViews)
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block7))
        require.NoError(t, err)

        // block 8 builds on block 6, contains QC for block 6
-       block8 := unittest.BlockWithParentFixture(block6.Header)
-       err = state.Extend(context.Background(), block8)
+       block8 := unittest.BlockWithParentProtocolStateAndUniqueView(block6, usedViews)
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block8))
        require.NoError(t, err)

-       // should be able query each epoch from the appropriate reference block
-       setup1FinalView, err := state.AtBlockID(block7.ID()).Epochs().Next().FinalView()
+       // should be able to query each epoch from the appropriate reference block
+       nextEpoch1, err := state.AtBlockID(block7.ID()).Epochs().NextUnsafe()
+       require.NoError(t, err)
+       setup1clustering, err := nextEpoch1.Clustering()
        assert.NoError(t, err)
-       require.Equal(t, nextEpochSetup1.FinalView, setup1FinalView)
+       require.Equal(t, nextEpochSetup1.Assignments, setup1clustering.Assignments())
-       setup2FinalView, err := state.AtBlockID(block8.ID()).Epochs().Next().FinalView()
+       phase, err := state.AtBlockID(block8.ID()).EpochPhase()
+       assert.NoError(t, err)
+       require.Equal(t, phase, flow.EpochPhaseSetup)
+       nextEpoch2, err := state.AtBlockID(block8.ID()).Epochs().NextUnsafe()
+       require.NoError(t, err)
+       setup2clustering, err := nextEpoch2.Clustering()
        assert.NoError(t, err)
-       require.Equal(t, nextEpochSetup2.FinalView, setup2FinalView)
+       require.Equal(t, nextEpochSetup2.Assignments, setup2clustering.Assignments())
+
    })
 }

@@ -1036,22 +1380,35 @@ func TestExtendConflictingEpochEvents(t *testing.T) {
 //         \--B2<--B4(R2)<--B6(S2)<--B8
 func TestExtendDuplicateEpochEvents(t *testing.T) {
    rootSnapshot := unittest.RootSnapshotFixture(participants)
-   util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
+   rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot)
+   util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
+       expectedStateIdCalculator := calculateExpectedStateId(t, mutableState)
        head, err := rootSnapshot.Head()
        require.NoError(t, err)
        result, _, err := rootSnapshot.SealedResult()
        require.NoError(t, err)

+       // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where
+       // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view.
+       usedViews := make(map[uint64]struct{})
+       usedViews[head.View] = struct{}{}
+
        // add two conflicting blocks for each service event to reference
-       block1 := unittest.BlockWithParentFixture(head)
-       block1.SetPayload(flow.EmptyPayload())
-       err = state.Extend(context.Background(), block1)
+       block1 := unittest.BlockWithParentAndPayloadAndUniqueView(
+           head,
+           unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+           usedViews,
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block1))
        require.NoError(t, err)

-       block2 := unittest.BlockWithParentFixture(head)
-       block2.SetPayload(flow.EmptyPayload())
-       err = state.Extend(context.Background(), block2)
+       block2 := unittest.BlockWithParentAndPayloadAndUniqueView(
+           head,
+           unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+           usedViews,
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2))
        require.NoError(t, err)

        rootSetup := result.ServiceEvents[0].Event.(*flow.EpochSetup)

@@ -1070,9 +1427,15 @@ func TestExtendDuplicateEpochEvents(t *testing.T) {
        block1Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{nextEpochSetup.ServiceEvent()}

        // add block 1 receipt to block 3 payload
-       block3 := unittest.BlockWithParentFixture(block1.Header)
-       block3.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(block1Receipt)))
-       err = state.Extend(context.Background(), block3)
+       block3 := unittest.BlockWithParentAndPayloadAndUniqueView(
+           block1.ToHeader(),
+           unittest.PayloadFixture(
+               unittest.WithReceipts(block1Receipt),
+               unittest.WithProtocolStateID(rootProtocolStateID),
+           ),
+           usedViews,
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block3))
        require.NoError(t, err)

-       // block 2 receipt contains nextEpochSetup2
+       // block 2 receipt contains the same (duplicate) nextEpochSetup event
@@ -1080,52 +1443,70 @@ func TestExtendDuplicateEpochEvents(t *testing.T) {
        block2Receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{nextEpochSetup.ServiceEvent()}

        // add block 2 receipt to block 4 payload
-       block4 := unittest.BlockWithParentFixture(block2.Header)
-       block4.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(block2Receipt)))
-       err = state.Extend(context.Background(), block4)
+       block4 := unittest.BlockWithParentAndPayloadAndUniqueView(
+           block2.ToHeader(),
+           unittest.PayloadFixture(
+               unittest.WithReceipts(block2Receipt),
+               unittest.WithProtocolStateID(rootProtocolStateID),
+           ),
+           usedViews,
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block4))
        require.NoError(t, err)

        // seal for block 1
-       seal1 := unittest.Seal.Fixture(unittest.Seal.WithResult(&block1Receipt.ExecutionResult))
+       seals1 := []*flow.Seal{unittest.Seal.Fixture(unittest.Seal.WithResult(&block1Receipt.ExecutionResult))}

        // seal for block 2
-       seal2 := unittest.Seal.Fixture(unittest.Seal.WithResult(&block2Receipt.ExecutionResult))
+       seals2 := []*flow.Seal{unittest.Seal.Fixture(unittest.Seal.WithResult(&block2Receipt.ExecutionResult))}

        // block 5 builds on block 3, contains seal for block 1
-       block5 := unittest.BlockWithParentFixture(block3.Header)
-       block5.SetPayload(flow.Payload{
-           Seals: []*flow.Seal{seal1},
-       })
-       err = state.Extend(context.Background(), block5)
+       block5View := nextUnusedViewSince(block3.View, usedViews)
+       block5 := unittest.BlockFixture(
+           unittest.Block.WithParent(block3.ID(), block3.View, block3.Height),
+           unittest.Block.WithView(block5View),
+           unittest.Block.WithPayload(
+               flow.Payload{
+                   Seals:           seals1,
+                   ProtocolStateID: expectedStateIdCalculator(block3.ID(), block5View, seals1),
+               }),
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block5))
        require.NoError(t, err)

        // block 6 builds on block 4, contains seal for block 2
-       block6 := unittest.BlockWithParentFixture(block4.Header)
-       block6.SetPayload(flow.Payload{
-           Seals: []*flow.Seal{seal2},
-       })
-       err = state.Extend(context.Background(), block6)
+       block6View := nextUnusedViewSince(block4.View, usedViews)
+       block6 := unittest.BlockFixture(
+           unittest.Block.WithParent(block4.ID(), block4.View, block4.Height),
+           unittest.Block.WithView(block6View),
+           unittest.Block.WithPayload(
+               flow.Payload{
+                   Seals:           seals2,
+                   ProtocolStateID: expectedStateIdCalculator(block4.ID(), block6View, seals2),
+               }),
+       )
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block6))
        require.NoError(t, err)

-       // block 7 builds on block 5, contains QC for block 7
-       block7 := unittest.BlockWithParentFixture(block5.Header)
-       err = state.Extend(context.Background(), block7)
+       // block 7 builds on block 5, contains QC for block 5
+       block7 := unittest.BlockWithParentProtocolStateAndUniqueView(block5, usedViews)
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block7))
        require.NoError(t, err)

        // block 8 builds on block 6, contains QC for block 6
        // at this point we are inserting the duplicate EpochSetup, should not error
-       block8 := unittest.BlockWithParentFixture(block6.Header)
-       err = state.Extend(context.Background(), block8)
+       block8 := unittest.BlockWithParentProtocolStateAndUniqueView(block6, usedViews)
+       err = state.Extend(context.Background(), unittest.ProposalFromBlock(block8))
        require.NoError(t, err)

-       // should be able query each epoch from the appropriate reference block
-       finalView, err := state.AtBlockID(block7.ID()).Epochs().Next().FinalView()
-       assert.NoError(t, err)
-       require.Equal(t, nextEpochSetup.FinalView, finalView)
+       // should be able to query each epoch from the appropriate reference block
+       block7next, err := state.AtBlockID(block7.ID()).Epochs().NextUnsafe()
+       require.NoError(t, err)
+       require.Equal(t, nextEpochSetup.Participants, block7next.InitialIdentities())

-       finalView, err = state.AtBlockID(block8.ID()).Epochs().Next().FinalView()
-       assert.NoError(t, err)
-       require.Equal(t, nextEpochSetup.FinalView, finalView)
+       block8next, err := state.AtBlockID(block8.ID()).Epochs().NextUnsafe()
+       require.NoError(t, err)
+       require.Equal(t, nextEpochSetup.Participants, block8next.InitialIdentities())
    })
 }

@@ -1133,11 +1514,12 @@ func TestExtendDuplicateEpochEvents(t *testing.T) {
 // service event should trigger epoch fallback when the fork is finalized.
 func TestExtendEpochSetupInvalid(t *testing.T) {
    rootSnapshot := unittest.RootSnapshotFixture(participants)
+   rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot)

    // setupState initializes the protocol state for a test case
    // * creates and finalizes a new block for the first seal to reference
-   // * creates a factory method for test cases to generated valid EpochSetup events
-   setupState := func(t *testing.T, db *badger.DB, state *protocol.ParticipantState) (
+   // * creates a factory method for test cases to generate valid EpochSetup events
+   setupState := func(t *testing.T, _ storage.DB, state *protocol.ParticipantState) (
        *flow.Block,
        func(...func(*flow.EpochSetup)) (*flow.EpochSetup, *flow.ExecutionReceipt, *flow.Seal),
    ) {
@@ -1148,15 +1530,17 @@ func TestExtendEpochSetupInvalid(t *testing.T) {
        require.NoError(t, err)

        // add a block for the first seal to reference
-       block1 := unittest.BlockWithParentFixture(head)
-       block1.SetPayload(flow.EmptyPayload())
+       block1 := unittest.BlockWithParentAndPayload(
+           head,
+           unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+       )
        unittest.InsertAndFinalize(t, state, block1)

        epoch1Setup := result.ServiceEvents[0].Event.(*flow.EpochSetup)

        // add a participant for the next epoch
        epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification))
-       epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.Canonical)
+       epoch2Participants := append(participants, epoch2NewParticipant).Sort(flow.Canonical[flow.Identity]).ToSkeleton()

        // this function will return a VALID setup event and seal, we will modify
        // in different ways in each test case
@@ -1170,75 +1554,95 @@ func TestExtendEpochSetupInvalid(t *testing.T) {
            for _, apply := range opts {
                apply(setup)
            }
-           receipt, seal := unittest.ReceiptAndSealForBlock(block1)
-           receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{setup.ServiceEvent()}
-           seal.ResultID = receipt.ExecutionResult.ID()
+           receipt, seal := unittest.ReceiptAndSealForBlock(block1, setup.ServiceEvent())
            return setup, receipt, seal
        }

        return block1, createSetupEvent
    }

-   // expect a setup event with wrong counter to trigger EECC without error
-   t.Run("wrong counter (EECC)", func(t *testing.T) {
-       util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
+   // expect a setup event with wrong counter to trigger EFM without error
+   t.Run("wrong counter [EFM]", func(t *testing.T) {
+       util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
            block1, createSetup := setupState(t, db, state)

            _, receipt, seal := createSetup(func(setup *flow.EpochSetup) {
                setup.Counter = rand.Uint64()
            })

-           receiptBlock, sealingBlock := unittest.SealBlock(t, state, block1, receipt, seal)
+           receiptBlock, sealingBlock := unittest.SealBlock(t, state, mutableState, block1, receipt, seal)
            err := state.Finalize(context.Background(), receiptBlock.ID())
            require.NoError(t, err)
            // epoch fallback not triggered before finalization
-           assertEpochEmergencyFallbackTriggered(t, state, false)
+           assertEpochFallbackTriggered(t, state.Final(), false)
            err = state.Finalize(context.Background(), sealingBlock.ID())
            require.NoError(t, err)
            // epoch fallback triggered after finalization
-           assertEpochEmergencyFallbackTriggered(t, state, true)
+           assertEpochFallbackTriggered(t, state.Final(), true)
        })
    })

-   // expect a setup event with wrong final view to trigger EECC without error
-   t.Run("invalid final view (EECC)", func(t *testing.T) {
-       util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
+   // expect a setup event with wrong final view to trigger EFM without error
+   t.Run("invalid final view [EFM]", func(t *testing.T) {
+       util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
            block1, createSetup := setupState(t, db, state)

            _, receipt, seal := createSetup(func(setup *flow.EpochSetup) {
-               setup.FinalView = block1.Header.View
+               setup.FinalView = block1.View
            })

-           receiptBlock, sealingBlock := unittest.SealBlock(t, state, block1, receipt, seal)
+           receiptBlock, sealingBlock := unittest.SealBlock(t, state, mutableState, block1, receipt, seal)
            err := state.Finalize(context.Background(), receiptBlock.ID())
            require.NoError(t, err)
            // epoch fallback not triggered before finalization
-           assertEpochEmergencyFallbackTriggered(t, state, false)
+           assertEpochFallbackTriggered(t, state.Final(), false)
            err = state.Finalize(context.Background(), sealingBlock.ID())
            require.NoError(t, err)
            // epoch fallback triggered after finalization
-           assertEpochEmergencyFallbackTriggered(t, state, true)
+           assertEpochFallbackTriggered(t, state.Final(), true)
        })
    })

-   // expect a setup event with empty seed to trigger EECC without error
-   t.Run("empty seed (EECC)", func(t *testing.T) {
-       util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
+   // expect a setup event with empty seed to trigger EFM without error
+   t.Run("empty seed [EFM]", func(t *testing.T) {
+       util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
            block1, createSetup := setupState(t, db, state)

            _, receipt, seal := createSetup(func(setup *flow.EpochSetup) {
                setup.RandomSource = nil
            })

-           receiptBlock, sealingBlock := unittest.SealBlock(t, state, block1, receipt, seal)
+           receiptBlock, sealingBlock := unittest.SealBlock(t, state, mutableState, block1, receipt, seal)
+           err := state.Finalize(context.Background(), receiptBlock.ID())
+           require.NoError(t, err)
+           // epoch fallback not triggered before finalization
+           assertEpochFallbackTriggered(t, state.Final(), false)
+           err = state.Finalize(context.Background(), sealingBlock.ID())
+           require.NoError(t, err)
+           // epoch fallback triggered after finalization
+           assertEpochFallbackTriggered(t, state.Final(), true)
+       })
+   })
+
+   t.Run("participants not ordered [EFM]", func(t *testing.T) {
+       util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
+           block1, createSetup := setupState(t, db, state)
+
+           _, receipt, seal := createSetup(func(setup *flow.EpochSetup) {
+               var err error
+               setup.Participants, err = setup.Participants.Shuffle()
+               require.NoError(t, err)
+           })
+
+           receiptBlock, sealingBlock := unittest.SealBlock(t, state, mutableState, block1, receipt, seal)
            err := state.Finalize(context.Background(), receiptBlock.ID())
            require.NoError(t, err)
            // epoch fallback not triggered before finalization
-           assertEpochEmergencyFallbackTriggered(t, state, false)
+           assertEpochFallbackTriggered(t, state.Final(), false)
            err = state.Finalize(context.Background(), sealingBlock.ID())
            require.NoError(t, err)
            // epoch fallback triggered after finalization
-           assertEpochEmergencyFallbackTriggered(t, state, true)
+           assertEpochFallbackTriggered(t, state.Final(), true)
        })
    })
 }
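Aside: every EFM subtest above (and below) follows the same two-step activation: unittest.SealBlock extends the chain with one block carrying the receipt and a second block carrying the seal, and the invalid service event only takes effect when the sealing block is finalized. A toy model of that ordering, purely illustrative and not flow-go code:

package main

import "fmt"

type chainState struct {
	fallbackTriggered  bool
	sealedInvalidEvent bool
}

// finalizeReceiptBlock finalizes the block holding the receipt; the service
// event it references is still unsealed, so the protocol state does not change.
func (s *chainState) finalizeReceiptBlock() {}

// finalizeSealingBlock finalizes the seal; only now is the service event
// processed, and an invalid event flips the chain into epoch fallback mode.
func (s *chainState) finalizeSealingBlock(eventValid bool) {
	s.sealedInvalidEvent = !eventValid
	if s.sealedInvalidEvent {
		s.fallbackTriggered = true
	}
}

func main() {
	var s chainState
	s.finalizeReceiptBlock()
	fmt.Println(s.fallbackTriggered) // false: receipt finalized, seal not yet
	s.finalizeSealingBlock(false)
	fmt.Println(s.fallbackTriggered) // true: invalid event sealed and finalized
}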
@@ -1247,6 +1651,7 @@ func TestExtendEpochSetupInvalid(t *testing.T) {
 // service event should trigger epoch fallback when the fork is finalized.
 func TestExtendEpochCommitInvalid(t *testing.T) {
    rootSnapshot := unittest.RootSnapshotFixture(participants)
+   rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot)

    // setupState initializes the protocol state for a test case
    // * creates and finalizes a new block for the first seal to reference
@@ -1263,8 +1668,10 @@ func TestExtendEpochCommitInvalid(t *testing.T) {
        require.NoError(t, err)

        // add a block for the first seal to reference
-       block1 := unittest.BlockWithParentFixture(head)
-       block1.SetPayload(flow.EmptyPayload())
+       block1 := unittest.BlockWithParentAndPayload(
+           head,
+           unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+       )
        unittest.InsertAndFinalize(t, state, block1)

        epoch1Setup := result.ServiceEvents[0].Event.(*flow.EpochSetup)
@@ -1272,9 +1679,9 @@ func TestExtendEpochCommitInvalid(t *testing.T) {
        // swap consensus node for a new one for epoch 2
        epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus))
        epoch2Participants := append(
-           participants.Filter(filter.Not(filter.HasRole(flow.RoleConsensus))),
+           participants.Filter(filter.Not(filter.HasRole[flow.Identity](flow.RoleConsensus))),
            epoch2NewParticipant,
-       ).Sort(order.Canonical)
+       ).Sort(flow.Canonical[flow.Identity]).ToSkeleton()

-       // factory method to create a valid EpochSetup method w.r.t. the generated state
+       // factory method to create a valid EpochSetup event w.r.t. the generated state
        createSetup := func(block *flow.Block) (*flow.EpochSetup, *flow.ExecutionReceipt, *flow.Seal) {
@@ -1285,9 +1692,7 @@ func TestExtendEpochCommitInvalid(t *testing.T) {
                unittest.WithFirstView(epoch1Setup.FinalView+1),
            )

-           receipt, seal := unittest.ReceiptAndSealForBlock(block)
-           receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{setup.ServiceEvent()}
-           seal.ResultID = receipt.ExecutionResult.ID()
+           receipt, seal := unittest.ReceiptAndSealForBlock(block, setup.ServiceEvent())
            return setup, receipt, seal
        }
@@ -1300,294 +1705,247 @@ func TestExtendEpochCommitInvalid(t *testing.T) {
            for _, apply := range opts {
                apply(commit)
            }
-           receipt, seal := unittest.ReceiptAndSealForBlock(block)
-           receipt.ExecutionResult.ServiceEvents = []flow.ServiceEvent{commit.ServiceEvent()}
-           seal.ResultID = receipt.ExecutionResult.ID()
+           receipt, seal := unittest.ReceiptAndSealForBlock(block, commit.ServiceEvent())
            return commit, receipt, seal
        }

        return block1, createSetup, createCommit
    }

-   t.Run("without setup (EECC)", func(t *testing.T) {
-       util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
+   t.Run("without setup [EFM]", func(t *testing.T) {
+       util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
            block1, _, createCommit := setupState(t, state)

            _, receipt, seal := createCommit(block1)

-           receiptBlock, sealingBlock := unittest.SealBlock(t, state, block1, receipt, seal)
+           receiptBlock, sealingBlock := unittest.SealBlock(t, state, mutableState, block1, receipt, seal)
            err := state.Finalize(context.Background(), receiptBlock.ID())
            require.NoError(t, err)
            // epoch fallback not triggered before finalization
-           assertEpochEmergencyFallbackTriggered(t, state, false)
+           assertEpochFallbackTriggered(t, state.Final(), false)
            err = state.Finalize(context.Background(), sealingBlock.ID())
            require.NoError(t, err)
            // epoch fallback triggered after finalization
-           assertEpochEmergencyFallbackTriggered(t, state, true)
+           assertEpochFallbackTriggered(t, state.Final(), true)
        })
    })

-   // expect a commit event with wrong counter to trigger EECC without error
-   t.Run("inconsistent counter (EECC)", func(t *testing.T) {
-       util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
+   // expect a commit event with wrong counter to trigger EFM without error
+   t.Run("inconsistent counter [EFM]", func(t *testing.T) {
+       util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
            block1, createSetup, createCommit := setupState(t, state)

            // seal block 1, in which EpochSetup was emitted
            epoch2Setup, setupReceipt, setupSeal := createSetup(block1)
-           epochSetupReceiptBlock, epochSetupSealingBlock := unittest.SealBlock(t, state, block1, setupReceipt, setupSeal)
+           epochSetupReceiptBlock, epochSetupSealingBlock := unittest.SealBlock(t, state, mutableState, block1, setupReceipt, setupSeal)
            err := state.Finalize(context.Background(), epochSetupReceiptBlock.ID())
            require.NoError(t, err)
            err = state.Finalize(context.Background(), epochSetupSealingBlock.ID())
            require.NoError(t, err)

            // insert a block with a QC for block 2
-           block3 := unittest.BlockWithParentFixture(epochSetupSealingBlock)
+           block3 := unittest.BlockWithParentProtocolState(epochSetupSealingBlock)
            unittest.InsertAndFinalize(t, state, block3)

            _, receipt, seal := createCommit(block3, func(commit *flow.EpochCommit) {
                commit.Counter = epoch2Setup.Counter + 1
            })

-           receiptBlock, sealingBlock := unittest.SealBlock(t, state, block3, receipt, seal)
+           receiptBlock, sealingBlock := unittest.SealBlock(t, state, mutableState, block3, receipt, seal)
            err = state.Finalize(context.Background(), receiptBlock.ID())
            require.NoError(t, err)
            // epoch fallback not triggered before finalization
-           assertEpochEmergencyFallbackTriggered(t, state, false)
+           assertEpochFallbackTriggered(t, state.Final(), false)
            err = state.Finalize(context.Background(), sealingBlock.ID())
            require.NoError(t, err)
            // epoch fallback triggered after finalization
-           assertEpochEmergencyFallbackTriggered(t, state, true)
+           assertEpochFallbackTriggered(t, state.Final(), true)
        })
    })

-   // expect a commit event with wrong cluster QCs to trigger EECC without error
-   t.Run("inconsistent cluster QCs (EECC)", func(t *testing.T) {
-       util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
+   // expect a commit event with wrong cluster QCs to trigger EFM without error
+   t.Run("inconsistent cluster QCs [EFM]", func(t *testing.T) {
+       util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
            block1, createSetup, createCommit := setupState(t, state)

            // seal block 1, in which EpochSetup was emitted
            _, setupReceipt, setupSeal := createSetup(block1)
-           epochSetupReceiptBlock, epochSetupSealingBlock := unittest.SealBlock(t, state, block1, setupReceipt, setupSeal)
+           epochSetupReceiptBlock, epochSetupSealingBlock := unittest.SealBlock(t, state, mutableState, block1, setupReceipt, setupSeal)
            err := state.Finalize(context.Background(), epochSetupReceiptBlock.ID())
            require.NoError(t, err)
            err = state.Finalize(context.Background(), epochSetupSealingBlock.ID())
            require.NoError(t, err)

            // insert a block with a QC for block 2
-           block3 := unittest.BlockWithParentFixture(epochSetupSealingBlock)
+           block3 := unittest.BlockWithParentProtocolState(epochSetupSealingBlock)
            unittest.InsertAndFinalize(t, state, block3)

            _, receipt, seal := createCommit(block3, func(commit *flow.EpochCommit) {
                commit.ClusterQCs = append(commit.ClusterQCs, flow.ClusterQCVoteDataFromQC(unittest.QuorumCertificateWithSignerIDsFixture()))
            })

-           receiptBlock, sealingBlock := unittest.SealBlock(t, state, block3, receipt, seal)
+           receiptBlock, sealingBlock := unittest.SealBlock(t, state, mutableState, block3, receipt, seal)
            err = state.Finalize(context.Background(), receiptBlock.ID())
            require.NoError(t, err)
            // epoch fallback not triggered before finalization
-           assertEpochEmergencyFallbackTriggered(t, state, false)
+           assertEpochFallbackTriggered(t, state.Final(), false)
            err = state.Finalize(context.Background(), sealingBlock.ID())
            require.NoError(t, err)
            // epoch fallback triggered after finalization
-           assertEpochEmergencyFallbackTriggered(t, state, true)
+           assertEpochFallbackTriggered(t, state.Final(), true)
        })
    })

-   // expect a commit event with wrong dkg participants to trigger EECC without error
-   t.Run("inconsistent DKG participants (EECC)", func(t *testing.T) {
-       util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
+   // expect a commit event with wrong dkg participants to trigger EFM without error
+   t.Run("inconsistent DKG participants [EFM]", func(t *testing.T) {
+       util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) {
            block1, createSetup, createCommit := setupState(t, state)

            // seal block 1, in which EpochSetup was emitted
            _, setupReceipt, setupSeal := createSetup(block1)
-           epochSetupReceiptBlock, epochSetupSealingBlock := unittest.SealBlock(t, state, block1, setupReceipt, setupSeal)
+           epochSetupReceiptBlock, epochSetupSealingBlock := unittest.SealBlock(t, state, mutableState, block1, setupReceipt, setupSeal)
            err := state.Finalize(context.Background(), epochSetupReceiptBlock.ID())
            require.NoError(t, err)
            err = state.Finalize(context.Background(), epochSetupSealingBlock.ID())
            require.NoError(t, err)

            // insert a block with a QC for block 2
-           block3 := unittest.BlockWithParentFixture(epochSetupSealingBlock)
+           block3 := unittest.BlockWithParentProtocolState(epochSetupSealingBlock)
            unittest.InsertAndFinalize(t, state, block3)

            _, receipt, seal := createCommit(block3, func(commit *flow.EpochCommit) {
-               // add an extra dkg key
+               // add an extra Random Beacon key
                commit.DKGParticipantKeys = append(commit.DKGParticipantKeys, unittest.KeyFixture(crypto.BLSBLS12381).PublicKey())
            })

-           receiptBlock, sealingBlock := unittest.SealBlock(t, state, block3, receipt, seal)
+           receiptBlock, sealingBlock := unittest.SealBlock(t, state, mutableState, block3, receipt, seal)
            err = state.Finalize(context.Background(), receiptBlock.ID())
            require.NoError(t, err)
            // epoch fallback not triggered before finalization
-           assertEpochEmergencyFallbackTriggered(t, state, false)
+           assertEpochFallbackTriggered(t, state.Final(), false)
            err = state.Finalize(context.Background(), sealingBlock.ID())
            require.NoError(t, err)
            // epoch fallback triggered after finalization
-           assertEpochEmergencyFallbackTriggered(t, state, true)
+           assertEpochFallbackTriggered(t, state.Final(), true)
        })
    })
 }

-// if we reach the first block of the next epoch before both setup and commit
-// service events are finalized, the chain should halt
-//
-// ROOT <- B1 <- B2(R1) <- B3(S1) <- B4
-func TestExtendEpochTransitionWithoutCommit(t *testing.T) {
-
-   // skipping because this case will now result in emergency epoch continuation kicking in
-   unittest.SkipUnless(t, unittest.TEST_TODO, "disabled as the current implementation uses a temporary fallback measure in this case (triggers EECC), rather than returning an error")
+// TestEpochFallbackMode tests that epoch fallback mode is triggered
+// when an epoch fails to be committed before the epoch commitment deadline,
+// or when an invalid service event (indicating service account smart contract bug)
+// is sealed.
+func TestEpochFallbackMode(t *testing.T) {

-   rootSnapshot := unittest.RootSnapshotFixture(participants)
-   util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) {
-       head, err := rootSnapshot.Head()
-       require.NoError(t, err)
-       result, _, err := rootSnapshot.SealedResult()
-       require.NoError(t, err)
+   // if we finalize the first block past the epoch commitment deadline while
+   // in the EpochStaking phase, EFM should be triggered
+   //
+   //      Epoch Commitment Deadline
+   //      |  Epoch Boundary
+   //      |  |
+   //      v  v
+   // ROOT <- B1 <- B2
+   t.Run("passed epoch commitment deadline in EpochStaking phase - should trigger EFM", func(t *testing.T) {

-       // add a block for the first seal to reference
-       block1 := unittest.BlockWithParentFixture(head)
-       block1.SetPayload(flow.EmptyPayload())
-       err = state.Extend(context.Background(), block1)
-       require.NoError(t, err)
-       err = state.Finalize(context.Background(), block1.ID())
-       require.NoError(t, err)
+       rootSnapshot := unittest.RootSnapshotFixture(participants)
+       metricsMock := mockmodule.NewComplianceMetrics(t)
+       mockMetricsForRootSnapshot(metricsMock, rootSnapshot)
+       protoEventsMock := mockprotocol.NewConsumer(t)
+       protoEventsMock.On("BlockFinalized", mock.Anything)
+       protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything)

-       epoch1Setup := result.ServiceEvents[0].Event.(*flow.EpochSetup)
-       epoch1FinalView := epoch1Setup.FinalView
-
-       // add a participant for the next epoch
-       epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification))
-       epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.Canonical)
-
-       // create the epoch setup event for the second epoch
-       epoch2Setup := unittest.EpochSetupFixture(
-           unittest.WithParticipants(epoch2Participants),
-           unittest.SetupWithCounter(epoch1Setup.Counter+1),
-           unittest.WithFinalView(epoch1FinalView+1000),
-           unittest.WithFirstView(epoch1FinalView+1),
-       )
-
-       receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1)
-       receipt1.ExecutionResult.ServiceEvents = []flow.ServiceEvent{epoch2Setup.ServiceEvent()}
-
-       // add a block containing a receipt for block 1
-       block2 := unittest.BlockWithParentFixture(block1.Header)
-       block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1)))
-       err = state.Extend(context.Background(), block2)
-       require.NoError(t, err)
-       err = state.Finalize(context.Background(), block2.ID())
-       require.NoError(t, err)
-
-       // block 3 seals block 1
-       block3 := unittest.BlockWithParentFixture(block2.Header)
-       block3.SetPayload(flow.Payload{
-           Seals: []*flow.Seal{seal1},
-       })
-       err = state.Extend(context.Background(), block3)
-       require.NoError(t, err)
-
-       // block 4 will be the first block for epoch 2
-       block4 := unittest.BlockWithParentFixture(block3.Header)
-       block4.Header.View = epoch1Setup.FinalView + 1
-
-       err = state.Extend(context.Background(), block4)
-       require.Error(t, err)
-   })
-}
-
-// TestEmergencyEpochFallback tests that epoch emergency fallback is triggered
-// when an epoch fails to be committed before the epoch commitment deadline,
-// or when an invalid service event (indicating service account smart contract bug)
-// is sealed.
-func TestEmergencyEpochFallback(t *testing.T) { - - // if we finalize the first block past the epoch commitment deadline while - // in the EpochStaking phase, EECC should be triggered - // - // Epoch Commitment Deadline - // | Epoch Boundary - // | | - // v v - // ROOT <- B1 <- B2 - t.Run("passed epoch commitment deadline in EpochStaking phase - should trigger EECC", func(t *testing.T) { - - rootSnapshot := unittest.RootSnapshotFixture(participants) - metricsMock := mockmodule.NewComplianceMetrics(t) - mockMetricsForRootSnapshot(metricsMock, rootSnapshot) - protoEventsMock := mockprotocol.NewConsumer(t) - protoEventsMock.On("BlockFinalized", mock.Anything) - protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything) - - util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState) { + util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { head, err := rootSnapshot.Head() require.NoError(t, err) result, _, err := rootSnapshot.SealedResult() require.NoError(t, err) - safetyThreshold, err := rootSnapshot.Params().EpochCommitSafetyThreshold() + rootProtocolState, err := rootSnapshot.ProtocolState() require.NoError(t, err) + epochExtensionViewCount := rootProtocolState.GetEpochExtensionViewCount() + safetyThreshold := rootProtocolState.GetFinalizationSafetyThreshold() + require.GreaterOrEqual(t, epochExtensionViewCount, safetyThreshold, "epoch extension view count must be at least as large as safety threshold") + + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) epoch1Setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) epoch1FinalView := epoch1Setup.FinalView epoch1CommitmentDeadline := epoch1FinalView - safetyThreshold - // finalizing block 1 should trigger EECC - metricsMock.On("EpochEmergencyFallbackTriggered").Once() - protoEventsMock.On("EpochEmergencyFallbackTriggered").Once() - // we begin the epoch in the EpochStaking phase and // block 1 will be the first block on or past the epoch commitment deadline - block1 := unittest.BlockWithParentFixture(head) - block1.Header.View = epoch1CommitmentDeadline + rand.Uint64()%2 - err = state.Extend(context.Background(), block1) + block1View := epoch1CommitmentDeadline + rand.Uint64()%2 + block1 := unittest.BlockFixture( + unittest.Block.WithParent(head.ID(), head.View, head.Height), + unittest.Block.WithView(block1View), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(head.ID(), block1View, nil), + }), + ) + // finalizing block 1 should trigger EFM + metricsMock.On("EpochFallbackModeTriggered").Once() + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseFallback).Once() + metricsMock.On("CurrentEpochFinalView", epoch1FinalView+epochExtensionViewCount) + protoEventsMock.On("EpochFallbackModeTriggered", epoch1Setup.Counter, block1.ToHeader()).Once() + protoEventsMock.On("EpochExtended", epoch1Setup.Counter, block1.ToHeader(), unittest.MatchEpochExtension(epoch1FinalView, epochExtensionViewCount)).Once() + + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block1)) require.NoError(t, err) - assertEpochEmergencyFallbackTriggered(t, state, false) // not triggered before finalization + assertEpochFallbackTriggered(t, state.Final(), false) // not triggered before finalization err = state.Finalize(context.Background(), 
block1.ID()) require.NoError(t, err) - assertEpochEmergencyFallbackTriggered(t, state, true) // triggered after finalization + assertEpochFallbackTriggered(t, state.Final(), true) // triggered after finalization + assertInPhase(t, state.Final(), flow.EpochPhaseFallback) // immediately enter fallback phase // block 2 will be the first block past the first epoch boundary - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.Header.View = epoch1FinalView + 1 - err = state.Extend(context.Background(), block2) + block2 := unittest.BlockWithParentProtocolState(block1) + block2.View = epoch1FinalView + 1 + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2)) require.NoError(t, err) err = state.Finalize(context.Background(), block2.ID()) require.NoError(t, err) - // since EECC has been triggered, epoch transition metrics should not be updated + // since EFM has been triggered, epoch transition metrics should not be updated metricsMock.AssertNotCalled(t, "EpochTransition", mock.Anything, mock.Anything) metricsMock.AssertNotCalled(t, "CurrentEpochCounter", epoch1Setup.Counter+1) }) }) // if we finalize the first block past the epoch commitment deadline while - // in the EpochSetup phase, EECC should be triggered + // in the EpochSetup phase, EFM should be triggered // // Epoch Commitment Deadline // | Epoch Boundary // | | // v v // ROOT <- B1 <- B2(R1) <- B3(S1) <- B4 - t.Run("passed epoch commitment deadline in EpochSetup phase - should trigger EECC", func(t *testing.T) { + t.Run("passed epoch commitment deadline in EpochSetup phase - should trigger EFM", func(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) metricsMock := mockmodule.NewComplianceMetrics(t) mockMetricsForRootSnapshot(metricsMock, rootSnapshot) protoEventsMock := mockprotocol.NewConsumer(t) protoEventsMock.On("BlockFinalized", mock.Anything) protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything) - util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState) { + util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { head, err := rootSnapshot.Head() require.NoError(t, err) result, _, err := rootSnapshot.SealedResult() require.NoError(t, err) - safetyThreshold, err := rootSnapshot.Params().EpochCommitSafetyThreshold() + rootProtocolState, err := rootSnapshot.ProtocolState() require.NoError(t, err) + epochExtensionViewCount := rootProtocolState.GetEpochExtensionViewCount() + safetyThreshold := rootProtocolState.GetFinalizationSafetyThreshold() + require.GreaterOrEqual(t, epochExtensionViewCount, safetyThreshold, "epoch extension view count must be at least as large as safety threshold") // add a block for the first seal to reference - block1 := unittest.BlockWithParentFixture(head) - block1.SetPayload(flow.EmptyPayload()) - err = state.Extend(context.Background(), block1) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block1)) require.NoError(t, err) err = state.Finalize(context.Background(), block1.ID()) require.NoError(t, err) @@ -1598,7 +1956,7 @@ func TestEmergencyEpochFallback(t *testing.T) { // add 
a participant for the next epoch epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.Canonical) + epoch2Participants := append(participants, epoch2NewParticipant).Sort(flow.Canonical[flow.Identity]).ToSkeleton() // create the epoch setup event for the second epoch epoch2Setup := unittest.EpochSetupFixture( @@ -1607,46 +1965,58 @@ func TestEmergencyEpochFallback(t *testing.T) { unittest.WithFinalView(epoch1FinalView+1000), unittest.WithFirstView(epoch1FinalView+1), ) - - receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - receipt1.ExecutionResult.ServiceEvents = []flow.ServiceEvent{epoch2Setup.ServiceEvent()} - seal1.ResultID = receipt1.ExecutionResult.ID() + receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1, epoch2Setup.ServiceEvent()) // add a block containing a receipt for block 1 - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) - err = state.Extend(context.Background(), block2) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2)) require.NoError(t, err) err = state.Finalize(context.Background(), block2.ID()) require.NoError(t, err) // block 3 seals block 1 and will be the first block on or past the epoch commitment deadline - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.Header.View = epoch1CommitmentDeadline + rand.Uint64()%2 - block3.SetPayload(flow.Payload{ - Seals: []*flow.Seal{seal1}, - }) - err = state.Extend(context.Background(), block3) + block3View := epoch1CommitmentDeadline + rand.Uint64()%2 + seals := []*flow.Seal{seal1} + block3 := unittest.BlockFixture( + unittest.Block.WithParent(block2.ID(), block2.View, block2.Height), + unittest.Block.WithView(block3View), + unittest.Block.WithPayload( + flow.Payload{ + Seals: seals, + ProtocolStateID: calculateExpectedStateId(t, mutableState)(block2.ID(), block3View, seals), + }), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block3)) require.NoError(t, err) - // finalizing block 3 should trigger EECC - metricsMock.On("EpochEmergencyFallbackTriggered").Once() - protoEventsMock.On("EpochEmergencyFallbackTriggered").Once() + // finalizing block 3 should trigger EFM + metricsMock.On("EpochFallbackModeTriggered").Once() + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseFallback).Once() + metricsMock.On("CurrentEpochFinalView", epoch1FinalView+epochExtensionViewCount) + protoEventsMock.On("EpochFallbackModeTriggered", epoch1Setup.Counter, block3.ToHeader()).Once() + protoEventsMock.On("EpochExtended", epoch1Setup.Counter, block3.ToHeader(), unittest.MatchEpochExtension(epoch1FinalView, epochExtensionViewCount)).Once() - assertEpochEmergencyFallbackTriggered(t, state, false) // not triggered before finalization + assertEpochFallbackTriggered(t, state.Final(), false) // not triggered before finalization err = state.Finalize(context.Background(), block3.ID()) require.NoError(t, err) - assertEpochEmergencyFallbackTriggered(t, state, true) // triggered after finalization + assertEpochFallbackTriggered(t, state.Final(), true) // triggered after finalization + assertInPhase(t, state.Final(), flow.EpochPhaseFallback) // block 4 will be the first block past the 
first epoch boundary - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.Header.View = epoch1FinalView + 1 - err = state.Extend(context.Background(), block4) + block4 := unittest.BlockWithParentProtocolState(block3) + block4.View = epoch1FinalView + 1 + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block4)) require.NoError(t, err) err = state.Finalize(context.Background(), block4.ID()) require.NoError(t, err) - // since EECC has been triggered, epoch transition metrics should not be updated + // since EFM has been triggered, epoch transition metrics should not be updated metricsMock.AssertNotCalled(t, "EpochTransition", epoch2Setup.Counter, mock.Anything) metricsMock.AssertNotCalled(t, "CurrentEpochCounter", epoch2Setup.Counter) }) @@ -1654,31 +2024,37 @@ func TestEmergencyEpochFallback(t *testing.T) { // if an invalid epoch service event is incorporated, we should: // - not apply the phase transition corresponding to the invalid service event - // - immediately trigger EECC + // - immediately trigger EFM // // Epoch Boundary // | // v // ROOT <- B1 <- B2(R1) <- B3(S1) <- B4 - t.Run("epoch transition with invalid service event - should trigger EECC", func(t *testing.T) { + t.Run("epoch transition with invalid service event - should trigger EFM", func(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) metricsMock := mockmodule.NewComplianceMetrics(t) mockMetricsForRootSnapshot(metricsMock, rootSnapshot) protoEventsMock := mockprotocol.NewConsumer(t) protoEventsMock.On("BlockFinalized", mock.Anything) protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything) - util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db *badger.DB, state *protocol.ParticipantState) { + util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { head, err := rootSnapshot.Head() require.NoError(t, err) result, _, err := rootSnapshot.SealedResult() require.NoError(t, err) + rootProtocolState, err := rootSnapshot.ProtocolState() + require.NoError(t, err) + epochExtensionViewCount := rootProtocolState.GetEpochExtensionViewCount() // add a block for the first seal to reference - block1 := unittest.BlockWithParentFixture(head) - block1.SetPayload(flow.EmptyPayload()) - err = state.Extend(context.Background(), block1) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block1)) require.NoError(t, err) err = state.Finalize(context.Background(), block1.ID()) require.NoError(t, err) @@ -1688,7 +2064,7 @@ func TestEmergencyEpochFallback(t *testing.T) { // add a participant for the next epoch epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) - epoch2Participants := append(participants, epoch2NewParticipant).Sort(order.Canonical) + epoch2Participants := append(participants, epoch2NewParticipant).Sort(flow.Canonical[flow.Identity]).ToSkeleton() // create the epoch setup event for the second epoch // this event is invalid because it used a non-contiguous first view @@ -1698,57 +2074,736 @@ func TestEmergencyEpochFallback(t *testing.T) { unittest.WithFinalView(epoch1FinalView+1000), 
unittest.WithFirstView(epoch1FinalView+10), // invalid first view ) - - receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - receipt1.ExecutionResult.ServiceEvents = []flow.ServiceEvent{epoch2Setup.ServiceEvent()} - seal1.ResultID = receipt1.ExecutionResult.ID() + receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1, epoch2Setup.ServiceEvent()) // add a block containing a receipt for block 1 - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) - err = state.Extend(context.Background(), block2) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2)) require.NoError(t, err) err = state.Finalize(context.Background(), block2.ID()) require.NoError(t, err) // block 3 is where the service event state change comes into effect - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.Payload{ - Seals: []*flow.Seal{seal1}, - }) - err = state.Extend(context.Background(), block3) + seals := []*flow.Seal{seal1} + block3View := block2.View + 1 + block3 := unittest.BlockFixture( + unittest.Block.WithParent(block2.ID(), block2.View, block2.Height), + unittest.Block.WithPayload( + flow.Payload{ + Seals: seals, + ProtocolStateID: calculateExpectedStateId(t, mutableState)(block2.ID(), block3View, seals), + }), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block3)) require.NoError(t, err) - // incorporating the service event should trigger EECC - metricsMock.On("EpochEmergencyFallbackTriggered").Once() - protoEventsMock.On("EpochEmergencyFallbackTriggered").Once() + // incorporating the service event should trigger EFM + metricsMock.On("EpochFallbackModeTriggered").Once() + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseFallback).Once() + protoEventsMock.On("EpochFallbackModeTriggered", epoch1Setup.Counter, block3.ToHeader()).Once() - assertEpochEmergencyFallbackTriggered(t, state, false) // not triggered before finalization + assertEpochFallbackTriggered(t, state.Final(), false) // not triggered before finalization err = state.Finalize(context.Background(), block3.ID()) require.NoError(t, err) - assertEpochEmergencyFallbackTriggered(t, state, true) // triggered after finalization - - // block 5 is the first block past the current epoch boundary - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.Header.View = epoch1Setup.FinalView + 1 - err = state.Extend(context.Background(), block4) + assertEpochFallbackTriggered(t, state.Final(), true) // triggered after finalization + assertInPhase(t, state.Final(), flow.EpochPhaseFallback) // immediately enters fallback phase + + // block 4 is the first block past the current epoch boundary + block4View := epoch1Setup.FinalView + 1 + block4 := unittest.BlockFixture( + unittest.Block.WithParent(block3.ID(), block3.View, block3.Height), + unittest.Block.WithView(block4View), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: calculateExpectedStateId(t, mutableState)(block3.ID(), block4View, nil), + }), + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block4)) require.NoError(t, err) + + // we add the epoch extension after the epoch transition + metricsMock.On("CurrentEpochFinalView", epoch1FinalView+epochExtensionViewCount).Once() + 
protoEventsMock.On("EpochExtended", epoch1Setup.Counter, block4.ToHeader(), unittest.MatchEpochExtension(epoch1FinalView, epochExtensionViewCount)).Once() + err = state.Finalize(context.Background(), block4.ID()) require.NoError(t, err) - // since EECC has been triggered, epoch transition metrics should not be updated + // since EFM has been triggered, epoch transition metrics should not be updated metricsMock.AssertNotCalled(t, "EpochTransition", epoch2Setup.Counter, mock.Anything) metricsMock.AssertNotCalled(t, "CurrentEpochCounter", epoch2Setup.Counter) }) }) } +// TestRecoveryFromEpochFallbackMode tests a few scenarios where the protocol first enters EFM in different phases +// and then recovers from it by incorporating and finalizing a valid EpochRecover service event. +// We expect different behavior depending on the phase in which the protocol enters EFM, specifically for the committed phase, +// as the protocol cannot be immediately recovered from it. First, we need to enter the next epoch before we can accept an EpochRecover event. +// Specifically, for this case we make progress till the epoch extension event to make sure that we cover the most complex scenario. +func TestRecoveryFromEpochFallbackMode(t *testing.T) { + + // assertCorrectRecovery checks that the recovery epoch is correctly setup. + // We expect the next epoch will use setup and commit events from EpochRecover service event. + // According to the specification, the current epoch after processing an EpochRecover event must be in committed phase, + // since it contains EpochSetup and EpochCommit events. + assertCorrectRecovery := func(state *protocol.ParticipantState, epochRecover *flow.EpochRecover) { + finalSnap := state.Final() + epochState, err := finalSnap.EpochProtocolState() + require.NoError(t, err) + epochPhase := epochState.EpochPhase() + require.Equal(t, flow.EpochPhaseCommitted, epochPhase, "next epoch has to be committed") + require.Equal(t, &epochRecover.EpochSetup, epochState.Entry().NextEpochSetup, "next epoch has to be setup according to EpochRecover") + require.Equal(t, &epochRecover.EpochCommit, epochState.Entry().NextEpochCommit, "next epoch has to be committed according to EpochRecover") + } + + // if we enter EFM in the EpochStaking phase, we should be able to recover by incorporating a valid EpochRecover event + // since the epoch commitment deadline has not been reached. 
+ // ROOT <- B1 <- B2(ER(B1, InvalidEpochSetup)) <- B3(S(ER(B1))) <- B4(ER(B2, EpochRecover)) <- B5(S(ER(B2))) + t.Run("entered-EFM-in-staking-phase", func(t *testing.T) { + + rootSnapshot := unittest.RootSnapshotFixture(participants) + metricsMock := mockmodule.NewComplianceMetrics(t) + mockMetricsForRootSnapshot(metricsMock, rootSnapshot) + protoEventsMock := mockprotocol.NewConsumer(t) + protoEventsMock.On("BlockFinalized", mock.Anything) + protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything) + + util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { + head, err := rootSnapshot.Head() + require.NoError(t, err) + rootResult, _, err := rootSnapshot.SealedResult() + require.NoError(t, err) + + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) + + // add a block for the first seal to reference + block1View := head.View + 1 + block1 := unittest.BlockFixture( + unittest.Block.WithParent(head.ID(), head.View, head.Height), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(head.ID(), block1View, nil), + }), + ) + unittest.InsertAndFinalize(t, state, block1) + + // add a participant for the next epoch + epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) + epoch2Participants := append(participants, epoch2NewParticipant).Sort(flow.Canonical[flow.Identity]).ToSkeleton() + + // build an invalid setup event which will trigger EFM + epoch1Setup := rootResult.ServiceEvents[0].Event.(*flow.EpochSetup) + invalidSetup := unittest.EpochSetupFixture( + unittest.WithParticipants(epoch2Participants), + unittest.SetupWithCounter(epoch1Setup.Counter+10), // invalid counter + unittest.WithFinalView(epoch1Setup.FinalView+1000), + unittest.WithFirstView(epoch1Setup.FinalView+1), + ) + receipt, seal := unittest.ReceiptAndSealForBlock(block1, invalidSetup.ServiceEvent()) + + // ingesting block 2 and 3, block 3 seals the invalid setup event + block2, block3 := unittest.SealBlock(t, state, mutableState, block1, receipt, seal) + assertEpochFallbackTriggered(t, state.AtBlockID(block2.ID()), false) // EFM shouldn't be triggered since block 2 only incorporates the event, sealing happens in block 3 + assertEpochFallbackTriggered(t, state.AtBlockID(block3.ID()), true) // EFM has to be triggered at block 3, since it seals the invalid setup event + assertEpochFallbackTriggered(t, state.Final(), false) // EFM should still not be triggered for finalized state since the invalid service event does not have a finalized seal + + err = state.Finalize(context.Background(), block2.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), false) // EFM should still not be triggered after finalizing block 2 + + // Since we enter EFM before the commitment deadline, no epoch extension is added + metricsMock.On("EpochFallbackModeTriggered").Once() + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseFallback).Once() + protoEventsMock.On("EpochFallbackModeTriggered", epoch1Setup.Counter, block3.ToHeader()).Once() + err = state.Finalize(context.Background(), block3.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), true) // finalizing block 3 should have triggered EFM since it seals invalid setup event + assertInPhase(t, state.Final(), flow.EpochPhaseFallback) + + // Block 4 incorporates Execution Result [ER] for block2, where 
the ER also includes EpochRecover event. + // Only when ingesting block 5, which _seals_ the EpochRecover event, the state should switch back to + // `EpochFallbackTriggered` being false. + epochRecover := unittest.EpochRecoverFixture( + unittest.WithParticipants(epoch2Participants), + unittest.SetupWithCounter(epoch1Setup.Counter+1), + unittest.WithFinalView(epoch1Setup.FinalView+1000), + unittest.WithFirstView(epoch1Setup.FinalView+1), + ) + receipt, seal = unittest.ReceiptAndSealForBlock(block2, epochRecover.ServiceEvent()) + + // ingesting block 4 and 5, block 5 seals the EpochRecover event + block4, block5 := unittest.SealBlock(t, state, mutableState, block3, receipt, seal) + assertEpochFallbackTriggered(t, state.AtBlockID(block4.ID()), true) + assertEpochFallbackTriggered(t, state.AtBlockID(block5.ID()), false) + assertEpochFallbackTriggered(t, state.Final(), true) // the latest finalized state should still be in EFM as `epochRecover` event does not have a finalized seal + + err = state.Finalize(context.Background(), block4.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), true) // should still be in EFM as `epochRecover` is not yet finalized + + // Epoch recovery results in entering Committed phase + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseCommitted).Once() + metricsMock.On("EpochFallbackModeExited").Once() + protoEventsMock.On("EpochFallbackModeExited", epoch1Setup.Counter, block5.ToHeader()).Once() + protoEventsMock.On("EpochCommittedPhaseStarted", mock.Anything, mock.Anything).Once() + // finalize the block sealing the EpochRecover event + err = state.Finalize(context.Background(), block5.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), false) // should be unset after finalizing block 5 which contains a seal for EpochRecover. + assertInPhase(t, state.Final(), flow.EpochPhaseCommitted) // enter committed phase after recovery + assertCorrectRecovery(state, epochRecover) + }) + }) + + // if we enter EFM in the EpochSetup phase, we should be able to recover by incorporating a valid EpochRecover event + // since the epoch commitment deadline has not been reached. 
+ // ROOT <- B1 <- B2(ER(B1, EpochSetup)) <- B3(S(ER(B1))) <- B4(ER(B2, InvalidEpochCommit)) <- B5(S(ER(B2))) <- B6(ER(B3, EpochRecover)) <- B7(S(ER(B3))) + t.Run("entered-EFM-in-setup-phase", func(t *testing.T) { + rootSnapshot := unittest.RootSnapshotFixture(participants) + metricsMock := mockmodule.NewComplianceMetrics(t) + mockMetricsForRootSnapshot(metricsMock, rootSnapshot) + protoEventsMock := mockprotocol.NewConsumer(t) + protoEventsMock.On("BlockFinalized", mock.Anything) + protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything) + + util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { + head, err := rootSnapshot.Head() + require.NoError(t, err) + rootResult, _, err := rootSnapshot.SealedResult() + require.NoError(t, err) + + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) + + // add a block for the first seal to reference + block1View := head.View + 1 + block1 := unittest.BlockFixture( + unittest.Block.WithParent(head.ID(), head.View, head.Height), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(head.ID(), block1View, nil), + }), + ) + unittest.InsertAndFinalize(t, state, block1) + + // add a participant for the next epoch + epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) + epoch2Participants := append(participants, epoch2NewParticipant).Sort(flow.Canonical[flow.Identity]).ToSkeleton() + + // Block 2 incorporates Execution Result [ER] for block1, where the ER also includes `EpochSetup` event. + // Only when ingesting block 3, which _seals_ the `EpochSetup` event, the epoch moves to setup phase. + epoch1Setup := rootResult.ServiceEvents[0].Event.(*flow.EpochSetup) + epoch2Setup := unittest.EpochSetupFixture( + unittest.WithParticipants(epoch2Participants), + unittest.SetupWithCounter(epoch1Setup.Counter+1), + unittest.WithFinalView(epoch1Setup.FinalView+1000), + unittest.WithFirstView(epoch1Setup.FinalView+1), + ) + receipt, seal := unittest.ReceiptAndSealForBlock(block1, epoch2Setup.ServiceEvent()) + + // ingesting block 2 and 3, block 3 seals the EpochSetup event + block2, block3 := unittest.SealBlock(t, state, mutableState, block1, receipt, seal) + err = state.Finalize(context.Background(), block2.ID()) + require.NoError(t, err) + + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseSetup).Once() + protoEventsMock.On("EpochSetupPhaseStarted", epoch2Setup.Counter-1, mock.Anything) + err = state.Finalize(context.Background(), block3.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), false) // EFM is not expected + + // Block 4 incorporates Execution Result [ER] for block2, where the ER also includes invalid service event. + // Only when ingesting block 5, which _seals_ the invalid service event, the state should switch to + // `EpochFallbackTriggered` being true. 
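+ // (a freshly generated EpochCommit fixture carries random data, e.g. a counter that does not correspond to the sealed EpochSetup, so the protocol state rejects it as an invalid service event)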
+ invalidEpochCommit := unittest.EpochCommitFixture() // a random epoch commit event will be invalid + receipt, seal = unittest.ReceiptAndSealForBlock(block2, invalidEpochCommit.ServiceEvent()) + + // ingesting block 4 and 5, block 5 seals the invalid commit event + block4, block5 := unittest.SealBlock(t, state, mutableState, block3, receipt, seal) + assertEpochFallbackTriggered(t, state.AtBlockID(block4.ID()), false) // EFM shouldn't be triggered since block 4 only incorporates the event, sealing happens in block 5 + assertEpochFallbackTriggered(t, state.AtBlockID(block5.ID()), true) // EFM has to be triggered at block 5, since it seals the invalid commit event + assertEpochFallbackTriggered(t, state.Final(), false) // EFM should still not be triggered for finalized state since the invalid service event does not have a finalized seal + + err = state.Finalize(context.Background(), block4.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), false) // EFM should still not be triggered after finalizing block 4 + + metricsMock.On("EpochFallbackModeTriggered").Once() + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseFallback).Once() + protoEventsMock.On("EpochFallbackModeTriggered", epoch1Setup.Counter, block5.ToHeader()).Once() + err = state.Finalize(context.Background(), block5.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), true) // finalizing block 5 should have triggered EFM + assertInPhase(t, state.Final(), flow.EpochPhaseFallback) // immediately enter fallback phase + + // Block 6 incorporates Execution Result [ER] for block3, where the ER also includes EpochRecover event. + // Only when ingesting block 7, which _seals_ the EpochRecover event, the state should switch back to + // `EpochFallbackTriggered` being false. 
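+ // An EpochRecover event bundles an EpochSetup and an EpochCommit for the recovery epoch, so sealing it re-commits the next epoch in a single step.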
+ epochRecover := unittest.EpochRecoverFixture( + unittest.WithParticipants(epoch2Participants), + unittest.SetupWithCounter(epoch1Setup.Counter+1), + unittest.WithFinalView(epoch1Setup.FinalView+1000), + unittest.WithFirstView(epoch1Setup.FinalView+1), + ) + receipt, seal = unittest.ReceiptAndSealForBlock(block3, epochRecover.ServiceEvent()) + + // ingesting blocks 6 and 7, block 7 seals the `epochRecover` event + block6, block7 := unittest.SealBlock(t, state, mutableState, block5, receipt, seal) + assertEpochFallbackTriggered(t, state.AtBlockID(block6.ID()), true) + assertEpochFallbackTriggered(t, state.AtBlockID(block7.ID()), false) + assertEpochFallbackTriggered(t, state.Final(), true) // the latest finalized state should still be in EFM as `epochRecover` event does not have a finalized seal + + err = state.Finalize(context.Background(), block6.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), true) // should still be in EFM as `epochRecover` is not yet finalized + + // Epoch recovery results in entering Committed phase + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseCommitted).Once() + metricsMock.On("EpochFallbackModeExited").Once() + protoEventsMock.On("EpochFallbackModeExited", epoch1Setup.Counter, block7.ToHeader()).Once() + protoEventsMock.On("EpochCommittedPhaseStarted", mock.Anything, mock.Anything).Once() + // finalize the block sealing the EpochRecover event + err = state.Finalize(context.Background(), block7.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), false) // should be unset after finalization + assertInPhase(t, state.Final(), flow.EpochPhaseCommitted) // enter committed phase after recovery + assertCorrectRecovery(state, epochRecover) + }) + }) + + // Entering EFM in the commit phase is the most complex case since we can't revert an already committed epoch. In this case, + // we proceed as follows: + // - We build valid EpochSetup and EpochCommit events for the next epoch, effectively moving the protocol to the committed phase. + // - Next, we incorporate an invalid EpochCommit event, which will trigger EFM. + // - At this point, we are in EFM but the next epoch has been committed, so we can't accept an EpochRecover event yet. + // - Instead, we progress to the next epoch. Note that it's possible to build an EpochRecover event at this point, + // but we want to test that an epoch extension can be added. + // - We build a block with a view reaching the epoch commitment deadline, which should trigger the creation of an epoch extension. + // - Next, we build a valid EpochRecover event, incorporate and seal it, effectively recovering from EFM. + // - To check that the state defers recovery from EFM until we actually enter the next epoch (the recovery epoch), + // we build a block whose view is within the epoch extension but not yet in the recovery epoch. + // - Finally, we build a block with a view that is in the recovery epoch, to make sure that the state successfully enters it.
+ // ROOT <- B1 <- B2(ER(B1, EpochSetup)) <- B3(S(ER(B1))) <- B4(ER(B2, EpochCommit)) <- B5(S(ER(B2))) <- B6(ER(B3, InvalidEpochCommit)) <- + // <- B7(S(ER(B3))) <- B8 <- B9 <- B10 <- B11(ER(B4, EpochRecover)) <- B12(S(ER(B4))) <- B13 <- B14 + // ^ Epoch 1 Final View Last View of epoch extension ^ + // ^ Epoch 2 Commitment Deadline ^ Epoch 3(recovery) First View + // ^ Epoch 2 Final View + // ^ First View of epoch extension + // ^ Epoch 2 Setup Counter + t.Run("entered-EFM-in-commit-phase", func(t *testing.T) { + + rootSnapshot := unittest.RootSnapshotFixture(participants) + metricsMock := mockmodule.NewComplianceMetrics(t) + mockMetricsForRootSnapshot(metricsMock, rootSnapshot) + protoEventsMock := mockprotocol.NewConsumer(t) + protoEventsMock.On("BlockFinalized", mock.Anything) + protoEventsMock.On("BlockProcessable", mock.Anything, mock.Anything) + + util.RunWithFullProtocolStateAndMetricsAndConsumer(t, rootSnapshot, metricsMock, protoEventsMock, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { + head, err := rootSnapshot.Head() + require.NoError(t, err) + rootResult, _, err := rootSnapshot.SealedResult() + require.NoError(t, err) + rootProtocolState, err := rootSnapshot.ProtocolState() + require.NoError(t, err) + epochExtensionViewCount := rootProtocolState.GetEpochExtensionViewCount() + safetyThreshold := rootProtocolState.GetFinalizationSafetyThreshold() + require.GreaterOrEqual(t, epochExtensionViewCount, safetyThreshold, "epoch extension view count must be at least as large as safety threshold") + + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) + + // Constructing blocks + // ... <- B1 <- B2(ER(B1, EpochSetup)) <- B3(S(ER(B1))) <- B4(ER(B2, EpochCommit)) <- B5(S(ER(B2))) <- ... + // B1 will be the first block that we will use as reference block for first seal. Block B2 incorporates the Execution Result [ER] + // for block 1 and the EpochSetup service event. Block B3 seals the EpochSetup event. + // Block B4 incorporates the Execution Result [ER] for block 2 and the EpochCommit service event. Block B5 seals the EpochCommit event. + // We expect that the Protocol state at B5 enters `epoch committed` phase. + + // add a block for the first seal to reference + block1View := head.View + 1 + block1 := unittest.BlockFixture( + unittest.Block.WithParent(head.ID(), head.View, head.Height), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(head.ID(), block1View, nil), + }), + ) + unittest.InsertAndFinalize(t, state, block1) + + // add a participant for the next epoch + epoch2NewParticipant := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) + epoch2Participants := append(participants, epoch2NewParticipant).Sort(flow.Canonical[flow.Identity]).ToSkeleton() + + // Block 2 incorporates Execution Result [ER] for block1, where the ER also includes `EpochSetup` event. + // Only when ingesting block 3, which _seals_ the `EpochSetup` event, epoch moves to the setup phase. 
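+ // (unlike the invalid events used elsewhere in this test, the following setup event is valid: its counter increments by one and its first view is contiguous with epoch 1's final view)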
+ epoch1Setup := rootResult.ServiceEvents[0].Event.(*flow.EpochSetup) + epoch2Setup := unittest.EpochSetupFixture( + unittest.WithParticipants(epoch2Participants), + unittest.SetupWithCounter(epoch1Setup.Counter+1), + unittest.WithFinalView(epoch1Setup.FinalView+1000), + unittest.WithFirstView(epoch1Setup.FinalView+1), + ) + receipt, seal := unittest.ReceiptAndSealForBlock(block1, epoch2Setup.ServiceEvent()) + + // ingesting block 2 and 3, block 3 seals the `epochSetup` for the next epoch + block2, block3 := unittest.SealBlock(t, state, mutableState, block1, receipt, seal) + err = state.Finalize(context.Background(), block2.ID()) + require.NoError(t, err) + + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseSetup).Once() + protoEventsMock.On("EpochSetupPhaseStarted", epoch2Setup.Counter-1, mock.Anything).Once() + err = state.Finalize(context.Background(), block3.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), false) // EFM is not expected + + // Block 4 incorporates Execution Result [ER] for block2, where the ER also includes `EpochCommit` event. + // Only when ingesting block 5, which _seals_ the `EpochCommit` event, the epoch moves to committed phase. + epoch2Commit := unittest.EpochCommitFixture( + unittest.CommitWithCounter(epoch2Setup.Counter), + unittest.WithClusterQCsFromAssignments(epoch2Setup.Assignments), + unittest.WithDKGFromParticipants(epoch2Participants.ToSkeleton()), + ) + receipt, seal = unittest.ReceiptAndSealForBlock(block2, epoch2Commit.ServiceEvent()) + + // ingesting block 4 and 5, block 5 seals the `epochCommit` for the next epoch + block4, block5 := unittest.SealBlock(t, state, mutableState, block3, receipt, seal) + err = state.Finalize(context.Background(), block4.ID()) + require.NoError(t, err) + + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseCommitted).Once() + protoEventsMock.On("EpochCommittedPhaseStarted", epoch2Setup.Counter-1, mock.Anything).Once() + err = state.Finalize(context.Background(), block5.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), false) // EFM is not expected + + // Constructing blocks + // ... <- B6(ER(B3, InvalidEpochCommit)) <- B7(S(ER(B3))) <- B8 <- B9 <- ... + // Block B6 incorporates the Execution Result [ER] for block 3 and the invalid service event. + // Block B7 seals the invalid service event. + // We expect that the Protocol state at B7 switches `EpochFallbackTriggered` to true. + // B8 will be the first block past the epoch boundary, which will trigger epoch transition to the next epoch. + // B9 will be the first block past the epoch commitment deadline, which will trigger construction of an epoch extension. + + // Block 6 incorporates Execution Result [ER] for block3, where the ER also includes invalid service event. + // Only when ingesting block 7, which _seals_ the invalid service event, the state should switch to + // `EpochFallbackTriggered` being true. 
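+ // Note: even though epoch 2 is already committed at this point, sealing an invalid service event still puts the protocol into EFM; the committed epoch itself remains intact, and the transition into it happens at B8 below.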
+ invalidCommit := unittest.EpochCommitFixture() + receipt, seal = unittest.ReceiptAndSealForBlock(block3, invalidCommit.ServiceEvent()) + + // seal B3 by building two blocks on top of B5 that contain ER and seal respectively + block6, block7 := unittest.SealBlock(t, state, mutableState, block5, receipt, seal) + assertEpochFallbackTriggered(t, state.AtBlockID(block6.ID()), false) // EFM shouldn't be triggered since block 6 only incorporates the event, sealing happens in block 7 + assertEpochFallbackTriggered(t, state.AtBlockID(block7.ID()), true) // EFM has to be triggered at block 7, since it seals the invalid commit event + assertEpochFallbackTriggered(t, state.Final(), false) // EFM should still not be triggered for finalized state since the invalid service event does not have a finalized seal + + err = state.Finalize(context.Background(), block6.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), false) // EFM should still not be triggered after finalizing block 6 + + metricsMock.On("EpochFallbackModeTriggered").Once() + protoEventsMock.On("EpochFallbackModeTriggered", epoch1Setup.Counter, block7.ToHeader()).Once() + err = state.Finalize(context.Background(), block7.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), true) // finalizing block 7 should have triggered EFM + assertInPhase(t, state.Final(), flow.EpochPhaseCommitted) // remain in committed phase until next transition + + // TODO: try submitting EpochRecover. We don't do this in the current implementation since there is no way + // to actually check that the event was ignored. We will use a pub/sub mechanism to notify about invalid service events. + // Once that notification mechanism is in place, we can extend this test. + + // B8 will trigger the epoch transition to the already committed epoch + block8View := epoch1Setup.FinalView + 1 // first block past the epoch boundary + block8 := unittest.BlockFixture( + unittest.Block.WithParent(block7.ID(), block7.View, block7.Height), + unittest.Block.WithView(block8View), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(block7.ID(), block8View, nil), + }), + ) + + metricsMock.On("CurrentEpochCounter", epoch2Setup.Counter).Once() + metricsMock.On("EpochTransitionHeight", block8.Height).Once() + metricsMock.On("CurrentEpochFinalView", epoch2Setup.FinalView).Once() + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseFallback).Once() + protoEventsMock.On("EpochTransition", epoch2Setup.Counter, block8.ToHeader()).Once() + + // epoch transition happens at this point + unittest.InsertAndFinalize(t, state, block8) + assertInPhase(t, state.Final(), flow.EpochPhaseFallback) // enter fallback phase immediately after transition + + metricsMock.AssertCalled(t, "CurrentEpochCounter", epoch2Setup.Counter) + metricsMock.AssertCalled(t, "EpochTransitionHeight", block8.Height) + metricsMock.AssertCalled(t, "CurrentEpochFinalView", epoch2Setup.FinalView) + protoEventsMock.AssertCalled(t, "EpochTransition", epoch2Setup.Counter, block8.ToHeader()) + + // B9 doesn't have any seals, but it reaches the safety threshold for the current epoch, meaning we will create an EpochExtension + block9View := epoch2Setup.FinalView - safetyThreshold + block9 := unittest.BlockFixture( + unittest.Block.WithParent(block8.ID(), block8.View, block8.Height), + unittest.Block.WithView(block9View), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(block8.ID(), block9View, nil), + }), + ) + err =
state.Extend(context.Background(), unittest.ProposalFromBlock(block9)) + require.NoError(t, err) + + epochProtocolState, err := state.AtBlockID(block9.ID()).EpochProtocolState() + require.NoError(t, err) + epochExtensions := epochProtocolState.Entry().CurrentEpoch.EpochExtensions + require.Len(t, epochExtensions, 1) + require.Equal(t, epochExtensions[0].FirstView, epoch2Setup.FinalView+1) + + protoEventsMock.On("EpochExtended", epoch2Setup.Counter, block9.ToHeader(), unittest.MatchEpochExtension(epoch2Setup.FinalView, epochExtensionViewCount)).Once() + metricsMock.On("CurrentEpochFinalView", epoch2Setup.FinalView+epochExtensionViewCount) + err = state.Finalize(context.Background(), block9.ID()) + require.NoError(t, err) + + // After epoch extension, FinalView must be updated accordingly + epochAfterExtension, err := state.Final().Epochs().Current() + require.NoError(t, err) + finalView := epochAfterExtension.FinalView() + assert.Equal(t, epochExtensions[0].FinalView, finalView) + + // Constructing blocks + // ... <- B10 <- B11(ER(B4, EpochRecover)) <- B12(S(ER(B4))) <- ... + // B10 will be the first block past the epoch extension. Block B11 incorporates the Execution Result [ER] + // for block 10 and the EpochRecover service event. Block B12 seals the EpochRecover event. + // We expect that the Protocol state at B12 switches `EpochFallbackTriggered` back to false. + + // B10 will be the first block past the epoch extension + block10 := unittest.BlockWithParentProtocolState(block9) + block10.View = epochExtensions[0].FirstView + unittest.InsertAndFinalize(t, state, block10) + + // Block 11 incorporates Execution Result [ER] for block4, where the ER also includes EpochRecover event. + // Only when ingesting block 12, which _seals_ the EpochRecover event, the state should switch back to + // `EpochFallbackTriggered` being false. 
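+ // The recovery epoch below must pick up where the extension ends: its first view is epochExtensions[0].FinalView+1, and its counter increments epoch 2's counter.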
+ epochRecover := unittest.EpochRecoverFixture( + unittest.WithParticipants(epoch2Participants), + unittest.SetupWithCounter(epoch2Setup.Counter+1), + unittest.WithFinalView(epochExtensions[0].FinalView+1000), + unittest.WithFirstView(epochExtensions[0].FinalView+1), + ) + receipt, seal = unittest.ReceiptAndSealForBlock(block4, epochRecover.ServiceEvent()) + + // ingesting blocks 11 and 12, block 12 seals the `epochRecover` event + block11, block12 := unittest.SealBlock(t, state, mutableState, block10, receipt, seal) + assertEpochFallbackTriggered(t, state.AtBlockID(block11.ID()), true) + assertEpochFallbackTriggered(t, state.AtBlockID(block12.ID()), false) + assertEpochFallbackTriggered(t, state.Final(), true) // the latest finalized state should still be in EFM as `epochRecover` event does not have a finalized seal + + err = state.Finalize(context.Background(), block11.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), true) // should still be in EFM as `epochRecover` is not yet finalized + + // Epoch recovery causes us to enter the Committed phase + metricsMock.On("CurrentEpochPhase", flow.EpochPhaseCommitted).Once() + metricsMock.On("EpochFallbackModeExited").Once() + protoEventsMock.On("EpochFallbackModeExited", epochRecover.EpochSetup.Counter-1, block12.ToHeader()).Once() + protoEventsMock.On("EpochCommittedPhaseStarted", epochRecover.EpochSetup.Counter-1, mock.Anything).Once() + // finalize the block sealing the EpochRecover event + err = state.Finalize(context.Background(), block12.ID()) + require.NoError(t, err) + assertEpochFallbackTriggered(t, state.Final(), false) // should be unset after finalization + assertInPhase(t, state.Final(), flow.EpochPhaseCommitted) // enter committed phase after recovery + assertCorrectRecovery(state, epochRecover) + + // Constructing blocks + // ... <- B13 <- B14 + // B13 will be a child block of B12, to ensure that we don't transition into the recovered epoch with the very next block + // but actually finish the epoch extension. B14 will be the first block past the epoch extension, + // which will trigger the epoch transition to the recovered epoch. + // We expect the Protocol state to be at the first view of the recovered epoch and in the staking phase after B14 is incorporated.
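+ // (entering the recovery epoch resets the phase to EpochPhaseStaking, just like any regular epoch transition)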
+ + block13 := unittest.BlockWithParentProtocolState(block12) + unittest.InsertAndFinalize(t, state, block13) + + // ensure we are still in the current epoch and transition only when we reach the final view of the extension + epochProtocolState, err = state.Final().EpochProtocolState() + require.NoError(t, err) + require.Equal(t, epoch2Setup.Counter, epochProtocolState.Epoch(), "expect to be in the previously setup epoch") + + // B14 will be the first block past the epoch extension, meaning it will enter the next epoch which + // had been set up by the EpochRecover event + block14View := epochExtensions[0].FinalView + 1 + block14 := unittest.BlockFixture( + unittest.Block.WithParent(block13.ID(), block13.View, block13.Height), + unittest.Block.WithView(block14View), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(block13.ID(), block14View, nil), + }), + ) + + metricsMock.On("CurrentEpochCounter", epochRecover.EpochSetup.Counter).Once() + metricsMock.On("EpochTransitionHeight", block14.Height).Once() + metricsMock.On("CurrentEpochFinalView", epochRecover.EpochSetup.FinalView).Once() + protoEventsMock.On("EpochTransition", epochRecover.EpochSetup.Counter, block14.ToHeader()).Once() + + unittest.InsertAndFinalize(t, state, block14) + + epochProtocolState, err = state.Final().EpochProtocolState() + require.NoError(t, err) + require.Equal(t, epochRecover.EpochSetup.Counter, epochProtocolState.Epoch(), "expect to be in recovered epoch") + require.Equal(t, flow.EpochPhaseStaking, epochProtocolState.EpochPhase(), "expect to be in staking phase") + }) + }) +} + +// TestEpochTargetEndTime ensures that the target end time of an epoch is correctly calculated depending on whether the epoch has been extended. +// As long as no extension has been added, the TargetEndTime is simply the value from the epoch setup event. +// Otherwise, the TargetEndTime is calculated by adding the duration of the extension to the TargetEndTime of the epoch setup. +// We assume we keep the same view duration for all views in the epoch and all extensions.
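+// For example (hypothetical numbers): an epoch spanning views 1..100 with TargetDuration = 100s has a target view duration of 1s, so an extension whose final view lies 50 views past the epoch's original final view moves the TargetEndTime 50s past the value from the epoch setup event.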
+func TestEpochTargetEndTime(t *testing.T) { + rootSnapshot := unittest.RootSnapshotFixture(participants) + util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { + head, err := rootSnapshot.Head() + require.NoError(t, err) + rootResult, _, err := rootSnapshot.SealedResult() + require.NoError(t, err) + + epoch1Setup := rootResult.ServiceEvents[0].Event.(*flow.EpochSetup) + currentEpoch, err := rootSnapshot.Epochs().Current() + require.NoError(t, err) + rootTargetEndTime := currentEpoch.TargetEndTime() + require.Equal(t, epoch1Setup.TargetEndTime, rootTargetEndTime) + + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) + + // add a block that will trigger EFM and add an epoch extension since the view of the epoch exceeds the safety threshold + block1View := epoch1Setup.FinalView + block1 := unittest.BlockFixture( + unittest.Block.WithParent(head.ID(), head.View, head.Height), + unittest.Block.WithView(block1View), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(head.ID(), block1View, nil), + }), + ) + unittest.InsertAndFinalize(t, state, block1) + + block1snap := state.Final() + assertEpochFallbackTriggered(t, block1snap, true) + assertInPhase(t, block1snap, flow.EpochPhaseFallback) + + epochState, err := block1snap.EpochProtocolState() + require.NoError(t, err) + firstExtension := epochState.EpochExtensions()[0] + targetViewDuration := float64(epoch1Setup.TargetDuration) / float64(epoch1Setup.FinalView-epoch1Setup.FirstView+1) + expectedTargetEndTime := rootTargetEndTime + uint64(float64(firstExtension.FinalView-epoch1Setup.FinalView)*targetViewDuration) + afterFirstExtensionEpoch, err := block1snap.Epochs().Current() + require.NoError(t, err) + afterFirstExtensionTargetEndTime := afterFirstExtensionEpoch.TargetEndTime() + require.Equal(t, expectedTargetEndTime, afterFirstExtensionTargetEndTime) + + // add a second block that exceeds the safety threshold and triggers another epoch extension + block2View := firstExtension.FinalView + block2 := unittest.BlockFixture( + unittest.Block.WithParent(block1.ID(), block1.View, block1.Height), + unittest.Block.WithView(block2View), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(block1.ID(), block2View, nil), + }), + ) + unittest.InsertAndFinalize(t, state, block2) + + block2snap := state.Final() + epochState, err = block2snap.EpochProtocolState() + require.NoError(t, err) + secondExtension := epochState.EpochExtensions()[1] + expectedTargetEndTime = rootTargetEndTime + uint64(float64(secondExtension.FinalView-epoch1Setup.FinalView)*targetViewDuration) + afterSecondExtensionEpoch, err := block2snap.Epochs().Current() + require.NoError(t, err) + afterSecondExtensionTargetEndTime := afterSecondExtensionEpoch.TargetEndTime() + require.Equal(t, expectedTargetEndTime, afterSecondExtensionTargetEndTime) + }) +} + +// TestEpochTargetDuration ensures that the target duration of an epoch is correctly calculated depending on whether the epoch has been extended. +// As long as no extension has been added, the TargetDuration is simply the value from the epoch setup event. +// Otherwise, the TargetDuration is calculated by adding the duration of the extension to the TargetDuration of the epoch setup. +// We assume we keep the same view duration for all views in the epoch and all extensions.
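+// For example (hypothetical numbers): with the same 1s target view duration, an extension covering 50 views (FinalView - FirstView + 1 = 50) increases the TargetDuration by 50s over the value from the epoch setup event.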
+func TestEpochTargetDuration(t *testing.T) { + rootSnapshot := unittest.RootSnapshotFixture(participants) + util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState, mutableState realprotocol.MutableProtocolState) { + head, err := rootSnapshot.Head() + require.NoError(t, err) + rootResult, _, err := rootSnapshot.SealedResult() + require.NoError(t, err) + + epoch1Setup := rootResult.ServiceEvents[0].Event.(*flow.EpochSetup) + currentEpoch, err := rootSnapshot.Epochs().Current() + require.NoError(t, err) + rootTargetDuration := currentEpoch.TargetDuration() + require.Equal(t, epoch1Setup.TargetDuration, rootTargetDuration) + + expectedStateIdCalculator := calculateExpectedStateId(t, mutableState) + + // add a block that will trigger EFM and add an epoch extension since the view of the epoch exceeds the safety threshold + block1View := epoch1Setup.FinalView + block1 := unittest.BlockFixture( + unittest.Block.WithParent(head.ID(), head.View, head.Height), + unittest.Block.WithView(block1View), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(head.ID(), block1View, nil), + }), + ) + unittest.InsertAndFinalize(t, state, block1) + + assertEpochFallbackTriggered(t, state.Final(), true) + assertInPhase(t, state.Final(), flow.EpochPhaseFallback) + + epochState, err := state.Final().EpochProtocolState() + require.NoError(t, err) + firstExtension := epochState.EpochExtensions()[0] + targetViewDuration := float64(epoch1Setup.TargetDuration) / float64(epoch1Setup.FinalView-epoch1Setup.FirstView+1) + afterFirstExtensionEpoch, err := state.Final().Epochs().Current() + require.NoError(t, err) + afterFirstExtensionTargetDuration := afterFirstExtensionEpoch.TargetDuration() + expectedTargetDuration := rootTargetDuration + uint64(float64(firstExtension.FinalView-firstExtension.FirstView+1)*targetViewDuration) + require.Equal(t, expectedTargetDuration, afterFirstExtensionTargetDuration) + + // add a second block that exceeds the safety threshold and triggers another epoch extension + block2View := firstExtension.FinalView + block2 := unittest.BlockFixture( + unittest.Block.WithParent(block1.ID(), block1.View, block1.Height), + unittest.Block.WithView(block2View), + unittest.Block.WithPayload( + flow.Payload{ + ProtocolStateID: expectedStateIdCalculator(block1.ID(), block2View, nil), + }), + ) + unittest.InsertAndFinalize(t, state, block2) + + epochState, err = state.Final().EpochProtocolState() + require.NoError(t, err) + secondExtension := epochState.EpochExtensions()[1] + afterSecondExtensionEpoch, err := state.Final().Epochs().Current() + require.NoError(t, err) + afterSecondExtensionTargetDuration := afterSecondExtensionEpoch.TargetDuration() + expectedTargetDuration = rootTargetDuration + uint64(float64(secondExtension.FinalView-epoch1Setup.FinalView)*targetViewDuration) + require.Equal(t, expectedTargetDuration, afterSecondExtensionTargetDuration) + }) +} + func TestExtendInvalidSealsInBlock(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := storeutil.StorageLayer(t, db) + db := pebbleimpl.ToDB(pdb) + all := store.InitAll(metrics, db) // create a event consumer to test epoch transition events distributor := events.NewDistributor() @@ -1757,18 +2812,21 @@ func TestExtendInvalidSealsInBlock(t 
*testing.T) { consumer.On("BlockProcessable", mock.Anything, mock.Anything) rootSnapshot := unittest.RootSnapshotFixture(participants) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) state, err := protocol.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) @@ -1777,19 +2835,28 @@ func TestExtendInvalidSealsInBlock(t *testing.T) { head, err := rootSnapshot.Head() require.NoError(t, err) - block1 := unittest.BlockWithParentFixture(head) - block1.Payload.Guarantees = nil - block1.Header.PayloadHash = block1.Payload.Hash() + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) block1Receipt := unittest.ReceiptForBlockFixture(block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(block1Receipt))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(block1Receipt), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) block1Seal := unittest.Seal.Fixture(unittest.Seal.WithResult(&block1Receipt.ExecutionResult)) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.Payload{ - Seals: []*flow.Seal{block1Seal}, - }) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{block1Seal}, + ProtocolStateID: rootProtocolStateID, + }, + ) sealValidator := mockmodule.NewSealValidator(t) sealValidator.On("Validate", mock.Anything). @@ -1797,13 +2864,13 @@ func TestExtendInvalidSealsInBlock(t *testing.T) { if candidate.ID() == block3.ID() { return nil } - seal, _ := all.Seals.HighestInFork(candidate.Header.ParentID) + seal, _ := all.Seals.HighestInFork(candidate.ParentID) return seal }, func(candidate *flow.Block) error { if candidate.ID() == block3.ID() { - return engine.NewInvalidInputError("") + return engine.NewInvalidInputErrorf("") } - _, err := all.Seals.HighestInFork(candidate.Header.ParentID) + _, err := all.Seals.HighestInFork(candidate.ParentID) return err }). 
Times(3) @@ -1821,11 +2888,11 @@ func TestExtendInvalidSealsInBlock(t *testing.T) { ) require.NoError(t, err) - err = fullState.Extend(context.Background(), block1) + err = fullState.Extend(context.Background(), unittest.ProposalFromBlock(block1)) require.NoError(t, err) - err = fullState.Extend(context.Background(), block2) + err = fullState.Extend(context.Background(), unittest.ProposalFromBlock(block2)) require.NoError(t, err) - err = fullState.Extend(context.Background(), block3) + err = fullState.Extend(context.Background(), unittest.ProposalFromBlock(block3)) require.Error(t, err) require.True(t, st.IsInvalidExtensionError(err)) }) @@ -1833,16 +2900,19 @@ func TestExtendInvalidSealsInBlock(t *testing.T) { func TestHeaderExtendValid(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.FollowerState) { + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.FollowerState) { head, err := rootSnapshot.Head() require.NoError(t, err) _, seal, err := rootSnapshot.SealedResult() require.NoError(t, err) - extend := unittest.BlockWithParentFixture(head) - extend.SetPayload(flow.EmptyPayload()) + extend := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) - err = state.ExtendCertified(context.Background(), extend, unittest.CertifyBlock(extend.Header)) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(extend)) require.NoError(t, err) finalCommit, err := state.Final().Commit() @@ -1853,67 +2923,71 @@ func TestHeaderExtendValid(t *testing.T) { func TestHeaderExtendMissingParent(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.FollowerState) { - extend := unittest.BlockFixture() - extend.Payload.Guarantees = nil - extend.Payload.Seals = nil - extend.Header.Height = 2 - extend.Header.View = 2 - extend.Header.ParentID = unittest.BlockFixture().ID() - extend.Header.PayloadHash = extend.Payload.Hash() + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.FollowerState) { + extend := unittest.BlockFixture( + unittest.Block.WithHeight(2), + unittest.Block.WithParentView(1), + unittest.Block.WithView(2), + ) - err := state.ExtendCertified(context.Background(), &extend, unittest.CertifyBlock(extend.Header)) + err := state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(extend)) require.Error(t, err) require.False(t, st.IsInvalidExtensionError(err), err) // verify seal not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(extend.ID(), &sealID)) + err = operation.LookupLatestSealAtBlock(db.Reader(), extend.ID(), &sealID) require.Error(t, err) - require.ErrorIs(t, err, stoerr.ErrNotFound) + require.ErrorIs(t, err, storage.ErrNotFound) }) } func TestHeaderExtendHeightTooSmall(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.FollowerState) { + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.FollowerState) { head, err := rootSnapshot.Head() require.NoError(t, err) - block1 := 
unittest.BlockWithParentFixture(head) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) // create another block that points to the previous block `block1` as parent // but has _same_ height as parent. This violates the condition that a child's // height must increment the parent's height by one, i.e. it should be rejected // by the follower right away - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.Header.Height = block1.Header.Height + block2 := unittest.BlockWithParentFixture(block1.ToHeader()) + block2.Height = block1.Height - err = state.ExtendCertified(context.Background(), block1, block2.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block1, block2)) require.NoError(t, err) - err = state.ExtendCertified(context.Background(), block2, unittest.CertifyBlock(block2.Header)) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block2)) require.False(t, st.IsInvalidExtensionError(err)) // verify seal not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(block2.ID(), &sealID)) - require.ErrorIs(t, err, stoerr.ErrNotFound) + err = operation.LookupLatestSealAtBlock(db.Reader(), block2.ID(), &sealID) + require.ErrorIs(t, err, storage.ErrNotFound) }) } func TestHeaderExtendHeightTooLarge(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.FollowerState) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.FollowerState) { head, err := rootSnapshot.Head() require.NoError(t, err) - block := unittest.BlockWithParentFixture(head) - block.SetPayload(flow.EmptyPayload()) + block := unittest.BlockWithParentAndPayload( + head, + *flow.NewEmptyPayload(), + ) // set an invalid height - block.Header.Height = head.Height + 2 + block.Height = head.Height + 2 - err = state.ExtendCertified(context.Background(), block, unittest.CertifyBlock(block.Header)) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block)) require.False(t, st.IsInvalidExtensionError(err)) }) } @@ -1924,28 +2998,32 @@ func TestExtendBlockProcessable(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) head, err := rootSnapshot.Head() require.NoError(t, err) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) consumer := mockprotocol.NewConsumer(t) - util.RunWithFullProtocolStateAndConsumer(t, rootSnapshot, consumer, func(db *badger.DB, state *protocol.ParticipantState) { - block := unittest.BlockWithParentFixture(head) - child := unittest.BlockWithParentFixture(block.Header) - grandChild := unittest.BlockWithParentFixture(child.Header) + util.RunWithFullProtocolStateAndConsumer(t, rootSnapshot, consumer, func(db storage.DB, state *protocol.ParticipantState) { + block := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + child := unittest.BlockWithParentProtocolState(block) + grandChild := unittest.BlockWithParentProtocolState(child) // extend block using certifying QC, expect that BlockProcessable will be emitted once - consumer.On("BlockProcessable", block.Header, child.Header.QuorumCertificate()).Once() - err := state.ExtendCertified(context.Background(), block, child.Header.QuorumCertificate()) +
consumer.On("BlockProcessable", block.ToHeader(), child.ParentQC()).Once() + err := state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block, child)) require.NoError(t, err) // extend block without certifying QC, expect that BlockProcessable won't be called - err = state.Extend(context.Background(), child) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(child)) require.NoError(t, err) consumer.AssertNumberOfCalls(t, "BlockProcessable", 1) // extend block using certifying QC, expect that BlockProcessable will be emitted twice. // One for parent block and second for current block. - grandChildCertifyingQC := unittest.CertifyBlock(grandChild.Header) - consumer.On("BlockProcessable", child.Header, grandChild.Header.QuorumCertificate()).Once() - consumer.On("BlockProcessable", grandChild.Header, grandChildCertifyingQC).Once() - err = state.ExtendCertified(context.Background(), grandChild, grandChildCertifyingQC) + certifiedGrandchild := unittest.NewCertifiedBlock(grandChild) + consumer.On("BlockProcessable", child.ToHeader(), grandChild.ParentQC()).Once() + consumer.On("BlockProcessable", grandChild.ToHeader(), certifiedGrandchild.CertifyingQC).Once() + err = state.ExtendCertified(context.Background(), certifiedGrandchild) require.NoError(t, err) }) } @@ -1957,25 +3035,39 @@ func TestExtendBlockProcessable(t *testing.T) { // The Follower should accept this block since tracking of orphan blocks is implemented by another component. func TestFollowerHeaderExtendBlockNotConnected(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.FollowerState) { + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.FollowerState) { head, err := rootSnapshot.Head() require.NoError(t, err) - block1 := unittest.BlockWithParentFixture(head) - err = state.ExtendCertified(context.Background(), block1, unittest.CertifyBlock(block1.Header)) + // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where + // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view. 
+ usedViews := make(map[uint64]struct{}) + usedViews[head.View] = struct{}{} + + block1 := unittest.BlockWithParentAndPayloadAndUniqueView( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + usedViews, + ) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block1)) require.NoError(t, err) err = state.Finalize(context.Background(), block1.ID()) require.NoError(t, err) // create a fork at view/height 1 and try to connect it to root - block2 := unittest.BlockWithParentFixture(head) - err = state.ExtendCertified(context.Background(), block2, unittest.CertifyBlock(block2.Header)) + block2 := unittest.BlockWithParentAndPayloadAndUniqueView( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + usedViews, + ) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block2)) require.NoError(t, err) // verify seal not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(block2.ID(), &sealID)) + err = operation.LookupLatestSealAtBlock(db.Reader(), block2.ID(), &sealID) require.NoError(t, err) }) } @@ -1987,26 +3079,40 @@ func TestFollowerHeaderExtendBlockNotConnected(t *testing.T) { // The Participant should reject this block as an outdated chain extension func TestParticipantHeaderExtendBlockNotConnected(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { head, err := rootSnapshot.Head() require.NoError(t, err) - block1 := unittest.BlockWithParentFixture(head) - err = state.Extend(context.Background(), block1) + // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where + // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view. 
+ usedViews := make(map[uint64]struct{}) + usedViews[head.View] = struct{}{} + + block1 := unittest.BlockWithParentAndPayloadAndUniqueView( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + usedViews, + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block1)) require.NoError(t, err) err = state.Finalize(context.Background(), block1.ID()) require.NoError(t, err) // create a fork at view/height 1 and try to connect it to root - block2 := unittest.BlockWithParentFixture(head) - err = state.Extend(context.Background(), block2) + block2 := unittest.BlockWithParentAndPayloadAndUniqueView( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + usedViews, + ) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2)) require.True(t, st.IsOutdatedExtensionError(err), err) // verify seal not indexed var sealID flow.Identifier - err = db.View(operation.LookupLatestSealAtBlock(block2.ID(), &sealID)) - require.ErrorIs(t, err, stoerr.ErrNotFound) + err = operation.LookupLatestSealAtBlock(db.Reader(), block2.ID(), &sealID) + require.ErrorIs(t, err, storage.ErrNotFound) }) } @@ -2014,15 +3120,18 @@ func TestHeaderExtendHighestSeal(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) head, err := rootSnapshot.Head() require.NoError(t, err) - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.FollowerState) { + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.FollowerState) { + // create block2 and block3 - block2 := unittest.BlockWithParentFixture(head) - block2.SetPayload(flow.EmptyPayload()) + block2 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.EmptyPayload()) + block3 := unittest.BlockWithParentProtocolState(block2) - err := state.ExtendCertified(context.Background(), block2, block3.Header.QuorumCertificate()) + err := state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block2, block3)) require.NoError(t, err) // create receipts and seals for block2 and block3 @@ -2030,23 +3139,33 @@ func TestHeaderExtendHighestSeal(t *testing.T) { receipt3, seal3 := unittest.ReceiptAndSealForBlock(block3) // include the receipts in block4 - block4 := unittest.BlockWithParentFixture(block3.Header) - // include receipts and results - block4.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt3, receipt2))) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + // include receipts and results + unittest.PayloadFixture( + unittest.WithReceipts(receipt3, receipt2), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) // include the seals in block5 - block5 := unittest.BlockWithParentFixture(block4.Header) - // placing seals in the reversed order to test - // Extend will pick the highest sealed block - block5.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal3, seal2))) + block5 := unittest.BlockWithParentAndPayload( + block4.ToHeader(), + // placing seals in the reversed order to test + // Extend will pick the highest sealed block + unittest.PayloadFixture( + unittest.WithSeals(seal3, seal2), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) - err = state.ExtendCertified(context.Background(), block3,
block4.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block3, block4)) require.NoError(t, err) - err = state.ExtendCertified(context.Background(), block4, block5.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block4, block5)) require.NoError(t, err) - err = state.ExtendCertified(context.Background(), block5, unittest.CertifyBlock(block5.Header)) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block5)) require.NoError(t, err) finalCommit, err := state.AtBlockID(block5.ID()).Commit() @@ -2060,22 +3179,24 @@ func TestExtendCertifiedInvalidQC(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) head, err := rootSnapshot.Head() require.NoError(t, err) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { // create child block - block := unittest.BlockWithParentFixture(head) - block.SetPayload(flow.EmptyPayload()) + block := unittest.BlockWithParentAndPayload( + head, + *flow.NewEmptyPayload(), + ) t.Run("qc-invalid-view", func(t *testing.T) { - certifyingQC := unittest.CertifyBlock(block.Header) - certifyingQC.View++ // invalidate block view - err = state.ExtendCertified(context.Background(), block, certifyingQC) + certified := unittest.NewCertifiedBlock(block) + certified.CertifyingQC.View++ // invalidate block view + err = state.ExtendCertified(context.Background(), certified) require.Error(t, err) require.False(t, st.IsOutdatedExtensionError(err)) }) t.Run("qc-invalid-block-id", func(t *testing.T) { - certifyingQC := unittest.CertifyBlock(block.Header) - certifyingQC.BlockID = unittest.IdentifierFixture() // invalidate blockID - err = state.ExtendCertified(context.Background(), block, certifyingQC) + certified := unittest.NewCertifiedBlock(block) + certified.CertifyingQC.BlockID = unittest.IdentifierFixture() // invalidate blockID + err = state.ExtendCertified(context.Background(), certified) require.Error(t, err) require.False(t, st.IsOutdatedExtensionError(err)) }) @@ -2086,7 +3207,8 @@ func TestExtendCertifiedInvalidQC(t *testing.T) { // guarantees with invalid guarantors func TestExtendInvalidGuarantee(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { // create a valid block head, err := rootSnapshot.Head() require.NoError(t, err) @@ -2099,31 +3221,44 @@ func TestExtendInvalidGuarantee(t *testing.T) { validSignerIndices, err := signature.EncodeSignersToIndices(all, all) require.NoError(t, err) - block := unittest.BlockWithParentFixture(head) - payload := flow.EmptyPayload() - payload.Guarantees = []*flow.CollectionGuarantee{ - { - ChainID: cluster.ChainID(), - ReferenceBlockID: head.ID(), - SignerIndices: validSignerIndices, + // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where + // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view. 
+ usedViews := make(map[uint64]struct{}) + usedViews[head.View] = struct{}{} + + payload := flow.Payload{ + Guarantees: []*flow.CollectionGuarantee{ + { + ClusterChainID: cluster.ChainID(), + ReferenceBlockID: head.ID(), + SignerIndices: validSignerIndices, + }, + }, + ProtocolStateID: rootProtocolStateID, } // now the valid block has a guarantee in the payload with valid signer indices. - block.SetPayload(payload) + block := unittest.BlockWithParentAndPayloadAndUniqueView( + head, + payload, + usedViews, + ) // check Extend should accept this valid block - err = state.Extend(context.Background(), block) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.NoError(t, err) // now the guarantee has invalid signer indices: the checksum should have 4 bytes, but it only has 1 payload.Guarantees[0].SignerIndices = []byte{byte(1)} // create new block that has invalid collection guarantee - block = unittest.BlockWithParentFixture(head) - block.SetPayload(payload) + block = unittest.BlockWithParentAndPayloadAndUniqueView( + head, + payload, + usedViews, + ) - err = state.Extend(context.Background(), block) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.True(t, signature.IsInvalidSignerIndicesError(err), err) require.ErrorIs(t, err, signature.ErrInvalidChecksum) require.True(t, st.IsInvalidExtensionError(err), err) @@ -2136,7 +3271,15 @@ func TestExtendInvalidGuarantee(t *testing.T) { checksumMismatch[0] = byte(2) } payload.Guarantees[0].SignerIndices = checksumMismatch - err = state.Extend(context.Background(), block) + block, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: block.HeaderBody, + Payload: payload, + }, + ) + require.NoError(t, err) + + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.True(t, signature.IsInvalidSignerIndicesError(err), err) require.ErrorIs(t, err, signature.ErrInvalidChecksum) require.True(t, st.IsInvalidExtensionError(err), err) @@ -2148,7 +3291,14 @@ func TestExtendInvalidGuarantee(t *testing.T) { wrongTailing[len(wrongTailing)-1] = byte(255) payload.Guarantees[0].SignerIndices = wrongTailing - err = state.Extend(context.Background(), block) + block, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: block.HeaderBody, + Payload: payload, + }, + ) + require.NoError(t, err) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.Error(t, err) require.True(t, signature.IsInvalidSignerIndicesError(err), err) require.ErrorIs(t, err, signature.ErrIllegallyPaddedBitVector) @@ -2157,7 +3307,15 @@ func TestExtendInvalidGuarantee(t *testing.T) { // test incompatible bit vector length wrongbitVectorLength := validSignerIndices[0 : len(validSignerIndices)-1] payload.Guarantees[0].SignerIndices = wrongbitVectorLength - err = state.Extend(context.Background(), block) + block, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: block.HeaderBody, + Payload: payload, + }, + ) + require.NoError(t, err) + + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.True(t, signature.IsInvalidSignerIndicesError(err), err) require.ErrorIs(t, err, signature.ErrIncompatibleBitVectorLength) require.True(t, st.IsInvalidExtensionError(err), err) @@ -2167,7 +3325,15 @@ func TestExtendInvalidGuarantee(t *testing.T) { // test the ReferenceBlockID is not found payload.Guarantees[0].ReferenceBlockID = flow.ZeroID - err = state.Extend(context.Background(), block) + block, err = flow.NewBlock( + flow.UntrustedBlock{ +
HeaderBody: block.HeaderBody, + Payload: payload, + }, + ) + require.NoError(t, err) + + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.ErrorIs(t, err, storage.ErrNotFound) require.True(t, st.IsInvalidExtensionError(err), err) @@ -2180,8 +3346,16 @@ func TestExtendInvalidGuarantee(t *testing.T) { // return the protocol.ErrNextEpochNotCommitted for testing // test the guarantee has wrong chain ID, and should return ErrClusterNotFound - payload.Guarantees[0].ChainID = flow.ChainID("some_bad_chain_ID") - err = state.Extend(context.Background(), block) + payload.Guarantees[0].ClusterChainID = flow.ChainID("some_bad_chain_ID") + block, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: block.HeaderBody, + Payload: payload, + }, + ) + require.NoError(t, err) + + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.Error(t, err) require.ErrorIs(t, err, realprotocol.ErrClusterNotFound) require.True(t, st.IsInvalidExtensionError(err), err) @@ -2191,36 +3365,47 @@ func TestExtendInvalidGuarantee(t *testing.T) { // If block B is finalized and contains a seal for block A, then A is the last sealed block func TestSealed(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.FollowerState) { + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.FollowerState) { head, err := rootSnapshot.Head() require.NoError(t, err) // block 1 will be sealed - block1 := unittest.BlockWithParentFixture(head) - + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) // block 2 contains receipt for block 1 - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) - err = state.ExtendCertified(context.Background(), block1, block2.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block1, block2)) require.NoError(t, err) err = state.Finalize(context.Background(), block1.ID()) require.NoError(t, err) // block 3 contains seal for block 1 - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.Payload{ - Seals: []*flow.Seal{seal1}, - }) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + flow.Payload{ + Seals: []*flow.Seal{seal1}, + ProtocolStateID: rootProtocolStateID, + }, + ) - err = state.ExtendCertified(context.Background(), block2, block3.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block2, block3)) require.NoError(t, err) err = state.Finalize(context.Background(), block2.ID()) require.NoError(t, err) - err = state.ExtendCertified(context.Background(), block3, unittest.CertifyBlock(block3.Header)) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block3)) require.NoError(t, err) err = state.Finalize(context.Background(), block3.ID()) require.NoError(t, err) @@ -2237,21 +3422,25 @@ func TestSealed(t *testing.T) { // A non atomic bug would be: header 
is found in DB, but payload index is not found func TestCacheAtomicity(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) util.RunWithFollowerProtocolStateAndHeaders(t, rootSnapshot, - func(db *badger.DB, state *protocol.FollowerState, headers storage.Headers, index storage.Index) { + func(db storage.DB, state *protocol.FollowerState, headers storage.Headers, index storage.Index) { head, err := rootSnapshot.Head() require.NoError(t, err) - block := unittest.BlockWithParentFixture(head) + block := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) blockID := block.ID() // check 100 times to see if either 1) or 2) is satisfied var wg sync.WaitGroup wg.Add(1) go func(blockID flow.Identifier) { - for i := 0; i < 100; i++ { + for range 100 { _, err := headers.ByBlockID(blockID) - if errors.Is(err, stoerr.ErrNotFound) { + if errors.Is(err, storage.ErrNotFound) { continue } require.NoError(t, err) @@ -2264,7 +3453,7 @@ func TestCacheAtomicity(t *testing.T) { // storing the block to the database, which is supposed to be an atomic update to headers and index, // both in the badger database and the cache. - err = state.ExtendCertified(context.Background(), block, unittest.CertifyBlock(block.Header)) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block)) require.NoError(t, err) wg.Wait() }) } @@ -2272,11 +3461,13 @@ func TestCacheAtomicity(t *testing.T) { // TestHeaderInvalidTimestamp tests that extending header with invalid timestamp results in sentinel error func TestHeaderInvalidTimestamp(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := storeutil.StorageLayer(t, db) + db := pebbleimpl.ToDB(pdb) + all := store.InitAll(metrics, db) // create an event consumer to test epoch transition events distributor := events.NewDistributor() @@ -2285,20 +3476,22 @@ func TestHeaderInvalidTimestamp(t *testing.T) { block, result, seal := unittest.BootstrapFixture(participants) qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(block.ID())) - rootSnapshot, err := inmem.SnapshotFromBootstrapState(block, result, seal, qc) + rootSnapshot, err := unittest.SnapshotFromBootstrapState(block, result, seal, qc) require.NoError(t, err) state, err := protocol.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) @@ -2320,11 +3513,10 @@ func TestHeaderInvalidTimestamp(t *testing.T) { ) require.NoError(t, err) - extend := unittest.BlockWithParentFixture(block.Header) + extend := unittest.BlockWithParentFixture(block.ToHeader()) extend.Payload.Guarantees = nil - extend.Header.PayloadHash = extend.Payload.Hash() - err = fullState.Extend(context.Background(), extend) + err = fullState.Extend(context.Background(), unittest.ProposalFromBlock(extend)) assert.Error(t, err, "a proposal with invalid timestamp has to be rejected") assert.True(t, st.IsInvalidExtensionError(err), "if timestamp is invalid it should return invalid block error") }) } @@ -2334,53 +3526,98 @@ func TestHeaderInvalidTimestamp(t *testing.T) { //
where the second extend doesn't result in an error and is effectively a no-op. func TestProtocolStateIdempotent(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) head, err := rootSnapshot.Head() require.NoError(t, err) t.Run("follower", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.FollowerState) { - block := unittest.BlockWithParentFixture(head) - err := state.ExtendCertified(context.Background(), block, unittest.CertifyBlock(block.Header)) + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.FollowerState) { + block := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err := state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block)) require.NoError(t, err) // same operation should be no-op - err = state.ExtendCertified(context.Background(), block, unittest.CertifyBlock(block.Header)) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block)) require.NoError(t, err) }) }) t.Run("participant", func(t *testing.T) { - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *protocol.ParticipantState) { - block := unittest.BlockWithParentFixture(head) - err := state.Extend(context.Background(), block) + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *protocol.ParticipantState) { + block := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err := state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.NoError(t, err) // same operation should be no-op - err = state.Extend(context.Background(), block) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block)) require.NoError(t, err) - err = state.ExtendCertified(context.Background(), block, unittest.CertifyBlock(block.Header)) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block)) require.NoError(t, err) }) }) } -func assertEpochEmergencyFallbackTriggered(t *testing.T, state realprotocol.State, expected bool) { - triggered, err := state.Params().EpochFallbackTriggered() +// assertEpochFallbackTriggered tests that the given `stateSnapshot` has the `EpochFallbackTriggered` flag set to the `expected` value. +func assertEpochFallbackTriggered(t *testing.T, stateSnapshot realprotocol.Snapshot, expected bool) { + epochState, err := stateSnapshot.EpochProtocolState() require.NoError(t, err) - assert.Equal(t, expected, triggered) + assert.Equal(t, expected, epochState.EpochFallbackTriggered()) +} + +// assertInPhase tests that the input snapshot is in the expected epoch phase. +func assertInPhase(t *testing.T, snap realprotocol.Snapshot, expectedPhase flow.EpochPhase) { + phase, err := snap.EpochPhase() + require.NoError(t, err) + assert.Equal(t, expectedPhase, phase) } // mockMetricsForRootSnapshot mocks the given metrics mock object to expect all // metrics which are set during bootstrapping and building blocks.
func mockMetricsForRootSnapshot(metricsMock *mockmodule.ComplianceMetrics, rootSnapshot *inmem.Snapshot) { - metricsMock.On("CurrentEpochCounter", rootSnapshot.Encodable().Epochs.Current.Counter) - metricsMock.On("CurrentEpochPhase", rootSnapshot.Encodable().Phase) - metricsMock.On("CurrentEpochFinalView", rootSnapshot.Encodable().Epochs.Current.FinalView) - metricsMock.On("CommittedEpochFinalView", rootSnapshot.Encodable().Epochs.Current.FinalView) - metricsMock.On("CurrentDKGPhase1FinalView", rootSnapshot.Encodable().Epochs.Current.DKGPhase1FinalView) - metricsMock.On("CurrentDKGPhase2FinalView", rootSnapshot.Encodable().Epochs.Current.DKGPhase2FinalView) - metricsMock.On("CurrentDKGPhase3FinalView", rootSnapshot.Encodable().Epochs.Current.DKGPhase3FinalView) + epochProtocolState := rootSnapshot.Encodable().SealingSegment.LatestProtocolStateEntry().EpochEntry + epochSetup := epochProtocolState.CurrentEpochSetup + metricsMock.On("CurrentEpochCounter", epochSetup.Counter) + metricsMock.On("CurrentEpochPhase", epochProtocolState.EpochPhase()) + metricsMock.On("CurrentEpochFinalView", epochSetup.FinalView) + metricsMock.On("CurrentDKGPhaseViews", epochSetup.DKGPhase1FinalView, epochSetup.DKGPhase2FinalView, epochSetup.DKGPhase3FinalView) metricsMock.On("BlockSealed", mock.Anything) metricsMock.On("BlockFinalized", mock.Anything) + metricsMock.On("ProtocolStateVersion", mock.Anything) metricsMock.On("FinalizedHeight", mock.Anything) metricsMock.On("SealedHeight", mock.Anything) } + +func getRootProtocolStateID(t *testing.T, rootSnapshot *inmem.Snapshot) flow.Identifier { + rootProtocolState, err := rootSnapshot.ProtocolState() + require.NoError(t, err) + return rootProtocolState.ID() +} + +// calculateExpectedStateId is a utility function which makes it easier to get the expected protocol state ID after applying service events contained in seals. +func calculateExpectedStateId(t *testing.T, mutableProtocolState realprotocol.MutableProtocolState) func(parentBlockID flow.Identifier, candidateView uint64, candidateSeals []*flow.Seal) flow.Identifier { + return func(parentBlockID flow.Identifier, candidateView uint64, candidateSeals []*flow.Seal) flow.Identifier { + expectedStateID, err := mutableProtocolState.EvolveState(deferred.NewDeferredBlockPersist(), parentBlockID, candidateView, candidateSeals) + require.NoError(t, err) + return expectedStateID + } +} + +// nextUnusedViewSince is a utility function which: +// - returns the smallest view number which is greater than the given `view` +// and NOT contained in the `forbiddenViews` set. +// - adds the returned view number to `forbiddenViews` to prevent it from being used again.
+func nextUnusedViewSince(view uint64, forbiddenViews map[uint64]struct{}) uint64 { + next := view + 1 + for { + if _, exists := forbiddenViews[next]; !exists { + forbiddenViews[next] = struct{}{} + return next + } + next++ + } +} diff --git a/state/protocol/badger/params.go b/state/protocol/badger/params.go deleted file mode 100644 index 7f19d26234f..00000000000 --- a/state/protocol/badger/params.go +++ /dev/null @@ -1,113 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage/badger/operation" -) - -type Params struct { - state *State -} - -var _ protocol.Params = (*Params)(nil) - -func (p Params) ChainID() (flow.ChainID, error) { - - // retrieve root header - root, err := p.Root() - if err != nil { - return "", fmt.Errorf("could not get root: %w", err) - } - - return root.ChainID, nil -} - -func (p Params) SporkID() (flow.Identifier, error) { - - var sporkID flow.Identifier - err := p.state.db.View(operation.RetrieveSporkID(&sporkID)) - if err != nil { - return flow.ZeroID, fmt.Errorf("could not get spork id: %w", err) - } - - return sporkID, nil -} - -func (p Params) SporkRootBlockHeight() (uint64, error) { - var sporkRootBlockHeight uint64 - err := p.state.db.View(operation.RetrieveSporkRootBlockHeight(&sporkRootBlockHeight)) - if err != nil { - return 0, fmt.Errorf("could not get spork root block height: %w", err) - } - - return sporkRootBlockHeight, nil -} - -func (p Params) ProtocolVersion() (uint, error) { - - var version uint - err := p.state.db.View(operation.RetrieveProtocolVersion(&version)) - if err != nil { - return 0, fmt.Errorf("could not get protocol version: %w", err) - } - - return version, nil -} - -func (p Params) EpochCommitSafetyThreshold() (uint64, error) { - - var threshold uint64 - err := p.state.db.View(operation.RetrieveEpochCommitSafetyThreshold(&threshold)) - if err != nil { - return 0, fmt.Errorf("could not get epoch commit safety threshold") - } - return threshold, nil -} - -func (p Params) EpochFallbackTriggered() (bool, error) { - var triggered bool - err := p.state.db.View(operation.CheckEpochEmergencyFallbackTriggered(&triggered)) - if err != nil { - return false, fmt.Errorf("could not check epoch fallback triggered: %w", err) - } - return triggered, nil -} - -func (p Params) Root() (*flow.Header, error) { - - // look up root block ID - var rootID flow.Identifier - err := p.state.db.View(operation.LookupBlockHeight(p.state.rootHeight, &rootID)) - if err != nil { - return nil, fmt.Errorf("could not look up root header: %w", err) - } - - // retrieve root header - header, err := p.state.headers.ByBlockID(rootID) - if err != nil { - return nil, fmt.Errorf("could not retrieve root header: %w", err) - } - - return header, nil -} - -func (p Params) Seal() (*flow.Seal, error) { - - // look up root header - var rootID flow.Identifier - err := p.state.db.View(operation.LookupBlockHeight(p.state.rootHeight, &rootID)) - if err != nil { - return nil, fmt.Errorf("could not look up root header: %w", err) - } - - // retrieve the root seal - seal, err := p.state.seals.HighestInFork(rootID) - if err != nil { - return nil, fmt.Errorf("could not retrieve root seal: %w", err) - } - - return seal, nil -} diff --git a/state/protocol/badger/snapshot.go b/state/protocol/badger/snapshot.go index 93f23b38c64..6a3e3b48768 100644 --- a/state/protocol/badger/snapshot.go +++ b/state/protocol/badger/snapshot.go @@ -1,33 +1,31 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS 
RESERVED - package badger import ( "errors" "fmt" - "github.com/dgraph-io/badger/v2" - + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/flow/mapfunc" - "github.com/onflow/flow-go/model/flow/order" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/fork" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/inmem" - "github.com/onflow/flow-go/state/protocol/invalid" - "github.com/onflow/flow-go/state/protocol/seed" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" + "github.com/onflow/flow-go/storage/operation" ) -// Snapshot implements the protocol.Snapshot interface. -// It represents a read-only immutable snapshot of the protocol state at the -// block it is constructed with. It allows efficient access to data associated directly -// with blocks at a given state (finalized, sealed), such as the related header, commit, +// Snapshot pertains to a specific fork of the main consensus. Specifically, it references +// one block denoted as the `Head`. It allows efficient access to the protocol state +// that was active at that specific block (finalized, sealed), such as the related header, commit, // seed or descending blocks. A block snapshot can lazily convert to an epoch snapshot in // order to make data associated directly with epochs accessible through its API. +// +// This Snapshot implements the [protocol.Snapshot] interface for KNOWN BLOCKS. +// Existence of the reference block is currently ensured, because Snapshot instances are +// only created by the AtBlockID and AtHeight methods of State, which both check the existence +// of the block first. type Snapshot struct { state *State blockID flow.Identifier // reference block for this snapshot @@ -45,6 +43,7 @@ var _ protocol.Snapshot = (*FinalizedSnapshot)(nil) // newSnapshotWithIncorporatedReferenceBlock creates a new state snapshot with the given reference block. // CAUTION: The caller is responsible for ensuring that the reference block has been incorporated. +// For unknown blocks, please use `invalid.NewSnapshot` or `invalid.NewSnapshotf`.
func newSnapshotWithIncorporatedReferenceBlock(state *State, blockID flow.Identifier) *Snapshot { return &Snapshot{ state: state, @@ -69,8 +68,7 @@ func (s *FinalizedSnapshot) Head() (*flow.Header, error) { } func (s *Snapshot) Head() (*flow.Header, error) { - head, err := s.state.headers.ByBlockID(s.blockID) - return head, err + return s.state.headers.ByBlockID(s.blockID) } // QuorumCertificate (QC) returns a valid quorum certificate pointing to the @@ -84,103 +82,28 @@ func (s *Snapshot) QuorumCertificate() (*flow.QuorumCertificate, error) { return qc, nil } -func (s *Snapshot) Phase() (flow.EpochPhase, error) { - status, err := s.state.epoch.statuses.ByBlockID(s.blockID) +func (s *Snapshot) EpochPhase() (flow.EpochPhase, error) { + epochState, err := s.state.protocolState.EpochStateAtBlockID(s.blockID) if err != nil { - return flow.EpochPhaseUndefined, fmt.Errorf("could not retrieve epoch status: %w", err) + return flow.EpochPhaseUndefined, fmt.Errorf("could not retrieve protocol state snapshot: %w", err) } - phase, err := status.Phase() - return phase, err + return epochState.EpochPhase(), nil } -func (s *Snapshot) Identities(selector flow.IdentityFilter) (flow.IdentityList, error) { - - // TODO: CAUTION SHORTCUT - // we retrieve identities based on the initial identity table from the EpochSetup - // event here -- this will need revision to support mid-epoch identity changes - // once slashing is implemented - - status, err := s.state.epoch.statuses.ByBlockID(s.blockID) - if err != nil { - return nil, err - } - - setup, err := s.state.epoch.setups.ByID(status.CurrentEpoch.SetupID) +func (s *Snapshot) Identities(selector flow.IdentityFilter[flow.Identity]) (flow.IdentityList, error) { + epochState, err := s.state.protocolState.EpochStateAtBlockID(s.blockID) if err != nil { return nil, err } - // sort the identities so the 'IsCached' binary search works - identities := setup.Participants.Sort(order.Canonical) - - // get identities that are in either last/next epoch but NOT in the current epoch - var otherEpochIdentities flow.IdentityList - phase, err := status.Phase() - if err != nil { - return nil, fmt.Errorf("could not get phase: %w", err) - } - switch phase { - // during staking phase (the beginning of the epoch) we include identities - // from the previous epoch that are now un-staking - case flow.EpochPhaseStaking: - - if !status.HasPrevious() { - break - } - - previousSetup, err := s.state.epoch.setups.ByID(status.PreviousEpoch.SetupID) - if err != nil { - return nil, fmt.Errorf("could not get previous epoch setup event: %w", err) - } - - for _, identity := range previousSetup.Participants { - exists := identities.Exists(identity) - // add identity from previous epoch that is not in current epoch - if !exists { - otherEpochIdentities = append(otherEpochIdentities, identity) - } - } - - // during setup and committed phases (the end of the epoch) we include - // identities that will join in the next epoch - case flow.EpochPhaseSetup, flow.EpochPhaseCommitted: - - nextSetup, err := s.state.epoch.setups.ByID(status.NextEpoch.SetupID) - if err != nil { - return nil, fmt.Errorf("could not get next epoch setup: %w", err) - } - - for _, identity := range nextSetup.Participants { - exists := identities.Exists(identity) - - // add identity from next epoch that is not in current epoch - if !exists { - otherEpochIdentities = append(otherEpochIdentities, identity) - } - } - - default: - return nil, fmt.Errorf("invalid epoch phase: %s", phase) - } - - // add the identities from next/last epoch, 
with weight set to 0 - identities = append( - identities, - otherEpochIdentities.Map(mapfunc.WithWeight(0))..., - ) - // apply the filter to the participants - identities = identities.Filter(selector) - - // apply a deterministic sort to the participants - identities = identities.Sort(order.Canonical) - + identities := epochState.Identities().Filter(selector) return identities, nil } func (s *Snapshot) Identity(nodeID flow.Identifier) (*flow.Identity, error) { // filter identities at snapshot for node ID - identities, err := s.Identities(filter.HasNodeID(nodeID)) + identities, err := s.Identities(filter.HasNodeID[flow.Identity](nodeID)) if err != nil { return nil, fmt.Errorf("could not get identities: %w", err) } @@ -203,6 +126,11 @@ func (s *Snapshot) Commit() (flow.StateCommitment, error) { return seal.FinalState, nil } +// SealedResult returns the most recent included seal as of this block and +// the corresponding execution result. The seal may have been included in a +// parent block, if this block is empty. If this block contains multiple +// seals, this returns the seal for the block with the greatest height. +// TODO document error returns func (s *Snapshot) SealedResult() (*flow.ExecutionResult, *flow.Seal, error) { seal, err := s.state.seals.HighestInFork(s.blockID) if err != nil { @@ -226,33 +154,33 @@ func (s *Snapshot) SealedResult() (*flow.ExecutionResult, *flow.Seal, error) { // - protocol.UnfinalizedSealingSegmentError if sealing segment would contain unfinalized blocks (including orphaned blocks) func (s *Snapshot) SealingSegment() (*flow.SealingSegment, error) { // Lets denote the highest block in the sealing segment `head` (initialized below). - // Based on the tech spec `flow/sealing_segment.md`, the Sealing Segment must contain contain + // Based on the tech spec `flow/sealing_segment.md`, the Sealing Segment must contain // enough history to satisfy _all_ of the following conditions: // (i) The highest sealed block as of `head` needs to be included in the sealing segment. // This is relevant if `head` does not contain any seals. - // (ii) All blocks that are sealed by `head`. This is relevant if head` contains _multiple_ seals. + // (ii) All blocks that are sealed by `head`. This is relevant if `head` contains _multiple_ seals. // (iii) The sealing segment should contain the history back to (including): - // limitHeight := max(head.Height - flow.DefaultTransactionExpiry, SporkRootBlockHeight) + // limitHeight := max(blockSealedAtHead.Height - flow.DefaultTransactionExpiry, sporkRootBlock.Height) // Per convention, we include the blocks for (i) in the `SealingSegment.Blocks`, while the // additional blocks for (ii) and optionally (iii) are contained in as `SealingSegment.ExtraBlocks`. head, err := s.state.blocks.ByID(s.blockID) if err != nil { return nil, fmt.Errorf("could not get snapshot's reference block: %w", err) } - if head.Header.Height < s.state.rootHeight { + if head.Height < s.state.finalizedRootHeight { return nil, protocol.ErrSealingSegmentBelowRootBlock } // Verify that head of sealing segment is finalized. 
- finalizedBlockAtHeight, err := s.state.headers.BlockIDByHeight(head.Header.Height) + finalizedBlockAtHeight, err := s.state.headers.BlockIDByHeight(head.Height) if err != nil { if errors.Is(err, storage.ErrNotFound) { - return nil, protocol.NewUnfinalizedSealingSegmentErrorf("head of sealing segment at height %d is not finalized: %w", head.Header.Height, err) + return nil, protocol.NewUnfinalizedSealingSegmentErrorf("head of sealing segment at height %d is not finalized: %w", head.Height, err) } return nil, fmt.Errorf("exception while retrieving finalized block by height: %w", err) } if finalizedBlockAtHeight != s.blockID { // comparison of fixed-length arrays - return nil, protocol.NewUnfinalizedSealingSegmentErrorf("head of sealing segment is orphaned, finalized block at height %d is %x", head.Header.Height, finalizedBlockAtHeight) + return nil, protocol.NewUnfinalizedSealingSegmentErrorf("head of sealing segment is orphaned, finalized block at height %d is %x", head.Height, finalizedBlockAtHeight) } // STEP (i): highest sealed block as of `head` must be included. @@ -265,14 +193,44 @@ func (s *Snapshot) SealingSegment() (*flow.SealingSegment, error) { return nil, fmt.Errorf("could not get block: %w", err) } + // TODO this is a temporary measure resulting from epoch data being stored outside the + // protocol KV Store. Once epoch data is in the KV Store, we can pass protocolKVStoreSnapshotsDB.ByID + // directly to NewSealingSegmentBuilder (similar to other getters) + getProtocolStateEntry := func(protocolStateID flow.Identifier) (*flow.ProtocolStateEntryWrapper, error) { + kvStoreEntry, err := s.state.protocolKVStoreSnapshotsDB.ByID(protocolStateID) + if err != nil { + return nil, fmt.Errorf("could not get kv store entry: %w", err) + } + kvStoreReader, err := kvstore.VersionedDecode(kvStoreEntry.Version, kvStoreEntry.Data) + if err != nil { + return nil, fmt.Errorf("could not decode kv store entry: %w", err) + } + epochDataEntry, err := s.state.epochProtocolStateEntriesDB.ByID(kvStoreReader.GetEpochStateID()) + if err != nil { + return nil, fmt.Errorf("could not get epoch data: %w", err) + } + return &flow.ProtocolStateEntryWrapper{ + KVStore: flow.PSKeyValueStoreData{ + Version: kvStoreEntry.Version, + Data: kvStoreEntry.Data, + }, + EpochEntry: epochDataEntry, + }, nil + } + // walk through the chain backward until we reach the block referenced by // the latest seal - the returned segment includes this block - builder := flow.NewSealingSegmentBuilder(s.state.results.ByID, s.state.seals.HighestInFork) + builder := flow.NewSealingSegmentBuilder( + s.state.results.ByID, + s.state.seals.HighestInFork, + getProtocolStateEntry, + s.state.sporkRootBlock, + ) scraper := func(header *flow.Header) error { blockID := header.ID() - block, err := s.state.blocks.ByID(blockID) + block, err := s.state.blocks.ProposalByID(blockID) if err != nil { - return fmt.Errorf("could not get block: %w", err) + return fmt.Errorf("could not get proposal: %w", err) } err = builder.AddBlock(block) @@ -300,24 +258,24 @@ func (s *Snapshot) SealingSegment() (*flow.SealingSegment, error) { } // STEP (iii): extended history to allow checking for duplicated collections, i.e.
- // limitHeight = max(head.Height - flow.DefaultTransactionExpiry, SporkRootBlockHeight) - limitHeight := s.state.sporkRootBlockHeight - if head.Header.Height > s.state.sporkRootBlockHeight+flow.DefaultTransactionExpiry { - limitHeight = head.Header.Height - flow.DefaultTransactionExpiry + // limitHeight = max(blockSealedAtHead.Height - flow.DefaultTransactionExpiry, sporkRootBlock.Height) + limitHeight := s.state.sporkRootBlock.Height + if blockSealedAtHead.Height > s.state.sporkRootBlock.Height+flow.DefaultTransactionExpiry { + limitHeight = blockSealedAtHead.Height - flow.DefaultTransactionExpiry } // As we have to satisfy (ii) _and_ (iii), we have to take the longest history, i.e. the lowest height. if lowestSealedByHead.Height < limitHeight { limitHeight = lowestSealedByHead.Height - if limitHeight < s.state.sporkRootBlockHeight { // sanity check; should never happen - return nil, fmt.Errorf("unexpected internal error: calculated history-cutoff at height %d, which is lower than the spork's root height %d", limitHeight, s.state.sporkRootBlockHeight) + if limitHeight < s.state.sporkRootBlock.Height { // sanity check; should never happen + return nil, fmt.Errorf("unexpected internal error: calculated history-cutoff at height %d, which is lower than the spork's root height %d", limitHeight, s.state.sporkRootBlock.Height) } } if limitHeight < blockSealedAtHead.Height { // we need to include extra blocks in sealing segment extraBlocksScraper := func(header *flow.Header) error { blockID := header.ID() - block, err := s.state.blocks.ByID(blockID) + block, err := s.state.blocks.ProposalByID(blockID) if err != nil { return fmt.Errorf("could not get block: %w", err) } @@ -344,6 +302,15 @@ func (s *Snapshot) SealingSegment() (*flow.SealingSegment, error) { return segment, nil } +// Descendants returns the IDs of all descendants of the Head block. +// The IDs are ordered such that parents are included before their children. +// Since all blocks are fully validated before being inserted into the state, +// all returned blocks are validated. +// +// CAUTION: the list of descendants is constructed for each call via database reads, +// and may be expensive to compute, especially if the reference block is old. +// +// No error returns are expected during normal operation. func (s *Snapshot) Descendants() ([]flow.Identifier, error) { descendants, err := s.descendants(s.blockID) if err != nil { @@ -352,23 +319,27 @@ func (s *Snapshot) Descendants() ([]flow.Identifier, error) { return descendants, nil } -func (s *Snapshot) lookupChildren(blockID flow.Identifier) ([]flow.Identifier, error) { - var children flow.IdentifierList - err := s.state.db.View(procedure.LookupBlockChildren(blockID, &children)) - if err != nil { - return nil, fmt.Errorf("could not get children of block %v: %w", blockID, err) - } - return children, nil -} - +// descendants returns the IDs of all known descendants of the given blockID. +// CAUTION: this function behaves correctly only for known blocks (see constructor). +// No error returns are expected during normal operation.
func (s *Snapshot) descendants(blockID flow.Identifier) ([]flow.Identifier, error) { - descendantIDs, err := s.lookupChildren(blockID) + var descendantIDs flow.IdentifierList + err := operation.RetrieveBlockChildren(s.state.db.Reader(), blockID, &descendantIDs) if err != nil { - return nil, err + if !errors.Is(err, storage.ErrNotFound) { + return nil, fmt.Errorf("could not get children of block %v: %w", blockID, err) + } + + // The low-level storage returns `storage.ErrNotFound` in two cases: + // 1. the block is unknown + // 2. the block is known but no children have been indexed yet + // By contract of the constructor, the blockID must correspond to a known block in the database. + // Snapshot instances are only created for known blocks (see constructor). Hence, only case 2 is + // possible here, and we just return an empty list. } - for _, descendantID := range descendantIDs { - additionalIDs, err := s.descendants(descendantID) + for _, child := range descendantIDs { + additionalIDs, err := s.descendants(child) if err != nil { return nil, err } @@ -377,7 +348,7 @@ func (s *Snapshot) descendants(blockID flow.Identifier) ([]flow.Identifier, erro return descendantIDs, nil } -// RandomSource returns the seed for the current block snapshot. +// RandomSource returns the seed for the current block's snapshot. // Expected error returns: // * storage.ErrNotFound is returned if the QC is unknown. func (s *Snapshot) RandomSource() ([]byte, error) { @@ -385,7 +356,7 @@ func (s *Snapshot) RandomSource() ([]byte, error) { if err != nil { return nil, err } - randomSource, err := seed.FromParentQCSignature(qc.SigData) + randomSource, err := model.BeaconSignature(qc) if err != nil { return nil, fmt.Errorf("could not create seed from QC's signature: %w", err) } @@ -402,111 +373,165 @@ func (s *Snapshot) Params() protocol.GlobalParams { return s.state.Params() } +// EpochProtocolState returns the epoch part of dynamic protocol state that the Head block commits to. +// The compliance layer guarantees that only valid blocks are appended to the protocol state. +// Returns state.ErrUnknownSnapshotReference if snapshot reference block is unknown. +// All other errors should be treated as exceptions. +// For each stored block, there should be a corresponding protocol state stored. +func (s *Snapshot) EpochProtocolState() (protocol.EpochProtocolState, error) { + return s.state.protocolState.EpochStateAtBlockID(s.blockID) +} + +// ProtocolState returns the dynamic protocol state that the Head block commits to. +// The compliance layer guarantees that only valid blocks are appended to the protocol state. +// Returns state.ErrUnknownSnapshotReference if snapshot reference block is unknown. +// All other errors should be treated as exceptions. +// For each stored block, there should be a corresponding protocol state stored. +func (s *Snapshot) ProtocolState() (protocol.KVStoreReader, error) { + return s.state.protocolState.KVStoreAtBlockID(s.blockID) +} + +func (s *Snapshot) VersionBeacon() (*flow.SealedVersionBeacon, error) { + head, err := s.state.headers.ByBlockID(s.blockID) + if err != nil { + return nil, err + } + + return s.state.versionBeacons.Highest(head.Height) +} + // EpochQuery encapsulates querying epochs w.r.t. a snapshot. type EpochQuery struct { snap *Snapshot } +var _ protocol.EpochQuery = (*EpochQuery)(nil) + // Current returns the current epoch. -func (q *EpochQuery) Current() protocol.Epoch { +// No errors are expected during normal operation.
-// RandomSource returns the seed for the current block snapshot.
+// RandomSource returns the seed for the current block's snapshot.
 // Expected error returns:
 // * storage.ErrNotFound is returned if the QC is unknown.
 func (s *Snapshot) RandomSource() ([]byte, error) {
@@ -385,7 +356,7 @@ func (s *Snapshot) RandomSource() ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	randomSource, err := seed.FromParentQCSignature(qc.SigData)
+	randomSource, err := model.BeaconSignature(qc)
 	if err != nil {
 		return nil, fmt.Errorf("could not create seed from QC's signature: %w", err)
 	}
@@ -402,111 +373,165 @@ func (s *Snapshot) Params() protocol.GlobalParams {
 	return s.state.Params()
 }
 
+// EpochProtocolState returns the epoch part of dynamic protocol state that the Head block commits to.
+// The compliance layer guarantees that only valid blocks are appended to the protocol state.
+// Returns state.ErrUnknownSnapshotReference if snapshot reference block is unknown.
+// All other errors should be treated as exceptions.
+// For each block stored there should be a protocol state stored.
+func (s *Snapshot) EpochProtocolState() (protocol.EpochProtocolState, error) {
+	return s.state.protocolState.EpochStateAtBlockID(s.blockID)
+}
+
+// ProtocolState returns the dynamic protocol state that the Head block commits to.
+// The compliance layer guarantees that only valid blocks are appended to the protocol state.
+// Returns state.ErrUnknownSnapshotReference if snapshot reference block is unknown.
+// All other errors should be treated as exceptions.
+// For each block stored there should be a protocol state stored.
+func (s *Snapshot) ProtocolState() (protocol.KVStoreReader, error) {
+	return s.state.protocolState.KVStoreAtBlockID(s.blockID)
+}
+
+func (s *Snapshot) VersionBeacon() (*flow.SealedVersionBeacon, error) {
+	head, err := s.state.headers.ByBlockID(s.blockID)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.state.versionBeacons.Highest(head.Height)
+}
+
 // EpochQuery encapsulates querying epochs w.r.t. a snapshot.
 type EpochQuery struct {
 	snap *Snapshot
 }
 
+var _ protocol.EpochQuery = (*EpochQuery)(nil)
+
 // Current returns the current epoch.
-func (q *EpochQuery) Current() protocol.Epoch {
+// No errors are expected during normal operation.
+func (q *EpochQuery) Current() (protocol.CommittedEpoch, error) {
 	// all errors returned from storage reads here are unexpected, because all
 	// snapshots reside within a current epoch, which must be queryable
-	status, err := q.snap.state.epoch.statuses.ByBlockID(q.snap.blockID)
-	if err != nil {
-		return invalid.NewEpochf("could not get epoch status for block %x: %w", q.snap.blockID, err)
-	}
-	setup, err := q.snap.state.epoch.setups.ByID(status.CurrentEpoch.SetupID)
+	epochState, err := q.snap.state.protocolState.EpochStateAtBlockID(q.snap.blockID)
 	if err != nil {
-		return invalid.NewEpochf("could not get current EpochSetup (id=%x) for block %x: %w", status.CurrentEpoch.SetupID, q.snap.blockID, err)
-	}
-	commit, err := q.snap.state.epoch.commits.ByID(status.CurrentEpoch.CommitID)
-	if err != nil {
-		return invalid.NewEpochf("could not get current EpochCommit (id=%x) for block %x: %w", status.CurrentEpoch.CommitID, q.snap.blockID, err)
+		return nil, fmt.Errorf("could not get protocol state snapshot at block %x: %w", q.snap.blockID, err)
 	}
-	firstHeight, _, epochStarted, _, err := q.retrieveEpochHeightBounds(setup.Counter)
+	setup := epochState.EpochSetup()
+	commit := epochState.EpochCommit()
+	firstHeight, _, isFirstHeightKnown, _, err := q.retrieveEpochHeightBounds(setup.Counter)
 	if err != nil {
-		return invalid.NewEpochf("could not get current epoch height bounds: %s", err.Error())
+		return nil, fmt.Errorf("could not get current epoch height bounds: %w", err)
 	}
-	if epochStarted {
-		return inmem.NewStartedEpoch(setup, commit, firstHeight)
+	if isFirstHeightKnown {
+		return inmem.NewEpochWithStartBoundary(setup, commit, epochState.EpochExtensions(), firstHeight), nil
 	}
-	return inmem.NewCommittedEpoch(setup, commit)
+	return inmem.NewCommittedEpoch(setup, commit, epochState.EpochExtensions()), nil
 }
 
-// Next returns the next epoch, if it is available.
-func (q *EpochQuery) Next() protocol.Epoch {
-
-	status, err := q.snap.state.epoch.statuses.ByBlockID(q.snap.blockID)
-	if err != nil {
-		return invalid.NewEpochf("could not get epoch status for block %x: %w", q.snap.blockID, err)
-	}
-	phase, err := status.Phase()
-	if err != nil {
-		// critical error: malformed EpochStatus in storage
-		return invalid.NewEpochf("read malformed EpochStatus from storage: %w", err)
-	}
-	// if we are in the staking phase, the next epoch is not setup yet
-	if phase == flow.EpochPhaseStaking {
-		return invalid.NewEpoch(protocol.ErrNextEpochNotSetup)
-	}
-
-	// if we are in setup phase, return a SetupEpoch
-	nextSetup, err := q.snap.state.epoch.setups.ByID(status.NextEpoch.SetupID)
-	if err != nil {
-		// all errors are critical, because we must be able to retrieve EpochSetup when in setup phase
-		return invalid.NewEpochf("could not get next EpochSetup (id=%x) for block %x: %w", status.NextEpoch.SetupID, q.snap.blockID, err)
-	}
-	if phase == flow.EpochPhaseSetup {
-		return inmem.NewSetupEpoch(nextSetup)
 	}
 
+// NextUnsafe returns the next epoch, if it has been set up but not yet committed.
+// Error returns:
+// - protocol.ErrNextEpochNotSetup if the next epoch has not yet been set up as of the snapshot's reference block
+//   (the reference block resides in the EpochStaking or EpochFallback phase)
+// - protocol.ErrNextEpochAlreadyCommitted if the next epoch has already been committed at the snapshot's reference block
+//   (the reference block resides in the EpochCommitted phase)
+// - generic error in case of unexpected critical internal corruption or bugs
+func (q *EpochQuery) NextUnsafe() (protocol.TentativeEpoch, error) {
+	epochState, err := q.snap.state.protocolState.EpochStateAtBlockID(q.snap.blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not get protocol state snapshot at block %x: %w", q.snap.blockID, err)
+	}
+	switch epochState.EpochPhase() {
+	// if we are in the staking or fallback phase, the next epoch is not set up yet
+	case flow.EpochPhaseStaking, flow.EpochPhaseFallback:
+		return nil, protocol.ErrNextEpochNotSetup
+	// if we are in setup phase, return a [protocol.TentativeEpoch] backed by the [flow.EpochSetup] event
+	case flow.EpochPhaseSetup:
+		return inmem.NewSetupEpoch(epochState.Entry().NextEpochSetup), nil
+	// if we are in committed phase, the caller should use the `NextCommitted` method instead, which we indicate by a sentinel error
+	case flow.EpochPhaseCommitted:
+		return nil, protocol.ErrNextEpochAlreadyCommitted
+	default:
+		return nil, fmt.Errorf("data corruption: unknown epoch phase implies malformed protocol state epoch data")
+	}
+}
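A hypothetical caller-side sketch of this contract: dispatch on `NextUnsafe`'s documented sentinel errors and switch to `NextCommitted` (defined just below) once the next epoch is committed. The helper name and string results are illustrative; only the sentinel errors and accessor signatures come from the diff, and the usual `errors`, `fmt`, and `protocol` imports are assumed:

// describeNextEpoch resolves the "next epoch" via the two accessors.
func describeNextEpoch(epochs protocol.EpochQuery) (string, error) {
	_, err := epochs.NextUnsafe()
	switch {
	case err == nil:
		return "next epoch is set up but not yet committed", nil
	case errors.Is(err, protocol.ErrNextEpochNotSetup):
		return "next epoch is not set up yet", nil
	case errors.Is(err, protocol.ErrNextEpochAlreadyCommitted):
		// once committed, the committed representation must be retrieved via NextCommitted
		if _, err := epochs.NextCommitted(); err != nil {
			return "", fmt.Errorf("could not get committed next epoch: %w", err)
		}
		return "next epoch is committed", nil
	default:
		return "", fmt.Errorf("unexpected failure querying next epoch: %w", err)
	}
}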
-
-	// if we are in committed phase, return a CommittedEpoch
-	nextCommit, err := q.snap.state.epoch.commits.ByID(status.NextEpoch.CommitID)
-	if err != nil {
-		// all errors are critical, because we must be able to retrieve EpochCommit when in committed phase
-		return invalid.NewEpochf("could not get next EpochCommit (id=%x) for block %x: %w", status.NextEpoch.CommitID, q.snap.blockID, err)
+// NextCommitted returns the next epoch as of this snapshot, only if it has been committed already.
+// Error returns:
+// - protocol.ErrNextEpochNotCommitted if the next epoch has not yet been committed at the snapshot's reference block
+//   (the reference block does not reside in the EpochCommitted phase)
+// - generic error in case of unexpected critical internal corruption or bugs
+func (q *EpochQuery) NextCommitted() (protocol.CommittedEpoch, error) {
+	epochState, err := q.snap.state.protocolState.EpochStateAtBlockID(q.snap.blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not get protocol state snapshot at block %x: %w", q.snap.blockID, err)
+	}
+	entry := epochState.Entry()
+
+	switch epochState.EpochPhase() {
+	// if we are in the staking, fallback or setup phase, the next epoch is not yet committed
+	case flow.EpochPhaseStaking, flow.EpochPhaseFallback, flow.EpochPhaseSetup:
+		return nil, protocol.ErrNextEpochNotCommitted
+	case flow.EpochPhaseCommitted:
+		// A protocol state snapshot is immutable and only represents the state as of the corresponding block. The
+		// flow protocol implies that future epochs cannot have extensions, because in order to add extensions to
+		// an epoch, we have to enter that epoch. Hence, `entry.NextEpoch.EpochExtensions` must be empty:
+		if len(entry.NextEpoch.EpochExtensions) > 0 {
+			return nil, irrecoverable.NewExceptionf("state with current epoch %d corrupted, because future epoch %d already has %d extensions",
+				entry.CurrentEpochCommit.Counter, entry.NextEpochSetup.Counter, len(entry.NextEpoch.EpochExtensions))
+		}
+		return inmem.NewCommittedEpoch(entry.NextEpochSetup, entry.NextEpochCommit, entry.NextEpoch.EpochExtensions), nil
+	default:
+		return nil, fmt.Errorf("data corruption: unknown epoch phase implies malformed protocol state epoch data")
 	}
-	return inmem.NewCommittedEpoch(nextSetup, nextCommit)
 }
 
 // Previous returns the previous epoch. During the first epoch after the root
-// block, this returns a sentinel error (since there is no previous epoch).
+// block, this returns [protocol.ErrNoPreviousEpoch] (since there is no previous epoch).
 // For all other epochs, it returns the previous epoch.
-func (q *EpochQuery) Previous() protocol.Epoch {
-
-	status, err := q.snap.state.epoch.statuses.ByBlockID(q.snap.blockID)
+func (q *EpochQuery) Previous() (protocol.CommittedEpoch, error) {
+	epochState, err := q.snap.state.protocolState.EpochStateAtBlockID(q.snap.blockID)
 	if err != nil {
-		return invalid.NewEpochf("could not get epoch status for block %x: %w", q.snap.blockID, err)
+		return nil, fmt.Errorf("could not get protocol state snapshot at block %x: %w", q.snap.blockID, err)
 	}
+	entry := epochState.Entry()
 
 	// CASE 1: there is no previous epoch - this indicates we are in the first
 	// epoch after a spork root or genesis block
-	if !status.HasPrevious() {
-		return invalid.NewEpoch(protocol.ErrNoPreviousEpoch)
+	if !epochState.PreviousEpochExists() {
+		return nil, protocol.ErrNoPreviousEpoch
 	}
 
 	// CASE 2: we are in any other epoch - retrieve the setup and commit events
 	// for the previous epoch
-	setup, err := q.snap.state.epoch.setups.ByID(status.PreviousEpoch.SetupID)
+	setup := entry.PreviousEpochSetup
+	commit := entry.PreviousEpochCommit
+	extensions := entry.PreviousEpoch.EpochExtensions
+
+	firstHeight, finalHeight, firstHeightKnown, finalHeightKnown, err := q.retrieveEpochHeightBounds(setup.Counter)
 	if err != nil {
-		// all errors are critical, because we must be able to retrieve EpochSetup for previous epoch
-		return invalid.NewEpochf("could not get previous EpochSetup (id=%x) for block %x: %w", status.PreviousEpoch.SetupID, q.snap.blockID, err)
+		return nil, fmt.Errorf("could not get epoch height bounds: %w", err)
 	}
-	commit, err := q.snap.state.epoch.commits.ByID(status.PreviousEpoch.CommitID)
-	if err != nil {
-		// all errors are critical, because we must be able to retrieve EpochCommit for previous epoch
-		return invalid.NewEpochf("could not get current EpochCommit (id=%x) for block %x: %w", status.PreviousEpoch.CommitID, q.snap.blockID, err)
+	if firstHeightKnown && finalHeightKnown {
+		// typical case - we usually know both boundaries for a past epoch
+		return inmem.NewEpochWithStartAndEndBoundaries(setup, commit, extensions, firstHeight, finalHeight), nil
 	}
-
-	firstHeight, finalHeight, _, epochEnded, err := q.retrieveEpochHeightBounds(setup.Counter)
-	if err != nil {
-		return invalid.NewEpochf("could not get epoch height bounds: %w", err)
+	if firstHeightKnown && !finalHeightKnown {
+		// this case is possible when the snapshot reference block is un-finalized
+		// and is past an un-finalized epoch boundary
+		return inmem.NewEpochWithStartBoundary(setup, commit, extensions, firstHeight), nil
+	}
+	if !firstHeightKnown && finalHeightKnown {
+		// this case is possible when this node's lowest known block is after
+		// the queried epoch's start boundary
+		return inmem.NewEpochWithEndBoundary(setup, commit, extensions, finalHeight), nil
 	}
-	if epochEnded {
-		return inmem.NewEndedEpoch(setup, commit, firstHeight, finalHeight)
+	if !firstHeightKnown && !finalHeightKnown {
+		// this case is possible when this node's lowest known block is after
+		// the queried epoch's end boundary
+		return inmem.NewCommittedEpoch(setup, commit, extensions), nil
 	}
-	return inmem.NewStartedEpoch(setup, commit, firstHeight)
+	return nil, fmt.Errorf("sanity check failed: impossible combination of boundaries for previous epoch")
 }
 
 // retrieveEpochHeightBounds retrieves the height bounds for an epoch.
@@ -530,41 +555,43 @@ func (q *EpochQuery) Previous() protocol.Epoch {
 //	╰ X <-|- X <- Y <- Z
 //
 // Returns:
-// - (0, 0, false, false, nil) if epoch is not started
-// - (firstHeight, 0, true, false, nil) if epoch is started but not ended
-// - (firstHeight, finalHeight, true, true, nil) if epoch is ended
+// - (0, 0, false, false, nil) if neither boundary is known
+// - (firstHeight, 0, true, false, nil) if the epoch's start boundary is known but its end boundary is not
+// - (firstHeight, finalHeight, true, true, nil) if both the epoch's start and end boundaries are known
+// - (0, finalHeight, false, true, nil) if the epoch's end boundary is known but its start boundary is not
 //
 // No errors are expected during normal operation.
-func (q *EpochQuery) retrieveEpochHeightBounds(epoch uint64) (firstHeight, finalHeight uint64, isFirstBlockFinalized, isLastBlockFinalized bool, err error) {
-	err = q.snap.state.db.View(func(tx *badger.Txn) error {
-		// Retrieve the epoch's first height
-		err = operation.RetrieveEpochFirstHeight(epoch, &firstHeight)(tx)
-		if err != nil {
-			if errors.Is(err, storage.ErrNotFound) {
-				isFirstBlockFinalized = false
-				isLastBlockFinalized = false
-				return nil
-			}
-			return err // unexpected error
-		}
-		isFirstBlockFinalized = true
+func (q *EpochQuery) retrieveEpochHeightBounds(epoch uint64) (
+	firstHeight, finalHeight uint64,
+	isFirstHeightKnown, isLastHeightKnown bool,
+	err error,
+) {
 
-		var subsequentEpochFirstHeight uint64
-		err = operation.RetrieveEpochFirstHeight(epoch+1, &subsequentEpochFirstHeight)(tx)
-		if err != nil {
-			if errors.Is(err, storage.ErrNotFound) {
-				isLastBlockFinalized = false
-				return nil
-			}
-			return err // unexpected error
+	r := q.snap.state.db.Reader()
+	// Retrieve the epoch's first height
+	err = operation.RetrieveEpochFirstHeight(r, epoch, &firstHeight)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			isFirstHeightKnown = false // unknown boundary
+		} else {
+			return 0, 0, false, false, err // unexpected error
 		}
-		finalHeight = subsequentEpochFirstHeight - 1
-		isLastBlockFinalized = true
+	} else {
+		isFirstHeightKnown = true // known boundary
+	}
 
-		return nil
-	})
+	var subsequentEpochFirstHeight uint64
+	err = operation.RetrieveEpochFirstHeight(r, epoch+1, &subsequentEpochFirstHeight)
 	if err != nil {
-		return 0, 0, false, false, err
+		if errors.Is(err, storage.ErrNotFound) {
+			isLastHeightKnown = false // unknown boundary
+		} else {
+			return 0, 0, false, false, err // unexpected error
+		}
+	} else { // known boundary
+		isLastHeightKnown = true
+		finalHeight = subsequentEpochFirstHeight - 1
	}
-	return firstHeight, finalHeight, isFirstBlockFinalized, isLastBlockFinalized, nil
+
+	return firstHeight, finalHeight, isFirstHeightKnown, isLastHeightKnown, nil
 }
diff --git a/state/protocol/badger/snapshot_test.go
b/state/protocol/badger/snapshot_test.go index 93c72cbeb9e..2925e0845c0 100644 --- a/state/protocol/badger/snapshot_test.go +++ b/state/protocol/badger/snapshot_test.go @@ -1,15 +1,10 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package badger_test import ( "context" - "errors" "math/rand" "testing" - "time" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -20,17 +15,12 @@ import ( statepkg "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/state/protocol" bprotocol "github.com/onflow/flow-go/state/protocol/badger" - "github.com/onflow/flow-go/state/protocol/inmem" - "github.com/onflow/flow-go/state/protocol/seed" + "github.com/onflow/flow-go/state/protocol/prg" "github.com/onflow/flow-go/state/protocol/util" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/unittest" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - // TestUnknownReferenceBlock tests queries for snapshots which should be unknown. // We use this fixture: // - Root height: 100 @@ -40,20 +30,26 @@ func TestUnknownReferenceBlock(t *testing.T) { rootHeight := uint64(100) participants := unittest.IdentityListFixture(5, unittest.WithAllRoles()) rootSnapshot := unittest.RootSnapshotFixture(participants, func(block *flow.Block) { - block.Header.Height = rootHeight + block.Height = rootHeight }) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) { // build some finalized non-root blocks (heights 101-110) - head := rootSnapshot.Encodable().Head + head := unittest.BlockWithParentAndPayload( + rootSnapshot.Encodable().Head(), + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + buildFinalizedBlock(t, state, head) + const nBlocks = 10 - for i := 0; i < nBlocks; i++ { - next := unittest.BlockWithParentFixture(head) + for i := 1; i < nBlocks; i++ { + next := unittest.BlockWithParentProtocolState(head) buildFinalizedBlock(t, state, next) - head = next.Header + head = next } // build an unfinalized block (height 111) - buildBlock(t, state, unittest.BlockWithParentFixture(head)) + buildBlock(t, state, unittest.BlockWithParentProtocolState(head)) finalizedHeader, err := state.Final().Head() require.NoError(t, err) @@ -82,7 +78,7 @@ func TestHead(t *testing.T) { rootSnapshot := unittest.RootSnapshotFixture(participants) head, err := rootSnapshot.Head() require.NoError(t, err) - util.RunWithBootstrapState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.State) { + util.RunWithBootstrapState(t, rootSnapshot, func(db storage.DB, state *bprotocol.State) { t.Run("works with block number", func(t *testing.T) { retrieved, err := state.AtHeight(head.Height).Head() @@ -109,25 +105,25 @@ func TestHead(t *testing.T) { func TestSnapshot_Params(t *testing.T) { participants := unittest.IdentityListFixture(5, unittest.WithAllRoles()) rootSnapshot := unittest.RootSnapshotFixture(participants) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) - expectedChainID, err := rootSnapshot.Params().ChainID() - require.NoError(t, err) - expectedSporkID, err := rootSnapshot.Params().SporkID() - require.NoError(t, err) - expectedProtocolVersion, err := rootSnapshot.Params().ProtocolVersion() - require.NoError(t, err) + expectedChainID := rootSnapshot.Params().ChainID() + expectedSporkID := 
rootSnapshot.Params().SporkID()
 
 	rootHeader, err := rootSnapshot.Head()
 	require.NoError(t, err)
 
-	util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) {
+	util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) {
 		// build some non-root blocks
 		head := rootHeader
 		const nBlocks = 10
 		for i := 0; i < nBlocks; i++ {
-			next := unittest.BlockWithParentFixture(head)
+			next := unittest.BlockWithParentAndPayload(
+				head,
+				unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+			)
 			buildFinalizedBlock(t, state, next)
-			head = next.Header
+			head = next.ToHeader()
 		}
 
 		// test params from both root, final, and in between
@@ -138,42 +134,48 @@ func TestSnapshot_Params(t *testing.T) {
 		}
 		for _, snapshot := range snapshots {
 			t.Run("should be able to get chain ID from snapshot", func(t *testing.T) {
-				chainID, err := snapshot.Params().ChainID()
-				require.NoError(t, err)
+				chainID := snapshot.Params().ChainID()
 				assert.Equal(t, expectedChainID, chainID)
 			})
 			t.Run("should be able to get spork ID from snapshot", func(t *testing.T) {
-				sporkID, err := snapshot.Params().SporkID()
-				require.NoError(t, err)
+				sporkID := snapshot.Params().SporkID()
 				assert.Equal(t, expectedSporkID, sporkID)
 			})
-			t.Run("should be able to get protocol version from snapshot", func(t *testing.T) {
-				protocolVersion, err := snapshot.Params().ProtocolVersion()
-				require.NoError(t, err)
-				assert.Equal(t, expectedProtocolVersion, protocolVersion)
-			})
 		}
 	})
 }
 
 // TestSnapshot_Descendants builds a sample chain with the following structure:
 //
-// A (finalized) <- B <- C <- D <- E <- F
-//               <- G <- H <- I <- J
+//	              ↙ B ← C ← D ← E ← F
+//	A (finalized)
+//	              ↖ G ← H ← I ← J
 //
 // snapshot.Descendants has to return [B, C, D, E, F, G, H, I, J].
 func TestSnapshot_Descendants(t *testing.T) {
 	participants := unittest.IdentityListFixture(5, unittest.WithAllRoles())
 	rootSnapshot := unittest.RootSnapshotFixture(participants)
+	rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot)
 	head, err := rootSnapshot.Head()
 	require.NoError(t, err)
-	util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) {
+	util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) {
 		var expectedBlocks []flow.Identifier
-		for i := 5; i > 3; i-- {
-			for _, block := range unittest.ChainFixtureFrom(i, head) {
-				err := state.Extend(context.Background(), block)
+		// In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios where
+		// multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view.
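A sketch of what such a unique-view helper might do internally. The real `unittest.BlockWithParentAndPayloadAndUniqueView` fixture may differ; this only illustrates the idea:

// nextUniqueView picks the smallest unused view strictly greater than the
// parent's view, and records it so conflicting forks never collide on views.
func nextUniqueView(parentView uint64, usedViews map[uint64]struct{}) uint64 {
	view := parentView + 1
	for {
		if _, taken := usedViews[view]; !taken {
			usedViews[view] = struct{}{}
			return view
		}
		view++ // view already used by another fork; try the next one
	}
}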
+		usedViews := make(map[uint64]struct{})
+		usedViews[head.View] = struct{}{}
+		for _, forkLength := range []int{5, 4} { // construct two forks with length 5 and 4, respectively
+			parent := head
+			for n := 0; n < forkLength; n++ {
+				block := unittest.BlockWithParentAndPayloadAndUniqueView(
+					parent,
+					unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)),
+					usedViews,
+				)
+				err := state.Extend(context.Background(), unittest.ProposalFromBlock(block))
 				require.NoError(t, err)
 				expectedBlocks = append(expectedBlocks, block.ID())
+				parent = block.ToHeader()
 			}
 		}
@@ -186,7 +188,7 @@ func TestSnapshot_Descendants(t *testing.T) {
 func TestIdentities(t *testing.T) {
 	identities := unittest.IdentityListFixture(5, unittest.WithAllRoles())
 	rootSnapshot := unittest.RootSnapshotFixture(identities)
-	util.RunWithBootstrapState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.State) {
+	util.RunWithBootstrapState(t, rootSnapshot, func(db storage.DB, state *bprotocol.State) {
 
 		t.Run("no filter", func(t *testing.T) {
 			actual, err := state.Final().Identities(filter.Any)
@@ -195,17 +197,20 @@ func TestIdentities(t *testing.T) {
 		})
 
 		t.Run("single identity", func(t *testing.T) {
-			expected := identities.Sample(1)[0]
+			expected := identities[rand.Intn(len(identities))]
 			actual, err := state.Final().Identity(expected.NodeID)
 			require.NoError(t, err)
 			assert.Equal(t, expected, actual)
 		})
 
 		t.Run("filtered", func(t *testing.T) {
-			filters := []flow.IdentityFilter{
-				filter.HasRole(flow.RoleCollection),
-				filter.HasNodeID(identities.SamplePct(0.1).NodeIDs()...),
-				filter.HasWeight(true),
+			sample, err := identities.SamplePct(0.1)
+			require.NoError(t, err)
+			filters := []flow.IdentityFilter[flow.Identity]{
+				filter.HasRole[flow.Identity](flow.RoleCollection),
+				filter.HasNodeID[flow.Identity](sample.NodeIDs()...),
+				filter.HasInitialWeight[flow.Identity](true),
+				filter.IsValidCurrentEpochParticipant,
 			}
 
 			for _, filterfunc := range filters {
@@ -225,22 +230,36 @@ func TestClusters(t *testing.T) {
 	collectors := unittest.IdentityListFixture(nCollectors, unittest.WithRole(flow.RoleCollection))
 	identities := append(unittest.IdentityListFixture(4, unittest.WithAllRolesExcept(flow.RoleCollection)), collectors...)
- root, result, seal := unittest.BootstrapFixture(identities) - qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(root.ID())) - setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) - commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) - setup.Assignments = unittest.ClusterAssignment(uint(nClusters), collectors) - clusterQCs := unittest.QuorumCertificatesFromAssignments(setup.Assignments) - commit.ClusterQCs = flow.ClusterQCVoteDatasFromQCs(clusterQCs) + // bootstrap the protocol state + rootHeaderBody := unittest.Block.Genesis(flow.Emulator).HeaderBody + + counter := uint64(1) + setup := unittest.EpochSetupFixture( + unittest.WithParticipants(identities.ToSkeleton()), + unittest.SetupWithCounter(counter), + unittest.WithFirstView(rootHeaderBody.View), + unittest.WithFinalView(rootHeaderBody.View+100_000), + unittest.WithAssignments(unittest.ClusterAssignment(uint(nClusters), collectors.ToSkeleton())), + ) + commit := unittest.EpochCommitFixture( + unittest.CommitWithCounter(counter), + unittest.WithDKGFromParticipants(identities.ToSkeleton()), + unittest.WithClusterQCsFromAssignments(setup.Assignments), + ) + + root, result, seal := unittest.BootstrapFixtureWithSetupAndCommit(rootHeaderBody, setup, commit) seal.ResultID = result.ID() - rootSnapshot, err := inmem.SnapshotFromBootstrapState(root, result, seal, qc) + qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(root.ID())) + rootSnapshot, err := unittest.SnapshotFromBootstrapState(root, result, seal, qc) require.NoError(t, err) - util.RunWithBootstrapState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.State) { - expectedClusters, err := factory.NewClusterList(setup.Assignments, collectors) + util.RunWithBootstrapState(t, rootSnapshot, func(db storage.DB, state *bprotocol.State) { + expectedClusters, err := factory.NewClusterList(setup.Assignments, collectors.ToSkeleton()) require.NoError(t, err) - actualClusters, err := state.Final().Epochs().Current().Clustering() + currentEpoch, err := state.Final().Epochs().Current() + require.NoError(t, err) + actualClusters, err := currentEpoch.Clustering() require.NoError(t, err) require.Equal(t, nClusters, len(expectedClusters)) @@ -262,11 +281,12 @@ func TestClusters(t *testing.T) { func TestSealingSegment(t *testing.T) { identities := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(identities) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) head, err := rootSnapshot.Head() require.NoError(t, err) t.Run("root sealing segment", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { expected, err := rootSnapshot.SealingSegment() require.NoError(t, err) actual, err := state.AtBlockID(head.ID()).SealingSegment() @@ -275,7 +295,8 @@ func TestSealingSegment(t *testing.T) { assert.Len(t, actual.ExecutionResults, 1) assert.Len(t, actual.Blocks, 1) assert.Empty(t, actual.ExtraBlocks) - unittest.AssertEqualBlocksLenAndOrder(t, expected.Blocks, actual.Blocks) + require.Equal(t, len(expected.Blocks), len(actual.Blocks)) + require.Equal(t, expected.Blocks[0].Block.ID(), actual.Blocks[0].Block.ID()) assertSealingSegmentBlocksQueryableAfterBootstrap(t, state.AtBlockID(head.ID())) }) @@ -286,20 +307,23 @@ func TestSealingSegment(t *testing.T) { // ROOT <- B1 // Expected sealing segment: [ROOT, B1], extra blocks: [] t.Run("non-root with 
root seal as latest seal", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { // build an extra block on top of root - block1 := unittest.BlockWithParentFixture(head) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) buildFinalizedBlock(t, state, block1) segment, err := state.AtBlockID(block1.ID()).SealingSegment() require.NoError(t, err) // build a valid child B2 to ensure we have a QC - buildBlock(t, state, unittest.BlockWithParentFixture(block1.Header)) + buildBlock(t, state, unittest.BlockWithParentProtocolState(block1)) // sealing segment should contain B1 and B2 // B2 is reference of snapshot, B1 is latest sealed - unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{rootSnapshot.Encodable().SealingSegment.Sealed(), block1}, segment.Blocks) + unittest.AssertEqualBlockSequences(t, []*flow.Block{rootSnapshot.Encodable().SealingSegment.Sealed(), block1}, segment.Blocks) assert.Len(t, segment.ExecutionResults, 1) assert.Empty(t, segment.ExtraBlocks) assertSealingSegmentBlocksQueryableAfterBootstrap(t, state.AtBlockID(block1.ID())) @@ -311,35 +335,50 @@ func TestSealingSegment(t *testing.T) { // ROOT <- B1 <- B2(R1) <- B3(S1) // Expected sealing segment: [B1, B2, B3], extra blocks: [ROOT] t.Run("non-root", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState protocol.MutableProtocolState) { // build a block to seal - block1 := unittest.BlockWithParentFixture(head) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) buildFinalizedBlock(t, state, block1) receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block2) // build a block sealing block1 - block3 := unittest.BlockWithParentFixture(block2.Header) - - block3.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1))) + seals := []*flow.Seal{seal1} + block3View := block2.View + 1 + block3 := unittest.BlockFixture( + unittest.Block.WithParent(block2.ID(), block2.View, block2.Height), + unittest.Block.WithPayload( + flow.Payload{ + Seals: seals, + ProtocolStateID: calculateExpectedStateId(t, mutableState)(block2.ID(), block3View, seals), + }), + ) buildFinalizedBlock(t, state, block3) segment, err := state.AtBlockID(block3.ID()).SealingSegment() require.NoError(t, err) require.Len(t, segment.ExtraBlocks, 1) - assert.Equal(t, segment.ExtraBlocks[0].Header.Height, head.Height) + assert.Equal(t, segment.ExtraBlocks[0].Block.Height, head.Height) // build a valid child B3 to ensure we have a QC - buildBlock(t, state, unittest.BlockWithParentFixture(block3.Header)) + buildBlock(t, state, unittest.BlockWithParentProtocolState(block3)) // sealing segment should contain B1, B2, B3 // B3 is reference of snapshot, B1 is latest sealed - 
unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{block1, block2, block3}, segment.Blocks) + unittest.AssertEqualBlockSequences(t, []*flow.Block{block1, block2, block3}, segment.Blocks) assert.Len(t, segment.ExecutionResults, 1) assertSealingSegmentBlocksQueryableAfterBootstrap(t, state.AtBlockID(block3.ID())) }) @@ -350,10 +389,12 @@ func TestSealingSegment(t *testing.T) { // ROOT <- B1 <- .... <- BN(S1) // Expected sealing segment: [B1, ..., BN], extra blocks: [ROOT] t.Run("long sealing segment", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { - + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { // build a block to seal - block1 := unittest.BlockWithParentFixture(head) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) buildFinalizedBlock(t, state, block1) receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) @@ -361,20 +402,33 @@ func TestSealingSegment(t *testing.T) { parent := block1 // build a large chain of intermediary blocks for i := 0; i < 100; i++ { - next := unittest.BlockWithParentFixture(parent.Header) + next := unittest.BlockWithParentProtocolState(parent) if i == 0 { // Repetitions of the same receipt in one fork would be a protocol violation. // Hence, we include the result only once in the direct child of B1. - next.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + next, err = flow.NewBlock( + flow.UntrustedBlock{ + HeaderBody: next.HeaderBody, + Payload: unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(parent.Payload.ProtocolStateID), + ), + }, + ) + require.NoError(t, err) } buildFinalizedBlock(t, state, next) parent = next } // build the block sealing block 1 - blockN := unittest.BlockWithParentFixture(parent.Header) - - blockN.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1))) + blockN := unittest.BlockWithParentAndPayload( + parent.ToHeader(), + unittest.PayloadFixture( + unittest.WithSeals(seal1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, blockN) segment, err := state.AtBlockID(blockN.ID()).SealingSegment() @@ -384,10 +438,10 @@ func TestSealingSegment(t *testing.T) { // sealing segment should cover range [B1, BN] assert.Len(t, segment.Blocks, 102) assert.Len(t, segment.ExtraBlocks, 1) - assert.Equal(t, segment.ExtraBlocks[0].Header.Height, head.Height) + assert.Equal(t, segment.ExtraBlocks[0].Block.Height, head.Height) // first and last blocks should be B1, BN - assert.Equal(t, block1.ID(), segment.Blocks[0].ID()) - assert.Equal(t, blockN.ID(), segment.Blocks[101].ID()) + assert.Equal(t, block1.ID(), segment.Blocks[0].Block.ID()) + assert.Equal(t, blockN.ID(), segment.Blocks[101].Block.ID()) assertSealingSegmentBlocksQueryableAfterBootstrap(t, state.AtBlockID(blockN.ID())) }) }) @@ -397,42 +451,61 @@ func TestSealingSegment(t *testing.T) { // ROOT <- B1 <- B2(R1) <- B3 <- B4(R2, S1) <- B5 <- B6(S2) // Expected sealing segment: [B2, B3, B4], Extra blocks: [ROOT, B1] t.Run("overlapping sealing segment", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { - block1 := unittest.BlockWithParentFixture(head) + block1 := 
unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) buildFinalizedBlock(t, state, block1) receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block2) receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2) - block3 := unittest.BlockWithParentFixture(block2.Header) + block3 := unittest.BlockWithParentProtocolState(block2) buildFinalizedBlock(t, state, block3) - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt2), unittest.WithSeals(seal1))) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt2), + unittest.WithSeals(seal1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block4) - block5 := unittest.BlockWithParentFixture(block4.Header) + block5 := unittest.BlockWithParentProtocolState(block4) buildFinalizedBlock(t, state, block5) - block6 := unittest.BlockWithParentFixture(block5.Header) - block6.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal2))) + block6 := unittest.BlockWithParentAndPayload( + block5.ToHeader(), + unittest.PayloadFixture( + unittest.WithSeals(seal2), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block6) segment, err := state.AtBlockID(block6.ID()).SealingSegment() require.NoError(t, err) // build a valid child to ensure we have a QC - buildBlock(t, state, unittest.BlockWithParentFixture(block6.Header)) + buildBlock(t, state, unittest.BlockWithParentProtocolState(block6)) // sealing segment should be [B2, B3, B4, B5, B6] require.Len(t, segment.Blocks, 5) - unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{block2, block3, block4, block5, block6}, segment.Blocks) - unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{block1}, segment.ExtraBlocks[1:]) + unittest.AssertEqualBlockSequences(t, []*flow.Block{block2, block3, block4, block5, block6}, segment.Blocks) + unittest.AssertEqualBlockSequences(t, []*flow.Block{block1}, segment.ExtraBlocks[1:]) require.Len(t, segment.ExecutionResults, 1) assertSealingSegmentBlocksQueryableAfterBootstrap(t, state.AtBlockID(block6.ID())) @@ -444,7 +517,7 @@ func TestSealingSegment(t *testing.T) { // ROOT -> B1(Result_A, Receipt_A_1) -> B2(Result_B, Receipt_B, Receipt_A_2) -> B3(Receipt_C, Result_C) -> B4 -> B5(Seal_C) // the segment for B5 should be `[B2,B3,B4,B5] + [Result_A]` t.Run("sealing segment with 4 blocks and 1 execution result decoupled", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { // simulate scenario where execution result is missing from block payload // SealingSegment() should get result from results db and store it on ExecutionReceipts // field on SealingSegment @@ -455,20 +528,41 @@ func TestSealingSegment(t *testing.T) { // receipt b also contains result b receiptB := unittest.ExecutionReceiptFixture() - block1 := 
unittest.BlockWithParentFixture(head) - block1.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receiptA1))) - - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receiptB), unittest.WithReceiptsAndNoResults(receiptA2))) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture( + unittest.WithReceipts(receiptA1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receiptB), + unittest.WithReceiptsAndNoResults(receiptA2), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) receiptC, sealC := unittest.ReceiptAndSealForBlock(block2) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receiptC))) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receiptC), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) - block4 := unittest.BlockWithParentFixture(block3.Header) + block4 := unittest.BlockWithParentProtocolState(block3) - block5 := unittest.BlockWithParentFixture(block4.Header) - block5.SetPayload(unittest.PayloadFixture(unittest.WithSeals(sealC))) + block5 := unittest.BlockWithParentAndPayload( + block4.ToHeader(), + unittest.PayloadFixture( + unittest.WithSeals(sealC), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block1) buildFinalizedBlock(t, state, block2) @@ -480,10 +574,10 @@ func TestSealingSegment(t *testing.T) { require.NoError(t, err) // build a valid child to ensure we have a QC - buildBlock(t, state, unittest.BlockWithParentFixture(block5.Header)) + buildBlock(t, state, unittest.BlockWithParentProtocolState(block5)) require.Len(t, segment.Blocks, 4) - unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{block2, block3, block4, block5}, segment.Blocks) + unittest.AssertEqualBlockSequences(t, []*flow.Block{block2, block3, block4, block5}, segment.Blocks) require.Contains(t, segment.ExecutionResults, resultA) require.Len(t, segment.ExecutionResults, 2) @@ -496,7 +590,7 @@ func TestSealingSegment(t *testing.T) { // block3 also references ResultB, so it should exist in the segment execution results as well. 
// root -> B1[Result_A, Receipt_A_1] -> B2[Result_B, Receipt_B, Receipt_A_2] -> B3[Receipt_B_2, Receipt_for_seal, Receipt_A_3] -> B4 -> B5 (Seal_B2) t.Run("sealing segment with 4 blocks and 2 execution result decoupled", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { // simulate scenario where execution result is missing from block payload // SealingSegment() should get result from results db and store it on ExecutionReceipts // field on SealingSegment @@ -512,21 +606,43 @@ func TestSealingSegment(t *testing.T) { // get second receipt for Result_B, now we have 2 receipts for a single execution result receiptB2 := unittest.ExecutionReceiptFixture(unittest.WithResult(&receiptB.ExecutionResult)) - block1 := unittest.BlockWithParentFixture(head) - block1.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receiptA1))) - - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receiptB), unittest.WithReceiptsAndNoResults(receiptA2))) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture( + unittest.WithReceipts(receiptA1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receiptB), + unittest.WithReceiptsAndNoResults(receiptA2), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) receiptForSeal, seal := unittest.ReceiptAndSealForBlock(block2) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receiptForSeal), unittest.WithReceiptsAndNoResults(receiptB2, receiptA3))) - - block4 := unittest.BlockWithParentFixture(block3.Header) - - block5 := unittest.BlockWithParentFixture(block4.Header) - block5.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal))) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receiptForSeal), + unittest.WithReceiptsAndNoResults(receiptB2, receiptA3), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + + block4 := unittest.BlockWithParentProtocolState(block3) + + block5 := unittest.BlockWithParentAndPayload( + block4.ToHeader(), + unittest.PayloadFixture( + unittest.WithSeals(seal), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block1) buildFinalizedBlock(t, state, block2) @@ -538,10 +654,10 @@ func TestSealingSegment(t *testing.T) { require.NoError(t, err) // build a valid child to ensure we have a QC - buildBlock(t, state, unittest.BlockWithParentFixture(block5.Header)) + buildBlock(t, state, unittest.BlockWithParentProtocolState(block5)) require.Len(t, segment.Blocks, 4) - unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{block2, block3, block4, block5}, segment.Blocks) + unittest.AssertEqualBlockSequences(t, []*flow.Block{block2, block3, block4, block5}, segment.Blocks) require.Contains(t, segment.ExecutionResults, resultA) // ResultA should only be added once even though it is referenced in 2 different blocks require.Len(t, segment.ExecutionResults, 2) @@ -555,92 +671,212 @@ func TestSealingSegment(t *testing.T) { // ROOT <- B1 <- B2(R1) <- B3 <- B4(S1) <- B5 // Expected sealing segment: [B1, B2, B3, B4, B5], Extra blocks: [ROOT] 
t.Run("sealing segment where highest block in segment does not seal lowest", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { // build a block to seal - block1 := unittest.BlockWithParentFixture(head) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) buildFinalizedBlock(t, state, block1) // build a block sealing block1 - block2 := unittest.BlockWithParentFixture(block1.Header) + receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block2) - block3 := unittest.BlockWithParentFixture(block2.Header) + block3 := unittest.BlockWithParentProtocolState(block2) buildFinalizedBlock(t, state, block3) - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1))) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + unittest.PayloadFixture( + unittest.WithSeals(seal1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block4) - block5 := unittest.BlockWithParentFixture(block4.Header) + block5 := unittest.BlockWithParentProtocolState(block4) buildFinalizedBlock(t, state, block5) snapshot := state.AtBlockID(block5.ID()) // build a valid child to ensure we have a QC - buildFinalizedBlock(t, state, unittest.BlockWithParentFixture(block5.Header)) + buildFinalizedBlock(t, state, unittest.BlockWithParentProtocolState(block5)) segment, err := snapshot.SealingSegment() require.NoError(t, err) // sealing segment should contain B1 and B5 // B5 is reference of snapshot, B1 is latest sealed - unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{block1, block2, block3, block4, block5}, segment.Blocks) + unittest.AssertEqualBlockSequences(t, []*flow.Block{block1, block2, block3, block4, block5}, segment.Blocks) assert.Len(t, segment.ExecutionResults, 1) assertSealingSegmentBlocksQueryableAfterBootstrap(t, snapshot) }) }) + + // Root <- B1 <- B2 <- ... 
<- B700(Seal_B699)
+	// Expected sealing segment: [B699, B700], Extra blocks: [B99, B100, ..., B698]
+	// where DefaultTransactionExpiry = 600
+	t.Run("test extra blocks contain exactly DefaultTransactionExpiry number of blocks below the sealed block", func(t *testing.T) {
+		util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) {
+			root := unittest.BlockFixture(
+				unittest.Block.WithParent(head.ID(), head.View, head.Height),
+				unittest.Block.WithPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))),
+			)
+			buildFinalizedBlock(t, state, root)
+
+			blocks := make([]*flow.Block, 0, flow.DefaultTransactionExpiry+3)
+			parent := root
+			for i := 0; i < flow.DefaultTransactionExpiry+1; i++ {
+				next := unittest.BlockFixture(
+					unittest.Block.WithParent(parent.ID(), parent.View, parent.Height),
+					unittest.Block.WithPayload(unittest.PayloadFixture(
+						unittest.WithProtocolStateID(parent.Payload.ProtocolStateID)),
+					),
+				)
+				buildFinalizedBlock(t, state, next)
+				blocks = append(blocks, next)
+				parent = next
+			}
+
+			// last sealed block
+			lastSealedBlock := parent
+			lastReceipt, lastSeal := unittest.ReceiptAndSealForBlock(lastSealedBlock)
+			prevLastBlock := unittest.BlockWithParentAndPayload(
+				lastSealedBlock.ToHeader(),
+				unittest.PayloadFixture(
+					unittest.WithReceipts(lastReceipt),
+					unittest.WithProtocolStateID(rootProtocolStateID),
+				),
+			)
+			buildFinalizedBlock(t, state, prevLastBlock)
+
+			// last finalized block
+			lastBlock := unittest.BlockWithParentAndPayload(
+				prevLastBlock.ToHeader(),
+				unittest.PayloadFixture(
+					unittest.WithSeals(lastSeal),
+					unittest.WithProtocolStateID(rootProtocolStateID),
+				),
+			)
+			buildFinalizedBlock(t, state, lastBlock)
+
+			// build a valid child to ensure we have a QC
+			buildFinalizedBlock(t, state, unittest.BlockWithParentProtocolState(lastBlock))
+
+			snapshot := state.AtBlockID(lastBlock.ID())
+			segment, err := snapshot.SealingSegment()
+			require.NoError(t, err)
+
+			assert.Equal(t, lastBlock.ToHeader(), segment.Highest().ToHeader())
+			assert.Equal(t, lastBlock.ToHeader(), segment.Finalized().ToHeader())
+			assert.Equal(t, lastSealedBlock.ToHeader(), segment.Sealed().ToHeader())
+
+			// ExtraBlocks should contain exactly DefaultTransactionExpiry blocks
+			unittest.AssertEqualBlockSequences(t, blocks[:flow.DefaultTransactionExpiry], segment.ExtraBlocks)
+			assert.Len(t, segment.ExtraBlocks, flow.DefaultTransactionExpiry)
+			assertSealingSegmentBlocksQueryableAfterBootstrap(t, snapshot)
+		})
+	})
 
 	// Test the case where the reference block of the snapshot contains seals for blocks that are lower than the lowest sealing segment's block.
 	// This test case specifically checks if sealing segment includes both highest and lowest block sealed by head.
	// ROOT <- B1 <- B2 <- B3(Seal_B1) <- B4 <- ... <- LastBlock(Seal_B2, Seal_B3, Seal_B4)
-	// Expected sealing segment: [B4, ..., B5], Extra blocks: [B2, B3]
+	// Expected sealing segment: [B4, ..., LastBlock], Extra blocks: [Root, B1, B2, B3]
	t.Run("highest block seals outside segment", func(t *testing.T) {
-		util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) {
+		util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) {
 			// build a block to seal
-			block1 := unittest.BlockWithParentFixture(head)
+			block1 := unittest.BlockFixture(
+				unittest.Block.WithParent(head.ID(), head.View, head.Height),
+				unittest.Block.WithPayload(unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))),
+			)
 			buildFinalizedBlock(t, state, block1)
 
 			// build a block sealing block1
-			block2 := unittest.BlockWithParentFixture(block1.Header)
 			receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1)
-			block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1)))
+
+			block2 := unittest.BlockFixture(
+				unittest.Block.WithParent(block1.ID(), block1.View, block1.Height),
+				unittest.Block.WithPayload(unittest.PayloadFixture(
+					unittest.WithReceipts(receipt1),
+					unittest.WithProtocolStateID(rootProtocolStateID)),
+				),
+			)
 			buildFinalizedBlock(t, state, block2)
 
 			receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2)
-			block3 := unittest.BlockWithParentFixture(block2.Header)
-			block3.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1), unittest.WithReceipts(receipt2)))
+			block3 := unittest.BlockFixture(
+				unittest.Block.WithParent(block2.ID(), block2.View, block2.Height),
+				unittest.Block.WithPayload(unittest.PayloadFixture(
+					unittest.WithSeals(seal1),
+					unittest.WithReceipts(receipt2),
+					unittest.WithProtocolStateID(rootProtocolStateID)),
+				),
+			)
 			buildFinalizedBlock(t, state, block3)
 
 			receipt3, seal3 := unittest.ReceiptAndSealForBlock(block3)
-			block4 := unittest.BlockWithParentFixture(block3.Header)
-			block4.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt3)))
+			block4 := unittest.BlockFixture(
+				unittest.Block.WithParent(block3.ID(), block3.View, block3.Height),
+				unittest.Block.WithPayload(unittest.PayloadFixture(
+					unittest.WithReceipts(receipt3),
+					unittest.WithProtocolStateID(rootProtocolStateID)),
+				),
+			)
 			buildFinalizedBlock(t, state, block4)
 
 			// build a chain long enough that the blocks above fall outside the flow.DefaultTransactionExpiry window
 			parent := block4
 			for i := 0; i < 1.5*flow.DefaultTransactionExpiry; i++ {
-				next := unittest.BlockWithParentFixture(parent.Header)
-				next.Header.View = next.Header.Height + 1 // set view so we are still in the same epoch
+				next := unittest.BlockFixture(
+					unittest.Block.WithParent(parent.ID(), parent.View, parent.Height),
+					unittest.Block.WithPayload(unittest.PayloadFixture(
+						unittest.WithProtocolStateID(parent.Payload.ProtocolStateID)),
+					),
+				)
 				buildFinalizedBlock(t, state, next)
 				parent = next
 			}
 
 			receipt4, seal4 := unittest.ReceiptAndSealForBlock(block4)
-			lastBlock := unittest.BlockWithParentFixture(parent.Header)
-			lastBlock.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal2, seal3, seal4), unittest.WithReceipts(receipt4)))
+			prevLastBlock := unittest.BlockWithParentAndPayload(
+				parent.ToHeader(),
+				unittest.PayloadFixture(
+					unittest.WithReceipts(receipt4),
+					unittest.WithProtocolStateID(rootProtocolStateID),
+				),
+			)
+			buildFinalizedBlock(t, state, prevLastBlock)
+
+			// since a seal cannot be included in the same block as the receipt it seals, we need to build another block
+			lastBlock := unittest.BlockWithParentAndPayload(
+				prevLastBlock.ToHeader(),
+				unittest.PayloadFixture(
+					unittest.WithSeals(seal2, seal3, seal4),
+					unittest.WithProtocolStateID(rootProtocolStateID),
+				),
+			)
 			buildFinalizedBlock(t, state, lastBlock)
 
 			snapshot := state.AtBlockID(lastBlock.ID())
 			// build a valid child to ensure we have a QC
-			buildFinalizedBlock(t, state, unittest.BlockWithParentFixture(lastBlock.Header))
+			buildFinalizedBlock(t, state, unittest.BlockWithParentProtocolState(lastBlock))
 
 			segment, err := snapshot.SealingSegment()
 			require.NoError(t, err)
-			assert.Equal(t, lastBlock.Header, segment.Highest().Header)
-			assert.Equal(t, block4.Header, segment.Sealed().Header)
-			unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{block2, block3}, segment.ExtraBlocks)
+			assert.Equal(t, lastBlock.ToHeader(), segment.Highest().ToHeader())
+			assert.Equal(t, block4.ToHeader(), segment.Sealed().ToHeader())
+			root := rootSnapshot.Encodable().SealingSegment.Sealed()
+			unittest.AssertEqualBlockSequences(t, []*flow.Block{root, block1, block2, block3}, segment.ExtraBlocks)
 			assert.Len(t, segment.ExecutionResults, 2)
 
 			assertSealingSegmentBlocksQueryableAfterBootstrap(t, snapshot)
@@ -656,6 +892,7 @@ func TestSealingSegment(t *testing.T) {
 // (2b) An orphaned block is chosen as head; at this height a block other than the orphaned has been finalized.
 func TestSealingSegment_FailureCases(t *testing.T) {
 	sporkRootSnapshot := unittest.RootSnapshotFixture(unittest.CompleteIdentitySet())
+	rootProtocolStateID := getRootProtocolStateID(t, sporkRootSnapshot)
 	sporkRoot, err := sporkRootSnapshot.Head()
 	require.NoError(t, err)
 
@@ -663,38 +900,53 @@
 	// Here, we want to specifically test correct handling of the edge case, where a block exists in storage
 	// that has _lower height_ than the node's local root block. Such blocks are typically contained in the
 	// bootstrapping data, such that all entities referenced in the local root block can be resolved.
-	// Is is possible to retrieve blocks that are lower than the local root block from storage, directly
+	// It is possible to retrieve blocks that are lower than the local root block from storage, directly
 	// via their ID. Despite these blocks existing in storage, SealingSegment construction should be rejected,
 	// because the known history is potentially insufficient when going below the root block.
t.Run("sealing segment from block below local state root", func(t *testing.T) { // Step I: constructing bootstrapping snapshot with some short history: // - // ╭───── finalized blocks ─────╮ + // ╭───── finalized blocks ─────╮ // <- b1 <- b2(result(b1)) <- b3(seal(b1)) <- // └── head ──┘ // - b1 := unittest.BlockWithParentFixture(sporkRoot) // construct block b1, append to state and finalize + // construct block b1, append to state and finalize + b1 := unittest.BlockWithParentAndPayload( + sporkRoot, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) receipt, seal := unittest.ReceiptAndSealForBlock(b1) - b2 := unittest.BlockWithParentFixture(b1.Header) // construct block b2, append to state and finalize - b2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt))) - b3 := unittest.BlockWithParentFixture(b2.Header) // construct block b3 with seal for b1, append it to state and finalize - b3.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal))) - - multipleBlockSnapshot := snapshotAfter(t, sporkRootSnapshot, func(state *bprotocol.FollowerState) protocol.Snapshot { + // construct block b2, append to state and finalize + b2 := unittest.BlockWithParentAndPayload( + b1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + // construct block b3 with seal for b1, append it to state and finalize + b3 := unittest.BlockWithParentAndPayload( + b2.ToHeader(), + unittest.PayloadFixture( + unittest.WithSeals(seal), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + + multipleBlockSnapshot := snapshotAfter(t, sporkRootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot { for _, b := range []*flow.Block{b1, b2, b3} { buildFinalizedBlock(t, state, b) } - b4 := unittest.BlockWithParentFixture(b3.Header) - require.NoError(t, state.ExtendCertified(context.Background(), b4, unittest.CertifyBlock(b4.Header))) // add child of b3 to ensure we have a QC for b3 + b4 := unittest.BlockWithParentProtocolState(b3) + require.NoError(t, state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(b4))) // add child of b3 to ensure we have a QC for b3 return state.AtBlockID(b3.ID()) }) // Step 2: bootstrapping new state based on sealing segment whose head is block b3. // Thereby, the state should have b3 as its local root block. In addition, the blocks contained in the sealing // segment, such as b2 should be stored in the state. - util.RunWithFollowerProtocolState(t, multipleBlockSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { - localStateRootBlock, err := state.Params().Root() - require.NoError(t, err) + util.RunWithFollowerProtocolState(t, multipleBlockSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { + localStateRootBlock := state.Params().FinalizedRoot() assert.Equal(t, b3.ID(), localStateRootBlock.ID()) // verify that b2 is known to the protocol state, but constructing a sealing segment fails @@ -711,15 +963,21 @@ func TestSealingSegment_FailureCases(t *testing.T) { // SCENARIO 2a: A pending block is chosen as head; at this height no block has been finalized. 
t.Run("sealing segment from unfinalized, pending block", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, sporkRootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { - // add _unfinalized_ blocks b1 and b2 to state (block b5 is necessary, so b1 has a QC, which is a consistency requirement for subsequent finality) - b1 := unittest.BlockWithParentFixture(sporkRoot) - b2 := unittest.BlockWithParentFixture(b1.Header) - require.NoError(t, state.ExtendCertified(context.Background(), b1, b2.Header.QuorumCertificate())) - require.NoError(t, state.ExtendCertified(context.Background(), b2, unittest.CertifyBlock(b2.Header))) // adding block b5 (providing required QC for b1) + util.RunWithFollowerProtocolState(t, sporkRootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { + // add _unfinalized_ blocks b1 and b2 to state (block b2 is necessary, so b1 has a QC, which is a consistency requirement for subsequent finality) + b1 := unittest.BlockWithParentAndPayload( + sporkRoot, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + b2 := unittest.BlockWithParentAndPayload( + b1.ToHeader(), + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + require.NoError(t, state.ExtendCertified(context.Background(), unittest.CertifiedByChild(b1, b2))) + require.NoError(t, state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(b2))) // adding block b2 (providing required QC for b1) // consistency check: there should be no finalized block in the protocol state at height `b1.Height` - _, err := state.AtHeight(b1.Header.Height).Head() // expect statepkg.ErrUnknownSnapshotReference as only finalized blocks are indexed by height + _, err = state.AtHeight(b1.Height).Head() // expect statepkg.ErrUnknownSnapshotReference as only finalized blocks are indexed by height assert.ErrorIs(t, err, statepkg.ErrUnknownSnapshotReference) // requesting a sealing segment from block b1 should fail, as b1 is not yet finalized @@ -730,15 +988,29 @@ func TestSealingSegment_FailureCases(t *testing.T) { // SCENARIO 2b: An orphaned block is chosen as head; at this height a block other than the orphaned has been finalized. t.Run("sealing segment from orphaned block", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, sporkRootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { - orphaned := unittest.BlockWithParentFixture(sporkRoot) - orphanedChild := unittest.BlockWithParentFixture(orphaned.Header) - require.NoError(t, state.ExtendCertified(context.Background(), orphaned, orphanedChild.Header.QuorumCertificate())) - require.NoError(t, state.ExtendCertified(context.Background(), orphanedChild, unittest.CertifyBlock(orphanedChild.Header))) - buildFinalizedBlock(t, state, unittest.BlockWithParentFixture(sporkRoot)) + // In this test, we create two conflicting forks. To prevent accidentally creating byzantine scenarios, where + // multiple blocks have the same view, we keep track of used views and ensure that each new block has a unique view. 
+ usedViews := make(map[uint64]struct{}) + usedViews[sporkRoot.View] = struct{}{} + + util.RunWithFollowerProtocolState(t, sporkRootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { + orphaned := unittest.BlockWithParentAndPayloadAndUniqueView( + sporkRoot, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + usedViews, + ) + orphanedChild := unittest.BlockWithParentProtocolStateAndUniqueView(orphaned, usedViews) + require.NoError(t, state.ExtendCertified(context.Background(), unittest.CertifiedByChild(orphaned, orphanedChild))) + require.NoError(t, state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(orphanedChild))) + block := unittest.BlockWithParentAndPayloadAndUniqueView( + sporkRoot, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + usedViews, + ) + buildFinalizedBlock(t, state, block) // consistency check: the finalized block at height `orphaned.Height` should be different than `orphaned` - h, err := state.AtHeight(orphaned.Header.Height).Head() + h, err := state.AtHeight(orphaned.Height).Head() require.NoError(t, err) require.NotEqual(t, h.ID(), orphaned.ID()) @@ -747,46 +1019,66 @@ func TestSealingSegment_FailureCases(t *testing.T) { assert.True(t, protocol.IsUnfinalizedSealingSegmentError(err)) }) }) - } // TestBootstrapSealingSegmentWithExtraBlocks tests a sealing segment where the segment blocks contain collection // guarantees referencing blocks prior to the sealing segment. After bootstrapping from the sealing segment we should be able to -// extend with B7 with contains a guarantee referring B1. +// extend with B7, which contains a guarantee referencing B1. // ROOT <- B1 <- B2(R1) <- B3 <- B4(S1) <- B5 <- B6(S2) // Expected sealing segment: [B2, B3, B4, B5, B6], Extra blocks: [ROOT, B1] func TestBootstrapSealingSegmentWithExtraBlocks(t *testing.T) { identities := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(identities) - rootEpoch := rootSnapshot.Epochs().Current() + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) + rootEpoch, err := rootSnapshot.Epochs().Current() + require.NoError(t, err) cluster, err := rootEpoch.Cluster(0) require.NoError(t, err) collID := cluster.Members()[0].NodeID head, err := rootSnapshot.Head() require.NoError(t, err) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { - block1 := unittest.BlockWithParentFixture(head) + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) { + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) buildFinalizedBlock(t, state, block1) receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block2) receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2) - block3 := unittest.BlockWithParentFixture(block2.Header) + block3 := unittest.BlockWithParentProtocolState(block2) buildFinalizedBlock(t, state, block3) - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt2),
unittest.WithSeals(seal1))) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt2), + unittest.WithSeals(seal1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block4) - block5 := unittest.BlockWithParentFixture(block4.Header) + block5 := unittest.BlockWithParentProtocolState(block4) buildFinalizedBlock(t, state, block5) - block6 := unittest.BlockWithParentFixture(block5.Header) - block6.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal2))) + block6 := unittest.BlockWithParentAndPayload( + block5.ToHeader(), + unittest.PayloadFixture( + unittest.WithSeals(seal2), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) buildFinalizedBlock(t, state, block6) snapshot := state.AtBlockID(block6.ID()) @@ -794,28 +1086,32 @@ func TestBootstrapSealingSegmentWithExtraBlocks(t *testing.T) { require.NoError(t, err) // build a valid child to ensure we have a QC - buildBlock(t, state, unittest.BlockWithParentFixture(block6.Header)) + buildBlock(t, state, unittest.BlockWithParentProtocolState(block6)) // sealing segment should be [B2, B3, B4, B5, B6] require.Len(t, segment.Blocks, 5) - unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{block2, block3, block4, block5, block6}, segment.Blocks) - unittest.AssertEqualBlocksLenAndOrder(t, []*flow.Block{block1}, segment.ExtraBlocks[1:]) + unittest.AssertEqualBlockSequences(t, []*flow.Block{block2, block3, block4, block5, block6}, segment.Blocks) + unittest.AssertEqualBlockSequences(t, []*flow.Block{block1}, segment.ExtraBlocks[1:]) require.Len(t, segment.ExecutionResults, 1) assertSealingSegmentBlocksQueryableAfterBootstrap(t, snapshot) // bootstrap from snapshot - util.RunWithFullProtocolState(t, snapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { - block7 := unittest.BlockWithParentFixture(block6.Header) + util.RunWithFullProtocolState(t, snapshot, func(db storage.DB, state *bprotocol.ParticipantState) { guarantee := unittest.CollectionGuaranteeFixture(unittest.WithCollRef(block1.ID())) - guarantee.ChainID = cluster.ChainID() + guarantee.ClusterChainID = cluster.ChainID() signerIndices, err := signature.EncodeSignersToIndices( []flow.Identifier{collID}, []flow.Identifier{collID}) require.NoError(t, err) guarantee.SignerIndices = signerIndices - - block7.SetPayload(unittest.PayloadFixture(unittest.WithGuarantees(guarantee))) + block7 := unittest.BlockWithParentAndPayload( + block6.ToHeader(), + unittest.PayloadFixture( + unittest.WithGuarantees(guarantee), + unittest.WithProtocolStateID(block6.Payload.ProtocolStateID), + ), + ) buildBlock(t, state, block7) }) }) @@ -824,15 +1120,16 @@ func TestBootstrapSealingSegmentWithExtraBlocks(t *testing.T) { func TestLatestSealedResult(t *testing.T) { identities := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(identities) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) t.Run("root snapshot", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { gotResult, gotSeal, err := state.Final().SealedResult() require.NoError(t, err) expectedResult, expectedSeal, err := rootSnapshot.SealedResult() require.NoError(t, err) - assert.Equal(t, expectedResult, gotResult) + assert.Equal(t, expectedResult.ID(), gotResult.ID()) assert.Equal(t, expectedSeal, 
gotSeal) }) }) @@ -841,34 +1138,51 @@ func TestLatestSealedResult(t *testing.T) { head, err := rootSnapshot.Head() require.NoError(t, err) - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { - block1 := unittest.BlockWithParentFixture(head) - - block2 := unittest.BlockWithParentFixture(block1.Header) - + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1))) + + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + block3 := unittest.BlockWithParentAndPayload( + block2.ToHeader(), + unittest.PayloadFixture( + unittest.WithSeals(seal1), + unittest.WithProtocolStateID(rootProtocolStateID)), + ) receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2) receipt3, seal3 := unittest.ReceiptAndSealForBlock(block3) - block4 := unittest.BlockWithParentFixture(block3.Header) - block4.SetPayload(unittest.PayloadFixture( - unittest.WithReceipts(receipt2, receipt3), - )) - block5 := unittest.BlockWithParentFixture(block4.Header) - block5.SetPayload(unittest.PayloadFixture( - unittest.WithSeals(seal2, seal3), - )) - - err = state.ExtendCertified(context.Background(), block1, block2.Header.QuorumCertificate()) + block4 := unittest.BlockWithParentAndPayload( + block3.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt2, receipt3), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + block5 := unittest.BlockWithParentAndPayload( + block4.ToHeader(), + unittest.PayloadFixture( + unittest.WithSeals(seal2, seal3), + unittest.WithProtocolStateID(rootProtocolStateID), + ), + ) + + err = state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block1, block2)) require.NoError(t, err) - err = state.ExtendCertified(context.Background(), block2, block3.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block2, block3)) require.NoError(t, err) - err = state.ExtendCertified(context.Background(), block3, block4.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block3, block4)) require.NoError(t, err) // B1 <- B2(R1) <- B3(S1) @@ -880,7 +1194,7 @@ func TestLatestSealedResult(t *testing.T) { assert.Equal(t, block3.Payload.Seals[0], gotSeal) }) - err = state.ExtendCertified(context.Background(), block4, block5.Header.QuorumCertificate()) + err = state.ExtendCertified(context.Background(), unittest.CertifiedByChild(block4, block5)) require.NoError(t, err) // B1 <- B2(S1) <- B3(S1) <- B4(R2,R3) @@ -895,7 +1209,7 @@ func TestLatestSealedResult(t *testing.T) { // B1 <- B2(R1) <- B3(S1) <- B4(R2,R3) <- B5(S2,S3) // There are two seals in B5 - should return latest by height (S3,R3) t.Run("reference block contains multiple seals", func(t *testing.T) { - err = state.ExtendCertified(context.Background(), block5, unittest.CertifyBlock(block5.Header)) + err = state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block5)) 
require.NoError(t, err) gotResult, gotSeal, err := state.AtBlockID(block5.ID()).SealedResult() @@ -911,17 +1225,20 @@ func TestLatestSealedResult(t *testing.T) { func TestQuorumCertificate(t *testing.T) { identities := unittest.IdentityListFixture(5, unittest.WithAllRoles()) rootSnapshot := unittest.RootSnapshotFixture(identities) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) head, err := rootSnapshot.Head() require.NoError(t, err) // should not be able to get QC or random beacon seed from a block with no children t.Run("no QC available", func(t *testing.T) { - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) { // create a block to query - block1 := unittest.BlockWithParentFixture(head) - block1.SetPayload(flow.EmptyPayload()) - err := state.Extend(context.Background(), block1) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err := state.Extend(context.Background(), unittest.ProposalFromBlock(block1)) require.NoError(t, err) _, err = state.AtBlockID(block1.ID()).QuorumCertificate() @@ -934,25 +1251,28 @@ func TestQuorumCertificate(t *testing.T) { // should be able to get QC and random beacon seed from root block t.Run("root block", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { // since we bootstrap with a root snapshot, this will be the root block _, err := state.AtBlockID(head.ID()).QuorumCertificate() assert.NoError(t, err) randomSeed, err := state.AtBlockID(head.ID()).RandomSource() assert.NoError(t, err) - assert.Equal(t, len(randomSeed), seed.RandomSourceLength) + assert.Equal(t, len(randomSeed), prg.RandomSourceLength) }) }) // should be able to get QC and random beacon seed from a certified block t.Run("follower-block-processable", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { // add a block so we aren't testing against root - block1 := unittest.BlockWithParentFixture(head) - block1.SetPayload(flow.EmptyPayload()) - certifyingQC := unittest.CertifyBlock(block1.Header) - err := state.ExtendCertified(context.Background(), block1, certifyingQC) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + certified := unittest.NewCertifiedBlock(block1) + certifyingQC := certified.CertifyingQC + err := state.ExtendCertified(context.Background(), certified) require.NoError(t, err) // should be able to get QC/seed @@ -961,7 +1281,7 @@ func TestQuorumCertificate(t *testing.T) { assert.Equal(t, certifyingQC.SignerIndices, qc.SignerIndices) assert.Equal(t, certifyingQC.SigData, qc.SigData) - assert.Equal(t, block1.Header.View, qc.View) + assert.Equal(t, block1.View, qc.View) _, err = state.AtBlockID(block1.ID()).RandomSource() require.NoError(t, err) @@ -970,26 +1290,27 @@ func TestQuorumCertificate(t *testing.T) { // should be able to get QC and random beacon seed from a block with a child (has to be certified) t.Run("participant-block-processable", func(t *testing.T) { -
util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { + util.RunWithFullProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState) { // create a block to query - block1 := unittest.BlockWithParentFixture(head) - block1.SetPayload(flow.EmptyPayload()) - err := state.Extend(context.Background(), block1) + block1 := unittest.BlockWithParentAndPayload( + head, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + err := state.Extend(context.Background(), unittest.ProposalFromBlock(block1)) require.NoError(t, err) _, err = state.AtBlockID(block1.ID()).QuorumCertificate() assert.ErrorIs(t, err, storage.ErrNotFound) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(flow.EmptyPayload()) - err = state.Extend(context.Background(), block2) + block2 := unittest.BlockWithParentProtocolState(block1) + err = state.Extend(context.Background(), unittest.ProposalFromBlock(block2)) require.NoError(t, err) qc, err := state.AtBlockID(block1.ID()).QuorumCertificate() require.NoError(t, err) // should have view matching block1 view - assert.Equal(t, block1.Header.View, qc.View) + assert.Equal(t, block1.View, qc.View) assert.Equal(t, block1.ID(), qc.BlockID) }) }) @@ -1002,11 +1323,11 @@ func TestSnapshot_EpochQuery(t *testing.T) { result, _, err := rootSnapshot.SealedResult() require.NoError(t, err) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { + util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState protocol.MutableProtocolState) { epoch1Counter := result.ServiceEvents[0].Event.(*flow.EpochSetup).Counter epoch2Counter := epoch1Counter + 1 - epochBuilder := unittest.NewEpochBuilder(t, state) + epochBuilder := unittest.NewEpochBuilder(t, mutableState, state) // build epoch 1 (prepare epoch 2) epochBuilder. BuildEpoch(). @@ -1026,17 +1347,17 @@ func TestSnapshot_EpochQuery(t *testing.T) { t.Run("Current", func(t *testing.T) { t.Run("epoch 1", func(t *testing.T) { for _, height := range epoch1.Range() { - counter, err := state.AtHeight(height).Epochs().Current().Counter() + currentEpoch, err := state.AtHeight(height).Epochs().Current() require.NoError(t, err) - assert.Equal(t, epoch1Counter, counter) + assert.Equal(t, epoch1Counter, currentEpoch.Counter()) } }) t.Run("epoch 2", func(t *testing.T) { for _, height := range epoch2.Range() { - counter, err := state.AtHeight(height).Epochs().Current().Counter() + currentEpoch, err := state.AtHeight(height).Epochs().Current() require.NoError(t, err) - assert.Equal(t, epoch2Counter, counter) + assert.Equal(t, epoch2Counter, currentEpoch.Counter()) } }) }) @@ -1046,17 +1367,31 @@ func TestSnapshot_EpochQuery(t *testing.T) { t.Run("Next", func(t *testing.T) { t.Run("epoch 1: before next epoch available", func(t *testing.T) { for _, height := range epoch1.StakingRange() { - _, err := state.AtHeight(height).Epochs().Next().Counter() - assert.Error(t, err) - assert.True(t, errors.Is(err, protocol.ErrNextEpochNotSetup)) + _, err := state.AtHeight(height).Epochs().NextUnsafe() + assert.ErrorIs(t, err, protocol.ErrNextEpochNotSetup) + _, err = state.AtHeight(height).Epochs().NextCommitted() + assert.ErrorIs(t, err, protocol.ErrNextEpochNotCommitted) } }) t.Run("epoch 2: after next epoch available", func(t *testing.T) { - for _, height := range append(epoch1.SetupRange(), epoch1.CommittedRange()...) 
{ - counter, err := state.AtHeight(height).Epochs().Next().Counter() + for _, height := range epoch1.SetupRange() { + // Tentative epoch is available + nextSetup, err := state.AtHeight(height).Epochs().NextUnsafe() require.NoError(t, err) - assert.Equal(t, epoch2Counter, counter) + assert.Equal(t, epoch2Counter, nextSetup.Counter()) + // Committed epoch is not available + _, err = state.AtHeight(height).Epochs().NextCommitted() + require.ErrorIs(t, err, protocol.ErrNextEpochNotCommitted) + } + for _, height := range epoch1.CommittedRange() { + // Tentative epoch is not available + _, err := state.AtHeight(height).Epochs().NextUnsafe() + require.ErrorIs(t, err, protocol.ErrNextEpochAlreadyCommitted) + // Committed epoch is available + nextCommitted, err := state.AtHeight(height).Epochs().NextCommitted() + require.NoError(t, err) + assert.Equal(t, epoch2Counter, nextCommitted.Counter()) } }) }) @@ -1067,17 +1402,16 @@ func TestSnapshot_EpochQuery(t *testing.T) { t.Run("Previous", func(t *testing.T) { t.Run("epoch 1", func(t *testing.T) { for _, height := range epoch1.Range() { - _, err := state.AtHeight(height).Epochs().Previous().Counter() - assert.Error(t, err) - assert.True(t, errors.Is(err, protocol.ErrNoPreviousEpoch)) + _, err := state.AtHeight(height).Epochs().Previous() + assert.ErrorIs(t, err, protocol.ErrNoPreviousEpoch) } }) t.Run("epoch 2", func(t *testing.T) { for _, height := range epoch2.Range() { - counter, err := state.AtHeight(height).Epochs().Previous().Counter() + previousEpoch, err := state.AtHeight(height).Epochs().Previous() require.NoError(t, err) - assert.Equal(t, epoch1Counter, counter) + assert.Equal(t, epoch1Counter, previousEpoch.Counter()) } }) }) @@ -1093,9 +1427,9 @@ func TestSnapshot_EpochFirstView(t *testing.T) { result, _, err := rootSnapshot.SealedResult() require.NoError(t, err) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { + util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState protocol.MutableProtocolState) { - epochBuilder := unittest.NewEpochBuilder(t, state) + epochBuilder := unittest.NewEpochBuilder(t, mutableState, state) // build epoch 1 (prepare epoch 2) epochBuilder. BuildEpoch(). @@ -1122,18 +1456,18 @@ func TestSnapshot_EpochFirstView(t *testing.T) { // test w.r.t. epoch 1 snapshot t.Run("Current", func(t *testing.T) { for _, height := range epoch1.Range() { - actualFirstView, err := state.AtHeight(height).Epochs().Current().FirstView() + currentEpoch, err := state.AtHeight(height).Epochs().Current() require.NoError(t, err) - assert.Equal(t, epoch1FirstView, actualFirstView) + assert.Equal(t, epoch1FirstView, currentEpoch.FirstView()) } }) // test w.r.t. epoch 2 snapshot t.Run("Previous", func(t *testing.T) { for _, height := range epoch2.Range() { - actualFirstView, err := state.AtHeight(height).Epochs().Previous().FirstView() + previousEpoch, err := state.AtHeight(height).Epochs().Previous() require.NoError(t, err) - assert.Equal(t, epoch1FirstView, actualFirstView) + assert.Equal(t, epoch1FirstView, previousEpoch.FirstView()) } }) }) @@ -1144,19 +1478,19 @@ func TestSnapshot_EpochFirstView(t *testing.T) { // test w.r.t. epoch 1 snapshot t.Run("Next", func(t *testing.T) { - for _, height := range append(epoch1.SetupRange(), epoch1.CommittedRange()...) 
{ - actualFirstView, err := state.AtHeight(height).Epochs().Next().FirstView() + for _, height := range epoch1.CommittedRange() { + nextCommitted, err := state.AtHeight(height).Epochs().NextCommitted() require.NoError(t, err) - assert.Equal(t, epoch2FirstView, actualFirstView) + assert.Equal(t, epoch2FirstView, nextCommitted.FirstView()) } }) // test w.r.t. epoch 2 snapshot t.Run("Current", func(t *testing.T) { for _, height := range epoch2.Range() { - actualFirstView, err := state.AtHeight(height).Epochs().Current().FirstView() + currentEpoch, err := state.AtHeight(height).Epochs().Current() require.NoError(t, err) - assert.Equal(t, epoch2FirstView, actualFirstView) + assert.Equal(t, epoch2FirstView, currentEpoch.FirstView()) } }) }) @@ -1165,46 +1499,53 @@ func TestSnapshot_EpochFirstView(t *testing.T) { // TestSnapshot_EpochHeightBoundaries tests querying epoch height boundaries in various conditions. // - FirstHeight should be queryable as soon as the epoch's first block is finalized, -// otherwise should return protocol.ErrEpochTransitionNotFinalized +// otherwise should return protocol.ErrUnknownEpochBoundary // - FinalHeight should be queryable as soon as the next epoch's first block is finalized, -// otherwise should return protocol.ErrEpochTransitionNotFinalized +// otherwise should return protocol.ErrUnknownEpochBoundary func TestSnapshot_EpochHeightBoundaries(t *testing.T) { identities := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(identities) head, err := rootSnapshot.Head() require.NoError(t, err) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { + util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState protocol.MutableProtocolState) { - epochBuilder := unittest.NewEpochBuilder(t, state) + epochBuilder := unittest.NewEpochBuilder(t, mutableState, state) epoch1FirstHeight := head.Height t.Run("first epoch - EpochStaking phase", func(t *testing.T) { + currentEpoch, err := state.Final().Epochs().Current() + require.NoError(t, err) // first height of started current epoch should be known - firstHeight, err := state.Final().Epochs().Current().FirstHeight() + firstHeight, err := currentEpoch.FirstHeight() require.NoError(t, err) assert.Equal(t, epoch1FirstHeight, firstHeight) // final height of not completed current epoch should be unknown - _, err = state.Final().Epochs().Current().FinalHeight() - assert.ErrorIs(t, err, protocol.ErrEpochTransitionNotFinalized) + _, err = currentEpoch.FinalHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) }) // build first epoch (but don't complete it yet) epochBuilder.BuildEpoch() t.Run("first epoch - EpochCommitted phase", func(t *testing.T) { + finalSnap := state.Final() + currentEpoch, err := finalSnap.Epochs().Current() + require.NoError(t, err) + nextEpoch, err := finalSnap.Epochs().NextCommitted() + require.NoError(t, err) // first height of started current epoch should be known - firstHeight, err := state.Final().Epochs().Current().FirstHeight() + firstHeight, err := currentEpoch.FirstHeight() require.NoError(t, err) assert.Equal(t, epoch1FirstHeight, firstHeight) // final height of not completed current epoch should be unknown - _, err = state.Final().Epochs().Current().FinalHeight() - assert.ErrorIs(t, err, protocol.ErrEpochTransitionNotFinalized) + _, err = currentEpoch.FinalHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) // first and final height 
of not started next epoch should be unknown - _, err = state.Final().Epochs().Next().FirstHeight() - assert.ErrorIs(t, err, protocol.ErrEpochTransitionNotFinalized) - _, err = state.Final().Epochs().Next().FinalHeight() - assert.ErrorIs(t, err, protocol.ErrEpochTransitionNotFinalized) + _, err = nextEpoch.FirstHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) + _, err = nextEpoch.FinalHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) }) // complete epoch 1 (enter epoch 2) @@ -1215,21 +1556,26 @@ func TestSnapshot_EpochHeightBoundaries(t *testing.T) { epoch2FirstHeight := epoch1FinalHeight + 1 t.Run("second epoch - EpochStaking phase", func(t *testing.T) { + finalSnap := state.Final() + previousEpoch, err := finalSnap.Epochs().Previous() + require.NoError(t, err) // first and final height of completed previous epoch should be known - firstHeight, err := state.Final().Epochs().Previous().FirstHeight() + firstHeight, err := previousEpoch.FirstHeight() require.NoError(t, err) assert.Equal(t, epoch1FirstHeight, firstHeight) - finalHeight, err := state.Final().Epochs().Previous().FinalHeight() + finalHeight, err := previousEpoch.FinalHeight() require.NoError(t, err) assert.Equal(t, epoch1FinalHeight, finalHeight) + currentEpoch, err := finalSnap.Epochs().Current() + require.NoError(t, err) // first height of started current epoch should be known - firstHeight, err = state.Final().Epochs().Current().FirstHeight() + firstHeight, err = currentEpoch.FirstHeight() require.NoError(t, err) assert.Equal(t, epoch2FirstHeight, firstHeight) // final height of not completed current epoch should be unknown - _, err = state.Final().Epochs().Current().FinalHeight() - assert.ErrorIs(t, err, protocol.ErrEpochTransitionNotFinalized) + _, err = currentEpoch.FinalHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) }) }) } @@ -1246,26 +1592,26 @@ func TestSnapshot_CrossEpochIdentities(t *testing.T) { // 1 identity added at epoch 2 that was not present in epoch 1 addedAtEpoch2 := unittest.IdentityFixture() // 1 identity removed in epoch 2 that was present in epoch 1 - removedAtEpoch2 := epoch1Identities.Sample(1)[0] + removedAtEpoch2 := epoch1Identities[rand.Intn(len(epoch1Identities))] // epoch 2 has partial overlap with epoch 1 epoch2Identities := append( - epoch1Identities.Filter(filter.Not(filter.HasNodeID(removedAtEpoch2.NodeID))), + epoch1Identities.Filter(filter.Not(filter.HasNodeID[flow.Identity](removedAtEpoch2.NodeID))), addedAtEpoch2) // epoch 3 has no overlap with epoch 2 epoch3Identities := unittest.IdentityListFixture(10, unittest.WithAllRoles()) rootSnapshot := unittest.RootSnapshotFixture(epoch1Identities) - util.RunWithFullProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.ParticipantState) { + util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, state *bprotocol.ParticipantState, mutableState protocol.MutableProtocolState) { - epochBuilder := unittest.NewEpochBuilder(t, state) + epochBuilder := unittest.NewEpochBuilder(t, mutableState, state) // build epoch 1 (prepare epoch 2) epochBuilder. - UsingSetupOpts(unittest.WithParticipants(epoch2Identities)). + UsingSetupOpts(unittest.WithParticipants(epoch2Identities.ToSkeleton())). BuildEpoch(). CompleteEpoch() // build epoch 2 (prepare epoch 3) epochBuilder. - UsingSetupOpts(unittest.WithParticipants(epoch3Identities)). + UsingSetupOpts(unittest.WithParticipants(epoch3Identities.ToSkeleton())). BuildEpoch(). 
CompleteEpoch() @@ -1276,8 +1622,7 @@ func TestSnapshot_CrossEpochIdentities(t *testing.T) { require.True(t, ok) t.Run("should be able to query at root block", func(t *testing.T) { - root, err := state.Params().Root() - require.NoError(t, err) + root := state.Params().FinalizedRoot() snapshot := state.AtHeight(root.Height) identities, err := snapshot.Identities(filter.Any) require.NoError(t, err) @@ -1294,7 +1639,7 @@ func TestSnapshot_CrossEpochIdentities(t *testing.T) { snapshots := []protocol.Snapshot{state.AtHeight(epoch1.Setup), state.AtHeight(epoch1.Committed)} for _, snapshot := range snapshots { - phase, err := snapshot.Phase() + phase, err := snapshot.EpochPhase() require.NoError(t, err) t.Run("phase: "+phase.String(), func(t *testing.T) { @@ -1306,11 +1651,12 @@ func TestSnapshot_CrossEpochIdentities(t *testing.T) { // all current epoch identities should match configuration from EpochSetup event assert.ElementsMatch(t, epoch1Identities, identities.Filter(epoch1Identities.Selector())) - // should contain single next epoch identity with 0 weight - nextEpochIdentity := identities.Filter(filter.HasNodeID(addedAtEpoch2.NodeID))[0] - assert.Equal(t, uint64(0), nextEpochIdentity.Weight) // should have 0 weight - nextEpochIdentity.Weight = addedAtEpoch2.Weight - assert.Equal(t, addedAtEpoch2, nextEpochIdentity) // should be equal besides weight + // should contain single identity for next epoch with status `flow.EpochParticipationStatusJoining` + nextEpochIdentity := identities.Filter(filter.HasNodeID[flow.Identity](addedAtEpoch2.NodeID))[0] + assert.Equal(t, flow.EpochParticipationStatusJoining, nextEpochIdentity.EpochParticipationStatus, + "expect joining status since we are in setup & commit phase") + assert.Equal(t, addedAtEpoch2.IdentitySkeleton, nextEpochIdentity.IdentitySkeleton, + "expect skeleton to be identical") }) } }) @@ -1327,11 +1673,12 @@ func TestSnapshot_CrossEpochIdentities(t *testing.T) { // all current epoch identities should match configuration from EpochSetup event assert.ElementsMatch(t, epoch2Identities, identities.Filter(epoch2Identities.Selector())) - // should contain single previous epoch identity with 0 weight - lastEpochIdentity := identities.Filter(filter.HasNodeID(removedAtEpoch2.NodeID))[0] - assert.Equal(t, uint64(0), lastEpochIdentity.Weight) // should have 0 weight - lastEpochIdentity.Weight = removedAtEpoch2.Weight // overwrite weight - assert.Equal(t, removedAtEpoch2, lastEpochIdentity) // should be equal besides weight + // should contain single identity from previous epoch with status `flow.EpochParticipationStatusLeaving` + lastEpochIdentity := identities.Filter(filter.HasNodeID[flow.Identity](removedAtEpoch2.NodeID))[0] + assert.Equal(t, flow.EpochParticipationStatusLeaving, lastEpochIdentity.EpochParticipationStatus, + "expect leaving status since we are in staking phase") + assert.Equal(t, removedAtEpoch2.IdentitySkeleton, lastEpochIdentity.IdentitySkeleton, + "expect skeleton to be identical") }) t.Run("should not include previous epoch after staking phase", func(t *testing.T) { @@ -1340,7 +1687,7 @@ func TestSnapshot_CrossEpochIdentities(t *testing.T) { snapshots := []protocol.Snapshot{state.AtHeight(epoch2.Setup), state.AtHeight(epoch2.Committed)} for _, snapshot := range snapshots { - phase, err := snapshot.Phase() + phase, err := snapshot.EpochPhase() require.NoError(t, err) t.Run("phase: "+phase.String(), func(t *testing.T) { @@ -1352,13 +1699,14 @@ func TestSnapshot_CrossEpochIdentities(t *testing.T) { // all current epoch identities 
should match configuration from EpochSetup event assert.ElementsMatch(t, epoch2Identities, identities.Filter(epoch2Identities.Selector())) - // should contain next epoch identities with 0 weight + // should contain next epoch's identities with status `flow.EpochParticipationStatusJoining` for _, expected := range epoch3Identities { actual, exists := identities.ByNodeID(expected.NodeID) require.True(t, exists) - assert.Equal(t, uint64(0), actual.Weight) // should have 0 weight - actual.Weight = expected.Weight // overwrite weight - assert.Equal(t, expected, actual) // should be equal besides weight + assert.Equal(t, flow.EpochParticipationStatusJoining, actual.EpochParticipationStatus, + "expect joining status since we are in setup & commit phase") + assert.Equal(t, expected.IdentitySkeleton, actual.IdentitySkeleton, + "expect skeleton to be identical") } }) } @@ -1371,14 +1719,14 @@ func TestSnapshot_CrossEpochIdentities(t *testing.T) { func TestSnapshot_PostSporkIdentities(t *testing.T) { expected := unittest.CompleteIdentitySet() root, result, seal := unittest.BootstrapFixture(expected, func(block *flow.Block) { - block.Header.ParentID = unittest.IdentifierFixture() + block.ParentID = unittest.IdentifierFixture() }) qc := unittest.QuorumCertificateFixture(unittest.QCWithRootBlockID(root.ID())) - rootSnapshot, err := inmem.SnapshotFromBootstrapState(root, result, seal, qc) + rootSnapshot, err := unittest.SnapshotFromBootstrapState(root, result, seal, qc) require.NoError(t, err) - util.RunWithBootstrapState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.State) { + util.RunWithBootstrapState(t, rootSnapshot, func(db storage.DB, state *bprotocol.State) { actual, err := state.Final().Identities(filter.Any) require.NoError(t, err) assert.ElementsMatch(t, expected, actual) diff --git a/state/protocol/badger/state.go b/state/protocol/badger/state.go index da8b955e7f2..c28539f3ea0 100644 --- a/state/protocol/badger/state.go +++ b/state/protocol/badger/state.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package badger import ( @@ -7,54 +5,67 @@ import ( "fmt" "sync/atomic" - "github.com/dgraph-io/badger/v2" + "github.com/jordanschalm/lockctx" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" statepkg "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/datastore" + "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/state/protocol/invalid" + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" + "github.com/onflow/flow-go/storage/operation" ) -// cachedHeader caches a block header and its ID. 
-type cachedHeader struct { - id flow.Identifier - header *flow.Header +// cachedLatest caches both the latest finalized and the latest sealed block. +// Since the finalized block and the sealed block are updated together atomically, +// we can cache them together +type cachedLatest struct { + finalizedID flow.Identifier + finalizedHeader *flow.Header + sealedID flow.Identifier + sealedHeader *flow.Header } type State struct { - metrics module.ComplianceMetrics - db *badger.DB - headers storage.Headers - blocks storage.Blocks - qcs storage.QuorumCertificates - results storage.ExecutionResults - seals storage.Seals - epoch struct { - setups storage.EpochSetups - commits storage.EpochCommits - statuses storage.EpochStatuses - } - versionBeacons storage.VersionBeacons - - // rootHeight marks the cutoff of the history this node knows about. We cache it in the state + metrics module.ComplianceMetrics + db storage.DB + lockManager lockctx.Manager + headers storage.Headers + blocks storage.Blocks + qcs storage.QuorumCertificates + results storage.ExecutionResults + seals storage.Seals + epoch struct { + setups storage.EpochSetups + commits storage.EpochCommits + } + params protocol.Params + protocolKVStoreSnapshotsDB storage.ProtocolKVStore + epochProtocolStateEntriesDB storage.EpochProtocolStateEntries // TODO remove when MinEpochStateEntry is stored in KVStore + protocolState protocol.ProtocolState + versionBeacons storage.VersionBeacons + + // finalizedRootHeight marks the cutoff of the history this node knows about. We cache it in the state // because it cannot change over the lifecycle of a protocol state instance. It is frequently // larger than the height of the root block of the spork, (also cached below as - // `sporkRootBlockHeight`), for instance if the node joined in an epoch after the last spork. - rootHeight uint64 - // sporkRootBlockHeight is the height of the root block in the current spork. We cache it in + // `sporkRootBlockHeight`), for instance, if the node joined in an epoch after the last spork. + finalizedRootHeight uint64 + // sealedRootHeight is the height of the root block that is sealed. We cache it in + // the state, because it cannot change over the lifecycle of a protocol state instance. + sealedRootHeight uint64 + // sporkRootBlock is the root block in the current spork. We cache it in // the state, because it cannot change over the lifecycle of a protocol state instance. // Caution: for a node that joined in a later epoch past the spork, the node will likely _not_ // know the spork's root block in full (though it will always know the height). - sporkRootBlockHeight uint64 - // cache the latest finalized and sealed block headers as these are common queries. - // It can be cached because the protocol state is solely responsible for updating these values. - cachedFinal *atomic.Pointer[cachedHeader] - cachedSealed *atomic.Pointer[cachedHeader] + sporkRootBlock *flow.Block + // cachedLatest caches both the *latest* finalized header and sealed header, + // because the protocol state is solely responsible for updating them. + // Finalized header and sealed header can be cached together since they are updated together atomically + cachedLatest *atomic.Pointer[cachedLatest] } var _ protocol.State = (*State)(nil) @@ -77,9 +88,12 @@ func SkipNetworkAddressValidation(conf *BootstrapConfig) { conf.SkipNetworkAddressValidation = true } +// Bootstrap initializes the protocol state from the provided root snapshot and persists it to the database. +// No errors expected during normal operation.
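+//
+// A minimal usage sketch (construction of the storage layers and the lock manager is
+// elided here and assumed to happen elsewhere during node startup):
+//
+//	state, err := Bootstrap(metrics, db, lockManager, headers, seals, results, blocks,
+//		qcs, setups, commits, epochProtocolStateSnapshots, protocolKVStoreSnapshots,
+//		versionBeacons, rootSnapshot)
+//	if err != nil {
+//		return fmt.Errorf("could not bootstrap protocol state: %w", err)
+//	}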
func Bootstrap( metrics module.ComplianceMetrics, - db *badger.DB, + db storage.DB, + lockManager lockctx.Manager, headers storage.Headers, seals storage.Seals, results storage.ExecutionResults, @@ -87,11 +101,37 @@ func Bootstrap( qcs storage.QuorumCertificates, setups storage.EpochSetups, commits storage.EpochCommits, - statuses storage.EpochStatuses, + epochProtocolStateSnapshots storage.EpochProtocolStateEntries, + protocolKVStoreSnapshots storage.ProtocolKVStore, versionBeacons storage.VersionBeacons, root protocol.Snapshot, options ...BootstrapConfigOptions, ) (*State, error) { + // we acquire both [storage.LockInsertBlock] and [storage.LockFinalizeBlock] because + // the bootstrapping process inserts and finalizes blocks (all blocks within the + // trusted root snapshot are presumed to be finalized) + lctx := lockManager.NewContext() + defer lctx.Release() + err := lctx.AcquireLock(storage.LockInsertBlock) + if err != nil { + return nil, err + } + err = lctx.AcquireLock(storage.LockFinalizeBlock) + if err != nil { + return nil, err + } + err = lctx.AcquireLock(storage.LockBootstrapping) + if err != nil { + return nil, err + } + err = lctx.AcquireLock(storage.LockInsertSafetyData) + if err != nil { + return nil, err + } + err = lctx.AcquireLock(storage.LockInsertLivenessData) + if err != nil { + return nil, err + } config := defaultBootstrapConfig() for _, opt := range options { @@ -106,21 +146,7 @@ func Bootstrap( return nil, fmt.Errorf("expected empty database") } - state := newState( - metrics, - db, - headers, - seals, - results, - blocks, - qcs, - setups, - commits, - statuses, - versionBeacons, - ) - - if err := IsValidRootSnapshot(root, !config.SkipNetworkAddressValidation); err != nil { + if err := datastore.IsValidRootSnapshot(root, !config.SkipNetworkAddressValidation); err != nil { return nil, fmt.Errorf("cannot bootstrap invalid root snapshot: %w", err) } @@ -129,130 +155,272 @@ func Bootstrap( return nil, fmt.Errorf("could not get sealing segment: %w", err) } - err = operation.RetryOnConflictTx(db, transaction.Update, func(tx *transaction.Tx) error { - // sealing segment is in ascending height order, so the tail is the - // oldest ancestor and head is the newest child in the segment - // TAIL <- ... <- HEAD - highest := segment.Highest() // reference block of the snapshot - lowest := segment.Sealed() // last sealed block + _, rootSeal, err := root.SealedResult() + if err != nil { + return nil, fmt.Errorf("could not get sealed result for sealing segment: %w", err) + } - // 1) bootstrap the sealing segment - err = state.bootstrapSealingSegment(segment, highest)(tx) - if err != nil { - return fmt.Errorf("could not bootstrap sealing chain segment blocks: %w", err) - } + // sealing segment lists blocks in order of ascending height, so the tail + // is the oldest ancestor and head is the newest child in the segment + // TAIL <- ... <- HEAD + // Per definition, the highest block in the sealing segment is the last finalized block. + // (The lowest block in sealing segment is the last sealed block, but we don't use that here.) 
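+ // Concrete example (hypothetical heights): for a segment with Blocks = [B2, B3, B4],
+ // listed in ascending height order, segment.Finalized() yields B4 (the head, i.e. the
+ // last finalized block), while segment.Sealed() would yield B2 (the tail, i.e. the
+ // last sealed block).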
+ lastFinalized := segment.Finalized() // highest block in sealing segment; finalized by protocol convention + + // bootstrap the sealing segment + // creating the sealed root block with the root seal + // creating finalized root block with lastFinalized + err = bootstrapSealingSegment(lctx, db, blocks, qcs, segment, lastFinalized, rootSeal) + if err != nil { + return nil, fmt.Errorf("could not bootstrap sealing chain segment blocks: %w", err) + } - // 2) insert the root quorum certificate into the database - qc, err := root.QuorumCertificate() + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // bootstrap dynamic protocol state + err = bootstrapProtocolState(lctx, rw, segment, root.Params(), epochProtocolStateSnapshots, protocolKVStoreSnapshots, setups, commits, !config.SkipNetworkAddressValidation) if err != nil { - return fmt.Errorf("could not get root qc: %w", err) + return fmt.Errorf("could not bootstrap protocol state: %w", err) } - err = qcs.StoreTx(qc)(tx) + + // initialize version beacon + err = boostrapVersionBeacon(rw, root) if err != nil { - return fmt.Errorf("could not insert root qc: %w", err) + return fmt.Errorf("could not bootstrap version beacon: %w", err) } - // 3) initialize the current protocol state height/view pointers - err = transaction.WithTx(state.bootstrapStatePointers(root))(tx) + return nil + }) + if err != nil { + return nil, fmt.Errorf("bootstrapping failed: %w", err) + } + + // CAUTION: INSERT FINALIZED HEIGHT must be LAST, because we use its existence in the database + // as an indicator that the protocol database has been bootstrapped successfully. Before we write the + // final piece of data to complete the bootstrapping, we query the current state of the database + // (sanity check) to ensure that it is still considered as not properly bootstrapped. + isBootstrapped, err = IsBootstrapped(db) + if err != nil { + return nil, fmt.Errorf("determining whether database is successfully bootstrapped failed with unexpected exception: %w", err) + } + if isBootstrapped { // we haven't written the latest finalized height yet, so this value must be false + return nil, fmt.Errorf("sanity check failed: while bootstrapping has not yet completed, the implementation already considers the protocol state as successfully bootstrapped") + } + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // initialize the current protocol state height/view pointers + return bootstrapStatePointers(lctx, rw, root) + }) + if err != nil { + return nil, fmt.Errorf("could not bootstrap height/view pointers: %w", err) + } + + state, err := OpenState(metrics, db, lockManager, headers, seals, results, blocks, qcs, setups, commits, epochProtocolStateSnapshots, protocolKVStoreSnapshots, versionBeacons) + if err != nil { + return nil, fmt.Errorf("bootstrapping failed, because the resulting database state is rejected: %w", err) + } + return state, nil +} + +// bootstrapProtocolState bootstraps data structures needed for Dynamic Protocol State. +// The sealing segment may contain blocks committing to different Protocol State entries, +// in which case each of these protocol state entries is stored in the database during +// bootstrapping. +// For each distinct protocol state entry, we also store the associated EpochSetup and +// EpochCommit service events.
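+//
+// For example (hypothetical scenario): if two segment blocks B1 and B2 commit to the same
+// protocol state ID P, then segment.ProtocolStateEntries contains a single entry for P.
+// That entry is stored once, and the per-block loop below indexes it for both B1's and
+// B2's block IDs.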
+func bootstrapProtocolState( + lctx lockctx.Proof, + rw storage.ReaderBatchWriter, + segment *flow.SealingSegment, + params protocol.GlobalParams, + epochProtocolStateSnapshots storage.EpochProtocolStateEntries, + protocolKVStoreSnapshots storage.ProtocolKVStore, + epochSetups storage.EpochSetups, + epochCommits storage.EpochCommits, + verifyNetworkAddress bool, +) error { + // The sealing segment contains a protocol state entry for every block in the segment, including the root block. + for protocolStateID, stateEntry := range segment.ProtocolStateEntries { + // Store the protocol KV Store entry + err := protocolKVStoreSnapshots.BatchStore(rw, protocolStateID, &stateEntry.KVStore) if err != nil { - return fmt.Errorf("could not bootstrap height/view pointers: %w", err) + return fmt.Errorf("could not store protocol state kvstore: %w", err) } - // 4) initialize values related to the epoch logic - err = state.bootstrapEpoch(root.Epochs(), segment, !config.SkipNetworkAddressValidation)(tx) + // Store the epoch portion of the protocol state, including underlying EpochSetup/EpochCommit service events + dynamicEpochProtocolState, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: stateEntry.EpochEntry, + Params: params, + }, + ) if err != nil { - return fmt.Errorf("could not bootstrap epoch values: %w", err) + return fmt.Errorf("could not construct epoch protocol state adapter: %w", err) } - - // 5) initialize spork params - err = transaction.WithTx(state.bootstrapSporkInfo(root))(tx) + err = bootstrapEpochForProtocolStateEntry(rw, epochProtocolStateSnapshots, epochSetups, epochCommits, dynamicEpochProtocolState, verifyNetworkAddress) if err != nil { - return fmt.Errorf("could not bootstrap spork info: %w", err) + return fmt.Errorf("could not store epoch service events for state entry (id=%x): %w", stateEntry.EpochEntry.ID(), err) } + } - // 6) set metric values - err = state.updateEpochMetrics(root) + for _, proposal := range segment.AllBlocks() { + blockID := proposal.Block.ID() + protocolStateEntryWrapper := segment.ProtocolStateEntries[proposal.Block.Payload.ProtocolStateID] + err := epochProtocolStateSnapshots.BatchIndex(lctx, rw, blockID, protocolStateEntryWrapper.EpochEntry.ID()) if err != nil { - return fmt.Errorf("could not update epoch metrics: %w", err) + return fmt.Errorf("could not index root protocol state: %w", err) } - state.metrics.BlockSealed(lowest) - state.metrics.SealedHeight(lowest.Header.Height) - state.metrics.FinalizedHeight(highest.Header.Height) - for _, block := range segment.Blocks { - state.metrics.BlockFinalized(block) + err = protocolKVStoreSnapshots.BatchIndex(lctx, rw, blockID, proposal.Block.Payload.ProtocolStateID) + if err != nil { + return fmt.Errorf("could not index root kv store: %w", err) } - - return nil - }) - if err != nil { - return nil, fmt.Errorf("bootstrapping failed: %w", err) } - // populate the protocol state cache - err = state.populateCache() - if err != nil { - return nil, fmt.Errorf("failed to populate cache: %w", err) - } - - return state, nil + return nil } -// bootstrapSealingSegment inserts all blocks and associated metadata for the -// protocol state root snapshot to disk. -func (state *State) bootstrapSealingSegment(segment *flow.SealingSegment, head *flow.Block) func(tx *transaction.Tx) error { - return func(tx *transaction.Tx) error { - +// bootstrapSealingSegment inserts all blocks and associated metadata for the protocol state root +// snapshot to disk. 
We proceed as follows: +// 1. persist the auxiliary execution results from the sealing segment +// 2. persist extra blocks from the sealing segment; these blocks are below the history cut-off and +// therefore not fully indexed (we only index the blocks by height). +// 3. persist sealing segment Blocks and properly populate all indices of those blocks: +// - blocks are indexed by their heights +// - latest seal is indexed for each block +// - children of each block are initialized with the set containing the child block +// 4. For the highest seal (`rootSeal`), we index the sealed result ID in the database. +// This is necessary for the execution node to confirm that it is starting to execute from the +// correct state. +// 5. persist the spork root block. This block is always provided separately in the sealing +// segment, as it may or may not be included in SealingSegment.Blocks depending on how much +// history is covered. The spork root block is persisted as a root proposal without proposer +// signature (by convention). +func bootstrapSealingSegment( + lctx lockctx.Proof, + db storage.DB, + blocks storage.Blocks, + qcs storage.QuorumCertificates, + segment *flow.SealingSegment, + head *flow.Block, + rootSeal *flow.Seal, +) error { + // STEP 1: persist AUXILIARY EXECUTION RESULTS (should include the result sealed by segment.FirstSeal if that is not nil) + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + w := rw.Writer() for _, result := range segment.ExecutionResults { - err := transaction.WithTx(operation.SkipDuplicates(operation.InsertExecutionResult(result)))(tx) + err := operation.InsertExecutionResult(w, result) if err != nil { return fmt.Errorf("could not insert execution result: %w", err) } - err = transaction.WithTx(operation.IndexExecutionResult(result.BlockID, result.ID()))(tx) + err = operation.IndexExecutionResult(w, result.BlockID, result.ID()) if err != nil { return fmt.Errorf("could not index execution result: %w", err) } } + return nil + }) + if err != nil { + return err + } - // insert the first seal (in case the segment's first block contains no seal) - if segment.FirstSeal != nil { - err := transaction.WithTx(operation.InsertSeal(segment.FirstSeal.ID(), segment.FirstSeal))(tx) - if err != nil { - return fmt.Errorf("could not insert first seal: %w", err) - } - } - - for _, block := range segment.ExtraBlocks { - blockID := block.ID() - height := block.Header.Height - err := state.blocks.StoreTx(block)(tx) + // STEP 2: persist EXTRA BLOCKS to the database + // These blocks are _ancestors_ of `segment.Blocks`, i.e. below the history cut-off. Therefore, we only persist the extra blocks + // and index them by height, while all the other indices are omitted, as they would potentially reference non-existent data. + // + // We PERSIST these blocks ONE-BY-ONE in order of increasing height, + // emulating the process during normal operations, for the following reason: + // * Execution Receipts are incorporated into blocks for bookkeeping of when and which execution results the ENs published. + // * Typically, most ENs commit to the same results. Therefore, Results in blocks are stored separately from the Receipts + // in blocks and deduplicated along the fork -- specifically, we only store the result along a fork in the first block + // containing an execution receipt committing to that result. For receipts committing to the same result in descendant + // blocks, we only store the receipt and omit the result as it is already contained in an ancestor.
+ // * We want to ensure that for every receipt in a block that we store, the result is also going to be available in storage. + // [Blocks.BatchStore] automatically performs this check and errors when attempting to store a block referencing unknown + // results. + // * During normal operations, we ingest and persist blocks one by one. However, during bootstrapping we need to store + // multiple blocks. Hypothetically, if we were to store all blocks in the same batch, results included in ancestor blocks + // would not be persisted in the database yet when attempting to persist their descendants. In other words, the check in + // [Blocks.BatchStore] can't distinguish between a receipt referencing a missing result vs a receipt referencing a result + // that is contained in a previous block being stored as part of the same batch. + for _, proposal := range segment.ExtraBlocks { + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + blockID := proposal.Block.ID() + height := proposal.Block.Height + err := blocks.BatchStore(lctx, rw, proposal) if err != nil { return fmt.Errorf("could not insert SealingSegment extra block: %w", err) } - err = transaction.WithTx(operation.IndexBlockHeight(height, blockID))(tx) + err = operation.IndexFinalizedBlockByHeight(lctx, rw, height, blockID) if err != nil { return fmt.Errorf("could not index SealingSegment extra block (id=%x): %w", blockID, err) } - err = state.qcs.StoreTx(block.Header.QuorumCertificate())(tx) - if err != nil { - return fmt.Errorf("could not store qc for SealingSegment extra block (id=%x): %w", blockID, err) + + if proposal.Block.ContainsParentQC() { // Only spork root blocks or network genesis blocks do not contain a parent QC. + err = qcs.BatchStore(lctx, rw, proposal.Block.ParentQC()) + if err != nil { + return fmt.Errorf("could not store qc for SealingSegment extra block (id=%x): %w", blockID, err) + } + } + return nil + }) + if err != nil { + return err + } + } + + // STEP 3: persist sealing segment Blocks and properly populate all indices as if those blocks were ingested during normal operations. + // For each block B, we index the highest seal in the fork with head B. To sanity check proper state construction, we want to ensure that the referenced + // seal actually exists in the database at the end of the bootstrapping process. Therefore, we track all the seals that we are storing and error in case + // we attempt to reference a seal that is not in that set. It is fine to omit any seals in `segment.ExtraBlocks` for the following reason: + // * Let's consider the lowest-height block in `segment.Blocks`, by convention `segment.Blocks[0]`, and call it B1. + // * If B1 contains seals, then the latest seal as of B1, call it S1, is part of the block's payload. S1 will be stored in the database while persisting B1. + // * If and only if B1 contains no seal, then `segment.FirstSeal` is set to the latest seal included in an ancestor of B1 (see [flow.SealingSegment] + // documentation). We explicitly store FirstSeal in the database. + // * By induction, this argument can be applied to all subsequent blocks in `segment.Blocks`. Hence, the index `LatestSealAtBlock` is correctly populated +// for all blocks in `segment.Blocks`.
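+ // Sketch of the invariant maintained below (set membership over seal IDs; the inlined
+ // pseudo-code is illustrative, the actual statements follow in this function):
+ //
+ //   sealsLookup[rootSeal.ID()] = struct{}{}   // root seal is always known
+ //   sealsLookup[s.ID()] = struct{}{}          // for FirstSeal (if any) and every payload seal
+ //   _, ok := sealsLookup[latestSealID]        // must succeed for every block in segment.Blocks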
+ sealsLookup := make(map[flow.Identifier]struct{}) + sealsLookup[rootSeal.ID()] = struct{}{} + if segment.FirstSeal != nil { // in case the segment's first block contains no seal, insert the first seal + sealsLookup[segment.FirstSeal.ID()] = struct{}{} + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + if segment.FirstSeal != nil { + err := operation.InsertSeal(rw.Writer(), segment.FirstSeal.ID(), segment.FirstSeal) + if err != nil { + return fmt.Errorf("could not insert first seal: %w", err) + } } + return nil + }) + if err != nil { + return err } + } - for i, block := range segment.Blocks { - blockID := block.ID() - height := block.Header.Height + // PERSIST these blocks ONE-BY-ONE in order of increasing height, emulating the process during normal operations, + // so sanity checks from normal operations should continue to apply. + for i, proposal := range segment.Blocks { + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + w := rw.Writer() + blockID := proposal.Block.ID() + height := proposal.Block.Height - err := state.blocks.StoreTx(block)(tx) + // persist block and index it by height (all blocks in sealing segment are finalized by convention) + err := blocks.BatchStore(lctx, rw, proposal) if err != nil { return fmt.Errorf("could not insert SealingSegment block: %w", err) } - err = transaction.WithTx(operation.IndexBlockHeight(height, blockID))(tx) + err = operation.IndexFinalizedBlockByHeight(lctx, rw, height, blockID) if err != nil { return fmt.Errorf("could not index SealingSegment block (id=%x): %w", blockID, err) } - err = state.qcs.StoreTx(block.Header.QuorumCertificate())(tx) - if err != nil { - return fmt.Errorf("could not store qc for SealingSegment block (id=%x): %w", blockID, err) + if proposal.Block.ContainsParentQC() { // Only spork root blocks or network genesis blocks do not contain a parent QC. 
+ err = qcs.BatchStore(lctx, rw, proposal.Block.ParentQC()) + if err != nil { + return fmt.Errorf("could not store qc for SealingSegment block (id=%x): %w", blockID, err) + } + } + + // add seals in the block to our set of known seals (all of those will be persisted as part of storing the block) + for _, seal := range proposal.Block.Payload.Seals { + sealsLookup[seal.ID()] = struct{}{} } // index the latest seal as of this block @@ -260,325 +428,345 @@ func (state *State) bootstrapSealingSegment(segment *flow.SealingSegment, head * if !ok { return fmt.Errorf("missing latest seal for sealing segment block (id=%s)", blockID) } - // sanity check: make sure the seal exists - var latestSeal flow.Seal - err = transaction.WithTx(operation.RetrieveSeal(latestSealID, &latestSeal))(tx) - if err != nil { - return fmt.Errorf("could not verify latest seal for block (id=%x) exists: %w", blockID, err) + _, ok = sealsLookup[latestSealID] // sanity check: make sure that the latest seal as of this block is actually known + if !ok { + return fmt.Errorf("sanity check fail: missing latest seal for sealing segment block (id=%s)", blockID) } - err = transaction.WithTx(operation.IndexLatestSealAtBlock(blockID, latestSealID))(tx) + err = operation.IndexLatestSealAtBlock(lctx, w, blockID, latestSealID) // persist the mapping from block -> latest seal if err != nil { return fmt.Errorf("could not index block seal: %w", err) } - // for all but the first block in the segment, index the parent->child relationship + // For all but the first block in the segment, index the parent->child relationship: if i > 0 { - err = transaction.WithTx(operation.InsertBlockChildren(block.Header.ParentID, []flow.Identifier{blockID}))(tx) + // Reason for skipping block at index i == 0: + // * `segment.Blocks[0]` is the node's root block, history prior to that root block is not guaranteed to be known to the node. + // * For consistency, we don't want to index children for an unknown or non-existent parent. + // So by convention, we start populating the parent-child relationship only for the root block's children and its descendants. + // This convention also covers the genesis block, where no parent exists. + err = operation.IndexNewBlock(lctx, rw, blockID, proposal.Block.ParentID) if err != nil { - return fmt.Errorf("could not insert child index for block (id=%x): %w", blockID, err) + return fmt.Errorf("could not index block (id=%x): %w", blockID, err) } } - } - // insert an empty child index for the final block in the segment - err := transaction.WithTx(operation.InsertBlockChildren(head.ID(), nil))(tx) + return nil + }) if err != nil { - return fmt.Errorf("could not insert child index for head block (id=%x): %w", head.ID(), err) + return err } - - return nil } -} -// bootstrapStatePointers instantiates special pointers used to by the protocol -// state to keep track of special block heights and views. -func (state *State) bootstrapStatePointers(root protocol.Snapshot) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - segment, err := root.SealingSegment() + // STEP 4: For the highest seal (`rootSeal`), we index the sealed result ID in the database. 
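+ // For illustration (a hedged sketch of how an execution node would use this index,
+ // not part of this change):
+ //   resultID <- lookup via the execution-result index for rootSeal.BlockID
+ //   result   <- retrieve the execution result for resultID
+ // The EN can then compare the sealed result's final state commitment against the
+ // execution state it has loaded, before executing the next block.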
+ err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // sanity check existence of referenced execution result (should have been stored in STEP 1) + var result flow.ExecutionResult + err := operation.RetrieveExecutionResult(rw.GlobalReader(), rootSeal.ResultID, &result) if err != nil { - return fmt.Errorf("could not get sealing segment: %w", err) - } - highest := segment.Highest() - lowest := segment.Sealed() - // find the finalized seal that seals the lowest block, meaning seal.BlockID == lowest.ID() - seal, err := segment.FinalizedSeal() - if err != nil { - return fmt.Errorf("could not get finalized seal from sealing segment: %w", err) - } - - safetyData := &hotstuff.SafetyData{ - LockedOneChainView: highest.Header.View, - HighestAcknowledgedView: highest.Header.View, + return fmt.Errorf("missing sealed execution result %v: %w", rootSeal.ResultID, err) } - // Per convention, all blocks in the sealing segment must be finalized. Therefore, a QC must - // exist for the `highest` block in the sealing segment. The QC for `highest` should be - // contained in the `root` Snapshot and returned by `root.QuorumCertificate()`. Otherwise, - // the Snapshot is incomplete, because consensus nodes require this QC. To reduce the chance of - // accidental misconfiguration undermining consensus liveness, we do the following sanity checks: - // * `rootQC` should not be nil - // * `rootQC` should be for `highest` block, i.e. its view and blockID should match - rootQC, err := root.QuorumCertificate() + // If the sealed root block is different from the finalized root block, then it means the node dynamically + // bootstrapped. In that case, we index the latest sealed result, so that the EN is able + // to confirm that it is loading the correct state to execute the next block. + err = operation.IndexExecutionResult(rw.Writer(), rootSeal.BlockID, rootSeal.ResultID) if err != nil { - return fmt.Errorf("could not get root QC: %w", err) - } - if rootQC == nil { - return fmt.Errorf("QC for highest (finalized) block in sealing segment cannot be nil") - } - if rootQC.View != highest.Header.View { - return fmt.Errorf("root QC's view %d does not match the highest block in sealing segment (view %d)", rootQC.View, highest.Header.View) - } - if rootQC.BlockID != highest.Header.ID() { - return fmt.Errorf("root QC is for block %v, which does not match the highest block %v in sealing segment", rootQC.BlockID, highest.Header.ID()) + return fmt.Errorf("could not index root result: %w", err) } - livenessData := &hotstuff.LivenessData{ - CurrentView: highest.Header.View + 1, - NewestQC: rootQC, - } + return nil + }) + if err != nil { + return err + } - // insert initial views for HotStuff - err = operation.InsertSafetyData(highest.Header.ChainID, safetyData)(tx) - if err != nil { - return fmt.Errorf("could not insert safety data: %w", err) - } - err = operation.InsertLivenessData(highest.Header.ChainID, livenessData)(tx) - if err != nil { - return fmt.Errorf("could not insert liveness data: %w", err) - } + // STEP 5: PERSIST spork root block + // The spork root block is always provided by the sealing segment separately. This is because the spork root block + // may or may not be part of [SealingSegment.Blocks] depending on how much history the sealing segment covers. 
sporkRootBlock := segment.SporkRootBlock - // insert height pointers - err = operation.InsertRootHeight(highest.Header.Height)(tx) - if err != nil { - return fmt.Errorf("could not insert root height: %w", err) - } - err = operation.InsertFinalizedHeight(highest.Header.Height)(tx) - if err != nil { - return fmt.Errorf("could not insert finalized height: %w", err) - } - err = operation.InsertSealedHeight(lowest.Header.Height)(tx) - if err != nil { - return fmt.Errorf("could not insert sealed height: %w", err) - } - err = operation.IndexFinalizedSealByBlockID(seal.BlockID, seal.ID())(tx) + // create the spork root proposal + proposal, err := flow.NewRootProposal( + flow.UntrustedProposal{ + Block: *sporkRootBlock, + ProposerSigData: nil, // by protocol convention, the spork root block (or genesis block) does not have a proposer signature + }, + ) + if err != nil { + return fmt.Errorf("could not create root proposal for spork root block: %w", err) + } + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err = blocks.BatchStore(lctx, rw, proposal) if err != nil { - return fmt.Errorf("could not index sealed block: %w", err) + // the spork root block may or may not have already been persisted, depending + // on whether the root snapshot sealing segment contained it. + if errors.Is(err, storage.ErrAlreadyExists) { + return nil + } + return fmt.Errorf("could not store spork root block: %w", err) } return nil - } -} + }) -// bootstrapEpoch bootstraps the protocol state database with information about -// the previous, current, and next epochs as of the root snapshot. -// -// The root snapshot's sealing segment must not straddle any epoch transitions -// or epoch phase transitions. -func (state *State) bootstrapEpoch(epochs protocol.EpochQuery, segment *flow.SealingSegment, verifyNetworkAddress bool) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - previous := epochs.Previous() - current := epochs.Current() - next := epochs.Next() - - // build the status as we go - status := new(flow.EpochStatus) - var setups []*flow.EpochSetup - var commits []*flow.EpochCommit - - // insert previous epoch if it exists - _, err := previous.Counter() - if err == nil { - // if there is a previous epoch, both setup and commit events must exist - setup, err := protocol.ToEpochSetup(previous) - if err != nil { - return fmt.Errorf("could not get previous epoch setup event: %w", err) - } - commit, err := protocol.ToEpochCommit(previous) - if err != nil { - return fmt.Errorf("could not get previous epoch commit event: %w", err) - } + return nil +} - if err := verifyEpochSetup(setup, verifyNetworkAddress); err != nil { - return fmt.Errorf("invalid setup: %w", err) - } - if err := isValidEpochCommit(commit, setup); err != nil { - return fmt.Errorf("invalid commit: %w", err) - } +// bootstrapStatePointers instantiates central pointers used by the protocol +// state for keeping track of lifecycle variables: +// - Consensus Safety and Liveness Data (only used by consensus participants) +// - Root Block's Height (highest block in sealing segment) +// - Sealed Root Block Height (block height sealed as of the Root Block) +// - Latest Finalized Height (initialized to height of Root Block) +// - Latest Sealed Block Height (initialized to block height sealed as of the Root Block) +// - Spork root block ID (spork root block in sealing segment) +// - initial entry in map: +// Finalized Block ID -> ID of latest seal in fork with this block as head +func bootstrapStatePointers(lctx 
lockctx.Proof, rw storage.ReaderBatchWriter, root protocol.Snapshot) error { + // sealing segment lists blocks in order of ascending height, so the tail + // is the oldest ancestor and head is the newest child in the segment + // TAIL <- ... <- HEAD + segment, err := root.SealingSegment() + if err != nil { + return fmt.Errorf("could not get sealing segment: %w", err) + } + lastFinalized := segment.Finalized() // the lastFinalized block in sealing segment is the latest known finalized block + lastSealed := segment.Sealed() // the lastSealed block in sealing segment is the latest known sealed block + + enc, err := datastore.NewVersionedInstanceParams( + datastore.DefaultInstanceParamsVersion, + lastFinalized.ID(), + lastSealed.ID(), + root.Params().SporkID(), + ) + if err != nil { + return fmt.Errorf("could not create versioned instance params: %w", err) + } - err = indexFirstHeight(previous)(tx.DBTxn) - if err != nil { - return fmt.Errorf("could not index epoch first height: %w", err) - } + err = operation.InsertInstanceParams(lctx, rw, *enc) + if err != nil { + return fmt.Errorf("could not store instance params: %w", err) + } - setups = append(setups, setup) - commits = append(commits, commit) - status.PreviousEpoch.SetupID = setup.ID() - status.PreviousEpoch.CommitID = commit.ID() - } else if !errors.Is(err, protocol.ErrNoPreviousEpoch) { - return fmt.Errorf("could not retrieve previous epoch: %w", err) - } + // find the finalized seal that seals the lastSealed block, meaning seal.BlockID == lastSealed.ID() + seal, err := segment.FinalizedSeal() + if err != nil { + return fmt.Errorf("could not get finalized seal from sealing segment: %w", err) + } - // insert current epoch - both setup and commit events must exist - setup, err := protocol.ToEpochSetup(current) - if err != nil { - return fmt.Errorf("could not get current epoch setup event: %w", err) - } - commit, err := protocol.ToEpochCommit(current) - if err != nil { - return fmt.Errorf("could not get current epoch commit event: %w", err) - } + // Per convention, all blocks in the sealing segment must be finalized. Therefore, a QC must + // exist for the `lastFinalized` block in the sealing segment. The QC for `lastFinalized` should be + // contained in the `root` Snapshot and returned by `root.QuorumCertificate()`. Otherwise, + // the Snapshot is incomplete, because consensus nodes require this QC. To reduce the chance of + // accidental misconfiguration undermining consensus liveness, we do the following sanity checks: + // * `qcForLatestFinalizedBlock` should not be nil + // * `qcForLatestFinalizedBlock` should be for `lastFinalized` block, i.e. 
its view and blockID should match + qcForLatestFinalizedBlock, err := root.QuorumCertificate() + if err != nil { + return fmt.Errorf("failed to obtain QC for latest finalized block from root snapshot: %w", err) + } + if qcForLatestFinalizedBlock == nil { + return fmt.Errorf("QC for latest finalized block in sealing segment cannot be nil") + } + if qcForLatestFinalizedBlock.BlockID != lastFinalized.ID() || qcForLatestFinalizedBlock.View != lastFinalized.View { + return fmt.Errorf("latest finalized block from sealing segment (id %v, view=%d) does not match the root snapshot's tailing QC (certifying block %v with view %d)", + lastFinalized.ID(), lastFinalized.View, qcForLatestFinalizedBlock.BlockID, qcForLatestFinalizedBlock.View) + } - if err := verifyEpochSetup(setup, verifyNetworkAddress); err != nil { - return fmt.Errorf("invalid setup: %w", err) - } - if err := isValidEpochCommit(commit, setup); err != nil { - return fmt.Errorf("invalid commit: %w", err) - } + // By definition, the root block / genesis block is the block with the lowest height and view. In other words, the latest + // finalized block's view must be equal to or greater than the view of the spork root block. We sanity check this relationship here: + sporkRootBlockView := root.Params().SporkRootBlockView() + if !(sporkRootBlockView <= lastFinalized.View) { + return fmt.Errorf("sealing segment is invalid, because the latest finalized block's view %d is lower than the spork root block's view %d", lastFinalized.View, sporkRootBlockView) + } + safetyData := &hotstuff.SafetyData{ + LockedOneChainView: lastFinalized.View, + HighestAcknowledgedView: lastFinalized.View, + } - err = indexFirstHeight(current)(tx.DBTxn) - if err != nil { - return fmt.Errorf("could not index epoch first height: %w", err) - } + // We are given a QC for the latest finalized block, which proves that the view of the latest finalized block has been completed. + // Hence, a freshly-bootstrapped consensus participant continues from the next view. Note that this guarantees that we are starting + // in a view strictly greater than the spork root block's view, which is important for safety and liveness. 
+ livenessData := &hotstuff.LivenessData{ + CurrentView: lastFinalized.View + 1, + NewestQC: qcForLatestFinalizedBlock, + } - setups = append(setups, setup) - commits = append(commits, commit) - status.CurrentEpoch.SetupID = setup.ID() - status.CurrentEpoch.CommitID = commit.ID() - - // insert next epoch, if it exists - _, err = next.Counter() - if err == nil { - // either only the setup event, or both the setup and commit events must exist - setup, err := protocol.ToEpochSetup(next) - if err != nil { - return fmt.Errorf("could not get next epoch setup event: %w", err) - } + // persist safety and liveness data plus the QuorumCertificate for the latest finalized block for HotStuff/Jolteon consensus + err = operation.UpsertSafetyData(lctx, rw, lastFinalized.ChainID, safetyData) + if err != nil { + return fmt.Errorf("could not insert safety data: %w", err) + } + err = operation.UpsertLivenessData(lctx, rw, lastFinalized.ChainID, livenessData) + if err != nil { + return fmt.Errorf("could not insert liveness data: %w", err) + } + err = operation.InsertQuorumCertificate(lctx, rw, qcForLatestFinalizedBlock) + if err != nil { + return fmt.Errorf("could not insert quorum certificate for the latest finalized block: %w", err) + } - if err := verifyEpochSetup(setup, verifyNetworkAddress); err != nil { - return fmt.Errorf("invalid setup: %w", err) - } + w := rw.Writer() + // insert height pointers + err = operation.UpsertFinalizedHeight(lctx, w, lastFinalized.Height) + if err != nil { + return fmt.Errorf("could not insert finalized height: %w", err) + } + err = operation.UpsertSealedHeight(lctx, w, lastSealed.Height) + if err != nil { + return fmt.Errorf("could not insert sealed height: %w", err) + } + err = operation.IndexFinalizedSealByBlockID(w, seal.BlockID, seal.ID()) + if err != nil { + return fmt.Errorf("could not index sealed block: %w", err) + } - setups = append(setups, setup) - status.NextEpoch.SetupID = setup.ID() - commit, err := protocol.ToEpochCommit(next) - if err != nil && !errors.Is(err, protocol.ErrNextEpochNotCommitted) { - return fmt.Errorf("could not get next epoch commit event: %w", err) - } - if err == nil { - if err := isValidEpochCommit(commit, setup); err != nil { - return fmt.Errorf("invalid commit") - } - commits = append(commits, commit) - status.NextEpoch.CommitID = commit.ID() - } - } else if !errors.Is(err, protocol.ErrNextEpochNotSetup) { - return fmt.Errorf("could not get next epoch: %w", err) - } + // insert first-height indices for epochs which begin within the sealing segment + err = indexEpochHeights(lctx, rw, segment) + if err != nil { + return fmt.Errorf("could not index epoch heights: %w", err) + } - // sanity check: ensure epoch status is valid - err = status.Check() - if err != nil { - return fmt.Errorf("bootstrapping resulting in invalid epoch status: %w", err) - } + return nil +} - // insert all epoch setup/commit service events - for _, setup := range setups { - err = state.epoch.setups.StoreTx(setup)(tx) - if err != nil { - return fmt.Errorf("could not store epoch setup event: %w", err) - } +// bootstrapEpochForProtocolStateEntry bootstraps the protocol state database with epoch +// information (in particular, EpochSetup and EpochCommit service events) associated with +// a particular Dynamic Protocol State entry. +// There may be several such entries within a single root snapshot, in which case this +// function is called once for each entry. 
Entries may overlap in which underlying +// epoch information (service events) they reference -- this only has a minor performance +// cost, as duplicate writes of the same data are idempotent. +func bootstrapEpochForProtocolStateEntry( + rw storage.ReaderBatchWriter, + epochProtocolStateSnapshots storage.EpochProtocolStateEntries, + epochSetups storage.EpochSetups, + epochCommits storage.EpochCommits, + epochProtocolStateEntry protocol.EpochProtocolState, + verifyNetworkAddress bool, +) error { + richEntry := epochProtocolStateEntry.Entry() + + // keep track of EpochSetup/EpochCommit service events, then store them after this step is complete + var setups []*flow.EpochSetup + var commits []*flow.EpochCommit + + // validate and insert previous epoch if it exists + if epochProtocolStateEntry.PreviousEpochExists() { + // if there is a previous epoch, both setup and commit events must exist + setup := richEntry.PreviousEpochSetup + commit := richEntry.PreviousEpochCommit + + if err := protocol.IsValidEpochSetup(setup, verifyNetworkAddress); err != nil { + return fmt.Errorf("invalid EpochSetup for previous epoch: %w", err) } - for _, commit := range commits { - err = state.epoch.commits.StoreTx(commit)(tx) - if err != nil { - return fmt.Errorf("could not store epoch commit event: %w", err) - } - } - - // NOTE: as specified in the godoc, this code assumes that each block - // in the sealing segment in within the same phase within the same epoch. - for _, block := range segment.AllBlocks() { - blockID := block.ID() - err = state.epoch.statuses.StoreTx(blockID, status)(tx) - if err != nil { - return fmt.Errorf("could not store epoch status for block (id=%x): %w", blockID, err) - } + if err := protocol.IsValidEpochCommit(commit, setup); err != nil { + return fmt.Errorf("invalid EpochCommit for previous epoch: %w", err) } - return nil + setups = append(setups, setup) + commits = append(commits, commit) } -} -// bootstrapSporkInfo bootstraps the protocol state with information about the -// spork which is used to disambiguate Flow networks. 
-func (state *State) bootstrapSporkInfo(root protocol.Snapshot) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - params := root.Params() + { // validate and insert current epoch (always exists) + setup := richEntry.CurrentEpochSetup + commit := richEntry.CurrentEpochCommit - sporkID, err := params.SporkID() - if err != nil { - return fmt.Errorf("could not get spork ID: %w", err) + if err := protocol.IsValidEpochSetup(setup, verifyNetworkAddress); err != nil { + return fmt.Errorf("invalid EpochSetup for current epoch: %w", err) } - err = operation.InsertSporkID(sporkID)(tx) - if err != nil { - return fmt.Errorf("could not insert spork ID: %w", err) + if err := protocol.IsValidEpochCommit(commit, setup); err != nil { + return fmt.Errorf("invalid EpochCommit for current epoch: %w", err) } - sporkRootBlockHeight, err := params.SporkRootBlockHeight() - if err != nil { - return fmt.Errorf("could not get spork root block height: %w", err) - } - err = operation.InsertSporkRootBlockHeight(sporkRootBlockHeight)(tx) - if err != nil { - return fmt.Errorf("could not insert spork root block height: %w", err) - } + setups = append(setups, setup) + commits = append(commits, commit) + } - version, err := params.ProtocolVersion() - if err != nil { - return fmt.Errorf("could not get protocol version: %w", err) + // validate and insert next epoch, if it exists + if richEntry.NextEpoch != nil { + setup := richEntry.NextEpochSetup // must not be nil + commit := richEntry.NextEpochCommit // may be nil + + if err := protocol.IsValidEpochSetup(setup, verifyNetworkAddress); err != nil { + return fmt.Errorf("invalid EpochSetup for next epoch: %w", err) } - err = operation.InsertProtocolVersion(version)(tx) - if err != nil { - return fmt.Errorf("could not insert protocol version: %w", err) + setups = append(setups, setup) + + if commit != nil { + if err := protocol.IsValidEpochCommit(commit, setup); err != nil { + return fmt.Errorf("invalid EpochCommit for next epoch: %w", err) + } + commits = append(commits, commit) + } + } - threshold, err := params.EpochCommitSafetyThreshold() + // insert all epoch setup/commit service events + // dynamic protocol state relies on these events being stored + for _, setup := range setups { + err := epochSetups.BatchStore(rw, setup) if err != nil { - return fmt.Errorf("could not get epoch commit safety threshold: %w", err) + return fmt.Errorf("could not store epoch setup event: %w", err) } - err = operation.InsertEpochCommitSafetyThreshold(threshold)(tx) + } + for _, commit := range commits { + err := epochCommits.BatchStore(rw, commit) if err != nil { - return fmt.Errorf("could not insert epoch commit safety threshold: %w", err) + return fmt.Errorf("could not store epoch commit event: %w", err) } + } - return nil + // insert epoch protocol state entry, which references above service events + err := epochProtocolStateSnapshots.BatchStore(rw.Writer(), richEntry.ID(), richEntry.MinEpochStateEntry) + if err != nil { + return fmt.Errorf("could not store epoch protocol state entry: %w", err) } + return nil } -// indexFirstHeight indexes the first height for the epoch, as part of bootstrapping. -// The input epoch must have been started (the first block of the epoch has been finalized). +// indexEpochHeights populates the epoch height index from the root snapshot. 
+// We index the FirstHeight for every epoch where the transition occurs within the sealing segment of the root snapshot, +// or for the first epoch of a spork if the snapshot is a spork root snapshot (1 block sealing segment). // No errors are expected during normal operation. -func indexFirstHeight(epoch protocol.Epoch) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - counter, err := epoch.Counter() - if err != nil { - return fmt.Errorf("could not get epoch counter: %w", err) - } - firstHeight, err := epoch.FirstHeight() - if err != nil { - return fmt.Errorf("could not get epoch first height: %w", err) - } - err = operation.InsertEpochFirstHeight(counter, firstHeight)(tx) +func indexEpochHeights(lctx lockctx.Proof, rw storage.ReaderBatchWriter, segment *flow.SealingSegment) error { + // CASE 1: For spork root snapshots, there is exactly one block B and one epoch E. + // Index `E.counter → B.Height`. + if segment.IsSporkRoot() { + counter := segment.LatestProtocolStateEntry().EpochEntry.EpochCounter() + firstHeight := segment.Highest().Height + err := operation.InsertEpochFirstHeight(lctx, rw, counter, firstHeight) if err != nil { return fmt.Errorf("could not index first height %d for epoch %d: %w", firstHeight, counter, err) } return nil } + + // CASE 2: For all other snapshots, there is a segment of blocks which may span several epochs. + // We traverse all blocks in the segment in ascending height order. + // If we find two consecutive blocks B1, B2 so that `B1.EpochCounter` != `B2.EpochCounter`, + // then index `B2.EpochCounter → B2.Height`. + allBlocks := segment.AllBlocks() + lastBlock := allBlocks[0] + lastBlockEpochCounter := segment.ProtocolStateEntries[lastBlock.Block.Payload.ProtocolStateID].EpochEntry.EpochCounter() + for _, block := range allBlocks[1:] { + thisBlockEpochCounter := segment.ProtocolStateEntries[block.Block.Payload.ProtocolStateID].EpochEntry.EpochCounter() + if lastBlockEpochCounter != thisBlockEpochCounter { + firstHeight := block.Block.Height + err := operation.InsertEpochFirstHeight(lctx, rw, thisBlockEpochCounter, firstHeight) + if err != nil { + return fmt.Errorf("could not index first height %d for epoch %d: %w", firstHeight, thisBlockEpochCounter, err) + } + } + lastBlockEpochCounter = thisBlockEpochCounter + } + return nil } func OpenState( metrics module.ComplianceMetrics, - db *badger.DB, + db storage.DB, + lockManager lockctx.Manager, headers storage.Headers, seals storage.Seals, results storage.ExecutionResults, @@ -586,7 +774,8 @@ func OpenState( qcs storage.QuorumCertificates, setups storage.EpochSetups, commits storage.EpochCommits, - statuses storage.EpochStatuses, + epochProtocolState storage.EpochProtocolStateEntries, + protocolKVStoreSnapshots storage.ProtocolKVStore, versionBeacons storage.VersionBeacons, ) (*State, error) { isBootstrapped, err := IsBootstrapped(db) @@ -596,9 +785,29 @@ func OpenState( if !isBootstrapped { return nil, fmt.Errorf("expected database to contain bootstrapped state") } - state := newState( + instanceParams, err := datastore.ReadInstanceParams(db.Reader(), headers, seals, blocks) + if err != nil { + return nil, fmt.Errorf("could not read instance params: %w", err) + } + sporkRootBlock := instanceParams.SporkRootBlock() + + globalParams := inmem.NewParams( + inmem.EncodableParams{ + ChainID: sporkRootBlock.ChainID, + SporkID: sporkRootBlock.ID(), + SporkRootBlockHeight: sporkRootBlock.Height, + SporkRootBlockView: sporkRootBlock.View, + }, + ) + params := &datastore.Params{ + GlobalParams: 
globalParams, + InstanceParams: instanceParams, + } + + state, err := newState( metrics, db, + lockManager, headers, seals, results, @@ -606,30 +815,43 @@ func OpenState( qcs, setups, commits, - statuses, + epochProtocolState, + protocolKVStoreSnapshots, versionBeacons, - ) // populate the protocol state cache - err = state.populateCache() + params, + sporkRootBlock, + ) if err != nil { - return nil, fmt.Errorf("failed to populate cache: %w", err) + return nil, fmt.Errorf("could not create state: %w", err) } - // report last finalized and sealed block height + // report information about latest known finalized block finalSnapshot := state.Final() - head, err := finalSnapshot.Head() + latestFinalizedHeader, err := finalSnapshot.Head() if err != nil { return nil, fmt.Errorf("unexpected error to get finalized block: %w", err) } - metrics.FinalizedHeight(head.Height) + latestFinalizedBlock, err := state.blocks.ByHeight(latestFinalizedHeader.Height) + if err != nil { + return nil, fmt.Errorf("could not retrieve the latest finalized block by height: %w", err) + } + metrics.FinalizedHeight(latestFinalizedHeader.Height) + metrics.BlockFinalized(latestFinalizedBlock) - sealed, err := state.Sealed().Head() + // report information about latest known sealed block + latestSealedHeader, err := state.Sealed().Head() + if err != nil { + return nil, fmt.Errorf("could not get latest sealed block header: %w", err) + } + latestSealedBlock, err := state.blocks.ByHeight(latestSealedHeader.Height) if err != nil { - return nil, fmt.Errorf("could not get latest sealed block: %w", err) + return nil, fmt.Errorf("could not retrieve the latest sealed block by height: %w", err) } - metrics.SealedHeight(sealed.Height) + metrics.SealedHeight(latestSealedHeader.Height) + metrics.BlockSealed(latestSealedBlock) - // update all epoch related metrics - err = state.updateEpochMetrics(finalSnapshot) + // report information about latest known epoch + err = updateEpochMetrics(metrics, finalSnapshot) if err != nil { return nil, fmt.Errorf("failed to update epoch metrics: %w", err) } @@ -638,27 +860,27 @@ func OpenState( } func (state *State) Params() protocol.Params { - return Params{state: state} + return state.params } // Sealed returns a snapshot for the latest sealed block. A latest sealed block // must always exist, so this function always returns a valid snapshot. func (state *State) Sealed() protocol.Snapshot { - cached := state.cachedSealed.Load() + cached := state.cachedLatest.Load() if cached == nil { return invalid.NewSnapshotf("internal inconsistency: no cached sealed header") } - return NewFinalizedSnapshot(state, cached.id, cached.header) + return NewFinalizedSnapshot(state, cached.sealedID, cached.sealedHeader) } // Final returns a snapshot for the latest finalized block. A latest finalized // block must always exist, so this function always returns a valid snapshot. func (state *State) Final() protocol.Snapshot { - cached := state.cachedFinal.Load() + cached := state.cachedLatest.Load() if cached == nil { return invalid.NewSnapshotf("internal inconsistency: no cached final header") } - return NewFinalizedSnapshot(state, cached.id, cached.header) + return NewFinalizedSnapshot(state, cached.finalizedID, cached.finalizedHeader) } // AtHeight returns a snapshot for the finalized block at the given height. 
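The caching refactor visible above replaces the two independent atomic pointers (cachedFinal, cachedSealed) with a single atomic.Pointer[cachedLatest] holding both the finalized and the sealed reference, so Sealed() and Final() always read a mutually consistent pair from one atomic load. Below is a minimal, self-contained sketch of that pattern; the cachedLatest field names mirror the diff, while the surrounding types (string IDs, the trimmed state struct) are simplified placeholders rather than the real flow-go definitions.

package main

import (
	"fmt"
	"sync/atomic"
)

// cachedLatest bundles the latest finalized and sealed block references into a
// single immutable value, mirroring the struct introduced in the diff. Readers
// obtain both with one atomic load, so they never observe a half-updated pair.
type cachedLatest struct {
	finalizedID     string // flow.Identifier in the real code
	finalizedHeight uint64
	sealedID        string
	sealedHeight    uint64
}

type state struct {
	cachedLatest atomic.Pointer[cachedLatest]
}

// publish installs a fresh, internally consistent entry with a single pointer swap.
func (s *state) publish(finalizedID string, finalizedHeight uint64, sealedID string, sealedHeight uint64) {
	s.cachedLatest.Store(&cachedLatest{
		finalizedID:     finalizedID,
		finalizedHeight: finalizedHeight,
		sealedID:        sealedID,
		sealedHeight:    sealedHeight,
	})
}

func main() {
	s := &state{}
	s.publish("blockC", 12, "blockA", 10)
	entry := s.cachedLatest.Load() // one load -> consistent finalized/sealed pair
	fmt.Println(entry.finalizedHeight, entry.sealedHeight)
}

Compared with two separate pointers, this closes the window in which a reader could pair a freshly updated finalized header with a stale sealed header, or vice versa.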
@@ -668,9 +890,7 @@ func (state *State) Final() protocol.Snapshot { // -> if the given height is below the root height // - exception for critical unexpected storage errors func (state *State) AtHeight(height uint64) protocol.Snapshot { - // retrieve the block ID for the finalized height - var blockID flow.Identifier - err := state.db.View(operation.LookupBlockHeight(height, &blockID)) + blockID, err := state.headers.BlockIDByHeight(height) if err != nil { if errors.Is(err, storage.ErrNotFound) { return invalid.NewSnapshotf("unknown finalized height %d: %w", height, statepkg.ErrUnknownSnapshotReference) @@ -704,7 +924,8 @@ func (state *State) AtBlockID(blockID flow.Identifier) protocol.Snapshot { // is expected to contain an already bootstrapped state or not func newState( metrics module.ComplianceMetrics, - db *badger.DB, + db storage.DB, + lockManager lockctx.Manager, headers storage.Headers, seals storage.Seals, results storage.ExecutionResults, @@ -712,36 +933,55 @@ func newState( qcs storage.QuorumCertificates, setups storage.EpochSetups, commits storage.EpochCommits, - statuses storage.EpochStatuses, + epochProtocolStateSnapshots storage.EpochProtocolStateEntries, + protocolKVStoreSnapshots storage.ProtocolKVStore, versionBeacons storage.VersionBeacons, -) *State { - return &State{ - metrics: metrics, - db: db, - headers: headers, - results: results, - seals: seals, - blocks: blocks, - qcs: qcs, + params protocol.Params, + sporkRootBlock *flow.Block, +) (*State, error) { + state := &State{ + metrics: metrics, + db: db, + lockManager: lockManager, + headers: headers, + results: results, + seals: seals, + blocks: blocks, + qcs: qcs, epoch: struct { - setups storage.EpochSetups - commits storage.EpochCommits - statuses storage.EpochStatuses + setups storage.EpochSetups + commits storage.EpochCommits }{ - setups: setups, - commits: commits, - statuses: statuses, + setups: setups, + commits: commits, }, + params: params, + protocolKVStoreSnapshotsDB: protocolKVStoreSnapshots, + epochProtocolStateEntriesDB: epochProtocolStateSnapshots, + protocolState: protocol_state. 
+ NewProtocolState( + epochProtocolStateSnapshots, + protocolKVStoreSnapshots, + params, + ), versionBeacons: versionBeacons, - cachedFinal: new(atomic.Pointer[cachedHeader]), - cachedSealed: new(atomic.Pointer[cachedHeader]), + cachedLatest: new(atomic.Pointer[cachedLatest]), + sporkRootBlock: sporkRootBlock, } + + // populate the protocol state cache + err := state.populateCache() + if err != nil { + return nil, fmt.Errorf("failed to populate cache: %w", err) + } + + return state, nil } // IsBootstrapped returns whether the database contains a bootstrapped state -func IsBootstrapped(db *badger.DB) (bool, error) { +func IsBootstrapped(db storage.DB) (bool, error) { var finalized uint64 - err := db.View(operation.RetrieveFinalizedHeight(&finalized)) + err := operation.RetrieveFinalizedHeight(db.Reader(), &finalized) if errors.Is(err, storage.ErrNotFound) { return false, nil } @@ -753,160 +993,80 @@ func IsBootstrapped(db *badger.DB) (bool, error) { // updateEpochMetrics updates the `consensus_compliance_current_epoch_counter` and the // `consensus_compliance_current_epoch_phase` metrics -func (state *State) updateEpochMetrics(snap protocol.Snapshot) error { - - // update epoch counter - counter, err := snap.Epochs().Current().Counter() - if err != nil { - return fmt.Errorf("could not get current epoch counter: %w", err) - } - state.metrics.CurrentEpochCounter(counter) - - // update epoch phase - phase, err := snap.Phase() +func updateEpochMetrics(metrics module.ComplianceMetrics, snap protocol.Snapshot) error { + currentEpoch, err := snap.Epochs().Current() if err != nil { - return fmt.Errorf("could not get current epoch counter: %w", err) + return fmt.Errorf("could not get current epoch: %w", err) } - state.metrics.CurrentEpochPhase(phase) + metrics.CurrentEpochCounter(currentEpoch.Counter()) + metrics.CurrentEpochFinalView(currentEpoch.FinalView()) + metrics.CurrentDKGPhaseViews(currentEpoch.DKGPhase1FinalView(), currentEpoch.DKGPhase2FinalView(), currentEpoch.DKGPhase3FinalView()) - // update committed epoch final view - err = state.updateCommittedEpochFinalView(snap) + epochProtocolState, err := snap.EpochProtocolState() if err != nil { - return fmt.Errorf("could not update committed epoch final view") + return fmt.Errorf("could not get epoch protocol state: %w", err) } - - currentEpochFinalView, err := snap.Epochs().Current().FinalView() - if err != nil { - return fmt.Errorf("could not update current epoch final view: %w", err) + metrics.CurrentEpochPhase(epochProtocolState.EpochPhase()) // update epoch phase + // notify whether epoch fallback mode is active + if epochProtocolState.EpochFallbackTriggered() { + metrics.EpochFallbackModeTriggered() } - state.metrics.CurrentEpochFinalView(currentEpochFinalView) - dkgPhase1FinalView, dkgPhase2FinalView, dkgPhase3FinalView, err := protocol.DKGPhaseViews(snap.Epochs().Current()) - if err != nil { - return fmt.Errorf("could not get dkg phase final view: %w", err) - } - - state.metrics.CurrentDKGPhase1FinalView(dkgPhase1FinalView) - state.metrics.CurrentDKGPhase2FinalView(dkgPhase2FinalView) - state.metrics.CurrentDKGPhase3FinalView(dkgPhase3FinalView) + return nil +} - // EECC - check whether the epoch emergency fallback flag has been set - // in the database. If so, skip updating any epoch-related metrics. - epochFallbackTriggered, err := state.isEpochEmergencyFallbackTriggered() +// boostrapVersionBeacon bootstraps the version beacon by adding the latest beacon +// to an index, if present. 
+func boostrapVersionBeacon(rw storage.ReaderBatchWriter, snapshot protocol.Snapshot) error { + versionBeacon, err := snapshot.VersionBeacon() if err != nil { - return fmt.Errorf("could not check epoch emergency fallback flag: %w", err) + return err } - if epochFallbackTriggered { - state.metrics.EpochEmergencyFallbackTriggered() + if versionBeacon == nil { + return nil } - - return nil + return operation.IndexVersionBeaconByHeight(rw.Writer(), versionBeacon) } // populateCache is used after opening or bootstrapping the state to populate the cache. // The cache must be populated before the State receives any queries. // No errors expected during normal operations. func (state *State) populateCache() error { - // cache the initial value for finalized block - err := state.db.View(func(tx *badger.Txn) error { - // root height - err := state.db.View(operation.RetrieveRootHeight(&state.rootHeight)) - if err != nil { - return fmt.Errorf("could not read root block to populate cache: %w", err) - } - // spork root block height - err = state.db.View(operation.RetrieveSporkRootBlockHeight(&state.sporkRootBlockHeight)) - if err != nil { - return fmt.Errorf("could not get spork root block height: %w", err) - } - // finalized header - var finalizedHeight uint64 - err = operation.RetrieveFinalizedHeight(&finalizedHeight)(tx) - if err != nil { - return fmt.Errorf("could not lookup finalized height: %w", err) - } - var cachedFinalHeader cachedHeader - err = operation.LookupBlockHeight(finalizedHeight, &cachedFinalHeader.id)(tx) - if err != nil { - return fmt.Errorf("could not lookup finalized id (height=%d): %w", finalizedHeight, err) - } - cachedFinalHeader.header, err = state.headers.ByBlockID(cachedFinalHeader.id) - if err != nil { - return fmt.Errorf("could not get finalized block (id=%x): %w", cachedFinalHeader.id, err) - } - state.cachedFinal.Store(&cachedFinalHeader) - // sealed header - var sealedHeight uint64 - err = operation.RetrieveSealedHeight(&sealedHeight)(tx) - if err != nil { - return fmt.Errorf("could not lookup sealed height: %w", err) - } - var cachedSealedHeader cachedHeader - err = operation.LookupBlockHeight(finalizedHeight, &cachedSealedHeader.id)(tx) - if err != nil { - return fmt.Errorf("could not lookup sealed id (height=%d): %w", finalizedHeight, err) - } - cachedSealedHeader.header, err = state.headers.ByBlockID(cachedSealedHeader.id) - if err != nil { - return fmt.Errorf("could not get sealed block (id=%x): %w", cachedFinalHeader.id, err) - } - state.cachedSealed.Store(&cachedSealedHeader) - return nil - }) + // finalized header + r := state.db.Reader() + var finalizedHeight uint64 + err := operation.RetrieveFinalizedHeight(r, &finalizedHeight) if err != nil { - return fmt.Errorf("could not cache finalized header: %w", err) + return fmt.Errorf("could not lookup finalized height: %w", err) } - - return nil -} - -// updateCommittedEpochFinalView updates the `committed_epoch_final_view` metric -// based on the current epoch phase of the input snapshot. It should be called -// at startup and during transitions between EpochSetup and EpochCommitted phases. -// -// For example, suppose we have epochs N and N+1. -// If we are in epoch N's Staking or Setup Phase, then epoch N's final view should be the value of the metric. -// If we are in epoch N's Committed Phase, then epoch N+1's final view should be the value of the metric. 
-func (state *State) updateCommittedEpochFinalView(snap protocol.Snapshot) error { - - phase, err := snap.Phase() + var cachedLatest cachedLatest + err = operation.LookupBlockHeight(r, finalizedHeight, &cachedLatest.finalizedID) if err != nil { - return fmt.Errorf("could not get epoch phase: %w", err) + return fmt.Errorf("could not lookup finalized id (height=%d): %w", finalizedHeight, err) } - - // update metric based of epoch phase - switch phase { - case flow.EpochPhaseStaking, flow.EpochPhaseSetup: - - // if we are in Staking or Setup phase, then set the metric value to the current epoch's final view - finalView, err := snap.Epochs().Current().FinalView() - if err != nil { - return fmt.Errorf("could not get current epoch final view from snapshot: %w", err) - } - state.metrics.CommittedEpochFinalView(finalView) - case flow.EpochPhaseCommitted: - - // if we are in Committed phase, then set the metric value to the next epoch's final view - finalView, err := snap.Epochs().Next().FinalView() - if err != nil { - return fmt.Errorf("could not get next epoch final view from snapshot: %w", err) - } - state.metrics.CommittedEpochFinalView(finalView) - default: - return fmt.Errorf("invalid phase: %s", phase) + cachedLatest.finalizedHeader, err = state.headers.ByBlockID(cachedLatest.finalizedID) + if err != nil { + return fmt.Errorf("could not get finalized block (id=%x): %w", cachedLatest.finalizedID, err) } + // sealed header + var sealedHeight uint64 + err = operation.RetrieveSealedHeight(r, &sealedHeight) + if err != nil { + return fmt.Errorf("could not lookup sealed height: %w", err) + } + err = operation.LookupBlockHeight(r, sealedHeight, &cachedLatest.sealedID) + if err != nil { + return fmt.Errorf("could not lookup sealed id (height=%d): %w", sealedHeight, err) + } + cachedLatest.sealedHeader, err = state.headers.ByBlockID(cachedLatest.sealedID) + if err != nil { + return fmt.Errorf("could not get sealed block (id=%x): %w", cachedLatest.sealedID, err) + } + state.cachedLatest.Store(&cachedLatest) - return nil -} + state.finalizedRootHeight = state.Params().FinalizedRoot().Height + state.sealedRootHeight = state.Params().SealedRoot().Height -// isEpochEmergencyFallbackTriggered checks whether epoch fallback has been globally triggered. 
-// Returns: -// * (true, nil) if epoch fallback is triggered -// * (false, nil) if epoch fallback is not triggered (including if the flag is not set) -// * (false, err) if an unexpected error occurs -func (state *State) isEpochEmergencyFallbackTriggered() (bool, error) { - var triggered bool - err := state.db.View(operation.CheckEpochEmergencyFallbackTriggered(&triggered)) - return triggered, err + return nil } diff --git a/state/protocol/badger/state_test.go b/state/protocol/badger/state_test.go index ed20266d09b..cc9c024d3e6 100644 --- a/state/protocol/badger/state_test.go +++ b/state/protocol/badger/state_test.go @@ -5,9 +5,7 @@ import ( "fmt" "os" "testing" - "time" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" testmock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -20,8 +18,9 @@ import ( "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/state/protocol/util" protoutil "github.com/onflow/flow-go/state/protocol/util" - storagebadger "github.com/onflow/flow-go/storage/badger" - storutil "github.com/onflow/flow-go/storage/util" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -32,55 +31,57 @@ func TestBootstrapAndOpen(t *testing.T) { // create a state root and bootstrap the protocol state with it participants := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(participants, func(block *flow.Block) { - block.Header.ParentID = unittest.IdentifierFixture() + block.ParentID = unittest.IdentifierFixture() }) - protoutil.RunWithBootstrapState(t, rootSnapshot, func(db *badger.DB, _ *bprotocol.State) { - + protoutil.RunWithBootstrapState(t, rootSnapshot, func(db storage.DB, _ *bprotocol.State) { + lockManager := storage.NewTestingLockManager() // expect the final view metric to be set to current epoch's final view - epoch := rootSnapshot.Epochs().Current() - finalView, err := epoch.FinalView() - require.NoError(t, err) - counter, err := epoch.Counter() + epoch, err := rootSnapshot.Epochs().Current() require.NoError(t, err) - phase, err := rootSnapshot.Phase() + counter := epoch.Counter() + phase, err := rootSnapshot.EpochPhase() require.NoError(t, err) complianceMetrics := new(mock.ComplianceMetrics) - complianceMetrics.On("CommittedEpochFinalView", finalView).Once() complianceMetrics.On("CurrentEpochCounter", counter).Once() complianceMetrics.On("CurrentEpochPhase", phase).Once() - complianceMetrics.On("CurrentEpochFinalView", finalView).Once() + complianceMetrics.On("CurrentEpochFinalView", epoch.FinalView()).Once() + complianceMetrics.On("BlockFinalized", testmock.Anything).Once() complianceMetrics.On("FinalizedHeight", testmock.Anything).Once() + complianceMetrics.On("BlockSealed", testmock.Anything).Once() complianceMetrics.On("SealedHeight", testmock.Anything).Once() - dkgPhase1FinalView, dkgPhase2FinalView, dkgPhase3FinalView, err := protocol.DKGPhaseViews(epoch) - require.NoError(t, err) - complianceMetrics.On("CurrentDKGPhase1FinalView", dkgPhase1FinalView).Once() - complianceMetrics.On("CurrentDKGPhase2FinalView", dkgPhase2FinalView).Once() - complianceMetrics.On("CurrentDKGPhase3FinalView", dkgPhase3FinalView).Once() + complianceMetrics.On("CurrentDKGPhaseViews", + epoch.DKGPhase1FinalView(), epoch.DKGPhase2FinalView(), epoch.DKGPhase3FinalView()).Once() noopMetrics := new(metrics.NoopCollector) - all := storagebadger.InitAll(noopMetrics, 
db) + all := store.InitAll(noopMetrics, db) // protocol state has been bootstrapped, now open a protocol state with the database state, err := bprotocol.OpenState( complianceMetrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, ) require.NoError(t, err) - complianceMetrics.AssertExpectations(t) - unittest.AssertSnapshotsEqual(t, rootSnapshot, state.Final()) + finalSnap := state.Final() + unittest.AssertSnapshotsEqual(t, rootSnapshot, finalSnap) + + vb, err := finalSnap.VersionBeacon() + require.NoError(t, err) + require.Nil(t, vb) }) } @@ -88,22 +89,21 @@ func TestBootstrapAndOpen(t *testing.T) { // root snapshot from EpochCommitted phase we should be able to open it and // get the same state. func TestBootstrapAndOpen_EpochCommitted(t *testing.T) { - // create a state root and bootstrap the protocol state with it participants := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(participants, func(block *flow.Block) { - block.Header.ParentID = unittest.IdentifierFixture() + block.ParentID = unittest.IdentifierFixture() }) rootBlock, err := rootSnapshot.Head() require.NoError(t, err) // build an epoch on the root state and return a snapshot from the committed phase - committedPhaseSnapshot := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState) protocol.Snapshot { - unittest.NewEpochBuilder(t, state).BuildEpoch().CompleteEpoch() + committedPhaseSnapshot := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot { + unittest.NewEpochBuilder(t, mutableState, state).BuildEpoch().CompleteEpoch() // find the point where we transition to the epoch committed phase for height := rootBlock.Height + 1; ; height++ { - phase, err := state.AtHeight(height).Phase() + phase, err := state.AtHeight(height).EpochPhase() require.NoError(t, err) if phase == flow.EpochPhaseCommitted { return state.AtHeight(height) @@ -111,50 +111,45 @@ func TestBootstrapAndOpen_EpochCommitted(t *testing.T) { } }) - protoutil.RunWithBootstrapState(t, committedPhaseSnapshot, func(db *badger.DB, _ *bprotocol.State) { + protoutil.RunWithBootstrapState(t, committedPhaseSnapshot, func(db storage.DB, _ *bprotocol.State) { + lockManager := storage.NewTestingLockManager() complianceMetrics := new(mock.ComplianceMetrics) - // expect the final view metric to be set to next epoch's final view - finalView, err := committedPhaseSnapshot.Epochs().Next().FinalView() + currentEpoch, err := committedPhaseSnapshot.Epochs().Current() require.NoError(t, err) - complianceMetrics.On("CommittedEpochFinalView", finalView).Once() - // expect counter to be set to current epochs counter - counter, err := committedPhaseSnapshot.Epochs().Current().Counter() - require.NoError(t, err) + counter := currentEpoch.Counter() complianceMetrics.On("CurrentEpochCounter", counter).Once() // expect epoch phase to be set to current phase - phase, err := committedPhaseSnapshot.Phase() + phase, err := committedPhaseSnapshot.EpochPhase() require.NoError(t, err) complianceMetrics.On("CurrentEpochPhase", phase).Once() + complianceMetrics.On("CurrentEpochFinalView", currentEpoch.FinalView()).Once() + complianceMetrics.On("CurrentDKGPhaseViews", currentEpoch.DKGPhase1FinalView(), currentEpoch.DKGPhase2FinalView(), currentEpoch.DKGPhase3FinalView()).Once() - 
currentEpochFinalView, err := committedPhaseSnapshot.Epochs().Current().FinalView() - require.NoError(t, err) - complianceMetrics.On("CurrentEpochFinalView", currentEpochFinalView).Once() - - dkgPhase1FinalView, dkgPhase2FinalView, dkgPhase3FinalView, err := protocol.DKGPhaseViews(committedPhaseSnapshot.Epochs().Current()) - require.NoError(t, err) - complianceMetrics.On("CurrentDKGPhase1FinalView", dkgPhase1FinalView).Once() - complianceMetrics.On("CurrentDKGPhase2FinalView", dkgPhase2FinalView).Once() - complianceMetrics.On("CurrentDKGPhase3FinalView", dkgPhase3FinalView).Once() + // expect finalized and sealed to be set to the latest block complianceMetrics.On("FinalizedHeight", testmock.Anything).Once() + complianceMetrics.On("BlockFinalized", testmock.Anything).Once() complianceMetrics.On("SealedHeight", testmock.Anything).Once() + complianceMetrics.On("BlockSealed", testmock.Anything).Once() noopMetrics := new(metrics.NoopCollector) - all := storagebadger.InitAll(noopMetrics, db) + all := store.InitAll(noopMetrics, db) state, err := bprotocol.OpenState( complianceMetrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, ) require.NoError(t, err) @@ -168,81 +163,167 @@ func TestBootstrapAndOpen_EpochCommitted(t *testing.T) { // TestBootstrap_EpochHeightBoundaries tests that epoch height indexes are indexed // when they are available in the input snapshot. +// +// DIAGRAM LEGEND: +// +// < = low endpoint of a sealing segment +// > = high endpoint of a sealing segment +// x = root sealing segment +// | = epoch boundary func TestBootstrap_EpochHeightBoundaries(t *testing.T) { t.Parallel() // start with a regular post-spork root snapshot rootSnapshot := unittest.RootSnapshotFixture(unittest.CompleteIdentitySet()) - epoch1FirstHeight := rootSnapshot.Encodable().Head.Height + epoch1FirstHeight := rootSnapshot.Encodable().Head().Height - t.Run("root snapshot", func(t *testing.T) { - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { + // For the spork root snapshot, only the first height of the root epoch should be indexed. + // [x] + t.Run("spork root snapshot", func(t *testing.T) { + util.RunWithFollowerProtocolState(t, rootSnapshot, func(db storage.DB, state *bprotocol.FollowerState) { + currentEpoch, err := state.Final().Epochs().Current() + require.NoError(t, err) // first height of started current epoch should be known - firstHeight, err := state.Final().Epochs().Current().FirstHeight() + firstHeight, err := currentEpoch.FirstHeight() require.NoError(t, err) assert.Equal(t, epoch1FirstHeight, firstHeight) // final height of not completed current epoch should be unknown - _, err = state.Final().Epochs().Current().FinalHeight() - assert.ErrorIs(t, err, protocol.ErrEpochTransitionNotFinalized) + _, err = currentEpoch.FinalHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) }) }) - t.Run("with next epoch", func(t *testing.T) { - after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState) protocol.Snapshot { - builder := unittest.NewEpochBuilder(t, state) - builder.BuildEpoch().CompleteEpoch() - heights, ok := builder.EpochHeights(1) + // In this test we construct a snapshot where the sealing segment is entirely + // within a particular epoch (does not cross any boundary). 
In this case, + // no boundaries should be queriable in the API. + // [---<--->--] + t.Run("snapshot excludes start boundary", func(t *testing.T) { + var epochHeights *unittest.EpochHeights + after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot { + builder := unittest.NewEpochBuilder(t, mutableState, state) + builder.BuildEpoch(). + AddBlocksWithSeals(flow.DefaultTransactionExpiry, 1). // ensure sealing segment excludes start boundary + CompleteEpoch() // building epoch 1 (prepare epoch 2) + var ok bool + epochHeights, ok = builder.EpochHeights(1) require.True(t, ok) - return state.AtHeight(heights.Committed) + // return a snapshot with reference block in the Committed phase of Epoch 1 + return state.AtHeight(epochHeights.CommittedFinal) }) bootstrap(t, after, func(state *bprotocol.State, err error) { require.NoError(t, err) - // first height of started current epoch should be known - firstHeight, err := state.Final().Epochs().Current().FirstHeight() - assert.Equal(t, epoch1FirstHeight, firstHeight) + finalSnap := state.Final() + currentEpoch, err := finalSnap.Epochs().Current() + require.NoError(t, err) + nextEpoch, err := finalSnap.Epochs().NextCommitted() require.NoError(t, err) + // first height of started current epoch should be unknown + _, err = currentEpoch.FirstHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) // final height of not completed current epoch should be unknown - _, err = state.Final().Epochs().Current().FinalHeight() - assert.ErrorIs(t, err, protocol.ErrEpochTransitionNotFinalized) + _, err = currentEpoch.FinalHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) // first and final height of not started next epoch should be unknown - _, err = state.Final().Epochs().Next().FirstHeight() - assert.ErrorIs(t, err, protocol.ErrEpochTransitionNotFinalized) - _, err = state.Final().Epochs().Next().FinalHeight() - assert.ErrorIs(t, err, protocol.ErrEpochTransitionNotFinalized) + _, err = nextEpoch.FirstHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) + _, err = nextEpoch.FinalHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) + // nonexistent previous epoch should be unknown + _, err = finalSnap.Epochs().Previous() + assert.ErrorIs(t, err, protocol.ErrNoPreviousEpoch) }) }) - t.Run("with previous epoch", func(t *testing.T) { - var epoch1FinalHeight uint64 - var epoch2FirstHeight uint64 - after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState) protocol.Snapshot { - builder := unittest.NewEpochBuilder(t, state) + + // In this test we construct a root snapshot such that the Previous epoch w.r.t + // the snapshot reference block has only the end boundary included in the + // sealing segment. Therefore, only FinalBlock should be queriable in the API. + // [---<---|--->---] + t.Run("root snapshot includes previous epoch end boundary only", func(t *testing.T) { + var epoch2Heights *unittest.EpochHeights + after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot { + builder := unittest.NewEpochBuilder(t, mutableState, state) builder. - BuildEpoch().CompleteEpoch(). // build epoch 2 - BuildEpoch() // build epoch 3 - heights, ok := builder.EpochHeights(2) - epoch2FirstHeight = heights.FirstHeight() - epoch1FinalHeight = epoch2FirstHeight - 1 + BuildEpoch(). + AddBlocksWithSeals(flow.DefaultTransactionExpiry, 1). 
// ensure sealing segment excludes start boundary + CompleteEpoch(). // building epoch 1 (prepare epoch 2) + BuildEpoch() // building epoch 2 (prepare epoch 3) + var ok bool + epoch2Heights, ok = builder.EpochHeights(2) + require.True(t, ok) + + // return snapshot from Committed phase of epoch 2 + return state.AtHeight(epoch2Heights.Committed) + }) + + bootstrap(t, after, func(state *bprotocol.State, err error) { + require.NoError(t, err) + finalSnap := state.Final() + currentEpoch, err := finalSnap.Epochs().Current() + require.NoError(t, err) + // first height of started current epoch should be known + firstHeight, err := currentEpoch.FirstHeight() + assert.Equal(t, epoch2Heights.FirstHeight(), firstHeight) + require.NoError(t, err) + // final height of not completed current epoch should be unknown + _, err = currentEpoch.FinalHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) + previousEpoch, err := finalSnap.Epochs().Previous() + require.NoError(t, err) + // first height of previous epoch should be unknown + _, err = previousEpoch.FirstHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) + // final height of previous epoch should be known + finalHeight, err := previousEpoch.FinalHeight() + require.NoError(t, err) + assert.Equal(t, finalHeight, epoch2Heights.FirstHeight()-1) + }) + }) + + // In this test we construct a root snapshot such that the Previous epoch w.r.t + // the snapshot reference block has both start and end boundaries included in the + // sealing segment. Therefore, both boundaries should be queryable in the API. + // [---<---|---|--->---] + t.Run("root snapshot includes previous epoch start and end boundary", func(t *testing.T) { + var epoch3Heights *unittest.EpochHeights + var epoch2Heights *unittest.EpochHeights + after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot { + builder := unittest.NewEpochBuilder(t, mutableState, state) + builder. + BuildEpoch().CompleteEpoch(). // building epoch 1 (prepare epoch 2) + BuildEpoch().CompleteEpoch(). 
// building epoch 2 (prepare epoch 3) + BuildEpoch() // building epoch 3 (prepare epoch 4) + var ok bool + epoch3Heights, ok = builder.EpochHeights(3) require.True(t, ok) - // return snapshot from within epoch 2 (middle epoch) - return state.AtHeight(heights.Setup) + epoch2Heights, ok = builder.EpochHeights(2) + require.True(t, ok) + + // return snapshot from Committed phase of epoch 3 + return state.AtHeight(epoch3Heights.Committed) }) bootstrap(t, after, func(state *bprotocol.State, err error) { + require.NoError(t, err) + finalSnap := state.Final() + currentEpoch, err := finalSnap.Epochs().Current() require.NoError(t, err) // first height of started current epoch should be known - firstHeight, err := state.Final().Epochs().Current().FirstHeight() - assert.Equal(t, epoch2FirstHeight, firstHeight) + firstHeight, err := currentEpoch.FirstHeight() + assert.Equal(t, epoch3Heights.FirstHeight(), firstHeight) require.NoError(t, err) // final height of not completed current epoch should be unknown - _, err = state.Final().Epochs().Current().FinalHeight() - assert.ErrorIs(t, err, protocol.ErrEpochTransitionNotFinalized) - // first and final height of completed previous epoch should be known - firstHeight, err = state.Final().Epochs().Previous().FirstHeight() + _, err = currentEpoch.FinalHeight() + assert.ErrorIs(t, err, protocol.ErrUnknownEpochBoundary) + previousEpoch, err := finalSnap.Epochs().Previous() + require.NoError(t, err) + // first height of previous epoch should be known + firstHeight, err = previousEpoch.FirstHeight() require.NoError(t, err) - assert.Equal(t, firstHeight, epoch1FirstHeight) - finalHeight, err := state.Final().Epochs().Previous().FinalHeight() + assert.Equal(t, epoch2Heights.FirstHeight(), firstHeight) + // final height of completed previous epoch should be known + finalHeight, err := previousEpoch.FinalHeight() require.NoError(t, err) - assert.Equal(t, finalHeight, epoch1FinalHeight) + assert.Equal(t, finalHeight, epoch2Heights.FinalHeight()) }) }) } @@ -257,26 +338,41 @@ func TestBootstrapNonRoot(t *testing.T) { // start with a regular post-spork root snapshot participants := unittest.CompleteIdentitySet() rootSnapshot := unittest.RootSnapshotFixture(participants) + rootProtocolStateID := getRootProtocolStateID(t, rootSnapshot) rootBlock, err := rootSnapshot.Head() require.NoError(t, err) // should be able to bootstrap from snapshot after sealing a non-root block // ROOT <- B1 <- B2(R1) <- B3(S1) <- CHILD t.Run("with sealed block", func(t *testing.T) { - after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState) protocol.Snapshot { - block1 := unittest.BlockWithParentFixture(rootBlock) + after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot { + block1 := unittest.BlockWithParentAndPayload( + rootBlock, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) buildFinalizedBlock(t, state, block1) receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1) - block2 := unittest.BlockWithParentFixture(block1.Header) - block2.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipt1))) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(rootProtocolStateID))) buildFinalizedBlock(t, state, block2) - block3 := unittest.BlockWithParentFixture(block2.Header) - block3.SetPayload(unittest.PayloadFixture(unittest.WithSeals(seal1))) + 
seals := []*flow.Seal{seal1} + block3View := block2.View + 1 + block3 := unittest.BlockFixture( + unittest.Block.WithParent(block2.ID(), block2.View, block2.Height), + unittest.Block.WithPayload( + flow.Payload{ + Seals: seals, + ProtocolStateID: calculateExpectedStateId(t, mutableState)(block2.ID(), block3View, seals), + }), + ) buildFinalizedBlock(t, state, block3) - child := unittest.BlockWithParentFixture(block3.Header) + child := unittest.BlockWithParentProtocolState(block3) buildBlock(t, state, child) return state.AtBlockID(block3.ID()) @@ -284,27 +380,179 @@ func TestBootstrapNonRoot(t *testing.T) { bootstrap(t, after, func(state *bprotocol.State, err error) { require.NoError(t, err) - unittest.AssertSnapshotsEqual(t, after, state.Final()) - // should be able to read all QCs - segment, err := state.Final().SealingSegment() + finalSnap := state.Final() + unittest.AssertSnapshotsEqual(t, after, finalSnap) + segment, err := finalSnap.SealingSegment() + require.NoError(t, err) + for _, proposal := range segment.Blocks { + snapshot := state.AtBlockID(proposal.Block.ID()) + // should be able to read all QCs + _, err := snapshot.QuorumCertificate() + require.NoError(t, err) + _, err = snapshot.RandomSource() + require.NoError(t, err) + } + }) + }) + + // should be able to bootstrap from snapshot when the sealing segment contains + // a block which references a result included outside the sealing segment. + // In this case, B2 contains the result for B1, but is omitted from the segment. + // B3 contains only the receipt for B1 and is included in the segment. + // + // Extra Blocks Sealing Segment + // [-----------------------][--------------------------------------] + // ROOT <- B1 <- B2(Receipt1a,Result1) <- B3(Receipt1b) <- ... <- G1 <- G2(R[G1]) <- G3(Seal[G1]) + t.Run("with detached execution result reference in sealing segment", func(t *testing.T) { + after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot { + block1 := unittest.BlockWithParentAndPayload(rootBlock, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID))) + buildFinalizedBlock(t, state, block1) + + receipt1a, seal1 := unittest.ReceiptAndSealForBlock(block1) + receipt1b := unittest.ExecutionReceiptFixture(unittest.WithResult(&receipt1a.ExecutionResult)) + + block2 := unittest.BlockWithParentAndPayload(block1.ToHeader(), unittest.PayloadFixture( + unittest.WithReceipts(receipt1a), + unittest.WithProtocolStateID(rootProtocolStateID))) + buildFinalizedBlock(t, state, block2) + + block3 := unittest.BlockWithParentAndPayload(block2.ToHeader(), unittest.PayloadFixture( + unittest.WithReceiptsAndNoResults(receipt1b), + unittest.WithProtocolStateID(rootProtocolStateID))) + buildFinalizedBlock(t, state, block3) + + receipt2, seal2 := unittest.ReceiptAndSealForBlock(block2) + receipt3, seal3 := unittest.ReceiptAndSealForBlock(block3) + + receipts := []*flow.ExecutionReceipt{receipt2, receipt3} + seals := []*flow.Seal{seal1, seal2, seal3} + + parent := block3 + for i := 0; i < flow.DefaultTransactionExpiry-1; i++ { + next := unittest.BlockWithParentAndPayload(parent.ToHeader(), unittest.PayloadFixture( + unittest.WithReceipts(receipts[0]), + unittest.WithProtocolStateID(calculateExpectedStateId(t, mutableState)(parent.ID(), parent.View+1, []*flow.Seal{seals[0]})), + unittest.WithSeals(seals[0]))) + seals, receipts = seals[1:], receipts[1:] + + nextReceipt, nextSeal := unittest.ReceiptAndSealForBlock(next) + receipts = 
append(receipts, nextReceipt) + seals = append(seals, nextSeal) + buildFinalizedBlock(t, state, next) + parent = next + } + + // G1 adds all receipts from all blocks before G1 + blockG1 := unittest.BlockWithParentAndPayload(parent.ToHeader(), unittest.PayloadFixture( + unittest.WithReceipts(receipts...), + unittest.WithProtocolStateID(parent.Payload.ProtocolStateID))) + buildFinalizedBlock(t, state, blockG1) + + receiptS1, sealS1 := unittest.ReceiptAndSealForBlock(blockG1) + + // G2 adds all seals from all blocks before G1 + blockG2 := unittest.BlockWithParentAndPayload(blockG1.ToHeader(), unittest.PayloadFixture( + unittest.WithSeals(seals...), + unittest.WithProtocolStateID(calculateExpectedStateId(t, mutableState)(blockG1.ID(), blockG1.View+1, seals)), + unittest.WithReceipts(receiptS1))) + buildFinalizedBlock(t, state, blockG2) + + // G3 seals G1, creating a sealing segment + blockG3 := unittest.BlockWithParentAndPayload(blockG2.ToHeader(), unittest.PayloadFixture( + unittest.WithSeals(sealS1), + unittest.WithProtocolStateID(calculateExpectedStateId(t, mutableState)(blockG2.ID(), blockG2.View+1, []*flow.Seal{sealS1})))) + buildFinalizedBlock(t, state, blockG3) + + child := unittest.BlockWithParentAndPayload(blockG3.ToHeader(), unittest.PayloadFixture(unittest.WithProtocolStateID(blockG3.Payload.ProtocolStateID))) + buildFinalizedBlock(t, state, child) + + return state.AtBlockID(blockG3.ID()) + }) + + segment, err := after.SealingSegment() + require.NoError(t, err) + // To accurately test the desired edge case we require that the lowest block in ExtraBlocks is B3 + assert.Equal(t, uint64(3), segment.ExtraBlocks[0].Block.Height) + + bootstrap(t, after, func(state *bprotocol.State, err error) { + require.NoError(t, err) + }) + }) + + // should be able to bootstrap from snapshot after entering EFM because of sealing invalid service event + // ROOT <- B1 <- B2(R1) <- B3(S1) <- CHILD + t.Run("in EFM", func(t *testing.T) { + after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot { + block1 := unittest.BlockWithParentAndPayload( + rootBlock, + unittest.PayloadFixture(unittest.WithProtocolStateID(rootProtocolStateID)), + ) + buildFinalizedBlock(t, state, block1) + + invalidEpochSetup := unittest.EpochSetupFixture() + receipt1, seal1 := unittest.ReceiptAndSealForBlock(block1, invalidEpochSetup.ServiceEvent()) + block2 := unittest.BlockWithParentAndPayload( + block1.ToHeader(), + unittest.PayloadFixture( + unittest.WithReceipts(receipt1), + unittest.WithProtocolStateID(rootProtocolStateID)), + ) + buildFinalizedBlock(t, state, block2) + + seals := []*flow.Seal{seal1} + block3View := block2.View + 1 + block3 := unittest.BlockFixture( + unittest.Block.WithParent(block2.ID(), block2.View, block2.Height), + unittest.Block.WithPayload( + flow.Payload{ + Seals: seals, + ProtocolStateID: calculateExpectedStateId(t, mutableState)(block2.ID(), block3View, seals), + }), + ) + buildFinalizedBlock(t, state, block3) + + child := unittest.BlockWithParentProtocolState(block3) + buildBlock(t, state, child) + + // ensure we have entered EFM + snapshot := state.AtBlockID(block3.ID()) + epochState, err := snapshot.EpochProtocolState() require.NoError(t, err) - for _, block := range segment.Blocks { - snapshot := state.AtBlockID(block.ID()) + require.Equal(t, flow.EpochPhaseFallback, epochState.EpochPhase()) + + return snapshot + }) + + bootstrap(t, after, func(state *bprotocol.State, err error) { + require.NoError(t, err) + 
finalSnap := state.Final()
+ unittest.AssertSnapshotsEqual(t, after, finalSnap)
+ segment, err := finalSnap.SealingSegment()
+ require.NoError(t, err)
+ for _, proposal := range segment.Blocks {
+ snapshot := state.AtBlockID(proposal.Block.ID())
+ // should be able to read all QCs
_, err := snapshot.QuorumCertificate()
require.NoError(t, err)
_, err = snapshot.RandomSource()
require.NoError(t, err)
}
+
+ epochState, err := finalSnap.EpochProtocolState()
+ require.NoError(t, err)
+ require.True(t, epochState.EpochFallbackTriggered())
+ require.Equal(t, flow.EpochPhaseFallback, epochState.EpochPhase())
})
})

t.Run("with setup next epoch", func(t *testing.T) {
- after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState) protocol.Snapshot {
- unittest.NewEpochBuilder(t, state).BuildEpoch()
+ after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot {
+ unittest.NewEpochBuilder(t, mutableState, state).BuildEpoch()
// find the point where we transition to the epoch setup phase
for height := rootBlock.Height + 1; ; height++ {
- phase, err := state.AtHeight(height).Phase()
+ phase, err := state.AtHeight(height).EpochPhase()
require.NoError(t, err)
if phase == flow.EpochPhaseSetup {
return state.AtHeight(height)
@@ -314,17 +562,29 @@ func TestBootstrapNonRoot(t *testing.T) {
bootstrap(t, after, func(state *bprotocol.State, err error) {
require.NoError(t, err)
- unittest.AssertSnapshotsEqual(t, after, state.Final())
+ finalSnap := state.Final()
+ unittest.AssertSnapshotsEqual(t, after, finalSnap)
+
+ segment, err := finalSnap.SealingSegment()
+ require.NoError(t, err)
+ assert.GreaterOrEqual(t, len(segment.ProtocolStateEntries), 2, "should have at least 2 distinct protocol state entries")
+ for _, proposal := range segment.Blocks {
+ snapshot := state.AtBlockID(proposal.Block.ID())
+ // should be able to read all protocol state entries
+ protocolStateEntry, err := snapshot.ProtocolState()
+ require.NoError(t, err)
+ assert.Equal(t, proposal.Block.Payload.ProtocolStateID, protocolStateEntry.ID())
+ }
})
})

t.Run("with committed next epoch", func(t *testing.T) {
- after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState) protocol.Snapshot {
- unittest.NewEpochBuilder(t, state).BuildEpoch().CompleteEpoch()
+ after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot {
+ unittest.NewEpochBuilder(t, mutableState, state).BuildEpoch().CompleteEpoch()
// find the point where we transition to the epoch committed phase
for height := rootBlock.Height + 1; ; height++ {
- phase, err := state.AtHeight(height).Phase()
+ phase, err := state.AtHeight(height).EpochPhase()
require.NoError(t, err)
if phase == flow.EpochPhaseCommitted {
return state.AtHeight(height)
@@ -334,26 +594,39 @@ func TestBootstrapNonRoot(t *testing.T) {
bootstrap(t, after, func(state *bprotocol.State, err error) {
require.NoError(t, err)
- unittest.AssertSnapshotsEqual(t, after, state.Final())
+ finalSnap := state.Final()
+ unittest.AssertSnapshotsEqual(t, after, finalSnap)
+
+ segment, err := finalSnap.SealingSegment()
+ require.NoError(t, err)
+ assert.GreaterOrEqual(t, len(segment.ProtocolStateEntries), 2, "should have at least 2 distinct protocol state entries")
+ for _, proposal := range segment.Blocks {
+ snapshot := state.AtBlockID(proposal.Block.ID())
+ // should be able to read all protocol state entries
+ protocolStateEntry, err := snapshot.ProtocolState()
+ require.NoError(t, err)
+ assert.Equal(t, proposal.Block.Payload.ProtocolStateID, protocolStateEntry.ID())
+ }
})
})

t.Run("with previous and next epoch", func(t *testing.T) {
- after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState) protocol.Snapshot {
- unittest.NewEpochBuilder(t, state).
+ after := snapshotAfter(t, rootSnapshot, func(state *bprotocol.FollowerState, mutableState protocol.MutableProtocolState) protocol.Snapshot {
+ unittest.NewEpochBuilder(t, mutableState, state).
BuildEpoch().CompleteEpoch(). // build epoch 2
BuildEpoch() // build epoch 3
// find a snapshot from epoch setup phase in epoch 2
- epoch1Counter, err := rootSnapshot.Epochs().Current().Counter()
+ epoch1, err := rootSnapshot.Epochs().Current()
require.NoError(t, err)
+ epoch1Counter := epoch1.Counter()
for height := rootBlock.Height + 1; ; height++ {
snap := state.AtHeight(height)
- counter, err := snap.Epochs().Current().Counter()
+ epoch, err := snap.Epochs().Current()
require.NoError(t, err)
- phase, err := snap.Phase()
+ phase, err := snap.EpochPhase()
require.NoError(t, err)
- if phase == flow.EpochPhaseSetup && counter == epoch1Counter+1 {
+ if phase == flow.EpochPhaseSetup && epoch.Counter() == epoch1Counter+1 {
return snap
}
}
@@ -361,7 +634,19 @@ func TestBootstrapNonRoot(t *testing.T) {
bootstrap(t, after, func(state *bprotocol.State, err error) {
require.NoError(t, err)
- unittest.AssertSnapshotsEqual(t, after, state.Final())
+ finalSnap := state.Final()
+ unittest.AssertSnapshotsEqual(t, after, finalSnap)
+
+ segment, err := finalSnap.SealingSegment()
+ require.NoError(t, err)
+ assert.GreaterOrEqual(t, len(segment.ProtocolStateEntries), 2, "should have at least 2 distinct protocol state entries")
+ for _, proposal := range segment.Blocks {
+ snapshot := state.AtBlockID(proposal.Block.ID())
+ // should be able to read all protocol state entries
+ protocolStateEntry, err := snapshot.ProtocolState()
+ require.NoError(t, err)
+ assert.Equal(t, proposal.Block.Payload.ProtocolStateID, protocolStateEntry.ID())
+ }
})
})
}
@@ -369,7 +654,9 @@ func TestBootstrapNonRoot(t *testing.T) {
func TestBootstrap_InvalidIdentities(t *testing.T) {
t.Run("duplicate node ID", func(t *testing.T) {
participants := unittest.CompleteIdentitySet()
- dupeIDIdentity := unittest.IdentityFixture(unittest.WithNodeID(participants[0].NodeID))
+ // Make sure the duplicate node ID is not a consensus node; otherwise this will form an invalid DKGIDMapping.
+ // See [flow.EpochCommit] for details.
+ dupeIDIdentity := unittest.IdentityFixture(unittest.WithNodeID(participants[0].NodeID), unittest.WithRole(flow.RoleVerification))
participants = append(participants, dupeIDIdentity)
root := unittest.RootSnapshotFixture(participants)
@@ -379,7 +666,7 @@ func TestBootstrap_InvalidIdentities(t *testing.T) {
})

t.Run("zero weight", func(t *testing.T) {
- zeroWeightIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification), unittest.WithWeight(0))
+ zeroWeightIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification), unittest.WithInitialWeight(0))
participants := unittest.CompleteIdentitySet(zeroWeightIdentity)
root := unittest.RootSnapshotFixture(participants)
bootstrap(t, root, func(state *bprotocol.State, err error) {
@@ -418,11 +705,20 @@ func TestBootstrap_InvalidIdentities(t *testing.T) {
t.Run("non-canonical ordering", func(t *testing.T) {
participants := unittest.IdentityListFixture(20, unittest.WithAllRoles())
+ // randomly shuffle the identities so they are not canonically ordered
+ unorderedParticipants, err := participants.ToSkeleton().Shuffle()
+ require.NoError(t, err)
root := unittest.RootSnapshotFixture(participants)
- // randomly shuffle the identities so they are not canonically ordered
encodable := root.Encodable()
- encodable.Identities = participants.DeterministicShuffle(time.Now().UnixNano())
+
+ // modify EpochSetup participants, making them unordered
+ latestProtocolStateEntry := encodable.SealingSegment.LatestProtocolStateEntry()
+ currentEpochSetup := latestProtocolStateEntry.EpochEntry.CurrentEpochSetup
+ currentEpochSetup.Participants = unorderedParticipants
+ latestProtocolStateEntry.EpochEntry.CurrentEpoch.SetupID = currentEpochSetup.ID()
+
root = inmem.SnapshotFromEncodable(encodable)
bootstrap(t, root, func(state *bprotocol.State, err error) {
assert.Error(t, err)
@@ -435,8 +731,8 @@ func TestBootstrap_DisconnectedSealingSegment(t *testing.T) {
// convert to encodable to easily modify snapshot
encodable := rootSnapshot.Encodable()
// add an un-connected tail block to the sealing segment
- tail := unittest.BlockFixture()
- encodable.SealingSegment.Blocks = append([]*flow.Block{&tail}, encodable.SealingSegment.Blocks...)
+ tail := unittest.ProposalFixture()
+ encodable.SealingSegment.Blocks = append([]*flow.Proposal{tail}, encodable.SealingSegment.Blocks...)
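+ // The snapshot-corruption tests in this file share one pattern: decode the root
+ // snapshot into its mutable Encodable form, break a single invariant (here: a
+ // disconnected tail proposal), re-encode via inmem.SnapshotFromEncodable, and
+ // require that bootstrapping rejects the result.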
rootSnapshot = inmem.SnapshotFromEncodable(encodable)

bootstrap(t, rootSnapshot, func(state *bprotocol.State, err error) {
@@ -487,7 +783,9 @@ func TestBootstrap_SealMismatch(t *testing.T) {
rootSnapshot := unittest.RootSnapshotFixture(unittest.CompleteIdentitySet())
// convert to encodable to easily modify snapshot
encodable := rootSnapshot.Encodable()
- encodable.LatestSeal.BlockID = unittest.IdentifierFixture()
+ latestSeal, err := encodable.LatestSeal()
+ require.NoError(t, err)
+ latestSeal.BlockID = unittest.IdentifierFixture()

bootstrap(t, rootSnapshot, func(state *bprotocol.State, err error) {
assert.Error(t, err)
@@ -498,7 +796,9 @@ func TestBootstrap_SealMismatch(t *testing.T) {
rootSnapshot := unittest.RootSnapshotFixture(unittest.CompleteIdentitySet())
// convert to encodable to easily modify snapshot
encodable := rootSnapshot.Encodable()
- encodable.LatestResult.BlockID = unittest.IdentifierFixture()
+ latestSealedResult, err := encodable.LatestSealedResult()
+ require.NoError(t, err)
+ latestSealedResult.BlockID = unittest.IdentifierFixture()

bootstrap(t, rootSnapshot, func(state *bprotocol.State, err error) {
assert.Error(t, err)
@@ -509,7 +809,9 @@ func TestBootstrap_SealMismatch(t *testing.T) {
rootSnapshot := unittest.RootSnapshotFixture(unittest.CompleteIdentitySet())
// convert to encodable to easily modify snapshot
encodable := rootSnapshot.Encodable()
- encodable.LatestSeal.ResultID = unittest.IdentifierFixture()
+ latestSeal, err := encodable.LatestSeal()
+ require.NoError(t, err)
+ latestSeal.ResultID = unittest.IdentifierFixture()

bootstrap(t, rootSnapshot, func(state *bprotocol.State, err error) {
assert.Error(t, err)
@@ -517,26 +819,52 @@ func TestBootstrap_SealMismatch(t *testing.T) {
})
}

+// TestBootstrap_InvalidSporkBlockView verifies that bootstrapStatePointers
+// returns an error when the SporkRootBlockView is set to a value greater than
+// or equal to the LivenessData.CurrentView.
+func TestBootstrap_InvalidSporkBlockView(t *testing.T) { + rootSnapshot := unittest.RootSnapshotFixture(unittest.CompleteIdentitySet()) + // convert to encodable to easily modify snapshot + encodable := rootSnapshot.Encodable() + + segment, err := rootSnapshot.SealingSegment() + require.NoError(t, err) + + // invalid configuration, where the latest finalized block's view is lower than the spork root block's view: + encodable.Params.SporkRootBlockView = segment.Finalized().View + 1 + + rootSnapshot = inmem.SnapshotFromEncodable(encodable) + + bootstrap(t, rootSnapshot, func(state *bprotocol.State, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("sealing segment is invalid, because the latest finalized block's view %d is lower than the spork root block's view %d", segment.Finalized().View, rootSnapshot.Params().SporkRootBlockView())) + }) +} + // bootstraps protocol state with the given snapshot and invokes the callback // with the result of the constructor func bootstrap(t *testing.T, rootSnapshot protocol.Snapshot, f func(*bprotocol.State, error)) { metrics := metrics.NewNoopCollector() dir := unittest.TempDir(t) defer os.RemoveAll(dir) - db := unittest.BadgerDB(t, dir) + pdb := unittest.PebbleDB(t, dir) + db := pebbleimpl.ToDB(pdb) + lockManager := storage.NewTestingLockManager() defer db.Close() - all := storutil.StorageLayer(t, db) + all := store.InitAll(metrics, db) state, err := bprotocol.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) @@ -549,10 +877,10 @@ func bootstrap(t *testing.T, rootSnapshot protocol.Snapshot, f func(*bprotocol.S // // This is used for generating valid snapshots to use when testing bootstrapping // from non-root states. 
-func snapshotAfter(t *testing.T, rootSnapshot protocol.Snapshot, f func(*bprotocol.FollowerState) protocol.Snapshot) protocol.Snapshot { +func snapshotAfter(t *testing.T, rootSnapshot protocol.Snapshot, f func(*bprotocol.FollowerState, protocol.MutableProtocolState) protocol.Snapshot) protocol.Snapshot { var after protocol.Snapshot - protoutil.RunWithFollowerProtocolState(t, rootSnapshot, func(_ *badger.DB, state *bprotocol.FollowerState) { - snap := f(state) + protoutil.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(_ storage.DB, state *bprotocol.ParticipantState, mutableState protocol.MutableProtocolState) { + snap := f(state.FollowerState, mutableState) var err error after, err = inmem.FromSnapshot(snap) require.NoError(t, err) @@ -562,12 +890,12 @@ func snapshotAfter(t *testing.T, rootSnapshot protocol.Snapshot, f func(*bprotoc // buildBlock extends the protocol state by the given block func buildBlock(t *testing.T, state protocol.FollowerState, block *flow.Block) { - require.NoError(t, state.ExtendCertified(context.Background(), block, unittest.CertifyBlock(block.Header))) + require.NoError(t, state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block))) } // buildFinalizedBlock extends the protocol state by the given block and marks the block as finalized func buildFinalizedBlock(t *testing.T, state protocol.FollowerState, block *flow.Block) { - require.NoError(t, state.ExtendCertified(context.Background(), block, unittest.CertifyBlock(block.Header))) + require.NoError(t, state.ExtendCertified(context.Background(), unittest.NewCertifiedBlock(block))) require.NoError(t, state.Finalize(context.Background(), block.ID())) } @@ -580,18 +908,17 @@ func assertSealingSegmentBlocksQueryableAfterBootstrap(t *testing.T, snapshot pr segment, err := state.Final().SealingSegment() require.NoError(t, err) - rootBlock, err := state.Params().Root() - require.NoError(t, err) + rootBlock := state.Params().FinalizedRoot() // root block should be the highest block from the sealing segment - assert.Equal(t, segment.Highest().Header, rootBlock) + assert.Equal(t, segment.Highest().ToHeader(), rootBlock) // for each block in the sealing segment we should be able to query: // * Head // * SealedResult // * Commit - for _, block := range segment.Blocks { - blockID := block.ID() + for _, proposal := range segment.Blocks { + blockID := proposal.Block.ID() snap := state.AtBlockID(blockID) header, err := snap.Head() assert.NoError(t, err) @@ -604,8 +931,8 @@ func assertSealingSegmentBlocksQueryableAfterBootstrap(t *testing.T, snapshot pr assert.Equal(t, seal.FinalState, commit) } // for all blocks but the head, we should be unable to query SealingSegment: - for _, block := range segment.Blocks[:len(segment.Blocks)-1] { - snap := state.AtBlockID(block.ID()) + for _, proposal := range segment.Blocks[:len(segment.Blocks)-1] { + snap := state.AtBlockID(proposal.Block.ID()) _, err := snap.SealingSegment() assert.ErrorIs(t, err, protocol.ErrSealingSegmentBelowRootBlock) } @@ -614,7 +941,7 @@ func assertSealingSegmentBlocksQueryableAfterBootstrap(t *testing.T, snapshot pr // BenchmarkFinal benchmarks retrieving the latest finalized block from storage. 
func BenchmarkFinal(b *testing.B) { - util.RunWithBootstrapState(b, unittest.RootSnapshotFixture(unittest.CompleteIdentitySet()), func(db *badger.DB, state *bprotocol.State) { + util.RunWithBootstrapState(b, unittest.RootSnapshotFixture(unittest.CompleteIdentitySet()), func(db storage.DB, state *bprotocol.State) { b.ResetTimer() for i := 0; i < b.N; i++ { header, err := state.Final().Head() @@ -626,7 +953,7 @@ func BenchmarkFinal(b *testing.B) { // BenchmarkFinal benchmarks retrieving the block by height from storage. func BenchmarkByHeight(b *testing.B) { - util.RunWithBootstrapState(b, unittest.RootSnapshotFixture(unittest.CompleteIdentitySet()), func(db *badger.DB, state *bprotocol.State) { + util.RunWithBootstrapState(b, unittest.RootSnapshotFixture(unittest.CompleteIdentitySet()), func(db storage.DB, state *bprotocol.State) { b.ResetTimer() for i := 0; i < b.N; i++ { header, err := state.AtHeight(0).Head() diff --git a/state/protocol/badger/validity.go b/state/protocol/badger/validity.go deleted file mode 100644 index 04379abbc29..00000000000 --- a/state/protocol/badger/validity.go +++ /dev/null @@ -1,410 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/onflow/flow-go/consensus/hotstuff/committees" - "github.com/onflow/flow-go/consensus/hotstuff/signature" - "github.com/onflow/flow-go/consensus/hotstuff/validator" - "github.com/onflow/flow-go/consensus/hotstuff/verification" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/factory" - "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/flow/order" - "github.com/onflow/flow-go/state/protocol" -) - -// isValidExtendingEpochSetup checks whether an epoch setup service being -// added to the state is valid. In addition to intrinsic validity, we also -// check that it is valid w.r.t. the previous epoch setup event, and the -// current epoch status. -// Assumes all inputs besides extendingSetup are already validated. -// Expected errors during normal operations: -// * protocol.InvalidServiceEventError if the input service event is invalid to extend the currently active epoch status -func isValidExtendingEpochSetup(extendingSetup *flow.EpochSetup, activeSetup *flow.EpochSetup, status *flow.EpochStatus) error { - // We should only have a single epoch setup event per epoch. - if status.NextEpoch.SetupID != flow.ZeroID { - // true iff EpochSetup event for NEXT epoch was already included before - return protocol.NewInvalidServiceEventErrorf("duplicate epoch setup service event: %x", status.NextEpoch.SetupID) - } - - // The setup event should have the counter increased by one. - if extendingSetup.Counter != activeSetup.Counter+1 { - return protocol.NewInvalidServiceEventErrorf("next epoch setup has invalid counter (%d => %d)", activeSetup.Counter, extendingSetup.Counter) - } - - // The first view needs to be exactly one greater than the current epoch final view - if extendingSetup.FirstView != activeSetup.FinalView+1 { - return protocol.NewInvalidServiceEventErrorf( - "next epoch first view must be exactly 1 more than current epoch final view (%d != %d+1)", - extendingSetup.FirstView, - activeSetup.FinalView, - ) - } - - // Finally, the epoch setup event must contain all necessary information. - err := verifyEpochSetup(extendingSetup, true) - if err != nil { - return protocol.NewInvalidServiceEventErrorf("invalid epoch setup: %w", err) - } - - return nil -} - -// verifyEpochSetup checks whether an `EpochSetup` event is syntactically correct. 
-// The boolean parameter `verifyNetworkAddress` controls, whether we want to permit -// nodes to share a networking address. -// This is a side-effect-free function. Any error return indicates that the -// EpochSetup event is not compliant with protocol rules. -func verifyEpochSetup(setup *flow.EpochSetup, verifyNetworkAddress bool) error { - // STEP 1: general sanity checks - // the seed needs to be at least minimum length - if len(setup.RandomSource) != flow.EpochSetupRandomSourceLength { - return fmt.Errorf("seed has incorrect length (%d != %d)", len(setup.RandomSource), flow.EpochSetupRandomSourceLength) - } - - // STEP 2: sanity checks of all nodes listed as participants - // there should be no duplicate node IDs - identLookup := make(map[flow.Identifier]struct{}) - for _, participant := range setup.Participants { - _, ok := identLookup[participant.NodeID] - if ok { - return fmt.Errorf("duplicate node identifier (%x)", participant.NodeID) - } - identLookup[participant.NodeID] = struct{}{} - } - - if verifyNetworkAddress { - // there should be no duplicate node addresses - addrLookup := make(map[string]struct{}) - for _, participant := range setup.Participants { - _, ok := addrLookup[participant.Address] - if ok { - return fmt.Errorf("duplicate node address (%x)", participant.Address) - } - addrLookup[participant.Address] = struct{}{} - } - } - - // the participants must be listed in canonical order - if !setup.Participants.Sorted(order.Canonical) { - return fmt.Errorf("participants are not canonically ordered") - } - - // STEP 3: sanity checks for individual roles - // IMPORTANT: here we remove all nodes with zero weight, as they are allowed to partake - // in communication but not in respective node functions - activeParticipants := setup.Participants.Filter(filter.HasWeight(true)) - - // we need at least one node of each role - roles := make(map[flow.Role]uint) - for _, participant := range activeParticipants { - roles[participant.Role]++ - } - if roles[flow.RoleConsensus] < 1 { - return fmt.Errorf("need at least one consensus node") - } - if roles[flow.RoleCollection] < 1 { - return fmt.Errorf("need at least one collection node") - } - if roles[flow.RoleExecution] < 1 { - return fmt.Errorf("need at least one execution node") - } - if roles[flow.RoleVerification] < 1 { - return fmt.Errorf("need at least one verification node") - } - - // first view must be before final view - if setup.FirstView >= setup.FinalView { - return fmt.Errorf("first view (%d) must be before final view (%d)", setup.FirstView, setup.FinalView) - } - - // we need at least one collection cluster - if len(setup.Assignments) == 0 { - return fmt.Errorf("need at least one collection cluster") - } - - // the collection cluster assignments need to be valid - _, err := factory.NewClusterList(setup.Assignments, activeParticipants.Filter(filter.HasRole(flow.RoleCollection))) - if err != nil { - return fmt.Errorf("invalid cluster assignments: %w", err) - } - - return nil -} - -// isValidExtendingEpochCommit checks whether an epoch commit service being -// added to the state is valid. In addition to intrinsic validity, we also -// check that it is valid w.r.t. the previous epoch setup event, and the -// current epoch status. -// Assumes all inputs besides extendingCommit are already validated. 
-// Expected errors during normal operations: -// * protocol.InvalidServiceEventError if the input service event is invalid to extend the currently active epoch status -func isValidExtendingEpochCommit(extendingCommit *flow.EpochCommit, extendingSetup *flow.EpochSetup, activeSetup *flow.EpochSetup, status *flow.EpochStatus) error { - - // We should only have a single epoch commit event per epoch. - if status.NextEpoch.CommitID != flow.ZeroID { - // true iff EpochCommit event for NEXT epoch was already included before - return protocol.NewInvalidServiceEventErrorf("duplicate epoch commit service event: %x", status.NextEpoch.CommitID) - } - - // The epoch setup event needs to happen before the commit. - if status.NextEpoch.SetupID == flow.ZeroID { - return protocol.NewInvalidServiceEventErrorf("missing epoch setup for epoch commit") - } - - // The commit event should have the counter increased by one. - if extendingCommit.Counter != activeSetup.Counter+1 { - return protocol.NewInvalidServiceEventErrorf("next epoch commit has invalid counter (%d => %d)", activeSetup.Counter, extendingCommit.Counter) - } - - err := isValidEpochCommit(extendingCommit, extendingSetup) - if err != nil { - return protocol.NewInvalidServiceEventErrorf("invalid epoch commit: %s", err) - } - - return nil -} - -// isValidEpochCommit checks whether an epoch commit service event is intrinsically valid. -// Assumes the input flow.EpochSetup event has already been validated. -// Expected errors during normal operations: -// * protocol.InvalidServiceEventError if the EpochCommit is invalid -func isValidEpochCommit(commit *flow.EpochCommit, setup *flow.EpochSetup) error { - - if len(setup.Assignments) != len(commit.ClusterQCs) { - return protocol.NewInvalidServiceEventErrorf("number of clusters (%d) does not number of QCs (%d)", len(setup.Assignments), len(commit.ClusterQCs)) - } - - if commit.Counter != setup.Counter { - return protocol.NewInvalidServiceEventErrorf("inconsistent epoch counter between commit (%d) and setup (%d) events in same epoch", commit.Counter, setup.Counter) - } - - // make sure we have a valid DKG public key - if commit.DKGGroupKey == nil { - return protocol.NewInvalidServiceEventErrorf("missing DKG public group key") - } - - participants := setup.Participants.Filter(filter.IsValidDKGParticipant) - if len(participants) != len(commit.DKGParticipantKeys) { - return protocol.NewInvalidServiceEventErrorf("participant list (len=%d) does not match dkg key list (len=%d)", len(participants), len(commit.DKGParticipantKeys)) - } - - return nil -} - -// IsValidRootSnapshot checks internal consistency of root state snapshot -// if verifyResultID allows/disallows Result ID verification -func IsValidRootSnapshot(snap protocol.Snapshot, verifyResultID bool) error { - - segment, err := snap.SealingSegment() - if err != nil { - return fmt.Errorf("could not get sealing segment: %w", err) - } - result, seal, err := snap.SealedResult() - if err != nil { - return fmt.Errorf("could not latest sealed result: %w", err) - } - - err = segment.Validate() - if err != nil { - return fmt.Errorf("invalid root sealing segment: %w", err) - } - - highest := segment.Highest() // reference block of the snapshot - lowest := segment.Sealed() // last sealed block - highestID := highest.ID() - lowestID := lowest.ID() - - if result.BlockID != lowestID { - return fmt.Errorf("root execution result for wrong block (%x != %x)", result.BlockID, lowest.ID()) - } - - if seal.BlockID != lowestID { - return fmt.Errorf("root block seal for wrong 
block (%x != %x)", seal.BlockID, lowest.ID()) - } - - if verifyResultID { - if seal.ResultID != result.ID() { - return fmt.Errorf("root block seal for wrong execution result (%x != %x)", seal.ResultID, result.ID()) - } - } - - // identities must be canonically ordered - identities, err := snap.Identities(filter.Any) - if err != nil { - return fmt.Errorf("could not get identities for root snapshot: %w", err) - } - if !identities.Sorted(order.Canonical) { - return fmt.Errorf("identities are not canonically ordered") - } - - // root qc must be for reference block of snapshot - qc, err := snap.QuorumCertificate() - if err != nil { - return fmt.Errorf("could not get qc for root snapshot: %w", err) - } - if qc.BlockID != highestID { - return fmt.Errorf("qc is for wrong block (got: %x, expected: %x)", qc.BlockID, highestID) - } - - firstView, err := snap.Epochs().Current().FirstView() - if err != nil { - return fmt.Errorf("could not get first view: %w", err) - } - finalView, err := snap.Epochs().Current().FinalView() - if err != nil { - return fmt.Errorf("could not get final view: %w", err) - } - - // the segment must be fully within the current epoch - if firstView > lowest.Header.View { - return fmt.Errorf("lowest block of sealing segment has lower view than first view of epoch") - } - if highest.Header.View >= finalView { - return fmt.Errorf("final view of epoch less than first block view") - } - - return nil -} - -// IsValidRootSnapshotQCs checks internal consistency of QCs that are included in the root state snapshot -// It verifies QCs for main consensus and for each collection cluster. -func IsValidRootSnapshotQCs(snap protocol.Snapshot) error { - // validate main consensus QC - err := validateRootQC(snap) - if err != nil { - return fmt.Errorf("invalid root QC: %w", err) - } - - // validate each collection cluster separately - curEpoch := snap.Epochs().Current() - clusters, err := curEpoch.Clustering() - if err != nil { - return fmt.Errorf("could not get clustering for root snapshot: %w", err) - } - for clusterIndex := range clusters { - cluster, err := curEpoch.Cluster(uint(clusterIndex)) - if err != nil { - return fmt.Errorf("could not get cluster %d for root snapshot: %w", clusterIndex, err) - } - err = validateClusterQC(cluster) - if err != nil { - return fmt.Errorf("invalid cluster qc %d: %w", clusterIndex, err) - } - } - return nil -} - -// validateRootQC performs validation of root QC -// Returns nil on success -func validateRootQC(snap protocol.Snapshot) error { - identities, err := snap.Identities(filter.IsVotingConsensusCommitteeMember) - if err != nil { - return fmt.Errorf("could not get root snapshot identities: %w", err) - } - - rootQC, err := snap.QuorumCertificate() - if err != nil { - return fmt.Errorf("could not get root QC: %w", err) - } - - dkg, err := snap.Epochs().Current().DKG() - if err != nil { - return fmt.Errorf("could not get DKG for root snapshot: %w", err) - } - - committee, err := committees.NewStaticCommitteeWithDKG(identities, flow.Identifier{}, dkg) - if err != nil { - return fmt.Errorf("could not create static committee: %w", err) - } - verifier := verification.NewCombinedVerifier(committee, signature.NewConsensusSigDataPacker(committee)) - hotstuffValidator := validator.New(committee, verifier) - err = hotstuffValidator.ValidateQC(rootQC) - if err != nil { - return fmt.Errorf("could not validate root qc: %w", err) - } - return nil -} - -// validateClusterQC performs QC validation of single collection cluster -// Returns nil on success -func 
validateClusterQC(cluster protocol.Cluster) error { - committee, err := committees.NewStaticCommittee(cluster.Members(), flow.Identifier{}, nil, nil) - if err != nil { - return fmt.Errorf("could not create static committee: %w", err) - } - verifier := verification.NewStakingVerifier() - hotstuffValidator := validator.New(committee, verifier) - err = hotstuffValidator.ValidateQC(cluster.RootQC()) - if err != nil { - return fmt.Errorf("could not validate root qc: %w", err) - } - return nil -} - -// ValidRootSnapshotContainsEntityExpiryRange performs a sanity check to make sure the -// root snapshot has enough history to encompass at least one full entity expiry window. -// Entities (in particular transactions and collections) may reference a block within -// the past `flow.DefaultTransactionExpiry` blocks, so a new node must begin with at least -// this many blocks worth of history leading up to the snapshot's root block. -// -// Currently, Access Nodes and Consensus Nodes require root snapshots passing this validator function. -// -// - Consensus Nodes because they process guarantees referencing past blocks -// - Access Nodes because they index transactions referencing past blocks -// -// One of the following conditions must be satisfied to pass this validation: -// 1. This is a snapshot build from a first block of spork -// -> there is no earlier history which transactions/collections could reference -// 2. This snapshot sealing segment contains at least one expiry window of blocks -// -> all possible reference blocks in future transactions/collections will be within the initial history. -// 3. This snapshot sealing segment includes the spork root block -// -> there is no earlier history which transactions/collections could reference -func ValidRootSnapshotContainsEntityExpiryRange(snapshot protocol.Snapshot) error { - isSporkRootSnapshot, err := protocol.IsSporkRootSnapshot(snapshot) - if err != nil { - return fmt.Errorf("could not check if root snapshot is a spork root snapshot: %w", err) - } - // Condition 1 satisfied - if isSporkRootSnapshot { - return nil - } - - head, err := snapshot.Head() - if err != nil { - return fmt.Errorf("could not query root snapshot head: %w", err) - } - - sporkRootBlockHeight, err := snapshot.Params().SporkRootBlockHeight() - if err != nil { - return fmt.Errorf("could not query spork root block height: %w", err) - } - - sealingSegment, err := snapshot.SealingSegment() - if err != nil { - return fmt.Errorf("could not query sealing segment: %w", err) - } - - sealingSegmentLength := uint64(len(sealingSegment.AllBlocks())) - transactionExpiry := uint64(flow.DefaultTransactionExpiry) - blocksInSpork := head.Height - sporkRootBlockHeight + 1 // range is inclusive on both ends - - // Condition 3: - // check if head.Height - sporkRootBlockHeight < flow.DefaultTransactionExpiry - // this is the case where we bootstrap early into the spork and there is simply not enough blocks - if blocksInSpork < transactionExpiry { - // the distance to spork root is less than transaction expiry, we need all blocks back to the spork root. 
- if sealingSegmentLength != blocksInSpork { - return fmt.Errorf("invalid root snapshot length, expecting exactly (%d), got (%d)", blocksInSpork, sealingSegmentLength) - } - } else { - // Condition 2: - // the distance to spork root is more than transaction expiry, we need at least `transactionExpiry` many blocks - if sealingSegmentLength < transactionExpiry { - return fmt.Errorf("invalid root snapshot length, expecting at least (%d), got (%d)", - transactionExpiry, sealingSegmentLength) - } - } - return nil -} diff --git a/state/protocol/badger/validity_test.go b/state/protocol/badger/validity_test.go deleted file mode 100644 index 2c0e3372e4b..00000000000 --- a/state/protocol/badger/validity_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package badger - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/utils/unittest" -) - -var participants = unittest.IdentityListFixture(20, unittest.WithAllRoles()) - -func TestEpochSetupValidity(t *testing.T) { - t.Run("invalid first/final view", func(t *testing.T) { - _, result, _ := unittest.BootstrapFixture(participants) - setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) - // set an invalid final view for the first epoch - setup.FinalView = setup.FirstView - - err := verifyEpochSetup(setup, true) - require.Error(t, err) - }) - - t.Run("non-canonically ordered identities", func(t *testing.T) { - _, result, _ := unittest.BootstrapFixture(participants) - setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) - // randomly shuffle the identities so they are not canonically ordered - setup.Participants = setup.Participants.DeterministicShuffle(time.Now().UnixNano()) - - err := verifyEpochSetup(setup, true) - require.Error(t, err) - }) - - t.Run("invalid cluster assignments", func(t *testing.T) { - _, result, _ := unittest.BootstrapFixture(participants) - setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) - // create an invalid cluster assignment (node appears in multiple clusters) - collector := participants.Filter(filter.HasRole(flow.RoleCollection))[0] - setup.Assignments = append(setup.Assignments, []flow.Identifier{collector.NodeID}) - - err := verifyEpochSetup(setup, true) - require.Error(t, err) - }) - - t.Run("short seed", func(t *testing.T) { - _, result, _ := unittest.BootstrapFixture(participants) - setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) - setup.RandomSource = unittest.SeedFixture(crypto.SeedMinLenDKG - 1) - - err := verifyEpochSetup(setup, true) - require.Error(t, err) - }) -} - -func TestBootstrapInvalidEpochCommit(t *testing.T) { - t.Run("inconsistent counter", func(t *testing.T) { - _, result, _ := unittest.BootstrapFixture(participants) - setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) - commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) - // use a different counter for the commit - commit.Counter = setup.Counter + 1 - - err := isValidEpochCommit(commit, setup) - require.Error(t, err) - }) - - t.Run("inconsistent cluster QCs", func(t *testing.T) { - _, result, _ := unittest.BootstrapFixture(participants) - setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) - commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) - // add an extra QC to commit - extraQC := unittest.QuorumCertificateWithSignerIDsFixture() - commit.ClusterQCs = append(commit.ClusterQCs, flow.ClusterQCVoteDataFromQC(extraQC)) - - err := 
isValidEpochCommit(commit, setup) - require.Error(t, err) - }) - - t.Run("missing dkg group key", func(t *testing.T) { - _, result, _ := unittest.BootstrapFixture(participants) - setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) - commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) - commit.DKGGroupKey = nil - - err := isValidEpochCommit(commit, setup) - require.Error(t, err) - }) - - t.Run("inconsistent DKG participants", func(t *testing.T) { - _, result, _ := unittest.BootstrapFixture(participants) - setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) - commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) - // add an extra DKG participant key - commit.DKGParticipantKeys = append(commit.DKGParticipantKeys, unittest.KeyFixture(crypto.BLSBLS12381).PublicKey()) - - err := isValidEpochCommit(commit, setup) - require.Error(t, err) - }) -} - -// TestEntityExpirySnapshotValidation tests that we perform correct sanity checks when -// bootstrapping consensus nodes and access nodes we expect that we only bootstrap snapshots -// with sufficient history. -func TestEntityExpirySnapshotValidation(t *testing.T) { - t.Run("spork-root-snapshot", func(t *testing.T) { - rootSnapshot := unittest.RootSnapshotFixture(participants) - err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) - require.NoError(t, err) - }) - t.Run("not-enough-history", func(t *testing.T) { - rootSnapshot := unittest.RootSnapshotFixture(participants) - rootSnapshot.Encodable().Head.Height += 10 // advance height to be not spork root snapshot - err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) - require.Error(t, err) - }) - t.Run("enough-history-spork-just-started", func(t *testing.T) { - rootSnapshot := unittest.RootSnapshotFixture(participants) - // advance height to be not spork root snapshot, but still lower than transaction expiry - rootSnapshot.Encodable().Head.Height += flow.DefaultTransactionExpiry / 2 - // add blocks to sealing segment - rootSnapshot.Encodable().SealingSegment.ExtraBlocks = unittest.BlockFixtures(int(flow.DefaultTransactionExpiry / 2)) - err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) - require.NoError(t, err) - }) - t.Run("enough-history-long-spork", func(t *testing.T) { - rootSnapshot := unittest.RootSnapshotFixture(participants) - // advance height to be not spork root snapshot - rootSnapshot.Encodable().Head.Height += flow.DefaultTransactionExpiry * 2 - // add blocks to sealing segment - rootSnapshot.Encodable().SealingSegment.ExtraBlocks = unittest.BlockFixtures(int(flow.DefaultTransactionExpiry) - 1) - err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) - require.NoError(t, err) - }) - t.Run("more-history-than-needed", func(t *testing.T) { - rootSnapshot := unittest.RootSnapshotFixture(participants) - // advance height to be not spork root snapshot - rootSnapshot.Encodable().Head.Height += flow.DefaultTransactionExpiry * 2 - // add blocks to sealing segment - rootSnapshot.Encodable().SealingSegment.ExtraBlocks = unittest.BlockFixtures(flow.DefaultTransactionExpiry * 2) - err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) - require.NoError(t, err) - }) -} diff --git a/state/protocol/blocktimer.go b/state/protocol/blocktimer.go index 50f75c9bc83..2b4bae18b8b 100644 --- a/state/protocol/blocktimer.go +++ b/state/protocol/blocktimer.go @@ -1,16 +1,12 @@ package protocol -import ( - "time" -) - // BlockTimer constructs and validates block timestamps. 
type BlockTimer interface {
// Build generates a timestamp based on definition of valid timestamp.
- Build(parentTimestamp time.Time) time.Time
+ Build(parentTimestamp uint64) uint64

// Validate checks validity of a block's time stamp.
// Error returns
// * `model.InvalidBlockTimestampError` if time stamp is invalid.
// * all other errors are unexpected and potentially symptoms of internal implementation bugs or state corruption (fatal).
- Validate(parentTimestamp, currentTimestamp time.Time) error
+ Validate(parentTimestamp, currentTimestamp uint64) error
}
diff --git a/state/protocol/blocktimer/blocktimer.go b/state/protocol/blocktimer/blocktimer.go
index eed843b007a..b44fdb3c0d7 100644
--- a/state/protocol/blocktimer/blocktimer.go
+++ b/state/protocol/blocktimer/blocktimer.go
@@ -7,9 +7,9 @@ import (
"github.com/onflow/flow-go/state/protocol"
)

-// Is a functor that generates a current timestamp, usually it's just time.Now().
+// Is a functor that generates the current timestamp in Unix milliseconds; usually this is just uint64(time.Now().UnixMilli()).
// Used to make testing easier
-type timestampGenerator = func() time.Time
+type timestampGenerator = func() uint64

// BlockTimestamp is a helper structure that performs building and validation of valid
// timestamp for blocks that are generated by block builder and checked by hotstuff event loop.
@@ -20,15 +20,16 @@ type timestampGenerator = func() time.Time
// if t < τ + minInterval, the proposer sets Timestamp := τ + minInterval
// if τ + maxInterval < t, the proposer sets Timestamp := τ + maxInterval
type BlockTimestamp struct {
- minInterval time.Duration
- maxInterval time.Duration
+ minInterval uint64
+ maxInterval uint64
generator timestampGenerator
}

var DefaultBlockTimer = NewNoopBlockTimer()

// NewBlockTimer creates new block timer with specific intervals and time.Now as generator
-func NewBlockTimer(minInterval, maxInterval time.Duration) (*BlockTimestamp, error) {
+// Intervals are measured in milliseconds
+func NewBlockTimer(minInterval, maxInterval uint64) (*BlockTimestamp, error) {
if minInterval >= maxInterval {
return nil, fmt.Errorf("invariant minInterval < maxInterval is not satisfied, %d >= %d", minInterval, maxInterval)
}
@@ -39,25 +40,24 @@ func NewBlockTimer(minInterval, maxInterval time.Duration) (*BlockTimestamp, err
return &BlockTimestamp{
minInterval: minInterval,
maxInterval: maxInterval,
- generator: func() time.Time { return time.Now().UTC() },
+ generator: func() uint64 { return uint64(time.Now().UnixMilli()) },
}, nil
}

// Build generates a timestamp based on definition of valid timestamp.
-func (b BlockTimestamp) Build(parentTimestamp time.Time) time.Time { +func (b BlockTimestamp) Build(parentTimestamp uint64) uint64 { // calculate the timestamp and cutoffs timestamp := b.generator() - from := parentTimestamp.Add(b.minInterval) - to := parentTimestamp.Add(b.maxInterval) + from := parentTimestamp + b.minInterval + to := parentTimestamp + b.maxInterval // adjust timestamp if outside of cutoffs - if timestamp.Before(from) { + if timestamp < from { timestamp = from } - if timestamp.After(to) { + if timestamp > to { timestamp = to } - return timestamp } @@ -67,10 +67,10 @@ func (b BlockTimestamp) Build(parentTimestamp time.Time) time.Time { // Returns: // - model.ErrInvalidBlockTimestamp - timestamp is invalid // - nil - success -func (b BlockTimestamp) Validate(parentTimestamp, currentTimestamp time.Time) error { - from := parentTimestamp.Add(b.minInterval) - to := parentTimestamp.Add(b.maxInterval) - if currentTimestamp.Before(from) || currentTimestamp.After(to) { +func (b BlockTimestamp) Validate(parentTimestamp, currentTimestamp uint64) error { + from := parentTimestamp + b.minInterval + to := parentTimestamp + b.maxInterval + if currentTimestamp < from || currentTimestamp > to { return protocol.NewInvalidBlockTimestamp("timestamp %v is not within interval [%v; %v]", currentTimestamp, from, to) } return nil diff --git a/state/protocol/blocktimer/blocktimer_test.go b/state/protocol/blocktimer/blocktimer_test.go index 1ac8f1fd5b6..f4a1b4701c7 100644 --- a/state/protocol/blocktimer/blocktimer_test.go +++ b/state/protocol/blocktimer/blocktimer_test.go @@ -13,38 +13,38 @@ import ( // TestBlockTimestamp_Validate tests that validation accepts valid time and rejects invalid func TestBlockTimestamp_Validate(t *testing.T) { t.Parallel() - builder, err := NewBlockTimer(10*time.Millisecond, 1*time.Second) + builder, err := NewBlockTimer(10, 1000) require.NoError(t, err) t.Run("parentTime + minInterval + 1", func(t *testing.T) { - parentTime := time.Now().UTC() - blockTime := parentTime.Add(builder.minInterval + time.Millisecond) + parentTime := uint64(time.Now().UnixMilli()) + blockTime := parentTime + builder.minInterval + 1 require.NoError(t, builder.Validate(parentTime, blockTime)) }) t.Run("parentTime + minInterval", func(t *testing.T) { - parentTime := time.Now().UTC() - blockTime := parentTime.Add(builder.minInterval) + parentTime := uint64(time.Now().UnixMilli()) + blockTime := parentTime + builder.minInterval require.NoError(t, builder.Validate(parentTime, blockTime)) }) t.Run("parentTime + minInterval - 1", func(t *testing.T) { - parentTime := time.Now().UTC() - blockTime := parentTime.Add(builder.minInterval - time.Millisecond) + parentTime := uint64(time.Now().UnixMilli()) + blockTime := parentTime + builder.minInterval - 1 err := builder.Validate(parentTime, blockTime) require.Error(t, err) require.True(t, protocol.IsInvalidBlockTimestampError(err)) }) t.Run("parentTime + maxInterval - 1", func(t *testing.T) { - parentTime := time.Now().UTC() - blockTime := parentTime.Add(builder.maxInterval - time.Millisecond) + parentTime := uint64(time.Now().UnixMilli()) + blockTime := parentTime + builder.maxInterval - 1 require.NoError(t, builder.Validate(parentTime, blockTime)) }) t.Run("parentTime + maxInterval", func(t *testing.T) { - parentTime := time.Now().UTC() - blockTime := parentTime.Add(builder.maxInterval) + parentTime := uint64(time.Now().UnixMilli()) + blockTime := parentTime + builder.maxInterval require.NoError(t, builder.Validate(parentTime, blockTime)) }) 
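+ // For orientation: with builder = NewBlockTimer(10, 1000) above, a parent
+ // timestamp τ (in Unix milliseconds) admits exactly the closed window
+ // [τ+10, τ+1000]; both boundary subtests must pass, while the ±1 subtests
+ // outside the window must fail.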
t.Run("parentTime + maxInterval + 1", func(t *testing.T) { - parentTime := time.Now().UTC() - blockTime := parentTime.Add(builder.maxInterval + time.Millisecond) + parentTime := uint64(time.Now().UnixMilli()) + blockTime := parentTime + builder.maxInterval + 1 err := builder.Validate(parentTime, blockTime) require.Error(t, err) require.True(t, protocol.IsInvalidBlockTimestampError(err)) @@ -54,11 +54,11 @@ func TestBlockTimestamp_Validate(t *testing.T) { // TestBlockTimestamp_Build tests that builder correctly generates new block time func TestBlockTimestamp_Build(t *testing.T) { t.Parallel() - minInterval := 100 * time.Millisecond - maxInterval := 10 * time.Second - deltas := []time.Duration{0, minInterval, maxInterval} + const minInterval uint64 = 100 // milliseconds + const maxInterval uint64 = 10_000 // milliseconds + deltas := []uint64{0, minInterval, maxInterval} - // this test tries to cover next scenarious in generic way: + // this test tries to cover next scenarios in generic way: // now = parent - 1 // now = parent // now = parent + 1 @@ -74,12 +74,12 @@ func TestBlockTimestamp_Build(t *testing.T) { builder, err := NewBlockTimer(minInterval, maxInterval) require.NoError(t, err) - parentTime := time.Now().UTC() + parentTime := uint64(time.Now().UnixMilli()) // now = parentTime + delta + {-1, 0, +1} for i := -1; i <= 1; i++ { - builder.generator = func() time.Time { - return parentTime.Add(duration + time.Millisecond*time.Duration(i)) + builder.generator = func() uint64 { + return parentTime + duration + uint64(i) } blockTime := builder.Build(parentTime) diff --git a/state/protocol/blocktimer/noop.go b/state/protocol/blocktimer/noop.go index 0d76f2fa50e..854d6b7fe81 100644 --- a/state/protocol/blocktimer/noop.go +++ b/state/protocol/blocktimer/noop.go @@ -10,10 +10,10 @@ func NewNoopBlockTimer() *NoopBlockTimer { return &NoopBlockTimer{} } -func (n NoopBlockTimer) Build(time.Time) time.Time { - return time.Now().UTC() +func (n NoopBlockTimer) Build(uint64) uint64 { + return uint64(time.Now().UnixMilli()) } -func (n NoopBlockTimer) Validate(time.Time, time.Time) error { +func (n NoopBlockTimer) Validate(uint64, uint64) error { return nil } diff --git a/state/protocol/chain_state.go b/state/protocol/chain_state.go new file mode 100644 index 00000000000..b400d1579b7 --- /dev/null +++ b/state/protocol/chain_state.go @@ -0,0 +1,122 @@ +package protocol + +import ( + "context" + + "github.com/onflow/flow-go/model/flow" +) + +// State represents the full protocol state of the local node. It allows us to +// obtain snapshots of the state at any point of the protocol state history. +type State interface { + + // Params gives access to a number of stable parameters of the protocol state. + Params() Params + + // Final returns the snapshot of the persistent protocol state at the latest + // finalized block, and the returned snapshot is therefore immutable over + // time. + Final() Snapshot + + // Sealed returns the snapshot of the persistent protocol state at the + // latest sealed block, and the returned snapshot is therefore immutable + // over time. + Sealed() Snapshot + + // AtHeight returns the snapshot of the persistent protocol state at the + // given block number. It is only available for finalized blocks and the + // returned snapshot is therefore immutable over time. + AtHeight(height uint64) Snapshot + + // AtBlockID returns the snapshot of the persistent protocol state at the + // given block ID. 
It is available for any block that was introduced into
+ // the protocol state, and can thus represent an ambiguous state that is not
+ // yet finalized and may never be.
+ AtBlockID(blockID flow.Identifier) Snapshot
+}
+
+// FollowerState is a mutable protocol state used by nodes following main consensus (i.e., non-consensus nodes).
+// All blocks must have a certifying QC when being added to the state to guarantee they are valid,
+// so there is a one-block lag between block production and incorporation into the FollowerState.
+// However, since all blocks are certified upon insertion, they are immediately processable by other components.
+type FollowerState interface {
+ State
+
+ // ExtendCertified introduces the block with the given ID into the persistent
+ // protocol state without modifying the current finalized state. It allows us
+ // to execute fork-aware queries against the known protocol state. As part of
+ // the CertifiedBlock, the caller must pass a Quorum Certificate [QC] (field
+ // `CertifyingQC`) to prove that the candidate block has been certified, and
+ // it's safe to add it to the protocol state. The QC cannot be nil and must
+ // certify the candidate block:
+ // candidate.View == QC.View && candidate.BlockID == QC.BlockID
+ //
+ // CAUTION:
+ // - This function expects that `QC` has been validated (otherwise, the state will be corrupted).
+ // - The parent block must already be stored.
+ // - Attempts to extend the state with the _same block concurrently_ are not allowed.
+ // (will not corrupt the state, but may lead to an exception)
+ //
+ // Aside from the requirement that ancestors must have been previously ingested, all blocks are
+ // accepted, no matter how old they are or whether they are orphaned.
+ //
+ // Note: To ensure that all ancestors of a candidate block are correct and known to the FollowerState, some external
+ // ordering and queuing of incoming blocks is generally necessary (responsibility of Compliance Layer). Once a block
+ // is successfully ingested, repeated extension requests with this block are no-ops. This is convenient for the
+ // Compliance Layer after a crash, so it doesn't have to worry about which blocks have already been ingested before
+ // the crash. However, while running it is very easy for the Compliance Layer to avoid concurrent extension requests
+ // with the same block. Hence, for simplicity, the FollowerState may reject such requests with an exception.
+ //
+ // No errors are expected during normal operations.
+ // - In case of concurrent calls with the same `candidate` block, `ExtendCertified` may return a [storage.ErrAlreadyExists]
+ // or it may gracefully return. At the moment, `ExtendCertified` should be considered as NOT CONCURRENCY-SAFE.
+ ExtendCertified(ctx context.Context, certified *flow.CertifiedBlock) error
+
+ // Finalize finalizes the block with the given ID.
+ // At this level, we can only finalize one block at a time. This implies
+ // that the parent of the pending block that is to be finalized has
+ // to be the last finalized block.
+ // It modifies the persistent immutable protocol state accordingly and
+ // forwards the pointer to the latest finalized state.
+ // No errors are expected during normal operations.
+ Finalize(ctx context.Context, blockID flow.Identifier) error
+}
+
+// ParticipantState is a mutable protocol state used by active consensus participants (consensus nodes).
+// All blocks are validated in full, including payload validation, prior to insertion. Only valid blocks are inserted.
+type ParticipantState interface { + FollowerState + + // Extend introduces the block with the given ID into the persistent + // protocol state without modifying the current finalized state. It allows + // us to execute fork-aware queries against ambiguous protocol state, while + // still checking that the given block is a valid extension of the protocol state. + // The candidate block must have passed HotStuff validation before being passed to Extend. + // + // CAUTION: + // - per convention, the protocol state requires that the candidate's + // parent has already been ingested. Otherwise, an exception is returned. + // - Attempts to extend the state with the _same block concurrently_ are not allowed. + // (will not corrupt the state, but may lead to an exception) + // - We reject orphaned blocks with [state.OutdatedExtensionError]! + // This is more performant, but requires careful handling by the calling code. Specifically, + // the caller should not just drop orphaned blocks from the cache, to avoid wasteful re-requests. + // If we were to entirely forget orphaned blocks, e.g. block X of the orphaned fork X ← Y ← Z, + // we might not have enough information to reject blocks Y, Z later if we receive them. We would + // re-request X, then determine it is orphaned and drop it, attempt to ingest Y, re-request the + // unknown parent X, and repeat, potentially very often. + // + // Note: To ensure that all ancestors of a candidate block are correct and known to the Protocol State, some external + // ordering and queuing of incoming blocks is generally necessary (responsibility of Compliance Layer). Once a block + // is successfully ingested, repeated extension requests with this block are no-ops. This is convenient for the + // Compliance Layer after a crash, so it doesn't have to worry about which blocks have already been ingested before + // the crash. However, while running, it is very easy for the Compliance Layer to avoid concurrent extension requests + // with the same block. Hence, for simplicity, the ParticipantState may reject such requests with an exception. + // + // Expected errors during normal operations: + // * [state.OutdatedExtensionError] if the candidate block is orphaned + // * [state.InvalidExtensionError] if the candidate block is invalid + // * In case of concurrent calls with the same `candidate` block, `Extend` may return a [storage.ErrAlreadyExists] + // or it may gracefully return. At the moment, `Extend` should be considered as NOT CONCURRENCY-SAFE. + Extend(ctx context.Context, candidate *flow.Proposal) error +} diff --git a/state/protocol/cluster.go b/state/protocol/cluster.go index a689adfc033..3001d026542 100644 --- a/state/protocol/cluster.go +++ b/state/protocol/cluster.go @@ -20,8 +20,10 @@ type Cluster interface { // EpochCounter returns the epoch counter for this cluster. EpochCounter() uint64 - // Members returns the initial set of collector nodes in this cluster. - Members() flow.IdentityList + // Members returns the IdentitySkeletons of the cluster members in canonical order. + // This represents the cluster composition at the time the cluster was specified by the epoch smart + // contract (hence, we return IdentitySkeletons as opposed to full identities). + Members() flow.IdentitySkeletonList // RootBlock returns the root block for this cluster.
RootBlock() *cluster.Block diff --git a/state/protocol/convert.go b/state/protocol/convert.go deleted file mode 100644 index 8f8630b2230..00000000000 --- a/state/protocol/convert.go +++ /dev/null @@ -1,196 +0,0 @@ -package protocol - -import ( - "fmt" - - "github.com/onflow/flow-go/module/signature" - - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" -) - -// ToEpochSetup converts an Epoch interface instance to the underlying concrete -// epoch setup service event. The input must be a valid, set up epoch. -// Error returns: -// * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. -// * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. -// * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. -func ToEpochSetup(epoch Epoch) (*flow.EpochSetup, error) { - counter, err := epoch.Counter() - if err != nil { - return nil, fmt.Errorf("could not get epoch counter: %w", err) - } - firstView, err := epoch.FirstView() - if err != nil { - return nil, fmt.Errorf("could not get epoch first view: %w", err) - } - finalView, err := epoch.FinalView() - if err != nil { - return nil, fmt.Errorf("could not get epoch final view: %w", err) - } - dkgPhase1FinalView, dkgPhase2FinalView, dkgPhase3FinalView, err := DKGPhaseViews(epoch) - if err != nil { - return nil, fmt.Errorf("could not get epoch dkg final views: %w", err) - } - participants, err := epoch.InitialIdentities() - if err != nil { - return nil, fmt.Errorf("could not get epoch participants: %w", err) - } - clustering, err := epoch.Clustering() - if err != nil { - return nil, fmt.Errorf("could not get epoch clustering: %w", err) - } - assignments := clustering.Assignments() - randomSource, err := epoch.RandomSource() - if err != nil { - return nil, fmt.Errorf("could not get epoch random source: %w", err) - } - - setup := &flow.EpochSetup{ - Counter: counter, - FirstView: firstView, - DKGPhase1FinalView: dkgPhase1FinalView, - DKGPhase2FinalView: dkgPhase2FinalView, - DKGPhase3FinalView: dkgPhase3FinalView, - FinalView: finalView, - Participants: participants, - Assignments: assignments, - RandomSource: randomSource, - } - return setup, nil -} - -// ToEpochCommit converts an Epoch interface instance to the underlying -// concrete epoch commit service event. The epoch must have been committed. -// Error returns: -// * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. -// * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. -// * protocol.ErrNextEpochNotCommitted - if the epoch has not been committed. -// * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. 
-func ToEpochCommit(epoch Epoch) (*flow.EpochCommit, error) { - counter, err := epoch.Counter() - if err != nil { - return nil, fmt.Errorf("could not get epoch counter: %w", err) - } - clustering, err := epoch.Clustering() - if err != nil { - return nil, fmt.Errorf("could not get epoch clustering: %w", err) - } - qcs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(clustering)) - for i := range clustering { - cluster, err := epoch.Cluster(uint(i)) - if err != nil { - return nil, fmt.Errorf("could not get epoch cluster (index=%d): %w", i, err) - } - qc := cluster.RootQC() - // TODO: double check cluster.Members returns canonical order - signerIDs, err := signature.DecodeSignerIndicesToIdentifiers(cluster.Members().NodeIDs(), qc.SignerIndices) - if err != nil { - return nil, fmt.Errorf("could not encode signer indices: %w", err) - } - qcs = append(qcs, &flow.QuorumCertificateWithSignerIDs{ - View: qc.View, - BlockID: qc.BlockID, - SignerIDs: signerIDs, - SigData: qc.SigData, - }) - } - - participants, err := epoch.InitialIdentities() - if err != nil { - return nil, fmt.Errorf("could not get epoch participants: %w", err) - } - dkg, err := epoch.DKG() - if err != nil { - return nil, fmt.Errorf("could not get epoch dkg: %w", err) - } - dkgParticipantKeys, err := GetDKGParticipantKeys(dkg, participants.Filter(filter.IsValidDKGParticipant)) - if err != nil { - return nil, fmt.Errorf("could not get dkg participant keys: %w", err) - } - - commit := &flow.EpochCommit{ - Counter: counter, - ClusterQCs: flow.ClusterQCVoteDatasFromQCs(qcs), - DKGGroupKey: dkg.GroupKey(), - DKGParticipantKeys: dkgParticipantKeys, - } - return commit, nil -} - -// GetDKGParticipantKeys retrieves the canonically ordered list of DKG -// participant keys from the DKG. -// All errors indicate inconsistent or invalid inputs. -// No errors are expected during normal operation. -func GetDKGParticipantKeys(dkg DKG, participants flow.IdentityList) ([]crypto.PublicKey, error) { - - keys := make([]crypto.PublicKey, 0, len(participants)) - for i, identity := range participants { - - index, err := dkg.Index(identity.NodeID) - if err != nil { - return nil, fmt.Errorf("could not get index (node=%x): %w", identity.NodeID, err) - } - key, err := dkg.KeyShare(identity.NodeID) - if err != nil { - return nil, fmt.Errorf("could not get key share (node=%x): %w", identity.NodeID, err) - } - if uint(i) != index { - return nil, fmt.Errorf("participant list index (%d) does not match dkg index (%d)", i, index) - } - - keys = append(keys, key) - } - - return keys, nil -} - -// ToDKGParticipantLookup computes the nodeID -> DKGParticipant lookup for a -// DKG instance. The participants must exactly match the DKG instance configuration. -// All errors indicate inconsistent or invalid inputs. -// No errors are expected during normal operation. -func ToDKGParticipantLookup(dkg DKG, participants flow.IdentityList) (map[flow.Identifier]flow.DKGParticipant, error) { - - lookup := make(map[flow.Identifier]flow.DKGParticipant) - for _, identity := range participants { - - index, err := dkg.Index(identity.NodeID) - if err != nil { - return nil, fmt.Errorf("could not get index (node=%x): %w", identity.NodeID, err) - } - key, err := dkg.KeyShare(identity.NodeID) - if err != nil { - return nil, fmt.Errorf("could not get key share (node=%x): %w", identity.NodeID, err) - } - - lookup[identity.NodeID] = flow.DKGParticipant{ - Index: index, - KeyShare: key, - } - } - - return lookup, nil -} - -// DKGPhaseViews returns the DKG final phase views for an epoch. 
-// Error returns: -// * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. -// * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. -// * protocol.ErrNextEpochNotCommitted - if the epoch has not been committed. -// * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. -func DKGPhaseViews(epoch Epoch) (phase1FinalView uint64, phase2FinalView uint64, phase3FinalView uint64, err error) { - phase1FinalView, err = epoch.DKGPhase1FinalView() - if err != nil { - return - } - phase2FinalView, err = epoch.DKGPhase2FinalView() - if err != nil { - return - } - phase3FinalView, err = epoch.DKGPhase3FinalView() - if err != nil { - return - } - return -} diff --git a/state/protocol/datastore/params.go b/state/protocol/datastore/params.go new file mode 100644 index 00000000000..7f6389b5abb --- /dev/null +++ b/state/protocol/datastore/params.go @@ -0,0 +1,160 @@ +package datastore + +import ( + "fmt" + + "github.com/vmihailenco/msgpack/v4" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +const DefaultInstanceParamsVersion = 0 + +type Params struct { + protocol.GlobalParams + protocol.InstanceParams +} + +var _ protocol.Params = (*Params)(nil) + +// InstanceParams implements the interface [protocol.InstanceParams]. All values +// are cached after construction and do not incur database reads. +type InstanceParams struct { + // finalizedRoot marks the cutoff of the history this node knows about. It is the block at the tip + // of the root snapshot used to bootstrap this node - all newer blocks are synced from the network. + finalizedRoot *flow.Header + // sealedRoot is the latest sealed block with respect to `finalizedRoot`. + sealedRoot *flow.Header + // rootSeal is the seal for block `sealedRoot` - the newest incorporated seal with respect to `finalizedRoot`. + rootSeal *flow.Seal + // sporkRoot is the root block for the present spork. + sporkRootBlock *flow.Block +} + +var _ protocol.InstanceParams = (*InstanceParams)(nil) + +// ReadInstanceParams reads the instance parameters from the database and returns them as in-memory representation. +// It serves as a constructor for InstanceParams and only requires read-only access to the database (we never write). +// This information is immutable for the lifetime of a node and may be cached. +// No errors are expected during normal operation. +func ReadInstanceParams( + r storage.Reader, + headers storage.Headers, + seals storage.Seals, + blocks storage.Blocks, +) (*InstanceParams, error) { + params := &InstanceParams{} + + // The values below are written during bootstrapping and immutable for the lifetime of the node. All + // following parameters are uniquely defined by the values initially read. No atomicity is required. 
+ var versioned flow.VersionedInstanceParams + err := operation.RetrieveInstanceParams(r, &versioned) + if err != nil { + return nil, fmt.Errorf("could not read instance params to populate cache: %w", err) + } + + switch versioned.Version { + case 0: + var v0 InstanceParamsV0 + if err := msgpack.Unmarshal(versioned.Data, &v0); err != nil { + return nil, fmt.Errorf("could not decode to InstanceParamsV0: %w", err) + } + params.finalizedRoot, err = headers.ByBlockID(v0.FinalizedRootID) + if err != nil { + return nil, fmt.Errorf("could not retrieve finalized root header: %w", err) + } + + params.sealedRoot, err = headers.ByBlockID(v0.SealedRootID) + if err != nil { + return nil, fmt.Errorf("could not retrieve sealed root header: %w", err) + } + + // retrieve the root seal + params.rootSeal, err = seals.HighestInFork(v0.FinalizedRootID) + if err != nil { + return nil, fmt.Errorf("could not retrieve root seal: %w", err) + } + + params.sporkRootBlock, err = blocks.ByID(v0.SporkRootBlockID) + if err != nil { + return nil, fmt.Errorf("could not retrieve spork root block: %w", err) + } + default: + return nil, fmt.Errorf("unsupported instance params version: %d", versioned.Version) + } + + return params, nil +} + +// FinalizedRoot returns the finalized root header of the current protocol state. This will be +// the head of the protocol state snapshot used to bootstrap this state and +// may differ from node to node for the same protocol state. +func (p *InstanceParams) FinalizedRoot() *flow.Header { + return p.finalizedRoot +} + +// SealedRoot returns the sealed root block. If it's different from FinalizedRoot() block, +// it means the node is bootstrapped from mid-spork. +func (p *InstanceParams) SealedRoot() *flow.Header { + return p.sealedRoot +} + +// Seal returns the root block seal of the current protocol state. This is the seal for the +// `SealedRoot` block that was used to bootstrap this state. It may differ from node to node. +func (p *InstanceParams) Seal() *flow.Seal { + return p.rootSeal +} + +// SporkRootBlock returns the root block for the present spork. +func (p *InstanceParams) SporkRootBlock() *flow.Block { + return p.sporkRootBlock +} + +// InstanceParamsV0 is the consolidated, serializable form of protocol instance +// parameters that are constant throughout the lifetime of a node. +type InstanceParamsV0 struct { + // FinalizedRootID is the ID of the finalized root block. + FinalizedRootID flow.Identifier + // SealedRootID is the ID of the sealed root block. + SealedRootID flow.Identifier + // SporkRootBlockID is the root block's ID for the present spork this node participates in. + SporkRootBlockID flow.Identifier +} + +// NewVersionedInstanceParams constructs a versioned binary blob representing the `InstanceParams`. +// Conceptually, the values in the `InstanceParams` are immutable during the lifetime of a node. +// However, versioning allows extending `InstanceParams` with new fields in the future. +// +// No errors are expected during normal operation. 
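// A minimal illustrative use of the constructor below (the fixture IDs are
// hypothetical), mirroring the round-trip exercised in params_test.go:
//
//	versioned, err := NewVersionedInstanceParams(
//		DefaultInstanceParamsVersion, finalizedRootID, sealedRootID, sporkRootBlockID)
//	if err != nil {
//		return err
//	}
//	var v0 InstanceParamsV0
//	err = msgpack.Unmarshal(versioned.Data, &v0) // recovers the three IDs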
+func NewVersionedInstanceParams( + version uint64, + finalizedRootID flow.Identifier, + sealedRootID flow.Identifier, + sporkRootBlockID flow.Identifier, +) (*flow.VersionedInstanceParams, error) { + versionedInstanceParams := &flow.VersionedInstanceParams{ + Version: version, + } + var data interface{} + switch version { + case 0: + data = InstanceParamsV0{ + FinalizedRootID: finalizedRootID, + SealedRootID: sealedRootID, + SporkRootBlockID: sporkRootBlockID, + } + default: + return nil, fmt.Errorf("unsupported instance params version: %d", version) + } + + encodedData, err := msgpack.Marshal(data) + if err != nil { + return nil, fmt.Errorf("could not encode InstanceParams: %w", err) + } + versionedInstanceParams.Data = encodedData + + return versionedInstanceParams, nil +} diff --git a/state/protocol/datastore/params_test.go b/state/protocol/datastore/params_test.go new file mode 100644 index 00000000000..f55ed580cbe --- /dev/null +++ b/state/protocol/datastore/params_test.go @@ -0,0 +1,43 @@ +package datastore + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/vmihailenco/msgpack/v4" + + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewVersionedInstanceParams verifies that NewVersionedInstanceParams +// correctly constructs instance params for supported versions and +// returns an error for unsupported versions. +// Test cases: +// 1) If version is 0, it constructs InstanceParamsV0 with the provided data. +// 2) If version is unsupported, it returns an error. +func TestNewVersionedInstanceParams(t *testing.T) { + finalizedRootID := unittest.IdentifierFixture() + sealedRootID := unittest.IdentifierFixture() + sporkRootID := unittest.IdentifierFixture() + + t.Run("valid version 0", func(t *testing.T) { + versioned, err := NewVersionedInstanceParams(0, finalizedRootID, sealedRootID, sporkRootID) + require.NoError(t, err) + require.Equal(t, uint64(0), versioned.Version) + + var v0 InstanceParamsV0 + err = msgpack.Unmarshal(versioned.Data, &v0) + require.NoError(t, err) + + require.Equal(t, finalizedRootID, v0.FinalizedRootID) + require.Equal(t, sealedRootID, v0.SealedRootID) + require.Equal(t, sporkRootID, v0.SporkRootBlockID) + }) + + t.Run("unsupported version", func(t *testing.T) { + versioned, err := NewVersionedInstanceParams(99, finalizedRootID, sealedRootID, sporkRootID) + require.Nil(t, versioned) + require.Error(t, err) + require.Contains(t, err.Error(), "unsupported instance params version") + }) +} diff --git a/state/protocol/datastore/validity.go b/state/protocol/datastore/validity.go new file mode 100644 index 00000000000..61788e547ee --- /dev/null +++ b/state/protocol/datastore/validity.go @@ -0,0 +1,268 @@ +package datastore + +import ( + "fmt" + + "github.com/onflow/flow-go/consensus/hotstuff/committees" + "github.com/onflow/flow-go/consensus/hotstuff/signature" + "github.com/onflow/flow-go/consensus/hotstuff/validator" + "github.com/onflow/flow-go/consensus/hotstuff/verification" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/state/protocol" +) + +// IsValidRootSnapshot checks the internal consistency of a root state snapshot. +// verifyResultID controls whether the root seal's result ID is checked against the root result. +func IsValidRootSnapshot(snap protocol.Snapshot, verifyResultID bool) error { + + segment, err := snap.SealingSegment() + if err != nil { + return fmt.Errorf("could not get sealing segment: %w", err) + } + err = segment.Validate() + if err != nil { + return fmt.Errorf("invalid root
sealing segment: %w", err) + } + + result, seal, err := snap.SealedResult() + if err != nil { + return fmt.Errorf("could not latest sealed result: %w", err) + } + + highest := segment.Highest() // reference block of the snapshot + lowest := segment.Sealed() // last sealed block + highestID := highest.ID() + lowestID := lowest.ID() + + if result.BlockID != lowestID { + return fmt.Errorf("root execution result for wrong block (%x != %x)", result.BlockID, lowest.ID()) + } + + if seal.BlockID != lowestID { + return fmt.Errorf("root block seal for wrong block (%x != %x)", seal.BlockID, lowest.ID()) + } + + if verifyResultID { + if seal.ResultID != result.ID() { + return fmt.Errorf("root block seal for wrong execution result (%x != %x)", seal.ResultID, result.ID()) + } + } + + // identities must be canonically ordered + identities, err := snap.Identities(filter.Any) + if err != nil { + return fmt.Errorf("could not get identities for root snapshot: %w", err) + } + if !identities.Sorted(flow.Canonical[flow.Identity]) { + return fmt.Errorf("identities are not canonically ordered") + } + + // root qc must be for reference block of snapshot + qc, err := snap.QuorumCertificate() + if err != nil { + return fmt.Errorf("could not get qc for root snapshot: %w", err) + } + if qc.BlockID != highestID { + return fmt.Errorf("qc is for wrong block (got: %x, expected: %x)", qc.BlockID, highestID) + } + + currentEpoch, err := snap.Epochs().Current() + if err != nil { + return fmt.Errorf("could not get current epoch: %w", err) + } + firstView := currentEpoch.FirstView() + finalView := currentEpoch.FinalView() + + // the segment must be fully within the current epoch + if firstView > lowest.View { + return fmt.Errorf("lowest block of sealing segment has lower view than first view of epoch") + } + if highest.View >= finalView { + return fmt.Errorf("final view of epoch less than first block view") + } + + err = validateVersionBeacon(snap) + if err != nil { + return err + } + + return nil +} + +// IsValidRootSnapshotQCs checks internal consistency of QCs that are included in the root state snapshot +// It verifies QCs for main consensus and for each collection cluster. 
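// A hedged usage sketch (hypothetical bootstrap code, not part of this changeset):
// the two validators are independent, and a caller ingesting a root snapshot might
// chain them:
//
//	if err := IsValidRootSnapshot(snap, true); err != nil {
//		return fmt.Errorf("invalid root snapshot: %w", err)
//	}
//	if err := IsValidRootSnapshotQCs(snap); err != nil {
//		return fmt.Errorf("invalid root snapshot QCs: %w", err)
//	}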
+func IsValidRootSnapshotQCs(snap protocol.Snapshot) error { + // validate main consensus QC + err := validateRootQC(snap) + if err != nil { + return fmt.Errorf("invalid root QC: %w", err) + } + + // validate each collection cluster separately + curEpoch, err := snap.Epochs().Current() + if err != nil { + return fmt.Errorf("could not get current epoch for root snapshot: %w", err) + } + clusters, err := curEpoch.Clustering() + if err != nil { + return fmt.Errorf("could not get clustering for root snapshot: %w", err) + } + for clusterIndex := range clusters { + cluster, err := curEpoch.Cluster(uint(clusterIndex)) + if err != nil { + return fmt.Errorf("could not get cluster %d for root snapshot: %w", clusterIndex, err) + } + err = validateClusterQC(cluster) + if err != nil { + return fmt.Errorf("invalid cluster qc %d: %w", clusterIndex, err) + } + } + return nil +} + +// validateRootQC performs validation of the root QC. +// Returns nil on success. +func validateRootQC(snap protocol.Snapshot) error { + identities, err := snap.Identities(filter.IsVotingConsensusCommitteeMember) + if err != nil { + return fmt.Errorf("could not get root snapshot identities: %w", err) + } + + rootQC, err := snap.QuorumCertificate() + if err != nil { + return fmt.Errorf("could not get root QC: %w", err) + } + + currentEpoch, err := snap.Epochs().Current() + if err != nil { + return fmt.Errorf("could not get current epoch for root snapshot: %w", err) + } + dkg, err := currentEpoch.DKG() + if err != nil { + return fmt.Errorf("could not get DKG for root snapshot: %w", err) + } + + committee, err := committees.NewStaticCommitteeWithDKG(identities, flow.Identifier{}, dkg) + if err != nil { + return fmt.Errorf("could not create static committee: %w", err) + } + verifier := verification.NewCombinedVerifier(committee, signature.NewConsensusSigDataPacker(committee)) + hotstuffValidator := validator.New(committee, verifier) + err = hotstuffValidator.ValidateQC(rootQC) + if err != nil { + return fmt.Errorf("could not validate root qc: %w", err) + } + return nil +} + +// validateClusterQC performs QC validation of a single collection cluster. +// Returns nil on success. +func validateClusterQC(cluster protocol.Cluster) error { + committee, err := committees.NewStaticReplicas(cluster.Members(), flow.Identifier{}, nil, nil) + if err != nil { + return fmt.Errorf("could not create static committee: %w", err) + } + verifier := verification.NewStakingVerifier() + hotstuffValidator := validator.New(committee, verifier) + err = hotstuffValidator.ValidateQC(cluster.RootQC()) + if err != nil { + return fmt.Errorf("could not validate root qc: %w", err) + } + return nil +} + +// validateVersionBeacon returns an InvalidServiceEventError if the snapshot +// version beacon is invalid +func validateVersionBeacon(snap protocol.Snapshot) error { + errf := func(msg string, args ...any) error { + return protocol.NewInvalidServiceEventErrorf(msg, args...) + } + + versionBeacon, err := snap.VersionBeacon() + if err != nil { + return errf("could not get version beacon: %w", err) + } + + if versionBeacon == nil { + return nil + } + + head, err := snap.Head() + if err != nil { + return errf("could not get snapshot head: %w", err) + } + + // version beacon must be included in a past block to be effective + if versionBeacon.SealHeight > head.Height { + return errf("version beacon seal height is higher than highest block height") + } + + err = versionBeacon.Validate() + if err != nil { + return errf("version beacon is invalid: %w", err) + } + + return nil +} + +//
ValidRootSnapshotContainsEntityExpiryRange performs a sanity check to make sure the +// root snapshot has enough history to encompass at least one full entity expiry window. +// Entities (in particular transactions and collections) may reference a block within +// the past `flow.DefaultTransactionExpiry` blocks, so a new node must begin with at least +// this many blocks worth of history leading up to the snapshot's root block. +// +// Currently, Access Nodes and Consensus Nodes require root snapshots passing this validator function. +// +// - Consensus Nodes because they process guarantees referencing past blocks +// - Access Nodes because they index transactions referencing past blocks +// +// One of the following conditions must be satisfied to pass this validation: +// 1. This is a snapshot built from the first block of a spork +// -> there is no earlier history which transactions/collections could reference +// 2. This snapshot's sealing segment contains at least one expiry window of blocks +// -> all possible reference blocks in future transactions/collections will be within the initial history. +// 3. This snapshot's sealing segment includes the spork root block +// -> there is no earlier history which transactions/collections could reference +func ValidRootSnapshotContainsEntityExpiryRange(snapshot protocol.Snapshot) error { + isSporkRootSnapshot, err := protocol.IsSporkRootSnapshot(snapshot) + if err != nil { + return fmt.Errorf("could not check if root snapshot is a spork root snapshot: %w", err) + } + // Condition 1 satisfied + if isSporkRootSnapshot { + return nil + } + + head, err := snapshot.Head() + if err != nil { + return fmt.Errorf("could not query root snapshot head: %w", err) + } + sporkRootBlockHeight := snapshot.Params().SporkRootBlockHeight() + sealingSegment, err := snapshot.SealingSegment() + if err != nil { + return fmt.Errorf("could not query sealing segment: %w", err) + } + + sealingSegmentLength := uint64(len(sealingSegment.AllBlocks())) + transactionExpiry := uint64(flow.DefaultTransactionExpiry) + blocksInSpork := head.Height - sporkRootBlockHeight + 1 // range is inclusive on both ends + + // Condition 3: + // check if head.Height - sporkRootBlockHeight < flow.DefaultTransactionExpiry + // this is the case where we bootstrap early into the spork and there are simply not enough blocks + if blocksInSpork < transactionExpiry { + // the distance to spork root is less than transaction expiry, we need all blocks back to the spork root.
+ if sealingSegmentLength != blocksInSpork { + return fmt.Errorf("invalid root snapshot length, expecting exactly (%d), got (%d)", blocksInSpork, sealingSegmentLength) + } + } else { + // Condition 2: + // the distance to spork root is more than transaction expiry, we need at least `transactionExpiry` many blocks + if sealingSegmentLength < transactionExpiry { + return fmt.Errorf("invalid root snapshot length, expecting at least (%d), got (%d)", + transactionExpiry, sealingSegmentLength) + } + } + return nil +} diff --git a/state/protocol/datastore/validity_test.go b/state/protocol/datastore/validity_test.go new file mode 100644 index 00000000000..d0af9bcfff0 --- /dev/null +++ b/state/protocol/datastore/validity_test.go @@ -0,0 +1,147 @@ +package datastore + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +var participants = unittest.IdentityListFixture(20, unittest.WithAllRoles()) + +// TestEntityExpirySnapshotValidation tests that we perform the correct sanity checks when +// bootstrapping consensus nodes and access nodes: we expect to bootstrap only snapshots +// with sufficient history. +func TestEntityExpirySnapshotValidation(t *testing.T) { + t.Run("spork-root-snapshot", func(t *testing.T) { + rootSnapshot := unittest.RootSnapshotFixture(participants) + err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) + require.NoError(t, err) + }) + t.Run("not-enough-history", func(t *testing.T) { + rootSnapshot := unittest.RootSnapshotFixture(participants) + blockLen := len(rootSnapshot.Encodable().SealingSegment.Blocks) + rootSnapshot.Encodable().SealingSegment.Blocks[blockLen-1].Block.Height += 10 // advance height so this is no longer a spork root snapshot + err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) + require.Error(t, err) + }) + t.Run("enough-history-spork-just-started", func(t *testing.T) { + rootSnapshot := unittest.RootSnapshotFixture(participants) + // advance height so this is no longer a spork root snapshot, but still lower than transaction expiry + rootSnapshot.Encodable().Head().Height += flow.DefaultTransactionExpiry / 2 + // add blocks to sealing segment + rootSnapshot.Encodable().SealingSegment.ExtraBlocks = unittest.ProposalFixtures(int(flow.DefaultTransactionExpiry / 2)) + err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) + require.NoError(t, err) + }) + t.Run("enough-history-long-spork", func(t *testing.T) { + rootSnapshot := unittest.RootSnapshotFixture(participants) + // advance height so this is no longer a spork root snapshot + rootSnapshot.Encodable().Head().Height += flow.DefaultTransactionExpiry * 2 + // add blocks to sealing segment + rootSnapshot.Encodable().SealingSegment.ExtraBlocks = unittest.ProposalFixtures(int(flow.DefaultTransactionExpiry) - 1) + err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) + require.NoError(t, err) + }) + t.Run("more-history-than-needed", func(t *testing.T) { + rootSnapshot := unittest.RootSnapshotFixture(participants) + // advance height so this is no longer a spork root snapshot + rootSnapshot.Encodable().Head().Height += flow.DefaultTransactionExpiry * 2 + // add blocks to sealing segment + rootSnapshot.Encodable().SealingSegment.ExtraBlocks = unittest.ProposalFixtures(flow.DefaultTransactionExpiry * 2) + err := ValidRootSnapshotContainsEntityExpiryRange(rootSnapshot) + require.NoError(t, err) + }) +} + +func
TestValidateVersionBeacon(t *testing.T) { + t.Run("no version beacon is ok", func(t *testing.T) { + snap := new(mock.Snapshot) + + snap.On("VersionBeacon").Return(nil, nil) + + err := validateVersionBeacon(snap) + require.NoError(t, err) + }) + t.Run("valid version beacon is ok", func(t *testing.T) { + snap := new(mock.Snapshot) + block := unittest.BlockFixture( + unittest.Block.WithHeight(100), + ) + + vb := &flow.SealedVersionBeacon{ + VersionBeacon: &flow.VersionBeacon{ + VersionBoundaries: []flow.VersionBoundary{ + { + BlockHeight: 1000, + Version: "1.0.0", + }, + }, + Sequence: 50, + }, + SealHeight: uint64(37), + } + + snap.On("Head").Return(block.ToHeader(), nil) + snap.On("VersionBeacon").Return(vb, nil) + + err := validateVersionBeacon(snap) + require.NoError(t, err) + }) + t.Run("height must be below highest block", func(t *testing.T) { + snap := new(mock.Snapshot) + block := unittest.BlockFixture( + unittest.Block.WithHeight(12), + ) + + vb := &flow.SealedVersionBeacon{ + VersionBeacon: &flow.VersionBeacon{ + VersionBoundaries: []flow.VersionBoundary{ + { + BlockHeight: 1000, + Version: "1.0.0", + }, + }, + Sequence: 50, + }, + SealHeight: uint64(37), + } + + snap.On("Head").Return(block.ToHeader(), nil) + snap.On("VersionBeacon").Return(vb, nil) + + err := validateVersionBeacon(snap) + require.Error(t, err) + require.True(t, protocol.IsInvalidServiceEventError(err)) + }) + t.Run("version beacon must be valid", func(t *testing.T) { + snap := new(mock.Snapshot) + block := unittest.BlockFixture( + unittest.Block.WithHeight(12), + ) + + vb := &flow.SealedVersionBeacon{ + VersionBeacon: &flow.VersionBeacon{ + VersionBoundaries: []flow.VersionBoundary{ + { + BlockHeight: 0, + Version: "asdf", // invalid semver - hence will be considered invalid + }, + }, + Sequence: 50, + }, + SealHeight: uint64(1), + } + + snap.On("Head").Return(block.ToHeader(), nil) + snap.On("VersionBeacon").Return(vb, nil) + + err := validateVersionBeacon(snap) + require.Error(t, err) + require.True(t, protocol.IsInvalidServiceEventError(err)) + }) +} diff --git a/state/protocol/defaults.go b/state/protocol/defaults.go index 320c897d638..c5732b15175 100644 --- a/state/protocol/defaults.go +++ b/state/protocol/defaults.go @@ -6,16 +6,54 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// DefaultEpochCommitSafetyThreshold returns the default epoch commit safety -// threshold for each chain ID. Greater threshold values are generally safer, -// but require longer epochs and longer EpochCommit phases. See Params for -// more details on this value. -func DefaultEpochCommitSafetyThreshold(chain flow.ChainID) (uint64, error) { +// SafetyParams contains the safety parameters for the protocol related to the epochs. +// For extra details, refer to documentation of protocol.KVStoreReader. +type SafetyParams struct { + FinalizationSafetyThreshold uint64 + EpochExtensionViewCount uint64 +} + +// DefaultEpochSafetyParams returns the default epoch safety parameters +// for each chain ID. 
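// A minimal usage sketch (hypothetical caller, not part of this changeset):
//
//	params, err := protocol.DefaultEpochSafetyParams(flow.Mainnet)
//	if err != nil {
//		return err
//	}
//	// e.g. 1_000 views of finalization safety margin on mainnet
//	threshold := params.FinalizationSafetyThreshold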
+func DefaultEpochSafetyParams(chain flow.ChainID) (SafetyParams, error) { switch chain { - case flow.Mainnet, flow.Testnet, flow.Sandboxnet: - return 1_000, nil + case flow.Mainnet, flow.Testnet, flow.Sandboxnet, flow.Previewnet: + return SafetyParams{ + FinalizationSafetyThreshold: 1_000, + EpochExtensionViewCount: 100_000, // approximately 1 day + }, nil case flow.Localnet, flow.Benchnet, flow.BftTestnet, flow.Emulator: - return 100, nil + return SafetyParams{ + FinalizationSafetyThreshold: 100, + EpochExtensionViewCount: 600, // approximately 10 minutes + }, nil } - return 0, fmt.Errorf("unkown chain id %s", chain.String()) + return SafetyParams{}, fmt.Errorf("unknown chain id %s", chain.String()) +} + +// RandomBeaconSafetyThreshold defines a production network safety threshold for the random beacon protocol, based on the size +// of the random beacon committee ℛ and the DKG committee 𝒟. +// +// We recall that the committee ℛ is defined as the subset of the consensus committee (ℛ ⊆ 𝒞) and the DKG +// committee (ℛ ⊆ 𝒟) that _successfully_ completed the DKG and is able to contribute with a random beacon share. +// +// An honest supermajority of consensus nodes must contain enough successful DKG participants +// (about |𝒟|/2 + 1) to produce a valid group signature for the random beacon at each block [1, 3]. +// Therefore, we have the approximate lower bound |ℛ| ≳ |𝒟|/2 + 1 = len(DKGIndexMap)/2 + 1. +// Operating close to this lower bound would require that every random beacon key-holder ϱ ∈ ℛ remaining in the consensus committee is honest +// (incl. quickly responsive) *all the time*. Such a reliability assumption is unsuited for decentralized production networks. +// To reject configurations that are vulnerable to liveness failures, the protocol uses the threshold `t_safety` +// (heuristic, see [2]), which is implemented on the smart contract level. +// Ideally, |ℛ| and therefore |𝒟 ∩ 𝒞| (given that |ℛ| ≤ |𝒟 ∩ 𝒞|) should be well above 70%·|𝒟|. +// Values in the range 62%·|𝒟| to 70%·|𝒟| should be considered for short-term recovery cases. +// Values of 62%·|𝒟| or lower (i.e. |ℛ| ≤ 0.62·|𝒟|) are not recommended for any +// production network, as single-node crashes may already be enough to halt consensus. +// +// For further details, see +// - godoc for [flow.DKGIndexMap] +// - [1] https://www.notion.so/flowfoundation/Threshold-Signatures-7e26c6dd46ae40f7a83689ba75a785e3?pvs=4 +// - [2] https://www.notion.so/flowfoundation/DKG-contract-success-threshold-86c6bf2b92034855b3c185d7616eb6f1?pvs=4 +// - [3] https://www.notion.so/flowfoundation/Architecture-for-Concurrent-Vote-Processing-41704666bc414a03869b70ba1043605f?pvs=4 +func RandomBeaconSafetyThreshold(dkgCommitteeSize uint) uint { + return uint(0.62 * float64(dkgCommitteeSize)) } diff --git a/state/protocol/dkg.go b/state/protocol/dkg.go index 88d79c96f39..103a1e47a11 100644 --- a/state/protocol/dkg.go +++ b/state/protocol/dkg.go @@ -1,7 +1,8 @@ package protocol import ( - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/flow" ) @@ -24,4 +25,14 @@ type DKG interface { // Error Returns: // * protocol.IdentityNotFoundError if nodeID is not a valid DKG participant. KeyShare(nodeID flow.Identifier) (crypto.PublicKey, error) + + // KeyShares returns the public portions of all threshold key shares. Note that there might not + // exist a private key corresponding to each entry (e.g. if the respective node failed the DKG).
+ KeyShares() []crypto.PublicKey + + // NodeID returns the node identifier for the given index. + // An exception is returned if the index is ≥ Size(). + // Intended for use outside the hot path, with runtime + // scaling linearly in the number of DKG participants (i.e. Size()). + NodeID(index uint) (flow.Identifier, error) } diff --git a/state/protocol/epoch.go b/state/protocol/epoch.go index 17a6f54da66..9e4e887fa25 100644 --- a/state/protocol/epoch.go +++ b/state/protocol/epoch.go @@ -10,23 +10,52 @@ type EpochQuery interface { // Current returns the current epoch as of this snapshot. All valid snapshots // have a current epoch. - Current() Epoch - - // Next returns the next epoch as of this snapshot. Valid snapshots must - // have a next epoch available after the transition to epoch setup phase. - Next() Epoch + // Error returns: + // - [state.ErrUnknownSnapshotReference] - if the epoch is queried from an unresolvable snapshot. + // - generic error in case of unexpected critical internal corruption or bugs + Current() (CommittedEpoch, error) + + // NextUnsafe should only be used by components that are actively involved in advancing + // the epoch from [flow.EpochPhaseSetup] to [flow.EpochPhaseCommitted]. + // NextUnsafe returns the tentative configuration for the next epoch as of this snapshot. + // Valid snapshots make such configuration available during the Epoch Setup Phase, which + // generally is the case only after an `EpochSetupPhaseStarted` notification has been emitted. + // CAUTION: the epoch transition might not happen as described by the tentative configuration! + // + // Error returns: + // - [ErrNextEpochNotSetup] in the case that this method is queried w.r.t. a snapshot + // within the [flow.EpochPhaseStaking] phase or when we are in Epoch Fallback Mode. + // - [ErrNextEpochAlreadyCommitted] if the tentative epoch is requested from + // a snapshot within the [flow.EpochPhaseCommitted] phase. + // - [state.ErrUnknownSnapshotReference] if the epoch is queried from an unresolvable snapshot. + // - generic error in case of unexpected critical internal corruption or bugs + NextUnsafe() (TentativeEpoch, error) + + // NextCommitted returns the next epoch as of this snapshot, only if it has + // been committed already - generally that is the case only after an + // `EpochCommittedPhaseStarted` notification has been emitted. + // + // Error returns: + // - [ErrNextEpochNotCommitted] - in the case that committed epoch has been requested w.r.t a snapshot within + // the [flow.EpochPhaseStaking] or [flow.EpochPhaseSetup] phases. + // - [state.ErrUnknownSnapshotReference] - if the epoch is queried from an unresolvable snapshot. + // - generic error in case of unexpected critical internal corruption or bugs + NextCommitted() (CommittedEpoch, error) // Previous returns the previous epoch as of this snapshot. Valid snapshots // must have a previous epoch for all epochs except that immediately after // the root block - in other words, if a previous epoch exists, implementations // must arrange to expose it here. // - // Returns ErrNoPreviousEpoch in the case that this method is queried w.r.t. - // a snapshot from the first epoch after the root block. - Previous() Epoch + // Error returns: + // - [protocol.ErrNoPreviousEpoch] - if the epoch represents a previous epoch which does not exist. + // This happens when the previous epoch is queried within the first epoch of a spork. + // - [state.ErrUnknownSnapshotReference] - if the epoch is queried from an unresolvable snapshot.
+ // - generic error in case of unexpected critical internal corruption or bugs + Previous() (CommittedEpoch, error) } -// Epoch contains the information specific to a certain Epoch (defined +// CommittedEpoch contains the information specific to a certain Epoch (defined // by the epoch Counter). Note that the Epoch preparation can differ along // different forks, since the emission of service events is fork-dependent. // Therefore, an epoch exists RELATIVE to the snapshot from which it was @@ -35,7 +64,7 @@ type EpochQuery interface { // CAUTION: Clients must ensure to query epochs only for finalized blocks to // ensure they query finalized epoch information. // -// An Epoch instance is constant and reports the identical information +// A CommittedEpoch instance is constant and reports the identical information // even if progress is made later and more information becomes available in // subsequent blocks. // @@ -53,124 +82,98 @@ type EpochQuery interface { // 2. The error caching pattern encourages potentially dangerous snapshot query patterns // // See https://github.com/dapperlabs/flow-go/issues/6368 for details and proposal -type Epoch interface { - - // Counter returns the Epoch's counter. - // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. - Counter() (uint64, error) +type CommittedEpoch interface { + TentativeEpoch // FirstView returns the first view of this epoch. - // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. - FirstView() (uint64, error) + FirstView() uint64 // DKGPhase1FinalView returns the final view of DKG phase 1 - // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. - DKGPhase1FinalView() (uint64, error) + DKGPhase1FinalView() uint64 // DKGPhase2FinalView returns the final view of DKG phase 2 - // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. - DKGPhase2FinalView() (uint64, error) + DKGPhase2FinalView() uint64 // DKGPhase3FinalView returns the final view of DKG phase 3 - // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. - DKGPhase3FinalView() (uint64, error) + DKGPhase3FinalView() uint64 // FinalView returns the largest view number which still belongs to this epoch. 
- // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. - FinalView() (uint64, error) + // The largest view number is the greatest of: + // - the FinalView field of the flow.EpochSetup event for this epoch + // - the FinalView field of the most recent flow.EpochExtension for this epoch + // If EFM is not triggered during this epoch, this value will be static. + // If EFM is triggered during this epoch, this value may increase with increasing + // reference block heights, as new epoch extensions are included. + FinalView() uint64 + + // TargetDuration returns the desired real-world duration for this epoch, in seconds. + // This target is specified by the FlowEpoch smart contract along with the TargetEndTime in + // the EpochSetup event and used by the Cruise Control system to moderate the block rate. + TargetDuration() uint64 + + // TargetEndTime returns the desired real-world end time for this epoch, represented as + // Unix Time (in units of seconds). This target is specified by the FlowEpoch smart contract in + // the EpochSetup event and used by the Cruise Control system to moderate the block rate. + TargetEndTime() uint64 // RandomSource returns the underlying random source of this epoch. // This source is currently generated by an on-chain contract using the // UnsafeRandom() Cadence function. - // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. - RandomSource() ([]byte, error) - - // InitialIdentities returns the identities for this epoch as they were - // specified in the EpochSetup service event. - // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. - InitialIdentities() (flow.IdentityList, error) - - // Clustering returns the cluster assignment for this epoch. - // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. - Clustering() (flow.ClusterList, error) + RandomSource() []byte // Cluster returns the detailed cluster information for the cluster with the // given index, in this epoch. // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot.
- // * protocol.ErrClusterNotFound - if no cluster has the given index (index > len(clusters)) + // * protocol.ErrClusterNotFound - if no cluster has the given index (index >= len(clusters)) + // * generic error in case of internal state corruption Cluster(index uint) (Cluster, error) // ClusterByChainID returns the detailed cluster information for the cluster with // the given chain ID, in this epoch // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. - // * protocol.ErrNextEpochNotCommitted - if epoch has not been committed yet - // * protocol.ErrClusterNotFound - if cluster is not found by the given chainID + // * protocol.ErrClusterNotFound - if cluster is not found by the given chainID + // * generic error in case of internal state corruption ClusterByChainID(chainID flow.ChainID) (Cluster, error) // DKG returns the result of the distributed key generation procedure. - // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * protocol.ErrNextEpochNotCommitted if epoch has not been committed yet - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. + // No errors expected during normal operation. DKG() (DKG, error) // FirstHeight returns the height of the first block of the epoch. // The first block of an epoch E is defined as the block B with the lowest // height so that: B.View >= E.FirstView - // The first block of an epoch is not defined until it is finalized, so this - // value is only guaranteed to be defined for `Current` epochs of finalized snapshots. + // The first block of an epoch is not defined until it is finalized. // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * protocol.ErrNextEpochNotCommitted if epoch has not been committed yet - // * protocol.ErrEpochTransitionNotFinalized - if the first block of the epoch has not been finalized yet. - // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. + // * protocol.ErrUnknownEpochBoundary - if the first block of the epoch is unknown or unfinalized. FirstHeight() (uint64, error) // FinalHeight returns the height of the final block of the epoch. // The final block of an epoch E is defined as the parent of the first // block in epoch E+1 (see definition from FirstHeight). - // The final block of an epoch is not defined until its child is finalized, - // so this value is only guaranteed to be defined for `Previous` epochs of finalized snapshots. + // The final block of an epoch is not defined until its child is finalized. // Error returns: - // * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. - // * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. - // * protocol.ErrNextEpochNotCommitted - if epoch has not been committed yet - // * protocol.ErrEpochTransitionNotFinalized - if the first block of the next epoch has not been finalized yet. 
- // * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. + // * protocol.ErrUnknownEpochBoundary - if the first block of the next epoch is unknown or unfinalized. FinalHeight() (uint64, error) } + +// TentativeEpoch exposes the tentative information about the upcoming epoch, +// which the protocol is in the process of configuring. +// Only the data that is strictly necessary for committing the epoch is exposed; +// after commitment, all epoch data is accessible through the [CommittedEpoch] interface. +// This should only be used during the Epoch Setup Phase by components that actively +// contribute to configuring the upcoming epoch. +// +// CAUTION: the epoch transition might not happen as described by the tentative configuration! +type TentativeEpoch interface { + + // Counter returns the Epoch's counter. + Counter() uint64 + + // InitialIdentities returns the identities for this epoch as they were + // specified in the EpochSetup service event. + InitialIdentities() flow.IdentitySkeletonList + + // Clustering returns the cluster assignment for this epoch. + // No errors expected during normal operation. + Clustering() (flow.ClusterList, error) +} diff --git a/state/protocol/errors.go b/state/protocol/errors.go index 85a08d590ae..2ed04bb43ae 100644 --- a/state/protocol/errors.go +++ b/state/protocol/errors.go @@ -20,11 +20,32 @@ var ( // ErrNextEpochNotCommitted is a sentinel error returned when the next epoch // has not been committed and information is queried that is only accessible // in the EpochCommitted phase. - ErrNextEpochNotCommitted = fmt.Errorf("queried info from EpochCommit event before it was emitted") - - // ErrEpochTransitionNotFinalized is a sentinel returned when a query is made - // for a block at an epoch boundary which has not yet been finalized. - ErrEpochTransitionNotFinalized = fmt.Errorf("cannot query block at un-finalized epoch transition") + ErrNextEpochNotCommitted = fmt.Errorf("next epoch has not yet been committed") + + // ErrNextEpochAlreadyCommitted is a sentinel error returned when code tries + // to retrieve an uncommitted TentativeEpoch during the EpochCommitted phase + ErrNextEpochAlreadyCommitted = fmt.Errorf("retrieving tentative epoch data when epoch is already committed") + + // ErrUnknownEpochBoundary is a sentinel returned when a query is made for an + // epoch boundary which is unknown to this node. + // + // There are 2 cases where an epoch boundary can be unknown. + // Consider an epoch boundary between epoch N and epoch M=N+1. + // Let: + // - n be the final block in epoch N + // - m be the first block in epoch M + // - r be this node's lowest known block + // - f be this node's latest finalized block + // + // CASE 1: `r.Height > n.Height` + // The boundary occurred before this node's lowest known block. + // Note that this includes the case where `r == m` (we know the first block + // of epoch M but not the final block of epoch N). + // + // CASE 2: `f.Height < m.Height` + // The boundary has not been finalized yet. Note that we may have finalized + // n but not m. + ErrUnknownEpochBoundary = fmt.Errorf("unknown epoch boundary for current chain state") // ErrSealingSegmentBelowRootBlock is a sentinel error returned for queries // for a sealing segment below the root block (local history cutoff).
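// Illustrative sketch (not part of this changeset): a caller distinguishing the
// sentinel errors above when using the revised EpochQuery API. The function name
// and wiring are hypothetical; assumed imports are "errors", "fmt", and
// "github.com/onflow/flow-go/state/protocol".
func nextEpochFinalView(snap protocol.Snapshot) (uint64, bool, error) {
	next, err := snap.Epochs().NextCommitted()
	if errors.Is(err, protocol.ErrNextEpochNotCommitted) {
		// expected while in the staking or setup phase - benign, no next epoch yet
		return 0, false, nil
	}
	if err != nil {
		return 0, false, fmt.Errorf("unexpected failure querying next epoch: %w", err)
	}
	return next.FinalView(), true, nil
}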
diff --git a/state/protocol/events.go b/state/protocol/events.go index 08608d0ffd3..3ea3b9c8a06 100644 --- a/state/protocol/events.go +++ b/state/protocol/events.go @@ -29,7 +29,6 @@ import ( // NOTE: the epoch-related callbacks are only called once the fork containing // the relevant event has been finalized. type Consumer interface { - // BlockFinalized is called when a block is finalized. // Formally, this callback is informationally idempotent. I.e. the consumer // of this callback must handle repeated calls for the same block. @@ -57,15 +56,14 @@ type Consumer interface { // the current epoch. This is equivalent to the end of the epoch staking // phase for the current epoch. // - // Referencing the diagram below, the event is emitted when block c is incorporated. - // The block parameter is the first block of the epoch setup phase (block c). + // Referencing the diagram below, the event is emitted when block b is finalized. + // The block parameter is the first block of the epoch setup phase (block b). // // |<-- Epoch N ------------------------------------------------->| // |<-- StakingPhase -->|<-- SetupPhase -->|<-- CommittedPhase -->| // ^--- block A - this block's execution result contains an EpochSetup event - // ^--- block b - contains seal for block A - // ^--- block c - contains qc for block b, first block of Setup phase - // ^--- block d - finalizes block c, triggers EpochSetupPhaseStarted event + // ^--- block b - contains seal for block A, first block of Setup phase + // ^--- block c - finalizes block b, triggers EpochSetupPhaseStarted event // // NOTE: Only called once the phase transition has been finalized. EpochSetupPhaseStarted(currentEpochCounter uint64, first *flow.Header) @@ -74,24 +72,48 @@ type Consumer interface { // for the current epoch. This is equivalent to the end of the epoch setup // phase for the current epoch. // - // Referencing the diagram below, the event is emitted when block f is received. - // The block parameter is the first block of the epoch committed phase (block f). + // Referencing the diagram below, the event is emitted when block e is finalized. + // The block parameter is the first block of the epoch committed phase (block e). // // |<-- Epoch N ------------------------------------------------->| // |<-- StakingPhase -->|<-- SetupPhase -->|<-- CommittedPhase -->| // ^--- block D - this block's execution result contains an EpochCommit event - // ^--- block e - contains seal for block D - // ^--- block f - contains qc for block e, first block of Committed phase - // ^--- block g - finalizes block f, triggers EpochCommittedPhaseStarted event - /// + // ^--- block e - contains seal for block D, first block of Committed phase + // ^--- block f - finalizes block e, triggers EpochCommittedPhaseStarted event // // NOTE: Only called once the phase transition has been finalized. EpochCommittedPhaseStarted(currentEpochCounter uint64, first *flow.Header) - // EpochEmergencyFallbackTriggered is called when epoch fallback mode (EECC) is triggered. - // Since EECC is a permanent, spork-scoped state, this event is triggered only once. - // After this event is triggered, no further epoch transitions will occur, - // no further epoch phase transitions will occur, and no further epoch-related - // related protocol events (the events defined in this interface) will be emitted. - EpochEmergencyFallbackTriggered() + // EpochFallbackModeTriggered is called when Epoch Fallback Mode [EFM] is triggered. 
+ // EFM is triggered when an invalid or unexpected epoch-related service event is observed, + // or an expected service event is not observed before the epoch commitment deadline. + // After EFM is triggered, we drop any potentially pending but uncommitted future epoch setup. + // When an EpochRecover event is observed, regular epoch transitions begin again. + // Usually, this means we remain in the current epoch until EFM is exited. + // If EFM was triggered within the EpochCommitted phase, then we complete the transition + // to the next, already-committed epoch, then remain in that epoch until EFM is exited. + // Consumers can get context for handling events from: + // - epochCounter is the current epoch counter at the block when EFM was triggered + // - header is the block when EFM was triggered + // + // NOTE: This notification is emitted when the block triggering EFM is finalized. + EpochFallbackModeTriggered(epochCounter uint64, header *flow.Header) + + // EpochFallbackModeExited is called when epoch fallback mode [EFM] is exited. + // EFM is exited when an EpochRecover service event is processed, which defines + // a final view for the current epoch and fully specifies the subsequent epoch. + // Consumers can get context for handling events from: + // - epochCounter is the current epoch counter at the block when EFM was exited + // - header is the block when EFM was exited + // + // NOTE: Only called once the block incorporating the EpochRecover is finalized. + EpochFallbackModeExited(epochCounter uint64, header *flow.Header) + + // EpochExtended is called when a flow.EpochExtension is added to the current epoch. + // Consumers can get context for handling events from: + // - epochCounter is the current epoch counter at the block when the extension was added + // - header is the block when the extension was added + // + // NOTE: This notification is emitted when the block triggering the epoch extension is finalized. + EpochExtended(epochCounter uint64, header *flow.Header, extension flow.EpochExtension) } diff --git a/state/protocol/events/distributor.go b/state/protocol/events/distributor.go index db10f637756..b574edac078 100644 --- a/state/protocol/events/distributor.go +++ b/state/protocol/events/distributor.go @@ -7,12 +7,15 @@ import ( "github.com/onflow/flow-go/state/protocol" ) -// Distributor distributes events to a list of subscribers. +// Distributor implements the `protocol.Consumer` interface for ingesting notifications emitted +// by the protocol state. It distributes the notifications to all registered consumers. type Distributor struct { subscribers []protocol.Consumer mu sync.RWMutex } +var _ protocol.Consumer = (*Distributor)(nil) + // NewDistributor returns a new events distributor.
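A hedged usage sketch for the notifications above (illustrative, not part of this diff): a consumer embeds `events.Noop`, which appears later in this change, so it only overrides the callbacks it cares about, and is registered on the distributor via its existing `AddConsumer` registration method.

```go
package example

import (
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol"
	"github.com/onflow/flow-go/state/protocol/events"
)

// efmAlerter logs EFM transitions; all other protocol.Consumer callbacks
// fall through to the embedded no-op implementation.
type efmAlerter struct {
	events.Noop
	log zerolog.Logger
}

var _ protocol.Consumer = (*efmAlerter)(nil)

func (a *efmAlerter) EpochFallbackModeTriggered(epochCounter uint64, header *flow.Header) {
	a.log.Warn().Uint64("epoch", epochCounter).Uint64("height", header.Height).
		Msg("epoch fallback mode triggered")
}

func (a *efmAlerter) EpochFallbackModeExited(epochCounter uint64, header *flow.Header) {
	a.log.Info().Uint64("epoch", epochCounter).Uint64("height", header.Height).
		Msg("epoch fallback mode exited")
}

// wire fans the protocol state's notifications out to the alerter.
func wire(dist *events.Distributor, log zerolog.Logger) {
	dist.AddConsumer(&efmAlerter{log: log})
}
```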
func NewDistributor() *Distributor { return &Distributor{} @@ -64,10 +67,26 @@ func (d *Distributor) EpochCommittedPhaseStarted(epoch uint64, first *flow.Heade } } -func (d *Distributor) EpochEmergencyFallbackTriggered() { +func (d *Distributor) EpochFallbackModeTriggered(epochCounter uint64, header *flow.Header) { + d.mu.RLock() + defer d.mu.RUnlock() + for _, sub := range d.subscribers { + sub.EpochFallbackModeTriggered(epochCounter, header) + } +} + +func (d *Distributor) EpochFallbackModeExited(epochCounter uint64, header *flow.Header) { + d.mu.RLock() + defer d.mu.RUnlock() + for _, sub := range d.subscribers { + sub.EpochFallbackModeExited(epochCounter, header) + } +} + +func (d *Distributor) EpochExtended(epochCounter uint64, header *flow.Header, extension flow.EpochExtension) { d.mu.RLock() defer d.mu.RUnlock() for _, sub := range d.subscribers { - sub.EpochEmergencyFallbackTriggered() + sub.EpochExtended(epochCounter, header, extension) } } diff --git a/state/protocol/events/gadgets/identity_deltas.go b/state/protocol/events/gadgets/identity_deltas.go index a1d56cc1e9e..77131d6968e 100644 --- a/state/protocol/events/gadgets/identity_deltas.go +++ b/state/protocol/events/gadgets/identity_deltas.go @@ -9,7 +9,8 @@ import ( // subscribe to callbacks any time an identity table change (or possible change) // is finalized. // -// TODO add slashing/ejection events here once implemented +// TODO(EFM, #6123) add slashing/ejection events here once implemented +// TODO(EFM, #6123): Consider consolidating this with ProtocolStateIDCache type IdentityDeltas struct { events.Noop callback func() diff --git a/state/protocol/events/gadgets/views_test.go b/state/protocol/events/gadgets/views_test.go index 484531c4b53..a0393398322 100644 --- a/state/protocol/events/gadgets/views_test.go +++ b/state/protocol/events/gadgets/views_test.go @@ -19,7 +19,7 @@ type viewsMachine struct { expectedCalls int // expected value of calls at any given time } -func (m *viewsMachine) Init(_ *rapid.T) { +func (m *viewsMachine) init(_ *rapid.T) { m.views = NewViews() m.callbacks = make(map[uint64]int) m.calls = 0 @@ -27,7 +27,7 @@ func (m *viewsMachine) Init(_ *rapid.T) { } func (m *viewsMachine) OnView(t *rapid.T) { - view := rapid.Uint64().Draw(t, "view").(uint64) + view := rapid.Uint64().Draw(t, "view") m.views.OnView(view, func(_ *flow.Header) { m.calls++ // count actual number of calls invoked by Views }) @@ -37,7 +37,7 @@ func (m *viewsMachine) OnView(t *rapid.T) { } func (m *viewsMachine) BlockFinalized(t *rapid.T) { - view := rapid.Uint64().Draw(t, "view").(uint64) + view := rapid.Uint64().Draw(t, "view") block := unittest.BlockHeaderFixture() block.View = view @@ -58,5 +58,9 @@ func (m *viewsMachine) Check(t *rapid.T) { } func TestViewsRapid(t *testing.T) { - rapid.Check(t, rapid.Run(new(viewsMachine))) + rapid.Check(t, func(t *rapid.T) { + sm := new(viewsMachine) + sm.init(t) + t.Repeat(rapid.StateMachineActions(sm)) + }) } diff --git a/state/protocol/events/logger.go b/state/protocol/events/logger.go new file mode 100644 index 00000000000..942ba00e480 --- /dev/null +++ b/state/protocol/events/logger.go @@ -0,0 +1,42 @@ +package events + +import ( + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" +) + +type EventLogger struct { + Noop // satisfy protocol events consumer interface + logger zerolog.Logger +} + +var _ protocol.Consumer = (*EventLogger)(nil) + +func NewEventLogger(logger zerolog.Logger) *EventLogger { + return &EventLogger{ + logger: 
logger.With().Str("module", "protocol_events_logger").Logger(), + } +} + +func (p EventLogger) EpochTransition(newEpochCounter uint64, header *flow.Header) { + p.logger.Info().Uint64("newEpochCounter", newEpochCounter). + Uint64("height", header.Height). + Uint64("view", header.View). + Msg("epoch transition") +} + +func (p EventLogger) EpochSetupPhaseStarted(currentEpochCounter uint64, header *flow.Header) { + p.logger.Info().Uint64("currentEpochCounter", currentEpochCounter). + Uint64("height", header.Height). + Uint64("view", header.View). + Msg("epoch setup phase started") +} + +func (p EventLogger) EpochCommittedPhaseStarted(currentEpochCounter uint64, header *flow.Header) { + p.logger.Info().Uint64("currentEpochCounter", currentEpochCounter). + Uint64("height", header.Height). + Uint64("view", header.View). + Msg("epoch committed phase started") +} diff --git a/state/protocol/events/mock/heights.go b/state/protocol/events/mock/heights.go index 677edc94ba3..97412124350 100644 --- a/state/protocol/events/mock/heights.go +++ b/state/protocol/events/mock/heights.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -14,13 +14,12 @@ func (_m *Heights) OnHeight(height uint64, callback func()) { _m.Called(height, callback) } -type mockConstructorTestingTNewHeights interface { +// NewHeights creates a new instance of Heights. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHeights(t interface { mock.TestingT Cleanup(func()) -} - -// NewHeights creates a new instance of Heights. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewHeights(t mockConstructorTestingTNewHeights) *Heights { +}) *Heights { mock := &Heights{} mock.Mock.Test(t) diff --git a/state/protocol/events/mock/on_view_callback.go b/state/protocol/events/mock/on_view_callback.go deleted file mode 100644 index 3e413a3c3f3..00000000000 --- a/state/protocol/events/mock/on_view_callback.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mock - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" -) - -// OnViewCallback is an autogenerated mock type for the OnViewCallback type -type OnViewCallback struct { - mock.Mock -} - -// Execute provides a mock function with given fields: _a0 -func (_m *OnViewCallback) Execute(_a0 *flow.Header) { - _m.Called(_a0) -} - -type mockConstructorTestingTNewOnViewCallback interface { - mock.TestingT - Cleanup(func()) -} - -// NewOnViewCallback creates a new instance of OnViewCallback. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewOnViewCallback(t mockConstructorTestingTNewOnViewCallback) *OnViewCallback { - mock := &OnViewCallback{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/state/protocol/events/mock/views.go b/state/protocol/events/mock/views.go index 8466c05a351..78850a425e0 100644 --- a/state/protocol/events/mock/views.go +++ b/state/protocol/events/mock/views.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -17,13 +17,12 @@ func (_m *Views) OnView(view uint64, callback events.OnViewCallback) { _m.Called(view, callback) } -type mockConstructorTestingTNewViews interface { +// NewViews creates a new instance of Views. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewViews(t interface { mock.TestingT Cleanup(func()) -} - -// NewViews creates a new instance of Views. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewViews(t mockConstructorTestingTNewViews) *Views { +}) *Views { mock := &Views{} mock.Mock.Test(t) diff --git a/state/protocol/events/noop.go b/state/protocol/events/noop.go index 1925a5e4776..2c4c3dddd42 100644 --- a/state/protocol/events/noop.go +++ b/state/protocol/events/noop.go @@ -14,14 +14,20 @@ func NewNoop() *Noop { return &Noop{} } -func (n Noop) BlockFinalized(block *flow.Header) {} +func (n Noop) BlockFinalized(*flow.Header) {} -func (n Noop) BlockProcessable(block *flow.Header, certifyingQC *flow.QuorumCertificate) {} +func (n Noop) BlockProcessable(*flow.Header, *flow.QuorumCertificate) {} -func (n Noop) EpochTransition(newEpoch uint64, first *flow.Header) {} +func (n Noop) EpochTransition(uint64, *flow.Header) {} -func (n Noop) EpochSetupPhaseStarted(epoch uint64, first *flow.Header) {} +func (n Noop) EpochSetupPhaseStarted(uint64, *flow.Header) {} -func (n Noop) EpochCommittedPhaseStarted(epoch uint64, first *flow.Header) {} +func (n Noop) EpochCommittedPhaseStarted(uint64, *flow.Header) {} -func (n Noop) EpochEmergencyFallbackTriggered() {} +func (n Noop) EpochFallbackModeTriggered(uint64, *flow.Header) {} + +func (n Noop) EpochFallbackModeExited(uint64, *flow.Header) {} + +func (n Noop) EpochExtended(uint64, *flow.Header, flow.EpochExtension) {} + +func (n Noop) ActiveClustersChanged(flow.ChainIDList) {} diff --git a/state/protocol/execution.go b/state/protocol/execution.go new file mode 100644 index 00000000000..bae36c83706 --- /dev/null +++ b/state/protocol/execution.go @@ -0,0 +1,40 @@ +package protocol + +import "github.com/onflow/flow-go/model/flow" + +// SnapshotExecutionSubset is a subset of the protocol state snapshot that is needed by the FVM +// for execution. +type SnapshotExecutionSubset interface { + // RandomSource provides a source of entropy that can be + // expanded into random values (using a pseudo-random generator). + // The returned slice should have at least 128 bits of entropy. + // The function doesn't error in normal operations; any + // error should be treated as an exception. + // + // `protocol.SnapshotExecutionSubset` implements the `EntropyProvider` interface. + // Note that the possible errors of `SnapshotExecutionSubset.RandomSource()` are: + // - storage.ErrNotFound if the QC is unknown. + // - state.ErrUnknownSnapshotReference if the snapshot reference block is unknown. + // However, at this stage, the snapshot reference block should be known and the QC should also be known, + // so no error is expected in normal operations, as required by `EntropyProvider`. + RandomSource() ([]byte, error) + + // VersionBeacon returns the latest sealed version beacon. + // If no version beacon has been sealed so far during the current spork, returns nil. + // The latest VersionBeacon is only updated for finalized blocks.
This means that, when + // querying an un-finalized fork, `VersionBeacon` will have the same value as querying + // the snapshot for the latest finalized block, even if a newer version beacon is included + // in a seal along the un-finalized fork. + // + // The SealedVersionBeacon must contain at least one entry. The first entry is for a past block height. + // The remaining entries are for all future block heights. Future version boundaries + // can be removed, in which case the emitted event will not contain the removed version + // boundaries. + VersionBeacon() (*flow.SealedVersionBeacon, error) +} + +// SnapshotExecutionSubsetProvider is an interface that provides a subset of the protocol state +// at a specific block. +type SnapshotExecutionSubsetProvider interface { + AtBlockID(blockID flow.Identifier) SnapshotExecutionSubset +} diff --git a/state/protocol/inmem/cluster.go b/state/protocol/inmem/cluster.go index fd2b0b85108..de1ae407f0d 100644 --- a/state/protocol/inmem/cluster.go +++ b/state/protocol/inmem/cluster.go @@ -12,9 +12,9 @@ type Cluster struct { var _ protocol.Cluster = (*Cluster)(nil) -func (c Cluster) Index() uint { return c.enc.Index } -func (c Cluster) ChainID() flow.ChainID { return c.enc.RootBlock.Header.ChainID } -func (c Cluster) EpochCounter() uint64 { return c.enc.Counter } -func (c Cluster) Members() flow.IdentityList { return c.enc.Members } -func (c Cluster) RootBlock() *clustermodel.Block { return c.enc.RootBlock } -func (c Cluster) RootQC() *flow.QuorumCertificate { return c.enc.RootQC } +func (c Cluster) Index() uint { return c.enc.Index } +func (c Cluster) ChainID() flow.ChainID { return c.enc.RootBlock.ChainID } +func (c Cluster) EpochCounter() uint64 { return c.enc.Counter } +func (c Cluster) Members() flow.IdentitySkeletonList { return c.enc.Members } +func (c Cluster) RootBlock() *clustermodel.Block { return c.enc.RootBlock } +func (c Cluster) RootQC() *flow.QuorumCertificate { return c.enc.RootQC } diff --git a/state/protocol/inmem/convert.go b/state/protocol/inmem/convert.go index 411f6aae7df..d3bb596e5fe 100644 --- a/state/protocol/inmem/convert.go +++ b/state/protocol/inmem/convert.go @@ -1,14 +1,12 @@ package inmem import ( - "errors" "fmt" - "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/protocol_state" ) // FromSnapshot generates a memory-backed snapshot from the input snapshot. 
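For context, a hedged sketch (not part of this diff) of the intended consumption pattern for the execution-facing provider interface above; the helper name `entropyAt` is mine.

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol"
)

// entropyAt resolves the execution-facing snapshot subset for a block and
// returns its entropy source. Per the documented contract, RandomSource
// should not fail here, so any error is escalated as an exception.
func entropyAt(provider protocol.SnapshotExecutionSubsetProvider, blockID flow.Identifier) ([]byte, error) {
	subset := provider.AtBlockID(blockID)
	source, err := subset.RandomSource() // at least 128 bits of entropy, per the interface docs
	if err != nil {
		return nil, fmt.Errorf("unexpected failure obtaining entropy for block %v: %w", blockID, err)
	}
	return source, nil
}
```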
@@ -22,19 +20,6 @@ func FromSnapshot(from protocol.Snapshot) (*Snapshot, error) { ) // convert top-level fields - snap.Head, err = from.Head() - if err != nil { - return nil, fmt.Errorf("could not get head: %w", err) - } - snap.Identities, err = from.Identities(filter.Any) - if err != nil { - return nil, fmt.Errorf("could not get identities: %w", err) - } - snap.LatestResult, snap.LatestSeal, err = from.SealedResult() - if err != nil { - return nil, fmt.Errorf("could not get seal: %w", err) - } - snap.SealingSegment, err = from.SealingSegment() if err != nil { return nil, fmt.Errorf("could not get sealing segment: %w", err) @@ -43,37 +28,6 @@ func FromSnapshot(from protocol.Snapshot) (*Snapshot, error) { if err != nil { return nil, fmt.Errorf("could not get qc: %w", err) } - snap.Phase, err = from.Phase() - if err != nil { - return nil, fmt.Errorf("could not get phase: %w", err) - } - - // convert epochs - previous, err := FromEpoch(from.Epochs().Previous()) - // it is possible for valid snapshots to have no previous epoch - if errors.Is(err, protocol.ErrNoPreviousEpoch) { - snap.Epochs.Previous = nil - } else if err != nil { - return nil, fmt.Errorf("could not get previous epoch: %w", err) - } else { - snap.Epochs.Previous = &previous.enc - } - - current, err := FromEpoch(from.Epochs().Current()) - if err != nil { - return nil, fmt.Errorf("could not get current epoch: %w", err) - } - snap.Epochs.Current = current.enc - - next, err := FromEpoch(from.Epochs().Next()) - // it is possible for valid snapshots to have no next epoch - if errors.Is(err, protocol.ErrNextEpochNotSetup) { - snap.Epochs.Next = nil - } else if err != nil { - return nil, fmt.Errorf("could not get next epoch: %w", err) - } else { - snap.Epochs.Next = &next.enc - } // convert global state parameters params, err := FromParams(from.Params()) @@ -82,187 +36,34 @@ func FromSnapshot(from protocol.Snapshot) (*Snapshot, error) { } snap.Params = params.enc + // convert version beacon + versionBeacon, err := from.VersionBeacon() + if err != nil { + return nil, fmt.Errorf("could not get version beacon: %w", err) + } + + snap.SealedVersionBeacon = versionBeacon + return &Snapshot{snap}, nil } // FromParams converts any protocol.GlobalParams to a memory-backed Params. // TODO error docs func FromParams(from protocol.GlobalParams) (*Params, error) { - var ( - params EncodableParams - err error - ) - - params.ChainID, err = from.ChainID() - if err != nil { - return nil, fmt.Errorf("could not get chain id: %w", err) - } - params.SporkID, err = from.SporkID() - if err != nil { - return nil, fmt.Errorf("could not get spork id: %w", err) - } - params.SporkRootBlockHeight, err = from.SporkRootBlockHeight() - if err != nil { - return nil, fmt.Errorf("could not get spork root block height: %w", err) - } - params.ProtocolVersion, err = from.ProtocolVersion() - if err != nil { - return nil, fmt.Errorf("could not get protocol version: %w", err) - } - params.EpochCommitSafetyThreshold, err = from.EpochCommitSafetyThreshold() - if err != nil { - return nil, fmt.Errorf("could not get protocol version: %w", err) + params := EncodableParams{ + ChainID: from.ChainID(), + SporkID: from.SporkID(), + SporkRootBlockHeight: from.SporkRootBlockHeight(), + SporkRootBlockView: from.SporkRootBlockView(), } - return &Params{params}, nil } -// FromEpoch converts any protocol.Epoch to a memory-backed Epoch. -// Error returns: -// * protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. 
-// * protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. -// * state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. -func FromEpoch(from protocol.Epoch) (*Epoch, error) { - var ( - epoch EncodableEpoch - err error - ) - - // convert top-level fields - epoch.Counter, err = from.Counter() - if err != nil { - return nil, fmt.Errorf("could not get counter: %w", err) - } - epoch.InitialIdentities, err = from.InitialIdentities() - if err != nil { - return nil, fmt.Errorf("could not get initial identities: %w", err) - } - epoch.FirstView, err = from.FirstView() - if err != nil { - return nil, fmt.Errorf("could not get first view: %w", err) - } - epoch.FinalView, err = from.FinalView() - if err != nil { - return nil, fmt.Errorf("could not get final view: %w", err) - } - epoch.RandomSource, err = from.RandomSource() - if err != nil { - return nil, fmt.Errorf("could not get random source: %w", err) - } - epoch.DKGPhase1FinalView, epoch.DKGPhase2FinalView, epoch.DKGPhase3FinalView, err = protocol.DKGPhaseViews(from) - if err != nil { - return nil, fmt.Errorf("could not get dkg final views") - } - clustering, err := from.Clustering() - if err != nil { - return nil, fmt.Errorf("could not get clustering: %w", err) - } - epoch.Clustering = clustering - - // convert dkg - dkg, err := from.DKG() - // if this epoch hasn't been committed yet, return the epoch as-is - if errors.Is(err, protocol.ErrNextEpochNotCommitted) { - return &Epoch{epoch}, nil - } - if err != nil { - return nil, fmt.Errorf("could not get dkg: %w", err) - } - convertedDKG, err := FromDKG(dkg, epoch.InitialIdentities.Filter(filter.HasRole(flow.RoleConsensus))) - if err != nil { - return nil, err - } - epoch.DKG = &convertedDKG.enc - - // convert clusters - for index := range clustering { - cluster, err := from.Cluster(uint(index)) - if err != nil { - return nil, fmt.Errorf("could not get cluster %d: %w", index, err) - } - convertedCluster, err := FromCluster(cluster) - if err != nil { - return nil, fmt.Errorf("could not convert cluster %d: %w", index, err) - } - epoch.Clusters = append(epoch.Clusters, convertedCluster.enc) - } - - // convert height bounds - firstHeight, err := from.FirstHeight() - if errors.Is(err, protocol.ErrEpochTransitionNotFinalized) { - // if this epoch hasn't been started yet, return the epoch as-is - return &Epoch{epoch}, nil - } - if err != nil { - return nil, fmt.Errorf("could not get first height: %w", err) - } - epoch.FirstHeight = &firstHeight - finalHeight, err := from.FinalHeight() - if errors.Is(err, protocol.ErrEpochTransitionNotFinalized) { - // if this epoch hasn't ended yet, return the epoch as-is - return &Epoch{epoch}, nil - } - if err != nil { - return nil, fmt.Errorf("could not get final height: %w", err) - } - epoch.FinalHeight = &finalHeight - - return &Epoch{epoch}, nil -} - -// FromCluster converts any protocol.Cluster to a memory-backed Cluster. -// No errors are expected during normal operation. -func FromCluster(from protocol.Cluster) (*Cluster, error) { - cluster := EncodableCluster{ - Counter: from.EpochCounter(), - Index: from.Index(), - Members: from.Members(), - RootBlock: from.RootBlock(), - RootQC: from.RootQC(), - } - return &Cluster{cluster}, nil -} - -// FromDKG converts any protocol.DKG to a memory-backed DKG. -// -// The given participant list must exactly match the DKG members. -// All errors indicate inconsistent or invalid inputs. -// No errors are expected during normal operation. 
-func FromDKG(from protocol.DKG, participants flow.IdentityList) (*DKG, error) { - var dkg EncodableDKG - dkg.GroupKey = encodable.RandomBeaconPubKey{PublicKey: from.GroupKey()} - - lookup, err := protocol.ToDKGParticipantLookup(from, participants) - if err != nil { - return nil, fmt.Errorf("could not generate dkg participant lookup: %w", err) - } - dkg.Participants = lookup - - return &DKG{dkg}, nil -} - -// DKGFromEncodable returns a DKG backed by the given encodable representation. -func DKGFromEncodable(enc EncodableDKG) (*DKG, error) { - return &DKG{enc}, nil -} - // ClusterFromEncodable returns a Cluster backed by the given encodable representation. func ClusterFromEncodable(enc EncodableCluster) (*Cluster, error) { return &Cluster{enc}, nil } -// SnapshotFromBootstrapState generates a protocol.Snapshot representing a -// root bootstrap state. This is used to bootstrap the protocol state for -// genesis or post-spork states. -func SnapshotFromBootstrapState(root *flow.Block, result *flow.ExecutionResult, seal *flow.Seal, qc *flow.QuorumCertificate) (*Snapshot, error) { - version := flow.DefaultProtocolVersion - threshold, err := protocol.DefaultEpochCommitSafetyThreshold(root.Header.ChainID) - if err != nil { - return nil, fmt.Errorf("could not get default epoch commit safety threshold: %w", err) - } - return SnapshotFromBootstrapStateWithParams(root, result, seal, qc, version, threshold) -} - // SnapshotFromBootstrapStateWithParams is SnapshotFromBootstrapState // with a caller-specified protocol version. func SnapshotFromBootstrapStateWithParams( @@ -270,8 +71,7 @@ func SnapshotFromBootstrapStateWithParams( result *flow.ExecutionResult, seal *flow.Seal, qc *flow.QuorumCertificate, - protocolVersion uint, - epochCommitSafetyThreshold uint64, + kvStoreFactory func(epochStateID flow.Identifier) (protocol_state.KVStoreAPI, error), ) (*Snapshot, error) { setup, ok := result.ServiceEvents[0].Event.(*flow.EpochSetup) if !ok { @@ -302,38 +102,127 @@ func SnapshotFromBootstrapStateWithParams( return nil, fmt.Errorf("mismatching cluster and qc: %w", err) } } - encodable, err := FromEpoch(NewStartedEpoch(setup, commit, root.Header.Height)) + + params := EncodableParams{ + ChainID: root.ChainID, // chain ID must match the root block + SporkID: root.ID(), // use root block ID as the unique spork identifier + SporkRootBlockHeight: root.Height, // use root block height as the spork root block height + SporkRootBlockView: root.View, // use root block view as the spork root block view + } + + rootMinEpochState, err := EpochProtocolStateFromServiceEvents(setup, commit) + if err != nil { + return nil, fmt.Errorf("could not construct epoch protocol state: %w", err) + } + rootEpochStateID := rootMinEpochState.ID() + rootKvStore, err := kvStoreFactory(rootEpochStateID) if err != nil { - return nil, fmt.Errorf("could not convert epoch: %w", err) + return nil, fmt.Errorf("could not construct root kvstore: %w", err) + } + if rootKvStore.ID() != root.Payload.ProtocolStateID { + return nil, fmt.Errorf("incorrect protocol state ID in root block, expected (%x) but got (%x)", + root.Payload.ProtocolStateID, rootKvStore.ID()) } - epochs := EncodableEpochs{ - Current: encodable.enc, + kvStoreVersion, kvStoreData, err := rootKvStore.VersionedEncode() + if err != nil { + return nil, fmt.Errorf("could not encode kvstore: %w", err) } - params := EncodableParams{ - ChainID: root.Header.ChainID, // chain ID must match the root block - SporkID: root.ID(), // use root block ID as the unique spork identifier - 
SporkRootBlockHeight: root.Header.Height, // use root block height as the spork root block height - ProtocolVersion: protocolVersion, // major software version for this spork - EpochCommitSafetyThreshold: epochCommitSafetyThreshold, // see protocol.Params for details + rootEpochState, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: rootMinEpochState, + PreviousEpochSetup: nil, + PreviousEpochCommit: nil, + CurrentEpochSetup: setup, + CurrentEpochCommit: commit, + NextEpochSetup: nil, + NextEpochCommit: nil, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct root epoch state entry: %w", err) + } + richRootEpochState, err := flow.NewRichEpochStateEntry(rootEpochState) + if err != nil { + return nil, fmt.Errorf("could not construct root rich epoch state entry: %w", err) + } + + rootProtocolStateEntryWrapper := &flow.ProtocolStateEntryWrapper{ + KVStore: flow.PSKeyValueStoreData{ + Version: kvStoreVersion, + Data: kvStoreData, + }, + EpochEntry: richRootEpochState, + } + + proposal, err := flow.NewRootProposal( + flow.UntrustedProposal{ + Block: *root, + ProposerSigData: nil, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct root proposal: %w", err) } snap := SnapshotFromEncodable(EncodableSnapshot{ - Head: root.Header, - Identities: setup.Participants, - LatestSeal: seal, - LatestResult: result, SealingSegment: &flow.SealingSegment{ - Blocks: []*flow.Block{root}, + Blocks: []*flow.Proposal{ + proposal, + }, ExecutionResults: flow.ExecutionResultList{result}, LatestSeals: map[flow.Identifier]flow.Identifier{root.ID(): seal.ID()}, - FirstSeal: seal, - ExtraBlocks: make([]*flow.Block, 0), + ProtocolStateEntries: map[flow.Identifier]*flow.ProtocolStateEntryWrapper{ + rootKvStore.ID(): rootProtocolStateEntryWrapper, + }, + FirstSeal: seal, + ExtraBlocks: make([]*flow.Proposal, 0), + SporkRootBlock: root, }, - QuorumCertificate: qc, - Phase: flow.EpochPhaseStaking, - Epochs: epochs, - Params: params, + QuorumCertificate: qc, + Params: params, + SealedVersionBeacon: nil, }) + return snap, nil } + +// EpochProtocolStateFromServiceEvents generates a flow.MinEpochStateEntry for a root protocol state which is used for bootstrapping. +// +// CONTEXT: The EpochSetup event contains the IdentitySkeletons for each participant, thereby specifying active epoch members. +// While ejection status is not part of the EpochSetup event, we can supplement this information as follows: +// - Per convention, service events are delivered (asynchronously) in an *order-preserving* manner. Furthermore, +// node ejection is also mediated by system smart contracts and delivered via service events. +// - Therefore, the EpochSetup event contains the up-to-date snapshot of the epoch participants. Any node ejection +// that happened before should be reflected in the EpochSetup event. Specifically, ejected +// nodes should no longer be listed in the EpochSetup event. +// Hence, when the EpochSetup event is emitted/processed, the ejected flag is false for all epoch participants.
+func EpochProtocolStateFromServiceEvents(setup *flow.EpochSetup, commit *flow.EpochCommit) (*flow.MinEpochStateEntry, error) { + identities := make(flow.DynamicIdentityEntryList, 0, len(setup.Participants)) + for _, identity := range setup.Participants { + identities = append(identities, &flow.DynamicIdentityEntry{ + NodeID: identity.NodeID, + Ejected: false, + }) + } + currentEpoch, err := flow.NewEpochStateContainer( + flow.UntrustedEpochStateContainer{ + SetupID: setup.ID(), + CommitID: commit.ID(), + ActiveIdentities: identities, + EpochExtensions: nil, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not construct current epoch state: %w", err) + } + + return flow.NewMinEpochStateEntry( + flow.UntrustedMinEpochStateEntry{ + PreviousEpoch: nil, + CurrentEpoch: *currentEpoch, + NextEpoch: nil, + EpochFallbackTriggered: false, + }, + ) +} diff --git a/state/protocol/inmem/convert_test.go b/state/protocol/inmem/convert_test.go index 72047ac2efc..bd73ddf67fd 100644 --- a/state/protocol/inmem/convert_test.go +++ b/state/protocol/inmem/convert_test.go @@ -5,14 +5,15 @@ import ( "encoding/json" "testing" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" bprotocol "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/state/protocol/util" + "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/unittest" ) @@ -22,9 +23,9 @@ func TestFromSnapshot(t *testing.T) { identities := unittest.IdentityListFixture(10, unittest.WithAllRoles()) rootSnapshot := unittest.RootSnapshotFixture(identities) - util.RunWithFollowerProtocolState(t, rootSnapshot, func(db *badger.DB, state *bprotocol.FollowerState) { - - epochBuilder := unittest.NewEpochBuilder(t, state) + util.RunWithFullProtocolStateAndMutator(t, rootSnapshot, func(db storage.DB, fullState *bprotocol.ParticipantState, mutableState protocol.MutableProtocolState) { + state := fullState.FollowerState + epochBuilder := unittest.NewEpochBuilder(t, mutableState, state) // build epoch 1 (prepare epoch 2) epochBuilder. BuildEpoch(). 
@@ -40,10 +41,9 @@ func TestFromSnapshot(t *testing.T) { epoch2, ok := epochBuilder.EpochHeights(2) require.True(t, ok) - // test that we are able retrieve an in-memory version of root snapshot + // test that we are able to retrieve an in-memory version of root snapshot t.Run("root snapshot", func(t *testing.T) { - root, err := state.Params().Root() - require.NoError(t, err) + root := state.Params().FinalizedRoot() expected := state.AtHeight(root.Height) actual, err := inmem.FromSnapshot(expected) require.NoError(t, err) @@ -100,6 +100,35 @@ func TestFromSnapshot(t *testing.T) { testEncodeDecode(t, actual) }) }) + + // ensure last version beacon is included + t.Run("version beacon", func(t *testing.T) { + expectedVB := &flow.SealedVersionBeacon{ + VersionBeacon: unittest.VersionBeaconFixture( + unittest.WithBoundaries( + flow.VersionBoundary{ + BlockHeight: 1012, + Version: "1.2.3", + }), + ), + } + unittest.AddVersionBeacon(t, expectedVB.VersionBeacon, state) + + expected := state.Final() + head, err := expected.Head() + require.NoError(t, err) + + expectedVB.SealHeight = head.Height + + actual, err := inmem.FromSnapshot(expected) + require.NoError(t, err) + assertSnapshotsEqual(t, expected, actual) + testEncodeDecode(t, actual) + + actualVB, err := actual.VersionBeacon() + require.NoError(t, err) + require.Equal(t, expectedVB, actualVB) + }) }) } diff --git a/state/protocol/inmem/dkg.go b/state/protocol/inmem/dkg.go index 59431dc5420..b79bcc0ecb8 100644 --- a/state/protocol/inmem/dkg.go +++ b/state/protocol/inmem/dkg.go @@ -1,36 +1,67 @@ package inmem import ( - "github.com/onflow/flow-go/crypto" + "fmt" + + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" ) -type DKG struct { - enc EncodableDKG -} +// DKG defines a new type using flow.EpochCommit as underlying type and implements the protocol.DKG interface. +type DKG flow.EpochCommit var _ protocol.DKG = (*DKG)(nil) -func (d DKG) Size() uint { return uint(len(d.enc.Participants)) } -func (d DKG) GroupKey() crypto.PublicKey { return d.enc.GroupKey.PublicKey } +// NewDKG creates a new DKG instance from the given setup and commit events. +func NewDKG(setup *flow.EpochSetup, commit *flow.EpochCommit) protocol.DKG { + return (*DKG)(commit) +} + +func (d *DKG) Size() uint { return uint(len(d.DKGParticipantKeys)) } +func (d *DKG) GroupKey() crypto.PublicKey { return d.DKGGroupKey } -// Index returns the index for the given node. Error Returns: -// protocol.IdentityNotFoundError if nodeID is not a valid DKG participant. -func (d DKG) Index(nodeID flow.Identifier) (uint, error) { - part, exists := d.enc.Participants[nodeID] +// Index returns the DKG index for the given node. +// Expected error during normal operations: +// - protocol.IdentityNotFoundError if nodeID is not a known DKG participant +func (d *DKG) Index(nodeID flow.Identifier) (uint, error) { + index, exists := d.DKGIndexMap[nodeID] if !exists { return 0, protocol.IdentityNotFoundError{NodeID: nodeID} } - return part.Index, nil + if index < 0 { // sanity check, to rule out underflow in subsequent conversion to `uint` + return 0, fmt.Errorf("for node %v, DKGIndexMap contains negative index %d in violation of protocol convention", nodeID, index) + } + return uint(index), nil } -// KeyShare returns the public key share for the given node. Error Returns: -// protocol.IdentityNotFoundError if nodeID is not a valid DKG participant. 
-func (d DKG) KeyShare(nodeID flow.Identifier) (crypto.PublicKey, error) { - part, exists := d.enc.Participants[nodeID] +// KeyShare returns the public key share for the given node. +// Expected error during normal operations: +// - protocol.IdentityNotFoundError if nodeID is not a known DKG participant +func (d *DKG) KeyShare(nodeID flow.Identifier) (crypto.PublicKey, error) { + index, exists := d.DKGIndexMap[nodeID] if !exists { return nil, protocol.IdentityNotFoundError{NodeID: nodeID} } - return part.KeyShare, nil + return d.DKGParticipantKeys[index], nil +} + +// KeyShares returns the public portions of all threshold key shares. Note that there might not +// exist a private key corresponding to each entry (e.g. if the respective node failed the DKG). +func (d *DKG) KeyShares() []crypto.PublicKey { + return d.DKGParticipantKeys +} + +// NodeID returns the node identifier for the given index. +// An exception is returned if the index is ≥ Size(). +// Intended for use outside the hot path, with runtime +// scaling linearly in the number of DKG participants (i.e. Size()). +func (d *DKG) NodeID(index uint) (flow.Identifier, error) { + for nodeID, dkgIndex := range d.DKGIndexMap { + if dkgIndex == int(index) { + return nodeID, nil + } + } + return flow.ZeroID, fmt.Errorf("inconsistent DKG state: missing index %d", index) } diff --git a/state/protocol/inmem/dkg_test.go b/state/protocol/inmem/dkg_test.go new file mode 100644 index 00000000000..95e637ce940 --- /dev/null +++ b/state/protocol/inmem/dkg_test.go @@ -0,0 +1,56 @@ +package inmem_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestDKG tests the [inmem.DKG] implementation.
+func TestDKG(t *testing.T) { + consensusParticipants := unittest.IdentityListFixture(5, unittest.WithRole(flow.RoleConsensus)).Sort(flow.Canonical[flow.Identity]) + otherParticipants := unittest.IdentityListFixture(10, unittest.WithAllRolesExcept(flow.RoleConsensus)) + setup := unittest.EpochSetupFixture(unittest.WithParticipants(append(consensusParticipants, otherParticipants...).ToSkeleton())) + commit := unittest.EpochCommitFixture(unittest.WithDKGFromParticipants(setup.Participants)) + dkg := inmem.NewDKG(setup, commit) + t.Run("Index", func(t *testing.T) { + for i, participant := range consensusParticipants { + index, err := dkg.Index(participant.NodeID) + require.NoError(t, err) + require.Equal(t, uint(i), index) + } + _, err := dkg.Index(otherParticipants[0].NodeID) + require.Error(t, err) + require.True(t, protocol.IsIdentityNotFound(err)) + }) + t.Run("NodeID", func(t *testing.T) { + for i, participant := range consensusParticipants { + nodeID, err := dkg.NodeID(uint(i)) + require.NoError(t, err) + require.Equal(t, participant.NodeID, nodeID) + } + _, err := dkg.NodeID(uint(len(consensusParticipants))) + require.Error(t, err) + }) + t.Run("KeyShare", func(t *testing.T) { + for i, participant := range consensusParticipants { + keyShare, err := dkg.KeyShare(participant.NodeID) + require.NoError(t, err) + require.Equal(t, commit.DKGParticipantKeys[i], keyShare) + } + _, err := dkg.KeyShare(otherParticipants[0].NodeID) + require.Error(t, err) + require.True(t, protocol.IsIdentityNotFound(err)) + }) + t.Run("Size", func(t *testing.T) { + require.Equal(t, uint(len(consensusParticipants)), dkg.Size()) + }) + t.Run("GroupKey", func(t *testing.T) { + require.Equal(t, commit.DKGGroupKey, dkg.GroupKey()) + }) +} diff --git a/state/protocol/inmem/encodable.go b/state/protocol/inmem/encodable.go index 4601ec36578..09c96f9370a 100644 --- a/state/protocol/inmem/encodable.go +++ b/state/protocol/inmem/encodable.go @@ -1,6 +1,8 @@ package inmem import ( + "fmt" + "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" @@ -8,67 +10,114 @@ import ( // EncodableSnapshot is the encoding format for protocol.Snapshot type EncodableSnapshot struct { - Head *flow.Header - Identities flow.IdentityList - LatestSeal *flow.Seal - LatestResult *flow.ExecutionResult - SealingSegment *flow.SealingSegment - QuorumCertificate *flow.QuorumCertificate - Phase flow.EpochPhase - Epochs EncodableEpochs - Params EncodableParams + SealingSegment *flow.SealingSegment + QuorumCertificate *flow.QuorumCertificate + Params EncodableParams + SealedVersionBeacon *flow.SealedVersionBeacon +} + +// Head returns the latest finalized header of the Snapshot, which is the block +// in the sealing segment with the greatest Height. +// The EncodableSnapshot receiver must be correctly formed. +func (snap EncodableSnapshot) Head() *flow.Header { + return snap.SealingSegment.Highest().ToHeader() } -// EncodableEpochs is the encoding format for protocol.EpochQuery -type EncodableEpochs struct { - Previous *EncodableEpoch - Current EncodableEpoch // cannot be nil - Next *EncodableEpoch +// LatestSeal returns the latest seal of the Snapshot. This is the seal +// for the block with the greatest height, of all seals in the Snapshot. +// The EncodableSnapshot receiver must be correctly formed. +// No errors are expected during normal operation. 
+func (snap EncodableSnapshot) LatestSeal() (*flow.Seal, error) { + head := snap.Head() + latestSealID := snap.SealingSegment.LatestSeals[head.ID()] + + // Genesis/Spork-Root Case: The spork root block is the latest sealed block. + // By protocol definition, FirstSeal seals the spork root block. + if snap.SealingSegment.FirstSeal != nil && snap.SealingSegment.FirstSeal.ID() == latestSealID { + return snap.SealingSegment.FirstSeal, nil + } + + // Common Case: The highest seal within the payload of any block in the sealing segment. + // Since seals are included in increasing height order, the latest seal must be in the + // first block (by height descending) which contains any seals. + for i := len(snap.SealingSegment.Blocks) - 1; i >= 0; i-- { + proposal := snap.SealingSegment.Blocks[i] + for _, seal := range proposal.Block.Payload.Seals { + if seal.ID() == latestSealID { + return seal, nil + } + } + if len(proposal.Block.Payload.Seals) > 0 { + // We encountered a block with some seals, but not the latest seal. + // This can only occur in a structurally invalid SealingSegment. + return nil, fmt.Errorf("LatestSeal: sanity check failed: no latest seal") + } + } + // Correctly formatted sealing segments must contain latest seal. + return nil, fmt.Errorf("LatestSeal: unreachable for correctly formatted sealing segments") } -// EncodableEpoch is the encoding format for protocol.Epoch -type EncodableEpoch struct { - Counter uint64 - FirstView uint64 - DKGPhase1FinalView uint64 - DKGPhase2FinalView uint64 - DKGPhase3FinalView uint64 - FinalView uint64 - RandomSource []byte - InitialIdentities flow.IdentityList - Clustering flow.ClusterList - Clusters []EncodableCluster - DKG *EncodableDKG - FirstHeight *uint64 - FinalHeight *uint64 +// LatestSealedResult returns the latest sealed result of the Snapshot. +// This is the result which is sealed by LatestSeal. +// The EncodableSnapshot receiver must be correctly formed. +// No errors are expected during normal operation. +func (snap EncodableSnapshot) LatestSealedResult() (*flow.ExecutionResult, error) { + latestSeal, err := snap.LatestSeal() + if err != nil { + return nil, fmt.Errorf("LatestSealedResult: could not get latest seal: %w", err) + } + + // For both spork root and mid-spork snapshots, the latest sealing result must + // either appear in a block payload or in the ExecutionResults field. + for i := len(snap.SealingSegment.Blocks) - 1; i >= 0; i-- { + proposal := snap.SealingSegment.Blocks[i] + for _, result := range proposal.Block.Payload.Results { + if latestSeal.ResultID == result.ID() { + return result, nil + } + } + } + for _, result := range snap.SealingSegment.ExecutionResults { + if latestSeal.ResultID == result.ID() { + return result, nil + } + } + // Correctly formatted sealing segments must contain latest result. + return nil, fmt.Errorf("LatestSealedResult: unreachable for correctly formatted sealing segments") } -// EncodableDKG is the encoding format for protocol.DKG -type EncodableDKG struct { +// ThresholdKeySet contains the key set for a threshold signature scheme. Typically, the ThresholdKeySet is used to +// encode the output of a trusted setup. In general, signature scheme is configured with a threshold parameter t, +// which is the number of malicious colluding nodes the signature scheme is safe against. To balance liveness and +// safety, the Flow protocol fixes threshold to t = floor((n-1)/2), for n the number of parties in the threshold +// cryptography scheme, specifically n = len(Participants). 
+// Without loss of generality, our threshold cryptography protocol with n parties identifies the individual +// participants by the indices {0, 1, …, n-1}. The slice Participants is ordered accordingly. +type ThresholdKeySet struct { GroupKey encodable.RandomBeaconPubKey - Participants map[flow.Identifier]flow.DKGParticipant + Participants []ThresholdParticipant } -type EncodableFullDKG struct { - GroupKey encodable.RandomBeaconPubKey - PrivKeyShares []encodable.RandomBeaconPrivKey - PubKeyShares []encodable.RandomBeaconPubKey +// ThresholdParticipant encodes the threshold key data for single participant. +type ThresholdParticipant struct { + PrivKeyShare encodable.RandomBeaconPrivKey + PubKeyShare encodable.RandomBeaconPubKey + NodeID flow.Identifier } // EncodableCluster is the encoding format for protocol.Cluster type EncodableCluster struct { Index uint Counter uint64 - Members flow.IdentityList + Members flow.IdentitySkeletonList RootBlock *cluster.Block RootQC *flow.QuorumCertificate } // EncodableParams is the encoding format for protocol.GlobalParams type EncodableParams struct { - ChainID flow.ChainID - SporkID flow.Identifier - SporkRootBlockHeight uint64 - ProtocolVersion uint - EpochCommitSafetyThreshold uint64 + ChainID flow.ChainID + SporkID flow.Identifier + SporkRootBlockHeight uint64 + SporkRootBlockView uint64 } diff --git a/state/protocol/inmem/encodable_test.go b/state/protocol/inmem/encodable_test.go index 22459e17b7a..7fe2e2f898d 100644 --- a/state/protocol/inmem/encodable_test.go +++ b/state/protocol/inmem/encodable_test.go @@ -34,29 +34,9 @@ func TestEncodeDecode(t *testing.T) { require.NoError(t, err) // check that the computed and stored result IDs are consistent - decodedResult, decodedSeal := decodedSnapshot.LatestResult, decodedSnapshot.LatestSeal - assert.Equal(t, decodedResult.ID(), decodedSeal.ResultID) -} - -// TestStrippedEncodeDecode tests that the protocol state snapshot can be encoded to JSON skipping the network address -// and decoded back successfully -func TestStrippedEncodeDecode(t *testing.T) { - participants := unittest.IdentityListFixture(10, unittest.WithAllRoles()) - initialSnapshot := unittest.RootSnapshotFixture(participants) - - // encode the snapshot - strippedSnapshot := inmem.StrippedInmemSnapshot(initialSnapshot.Encodable()) - snapshotJson, err := json.Marshal(strippedSnapshot) + decodedSeal, err := decodedSnapshot.LatestSeal() require.NoError(t, err) - // check that the json string does not contain "Address" - require.NotContains(t, snapshotJson, "Address") - - // decode the snapshots - var decodedSnapshot inmem.EncodableSnapshot - err = json.Unmarshal(snapshotJson, &decodedSnapshot) + decodedResult, err := decodedSnapshot.LatestSealedResult() require.NoError(t, err) - // check that the network addresses for all the identities are still empty - assert.Len(t, decodedSnapshot.Identities.Filter(func(id *flow.Identity) bool { - return id.Address == "" - }), len(participants)) + assert.Equal(t, decodedResult.ID(), decodedSeal.ResultID) } diff --git a/state/protocol/inmem/epoch.go b/state/protocol/inmem/epoch.go index a0be1b1d961..dd86f938a20 100644 --- a/state/protocol/inmem/epoch.go +++ b/state/protocol/inmem/epoch.go @@ -3,198 +3,213 @@ package inmem import ( "fmt" - "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/signature" 
"github.com/onflow/flow-go/state/cluster" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/invalid" ) -// Epoch is a memory-backed implementation of protocol.Epoch. -type Epoch struct { - enc EncodableEpoch -} - -var _ protocol.Epoch = (*Epoch)(nil) - -func (e Epoch) Encodable() EncodableEpoch { - return e.enc -} - -func (e Epoch) Counter() (uint64, error) { return e.enc.Counter, nil } -func (e Epoch) FirstView() (uint64, error) { return e.enc.FirstView, nil } -func (e Epoch) DKGPhase1FinalView() (uint64, error) { return e.enc.DKGPhase1FinalView, nil } -func (e Epoch) DKGPhase2FinalView() (uint64, error) { return e.enc.DKGPhase2FinalView, nil } -func (e Epoch) DKGPhase3FinalView() (uint64, error) { return e.enc.DKGPhase3FinalView, nil } -func (e Epoch) FinalView() (uint64, error) { return e.enc.FinalView, nil } -func (e Epoch) InitialIdentities() (flow.IdentityList, error) { - return e.enc.InitialIdentities, nil -} -func (e Epoch) RandomSource() ([]byte, error) { - return e.enc.RandomSource, nil +// Epochs is a wrapper around [flow.RichEpochStateEntry] implementing the [protocol.EpochQuery] interface. +// It is intended for bootstrapping nodes. For ease of transferring the bootstrapping data from node to node, +// Epochs is backed by easily serializable data and does not have access to a proper protocol-data-base that +// would be part of a staked node. Therefore, it can _not_ return values for the `FirstHeight()` and +// `FinalHeight()` methods, which are part of the [protocol.CommittedEpoch] API. +// CAUTION: use [badger.Snapshot] for protocol logic except when bootstrapping a node. +type Epochs struct { + entry flow.RichEpochStateEntry } -func (e Epoch) Clustering() (flow.ClusterList, error) { - return e.enc.Clustering, nil -} +var _ protocol.EpochQuery = (*Epochs)(nil) -func (e Epoch) DKG() (protocol.DKG, error) { - if e.enc.DKG != nil { - return DKG{*e.enc.DKG}, nil +// Previous returns the previous epoch as of this snapshot. Valid snapshots +// must have a previous epoch for all epochs except that immediately after the root block. +// Error returns: +// - [protocol.ErrNoPreviousEpoch] - if the previous epoch does not exist. +// This happens when the previous epoch is queried within the first epoch of a spork. +func (eq Epochs) Previous() (protocol.CommittedEpoch, error) { + if eq.entry.PreviousEpoch == nil { + return nil, protocol.ErrNoPreviousEpoch } - return nil, protocol.ErrNextEpochNotCommitted + return NewCommittedEpoch(eq.entry.PreviousEpochSetup, eq.entry.PreviousEpochCommit, eq.entry.PreviousEpoch.EpochExtensions), nil } -func (e Epoch) Cluster(i uint) (protocol.Cluster, error) { - if e.enc.Clusters == nil { - return nil, protocol.ErrNextEpochNotCommitted - } +// Current returns the current epoch as of this snapshot. All valid snapshots have a current epoch. +func (eq Epochs) Current() (protocol.CommittedEpoch, error) { + return NewCommittedEpoch(eq.entry.CurrentEpochSetup, eq.entry.CurrentEpochCommit, eq.entry.CurrentEpoch.EpochExtensions), nil +} - if i >= uint(len(e.enc.Clusters)) { - return nil, fmt.Errorf("no cluster with index %d: %w", i, protocol.ErrClusterNotFound) +// NextUnsafe should only be used by components that are actively involved in advancing +// the epoch from [flow.EpochPhaseSetup] to [flow.EpochPhaseCommitted]. +// NextUnsafe returns the tentative configuration for the next epoch as of this snapshot. 
+// Valid snapshots make such configuration available during the Epoch Setup Phase, which +// generally is the case only after an `EpochSetupPhaseStarted` notification has been emitted. +// CAUTION: epoch transition might not happen as described by the tentative configuration! +// +// Error returns: +// - [ErrNextEpochNotSetup] in the case that this method is queried w.r.t. a snapshot +// within the [flow.EpochPhaseStaking] phase or when we are in Epoch Fallback Mode. +// - [ErrNextEpochAlreadyCommitted] if the tentative epoch is requested from +// a snapshot within the [flow.EpochPhaseCommitted] phase. +// - generic error in case of unexpected critical internal corruption or bugs +func (eq Epochs) NextUnsafe() (protocol.TentativeEpoch, error) { + switch eq.entry.EpochPhase() { + case flow.EpochPhaseStaking, flow.EpochPhaseFallback: + return nil, protocol.ErrNextEpochNotSetup + case flow.EpochPhaseSetup: + return NewSetupEpoch(eq.entry.NextEpochSetup), nil + case flow.EpochPhaseCommitted: + return nil, protocol.ErrNextEpochAlreadyCommitted + default: + return nil, fmt.Errorf("unexpected epoch phase '%s' in protocol state entry", eq.entry.EpochPhase().String()) } -func (e Epoch) ClusterByChainID(chainID flow.ChainID) (protocol.Cluster, error) { - if e.enc.Clusters == nil { +// NextCommitted returns the next epoch as of this snapshot, only if it has +// been committed already - generally that is the case only after an +// `EpochCommittedPhaseStarted` notification has been emitted. +// +// Error returns: +// - [ErrNextEpochNotCommitted] - in the case that the committed epoch has been requested w.r.t. a snapshot within +// the [flow.EpochPhaseStaking], [flow.EpochPhaseSetup], or [flow.EpochPhaseFallback] phases. +// - generic error in case of unexpected critical internal corruption or bugs +func (eq Epochs) NextCommitted() (protocol.CommittedEpoch, error) { + switch eq.entry.EpochPhase() { + case flow.EpochPhaseStaking, flow.EpochPhaseFallback, flow.EpochPhaseSetup: return nil, protocol.ErrNextEpochNotCommitted - } - - for _, cluster := range e.enc.Clusters { - if cluster.RootBlock.Header.ChainID == chainID { - return Cluster{cluster}, nil + case flow.EpochPhaseCommitted: + // A protocol state snapshot is immutable and only represents the state as of the corresponding block. The + // Flow protocol implies that future epochs cannot have extensions, because in order to add extensions to + // an epoch, we have to enter that epoch.
Hence, `eq.entry.NextEpoch.EpochExtensions` must be empty: + if len(eq.entry.NextEpoch.EpochExtensions) > 0 { + return nil, irrecoverable.NewExceptionf("state with current epoch %d corrupted, because future epoch %d already has %d extensions", + eq.entry.CurrentEpochCommit.Counter, eq.entry.NextEpochSetup.Counter, len(eq.entry.NextEpoch.EpochExtensions)) } + return NewCommittedEpoch(eq.entry.NextEpochSetup, eq.entry.NextEpochCommit, eq.entry.NextEpoch.EpochExtensions), nil + default: + return nil, fmt.Errorf("unexpected unknown phase in protocol state entry") } - chainIDs := make([]string, 0, len(e.enc.Clusters)) - for _, cluster := range e.enc.Clusters { - chainIDs = append(chainIDs, string(cluster.RootBlock.Header.ChainID)) - } - return nil, fmt.Errorf("no cluster with the given chain ID %v, available chainIDs %v: %w", chainID, chainIDs, protocol.ErrClusterNotFound) -} - -func (e Epoch) FinalHeight() (uint64, error) { - if e.enc.FinalHeight != nil { - return *e.enc.FinalHeight, nil - } - return 0, protocol.ErrEpochTransitionNotFinalized -} - -func (e Epoch) FirstHeight() (uint64, error) { - if e.enc.FirstHeight != nil { - return *e.enc.FirstHeight, nil - } - return 0, protocol.ErrEpochTransitionNotFinalized -} - -type Epochs struct { - enc EncodableEpochs -} - -var _ protocol.EpochQuery = (*Epochs)(nil) - -func (eq Epochs) Previous() protocol.Epoch { - if eq.enc.Previous != nil { - return Epoch{*eq.enc.Previous} - } - return invalid.NewEpoch(protocol.ErrNoPreviousEpoch) -} -func (eq Epochs) Current() protocol.Epoch { - return Epoch{eq.enc.Current} -} -func (eq Epochs) Next() protocol.Epoch { - if eq.enc.Next != nil { - return Epoch{*eq.enc.Next} - } - return invalid.NewEpoch(protocol.ErrNextEpochNotSetup) } -// setupEpoch is an implementation of protocol.Epoch backed by an EpochSetup -// service event. This is used for converting service events to inmem.Epoch. +// setupEpoch is an implementation of [protocol.TentativeEpoch] backed by a [flow.EpochSetup] service event. type setupEpoch struct { - // EpochSetup service event - setupEvent *flow.EpochSetup + setupEvent *flow.EpochSetup // EpochSetup service event } -func (es *setupEpoch) Counter() (uint64, error) { - return es.setupEvent.Counter, nil -} +var _ protocol.TentativeEpoch = (*setupEpoch)(nil) -func (es *setupEpoch) FirstView() (uint64, error) { - return es.setupEvent.FirstView, nil +func (es *setupEpoch) Counter() uint64 { + return es.setupEvent.Counter } -func (es *setupEpoch) DKGPhase1FinalView() (uint64, error) { - return es.setupEvent.DKGPhase1FinalView, nil +func (es *setupEpoch) InitialIdentities() flow.IdentitySkeletonList { + return es.setupEvent.Participants } -func (es *setupEpoch) DKGPhase2FinalView() (uint64, error) { - return es.setupEvent.DKGPhase2FinalView, nil +func (es *setupEpoch) Clustering() (flow.ClusterList, error) { + return ClusteringFromSetupEvent(es.setupEvent) } -func (es *setupEpoch) DKGPhase3FinalView() (uint64, error) { - return es.setupEvent.DKGPhase3FinalView, nil +// ClusteringFromSetupEvent generates a new clustering list from Epoch setup data. +// No errors expected during normal operation. +func ClusteringFromSetupEvent(setupEvent *flow.EpochSetup) (flow.ClusterList, error) { + // By convention, the Flow protocol accepts nodes with zero initial weight: those nodes are admitted into the network as spectators, + // but they can't actively contribute to any of the network functions. 
Specifically, collectors with zero weight cannot propose + // collections and neither would their votes count towards certifying collections. Consequently, zero-weighted collectors are not + // assigned to any cluster, and `factory.NewClusterList` function rejects inputs including such collectors. Instead, zero-weighted + // collectors should be dropped, before we call `factory.NewClusterList`. + collectorFilter := filter.And[flow.IdentitySkeleton]( + filter.HasRole[flow.IdentitySkeleton](flow.RoleCollection), + filter.HasInitialWeight[flow.IdentitySkeleton](true)) + clustering, err := factory.NewClusterList(setupEvent.Assignments, setupEvent.Participants.Filter(collectorFilter)) + if err != nil { + return nil, fmt.Errorf("failed to generate ClusterList from collector identities: %w", err) + } + return clustering, nil } -func (es *setupEpoch) FinalView() (uint64, error) { - return es.setupEvent.FinalView, nil +// committedEpoch is an implementation of [protocol.CommittedEpoch] backed by a [flow.EpochSetup] +// and [flow.EpochCommit] service events. +// Includes all epoch extensions which have been added as of the reference block. +type committedEpoch struct { + setupEpoch + commitEvent *flow.EpochCommit + extensions []flow.EpochExtension } -func (es *setupEpoch) RandomSource() ([]byte, error) { - return es.setupEvent.RandomSource, nil -} +var _ protocol.CommittedEpoch = (*committedEpoch)(nil) -func (es *setupEpoch) InitialIdentities() (flow.IdentityList, error) { - identities := es.setupEvent.Participants.Filter(filter.Any) - return identities, nil +func (es *committedEpoch) FirstView() uint64 { + return es.setupEvent.FirstView } -func (es *setupEpoch) Clustering() (flow.ClusterList, error) { - return ClusteringFromSetupEvent(es.setupEvent) +func (es *committedEpoch) DKGPhase1FinalView() uint64 { + return es.setupEvent.DKGPhase1FinalView } -func ClusteringFromSetupEvent(setupEvent *flow.EpochSetup) (flow.ClusterList, error) { - collectorFilter := filter.HasRole(flow.RoleCollection) - clustering, err := factory.NewClusterList(setupEvent.Assignments, setupEvent.Participants.Filter(collectorFilter)) - if err != nil { - return nil, fmt.Errorf("failed to generate ClusterList from collector identities: %w", err) - } - return clustering, nil +func (es *committedEpoch) DKGPhase2FinalView() uint64 { + return es.setupEvent.DKGPhase2FinalView } -func (es *setupEpoch) Cluster(_ uint) (protocol.Cluster, error) { - return nil, protocol.ErrNextEpochNotCommitted +func (es *committedEpoch) DKGPhase3FinalView() uint64 { + return es.setupEvent.DKGPhase3FinalView } -func (es *setupEpoch) ClusterByChainID(_ flow.ChainID) (protocol.Cluster, error) { - return nil, protocol.ErrNextEpochNotCommitted +// FinalView returns the final view of the epoch, taking into account possible epoch extensions. +// If there are no epoch extensions, the final view is the final view of the current epoch setup, +// otherwise it is the final view of the last epoch extension. +func (es *committedEpoch) FinalView() uint64 { + if len(es.extensions) > 0 { + return es.extensions[len(es.extensions)-1].FinalView + } + return es.setupEvent.FinalView +} + +// TargetDuration returns the desired real-world duration for this epoch, in seconds. +// This target is specified by the FlowEpoch smart contract in the EpochSetup event +// and used by the Cruise Control system to moderate the block rate. 
+// In case the epoch has extensions, the target duration is calculated based on the last extension, by calculating how many +// views were added by the extension and adding the proportional time to the target duration. +func (es *committedEpoch) TargetDuration() uint64 { + if len(es.extensions) == 0 { + return es.setupEvent.TargetDuration + } else { + viewDuration := float64(es.setupEvent.TargetDuration) / float64(es.setupEvent.FinalView-es.setupEvent.FirstView+1) + lastExtension := es.extensions[len(es.extensions)-1] + return es.setupEvent.TargetDuration + uint64(float64(lastExtension.FinalView-es.setupEvent.FinalView)*viewDuration) + } } -func (es *setupEpoch) DKG() (protocol.DKG, error) { - return nil, protocol.ErrNextEpochNotCommitted +// TargetEndTime returns the desired real-world end time for this epoch, represented as +// Unix Time (in units of seconds). This target is specified by the FlowEpoch smart contract in +// the EpochSetup event and used by the Cruise Control system to moderate the block rate. +// In case the epoch has extensions, the target end time is calculated based on the last extension, by calculating how many +// views were added by the extension and adding the proportional time to the target end time. +func (es *committedEpoch) TargetEndTime() uint64 { + if len(es.extensions) == 0 { + return es.setupEvent.TargetEndTime + } else { + viewDuration := float64(es.setupEvent.TargetDuration) / float64(es.setupEvent.FinalView-es.setupEvent.FirstView+1) + lastExtension := es.extensions[len(es.extensions)-1] + return es.setupEvent.TargetEndTime + uint64(float64(lastExtension.FinalView-es.setupEvent.FinalView)*viewDuration) + } } -func (es *setupEpoch) FirstHeight() (uint64, error) { - return 0, protocol.ErrEpochTransitionNotFinalized +func (es *committedEpoch) RandomSource() []byte { + return es.setupEvent.RandomSource } -func (es *setupEpoch) FinalHeight() (uint64, error) { - return 0, protocol.ErrEpochTransitionNotFinalized +func (es *committedEpoch) FirstHeight() (uint64, error) { + return 0, protocol.ErrUnknownEpochBoundary } -// committedEpoch is an implementation of protocol.Epoch backed by an EpochSetup -// and EpochCommit service event. This is used for converting service events to -// inmem.Epoch. 
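// A standalone sketch of the proportional-extension arithmetic used by TargetDuration
// and TargetEndTime above, with hypothetical numbers (the helper name is illustrative,
// not part of flow-go). A 1000-view epoch with a 2000 s target that is extended by
// 500 views gains 500 views * 2 s/view, so the result is 3000 s.
func extendedTargetDuration(firstView, finalView, targetDuration, extensionFinalView uint64) uint64 {
	// seconds per view, derived from the original EpochSetup parameters
	viewDuration := float64(targetDuration) / float64(finalView-firstView+1)
	// add the proportional time for the views appended by the last extension
	return targetDuration + uint64(float64(extensionFinalView-finalView)*viewDuration)
}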
-type committedEpoch struct { - setupEpoch - commitEvent *flow.EpochCommit +func (es *committedEpoch) FinalHeight() (uint64, error) { + return 0, protocol.ErrUnknownEpochBoundary } func (es *committedEpoch) Cluster(index uint) (protocol.Cluster, error) { - epochCounter := es.setupEvent.Counter clustering, err := es.Clustering() @@ -219,12 +234,18 @@ func (es *committedEpoch) Cluster(index uint) (protocol.Cluster, error) { return nil, fmt.Errorf("could not encode signer indices for rootQCVoteData.VoterIDs: %w", err) } - rootBlock := cluster.CanonicalRootBlock(epochCounter, members) - rootQC := &flow.QuorumCertificate{ - View: rootBlock.Header.View, + rootBlock, err := cluster.CanonicalRootBlock(epochCounter, members) + if err != nil { + return nil, fmt.Errorf("could not generate canonical root block: %w", err) + } + rootQC, err := flow.NewQuorumCertificate(flow.UntrustedQuorumCertificate{ + View: rootBlock.View, BlockID: rootBlock.ID(), SignerIndices: signerIndices, SigData: rootQCVoteData.SigData, + }) + if err != nil { + return nil, fmt.Errorf("could not build root quorum certificate: %w", err) } cluster, err := ClusterFromEncodable(EncodableCluster{ @@ -256,53 +277,42 @@ func (es *committedEpoch) ClusterByChainID(chainID flow.ChainID) (protocol.Clust } func (es *committedEpoch) DKG() (protocol.DKG, error) { - // filter initial participants to valid DKG participants - participants := es.setupEvent.Participants.Filter(filter.IsValidDKGParticipant) - lookup, err := flow.ToDKGParticipantLookup(participants, es.commitEvent.DKGParticipantKeys) - if err != nil { - return nil, fmt.Errorf("could not construct dkg lookup: %w", err) - } - - dkg, err := DKGFromEncodable(EncodableDKG{ - GroupKey: encodable.RandomBeaconPubKey{ - PublicKey: es.commitEvent.DKGGroupKey, - }, - Participants: lookup, - }) - return dkg, err + return NewDKG(es.setupEvent, es.commitEvent), nil } -// startedEpoch represents an epoch (with counter N) that has started, but there is no _finalized_ transition -// to the next epoch yet. Note that nodes can already be in views belonging to the _next_ Epoch, and it is -// possible that there are already unfinalized blocks in that next epoch. However, without finalized blocks -// in Epoch N+1, there is no definition of "last block" for Epoch N. +// heightBoundedEpoch represents an epoch (with counter N) for which we know either +// its start boundary, end boundary, or both. A boundary is included when: +// - it occurred after this node's lowest known block AND +// - it occurred before the latest finalized block (ie. the boundary is defined) // -// startedEpoch has all the information of a committedEpoch, plus the epoch's first block height. -type startedEpoch struct { +// heightBoundedEpoch has all the information of a committedEpoch, plus one or +// both height boundaries for the epoch. +type heightBoundedEpoch struct { committedEpoch - firstHeight uint64 + firstHeight *uint64 + finalHeight *uint64 } -func (e *startedEpoch) FirstHeight() (uint64, error) { - return e.firstHeight, nil -} +var _ protocol.CommittedEpoch = (*heightBoundedEpoch)(nil) -// endedEpoch is an epoch which has ended (ie. the previous epoch). It has all the -// information of a startedEpoch, plus the epoch's final block height. 
-type endedEpoch struct { - startedEpoch - finalHeight uint64 +func (e *heightBoundedEpoch) FirstHeight() (uint64, error) { + if e.firstHeight != nil { + return *e.firstHeight, nil + } + return 0, protocol.ErrUnknownEpochBoundary } -func (e *endedEpoch) FinalHeight() (uint64, error) { - return e.finalHeight, nil +func (e *heightBoundedEpoch) FinalHeight() (uint64, error) { + if e.finalHeight != nil { + return *e.finalHeight, nil + } + return 0, protocol.ErrUnknownEpochBoundary } -// NewSetupEpoch returns a memory-backed epoch implementation based on an -// EpochSetup event. Epoch information available after the setup phase will -// not be accessible in the resulting epoch instance. +// NewSetupEpoch returns a memory-backed epoch implementation based on an EpochSetup event. +// Epoch information available after the setup phase will not be accessible in the resulting epoch instance. // No errors are expected during normal operations. -func NewSetupEpoch(setupEvent *flow.EpochSetup) protocol.Epoch { +func NewSetupEpoch(setupEvent *flow.EpochSetup) protocol.TentativeEpoch { return &setupEpoch{ setupEvent: setupEvent, } @@ -311,44 +321,63 @@ func NewSetupEpoch(setupEvent *flow.EpochSetup) protocol.Epoch { // NewCommittedEpoch returns a memory-backed epoch implementation based on an // EpochSetup and EpochCommit events. // No errors are expected during normal operations. -func NewCommittedEpoch(setupEvent *flow.EpochSetup, commitEvent *flow.EpochCommit) protocol.Epoch { +func NewCommittedEpoch(setupEvent *flow.EpochSetup, commitEvent *flow.EpochCommit, extensions []flow.EpochExtension) protocol.CommittedEpoch { return &committedEpoch{ setupEpoch: setupEpoch{ setupEvent: setupEvent, }, commitEvent: commitEvent, + extensions: extensions, + } +} + +// NewEpochWithStartBoundary returns a memory-backed epoch implementation based on an +// EpochSetup and EpochCommit events, and the epoch's first block height (start boundary). +// No errors are expected during normal operations. +func NewEpochWithStartBoundary(setupEvent *flow.EpochSetup, commitEvent *flow.EpochCommit, extensions []flow.EpochExtension, firstHeight uint64) protocol.CommittedEpoch { + return &heightBoundedEpoch{ + committedEpoch: committedEpoch{ + setupEpoch: setupEpoch{ + setupEvent: setupEvent, + }, + commitEvent: commitEvent, + extensions: extensions, + }, + firstHeight: &firstHeight, + finalHeight: nil, } } -// NewStartedEpoch returns a memory-backed epoch implementation based on an -// EpochSetup and EpochCommit events, and the epoch's first block height. +// NewEpochWithEndBoundary returns a memory-backed epoch implementation based on an +// EpochSetup and EpochCommit events, and the epoch's final block height (end boundary). // No errors are expected during normal operations. -func NewStartedEpoch(setupEvent *flow.EpochSetup, commitEvent *flow.EpochCommit, firstHeight uint64) protocol.Epoch { - return &startedEpoch{ +func NewEpochWithEndBoundary(setupEvent *flow.EpochSetup, commitEvent *flow.EpochCommit, extensions []flow.EpochExtension, finalHeight uint64) protocol.CommittedEpoch { + return &heightBoundedEpoch{ committedEpoch: committedEpoch{ setupEpoch: setupEpoch{ setupEvent: setupEvent, }, commitEvent: commitEvent, + extensions: extensions, }, - firstHeight: firstHeight, + firstHeight: nil, + finalHeight: &finalHeight, } } -// NewEndedEpoch returns a memory-backed epoch implementation based on an -// EpochSetup and EpochCommit events, and the epoch's final block height. 
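// Hedged usage sketch of the boundary-aware constructors above; setupEvent, commitEvent,
// extensions, and firstHeight are assumed to be available from the caller's context.
// Only the boundaries that are actually known are supplied; querying an unknown boundary
// returns protocol.ErrUnknownEpochBoundary.
//
//	epoch := NewEpochWithStartBoundary(setupEvent, commitEvent, extensions, firstHeight)
//	if _, err := epoch.FinalHeight(); errors.Is(err, protocol.ErrUnknownEpochBoundary) {
//		// the epoch's end boundary has not been finalized yet
//	}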
+// NewEpochWithStartAndEndBoundaries returns a memory-backed epoch implementation based on +// EpochSetup and EpochCommit events, and the epoch's first and final block heights (start+end boundaries). // No errors are expected during normal operations. -func NewEndedEpoch(setupEvent *flow.EpochSetup, commitEvent *flow.EpochCommit, firstHeight, finalHeight uint64) protocol.Epoch { - return &endedEpoch{ - startedEpoch: startedEpoch{ - committedEpoch: committedEpoch{ - setupEpoch: setupEpoch{ - setupEvent: setupEvent, - }, - commitEvent: commitEvent, +func NewEpochWithStartAndEndBoundaries(setupEvent *flow.EpochSetup, commitEvent *flow.EpochCommit, extensions []flow.EpochExtension, firstHeight, finalHeight uint64) protocol.CommittedEpoch { + return &heightBoundedEpoch{ + committedEpoch: committedEpoch{ + setupEpoch: setupEpoch{ + setupEvent: setupEvent, }, - firstHeight: firstHeight, + commitEvent: commitEvent, + extensions: extensions, }, - finalHeight: finalHeight, + firstHeight: &firstHeight, + finalHeight: &finalHeight, } } diff --git a/state/protocol/inmem/epoch_protocol_state.go b/state/protocol/inmem/epoch_protocol_state.go new file mode 100644 index 00000000000..11d315638ac --- /dev/null +++ b/state/protocol/inmem/epoch_protocol_state.go @@ -0,0 +1,122 @@ +package inmem + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" +) + +// EpochProtocolStateAdapter implements protocol.EpochProtocolState by wrapping a flow.RichEpochStateEntry. + +//structwrite:immutable - mutations allowed only within the constructor +type EpochProtocolStateAdapter struct { + *flow.RichEpochStateEntry + Params protocol.GlobalParams +} + +var _ protocol.EpochProtocolState = (*EpochProtocolStateAdapter)(nil) + +// UntrustedEpochProtocolStateAdapter is an untrusted input-only representation of an EpochProtocolStateAdapter, +// used for construction. + +// This type exists to ensure that constructor functions are invoked explicitly +// with named fields, which improves clarity and reduces the risk of incorrect field +// ordering during construction. + +// An instance of UntrustedEpochProtocolStateAdapter should be validated and converted into +// a trusted EpochProtocolStateAdapter using the NewEpochProtocolStateAdapter constructor. +type UntrustedEpochProtocolStateAdapter EpochProtocolStateAdapter + +// NewEpochProtocolStateAdapter creates a new instance of EpochProtocolStateAdapter. +// Construction of an EpochProtocolStateAdapter is allowed only within the constructor. + +// All errors indicate that a valid EpochProtocolStateAdapter cannot be constructed from the input. +func NewEpochProtocolStateAdapter(untrusted UntrustedEpochProtocolStateAdapter) (*EpochProtocolStateAdapter, error) { + if untrusted.Params == nil { + return nil, fmt.Errorf("params must not be nil") + } + if untrusted.RichEpochStateEntry == nil { + return nil, fmt.Errorf("rich epoch state must not be nil") + } + return &EpochProtocolStateAdapter{ + RichEpochStateEntry: untrusted.RichEpochStateEntry, + Params: untrusted.Params, + }, nil +} + +// Epoch returns the current epoch counter. +func (s *EpochProtocolStateAdapter) Epoch() uint64 { + return s.CurrentEpochSetup.Counter +} + +// Clustering returns the cluster assignment for the current epoch. +// No errors are expected during normal operations.
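// Illustrative construction of the adapter via the untrusted-input pattern enforced
// above (a sketch; `entry` is assumed to be a *flow.RichEpochStateEntry and `params`
// a protocol.GlobalParams obtained elsewhere):
//
//	adapter, err := NewEpochProtocolStateAdapter(UntrustedEpochProtocolStateAdapter{
//		RichEpochStateEntry: entry,
//		Params:              params,
//	})
//	if err != nil {
//		return fmt.Errorf("could not construct epoch protocol state: %w", err)
//	}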
+func (s *EpochProtocolStateAdapter) Clustering() (flow.ClusterList, error) { + clustering, err := ClusteringFromSetupEvent(s.CurrentEpochSetup) + if err != nil { + return nil, fmt.Errorf("could not extract cluster list from setup event: %w", err) + } + return clustering, nil +} + +// EpochSetup returns the flow.EpochSetup service event which partly defines the +// initial epoch state for the current epoch. +func (s *EpochProtocolStateAdapter) EpochSetup() *flow.EpochSetup { + return s.CurrentEpochSetup +} + +// EpochCommit returns the flow.EpochCommit service event which partly defines the +// initial epoch state for the current epoch. +func (s *EpochProtocolStateAdapter) EpochCommit() *flow.EpochCommit { + return s.CurrentEpochCommit +} + +// DKG returns the DKG information for the current epoch. +// No errors are expected during normal operations. +func (s *EpochProtocolStateAdapter) DKG() (protocol.DKG, error) { + return NewDKG(s.CurrentEpochSetup, s.CurrentEpochCommit), nil +} + +// Entry returns the low-level protocol state entry that was used to initialize this object. +// It shouldn't be used by high-level logic; it is useful for some cases such as bootstrapping. +// Prefer using other methods to access protocol state. +func (s *EpochProtocolStateAdapter) Entry() *flow.RichEpochStateEntry { + return s.RichEpochStateEntry.Copy() +} + +// Identities returns the identity table as of the current block. +func (s *EpochProtocolStateAdapter) Identities() flow.IdentityList { + return s.RichEpochStateEntry.CurrentEpochIdentityTable +} + +// GlobalParams returns spork-scoped global network parameters. +func (s *EpochProtocolStateAdapter) GlobalParams() protocol.GlobalParams { + return s.Params +} + +// EpochFallbackTriggered denotes whether an invalid epoch state transition was attempted +// on the fork ending in this block. Once the first block where this flag is true is finalized, epoch +// fallback mode is triggered. +// TODO for 'leaving Epoch Fallback via special service event': at the moment, this is a one-way transition and requires a spork to recover - need to revisit for sporkless EFM recovery +func (s *EpochProtocolStateAdapter) EpochFallbackTriggered() bool { + return s.MinEpochStateEntry.EpochFallbackTriggered +} + +// PreviousEpochExists returns true if a previous epoch exists. This is true for all epochs +// except those immediately following a spork. +func (s *EpochProtocolStateAdapter) PreviousEpochExists() bool { + return s.PreviousEpoch != nil +} + +// EpochExtensions returns the epoch extensions associated with the current epoch, if any. +func (s *EpochProtocolStateAdapter) EpochExtensions() []flow.EpochExtension { + return s.CurrentEpoch.EpochExtensions +} + +// EpochPhase returns the epoch phase for the current epoch. +// The receiver must be properly constructed. +// See flow.EpochPhase for detailed documentation.
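// Read-path sketch for the adapter, assuming it was constructed as in the snippet
// above; filter.HasRole is the generic identity filter already used elsewhere in
// this changeset:
//
//	consensusNodes := adapter.Identities().Filter(filter.HasRole[flow.Identity](flow.RoleConsensus))
//	phase := adapter.EpochPhase() // e.g. flow.EpochPhaseStaking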
+func (s *EpochProtocolStateAdapter) EpochPhase() flow.EpochPhase { + return s.Entry().EpochPhase() +} diff --git a/state/protocol/inmem/epoch_protocol_state_test.go b/state/protocol/inmem/epoch_protocol_state_test.go new file mode 100644 index 00000000000..60a478dc06f --- /dev/null +++ b/state/protocol/inmem/epoch_protocol_state_test.go @@ -0,0 +1,188 @@ +package inmem_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow/filter" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestEpochProtocolStateAdapter tests if the EpochProtocolStateAdapter returns expected values when created +// using constructor passing a RichEpochStateEntry. +func TestEpochProtocolStateAdapter(t *testing.T) { + // construct a valid protocol state entry that has semantically correct DKGParticipantKeys + entry := unittest.EpochStateFixture(unittest.WithValidDKG()) + + globalParams := mock.NewGlobalParams(t) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) + + t.Run("clustering", func(t *testing.T) { + clustering, err := inmem.ClusteringFromSetupEvent(entry.CurrentEpochSetup) + require.NoError(t, err) + actual, err := adapter.Clustering() + require.NoError(t, err) + assert.Equal(t, clustering, actual) + }) + t.Run("epoch", func(t *testing.T) { + assert.Equal(t, entry.CurrentEpochSetup.Counter, adapter.Epoch()) + }) + t.Run("setup", func(t *testing.T) { + assert.Equal(t, entry.CurrentEpochSetup, adapter.EpochSetup()) + }) + t.Run("commit", func(t *testing.T) { + assert.Equal(t, entry.CurrentEpochCommit, adapter.EpochCommit()) + }) + t.Run("dkg", func(t *testing.T) { + dkg, err := adapter.DKG() + require.NoError(t, err) + assert.Equal(t, entry.CurrentEpochCommit.DKGGroupKey, dkg.GroupKey()) + assert.Equal(t, len(entry.CurrentEpochCommit.DKGParticipantKeys), int(dkg.Size())) + dkgParticipants := entry.CurrentEpochSetup.Participants.Filter(filter.IsConsensusCommitteeMember) + for _, identity := range dkgParticipants { + keyShare, err := dkg.KeyShare(identity.NodeID) + require.NoError(t, err) + index, err := dkg.Index(identity.NodeID) + require.NoError(t, err) + assert.Equal(t, entry.CurrentEpochCommit.DKGParticipantKeys[index], keyShare) + } + }) + t.Run("entry", func(t *testing.T) { + actualEntry := adapter.Entry() + assert.Equal(t, entry, actualEntry, "entry should be equal to the one passed to the constructor") + assert.NotSame(t, entry, actualEntry, "entry should be a copy of the one passed to the constructor") + }) + t.Run("identities", func(t *testing.T) { + assert.Equal(t, entry.CurrentEpochIdentityTable, adapter.Identities()) + }) + t.Run("global-params", func(t *testing.T) { + expectedChainID := flow.Testnet + globalParams.On("ChainID").Return(expectedChainID, nil).Once() + actualChainID := adapter.GlobalParams().ChainID() + assert.Equal(t, expectedChainID, actualChainID) + }) + t.Run("epoch-phase-staking", func(t *testing.T) { + entry := unittest.EpochStateFixture() + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) + assert.Equal(t, flow.EpochPhaseStaking, adapter.EpochPhase()) + assert.True(t, 
adapter.PreviousEpochExists()) + assert.False(t, adapter.EpochFallbackTriggered()) + }) + t.Run("epoch-phase-setup", func(t *testing.T) { + entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) + // cleanup the commit event, so we are in setup phase + entry.NextEpoch.CommitID = flow.ZeroID + entry.NextEpochCommit = nil + + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) + assert.Equal(t, flow.EpochPhaseSetup, adapter.EpochPhase()) + assert.True(t, adapter.PreviousEpochExists()) + assert.False(t, adapter.EpochFallbackTriggered()) + }) + t.Run("epoch-phase-commit", func(t *testing.T) { + entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) + assert.Equal(t, flow.EpochPhaseCommitted, adapter.EpochPhase()) + assert.True(t, adapter.PreviousEpochExists()) + assert.False(t, adapter.EpochFallbackTriggered()) + }) + t.Run("epoch-fallback-triggered", func(t *testing.T) { + t.Run("tentatively staking phase", func(t *testing.T) { + entry := unittest.EpochStateFixture(func(entry *flow.RichEpochStateEntry) { + entry.EpochFallbackTriggered = true + }) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) + assert.True(t, adapter.EpochFallbackTriggered()) + assert.Equal(t, flow.EpochPhaseFallback, entry.EpochPhase()) + }) + t.Run("tentatively committed phase", func(t *testing.T) { + entry := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.EpochFallbackTriggered = true + }) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) + assert.True(t, adapter.EpochFallbackTriggered()) + assert.Equal(t, flow.EpochPhaseCommitted, entry.EpochPhase()) + }) + }) + t.Run("no-previous-epoch", func(t *testing.T) { + entry := unittest.EpochStateFixture(func(entry *flow.RichEpochStateEntry) { + entry.PreviousEpoch = nil + entry.PreviousEpochSetup = nil + entry.PreviousEpochCommit = nil + }) + adapter, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry, + Params: globalParams, + }, + ) + require.NoError(t, err) + assert.False(t, adapter.PreviousEpochExists()) + }) + + // Invalid input with nil Params + t.Run("invalid - nil Params", func(t *testing.T) { + _, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: unittest.EpochStateFixture(), + Params: nil, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "params must not be nil") + }) + + // Invalid input with nil RichEpochStateEntry + t.Run("invalid - nil RichEpochStateEntry", func(t *testing.T) { + _, err := inmem.NewEpochProtocolStateAdapter( + inmem.UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: nil, + Params: globalParams, + }, + ) + require.Error(t, err) + require.Contains(t, err.Error(), "rich epoch state must not be nil") + }) +} diff --git a/state/protocol/inmem/params.go b/state/protocol/inmem/params.go index 
15f01f20f6a..453eac55a55 100644 --- a/state/protocol/inmem/params.go +++ b/state/protocol/inmem/params.go @@ -11,22 +11,24 @@ type Params struct { var _ protocol.GlobalParams = (*Params)(nil) -func (p Params) ChainID() (flow.ChainID, error) { - return p.enc.ChainID, nil +func NewParams(enc EncodableParams) *Params { + return &Params{ + enc: enc, + } } -func (p Params) SporkID() (flow.Identifier, error) { - return p.enc.SporkID, nil +func (p Params) ChainID() flow.ChainID { + return p.enc.ChainID } -func (p Params) SporkRootBlockHeight() (uint64, error) { - return p.enc.SporkRootBlockHeight, nil +func (p Params) SporkID() flow.Identifier { + return p.enc.SporkID } -func (p Params) ProtocolVersion() (uint, error) { - return p.enc.ProtocolVersion, nil +func (p Params) SporkRootBlockHeight() uint64 { + return p.enc.SporkRootBlockHeight } -func (p Params) EpochCommitSafetyThreshold() (uint64, error) { - return p.enc.EpochCommitSafetyThreshold, nil +func (p Params) SporkRootBlockView() uint64 { + return p.enc.SporkRootBlockView } diff --git a/state/protocol/inmem/snapshot.go b/state/protocol/inmem/snapshot.go index 228c319aa91..9ec2293741a 100644 --- a/state/protocol/inmem/snapshot.go +++ b/state/protocol/inmem/snapshot.go @@ -1,9 +1,13 @@ package inmem import ( + "fmt" + + "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/seed" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" ) // Snapshot is a memory-backed implementation of protocol.Snapshot. The snapshot @@ -16,31 +20,53 @@ type Snapshot struct { var _ protocol.Snapshot = (*Snapshot)(nil) func (s Snapshot) Head() (*flow.Header, error) { - return s.enc.Head, nil + return s.enc.Head(), nil } func (s Snapshot) QuorumCertificate() (*flow.QuorumCertificate, error) { return s.enc.QuorumCertificate, nil } -func (s Snapshot) Identities(selector flow.IdentityFilter) (flow.IdentityList, error) { - return s.enc.Identities.Filter(selector), nil +func (s Snapshot) Identities(selector flow.IdentityFilter[flow.Identity]) (flow.IdentityList, error) { + protocolState, err := s.EpochProtocolState() + if err != nil { + return nil, fmt.Errorf("could not access protocol state: %w", err) + } + return protocolState.Identities().Filter(selector), nil } func (s Snapshot) Identity(nodeID flow.Identifier) (*flow.Identity, error) { - identity, ok := s.enc.Identities.ByNodeID(nodeID) - if !ok { + // filter identities at snapshot for node ID + identities, err := s.Identities(filter.HasNodeID[flow.Identity](nodeID)) + if err != nil { + return nil, fmt.Errorf("could not get identities: %w", err) + } + + // check if node ID is part of identities + if len(identities) == 0 { return nil, protocol.IdentityNotFoundError{NodeID: nodeID} } - return identity, nil + return identities[0], nil } func (s Snapshot) Commit() (flow.StateCommitment, error) { - return s.enc.LatestSeal.FinalState, nil + latestSeal, err := s.enc.LatestSeal() + if err != nil { + return flow.StateCommitment{}, err + } + return latestSeal.FinalState, nil } func (s Snapshot) SealedResult() (*flow.ExecutionResult, *flow.Seal, error) { - return s.enc.LatestResult, s.enc.LatestSeal, nil + latestSeal, err := s.enc.LatestSeal() + if err != nil { + return nil, nil, err + } + latestSealedResult, err := s.enc.LatestSealedResult() + if err != nil { + return nil, nil, err + } + return latestSealedResult, latestSeal, nil } func (s
Snapshot) SealingSegment() (*flow.SealingSegment, error) { @@ -52,16 +78,22 @@ func (s Snapshot) Descendants() ([]flow.Identifier, error) { return nil, nil } -func (s Snapshot) Phase() (flow.EpochPhase, error) { - return s.enc.Phase, nil +func (s Snapshot) EpochPhase() (flow.EpochPhase, error) { + epochProtocolState, err := s.EpochProtocolState() + if err != nil { + return flow.EpochPhaseUndefined, fmt.Errorf("could not get epoch protocol state: %w", err) + } + return epochProtocolState.EpochPhase(), nil } func (s Snapshot) RandomSource() ([]byte, error) { - return seed.FromParentQCSignature(s.enc.QuorumCertificate.SigData) + return model.BeaconSignature(s.enc.QuorumCertificate) } func (s Snapshot) Epochs() protocol.EpochQuery { - return Epochs{s.enc.Epochs} + return Epochs{ + entry: *s.enc.SealingSegment.LatestProtocolStateEntry().EpochEntry, + } } func (s Snapshot) Params() protocol.GlobalParams { @@ -72,43 +104,27 @@ func (s Snapshot) Encodable() EncodableSnapshot { return s.enc } -func SnapshotFromEncodable(enc EncodableSnapshot) *Snapshot { - return &Snapshot{ - enc: enc, - } +func (s Snapshot) EpochProtocolState() (protocol.EpochProtocolState, error) { + entry := s.enc.SealingSegment.LatestProtocolStateEntry() + return NewEpochProtocolStateAdapter( + UntrustedEpochProtocolStateAdapter{ + RichEpochStateEntry: entry.EpochEntry, + Params: s.Params(), + }, + ) } -// StrippedInmemSnapshot removes all the networking address in the snapshot -func StrippedInmemSnapshot(snapshot EncodableSnapshot) EncodableSnapshot { - removeAddress := func(ids flow.IdentityList) { - for _, identity := range ids { - identity.Address = "" - } - } - - removeAddressFromEpoch := func(epoch *EncodableEpoch) { - if epoch == nil { - return - } - removeAddress(epoch.InitialIdentities) - for _, cluster := range epoch.Clustering { - removeAddress(cluster) - } - for _, c := range epoch.Clusters { - removeAddress(c.Members) - } - } +func (s Snapshot) ProtocolState() (protocol.KVStoreReader, error) { + entry := s.enc.SealingSegment.LatestProtocolStateEntry() + return kvstore.VersionedDecode(entry.KVStore.Version, entry.KVStore.Data) +} - removeAddress(snapshot.Identities) - removeAddressFromEpoch(snapshot.Epochs.Previous) - removeAddressFromEpoch(&snapshot.Epochs.Current) - removeAddressFromEpoch(snapshot.Epochs.Next) +func (s Snapshot) VersionBeacon() (*flow.SealedVersionBeacon, error) { + return s.enc.SealedVersionBeacon, nil +} - for _, event := range snapshot.LatestResult.ServiceEvents { - switch event.Type { - case flow.ServiceEventSetup: - removeAddress(event.Event.(*flow.EpochSetup).Participants) - } +func SnapshotFromEncodable(enc EncodableSnapshot) *Snapshot { + return &Snapshot{ + enc: enc, } - return snapshot } diff --git a/state/protocol/invalid/epoch.go b/state/protocol/invalid/epoch.go deleted file mode 100644 index cf4777b4f33..00000000000 --- a/state/protocol/invalid/epoch.go +++ /dev/null @@ -1,119 +0,0 @@ -package invalid - -import ( - "errors" - "fmt" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state" - "github.com/onflow/flow-go/state/protocol" -) - -// Epoch represents an epoch that does not exist or could not be retrieved. -type Epoch struct { - err error -} - -// NewEpoch returns a new invalid epoch, containing an error describing why the -// epoch could not be retrieved. The following are expected errors when constructing -// an invalid Epoch: -// - protocol.ErrNoPreviousEpoch - if the epoch represents a previous epoch which does not exist. 
-// This happens when the previous epoch is queried within the first epoch of a spork. -// - protocol.ErrNextEpochNotSetup - if the epoch represents a next epoch which has not been set up. -// This happens when the next epoch is queried within the EpochStaking phase of any epoch. -// - state.ErrUnknownSnapshotReference - if the epoch is queried from an unresolvable snapshot. -// - generic error in case of unexpected critical internal corruption or bugs -func NewEpoch(err error) *Epoch { - if errors.Is(err, protocol.ErrNoPreviousEpoch) { - return &Epoch{err: err} - } - if errors.Is(err, protocol.ErrNextEpochNotSetup) { - return &Epoch{err: err} - } - if errors.Is(err, state.ErrUnknownSnapshotReference) { - return &Epoch{err: err} - } - return &Epoch{err: fmt.Errorf("critical unexpected error querying epoch: %w", err)} -} - -// NewEpochf is NewEpoch with ergonomic error formatting. -func NewEpochf(msg string, args ...interface{}) *Epoch { - return NewEpoch(fmt.Errorf(msg, args...)) -} - -func (u *Epoch) Counter() (uint64, error) { - return 0, u.err -} - -func (u *Epoch) FirstView() (uint64, error) { - return 0, u.err -} - -func (u *Epoch) FinalView() (uint64, error) { - return 0, u.err -} - -func (u *Epoch) DKGPhase1FinalView() (uint64, error) { - return 0, u.err -} - -func (u *Epoch) DKGPhase2FinalView() (uint64, error) { - return 0, u.err -} - -func (u *Epoch) DKGPhase3FinalView() (uint64, error) { - return 0, u.err -} - -func (u *Epoch) InitialIdentities() (flow.IdentityList, error) { - return nil, u.err -} - -func (u *Epoch) Clustering() (flow.ClusterList, error) { - return nil, u.err -} - -func (u *Epoch) Cluster(uint) (protocol.Cluster, error) { - return nil, u.err -} - -func (u *Epoch) ClusterByChainID(chainID flow.ChainID) (protocol.Cluster, error) { - return nil, u.err -} - -func (u *Epoch) DKG() (protocol.DKG, error) { - return nil, u.err -} - -func (u *Epoch) RandomSource() ([]byte, error) { - return nil, u.err -} - -func (u *Epoch) FirstHeight() (uint64, error) { - return 0, u.err -} - -func (u *Epoch) FinalHeight() (uint64, error) { - return 0, u.err -} - -// Epochs is an epoch query for an invalid snapshot. 
-type Epochs struct { - err error -} - -func (u *Snapshot) Epochs() protocol.EpochQuery { - return &Epochs{err: u.err} -} - -func (u *Epochs) Current() protocol.Epoch { - return NewEpoch(u.err) -} - -func (u *Epochs) Next() protocol.Epoch { - return NewEpoch(u.err) -} - -func (u *Epochs) Previous() protocol.Epoch { - return NewEpoch(u.err) -} diff --git a/state/protocol/invalid/params.go b/state/protocol/invalid/params.go index a131d4517a8..9a4b77dd8c2 100644 --- a/state/protocol/invalid/params.go +++ b/state/protocol/invalid/params.go @@ -9,22 +9,18 @@ type Params struct { err error } -func (p Params) ChainID() (flow.ChainID, error) { - return "", p.err +func (p Params) ChainID() flow.ChainID { + return "" } -func (p Params) SporkID() (flow.Identifier, error) { - return flow.ZeroID, p.err +func (p Params) SporkID() flow.Identifier { + return flow.ZeroID } -func (p Params) SporkRootBlockHeight() (uint64, error) { - return 0, p.err +func (p Params) SporkRootBlockHeight() uint64 { + return 0 } -func (p Params) ProtocolVersion() (uint, error) { - return 0, p.err -} - -func (p Params) EpochCommitSafetyThreshold() (uint64, error) { - return 0, p.err +func (p Params) SporkRootBlockView() uint64 { + return 0 } diff --git a/state/protocol/invalid/snapshot.go b/state/protocol/invalid/snapshot.go index ab54103c191..814b5a388aa 100644 --- a/state/protocol/invalid/snapshot.go +++ b/state/protocol/invalid/snapshot.go @@ -27,6 +27,8 @@ func NewSnapshot(err error) *Snapshot { return &Snapshot{fmt.Errorf("critical unexpected error querying snapshot: %w", err)} } +var _ protocol.Snapshot = (*Snapshot)(nil) + // NewSnapshotf is NewSnapshot with ergonomic error formatting. func NewSnapshotf(msg string, args ...interface{}) *Snapshot { return NewSnapshot(fmt.Errorf(msg, args...)) @@ -40,11 +42,15 @@ func (u *Snapshot) QuorumCertificate() (*flow.QuorumCertificate, error) { return nil, u.err } -func (u *Snapshot) Phase() (flow.EpochPhase, error) { +func (u *Snapshot) EpochPhase() (flow.EpochPhase, error) { return 0, u.err } -func (u *Snapshot) Identities(_ flow.IdentityFilter) (flow.IdentityList, error) { +func (u *Snapshot) Epochs() protocol.EpochQuery { + return EpochQuery{u.err} +} + +func (u *Snapshot) Identities(_ flow.IdentityFilter[flow.Identity]) (flow.IdentityList, error) { return nil, u.err } @@ -75,3 +81,36 @@ func (u *Snapshot) RandomSource() ([]byte, error) { func (u *Snapshot) Params() protocol.GlobalParams { return Params{u.err} } + +func (u *Snapshot) EpochProtocolState() (protocol.EpochProtocolState, error) { + return nil, u.err +} + +func (u *Snapshot) ProtocolState() (protocol.KVStoreReader, error) { + return nil, u.err +} + +func (u *Snapshot) VersionBeacon() (*flow.SealedVersionBeacon, error) { + return nil, u.err +} + +// EpochQuery represents the epoch information for an invalid state snapshot query. 
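// For contrast with the always-failing implementation below, a sketch of how callers
// consume a healthy snapshot's epoch query (snapshot is assumed to be a protocol.Snapshot):
//
//	current, err := snapshot.Epochs().Current()
//	if err != nil {
//		return fmt.Errorf("could not get current epoch: %w", err)
//	}
//	counter := current.Counter() // infallible on a CommittedEpoch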
+type EpochQuery struct { + err error +} + +func (e EpochQuery) Current() (protocol.CommittedEpoch, error) { + return nil, e.err +} + +func (e EpochQuery) NextUnsafe() (protocol.TentativeEpoch, error) { + return nil, e.err +} + +func (e EpochQuery) NextCommitted() (protocol.CommittedEpoch, error) { + return nil, e.err +} + +func (e EpochQuery) Previous() (protocol.CommittedEpoch, error) { + return nil, e.err +} diff --git a/state/protocol/kvstore.go b/state/protocol/kvstore.go new file mode 100644 index 00000000000..dd8e8db94ef --- /dev/null +++ b/state/protocol/kvstore.go @@ -0,0 +1,285 @@ +package protocol + +import ( + "io" + "math" + "slices" + + "github.com/ethereum/go-ethereum/rlp" + + "github.com/onflow/flow-go/model/flow" +) + +// This file contains versioned read interface to the Protocol State's +// key-value store and are used by the Protocol State Machine. +// +// When a key is added or removed, this requires a new protocol state version: +// - Create a new versioned model in ./protocol_state/kvstore/models.go (eg. modelv3 if latest model is modelv2) +// - Update the KVStoreReader and protocol_state.KVStoreAPI interfaces to include any new keys + +// KVStoreReader is the latest read-only interface to the Protocol State key-value store +// at a particular block. +// +// Caution: +// Engineers evolving this interface must ensure that it is backwards-compatible +// with all versions of Protocol State Snapshots that can be retrieved from the local +// database, which should exactly correspond to the versioned model types defined in +// ./kvstore/models.go +type KVStoreReader interface { + // ID returns an identifier for this key-value store snapshot by hashing internal fields. + // Two different model versions containing the same data must have different IDs. + // New models should use `makeVersionedModelID` to implement ID. + ID() flow.Identifier + + // v0/v1 + + VersionedEncodable + + // GetProtocolStateVersion returns the Protocol State Version that created the specific + // Snapshot backing this interface instance. Slightly simplified, the Protocol State + // Version defines the key-value store's data model (specifically, the set of all keys + // and the respective type for each corresponding value). + // Generally, changes in the protocol state version correspond to changes in the set + // of key-value pairs which are supported, and which model is used for serialization. + // The protocol state version is updated by UpdateKVStoreVersion service events. + GetProtocolStateVersion() uint64 + + // GetVersionUpgrade returns the upgrade version of protocol. + // VersionUpgrade is a view-based activator that specifies the version which has to be applied + // and the view from which on it has to be applied. After an upgrade activation view has passed, + // the (version, view) data remains in the state until the next upgrade is scheduled (essentially + // persisting the most recent past update until a subsequent update is scheduled). + GetVersionUpgrade() *ViewBasedActivator[uint64] + + // GetEpochStateID returns the state ID of the epoch state. + // This is part of the most basic model and is used to commit the epoch state to the KV store. + GetEpochStateID() flow.Identifier + + // GetEpochExtensionViewCount returns the number of views for a hypothetical epoch extension. Note + // that this value can change at runtime (through a service event). When a new extension is added, + // the view count is used right at this point in the protocol state's evolution. 
In other words, + // different extensions can have different view counts. + GetEpochExtensionViewCount() uint64 + + // GetFinalizationSafetyThreshold returns the FinalizationSafetyThreshold's current value `t`. + // The FinalizationSafetyThreshold is maintained by the protocol state, with correctness and + // consistency of updates across all nodes guaranteed by BFT consensus. + // + // In a nutshell, the FinalizationSafetyThreshold is a protocol axiom: + // It specifies the number of views `t`, such that when an honest node enters or surpasses + // view `v+t` the latest finalized view must be larger than or equal to `v`. The value `t` is an + // empirical threshold, which must be chosen large enough that the probability of finalization + // halting for `t` or more views vanishes in practice. In the unlikely scenario that this + // threshold is exceeded, the protocol should halt. + // Formally, HotStuff (incl. its Jolteon derivative) provides no guarantees that finalization + // proceeds within `t` views, for _any_ value of `t`. Therefore, the FinalizationSafetyThreshold + // is an additional limitation on the *liveness* guarantees that HotStuff (Jolteon) provides. + // When entering view `v+t`, *safety-relevant* protocol logic should *confirm* that finalization + // has reached or exceeded view `v`. + // + // EXAMPLE: + // Given a threshold value `t`, the deadline for an epoch with final view `f` is: + // Epoch Commitment Deadline: d=f-t + // + // Epoch Commitment Deadline + // EPOCH N ↓ EPOCH N+1 + // ...---------------|--------------------------| |-----... + // ↑ ↑ ↑ + // view: d············t············>⋮ f+1 + // + // This deadline is used to determine when to trigger Epoch Fallback Mode [EFM]: + // if no valid configuration for epoch N+1 has been determined by view `d`, the + // protocol enters EFM for the following reason: + // * By the time a node surpasses the last view `f` of epoch N, it must know the leaders + // for every view of epoch N+1. + // * The leader selection for epoch N+1 is only unambiguously determined if the configuration + // for epoch N+1 has been finalized. (Otherwise, different forks could contain different + // consensus committees for epoch N+1, which would lead to different leaders. Only finalization + // resolves this ambiguity by finalizing one and orphaning epoch configurations possibly + // contained in competing forks.) + // * The latest point where we could still finalize a configuration for Epoch N+1 is the last view + // `f` of epoch N. As finalization is permitted to take up to `t` views, a valid configuration + // for epoch N+1 must be available at latest by view d=f-t. + // + // When selecting a threshold value, ensure: + // * The deadline is after the end of the DKG, with enough buffer between + // the two that the EpochCommit event is overwhelmingly likely to be emitted + // before the deadline, if it is emitted at all. + // * The buffer between the deadline and the final view of the epoch is large + // enough that the network is overwhelmingly likely to finalize at least one + // block with a view in this range. + GetFinalizationSafetyThreshold() uint64 + + // v3 + + // GetCadenceComponentVersion returns the current Cadence component version. + // If not otherwise specified, during network bootstrapping or via service event, the component version is initialized to 0.0. + // Error Returns: + // - kvstore.ErrKeyNotSupported if invoked on a KVStore instance before v3.
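// A worked example of the commitment-deadline rule documented for
// GetFinalizationSafetyThreshold above, with hypothetical values:
//
//	finalView := uint64(100_000)      // last view `f` of epoch N
//	threshold := uint64(1_000)        // safety threshold `t`
//	deadline := finalView - threshold // d = f - t = 99_000
//
// If no valid configuration for epoch N+1 has been finalized by view 99_000,
// the protocol enters Epoch Fallback Mode.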
+ GetCadenceComponentVersion() (MagnitudeVersion, error) + // GetCadenceComponentVersionUpgrade returns the most recent upgrade for the Cadence Component Version, + // if one exists (otherwise returns nil). The upgrade will be returned even if it has already been applied. + // Returns nil if invoked on a KVStore instance before v3. + GetCadenceComponentVersionUpgrade() *ViewBasedActivator[MagnitudeVersion] + + // GetExecutionComponentVersion returns the Execution component version. + // If not otherwise specified, during network bootstrapping or via service event, the component version is initialized to 0.0. + // Error Returns: + // - kvstore.ErrKeyNotSupported if invoked on a KVStore instance before v3. + GetExecutionComponentVersion() (MagnitudeVersion, error) + // GetExecutionComponentVersionUpgrade returns the most recent upgrade for the Execution Component Version, + // if one exists (otherwise returns nil). The upgrade will be returned even if it has already been applied. + // Returns nil if invoked on a KVStore instance before v3. + GetExecutionComponentVersionUpgrade() *ViewBasedActivator[MagnitudeVersion] + + // GetExecutionMeteringParameters returns the Execution metering parameters. + // Error Returns: + // - kvstore.ErrKeyNotSupported if invoked on a KVStore instance before v3. + GetExecutionMeteringParameters() (ExecutionMeteringParameters, error) + // GetExecutionMeteringParametersUpgrade returns the most recent upgrade for the Execution Metering Parameters, + // if one exists (otherwise returns nil). The upgrade will be returned even if it has already been applied. + // Returns nil if invoked on a KVStore instance before v3. + GetExecutionMeteringParametersUpgrade() *ViewBasedActivator[ExecutionMeteringParameters] +} + +// ExecutionMeteringParameters are used to measure resource usage of transactions, +// which affects fee calculations and transaction/script stopping conditions. +type ExecutionMeteringParameters struct { + // ExecutionEffortWeights maps execution effort kinds to their weights. The weights are used to tally up + // execution effort of a cadence transaction. If execution effort reaches the limit `DefaultMaxTransactionGasLimit` + // the transaction will be halted and considered failed. The transactions execution effort is also used to calculate + // the transaction fees. Unspecified weights default to `meter.DefaultComputationWeights`. + ExecutionEffortWeights map[uint32]uint64 + // ExecutionMemoryWeights maps execution memory kinds to their weights. The weights are used to tally up + // memory usage of a cadence transaction. If memory usage reaches the limit `ExecutionMemoryLimit` the transaction + // will be halted and considered failed. Unspecified weights default to `meter.DefaultMemoryWeights`. + ExecutionMemoryWeights map[uint32]uint64 + // ExecutionMemoryLimit is the maximum amount of memory that can be used by a transaction before it is halted and + // considered failed. If set to `math.MaxUint64` the `meter.DefaultMemoryLimit` is used. + ExecutionMemoryLimit uint64 +} + +// DefaultExecutionMeteringParameters returns the default set of execution metering parameters. 
+// This is the initial value automatically populated when: +// - the Protocol State first upgrades to a version supporting the ExecutionMeteringParameters field +// - a new network is bootstrapped without over-riding execution metering parameters +func DefaultExecutionMeteringParameters() ExecutionMeteringParameters { + return ExecutionMeteringParameters{ + ExecutionEffortWeights: make(map[uint32]uint64), + ExecutionMemoryWeights: make(map[uint32]uint64), + ExecutionMemoryLimit: math.MaxUint64, + } +} + +// EncodeRLP defines RLP encoding behaviour for ExecutionMeteringParameters, overriding the default behaviour. +// We convert maps to ordered slices of key-pairs before encoding, because RLP does not directly support maps. +// We require this KVStore field type to be RLP-encodable so we can compute the hash/ID of a kvstore model instance. +func (params *ExecutionMeteringParameters) EncodeRLP(w io.Writer) error { + type pair struct { + Key uint32 + Value uint64 + } + pairOrdering := func(a, b pair) int { + if a.Key < b.Key { + return -1 + } + if a.Key > b.Key { + return 1 + } + // Since we are ordering by key taken directly from a single Go map type, it is not possible to + // observe two identical keys while ordering. If we do, some invariant has been violated. + // Also, since the sort used is non-stable, this could result in non-deterministic hashes. + panic("critical invariant violated: map with duplicate keys") + } + + orderedEffortParams := make([]pair, 0, len(params.ExecutionEffortWeights)) + for k, v := range params.ExecutionEffortWeights { + orderedEffortParams = append(orderedEffortParams, pair{k, v}) + } + slices.SortFunc(orderedEffortParams, pairOrdering) + + orderedMemoryParams := make([]pair, 0, len(params.ExecutionMemoryWeights)) + for k, v := range params.ExecutionMemoryWeights { + orderedMemoryParams = append(orderedMemoryParams, pair{k, v}) + } + slices.SortFunc(orderedMemoryParams, pairOrdering) + + return rlp.Encode(w, struct { + ExecutionEffortWeights []pair + ExecutionMemoryWeights []pair + ExecutionMemoryLimit uint64 + }{ + ExecutionEffortWeights: orderedEffortParams, + ExecutionMemoryWeights: orderedMemoryParams, + ExecutionMemoryLimit: params.ExecutionMemoryLimit, + }) +} + +// VersionedEncodable defines the interface for a versioned key-value store independent +// of the set of keys which are supported. This allows the storage layer to support +// storing different key-value model versions within the same software version. +type VersionedEncodable interface { + // VersionedEncode encodes the key-value store, returning the version separately + // from the encoded bytes. + // No errors are expected during normal operation. + VersionedEncode() (uint64, []byte, error) +} + +// ViewBasedActivator represents a scheduled update to some protocol parameter P. +// (The relationship between a ViewBasedActivator and P is managed outside this model.) +// Once the ViewBasedActivator A is persisted to the protocol state, P is updated to value +// A.Data in the first block with view ≥ A.ActivationView (in each fork independently). +type ViewBasedActivator[T any] struct { + // Data is the pending new value, to be applied when reaching or exceeding ActivationView. + Data T + // ActivationView is the view at which the new value should be applied. + ActivationView uint64 +} + +// UpdatableField represents a protocol parameter which can be updated using a ViewBasedActivator. 
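// A minimal sketch of the activation semantics described here (an assumption based on
// the field documentation, not the actual protocol state machine): when building a block
// at `view`, a pending update is applied once the activation view is reached or exceeded.
//
//	func applyPendingUpdate[T any](f *UpdatableField[T], view uint64) {
//		if f.Update != nil && view >= f.Update.ActivationView {
//			f.CurrentValue = f.Update.Data // the directive stays recorded until superseded
//		}
//	}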
+type UpdatableField[T any] struct { + // CurrentValue is the value that is active after constructing the block + // that this Protocol State pertains to. + CurrentValue T + + // Update is optional and is nil if no value update has been scheduled yet. + // This field will hold the last scheduled update until a newer update + // directive is received, even if the value update has already happened. + // The update should be applied when reaching or exceeding the ActivationView. + Update *ViewBasedActivator[T] +} + +// MagnitudeVersion is intended as an intuitive representation of the “magnitude of change”. +// +// # CAUTION: Don't confuse this with semver! +// +// This versioning representation DEVIATES from established Semantic Versioning. +// Any two different versions of the Execution Stack are considered incompatible. +// In particular, two versions only differing in their minor might be entirely downwards-INCOMPATIBLE. +// +// We generally recommend using Integer Versioning for components. The MagnitudeVersion scheme should +// only be used when there is a clear advantage over Integer Versioning, which outweighs the risk of falsely +// making compatibility assumptions by confusing this scheme with Semantic Versioning! +// +// MagnitudeVersion helps with an intuitive representation of the “magnitude of change”. +// For example, for the execution stack, bug fixes closing unexploited edge-cases will be a relatively +// frequent cause of upgrades. Those bug fixes could be reflected by minor version bumps, whose +// imperfect downwards compatibility might frequently suffice to warrant Access Nodes using the same +// version (higher minor) across version boundaries. In comparison, a major version change would generally +// indicate broader non-compatibility (or larger feature additions) where it is very unlikely that the Access +// Node can use one implementation for versions with different major. +// +// We emphasize again that this differentiation of “imperfect but good-enough downwards compatibility” +// is in no way reflected by the versioning scheme. Any automated decisions regarding compatibility of +// different versions are to be avoided (including versions where only the minor is different). +// +// Engineering teams using this scheme must be aware that the MagnitudeVersion is easily +// misleading wrt incorrect assumptions about downwards compatibility. Avoiding problems (up to and +// including the possibility of mainnet outages) requires continued awareness of all engineers in the +// teams working with this version. The engineers in those teams must commit to diligently documenting +// all relevant changes, details regarding the magnitude of changes and, if applicable, “imperfect but +// good-enough downwards compatibility”. +type MagnitudeVersion struct { + Major uint + Minor uint +} diff --git a/state/protocol/kvstore_test.go b/state/protocol/kvstore_test.go new file mode 100644 index 00000000000..f518e18106a --- /dev/null +++ b/state/protocol/kvstore_test.go @@ -0,0 +1,45 @@ +package protocol + +import ( + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/rlp" + clone "github.com/huandu/go-clone/generic" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestExecutionMeteringParameters_EncodeRLP tests properties of the custom RLP encoding logic.
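// A standalone illustration (plain Go, no RLP dependency) of why EncodeRLP sorts map
// entries before encoding: Go randomizes map iteration order, so a canonical byte
// stream requires a deterministic ordering of the key-value pairs.
//
//	pairs := make([][2]uint64, 0, len(weights))
//	for k, v := range weights {
//		pairs = append(pairs, [2]uint64{uint64(k), v})
//	}
//	slices.SortFunc(pairs, func(a, b [2]uint64) int { return cmp.Compare(a[0], b[0]) })
//	// encoding `pairs` instead of `weights` yields identical bytes on every run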
+func TestExecutionMeteringParameters_EncodeRLP(t *testing.T) { + params1 := &ExecutionMeteringParameters{ + ExecutionMemoryLimit: rand.Uint64(), + ExecutionMemoryWeights: make(map[uint32]uint64, 10), + ExecutionEffortWeights: make(map[uint32]uint64, 10), + } + for range 10 { + params1.ExecutionMemoryWeights[rand.Uint32()] = rand.Uint64() + params1.ExecutionEffortWeights[rand.Uint32()] = rand.Uint64() + } + + t.Run("deterministic encoding", func(t *testing.T) { + enc1, err := rlp.EncodeToBytes(params1) + require.NoError(t, err) + enc2, err := rlp.EncodeToBytes(params1) + require.NoError(t, err) + assert.Equal(t, enc1, enc2) + }) + t.Run("unique encoding", func(t *testing.T) { + params2 := clone.Clone(params1) + for k, v := range params2.ExecutionMemoryWeights { + params2.ExecutionMemoryWeights[k] = v + 1 + assert.NotEqual(t, params1.ExecutionMemoryWeights[k], params2.ExecutionMemoryWeights[k]) + break + } + enc1, err := rlp.EncodeToBytes(params1) + require.NoError(t, err) + enc2, err := rlp.EncodeToBytes(params2) + require.NoError(t, err) + assert.NotEqual(t, enc1, enc2) + }) +} diff --git a/state/protocol/mock/block_timer.go b/state/protocol/mock/block_timer.go index 5baa7aa0ed8..16499090427 100644 --- a/state/protocol/mock/block_timer.go +++ b/state/protocol/mock/block_timer.go @@ -1,12 +1,8 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock -import ( - mock "github.com/stretchr/testify/mock" - - time "time" -) +import mock "github.com/stretchr/testify/mock" // BlockTimer is an autogenerated mock type for the BlockTimer type type BlockTimer struct { @@ -14,25 +10,33 @@ type BlockTimer struct { } // Build provides a mock function with given fields: parentTimestamp -func (_m *BlockTimer) Build(parentTimestamp time.Time) time.Time { +func (_m *BlockTimer) Build(parentTimestamp uint64) uint64 { ret := _m.Called(parentTimestamp) - var r0 time.Time - if rf, ok := ret.Get(0).(func(time.Time) time.Time); ok { + if len(ret) == 0 { + panic("no return value specified for Build") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { r0 = rf(parentTimestamp) } else { - r0 = ret.Get(0).(time.Time) + r0 = ret.Get(0).(uint64) } return r0 } // Validate provides a mock function with given fields: parentTimestamp, currentTimestamp -func (_m *BlockTimer) Validate(parentTimestamp time.Time, currentTimestamp time.Time) error { +func (_m *BlockTimer) Validate(parentTimestamp uint64, currentTimestamp uint64) error { ret := _m.Called(parentTimestamp, currentTimestamp) + if len(ret) == 0 { + panic("no return value specified for Validate") + } + var r0 error - if rf, ok := ret.Get(0).(func(time.Time, time.Time) error); ok { + if rf, ok := ret.Get(0).(func(uint64, uint64) error); ok { r0 = rf(parentTimestamp, currentTimestamp) } else { r0 = ret.Error(0) @@ -41,13 +45,12 @@ func (_m *BlockTimer) Validate(parentTimestamp time.Time, currentTimestamp time. return r0 } -type mockConstructorTestingTNewBlockTimer interface { +// NewBlockTimer creates a new instance of BlockTimer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBlockTimer(t interface { mock.TestingT Cleanup(func()) -} - -// NewBlockTimer creates a new instance of BlockTimer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
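// Typical use of the regenerated mock in a test (a sketch; note that block timestamps
// are now plain uint64 values rather than time.Time):
//
//	timer := mock.NewBlockTimer(t)
//	timer.On("Build", uint64(1_700_000_000)).Return(uint64(1_700_000_001)).Once()
//	ts := timer.Build(1_700_000_000) // returns the stubbed value
//	// the cleanup registered by NewBlockTimer asserts the expectations automatically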
-func NewBlockTimer(t mockConstructorTestingTNewBlockTimer) *BlockTimer { +}) *BlockTimer { mock := &BlockTimer{} mock.Mock.Test(t) diff --git a/state/protocol/mock/cluster.go b/state/protocol/mock/cluster.go index aebb5a2af5b..46a12811ebf 100644 --- a/state/protocol/mock/cluster.go +++ b/state/protocol/mock/cluster.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -14,10 +14,14 @@ type Cluster struct { mock.Mock } -// ChainID provides a mock function with given fields: +// ChainID provides a mock function with no fields func (_m *Cluster) ChainID() flow.ChainID { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + var r0 flow.ChainID if rf, ok := ret.Get(0).(func() flow.ChainID); ok { r0 = rf() @@ -28,10 +32,14 @@ func (_m *Cluster) ChainID() flow.ChainID { return r0 } -// EpochCounter provides a mock function with given fields: +// EpochCounter provides a mock function with no fields func (_m *Cluster) EpochCounter() uint64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for EpochCounter") + } + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() @@ -42,10 +50,14 @@ func (_m *Cluster) EpochCounter() uint64 { return r0 } -// Index provides a mock function with given fields: +// Index provides a mock function with no fields func (_m *Cluster) Index() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Index") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -56,26 +68,34 @@ func (_m *Cluster) Index() uint { return r0 } -// Members provides a mock function with given fields: -func (_m *Cluster) Members() flow.IdentityList { +// Members provides a mock function with no fields +func (_m *Cluster) Members() flow.IdentitySkeletonList { ret := _m.Called() - var r0 flow.IdentityList - if rf, ok := ret.Get(0).(func() flow.IdentityList); ok { + if len(ret) == 0 { + panic("no return value specified for Members") + } + + var r0 flow.IdentitySkeletonList + if rf, ok := ret.Get(0).(func() flow.IdentitySkeletonList); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.IdentityList) + r0 = ret.Get(0).(flow.IdentitySkeletonList) } } return r0 } -// RootBlock provides a mock function with given fields: +// RootBlock provides a mock function with no fields func (_m *Cluster) RootBlock() *cluster.Block { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RootBlock") + } + var r0 *cluster.Block if rf, ok := ret.Get(0).(func() *cluster.Block); ok { r0 = rf() @@ -88,10 +108,14 @@ func (_m *Cluster) RootBlock() *cluster.Block { return r0 } -// RootQC provides a mock function with given fields: +// RootQC provides a mock function with no fields func (_m *Cluster) RootQC() *flow.QuorumCertificate { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RootQC") + } + var r0 *flow.QuorumCertificate if rf, ok := ret.Get(0).(func() *flow.QuorumCertificate); ok { r0 = rf() @@ -104,13 +128,12 @@ func (_m *Cluster) RootQC() *flow.QuorumCertificate { return r0 } -type mockConstructorTestingTNewCluster interface { +// NewCluster creates a new instance of Cluster. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewCluster(t interface { mock.TestingT Cleanup(func()) -} - -// NewCluster creates a new instance of Cluster. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCluster(t mockConstructorTestingTNewCluster) *Cluster { +}) *Cluster { mock := &Cluster{} mock.Mock.Test(t) diff --git a/state/protocol/mock/committed_epoch.go b/state/protocol/mock/committed_epoch.go new file mode 100644 index 00000000000..271bbaf94ee --- /dev/null +++ b/state/protocol/mock/committed_epoch.go @@ -0,0 +1,389 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + protocol "github.com/onflow/flow-go/state/protocol" +) + +// CommittedEpoch is an autogenerated mock type for the CommittedEpoch type +type CommittedEpoch struct { + mock.Mock +} + +// Cluster provides a mock function with given fields: index +func (_m *CommittedEpoch) Cluster(index uint) (protocol.Cluster, error) { + ret := _m.Called(index) + + if len(ret) == 0 { + panic("no return value specified for Cluster") + } + + var r0 protocol.Cluster + var r1 error + if rf, ok := ret.Get(0).(func(uint) (protocol.Cluster, error)); ok { + return rf(index) + } + if rf, ok := ret.Get(0).(func(uint) protocol.Cluster); ok { + r0 = rf(index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.Cluster) + } + } + + if rf, ok := ret.Get(1).(func(uint) error); ok { + r1 = rf(index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClusterByChainID provides a mock function with given fields: chainID +func (_m *CommittedEpoch) ClusterByChainID(chainID flow.ChainID) (protocol.Cluster, error) { + ret := _m.Called(chainID) + + if len(ret) == 0 { + panic("no return value specified for ClusterByChainID") + } + + var r0 protocol.Cluster + var r1 error + if rf, ok := ret.Get(0).(func(flow.ChainID) (protocol.Cluster, error)); ok { + return rf(chainID) + } + if rf, ok := ret.Get(0).(func(flow.ChainID) protocol.Cluster); ok { + r0 = rf(chainID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.Cluster) + } + } + + if rf, ok := ret.Get(1).(func(flow.ChainID) error); ok { + r1 = rf(chainID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Clustering provides a mock function with no fields +func (_m *CommittedEpoch) Clustering() (flow.ClusterList, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Clustering") + } + + var r0 flow.ClusterList + var r1 error + if rf, ok := ret.Get(0).(func() (flow.ClusterList, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() flow.ClusterList); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.ClusterList) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Counter provides a mock function with no fields +func (_m *CommittedEpoch) Counter() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Counter") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// DKG provides a mock function with no fields +func (_m *CommittedEpoch) DKG() (protocol.DKG, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DKG") + } + + var r0 protocol.DKG + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.DKG, 
error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.DKG); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.DKG) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DKGPhase1FinalView provides a mock function with no fields +func (_m *CommittedEpoch) DKGPhase1FinalView() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DKGPhase1FinalView") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// DKGPhase2FinalView provides a mock function with no fields +func (_m *CommittedEpoch) DKGPhase2FinalView() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DKGPhase2FinalView") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// DKGPhase3FinalView provides a mock function with no fields +func (_m *CommittedEpoch) DKGPhase3FinalView() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DKGPhase3FinalView") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// FinalHeight provides a mock function with no fields +func (_m *CommittedEpoch) FinalHeight() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FinalView provides a mock function with no fields +func (_m *CommittedEpoch) FinalView() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FinalView") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// FirstHeight provides a mock function with no fields +func (_m *CommittedEpoch) FirstHeight() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FirstHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FirstView provides a mock function with no fields +func (_m *CommittedEpoch) FirstView() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FirstView") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// InitialIdentities provides a mock function with no fields +func (_m *CommittedEpoch) InitialIdentities() flow.IdentitySkeletonList { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for InitialIdentities") + } + + var r0 flow.IdentitySkeletonList + if rf, ok := ret.Get(0).(func() flow.IdentitySkeletonList); ok { + r0 = rf() + } else { + if ret.Get(0) != 
nil { + r0 = ret.Get(0).(flow.IdentitySkeletonList) + } + } + + return r0 +} + +// RandomSource provides a mock function with no fields +func (_m *CommittedEpoch) RandomSource() []byte { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RandomSource") + } + + var r0 []byte + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// TargetDuration provides a mock function with no fields +func (_m *CommittedEpoch) TargetDuration() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TargetDuration") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// TargetEndTime provides a mock function with no fields +func (_m *CommittedEpoch) TargetEndTime() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TargetEndTime") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// NewCommittedEpoch creates a new instance of CommittedEpoch. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCommittedEpoch(t interface { + mock.TestingT + Cleanup(func()) +}) *CommittedEpoch { + mock := &CommittedEpoch{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/mock/consumer.go b/state/protocol/mock/consumer.go index a7ddcc6f3ed..003bacda598 100644 --- a/state/protocol/mock/consumer.go +++ b/state/protocol/mock/consumer.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -27,9 +27,19 @@ func (_m *Consumer) EpochCommittedPhaseStarted(currentEpochCounter uint64, first _m.Called(currentEpochCounter, first) } -// EpochEmergencyFallbackTriggered provides a mock function with given fields: -func (_m *Consumer) EpochEmergencyFallbackTriggered() { - _m.Called() +// EpochExtended provides a mock function with given fields: epochCounter, header, extension +func (_m *Consumer) EpochExtended(epochCounter uint64, header *flow.Header, extension flow.EpochExtension) { + _m.Called(epochCounter, header, extension) +} + +// EpochFallbackModeExited provides a mock function with given fields: epochCounter, header +func (_m *Consumer) EpochFallbackModeExited(epochCounter uint64, header *flow.Header) { + _m.Called(epochCounter, header) +} + +// EpochFallbackModeTriggered provides a mock function with given fields: epochCounter, header +func (_m *Consumer) EpochFallbackModeTriggered(epochCounter uint64, header *flow.Header) { + _m.Called(epochCounter, header) } // EpochSetupPhaseStarted provides a mock function with given fields: currentEpochCounter, first @@ -42,13 +52,12 @@ func (_m *Consumer) EpochTransition(newEpochCounter uint64, first *flow.Header) _m.Called(newEpochCounter, first) } -type mockConstructorTestingTNewConsumer interface { +// NewConsumer creates a new instance of Consumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConsumer(t interface { mock.TestingT Cleanup(func()) -} - -// NewConsumer creates a new instance of Consumer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConsumer(t mockConstructorTestingTNewConsumer) *Consumer { +}) *Consumer { mock := &Consumer{} mock.Mock.Test(t) diff --git a/state/protocol/mock/dkg.go b/state/protocol/mock/dkg.go index 207719bd1ad..4de7eac9ce1 100644 --- a/state/protocol/mock/dkg.go +++ b/state/protocol/mock/dkg.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( - crypto "github.com/onflow/flow-go/crypto" + crypto "github.com/onflow/crypto" flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" @@ -14,10 +14,14 @@ type DKG struct { mock.Mock } -// GroupKey provides a mock function with given fields: +// GroupKey provides a mock function with no fields func (_m *DKG) GroupKey() crypto.PublicKey { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GroupKey") + } + var r0 crypto.PublicKey if rf, ok := ret.Get(0).(func() crypto.PublicKey); ok { r0 = rf() @@ -34,6 +38,10 @@ func (_m *DKG) GroupKey() crypto.PublicKey { func (_m *DKG) Index(nodeID flow.Identifier) (uint, error) { ret := _m.Called(nodeID) + if len(ret) == 0 { + panic("no return value specified for Index") + } + var r0 uint var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (uint, error)); ok { @@ -58,6 +66,10 @@ func (_m *DKG) Index(nodeID flow.Identifier) (uint, error) { func (_m *DKG) KeyShare(nodeID flow.Identifier) (crypto.PublicKey, error) { ret := _m.Called(nodeID) + if len(ret) == 0 { + panic("no return value specified for KeyShare") + } + var r0 crypto.PublicKey var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (crypto.PublicKey, error)); ok { @@ -80,10 +92,64 @@ func (_m *DKG) KeyShare(nodeID flow.Identifier) (crypto.PublicKey, error) { return r0, r1 } -// Size provides a mock function with given fields: +// KeyShares provides a mock function with no fields +func (_m *DKG) KeyShares() []crypto.PublicKey { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for KeyShares") + } + + var r0 []crypto.PublicKey + if rf, ok := ret.Get(0).(func() []crypto.PublicKey); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]crypto.PublicKey) + } + } + + return r0 +} + +// NodeID provides a mock function with given fields: index +func (_m *DKG) NodeID(index uint) (flow.Identifier, error) { + ret := _m.Called(index) + + if len(ret) == 0 { + panic("no return value specified for NodeID") + } + + var r0 flow.Identifier + var r1 error + if rf, ok := ret.Get(0).(func(uint) (flow.Identifier, error)); ok { + return rf(index) + } + if rf, ok := ret.Get(0).(func(uint) flow.Identifier); ok { + r0 = rf(index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + if rf, ok := ret.Get(1).(func(uint) error); ok { + r1 = rf(index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Size provides a mock function with no fields func (_m *DKG) Size() uint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 uint if rf, ok := ret.Get(0).(func() uint); ok { r0 = rf() @@ -94,13 +160,12 @@ func (_m *DKG) Size() uint { return r0 } -type mockConstructorTestingTNewDKG interface { +// NewDKG creates a new instance of DKG. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewDKG(t interface { mock.TestingT Cleanup(func()) -} - -// NewDKG creates a new instance of DKG. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDKG(t mockConstructorTestingTNewDKG) *DKG { +}) *DKG { mock := &DKG{} mock.Mock.Test(t) diff --git a/state/protocol/mock/epoch.go b/state/protocol/mock/epoch.go deleted file mode 100644 index d1bfabce547..00000000000 --- a/state/protocol/mock/epoch.go +++ /dev/null @@ -1,378 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mock - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" - - protocol "github.com/onflow/flow-go/state/protocol" -) - -// Epoch is an autogenerated mock type for the Epoch type -type Epoch struct { - mock.Mock -} - -// Cluster provides a mock function with given fields: index -func (_m *Epoch) Cluster(index uint) (protocol.Cluster, error) { - ret := _m.Called(index) - - var r0 protocol.Cluster - var r1 error - if rf, ok := ret.Get(0).(func(uint) (protocol.Cluster, error)); ok { - return rf(index) - } - if rf, ok := ret.Get(0).(func(uint) protocol.Cluster); ok { - r0 = rf(index) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(protocol.Cluster) - } - } - - if rf, ok := ret.Get(1).(func(uint) error); ok { - r1 = rf(index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ClusterByChainID provides a mock function with given fields: chainID -func (_m *Epoch) ClusterByChainID(chainID flow.ChainID) (protocol.Cluster, error) { - ret := _m.Called(chainID) - - var r0 protocol.Cluster - var r1 error - if rf, ok := ret.Get(0).(func(flow.ChainID) (protocol.Cluster, error)); ok { - return rf(chainID) - } - if rf, ok := ret.Get(0).(func(flow.ChainID) protocol.Cluster); ok { - r0 = rf(chainID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(protocol.Cluster) - } - } - - if rf, ok := ret.Get(1).(func(flow.ChainID) error); ok { - r1 = rf(chainID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Clustering provides a mock function with given fields: -func (_m *Epoch) Clustering() (flow.ClusterList, error) { - ret := _m.Called() - - var r0 flow.ClusterList - var r1 error - if rf, ok := ret.Get(0).(func() (flow.ClusterList, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() flow.ClusterList); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.ClusterList) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Counter provides a mock function with given fields: -func (_m *Epoch) Counter() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DKG provides a mock function with given fields: -func (_m *Epoch) DKG() (protocol.DKG, error) { - ret := _m.Called() - - var r0 protocol.DKG - var r1 error - if rf, ok := ret.Get(0).(func() (protocol.DKG, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() protocol.DKG); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(protocol.DKG) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
DKGPhase1FinalView provides a mock function with given fields: -func (_m *Epoch) DKGPhase1FinalView() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DKGPhase2FinalView provides a mock function with given fields: -func (_m *Epoch) DKGPhase2FinalView() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DKGPhase3FinalView provides a mock function with given fields: -func (_m *Epoch) DKGPhase3FinalView() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FinalHeight provides a mock function with given fields: -func (_m *Epoch) FinalHeight() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FinalView provides a mock function with given fields: -func (_m *Epoch) FinalView() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FirstHeight provides a mock function with given fields: -func (_m *Epoch) FirstHeight() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FirstView provides a mock function with given fields: -func (_m *Epoch) FirstView() (uint64, error) { - ret := _m.Called() - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// InitialIdentities provides a mock function with given fields: -func (_m *Epoch) InitialIdentities() (flow.IdentityList, error) { - ret := _m.Called() - - var r0 flow.IdentityList - var r1 error - if rf, ok := ret.Get(0).(func() (flow.IdentityList, error)); ok { - return rf() - } - if rf, ok := 
ret.Get(0).(func() flow.IdentityList); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.IdentityList) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// RandomSource provides a mock function with given fields: -func (_m *Epoch) RandomSource() ([]byte, error) { - ret := _m.Called() - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func() ([]byte, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() []byte); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTNewEpoch interface { - mock.TestingT - Cleanup(func()) -} - -// NewEpoch creates a new instance of Epoch. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEpoch(t mockConstructorTestingTNewEpoch) *Epoch { - mock := &Epoch{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/state/protocol/mock/epoch_protocol_state.go b/state/protocol/mock/epoch_protocol_state.go new file mode 100644 index 00000000000..2c045716118 --- /dev/null +++ b/state/protocol/mock/epoch_protocol_state.go @@ -0,0 +1,281 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + protocol "github.com/onflow/flow-go/state/protocol" +) + +// EpochProtocolState is an autogenerated mock type for the EpochProtocolState type +type EpochProtocolState struct { + mock.Mock +} + +// Clustering provides a mock function with no fields +func (_m *EpochProtocolState) Clustering() (flow.ClusterList, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Clustering") + } + + var r0 flow.ClusterList + var r1 error + if rf, ok := ret.Get(0).(func() (flow.ClusterList, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() flow.ClusterList); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.ClusterList) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DKG provides a mock function with no fields +func (_m *EpochProtocolState) DKG() (protocol.DKG, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for DKG") + } + + var r0 protocol.DKG + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.DKG, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.DKG); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.DKG) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Entry provides a mock function with no fields +func (_m *EpochProtocolState) Entry() *flow.RichEpochStateEntry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Entry") + } + + var r0 *flow.RichEpochStateEntry + if rf, ok := ret.Get(0).(func() *flow.RichEpochStateEntry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.RichEpochStateEntry) + } + } + + return r0 +} + +// Epoch provides a mock function with no fields +func (_m *EpochProtocolState) Epoch() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no 
return value specified for Epoch") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// EpochCommit provides a mock function with no fields +func (_m *EpochProtocolState) EpochCommit() *flow.EpochCommit { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EpochCommit") + } + + var r0 *flow.EpochCommit + if rf, ok := ret.Get(0).(func() *flow.EpochCommit); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.EpochCommit) + } + } + + return r0 +} + +// EpochExtensions provides a mock function with no fields +func (_m *EpochProtocolState) EpochExtensions() []flow.EpochExtension { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EpochExtensions") + } + + var r0 []flow.EpochExtension + if rf, ok := ret.Get(0).(func() []flow.EpochExtension); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.EpochExtension) + } + } + + return r0 +} + +// EpochFallbackTriggered provides a mock function with no fields +func (_m *EpochProtocolState) EpochFallbackTriggered() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EpochFallbackTriggered") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// EpochPhase provides a mock function with no fields +func (_m *EpochProtocolState) EpochPhase() flow.EpochPhase { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EpochPhase") + } + + var r0 flow.EpochPhase + if rf, ok := ret.Get(0).(func() flow.EpochPhase); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(flow.EpochPhase) + } + + return r0 +} + +// EpochSetup provides a mock function with no fields +func (_m *EpochProtocolState) EpochSetup() *flow.EpochSetup { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EpochSetup") + } + + var r0 *flow.EpochSetup + if rf, ok := ret.Get(0).(func() *flow.EpochSetup); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.EpochSetup) + } + } + + return r0 +} + +// GlobalParams provides a mock function with no fields +func (_m *EpochProtocolState) GlobalParams() protocol.GlobalParams { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GlobalParams") + } + + var r0 protocol.GlobalParams + if rf, ok := ret.Get(0).(func() protocol.GlobalParams); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.GlobalParams) + } + } + + return r0 +} + +// Identities provides a mock function with no fields +func (_m *EpochProtocolState) Identities() flow.IdentityList { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Identities") + } + + var r0 flow.IdentityList + if rf, ok := ret.Get(0).(func() flow.IdentityList); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.IdentityList) + } + } + + return r0 +} + +// PreviousEpochExists provides a mock function with no fields +func (_m *EpochProtocolState) PreviousEpochExists() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for PreviousEpochExists") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NewEpochProtocolState creates a new instance of EpochProtocolState. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEpochProtocolState(t interface { + mock.TestingT + Cleanup(func()) +}) *EpochProtocolState { + mock := &EpochProtocolState{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/mock/epoch_query.go b/state/protocol/mock/epoch_query.go index cb91773a108..e59fdc415aa 100644 --- a/state/protocol/mock/epoch_query.go +++ b/state/protocol/mock/epoch_query.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -12,61 +12,132 @@ type EpochQuery struct { mock.Mock } -// Current provides a mock function with given fields: -func (_m *EpochQuery) Current() protocol.Epoch { +// Current provides a mock function with no fields +func (_m *EpochQuery) Current() (protocol.CommittedEpoch, error) { ret := _m.Called() - var r0 protocol.Epoch - if rf, ok := ret.Get(0).(func() protocol.Epoch); ok { + if len(ret) == 0 { + panic("no return value specified for Current") + } + + var r0 protocol.CommittedEpoch + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.CommittedEpoch, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.CommittedEpoch); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(protocol.Epoch) + r0 = ret.Get(0).(protocol.CommittedEpoch) } } - return r0 + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// Next provides a mock function with given fields: -func (_m *EpochQuery) Next() protocol.Epoch { +// NextCommitted provides a mock function with no fields +func (_m *EpochQuery) NextCommitted() (protocol.CommittedEpoch, error) { ret := _m.Called() - var r0 protocol.Epoch - if rf, ok := ret.Get(0).(func() protocol.Epoch); ok { + if len(ret) == 0 { + panic("no return value specified for NextCommitted") + } + + var r0 protocol.CommittedEpoch + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.CommittedEpoch, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.CommittedEpoch); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(protocol.Epoch) + r0 = ret.Get(0).(protocol.CommittedEpoch) } } - return r0 + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// Previous provides a mock function with given fields: -func (_m *EpochQuery) Previous() protocol.Epoch { +// NextUnsafe provides a mock function with no fields +func (_m *EpochQuery) NextUnsafe() (protocol.TentativeEpoch, error) { ret := _m.Called() - var r0 protocol.Epoch - if rf, ok := ret.Get(0).(func() protocol.Epoch); ok { + if len(ret) == 0 { + panic("no return value specified for NextUnsafe") + } + + var r0 protocol.TentativeEpoch + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.TentativeEpoch, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.TentativeEpoch); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(protocol.Epoch) + r0 = ret.Get(0).(protocol.TentativeEpoch) } } - return r0 + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -type mockConstructorTestingTNewEpochQuery interface { - mock.TestingT - Cleanup(func()) +// Previous provides a mock function with no fields +func (_m *EpochQuery) Previous() 
(protocol.CommittedEpoch, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Previous") + } + + var r0 protocol.CommittedEpoch + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.CommittedEpoch, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.CommittedEpoch); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.CommittedEpoch) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // NewEpochQuery creates a new instance of EpochQuery. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEpochQuery(t mockConstructorTestingTNewEpochQuery) *EpochQuery { +// The first argument is typically a *testing.T value. +func NewEpochQuery(t interface { + mock.TestingT + Cleanup(func()) +}) *EpochQuery { mock := &EpochQuery{} mock.Mock.Test(t) diff --git a/state/protocol/mock/follower_state.go b/state/protocol/mock/follower_state.go index eaedf9029c0..4aa2c6b868a 100644 --- a/state/protocol/mock/follower_state.go +++ b/state/protocol/mock/follower_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -20,6 +20,10 @@ type FollowerState struct { func (_m *FollowerState) AtBlockID(blockID flow.Identifier) protocol.Snapshot { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for AtBlockID") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func(flow.Identifier) protocol.Snapshot); ok { r0 = rf(blockID) @@ -36,6 +40,10 @@ func (_m *FollowerState) AtBlockID(blockID flow.Identifier) protocol.Snapshot { func (_m *FollowerState) AtHeight(height uint64) protocol.Snapshot { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for AtHeight") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func(uint64) protocol.Snapshot); ok { r0 = rf(height) @@ -48,13 +56,17 @@ func (_m *FollowerState) AtHeight(height uint64) protocol.Snapshot { return r0 } -// ExtendCertified provides a mock function with given fields: ctx, candidate, qc -func (_m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Block, qc *flow.QuorumCertificate) error { - ret := _m.Called(ctx, candidate, qc) +// ExtendCertified provides a mock function with given fields: ctx, certified +func (_m *FollowerState) ExtendCertified(ctx context.Context, certified *flow.CertifiedBlock) error { + ret := _m.Called(ctx, certified) + + if len(ret) == 0 { + panic("no return value specified for ExtendCertified") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, *flow.QuorumCertificate) error); ok { - r0 = rf(ctx, candidate, qc) + if rf, ok := ret.Get(0).(func(context.Context, *flow.CertifiedBlock) error); ok { + r0 = rf(ctx, certified) } else { r0 = ret.Error(0) } @@ -62,10 +74,14 @@ func (_m *FollowerState) ExtendCertified(ctx context.Context, candidate *flow.Bl return r0 } -// Final provides a mock function with given fields: +// Final provides a mock function with no fields func (_m *FollowerState) Final() protocol.Snapshot { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Final") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func() protocol.Snapshot); ok { r0 = rf() @@ -82,6 +98,10 @@ func (_m *FollowerState) Final() protocol.Snapshot { func (_m *FollowerState) Finalize(ctx context.Context, blockID 
flow.Identifier) error { ret := _m.Called(ctx, blockID) + if len(ret) == 0 { + panic("no return value specified for Finalize") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) error); ok { r0 = rf(ctx, blockID) @@ -92,10 +112,14 @@ func (_m *FollowerState) Finalize(ctx context.Context, blockID flow.Identifier) return r0 } -// Params provides a mock function with given fields: +// Params provides a mock function with no fields func (_m *FollowerState) Params() protocol.Params { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Params") + } + var r0 protocol.Params if rf, ok := ret.Get(0).(func() protocol.Params); ok { r0 = rf() @@ -108,10 +132,14 @@ func (_m *FollowerState) Params() protocol.Params { return r0 } -// Sealed provides a mock function with given fields: +// Sealed provides a mock function with no fields func (_m *FollowerState) Sealed() protocol.Snapshot { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Sealed") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func() protocol.Snapshot); ok { r0 = rf() @@ -124,13 +152,12 @@ func (_m *FollowerState) Sealed() protocol.Snapshot { return r0 } -type mockConstructorTestingTNewFollowerState interface { +// NewFollowerState creates a new instance of FollowerState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFollowerState(t interface { mock.TestingT Cleanup(func()) -} - -// NewFollowerState creates a new instance of FollowerState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewFollowerState(t mockConstructorTestingTNewFollowerState) *FollowerState { +}) *FollowerState { mock := &FollowerState{} mock.Mock.Test(t) diff --git a/state/protocol/mock/global_params.go b/state/protocol/mock/global_params.go index 64829403fc3..216946d5111 100644 --- a/state/protocol/mock/global_params.go +++ b/state/protocol/mock/global_params.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
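The GlobalParams hunks that follow show a second recurring change: accessors such as ChainID, SporkID, and SporkRootBlockHeight drop their error return and yield the value directly, and the regenerated mock now panics when no return value is stubbed, surfacing misconfigured tests immediately. A hypothetical caller-side sketch of the simplification, inferred from the regenerated mock; useParams is an illustrative name, not from the diff.

package params_example

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol"
)

// useParams shows how a call site collapses once the accessor is
// infallible at the interface level.
func useParams(params protocol.GlobalParams) flow.ChainID {
	// Previously: chainID, err := params.ChainID(), with an error branch.
	return params.ChainID()
}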
package mock @@ -12,135 +12,86 @@ type GlobalParams struct { mock.Mock } -// ChainID provides a mock function with given fields: -func (_m *GlobalParams) ChainID() (flow.ChainID, error) { +// ChainID provides a mock function with no fields +func (_m *GlobalParams) ChainID() flow.ChainID { ret := _m.Called() - var r0 flow.ChainID - var r1 error - if rf, ok := ret.Get(0).(func() (flow.ChainID, error)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for ChainID") } + + var r0 flow.ChainID if rf, ok := ret.Get(0).(func() flow.ChainID); ok { r0 = rf() } else { r0 = ret.Get(0).(flow.ChainID) } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } -// EpochCommitSafetyThreshold provides a mock function with given fields: -func (_m *GlobalParams) EpochCommitSafetyThreshold() (uint64, error) { +// SporkID provides a mock function with no fields +func (_m *GlobalParams) SporkID() flow.Identifier { ret := _m.Called() - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) + if len(ret) == 0 { + panic("no return value specified for SporkID") } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + var r0 flow.Identifier + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() } else { - r1 = ret.Error(1) + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } } - return r0, r1 + return r0 } -// ProtocolVersion provides a mock function with given fields: -func (_m *GlobalParams) ProtocolVersion() (uint, error) { +// SporkRootBlockHeight provides a mock function with no fields +func (_m *GlobalParams) SporkRootBlockHeight() uint64 { ret := _m.Called() - var r0 uint - var r1 error - if rf, ok := ret.Get(0).(func() (uint, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint) + if len(ret) == 0 { + panic("no return value specified for SporkRootBlockHeight") } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() } else { - r1 = ret.Error(1) + r0 = ret.Get(0).(uint64) } - return r0, r1 + return r0 } -// SporkID provides a mock function with given fields: -func (_m *GlobalParams) SporkID() (flow.Identifier, error) { +// SporkRootBlockView provides a mock function with no fields +func (_m *GlobalParams) SporkRootBlockView() uint64 { ret := _m.Called() - var r0 flow.Identifier - var r1 error - if rf, ok := ret.Get(0).(func() (flow.Identifier, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() flow.Identifier); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.Identifier) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) + if len(ret) == 0 { + panic("no return value specified for SporkRootBlockView") } - return r0, r1 -} - -// SporkRootBlockHeight provides a mock function with given fields: -func (_m *GlobalParams) SporkRootBlockHeight() (uint64, error) { - ret := _m.Called() - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() } else { r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } -type 
mockConstructorTestingTNewGlobalParams interface { +// NewGlobalParams creates a new instance of GlobalParams. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGlobalParams(t interface { mock.TestingT Cleanup(func()) -} - -// NewGlobalParams creates a new instance of GlobalParams. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGlobalParams(t mockConstructorTestingTNewGlobalParams) *GlobalParams { +}) *GlobalParams { mock := &GlobalParams{} mock.Mock.Test(t) diff --git a/state/protocol/mock/instance_params.go b/state/protocol/mock/instance_params.go index fb428410d19..32fb69dbd5d 100644 --- a/state/protocol/mock/instance_params.go +++ b/state/protocol/mock/instance_params.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -12,39 +12,55 @@ type InstanceParams struct { mock.Mock } -// EpochFallbackTriggered provides a mock function with given fields: -func (_m *InstanceParams) EpochFallbackTriggered() (bool, error) { +// FinalizedRoot provides a mock function with no fields +func (_m *InstanceParams) FinalizedRoot() *flow.Header { ret := _m.Called() - var r0 bool - var r1 error - if rf, ok := ret.Get(0).(func() (bool, error)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for FinalizedRoot") } - if rf, ok := ret.Get(0).(func() bool); ok { + + var r0 *flow.Header + if rf, ok := ret.Get(0).(func() *flow.Header); ok { r0 = rf() } else { - r0 = ret.Get(0).(bool) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Header) + } + } + + return r0 +} + +// Seal provides a mock function with no fields +func (_m *InstanceParams) Seal() *flow.Seal { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Seal") } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + var r0 *flow.Seal + if rf, ok := ret.Get(0).(func() *flow.Seal); ok { + r0 = rf() } else { - r1 = ret.Error(1) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Seal) + } } - return r0, r1 + return r0 } -// Root provides a mock function with given fields: -func (_m *InstanceParams) Root() (*flow.Header, error) { +// SealedRoot provides a mock function with no fields +func (_m *InstanceParams) SealedRoot() *flow.Header { ret := _m.Called() - var r0 *flow.Header - var r1 error - if rf, ok := ret.Get(0).(func() (*flow.Header, error)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for SealedRoot") } + + var r0 *flow.Header if rf, ok := ret.Get(0).(func() *flow.Header); ok { r0 = rf() } else { @@ -53,48 +69,35 @@ func (_m *InstanceParams) Root() (*flow.Header, error) { } } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } -// Seal provides a mock function with given fields: -func (_m *InstanceParams) Seal() (*flow.Seal, error) { +// SporkRootBlock provides a mock function with no fields +func (_m *InstanceParams) SporkRootBlock() *flow.Block { ret := _m.Called() - var r0 *flow.Seal - var r1 error - if rf, ok := ret.Get(0).(func() (*flow.Seal, error)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for SporkRootBlock") } - if rf, ok := ret.Get(0).(func() *flow.Seal); ok { + + var r0 *flow.Block + if rf, ok := ret.Get(0).(func() *flow.Block); ok { r0 = rf() } else { if ret.Get(0) != nil { - 
r0 = ret.Get(0).(*flow.Seal) + r0 = ret.Get(0).(*flow.Block) } } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } -type mockConstructorTestingTNewInstanceParams interface { +// NewInstanceParams creates a new instance of InstanceParams. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewInstanceParams(t interface { mock.TestingT Cleanup(func()) -} - -// NewInstanceParams creates a new instance of InstanceParams. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewInstanceParams(t mockConstructorTestingTNewInstanceParams) *InstanceParams { +}) *InstanceParams { mock := &InstanceParams{} mock.Mock.Test(t) diff --git a/state/protocol/mock/kv_store_reader.go b/state/protocol/mock/kv_store_reader.go new file mode 100644 index 00000000000..906583507f1 --- /dev/null +++ b/state/protocol/mock/kv_store_reader.go @@ -0,0 +1,324 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + protocol "github.com/onflow/flow-go/state/protocol" +) + +// KVStoreReader is an autogenerated mock type for the KVStoreReader type +type KVStoreReader struct { + mock.Mock +} + +// GetCadenceComponentVersion provides a mock function with no fields +func (_m *KVStoreReader) GetCadenceComponentVersion() (protocol.MagnitudeVersion, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCadenceComponentVersion") + } + + var r0 protocol.MagnitudeVersion + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.MagnitudeVersion, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.MagnitudeVersion); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(protocol.MagnitudeVersion) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCadenceComponentVersionUpgrade provides a mock function with no fields +func (_m *KVStoreReader) GetCadenceComponentVersionUpgrade() *protocol.ViewBasedActivator[protocol.MagnitudeVersion] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCadenceComponentVersionUpgrade") + } + + var r0 *protocol.ViewBasedActivator[protocol.MagnitudeVersion] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[protocol.MagnitudeVersion]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[protocol.MagnitudeVersion]) + } + } + + return r0 +} + +// GetEpochExtensionViewCount provides a mock function with no fields +func (_m *KVStoreReader) GetEpochExtensionViewCount() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetEpochExtensionViewCount") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetEpochStateID provides a mock function with no fields +func (_m *KVStoreReader) GetEpochStateID() flow.Identifier { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetEpochStateID") + } + + var r0 flow.Identifier + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + 
return r0 +} + +// GetExecutionComponentVersion provides a mock function with no fields +func (_m *KVStoreReader) GetExecutionComponentVersion() (protocol.MagnitudeVersion, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionComponentVersion") + } + + var r0 protocol.MagnitudeVersion + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.MagnitudeVersion, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.MagnitudeVersion); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(protocol.MagnitudeVersion) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetExecutionComponentVersionUpgrade provides a mock function with no fields +func (_m *KVStoreReader) GetExecutionComponentVersionUpgrade() *protocol.ViewBasedActivator[protocol.MagnitudeVersion] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionComponentVersionUpgrade") + } + + var r0 *protocol.ViewBasedActivator[protocol.MagnitudeVersion] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[protocol.MagnitudeVersion]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[protocol.MagnitudeVersion]) + } + } + + return r0 +} + +// GetExecutionMeteringParameters provides a mock function with no fields +func (_m *KVStoreReader) GetExecutionMeteringParameters() (protocol.ExecutionMeteringParameters, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionMeteringParameters") + } + + var r0 protocol.ExecutionMeteringParameters + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.ExecutionMeteringParameters, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.ExecutionMeteringParameters); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(protocol.ExecutionMeteringParameters) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetExecutionMeteringParametersUpgrade provides a mock function with no fields +func (_m *KVStoreReader) GetExecutionMeteringParametersUpgrade() *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionMeteringParametersUpgrade") + } + + var r0 *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters]) + } + } + + return r0 +} + +// GetFinalizationSafetyThreshold provides a mock function with no fields +func (_m *KVStoreReader) GetFinalizationSafetyThreshold() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFinalizationSafetyThreshold") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetProtocolStateVersion provides a mock function with no fields +func (_m *KVStoreReader) GetProtocolStateVersion() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateVersion") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } 
+ + return r0 +} + +// GetVersionUpgrade provides a mock function with no fields +func (_m *KVStoreReader) GetVersionUpgrade() *protocol.ViewBasedActivator[uint64] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetVersionUpgrade") + } + + var r0 *protocol.ViewBasedActivator[uint64] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[uint64]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[uint64]) + } + } + + return r0 +} + +// ID provides a mock function with no fields +func (_m *KVStoreReader) ID() flow.Identifier { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 flow.Identifier + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + return r0 +} + +// VersionedEncode provides a mock function with no fields +func (_m *KVStoreReader) VersionedEncode() (uint64, []byte, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for VersionedEncode") + } + + var r0 uint64 + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func() (uint64, []byte, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() []byte); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewKVStoreReader creates a new instance of KVStoreReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewKVStoreReader(t interface { + mock.TestingT + Cleanup(func()) +}) *KVStoreReader { + mock := &KVStoreReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/mock/mutable_protocol_state.go b/state/protocol/mock/mutable_protocol_state.go new file mode 100644 index 00000000000..b8e4423a7cd --- /dev/null +++ b/state/protocol/mock/mutable_protocol_state.go @@ -0,0 +1,141 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + deferred "github.com/onflow/flow-go/storage/deferred" + + mock "github.com/stretchr/testify/mock" + + protocol "github.com/onflow/flow-go/state/protocol" +) + +// MutableProtocolState is an autogenerated mock type for the MutableProtocolState type +type MutableProtocolState struct { + mock.Mock +} + +// EpochStateAtBlockID provides a mock function with given fields: blockID +func (_m *MutableProtocolState) EpochStateAtBlockID(blockID flow.Identifier) (protocol.EpochProtocolState, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for EpochStateAtBlockID") + } + + var r0 protocol.EpochProtocolState + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (protocol.EpochProtocolState, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) protocol.EpochProtocolState); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.EpochProtocolState) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EvolveState provides a mock function with given fields: deferredDBOps, parentBlockID, candidateView, candidateSeals +func (_m *MutableProtocolState) EvolveState(deferredDBOps *deferred.DeferredBlockPersist, parentBlockID flow.Identifier, candidateView uint64, candidateSeals []*flow.Seal) (flow.Identifier, error) { + ret := _m.Called(deferredDBOps, parentBlockID, candidateView, candidateSeals) + + if len(ret) == 0 { + panic("no return value specified for EvolveState") + } + + var r0 flow.Identifier + var r1 error + if rf, ok := ret.Get(0).(func(*deferred.DeferredBlockPersist, flow.Identifier, uint64, []*flow.Seal) (flow.Identifier, error)); ok { + return rf(deferredDBOps, parentBlockID, candidateView, candidateSeals) + } + if rf, ok := ret.Get(0).(func(*deferred.DeferredBlockPersist, flow.Identifier, uint64, []*flow.Seal) flow.Identifier); ok { + r0 = rf(deferredDBOps, parentBlockID, candidateView, candidateSeals) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + if rf, ok := ret.Get(1).(func(*deferred.DeferredBlockPersist, flow.Identifier, uint64, []*flow.Seal) error); ok { + r1 = rf(deferredDBOps, parentBlockID, candidateView, candidateSeals) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GlobalParams provides a mock function with no fields +func (_m *MutableProtocolState) GlobalParams() protocol.GlobalParams { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GlobalParams") + } + + var r0 protocol.GlobalParams + if rf, ok := ret.Get(0).(func() protocol.GlobalParams); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.GlobalParams) + } + } + + return r0 +} + +// KVStoreAtBlockID provides a mock function with given fields: blockID +func (_m *MutableProtocolState) KVStoreAtBlockID(blockID flow.Identifier) (protocol.KVStoreReader, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for KVStoreAtBlockID") + } + + var r0 protocol.KVStoreReader + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (protocol.KVStoreReader, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) protocol.KVStoreReader); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.KVStoreReader) + } + } + + if rf, ok := 
ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewMutableProtocolState creates a new instance of MutableProtocolState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMutableProtocolState(t interface { + mock.TestingT + Cleanup(func()) +}) *MutableProtocolState { + mock := &MutableProtocolState{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/mock/params.go b/state/protocol/mock/params.go index 6940960ba4b..8e007746636 100644 --- a/state/protocol/mock/params.go +++ b/state/protocol/mock/params.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -12,111 +12,73 @@ type Params struct { mock.Mock } -// ChainID provides a mock function with given fields: -func (_m *Params) ChainID() (flow.ChainID, error) { +// ChainID provides a mock function with no fields +func (_m *Params) ChainID() flow.ChainID { ret := _m.Called() - var r0 flow.ChainID - var r1 error - if rf, ok := ret.Get(0).(func() (flow.ChainID, error)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for ChainID") } + + var r0 flow.ChainID if rf, ok := ret.Get(0).(func() flow.ChainID); ok { r0 = rf() } else { r0 = ret.Get(0).(flow.ChainID) } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } -// EpochCommitSafetyThreshold provides a mock function with given fields: -func (_m *Params) EpochCommitSafetyThreshold() (uint64, error) { +// FinalizedRoot provides a mock function with no fields +func (_m *Params) FinalizedRoot() *flow.Header { ret := _m.Called() - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) + if len(ret) == 0 { + panic("no return value specified for FinalizedRoot") } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + var r0 *flow.Header + if rf, ok := ret.Get(0).(func() *flow.Header); ok { + r0 = rf() } else { - r1 = ret.Error(1) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Header) + } } - return r0, r1 + return r0 } -// EpochFallbackTriggered provides a mock function with given fields: -func (_m *Params) EpochFallbackTriggered() (bool, error) { +// Seal provides a mock function with no fields +func (_m *Params) Seal() *flow.Seal { ret := _m.Called() - var r0 bool - var r1 error - if rf, ok := ret.Get(0).(func() (bool, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) + if len(ret) == 0 { + panic("no return value specified for Seal") } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + var r0 *flow.Seal + if rf, ok := ret.Get(0).(func() *flow.Seal); ok { + r0 = rf() } else { - r1 = ret.Error(1) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Seal) + } } - return r0, r1 + return r0 } -// ProtocolVersion provides a mock function with given fields: -func (_m *Params) ProtocolVersion() (uint, error) { +// SealedRoot provides a mock function with no fields +func (_m *Params) SealedRoot() *flow.Header { ret := _m.Called() - var r0 uint - var r1 error - if rf, ok := ret.Get(0).(func() (uint, error)); ok { - return rf() - } - if 
rf, ok := ret.Get(0).(func() uint); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint) + if len(ret) == 0 { + panic("no return value specified for SealedRoot") } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Root provides a mock function with given fields: -func (_m *Params) Root() (*flow.Header, error) { - ret := _m.Called() - var r0 *flow.Header - var r1 error - if rf, ok := ret.Get(0).(func() (*flow.Header, error)); ok { - return rf() - } if rf, ok := ret.Get(0).(func() *flow.Header); ok { r0 = rf() } else { @@ -125,98 +87,91 @@ func (_m *Params) Root() (*flow.Header, error) { } } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } -// Seal provides a mock function with given fields: -func (_m *Params) Seal() (*flow.Seal, error) { +// SporkID provides a mock function with no fields +func (_m *Params) SporkID() flow.Identifier { ret := _m.Called() - var r0 *flow.Seal - var r1 error - if rf, ok := ret.Get(0).(func() (*flow.Seal, error)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for SporkID") } - if rf, ok := ret.Get(0).(func() *flow.Seal); ok { + + var r0 flow.Identifier + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.Seal) + r0 = ret.Get(0).(flow.Identifier) } } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } -// SporkID provides a mock function with given fields: -func (_m *Params) SporkID() (flow.Identifier, error) { +// SporkRootBlock provides a mock function with no fields +func (_m *Params) SporkRootBlock() *flow.Block { ret := _m.Called() - var r0 flow.Identifier - var r1 error - if rf, ok := ret.Get(0).(func() (flow.Identifier, error)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for SporkRootBlock") } - if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + + var r0 *flow.Block + if rf, ok := ret.Get(0).(func() *flow.Block); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(flow.Identifier) + r0 = ret.Get(0).(*flow.Block) } } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } -// SporkRootBlockHeight provides a mock function with given fields: -func (_m *Params) SporkRootBlockHeight() (uint64, error) { +// SporkRootBlockHeight provides a mock function with no fields +func (_m *Params) SporkRootBlockHeight() uint64 { ret := _m.Called() - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() + if len(ret) == 0 { + panic("no return value specified for SporkRootBlockHeight") } + + var r0 uint64 if rf, ok := ret.Get(0).(func() uint64); ok { r0 = rf() } else { r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + return r0 +} + +// SporkRootBlockView provides a mock function with no fields +func (_m *Params) SporkRootBlockView() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for SporkRootBlockView") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() } else { - r1 = ret.Error(1) + r0 = ret.Get(0).(uint64) } - return r0, r1 + return r0 } -type mockConstructorTestingTNewParams interface { +// NewParams creates a new instance of Params. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewParams(t interface { mock.TestingT Cleanup(func()) -} - -// NewParams creates a new instance of Params. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewParams(t mockConstructorTestingTNewParams) *Params { +}) *Params { mock := &Params{} mock.Mock.Test(t) diff --git a/state/protocol/mock/participant_state.go b/state/protocol/mock/participant_state.go index b0bfd3a54f3..e885439723b 100644 --- a/state/protocol/mock/participant_state.go +++ b/state/protocol/mock/participant_state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -20,6 +20,10 @@ type ParticipantState struct { func (_m *ParticipantState) AtBlockID(blockID flow.Identifier) protocol.Snapshot { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for AtBlockID") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func(flow.Identifier) protocol.Snapshot); ok { r0 = rf(blockID) @@ -36,6 +40,10 @@ func (_m *ParticipantState) AtBlockID(blockID flow.Identifier) protocol.Snapshot func (_m *ParticipantState) AtHeight(height uint64) protocol.Snapshot { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for AtHeight") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func(uint64) protocol.Snapshot); ok { r0 = rf(height) @@ -49,11 +57,15 @@ func (_m *ParticipantState) AtHeight(height uint64) protocol.Snapshot { } // Extend provides a mock function with given fields: ctx, candidate -func (_m *ParticipantState) Extend(ctx context.Context, candidate *flow.Block) error { +func (_m *ParticipantState) Extend(ctx context.Context, candidate *flow.Proposal) error { ret := _m.Called(ctx, candidate) + if len(ret) == 0 { + panic("no return value specified for Extend") + } + var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *flow.Block) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, *flow.Proposal) error); ok { r0 = rf(ctx, candidate) } else { r0 = ret.Error(0) @@ -62,13 +74,17 @@ func (_m *ParticipantState) Extend(ctx context.Context, candidate *flow.Block) e return r0 } -// ExtendCertified provides a mock function with given fields: ctx, candidate, qc -func (_m *ParticipantState) ExtendCertified(ctx context.Context, candidate *flow.Block, qc *flow.QuorumCertificate) error { - ret := _m.Called(ctx, candidate, qc) +// ExtendCertified provides a mock function with given fields: ctx, certified +func (_m *ParticipantState) ExtendCertified(ctx context.Context, certified *flow.CertifiedBlock) error { + ret := _m.Called(ctx, certified) + + if len(ret) == 0 { + panic("no return value specified for ExtendCertified") + } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *flow.Block, *flow.QuorumCertificate) error); ok { - r0 = rf(ctx, candidate, qc) + if rf, ok := ret.Get(0).(func(context.Context, *flow.CertifiedBlock) error); ok { + r0 = rf(ctx, certified) } else { r0 = ret.Error(0) } @@ -76,10 +92,14 @@ func (_m *ParticipantState) ExtendCertified(ctx context.Context, candidate *flow return r0 } -// Final provides a mock function with given fields: +// Final provides a mock function with no fields func (_m *ParticipantState) Final() protocol.Snapshot { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Final") + } 
+ var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func() protocol.Snapshot); ok { r0 = rf() @@ -96,6 +116,10 @@ func (_m *ParticipantState) Final() protocol.Snapshot { func (_m *ParticipantState) Finalize(ctx context.Context, blockID flow.Identifier) error { ret := _m.Called(ctx, blockID) + if len(ret) == 0 { + panic("no return value specified for Finalize") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, flow.Identifier) error); ok { r0 = rf(ctx, blockID) @@ -106,10 +130,14 @@ func (_m *ParticipantState) Finalize(ctx context.Context, blockID flow.Identifie return r0 } -// Params provides a mock function with given fields: +// Params provides a mock function with no fields func (_m *ParticipantState) Params() protocol.Params { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Params") + } + var r0 protocol.Params if rf, ok := ret.Get(0).(func() protocol.Params); ok { r0 = rf() @@ -122,10 +150,14 @@ func (_m *ParticipantState) Params() protocol.Params { return r0 } -// Sealed provides a mock function with given fields: +// Sealed provides a mock function with no fields func (_m *ParticipantState) Sealed() protocol.Snapshot { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Sealed") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func() protocol.Snapshot); ok { r0 = rf() @@ -138,13 +170,12 @@ func (_m *ParticipantState) Sealed() protocol.Snapshot { return r0 } -type mockConstructorTestingTNewParticipantState interface { +// NewParticipantState creates a new instance of ParticipantState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewParticipantState(t interface { mock.TestingT Cleanup(func()) -} - -// NewParticipantState creates a new instance of ParticipantState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewParticipantState(t mockConstructorTestingTNewParticipantState) *ParticipantState { +}) *ParticipantState { mock := &ParticipantState{} mock.Mock.Test(t) diff --git a/state/protocol/mock/protocol_state.go b/state/protocol/mock/protocol_state.go new file mode 100644 index 00000000000..a7c96a112d2 --- /dev/null +++ b/state/protocol/mock/protocol_state.go @@ -0,0 +1,109 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + protocol "github.com/onflow/flow-go/state/protocol" +) + +// ProtocolState is an autogenerated mock type for the ProtocolState type +type ProtocolState struct { + mock.Mock +} + +// EpochStateAtBlockID provides a mock function with given fields: blockID +func (_m *ProtocolState) EpochStateAtBlockID(blockID flow.Identifier) (protocol.EpochProtocolState, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for EpochStateAtBlockID") + } + + var r0 protocol.EpochProtocolState + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (protocol.EpochProtocolState, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) protocol.EpochProtocolState); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.EpochProtocolState) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GlobalParams provides a mock function with no fields +func (_m *ProtocolState) GlobalParams() protocol.GlobalParams { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GlobalParams") + } + + var r0 protocol.GlobalParams + if rf, ok := ret.Get(0).(func() protocol.GlobalParams); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.GlobalParams) + } + } + + return r0 +} + +// KVStoreAtBlockID provides a mock function with given fields: blockID +func (_m *ProtocolState) KVStoreAtBlockID(blockID flow.Identifier) (protocol.KVStoreReader, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for KVStoreAtBlockID") + } + + var r0 protocol.KVStoreReader + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (protocol.KVStoreReader, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) protocol.KVStoreReader); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.KVStoreReader) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewProtocolState creates a new instance of ProtocolState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProtocolState(t interface { + mock.TestingT + Cleanup(func()) +}) *ProtocolState { + mock := &ProtocolState{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/mock/snapshot.go b/state/protocol/mock/snapshot.go index 0cce1c96112..bda1dcc90a9 100644 --- a/state/protocol/mock/snapshot.go +++ b/state/protocol/mock/snapshot.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -14,10 +14,14 @@ type Snapshot struct { mock.Mock } -// Commit provides a mock function with given fields: +// Commit provides a mock function with no fields func (_m *Snapshot) Commit() (flow.StateCommitment, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 flow.StateCommitment var r1 error if rf, ok := ret.Get(0).(func() (flow.StateCommitment, error)); ok { @@ -40,10 +44,14 @@ func (_m *Snapshot) Commit() (flow.StateCommitment, error) { return r0, r1 } -// Descendants provides a mock function with given fields: +// Descendants provides a mock function with no fields func (_m *Snapshot) Descendants() ([]flow.Identifier, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Descendants") + } + var r0 []flow.Identifier var r1 error if rf, ok := ret.Get(0).(func() ([]flow.Identifier, error)); ok { @@ -66,10 +74,72 @@ func (_m *Snapshot) Descendants() ([]flow.Identifier, error) { return r0, r1 } -// Epochs provides a mock function with given fields: +// EpochPhase provides a mock function with no fields +func (_m *Snapshot) EpochPhase() (flow.EpochPhase, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EpochPhase") + } + + var r0 flow.EpochPhase + var r1 error + if rf, ok := ret.Get(0).(func() (flow.EpochPhase, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() flow.EpochPhase); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(flow.EpochPhase) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EpochProtocolState provides a mock function with no fields +func (_m *Snapshot) EpochProtocolState() (protocol.EpochProtocolState, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EpochProtocolState") + } + + var r0 protocol.EpochProtocolState + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.EpochProtocolState, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.EpochProtocolState); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.EpochProtocolState) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Epochs provides a mock function with no fields func (_m *Snapshot) Epochs() protocol.EpochQuery { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Epochs") + } + var r0 protocol.EpochQuery if rf, ok := ret.Get(0).(func() protocol.EpochQuery); ok { r0 = rf() @@ -82,10 +152,14 @@ func (_m *Snapshot) Epochs() protocol.EpochQuery { return r0 } -// Head provides a mock function with given fields: +// Head provides a mock function with no fields func (_m *Snapshot) Head() (*flow.Header, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Head") + } + var r0 *flow.Header var r1 error if rf, ok := ret.Get(0).(func() (*flow.Header, error)); ok { @@ -109,15 +183,19 @@ func (_m *Snapshot) Head() (*flow.Header, error) { } // Identities provides a mock function with given fields: selector -func (_m *Snapshot) Identities(selector flow.IdentityFilter) (flow.IdentityList, error) { +func (_m *Snapshot) Identities(selector flow.IdentityFilter[flow.Identity]) (flow.IdentityList, error) { ret := _m.Called(selector) + if len(ret) == 0 { + panic("no return value specified for Identities") + } + var r0 flow.IdentityList var r1 error - if rf, 
ok := ret.Get(0).(func(flow.IdentityFilter) (flow.IdentityList, error)); ok { + if rf, ok := ret.Get(0).(func(flow.IdentityFilter[flow.Identity]) (flow.IdentityList, error)); ok { return rf(selector) } - if rf, ok := ret.Get(0).(func(flow.IdentityFilter) flow.IdentityList); ok { + if rf, ok := ret.Get(0).(func(flow.IdentityFilter[flow.Identity]) flow.IdentityList); ok { r0 = rf(selector) } else { if ret.Get(0) != nil { @@ -125,7 +203,7 @@ func (_m *Snapshot) Identities(selector flow.IdentityFilter) (flow.IdentityList, } } - if rf, ok := ret.Get(1).(func(flow.IdentityFilter) error); ok { + if rf, ok := ret.Get(1).(func(flow.IdentityFilter[flow.Identity]) error); ok { r1 = rf(selector) } else { r1 = ret.Error(1) @@ -138,6 +216,10 @@ func (_m *Snapshot) Identities(selector flow.IdentityFilter) (flow.IdentityList, func (_m *Snapshot) Identity(nodeID flow.Identifier) (*flow.Identity, error) { ret := _m.Called(nodeID) + if len(ret) == 0 { + panic("no return value specified for Identity") + } + var r0 *flow.Identity var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Identity, error)); ok { @@ -160,10 +242,14 @@ func (_m *Snapshot) Identity(nodeID flow.Identifier) (*flow.Identity, error) { return r0, r1 } -// Params provides a mock function with given fields: +// Params provides a mock function with no fields func (_m *Snapshot) Params() protocol.GlobalParams { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Params") + } + var r0 protocol.GlobalParams if rf, ok := ret.Get(0).(func() protocol.GlobalParams); ok { r0 = rf() @@ -176,19 +262,25 @@ func (_m *Snapshot) Params() protocol.GlobalParams { return r0 } -// Phase provides a mock function with given fields: -func (_m *Snapshot) Phase() (flow.EpochPhase, error) { +// ProtocolState provides a mock function with no fields +func (_m *Snapshot) ProtocolState() (protocol.KVStoreReader, error) { ret := _m.Called() - var r0 flow.EpochPhase + if len(ret) == 0 { + panic("no return value specified for ProtocolState") + } + + var r0 protocol.KVStoreReader var r1 error - if rf, ok := ret.Get(0).(func() (flow.EpochPhase, error)); ok { + if rf, ok := ret.Get(0).(func() (protocol.KVStoreReader, error)); ok { return rf() } - if rf, ok := ret.Get(0).(func() flow.EpochPhase); ok { + if rf, ok := ret.Get(0).(func() protocol.KVStoreReader); ok { r0 = rf() } else { - r0 = ret.Get(0).(flow.EpochPhase) + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.KVStoreReader) + } } if rf, ok := ret.Get(1).(func() error); ok { @@ -200,10 +292,14 @@ func (_m *Snapshot) Phase() (flow.EpochPhase, error) { return r0, r1 } -// QuorumCertificate provides a mock function with given fields: +// QuorumCertificate provides a mock function with no fields func (_m *Snapshot) QuorumCertificate() (*flow.QuorumCertificate, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for QuorumCertificate") + } + var r0 *flow.QuorumCertificate var r1 error if rf, ok := ret.Get(0).(func() (*flow.QuorumCertificate, error)); ok { @@ -226,10 +322,14 @@ func (_m *Snapshot) QuorumCertificate() (*flow.QuorumCertificate, error) { return r0, r1 } -// RandomSource provides a mock function with given fields: +// RandomSource provides a mock function with no fields func (_m *Snapshot) RandomSource() ([]byte, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RandomSource") + } + var r0 []byte var r1 error if rf, ok := ret.Get(0).(func() ([]byte, error)); ok { @@ -252,10 +352,14 @@ func (_m 
*Snapshot) RandomSource() ([]byte, error) { return r0, r1 } -// SealedResult provides a mock function with given fields: +// SealedResult provides a mock function with no fields func (_m *Snapshot) SealedResult() (*flow.ExecutionResult, *flow.Seal, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SealedResult") + } + var r0 *flow.ExecutionResult var r1 *flow.Seal var r2 error @@ -287,10 +391,14 @@ func (_m *Snapshot) SealedResult() (*flow.ExecutionResult, *flow.Seal, error) { return r0, r1, r2 } -// SealingSegment provides a mock function with given fields: +// SealingSegment provides a mock function with no fields func (_m *Snapshot) SealingSegment() (*flow.SealingSegment, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SealingSegment") + } + var r0 *flow.SealingSegment var r1 error if rf, ok := ret.Get(0).(func() (*flow.SealingSegment, error)); ok { @@ -313,13 +421,42 @@ func (_m *Snapshot) SealingSegment() (*flow.SealingSegment, error) { return r0, r1 } -type mockConstructorTestingTNewSnapshot interface { - mock.TestingT - Cleanup(func()) +// VersionBeacon provides a mock function with no fields +func (_m *Snapshot) VersionBeacon() (*flow.SealedVersionBeacon, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for VersionBeacon") + } + + var r0 *flow.SealedVersionBeacon + var r1 error + if rf, ok := ret.Get(0).(func() (*flow.SealedVersionBeacon, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *flow.SealedVersionBeacon); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.SealedVersionBeacon) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // NewSnapshot creates a new instance of Snapshot. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSnapshot(t mockConstructorTestingTNewSnapshot) *Snapshot { +// The first argument is typically a *testing.T value. +func NewSnapshot(t interface { + mock.TestingT + Cleanup(func()) +}) *Snapshot { mock := &Snapshot{} mock.Mock.Test(t) diff --git a/state/protocol/mock/snapshot_execution_subset.go b/state/protocol/mock/snapshot_execution_subset.go new file mode 100644 index 00000000000..70890194cc7 --- /dev/null +++ b/state/protocol/mock/snapshot_execution_subset.go @@ -0,0 +1,87 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// SnapshotExecutionSubset is an autogenerated mock type for the SnapshotExecutionSubset type +type SnapshotExecutionSubset struct { + mock.Mock +} + +// RandomSource provides a mock function with no fields +func (_m *SnapshotExecutionSubset) RandomSource() ([]byte, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for RandomSource") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func() ([]byte, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VersionBeacon provides a mock function with no fields +func (_m *SnapshotExecutionSubset) VersionBeacon() (*flow.SealedVersionBeacon, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for VersionBeacon") + } + + var r0 *flow.SealedVersionBeacon + var r1 error + if rf, ok := ret.Get(0).(func() (*flow.SealedVersionBeacon, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *flow.SealedVersionBeacon); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.SealedVersionBeacon) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSnapshotExecutionSubset creates a new instance of SnapshotExecutionSubset. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSnapshotExecutionSubset(t interface { + mock.TestingT + Cleanup(func()) +}) *SnapshotExecutionSubset { + mock := &SnapshotExecutionSubset{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/mock/snapshot_execution_subset_provider.go b/state/protocol/mock/snapshot_execution_subset_provider.go new file mode 100644 index 00000000000..3b62ea57611 --- /dev/null +++ b/state/protocol/mock/snapshot_execution_subset_provider.go @@ -0,0 +1,49 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + protocol "github.com/onflow/flow-go/state/protocol" +) + +// SnapshotExecutionSubsetProvider is an autogenerated mock type for the SnapshotExecutionSubsetProvider type +type SnapshotExecutionSubsetProvider struct { + mock.Mock +} + +// AtBlockID provides a mock function with given fields: blockID +func (_m *SnapshotExecutionSubsetProvider) AtBlockID(blockID flow.Identifier) protocol.SnapshotExecutionSubset { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for AtBlockID") + } + + var r0 protocol.SnapshotExecutionSubset + if rf, ok := ret.Get(0).(func(flow.Identifier) protocol.SnapshotExecutionSubset); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol.SnapshotExecutionSubset) + } + } + + return r0 +} + +// NewSnapshotExecutionSubsetProvider creates a new instance of SnapshotExecutionSubsetProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewSnapshotExecutionSubsetProvider(t interface { + mock.TestingT + Cleanup(func()) +}) *SnapshotExecutionSubsetProvider { + mock := &SnapshotExecutionSubsetProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/mock/state.go b/state/protocol/mock/state.go index 51a1559eff1..ee85272bd91 100644 --- a/state/protocol/mock/state.go +++ b/state/protocol/mock/state.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -18,6 +18,10 @@ type State struct { func (_m *State) AtBlockID(blockID flow.Identifier) protocol.Snapshot { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for AtBlockID") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func(flow.Identifier) protocol.Snapshot); ok { r0 = rf(blockID) @@ -34,6 +38,10 @@ func (_m *State) AtBlockID(blockID flow.Identifier) protocol.Snapshot { func (_m *State) AtHeight(height uint64) protocol.Snapshot { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for AtHeight") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func(uint64) protocol.Snapshot); ok { r0 = rf(height) @@ -46,10 +54,14 @@ func (_m *State) AtHeight(height uint64) protocol.Snapshot { return r0 } -// Final provides a mock function with given fields: +// Final provides a mock function with no fields func (_m *State) Final() protocol.Snapshot { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Final") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func() protocol.Snapshot); ok { r0 = rf() @@ -62,10 +74,14 @@ func (_m *State) Final() protocol.Snapshot { return r0 } -// Params provides a mock function with given fields: +// Params provides a mock function with no fields func (_m *State) Params() protocol.Params { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Params") + } + var r0 protocol.Params if rf, ok := ret.Get(0).(func() protocol.Params); ok { r0 = rf() @@ -78,10 +94,14 @@ func (_m *State) Params() protocol.Params { return r0 } -// Sealed provides a mock function with given fields: +// Sealed provides a mock function with no fields func (_m *State) Sealed() protocol.Snapshot { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Sealed") + } + var r0 protocol.Snapshot if rf, ok := ret.Get(0).(func() protocol.Snapshot); ok { r0 = rf() @@ -94,13 +114,12 @@ func (_m *State) Sealed() protocol.Snapshot { return r0 } -type mockConstructorTestingTNewState interface { +// NewState creates a new instance of State. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewState(t interface { mock.TestingT Cleanup(func()) -} - -// NewState creates a new instance of State. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewState(t mockConstructorTestingTNewState) *State { +}) *State { mock := &State{} mock.Mock.Test(t) diff --git a/state/protocol/mock/tentative_epoch.go b/state/protocol/mock/tentative_epoch.go new file mode 100644 index 00000000000..b291d94df7f --- /dev/null +++ b/state/protocol/mock/tentative_epoch.go @@ -0,0 +1,95 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// TentativeEpoch is an autogenerated mock type for the TentativeEpoch type +type TentativeEpoch struct { + mock.Mock +} + +// Clustering provides a mock function with no fields +func (_m *TentativeEpoch) Clustering() (flow.ClusterList, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Clustering") + } + + var r0 flow.ClusterList + var r1 error + if rf, ok := ret.Get(0).(func() (flow.ClusterList, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() flow.ClusterList); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.ClusterList) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Counter provides a mock function with no fields +func (_m *TentativeEpoch) Counter() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Counter") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// InitialIdentities provides a mock function with no fields +func (_m *TentativeEpoch) InitialIdentities() flow.IdentitySkeletonList { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for InitialIdentities") + } + + var r0 flow.IdentitySkeletonList + if rf, ok := ret.Get(0).(func() flow.IdentitySkeletonList); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.IdentitySkeletonList) + } + } + + return r0 +} + +// NewTentativeEpoch creates a new instance of TentativeEpoch. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTentativeEpoch(t interface { + mock.TestingT + Cleanup(func()) +}) *TentativeEpoch { + mock := &TentativeEpoch{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/mock/versioned_encodable.go b/state/protocol/mock/versioned_encodable.go new file mode 100644 index 00000000000..04ef23e4705 --- /dev/null +++ b/state/protocol/mock/versioned_encodable.go @@ -0,0 +1,61 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// VersionedEncodable is an autogenerated mock type for the VersionedEncodable type +type VersionedEncodable struct { + mock.Mock +} + +// VersionedEncode provides a mock function with no fields +func (_m *VersionedEncodable) VersionedEncode() (uint64, []byte, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for VersionedEncode") + } + + var r0 uint64 + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func() (uint64, []byte, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() []byte); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewVersionedEncodable creates a new instance of VersionedEncodable. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value.
+func NewVersionedEncodable(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *VersionedEncodable {
+	mock := &VersionedEncodable{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/state/protocol/params.go b/state/protocol/params.go
index 2c65ae73690..16c9314c09e 100644
--- a/state/protocol/params.go
+++ b/state/protocol/params.go
@@ -17,24 +17,21 @@ type Params interface {
 // different instance params.
 type InstanceParams interface {
 
-	// Root returns the root header of the current protocol state. This will be
+	// FinalizedRoot returns the finalized root header of the current protocol state. This will be
 	// the head of the protocol state snapshot used to bootstrap this state and
 	// may differ from node to node for the same protocol state.
-	// No errors are expected during normal operation.
-	Root() (*flow.Header, error)
+	FinalizedRoot() *flow.Header
 
-	// Seal returns the root block seal of the current protocol state. This will be
-	// the seal for the root block used to bootstrap this state and may differ from
-	// node to node for the same protocol state.
-	// No errors are expected during normal operation.
-	Seal() (*flow.Seal, error)
+	// SealedRoot returns the sealed root block. If it differs from the FinalizedRoot() block,
+	// the node was bootstrapped from a mid-spork snapshot.
+	SealedRoot() *flow.Header
 
-	// EpochFallbackTriggered returns whether epoch fallback mode (EECC) has been triggered.
-	// EECC is a permanent, spork-scoped state which is triggered when the next
-	// epoch fails to be committed in the allocated time. Once EECC is triggered,
-	// it will remain in effect until the next spork.
-	// No errors are expected during normal operation.
-	EpochFallbackTriggered() (bool, error)
+	// Seal returns the root block seal of the current protocol state. This is the seal for the
+	// `SealedRoot` block that was used to bootstrap this state. It may differ from node to node.
+	Seal() *flow.Seal
+
+	// SporkRootBlock returns the root block for the present spork.
+	SporkRootBlock() *flow.Block
 }
 
 // GlobalParams represents protocol state parameters that do not vary between instances.
@@ -44,71 +41,22 @@ type GlobalParams interface {
 
 	// ChainID returns the chain ID for the current Flow network. The chain ID
 	// uniquely identifies a Flow network in perpetuity across epochs and sporks.
-	// No errors are expected during normal operation.
-	ChainID() (flow.ChainID, error)
+	ChainID() flow.ChainID
 
 	// SporkID returns the unique identifier for this network within the current spork.
 	// This ID is determined at the beginning of a spork during bootstrapping and is
 	// part of the root protocol state snapshot.
-	// No errors are expected during normal operation.
-	SporkID() (flow.Identifier, error)
+	SporkID() flow.Identifier
 
 	// SporkRootBlockHeight returns the height of the spork's root block.
 	// This value is determined at the beginning of a spork during bootstrapping.
 	// If node uses a sealing segment for bootstrapping then this value will be carried over
 	// as part of snapshot.
-	// No errors are expected during normal operation.
-	SporkRootBlockHeight() (uint64, error)
-
-	// ProtocolVersion returns the protocol version, the major software version
-	// of the protocol software.
-	// No errors are expected during normal operation.
-	ProtocolVersion() (uint, error)
+	SporkRootBlockHeight() uint64
 
-	// EpochCommitSafetyThreshold defines a deadline for sealing the EpochCommit
-	// service event near the end of each epoch - the "epoch commitment deadline".
-	// Given a safety threshold t, the deadline for an epoch with final view f is:
-	//   Epoch Commitment Deadline: d=f-t
-	//
-	// DEFINITION:
-	// This deadline is used to determine when to trigger epoch emergency fallback mode.
-	// Epoch Emergency Fallback mode is triggered when the EpochCommit service event
-	// fails to be sealed.
-	//
-	// Example: A service event is emitted in block A. The seal for A is included in C.
-	//   A<-B(RA)<-C(SA)<-...<-R
-	//
-	// A service event S is considered sealed w.r.t. a reference block R if:
-	// * S was emitted during execution of some block A, s.t. A is an ancestor of R
-	// * The seal for block A was included in some block C, s.t C is an ancestor of R
-	//
-	// When we finalize the first block B with B.View >= d:
-	// HAPPY PATH: If an EpochCommit service event has been sealed w.r.t. B, no action is taken.
-	// FALLBACK PATH: If no EpochCommit service event has been sealed w.r.t. B, epoch fallback mode (EECC) is triggered.
-	//
-	// CONTEXT:
-	// The epoch commitment deadline exists to ensure that all nodes agree on
-	// whether epoch fallback mode is triggered for a particular epoch, before
-	// the epoch actually ends. Although the use of this deadline DOES NOT
-	// guarantee these properties, it is a simpler way to assure them with high
-	// likelihood, given reasonable configuration.
-	// In particular, all nodes will agree about EECC being triggered (or not)
-	// if at least one block with view in [d, f] is finalized - in other words
-	// at least one block is finalized after the epoch commitment deadline, and
-	// before the next epoch begins.
-	//
-	// When selecting a threshold value, ensure:
-	// * The deadline is after the end of the DKG, with enough buffer between
-	//   the two that the EpochCommit event is overwhelmingly likely to be emitted
-	//   before the deadline, if it is emitted at all.
-	// * The buffer between the deadline and the final view of the epoch is large
-	//   enough that the network is overwhelming likely to finalize at least one
-	//   block with a view in this range
-	//
-	//                         /- Epoch Commitment Deadline
-	// EPOCH N                 v        EPOCH N+1
-	// ...------------|------||-----...
-	//
-	// No errors are expected during normal operation.
-	EpochCommitSafetyThreshold() (uint64, error)
+	// SporkRootBlockView returns the view of the spork's root block.
+	// This value is determined at the beginning of a spork during bootstrapping.
+	// If a node uses a sealing segment for bootstrapping, this value is carried over
+	// as part of the snapshot.
+	SporkRootBlockView() uint64
 }
diff --git a/state/protocol/prg/customizers.go b/state/protocol/prg/customizers.go
new file mode 100644
index 00000000000..520ffc51eeb
--- /dev/null
+++ b/state/protocol/prg/customizers.go
@@ -0,0 +1,55 @@
+package prg
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+)
+
+// List of customizers used for different sub-protocol PRGs.
+// These customizers help instantiate different PRGs from the
+// same source of randomness.
+//
+// Customizers used by the Flow protocol must not be equal to, or be
+// prefixes of, one another, so that the resulting PRGs are independent. This
+// is enforced by the test `TestProtocolConstants` in `./prg_test.go`.
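+//
+// For illustration (see `customizerFromIndices` below): each index is encoded
+// over 2 little-endian bytes, so e.g. customizerFromIndices(1, 0) yields the
+// customizer []byte{0x01, 0x00, 0x00, 0x00}.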
+var (
+	// ConsensusLeaderSelection is the customizer for consensus leader selection
+	ConsensusLeaderSelection = customizerFromIndices(0)
+	// VerificationChunkAssignment is the customizer for verification chunk assignment
+	VerificationChunkAssignment = customizerFromIndices(2)
+	// ExecutionEnvironment is the customizer for Flow's transaction execution environment
+	// (used for Cadence `random` function)
+	ExecutionEnvironment = customizerFromIndices(1, 0)
+	// ExecutionRandomSourceHistory is the customizer for Flow's transaction execution environment
+	// (used for the source of randomness history core-contract)
+	ExecutionRandomSourceHistory = customizerFromIndices(1, 1)
+	//
+	// clusterLeaderSelectionPrefix is the prefix used for CollectorClusterLeaderSelection
+	clusterLeaderSelectionPrefix = []uint16{3}
+)
+
+// CollectorClusterLeaderSelection returns the customizer for the leader selection of the i-th collector cluster
+func CollectorClusterLeaderSelection(clusterIndex uint) []byte {
+	if uint(math.MaxUint16) < clusterIndex {
+		// sanity check to guarantee no overflows during type conversion -- this should never happen
+		panic(fmt.Sprintf("input cluster index (%d) exceeds max uint16 value %d", clusterIndex, math.MaxUint16))
+	}
+	indices := append(clusterLeaderSelectionPrefix, uint16(clusterIndex))
+	return customizerFromIndices(indices...)
+}
+
+// customizerFromIndices converts the input indices into a slice of bytes.
+// The function has to be injective (distinct index sequences must map to distinct customizers).
+//
+// The output is built as a concatenation of the indices, each encoded over 2 bytes.
+func customizerFromIndices(indices ...uint16) []byte {
+	customizerLen := 2 * len(indices)
+	customizer := make([]byte, customizerLen)
+	// concatenate the indices
+	for i, index := range indices {
+		binary.LittleEndian.PutUint16(customizer[2*i:2*i+2], index)
+	}
+	return customizer
+}
diff --git a/state/protocol/prg/prg.go b/state/protocol/prg/prg.go
new file mode 100644
index 00000000000..d17fd1a9ac0
--- /dev/null
+++ b/state/protocol/prg/prg.go
@@ -0,0 +1,68 @@
+package prg
+
+import (
+	"fmt"
+
+	"golang.org/x/crypto/sha3"
+
+	"github.com/onflow/crypto"
+	"github.com/onflow/crypto/random"
+)
+
+const RandomSourceLength = crypto.SignatureLenBLSBLS12381
+
+// New returns a PRG seeded by the input source of randomness [SoR].
+// The customizer is used to generate a task-specific PRG. A customizer can be any slice
+// of 12 bytes or less.
+// The diversifier is used to further diversify the PRGs beyond the customizer. A diversifier
+// can be a slice of any length. If no diversification is needed, `diversifier` can be `nil`.
+//
+// The function uses an extendable-output function (XOF) to extract and expand the input source,
+// so that any source with enough entropy (at least 128 bits) can be used (no need to pre-hash).
+// The current implementation generates a ChaCha20-based CSPRG.
+//
+// How to use the function in the Flow protocol: any sub-protocol that requires deterministic and
+// distributed randomness should rely on the Flow native randomness provided by the Random Beacon.
+// The beacon SoR for block B is part of the QC certifying B and can be extracted using the
+// function `consensus/hotstuff/model.BeaconSignature(*flow.QuorumCertificate)`. It can also be
+// extracted using the `state/protocol/snapshot.RandomSource()` function.
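+//
+// Example (an illustrative sketch from a caller's perspective; `source` stands
+// for a beacon source of randomness obtained as described above):
+//
+//	rng, err := prg.New(source, prg.ConsensusLeaderSelection, nil)
+//	if err != nil {
+//		return fmt.Errorf("could not create PRG: %w", err)
+//	}
+//	out := make([]byte, 32)
+//	rng.Read(out) // fills `out` with deterministic pseudo-random bytes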
+//
+// While the output is a distributed source of randomness, it should _not_ be used directly as
+// random numbers. Instead, use the function `New` to instantiate a PRG
+// for deterministic generation of random numbers or permutations (see the random.Rand interface).
+//
+// Every Flow sub-protocol should use its own customizer to create an independent PRG. Use the list in
+// "customizers.go" to add new values. The same sub-protocol can further create independent PRGs
+// by using `diversifier`.
+func New(source []byte, customizer []byte, diversifier []byte) (random.Rand, error) {
+	seed, err := xof(source, diversifier, random.Chacha20SeedLen)
+	if err != nil {
+		return nil, fmt.Errorf("extendable output function failed: %w", err)
+	}
+
+	// create random number generator from the seed and customizer
+	rng, err := random.NewChacha20PRG(seed, customizer)
+	if err != nil {
+		return nil, fmt.Errorf("could not create ChaCha20 PRG: %w", err)
+	}
+	return rng, nil
+}
+
+// xof (extendable-output function) extracts and expands the input's entropy into
+// an output byte-slice of length `outLen`.
+// It also takes a `diversifier` slice as an input to create independent outputs.
+//
+// Purpose of this function: it abstracts the extraction and expansion of
+// entropy from the rest of the PRG logic. The source doesn't necessarily have uniformly
+// distributed entropy (for instance a cryptographic signature), and hashing doesn't necessarily
+// output the number of bytes required for the PRG seed (the code currently relies on ChaCha20, but this
+// choice could evolve).
+func xof(source []byte, diversifier []byte, outLen int) ([]byte, error) {
+	// cSHAKE is used in this case, but any other primitive that acts as an XOF
+	// and accepts a diversifier could be used.
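+	// Note: sha3.NewCShake128(N, S) implements cSHAKE128 (NIST SP 800-185); the
+	// diversifier is passed as the customization string S, which is what makes
+	// outputs under different diversifiers independent.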
+	shake := sha3.NewCShake128(nil, diversifier)
+	_, _ = shake.Write(source) // cSHAKE Write doesn't error
+	out := make([]byte, outLen)
+	_, _ = shake.Read(out) // cSHAKE Read doesn't error
+	return out, nil
+}
diff --git a/state/protocol/prg/prg_test.go b/state/protocol/prg/prg_test.go
new file mode 100644
index 00000000000..bb47096eff0
--- /dev/null
+++ b/state/protocol/prg/prg_test.go
@@ -0,0 +1,79 @@
+package prg
+
+import (
+	"bytes"
+	"crypto/rand"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func getRandomSource(t *testing.T) []byte {
+	seed := make([]byte, RandomSourceLength)
+	_, err := rand.Read(seed) // checking err is enough
+	require.NoError(t, err)
+	t.Logf("seed is %#x", seed)
+	return seed
+}
+
+func getRandoms(t *testing.T, seed, customizer, diversifier []byte, N int) []byte {
+	prg, err := New(seed, customizer, diversifier)
+	require.NoError(t, err)
+	rand := make([]byte, N)
+	prg.Read(rand)
+	return rand
+}
+
+// check that PRGs created from the same source give the same outputs
+func TestDeterministic(t *testing.T) {
+	seed := getRandomSource(t)
+	customizer := []byte("cust test")
+	diversifier := []byte("div test")
+
+	rand1 := getRandoms(t, seed, customizer, diversifier, 100)
+	rand2 := getRandoms(t, seed, customizer, diversifier, 100)
+	assert.Equal(t, rand1, rand2)
+}
+
+// check that different customizers or diversifiers lead to different outputs
+func TestDifferentInstances(t *testing.T) {
+	seed := getRandomSource(t)
+	customizer1 := []byte("cust test1")
+	customizer2 := []byte("cust test2")
+	diversifier1 := []byte("div test1")
+	diversifier2 := []byte("div test2")
+	// different customizers
+	rand1 := getRandoms(t, seed, customizer1, diversifier1, 2)
+	rand2 := getRandoms(t, seed, customizer2, diversifier1, 2)
+	assert.NotEqual(t, rand1, rand2)
+	// different diversifiers
+	rand1 = getRandoms(t, seed, customizer1, diversifier1, 2)
+	rand2 = getRandoms(t, seed, customizer1, diversifier2, 2)
+	assert.NotEqual(t, rand1, rand2)
+	// check that no error is returned with an empty customizer and diversifier
+	_ = getRandoms(t, seed, nil, nil, 2) // error is checked inside the call
+}
+
+// Sanity check that all customizers used by the Flow protocol
+// are different and are not prefixes of each other
+func TestProtocolConstants(t *testing.T) {
+	// include all sub-protocol customizers
+	customizers := [][]byte{
+		ConsensusLeaderSelection,
+		VerificationChunkAssignment,
+		ExecutionEnvironment,
+		ExecutionRandomSourceHistory,
+		customizerFromIndices(clusterLeaderSelectionPrefix...),
+	}
+
+	// go through all pairs
+	for i, c := range customizers {
+		for j, other := range customizers {
+			if i == j {
+				continue
+			}
+			assert.False(t, bytes.HasPrefix(c, other))
+		}
+	}
+}
diff --git a/state/protocol/protocol_state.go b/state/protocol/protocol_state.go
new file mode 100644
index 00000000000..a570ead3d6c
--- /dev/null
+++ b/state/protocol/protocol_state.go
@@ -0,0 +1,167 @@
+package protocol
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage/deferred"
+)
+
+// EpochProtocolState represents the subset of the Protocol State KVStore related to epochs:
+// the Identity Table, DKG, cluster assignment, etc.
+// EpochProtocolState is fork-aware and can change on a block-by-block basis.
+// Each EpochProtocolState instance refers to the state with respect to some reference block.
+type EpochProtocolState interface {
+	// Epoch returns the current epoch counter.
+	Epoch() uint64
+
+	// Clustering returns the initial clustering from the epoch setup.
+	// CAUTION: This describes the initial epoch configuration from the view point of the Epoch
+	// Smart Contract. It does _not_ account for subsequent node ejections. For Byzantine Fault
+	// Tolerance, the calling code must account for ejections!
+	// No errors are expected during normal operations.
+	Clustering() (flow.ClusterList, error)
+
+	// EpochSetup returns the original epoch setup event that was used to initialize the protocol state.
+	// CAUTION: This describes the initial epoch configuration from the view point of the Epoch
+	// Smart Contract. It does _not_ account for subsequent node ejections. For Byzantine Fault
+	// Tolerance, the calling code must account for ejections!
+	EpochSetup() *flow.EpochSetup
+
+	// EpochCommit returns the original epoch commit event that was used to update the protocol state.
+	// CAUTION: This describes the initial epoch configuration from the view point of the Epoch
+	// Smart Contract. It does _not_ account for subsequent node ejections. For Byzantine Fault
+	// Tolerance, the calling code must account for ejections!
+	EpochCommit() *flow.EpochCommit
+
+	// DKG returns information about the DKG that was obtained from the EpochCommit event.
+	// CAUTION: This describes the initial epoch configuration from the view point of the Epoch
+	// Smart Contract. It does _not_ account for subsequent node ejections. For Byzantine Fault
+	// Tolerance, the calling code must account for ejections!
+	// No errors are expected during normal operations.
+	DKG() (DKG, error)
+
+	// EpochFallbackTriggered denotes whether an invalid epoch state transition was attempted
+	// on the fork ending in this block. Once the first block where this flag is true is finalized, epoch
+	// fallback mode is triggered. This flag is reset to false when finalizing a block that seals
+	// a valid EpochRecover service event.
+	EpochFallbackTriggered() bool
+
+	// PreviousEpochExists returns true if a previous epoch exists. This is true for all epochs
+	// except those immediately following a spork.
+	PreviousEpochExists() bool
+
+	// EpochPhase returns the epoch phase for the current epoch.
+	// See flow.EpochPhase for detailed documentation.
+	EpochPhase() flow.EpochPhase
+
+	// EpochExtensions returns the epoch extensions associated with the current epoch, if any.
+	EpochExtensions() []flow.EpochExtension
+
+	// Identities returns identities (in canonical ordering) that can participate in the current,
+	// previous, or next epoch. Let P be the set of identities in the previous epoch, C be the set
+	// of identities in the current epoch, and S be the set of identities in the subsequent epoch.
+	// Let `\` denote the relative set complement (also called 'set difference').
+	// The set of authorized identities this function returns is different depending on the epoch state:
+	// EpochStaking phase:
+	//   - nodes in C with status `flow.EpochParticipationStatusActive`
+	//   - nodes in P\C with status `flow.EpochParticipationStatusLeaving`
+	// EpochSetup/EpochCommitted phase:
+	//   - nodes in C with status `flow.EpochParticipationStatusActive`
+	//   - nodes in S\C with status `flow.EpochParticipationStatusJoining`
+	Identities() flow.IdentityList
+
+	// GlobalParams returns global, static network params that are the same for all nodes in the network.
+	GlobalParams() GlobalParams
+
+	// Entry returns the low-level protocol state entry that was used to initialize this object.
+	// It shouldn't be used by high-level logic; it is useful for some cases such as bootstrapping.
+	// Prefer using the other methods to access the protocol state.
+	Entry() *flow.RichEpochStateEntry
+}
+
+// ProtocolState is the read-only interface for the protocol state. It allows querying the
+// Protocol KVStore or the Epoch sub-state by block, and retrieving global network params.
+type ProtocolState interface {
+	// EpochStateAtBlockID returns the epoch protocol state at the given block ID.
+	// The resulting epoch protocol state is returned AFTER applying updates that are contained in the block.
+	// Can be queried for any block that has been added to the block tree.
+	// Returns:
+	//   - (EpochProtocolState, nil) - if there is an epoch protocol state associated with the given block ID.
+	//   - (nil, storage.ErrNotFound) - if there is no epoch protocol state associated with the given block ID.
+	//   - (nil, exception) - any other error should be treated as an exception.
+	EpochStateAtBlockID(blockID flow.Identifier) (EpochProtocolState, error)
+
+	// KVStoreAtBlockID returns the protocol state at the given block ID.
+	// The resulting protocol state is returned AFTER applying updates that are contained in the block.
+	// Can be queried for any block that has been added to the block tree.
+	// Returns:
+	//   - (KVStoreReader, nil) - if there is a protocol state associated with the given block ID.
+	//   - (nil, storage.ErrNotFound) - if there is no protocol state associated with the given block ID.
+	//   - (nil, exception) - any other error should be treated as an exception.
+	KVStoreAtBlockID(blockID flow.Identifier) (KVStoreReader, error)
+
+	// GlobalParams returns params that are the same for all nodes in the network.
+	GlobalParams() GlobalParams
+}
+
+// MutableProtocolState is the read-write interface for the protocol state. It allows evolving the protocol
+// state by calling `EvolveState` for each block, with arguments that might trigger state changes.
+type MutableProtocolState interface {
+	ProtocolState
+
+	// EvolveState updates the overall Protocol State based on information in the candidate block
+	// (potentially still under construction). Information that may change the state is:
+	//   - the candidate block's view
+	//   - Service Events from execution results sealed in the candidate block
+	//
+	// EvolveState is compatible with speculative processing: it evolves an *in-memory copy* of the parent state
+	// and collects *deferred database updates* for persisting the resulting Protocol State, including all of its
+	// dependencies and respective indices. Though, the resulting batch of deferred database updates still depends
+	// on the candidate block's ID, which is unknown at the time of block construction. Executing the deferred
+	// database updates is the caller's responsibility.
+	//
+	// SAFETY REQUIREMENTS:
+	//  1. The seals must be a protocol-compliant extension of the parent block. Intuitively, we require that the
+	//     seals follow the ancestry of this fork without gaps. The Consensus Participant's Compliance Layer enforces
+	//     the necessary constraints. Analogously, the block building logic should always produce protocol-compliant
+	//     seals.
+	//     The seals guarantee correctness of the sealed execution result, including the contained service events.
+	//     This is actively checked by the verification node, whose aggregated approvals in the form of a seal attest
+	//     to the correctness of the sealed execution result (specifically the Service Events contained in the result
+	//     and their order).
+	//  2.
+	//  2. For Consensus Participants that are replicas, the calling code must check that the returned `stateID` matches
+	//     the commitment in the block proposal! If they don't match, the proposer is byzantine and should be slashed.
+	//
+	// Consensus nodes actively verify protocol compliance for any block proposal they receive, including integrity of
+	// each seal individually as well as the seals continuously following the fork. Light clients only process certified
+	// blocks, which guarantees that consensus nodes already ran those checks and found the proposal to be valid.
+	//
+	// SERVICE EVENTS form an order-preserving, asynchronous, one-way message bus from the System Smart Contracts
+	// (verified honest execution) to the Protocol State. For example, consider a fork where a service event is
+	// emitted during execution of block A. Block B contains an execution receipt `RA` for A. Block C holds a
+	// seal `SA` for A's execution result.
+	//
+	//   A ← … ← B(RA) ← … ← C(SA)
+	//
+	// Service Events are included within execution results, which are stored opaquely as part of the block payload
+	// (block B in our example). Though, to ensure correctness of the service events, we only process them upon sealing.
+	// There is some non-deterministic delay before the Protocol State observes the Service Events from block A.
+	// In our example, any change to the protocol state requested by the system smart contracts in block A would only
+	// become visible in block C's Protocol State (and descendants).
+	//
+	// Error returns:
+	// [TLDR] All error returns indicate potential state corruption and should therefore be treated as fatal.
+	//  - Per convention, the input seals from the block payload have already been confirmed to be protocol compliant.
+	//    Hence, the service events in the sealed execution results represent the honest execution path.
+	//    Therefore, the sealed service events should encode a valid evolution of the protocol state -- provided
+	//    the system smart contracts are correct.
+	//  - As we can rule out byzantine attacks as the source of failures, the only remaining sources of problems
+	//    can be (a) bugs in the system smart contracts or (b) bugs in the node implementation. A service event
+	//    not representing a valid state transition despite all consistency checks passing is interpreted as
+	//    case (a) and _should be_ handled internally by the respective state machine. Otherwise, any bug or
+	//    unforeseen edge cases in the system smart contracts would result in consensus halt, due to errors while
+	//    evolving the protocol state.
+	//  - A consistency or sanity check failing within the StateMutator is likely the symptom of an internal bug
+	//    in the node software or state corruption, i.e. case (b). This is the only scenario where the error return
+	//    of this function is not nil. If such an exception is returned, continuing is not an option.
+	EvolveState(deferredDBOps *deferred.DeferredBlockPersist, parentBlockID flow.Identifier, candidateView uint64, candidateSeals []*flow.Seal) (stateID flow.Identifier, err error)
+}
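To make the contract above concrete, here is a minimal sketch of how a replica-side caller might use `EvolveState`. It is illustrative only and not code from this PR: the `flow.Block` field names (in particular `Payload.ProtocolStateID`) and the surrounding validation flow are assumptions.

```go
package example

import (
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol"
	"github.com/onflow/flow-go/storage/deferred"
)

// validateProposedProtocolState sketches SAFETY REQUIREMENT 2: a replica evolves
// the parent state and compares the resulting commitment against the proposal's.
func validateProposedProtocolState(mutator protocol.MutableProtocolState, candidate *flow.Block) (*deferred.DeferredBlockPersist, error) {
	dbOps := deferred.NewDeferredBlockPersist()
	stateID, err := mutator.EvolveState(dbOps, candidate.Header.ParentID, candidate.Header.View, candidate.Payload.Seals)
	if err != nil {
		// per the convention above, all error returns indicate potential state corruption
		return nil, fmt.Errorf("irrecoverable error evolving protocol state: %w", err)
	}
	if stateID != candidate.Payload.ProtocolStateID {
		// mismatching commitment -> proposer is byzantine and should be slashed
		return nil, fmt.Errorf("invalid protocol state commitment in proposal")
	}
	// executing the deferred writes is the caller's responsibility, once the block ID is known
	return dbOps, nil
}
```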
diff --git a/state/protocol/protocol_state/Readme.md b/state/protocol/protocol_state/Readme.md
new file mode 100644
index 00000000000..ce4cd4b8e6f
--- /dev/null
+++ b/state/protocol/protocol_state/Readme.md
@@ -0,0 +1,80 @@
+# Dynamic Protocol State in a nutshell
+
+- The Dynamic Protocol State is a framework for storing a snapshot of protocol-defined parameters
+  and supplemental protocol data into each block. Think of it as a key-value store in each block.
+- The Flow network uses its Dynamic Protocol State to orchestrate Epoch switchovers and, more generally, to control participation privileges
+  for the network (including ejection of misbehaving or compromised nodes).
+- Furthermore, the Dynamic Protocol State makes it easy to update operational protocol parameters on the live network via a governance transaction.
+  For example, we could update consensus timing parameters such as `hotstuff-min-timeout`.
+
+The examples above all use the same primitives provided by the Dynamic Protocol State:
+  - (i) a Key-Value Store, whose hash commitment is included in the payload of every block,
+  - (ii) a set of rules (represented as a state machine) that updates the key-value store from block to block, and
+  - (iii) dedicated `Service Events`, originating from the System Smart Contracts (via verification and sealing), which serve as inputs to the state machines from (ii).
+
+This provides us with a very powerful set of primitives to orchestrate the low-level protocol on the fly with inputs from the System Smart Contracts.
+Engineers extending the protocol can add new entries to the Key-Value Store and provide custom state machines for updating their values.
+Correct application of these state machines (i.e. correct evolution of the data in the store) is guaranteed by the Dynamic Protocol State framework through BFT consensus.
+
+
+# Core Concepts
+
+## Orthogonal State Machines
+
+Orthogonality means that state machines can operate completely independently and work on disjoint
+sub-states. By convention, they all consume the same inputs (incl. the ordered sequence of
+Service Events sealed in one block). In other words, each state machine $S_0, S_1,\ldots$ has full visibility
+into the inputs, but each draws its own independent conclusions (maintaining its own exclusive state).
+There is no information exchange between the state machines; one state machine cannot read the current state
+of another.
+
+We emphasize that this architecture choice does not prevent us from implementing sequential state
+machines for certain use-cases. For example: state machine $A$ provides its output as input to another
+state machine $B$. Here, the order of running the state machines matters. This order-dependency is not
+supported by the Protocol State, which executes the state machines in an arbitrary order. Therefore,
+if we need state machines to be executed in some specific order, we have to bundle them into one composite state
+machine (conceptually a processing pipeline) by hand. The composite state machine's execution as a
+whole can then be managed by the Protocol State, because the composite state machine is orthogonal
+to all other remaining state machines.
+Requiring all State Machines to be orthogonal is a deliberate design choice. Thereby, the default
+favours modularity and strong logical independence. This is very beneficial for managing complexity
+in the long term.
+
+### Key-Value-Store
+The Flow protocol defines the Key-Value-Store's state $\mathcal{P}$ as the composition of disjoint sub-states
+$P_0, P_1, \ldots, P_j$. Formally, we write $\mathcal{P} = P_0 \otimes P_1 \otimes \ldots \otimes P_j$, where $\otimes$ denotes the product state. We
+loosely associate each $P_0, P_1,\ldots$ with one specific key-value entry in the store.
+Correspondingly, we have conceptually independent state machines $S_0, S_1,\ldots$, each operating on its own respective
+sub-state $P_0, P_1, \ldots$. A one-to-one correspondence between key-value pair and state machine should be the
+default, but is not strictly required. However, **the strong requirement is that no key-value pair is operated
+on by more than one state machine**.
+
+Formally we write:
+- The overall protocol state $\mathcal{P}$ is composed of disjoint sub-states $\mathcal{P} = P_0 \otimes P_1 \otimes\ldots\otimes P_j$
+- For each sub-state $P_i$, we have a dedicated state machine $S_i$ that exclusively operates on $P_i$
+- The state machines can be formalized as orthogonal regions of the composite state machine
+  $\mathcal{S} = S_0 \otimes S_1 \otimes \ldots \otimes S_j$. (Technically, we represent the state machine by its state-transition
+  function. All other details of the state machine are implicit.)
+- The state machine $\mathcal{S}$ being in state $\mathcal{P}$ and observing the input $\xi = x_0\cdot x_1 \cdot x_2 \cdot\ldots\cdot x_z$ will output
+  state $\mathcal{P}'$. To emphasize that a certain state machine $\mathcal{S}$ exclusively operates on state $\mathcal{P}$, we write
+  $\mathcal{S}[\mathcal{P}] = S_0[P_0] \otimes S_1[P_1] \otimes\ldots\otimes S_j[P_j]$.
+  Observing the events $\xi$, the output state is
+  $\mathcal{P}' = \mathcal{S}[\mathcal{P}] (\xi) = S_0 [P_0] (\xi) \otimes S_1 [P_1] (\xi) \otimes\ldots\otimes S_j [P_j] (\xi) = P'_0 \otimes P'_1 \otimes\ldots\otimes P'_j$,
+  where each state machine $S_i$ individually generated the output state $S_i [P_i] (\xi) = P'_i$.
+
+### Input ξ
+Conceptually, the consensus leader first executes these state machines during their block building
+process. At this point, the `ID` of the final block is unknown. Nevertheless, some part of the payload
+construction has already happened, because the sealed execution results are used as an input below.
+There is a large degree of freedom in which data fields of the partially-constructed block we permit as possible inputs to the state
+machines. At the moment, the primary purpose is for the execution environment (whose results have undergone
+verification and sealing) to send Service Events to the protocol layer. Therefore, the current
+convention is:
+1. At the time of state machine construction (for each block), the Protocol State framework provides:
+   - `candidateView`: view of the block currently under construction (or being currently validated)
+   - `parentID`: parent block's ID (generally used by state machines to read their respective sub-state)
+2. The Service Events sealed in the candidate block (under construction)
+   are given to each state machine via the `EvolveState(..)` call.
+
+New key-value pairs and corresponding state machines can easily be added
+by implementing the `OrthogonalStoreStateMachine` interface and adding a new entry to the Key-Value-Store's data model
+(file `./kvstore/models.go`), as sketched below.
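As a concrete illustration, the following is a minimal sketch of such an extension. It is not part of this PR: the KV-store entry (`MyParam`), the setter `SetMyParam`, the event payload type `MyParamUpdated`, and the exact `EvolveState` signature are assumptions for illustration. The embedded `common.BaseKeyValueStoreStateMachine` (introduced below) supplies the boilerplate `Build`, `View`, and `ParentState` methods.

```go
package myfeature

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol"
	"github.com/onflow/flow-go/state/protocol/protocol_state"
	"github.com/onflow/flow-go/state/protocol/protocol_state/common"
)

// MyParamStateMachine is a hypothetical orthogonal state machine owning exactly
// one KV-store entry. No other state machine may write that entry.
type MyParamStateMachine struct {
	common.BaseKeyValueStoreStateMachine // provides Build, View, ParentState
}

func NewMyParamStateMachine(candidateView uint64, parentState protocol.KVStoreReader, mutator protocol_state.KVStoreMutator) *MyParamStateMachine {
	return &MyParamStateMachine{
		BaseKeyValueStoreStateMachine: common.NewBaseKeyValueStoreStateMachine(candidateView, parentState, mutator),
	}
}

// EvolveState receives the ordered Service Events sealed in the candidate block.
// Every orthogonal state machine sees the same input; this one reacts only to the
// (hypothetical) event type it owns and updates only its exclusive sub-state.
func (m *MyParamStateMachine) EvolveState(sealedServiceEvents []flow.ServiceEvent) error {
	for _, event := range sealedServiceEvents {
		// e.g. if ev, ok := event.Event.(*MyParamUpdated); ok { m.EvolvingState.SetMyParam(ev.Value) }
		_ = event
	}
	return nil
}
```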
diff --git a/state/protocol/protocol_state/common/base_statemachine.go b/state/protocol/protocol_state/common/base_statemachine.go
new file mode 100644
index 00000000000..c85c2e4fdff
--- /dev/null
+++ b/state/protocol/protocol_state/common/base_statemachine.go
@@ -0,0 +1,45 @@
+package common
+
+import (
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+	"github.com/onflow/flow-go/storage/deferred"
+)
+
+// BaseKeyValueStoreStateMachine implements a subset of the KeyValueStoreStateMachine interface that is usually
+// common to all state machines operating on the KV store.
+// Implementors can override these methods as needed.
+type BaseKeyValueStoreStateMachine struct {
+	candidateView uint64
+	parentState   protocol.KVStoreReader
+	EvolvingState protocol_state.KVStoreMutator
+}
+
+// NewBaseKeyValueStoreStateMachine creates a new instance of BaseKeyValueStoreStateMachine.
+func NewBaseKeyValueStoreStateMachine(
+	candidateView uint64,
+	parentState protocol.KVStoreReader,
+	evolvingState protocol_state.KVStoreMutator,
+) BaseKeyValueStoreStateMachine {
+	return BaseKeyValueStoreStateMachine{
+		candidateView: candidateView,
+		parentState:   parentState,
+		EvolvingState: evolvingState,
+	}
+}
+
+// Build is a no-op by default. If a state machine needs to persist data, it should override this method.
+func (m *BaseKeyValueStoreStateMachine) Build() (*deferred.DeferredBlockPersist, error) {
+	return deferred.NewDeferredBlockPersist(), nil
+}
+
+// View returns the view associated with this state machine.
+// The view of the state machine equals the view of the block carrying the respective updates.
+func (m *BaseKeyValueStoreStateMachine) View() uint64 {
+	return m.candidateView
+}
+
+// ParentState returns the parent state associated with this state machine.
+func (m *BaseKeyValueStoreStateMachine) ParentState() protocol.KVStoreReader {
+	return m.parentState
+}
diff --git a/state/protocol/protocol_state/consumer.go b/state/protocol/protocol_state/consumer.go
new file mode 100644
index 00000000000..35962fa5a9c
--- /dev/null
+++ b/state/protocol/protocol_state/consumer.go
@@ -0,0 +1,27 @@
+package protocol_state
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// StateMachineTelemetryConsumer consumes notifications produced by OrthogonalStoreStateMachine instances. Any state machine
+// that performs processing of service events should notify the consumer about the events it has received, successfully processed, or
+// detected as invalid.
+// Implementations must:
+//   - be concurrency safe
+//   - be non-blocking
+//   - handle repetition of the same events (with some processing overhead).
+type StateMachineTelemetryConsumer interface {
+	// OnInvalidServiceEvent notifications are produced when a service event is detected as invalid by the state machine.
+	OnInvalidServiceEvent(event flow.ServiceEvent, err error)
+	// OnServiceEventReceived notifications are produced when a service event is received by the state machine.
+	OnServiceEventReceived(event flow.ServiceEvent)
+	// OnServiceEventProcessed notifications are produced when a service event is successfully processed by the state machine.
+	OnServiceEventProcessed(event flow.ServiceEvent)
+}
+
+// StateMachineEventsTelemetryFactory is a factory method for creating StateMachineTelemetryConsumer instances.
+// It is useful for creating consumers that provide extra information about the context in which they are operating.
+// State machines evolve state based on inputs in the form of service events that are incorporated in blocks. Thus, the consumer
+// can be created based on the block carrying the service events.
+type StateMachineEventsTelemetryFactory func(candidateView uint64) StateMachineTelemetryConsumer
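For orientation, below is a minimal sketch (not part of this PR) of a `StateMachineTelemetryConsumer` that merely logs notifications, together with a factory that tags each consumer with the candidate view. `zerolog` is assumed only because flow-go uses it elsewhere; the consumer is stateless and hence trivially concurrency-safe and non-blocking, as the interface requires.

```go
package example

import (
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol/protocol_state"
)

// logConsumer logs every telemetry notification; it keeps no internal state.
type logConsumer struct {
	log zerolog.Logger
}

var _ protocol_state.StateMachineTelemetryConsumer = (*logConsumer)(nil)

func (c *logConsumer) OnServiceEventReceived(event flow.ServiceEvent) {
	c.log.Info().Str("type", string(event.Type)).Msg("service event received")
}

func (c *logConsumer) OnServiceEventProcessed(event flow.ServiceEvent) {
	c.log.Info().Str("type", string(event.Type)).Msg("service event processed")
}

func (c *logConsumer) OnInvalidServiceEvent(event flow.ServiceEvent, err error) {
	c.log.Warn().Err(err).Str("type", string(event.Type)).Msg("invalid service event")
}

// NewLogConsumerFactory returns a StateMachineEventsTelemetryFactory that tags
// every consumer with the view of the block carrying the service events.
func NewLogConsumerFactory(log zerolog.Logger) protocol_state.StateMachineEventsTelemetryFactory {
	return func(candidateView uint64) protocol_state.StateMachineTelemetryConsumer {
		return &logConsumer{log: log.With().Uint64("candidate_view", candidateView).Logger()}
	}
}
```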
diff --git a/state/protocol/protocol_state/epochs/base_statemachine.go b/state/protocol/protocol_state/epochs/base_statemachine.go
new file mode 100644
index 00000000000..f9205c0f680
--- /dev/null
+++ b/state/protocol/protocol_state/epochs/base_statemachine.go
@@ -0,0 +1,137 @@
+package epochs
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+)
+
+// baseStateMachine implements common logic for evolving the protocol state in both happy-path and epoch-fallback
+// operation modes. It partially implements `StateMachine` and is used as a building block for more complex implementations.
+type baseStateMachine struct {
+	telemetry        protocol_state.StateMachineTelemetryConsumer
+	parentEpochState *flow.RichEpochStateEntry
+	state            *flow.EpochStateEntry
+	ejector          ejector
+	view             uint64
+}
+
+// newBaseStateMachine creates a new instance of baseStateMachine and initializes the internal ejector,
+// which keeps track of ejected identities.
+// A protocol.InvalidServiceEventError is returned if the ejector fails to track the identities.
+func newBaseStateMachine(telemetry protocol_state.StateMachineTelemetryConsumer, view uint64, parentState *flow.RichEpochStateEntry, state *flow.EpochStateEntry) (*baseStateMachine, error) {
+	ej := newEjector()
+	if state.PreviousEpoch != nil {
+		err := ej.TrackDynamicIdentityList(state.PreviousEpoch.ActiveIdentities)
+		if err != nil {
+			return nil, fmt.Errorf("could not track identities for previous epoch: %w", err)
+		}
+	}
+	err := ej.TrackDynamicIdentityList(state.CurrentEpoch.ActiveIdentities)
+	if err != nil {
+		return nil, fmt.Errorf("could not track identities for current epoch: %w", err)
+	}
+	if state.NextEpoch != nil {
+		err := ej.TrackDynamicIdentityList(state.NextEpoch.ActiveIdentities)
+		if err != nil {
+			return nil, fmt.Errorf("could not track identities for next epoch: %w", err)
+		}
+	}
+	return &baseStateMachine{
+		telemetry:        telemetry,
+		view:             view,
+		parentEpochState: parentState,
+		state:            state,
+		ejector:          ej,
+	}, nil
+}
+
+// Build returns the updated protocol state entry, its state ID, and a flag indicating whether there were any changes.
+// CAUTION:
+// Do NOT call Build, if the baseStateMachine instance has returned a `protocol.InvalidServiceEventError`
+// at any time during its lifetime. After this error, the baseStateMachine is left with a potentially
+// dysfunctional state and should be discarded.
+func (u *baseStateMachine) Build() (updatedState *flow.EpochStateEntry, stateID flow.Identifier, hasChanges bool) {
+	updatedState = u.state.Copy()
+	stateID = updatedState.ID()
+	hasChanges = stateID != u.parentEpochState.ID()
+	return
+}
+
+// View returns the view associated with this state machine.
+// The view of the state machine equals the view of the block carrying the respective updates.
+func (u *baseStateMachine) View() uint64 {
+	return u.view
+}
+
+// ParentState returns the parent protocol state associated with this state machine.
+func (u *baseStateMachine) ParentState() *flow.RichEpochStateEntry {
+	return u.parentEpochState
+}
+
+// EjectIdentity updates the identity table by changing the node's participation status to 'ejected'.
+// If and only if the node is active in the previous, current, or next epoch, the node's ejection status
+// is set to true for all occurrences, and we return true. If `nodeID` is not found, we return false. This
+// method is idempotent and behaves identically for repeated calls with the same `nodeID` (repeated calls
+// with the same input create minor performance overhead though).
+func (u *baseStateMachine) EjectIdentity(ejectionEvent *flow.EjectNode) bool {
+	u.telemetry.OnServiceEventReceived(ejectionEvent.ServiceEvent())
+	wasEjected := u.ejector.Eject(ejectionEvent.NodeID)
+	if wasEjected {
+		u.telemetry.OnServiceEventProcessed(ejectionEvent.ServiceEvent())
+	} else {
+		u.telemetry.OnInvalidServiceEvent(ejectionEvent.ServiceEvent(),
+			protocol.NewInvalidServiceEventErrorf("could not eject node with unknown NodeID %v", ejectionEvent.NodeID))
+	}
+	return wasEjected
+}
+
+// TransitionToNextEpoch updates the notion of 'current epoch', 'previous' and 'next epoch' in the protocol
+// state. An epoch transition is only allowed when _all_ of the following conditions are satisfied:
+//   - next epoch has been set up,
+//   - next epoch has been committed,
+//   - candidate block is in the next epoch.
+//
+// No errors are expected during normal operations.
+func (u *baseStateMachine) TransitionToNextEpoch() error {
+	nextEpoch := u.state.NextEpoch
+	if nextEpoch == nil { // nextEpoch ≠ nil if and only if next epoch was already set up
+		return fmt.Errorf("protocol state for next epoch has not yet been setup")
+	}
+	if nextEpoch.CommitID == flow.ZeroID { // nextEpoch.CommitID ≠ flow.ZeroID if and only if next epoch was already committed
+		return fmt.Errorf("protocol state for next epoch has not yet been committed")
+	}
+	// Check if we are in the next epoch; only then is a transition allowed
+	if u.view < u.state.NextEpochSetup.FirstView {
+		return fmt.Errorf("epoch transition is only allowed when entering next epoch")
+	}
+	minEpochStateEntry, err := flow.NewMinEpochStateEntry(
+		flow.UntrustedMinEpochStateEntry{
+			PreviousEpoch:          &u.state.CurrentEpoch,
+			CurrentEpoch:           *u.state.NextEpoch,
+			NextEpoch:              nil,
+			EpochFallbackTriggered: u.state.EpochFallbackTriggered,
+		},
+	)
+	if err != nil {
+		return fmt.Errorf("could not create min epoch state: %w", err)
+	}
+
+	u.state, err = flow.NewEpochStateEntry(
+		flow.UntrustedEpochStateEntry{
+			MinEpochStateEntry:  minEpochStateEntry,
+			PreviousEpochSetup:  u.state.CurrentEpochSetup,
+			PreviousEpochCommit: u.state.CurrentEpochCommit,
+			CurrentEpochSetup:   u.state.NextEpochSetup,
+			CurrentEpochCommit:  u.state.NextEpochCommit,
+			NextEpochSetup:      nil,
+			NextEpochCommit:     nil,
+		},
+	)
+	if err != nil {
+		return fmt.Errorf("could not construct epoch state entry: %w", err)
+	}
+
+	return nil
+}
diff --git a/state/protocol/protocol_state/epochs/factory.go b/state/protocol/protocol_state/epochs/factory.go
new file mode 100644
index 00000000000..97987fdfa9e
--- /dev/null
+++ b/state/protocol/protocol_state/epochs/factory.go
@@ -0,0 +1,55 @@
+package epochs
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+	"github.com/onflow/flow-go/storage"
+)
+
+// EpochStateMachineFactory is a factory for creating EpochStateMachine instances.
+// It holds all the necessary data to create a new instance of EpochStateMachine.
+type EpochStateMachineFactory struct {
+	setups                    storage.EpochSetups
+	commits                   storage.EpochCommits
+	epochProtocolStateDB      storage.EpochProtocolStateEntries
+	happyPathTelemetryFactory protocol_state.StateMachineEventsTelemetryFactory
+	fallbackTelemetryFactory  protocol_state.StateMachineEventsTelemetryFactory
+}
+
+var _ protocol_state.KeyValueStoreStateMachineFactory = (*EpochStateMachineFactory)(nil)
+
+func NewEpochStateMachineFactory(
+	setups storage.EpochSetups,
+	commits storage.EpochCommits,
+	epochProtocolStateDB storage.EpochProtocolStateEntries,
+	happyPathTelemetryFactory, fallbackTelemetryFactory protocol_state.StateMachineEventsTelemetryFactory,
+) *EpochStateMachineFactory {
+	return &EpochStateMachineFactory{
+		setups:                    setups,
+		commits:                   commits,
+		epochProtocolStateDB:      epochProtocolStateDB,
+		happyPathTelemetryFactory: happyPathTelemetryFactory,
+		fallbackTelemetryFactory:  fallbackTelemetryFactory,
+	}
+}
+
+// Create creates a new state machine instance that operates on the KV Store and is bound to a specific candidate block.
+// No errors are expected during normal operations.
+func (f *EpochStateMachineFactory) Create(candidateView uint64, parentBlockID flow.Identifier, parentState protocol.KVStoreReader, mutator protocol_state.KVStoreMutator) (protocol_state.KeyValueStoreStateMachine, error) {
+	return NewEpochStateMachine(
+		candidateView,
+		parentBlockID,
+		f.setups,
+		f.commits,
+		f.epochProtocolStateDB,
+		parentState,
+		mutator,
+		func(candidateView uint64, parentState *flow.RichEpochStateEntry) (StateMachine, error) {
+			return NewHappyPathStateMachine(f.happyPathTelemetryFactory(candidateView), candidateView, parentState)
+		},
+		func(candidateView uint64, parentEpochState *flow.RichEpochStateEntry) (StateMachine, error) {
+			return NewFallbackStateMachine(parentState, f.fallbackTelemetryFactory(candidateView), candidateView, parentEpochState)
+		},
+	)
+}
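A hypothetical wiring example, reusing the `NewLogConsumerFactory` sketch from above (`setups`, `commits`, `epochProtocolStateDB`, and `log` are assumed to be in scope). Passing distinct telemetry factories lets operators distinguish happy-path processing from processing in epoch fallback mode:

```go
factory := epochs.NewEpochStateMachineFactory(
	setups,               // storage.EpochSetups
	commits,              // storage.EpochCommits
	epochProtocolStateDB, // storage.EpochProtocolStateEntries
	NewLogConsumerFactory(log.With().Str("mode", "happy_path").Logger()),
	NewLogConsumerFactory(log.With().Str("mode", "epoch_fallback").Logger()),
)
```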
diff --git a/state/protocol/protocol_state/epochs/fallback_statemachine.go b/state/protocol/protocol_state/epochs/fallback_statemachine.go
new file mode 100644
index 00000000000..d271e49df86
--- /dev/null
+++ b/state/protocol/protocol_state/epochs/fallback_statemachine.go
@@ -0,0 +1,334 @@
+package epochs
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+)
+
+// FallbackStateMachine is a special structure that encapsulates logic for processing service events
+// while the protocol is in epoch fallback mode. The FallbackStateMachine ignores EpochSetup and EpochCommit
+// events but still processes ejection events.
+//
+// Whenever an invalid epoch state transition has been observed, only FallbackStateMachines must be created
+// for all subsequent views.
+type FallbackStateMachine struct {
+	baseStateMachine
+	parentState protocol.KVStoreReader
+}
+
+var _ StateMachine = (*FallbackStateMachine)(nil)
+
+// NewFallbackStateMachine constructs a state machine for epoch fallback. It automatically sets
+// EpochFallbackTriggered to true, thereby recording that we have entered epoch fallback mode.
+// See flow.EpochPhase for detailed documentation about EFM and epoch phase transitions.
+// No errors are expected during normal operations.
+func NewFallbackStateMachine(
+	parentState protocol.KVStoreReader,
+	telemetry protocol_state.StateMachineTelemetryConsumer,
+	view uint64,
+	parentEpochState *flow.RichEpochStateEntry,
+) (*FallbackStateMachine, error) {
+	state := parentEpochState.EpochStateEntry.Copy()
+	nextEpochCommitted := state.EpochPhase() == flow.EpochPhaseCommitted
+
+	nextEpoch := state.NextEpoch
+	nextEpochSetup := state.NextEpochSetup
+	nextEpochCommit := state.NextEpochCommit
+
+	// We are entering fallback mode; this logic needs to be executed only once.
+	if !state.EpochFallbackTriggered {
+		// The next epoch has not been committed. Though a setup event may be in the state, make sure it is cleared.
+		// CAUTION: this logic must be consistent with the `MinEpochStateEntry.EpochPhase()`, which
+		// determines the epoch phase based on the configuration of the fields we set here!
+		// Specifically, if and only if the next epoch is already committed as of the parent state,
+		// we go through with that committed epoch. Otherwise, we have tentative values of an epoch
+		// not yet properly specified, which we have to clear out.
+		if !nextEpochCommitted {
+			nextEpoch = nil
+			// update corresponding service events
+			nextEpochSetup = nil
+			nextEpochCommit = nil
+		}
+
+		minEpochStateEntry, err := flow.NewMinEpochStateEntry(
+			flow.UntrustedMinEpochStateEntry{
+				PreviousEpoch:          state.PreviousEpoch,
+				CurrentEpoch:           state.CurrentEpoch,
+				NextEpoch:              nextEpoch,
+				EpochFallbackTriggered: true,
+			},
+		)
+		if err != nil {
+			return nil, fmt.Errorf("could not create min epoch state: %w", err)
+		}
+
+		state, err = flow.NewEpochStateEntry(
+			flow.UntrustedEpochStateEntry{
+				MinEpochStateEntry:  minEpochStateEntry,
+				PreviousEpochSetup:  state.PreviousEpochSetup,
+				PreviousEpochCommit: state.PreviousEpochCommit,
+				CurrentEpochSetup:   state.CurrentEpochSetup,
+				CurrentEpochCommit:  state.CurrentEpochCommit,
+				NextEpochSetup:      nextEpochSetup,
+				NextEpochCommit:     nextEpochCommit,
+			},
+		)
+		if err != nil {
+			return nil, fmt.Errorf("could not create epoch state entry: %w", err)
+		}
+	}
+
+	base, err := newBaseStateMachine(telemetry, view, parentEpochState, state)
+	if err != nil {
+		return nil, fmt.Errorf("could not create base state machine: %w", err)
+	}
+	sm := &FallbackStateMachine{
+		baseStateMachine: *base,
+		parentState:      parentState,
+	}
+
+	if !nextEpochCommitted && view+parentState.GetFinalizationSafetyThreshold() >= state.CurrentEpochFinalView() {
+		// We have reached the safety threshold and are still in fallback mode,
+		// so we prepare a new extension for the current epoch.
+		err := sm.extendCurrentEpoch(flow.EpochExtension{
+			FirstView: state.CurrentEpochFinalView() + 1,
+			FinalView: state.CurrentEpochFinalView() + parentState.GetEpochExtensionViewCount(),
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return sm, nil
+}
+
+// extendCurrentEpoch appends an epoch extension to the current epoch from the underlying state.
+// Internally, it performs sanity checks to ensure that the epoch extension is contiguous with the current epoch.
+// It also ensures that the next epoch is not present, as epoch extensions are only allowed for the current epoch.
+// No errors are expected during normal operation.
+func (m *FallbackStateMachine) extendCurrentEpoch(epochExtension flow.EpochExtension) error {
+	state := m.state
+	if len(state.CurrentEpoch.EpochExtensions) > 0 {
+		lastExtension := state.CurrentEpoch.EpochExtensions[len(state.CurrentEpoch.EpochExtensions)-1]
+		if lastExtension.FinalView+1 != epochExtension.FirstView {
+			return fmt.Errorf("epoch extension is not contiguous with the last extension")
+		}
+	} else {
+		if epochExtension.FirstView != m.state.CurrentEpochSetup.FinalView+1 {
+			return fmt.Errorf("first epoch extension is not contiguous with current epoch")
+		}
+	}
+
+	if state.NextEpoch != nil {
+		return fmt.Errorf("cannot extend current epoch when next epoch is present")
+	}
+
+	epochExtensions := append(state.CurrentEpoch.EpochExtensions, epochExtension)
+	currentEpoch, err := flow.NewEpochStateContainer(
+		flow.UntrustedEpochStateContainer{
+			SetupID:          state.CurrentEpoch.SetupID,
+			CommitID:         state.CurrentEpoch.CommitID,
+			ActiveIdentities: state.CurrentEpoch.ActiveIdentities,
+			EpochExtensions:  epochExtensions,
+		},
+	)
+	if err != nil {
+		return fmt.Errorf("could not construct current epoch state: %w", err)
+	}
+
+	newMinEpochStateEntry, err := flow.NewMinEpochStateEntry(
+		flow.UntrustedMinEpochStateEntry{
+			PreviousEpoch:          state.PreviousEpoch,
+			CurrentEpoch:           *currentEpoch,
+			NextEpoch:              state.NextEpoch,
+			EpochFallbackTriggered: state.EpochFallbackTriggered,
+		},
+	)
+	if err != nil {
+		return fmt.Errorf("could not create min epoch state: %w", err)
+	}
+
+	m.state, err = flow.NewEpochStateEntry(
+		flow.UntrustedEpochStateEntry{
+			MinEpochStateEntry:  newMinEpochStateEntry,
+			PreviousEpochSetup:  m.state.PreviousEpochSetup,
+			PreviousEpochCommit: m.state.PreviousEpochCommit,
+			CurrentEpochSetup:   m.state.CurrentEpochSetup,
+			CurrentEpochCommit:  m.state.CurrentEpochCommit,
+			NextEpochSetup:      m.state.NextEpochSetup,
+			NextEpochCommit:     m.state.NextEpochCommit,
+		},
+	)
+	if err != nil {
+		return fmt.Errorf("could not construct epoch state entry: %w", err)
+	}
+
+	return nil
+}
+
+// ProcessEpochSetup processes epoch setup service events; in epoch fallback mode, this event is ignored.
+func (m *FallbackStateMachine) ProcessEpochSetup(setup *flow.EpochSetup) (bool, error) {
+	m.telemetry.OnServiceEventReceived(setup.ServiceEvent())
+	m.telemetry.OnInvalidServiceEvent(setup.ServiceEvent(), protocol.NewInvalidServiceEventErrorf("received EpochSetup in Epoch Fallback Mode"))
+	// Note that we are dropping _all_ EpochSetup events sealed by this block. As long as we are in EFM, this is
+	// the natural behaviour, as we have given up on following the instructions from the Epoch Smart Contracts.
+	//
+	// CAUTION: This leaves an edge case where a valid `EpochRecover` event followed by an `EpochSetup` is sealed in the
+	// same block. Conceptually, this is a clear indication that the Epoch Smart Contract is doing something unexpected. The
+	// reason is that the block with the `EpochRecover` event is at least `FinalizationSafetyThreshold` views before the
+	// switchover to the recovery epoch. Otherwise, the FallbackStateMachine constructor would have added an extension to
+	// the current epoch. Axiomatically, the `FinalizationSafetyThreshold` is large enough that we guarantee finalization of
+	// the epoch configuration (in this case the configuration of the recovery epoch provided by the `EpochRecover` event)
+	// _before_ the recovery epoch starts. For finalization, the block sealing the `EpochRecover` event must have descendants
+	// in the same epoch, i.e. an EpochSetup cannot occur in the same block as the `EpochRecover` event.
+	//
+	// Nevertheless, we ignore such an EpochSetup event here, despite knowing that it is an invalid input from the smart contract.
+	// If the epoch smart contract continues to behave unexpectedly, we will just re-enter EFM in a subsequent block. Though,
+	// if the smart contract happens to behave as expected for all subsequent blocks and manages to coordinate epoch transitions
+	// from here on, that is also acceptable.
+	// Essentially, the block sealing a valid EpochRecover event is a grace period, where we still tolerate unexpected events from
+	// the Epoch Smart Contract. This significantly simplifies the implementation of the FallbackStateMachine without impacting the
+	// robustness of the overall EFM mechanics.
+	return false, nil
+}
+
+// ProcessEpochCommit processes epoch commit service events; in epoch fallback mode, this event is ignored.
+func (m *FallbackStateMachine) ProcessEpochCommit(commit *flow.EpochCommit) (bool, error) {
+	m.telemetry.OnServiceEventReceived(commit.ServiceEvent())
+	m.telemetry.OnInvalidServiceEvent(commit.ServiceEvent(), protocol.NewInvalidServiceEventErrorf("received EpochCommit in Epoch Fallback Mode"))
+	// We ignore _all_ EpochCommit events here. This includes scenarios where a valid `EpochRecover` event is sealed in
+	// a block followed by `EpochSetup` and/or `EpochCommit` events -- technically, a clear indication that the Epoch Smart
+	// Contract is doing something unexpected. For a detailed explanation why this is safe, see `ProcessEpochSetup` above.
+	return false, nil
+}
+
+// ProcessEpochRecover updates the internally-maintained interim Epoch state with data from the epoch recover
+// event in an attempt to recover from Epoch Fallback Mode [EFM] and get back on the happy path. Specifically,
+// after successfully processing this event, we will have a next epoch (as specified by the
+// EpochRecover event) in the protocol state, which is in the committed phase. Subsequently, the epoch
+// protocol can proceed following the happy path. Therefore, we set `EpochFallbackTriggered` back to false.
+//
+// The boolean return indicates if the input event triggered a transition in the state machine or not.
+// For the EpochRecover event, we never return an error, to ensure that the FallbackStateMachine is robust against any input and doesn't
+// halt the chain even if the Epoch Smart Contract misbehaves. This is a safe choice since the error can only originate from
+// an invalid EpochRecover event; in this case, we just ignore the event and continue in fallback mode.
+//
+// EDGE CASES: due to manual interventions for Epoch Recovery, there is a notable risk of unintended side-effects
+// in terms of emitted events. Therefore, we aim to be resilient against invalid and/or inconsistent events:
+//  1. Any amount of setup and commit events being sealed in the same block as an epoch recover event:
+//     EpochSetup and EpochCommit are consistently ignored by the FallbackStateMachine, also after a successful recovery.
+//     For a detailed explanation why this is safe, see `ProcessEpochSetup` above.
+//  2. Multiple EpochRecover events sealed in the same block:
+//     - Invalid `EpochRecover` events are reported to telemetry and dropped.
+//     - The first valid `EpochRecover` event is accepted (if any is sealed in the block).
+//     - Subsequent valid events are no-ops iff they are identical to the first valid EpochRecover event.
+//       Otherwise, they are reported to telemetry and dropped.
+// An `EpochRecover` event is considered valid in this context if it specifies a valid successor of the
+// current epoch (irrespective of whether a `NextEpoch` is already present in the `ProtocolStateEntry`).
+//
+// Error returns:
+//   - During normal operations, this method internally handles erroneous inputs. Error returns are
+//     symptoms of internal state corruption or critical bugs, making continuation impossible.
+func (m *FallbackStateMachine) ProcessEpochRecover(epochRecover *flow.EpochRecover) (bool, error) {
+	m.telemetry.OnServiceEventReceived(epochRecover.ServiceEvent())
+	err := m.ensureValidEpochRecover(epochRecover)
+	if err != nil {
+		m.telemetry.OnInvalidServiceEvent(epochRecover.ServiceEvent(), err)
+		return false, nil
+	}
+	nextEpoch := m.state.NextEpoch
+	if nextEpoch != nil {
+		// accept iff the EpochRecover is the same as the one we have already recovered.
+		if nextEpoch.SetupID != epochRecover.EpochSetup.ID() ||
+			nextEpoch.CommitID != epochRecover.EpochCommit.ID() {
+			m.telemetry.OnInvalidServiceEvent(epochRecover.ServiceEvent(),
+				protocol.NewInvalidServiceEventErrorf("multiple inconsistent EpochRecover events sealed in the same block"))
+			return false, nil
+		}
+	}
+	// m.state.NextEpoch is either nil, or its EpochSetup and EpochCommit are identical to the given `epochRecover`
+
+	// assemble EpochStateContainer for next epoch:
+	nextEpochParticipants, err := buildNextEpochActiveParticipants(
+		m.state.CurrentEpoch.ActiveIdentities.Lookup(),
+		m.state.CurrentEpochSetup,
+		&epochRecover.EpochSetup)
+	if err != nil {
+		m.telemetry.OnInvalidServiceEvent(epochRecover.ServiceEvent(), fmt.Errorf("rejecting EpochRecover event: %w", err))
+		return false, nil
+	}
+	nextEpochState, err := flow.NewEpochStateContainer(
+		flow.UntrustedEpochStateContainer{
+			SetupID:          epochRecover.EpochSetup.ID(),
+			CommitID:         epochRecover.EpochCommit.ID(),
+			ActiveIdentities: nextEpochParticipants,
+			EpochExtensions:  nil,
+		},
+	)
+	if err != nil {
+		return false, fmt.Errorf("could not construct next epoch state: %w", err)
+	}
+
+	// update corresponding service events
+	nextEpochSetup := epochRecover.EpochSetup
+	nextEpochCommit := epochRecover.EpochCommit
+
+	err = m.ejector.TrackDynamicIdentityList(nextEpochState.ActiveIdentities)
+	if err != nil {
+		if protocol.IsInvalidServiceEventError(err) {
+			m.telemetry.OnInvalidServiceEvent(epochRecover.ServiceEvent(), fmt.Errorf("rejecting EpochRecover event: %w", err))
+			return false, nil
+		}
+		return false, fmt.Errorf("unexpected errors tracking identity list: %w", err)
+	}
+	// If we have processed a valid EpochRecover event, we should exit EFM.
+	newMinEpochStateEntry, err := flow.NewMinEpochStateEntry(
+		flow.UntrustedMinEpochStateEntry{
+			PreviousEpoch:          m.state.PreviousEpoch,
+			CurrentEpoch:           m.state.CurrentEpoch,
+			NextEpoch:              nextEpochState,
+			EpochFallbackTriggered: false,
+		},
+	)
+	if err != nil {
+		return false, fmt.Errorf("could not create min epoch state: %w", err)
+	}
+
+	m.state, err = flow.NewEpochStateEntry(
+		flow.UntrustedEpochStateEntry{
+			MinEpochStateEntry:  newMinEpochStateEntry,
+			PreviousEpochSetup:  m.state.PreviousEpochSetup,
+			PreviousEpochCommit: m.state.PreviousEpochCommit,
+			CurrentEpochSetup:   m.state.CurrentEpochSetup,
+			CurrentEpochCommit:  m.state.CurrentEpochCommit,
+			NextEpochSetup:      &nextEpochSetup,
+			NextEpochCommit:     &nextEpochCommit,
+		},
+	)
+	if err != nil {
+		return false, fmt.Errorf("could not construct epoch state entry: %w", err)
+	}
+	m.telemetry.OnServiceEventProcessed(epochRecover.ServiceEvent())
+	return true, nil
+}
+
+// ensureValidEpochRecover performs validity checks on the epoch recover event.
+// Expected errors during normal operations:
+//   - `protocol.InvalidServiceEventError` - if the service event is invalid or is not a valid state transition for the current protocol state.
+//
+// This function is side-effect free and only returns `protocol.InvalidServiceEventError` as errors.
+func (m *FallbackStateMachine) ensureValidEpochRecover(epochRecover *flow.EpochRecover) error {
+	if m.view+m.parentState.GetFinalizationSafetyThreshold() >= m.state.CurrentEpochFinalView() {
+		return protocol.NewInvalidServiceEventErrorf("could not process epoch recover, safety threshold reached")
+	}
+	err := protocol.IsValidExtendingEpochSetup(&epochRecover.EpochSetup, m.state)
+	if err != nil {
+		return fmt.Errorf("invalid setup portion in EpochRecover event: %w", err)
+	}
+	err = protocol.IsValidEpochCommit(&epochRecover.EpochCommit, &epochRecover.EpochSetup)
+	if err != nil {
+		return fmt.Errorf("invalid commit portion in EpochRecover event: %w", err)
+	}
+	return nil
+}
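To make the extension arithmetic above concrete, here is a small self-contained sketch with made-up numbers (epoch final view 10,000, safety threshold 200, extension length 10,000 views), mirroring the condition in `NewFallbackStateMachine`; it is illustrative only, not code from this PR.

```go
package main

import "fmt"

const (
	currentEpochFinalView = uint64(10_000) // state.CurrentEpochFinalView()
	safetyThreshold       = uint64(200)    // parentState.GetFinalizationSafetyThreshold()
	extensionViewCount    = uint64(10_000) // parentState.GetEpochExtensionViewCount()
)

// needsExtension mirrors the constructor's check: once the candidate view is
// within `safetyThreshold` views of the epoch's final view and no next epoch
// is committed, an extension is appended to the current epoch.
func needsExtension(candidateView uint64, nextEpochCommitted bool) bool {
	return !nextEpochCommitted && candidateView+safetyThreshold >= currentEpochFinalView
}

func main() {
	fmt.Println(needsExtension(9_799, false)) // false: threshold not yet reached
	fmt.Println(needsExtension(9_800, false)) // true: extension [10_001, 20_000] is added
	fmt.Println(needsExtension(9_800, true))  // false: the committed next epoch takes over instead
}
```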
diff --git a/state/protocol/protocol_state/epochs/fallback_statemachine_test.go b/state/protocol/protocol_state/epochs/fallback_statemachine_test.go
new file mode 100644
index 00000000000..996c71a71ae
--- /dev/null
+++ b/state/protocol/protocol_state/epochs/fallback_statemachine_test.go
@@ -0,0 +1,859 @@
+package epochs
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+	"pgregory.net/rapid"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/filter"
+	mockstate "github.com/onflow/flow-go/state/protocol/mock"
+	protocol_statemock "github.com/onflow/flow-go/state/protocol/protocol_state/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// extensionViewCount is the number of views for which the epoch is extended. This value is returned from the KV store.
+const extensionViewCount = uint64(10_000)
+
+func TestEpochFallbackStateMachine(t *testing.T) {
+	suite.Run(t, new(EpochFallbackStateMachineSuite))
+}
+
+// EpochFallbackStateMachineSuite is a dedicated test suite for testing the epoch fallback state machine.
+type EpochFallbackStateMachineSuite struct {
+	BaseStateMachineSuite
+	kvstore *mockstate.KVStoreReader
+
+	stateMachine *FallbackStateMachine
+}
+
+func (s *EpochFallbackStateMachineSuite) SetupTest() {
+	s.BaseStateMachineSuite.SetupTest()
+	s.parentProtocolState.EpochFallbackTriggered = true
+
+	s.kvstore = mockstate.NewKVStoreReader(s.T())
+	s.kvstore.On("GetEpochExtensionViewCount").Return(extensionViewCount).Maybe()
+	s.kvstore.On("GetFinalizationSafetyThreshold").Return(uint64(200))
+
+	var err error
+	s.stateMachine, err = NewFallbackStateMachine(s.kvstore, s.consumer, s.candidate.View, s.parentProtocolState.Copy())
+	require.NoError(s.T(), err)
+}
+
+// TestProcessEpochSetupIsNoop ensures that processing an epoch setup event is a no-op.
+func (s *EpochFallbackStateMachineSuite) TestProcessEpochSetupIsNoop() {
+	setup := unittest.EpochSetupFixture()
+	s.consumer.On("OnServiceEventReceived", setup.ServiceEvent()).Once()
+	s.consumer.On("OnInvalidServiceEvent", setup.ServiceEvent(),
+		unittest.MatchInvalidServiceEventError).Once()
+	applied, err := s.stateMachine.ProcessEpochSetup(setup)
+	require.NoError(s.T(), err)
+	require.False(s.T(), applied)
+	updatedState, stateID, hasChanges := s.stateMachine.Build()
+	require.False(s.T(), hasChanges)
+	require.Equal(s.T(), s.parentProtocolState.ID(), updatedState.ID())
+	require.Equal(s.T(), updatedState.ID(), stateID)
+	require.Equal(s.T(), s.parentProtocolState.ID(), s.stateMachine.ParentState().ID())
+}
+
+// TestProcessEpochCommitIsNoop ensures that processing an epoch commit event is a no-op.
+func (s *EpochFallbackStateMachineSuite) TestProcessEpochCommitIsNoop() {
+	commit := unittest.EpochCommitFixture()
+	s.consumer.On("OnServiceEventReceived", commit.ServiceEvent()).Once()
+	s.consumer.On("OnInvalidServiceEvent", commit.ServiceEvent(),
+		unittest.MatchInvalidServiceEventError).Once()
+	applied, err := s.stateMachine.ProcessEpochCommit(commit)
+	require.NoError(s.T(), err)
+	require.False(s.T(), applied)
+	updatedState, stateID, hasChanges := s.stateMachine.Build()
+	require.False(s.T(), hasChanges)
+	require.Equal(s.T(), s.parentProtocolState.ID(), updatedState.ID())
+	require.Equal(s.T(), updatedState.ID(), stateID)
+	require.Equal(s.T(), s.parentProtocolState.ID(), s.stateMachine.ParentState().ID())
+}
+
+// TestProcessEpochRecover ensures that, after processing an EpochRecover event, the state machine correctly
+// initializes the next epoch with the expected values. Tests the happy-path scenario where the next epoch has been set up correctly.
+func (s *EpochFallbackStateMachineSuite) TestProcessEpochRecover() {
+	nextEpochParticipants := s.parentProtocolState.CurrentEpochIdentityTable.Copy()
+	epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) {
+		setup.Participants = nextEpochParticipants.ToSkeleton()
+		setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton())
+		setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1
+		setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1
+		setup.FinalView = setup.FirstView + 10_000
+	})
+	s.consumer.On("OnServiceEventReceived", epochRecover.ServiceEvent()).Once()
+	s.consumer.On("OnServiceEventProcessed", epochRecover.ServiceEvent()).Once()
+	processed, err := s.stateMachine.ProcessEpochRecover(epochRecover)
+	require.NoError(s.T(), err)
+	require.True(s.T(), processed)
+	updatedState, updatedStateID, hasChanges := s.stateMachine.Build()
+	require.True(s.T(), hasChanges, "should have changes")
+	require.Equal(s.T(), updatedState.ID(), updatedStateID, "state ID should be equal to updated state ID")
+
+	expectedState := &flow.MinEpochStateEntry{
+		PreviousEpoch: s.parentProtocolState.PreviousEpoch.Copy(),
+		CurrentEpoch:  s.parentProtocolState.CurrentEpoch,
+		NextEpoch: &flow.EpochStateContainer{
+			SetupID:          epochRecover.EpochSetup.ID(),
+			CommitID:         epochRecover.EpochCommit.ID(),
+			ActiveIdentities: flow.DynamicIdentityEntryListFromIdentities(nextEpochParticipants),
+		},
+		EpochFallbackTriggered: false,
+	}
+	require.Equal(s.T(), expectedState, updatedState.MinEpochStateEntry, "updatedState should be equal to expected one")
+}
+
+// TestProcessInvalidEpochRecover tests that an epoch recover event which is invalid, or incompatible with the current
+// protocol state, is reported to telemetry as invalid and is not applied.
+func (s *EpochFallbackStateMachineSuite) TestProcessInvalidEpochRecover() { + nextEpochParticipants := s.parentProtocolState.CurrentEpochIdentityTable.Copy() + mockConsumer := func(epochRecover *flow.EpochRecover) { + s.consumer.On("OnServiceEventReceived", epochRecover.ServiceEvent()).Once() + s.consumer.On("OnInvalidServiceEvent", epochRecover.ServiceEvent(), + unittest.MatchInvalidServiceEventError).Once() + } + s.Run("invalid-first-view", func() { + epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + setup.Participants = nextEpochParticipants.ToSkeleton() + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 2 // invalid view + setup.FinalView = setup.FirstView + 10_000 + }) + mockConsumer(epochRecover) + processed, err := s.stateMachine.ProcessEpochRecover(epochRecover) + require.NoError(s.T(), err) + require.False(s.T(), processed) + }) + s.Run("invalid-first-view_ignores-epoch-extension", func() { + epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + setup.Participants = nextEpochParticipants.ToSkeleton() + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 10_000 + }) + + parentProtocolState := s.parentProtocolState.Copy() + parentProtocolState.CurrentEpoch.EpochExtensions = []flow.EpochExtension{ + { + FirstView: s.parentProtocolState.CurrentEpochSetup.FinalView + 2, // invalid view for extension + FinalView: s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + 10_000, + }, + } + + candidateView := s.parentProtocolState.CurrentEpochSetup.FinalView - s.kvstore.GetFinalizationSafetyThreshold() + 1 + stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, candidateView, parentProtocolState) + require.NoError(s.T(), err) + + mockConsumer(epochRecover) + processed, err := stateMachine.ProcessEpochRecover(epochRecover) + require.NoError(s.T(), err) + require.False(s.T(), processed) + }) + s.Run("invalid-counter", func() { + epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + setup.Participants = nextEpochParticipants.ToSkeleton() + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 2 // invalid counter + setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 10_000 + }) + mockConsumer(epochRecover) + processed, err := s.stateMachine.ProcessEpochRecover(epochRecover) + require.NoError(s.T(), err) + require.False(s.T(), processed) + }) + s.Run("invalid-commit-counter", func() { + epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + setup.Participants = nextEpochParticipants.ToSkeleton() + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 10_000 + }) + epochRecover.EpochCommit.Counter += 1 // invalid commit counter + mockConsumer(epochRecover) + processed, err := s.stateMachine.ProcessEpochRecover(epochRecover) + 
require.NoError(s.T(), err) + require.False(s.T(), processed) + }) + s.Run("invalid-cluster-qcs", func() { + epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + setup.Participants = nextEpochParticipants.ToSkeleton() + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 10_000 + }) + epochRecover.EpochCommit.ClusterQCs = epochRecover.EpochCommit.ClusterQCs[1:] // invalid cluster QCs + mockConsumer(epochRecover) + processed, err := s.stateMachine.ProcessEpochRecover(epochRecover) + require.NoError(s.T(), err) + require.False(s.T(), processed) + }) + s.Run("invalid-DKG-group-key", func() { + epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + setup.Participants = nextEpochParticipants.ToSkeleton() + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 10_000 + }) + epochRecover.EpochCommit.DKGGroupKey = nil // no DKG public group key + mockConsumer(epochRecover) + processed, err := s.stateMachine.ProcessEpochRecover(epochRecover) + require.NoError(s.T(), err) + require.False(s.T(), processed) + }) + s.Run("invalid-dkg-participants", func() { + epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + setup.Participants = nextEpochParticipants.ToSkeleton() + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 10_000 + }) + epochRecover.EpochCommit.DKGParticipantKeys = epochRecover.EpochCommit.DKGParticipantKeys[1:] // invalid DKG participants + mockConsumer(epochRecover) + processed, err := s.stateMachine.ProcessEpochRecover(epochRecover) + require.NoError(s.T(), err) + require.False(s.T(), processed) + }) + s.Run("next-epoch-present", func() { + epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + setup.Participants = nextEpochParticipants.ToSkeleton() + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 10_000 + }) + + parentProtocolState := s.parentProtocolState.Copy() + unittest.WithNextEpochProtocolState()(parentProtocolState) + + stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, s.candidate.View, parentProtocolState) + require.NoError(s.T(), err) + + mockConsumer(epochRecover) + processed, err := stateMachine.ProcessEpochRecover(epochRecover) + require.NoError(s.T(), err) + require.False(s.T(), processed) + }) + s.Run("reached-CommitSafetyThreshold_without-next-epoch-committed", func() { + epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + setup.Participants = nextEpochParticipants.ToSkeleton() + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + setup.FirstView = 
s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 10_000 + }) + thresholdView := s.parentProtocolState.CurrentEpochSetup.FinalView - s.kvstore.GetFinalizationSafetyThreshold() + stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, thresholdView, s.parentProtocolState) + require.NoError(s.T(), err) + + mockConsumer(epochRecover) + processed, err := stateMachine.ProcessEpochRecover(epochRecover) + require.NoError(s.T(), err) + require.False(s.T(), processed) + }) +} + +// TestTransitionToNextEpoch tests a scenario where the FallbackStateMachine processes first block from next epoch. +// It has to discard the parent state and build a new state with data from next epoch. +func (s *EpochFallbackStateMachineSuite) TestTransitionToNextEpoch() { + // update protocol state with next epoch information + unittest.WithNextEpochProtocolState()(s.parentProtocolState) + + candidate := unittest.BlockHeaderFixture( + unittest.HeaderWithView(s.parentProtocolState.CurrentEpochSetup.FinalView + 1)) + + expectedState := &flow.EpochStateEntry{ + MinEpochStateEntry: &flow.MinEpochStateEntry{ + PreviousEpoch: s.parentProtocolState.CurrentEpoch.Copy(), + CurrentEpoch: *s.parentProtocolState.NextEpoch.Copy(), + NextEpoch: nil, + EpochFallbackTriggered: true, + }, + PreviousEpochSetup: s.parentProtocolState.CurrentEpochSetup, + PreviousEpochCommit: s.parentProtocolState.CurrentEpochCommit, + CurrentEpochSetup: s.parentProtocolState.NextEpochSetup, + CurrentEpochCommit: s.parentProtocolState.NextEpochCommit, + NextEpochSetup: nil, + NextEpochCommit: nil, + } + + // Irrespective of whether the parent state is in EFM, the FallbackStateMachine should always set + // `EpochFallbackTriggered` to true and transition the next epoch, because the candidate block + // belongs to the next epoch. + var err error + for _, parentAlreadyInEFM := range []bool{true, false} { + parentProtocolState := s.parentProtocolState.Copy() + parentProtocolState.EpochFallbackTriggered = parentAlreadyInEFM + + s.stateMachine, err = NewFallbackStateMachine(s.kvstore, s.consumer, candidate.View, parentProtocolState.Copy()) + require.NoError(s.T(), err) + err = s.stateMachine.TransitionToNextEpoch() + require.NoError(s.T(), err) + updatedState, stateID, hasChanges := s.stateMachine.Build() + require.True(s.T(), hasChanges) + require.NotEqual(s.T(), parentProtocolState.ID(), updatedState.ID()) + require.Equal(s.T(), updatedState.ID(), stateID) + require.Equal(s.T(), expectedState, updatedState, "FallbackStateMachine produced unexpected Protocol State") + } +} + +// TestTransitionToNextEpochNotAllowed tests different scenarios where transition to next epoch is not allowed. 
+func (s *EpochFallbackStateMachineSuite) TestTransitionToNextEpochNotAllowed() { + s.Run("no next epoch protocol state", func() { + protocolState := unittest.EpochStateFixture() + candidate := unittest.BlockHeaderFixture( + unittest.HeaderWithView(protocolState.CurrentEpochSetup.FinalView + 1)) + stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, candidate.View, protocolState) + require.NoError(s.T(), err) + err = stateMachine.TransitionToNextEpoch() + require.Error(s.T(), err, "should not allow transition to next epoch if there is no next epoch protocol state") + }) + s.Run("next epoch not committed", func() { + protocolState := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) { + entry.NextEpoch.CommitID = flow.ZeroID + entry.NextEpochCommit = nil + entry.NextEpochIdentityTable = nil + }) + candidate := unittest.BlockHeaderFixture( + unittest.HeaderWithView(protocolState.CurrentEpochSetup.FinalView + 1)) + stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, candidate.View, protocolState) + require.NoError(s.T(), err) + err = stateMachine.TransitionToNextEpoch() + require.Error(s.T(), err, "should not allow transition to next epoch if it is not committed") + }) + s.Run("candidate block is not from next epoch", func() { + protocolState := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) + candidate := unittest.BlockHeaderFixture( + unittest.HeaderWithView(protocolState.CurrentEpochSetup.FinalView)) + stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, candidate.View, protocolState) + require.NoError(s.T(), err) + err = stateMachine.TransitionToNextEpoch() + require.Error(s.T(), err, "should not allow transition to next epoch if next block is not first block from next epoch") + }) +} + +// TestNewEpochFallbackStateMachine tests that creating epoch fallback state machine sets +// `EpochFallbackTriggered` to true to record that we have entered epoch fallback mode[EFM]. +// It tests scenarios where the EFM is entered in different phases of the epoch, +// and verifies protocol-compliant addition of epoch extensions, depending on the candidate view and epoch phase. +func (s *EpochFallbackStateMachineSuite) TestNewEpochFallbackStateMachine() { + parentProtocolState := s.parentProtocolState.Copy() + parentProtocolState.EpochFallbackTriggered = false + + thresholdView := parentProtocolState.CurrentEpochSetup.FinalView - s.kvstore.GetFinalizationSafetyThreshold() + + // The view we enter EFM is in the staking phase. The resulting epoch state should be unchanged to the + // parent state _except_ that `EpochFallbackTriggered` is set to true. + // We expect no epoch extension to be added since we have not reached the threshold view. 
+ s.Run("threshold-not-reached", func() { + candidateView := thresholdView - 1 + stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, candidateView, parentProtocolState.Copy()) + require.NoError(s.T(), err) + require.Equal(s.T(), parentProtocolState.ID(), stateMachine.ParentState().ID()) + require.Equal(s.T(), candidateView, stateMachine.View()) + + updatedState, stateID, hasChanges := stateMachine.Build() + require.True(s.T(), hasChanges, "EpochFallbackTriggered has to be updated") + require.Equal(s.T(), updatedState.ID(), stateID) + require.NotEqual(s.T(), parentProtocolState.ID(), stateID) + + expectedProtocolState := &flow.MinEpochStateEntry{ + PreviousEpoch: parentProtocolState.PreviousEpoch, + CurrentEpoch: flow.EpochStateContainer{ + SetupID: parentProtocolState.CurrentEpoch.SetupID, + CommitID: parentProtocolState.CurrentEpoch.CommitID, + ActiveIdentities: parentProtocolState.CurrentEpoch.ActiveIdentities, + }, + NextEpoch: nil, + EpochFallbackTriggered: true, + } + require.Equal(s.T(), expectedProtocolState, updatedState.MinEpochStateEntry, "state should be equal to expected one") + }) + + // The view we enter EFM is in the staking phase. The resulting epoch state should set `EpochFallbackTriggered` to true. + // We expect an epoch extension to be added since we have reached the threshold view. + s.Run("staking-phase", func() { + stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, thresholdView, parentProtocolState.Copy()) + require.NoError(s.T(), err) + require.Equal(s.T(), parentProtocolState.ID(), stateMachine.ParentState().ID()) + require.Equal(s.T(), thresholdView, stateMachine.View()) + + updatedState, stateID, hasChanges := stateMachine.Build() + require.True(s.T(), hasChanges, "EpochFallbackTriggered has to be updated") + require.Equal(s.T(), updatedState.ID(), stateID) + require.NotEqual(s.T(), parentProtocolState.ID(), stateID) + + expectedProtocolState := &flow.MinEpochStateEntry{ + PreviousEpoch: parentProtocolState.PreviousEpoch, + CurrentEpoch: flow.EpochStateContainer{ + SetupID: parentProtocolState.CurrentEpoch.SetupID, + CommitID: parentProtocolState.CurrentEpoch.CommitID, + ActiveIdentities: parentProtocolState.CurrentEpoch.ActiveIdentities, + EpochExtensions: []flow.EpochExtension{ + { + FirstView: parentProtocolState.CurrentEpochFinalView() + 1, + FinalView: parentProtocolState.CurrentEpochFinalView() + extensionViewCount, + }, + }, + }, + NextEpoch: nil, + EpochFallbackTriggered: true, + } + require.Equal(s.T(), expectedProtocolState, updatedState.MinEpochStateEntry, "state should be equal to expected one") + }) + + // The view we enter EFM is in the epoch setup phase. This means that a SetupEvent for the next epoch is in the parent block's + // protocol state. We expect an epoch extension to be added and the outdated information for the next epoch to be removed. 
+ s.Run("setup-phase", func() { + parentProtocolState := parentProtocolState.Copy() + // setup next epoch but without commit event + unittest.WithNextEpochProtocolState()(parentProtocolState) + parentProtocolState.NextEpoch.CommitID = flow.ZeroID + parentProtocolState.NextEpochCommit = nil + + stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, thresholdView, parentProtocolState) + require.NoError(s.T(), err) + require.Equal(s.T(), parentProtocolState.ID(), stateMachine.ParentState().ID()) + require.Equal(s.T(), thresholdView, stateMachine.View()) + + updatedState, stateID, hasChanges := stateMachine.Build() + require.True(s.T(), hasChanges, "EpochFallbackTriggered has to be updated") + require.Nil(s.T(), updatedState.NextEpoch, "outdated information for the next epoch should have been removed") + require.Equal(s.T(), updatedState.ID(), stateID) + require.NotEqual(s.T(), parentProtocolState.ID(), stateID) + + expectedProtocolState := &flow.MinEpochStateEntry{ + PreviousEpoch: parentProtocolState.PreviousEpoch, + CurrentEpoch: flow.EpochStateContainer{ + SetupID: parentProtocolState.CurrentEpoch.SetupID, + CommitID: parentProtocolState.CurrentEpoch.CommitID, + ActiveIdentities: parentProtocolState.CurrentEpoch.ActiveIdentities, + EpochExtensions: []flow.EpochExtension{ + { + FirstView: parentProtocolState.CurrentEpochFinalView() + 1, + FinalView: parentProtocolState.CurrentEpochFinalView() + extensionViewCount, + }, + }, + }, + NextEpoch: nil, + EpochFallbackTriggered: true, + } + require.Equal(s.T(), expectedProtocolState, updatedState.MinEpochStateEntry, "state should be equal to expected one") + }) + + // If the next epoch has been committed, the extension shouldn't be added to the current epoch (verified below). Instead, the + // extension should be added to the next epoch when **next** epoch reaches its safety threshold, which is covered in separate test. + s.Run("commit-phase", func() { + parentProtocolState := parentProtocolState.Copy() + // setup next committed epoch + unittest.WithNextEpochProtocolState()(parentProtocolState) + + // if the next epoch has been committed, the extension shouldn't be added to the current epoch + // instead it will be added to the next epoch when **next** epoch reaches its safety threshold. 
+
+		stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, thresholdView, parentProtocolState)
+		require.NoError(s.T(), err)
+		require.Equal(s.T(), parentProtocolState.ID(), stateMachine.ParentState().ID())
+		require.Equal(s.T(), thresholdView, stateMachine.View())
+
+		updatedState, stateID, hasChanges := stateMachine.Build()
+		require.True(s.T(), hasChanges, "EpochFallbackTriggered has to be updated")
+		require.Equal(s.T(), updatedState.ID(), stateID)
+		require.NotEqual(s.T(), parentProtocolState.ID(), stateID)
+
+		expectedProtocolState := &flow.MinEpochStateEntry{
+			PreviousEpoch: parentProtocolState.PreviousEpoch,
+			CurrentEpoch: flow.EpochStateContainer{
+				SetupID:          parentProtocolState.CurrentEpoch.SetupID,
+				CommitID:         parentProtocolState.CurrentEpoch.CommitID,
+				ActiveIdentities: parentProtocolState.CurrentEpoch.ActiveIdentities,
+			},
+			NextEpoch:              parentProtocolState.NextEpoch,
+			EpochFallbackTriggered: true,
+		}
+		require.Equal(s.T(), expectedProtocolState, updatedState.MinEpochStateEntry, "state should be equal to expected one")
+	})
+}
+
+// TestEpochFallbackStateMachineInjectsMultipleExtensions tests that the state machine injects multiple extensions
+// as it crosses the safety threshold of the current epoch and then of the extensions themselves.
+// In this test, we simulate the scenario where the current epoch enters EFM at a view when the next epoch has not been committed yet.
+// When the next epoch has been committed, the extension is added to the next epoch instead; this is covered in a separate test.
+func (s *EpochFallbackStateMachineSuite) TestEpochFallbackStateMachineInjectsMultipleExtensions() {
+	parentStateInStakingPhase := s.parentProtocolState.Copy()
+	parentStateInStakingPhase.EpochFallbackTriggered = false
+
+	parentStateInSetupPhase := parentStateInStakingPhase.Copy()
+	unittest.WithNextEpochProtocolState()(parentStateInSetupPhase)
+	parentStateInSetupPhase.NextEpoch.CommitID = flow.ZeroID
+	parentStateInSetupPhase.NextEpochCommit = nil
+
+	for _, originalParentState := range []*flow.RichEpochStateEntry{parentStateInStakingPhase, parentStateInSetupPhase} {
+		// In the previous test `TestNewEpochFallbackStateMachine`, we verified that the first extension is added correctly.
+		// Below, we test proper addition of the subsequent extensions: a new extension should be added exactly when we
+		// reach (equality) the respective threshold view.
+		firstExtensionViewThreshold := originalParentState.CurrentEpochSetup.FinalView + extensionViewCount - s.kvstore.GetFinalizationSafetyThreshold()
+		secondExtensionViewThreshold := originalParentState.CurrentEpochSetup.FinalView + 2*extensionViewCount - s.kvstore.GetFinalizationSafetyThreshold()
+		// We progress through views that are strictly smaller than the threshold; up to that point, only the
+		// previously added extensions should exist.
+
+		// We assert the validity of the extensions after producing them. We expect 2 extensions to be added to the
+		// current epoch: one after we reach the safety threshold of the epoch, and another one after reaching the
+		// threshold of the first extension itself.
+		firstExtension := flow.EpochExtension{
+			FirstView: originalParentState.CurrentEpochSetup.FinalView + 1,
+			FinalView: originalParentState.CurrentEpochSetup.FinalView + extensionViewCount,
+		}
+		secondExtension := flow.EpochExtension{
+			FirstView: firstExtension.FinalView + 1,
+			FinalView: firstExtension.FinalView + extensionViewCount,
+		}
+
+		parentProtocolState := originalParentState.Copy()
+		candidateView := originalParentState.CurrentEpochSetup.FirstView + 1
+		// a utility function to progress the state to the target view;
+		// it updates variables that are defined in the outer context
+		evolveStateToView := func(targetView uint64) {
+			for ; candidateView < targetView; candidateView++ {
+				stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, candidateView, parentProtocolState.Copy())
+				require.NoError(s.T(), err)
+				updatedState, _, _ := stateMachine.Build()
+
+				parentProtocolState, err = flow.NewRichEpochStateEntry(updatedState)
+				require.NoError(s.T(), err)
+			}
+		}
+
+		type TestData struct {
+			TargetView         uint64
+			ExpectedExtensions []flow.EpochExtension
+		}
+
+		for _, data := range []TestData{
+			{
+				TargetView:         firstExtensionViewThreshold,
+				ExpectedExtensions: []flow.EpochExtension{firstExtension},
+			},
+			{
+				TargetView:         secondExtensionViewThreshold,
+				ExpectedExtensions: []flow.EpochExtension{firstExtension, secondExtension},
+			},
+		} {
+			evolveStateToView(data.TargetView)
+
+			expectedState := &flow.MinEpochStateEntry{
+				PreviousEpoch: originalParentState.PreviousEpoch,
+				CurrentEpoch: flow.EpochStateContainer{
+					SetupID:          originalParentState.CurrentEpoch.SetupID,
+					CommitID:         originalParentState.CurrentEpoch.CommitID,
+					ActiveIdentities: originalParentState.CurrentEpoch.ActiveIdentities,
+					EpochExtensions:  data.ExpectedExtensions,
+				},
+				NextEpoch:              nil,
+				EpochFallbackTriggered: true,
+			}
+			require.Equal(s.T(), expectedState, parentProtocolState.MinEpochStateEntry)
+			require.Greater(s.T(), parentProtocolState.CurrentEpochFinalView(), candidateView,
+				"the current epoch's final view should be greater than the last processed view")
+		}
+	}
+}
+
+// TestEpochFallbackStateMachineInjectsMultipleExtensions_NextEpochCommitted tests that the state machine injects multiple
+// extensions as it crosses the safety threshold of the current epoch and then of the extensions themselves.
+// In this test, we simulate the scenario where the current epoch enters fallback mode after the next epoch has been committed.
+// It is expected that the state machine will transition into the next epoch (since it was committed),
+// then reach the safety threshold and add the extension to the next epoch, which at that point will be considered 'current'.
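+// As a rough sketch of the expected view arithmetic (with F := NextEpochSetup.FinalView, V := the epoch extension
+// view count and T := the finalization safety threshold, all suite-level assumptions): extension i covers views
+// [F + (i-1)*V + 1, F + i*V], and extension i+1 is expected once the candidate view comes within T views of the
+// then-current final view F + i*V (i = 0 yielding the first extension).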
+func (s *EpochFallbackStateMachineSuite) TestEpochFallbackStateMachineInjectsMultipleExtensions_NextEpochCommitted() {
+	originalParentState := s.parentProtocolState.Copy()
+	originalParentState.EpochFallbackTriggered = false
+	unittest.WithNextEpochProtocolState()(originalParentState)
+
+	// We assert the validity of the extensions after producing them. We expect 3 extensions to be added to the
+	// (then) current epoch: one after we reach the safety threshold of the epoch, and two more after reaching the
+	// thresholds of the extensions themselves.
+	firstExtension := flow.EpochExtension{
+		FirstView: originalParentState.NextEpochSetup.FinalView + 1,
+		FinalView: originalParentState.NextEpochSetup.FinalView + extensionViewCount,
+	}
+	secondExtension := flow.EpochExtension{
+		FirstView: firstExtension.FinalView + 1,
+		FinalView: firstExtension.FinalView + extensionViewCount,
+	}
+	thirdExtension := flow.EpochExtension{
+		FirstView: secondExtension.FinalView + 1,
+		FinalView: secondExtension.FinalView + extensionViewCount,
+	}
+
+	// In the previous test `TestNewEpochFallbackStateMachine`, we verified that the first extension is added correctly.
+	// Below, we test proper addition of the subsequent extensions: a new extension should be added exactly when we
+	// reach (equality) the respective threshold view.
+	firstExtensionViewThreshold := originalParentState.NextEpochSetup.FinalView + extensionViewCount - s.kvstore.GetFinalizationSafetyThreshold()
+	secondExtensionViewThreshold := originalParentState.NextEpochSetup.FinalView + 2*extensionViewCount - s.kvstore.GetFinalizationSafetyThreshold()
+	thirdExtensionViewThreshold := originalParentState.NextEpochSetup.FinalView + 3*extensionViewCount - s.kvstore.GetFinalizationSafetyThreshold()
+
+	parentProtocolState := originalParentState.Copy()
+	candidateView := originalParentState.CurrentEpochSetup.FirstView + 1
+	// a utility function to progress the state to the target view;
+	// it updates variables that are defined in the outer context
+	evolveStateToView := func(targetView uint64) {
+		for ; candidateView < targetView; candidateView++ {
+			stateMachine, err := NewFallbackStateMachine(s.kvstore, s.consumer, candidateView, parentProtocolState.Copy())
+			require.NoError(s.T(), err)
+
+			if candidateView > parentProtocolState.CurrentEpochFinalView() {
+				require.NoError(s.T(), stateMachine.TransitionToNextEpoch())
+			}
+
+			updatedState, _, _ := stateMachine.Build()
+			parentProtocolState, err = flow.NewRichEpochStateEntry(updatedState)
+			require.NoError(s.T(), err)
+		}
+	}
+
+	type TestData struct {
+		TargetView         uint64
+		ExpectedExtensions []flow.EpochExtension
+	}
+
+	for _, data := range []TestData{
+		{
+			TargetView:         firstExtensionViewThreshold,
+			ExpectedExtensions: []flow.EpochExtension{firstExtension},
+		},
+		{
+			TargetView:         secondExtensionViewThreshold,
+			ExpectedExtensions: []flow.EpochExtension{firstExtension, secondExtension},
+		},
+		{
+			TargetView:         thirdExtensionViewThreshold,
+			ExpectedExtensions: []flow.EpochExtension{firstExtension, secondExtension, thirdExtension},
+		},
+	} {
+		evolveStateToView(data.TargetView)
+
+		expectedState := &flow.MinEpochStateEntry{
+			PreviousEpoch: originalParentState.CurrentEpoch.Copy(),
+			CurrentEpoch: flow.EpochStateContainer{
+				SetupID:          originalParentState.NextEpoch.SetupID,
+				CommitID:         originalParentState.NextEpoch.CommitID,
+				ActiveIdentities: originalParentState.NextEpoch.ActiveIdentities,
+				EpochExtensions:  data.ExpectedExtensions,
+			},
+			NextEpoch:              nil,
+			EpochFallbackTriggered: true,
+		}
+		require.Equal(s.T(), expectedState, parentProtocolState.MinEpochStateEntry)
+		require.Greater(s.T(), parentProtocolState.CurrentEpochFinalView(), candidateView,
+			"the current epoch's final view should be greater than the last processed view")
+	}
+}
+
+// TestEpochRecoverAndEjectionInSameBlock tests that the state machine correctly handles ejection events and a subsequent
+// epoch recover in the same block. Specifically, we test two cases:
+// 1. Happy Path: The Epoch Recover event excludes the previously ejected node.
+// 2. Invalid Epoch Recover: an epoch recover event which re-admits an ejected identity is ignored. Such an event must be
+//    considered illegal, since the smart contract emitted the ejection before the epoch recover and service events are
+//    delivered in an order-preserving manner. However, since the FallbackStateMachine is intended to keep the protocol
+//    alive even in the presence of (largely) arbitrary Epoch Smart Contract bugs, it should also handle this case
+//    gracefully. In this case, the EpochRecover service event should be discarded and the internal state should remain
+//    unchanged.
+func (s *EpochFallbackStateMachineSuite) TestEpochRecoverAndEjectionInSameBlock() {
+	nextEpochParticipants := s.parentProtocolState.CurrentEpochIdentityTable.Copy()
+	ejectedIdentityID := nextEpochParticipants.Filter(filter.HasRole[flow.Identity](flow.RoleAccess))[0].NodeID
+	ejectionEvent := &flow.EjectNode{NodeID: ejectedIdentityID}
+
+	s.Run("happy path", func() {
+		s.consumer.On("OnServiceEventReceived", ejectionEvent.ServiceEvent()).Once()
+		s.consumer.On("OnServiceEventProcessed", ejectionEvent.ServiceEvent()).Once()
+		wasEjected := s.stateMachine.EjectIdentity(ejectionEvent)
+		require.True(s.T(), wasEjected)
+
+		epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) {
+			setup.Participants = nextEpochParticipants.ToSkeleton().Filter(
+				filter.Not(filter.HasNodeID[flow.IdentitySkeleton](ejectedIdentityID)))
+			setup.Assignments = unittest.ClusterAssignment(1, setup.Participants.ToSkeleton())
+			setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1
+			setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1
+			setup.FinalView = setup.FirstView + 10_000
+		})
+		s.consumer.On("OnServiceEventReceived", epochRecover.ServiceEvent()).Once()
+		s.consumer.On("OnServiceEventProcessed", epochRecover.ServiceEvent()).Once()
+		processed, err := s.stateMachine.ProcessEpochRecover(epochRecover)
+		require.NoError(s.T(), err)
+		require.True(s.T(), processed)
+
+		updatedState, _, _ := s.stateMachine.Build()
+		require.False(s.T(), updatedState.EpochFallbackTriggered, "should exit EFM")
+		require.NotNil(s.T(), updatedState.NextEpoch, "should setup & commit next epoch")
+	})
+	s.Run("invalid epoch recover event", func() {
+		s.kvstore = mockstate.NewKVStoreReader(s.T())
+		s.kvstore.On("GetEpochExtensionViewCount").Return(extensionViewCount).Maybe()
+		s.kvstore.On("GetFinalizationSafetyThreshold").Return(uint64(200))
+
+		var err error
+		s.stateMachine, err = NewFallbackStateMachine(s.kvstore, s.consumer, s.candidate.View, s.parentProtocolState.Copy())
+		require.NoError(s.T(), err)
+
+		s.consumer.On("OnServiceEventReceived", ejectionEvent.ServiceEvent()).Once()
+		s.consumer.On("OnServiceEventProcessed", ejectionEvent.ServiceEvent()).Once()
+		wasEjected := s.stateMachine.EjectIdentity(ejectionEvent)
+		require.True(s.T(), wasEjected)
+
+		epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) {
+			setup.Participants = nextEpochParticipants.ToSkeleton()
+			setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton())
+			setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1
+			setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1
+			setup.FinalView = setup.FirstView + 10_000
+		})
+		s.consumer.On("OnServiceEventReceived", epochRecover.ServiceEvent()).Once()
+		s.consumer.On("OnInvalidServiceEvent", epochRecover.ServiceEvent(),
+			unittest.MatchInvalidServiceEventError).Once()
+		processed, err := s.stateMachine.ProcessEpochRecover(epochRecover)
+		require.NoError(s.T(), err)
+		require.False(s.T(), processed)
+
+		updatedState, _, _ := s.stateMachine.Build()
+		require.True(s.T(), updatedState.EpochFallbackTriggered, "should remain in EFM")
+		require.Nil(s.T(), updatedState.NextEpoch, "next epoch should be nil as recover event is invalid")
+	})
+}
+
+// TestProcessingMultipleEventsInTheSameBlock tests that the state machine can process multiple events in the same block.
+// The fallback state machine has to be able to process any combination of events, in any order, within the same block.
+// This test generates a random number of setup, commit and recover events and processes them in random order.
+// A special rule is used to inject an ejection event: depending on the random draw, we inject the ejection event
+// before or after the recover event. Depending on the ordering of events, the recover event needs to be structured differently.
+func (s *EpochFallbackStateMachineSuite) TestProcessingMultipleEventsInTheSameBlock() {
+	rapid.Check(s.T(), func(t *rapid.T) {
+		s.SetupTest() // start each time with a clean state
+		// ATTENTION: drawing a rapid value can raise a panic, which unwinds the stack and exits the functor passed to Check().
+		// That is not an issue on its own, but we are using telemetry to check correct invocations of the state machine,
+		// and when a panic occurs, the mock's expectations would still be asserted, leading to a test failure.
+		// Specifically for that reason, we use a lower-level telemetry mock which allows manual assertion (at the end).
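+		// Context on the testing approach (rapid is a property-based testing library, presumably pgregory.net/rapid
+		// given the rapid.Check/Draw API): rapid.Check re-runs this functor many times with freshly drawn values and
+		// shrinks failing cases, which is why s.SetupTest() above rebuilds all fixtures on every iteration and why
+		// the mock's expectations are asserted manually at the very end.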
+ s.consumer = new(protocol_statemock.StateMachineTelemetryConsumer) + var err error + s.stateMachine, err = NewFallbackStateMachine(s.kvstore, s.consumer, s.candidate.View, s.parentProtocolState.Copy()) + require.NoError(s.T(), err) + + var events []flow.ServiceEvent + setupEvents := rapid.IntRange(0, 5).Draw(t, "number-of-setup-events") + for i := 0; i < setupEvents; i++ { + serviceEvent := unittest.EpochSetupFixture().ServiceEvent() + s.consumer.On("OnServiceEventReceived", serviceEvent).Once() + s.consumer.On("OnInvalidServiceEvent", serviceEvent, + unittest.MatchInvalidServiceEventError).Once() + events = append(events, serviceEvent) + } + + commitEvents := rapid.IntRange(0, 5).Draw(t, "number-of-commit-events") + for i := 0; i < commitEvents; i++ { + serviceEvent := unittest.EpochCommitFixture().ServiceEvent() + s.consumer.On("OnServiceEventReceived", serviceEvent).Once() + s.consumer.On("OnInvalidServiceEvent", serviceEvent, + unittest.MatchInvalidServiceEventError).Once() + events = append(events, serviceEvent) + } + + recoverEvents := rapid.IntRange(0, 5).Draw(t, "number-of-recover-events") + for i := 0; i < recoverEvents; i++ { + serviceEvent := unittest.EpochRecoverFixture().ServiceEvent() + s.consumer.On("OnServiceEventReceived", serviceEvent).Once() + s.consumer.On("OnInvalidServiceEvent", serviceEvent, + unittest.MatchInvalidServiceEventError).Once() + events = append(events, serviceEvent) + } + + var ejectedNodes flow.IdentifierList + var ejectionEvents flow.ServiceEventList + + includeEjection := rapid.Bool().Draw(t, "eject-node") + if includeEjection { + accessNodes := s.parentProtocolState.CurrentEpochSetup.Participants.Filter(filter.HasRole[flow.IdentitySkeleton](flow.RoleAccess)) + identity := rapid.SampledFrom(accessNodes).Draw(t, "ejection-node") + serviceEvent := (&flow.EjectNode{NodeID: identity.NodeID}).ServiceEvent() + s.consumer.On("OnServiceEventReceived", serviceEvent).Once() + s.consumer.On("OnServiceEventProcessed", serviceEvent).Once() + ejectionEvents = append(ejectionEvents, serviceEvent) + ejectedNodes = append(ejectedNodes, identity.NodeID) + } + + includeValidRecover := rapid.Bool().Draw(t, "include-valid-recover-event") + ejectionBeforeRecover := rapid.Bool().Draw(t, "ejection-before-recover") + if includeValidRecover { + serviceEvent := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + nextEpochParticipants := s.parentProtocolState.CurrentEpochIdentityTable.Copy() + if ejectionBeforeRecover { + // a valid recovery event cannot readmit a node ejected previously + setup.Participants = nextEpochParticipants.ToSkeleton().Filter( + filter.Not(filter.HasNodeID[flow.IdentitySkeleton](ejectedNodes...))) + } else { + setup.Participants = nextEpochParticipants.ToSkeleton() + } + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 10_000 + }).ServiceEvent() + s.consumer.On("OnServiceEventReceived", serviceEvent).Once() + s.consumer.On("OnServiceEventProcessed", serviceEvent).Once() + events = append(events, serviceEvent) + } + + events = rapid.Permutation(events).Draw(t, "events-permutation") + if ejectionBeforeRecover { + events = append(ejectionEvents, events...) + } else { + events = append(events, ejectionEvents...) 
+		}
+
+		for _, event := range events {
+			var err error
+			switch ev := event.Event.(type) {
+			case *flow.EpochSetup:
+				_, err = s.stateMachine.ProcessEpochSetup(ev)
+			case *flow.EpochCommit:
+				_, err = s.stateMachine.ProcessEpochCommit(ev)
+			case *flow.EpochRecover:
+				_, err = s.stateMachine.ProcessEpochRecover(ev)
+			case *flow.EjectNode:
+				_ = s.stateMachine.EjectIdentity(ev)
+			}
+			require.NoError(s.T(), err)
+		}
+		updatedState, _, hasChanges := s.stateMachine.Build()
+		for _, nodeID := range ejectedNodes {
+			ejectedIdentity, found := updatedState.CurrentEpoch.ActiveIdentities.ByNodeID(nodeID)
+			require.True(s.T(), found)
+			require.True(s.T(), ejectedIdentity.Ejected)
+		}
+
+		require.Equal(t, includeValidRecover || includeEjection, hasChanges,
+			"changes are expected if we include a valid recover event or eject nodes")
+		if includeValidRecover {
+			require.NotNil(t, updatedState.NextEpoch, "next epoch should be present")
+			for _, nodeID := range ejectedNodes {
+				ejectedIdentity, found := updatedState.NextEpoch.ActiveIdentities.ByNodeID(nodeID)
+				if ejectionBeforeRecover {
+					// if the ejection comes before the recover event, the ejected node should not be present in the next epoch
+					require.False(s.T(), found)
+				} else {
+					// in case of an ejection after the recover event, the ejected node should be present in the next epoch,
+					// but it has to be marked as 'ejected'.
+					require.True(s.T(), found)
+					require.True(s.T(), ejectedIdentity.Ejected)
+				}
+			}
+		}
+		s.consumer.AssertExpectations(s.T())
+	})
+}
diff --git a/state/protocol/protocol_state/epochs/happy_path_statemachine.go b/state/protocol/protocol_state/epochs/happy_path_statemachine.go
new file mode 100644
index 00000000000..1ba6de2bcde
--- /dev/null
+++ b/state/protocol/protocol_state/epochs/happy_path_statemachine.go
@@ -0,0 +1,293 @@
+package epochs
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+)
+
+// HappyPathStateMachine is a dedicated structure for evolving the Epoch-related portion of the overall Protocol State.
+// Based on the content of a new block, it updates epoch data, including the identity table, on the happy path.
+// The HappyPathStateMachine guarantees protocol-compliant evolution of the Epoch-related sub-state via the
+// following state transitions:
+// - epoch setup: transitions the current epoch from staking to setup phase; creates the next epoch's protocol state when processed.
+// - epoch commit: transitions the current epoch from setup to commit phase; commits the next epoch's protocol state when processed.
+// - epoch transition: on the first block of the new epoch (formally, the block's parent is still in the last epoch,
+//   while the new block has a view in the next epoch. Caution: the block's view is not necessarily the first view
+//   in the epoch, as there might be leader failures)
+// - identity changes: updates the identity table for the previous (if available), current, and next epoch (if available).
+//
+// All updates are applied to a copy of the parent protocol state, so the parent protocol state is not modified. The state
+// machine internally tracks the current protocol state. A separate instance should be created for each block to process
+// the updates therein. See flow.EpochPhase for detailed documentation about EFM and epoch phase transitions.
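+//
+// A minimal usage sketch (illustrative only; the surrounding plumbing and variable names are assumed):
+//
+//	sm, err := NewHappyPathStateMachine(telemetry, candidate.View, parentState)
+//	// handle err, e.g. an exception if the parent state is already in Epoch Fallback Mode
+//	_, err = sm.ProcessEpochSetup(setupEvent) // sentinel protocol.InvalidServiceEventError leaves the happy path
+//	updatedState, stateID, hasChanges := sm.Build()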
+type HappyPathStateMachine struct {
+	baseStateMachine
+}
+
+var _ StateMachine = (*HappyPathStateMachine)(nil)
+
+// NewHappyPathStateMachine creates a new HappyPathStateMachine.
+// An exception is returned in case the `EpochFallbackTriggered` flag is set in the `parentEpochState`. This means that
+// the protocol state evolution has reached an undefined state from the perspective of the happy path state machine.
+// No errors are expected during normal operations.
+func NewHappyPathStateMachine(telemetry protocol_state.StateMachineTelemetryConsumer, view uint64, parentState *flow.RichEpochStateEntry) (*HappyPathStateMachine, error) {
+	if parentState.EpochFallbackTriggered {
+		return nil, irrecoverable.NewExceptionf("cannot create happy path protocol state machine at view (%d) for a parent state "+
+			"which is in Epoch Fallback Mode", view)
+	}
+	base, err := newBaseStateMachine(telemetry, view, parentState, parentState.EpochStateEntry.Copy())
+	if err != nil {
+		return nil, fmt.Errorf("could not create base state machine: %w", err)
+	}
+	return &HappyPathStateMachine{
+		baseStateMachine: *base,
+	}, nil
+}
+
+// ProcessEpochSetup updates the protocol state with data from the epoch setup event.
+// Observing an epoch setup event also affects the identity table for the current epoch:
+// - it transitions the protocol state from the Staking to the Epoch Setup phase
+// - we stop returning identities from the previous+current epochs and instead return identities from the current+next epochs.
+//
+// As a result of this operation, the protocol state for the next epoch will be created.
+// The returned boolean indicates whether the event triggered a transition in the state machine.
+// Implementors must never return (true, error).
+// Expected errors indicating that we are leaving the happy-path of the epoch transitions:
+// - `protocol.InvalidServiceEventError` - if the service event is invalid or is not a valid state transition for the current protocol state.
+// CAUTION: the HappyPathStateMachine is left with a potentially dysfunctional state when this error occurs. Do NOT call the Build method
+// after such an error; discard the HappyPathStateMachine!
+func (u *HappyPathStateMachine) ProcessEpochSetup(epochSetup *flow.EpochSetup) (bool, error) {
+	u.telemetry.OnServiceEventReceived(epochSetup.ServiceEvent())
+	err := protocol.IsValidExtendingEpochSetup(epochSetup, u.state)
+	if err != nil {
+		u.telemetry.OnInvalidServiceEvent(epochSetup.ServiceEvent(), err)
+		return false, fmt.Errorf("invalid epoch setup event for epoch %d: %w", epochSetup.Counter, err)
+	}
+	if u.state.NextEpoch != nil {
+		err := protocol.NewInvalidServiceEventErrorf("repeated EpochSetup event for epoch %d", epochSetup.Counter)
+		u.telemetry.OnInvalidServiceEvent(epochSetup.ServiceEvent(), err)
+		return false, err
+	}
+
+	// When observing the setup event for a subsequent epoch, construct the EpochStateContainer for `MinEpochStateEntry.NextEpoch`.
+	// Context:
+	// Note that the `EpochStateContainer.ActiveIdentities` only contains the nodes that are *active* in the next epoch. Active means
+	// that these nodes are authorized to contribute to extending the chain. Nodes are listed in `ActiveIdentities` if and only if
+	// they are part of the EpochSetup event for the respective epoch.
+	//
+	// sanity checking SAFETY-CRITICAL INVARIANT (I):
+	// - Per convention, the `flow.EpochSetup` event should list the IdentitySkeletons in canonical order. This is useful
+	//   for most efficient construction of the full active Identities for an epoch. We enforce this here at the gateway
+	//   to the protocol state, when we incorporate new information from the EpochSetup event.
+	// - Note that the system smart contracts manage the identity table as an unordered set! For the protocol state, we desire a fixed
+	//   ordering to simplify various implementation details, like the DKG. Therefore, we order identities in `flow.EpochSetup` during
+	//   conversion from Cadence to Go in the function `convert.ServiceEvent(flow.ChainID, flow.Event)` in package `model/convert`.
+	//
+	// sanity checking SAFETY-CRITICAL INVARIANT (II):
+	// While ejection status and dynamic weight are not part of the EpochSetup event, we can supplement this information as follows:
+	// - Per convention, service events are delivered (asynchronously) in an *order-preserving* manner. Furthermore, weight changes or
+	//   node ejections are entirely mediated by system smart contracts and delivered via service events.
+	// - Therefore, the EpochSetup event contains the up-to-date snapshot of the epoch participants. Any weight changes or node ejections
+	//   that happened before should be reflected in the EpochSetup event. Specifically, the initial weight should be reduced and ejected
+	//   nodes should no longer be listed in the EpochSetup event.
+	// - Hence, the following invariant must be satisfied by the system smart contracts for all active nodes in the upcoming epoch:
+	//   (i)  The Ejected flag is false. Node X being ejected in epoch N (necessarily via a service event emitted by the system
+	//        smart contracts earlier) but also being listed in the setup event for the subsequent epoch (service event emitted by
+	//        the system smart contracts later) is illegal.
+	//   (ii) When the EpochSetup event is emitted / processed, the weight of all active nodes equals their InitialWeight.
+	//
+	// For collector clusters, we rely on invariants (I) and (II) holding. See `committees.Cluster` for details, specifically function
+	// `constructInitialClusterIdentities(..)`. While the system smart contract must satisfy this invariant, we run a sanity check below.
+	activeIdentitiesLookup := u.state.CurrentEpoch.ActiveIdentities.Lookup() // lookup NodeID → DynamicIdentityEntry for nodes _active_ in the current epoch
+	nextEpochActiveIdentities, err := buildNextEpochActiveParticipants(activeIdentitiesLookup, u.state.CurrentEpochSetup, epochSetup)
+	if err != nil {
+		u.telemetry.OnInvalidServiceEvent(epochSetup.ServiceEvent(), err)
+		return false, fmt.Errorf("failed to construct next epoch active participants: %w", err)
+	}
+
+	// construct data container specifying next epoch
+	nextEpoch, err := flow.NewEpochStateContainer(
+		flow.UntrustedEpochStateContainer{
+			SetupID:          epochSetup.ID(),
+			CommitID:         flow.ZeroID,
+			ActiveIdentities: nextEpochActiveIdentities,
+			EpochExtensions:  nil,
+		},
+	)
+	if err != nil {
+		return false, fmt.Errorf("could not construct next epoch state: %w", err)
+	}
+
+	newMinEpochStateEntry, err := flow.NewMinEpochStateEntry(
+		flow.UntrustedMinEpochStateEntry{
+			PreviousEpoch:          u.state.PreviousEpoch,
+			CurrentEpoch:           u.state.CurrentEpoch,
+			NextEpoch:              nextEpoch,
+			EpochFallbackTriggered: u.state.EpochFallbackTriggered,
+		},
+	)
+	if err != nil {
+		return false, fmt.Errorf("could not create min epoch state: %w", err)
+	}
+
+	u.state, err = flow.NewEpochStateEntry(
+		flow.UntrustedEpochStateEntry{
+			MinEpochStateEntry:  newMinEpochStateEntry,
+			PreviousEpochSetup:  u.state.PreviousEpochSetup,
+			PreviousEpochCommit: u.state.PreviousEpochCommit,
+			CurrentEpochSetup:   u.state.CurrentEpochSetup,
+			CurrentEpochCommit:  u.state.CurrentEpochCommit,
+			NextEpochSetup:      epochSetup,
+			NextEpochCommit:     u.state.NextEpochCommit,
+		},
+	)
+	if err != nil {
+		return false, fmt.Errorf("could not construct epoch state entry: %w", err)
+	}
+
+	// Register the next epoch's dynamic identity list with the ejector, so that identity updates
+	// (e.g. node ejections) observed later in this block are also applied to the next epoch.
+	err = u.ejector.TrackDynamicIdentityList(u.state.NextEpoch.ActiveIdentities)
+	if err != nil {
+		if protocol.IsInvalidServiceEventError(err) {
+			u.telemetry.OnInvalidServiceEvent(epochSetup.ServiceEvent(), err)
+		}
+		return false, fmt.Errorf("failed to track dynamic identity list for next epoch: %w", err)
+	}
+	u.telemetry.OnServiceEventProcessed(epochSetup.ServiceEvent())
+	return true, nil
+}
+
+// ProcessEpochCommit updates the current protocol state with data from an epoch commit event.
+// Observing an epoch commit event transitions the protocol state from the setup to the commit phase.
+// At this point, we have finished construction of the next epoch.
+// As a result of this operation, the protocol state for the next epoch will be committed.
+// The returned boolean indicates whether the event triggered a transition in the state machine.
+// Implementors must never return (true, error).
+// Expected errors indicating that we are leaving the happy-path of the epoch transitions:
+// - `protocol.InvalidServiceEventError` - if the service event is invalid or is not a valid state transition for the current protocol state.
+// CAUTION: the HappyPathStateMachine is left with a potentially dysfunctional state when this error occurs. Do NOT call the Build method
+// after such an error; discard the HappyPathStateMachine!
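+//
+// A sketch of the caller-side handling this error contract implies (illustrative, not part of this file):
+//
+//	if _, err := stateMachine.ProcessEpochCommit(epochCommit); err != nil {
+//		if protocol.IsInvalidServiceEventError(err) {
+//			// protocol-compliant rejection: discard this state machine and fall back to the FallbackStateMachine
+//		} else {
+//			// exception: unexpected failure, propagate upwards
+//		}
+//	}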
+func (u *HappyPathStateMachine) ProcessEpochCommit(epochCommit *flow.EpochCommit) (bool, error) {
+	u.telemetry.OnServiceEventReceived(epochCommit.ServiceEvent())
+	if u.state.NextEpoch == nil {
+		err := protocol.NewInvalidServiceEventErrorf("received EpochCommit without prior EpochSetup")
+		u.telemetry.OnInvalidServiceEvent(epochCommit.ServiceEvent(), err)
+		return false, err
+	}
+	if u.state.NextEpoch.CommitID != flow.ZeroID {
+		err := protocol.NewInvalidServiceEventErrorf("repeated EpochCommit event for epoch %d", epochCommit.Counter)
+		u.telemetry.OnInvalidServiceEvent(epochCommit.ServiceEvent(), err)
+		return false, err
+	}
+	err := protocol.IsValidExtendingEpochCommit(epochCommit, u.state.MinEpochStateEntry, u.state.NextEpochSetup)
+	if err != nil {
+		u.telemetry.OnInvalidServiceEvent(epochCommit.ServiceEvent(), err)
+		return false, fmt.Errorf("invalid epoch commit event for epoch %d: %w", epochCommit.Counter, err)
+	}
+
+	nextEpoch, err := flow.NewEpochStateContainer(
+		flow.UntrustedEpochStateContainer{
+			SetupID:          u.state.NextEpoch.SetupID,
+			CommitID:         epochCommit.ID(),
+			ActiveIdentities: u.state.NextEpoch.ActiveIdentities,
+			EpochExtensions:  u.state.NextEpoch.EpochExtensions,
+		},
+	)
+	if err != nil {
+		return false, fmt.Errorf("could not construct next epoch state: %w", err)
+	}
+
+	newMinEpochStateEntry, err := flow.NewMinEpochStateEntry(
+		flow.UntrustedMinEpochStateEntry{
+			PreviousEpoch:          u.state.PreviousEpoch,
+			CurrentEpoch:           u.state.CurrentEpoch,
+			NextEpoch:              nextEpoch,
+			EpochFallbackTriggered: u.state.EpochFallbackTriggered,
+		},
+	)
+	if err != nil {
+		return false, fmt.Errorf("could not create min epoch state: %w", err)
+	}
+
+	u.state, err = flow.NewEpochStateEntry(
+		flow.UntrustedEpochStateEntry{
+			MinEpochStateEntry:  newMinEpochStateEntry,
+			PreviousEpochSetup:  u.state.PreviousEpochSetup,
+			PreviousEpochCommit: u.state.PreviousEpochCommit,
+			CurrentEpochSetup:   u.state.CurrentEpochSetup,
+			CurrentEpochCommit:  u.state.CurrentEpochCommit,
+			NextEpochSetup:      u.state.NextEpochSetup,
+			NextEpochCommit:     epochCommit,
+		},
+	)
+	if err != nil {
+		return false, fmt.Errorf("could not construct epoch state entry: %w", err)
+	}
+	u.telemetry.OnServiceEventProcessed(epochCommit.ServiceEvent())
+	return true, nil
+}
+
+// ProcessEpochRecover returns the sentinel error `protocol.InvalidServiceEventError`, which
+// indicates that `EpochRecover` events are not expected on the happy path of the epoch lifecycle.
+func (u *HappyPathStateMachine) ProcessEpochRecover(epochRecover *flow.EpochRecover) (bool, error) {
+	u.telemetry.OnServiceEventReceived(epochRecover.ServiceEvent())
+	err := protocol.NewInvalidServiceEventErrorf("epoch recover event for epoch %d received while on happy path", epochRecover.EpochSetup.Counter)
+	u.telemetry.OnInvalidServiceEvent(epochRecover.ServiceEvent(), err)
+	return false, err
+}
+
+// When observing the setup event for a subsequent epoch, construct the EpochStateContainer for `ProtocolStateEntry.NextEpoch`.
+// Context:
+// Note that the `EpochStateContainer.ActiveIdentities` only contains the nodes that are *active* in the next epoch. Active means
+// that these nodes are authorized to contribute to extending the chain. Nodes are listed in `ActiveIdentities` if and only if
+// they are part of the EpochSetup event for the respective epoch.
+//
+// sanity checking SAFETY-CRITICAL INVARIANT (I):
+// - Per convention, the `flow.EpochSetup` event should list the IdentitySkeletons in canonical order. This is useful
+//   for most efficient construction of the full active Identities for an epoch. We enforce this here at the gateway
+//   to the protocol state, when we incorporate new information from the EpochSetup event.
+// - Note that the system smart contracts manage the identity table as an unordered set! For the protocol state, we desire a fixed
+//   ordering to simplify various implementation details, like the DKG. Therefore, we order identities in `flow.EpochSetup` during
+//   conversion from Cadence to Go in the function `convert.ServiceEvent(flow.ChainID, flow.Event)` in package `model/convert`.
+//
+// sanity checking SAFETY-CRITICAL INVARIANT (II):
+// While ejection status and dynamic weight are not part of the EpochSetup event, we can supplement this information as follows:
+// - Per convention, service events are delivered (asynchronously) in an *order-preserving* manner. Furthermore, weight changes or
+//   node ejections are entirely mediated by system smart contracts and delivered via service events.
+// - Therefore, the EpochSetup event contains the up-to-date snapshot of the epoch participants. Any weight changes or node ejections
+//   that happened before should be reflected in the EpochSetup event. Specifically, the initial weight should be reduced and ejected
+//   nodes should no longer be listed in the EpochSetup event.
+// - Hence, the following invariant must be satisfied by the system smart contracts for all active nodes in the upcoming epoch:
+//   (i)  The Ejected flag is false. Node X being ejected in epoch N (necessarily via a service event emitted by the system
+//        smart contracts earlier) but also being listed in the setup event for the subsequent epoch (service event emitted by
+//        the system smart contracts later) is illegal.
+//   (ii) When the EpochSetup event is emitted / processed, the weight of all active nodes equals their InitialWeight.
+//
+// For collector clusters, we rely on invariants (I) and (II) holding. See `committees.Cluster` for details, specifically function
+// `constructInitialClusterIdentities(..)`. While the system smart contract must satisfy this invariant, we run a sanity check below.
+// This is a side-effect-free function; it only returns `protocol.InvalidServiceEventError` as an error.
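+//
+// Illustration of invariant (I), assuming canonical order is ascending lexicographic order of NodeIDs:
+// a participant list with NodeIDs [0x01…, 0x5a…, 0xef…] passes the ordering check below, whereas
+// [0x5a…, 0x01…] (or any list repeating a NodeID) yields an InvalidServiceEventError.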
+func buildNextEpochActiveParticipants(activeIdentitiesLookup map[flow.Identifier]*flow.DynamicIdentityEntry, currentEpochSetup, nextEpochSetup *flow.EpochSetup) (flow.DynamicIdentityEntryList, error) { + nextEpochActiveIdentities := make(flow.DynamicIdentityEntryList, 0, len(nextEpochSetup.Participants)) + prevNodeID := nextEpochSetup.Participants[0].NodeID + for idx, nextEpochIdentitySkeleton := range nextEpochSetup.Participants { + // sanity checking invariant (I): + if idx > 0 && !flow.IsIdentifierCanonical(prevNodeID, nextEpochIdentitySkeleton.NodeID) { + return nil, protocol.NewInvalidServiceEventErrorf("epoch setup event lists active participants not in canonical ordering") + } + prevNodeID = nextEpochIdentitySkeleton.NodeID + + // sanity checking invariant (II.i): + currentEpochDynamicProperties, found := activeIdentitiesLookup[nextEpochIdentitySkeleton.NodeID] + if found && currentEpochDynamicProperties.Ejected { // invariant violated + return nil, protocol.NewInvalidServiceEventErrorf("node %v is ejected in current epoch %d but readmitted by EpochSetup event for epoch %d", nextEpochIdentitySkeleton.NodeID, currentEpochSetup.Counter, nextEpochSetup.Counter) + } + + nextEpochActiveIdentities = append(nextEpochActiveIdentities, &flow.DynamicIdentityEntry{ + NodeID: nextEpochIdentitySkeleton.NodeID, + Ejected: false, // according to invariant (II.i) + }) + } + return nextEpochActiveIdentities, nil +} diff --git a/state/protocol/protocol_state/epochs/happy_path_statemachine_test.go b/state/protocol/protocol_state/epochs/happy_path_statemachine_test.go new file mode 100644 index 00000000000..ef1902701ae --- /dev/null +++ b/state/protocol/protocol_state/epochs/happy_path_statemachine_test.go @@ -0,0 +1,595 @@ +package epochs + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/state/protocol" + protocol_statemock "github.com/onflow/flow-go/state/protocol/protocol_state/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestProtocolStateMachine(t *testing.T) { + suite.Run(t, new(ProtocolStateMachineSuite)) +} + +// BaseStateMachineSuite is a base test suite that holds common functionality for testing protocol state machines. +// It reflects the portion of data which is present in baseStateMachine. +type BaseStateMachineSuite struct { + suite.Suite + + parentProtocolState *flow.RichEpochStateEntry + parentBlock *flow.Header + candidate *flow.Header + consumer *protocol_statemock.StateMachineTelemetryConsumer +} + +func (s *BaseStateMachineSuite) SetupTest() { + s.parentProtocolState = unittest.EpochStateFixture(func(entry *flow.RichEpochStateEntry) { + // have a fixed boundary for the current epoch + entry.CurrentEpochSetup.FinalView = 5_000 + entry.CurrentEpoch.SetupID = entry.CurrentEpochSetup.ID() + }) + s.parentBlock = unittest.BlockHeaderFixture(unittest.HeaderWithView(s.parentProtocolState.CurrentEpochSetup.FirstView + 1)) + s.candidate = unittest.BlockHeaderWithParentFixture(s.parentBlock) + s.consumer = protocol_statemock.NewStateMachineTelemetryConsumer(s.T()) +} + +// ProtocolStateMachineSuite is a dedicated test suite for testing happy path state machine. 
+type ProtocolStateMachineSuite struct {
+	BaseStateMachineSuite
+	stateMachine *HappyPathStateMachine
+}
+
+func (s *ProtocolStateMachineSuite) SetupTest() {
+	s.BaseStateMachineSuite.SetupTest()
+	var err error
+	s.stateMachine, err = NewHappyPathStateMachine(s.consumer, s.candidate.View, s.parentProtocolState.Copy())
+	require.NoError(s.T(), err)
+}
+
+// TestNewStateMachine tests that the constructor correctly sets up invariants for the HappyPathStateMachine.
+func (s *ProtocolStateMachineSuite) TestNewStateMachine() {
+	require.NotSame(s.T(), s.stateMachine.parentEpochState, s.stateMachine.state, "expected to take a deep copy of the parent state")
+	require.Nil(s.T(), s.stateMachine.parentEpochState.NextEpoch)
+	require.Nil(s.T(), s.stateMachine.state.NextEpoch)
+	require.Equal(s.T(), s.candidate.View, s.stateMachine.View())
+	require.Equal(s.T(), s.parentProtocolState, s.stateMachine.ParentState())
+}
+
+// TestTransitionToNextEpoch tests a scenario where the HappyPathStateMachine processes the first block of the next epoch.
+// It has to discard the parent state and build a new state with data from the next epoch.
+func (s *ProtocolStateMachineSuite) TestTransitionToNextEpoch() {
+	// update protocol state with next epoch information
+	unittest.WithNextEpochProtocolState()(s.parentProtocolState)
+
+	candidate := unittest.BlockHeaderFixture(
+		unittest.HeaderWithView(s.parentProtocolState.CurrentEpochSetup.FinalView + 1))
+	var err error
+	// since the candidate block is from the next epoch, the HappyPathStateMachine should transition to the next epoch
+	s.stateMachine, err = NewHappyPathStateMachine(s.consumer, candidate.View, s.parentProtocolState.Copy())
+	require.NoError(s.T(), err)
+	err = s.stateMachine.TransitionToNextEpoch()
+	require.NoError(s.T(), err)
+	updatedState, stateID, hasChanges := s.stateMachine.Build()
+	require.True(s.T(), hasChanges)
+	require.NotEqual(s.T(), s.parentProtocolState.ID(), updatedState.ID())
+	require.Equal(s.T(), updatedState.ID(), stateID)
+	require.Equal(s.T(), s.parentProtocolState.ID(), s.stateMachine.ParentState().ID(), "should not modify parent protocol state")
+	require.Equal(s.T(), updatedState.CurrentEpoch.ID(), s.parentProtocolState.NextEpoch.ID(), "should transition into next epoch")
+	require.Nil(s.T(), updatedState.NextEpoch, "next epoch protocol state should be nil")
+}
+
+// TestTransitionToNextEpochNotAllowed tests different scenarios where transition to the next epoch is not allowed.
+func (s *ProtocolStateMachineSuite) TestTransitionToNextEpochNotAllowed() {
+	s.Run("no next epoch protocol state", func() {
+		protocolState := unittest.EpochStateFixture()
+		candidate := unittest.BlockHeaderFixture(
+			unittest.HeaderWithView(protocolState.CurrentEpochSetup.FinalView + 1))
+		stateMachine, err := NewHappyPathStateMachine(s.consumer, candidate.View, protocolState)
+		require.NoError(s.T(), err)
+		err = stateMachine.TransitionToNextEpoch()
+		require.Error(s.T(), err, "should not allow transition to next epoch if there is no next epoch protocol state")
+	})
+	s.Run("next epoch not committed", func() {
+		protocolState := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) {
+			entry.NextEpoch.CommitID = flow.ZeroID
+			entry.NextEpochCommit = nil
+		})
+		candidate := unittest.BlockHeaderFixture(
+			unittest.HeaderWithView(protocolState.CurrentEpochSetup.FinalView + 1))
+		stateMachine, err := NewHappyPathStateMachine(s.consumer, candidate.View, protocolState)
+		require.NoError(s.T(), err)
+		err = stateMachine.TransitionToNextEpoch()
+		require.Error(s.T(), err, "should not allow transition to next epoch if it is not committed")
+	})
+	s.Run("candidate block is not from next epoch", func() {
+		protocolState := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState())
+		candidate := unittest.BlockHeaderFixture(
+			unittest.HeaderWithView(protocolState.CurrentEpochSetup.FinalView))
+		stateMachine, err := NewHappyPathStateMachine(s.consumer, candidate.View, protocolState)
+		require.NoError(s.T(), err)
+		err = stateMachine.TransitionToNextEpoch()
+		require.Error(s.T(), err, "should not allow transition to next epoch if next block is not first block from next epoch")
+	})
+}
+
+// TestBuild tests that the HappyPathStateMachine returns the correct protocol state.
+func (s *ProtocolStateMachineSuite) TestBuild() {
+	updatedState, stateID, hasChanges := s.stateMachine.Build()
+	require.Equal(s.T(), stateID, s.parentProtocolState.ID(), "should return same protocol state")
+	require.False(s.T(), hasChanges, "should not have changes")
+	require.NotSame(s.T(), updatedState, s.stateMachine.state, "should return a copy of protocol state")
+	require.Equal(s.T(), updatedState.ID(), stateID, "should return correct ID")
+	require.Equal(s.T(), s.parentProtocolState.ID(), s.stateMachine.ParentState().ID(), "should not modify parent protocol state")
+
+	nodeIDforEjection := s.parentProtocolState.CurrentEpochIdentityTable[0].NodeID
+	serviceEvent := &flow.EjectNode{NodeID: nodeIDforEjection}
+	s.consumer.On("OnServiceEventReceived", serviceEvent.ServiceEvent()).Once()
+	s.consumer.On("OnServiceEventProcessed", serviceEvent.ServiceEvent()).Once()
+	wasEjected := s.stateMachine.EjectIdentity(serviceEvent)
+	require.True(s.T(), wasEjected)
+	updatedState, stateID, hasChanges = s.stateMachine.Build()
+	require.True(s.T(), hasChanges, "should have changes")
+	require.NotEqual(s.T(), stateID, s.parentProtocolState.ID(), "the protocol state was modified, so it must not have the same ID")
+	require.Equal(s.T(), updatedState.ID(), stateID, "should return correct ID")
+	require.Equal(s.T(), s.parentProtocolState.ID(), s.stateMachine.ParentState().ID(), "should not modify parent protocol state")
+}
+
+// TestCreateStateMachineAfterEFMTriggered tests that creating a state machine after observing an invalid
+// state transition results in an error.
+func (s *ProtocolStateMachineSuite) TestCreateStateMachineAfterEFMTriggered() {
+	s.parentProtocolState.EpochFallbackTriggered = true
+	var err error
+	// creating a HappyPathStateMachine from a parent state that is already in EFM must fail
+	s.stateMachine, err = NewHappyPathStateMachine(s.consumer, s.candidate.View, s.parentProtocolState.Copy())
+	require.Error(s.T(), err)
+}
+
+// TestProcessEpochCommit tests that processing an epoch commit event correctly updates the internal state of the
+// HappyPathStateMachine and that it behaves correctly when invariants are violated.
+func (s *ProtocolStateMachineSuite) TestProcessEpochCommit() {
+	mockConsumer := func(commit *flow.EpochCommit) {
+		s.consumer.On("OnServiceEventReceived", commit.ServiceEvent()).Once()
+		s.consumer.On("OnInvalidServiceEvent", commit.ServiceEvent(),
+			unittest.MatchInvalidServiceEventError).Once()
+	}
+	var err error
+	s.Run("invalid counter", func() {
+		s.stateMachine, err = NewHappyPathStateMachine(s.consumer, s.candidate.View, s.parentProtocolState.Copy())
+		require.NoError(s.T(), err)
+		commit := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) {
+			commit.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 10 // set an invalid counter for the next epoch
+		})
+		mockConsumer(commit)
+		_, err := s.stateMachine.ProcessEpochCommit(commit)
+		require.Error(s.T(), err)
+		require.True(s.T(), protocol.IsInvalidServiceEventError(err))
+	})
+	s.Run("no next epoch protocol state", func() {
+		s.stateMachine, err = NewHappyPathStateMachine(s.consumer, s.candidate.View, s.parentProtocolState.Copy())
+		require.NoError(s.T(), err)
+		commit := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) {
+			commit.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1
+		})
+		mockConsumer(commit)
+		_, err := s.stateMachine.ProcessEpochCommit(commit)
+		require.Error(s.T(), err)
+		require.True(s.T(), protocol.IsInvalidServiceEventError(err))
+	})
+	s.Run("conflicting epoch commit", func() {
+		s.stateMachine, err = NewHappyPathStateMachine(s.consumer, s.candidate.View, s.parentProtocolState.Copy())
+		require.NoError(s.T(), err)
+		setup := unittest.EpochSetupFixture(
+			unittest.SetupWithCounter(s.parentProtocolState.CurrentEpochSetup.Counter+1),
+			unittest.WithFirstView(s.parentProtocolState.CurrentEpochSetup.FinalView+1),
+			unittest.WithFinalView(s.parentProtocolState.CurrentEpochSetup.FinalView+1000),
+		)
+		// processing the setup event results in creating the next epoch's protocol state
+		s.consumer.On("OnServiceEventReceived", setup.ServiceEvent()).Once()
+		s.consumer.On("OnServiceEventProcessed", setup.ServiceEvent()).Once()
+		_, err := s.stateMachine.ProcessEpochSetup(setup)
+		require.NoError(s.T(), err)
+
+		updatedState, _, _ := s.stateMachine.Build()
+
+		parentState, err := flow.NewRichEpochStateEntry(updatedState)
+		require.NoError(s.T(), err)
+
+		s.stateMachine, err = NewHappyPathStateMachine(s.consumer, s.candidate.View+1, parentState)
+		require.NoError(s.T(), err)
+		commit := unittest.EpochCommitFixture(
+			unittest.CommitWithCounter(setup.Counter),
+			unittest.WithDKGFromParticipants(setup.Participants),
+		)
+
+		s.consumer.On("OnServiceEventReceived", commit.ServiceEvent()).Once()
+		s.consumer.On("OnServiceEventProcessed", commit.ServiceEvent()).Once()
+		_, err = s.stateMachine.ProcessEpochCommit(commit)
+		require.NoError(s.T(), err)
+
+		// processing another epoch commit has to be an error since we have already processed one
+		mockConsumer(commit)
+		_, err = s.stateMachine.ProcessEpochCommit(commit)
+		require.Error(s.T(), err)
+		require.True(s.T(), protocol.IsInvalidServiceEventError(err))
+
+		newState, _, _ := s.stateMachine.Build()
+		require.Equal(s.T(), commit.ID(), newState.NextEpoch.CommitID, "next epoch should be committed since we have observed a valid event")
+	})
+	s.Run("happy path processing", func() {
+		s.stateMachine, err = NewHappyPathStateMachine(s.consumer, s.candidate.View, s.parentProtocolState.Copy())
+		require.NoError(s.T(), err)
+		setup := unittest.EpochSetupFixture(
+			unittest.SetupWithCounter(s.parentProtocolState.CurrentEpochSetup.Counter+1),
+			unittest.WithFirstView(s.parentProtocolState.CurrentEpochSetup.FinalView+1),
+			unittest.WithFinalView(s.parentProtocolState.CurrentEpochSetup.FinalView+1000),
+		)
+		// processing the setup event results in creating the next epoch's protocol state
+		s.consumer.On("OnServiceEventReceived", setup.ServiceEvent()).Once()
+		s.consumer.On("OnServiceEventProcessed", setup.ServiceEvent()).Once()
+		_, err := s.stateMachine.ProcessEpochSetup(setup)
+		require.NoError(s.T(), err)
+
+		updatedState, stateID, hasChanges := s.stateMachine.Build()
+		require.True(s.T(), hasChanges)
+		require.NotEqual(s.T(), s.parentProtocolState.ID(), updatedState.ID())
+		require.Equal(s.T(), updatedState.ID(), stateID)
+		require.Equal(s.T(), s.parentProtocolState.ID(), s.stateMachine.ParentState().ID(), "should not modify parent protocol state")
+
+		parentState, err := flow.NewRichEpochStateEntry(updatedState)
+		require.NoError(s.T(), err)
+		s.stateMachine, err = NewHappyPathStateMachine(s.consumer, s.candidate.View+1, parentState.Copy())
+		require.NoError(s.T(), err)
+		commit := unittest.EpochCommitFixture(
+			unittest.CommitWithCounter(setup.Counter),
+			unittest.WithDKGFromParticipants(setup.Participants),
+		)
+		s.consumer.On("OnServiceEventReceived", commit.ServiceEvent()).Once()
+		s.consumer.On("OnServiceEventProcessed", commit.ServiceEvent()).Once()
+		_, err = s.stateMachine.ProcessEpochCommit(commit)
+		require.NoError(s.T(), err)
+
+		newState, newStateID, newStateHasChanges := s.stateMachine.Build()
+		require.True(s.T(), newStateHasChanges)
+		require.Equal(s.T(), commit.ID(), newState.NextEpoch.CommitID, "next epoch should be committed")
+		require.Equal(s.T(), newState.ID(), newStateID)
+		require.NotEqual(s.T(), s.parentProtocolState.ID(), newState.ID())
+		require.NotEqual(s.T(), updatedState.ID(), newState.ID())
+		require.Equal(s.T(), parentState.ID(), s.stateMachine.ParentState().ID(),
+			"should not modify parent protocol state")
+	})
+}
+
+// TestNodeEjectionOfUnknownID verifies that an `EjectNode` service event referencing an unknown
+// node leaves the state invariant.
+func (s *ProtocolStateMachineSuite) TestNodeEjectionOfUnknownID() {
+	serviceEvent := &flow.EjectNode{NodeID: unittest.IdentifierFixture()}
+	s.consumer.On("OnServiceEventReceived", serviceEvent.ServiceEvent()).Once()
+	s.consumer.On("OnInvalidServiceEvent", serviceEvent.ServiceEvent(),
+		unittest.MatchInvalidServiceEventError).Once()
+	wasEjected := s.stateMachine.EjectIdentity(serviceEvent)
+	require.False(s.T(), wasEjected, "should not be able to eject unknown identity")
+
+	updatedState, updatedStateID, hasChanges := s.stateMachine.Build()
+	require.False(s.T(), hasChanges, "should not have changes")
+	require.Equal(s.T(), updatedState.ID(), s.parentProtocolState.ID())
+	require.Equal(s.T(), updatedState.ID(), updatedStateID)
+}
+
+// TestNodeEjectionHappyPath verifies that `EjectNode` service events are correctly processed
+// and reflected in the resulting protocol state.
+func (s *ProtocolStateMachineSuite) TestNodeEjectionHappyPath() { + // update protocol state to have next epoch protocol state + unittest.WithNextEpochProtocolState()(s.parentProtocolState) + var err error + s.stateMachine, err = NewHappyPathStateMachine(s.consumer, s.candidate.View, s.parentProtocolState.Copy()) + require.NoError(s.T(), err) + + currentEpochParticipants := s.parentProtocolState.CurrentEpochIdentityTable.Copy() + ejectedChanges, err := currentEpochParticipants.Sample(2) + require.NoError(s.T(), err) + + for _, update := range ejectedChanges { + serviceEvent := &flow.EjectNode{NodeID: update.NodeID} + s.consumer.On("OnServiceEventReceived", serviceEvent.ServiceEvent()).Once() + s.consumer.On("OnServiceEventProcessed", serviceEvent.ServiceEvent()).Once() + wasEjected := s.stateMachine.EjectIdentity(serviceEvent) + require.True(s.T(), wasEjected) + } + updatedState, updatedStateID, hasChanges := s.stateMachine.Build() + require.True(s.T(), hasChanges, "should have changes") + require.Equal(s.T(), updatedState.ID(), updatedStateID) + require.NotEqual(s.T(), s.parentProtocolState.ID(), updatedState.ID()) + require.Equal(s.T(), s.parentProtocolState.ID(), s.stateMachine.ParentState().ID(), + "should not modify parent protocol state") + + // assert that all changes made in the previous epoch are preserved + currentEpochLookup := updatedState.CurrentEpoch.ActiveIdentities.Lookup() + nextEpochLookup := updatedState.NextEpoch.ActiveIdentities.Lookup() + + for _, updated := range ejectedChanges { + currentEpochIdentity, foundInCurrentEpoch := currentEpochLookup[updated.NodeID] + if foundInCurrentEpoch { + require.Equal(s.T(), updated.NodeID, currentEpochIdentity.NodeID) + require.True(s.T(), currentEpochIdentity.Ejected) + } + + nextEpochIdentity, foundInNextEpoch := nextEpochLookup[updated.NodeID] + if foundInNextEpoch { + require.Equal(s.T(), updated.NodeID, nextEpochIdentity.NodeID) + require.True(s.T(), nextEpochIdentity.Ejected) + } + require.True(s.T(), foundInCurrentEpoch || foundInNextEpoch, "identity should be found in either current or next epoch") + } +} + +// TestProcessEpochSetupInvariants tests if processing epoch setup when invariants are violated doesn't update internal structures. 
+func (s *ProtocolStateMachineSuite) TestProcessEpochSetupInvariants() { + mockConsumer := func(setup *flow.EpochSetup) { + s.consumer.On("OnServiceEventReceived", setup.ServiceEvent()).Once() + s.consumer.On("OnInvalidServiceEvent", setup.ServiceEvent(), + unittest.MatchInvalidServiceEventError).Once() + } + s.Run("invalid counter", func() { + setup := unittest.EpochSetupFixture(func(setup *flow.EpochSetup) { + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 10 // set invalid counter for next epoch + }) + mockConsumer(setup) + _, err := s.stateMachine.ProcessEpochSetup(setup) + require.Error(s.T(), err) + require.True(s.T(), protocol.IsInvalidServiceEventError(err)) + }) + s.Run("processing second epoch setup", func() { + stateMachine, err := NewHappyPathStateMachine(s.consumer, s.candidate.View, s.parentProtocolState.Copy()) + require.NoError(s.T(), err) + setup := unittest.EpochSetupFixture( + unittest.SetupWithCounter(s.parentProtocolState.CurrentEpochSetup.Counter+1), + unittest.WithFirstView(s.parentProtocolState.CurrentEpochSetup.FinalView+1), + unittest.WithFinalView(s.parentProtocolState.CurrentEpochSetup.FinalView+1000), + ) + s.consumer.On("OnServiceEventReceived", setup.ServiceEvent()).Once() + s.consumer.On("OnServiceEventProcessed", setup.ServiceEvent()).Once() + _, err = stateMachine.ProcessEpochSetup(setup) + require.NoError(s.T(), err) + + mockConsumer(setup) + _, err = stateMachine.ProcessEpochSetup(setup) + require.Error(s.T(), err) + require.True(s.T(), protocol.IsInvalidServiceEventError(err)) + }) + s.Run("participants not sorted", func() { + stateMachine, err := NewHappyPathStateMachine(s.consumer, s.candidate.View, s.parentProtocolState.Copy()) + require.NoError(s.T(), err) + setup := unittest.EpochSetupFixture(func(setup *flow.EpochSetup) { + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + var err error + setup.Participants, err = setup.Participants.Shuffle() + require.NoError(s.T(), err) + }) + mockConsumer(setup) + _, err = stateMachine.ProcessEpochSetup(setup) + require.Error(s.T(), err) + require.True(s.T(), protocol.IsInvalidServiceEventError(err)) + }) + s.Run("epoch setup state conflicts with protocol state", func() { + conflictingIdentity := s.parentProtocolState.MinEpochStateEntry.CurrentEpoch.ActiveIdentities[0] + conflictingIdentity.Ejected = true + + stateMachine, err := NewHappyPathStateMachine(s.consumer, s.candidate.View, s.parentProtocolState.Copy()) + require.NoError(s.T(), err) + setup := unittest.EpochSetupFixture(func(setup *flow.EpochSetup) { + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + // using the same identities as in the previous epoch should result in an error: + // we have ejected the conflicting identity, but the epoch setup re-admits it, + // so such an epoch setup event is invalid. + setup.Participants = s.parentProtocolState.CurrentEpochSetup.Participants + }) + + mockConsumer(setup) + _, err = stateMachine.ProcessEpochSetup(setup) + require.Error(s.T(), err) + require.True(s.T(), protocol.IsInvalidServiceEventError(err)) + }) +} + +// TestProcessEpochSetupHappyPath tests that processing a valid epoch setup event (no invariants violated) updates internal structures. +// We test correct construction of the *active identities* for the current and next epoch. Specifically, observing an EpochSetup +// event should leave `PreviousEpoch` and `CurrentEpoch`'s EpochStateContainer unchanged.
+// The next epoch's EpochStateContainer should reference the EpochSetup event and hold the respective ActiveIdentities. +func (s *ProtocolStateMachineSuite) TestProcessEpochSetupHappyPath() { + setupParticipants := unittest.IdentityListFixture(5, unittest.WithAllRoles()).Sort(flow.Canonical[flow.Identity]) + setupParticipants[0].InitialWeight = 13 + setup := unittest.EpochSetupFixture( + unittest.SetupWithCounter(s.parentProtocolState.CurrentEpochSetup.Counter+1), + unittest.WithFirstView(s.parentProtocolState.CurrentEpochSetup.FinalView+1), + unittest.WithFinalView(s.parentProtocolState.CurrentEpochSetup.FinalView+1000), + unittest.WithParticipants(setupParticipants.ToSkeleton()), + ) + + // for next epoch we will have all the identities from setup event + expectedNextEpochActiveIdentities := flow.DynamicIdentityEntryListFromIdentities(setupParticipants) + + // process actual event + s.consumer.On("OnServiceEventReceived", setup.ServiceEvent()).Once() + s.consumer.On("OnServiceEventProcessed", setup.ServiceEvent()).Once() + _, err := s.stateMachine.ProcessEpochSetup(setup) + require.NoError(s.T(), err) + + updatedState, _, hasChanges := s.stateMachine.Build() + require.True(s.T(), hasChanges, "should have changes") + require.Equal(s.T(), s.parentProtocolState.PreviousEpoch, updatedState.PreviousEpoch, "previous epoch's EpochStateContainer should not change") + require.Equal(s.T(), s.parentProtocolState.CurrentEpoch, updatedState.CurrentEpoch, "current epoch's EpochStateContainer should not change") + nextEpoch := updatedState.NextEpoch + require.NotNil(s.T(), nextEpoch, "should have next epoch protocol state") + require.Equal(s.T(), nextEpoch.SetupID, setup.ID(), + "should have correct setup ID for next protocol state") + require.Equal(s.T(), nextEpoch.CommitID, flow.ZeroID, "ID for EpochCommit event should still be unset (ZeroID)") + require.Equal(s.T(), expectedNextEpochActiveIdentities, nextEpoch.ActiveIdentities, + "should have filled active identities for next epoch") +} + +// TestProcessEpochSetupWithSameParticipants tests that processing epoch setup with overlapping participants results in correctly +// built updated protocol state. It should build a union of participants from current and next epoch for current and +// next epoch protocol states respectively. +func (s *ProtocolStateMachineSuite) TestProcessEpochSetupWithSameParticipants() { + participantsFromCurrentEpochSetup, err := flow.ComposeFullIdentities( + s.parentProtocolState.CurrentEpochSetup.Participants, + s.parentProtocolState.CurrentEpoch.ActiveIdentities, + flow.EpochParticipationStatusActive, + ) + require.NoError(s.T(), err) + // Function `ComposeFullIdentities` verifies that `Participants` and `ActiveIdentities` have identical ordering w.r.t. nodeID. + // By construction, `participantsFromCurrentEpochSetup` lists the full Identities in the same ordering as `Participants` and + // `ActiveIdentities`. By confirming that `participantsFromCurrentEpochSetup` follows canonical ordering, we can conclude that + // `Participants` and `ActiveIdentities` are also canonically ordered.
+ require.True(s.T(), participantsFromCurrentEpochSetup.Sorted(flow.Canonical[flow.Identity]), "participants in current epoch's setup event are not in canonical order") + + overlappingNodes, err := participantsFromCurrentEpochSetup.Sample(2) + require.NoError(s.T(), err) + setupParticipants := append(unittest.IdentityListFixture(len(s.parentProtocolState.CurrentEpochIdentityTable), unittest.WithAllRoles()), + overlappingNodes...).Sort(flow.Canonical[flow.Identity]) + setup := unittest.EpochSetupFixture( + unittest.SetupWithCounter(s.parentProtocolState.CurrentEpochSetup.Counter+1), + unittest.WithFirstView(s.parentProtocolState.CurrentEpochSetup.FinalView+1), + unittest.WithFinalView(s.parentProtocolState.CurrentEpochSetup.FinalView+1000), + unittest.WithParticipants(setupParticipants.ToSkeleton()), + ) + s.consumer.On("OnServiceEventReceived", setup.ServiceEvent()).Once() + s.consumer.On("OnServiceEventProcessed", setup.ServiceEvent()).Once() + _, err = s.stateMachine.ProcessEpochSetup(setup) + require.NoError(s.T(), err) + updatedState, _, _ := s.stateMachine.Build() + + require.Equal(s.T(), s.parentProtocolState.CurrentEpoch.ActiveIdentities, + updatedState.CurrentEpoch.ActiveIdentities, + "should not change active identities for current epoch") + + expectedNextEpochActiveIdentities := flow.DynamicIdentityEntryListFromIdentities(setupParticipants) + require.Equal(s.T(), expectedNextEpochActiveIdentities, updatedState.NextEpoch.ActiveIdentities, + "should have filled active identities for next epoch") +} + +// TestEpochSetupAfterIdentityChange tests that after processing an epoch setup event, all previously made changes to the identity table +// are preserved and reflected in the resulting protocol state. +func (s *ProtocolStateMachineSuite) TestEpochSetupAfterIdentityChange() { + participantsFromCurrentEpochSetup := s.parentProtocolState.CurrentEpochIdentityTable.Filter(func(i *flow.Identity) bool { + _, exists := s.parentProtocolState.CurrentEpochSetup.Participants.ByNodeID(i.NodeID) + return exists + }).Sort(flow.Canonical[flow.Identity]) + ejectedChanges, err := participantsFromCurrentEpochSetup.Sample(2) + require.NoError(s.T(), err) + for _, update := range ejectedChanges { + serviceEvent := &flow.EjectNode{NodeID: update.NodeID} + s.consumer.On("OnServiceEventReceived", serviceEvent.ServiceEvent()).Once() + s.consumer.On("OnServiceEventProcessed", serviceEvent.ServiceEvent()).Once() + wasEjected := s.stateMachine.EjectIdentity(serviceEvent) + require.True(s.T(), wasEjected) + } + updatedState, _, _ := s.stateMachine.Build() + + // Construct a valid flow.RichEpochStateEntry for the next block. + // We do this by copying the parent protocol state and updating the identities manually. + updatedRichProtocolState := &flow.RichEpochStateEntry{ + EpochStateEntry: updatedState, + CurrentEpochIdentityTable: s.parentProtocolState.CurrentEpochIdentityTable.Copy(), + NextEpochIdentityTable: flow.IdentityList{}, + } + // Update enriched data with the changes made to the low-level updated table + for _, identity := range ejectedChanges { + toBeUpdated, _ := updatedRichProtocolState.CurrentEpochIdentityTable.ByNodeID(identity.NodeID) + toBeUpdated.EpochParticipationStatus = flow.EpochParticipationStatusEjected + } + + // now we can use it to construct a HappyPathStateMachine for the next block, which will process the epoch setup event.
+ nextBlock := unittest.BlockHeaderWithParentFixture(s.candidate) + s.stateMachine, err = NewHappyPathStateMachine(s.consumer, nextBlock.View, updatedRichProtocolState) + require.NoError(s.T(), err) + + setup := unittest.EpochSetupFixture( + unittest.SetupWithCounter(s.parentProtocolState.CurrentEpochSetup.Counter+1), + unittest.WithFirstView(s.parentProtocolState.CurrentEpochSetup.FinalView+1), + unittest.WithFinalView(s.parentProtocolState.CurrentEpochSetup.FinalView+1000), + func(setup *flow.EpochSetup) { + // Add those nodes that were changed in the previous epoch, but not those that were ejected. + // It's important to exclude ejected nodes: the service smart contract emitted the ejection before the epoch setup, + // and service events are delivered (asynchronously) in an *order-preserving* manner. Hence, if the ejection happened + // before the epoch setup, the setup cannot include the ejected node unless there is a severe bug in the service contract. + setup.Participants = setup.Participants.Filter( + filter.Not(filter.In(ejectedChanges.ToSkeleton()))).Sort(flow.Canonical[flow.IdentitySkeleton]) + }, + ) + + s.consumer.On("OnServiceEventReceived", setup.ServiceEvent()).Once() + s.consumer.On("OnServiceEventProcessed", setup.ServiceEvent()).Once() + _, err = s.stateMachine.ProcessEpochSetup(setup) + require.NoError(s.T(), err) + + updatedState, _, _ = s.stateMachine.Build() + + // assert that all changes made in the previous block are preserved + currentEpochLookup := updatedState.CurrentEpoch.ActiveIdentities.Lookup() + nextEpochLookup := updatedState.NextEpoch.ActiveIdentities.Lookup() + + for _, updated := range ejectedChanges { + currentEpochIdentity := currentEpochLookup[updated.NodeID] + require.Equal(s.T(), updated.NodeID, currentEpochIdentity.NodeID) + require.True(s.T(), currentEpochIdentity.Ejected) + + _, foundInNextEpoch := nextEpochLookup[updated.NodeID] + require.False(s.T(), foundInNextEpoch) + } +} + +// TestEpochSetupAndEjectionInSameBlock tests that processing an epoch setup event which re-admits an ejected identity results in an error. +// Such action should be considered illegal since the smart contract emitted the ejection before the epoch setup and service events are +// delivered in an order-preserving manner. +func (s *ProtocolStateMachineSuite) TestEpochSetupAndEjectionInSameBlock() { + setupParticipants := s.parentProtocolState.CurrentEpochSetup.Participants.Copy() // use same participants as in current epoch setup + ejectedIdentityID := setupParticipants[0].NodeID + + serviceEvent := &flow.EjectNode{NodeID: ejectedIdentityID} + s.consumer.On("OnServiceEventReceived", serviceEvent.ServiceEvent()).Once() + s.consumer.On("OnServiceEventProcessed", serviceEvent.ServiceEvent()).Once() + // eject the identity before processing the epoch setup + wasEjected := s.stateMachine.EjectIdentity(serviceEvent) + require.True(s.T(), wasEjected) + + setup := unittest.EpochSetupFixture( + unittest.SetupWithCounter(s.parentProtocolState.CurrentEpochSetup.Counter+1), + unittest.WithFirstView(s.parentProtocolState.CurrentEpochSetup.FinalView+1), + unittest.WithFinalView(s.parentProtocolState.CurrentEpochSetup.FinalView+1000), + unittest.WithParticipants(setupParticipants), + ) + // the epoch setup re-admits the ejected identity; such events must not be accepted.
+ s.consumer.On("OnServiceEventReceived", setup.ServiceEvent()).Once() + s.consumer.On("OnInvalidServiceEvent", setup.ServiceEvent(), + unittest.MatchInvalidServiceEventError).Once() + processed, err := s.stateMachine.ProcessEpochSetup(setup) + require.Error(s.T(), err) + require.True(s.T(), protocol.IsInvalidServiceEventError(err)) + require.False(s.T(), processed) +} + +// TestProcessEpochRecover ensures that HappyPathStateMachine returns a sentinel error when processing an EpochRecover event. +func (s *ProtocolStateMachineSuite) TestProcessEpochRecover() { + nextEpochParticipants := s.parentProtocolState.CurrentEpochIdentityTable.Copy() + epochRecover := unittest.EpochRecoverFixture(func(setup *flow.EpochSetup) { + setup.Participants = nextEpochParticipants.ToSkeleton() + setup.Assignments = unittest.ClusterAssignment(1, nextEpochParticipants.ToSkeleton()) + setup.Counter = s.parentProtocolState.CurrentEpochSetup.Counter + 1 + setup.FirstView = s.parentProtocolState.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 10_000 + }) + s.consumer.On("OnServiceEventReceived", epochRecover.ServiceEvent()).Once() + s.consumer.On("OnInvalidServiceEvent", epochRecover.ServiceEvent(), + unittest.MatchInvalidServiceEventError).Once() + processed, err := s.stateMachine.ProcessEpochRecover(epochRecover) + require.Error(s.T(), err) + require.True(s.T(), protocol.IsInvalidServiceEventError(err)) + require.False(s.T(), processed) +} diff --git a/state/protocol/protocol_state/epochs/identity_ejector.go b/state/protocol/protocol_state/epochs/identity_ejector.go new file mode 100644 index 00000000000..02fb1ee0439 --- /dev/null +++ b/state/protocol/protocol_state/epochs/identity_ejector.go @@ -0,0 +1,83 @@ +package epochs + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" +) + +// trackedDynamicIdentityList is a helper structure for tracking identity lists in the state machine. +// It is used to implement lazy initialization of the tracked identity list. +// The structure relies on holding a reference to the list that is being modified. +type trackedDynamicIdentityList struct { + dynamicIdentities flow.DynamicIdentityEntryList + identityLookup map[flow.Identifier]*flow.DynamicIdentityEntry +} + +// ejector is a dedicated structure for tracking ejected nodes in the state machine. +// It is capable of tracking multiple identity lists and ejecting nodes from them. +// The implementation is optimized for the hot path, where ejections are rare: by utilizing lazy initialization, +// the data structures are not populated until the first ejection is requested. +// The ejector is used in the baseStateMachine to track ejected nodes and ensure that they are not readmitted during +// the lifetime of the state machine. +// It is not concurrency-safe. +type ejector struct { + identityLists []trackedDynamicIdentityList + ejected []flow.Identifier +} + +// newEjector constructs an ejector with a pre-allocated slice for identity lists. +// We are always going to add at least one element, and most often two (previous and current epoch), but never more than three. +func newEjector() ejector { + return ejector{ + identityLists: make([]trackedDynamicIdentityList, 0, 3), + } +} + +// Eject marks the node as ejected in all tracked identity lists. If and only if the node is active in the previous +// or current or next epoch, the node's ejection status is set to true for all occurrences, and we return true.
If + `nodeID` is not found, we return false. This method is idempotent and behaves identically for repeated calls with +// the same `nodeID`. Repeated calls with the same input create minor performance overhead. +// +// If it's the first ejection during the `ejector`'s lifetime (i.e. this `ejector` has no previous ejection events +// memorized), it populates an internal lookup for each `DynamicIdentityList` it tracks. This lazy initialization +// benefits the vastly common happy path (no ejection events during the ejector's lifetime). +func (e *ejector) Eject(nodeID flow.Identifier) bool { + l := len(e.identityLists) + if len(e.ejected) == 0 { // if this is the first ejection sealed in this block, we have to populate the lookup first + for i := 0; i < l; i++ { + e.identityLists[i].identityLookup = e.identityLists[i].dynamicIdentities.Lookup() + } + } + e.ejected = append(e.ejected, nodeID) + + var nodeFound bool + for i := 0; i < l; i++ { + dynamicIdentity, found := e.identityLists[i].identityLookup[nodeID] + if found { + nodeFound = true + dynamicIdentity.Ejected = true + } + } + return nodeFound +} + +// TrackDynamicIdentityList tracks a new DynamicIdentityList in the state machine. +// It is not allowed to readmit nodes that were ejected. Whenever a new DynamicIdentityList is tracked, +// we ensure that the ejection status of previously ejected nodes is not reverted. +// If a node was previously ejected and the new DynamicIdentityList contains the node with an `Ejected` +// status of `false`, a `protocol.InvalidServiceEventError` is returned and the ejector remains unchanged. +func (e *ejector) TrackDynamicIdentityList(list flow.DynamicIdentityEntryList) error { + tracker := trackedDynamicIdentityList{dynamicIdentities: list} + if len(e.ejected) > 0 { + // nodes were already ejected in this block, so their ejection should not be reverted in the new `list` + tracker.identityLookup = list.Lookup() + for _, id := range e.ejected { + dynamicIdentity, found := tracker.identityLookup[id] + if found && !dynamicIdentity.Ejected { + return protocol.NewInvalidServiceEventErrorf("node %v was previously ejected but next DynamicIdentityList reverts its ejection status", id) + } + } + } + e.identityLists = append(e.identityLists, tracker) + return nil +} diff --git a/state/protocol/protocol_state/epochs/identity_ejector_test.go b/state/protocol/protocol_state/epochs/identity_ejector_test.go new file mode 100644 index 00000000000..063e4fd8c64 --- /dev/null +++ b/state/protocol/protocol_state/epochs/identity_ejector_test.go @@ -0,0 +1,89 @@ +package epochs + +import ( + "testing" + + "github.com/stretchr/testify/require" + "pgregory.net/rapid" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestEjectorRapid fuzzy-tests the ejector, ensuring that it correctly tracks and ejects nodes. +// This test covers only the happy-path scenario. +func TestEjectorRapid(t *testing.T) { + rapid.Check(t, func(t *rapid.T) { + ej := newEjector() + baseIdentities := unittest.DynamicIdentityEntryListFixture(5) + // track 1-3 identity lists, each containing extra 0-7 identities + trackedIdentities := rapid.Map(rapid.SliceOfN(rapid.IntRange(0, 7), 1, 3), func(n []int) []flow.DynamicIdentityEntryList { + var result []flow.DynamicIdentityEntryList + for _, count := range n { + identities := append(baseIdentities.Copy(), unittest.DynamicIdentityEntryListFixture(count)...)
+ identities = rapid.Permutation(identities).Draw(t, "shuffled-identities") + result = append(result, identities) + } + return result + }).Draw(t, "tracked-identities") + + for _, list := range trackedIdentities { + err := ej.TrackDynamicIdentityList(list) + require.NoError(t, err) + } + + var ejectedIdentities flow.IdentifierList + for _, list := range trackedIdentities { + nodeID := rapid.SampledFrom(list).Draw(t, "ejected-identity").NodeID + require.True(t, ej.Eject(nodeID)) + ejectedIdentities = append(ejectedIdentities, nodeID) + } + ejectedLookup := ejectedIdentities.Lookup() + + for _, list := range trackedIdentities { + for _, identity := range list { + _, expectedStatus := ejectedLookup[identity.NodeID] + require.Equal(t, expectedStatus, identity.Ejected, "incorrect ejection status") + } + } + }) +} + +// TestEjector_ReadmitEjectedIdentity ensures that a node that was ejected cannot be readmitted with subsequent track requests. +func TestEjector_ReadmitEjectedIdentity(t *testing.T) { + list := unittest.DynamicIdentityEntryListFixture(3) + ej := newEjector() + ejectedNodeID := list[0].NodeID + require.NoError(t, ej.TrackDynamicIdentityList(list)) + require.True(t, ej.Eject(ejectedNodeID)) + readmit := append(unittest.DynamicIdentityEntryListFixture(3), &flow.DynamicIdentityEntry{ + NodeID: ejectedNodeID, + Ejected: false, + }) + err := ej.TrackDynamicIdentityList(readmit) + require.Error(t, err) + require.True(t, protocol.IsInvalidServiceEventError(err)) +} + +// TestEjector_IdentityNotFound ensures that the ejector returns false when the identity is not +// in any of the tracked lists. We test different scenarios where the identity is not tracked. +func TestEjector_IdentityNotFound(t *testing.T) { + t.Run("nothing-tracked", func(t *testing.T) { + ej := newEjector() + require.False(t, ej.Eject(unittest.IdentifierFixture())) + }) + t.Run("list-tracked", func(t *testing.T) { + ej := newEjector() + require.NoError(t, ej.TrackDynamicIdentityList(unittest.DynamicIdentityEntryListFixture(3))) + require.False(t, ej.Eject(unittest.IdentifierFixture())) + }) + t.Run("after-ejection", func(t *testing.T) { + ej := newEjector() + list := unittest.DynamicIdentityEntryListFixture(3) + require.NoError(t, ej.TrackDynamicIdentityList(list)) + require.True(t, ej.Eject(list[0].NodeID)) + require.False(t, ej.Eject(unittest.IdentifierFixture())) + }) +} diff --git a/state/protocol/protocol_state/epochs/mock/state_machine.go b/state/protocol/protocol_state/epochs/mock/state_machine.go new file mode 100644 index 00000000000..acc155da2de --- /dev/null +++ b/state/protocol/protocol_state/epochs/mock/state_machine.go @@ -0,0 +1,224 @@ +// Code generated by mockery. DO NOT EDIT.
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// StateMachine is an autogenerated mock type for the StateMachine type +type StateMachine struct { + mock.Mock +} + +// Build provides a mock function with no fields +func (_m *StateMachine) Build() (*flow.EpochStateEntry, flow.Identifier, bool) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Build") + } + + var r0 *flow.EpochStateEntry + var r1 flow.Identifier + var r2 bool + if rf, ok := ret.Get(0).(func() (*flow.EpochStateEntry, flow.Identifier, bool)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *flow.EpochStateEntry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.EpochStateEntry) + } + } + + if rf, ok := ret.Get(1).(func() flow.Identifier); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(flow.Identifier) + } + } + + if rf, ok := ret.Get(2).(func() bool); ok { + r2 = rf() + } else { + r2 = ret.Get(2).(bool) + } + + return r0, r1, r2 +} + +// EjectIdentity provides a mock function with given fields: ejectionEvent +func (_m *StateMachine) EjectIdentity(ejectionEvent *flow.EjectNode) bool { + ret := _m.Called(ejectionEvent) + + if len(ret) == 0 { + panic("no return value specified for EjectIdentity") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(*flow.EjectNode) bool); ok { + r0 = rf(ejectionEvent) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// ParentState provides a mock function with no fields +func (_m *StateMachine) ParentState() *flow.RichEpochStateEntry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ParentState") + } + + var r0 *flow.RichEpochStateEntry + if rf, ok := ret.Get(0).(func() *flow.RichEpochStateEntry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.RichEpochStateEntry) + } + } + + return r0 +} + +// ProcessEpochCommit provides a mock function with given fields: epochCommit +func (_m *StateMachine) ProcessEpochCommit(epochCommit *flow.EpochCommit) (bool, error) { + ret := _m.Called(epochCommit) + + if len(ret) == 0 { + panic("no return value specified for ProcessEpochCommit") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(*flow.EpochCommit) (bool, error)); ok { + return rf(epochCommit) + } + if rf, ok := ret.Get(0).(func(*flow.EpochCommit) bool); ok { + r0 = rf(epochCommit) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(*flow.EpochCommit) error); ok { + r1 = rf(epochCommit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProcessEpochRecover provides a mock function with given fields: epochRecover +func (_m *StateMachine) ProcessEpochRecover(epochRecover *flow.EpochRecover) (bool, error) { + ret := _m.Called(epochRecover) + + if len(ret) == 0 { + panic("no return value specified for ProcessEpochRecover") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(*flow.EpochRecover) (bool, error)); ok { + return rf(epochRecover) + } + if rf, ok := ret.Get(0).(func(*flow.EpochRecover) bool); ok { + r0 = rf(epochRecover) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(*flow.EpochRecover) error); ok { + r1 = rf(epochRecover) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProcessEpochSetup provides a mock function with given fields: epochSetup +func (_m *StateMachine) ProcessEpochSetup(epochSetup *flow.EpochSetup) (bool, error) { + ret := 
_m.Called(epochSetup) + + if len(ret) == 0 { + panic("no return value specified for ProcessEpochSetup") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(*flow.EpochSetup) (bool, error)); ok { + return rf(epochSetup) + } + if rf, ok := ret.Get(0).(func(*flow.EpochSetup) bool); ok { + r0 = rf(epochSetup) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(*flow.EpochSetup) error); ok { + r1 = rf(epochSetup) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TransitionToNextEpoch provides a mock function with no fields +func (_m *StateMachine) TransitionToNextEpoch() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for TransitionToNextEpoch") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// View provides a mock function with no fields +func (_m *StateMachine) View() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for View") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// NewStateMachine creates a new instance of StateMachine. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateMachine(t interface { + mock.TestingT + Cleanup(func()) +}) *StateMachine { + mock := &StateMachine{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/protocol_state/epochs/mock/state_machine_factory_method.go b/state/protocol/protocol_state/epochs/mock/state_machine_factory_method.go new file mode 100644 index 00000000000..838fae65393 --- /dev/null +++ b/state/protocol/protocol_state/epochs/mock/state_machine_factory_method.go @@ -0,0 +1,59 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + epochs "github.com/onflow/flow-go/state/protocol/protocol_state/epochs" + + mock "github.com/stretchr/testify/mock" +) + +// StateMachineFactoryMethod is an autogenerated mock type for the StateMachineFactoryMethod type +type StateMachineFactoryMethod struct { + mock.Mock +} + +// Execute provides a mock function with given fields: candidateView, parentState +func (_m *StateMachineFactoryMethod) Execute(candidateView uint64, parentState *flow.RichEpochStateEntry) (epochs.StateMachine, error) { + ret := _m.Called(candidateView, parentState) + + if len(ret) == 0 { + panic("no return value specified for Execute") + } + + var r0 epochs.StateMachine + var r1 error + if rf, ok := ret.Get(0).(func(uint64, *flow.RichEpochStateEntry) (epochs.StateMachine, error)); ok { + return rf(candidateView, parentState) + } + if rf, ok := ret.Get(0).(func(uint64, *flow.RichEpochStateEntry) epochs.StateMachine); ok { + r0 = rf(candidateView, parentState) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(epochs.StateMachine) + } + } + + if rf, ok := ret.Get(1).(func(uint64, *flow.RichEpochStateEntry) error); ok { + r1 = rf(candidateView, parentState) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewStateMachineFactoryMethod creates a new instance of StateMachineFactoryMethod. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewStateMachineFactoryMethod(t interface { + mock.TestingT + Cleanup(func()) +}) *StateMachineFactoryMethod { + mock := &StateMachineFactoryMethod{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/protocol_state/epochs/mock_interfaces/state_machine_factory_method.go b/state/protocol/protocol_state/epochs/mock_interfaces/state_machine_factory_method.go new file mode 100644 index 00000000000..3635134277a --- /dev/null +++ b/state/protocol/protocol_state/epochs/mock_interfaces/state_machine_factory_method.go @@ -0,0 +1,11 @@ +package mockinterfaces + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol/protocol_state/epochs" +) + +// StateMachineFactoryMethod allows to create a mock for the StateMachineFactoryMethod callback +type StateMachineFactoryMethod interface { + Execute(candidateView uint64, parentState *flow.RichEpochStateEntry) (epochs.StateMachine, error) +} diff --git a/state/protocol/protocol_state/epochs/statemachine.go b/state/protocol/protocol_state/epochs/statemachine.go new file mode 100644 index 00000000000..9ba4c214cdc --- /dev/null +++ b/state/protocol/protocol_state/epochs/statemachine.go @@ -0,0 +1,395 @@ +package epochs + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/protocol_state" + "github.com/onflow/flow-go/state/protocol/protocol_state/common" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/deferred" +) + +// StateMachine implements a low-level interface for state-changing operations on the Epoch state. +// It is used by higher level logic to coordinate the Epoch handover, evolving its internal state +// when Epoch-related Service Events are sealed or specific view-thresholds are reached. +// +// The StateMachine is fork-aware, in that it starts with the Epoch state of the parent block and +// evolves the state, based on the relevant information in the child block (specifically Service Events +// sealed in the child block and the child block's view). A separate instance must be created for each +// block that is being processed. Calling `Build()` constructs a snapshot of the resulting Epoch state. +// +// IMPORTANCE of the FinalizationSafetyThreshold: +// The FinalizationSafetyThreshold's value `t` acts as a deadline for sealing the EpochCommit service +// event near the end of each epoch. Specifically, if the current epoch N's final view is `f`, the +// EpochCommit event for configuring epoch N+1 must be received at latest by the: +// +// Epoch Commitment Deadline: d=f-t +// +// Epoch Commitment Deadline +// EPOCH N ↓ EPOCH N+1 +// ...---------------|--------------------------| |-----... +// ↑ ↑ ↑ +// view: d············t············>⋮ f+1 +// +// This deadline is used to determine when to trigger Epoch Fallback Mode [EFM]: +// if no valid configuration for epoch N+1 has been determined by view `d`, the +// protocol enters EFM for the following reason: +// - By the time a node surpasses the last view `f` of epoch N, it must know the leaders +// for every view of epoch N+1. +// - The leader selection for epoch N+1 is only unambiguously determined, if the configuration +// for epoch N+1 has been finalized. 
(Otherwise, different forks could contain different +// consensus committees for epoch N+1, which would lead to different leaders. Only finalization +// resolves this ambiguity by finalizing one and orphaning epoch configurations possibly +// contained in competing forks). +// - The latest point where we could still finalize a configuration for Epoch N+1 is the last view +// `f` of epoch N. As finalization is permitted to take up to `t` views, a valid configuration +// for epoch N+1 must be available at latest by view d=f-t. +// +// Example: A service event is emitted during the computation of block A. The execution result +// for block A, denoted as `RA`, is incorporated into block B. The seal `SA` for this result +// is included in block C: +// +// A ← B(RA) ← C(SA) ← ... ← R +// +// A service event σ is considered sealed w.r.t. a reference block R if: +// - σ was emitted during execution of some block A, s.t. A is an ancestor of R +// - The seal for block A was included in some block C, s.t. C is an ancestor of R +// +// When we finalize the first block B with B.View >= d: +// - HAPPY PATH: If an EpochCommit service event has been sealed w.r.t. B, no action is taken. +// - FALLBACK PATH: If no EpochCommit service event has been sealed w.r.t. B, +// Epoch Fallback Mode [EFM] is triggered. +// +// CONTEXT: +// The Epoch Commitment Deadline exists to ensure that all nodes agree on whether EFM is triggered +// for a particular epoch, before the epoch actually ends. In particular, all nodes will agree about +// EFM being triggered (or not) if at least one block with view in [d, f] is finalized - in other words, +// we require at least one block being finalized after the epoch commitment deadline, and before the next +// epoch begins. +// +// It should be noted that we are employing a heuristic here, which succeeds with overwhelming probability +// of nearly 1. However, theoretically it is possible that no blocks are finalized within t views. In this +// edge case, the nodes would not have detected the epoch commit phase failing and the protocol would just +// halt at the end of the epoch. However, we emphasize that this is extremely unlikely, because the +// probability of randomly selecting t faulty leaders in sequence decays to zero exponentially with +// increasing t. Furthermore, failing to finalize blocks for a noticeable period entails halting block sealing, +// which would trigger human intervention on much smaller time scales than t views. Therefore, t should be +// chosen such that it takes more than 30 mins to pass t views under happy path operation. Significantly larger +// values are ok, but t views equalling 30 mins should be seen as a lower bound. +type StateMachine interface { + // Build returns updated protocol state entry, state ID and a flag indicating if there were any changes. + // CAUTION: + // Do NOT call Build, if the StateMachine instance has returned a `protocol.InvalidServiceEventError` + // at any time during its lifetime. After this error, the StateMachine is left with a potentially + // dysfunctional state and should be discarded. + Build() (updatedState *flow.EpochStateEntry, stateID flow.Identifier, hasChanges bool) + + // ProcessEpochSetup updates the internally-maintained interim Epoch state with data from the epoch setup event. + // Processing an epoch setup event also affects the identity table for the current epoch.
+ // Specifically, we transition the Epoch state from staking to setup phase: we stop returning + // identities from previous+current epochs and start returning identities from current+next epochs. + // As a result of this operation, the protocol state for the next epoch will be created. + // The returned boolean indicates whether the event triggered a transition in the state machine. + // Implementors must never return (true, error). + // Expected errors indicating that we are leaving the happy path of the epoch transitions: + // - `protocol.InvalidServiceEventError` - if the service event is invalid or is not a valid state transition for the current protocol state. + // CAUTION: the StateMachine is left with a potentially dysfunctional state when this error occurs. Do NOT call the Build method + // after such an error; discard the StateMachine! + ProcessEpochSetup(epochSetup *flow.EpochSetup) (bool, error) + + // ProcessEpochCommit updates the internally-maintained interim Epoch state with data from EpochCommit event. + // On the happy path, observing an EpochCommit event transitions the protocol state from setup to commit phase. + // At this point, we have fully determined the next epoch's configuration. + // The returned boolean indicates whether the event triggered a transition in the state machine. + // Implementors must never return (true, error). + // Expected errors indicating that we are leaving the happy path of the epoch transitions: + // - `protocol.InvalidServiceEventError` - if the service event is invalid or is not a valid state transition for the current protocol state. + // CAUTION: the StateMachine is left with a potentially dysfunctional state when this error occurs. Do NOT call the Build method + // after such an error; discard the StateMachine! + ProcessEpochCommit(epochCommit *flow.EpochCommit) (bool, error) + + // ProcessEpochRecover updates the internally-maintained interim Epoch state with data from an epoch recover + // event in an attempt to recover from Epoch Fallback Mode [EFM] and get back on the happy path. + // Specifically, after successfully processing this event, we will have a next epoch (as specified by the + // EpochRecover event) in the protocol state, which is in the committed phase. Subsequently, the epoch + // protocol can proceed following the happy path. Therefore, we set `EpochFallbackTriggered` back to false. + // + // The boolean return indicates whether the input event triggered a transition in the state machine. + // For the EpochRecover event, we return false if and only if there is an error. The reason is that + // either the `EpochRecover` event is rejected (leading to `InvalidServiceEventError`) or there is an + // exception processing the event. Otherwise, an `EpochRecover` event must always lead to a state change. + // Expected errors during normal operations: + // - `protocol.InvalidServiceEventError` - if the service event is invalid or is not a valid state transition for the current protocol state. + ProcessEpochRecover(epochRecover *flow.EpochRecover) (bool, error) + + // EjectIdentity updates the identity table by changing the node's participation status to 'ejected'. + // If and only if the node is active in the previous or current or next epoch, the node's ejection status + // is set to true for all occurrences, and we return true. If `nodeID` is not found, we return false. This + // method is idempotent and behaves identically for repeated calls with the same `nodeID` (repeated calls + // with the same input create minor performance overhead though).
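+ // + // Illustration (added note, not part of the original doc): assuming `ev` references a node active in the + // current epoch, EjectIdentity(ev) returns true and marks the node as ejected; calling EjectIdentity(ev) + // again also returns true, since the node is still found in the tracked lists, and causes no further + // state change (idempotency).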
+ EjectIdentity(ejectionEvent *flow.EjectNode) bool + + // TransitionToNextEpoch transitions our reference frame of 'current epoch' to the pending but committed epoch. + // Epoch transition is only allowed when: + // - next epoch has been committed, + // - candidate block is in the next epoch. + // No errors are expected during normal operations. + TransitionToNextEpoch() error + + // View returns the view associated with this state machine. + // The view of the state machine equals the view of the block carrying the respective updates. + View() uint64 + + // ParentState returns parent protocol state associated with this state machine. + ParentState() *flow.RichEpochStateEntry +} + +// StateMachineFactoryMethod is a factory method to create state machines for evolving the protocol's epoch state. +// Currently, we have `HappyPathStateMachine` and `FallbackStateMachine` as StateMachine +// implementations, whose constructors both have the same signature as StateMachineFactoryMethod. +type StateMachineFactoryMethod func(candidateView uint64, parentState *flow.RichEpochStateEntry) (StateMachine, error) + +// EpochStateMachine is a hierarchical state machine that encapsulates the logic for protocol-compliant evolution of Epoch-related sub-state. +// EpochStateMachine processes a subset of service events that are relevant for the Epoch state, and ignores all other events. +// EpochStateMachine delegates the processing of service events to an embedded StateMachine, +// which is either a HappyPathStateMachine or a FallbackStateMachine depending on the operation mode of the protocol. +// It relies on Key-Value Store to read the parent state and to persist the snapshot of the updated Epoch state. +type EpochStateMachine struct { + common.BaseKeyValueStoreStateMachine + activeStateMachine StateMachine + epochFallbackStateMachineFactory func() (StateMachine, error) + + setups storage.EpochSetups + commits storage.EpochCommits + epochProtocolStateDB storage.EpochProtocolStateEntries + pendingDBUpdates *deferred.DeferredBlockPersist +} + +var _ protocol_state.KeyValueStoreStateMachine = (*EpochStateMachine)(nil) + +// NewEpochStateMachine creates a new higher-level hierarchical state machine for protocol-compliant evolution of Epoch-related sub-state. +// NewEpochStateMachine performs initialization of state machine depending on the operation mode of the protocol. +// - for the happy path, it initializes a HappyPathStateMachine, +// - for the epoch fallback mode it initializes a FallbackStateMachine. +// No errors are expected during normal operations. 
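+// +// Note (illustrative, added for clarity): `NewHappyPathStateMachine` additionally takes an events consumer as its +// first argument (see the tests in this package), so callers are expected to bind that consumer via a closure to +// obtain a StateMachineFactoryMethod: +// +// happyPathFactory := func(candidateView uint64, parentState *flow.RichEpochStateEntry) (StateMachine, error) { +// return NewHappyPathStateMachine(consumer, candidateView, parentState) +// } +// +// Here `consumer` is assumed to be available at the call site.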
+func NewEpochStateMachine( + candidateView uint64, + parentBlockID flow.Identifier, + setups storage.EpochSetups, + commits storage.EpochCommits, + epochProtocolStateDB storage.EpochProtocolStateEntries, + parentState protocol.KVStoreReader, + evolvingState protocol_state.KVStoreMutator, + happyPathStateMachineFactory StateMachineFactoryMethod, + epochFallbackStateMachineFactory StateMachineFactoryMethod, +) (*EpochStateMachine, error) { + parentEpochState, err := epochProtocolStateDB.ByBlockID(parentBlockID) + if err != nil { + return nil, fmt.Errorf("could not query parent protocol state at block (%x): %w", parentBlockID, err) + } + + // sanity check: the parent epoch state ID must be set in KV store + if parentEpochState.ID() != parentState.GetEpochStateID() { + return nil, irrecoverable.NewExceptionf("broken invariant: parent epoch state ID mismatch, expected %x, got %x", + parentState.GetEpochStateID(), parentEpochState.ID()) + } + + var stateMachine StateMachine + candidateTriggersEpochFallback := epochFallbackTriggeredByIncorporatingCandidate(candidateView, parentState, parentEpochState) + if parentEpochState.EpochFallbackTriggered || candidateTriggersEpochFallback { + // Case 1: EpochFallbackTriggered is true, indicating that we have encountered an invalid + // epoch service event or an invalid state transition previously in this fork. + // Case 2: Incorporating the candidate block is itself an invalid epoch transition. + // + // In either case, Epoch Fallback Mode [EFM] has been tentatively triggered on this fork, + // and we must use only the `epochFallbackStateMachine` along this fork. + // + // TODO for 'leaving Epoch Fallback via special service event': this might need to change. + stateMachine, err = epochFallbackStateMachineFactory(candidateView, parentEpochState) + } else { + stateMachine, err = happyPathStateMachineFactory(candidateView, parentEpochState) + } + if err != nil { + return nil, fmt.Errorf("could not initialize protocol state machine: %w", err) + } + + return &EpochStateMachine{ + BaseKeyValueStoreStateMachine: common.NewBaseKeyValueStoreStateMachine(candidateView, parentState, evolvingState), + activeStateMachine: stateMachine, + epochFallbackStateMachineFactory: func() (StateMachine, error) { + return epochFallbackStateMachineFactory(candidateView, parentEpochState) + }, + setups: setups, + commits: commits, + epochProtocolStateDB: epochProtocolStateDB, + pendingDBUpdates: deferred.NewDeferredBlockPersist(), + }, nil +} + +// Build schedules updates to the protocol state by obtaining the updated state from the active state machine, +// preparing deferred DB updates and committing the updated sub-state ID to the KV store. +// ATTENTION: In the mature implementation, all parts of the Dynamic Protocol State will rely on the Key-Value Store as storage. +// To avoid a large refactoring, we are currently using a hybrid approach: only the epoch state ID is stored in the KV Store, +// while the actual epoch state is stored separately; nevertheless, the epoch state ID is used to sanity-check that the +// epoch state is consistent with the KV Store. Using this approach, we commit the epoch sub-state to the KV Store, which in +// turn affects the Dynamic Protocol State ID, which is essentially the hash of the KV Store.
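+// +// Note on ordering (added for clarity): of the two deferred operations scheduled in Build, the index entry +// (block ID → epoch state ID) is always written, whereas the epoch state itself is only stored when the state +// machine reports changes; if nothing changed, the state is already persisted under the parent's state ID.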
+// TODO: update comments +func (e *EpochStateMachine) Build() (*deferred.DeferredBlockPersist, error) { + updatedEpochState, updatedStateID, hasChanges := e.activeStateMachine.Build() + + e.pendingDBUpdates.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return e.epochProtocolStateDB.BatchIndex(lctx, rw, blockID, updatedStateID) + }) + + if hasChanges { + e.pendingDBUpdates.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return e.epochProtocolStateDB.BatchStore(rw.Writer(), updatedStateID, updatedEpochState.MinEpochStateEntry) + }) + } + e.EvolvingState.SetEpochStateID(updatedStateID) + + return e.pendingDBUpdates, nil +} + +// EvolveState applies the state change(s) on the Epoch sub-state, based on information from the candidate block +// (under construction). Information that potentially changes the state (compared to the parent block's state): +// - Service Events sealed in the candidate block +// - the candidate block's view (already provided at construction time) +// +// SAFETY REQUIREMENTS: +// - The seals for the execution results, from which the `sealedServiceEvents` originate, +// must be protocol compliant. +// - `sealedServiceEvents` must list the service Events in chronological order. This can be +// achieved by arranging the sealed execution results in order of increasing block height. +// Within each execution result, the service events are in chronological order. +// - EvolveState MUST be called for all candidate blocks, even if `sealedServiceEvents` is empty! +// This is because reaching a specific view can also trigger state changes. (e.g. not having +// received the EpochCommit event for the next epoch, but approaching the end of the current epoch.) +// +// The block's payload might contain epoch preparation service events for the next epoch. In this case, +// we need to update the tentative protocol state. We need to validate whether all information is available +// in the protocol state to go to the next epoch when needed. In cases where there is a bug in the smart +// contract, it could be that this happens too late, and we should trigger epoch fallback mode. +// No errors are expected during normal operations. +func (e *EpochStateMachine) EvolveState(sealedServiceEvents []flow.ServiceEvent) error { + dbUpdates, err := e.evolveActiveStateMachine(sealedServiceEvents) + if err != nil { + if protocol.IsInvalidServiceEventError(err) { + // When the happy path state machine returns an InvalidServiceEventError, we discard its state and use the fallback state machine + // to handle the block's epoch state evolution. The fallback state machine sets the state's EFM flag and gracefully handles all + // service events to keep the protocol alive, no matter whether the service events are incorrect, inconsistent or unexpected. + // Once we enter EFM, the only way to return to normal operation is for the fallback state machine to process an epoch recover event. + // Without loss of generality, we can assume that the error above is from the happy path state machine. In case of a bug, where + // the fallback state machine was already active above, yet it returned the `InvalidServiceEventError`, we would re-execute exactly + // that same logic below, arrive exactly at the same conclusion (fallback state machine returned an error which it shouldn't have) + // and crash.
+ e.activeStateMachine, err = e.epochFallbackStateMachineFactory() + if err != nil { + return fmt.Errorf("could not create epoch fallback state machine: %w", err) + } + dbUpdates, err = e.evolveActiveStateMachine(sealedServiceEvents) + if err != nil { + return irrecoverable.NewExceptionf("could not transition to epoch fallback mode: %w", err) + } + } else { + return irrecoverable.NewExceptionf("could not apply service events from ordered results: %w", err) + } + } + + e.pendingDBUpdates.Chain(dbUpdates) + return nil +} + +// evolveActiveStateMachine applies the state change(s) on the Epoch sub-state, based on information from the candidate +// block (under construction). Information that potentially changes the state (compared to the parent block's state): +// 1. the candidate block's view (already provided at construction time) +// 2. Service Events sealed in the candidate block +// +// This function applies all evolving state operations to the active state machine. In case of successful evolution, +// it returns the deferred DB updates to be applied to the storage. +// Expected errors during normal operations: +// - `protocol.InvalidServiceEventError` if any service event is invalid or is not a valid state transition for the current protocol state +func (e *EpochStateMachine) evolveActiveStateMachine(sealedServiceEvents []flow.ServiceEvent) (*deferred.DeferredBlockPersist, error) { + parentProtocolState := e.activeStateMachine.ParentState() + + // STEP 1: transition to next epoch if next epoch is committed *and* we are at first block of epoch + phase := parentProtocolState.EpochPhase() + if (phase == flow.EpochPhaseCommitted) && (e.activeStateMachine.View() > parentProtocolState.CurrentEpochFinalView()) { + err := e.activeStateMachine.TransitionToNextEpoch() + if err != nil { + return nil, fmt.Errorf("could not transition protocol state to next epoch: %w", err) + } + } + + // STEP 2: apply service events (input events already required to be ordered by block height). 
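+ // Note (added for clarity): each successfully processed event below schedules a deferred storage write for the + // event itself (e.g. the EpochSetup), because the block ID under which it is indexed is only known once the + // candidate block has been fully constructed. Events that the active state machine declines without error + // (processed == false) are deliberately not persisted.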
+ dbUpdates := deferred.NewDeferredBlockPersist() + for _, event := range sealedServiceEvents { + switch ev := event.Event.(type) { + case *flow.EpochSetup: + processed, err := e.activeStateMachine.ProcessEpochSetup(ev) + if err != nil { + return nil, fmt.Errorf("could not process epoch setup event: %w", err) + } + if processed { + dbUpdates.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return e.setups.BatchStore(rw, ev) // we'll insert the setup event when we insert the block + }) + } + + case *flow.EpochCommit: + processed, err := e.activeStateMachine.ProcessEpochCommit(ev) + if err != nil { + return nil, fmt.Errorf("could not process epoch commit event: %w", err) + } + if processed { + dbUpdates.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return e.commits.BatchStore(rw, ev) // we'll insert the commit event when we insert the block + }) + } + case *flow.EpochRecover: + processed, err := e.activeStateMachine.ProcessEpochRecover(ev) + if err != nil { + return nil, fmt.Errorf("could not process epoch recover event: %w", err) + } + if processed { + dbUpdates.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + err := e.setups.BatchStore(rw, &ev.EpochSetup) + if err != nil { + return err + } + return e.commits.BatchStore(rw, &ev.EpochCommit) // we'll insert the setup & commit events when we insert the block + }) + } + case *flow.EjectNode: + _ = e.activeStateMachine.EjectIdentity(ev) + default: + continue + } + } + return dbUpdates, nil +} + +// epochFallbackTriggeredByIncorporatingCandidate checks whether incorporating the input block B +// would trigger epoch fallback mode [EFM] along the current fork. We trigger epoch fallback mode +// when: +// 1. The next epoch has not been committed as of B (EpochPhase ≠ flow.EpochPhaseCommitted) AND +// 2. B is the first incorporated block with view greater than or equal to the epoch commitment +// deadline for the current epoch +// +// In protocol terms, condition 1 means that an EpochCommit service event for the upcoming epoch has +// not yet been sealed as of block B. Formally, a service event S is considered sealed as of block B if: +// - S was emitted during execution of some block A, s.t. A is an ancestor of B. +// - The seal for block A was included in some block C, s.t. C is an ancestor of B. +// +// For further details see `KVStoreReader.GetFinalizationSafetyThreshold()`.
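+// +// Worked example (illustrative numbers, added for clarity): suppose the current epoch's FinalView is f = 10000 and the +// FinalizationSafetyThreshold is t = 1000, so the epoch commitment deadline is d = f - t = 9000. A candidate block with +// view 9000 satisfies candidateView + t >= f (9000 + 1000 >= 10000), so incorporating it triggers EFM unless the next +// epoch is already committed; a candidate with view 8999 does not.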
+func epochFallbackTriggeredByIncorporatingCandidate(candidateView uint64, parentState protocol.KVStoreReader, parentEpochState *flow.RichEpochStateEntry) bool { + if parentEpochState.EpochPhase() == flow.EpochPhaseCommitted { // Requirement 1 + return false + } + return candidateView+parentState.GetFinalizationSafetyThreshold() >= parentEpochState.CurrentEpochSetup.FinalView // Requirement 2 +} diff --git a/state/protocol/protocol_state/epochs/statemachine_test.go b/state/protocol/protocol_state/epochs/statemachine_test.go new file mode 100644 index 00000000000..4b5d837d7ef --- /dev/null +++ b/state/protocol/protocol_state/epochs/statemachine_test.go @@ -0,0 +1,576 @@ +package epochs_test + +import ( + "errors" + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + mocks "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + protocolmock "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/state/protocol/protocol_state/epochs" + "github.com/onflow/flow-go/state/protocol/protocol_state/epochs/mock" + protocol_statemock "github.com/onflow/flow-go/state/protocol/protocol_state/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestEpochStateMachine(t *testing.T) { + suite.Run(t, new(EpochStateMachineSuite)) +} + +// EpochStateMachineSuite is a dedicated test suite for testing hierarchical epoch state machine. +// All needed dependencies are mocked, including KV store as a whole, and all the necessary storages. +// Tests in this suite are designed to rely on automatic assertions when leaving the scope of the test. 
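+// ("Automatic assertions" refers to the mockery-generated constructors used below, which register mock.AssertExpectations +// via t.Cleanup, so unmet or unexpected mock expectations fail the test when it returns.)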
+type EpochStateMachineSuite struct { + suite.Suite + epochStateDB *storagemock.EpochProtocolStateEntries + setupsDB *storagemock.EpochSetups + commitsDB *storagemock.EpochCommits + globalParams *protocolmock.GlobalParams + parentState *protocolmock.KVStoreReader + parentEpochState *flow.RichEpochStateEntry + mutator *protocol_statemock.KVStoreMutator + happyPathStateMachine *mock.StateMachine + happyPathStateMachineFactory *mock.StateMachineFactoryMethod + fallbackPathStateMachineFactory *mock.StateMachineFactoryMethod + candidate *flow.Header + lockManager lockctx.Manager + + stateMachine *epochs.EpochStateMachine +} + +func (s *EpochStateMachineSuite) SetupTest() { + s.epochStateDB = storagemock.NewEpochProtocolStateEntries(s.T()) + s.setupsDB = storagemock.NewEpochSetups(s.T()) + s.commitsDB = storagemock.NewEpochCommits(s.T()) + s.parentState = protocolmock.NewKVStoreReader(s.T()) + s.parentState.On("GetFinalizationSafetyThreshold").Return(uint64(1_000)) + s.parentEpochState = unittest.EpochStateFixture() + s.mutator = protocol_statemock.NewKVStoreMutator(s.T()) + s.candidate = unittest.BlockHeaderFixture(unittest.HeaderWithView(s.parentEpochState.CurrentEpochSetup.FirstView + 1)) + s.happyPathStateMachine = mock.NewStateMachine(s.T()) + s.happyPathStateMachineFactory = mock.NewStateMachineFactoryMethod(s.T()) + s.fallbackPathStateMachineFactory = mock.NewStateMachineFactoryMethod(s.T()) + s.lockManager = storage.NewTestingLockManager() + + s.epochStateDB.On("ByBlockID", mocks.Anything).Return(func(_ flow.Identifier) *flow.RichEpochStateEntry { + return s.parentEpochState + }, func(_ flow.Identifier) error { + return nil + }) + s.parentState.On("GetEpochStateID").Return(func() flow.Identifier { + return s.parentEpochState.ID() + }) + + s.happyPathStateMachineFactory.On("Execute", s.candidate.View, s.parentEpochState). + Return(s.happyPathStateMachine, nil).Once() + + s.happyPathStateMachine.On("ParentState").Return(s.parentEpochState).Maybe() + + var err error + s.stateMachine, err = epochs.NewEpochStateMachine( + s.candidate.View, + s.candidate.ParentID, + s.setupsDB, + s.commitsDB, + s.epochStateDB, + s.parentState, + s.mutator, + s.happyPathStateMachineFactory.Execute, + s.fallbackPathStateMachineFactory.Execute, + ) + require.NoError(s.T(), err) +} + +// TestBuild_NoChanges tests that the hierarchical epoch state machine maintains the index of epoch states and commits +// the epoch state ID in the KV store even when there were no events to process. +func (s *EpochStateMachineSuite) TestBuild_NoChanges() { + s.happyPathStateMachine.On("ParentState").Return(s.parentEpochState) + s.happyPathStateMachine.On("Build").Return(s.parentEpochState.EpochStateEntry, s.parentEpochState.ID(), false).Once() + + err := s.stateMachine.EvolveState(nil) + require.NoError(s.T(), err) + + rw := storagemock.NewReaderBatchWriter(s.T()) + + // Create a proper lock context proof for the BatchIndex operation + err = unittest.WithLock(s.T(), s.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + s.epochStateDB.On("BatchIndex", lctx, rw, s.candidate.ID(), s.parentEpochState.ID()).Return(nil).Once() + s.mutator.On("SetEpochStateID", s.parentEpochState.ID()).Return(nil).Once() + + dbUpdates, err := s.stateMachine.Build() + require.NoError(s.T(), err) + + // Storage operations are deferred, because the block ID is not known when the block is newly constructed. Only once + // the block is fully constructed can its ID be computed.
+		// after the block is fully constructed can its ID be computed. We emulate this step here to verify that the
+		// deferred `dbUpdates` have been correctly constructed. Thereby, the expected mock methods should be called,
+		// which is asserted by the testify framework.
+		blockID := s.candidate.ID()
+		return dbUpdates.Execute(lctx, blockID, rw)
+	})
+	require.NoError(s.T(), err)
+}
+
+// TestBuild_HappyPath tests that the hierarchical epoch state machine maintains the index of epoch states, and stores
+// the updated epoch state in the respective storage, when updates were made to the epoch state.
+// This test also ensures that the updated state ID is committed in the KV store.
+func (s *EpochStateMachineSuite) TestBuild_HappyPath() {
+	s.happyPathStateMachine.On("ParentState").Return(s.parentEpochState)
+	updatedState := unittest.EpochStateFixture().EpochStateEntry
+	updatedStateID := updatedState.ID()
+	s.happyPathStateMachine.On("Build").Return(updatedState, updatedStateID, true).Once()
+
+	epochSetup := unittest.EpochSetupFixture()
+	epochCommit := unittest.EpochCommitFixture()
+
+	// expect both events to be processed
+	s.happyPathStateMachine.On("ProcessEpochSetup", epochSetup).Return(true, nil).Once()
+	s.happyPathStateMachine.On("ProcessEpochCommit", epochCommit).Return(true, nil).Once()
+
+	w := storagemock.NewWriter(s.T())
+	rw := storagemock.NewReaderBatchWriter(s.T())
+	rw.On("Writer").Return(w).Once() // called by epochStateDB.BatchStore
+	// prepare a DB update for epoch setup
+	s.setupsDB.On("BatchStore", rw, epochSetup).Return(nil).Once()
+
+	// prepare a DB update for epoch commit
+	s.commitsDB.On("BatchStore", rw, epochCommit).Return(nil).Once()
+
+	err := s.stateMachine.EvolveState([]flow.ServiceEvent{epochSetup.ServiceEvent(), epochCommit.ServiceEvent()})
+	require.NoError(s.T(), err)
+
+	// Create a proper lock context proof for the BatchIndex operation
+	err = unittest.WithLock(s.T(), s.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+		// prepare a DB update for epoch state
+		s.epochStateDB.On("BatchIndex", lctx, rw, s.candidate.ID(), updatedStateID).Return(nil).Once()
+		s.epochStateDB.On("BatchStore", w, updatedStateID, updatedState.MinEpochStateEntry).Return(nil).Once()
+		s.mutator.On("SetEpochStateID", updatedStateID).Return(nil).Once()
+
+		dbUpdates, err := s.stateMachine.Build()
+		require.NoError(s.T(), err)
+
+		// Provide the blockID and execute the resulting `dbUpdates`. Thereby, the expected mock methods should be called,
+		// which is asserted by the testify framework. The lock context proof is passed to verify that the BatchIndex
+		// operation receives the proper lock context as required by the storage layer.
+		blockID := s.candidate.ID()
+		return dbUpdates.Execute(lctx, blockID, rw)
+	})
+	require.NoError(s.T(), err)
+}
+
+// TestEpochStateMachine_Constructor tests the behavior of the EpochStateMachine constructor.
+// Specifically, we test the scenario where the EpochCommit Service Event is still missing
+// by the time we cross the `FinalizationSafetyThreshold`. We expect the constructor to select the
+// appropriate internal state machine constructor (HappyPathStateMachine before the threshold
+// and FallbackStateMachine when reaching or exceeding the view threshold).
+// Any exceptions encountered when constructing the internal state machines should be passed up.
+func (s *EpochStateMachineSuite) TestEpochStateMachine_Constructor() { + s.Run("EpochStaking phase", func() { + // Since we are before the epoch commitment deadline, we should instantiate a happy-path state machine + s.Run("before commitment deadline", func() { + happyPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + // expect to be called + happyPathStateMachineFactory.On("Execute", s.candidate.View, s.parentEpochState). + Return(s.happyPathStateMachine, nil).Once() + // don't expect to be called + fallbackPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + + candidate := unittest.BlockHeaderFixture(unittest.HeaderWithView(s.parentEpochState.CurrentEpochSetup.FirstView + 1)) + stateMachine, err := epochs.NewEpochStateMachine( + candidate.View, + candidate.ParentID, + s.setupsDB, + s.commitsDB, + s.epochStateDB, + s.parentState, + s.mutator, + happyPathStateMachineFactory.Execute, + fallbackPathStateMachineFactory.Execute, + ) + require.NoError(s.T(), err) + assert.NotNil(s.T(), stateMachine) + }) + // Since we are past the epoch commitment deadline, and have not entered the EpochCommitted + // phase, we should use the epoch fallback state machine. + s.Run("past commitment deadline", func() { + // don't expect to be called + happyPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + // expect to be called + fallbackPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + + candidate := unittest.BlockHeaderFixture(unittest.HeaderWithView(s.parentEpochState.CurrentEpochSetup.FinalView - 1)) + fallbackPathStateMachineFactory.On("Execute", candidate.View, s.parentEpochState). + Return(s.happyPathStateMachine, nil).Once() + stateMachine, err := epochs.NewEpochStateMachine( + candidate.View, + candidate.ParentID, + s.setupsDB, + s.commitsDB, + s.epochStateDB, + s.parentState, + s.mutator, + happyPathStateMachineFactory.Execute, + fallbackPathStateMachineFactory.Execute, + ) + require.NoError(s.T(), err) + assert.NotNil(s.T(), stateMachine) + }) + }) + + s.Run("EpochSetup phase", func() { + s.parentEpochState = unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) + s.parentEpochState.NextEpochCommit = nil + s.parentEpochState.NextEpoch.CommitID = flow.ZeroID + + // Since we are before the epoch commitment deadline, we should instantiate a happy-path state machine + s.Run("before commitment deadline", func() { + happyPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + // don't expect to be called + fallbackPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + + candidate := unittest.BlockHeaderFixture(unittest.HeaderWithView(s.parentEpochState.CurrentEpochSetup.FirstView + 1)) + // expect to be called + happyPathStateMachineFactory.On("Execute", candidate.View, s.parentEpochState). + Return(s.happyPathStateMachine, nil).Once() + stateMachine, err := epochs.NewEpochStateMachine( + candidate.View, + candidate.ParentID, + s.setupsDB, + s.commitsDB, + s.epochStateDB, + s.parentState, + s.mutator, + happyPathStateMachineFactory.Execute, + fallbackPathStateMachineFactory.Execute, + ) + require.NoError(s.T(), err) + assert.NotNil(s.T(), stateMachine) + }) + // Since we are past the epoch commitment deadline, and have not entered the EpochCommitted + // phase, we should use the epoch fallback state machine. 
+ s.Run("past commitment deadline", func() { + // don't expect to be called + happyPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + fallbackPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + + candidate := unittest.BlockHeaderFixture(unittest.HeaderWithView(s.parentEpochState.CurrentEpochSetup.FinalView - 1)) + // expect to be called + fallbackPathStateMachineFactory.On("Execute", candidate.View, s.parentEpochState). + Return(s.happyPathStateMachine, nil).Once() + stateMachine, err := epochs.NewEpochStateMachine( + candidate.View, + candidate.ParentID, + s.setupsDB, + s.commitsDB, + s.epochStateDB, + s.parentState, + s.mutator, + happyPathStateMachineFactory.Execute, + fallbackPathStateMachineFactory.Execute, + ) + require.NoError(s.T(), err) + assert.NotNil(s.T(), stateMachine) + }) + }) + + s.Run("EpochCommitted phase", func() { + s.parentEpochState = unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) + // Since we are before the epoch commitment deadline, we should instantiate a happy-path state machine + s.Run("before commitment deadline", func() { + happyPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + // expect to be called + happyPathStateMachineFactory.On("Execute", s.candidate.View, s.parentEpochState). + Return(s.happyPathStateMachine, nil).Once() + // don't expect to be called + fallbackPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + + candidate := unittest.BlockHeaderFixture(unittest.HeaderWithView(s.parentEpochState.CurrentEpochSetup.FirstView + 1)) + stateMachine, err := epochs.NewEpochStateMachine( + candidate.View, + candidate.ParentID, + s.setupsDB, + s.commitsDB, + s.epochStateDB, + s.parentState, + s.mutator, + happyPathStateMachineFactory.Execute, + fallbackPathStateMachineFactory.Execute, + ) + require.NoError(s.T(), err) + assert.NotNil(s.T(), stateMachine) + }) + // Despite being past the epoch commitment deadline, since we are in the EpochCommitted phase + // already, we should proceed with the happy-path state machine + s.Run("past commitment deadline", func() { + happyPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + // don't expect to be called + fallbackPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T()) + + candidate := unittest.BlockHeaderFixture(unittest.HeaderWithView(s.parentEpochState.CurrentEpochSetup.FinalView - 1)) + // expect to be called + happyPathStateMachineFactory.On("Execute", candidate.View, s.parentEpochState). 
+				Return(s.happyPathStateMachine, nil).Once()
+			stateMachine, err := epochs.NewEpochStateMachine(
+				candidate.View,
+				candidate.ParentID,
+				s.setupsDB,
+				s.commitsDB,
+				s.epochStateDB,
+				s.parentState,
+				s.mutator,
+				happyPathStateMachineFactory.Execute,
+				fallbackPathStateMachineFactory.Execute,
+			)
+			require.NoError(s.T(), err)
+			assert.NotNil(s.T(), stateMachine)
+		})
+	})
+
+	// if a state machine constructor returns an error, the stateMutator constructor should fail
+	// and propagate the error to the caller
+	s.Run("state machine constructor returns error", func() {
+		s.Run("happy-path", func() {
+			exception := irrecoverable.NewExceptionf("exception")
+			happyPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T())
+			happyPathStateMachineFactory.On("Execute", s.candidate.View, s.parentEpochState).Return(nil, exception).Once()
+			fallbackPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T())
+
+			stateMachine, err := epochs.NewEpochStateMachine(
+				s.candidate.View,
+				s.candidate.ParentID,
+				s.setupsDB,
+				s.commitsDB,
+				s.epochStateDB,
+				s.parentState,
+				s.mutator,
+				happyPathStateMachineFactory.Execute,
+				fallbackPathStateMachineFactory.Execute,
+			)
+			assert.ErrorIs(s.T(), err, exception)
+			assert.Nil(s.T(), stateMachine)
+		})
+		s.Run("epoch-fallback", func() {
+			s.parentEpochState.EpochFallbackTriggered = true // ensure we use epoch-fallback state machine
+			exception := irrecoverable.NewExceptionf("exception")
+			happyPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T())
+			fallbackPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T())
+			fallbackPathStateMachineFactory.On("Execute", s.candidate.View, s.parentEpochState).Return(nil, exception).Once()
+
+			stateMachine, err := epochs.NewEpochStateMachine(
+				s.candidate.View,
+				s.candidate.ParentID,
+				s.setupsDB,
+				s.commitsDB,
+				s.epochStateDB,
+				s.parentState,
+				s.mutator,
+				happyPathStateMachineFactory.Execute,
+				fallbackPathStateMachineFactory.Execute,
+			)
+			assert.ErrorIs(s.T(), err, exception)
+			assert.Nil(s.T(), stateMachine)
+		})
+	})
+}
+
+// TestEvolveState_InvalidEpochSetup tests that the hierarchical state machine rejects invalid epoch setup events
+// (indicated by the `InvalidServiceEventError` sentinel error) and replaces the happy path state machine with the
+// fallback state machine. Errors other than `InvalidServiceEventError` should be bubbled up as exceptions.
+func (s *EpochStateMachineSuite) TestEvolveState_InvalidEpochSetup() {
+	s.Run("invalid-epoch-setup", func() {
+		happyPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T())
+		happyPathStateMachineFactory.On("Execute", s.candidate.View, s.parentEpochState).Return(s.happyPathStateMachine, nil).Once()
+		fallbackPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T())
+		stateMachine, err := epochs.NewEpochStateMachine(
+			s.candidate.View,
+			s.candidate.ParentID,
+			s.setupsDB,
+			s.commitsDB,
+			s.epochStateDB,
+			s.parentState,
+			s.mutator,
+			happyPathStateMachineFactory.Execute,
+			fallbackPathStateMachineFactory.Execute,
+		)
+		require.NoError(s.T(), err)
+
+		epochSetup := unittest.EpochSetupFixture()
+
+		s.happyPathStateMachine.On("ParentState").Return(s.parentEpochState)
+		s.happyPathStateMachine.On("ProcessEpochSetup", epochSetup).
+			Return(false, protocol.NewInvalidServiceEventErrorf("")).Once()
+
+		fallbackStateMachine := mock.NewStateMachine(s.T())
+		fallbackStateMachine.On("ParentState").Return(s.parentEpochState)
+		fallbackStateMachine.On("ProcessEpochSetup", epochSetup).Return(false, nil).Once()
+		fallbackPathStateMachineFactory.On("Execute", s.candidate.View, s.parentEpochState).Return(fallbackStateMachine, nil).Once()
+
+		err = stateMachine.EvolveState([]flow.ServiceEvent{epochSetup.ServiceEvent()})
+		require.NoError(s.T(), err)
+	})
+	s.Run("process-epoch-setup-exception", func() {
+		epochSetup := unittest.EpochSetupFixture()
+
+		exception := errors.New("exception")
+		s.happyPathStateMachine.On("ProcessEpochSetup", epochSetup).Return(false, exception).Once()
+
+		err := s.stateMachine.EvolveState([]flow.ServiceEvent{epochSetup.ServiceEvent()})
+		require.Error(s.T(), err)
+		require.False(s.T(), protocol.IsInvalidServiceEventError(err))
+	})
+}
+
+// TestEvolveState_InvalidEpochCommit tests that the hierarchical state machine rejects invalid epoch commit events
+// (indicated by the `InvalidServiceEventError` sentinel error) and replaces the happy path state machine with the
+// fallback state machine. Errors other than `InvalidServiceEventError` should be bubbled up as exceptions.
+func (s *EpochStateMachineSuite) TestEvolveState_InvalidEpochCommit() {
+	s.Run("invalid-epoch-commit", func() {
+		happyPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T())
+		happyPathStateMachineFactory.On("Execute", s.candidate.View, s.parentEpochState).Return(s.happyPathStateMachine, nil).Once()
+		fallbackPathStateMachineFactory := mock.NewStateMachineFactoryMethod(s.T())
+		stateMachine, err := epochs.NewEpochStateMachine(
+			s.candidate.View,
+			s.candidate.ParentID,
+			s.setupsDB,
+			s.commitsDB,
+			s.epochStateDB,
+			s.parentState,
+			s.mutator,
+			happyPathStateMachineFactory.Execute,
+			fallbackPathStateMachineFactory.Execute,
+		)
+		require.NoError(s.T(), err)
+
+		epochCommit := unittest.EpochCommitFixture()
+
+		s.happyPathStateMachine.On("ParentState").Return(s.parentEpochState)
+		s.happyPathStateMachine.On("ProcessEpochCommit", epochCommit).
+			Return(false, protocol.NewInvalidServiceEventErrorf("")).Once()
+
+		fallbackStateMachine := mock.NewStateMachine(s.T())
+		fallbackStateMachine.On("ParentState").Return(s.parentEpochState)
+		fallbackStateMachine.On("ProcessEpochCommit", epochCommit).Return(false, nil).Once()
+		fallbackPathStateMachineFactory.On("Execute", s.candidate.View, s.parentEpochState).Return(fallbackStateMachine, nil).Once()
+
+		err = stateMachine.EvolveState([]flow.ServiceEvent{epochCommit.ServiceEvent()})
+		require.NoError(s.T(), err)
+	})
+	s.Run("process-epoch-commit-exception", func() {
+		epochCommit := unittest.EpochCommitFixture()
+
+		exception := errors.New("exception")
+		s.happyPathStateMachine.On("ProcessEpochCommit", epochCommit).Return(false, exception).Once()
+
+		err := s.stateMachine.EvolveState([]flow.ServiceEvent{epochCommit.ServiceEvent()})
+		require.Error(s.T(), err)
+		require.False(s.T(), protocol.IsInvalidServiceEventError(err))
+	})
+}
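Both tests above exercise the same recovery pattern: a rejected service event (signalled by the `InvalidServiceEventError` sentinel) demotes the happy-path machine to the fallback machine, while any other error is escalated as an exception. A hedged, self-contained sketch of that dispatch logic (all types and names here are illustrative stand-ins, not the repository's API):

```go
package main

import (
	"errors"
	"fmt"
)

// errInvalidServiceEvent stands in for the protocol.InvalidServiceEventError sentinel.
var errInvalidServiceEvent = errors.New("invalid service event")

// machine is a stand-in for the internal state machine interface.
type machine interface {
	Process(event string) error
}

type happyPath struct{}

// the happy-path machine rejects malformed events with the sentinel error
func (happyPath) Process(event string) error {
	if event == "malformed" {
		return fmt.Errorf("rejecting %q: %w", event, errInvalidServiceEvent)
	}
	return nil
}

type fallbackPath struct{}

// the fallback machine tolerates any event without evolving the epoch state
func (fallbackPath) Process(string) error { return nil }

// evolve mirrors the dispatch under test: sentinel errors demote to the
// fallback machine (re-playing the event there); other errors are exceptions.
func evolve(current, fallback machine, event string) (machine, error) {
	err := current.Process(event)
	if err == nil {
		return current, nil
	}
	if errors.Is(err, errInvalidServiceEvent) {
		return fallback, fallback.Process(event)
	}
	return nil, fmt.Errorf("unexpected exception processing event: %w", err)
}

func main() {
	active, err := evolve(happyPath{}, fallbackPath{}, "malformed")
	fmt.Printf("%T %v\n", active, err) // main.fallbackPath <nil>
}
```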
+
+// TestEvolveStateTransitionToNextEpoch tests that EpochStateMachine transitions to the next epoch
+// when the epoch has been committed, and we are at the first block of the next epoch.
+func (s *EpochStateMachineSuite) TestEvolveStateTransitionToNextEpoch() {
+	parentState := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState())
+	s.happyPathStateMachine.On("ParentState").Unset()
+	s.happyPathStateMachine.On("ParentState").Return(parentState)
+	// we are at the first block of the next epoch
+	s.happyPathStateMachine.On("View").Return(parentState.CurrentEpochSetup.FinalView + 1)
+	s.happyPathStateMachine.On("TransitionToNextEpoch").Return(nil).Once()
+	err := s.stateMachine.EvolveState(nil)
+	require.NoError(s.T(), err)
+}
+
+// TestEvolveStateTransitionToNextEpoch_Error tests that an error observed while transitioning
+// to the next epoch is propagated to the caller.
+func (s *EpochStateMachineSuite) TestEvolveStateTransitionToNextEpoch_Error() {
+	parentState := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState())
+	s.happyPathStateMachine.On("ParentState").Unset()
+	s.happyPathStateMachine.On("ParentState").Return(parentState)
+	// we are at the first block of the next epoch
+	s.happyPathStateMachine.On("View").Return(parentState.CurrentEpochSetup.FinalView + 1)
+	exception := errors.New("exception")
+	s.happyPathStateMachine.On("TransitionToNextEpoch").Return(exception).Once()
+	err := s.stateMachine.EvolveState(nil)
+	require.Error(s.T(), err, exception)
+	require.ErrorContains(s.T(), err, "[exception!]")
+	require.False(s.T(), protocol.IsInvalidServiceEventError(err))
+}
+
+// TestEvolveState_EventsAreFiltered tests that EpochStateMachine filters out all events that are not expected.
+func (s *EpochStateMachineSuite) TestEvolveState_EventsAreFiltered() {
+	err := s.stateMachine.EvolveState([]flow.ServiceEvent{
+		unittest.ProtocolStateVersionUpgradeFixture().ServiceEvent(),
+	})
+	require.NoError(s.T(), err)
+}
+
+// TestEvolveStateTransitionToNextEpoch_WithInvalidStateTransition tests that EpochStateMachine transitions to the next epoch
+// if an invalid state transition has been detected in a block which triggers transitioning to the next epoch.
+// In such a situation, we still need to enter the next epoch (because it has already been committed), but persist in the
+// state that we have entered Epoch fallback mode (`flow.MinEpochStateEntry.EpochFallbackTriggered` is set to `true`).
+// This test ensures that we don't drop the previously committed next epoch.
+func (s *EpochStateMachineSuite) TestEvolveStateTransitionToNextEpoch_WithInvalidStateTransition() { + s.parentEpochState = unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()) + s.candidate.View = s.parentEpochState.NextEpochSetup.FirstView + happyPathTelemetry := protocol_statemock.NewStateMachineTelemetryConsumer(s.T()) + fallbackPathTelemetry := protocol_statemock.NewStateMachineTelemetryConsumer(s.T()) + happyPathTelemetryFactory := protocol_statemock.NewStateMachineEventsTelemetryFactory(s.T()) + fallbackTelemetryFactory := protocol_statemock.NewStateMachineEventsTelemetryFactory(s.T()) + happyPathTelemetryFactory.On("Execute", s.candidate.View).Return(happyPathTelemetry).Once() + fallbackTelemetryFactory.On("Execute", s.candidate.View).Return(fallbackPathTelemetry).Once() + stateMachine, err := epochs.NewEpochStateMachineFactory( + s.setupsDB, + s.commitsDB, + s.epochStateDB, + happyPathTelemetryFactory.Execute, + fallbackTelemetryFactory.Execute, + ).Create(s.candidate.View, s.candidate.ParentID, s.parentState, s.mutator) + require.NoError(s.T(), err) + + invalidServiceEvent := unittest.EpochSetupFixture() + happyPathTelemetry.On("OnServiceEventReceived", invalidServiceEvent.ServiceEvent()).Return().Once() + happyPathTelemetry.On("OnInvalidServiceEvent", invalidServiceEvent.ServiceEvent(), mocks.Anything).Return().Once() + fallbackPathTelemetry.On("OnServiceEventReceived", invalidServiceEvent.ServiceEvent()).Return().Once() + fallbackPathTelemetry.On("OnInvalidServiceEvent", invalidServiceEvent.ServiceEvent(), mocks.Anything).Return().Once() + err = stateMachine.EvolveState([]flow.ServiceEvent{invalidServiceEvent.ServiceEvent()}) + require.NoError(s.T(), err) + + // Create a proper lock context proof for the BatchIndex operation + err = unittest.WithLock(s.T(), s.lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + s.epochStateDB.On("BatchIndex", lctx, mocks.Anything, s.candidate.ID(), mocks.Anything).Return(nil).Once() + + expectedEpochState := &flow.MinEpochStateEntry{ + PreviousEpoch: s.parentEpochState.CurrentEpoch.Copy(), + CurrentEpoch: *s.parentEpochState.NextEpoch.Copy(), + NextEpoch: nil, + EpochFallbackTriggered: true, + } + + s.epochStateDB.On("BatchStore", mocks.Anything, expectedEpochState.ID(), expectedEpochState).Return(nil).Once() + s.mutator.On("SetEpochStateID", expectedEpochState.ID()).Return().Once() + + dbOps, err := stateMachine.Build() + require.NoError(s.T(), err) + + w := storagemock.NewWriter(s.T()) + rw := storagemock.NewReaderBatchWriter(s.T()) + rw.On("Writer").Return(w).Once() // called by epochStateDB.BatchStore + + // Storage operations are deferred, because block ID is not known when the block is newly constructed. Only at the + // end after the block is fully constructed, its ID can be computed. We emulate this step here to verify that the + // deferred `dbOps` have been correctly constructed. Thereby, the expected mock methods should be called, + // which is asserted by the testify framework. 
+		blockID := s.candidate.ID()
+		return dbOps.Execute(lctx, blockID, rw)
+	})
+	require.NoError(s.T(), err)
+}
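A recurring pattern in the tests above: storage operations are captured as deferred updates, because the block ID only exists once the block has been fully built. A minimal sketch of that idea, assuming a simplified signature (the repository's `deferred.DeferredBlockPersist` API may differ):

```go
package main

import "fmt"

// persistOp is one deferred storage operation; it receives the block ID
// only at execution time, after the block has been fully constructed.
type persistOp func(blockID string) error

// deferredPersist accumulates operations while the block is being built.
type deferredPersist struct{ ops []persistOp }

func (d *deferredPersist) Add(op persistOp) { d.ops = append(d.ops, op) }

// Execute runs all deferred operations with the now-known block ID.
func (d *deferredPersist) Execute(blockID string) error {
	for _, op := range d.ops {
		if err := op(blockID); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var dbOps deferredPersist
	stateID := "state-123"
	// queued while building the block, before its ID exists:
	dbOps.Add(func(blockID string) error {
		fmt.Printf("index epoch state %s by block %s\n", stateID, blockID)
		return nil
	})
	// ... block construction finishes, and the ID becomes known:
	_ = dbOps.Execute("block-abc")
}
```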
diff --git a/state/protocol/protocol_state/kvstore.go b/state/protocol/protocol_state/kvstore.go
new file mode 100644
index 00000000000..592870e1894
--- /dev/null
+++ b/state/protocol/protocol_state/kvstore.go
@@ -0,0 +1,159 @@
+package protocol_state
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage/deferred"
+)
+
+// This file contains versioned read-write interfaces to the Protocol State's
+// key-value store, which are used by the Protocol State Machine.
+//
+// When a key is added or removed, this requires a new protocol state version:
+//   - Create a new versioned model in ./kvstore/models.go (eg. modelv3 if latest model is modelv2)
+//   - Update the protocol.KVStoreReader and KVStoreAPI interfaces to include any new keys
+
+// KVStoreAPI is the latest interface to the Protocol State key-value store, which implements the 'Prototype'
+// pattern for replicating protocol state between versions.
+//
+// Caution:
+// Engineers evolving this interface must ensure that it is backwards-compatible
+// with all versions of Protocol State Snapshots that can be retrieved from the local
+// database, which should exactly correspond to the versioned model types defined in
+// ./kvstore/models.go
+type KVStoreAPI interface {
+	protocol.KVStoreReader
+
+	// Replicate instantiates a Protocol State Snapshot of the given `protocolVersion`.
+	// We refer to the Protocol State Snapshot whose `Replicate` method is called
+	// as the 'Parent Snapshot'.
+	// If the `protocolVersion` matches the version of the Parent Snapshot, `Replicate` behaves
+	// exactly like a deep copy. If `protocolVersion` is newer, the data model corresponding
+	// to the newer version is used and values from the Parent Snapshot are replicated into
+	// the new data model. In all cases, the new Snapshot can be mutated without changing the
+	// Parent Snapshot.
+	//
+	// Caution:
+	// Implementors of this function decide on their own how to perform the migration from the parent protocol version
+	// to the given `protocolVersion`. It is required that the outcome of `Replicate` is a valid KV store model which can be
+	// incorporated in the protocol state without extra operations.
+	// Expected errors during normal operations:
+	//   - kvstore.ErrIncompatibleVersionChange if replicating the Parent Snapshot into a Snapshot
+	//     with the specified `protocolVersion` is not supported.
+	Replicate(protocolVersion uint64) (KVStoreMutator, error)
+}
+
+// KVStoreMutator is the latest read-writer interface to the Protocol State key-value store.
+//
+// Caution:
+// Engineers evolving this interface must ensure that it is backwards-compatible
+// with all versions of Protocol State Snapshots that can be retrieved from the local
+// database, which should exactly correspond to the versioned model types defined in
+// ./kvstore/models.go
+type KVStoreMutator interface {
+	protocol.KVStoreReader
+
+	// v0/v1
+
+	// SetVersionUpgrade sets the protocol upgrade version. This method is used
+	// to update the Protocol State version when a flow.ProtocolStateVersionUpgrade is processed.
+	// It contains the new version and the view at which it has to be applied.
+	SetVersionUpgrade(version *protocol.ViewBasedActivator[uint64])
+
+	// SetEpochStateID sets the state ID of the epoch state.
+	// This method is used to commit the epoch state to the KV store when the state of the epoch is updated.
+	SetEpochStateID(stateID flow.Identifier)
+
+	// SetEpochExtensionViewCount sets the number of views for a hypothetical epoch extension.
+	// Expected errors during normal operations:
+	//   - kvstore.ErrInvalidValue - if the view count is less than FinalizationSafetyThreshold*2.
+	SetEpochExtensionViewCount(viewCount uint64) error
+}
+
+// OrthogonalStoreStateMachine represents a state machine that exclusively evolves its state P.
+// The state's specific type P is kept as a generic. Generally, P is the type corresponding
+// to one specific key in the Key-Value store.
+//
+// Orthogonal State Machines:
+// Orthogonality means that state machines can operate completely independently and work on disjoint
+// sub-states. By convention, they all consume the same inputs (incl. the ordered sequence of
+// Service Events sealed in one block). In other words, each state machine has full visibility into
+// the inputs, but each draws its own independent conclusions (maintaining its own exclusive state).
+//
+// The Dynamic Protocol State comprises a Key-Value-Store. We loosely associate each key-value-pair
+// with a dedicated state machine operating exclusively on this key-value pair. A one-to-one
+// correspondence between key-value-pair and state machine should be the default, but is not strictly
+// required. However, we strictly require that no key-value-pair is being operated on by *more* than
+// one state machine.
+//
+// The Protocol State is the framework, which orchestrates the orthogonal state machines, feeds them
+// with inputs, post-processes the outputs and overall manages state machines' life-cycle from block
+// to block. New key-value pairs and corresponding state machines can easily be added by
+//   - adding a new entry to the Key-Value-Store's data model (file `./kvstore/models.go`)
+//   - implementing the `OrthogonalStoreStateMachine` interface
+//
+// For more details see `./Readme.md`
+//
+// NOT CONCURRENCY SAFE
+type OrthogonalStoreStateMachine[P any] interface {
+
+	// Build returns:
+	//   - database updates necessary for persisting the updated protocol sub-state and its *dependencies*.
+	//     It may contain updates for the sub-state itself and for any dependency that is affected by the update.
+	//     Deferred updates must be applied in a transaction to ensure atomicity.
+	//
+	// No errors are expected during normal operations.
+	Build() (*deferred.DeferredBlockPersist, error)
+
+	// EvolveState applies the state change(s) on sub-state P for the candidate block (under construction).
+	// Information that potentially changes the Epoch state (compared to the parent block's state):
+	//   - Service Events sealed in the candidate block
+	//   - the candidate block's view (already provided at construction time)
+	//
+	// SAFETY REQUIREMENTS:
+	//   - The seals for the execution results, from which the `sealedServiceEvents` originate,
+	//     must be protocol compliant.
+	//   - `sealedServiceEvents` must list the service Events in chronological order. This can be
+	//     achieved by arranging the sealed execution results in order of increasing block height.
+	//     Within each execution result, the service events are in chronological order.
+	//   - EvolveState MUST be called for all candidate blocks, even if `sealedServiceEvents` is empty!
+	//     This is because reaching a specific view can also trigger state changes. (e.g. not having
+	//     received the EpochCommit event for the next epoch, but approaching the end of the current epoch.)
+	//
+	// CAUTION:
+	// Per convention, the input seals from the block payload have already been confirmed to be protocol compliant.
+	// Hence, the service events in the sealed execution results represent the *honest* execution path. Therefore,
+	// the sealed service events should encode a valid evolution of the protocol state -- provided the system smart
+	// contracts are correct. As we can rule out byzantine attacks as the source of failures, the only remaining
+	// sources of problems can be (a) bugs in the system smart contracts or (b) bugs in the node implementation.
+	//   - A service event not representing a valid state transition despite all consistency checks passing is
+	//     indicative of case (a) and _should be handled_ internally by the respective state machine. Otherwise,
+	//     any bug or unforeseen edge cases in the system smart contracts would result in a consensus halt, due to
+	//     errors while evolving the protocol state.
+	//   - Consistency or sanity checks failing within the OrthogonalStoreStateMachine are likely the symptom of an
+	//     internal bug in the node software or state corruption, i.e. case (b). This is the only scenario where the
+	//     error return of this function is not nil. If such an exception is returned, continuing is not an option.
+	//
+	// No errors are expected during normal operations.
+	EvolveState(sealedServiceEvents []flow.ServiceEvent) error
+
+	// View returns the view associated with this state machine.
+	// The view of the state machine equals the view of the block carrying the respective updates.
+	View() uint64
+
+	// ParentState returns the parent state associated with this state machine.
+	ParentState() P
+}
+
+// KeyValueStoreStateMachine is a type alias for a state machine that operates on an instance of KVStoreReader.
+// StateMutator uses this type to store and perform operations on orthogonal state machines.
+type KeyValueStoreStateMachine = OrthogonalStoreStateMachine[protocol.KVStoreReader]
+
+// KeyValueStoreStateMachineFactory is an abstract factory interface for creating KeyValueStoreStateMachine instances.
+// It is used to separate the creation of state machines from their usage, which reduces coupling and improves testability.
+// For each concrete type injected into the evolving protocol state, a dedicated abstract factory has to be created.
+type KeyValueStoreStateMachineFactory interface {
+	// Create creates a new instance of an underlying type that operates on the KV Store and is created for a specific candidate block.
+	// No errors are expected during normal operations.
+	Create(candidateView uint64, parentID flow.Identifier, parentState protocol.KVStoreReader, mutator KVStoreMutator) (KeyValueStoreStateMachine, error)
+}
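To make the interface shape above concrete, here is a hedged toy implementation: a state machine whose entire sub-state P is a single counter of observed service events. It uses simplified stand-ins (plain strings for events, an `int` sub-state), not the repository's actual types:

```go
package main

import "fmt"

// counterMachine is a toy orthogonal state machine: its exclusive sub-state P
// is just the number of service events observed up to (and including) a block.
type counterMachine struct {
	view   uint64
	parent int // parent block's sub-state
	count  int // evolving sub-state for the candidate block
}

func newCounterMachine(view uint64, parent int) *counterMachine {
	return &counterMachine{view: view, parent: parent, count: parent}
}

// EvolveState must be called for every candidate block, even with no events,
// because reaching a view can itself change state (not the case for this toy).
func (m *counterMachine) EvolveState(sealedServiceEvents []string) error {
	m.count += len(sealedServiceEvents)
	return nil
}

func (m *counterMachine) View() uint64     { return m.view }
func (m *counterMachine) ParentState() int { return m.parent }

// Build returns the updated sub-state; the real interface returns deferred
// database updates instead.
func (m *counterMachine) Build() int { return m.count }

func main() {
	sm := newCounterMachine(42, 7)
	_ = sm.EvolveState([]string{"EpochSetup", "EpochCommit"})
	fmt.Println(sm.Build()) // 9
}
```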
diff --git a/state/protocol/protocol_state/kvstore/encoding.go b/state/protocol/protocol_state/kvstore/encoding.go
new file mode 100644
index 00000000000..30025b353f8
--- /dev/null
+++ b/state/protocol/protocol_state/kvstore/encoding.go
@@ -0,0 +1,42 @@
+package kvstore
+
+import (
+	"bytes"
+
+	"github.com/vmihailenco/msgpack/v4"
+
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+)
+
+// versionedEncode is a helper function for implementing VersionedEncodable.
+// No errors are expected during normal operation.
+func versionedEncode(version uint64, pairs any) (uint64, []byte, error) {
+	bz, err := msgpack.Marshal(pairs)
+	if err != nil {
+		return 0, nil, irrecoverable.NewExceptionf("could not encode kvstore (version=%d): %w", version, err)
+	}
+	return version, bz, nil
+}
+
+// VersionedDecode decodes a serialized key-value store instance with the given version.
+// Errors:
+//   - ErrUnsupportedVersion if input version is not supported
+func VersionedDecode(version uint64, bz []byte) (protocol_state.KVStoreAPI, error) {
+	var target protocol_state.KVStoreAPI
+	switch version {
+	case 0:
+		target = new(Modelv0)
+	case 1:
+		target = new(Modelv1)
+	case 2:
+		target = new(Modelv2)
+	default:
+		return nil, ErrUnsupportedVersion
+	}
+	err := msgpack.NewDecoder(bytes.NewBuffer(bz)).Decode(&target)
+	if err != nil {
+		return nil, irrecoverable.NewExceptionf("could not decode kvstore (version=%d): %w", version, err)
+	}
+	return target, nil
+}
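The encoding above dispatches decoding on an externally stored version number rather than on anything inside the payload. A hedged, minimal round-trip illustrating the same shape with msgpack (toy model types, not the repository's):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
)

var errUnsupportedVersion = errors.New("unsupported version")

// toy versioned models: v1 adds a field on top of v0
type ModelV0 struct{ EpochStateID string }
type ModelV1 struct {
	ModelV0
	ExtensionViewCount uint64
}

// decode picks the concrete model type by the externally persisted version.
func decode(version uint64, bz []byte) (any, error) {
	var target any
	switch version {
	case 0:
		target = new(ModelV0)
	case 1:
		target = new(ModelV1)
	default:
		return nil, errUnsupportedVersion
	}
	if err := msgpack.Unmarshal(bz, target); err != nil {
		return nil, fmt.Errorf("could not decode (version=%d): %w", version, err)
	}
	return target, nil
}

func main() {
	bz, _ := msgpack.Marshal(&ModelV1{ModelV0{"state-1"}, 5000})
	m, err := decode(1, bz) // the version is persisted alongside the blob
	fmt.Println(m, err)
}
```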
diff --git a/state/protocol/protocol_state/kvstore/errors.go b/state/protocol/protocol_state/kvstore/errors.go
new file mode 100644
index 00000000000..26994fef5d0
--- /dev/null
+++ b/state/protocol/protocol_state/kvstore/errors.go
@@ -0,0 +1,41 @@
+package kvstore
+
+import "errors"
+
+// ErrKeyNotSet is a sentinel returned when a key is queried and no value has been set.
+// The key must exist in the currently active key-value store version. This sentinel
+// is used to communicate an empty/unset value rather than using zero or nil values.
+// This sentinel is applicable on a key-by-key basis: some keys will always have a value
+// set, others will support unset values.
+var ErrKeyNotSet = errors.New("no value for requested key in Protocol State's kvstore")
+
+// ErrKeyNotSupported is a sentinel returned when a key is read or written, but
+// the key does not exist in the currently active version of the key-value store.
+// This can happen in two circumstances, for example:
+//  1. Current model is v2, software supports v3, and we query a key which was newly added in v3.
+//  2. Current model is v3 and we query a key which was added in v2 then removed in v3
+var ErrKeyNotSupported = errors.New("protocol state's kvstore does not support the specified key at this version")
+
+// ErrUnsupportedVersion is a sentinel returned when we attempt to decode a key-value
+// store instance, but provide an unsupported version. This could happen if we accept
+// an already-encoded key-value store instance from an external source (should be
+// avoided in general) or if the node software version is downgraded.
+var ErrUnsupportedVersion = errors.New("unsupported version for the Protocol State's kvstore")
+
+// ErrInvalidUpgradeVersion is a sentinel returned when we attempt to set a new kvstore version
+// via a ProtocolStateVersionUpgrade event, but the new version is not strictly greater than
+// the current version. This error happens when the smart contract has a different understanding of
+// the protocol state version than the node software.
+var ErrInvalidUpgradeVersion = errors.New("invalid upgrade version for the Protocol State's kvstore")
+
+// ErrInvalidActivationView is a sentinel returned when we attempt to process a KV store update,
+// which has an activation view `V` so that `CurrentView + SafetyBuffer < V` does NOT hold.
+var ErrInvalidActivationView = errors.New("invalid activation view for the new Protocol State version")
+
+// ErrIncompatibleVersionChange is a sentinel returned when we attempt to replicate a parent KV store snapshot into a snapshot
+// with the specified `protocolVersion`, but such an operation is not supported by the parent snapshot.
+var ErrIncompatibleVersionChange = errors.New("incompatible version change when replicating the Protocol State's kvstore")
+
+// ErrInvalidValue is a sentinel returned when a value is not considered valid for a given key.
+// This sentinel is applicable on a key-by-key basis: the same value can be considered valid/invalid for different keys.
+var ErrInvalidValue = errors.New("invalid value for the requested key in Protocol State's kvstore")
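These sentinels are intended to survive error wrapping, so callers can classify failures with `errors.Is` no matter how many layers annotate the error on the way up. A small sketch of the intended usage (the call site is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

var errKeyNotSupported = errors.New("kvstore does not support the specified key at this version")

// readComponentVersion stands in for a versioned-model getter that may not
// support the requested key at the current model version.
func readComponentVersion(modelVersion uint64) (string, error) {
	if modelVersion < 2 {
		// annotate while preserving the sentinel via %w
		return "", fmt.Errorf("component version (model v%d): %w", modelVersion, errKeyNotSupported)
	}
	return "1.0", nil
}

func main() {
	_, err := readComponentVersion(1)
	// errors.Is still matches through the wrapping layer:
	fmt.Println(errors.Is(err, errKeyNotSupported)) // true
}
```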
diff --git a/state/protocol/protocol_state/kvstore/factory.go b/state/protocol/protocol_state/kvstore/factory.go
new file mode 100644
index 00000000000..ddeee1e9f83
--- /dev/null
+++ b/state/protocol/protocol_state/kvstore/factory.go
@@ -0,0 +1,48 @@
+package kvstore
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+)
+
+// PSVersionUpgradeStateMachineFactory is a factory for creating PSVersionUpgradeStateMachine instances.
+type PSVersionUpgradeStateMachineFactory struct {
+	telemetry protocol_state.StateMachineTelemetryConsumer
+}
+
+var _ protocol_state.KeyValueStoreStateMachineFactory = (*PSVersionUpgradeStateMachineFactory)(nil)
+
+// NewPSVersionUpgradeStateMachineFactory returns a factory for instantiating PSVersionUpgradeStateMachines.
+// The created state machines report their operations to the provided telemetry consumer.
+func NewPSVersionUpgradeStateMachineFactory(telemetry protocol_state.StateMachineTelemetryConsumer) *PSVersionUpgradeStateMachineFactory {
+	return &PSVersionUpgradeStateMachineFactory{
+		telemetry: telemetry,
+	}
+}
+
+// Create instantiates a new PSVersionUpgradeStateMachine, which processes ProtocolStateVersionUpgrade ServiceEvents
+// that are sealed by the candidate block (possibly still under construction) with the given view.
+// No errors are expected during normal operations.
+func (f *PSVersionUpgradeStateMachineFactory) Create(candidateView uint64, _ flow.Identifier, parentState protocol.KVStoreReader, mutator protocol_state.KVStoreMutator) (protocol_state.KeyValueStoreStateMachine, error) {
+	return NewPSVersionUpgradeStateMachine(f.telemetry, candidateView, parentState, mutator), nil
+}
+
+// SetValueStateMachineFactory is a factory for creating SetValueStateMachine instances.
+type SetValueStateMachineFactory struct {
+	telemetry protocol_state.StateMachineTelemetryConsumer
+}
+
+var _ protocol_state.KeyValueStoreStateMachineFactory = (*SetValueStateMachineFactory)(nil)
+
+// NewSetValueStateMachineFactory returns a factory for instantiating SetValueStateMachines.
+// The created state machines report their operations to the provided telemetry consumer.
+func NewSetValueStateMachineFactory(telemetry protocol_state.StateMachineTelemetryConsumer) *SetValueStateMachineFactory {
+	return &SetValueStateMachineFactory{telemetry: telemetry}
+}
+
+// Create creates a new instance of SetValueStateMachine.
+// No errors are expected during normal operations.
+func (f *SetValueStateMachineFactory) Create(candidateView uint64, _ flow.Identifier, parentState protocol.KVStoreReader, mutator protocol_state.KVStoreMutator) (protocol_state.KeyValueStoreStateMachine, error) {
+	return NewSetValueStateMachine(f.telemetry, candidateView, parentState, mutator), nil
+}
diff --git a/state/protocol/protocol_state/kvstore/kvstore_storage.go b/state/protocol/protocol_state/kvstore/kvstore_storage.go
new file mode 100644
index 00000000000..fc11fb97b54
--- /dev/null
+++ b/state/protocol/protocol_state/kvstore/kvstore_storage.go
@@ -0,0 +1,91 @@
+package kvstore
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+	"github.com/onflow/flow-go/storage"
+)
+
+// ProtocolKVStore persists different snapshots of key-value stores [KV-stores]. Here, we augment
+// the low-level primitives provided by `storage.ProtocolKVStore` with logic for encoding and
+// decoding the state snapshots into the abstract representation `protocol_state.KVStoreAPI`.
+//
+// TODO (optional): include a cache of the _decoded_ Protocol States, so we don't decode+encode on each consensus view (hot-path)
+type ProtocolKVStore struct {
+	storage.ProtocolKVStore
+}
+
+var _ protocol_state.ProtocolKVStore = (*ProtocolKVStore)(nil)
+
+// NewProtocolKVStore instantiates a ProtocolKVStore for querying & storing deserialized `protocol_state.KVStoreAPIs`.
+// At this abstraction level, we can only handle protocol state snapshots, whose data models are supported by the current
+// software version. There might be serialized snapshots with legacy versions in the database, that are not supported
+// anymore by this software version and can only be retrieved as versioned binary blobs via `storage.ProtocolKVStore`.
+func NewProtocolKVStore(protocolStateSnapshots storage.ProtocolKVStore) *ProtocolKVStore {
+	return &ProtocolKVStore{
+		ProtocolKVStore: protocolStateSnapshots,
+	}
+}
+
+// BatchStore adds the KV-store snapshot in the database using the given ID as key. Per convention, all
+// implementations of [protocol.KVStoreReader] should be able to successfully encode their state into a
+// data blob. If the encoding fails, an error is returned.
+// BatchStore is idempotent, i.e. it accepts repeated calls with the same pairs of (stateID, kvStore).
+// Here, the ID is expected to be a collision-resistant hash of the snapshot (including the
+// ProtocolStateVersion).
+//
+// No error is expected during normal operations.
+func (p *ProtocolKVStore) BatchStore(rw storage.ReaderBatchWriter, stateID flow.Identifier, kvStore protocol.KVStoreReader) error {
+	version, data, err := kvStore.VersionedEncode()
+	if err != nil {
+		return fmt.Errorf("failed to VersionedEncode protocol state: %w", err)
+	}
+	return p.ProtocolKVStore.BatchStore(rw, stateID, &flow.PSKeyValueStoreData{
+		Version: version,
+		Data:    data,
+	})
+}
+
+// ByID retrieves the KV store snapshot with the given ID.
+// Expected errors during normal operations:
+//   - storage.ErrNotFound if no snapshot with the given Identifier is known.
+//   - ErrUnsupportedVersion if input version is not supported
+func (p *ProtocolKVStore) ByID(protocolStateID flow.Identifier) (protocol_state.KVStoreAPI, error) {
+	versionedData, err := p.ProtocolKVStore.ByID(protocolStateID)
+	if err != nil {
+		return nil, fmt.Errorf("could not query KV store with ID %x: %w", protocolStateID, err)
+	}
+	kvStore, err := VersionedDecode(versionedData.Version, versionedData.Data)
+	if err != nil {
+		return nil, fmt.Errorf("could not decode protocol state (version=%d) with ID %x: %w", versionedData.Version, protocolStateID, err)
+	}
+	return kvStore, err
+}
+
+// ByBlockID retrieves the kv-store snapshot that the block with the given ID proposes.
+// CAUTION: this store snapshot requires confirmation by a QC and will only become active at the child block,
+// _after_ validating the QC. Protocol convention:
+//   - Consider block B, whose ingestion might potentially lead to an updated KV store state.
+//     For example, the state changes if we seal some execution results emitting specific service events.
+//   - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. As value,
+//     the hash of the resulting state at the end of processing B is to be used.
+//
+// Expected errors during normal operations:
+//   - storage.ErrNotFound if no snapshot has been indexed for the given block.
+//   - ErrUnsupportedVersion if input version is not supported
+func (p *ProtocolKVStore) ByBlockID(blockID flow.Identifier) (protocol_state.KVStoreAPI, error) {
+	versionedData, err := p.ProtocolKVStore.ByBlockID(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not query KV store at block (%x): %w", blockID, err)
+	}
+	kvStore, err := VersionedDecode(versionedData.Version, versionedData.Data)
+	if err != nil {
+		return nil, fmt.Errorf("could not decode protocol state (version=%d) at block (%x): %w", versionedData.Version, blockID, err)
+	}
+	return kvStore, err
+}
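The type above is a thin decorator: it delegates persistence to the low-level store and adds versioned encode-on-write / decode-on-read at the boundary. A hedged sketch of the same layering over a toy byte store (illustrative names only, not the repository's interfaces):

```go
package main

import "fmt"

// blob is what the low-level store persists: an opaque payload plus its version.
type blob struct {
	Version uint64
	Data    []byte
}

// lowLevelStore is a stand-in for the raw storage abstraction.
type lowLevelStore struct{ byID map[string]blob }

func (s *lowLevelStore) Store(id string, b blob) { s.byID[id] = b }
func (s *lowLevelStore) ByID(id string) (blob, bool) {
	b, ok := s.byID[id]
	return b, ok
}

// decodedStore decorates lowLevelStore with encoding/decoding, mirroring how
// ProtocolKVStore above wraps storage.ProtocolKVStore.
type decodedStore struct{ low *lowLevelStore }

func (d *decodedStore) Store(id, state string) {
	d.low.Store(id, blob{Version: 1, Data: []byte(state)})
}

func (d *decodedStore) ByID(id string) (string, error) {
	b, ok := d.low.ByID(id)
	if !ok {
		return "", fmt.Errorf("no snapshot with ID %s", id)
	}
	if b.Version != 1 { // version dispatch would go here
		return "", fmt.Errorf("unsupported version %d", b.Version)
	}
	return string(b.Data), nil
}

func main() {
	store := &decodedStore{low: &lowLevelStore{byID: map[string]blob{}}}
	store.Store("state-1", "kvstore snapshot")
	fmt.Println(store.ByID("state-1"))
}
```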
diff --git a/state/protocol/protocol_state/kvstore/kvstore_storage_test.go b/state/protocol/protocol_state/kvstore/kvstore_storage_test.go
new file mode 100644
index 00000000000..3392f619071
--- /dev/null
+++ b/state/protocol/protocol_state/kvstore/kvstore_storage_test.go
@@ -0,0 +1,229 @@
+package kvstore_test
+
+import (
+	"errors"
+	"math"
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol/protocol_state/kvstore"
+	protocol_statemock "github.com/onflow/flow-go/state/protocol/protocol_state/mock"
+	"github.com/onflow/flow-go/storage"
+	storagemock "github.com/onflow/flow-go/storage/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestProtocolKVStore_StoreTx verifies correct functioning of `ProtocolKVStore.StoreTx`. In a nutshell,
+// `ProtocolKVStore` should encode the provided snapshot and call the lower-level storage abstraction
+// to persist the encoded result.
+func TestProtocolKVStore_StoreTx(t *testing.T) {
+	llStorage := storagemock.NewProtocolKVStore(t) // low-level storage of versioned binary Protocol State snapshots
+	kvState := protocol_statemock.NewKVStoreAPI(t) // instance of key-value store, which we want to persist
+	kvStateID := unittest.IdentifierFixture()
+
+	store := kvstore.NewProtocolKVStore(llStorage) // instance that we are testing
+
+	// On the happy path, where the input `kvState` encodes its state successfully, the wrapped store
+	// should be called to persist the version-encoded snapshot.
+	t.Run("happy path", func(t *testing.T) {
+		expectedVersion := uint64(13)
+		encData := unittest.RandomBytes(117)
+		versionedSnapshot := &flow.PSKeyValueStoreData{
+			Version: expectedVersion,
+			Data:    encData,
+		}
+		kvState.On("VersionedEncode").Return(expectedVersion, encData, nil).Once()
+
+		rw := storagemock.NewReaderBatchWriter(t)
+		llStorage.On("BatchStore", rw, kvStateID, versionedSnapshot).Return(nil).Once()
+
+		// Calling `BatchStore` should delegate to the wrapped low-level storage and pass through its result.
+		// The mock expectation above verifies that the lower-level `BatchStore` is actually reached
+		// with the version-encoded snapshot.
+		err := store.BatchStore(rw, kvStateID, kvState)
+		require.NoError(t, err)
+	})
+
+	// On the unhappy path, i.e. when the encoding of the input `kvState` fails, `ProtocolKVStore`
+	// should escalate the encoding error to the caller.
+	t.Run("encoding fails", func(t *testing.T) {
+		encodingError := errors.New("encoding error")
+
+		kvState.On("VersionedEncode").Return(uint64(0), nil, encodingError).Once()
+
+		rw := storagemock.NewReaderBatchWriter(t)
+		err := store.BatchStore(rw, kvStateID, kvState)
+		require.ErrorIs(t, err, encodingError)
+	})
+}
+
+// TestProtocolKVStore_IndexTx verifies that `ProtocolKVStore.IndexTx` delegates all calls directly to the
+// low-level storage abstraction.
+func TestProtocolKVStore_IndexTx(t *testing.T) {
+	blockID := unittest.IdentifierFixture()
+	stateID := unittest.IdentifierFixture()
+	llStorage := storagemock.NewProtocolKVStore(t) // low-level storage of versioned binary Protocol State snapshots
+
+	store := kvstore.NewProtocolKVStore(llStorage) // instance that we are testing
+
+	t.Run("happy path", func(t *testing.T) {
+		lockManager := storage.NewTestingLockManager()
+		err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			rw := storagemock.NewReaderBatchWriter(t)
+			llStorage.On("BatchIndex", lctx, rw, blockID, stateID).Return(nil).Once()
+
+			// TODO: potentially update - we might be bringing back a functor here, because we acquire a lock as explained in slack thread https://flow-foundation.slack.com/archives/C071612SJJE/p1754600182033289?thread_ts=1752912083.194619&cid=C071612SJJE
+			// Calling `BatchIndex` should delegate directly to the wrapped low-level storage and pass
+			// through its result; the mock expectation above verifies that the lower-level `BatchIndex`
+			// is actually reached with the proper lock context.
+			return store.BatchIndex(lctx, rw, blockID, stateID)
+		})
+		require.NoError(t, err)
+	})
+
+	// On the unhappy path, the error returned by the lower-level `BatchIndex` should be escalated to the caller.
+	t.Run("unhappy path", func(t *testing.T) {
+		lockManager := storage.NewTestingLockManager()
+		err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			indexingError := errors.New("indexing error")
+			rw := storagemock.NewReaderBatchWriter(t)
+			llStorage.On("BatchIndex", lctx, rw, blockID, stateID).Return(indexingError).Once()
+
+			err := store.BatchIndex(lctx, rw, blockID, stateID)
+			require.ErrorIs(t, err, indexingError)
+			return nil
+		})
+		require.NoError(t, err)
+	})
+}
+
+// TestProtocolKVStore_ByBlockID verifies correct functioning of `ProtocolKVStore.ByBlockID`. In a nutshell,
+// `ProtocolKVStore` should attempt to retrieve the encoded snapshot from the lower-level storage abstraction
+// and return the decoded result.
+func TestProtocolKVStore_ByBlockID(t *testing.T) {
+	blockID := unittest.IdentifierFixture()
+	llStorage := storagemock.NewProtocolKVStore(t) // low-level storage of versioned binary Protocol State snapshots
+
+	store := kvstore.NewProtocolKVStore(llStorage) // instance that we are testing
+
+	// On the happy path, `ProtocolKVStore` should decode the snapshot retrieved by the lower-level storage abstraction.
+	t.Run("happy path", func(t *testing.T) {
+		expectedState := &kvstore.Modelv1{
+			Modelv0: kvstore.Modelv0{
+				UpgradableModel: kvstore.UpgradableModel{},
+				EpochStateID:    unittest.IdentifierFixture(),
+			},
+		}
+		version, encStateData, err := expectedState.VersionedEncode()
+		require.NoError(t, err)
+		encExpectedState := &flow.PSKeyValueStoreData{
+			Version: version,
+			Data:    encStateData,
+		}
+		llStorage.On("ByBlockID", blockID).Return(encExpectedState, nil).Once()
+
+		decodedState, err := store.ByBlockID(blockID)
+		require.NoError(t, err)
+		require.Equal(t, expectedState, decodedState)
+	})
+
+	// On the unhappy path, either `ProtocolKVStore.ByBlockID` could error, or the decoding could fail. In either case,
+	// the error should be escalated to the caller.
+	t.Run("low-level `ProtocolKVStore.ByBlockID` errors", func(t *testing.T) {
+		someError := errors.New("some problem")
+		llStorage.On("ByBlockID", blockID).Return(nil, someError).Once()
+
+		_, err := store.ByBlockID(blockID)
+		require.ErrorIs(t, err, someError)
+	})
+	t.Run("decoding fails with `ErrUnsupportedVersion`", func(t *testing.T) {
+		versionedSnapshot := &flow.PSKeyValueStoreData{
+			Version: math.MaxUint64,
+			Data:    unittest.RandomBytes(117),
+		}
+		llStorage.On("ByBlockID", blockID).Return(versionedSnapshot, nil).Once()
+
+		_, err := store.ByBlockID(blockID)
+		require.ErrorIs(t, err, kvstore.ErrUnsupportedVersion)
+	})
+	t.Run("decoding yields exception", func(t *testing.T) {
+		versionedSnapshot := &flow.PSKeyValueStoreData{
+			Version: 1, // model version 1 is known, but data is random, which should yield an `irrecoverable.Exception`
+			Data:    unittest.RandomBytes(117),
+		}
+		llStorage.On("ByBlockID", blockID).Return(versionedSnapshot, nil).Once()
+
+		_, err := store.ByBlockID(blockID)
+		require.NotErrorIs(t, err, kvstore.ErrUnsupportedVersion)
+	})
+}
+
+// TestProtocolKVStore_ByID verifies correct functioning of `ProtocolKVStore.ByID`. In a nutshell,
+// `ProtocolKVStore` should attempt to retrieve the encoded snapshot from the lower-level storage
+// abstraction and return the decoded result.
+func TestProtocolKVStore_ByID(t *testing.T) {
+	protocolStateID := unittest.IdentifierFixture()
+	llStorage := storagemock.NewProtocolKVStore(t) // low-level storage of versioned binary Protocol State snapshots
+
+	store := kvstore.NewProtocolKVStore(llStorage) // instance that we are testing
+
+	// On the happy path, `ProtocolKVStore` should decode the snapshot retrieved by the lower-level storage abstraction.
+	t.Run("happy path", func(t *testing.T) {
+		expectedState := &kvstore.Modelv1{
+			Modelv0: kvstore.Modelv0{
+				UpgradableModel: kvstore.UpgradableModel{},
+				EpochStateID:    unittest.IdentifierFixture(),
+			},
+		}
+		version, encStateData, err := expectedState.VersionedEncode()
+		require.NoError(t, err)
+		encExpectedState := &flow.PSKeyValueStoreData{
+			Version: version,
+			Data:    encStateData,
+		}
+		llStorage.On("ByID", protocolStateID).Return(encExpectedState, nil).Once()
+
+		decodedState, err := store.ByID(protocolStateID)
+		require.NoError(t, err)
+		require.Equal(t, expectedState, decodedState)
+	})
+
+	// On the unhappy path, either `ProtocolKVStore.ByID` could error, or the decoding could fail. In either case,
+	// the error should be escalated to the caller.
+	t.Run("low-level `ProtocolKVStore.ByID` errors", func(t *testing.T) {
+		someError := errors.New("some problem")
+		llStorage.On("ByID", protocolStateID).Return(nil, someError).Once()
+
+		_, err := store.ByID(protocolStateID)
+		require.ErrorIs(t, err, someError)
+	})
+	t.Run("decoding fails with `ErrUnsupportedVersion`", func(t *testing.T) {
+		versionedSnapshot := &flow.PSKeyValueStoreData{
+			Version: math.MaxUint64,
+			Data:    unittest.RandomBytes(117),
+		}
+		llStorage.On("ByID", protocolStateID).Return(versionedSnapshot, nil).Once()
+
+		_, err := store.ByID(protocolStateID)
+		require.ErrorIs(t, err, kvstore.ErrUnsupportedVersion)
+	})
+	t.Run("decoding yields exception", func(t *testing.T) {
+		versionedSnapshot := &flow.PSKeyValueStoreData{
+			Version: 1, // model version 1 is known, but data is random, which should yield an `irrecoverable.Exception`
+			Data:    unittest.RandomBytes(117),
+		}
+		llStorage.On("ByID", protocolStateID).Return(versionedSnapshot, nil).Once()
+
+		_, err := store.ByID(protocolStateID)
+		require.NotErrorIs(t, err, kvstore.ErrUnsupportedVersion)
+	})
+}
diff --git a/state/protocol/protocol_state/kvstore/models.go b/state/protocol/protocol_state/kvstore/models.go
new file mode 100644
index 00000000000..c41eed6db88
--- /dev/null
+++ b/state/protocol/protocol_state/kvstore/models.go
@@ -0,0 +1,471 @@
+package kvstore
+
+import (
+	"fmt"
+
+	clone "github.com/huandu/go-clone/generic" //nolint:goimports
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+)
+
+// This file contains the concrete types that define the structure of the underlying key-value store
+// for a particular Protocol State version.
+// Essentially enumerating the set of keys and values that are supported.
+// When a key is added or removed, this requires a new protocol state version.
+// To use a new version of the protocol state, create a new versioned model in models.go (eg. modelv3 if the latest model is modelv2).
+// ATTENTION: All models should be public with public fields, otherwise the encoding/decoding will not work.
+
+// UpgradableModel is a utility struct that must be embedded in all model versions to provide
+// a common interface for managing protocol version upgrades.
+type UpgradableModel struct {
+	VersionUpgrade *protocol.ViewBasedActivator[uint64]
+}
+
+// SetVersionUpgrade sets the protocol upgrade version. This method is used
+// to update the Protocol State version when a flow.ProtocolStateVersionUpgrade is processed.
+// It contains the new version and the view at which it has to be applied.
+func (model *UpgradableModel) SetVersionUpgrade(activator *protocol.ViewBasedActivator[uint64]) {
+	model.VersionUpgrade = activator
+}
+
+// GetVersionUpgrade returns the upgrade version of the protocol.
+// VersionUpgrade is a view-based activator that specifies the version which has to be applied
+// and the view from which on it has to be applied. After an upgrade activation view has passed,
+// the (version, view) data remains in the state until the next upgrade is scheduled (essentially
+// persisting the most recent past update until a subsequent update is scheduled).
+func (model *UpgradableModel) GetVersionUpgrade() *protocol.ViewBasedActivator[uint64] {
+	return model.VersionUpgrade
+}
+
+// This file contains the concrete types that define the structure of the
+// underlying key-value store for a particular Protocol State version.
+// Essentially enumerating the set of keys and values that are supported.
+//
+// When a key is added or removed, this requires a new protocol state version:
+//   - Create a new versioned model in models.go (eg. modelv3 if latest model is modelv2)
+//   - Update the KVStoreReader and KVStoreAPI interfaces to include any new keys
+
+// Modelv0 is v0 of the Protocol State key-value store.
+// This model version is not intended to ever be the latest version supported by
+// any software version. Since it is important that the store support managing
+// different model versions, this is here so that we can test the implementation
+// with multiple supported KV model versions from the beginning.
+type Modelv0 struct {
+	UpgradableModel
+	EpochStateID                flow.Identifier
+	EpochExtensionViewCount     uint64
+	FinalizationSafetyThreshold uint64
+}
+
+var _ protocol_state.KVStoreAPI = (*Modelv0)(nil)
+var _ protocol_state.KVStoreMutator = (*Modelv0)(nil)
+
+// ID returns an identifier for this key-value store snapshot by hashing internal fields and the version number.
+func (model *Modelv0) ID() flow.Identifier {
+	return makeVersionedModelID(model)
+}
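The `Replicate` implementation that follows uses prototype-style replication: the same version yields a deep copy, exactly one version up yields a migration by embedding, and anything else is rejected. A hedged toy sketch of that dispatch (simplified types, manual copying instead of the repository's generic clone dependency):

```go
package main

import (
	"errors"
	"fmt"
)

var errIncompatibleVersionChange = errors.New("incompatible version change")

type toyV0 struct{ EpochStateID string }
type toyV1 struct{ toyV0 } // v1 embeds v0, mirroring Modelv1 embedding Modelv0

func (m *toyV0) Version() uint64 { return 0 }
func (m *toyV1) Version() uint64 { return 1 }

// replicate clones for the same version and migrates exactly one version up.
func (m *toyV0) replicate(target uint64) (any, error) {
	switch target {
	case m.Version(): // plain deep copy (toyV0 has only value fields)
		cp := *m
		return &cp, nil
	case m.Version() + 1: // migrate: wrap the copied parent state in the next model
		return &toyV1{toyV0: *m}, nil
	default:
		return nil, fmt.Errorf("unsupported replication from v%d to v%d: %w",
			m.Version(), target, errIncompatibleVersionChange)
	}
}

func main() {
	parent := &toyV0{EpochStateID: "state-1"}
	child, err := parent.replicate(1)
	fmt.Printf("%#v %v\n", child, err)
}
```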
+func (model *Modelv0) Replicate(protocolVersion uint64) (protocol_state.KVStoreMutator, error) { + currentVersion := model.GetProtocolStateVersion() + if currentVersion == protocolVersion { + // no need for migration, return a complete copy + return clone.Clone(model), nil + } + nextVersion := currentVersion + 1 + if protocolVersion != nextVersion { + return nil, fmt.Errorf("unsupported replication version %d, expect %d: %w", + protocolVersion, nextVersion, ErrIncompatibleVersionChange) + } + + // perform actual replication to the next version + v1 := &Modelv1{ + Modelv0: clone.Clone(*model), + } + if v1.GetProtocolStateVersion() != protocolVersion { + return nil, fmt.Errorf("sanity check: replicate resulted in unexpected version (%d != %d)", v1.GetProtocolStateVersion(), protocolVersion) + } + return v1, nil +} + +// VersionedEncode encodes the key-value store, returning the version separately +// from the encoded bytes. +// No errors are expected during normal operation. +func (model *Modelv0) VersionedEncode() (uint64, []byte, error) { + return versionedEncode(model.GetProtocolStateVersion(), model) +} + +// GetProtocolStateVersion returns the version of the Protocol State Snapshot +// that is backing the `Reader` interface. It is the protocol version that originally +// created the Protocol State Snapshot. Changes in the protocol state version +// correspond to changes in the set of key-value pairs which are supported, +// and which model is used for serialization. +func (model *Modelv0) GetProtocolStateVersion() uint64 { + return 0 +} + +// GetEpochStateID returns the state ID of the epoch state. +// This is part of the most basic model and is used to commit the epoch state to the KV store. +func (model *Modelv0) GetEpochStateID() flow.Identifier { + return model.EpochStateID +} + +// SetEpochStateID sets the state ID of the epoch state. +// This method is used to commit the epoch state to the KV store when the state of the epoch is updated. +func (model *Modelv0) SetEpochStateID(id flow.Identifier) { + model.EpochStateID = id +} + +// SetEpochExtensionViewCount sets the number of views for a hypothetical epoch extension. +// Expected errors during normal operations: +// - kvstore.ErrInvalidValue - if the view count is less than FinalizationSafetyThreshold*2. +func (model *Modelv0) SetEpochExtensionViewCount(viewCount uint64) error { + // Strictly speaking it should be perfectly fine to use a value viewCount >= model.FinalizationSafetyThreshold. + // By using a slightly higher value (factor of 2), we ensure that each extension spans a sufficiently big time + // window for the human governance committee to submit a valid epoch recovery transaction. + if viewCount < model.FinalizationSafetyThreshold*2 { + return fmt.Errorf("invalid view count %d, expect at least %d: %w", viewCount, model.FinalizationSafetyThreshold*2, ErrInvalidValue) + } + model.EpochExtensionViewCount = viewCount + return nil +} + +// GetEpochExtensionViewCount returns the number of views for a hypothetical epoch extension. Note +// that this value can change at runtime (through a service event). When a new extension is added, +// the view count is used right at this point in the protocol state's evolution. In other words, +// different extensions can have different view counts. 
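+//
+// For illustration (hypothetical numbers, not taken from any network configuration):
+// with FinalizationSafetyThreshold = 100, the setter enforces the 2x rule sketched above:
+//
+//	model := &Modelv0{FinalizationSafetyThreshold: 100}
+//	err := model.SetEpochExtensionViewCount(150) // errors.Is(err, ErrInvalidValue), since 150 < 2*100
+//	err = model.SetEpochExtensionViewCount(200)  // nil; GetEpochExtensionViewCount() now returns 200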
+func (model *Modelv0) GetEpochExtensionViewCount() uint64 { + return model.EpochExtensionViewCount +} + +func (model *Modelv0) GetFinalizationSafetyThreshold() uint64 { + return model.FinalizationSafetyThreshold +} + +// GetCadenceComponentVersion always returns ErrKeyNotSupported because this field is unsupported for Modelv0. +func (model *Modelv0) GetCadenceComponentVersion() (protocol.MagnitudeVersion, error) { + return protocol.MagnitudeVersion{}, ErrKeyNotSupported +} + +// GetCadenceComponentVersionUpgrade always returns nil because this field is unsupported for Modelv0. +func (model *Modelv0) GetCadenceComponentVersionUpgrade() *protocol.ViewBasedActivator[protocol.MagnitudeVersion] { + return nil +} + +// GetExecutionComponentVersion always returns ErrKeyNotSupported because this field is unsupported for Modelv0. +func (model *Modelv0) GetExecutionComponentVersion() (protocol.MagnitudeVersion, error) { + return protocol.MagnitudeVersion{}, ErrKeyNotSupported +} + +// GetExecutionComponentVersionUpgrade always returns nil because this field is unsupported for Modelv0. +func (model *Modelv0) GetExecutionComponentVersionUpgrade() *protocol.ViewBasedActivator[protocol.MagnitudeVersion] { + return nil +} + +// GetExecutionMeteringParameters always returns ErrKeyNotSupported because this field is unsupported for Modelv0. +func (model *Modelv0) GetExecutionMeteringParameters() (protocol.ExecutionMeteringParameters, error) { + return protocol.ExecutionMeteringParameters{}, ErrKeyNotSupported +} + +// GetExecutionMeteringParametersUpgrade always returns nil because this field is unsupported for Modelv0. +func (model *Modelv0) GetExecutionMeteringParametersUpgrade() *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters] { + return nil +} + +// Modelv1 is v1 of the Protocol State key-value store. +// This represents the first model version which will be considered "latest" by any +// deployed software version. +type Modelv1 struct { + Modelv0 +} + +var _ protocol_state.KVStoreAPI = (*Modelv1)(nil) +var _ protocol_state.KVStoreMutator = (*Modelv1)(nil) + +// ID returns an identifier for this key-value store snapshot by hashing internal fields and version number. +func (model *Modelv1) ID() flow.Identifier { + return makeVersionedModelID(model) +} + +// Replicate instantiates a Protocol State Snapshot of the given protocolVersion. +// It clones existing snapshot if protocolVersion = currentVersion. +// It transitions to next version if protocolVersion = currentVersion+1. +// Expected errors during normal operations: +// - ErrIncompatibleVersionChange if replicating the Parent Snapshot into a Snapshot +// with the specified `protocolVersion` is not supported. 
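+//
+// For illustration, a sketch of the replication semantics (hypothetical snapshot `parent`):
+//
+//	cpy, _ := parent.Replicate(1)  // dynamic type *Modelv1: deep copy, mutations do not affect parent
+//	next, _ := parent.Replicate(2) // dynamic type *Modelv2: carries over all of parent's fields
+//	_, err := parent.Replicate(3)  // errors.Is(err, ErrIncompatibleVersionChange)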
+func (model *Modelv1) Replicate(protocolVersion uint64) (protocol_state.KVStoreMutator, error) {
+	currentVersion := model.GetProtocolStateVersion()
+	if currentVersion == protocolVersion {
+		// no need for migration, return a complete copy
+		return clone.Clone(model), nil
+	}
+	nextVersion := currentVersion + 1
+	if protocolVersion != nextVersion {
+		// can only Replicate into model with numerically consecutive version
+		return nil, fmt.Errorf("unsupported replication version %d, expect %d: %w",
+			protocolVersion, nextVersion, ErrIncompatibleVersionChange)
+	}
+
+	// perform actual replication to the next version
+	v2 := &Modelv2{
+		Modelv1: clone.Clone(*model),
+	}
+	if v2.GetProtocolStateVersion() != protocolVersion {
+		return nil, fmt.Errorf("sanity check: replicate resulted in unexpected version (%d != %d)", v2.GetProtocolStateVersion(), protocolVersion)
+	}
+	return v2, nil
+}
+
+// VersionedEncode encodes the key-value store, returning the version separately
+// from the encoded bytes.
+// No errors are expected during normal operation.
+func (model *Modelv1) VersionedEncode() (uint64, []byte, error) {
+	return versionedEncode(model.GetProtocolStateVersion(), model)
+}
+
+// GetProtocolStateVersion returns the version of the Protocol State Snapshot
+// that is backing the `Reader` interface. It is the protocol version that originally
+// created the Protocol State Snapshot. Changes in the protocol state version
+// correspond to changes in the set of key-value pairs which are supported,
+// and which model is used for serialization.
+func (model *Modelv1) GetProtocolStateVersion() uint64 {
+	return 1
+}
+
+// Modelv2 reflects a behavioural change of the protocol (compared to Modelv1). Despite there being no change of the
+// actual data model, we increment the version to coordinate switching between the old and the new protocol behaviour.
+// This version adds the following changes:
+// - Non-system-chunk service event validation support (adds ChunkBody.ServiceEventCount field)
+// - EFM Recovery (adds EpochCommit.DKGIndexMap field)
+type Modelv2 struct {
+	Modelv1
+}
+
+// ID returns an identifier for this key-value store snapshot by hashing internal fields and version number.
+func (model *Modelv2) ID() flow.Identifier {
+	return makeVersionedModelID(model)
+}
+
+// Replicate instantiates a Protocol State Snapshot of the given protocolVersion.
+// It clones the existing snapshot if protocolVersion = currentVersion.
+// It transitions to the next version if protocolVersion = currentVersion+1.
+// Expected errors during normal operations:
+// - ErrIncompatibleVersionChange if replicating the Parent Snapshot into a Snapshot
+// with the specified `protocolVersion` is not supported.
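+//
+// For illustration: upgrading a v2 snapshot to v3 initializes the new execution-versioning
+// fields with defaults instead of leaving them zero-valued (hypothetical snapshot `v2`):
+//
+//	v3, _ := v2.Replicate(3) // dynamic type *Modelv3
+//	params, _ := v3.GetExecutionMeteringParameters()
+//	// params == protocol.DefaultExecutionMeteringParameters()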
+func (model *Modelv2) Replicate(protocolVersion uint64) (protocol_state.KVStoreMutator, error) { + currentVersion := model.GetProtocolStateVersion() + if currentVersion == protocolVersion { + // no need for migration, return a complete copy + return clone.Clone(model), nil + } + nextVersion := currentVersion + 1 + if protocolVersion != nextVersion { + // can only Replicate into model with numerically consecutive version + return nil, fmt.Errorf("unsupported replication version %d, expect %d: %w", + protocolVersion, nextVersion, ErrIncompatibleVersionChange) + } + + // perform actual replication to the next version + v3 := &Modelv3{ + Modelv2: clone.Clone(*model), + // Execution component versions and metering parameters are set to default values when upgrading to v3 + CadenceComponentVersion: protocol.UpdatableField[protocol.MagnitudeVersion]{ + CurrentValue: protocol.MagnitudeVersion{Major: 0, Minor: 0}, + }, + ExecutionComponentVersion: protocol.UpdatableField[protocol.MagnitudeVersion]{ + CurrentValue: protocol.MagnitudeVersion{Major: 0, Minor: 0}, + }, + ExecutionMeteringParameters: protocol.UpdatableField[protocol.ExecutionMeteringParameters]{ + CurrentValue: protocol.DefaultExecutionMeteringParameters(), + }, + } + if v3.GetProtocolStateVersion() != protocolVersion { + return nil, fmt.Errorf("sanity check: replicate resulted in unexpected version (%d != %d)", v3.GetProtocolStateVersion(), protocolVersion) + } + return v3, nil +} + +// VersionedEncode encodes the key-value store, returning the version separately +// from the encoded bytes. +// No errors are expected during normal operation. +func (model *Modelv2) VersionedEncode() (uint64, []byte, error) { + return versionedEncode(model.GetProtocolStateVersion(), model) +} + +// GetProtocolStateVersion returns the version of the Protocol State Snapshot +// that is backing the `Reader` interface. It is the protocol version that originally +// created the Protocol State Snapshot. Changes in the protocol state version +// correspond to changes in the set of key-value pairs which are supported, +// and which model is used for serialization. +func (model *Modelv2) GetProtocolStateVersion() uint64 { + return 2 +} + +// Modelv3 adds fields for execution versioning and metering. +type Modelv3 struct { + Modelv2 + ExecutionMeteringParameters protocol.UpdatableField[protocol.ExecutionMeteringParameters] + ExecutionComponentVersion protocol.UpdatableField[protocol.MagnitudeVersion] + CadenceComponentVersion protocol.UpdatableField[protocol.MagnitudeVersion] +} + +// ID returns an identifier for this key-value store snapshot by hashing internal fields and version number. +func (model *Modelv3) ID() flow.Identifier { + return makeVersionedModelID(model) +} + +// Replicate instantiates a Protocol State Snapshot of the given protocolVersion. +// It clones existing snapshot if protocolVersion = currentVersion, other versions are not supported yet. +// Expected errors during normal operations: +// - ErrIncompatibleVersionChange if replicating the Parent Snapshot into a Snapshot +// with the specified `protocolVersion` is not supported. 
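+//
+// For illustration: v3 is currently the highest version, so only same-version replication
+// succeeds (hypothetical snapshot `v3`):
+//
+//	cpy, _ := v3.Replicate(3) // deep copy
+//	_, err := v3.Replicate(4) // errors.Is(err, ErrIncompatibleVersionChange)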
+func (model *Modelv3) Replicate(protocolVersion uint64) (protocol_state.KVStoreMutator, error) { + currentVersion := model.GetProtocolStateVersion() + if currentVersion == protocolVersion { + // no need for migration, return a complete copy + return clone.Clone(model), nil + } else { + return nil, fmt.Errorf("unsupported replication version %d: %w", + protocolVersion, ErrIncompatibleVersionChange) + } +} + +// VersionedEncode encodes the key-value store, returning the version separately +// from the encoded bytes. +// No errors are expected during normal operation. +func (model *Modelv3) VersionedEncode() (uint64, []byte, error) { + return versionedEncode(model.GetProtocolStateVersion(), model) +} + +// GetProtocolStateVersion returns the version of the Protocol State Snapshot +// that is backing the `Reader` interface. It is the protocol version that originally +// created the Protocol State Snapshot. Changes in the protocol state version +// correspond to changes in the set of key-value pairs which are supported, +// and which model is used for serialization. +func (model *Modelv3) GetProtocolStateVersion() uint64 { + return 3 +} + +// GetCadenceComponentVersion returns the current Cadence component version. +// If not otherwise specified, during network bootstrapping or via service event, the component version is initialized to 0.0. +// No errors are expected during normal operation. +func (model *Modelv3) GetCadenceComponentVersion() (protocol.MagnitudeVersion, error) { + return model.CadenceComponentVersion.CurrentValue, nil +} + +// GetCadenceComponentVersionUpgrade returns the most recent upgrade for the Cadence Component Version, +// if one exists (otherwise returns nil). The upgrade will be returned even if it has already been applied. +func (model *Modelv3) GetCadenceComponentVersionUpgrade() *protocol.ViewBasedActivator[protocol.MagnitudeVersion] { + return model.CadenceComponentVersion.Update +} + +// GetExecutionComponentVersion returns the current Execution component version. +// If not otherwise specified, during network bootstrapping or via service event, the component version is initialized to 0.0. +// No errors are expected during normal operation. +func (model *Modelv3) GetExecutionComponentVersion() (protocol.MagnitudeVersion, error) { + return model.ExecutionComponentVersion.CurrentValue, nil +} + +// GetExecutionComponentVersionUpgrade returns the most recent upgrade for the Execution Component Version, +// if one exists (otherwise returns nil). The upgrade will be returned even if it has already been applied. +func (model *Modelv3) GetExecutionComponentVersionUpgrade() *protocol.ViewBasedActivator[protocol.MagnitudeVersion] { + return model.ExecutionComponentVersion.Update +} + +// GetExecutionMeteringParameters returns the current Execution metering parameters. +// No errors are expected during normal operation. +func (model *Modelv3) GetExecutionMeteringParameters() (protocol.ExecutionMeteringParameters, error) { + return model.ExecutionMeteringParameters.CurrentValue, nil +} + +// GetExecutionMeteringParametersUpgrade returns the most recent upgrade for the Execution Metering Parameters, +// if one exists (otherwise returns nil). The upgrade will be returned even if it has already been applied. 
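+//
+// For illustration, a hedged sketch of how a caller might distinguish a still-pending
+// upgrade from an already-applied one (names `store` and `currentView` are hypothetical):
+//
+//	if up := store.GetExecutionMeteringParametersUpgrade(); up != nil && up.ActivationView > currentView {
+//		// upgrade is scheduled but not yet active
+//	}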
+func (model *Modelv3) GetExecutionMeteringParametersUpgrade() *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters] { + return model.ExecutionMeteringParameters.Update +} + +// NewDefaultKVStore constructs a default Key-Value Store of the *latest* protocol version for bootstrapping. +// Currently, the KV store is largely empty. +// TODO: Shortcut in bootstrapping; we will probably have to start with a non-empty KV store in the future; +// TODO(efm-recovery): we need to bootstrap with v1 in order to test the upgrade to v2. Afterward, we should bootstrap with v2 by default for new networks. +// Potentially we may need to carry over the KVStore during a spork (with possible migrations). +func NewDefaultKVStore(finalizationSafetyThreshold, epochExtensionViewCount uint64, epochStateID flow.Identifier) (protocol_state.KVStoreAPI, error) { + modelv0, err := newKVStoreV0(finalizationSafetyThreshold, epochExtensionViewCount, epochStateID) + if err != nil { + return nil, fmt.Errorf("could not construct v0 kvstore: %w", err) + } + return &Modelv2{Modelv1: Modelv1{Modelv0: *modelv0}}, nil +} + +// NewKVStore constructs a key-value store for a particular Protocol State version for bootstrapping. +// Caller must provide a supported version number, otherwise an exception is returned. +func NewKVStore( + version uint64, + finalizationSafetyThreshold, epochExtensionViewCount uint64, + epochStateID flow.Identifier, +) (protocol_state.KVStoreAPI, error) { + modelv0, err := newKVStoreV0(finalizationSafetyThreshold, epochExtensionViewCount, epochStateID) + if err != nil { + return nil, fmt.Errorf("could not construct v0 kvstore: %w", err) + } + + switch version { + case 0: + return modelv0, nil + case 1: + return &Modelv1{Modelv0: *modelv0}, nil + case 2: + return &Modelv2{Modelv1: Modelv1{Modelv0: *modelv0}}, nil + default: + return nil, fmt.Errorf("unsupported protocol state version: %d", version) + } +} + +// NewKVStoreV0 constructs a KVStore using the v0 model. This is used to test +// version upgrades, from v0 to v1. +func newKVStoreV0(finalizationSafetyThreshold, epochExtensionViewCount uint64, epochStateID flow.Identifier) (*Modelv0, error) { + model := &Modelv0{ + UpgradableModel: UpgradableModel{}, + EpochStateID: epochStateID, + FinalizationSafetyThreshold: finalizationSafetyThreshold, + } + // use a setter to ensure the default value is valid and is not accidentally lower than the safety threshold. + err := model.SetEpochExtensionViewCount(epochExtensionViewCount) + if err != nil { + return nil, irrecoverable.NewExceptionf("could not set default epoch extension view count: %s", err.Error()) + } + return model, nil +} + +// NewKVStoreV0 constructs a KVStore using the v0 model. This is used to test +// version upgrades, from v0 to v1. +func NewKVStoreV0(finalizationSafetyThreshold, epochExtensionViewCount uint64, epochStateID flow.Identifier) (protocol_state.KVStoreAPI, error) { + return newKVStoreV0(finalizationSafetyThreshold, epochExtensionViewCount, epochStateID) +} + +// versionedModel generically represents a versioned protocol state model. +type versionedModel interface { + GetProtocolStateVersion() uint64 + *Modelv0 | *Modelv1 | *Modelv2 | *Modelv3 +} + +// makeVersionedModelID produces an Identifier which includes both the model's +// internal fields and its version. This guarantees that two models with different +// versions but otherwise identical fields will have different IDs, a requirement +// of the protocol.KVStoreReader API. 
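+//
+// For illustration (hypothetical fixture `someID`): two models with identical fields but
+// different versions yield different identifiers, because the version is hashed into the ID:
+//
+//	v0 := &Modelv0{EpochStateID: someID}
+//	v1 := &Modelv1{Modelv0: *v0}
+//	_ = v0.ID() != v1.ID() // true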
+func makeVersionedModelID[T versionedModel](model T) flow.Identifier {
+	return flow.MakeID(struct {
+		Version uint64
+		Model   T
+	}{
+		Version: model.GetProtocolStateVersion(),
+		Model:   model,
+	})
+}
diff --git a/state/protocol/protocol_state/kvstore/models_test.go b/state/protocol/protocol_state/kvstore/models_test.go
new file mode 100644
index 00000000000..41df93568f2
--- /dev/null
+++ b/state/protocol/protocol_state/kvstore/models_test.go
@@ -0,0 +1,508 @@
+package kvstore_test
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+	"github.com/onflow/flow-go/state/protocol/protocol_state/kvstore"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestEncodeDecode tests encoding and decoding all supported model versions.
+// - VersionedEncode should return the correct version
+// - instances should be equal after encoding, then decoding
+func TestEncodeDecode(t *testing.T) {
+	t.Run("v0", func(t *testing.T) {
+		model := &kvstore.Modelv0{
+			UpgradableModel: kvstore.UpgradableModel{
+				VersionUpgrade: &protocol.ViewBasedActivator[uint64]{
+					Data:           13,
+					ActivationView: 1000,
+				},
+			},
+			EpochStateID: unittest.IdentifierFixture(),
+		}
+
+		version, encoded, err := model.VersionedEncode()
+		require.NoError(t, err)
+		assert.Equal(t, uint64(0), version)
+
+		decoded, err := kvstore.VersionedDecode(version, encoded)
+		require.NoError(t, err)
+		assert.Equal(t, model, decoded)
+	})
+
+	t.Run("v1", func(t *testing.T) {
+		model := &kvstore.Modelv1{}
+
+		version, encoded, err := model.VersionedEncode()
+		require.NoError(t, err)
+		assert.Equal(t, uint64(1), version)
+
+		decoded, err := kvstore.VersionedDecode(version, encoded)
+		require.NoError(t, err)
+		assert.Equal(t, model, decoded)
+	})
+
+	t.Run("v2", func(t *testing.T) {
+		model := &kvstore.Modelv2{}
+
+		version, encoded, err := model.VersionedEncode()
+		require.NoError(t, err)
+		assert.Equal(t, uint64(2), version)
+
+		decoded, err := kvstore.VersionedDecode(version, encoded)
+		require.NoError(t, err)
+		assert.Equal(t, model, decoded)
+	})
+}
+
+// TestKVStoreAPI tests that all supported model versions satisfy the public interfaces.
+// - should be able to read/write supported keys
+// - should return the appropriate sentinel for unsupported keys
+func TestKVStoreAPI(t *testing.T) {
+	t.Run("v0", func(t *testing.T) {
+		model := &kvstore.Modelv0{}
+
+		assertModelIsUpgradable(t, model)
+
+		version := model.GetProtocolStateVersion()
+		assert.Equal(t, uint64(0), version)
+	})
+
+	t.Run("v1", func(t *testing.T) {
+		model := &kvstore.Modelv1{}
+
+		assertModelIsUpgradable(t, model)
+
+		version := model.GetProtocolStateVersion()
+		assert.Equal(t, uint64(1), version)
+	})
+
+	t.Run("v2", func(t *testing.T) {
+		model := &kvstore.Modelv2{}
+
+		assertModelIsUpgradable(t, model)
+
+		version := model.GetProtocolStateVersion()
+		assert.Equal(t, uint64(2), version)
+	})
+}
+
+// TestKVStoreAPI_Replicate tests that the replication logic of the KV store works correctly.
+// All supported model versions need to support this. There are a few invariants that need to be met:
+// - if model M is replicated and the requested version is equal to M.Version then an exact copy needs to be returned.
+// - if model M is replicated and the requested version is lower than M.Version then an error has to be returned.
+// - if model M is replicated and the requested version is greater than M.Version then behavior depends on concrete model. +// If replication from version v to v' is not supported a sentinel error should be returned, otherwise component needs to return +// a new model with version which is equal to the requested version. +func TestKVStoreAPI_Replicate(t *testing.T) { + t.Run("v0", func(t *testing.T) { + t.Run("->v0", func(t *testing.T) { + model := &kvstore.Modelv0{ + UpgradableModel: kvstore.UpgradableModel{ + VersionUpgrade: &protocol.ViewBasedActivator[uint64]{ + Data: 13, + ActivationView: 1000, + }, + }, + } + cpy, err := model.Replicate(model.GetProtocolStateVersion()) + require.NoError(t, err) + require.True(t, reflect.DeepEqual(model, cpy)) // expect the same model + require.Equal(t, cpy.ID(), model.ID()) + + model.VersionUpgrade.ActivationView++ // change + require.False(t, reflect.DeepEqual(model, cpy), "expect to have a deep copy") + }) + t.Run("->v1", func(t *testing.T) { + model := &kvstore.Modelv0{ + UpgradableModel: kvstore.UpgradableModel{ + VersionUpgrade: &protocol.ViewBasedActivator[uint64]{ + Data: 13, + ActivationView: 1000, + }, + }, + } + newVersion, err := model.Replicate(1) + require.NoError(t, err) + require.Equal(t, uint64(1), newVersion.GetProtocolStateVersion()) + require.NotEqual(t, newVersion.ID(), model.ID(), "two models with the same data but different version must have different ID") + _, ok := newVersion.(*kvstore.Modelv1) + require.True(t, ok, "expected Modelv1") + require.Equal(t, newVersion.GetVersionUpgrade(), model.GetVersionUpgrade()) + }) + t.Run("invalid upgrade", func(t *testing.T) { + model := &kvstore.Modelv0{} + newVersion, err := model.Replicate(model.GetProtocolStateVersion() + 10) + require.ErrorIs(t, err, kvstore.ErrIncompatibleVersionChange) + require.Nil(t, newVersion) + }) + }) + + t.Run("v1", func(t *testing.T) { + t.Run("->v1", func(t *testing.T) { + model := &kvstore.Modelv1{ + Modelv0: kvstore.Modelv0{ + UpgradableModel: kvstore.UpgradableModel{ + VersionUpgrade: &protocol.ViewBasedActivator[uint64]{ + Data: 13, + ActivationView: 1000, + }, + }, + EpochStateID: unittest.IdentifierFixture(), + }, + } + cpy, err := model.Replicate(model.GetProtocolStateVersion()) + require.NoError(t, err) + require.True(t, reflect.DeepEqual(model, cpy)) + + model.VersionUpgrade.ActivationView++ // change + require.False(t, reflect.DeepEqual(model, cpy)) + }) + t.Run("invalid upgrade", func(t *testing.T) { + model := &kvstore.Modelv1{} + for _, version := range []uint64{ + model.GetProtocolStateVersion() - 1, + model.GetProtocolStateVersion() + 10, + } { + newVersion, err := model.Replicate(version) + require.ErrorIs(t, err, kvstore.ErrIncompatibleVersionChange) + require.Nil(t, newVersion) + } + }) + t.Run("->v2", func(t *testing.T) { + model := &kvstore.Modelv1{ + Modelv0: kvstore.Modelv0{ + UpgradableModel: kvstore.UpgradableModel{ + VersionUpgrade: &protocol.ViewBasedActivator[uint64]{ + Data: 13, + ActivationView: 1000, + }, + }, + }, + } + newVersion, err := model.Replicate(2) + require.NoError(t, err) + require.Equal(t, uint64(2), newVersion.GetProtocolStateVersion()) + require.NotEqual(t, newVersion.ID(), model.ID(), "two models with the same data but different version must have different ID") + _, ok := newVersion.(*kvstore.Modelv2) + require.True(t, ok, "expected Modelv2") + require.Equal(t, newVersion.GetVersionUpgrade(), model.GetVersionUpgrade()) + }) + }) + + t.Run("v2", func(t *testing.T) { + t.Run("->v2", func(t *testing.T) { + model 
:= &kvstore.Modelv2{ + Modelv1: kvstore.Modelv1{ + Modelv0: kvstore.Modelv0{ + UpgradableModel: kvstore.UpgradableModel{ + VersionUpgrade: &protocol.ViewBasedActivator[uint64]{ + Data: 13, + ActivationView: 1000, + }, + }, + EpochStateID: unittest.IdentifierFixture(), + }, + }, + } + cpy, err := model.Replicate(model.GetProtocolStateVersion()) + require.NoError(t, err) + require.True(t, reflect.DeepEqual(model, cpy)) + + model.VersionUpgrade.ActivationView++ // change + require.False(t, reflect.DeepEqual(model, cpy)) + }) + t.Run("invalid upgrade", func(t *testing.T) { + model := &kvstore.Modelv2{} + for _, version := range []uint64{ + model.GetProtocolStateVersion() - 1, + model.GetProtocolStateVersion() + 10, + } { + newVersion, err := model.Replicate(version) + require.ErrorIs(t, err, kvstore.ErrIncompatibleVersionChange) + require.Nil(t, newVersion) + } + }) + t.Run("->v3", func(t *testing.T) { + model := &kvstore.Modelv2{ + Modelv1: kvstore.Modelv1{ + Modelv0: kvstore.Modelv0{ + UpgradableModel: kvstore.UpgradableModel{ + VersionUpgrade: &protocol.ViewBasedActivator[uint64]{ + Data: 13, + ActivationView: 1000, + }, + }, + }, + }, + } + upgradedKVStore, err := model.Replicate(3) + require.NoError(t, err) + require.Equal(t, uint64(3), upgradedKVStore.GetProtocolStateVersion()) + require.NotEqual(t, upgradedKVStore.ID(), model.ID(), "two models with the same data but different version must have different ID") + _, ok := upgradedKVStore.(*kvstore.Modelv3) + require.True(t, ok, "expected Modelv3") + require.Equal(t, upgradedKVStore.GetVersionUpgrade(), model.GetVersionUpgrade()) + + t.Run("v3-only fields are initialized", func(t *testing.T) { + cadenceVersion, err := upgradedKVStore.GetCadenceComponentVersion() + assert.NoError(t, err) + assert.Equal(t, protocol.MagnitudeVersion{}, cadenceVersion) + + assert.Nil(t, upgradedKVStore.GetCadenceComponentVersionUpgrade()) + + executionVersion, err := upgradedKVStore.GetExecutionComponentVersion() + assert.NoError(t, err) + assert.Equal(t, protocol.MagnitudeVersion{}, executionVersion) + + assert.Nil(t, upgradedKVStore.GetExecutionComponentVersionUpgrade()) + + meteringParams, err := upgradedKVStore.GetExecutionMeteringParameters() + assert.NoError(t, err) + assert.Equal(t, protocol.DefaultExecutionMeteringParameters(), meteringParams) + }) + }) + }) + + t.Run("v3", func(t *testing.T) { + t.Run("->v3", func(t *testing.T) { + model := &kvstore.Modelv3{ + Modelv2: kvstore.Modelv2{ + Modelv1: kvstore.Modelv1{ + Modelv0: kvstore.Modelv0{ + UpgradableModel: kvstore.UpgradableModel{ + VersionUpgrade: &protocol.ViewBasedActivator[uint64]{ + Data: 13, + ActivationView: 1000, + }, + }, + EpochStateID: unittest.IdentifierFixture(), + }, + }, + }, + } + cpy, err := model.Replicate(model.GetProtocolStateVersion()) + require.NoError(t, err) + require.True(t, reflect.DeepEqual(model, cpy)) + + model.VersionUpgrade.ActivationView++ // change + require.False(t, reflect.DeepEqual(model, cpy)) + }) + t.Run("invalid upgrade", func(t *testing.T) { + model := &kvstore.Modelv3{} + + for _, version := range []uint64{ + model.GetProtocolStateVersion() - 1, + model.GetProtocolStateVersion() + 1, + model.GetProtocolStateVersion() + 10, + } { + newVersion, err := model.Replicate(version) + require.ErrorIs(t, err, kvstore.ErrIncompatibleVersionChange) + require.Nil(t, newVersion) + } + }) + }) +} + +// assertModelIsUpgradable tests that the model satisfies the version upgrade interface. 
+// - should be able to set and get the upgrade version +// - setting nil version upgrade should work +// +// This has to be tested for every model version since version upgrade should be supported by all models. +func assertModelIsUpgradable(t *testing.T, api protocol_state.KVStoreMutator) { + oldVersion := api.GetProtocolStateVersion() + activationView := uint64(1000) + expected := &protocol.ViewBasedActivator[uint64]{ + Data: oldVersion + 1, + ActivationView: activationView, + } + + // check if setting version upgrade works + api.SetVersionUpgrade(expected) + actual := api.GetVersionUpgrade() + assert.Equal(t, expected, actual, "version upgrade should be set") + + // check if setting nil version upgrade works + api.SetVersionUpgrade(nil) + assert.Nil(t, api.GetVersionUpgrade(), "version upgrade should be nil") +} + +// TestNewDefaultKVStore tests that the default KV store is created correctly. +func TestNewDefaultKVStore(t *testing.T) { + t.Run("happy-path", func(t *testing.T) { + safetyParams, err := protocol.DefaultEpochSafetyParams(flow.Localnet) + require.NoError(t, err) + epochStateID := unittest.IdentifierFixture() + store, err := kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, epochStateID) + require.NoError(t, err) + require.Equal(t, store.GetEpochStateID(), epochStateID) + require.Equal(t, store.GetFinalizationSafetyThreshold(), safetyParams.FinalizationSafetyThreshold) + require.Equal(t, store.GetEpochExtensionViewCount(), safetyParams.EpochExtensionViewCount) + require.GreaterOrEqual(t, store.GetEpochExtensionViewCount(), 2*safetyParams.FinalizationSafetyThreshold, + "extension view count should be at least 2*FinalizationSafetyThreshold") + }) + t.Run("invalid-kvstore-epoch-extension-view-count", func(t *testing.T) { + safetyParams, err := protocol.DefaultEpochSafetyParams(flow.Localnet) + require.NoError(t, err) + epochStateID := unittest.IdentifierFixture() + // invalid epoch extension view count, it has to be at least 2*FinalizationSafetyThreshold + store, err := kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.FinalizationSafetyThreshold, epochStateID) + require.Error(t, err) + require.Nil(t, store) + }) + t.Run("unsupported-key", func(t *testing.T) { + safetyParams, err := protocol.DefaultEpochSafetyParams(flow.Localnet) + require.NoError(t, err) + epochStateID := unittest.IdentifierFixture() + store, err := kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, epochStateID) + require.NoError(t, err) + + // Check GetCadenceComponentVersion + _, err = store.GetCadenceComponentVersion() + assert.ErrorIs(t, err, kvstore.ErrKeyNotSupported) + + // Check GetCadenceComponentVersionUpgrade + assert.Nil(t, store.GetCadenceComponentVersionUpgrade()) + + // Check GetExecutionComponentVersion + _, err = store.GetExecutionComponentVersion() + assert.ErrorIs(t, err, kvstore.ErrKeyNotSupported) + + // Check GetExecutionComponentVersionUpgrade + assert.Nil(t, store.GetExecutionComponentVersionUpgrade()) + + // Check GetExecutionMeteringParameters + _, err = store.GetExecutionMeteringParameters() + assert.ErrorIs(t, err, kvstore.ErrKeyNotSupported) + + // Check GetExecutionMeteringParametersUpgrade + assert.Nil(t, store.GetExecutionMeteringParametersUpgrade()) + }) +} + +// TestKVStoreMutator_SetEpochExtensionViewCount tests that setter performs an input validation and doesn't allow setting +// a value which is lower than 
2*FinalizationSafetyThreshold. +func TestKVStoreMutator_SetEpochExtensionViewCount(t *testing.T) { + safetyParams, err := protocol.DefaultEpochSafetyParams(flow.Localnet) + require.NoError(t, err) + epochStateID := unittest.IdentifierFixture() + + t.Run("happy-path", func(t *testing.T) { + store, err := kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, epochStateID) + require.NoError(t, err) + mutator, err := store.Replicate(store.GetProtocolStateVersion()) + require.NoError(t, err) + + newValue := safetyParams.FinalizationSafetyThreshold*2 + 1 + require.NotEqual(t, mutator.GetEpochExtensionViewCount(), newValue) + err = mutator.SetEpochExtensionViewCount(newValue) + require.NoError(t, err) + require.Equal(t, mutator.GetEpochExtensionViewCount(), newValue) + }) + t.Run("invalid-value", func(t *testing.T) { + store, err := kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, epochStateID) + require.NoError(t, err) + mutator, err := store.Replicate(store.GetProtocolStateVersion()) + require.NoError(t, err) + + oldValue := mutator.GetEpochExtensionViewCount() + newValue := safetyParams.FinalizationSafetyThreshold*2 - 1 + require.NotEqual(t, oldValue, newValue) + err = mutator.SetEpochExtensionViewCount(newValue) + require.ErrorIs(t, err, kvstore.ErrInvalidValue) + require.Equal(t, mutator.GetEpochExtensionViewCount(), oldValue, "value should be unchanged") + }) +} + +// TestMalleability verifies that the entities which implements the ID are not malleable. +func TestMalleability(t *testing.T) { + t.Run("Modelv0", func(t *testing.T) { + unittest.RequireEntityNonMalleable(t, + &kvstore.Modelv0{ + UpgradableModel: kvstore.UpgradableModel{ + VersionUpgrade: unittest.ViewBasedActivatorFixture(), + }, + EpochStateID: unittest.IdentifierFixture(), + }, + ) + }) + + t.Run("Modelv1", func(t *testing.T) { + unittest.RequireEntityNonMalleable(t, + &kvstore.Modelv1{ + Modelv0: kvstore.Modelv0{ + UpgradableModel: kvstore.UpgradableModel{ + VersionUpgrade: unittest.ViewBasedActivatorFixture(), + }, + EpochStateID: unittest.IdentifierFixture(), + }, + }, + ) + }) +} + +// TestNewKVStore_SupportedVersions verifies that supported versions +// construct the expected key-value store without error. +func TestNewKVStore_SupportedVersions(t *testing.T) { + safetyParams, err := protocol.DefaultEpochSafetyParams(flow.Localnet) + require.NoError(t, err) + epochStateID := unittest.IdentifierFixture() + + defaultKVStore, err := kvstore.NewDefaultKVStore( + safetyParams.FinalizationSafetyThreshold, + safetyParams.EpochExtensionViewCount, + epochStateID, + ) + require.NoError(t, err) + + defaultVersion := defaultKVStore.GetProtocolStateVersion() + for version := uint64(0); version <= defaultVersion; version++ { + t.Run(fmt.Sprintf("version %d", version), func(t *testing.T) { + store, err := kvstore.NewKVStore( + version, + safetyParams.FinalizationSafetyThreshold, + safetyParams.EpochExtensionViewCount, + epochStateID, + ) + + require.NoError(t, err) + require.NotNil(t, store) + }) + } +} + +// TestNewKVStore_UnsupportedVersion verifies that an unsupported version +// returns a proper error and no store is constructed. 
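+//
+// For illustration: with the current default version of 2, constructing version 3 directly
+// is rejected; a v3 store is only reachable via Replicate on a v2 store (arguments hypothetical):
+//
+//	_, err := kvstore.NewKVStore(3, threshold, extensionViewCount, stateID)
+//	// err: "unsupported protocol state version: 3"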
+func TestNewKVStore_UnsupportedVersion(t *testing.T) { + safetyParams, err := protocol.DefaultEpochSafetyParams(flow.Localnet) + require.NoError(t, err) + epochStateID := unittest.IdentifierFixture() + + defaultKVStore, err := kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, epochStateID) + require.NoError(t, err) + defaultVersion := defaultKVStore.GetProtocolStateVersion() + invalidVersion := defaultVersion + 1 + + store, err := kvstore.NewKVStore( + invalidVersion, + safetyParams.FinalizationSafetyThreshold, + safetyParams.EpochExtensionViewCount, + epochStateID, + ) + + require.Error(t, err) + require.Nil(t, store) + require.Contains(t, err.Error(), "unsupported protocol state version") +} diff --git a/state/protocol/protocol_state/kvstore/set_value_statemachine.go b/state/protocol/protocol_state/kvstore/set_value_statemachine.go new file mode 100644 index 00000000000..bf97811714a --- /dev/null +++ b/state/protocol/protocol_state/kvstore/set_value_statemachine.go @@ -0,0 +1,71 @@ +package kvstore + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/protocol_state" + "github.com/onflow/flow-go/state/protocol/protocol_state/common" +) + +// SetValueStateMachine encapsulates the logic for evolving sub-state of KV store by setting particular values. +// Specifically, it consumes service events that are sealed by the candidate block +// (possibly still under construction) with the given view. +// Each relevant event is validated before it is applied to the KV store. +// All updates are applied to a copy of parent KV store, so parent KV store is not modified. +// A separate instance should be created for each block to process the updates therein. +type SetValueStateMachine struct { + common.BaseKeyValueStoreStateMachine + telemetry protocol_state.StateMachineTelemetryConsumer +} + +var _ protocol_state.KeyValueStoreStateMachine = (*SetValueStateMachine)(nil) + +// NewSetValueStateMachine creates a new state machine to update a specific sub-state of the KV Store. +func NewSetValueStateMachine( + telemetry protocol_state.StateMachineTelemetryConsumer, + candidateView uint64, + parentState protocol.KVStoreReader, + evolvingState protocol_state.KVStoreMutator, +) *SetValueStateMachine { + return &SetValueStateMachine{ + BaseKeyValueStoreStateMachine: common.NewBaseKeyValueStoreStateMachine(candidateView, parentState, evolvingState), + telemetry: telemetry, + } +} + +// EvolveState applies the state change(s) on sub-state P for the candidate block (under construction). +// Implementation processes only relevant service events and ignores all other events. +// No errors are expected during normal operations. 
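+//
+// For illustration, a hedged sketch of the wiring (mocks and telemetry omitted; names hypothetical):
+//
+//	sm := NewSetValueStateMachine(telemetry, candidateView, parentState, evolvingState)
+//	ev := &flow.SetEpochExtensionViewCount{Value: 1000}
+//	err := sm.EvolveState([]flow.ServiceEvent{ev.ServiceEvent()}) // applies the value via the mutator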
+func (m *SetValueStateMachine) EvolveState(orderedUpdates []flow.ServiceEvent) error {
+	for _, update := range orderedUpdates {
+		switch update.Type {
+		case flow.ServiceEventSetEpochExtensionViewCount:
+			setEpochExtensionViewCount, ok := update.Event.(*flow.SetEpochExtensionViewCount)
+			if !ok {
+				return fmt.Errorf("internal invalid type for SetEpochExtensionViewCount: %T", update.Event)
+			}
+
+			m.telemetry.OnServiceEventReceived(update)
+			err := m.EvolvingState.SetEpochExtensionViewCount(setEpochExtensionViewCount.Value)
+			if err != nil {
+				if errors.Is(err, ErrInvalidValue) {
+					m.telemetry.OnInvalidServiceEvent(update,
+						protocol.NewInvalidServiceEventErrorf("ignoring invalid value %v in SetEpochExtensionViewCount event: %s",
+							setEpochExtensionViewCount.Value, err.Error()))
+					continue
+				}
+				return fmt.Errorf("unexpected error when processing SetEpochExtensionViewCount: %w", err)
+			}
+			m.telemetry.OnServiceEventProcessed(update)
+
+		// Service events not explicitly expected are ignored
+		default:
+			continue
+		}
+	}
+
+	return nil
+}
diff --git a/state/protocol/protocol_state/kvstore/set_value_statemachine_test.go b/state/protocol/protocol_state/kvstore/set_value_statemachine_test.go
new file mode 100644
index 00000000000..5df6890ffc0
--- /dev/null
+++ b/state/protocol/protocol_state/kvstore/set_value_statemachine_test.go
@@ -0,0 +1,114 @@
+package kvstore_test
+
+import (
+	"errors"
+	"testing"
+
+	mocks "github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	mockprotocol "github.com/onflow/flow-go/state/protocol/mock"
+	"github.com/onflow/flow-go/state/protocol/protocol_state/kvstore"
+	"github.com/onflow/flow-go/state/protocol/protocol_state/mock"
+)
+
+func TestSetKeyValueStoreValueStateMachine(t *testing.T) {
+	suite.Run(t, new(SetKeyValueStoreValueStateMachineSuite))
+}
+
+// SetKeyValueStoreValueStateMachineSuite is a dedicated test suite for testing the KV store's set-value state machine.
+type SetKeyValueStoreValueStateMachineSuite struct {
+	suite.Suite
+
+	view        uint64
+	parentState *mockprotocol.KVStoreReader
+	mutator     *mock.KVStoreMutator
+	telemetry   *mock.StateMachineTelemetryConsumer
+
+	stateMachine *kvstore.SetValueStateMachine
+}
+
+func (s *SetKeyValueStoreValueStateMachineSuite) SetupTest() {
+	s.telemetry = mock.NewStateMachineTelemetryConsumer(s.T())
+	s.parentState = mockprotocol.NewKVStoreReader(s.T())
+	s.mutator = mock.NewKVStoreMutator(s.T())
+	s.view = 1000
+
+	s.parentState.On("GetFinalizationSafetyThreshold").Return(uint64(100)).Maybe()
+
+	s.stateMachine = kvstore.NewSetValueStateMachine(s.telemetry, s.view, s.parentState, s.mutator)
+	require.NotNil(s.T(), s.stateMachine)
+}
+
+// TestInitialInvariants ensures that initial state machine invariants are met.
+// It checks that the state machine has the correct candidateView and parent state.
+func (s *SetKeyValueStoreValueStateMachineSuite) TestInitialInvariants() {
+	require.Equal(s.T(), s.view, s.stateMachine.View())
+	require.Equal(s.T(), s.parentState, s.stateMachine.ParentState())
+}
+
+// TestEvolveState_SetEpochExtensionViewCount ensures that the state machine can process a SetEpochExtensionViewCount service event.
+// It checks several cases including +// - happy path - valid extension length value +// - invalid extension length value +func (s *SetKeyValueStoreValueStateMachineSuite) TestEvolveState_SetEpochExtensionViewCount() { + s.Run("happy-path", func() { + ev := &flow.SetEpochExtensionViewCount{ + Value: 1000, + } + + s.telemetry.On("OnServiceEventReceived", ev.ServiceEvent()).Return().Once() + s.telemetry.On("OnServiceEventProcessed", ev.ServiceEvent()).Return().Once() + s.mutator.On("SetEpochExtensionViewCount", ev.Value).Return(nil) + err := s.stateMachine.EvolveState([]flow.ServiceEvent{ev.ServiceEvent()}) + require.NoError(s.T(), err) + }) + // process two events, one is valid and one is invalid, ensure: + // 1. valid event is processed + // 2. invalid event is ignored and reported + s.Run("invalid-value", func() { + s.mutator = mock.NewKVStoreMutator(s.T()) + s.stateMachine = kvstore.NewSetValueStateMachine(s.telemetry, s.view, s.parentState, s.mutator) + + valid := &flow.SetEpochExtensionViewCount{ + Value: 1000, + } + invalid := &flow.SetEpochExtensionViewCount{ + Value: 50, + } + + s.mutator.On("SetEpochExtensionViewCount", valid.Value).Return(nil).Once() + s.mutator.On("SetEpochExtensionViewCount", invalid.Value).Return(kvstore.ErrInvalidValue).Once() + s.telemetry.On("OnServiceEventReceived", valid.ServiceEvent()).Return().Once() + s.telemetry.On("OnServiceEventProcessed", valid.ServiceEvent()).Return().Once() + s.telemetry.On("OnServiceEventReceived", invalid.ServiceEvent()).Return().Once() + s.telemetry.On("OnInvalidServiceEvent", invalid.ServiceEvent(), + mocks.MatchedBy(protocol.IsInvalidServiceEventError)).Return().Once() + + err := s.stateMachine.EvolveState([]flow.ServiceEvent{invalid.ServiceEvent(), valid.ServiceEvent()}) + require.NoError(s.T(), err, "sentinel error has to be handled internally") + }) + s.Run("exception", func() { + s.mutator = mock.NewKVStoreMutator(s.T()) + s.stateMachine = kvstore.NewSetValueStateMachine(s.telemetry, s.view, s.parentState, s.mutator) + invalid := &flow.SetEpochExtensionViewCount{ + Value: 50, + } + + exception := errors.New("kvstore-exception") + s.mutator.On("SetEpochExtensionViewCount", invalid.Value).Return(exception).Once() + s.telemetry.On("OnServiceEventReceived", invalid.ServiceEvent()).Return().Once() + err := s.stateMachine.EvolveState([]flow.ServiceEvent{invalid.ServiceEvent(), invalid.ServiceEvent()}) + require.ErrorIs(s.T(), err, exception, "exception has to be propagated") + }) +} + +// TestBuild ensures that state machine returns empty list of deferred operations. +func (s *SetKeyValueStoreValueStateMachineSuite) TestBuild() { + dbOps, err := s.stateMachine.Build() + require.NoError(s.T(), err) + require.True(s.T(), dbOps.IsEmpty()) +} diff --git a/state/protocol/protocol_state/kvstore/upgrade_statemachine.go b/state/protocol/protocol_state/kvstore/upgrade_statemachine.go new file mode 100644 index 00000000000..28142cfb95c --- /dev/null +++ b/state/protocol/protocol_state/kvstore/upgrade_statemachine.go @@ -0,0 +1,136 @@ +package kvstore + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/state/protocol/protocol_state" + "github.com/onflow/flow-go/state/protocol/protocol_state/common" +) + +// PSVersionUpgradeStateMachine encapsulates the logic for evolving the version of the Protocol State. 
+// Specifically, it consumes ProtocolStateVersionUpgrade service events that are sealed by the candidate block
+// (possibly still under construction) with the given view.
+// Each relevant event is validated before it is applied to the KV store.
+// All updates are applied to a copy of the parent KV store, so the parent KV store is not modified.
+// A separate instance should be created for each block to process the updates therein.
+type PSVersionUpgradeStateMachine struct {
+	common.BaseKeyValueStoreStateMachine
+	telemetry protocol_state.StateMachineTelemetryConsumer
+}
+
+var _ protocol_state.KeyValueStoreStateMachine = (*PSVersionUpgradeStateMachine)(nil)
+
+// NewPSVersionUpgradeStateMachine creates a new state machine to update a specific sub-state of the KV Store.
+// It schedules protocol state version upgrades upon receiving a `ProtocolStateVersionUpgrade` event.
+// The actual model upgrade is handled in the upper layer (`ProtocolStateMachine`).
+func NewPSVersionUpgradeStateMachine(
+	telemetry protocol_state.StateMachineTelemetryConsumer,
+	candidateView uint64,
+	parentState protocol.KVStoreReader,
+	evolvingState protocol_state.KVStoreMutator,
+) *PSVersionUpgradeStateMachine {
+	return &PSVersionUpgradeStateMachine{
+		BaseKeyValueStoreStateMachine: common.NewBaseKeyValueStoreStateMachine(candidateView, parentState, evolvingState),
+		telemetry:                     telemetry,
+	}
+}
+
+// EvolveState applies the state change(s) on sub-state P for the candidate block (under construction).
+// Implementation processes only relevant service events and ignores all other events.
+// No errors are expected during normal operations.
+func (m *PSVersionUpgradeStateMachine) EvolveState(orderedUpdates []flow.ServiceEvent) error {
+	for _, update := range orderedUpdates {
+		switch update.Type {
+		case flow.ServiceEventProtocolStateVersionUpgrade:
+			versionUpgrade, ok := update.Event.(*flow.ProtocolStateVersionUpgrade)
+			if !ok {
+				return fmt.Errorf("internal invalid type for ProtocolStateVersionUpgrade: %T", update.Event)
+			}
+
+			m.telemetry.OnServiceEventReceived(update)
+			err := m.processSingleEvent(versionUpgrade)
+			if err != nil {
+				if protocol.IsInvalidServiceEventError(err) {
+					m.telemetry.OnInvalidServiceEvent(update, err)
+					continue
+				}
+				return fmt.Errorf("unexpected error when processing version upgrade event: %w", err)
+			}
+			m.telemetry.OnServiceEventProcessed(update)
+
+		// Service events not explicitly expected are ignored
+		default:
+			continue
+		}
+	}
+
+	return nil
+}
+
+// processSingleEvent performs processing of a single protocol version upgrade event.
+// Expected errors indicating that we have observed an invalid service event from the protocol's point of view:
+// - `protocol.InvalidServiceEventError` - if the service event is invalid for the current protocol state.
+//
+// All other errors should be treated as exceptions.
+func (m *PSVersionUpgradeStateMachine) processSingleEvent(versionUpgrade *flow.ProtocolStateVersionUpgrade) error {
+	// To switch the protocol version, a replica needs to process a block with a view >= activation view.
+	// But we cannot activate a new version until the block containing the seal is finalized, because when
+	// switching between forks with different highest views, we do not want to switch back and forth between versions.
+	// The problem is that finality is local to each node due to the nature of the consensus algorithm itself.
+ // To guarantee that all nodes switch the protocol version at exactly the same block, we require that the + // activation view is higher than the view + Δ when accepting the event. Δ represents the finalization lag + // to give time for replicas to finalize the block containing the seal for the version upgrade event. + // When replica reaches (or exceeds) the activation view *and* the latest finalized protocol state knows + // about the version upgrade, only then it's safe to switch the protocol version. + if m.View()+m.ParentState().GetFinalizationSafetyThreshold() >= versionUpgrade.ActiveView { + return protocol.NewInvalidServiceEventErrorf("view %d triggering version upgrade must be at least %d views in the future of current view %d: %w", + versionUpgrade.ActiveView, m.ParentState().GetFinalizationSafetyThreshold(), m.View(), ErrInvalidActivationView) + } + + if m.ParentState().GetProtocolStateVersion()+1 != versionUpgrade.NewProtocolStateVersion { + return protocol.NewInvalidServiceEventErrorf("invalid protocol state version upgrade %d -> %d: %w", + m.ParentState().GetProtocolStateVersion(), versionUpgrade.NewProtocolStateVersion, ErrInvalidUpgradeVersion) + } + + // checkPendingUpgrade checks if there is a pending upgrade in the state and validates if we can accept the upgrade request. + // We allow setting version upgrade if all of the following conditions are met: + // (i) the activation view is bigger than or equal to the current candidate block's view + Δ. + // (ii) if there is a pending upgrade, the new version should be the same as the pending upgrade. + // Condition (ii) is checked in this function. + checkPendingUpgrade := func(store protocol.KVStoreReader) error { + if pendingUpgrade := store.GetVersionUpgrade(); pendingUpgrade != nil { + if pendingUpgrade.ActivationView < m.View() { + // pending upgrade has been activated, we can ignore it. + return nil + } + + // we allow updating pending upgrade iff the new version is the same as the pending upgrade + // the activation view may differ, but it has to meet the same threshold. + if pendingUpgrade.Data != versionUpgrade.NewProtocolStateVersion { + return protocol.NewInvalidServiceEventErrorf("requested to upgrade to %d but pending upgrade with version already stored %d: %w", + pendingUpgrade.Data, versionUpgrade.NewProtocolStateVersion, ErrInvalidUpgradeVersion) + } + } + return nil + } + + // There can be multiple `versionUpgrade` Service Events sealed in one block. In case we have _not_ + // encountered any, `m.EvolvingState` contains the latest `versionUpgrade` as of the parent block, because + // we cloned it from the parent state. If we encountered some version upgrades, we already enforced that + // they are all upgrades to the same version. So we only need to check that the next `versionUpgrade` + // also has the same version. 
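+	// For illustration (hypothetical numbers): with candidate view 1000 and Δ = 100, any
+	// upgrade event with ActiveView <= 1100 was already rejected above; a second upgrade
+	// event sealed in the same block is accepted below only if it requests the same target
+	// version as the one already recorded in the evolving state.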
+ err := checkPendingUpgrade(m.EvolvingState) + if err != nil { + return fmt.Errorf("version upgrade invalid with respect to the current state: %w", err) + } + + activator := &protocol.ViewBasedActivator[uint64]{ + Data: versionUpgrade.NewProtocolStateVersion, + ActivationView: versionUpgrade.ActiveView, + } + m.EvolvingState.SetVersionUpgrade(activator) + return nil +} diff --git a/state/protocol/protocol_state/kvstore/upgrade_statemachine_test.go b/state/protocol/protocol_state/kvstore/upgrade_statemachine_test.go new file mode 100644 index 00000000000..c1b0d9d8eac --- /dev/null +++ b/state/protocol/protocol_state/kvstore/upgrade_statemachine_test.go @@ -0,0 +1,139 @@ +package kvstore_test + +import ( + "testing" + + "github.com/pkg/errors" + mocks "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + mockprotocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" + "github.com/onflow/flow-go/state/protocol/protocol_state/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestStateMachine(t *testing.T) { + suite.Run(t, new(StateMachineSuite)) +} + +// StateMachineSuite is a dedicated test suite for testing KV store state machine. +type StateMachineSuite struct { + suite.Suite + + view uint64 + parentState *mockprotocol.KVStoreReader + mutator *mock.KVStoreMutator + telemetry *mock.StateMachineTelemetryConsumer + + stateMachine *kvstore.PSVersionUpgradeStateMachine +} + +func (s *StateMachineSuite) SetupTest() { + s.parentState = mockprotocol.NewKVStoreReader(s.T()) + s.mutator = mock.NewKVStoreMutator(s.T()) + s.telemetry = mock.NewStateMachineTelemetryConsumer(s.T()) + s.view = 1000 + + s.parentState.On("GetFinalizationSafetyThreshold").Return(uint64(100)).Maybe() + + s.stateMachine = kvstore.NewPSVersionUpgradeStateMachine(s.telemetry, s.view, s.parentState, s.mutator) + require.NotNil(s.T(), s.stateMachine) +} + +// TestInitialInvariants ensures that initial state machine invariants are met. +// It checks that state machine has correct view and parent state. +func (s *StateMachineSuite) TestInitialInvariants() { + require.Equal(s.T(), s.view, s.stateMachine.View()) + require.Equal(s.T(), s.parentState, s.stateMachine.ParentState()) +} + +// TestEvolveState_ProtocolStateVersionUpgrade ensures that state machine can process protocol state version upgrade event. +// It checks several cases including +// * happy path - valid upgrade version and activation view +// * invalid upgrade version - has to return sentinel error since version is invalid +// * invalid activation view - has to return sentinel error since activation view doesn't meet threshold. 
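+//
+// For illustration (hypothetical numbers): with candidate view 1000 and safety threshold 100,
+// the smallest acceptable activation view is 1101, because the state machine rejects
+// ActiveView <= view + threshold:
+//
+//	upgrade.ActiveView = 1000 + 100 + 1 // accepted
+//	upgrade.ActiveView = 1000 + 100     // rejected with ErrInvalidActivationView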
+func (s *StateMachineSuite) TestEvolveState_ProtocolStateVersionUpgrade() { + s.Run("happy-path", func() { + oldVersion := uint64(0) + s.parentState.On("GetProtocolStateVersion").Return(oldVersion) + + upgrade := unittest.ProtocolStateVersionUpgradeFixture() + upgrade.ActiveView = s.view + s.parentState.GetFinalizationSafetyThreshold() + 1 + upgrade.NewProtocolStateVersion = oldVersion + 1 + + s.telemetry.On("OnServiceEventReceived", upgrade.ServiceEvent()).Return().Once() + s.telemetry.On("OnServiceEventProcessed", upgrade.ServiceEvent()).Return().Once() + s.mutator.On("GetVersionUpgrade").Return(nil) + s.mutator.On("SetVersionUpgrade", &protocol.ViewBasedActivator[uint64]{ + Data: upgrade.NewProtocolStateVersion, + ActivationView: upgrade.ActiveView, + }).Return() + + err := s.stateMachine.EvolveState([]flow.ServiceEvent{upgrade.ServiceEvent()}) + require.NoError(s.T(), err) + }) + s.Run("invalid-protocol-state-version", func() { + s.mutator = mock.NewKVStoreMutator(s.T()) + oldVersion := uint64(0) + s.parentState.On("GetProtocolStateVersion").Return(oldVersion) + + upgrade := unittest.ProtocolStateVersionUpgradeFixture() + upgrade.ActiveView = s.view + s.parentState.GetFinalizationSafetyThreshold() + 1 + upgrade.NewProtocolStateVersion = oldVersion + + s.telemetry.On("OnServiceEventReceived", upgrade.ServiceEvent()).Return().Once() + s.telemetry.On("OnInvalidServiceEvent", upgrade.ServiceEvent(), + mocks.MatchedBy(func(err error) bool { + return protocol.IsInvalidServiceEventError(err) && + errors.Is(err, kvstore.ErrInvalidUpgradeVersion) + })).Once() + _ = s.stateMachine.EvolveState([]flow.ServiceEvent{upgrade.ServiceEvent()}) + + s.mutator.AssertNumberOfCalls(s.T(), "SetVersionUpgrade", 0) + }) + s.Run("skipping-protocol-state-version", func() { + s.mutator = mock.NewKVStoreMutator(s.T()) + oldVersion := uint64(0) + s.parentState.On("GetProtocolStateVersion").Return(oldVersion) + + upgrade := unittest.ProtocolStateVersionUpgradeFixture() + upgrade.ActiveView = s.view + s.parentState.GetFinalizationSafetyThreshold() + 1 + upgrade.NewProtocolStateVersion = oldVersion + 2 // has to be exactly +1 + + s.telemetry.On("OnServiceEventReceived", upgrade.ServiceEvent()).Return().Once() + s.telemetry.On("OnInvalidServiceEvent", upgrade.ServiceEvent(), + mocks.MatchedBy(func(err error) bool { + return protocol.IsInvalidServiceEventError(err) && + errors.Is(err, kvstore.ErrInvalidUpgradeVersion) + })).Once() + _ = s.stateMachine.EvolveState([]flow.ServiceEvent{upgrade.ServiceEvent()}) + + s.mutator.AssertNumberOfCalls(s.T(), "SetVersionUpgrade", 0) + }) + s.Run("invalid-activation-view", func() { + s.mutator = mock.NewKVStoreMutator(s.T()) + upgrade := unittest.ProtocolStateVersionUpgradeFixture() + upgrade.ActiveView = s.view + s.parentState.GetFinalizationSafetyThreshold() + + s.telemetry.On("OnServiceEventReceived", upgrade.ServiceEvent()).Return().Once() + s.telemetry.On("OnInvalidServiceEvent", upgrade.ServiceEvent(), + mocks.MatchedBy(func(err error) bool { + return protocol.IsInvalidServiceEventError(err) && + errors.Is(err, kvstore.ErrInvalidActivationView) + })).Once() + _ = s.stateMachine.EvolveState([]flow.ServiceEvent{upgrade.ServiceEvent()}) + + s.mutator.AssertNumberOfCalls(s.T(), "SetVersionUpgrade", 0) + }) +} + +// TestBuild ensures that state machine returns empty list of deferred operations. 
+func (s *StateMachineSuite) TestBuild() {
+	dbOps, err := s.stateMachine.Build()
+	require.NoError(s.T(), err)
+	require.True(s.T(), dbOps.IsEmpty())
+}
diff --git a/state/protocol/protocol_state/kvstore_storage.go b/state/protocol/protocol_state/kvstore_storage.go
new file mode 100644
index 00000000000..96c4af7e9d8
--- /dev/null
+++ b/state/protocol/protocol_state/kvstore_storage.go
@@ -0,0 +1,64 @@
+package protocol_state
+
+import (
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/storage"
+)
+
+// ProtocolKVStore persists different snapshots of the Protocol State's Key-Value stores [KV-stores].
+// Here, we augment the low-level primitives provided by `storage.ProtocolKVStore` with logic for
+// encoding and decoding the state snapshots into the abstract representation `protocol_state.KVStoreAPI`.
+//
+// At this abstraction level, we can only handle protocol state snapshots whose data models are
+// supported by the current software version. There might be serialized snapshots with legacy versions
+// in the database that are no longer supported by this software version.
+type ProtocolKVStore interface {
+	// BatchStore adds the KV-store snapshot in the database using the given ID as key. Per convention, all
+	// implementations of [protocol.KVStoreReader] should be able to successfully encode their state into a
+	// data blob. If the encoding fails, an error is returned.
+	//
+	// No error is expected during normal operations.
+	BatchStore(rw storage.ReaderBatchWriter, stateID flow.Identifier, kvStore protocol.KVStoreReader) error
+
+	// BatchIndex writes the blockID->stateID index to the input write batch.
+	// In a nutshell, we want to maintain a map from `blockID` to `stateID`, where `blockID` references the
+	// block that _proposes_ the updated key-value store.
+	// Protocol convention:
+	// - Consider block B, whose ingestion might potentially lead to an updated KV store. For example,
+	//   the KV store changes if we seal some execution results emitting specific service events.
+	// - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store.
+	// - IMPORTANT: The updated state requires confirmation by a QC and will only become active at the
+	//   child block, _after_ validating the QC.
+	//
+	// CAUTION: To prevent data corruption, we need to guarantee atomicity of the existence check and the subsequent
+	// database write. Hence, we require the caller to acquire [storage.LockInsertBlock] and hold it until the
+	// database write has been committed.
+	//
+	// Expected errors during normal operations:
+	// - [storage.ErrAlreadyExists] if a KV store for the given blockID has already been indexed.
+	BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error
+
+	// ByID retrieves the KV store snapshot with the given ID.
+	// Expected errors during normal operations:
+	// - storage.ErrNotFound if no snapshot with the given Identifier is known.
+	// - kvstore.ErrUnsupportedVersion if the version of the stored snapshot is not supported by this implementation.
+	ByID(id flow.Identifier) (KVStoreAPI, error)
+
+	// ByBlockID retrieves the kv-store snapshot that the block with the given ID proposes.
+	// CAUTION: this store snapshot requires confirmation by a QC and will only become active at the child block,
+	// _after_ validating the QC. Protocol convention:
+	// - Consider block B, whose ingestion might potentially lead to an updated KV store state.
+	//   For example, the state changes if we seal some execution results emitting specific service events.
+	// - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. As value,
+	//   the hash of the resulting state at the end of processing B is to be used.
+	//
+	// Expected errors during normal operations:
+	// - storage.ErrNotFound if no snapshot has been indexed for the given block.
+	// - kvstore.ErrUnsupportedVersion if the version of the stored snapshot is not supported by this implementation.
+	ByBlockID(blockID flow.Identifier) (KVStoreAPI, error)
+}
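+
+// Illustrative usage (a sketch, not part of this interface): how a caller might combine the
+// methods above. The names `store`, `lctx`, and `rw` are assumed to be supplied by the
+// caller's storage layer; error handling is elided.
+//
+//	// persist the snapshot and index it under the proposing block, while holding storage.LockInsertBlock:
+//	_ = store.BatchStore(rw, stateID, kvStore)       // encodes and writes the snapshot
+//	_ = store.BatchIndex(lctx, rw, blockID, stateID) // records the blockID -> stateID mapping
+//
+//	// later, retrieve the snapshot proposed by that block:
+//	snapshot, err := store.ByBlockID(blockID)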
diff --git a/state/protocol/protocol_state/mock/key_value_store_state_machine.go b/state/protocol/protocol_state/mock/key_value_store_state_machine.go
new file mode 100644
index 00000000000..f3bed6f0689
--- /dev/null
+++ b/state/protocol/protocol_state/mock/key_value_store_state_machine.go
@@ -0,0 +1,117 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock
+
+import (
+	flow "github.com/onflow/flow-go/model/flow"
+	deferred "github.com/onflow/flow-go/storage/deferred"
+
+	mock "github.com/stretchr/testify/mock"
+
+	protocol "github.com/onflow/flow-go/state/protocol"
+)
+
+// KeyValueStoreStateMachine is an autogenerated mock type for the KeyValueStoreStateMachine type
+type KeyValueStoreStateMachine[P any] struct {
+	mock.Mock
+}
+
+// Build provides a mock function with no fields
+func (_m *KeyValueStoreStateMachine[P]) Build() (*deferred.DeferredBlockPersist, error) {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for Build")
+	}
+
+	var r0 *deferred.DeferredBlockPersist
+	var r1 error
+	if rf, ok := ret.Get(0).(func() (*deferred.DeferredBlockPersist, error)); ok {
+		return rf()
+	}
+	if rf, ok := ret.Get(0).(func() *deferred.DeferredBlockPersist); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*deferred.DeferredBlockPersist)
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func() error); ok {
+		r1 = rf()
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// EvolveState provides a mock function with given fields: sealedServiceEvents
+func (_m *KeyValueStoreStateMachine[P]) EvolveState(sealedServiceEvents []flow.ServiceEvent) error {
+	ret := _m.Called(sealedServiceEvents)
+
+	if len(ret) == 0 {
+		panic("no return value specified for EvolveState")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func([]flow.ServiceEvent) error); ok {
+		r0 = rf(sealedServiceEvents)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// ParentState provides a mock function with no fields
+func (_m *KeyValueStoreStateMachine[P]) ParentState() protocol.KVStoreReader {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for ParentState")
+	}
+
+	var r0 protocol.KVStoreReader
+	if rf, ok := ret.Get(0).(func() protocol.KVStoreReader); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(protocol.KVStoreReader)
+		}
+	}
+
+	return r0
+}
+
+// View provides a mock function with no fields
+func (_m *KeyValueStoreStateMachine[P]) View() uint64 {
+	ret := _m.Called()
+
+	if len(ret) == 0 {
+		panic("no return value specified for View")
+	}
+
+	var r0 uint64
+	if rf, ok := ret.Get(0).(func() uint64); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(uint64)
+	}
+
+	return
r0 +} + +// NewKeyValueStoreStateMachine creates a new instance of KeyValueStoreStateMachine. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewKeyValueStoreStateMachine[P any](t interface { + mock.TestingT + Cleanup(func()) +}) *KeyValueStoreStateMachine[P] { + mock := &KeyValueStoreStateMachine[P]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/protocol_state/mock/key_value_store_state_machine_factory.go b/state/protocol/protocol_state/mock/key_value_store_state_machine_factory.go new file mode 100644 index 00000000000..29e874b7355 --- /dev/null +++ b/state/protocol/protocol_state/mock/key_value_store_state_machine_factory.go @@ -0,0 +1,61 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + protocol "github.com/onflow/flow-go/state/protocol" + + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state" +) + +// KeyValueStoreStateMachineFactory is an autogenerated mock type for the KeyValueStoreStateMachineFactory type +type KeyValueStoreStateMachineFactory struct { + mock.Mock +} + +// Create provides a mock function with given fields: candidateView, parentID, parentState, mutator +func (_m *KeyValueStoreStateMachineFactory) Create(candidateView uint64, parentID flow.Identifier, parentState protocol.KVStoreReader, mutator protocol_state.KVStoreMutator) (protocol_state.KeyValueStoreStateMachine, error) { + ret := _m.Called(candidateView, parentID, parentState, mutator) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 protocol_state.KeyValueStoreStateMachine + var r1 error + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier, protocol.KVStoreReader, protocol_state.KVStoreMutator) (protocol_state.KeyValueStoreStateMachine, error)); ok { + return rf(candidateView, parentID, parentState, mutator) + } + if rf, ok := ret.Get(0).(func(uint64, flow.Identifier, protocol.KVStoreReader, protocol_state.KVStoreMutator) protocol_state.KeyValueStoreStateMachine); ok { + r0 = rf(candidateView, parentID, parentState, mutator) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol_state.KeyValueStoreStateMachine) + } + } + + if rf, ok := ret.Get(1).(func(uint64, flow.Identifier, protocol.KVStoreReader, protocol_state.KVStoreMutator) error); ok { + r1 = rf(candidateView, parentID, parentState, mutator) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewKeyValueStoreStateMachineFactory creates a new instance of KeyValueStoreStateMachineFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewKeyValueStoreStateMachineFactory(t interface { + mock.TestingT + Cleanup(func()) +}) *KeyValueStoreStateMachineFactory { + mock := &KeyValueStoreStateMachineFactory{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/protocol_state/mock/kv_store_api.go b/state/protocol/protocol_state/mock/kv_store_api.go new file mode 100644 index 00000000000..214482e93ba --- /dev/null +++ b/state/protocol/protocol_state/mock/kv_store_api.go @@ -0,0 +1,356 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + protocol "github.com/onflow/flow-go/state/protocol" + + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state" +) + +// KVStoreAPI is an autogenerated mock type for the KVStoreAPI type +type KVStoreAPI struct { + mock.Mock +} + +// GetCadenceComponentVersion provides a mock function with no fields +func (_m *KVStoreAPI) GetCadenceComponentVersion() (protocol.MagnitudeVersion, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCadenceComponentVersion") + } + + var r0 protocol.MagnitudeVersion + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.MagnitudeVersion, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.MagnitudeVersion); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(protocol.MagnitudeVersion) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCadenceComponentVersionUpgrade provides a mock function with no fields +func (_m *KVStoreAPI) GetCadenceComponentVersionUpgrade() *protocol.ViewBasedActivator[protocol.MagnitudeVersion] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCadenceComponentVersionUpgrade") + } + + var r0 *protocol.ViewBasedActivator[protocol.MagnitudeVersion] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[protocol.MagnitudeVersion]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[protocol.MagnitudeVersion]) + } + } + + return r0 +} + +// GetEpochExtensionViewCount provides a mock function with no fields +func (_m *KVStoreAPI) GetEpochExtensionViewCount() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetEpochExtensionViewCount") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetEpochStateID provides a mock function with no fields +func (_m *KVStoreAPI) GetEpochStateID() flow.Identifier { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetEpochStateID") + } + + var r0 flow.Identifier + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + return r0 +} + +// GetExecutionComponentVersion provides a mock function with no fields +func (_m *KVStoreAPI) GetExecutionComponentVersion() (protocol.MagnitudeVersion, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionComponentVersion") + } + + var r0 protocol.MagnitudeVersion + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.MagnitudeVersion, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.MagnitudeVersion); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(protocol.MagnitudeVersion) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetExecutionComponentVersionUpgrade provides a mock function with no fields +func (_m *KVStoreAPI) GetExecutionComponentVersionUpgrade() *protocol.ViewBasedActivator[protocol.MagnitudeVersion] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionComponentVersionUpgrade") + } + + var r0 
*protocol.ViewBasedActivator[protocol.MagnitudeVersion] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[protocol.MagnitudeVersion]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[protocol.MagnitudeVersion]) + } + } + + return r0 +} + +// GetExecutionMeteringParameters provides a mock function with no fields +func (_m *KVStoreAPI) GetExecutionMeteringParameters() (protocol.ExecutionMeteringParameters, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionMeteringParameters") + } + + var r0 protocol.ExecutionMeteringParameters + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.ExecutionMeteringParameters, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.ExecutionMeteringParameters); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(protocol.ExecutionMeteringParameters) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetExecutionMeteringParametersUpgrade provides a mock function with no fields +func (_m *KVStoreAPI) GetExecutionMeteringParametersUpgrade() *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionMeteringParametersUpgrade") + } + + var r0 *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters]) + } + } + + return r0 +} + +// GetFinalizationSafetyThreshold provides a mock function with no fields +func (_m *KVStoreAPI) GetFinalizationSafetyThreshold() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFinalizationSafetyThreshold") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetProtocolStateVersion provides a mock function with no fields +func (_m *KVStoreAPI) GetProtocolStateVersion() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateVersion") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetVersionUpgrade provides a mock function with no fields +func (_m *KVStoreAPI) GetVersionUpgrade() *protocol.ViewBasedActivator[uint64] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetVersionUpgrade") + } + + var r0 *protocol.ViewBasedActivator[uint64] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[uint64]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[uint64]) + } + } + + return r0 +} + +// ID provides a mock function with no fields +func (_m *KVStoreAPI) ID() flow.Identifier { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 flow.Identifier + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + return r0 +} + +// Replicate provides a mock function with given fields: protocolVersion +func (_m *KVStoreAPI) Replicate(protocolVersion uint64) 
(protocol_state.KVStoreMutator, error) { + ret := _m.Called(protocolVersion) + + if len(ret) == 0 { + panic("no return value specified for Replicate") + } + + var r0 protocol_state.KVStoreMutator + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (protocol_state.KVStoreMutator, error)); ok { + return rf(protocolVersion) + } + if rf, ok := ret.Get(0).(func(uint64) protocol_state.KVStoreMutator); ok { + r0 = rf(protocolVersion) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol_state.KVStoreMutator) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(protocolVersion) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VersionedEncode provides a mock function with no fields +func (_m *KVStoreAPI) VersionedEncode() (uint64, []byte, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for VersionedEncode") + } + + var r0 uint64 + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func() (uint64, []byte, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() []byte); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewKVStoreAPI creates a new instance of KVStoreAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewKVStoreAPI(t interface { + mock.TestingT + Cleanup(func()) +}) *KVStoreAPI { + mock := &KVStoreAPI{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/protocol_state/mock/kv_store_mutator.go b/state/protocol/protocol_state/mock/kv_store_mutator.go new file mode 100644 index 00000000000..82226218dfb --- /dev/null +++ b/state/protocol/protocol_state/mock/kv_store_mutator.go @@ -0,0 +1,352 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + protocol "github.com/onflow/flow-go/state/protocol" +) + +// KVStoreMutator is an autogenerated mock type for the KVStoreMutator type +type KVStoreMutator struct { + mock.Mock +} + +// GetCadenceComponentVersion provides a mock function with no fields +func (_m *KVStoreMutator) GetCadenceComponentVersion() (protocol.MagnitudeVersion, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCadenceComponentVersion") + } + + var r0 protocol.MagnitudeVersion + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.MagnitudeVersion, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.MagnitudeVersion); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(protocol.MagnitudeVersion) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetCadenceComponentVersionUpgrade provides a mock function with no fields +func (_m *KVStoreMutator) GetCadenceComponentVersionUpgrade() *protocol.ViewBasedActivator[protocol.MagnitudeVersion] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetCadenceComponentVersionUpgrade") + } + + var r0 *protocol.ViewBasedActivator[protocol.MagnitudeVersion] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[protocol.MagnitudeVersion]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[protocol.MagnitudeVersion]) + } + } + + return r0 +} + +// GetEpochExtensionViewCount provides a mock function with no fields +func (_m *KVStoreMutator) GetEpochExtensionViewCount() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetEpochExtensionViewCount") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetEpochStateID provides a mock function with no fields +func (_m *KVStoreMutator) GetEpochStateID() flow.Identifier { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetEpochStateID") + } + + var r0 flow.Identifier + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + return r0 +} + +// GetExecutionComponentVersion provides a mock function with no fields +func (_m *KVStoreMutator) GetExecutionComponentVersion() (protocol.MagnitudeVersion, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionComponentVersion") + } + + var r0 protocol.MagnitudeVersion + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.MagnitudeVersion, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.MagnitudeVersion); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(protocol.MagnitudeVersion) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetExecutionComponentVersionUpgrade provides a mock function with no fields +func (_m *KVStoreMutator) GetExecutionComponentVersionUpgrade() *protocol.ViewBasedActivator[protocol.MagnitudeVersion] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionComponentVersionUpgrade") + } + + var r0 *protocol.ViewBasedActivator[protocol.MagnitudeVersion] + if rf, ok := 
ret.Get(0).(func() *protocol.ViewBasedActivator[protocol.MagnitudeVersion]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[protocol.MagnitudeVersion]) + } + } + + return r0 +} + +// GetExecutionMeteringParameters provides a mock function with no fields +func (_m *KVStoreMutator) GetExecutionMeteringParameters() (protocol.ExecutionMeteringParameters, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionMeteringParameters") + } + + var r0 protocol.ExecutionMeteringParameters + var r1 error + if rf, ok := ret.Get(0).(func() (protocol.ExecutionMeteringParameters, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() protocol.ExecutionMeteringParameters); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(protocol.ExecutionMeteringParameters) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetExecutionMeteringParametersUpgrade provides a mock function with no fields +func (_m *KVStoreMutator) GetExecutionMeteringParametersUpgrade() *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetExecutionMeteringParametersUpgrade") + } + + var r0 *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[protocol.ExecutionMeteringParameters]) + } + } + + return r0 +} + +// GetFinalizationSafetyThreshold provides a mock function with no fields +func (_m *KVStoreMutator) GetFinalizationSafetyThreshold() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFinalizationSafetyThreshold") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetProtocolStateVersion provides a mock function with no fields +func (_m *KVStoreMutator) GetProtocolStateVersion() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetProtocolStateVersion") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetVersionUpgrade provides a mock function with no fields +func (_m *KVStoreMutator) GetVersionUpgrade() *protocol.ViewBasedActivator[uint64] { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetVersionUpgrade") + } + + var r0 *protocol.ViewBasedActivator[uint64] + if rf, ok := ret.Get(0).(func() *protocol.ViewBasedActivator[uint64]); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*protocol.ViewBasedActivator[uint64]) + } + } + + return r0 +} + +// ID provides a mock function with no fields +func (_m *KVStoreMutator) ID() flow.Identifier { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 flow.Identifier + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + return r0 +} + +// SetEpochExtensionViewCount provides a mock function with given fields: viewCount +func (_m *KVStoreMutator) SetEpochExtensionViewCount(viewCount uint64) error { + ret := _m.Called(viewCount) 
+ + if len(ret) == 0 { + panic("no return value specified for SetEpochExtensionViewCount") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64) error); ok { + r0 = rf(viewCount) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetEpochStateID provides a mock function with given fields: stateID +func (_m *KVStoreMutator) SetEpochStateID(stateID flow.Identifier) { + _m.Called(stateID) +} + +// SetVersionUpgrade provides a mock function with given fields: version +func (_m *KVStoreMutator) SetVersionUpgrade(version *protocol.ViewBasedActivator[uint64]) { + _m.Called(version) +} + +// VersionedEncode provides a mock function with no fields +func (_m *KVStoreMutator) VersionedEncode() (uint64, []byte, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for VersionedEncode") + } + + var r0 uint64 + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func() (uint64, []byte, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() []byte); ok { + r1 = rf() + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func() error); ok { + r2 = rf() + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewKVStoreMutator creates a new instance of KVStoreMutator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewKVStoreMutator(t interface { + mock.TestingT + Cleanup(func()) +}) *KVStoreMutator { + mock := &KVStoreMutator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/protocol_state/mock/orthogonal_store_state_machine.go b/state/protocol/protocol_state/mock/orthogonal_store_state_machine.go new file mode 100644 index 00000000000..171d2e55bce --- /dev/null +++ b/state/protocol/protocol_state/mock/orthogonal_store_state_machine.go @@ -0,0 +1,115 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + deferred "github.com/onflow/flow-go/storage/deferred" + + mock "github.com/stretchr/testify/mock" +) + +// OrthogonalStoreStateMachine is an autogenerated mock type for the OrthogonalStoreStateMachine type +type OrthogonalStoreStateMachine[P any] struct { + mock.Mock +} + +// Build provides a mock function with no fields +func (_m *OrthogonalStoreStateMachine[P]) Build() (*deferred.DeferredBlockPersist, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Build") + } + + var r0 *deferred.DeferredBlockPersist + var r1 error + if rf, ok := ret.Get(0).(func() (*deferred.DeferredBlockPersist, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *deferred.DeferredBlockPersist); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*deferred.DeferredBlockPersist) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EvolveState provides a mock function with given fields: sealedServiceEvents +func (_m *OrthogonalStoreStateMachine[P]) EvolveState(sealedServiceEvents []flow.ServiceEvent) error { + ret := _m.Called(sealedServiceEvents) + + if len(ret) == 0 { + panic("no return value specified for EvolveState") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]flow.ServiceEvent) error); ok { + r0 = rf(sealedServiceEvents) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ParentState provides a mock function with no fields +func (_m *OrthogonalStoreStateMachine[P]) ParentState() P { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ParentState") + } + + var r0 P + if rf, ok := ret.Get(0).(func() P); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(P) + } + } + + return r0 +} + +// View provides a mock function with no fields +func (_m *OrthogonalStoreStateMachine[P]) View() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for View") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// NewOrthogonalStoreStateMachine creates a new instance of OrthogonalStoreStateMachine. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOrthogonalStoreStateMachine[P any](t interface { + mock.TestingT + Cleanup(func()) +}) *OrthogonalStoreStateMachine[P] { + mock := &OrthogonalStoreStateMachine[P]{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/protocol_state/mock/protocol_kv_store.go b/state/protocol/protocol_state/mock/protocol_kv_store.go new file mode 100644 index 00000000000..165100b9107 --- /dev/null +++ b/state/protocol/protocol_state/mock/protocol_kv_store.go @@ -0,0 +1,131 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + lockctx "github.com/jordanschalm/lockctx" + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" + + protocol "github.com/onflow/flow-go/state/protocol" + + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state" + + storage "github.com/onflow/flow-go/storage" +) + +// ProtocolKVStore is an autogenerated mock type for the ProtocolKVStore type +type ProtocolKVStore struct { + mock.Mock +} + +// BatchIndex provides a mock function with given fields: lctx, rw, blockID, stateID +func (_m *ProtocolKVStore) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error { + ret := _m.Called(lctx, rw, blockID, stateID) + + if len(ret) == 0 { + panic("no return value specified for BatchIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, flow.Identifier) error); ok { + r0 = rf(lctx, rw, blockID, stateID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BatchStore provides a mock function with given fields: rw, stateID, kvStore +func (_m *ProtocolKVStore) BatchStore(rw storage.ReaderBatchWriter, stateID flow.Identifier, kvStore protocol.KVStoreReader) error { + ret := _m.Called(rw, stateID, kvStore) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(storage.ReaderBatchWriter, flow.Identifier, protocol.KVStoreReader) error); ok { + r0 = rf(rw, stateID, kvStore) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ByBlockID provides a mock function with given fields: blockID +func (_m *ProtocolKVStore) ByBlockID(blockID flow.Identifier) (protocol_state.KVStoreAPI, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 protocol_state.KVStoreAPI + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (protocol_state.KVStoreAPI, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) protocol_state.KVStoreAPI); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol_state.KVStoreAPI) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByID provides a mock function with given fields: id +func (_m *ProtocolKVStore) ByID(id flow.Identifier) (protocol_state.KVStoreAPI, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 protocol_state.KVStoreAPI + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (protocol_state.KVStoreAPI, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) protocol_state.KVStoreAPI); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol_state.KVStoreAPI) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewProtocolKVStore creates a new instance of ProtocolKVStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewProtocolKVStore(t interface { + mock.TestingT + Cleanup(func()) +}) *ProtocolKVStore { + mock := &ProtocolKVStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/protocol_state/mock/state_machine_events_telemetry_factory.go b/state/protocol/protocol_state/mock/state_machine_events_telemetry_factory.go new file mode 100644 index 00000000000..315450d6f5d --- /dev/null +++ b/state/protocol/protocol_state/mock/state_machine_events_telemetry_factory.go @@ -0,0 +1,48 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + mock "github.com/stretchr/testify/mock" + + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state" +) + +// StateMachineEventsTelemetryFactory is an autogenerated mock type for the StateMachineEventsTelemetryFactory type +type StateMachineEventsTelemetryFactory struct { + mock.Mock +} + +// Execute provides a mock function with given fields: candidateView +func (_m *StateMachineEventsTelemetryFactory) Execute(candidateView uint64) protocol_state.StateMachineTelemetryConsumer { + ret := _m.Called(candidateView) + + if len(ret) == 0 { + panic("no return value specified for Execute") + } + + var r0 protocol_state.StateMachineTelemetryConsumer + if rf, ok := ret.Get(0).(func(uint64) protocol_state.StateMachineTelemetryConsumer); ok { + r0 = rf(candidateView) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(protocol_state.StateMachineTelemetryConsumer) + } + } + + return r0 +} + +// NewStateMachineEventsTelemetryFactory creates a new instance of StateMachineEventsTelemetryFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateMachineEventsTelemetryFactory(t interface { + mock.TestingT + Cleanup(func()) +}) *StateMachineEventsTelemetryFactory { + mock := &StateMachineEventsTelemetryFactory{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/state/protocol/protocol_state/mock/state_machine_telemetry_consumer.go b/state/protocol/protocol_state/mock/state_machine_telemetry_consumer.go new file mode 100644 index 00000000000..79a59c3c643 --- /dev/null +++ b/state/protocol/protocol_state/mock/state_machine_telemetry_consumer.go @@ -0,0 +1,42 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// StateMachineTelemetryConsumer is an autogenerated mock type for the StateMachineTelemetryConsumer type +type StateMachineTelemetryConsumer struct { + mock.Mock +} + +// OnInvalidServiceEvent provides a mock function with given fields: event, err +func (_m *StateMachineTelemetryConsumer) OnInvalidServiceEvent(event flow.ServiceEvent, err error) { + _m.Called(event, err) +} + +// OnServiceEventProcessed provides a mock function with given fields: event +func (_m *StateMachineTelemetryConsumer) OnServiceEventProcessed(event flow.ServiceEvent) { + _m.Called(event) +} + +// OnServiceEventReceived provides a mock function with given fields: event +func (_m *StateMachineTelemetryConsumer) OnServiceEventReceived(event flow.ServiceEvent) { + _m.Called(event) +} + +// NewStateMachineTelemetryConsumer creates a new instance of StateMachineTelemetryConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value.
+func NewStateMachineTelemetryConsumer(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *StateMachineTelemetryConsumer {
+	mock := &StateMachineTelemetryConsumer{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/state/protocol/protocol_state/mock_interfaces/state_machine_events_telemetry_factory.go b/state/protocol/protocol_state/mock_interfaces/state_machine_events_telemetry_factory.go
new file mode 100644
index 00000000000..3c7604236ae
--- /dev/null
+++ b/state/protocol/protocol_state/mock_interfaces/state_machine_events_telemetry_factory.go
@@ -0,0 +1,8 @@
+package mockinterfaces
+
+import "github.com/onflow/flow-go/state/protocol/protocol_state"
+
+// StateMachineEventsTelemetryFactory allows creating a mock for the telemetry-factory callback.
+type StateMachineEventsTelemetryFactory interface {
+	Execute(candidateView uint64) protocol_state.StateMachineTelemetryConsumer
+}
diff --git a/state/protocol/protocol_state/pubsub/log_consumer.go b/state/protocol/protocol_state/pubsub/log_consumer.go
new file mode 100644
index 00000000000..ae37bcddd05
--- /dev/null
+++ b/state/protocol/protocol_state/pubsub/log_consumer.go
@@ -0,0 +1,42 @@
+package pubsub
+
+import (
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+	"github.com/onflow/flow-go/utils/logging"
+)
+
+// LogConsumer implements protocol_state.StateMachineTelemetryConsumer and logs all events.
+type LogConsumer struct {
+	log zerolog.Logger
+}
+
+var _ protocol_state.StateMachineTelemetryConsumer = (*LogConsumer)(nil)
+
+func NewLogConsumer(log zerolog.Logger) *LogConsumer {
+	lc := &LogConsumer{
+		log: log,
+	}
+	return lc
+}
+
+func (l *LogConsumer) OnInvalidServiceEvent(event flow.ServiceEvent, err error) {
+	l.log.Warn().
+		Str(logging.KeySuspicious, "true").
+		Str("type", event.Type.String()).
+		Msgf("invalid service event detected: %s", err.Error())
+}
+
+func (l *LogConsumer) OnServiceEventReceived(event flow.ServiceEvent) {
+	l.log.Info().
+		Str("type", event.Type.String()).
+		Msg("received service event")
+}
+
+func (l *LogConsumer) OnServiceEventProcessed(event flow.ServiceEvent) {
+	l.log.Info().
+		Str("type", event.Type.String()).
+		Msg("successfully processed service event")
+}
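+
+// Example wiring (an illustrative sketch, not part of this file): a telemetry factory that
+// hands every state machine a view-scoped log consumer. The `logger` variable is an assumed
+// zerolog.Logger owned by the caller.
+//
+//	factory := func(candidateView uint64) protocol_state.StateMachineTelemetryConsumer {
+//		return NewLogConsumer(logger.With().Uint64("candidate_view", candidateView).Logger())
+//	}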
+ Msg("successfully processed service event") +} diff --git a/state/protocol/protocol_state/state/mutable_protocol_state_test.go b/state/protocol/protocol_state/state/mutable_protocol_state_test.go new file mode 100644 index 00000000000..d2f0f3709ea --- /dev/null +++ b/state/protocol/protocol_state/state/mutable_protocol_state_test.go @@ -0,0 +1,654 @@ +package state + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "golang.org/x/exp/slices" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state/protocol" + psmock "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/state/protocol/protocol_state" + protocol_statemock "github.com/onflow/flow-go/state/protocol/protocol_state/mock" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/deferred" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestProtocolStateMutator(t *testing.T) { + suite.Run(t, new(StateMutatorSuite)) +} + +// StateMutatorSuite is a test suite for the MutableProtocolState, it holds the minimum mocked state to set up a tested instance. +// Tests in this suite are designed to rely on automatic assertions when leaving the scope of the test. +type StateMutatorSuite struct { + suite.Suite + + // sub-components injected into MutableProtocolState + headersDB storagemock.Headers + resultsDB storagemock.ExecutionResults + protocolKVStoreDB protocol_statemock.ProtocolKVStore + epochProtocolStateDB storagemock.EpochProtocolStateEntries + globalParams psmock.GlobalParams + kvStateMachines []protocol_statemock.OrthogonalStoreStateMachine[protocol.KVStoreReader] + kvStateMachineFactories []protocol_statemock.KeyValueStoreStateMachineFactory + + // basic setup for happy path test + parentState protocol_statemock.KVStoreAPI // Protocol state of `candidate`s parent block + candidate flow.Header // candidate block, potentially still under construction + evolvingState protocol_statemock.KVStoreMutator + latestProtocolVersion uint64 + + mutableState *MutableProtocolState +} + +func (s *StateMutatorSuite) SetupTest() { + s.epochProtocolStateDB = *storagemock.NewEpochProtocolStateEntries(s.T()) + s.protocolKVStoreDB = *protocol_statemock.NewProtocolKVStore(s.T()) + s.globalParams = *psmock.NewGlobalParams(s.T()) + s.headersDB = *storagemock.NewHeaders(s.T()) + s.resultsDB = *storagemock.NewExecutionResults(s.T()) + + // Basic happy-path test scenario: + // - candidate block: + s.latestProtocolVersion = 1 + s.candidate = *unittest.BlockHeaderFixture(unittest.HeaderWithView(1000)) + + // - protocol state as of `candidate`s parent + s.parentState = *protocol_statemock.NewKVStoreAPI(s.T()) + s.protocolKVStoreDB.On("ByBlockID", s.candidate.ParentID).Return(&s.parentState, nil) + s.parentState.On("GetProtocolStateVersion").Return(s.latestProtocolVersion) + s.parentState.On("GetVersionUpgrade").Return(nil) // no version upgrade by default + s.parentState.On("ID").Return(unittest.IdentifierFixture(), nil) + s.parentState.On("Replicate", s.latestProtocolVersion).Return(&s.evolvingState, nil) + + // state replicated from the parent state; by default exactly the same as the parent state + // CAUTION: ID of evolving state must be defined by the tests. 
+	s.evolvingState = *protocol_statemock.NewKVStoreMutator(s.T())
+
+	// Factories for the state machines expect `s.parentState` as parent state and `s.evolvingState` as target state.
+	// CAUTION: the behaviour of each state machine has to be defined by the tests.
+	s.kvStateMachines = make([]protocol_statemock.OrthogonalStoreStateMachine[protocol.KVStoreReader], 2)
+	s.kvStateMachineFactories = make([]protocol_statemock.KeyValueStoreStateMachineFactory, len(s.kvStateMachines))
+	kvStateMachineFactories := make([]protocol_state.KeyValueStoreStateMachineFactory, len(s.kvStateMachines)) // slice of interface-typed pointers to the elements of s.kvStateMachineFactories
+	for i := range s.kvStateMachines {
+		s.kvStateMachineFactories[i] = *protocol_statemock.NewKeyValueStoreStateMachineFactory(s.T())
+		s.kvStateMachineFactories[i].On("Create", s.candidate.View, s.candidate.ParentID, &s.parentState, &s.evolvingState).Return(&s.kvStateMachines[i], nil)
+		kvStateMachineFactories[i] = &s.kvStateMachineFactories[i]
+	}
+
+	s.mutableState = newMutableProtocolState(
+		&s.epochProtocolStateDB,
+		&s.protocolKVStoreDB,
+		&s.globalParams,
+		&s.headersDB,
+		&s.resultsDB,
+		kvStateMachineFactories,
+	)
+}
+
+// testEvolveState is the main logic for testing `EvolveState`. Specifically, we test:
+// - we _always_ require a deferred db update that indexes the protocol state by the candidate block's ID
+// - we expect a deferred db update that persists the protocol state if and only if there was a state change compared to the parent protocol state
+//
+// Note that the `MutableProtocolState` bundles all deferred database updates into a `DeferredBlockPersist`. Conceptually, it is possible that
+// the `MutableProtocolState` wraps the deferred database operations in faulty code, such that they are eventually not executed. Therefore,
+// we explicitly test here whether the mocked storage methods `ProtocolKVStore.BatchIndex` and `ProtocolKVStore.BatchStore` are
+// actually called when executing the returned `DeferredBlockPersist`.
+func (s *StateMutatorSuite) testEvolveState(seals []*flow.Seal, expectedResultingStateID flow.Identifier, stateChangeExpected bool) {
+	// on the happy path, we _always_ require a deferred db update, which indexes the protocol state by the candidate block's ID
+	rw := storagemock.NewReaderBatchWriter(s.T())
+	s.protocolKVStoreDB.On("BatchIndex", mock.Anything, rw, s.candidate.ID(), expectedResultingStateID).Return(nil).Once()
+
+	// In addition, if and only if a state change is expected, the resulting state must be stored;
+	// otherwise, the parent block's protocol state ID is reused.
+	if stateChangeExpected {
+		s.protocolKVStoreDB.On("BatchStore", rw, expectedResultingStateID, &s.evolvingState).Return(nil).Once()
+	}
+
+	deferredDBOps := deferred.NewDeferredBlockPersist()
+	resultingStateID, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, seals)
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), expectedResultingStateID, resultingStateID)
+
+	// Provide the blockID and execute the resulting `DeferredDBUpdate`. Thereby,
+	// the expected mock methods should be called, which is asserted by the testify framework.
+	blockID := s.candidate.ID()
+	err = deferredDBOps.Execute(nil, blockID, rw)
+	require.NoError(s.T(), err)
+
+	// The testify framework calls `AssertExpectations` on all mocks when the test finishes.
+	// However, note that we are calling `testEvolveState` repeatedly across multiple sub-tests, which re-use mocks
+	// from the `StateMutatorSuite`. Therefore, by default testify would only enforce that the expected mock calls
+	// happened in all sub-tests combined, but not specifically in the sub-test where we expect them to. To avoid
+	// any problems, we call `AssertExpectations` below to enforce that the mock calls added by `testEvolveState`
+	// have actually happened.
+	s.protocolKVStoreDB.AssertExpectations(s.T())
+	rw.AssertExpectations(s.T())
+}
+
+// Test_HappyPath_StateInvariant tests that `MutableProtocolState.EvolveState` returns all updates from sub-state state machines and
+// prepares updates to the KV store when building the protocol state. Here, we focus on the path where the *state remains invariant*.
+func (s *StateMutatorSuite) Test_HappyPath_StateInvariant() {
+	parentProtocolStateID := s.parentState.ID()
+	s.evolvingState.On("ID").Return(parentProtocolStateID, nil)
+
+	s.Run("nil seals slice, hence no service events", func() {
+		for i := range s.kvStateMachines {
+			s.kvStateMachines[i] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock()
+		}
+
+		s.testEvolveState(nil, parentProtocolStateID, false)
+	})
+
+	s.Run("empty seals slice, hence no service events", func() {
+		for i := range s.kvStateMachines {
+			s.kvStateMachines[i] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock()
+		}
+
+		s.testEvolveState([]*flow.Seal{}, parentProtocolStateID, false)
+	})
+
+	s.Run("seals without service events", func() {
+		sealedBlock := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(s.candidate.View - 10))
+		sealedResult := unittest.ExecutionResultFixture()
+		seal := unittest.Seal.Fixture(unittest.Seal.WithBlockID(sealedBlock.ID()))
+		s.headersDB.On("ByBlockID", seal.BlockID).Return(sealedBlock, nil)
+		s.resultsDB.On("ByID", seal.ResultID).Return(sealedResult, nil)
+
+		for i := range s.kvStateMachines {
+			s.kvStateMachines[i] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock()
+		}
+
+		s.testEvolveState([]*flow.Seal{seal}, parentProtocolStateID, false)
+	})
+
+	s.Run("seals with service events", func() {
+		sealedBlock := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(s.candidate.View - 10))
+		serviceEvents := []flow.ServiceEvent{unittest.EpochSetupFixture().ServiceEvent()}
+		sealedResult := unittest.ExecutionResultFixture(func(result *flow.ExecutionResult) {
+			result.ServiceEvents = serviceEvents
+		})
+		seal := unittest.Seal.Fixture(unittest.Seal.WithBlockID(sealedBlock.ID()))
+		s.headersDB.On("ByBlockID", seal.BlockID).Return(sealedBlock, nil)
+		s.resultsDB.On("ByID", seal.ResultID).Return(sealedResult, nil)
+
+		for i := range s.kvStateMachines {
+			s.kvStateMachines[i] = s.mockStateTransition().ExpectedServiceEvents(serviceEvents).Mock()
+		}
+
+		s.testEvolveState([]*flow.Seal{seal}, parentProtocolStateID, false)
+	})
+}
+
+// Test_HappyPath_StateChange tests that `MutableProtocolState.EvolveState` returns all updates from sub-state state machines and
+// prepares updates to the KV store when building the protocol state. Here, we focus on the path where the *state is modified*.
+//
+// All mocked state machines return a single deferred db update that will be subsequently returned and executed.
+// We also expect that the resulting state will be indexed *and* stored in the protocol KV store (as the state has changed). To
+// assert that, we mock the corresponding storage methods and expect them to be called when applying deferred updates in caller code.
To +// assert that, we mock the corresponding storage methods and expect them to be called when applying deferred updates in caller code. +func (s *StateMutatorSuite) Test_HappyPath_StateChange() { + s.Run("nil seals slice, hence no service events", func() { + expectedResultingStateID := unittest.IdentifierFixture() + modifyState := func(_ mock.Arguments) { + s.evolvingState.On("ID").Return(expectedResultingStateID, nil).Once() + } + s.kvStateMachines[0] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).DuringEvolveState(modifyState).Mock() + s.kvStateMachines[1] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock() + + s.testEvolveState(nil, expectedResultingStateID, true) + }) + + s.Run("empty seals slice, hence no service events", func() { + expectedResultingStateID := unittest.IdentifierFixture() + modifyState := func(_ mock.Arguments) { + s.evolvingState.On("ID").Return(expectedResultingStateID, nil).Once() + } + s.kvStateMachines[0] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).DuringEvolveState(modifyState).Mock() + s.kvStateMachines[1] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock() + + s.testEvolveState([]*flow.Seal{}, expectedResultingStateID, true) + }) + + s.Run("seals without service events", func() { + sealedBlock := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(s.candidate.View - 10)) + sealedResult := unittest.ExecutionResultFixture() + seal := unittest.Seal.Fixture(unittest.Seal.WithBlockID(sealedBlock.ID())) + s.headersDB.On("ByBlockID", seal.BlockID).Return(sealedBlock, nil) + s.resultsDB.On("ByID", seal.ResultID).Return(sealedResult, nil) + + expectedResultingStateID := unittest.IdentifierFixture() + modifyState := func(_ mock.Arguments) { + s.evolvingState.On("ID").Return(expectedResultingStateID, nil).Once() + } + s.kvStateMachines[0] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).DuringEvolveState(modifyState).Mock() + s.kvStateMachines[1] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock() + + s.testEvolveState([]*flow.Seal{seal}, expectedResultingStateID, true) + }) + + s.Run("seals with service events", func() { + sealedBlock := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(s.candidate.View - 10)) + serviceEvents := []flow.ServiceEvent{unittest.EpochSetupFixture().ServiceEvent()} + sealedResult := unittest.ExecutionResultFixture(func(result *flow.ExecutionResult) { + result.ServiceEvents = serviceEvents + }) + seal := unittest.Seal.Fixture(unittest.Seal.WithBlockID(sealedBlock.ID())) + s.headersDB.On("ByBlockID", seal.BlockID).Return(sealedBlock, nil) + s.resultsDB.On("ByID", seal.ResultID).Return(sealedResult, nil) + + expectedResultingStateID := unittest.IdentifierFixture() + modifyState := func(_ mock.Arguments) { + s.evolvingState.On("ID").Return(expectedResultingStateID, nil).Once() + } + s.kvStateMachines[0] = s.mockStateTransition().ExpectedServiceEvents(serviceEvents).DuringEvolveState(modifyState).Mock() + s.kvStateMachines[1] = s.mockStateTransition().ExpectedServiceEvents(serviceEvents).Mock() + + s.testEvolveState([]*flow.Seal{seal}, expectedResultingStateID, true) + }) +} + +// Test_VersionUpgrade tests the behavior when a Version Upgrade is in the kv store. +// Note that the Version Upgrade was already applied if and only if the candidate block's +// view at least the activation view. +// - Check if there is a version upgrade available. 
+// - Replicate the parent state to the actual version.
+// - Create a state machine for each sub-state of the Dynamic Protocol State.
+func (s *StateMutatorSuite) Test_VersionUpgrade() {
+	parentStateID := unittest.IdentifierFixture()
+
+	// The `ActivationView` for the upgrade is in the future of the candidate block. The MutableProtocolState
+	// should then replicate the parent state into a data model of the same version.
+	s.Run("upgrade at future view", func() {
+		newVersion := s.latestProtocolVersion + 1
+		s.parentState = *protocol_statemock.NewKVStoreAPI(s.T())
+		s.parentState.On("ID").Return(parentStateID, nil)
+		s.parentState.On("GetProtocolStateVersion").Return(s.latestProtocolVersion).Once()
+		s.parentState.On("GetVersionUpgrade").Return(&protocol.ViewBasedActivator[uint64]{
+			Data:           newVersion,
+			ActivationView: s.candidate.View + 1,
+		}).Once()
+		s.parentState.On("Replicate", s.latestProtocolVersion).Return(&s.evolvingState, nil)
+		s.evolvingState.On("ID").Return(parentStateID, nil).Once()
+
+		for i := range s.kvStateMachines {
+			s.kvStateMachines[i] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock()
+		}
+		s.testEvolveState([]*flow.Seal{}, parentStateID, false)
+	})
+
+	// The `ActivationView` for the upgrade equals the candidate block's view. The MutableProtocolState
+	// should then replicate the parent state into the data model of the newer version.
+	s.Run("upgrade at candidate block view", func() {
+		newVersion := s.latestProtocolVersion + 1
+		newStateID := unittest.IdentifierFixture()
+		s.parentState = *protocol_statemock.NewKVStoreAPI(s.T())
+		s.parentState.On("ID").Return(parentStateID, nil)
+		s.parentState.On("GetProtocolStateVersion").Return(s.latestProtocolVersion).Once()
+		s.parentState.On("GetVersionUpgrade").Return(&protocol.ViewBasedActivator[uint64]{
+			Data:           newVersion,
+			ActivationView: s.candidate.View,
+		}).Once()
+		s.parentState.On("Replicate", newVersion).Return(&s.evolvingState, nil)
+		s.evolvingState.On("ID").Return(newStateID, nil).Once()
+
+		for i := range s.kvStateMachines {
+			s.kvStateMachines[i] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock()
+		}
+		s.testEvolveState([]*flow.Seal{}, newStateID, true)
+	})
+
+	// The `ActivationView` for the upgrade is _smaller_ than the candidate block's view, but the upgrade has not yet been applied.
+	// This happens if there are no ancestors in this fork with views in [ActivationView, …, candidate.View-1].
+	// The MutableProtocolState should realize that it still needs to apply the version upgrade now and replicate
+	// the parent state into the data model of the newer version.
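+	// Concretely (a sketch of this scenario): with ActivationView = candidate.View - 1 and no ancestor
+	// block at a view in [ActivationView, candidate.View - 1], the candidate is the first block at which
+	// the upgrade can take effect, so `Replicate` must be called with the new version.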
+	s.Run("upgrade still pending with past ActivationView", func() {
+		newVersion := s.latestProtocolVersion + 1
+		newStateID := unittest.IdentifierFixture()
+		s.parentState = *protocol_statemock.NewKVStoreAPI(s.T())
+		s.parentState.On("ID").Return(parentStateID, nil)
+		s.parentState.On("GetProtocolStateVersion").Return(s.latestProtocolVersion).Once()
+		s.parentState.On("GetVersionUpgrade").Return(&protocol.ViewBasedActivator[uint64]{
+			Data:           newVersion,
+			ActivationView: s.candidate.View - 1,
+		}).Once()
+		s.parentState.On("Replicate", newVersion).Return(&s.evolvingState, nil)
+		s.evolvingState.On("ID").Return(newStateID, nil).Once()
+
+		for i := range s.kvStateMachines {
+			s.kvStateMachines[i] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock()
+		}
+		s.testEvolveState([]*flow.Seal{}, newStateID, true)
+	})
+
+	// By convention, we leave Version Upgrades past their activation view in the Protocol State (so we can
+	// apply the upgrade, even if we don't produce blocks around the activation view). The MutableProtocolState
+	// should realize that the upgrade has already been applied and replicate the parent state into a data model
+	// of the same version.
+	s.Run("upgrade already done at earlier view", func() {
+		s.parentState = *protocol_statemock.NewKVStoreAPI(s.T())
+		s.parentState.On("ID").Return(parentStateID, nil)
+		s.parentState.On("GetProtocolStateVersion").Return(s.latestProtocolVersion).Once()
+		s.parentState.On("GetVersionUpgrade").Return(&protocol.ViewBasedActivator[uint64]{
+			Data:           s.latestProtocolVersion,
+			ActivationView: s.candidate.View - 1,
+		}).Once()
+		s.parentState.On("Replicate", s.latestProtocolVersion).Return(&s.evolvingState, nil)
+		s.evolvingState.On("ID").Return(parentStateID, nil).Once()
+
+		for i := range s.kvStateMachines {
+			s.kvStateMachines[i] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock()
+		}
+		s.testEvolveState([]*flow.Seal{}, parentStateID, false)
+	})
+}
+
+// Test_SealsOrdered verifies that `MutableProtocolState.EvolveState` processes service events from seals ordered by *increasing* block
+// height, independently of the order in which the seals are provided. Here, we explicitly provide seals in order of *decreasing* block height.
+func (s *StateMutatorSuite) Test_SealsOrdered() {
+	// generate seals in order of *increasing* block height and store the resulting list of _ordered_ service events for reference in `orderedServiceEvents`
+	numberSeals := 7
+	var seals []*flow.Seal
+	var orderedServiceEvents []flow.ServiceEvent
+	for i := 1; i <= numberSeals; i++ {
+		// create the seals in order of increasing block height:
+		sealedBlock := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(s.candidate.View - 50 + uint64(i)))
+		sealedResult := unittest.ExecutionResultFixture(
+			unittest.WithExecutionResultBlockID(sealedBlock.ID()),
+			unittest.WithServiceEvents(i),
+		)
+		seal := unittest.Seal.Fixture(unittest.Seal.WithBlockID(sealedBlock.ID()), unittest.Seal.WithResult(sealedResult))
+		orderedServiceEvents = append(orderedServiceEvents, sealedResult.ServiceEvents...)
+		seals = append(seals, seal)
+
+		s.headersDB.On("ByBlockID", sealedBlock.ID()).Return(sealedBlock, nil)
+		s.resultsDB.On("ByID", seal.ResultID).Return(sealedResult, nil)
+	}
+	slices.Reverse(seals) // reverse the order of the seals, so that they are listed in order of _decreasing_ block height
+
+	s.Run("service events leave state invariant", func() {
+		parentProtocolStateID := s.parentState.ID()
+		s.evolvingState.On("ID").Return(parentProtocolStateID, nil).Once()
+
+		s.kvStateMachines[0] = s.mockStateTransition().ExpectedServiceEvents(orderedServiceEvents).Mock()
+		s.kvStateMachines[1] = s.mockStateTransition().ExpectedServiceEvents(orderedServiceEvents).Mock()
+
+		s.testEvolveState(seals, parentProtocolStateID, false)
+	})
+
+	s.Run("service events change state", func() {
+		expectedResultingStateID := unittest.IdentifierFixture()
+		modifyState := func(_ mock.Arguments) {
+			s.evolvingState.On("ID").Return(expectedResultingStateID, nil).Once()
+		}
+		s.kvStateMachines[0] = s.mockStateTransition().ExpectedServiceEvents(orderedServiceEvents).DuringEvolveState(modifyState).Mock()
+		s.kvStateMachines[1] = s.mockStateTransition().ExpectedServiceEvents(orderedServiceEvents).Mock()
+
+		s.testEvolveState(seals, expectedResultingStateID, true)
+	})
+
+}
+
+// Test_InvalidParent checks the behaviour of `MutableProtocolState.EvolveState` when the specified parent block is not found.
+func (s *StateMutatorSuite) Test_InvalidParent() {
+	unknownParent := unittest.IdentifierFixture()
+	s.protocolKVStoreDB.On("ByBlockID", unknownParent).Return(nil, storage.ErrNotFound)
+
+	deferredDBOps := deferred.NewDeferredBlockPersist()
+	_, err := s.mutableState.EvolveState(deferredDBOps, unknownParent, s.candidate.View, []*flow.Seal{})
+	require.Error(s.T(), err)
+	require.False(s.T(), protocol.IsInvalidServiceEventError(err))
+	require.True(s.T(), deferredDBOps.IsEmpty())
+}
+
+// Test_ReplicateFails verifies that errors during the parent state replication are escalated to the caller.
+// Because the failure arises early, we don't expect the state machine constructors to be called
+// (they require a replica of the parent state as target, which is not available if replication fails).
+func (s *StateMutatorSuite) Test_ReplicateFails() {
+	exception := errors.New("exception")
+	s.parentState = *protocol_statemock.NewKVStoreAPI(s.T())
+	s.parentState.On("GetProtocolStateVersion").Return(s.latestProtocolVersion)
+	s.parentState.On("GetVersionUpgrade").Return(nil).Once()
+	s.parentState.On("Replicate", s.latestProtocolVersion).Return(nil, exception).Once()
+
+	// `SetupTest` initializes the mock factories to expect to be called, so we overwrite the mocks here:
+	s.kvStateMachineFactories[0] = *protocol_statemock.NewKeyValueStoreStateMachineFactory(s.T())
+	s.kvStateMachineFactories[1] = *protocol_statemock.NewKeyValueStoreStateMachineFactory(s.T())
+
+	deferredDBOps := deferred.NewDeferredBlockPersist()
+	_, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{})
+	require.ErrorIs(s.T(), err, exception)
+	require.True(s.T(), deferredDBOps.IsEmpty())
+}
+
+// Test_StateMachineFactoryFails verifies that errors received while creating the sub-state machines are escalated to the caller.
+// Protocol Convention:
+//   - The Orthogonal Store State Machines have a 3-step process to evolve their respective sub-states:
+//     (i) construction (via the injected factories)
+//     (ii) processing ordered service events from sealed results (in `EvolveState` call)
+//     (iii) in the `Build` step, each state machine assembles its resulting sub-state and the corresponding database operations
+//     to index and persist that sub-state.
+//   - The protocol convention is that `MutableProtocolState` first executes step (i) on all state machines, then step (ii), and lastly step (iii).
+//
+// This test also verifies that the `MutableProtocolState` does not engage in step (ii) or (iii) before completing step (i) on all state machines.
+func (s *StateMutatorSuite) Test_StateMachineFactoryFails() {
+	workingFactory := *protocol_statemock.NewKeyValueStoreStateMachineFactory(s.T())
+	stateMachine := protocol_statemock.NewOrthogonalStoreStateMachine[protocol.KVStoreReader](s.T()) // we expect no methods to be called on this state machine
+	workingFactory.On("Create", s.candidate.View, s.candidate.ParentID, &s.parentState, &s.evolvingState).Return(stateMachine, nil).Maybe()
+
+	exception := errors.New("exception")
+	failingFactory := *protocol_statemock.NewKeyValueStoreStateMachineFactory(s.T())
+	failingFactory.On("Create", s.candidate.View, s.candidate.ParentID, &s.parentState, &s.evolvingState).Return(nil, exception).Once()
+
+	s.Run("failing factory is last", func() {
+		s.kvStateMachineFactories[0], s.kvStateMachineFactories[1] = workingFactory, failingFactory //nolint:govet
+
+		deferredDBOps := deferred.NewDeferredBlockPersist()
+		_, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{})
+		require.ErrorIs(s.T(), err, exception)
+		require.True(s.T(), deferredDBOps.IsEmpty())
+	})
+
+	failingFactory.On("Create", s.candidate.View, s.candidate.ParentID, &s.parentState, &s.evolvingState).Return(nil, exception).Once()
+	s.Run("failing factory is first", func() {
+		s.kvStateMachineFactories[0], s.kvStateMachineFactories[1] = failingFactory, workingFactory //nolint:govet
+		deferredDBOps := deferred.NewDeferredBlockPersist()
+		_, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{})
+		require.ErrorIs(s.T(), err, exception)
+		require.True(s.T(), deferredDBOps.IsEmpty())
+	})
+}
+
+// Test_StateMachineProcessingServiceEventsFails verifies that errors which any of the sub-state machines return while
+// processing the service events are escalated to the caller. Specifically, the state machine should handle invalid events
+// internally and only escalate internal errors in case of an irrecoverable problem. The MutableProtocolState should _not_
+// interpret errors from the sub-state machines as signs of invalid service events.
+// Protocol Convention:
+//   - The Orthogonal Store State Machines have a 3-step process to evolve their respective sub-states:
+//     (i) construction (via the injected factories)
+//     (ii) processing ordered service events from sealed results (in `EvolveState` call)
+//     (iii) in the `Build` step, each state machine assembles its resulting sub-state and the corresponding database operations
+//     to index and persist that sub-state.
+//   - The protocol convention is that `MutableProtocolState` first executes step (i) on all state machines, then step (ii), and lastly step (iii).
+//
+// This test also verifies that the `MutableProtocolState` does not engage in step (iii) before completing step (ii) on all state machines.
+func (s *StateMutatorSuite) Test_StateMachineProcessingServiceEventsFails() {
+	workingStateMachine := *protocol_statemock.NewOrthogonalStoreStateMachine[protocol.KVStoreReader](s.T())
+	workingStateMachine.On("EvolveState", mock.MatchedBy(emptySlice[flow.ServiceEvent]())).Return(nil).Once()
+
+	exception := errors.New("exception")
+	failingStateMachine := *protocol_statemock.NewOrthogonalStoreStateMachine[protocol.KVStoreReader](s.T())
+	failingStateMachine.On("EvolveState", mock.MatchedBy(emptySlice[flow.ServiceEvent]())).Return(exception).Once()
+
+	s.Run("failing state machine is last", func() {
+		s.kvStateMachines[0], s.kvStateMachines[1] = workingStateMachine, failingStateMachine //nolint:govet
+		deferredDBOps := deferred.NewDeferredBlockPersist()
+		_, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{})
+		require.ErrorIs(s.T(), err, exception)
+		require.False(s.T(), protocol.IsInvalidServiceEventError(err))
+		require.True(s.T(), deferredDBOps.IsEmpty())
+	})
+
+	failingStateMachine.On("EvolveState", mock.MatchedBy(emptySlice[flow.ServiceEvent]())).Return(exception).Once()
+	s.Run("failing state machine is first", func() {
+		s.kvStateMachines[0], s.kvStateMachines[1] = failingStateMachine, workingStateMachine //nolint:govet
+		deferredDBOps := deferred.NewDeferredBlockPersist()
+		_, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{})
+		require.ErrorIs(s.T(), err, exception)
+		require.False(s.T(), protocol.IsInvalidServiceEventError(err))
+		require.True(s.T(), deferredDBOps.IsEmpty())
+	})
+}
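As an aside for reviewers: the three-step convention described above is abstract, so here is a minimal, hedged sketch of a conforming sub-state machine. The method signatures are inferred from the mocks in this test file; the real `OrthogonalStoreStateMachine` interface may carry additional methods, and `noopStateMachine` is a hypothetical name used purely for illustration.

// noopStateMachine is a hypothetical sub-state machine that leaves its sub-state
// unchanged. A factory constructs it (step i), EvolveState ingests the
// chronologically ordered service events (step ii), and Build assembles the
// deferred persistence operations (step iii).
type noopStateMachine struct{}

// EvolveState implements step (ii). A real state machine would fold the service
// events into its sub-state here, handling invalid events internally and
// returning an error only for irrecoverable problems.
func (m *noopStateMachine) EvolveState(events []flow.ServiceEvent) error {
	return nil
}

// Build implements step (iii). It returns the deferred database operations that
// index and persist the resulting sub-state -- none for a no-op machine.
func (m *noopStateMachine) Build() (*deferred.DeferredBlockPersist, error) {
	return deferred.NewDeferredBlockPersist(), nil
}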
+
+// Test_StateMachineBuildFails verifies that errors which any of the sub-state machines return during their `Build`
+// step are escalated to the caller. Specifically, the state machines should handle invalid events internally and only
+// escalate internal errors in case of an irrecoverable problem.
+func (s *StateMutatorSuite) Test_StateMachineBuildFails() {
+	workingStateMachine := *protocol_statemock.NewOrthogonalStoreStateMachine[protocol.KVStoreReader](s.T())
+	workingStateMachine.On("EvolveState", mock.MatchedBy(emptySlice[flow.ServiceEvent]())).Return(nil).Twice()
+	workingStateMachine.On("Build").Return(deferred.NewDeferredBlockPersist(), nil).Maybe()
+
+	exception := errors.New("exception")
+	failingStateMachine := *protocol_statemock.NewOrthogonalStoreStateMachine[protocol.KVStoreReader](s.T())
+	failingStateMachine.On("EvolveState", mock.MatchedBy(emptySlice[flow.ServiceEvent]())).Return(nil).Twice()
+	failingStateMachine.On("Build").Return(nil, exception).Once()
+
+	s.Run("failing state machine is last", func() {
+		s.kvStateMachines[0], s.kvStateMachines[1] = workingStateMachine, failingStateMachine //nolint:govet
+		deferredDBOps := deferred.NewDeferredBlockPersist()
+		_, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{})
+		require.ErrorIs(s.T(), err, exception)
+		require.False(s.T(), protocol.IsInvalidServiceEventError(err))
+		require.Nil(s.T(), deferredDBOps.Execute(nil, flow.ZeroID, nil))
+	})
+
+	failingStateMachine.On("Build").Return(nil, exception).Once()
+	s.Run("failing state machine is first", func() {
+		s.kvStateMachines[0], s.kvStateMachines[1] = failingStateMachine, workingStateMachine //nolint:govet
+		deferredDBOps := deferred.NewDeferredBlockPersist()
+		_, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{})
+		require.ErrorIs(s.T(), err, exception)
+		require.False(s.T(), protocol.IsInvalidServiceEventError(err))
+		require.Nil(s.T(), deferredDBOps.Execute(nil, flow.ZeroID, nil))
+	})
+}
+
+// Test_EncodeFailed verifies the handling of a failure to encode and store the resulting state: `EvolveState` itself
+// succeeds, and the exception only surfaces when the deferred database operations are executed.
+func (s *StateMutatorSuite) Test_EncodeFailed() {
+	exception := errors.New("exception")
+	s.protocolKVStoreDB = *protocol_statemock.NewProtocolKVStore(s.T())
+	s.protocolKVStoreDB.On("ByBlockID", s.candidate.ParentID).Return(&s.parentState, nil)
+
+	expectedResultingStateID := unittest.IdentifierFixture()
+	modifyState := func(_ mock.Arguments) {
+		s.evolvingState.On("ID").Return(expectedResultingStateID, nil).Once()
+	}
+	s.kvStateMachines[0] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).DuringEvolveState(modifyState).Mock()
+	s.kvStateMachines[1] = s.mockStateTransition().ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).Mock()
+
+	rw := storagemock.NewReaderBatchWriter(s.T())
+	s.protocolKVStoreDB.On("BatchIndex", mock.Anything, mock.Anything, s.candidate.ID(), expectedResultingStateID).Return(nil).Once()
+	s.protocolKVStoreDB.On("BatchStore", mock.Anything, expectedResultingStateID, &s.evolvingState).Return(exception).Once()
+
+	deferredDBOps := deferred.NewDeferredBlockPersist()
+	_, err := s.mutableState.EvolveState(deferredDBOps, s.candidate.ParentID, s.candidate.View, []*flow.Seal{})
+	require.NoError(s.T(), err) // `EvolveState` should succeed, because storing the encoded snapshot only happens when we execute dbUpdates
+
+	// Provide the blockID and execute the resulting `DeferredDBUpdate`. Thereby,
+	// the expected mock methods should be called, which is asserted by the testify framework
+	blockID := s.candidate.ID()
+	err = deferredDBOps.Execute(nil, blockID, rw)
+
+	// We expect the business logic to wrap the unexpected `exception` from above into an irrecoverable error.
+	// Therefore, we should _not_ be able to unwrap the returned error to match the original `exception`.
+	// Furthermore, the business logic should _not_ erroneously interpret the error as an invalid service event error.
+	irrecErr := irrecoverable.NewExceptionf("")
+	require.ErrorAs(s.T(), err, &irrecErr)
+	require.NotErrorIs(s.T(), err, exception)
+	require.False(s.T(), protocol.IsInvalidServiceEventError(err))
+
+	s.protocolKVStoreDB.AssertExpectations(s.T())
+}
+
+/* *************************************************** utility methods *************************************************** */
+
+// emptySlice returns a functor for testing that the input `slice` (with element type `T`)
+// is empty. This functor is intended to be used with testify's `MatchedBy`.
+func emptySlice[T any]() func(interface{}) bool {
+	return func(slice interface{}) bool {
+		s := slice.([]T)
+		return len(s) == 0
+	}
+}
+
+// mockStateTransition instantiates a builder for configuring an `OrthogonalStoreStateMachine` mock with
+// little code. See the struct `mockStateTransition` below for details.
+func (s *StateMutatorSuite) mockStateTransition() *mockStateTransition {
+	return &mockStateTransition{T: s.T()}
+}
+
+// mockStateTransition is a builder for configuring an `OrthogonalStoreStateMachine` mock with little code.
+// The mock is tailored for the happy path and verifies that:
+//   - `EvolveState` is _always_ called before `Build` and each method is called only once
+//   - all deferred database operations that the state machine returns during the build step are eventually called
+//
+// In addition, `ExpectedServiceEvents` can be used to specify the exact slice of service events the state machine
+// expects as inputs for its `EvolveState` call. Similarly, with `ServiceEventsMatch` you can provide an argument
+// matcher to verify properties of the input slice of Service Events.
+// Lastly, with `DuringEvolveState` we can specify logic to be executed during the state machine's `EvolveState`
+// call. This is helpful for testing cases where the state machine modifies the Protocol State.
+type mockStateTransition struct {
+	T                     *testing.T
+	expectedServiceEvents interface{}
+	runInEvolveState      func(_ mock.Arguments)
+}
+
+// ExpectedServiceEvents specifies the exact slice of service events the state machine expects as inputs for its
+// `EvolveState` call. Note that `ExpectedServiceEvents` and `ServiceEventsMatch` override all prior checks for
+// the input slice of Service Events. The method returns a self-reference for chaining.
+func (m *mockStateTransition) ExpectedServiceEvents(es []flow.ServiceEvent) *mockStateTransition {
+	m.expectedServiceEvents = es
+	return m
+}
+
+// ServiceEventsMatch provides an argument matcher to verify properties of the input slice of Service Events.
+// Note that `ExpectedServiceEvents` and `ServiceEventsMatch` override all prior checks for the input slice of Service Events.
+// The method returns a self-reference for chaining.
+func (m *mockStateTransition) ServiceEventsMatch(fn func(arg interface{}) bool) *mockStateTransition {
+	m.expectedServiceEvents = mock.MatchedBy(fn)
+	return m
+}
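For quick reference, a hedged usage sketch combining the builder options documented above, following the patterns used throughout this test suite (`serviceEvents` and `newStateID` are hypothetical fixtures):

// State machine 0 expects exactly `serviceEvents` as input to EvolveState and
// mutates the evolving state during that call.
s.kvStateMachines[0] = s.mockStateTransition().
	ExpectedServiceEvents(serviceEvents).
	DuringEvolveState(func(_ mock.Arguments) {
		s.evolvingState.On("ID").Return(newStateID, nil).Once()
	}).
	Mock()

// State machine 1 only asserts a property of the input slice via a matcher.
s.kvStateMachines[1] = s.mockStateTransition().
	ServiceEventsMatch(emptySlice[flow.ServiceEvent]()).
	Mock()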
+
+// DuringEvolveState provides a functor to be executed during the state machine's `EvolveState` call. This is
+// helpful for testing cases where the state machine modifies the Protocol State. The method returns a
+// self-reference for chaining.
+func (m *mockStateTransition) DuringEvolveState(fn func(mock.Arguments)) *mockStateTransition {
+	m.runInEvolveState = fn
+	return m
+}
+
+// Mock constructs and configures the OrthogonalStoreStateMachine mock.
+func (m *mockStateTransition) Mock() protocol_statemock.OrthogonalStoreStateMachine[protocol.KVStoreReader] {
+	evolveStateCalled := false
+	stateMachine := protocol_statemock.NewOrthogonalStoreStateMachine[protocol.KVStoreReader](m.T)
+	if m.expectedServiceEvents == nil {
+		m.expectedServiceEvents = mock.Anything
+	}
+	stateMachine.On("EvolveState", m.expectedServiceEvents).Run(func(args mock.Arguments) {
+		evolveStateCalled = true // repeated `EvolveState` calls will be denied by the mock
+		if m.runInEvolveState != nil {
+			m.runInEvolveState(args)
+		}
+	}).Return(nil).Once()
+
+	stateMachine.On("Build").Run(func(args mock.Arguments) {
+		require.True(m.T, evolveStateCalled, "Method `OrthogonalStoreStateMachine.Build` called before `EvolveState`!")
+	}).Return(deferred.NewDeferredBlockPersist(), nil).Once()
+	return *stateMachine //nolint:govet
+}
diff --git a/state/protocol/protocol_state/state/protocol_state.go b/state/protocol/protocol_state/state/protocol_state.go
new file mode 100644
index 00000000000..0e2012ad406
--- /dev/null
+++ b/state/protocol/protocol_state/state/protocol_state.go
@@ -0,0 +1,362 @@
+package state
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/inmem"
+	"github.com/onflow/flow-go/state/protocol/protocol_state"
+	"github.com/onflow/flow-go/state/protocol/protocol_state/epochs"
+	"github.com/onflow/flow-go/state/protocol/protocol_state/kvstore"
+	"github.com/onflow/flow-go/state/protocol/protocol_state/pubsub"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/deferred"
+)
+
+// ProtocolState is an implementation of the read-only interface for protocol state. It allows querying information
+// on a per-block and per-epoch basis.
+// It is backed by a storage.EpochProtocolStateEntries and an in-memory protocol.GlobalParams.
+type ProtocolState struct {
+	epochProtocolStateDB storage.EpochProtocolStateEntries
+	kvStoreSnapshots     protocol_state.ProtocolKVStore
+	globalParams         protocol.GlobalParams
+}
+
+var _ protocol.ProtocolState = (*ProtocolState)(nil)
+
+func NewProtocolState(epochProtocolStateDB storage.EpochProtocolStateEntries, kvStoreSnapshots storage.ProtocolKVStore, globalParams protocol.GlobalParams) *ProtocolState {
+	return newProtocolState(epochProtocolStateDB, kvstore.NewProtocolKVStore(kvStoreSnapshots), globalParams)
+}
+
+// newProtocolState creates a new ProtocolState instance. The exported constructor `NewProtocolState` only requires the
+// lower-level `storage.ProtocolKVStore` as input. However, internally we use the higher-level `protocol_state.ProtocolKVStore`,
+// which wraps the lower-level ProtocolKVStore.
+func newProtocolState(epochProtocolStateDB storage.EpochProtocolStateEntries, kvStoreSnapshots protocol_state.ProtocolKVStore, globalParams protocol.GlobalParams) *ProtocolState {
+	return &ProtocolState{
+		epochProtocolStateDB: epochProtocolStateDB,
+		kvStoreSnapshots:     kvStoreSnapshots,
+		globalParams:         globalParams,
+	}
+}
+
+// EpochStateAtBlockID returns the epoch protocol state at the given block ID.
+// The resulting epoch protocol state is returned AFTER applying updates that are contained in the block.
+// Can be queried for any block that has been added to the block tree.
+// Returns:
+//   - (EpochProtocolState, nil) - if there is an epoch protocol state associated with the given block ID.
+//   - (nil, storage.ErrNotFound) - if there is no epoch protocol state associated with the given block ID.
+//   - (nil, exception) - any other error should be treated as exception.
+func (s *ProtocolState) EpochStateAtBlockID(blockID flow.Identifier) (protocol.EpochProtocolState, error) {
+	protocolStateEntry, err := s.epochProtocolStateDB.ByBlockID(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not query epoch protocol state at block (%x): %w", blockID, err)
+	}
+	return inmem.NewEpochProtocolStateAdapter(
+		inmem.UntrustedEpochProtocolStateAdapter{
+			RichEpochStateEntry: protocolStateEntry,
+			Params:              s.globalParams,
+		},
+	)
+}
+
+// KVStoreAtBlockID returns the protocol state at the given block ID.
+// The resulting protocol state is returned AFTER applying updates that are contained in the block.
+// Can be queried for any block that has been added to the block tree.
+// Returns:
+//   - (KVStoreReader, nil) - if there is a protocol state associated with the given block ID.
+//   - (nil, storage.ErrNotFound) - if there is no protocol state associated with the given block ID.
+//   - (nil, exception) - any other error should be treated as exception.
+func (s *ProtocolState) KVStoreAtBlockID(blockID flow.Identifier) (protocol.KVStoreReader, error) {
+	return s.kvStoreSnapshots.ByBlockID(blockID)
+}
+
+// GlobalParams returns an interface which can be used to query global protocol parameters.
+func (s *ProtocolState) GlobalParams() protocol.GlobalParams {
+	return s.globalParams
+}
+
+// MutableProtocolState is an implementation of the mutable interface for protocol state. It allows evolving the
+// protocol state by acting as a factory for protocol.StateMutator, which can be used to apply state-changing operations.
+type MutableProtocolState struct {
+	ProtocolState
+	headers                 storage.Headers
+	results                 storage.ExecutionResults
+	kvStateMachineFactories []protocol_state.KeyValueStoreStateMachineFactory
+}
+
+var _ protocol.MutableProtocolState = (*MutableProtocolState)(nil)
+
+// NewMutableProtocolState creates a new instance of MutableProtocolState.
+func NewMutableProtocolState(
+	log zerolog.Logger,
+	epochProtocolStateDB storage.EpochProtocolStateEntries,
+	kvStoreSnapshots storage.ProtocolKVStore,
+	globalParams protocol.GlobalParams,
+	headers storage.Headers,
+	results storage.ExecutionResults,
+	setups storage.EpochSetups,
+	commits storage.EpochCommits,
+) *MutableProtocolState {
+	log = log.With().Str("module", "dynamic_protocol_state").Logger()
+	// TODO [future generalization]: ideally, the telemetry consumers would be injected into the constructor
+	// mirroring telemetry collection in HotStuff. Thereby it would become possible to add more advanced supervision
+	// logic or to expose the telemetry as structured data by implementing custom telemetry consumers. At the moment,
+	// we only desire to log events picked up by the state machines, so the implementation below suffices. In case
+	// of two proposals for the same view, our current `StateMachineTelemetryConsumer` by itself does not collect
+	// sufficient context to differentiate events from the two blocks, as it only observes the view number. From the
+	// surrounding logs, we can infer the proposals' IDs.
+	// However, for more advanced analytics on the state machines, we might want to extend the current
+	// Telemetry implementation in the future.
+	epochHappyPathTelemetryFactory := func(candidateView uint64) protocol_state.StateMachineTelemetryConsumer {
+		return pubsub.NewLogConsumer(
+			log.With().
+				Str("state_machine", "epoch_happy_path").
+				Uint64("candidate_view", candidateView).
+				Logger(),
+		)
+	}
+	epochFallbackTelemetryFactory := func(candidateView uint64) protocol_state.StateMachineTelemetryConsumer {
+		return pubsub.NewLogConsumer(
+			log.With().
+				Str("state_machine", "epoch_fallback_path").
+				Uint64("candidate_view", candidateView).
+				Logger(),
+		)
+	}
+	psVersionUpgradeStateMachineTelemetry := pubsub.NewLogConsumer(log.With().Str("state_machine", "version_upgrade").Logger())
+	setKVStoreValueTelemetry := pubsub.NewLogConsumer(log.With().Str("state_machine", "set_kvstore_value").Logger())
+
+	// An ordered list of factories to create state machines for the different sub-states of the Dynamic Protocol State.
+	// All factories are expected to be called in the order defined here.
+	kvStateMachineFactories := []protocol_state.KeyValueStoreStateMachineFactory{
+		kvstore.NewPSVersionUpgradeStateMachineFactory(psVersionUpgradeStateMachineTelemetry),
+		epochs.NewEpochStateMachineFactory(setups, commits, epochProtocolStateDB, epochHappyPathTelemetryFactory, epochFallbackTelemetryFactory),
+		kvstore.NewSetValueStateMachineFactory(setKVStoreValueTelemetry),
+	}
+	return newMutableProtocolState(epochProtocolStateDB, kvstore.NewProtocolKVStore(kvStoreSnapshots), globalParams, headers, results, kvStateMachineFactories)
+}
+
+// newMutableProtocolState creates a new instance of MutableProtocolState, where we inject factories for the orthogonal
+// state machines evolving the sub-states. This constructor should be used mainly for testing (hence it is not exported).
+// Specifically, the MutableProtocolState is conceptually independent of the specific functions that the state machines
+// implement. Therefore, we test it independently of the state machines required for production. In comparison, the
+// constructor `NewMutableProtocolState` is intended for production use, where the list of state machines is hard-coded.
+func newMutableProtocolState(
+	epochProtocolStateDB storage.EpochProtocolStateEntries,
+	kvStoreSnapshots protocol_state.ProtocolKVStore,
+	globalParams protocol.GlobalParams,
+	headers storage.Headers,
+	results storage.ExecutionResults,
+	kvStateMachineFactories []protocol_state.KeyValueStoreStateMachineFactory,
+) *MutableProtocolState {
+	return &MutableProtocolState{
+		ProtocolState:           *newProtocolState(epochProtocolStateDB, kvStoreSnapshots, globalParams),
+		headers:                 headers,
+		results:                 results,
+		kvStateMachineFactories: kvStateMachineFactories,
+	}
+}
+
+// EvolveState updates the overall Protocol State based on information in the candidate block
+// (potentially still under construction). Information that may change the state is:
+//   - the candidate block's view
+//   - Service Events from execution results sealed in the candidate block
+//
+// EvolveState is compatible with speculative processing: it evolves an *in-memory copy* of the parent state
+// and collects *deferred database updates* for persisting the resulting Protocol State, including all of its
+// dependencies and respective indices.
+// However, the resulting batch of deferred database updates still depends on the candidate block's ID, which is
+// unknown at the time of block construction. Executing the deferred database updates is the caller's responsibility.
+//
+// SAFETY REQUIREMENTS:
+//  1. The seals must be a protocol-compliant extension of the parent block. Intuitively, we require that the
+//     seals follow the ancestry of this fork without gaps. The Consensus Participant's Compliance Layer enforces
+//     the necessary constraints. Analogously, the block building logic should always produce protocol-compliant
+//     seals.
+//     The seals guarantee correctness of the sealed execution result, including the contained service events.
+//     This is actively checked by the verification node, whose aggregated approvals in the form of a seal attest
+//     to the correctness of the sealed execution result (specifically the Service Events contained in the result
+//     and their order).
+//  2. For Consensus Participants that are replicas, the calling code must check that the returned `stateID` matches
+//     the commitment in the block proposal! If they don't match, the proposal is byzantine and should be slashed.
+//
+// Error returns:
+// [TLDR] All error returns indicate potential state corruption and should therefore be treated as fatal.
+//   - Per convention, the input seals from the block payload have already been confirmed to be protocol compliant.
+//     Hence, the service events in the sealed execution results represent the honest execution path.
+//     Therefore, the sealed service events should encode a valid evolution of the protocol state -- provided
+//     the system smart contracts are correct.
+//   - As we can rule out byzantine attacks as the source of failures, the only remaining sources of problems
+//     can be (a) bugs in the system smart contracts or (b) bugs in the node implementation. A service event
+//     not representing a valid state transition despite all consistency checks passing is interpreted as
+//     case (a) and _should be_ handled internally by the respective state machine. Otherwise, any bug or
+//     unforeseen edge cases in the system smart contracts would result in consensus halt, due to errors while
+//     evolving the protocol state.
+//   - A consistency or sanity check failing within the StateMutator is likely the symptom of an internal bug
+//     in the node software or state corruption, i.e. case (b). This is the only scenario where the error return
+//     of this function is not nil. If such an exception is returned, continuing is not an option.
+func (s *MutableProtocolState) EvolveState(
+	deferredDBOps *deferred.DeferredBlockPersist,
+	parentBlockID flow.Identifier,
+	candidateView uint64,
+	candidateSeals []*flow.Seal,
+) (flow.Identifier, error) {
+	serviceEvents, err := s.serviceEventsFromSeals(candidateSeals)
+	if err != nil {
+		return flow.ZeroID, fmt.Errorf("extracting service events from candidate seals failed: %w", err)
+	}
+
+	parentStateID, stateMachines, evolvingState, err := s.initializeOrthogonalStateMachines(parentBlockID, candidateView)
+	if err != nil {
+		return flow.ZeroID, fmt.Errorf("failure initializing sub-state machines for evolving the Protocol State: %w", err)
+	}
+
+	resultingStateID, err := s.build(deferredDBOps, parentStateID, stateMachines, serviceEvents, evolvingState)
+	if err != nil {
+		return flow.ZeroID, fmt.Errorf("evolving and building the resulting Protocol State failed: %w", err)
+	}
+	return resultingStateID, nil
+}
+
+// initializeOrthogonalStateMachines instantiates the sub-state machines that in aggregate evolve the protocol state.
+// In a nutshell, we proceed as follows:
+//  1. We retrieve the protocol state snapshot that the parent block committed to.
+//  2. We determine which Protocol State version should be active at the candidate block's view. Note that there might be a
+//     pending Version Upgrade that was supposed to take effect at an earlier view `activationView` < `candidateView`. However,
+//     it is possible that there are no blocks in the current fork with views in [activationView, …, candidateView]. In this
+//     case, the version upgrade is still pending and should be activated now, despite its activationView having already passed.
+//  3. We replicate the parent block's Protocol State -- if necessary changing the data model in accordance with the Protocol
+//     State version that should be active at the candidate block's view. Essentially, this replication is a deep copy, which
+//     guarantees that we are not accidentally modifying the parent block's protocol state.
+//  4. We initialize the sub-state machines via the injected factories and provide the *replicated* state as an in-memory target
+//     for the state machines to write their evolved sub-states to.
+func (s *MutableProtocolState) initializeOrthogonalStateMachines(
+	parentBlockID flow.Identifier,
+	candidateView uint64,
+) (flow.Identifier, []protocol_state.KeyValueStoreStateMachine, protocol_state.KVStoreMutator, error) {
+	parentState, err := s.kvStoreSnapshots.ByBlockID(parentBlockID)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return flow.ZeroID, nil, nil, irrecoverable.NewExceptionf("Protocol State at parent block %v was not found: %w", parentBlockID, err)
+		}
+		return flow.ZeroID, nil, nil, fmt.Errorf("unexpected exception while retrieving Protocol State at parent block %v: %w", parentBlockID, err)
+	}
+
+	protocolVersion := parentState.GetProtocolStateVersion()
+	if versionUpgrade := parentState.GetVersionUpgrade(); versionUpgrade != nil {
+		if candidateView >= versionUpgrade.ActivationView {
+			protocolVersion = versionUpgrade.Data
+		}
+	}
+
+	evolvingState, err := parentState.Replicate(protocolVersion)
+	if err != nil {
+		if errors.Is(err, kvstore.ErrIncompatibleVersionChange) {
+			return flow.ZeroID, nil, nil, irrecoverable.NewExceptionf("replicating parent block's protocol state failed due to unsupported version: %w", err)
+		}
+		return flow.ZeroID, nil, nil, fmt.Errorf("could not replicate parent KV store (version=%d) to protocol version %d: %w", parentState.GetProtocolStateVersion(), protocolVersion, err)
+	}
+
+	stateMachines := make([]protocol_state.KeyValueStoreStateMachine, 0, len(s.kvStateMachineFactories))
+	for _, factory := range s.kvStateMachineFactories {
+		stateMachine, err := factory.Create(candidateView, parentBlockID, parentState, evolvingState)
+		if err != nil {
+			return flow.ZeroID, nil, nil, fmt.Errorf("could not create state machine: %w", err)
+		}
+		stateMachines = append(stateMachines, stateMachine)
+	}
+	return parentState.ID(), stateMachines, evolvingState, nil
+}
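For orientation, a hedged sketch of the calling pattern that the `EvolveState` contract above implies, based on the tests in this PR (`parentID`, `candidateView`, `seals`, `lctx`, `blockID`, and `rw` are hypothetical caller-provided values):

// Evolve an in-memory copy of the parent state and collect the deferred DB updates.
deferredDBOps := deferred.NewDeferredBlockPersist()
stateID, err := mutableState.EvolveState(deferredDBOps, parentID, candidateView, seals)
if err != nil {
	return fmt.Errorf("evolving protocol state failed: %w", err) // fatal per the contract above
}
// ... assemble the candidate block and commit to stateID in its payload ...

// Once the candidate block's ID is known, execute the deferred operations
// as part of the caller's database batch.
if err := deferredDBOps.Execute(lctx, blockID, rw); err != nil {
	return fmt.Errorf("persisting evolved protocol state failed: %w", err)
}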
+
+// serviceEventsFromSeals arranges the sealed results in order of increasing height of the executed blocks
+// and then extracts all Service Events from the sealed results. While the seals might be included in the
+// candidate block in any order, _within_ a sealed execution result, the service events are chronologically
+// ordered. Hence, by arranging the seals by increasing height, the total order of all extracted Service
+// Events is also chronological.
+func (s *MutableProtocolState) serviceEventsFromSeals(candidateSeals []*flow.Seal) ([]flow.ServiceEvent, error) {
+	// block payload may not specify seals in order, so order them by block height before processing
+	orderedSeals, err := protocol.OrderedSeals(candidateSeals, s.headers)
+	if err != nil {
+		// Per API contract, the input seals must have already passed verification, which necessitates
+		// successful ordering. Hence, calling protocol.OrderedSeals with the same inputs that succeeded
+		// earlier now failed. In all cases, this is an exception.
+		if errors.Is(err, protocol.ErrMultipleSealsForSameHeight) || errors.Is(err, protocol.ErrDiscontinuousSeals) || errors.Is(err, storage.ErrNotFound) {
+			return nil, irrecoverable.NewExceptionf("ordering already validated seals unexpectedly failed: %w", err)
+		}
+		return nil, fmt.Errorf("ordering already validated seals resulted in unexpected exception: %w", err)
+	}
+
+	serviceEvents := make([]flow.ServiceEvent, 0) // we expect that service events are rare; most blocks have none
+	for _, seal := range orderedSeals {
+		result, err := s.results.ByID(seal.ResultID)
+		if err != nil {
+			if errors.Is(err, storage.ErrNotFound) {
+				return nil, irrecoverable.NewExceptionf("could not get result %x sealed by valid seal %x: %w", seal.ResultID, seal.ID(), err)
+			}
+			return nil, fmt.Errorf("retrieving result %x resulted in unexpected exception: %w", seal.ResultID, err)
+		}
+		serviceEvents = append(serviceEvents, result.ServiceEvents...)
+	}
+	return serviceEvents, nil
+}
+
+// build assembles the final Protocol State.
+// First, we apply the service events to all sub-state machines and then build the resulting state.
+// Thereby, the framework supports a subtly more general way of partitioning the Protocol State machine,
+// where state machines could exchange some information if their chronological order of execution is strictly
+// specified and guaranteed. The framework conceptually tolerates this without explicitly supporting it (yet).
+//
+// Returns:
+//   - ID of the resulting Protocol State
+//   - deferred database operations for persisting the resulting Protocol State, including all of its
+//     dependencies and respective indices. However, the resulting batch of deferred database updates still
+//     depends on the candidate block's ID, which is unknown at the time of block construction.
+//   - err: All error returns indicate potential state corruption and should therefore be treated as fatal.
+func (s *MutableProtocolState) build(
+	deferredDBOps *deferred.DeferredBlockPersist,
+	parentStateID flow.Identifier,
+	stateMachines []protocol_state.KeyValueStoreStateMachine,
+	serviceEvents []flow.ServiceEvent,
+	evolvingState protocol.KVStoreReader,
+) (flow.Identifier, error) {
+	for _, stateMachine := range stateMachines {
+		err := stateMachine.EvolveState(serviceEvents) // state machine should only bubble up exceptions
+		if err != nil {
+			return flow.ZeroID, fmt.Errorf("exception from sub-state machine during state evolution: %w", err)
+		}
+	}
+
+	// _after_ all state machines have ingested the available information, we build the resulting overall state
+	for _, stateMachine := range stateMachines {
+		dbOps, err := stateMachine.Build()
+		if err != nil {
+			return flow.ZeroID, fmt.Errorf("unexpected exception from sub-state machine while building its output state: %w", err)
+		}
+		deferredDBOps.Chain(dbOps)
+	}
+	resultingStateID := evolvingState.ID()
+
+	// We _always_ index the protocol state by the candidate block's ID, but we only add a
+	// database operation to persist the state if it actually changed.
+	deferredDBOps.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error {
+		return s.kvStoreSnapshots.BatchIndex(lctx, rw, blockID, resultingStateID)
+	})
+
+	if parentStateID != resultingStateID {
+		deferredDBOps.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error {
+			// no need to hold any lock, because the resultingStateID is a full content hash of the value
+			err := s.kvStoreSnapshots.BatchStore(rw, resultingStateID, evolvingState)
+			if err == nil {
+				return nil
+			}
+			return irrecoverable.NewExceptionf("unexpected error while trying to store new protocol state: %w", err)
+		})
+	}
+
+	return resultingStateID, nil
+}
diff --git a/state/protocol/protocol_state/state/protocol_state_test.go b/state/protocol/protocol_state/state/protocol_state_test.go
new file mode 100644
index 00000000000..a0bfa75d8af
--- /dev/null
+++ b/state/protocol/protocol_state/state/protocol_state_test.go
@@ -0,0 +1,129 @@
+package state
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	psmock "github.com/onflow/flow-go/state/protocol/mock"
+	"github.com/onflow/flow-go/state/protocol/protocol_state/kvstore"
+	"github.com/onflow/flow-go/storage"
+	storagemock "github.com/onflow/flow-go/storage/mock"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// Test_ProtocolState verifies the different scenarios of retrieving a protocol state, global parameters
+// and KV store snapshots by block ID for the `ProtocolState`. Happy and unhappy paths are covered.
+func Test_ProtocolState(t *testing.T) {
+	epochProtocolStateDB := storagemock.NewEpochProtocolStateEntries(t)
+	protocolKVStoreDB := storagemock.NewProtocolKVStore(t)
+	globalParams := psmock.NewGlobalParams(t)
+	protocolState := NewProtocolState(epochProtocolStateDB, protocolKVStoreDB, globalParams)
+
+	t.Run("testing `EpochProtocolStateEntries.AtBlockID`", func(t *testing.T) {
+		test_AtBlockID(t, protocolState, epochProtocolStateDB)
+	})
+	t.Run("testing `EpochProtocolStateEntries.GlobalParams`", func(t *testing.T) {
+		test_GlobalParams(t, protocolState, globalParams)
+	})
+	t.Run("testing `EpochProtocolStateEntries.KVStoreAtBlockID`", func(t *testing.T) {
+		test_KVStoreAtBlockID(t, protocolState, protocolKVStoreDB)
+	})
+}
+
+// Test_MutableProtocolState verifies the different scenarios of retrieving a protocol state, global parameters
+// and KV store snapshots by block ID for the `MutableProtocolState`. Happy and unhappy paths are covered.
+func Test_MutableProtocolState(t *testing.T) { + epochProtocolStateDB := storagemock.NewEpochProtocolStateEntries(t) + protocolKVStoreDB := storagemock.NewProtocolKVStore(t) + globalParams := psmock.NewGlobalParams(t) + headersDB := storagemock.NewHeaders(t) + resultsDB := storagemock.NewExecutionResults(t) + setupsDB := storagemock.NewEpochSetups(t) + commitsDB := storagemock.NewEpochCommits(t) + + mutableProtocolState := NewMutableProtocolState( + unittest.Logger(), + epochProtocolStateDB, + protocolKVStoreDB, + globalParams, + headersDB, + resultsDB, + setupsDB, + commitsDB) + + t.Run("testing `MutableProtocolState.AtBlockID`", func(t *testing.T) { + test_AtBlockID(t, mutableProtocolState, epochProtocolStateDB) + }) + t.Run("testing `MutableProtocolState.GlobalParams`", func(t *testing.T) { + test_GlobalParams(t, mutableProtocolState, globalParams) + }) + t.Run("testing `MutableProtocolState.KVStoreAtBlockID`", func(t *testing.T) { + test_KVStoreAtBlockID(t, mutableProtocolState, protocolKVStoreDB) + }) +} + +func test_AtBlockID(t *testing.T, protocolState protocol.ProtocolState, epochProtocolStateDB *storagemock.EpochProtocolStateEntries) { + blockID := unittest.IdentifierFixture() + + t.Run("retrieve epoch state for existing blocks", func(t *testing.T) { + epochState := unittest.EpochStateFixture(unittest.WithValidDKG()) + epochProtocolStateDB.On("ByBlockID", blockID).Return(epochState, nil).Once() + + epochProtocolState, err := protocolState.EpochStateAtBlockID(blockID) + require.NoError(t, err) + assert.Equal(t, epochState.CurrentEpochIdentityTable, epochProtocolState.Identities()) + }) + t.Run("retrieving epoch state for non-existing block yields storage.ErrNotFound error", func(t *testing.T) { + epochProtocolStateDB.On("ByBlockID", blockID).Return(nil, storage.ErrNotFound).Once() + _, err := protocolState.EpochStateAtBlockID(blockID) + require.ErrorIs(t, err, storage.ErrNotFound) + }) + t.Run("exception during retrieve is propagated", func(t *testing.T) { + exception := errors.New("exception") + epochProtocolStateDB.On("ByBlockID", blockID).Return(nil, exception).Once() + _, err := protocolState.EpochStateAtBlockID(blockID) + require.ErrorIs(t, err, exception) + }) +} + +func test_GlobalParams(t *testing.T, protocolState protocol.ProtocolState, globalParams *psmock.GlobalParams) { + expectedChainID := flow.Testnet + globalParams.On("ChainID").Return(expectedChainID, nil).Once() + actualChainID := protocolState.GlobalParams().ChainID() + assert.Equal(t, expectedChainID, actualChainID) +} + +func test_KVStoreAtBlockID(t *testing.T, protocolState protocol.ProtocolState, protocolKVStoreDB *storagemock.ProtocolKVStore) { + blockID := unittest.IdentifierFixture() + expectedState := &kvstore.Modelv1{ + Modelv0: kvstore.Modelv0{ + UpgradableModel: kvstore.UpgradableModel{}, + EpochStateID: unittest.IdentifierFixture(), + }, + } + version, encStateData, err := expectedState.VersionedEncode() + require.NoError(t, err) + encExpectedState := &flow.PSKeyValueStoreData{ + Version: version, + Data: encStateData, + } + + t.Run("retrieve KVStoreReader", func(t *testing.T) { + protocolKVStoreDB.On("ByBlockID", blockID).Return(encExpectedState, nil).Once() + state, err := protocolState.KVStoreAtBlockID(blockID) + assert.NoError(t, err) + assert.Equal(t, expectedState, state) + }) + + t.Run("error retrieving KVStoreReader", func(t *testing.T) { + exception := errors.New("exception") + protocolKVStoreDB.On("ByBlockID", blockID).Return(nil, exception).Once() + _, err := 
protocolState.KVStoreAtBlockID(blockID) + assert.ErrorIs(t, err, exception) + }) +} diff --git a/state/protocol/seed/customizers.go b/state/protocol/seed/customizers.go deleted file mode 100644 index 8b65564b412..00000000000 --- a/state/protocol/seed/customizers.go +++ /dev/null @@ -1,46 +0,0 @@ -package seed - -import "encoding/binary" - -// list of customizers used for different sub-protocol PRNGs. -// These customizers help instantiate different PRNGs from the -// same source of randomness. - -var ( - // ProtocolConsensusLeaderSelection is the customizer for consensus leader selection - ProtocolConsensusLeaderSelection = customizerFromIndices([]uint16{0, 1, 1}) - // ProtocolVerificationChunkAssignment is the customizer for verification nodes determines chunk assignment - ProtocolVerificationChunkAssignment = customizerFromIndices([]uint16{0, 2, 0}) - // collectorClusterLeaderSelectionPrefix is the prefix of the customizer for the leader selection of collector clusters - collectorClusterLeaderSelectionPrefix = []uint16{0, 0} - // executionChunkPrefix is the prefix of the customizer for executing chunks - executionChunkPrefix = []uint16{1} -) - -// ProtocolCollectorClusterLeaderSelection returns the indices for the leader selection for the i-th collector cluster -func ProtocolCollectorClusterLeaderSelection(clusterIndex uint) []byte { - indices := append(collectorClusterLeaderSelectionPrefix, uint16(clusterIndex)) - return customizerFromIndices(indices) -} - -// ExecutionChunk returns the indices for i-th chunk -func ExecutionChunk(chunkIndex uint16) []byte { - indices := append(executionChunkPrefix, chunkIndex) - return customizerFromIndices(indices) -} - -// customizerFromIndices maps the input indices into a slice of bytes. -// The implementation ensures there are no collisions of mapping of different indices. -// -// The output is built as a concatenation of indices, each index encoded over 2 bytes. 
-// (the implementation could be updated to map the indices differently depending on the -// constraints over the output length) -func customizerFromIndices(indices []uint16) []byte { - customizerLen := 2 * len(indices) - customizer := make([]byte, customizerLen) - // concatenate the indices - for i, index := range indices { - binary.LittleEndian.PutUint16(customizer[2*i:2*i+2], index) - } - return customizer -} diff --git a/state/protocol/seed/prg_test.go b/state/protocol/seed/prg_test.go deleted file mode 100644 index 5111fa50aa6..00000000000 --- a/state/protocol/seed/prg_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package seed - -import ( - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func getRandomSource(t *testing.T) []byte { - r := time.Now().UnixNano() - rand.Seed(r) - t.Logf("math rand seed is %d", r) - seed := make([]byte, RandomSourceLength) - rand.Read(seed) - return seed -} - -// check PRGs created from the same source give the same outputs -func TestDeterministic(t *testing.T) { - seed := getRandomSource(t) - customizer := []byte("test") - prg1, err := PRGFromRandomSource(seed, customizer) - require.NoError(t, err) - prg2, err := PRGFromRandomSource(seed, customizer) - require.NoError(t, err) - - rand1 := make([]byte, 100) - prg1.Read(rand1) - rand2 := make([]byte, 100) - prg2.Read(rand2) - - assert.Equal(t, rand1, rand2) -} - -func TestCustomizer(t *testing.T) { - seed := getRandomSource(t) - customizer1 := []byte("test1") - prg1, err := PRGFromRandomSource(seed, customizer1) - require.NoError(t, err) - customizer2 := []byte("test2") - prg2, err := PRGFromRandomSource(seed, customizer2) - require.NoError(t, err) - - rand1 := make([]byte, 100) - prg1.Read(rand1) - rand2 := make([]byte, 100) - prg2.Read(rand2) - - assert.NotEqual(t, rand1, rand2) -} diff --git a/state/protocol/seed/seed.go b/state/protocol/seed/seed.go deleted file mode 100644 index f8160e1c334..00000000000 --- a/state/protocol/seed/seed.go +++ /dev/null @@ -1,43 +0,0 @@ -package seed - -import ( - "fmt" - - "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" - "github.com/onflow/flow-go/crypto/random" -) - -// PRGFromRandomSource returns a PRG seeded by the source of randomness of the protocol. -// The customizer is used to generate a task-specific PRG (customizer in this implementation -// is up to 12-bytes long). -// -// The function hashes the input random source to obtain the PRG seed. -// Hashing is required to uniformize the entropy over the output. -func PRGFromRandomSource(randomSource []byte, customizer []byte) (random.Rand, error) { - // hash the source of randomness (signature) to uniformize the entropy - var seed [hash.HashLenSHA3_256]byte - hash.ComputeSHA3_256(&seed, randomSource) - - // create random number generator from the seed and customizer - rng, err := random.NewChacha20PRG(seed[:], customizer) - if err != nil { - return nil, fmt.Errorf("could not create ChaCha20 PRG: %w", err) - } - return rng, nil -} - -const RandomSourceLength = crypto.SignatureLenBLSBLS12381 - -// FromParentQCSignature extracts the source of randomness from the given QC sigData. -// The sigData is an RLP encoded structure that is part of QuorumCertificate. 
-func FromParentQCSignature(sigData []byte) ([]byte, error) { - // unpack sig data to extract random beacon sig - randomBeaconSig, err := model.UnpackRandomBeaconSig(sigData) - if err != nil { - return nil, fmt.Errorf("could not unpack block signature: %w", err) - } - - return randomBeaconSig, nil -} diff --git a/state/protocol/snapshot.go b/state/protocol/snapshot.go index 73b3acf8930..39b77269052 100644 --- a/state/protocol/snapshot.go +++ b/state/protocol/snapshot.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package protocol import ( @@ -33,7 +31,10 @@ type Snapshot interface { // history. It can represent either a finalized or ambiguous block, // depending on our selection criteria. Either way, it's the block on which // we should build the next block in the context of the selected state. - // TODO document error returns + // Expected error returns: + // - state.ErrUnknownSnapshotReference if the reference point for the snapshot + // (height or block ID) does not resolve to a queriable block in the state. + // All other errors should be treated as exceptions. Head() (*flow.Header, error) // QuorumCertificate returns a valid quorum certificate for the header at @@ -50,16 +51,23 @@ type Snapshot interface { // epoch. At the end of an epoch, this includes identities scheduled to join // in the next epoch but are not active yet. // - // Identities are guaranteed to be returned in canonical order (order.Canonical). + // Identities are guaranteed to be returned in canonical order (flow.Canonical[flow.Identity]). // // It allows us to provide optional upfront filters which can be used by the // implementation to speed up database lookups. - // TODO document error returns - Identities(selector flow.IdentityFilter) (flow.IdentityList, error) + // Expected error returns: + // - state.ErrUnknownSnapshotReference if the reference point for the snapshot + // (height or block ID) does not resolve to a queriable block in the state. + // All other errors should be treated as exceptions. + Identities(selector flow.IdentityFilter[flow.Identity]) (flow.IdentityList, error) // Identity attempts to retrieve the node with the given identifier at the // selected point of the protocol state history. It will error if it doesn't exist. - // TODO document error returns + // Expected error returns: + // - state.ErrUnknownSnapshotReference if the reference point for the snapshot + // (height or block ID) does not resolve to a queriable block in the state. + // - protocol.IdentityNotFoundError if nodeID does not correspond to a valid node. + // All other errors should be treated as exceptions. Identity(nodeID flow.Identifier) (*flow.Identity, error) // SealedResult returns the most recent included seal as of this block and @@ -103,6 +111,10 @@ type Snapshot interface { // The IDs are ordered such that parents are included before their children. // Since all blocks are fully validated before being inserted to the state, // all returned blocks are validated. + // + // CAUTION: the list of descendants is constructed for each call via database reads, + // and may be expensive to compute, especially if the reference block is older. + // // No errors are expected under normal operation. Descendants() ([]flow.Identifier, error) @@ -122,7 +134,7 @@ type Snapshot interface { // Phase returns the epoch phase for the current epoch, as of the Head block. 
// TODO document error returns - Phase() (flow.EpochPhase, error) + EpochPhase() (flow.EpochPhase, error) // Epochs returns a query object enabling querying detailed information about // various epochs. @@ -136,4 +148,24 @@ type Snapshot interface { // Params returns global parameters of the state this snapshot is taken from. // Returns invalid.Params with state.ErrUnknownSnapshotReference if snapshot reference block is unknown. Params() GlobalParams + + // EpochProtocolState returns the epoch part of dynamic protocol state that the Head block commits to. + // The compliance layer guarantees that only valid blocks are appended to the protocol state. + // Returns state.ErrUnknownSnapshotReference if snapshot reference block is unknown. + // All other errors should be treated as exceptions. + EpochProtocolState() (EpochProtocolState, error) + + // ProtocolState returns the dynamic protocol state that the Head block commits to. + // The compliance layer guarantees that only valid blocks are appended to the protocol state. + // Returns state.ErrUnknownSnapshotReference if snapshot reference block is unknown. + // All other errors should be treated as exceptions. + ProtocolState() (KVStoreReader, error) + + // VersionBeacon returns the latest sealed version beacon. + // If no version beacon has been sealed so far during the current spork, returns nil. + // The latest VersionBeacon is only updated for finalized blocks. This means that, when + // querying an un-finalized fork, `VersionBeacon` will have the same value as querying + // the snapshot for the latest finalized block, even if a newer version beacon is included + // in a seal along the un-finalized fork. + VersionBeacon() (*flow.SealedVersionBeacon, error) } diff --git a/state/protocol/state.go b/state/protocol/state.go deleted file mode 100644 index e0285437c15..00000000000 --- a/state/protocol/state.go +++ /dev/null @@ -1,84 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package protocol - -import ( - "context" - - "github.com/onflow/flow-go/model/flow" -) - -// State represents the full protocol state of the local node. It allows us to -// obtain snapshots of the state at any point of the protocol state history. -type State interface { - - // Params gives access to a number of stable parameters of the protocol state. - Params() Params - - // Final returns the snapshot of the persistent protocol state at the latest - // finalized block, and the returned snapshot is therefore immutable over - // time. - Final() Snapshot - - // Sealed returns the snapshot of the persistent protocol state at the - // latest sealed block, and the returned snapshot is therefore immutable - // over time. - Sealed() Snapshot - - // AtHeight returns the snapshot of the persistent protocol state at the - // given block number. It is only available for finalized blocks and the - // returned snapshot is therefore immutable over time. - AtHeight(height uint64) Snapshot - - // AtBlockID returns the snapshot of the persistent protocol state at the - // given block ID. It is available for any block that was introduced into - // the protocol state, and can thus represent an ambiguous state that was or - // will never be finalized. - AtBlockID(blockID flow.Identifier) Snapshot -} - -// FollowerState is a mutable protocol state used by nodes following main consensus (ie. non-consensus nodes). 
-// All blocks must have a certifying QC when being added to the state to guarantee they are valid, -// so there is a one-block lag between block production and incorporation into the FollowerState. -// However, since all blocks are certified upon insertion, they are immediately processable by other components. -type FollowerState interface { - State - - // ExtendCertified introduces the block with the given ID into the persistent - // protocol state without modifying the current finalized state. It allows us - // to execute fork-aware queries against the known protocol state. The caller - // must pass a QC for candidate block to prove that the candidate block has - // been certified, and it's safe to add it to the protocol state. The QC - // cannot be nil and must certify candidate block: - // candidate.View == qc.View && candidate.BlockID == qc.BlockID - // The `candidate` block and its QC _must be valid_ (otherwise, the state will - // be corrupted). ExtendCertified inserts any given block, as long as its - // parent is already in the protocol state. Also orphaned blocks are excepted. - // No errors are expected during normal operations. - ExtendCertified(ctx context.Context, candidate *flow.Block, qc *flow.QuorumCertificate) error - - // Finalize finalizes the block with the given hash. - // At this level, we can only finalize one block at a time. This implies - // that the parent of the pending block that is to be finalized has - // to be the last finalized block. - // It modifies the persistent immutable protocol state accordingly and - // forwards the pointer to the latest finalized state. - // No errors are expected during normal operations. - Finalize(ctx context.Context, blockID flow.Identifier) error -} - -// ParticipantState is a mutable protocol state used by active consensus participants (consensus nodes). -// All blocks are validated in full, including payload validation, prior to insertion. Only valid blocks are inserted. -type ParticipantState interface { - FollowerState - - // Extend introduces the block with the given ID into the persistent - // protocol state without modifying the current finalized state. It allows - // us to execute fork-aware queries against ambiguous protocol state, while - // still checking that the given block is a valid extension of the protocol state. - // The candidate block must have passed HotStuff validation before being passed to Extend. - // Expected errors during normal operations: - // * state.OutdatedExtensionError if the candidate block is outdated (e.g. 
orphaned) - // * state.InvalidExtensionError if the candidate block is invalid - Extend(ctx context.Context, candidate *flow.Block) error -} diff --git a/state/protocol/util.go b/state/protocol/util.go index 6457bf93b6d..598d9594776 100644 --- a/state/protocol/util.go +++ b/state/protocol/util.go @@ -17,8 +17,8 @@ func IsNodeAuthorizedAt(snapshot Snapshot, id flow.Identifier) (bool, error) { return CheckNodeStatusAt( snapshot, id, - filter.HasWeight(true), - filter.Not(filter.Ejected), + filter.HasInitialWeight[flow.Identity](true), + filter.IsValidCurrentEpochParticipant, ) } @@ -32,9 +32,9 @@ func IsNodeAuthorizedWithRoleAt(snapshot Snapshot, id flow.Identifier, role flow return CheckNodeStatusAt( snapshot, id, - filter.HasWeight(true), - filter.Not(filter.Ejected), - filter.HasRole(role), + filter.HasInitialWeight[flow.Identity](true), + filter.IsValidCurrentEpochParticipant, + filter.HasRole[flow.Identity](role), ) } @@ -44,7 +44,7 @@ func IsNodeAuthorizedWithRoleAt(snapshot Snapshot, id flow.Identifier, role flow // - state.ErrUnknownSnapshotReference if snapshot references an unknown block // // All other errors are unexpected and potential symptoms of internal state corruption. -func CheckNodeStatusAt(snapshot Snapshot, id flow.Identifier, checks ...flow.IdentityFilter) (bool, error) { +func CheckNodeStatusAt(snapshot Snapshot, id flow.Identifier, checks ...flow.IdentityFilter[flow.Identity]) (bool, error) { identity, err := snapshot.Identity(id) if IsIdentityNotFound(err) { return false, nil @@ -65,10 +65,7 @@ func CheckNodeStatusAt(snapshot Snapshot, id flow.Identifier, checks ...flow.Ide // IsSporkRootSnapshot returns whether the given snapshot is the state snapshot // representing the initial state for a spork. func IsSporkRootSnapshot(snapshot Snapshot) (bool, error) { - sporkRootBlockHeight, err := snapshot.Params().SporkRootBlockHeight() - if err != nil { - return false, fmt.Errorf("could not get snapshot root block height: %w", err) - } + sporkRootBlockHeight := snapshot.Params().SporkRootBlockHeight() head, err := snapshot.Head() if err != nil { return false, fmt.Errorf("could not get snapshot head: %w", err) @@ -80,7 +77,7 @@ func IsSporkRootSnapshot(snapshot Snapshot) (bool, error) { // state snapshot. // No errors are expected during normal operation. 
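As an aside on the generic filter API used in this file, a minimal sketch of a caller composing the same checks; the wrapper name `isAuthorizedCollectorAt` is hypothetical and simply mirrors IsNodeAuthorizedWithRoleAt specialized to the collection role:

	// isAuthorizedCollectorAt reports whether the node is an authorized collector
	// at the given snapshot: positive initial weight, valid participant in the
	// current epoch, and holding the collection role (illustrative helper only).
	func isAuthorizedCollectorAt(snapshot protocol.Snapshot, nodeID flow.Identifier) (bool, error) {
		return protocol.CheckNodeStatusAt(
			snapshot,
			nodeID,
			filter.HasInitialWeight[flow.Identity](true),
			filter.IsValidCurrentEpochParticipant,
			filter.HasRole[flow.Identity](flow.RoleCollection),
		)
	}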
func PreviousEpochExists(snap Snapshot) (bool, error) { - _, err := snap.Epochs().Previous().Counter() + _, err := snap.Epochs().Previous() if errors.Is(err, ErrNoPreviousEpoch) { return false, nil } @@ -98,14 +95,16 @@ func PreviousEpochExists(snap Snapshot) (bool, error) { // - protocol.ErrClusterNotFound if cluster is not found by the given chainID func FindGuarantors(state State, guarantee *flow.CollectionGuarantee) ([]flow.Identifier, error) { snapshot := state.AtBlockID(guarantee.ReferenceBlockID) - epochs := snapshot.Epochs() - epoch := epochs.Current() - cluster, err := epoch.ClusterByChainID(guarantee.ChainID) + epoch, err := snapshot.Epochs().Current() + if err != nil { + return nil, fmt.Errorf("could not get current epoch: %w", err) + } + cluster, err := epoch.ClusterByChainID(guarantee.ClusterChainID) if err != nil { return nil, fmt.Errorf( - "fail to retrieve collector clusters for guarantee (ReferenceBlockID: %v, ChainID: %v): %w", - guarantee.ReferenceBlockID, guarantee.ChainID, err) + "fail to retrieve collector clusters for guarantee (ReferenceBlockID: %v, ClusterChainID: %v): %w", + guarantee.ReferenceBlockID, guarantee.ClusterChainID, err) } guarantorIDs, err := signature.DecodeSignerIndicesToIdentifiers(cluster.Members().NodeIDs(), guarantee.SignerIndices) @@ -126,14 +125,14 @@ func FindGuarantors(state State, guarantee *flow.CollectionGuarantee) ([]flow.Id // - ErrMultipleSealsForSameHeight in case there are seals repeatedly sealing block at the same height // - ErrDiscontinuousSeals in case there are height-gaps in the sealed blocks // - storage.ErrNotFound if any of the seals references an unknown block -func OrderedSeals(payload *flow.Payload, headers storage.Headers) ([]*flow.Seal, error) { - numSeals := uint64(len(payload.Seals)) +func OrderedSeals(blockSeals []*flow.Seal, headers storage.Headers) ([]*flow.Seal, error) { + numSeals := uint64(len(blockSeals)) if numSeals == 0 { return nil, nil } heights := make([]uint64, numSeals) minHeight := uint64(math.MaxUint64) - for i, seal := range payload.Seals { + for i, seal := range blockSeals { header, err := headers.ByBlockID(seal.BlockID) if err != nil { return nil, fmt.Errorf("could not get block (id=%x) for seal: %w", seal.BlockID, err) // storage.ErrNotFound or exception @@ -146,7 +145,7 @@ func OrderedSeals(payload *flow.Payload, headers storage.Headers) ([]*flow.Seal, // As seals in a valid payload must have consecutive heights, we can populate // the ordered output by shifting by minHeight. seals := make([]*flow.Seal, numSeals) - for i, seal := range payload.Seals { + for i, seal := range blockSeals { idx := heights[i] - minHeight // (0) Per construction, `minHeight` is the smallest value in the `heights` slice. 
Hence, `idx ≥ 0` // (1) But if there are gaps in the heights of the sealed blocks (byzantine inputs), diff --git a/state/protocol/util/testing.go b/state/protocol/util/testing.go index 24eb8016f6f..220fb2d41bb 100644 --- a/state/protocol/util/testing.go +++ b/state/protocol/util/testing.go @@ -3,7 +3,7 @@ package util import ( "testing" - "github.com/dgraph-io/badger/v2" + "github.com/cockroachdb/pebble/v2" "github.com/rs/zerolog" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -11,14 +11,17 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/metrics" + mmetrics "github.com/onflow/flow-go/module/metrics" modulemock "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/state/protocol" pbadger "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/events" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" + protocol_state "github.com/onflow/flow-go/state/protocol/protocol_state/state" "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/util" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/storage/store" "github.com/onflow/flow-go/utils/unittest" ) @@ -50,34 +53,38 @@ func MockSealValidator(sealsDB storage.Seals) module.SealValidator { if len(candidate.Payload.Seals) > 0 { return candidate.Payload.Seals[0] } - last, _ := sealsDB.HighestInFork(candidate.Header.ParentID) + last, _ := sealsDB.HighestInFork(candidate.ParentID) return last }, func(candidate *flow.Block) error { if len(candidate.Payload.Seals) > 0 { return nil } - _, err := sealsDB.HighestInFork(candidate.Header.ParentID) + _, err := sealsDB.HighestInFork(candidate.ParentID) return err }).Maybe() return validator } -func RunWithBootstrapState(t testing.TB, rootSnapshot protocol.Snapshot, f func(*badger.DB, *pbadger.State)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { +func RunWithBootstrapState(t testing.TB, rootSnapshot protocol.Snapshot, f func(storage.DB, *pbadger.State)) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + db := pebbleimpl.ToDB(pdb) metrics := metrics.NewNoopCollector() - all := util.StorageLayer(t, db) + all := store.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) @@ -86,24 +93,28 @@ func RunWithBootstrapState(t testing.TB, rootSnapshot protocol.Snapshot, f func( }) } -func RunWithFullProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, f func(*badger.DB, *pbadger.ParticipantState)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { +func RunWithFullProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, f func(storage.DB, *pbadger.ParticipantState)) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + db := pebbleimpl.ToDB(pdb) metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := util.StorageLayer(t, db) + all := store.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + 
all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) @@ -111,29 +122,43 @@ func RunWithFullProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, f fu receiptValidator := MockReceiptValidator() sealValidator := MockSealValidator(all.Seals) mockTimer := MockBlockTimer() - fullState, err := pbadger.NewFullConsensusState(log, tracer, consumer, state, all.Index, all.Payloads, mockTimer, receiptValidator, sealValidator) + fullState, err := pbadger.NewFullConsensusState( + log, + tracer, + consumer, + state, + all.Index, + all.Payloads, + mockTimer, + receiptValidator, + sealValidator, + ) require.NoError(t, err) f(db, fullState) }) } -func RunWithFullProtocolStateAndMetrics(t testing.TB, rootSnapshot protocol.Snapshot, metrics module.ComplianceMetrics, f func(*badger.DB, *pbadger.ParticipantState)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { +func RunWithFullProtocolStateAndMetrics(t testing.TB, rootSnapshot protocol.Snapshot, metrics module.ComplianceMetrics, f func(storage.DB, *pbadger.ParticipantState)) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + db := pebbleimpl.ToDB(pdb) tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := util.StorageLayer(t, db) + all := store.InitAll(mmetrics.NewNoopCollector(), db) state, err := pbadger.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) @@ -141,88 +166,129 @@ func RunWithFullProtocolStateAndMetrics(t testing.TB, rootSnapshot protocol.Snap receiptValidator := MockReceiptValidator() sealValidator := MockSealValidator(all.Seals) mockTimer := MockBlockTimer() - fullState, err := pbadger.NewFullConsensusState(log, tracer, consumer, state, all.Index, all.Payloads, mockTimer, receiptValidator, sealValidator) + + fullState, err := pbadger.NewFullConsensusState( + log, + tracer, + consumer, + state, + all.Index, + all.Payloads, + mockTimer, + receiptValidator, + sealValidator, + ) require.NoError(t, err) f(db, fullState) }) } -func RunWithFullProtocolStateAndValidator(t testing.TB, rootSnapshot protocol.Snapshot, validator module.ReceiptValidator, f func(*badger.DB, *pbadger.ParticipantState)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { +func RunWithFullProtocolStateAndValidator(t testing.TB, rootSnapshot protocol.Snapshot, validator module.ReceiptValidator, f func(storage.DB, *pbadger.ParticipantState)) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + db := pebbleimpl.ToDB(pdb) metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := util.StorageLayer(t, db) + all := store.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) require.NoError(t, err) sealValidator := MockSealValidator(all.Seals) mockTimer := MockBlockTimer() - fullState, err := pbadger.NewFullConsensusState(log, tracer, consumer, state, all.Index, all.Payloads, mockTimer, validator, 
sealValidator) + fullState, err := pbadger.NewFullConsensusState( + log, + tracer, + consumer, + state, + all.Index, + all.Payloads, + mockTimer, + validator, + sealValidator, + ) require.NoError(t, err) f(db, fullState) }) } -func RunWithFollowerProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, f func(*badger.DB, *pbadger.FollowerState)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { +func RunWithFollowerProtocolState(t testing.TB, rootSnapshot protocol.Snapshot, f func(storage.DB, *pbadger.FollowerState)) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + db := pebbleimpl.ToDB(pdb) metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := util.StorageLayer(t, db) + all := store.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) require.NoError(t, err) mockTimer := MockBlockTimer() - followerState, err := pbadger.NewFollowerState(log, tracer, consumer, state, all.Index, all.Payloads, mockTimer) + followerState, err := pbadger.NewFollowerState( + log, + tracer, + consumer, + state, + all.Index, + all.Payloads, + mockTimer, + ) require.NoError(t, err) f(db, followerState) }) } -func RunWithFullProtocolStateAndConsumer(t testing.TB, rootSnapshot protocol.Snapshot, consumer protocol.Consumer, f func(*badger.DB, *pbadger.ParticipantState)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { +func RunWithFullProtocolStateAndConsumer(t testing.TB, rootSnapshot protocol.Snapshot, consumer protocol.Consumer, f func(storage.DB, *pbadger.ParticipantState)) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + db := pebbleimpl.ToDB(pdb) metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := util.StorageLayer(t, db) + all := store.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) @@ -230,28 +296,42 @@ func RunWithFullProtocolStateAndConsumer(t testing.TB, rootSnapshot protocol.Sna receiptValidator := MockReceiptValidator() sealValidator := MockSealValidator(all.Seals) mockTimer := MockBlockTimer() - fullState, err := pbadger.NewFullConsensusState(log, tracer, consumer, state, all.Index, all.Payloads, mockTimer, receiptValidator, sealValidator) + fullState, err := pbadger.NewFullConsensusState( + log, + tracer, + consumer, + state, + all.Index, + all.Payloads, + mockTimer, + receiptValidator, + sealValidator, + ) require.NoError(t, err) f(db, fullState) }) } -func RunWithFullProtocolStateAndMetricsAndConsumer(t testing.TB, rootSnapshot protocol.Snapshot, metrics module.ComplianceMetrics, consumer protocol.Consumer, f func(*badger.DB, *pbadger.ParticipantState)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { +func RunWithFullProtocolStateAndMetricsAndConsumer(t testing.TB, rootSnapshot protocol.Snapshot, metrics module.ComplianceMetrics, consumer protocol.Consumer, f func(storage.DB, *pbadger.ParticipantState, protocol.MutableProtocolState)) 
{ + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + db := pebbleimpl.ToDB(pdb) tracer := trace.NewNoopTracer() log := zerolog.Nop() - all := util.StorageLayer(t, db) + all := store.InitAll(mmetrics.NewNoopCollector(), db) state, err := pbadger.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) @@ -259,37 +339,125 @@ func RunWithFullProtocolStateAndMetricsAndConsumer(t testing.TB, rootSnapshot pr receiptValidator := MockReceiptValidator() sealValidator := MockSealValidator(all.Seals) mockTimer := MockBlockTimer() - fullState, err := pbadger.NewFullConsensusState(log, tracer, consumer, state, all.Index, all.Payloads, mockTimer, receiptValidator, sealValidator) + fullState, err := pbadger.NewFullConsensusState( + log, + tracer, + consumer, + state, + all.Index, + all.Payloads, + mockTimer, + receiptValidator, + sealValidator, + ) require.NoError(t, err) - f(db, fullState) + mutableProtocolState := protocol_state.NewMutableProtocolState( + log, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, + state.Params(), + all.Headers, + all.Results, + all.EpochSetups, + all.EpochCommits, + ) + f(db, fullState, mutableProtocolState) }) } -func RunWithFollowerProtocolStateAndHeaders(t testing.TB, rootSnapshot protocol.Snapshot, f func(*badger.DB, *pbadger.FollowerState, storage.Headers, storage.Index)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { +func RunWithFollowerProtocolStateAndHeaders(t testing.TB, rootSnapshot protocol.Snapshot, f func(storage.DB, *pbadger.FollowerState, storage.Headers, storage.Index)) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + db := pebbleimpl.ToDB(pdb) metrics := metrics.NewNoopCollector() tracer := trace.NewNoopTracer() log := zerolog.Nop() consumer := events.NewNoop() - all := util.StorageLayer(t, db) + all := store.InitAll(metrics, db) state, err := pbadger.Bootstrap( metrics, db, + lockManager, all.Headers, all.Seals, all.Results, all.Blocks, all.QuorumCertificates, - all.Setups, + all.EpochSetups, all.EpochCommits, - all.Statuses, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, all.VersionBeacons, rootSnapshot, ) require.NoError(t, err) mockTimer := MockBlockTimer() - followerState, err := pbadger.NewFollowerState(log, tracer, consumer, state, all.Index, all.Payloads, mockTimer) + followerState, err := pbadger.NewFollowerState( + log, + tracer, + consumer, + state, + all.Index, + all.Payloads, + mockTimer, + ) require.NoError(t, err) f(db, followerState, all.Headers, all.Index) }) } + +func RunWithFullProtocolStateAndMutator(t testing.TB, rootSnapshot protocol.Snapshot, f func(storage.DB, *pbadger.ParticipantState, protocol.MutableProtocolState)) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + lockManager := storage.NewTestingLockManager() + db := pebbleimpl.ToDB(pdb) + metrics := metrics.NewNoopCollector() + tracer := trace.NewNoopTracer() + log := zerolog.Nop() + consumer := events.NewNoop() + all := store.InitAll(metrics, db) + state, err := pbadger.Bootstrap( + metrics, + db, + lockManager, + all.Headers, + all.Seals, + all.Results, + all.Blocks, + all.QuorumCertificates, + all.EpochSetups, + all.EpochCommits, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, + all.VersionBeacons, + rootSnapshot, + ) 
+ require.NoError(t, err) + receiptValidator := MockReceiptValidator() + sealValidator := MockSealValidator(all.Seals) + mockTimer := MockBlockTimer() + fullState, err := pbadger.NewFullConsensusState( + log, + tracer, + consumer, + state, + all.Index, + all.Payloads, + mockTimer, + receiptValidator, + sealValidator, + ) + require.NoError(t, err) + + mutableProtocolState := protocol_state.NewMutableProtocolState( + log, + all.EpochProtocolStateEntries, + all.ProtocolKVStore, + state.Params(), + all.Headers, + all.Results, + all.EpochSetups, + all.EpochCommits, + ) + f(db, fullState, mutableProtocolState) + }) +} diff --git a/state/protocol/util_test.go b/state/protocol/util_test.go index 7858f5767b7..eeadfe46c75 100644 --- a/state/protocol/util_test.go +++ b/state/protocol/util_test.go @@ -26,7 +26,8 @@ func TestIsSporkRootSnapshot(t *testing.T) { t.Run("other snapshot", func(t *testing.T) { snapshot := unittest.RootSnapshotFixture(unittest.IdentityListFixture(10, unittest.WithAllRoles())) - snapshot.Encodable().Head.Height += 1 // modify head height to break equivalence with spork root block height + blockLen := len(snapshot.Encodable().SealingSegment.Blocks) + snapshot.Encodable().SealingSegment.Blocks[blockLen-1].Block.Height += 1 // modify head height to break equivalence with spork root block height isSporkRoot, err := protocol.IsSporkRootSnapshot(snapshot) require.NoError(t, err) assert.False(t, isSporkRoot) @@ -36,10 +37,10 @@ func TestIsSporkRootSnapshot(t *testing.T) { // TestOrderedSeals tests that protocol.OrderedSeals returns a list of ordered seals for a payload. func TestOrderedSeals(t *testing.T) { t.Run("empty payload", func(t *testing.T) { - payload := flow.EmptyPayload() + payload := flow.NewEmptyPayload() headers := storagemock.NewHeaders(t) - ordered, err := protocol.OrderedSeals(&payload, headers) + ordered, err := protocol.OrderedSeals(payload.Seals, headers) require.NoError(t, err) require.Empty(t, ordered) }) @@ -49,7 +50,7 @@ func TestOrderedSeals(t *testing.T) { payload := unittest.PayloadFixture(unittest.WithSeals(seals...)) headers.On("ByBlockID", mock.Anything).Return(nil, storage.ErrNotFound) - ordered, err := protocol.OrderedSeals(&payload, headers) + ordered, err := protocol.OrderedSeals(payload.Seals, headers) require.ErrorIs(t, err, storage.ErrNotFound) require.Empty(t, ordered) }) @@ -60,7 +61,7 @@ func TestOrderedSeals(t *testing.T) { exception := errors.New("exception") headers.On("ByBlockID", mock.Anything).Return(nil, exception) - ordered, err := protocol.OrderedSeals(&payload, headers) + ordered, err := protocol.OrderedSeals(payload.Seals, headers) require.ErrorIs(t, err, exception) require.Empty(t, ordered) }) @@ -71,22 +72,22 @@ func TestOrderedSeals(t *testing.T) { seals := unittest.Seal.Fixtures(10) for i, seal := range seals { seal.BlockID = blocks[i].ID() - headers.On("ByBlockID", seal.BlockID).Return(blocks[i].Header, nil) + headers.On("ByBlockID", seal.BlockID).Return(blocks[i].ToHeader(), nil) } payload := unittest.PayloadFixture(unittest.WithSeals(seals...)) - ordered, err := protocol.OrderedSeals(&payload, headers) + ordered, err := protocol.OrderedSeals(payload.Seals, headers) require.NoError(t, err) require.Equal(t, seals, ordered) }) t.Run("unordered", func(t *testing.T) { headers := storagemock.NewHeaders(t) - - blocks := unittest.ChainFixtureFrom(10, flow.Genesis(flow.Localnet).Header) + genesisBlock := unittest.Block.Genesis(flow.Localnet) + blocks := unittest.ChainFixtureFrom(10, genesisBlock.ToHeader()) orderedSeals := 
unittest.Seal.Fixtures(len(blocks))
 		for i, seal := range orderedSeals {
 			seal.BlockID = blocks[i].ID()
-			headers.On("ByBlockID", seal.BlockID).Return(blocks[i].Header, nil)
+			headers.On("ByBlockID", seal.BlockID).Return(blocks[i].ToHeader(), nil)
 		}
 		unorderedSeals := make([]*flow.Seal, len(orderedSeals))
 		copy(unorderedSeals, orderedSeals)
@@ -96,7 +97,7 @@ func TestOrderedSeals(t *testing.T) {
 		})
 		payload := unittest.PayloadFixture(unittest.WithSeals(unorderedSeals...))
-		ordered, err := protocol.OrderedSeals(&payload, headers)
+		ordered, err := protocol.OrderedSeals(payload.Seals, headers)
 		require.NoError(t, err)
 		require.Equal(t, orderedSeals, ordered)
 	})
 }
diff --git a/state/protocol/validity.go b/state/protocol/validity.go
new file mode 100644
index 00000000000..8704a8290b7
--- /dev/null
+++ b/state/protocol/validity.go
@@ -0,0 +1,220 @@
+package protocol
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/factory"
+	"github.com/onflow/flow-go/model/flow/filter"
+	"github.com/onflow/flow-go/module/signature"
+)
+
+// IsValidExtendingEpochSetup checks whether an EpochSetup service event being added to the state is valid.
+// In addition to intrinsic validity, we also check that it is valid w.r.t. the previous epoch setup event,
+// and the current epoch state.
+// CAUTION: This function assumes that all inputs besides extendingSetup are already validated.
+// Expected errors during normal operations:
+// * protocol.InvalidServiceEventError if the input service event is invalid to extend the currently active epoch state
+// This is a side-effect-free function. This function only returns protocol.InvalidServiceEventError as errors.
+func IsValidExtendingEpochSetup(extendingSetup *flow.EpochSetup, epochState *flow.EpochStateEntry) error {
+	// Enforce EpochSetup is valid w.r.t. the current epoch state
+	if epochState.NextEpoch != nil { // We should only have a single epoch setup event per epoch.
+		// true iff EpochSetup event for NEXT epoch was already included before
+		return NewInvalidServiceEventErrorf("duplicate epoch setup service event: %x", epochState.NextEpoch.SetupID)
+	}
+	if extendingSetup.Counter != epochState.EpochCounter()+1 { // The setup event should have the counter increased by one.
+		return NewInvalidServiceEventErrorf("next epoch setup has invalid counter (%d => %d)", epochState.EpochCounter(), extendingSetup.Counter)
+	}
+	if extendingSetup.FirstView != epochState.CurrentEpochFinalView()+1 { // The first view needs to be exactly one greater than the current epoch final view
+		return NewInvalidServiceEventErrorf(
+			"next epoch first view must be exactly 1 more than current epoch final view (%d != %d+1)",
+			extendingSetup.FirstView,
+			epochState.CurrentEpochFinalView(),
+		)
+	}
+
+	// Enforce the EpochSetup event is syntactically correct
+	err := IsValidEpochSetup(extendingSetup, true)
+	if err != nil {
+		return NewInvalidServiceEventErrorf("invalid epoch setup: %w", err)
+	}
+	return nil
+}
+
+// IsValidEpochSetup checks whether an `EpochSetup` event is syntactically correct. The boolean parameter `verifyNetworkAddress`
+// controls whether we want to permit nodes to share a networking address.
+// This is a side-effect-free function. Any error return indicates that the EpochSetup event is not compliant with protocol rules.
+func IsValidEpochSetup(setup *flow.EpochSetup, verifyNetworkAddress bool) error {
+	// 1. CHECK: Enforce protocol compliance of Epoch parameters:
+	//    - RandomSource of entropy in Epoch Setup event should have the protocol-prescribed length
+	//    - first view must be before final view
+	if len(setup.RandomSource) != flow.EpochSetupRandomSourceLength {
+		return fmt.Errorf("seed has incorrect length (%d != %d)", len(setup.RandomSource), flow.EpochSetupRandomSourceLength)
+	}
+	if setup.FirstView >= setup.FinalView {
+		return fmt.Errorf("first view (%d) must be before final view (%d)", setup.FirstView, setup.FinalView)
+	}
+
+	// 2. CHECK: Enforce protocol compliance of active participants:
+	//    (a) each has a unique node ID,
+	//    (b) each has a unique network address (if `verifyNetworkAddress` is true),
+	//    (c) participants are sorted in canonical order.
+	//    Note that the system smart contracts manage the identity table as an unordered set! For the protocol state, we desire a fixed
+	//    ordering to simplify various implementation details, like the DKG. Therefore, we order identities in `flow.EpochSetup` during
+	//    conversion from Cadence to Go in the function `convert.ServiceEvent(flow.ChainID, flow.Event)` in package `model/convert`
+	identLookup := make(map[flow.Identifier]struct{})
+	for _, participant := range setup.Participants { // (a) enforce uniqueness of NodeIDs
+		_, ok := identLookup[participant.NodeID]
+		if ok {
+			return fmt.Errorf("duplicate node identifier (%x)", participant.NodeID)
+		}
+		identLookup[participant.NodeID] = struct{}{}
+	}
+
+	if verifyNetworkAddress { // (b) enforce uniqueness of networking address
+		addrLookup := make(map[string]struct{})
+		for _, participant := range setup.Participants {
+			_, ok := addrLookup[participant.Address]
+			if ok {
+				return fmt.Errorf("duplicate node address (%x)", participant.Address)
+			}
+			addrLookup[participant.Address] = struct{}{}
+		}
+	}
+
+	if !setup.Participants.Sorted(flow.Canonical[flow.IdentitySkeleton]) { // (c) enforce canonical ordering
+		return fmt.Errorf("participants are not canonically ordered")
+	}
+
+	// 3. CHECK: Enforce sufficient number of nodes for each role
+	// IMPORTANT: here we remove all nodes with zero weight, as they are allowed to partake in communication but not in their respective node functions
+	activeParticipants := setup.Participants.Filter(filter.HasInitialWeight[flow.IdentitySkeleton](true))
+	activeNodeCountByRole := make(map[flow.Role]uint)
+	for _, participant := range activeParticipants {
+		activeNodeCountByRole[participant.Role]++
+	}
+	if activeNodeCountByRole[flow.RoleConsensus] < 1 {
+		return fmt.Errorf("need at least one consensus node")
+	}
+	if activeNodeCountByRole[flow.RoleCollection] < 1 {
+		return fmt.Errorf("need at least one collection node")
+	}
+	if activeNodeCountByRole[flow.RoleExecution] < 1 {
+		return fmt.Errorf("need at least one execution node")
+	}
+	if activeNodeCountByRole[flow.RoleVerification] < 1 {
+		return fmt.Errorf("need at least one verification node")
+	}
+
+	// 4. CHECK: Enforce protocol compliance of collector cluster assignment
+	//    (0) there is at least one collector cluster
+	//    (a) assignment only contains nodes with collector role and positive weight
+	//    (b) collectors have unique node IDs
+	//    (c) each collector is assigned exactly to one cluster and is only listed once within that cluster
+	//    (d) cluster contains at least one collector (i.e. is not empty)
+	//    (e) cluster is composed of known nodes
+	//    (f) cluster assignment lists the nodes in canonical ordering
+	if len(setup.Assignments) == 0 { // enforce (0): at least one cluster
+		return fmt.Errorf("need at least one collection cluster")
+	}
+	// Unpacking the cluster assignments (NodeIDs → IdentitySkeletons) enforces (a) - (f)
+	_, err := factory.NewClusterList(setup.Assignments, activeParticipants.Filter(filter.HasRole[flow.IdentitySkeleton](flow.RoleCollection)))
+	if err != nil {
+		return fmt.Errorf("invalid cluster assignments: %w", err)
+	}
+	return nil
+}
+
+// IsValidExtendingEpochCommit checks whether an EpochCommit service event being added to the state is valid.
+// In addition to intrinsic validity, we also check that it is valid w.r.t. the previous epoch setup event, and
+// the current epoch state.
+// CAUTION: This function assumes that all inputs besides extendingCommit are already validated.
+// Expected errors during normal operations:
+// * protocol.InvalidServiceEventError if the input service event is invalid to extend the currently active epoch
+// This is a side-effect-free function. This function only returns protocol.InvalidServiceEventError as errors.
+func IsValidExtendingEpochCommit(extendingCommit *flow.EpochCommit, epochState *flow.MinEpochStateEntry, nextEpochSetupEvent *flow.EpochSetup) error {
+	// The epoch setup event needs to happen before the commit.
+	if epochState.NextEpoch == nil {
+		return NewInvalidServiceEventErrorf("missing epoch setup for epoch commit")
+	}
+	// Enforce EpochCommit is valid w.r.t. the current epoch state
+	if epochState.NextEpoch.CommitID != flow.ZeroID { // We should only have a single epoch commit event per epoch.
+		return NewInvalidServiceEventErrorf("duplicate epoch commit service event: %x", epochState.NextEpoch.CommitID)
+	}
+	// Enforce the EpochCommit event is syntactically correct and compatible with the respective EpochSetup
+	err := IsValidEpochCommit(extendingCommit, nextEpochSetupEvent)
+	if err != nil {
+		return NewInvalidServiceEventErrorf("invalid epoch commit: %s", err)
+	}
+	return nil
+}
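To show how these extension checks are meant to be consumed, a minimal sketch of a hypothetical caller follows. The handler name `onEpochCommit` and the exact shape of the epoch state entry it receives are assumptions for illustration; the validator and its error semantics are the ones defined in this file.

	// onEpochCommit sketches how a compliance-layer component might gate an incoming
	// EpochCommit service event (hypothetical helper, not part of this diff).
	func onEpochCommit(commit *flow.EpochCommit, epochState *flow.EpochStateEntry) error {
		// Reject the commit unless a matching EpochSetup was registered first and
		// no commit exists yet for the next epoch (exactly the rules enforced above).
		if err := IsValidExtendingEpochCommit(commit, epochState.MinEpochStateEntry, epochState.NextEpochSetup); err != nil {
			// protocol.InvalidServiceEventError is benign: the event is discarded, not escalated.
			return fmt.Errorf("dropping invalid epoch commit: %w", err)
		}
		// ... persist the commit and populate NextEpoch.CommitID ...
		return nil
	}

+
+// IsValidEpochCommit checks whether an epoch commit service event is intrinsically valid.
+// Assumes the input flow.EpochSetup event has already been validated.
+// Expected errors during normal operations:
+// * protocol.InvalidServiceEventError if the EpochCommit is invalid.
+// This is a side-effect-free function. This function only returns protocol.InvalidServiceEventError as errors.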
+func IsValidEpochCommit(commit *flow.EpochCommit, setup *flow.EpochSetup) error {
+	if len(setup.Assignments) != len(commit.ClusterQCs) {
+		return NewInvalidServiceEventErrorf("number of clusters (%d) does not match number of QCs (%d)", len(setup.Assignments), len(commit.ClusterQCs))
+	}
+
+	if commit.Counter != setup.Counter {
+		return NewInvalidServiceEventErrorf("inconsistent epoch counter between commit (%d) and setup (%d) events in same epoch", commit.Counter, setup.Counter)
+	}
+
+	// make sure we have a Random Beacon group key:
+	if commit.DKGGroupKey == nil {
+		return NewInvalidServiceEventErrorf("missing DKG public group key")
+	}
+
+	// enforce invariant: len(DKGParticipantKeys) == len(DKGIndexMap)
+	n := len(commit.DKGIndexMap) // size of the DKG committee
+	if len(commit.DKGParticipantKeys) != n {
+		return NewInvalidServiceEventErrorf("number of Random Beacon key shares (%d) is inconsistent with the number of DKG participants (len=%d)", len(commit.DKGParticipantKeys), len(commit.DKGIndexMap))
+	}
+
+	// enforce invariant: DKGIndexMap values form the set {0, 1, ..., n-1} where n=len(DKGParticipantKeys)
+	encounteredIndex := make([]bool, n)
+	for _, index := range commit.DKGIndexMap {
+		if index < 0 || index >= n {
+			return NewInvalidServiceEventErrorf("index %d is outside allowed range [0,n-1] for a DKG committee of size n=%d", index, n)
+		}
+		if encounteredIndex[index] {
+			return NewInvalidServiceEventErrorf("duplicated DKG index %d", index)
+		}
+		encounteredIndex[index] = true
+	}
+	// conclusion: there are n unique values in `DKGIndexMap`, each in the interval [0,n-1]. Hence, the values in DKGIndexMap form the set {0, 1, ..., n-1}.
+	numberOfRandomBeaconParticipants := uint(0)
+	for _, identity := range setup.Participants.Filter(filter.IsConsensusCommitteeMember) {
+		if _, found := commit.DKGIndexMap[identity.NodeID]; found {
+			numberOfRandomBeaconParticipants++
+		}
+	}
+	// Important SANITY CHECK: reject configurations where too few consensus nodes have valid random beacon key shares to
+	// reliably reach the required threshold of signers. Specifically, we enforce RandomBeaconSafetyThreshold ≤ |𝒞 ∩ 𝒟|.
+	//  - 𝒞 is the set of all consensus committee members
+	//  - 𝒟 is the set of all DKG participants
+	//  - ℛ is the subset of the consensus committee (ℛ ⊆ 𝒞): it contains consensus nodes (and only those) with a
+	//    private Random Beacon key share matching the respective public key share in the `EpochCommit` event.
+	//
+	// This is only a sanity check: on the protocol level, we only know which nodes (set 𝒟) could participate in the DKG,
+	// but not which consensus nodes obtained a *valid* random beacon key share. In other words, we only have access to the
+	// superset 𝒟 ∩ 𝒞 ⊇ ℛ here. If 𝒟 ∩ 𝒞 is already too small, we are certain that too few consensus nodes have valid random
+	// beacon keys (RandomBeaconSafetyThreshold > |𝒞 ∩ 𝒟| entails RandomBeaconSafetyThreshold > |ℛ|) and we reject the
+	// Epoch configuration. However, enough nodes in the superset |𝒞 ∩ 𝒟| does not guarantee that |ℛ| is above the critical
+	// threshold (e.g. too many nodes in |𝒞 ∩ 𝒟| could have failed the DKG and therefore not be in ℛ).
+	//
+	// This is different from the check in the DKG smart contract, where the value of |ℛ| is known and compared
+	// to the threshold. Unlike the DKG contract, the protocol state does not have access to the value of |ℛ| from a past
+	// key generation (decentralized or not).
+ // + // [2] https://www.notion.so/flowfoundation/DKG-contract-success-threshold-86c6bf2b92034855b3c185d7616eb6f1?pvs=4 + if RandomBeaconSafetyThreshold(uint(n)) > numberOfRandomBeaconParticipants { + return NewInvalidServiceEventErrorf("not enough random beacon participants required %d, got %d", + signature.RandomBeaconThreshold(n), numberOfRandomBeaconParticipants) + } + + return nil +} diff --git a/state/protocol/validity_test.go b/state/protocol/validity_test.go new file mode 100644 index 00000000000..363038f53fd --- /dev/null +++ b/state/protocol/validity_test.go @@ -0,0 +1,306 @@ +package protocol_test + +import ( + "testing" + + "github.com/onflow/crypto" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/utils/unittest" +) + +var participants = unittest.IdentityListFixture(20, unittest.WithAllRoles()) + +func TestEpochSetupValidity(t *testing.T) { + t.Run("invalid first/final view", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + // set an invalid final view for the first epoch + setup.FinalView = setup.FirstView + + err := protocol.IsValidEpochSetup(setup, true) + require.Error(t, err) + }) + + t.Run("non-canonically ordered identities", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + // randomly shuffle the identities so they are not canonically ordered + var err error + setup.Participants, err = setup.Participants.Shuffle() + require.NoError(t, err) + err = protocol.IsValidEpochSetup(setup, true) + require.Error(t, err) + }) + + t.Run("invalid cluster assignments", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + // create an invalid cluster assignment (node appears in multiple clusters) + collector := participants.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))[0] + setup.Assignments = append(setup.Assignments, []flow.Identifier{collector.NodeID}) + + err := protocol.IsValidEpochSetup(setup, true) + require.Error(t, err) + }) + + t.Run("short seed", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + setup.RandomSource = unittest.SeedFixture(crypto.KeyGenSeedMinLen - 1) + + err := protocol.IsValidEpochSetup(setup, true) + require.Error(t, err) + }) + + t.Run("node role missing", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + allWithoutExecutionNodes := setup.Participants.Filter(func(identitySkeleton *flow.IdentitySkeleton) bool { + return identitySkeleton.Role != flow.RoleExecution + }) + setup.Participants = allWithoutExecutionNodes + + err := protocol.IsValidEpochSetup(setup, true) + require.Error(t, err) + }) + + t.Run("network addresses are not unique", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + setup.Participants[0].Address = setup.Participants[1].Address + + err := protocol.IsValidEpochSetup(setup, true) + require.Error(t, err) + }) + + t.Run("no cluster assignment", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := 
result.ServiceEvents[0].Event.(*flow.EpochSetup) + setup.Assignments = flow.AssignmentList{} + + err := protocol.IsValidEpochSetup(setup, true) + require.Error(t, err) + }) +} + +func TestBootstrapInvalidEpochCommit(t *testing.T) { + t.Run("inconsistent counter", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) + // use a different counter for the commit + commit.Counter = setup.Counter + 1 + + err := protocol.IsValidEpochCommit(commit, setup) + require.Error(t, err) + }) + + t.Run("inconsistent cluster QCs", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) + // add an extra QC to commit + extraQC := unittest.QuorumCertificateWithSignerIDsFixture() + commit.ClusterQCs = append(commit.ClusterQCs, flow.ClusterQCVoteDataFromQC(extraQC)) + + err := protocol.IsValidEpochCommit(commit, setup) + require.Error(t, err) + }) + + t.Run("missing dkg group key", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) + commit.DKGGroupKey = nil + + err := protocol.IsValidEpochCommit(commit, setup) + require.Error(t, err) + }) + + t.Run("inconsistent DKG participants", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) + // remove a DKG participant key, this will lead to a case where we have more DKG participants than resulting keys. + commit.DKGParticipantKeys = commit.DKGParticipantKeys[1:] + for nodeID, index := range commit.DKGIndexMap { + if index == 0 { + delete(commit.DKGIndexMap, nodeID) + break + } + } + + err := protocol.IsValidEpochCommit(commit, setup) + require.Error(t, err) + }) + + t.Run("inconsistent DKG index map", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) + // add an extra DKG participant key, this will lead to a case where size of index map is different from the number of keys. + commit.DKGParticipantKeys = append(commit.DKGParticipantKeys, unittest.KeyFixture(crypto.BLSBLS12381).PublicKey()) + + err := protocol.IsValidEpochCommit(commit, setup) + require.Error(t, err) + }) + + t.Run("DKG index map contains negative index", func(t *testing.T) { + _, result, _ := unittest.BootstrapFixture(participants) + setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) + commit := result.ServiceEvents[1].Event.(*flow.EpochCommit) + // replace entity in the index map so the size matches but with negative index. 
+		nodeID := setup.Participants.Filter(filter.IsConsensusCommitteeMember)[0].NodeID
+		commit.DKGIndexMap[nodeID] = -1
+
+		err := protocol.IsValidEpochCommit(commit, setup)
+		require.Error(t, err)
+	})
+
+	t.Run("DKG indexes are not consecutive", func(t *testing.T) {
+		_, result, _ := unittest.BootstrapFixture(participants)
+		setup := result.ServiceEvents[0].Event.(*flow.EpochSetup)
+		commit := result.ServiceEvents[1].Event.(*flow.EpochCommit)
+		nodeID := setup.Participants.Filter(filter.IsConsensusCommitteeMember)[0].NodeID
+		commit.DKGIndexMap[nodeID] = len(commit.DKGParticipantKeys) // change index so it's out of bound and not consecutive
+
+		err := protocol.IsValidEpochCommit(commit, setup)
+		require.Error(t, err)
+	})
+
+	t.Run("DKG indexes are duplicated", func(t *testing.T) {
+		_, result, _ := unittest.BootstrapFixture(participants)
+		setup := result.ServiceEvents[0].Event.(*flow.EpochSetup)
+		commit := result.ServiceEvents[1].Event.(*flow.EpochCommit)
+		// assign one consensus participant the index of another, so the map size still
+		// matches the number of keys but two nodes share the same DKG index
+		nodeID := setup.Participants.Filter(filter.IsConsensusCommitteeMember)[0].NodeID
+		otherNodeID := setup.Participants.Filter(filter.IsConsensusCommitteeMember)[1].NodeID
+		commit.DKGIndexMap[nodeID] = commit.DKGIndexMap[otherNodeID] // duplicated index => invalid
+
+		err := protocol.IsValidEpochCommit(commit, setup)
+		require.Error(t, err)
+	})
+
+	t.Run("random beacon safety threshold not met", func(t *testing.T) {
+		_, result, _ := unittest.BootstrapFixture(participants)
+		setup := result.ServiceEvents[0].Event.(*flow.EpochSetup)
+		commit := result.ServiceEvents[1].Event.(*flow.EpochCommit)
+		requiredThreshold := protocol.RandomBeaconSafetyThreshold(uint(len(commit.DKGIndexMap)))
+		require.Greater(t, requiredThreshold, uint(0), "threshold has to be at least 1, otherwise the test is invalid")
+		// sample one less than the required threshold, so the threshold is not met
+		sampled, err := setup.Participants.Filter(filter.IsConsensusCommitteeMember).Sample(requiredThreshold - 1)
+		require.NoError(t, err)
+		setup.Participants = sampled
+
+		err = protocol.IsValidEpochCommit(commit, setup)
+		require.Error(t, err)
+	})
+}
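The three DKG index-map subtests above all violate a single invariant: for a committee of n participant keys, the map values must form exactly the set {0, 1, ..., n-1}. A standalone sketch of that check follows; the helper name `checkDKGIndexMap` is hypothetical, and the authoritative validation lives in IsValidEpochCommit.

	// checkDKGIndexMap illustrates the invariant enforced by IsValidEpochCommit:
	// every index lies in [0, n-1] and no index repeats. n unique values in
	// [0, n-1] imply the values form exactly the set {0, ..., n-1}.
	func checkDKGIndexMap(indexMap map[flow.Identifier]int, n int) error {
		encountered := make([]bool, n) // encountered[i] is true once index i has been seen
		for nodeID, index := range indexMap {
			if index < 0 || index >= n {
				return fmt.Errorf("node %x: index %d outside [0, %d]", nodeID, index, n-1)
			}
			if encountered[index] {
				return fmt.Errorf("node %x: duplicated DKG index %d", nodeID, index)
			}
			encountered[index] = true
		}
		return nil
	}

+
+// TestIsValidExtendingEpochSetup tests that the implementation enforces the following protocol rules in case they are violated:
+// (a) We should only have a single epoch setup event per epoch.
+// (b) The setup event should have the counter increased by one
+// (c) The first view needs to be exactly one greater than the current epoch final view
+// Additionally we require other conditions, but those are covered by the separate test `TestEpochSetupValidity`.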
+func TestIsValidExtendingEpochSetup(t *testing.T) {
+	t.Run("happy path", func(t *testing.T) {
+		protocolState := unittest.EpochStateFixture().EpochStateEntry
+		currentEpochSetup := protocolState.CurrentEpochSetup
+		extendingSetup := unittest.EpochSetupFixture(
+			unittest.WithFirstView(currentEpochSetup.FinalView+1),
+			unittest.WithFinalView(currentEpochSetup.FinalView+1000),
+			unittest.SetupWithCounter(currentEpochSetup.Counter+1),
+			unittest.WithParticipants(participants.ToSkeleton()),
+		)
+		err := protocol.IsValidExtendingEpochSetup(extendingSetup, protocolState)
+		require.NoError(t, err)
+	})
+	t.Run("(a) We should only have a single epoch setup event per epoch.", func(t *testing.T) {
+		protocolState := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()).EpochStateEntry
+		currentEpochSetup := protocolState.CurrentEpochSetup
+		extendingSetup := unittest.EpochSetupFixture(
+			unittest.WithFirstView(currentEpochSetup.FinalView+1),
+			unittest.WithFinalView(currentEpochSetup.FinalView+1000),
+			unittest.SetupWithCounter(currentEpochSetup.Counter+1),
+			unittest.WithParticipants(participants.ToSkeleton()),
+		)
+		err := protocol.IsValidExtendingEpochSetup(extendingSetup, protocolState)
+		require.Error(t, err)
+	})
+	t.Run("(b) The setup event should have the counter increased by one", func(t *testing.T) {
+		protocolState := unittest.EpochStateFixture().EpochStateEntry
+		currentEpochSetup := protocolState.CurrentEpochSetup
+		extendingSetup := unittest.EpochSetupFixture(
+			unittest.WithFirstView(currentEpochSetup.FinalView+1),
+			unittest.WithFinalView(currentEpochSetup.FinalView+1000),
+			unittest.SetupWithCounter(currentEpochSetup.Counter+2),
+			unittest.WithParticipants(participants.ToSkeleton()),
+		)
+		err := protocol.IsValidExtendingEpochSetup(extendingSetup, protocolState)
+		require.Error(t, err)
+	})
+	t.Run("(c) The first view needs to be exactly one greater than the current epoch final view", func(t *testing.T) {
+		protocolState := unittest.EpochStateFixture().EpochStateEntry
+		currentEpochSetup := protocolState.CurrentEpochSetup
+		extendingSetup := unittest.EpochSetupFixture(
+			unittest.WithFirstView(currentEpochSetup.FinalView+2),
+			unittest.WithFinalView(currentEpochSetup.FinalView+1000),
+			unittest.SetupWithCounter(currentEpochSetup.Counter+1),
+			unittest.WithParticipants(participants.ToSkeleton()),
+		)
+		err := protocol.IsValidExtendingEpochSetup(extendingSetup, protocolState)
+		require.Error(t, err)
+	})
+}
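Rules (b) and (c) condense into a small predicate; the sketch below is a hypothetical restatement for illustration only (rule (a) additionally requires that no setup for the next epoch has been registered yet):

	// isValidSuccessorSetup restates the counter and view-contiguity rules
	// checked by IsValidExtendingEpochSetup (illustrative, not part of this diff).
	func isValidSuccessorSetup(current, next *flow.EpochSetup) bool {
		return next.Counter == current.Counter+1 && // (b) counter increases by exactly one
			next.FirstView == current.FinalView+1 // (c) views are contiguous across the epoch boundary
	}

+
+// TestIsValidExtendingEpochCommit tests that the implementation enforces the following protocol rules in case they are violated:
+// (a) The epoch setup event needs to happen before the commit.
+// (b) We should only have a single epoch commit event per epoch.
+// Additionally we require other conditions, but those are covered by the separate test `TestBootstrapInvalidEpochCommit`.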
+func TestIsValidExtendingEpochCommit(t *testing.T) {
+	t.Run("happy path", func(t *testing.T) {
+		protocolState := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState(), func(entry *flow.RichEpochStateEntry) {
+			entry.NextEpochCommit = nil
+			entry.NextEpoch.CommitID = flow.ZeroID
+		})
+
+		nextEpochSetup := protocolState.NextEpochSetup
+		extendingCommit := unittest.EpochCommitFixture(
+			unittest.CommitWithCounter(nextEpochSetup.Counter),
+			unittest.WithDKGFromParticipants(nextEpochSetup.Participants),
+		)
+		err := protocol.IsValidExtendingEpochCommit(extendingCommit, protocolState.MinEpochStateEntry, nextEpochSetup)
+		require.NoError(t, err)
+	})
+	t.Run("(a) The epoch setup event needs to happen before the commit", func(t *testing.T) {
+		protocolState := unittest.EpochStateFixture()
+		currentEpochSetup := protocolState.CurrentEpochSetup
+		nextEpochSetup := unittest.EpochSetupFixture(
+			unittest.WithFirstView(currentEpochSetup.FinalView+1),
+			unittest.WithFinalView(currentEpochSetup.FinalView+1000),
+			unittest.SetupWithCounter(currentEpochSetup.Counter+1),
+			unittest.WithParticipants(participants.ToSkeleton()),
+		)
+		extendingCommit := unittest.EpochCommitFixture(
+			unittest.CommitWithCounter(nextEpochSetup.Counter),
+			unittest.WithDKGFromParticipants(nextEpochSetup.Participants),
+		)
+		err := protocol.IsValidExtendingEpochCommit(extendingCommit, protocolState.MinEpochStateEntry, nextEpochSetup)
+		require.Error(t, err)
+	})
+	t.Run("(b) We should only have a single epoch commit event per epoch", func(t *testing.T) {
+		protocolState := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState())
+
+		nextEpochSetup := protocolState.NextEpochSetup
+		extendingCommit := unittest.EpochCommitFixture(
+			unittest.CommitWithCounter(nextEpochSetup.Counter),
+			unittest.WithDKGFromParticipants(nextEpochSetup.Participants),
+		)
+		err := protocol.IsValidExtendingEpochCommit(extendingCommit, protocolState.MinEpochStateEntry, nextEpochSetup)
+		require.Error(t, err)
+	})
+}
diff --git a/storage/all.go b/storage/all.go
index eb2c9eb0328..f665dfbab3a 100644
--- a/storage/all.go
+++ b/storage/all.go
@@ -2,23 +2,26 @@ package storage

 // All includes all the storage modules
 type All struct {
-	Headers            Headers
-	Guarantees         Guarantees
-	Seals              Seals
-	Index              Index
-	Payloads           Payloads
-	Blocks             Blocks
-	QuorumCertificates QuorumCertificates
-	Setups             EpochSetups
-	EpochCommits       EpochCommits
-	Statuses           EpochStatuses
-	Results            ExecutionResults
-	Receipts           ExecutionReceipts
-	ChunkDataPacks     ChunkDataPacks
-	Commits            Commits
-	Transactions       Transactions
-	TransactionResults TransactionResults
-	Collections        Collections
-	Events             Events
-	VersionBeacons     VersionBeacons
+	Headers                   Headers
+	Guarantees                Guarantees
+	Seals                     Seals
+	Index                     Index
+	Payloads                  Payloads
+	Blocks                    Blocks
+	QuorumCertificates        QuorumCertificates
+	Setups                    EpochSetups
+	EpochCommits              EpochCommits
+	ChunkDataPacks            ChunkDataPacks
+	Transactions              Transactions
+	Collections               Collections
+	EpochProtocolStateEntries EpochProtocolStateEntries
+	ProtocolKVStore           ProtocolKVStore
+	VersionBeacons            VersionBeacons
+	RegisterIndex             RegisterIndex
+
+	// These results are for reading and storing the result data from block payloads.
+	// Execution Nodes use a different results module to store their own results
+	// and receipts (see the Execution struct below).
+	Results  ExecutionResults
+	Receipts ExecutionReceipts
 }
diff --git a/storage/approvals.go b/storage/approvals.go
index bfd77dadb47..304ad3f3b04 100644
--- a/storage/approvals.go
+++ b/storage/approvals.go
@@ -1,20 +1,48 @@
 package storage
 import (
+	"github.com/jordanschalm/lockctx"
+
 	"github.com/onflow/flow-go/model/flow"
 )

+// ResultApprovals implements persistent storage for result approvals.
+// Implementations of this interface must be concurrency safe.
+//
+// CAUTION: suitable only for _Verification Nodes_ for persisting their _own_ approvals!
+//   - In general, the Flow protocol requires multiple approvals for the same chunk from different
+//     verification nodes. In other words, there are multiple different approvals for the same chunk.
+//   - Internally, ResultApprovals populates an index from Executed Chunk ➜ ResultApproval. This is
+//     *only safe* for Verification Nodes when tracking their own approvals (for the same ExecutionResult,
+//     a Verifier will always produce the same approval)
 type ResultApprovals interface {
-	// Store stores a ResultApproval
-	Store(result *flow.ResultApproval) error
-
-	// Index indexes a ResultApproval by result ID and chunk index
-	Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error
+	// StoreMyApproval returns a functor, whose execution
+	//   - will store the given ResultApproval
+	//   - and index it by result ID and chunk index
+	//   - requires the storage.LockIndexResultApproval lock to be held by the caller
+	// It returns a functor so that some computation (such as computing the approval ID) can be
+	// done before acquiring the lock.
+	// The functor's expected error returns during normal operation are:
+	//   - `storage.ErrDataMismatch` if a *different* approval for the same key pair (ExecutionResultID, chunk index) is already indexed
+	//
+	// CAUTION: the Flow protocol requires multiple approvals for the same chunk from different verification
+	// nodes. In other words, there are multiple different approvals for the same chunk. Therefore, the index
+	// Executed Chunk ➜ ResultApproval ID (populated here) is *only safe* to be used by Verification Nodes
+	// for tracking their own approvals.
+	//
+	// For the same ExecutionResult, a Verifier will always produce the same approval. Therefore, this operation
+	// is idempotent, i.e. repeated calls with the *same inputs* are equivalent to just calling the method once;
+	// still the method succeeds on each call. However, when attempting to index *different* ResultApproval IDs
+	// for the same key (resultID, chunkIndex) this method errors with `storage.ErrDataMismatch`, as this should
+	// never happen for a correct Verification Node indexing its own approvals.
+	StoreMyApproval(approval *flow.ResultApproval) func(lctx lockctx.Proof) error

-	// ByID retrieves a ResultApproval by its ID
+	// ByID retrieves a ResultApproval by its ID.
+	// Returns [storage.ErrNotFound] if no Approval with the given ID has been stored.
 	ByID(approvalID flow.Identifier) (*flow.ResultApproval, error)
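A brief usage sketch of the functor pattern above; the lock-acquisition step is elided because the lock-manager helpers are not shown in this diff, and `approvals` stands for any ResultApprovals implementation:

	// Computing the approval ID happens while building the functor, i.e. outside the critical section.
	storeFn := approvals.StoreMyApproval(approval)
	// ... acquire storage.LockIndexResultApproval, yielding a lockctx.Proof `lctx` ...
	err := storeFn(lctx) // stores the approval and indexes it by (result ID, chunk index)

+	// ByChunk retrieves a ResultApproval by result ID and chunk index.
+	// Returns [storage.ErrNotFound] if no Approval for the given key (resultID, chunkIndex) has been stored.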
ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error) } diff --git a/storage/badger/all.go b/storage/badger/all.go index 58bc45e6848..4a8e4dfb3c2 100644 --- a/storage/badger/all.go +++ b/storage/badger/all.go @@ -5,49 +5,47 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/store" ) func InitAll(metrics module.CacheMetrics, db *badger.DB) *storage.All { - headers := NewHeaders(metrics, db) - guarantees := NewGuarantees(metrics, db, DefaultCacheSize) - seals := NewSeals(metrics, db) - index := NewIndex(metrics, db) - results := NewExecutionResults(metrics, db) - receipts := NewExecutionReceipts(metrics, db, results, DefaultCacheSize) - payloads := NewPayloads(db, index, guarantees, seals, receipts, results) - blocks := NewBlocks(db, headers, payloads) - qcs := NewQuorumCertificates(metrics, db, DefaultCacheSize) - setups := NewEpochSetups(metrics, db) - epochCommits := NewEpochCommits(metrics, db) - statuses := NewEpochStatuses(metrics, db) - versionBeacons := NewVersionBeacons(db) + sdb := badgerimpl.ToDB(db) + headers := store.NewHeaders(metrics, sdb) + guarantees := store.NewGuarantees(metrics, sdb, DefaultCacheSize, DefaultCacheSize) + seals := store.NewSeals(metrics, sdb) + index := store.NewIndex(metrics, sdb) + results := store.NewExecutionResults(metrics, sdb) + receipts := store.NewExecutionReceipts(metrics, sdb, results, DefaultCacheSize) + payloads := store.NewPayloads(sdb, index, guarantees, seals, receipts, results) + blocks := store.NewBlocks(sdb, headers, payloads) + qcs := store.NewQuorumCertificates(metrics, sdb, DefaultCacheSize) + setups := store.NewEpochSetups(metrics, sdb) + epochCommits := store.NewEpochCommits(metrics, sdb) + epochProtocolStateEntries := store.NewEpochProtocolStateEntries(metrics, setups, epochCommits, sdb, + store.DefaultEpochProtocolStateCacheSize, store.DefaultProtocolStateIndexCacheSize) + protocolKVStore := store.NewProtocolKVStore(metrics, sdb, store.DefaultProtocolKVStoreCacheSize, store.DefaultProtocolKVStoreByBlockIDCacheSize) + versionBeacons := store.NewVersionBeacons(sdb) - commits := NewCommits(metrics, db) - transactions := NewTransactions(metrics, db) - transactionResults := NewTransactionResults(metrics, db, 10000) - collections := NewCollections(db, transactions) - events := NewEvents(metrics, db) - chunkDataPacks := NewChunkDataPacks(metrics, db, collections, 1000) + transactions := store.NewTransactions(metrics, sdb) + collections := store.NewCollections(sdb, transactions) return &storage.All{ - Headers: headers, - Guarantees: guarantees, - Seals: seals, - Index: index, - Payloads: payloads, - Blocks: blocks, - QuorumCertificates: qcs, - Setups: setups, - EpochCommits: epochCommits, - Statuses: statuses, - VersionBeacons: versionBeacons, - Results: results, - Receipts: receipts, - ChunkDataPacks: chunkDataPacks, - Commits: commits, - Transactions: transactions, - TransactionResults: transactionResults, - Collections: collections, - Events: events, + Headers: headers, + Guarantees: guarantees, + Seals: seals, + Index: index, + Payloads: payloads, + Blocks: blocks, + QuorumCertificates: qcs, + Setups: setups, + EpochCommits: epochCommits, + EpochProtocolStateEntries: epochProtocolStateEntries, + ProtocolKVStore: protocolKVStore, + VersionBeacons: versionBeacons, + Results: results, + Receipts: receipts, + Transactions: transactions, + Collections: collections, } } diff --git 
a/storage/badger/approvals.go b/storage/badger/approvals.go deleted file mode 100644 index 5c33ef123f3..00000000000 --- a/storage/badger/approvals.go +++ /dev/null @@ -1,138 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// ResultApprovals implements persistent storage for result approvals. -type ResultApprovals struct { - db *badger.DB - cache *Cache -} - -func NewResultApprovals(collector module.CacheMetrics, db *badger.DB) *ResultApprovals { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - approval := val.(*flow.ResultApproval) - return transaction.WithTx(operation.SkipDuplicates(operation.InsertResultApproval(approval))) - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - approvalID := key.(flow.Identifier) - var approval flow.ResultApproval - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveResultApproval(approvalID, &approval)(tx) - return &approval, err - } - } - - res := &ResultApprovals{ - db: db, - cache: newCache(collector, metrics.ResourceResultApprovals, - withLimit(flow.DefaultTransactionExpiry+100), - withStore(store), - withRetrieve(retrieve)), - } - - return res -} - -func (r *ResultApprovals) store(approval *flow.ResultApproval) func(*transaction.Tx) error { - return r.cache.PutTx(approval.ID(), approval) -} - -func (r *ResultApprovals) byID(approvalID flow.Identifier) func(*badger.Txn) (*flow.ResultApproval, error) { - return func(tx *badger.Txn) (*flow.ResultApproval, error) { - val, err := r.cache.Get(approvalID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.ResultApproval), nil - } -} - -func (r *ResultApprovals) byChunk(resultID flow.Identifier, chunkIndex uint64) func(*badger.Txn) (*flow.ResultApproval, error) { - return func(tx *badger.Txn) (*flow.ResultApproval, error) { - var approvalID flow.Identifier - err := operation.LookupResultApproval(resultID, chunkIndex, &approvalID)(tx) - if err != nil { - return nil, fmt.Errorf("could not lookup result approval ID: %w", err) - } - return r.byID(approvalID)(tx) - } -} - -func (r *ResultApprovals) index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - err := operation.IndexResultApproval(resultID, chunkIndex, approvalID)(tx) - if err == nil { - return nil - } - - if !errors.Is(err, storage.ErrAlreadyExists) { - return err - } - - // When trying to index an approval for a result, and there is already - // an approval for the result, double check if the indexed approval is - // the same. - // We don't allow indexing multiple approvals per chunk because the - // store is only used within Verification nodes, and it is impossible - // for a Verification node to compute different approvals for the same - // chunk. - var storedApprovalID flow.Identifier - err = operation.LookupResultApproval(resultID, chunkIndex, &storedApprovalID)(tx) - if err != nil { - return fmt.Errorf("there is an approval stored already, but cannot retrieve it: %w", err) - } - - if storedApprovalID != approvalID { - return fmt.Errorf("attempting to store conflicting approval (result: %v, chunk index: %d): storing: %v, stored: %v. 
%w", - resultID, chunkIndex, approvalID, storedApprovalID, storage.ErrDataMismatch) - } - - return nil - } -} - -// Store stores a ResultApproval -func (r *ResultApprovals) Store(approval *flow.ResultApproval) error { - return operation.RetryOnConflictTx(r.db, transaction.Update, r.store(approval)) -} - -// Index indexes a ResultApproval by chunk (ResultID + chunk index). -// operation is idempotent (repeated calls with the same value are equivalent to -// just calling the method once; still the method succeeds on each call). -func (r *ResultApprovals) Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error { - err := operation.RetryOnConflict(r.db.Update, r.index(resultID, chunkIndex, approvalID)) - if err != nil { - return fmt.Errorf("could not index result approval: %w", err) - } - return nil -} - -// ByID retrieves a ResultApproval by its ID -func (r *ResultApprovals) ByID(approvalID flow.Identifier) (*flow.ResultApproval, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byID(approvalID)(tx) -} - -// ByChunk retrieves a ResultApproval by result ID and chunk index. The -// ResultApprovals store is only used within a verification node, where it is -// assumed that there is never more than one approval per chunk. -func (r *ResultApprovals) ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byChunk(resultID, chunkIndex)(tx) -} diff --git a/storage/badger/approvals_test.go b/storage/badger/approvals_test.go deleted file mode 100644 index 1b13a49ae59..00000000000 --- a/storage/badger/approvals_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestApprovalStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewResultApprovals(metrics, db) - - approval := unittest.ResultApprovalFixture() - err := store.Store(approval) - require.NoError(t, err) - - err = store.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) - require.NoError(t, err) - - byID, err := store.ByID(approval.ID()) - require.NoError(t, err) - require.Equal(t, approval, byID) - - byChunk, err := store.ByChunk(approval.Body.ExecutionResultID, approval.Body.ChunkIndex) - require.NoError(t, err) - require.Equal(t, approval, byChunk) - }) -} - -func TestApprovalStoreTwice(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewResultApprovals(metrics, db) - - approval := unittest.ResultApprovalFixture() - err := store.Store(approval) - require.NoError(t, err) - - err = store.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) - require.NoError(t, err) - - err = store.Store(approval) - require.NoError(t, err) - - err = store.Index(approval.Body.ExecutionResultID, approval.Body.ChunkIndex, approval.ID()) - require.NoError(t, err) - }) -} - -func TestApprovalStoreTwoDifferentApprovalsShouldFail(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewResultApprovals(metrics, db) - - 
approval1 := unittest.ResultApprovalFixture() - approval2 := unittest.ResultApprovalFixture() - - err := store.Store(approval1) - require.NoError(t, err) - - err = store.Index(approval1.Body.ExecutionResultID, approval1.Body.ChunkIndex, approval1.ID()) - require.NoError(t, err) - - // we can store a different approval, but we can't index a different - // approval for the same chunk. - err = store.Store(approval2) - require.NoError(t, err) - - err = store.Index(approval1.Body.ExecutionResultID, approval1.Body.ChunkIndex, approval2.ID()) - require.Error(t, err) - require.True(t, errors.Is(err, storage.ErrDataMismatch)) - }) -} diff --git a/storage/badger/batch.go b/storage/badger/batch.go index a3977544f4a..0ea68c82fcb 100644 --- a/storage/badger/batch.go +++ b/storage/badger/batch.go @@ -6,6 +6,10 @@ import ( "github.com/dgraph-io/badger/v2" ) +type BatchBuilder interface { + NewWriteBatch() *badger.WriteBatch +} + type Batch struct { writer *badger.WriteBatch @@ -13,7 +17,7 @@ type Batch struct { callbacks []func() } -func NewBatch(db *badger.DB) *Batch { +func NewBatch(db BatchBuilder) *Batch { batch := db.NewWriteBatch() return &Batch{ writer: batch, diff --git a/storage/badger/blocks.go b/storage/badger/blocks.go deleted file mode 100644 index cc0a35e3acd..00000000000 --- a/storage/badger/blocks.go +++ /dev/null @@ -1,154 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// Blocks implements a simple block storage around a badger DB. -type Blocks struct { - db *badger.DB - headers *Headers - payloads *Payloads -} - -// NewBlocks ... -func NewBlocks(db *badger.DB, headers *Headers, payloads *Payloads) *Blocks { - b := &Blocks{ - db: db, - headers: headers, - payloads: payloads, - } - return b -} - -func (b *Blocks) StoreTx(block *flow.Block) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - err := b.headers.storeTx(block.Header)(tx) - if err != nil { - return fmt.Errorf("could not store header %v: %w", block.Header.ID(), err) - } - err = b.payloads.storeTx(block.ID(), block.Payload)(tx) - if err != nil { - return fmt.Errorf("could not store payload: %w", err) - } - return nil - } -} - -func (b *Blocks) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*flow.Block, error) { - return func(tx *badger.Txn) (*flow.Block, error) { - header, err := b.headers.retrieveTx(blockID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve header: %w", err) - } - payload, err := b.payloads.retrieveTx(blockID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve payload: %w", err) - } - block := &flow.Block{ - Header: header, - Payload: payload, - } - return block, nil - } -} - -// Store ... -func (b *Blocks) Store(block *flow.Block) error { - return operation.RetryOnConflictTx(b.db, transaction.Update, b.StoreTx(block)) -} - -// ByID ... -func (b *Blocks) ByID(blockID flow.Identifier) (*flow.Block, error) { - tx := b.db.NewTransaction(false) - defer tx.Discard() - return b.retrieveTx(blockID)(tx) -} - -// ByHeight ... 
-func (b *Blocks) ByHeight(height uint64) (*flow.Block, error) { - tx := b.db.NewTransaction(false) - defer tx.Discard() - - blockID, err := b.headers.retrieveIdByHeightTx(height)(tx) - if err != nil { - return nil, err - } - return b.retrieveTx(blockID)(tx) -} - -// ByCollectionID ... -func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) { - var blockID flow.Identifier - err := b.db.View(operation.LookupCollectionBlock(collID, &blockID)) - if err != nil { - return nil, fmt.Errorf("could not look up block: %w", err) - } - return b.ByID(blockID) -} - -// IndexBlockForCollections ... -func (b *Blocks) IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error { - for _, collID := range collIDs { - err := operation.RetryOnConflict(b.db.Update, operation.SkipDuplicates(operation.IndexCollectionBlock(collID, blockID))) - if err != nil { - return fmt.Errorf("could not index collection block (%x): %w", collID, err) - } - } - return nil -} - -// InsertLastFullBlockHeightIfNotExists inserts the last full block height -func (b *Blocks) InsertLastFullBlockHeightIfNotExists(height uint64) error { - return operation.RetryOnConflict(b.db.Update, func(tx *badger.Txn) error { - err := operation.InsertLastCompleteBlockHeightIfNotExists(height)(tx) - if err != nil { - return fmt.Errorf("could not set LastFullBlockHeight: %w", err) - } - return nil - }) -} - -// UpdateLastFullBlockHeight upsert (update or insert) the last full block height -func (b *Blocks) UpdateLastFullBlockHeight(height uint64) error { - return operation.RetryOnConflict(b.db.Update, func(tx *badger.Txn) error { - - // try to update - err := operation.UpdateLastCompleteBlockHeight(height)(tx) - if err == nil { - return nil - } - - if !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("could not update LastFullBlockHeight: %w", err) - } - - // if key does not exist, try insert. - err = operation.InsertLastCompleteBlockHeight(height)(tx) - if err != nil { - return fmt.Errorf("could not insert LastFullBlockHeight: %w", err) - } - - return nil - }) -} - -// GetLastFullBlockHeight ... 
-func (b *Blocks) GetLastFullBlockHeight() (uint64, error) { - var h uint64 - err := b.db.View(operation.RetrieveLastCompleteBlockHeight(&h)) - if err != nil { - return 0, fmt.Errorf("failed to retrieve LastFullBlockHeight: %w", err) - } - return h, nil -} diff --git a/storage/badger/blocks_test.go b/storage/badger/blocks_test.go deleted file mode 100644 index d459f00751d..00000000000 --- a/storage/badger/blocks_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestBlocks(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - store := badgerstorage.NewBlocks(db, nil, nil) - - // check retrieval of non-existing key - _, err := store.GetLastFullBlockHeight() - assert.Error(t, err) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - - // insert a value for height - var height1 = uint64(1234) - err = store.UpdateLastFullBlockHeight(height1) - assert.NoError(t, err) - - // check value can be retrieved - actual, err := store.GetLastFullBlockHeight() - assert.NoError(t, err) - assert.Equal(t, height1, actual) - - // update the value for height - var height2 = uint64(1234) - err = store.UpdateLastFullBlockHeight(height2) - assert.NoError(t, err) - - // check that the new value can be retrieved - actual, err = store.GetLastFullBlockHeight() - assert.NoError(t, err) - assert.Equal(t, height2, actual) - }) -} - -func TestBlockStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - cacheMetrics := &metrics.NoopCollector{} - // verify after storing a block should be able to retrieve it back - blocks := badgerstorage.InitAll(cacheMetrics, db).Blocks - block := unittest.FullBlockFixture() - block.SetPayload(unittest.PayloadFixture(unittest.WithAllTheFixins)) - - err := blocks.Store(&block) - require.NoError(t, err) - - retrieved, err := blocks.ByID(block.ID()) - require.NoError(t, err) - - require.Equal(t, &block, retrieved) - - // verify after a restart, the block stored in the database is the same - // as the original - blocksAfterRestart := badgerstorage.InitAll(cacheMetrics, db).Blocks - receivedAfterRestart, err := blocksAfterRestart.ByID(block.ID()) - require.NoError(t, err) - - require.Equal(t, &block, receivedAfterRestart) - }) -} diff --git a/storage/badger/cache.go b/storage/badger/cache.go index 5af5d23f8b1..c7984e17df9 100644 --- a/storage/badger/cache.go +++ b/storage/badger/cache.go @@ -5,92 +5,86 @@ import ( "fmt" "github.com/dgraph-io/badger/v2" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/badger/transaction" ) -func withLimit(limit uint) func(*Cache) { - return func(c *Cache) { +func withLimit[K comparable, V any](limit uint) func(*Cache[K, V]) { + return func(c *Cache[K, V]) { c.limit = limit } } -type storeFunc func(key interface{}, val interface{}) func(*transaction.Tx) error +type storeFunc[K comparable, V any] func(key K, val V) func(*transaction.Tx) error const DefaultCacheSize = uint(1000) -func withStore(store storeFunc) func(*Cache) { - return func(c *Cache) { +func withStore[K comparable, V any](store storeFunc[K, V]) func(*Cache[K, 
V]) { + return func(c *Cache[K, V]) { c.store = store } } -func noStore(key interface{}, val interface{}) func(*transaction.Tx) error { +func noStore[K comparable, V any](_ K, _ V) func(*transaction.Tx) error { return func(tx *transaction.Tx) error { return fmt.Errorf("no store function for cache put available") } } -func noopStore(key interface{}, val interface{}) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - return nil - } -} - -type retrieveFunc func(key interface{}) func(*badger.Txn) (interface{}, error) +type retrieveFunc[K comparable, V any] func(key K) func(*badger.Txn) (V, error) -func withRetrieve(retrieve retrieveFunc) func(*Cache) { - return func(c *Cache) { +func withRetrieve[K comparable, V any](retrieve retrieveFunc[K, V]) func(*Cache[K, V]) { + return func(c *Cache[K, V]) { c.retrieve = retrieve } } -func noRetrieve(key interface{}) func(*badger.Txn) (interface{}, error) { - return func(tx *badger.Txn) (interface{}, error) { - return nil, fmt.Errorf("no retrieve function for cache get available") +func noRetrieve[K comparable, V any](_ K) func(*badger.Txn) (V, error) { + return func(tx *badger.Txn) (V, error) { + var nullV V + return nullV, fmt.Errorf("no retrieve function for cache get available") } } -type Cache struct { +type Cache[K comparable, V any] struct { metrics module.CacheMetrics limit uint - store storeFunc - retrieve retrieveFunc + store storeFunc[K, V] + retrieve retrieveFunc[K, V] resource string - cache *lru.Cache + cache *lru.Cache[K, V] } -func newCache(collector module.CacheMetrics, resourceName string, options ...func(*Cache)) *Cache { - c := Cache{ +func newCache[K comparable, V any](collector module.CacheMetrics, resourceName string, options ...func(*Cache[K, V])) *Cache[K, V] { + c := Cache[K, V]{ metrics: collector, limit: 1000, - store: noStore, - retrieve: noRetrieve, + store: noStore[K, V], + retrieve: noRetrieve[K, V], resource: resourceName, } for _, option := range options { option(&c) } - c.cache, _ = lru.New(int(c.limit)) + c.cache, _ = lru.New[K, V](int(c.limit)) c.metrics.CacheEntries(c.resource, uint(c.cache.Len())) return &c } // IsCached returns true if the key exists in the cache. // It DOES NOT check whether the key exists in the underlying data store. -func (c *Cache) IsCached(key any) bool { - exists := c.cache.Contains(key) - return exists +func (c *Cache[K, V]) IsCached(key K) bool { + return c.cache.Contains(key) } // Get will try to retrieve the resource from cache first, and then from the // injected data store. During normal operations, the following error returns are expected: -// - `storage.ErrNotFound` if key is unknown. +// - [storage.ErrNotFound] if key is unknown.
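With the generic Cache[K, V], call sites get typed values back instead of interface{} assertions. A hypothetical instantiation in the style of the stores above (illustrative only; the store/retrieve closures mirror the typed signatures shown earlier, and withLimit needs explicit type parameters because they cannot be inferred from a plain uint):

cache := newCache[flow.Identifier, *flow.ResultApproval](collector, metrics.ResourceResultApprovals,
	withLimit[flow.Identifier, *flow.ResultApproval](flow.DefaultTransactionExpiry+100),
	withStore(store),       // store: func(flow.Identifier, *flow.ResultApproval) func(*transaction.Tx) error
	withRetrieve(retrieve), // retrieve: func(flow.Identifier) func(*badger.Txn) (*flow.ResultApproval, error)
)
approval, err := cache.Get(approvalID)(tx) // approval is *flow.ResultApproval; no type assertion needed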
+func (c *Cache[K, V]) Get(key K) func(*badger.Txn) (V, error) { + return func(tx *badger.Txn) (V, error) { // check if we have it in the cache resource, cached := c.cache.Get(key) @@ -105,7 +99,8 @@ func (c *Cache) Get(key interface{}) func(*badger.Txn) (interface{}, error) { if errors.Is(err, storage.ErrNotFound) { c.metrics.CacheNotFound(c.resource) } - return nil, fmt.Errorf("could not retrieve resource: %w", err) + var nullV V + return nullV, fmt.Errorf("could not retrieve resource: %w", err) } c.metrics.CacheMiss(c.resource) @@ -120,12 +115,12 @@ func (c *Cache) Get(key interface{}) func(*badger.Txn) (interface{}, error) { } } -func (c *Cache) Remove(key interface{}) { +func (c *Cache[K, V]) Remove(key K) { c.cache.Remove(key) } // Insert will add a resource directly to the cache with the given ID -func (c *Cache) Insert(key interface{}, resource interface{}) { +func (c *Cache[K, V]) Insert(key K, resource V) { // cache the resource and eject least recently used one if we reached limit evicted := c.cache.Add(key, resource) if !evicted { @@ -134,7 +129,11 @@ func (c *Cache) Insert(key interface{}, resource interface{}) { } // PutTx will return tx which adds a resource to the cache with the given ID. -func (c *Cache) PutTx(key interface{}, resource interface{}) func(*transaction.Tx) error { +// +// Error returns: (Note actual errors depend on the specific store function used) +// - storage.ErrAlreadyExists if the key already exists in the database. +// - generic error in case of unexpected failure from the database layer or encoding failure. +func (c *Cache[K, V]) PutTx(key K, resource V) func(*transaction.Tx) error { storeOps := c.store(key, resource) // assemble DB operations to store resource (no execution) return func(tx *transaction.Tx) error { diff --git a/storage/badger/cache_test.go b/storage/badger/cache_test.go index fdc0e73dc51..76ea7ce18bc 100644 --- a/storage/badger/cache_test.go +++ b/storage/badger/cache_test.go @@ -5,13 +5,14 @@ import ( "github.com/stretchr/testify/assert" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" ) // TestCache_Exists tests existence checking items in the cache. 
func TestCache_Exists(t *testing.T) { - cache := newCache(metrics.NewNoopCollector(), "test") + cache := newCache[flow.Identifier, any](metrics.NewNoopCollector(), "test") t.Run("non-existent", func(t *testing.T) { key := unittest.IdentifierFixture() diff --git a/storage/badger/chunkDataPacks.go b/storage/badger/chunkDataPacks.go deleted file mode 100644 index c54a95c1c80..00000000000 --- a/storage/badger/chunkDataPacks.go +++ /dev/null @@ -1,158 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - badgermodel "github.com/onflow/flow-go/storage/badger/model" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type ChunkDataPacks struct { - db *badger.DB - collections storage.Collections - byChunkIDCache *Cache -} - -func NewChunkDataPacks(collector module.CacheMetrics, db *badger.DB, collections storage.Collections, byChunkIDCacheSize uint) *ChunkDataPacks { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - chdp := val.(*badgermodel.StoredChunkDataPack) - return transaction.WithTx(operation.SkipDuplicates(operation.InsertChunkDataPack(chdp))) - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - chunkID := key.(flow.Identifier) - - var c badgermodel.StoredChunkDataPack - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveChunkDataPack(chunkID, &c)(tx) - return &c, err - } - } - - cache := newCache(collector, metrics.ResourceChunkDataPack, - withLimit(byChunkIDCacheSize), - withStore(store), - withRetrieve(retrieve), - ) - - ch := ChunkDataPacks{ - db: db, - byChunkIDCache: cache, - collections: collections, - } - return &ch -} - -func (ch *ChunkDataPacks) Store(c *flow.ChunkDataPack) error { - sc := toStoredChunkDataPack(c) - err := operation.RetryOnConflictTx(ch.db, transaction.Update, ch.byChunkIDCache.PutTx(sc.ChunkID, sc)) - if err != nil { - return fmt.Errorf("could not store chunk datapack: %w", err) - } - return nil -} - -func (ch *ChunkDataPacks) Remove(chunkID flow.Identifier) error { - err := operation.RetryOnConflict(ch.db.Update, operation.RemoveChunkDataPack(chunkID)) - if err != nil { - return fmt.Errorf("could not remove chunk datapack: %w", err) - } - // TODO Integrate cache removal in a similar way as storage/retrieval is - ch.byChunkIDCache.Remove(chunkID) - return nil -} - -// BatchStore stores ChunkDataPack c keyed by its ChunkID in provided batch. -// No errors are expected during normal operation, but it may return generic error -// if entity is not serializable or Badger unexpectedly fails to process request -func (ch *ChunkDataPacks) BatchStore(c *flow.ChunkDataPack, batch storage.BatchStorage) error { - sc := toStoredChunkDataPack(c) - writeBatch := batch.GetWriter() - batch.OnSucceed(func() { - ch.byChunkIDCache.Insert(sc.ChunkID, sc) - }) - return operation.BatchInsertChunkDataPack(sc)(writeBatch) -} - -// BatchRemove removes ChunkDataPack c keyed by its ChunkID in provided batch -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
-func (ch *ChunkDataPacks) BatchRemove(chunkID flow.Identifier, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - batch.OnSucceed(func() { - ch.byChunkIDCache.Remove(chunkID) - }) - return operation.BatchRemoveChunkDataPack(chunkID)(writeBatch) -} - -func (ch *ChunkDataPacks) ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error) { - schdp, err := ch.byChunkID(chunkID) - if err != nil { - return nil, err - } - - chdp := &flow.ChunkDataPack{ - ChunkID: schdp.ChunkID, - StartState: schdp.StartState, - Proof: schdp.Proof, - } - - if !schdp.SystemChunk { - collection, err := ch.collections.ByID(schdp.CollectionID) - if err != nil { - return nil, fmt.Errorf("could not retrive collection (id: %x) for stored chunk data pack: %w", schdp.CollectionID, err) - } - - chdp.Collection = collection - } - - return chdp, nil -} - -func (ch *ChunkDataPacks) byChunkID(chunkID flow.Identifier) (*badgermodel.StoredChunkDataPack, error) { - tx := ch.db.NewTransaction(false) - defer tx.Discard() - - schdp, err := ch.retrieveCHDP(chunkID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrive stored chunk data pack: %w", err) - } - - return schdp, nil -} - -func (ch *ChunkDataPacks) retrieveCHDP(chunkID flow.Identifier) func(*badger.Txn) (*badgermodel.StoredChunkDataPack, error) { - return func(tx *badger.Txn) (*badgermodel.StoredChunkDataPack, error) { - val, err := ch.byChunkIDCache.Get(chunkID)(tx) - if err != nil { - return nil, err - } - return val.(*badgermodel.StoredChunkDataPack), nil - } -} - -func toStoredChunkDataPack(c *flow.ChunkDataPack) *badgermodel.StoredChunkDataPack { - sc := &badgermodel.StoredChunkDataPack{ - ChunkID: c.ChunkID, - StartState: c.StartState, - Proof: c.Proof, - SystemChunk: false, - } - - if c.Collection != nil { - // non system chunk - sc.CollectionID = c.Collection.ID() - } else { - sc.SystemChunk = true - } - - return sc -} diff --git a/storage/badger/chunk_consumer_test.go b/storage/badger/chunk_consumer_test.go deleted file mode 100644 index 05af3a1ca29..00000000000 --- a/storage/badger/chunk_consumer_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package badger - -import "testing" - -// 1. can init -// 2. can't set a process if never inited -// 3. can set after init -// 4. can read after init -// 5. can read after set -func TestChunkConsumer(t *testing.T) { -} diff --git a/storage/badger/chunk_data_pack_test.go b/storage/badger/chunk_data_pack_test.go deleted file mode 100644 index e1733d4e10f..00000000000 --- a/storage/badger/chunk_data_pack_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package badger_test - -import ( - "errors" - "sync" - "testing" - "time" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestChunkDataPacks_Store evaluates correct storage and retrieval of chunk data packs in the storage. -// It also evaluates that re-inserting is idempotent. 
-func TestChunkDataPacks_Store(t *testing.T) { - WithChunkDataPacks(t, 100, func(t *testing.T, chunkDataPacks []*flow.ChunkDataPack, chunkDataPackStore *badgerstorage.ChunkDataPacks, _ *badger.DB) { - wg := sync.WaitGroup{} - wg.Add(len(chunkDataPacks)) - for _, chunkDataPack := range chunkDataPacks { - go func(cdp flow.ChunkDataPack) { - err := chunkDataPackStore.Store(&cdp) - require.NoError(t, err) - - wg.Done() - }(*chunkDataPack) - } - - unittest.RequireReturnsBefore(t, wg.Wait, 1*time.Second, "could not store chunk data packs on time") - - // re-insert - should be idempotent - for _, chunkDataPack := range chunkDataPacks { - err := chunkDataPackStore.Store(chunkDataPack) - require.NoError(t, err) - } - }) -} - -// TestChunkDataPack_BatchStore evaluates correct batch storage and retrieval of chunk data packs in the storage. -func TestChunkDataPacks_BatchStore(t *testing.T) { - WithChunkDataPacks(t, 100, func(t *testing.T, chunkDataPacks []*flow.ChunkDataPack, chunkDataPackStore *badgerstorage.ChunkDataPacks, db *badger.DB) { - batch := badgerstorage.NewBatch(db) - - wg := sync.WaitGroup{} - wg.Add(len(chunkDataPacks)) - for _, chunkDataPack := range chunkDataPacks { - go func(cdp flow.ChunkDataPack) { - err := chunkDataPackStore.BatchStore(&cdp, batch) - require.NoError(t, err) - - wg.Done() - }(*chunkDataPack) - } - - unittest.RequireReturnsBefore(t, wg.Wait, 1*time.Second, "could not store chunk data packs on time") - - err := batch.Flush() - require.NoError(t, err) - }) -} - -// TestChunkDataPacks_MissingItem evaluates querying a missing item returns a storage.ErrNotFound error. -func TestChunkDataPacks_MissingItem(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - transactions := badgerstorage.NewTransactions(&metrics.NoopCollector{}, db) - collections := badgerstorage.NewCollections(db, transactions) - store := badgerstorage.NewChunkDataPacks(&metrics.NoopCollector{}, db, collections, 1) - - // attempt to get an invalid - _, err := store.ByChunkID(unittest.IdentifierFixture()) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - }) -} - -// WithChunkDataPacks is a test helper that generates specified number of chunk data packs, store them using the storeFunc, and -// then evaluates whether they are successfully retrieved from storage. -func WithChunkDataPacks(t *testing.T, chunks int, storeFunc func(*testing.T, []*flow.ChunkDataPack, *badgerstorage.ChunkDataPacks, *badger.DB)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - transactions := badgerstorage.NewTransactions(&metrics.NoopCollector{}, db) - collections := badgerstorage.NewCollections(db, transactions) - // keep the cache size at 1 to make sure that entries are written and read from storage itself. - store := badgerstorage.NewChunkDataPacks(&metrics.NoopCollector{}, db, collections, 1) - - chunkDataPacks := unittest.ChunkDataPacksFixture(chunks) - for _, chunkDataPack := range chunkDataPacks { - // stores collection in Collections storage (which ChunkDataPacks store uses internally) - err := collections.Store(chunkDataPack.Collection) - require.NoError(t, err) - } - - // stores chunk data packs in the memory using provided store function. - storeFunc(t, chunkDataPacks, store, db) - - // stored chunk data packs should be retrieved successfully. 
- for _, expected := range chunkDataPacks { - actual, err := store.ByChunkID(expected.ChunkID) - require.NoError(t, err) - - assert.Equal(t, expected, actual) - } - }) -} diff --git a/storage/badger/chunks_queue.go b/storage/badger/chunks_queue.go deleted file mode 100644 index 430abe0241b..00000000000 --- a/storage/badger/chunks_queue.go +++ /dev/null @@ -1,117 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/chunks" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" -) - -// ChunksQueue stores a queue of chunk locators that assigned to me to verify. -// Job consumers can read the locators as job from the queue by index. -// Chunk locators stored in this queue are unique. -type ChunksQueue struct { - db *badger.DB -} - -const JobQueueChunksQueue = "JobQueueChunksQueue" - -// NewChunkQueue will initialize the underlying badger database of chunk locator queue. -func NewChunkQueue(db *badger.DB) *ChunksQueue { - return &ChunksQueue{ - db: db, - } -} - -// Init initializes chunk queue's latest index with the given default index. -func (q *ChunksQueue) Init(defaultIndex uint64) (bool, error) { - _, err := q.LatestIndex() - if errors.Is(err, storage.ErrNotFound) { - err = q.db.Update(operation.InitJobLatestIndex(JobQueueChunksQueue, defaultIndex)) - if err != nil { - return false, fmt.Errorf("could not init chunk locator queue with default index %v: %w", defaultIndex, err) - } - return true, nil - } - if err != nil { - return false, fmt.Errorf("could not get latest index: %w", err) - } - - return false, nil -} - -// StoreChunkLocator stores a new chunk locator that assigned to me to the job queue. -// A true will be returned, if the locator was new. -// A false will be returned, if the locator was duplicate. -func (q *ChunksQueue) StoreChunkLocator(locator *chunks.Locator) (bool, error) { - err := operation.RetryOnConflict(q.db.Update, func(tx *badger.Txn) error { - // make sure the chunk locator is unique - err := operation.InsertChunkLocator(locator)(tx) - if err != nil { - return fmt.Errorf("failed to insert chunk locator: %w", err) - } - - // read the latest index - var latest uint64 - err = operation.RetrieveJobLatestIndex(JobQueueChunksQueue, &latest)(tx) - if err != nil { - return fmt.Errorf("failed to retrieve job index for chunk locator queue: %w", err) - } - - // insert to the next index - next := latest + 1 - err = operation.InsertJobAtIndex(JobQueueChunksQueue, next, locator.ID())(tx) - if err != nil { - return fmt.Errorf("failed to set job index for chunk locator queue at index %v: %w", next, err) - } - - // update the next index as the latest index - err = operation.SetJobLatestIndex(JobQueueChunksQueue, next)(tx) - if err != nil { - return fmt.Errorf("failed to update latest index %v: %w", next, err) - } - - return nil - }) - - // was trying to store a duplicate locator - if errors.Is(err, storage.ErrAlreadyExists) { - return false, nil - } - if err != nil { - return false, fmt.Errorf("failed to store chunk locator: %w", err) - } - return true, nil -} - -// LatestIndex returns the index of the latest chunk locator stored in the queue. 
-func (q *ChunksQueue) LatestIndex() (uint64, error) { - var latest uint64 - err := q.db.View(operation.RetrieveJobLatestIndex(JobQueueChunksQueue, &latest)) - if err != nil { - return 0, fmt.Errorf("could not retrieve latest index for chunks queue: %w", err) - } - return latest, nil -} - -// AtIndex returns the chunk locator stored at the given index in the queue. -func (q *ChunksQueue) AtIndex(index uint64) (*chunks.Locator, error) { - var locatorID flow.Identifier - err := q.db.View(operation.RetrieveJobAtIndex(JobQueueChunksQueue, index, &locatorID)) - if err != nil { - return nil, fmt.Errorf("could not retrieve chunk locator in queue: %w", err) - } - - var locator chunks.Locator - err = q.db.View(operation.RetrieveChunkLocator(locatorID, &locator)) - if err != nil { - return nil, fmt.Errorf("could not retrieve locator for chunk id %v: %w", locatorID, err) - } - - return &locator, nil -} diff --git a/storage/badger/chunks_queue_test.go b/storage/badger/chunks_queue_test.go deleted file mode 100644 index e1e9350afe8..00000000000 --- a/storage/badger/chunks_queue_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package badger - -import "testing" - -// 1. should be able to read after store -// 2. should be able to read the latest index after store -// 3. should return false if a duplicate chunk is stored -// 4. should return true if a new chunk is stored -// 5. should return an increased index when a chunk is stored -// 6. storing 100 chunks concurrent should return last index as 100 -// 7. should not be able to read with wrong index -// 8. should return init index after init -// 9. storing chunk and updating the latest index should be atomic -func TestStoreAndRead(t *testing.T) { - // TODO -} diff --git a/storage/badger/cleaner.go b/storage/badger/cleaner.go index e69782bada6..2c5e92873b6 100644 --- a/storage/badger/cleaner.go +++ b/storage/badger/cleaner.go @@ -1,9 +1,6 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package badger import ( - "math/rand" "time" "github.com/dgraph-io/badger/v2" @@ -12,6 +9,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/utils/rand" ) // Cleaner uses component.ComponentManager to implement module.Startable and module.ReadyDoneAware @@ -82,7 +80,17 @@ func (c *Cleaner) gcWorkerRoutine(ctx irrecoverable.SignalerContext, ready compo // We add 20% jitter into the interval, so that we don't risk nodes syncing their GC calls over time. // Therefore GC is run every X seconds, where X is uniformly sampled from [interval, interval*1.2] func (c *Cleaner) nextWaitDuration() time.Duration { - return time.Duration(c.interval.Nanoseconds() + rand.Int63n(c.interval.Nanoseconds()/5)) + jitter, err := rand.Uint64n(uint64(c.interval.Nanoseconds() / 5)) + if err != nil { + // if randomness fails, do not use a jitter for this instance. + // TODO: address the error properly and not swallow it. + // In this specific case, `utils/rand` only errors if the system randomness fails + // which is a symptom of a wider failure. Many other node components would catch such + // a failure. + c.log.Warn().Msg("jitter is zero because system randomness has failed") + jitter = 0 + } + return time.Duration(c.interval.Nanoseconds() + int64(jitter)) } // runGC runs garbage collection for badger DB, handles sentinel errors and reports metrics.
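The jitter arithmetic above is easy to verify in isolation. A self-contained sketch of the same sampling using math/rand (the production code uses flow-go's utils/rand, which is backed by cryptographic randomness and can therefore return an error; math/rand is used here only to keep the example runnable):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextWait mirrors nextWaitDuration: the total wait is uniformly sampled from [interval, interval*1.2).
func nextWait(interval time.Duration) time.Duration {
	jitter := rand.Int63n(interval.Nanoseconds() / 5) // jitter in [0, interval/5)
	return time.Duration(interval.Nanoseconds() + jitter)
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(nextWait(10 * time.Minute)) // prints durations between 10m and 12m
	}
}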
diff --git a/storage/badger/cluster_blocks.go b/storage/badger/cluster_blocks.go deleted file mode 100644 index 88aef54526f..00000000000 --- a/storage/badger/cluster_blocks.go +++ /dev/null @@ -1,73 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/cluster" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// ClusterBlocks implements a simple block storage around a badger DB. -type ClusterBlocks struct { - db *badger.DB - chainID flow.ChainID - headers *Headers - payloads *ClusterPayloads -} - -func NewClusterBlocks(db *badger.DB, chainID flow.ChainID, headers *Headers, payloads *ClusterPayloads) *ClusterBlocks { - b := &ClusterBlocks{ - db: db, - chainID: chainID, - headers: headers, - payloads: payloads, - } - return b -} - -func (b *ClusterBlocks) Store(block *cluster.Block) error { - return operation.RetryOnConflictTx(b.db, transaction.Update, b.storeTx(block)) -} - -func (b *ClusterBlocks) storeTx(block *cluster.Block) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - err := b.headers.storeTx(block.Header)(tx) - if err != nil { - return fmt.Errorf("could not store header: %w", err) - } - err = b.payloads.storeTx(block.ID(), block.Payload)(tx) - if err != nil { - return fmt.Errorf("could not store payload: %w", err) - } - return nil - } -} - -func (b *ClusterBlocks) ByID(blockID flow.Identifier) (*cluster.Block, error) { - header, err := b.headers.ByBlockID(blockID) - if err != nil { - return nil, fmt.Errorf("could not get header: %w", err) - } - payload, err := b.payloads.ByBlockID(blockID) - if err != nil { - return nil, fmt.Errorf("could not retrieve payload: %w", err) - } - block := cluster.Block{ - Header: header, - Payload: payload, - } - return &block, nil -} - -func (b *ClusterBlocks) ByHeight(height uint64) (*cluster.Block, error) { - var blockID flow.Identifier - err := b.db.View(operation.LookupClusterBlockHeight(b.chainID, height, &blockID)) - if err != nil { - return nil, fmt.Errorf("could not look up block: %w", err) - } - return b.ByID(blockID) -} diff --git a/storage/badger/cluster_payloads.go b/storage/badger/cluster_payloads.go deleted file mode 100644 index 84e260b9a75..00000000000 --- a/storage/badger/cluster_payloads.go +++ /dev/null @@ -1,71 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/cluster" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// ClusterPayloads implements storage of block payloads for collection node -// cluster consensus. 
-type ClusterPayloads struct { - db *badger.DB - cache *Cache -} - -func NewClusterPayloads(cacheMetrics module.CacheMetrics, db *badger.DB) *ClusterPayloads { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - blockID := key.(flow.Identifier) - payload := val.(*cluster.Payload) - return transaction.WithTx(procedure.InsertClusterPayload(blockID, payload)) - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - blockID := key.(flow.Identifier) - var payload cluster.Payload - return func(tx *badger.Txn) (interface{}, error) { - err := procedure.RetrieveClusterPayload(blockID, &payload)(tx) - return &payload, err - } - } - - cp := &ClusterPayloads{ - db: db, - cache: newCache(cacheMetrics, metrics.ResourceClusterPayload, - withLimit(flow.DefaultTransactionExpiry*4), - withStore(store), - withRetrieve(retrieve)), - } - - return cp -} - -func (cp *ClusterPayloads) storeTx(blockID flow.Identifier, payload *cluster.Payload) func(*transaction.Tx) error { - return cp.cache.PutTx(blockID, payload) -} -func (cp *ClusterPayloads) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*cluster.Payload, error) { - return func(tx *badger.Txn) (*cluster.Payload, error) { - val, err := cp.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val.(*cluster.Payload), nil - } -} - -func (cp *ClusterPayloads) Store(blockID flow.Identifier, payload *cluster.Payload) error { - return operation.RetryOnConflictTx(cp.db, transaction.Update, cp.storeTx(blockID, payload)) -} - -func (cp *ClusterPayloads) ByBlockID(blockID flow.Identifier) (*cluster.Payload, error) { - tx := cp.db.NewTransaction(false) - defer tx.Discard() - return cp.retrieveTx(blockID)(tx) -} diff --git a/storage/badger/cluster_payloads_test.go b/storage/badger/cluster_payloads_test.go deleted file mode 100644 index 797c0c701fa..00000000000 --- a/storage/badger/cluster_payloads_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestStoreRetrieveClusterPayload(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewClusterPayloads(metrics, db) - - blockID := unittest.IdentifierFixture() - expected := unittest.ClusterPayloadFixture(5) - - // store payload - err := store.Store(blockID, expected) - require.NoError(t, err) - - // fetch payload - payload, err := store.ByBlockID(blockID) - require.NoError(t, err) - require.Equal(t, expected, payload) - - // storing again should error with key already exists - err = store.Store(blockID, expected) - require.True(t, errors.Is(err, storage.ErrAlreadyExists)) - }) -} - -func TestClusterPayloadRetrieveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewClusterPayloads(metrics, db) - - blockID := unittest.IdentifierFixture() - - _, err := store.ByBlockID(blockID) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - }) -} diff --git a/storage/badger/collections.go b/storage/badger/collections.go deleted file mode 100644 index 748d4a04c74..00000000000 --- a/storage/badger/collections.go +++ /dev/null 
@@ -1,156 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type Collections struct { - db *badger.DB - transactions *Transactions -} - -func NewCollections(db *badger.DB, transactions *Transactions) *Collections { - c := &Collections{ - db: db, - transactions: transactions, - } - return c -} - -func (c *Collections) StoreLight(collection *flow.LightCollection) error { - err := operation.RetryOnConflict(c.db.Update, operation.InsertCollection(collection)) - if err != nil { - return fmt.Errorf("could not insert collection: %w", err) - } - - return nil -} - -func (c *Collections) Store(collection *flow.Collection) error { - return operation.RetryOnConflictTx(c.db, transaction.Update, func(ttx *transaction.Tx) error { - light := collection.Light() - err := transaction.WithTx(operation.SkipDuplicates(operation.InsertCollection(&light)))(ttx) - if err != nil { - return fmt.Errorf("could not insert collection: %w", err) - } - - for _, tx := range collection.Transactions { - err = c.transactions.storeTx(tx)(ttx) - if err != nil { - return fmt.Errorf("could not insert transaction: %w", err) - } - } - - return nil - }) -} - -func (c *Collections) ByID(colID flow.Identifier) (*flow.Collection, error) { - var ( - light flow.LightCollection - collection flow.Collection - ) - - err := c.db.View(func(btx *badger.Txn) error { - err := operation.RetrieveCollection(colID, &light)(btx) - if err != nil { - return fmt.Errorf("could not retrieve collection: %w", err) - } - - for _, txID := range light.Transactions { - tx, err := c.transactions.ByID(txID) - if err != nil { - return fmt.Errorf("could not retrieve transaction: %w", err) - } - - collection.Transactions = append(collection.Transactions, tx) - } - - return nil - }) - if err != nil { - return nil, err - } - - return &collection, nil -} - -func (c *Collections) LightByID(colID flow.Identifier) (*flow.LightCollection, error) { - var collection flow.LightCollection - - err := c.db.View(func(tx *badger.Txn) error { - err := operation.RetrieveCollection(colID, &collection)(tx) - if err != nil { - return fmt.Errorf("could not retrieve collection: %w", err) - } - - return nil - }) - if err != nil { - return nil, err - } - - return &collection, nil -} - -func (c *Collections) Remove(colID flow.Identifier) error { - return operation.RetryOnConflict(c.db.Update, func(btx *badger.Txn) error { - err := operation.RemoveCollection(colID)(btx) - if err != nil { - return fmt.Errorf("could not remove collection: %w", err) - } - return nil - }) -} - -func (c *Collections) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error { - return operation.RetryOnConflict(c.db.Update, func(tx *badger.Txn) error { - err := operation.InsertCollection(collection)(tx) - if err != nil { - return fmt.Errorf("could not insert collection: %w", err) - } - - for _, txID := range collection.Transactions { - err = operation.IndexCollectionByTransaction(txID, collection.ID())(tx) - if errors.Is(err, storage.ErrAlreadyExists) { - continue - } - if err != nil { - return fmt.Errorf("could not insert transaction ID: %w", err) - } - } - - return nil - }) -} - -func (c *Collections) LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error) { - var collection flow.LightCollection - err := c.db.View(func(tx *badger.Txn) error { - 
collID := &flow.Identifier{} - err := operation.RetrieveCollectionID(txID, collID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve collection id: %w", err) - } - - err = operation.RetrieveCollection(*collID, &collection)(tx) - if err != nil { - return fmt.Errorf("could not retrieve collection: %w", err) - } - - return nil - }) - if err != nil { - return nil, err - } - - return &collection, nil -} diff --git a/storage/badger/collections_test.go b/storage/badger/collections_test.go deleted file mode 100644 index f6a8db73729..00000000000 --- a/storage/badger/collections_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestCollections(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - metrics := metrics.NewNoopCollector() - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) - - // create a light collection with three transactions - expected := unittest.CollectionFixture(3).Light() - - // store the light collection and the transaction index - err := collections.StoreLightAndIndexByTransaction(&expected) - require.Nil(t, err) - - // retrieve the light collection by collection id - actual, err := collections.LightByID(expected.ID()) - require.Nil(t, err) - - // check if the light collection was indeed persisted - assert.Equal(t, &expected, actual) - - expectedID := expected.ID() - - // retrieve the collection light id by each of its transaction id - for _, txID := range expected.Transactions { - collLight, err := collections.LightByTransactionID(txID) - actualID := collLight.ID() - // check that the collection id can indeed be retrieved by transaction id - require.Nil(t, err) - assert.Equal(t, expectedID, actualID) - } - - }) -} - -func TestCollections_IndexDuplicateTx(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - transactions := bstorage.NewTransactions(metrics, db) - collections := bstorage.NewCollections(db, transactions) - - // create two collections which share 1 transaction - col1 := unittest.CollectionFixture(2) - col2 := unittest.CollectionFixture(1) - dupTx := col1.Transactions[0] // the duplicated transaction - col2Tx := col2.Transactions[0] // transaction that's only in col2 - col2.Transactions = append(col2.Transactions, dupTx) - - // insert col1 - col1Light := col1.Light() - err := collections.StoreLightAndIndexByTransaction(&col1Light) - require.NoError(t, err) - - // insert col2 - col2Light := col2.Light() - err = collections.StoreLightAndIndexByTransaction(&col2Light) - require.NoError(t, err) - - // should be able to retrieve col2 by ID - gotLightByCol2ID, err := collections.LightByID(col2.ID()) - require.NoError(t, err) - assert.Equal(t, &col2Light, gotLightByCol2ID) - - // should be able to retrieve col2 by the transaction which only appears in col2 - _, err = collections.LightByTransactionID(col2Tx.ID()) - require.NoError(t, err) - - // col1 (not col2) should be indexed by the shared transaction (since col1 was inserted first) - gotLightByDupTxID, err := collections.LightByTransactionID(dupTx.ID()) - require.NoError(t, err) - assert.Equal(t, &col1Light, gotLightByDupTxID) - }) -} diff --git 
a/storage/badger/commit_test.go b/storage/badger/commit_test.go deleted file mode 100644 index 25527c31c61..00000000000 --- a/storage/badger/commit_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -// TestCommitsStoreAndRetrieve tests that a commit can be stored, retrieved and attempted to be stored again without an error -func TestCommitsStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewCommits(metrics, db) - - // attempt to get a invalid commit - _, err := store.ByBlockID(unittest.IdentifierFixture()) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - - // store a commit in db - blockID := unittest.IdentifierFixture() - expected := unittest.StateCommitmentFixture() - err = store.Store(blockID, expected) - require.NoError(t, err) - - // retrieve the commit by ID - actual, err := store.ByBlockID(blockID) - require.NoError(t, err) - assert.Equal(t, expected, actual) - - // re-insert the commit - should be idempotent - err = store.Store(blockID, expected) - require.NoError(t, err) - }) -} diff --git a/storage/badger/commits.go b/storage/badger/commits.go deleted file mode 100644 index af60946d043..00000000000 --- a/storage/badger/commits.go +++ /dev/null @@ -1,92 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type Commits struct { - db *badger.DB - cache *Cache -} - -func NewCommits(collector module.CacheMetrics, db *badger.DB) *Commits { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - blockID := key.(flow.Identifier) - commit := val.(flow.StateCommitment) - return transaction.WithTx(operation.SkipDuplicates(operation.IndexStateCommitment(blockID, commit))) - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - blockID := key.(flow.Identifier) - var commit flow.StateCommitment - return func(tx *badger.Txn) (interface{}, error) { - err := operation.LookupStateCommitment(blockID, &commit)(tx) - return commit, err - } - } - - c := &Commits{ - db: db, - cache: newCache(collector, metrics.ResourceCommit, - withLimit(1000), - withStore(store), - withRetrieve(retrieve), - ), - } - - return c -} - -func (c *Commits) storeTx(blockID flow.Identifier, commit flow.StateCommitment) func(*transaction.Tx) error { - return c.cache.PutTx(blockID, commit) -} - -func (c *Commits) retrieveTx(blockID flow.Identifier) func(tx *badger.Txn) (flow.StateCommitment, error) { - return func(tx *badger.Txn) (flow.StateCommitment, error) { - val, err := c.cache.Get(blockID)(tx) - if err != nil { - return flow.DummyStateCommitment, err - } - return val.(flow.StateCommitment), nil - } -} - -func (c *Commits) Store(blockID flow.Identifier, commit flow.StateCommitment) error { - return operation.RetryOnConflictTx(c.db, transaction.Update, c.storeTx(blockID, commit)) -} - -// BatchStore stores Commit 
keyed by blockID in provided batch -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. -func (c *Commits) BatchStore(blockID flow.Identifier, commit flow.StateCommitment, batch storage.BatchStorage) error { - // we can't cache while using batches, as it's unknown at this point when, and if - // the batch will be committed. Cache will be populated on read however. - writeBatch := batch.GetWriter() - return operation.BatchIndexStateCommitment(blockID, commit)(writeBatch) -} - -func (c *Commits) ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error) { - tx := c.db.NewTransaction(false) - defer tx.Discard() - return c.retrieveTx(blockID)(tx) -} - -func (c *Commits) RemoveByBlockID(blockID flow.Identifier) error { - return c.db.Update(operation.SkipNonExist(operation.RemoveStateCommitment(blockID))) -} - -// BatchRemoveByBlockID removes Commit keyed by blockID in provided batch -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. -func (c *Commits) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - return operation.BatchRemoveStateCommitment(blockID)(writeBatch) -} diff --git a/storage/badger/common.go b/storage/badger/common.go deleted file mode 100644 index 77c6c5e7296..00000000000 --- a/storage/badger/common.go +++ /dev/null @@ -1,21 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/storage" -) - -func handleError(err error, t interface{}) error { - if err != nil { - if errors.Is(err, badger.ErrKeyNotFound) { - return storage.ErrNotFound - } - - return fmt.Errorf("could not retrieve %T: %w", t, err) - } - return nil -} diff --git a/storage/badger/computation_result.go b/storage/badger/computation_result.go deleted file mode 100644 index 8338884334a..00000000000 --- a/storage/badger/computation_result.go +++ /dev/null @@ -1,49 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" -) - -type ComputationResultUploadStatus struct { - db *badger.DB -} - -func NewComputationResultUploadStatus(db *badger.DB) *ComputationResultUploadStatus { - return &ComputationResultUploadStatus{ - db: db, - } -} - -func (c *ComputationResultUploadStatus) Upsert(blockID flow.Identifier, - wasUploadCompleted bool) error { - return operation.RetryOnConflict(c.db.Update, func(btx *badger.Txn) error { - return operation.UpsertComputationResultUploadStatus(blockID, wasUploadCompleted)(btx) - }) -} - -func (c *ComputationResultUploadStatus) GetIDsByUploadStatus(targetUploadStatus bool) ([]flow.Identifier, error) { - ids := make([]flow.Identifier, 0) - err := c.db.View(operation.GetBlockIDsByStatus(&ids, targetUploadStatus)) - return ids, err -} - -func (c *ComputationResultUploadStatus) ByID(computationResultID flow.Identifier) (bool, error) { - var ret bool - err := c.db.View(func(btx *badger.Txn) error { - return operation.GetComputationResultUploadStatus(computationResultID, &ret)(btx) - }) - if err != nil { - return false, err - } - - return ret, nil -} - -func (c *ComputationResultUploadStatus) Remove(computationResultID flow.Identifier) error { - return 
operation.RetryOnConflict(c.db.Update, func(btx *badger.Txn) error { - return operation.RemoveComputationResultUploadStatus(computationResultID)(btx) - }) -} diff --git a/storage/badger/computation_result_test.go b/storage/badger/computation_result_test.go deleted file mode 100644 index 6575611632c..00000000000 --- a/storage/badger/computation_result_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package badger_test - -import ( - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/engine/execution" - "github.com/onflow/flow-go/engine/execution/testutil" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestUpsertAndRetrieveComputationResult(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := testutil.ComputationResultFixture(t) - crStorage := bstorage.NewComputationResultUploadStatus(db) - crId := expected.ExecutableBlock.ID() - - // True case - upsert - testUploadStatus := true - err := crStorage.Upsert(crId, testUploadStatus) - require.NoError(t, err) - - actualUploadStatus, err := crStorage.ByID(crId) - require.NoError(t, err) - - assert.Equal(t, testUploadStatus, actualUploadStatus) - - // False case - update - testUploadStatus = false - err = crStorage.Upsert(crId, testUploadStatus) - require.NoError(t, err) - - actualUploadStatus, err = crStorage.ByID(crId) - require.NoError(t, err) - - assert.Equal(t, testUploadStatus, actualUploadStatus) - }) -} - -func TestRemoveComputationResults(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("Remove ComputationResult", func(t *testing.T) { - expected := testutil.ComputationResultFixture(t) - crId := expected.ExecutableBlock.ID() - crStorage := bstorage.NewComputationResultUploadStatus(db) - - testUploadStatus := true - err := crStorage.Upsert(crId, testUploadStatus) - require.NoError(t, err) - - _, err = crStorage.ByID(crId) - require.NoError(t, err) - - err = crStorage.Remove(crId) - require.NoError(t, err) - - _, err = crStorage.ByID(crId) - assert.Error(t, err) - }) - }) -} - -func TestListComputationResults(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("List all ComputationResult with given status", func(t *testing.T) { - expected := [...]*execution.ComputationResult{ - testutil.ComputationResultFixture(t), - testutil.ComputationResultFixture(t), - } - crStorage := bstorage.NewComputationResultUploadStatus(db) - - // Store a list of ComputationResult instances first - expectedIDs := make(map[string]bool, 0) - for _, cr := range expected { - crId := cr.ExecutableBlock.ID() - expectedIDs[crId.String()] = true - err := crStorage.Upsert(crId, true) - require.NoError(t, err) - } - // Add in entries with non-targeted status - unexpected := [...]*execution.ComputationResult{ - testutil.ComputationResultFixture(t), - testutil.ComputationResultFixture(t), - } - for _, cr := range unexpected { - crId := cr.ExecutableBlock.ID() - err := crStorage.Upsert(crId, false) - require.NoError(t, err) - } - - // Get the list of IDs for stored instances - crIDs, err := crStorage.GetIDsByUploadStatus(true) - require.NoError(t, err) - - crIDsStrMap := make(map[string]bool, 0) - for _, crID := range crIDs { - crIDsStrMap[crID.String()] = true - } - - assert.True(t, reflect.DeepEqual(crIDsStrMap, expectedIDs)) - }) - }) -} diff --git a/storage/badger/consumer_progress.go b/storage/badger/consumer_progress.go 
deleted file mode 100644 index 52855dd60b1..00000000000 --- a/storage/badger/consumer_progress.go +++ /dev/null @@ -1,50 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/storage/badger/operation" -) - -type ConsumerProgress struct { - db *badger.DB - consumer string // to distinguish the consume progress between different consumers -} - -func NewConsumerProgress(db *badger.DB, consumer string) *ConsumerProgress { - return &ConsumerProgress{ - db: db, - consumer: consumer, - } -} - -func (cp *ConsumerProgress) ProcessedIndex() (uint64, error) { - var processed uint64 - err := cp.db.View(operation.RetrieveProcessedIndex(cp.consumer, &processed)) - if err != nil { - return 0, fmt.Errorf("failed to retrieve processed index: %w", err) - } - return processed, nil -} - -// InitProcessedIndex insert the default processed index to the storage layer, can only be done once. -// initialize for the second time will return storage.ErrAlreadyExists -func (cp *ConsumerProgress) InitProcessedIndex(defaultIndex uint64) error { - err := operation.RetryOnConflict(cp.db.Update, operation.InsertProcessedIndex(cp.consumer, defaultIndex)) - if err != nil { - return fmt.Errorf("could not update processed index: %w", err) - } - - return nil -} - -func (cp *ConsumerProgress) SetProcessedIndex(processed uint64) error { - err := operation.RetryOnConflict(cp.db.Update, operation.SetProcessedIndex(cp.consumer, processed)) - if err != nil { - return fmt.Errorf("could not update processed index: %w", err) - } - - return nil -} diff --git a/storage/badger/dkg_state.go b/storage/badger/dkg_state.go index 63beb4c23a2..fc84ac5e5dc 100644 --- a/storage/badger/dkg_state.go +++ b/storage/badger/dkg_state.go @@ -1,136 +1,185 @@ package badger import ( + "errors" "fmt" "github.com/dgraph-io/badger/v2" + "github.com/onflow/crypto" + "golang.org/x/exp/slices" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/storage/badger/operation" "github.com/onflow/flow-go/storage/badger/transaction" ) -// DKGState stores state information about in-progress and completed DKGs, including -// computed keys. Must be instantiated using secrets database. -type DKGState struct { +// allowedStateTransitions defines the allowed state transitions for the Recoverable Random Beacon State Machine. +var allowedStateTransitions = map[flow.DKGState][]flow.DKGState{ + flow.DKGStateStarted: {flow.DKGStateCompleted, flow.DKGStateFailure, flow.RandomBeaconKeyCommitted}, + flow.DKGStateCompleted: {flow.RandomBeaconKeyCommitted, flow.DKGStateFailure}, + flow.RandomBeaconKeyCommitted: {}, // overwriting an already-committed key with a different one is not allowed! + flow.DKGStateFailure: {flow.RandomBeaconKeyCommitted, flow.DKGStateFailure}, + flow.DKGStateUninitialized: {flow.DKGStateStarted, flow.DKGStateFailure, flow.RandomBeaconKeyCommitted}, +} + +// RecoverablePrivateBeaconKeyStateMachine stores state information about in-progress and completed DKGs, including +// computed keys. Must be instantiated using secrets database. 
On the happy path, each consensus +// committee member takes part in the DKG, and after successfully finishing the DKG protocol it obtains a +// random beacon private key, which is stored in the database along with DKG state [flow.DKGStateCompleted]. +// If for any reason the DKG fails, then the private key will be nil and DKG state is set to [flow.DKGStateFailure]. +// When the epoch recovery takes place, we need to query the last valid beacon private key for the current replica and +// also set it for use during the Recovery Epoch, otherwise replicas won't be able to vote for blocks during the Recovery Epoch. +// CAUTION: This implementation heavily depends on atomic Badger transactions with interleaved reads and writes for correctness. +type RecoverablePrivateBeaconKeyStateMachine struct { db *badger.DB - keyCache *Cache + keyCache *Cache[uint64, *encodable.RandomBeaconPrivKey] + myNodeID flow.Identifier } -// NewDKGState returns the DKGState implementation backed by Badger DB. -func NewDKGState(collector module.CacheMetrics, db *badger.DB) (*DKGState, error) { +var _ storage.EpochRecoveryMyBeaconKey = (*RecoverablePrivateBeaconKeyStateMachine)(nil) + +// NewRecoverableRandomBeaconStateMachine returns the RecoverablePrivateBeaconKeyStateMachine implementation backed by Badger DB. +// No errors are expected during normal operations. +func NewRecoverableRandomBeaconStateMachine(collector module.CacheMetrics, db *badger.DB, myNodeID flow.Identifier) (*RecoverablePrivateBeaconKeyStateMachine, error) { err := operation.EnsureSecretDB(db) if err != nil { return nil, fmt.Errorf("cannot instantiate dkg state storage in non-secret db: %w", err) } - storeKey := func(key interface{}, val interface{}) func(*transaction.Tx) error { - epochCounter := key.(uint64) - info := val.(*encodable.RandomBeaconPrivKey) + storeKey := func(epochCounter uint64, info *encodable.RandomBeaconPrivKey) func(*transaction.Tx) error { return transaction.WithTx(operation.InsertMyBeaconPrivateKey(epochCounter, info)) } - retrieveKey := func(key interface{}) func(*badger.Txn) (interface{}, error) { - epochCounter := key.(uint64) - var info encodable.RandomBeaconPrivKey - return func(tx *badger.Txn) (interface{}, error) { + retrieveKey := func(epochCounter uint64) func(*badger.Txn) (*encodable.RandomBeaconPrivKey, error) { + return func(tx *badger.Txn) (*encodable.RandomBeaconPrivKey, error) { + var info encodable.RandomBeaconPrivKey err := operation.RetrieveMyBeaconPrivateKey(epochCounter, &info)(tx) return &info, err } } - cache := newCache(collector, metrics.ResourceBeaconKey, - withLimit(10), + cache := newCache[uint64, *encodable.RandomBeaconPrivKey](collector, metrics.ResourceBeaconKey, + withLimit[uint64, *encodable.RandomBeaconPrivKey](10), withStore(storeKey), withRetrieve(retrieveKey), ) - dkgState := &DKGState{ + return &RecoverablePrivateBeaconKeyStateMachine{ db: db, keyCache: cache, - } - - return dkgState, nil -} - -func (ds *DKGState) storeKeyTx(epochCounter uint64, key *encodable.RandomBeaconPrivKey) func(tx *transaction.Tx) error { - return ds.keyCache.PutTx(epochCounter, key) -} - -func (ds *DKGState) retrieveKeyTx(epochCounter uint64) func(tx *badger.Txn) (*encodable.RandomBeaconPrivKey, error) { - return func(tx *badger.Txn) (*encodable.RandomBeaconPrivKey, error) { - val, err := ds.keyCache.Get(epochCounter)(tx) - if err != nil { - return nil, err - } - return val.(*encodable.RandomBeaconPrivKey), nil - } + myNodeID: myNodeID, + }, nil } -// InsertMyBeaconPrivateKey stores the random beacon private key 
for an epoch. +// InsertMyBeaconPrivateKey stores the random beacon private key for an epoch and transitions the +// state machine into the [flow.DKGStateCompleted] state. // // CAUTION: these keys are stored before they are validated against the -// canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys -// to guarantee only keys safe for signing are returned -func (ds *DKGState) InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error { +// canonical key vector and may not be valid for use in signing. Use [storage.SafeBeaconKeys] +// interface to guarantee only keys safe for signing are returned. +// Error returns: +// - [storage.ErrAlreadyExists] - if there is already a key stored for given epoch. +// - [storage.InvalidDKGStateTransitionError] - if the requested state transition is invalid. +func (ds *RecoverablePrivateBeaconKeyStateMachine) InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error { if key == nil { return fmt.Errorf("will not store nil beacon key") } encodableKey := &encodable.RandomBeaconPrivKey{PrivateKey: key} - return operation.RetryOnConflictTx(ds.db, transaction.Update, ds.storeKeyTx(epochCounter, encodableKey)) + return operation.RetryOnConflictTx(ds.db, transaction.Update, func(tx *transaction.Tx) error { + currentState, err := retrieveCurrentStateTx(epochCounter)(tx.DBTxn) + if err != nil { + return err + } + err = ds.keyCache.PutTx(epochCounter, encodableKey)(tx) + if err != nil { + return err + } + return ds.processStateTransition(epochCounter, currentState, flow.DKGStateCompleted)(tx) + }) } -// RetrieveMyBeaconPrivateKey retrieves the random beacon private key for an epoch. +// UnsafeRetrieveMyBeaconPrivateKey retrieves the random beacon private key for an epoch. // -// CAUTION: these keys are stored before they are validated against the -// canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys -// to guarantee only keys safe for signing are returned -func (ds *DKGState) RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error) { +// CAUTION: these keys were stored before they are validated against the +// canonical key vector and may not be valid for use in signing. Use [storage.SafeBeaconKeys] +// interface to guarantee only keys safe for signing are returned +// Error returns: +// - [storage.ErrNotFound] - if there is no key stored for given epoch. +func (ds *RecoverablePrivateBeaconKeyStateMachine) UnsafeRetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error) { tx := ds.db.NewTransaction(false) defer tx.Discard() - encodableKey, err := ds.retrieveKeyTx(epochCounter)(tx) + encodableKey, err := ds.keyCache.Get(epochCounter)(tx) if err != nil { return nil, err } return encodableKey.PrivateKey, nil } -// SetDKGStarted sets the flag indicating the DKG has started for the given epoch. -func (ds *DKGState) SetDKGStarted(epochCounter uint64) error { - return ds.db.Update(operation.InsertDKGStartedForEpoch(epochCounter)) -} - -// GetDKGStarted checks whether the DKG has been started for the given epoch. -func (ds *DKGState) GetDKGStarted(epochCounter uint64) (bool, error) { +// IsDKGStarted checks whether the DKG has been started for the given epoch. +// No errors expected during normal operation. 
+func (ds *RecoverablePrivateBeaconKeyStateMachine) IsDKGStarted(epochCounter uint64) (bool, error) { var started bool err := ds.db.View(operation.RetrieveDKGStartedForEpoch(epochCounter, &started)) return started, err } -// SetDKGEndState stores that the DKG has ended, and its end state. -func (ds *DKGState) SetDKGEndState(epochCounter uint64, endState flow.DKGEndState) error { - return ds.db.Update(operation.InsertDKGEndStateForEpoch(epochCounter, endState)) -} +// SetDKGState performs a state transition for the Random Beacon Recoverable State Machine. +// Some state transitions may not be possible using this method. For instance, we might not be able to enter [flow.DKGStateCompleted] +// state directly from [flow.DKGStateStarted], even if such a transition is valid. The reason for this is that some states require additional +// data to be processed by the state machine before the transition can be made. For such cases, dedicated methods should be used, e.g. +// InsertMyBeaconPrivateKey and UpsertMyBeaconPrivateKey, which store the needed data and perform the transition in one atomic operation. +// Error returns: +// - [storage.InvalidDKGStateTransitionError] - if the requested state transition is invalid. +func (ds *RecoverablePrivateBeaconKeyStateMachine) SetDKGState(epochCounter uint64, newState flow.DKGState) error { + return operation.RetryOnConflictTx(ds.db, transaction.Update, func(tx *transaction.Tx) error { + currentState, err := retrieveCurrentStateTx(epochCounter)(tx.DBTxn) + if err != nil { + return err + } -// GetDKGEndState retrieves the DKG end state for the epoch. -func (ds *DKGState) GetDKGEndState(epochCounter uint64) (flow.DKGEndState, error) { - var endState flow.DKGEndState - err := ds.db.Update(operation.RetrieveDKGEndStateForEpoch(epochCounter, &endState)) - return endState, err + // `DKGStateStarted` or `DKGStateFailure` are the only accepted values for the target state, because transitioning + // into other states requires auxiliary information (e.g. a key and/or `EpochCommit` events) or is forbidden altogether. + if newState != flow.DKGStateStarted && newState != flow.DKGStateFailure { + return storage.NewInvalidDKGStateTransitionErrorf(currentState, newState, "transitioning into the target state is not allowed (at all or without auxiliary data)") + } + // perform the transition within the already-open transaction, so the read of the current state and the write of the new state remain atomic + return ds.processStateTransition(epochCounter, currentState, newState)(tx) + }) }
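For orientation, the following is a minimal, non-normative sketch of the intended happy path through this state machine. It uses only the constructor and methods introduced in this diff; the inputs (db, collector, myNodeID, epoch, myKey, commit) and the function name happyPath are assumed for illustration only.

package example

import (
	"fmt"

	"github.com/dgraph-io/badger/v2"
	"github.com/onflow/crypto"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// happyPath sketches the expected lifecycle: DKGStateUninitialized -> DKGStateStarted ->
// DKGStateCompleted (key inserted) -> RandomBeaconKeyCommitted (key confirmed by EpochCommit).
func happyPath(db *badger.DB, collector module.CacheMetrics, myNodeID flow.Identifier,
	epoch uint64, myKey crypto.PrivateKey, commit *flow.EpochCommit) error {
	stateMachine, err := bstorage.NewRecoverableRandomBeaconStateMachine(collector, db, myNodeID)
	if err != nil {
		return err
	}
	// DKGStateUninitialized -> DKGStateStarted: the DKG protocol has begun.
	if err := stateMachine.SetDKGState(epoch, flow.DKGStateStarted); err != nil {
		return err
	}
	// DKGStateStarted -> DKGStateCompleted: store the locally computed key.
	if err := stateMachine.InsertMyBeaconPrivateKey(epoch, myKey); err != nil {
		return err
	}
	// DKGStateCompleted -> RandomBeaconKeyCommitted: the EpochCommit event confirms the key.
	if err := stateMachine.CommitMyBeaconPrivateKey(epoch, commit); err != nil {
		return err
	}
	// Only now is the key reported as safe for random beacon signing.
	key, safe, err := stateMachine.RetrieveMyBeaconPrivateKey(epoch)
	if err != nil {
		return err
	}
	if !safe || key == nil {
		return fmt.Errorf("beacon key for epoch %d is not safe for signing", epoch)
	}
	return nil
}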
-// SafeBeaconPrivateKeys is the safe beacon key storage backed by Badger DB. -type SafeBeaconPrivateKeys struct { - state *DKGState +// processStateTransition performs the transition from `currentState` to `newState`, provided the transition is allowed by the state machine specification. +// Error returns: +// - [storage.InvalidDKGStateTransitionError] - if the requested state transition is invalid. +func (ds *RecoverablePrivateBeaconKeyStateMachine) processStateTransition(epochCounter uint64, currentState, newState flow.DKGState) func(*transaction.Tx) error { + return func(tx *transaction.Tx) error { + allowedStates := allowedStateTransitions[currentState] + if slices.Index(allowedStates, newState) < 0 { + return storage.NewInvalidDKGStateTransitionErrorf(currentState, newState, "not allowed") + } + + // ensure invariant holds and we still have a valid private key stored + if newState == flow.RandomBeaconKeyCommitted || newState == flow.DKGStateCompleted { + _, err := ds.keyCache.Get(epochCounter)(tx.DBTxn) + if err != nil { + return storage.NewInvalidDKGStateTransitionErrorf(currentState, newState, "cannot transition without a valid random beacon key: %w", err) + } + } + + return operation.UpsertDKGStateForEpoch(epochCounter, newState)(tx.DBTxn) + } } -// NewSafeBeaconPrivateKeys returns a safe beacon key storage backed by Badger DB. -func NewSafeBeaconPrivateKeys(state *DKGState) *SafeBeaconPrivateKeys { - return &SafeBeaconPrivateKeys{state: state} +// GetDKGState retrieves the current state of the state machine for the given epoch. +// If an error is returned, the returned state is meaningless. +// Error returns: +// - [storage.ErrNotFound] - if there is no state stored for the given epoch, meaning the state machine is in its initial state. +func (ds *RecoverablePrivateBeaconKeyStateMachine) GetDKGState(epochCounter uint64) (flow.DKGState, error) { + var currentState flow.DKGState + err := ds.db.View(operation.RetrieveDKGStateForEpoch(epochCounter, &currentState)) + return currentState, err } // RetrieveMyBeaconPrivateKey retrieves my beacon private key for the given @@ -140,40 +189,157 @@ func NewSafeBeaconPrivateKeys(state *DKGState) *SafeBeaconPrivateKeys { // - (key, true, nil) if the key is present and confirmed valid // - (nil, false, nil) if the key has been marked invalid or unavailable // -> no beacon key will ever be available for the epoch in this case -// - (nil, false, storage.ErrNotFound) if the DKG has not ended +// - (nil, false, [storage.ErrNotFound]) if the DKG has not ended // - (nil, false, error) for any unexpected exception -func (keys *SafeBeaconPrivateKeys) RetrieveMyBeaconPrivateKey(epochCounter uint64) (key crypto.PrivateKey, safe bool, err error) { - err = keys.state.db.View(func(txn *badger.Txn) error { +func (ds *RecoverablePrivateBeaconKeyStateMachine) RetrieveMyBeaconPrivateKey(epochCounter uint64) (key crypto.PrivateKey, safe bool, err error) { + err = ds.db.View(func(txn *badger.Txn) error { // retrieve the end state - var endState flow.DKGEndState - err = operation.RetrieveDKGEndStateForEpoch(epochCounter, &endState)(txn) + var currentState flow.DKGState + err = operation.RetrieveDKGStateForEpoch(epochCounter, &currentState)(txn) if err != nil { key = nil safe = false return err // storage.ErrNotFound or exception } - // for any end state besides success, the key is not safe - if endState != flow.DKGEndStateSuccess { + // a key is safe iff it was previously committed + if currentState == flow.RandomBeaconKeyCommitted { + // retrieve the key - any storage error (including `storage.ErrNotFound`) is an exception + var encodableKey *encodable.RandomBeaconPrivKey + encodableKey, err = ds.keyCache.Get(epochCounter)(txn) + if err != nil { + key = nil + safe = false + return irrecoverable.NewExceptionf("could not retrieve beacon key for epoch %d with successful DKG: %v", epochCounter, err) + } + + // the key was committed and is therefore safe for signing + safe = true + key = encodableKey.PrivateKey + } else { key = nil safe = false + return storage.ErrNotFound + } + + return nil + }) + return +}
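To make the retrieval contract concrete, here is a minimal sketch of a caller consuming the three-way (key, safe, err) result, assuming some storage.SafeBeaconKeys instance such as the state machine above; the helper name signingKeyForEpoch is hypothetical.

package example

import (
	"errors"
	"fmt"

	"github.com/onflow/crypto"

	"github.com/onflow/flow-go/storage"
)

// signingKeyForEpoch is an illustrative helper (not part of this diff) that
// branches on the documented return combinations of RetrieveMyBeaconPrivateKey.
func signingKeyForEpoch(keys storage.SafeBeaconKeys, epoch uint64) (crypto.PrivateKey, error) {
	key, safe, err := keys.RetrieveMyBeaconPrivateKey(epoch)
	if errors.Is(err, storage.ErrNotFound) {
		// the DKG for this epoch has not concluded yet; try again later
		return nil, fmt.Errorf("DKG for epoch %d has not concluded yet: %w", epoch, err)
	}
	if err != nil {
		// any other error is an unexpected exception
		return nil, fmt.Errorf("unexpected exception while retrieving beacon key: %w", err)
	}
	if !safe {
		// no beacon key will ever be available for this epoch;
		// the node must not use this key for random beacon signing
		return nil, nil
	}
	return key, nil
}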
+ +// CommitMyBeaconPrivateKey commits the previously inserted random beacon private key for an epoch. Effectively, this method +// transitions the state machine into the [flow.RandomBeaconKeyCommitted] state if the current state is [flow.DKGStateCompleted]. +// The caller needs to supply the [flow.EpochCommit] as evidence that the stored key is valid for the specified epoch. Repeated +// calls for the same epoch are accepted (idempotent operation), if and only if the provided EpochCommit confirms the already +// committed key. +// No errors are expected during normal operations. +func (ds *RecoverablePrivateBeaconKeyStateMachine) CommitMyBeaconPrivateKey(epochCounter uint64, commit *flow.EpochCommit) error { + return operation.RetryOnConflictTx(ds.db, transaction.Update, func(tx *transaction.Tx) error { + currentState, err := retrieveCurrentStateTx(epochCounter)(tx.DBTxn) + if err != nil { + return err + } + + // Repeated calls for the same epoch are idempotent, but only if they consistently confirm the stored private key. We explicitly + // enforce consistency with the already committed key here. Repetitions are considered rare, so the performance overhead is acceptable. + key, err := ds.keyCache.Get(epochCounter)(tx.DBTxn) + if err != nil { + return storage.NewInvalidDKGStateTransitionErrorf(currentState, flow.RandomBeaconKeyCommitted, "cannot transition without a valid random beacon key: %w", err) + } + // verify that the key is part of the EpochCommit + if err = ds.ensureKeyIncludedInEpoch(epochCounter, key, commit); err != nil { + return storage.NewInvalidDKGStateTransitionErrorf(currentState, flow.RandomBeaconKeyCommitted, + "according to EpochCommit event, my stored random beacon key is not valid for signing: %w", err) + } + // transition to RandomBeaconKeyCommitted, unless this is a repeated call, in which case there is nothing else to do + if currentState == flow.RandomBeaconKeyCommitted { return nil } + return ds.processStateTransition(epochCounter, currentState, flow.RandomBeaconKeyCommitted)(tx) + }) } - // retrieve the key - any storage error (including not found) is an exception - var encodableKey *encodable.RandomBeaconPrivKey - encodableKey, err = keys.state.retrieveKeyTx(epochCounter)(txn) +// UpsertMyBeaconPrivateKey overwrites the random beacon private key for the epoch that recovers the protocol +// from Epoch Fallback Mode. The resulting state of this method call is [flow.RandomBeaconKeyCommitted]. +// State transitions are allowed if and only if the current state is not equal to [flow.RandomBeaconKeyCommitted]. +// Repeated calls for the same epoch are idempotent, if and only if the provided EpochCommit confirms the already +// committed key (error otherwise). +// No errors are expected during normal operations.
+func (ds *RecoverablePrivateBeaconKeyStateMachine) UpsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey, commit *flow.EpochCommit) error { + if key == nil { + return fmt.Errorf("will not store nil beacon key") + } + encodableKey := &encodable.RandomBeaconPrivKey{PrivateKey: key} + err := operation.RetryOnConflictTx(ds.db, transaction.Update, func(tx *transaction.Tx) error { + currentState, err := retrieveCurrentStateTx(epochCounter)(tx.DBTxn) if err != nil { - key = nil - safe = false - return fmt.Errorf("[unexpected] could not retrieve beacon key for epoch %d with successful DKG: %v", epochCounter, err) + return err + } + // verify that the key is part of the EpochCommit + if err = ds.ensureKeyIncludedInEpoch(epochCounter, key, commit); err != nil { + return storage.NewInvalidDKGStateTransitionErrorf(currentState, flow.RandomBeaconKeyCommitted, + "according to EpochCommit event, the input random beacon key is not valid for signing: %w", err) } - // return the key only for successful end state - safe = true - key = encodableKey.PrivateKey - return nil + // Repeated calls for the same epoch are idempotent, but only if they consistently confirm the stored private key. We explicitly + // enforce consistency with the already committed key here. Repetitions are considered rare, so the performance overhead is acceptable. + if currentState == flow.RandomBeaconKeyCommitted { + storedKey, err := ds.keyCache.Get(epochCounter)(tx.DBTxn) + if err != nil { + return irrecoverable.NewExceptionf("could not retrieve a previously committed beacon key for epoch %d: %v", epochCounter, err) + } + if !key.Equals(storedKey.PrivateKey) { + return storage.NewInvalidDKGStateTransitionErrorf(currentState, flow.RandomBeaconKeyCommitted, + "cannot overwrite previously committed key for epoch: %d", epochCounter) + } + return nil + } // The following code will be reached if and only if no other Random Beacon key has previously been committed for this epoch. + + err = operation.UpsertMyBeaconPrivateKey(epochCounter, encodableKey)(tx.DBTxn) + if err != nil { + return err + } + return ds.processStateTransition(epochCounter, currentState, flow.RandomBeaconKeyCommitted)(tx) }) - return + if err != nil { + return fmt.Errorf("could not overwrite beacon key for epoch %d: %w", epochCounter, err) + } + // manually add the key to the cache (this does not touch the database) + ds.keyCache.Insert(epochCounter, encodableKey) + return nil +}
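Since UpsertMyBeaconPrivateKey is the entry point for recovering from Epoch Fallback Mode, a minimal sketch may help; it assumes the epoch recovery logic supplies the recovery key and the recovery epoch's EpochCommit, and the helper name recoverBeaconKey is hypothetical.

package example

import (
	"fmt"

	"github.com/onflow/crypto"

	"github.com/onflow/flow-go/model/flow"
	bstorage "github.com/onflow/flow-go/storage/badger"
)

// recoverBeaconKey is an illustrative sketch (not part of this diff) of the
// epoch recovery path.
func recoverBeaconKey(sm *bstorage.RecoverablePrivateBeaconKeyStateMachine,
	recoveryEpoch uint64, recoveryKey crypto.PrivateKey, commit *flow.EpochCommit) error {
	// Whatever state the failed DKG left behind, a successful upsert lands the
	// state machine in flow.RandomBeaconKeyCommitted; it only errors if a
	// different key was already committed for this epoch.
	if err := sm.UpsertMyBeaconPrivateKey(recoveryEpoch, recoveryKey, commit); err != nil {
		return fmt.Errorf("epoch recovery: could not commit recovery beacon key: %w", err)
	}
	return nil
}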
+ +// ensureKeyIncludedInEpoch enforces that the input private `key` matches the public Random Beacon key in `EpochCommit` for this node. +// No errors are expected during normal operations. +func (ds *RecoverablePrivateBeaconKeyStateMachine) ensureKeyIncludedInEpoch(epochCounter uint64, key crypto.PrivateKey, commit *flow.EpochCommit) error { + if commit.Counter != epochCounter { + return fmt.Errorf("commit counter does not match epoch counter: %d != %d", epochCounter, commit.Counter) + } + publicKey := key.PublicKey() + myDKGIndex, found := commit.DKGIndexMap[ds.myNodeID] + if !found { + return fmt.Errorf("my node ID %v is not in the DKG committee for epoch %d", ds.myNodeID, epochCounter) + } + if myDKGIndex < 0 || myDKGIndex >= len(commit.DKGParticipantKeys) { + return fmt.Errorf("my DKG index %d is out of range for epoch %d", myDKGIndex, epochCounter) + } + expectedPublicKey := commit.DKGParticipantKeys[myDKGIndex] + if !publicKey.Equals(expectedPublicKey) { + return fmt.Errorf("stored private key does not match public key in epoch commit for epoch %d", epochCounter) + } + return nil +} + +// retrieveCurrentStateTx prepares a badger tx which retrieves the current state for the given epoch. +// No errors are expected during normal operations. +func retrieveCurrentStateTx(epochCounter uint64) func(*badger.Txn) (flow.DKGState, error) { + return func(txn *badger.Txn) (flow.DKGState, error) { + currentState := flow.DKGStateUninitialized + err := operation.RetrieveDKGStateForEpoch(epochCounter, &currentState)(txn) + if err != nil && !errors.Is(err, storage.ErrNotFound) { + return currentState, fmt.Errorf("could not retrieve current state for epoch %d: %w", epochCounter, err) + } + return currentState, nil + } } diff --git a/storage/badger/dkg_state_test.go b/storage/badger/dkg_state_test.go index 3c9a6653b49..8723012ddbb 100644 --- a/storage/badger/dkg_state_test.go +++ b/storage/badger/dkg_state_test.go @@ -1,235 +1,795 @@ -package badger_test +package badger import ( - "errors" - "math/rand" "testing" - "time" "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" + "github.com/onflow/crypto" "github.com/stretchr/testify/require" + "go.uber.org/atomic" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" "github.com/onflow/flow-go/utils/unittest" ) -func TestDKGState_DKGStarted(t *testing.T) { - unittest.RunWithTypedBadgerDB(t, bstorage.InitSecret, func(db *badger.DB) { +// epochCounterGenerator generates unique epoch counters for the tests in this file. +var epochCounterGenerator = atomic.NewUint64(0) + +// TestDKGState_UninitializedState verifies that for new epochs, the RecoverableRandomBeaconStateMachine starts +// in the state [flow.DKGStateUninitialized] and reports correct values for that epoch's DKG state. +// For this test, we start with the initial state of the Recoverable Random Beacon State Machine and +// try to perform all possible actions and transitions in it.
+func TestDKGState_UninitializedState(t *testing.T) { + unittest.RunWithTypedBadgerDB(t, InitSecret, func(db *badger.DB) { metrics := metrics.NewNoopCollector() - store, err := bstorage.NewDKGState(metrics, db) + myNodeID := unittest.IdentifierFixture() + store, err := NewRecoverableRandomBeaconStateMachine(metrics, db, myNodeID) + require.NoError(t, err) + + setupState := func() uint64 { + return epochCounterGenerator.Add(1) + } + epochCounter := setupState() + + started, err := store.IsDKGStarted(epochCounter) require.NoError(t, err) + require.False(t, started) + + actualState, err := store.GetDKGState(epochCounter) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Equal(t, flow.DKGStateUninitialized, actualState) + + pk, err := store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, pk) + + pk, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.ErrorIs(t, err, storage.ErrNotFound) + require.False(t, safe) + require.Nil(t, pk) + + t.Run("state transition flow.DKGStateUninitialized -> flow.DKGStateUninitialized should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateUninitialized) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) + + t.Run("state transition flow.DKGStateUninitialized -> flow.DKGStateStarted should be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateStarted) + require.NoError(t, err) + }) + + t.Run("state transition flow.DKGStateUninitialized -> flow.DKGStateFailure should be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateFailure) + require.NoError(t, err) + }) + + t.Run("state transition flow.DKGStateUninitialized -> flow.DKGStateCompleted should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateCompleted) + require.Error(t, err, "should not be able to enter completed state without starting") + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) + + t.Run("state transition flow.DKGStateUninitialized -> flow.DKGStateCompleted by inserting a key should not be allowed", func(t *testing.T) { + err = store.InsertMyBeaconPrivateKey(setupState(), unittest.StakingPrivKeyFixture()) + require.Error(t, err, "should not be able to enter completed state without starting") + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) + + t.Run("while state transition flow.DKGStateUninitialized -> flow.RandomBeaconKeyCommitted is allowed, it should not proceed without a key being inserted first", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.RandomBeaconKeyCommitted) + require.Error(t, err, "should not be able to set DKG state to recovered, only using dedicated interface") + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) + + t.Run("state transition flow.DKGStateUninitialized -> flow.RandomBeaconKeyCommitted should be allowed, but only via upserting a key", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys = []crypto.PublicKey{expectedKey.PublicKey()} + commit.DKGIndexMap = flow.DKGIndexMap{myNodeID: 0} + }) + err = store.UpsertMyBeaconPrivateKey(epochCounter, expectedKey, evidence) + require.NoError(t, err) + resultingState, err := 
store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.RandomBeaconKeyCommitted, resultingState) + actualKey, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.True(t, safe) + require.Equal(t, expectedKey, actualKey) + actualKey, err = store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.Equal(t, expectedKey, actualKey) + }) + }) +} + +// TestDKGState_StartedState verifies that for a DKG in the state [flow.DKGStateStarted], the RecoverableRandomBeaconStateMachine +// reports correct values and permits / rejects state transitions according to the state machine specification. +func TestDKGState_StartedState(t *testing.T) { + unittest.RunWithTypedBadgerDB(t, InitSecret, func(db *badger.DB) { + metrics := metrics.NewNoopCollector() + myNodeID := unittest.IdentifierFixture() + store, err := NewRecoverableRandomBeaconStateMachine(metrics, db, myNodeID) + require.NoError(t, err) + + setupState := func() uint64 { + epochCounter := epochCounterGenerator.Add(1) + err = store.SetDKGState(epochCounter, flow.DKGStateStarted) + require.NoError(t, err) + return epochCounter + } + epochCounter := setupState() + + actualState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.DKGStateStarted, actualState) + + started, err := store.IsDKGStarted(epochCounter) + require.NoError(t, err) + require.True(t, started) + + pk, err := store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, pk) + + pk, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.ErrorIs(t, err, storage.ErrNotFound) + require.False(t, safe) + require.Nil(t, pk) + + t.Run("state transition flow.DKGStateStarted -> flow.DKGStateUninitialized should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateUninitialized) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) - epochCounter := rand.Uint64() + t.Run("state transition flow.DKGStateStarted -> flow.DKGStateStarted should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateStarted) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) - // check dkg-started flag for non-existent epoch - t.Run("DKGStarted should default to false", func(t *testing.T) { - started, err := store.GetDKGStarted(rand.Uint64()) - assert.NoError(t, err) - assert.False(t, started) + t.Run("state transition flow.DKGStateStarted -> flow.DKGStateFailure should be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateFailure) + require.NoError(t, err) }) - // store dkg-started flag for epoch - t.Run("should be able to set DKGStarted", func(t *testing.T) { - err = store.SetDKGStarted(epochCounter) - assert.NoError(t, err) + t.Run("state transition flow.DKGStateStarted -> flow.DKGStateCompleted should be rejected if no key was inserted first", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateCompleted) + require.Error(t, err, "should not be able to enter completed state without providing a private key") + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) }) - // retrieve flag for epoch - t.Run("should be able to read DKGStarted", func(t *testing.T) { - started, err := store.GetDKGStarted(epochCounter) - assert.NoError(t, err) - assert.True(t, started) + t.Run("state transition 
flow.DKGStateStarted -> flow.DKGStateCompleted should be allowed, but only via inserting a key", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + err = store.InsertMyBeaconPrivateKey(epochCounter, expectedKey) + require.NoError(t, err) + resultingState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.DKGStateCompleted, resultingState) + actualKey, err := store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.Equal(t, expectedKey, actualKey) + actualKey, safe, err = store.RetrieveMyBeaconPrivateKey(epochCounter) + require.ErrorIs(t, err, storage.ErrNotFound) + require.False(t, safe) + require.Nil(t, actualKey) + }) + + t.Run("while state transition flow.DKGStateStarted -> flow.RandomBeaconKeyCommitted is allowed, it should not proceed without a key being inserted first", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.RandomBeaconKeyCommitted) + require.Error(t, err, "should not be able to set DKG state to recovered, only using dedicated interface") + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) + + t.Run("state transition flow.DKGStateStarted -> flow.RandomBeaconKeyCommitted should be allowed, but only via upserting a key", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys = []crypto.PublicKey{expectedKey.PublicKey()} + commit.DKGIndexMap = flow.DKGIndexMap{myNodeID: 0} + }) + err = store.UpsertMyBeaconPrivateKey(epochCounter, expectedKey, evidence) + require.NoError(t, err) + resultingState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.RandomBeaconKeyCommitted, resultingState) + actualKey, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.True(t, safe) + require.Equal(t, expectedKey, actualKey) + actualKey, err = store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.Equal(t, expectedKey, actualKey) }) }) } -func TestDKGState_BeaconKeys(t *testing.T) { - unittest.RunWithTypedBadgerDB(t, bstorage.InitSecret, func(db *badger.DB) { +// TestDKGState_CompletedState verifies that for a DKG in the state [flow.DKGStateCompleted], the RecoverableRandomBeaconStateMachine +// reports correct values and permits / rejects state transitions according to the state machine specification. 
+func TestDKGState_CompletedState(t *testing.T) { + unittest.RunWithTypedBadgerDB(t, InitSecret, func(db *badger.DB) { metrics := metrics.NewNoopCollector() - store, err := bstorage.NewDKGState(metrics, db) + myNodeID := unittest.IdentifierFixture() + store, err := NewRecoverableRandomBeaconStateMachine(metrics, db, myNodeID) + require.NoError(t, err) + + var evidence *flow.EpochCommit + var expectedKey crypto.PrivateKey + setupState := func() uint64 { + epochCounter := epochCounterGenerator.Add(1) + err = store.SetDKGState(epochCounter, flow.DKGStateStarted) + require.NoError(t, err) + expectedKey = unittest.StakingPrivKeyFixture() + evidence = unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys = []crypto.PublicKey{expectedKey.PublicKey()} + commit.DKGIndexMap = flow.DKGIndexMap{myNodeID: 0} + }) + err = store.InsertMyBeaconPrivateKey(epochCounter, expectedKey) + require.NoError(t, err) + return epochCounter + } + epochCounter := setupState() + + actualState, err := store.GetDKGState(epochCounter) require.NoError(t, err) + require.Equal(t, flow.DKGStateCompleted, actualState) - rand.Seed(time.Now().UnixNano()) - epochCounter := rand.Uint64() + started, err := store.IsDKGStarted(epochCounter) + require.NoError(t, err) + require.True(t, started) - // attempt to get a non-existent key - t.Run("should error if retrieving non-existent key", func(t *testing.T) { - _, err = store.RetrieveMyBeaconPrivateKey(epochCounter) - assert.True(t, errors.Is(err, storage.ErrNotFound)) + pk, err := store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.NotNil(t, pk) + + pk, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.ErrorIs(t, err, storage.ErrNotFound) + require.False(t, safe) + require.Nil(t, pk) + + t.Run("state transition flow.DKGStateCompleted -> flow.DKGStateUninitialized should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateUninitialized) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) }) - // attempt to store a nil key should fail - use DKGState.SetEndState(flow.DKGEndStateNoKey) - t.Run("should fail to store a nil key instead)", func(t *testing.T) { - err = store.InsertMyBeaconPrivateKey(epochCounter, nil) - assert.Error(t, err) + t.Run("state transition flow.DKGStateCompleted -> flow.DKGStateStarted should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateStarted) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) }) - // store a key in db - expected := unittest.RandomBeaconPriv() - t.Run("should be able to store and read a key", func(t *testing.T) { - err = store.InsertMyBeaconPrivateKey(epochCounter, expected) + t.Run("state transition flow.DKGStateCompleted -> flow.DKGStateFailure should be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateFailure) require.NoError(t, err) }) - // retrieve the key by epoch counter - t.Run("should be able to retrieve stored key", func(t *testing.T) { - actual, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + t.Run("state transition flow.DKGStateCompleted -> flow.DKGStateCompleted should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateCompleted) + require.Error(t, err, "already in this state") + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + + err = 
store.InsertMyBeaconPrivateKey(setupState(), unittest.StakingPrivKeyFixture()) + require.Error(t, err, "already inserted private key") + require.ErrorIs(t, err, storage.ErrAlreadyExists) + }) + + t.Run("state transition flow.DKGStateCompleted -> flow.RandomBeaconKeyCommitted should be allowed only using dedicated function", func(t *testing.T) { + epochCounter := setupState() + err = store.SetDKGState(epochCounter, flow.RandomBeaconKeyCommitted) + require.Error(t, err, "should not be allowed since we need to use a dedicated function") + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) + + t.Run("state transition flow.DKGStateCompleted -> flow.RandomBeaconKeyCommitted should be allowed, because key is already stored", func(t *testing.T) { + epochCounter := setupState() + err = store.CommitMyBeaconPrivateKey(epochCounter, evidence) + require.NoError(t, err, "should be allowed since we have a stored private key") + resultingState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.RandomBeaconKeyCommitted, resultingState) + actualKey, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) require.NoError(t, err) - assert.Equal(t, expected, actual) + require.True(t, safe) + require.Equal(t, expectedKey, actualKey) + actualKey, err = store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.Equal(t, expectedKey, actualKey) }) - // test storing same key - t.Run("should fail to store a key twice", func(t *testing.T) { - err = store.InsertMyBeaconPrivateKey(epochCounter, expected) - require.True(t, errors.Is(err, storage.ErrAlreadyExists)) + t.Run("state transition flow.DKGStateCompleted -> flow.RandomBeaconKeyCommitted (recovery, overwriting existing key) should be allowed", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys = []crypto.PublicKey{expectedKey.PublicKey()} + commit.DKGIndexMap = flow.DKGIndexMap{myNodeID: 0} + }) + err = store.UpsertMyBeaconPrivateKey(epochCounter, expectedKey, evidence) + require.NoError(t, err) + resultingState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.RandomBeaconKeyCommitted, resultingState) + actualKey, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.True(t, safe) + require.Equal(t, expectedKey, actualKey) + actualKey, err = store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.Equal(t, expectedKey, actualKey) }) }) } -func TestDKGState_EndState(t *testing.T) { - unittest.RunWithTypedBadgerDB(t, bstorage.InitSecret, func(db *badger.DB) { +// TestDKGState_FailureState verifies that for a DKG in the state [flow.DKGStateFailure], the RecoverableRandomBeaconStateMachine +// reports correct values and permits / rejects state transitions according to the state machine specification. +// This test is for a specific scenario when no private key has been inserted yet. 
+func TestDKGState_FailureState(t *testing.T) { + unittest.RunWithTypedBadgerDB(t, InitSecret, func(db *badger.DB) { metrics := metrics.NewNoopCollector() - store, err := bstorage.NewDKGState(metrics, db) + myNodeID := unittest.IdentifierFixture() + store, err := NewRecoverableRandomBeaconStateMachine(metrics, db, myNodeID) + require.NoError(t, err) + setupState := func() uint64 { + epochCounter := epochCounterGenerator.Add(1) + err = store.SetDKGState(epochCounter, flow.DKGStateFailure) + require.NoError(t, err) + return epochCounter + } + epochCounter := setupState() + + actualState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.DKGStateFailure, actualState) + + started, err := store.IsDKGStarted(epochCounter) require.NoError(t, err) + require.True(t, started) + + pk, err := store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, pk) + + pk, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.ErrorIs(t, err, storage.ErrNotFound) + require.False(t, safe) + require.Nil(t, pk) - rand.Seed(time.Now().UnixNano()) - epochCounter := rand.Uint64() - endState := flow.DKGEndStateNoKey + t.Run("state transition flow.DKGStateFailure -> flow.DKGStateUninitialized should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateUninitialized) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) + + t.Run("state transition flow.DKGStateFailure -> flow.DKGStateStarted should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateStarted) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) - t.Run("should be able to store an end state", func(t *testing.T) { - err = store.SetDKGEndState(epochCounter, endState) + t.Run("state transition flow.DKGStateFailure -> flow.DKGStateFailure should be allowed", func(t *testing.T) { + epochCounter := setupState() + err = store.SetDKGState(epochCounter, flow.DKGStateFailure) require.NoError(t, err) + resultingState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.DKGStateFailure, resultingState) + }) + + t.Run("state transition flow.DKGStateFailure -> flow.DKGStateCompleted should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateCompleted) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) }) - t.Run("should be able to read an end state", func(t *testing.T) { - readEndState, err := store.GetDKGEndState(epochCounter) + t.Run("state transition flow.DKGStateFailure -> flow.DKGStateCompleted by inserting a key should not be allowed", func(t *testing.T) { + err = store.InsertMyBeaconPrivateKey(setupState(), unittest.StakingPrivKeyFixture()) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) + + t.Run("state transition flow.DKGStateFailure -> flow.RandomBeaconKeyCommitted is allowed, it should not proceed without a key being inserted first", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.RandomBeaconKeyCommitted) + require.Error(t, err, "should not be able to set DKG state to recovered, only using dedicated interface") + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) + t.Run("state transition flow.DKGStateFailure -> flow.RandomBeaconKeyCommitted should be allowed via upserting the key (recovery path)", 
func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys = []crypto.PublicKey{expectedKey.PublicKey()} + commit.DKGIndexMap = flow.DKGIndexMap{myNodeID: 0} + }) + err = store.UpsertMyBeaconPrivateKey(epochCounter, expectedKey, evidence) + require.NoError(t, err) + resultingState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.RandomBeaconKeyCommitted, resultingState) + actualKey, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) require.NoError(t, err) - assert.Equal(t, endState, readEndState) + require.True(t, safe) + require.Equal(t, expectedKey, actualKey) + actualKey, err = store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.Equal(t, expectedKey, actualKey) }) }) } -func TestSafeBeaconPrivateKeys(t *testing.T) { - unittest.RunWithTypedBadgerDB(t, bstorage.InitSecret, func(db *badger.DB) { +// TestDKGState_FailureStateAfterCompleted verifies that for a DKG in the state [flow.DKGStateFailure], the RecoverableRandomBeaconStateMachine +// reports correct values and permits / rejects state transitions according to the state machine specification. +// This test is for a specific scenario when the private key was previously stored, +// which means that the state machine went through [flow.DKGStateCompleted] and then we have transitioned to [flow.DKGStateFailure]. +func TestDKGState_FailureStateAfterCompleted(t *testing.T) { + unittest.RunWithTypedBadgerDB(t, InitSecret, func(db *badger.DB) { metrics := metrics.NewNoopCollector() - dkgState, err := bstorage.NewDKGState(metrics, db) + myNodeID := unittest.IdentifierFixture() + store, err := NewRecoverableRandomBeaconStateMachine(metrics, db, myNodeID) + require.NoError(t, err) + + var storedPrivateKey crypto.PrivateKey + setupState := func() uint64 { + epochCounter := epochCounterGenerator.Add(1) + storedPrivateKey = unittest.StakingPrivKeyFixture() + err = store.SetDKGState(epochCounter, flow.DKGStateStarted) + require.NoError(t, err) + err = store.InsertMyBeaconPrivateKey(epochCounter, storedPrivateKey) + require.NoError(t, err) + err = store.SetDKGState(epochCounter, flow.DKGStateFailure) + require.NoError(t, err) + return epochCounter + } + epochCounter := setupState() + + actualState, err := store.GetDKGState(epochCounter) require.NoError(t, err) - safeKeys := bstorage.NewSafeBeaconPrivateKeys(dkgState) + require.Equal(t, flow.DKGStateFailure, actualState) - t.Run("non-existent key -> should return ErrNotFound", func(t *testing.T) { - epochCounter := rand.Uint64() - key, safe, err := safeKeys.RetrieveMyBeaconPrivateKey(epochCounter) - assert.Nil(t, key) - assert.False(t, safe) - assert.ErrorIs(t, err, storage.ErrNotFound) + started, err := store.IsDKGStarted(epochCounter) + require.NoError(t, err) + require.True(t, started) + + pk, err := store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.True(t, pk.Equals(storedPrivateKey)) + + pk, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.ErrorIs(t, err, storage.ErrNotFound) + require.False(t, safe) + require.Nil(t, pk) + + t.Run("state transition flow.DKGStateFailure -> flow.DKGStateUninitialized should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateUninitialized) + require.Error(t, err) + require.True(t, 
storage.IsInvalidDKGStateTransitionError(err)) + }) + + t.Run("state transition flow.DKGStateFailure -> flow.DKGStateStarted should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateStarted) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) + + t.Run("state transition flow.DKGStateFailure -> flow.DKGStateFailure should be allowed", func(t *testing.T) { + epochCounter := setupState() + err = store.SetDKGState(epochCounter, flow.DKGStateFailure) + require.NoError(t, err) + resultingState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.DKGStateFailure, resultingState) }) - t.Run("existent key, non-existent end state -> should return ErrNotFound", func(t *testing.T) { - epochCounter := rand.Uint64() + t.Run("state transition flow.DKGStateFailure -> flow.DKGStateCompleted should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateCompleted) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) - // store a key - expected := unittest.RandomBeaconPriv().PrivateKey - err := dkgState.InsertMyBeaconPrivateKey(epochCounter, expected) - assert.NoError(t, err) + t.Run("state transition flow.DKGStateFailure -> flow.DKGStateCompleted by inserting a key should not be allowed", func(t *testing.T) { + err = store.InsertMyBeaconPrivateKey(setupState(), unittest.StakingPrivKeyFixture()) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + }) - key, safe, err := safeKeys.RetrieveMyBeaconPrivateKey(epochCounter) - assert.Nil(t, key) - assert.False(t, safe) - assert.ErrorIs(t, err, storage.ErrNotFound) + t.Run("state transition flow.DKGStateFailure -> flow.RandomBeaconKeyCommitted is allowed, it should not proceed without a key being inserted first", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.RandomBeaconKeyCommitted) + require.Error(t, err, "should not be able to set DKG state to recovered, only using dedicated interface") + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) }) + t.Run("state transition flow.DKGStateFailure -> flow.RandomBeaconKeyCommitted should be allowed via upserting the key (recovery path)", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys = []crypto.PublicKey{expectedKey.PublicKey()} + commit.DKGIndexMap = flow.DKGIndexMap{myNodeID: 0} + }) + err = store.UpsertMyBeaconPrivateKey(epochCounter, expectedKey, evidence) + require.NoError(t, err) + resultingState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.RandomBeaconKeyCommitted, resultingState) + actualKey, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.True(t, safe) + require.Equal(t, expectedKey, actualKey) + actualKey, err = store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.Equal(t, expectedKey, actualKey) + }) + }) +} + +// TestDKGState_RandomBeaconKeyCommittedState verifies that for a DKG in the state [flow.RandomBeaconKeyCommitted], the RecoverableRandomBeaconStateMachine +// reports correct values and permits / rejects state transitions according to the state machine specification. 
+func TestDKGState_RandomBeaconKeyCommittedState(t *testing.T) { + unittest.RunWithTypedBadgerDB(t, InitSecret, func(db *badger.DB) { + metrics := metrics.NewNoopCollector() + myNodeID := unittest.IdentifierFixture() + store, err := NewRecoverableRandomBeaconStateMachine(metrics, db, myNodeID) + require.NoError(t, err) - t.Run("existent key, unsuccessful end state -> not safe", func(t *testing.T) { - epochCounter := rand.Uint64() + var evidence *flow.EpochCommit + var privateKey crypto.PrivateKey + setupState := func() uint64 { + epochCounter := epochCounterGenerator.Add(1) + privateKey = unittest.StakingPrivKeyFixture() + evidence = unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys = []crypto.PublicKey{privateKey.PublicKey()} + commit.DKGIndexMap = flow.DKGIndexMap{myNodeID: 0} + }) + err = store.UpsertMyBeaconPrivateKey(epochCounter, privateKey, evidence) + require.NoError(t, err) + return epochCounter + } + epochCounter := setupState() + + actualState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.RandomBeaconKeyCommitted, actualState) - // store a key - expected := unittest.RandomBeaconPriv().PrivateKey - err := dkgState.InsertMyBeaconPrivateKey(epochCounter, expected) - assert.NoError(t, err) - // mark dkg unsuccessful - err = dkgState.SetDKGEndState(epochCounter, flow.DKGEndStateInconsistentKey) - assert.NoError(t, err) + started, err := store.IsDKGStarted(epochCounter) + require.NoError(t, err) + require.True(t, started) + + pk, err := store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.NotNil(t, pk) - key, safe, err := safeKeys.RetrieveMyBeaconPrivateKey(epochCounter) - assert.Nil(t, key) - assert.False(t, safe) - assert.NoError(t, err) + pk, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.True(t, safe) + require.NotNil(t, pk) + + t.Run("state transition flow.RandomBeaconKeyCommitted -> flow.DKGStateUninitialized should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateUninitialized) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) }) - t.Run("existent key, inconsistent key end state -> not safe", func(t *testing.T) { - epochCounter := rand.Uint64() + t.Run("state transition flow.RandomBeaconKeyCommitted -> flow.DKGStateStarted should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateStarted) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) - // store a key - expected := unittest.RandomBeaconPriv().PrivateKey - err := dkgState.InsertMyBeaconPrivateKey(epochCounter, expected) - assert.NoError(t, err) - // mark dkg result as inconsistent - err = dkgState.SetDKGEndState(epochCounter, flow.DKGEndStateInconsistentKey) - assert.NoError(t, err) + t.Run("state transition flow.RandomBeaconKeyCommitted -> flow.DKGStateFailure should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), flow.DKGStateFailure) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) - key, safe, err := safeKeys.RetrieveMyBeaconPrivateKey(epochCounter) - assert.Nil(t, key) - assert.False(t, safe) - assert.NoError(t, err) + t.Run("state transition flow.RandomBeaconKeyCommitted -> flow.DKGStateCompleted should not be allowed", func(t *testing.T) { + err = store.SetDKGState(setupState(), 
flow.DKGStateCompleted) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) }) - t.Run("non-existent key, no key end state -> not safe", func(t *testing.T) { - epochCounter := rand.Uint64() + t.Run("state transition flow.RandomBeaconKeyCommitted -> flow.DKGStateCompleted by inserting a key should not be allowed", func(t *testing.T) { + err = store.InsertMyBeaconPrivateKey(setupState(), unittest.StakingPrivKeyFixture()) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + }) - // mark dkg result as no key - err = dkgState.SetDKGEndState(epochCounter, flow.DKGEndStateNoKey) - assert.NoError(t, err) + t.Run("state transition flow.RandomBeaconKeyCommitted -> flow.RandomBeaconKeyCommitted should be idempotent for same key", func(t *testing.T) { + epochCounter := setupState() + err = store.CommitMyBeaconPrivateKey(epochCounter, evidence) + require.NoError(t, err, "should be possible as we are not changing the private key") + resultingState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.RandomBeaconKeyCommitted, resultingState) + actualKey, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.True(t, safe) + require.Equal(t, privateKey, actualKey) + actualKey, err = store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.Equal(t, privateKey, actualKey) - key, safe, err := safeKeys.RetrieveMyBeaconPrivateKey(epochCounter) - assert.Nil(t, key) - assert.False(t, safe) - assert.NoError(t, err) + err = store.UpsertMyBeaconPrivateKey(epochCounter, privateKey, evidence) + require.NoError(t, err, "should be possible ONLY for the same private key") }) - t.Run("existent key, successful end state -> safe", func(t *testing.T) { - epochCounter := rand.Uint64() + t.Run("state transition flow.RandomBeaconKeyCommitted -> flow.RandomBeaconKeyCommitted should not be allowed for a different key", func(t *testing.T) { + epochCounter := setupState() + err = store.CommitMyBeaconPrivateKey(epochCounter, evidence) + require.NoError(t, err, "should be possible as we are not changing the private key") + resultingState, err := store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.RandomBeaconKeyCommitted, resultingState) + actualKey, safe, err := store.RetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.True(t, safe) + require.Equal(t, privateKey, actualKey) + actualKey, err = store.UnsafeRetrieveMyBeaconPrivateKey(epochCounter) + require.NoError(t, err) + require.Equal(t, privateKey, actualKey) + + otherKey := unittest.StakingPrivKeyFixture() + otherEvidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys = []crypto.PublicKey{otherKey.PublicKey()} + commit.DKGIndexMap = flow.DKGIndexMap{myNodeID: 0} + }) + err = store.UpsertMyBeaconPrivateKey(epochCounter, otherKey, otherEvidence) + require.Error(t, err, "cannot overwrite previously committed key") + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + resultingState, err = store.GetDKGState(epochCounter) + require.NoError(t, err) + require.Equal(t, flow.RandomBeaconKeyCommitted, resultingState) + }) + }) +} - // store a key - expected := unittest.RandomBeaconPriv().PrivateKey - err := dkgState.InsertMyBeaconPrivateKey(epochCounter, expected) - assert.NoError(t, err) - // mark dkg successful - err = dkgState.SetDKGEndState(epochCounter, flow.DKGEndStateSuccess) - assert.NoError(t, err) +//
TestDKGState_InsertedKeyIsIncludedInTheEpoch verifies that the inserted key is included in the epoch commit evidence. +// This test ensures that the key is part of the random beacon committee and that the inserted key matches the public key registered for the local node ID. +func TestDKGState_InsertedKeyIsIncludedInTheEpoch(t *testing.T) { + unittest.RunWithTypedBadgerDB(t, InitSecret, func(db *badger.DB) { + metrics := metrics.NewNoopCollector() + myNodeID := unittest.IdentifierFixture() + store, err := NewRecoverableRandomBeaconStateMachine(metrics, db, myNodeID) + require.NoError(t, err) + + setupState := func() uint64 { + epochCounter := epochCounterGenerator.Add(1) + err = store.SetDKGState(epochCounter, flow.DKGStateStarted) + require.NoError(t, err) + return epochCounter + } + + t.Run("inserted key is included in the epoch, evidence with DKGIndexMap", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + err = store.InsertMyBeaconPrivateKey(epochCounter, expectedKey) + require.NoError(t, err) + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys[0] = expectedKey.PublicKey() + commit.DKGIndexMap = flow.DKGIndexMap{ + myNodeID: 0, + unittest.IdentifierFixture(): 1, + } + }) + err = store.CommitMyBeaconPrivateKey(epochCounter, evidence) + require.NoError(t, err) + }) + + t.Run("inserted key is included in the epoch but current node is not part of the random beacon committee", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + err = store.InsertMyBeaconPrivateKey(epochCounter, expectedKey) + require.NoError(t, err) + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys[0] = expectedKey.PublicKey() + commit.DKGIndexMap = flow.DKGIndexMap{ + unittest.IdentifierFixture(): 0, + unittest.IdentifierFixture(): 1, + } + }) + err = store.CommitMyBeaconPrivateKey(epochCounter, evidence) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) - key, safe, err := safeKeys.RetrieveMyBeaconPrivateKey(epochCounter) - assert.NotNil(t, key) - assert.True(t, expected.Equals(key)) - assert.True(t, safe) - assert.NoError(t, err) + t.Run("inserted key is included in the epoch but current node's key doesn't match inserted key", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + err = store.InsertMyBeaconPrivateKey(epochCounter, expectedKey) + require.NoError(t, err) + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGIndexMap = flow.DKGIndexMap{ + myNodeID: 0, + unittest.IdentifierFixture(): 1, + } + }) + err = store.CommitMyBeaconPrivateKey(epochCounter, evidence) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) }) + }) +} - t.Run("non-existent key, successful end state -> exception!", func(t *testing.T) { - epochCounter := rand.Uint64() +// TestDKGState_UpsertedKeyIsIncludedInTheEpoch verifies that the upserted key is included in the epoch commit evidence. +// This test ensures that the key is part of the random beacon committee and that the upserted key matches the public key registered for the local node ID.
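+//
+// Conceptually, the consistency check exercised by these tests is (pseudocode only;
+// names are illustrative and the actual implementation may differ):
+//
+//	index, ok := evidence.DKGIndexMap[myNodeID]
+//	if !ok {
+//		// this node is not part of the random beacon committee
+//		return InvalidDKGStateTransitionError
+//	}
+//	if !evidence.DKGParticipantKeys[index].Equals(key.PublicKey()) {
+//		// the private key does not match the public key committed for this node
+//		return InvalidDKGStateTransitionError
+//	}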
+func TestDKGState_UpsertedKeyIsIncludedInTheEpoch(t *testing.T) { + unittest.RunWithTypedBadgerDB(t, InitSecret, func(db *badger.DB) { + metrics := metrics.NewNoopCollector() + myNodeID := unittest.IdentifierFixture() + store, err := NewRecoverableRandomBeaconStateMachine(metrics, db, myNodeID) + require.NoError(t, err) - // mark dkg successful - err = dkgState.SetDKGEndState(epochCounter, flow.DKGEndStateSuccess) - assert.NoError(t, err) + setupState := func() uint64 { + epochCounter := epochCounterGenerator.Add(1) + err = store.SetDKGState(epochCounter, flow.DKGStateStarted) + require.NoError(t, err) + return epochCounter + } + + t.Run("upserted key is included in the epoch, evidence with DKGIndexMap", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys[0] = expectedKey.PublicKey() + commit.DKGIndexMap = flow.DKGIndexMap{ + myNodeID: 0, + unittest.IdentifierFixture(): 1, + } + }) + err = store.UpsertMyBeaconPrivateKey(epochCounter, expectedKey, evidence) + require.NoError(t, err) + }) - key, safe, err := safeKeys.RetrieveMyBeaconPrivateKey(epochCounter) - assert.Nil(t, key) - assert.False(t, safe) - assert.Error(t, err) - assert.NotErrorIs(t, err, storage.ErrNotFound) + t.Run("upserted key is included in the epoch but current node is not part of the random beacon committee", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGParticipantKeys[0] = expectedKey.PublicKey() + commit.DKGIndexMap = flow.DKGIndexMap{ + unittest.IdentifierFixture(): 0, + unittest.IdentifierFixture(): 1, + } + }) + err = store.UpsertMyBeaconPrivateKey(epochCounter, expectedKey, evidence) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) }) + t.Run("upserted key is included in the epoch but current node's key doesn't match upserted key", func(t *testing.T) { + epochCounter := setupState() + expectedKey := unittest.StakingPrivKeyFixture() + evidence := unittest.EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = epochCounter + commit.DKGIndexMap = flow.DKGIndexMap{ + myNodeID: 0, + unittest.IdentifierFixture(): 1, + } + }) + err = store.UpsertMyBeaconPrivateKey(epochCounter, expectedKey, evidence) + require.Error(t, err) + require.True(t, storage.IsInvalidDKGStateTransitionError(err)) + }) }) } -// TestSecretDBRequirement tests that the DKGState constructor will return an +// TestSecretDBRequirement tests that the RecoverablePrivateBeaconKeyStateMachine constructor will return an // error if instantiated using a database not marked with the correct type. 
func TestSecretDBRequirement(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { metrics := metrics.NewNoopCollector() - _, err := bstorage.NewDKGState(metrics, db) + _, err := NewRecoverableRandomBeaconStateMachine(metrics, db, unittest.IdentifierFixture()) require.Error(t, err) }) } diff --git a/storage/badger/epoch_commits.go b/storage/badger/epoch_commits.go deleted file mode 100644 index 8f9022e7f09..00000000000 --- a/storage/badger/epoch_commits.go +++ /dev/null @@ -1,72 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type EpochCommits struct { - db *badger.DB - cache *Cache -} - -func NewEpochCommits(collector module.CacheMetrics, db *badger.DB) *EpochCommits { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - id := key.(flow.Identifier) - commit := val.(*flow.EpochCommit) - return transaction.WithTx(operation.SkipDuplicates(operation.InsertEpochCommit(id, commit))) - } - - retrieve := func(key interface{}) func(*badger.Txn) (interface{}, error) { - id := key.(flow.Identifier) - var commit flow.EpochCommit - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveEpochCommit(id, &commit)(tx) - return &commit, err - } - } - - ec := &EpochCommits{ - db: db, - cache: newCache(collector, metrics.ResourceEpochCommit, - withLimit(4*flow.DefaultTransactionExpiry), - withStore(store), - withRetrieve(retrieve)), - } - - return ec -} - -func (ec *EpochCommits) StoreTx(commit *flow.EpochCommit) func(*transaction.Tx) error { - return ec.cache.PutTx(commit.ID(), commit) -} - -func (ec *EpochCommits) retrieveTx(commitID flow.Identifier) func(tx *badger.Txn) (*flow.EpochCommit, error) { - return func(tx *badger.Txn) (*flow.EpochCommit, error) { - val, err := ec.cache.Get(commitID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.EpochCommit), nil - } -} - -// TODO: can we remove this method? Its not contained in the interface. -func (ec *EpochCommits) Store(commit *flow.EpochCommit) error { - return operation.RetryOnConflictTx(ec.db, transaction.Update, ec.StoreTx(commit)) -} - -// ByID will return the EpochCommit event by its ID. 
-// Error returns: -// * storage.ErrNotFound if no EpochCommit with the ID exists -func (ec *EpochCommits) ByID(commitID flow.Identifier) (*flow.EpochCommit, error) { - tx := ec.db.NewTransaction(false) - defer tx.Discard() - return ec.retrieveTx(commitID)(tx) -} diff --git a/storage/badger/epoch_commits_test.go b/storage/badger/epoch_commits_test.go deleted file mode 100644 index aacbf81f7b9..00000000000 --- a/storage/badger/epoch_commits_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -// TestEpochCommitStoreAndRetrieve tests that a commit can be stored, retrieved and attempted to be stored again without an error -func TestEpochCommitStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewEpochCommits(metrics, db) - - // attempt to get a invalid commit - _, err := store.ByID(unittest.IdentifierFixture()) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - - // store a commit in db - expected := unittest.EpochCommitFixture() - err = store.Store(expected) - require.NoError(t, err) - - // retrieve the commit by ID - actual, err := store.ByID(expected.ID()) - require.NoError(t, err) - assert.Equal(t, expected, actual) - - // test storing same epoch commit - err = store.Store(expected) - require.NoError(t, err) - }) -} diff --git a/storage/badger/epoch_setups.go b/storage/badger/epoch_setups.go deleted file mode 100644 index 4b31db4b867..00000000000 --- a/storage/badger/epoch_setups.go +++ /dev/null @@ -1,68 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type EpochSetups struct { - db *badger.DB - cache *Cache -} - -// NewEpochSetups instantiates a new EpochSetups storage. 
-func NewEpochSetups(collector module.CacheMetrics, db *badger.DB) *EpochSetups { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - id := key.(flow.Identifier) - setup := val.(*flow.EpochSetup) - return transaction.WithTx(operation.SkipDuplicates(operation.InsertEpochSetup(id, setup))) - } - - retrieve := func(key interface{}) func(*badger.Txn) (interface{}, error) { - id := key.(flow.Identifier) - var setup flow.EpochSetup - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveEpochSetup(id, &setup)(tx) - return &setup, err - } - } - - es := &EpochSetups{ - db: db, - cache: newCache(collector, metrics.ResourceEpochSetup, - withLimit(4*flow.DefaultTransactionExpiry), - withStore(store), - withRetrieve(retrieve)), - } - - return es -} - -func (es *EpochSetups) StoreTx(setup *flow.EpochSetup) func(tx *transaction.Tx) error { - return es.cache.PutTx(setup.ID(), setup) -} - -func (es *EpochSetups) retrieveTx(setupID flow.Identifier) func(tx *badger.Txn) (*flow.EpochSetup, error) { - return func(tx *badger.Txn) (*flow.EpochSetup, error) { - val, err := es.cache.Get(setupID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.EpochSetup), nil - } -} - -// ByID will return the EpochSetup event by its ID. -// Error returns: -// * storage.ErrNotFound if no EpochSetup with the ID exists -func (es *EpochSetups) ByID(setupID flow.Identifier) (*flow.EpochSetup, error) { - tx := es.db.NewTransaction(false) - defer tx.Discard() - return es.retrieveTx(setupID)(tx) -} diff --git a/storage/badger/epoch_setups_test.go b/storage/badger/epoch_setups_test.go deleted file mode 100644 index fae4b153c1c..00000000000 --- a/storage/badger/epoch_setups_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// TestEpochSetupStoreAndRetrieve tests that a setup can be stored, retrieved and attempted to be stored again without an error -func TestEpochSetupStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewEpochSetups(metrics, db) - - // attempt to get a setup that doesn't exist - _, err := store.ByID(unittest.IdentifierFixture()) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - - // store a setup in db - expected := unittest.EpochSetupFixture() - err = operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(expected)) - require.NoError(t, err) - - // retrieve the setup by ID - actual, err := store.ByID(expected.ID()) - require.NoError(t, err) - assert.Equal(t, expected, actual) - - // test storing same epoch setup - err = operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(expected)) - require.NoError(t, err) - }) -} diff --git a/storage/badger/epoch_statuses.go b/storage/badger/epoch_statuses.go deleted file mode 100644 index e5be69ab080..00000000000 --- a/storage/badger/epoch_statuses.go +++ /dev/null @@ -1,68 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - 
"github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type EpochStatuses struct { - db *badger.DB - cache *Cache -} - -// NewEpochStatuses ... -func NewEpochStatuses(collector module.CacheMetrics, db *badger.DB) *EpochStatuses { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - blockID := key.(flow.Identifier) - status := val.(*flow.EpochStatus) - return transaction.WithTx(operation.InsertEpochStatus(blockID, status)) - } - - retrieve := func(key interface{}) func(*badger.Txn) (interface{}, error) { - blockID := key.(flow.Identifier) - var status flow.EpochStatus - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveEpochStatus(blockID, &status)(tx) - return &status, err - } - } - - es := &EpochStatuses{ - db: db, - cache: newCache(collector, metrics.ResourceEpochStatus, - withLimit(4*flow.DefaultTransactionExpiry), - withStore(store), - withRetrieve(retrieve)), - } - - return es -} - -func (es *EpochStatuses) StoreTx(blockID flow.Identifier, status *flow.EpochStatus) func(tx *transaction.Tx) error { - return es.cache.PutTx(blockID, status) -} - -func (es *EpochStatuses) retrieveTx(blockID flow.Identifier) func(tx *badger.Txn) (*flow.EpochStatus, error) { - return func(tx *badger.Txn) (*flow.EpochStatus, error) { - val, err := es.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.EpochStatus), nil - } -} - -// ByBlockID will return the epoch status for the given block -// Error returns: -// * storage.ErrNotFound if EpochStatus for the block does not exist -func (es *EpochStatuses) ByBlockID(blockID flow.Identifier) (*flow.EpochStatus, error) { - tx := es.db.NewTransaction(false) - defer tx.Discard() - return es.retrieveTx(blockID)(tx) -} diff --git a/storage/badger/epoch_statuses_test.go b/storage/badger/epoch_statuses_test.go deleted file mode 100644 index ce560bee9d2..00000000000 --- a/storage/badger/epoch_statuses_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -func TestEpochStatusesStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewEpochStatuses(metrics, db) - - blockID := unittest.IdentifierFixture() - expected := unittest.EpochStatusFixture() - - _, err := store.ByBlockID(unittest.IdentifierFixture()) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - - // store epoch status - err = operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(blockID, expected)) - require.NoError(t, err) - - // retreive status - actual, err := store.ByBlockID(blockID) - require.NoError(t, err) - require.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/events.go b/storage/badger/events.go deleted file mode 100644 index 59112f01e36..00000000000 --- a/storage/badger/events.go +++ /dev/null @@ -1,207 +0,0 @@ -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" 
- "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" -) - -type Events struct { - db *badger.DB - cache *Cache -} - -func NewEvents(collector module.CacheMetrics, db *badger.DB) *Events { - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - blockID := key.(flow.Identifier) - var events []flow.Event - return func(tx *badger.Txn) (interface{}, error) { - err := operation.LookupEventsByBlockID(blockID, &events)(tx) - return events, handleError(err, flow.Event{}) - } - } - - return &Events{ - db: db, - cache: newCache(collector, metrics.ResourceEvents, - withStore(noopStore), - withRetrieve(retrieve)), - } -} - -// BatchStore stores events keyed by a blockID in provided batch -// No errors are expected during normal operation, but it may return generic error -// if badger fails to process request -func (e *Events) BatchStore(blockID flow.Identifier, blockEvents []flow.EventsList, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - - // pre-allocating and indexing slice is faster than appending - sliceSize := 0 - for _, b := range blockEvents { - sliceSize += len(b) - } - - combinedEvents := make([]flow.Event, sliceSize) - - eventIndex := 0 - - for _, events := range blockEvents { - for _, event := range events { - err := operation.BatchInsertEvent(blockID, event)(writeBatch) - if err != nil { - return fmt.Errorf("cannot batch insert event: %w", err) - } - combinedEvents[eventIndex] = event - eventIndex++ - } - } - - callback := func() { - e.cache.Insert(blockID, combinedEvents) - } - batch.OnSucceed(callback) - return nil -} - -// ByBlockID returns the events for the given block ID -func (e *Events) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) { - tx := e.db.NewTransaction(false) - defer tx.Discard() - val, err := e.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val.([]flow.Event), nil -} - -// ByBlockIDTransactionID returns the events for the given block ID and transaction ID -func (e *Events) ByBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) ([]flow.Event, error) { - events, err := e.ByBlockID(blockID) - if err != nil { - return nil, handleError(err, flow.Event{}) - } - - var matched []flow.Event - for _, event := range events { - if event.TransactionID == txID { - matched = append(matched, event) - } - } - return matched, nil -} - -func (e *Events) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) ([]flow.Event, error) { - events, err := e.ByBlockID(blockID) - if err != nil { - return nil, handleError(err, flow.Event{}) - } - - var matched []flow.Event - for _, event := range events { - if event.TransactionIndex == txIndex { - matched = append(matched, event) - } - } - return matched, nil -} - -// ByBlockIDEventType returns the events for the given block ID and event type -func (e *Events) ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error) { - events, err := e.ByBlockID(blockID) - if err != nil { - return nil, handleError(err, flow.Event{}) - } - - var matched []flow.Event - for _, event := range events { - if event.Type == eventType { - matched = append(matched, event) - } - } - return matched, nil -} - -// RemoveByBlockID removes events by block ID -func (e *Events) RemoveByBlockID(blockID flow.Identifier) error { - return e.db.Update(operation.RemoveEventsByBlockID(blockID)) -} - -// BatchRemoveByBlockID 
removes events keyed by a blockID in provided batch -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. -func (e *Events) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - return e.db.View(operation.BatchRemoveEventsByBlockID(blockID, writeBatch)) -} - -type ServiceEvents struct { - db *badger.DB - cache *Cache -} - -func NewServiceEvents(collector module.CacheMetrics, db *badger.DB) *ServiceEvents { - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - blockID := key.(flow.Identifier) - var events []flow.Event - return func(tx *badger.Txn) (interface{}, error) { - err := operation.LookupServiceEventsByBlockID(blockID, &events)(tx) - return events, handleError(err, flow.Event{}) - } - } - - return &ServiceEvents{ - db: db, - cache: newCache(collector, metrics.ResourceEvents, - withStore(noopStore), - withRetrieve(retrieve)), - } -} - -// BatchStore stores service events keyed by a blockID in provided batch -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. -func (e *ServiceEvents) BatchStore(blockID flow.Identifier, events []flow.Event, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - for _, event := range events { - err := operation.BatchInsertServiceEvent(blockID, event)(writeBatch) - if err != nil { - return fmt.Errorf("cannot batch insert service event: %w", err) - } - } - - callback := func() { - e.cache.Insert(blockID, events) - } - batch.OnSucceed(callback) - return nil -} - -// ByBlockID returns the events for the given block ID -func (e *ServiceEvents) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) { - tx := e.db.NewTransaction(false) - defer tx.Discard() - val, err := e.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val.([]flow.Event), nil -} - -// RemoveByBlockID removes service events by block ID -func (e *ServiceEvents) RemoveByBlockID(blockID flow.Identifier) error { - return e.db.Update(operation.RemoveServiceEventsByBlockID(blockID)) -} - -// BatchRemoveByBlockID removes service events keyed by a blockID in provided batch -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
-func (e *ServiceEvents) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - return e.db.View(operation.BatchRemoveServiceEventsByBlockID(blockID, writeBatch)) -} diff --git a/storage/badger/events_test.go b/storage/badger/events_test.go deleted file mode 100644 index c5d2bf706eb..00000000000 --- a/storage/badger/events_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package badger_test - -import ( - "math/rand" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/fvm/systemcontracts" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - badgerstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestEventStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewEvents(metrics, db) - - blockID := unittest.IdentifierFixture() - tx1ID := unittest.IdentifierFixture() - tx2ID := unittest.IdentifierFixture() - evt1_1 := unittest.EventFixture(flow.EventAccountCreated, 0, 0, tx1ID, 0) - evt1_2 := unittest.EventFixture(flow.EventAccountCreated, 1, 1, tx2ID, 0) - - evt2_1 := unittest.EventFixture(flow.EventAccountUpdated, 2, 2, tx2ID, 0) - - expected := []flow.EventsList{ - {evt1_1, evt1_2}, - {evt2_1}, - } - - batch := badgerstorage.NewBatch(db) - // store event - err := store.BatchStore(blockID, expected, batch) - require.NoError(t, err) - - err = batch.Flush() - require.NoError(t, err) - - // retrieve by blockID - actual, err := store.ByBlockID(blockID) - require.NoError(t, err) - require.Len(t, actual, 3) - require.Contains(t, actual, evt1_1) - require.Contains(t, actual, evt1_2) - require.Contains(t, actual, evt2_1) - - // retrieve by blockID and event type - actual, err = store.ByBlockIDEventType(blockID, flow.EventAccountCreated) - require.NoError(t, err) - require.Len(t, actual, 2) - require.Contains(t, actual, evt1_1) - require.Contains(t, actual, evt1_2) - - actual, err = store.ByBlockIDEventType(blockID, flow.EventAccountUpdated) - require.NoError(t, err) - require.Len(t, actual, 1) - require.Contains(t, actual, evt2_1) - - events, err := systemcontracts.ServiceEventsForChain(flow.Emulator) - require.NoError(t, err) - - actual, err = store.ByBlockIDEventType(blockID, events.EpochSetup.EventType()) - require.NoError(t, err) - require.Len(t, actual, 0) - - // retrieve by blockID and transaction id - actual, err = store.ByBlockIDTransactionID(blockID, tx1ID) - require.NoError(t, err) - require.Len(t, actual, 1) - require.Contains(t, actual, evt1_1) - - // retrieve by blockID and transaction index - actual, err = store.ByBlockIDTransactionIndex(blockID, 1) - require.NoError(t, err) - require.Len(t, actual, 1) - require.Contains(t, actual, evt1_2) - - // test loading from database - - newStore := badgerstorage.NewEvents(metrics, db) - actual, err = newStore.ByBlockID(blockID) - require.NoError(t, err) - require.Len(t, actual, 3) - require.Contains(t, actual, evt1_1) - require.Contains(t, actual, evt1_2) - require.Contains(t, actual, evt2_1) - }) -} - -func TestEventRetrieveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewEvents(metrics, db) - - blockID := unittest.IdentifierFixture() - txID := unittest.IdentifierFixture() - txIndex := rand.Uint32() - - // retrieve by blockID - events, err := 
store.ByBlockID(blockID) - require.NoError(t, err) - require.True(t, len(events) == 0) - - // retrieve by blockID and event type - events, err = store.ByBlockIDEventType(blockID, flow.EventAccountCreated) - require.NoError(t, err) - require.True(t, len(events) == 0) - - // retrieve by blockID and transaction id - events, err = store.ByBlockIDTransactionID(blockID, txID) - require.NoError(t, err) - require.True(t, len(events) == 0) - - // retrieve by blockID and transaction id - events, err = store.ByBlockIDTransactionIndex(blockID, txIndex) - require.NoError(t, err) - require.True(t, len(events) == 0) - - }) -} diff --git a/storage/badger/guarantees.go b/storage/badger/guarantees.go deleted file mode 100644 index 23bc929db9c..00000000000 --- a/storage/badger/guarantees.go +++ /dev/null @@ -1,69 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// Guarantees implements persistent storage for collection guarantees. -type Guarantees struct { - db *badger.DB - cache *Cache -} - -func NewGuarantees(collector module.CacheMetrics, db *badger.DB, cacheSize uint) *Guarantees { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - collID := key.(flow.Identifier) - guarantee := val.(*flow.CollectionGuarantee) - return transaction.WithTx(operation.SkipDuplicates(operation.InsertGuarantee(collID, guarantee))) - } - - retrieve := func(key interface{}) func(*badger.Txn) (interface{}, error) { - collID := key.(flow.Identifier) - var guarantee flow.CollectionGuarantee - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveGuarantee(collID, &guarantee)(tx) - return &guarantee, err - } - } - - g := &Guarantees{ - db: db, - cache: newCache(collector, metrics.ResourceGuarantee, - withLimit(cacheSize), - withStore(store), - withRetrieve(retrieve)), - } - - return g -} - -func (g *Guarantees) storeTx(guarantee *flow.CollectionGuarantee) func(*transaction.Tx) error { - return g.cache.PutTx(guarantee.ID(), guarantee) -} - -func (g *Guarantees) retrieveTx(collID flow.Identifier) func(*badger.Txn) (*flow.CollectionGuarantee, error) { - return func(tx *badger.Txn) (*flow.CollectionGuarantee, error) { - val, err := g.cache.Get(collID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.CollectionGuarantee), nil - } -} - -func (g *Guarantees) Store(guarantee *flow.CollectionGuarantee) error { - return operation.RetryOnConflictTx(g.db, transaction.Update, g.storeTx(guarantee)) -} - -func (g *Guarantees) ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error) { - tx := g.db.NewTransaction(false) - defer tx.Discard() - return g.retrieveTx(collID)(tx) -} diff --git a/storage/badger/guarantees_test.go b/storage/badger/guarantees_test.go deleted file mode 100644 index 778febfb49c..00000000000 --- a/storage/badger/guarantees_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestGuaranteeStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) 
{ - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewGuarantees(metrics, db, 1000) - - // abiturary guarantees - expected := unittest.CollectionGuaranteeFixture() - - // retrieve guarantee without stored - _, err := store.ByCollectionID(expected.ID()) - require.True(t, errors.Is(err, storage.ErrNotFound)) - - // store guarantee - err = store.Store(expected) - require.NoError(t, err) - - // retreive by coll idx - actual, err := store.ByCollectionID(expected.ID()) - require.NoError(t, err) - require.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/headers.go b/storage/badger/headers.go deleted file mode 100644 index ac1f0856beb..00000000000 --- a/storage/badger/headers.go +++ /dev/null @@ -1,204 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// Headers implements a simple read-only header storage around a badger DB. -type Headers struct { - db *badger.DB - cache *Cache - heightCache *Cache -} - -func NewHeaders(collector module.CacheMetrics, db *badger.DB) *Headers { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - blockID := key.(flow.Identifier) - header := val.(*flow.Header) - return transaction.WithTx(operation.InsertHeader(blockID, header)) - } - - // CAUTION: should only be used to index FINALIZED blocks by their - // respective height - storeHeight := func(key interface{}, val interface{}) func(*transaction.Tx) error { - height := key.(uint64) - id := val.(flow.Identifier) - return transaction.WithTx(operation.IndexBlockHeight(height, id)) - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - blockID := key.(flow.Identifier) - var header flow.Header - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveHeader(blockID, &header)(tx) - return &header, err - } - } - - retrieveHeight := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - height := key.(uint64) - var id flow.Identifier - return func(tx *badger.Txn) (interface{}, error) { - err := operation.LookupBlockHeight(height, &id)(tx) - return id, err - } - } - - h := &Headers{ - db: db, - cache: newCache(collector, metrics.ResourceHeader, - withLimit(4*flow.DefaultTransactionExpiry), - withStore(store), - withRetrieve(retrieve)), - - heightCache: newCache(collector, metrics.ResourceFinalizedHeight, - withLimit(4*flow.DefaultTransactionExpiry), - withStore(storeHeight), - withRetrieve(retrieveHeight)), - } - - return h -} - -func (h *Headers) storeTx(header *flow.Header) func(*transaction.Tx) error { - return h.cache.PutTx(header.ID(), header) -} - -func (h *Headers) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*flow.Header, error) { - return func(tx *badger.Txn) (*flow.Header, error) { - val, err := h.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.Header), nil - } -} - -// results in `storage.ErrNotFound` for unknown height -func (h *Headers) retrieveIdByHeightTx(height uint64) func(*badger.Txn) (flow.Identifier, error) { - return func(tx *badger.Txn) (flow.Identifier, error) { - blockID, err := h.heightCache.Get(height)(tx) - if err != nil { - return flow.ZeroID, 
fmt.Errorf("failed to retrieve block ID for height %d: %w", height, err) - } - return blockID.(flow.Identifier), nil - } -} - -func (h *Headers) Store(header *flow.Header) error { - return operation.RetryOnConflictTx(h.db, transaction.Update, h.storeTx(header)) -} - -func (h *Headers) ByBlockID(blockID flow.Identifier) (*flow.Header, error) { - tx := h.db.NewTransaction(false) - defer tx.Discard() - return h.retrieveTx(blockID)(tx) -} - -func (h *Headers) ByHeight(height uint64) (*flow.Header, error) { - tx := h.db.NewTransaction(false) - defer tx.Discard() - - blockID, err := h.retrieveIdByHeightTx(height)(tx) - if err != nil { - return nil, err - } - return h.retrieveTx(blockID)(tx) -} - -// Exists returns true if a header with the given ID has been stored. -// No errors are expected during normal operation. -func (h *Headers) Exists(blockID flow.Identifier) (bool, error) { - // if the block is in the cache, return true - if ok := h.cache.IsCached(blockID); ok { - return ok, nil - } - // otherwise, check badger store - var exists bool - err := h.db.View(operation.BlockExists(blockID, &exists)) - if err != nil { - return false, fmt.Errorf("could not check existence: %w", err) - } - return exists, nil -} - -// BlockIDByHeight the block ID that is finalized at the given height. It is an optimized version -// of `ByHeight` that skips retrieving the block. Expected errors during normal operations: -// - `storage.ErrNotFound` if no finalized block is known at given height. -func (h *Headers) BlockIDByHeight(height uint64) (flow.Identifier, error) { - tx := h.db.NewTransaction(false) - defer tx.Discard() - - blockID, err := h.retrieveIdByHeightTx(height)(tx) - if err != nil { - return flow.ZeroID, fmt.Errorf("could not lookup block id by height %d: %w", height, err) - } - return blockID, nil -} - -func (h *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error) { - var blockIDs flow.IdentifierList - err := h.db.View(procedure.LookupBlockChildren(parentID, &blockIDs)) - if err != nil { - return nil, fmt.Errorf("could not look up children: %w", err) - } - headers := make([]*flow.Header, 0, len(blockIDs)) - for _, blockID := range blockIDs { - header, err := h.ByBlockID(blockID) - if err != nil { - return nil, fmt.Errorf("could not retrieve child (%x): %w", blockID, err) - } - headers = append(headers, header) - } - return headers, nil -} - -func (h *Headers) FindHeaders(filter func(header *flow.Header) bool) ([]flow.Header, error) { - blocks := make([]flow.Header, 0, 1) - err := h.db.View(operation.FindHeaders(filter, &blocks)) - return blocks, err -} - -// RollbackExecutedBlock update the executed block header to the given header. -// only useful for execution node to roll back executed block height -func (h *Headers) RollbackExecutedBlock(header *flow.Header) error { - return operation.RetryOnConflict(h.db.Update, func(txn *badger.Txn) error { - var blockID flow.Identifier - err := operation.RetrieveExecutedBlock(&blockID)(txn) - if err != nil { - return fmt.Errorf("cannot lookup executed block: %w", err) - } - - var highest flow.Header - err = operation.RetrieveHeader(blockID, &highest)(txn) - if err != nil { - return fmt.Errorf("cannot retrieve executed header: %w", err) - } - - // only rollback if the given height is below the current executed height - if header.Height >= highest.Height { - return fmt.Errorf("cannot roolback. 
expect the target height %v to be lower than highest executed height %v, but actually is not", - header.Height, highest.Height, - ) - } - - err = operation.UpdateExecutedBlock(header.ID())(txn) - if err != nil { - return fmt.Errorf("cannot update highest executed block: %w", err) - } - - return nil - }) -} diff --git a/storage/badger/headers_test.go b/storage/badger/headers_test.go deleted file mode 100644 index e0d55bec662..00000000000 --- a/storage/badger/headers_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/onflow/flow-go/storage/badger/operation" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestHeaderStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - headers := badgerstorage.NewHeaders(metrics, db) - - block := unittest.BlockFixture() - - // store header - err := headers.Store(block.Header) - require.NoError(t, err) - - // index the header - err = operation.RetryOnConflict(db.Update, operation.IndexBlockHeight(block.Header.Height, block.ID())) - require.NoError(t, err) - - // retrieve header by height - actual, err := headers.ByHeight(block.Header.Height) - require.NoError(t, err) - require.Equal(t, block.Header, actual) - }) -} - -func TestHeaderRetrieveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - headers := badgerstorage.NewHeaders(metrics, db) - - header := unittest.BlockHeaderFixture() - - // retrieve header by height, should err as not store before height - _, err := headers.ByHeight(header.Height) - require.True(t, errors.Is(err, storage.ErrNotFound)) - }) -} diff --git a/storage/badger/index.go b/storage/badger/index.go deleted file mode 100644 index 4a5b4ba32b6..00000000000 --- a/storage/badger/index.go +++ /dev/null @@ -1,72 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// Index implements a simple read-only payload storage around a badger DB. 
-type Index struct { - db *badger.DB - cache *Cache -} - -func NewIndex(collector module.CacheMetrics, db *badger.DB) *Index { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - blockID := key.(flow.Identifier) - index := val.(*flow.Index) - return transaction.WithTx(procedure.InsertIndex(blockID, index)) - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - blockID := key.(flow.Identifier) - var index flow.Index - return func(tx *badger.Txn) (interface{}, error) { - err := procedure.RetrieveIndex(blockID, &index)(tx) - return &index, err - } - } - - p := &Index{ - db: db, - cache: newCache(collector, metrics.ResourceIndex, - withLimit(flow.DefaultTransactionExpiry+100), - withStore(store), - withRetrieve(retrieve)), - } - - return p -} - -func (i *Index) storeTx(blockID flow.Identifier, index *flow.Index) func(*transaction.Tx) error { - return i.cache.PutTx(blockID, index) -} - -func (i *Index) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*flow.Index, error) { - return func(tx *badger.Txn) (*flow.Index, error) { - val, err := i.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.Index), nil - } -} - -func (i *Index) Store(blockID flow.Identifier, index *flow.Index) error { - return operation.RetryOnConflictTx(i.db, transaction.Update, i.storeTx(blockID, index)) -} - -func (i *Index) ByBlockID(blockID flow.Identifier) (*flow.Index, error) { - tx := i.db.NewTransaction(false) - defer tx.Discard() - return i.retrieveTx(blockID)(tx) -} diff --git a/storage/badger/index_test.go b/storage/badger/index_test.go deleted file mode 100644 index ba4e2f3d6d8..00000000000 --- a/storage/badger/index_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestIndexStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewIndex(metrics, db) - - blockID := unittest.IdentifierFixture() - expected := unittest.IndexFixture() - - // retreive without store - _, err := store.ByBlockID(blockID) - require.True(t, errors.Is(err, storage.ErrNotFound)) - - // store index - err = store.Store(blockID, expected) - require.NoError(t, err) - - // retreive index - actual, err := store.ByBlockID(blockID) - require.NoError(t, err) - require.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/init.go b/storage/badger/init.go index a3d4691bc83..83499c3dfc2 100644 --- a/storage/badger/init.go +++ b/storage/badger/init.go @@ -1,10 +1,18 @@ package badger +// TODO(leo): rename to open.go + import ( + "errors" "fmt" + "io/fs" + "os" + "path/filepath" + "strings" "github.com/dgraph-io/badger/v2" + "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/storage/badger/operation" ) @@ -13,13 +21,15 @@ import ( // return an error. Once a database type marker has been set using these methods, // the type cannot be changed. 
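+//
+// For example (a sketch; the directory path is hypothetical), re-opening a directory
+// that was initialized with InitSecret via InitPublic is expected to fail:
+//
+//	db, err := InitSecret(badger.DefaultOptions("/data/secrets"))
+//	// ... use db, then db.Close() ...
+//	_, err = InitPublic(badger.DefaultOptions("/data/secrets"))
+//	// err != nil: the secret type marker is already set and cannot be changed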
func InitPublic(opts badger.Options) (*badger.DB, error) { - - db, err := badger.Open(opts) + db, err := SafeOpen(opts) if err != nil { return nil, fmt.Errorf("could not open db: %w", err) } err = db.Update(operation.InsertPublicDBMarker) if err != nil { + // Close db before returning error. + db.Close() + return nil, fmt.Errorf("could not assert db type: %w", err) } @@ -31,15 +41,107 @@ func InitPublic(opts badger.Options) (*badger.DB, error) { // return an error. Once a database type marker has been set using these methods, // the type cannot be changed. func InitSecret(opts badger.Options) (*badger.DB, error) { - - db, err := badger.Open(opts) + db, err := SafeOpen(opts) if err != nil { return nil, fmt.Errorf("could not open db: %w", err) } err = db.Update(operation.InsertSecretDBMarker) if err != nil { + // Close db before returning error. + db.Close() + return nil, fmt.Errorf("could not assert db type: %w", err) } return db, nil } + +// IsBadgerFolder reports whether the given directory looks like a BadgerDB data folder, +// i.e. it contains at least one .vlog file together with the KEYREGISTRY and MANIFEST files. +func IsBadgerFolder(dataDir string) (bool, error) { + // Check if the directory exists + info, err := os.Stat(dataDir) + if err != nil { + return false, err + } + if !info.IsDir() { + return false, errors.New("provided path is not a directory") + } + + // Flags to indicate presence of key BadgerDB files + var hasKeyRegistry, hasVLOG, hasManifest bool + + err = filepath.WalkDir(dataDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + if d.IsDir() { + return nil + } + + name := d.Name() + switch { + case strings.HasSuffix(name, ".vlog"): + hasVLOG = true + case name == "KEYREGISTRY": + hasKeyRegistry = true + case name == "MANIFEST": + hasManifest = true + } + + // Short-circuit once we know it's a Badger folder + if hasKeyRegistry && hasVLOG && hasManifest { + return fs.SkipDir + } + return nil + }) + + if err != nil && !errors.Is(err, fs.SkipDir) { + return false, err + } + + isBadger := hasKeyRegistry && hasVLOG && hasManifest + return isBadger, nil +} + +// EnsureBadgerFolder ensures the given directory is either empty or nonexistent, +// or is a valid Badger folder. It returns an error if the directory is non-empty and is not a Badger folder. +func EnsureBadgerFolder(dataDir string) error { + ok, err := util.IsEmptyOrNotExists(dataDir) + if err != nil { + return fmt.Errorf("error checking if folder is empty or does not exist: %w", err) + } + + // if the folder is empty or does not exist, then it can be used as a Badger folder + if ok { + return nil + } + + isBadger, err := IsBadgerFolder(dataDir) + if err != nil { + return fmt.Errorf("error checking if folder is a Badger folder: %w", err) + } + if !isBadger { + return fmt.Errorf("folder %s is not a Badger folder", dataDir) + } + return nil +} + +// SafeOpen opens a Badger database with the provided options, ensuring that the +// directory is a valid Badger folder. If the directory is not valid, it returns an error. +// This is useful to prevent accidental opening of a non-Badger (pebble) directory as a Badger database, +// which could wipe out the existing data.
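+//
+// A minimal usage sketch (the path is hypothetical):
+//
+//	db, err := SafeOpen(badger.DefaultOptions("/data/protocol"))
+//	if err != nil {
+//		// e.g. the directory is non-empty and does not look like a Badger folder
+//		return err
+//	}
+//	defer db.Close()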
+func SafeOpen(opts badger.Options) (*badger.DB, error) { + // Check if the directory is a Badger folder + err := EnsureBadgerFolder(opts.Dir) + if err != nil { + return nil, fmt.Errorf("could not assert badger folder: %w", err) + } + + // Open the database + db, err := badger.Open(opts) + if err != nil { + return nil, fmt.Errorf("could not open db: %w", err) + } + + return db, nil +} diff --git a/storage/badger/init_test.go b/storage/badger/init_test.go index 7392babce41..99c96959ece 100644 --- a/storage/badger/init_test.go +++ b/storage/badger/init_test.go @@ -3,6 +3,7 @@ package badger_test import ( "testing" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" @@ -54,3 +55,30 @@ func TestEncryptionKeyMismatch(t *testing.T) { require.Error(t, err) }) } + +func TestIsBadgerFolder(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + ok, err := bstorage.IsBadgerFolder(dir) + require.NoError(t, err) + require.False(t, ok) + + db := unittest.BadgerDB(t, dir) + ok, err = bstorage.IsBadgerFolder(dir) + require.NoError(t, err) + require.True(t, ok) + require.NoError(t, db.Close()) + }) +} + +func TestPebbleIsNotBadgerFolder(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + db, err := pebble.Open(dir, &pebble.Options{}) + require.NoError(t, err) + + ok, err := bstorage.IsBadgerFolder(dir) + require.NoError(t, err) + require.False(t, ok) + + require.NoError(t, db.Close()) + }) +} diff --git a/storage/badger/model/storedChunkDataPack.go b/storage/badger/model/storedChunkDataPack.go deleted file mode 100644 index 28070cc5ebb..00000000000 --- a/storage/badger/model/storedChunkDataPack.go +++ /dev/null @@ -1,16 +0,0 @@ -package badgermodel - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// StoredChunkDataPack is an in-storage representation of chunk data pack. -// Its prime difference is instead of an actual collection, it keeps a collection ID hence relying on maintaining -// the collection on a secondary storage. -type StoredChunkDataPack struct { - ChunkID flow.Identifier - StartState flow.StateCommitment - Proof flow.StorageProof - CollectionID flow.Identifier - SystemChunk bool -} diff --git a/storage/badger/my_receipts.go b/storage/badger/my_receipts.go deleted file mode 100644 index 37054a35145..00000000000 --- a/storage/badger/my_receipts.go +++ /dev/null @@ -1,162 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// MyExecutionReceipts holds and indexes Execution Receipts. -// MyExecutionReceipts is implemented as a wrapper around badger.ExecutionReceipts -// The wrapper adds the ability to "MY execution receipt", from the viewpoint -// of an individual Execution Node. -type MyExecutionReceipts struct { - genericReceipts *ExecutionReceipts - db *badger.DB - cache *Cache -} - -// NewMyExecutionReceipts creates instance of MyExecutionReceipts which is a wrapper wrapper around badger.ExecutionReceipts -// It's useful for execution nodes to keep track of produced execution receipts. 
-func NewMyExecutionReceipts(collector module.CacheMetrics, db *badger.DB, receipts *ExecutionReceipts) *MyExecutionReceipts { - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - receipt := val.(*flow.ExecutionReceipt) - // assemble DB operations to store receipt (no execution) - storeReceiptOps := receipts.storeTx(receipt) - // assemble DB operations to index receipt as one of my own (no execution) - blockID := receipt.ExecutionResult.BlockID - receiptID := receipt.ID() - indexOwnReceiptOps := transaction.WithTx(func(tx *badger.Txn) error { - err := operation.IndexOwnExecutionReceipt(blockID, receiptID)(tx) - // check if we are storing same receipt - if errors.Is(err, storage.ErrAlreadyExists) { - var savedReceiptID flow.Identifier - err := operation.LookupOwnExecutionReceipt(blockID, &savedReceiptID)(tx) - if err != nil { - return err - } - - if savedReceiptID == receiptID { - // if we are storing same receipt we shouldn't error - return nil - } - - return fmt.Errorf("indexing my receipt %v failed: different receipt %v for the same block %v is already indexed", receiptID, - savedReceiptID, blockID) - } - return err - }) - - return func(tx *transaction.Tx) error { - err := storeReceiptOps(tx) // execute operations to store receipt - if err != nil { - return fmt.Errorf("could not store receipt: %w", err) - } - err = indexOwnReceiptOps(tx) // execute operations to index receipt as one of my own - if err != nil { - return fmt.Errorf("could not index receipt as one of my own: %w", err) - } - return nil - } - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - blockID := key.(flow.Identifier) - - return func(tx *badger.Txn) (interface{}, error) { - var receiptID flow.Identifier - err := operation.LookupOwnExecutionReceipt(blockID, &receiptID)(tx) - if err != nil { - return nil, fmt.Errorf("could not lookup receipt ID: %w", err) - } - receipt, err := receipts.byID(receiptID)(tx) - if err != nil { - return nil, err - } - return receipt, nil - } - } - - return &MyExecutionReceipts{ - genericReceipts: receipts, - db: db, - cache: newCache(collector, metrics.ResourceMyReceipt, - withLimit(flow.DefaultTransactionExpiry+100), - withStore(store), - withRetrieve(retrieve)), - } -} - -// storeMyReceipt assembles the operations to store the receipt and marks it as mine (trusted). -func (m *MyExecutionReceipts) storeMyReceipt(receipt *flow.ExecutionReceipt) func(*transaction.Tx) error { - return m.cache.PutTx(receipt.ExecutionResult.BlockID, receipt) -} - -// storeMyReceipt assembles the operations to retrieve my receipt for the given block ID. -func (m *MyExecutionReceipts) myReceipt(blockID flow.Identifier) func(*badger.Txn) (*flow.ExecutionReceipt, error) { - retrievalOps := m.cache.Get(blockID) // assemble DB operations to retrieve receipt (no execution) - return func(tx *badger.Txn) (*flow.ExecutionReceipt, error) { - val, err := retrievalOps(tx) // execute operations to retrieve receipt - if err != nil { - return nil, err - } - return val.(*flow.ExecutionReceipt), nil - } -} - -// StoreMyReceipt stores the receipt and marks it as mine (trusted). My -// receipts are indexed by the block whose result they compute. Currently, -// we only support indexing a _single_ receipt per block. Attempting to -// store conflicting receipts for the same block will error. 
-func (m *MyExecutionReceipts) StoreMyReceipt(receipt *flow.ExecutionReceipt) error { - return operation.RetryOnConflictTx(m.db, transaction.Update, m.storeMyReceipt(receipt)) -} - -// BatchStoreMyReceipt stores blockID-to-my-receipt index entry keyed by blockID in a provided batch. -// No errors are expected during normal operation -// If entity fails marshalling, the error is wrapped in a generic error and returned. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. -func (m *MyExecutionReceipts) BatchStoreMyReceipt(receipt *flow.ExecutionReceipt, batch storage.BatchStorage) error { - - writeBatch := batch.GetWriter() - - err := m.genericReceipts.BatchStore(receipt, batch) - if err != nil { - return fmt.Errorf("cannot batch store generic execution receipt inside my execution receipt batch store: %w", err) - } - - err = operation.BatchIndexOwnExecutionReceipt(receipt.ExecutionResult.BlockID, receipt.ID())(writeBatch) - if err != nil { - return fmt.Errorf("cannot batch index own execution receipt inside my execution receipt batch store: %w", err) - } - - return nil -} - -// MyReceipt retrieves my receipt for the given block. -// Returns storage.ErrNotFound if no receipt was persisted for the block. -func (m *MyExecutionReceipts) MyReceipt(blockID flow.Identifier) (*flow.ExecutionReceipt, error) { - tx := m.db.NewTransaction(false) - defer tx.Discard() - return m.myReceipt(blockID)(tx) -} - -func (m *MyExecutionReceipts) RemoveIndexByBlockID(blockID flow.Identifier) error { - return m.db.Update(operation.SkipNonExist(operation.RemoveOwnExecutionReceipt(blockID))) -} - -// BatchRemoveIndexByBlockID removes blockID-to-my-execution-receipt index entry keyed by a blockID in a provided batch -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
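// --- Editor's illustrative sketch (not part of the patch) ---------------------
// Intended use of BatchStoreMyReceipt: collect several writes in one Badger
// write batch and flush once. bstorage.NewBatch is assumed to be the existing
// batch constructor in storage/badger; nothing hits disk before Flush.
func sketchBatchStore(db *badger.DB, m *bstorage.MyExecutionReceipts, receipt *flow.ExecutionReceipt) error {
	batch := bstorage.NewBatch(db)
	if err := m.BatchStoreMyReceipt(receipt, batch); err != nil {
		return err
	}
	return batch.Flush() // writes become visible only if the flush succeeds
}
// ------------------------------------------------------------------------------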
-func (m *MyExecutionReceipts) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - return operation.BatchRemoveOwnExecutionReceipt(blockID)(writeBatch) -} diff --git a/storage/badger/my_receipts_test.go b/storage/badger/my_receipts_test.go deleted file mode 100644 index 942c771f041..00000000000 --- a/storage/badger/my_receipts_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestMyExecutionReceiptsStorage(t *testing.T) { - withStore := func(t *testing.T, f func(store *bstorage.MyExecutionReceipts)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - results := bstorage.NewExecutionResults(metrics, db) - receipts := bstorage.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) - store := bstorage.NewMyExecutionReceipts(metrics, db, receipts) - - f(store) - }) - } - - t.Run("store one get one", func(t *testing.T) { - withStore(t, func(store *bstorage.MyExecutionReceipts) { - block := unittest.BlockFixture() - receipt1 := unittest.ReceiptForBlockFixture(&block) - - err := store.StoreMyReceipt(receipt1) - require.NoError(t, err) - - actual, err := store.MyReceipt(block.ID()) - require.NoError(t, err) - - require.Equal(t, receipt1, actual) - }) - }) - - t.Run("store same for the same block", func(t *testing.T) { - withStore(t, func(store *bstorage.MyExecutionReceipts) { - block := unittest.BlockFixture() - - receipt1 := unittest.ReceiptForBlockFixture(&block) - - err := store.StoreMyReceipt(receipt1) - require.NoError(t, err) - - err = store.StoreMyReceipt(receipt1) - require.NoError(t, err) - }) - }) - - t.Run("store different receipt for same block should fail", func(t *testing.T) { - withStore(t, func(store *bstorage.MyExecutionReceipts) { - block := unittest.BlockFixture() - - executor1 := unittest.IdentifierFixture() - executor2 := unittest.IdentifierFixture() - - receipt1 := unittest.ReceiptForBlockExecutorFixture(&block, executor1) - receipt2 := unittest.ReceiptForBlockExecutorFixture(&block, executor2) - - err := store.StoreMyReceipt(receipt1) - require.NoError(t, err) - - err = store.StoreMyReceipt(receipt2) - require.Error(t, err) - }) - }) -} diff --git a/storage/badger/operation/approvals.go b/storage/badger/operation/approvals.go deleted file mode 100644 index 8a994eed2a2..00000000000 --- a/storage/badger/operation/approvals.go +++ /dev/null @@ -1,31 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertResultApproval inserts a ResultApproval by ID. -func InsertResultApproval(approval *flow.ResultApproval) func(*badger.Txn) error { - return insert(makePrefix(codeResultApproval, approval.ID()), approval) -} - -// RetrieveResultApproval retrieves an approval by ID. -func RetrieveResultApproval(approvalID flow.Identifier, approval *flow.ResultApproval) func(*badger.Txn) error { - return retrieve(makePrefix(codeResultApproval, approvalID), approval) -} - -// IndexResultApproval inserts a ResultApproval ID keyed by ExecutionResult ID -// and chunk index. If a value for this key exists, a storage.ErrAlreadyExists -// error is returned. 
This operation is only used by the ResultApprovals store, -// which is only used within a Verification node, where it is assumed that there -// is only one approval per chunk. -func IndexResultApproval(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeIndexResultApprovalByChunk, resultID, chunkIndex), approvalID) -} - -// LookupResultApproval finds a ResultApproval by result ID and chunk index. -func LookupResultApproval(resultID flow.Identifier, chunkIndex uint64, approvalID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeIndexResultApprovalByChunk, resultID, chunkIndex), approvalID) -} diff --git a/storage/badger/operation/bft.go b/storage/badger/operation/bft.go deleted file mode 100644 index 8a6c8d2e8b3..00000000000 --- a/storage/badger/operation/bft.go +++ /dev/null @@ -1,42 +0,0 @@ -package operation - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" -) - -// PurgeBlocklist removes the set of blocked nodes IDs from the data base. -// If no corresponding entry exists, this function is a no-op. -// No errors are expected during normal operations. -// TODO: TEMPORARY manual override for adding node IDs to list of ejected nodes, applies to networking layer only -func PurgeBlocklist() func(*badger.Txn) error { - return func(tx *badger.Txn) error { - err := remove(makePrefix(blockedNodeIDs))(tx) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("enexpected error while purging blocklist: %w", err) - } - return nil - } -} - -// PersistBlocklist writes the set of blocked nodes IDs into the data base. -// If an entry already exists, it is overwritten; otherwise a new entry is created. -// No errors are expected during normal operations. -// -// TODO: TEMPORARY manual override for adding node IDs to list of ejected nodes, applies to networking layer only -func PersistBlocklist(blocklist map[flow.Identifier]struct{}) func(*badger.Txn) error { - return upsert(makePrefix(blockedNodeIDs), blocklist) -} - -// RetrieveBlocklist reads the set of blocked node IDs from the data base. -// Returns `storage.ErrNotFound` error in case no respective data base entry is present. 
-// -// TODO: TEMPORARY manual override for adding node IDs to list of ejected nodes, applies to networking layer only -func RetrieveBlocklist(blocklist *map[flow.Identifier]struct{}) func(*badger.Txn) error { - return retrieve(makePrefix(blockedNodeIDs), blocklist) -} diff --git a/storage/badger/operation/bft_test.go b/storage/badger/operation/bft_test.go deleted file mode 100644 index f1b573659fc..00000000000 --- a/storage/badger/operation/bft_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" -) - -// Test_PersistBlocklist tests the operations: -// - PersistBlocklist(blocklist map[flow.Identifier]struct{}) -// - RetrieveBlocklist(blocklist *map[flow.Identifier]struct{}) -// - PurgeBlocklist() -func Test_PersistBlocklist(t *testing.T) { - t.Run("Retrieving non-existing blocklist should return 'storage.ErrNotFound'", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - var blocklist map[flow.Identifier]struct{} - err := db.View(RetrieveBlocklist(&blocklist)) - require.ErrorIs(t, err, storage.ErrNotFound) - - }) - }) - - t.Run("Persisting and read blocklist", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blocklist := unittest.IdentifierListFixture(8).Lookup() - err := db.Update(PersistBlocklist(blocklist)) - require.NoError(t, err) - - var b map[flow.Identifier]struct{} - err = db.View(RetrieveBlocklist(&b)) - require.NoError(t, err) - require.Equal(t, blocklist, b) - }) - }) - - t.Run("Overwrite blocklist", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blocklist1 := unittest.IdentifierListFixture(8).Lookup() - err := db.Update(PersistBlocklist(blocklist1)) - require.NoError(t, err) - - blocklist2 := unittest.IdentifierListFixture(8).Lookup() - err = db.Update(PersistBlocklist(blocklist2)) - require.NoError(t, err) - - var b map[flow.Identifier]struct{} - err = db.View(RetrieveBlocklist(&b)) - require.NoError(t, err) - require.Equal(t, blocklist2, b) - }) - }) - - t.Run("Write & Purge & Write blocklist", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blocklist1 := unittest.IdentifierListFixture(8).Lookup() - err := db.Update(PersistBlocklist(blocklist1)) - require.NoError(t, err) - - err = db.Update(PurgeBlocklist()) - require.NoError(t, err) - - var b map[flow.Identifier]struct{} - err = db.View(RetrieveBlocklist(&b)) - require.ErrorIs(t, err, storage.ErrNotFound) - - blocklist2 := unittest.IdentifierListFixture(8).Lookup() - err = db.Update(PersistBlocklist(blocklist2)) - require.NoError(t, err) - - err = db.View(RetrieveBlocklist(&b)) - require.NoError(t, err) - require.Equal(t, blocklist2, b) - }) - }) - - t.Run("Purge non-existing blocklist", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - var b map[flow.Identifier]struct{} - - err := db.View(RetrieveBlocklist(&b)) - require.ErrorIs(t, err, storage.ErrNotFound) - - err = db.Update(PurgeBlocklist()) - require.NoError(t, err) - - err = db.View(RetrieveBlocklist(&b)) - require.ErrorIs(t, err, storage.ErrNotFound) - }) - }) -} diff --git a/storage/badger/operation/children.go b/storage/badger/operation/children.go deleted file mode 100644 index 92eb0c35918..00000000000 --- a/storage/badger/operation/children.go +++ /dev/null @@ -1,22 +0,0 @@ -package operation - -import ( - 
"github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertBlockChildren insert an index to lookup the direct child of a block by its ID -func InsertBlockChildren(blockID flow.Identifier, childrenIDs flow.IdentifierList) func(*badger.Txn) error { - return insert(makePrefix(codeBlockChildren, blockID), childrenIDs) -} - -// UpdateBlockChildren updates the children for a block. -func UpdateBlockChildren(blockID flow.Identifier, childrenIDs flow.IdentifierList) func(*badger.Txn) error { - return update(makePrefix(codeBlockChildren, blockID), childrenIDs) -} - -// RetrieveBlockChildren the child block ID by parent block ID -func RetrieveBlockChildren(blockID flow.Identifier, childrenIDs *flow.IdentifierList) func(*badger.Txn) error { - return retrieve(makePrefix(codeBlockChildren, blockID), childrenIDs) -} diff --git a/storage/badger/operation/children_test.go b/storage/badger/operation/children_test.go deleted file mode 100644 index 629488373aa..00000000000 --- a/storage/badger/operation/children_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestBlockChildrenIndexUpdateLookup(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blockID := unittest.IdentifierFixture() - childrenIDs := unittest.IdentifierListFixture(8) - var retrievedIDs flow.IdentifierList - - err := db.Update(InsertBlockChildren(blockID, childrenIDs)) - require.NoError(t, err) - err = db.View(RetrieveBlockChildren(blockID, &retrievedIDs)) - require.NoError(t, err) - assert.Equal(t, childrenIDs, retrievedIDs) - - altIDs := unittest.IdentifierListFixture(4) - err = db.Update(UpdateBlockChildren(blockID, altIDs)) - require.NoError(t, err) - err = db.View(RetrieveBlockChildren(blockID, &retrievedIDs)) - require.NoError(t, err) - assert.Equal(t, altIDs, retrievedIDs) - }) -} diff --git a/storage/badger/operation/chunkDataPacks.go b/storage/badger/operation/chunkDataPacks.go deleted file mode 100644 index 687712985d4..00000000000 --- a/storage/badger/operation/chunkDataPacks.go +++ /dev/null @@ -1,35 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - badgermodel "github.com/onflow/flow-go/storage/badger/model" -) - -// InsertChunkDataPack inserts a chunk data pack keyed by chunk ID. -func InsertChunkDataPack(c *badgermodel.StoredChunkDataPack) func(*badger.Txn) error { - return insert(makePrefix(codeChunkDataPack, c.ChunkID), c) -} - -// BatchInsertChunkDataPack inserts a chunk data pack keyed by chunk ID into a batch -func BatchInsertChunkDataPack(c *badgermodel.StoredChunkDataPack) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeChunkDataPack, c.ChunkID), c) -} - -// BatchRemoveChunkDataPack removes a chunk data pack keyed by chunk ID, in a batch. -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. -func BatchRemoveChunkDataPack(chunkID flow.Identifier) func(batch *badger.WriteBatch) error { - return batchRemove(makePrefix(codeChunkDataPack, chunkID)) -} - -// RetrieveChunkDataPack retrieves a chunk data pack by chunk ID. 
-func RetrieveChunkDataPack(chunkID flow.Identifier, c *badgermodel.StoredChunkDataPack) func(*badger.Txn) error { - return retrieve(makePrefix(codeChunkDataPack, chunkID), c) -} - -// RemoveChunkDataPack removes the chunk data pack with the given chunk ID. -func RemoveChunkDataPack(chunkID flow.Identifier) func(*badger.Txn) error { - return remove(makePrefix(codeChunkDataPack, chunkID)) -} diff --git a/storage/badger/operation/chunkDataPacks_test.go b/storage/badger/operation/chunkDataPacks_test.go deleted file mode 100644 index 0dc79ef7266..00000000000 --- a/storage/badger/operation/chunkDataPacks_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - storagemodel "github.com/onflow/flow-go/storage/badger/model" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestChunkDataPack(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - collectionID := unittest.IdentifierFixture() - expected := &storagemodel.StoredChunkDataPack{ - ChunkID: unittest.IdentifierFixture(), - StartState: unittest.StateCommitmentFixture(), - Proof: []byte{'p'}, - CollectionID: collectionID, - } - - t.Run("Retrieve non-existent", func(t *testing.T) { - var actual storagemodel.StoredChunkDataPack - err := db.View(RetrieveChunkDataPack(expected.ChunkID, &actual)) - assert.Error(t, err) - }) - - t.Run("Save", func(t *testing.T) { - err := db.Update(InsertChunkDataPack(expected)) - require.NoError(t, err) - - var actual storagemodel.StoredChunkDataPack - err = db.View(RetrieveChunkDataPack(expected.ChunkID, &actual)) - assert.NoError(t, err) - - assert.Equal(t, *expected, actual) - }) - - t.Run("Remove", func(t *testing.T) { - err := db.Update(RemoveChunkDataPack(expected.ChunkID)) - require.NoError(t, err) - - var actual storagemodel.StoredChunkDataPack - err = db.View(RetrieveChunkDataPack(expected.ChunkID, &actual)) - assert.Error(t, err) - }) - }) -} diff --git a/storage/badger/operation/chunk_locators.go b/storage/badger/operation/chunk_locators.go deleted file mode 100644 index ef7f11fec50..00000000000 --- a/storage/badger/operation/chunk_locators.go +++ /dev/null @@ -1,16 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/chunks" - "github.com/onflow/flow-go/model/flow" -) - -func InsertChunkLocator(locator *chunks.Locator) func(*badger.Txn) error { - return insert(makePrefix(codeChunk, locator.ID()), locator) -} - -func RetrieveChunkLocator(locatorID flow.Identifier, locator *chunks.Locator) func(*badger.Txn) error { - return retrieve(makePrefix(codeChunk, locatorID), locator) -} diff --git a/storage/badger/operation/cluster.go b/storage/badger/operation/cluster.go deleted file mode 100644 index 8163285c62f..00000000000 --- a/storage/badger/operation/cluster.go +++ /dev/null @@ -1,83 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// This file implements storage functions for chain state book-keeping of -// collection node cluster consensus. In contrast to the corresponding functions -// for regular consensus, these functions include the cluster ID in order to -// support storing multiple chains, for example during epoch switchover. - -// IndexClusterBlockHeight inserts a block number to block ID mapping for -// the given cluster. 
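// --- Editor's illustrative sketch (not part of the patch) ---------------------
// How the per-cluster book-keeping removed here fits together: each collector
// cluster keeps its own height->blockID mapping plus one finalized-height
// boundary, all keyed by the cluster's chain ID so multiple chains can coexist
// (e.g. across an epoch switchover).
func sketchClusterIndex(db *badger.DB, clusterID flow.ChainID, height uint64, blockID flow.Identifier) error {
	if err := db.Update(operation.IndexClusterBlockHeight(clusterID, height, blockID)); err != nil {
		return err
	}
	var finalized uint64
	// Returns storage.ErrNotFound until InsertClusterFinalizedHeight has run
	// for this cluster.
	return db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &finalized))
}
// ------------------------------------------------------------------------------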
-func IndexClusterBlockHeight(clusterID flow.ChainID, number uint64, blockID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeFinalizedCluster, clusterID, number), blockID) -} - -// LookupClusterBlockHeight retrieves a block ID by number for the given cluster -func LookupClusterBlockHeight(clusterID flow.ChainID, number uint64, blockID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeFinalizedCluster, clusterID, number), blockID) -} - -// InsertClusterFinalizedHeight inserts the finalized boundary for the given cluster. -func InsertClusterFinalizedHeight(clusterID flow.ChainID, number uint64) func(*badger.Txn) error { - return insert(makePrefix(codeClusterHeight, clusterID), number) -} - -// UpdateClusterFinalizedHeight updates the finalized boundary for the given cluster. -func UpdateClusterFinalizedHeight(clusterID flow.ChainID, number uint64) func(*badger.Txn) error { - return update(makePrefix(codeClusterHeight, clusterID), number) -} - -// RetrieveClusterFinalizedHeight retrieves the finalized boundary for the given cluster. -func RetrieveClusterFinalizedHeight(clusterID flow.ChainID, number *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeClusterHeight, clusterID), number) -} - -// IndexReferenceBlockByClusterBlock inserts the reference block ID for the given -// cluster block ID. While each cluster block specifies a reference block in its -// payload, we maintain this additional lookup for performance reasons. -func IndexReferenceBlockByClusterBlock(clusterBlockID, refID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeClusterBlockToRefBlock, clusterBlockID), refID) -} - -// LookupReferenceBlockByClusterBlock looks up the reference block ID for the given -// cluster block ID. While each cluster block specifies a reference block in its -// payload, we maintain this additional lookup for performance reasons. -func LookupReferenceBlockByClusterBlock(clusterBlockID flow.Identifier, refID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeClusterBlockToRefBlock, clusterBlockID), refID) -} - -// IndexClusterBlockByReferenceHeight indexes a cluster block ID by its reference -// block height. The cluster block ID is included in the key for more efficient -// traversal. Only finalized cluster blocks should be included in this index. -// The key looks like: <prefix 0:1><ref_height 1:9><cluster_block_id 9:41> -func IndexClusterBlockByReferenceHeight(refHeight uint64, clusterBlockID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeRefHeightToClusterBlock, refHeight, clusterBlockID), nil) -} - -// LookupClusterBlocksByReferenceHeightRange traverses the ref_height->cluster_block -// index and returns any finalized cluster blocks which have a reference block with -// height in the given range. This is used to avoid including duplicate transaction -// when building or validating a new collection. 
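// --- Editor's illustrative sketch (not part of the patch) ---------------------
// The intended use of the ref-height index described above: when building or
// validating a collection, fetch the IDs of all finalized cluster blocks whose
// reference block lies in [start, end], then exclude their transactions as
// duplicates. Because the block ID is encoded in the key, this is a key-only
// range scan.
func sketchDuplicateGuard(db *badger.DB, start, end uint64) ([]flow.Identifier, error) {
	var clusterBlockIDs []flow.Identifier
	err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(start, end, &clusterBlockIDs))
	return clusterBlockIDs, err
}
// ------------------------------------------------------------------------------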
-func LookupClusterBlocksByReferenceHeightRange(start, end uint64, clusterBlockIDs *[]flow.Identifier) func(*badger.Txn) error { - startPrefix := makePrefix(codeRefHeightToClusterBlock, start) - endPrefix := makePrefix(codeRefHeightToClusterBlock, end) - prefixLen := len(startPrefix) - - return iterate(startPrefix, endPrefix, func() (checkFunc, createFunc, handleFunc) { - check := func(key []byte) bool { - clusterBlockIDBytes := key[prefixLen:] - var clusterBlockID flow.Identifier - copy(clusterBlockID[:], clusterBlockIDBytes) - *clusterBlockIDs = append(*clusterBlockIDs, clusterBlockID) - - // the info we need is stored in the key, never process the value - return false - } - return check, nil, nil - }, withPrefetchValuesFalse) -} diff --git a/storage/badger/operation/cluster_test.go b/storage/badger/operation/cluster_test.go deleted file mode 100644 index 9a616e08490..00000000000 --- a/storage/badger/operation/cluster_test.go +++ /dev/null @@ -1,313 +0,0 @@ -package operation_test - -import ( - "errors" - "fmt" - "math/rand" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestClusterHeights(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - var ( - clusterID flow.ChainID = "cluster" - height uint64 = 42 - expected = unittest.IdentifierFixture() - err error - ) - - t.Run("retrieve non-existent", func(t *testing.T) { - var actual flow.Identifier - err = db.View(operation.LookupClusterBlockHeight(clusterID, height, &actual)) - t.Log(err) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - }) - - t.Run("insert/retrieve", func(t *testing.T) { - err = db.Update(operation.IndexClusterBlockHeight(clusterID, height, expected)) - assert.Nil(t, err) - - var actual flow.Identifier - err = db.View(operation.LookupClusterBlockHeight(clusterID, height, &actual)) - assert.Nil(t, err) - assert.Equal(t, expected, actual) - }) - - t.Run("multiple chain IDs", func(t *testing.T) { - for i := 0; i < 3; i++ { - // use different cluster ID but same block height - clusterID = flow.ChainID(fmt.Sprintf("cluster-%d", i)) - expected = unittest.IdentifierFixture() - - var actual flow.Identifier - err = db.View(operation.LookupClusterBlockHeight(clusterID, height, &actual)) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - - err = db.Update(operation.IndexClusterBlockHeight(clusterID, height, expected)) - assert.Nil(t, err) - - err = db.View(operation.LookupClusterBlockHeight(clusterID, height, &actual)) - assert.Nil(t, err) - assert.Equal(t, expected, actual) - } - }) - }) -} - -func TestClusterBoundaries(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - var ( - clusterID flow.ChainID = "cluster" - expected uint64 = 42 - err error - ) - - t.Run("retrieve non-existant", func(t *testing.T) { - var actual uint64 - err = db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &actual)) - t.Log(err) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - }) - - t.Run("insert/retrieve", func(t *testing.T) { - err = db.Update(operation.InsertClusterFinalizedHeight(clusterID, 21)) - assert.Nil(t, err) - - err = db.Update(operation.UpdateClusterFinalizedHeight(clusterID, expected)) - assert.Nil(t, err) - - var actual uint64 - err = db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &actual)) - 
assert.Nil(t, err) - assert.Equal(t, expected, actual) - }) - - t.Run("multiple chain IDs", func(t *testing.T) { - for i := 0; i < 3; i++ { - // use different cluster ID but same boundary - clusterID = flow.ChainID(fmt.Sprintf("cluster-%d", i)) - expected = uint64(i) - - var actual uint64 - err = db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &actual)) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - - err = db.Update(operation.InsertClusterFinalizedHeight(clusterID, expected)) - assert.Nil(t, err) - - err = db.View(operation.RetrieveClusterFinalizedHeight(clusterID, &actual)) - assert.Nil(t, err) - assert.Equal(t, expected, actual) - } - }) - }) -} - -func TestClusterBlockByReferenceHeight(t *testing.T) { - - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("should be able to index cluster block by reference height", func(t *testing.T) { - id := unittest.IdentifierFixture() - height := rand.Uint64() - err := db.Update(operation.IndexClusterBlockByReferenceHeight(height, id)) - assert.NoError(t, err) - - var retrieved []flow.Identifier - err = db.View(operation.LookupClusterBlocksByReferenceHeightRange(height, height, &retrieved)) - assert.NoError(t, err) - require.Len(t, retrieved, 1) - assert.Equal(t, id, retrieved[0]) - }) - }) - - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("should be able to index multiple cluster blocks at same reference height", func(t *testing.T) { - ids := unittest.IdentifierListFixture(10) - height := rand.Uint64() - for _, id := range ids { - err := db.Update(operation.IndexClusterBlockByReferenceHeight(height, id)) - assert.NoError(t, err) - } - - var retrieved []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(height, height, &retrieved)) - assert.NoError(t, err) - assert.Len(t, retrieved, len(ids)) - assert.ElementsMatch(t, ids, retrieved) - }) - }) - - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("should be able to lookup cluster blocks across height range", func(t *testing.T) { - ids := unittest.IdentifierListFixture(100) - nextHeight := rand.Uint64() - // keep track of height range - minHeight, maxHeight := nextHeight, nextHeight - // keep track of which ids are indexed at each nextHeight - lookup := make(map[uint64][]flow.Identifier) - - for i := 0; i < len(ids); i++ { - // randomly adjust the nextHeight, increasing on average - r := rand.Intn(100) - if r < 20 { - nextHeight -= 1 // 20% - } else if r < 40 { - // nextHeight stays the same - 20% - } else if r < 80 { - nextHeight += 1 // 40% - } else { - nextHeight += 2 // 20% - } - - lookup[nextHeight] = append(lookup[nextHeight], ids[i]) - if nextHeight < minHeight { - minHeight = nextHeight - } - if nextHeight > maxHeight { - maxHeight = nextHeight - } - - err := db.Update(operation.IndexClusterBlockByReferenceHeight(nextHeight, ids[i])) - assert.NoError(t, err) - } - - // determine which ids we expect to be retrieved for a given height range - idsInHeightRange := func(min, max uint64) []flow.Identifier { - var idsForHeight []flow.Identifier - for height, id := range lookup { - if min <= height && height <= max { - idsForHeight = append(idsForHeight, id...) 
- } - } - return idsForHeight - } - - // Test cases are described as follows: - // {---} represents the queried height range - // [---] represents the indexed height range - // [{ means the left endpoint of both ranges are the same - // {-[ means the left endpoint of the queried range is strictly less than the indexed range - t.Run("{-}--[-]", func(t *testing.T) { - var retrieved []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(minHeight-100, minHeight-1, &retrieved)) - assert.NoError(t, err) - assert.Len(t, retrieved, 0) - }) - t.Run("{-[--}-]", func(t *testing.T) { - var retrieved []flow.Identifier - min := minHeight - 100 - max := minHeight + (maxHeight-minHeight)/2 - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(min, max, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(min, max) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("{[--}--]", func(t *testing.T) { - var retrieved []flow.Identifier - min := minHeight - max := minHeight + (maxHeight-minHeight)/2 - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(min, max, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(min, max) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("[-{--}-]", func(t *testing.T) { - var retrieved []flow.Identifier - min := minHeight + 1 - max := maxHeight - 1 - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(min, max, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(min, max) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("[{----}]", func(t *testing.T) { - var retrieved []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(minHeight, maxHeight, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(minHeight, maxHeight) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("[--{--}]", func(t *testing.T) { - var retrieved []flow.Identifier - min := minHeight + (maxHeight-minHeight)/2 - max := maxHeight - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(min, max, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(min, max) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("[-{--]-}", func(t *testing.T) { - var retrieved []flow.Identifier - min := minHeight + (maxHeight-minHeight)/2 - max := maxHeight + 100 - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(min, max, &retrieved)) - assert.NoError(t, err) - - expected := idsInHeightRange(min, max) - assert.NotEmpty(t, expected, "test assumption broken") - assert.Len(t, retrieved, len(expected)) - assert.ElementsMatch(t, expected, retrieved) - }) - t.Run("[-]--{-}", func(t *testing.T) { - var retrieved []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(maxHeight+1, maxHeight+100, &retrieved)) - assert.NoError(t, err) - assert.Len(t, retrieved, 0) - }) - }) - }) -} - -// expected average case # of blocks to lookup on Mainnet -func 
BenchmarkLookupClusterBlocksByReferenceHeightRange_1200(b *testing.B) { - benchmarkLookupClusterBlocksByReferenceHeightRange(b, 1200) -} - -// 5x average case on Mainnet -func BenchmarkLookupClusterBlocksByReferenceHeightRange_6_000(b *testing.B) { - benchmarkLookupClusterBlocksByReferenceHeightRange(b, 6_000) -} - -func BenchmarkLookupClusterBlocksByReferenceHeightRange_100_000(b *testing.B) { - benchmarkLookupClusterBlocksByReferenceHeightRange(b, 100_000) -} - -func benchmarkLookupClusterBlocksByReferenceHeightRange(b *testing.B, n int) { - unittest.RunWithBadgerDB(b, func(db *badger.DB) { - for i := 0; i < n; i++ { - err := db.Update(operation.IndexClusterBlockByReferenceHeight(rand.Uint64()%1000, unittest.IdentifierFixture())) - require.NoError(b, err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - var blockIDs []flow.Identifier - err := db.View(operation.LookupClusterBlocksByReferenceHeightRange(0, 1000, &blockIDs)) - require.NoError(b, err) - } - }) -} diff --git a/storage/badger/operation/collections.go b/storage/badger/operation/collections.go deleted file mode 100644 index 4b8e0faf761..00000000000 --- a/storage/badger/operation/collections.go +++ /dev/null @@ -1,46 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// NOTE: These insert light collections, which only contain references -// to the constituent transactions. They do not modify transactions contained -// by the collections. - -func InsertCollection(collection *flow.LightCollection) func(*badger.Txn) error { - return insert(makePrefix(codeCollection, collection.ID()), collection) -} - -func RetrieveCollection(collID flow.Identifier, collection *flow.LightCollection) func(*badger.Txn) error { - return retrieve(makePrefix(codeCollection, collID), collection) -} - -func RemoveCollection(collID flow.Identifier) func(*badger.Txn) error { - return remove(makePrefix(codeCollection, collID)) -} - -// IndexCollectionPayload indexes the transactions within the collection payload -// of a cluster block. -func IndexCollectionPayload(blockID flow.Identifier, txIDs []flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeIndexCollection, blockID), txIDs) -} - -// LookupCollection looks up the collection for a given cluster payload. 
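// --- Editor's illustrative sketch (not part of the patch) ---------------------
// The transaction->collection reverse index maintained by the helpers below:
// it answers "which collection contains transaction X" with a single point
// lookup instead of scanning collections.
func sketchTxToCollection(db *badger.DB, txID, collID flow.Identifier) (flow.Identifier, error) {
	if err := db.Update(operation.IndexCollectionByTransaction(txID, collID)); err != nil {
		return flow.ZeroID, err
	}
	var found flow.Identifier
	err := db.View(operation.RetrieveCollectionID(txID, &found))
	return found, err
}
// ------------------------------------------------------------------------------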
-func LookupCollectionPayload(blockID flow.Identifier, txIDs *[]flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeIndexCollection, blockID), txIDs) -} - -// IndexCollectionByTransaction inserts a collection id keyed by a transaction id -func IndexCollectionByTransaction(txID flow.Identifier, collectionID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeIndexCollectionByTransaction, txID), collectionID) -} - -// LookupCollectionID retrieves a collection id by transaction id -func RetrieveCollectionID(txID flow.Identifier, collectionID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeIndexCollectionByTransaction, txID), collectionID) -} diff --git a/storage/badger/operation/collections_test.go b/storage/badger/operation/collections_test.go deleted file mode 100644 index 9bbe14386c8..00000000000 --- a/storage/badger/operation/collections_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestCollections(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.CollectionFixture(2).Light() - - t.Run("Retrieve nonexistant", func(t *testing.T) { - var actual flow.LightCollection - err := db.View(RetrieveCollection(expected.ID(), &actual)) - assert.Error(t, err) - }) - - t.Run("Save", func(t *testing.T) { - err := db.Update(InsertCollection(&expected)) - require.NoError(t, err) - - var actual flow.LightCollection - err = db.View(RetrieveCollection(expected.ID(), &actual)) - assert.NoError(t, err) - - assert.Equal(t, expected, actual) - }) - - t.Run("Remove", func(t *testing.T) { - err := db.Update(RemoveCollection(expected.ID())) - require.NoError(t, err) - - var actual flow.LightCollection - err = db.View(RetrieveCollection(expected.ID(), &actual)) - assert.Error(t, err) - }) - - t.Run("Index and lookup", func(t *testing.T) { - expected := unittest.CollectionFixture(1).Light() - blockID := unittest.IdentifierFixture() - - _ = db.Update(func(tx *badger.Txn) error { - err := InsertCollection(&expected)(tx) - assert.Nil(t, err) - err = IndexCollectionPayload(blockID, expected.Transactions)(tx) - assert.Nil(t, err) - return nil - }) - - var actual flow.LightCollection - err := db.View(LookupCollectionPayload(blockID, &actual.Transactions)) - assert.Nil(t, err) - - assert.Equal(t, expected, actual) - }) - - t.Run("Index and lookup by transaction ID", func(t *testing.T) { - expected := unittest.IdentifierFixture() - transactionID := unittest.IdentifierFixture() - actual := flow.Identifier{} - - _ = db.Update(func(tx *badger.Txn) error { - err := IndexCollectionByTransaction(transactionID, expected)(tx) - assert.Nil(t, err) - err = RetrieveCollectionID(transactionID, &actual)(tx) - assert.Nil(t, err) - return nil - }) - assert.Equal(t, expected, actual) - }) - }) -} diff --git a/storage/badger/operation/commits.go b/storage/badger/operation/commits.go deleted file mode 100644 index c7f13afd49f..00000000000 --- a/storage/badger/operation/commits.go +++ /dev/null @@ -1,42 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// IndexStateCommitment indexes a state commitment. 
-// -// State commitments are keyed by the block whose execution results in the state with the given commit. -func IndexStateCommitment(blockID flow.Identifier, commit flow.StateCommitment) func(*badger.Txn) error { - return insert(makePrefix(codeCommit, blockID), commit) -} - -// BatchIndexStateCommitment indexes a state commitment into a batch -// -// State commitments are keyed by the block whose execution results in the state with the given commit. -func BatchIndexStateCommitment(blockID flow.Identifier, commit flow.StateCommitment) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeCommit, blockID), commit) -} - -// LookupStateCommitment gets a state commitment keyed by block ID -// -// State commitments are keyed by the block whose execution results in the state with the given commit. -func LookupStateCommitment(blockID flow.Identifier, commit *flow.StateCommitment) func(*badger.Txn) error { - return retrieve(makePrefix(codeCommit, blockID), commit) -} - -// RemoveStateCommitment removes the state commitment by block ID -func RemoveStateCommitment(blockID flow.Identifier) func(*badger.Txn) error { - return remove(makePrefix(codeCommit, blockID)) -} - -// BatchRemoveStateCommitment batch removes the state commitment by block ID -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. -func BatchRemoveStateCommitment(blockID flow.Identifier) func(batch *badger.WriteBatch) error { - return batchRemove(makePrefix(codeCommit, blockID)) -} diff --git a/storage/badger/operation/commits_test.go b/storage/badger/operation/commits_test.go deleted file mode 100644 index 392331e935a..00000000000 --- a/storage/badger/operation/commits_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestStateCommitments(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.StateCommitmentFixture() - id := unittest.IdentifierFixture() - err := db.Update(IndexStateCommitment(id, expected)) - require.Nil(t, err) - - var actual flow.StateCommitment - err = db.View(LookupStateCommitment(id, &actual)) - require.Nil(t, err) - assert.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/operation/common.go b/storage/badger/operation/common.go index 6dbe96224b4..c1deb1b7f5a 100644 --- a/storage/badger/operation/common.go +++ b/storage/badger/operation/common.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package operation import ( @@ -15,37 +13,6 @@ import ( "github.com/onflow/flow-go/storage" ) -// batchWrite will encode the given entity using msgpack and will upsert the resulting -// binary data in the badger wrote batch under the provided key - if the value already exists -// in the database it will be overridden. -// No errors are expected during normal operation. 
-func batchWrite(key []byte, entity interface{}) func(writeBatch *badger.WriteBatch) error { - return func(writeBatch *badger.WriteBatch) error { - - // update the maximum key size if the inserted key is bigger - if uint32(len(key)) > max { - max = uint32(len(key)) - err := SetMax(writeBatch) - if err != nil { - return fmt.Errorf("could not update max tracker: %w", err) - } - } - - // serialize the entity data - val, err := msgpack.Marshal(entity) - if err != nil { - return irrecoverable.NewExceptionf("could not encode entity: %w", err) - } - - // persist the entity data into the DB - err = writeBatch.Set(key, val) - if err != nil { - return irrecoverable.NewExceptionf("could not store data: %w", err) - } - return nil - } -} - // insert will encode the given entity using msgpack and will insert the resulting // binary data in the badger DB under the provided key. It will error if the // key already exists. @@ -177,19 +144,6 @@ func remove(key []byte) func(*badger.Txn) error { } } -// batchRemove removes entry under a given key in a write-batch. -// if key doesn't exist, does nothing. -// No errors are expected during normal operation. -func batchRemove(key []byte) func(writeBatch *badger.WriteBatch) error { - return func(writeBatch *badger.WriteBatch) error { - err := writeBatch.Delete(key) - if err != nil { - return irrecoverable.NewExceptionf("could not batch delete data: %w", err) - } - return nil - } -} - // removeByPrefix removes all the entities if the prefix of the key matches the given prefix. // if no key matches, this is a no-op // No errors are expected during normal operation. @@ -213,29 +167,6 @@ func removeByPrefix(prefix []byte) func(*badger.Txn) error { } } -// batchRemoveByPrefix removes all items under the keys match the given prefix in a batch write transaction. -// no error would be returned if no key was found with the given prefix. -// all error returned should be exception -func batchRemoveByPrefix(prefix []byte) func(tx *badger.Txn, writeBatch *badger.WriteBatch) error { - return func(tx *badger.Txn, writeBatch *badger.WriteBatch) error { - - opts := badger.DefaultIteratorOptions - opts.AllVersions = false - opts.PrefetchValues = false - it := tx.NewIterator(opts) - defer it.Close() - - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - key := it.Item().KeyCopy(nil) - err := writeBatch.Delete(key) - if err != nil { - return irrecoverable.NewExceptionf("could not delete item in batch: %w", err) - } - } - return nil - } -} - // retrieve will retrieve the binary data under the given key from the badger DB // and decode it into the given entity. The provided entity needs to be a // pointer to an initialized entity of the correct type. @@ -333,14 +264,6 @@ func lookup(entityIDs *[]flow.Identifier) func() (checkFunc, createFunc, handleF } } -// withPrefetchValuesFalse configures a Badger iteration to NOT preemptively load -// the values when iterating over keys (ie. key-only iteration). Key-only iteration -// is several order of magnitudes faster than regular iteration, because it involves -// access to the LSM-tree only, which is usually resident entirely in RAM. -func withPrefetchValuesFalse(options *badger.IteratorOptions) { - options.PrefetchValues = false -} - // iterate iterates over a range of keys defined by a start and end key. The // start key may be higher than the end key, in which case we iterate in // reverse order. 
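// --- Editor's illustrative sketch (not part of the patch) ---------------------
// What the removed withPrefetchValuesFalse option enabled, shown with raw
// Badger v2 iterator APIs: key-only iteration stays inside the LSM tree and
// avoids value-log reads, which is why indices like ref_height->cluster_block
// encode everything they need in the key itself.
func sketchKeyOnlyScan(db *badger.DB, prefix []byte, handle func(key []byte)) error {
	return db.View(func(tx *badger.Txn) error {
		opts := badger.DefaultIteratorOptions
		opts.PrefetchValues = false // key-only scan
		it := tx.NewIterator(opts)
		defer it.Close()
		for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
			handle(it.Item().KeyCopy(nil))
		}
		return nil
	})
}
// ------------------------------------------------------------------------------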
@@ -542,7 +465,7 @@ func findHighestAtOrBelow( it := tx.NewIterator(opts) defer it.Close() - it.Seek(append(prefix, b(height)...)) + it.Seek(append(prefix, keyPartToBinary(height)...)) if !it.Valid() { return storage.ErrNotFound diff --git a/storage/badger/operation/common_test.go b/storage/badger/operation/common_test.go index afae8b0c260..0bdbd77c629 100644 --- a/storage/badger/operation/common_test.go +++ b/storage/badger/operation/common_test.go @@ -1,14 +1,10 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package operation import ( "bytes" "fmt" - "math/rand" "reflect" "testing" - "time" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" @@ -20,10 +16,6 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - type Entity struct { ID uint64 } @@ -373,7 +365,7 @@ func TestIterate(t *testing.T) { } err := db.View(iterate(keys[0], keys[2], iterationFunc)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, expected, actual) }) @@ -412,7 +404,7 @@ func TestTraverse(t *testing.T) { } err := db.View(traverse([]byte{0x42}, iterationFunc)) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, expected, actual) }) @@ -628,7 +620,7 @@ func TestFindHighestAtOrBelow(t *testing.T) { entity3 := Entity{Value: 43} err := db.Update(func(tx *badger.Txn) error { - key := append(prefix, b(uint64(15))...) + key := append(prefix, keyPartToBinary(uint64(15))...) val, err := msgpack.Marshal(entity3) if err != nil { return err @@ -638,7 +630,7 @@ func TestFindHighestAtOrBelow(t *testing.T) { return err } - key = append(prefix, b(uint64(5))...) + key = append(prefix, keyPartToBinary(uint64(5))...) val, err = msgpack.Marshal(entity1) if err != nil { return err @@ -648,7 +640,7 @@ func TestFindHighestAtOrBelow(t *testing.T) { return err } - key = append(prefix, b(uint64(10))...) + key = append(prefix, keyPartToBinary(uint64(10))...) val, err = msgpack.Marshal(entity2) if err != nil { return err diff --git a/storage/badger/operation/computation_result.go b/storage/badger/operation/computation_result.go deleted file mode 100644 index 22238cc06e5..00000000000 --- a/storage/badger/operation/computation_result.go +++ /dev/null @@ -1,62 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertComputationResult addes given instance of ComputationResult into local BadgerDB. -func InsertComputationResultUploadStatus(blockID flow.Identifier, - wasUploadCompleted bool) func(*badger.Txn) error { - return insert(makePrefix(codeComputationResults, blockID), wasUploadCompleted) -} - -// UpdateComputationResult updates given existing instance of ComputationResult in local BadgerDB. -func UpdateComputationResultUploadStatus(blockID flow.Identifier, - wasUploadCompleted bool) func(*badger.Txn) error { - return update(makePrefix(codeComputationResults, blockID), wasUploadCompleted) -} - -// UpsertComputationResult upserts given existing instance of ComputationResult in local BadgerDB. -func UpsertComputationResultUploadStatus(blockID flow.Identifier, - wasUploadCompleted bool) func(*badger.Txn) error { - return upsert(makePrefix(codeComputationResults, blockID), wasUploadCompleted) -} - -// RemoveComputationResult removes an instance of ComputationResult with given ID. 
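// --- Editor's illustrative sketch (not part of the patch) ---------------------
// The upload-status lifecycle these removed helpers supported on execution
// nodes: mark a block's computation result as not-yet-uploaded, flip the flag
// once the upload succeeds, and query for blocks still pending.
func sketchUploadStatus(db *badger.DB, blockID flow.Identifier) ([]flow.Identifier, error) {
	if err := db.Update(operation.InsertComputationResultUploadStatus(blockID, false)); err != nil {
		return nil, err
	}
	// ... perform the upload, then record completion:
	if err := db.Update(operation.UpdateComputationResultUploadStatus(blockID, true)); err != nil {
		return nil, err
	}
	var pending []flow.Identifier
	err := db.View(operation.GetBlockIDsByStatus(&pending, false)) // still awaiting upload
	return pending, err
}
// ------------------------------------------------------------------------------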
-func RemoveComputationResultUploadStatus( - blockID flow.Identifier) func(*badger.Txn) error { - return remove(makePrefix(codeComputationResults, blockID)) -} - -// GetComputationResult returns stored ComputationResult instance with given ID. -func GetComputationResultUploadStatus(blockID flow.Identifier, - wasUploadCompleted *bool) func(*badger.Txn) error { - return retrieve(makePrefix(codeComputationResults, blockID), wasUploadCompleted) -} - -// GetBlockIDsByStatus returns all IDs of stored ComputationResult instances. -func GetBlockIDsByStatus(blockIDs *[]flow.Identifier, - targetUploadStatus bool) func(*badger.Txn) error { - return traverse(makePrefix(codeComputationResults), func() (checkFunc, createFunc, handleFunc) { - var currKey flow.Identifier - check := func(key []byte) bool { - currKey = flow.HashToID(key[1:]) - return true - } - - var wasUploadCompleted bool - create := func() interface{} { - return &wasUploadCompleted - } - - handle := func() error { - if blockIDs != nil && wasUploadCompleted == targetUploadStatus { - *blockIDs = append(*blockIDs, currKey) - } - return nil - } - return check, create, handle - }) -} diff --git a/storage/badger/operation/computation_result_test.go b/storage/badger/operation/computation_result_test.go deleted file mode 100644 index 79336a87964..00000000000 --- a/storage/badger/operation/computation_result_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package operation - -import ( - "reflect" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/engine/execution" - "github.com/onflow/flow-go/engine/execution/testutil" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestInsertAndUpdateAndRetrieveComputationResultUpdateStatus(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := testutil.ComputationResultFixture(t) - expectedId := expected.ExecutableBlock.ID() - - t.Run("Update existing ComputationResult", func(t *testing.T) { - // insert as False - testUploadStatusVal := false - - err := db.Update(InsertComputationResultUploadStatus(expectedId, testUploadStatusVal)) - require.NoError(t, err) - - var actualUploadStatus bool - err = db.View(GetComputationResultUploadStatus(expectedId, &actualUploadStatus)) - require.NoError(t, err) - - assert.Equal(t, testUploadStatusVal, actualUploadStatus) - - // update to True - testUploadStatusVal = true - err = db.Update(UpdateComputationResultUploadStatus(expectedId, testUploadStatusVal)) - require.NoError(t, err) - - // check if value is updated - err = db.View(GetComputationResultUploadStatus(expectedId, &actualUploadStatus)) - require.NoError(t, err) - - assert.Equal(t, testUploadStatusVal, actualUploadStatus) - }) - - t.Run("Update non-existed ComputationResult", func(t *testing.T) { - testUploadStatusVal := true - randomFlowID := flow.Identifier{} - err := db.Update(UpdateComputationResultUploadStatus(randomFlowID, testUploadStatusVal)) - require.Error(t, err) - require.Equal(t, err, storage.ErrNotFound) - }) - }) -} - -func TestUpsertAndRetrieveComputationResultUpdateStatus(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := testutil.ComputationResultFixture(t) - expectedId := expected.ExecutableBlock.ID() - - t.Run("Upsert ComputationResult", func(t *testing.T) { - // first upsert as false - testUploadStatusVal := false - - err := 
db.Update(UpsertComputationResultUploadStatus(expectedId, testUploadStatusVal)) - require.NoError(t, err) - - var actualUploadStatus bool - err = db.View(GetComputationResultUploadStatus(expectedId, &actualUploadStatus)) - require.NoError(t, err) - - assert.Equal(t, testUploadStatusVal, actualUploadStatus) - - // upsert to true - testUploadStatusVal = true - err = db.Update(UpsertComputationResultUploadStatus(expectedId, testUploadStatusVal)) - require.NoError(t, err) - - // check if value is updated - err = db.View(GetComputationResultUploadStatus(expectedId, &actualUploadStatus)) - require.NoError(t, err) - - assert.Equal(t, testUploadStatusVal, actualUploadStatus) - }) - }) -} - -func TestRemoveComputationResultUploadStatus(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := testutil.ComputationResultFixture(t) - expectedId := expected.ExecutableBlock.ID() - - t.Run("Remove ComputationResult", func(t *testing.T) { - testUploadStatusVal := true - - err := db.Update(InsertComputationResultUploadStatus(expectedId, testUploadStatusVal)) - require.NoError(t, err) - - var actualUploadStatus bool - err = db.View(GetComputationResultUploadStatus(expectedId, &actualUploadStatus)) - require.NoError(t, err) - - assert.Equal(t, testUploadStatusVal, actualUploadStatus) - - err = db.Update(RemoveComputationResultUploadStatus(expectedId)) - require.NoError(t, err) - - err = db.View(GetComputationResultUploadStatus(expectedId, &actualUploadStatus)) - assert.NotNil(t, err) - }) - }) -} - -func TestListComputationResults(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := [...]*execution.ComputationResult{ - testutil.ComputationResultFixture(t), - testutil.ComputationResultFixture(t), - } - t.Run("List all ComputationResult with status True", func(t *testing.T) { - expectedIDs := make(map[string]bool, 0) - // Store a list of ComputationResult instances first - for _, cr := range expected { - expectedId := cr.ExecutableBlock.ID() - expectedIDs[expectedId.String()] = true - err := db.Update(InsertComputationResultUploadStatus(expectedId, true)) - require.NoError(t, err) - } - - // Get the list of IDs of stored ComputationResult - crIDs := make([]flow.Identifier, 0) - err := db.View(GetBlockIDsByStatus(&crIDs, true)) - require.NoError(t, err) - crIDsStrMap := make(map[string]bool, 0) - for _, crID := range crIDs { - crIDsStrMap[crID.String()] = true - } - - assert.True(t, reflect.DeepEqual(crIDsStrMap, expectedIDs)) - }) - }) -} diff --git a/storage/badger/operation/dkg.go b/storage/badger/operation/dkg.go index 7a468ed9f36..1d9334dfbe1 100644 --- a/storage/badger/operation/dkg.go +++ b/storage/badger/operation/dkg.go @@ -15,11 +15,22 @@ import ( // CAUTION: This method stores confidential information and should only be // used in the context of the secrets database. This is enforced in the above // layer (see storage.DKGState). -// Error returns: storage.ErrAlreadyExists +// Error returns: [storage.ErrAlreadyExists]. func InsertMyBeaconPrivateKey(epochCounter uint64, info *encodable.RandomBeaconPrivKey) func(*badger.Txn) error { return insert(makePrefix(codeBeaconPrivateKey, epochCounter), info) } +// UpsertMyBeaconPrivateKey stores the random beacon private key, irrespective of whether an entry for +// the given epoch counter already exists in the database or not. +// +// CAUTION: This method stores confidential information and should only be +// used in the context of the secrets database. 
This is enforced in the above
+// layer (see storage.EpochRecoveryMyBeaconKey).
+// This method should be used only in very specific cases, such as epoch recovery; for normal usage, use InsertMyBeaconPrivateKey.
+func UpsertMyBeaconPrivateKey(epochCounter uint64, info *encodable.RandomBeaconPrivKey) func(*badger.Txn) error {
+	return upsert(makePrefix(codeBeaconPrivateKey, epochCounter), info)
+}
+
 // RetrieveMyBeaconPrivateKey retrieves the random beacon private key for the given epoch.
 //
 // CAUTION: This method stores confidential information and should only be
@@ -30,19 +41,13 @@ func RetrieveMyBeaconPrivateKey(epochCounter uint64, info *encodable.RandomBeaco
 	return retrieve(makePrefix(codeBeaconPrivateKey, epochCounter), info)
 }
 
-// InsertDKGStartedForEpoch stores a flag indicating that the DKG has been started for the given epoch.
-// Returns: storage.ErrAlreadyExists
-// Error returns: storage.ErrAlreadyExists
-func InsertDKGStartedForEpoch(epochCounter uint64) func(*badger.Txn) error {
-	return insert(makePrefix(codeDKGStarted, epochCounter), true)
-}
-
-// RetrieveDKGStartedForEpoch retrieves the DKG started flag for the given epoch.
+// RetrieveDKGStartedForEpoch retrieves whether DKG has started for the given epoch.
 // If no flag is set, started is set to false and no error is returned.
 // No errors expected during normal operation.
 func RetrieveDKGStartedForEpoch(epochCounter uint64, started *bool) func(*badger.Txn) error {
 	return func(tx *badger.Txn) error {
-		err := retrieve(makePrefix(codeDKGStarted, epochCounter), started)(tx)
+		var state flow.DKGState
+		err := RetrieveDKGStateForEpoch(epochCounter, &state)(tx)
 		if errors.Is(err, storage.ErrNotFound) {
 			// flag not set - therefore DKG not started
 			*started = false
@@ -51,19 +56,21 @@
 			// storage error - set started to zero value
 			*started = false
 			return err
+		} else {
+			*started = true
 		}
 		return nil
 	}
 }
 
-// InsertDKGEndStateForEpoch stores the DKG end state for the epoch.
-// Error returns: storage.ErrAlreadyExists
-func InsertDKGEndStateForEpoch(epochCounter uint64, endState flow.DKGEndState) func(*badger.Txn) error {
-	return insert(makePrefix(codeDKGEnded, epochCounter), endState)
+// UpsertDKGStateForEpoch stores the current state of the Random Beacon Recoverable State Machine for the epoch, irrespective of whether an entry for
+// the given epoch counter already exists in the database or not.
+func UpsertDKGStateForEpoch(epochCounter uint64, newState flow.DKGState) func(*badger.Txn) error {
+	return upsert(makePrefix(codeDKGState, epochCounter), newState)
 }
 
-// RetrieveDKGEndStateForEpoch retrieves the DKG end state for the epoch.
-// Error returns: storage.ErrNotFound
-func RetrieveDKGEndStateForEpoch(epochCounter uint64, endState *flow.DKGEndState) func(*badger.Txn) error {
-	return retrieve(makePrefix(codeDKGEnded, epochCounter), endState)
+// RetrieveDKGStateForEpoch retrieves the current DKG state for the epoch.
+// Error returns: [storage.ErrNotFound] +func RetrieveDKGStateForEpoch(epochCounter uint64, currentState *flow.DKGState) func(*badger.Txn) error { + return retrieve(makePrefix(codeDKGState, epochCounter), currentState) } diff --git a/storage/badger/operation/dkg_test.go b/storage/badger/operation/dkg_test.go index 03417e963f6..ffbc6aa944a 100644 --- a/storage/badger/operation/dkg_test.go +++ b/storage/badger/operation/dkg_test.go @@ -61,7 +61,7 @@ func TestDKGStartedForEpoch(t *testing.T) { epochCounter := rand.Uint64() // set the flag, ensure no error - err := db.Update(InsertDKGStartedForEpoch(epochCounter)) + err := db.Update(UpsertDKGStateForEpoch(epochCounter, flow.DKGStateStarted)) assert.NoError(t, err) // read the flag, should be true now @@ -78,23 +78,23 @@ func TestDKGStartedForEpoch(t *testing.T) { }) } -func TestDKGEndStateForEpoch(t *testing.T) { +func TestDKGSetStateForEpoch(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { epochCounter := rand.Uint64() - // should be able to write end state - endState := flow.DKGEndStateSuccess - err := db.Update(InsertDKGEndStateForEpoch(epochCounter, endState)) + // should be able to write new state + newState := flow.DKGStateStarted + err := db.Update(UpsertDKGStateForEpoch(epochCounter, newState)) assert.NoError(t, err) - // should be able to read end state - var readEndState flow.DKGEndState - err = db.View(RetrieveDKGEndStateForEpoch(epochCounter, &readEndState)) + // should be able to read current state + var readCurrentState flow.DKGState + err = db.View(RetrieveDKGStateForEpoch(epochCounter, &readCurrentState)) assert.NoError(t, err) - assert.Equal(t, endState, readEndState) + assert.Equal(t, newState, readCurrentState) - // attempting to overwrite should error - err = db.Update(InsertDKGEndStateForEpoch(epochCounter, flow.DKGEndStateDKGFailure)) - assert.ErrorIs(t, err, storage.ErrAlreadyExists) + // attempting to overwrite should succeed + err = db.Update(UpsertDKGStateForEpoch(epochCounter, flow.DKGStateFailure)) + assert.NoError(t, err) }) } diff --git a/storage/badger/operation/epoch.go b/storage/badger/operation/epoch.go deleted file mode 100644 index b5fcef7e029..00000000000 --- a/storage/badger/operation/epoch.go +++ /dev/null @@ -1,75 +0,0 @@ -package operation - -import ( - "errors" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" -) - -func InsertEpochSetup(eventID flow.Identifier, event *flow.EpochSetup) func(*badger.Txn) error { - return insert(makePrefix(codeEpochSetup, eventID), event) -} - -func RetrieveEpochSetup(eventID flow.Identifier, event *flow.EpochSetup) func(*badger.Txn) error { - return retrieve(makePrefix(codeEpochSetup, eventID), event) -} - -func InsertEpochCommit(eventID flow.Identifier, event *flow.EpochCommit) func(*badger.Txn) error { - return insert(makePrefix(codeEpochCommit, eventID), event) -} - -func RetrieveEpochCommit(eventID flow.Identifier, event *flow.EpochCommit) func(*badger.Txn) error { - return retrieve(makePrefix(codeEpochCommit, eventID), event) -} - -func InsertEpochStatus(blockID flow.Identifier, status *flow.EpochStatus) func(*badger.Txn) error { - return insert(makePrefix(codeBlockEpochStatus, blockID), status) -} - -func RetrieveEpochStatus(blockID flow.Identifier, status *flow.EpochStatus) func(*badger.Txn) error { - return retrieve(makePrefix(codeBlockEpochStatus, blockID), status) -} - -// SetEpochEmergencyFallbackTriggered sets a flag in the DB indicating that -// epoch emergency fallback 
has been triggered, and the block where it was triggered. -// -// EECC can be triggered in two ways: -// 1. Finalizing the first block past the epoch commitment deadline, when the -// next epoch has not yet been committed (see protocol.Params for more detail) -// 2. Finalizing a fork in which an invalid service event was incorporated. -// -// Calling this function multiple times is a no-op and returns no expected errors. -func SetEpochEmergencyFallbackTriggered(blockID flow.Identifier) func(txn *badger.Txn) error { - return SkipDuplicates(insert(makePrefix(codeEpochEmergencyFallbackTriggered), blockID)) -} - -// RetrieveEpochEmergencyFallbackTriggeredBlockID gets the block ID where epoch -// emergency was triggered. -func RetrieveEpochEmergencyFallbackTriggeredBlockID(blockID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeEpochEmergencyFallbackTriggered), blockID) -} - -// CheckEpochEmergencyFallbackTriggered retrieves the value of the flag -// indicating whether epoch emergency fallback has been triggered. If the key -// is not set, this results in triggered being set to false. -func CheckEpochEmergencyFallbackTriggered(triggered *bool) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - var blockID flow.Identifier - err := RetrieveEpochEmergencyFallbackTriggeredBlockID(&blockID)(tx) - if errors.Is(err, storage.ErrNotFound) { - // flag unset, EECC not triggered - *triggered = false - return nil - } else if err != nil { - // storage error, set triggered to zero value - *triggered = false - return err - } - // flag is set, EECC triggered - *triggered = true - return err - } -} diff --git a/storage/badger/operation/epoch_test.go b/storage/badger/operation/epoch_test.go deleted file mode 100644 index a9d4938e486..00000000000 --- a/storage/badger/operation/epoch_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestEpochEmergencyFallback(t *testing.T) { - - // the block ID where EECC was triggered - blockID := unittest.IdentifierFixture() - - t.Run("reading when unset should return false", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - var triggered bool - err := db.View(CheckEpochEmergencyFallbackTriggered(&triggered)) - assert.NoError(t, err) - assert.False(t, triggered) - }) - }) - t.Run("should be able to set flag to true", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - // set the flag, ensure no error - err := db.Update(SetEpochEmergencyFallbackTriggered(blockID)) - assert.NoError(t, err) - - // read the flag, should be true now - var triggered bool - err = db.View(CheckEpochEmergencyFallbackTriggered(&triggered)) - assert.NoError(t, err) - assert.True(t, triggered) - - // read the value of the block ID, should match - var storedBlockID flow.Identifier - err = db.View(RetrieveEpochEmergencyFallbackTriggeredBlockID(&storedBlockID)) - assert.NoError(t, err) - assert.Equal(t, blockID, storedBlockID) - }) - }) - t.Run("setting flag multiple time should have no additional effect", func(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - // set the flag, ensure no error - err := db.Update(SetEpochEmergencyFallbackTriggered(blockID)) - assert.NoError(t, err) - - // set the flag, should have no error and no effect on state - err = 
db.Update(SetEpochEmergencyFallbackTriggered(unittest.IdentifierFixture())) - assert.NoError(t, err) - - // read the flag, should be true - var triggered bool - err = db.View(CheckEpochEmergencyFallbackTriggered(&triggered)) - assert.NoError(t, err) - assert.True(t, triggered) - - // read the value of block ID, should equal the FIRST set ID - var storedBlockID flow.Identifier - err = db.View(RetrieveEpochEmergencyFallbackTriggeredBlockID(&storedBlockID)) - assert.NoError(t, err) - assert.Equal(t, blockID, storedBlockID) - }) - }) -} diff --git a/storage/badger/operation/events.go b/storage/badger/operation/events.go deleted file mode 100644 index f49c937c412..00000000000 --- a/storage/badger/operation/events.go +++ /dev/null @@ -1,115 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func eventPrefix(prefix byte, blockID flow.Identifier, event flow.Event) []byte { - return makePrefix(prefix, blockID, event.TransactionID, event.TransactionIndex, event.EventIndex) -} - -func InsertEvent(blockID flow.Identifier, event flow.Event) func(*badger.Txn) error { - return insert(eventPrefix(codeEvent, blockID, event), event) -} - -func BatchInsertEvent(blockID flow.Identifier, event flow.Event) func(batch *badger.WriteBatch) error { - return batchWrite(eventPrefix(codeEvent, blockID, event), event) -} - -func InsertServiceEvent(blockID flow.Identifier, event flow.Event) func(*badger.Txn) error { - return insert(eventPrefix(codeServiceEvent, blockID, event), event) -} - -func BatchInsertServiceEvent(blockID flow.Identifier, event flow.Event) func(batch *badger.WriteBatch) error { - return batchWrite(eventPrefix(codeServiceEvent, blockID, event), event) -} - -func RetrieveEvents(blockID flow.Identifier, transactionID flow.Identifier, events *[]flow.Event) func(*badger.Txn) error { - iterationFunc := eventIterationFunc(events) - return traverse(makePrefix(codeEvent, blockID, transactionID), iterationFunc) -} - -func LookupEventsByBlockID(blockID flow.Identifier, events *[]flow.Event) func(*badger.Txn) error { - iterationFunc := eventIterationFunc(events) - return traverse(makePrefix(codeEvent, blockID), iterationFunc) -} - -func LookupServiceEventsByBlockID(blockID flow.Identifier, events *[]flow.Event) func(*badger.Txn) error { - iterationFunc := eventIterationFunc(events) - return traverse(makePrefix(codeServiceEvent, blockID), iterationFunc) -} - -func LookupEventsByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType, events *[]flow.Event) func(*badger.Txn) error { - iterationFunc := eventFilterIterationFunc(events, eventType) - return traverse(makePrefix(codeEvent, blockID), iterationFunc) -} - -func RemoveServiceEventsByBlockID(blockID flow.Identifier) func(*badger.Txn) error { - return removeByPrefix(makePrefix(codeServiceEvent, blockID)) -} - -// BatchRemoveServiceEventsByBlockID removes all service events for the given blockID. -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
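[Editor's note] One design point worth spelling out about the events file removed above: eventPrefix keys each event as code ++ blockID ++ txID ++ txIndex ++ eventIndex, so block-level, transaction-level, and type-filtered lookups are all prefix scans over the same index. A hypothetical sketch of that prefix relationship (test name mine; fixtures as used elsewhere in this diff; it exercises the helpers being deleted here):

package operation

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestEventKeyGranularitySketch(t *testing.T) {
	blockID := unittest.IdentifierFixture()
	txID := unittest.IdentifierFixture()
	event := unittest.EventFixture(flow.EventAccountCreated, 1, 2, txID, 0)

	// The full per-event key embeds every lookup dimension.
	full := eventPrefix(codeEvent, blockID, event)

	// Narrower queries scan shorter prefixes of the same keyspace.
	require.True(t, bytes.HasPrefix(full, makePrefix(codeEvent, blockID)))
	require.True(t, bytes.HasPrefix(full, makePrefix(codeEvent, blockID, txID)))
}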
-func BatchRemoveServiceEventsByBlockID(blockID flow.Identifier, batch *badger.WriteBatch) func(*badger.Txn) error { - return func(txn *badger.Txn) error { - return batchRemoveByPrefix(makePrefix(codeServiceEvent, blockID))(txn, batch) - } -} - -func RemoveEventsByBlockID(blockID flow.Identifier) func(*badger.Txn) error { - return removeByPrefix(makePrefix(codeEvent, blockID)) -} - -// BatchRemoveEventsByBlockID removes all events for the given blockID. -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. -func BatchRemoveEventsByBlockID(blockID flow.Identifier, batch *badger.WriteBatch) func(*badger.Txn) error { - return func(txn *badger.Txn) error { - return batchRemoveByPrefix(makePrefix(codeEvent, blockID))(txn, batch) - } - -} - -// eventIterationFunc returns an in iteration function which returns all events found during traversal or iteration -func eventIterationFunc(events *[]flow.Event) func() (checkFunc, createFunc, handleFunc) { - return func() (checkFunc, createFunc, handleFunc) { - check := func(key []byte) bool { - return true - } - var val flow.Event - create := func() interface{} { - return &val - } - handle := func() error { - *events = append(*events, val) - return nil - } - return check, create, handle - } -} - -// eventFilterIterationFunc returns an iteration function which filters the result by the given event type in the handleFunc -func eventFilterIterationFunc(events *[]flow.Event, eventType flow.EventType) func() (checkFunc, createFunc, handleFunc) { - return func() (checkFunc, createFunc, handleFunc) { - check := func(key []byte) bool { - return true - } - var val flow.Event - create := func() interface{} { - return &val - } - handle := func() error { - // filter out all events not of type eventType - if val.Type == eventType { - *events = append(*events, val) - } - return nil - } - return check, create, handle - } -} diff --git a/storage/badger/operation/events_test.go b/storage/badger/operation/events_test.go deleted file mode 100644 index ed2395e69d7..00000000000 --- a/storage/badger/operation/events_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package operation - -import ( - "bytes" - "sort" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestRetrieveEventByBlockIDTxID tests event insertion, event retrieval by block id, block id and transaction id, -// and block id and event type -func TestRetrieveEventByBlockIDTxID(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - // create block ids, transaction ids and event types slices - blockIDs := []flow.Identifier{flow.HashToID([]byte{0x01}), flow.HashToID([]byte{0x02})} - txIDs := []flow.Identifier{flow.HashToID([]byte{0x11}), flow.HashToID([]byte{0x12})} - eTypes := []flow.EventType{flow.EventAccountCreated, flow.EventAccountUpdated} - - // create map of block id to event, tx id to event and event type to event - blockMap := make(map[string][]flow.Event) - txMap := make(map[string][]flow.Event) - typeMap := make(map[string][]flow.Event) - - // initialize the maps and the db - for _, b := range blockIDs { - - bEvents := make([]flow.Event, 0) - - // all blocks share the same transactions - for i, tx := range txIDs { - - tEvents := make([]flow.Event, 0) - - // create one event for each possible event type - for j, etype := range 
eTypes { - - eEvents := make([]flow.Event, 0) - - event := unittest.EventFixture(etype, uint32(i), uint32(j), tx, 0) - - // insert event into the db - err := db.Update(InsertEvent(b, event)) - require.Nil(t, err) - - // update event arrays in the maps - bEvents = append(bEvents, event) - tEvents = append(tEvents, event) - eEvents = append(eEvents, event) - - key := b.String() + "_" + string(etype) - if _, ok := typeMap[key]; ok { - typeMap[key] = append(typeMap[key], eEvents...) - } else { - typeMap[key] = eEvents - } - } - txMap[b.String()+"_"+tx.String()] = tEvents - } - blockMap[b.String()] = bEvents - } - - assertFunc := func(err error, expected []flow.Event, actual []flow.Event) { - require.NoError(t, err) - sortEvent(expected) - sortEvent(actual) - require.Equal(t, expected, actual) - } - - t.Run("retrieve events by Block ID", func(t *testing.T) { - for _, b := range blockIDs { - var actualEvents = make([]flow.Event, 0) - - // lookup events by block id - err := db.View(LookupEventsByBlockID(b, &actualEvents)) - - expectedEvents := blockMap[b.String()] - assertFunc(err, expectedEvents, actualEvents) - } - }) - - t.Run("retrieve events by block ID and transaction ID", func(t *testing.T) { - for _, b := range blockIDs { - for _, t := range txIDs { - var actualEvents = make([]flow.Event, 0) - - //lookup events by block id and transaction id - err := db.View(RetrieveEvents(b, t, &actualEvents)) - - expectedEvents := txMap[b.String()+"_"+t.String()] - assertFunc(err, expectedEvents, actualEvents) - } - } - }) - - t.Run("retrieve events by block ID and event type", func(t *testing.T) { - for _, b := range blockIDs { - for _, et := range eTypes { - var actualEvents = make([]flow.Event, 0) - - //lookup events by block id and transaction id - err := db.View(LookupEventsByBlockIDEventType(b, et, &actualEvents)) - - expectedEvents := typeMap[b.String()+"_"+string(et)] - assertFunc(err, expectedEvents, actualEvents) - } - } - }) - }) -} - -// Event retrieval does not guarantee any order, hence a sort function to help compare expected and actual events -func sortEvent(events []flow.Event) { - sort.Slice(events, func(i, j int) bool { - - tComp := bytes.Compare(events[i].TransactionID[:], events[j].TransactionID[:]) - if tComp < 0 { - return true - } - if tComp > 0 { - return false - } - - txIndex := events[i].TransactionIndex == events[j].TransactionIndex - if !txIndex { - return events[i].TransactionIndex < events[j].TransactionIndex - } - - return events[i].EventIndex < events[j].EventIndex - - }) -} diff --git a/storage/badger/operation/guarantees.go b/storage/badger/operation/guarantees.go deleted file mode 100644 index cfefead5f5b..00000000000 --- a/storage/badger/operation/guarantees.go +++ /dev/null @@ -1,23 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func InsertGuarantee(collID flow.Identifier, guarantee *flow.CollectionGuarantee) func(*badger.Txn) error { - return insert(makePrefix(codeGuarantee, collID), guarantee) -} - -func RetrieveGuarantee(collID flow.Identifier, guarantee *flow.CollectionGuarantee) func(*badger.Txn) error { - return retrieve(makePrefix(codeGuarantee, collID), guarantee) -} - -func IndexPayloadGuarantees(blockID flow.Identifier, guarIDs []flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codePayloadGuarantees, blockID), guarIDs) -} - -func LookupPayloadGuarantees(blockID flow.Identifier, guarIDs *[]flow.Identifier) func(*badger.Txn) error { - return 
retrieve(makePrefix(codePayloadGuarantees, blockID), guarIDs) -} diff --git a/storage/badger/operation/guarantees_test.go b/storage/badger/operation/guarantees_test.go deleted file mode 100644 index 3045799db58..00000000000 --- a/storage/badger/operation/guarantees_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestGuaranteeInsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - g := unittest.CollectionGuaranteeFixture() - - err := db.Update(InsertGuarantee(g.CollectionID, g)) - require.Nil(t, err) - - var retrieved flow.CollectionGuarantee - err = db.View(RetrieveGuarantee(g.CollectionID, &retrieved)) - require.NoError(t, err) - - assert.Equal(t, g, &retrieved) - }) -} - -func TestIndexGuaranteedCollectionByBlockHashInsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blockID := flow.Identifier{0x10} - collID1 := flow.Identifier{0x01} - collID2 := flow.Identifier{0x02} - guarantees := []*flow.CollectionGuarantee{ - {CollectionID: collID1, Signature: crypto.Signature{0x10}}, - {CollectionID: collID2, Signature: crypto.Signature{0x20}}, - } - expected := flow.GetIDs(guarantees) - - err := db.Update(func(tx *badger.Txn) error { - for _, guarantee := range guarantees { - if err := InsertGuarantee(guarantee.ID(), guarantee)(tx); err != nil { - return err - } - } - if err := IndexPayloadGuarantees(blockID, expected)(tx); err != nil { - return err - } - return nil - }) - require.Nil(t, err) - - var actual []flow.Identifier - err = db.View(LookupPayloadGuarantees(blockID, &actual)) - require.Nil(t, err) - - assert.Equal(t, []flow.Identifier{collID1, collID2}, actual) - }) -} - -func TestIndexGuaranteedCollectionByBlockHashMultipleBlocks(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blockID1 := flow.Identifier{0x10} - blockID2 := flow.Identifier{0x20} - collID1 := flow.Identifier{0x01} - collID2 := flow.Identifier{0x02} - collID3 := flow.Identifier{0x03} - collID4 := flow.Identifier{0x04} - set1 := []*flow.CollectionGuarantee{ - {CollectionID: collID1, Signature: crypto.Signature{0x1}}, - } - set2 := []*flow.CollectionGuarantee{ - {CollectionID: collID2, Signature: crypto.Signature{0x2}}, - {CollectionID: collID3, Signature: crypto.Signature{0x3}}, - {CollectionID: collID4, Signature: crypto.Signature{0x1}}, - } - ids1 := flow.GetIDs(set1) - ids2 := flow.GetIDs(set2) - - // insert block 1 - err := db.Update(func(tx *badger.Txn) error { - for _, guarantee := range set1 { - if err := InsertGuarantee(guarantee.CollectionID, guarantee)(tx); err != nil { - return err - } - } - if err := IndexPayloadGuarantees(blockID1, ids1)(tx); err != nil { - return err - } - return nil - }) - require.Nil(t, err) - - // insert block 2 - err = db.Update(func(tx *badger.Txn) error { - for _, guarantee := range set2 { - if err := InsertGuarantee(guarantee.CollectionID, guarantee)(tx); err != nil { - return err - } - } - if err := IndexPayloadGuarantees(blockID2, ids2)(tx); err != nil { - return err - } - return nil - }) - require.Nil(t, err) - - t.Run("should retrieve collections for block", func(t *testing.T) { - var actual1 []flow.Identifier - err = db.View(LookupPayloadGuarantees(blockID1, &actual1)) - assert.NoError(t, err) - assert.ElementsMatch(t, 
[]flow.Identifier{collID1}, actual1) - - // get block 2 - var actual2 []flow.Identifier - err = db.View(LookupPayloadGuarantees(blockID2, &actual2)) - assert.NoError(t, err) - assert.Equal(t, []flow.Identifier{collID2, collID3, collID4}, actual2) - }) - }) -} diff --git a/storage/badger/operation/headers.go b/storage/badger/operation/headers.go deleted file mode 100644 index bd1c377cc16..00000000000 --- a/storage/badger/operation/headers.go +++ /dev/null @@ -1,77 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func InsertHeader(headerID flow.Identifier, header *flow.Header) func(*badger.Txn) error { - return insert(makePrefix(codeHeader, headerID), header) -} - -func RetrieveHeader(blockID flow.Identifier, header *flow.Header) func(*badger.Txn) error { - return retrieve(makePrefix(codeHeader, blockID), header) -} - -// IndexBlockHeight indexes the height of a block. It should only be called on -// finalized blocks. -func IndexBlockHeight(height uint64, blockID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeHeightToBlock, height), blockID) -} - -// LookupBlockHeight retrieves finalized blocks by height. -func LookupBlockHeight(height uint64, blockID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeHeightToBlock, height), blockID) -} - -// BlockExists checks whether the block exists in the database. -// No errors are expected during normal operation. -func BlockExists(blockID flow.Identifier, blockExists *bool) func(*badger.Txn) error { - return exists(makePrefix(codeHeader, blockID), blockExists) -} - -func InsertExecutedBlock(blockID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeExecutedBlock), blockID) -} - -func UpdateExecutedBlock(blockID flow.Identifier) func(*badger.Txn) error { - return update(makePrefix(codeExecutedBlock), blockID) -} - -func RetrieveExecutedBlock(blockID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeExecutedBlock), blockID) -} - -// IndexCollectionBlock indexes a block by a collection within that block. -func IndexCollectionBlock(collID flow.Identifier, blockID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeCollectionBlock, collID), blockID) -} - -// LookupCollectionBlock looks up a block by a collection within that block. 
-func LookupCollectionBlock(collID flow.Identifier, blockID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeCollectionBlock, collID), blockID) -} - -// FindHeaders iterates through all headers, calling `filter` on each, and adding -// them to the `found` slice if `filter` returned true -func FindHeaders(filter func(header *flow.Header) bool, found *[]flow.Header) func(*badger.Txn) error { - return traverse(makePrefix(codeHeader), func() (checkFunc, createFunc, handleFunc) { - check := func(key []byte) bool { - return true - } - var val flow.Header - create := func() interface{} { - return &val - } - handle := func() error { - if filter(&val) { - *found = append(*found, val) - } - return nil - } - return check, create, handle - }) -} diff --git a/storage/badger/operation/headers_test.go b/storage/badger/operation/headers_test.go deleted file mode 100644 index 089ecea3848..00000000000 --- a/storage/badger/operation/headers_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "testing" - "time" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestHeaderInsertCheckRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := &flow.Header{ - View: 1337, - Timestamp: time.Now().UTC(), - ParentID: flow.Identifier{0x11}, - PayloadHash: flow.Identifier{0x22}, - ParentVoterIndices: []byte{0x44}, - ParentVoterSigData: []byte{0x88}, - ProposerID: flow.Identifier{0x33}, - ProposerSigData: crypto.Signature{0x77}, - } - blockID := expected.ID() - - err := db.Update(InsertHeader(expected.ID(), expected)) - require.Nil(t, err) - - var actual flow.Header - err = db.View(RetrieveHeader(blockID, &actual)) - require.Nil(t, err) - - assert.Equal(t, *expected, actual) - }) -} - -func TestHeaderIDIndexByCollectionID(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - headerID := unittest.IdentifierFixture() - collectionID := unittest.IdentifierFixture() - - err := db.Update(IndexCollectionBlock(collectionID, headerID)) - require.Nil(t, err) - - actualID := &flow.Identifier{} - err = db.View(LookupCollectionBlock(collectionID, actualID)) - require.Nil(t, err) - assert.Equal(t, headerID, *actualID) - }) -} - -func TestBlockHeightIndexLookup(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - height := uint64(1337) - expected := flow.Identifier{0x01, 0x02, 0x03} - - err := db.Update(IndexBlockHeight(height, expected)) - require.Nil(t, err) - - var actual flow.Identifier - err = db.View(LookupBlockHeight(height, &actual)) - require.Nil(t, err) - - assert.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/operation/heights.go b/storage/badger/operation/heights.go deleted file mode 100644 index 4e5d1c6b117..00000000000 --- a/storage/badger/operation/heights.go +++ /dev/null @@ -1,85 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "github.com/dgraph-io/badger/v2" -) - -func InsertRootHeight(height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeRootHeight), height) -} - -func RetrieveRootHeight(height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeRootHeight), height) -} - -func InsertFinalizedHeight(height uint64) func(*badger.Txn) error { - return 
insert(makePrefix(codeFinalizedHeight), height) -} - -func UpdateFinalizedHeight(height uint64) func(*badger.Txn) error { - return update(makePrefix(codeFinalizedHeight), height) -} - -func RetrieveFinalizedHeight(height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeFinalizedHeight), height) -} - -func InsertSealedHeight(height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeSealedHeight), height) -} - -func UpdateSealedHeight(height uint64) func(*badger.Txn) error { - return update(makePrefix(codeSealedHeight), height) -} - -func RetrieveSealedHeight(height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeSealedHeight), height) -} - -// InsertEpochFirstHeight inserts the height of the first block in the given epoch. -// The first block of an epoch E is the finalized block with view >= E.FirstView. -// Although we don't store the final height of an epoch, it can be inferred from this index. -// Returns storage.ErrAlreadyExists if the height has already been indexed. -func InsertEpochFirstHeight(epoch, height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeEpochFirstHeight, epoch), height) -} - -// RetrieveEpochFirstHeight retrieves the height of the first block in the given epoch. -// Returns storage.ErrNotFound if the first block of the epoch has not yet been finalized. -func RetrieveEpochFirstHeight(epoch uint64, height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeEpochFirstHeight, epoch), height) -} - -// RetrieveEpochLastHeight retrieves the height of the last block in the given epoch. -// It's a more readable, but equivalent query to RetrieveEpochFirstHeight when interested in the last height of an epoch. -// Returns storage.ErrNotFound if the first block of the epoch has not yet been finalized. -func RetrieveEpochLastHeight(epoch uint64, height *uint64) func(*badger.Txn) error { - var nextEpochFirstHeight uint64 - return func(tx *badger.Txn) error { - if err := retrieve(makePrefix(codeEpochFirstHeight, epoch+1), &nextEpochFirstHeight)(tx); err != nil { - return err - } - *height = nextEpochFirstHeight - 1 - return nil - } -} - -// InsertLastCompleteBlockHeightIfNotExists inserts the last full block height if it is not already set. -// Calling this function multiple times is a no-op and returns no expected errors. 
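[Editor's note] The epoch-height index deleted above encodes a small but easy-to-miss inference: the last height of epoch E is never stored, it is computed as the first height of epoch E+1 minus one. A hypothetical test-style sketch making that concrete (test name mine; helpers and the storage.ErrNotFound behavior as defined above):

package operation

import (
	"testing"

	"github.com/dgraph-io/badger/v2"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestEpochLastHeightInferenceSketch(t *testing.T) {
	unittest.RunWithBadgerDB(t, func(db *badger.DB) {
		// Epoch 5 starts at height 1000, epoch 6 at height 1500.
		require.NoError(t, db.Update(InsertEpochFirstHeight(5, 1000)))
		require.NoError(t, db.Update(InsertEpochFirstHeight(6, 1500)))

		// The last height of epoch 5 is inferred as 1500 - 1 = 1499.
		var last uint64
		require.NoError(t, db.View(RetrieveEpochLastHeight(5, &last)))
		require.Equal(t, uint64(1499), last)

		// Epoch 6 has no last height until epoch 7's first block is finalized.
		err := db.View(RetrieveEpochLastHeight(6, &last))
		require.ErrorIs(t, err, storage.ErrNotFound)
	})
}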
-func InsertLastCompleteBlockHeightIfNotExists(height uint64) func(*badger.Txn) error { - return SkipDuplicates(InsertLastCompleteBlockHeight(height)) -} - -func InsertLastCompleteBlockHeight(height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeLastCompleteBlockHeight), height) -} - -func UpdateLastCompleteBlockHeight(height uint64) func(*badger.Txn) error { - return update(makePrefix(codeLastCompleteBlockHeight), height) -} - -func RetrieveLastCompleteBlockHeight(height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeLastCompleteBlockHeight), height) -} diff --git a/storage/badger/operation/heights_test.go b/storage/badger/operation/heights_test.go deleted file mode 100644 index 5cfa1a77099..00000000000 --- a/storage/badger/operation/heights_test.go +++ /dev/null @@ -1,140 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "math/rand" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestFinalizedInsertUpdateRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height := uint64(1337) - - err := db.Update(InsertFinalizedHeight(height)) - require.NoError(t, err) - - var retrieved uint64 - err = db.View(RetrieveFinalizedHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - - height = 9999 - err = db.Update(UpdateFinalizedHeight(height)) - require.NoError(t, err) - - err = db.View(RetrieveFinalizedHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - }) -} - -func TestSealedInsertUpdateRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height := uint64(1337) - - err := db.Update(InsertSealedHeight(height)) - require.NoError(t, err) - - var retrieved uint64 - err = db.View(RetrieveSealedHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - - height = 9999 - err = db.Update(UpdateSealedHeight(height)) - require.NoError(t, err) - - err = db.View(RetrieveSealedHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - }) -} - -func TestEpochFirstBlockIndex_InsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height := rand.Uint64() - epoch := rand.Uint64() - - // retrieve when empty errors - var retrieved uint64 - err := db.View(RetrieveEpochFirstHeight(epoch, &retrieved)) - require.ErrorIs(t, err, storage.ErrNotFound) - - // can insert - err = db.Update(InsertEpochFirstHeight(epoch, height)) - require.NoError(t, err) - - // can retrieve - err = db.View(RetrieveEpochFirstHeight(epoch, &retrieved)) - require.NoError(t, err) - assert.Equal(t, retrieved, height) - - // retrieve non-existent key errors - err = db.View(RetrieveEpochFirstHeight(epoch+1, &retrieved)) - require.ErrorIs(t, err, storage.ErrNotFound) - - // insert existent key errors - err = db.Update(InsertEpochFirstHeight(epoch, height)) - require.ErrorIs(t, err, storage.ErrAlreadyExists) - }) -} - -func TestLastCompleteBlockHeightInsertUpdateRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height := uint64(1337) - - err := db.Update(InsertLastCompleteBlockHeight(height)) - require.NoError(t, err) - - var retrieved uint64 - err = db.View(RetrieveLastCompleteBlockHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - - 
height = 9999 - err = db.Update(UpdateLastCompleteBlockHeight(height)) - require.NoError(t, err) - - err = db.View(RetrieveLastCompleteBlockHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height) - }) -} - -func TestLastCompleteBlockHeightInsertIfNotExists(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height1 := uint64(1337) - - err := db.Update(InsertLastCompleteBlockHeightIfNotExists(height1)) - require.NoError(t, err) - - var retrieved uint64 - err = db.View(RetrieveLastCompleteBlockHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height1) - - height2 := uint64(9999) - err = db.Update(InsertLastCompleteBlockHeightIfNotExists(height2)) - require.NoError(t, err) - - err = db.View(RetrieveLastCompleteBlockHeight(&retrieved)) - require.NoError(t, err) - - assert.Equal(t, retrieved, height1) - }) -} diff --git a/storage/badger/operation/interactions.go b/storage/badger/operation/interactions.go deleted file mode 100644 index 952b2f7a188..00000000000 --- a/storage/badger/operation/interactions.go +++ /dev/null @@ -1,25 +0,0 @@ -package operation - -import ( - "github.com/onflow/flow-go/fvm/storage/snapshot" - "github.com/onflow/flow-go/model/flow" - - "github.com/dgraph-io/badger/v2" -) - -func InsertExecutionStateInteractions( - blockID flow.Identifier, - executionSnapshots []*snapshot.ExecutionSnapshot, -) func(*badger.Txn) error { - return insert( - makePrefix(codeExecutionStateInteractions, blockID), - executionSnapshots) -} - -func RetrieveExecutionStateInteractions( - blockID flow.Identifier, - executionSnapshots *[]*snapshot.ExecutionSnapshot, -) func(*badger.Txn) error { - return retrieve( - makePrefix(codeExecutionStateInteractions, blockID), executionSnapshots) -} diff --git a/storage/badger/operation/interactions_test.go b/storage/badger/operation/interactions_test.go deleted file mode 100644 index fd334c3a6b8..00000000000 --- a/storage/badger/operation/interactions_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/fvm/storage/snapshot" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestStateInteractionsInsertCheckRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - id1 := flow.NewRegisterID( - string([]byte("\x89krg\u007fBN\x1d\xf5\xfb\xb8r\xbc4\xbd\x98ռ\xf1\xd0twU\xbf\x16N\xb4?,\xa0&;")), - "") - id2 := flow.NewRegisterID(string([]byte{2}), "") - id3 := flow.NewRegisterID(string([]byte{3}), "") - - executionSnapshot := &snapshot.ExecutionSnapshot{ - ReadSet: map[flow.RegisterID]struct{}{ - id2: struct{}{}, - id3: struct{}{}, - }, - WriteSet: map[flow.RegisterID]flow.RegisterValue{ - id1: []byte("zażółć gęślą jaźń"), - id2: []byte("c"), - }, - } - - interactions := []*snapshot.ExecutionSnapshot{ - executionSnapshot, - &snapshot.ExecutionSnapshot{}, - } - - blockID := unittest.IdentifierFixture() - - err := db.Update(InsertExecutionStateInteractions(blockID, interactions)) - require.Nil(t, err) - - var readInteractions []*snapshot.ExecutionSnapshot - - err = db.View(RetrieveExecutionStateInteractions(blockID, &readInteractions)) - require.NoError(t, err) - - assert.Equal(t, interactions, readInteractions) - assert.Equal( - t, - executionSnapshot.WriteSet, - readInteractions[0].WriteSet) - assert.Equal( 
- t, - executionSnapshot.ReadSet, - readInteractions[0].ReadSet) - }) -} diff --git a/storage/badger/operation/jobs.go b/storage/badger/operation/jobs.go deleted file mode 100644 index 0f9eb3166ad..00000000000 --- a/storage/badger/operation/jobs.go +++ /dev/null @@ -1,43 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func RetrieveJobLatestIndex(queue string, index *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeJobQueuePointer, queue), index) -} - -func InitJobLatestIndex(queue string, index uint64) func(*badger.Txn) error { - return insert(makePrefix(codeJobQueuePointer, queue), index) -} - -func SetJobLatestIndex(queue string, index uint64) func(*badger.Txn) error { - return update(makePrefix(codeJobQueuePointer, queue), index) -} - -// RetrieveJobAtIndex returns the entity at the given index -func RetrieveJobAtIndex(queue string, index uint64, entity *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeJobQueue, queue, index), entity) -} - -// InsertJobAtIndex insert an entity ID at the given index -func InsertJobAtIndex(queue string, index uint64, entity flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeJobQueue, queue, index), entity) -} - -// RetrieveProcessedIndex returns the processed index for a job consumer -func RetrieveProcessedIndex(jobName string, processed *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeJobConsumerProcessed, jobName), processed) -} - -func InsertProcessedIndex(jobName string, processed uint64) func(*badger.Txn) error { - return insert(makePrefix(codeJobConsumerProcessed, jobName), processed) -} - -// SetProcessedIndex updates the processed index for a job consumer with given index -func SetProcessedIndex(jobName string, processed uint64) func(*badger.Txn) error { - return update(makePrefix(codeJobConsumerProcessed, jobName), processed) -} diff --git a/storage/badger/operation/modifiers.go b/storage/badger/operation/modifiers.go index 3965b5d204c..b8808e2b89b 100644 --- a/storage/badger/operation/modifiers.go +++ b/storage/badger/operation/modifiers.go @@ -21,6 +21,17 @@ func SkipDuplicates(op func(*badger.Txn) error) func(tx *badger.Txn) error { } } +func SkipDuplicatesTx(op func(*transaction.Tx) error) func(tx *transaction.Tx) error { + return func(tx *transaction.Tx) error { + err := op(tx) + if errors.Is(err, storage.ErrAlreadyExists) { + metrics.GetStorageCollector().SkipDuplicate() + return nil + } + return err + } +} + func SkipNonExist(op func(*badger.Txn) error) func(tx *badger.Txn) error { return func(tx *badger.Txn) error { err := op(tx) diff --git a/storage/badger/operation/modifiers_test.go b/storage/badger/operation/modifiers_test.go index ffeda8440ad..8824077c3c7 100644 --- a/storage/badger/operation/modifiers_test.go +++ b/storage/badger/operation/modifiers_test.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package operation import ( diff --git a/storage/badger/operation/prefix.go b/storage/badger/operation/prefix.go index 23daf37347d..a4d543a46e0 100644 --- a/storage/badger/operation/prefix.go +++ b/storage/badger/operation/prefix.go @@ -1,12 +1,7 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package operation import ( - "encoding/binary" - "fmt" - - "github.com/onflow/flow-go/model/flow" + op "github.com/onflow/flow-go/storage/operation" ) const ( @@ -16,126 +11,17 @@ const ( codeDBType = 2 // specifies a database type // codes for views with special 
meaning - codeSafetyData = 10 // safety data for hotstuff state - codeLivenessData = 11 // liveness data for hotstuff state - - // codes for fields associated with the root state - codeSporkID = 13 - codeProtocolVersion = 14 - codeEpochCommitSafetyThreshold = 15 - codeSporkRootBlockHeight = 16 - - // code for heights with special meaning - codeFinalizedHeight = 20 // latest finalized block height - codeSealedHeight = 21 // latest sealed block height - codeClusterHeight = 22 // latest finalized height on cluster - codeExecutedBlock = 23 // latest executed block with max height - codeRootHeight = 24 // the height of the highest block contained in the root snapshot - codeLastCompleteBlockHeight = 25 // the height of the last block for which all collections were received - codeEpochFirstHeight = 26 // the height of the first block in a given epoch - - // codes for single entity storage - // 31 was used for identities before epochs - codeHeader = 30 - codeGuarantee = 32 - codeSeal = 33 - codeTransaction = 34 - codeCollection = 35 - codeExecutionResult = 36 - codeExecutionReceiptMeta = 36 - codeResultApproval = 37 - codeChunk = 38 - - // codes for indexing single identifier by identifier/integeter - codeHeightToBlock = 40 // index mapping height to block ID - codeBlockIDToLatestSealID = 41 // index mapping a block its last payload seal - codeClusterBlockToRefBlock = 42 // index cluster block ID to reference block ID - codeRefHeightToClusterBlock = 43 // index reference block height to cluster block IDs - codeBlockIDToFinalizedSeal = 44 // index _finalized_ seal by sealed block ID - codeBlockIDToQuorumCertificate = 45 // index of quorum certificates by block ID - - // codes for indexing multiple identifiers by identifier - // NOTE: 51 was used for identity indexes before epochs - codeBlockChildren = 50 // index mapping block ID to children blocks - codePayloadGuarantees = 52 // index mapping block ID to payload guarantees - codePayloadSeals = 53 // index mapping block ID to payload seals - codeCollectionBlock = 54 // index mapping collection ID to block ID - codeOwnBlockReceipt = 55 // index mapping block ID to execution receipt ID for execution nodes - codeBlockEpochStatus = 56 // index mapping block ID to epoch status - codePayloadReceipts = 57 // index mapping block ID to payload receipts - codePayloadResults = 58 // index mapping block ID to payload results - codeAllBlockReceipts = 59 // index mapping of blockID to multiple receipts - // codes related to protocol level information - codeEpochSetup = 61 // EpochSetup service event, keyed by ID - codeEpochCommit = 62 // EpochCommit service event, keyed by ID codeBeaconPrivateKey = 63 // BeaconPrivateKey, keyed by epoch counter - codeDKGStarted = 64 // flag that the DKG for an epoch has been started - codeDKGEnded = 65 // flag that the DKG for an epoch has ended (stores end state) - codeVersionBeacon = 67 // flag for storing version beacons - - // code for ComputationResult upload status storage - // NOTE: for now only GCP uploader is supported. When other uploader (AWS e.g.) needs to - // be supported, we will need to define new code. 
- codeComputationResults = 66 - - // job queue consumers and producers - codeJobConsumerProcessed = 70 - codeJobQueue = 71 - codeJobQueuePointer = 72 - - // legacy codes (should be cleaned up) - codeChunkDataPack = 100 - codeCommit = 101 - codeEvent = 102 - codeExecutionStateInteractions = 103 - codeTransactionResult = 104 - codeFinalizedCluster = 105 - codeServiceEvent = 106 - codeTransactionResultIndex = 107 - codeIndexCollection = 200 - codeIndexExecutionResultByBlock = 202 - codeIndexCollectionByTransaction = 203 - codeIndexResultApprovalByChunk = 204 - - // TEMPORARY codes - blockedNodeIDs = 205 // manual override for adding node IDs to list of ejected nodes, applies to networking layer only - - // internal failure information that should be preserved across restarts - codeExecutionFork = 254 - codeEpochEmergencyFallbackTriggered = 255 + _ = 64 // DEPRECATED: flag that the DKG for an epoch has been started + _ = 65 // DEPRECATED: flag that the DKG for an epoch has ended (stores end state) + codeDKGState = 66 // current state of Recoverable Random Beacon State Machine for given epoch ) -func makePrefix(code byte, keys ...interface{}) []byte { - prefix := make([]byte, 1) - prefix[0] = code - for _, key := range keys { - prefix = append(prefix, b(key)...) - } - return prefix +func makePrefix(code byte, keys ...any) []byte { + return op.MakePrefix(code, keys...) } -func b(v interface{}) []byte { - switch i := v.(type) { - case uint8: - return []byte{i} - case uint32: - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, i) - return b - case uint64: - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, i) - return b - case string: - return []byte(i) - case flow.Role: - return []byte{byte(i)} - case flow.Identifier: - return i[:] - case flow.ChainID: - return []byte(i) - default: - panic(fmt.Sprintf("unsupported type to convert (%T)", v)) - } +func keyPartToBinary(v any) []byte { + return op.AppendPrefixKeyPart(nil, v) } diff --git a/storage/badger/operation/prefix_test.go b/storage/badger/operation/prefix_test.go index 4a2af4332e4..444311ece22 100644 --- a/storage/badger/operation/prefix_test.go +++ b/storage/badger/operation/prefix_test.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package operation import ( diff --git a/storage/badger/operation/qcs.go b/storage/badger/operation/qcs.go deleted file mode 100644 index 651a585b2b2..00000000000 --- a/storage/badger/operation/qcs.go +++ /dev/null @@ -1,19 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertQuorumCertificate inserts a quorum certificate by block ID. -// Returns storage.ErrAlreadyExists if a QC has already been inserted for the block. -func InsertQuorumCertificate(qc *flow.QuorumCertificate) func(*badger.Txn) error { - return insert(makePrefix(codeBlockIDToQuorumCertificate, qc.BlockID), qc) -} - -// RetrieveQuorumCertificate retrieves a quorum certificate by blockID. -// Returns storage.ErrNotFound if no QC is stored for the block. 
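[Editor's note] Regarding the prefix.go change above: makePrefix now delegates to op.MakePrefix, which presumably must keep producing byte-identical keys to the deleted b() helper, since entries already on disk have to remain readable. A standalone sketch of the layout for a uint64-keyed entry (helper name mine; the encoding is copied from the deleted b()):

package main

import (
	"encoding/binary"
	"fmt"
)

// makePrefixSketch mirrors the deleted makePrefix/b pair for the uint64 case:
// one code byte followed by the key part as 8 big-endian bytes.
func makePrefixSketch(code byte, epochCounter uint64) []byte {
	key := []byte{code}
	var part [8]byte
	binary.BigEndian.PutUint64(part[:], epochCounter)
	return append(key, part[:]...)
}

func main() {
	// codeDKGState = 66 (0x42) per the constant introduced above.
	fmt.Printf("% x\n", makePrefixSketch(66, 7)) // prints: 42 00 00 00 00 00 00 00 07
}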
-func RetrieveQuorumCertificate(blockID flow.Identifier, qc *flow.QuorumCertificate) func(*badger.Txn) error { - return retrieve(makePrefix(codeBlockIDToQuorumCertificate, blockID), qc) -} diff --git a/storage/badger/operation/qcs_test.go b/storage/badger/operation/qcs_test.go deleted file mode 100644 index 845f917f041..00000000000 --- a/storage/badger/operation/qcs_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestInsertQuorumCertificate(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.QuorumCertificateFixture() - - err := db.Update(InsertQuorumCertificate(expected)) - require.Nil(t, err) - - var actual flow.QuorumCertificate - err = db.View(RetrieveQuorumCertificate(expected.BlockID, &actual)) - require.Nil(t, err) - - assert.Equal(t, expected, &actual) - }) -} diff --git a/storage/badger/operation/receipts.go b/storage/badger/operation/receipts.go deleted file mode 100644 index 3dc923af8cb..00000000000 --- a/storage/badger/operation/receipts.go +++ /dev/null @@ -1,87 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertExecutionReceiptMeta inserts an execution receipt meta by ID. -func InsertExecutionReceiptMeta(receiptID flow.Identifier, meta *flow.ExecutionReceiptMeta) func(*badger.Txn) error { - return insert(makePrefix(codeExecutionReceiptMeta, receiptID), meta) -} - -// BatchInsertExecutionReceiptMeta inserts an execution receipt meta by ID. -// TODO: rename to BatchUpdate -func BatchInsertExecutionReceiptMeta(receiptID flow.Identifier, meta *flow.ExecutionReceiptMeta) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeExecutionReceiptMeta, receiptID), meta) -} - -// RetrieveExecutionReceipt retrieves a execution receipt meta by ID. -func RetrieveExecutionReceiptMeta(receiptID flow.Identifier, meta *flow.ExecutionReceiptMeta) func(*badger.Txn) error { - return retrieve(makePrefix(codeExecutionReceiptMeta, receiptID), meta) -} - -// IndexOwnExecutionReceipt inserts an execution receipt ID keyed by block ID -func IndexOwnExecutionReceipt(blockID flow.Identifier, receiptID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeOwnBlockReceipt, blockID), receiptID) -} - -// BatchIndexOwnExecutionReceipt inserts an execution receipt ID keyed by block ID into a batch -// TODO: rename to BatchUpdate -func BatchIndexOwnExecutionReceipt(blockID flow.Identifier, receiptID flow.Identifier) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeOwnBlockReceipt, blockID), receiptID) -} - -// LookupOwnExecutionReceipt finds execution receipt ID by block -func LookupOwnExecutionReceipt(blockID flow.Identifier, receiptID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeOwnBlockReceipt, blockID), receiptID) -} - -// RemoveOwnExecutionReceipt removes own execution receipt index by blockID -func RemoveOwnExecutionReceipt(blockID flow.Identifier) func(*badger.Txn) error { - return remove(makePrefix(codeOwnBlockReceipt, blockID)) -} - -// BatchRemoveOwnExecutionReceipt removes blockID-to-my-receiptID index entries keyed by a blockID in a provided batch. 
-// No errors are expected during normal operation, but it may return generic error -// if badger fails to process request -func BatchRemoveOwnExecutionReceipt(blockID flow.Identifier) func(batch *badger.WriteBatch) error { - return batchRemove(makePrefix(codeOwnBlockReceipt, blockID)) -} - -// IndexExecutionReceipts inserts an execution receipt ID keyed by block ID and receipt ID. -// one block could have multiple receipts, even if they are from the same executor -func IndexExecutionReceipts(blockID, receiptID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeAllBlockReceipts, blockID, receiptID), receiptID) -} - -// BatchIndexExecutionReceipts inserts an execution receipt ID keyed by block ID and receipt ID into a batch -func BatchIndexExecutionReceipts(blockID, receiptID flow.Identifier) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeAllBlockReceipts, blockID, receiptID), receiptID) -} - -// LookupExecutionReceipts finds all execution receipts by block ID -func LookupExecutionReceipts(blockID flow.Identifier, receiptIDs *[]flow.Identifier) func(*badger.Txn) error { - iterationFunc := receiptIterationFunc(receiptIDs) - return traverse(makePrefix(codeAllBlockReceipts, blockID), iterationFunc) -} - -// receiptIterationFunc returns an in iteration function which returns all receipt IDs found during traversal -func receiptIterationFunc(receiptIDs *[]flow.Identifier) func() (checkFunc, createFunc, handleFunc) { - check := func(key []byte) bool { - return true - } - - var receiptID flow.Identifier - create := func() interface{} { - return &receiptID - } - handle := func() error { - *receiptIDs = append(*receiptIDs, receiptID) - return nil - } - return func() (checkFunc, createFunc, handleFunc) { - return check, create, handle - } -} diff --git a/storage/badger/operation/receipts_test.go b/storage/badger/operation/receipts_test.go deleted file mode 100644 index 1c41f739ebb..00000000000 --- a/storage/badger/operation/receipts_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestReceipts_InsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - receipt := unittest.ExecutionReceiptFixture() - expected := receipt.Meta() - - err := db.Update(InsertExecutionReceiptMeta(receipt.ID(), expected)) - require.Nil(t, err) - - var actual flow.ExecutionReceiptMeta - err = db.View(RetrieveExecutionReceiptMeta(receipt.ID(), &actual)) - require.Nil(t, err) - - assert.Equal(t, expected, &actual) - }) -} - -func TestReceipts_Index(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - receipt := unittest.ExecutionReceiptFixture() - expected := receipt.ID() - blockID := receipt.ExecutionResult.BlockID - - err := db.Update(IndexOwnExecutionReceipt(blockID, expected)) - require.Nil(t, err) - - var actual flow.Identifier - err = db.View(LookupOwnExecutionReceipt(blockID, &actual)) - require.Nil(t, err) - - assert.Equal(t, expected, actual) - }) -} - -func TestReceipts_MultiIndex(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := []flow.Identifier{unittest.IdentifierFixture(), unittest.IdentifierFixture()} - blockID := unittest.IdentifierFixture() - - for _, id := range expected { - err := 
db.Update(IndexExecutionReceipts(blockID, id)) - require.Nil(t, err) - } - var actual []flow.Identifier - err := db.View(LookupExecutionReceipts(blockID, &actual)) - require.Nil(t, err) - - assert.ElementsMatch(t, expected, actual) - }) -} diff --git a/storage/badger/operation/results.go b/storage/badger/operation/results.go deleted file mode 100644 index 8e762cc5b41..00000000000 --- a/storage/badger/operation/results.go +++ /dev/null @@ -1,54 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertExecutionResult inserts an execution result by ID. -func InsertExecutionResult(result *flow.ExecutionResult) func(*badger.Txn) error { - return insert(makePrefix(codeExecutionResult, result.ID()), result) -} - -// BatchInsertExecutionResult inserts an execution result by ID. -func BatchInsertExecutionResult(result *flow.ExecutionResult) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeExecutionResult, result.ID()), result) -} - -// RetrieveExecutionResult retrieves a transaction by fingerprint. -func RetrieveExecutionResult(resultID flow.Identifier, result *flow.ExecutionResult) func(*badger.Txn) error { - return retrieve(makePrefix(codeExecutionResult, resultID), result) -} - -// IndexExecutionResult inserts an execution result ID keyed by block ID -func IndexExecutionResult(blockID flow.Identifier, resultID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeIndexExecutionResultByBlock, blockID), resultID) -} - -// ReindexExecutionResult updates mapping of an execution result ID keyed by block ID -func ReindexExecutionResult(blockID flow.Identifier, resultID flow.Identifier) func(*badger.Txn) error { - return update(makePrefix(codeIndexExecutionResultByBlock, blockID), resultID) -} - -// BatchIndexExecutionResult inserts an execution result ID keyed by block ID into a batch -func BatchIndexExecutionResult(blockID flow.Identifier, resultID flow.Identifier) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeIndexExecutionResultByBlock, blockID), resultID) -} - -// LookupExecutionResult finds execution result ID by block -func LookupExecutionResult(blockID flow.Identifier, resultID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeIndexExecutionResultByBlock, blockID), resultID) -} - -// RemoveExecutionResultIndex removes execution result indexed by the given blockID -func RemoveExecutionResultIndex(blockID flow.Identifier) func(*badger.Txn) error { - return remove(makePrefix(codeIndexExecutionResultByBlock, blockID)) -} - -// BatchRemoveExecutionResultIndex removes blockID-to-resultID index entries keyed by a blockID in a provided batch. -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
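[Editor's note] A note on the result-index helpers removed above: IndexExecutionResult is insert-backed, so writing a second mapping for the same block would return storage.ErrAlreadyExists under the conventions documented throughout this diff, while ReindexExecutionResult is the explicit overwrite path. A hypothetical test-style sketch of the distinction (test name mine; helpers as defined above):

package operation

import (
	"testing"

	"github.com/dgraph-io/badger/v2"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestReindexExecutionResultSketch(t *testing.T) {
	unittest.RunWithBadgerDB(t, func(db *badger.DB) {
		blockID := unittest.IdentifierFixture()
		result1 := unittest.IdentifierFixture()
		result2 := unittest.IdentifierFixture()

		// The first mapping is written with the insert-style helper.
		require.NoError(t, db.Update(IndexExecutionResult(blockID, result1)))

		// Remapping the block to a different result requires the update-style helper.
		require.NoError(t, db.Update(ReindexExecutionResult(blockID, result2)))

		var actual flow.Identifier
		require.NoError(t, db.View(LookupExecutionResult(blockID, &actual)))
		require.Equal(t, result2, actual)
	})
}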
-func BatchRemoveExecutionResultIndex(blockID flow.Identifier) func(*badger.WriteBatch) error { - return batchRemove(makePrefix(codeIndexExecutionResultByBlock, blockID)) -} diff --git a/storage/badger/operation/results_test.go b/storage/badger/operation/results_test.go deleted file mode 100644 index 3a3ea267037..00000000000 --- a/storage/badger/operation/results_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestResults_InsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.ExecutionResultFixture() - - err := db.Update(InsertExecutionResult(expected)) - require.Nil(t, err) - - var actual flow.ExecutionResult - err = db.View(RetrieveExecutionResult(expected.ID(), &actual)) - require.Nil(t, err) - - assert.Equal(t, expected, &actual) - }) -} diff --git a/storage/badger/operation/seals.go b/storage/badger/operation/seals.go deleted file mode 100644 index 961f9826e34..00000000000 --- a/storage/badger/operation/seals.go +++ /dev/null @@ -1,77 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func InsertSeal(sealID flow.Identifier, seal *flow.Seal) func(*badger.Txn) error { - return insert(makePrefix(codeSeal, sealID), seal) -} - -func RetrieveSeal(sealID flow.Identifier, seal *flow.Seal) func(*badger.Txn) error { - return retrieve(makePrefix(codeSeal, sealID), seal) -} - -func IndexPayloadSeals(blockID flow.Identifier, sealIDs []flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codePayloadSeals, blockID), sealIDs) -} - -func LookupPayloadSeals(blockID flow.Identifier, sealIDs *[]flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codePayloadSeals, blockID), sealIDs) -} - -func IndexPayloadReceipts(blockID flow.Identifier, receiptIDs []flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codePayloadReceipts, blockID), receiptIDs) -} - -func IndexPayloadResults(blockID flow.Identifier, resultIDs []flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codePayloadResults, blockID), resultIDs) -} - -func LookupPayloadReceipts(blockID flow.Identifier, receiptIDs *[]flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codePayloadReceipts, blockID), receiptIDs) -} - -func LookupPayloadResults(blockID flow.Identifier, resultIDs *[]flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codePayloadResults, blockID), resultIDs) -} - -// IndexLatestSealAtBlock persists the highest seal that was included in the fork up to (and including) blockID. -// In most cases, it is the highest seal included in this block's payload. However, if there are no -// seals in this block, sealID should reference the highest seal in blockID's ancestor. -func IndexLatestSealAtBlock(blockID flow.Identifier, sealID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeBlockIDToLatestSealID, blockID), sealID) -} - -// LookupLatestSealAtBlock finds the highest seal that was included in the fork up to (and including) blockID. -// In most cases, it is the highest seal included in this block's payload. 
However, if there are no -// seals in this block, sealID should reference the highest seal in blockID's ancestor. -func LookupLatestSealAtBlock(blockID flow.Identifier, sealID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeBlockIDToLatestSealID, blockID), &sealID) -} - -// IndexFinalizedSealByBlockID indexes the _finalized_ seal by the sealed block ID. -// Example: A <- B <- C(SealA) -// when block C is finalized, we create the index `A.ID->SealA.ID` -func IndexFinalizedSealByBlockID(sealedBlockID flow.Identifier, sealID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeBlockIDToFinalizedSeal, sealedBlockID), sealID) -} - -// LookupBySealedBlockID finds the seal for the given sealed block ID. -func LookupBySealedBlockID(sealedBlockID flow.Identifier, sealID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeBlockIDToFinalizedSeal, sealedBlockID), &sealID) -} - -func InsertExecutionForkEvidence(conflictingSeals []*flow.IncorporatedResultSeal) func(*badger.Txn) error { - return insert(makePrefix(codeExecutionFork), conflictingSeals) -} - -func RemoveExecutionForkEvidence() func(*badger.Txn) error { - return remove(makePrefix(codeExecutionFork)) -} - -func RetrieveExecutionForkEvidence(conflictingSeals *[]*flow.IncorporatedResultSeal) func(*badger.Txn) error { - return retrieve(makePrefix(codeExecutionFork), conflictingSeals) -} diff --git a/storage/badger/operation/seals_test.go b/storage/badger/operation/seals_test.go deleted file mode 100644 index 73846bbfbed..00000000000 --- a/storage/badger/operation/seals_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestSealInsertCheckRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.Seal.Fixture() - - err := db.Update(InsertSeal(expected.ID(), expected)) - require.Nil(t, err) - - var actual flow.Seal - err = db.View(RetrieveSeal(expected.ID(), &actual)) - require.Nil(t, err) - - assert.Equal(t, expected, &actual) - }) -} - -func TestSealIndexAndLookup(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - seal1 := unittest.Seal.Fixture() - seal2 := unittest.Seal.Fixture() - - seals := []*flow.Seal{seal1, seal2} - - blockID := flow.MakeID([]byte{0x42}) - - expected := []flow.Identifier(flow.GetIDs(seals)) - - err := db.Update(func(tx *badger.Txn) error { - for _, seal := range seals { - if err := InsertSeal(seal.ID(), seal)(tx); err != nil { - return err - } - } - if err := IndexPayloadSeals(blockID, expected)(tx); err != nil { - return err - } - return nil - }) - require.Nil(t, err) - - var actual []flow.Identifier - err = db.View(LookupPayloadSeals(blockID, &actual)) - require.Nil(t, err) - - assert.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/operation/spork.go b/storage/badger/operation/spork.go deleted file mode 100644 index 9f80afcddf9..00000000000 --- a/storage/badger/operation/spork.go +++ /dev/null @@ -1,59 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertSporkID inserts the spork ID for the present spork. 
A single database -// and protocol state instance spans at most one spork, so this is inserted -// exactly once, when bootstrapping the state. -func InsertSporkID(sporkID flow.Identifier) func(*badger.Txn) error { - return insert(makePrefix(codeSporkID), sporkID) -} - -// RetrieveSporkID retrieves the spork ID for the present spork. -func RetrieveSporkID(sporkID *flow.Identifier) func(*badger.Txn) error { - return retrieve(makePrefix(codeSporkID), sporkID) -} - -// InsertSporkRootBlockHeight inserts the spork root block height for the present spork. -// A single database and protocol state instance spans at most one spork, so this is inserted -// exactly once, when bootstrapping the state. -func InsertSporkRootBlockHeight(height uint64) func(*badger.Txn) error { - return insert(makePrefix(codeSporkRootBlockHeight), height) -} - -// RetrieveSporkRootBlockHeight retrieves the spork root block height for the present spork. -func RetrieveSporkRootBlockHeight(height *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeSporkRootBlockHeight), height) -} - -// InsertProtocolVersion inserts the protocol version for the present spork. -// A single database and protocol state instance spans at most one spork, and -// a spork has exactly one protocol version for its duration, so this is -// inserted exactly once, when bootstrapping the state. -func InsertProtocolVersion(version uint) func(*badger.Txn) error { - return insert(makePrefix(codeProtocolVersion), version) -} - -// RetrieveProtocolVersion retrieves the protocol version for the present spork. -func RetrieveProtocolVersion(version *uint) func(*badger.Txn) error { - return retrieve(makePrefix(codeProtocolVersion), version) -} - -// InsertEpochCommitSafetyThreshold inserts the epoch commit safety threshold -// for the present spork. -// A single database and protocol state instance spans at most one spork, and -// a spork has exactly one epoch commit safety threshold for its duration, so this is -// inserted exactly once, when bootstrapping the state. -func InsertEpochCommitSafetyThreshold(threshold uint64) func(*badger.Txn) error { - return insert(makePrefix(codeEpochCommitSafetyThreshold), threshold) -} - -// RetrieveEpochCommitSafetyThreshold retrieves the epoch commit safety threshold -// for the present spork.
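Because each of these values is written exactly once at bootstrap, a state-bootstrapping routine would plausibly group the writes into a single transaction. A hedged sketch, where `sporkID`, `rootHeight`, `version`, and `threshold` are assumed to come from the bootstrap configuration:

```go
// Sketch only: one-shot writes of spork-scoped metadata at bootstrap time.
// All input values are assumed to be supplied by the bootstrap configuration.
err := db.Update(func(tx *badger.Txn) error {
	if err := InsertSporkID(sporkID)(tx); err != nil {
		return err
	}
	if err := InsertSporkRootBlockHeight(rootHeight)(tx); err != nil {
		return err
	}
	if err := InsertProtocolVersion(version)(tx); err != nil {
		return err
	}
	return InsertEpochCommitSafetyThreshold(threshold)(tx)
})
```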
-func RetrieveEpochCommitSafetyThreshold(threshold *uint64) func(*badger.Txn) error { - return retrieve(makePrefix(codeEpochCommitSafetyThreshold), threshold) -} diff --git a/storage/badger/operation/spork_test.go b/storage/badger/operation/spork_test.go deleted file mode 100644 index a000df60561..00000000000 --- a/storage/badger/operation/spork_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package operation - -import ( - "math/rand" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestSporkID_InsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - sporkID := unittest.IdentifierFixture() - - err := db.Update(InsertSporkID(sporkID)) - require.NoError(t, err) - - var actual flow.Identifier - err = db.View(RetrieveSporkID(&actual)) - require.NoError(t, err) - - assert.Equal(t, sporkID, actual) - }) -} - -func TestProtocolVersion_InsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - version := uint(rand.Uint32()) - - err := db.Update(InsertProtocolVersion(version)) - require.NoError(t, err) - - var actual uint - err = db.View(RetrieveProtocolVersion(&actual)) - require.NoError(t, err) - - assert.Equal(t, version, actual) - }) -} - -// TestEpochCommitSafetyThreshold_InsertRetrieve tests that we can insert and -// retrieve epoch commit safety threshold values. -func TestEpochCommitSafetyThreshold_InsertRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - threshold := rand.Uint64() - - err := db.Update(InsertEpochCommitSafetyThreshold(threshold)) - require.NoError(t, err) - - var actual uint64 - err = db.View(RetrieveEpochCommitSafetyThreshold(&actual)) - require.NoError(t, err) - - assert.Equal(t, threshold, actual) - }) -} diff --git a/storage/badger/operation/transaction_results.go b/storage/badger/operation/transaction_results.go deleted file mode 100644 index f60fdbbf7f3..00000000000 --- a/storage/badger/operation/transaction_results.go +++ /dev/null @@ -1,103 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package operation - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -func InsertTransactionResult(blockID flow.Identifier, transactionResult *flow.TransactionResult) func(*badger.Txn) error { - return insert(makePrefix(codeTransactionResult, blockID, transactionResult.TransactionID), transactionResult) -} - -func BatchInsertTransactionResult(blockID flow.Identifier, transactionResult *flow.TransactionResult) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeTransactionResult, blockID, transactionResult.TransactionID), transactionResult) -} - -func BatchIndexTransactionResult(blockID flow.Identifier, txIndex uint32, transactionResult *flow.TransactionResult) func(batch *badger.WriteBatch) error { - return batchWrite(makePrefix(codeTransactionResultIndex, blockID, txIndex), transactionResult) -} - -func RetrieveTransactionResult(blockID flow.Identifier, transactionID flow.Identifier, transactionResult *flow.TransactionResult) func(*badger.Txn) error { - return retrieve(makePrefix(codeTransactionResult, blockID, transactionID), transactionResult) -} -func RetrieveTransactionResultByIndex(blockID flow.Identifier, txIndex uint32, transactionResult *flow.TransactionResult) func(*badger.Txn) error { - return retrieve(makePrefix(codeTransactionResultIndex, 
blockID, txIndex), transactionResult) -} - -func LookupTransactionResultsByBlockID(blockID flow.Identifier, txResults *[]flow.TransactionResult) func(*badger.Txn) error { - - txErrIterFunc := func() (checkFunc, createFunc, handleFunc) { - check := func(_ []byte) bool { - return true - } - var val flow.TransactionResult - create := func() interface{} { - return &val - } - handle := func() error { - *txResults = append(*txResults, val) - return nil - } - return check, create, handle - } - - return traverse(makePrefix(codeTransactionResult, blockID), txErrIterFunc) -} - -// LookupTransactionResultsByBlockIDUsingIndex retrieves all tx results for a block, but using the -// tx_index index. This correctly handles cases of duplicate transactions within a block, and should -// eventually replace uses of LookupTransactionResultsByBlockID. -func LookupTransactionResultsByBlockIDUsingIndex(blockID flow.Identifier, txResults *[]flow.TransactionResult) func(*badger.Txn) error { - - txErrIterFunc := func() (checkFunc, createFunc, handleFunc) { - check := func(_ []byte) bool { - return true - } - var val flow.TransactionResult - create := func() interface{} { - return &val - } - handle := func() error { - *txResults = append(*txResults, val) - return nil - } - return check, create, handle - } - - return traverse(makePrefix(codeTransactionResultIndex, blockID), txErrIterFunc) -} - -// RemoveTransactionResultsByBlockID removes the transaction results for the given blockID -func RemoveTransactionResultsByBlockID(blockID flow.Identifier) func(*badger.Txn) error { - return func(txn *badger.Txn) error { - - prefix := makePrefix(codeTransactionResult, blockID) - err := removeByPrefix(prefix)(txn) - if err != nil { - return fmt.Errorf("could not remove transaction results for block %v: %w", blockID, err) - } - - return nil - } -} - -// BatchRemoveTransactionResultsByBlockID removes transaction results for the given blockID in a provided batch. -// No errors are expected during normal operation, but a generic error may be returned -// if Badger fails to process the request. -func BatchRemoveTransactionResultsByBlockID(blockID flow.Identifier, batch *badger.WriteBatch) func(*badger.Txn) error { - return func(txn *badger.Txn) error { - - prefix := makePrefix(codeTransactionResult, blockID) - err := batchRemoveByPrefix(prefix)(txn, batch) - if err != nil { - return fmt.Errorf("could not remove transaction results for block %v: %w", blockID, err) - } - - return nil - } -} diff --git a/storage/badger/operation/transactions.go b/storage/badger/operation/transactions.go deleted file mode 100644 index 1ad372bc6a7..00000000000 --- a/storage/badger/operation/transactions.go +++ /dev/null @@ -1,17 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// InsertTransaction inserts a transaction keyed by transaction fingerprint. -func InsertTransaction(txID flow.Identifier, tx *flow.TransactionBody) func(*badger.Txn) error { - return insert(makePrefix(codeTransaction, txID), tx) -} - -// RetrieveTransaction retrieves a transaction by fingerprint.
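On the read side, the traversal-based lookups collect every value sharing the block-scoped key prefix. A sketch of that read path (assuming a populated database and a hypothetical `blockID`):

```go
// Sketch: fetch all transaction results for a block via the tx-index keyed
// entries, which tolerates duplicate transactions within a block.
var txResults []flow.TransactionResult
err := db.View(LookupTransactionResultsByBlockIDUsingIndex(blockID, &txResults))
```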
-func RetrieveTransaction(txID flow.Identifier, tx *flow.TransactionBody) func(*badger.Txn) error { - return retrieve(makePrefix(codeTransaction, txID), tx) -} diff --git a/storage/badger/operation/transactions_test.go b/storage/badger/operation/transactions_test.go deleted file mode 100644 index f3b34f7d0ff..00000000000 --- a/storage/badger/operation/transactions_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestTransactions(t *testing.T) { - - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - expected := unittest.TransactionFixture() - err := db.Update(InsertTransaction(expected.ID(), &expected.TransactionBody)) - require.Nil(t, err) - - var actual flow.Transaction - err = db.View(RetrieveTransaction(expected.ID(), &actual.TransactionBody)) - require.Nil(t, err) - assert.Equal(t, expected, actual) - }) -} diff --git a/storage/badger/operation/version_beacon.go b/storage/badger/operation/version_beacon.go deleted file mode 100644 index 69c1b2e6849..00000000000 --- a/storage/badger/operation/version_beacon.go +++ /dev/null @@ -1,31 +0,0 @@ -package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" -) - -// IndexVersionBeaconByHeight stores a sealed version beacon indexed by -// flow.SealedVersionBeacon.SealHeight. -// -// No errors are expected during normal operation. -func IndexVersionBeaconByHeight( - beacon flow.SealedVersionBeacon, -) func(*badger.Txn) error { - return upsert(makePrefix(codeVersionBeacon, beacon.SealHeight), beacon) -} - -// LookupLastVersionBeaconByHeight finds the highest flow.VersionBeacon but no higher -// than maxHeight. Returns storage.ErrNotFound if no version beacon exists at or below -// the given height. 
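Since the beacon index is keyed by seal height, the natural query is "the last beacon sealed at or below height h". A hedged sketch of that read path, where `maxHeight` is a caller-supplied value and the `errors` package is assumed to be imported:

```go
// Sketch: find the most recent version beacon sealed at or below maxHeight.
// The underlying lookup yields storage.ErrNotFound when none exists.
var beacon flow.SealedVersionBeacon
err := db.View(LookupLastVersionBeaconByHeight(maxHeight, &beacon))
if errors.Is(err, storage.ErrNotFound) {
	// no beacon sealed at or below maxHeight; caller decides the fallback
}
```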
-func LookupLastVersionBeaconByHeight( - maxHeight uint64, - versionBeacon *flow.SealedVersionBeacon, -) func(*badger.Txn) error { - return findHighestAtOrBelow( - makePrefix(codeVersionBeacon), - maxHeight, - versionBeacon, - ) -} diff --git a/storage/badger/operation/version_beacon_test.go b/storage/badger/operation/version_beacon_test.go deleted file mode 100644 index 0ca96f7ed88..00000000000 --- a/storage/badger/operation/version_beacon_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package operation - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestVersionBeacons_IndexAndLookupByHeight(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - height1 := uint64(21) - height2 := uint64(37) - height3 := uint64(55) - vb1 := flow.SealedVersionBeacon{ - VersionBeacon: unittest.VersionBeaconFixture( - unittest.WithBoundaries( - flow.VersionBoundary{ - Version: "1.0.0", - BlockHeight: height1 + 5, - }, - ), - ), - SealHeight: height1, - } - vb2 := flow.SealedVersionBeacon{ - VersionBeacon: unittest.VersionBeaconFixture( - unittest.WithBoundaries( - flow.VersionBoundary{ - Version: "1.1.0", - BlockHeight: height2 + 5, - }, - ), - ), - SealHeight: height2, - } - vb3 := flow.SealedVersionBeacon{ - VersionBeacon: unittest.VersionBeaconFixture( - unittest.WithBoundaries( - flow.VersionBoundary{ - Version: "2.0.0", - BlockHeight: height3 + 5, - }, - ), - ), - SealHeight: height3, - } - - // indexing 3 version beacons at different heights - err := db.Update(IndexVersionBeaconByHeight(vb1)) - require.NoError(t, err) - - err = db.Update(IndexVersionBeaconByHeight(vb2)) - require.NoError(t, err) - - err = db.Update(IndexVersionBeaconByHeight(vb3)) - require.NoError(t, err) - - // index version beacon 2 again to make sure we tolerate duplicates - // it is possible for two or more events of the same type to be from the same height - err = db.Update(IndexVersionBeaconByHeight(vb2)) - require.NoError(t, err) - - t.Run("retrieve exact height match", func(t *testing.T) { - var actualVB flow.SealedVersionBeacon - err := db.View(LookupLastVersionBeaconByHeight(height1, &actualVB)) - require.NoError(t, err) - require.Equal(t, vb1, actualVB) - - err = db.View(LookupLastVersionBeaconByHeight(height2, &actualVB)) - require.NoError(t, err) - require.Equal(t, vb2, actualVB) - - err = db.View(LookupLastVersionBeaconByHeight(height3, &actualVB)) - require.NoError(t, err) - require.Equal(t, vb3, actualVB) - }) - - t.Run("finds highest but not higher than given", func(t *testing.T) { - var actualVB flow.SealedVersionBeacon - - err := db.View(LookupLastVersionBeaconByHeight(height3-1, &actualVB)) - require.NoError(t, err) - require.Equal(t, vb2, actualVB) - }) - - t.Run("finds highest", func(t *testing.T) { - var actualVB flow.SealedVersionBeacon - - err := db.View(LookupLastVersionBeaconByHeight(height3+1, &actualVB)) - require.NoError(t, err) - require.Equal(t, vb3, actualVB) - }) - - t.Run("height below lowest entry returns nothing", func(t *testing.T) { - var actualVB flow.SealedVersionBeacon - - err := db.View(LookupLastVersionBeaconByHeight(height1-1, &actualVB)) - require.ErrorIs(t, err, storage.ErrNotFound) - }) - }) -} diff --git a/storage/badger/operation/views.go b/storage/badger/operation/views.go deleted file mode 100644 index 21f31316f1f..00000000000 --- a/storage/badger/operation/views.go +++ /dev/null @@ -1,38 +0,0 @@
-package operation - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/consensus/hotstuff" - "github.com/onflow/flow-go/model/flow" -) - -// InsertSafetyData inserts safety data into the database. -func InsertSafetyData(chainID flow.ChainID, safetyData *hotstuff.SafetyData) func(*badger.Txn) error { - return insert(makePrefix(codeSafetyData, chainID), safetyData) -} - -// UpdateSafetyData updates safety data in the database. -func UpdateSafetyData(chainID flow.ChainID, safetyData *hotstuff.SafetyData) func(*badger.Txn) error { - return update(makePrefix(codeSafetyData, chainID), safetyData) -} - -// RetrieveSafetyData retrieves safety data from the database. -func RetrieveSafetyData(chainID flow.ChainID, safetyData *hotstuff.SafetyData) func(*badger.Txn) error { - return retrieve(makePrefix(codeSafetyData, chainID), safetyData) -} - -// InsertLivenessData inserts liveness data into the database. -func InsertLivenessData(chainID flow.ChainID, livenessData *hotstuff.LivenessData) func(*badger.Txn) error { - return insert(makePrefix(codeLivenessData, chainID), livenessData) -} - -// UpdateLivenessData updates liveness data in the database. -func UpdateLivenessData(chainID flow.ChainID, livenessData *hotstuff.LivenessData) func(*badger.Txn) error { - return update(makePrefix(codeLivenessData, chainID), livenessData) -} - -// RetrieveLivenessData retrieves liveness data from the database. -func RetrieveLivenessData(chainID flow.ChainID, livenessData *hotstuff.LivenessData) func(*badger.Txn) error { - return retrieve(makePrefix(codeLivenessData, chainID), livenessData) -} diff --git a/storage/badger/payloads.go b/storage/badger/payloads.go deleted file mode 100644 index ec75103cde3..00000000000 --- a/storage/badger/payloads.go +++ /dev/null @@ -1,165 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type Payloads struct { - db *badger.DB - index *Index - guarantees *Guarantees - seals *Seals - receipts *ExecutionReceipts - results *ExecutionResults -} - -func NewPayloads(db *badger.DB, index *Index, guarantees *Guarantees, seals *Seals, receipts *ExecutionReceipts, - results *ExecutionResults) *Payloads { - - p := &Payloads{ - db: db, - index: index, - guarantees: guarantees, - seals: seals, - receipts: receipts, - results: results, - } - - return p -} - -func (p *Payloads) storeTx(blockID flow.Identifier, payload *flow.Payload) func(*transaction.Tx) error { - // For correct payloads, the execution result is part of the payload, or it is already stored - // in storage. If the execution result is not present in either of those places, we return an error. - // ATTENTION: this is unnecessarily complex if we have an execution receipt which points to an execution result - // that is not included in the current payload but was incorporated in one of the previous blocks.
- - return func(tx *transaction.Tx) error { - - resultsByID := payload.Results.Lookup() - fullReceipts := make([]*flow.ExecutionReceipt, 0, len(payload.Receipts)) - var err error - for _, meta := range payload.Receipts { - result, ok := resultsByID[meta.ResultID] - if !ok { - result, err = p.results.ByIDTx(meta.ResultID)(tx) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - err = fmt.Errorf("invalid payload referencing unknown execution result %v, err: %w", meta.ResultID, err) - } - return err - } - } - fullReceipts = append(fullReceipts, flow.ExecutionReceiptFromMeta(*meta, *result)) - } - - // make sure all payload guarantees are stored - for _, guarantee := range payload.Guarantees { - err := p.guarantees.storeTx(guarantee)(tx) - if err != nil { - return fmt.Errorf("could not store guarantee: %w", err) - } - } - - // make sure all payload seals are stored - for _, seal := range payload.Seals { - err := p.seals.storeTx(seal)(tx) - if err != nil { - return fmt.Errorf("could not store seal: %w", err) - } - } - - // store all payload receipts - for _, receipt := range fullReceipts { - err := p.receipts.storeTx(receipt)(tx) - if err != nil { - return fmt.Errorf("could not store receipt: %w", err) - } - } - - // store the index - err = p.index.storeTx(blockID, payload.Index())(tx) - if err != nil { - return fmt.Errorf("could not store index: %w", err) - } - - return nil - } -} - -func (p *Payloads) retrieveTx(blockID flow.Identifier) func(tx *badger.Txn) (*flow.Payload, error) { - return func(tx *badger.Txn) (*flow.Payload, error) { - - // retrieve the index - idx, err := p.index.retrieveTx(blockID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve index: %w", err) - } - - // retrieve guarantees - guarantees := make([]*flow.CollectionGuarantee, 0, len(idx.CollectionIDs)) - for _, collID := range idx.CollectionIDs { - guarantee, err := p.guarantees.retrieveTx(collID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve guarantee (%x): %w", collID, err) - } - guarantees = append(guarantees, guarantee) - } - - // retrieve seals - seals := make([]*flow.Seal, 0, len(idx.SealIDs)) - for _, sealID := range idx.SealIDs { - seal, err := p.seals.retrieveTx(sealID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve seal (%x): %w", sealID, err) - } - seals = append(seals, seal) - } - - // retrieve receipts - receipts := make([]*flow.ExecutionReceiptMeta, 0, len(idx.ReceiptIDs)) - for _, recID := range idx.ReceiptIDs { - receipt, err := p.receipts.byID(recID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve receipt %x: %w", recID, err) - } - receipts = append(receipts, receipt.Meta()) - } - - // retrieve results - results := make([]*flow.ExecutionResult, 0, len(idx.ResultIDs)) - for _, resID := range idx.ResultIDs { - result, err := p.results.byID(resID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve result %x: %w", resID, err) - } - results = append(results, result) - } - payload := &flow.Payload{ - Seals: seals, - Guarantees: guarantees, - Receipts: receipts, - Results: results, - } - - return payload, nil - } -} - -func (p *Payloads) Store(blockID flow.Identifier, payload *flow.Payload) error { - return operation.RetryOnConflictTx(p.db, transaction.Update, p.storeTx(blockID, payload)) -} - -func (p *Payloads) ByBlockID(blockID flow.Identifier) (*flow.Payload, error) { - tx := p.db.NewTransaction(false) - defer tx.Discard() - return p.retrieveTx(blockID)(tx) -} diff --git a/storage/badger/payloads_test.go 
b/storage/badger/payloads_test.go deleted file mode 100644 index cb11074f88b..00000000000 --- a/storage/badger/payloads_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package badger_test - -import ( - "errors" - - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestPayloadStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - - index := badgerstorage.NewIndex(metrics, db) - seals := badgerstorage.NewSeals(metrics, db) - guarantees := badgerstorage.NewGuarantees(metrics, db, badgerstorage.DefaultCacheSize) - results := badgerstorage.NewExecutionResults(metrics, db) - receipts := badgerstorage.NewExecutionReceipts(metrics, db, results, badgerstorage.DefaultCacheSize) - store := badgerstorage.NewPayloads(db, index, guarantees, seals, receipts, results) - - blockID := unittest.IdentifierFixture() - expected := unittest.PayloadFixture(unittest.WithAllTheFixins) - - // store payload - err := store.Store(blockID, &expected) - require.NoError(t, err) - - // fetch payload - payload, err := store.ByBlockID(blockID) - require.NoError(t, err) - require.Equal(t, &expected, payload) - }) -} - -func TestPayloadRetrieveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - - index := badgerstorage.NewIndex(metrics, db) - seals := badgerstorage.NewSeals(metrics, db) - guarantees := badgerstorage.NewGuarantees(metrics, db, badgerstorage.DefaultCacheSize) - results := badgerstorage.NewExecutionResults(metrics, db) - receipts := badgerstorage.NewExecutionReceipts(metrics, db, results, badgerstorage.DefaultCacheSize) - store := badgerstorage.NewPayloads(db, index, guarantees, seals, receipts, results) - - blockID := unittest.IdentifierFixture() - - _, err := store.ByBlockID(blockID) - require.True(t, errors.Is(err, storage.ErrNotFound)) - }) -} diff --git a/storage/badger/procedure/children.go b/storage/badger/procedure/children.go deleted file mode 100644 index e95412f6403..00000000000 --- a/storage/badger/procedure/children.go +++ /dev/null @@ -1,82 +0,0 @@ -package procedure - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" -) - -// IndexNewBlock will add a parent-child index for the new block. -// - Each block has a parent; we use this parent-child relationship to build a reverse index -// - for looking up the child blocks of a given block. This is useful for fork recovery, -// where we want to find all the pending child blocks for the latest finalized block. -// -// When adding the parent-child index for a new block, we will add two indexes: -// 1. since it's a new block, the new block should have no child, so we add an empty -// index for the new block. Note: it's impossible for a block to already exist whose parent is the -// new block. -// 2. since the parent block has this new block as a child, we add an index for that. -// There are two special cases for (2): -// - if the parent block is zero, then we don't need to add this index.
-// - if the parent block doesn't exist, then we will insert the child index instead of updating it. -func IndexNewBlock(blockID flow.Identifier, parentID flow.Identifier) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - // Step 1: index the child for the new block. - // the new block has no child, so we add an empty child index for it - err := operation.InsertBlockChildren(blockID, nil)(tx) - if err != nil { - return fmt.Errorf("could not insert empty block children: %w", err) - } - - // Step 2: add the second index for the parent block - // if the parent block is zero (for instance, the root block has no parent), - // then there is no need to add an index for it - if parentID == flow.ZeroID { - return nil - } - - // if the parent block is not zero, depending on whether the parent block has - // children or not, we will either update the index or insert the index: - // when the parent block doesn't exist, we insert the block children; - // when the parent block already exists, we update the block children. - var childrenIDs flow.IdentifierList - err = operation.RetrieveBlockChildren(parentID, &childrenIDs)(tx) - - var saveIndex func(blockID flow.Identifier, childrenIDs flow.IdentifierList) func(*badger.Txn) error - if errors.Is(err, storage.ErrNotFound) { - saveIndex = operation.InsertBlockChildren - } else if err != nil { - return fmt.Errorf("could not look up block children: %w", err) - } else { // err == nil - saveIndex = operation.UpdateBlockChildren - } - - // check we don't add a duplicate - for _, dupID := range childrenIDs { - if blockID == dupID { - return storage.ErrAlreadyExists - } - } - - // add the new block as another child of the parent - childrenIDs = append(childrenIDs, blockID) - - // save the index - err = saveIndex(parentID, childrenIDs)(tx) - if err != nil { - return fmt.Errorf("could not update children index: %w", err) - } - - return nil - } -} - -// LookupBlockChildren looks up the IDs of all child blocks of the given parent block.
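Taken together, IndexNewBlock and LookupBlockChildren support the round trip that the tests below exercise. A condensed sketch, with `childID` and `parentID` assumed to be fixture values:

```go
// Sketch: index a new block under its parent, then recover the children.
if err := db.Update(IndexNewBlock(childID, parentID)); err != nil {
	return err
}
var children flow.IdentifierList
if err := db.View(LookupBlockChildren(parentID, &children)); err != nil {
	return err
}
// children now ends with childID, after any previously indexed siblings.
```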
-func LookupBlockChildren(blockID flow.Identifier, childrenIDs *flow.IdentifierList) func(tx *badger.Txn) error { - return operation.RetrieveBlockChildren(blockID, childrenIDs) -} diff --git a/storage/badger/procedure/children_test.go b/storage/badger/procedure/children_test.go deleted file mode 100644 index 9cf6a71773f..00000000000 --- a/storage/badger/procedure/children_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package procedure_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/procedure" - "github.com/onflow/flow-go/utils/unittest" -) - -// after indexing a block by its parent, we should be able to retrieve the child block by the parentID -func TestIndexAndLookupChild(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - parentID := unittest.IdentifierFixture() - childID := unittest.IdentifierFixture() - - err := db.Update(procedure.IndexNewBlock(childID, parentID)) - require.NoError(t, err) - - // retrieve child - var retrievedIDs flow.IdentifierList - err = db.View(procedure.LookupBlockChildren(parentID, &retrievedIDs)) - require.NoError(t, err) - - // retrieved child should be the stored child - require.Equal(t, flow.IdentifierList{childID}, retrievedIDs) - }) -} - -// if two blocks connect to the same parent, indexing the second block appends it, -// so retrieving the children of the parent block returns both blocks in the order -// they were indexed. -func TestIndexTwiceAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - parentID := unittest.IdentifierFixture() - child1ID := unittest.IdentifierFixture() - child2ID := unittest.IdentifierFixture() - - // index the first child - err := db.Update(procedure.IndexNewBlock(child1ID, parentID)) - require.NoError(t, err) - - // index the second child - err = db.Update(procedure.IndexNewBlock(child2ID, parentID)) - require.NoError(t, err) - - var retrievedIDs flow.IdentifierList - err = db.View(procedure.LookupBlockChildren(parentID, &retrievedIDs)) - require.NoError(t, err) - - require.Equal(t, flow.IdentifierList{child1ID, child2ID}, retrievedIDs) - }) -} - -// if the parent is zero, then we don't index it -func TestIndexZeroParent(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - childID := unittest.IdentifierFixture() - - err := db.Update(procedure.IndexNewBlock(childID, flow.ZeroID)) - require.NoError(t, err) - - // zero id should have no children - var retrievedIDs flow.IdentifierList - err = db.View(procedure.LookupBlockChildren(flow.ZeroID, &retrievedIDs)) - require.True(t, errors.Is(err, storage.ErrNotFound)) - }) -} - -// looking up block children will only return direct children -func TestDirectChildren(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - b1 := unittest.IdentifierFixture() - b2 := unittest.IdentifierFixture() - b3 := unittest.IdentifierFixture() - b4 := unittest.IdentifierFixture() - - err := db.Update(procedure.IndexNewBlock(b2, b1)) - require.NoError(t, err) - - err = db.Update(procedure.IndexNewBlock(b3, b2)) - require.NoError(t, err) - - err = db.Update(procedure.IndexNewBlock(b4, b3)) - require.NoError(t, err) - - // check the children of the first block - var retrievedIDs flow.IdentifierList - - err = db.View(procedure.LookupBlockChildren(b1, &retrievedIDs)) - require.NoError(t, err) - require.Equal(t, flow.IdentifierList{b2}, retrievedIDs)
- - err = db.View(procedure.LookupBlockChildren(b2, &retrievedIDs)) - require.NoError(t, err) - require.Equal(t, flow.IdentifierList{b3}, retrievedIDs) - - err = db.View(procedure.LookupBlockChildren(b3, &retrievedIDs)) - require.NoError(t, err) - require.Equal(t, flow.IdentifierList{b4}, retrievedIDs) - - err = db.View(procedure.LookupBlockChildren(b4, &retrievedIDs)) - require.NoError(t, err) - require.Nil(t, retrievedIDs) - }) -} diff --git a/storage/badger/procedure/cluster.go b/storage/badger/procedure/cluster.go deleted file mode 100644 index f51c8597938..00000000000 --- a/storage/badger/procedure/cluster.go +++ /dev/null @@ -1,225 +0,0 @@ -package procedure - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/cluster" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" -) - -// This file implements storage functions for blocks in cluster consensus. - -// InsertClusterBlock inserts a cluster consensus block, updating all -// associated indexes. -func InsertClusterBlock(block *cluster.Block) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // check payload integrity - if block.Header.PayloadHash != block.Payload.Hash() { - return fmt.Errorf("computed payload hash does not match header") - } - - // store the block header - blockID := block.ID() - err := operation.InsertHeader(blockID, block.Header)(tx) - if err != nil { - return fmt.Errorf("could not insert header: %w", err) - } - - // insert the block payload - err = InsertClusterPayload(blockID, block.Payload)(tx) - if err != nil { - return fmt.Errorf("could not insert payload: %w", err) - } - - // index the child block for recovery - err = IndexNewBlock(blockID, block.Header.ParentID)(tx) - if err != nil { - return fmt.Errorf("could not index new block: %w", err) - } - return nil - } -} - -// RetrieveClusterBlock retrieves a cluster consensus block by block ID. -func RetrieveClusterBlock(blockID flow.Identifier, block *cluster.Block) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // retrieve the block header - var header flow.Header - err := operation.RetrieveHeader(blockID, &header)(tx) - if err != nil { - return fmt.Errorf("could not retrieve header: %w", err) - } - - // retrieve payload - var payload cluster.Payload - err = RetrieveClusterPayload(blockID, &payload)(tx) - if err != nil { - return fmt.Errorf("could not retrieve payload: %w", err) - } - - // overwrite block - *block = cluster.Block{ - Header: &header, - Payload: &payload, - } - - return nil - } -} - -// RetrieveLatestFinalizedClusterHeader retrieves the latest finalized header for the -// given cluster chain ID. -func RetrieveLatestFinalizedClusterHeader(chainID flow.ChainID, final *flow.Header) func(tx *badger.Txn) error { - return func(tx *badger.Txn) error { - var boundary uint64 - err := operation.RetrieveClusterFinalizedHeight(chainID, &boundary)(tx) - if err != nil { - return fmt.Errorf("could not retrieve boundary: %w", err) - } - - var finalID flow.Identifier - err = operation.LookupClusterBlockHeight(chainID, boundary, &finalID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve final ID: %w", err) - } - - err = operation.RetrieveHeader(finalID, final)(tx) - if err != nil { - return fmt.Errorf("could not retrieve finalized header: %w", err) - } - - return nil - } -} - -// FinalizeClusterBlock finalizes a block in cluster consensus.
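Reading the finalized tip of a cluster chain is thus a two-hop lookup: finalized boundary height, then the block ID at that height, then the header. A caller-side sketch (`chainID` assumed known):

```go
// Sketch: resolve the latest finalized header of a cluster chain.
var final flow.Header
err := db.View(RetrieveLatestFinalizedClusterHeader(chainID, &final))
```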
-func FinalizeClusterBlock(blockID flow.Identifier) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // retrieve the header to check the parent - var header flow.Header - err := operation.RetrieveHeader(blockID, &header)(tx) - if err != nil { - return fmt.Errorf("could not retrieve header: %w", err) - } - - // get the chain ID, which determines which cluster state to query - chainID := header.ChainID - - // retrieve the current finalized state boundary - var boundary uint64 - err = operation.RetrieveClusterFinalizedHeight(chainID, &boundary)(tx) - if err != nil { - return fmt.Errorf("could not retrieve boundary: %w", err) - } - - // retrieve the ID of the boundary head - var headID flow.Identifier - err = operation.LookupClusterBlockHeight(chainID, boundary, &headID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve head: %w", err) - } - - // check that the head ID is the parent of the block we finalize - if header.ParentID != headID { - return fmt.Errorf("can't finalize non-child of chain head") - } - - // insert block height -> ID mapping - err = operation.IndexClusterBlockHeight(chainID, header.Height, header.ID())(tx) - if err != nil { - return fmt.Errorf("could not insert height->ID mapping: %w", err) - } - - // update the finalized boundary - err = operation.UpdateClusterFinalizedHeight(chainID, header.Height)(tx) - if err != nil { - return fmt.Errorf("could not update finalized boundary: %w", err) - } - - // NOTE: we don't want to prune forks that have become invalid here, so - // that we can keep validating entities and generating slashing - // challenges for some time - the pruning should happen some place else - // after a certain delay of blocks - - return nil - } -} - -// InsertClusterPayload inserts the payload for a cluster block. It inserts -// both the collection and all constituent transactions, allowing duplicates. -func InsertClusterPayload(blockID flow.Identifier, payload *cluster.Payload) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // cluster payloads only contain a single collection, allow duplicates, - // because it is valid for two competing forks to have the same payload. - light := payload.Collection.Light() - err := operation.SkipDuplicates(operation.InsertCollection(&light))(tx) - if err != nil { - return fmt.Errorf("could not insert payload collection: %w", err) - } - - // insert constituent transactions - for _, colTx := range payload.Collection.Transactions { - err = operation.SkipDuplicates(operation.InsertTransaction(colTx.ID(), colTx))(tx) - if err != nil { - return fmt.Errorf("could not insert payload transaction: %w", err) - } - } - - // index the transaction IDs within the collection - txIDs := payload.Collection.Light().Transactions - err = operation.SkipDuplicates(operation.IndexCollectionPayload(blockID, txIDs))(tx) - if err != nil { - return fmt.Errorf("could not index collection: %w", err) - } - - // insert the reference block ID - err = operation.IndexReferenceBlockByClusterBlock(blockID, payload.ReferenceBlockID)(tx) - if err != nil { - return fmt.Errorf("could not insert reference block ID: %w", err) - } - - return nil - } -} - -// RetrieveClusterPayload retrieves a cluster consensus block payload by block ID.
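FinalizeClusterBlock only extends the chain by a direct child of the current finalized head, which is why the test further below first seeds the parent's height index and finalized boundary. A hedged sketch of the happy path, with `block` assumed to be a cluster.Block whose parent is already the finalized head:

```go
// Sketch: insert a cluster block, then finalize it. FinalizeClusterBlock
// fails unless the block's parent is the current finalized head.
if err := db.Update(InsertClusterBlock(&block)); err != nil {
	return err
}
if err := db.Update(FinalizeClusterBlock(block.ID())); err != nil {
	return err
}
```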
-func RetrieveClusterPayload(blockID flow.Identifier, payload *cluster.Payload) func(*badger.Txn) error { - return func(tx *badger.Txn) error { - - // lookup the reference block ID - var refID flow.Identifier - err := operation.LookupReferenceBlockByClusterBlock(blockID, &refID)(tx) - if err != nil { - return fmt.Errorf("could not retrieve reference block ID: %w", err) - } - - // lookup collection transaction IDs - var txIDs []flow.Identifier - err = operation.LookupCollectionPayload(blockID, &txIDs)(tx) - if err != nil { - return fmt.Errorf("could not look up collection payload: %w", err) - } - - colTransactions := make([]*flow.TransactionBody, 0, len(txIDs)) - // retrieve individual transactions - for _, txID := range txIDs { - var nextTx flow.TransactionBody - err = operation.RetrieveTransaction(txID, &nextTx)(tx) - if err != nil { - return fmt.Errorf("could not retrieve transaction: %w", err) - } - colTransactions = append(colTransactions, &nextTx) - } - - *payload = cluster.PayloadFromTransactions(refID, colTransactions...) - - return nil - } -} diff --git a/storage/badger/procedure/cluster_test.go b/storage/badger/procedure/cluster_test.go deleted file mode 100644 index 325c7919454..00000000000 --- a/storage/badger/procedure/cluster_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package procedure - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/cluster" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestInsertRetrieveClusterBlock(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - block := unittest.ClusterBlockFixture() - - err := db.Update(InsertClusterBlock(&block)) - require.NoError(t, err) - - var retrieved cluster.Block - err = db.View(RetrieveClusterBlock(block.Header.ID(), &retrieved)) - require.NoError(t, err) - - require.Equal(t, block, retrieved) - }) -} - -func TestFinalizeClusterBlock(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - parent := unittest.ClusterBlockFixture() - - block := unittest.ClusterBlockWithParent(&parent) - - err := db.Update(InsertClusterBlock(&block)) - require.NoError(t, err) - - err = db.Update(operation.IndexClusterBlockHeight(block.Header.ChainID, parent.Header.Height, parent.ID())) - require.NoError(t, err) - - err = db.Update(operation.InsertClusterFinalizedHeight(block.Header.ChainID, parent.Header.Height)) - require.NoError(t, err) - - err = db.Update(FinalizeClusterBlock(block.Header.ID())) - require.NoError(t, err) - - var boundary uint64 - err = db.View(operation.RetrieveClusterFinalizedHeight(block.Header.ChainID, &boundary)) - require.NoError(t, err) - require.Equal(t, block.Header.Height, boundary) - - var headID flow.Identifier - err = db.View(operation.LookupClusterBlockHeight(block.Header.ChainID, boundary, &headID)) - require.NoError(t, err) - require.Equal(t, block.ID(), headID) - }) -} diff --git a/storage/badger/procedure/executed.go b/storage/badger/procedure/executed.go deleted file mode 100644 index eb6a094f638..00000000000 --- a/storage/badger/procedure/executed.go +++ /dev/null @@ -1,63 +0,0 @@ -package procedure - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" -) - -// UpdateHighestExecutedBlockIfHigher updates the latest executed block to be the 
input block -// if the input block has a greater height than the currently stored latest executed block. -// The executed block index must have been initialized before calling this function. -// Returns storage.ErrNotFound if the input block does not exist in storage. -func UpdateHighestExecutedBlockIfHigher(header *flow.Header) func(txn *badger.Txn) error { - return func(txn *badger.Txn) error { - var blockID flow.Identifier - err := operation.RetrieveExecutedBlock(&blockID)(txn) - if err != nil { - return fmt.Errorf("cannot lookup executed block: %w", err) - } - - var highest flow.Header - err = operation.RetrieveHeader(blockID, &highest)(txn) - if err != nil { - return fmt.Errorf("cannot retrieve executed header: %w", err) - } - - if header.Height <= highest.Height { - return nil - } - err = operation.UpdateExecutedBlock(header.ID())(txn) - if err != nil { - return fmt.Errorf("cannot update highest executed block: %w", err) - } - - return nil - } -} - -// GetHighestExecutedBlock retrieves the height and ID of the latest block executed by this node. -// Returns storage.ErrNotFound if no latest executed block has been stored. -func GetHighestExecutedBlock(height *uint64, blockID *flow.Identifier) func(tx *badger.Txn) error { - return func(tx *badger.Txn) error { - var highest flow.Header - err := operation.RetrieveExecutedBlock(blockID)(tx) - if err != nil { - return fmt.Errorf("could not lookup executed block %v: %w", blockID, err) - } - err = operation.RetrieveHeader(*blockID, &highest)(tx) - if err != nil { - if errors.Is(err, storage.ErrNotFound) { - return fmt.Errorf("unexpected: latest executed block does not exist in storage: %s", err.Error()) - } - return fmt.Errorf("could not retrieve executed header %v: %w", blockID, err) - } - *height = highest.Height - return nil - } -} diff --git a/storage/badger/procedure/executed_test.go b/storage/badger/procedure/executed_test.go deleted file mode 100644 index ba776c17d97..00000000000 --- a/storage/badger/procedure/executed_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package procedure - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestInsertExecuted(t *testing.T) { - chain, _, _ := unittest.ChainFixture(6) - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - t.Run("setup and bootstrap", func(t *testing.T) { - for _, block := range chain { - require.NoError(t, db.Update(operation.InsertHeader(block.Header.ID(), block.Header))) - } - - root := chain[0].Header - require.NoError(t, - db.Update(operation.InsertExecutedBlock(root.ID())), - ) - - var height uint64 - var blockID flow.Identifier - require.NoError(t, - db.View(GetHighestExecutedBlock(&height, &blockID)), - ) - - require.Equal(t, root.ID(), blockID) - require.Equal(t, root.Height, height) - }) - - t.Run("insert and get", func(t *testing.T) { - header1 := chain[1].Header - require.NoError(t, - db.Update(UpdateHighestExecutedBlockIfHigher(header1)), - ) - - var height uint64 - var blockID flow.Identifier - require.NoError(t, - db.View(GetHighestExecutedBlock(&height, &blockID)), - ) - - require.Equal(t, header1.ID(), blockID) - require.Equal(t, header1.Height, height) - }) - - t.Run("insert more and get highest", func(t *testing.T) { - header2 := chain[2].Header - header3 := chain[3].Header - require.NoError(t, - db.Update(UpdateHighestExecutedBlockIfHigher(header2)), - ) - 
require.NoError(t, - db.Update(UpdateHighestExecutedBlockIfHigher(header3)), - ) - var height uint64 - var blockID flow.Identifier - require.NoError(t, - db.View(GetHighestExecutedBlock(&height, &blockID)), - ) - - require.Equal(t, header3.ID(), blockID) - require.Equal(t, header3.Height, height) - }) - - t.Run("insert lower height later and get highest", func(t *testing.T) { - header5 := chain[5].Header - header4 := chain[4].Header - require.NoError(t, - db.Update(UpdateHighestExecutedBlockIfHigher(header5)), - ) - require.NoError(t, - db.Update(UpdateHighestExecutedBlockIfHigher(header4)), - ) - var height uint64 - var blockID flow.Identifier - require.NoError(t, - db.View(GetHighestExecutedBlock(&height, &blockID)), - ) - - require.Equal(t, header5.ID(), blockID) - require.Equal(t, header5.Height, height) - }) - }) -} diff --git a/storage/badger/procedure/index.go b/storage/badger/procedure/index.go deleted file mode 100644 index 0b4e56c7fd2..00000000000 --- a/storage/badger/procedure/index.go +++ /dev/null @@ -1,65 +0,0 @@ -package procedure - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/operation" -) - -func InsertIndex(blockID flow.Identifier, index *flow.Index) func(tx *badger.Txn) error { - return func(tx *badger.Txn) error { - err := operation.IndexPayloadGuarantees(blockID, index.CollectionIDs)(tx) - if err != nil { - return fmt.Errorf("could not store guarantee index: %w", err) - } - err = operation.IndexPayloadSeals(blockID, index.SealIDs)(tx) - if err != nil { - return fmt.Errorf("could not store seal index: %w", err) - } - err = operation.IndexPayloadReceipts(blockID, index.ReceiptIDs)(tx) - if err != nil { - return fmt.Errorf("could not store receipts index: %w", err) - } - err = operation.IndexPayloadResults(blockID, index.ResultIDs)(tx) - if err != nil { - return fmt.Errorf("could not store results index: %w", err) - } - return nil - } -} - -func RetrieveIndex(blockID flow.Identifier, index *flow.Index) func(tx *badger.Txn) error { - return func(tx *badger.Txn) error { - var collIDs []flow.Identifier - err := operation.LookupPayloadGuarantees(blockID, &collIDs)(tx) - if err != nil { - return fmt.Errorf("could not retrieve guarantee index: %w", err) - } - var sealIDs []flow.Identifier - err = operation.LookupPayloadSeals(blockID, &sealIDs)(tx) - if err != nil { - return fmt.Errorf("could not retrieve seal index: %w", err) - } - var receiptIDs []flow.Identifier - err = operation.LookupPayloadReceipts(blockID, &receiptIDs)(tx) - if err != nil { - return fmt.Errorf("could not retrieve receipts index: %w", err) - } - var resultsIDs []flow.Identifier - err = operation.LookupPayloadResults(blockID, &resultsIDs)(tx) - if err != nil { - return fmt.Errorf("could not retrieve results index: %w", err) - } - - *index = flow.Index{ - CollectionIDs: collIDs, - SealIDs: sealIDs, - ReceiptIDs: receiptIDs, - ResultIDs: resultsIDs, - } - return nil - } -} diff --git a/storage/badger/procedure/index_test.go b/storage/badger/procedure/index_test.go deleted file mode 100644 index 77a3c32bc9b..00000000000 --- a/storage/badger/procedure/index_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package procedure - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestInsertRetrieveIndex(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - blockID := 
unittest.IdentifierFixture() - index := unittest.IndexFixture() - - err := db.Update(InsertIndex(blockID, index)) - require.NoError(t, err) - - var retrieved flow.Index - err = db.View(RetrieveIndex(blockID, &retrieved)) - require.NoError(t, err) - - require.Equal(t, index, &retrieved) - }) -} diff --git a/storage/badger/qcs.go b/storage/badger/qcs.go deleted file mode 100644 index 432a0f8dfd2..00000000000 --- a/storage/badger/qcs.go +++ /dev/null @@ -1,66 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// QuorumCertificates implements persistent storage for quorum certificates. -type QuorumCertificates struct { - db *badger.DB - cache *Cache -} - -var _ storage.QuorumCertificates = (*QuorumCertificates)(nil) - -// NewQuorumCertificates creates a QuorumCertificates instance, which is a database of quorum certificates -// that supports storing, caching, and retrieving by block ID. -func NewQuorumCertificates(collector module.CacheMetrics, db *badger.DB, cacheSize uint) *QuorumCertificates { - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - qc := val.(*flow.QuorumCertificate) - return transaction.WithTx(operation.InsertQuorumCertificate(qc)) - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - blockID := key.(flow.Identifier) - var qc flow.QuorumCertificate - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveQuorumCertificate(blockID, &qc)(tx) - return &qc, err - } - } - - return &QuorumCertificates{ - db: db, - cache: newCache(collector, metrics.ResourceQC, - withLimit(cacheSize), - withStore(store), - withRetrieve(retrieve)), - } -} - -func (q *QuorumCertificates) StoreTx(qc *flow.QuorumCertificate) func(*transaction.Tx) error { - return q.cache.PutTx(qc.BlockID, qc) -} - -func (q *QuorumCertificates) ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error) { - tx := q.db.NewTransaction(false) - defer tx.Discard() - return q.retrieveTx(blockID)(tx) -} - -func (q *QuorumCertificates) retrieveTx(blockID flow.Identifier) func(*badger.Txn) (*flow.QuorumCertificate, error) { - return func(tx *badger.Txn) (*flow.QuorumCertificate, error) { - val, err := q.cache.Get(blockID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.QuorumCertificate), nil - } -} diff --git a/storage/badger/qcs_test.go b/storage/badger/qcs_test.go deleted file mode 100644 index 51cb0bc8a86..00000000000 --- a/storage/badger/qcs_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" - "github.com/onflow/flow-go/utils/unittest" -) - -// TestQuorumCertificates_StoreTx tests storing and retrieving of a QC.
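StoreTx returns a deferred-write closure, so callers run it through RetryOnConflictTx to absorb Badger transaction conflicts; a second, different QC for the same block is rejected by the underlying insert. A sketch mirroring the tests that follow (`qcs` is a hypothetical *QuorumCertificates store):

```go
// Sketch: store a QC with conflict retries, then read it back by block ID.
err := operation.RetryOnConflictTx(db, transaction.Update, qcs.StoreTx(qc))
if err != nil {
	return err // storage.ErrAlreadyExists if a different QC was stored first
}
stored, err := qcs.ByBlockID(qc.BlockID) // storage.ErrNotFound if absent
```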
-func TestQuorumCertificates_StoreTx(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewQuorumCertificates(metrics, db, 10) - qc := unittest.QuorumCertificateFixture() - - err := operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(qc)) - require.NoError(t, err) - - actual, err := store.ByBlockID(qc.BlockID) - require.NoError(t, err) - - require.Equal(t, qc, actual) - }) -} - -// TestQuorumCertificates_StoreTx_OtherQC checks that storing another QC for the same blockID results in the -// expected storage error and that the already-stored value is not overwritten. -func TestQuorumCertificates_StoreTx_OtherQC(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewQuorumCertificates(metrics, db, 10) - qc := unittest.QuorumCertificateFixture() - otherQC := unittest.QuorumCertificateFixture(func(otherQC *flow.QuorumCertificate) { - otherQC.View = qc.View - otherQC.BlockID = qc.BlockID - }) - - err := operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(qc)) - require.NoError(t, err) - - err = operation.RetryOnConflictTx(db, transaction.Update, store.StoreTx(otherQC)) - require.ErrorIs(t, err, storage.ErrAlreadyExists) - - actual, err := store.ByBlockID(otherQC.BlockID) - require.NoError(t, err) - - require.Equal(t, qc, actual) - }) -} - -// TestQuorumCertificates_ByBlockID checks that ByBlockID returns the correct sentinel error if no QC for the given block ID has been found -func TestQuorumCertificates_ByBlockID(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewQuorumCertificates(metrics, db, 10) - - actual, err := store.ByBlockID(unittest.IdentifierFixture()) - require.ErrorIs(t, err, storage.ErrNotFound) - require.Nil(t, actual) - }) -} diff --git a/storage/badger/receipts.go b/storage/badger/receipts.go deleted file mode 100644 index fb4996b82d3..00000000000 --- a/storage/badger/receipts.go +++ /dev/null @@ -1,154 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// ExecutionReceipts implements storage for execution receipts. -type ExecutionReceipts struct { - db *badger.DB - results *ExecutionResults - cache *Cache -} - -// NewExecutionReceipts creates an ExecutionReceipts instance, which is a database of receipts that -// supports storing and indexing receipts by receipt ID and block ID.
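Receipts are persisted in two parts, the receipt meta and the separately stored execution result, and are recombined on read; the constructor below wires this split into the cache's store and retrieve closures. A conceptual restatement of the read side (illustrative only, using the package's unexported helpers):

```go
// Sketch of the retrieve closure's recombination step: fetch the receipt
// meta, resolve its result by ID, and reassemble the full receipt.
var meta flow.ExecutionReceiptMeta
if err := operation.RetrieveExecutionReceiptMeta(receiptID, &meta)(tx); err != nil {
	return nil, err
}
result, err := results.byID(meta.ResultID)(tx)
if err != nil {
	return nil, err
}
return flow.ExecutionReceiptFromMeta(meta, *result), nil
```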
-func NewExecutionReceipts(collector module.CacheMetrics, db *badger.DB, results *ExecutionResults, cacheSize uint) *ExecutionReceipts { - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - receipt := val.(*flow.ExecutionReceipt) - receiptID := receipt.ID() - - // assemble DB operations to store result (no execution) - storeResultOps := results.store(&receipt.ExecutionResult) - // assemble DB operations to index receipt (no execution) - storeReceiptOps := transaction.WithTx(operation.SkipDuplicates(operation.InsertExecutionReceiptMeta(receiptID, receipt.Meta()))) - // assemble DB operations to index receipt by the block it computes (no execution) - indexReceiptOps := transaction.WithTx(operation.SkipDuplicates( - operation.IndexExecutionReceipts(receipt.ExecutionResult.BlockID, receiptID), - )) - - return func(tx *transaction.Tx) error { - err := storeResultOps(tx) // execute operations to store results - if err != nil { - return fmt.Errorf("could not store result: %w", err) - } - err = storeReceiptOps(tx) // execute operations to store receipt-specific meta-data - if err != nil { - return fmt.Errorf("could not store receipt metadata: %w", err) - } - err = indexReceiptOps(tx) - if err != nil { - return fmt.Errorf("could not index receipt by the block it computes: %w", err) - } - return nil - } - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - receiptID := key.(flow.Identifier) - return func(tx *badger.Txn) (interface{}, error) { - var meta flow.ExecutionReceiptMeta - err := operation.RetrieveExecutionReceiptMeta(receiptID, &meta)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve receipt meta: %w", err) - } - result, err := results.byID(meta.ResultID)(tx) - if err != nil { - return nil, fmt.Errorf("could not retrieve result: %w", err) - } - return flow.ExecutionReceiptFromMeta(meta, *result), nil - } - } - - return &ExecutionReceipts{ - db: db, - results: results, - cache: newCache(collector, metrics.ResourceReceipt, - withLimit(cacheSize), - withStore(store), - withRetrieve(retrieve)), - } -} - -// storeMyReceipt assembles the operations to store an arbitrary receipt. 
-func (r *ExecutionReceipts) storeTx(receipt *flow.ExecutionReceipt) func(*transaction.Tx) error { - return r.cache.PutTx(receipt.ID(), receipt) -} - -func (r *ExecutionReceipts) byID(receiptID flow.Identifier) func(*badger.Txn) (*flow.ExecutionReceipt, error) { - retrievalOps := r.cache.Get(receiptID) // assemble DB operations to retrieve receipt (no execution) - return func(tx *badger.Txn) (*flow.ExecutionReceipt, error) { - val, err := retrievalOps(tx) // execute operations to retrieve receipt - if err != nil { - return nil, err - } - return val.(*flow.ExecutionReceipt), nil - } -} - -func (r *ExecutionReceipts) byBlockID(blockID flow.Identifier) func(*badger.Txn) ([]*flow.ExecutionReceipt, error) { - return func(tx *badger.Txn) ([]*flow.ExecutionReceipt, error) { - var receiptIDs []flow.Identifier - err := operation.LookupExecutionReceipts(blockID, &receiptIDs)(tx) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return nil, fmt.Errorf("could not find receipt index for block: %w", err) - } - - var receipts []*flow.ExecutionReceipt - for _, id := range receiptIDs { - receipt, err := r.byID(id)(tx) - if err != nil { - return nil, fmt.Errorf("could not find receipt with id %v: %w", id, err) - } - receipts = append(receipts, receipt) - } - return receipts, nil - } -} - -func (r *ExecutionReceipts) Store(receipt *flow.ExecutionReceipt) error { - return operation.RetryOnConflictTx(r.db, transaction.Update, r.storeTx(receipt)) -} - -func (r *ExecutionReceipts) BatchStore(receipt *flow.ExecutionReceipt, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - - err := r.results.BatchStore(&receipt.ExecutionResult, batch) - if err != nil { - return fmt.Errorf("cannot batch store execution result inside execution receipt batch store: %w", err) - } - - err = operation.BatchInsertExecutionReceiptMeta(receipt.ID(), receipt.Meta())(writeBatch) - if err != nil { - return fmt.Errorf("cannot batch store execution meta inside execution receipt batch store: %w", err) - } - - err = operation.BatchIndexExecutionReceipts(receipt.ExecutionResult.BlockID, receipt.ID())(writeBatch) - if err != nil { - return fmt.Errorf("cannot batch index execution receipt inside execution receipt batch store: %w", err) - } - - return nil -} - -func (r *ExecutionReceipts) ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byID(receiptID)(tx) -} - -func (r *ExecutionReceipts) ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byBlockID(blockID)(tx) -} diff --git a/storage/badger/receipts_test.go b/storage/badger/receipts_test.go deleted file mode 100644 index 03b8420258e..00000000000 --- a/storage/badger/receipts_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package badger_test - -import ( - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestExecutionReceiptsStorage(t *testing.T) { - withStore := func(t *testing.T, f func(store *bstorage.ExecutionReceipts)) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - results := bstorage.NewExecutionResults(metrics, db) - store := bstorage.NewExecutionReceipts(metrics, db, results, bstorage.DefaultCacheSize) - f(store) - }) 
- } - - t.Run("get empty", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block := unittest.BlockFixture() - receipts, err := store.ByBlockID(block.ID()) - require.NoError(t, err) - require.Equal(t, 0, len(receipts)) - }) - }) - - t.Run("store one get one", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block := unittest.BlockFixture() - receipt1 := unittest.ReceiptForBlockFixture(&block) - - err := store.Store(receipt1) - require.NoError(t, err) - - actual, err := store.ByID(receipt1.ID()) - require.NoError(t, err) - - require.Equal(t, receipt1, actual) - - receipts, err := store.ByBlockID(block.ID()) - require.NoError(t, err) - - require.Equal(t, flow.ExecutionReceiptList{receipt1}, receipts) - }) - }) - - t.Run("store two for the same block", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block := unittest.BlockFixture() - - executor1 := unittest.IdentifierFixture() - executor2 := unittest.IdentifierFixture() - - receipt1 := unittest.ReceiptForBlockExecutorFixture(&block, executor1) - receipt2 := unittest.ReceiptForBlockExecutorFixture(&block, executor2) - - err := store.Store(receipt1) - require.NoError(t, err) - - err = store.Store(receipt2) - require.NoError(t, err) - - receipts, err := store.ByBlockID(block.ID()) - require.NoError(t, err) - - require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1, receipt2}, receipts) - }) - }) - - t.Run("store two for different blocks", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block1 := unittest.BlockFixture() - block2 := unittest.BlockFixture() - - executor1 := unittest.IdentifierFixture() - executor2 := unittest.IdentifierFixture() - - receipt1 := unittest.ReceiptForBlockExecutorFixture(&block1, executor1) - receipt2 := unittest.ReceiptForBlockExecutorFixture(&block2, executor2) - - err := store.Store(receipt1) - require.NoError(t, err) - - err = store.Store(receipt2) - require.NoError(t, err) - - receipts1, err := store.ByBlockID(block1.ID()) - require.NoError(t, err) - - receipts2, err := store.ByBlockID(block2.ID()) - require.NoError(t, err) - - require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1}, receipts1) - require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt2}, receipts2) - }) - }) - - t.Run("indexing duplicated receipts should be ok", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block1 := unittest.BlockFixture() - - executor1 := unittest.IdentifierFixture() - receipt1 := unittest.ReceiptForBlockExecutorFixture(&block1, executor1) - - err := store.Store(receipt1) - require.NoError(t, err) - - err = store.Store(receipt1) - require.NoError(t, err) - - receipts, err := store.ByBlockID(block1.ID()) - require.NoError(t, err) - - require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1}, receipts) - }) - }) - - t.Run("indexing receipt from the same executor for same block should succeed", func(t *testing.T) { - withStore(t, func(store *bstorage.ExecutionReceipts) { - block1 := unittest.BlockFixture() - - executor1 := unittest.IdentifierFixture() - - receipt1 := unittest.ReceiptForBlockExecutorFixture(&block1, executor1) - receipt2 := unittest.ReceiptForBlockExecutorFixture(&block1, executor1) - - err := store.Store(receipt1) - require.NoError(t, err) - - err = store.Store(receipt2) - require.NoError(t, err) - - receipts, err := store.ByBlockID(block1.ID()) - require.NoError(t, err) - - require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1, receipt2}, 
receipts) - }) - }) -} diff --git a/storage/badger/results.go b/storage/badger/results.go deleted file mode 100644 index c6160f5dcb5..00000000000 --- a/storage/badger/results.go +++ /dev/null @@ -1,168 +0,0 @@ -package badger - -import ( - "errors" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// ExecutionResults implements persistent storage for execution results. -type ExecutionResults struct { - db *badger.DB - cache *Cache -} - -var _ storage.ExecutionResults = (*ExecutionResults)(nil) - -func NewExecutionResults(collector module.CacheMetrics, db *badger.DB) *ExecutionResults { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - result := val.(*flow.ExecutionResult) - return transaction.WithTx(operation.SkipDuplicates(operation.InsertExecutionResult(result))) - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - resultID := key.(flow.Identifier) - var result flow.ExecutionResult - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveExecutionResult(resultID, &result)(tx) - return &result, err - } - } - - res := &ExecutionResults{ - db: db, - cache: newCache(collector, metrics.ResourceResult, - withLimit(flow.DefaultTransactionExpiry+100), - withStore(store), - withRetrieve(retrieve)), - } - - return res -} - -func (r *ExecutionResults) store(result *flow.ExecutionResult) func(*transaction.Tx) error { - return r.cache.PutTx(result.ID(), result) -} - -func (r *ExecutionResults) byID(resultID flow.Identifier) func(*badger.Txn) (*flow.ExecutionResult, error) { - return func(tx *badger.Txn) (*flow.ExecutionResult, error) { - val, err := r.cache.Get(resultID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.ExecutionResult), nil - } -} - -func (r *ExecutionResults) byBlockID(blockID flow.Identifier) func(*badger.Txn) (*flow.ExecutionResult, error) { - return func(tx *badger.Txn) (*flow.ExecutionResult, error) { - var resultID flow.Identifier - err := operation.LookupExecutionResult(blockID, &resultID)(tx) - if err != nil { - return nil, fmt.Errorf("could not lookup execution result ID: %w", err) - } - return r.byID(resultID)(tx) - } -} - -func (r *ExecutionResults) index(blockID, resultID flow.Identifier, force bool) func(*transaction.Tx) error { - return func(tx *transaction.Tx) error { - err := transaction.WithTx(operation.IndexExecutionResult(blockID, resultID))(tx) - if err == nil { - return nil - } - - if !errors.Is(err, storage.ErrAlreadyExists) { - return err - } - - if force { - return transaction.WithTx(operation.ReindexExecutionResult(blockID, resultID))(tx) - } - - // when trying to index a result for a block, and there is already a result indexed for this block, - // double check if the indexed result is the same - var storedResultID flow.Identifier - err = transaction.WithTx(operation.LookupExecutionResult(blockID, &storedResultID))(tx) - if err != nil { - return fmt.Errorf("there is a result stored already, but cannot retrieve it: %w", err) - } - - if storedResultID != resultID { - return fmt.Errorf("storing result that is different from the already stored one for block: %v, storing result: %v, stored result: %v. 
%w", - blockID, resultID, storedResultID, storage.ErrDataMismatch) - } - - return nil - } -} - -func (r *ExecutionResults) Store(result *flow.ExecutionResult) error { - return operation.RetryOnConflictTx(r.db, transaction.Update, r.store(result)) -} - -func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - return operation.BatchInsertExecutionResult(result)(writeBatch) -} - -func (r *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - return operation.BatchIndexExecutionResult(blockID, resultID)(writeBatch) -} - -func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byID(resultID)(tx) -} - -func (r *ExecutionResults) ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error) { - return func(tx *transaction.Tx) (*flow.ExecutionResult, error) { - result, err := r.byID(resultID)(tx.DBTxn) - return result, err - } -} - -func (r *ExecutionResults) Index(blockID flow.Identifier, resultID flow.Identifier) error { - err := operation.RetryOnConflictTx(r.db, transaction.Update, r.index(blockID, resultID, false)) - if err != nil { - return fmt.Errorf("could not index execution result: %w", err) - } - return nil -} - -func (r *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error { - err := operation.RetryOnConflictTx(r.db, transaction.Update, r.index(blockID, resultID, true)) - if err != nil { - return fmt.Errorf("could not index execution result: %w", err) - } - return nil -} - -func (r *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - return r.byBlockID(blockID)(tx) -} - -func (r *ExecutionResults) RemoveIndexByBlockID(blockID flow.Identifier) error { - return r.db.Update(operation.SkipNonExist(operation.RemoveExecutionResultIndex(blockID))) -} - -// BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. -// No errors are expected during normal operation, even if no entries are matched. -// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
-func (r *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - return operation.BatchRemoveExecutionResultIndex(blockID)(writeBatch) -} diff --git a/storage/badger/results_test.go b/storage/badger/results_test.go deleted file mode 100644 index a23c8bf7232..00000000000 --- a/storage/badger/results_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" - "github.com/onflow/flow-go/utils/unittest" -) - -func TestResultStoreAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewExecutionResults(metrics, db) - - result := unittest.ExecutionResultFixture() - blockID := unittest.IdentifierFixture() - err := store.Store(result) - require.NoError(t, err) - - err = store.Index(blockID, result.ID()) - require.NoError(t, err) - - actual, err := store.ByBlockID(blockID) - require.NoError(t, err) - - require.Equal(t, result, actual) - }) -} - -func TestResultStoreTwice(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewExecutionResults(metrics, db) - - result := unittest.ExecutionResultFixture() - blockID := unittest.IdentifierFixture() - err := store.Store(result) - require.NoError(t, err) - - err = store.Index(blockID, result.ID()) - require.NoError(t, err) - - err = store.Store(result) - require.NoError(t, err) - - err = store.Index(blockID, result.ID()) - require.NoError(t, err) - }) -} - -func TestResultBatchStoreTwice(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewExecutionResults(metrics, db) - - result := unittest.ExecutionResultFixture() - blockID := unittest.IdentifierFixture() - - batch := bstorage.NewBatch(db) - err := store.BatchStore(result, batch) - require.NoError(t, err) - - err = store.BatchIndex(blockID, result.ID(), batch) - require.NoError(t, err) - - require.NoError(t, batch.Flush()) - - batch = bstorage.NewBatch(db) - err = store.BatchStore(result, batch) - require.NoError(t, err) - - err = store.BatchIndex(blockID, result.ID(), batch) - require.NoError(t, err) - - require.NoError(t, batch.Flush()) - }) -} - -func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewExecutionResults(metrics, db) - - result1 := unittest.ExecutionResultFixture() - result2 := unittest.ExecutionResultFixture() - blockID := unittest.IdentifierFixture() - err := store.Store(result1) - require.NoError(t, err) - - err = store.Index(blockID, result1.ID()) - require.NoError(t, err) - - // we can store a different result, but we can't index - // a different result for that block, because it will mean - // one block has two different results. 
- err = store.Store(result2) - require.NoError(t, err) - - err = store.Index(blockID, result2.ID()) - require.Error(t, err) - require.True(t, errors.Is(err, storage.ErrDataMismatch)) - }) -} - -func TestResultStoreForceIndexOverridesMapping(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewExecutionResults(metrics, db) - - result1 := unittest.ExecutionResultFixture() - result2 := unittest.ExecutionResultFixture() - blockID := unittest.IdentifierFixture() - err := store.Store(result1) - require.NoError(t, err) - err = store.Index(blockID, result1.ID()) - require.NoError(t, err) - - err = store.Store(result2) - require.NoError(t, err) - - // force index - err = store.ForceIndex(blockID, result2.ID()) - require.NoError(t, err) - - // retrieve index to make sure it points to second ER now - byBlockID, err := store.ByBlockID(blockID) - - require.Equal(t, result2, byBlockID) - require.NoError(t, err) - }) -} diff --git a/storage/badger/seals.go b/storage/badger/seals.go deleted file mode 100644 index aa68511ed7e..00000000000 --- a/storage/badger/seals.go +++ /dev/null @@ -1,97 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package badger - -import ( - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type Seals struct { - db *badger.DB - cache *Cache -} - -func NewSeals(collector module.CacheMetrics, db *badger.DB) *Seals { - - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - sealID := key.(flow.Identifier) - seal := val.(*flow.Seal) - return transaction.WithTx(operation.SkipDuplicates(operation.InsertSeal(sealID, seal))) - } - - retrieve := func(key interface{}) func(*badger.Txn) (interface{}, error) { - sealID := key.(flow.Identifier) - var seal flow.Seal - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveSeal(sealID, &seal)(tx) - return &seal, err - } - } - - s := &Seals{ - db: db, - cache: newCache(collector, metrics.ResourceSeal, - withLimit(flow.DefaultTransactionExpiry+100), - withStore(store), - withRetrieve(retrieve)), - } - - return s -} - -func (s *Seals) storeTx(seal *flow.Seal) func(*transaction.Tx) error { - return s.cache.PutTx(seal.ID(), seal) -} - -func (s *Seals) retrieveTx(sealID flow.Identifier) func(*badger.Txn) (*flow.Seal, error) { - return func(tx *badger.Txn) (*flow.Seal, error) { - val, err := s.cache.Get(sealID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.Seal), err - } -} - -func (s *Seals) Store(seal *flow.Seal) error { - return operation.RetryOnConflictTx(s.db, transaction.Update, s.storeTx(seal)) -} - -func (s *Seals) ByID(sealID flow.Identifier) (*flow.Seal, error) { - tx := s.db.NewTransaction(false) - defer tx.Discard() - return s.retrieveTx(sealID)(tx) -} - -// HighestInFork retrieves the highest seal that was included in the -// fork up to (and including) blockID. This method should return a seal -// for any block known to the node. Returns storage.ErrNotFound if -// blockID is unknown. 
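// A sketch (editor's illustration, assuming the package context of the deleted file): the
// two lookups below answer different questions. HighestInFork yields the latest seal as of
// a given fork head, while FinalizedSealForBlock yields the seal for one specific finalized
// block. A hypothetical caller resolving "what is the newest sealed block on my fork?":
func latestSealedBlockID(seals *Seals, forkHead flow.Identifier) (flow.Identifier, error) {
	seal, err := seals.HighestInFork(forkHead) // seal with the greatest height included in this fork
	if err != nil {
		return flow.ZeroID, err // wraps storage.ErrNotFound if forkHead is unknown
	}
	return seal.BlockID, nil // ID of the latest sealed block on the fork ending at forkHead
}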
-func (s *Seals) HighestInFork(blockID flow.Identifier) (*flow.Seal, error) { - var sealID flow.Identifier - err := s.db.View(operation.LookupLatestSealAtBlock(blockID, &sealID)) - if err != nil { - return nil, fmt.Errorf("failed to retrieve seal for fork with head %x: %w", blockID, err) - } - return s.ByID(sealID) -} - -// FinalizedSealForBlock returns the seal for the given block, only if that seal -// has been included in a finalized block. -// Returns storage.ErrNotFound if the block is unknown or unsealed. -func (s *Seals) FinalizedSealForBlock(blockID flow.Identifier) (*flow.Seal, error) { - var sealID flow.Identifier - err := s.db.View(operation.LookupBySealedBlockID(blockID, &sealID)) - if err != nil { - return nil, fmt.Errorf("failed to retrieve seal for block %x: %w", blockID, err) - } - return s.ByID(sealID) -} diff --git a/storage/badger/seals_test.go b/storage/badger/seals_test.go deleted file mode 100644 index 5e700941c0b..00000000000 --- a/storage/badger/seals_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestRetrieveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewSeals(metrics, db) - - _, err := store.ByID(unittest.IdentifierFixture()) - require.True(t, errors.Is(err, storage.ErrNotFound)) - - _, err = store.HighestInFork(unittest.IdentifierFixture()) - require.True(t, errors.Is(err, storage.ErrNotFound)) - }) -} - -// TestSealStoreRetrieve verifies that a seal can be stored and retrieved by its ID -func TestSealStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewSeals(metrics, db) - - expected := unittest.Seal.Fixture() - // store seal - err := store.Store(expected) - require.NoError(t, err) - - // retrieve seal - seal, err := store.ByID(expected.ID()) - require.NoError(t, err) - require.Equal(t, expected, seal) - }) -} - -// TestSealIndexAndRetrieve verifies that: -// - for a block, we can store (aka index) the latest sealed block along this fork. -// -// Note: indexing the seal for a block is currently implemented only through a direct -// Badger operation. The Seals mempool only supports retrieving the latest sealed block. 
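// A sketch (editor's illustration): the test below drives the index write through
// operation.RetryOnConflict. Badger transactions are optimistic, so a concurrent commit can
// fail with badger.ErrConflict and must simply be retried. A simplified, hypothetical version
// of such a helper (the real one lives in storage/badger/operation; assumes the errors and
// badger/v2 packages are imported):
func retryOnConflict(update func(func(*badger.Txn) error) error, op func(*badger.Txn) error) error {
	for {
		err := update(op) // e.g. update = db.Update
		if errors.Is(err, badger.ErrConflict) {
			continue // optimistic-concurrency conflict: safe to retry the operation
		}
		return err // nil on success, or a non-retryable error
	}
}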
-func TestSealIndexAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewSeals(metrics, db) - - expectedSeal := unittest.Seal.Fixture() - blockID := unittest.IdentifierFixture() - - // store the seal first - err := store.Store(expectedSeal) - require.NoError(t, err) - - // index the seal ID for the highest sealed block in this fork - err = operation.RetryOnConflict(db.Update, operation.IndexLatestSealAtBlock(blockID, expectedSeal.ID())) - require.NoError(t, err) - - // retrieve latest seal - seal, err := store.HighestInFork(blockID) - require.NoError(t, err) - require.Equal(t, expectedSeal, seal) - }) -} - -// TestSealedBlockIndexAndRetrieve checks that, after indexing a seal by a sealed block ID, it can be -// retrieved by the sealed block ID -func TestSealedBlockIndexAndRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewSeals(metrics, db) - - expectedSeal := unittest.Seal.Fixture() - blockID := unittest.IdentifierFixture() - expectedSeal.BlockID = blockID - - // store the seal first - err := store.Store(expectedSeal) - require.NoError(t, err) - - // index the seal ID for the highest sealed block in this fork - err = operation.RetryOnConflict(db.Update, operation.IndexFinalizedSealByBlockID(expectedSeal.BlockID, expectedSeal.ID())) - require.NoError(t, err) - - // retrieve latest seal - seal, err := store.FinalizedSealForBlock(blockID) - require.NoError(t, err) - require.Equal(t, expectedSeal, seal) - }) -} diff --git a/storage/badger/transaction/tx.go b/storage/badger/transaction/tx.go index 4235389ad6d..827f183d110 100644 --- a/storage/badger/transaction/tx.go +++ b/storage/badger/transaction/tx.go @@ -6,26 +6,68 @@ import ( ioutils "github.com/onflow/flow-go/utils/io" ) +// Tx wraps a badger transaction and includes an additional slice for callbacks. +// The callbacks are executed after the badger transaction has completed _successfully_. +// DESIGN PATTERN +// - DBTxn should never be nil +// - at initialization, `callbacks` is empty +// - While business logic code operates on `DBTxn`, it can append additional callbacks +// via the `OnSucceed` method. This generally happens during the transaction execution. +// +// CAUTION: +// - Tx is stateful (calls to `OnSucceed` change its internal state). +// Therefore, Tx needs to be passed as a pointer variable. +// - Do not instantiate Tx outside of this package. Instead, use the `Update` or `View` +// functions. +// - Whether a transaction is considered to have succeeded depends only on the return value +// of the outermost function. For example, consider a chain of 3 functions: f3( f2( f1(x))) +// Let's assume f1 fails with a `storage.ErrAlreadyExists` sentinel, which f2 expects and +// therefore discards. f3 could then succeed, i.e. return nil. +// Consequently, the entire list of callbacks is executed, including f1's callback if it +// added one. Callback implementations therefore need to account for this edge case. +// - Tx is not concurrency safe. type Tx struct { DBTxn *dbbadger.Txn callbacks []func() } -// OnSucceed adds a callback to execute after the batch has -// been successfully flushed. -// useful for implementing the cache where we will only cache -// after the batch has been successfully flushed +// OnSucceed adds a callback to execute after the batch has been successfully flushed.
+// Useful for implementing the cache where we will only cache after the batch of database +// operations has been successfully applied. +// CAUTION: +// Whether a transaction is considered to have succeeded depends only on the return value +// of the outermost function. For example, consider a chain of 3 functions: f3( f2( f1(x))) +// Let's assume f1 fails with a `storage.ErrAlreadyExists` sentinel, which f2 expects and +// therefore discards. f3 could then succeed, i.e. return nil. +// Consequently, the entire list of callbacks is executed, including f1's callback if it +// added one. Callback implementations therefore need to account for this edge case. func (b *Tx) OnSucceed(callback func()) { b.callbacks = append(b.callbacks, callback) } -// Update creates a badger transaction, passing it to a chain of functions, -// if all succeed. Useful to use callback to update cache in order to ensure data -// in badgerDB and cache are consistent. +// Update creates a badger transaction, passing it to a chain of functions. +// Only if the transaction succeeds do we run the `callbacks` that were appended during the +// transaction execution. The callbacks are useful to update caches in order to reduce +// cache misses. func Update(db *dbbadger.DB, f func(*Tx) error) error { dbTxn := db.NewTransaction(true) - defer dbTxn.Discard() + err := run(f, dbTxn) + dbTxn.Discard() + return err +} + +// View creates a read-only badger transaction, passing it to a chain of functions. +// Only if the transaction succeeds do we run the `callbacks` that were appended during the +// transaction execution. The callbacks are useful to update caches in order to reduce +// cache misses. +func View(db *dbbadger.DB, f func(*Tx) error) error { + dbTxn := db.NewTransaction(false) + err := run(f, dbTxn) + dbTxn.Discard() + return err +} + +func run(f func(*Tx) error, dbTxn *dbbadger.Txn) error { tx := &Tx{DBTxn: dbTxn} err := f(tx) if err != nil { @@ -43,6 +85,16 @@ func Update(db *dbbadger.DB, f func(*Tx) error) error { return nil } +// Fail returns an anonymous function, whose future execution returns the error e. This +// is useful for front-loading sanity checks. On the happy path (the dominant case), this function +// will generally not be used. However, if one of the front-loaded sanity checks fails, +// we include `transaction.Fail(e)` in place of the business logic handling the happy path. +func Fail(e error) func(*Tx) error { + return func(tx *Tx) error { + return e + } +} + // WithTx is useful when a transaction is used without adding callbacks.
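// A sketch (editor's illustration, assuming this package's context): typical composition of
// the helpers above. Business logic writes through tx.DBTxn and registers a cache update via
// OnSucceed; Update fires the callbacks only after a successful commit, keeping cache and
// database consistent. The `write` and `cacheInsert` parameters are hypothetical stand-ins
// for concrete store operations:
func storeAndCache(db *dbbadger.DB, write func(*dbbadger.Txn) error, cacheInsert func()) error {
	return Update(db, func(tx *Tx) error {
		if err := write(tx.DBTxn); err != nil {
			return err // transaction aborted: cacheInsert never runs
		}
		tx.OnSucceed(cacheInsert) // deferred until the transaction commits successfully
		return nil
	})
}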
func WithTx(f func(*dbbadger.Txn) error) func(*Tx) error { return func(tx *Tx) error { diff --git a/storage/badger/transaction_results.go b/storage/badger/transaction_results.go deleted file mode 100644 index 77cc103e8b5..00000000000 --- a/storage/badger/transaction_results.go +++ /dev/null @@ -1,245 +0,0 @@ -package badger - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" -) - -type TransactionResults struct { - db *badger.DB - cache *Cache - indexCache *Cache - blockCache *Cache -} - -func KeyFromBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) string { - return fmt.Sprintf("%x%x", blockID, txID) -} - -func KeyFromBlockIDIndex(blockID flow.Identifier, txIndex uint32) string { - idData := make([]byte, 4) //uint32 fits into 4 bytes - binary.BigEndian.PutUint32(idData, txIndex) - return fmt.Sprintf("%x%x", blockID, idData) -} - -func KeyFromBlockID(blockID flow.Identifier) string { - return blockID.String() -} - -func KeyToBlockIDTransactionID(key string) (flow.Identifier, flow.Identifier, error) { - blockIDStr := key[:64] - txIDStr := key[64:] - blockID, err := flow.HexStringToIdentifier(blockIDStr) - if err != nil { - return flow.ZeroID, flow.ZeroID, fmt.Errorf("could not get block ID: %w", err) - } - - txID, err := flow.HexStringToIdentifier(txIDStr) - if err != nil { - return flow.ZeroID, flow.ZeroID, fmt.Errorf("could not get transaction id: %w", err) - } - - return blockID, txID, nil -} - -func KeyToBlockIDIndex(key string) (flow.Identifier, uint32, error) { - blockIDStr := key[:64] - indexStr := key[64:] - blockID, err := flow.HexStringToIdentifier(blockIDStr) - if err != nil { - return flow.ZeroID, 0, fmt.Errorf("could not get block ID: %w", err) - } - - txIndexBytes, err := hex.DecodeString(indexStr) - if err != nil { - return flow.ZeroID, 0, fmt.Errorf("could not get transaction index: %w", err) - } - if len(txIndexBytes) != 4 { - return flow.ZeroID, 0, fmt.Errorf("could not get transaction index - invalid length: %d", len(txIndexBytes)) - } - - txIndex := binary.BigEndian.Uint32(txIndexBytes) - - return blockID, txIndex, nil -} - -func KeyToBlockID(key string) (flow.Identifier, error) { - - blockID, err := flow.HexStringToIdentifier(key) - if err != nil { - return flow.ZeroID, fmt.Errorf("could not get block ID: %w", err) - } - - return blockID, err -} - -func NewTransactionResults(collector module.CacheMetrics, db *badger.DB, transactionResultsCacheSize uint) *TransactionResults { - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - var txResult flow.TransactionResult - return func(tx *badger.Txn) (interface{}, error) { - - blockID, txID, err := KeyToBlockIDTransactionID(key.(string)) - if err != nil { - return nil, fmt.Errorf("could not convert key: %w", err) - } - - err = operation.RetrieveTransactionResult(blockID, txID, &txResult)(tx) - if err != nil { - return nil, handleError(err, flow.TransactionResult{}) - } - return txResult, nil - } - } - retrieveIndex := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - var txResult flow.TransactionResult - return func(tx *badger.Txn) (interface{}, error) { - - blockID, txIndex, err := KeyToBlockIDIndex(key.(string)) - if err != nil { - return nil, fmt.Errorf("could not convert index key: %w", err) - } - - err = 
operation.RetrieveTransactionResultByIndex(blockID, txIndex, &txResult)(tx) - if err != nil { - return nil, handleError(err, flow.TransactionResult{}) - } - return txResult, nil - } - } - retrieveForBlock := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - var txResults []flow.TransactionResult - return func(tx *badger.Txn) (interface{}, error) { - - blockID, err := KeyToBlockID(key.(string)) - if err != nil { - return nil, fmt.Errorf("could not convert index key: %w", err) - } - - err = operation.LookupTransactionResultsByBlockIDUsingIndex(blockID, &txResults)(tx) - if err != nil { - return nil, handleError(err, flow.TransactionResult{}) - } - return txResults, nil - } - } - return &TransactionResults{ - db: db, - cache: newCache(collector, metrics.ResourceTransactionResults, - withLimit(transactionResultsCacheSize), - withStore(noopStore), - withRetrieve(retrieve), - ), - indexCache: newCache(collector, metrics.ResourceTransactionResultIndices, - withLimit(transactionResultsCacheSize), - withStore(noopStore), - withRetrieve(retrieveIndex), - ), - blockCache: newCache(collector, metrics.ResourceTransactionResultIndices, - withLimit(transactionResultsCacheSize), - withStore(noopStore), - withRetrieve(retrieveForBlock), - ), - } -} - -// BatchStore will store the transaction results for the given block ID in a batch -func (tr *TransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - - for i, result := range transactionResults { - err := operation.BatchInsertTransactionResult(blockID, &result)(writeBatch) - if err != nil { - return fmt.Errorf("cannot batch insert tx result: %w", err) - } - - err = operation.BatchIndexTransactionResult(blockID, uint32(i), &result)(writeBatch) - if err != nil { - return fmt.Errorf("cannot batch index tx result: %w", err) - } - } - - batch.OnSucceed(func() { - for i, result := range transactionResults { - key := KeyFromBlockIDTransactionID(blockID, result.TransactionID) - // cache for each transaction, so that it's faster to retrieve - tr.cache.Insert(key, result) - - index := uint32(i) - - keyIndex := KeyFromBlockIDIndex(blockID, index) - tr.indexCache.Insert(keyIndex, result) - } - - key := KeyFromBlockID(blockID) - tr.blockCache.Insert(key, transactionResults) - }) - return nil -} - -// ByBlockIDTransactionID returns the runtime transaction result for the given block ID and transaction ID -func (tr *TransactionResults) ByBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) (*flow.TransactionResult, error) { - tx := tr.db.NewTransaction(false) - defer tx.Discard() - key := KeyFromBlockIDTransactionID(blockID, txID) - val, err := tr.cache.Get(key)(tx) - if err != nil { - return nil, err - } - transactionResult, ok := val.(flow.TransactionResult) - if !ok { - return nil, fmt.Errorf("could not convert transaction result: %w", err) - } - return &transactionResult, nil -} - -// ByBlockIDTransactionIndex returns the runtime transaction result for the given block ID and transaction index -func (tr *TransactionResults) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResult, error) { - tx := tr.db.NewTransaction(false) - defer tx.Discard() - key := KeyFromBlockIDIndex(blockID, txIndex) - val, err := tr.indexCache.Get(key)(tx) - if err != nil { - return nil, err - } - transactionResult, ok := val.(flow.TransactionResult) - if !ok { - return nil, fmt.Errorf("could not convert 
transaction result: %w", err) - } - return &transactionResult, nil -} - -// ByBlockID gets all transaction results for a block, ordered by transaction index -func (tr *TransactionResults) ByBlockID(blockID flow.Identifier) ([]flow.TransactionResult, error) { - tx := tr.db.NewTransaction(false) - defer tx.Discard() - key := KeyFromBlockID(blockID) - val, err := tr.blockCache.Get(key)(tx) - if err != nil { - return nil, err - } - transactionResults, ok := val.([]flow.TransactionResult) - if !ok { - return nil, fmt.Errorf("could not convert transaction result: %w", err) - } - return transactionResults, nil -} - -// RemoveByBlockID removes transaction results by block ID -func (tr *TransactionResults) RemoveByBlockID(blockID flow.Identifier) error { - return tr.db.Update(operation.RemoveTransactionResultsByBlockID(blockID)) -} - -// BatchRemoveByBlockID batch removes transaction results by block ID -func (tr *TransactionResults) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { - writeBatch := batch.GetWriter() - return tr.db.View(operation.BatchRemoveTransactionResultsByBlockID(blockID, writeBatch)) -} diff --git a/storage/badger/transaction_results_test.go b/storage/badger/transaction_results_test.go deleted file mode 100644 index 5ba30d74414..00000000000 --- a/storage/badger/transaction_results_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package badger_test - -import ( - "fmt" - mathRand "math/rand" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/exp/rand" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - bstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestBatchStoringTransactionResults(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := bstorage.NewTransactionResults(metrics, db, 1000) - - blockID := unittest.IdentifierFixture() - txResults := make([]flow.TransactionResult, 0) - for i := 0; i < 10; i++ { - txID := unittest.IdentifierFixture() - expected := flow.TransactionResult{ - TransactionID: txID, - ErrorMessage: fmt.Sprintf("a runtime error %d", i), - } - txResults = append(txResults, expected) - } - writeBatch := bstorage.NewBatch(db) - err := store.BatchStore(blockID, txResults, writeBatch) - require.NoError(t, err) - - err = writeBatch.Flush() - require.NoError(t, err) - - for _, txResult := range txResults { - actual, err := store.ByBlockIDTransactionID(blockID, txResult.TransactionID) - require.NoError(t, err) - assert.Equal(t, txResult, *actual) - } - - // test loading from database - newStore := bstorage.NewTransactionResults(metrics, db, 1000) - for _, txResult := range txResults { - actual, err := newStore.ByBlockIDTransactionID(blockID, txResult.TransactionID) - require.NoError(t, err) - assert.Equal(t, txResult, *actual) - } - - // check retrieving by index from both cache and db - for i := len(txResults) - 1; i >= 0; i-- { - actual, err := store.ByBlockIDTransactionIndex(blockID, uint32(i)) - require.NoError(t, err) - assert.Equal(t, txResults[i], *actual) - - actual, err = newStore.ByBlockIDTransactionIndex(blockID, uint32(i)) - require.NoError(t, err) - assert.Equal(t, txResults[i], *actual) - } - }) -} - -func TestReadingNotStoreTransaction(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := 
metrics.NewNoopCollector() - store := bstorage.NewTransactionResults(metrics, db, 1000) - - blockID := unittest.IdentifierFixture() - txID := unittest.IdentifierFixture() - txIndex := rand.Uint32() - - _, err := store.ByBlockIDTransactionID(blockID, txID) - assert.ErrorIs(t, err, storage.ErrNotFound) - - _, err = store.ByBlockIDTransactionIndex(blockID, txIndex) - assert.ErrorIs(t, err, storage.ErrNotFound) - }) -} - -func TestKeyConversion(t *testing.T) { - blockID := unittest.IdentifierFixture() - txID := unittest.IdentifierFixture() - key := bstorage.KeyFromBlockIDTransactionID(blockID, txID) - bID, tID, err := bstorage.KeyToBlockIDTransactionID(key) - require.NoError(t, err) - require.Equal(t, blockID, bID) - require.Equal(t, txID, tID) -} - -func TestIndexKeyConversion(t *testing.T) { - blockID := unittest.IdentifierFixture() - txIndex := mathRand.Uint32() - key := bstorage.KeyFromBlockIDIndex(blockID, txIndex) - bID, tID, err := bstorage.KeyToBlockIDIndex(key) - require.NoError(t, err) - require.Equal(t, blockID, bID) - require.Equal(t, txIndex, tID) -} diff --git a/storage/badger/transactions.go b/storage/badger/transactions.go deleted file mode 100644 index 97cd6e98293..00000000000 --- a/storage/badger/transactions.go +++ /dev/null @@ -1,71 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage/badger/operation" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -// Transactions ... -type Transactions struct { - db *badger.DB - cache *Cache -} - -// NewTransactions ... -func NewTransactions(cacheMetrics module.CacheMetrics, db *badger.DB) *Transactions { - store := func(key interface{}, val interface{}) func(*transaction.Tx) error { - txID := key.(flow.Identifier) - flowTx := val.(*flow.TransactionBody) - return transaction.WithTx(operation.SkipDuplicates(operation.InsertTransaction(txID, flowTx))) - } - - retrieve := func(key interface{}) func(tx *badger.Txn) (interface{}, error) { - txID := key.(flow.Identifier) - var flowTx flow.TransactionBody - return func(tx *badger.Txn) (interface{}, error) { - err := operation.RetrieveTransaction(txID, &flowTx)(tx) - return &flowTx, err - } - } - - t := &Transactions{ - db: db, - cache: newCache(cacheMetrics, metrics.ResourceTransaction, - withLimit(flow.DefaultTransactionExpiry+100), - withStore(store), - withRetrieve(retrieve)), - } - - return t -} - -// Store ... -func (t *Transactions) Store(flowTx *flow.TransactionBody) error { - return operation.RetryOnConflictTx(t.db, transaction.Update, t.storeTx(flowTx)) -} - -// ByID ... 
-func (t *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, error) { - tx := t.db.NewTransaction(false) - defer tx.Discard() - return t.retrieveTx(txID)(tx) -} - -func (t *Transactions) storeTx(flowTx *flow.TransactionBody) func(*transaction.Tx) error { - return t.cache.PutTx(flowTx.ID(), flowTx) -} - -func (t *Transactions) retrieveTx(txID flow.Identifier) func(*badger.Txn) (*flow.TransactionBody, error) { - return func(tx *badger.Txn) (*flow.TransactionBody, error) { - val, err := t.cache.Get(txID)(tx) - if err != nil { - return nil, err - } - return val.(*flow.TransactionBody), err - } -} diff --git a/storage/badger/transactions_test.go b/storage/badger/transactions_test.go deleted file mode 100644 index 3b10a10dc5b..00000000000 --- a/storage/badger/transactions_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package badger_test - -import ( - "errors" - "testing" - - "github.com/dgraph-io/badger/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/utils/unittest" - - badgerstorage "github.com/onflow/flow-go/storage/badger" -) - -func TestTransactionStoreRetrieve(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewTransactions(metrics, db) - - // store a transaction in db - expected := unittest.TransactionFixture() - err := store.Store(&expected.TransactionBody) - require.NoError(t, err) - - // retrieve the transaction by ID - actual, err := store.ByID(expected.ID()) - require.NoError(t, err) - assert.Equal(t, &expected.TransactionBody, actual) - - // re-insert the transaction - should be idempotent - err = store.Store(&expected.TransactionBody) - require.NoError(t, err) - }) -} - -func TestTransactionRetrieveWithoutStore(t *testing.T) { - unittest.RunWithBadgerDB(t, func(db *badger.DB) { - metrics := metrics.NewNoopCollector() - store := badgerstorage.NewTransactions(metrics, db) - - // attempt to get a invalid transaction - _, err := store.ByID(unittest.IdentifierFixture()) - assert.True(t, errors.Is(err, storage.ErrNotFound)) - }) -} diff --git a/storage/badger/version_beacon.go b/storage/badger/version_beacon.go deleted file mode 100644 index eb44213be5e..00000000000 --- a/storage/badger/version_beacon.go +++ /dev/null @@ -1,38 +0,0 @@ -package badger - -import ( - "github.com/dgraph-io/badger/v2" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage" - "github.com/onflow/flow-go/storage/badger/operation" -) - -type VersionBeacons struct { - db *badger.DB -} - -var _ storage.VersionBeacons = (*VersionBeacons)(nil) - -func NewVersionBeacons(db *badger.DB) *VersionBeacons { - res := &VersionBeacons{ - db: db, - } - - return res -} - -func (r *VersionBeacons) Highest( - belowOrEqualTo uint64, -) (*flow.SealedVersionBeacon, error) { - tx := r.db.NewTransaction(false) - defer tx.Discard() - - var beacon *flow.SealedVersionBeacon - - err := operation.LookupLastVersionBeaconByHeight(belowOrEqualTo, beacon)(tx) - if err != nil { - return nil, err - } - return beacon, nil -} diff --git a/storage/batch.go b/storage/batch.go index 3147fc5c0e7..bc9c4853294 100644 --- a/storage/batch.go +++ b/storage/batch.go @@ -1,13 +1,19 @@ package storage -import "github.com/dgraph-io/badger/v2" +import ( + "github.com/dgraph-io/badger/v2" +) +// Deprecated: Transaction is being deprecated as part of the transition from Badger to Pebble. 
+// Use Writer instead of Transaction for all new code. type Transaction interface { Set(key, val []byte) error } // BatchStorage serves as an abstraction over batch storage, adding the ability to add extra // callbacks which fire after the batch is successfully flushed. +// Deprecated: BatchStorage is being deprecated as part of the transition from Badger to Pebble. +// Use ReaderBatchWriter instead of BatchStorage for all new code. type BatchStorage interface { GetWriter() *badger.WriteBatch diff --git a/storage/blocks.go b/storage/blocks.go index 506588e4869..ad66f7f8b3b 100644 --- a/storage/blocks.go +++ b/storage/blocks.go @@ -1,45 +1,101 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" ) -// Blocks represents persistent storage for blocks. +// Blocks provides persistent storage for blocks. +// +// Conceptually, blocks must always be signed by the proposer. Once a block is certified (i.e. +// received votes from a supermajority of consensus participants, in their aggregated form +// represented by the Quorum Certificate [QC]), the proposer's signature is included in the QC +// and does not need to be provided individually anymore. Therefore, from the protocol perspective, +// the proper data structures are either a block proposal (including the proposer's signature) or +// a certified block (including a QC for the block). type Blocks interface { - // Store will atomically store a block with all its dependencies. - Store(block *flow.Block) error - - // StoreTx allows us to store a new block, including its payload & header, as part of a DB transaction, while - // still going through the caching layer. - StoreTx(block *flow.Block) func(*transaction.Tx) error + // BatchStore stores a valid block in a batch. + // Error returns: + // - storage.ErrAlreadyExists if the blockID already exists in the database. + // - generic error in case of unexpected failure from the database layer or encoding failure. + BatchStore(lctx lockctx.Proof, rw ReaderBatchWriter, proposal *flow.Proposal) error - // ByID returns the block with the given hash. It is available for - // finalized and ambiguous blocks. + // ByID returns the block with the given hash. It is available for all incorporated blocks (validated blocks + // that have been appended to any of the known forks) no matter whether the block has been finalized or not. + // + // Error returns: + // - storage.ErrNotFound if no block with the corresponding ID was found + // - generic error in case of unexpected failure from the database layer, or failure + // to decode an existing database value ByID(blockID flow.Identifier) (*flow.Block, error) + // ProposalByID returns the block with the given ID, along with the proposer's signature on it. + // It is available for all incorporated blocks (validated blocks that have been appended to any + // of the known forks) no matter whether the block has been finalized or not. + // + // Error returns: + // - storage.ErrNotFound if no block with the corresponding ID was found + // - generic error in case of unexpected failure from the database layer, or failure + // to decode an existing database value + ProposalByID(blockID flow.Identifier) (*flow.Proposal, error) + // ByHeight returns the block at the given height. It is only available // for finalized blocks.
+ // + // Error returns: + // - storage.ErrNotFound if no block for the corresponding height was found + // - generic error in case of unexpected failure from the database layer, or failure + // to decode an existing database value ByHeight(height uint64) (*flow.Block, error) - // ByCollectionID returns the block for the given collection ID. - ByCollectionID(collID flow.Identifier) (*flow.Block, error) + // ProposalByHeight returns the block at the given height, along with the proposer's + // signature on it. It is only available for finalized blocks. + // + // Error returns: + // - storage.ErrNotFound if no block proposal for the corresponding height was found + // - generic error in case of unexpected failure from the database layer, or failure + // to decode an existing database value + ProposalByHeight(height uint64) (*flow.Proposal, error) - // IndexBlockForCollections indexes the block each collection was - // included in. - IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error + // ByView returns the block with the given view. It is only available for certified blocks. + // Certified blocks are the blocks that have received a QC. Hotstuff guarantees that for each view, + // at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique + // even for non-finalized blocks. + // + // Expected errors during normal operations: + // - `storage.ErrNotFound` if no certified block is known at given view. + ByView(view uint64) (*flow.Block, error) - // InsertLastFullBlockHeightIfNotExists inserts the FullBlockHeight index if it does not already exist. - // Calling this function multiple times is a no-op and returns no expected errors. - InsertLastFullBlockHeightIfNotExists(height uint64) error + // ProposalByView returns the block proposal with the given view. It is only available for certified blocks. + // + // Expected errors during normal operations: + // - `storage.ErrNotFound` if no certified block is known at given view. + ProposalByView(view uint64) (*flow.Proposal, error) - // UpdateLastFullBlockHeight updates the FullBlockHeight index - // The FullBlockHeight index indicates that block for which all collections have been received - UpdateLastFullBlockHeight(height uint64) error + // ByCollectionID returns the block for the given [flow.CollectionGuarantee] ID. + // This method is only available for collections included in finalized blocks. + // While consensus nodes verify that collections are not repeated within the same fork, + // each different fork can contain a recent collection once. Therefore, we must wait for + // finality. + // CAUTION: this method is not backed by a cache and therefore comparatively slow! + // + // Error returns: + // - storage.ErrNotFound if the collection ID was not found + // - generic error in case of unexpected failure from the database layer, or failure + // to decode an existing database value + ByCollectionID(collID flow.Identifier) (*flow.Block, error) - // GetLastFullBlockHeight retrieves the FullBlockHeight - GetLastFullBlockHeight() (height uint64, err error) + // IndexBlockContainingCollectionGuarantees populates an index `guaranteeID->blockID` for each guarantee + // which appears in the block. + // CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation + // assumes a one-to-one map from collection ID to a *single* block ID. 
This holds for FINALIZED BLOCKS ONLY + *and* only in the absence of byzantine collector clusters (which the mature protocol must tolerate). + // Hence, this function should be treated as a temporary solution, which requires generalization + // (one-to-many mapping) for soft finality and the mature protocol. + // + // Error returns: + // - generic error in case of unexpected failure from the database layer or encoding failure. + IndexBlockContainingCollectionGuarantees(blockID flow.Identifier, collIDs []flow.Identifier) error } diff --git a/storage/chunkDataPacks.go b/storage/chunkDataPacks.go deleted file mode 100644 index a6055b8b9b3..00000000000 --- a/storage/chunkDataPacks.go +++ /dev/null @@ -1,23 +0,0 @@ -package storage - -import ( - "github.com/onflow/flow-go/model/flow" -) - -// ChunkDataPacks represents persistent storage for chunk data packs. -type ChunkDataPacks interface { - - // Store inserts the chunk header, keyed by chunk ID. - Store(c *flow.ChunkDataPack) error - - // BatchStore inserts the chunk header, keyed by chunk ID into a given batch - BatchStore(c *flow.ChunkDataPack, batch BatchStorage) error - - // ByChunkID returns the chunk data for the given a chunk ID. - ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error) - - // BatchRemove removes ChunkDataPack c keyed by its ChunkID in provided batch - // No errors are expected during normal operation, even if no entries are matched. - // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. - BatchRemove(chunkID flow.Identifier, batch BatchStorage) error -} diff --git a/storage/chunk_data_packs.go b/storage/chunk_data_packs.go new file mode 100644 index 00000000000..4371b27b16a --- /dev/null +++ b/storage/chunk_data_packs.go @@ -0,0 +1,82 @@ +package storage + +import ( + "bytes" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" +) + +// ChunkDataPacks represents persistent storage for chunk data packs. +type ChunkDataPacks interface { + + // StoreByChunkID stores multiple ChunkDataPacks cs keyed by their ChunkIDs in a batch. + // No errors are expected during normal operation, but it may return a generic error. + StoreByChunkID(lctx lockctx.Proof, cs []*flow.ChunkDataPack) error + + // Remove removes multiple ChunkDataPacks cs keyed by their ChunkIDs in a batch. + // No errors are expected during normal operation, but it may return a generic error. + Remove(cs []flow.Identifier) error + + // ByChunkID returns the chunk data for the given chunk ID. + ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error) + + // BatchRemove removes ChunkDataPack c keyed by its ChunkID in the provided batch. + // No errors are expected during normal operation, even if no entries are matched. + // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. + BatchRemove(chunkID flow.Identifier, batch ReaderBatchWriter) error +} + +// StoredChunkDataPack is the in-storage representation of a chunk data pack. +// Its prime difference is that, instead of an actual collection, it keeps only the collection ID, relying on +// the collection being maintained in secondary storage.
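// A sketch (editor's illustration, assuming this package's context): Equals (defined below)
// reports a mismatch via ErrDataMismatch rather than returning a bool, so callers can wrap
// the sentinel with context, e.g. when deciding whether a duplicate store is benign.
// ensureSameChunkDataPack is a hypothetical caller (assumes fmt is imported):
func ensureSameChunkDataPack(existing, incoming *flow.ChunkDataPack) error {
	if err := ToStoredChunkDataPack(existing).Equals(*ToStoredChunkDataPack(incoming)); err != nil {
		return fmt.Errorf("chunk %v already stored with different content: %w", incoming.ChunkID, err)
	}
	return nil // identical content: the duplicate store can be treated as a no-op
}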
+type StoredChunkDataPack struct { + ChunkID flow.Identifier + StartState flow.StateCommitment + Proof flow.StorageProof + CollectionID flow.Identifier + SystemChunk bool + ExecutionDataRoot flow.BlockExecutionDataRoot +} + +// ToStoredChunkDataPack converts a chunk data pack into its reduced storage representation, +// replacing the embedded collection by its ID; a nil collection marks the system chunk. +func ToStoredChunkDataPack(c *flow.ChunkDataPack) *StoredChunkDataPack { + sc := &StoredChunkDataPack{ + ChunkID: c.ChunkID, + StartState: c.StartState, + Proof: c.Proof, + SystemChunk: false, + ExecutionDataRoot: c.ExecutionDataRoot, + } + + if c.Collection != nil { + // non system chunk + sc.CollectionID = c.Collection.ID() + } else { + sc.SystemChunk = true + } + + return sc +} + +// Equals returns nil if both stored chunk data packs are identical; otherwise it returns ErrDataMismatch. +func (c StoredChunkDataPack) Equals(other StoredChunkDataPack) error { + if c.ChunkID != other.ChunkID { + return ErrDataMismatch + } + if c.StartState != other.StartState { + return ErrDataMismatch + } + if !c.ExecutionDataRoot.Equals(other.ExecutionDataRoot) { + return ErrDataMismatch + } + if c.SystemChunk != other.SystemChunk { + return ErrDataMismatch + } + if !bytes.Equal(c.Proof, other.Proof) { + return ErrDataMismatch + } + if c.CollectionID != other.CollectionID { + return ErrDataMismatch + } + return nil +} diff --git a/storage/cluster_blocks.go b/storage/cluster_blocks.go index ca5a3466b87..6827b8f4b57 100644 --- a/storage/cluster_blocks.go +++ b/storage/cluster_blocks.go @@ -5,15 +5,29 @@ import ( "github.com/onflow/flow-go/model/flow" ) +// ClusterBlocks provides persistent storage for collector blocks (aka collections) produced +// by *one specific* collector cluster (identified by the ClusterChainID). +// For consistency, method naming is analogous to the [storage.Blocks] interface. However, +// at the moment we only need to store [cluster.Proposal]; therefore, methods `ByID` and +// `ByHeight` don't exist here (but might be added later). type ClusterBlocks interface { - // Store stores the cluster block. - Store(block *cluster.Block) error + // ProposalByID returns the collection with the given ID, along with the proposer's signature on it. + // It is available for all incorporated collections (validated blocks that have been appended to any + // of the known forks) no matter whether the collection has been finalized or not. + // + // Error returns: + // - storage.ErrNotFound if the block ID was not found + // - generic error in case of unexpected failure from the database layer, or failure + // to decode an existing database value + ProposalByID(blockID flow.Identifier) (*cluster.Proposal, error) - // ByID returns the block with the given ID. - ByID(blockID flow.Identifier) (*cluster.Block, error) - - // ByHeight returns the block with the given height. Only available for - // finalized blocks. - ByHeight(height uint64) (*cluster.Block, error) + // ProposalByHeight returns the collection at the given height, along with the proposer's + // signature on it. It is only available for finalized collections. + // + // Error returns: + // - storage.ErrNotFound if the block height or block ID was not found + // - generic error in case of unexpected failure from the database layer, or failure + // to decode an existing database value + ProposalByHeight(height uint64) (*cluster.Proposal, error) } diff --git a/storage/cluster_payloads.go b/storage/cluster_payloads.go index 7d80d3a9e2e..5cf92194380 100644 --- a/storage/cluster_payloads.go +++ b/storage/cluster_payloads.go @@ -9,9 +9,6 @@ import ( // node cluster consensus. type ClusterPayloads interface { - // Store stores and indexes the given cluster payload.
- Store(blockID flow.Identifier, payload *cluster.Payload) error - - // ByBlockID returns the cluster payload for the given block ID. ByBlockID(blockID flow.Identifier) (*cluster.Payload, error) } diff --git a/storage/collections.go b/storage/collections.go index c50d92da5b9..c2a3a4113c8 100644 --- a/storage/collections.go +++ b/storage/collections.go @@ -1,46 +1,72 @@ package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" ) +// CollectionsReader represents persistent storage read operations for collections. +type CollectionsReader interface { + // ByID returns the collection with the given ID, including all + // transactions within the collection. + // + // Expected errors during normal operation: + // - `storage.ErrNotFound` if no collection with the given ID was found. + ByID(collID flow.Identifier) (*flow.Collection, error) + + // LightByID returns a reduced representation of the collection with the given ID. + // The reduced collection references the constituent transactions by their hashes. + // + // Expected errors during normal operation: + // - `storage.ErrNotFound` if no light collection was found. + LightByID(collID flow.Identifier) (*flow.LightCollection, error) + + // LightByTransactionID returns a reduced representation of the collection + // holding the given transaction ID. The reduced collection references the + // constituent transactions by their hashes. + // + // Expected errors during normal operation: + // - `storage.ErrNotFound` if no light collection was found. + LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error) +} + // Collections represents persistent storage for collections. type Collections interface { - - // StoreLight inserts the collection. It does not insert, nor check - // existence of, the constituent transactions. - StoreLight(collection *flow.LightCollection) error + CollectionsReader // Store inserts the collection keyed by ID and all constituent // transactions. - Store(collection *flow.Collection) error + // This is used by the execution node to store collections. + // No errors are expected during normal operation. + Store(collection *flow.Collection) (*flow.LightCollection, error) // Remove removes the collection and all constituent transactions. + // No errors are expected during normal operation. Remove(collID flow.Identifier) error - // LightByID returns collection with the given ID. Only retrieves - // transaction hashes. - LightByID(collID flow.Identifier) (*flow.LightCollection, error) - - // ByID returns the collection with the given ID, including all - // transactions within the collection. - ByID(collID flow.Identifier) (*flow.Collection, error) + // StoreAndIndexByTransaction stores the collection and indexes it by transaction. + // This is used by the access node to store collections for finalized blocks. + // + // CAUTION: the current approach is NOT BFT and needs to be revised in the future. + // Honest clusters ensure a transaction can only belong to one collection. However, in rare + // cases, the collector clusters can exceed byzantine thresholds -- making it possible to + // produce multiple finalized collections (aka guaranteed collections) containing the same + // transaction repeatedly. + // TODO: eventually we need to handle Byzantine clusters + // + // No errors are expected during normal operation.
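As a hedged sketch of how a consumer might use the read-only interface above, the helper below treats `storage.ErrNotFound` as an expected miss; the function and its wiring are hypothetical:

package example

import (
	"errors"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// findCollectionForTx maps the expected storage.ErrNotFound to a boolean
// "miss" result, and escalates any other error as an unexpected exception.
func findCollectionForTx(reader storage.CollectionsReader, txID flow.Identifier) (*flow.LightCollection, bool, error) {
	coll, err := reader.LightByTransactionID(txID)
	if errors.Is(err, storage.ErrNotFound) {
		return nil, false, nil // transaction not part of any known collection
	}
	if err != nil {
		return nil, false, err
	}
	return coll, true, nil
}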
+ StoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection) (*flow.LightCollection, error) - // StoreLightAndIndexByTransaction inserts the light collection (only - // transaction IDs) and adds a transaction id index for each of the - // transactions within the collection (transaction_id->collection_id). + // BatchStoreAndIndexByTransaction stores the collection and indexes it by transaction within a batch. // - // NOTE: Currently it is possible in rare circumstances for two collections - // to be guaranteed which both contain the same transaction (see https://github.com/dapperlabs/flow-go/issues/3556). - // The second of these will revert upon reaching the execution node, so - // this doesn't impact the execution state, but it can result in the Access - // node processing two collections which both contain the same transaction (see https://github.com/dapperlabs/flow-go/issues/5337). - // To handle this, we skip indexing the affected transaction when inserting - // the transaction_id->collection_id index when an index for the transaction - // already exists. - StoreLightAndIndexByTransaction(collection *flow.LightCollection) error - - // LightByTransactionID returns the collection for the given transaction ID. Only retrieves - // transaction hashes. - LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error) + // CAUTION: the current approach is NOT BFT and needs to be revised in the future. + // Honest clusters ensure a transaction can only belong to one collection. However, in rare + // cases, the collector clusters can exceed byzantine thresholds -- making it possible to + // produce multiple finalized collections (aka guaranteed collections) containing the same + // transaction repeatedly. + // TODO: eventually we need to handle Byzantine clusters + // + // This is used by the access node to store collections for finalized blocks. + BatchStoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection, batch ReaderBatchWriter) (*flow.LightCollection, error) } diff --git a/storage/commits.go b/storage/commits.go index 1612c55cc9f..0adbfecf3e1 100644 --- a/storage/commits.go +++ b/storage/commits.go @@ -1,25 +1,27 @@ package storage import ( + "github.com/jordanschalm/lockctx" + "github.com/onflow/flow-go/model/flow" ) +type CommitsReader interface { + // ByBlockID will retrieve a commit by its block ID from persistent storage. + ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error) +} + // Commits represents persistent storage for state commitments. type Commits interface { - - // Store will store a commit in the persistent storage. - Store(blockID flow.Identifier, commit flow.StateCommitment) error + CommitsReader // BatchStore stores Commit keyed by blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. - // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. - BatchStore(blockID flow.Identifier, commit flow.StateCommitment, batch BatchStorage) error - - // ByBlockID will retrieve a commit by its ID from persistent storage. - ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error) + // If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
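The lock-proof-plus-batch pattern required by the `BatchStore` method declared next might be wired up roughly as sketched below; the lock ID "insert_commit" and the helper itself are assumptions for illustration, with the lockctx calls following that package's Manager/Context API:

package example

import (
	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// storeCommit sketches the flow: acquire a lock context, prove it to the
// storage layer, and commit the write as a single batch.
func storeCommit(mgr lockctx.Manager, db storage.DB, commits storage.Commits, blockID flow.Identifier, commit flow.StateCommitment) error {
	lctx := mgr.NewContext()
	defer lctx.Release()
	if err := lctx.AcquireLock("insert_commit"); err != nil { // hypothetical lock ID
		return err
	}
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return commits.BatchStore(lctx, blockID, commit, rw)
	})
}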
+ BatchStore(lctx lockctx.Proof, blockID flow.Identifier, commit flow.StateCommitment, batch ReaderBatchWriter) error // BatchRemoveByBlockID removes Commit keyed by blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. - // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. - BatchRemoveByBlockID(blockID flow.Identifier, batch BatchStorage) error + // If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. + BatchRemoveByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error } diff --git a/storage/consumer_progress.go b/storage/consumer_progress.go index bd99926ba32..0a3d6b327f6 100644 --- a/storage/consumer_progress.go +++ b/storage/consumer_progress.go @@ -1,13 +1,29 @@ package storage +// ConsumerProgressInitializer is a helper to initialize the consumer progress index in storage. +// It prevents the consumer from being used before initialization. +type ConsumerProgressInitializer interface { + // Initialize takes a default index and initializes the consumer progress index in storage. + // Initialize must be concurrency safe: if called by multiple modules, it initializes only once. + Initialize(defaultIndex uint64) (ConsumerProgress, error) +} + // ConsumerProgress reads and writes the last processed index of the job in the job queue +// It must be created by the ConsumerProgressInitializer, so that it can guarantee +// the ProcessedIndex and SetProcessedIndex methods are safe to use. type ConsumerProgress interface { - // read the current processed index + // ProcessedIndex returns the processed index for the consumer + // No errors are expected during normal operation ProcessedIndex() (uint64, error) - // insert the default processed index to the storage layer, can only be done once. - // initialize for the second time will return storage.ErrAlreadyExists - InitProcessedIndex(defaultIndex uint64) error - // update the processed index in the storage layer. - // it will fail if InitProcessedIndex was never called. + + // SetProcessedIndex updates the processed index for the consumer + // The caller must use ConsumerProgressInitializer to initialize the progress index in storage + // No errors are expected during normal operation SetProcessedIndex(processed uint64) error + + // BatchSetProcessedIndex updates the processed index for the consumer within the provided batch + // The caller must use ConsumerProgressInitializer to initialize the progress index in storage + // No errors are expected during normal operation + BatchSetProcessedIndex(processed uint64, batch ReaderBatchWriter) error } diff --git a/storage/deferred/operations.go b/storage/deferred/operations.go new file mode 100644 index 00000000000..ed13cae839e --- /dev/null +++ b/storage/deferred/operations.go @@ -0,0 +1,101 @@ +package deferred + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// DBOp is a shorthand for a deferred database operation that works within a lock-protected context. +// It accepts a lock proof, a block ID, and a reader/writer interface to perform its task. +// This pattern allows chaining database updates for atomic execution in a single batch update.
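To make the deferred-persist pattern concrete, here is a hedged sketch using the `DeferredBlockPersist` type introduced below; `indexOp` and `notify` are hypothetical stand-ins supplied by the caller:

package example

import (
	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/deferred"
)

// persistBlockData queues a caller-supplied operation plus a post-commit
// callback, then executes everything atomically in one batch. The operations
// run in insertion order; the callback fires only after a successful commit.
func persistBlockData(lctx lockctx.Proof, db storage.DB, blockID flow.Identifier, indexOp deferred.DBOp, notify func()) error {
	pending := deferred.NewDeferredBlockPersist()
	pending.AddNextOperation(indexOp)
	pending.AddSucceedCallback(notify)
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return pending.Execute(lctx, blockID, rw)
	})
}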
+type DBOp = func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error + +// DeferredBlockPersist accumulates deferred database operations to be executed later in a single atomic batch update. +// Specifically, we defer appending writes and success-callbacks to a [storage.ReaderBatchWriter]. +// Operations for appending writes and success-callbacks are executed in the order in which they were queued. +// Since Pebble does not provide serializable snapshot isolation, callers MUST ensure that the necessary locks are +// acquired before executing the set of deferred operations. +// +// This construct accomplishes two distinct goals: +// 1. Deferring block indexing write operations when the block ID is not yet known. +// 2. Deferring lock-requiring read-then-write operations to minimize time spent holding a lock. +// +// NOT CONCURRENCY SAFE +type DeferredBlockPersist struct { + pending DBOp // Holds the accumulated operations as a single composed function. Can be nil if no ops are added. +} + +// NewDeferredBlockPersist instantiates a DeferredBlockPersist instance. Initially, it behaves as a no-op until operations are added. +func NewDeferredBlockPersist() *DeferredBlockPersist { + return &DeferredBlockPersist{ + pending: nil, + } +} + +// IsEmpty returns true if no operations have been enqueued. +func (d *DeferredBlockPersist) IsEmpty() bool { + return d.pending == nil +} + +// AddNextOperation adds a new deferred database operation to the queue of pending operations. +// If there are already pending operations, this new operation will be composed to run after them. +// This method ensures the operations execute sequentially and abort on the first error. +// +// If `nil` is passed, it is ignored; this might happen when chaining with an empty DeferredBlockPersist. +func (d *DeferredBlockPersist) AddNextOperation(nextOperation DBOp) { + if nextOperation == nil { + // No-op if the provided operation is nil. + return + } + + if d.pending == nil { + // If this is the first operation being added, set it directly. + d.pending = nextOperation + return + } + + // Compose the prior and next operations into a single function. + prior := d.pending + d.pending = func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + // Execute the prior operations first. + if err := prior(lctx, blockID, rw); err != nil { + return err + } + // Execute the newly added operation next. + if err := nextOperation(lctx, blockID, rw); err != nil { + return err + } + return nil + } +} + +// Chain merges the deferred operations from another DeferredBlockPersist into this one. +// The resulting order of operations is: +// 1. execute the operations in the receiver in the order they were added +// 2. execute the operations from the input in the order they were added +func (d *DeferredBlockPersist) Chain(deferred *DeferredBlockPersist) { + d.AddNextOperation(deferred.pending) +} + +// AddSucceedCallback adds a callback to be executed **after** the pending database operations succeed. +// This is useful for registering indexing tasks or post-commit hooks. +// The callback is only invoked if no error occurred during batch execution. +func (d *DeferredBlockPersist) AddSucceedCallback(callback func()) { + d.AddNextOperation(func(_ lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + // Schedule the callback to run after a successful commit.
+ storage.OnCommitSucceed(rw, callback) + return nil + }) +} + +// Execute runs all the accumulated deferred database operations in-order. +// If no operations were added, it is effectively a no-op. +// This method should be called exactly once per batch update. +func (d *DeferredBlockPersist) Execute(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + if d.pending == nil { + return nil // No operations to execute. + } + return d.pending(lctx, blockID, rw) +} diff --git a/storage/deferred/operations_test.go b/storage/deferred/operations_test.go new file mode 100644 index 00000000000..a2f87469d5b --- /dev/null +++ b/storage/deferred/operations_test.go @@ -0,0 +1,300 @@ +package deferred_test + +import ( + "errors" + "fmt" + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/deferred" + "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/dbtest" +) + +// TestNewDeferredBlockPersist verifies that a newly created DeferredBlockPersist instance is empty and not nil. +func TestNewDeferredBlockPersist(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + assert.NotNil(t, d) + assert.True(t, d.IsEmpty()) + }) +} + +// TestDeferredBlockPersist_IsEmpty verifies the `DeferredBlockPersist.IsEmpty` method. +func TestDeferredBlockPersist_IsEmpty(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + assert.True(t, d.IsEmpty()) + + d.AddNextOperation(func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return nil + }) + assert.False(t, d.IsEmpty()) + }) +} + +// TestDeferredBlockPersist_AddNextOperation_Nil verifies that adding a nil operation does +// not change the state of the DeferredBlockPersist. +func TestDeferredBlockPersist_AddNextOperation_Nil(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + d.AddNextOperation(nil) + assert.True(t, d.IsEmpty()) + }) +} + +// TestDeferredBlockPersist_Execute_NoOps verifies that executing an empty DeferredBlockPersist is a no-op. +func TestDeferredBlockPersist_Execute_NoOps(t *testing.T) { + rw := mock.NewReaderBatchWriter(t) // mock errors on any function call + d := deferred.NewDeferredBlockPersist() + err := d.Execute(nil, flow.Identifier{}, rw) + assert.NoError(t, err) +} + +// TestDeferredBlockPersist_AddNextOperation_Single verifies that a single operation can be added and executed.
+func TestDeferredBlockPersist_AddNextOperation_Single(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + var executed bool + op := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + executed = true + return nil + } + + d.AddNextOperation(op) + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return d.Execute(nil, flow.Identifier{}, writer) + }) + + require.NoError(t, err) + assert.True(t, executed) + }) +} + +// TestDeferredBlockPersist_AddNextOperation_Multiple verifies that: +// - multiple operations can be added +// - operations are executed in the order they were added +func TestDeferredBlockPersist_AddNextOperation_Multiple(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + var executionOrder []int + + op1 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + executionOrder = append(executionOrder, 1) + return nil + } + op2 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + executionOrder = append(executionOrder, 2) + return nil + } + + d.AddNextOperation(op1) + d.AddNextOperation(op2) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return d.Execute(nil, flow.Identifier{}, writer) + }) + + require.NoError(t, err) + assert.Equal(t, []int{1, 2}, executionOrder) + }) +} + +// TestDeferredBlockPersist_AddNextOperation_Error verifies that if an operation returns an error, +// subsequent operations are not executed and the error is returned. +func TestDeferredBlockPersist_AddNextOperation_Error(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + var op2Executed bool + testErr := errors.New("test error") + + op1 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return fmt.Errorf("aborting: %w", testErr) + } + op2 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + op2Executed = true + return nil + } + + d.AddNextOperation(op1) + d.AddNextOperation(op2) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return d.Execute(nil, flow.Identifier{}, writer) + }) + + require.Error(t, err) + assert.ErrorIs(t, err, testErr) + assert.False(t, op2Executed) + }) +} + +// TestDeferredBlockPersist_Chain verifies that chaining two DeferredBlockPersist: +// - executes all operations from both instances +// - maintains the order of operations (first operations from receiver, then from chained instance) +func TestDeferredBlockPersist_Chain(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + var executionOrder []int + + d1 := deferred.NewDeferredBlockPersist() + d1op1 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + executionOrder = append(executionOrder, 1) + return nil + } + d1.AddNextOperation(d1op1) + + d2 := deferred.NewDeferredBlockPersist() + d2op1 := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + executionOrder = append(executionOrder, 2) + return nil + } + d2.AddNextOperation(d2op1) + + d1.Chain(d2) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return d1.Execute(nil, flow.Identifier{}, writer) + }) + require.NoError(t, err) + assert.Equal(t, []int{1, 2}, 
executionOrder) + }) +} + +// TestDeferredBlockPersist_Chain_Empty verifies that chaining involving an empty DeferredBlockPersist works +func TestDeferredBlockPersist_Chain_Empty(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + t.Run("non-empty receiver chaining an empty DeferredBlockPersist", func(t *testing.T) { + d := deferred.NewDeferredBlockPersist() + var opExecuted bool + op := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + opExecuted = true + return nil + } + d.AddNextOperation(op) + + empty := deferred.NewDeferredBlockPersist() + d.Chain(empty) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return d.Execute(nil, flow.Identifier{}, writer) + }) + require.NoError(t, err) + assert.True(t, opExecuted) + }) + + t.Run("empty receiver chaining a non-empty DeferredBlockPersist", func(t *testing.T) { + d := deferred.NewDeferredBlockPersist() + var opExecuted bool + op := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + opExecuted = true + return nil + } + d.AddNextOperation(op) + + empty := deferred.NewDeferredBlockPersist() + empty.Chain(d) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + return empty.Execute(nil, flow.Identifier{}, writer) + }) + require.NoError(t, err) + assert.True(t, opExecuted) + }) + }) +} + +// TestDeferredBlockPersist_AddSucceedCallback verifies that a callback is executed when committing the `ReaderBatchWriter` +func TestDeferredBlockPersist_AddSucceedCallback(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + var callbackExecuted bool + callback := func() { + callbackExecuted = true + } + d.AddSucceedCallback(callback) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + // Upon running the deferred operations, the callback should be registered with the writer. However, the + // callback should not be executed yet, as the writer will only be committed once we return from this function. + err := d.Execute(nil, flow.Identifier{}, writer) + require.NoError(t, err) + assert.False(t, callbackExecuted) + return nil + }) // WithReaderBatchWriter commits the batch at the end, which should have triggered the callback + require.NoError(t, err) + assert.True(t, callbackExecuted) + }) +} + +// TestDeferredBlockPersist_AddSucceedCallback_Error verifies that if an error occurs when committing the batch, +// the success callback is not executed. +func TestDeferredBlockPersist_AddSucceedCallback_Error(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + var callbackExecuted bool + callback := func() { + callbackExecuted = true + } + d.AddSucceedCallback(callback) + + testErr := errors.New("test error") + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + // Execute the deferred operation, which registers the success callback with the writer. However, the + // callback should not be executed yet, as the writer will only be committed once we return from this function. + err := d.Execute(nil, flow.Identifier{}, writer) + require.NoError(t, err) + assert.False(t, callbackExecuted) + + // Return an error from the transaction block to simulate a failed transaction.
+ return fmt.Errorf("abort: %w", testErr) + }) // WithReaderBatchWriter commits the batch at the end, which should have triggered the callback + + // The error from the transaction should be the one we returned. + require.Error(t, err) + assert.ErrorIs(t, err, testErr) + + // Because the transaction failed, the success callback should not have been executed. + assert.False(t, callbackExecuted) + }) +} + +// TestDeferredBlockPersist_Add_Operation_and_Callback verifies that +// a deferred operation and a callback can be added +func TestDeferredBlockPersist_Add_Operation_and_Callback(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + d := deferred.NewDeferredBlockPersist() + var opExecuted bool + var callbackExecuted bool + + op := func(lctx lockctx.Proof, blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + opExecuted = true + return nil + } + callback := func() { + callbackExecuted = true + } + + d.AddNextOperation(op) + d.AddSucceedCallback(callback) + + err := db.WithReaderBatchWriter(func(writer storage.ReaderBatchWriter) error { + // When composing the final write batch, the deferred operations should be run and the callback should + // be registered with the writer. However, the callback should not be executed yet, as the writer will + // only be committed once we return from this function. + err := d.Execute(nil, flow.Identifier{}, writer) + require.NoError(t, err) + assert.True(t, opExecuted) + assert.False(t, callbackExecuted) + return nil + }) // WithReaderBatchWriter commits the batch at the end, which should have triggered the callback + + require.NoError(t, err) + assert.True(t, opExecuted) + assert.True(t, callbackExecuted) + }) +} diff --git a/storage/dkg.go b/storage/dkg.go index 3f38212461c..2ff38292e39 100644 --- a/storage/dkg.go +++ b/storage/dkg.go @@ -1,59 +1,103 @@ package storage import ( - "github.com/onflow/flow-go/crypto" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/model/flow" ) -// DKGState is the storage interface for storing all artifacts and state -// related to the DKG process, including the latest state of a running or -// completed DKG, and computed beacon keys. -type DKGState interface { +// SafeBeaconKeys is a safe way to access beacon keys. +type SafeBeaconKeys interface { - // SetDKGStarted sets the flag indicating the DKG has started for the given epoch. - // Error returns: storage.ErrAlreadyExists - SetDKGStarted(epochCounter uint64) error + // RetrieveMyBeaconPrivateKey retrieves my beacon private key for the given + // epoch, only if my key has been confirmed valid and safe for use. + // + // Returns: + // - (key, true, nil) if the key is present and confirmed valid + // - (nil, false, nil) if the key has been marked invalid or unavailable + // -> no beacon key will ever be available for the epoch in this case + // - (nil, false, [storage.ErrNotFound]) if the DKG has not ended + // - (nil, false, error) for any unexpected exception + RetrieveMyBeaconPrivateKey(epochCounter uint64) (key crypto.PrivateKey, safe bool, err error) +} - // GetDKGStarted checks whether the DKG has been started for the given epoch. - // No errors expected during normal operation. - GetDKGStarted(epochCounter uint64) (bool, error) +// DKGStateReader is a read-only interface for low-level reading of the Random Beacon Recoverable State Machine. +type DKGStateReader interface { + SafeBeaconKeys - // SetDKGEndState stores that the DKG has ended, and its end state. 
- // Error returns: storage.ErrAlreadyExists - SetDKGEndState(epochCounter uint64, endState flow.DKGEndState) error + // GetDKGState retrieves the current state of the state machine for the given epoch. + // If an error is returned, the state is undefined, meaning the state machine is in its initial state. + // Error returns: + // - [storage.ErrNotFound] - if there is no state stored for the given epoch, meaning the state machine is in its initial state. + GetDKGState(epochCounter uint64) (flow.DKGState, error) - // GetDKGEndState retrieves the end state for the given DKG. - // Error returns: storage.ErrNotFound - GetDKGEndState(epochCounter uint64) (flow.DKGEndState, error) + // IsDKGStarted checks whether the DKG has been started for the given epoch. + // No errors expected during normal operation. + IsDKGStarted(epochCounter uint64) (bool, error) - // InsertMyBeaconPrivateKey stores the random beacon private key for an epoch. + // UnsafeRetrieveMyBeaconPrivateKey retrieves the random beacon private key for an epoch. // // CAUTION: these keys are stored before they are validated against the // canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys // to guarantee only keys safe for signing are returned - // Error returns: storage.ErrAlreadyExists - InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error + // Error returns: + // - [storage.ErrNotFound] - if there is no key stored for the given epoch. + UnsafeRetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error) +} - // RetrieveMyBeaconPrivateKey retrieves the random beacon private key for an epoch. +// DKGState is the storage interface for storing all artifacts and state related to the DKG process, +// including the latest state of a running or completed DKG, and computed beacon keys. DKGState +// supports all state transitions that can occur for an individual node during the happy path +// epoch switchover of the network as a whole. Recovery from the epoch fallback mode is supported +// by the EpochRecoveryMyBeaconKey interface. +type DKGState interface { + DKGStateReader + + // SetDKGState performs a state transition for the Random Beacon Recoverable State Machine. + // Some state transitions may not be possible using this method. For instance, we might not be able to enter [flow.DKGStateCompleted] + // state directly from [flow.DKGStateStarted], even if such a transition is valid. The reason for this is that some states require additional + // data to be processed by the state machine before the transition can be made. For such cases there are dedicated methods that should be used, e.g. + // InsertMyBeaconPrivateKey and UpsertMyBeaconPrivateKey, which allow storing the needed data and performing the transition in one atomic operation. + // Error returns: + // - [storage.InvalidDKGStateTransitionError] - if the requested state transition is invalid. + SetDKGState(epochCounter uint64, newState flow.DKGState) error + + // InsertMyBeaconPrivateKey stores the random beacon private key for an epoch and transitions the + // state machine into the [flow.DKGStateCompleted] state. // // CAUTION: these keys are stored before they are validated against the - // canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys + // canonical key vector and may not be valid for use in signing.
Use [SafeBeaconKeys] + // to guarantee only keys safe for signing are returned - // Error returns: storage.ErrNotFound - RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error) + // Error returns: + // - [storage.ErrAlreadyExists] - if there is already a key stored for the given epoch. + // - [storage.InvalidDKGStateTransitionError] - if the requested state transition is invalid. + InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error + + // CommitMyBeaconPrivateKey commits the previously inserted random beacon private key for an epoch. Effectively, this method + // transitions the state machine into the [flow.RandomBeaconKeyCommitted] state if the current state is [flow.DKGStateCompleted]. + // The caller needs to supply the [flow.EpochCommit] as evidence that the stored key is valid for the specified epoch. Repeated + // calls for the same epoch are accepted (idempotent operation), if and only if the provided EpochCommit confirms the already + // committed key. + // No errors are expected during normal operations. + CommitMyBeaconPrivateKey(epochCounter uint64, commit *flow.EpochCommit) error } -// SafeBeaconKeys is a safe way to access beacon keys. -type SafeBeaconKeys interface { +// EpochRecoveryMyBeaconKey is a specific interface that allows overwriting the beacon private key for +// a future epoch, provided that the state machine has not yet reached the [flow.RandomBeaconKeyCommitted] +// state for the specified epoch. +// This interface is used *ONLY* in the epoch recovery process and only by the consensus participants. On the happy path, +// each consensus committee member takes part in the DKG, and after successfully finishing the DKG protocol it obtains a +// random beacon private key, which is stored in the database along with DKG state [flow.DKGStateCompleted]. If for any +// reason DKG fails, then the private key will be nil and DKG end state will be equal to [flow.DKGStateFailure]. +// This module allows overwriting the random beacon private key in case of EFM recovery or other configuration issues. +type EpochRecoveryMyBeaconKey interface { + DKGStateReader - // RetrieveMyBeaconPrivateKey retrieves my beacon private key for the given - // epoch, only if my key has been confirmed valid and safe for use. - // - // Returns: - // - (key, true, nil) if the key is present and confirmed valid - // - (nil, false, nil) if the key has been marked invalid or unavailable - // -> no beacon key will ever be available for the epoch in this case - // - (nil, false, storage.ErrNotFound) if the DKG has not ended - // - (nil, false, error) for any unexpected exception - RetrieveMyBeaconPrivateKey(epochCounter uint64) (key crypto.PrivateKey, safe bool, err error) + // UpsertMyBeaconPrivateKey overwrites the random beacon private key for the epoch that recovers the protocol + // from Epoch Fallback Mode. The resulting state of this method call is [flow.RandomBeaconKeyCommitted]. + // State transitions are allowed if and only if the current state is not equal to [flow.RandomBeaconKeyCommitted]. + // Repeated calls for the same epoch are idempotent, if and only if the provided EpochCommit confirms the already + // committed key (error otherwise). + // No errors are expected during normal operations.
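For orientation, a hedged sketch of how a consumer might interpret the three-way return contract of `RetrieveMyBeaconPrivateKey` documented above; the helper and its error wrapping are hypothetical:

package example

import (
	"errors"
	"fmt"

	"github.com/onflow/crypto"

	"github.com/onflow/flow-go/storage"
)

// beaconKeyOrNil distinguishes the three documented outcomes: a safe key,
// a permanently unavailable key (nil, nil), or DKG still in progress (error).
func beaconKeyOrNil(keys storage.SafeBeaconKeys, epoch uint64) (crypto.PrivateKey, error) {
	key, safe, err := keys.RetrieveMyBeaconPrivateKey(epoch)
	if errors.Is(err, storage.ErrNotFound) {
		return nil, fmt.Errorf("DKG for epoch %d has not ended yet: %w", epoch, err)
	}
	if err != nil {
		return nil, fmt.Errorf("unexpected exception retrieving beacon key: %w", err)
	}
	if !safe {
		return nil, nil // no beacon key will ever be available for this epoch
	}
	return key, nil
}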
+ UpsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey, commit *flow.EpochCommit) error } diff --git a/storage/epoch_commits.go b/storage/epoch_commits.go index 97c23ca99a9..b555aa70e5a 100644 --- a/storage/epoch_commits.go +++ b/storage/epoch_commits.go @@ -1,16 +1,14 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package storage import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" ) type EpochCommits interface { - // StoreTx allows us to store a new epoch commit in a DB transaction while updating the cache. - StoreTx(commit *flow.EpochCommit) func(*transaction.Tx) error + // BatchStore allows us to store a new epoch commit in a DB batch update while updating the cache. + // No errors are expected during normal operation. + BatchStore(rw ReaderBatchWriter, commit *flow.EpochCommit) error // ByID will return the EpochCommit event by its ID. // Error returns: diff --git a/storage/epoch_protocol_state.go b/storage/epoch_protocol_state.go new file mode 100644 index 00000000000..6069e018968 --- /dev/null +++ b/storage/epoch_protocol_state.go @@ -0,0 +1,64 @@ +package storage + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" +) + +// EpochProtocolStateEntries represents persistent, fork-aware storage for the Epoch-related +// sub-state of the overall Protocol State (KV Store). +type EpochProtocolStateEntries interface { + + // BatchStore persists the given epoch protocol state entry as part of a DB batch. Per convention, the identities in + // the flow.MinEpochStateEntry must be in canonical order for the current and next epoch (if present), otherwise an + // exception is returned. + // + // CAUTION: The caller must ensure `epochProtocolStateID` is a collision-resistant hash of the provided + // `epochProtocolStateEntry`! This method silently overrides existing data, which is safe only if for the same + // key, we always write the same value. + // + // No errors are expected during normal operation. + BatchStore(w Writer, epochProtocolStateID flow.Identifier, epochProtocolStateEntry *flow.MinEpochStateEntry) error + + // BatchIndex persists the specific map entry in the node's database. + // In a nutshell, we want to maintain a map from `blockID` to `epochStateEntry`, where `blockID` references the + // block that _proposes_ the referenced epoch protocol state entry. + // Protocol convention: + // - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example, + // the protocol state changes if we seal some execution results emitting service events. + // - For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value, + // the hash of the resulting protocol state at the end of processing B is to be used. + // - IMPORTANT: The protocol state requires confirmation by a QC and will only become active at the child block, + // _after_ validating the QC. + // + // CAUTION: + // - The caller must acquire the lock [storage.LockInsertBlock] and hold it until the database write has been committed. + // - OVERWRITES existing data (potential for data corruption): + // The lock proof serves as a reminder that the CALLER is responsible for ensuring that the DEDUPLICATION CHECK is done elsewhere + // ATOMICALLY within this write operation. Currently this is done by operation.InsertHeader, which performs a check + // to ensure the blockID is new, therefore any data indexed by this blockID is new as well.
+ // + // No errors are expected during normal operations. + BatchIndex(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, epochProtocolStateID flow.Identifier) error + + // ByID returns the flow.RichEpochStateEntry by its ID. + // Expected errors during normal operations: + // - storage.ErrNotFound if no epoch state entry with the given Identifier is known. + ByID(id flow.Identifier) (*flow.RichEpochStateEntry, error) + + // ByBlockID retrieves the flow.RichEpochStateEntry that the block with the given ID proposes. + // CAUTION: this protocol state requires confirmation by a QC and will only become active at the child block, + // _after_ validating the QC. Protocol convention: + // - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example, + // the protocol state changes if we seal some execution results emitting service events. + // - For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value, + // the hash of the resulting protocol state at the end of processing B is to be used. + // + // Expected errors during normal operations: + // - storage.ErrNotFound if no epoch state entry has been indexed for the given block. + ByBlockID(blockID flow.Identifier) (*flow.RichEpochStateEntry, error) +} diff --git a/storage/epoch_setups.go b/storage/epoch_setups.go index d5023e68579..6df0b4364cf 100644 --- a/storage/epoch_setups.go +++ b/storage/epoch_setups.go @@ -1,16 +1,14 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package storage import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" ) type EpochSetups interface { - // StoreTx allows us to store a new epoch setup in a DB transaction while going through the cache. - StoreTx(*flow.EpochSetup) func(*transaction.Tx) error + // BatchStore allows us to store a new epoch setup in a DB batch update while going through the cache. + // No errors are expected during normal operation. + BatchStore(rw ReaderBatchWriter, setup *flow.EpochSetup) error // ByID will return the EpochSetup event by its ID. // Error returns: diff --git a/storage/epoch_statuses.go b/storage/epoch_statuses.go deleted file mode 100644 index 45b591cb0ae..00000000000 --- a/storage/epoch_statuses.go +++ /dev/null @@ -1,19 +0,0 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - -package storage - -import ( - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/storage/badger/transaction" -) - -type EpochStatuses interface { - - // StoreTx stores a new epoch state in a DB transaction while going through the cache. - StoreTx(blockID flow.Identifier, state *flow.EpochStatus) func(*transaction.Tx) error - - // ByBlockID will return the epoch status for the given block - // Error returns: - // * storage.ErrNotFound if EpochStatus for the block does not exist - ByBlockID(flow.Identifier) (*flow.EpochStatus, error) -} diff --git a/storage/errors.go b/storage/errors.go index ec9c89c02b8..b3d81d9709c 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -2,6 +2,9 @@ package storage import ( "errors" + "fmt" + + "github.com/onflow/flow-go/model/flow" ) var ( @@ -21,4 +24,36 @@ var ( // ErrDataMismatch is returned when a repeatable insert operation attempts // to insert a different value for the same key.
ErrDataMismatch = errors.New("data for key is different") + + // ErrHeightNotIndexed is returned when data that is indexed sequentially is queried by a given block height + // and that data is unavailable. + ErrHeightNotIndexed = errors.New("data for block height not available") + + // ErrNotBootstrapped is returned when the database has not been bootstrapped. + ErrNotBootstrapped = errors.New("pebble database not bootstrapped") ) + +// InvalidDKGStateTransitionError is an error that is returned in case an invalid state transition is attempted. +type InvalidDKGStateTransitionError struct { + err error + From flow.DKGState + To flow.DKGState +} + +func (e InvalidDKGStateTransitionError) Error() string { + return fmt.Sprintf("invalid state transition from %s to %s: %s", e.From.String(), e.To.String(), e.err.Error()) +} + +func IsInvalidDKGStateTransitionError(err error) bool { + var e InvalidDKGStateTransitionError + return errors.As(err, &e) +} + +// NewInvalidDKGStateTransitionErrorf constructs a new InvalidDKGStateTransitionError error with a formatted message. +func NewInvalidDKGStateTransitionErrorf(from, to flow.DKGState, msg string, args ...any) error { + return InvalidDKGStateTransitionError{ + From: from, + To: to, + err: fmt.Errorf(msg, args...), + } +} diff --git a/storage/events.go b/storage/events.go index c493b93747a..4062acea82e 100644 --- a/storage/events.go +++ b/storage/events.go @@ -4,12 +4,7 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// Events represents persistent storage for events. -type Events interface { - - // BatchStore will store events for the given block ID in a given batch - BatchStore(blockID flow.Identifier, events []flow.EventsList, batch BatchStorage) error - +type EventsReader interface { // ByBlockID returns the events for the given block ID ByBlockID(blockID flow.Identifier) ([]flow.Event, error) @@ -21,24 +16,35 @@ type Events interface { // ByBlockIDEventType returns the events for the given block ID and event type ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error) +} + +// Events represents persistent storage for events. +type Events interface { + EventsReader + + // Store will store events for the given block ID + Store(blockID flow.Identifier, blockEvents []flow.EventsList) error + + // BatchStore will store events for the given block ID in a given batch + BatchStore(blockID flow.Identifier, events []flow.EventsList, batch ReaderBatchWriter) error // BatchRemoveByBlockID removes events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. - // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. - BatchRemoveByBlockID(blockID flow.Identifier, batch BatchStorage) error + // If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. + BatchRemoveByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error } type ServiceEvents interface { // BatchStore stores service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. - // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. - BatchStore(blockID flow.Identifier, events []flow.Event, batch BatchStorage) error + // If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
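A minimal, hedged sketch of storing per-transaction events and service events for one block in a single batch, using the batch variants from this diff (the helper itself is hypothetical):

package example

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// storeBlockEvents writes per-transaction events and service events for one
// block atomically: either both updates land, or neither does.
func storeBlockEvents(db storage.DB, events storage.Events, svcEvents storage.ServiceEvents, blockID flow.Identifier, perTx []flow.EventsList, service []flow.Event) error {
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		if err := events.BatchStore(blockID, perTx, rw); err != nil {
			return err
		}
		return svcEvents.BatchStore(blockID, service, rw)
	})
}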
+ BatchStore(blockID flow.Identifier, events []flow.Event, batch ReaderBatchWriter) error // ByBlockID returns the events for the given block ID ByBlockID(blockID flow.Identifier) ([]flow.Event, error) // BatchRemoveByBlockID removes service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. - // If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. - BatchRemoveByBlockID(blockID flow.Identifier, batch BatchStorage) error + // If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. + BatchRemoveByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error } diff --git a/storage/execution_fork_evidence.go b/storage/execution_fork_evidence.go new file mode 100644 index 00000000000..b821021f468 --- /dev/null +++ b/storage/execution_fork_evidence.go @@ -0,0 +1,24 @@ +package storage + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" +) + +// ExecutionForkEvidence represents persistent storage for execution fork evidence. +// CAUTION: Not safe for concurrent use by multiple goroutines. +type ExecutionForkEvidence interface { + // StoreIfNotExists stores the given conflictingSeals in the database + // if no execution fork evidence is currently stored in the database. + // This function is a no-op if evidence is already stored, because + // only one execution fork evidence can be stored at a time. + // The caller must hold the [storage.LockInsertExecutionForkEvidence] lock. + // No errors are expected during normal operations. + StoreIfNotExists(lctx lockctx.Proof, conflictingSeals []*flow.IncorporatedResultSeal) error + + // Retrieve reads conflicting seals from the database. + // No error is returned if the database record doesn't exist. + // No errors are expected during normal operations. + Retrieve() ([]*flow.IncorporatedResultSeal, error) +} diff --git a/storage/guarantees.go b/storage/guarantees.go index 22804f22808..0d5eaed45a1 100644 --- a/storage/guarantees.go +++ b/storage/guarantees.go @@ -5,11 +5,16 @@ import ( ) // Guarantees represents persistent storage for collection guarantees. +// Must only be used to store finalized collection guarantees. type Guarantees interface { - // Store inserts the collection guarantee. - Store(guarantee *flow.CollectionGuarantee) error + // ByID returns the [flow.CollectionGuarantee] by its ID. + // Expected errors during normal operations: + // - [storage.ErrNotFound] if no collection guarantee with the given Identifier is known. + ByID(guaranteeID flow.Identifier) (*flow.CollectionGuarantee, error) // ByCollectionID retrieves the collection guarantee by collection ID. + // Expected errors during normal operations: + // - [storage.ErrNotFound] if no collection guarantee has been indexed for the given collection ID. ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error) } diff --git a/storage/headers.go b/storage/headers.go index a5f0aeca64e..045f2e39710 100644 --- a/storage/headers.go +++ b/storage/headers.go @@ -1,5 +1,3 @@ -// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED - package storage import ( @@ -9,28 +7,46 @@ import ( // Headers represents persistent storage for blocks. type Headers interface { - // Store will store a header. - Store(header *flow.Header) error - - // ByBlockID returns the header with the given ID. It is available for finalized and ambiguous blocks.
+ // ByBlockID returns the header with the given ID. It is available for finalized blocks and those pending finalization. // Error returns: - // - ErrNotFound if no block header with the given ID exists + // - [storage.ErrNotFound] if no block header with the given ID exists ByBlockID(blockID flow.Identifier) (*flow.Header, error) // ByHeight returns the block with the given height. It is only available for finalized blocks. + // Error returns: + // - [storage.ErrNotFound] if no finalized block is known at the given height ByHeight(height uint64) (*flow.Header, error) + // ByView returns the block with the given view. It is only available for certified blocks. + // Certified blocks are the blocks that have received a QC. Hotstuff guarantees that for each view, + // at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique + // even for non-finalized blocks. + // + // Expected errors during normal operations: + // - [storage.ErrNotFound] if no certified block is known at given view. + ByView(view uint64) (*flow.Header, error) + // Exists returns true if a header with the given ID has been stored. // No errors are expected during normal operation. Exists(blockID flow.Identifier) (bool, error) - // BlockIDByHeight the block ID that is finalized at the given height. It is an optimized version - // of `ByHeight` that skips retrieving the block. Expected errors during normal operations: - // * `storage.ErrNotFound` if no finalized block is known at given height + // BlockIDByHeight returns the block ID that is finalized at the given height. It is an optimized + // version of `ByHeight` that skips retrieving the block. Expected errors during normal operations: + // - [storage.ErrNotFound] if no finalized block is known at given height BlockIDByHeight(height uint64) (flow.Identifier, error) // ByParentID finds all children for the given parent block. The returned headers // might be unfinalized; if there is more than one, at least one of them has to // be unfinalized. + // CAUTION: this method is not backed by a cache and therefore comparatively slow! + // + // Expected error returns during normal operations: + // - [storage.ErrNotFound] if no block with the given parentID is known ByParentID(parentID flow.Identifier) ([]*flow.Header, error) + + // ProposalByBlockID returns the header with the given ID, along with the corresponding proposer signature. + // It is available for finalized blocks and those pending finalization. + // Error returns: + // - [storage.ErrNotFound] if no block header or proposer signature with the given blockID exists + ProposalByBlockID(blockID flow.Identifier) (*flow.ProposalHeader, error) } diff --git a/storage/height.go b/storage/height.go new file mode 100644 index 00000000000..b27882d2cc7 --- /dev/null +++ b/storage/height.go @@ -0,0 +1,15 @@ +package storage + +// HeightIndex defines methods for indexing height. +// This interface should normally be composed with some other resource we want to index by height. +type HeightIndex interface { + // LatestHeight returns the latest indexed height. + LatestHeight() (uint64, error) + // FirstHeight returns the first indexed height found in the store, i.e. the height at which indexing started. + FirstHeight() (uint64, error) + // SetLatestHeight updates the latest height. + // The provided height should either be one higher than the current height or the same to ensure idempotency. + // If the height is not within those bounds it will panic!
+ // An error might get returned if there are problems with persisting the height. + SetLatestHeight(height uint64) error +} diff --git a/storage/index.go b/storage/index.go index f1f76e8df5b..377cd25e68a 100644 --- a/storage/index.go +++ b/storage/index.go @@ -6,9 +6,8 @@ import ( type Index interface { - // Store stores the index for a block payload. - Store(blockID flow.Identifier, index *flow.Index) error - // ByBlockID retrieves the index for a block payload. + // Error returns: + // - ErrNotFound if no index with the given block ID exists ByBlockID(blockID flow.Identifier) (*flow.Index, error) } diff --git a/storage/inmemory/collections_reader.go b/storage/inmemory/collections_reader.go new file mode 100644 index 00000000000..1efc5bc16d7 --- /dev/null +++ b/storage/inmemory/collections_reader.go @@ -0,0 +1,75 @@ +package inmemory + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +type CollectionsReader struct { + collections map[flow.Identifier]flow.Collection + lightCollections map[flow.Identifier]*flow.LightCollection + transactionIDToLightCollection map[flow.Identifier]*flow.LightCollection +} + +var _ storage.CollectionsReader = (*CollectionsReader)(nil) + +func NewCollections(collections []*flow.Collection) *CollectionsReader { + collectionsMap := make(map[flow.Identifier]flow.Collection) + lightCollections := make(map[flow.Identifier]*flow.LightCollection) + transactionIDToLightCollection := make(map[flow.Identifier]*flow.LightCollection) + + for _, collection := range collections { + light := collection.Light() + collectionID := light.ID() + collectionsMap[collectionID] = *collection + lightCollections[collectionID] = light + for _, txID := range light.Transactions { + transactionIDToLightCollection[txID] = light + } + } + + return &CollectionsReader{ + collections: collectionsMap, + lightCollections: lightCollections, + transactionIDToLightCollection: transactionIDToLightCollection, + } +} + +// ByID returns the collection with the given ID, including all transactions within the collection. + +// +// Expected error returns during normal operation: +// - [storage.ErrNotFound] if no collection was found. +func (c *CollectionsReader) ByID(collID flow.Identifier) (*flow.Collection, error) { + val, ok := c.collections[collID] + if !ok { + return nil, storage.ErrNotFound + } + + return &val, nil +} + +// LightByID returns the collection with the given ID. Only retrieves transaction hashes. + // + // Expected error returns during normal operation: + // - [storage.ErrNotFound] if no light collection was found. +func (c *CollectionsReader) LightByID(collID flow.Identifier) (*flow.LightCollection, error) { + val, ok := c.lightCollections[collID] + if !ok { + return nil, storage.ErrNotFound + } + + return val, nil + } + +// LightByTransactionID returns the collection for the given transaction ID. Only retrieves transaction hashes. + // + // Expected error returns during normal operation: + // - [storage.ErrNotFound] if no light collection was found.
+func (c *CollectionsReader) LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error) { + val, ok := c.transactionIDToLightCollection[txID] + if !ok { + return nil, storage.ErrNotFound + } + + return val, nil +} diff --git a/storage/inmemory/collections_reader_test.go b/storage/inmemory/collections_reader_test.go new file mode 100644 index 00000000000..583dcc0eb54 --- /dev/null +++ b/storage/inmemory/collections_reader_test.go @@ -0,0 +1,44 @@ +package inmemory + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestCollection_HappyCase(t *testing.T) { + collection := unittest.CollectionFixture(3) + lightCollection := collection.Light() + collectionID := collection.ID() + + collections := NewCollections([]*flow.Collection{&collection}) + + // Retrieve collection + retrieved, err := collections.ByID(collectionID) + require.NoError(t, err) + require.Equal(t, &collection, retrieved) + + retrievedLight, err := collections.LightByTransactionID(collection.Transactions[1].ID()) + require.NoError(t, err) + require.Equal(t, *lightCollection, *retrievedLight) + + retrievedLight, err = collections.LightByID(collectionID) + require.NoError(t, err) + require.Equal(t, *lightCollection, *retrievedLight) + + retrieved, err = collections.ByID(unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, retrieved) + + retrievedLight, err = collections.LightByTransactionID(unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, retrievedLight) + + retrievedLight, err = collections.LightByID(unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, retrievedLight) +} diff --git a/storage/inmemory/events_reader.go b/storage/inmemory/events_reader.go new file mode 100644 index 00000000000..9e5a6f15da6 --- /dev/null +++ b/storage/inmemory/events_reader.go @@ -0,0 +1,89 @@ +package inmemory + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +type EventsReader struct { + blockID flow.Identifier + events []flow.Event +} + +var _ storage.EventsReader = (*EventsReader)(nil) + +func NewEvents(blockID flow.Identifier, events []flow.Event) *EventsReader { + return &EventsReader{ + blockID: blockID, + events: events, + } +} + +// ByBlockID returns the events for the given block ID. +// +// Expected error returns during normal operation: +// - [storage.ErrNotFound] if no events were found at given block. +func (e *EventsReader) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) { + if e.blockID != blockID { + return nil, storage.ErrNotFound + } + + return e.events, nil +} + +// ByBlockIDTransactionID returns the events for the given block ID and transaction ID. +// +// Expected error returns during normal operation: +// - [storage.ErrNotFound] if no events were found at given block and transaction. 
+func (e *EventsReader) ByBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) ([]flow.Event, error) {
+	if e.blockID != blockID {
+		return nil, storage.ErrNotFound
+	}
+
+	var matched []flow.Event
+	for _, event := range e.events {
+		if event.TransactionID == txID {
+			matched = append(matched, event)
+		}
+	}
+
+	return matched, nil
+}
+
+// ByBlockIDTransactionIndex returns the events for the transaction at the given index in a given block.
+//
+// Expected error returns during normal operation:
+//   - [storage.ErrNotFound] if no events were found at given block and transaction.
+func (e *EventsReader) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) ([]flow.Event, error) {
+	if e.blockID != blockID {
+		return nil, storage.ErrNotFound
+	}
+
+	var matched []flow.Event
+	for _, event := range e.events {
+		if event.TransactionIndex == txIndex {
+			matched = append(matched, event)
+		}
+	}
+
+	return matched, nil
+}
+
+// ByBlockIDEventType returns the events for the given block ID and event type.
+//
+// Expected error returns during normal operation:
+//   - [storage.ErrNotFound] if no events were found at given block.
+func (e *EventsReader) ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error) {
+	if e.blockID != blockID {
+		return nil, storage.ErrNotFound
+	}
+
+	var matched []flow.Event
+	for _, event := range e.events {
+		if event.Type == eventType {
+			matched = append(matched, event)
+		}
+	}
+
+	return matched, nil
+}
diff --git a/storage/inmemory/events_reader_test.go b/storage/inmemory/events_reader_test.go
new file mode 100644
index 00000000000..cd349b6bdcb
--- /dev/null
+++ b/storage/inmemory/events_reader_test.go
@@ -0,0 +1,70 @@
+package inmemory
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestEvents_HappyPath(t *testing.T) {
+	// Define test block and transaction
+	blockID := unittest.IdentifierFixture()
+	transaction1 := unittest.TransactionFixture()
+	transaction2 := unittest.TransactionFixture()
+
+	event1 := unittest.EventFixture(
+		unittest.Event.WithEventType(flow.EventAccountCreated),
+		unittest.Event.WithTransactionIndex(0),
+		unittest.Event.WithEventIndex(0),
+		unittest.Event.WithTransactionID(transaction1.ID()),
+	)
+	event2 := unittest.EventFixture(
+		unittest.Event.WithEventType(flow.EventAccountUpdated),
+		unittest.Event.WithTransactionIndex(0),
+		unittest.Event.WithEventIndex(1),
+		unittest.Event.WithTransactionID(transaction1.ID()),
+	)
+	event3 := unittest.EventFixture(
+		unittest.Event.WithEventType(flow.EventAccountCreated),
+		unittest.Event.WithTransactionIndex(1),
+		unittest.Event.WithEventIndex(2),
+		unittest.Event.WithTransactionID(transaction2.ID()),
+	)
+
+	expectedStoredEvents := []flow.Event{event1, event2, event3}
+
+	// Store events
+	eventsStore := NewEvents(blockID, expectedStoredEvents)
+
+	// Retrieve events by block ID
+	storedEvents, err := eventsStore.ByBlockID(blockID)
+	require.NoError(t, err)
+	assert.Len(t, storedEvents, len(expectedStoredEvents))
+	assert.Contains(t, storedEvents, event1)
+	assert.Contains(t, storedEvents, event2)
+	assert.Contains(t, storedEvents, event3)
+
+	// Retrieve events by transaction ID
+	txEvents, err := eventsStore.ByBlockIDTransactionID(blockID, transaction1.ID())
+	require.NoError(t, err)
+	assert.Len(t, txEvents, 2)
+	assert.Equal(t, event1, txEvents[0])
+	assert.Equal(t, event2, txEvents[1])
+
+	// Retrieve events by transaction index
+	indexEvents, err := eventsStore.ByBlockIDTransactionIndex(blockID, 1)
+	require.NoError(t, err)
+	assert.Len(t, indexEvents, 1)
+	assert.Equal(t, event3, indexEvents[0])
+
+	// Retrieve events by event type
+	typeEvents, err := eventsStore.ByBlockIDEventType(blockID, flow.EventAccountCreated)
+	require.NoError(t, err)
+	assert.Len(t, typeEvents, 2)
+	assert.Contains(t, typeEvents, event1)
+	assert.Contains(t, typeEvents, event3)
+}
diff --git a/storage/inmemory/light_transaction_results_reader.go b/storage/inmemory/light_transaction_results_reader.go
new file mode 100644
index 00000000000..5cb0cc89f5e
--- /dev/null
+++ b/storage/inmemory/light_transaction_results_reader.go
@@ -0,0 +1,78 @@
+package inmemory
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+type LightTransactionResultsReader struct {
+	blockID   flow.Identifier
+	results   []flow.LightTransactionResult
+	byTxID    map[flow.Identifier]*flow.LightTransactionResult
+	byTxIndex map[uint32]*flow.LightTransactionResult
+}
+
+var _ storage.LightTransactionResultsReader = (*LightTransactionResultsReader)(nil)
+
+func NewLightTransactionResults(blockID flow.Identifier, results []flow.LightTransactionResult) *LightTransactionResultsReader {
+	byTxID := make(map[flow.Identifier]*flow.LightTransactionResult)
+	byTxIndex := make(map[uint32]*flow.LightTransactionResult)
+
+	// index pointers into the backing slice, so the map values stay valid and unique per entry
+	for i := range results {
+		result := &results[i]
+		byTxID[result.TransactionID] = result
+		byTxIndex[uint32(i)] = result
+	}
+
+	return &LightTransactionResultsReader{
+		blockID:   blockID,
+		results:   results,
+		byTxID:    byTxID,
+		byTxIndex: byTxIndex,
+	}
+}
+
+// ByBlockIDTransactionID returns the transaction result for the given block ID and transaction
+//
+// Expected error returns during normal operation:
+//   - [storage.ErrNotFound] if light transaction result at given blockID wasn't found.
+func (l *LightTransactionResultsReader) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.LightTransactionResult, error) {
+	if l.blockID != blockID {
+		return nil, storage.ErrNotFound
+	}
+
+	val, ok := l.byTxID[transactionID]
+	if !ok {
+		return nil, storage.ErrNotFound
+	}
+
+	return val, nil
+}
+
+// ByBlockIDTransactionIndex returns the transaction result for the given blockID and transaction index
+//
+// Expected error returns during normal operation:
+//   - [storage.ErrNotFound] if light transaction result at given blockID and txIndex wasn't found.
+func (l *LightTransactionResultsReader) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.LightTransactionResult, error) {
+	if l.blockID != blockID {
+		return nil, storage.ErrNotFound
+	}
+
+	val, ok := l.byTxIndex[txIndex]
+	if !ok {
+		return nil, storage.ErrNotFound
+	}
+
+	return val, nil
+}
+
+// ByBlockID gets all transaction results for a block, ordered by transaction index
+//
+// Expected error returns during normal operation:
+//   - [storage.ErrNotFound] if light transaction results at given blockID weren't found.
+func (l *LightTransactionResultsReader) ByBlockID(id flow.Identifier) ([]flow.LightTransactionResult, error) {
+	if l.blockID != id {
+		return nil, storage.ErrNotFound
+	}
+
+	return l.results, nil
+}
diff --git a/storage/inmemory/light_transaction_results_reader_test.go b/storage/inmemory/light_transaction_results_reader_test.go
new file mode 100644
index 00000000000..adf9f6dbd86
--- /dev/null
+++ b/storage/inmemory/light_transaction_results_reader_test.go
@@ -0,0 +1,35 @@
+package inmemory
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestLightTransactionResults_HappyPath(t *testing.T) {
+	// Define block ID and transaction results
+	blockID := unittest.IdentifierFixture()
+	txResults := unittest.LightTransactionResultsFixture(10)
+
+	ltx := NewLightTransactionResults(blockID, txResults)
+
+	// Retrieve by BlockID and TransactionID
+	retrievedTx, err := ltx.ByBlockIDTransactionID(blockID, txResults[0].TransactionID)
+	require.NoError(t, err)
+	assert.Equal(t, &txResults[0], retrievedTx)
+
+	// Retrieve by BlockID and Index
+	retrievedTxByIndex, err := ltx.ByBlockIDTransactionIndex(blockID, 0)
+	require.NoError(t, err)
+	assert.Equal(t, &txResults[0], retrievedTxByIndex)
+
+	// Retrieve by BlockID
+	retrievedTxs, err := ltx.ByBlockID(blockID)
+	require.NoError(t, err)
+	assert.Len(t, retrievedTxs, len(txResults))
+	assert.Equal(t, txResults, retrievedTxs)
+}
diff --git a/storage/inmemory/registers_reader.go b/storage/inmemory/registers_reader.go
new file mode 100644
index 00000000000..2445f0df4f3
--- /dev/null
+++ b/storage/inmemory/registers_reader.go
@@ -0,0 +1,54 @@
+package inmemory
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// RegistersReader is a simple in-memory implementation of the RegisterIndexReader interface.
+// It stores registers for a single block height.
+type RegistersReader struct {
+	blockHeight uint64
+	store       map[flow.RegisterID]flow.RegisterEntry
+}
+
+var _ storage.RegisterIndexReader = (*RegistersReader)(nil)
+
+func NewRegisters(blockHeight uint64, registers []flow.RegisterEntry) *RegistersReader {
+	store := make(map[flow.RegisterID]flow.RegisterEntry)
+	for _, reg := range registers {
+		store[reg.Key] = reg
+	}
+	return &RegistersReader{
+		blockHeight: blockHeight,
+		store:       store,
+	}
+}
+
+// Get returns the register value for the given register ID at the storage's block height.
+//
+// Expected error returns during normal operation:
+//   - [storage.ErrNotFound] if the register does not exist in this storage object
+//   - [storage.ErrHeightNotIndexed] if the given height does not match the storage's block height
+func (r *RegistersReader) Get(registerID flow.RegisterID, height uint64) (flow.RegisterValue, error) {
+	if r.blockHeight != height {
+		return flow.RegisterValue{}, storage.ErrHeightNotIndexed
+	}
+
+	reg, ok := r.store[registerID]
+	if !ok {
+		return flow.RegisterValue{}, storage.ErrNotFound
+	}
+
+	return reg.Value, nil
+}
+
+// LatestHeight returns the latest indexed height.
+func (r *RegistersReader) LatestHeight() uint64 {
+	return r.blockHeight
+}
+
+// FirstHeight returns the first indexed height found in the store.
+func (r *RegistersReader) FirstHeight() uint64 {
+	return r.blockHeight
+}
diff --git a/storage/inmemory/registers_reader_test.go b/storage/inmemory/registers_reader_test.go
new file mode 100644
index 00000000000..63dee4e6650
--- /dev/null
+++ b/storage/inmemory/registers_reader_test.go
@@ -0,0 +1,45 @@
+package inmemory
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestRegisters_HappyPath(t *testing.T) {
+	// Prepare register entries
+	entry1 := unittest.RegisterEntryFixture()
+	entry1.Key = flow.RegisterID{Owner: "owner1", Key: "key1"}
+
+	entry2 := unittest.RegisterEntryFixture()
+	entry2.Key = flow.RegisterID{Owner: "owner2", Key: "key2"}
+
+	entries := flow.RegisterEntries{entry1, entry2}
+
+	height := uint64(42)
+	registers := NewRegisters(height, entries)
+
+	require.Equal(t, height, registers.FirstHeight())
+	require.Equal(t, height, registers.LatestHeight())
+
+	// Retrieve both entries
+	got1, err := registers.Get(entry1.Key, height)
+	require.NoError(t, err)
+	require.Equal(t, entry1.Value, got1)
+
+	got2, err := registers.Get(entry2.Key, height)
+	require.NoError(t, err)
+	require.Equal(t, entry2.Value, got2)
+
+	// Try retrieving at the wrong height
+	_, err = registers.Get(entry1.Key, height+1)
+	require.ErrorIs(t, err, storage.ErrHeightNotIndexed)
+
+	// Try getting a non-existent key
+	_, err = registers.Get(unittest.RegisterIDFixture(), height)
+	require.ErrorIs(t, err, storage.ErrNotFound)
+}
diff --git a/storage/inmemory/transaction_result_error_messages_reader.go b/storage/inmemory/transaction_result_error_messages_reader.go
new file mode 100644
index 00000000000..33accf3d4dc
--- /dev/null
+++ b/storage/inmemory/transaction_result_error_messages_reader.go
@@ -0,0 +1,103 @@
+package inmemory
+
+import (
+	"errors"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+type TransactionResultErrorMessagesReader struct {
+	blockID     flow.Identifier
+	errMessages []flow.TransactionResultErrorMessage
+	byTxID      map[flow.Identifier]*flow.TransactionResultErrorMessage
+	byTxIndex   map[uint32]*flow.TransactionResultErrorMessage
+}
+
+var _ storage.TransactionResultErrorMessagesReader = (*TransactionResultErrorMessagesReader)(nil)
+
+func NewTransactionResultErrorMessages(blockID flow.Identifier, errMessages []flow.TransactionResultErrorMessage) *TransactionResultErrorMessagesReader {
+	byTxID := make(map[flow.Identifier]*flow.TransactionResultErrorMessage)
+	byTxIndex := make(map[uint32]*flow.TransactionResultErrorMessage)
+
+	// index pointers into the backing slice, so the map values stay valid and unique per entry
+	for i := range errMessages {
+		errMessage := &errMessages[i]
+		byTxID[errMessage.TransactionID] = errMessage
+		byTxIndex[uint32(i)] = errMessage
+	}
+
+	return &TransactionResultErrorMessagesReader{
+		blockID:     blockID,
+		errMessages: errMessages,
+		byTxID:      byTxID,
+		byTxIndex:   byTxIndex,
+	}
+}
+
+// Exists returns true if transaction result error messages for the given ID have been stored.
+//
+// No error returns are expected during normal operation.
+func (t *TransactionResultErrorMessagesReader) Exists(blockID flow.Identifier) (bool, error) {
+	_, err := t.ByBlockID(blockID)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return false, nil
+		}
+
+		return false, err
+	}
+
+	return true, nil
+}
+
+// ByBlockIDTransactionID returns the transaction result error message for the given block ID and transaction ID.
+//
+// Expected error returns during normal operation:
+//   - [storage.ErrNotFound] if no transaction error message is known at given block and transaction id.
+func (t *TransactionResultErrorMessagesReader) ByBlockIDTransactionID(
+	blockID flow.Identifier,
+	transactionID flow.Identifier,
+) (*flow.TransactionResultErrorMessage, error) {
+	if t.blockID != blockID {
+		return nil, storage.ErrNotFound
+	}
+
+	val, ok := t.byTxID[transactionID]
+	if !ok {
+		return nil, storage.ErrNotFound
+	}
+
+	return val, nil
+}
+
+// ByBlockIDTransactionIndex returns the transaction result error message for the given blockID and transaction index.
+//
+// Expected error returns during normal operation:
+//   - [storage.ErrNotFound] if no transaction error message is known at given block and transaction index.
+func (t *TransactionResultErrorMessagesReader) ByBlockIDTransactionIndex(
+	blockID flow.Identifier,
+	txIndex uint32,
+) (*flow.TransactionResultErrorMessage, error) {
+	if t.blockID != blockID {
+		return nil, storage.ErrNotFound
+	}
+
+	val, ok := t.byTxIndex[txIndex]
+	if !ok {
+		return nil, storage.ErrNotFound
+	}
+
+	return val, nil
+}
+
+// ByBlockID gets all transaction result error messages for a block, ordered by transaction index.
+// Note: this method returns an empty slice if the block is known but has no transaction error
+// messages; if the block is unknown, it returns [storage.ErrNotFound].
+//
+// Expected error returns during normal operation:
+//   - [storage.ErrNotFound] if no block was found.
+func (t *TransactionResultErrorMessagesReader) ByBlockID(id flow.Identifier) ([]flow.TransactionResultErrorMessage, error) {
+	if t.blockID != id {
+		return nil, storage.ErrNotFound
+	}
+
+	return t.errMessages, nil
+}
diff --git a/storage/inmemory/transaction_result_error_messages_reader_test.go b/storage/inmemory/transaction_result_error_messages_reader_test.go
new file mode 100644
index 00000000000..2bd69a3fd38
--- /dev/null
+++ b/storage/inmemory/transaction_result_error_messages_reader_test.go
@@ -0,0 +1,50 @@
+package inmemory
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestTransactionResultErrorMessages_HappyPath(t *testing.T) {
+	// Define block ID and error messages
+	blockID := unittest.IdentifierFixture()
+	txResults := unittest.TransactionResultsFixture(2)
+	errorMessages := []flow.TransactionResultErrorMessage{
+		{
+			TransactionID: txResults[0].TransactionID,
+			Index:         0,
+			ErrorMessage:  "dummy error message 0",
+			ExecutorID:    unittest.IdentifierFixture(),
+		},
+		{
+			TransactionID: txResults[1].TransactionID,
+			Index:         1,
+			ErrorMessage:  "dummy error message 1",
+			ExecutorID:    unittest.IdentifierFixture(),
+		},
+	}
+
+	storage := NewTransactionResultErrorMessages(blockID, errorMessages)
+
+	// Retrieve by BlockID and TransactionID
+	retrievedErrorMessage, err := storage.ByBlockIDTransactionID(blockID, errorMessages[0].TransactionID)
+	require.NoError(t, err)
+	assert.Equal(t, &errorMessages[0], retrievedErrorMessage)
+
+	// Retrieve by BlockID and Index
+	retrievedErrorMessageByIndex, err := storage.ByBlockIDTransactionIndex(blockID, 0)
+	require.NoError(t, err)
+	assert.Equal(t, &errorMessages[0], retrievedErrorMessageByIndex)
+
+	// Retrieve by BlockID
+	retrievedErrorMessages, err := storage.ByBlockID(blockID)
+	require.NoError(t, err)
+	assert.Len(t, retrievedErrorMessages, len(errorMessages))
+	assert.Equal(t, errorMessages, retrievedErrorMessages)
+}
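Editor's note: the in-memory readers in this package all follow the same single-block pattern: each is constructed with the data for exactly one block and returns [storage.ErrNotFound] for every other identifier. A minimal sketch (not part of this diff) of wiring two of them together in a test; it uses only the constructors and fixtures shown above, with imports of inmemory, storage, unittest, and require elided.

	func TestSingleBlockReaders(t *testing.T) {
		blockID := unittest.IdentifierFixture()
		results := inmemory.NewLightTransactionResults(blockID, unittest.LightTransactionResultsFixture(3))
		events := inmemory.NewEvents(blockID, nil)

		// reads for the configured block succeed
		_, err := results.ByBlockID(blockID)
		require.NoError(t, err)

		// reads for any other block uniformly fail with storage.ErrNotFound
		_, err = events.ByBlockID(unittest.IdentifierFixture())
		require.ErrorIs(t, err, storage.ErrNotFound)
	}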
diff --git a/storage/inmemory/transactions_reader.go b/storage/inmemory/transactions_reader.go
new file mode 100644
index 00000000000..26bd647492f
--- /dev/null
+++ b/storage/inmemory/transactions_reader.go
@@ -0,0 +1,36 @@
+package inmemory
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+type TransactionsReader struct {
+	transactions map[flow.Identifier]flow.TransactionBody
+}
+
+var _ storage.TransactionsReader = (*TransactionsReader)(nil)
+
+func NewTransactions(transactions []*flow.TransactionBody) *TransactionsReader {
+	transactionsMap := make(map[flow.Identifier]flow.TransactionBody)
+	for _, transaction := range transactions {
+		transactionsMap[transaction.ID()] = *transaction
+	}
+
+	return &TransactionsReader{
+		transactions: transactionsMap,
+	}
+}
+
+// ByID returns the transaction for the given fingerprint.
+//
+// Expected error returns during normal operation:
+//   - [storage.ErrNotFound] if no transaction with the provided ID was found.
+func (t *TransactionsReader) ByID(txID flow.Identifier) (*flow.TransactionBody, error) {
+	val, ok := t.transactions[txID]
+	if !ok {
+		return nil, storage.ErrNotFound
+	}
+
+	return &val, nil
+}
diff --git a/storage/inmemory/transactions_reader_test.go b/storage/inmemory/transactions_reader_test.go
new file mode 100644
index 00000000000..5d14c95f6dc
--- /dev/null
+++ b/storage/inmemory/transactions_reader_test.go
@@ -0,0 +1,23 @@
+package inmemory
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestTransactions_HappyPath(t *testing.T) {
+	tx := unittest.TransactionBodyFixture()
+	txStore := NewTransactions([]*flow.TransactionBody{&tx})
+
+	// Retrieve the transaction by ID
+	retrievedTx, err := txStore.ByID(tx.ID())
+	require.NoError(t, err, "retrieving stored transaction should not return an error")
+	require.NotNil(t, retrievedTx, "retrieved transaction should not be nil")
+
+	// Ensure the retrieved transaction matches the stored one
+	require.Equal(t, &tx, retrievedTx, "retrieved transaction should match the stored transaction")
+}
diff --git a/storage/latest_persisted_sealed_result.go b/storage/latest_persisted_sealed_result.go
new file mode 100644
index 00000000000..5a49040cf31
--- /dev/null
+++ b/storage/latest_persisted_sealed_result.go
@@ -0,0 +1,17 @@
+package storage
+
+import "github.com/onflow/flow-go/model/flow"
+
+// LatestPersistedSealedResult tracks the most recently persisted sealed execution result processed
+// by the Access ingestion engine.
+type LatestPersistedSealedResult interface {
+	// Latest returns the ID and height of the latest persisted sealed result.
+	Latest() (flow.Identifier, uint64)
+
+	// BatchSet updates the latest persisted sealed result in a batch operation.
+	// The resultID and height are added to the provided batch, and the local data is updated only after
+	// the batch is successfully committed.
+	//
+	// No errors are expected during normal operation.
+	BatchSet(resultID flow.Identifier, height uint64, batch ReaderBatchWriter) error
+}
diff --git a/storage/ledger.go b/storage/ledger.go
old mode 100755
new mode 100644
diff --git a/storage/light_transaction_results.go b/storage/light_transaction_results.go
new file mode 100644
index 00000000000..e2109d8e450
--- /dev/null
+++ b/storage/light_transaction_results.go
@@ -0,0 +1,35 @@
+package storage
+
+import "github.com/onflow/flow-go/model/flow"
+
+// LightTransactionResultsReader represents persistent storage read operations for light transaction result
+type LightTransactionResultsReader interface {
+	// ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID
+	//
+	// Expected errors during normal operation:
+	//   - `storage.ErrNotFound` if light transaction result at given blockID wasn't found.
+	ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.LightTransactionResult, error)
+
+	// ByBlockIDTransactionIndex returns the transaction result for the given blockID and transaction index
+	//
+	// Expected errors during normal operation:
+	//   - `storage.ErrNotFound` if light transaction result at given blockID and txIndex wasn't found.
+	ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.LightTransactionResult, error)
+
+	// ByBlockID gets all transaction results for a block, ordered by transaction index
+	//
+	// Expected errors during normal operation:
+	//   - `storage.ErrNotFound` if light transaction results at given blockID weren't found.
+	ByBlockID(id flow.Identifier) ([]flow.LightTransactionResult, error)
+}
+
+// LightTransactionResults represents persistent storage for light transaction result
+type LightTransactionResults interface {
+	LightTransactionResultsReader
+
+	// BatchStore inserts a batch of transaction result into a batch
+	BatchStore(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, rw ReaderBatchWriter) error
+
+	// Deprecated: part of the transition from Badger to Pebble. Use BatchStore instead.
+	BatchStoreBadger(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, batch BatchStorage) error
+}
diff --git a/storage/locks.go b/storage/locks.go
new file mode 100644
index 00000000000..cedeff6c65f
--- /dev/null
+++ b/storage/locks.go
@@ -0,0 +1,167 @@
+package storage
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/jordanschalm/lockctx"
+)
+
+// This file enumerates all named locks used by the storage layer.
+
+const (
+	// LockInsertBlock protects the entire block insertion process (`ParticipantState.Extend` or `FollowerState.ExtendCertified`)
+	LockInsertBlock = "lock_insert_block"
+	// LockFinalizeBlock protects the entire block finalization process (`FollowerState.Finalize`)
+	LockFinalizeBlock = "lock_finalize_block"
+	// LockIndexResultApproval protects indexing result approvals by approval and chunk.
+	LockIndexResultApproval = "lock_index_result_approval"
+	// LockInsertOrFinalizeClusterBlock protects the entire cluster block insertion or finalization process.
+	// They are combined because the insertion process reads data that the finalization process updates;
+	// acquiring a single lock for both operations prevents dirty reads.
+	LockInsertOrFinalizeClusterBlock = "lock_insert_or_finalize_cluster_block"
+	// LockInsertOwnReceipt is intended for Execution Nodes to ensure that they never publish different receipts for the same block.
+	// Specifically, with this lock we prevent accidental overwrites of the index `executed block ID` ➜ `Receipt ID`.
+	LockInsertOwnReceipt = "lock_insert_own_receipt"
+	// LockInsertCollection protects the insertion of collections.
+	LockInsertCollection = "lock_insert_collection"
+	// LockBootstrapping protects data that is *exclusively* written during bootstrapping.
+	LockBootstrapping = "lock_bootstrapping"
+	// LockInsertChunkDataPack protects the insertion of chunk data packs (not yet used anywhere)
+	LockInsertChunkDataPack = "lock_insert_chunk_data_pack"
+	// LockInsertExecutionForkEvidence protects the insertion of execution fork evidence
+	LockInsertExecutionForkEvidence = "lock_insert_execution_fork_evidence"
+	LockInsertSafetyData            = "lock_insert_safety_data"
+	LockInsertLivenessData          = "lock_insert_liveness_data"
+)
+
+// Locks returns a list of all named locks used by the storage layer.
+func Locks() []string {
+	return []string{
+		LockInsertBlock,
+		LockFinalizeBlock,
+		LockIndexResultApproval,
+		LockInsertOrFinalizeClusterBlock,
+		LockInsertOwnReceipt,
+		LockInsertCollection,
+		LockBootstrapping,
+		LockInsertChunkDataPack,
+		LockInsertExecutionForkEvidence,
+		LockInsertSafetyData,
+		LockInsertLivenessData,
+	}
+}
+
+type LockManager = lockctx.Manager
+
+// makeLockPolicy constructs the policy used by the storage layer to prevent deadlocks.
+// We use a policy defined by a directed acyclic graph, where vertices represent named locks.
+// A directed edge between two vertices A, B means: I can acquire B next after acquiring A.
+// When no edges are added, each lock context may acquire at most one lock.
+//
+// For example, the bootstrapping logic both inserts and finalizes blocks, so it needs to
+// acquire both LockInsertBlock and LockFinalizeBlock. To allow this, we add the directed
+// edge LockInsertBlock -> LockFinalizeBlock with `Add(LockInsertBlock, LockFinalizeBlock)`.
+// This means:
+//   - a context can acquire either LockInsertBlock or LockFinalizeBlock first (always true)
+//   - a context holding LockInsertBlock can acquire LockFinalizeBlock next (allowed by the edge)
+//   - a context holding LockFinalizeBlock cannot acquire LockInsertBlock next (disallowed, because the edge is directed)
+//
+// This function will panic if a policy is created which does not prevent deadlocks.
+func makeLockPolicy() lockctx.Policy {
+	return lockctx.NewDAGPolicyBuilder().
+		Add(LockInsertBlock, LockFinalizeBlock).
+		Add(LockFinalizeBlock, LockBootstrapping).
+		Add(LockBootstrapping, LockInsertSafetyData).
+		Add(LockInsertSafetyData, LockInsertLivenessData).
+		Add(LockInsertOrFinalizeClusterBlock, LockInsertSafetyData).
+		Add(LockInsertOwnReceipt, LockInsertChunkDataPack).
+		Build()
+}
+
+var makeLockManagerOnce sync.Once
+
+// MakeSingletonLockManager returns the lock manager used by the storage layer.
+// This function must be used for production builds and must be called exactly once process-wide.
+//
+// The Lock Manager is a core component enforcing atomicity of various storage operations across different
+// components. Therefore, the lock manager is a singleton instance, as the storage layer's atomicity and
+// consistency depends on the same set of locks being used everywhere.
+// By convention, the lock manager singleton is injected into the node's components during their
+// initialization, following the same dependency-injection pattern as other components that are conceptually
+// singletons (e.g. the storage layer abstractions). Thereby, we explicitly codify in the constructor that a
+// component uses the lock manager. We think it is helpful to emphasize that the component at times
+// will acquire _exclusive access_ to all key-value pairs in the database whose keys start with some specific
+// prefixes (see `storage/badger/operation/prefix.go` for an exhaustive list of prefixes).
+// In comparison, the alternative pattern (which we do not use) of retrieving a singleton instance via a
+// global variable would hide which components required exclusive storage access, and in addition, it would
+// break with our broadly established dependency-injection pattern. To enforce best practices, this function
+// will panic if it is called more than once.
+//
+// CAUTION:
+//   - The lock manager only guarantees atomicity of reads and writes for the thread holding the lock.
+//     Other threads can continue to read (possibly stale) values, while the lock is held by a different thread.
+//   - Furthermore, the writer must bundle all their writes into a _single_ Write Batch for atomicity. Even
+//     when holding the lock, reading threads can still observe the writes of one batch while not observing
+//     the writes of a second batch, despite the thread writing both batches while holding the lock. It was
+//     a deliberate choice for the sake of performance to allow reads without any locking - so instead of
+//     waiting for the newest value in case a write is currently ongoing, the reader will just retrieve the
+//     previous value. This aligns with our architecture of the node operating as an eventually-consistent
+//     system, which favors loose coupling and high throughput for different components within a node.
+func MakeSingletonLockManager() lockctx.Manager {
+	var manager lockctx.Manager
+	makeLockManagerOnce.Do(func() {
+		manager = lockctx.NewManager(Locks(), makeLockPolicy())
+	})
+	if manager == nil {
+		panic("critical sanity check failed: MakeSingletonLockManager invoked more than once")
+	}
+	return manager
+}
+
+// NewTestingLockManager returns the lock manager used by the storage layer.
+// This function must be used for testing only but NOT for PRODUCTION builds.
+// Unlike MakeSingletonLockManager, this function may be called multiple times.
+func NewTestingLockManager() lockctx.Manager {
+	return lockctx.NewManager(Locks(), makeLockPolicy())
+}
+
+// HeldOneLock checks that exactly one of the two specified locks is held in the provided lock context.
+func HeldOneLock(lctx lockctx.Proof, lockA string, lockB string) (bool, string) {
+	heldLockA := lctx.HoldsLock(lockA)
+	heldLockB := lctx.HoldsLock(lockB)
+	if heldLockA {
+		if heldLockB {
+			return false, fmt.Sprintf("expect to hold only one lock, but actually held both locks: %s and %s", lockA, lockB)
+		} else {
+			return true, ""
+		}
+	} else {
+		if heldLockB {
+			return true, ""
+		} else {
+			return false, fmt.Sprintf("expect to hold one of the locks: %s or %s, but actually held none", lockA, lockB)
+		}
+	}
+}
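Editor's note: a minimal sketch (not part of this diff) of how the DAG policy built by makeLockPolicy plays out at runtime, assuming, as the tests below suggest, that a policy violation surfaces as an error from AcquireLock:

	lockManager := storage.NewTestingLockManager()

	// allowed: the policy contains the directed edge LockInsertBlock -> LockFinalizeBlock
	lctx := lockManager.NewContext()
	_ = lctx.AcquireLock(storage.LockInsertBlock)
	_ = lctx.AcquireLock(storage.LockFinalizeBlock) // succeeds
	lctx.Release()

	// disallowed: the reverse direction has no edge, so the second acquisition fails
	lctx = lockManager.NewContext()
	_ = lctx.AcquireLock(storage.LockFinalizeBlock)
	if err := lctx.AcquireLock(storage.LockInsertBlock); err != nil {
		// rejected by the DAG policy
	}
	lctx.Release()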
+
+// WithLock is a helper function that creates a new lock context, acquires the specified lock,
+// and executes the provided function within that context.
+// This function passes through any errors returned by fn.
+func WithLock(manager lockctx.Manager, lockID string, fn func(lctx lockctx.Context) error) error {
+	return WithLocks(manager, []string{lockID}, fn)
+}
+
+// WithLocks is a helper function that creates a new lock context, acquires the specified locks,
+// and executes the provided function within that context.
+// This function passes through any errors returned by fn.
+func WithLocks(manager lockctx.Manager, lockIDs []string, fn func(lctx lockctx.Context) error) error {
+	lctx := manager.NewContext()
+	defer lctx.Release()
+	for _, lockID := range lockIDs {
+		if err := lctx.AcquireLock(lockID); err != nil {
+			return err
+		}
+	}
+	return fn(lctx)
+}
diff --git a/storage/locks_test.go b/storage/locks_test.go
new file mode 100644
index 00000000000..ee1907a61ea
--- /dev/null
+++ b/storage/locks_test.go
@@ -0,0 +1,128 @@
+package storage
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestHeldOneLock(t *testing.T) {
+	lockManager := NewTestingLockManager()
+
+	t.Run("holds only lockA", func(t *testing.T) {
+		lctx := lockManager.NewContext()
+		defer lctx.Release()
+		err := lctx.AcquireLock(LockInsertBlock)
+		require.NoError(t, err)
+
+		held, msg := HeldOneLock(lctx, LockInsertBlock, LockFinalizeBlock)
+		assert.True(t, held)
+		assert.Empty(t, msg)
+	})
+
+	t.Run("holds only lockB", func(t *testing.T) {
+		lctx := lockManager.NewContext()
+		defer lctx.Release()
+		err := lctx.AcquireLock(LockFinalizeBlock)
+		require.NoError(t, err)
+
+		held, msg := HeldOneLock(lctx, LockInsertBlock, LockFinalizeBlock)
+		assert.True(t, held)
+		assert.Empty(t, msg)
+	})
+
+	t.Run("holds both locks", func(t *testing.T) {
+		lctx := lockManager.NewContext()
+		defer lctx.Release()
+		err := lctx.AcquireLock(LockInsertBlock)
+		require.NoError(t, err)
+		err = lctx.AcquireLock(LockFinalizeBlock)
+		require.NoError(t, err)
+
+		held, msg := HeldOneLock(lctx, LockInsertBlock, LockFinalizeBlock)
+		assert.False(t, held)
+		assert.Contains(t, msg, "expect to hold only one lock, but actually held both locks")
+		assert.Contains(t, msg, LockInsertBlock)
+		assert.Contains(t, msg, LockFinalizeBlock)
+	})
+
+	t.Run("holds neither lock", func(t *testing.T) {
+		// Create a context that doesn't hold any locks
+		lctx := lockManager.NewContext()
+		defer lctx.Release()
+
+		held, msg := HeldOneLock(lctx, LockInsertBlock, LockFinalizeBlock)
+		assert.False(t, held)
+		assert.Contains(t, msg, "expect to hold one of the locks")
+		assert.Contains(t, msg, LockInsertBlock)
+		assert.Contains(t, msg, LockFinalizeBlock)
+	})
+
+	t.Run("holds different lock", func(t *testing.T) {
+		lctx := lockManager.NewContext()
+		defer lctx.Release()
+		err := lctx.AcquireLock(LockBootstrapping)
+		require.NoError(t, err)
+
+		held, msg := HeldOneLock(lctx, LockInsertBlock, LockFinalizeBlock)
+		assert.False(t, held)
+		assert.Contains(t, msg, "expect to hold one of the locks")
+		assert.Contains(t, msg, LockInsertBlock)
+		assert.Contains(t, msg, LockFinalizeBlock)
+	})
+
+	t.Run("with different lock combinations", func(t *testing.T) {
+		lctx := lockManager.NewContext()
+		defer lctx.Release()
+		err := lctx.AcquireLock(LockInsertOwnReceipt)
+		require.NoError(t, err)
+
+		held, msg := HeldOneLock(lctx, LockInsertOwnReceipt, LockInsertCollection)
+		assert.True(t, held)
+		assert.Empty(t, msg)
+	})
+
+	t.Run("with cluster block locks", func(t *testing.T) {
+		lctx := lockManager.NewContext()
+		defer lctx.Release()
+		err := lctx.AcquireLock(LockInsertOrFinalizeClusterBlock)
+		require.NoError(t, err)
+		held, msg := HeldOneLock(lctx, LockInsertOrFinalizeClusterBlock, LockIndexResultApproval)
+		assert.True(t, held)
+		assert.Empty(t, msg)
+	})
+
+	t.Run("error message format for both locks", func(t *testing.T) {
+		lctx := lockManager.NewContext()
+		defer lctx.Release()
+		err := lctx.AcquireLock(LockInsertBlock)
+		require.NoError(t, err)
+		err = lctx.AcquireLock(LockFinalizeBlock)
+		require.NoError(t, err)
+
+		// Check that both locks are actually held
+		assert.True(t, lctx.HoldsLock(LockInsertBlock))
+		assert.True(t, lctx.HoldsLock(LockFinalizeBlock))
+
+		held, msg := HeldOneLock(lctx, LockInsertBlock, LockFinalizeBlock)
+		assert.False(t, held)
+		require.NotEmpty(t, msg)
+		assert.Contains(t, msg, "expect to hold only one lock, but actually held both locks")
+		assert.Contains(t, msg, LockInsertBlock)
+		assert.Contains(t, msg, LockFinalizeBlock)
+	})
+
+	t.Run("error message format for no locks", func(t *testing.T) {
+		lctx := lockManager.NewContext()
+		defer lctx.Release()
+
+		held, msg := HeldOneLock(lctx, "lockA", "lockB")
+		assert.False(t, held)
+		require.NotEmpty(t, msg)
+		assert.Contains(t, msg, "expect to hold one of the locks")
+		assert.Contains(t, msg, "lockA")
+		assert.Contains(t, msg, "lockB")
+	})
+}
diff --git a/storage/merkle/node.go b/storage/merkle/node.go
index edea5410c8e..f5e7d8c7ae1 100644
--- a/storage/merkle/node.go
+++ b/storage/merkle/node.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package merkle
 
 import (
diff --git a/storage/merkle/proof_test.go b/storage/merkle/proof_test.go
index 44e93a90bef..826b61b6ed8 100644
--- a/storage/merkle/proof_test.go
+++ b/storage/merkle/proof_test.go
@@ -3,7 +3,6 @@ package merkle
 import (
 	"math/rand"
 	"testing"
-	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -142,7 +141,7 @@ func TestValidateFormat(t *testing.T) {
 // when trie includes many random keys. (only a random subset of keys are checked for proofs)
 func TestProofsWithRandomKeys(t *testing.T) {
 	// initialize random generator, two trees and zero hash
-	rand.Seed(time.Now().UnixNano())
+
 	keyLength := 32
 	numberOfInsertions := 10000
 	numberOfProofsToVerify := 100
diff --git a/storage/merkle/tree.go b/storage/merkle/tree.go
index 4470422999f..f50c7f5686a 100644
--- a/storage/merkle/tree.go
+++ b/storage/merkle/tree.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package merkle
 
 import (
@@ -21,6 +19,7 @@ import (
 // Therefore, the range of valid key length in bytes is [1, 8191] (the corresponding
 // range in bits is [8, 65528]) .
 const maxKeyLength = 8191
+
 const maxKeyLenBits = maxKeyLength * 8
 
 var EmptyTreeRootHash []byte
diff --git a/storage/merkle/tree_test.go b/storage/merkle/tree_test.go
index b20ee26d7e5..8d0a601c6c0 100644
--- a/storage/merkle/tree_test.go
+++ b/storage/merkle/tree_test.go
@@ -1,13 +1,11 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package merkle
 
 import (
+	crand "crypto/rand"
 	"encoding/hex"
 	"fmt"
-	"math/rand"
+	mrand "math/rand"
 	"testing"
-	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -64,7 +62,9 @@ func TestEmptyTreeHash(t *testing.T) {
 	// generate random key-value pair
 	key := make([]byte, keyLength)
-	rand.Read(key)
+	_, err := crand.Read(key)
+	require.NoError(t, err)
+
 	val := []byte{1}
 
 	// add key-value pair: hash should be non-empty
@@ -239,7 +239,7 @@
 // of a _single_ key-value pair to an otherwise empty tree.
 func TestTreeSingle(t *testing.T) {
 	// initialize the random generator, tree and zero hash
-	rand.Seed(time.Now().UnixNano())
+
 	keyLength := 32
 	tree, err := NewTree(keyLength)
 	assert.NoError(t, err)
@@ -275,7 +275,7 @@ func TestTreeSingle(t *testing.T) {
 // Key-value pairs are added and deleted in the same order.
 func TestTreeBatch(t *testing.T) {
 	// initialize random generator, tree, zero hash
-	rand.Seed(time.Now().UnixNano())
+
 	keyLength := 32
 	tree, err := NewTree(keyLength)
 	assert.NoError(t, err)
@@ -321,7 +321,7 @@ func TestTreeBatch(t *testing.T) {
 // in which the elements were added.
 func TestRandomOrder(t *testing.T) {
 	// initialize random generator, two trees and zero hash
-	rand.Seed(time.Now().UnixNano())
+
 	keyLength := 32
 	tree1, err := NewTree(keyLength)
 	assert.NoError(t, err)
@@ -346,7 +346,7 @@ func TestRandomOrder(t *testing.T) {
 	}
 
 	// shuffle the keys and insert them with random order into the second tree
-	rand.Shuffle(len(keys), func(i int, j int) {
+	mrand.Shuffle(len(keys), func(i int, j int) {
 		keys[i], keys[j] = keys[j], keys[i]
 	})
 	for _, key := range keys {
@@ -382,8 +382,8 @@ func BenchmarkTree(b *testing.B) {
 func randomKeyValuePair(keySize, valueSize int) ([]byte, []byte) {
 	key := make([]byte, keySize)
 	val := make([]byte, valueSize)
-	_, _ = rand.Read(key)
-	_, _ = rand.Read(val)
+	_, _ = crand.Read(key)
+	_, _ = crand.Read(val)
 	return key, val
 }
diff --git a/storage/migration/migration.go b/storage/migration/migration.go
new file mode 100644
index 00000000000..4df596d81ff
--- /dev/null
+++ b/storage/migration/migration.go
@@ -0,0 +1,373 @@
+package migration
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/rs/zerolog/log"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/dgraph-io/badger/v2"
+	"golang.org/x/sync/errgroup"
+
+	"github.com/onflow/flow-go/module/util"
+	"github.com/onflow/flow-go/storage"
+)
+
+type MigrationConfig struct {
+	PebbleDir         string
+	BatchByteSize     int // the size of each batch to write to pebble
+	ReaderWorkerCount int // the number of workers to read from badger
+	WriterWorkerCount int // the number of workers to write to pebble
+
+	// number of prefix bytes used to assign iterator workload
+	// e.g., if the number is 1, the first byte of the key is used to divide the key space into 256 shards,
+	// and each worker will be assigned to iterate all keys with the same first byte.
+	// Since keys are not evenly distributed, especially since some tables under a certain prefix byte may hold
+	// a lot more data than others, we might choose to use 2 or 3 bytes to divide the key space, so that
+	// the reader workers can concurrently iterate keys with the same prefix bytes (same table).
+	ReaderShardPrefixBytes int
+
+	// ValidationMode determines how thorough the validation should be
+	// - PartialValidation: only checks min/max keys for each prefix (faster)
+	// - FullValidation: checks all keys in the database (more thorough)
+	ValidationMode ValidationMode
+
+	ValidationOnly bool // if true, only validate the data in the badger db without copying it to pebble db
+}
+
+type KVPairs struct {
+	Prefix []byte
+	Pairs  []KVPair
+}
+
+type KVPair struct {
+	Key   []byte
+	Value []byte
+}
+
+func GeneratePrefixes(n int) [][]byte {
+	if n == 0 {
+		return [][]byte{{}}
+	}
+
+	base := 1 << (8 * n)
+	results := make([][]byte, 0, base)
+
+	for i := 0; i < base; i++ {
+		buf := make([]byte, n)
+		switch n {
+		case 1:
+			buf[0] = byte(i)
+		case 2:
+			binary.BigEndian.PutUint16(buf, uint16(i))
+		case 3:
+			buf[0] = byte(i >> 16)
+			buf[1] = byte(i >> 8)
+			buf[2] = byte(i)
+		default:
+			panic("unsupported prefix byte length")
+		}
+		results = append(results, buf)
+	}
+	return results
+}
+
+func GenerateKeysShorterThanPrefix(n int) [][]byte {
+	allKeys := make([][]byte, 0)
+	for i := 1; i < n; i++ {
+		keys := GeneratePrefixes(i)
+		allKeys = append(allKeys, keys...)
+	}
+	return allKeys
+}
+
+// readerWorker reads key-value pairs from BadgerDB using a prefix iterator.
+func readerWorker(
+	ctx context.Context,
+	lgProgress func(int),
+	db *badger.DB,
+	jobs <-chan []byte, // each job is a prefix to iterate over
+	kvChan chan<- KVPairs, // channel to send key-value pairs to writer workers
+	batchSize int,
+) error {
+	for prefix := range jobs {
+		err := db.View(func(txn *badger.Txn) error {
+			if ctx.Err() != nil {
+				return ctx.Err()
+			}
+
+			options := badger.DefaultIteratorOptions
+			options.Prefix = prefix
+			it := txn.NewIterator(options)
+			defer it.Close()
+
+			var (
+				kvBatch  []KVPair
+				currSize int
+			)
+
+			for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
+				if ctx.Err() != nil {
+					return ctx.Err()
+				}
+
+				item := it.Item()
+				key := item.KeyCopy(nil)
+				val, err := item.ValueCopy(nil)
+				if err != nil {
+					return err
+				}
+
+				kvBatch = append(kvBatch, KVPair{Key: key, Value: val})
+				currSize += len(key) + len(val)
+
+				if currSize >= batchSize {
+					select {
+					case kvChan <- KVPairs{Prefix: prefix, Pairs: kvBatch}:
+					case <-ctx.Done():
+						return ctx.Err()
+					}
+					kvBatch = nil
+					currSize = 0
+				}
+			}
+
+			if len(kvBatch) > 0 {
+				select {
+				case kvChan <- KVPairs{Prefix: prefix, Pairs: kvBatch}:
+				case <-ctx.Done():
+					return ctx.Err()
+				}
+			}
+
+			return nil
+		})
+
+		lgProgress(1)
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func pebbleReaderWorker(
+	ctx context.Context,
+	lgProgress func(int),
+	db *pebble.DB,
+	jobs <-chan []byte, // each job is a prefix to iterate over
+	kvChan chan<- KVPairs, // channel to send key-value pairs to writer workers
+	batchSize int,
+) error {
+	for prefix := range jobs {
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+
+		lowerBound, upperBound, hasUpperBound := storage.StartEndPrefixToLowerUpperBound(prefix, prefix)
+		options := pebble.IterOptions{
+			LowerBound: lowerBound,
+			UpperBound: upperBound,
+		}
+
+		if !hasUpperBound {
+			options.UpperBound = nil
+		}
+
+		// the per-prefix iteration is wrapped in a closure, so that the iterator is closed
+		// at the end of each iteration; a `defer` placed directly in the loop would keep
+		// every iterator open until the worker returns
+		err := func() error {
+			iter, err := db.NewIter(&options)
+			if err != nil {
+				return fmt.Errorf("failed to create iterator: %w", err)
+			}
+			defer iter.Close()
+
+			var (
+				kvBatch  []KVPair
+				currSize int
+			)
+
+			for iter.First(); iter.Valid(); iter.Next() {
+				if ctx.Err() != nil {
+					return ctx.Err()
+				}
+
+				key := iter.Key()
+				value := iter.Value()
+
+				// Only process keys that start with our prefix
+				if !bytes.HasPrefix(key, prefix) {
+					break
+				}
+
+				kvBatch = append(kvBatch, KVPair{
+					Key:   append([]byte(nil), key...),
+					Value: append([]byte(nil), value...),
+				})
+				currSize += len(key) + len(value)
+
+				if currSize >= batchSize {
+					select {
+					case kvChan <- KVPairs{Prefix: prefix, Pairs: kvBatch}:
+					case <-ctx.Done():
+						return ctx.Err()
+					}
+					kvBatch = nil
+					currSize = 0
+				}
+			}
+
+			if len(kvBatch) > 0 {
+				select {
+				case kvChan <- KVPairs{Prefix: prefix, Pairs: kvBatch}:
+				case <-ctx.Done():
+					return ctx.Err()
+				}
+			}
+
+			return nil
+		}()
+		if err != nil {
+			return err
+		}
+
+		lgProgress(1)
+	}
+	return nil
+}
+
+// writerWorker writes key-value pairs to PebbleDB in batches.
+func writerWorker(ctx context.Context, db *pebble.DB, kvChan <-chan KVPairs) error {
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case kvGroup, ok := <-kvChan:
+			if !ok {
+				return nil
+			}
+			batch := db.NewBatch()
+			for _, kv := range kvGroup.Pairs {
+				if err := batch.Set(kv.Key, kv.Value, nil); err != nil {
+					return fmt.Errorf("fail to set key %x: %w", kv.Key, err)
+				}
+			}
+
+			if err := batch.Commit(nil); err != nil {
+				return fmt.Errorf("fail to commit batch: %w", err)
+			}
+		}
+	}
+}
+
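Editor's note: a hypothetical end-to-end invocation (not part of this diff) showing how the worker pipeline above fits together. The directory paths are placeholders, and the configuration values mirror those used by DefaultMigrationConfig and the benchmark below.

	badgerDB, err := badger.Open(badger.DefaultOptions("/data/badger").WithLogger(nil))
	if err != nil {
		return err
	}
	defer badgerDB.Close()

	pebbleDB, err := pebble.Open("/data/pebble", &pebble.Options{})
	if err != nil {
		return err
	}
	defer pebbleDB.Close()

	cfg := MigrationConfig{
		BatchByteSize:          32_000_000, // 32 MB per write batch
		ReaderWorkerCount:      2,
		WriterWorkerCount:      2,
		ReaderShardPrefixBytes: 2, // 65536 prefix shards
	}
	if err := CopyFromBadgerToPebble(badgerDB, pebbleDB, cfg); err != nil {
		return fmt.Errorf("migration failed: %w", err)
	}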
+// CopyFromBadgerToPebble migrates all key-value pairs from a BadgerDB instance to a PebbleDB instance.
+//
+// The migration is performed in parallel using a configurable number of reader and writer workers.
+// Reader workers iterate over the BadgerDB by sharded key prefixes (based on ReaderShardPrefixBytes)
+// and send key-value pairs to a shared channel. Writer workers consume from this channel and write
+// batched entries into PebbleDB.
+//
+// Configuration is provided via MigrationConfig:
+//   - BatchByteSize: maximum size in bytes for a single Pebble write batch.
+//   - ReaderWorkerCount: number of concurrent workers reading from Badger.
+//   - WriterWorkerCount: number of concurrent workers writing to Pebble.
+//   - ReaderShardPrefixBytes: number of bytes used to shard the keyspace for parallel iteration.
+//
+// The function blocks until all keys are migrated and written successfully.
+// It returns an error if any part of the process fails.
+func CopyFromBadgerToPebble(badgerDB *badger.DB, pebbleDB *pebble.DB, cfg MigrationConfig) error {
+	ctx, cancel := context.WithCancelCause(context.Background())
+	defer cancel(nil)
+
+	// Step 1: Copy all keys shorter than prefix
+	keysShorterThanPrefix := GenerateKeysShorterThanPrefix(cfg.ReaderShardPrefixBytes)
+	keyCount, err := copyExactKeysFromBadgerToPebble(badgerDB, pebbleDB, keysShorterThanPrefix)
+	if err != nil {
+		return fmt.Errorf("failed to copy keys shorter than prefix: %w", err)
+	}
+	log.Info().Msgf("Copied %d keys shorter than %v bytes prefix", keyCount, cfg.ReaderShardPrefixBytes)
+
+	// Step 2: Copy all keys with prefix by first generating prefix shards and then
+	// using reader and writer workers to copy the keys with the same prefix
+	prefixes := GeneratePrefixes(cfg.ReaderShardPrefixBytes)
+	prefixJobs := make(chan []byte, len(prefixes))
+	for _, prefix := range prefixes {
+		prefixJobs <- prefix
+	}
+	close(prefixJobs)
+
+	kvChan := make(chan KVPairs, cfg.ReaderWorkerCount*2)
+
+	lg := util.LogProgress(
+		log.Logger,
+		util.DefaultLogProgressConfig("migration keys from badger to pebble", len(prefixes)),
+	)
+
+	// Readers and writers are tracked by separate errgroups: kvChan can only be closed
+	// once all readers are done, and the writers only terminate once kvChan is closed.
+	// Waiting on a single group that contains the writers before closing kvChan would deadlock.
+	readers, readerCtx := errgroup.WithContext(ctx)
+	writers, writerCtx := errgroup.WithContext(ctx)
+
+	// Spawn reader workers
+	for i := 0; i < cfg.ReaderWorkerCount; i++ {
+		readers.Go(func() error {
+			return readerWorker(readerCtx, lg, badgerDB, prefixJobs, kvChan, cfg.BatchByteSize)
+		})
+	}
+
+	// Spawn writer workers
+	for i := 0; i < cfg.WriterWorkerCount; i++ {
+		writers.Go(func() error {
+			return writerWorker(writerCtx, pebbleDB, kvChan)
+		})
+	}
+
+	// Close kvChan after all reader workers complete, so that writer workers can finish
+	go func() {
+		if err := readers.Wait(); err != nil {
+			cancel(err)
+		}
+		close(kvChan)
+	}()
+
+	// Wait for all writer workers to complete
+	if err := writers.Wait(); err != nil {
+		if cause := context.Cause(ctx); cause != nil {
+			return fmt.Errorf("migration failed: %w", cause)
+		}
+		return fmt.Errorf("migration failed: %w", err)
+	}
+	return context.Cause(ctx)
+}
+
+func copyExactKeysFromBadgerToPebble(badgerDB *badger.DB, pebbleDB *pebble.DB, keys [][]byte) (int, error) {
+	batch := pebbleDB.NewBatch()
+	keyCount := 0
+	err := badgerDB.View(func(txn *badger.Txn) error {
+		for _, key := range keys {
+			item, err := txn.Get(key)
+			if err != nil {
+				if errors.Is(err, badger.ErrKeyNotFound) {
+					// skip if the key is not found
+					continue
+				}
+
+				return err
+			}
+
+			err = item.Value(func(val []byte) error {
+				keyCount++
+				return batch.Set(key, val, nil)
+			})
+
+			if err != nil {
+				return fmt.Errorf("failed to get value for key %x: %w", key, err)
+			}
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return 0, fmt.Errorf("failed to get key from BadgerDB: %w", err)
+	}
+
+	err = batch.Commit(pebble.Sync)
+	if err != nil {
+		return 0, fmt.Errorf("failed to commit batch to PebbleDB: %w", err)
+	}
+
+	return keyCount, nil
+}
diff --git a/storage/migration/migration_test.go b/storage/migration/migration_test.go
new file mode 100644
index 00000000000..3bdbfe1e310
--- /dev/null
+++ b/storage/migration/migration_test.go
@@ -0,0 +1,237 @@
+package migration
+
+import (
+	"fmt"
+	"math/rand"
+	"testing"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/dgraph-io/badger/v2"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestGeneratePrefixes(t *testing.T) {
+	t.Run("OneBytePrefix", func(t *testing.T) {
+		prefixes := GeneratePrefixes(1)
+		require.Len(t, prefixes, 256)
+		require.Equal(t, []byte{0x00}, prefixes[0])
+		require.Equal(t, []byte{0x01}, prefixes[1])
+		require.Equal(t, []byte{0xfe}, prefixes[254])
+		require.Equal(t, []byte{0xff}, prefixes[255])
+	})
+	t.Run("TwoBytePrefix", func(t *testing.T) {
+		prefixes := GeneratePrefixes(2)
+		require.Len(t, prefixes, 65536)
+		require.Equal(t, []byte{0x00, 0x00}, prefixes[0])
+		require.Equal(t, []byte{0x00, 0x01}, prefixes[1])
+		require.Equal(t, []byte{0xff, 0xfe}, prefixes[65534])
+		require.Equal(t, []byte{0xff, 0xff}, prefixes[65535])
+	})
+}
+
+func runMigrationTestCase(t *testing.T, testData map[string]string, cfg MigrationConfig) {
+	unittest.RunWithBadgerDBAndPebbleDB(t, func(badgerDB *badger.DB, pebbleDB *pebble.DB) {
+		// Load Badger with test data
+		require.NoError(t, badgerDB.Update(func(txn *badger.Txn) error {
+			for k, v := range testData {
+				if err := txn.Set([]byte(k), []byte(v)); err != nil {
+					return err
+				}
+			}
+			return nil
+		}))
+
+		// Run migration
+		err := CopyFromBadgerToPebbleSSTables(badgerDB, pebbleDB, cfg)
+		require.NoError(t, err)
+
+		// Validate each key
+		for k, expected := range testData {
+			val, closer, err := pebbleDB.Get([]byte(k))
+			require.NoError(t, err, "pebbleDB.Get failed for key %s", k)
+			require.Equal(t, expected, string(val), "mismatched value for key %s", k)
+			require.NoError(t, closer.Close())
+		}
+
+		// Validate: ensure Pebble has no additional keys
+		iter, err := pebbleDB.NewIter(nil)
+		require.NoError(t, err)
+		defer iter.Close()
+
+		seen := make(map[string]string)
+
+		for iter.First(); iter.Valid(); iter.Next() {
+			k := string(iter.Key())
+			v := string(iter.Value())
+
+			expectedVal, ok := testData[k]
+			require.True(t, ok, "unexpected key found in PebbleDB: %s", k)
+			require.Equal(t, expectedVal, v, "mismatched value for key %s", k)
+
+			seen[k] = v
+		}
+		require.NoError(t, iter.Error(), "error iterating over PebbleDB")
+
+		// Ensure all expected keys were seen
+		require.Equal(t, len(testData), len(seen), "PebbleDB key count mismatch")
+	})
+}
+
+// Simple deterministic dataset
+func TestMigrationWithSimpleData1(t *testing.T) {
+	data := map[string]string{
+		"a":      "a single key byte",
+		"z":      "a single key byte",
+		"apple":  "fruit",
+		"banana": "yellow",
+		"carrot": "vegetable",
+		"dog":    "animal",
+		"egg":    "protein",
+	}
+	cfg := MigrationConfig{
+		BatchByteSize:          1024,
+		ReaderWorkerCount:      2,
+		WriterWorkerCount:      2,
+		ReaderShardPrefixBytes: 1,
+	}
+	runMigrationTestCase(t, data, cfg)
+}
+
+// Simple deterministic dataset
+func TestMigrationWithSimpleDataAnd2PrefixBytes(t *testing.T) {
+	data := map[string]string{
+		"a":      "a single key byte",
+		"z":      "a single key byte",
+		"apple":  "fruit",
+		"banana": "yellow",
+		"carrot": "vegetable",
+		"dog":    "animal",
+		"egg":    "protein",
+	}
+	cfg := MigrationConfig{
+		BatchByteSize:          1024,
+		ReaderWorkerCount:      2,
+		WriterWorkerCount:      2,
+		ReaderShardPrefixBytes: 2,
+	}
+	runMigrationTestCase(t, data, cfg)
+}
+
+// Randomized data to simulate fuzzing
+func TestMigrationWithFuzzyData(t *testing.T) {
+	data := generateRandomKVData(500, 10, 50)
+	cfg := MigrationConfig{
+		BatchByteSize:          2048,
+		ReaderWorkerCount:      4,
+		WriterWorkerCount:      2,
+		ReaderShardPrefixBytes: 1,
+	}
+	runMigrationTestCase(t, data, cfg)
+}
+
+// Fuzzy data with 2-byte prefix shard config
+func TestMigrationWithFuzzyDataAndPrefix2(t *testing.T) {
+	data := generateRandomKVData(500, 10, 50)
+	cfg := MigrationConfig{
+		BatchByteSize:          2048,
+		ReaderWorkerCount:      8,
+		WriterWorkerCount:      4,
+		ReaderShardPrefixBytes: 2,
+	}
+	runMigrationTestCase(t, data, cfg)
+}
+
+// Utility: Generate random key-value pairs
+func generateRandomKVData(count, keyLen, valLen int) map[string]string {
+	rng := rand.New(rand.NewSource(42)) // deterministic
+	data := make(map[string]string, count)
+	letters := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
+
+	randomStr := func(n int) string {
+		b := make([]rune, n)
+		for i := range b {
+			b[i] = letters[rng.Intn(len(letters))]
+		}
+		return string(b)
+	}
+
+	for i := 0; i < count; i++ {
+		k := randomStr(keyLen)
+		v := randomStr(valLen)
+		data[k] = v
+	}
+	return data
+}
+
+func BenchmarkCopyFromBadgerToPebble(b *testing.B) {
+	// Configuration
+	const (
+		numEntries        = 1_000_000
+		keySize           = 16
+		valueSize         = 128
+		batchByteSize     = 4 * 1024 * 1024 // 4MB
+		readerWorkerCount = 4
+		writerWorkerCount = 4
+		prefixBytes       = 1
+	)
+
+	// Setup: Create temp dirs for Badger and Pebble
+	unittest.RunWithTempDirs(b, func(badgerDir, pebbleDir string) {
+		// Open Badger
+		badgerOpts := badger.DefaultOptions(badgerDir).WithLogger(nil)
+		badgerDB, err := badger.Open(badgerOpts)
+		if err != nil {
+			b.Fatalf("failed to open BadgerDB: %v", err)
+		}
+		defer badgerDB.Close()
+
+		// Insert random data into Badger: batchCount transactions of batchSize entries each
+		rng := rand.New(rand.NewSource(42))
+		batchSize := 100
+		batchCount := numEntries / batchSize
+		for range batchCount {
+			err = badgerDB.Update(func(txn *badger.Txn) error {
+				for range batchSize {
+					key := make([]byte, keySize)
+					value := make([]byte, valueSize)
+					rng.Read(key)
+					rng.Read(value)
+
+					if err := txn.Set(key, value); err != nil {
+						return fmt.Errorf("failed to set key %x: %w", key, err)
+					}
+				}
+				return nil
+			})
+			if err != nil {
+				b.Fatalf("failed to insert data into BadgerDB: %v", err)
+			}
+		}
+
+		// Open Pebble
+		pebbleDB, err := pebble.Open(pebbleDir, &pebble.Options{})
+		if err != nil {
+			b.Fatalf("failed to open PebbleDB: %v", err)
+		}
+		defer pebbleDB.Close()
+
+		// Setup migration config
+		cfg := MigrationConfig{
+			BatchByteSize:          batchByteSize,
+			ReaderWorkerCount:      readerWorkerCount,
+			WriterWorkerCount:      writerWorkerCount,
+			ReaderShardPrefixBytes: prefixBytes,
+		}
+
+		// Benchmark the migration
+		b.ResetTimer()
+		b.StartTimer()
+		if err := CopyFromBadgerToPebble(badgerDB, pebbleDB, cfg); err != nil {
+			b.Fatalf("migration failed: %v", err)
+		}
+		b.StopTimer()
+	})
+}
diff --git a/storage/migration/runner.go b/storage/migration/runner.go
new file mode 100644
index 00000000000..25f3ae3acda
--- /dev/null
+++ b/storage/migration/runner.go
@@ -0,0 +1,146 @@
+package migration
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/dgraph-io/badger/v2"
+	"github.com/rs/zerolog/log"
+
+	badgerstorage "github.com/onflow/flow-go/storage/badger"
+	pebblestorage "github.com/onflow/flow-go/storage/pebble"
+	"github.com/onflow/flow-go/storage/util"
+)
+
+var DefaultMigrationConfig = MigrationConfig{
+	BatchByteSize:          32_000_000, // 32 MB
+	ReaderWorkerCount:      2,
+	WriterWorkerCount:      2,
+	ReaderShardPrefixBytes: 2,                 // best kept at 2
+	ValidationMode:         PartialValidation, // default to partial validation
+}
+
+func RunMigrationAndCompaction(badgerDir string, pebbleDir string, cfg MigrationConfig) error {
+	err := RunMigration(badgerDir, pebbleDir, cfg)
+	if err != nil {
+		return err
+	}
+
+	err = ForceCompactPebbleDB(pebbleDir)
+	if err != nil {
+		return fmt.Errorf("failed to compact PebbleDB: %w", err)
+	}
+
+	return nil
+}
+
+// RunMigration performs a complete migration of key-value data from a BadgerDB directory
+// to a PebbleDB directory and verifies the integrity of the migrated data.
+//
+// It executes the following steps:
+//
+//  1. Validates that the Badger directory exists and is non-empty.
+//     Ensures that the Pebble directory does not already contain data.
diff --git a/storage/migration/runner.go b/storage/migration/runner.go new file mode 100644 index 00000000000..25f3ae3acda --- /dev/null +++ b/storage/migration/runner.go @@ -0,0 +1,146 @@ +package migration + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/rs/zerolog/log" + + badgerstorage "github.com/onflow/flow-go/storage/badger" + pebblestorage "github.com/onflow/flow-go/storage/pebble" + "github.com/onflow/flow-go/storage/util" +) + +var DefaultMigrationConfig = MigrationConfig{ + BatchByteSize: 32_000_000, // 32 MB + ReaderWorkerCount: 2, + WriterWorkerCount: 2, + ReaderShardPrefixBytes: 2, // 2-byte prefixes (65536 shards); best left at 2 + ValidationMode: PartialValidation, // Default to partial validation +} + +// RunMigrationAndCompaction runs the full Badger-to-Pebble migration and then +// forces a manual compaction of the resulting PebbleDB. +func RunMigrationAndCompaction(badgerDir string, pebbleDir string, cfg MigrationConfig) error { + err := RunMigration(badgerDir, pebbleDir, cfg) + if err != nil { + return err + } + + err = ForceCompactPebbleDB(pebbleDir) + if err != nil { + return fmt.Errorf("failed to compact PebbleDB: %w", err) + } + + return nil +} + +// RunMigration performs a complete migration of key-value data from a BadgerDB directory +// to a PebbleDB directory and verifies the integrity of the migrated data. + +// It executes the following steps: +// +// 1. Validates that the Badger directory exists and is non-empty, +// and that the Pebble directory does not already contain data. +// 2. Opens both databases. +// 3. Writes a "MIGRATION_STARTED" marker file with a timestamp in the Pebble directory. +// 4. Migrates the data using CopyFromBadgerToPebbleSSTables with the given config. +// 5. Validates the migrated data: +// - For PartialValidation: generates a list of prefix shards (based on 2-byte prefixes) +// and compares the min and max keys of each prefix group +// - For FullValidation: validates all keys in the database +// 6. Writes a "MIGRATION_COMPLETED" marker file with a timestamp to signal successful completion. +// +// This function returns an error if any part of the process fails, including directory checks, +// database operations, or validation mismatches. +func RunMigration(badgerDir string, pebbleDir string, cfg MigrationConfig) error { + lg := log.With(). + Str("from-badger-dir", badgerDir). + Str("to-pebble-dir", pebbleDir). + Logger() + + // Step 1: Validate directories + lg.Info().Msg("Step 1/6: Starting directory validation...") + startTime := time.Now() + if !cfg.ValidationOnly { // when ValidationOnly is true, both database folders may already contain data + if err := validateBadgerFolderExistPebbleFolderEmpty(badgerDir, pebbleDir); err != nil { + return fmt.Errorf("directory validation failed: %w", err) + } + } + lg.Info().Dur("duration", time.Since(startTime)).Msg("Step 1/6: Directory validation completed successfully") + + // Step 2: Open Badger and Pebble DBs + lg.Info().Msg("Step 2/6: Opening BadgerDB and PebbleDB...") + startTime = time.Now() + badgerOptions := badger.DefaultOptions(badgerDir). + WithLogger(util.NewLogger(log.Logger.With().Str("db", "badger").Logger())) + badgerDB, err := badgerstorage.SafeOpen(badgerOptions) + if err != nil { + return fmt.Errorf("failed to open BadgerDB: %w", err) + } + defer badgerDB.Close() + + cache := pebble.NewCache(pebblestorage.DefaultPebbleCacheSize) + defer cache.Unref() + pebbleDBOpts := pebblestorage.DefaultPebbleOptions(log.Logger, cache, pebble.DefaultComparer) + pebbleDBOpts.DisableAutomaticCompactions = true + + pebbleDB, err := pebble.Open(pebbleDir, pebbleDBOpts) + if err != nil { + return fmt.Errorf("failed to open PebbleDB: %w", err) + } + defer pebbleDB.Close() + lg.Info().Dur("duration", time.Since(startTime)).Msg("Step 2/6: BadgerDB and PebbleDB opened successfully") + + if cfg.ValidationOnly { + lg.Info().Str("mode", string(cfg.ValidationMode)).Msg("Step 6/6: Validation-only mode enabled; skipping migration steps and starting data validation...") + startTime = time.Now() + if err := validateData(badgerDB, pebbleDB, cfg); err != nil { + return fmt.Errorf("data validation failed: %w", err) + } + lg.Info().Dur("duration", time.Since(startTime)).Msg("Step 6/6: Data validation completed successfully") + + return nil + } + + // Step 3: Write MIGRATION_STARTED file + lg.Info().Msg("Step 3/6: Writing migration start marker...") + startTime = time.Now() + startTimeStr := time.Now().Format(time.RFC3339) + startMarkerPath := filepath.Join(pebbleDir, "MIGRATION_STARTED") + startContent := fmt.Sprintf("migration started at %s\n", startTimeStr) + if err := os.WriteFile(startMarkerPath, []byte(startContent), 0644); err != nil { + return fmt.Errorf("failed to write MIGRATION_STARTED file: %w", err) + } + lg.Info().Dur("duration", time.Since(startTime)).Str("file", startMarkerPath).Msg("Step 3/6: Migration start marker written successfully") + + // Step 4: Migrate data + lg.Info().Msg("Step 4/6: Starting data migration...") + startTime = time.Now() + cfg.PebbleDir = pebbleDir + if err := CopyFromBadgerToPebbleSSTables(badgerDB, pebbleDB, cfg); err != nil { + return fmt.Errorf("failed to migrate data from Badger to Pebble: %w", err) + } + lg.Info().Dur("duration", time.Since(startTime)).Msg("Step 4/6: Data migration completed successfully") + + // Step 5: Validate data + lg.Info().Str("mode", string(cfg.ValidationMode)).Msg("Step 5/6: Starting data validation...") + startTime = time.Now() + if err := validateData(badgerDB, pebbleDB, cfg); err != nil { + return fmt.Errorf("data validation failed: %w", err) + } + lg.Info().Dur("duration", time.Since(startTime)).Msg("Step 5/6: Data validation completed successfully") + + // Step 6: Write MIGRATION_COMPLETED file + lg.Info().Msg("Step 6/6: Writing migration completion marker...") + startTime = time.Now() + endTime := time.Now().Format(time.RFC3339) + completeMarkerPath := filepath.Join(pebbleDir, "MIGRATION_COMPLETED") + completeContent := fmt.Sprintf("migration completed at %s\n", endTime) + if err := os.WriteFile(completeMarkerPath, []byte(completeContent), 0644); err != nil { + return fmt.Errorf("failed to write MIGRATION_COMPLETED file: %w", err) + } + lg.Info().Dur("duration", time.Since(startTime)).Str("file", completeMarkerPath).Msg("Step 6/6: Migration completion marker written successfully") + + return nil +}
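Taken together, a caller would typically drive the whole pipeline through RunMigrationAndCompaction. A minimal sketch follows; the directory paths are hypothetical, everything else is defined above:

// Illustrative sketch — not part of this change; paths are hypothetical.
cfg := DefaultMigrationConfig
cfg.ValidationMode = FullValidation // opt into exhaustive key-by-key validation

// Migrates the Badger data into the (empty) Pebble directory, validates it,
// writes the MIGRATION_STARTED/MIGRATION_COMPLETED markers, then force-compacts.
if err := RunMigrationAndCompaction("/var/flow/badger", "/var/flow/pebble", cfg); err != nil {
	log.Fatal().Err(err).Msg("badger-to-pebble migration failed")
}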
diff --git a/storage/migration/runner_test.go b/storage/migration/runner_test.go new file mode 100644 index 00000000000..ca852fb8f31 --- /dev/null +++ b/storage/migration/runner_test.go @@ -0,0 +1,142 @@ +package migration + +import ( + "os" + "path/filepath" + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/stretchr/testify/require" +) + +func TestRunMigration(t *testing.T) { + // Setup temporary directories + tmpDir := t.TempDir() + badgerDir := filepath.Join(tmpDir, "badger") + pebbleDir := filepath.Join(tmpDir, "pebble") + + // Create and open BadgerDB with test data + opts := badger.DefaultOptions(badgerDir).WithLogger(nil) + badgerDB, err := badger.Open(opts) + require.NoError(t, err) + + testData := map[string]string{ + "\x01\x02foo": "bar", + "\x01\x02baz": "qux", + "\x02\xffzip": "zap", + "\xff\xffzz": "last", + } + err = badgerDB.Update(func(txn *badger.Txn) error { + for k, v := range testData { + err := txn.Set([]byte(k), []byte(v)) + require.NoError(t, err) + } + return nil + }) + require.NoError(t, err) + require.NoError(t, badgerDB.Close()) // Close so RunMigration can reopen it + + // Define migration config + cfg := MigrationConfig{ + BatchByteSize: 1024, + ReaderWorkerCount: 2, + WriterWorkerCount: 2, + ReaderShardPrefixBytes: 2, + ValidationMode: PartialValidation, + } + + // Run migration + err = RunMigration(badgerDir, pebbleDir, cfg) + require.NoError(t, err) + + // Check marker files + startedPath := filepath.Join(pebbleDir, "MIGRATION_STARTED") + completedPath := filepath.Join(pebbleDir, "MIGRATION_COMPLETED") + + startedContent, err := os.ReadFile(startedPath) + require.NoError(t, err) + require.Contains(t, string(startedContent), "migration started") + + completedContent, err := os.ReadFile(completedPath) + require.NoError(t, err) + require.Contains(t, string(completedContent), "migration completed") + + // Open PebbleDB to confirm migrated values + pebbleDB, err := pebble.Open(pebbleDir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) + require.NoError(t, err) + defer pebbleDB.Close() + + for k, expected := range testData { + val, closer, err := pebbleDB.Get([]byte(k)) + require.NoError(t, err) + require.Equal(t, expected, string(val)) + require.NoError(t, closer.Close()) + } +} + +func TestRunMigration_FullValidation(t *testing.T) { + // Setup temporary directories + tmpDir := t.TempDir() + badgerDir := filepath.Join(tmpDir, "badger") + pebbleDir := filepath.Join(tmpDir, "pebble") + + // Generate random test data + testData := generateRandomKVData(200, 8, 16) + + // Create and open BadgerDB with test data + opts := badger.DefaultOptions(badgerDir).WithLogger(nil) + badgerDB, err := badger.Open(opts) + require.NoError(t, err) + + err = badgerDB.Update(func(txn *badger.Txn) error { + for k, v := range testData { + err := txn.Set([]byte(k), []byte(v)) + require.NoError(t, err) + } + return nil + }) + require.NoError(t, err) + require.NoError(t, badgerDB.Close()) // Close so RunMigration can reopen it + + // Define migration config with FullValidation + cfg := MigrationConfig{ + BatchByteSize: 1024, + ReaderWorkerCount: 2, + WriterWorkerCount: 2, + ReaderShardPrefixBytes: 2, + ValidationMode: FullValidation, + } + + // Run migration + err = RunMigration(badgerDir, pebbleDir, cfg) + require.NoError(t, err) + + // Check marker files + startedPath := filepath.Join(pebbleDir, "MIGRATION_STARTED") + completedPath := filepath.Join(pebbleDir, "MIGRATION_COMPLETED") + + startedContent, err := os.ReadFile(startedPath) + require.NoError(t, err) + require.Contains(t, string(startedContent), "migration started") + + completedContent, err := os.ReadFile(completedPath) + require.NoError(t, err) + require.Contains(t, string(completedContent), "migration completed") + + // Open PebbleDB to confirm migrated values + pebbleDB, err := pebble.Open(pebbleDir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) + require.NoError(t, err) + defer pebbleDB.Close() + + for k, expected := range testData { + val, closer, err := pebbleDB.Get([]byte(k)) + require.NoError(t, err) + require.Equal(t, expected, string(val)) + require.NoError(t, closer.Close()) + } +} diff --git a/storage/migration/sstables.go b/storage/migration/sstables.go new file mode 100644 index 00000000000..45c85802046 --- /dev/null +++ b/storage/migration/sstables.go @@ -0,0 +1,168 @@ +package migration + +import ( + "context" + "fmt" + "os" + "sync" + + "github.com/cockroachdb/pebble/v2" + "github.com/cockroachdb/pebble/v2/objstorage/objstorageprovider" + "github.com/cockroachdb/pebble/v2/sstable" + "github.com/cockroachdb/pebble/v2/vfs" + "github.com/dgraph-io/badger/v2" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/module/util" +) + +// CopyFromBadgerToPebbleSSTables copies all key-value pairs from a BadgerDB to a PebbleDB +// using SSTable ingestion. It reads BadgerDB in prefix-sharded ranges and writes +// those ranges into SSTable files, which are then ingested into Pebble. +func CopyFromBadgerToPebbleSSTables(badgerDB *badger.DB, pebbleDB *pebble.DB, cfg MigrationConfig) error { + sstableDir, err := os.MkdirTemp(cfg.PebbleDir, "flow-migration-temp-") + if err != nil { + return fmt.Errorf("failed to create temp dir: %w", err) + } + // best-effort cleanup of the temporary SSTable directory once migration finishes + defer os.RemoveAll(sstableDir) + + log.Info().Msgf("Created temporary directory for SSTables: %s", sstableDir) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var ( + errOnce sync.Once + firstErr error + ) + + // on the first error, cancel the context and record the error + reportFirstError := func(err error) { + if err != nil { + errOnce.Do(func() { + firstErr = err + cancel() + }) + } + } + + // Step 1: Copy all keys shorter than prefix + keysShorterThanPrefix := GenerateKeysShorterThanPrefix(cfg.ReaderShardPrefixBytes) + keyCount, err := copyExactKeysFromBadgerToPebble(badgerDB, pebbleDB, keysShorterThanPrefix) + if err != nil { + return fmt.Errorf("failed to copy keys shorter than prefix: %w", err) + } + log.Info().Msgf("Copied %d keys shorter than %v bytes prefix", keyCount, cfg.ReaderShardPrefixBytes) + + // Step 2: Copy all keys with prefix by first generating prefix shards and then + // using reader and writer workers to copy the keys with the same prefix + prefixes := GeneratePrefixes(cfg.ReaderShardPrefixBytes) + prefixJobs := make(chan []byte, len(prefixes)) + for _, prefix := range prefixes { + prefixJobs <- prefix + } + close(prefixJobs) + + kvChan := make(chan KVPairs, cfg.ReaderWorkerCount*2) + + lg := util.LogProgress( + log.Logger, + util.DefaultLogProgressConfig("migrating keys from badger to pebble", len(prefixes)), + ) + + var readerWg sync.WaitGroup + for i := 0; i < cfg.ReaderWorkerCount; i++ { + readerWg.Add(1) + go func() { + defer readerWg.Done() + if err := readerWorker(ctx, lg, badgerDB, prefixJobs, kvChan, cfg.BatchByteSize); err != nil { + reportFirstError(err) + } + }() + } + + var writerWg sync.WaitGroup + for i := 0; i < cfg.WriterWorkerCount; i++ { + writerWg.Add(1) + i := i // capture the loop variable for the goroutine + go func() { + defer writerWg.Done() + if err := writerSSTableWorker(ctx, i, pebbleDB, sstableDir, kvChan); err != nil { + reportFirstError(err) + } + }() + } + + // Close kvChan after readers complete + go func() { + readerWg.Wait() + close(kvChan) + }() + + writerWg.Wait() + return firstErr +} + +func writerSSTableWorker(ctx context.Context, workerIndex int, db *pebble.DB, sstableDir string, kvChan <-chan KVPairs) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case kvGroup, ok := <-kvChan: + if !ok { + return nil + } + + filePath := fmt.Sprintf("%s/prefix_%x_worker_%v.sst", sstableDir, kvGroup.Prefix, workerIndex) + writer, err := createSSTableWriter(filePath) + if err != nil { + return err + } + + for _, kv := range kvGroup.Pairs { + if err := writer.Set(kv.Key, kv.Value); err != nil { + return fmt.Errorf("failed to set key %x: %w", kv.Key, err) + } + } + + if err := writer.Close(); err != nil { + return fmt.Errorf("failed to close writer: %w", err) + } + + err = db.Ingest(ctx, []string{filePath}) + if err != nil { + return fmt.Errorf("failed to ingest file %v: %w", filePath, err) + } + + log.Info().Msgf("Ingested SSTable file: %s", filePath) + } + } +} + +func createSSTableWriter(filePath string) (*sstable.Writer, error) { + f, err := vfs.Default.Create(filePath, vfs.WriteCategoryUnspecified) + if err != nil { + return nil, err + } + + writable := objstorageprovider.NewFileWritable(f) + sstWriter := sstable.NewWriter(writable, sstable.WriterOptions{ + // pebble v1 uses TableFormatPebblev4; pebble v2's latest is TableFormatPebblev5 (TableFormatMax). + // In order to stay compatible with pebble v1, we use TableFormatPebblev4 for now. + // TODO: use TableFormatMax in next spork + // TableFormat: sstable.TableFormatMax, + TableFormat: sstable.TableFormatPebblev4, + }) + + return sstWriter, nil +} + +func ForceCompactPebbleDB(pebbleDir string) error { + pebbleDB, err := pebble.Open(pebbleDir, &pebble.Options{ + // TODO: use FormatNewest in next spork + // FormatMajorVersion: pebble.FormatNewest, + FormatMajorVersion: pebble.FormatVirtualSSTables, + }) + if err != nil { + return err + } + + if err := pebbleDB.Compact([]byte{0x00}, []byte{0xff}, true); err != nil { + pebbleDB.Close() + return err + } + + // close the database so the caller can reopen it with its own options + return pebbleDB.Close() +}
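The two-step split in CopyFromBadgerToPebbleSSTables (exact short keys first, then prefix shards) covers the whole keyspace exactly once. A sketch of what the two helpers produce, using the 2-byte shard counts confirmed by the prefix test at the top of this section (illustrative only, not part of the diff):

// Illustrative sketch — not part of this change.
short := GenerateKeysShorterThanPrefix(2) // every possible key shorter than the 2-byte shard prefix, copied individually
shards := GeneratePrefixes(2)             // 65536 shard prefixes: [0x00 0x00], [0x00 0x01], ..., [0xff 0xff]
// Each shard prefix becomes one reader job; each resulting batch is written to
// its own SSTable file and ingested into Pebble by a writer worker.
_, _ = short, shards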
diff --git a/storage/migration/sstables_test.go b/storage/migration/sstables_test.go new file mode 100644 index 00000000000..a9201486862 --- /dev/null +++ b/storage/migration/sstables_test.go @@ -0,0 +1,63 @@ +package migration + +import ( + "context" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/cockroachdb/pebble/v2/objstorage/objstorageprovider" + "github.com/cockroachdb/pebble/v2/sstable" + "github.com/cockroachdb/pebble/v2/vfs" + "github.com/stretchr/testify/require" +) + +func TestPebbleSSTableIngest(t *testing.T) { + // Create a temporary directory for the Pebble DB + dir, err := os.MkdirTemp("", "pebble-test") + require.NoError(t, err) + defer os.RemoveAll(dir) + + // Open Pebble DB + db, err := pebble.Open(dir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) + require.NoError(t, err) + defer db.Close() + + // Create an SSTable with a few key-values + sstPath := filepath.Join(dir, "test.sst") + file, err := vfs.Default.Create(sstPath, vfs.WriteCategoryUnspecified) + require.NoError(t, err) + writable := objstorageprovider.NewFileWritable(file) + writer := sstable.NewWriter(writable, sstable.WriterOptions{ + TableFormat: sstable.TableFormatMax, + }) + data := generateRandomKVData(500, 10, 50) + + // Sort the keys, since the SSTable writer requires strictly increasing key order + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + require.NoError(t, writer.Set([]byte(k), []byte(data[k]))) + } + + require.NoError(t, writer.Close()) + + // Ingest the SSTable into Pebble DB + require.NoError(t, db.Ingest(context.Background(), []string{sstPath})) + + // Verify the data exists + for _, k := range keys { + val, closer, err := db.Get([]byte(k)) + require.NoError(t, err, "expected key %s to exist", k) + require.Equal(t, data[k], string(val)) + require.NoError(t, closer.Close()) + } +} diff --git a/storage/migration/validation.go b/storage/migration/validation.go new file mode 100644 index 00000000000..b78732ec664 --- /dev/null +++ b/storage/migration/validation.go @@ -0,0 +1,390 @@ +package migration + +import ( + "bytes" + "context" + "fmt" + "os" + "slices" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/rs/zerolog/log" + "golang.org/x/sync/errgroup" + + "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/storage" +) + +// ValidationMode defines how thorough the validation should be +type ValidationMode string + +const ( + // PartialValidation only checks min/max keys for each prefix + PartialValidation ValidationMode = "partial" + // FullValidation checks all keys in the database + FullValidation ValidationMode = "full" +) + +// batchSize is the byte-size threshold for the read batches used during validation. +const batchSize = 10 + +// ParseValidationModeValid parses a string into a ValidationMode, returning an error for unknown values. +func ParseValidationModeValid(mode string) (ValidationMode, error) { + switch mode { + case string(PartialValidation): + return PartialValidation, nil + case string(FullValidation): + return FullValidation, nil + default: + return "", fmt.Errorf("invalid validation mode: %s", mode) + } +} + +// isDirEmpty checks if a directory exists and is empty. +// Returns true if the directory is empty, false if it contains files, +// and an error if the directory doesn't exist or there's an error reading it. +func isDirEmpty(dir string) (bool, error) { + entries, err := os.ReadDir(dir) + if err != nil { + return false, err + } + return len(entries) == 0, nil +} + +// createDirIfNotExists creates a directory if it doesn't exist. +// Returns an error if the directory already exists and is not empty, +// or if there's an error creating the directory. +func createDirIfNotExists(dir string) error { + if stat, err := os.Stat(dir); err == nil { + if !stat.IsDir() { + return fmt.Errorf("path exists but is not a directory: %s", dir) + } + isEmpty, err := isDirEmpty(dir) + if err != nil { + return fmt.Errorf("failed to check if directory is empty: %w", err) + } + if !isEmpty { + return fmt.Errorf("directory exists and is not empty: %s", dir) + } + return nil + } else if !os.IsNotExist(err) { + return fmt.Errorf("error checking directory: %w", err) + } + + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + return nil +} + +// validateBadgerFolderExistPebbleFolderEmpty checks if the Badger directory exists and is non-empty, +// and if the Pebble directory does not exist or is empty. +func validateBadgerFolderExistPebbleFolderEmpty(badgerDir string, pebbleDir string) error { + // Step 1.1: Ensure Badger directory exists and is non-empty + isEmpty, err := isDirEmpty(badgerDir) + if err != nil { + return fmt.Errorf("badger directory invalid: %w", err) + } + if isEmpty { + return fmt.Errorf("badger directory is empty, %v", badgerDir) + } + + // Step 1.2: Ensure Pebble directory does not exist or is empty + if err := createDirIfNotExists(pebbleDir); err != nil { + return fmt.Errorf("pebble directory validation failed %v: %w", pebbleDir, err) + } + + return nil +} + +func validateMinMaxKeyConsistency(badgerDB *badger.DB, pebbleDB *pebble.DB, prefixBytes int) error { + keys, err := sampleValidationKeysByPrefix(badgerDB, prefixBytes) + if err != nil { + return fmt.Errorf("failed to collect validation keys: %w", err) + } + if err := compareValuesBetweenDBs(keys, badgerDB, pebbleDB); err != nil { + return fmt.Errorf("data mismatch found: %w", err) + } + return nil +} + +// sampleValidationKeysByPrefix takes a prefix length in bytes (1 means 1-byte prefixes, 2 means 2-byte prefixes, etc.) +// and returns the min and max keys for each prefix group. +// The output is used to validate consistency between the Badger and Pebble databases: +// we select a sample of keys and compare their values between the two databases, and +// the min and max keys of each prefix group are easy to select with a database iterator. +func sampleValidationKeysByPrefix(db *badger.DB, prefixBytes int) ([][]byte, error) { + // this includes all prefixes that are shorter than or equal to prefixBytes + // for instance, if prefixBytes is 2, we will include all prefixes that are 1 or 2 bytes: + // [ + // [0x00], [0x01], [0x02], ..., [0xff], // 1 byte prefixes + // [0x00, 0x00], [0x00, 0x01], [0x00, 0x02], ..., [0xff, 0xff] // 2 byte prefixes + // ] + prefixes := GenerateKeysShorterThanPrefix(prefixBytes + 1) + var allKeys [][]byte + + err := db.View(func(txn *badger.Txn) error { + for _, prefix := range prefixes { + // Find min key + opts := badger.DefaultIteratorOptions + it := txn.NewIterator(opts) + it.Seek(prefix) + if it.ValidForPrefix(prefix) { + allKeys = append(allKeys, slices.Clone(it.Item().Key())) + } + it.Close() + + // Find max key with reverse iterator + opts.Reverse = true + it = txn.NewIterator(opts) + + // the upper bound is exclusive, so we need to seek to the upper bound + // when the prefix is [0xff,0xff], the end is nil, and we will iterate + // from the last key + end := storage.PrefixUpperBound(prefix) + it.Seek(end) + if it.ValidForPrefix(prefix) { + allKeys = append(allKeys, slices.Clone(it.Item().Key())) + } + it.Close() + } + return nil + }) + if err != nil { + return nil, err + } + + // Deduplicate keys + keyMap := make(map[string][]byte, len(allKeys)) + for _, k := range allKeys { + keyMap[string(k)] = k + } + uniqueKeys := make([][]byte, 0, len(keyMap)) + for _, k := range keyMap { + uniqueKeys = append(uniqueKeys, k) + } + + return uniqueKeys, nil +} + +// compareValuesBetweenDBs takes a list of keys and compares the values between the Badger and Pebble databases; +// it returns an error if any of the values differ. +func compareValuesBetweenDBs(keys [][]byte, badgerDB *badger.DB, pebbleDB *pebble.DB) error { + for _, key := range keys { + var badgerVal []byte + err := badgerDB.View(func(txn *badger.Txn) error { + item, err := txn.Get(key) + if err != nil { + return err + } + badgerVal, err = item.ValueCopy(nil) + return err + }) + if err != nil { + return fmt.Errorf("badger get error for key %x: %w", key, err) + } + + pebbleVal, closer, err := pebbleDB.Get(key) + if err != nil { + return fmt.Errorf("pebble get error for key %x: %w", key, err) + } + if string(pebbleVal) != string(badgerVal) { + // build the error before closing, since pebbleVal is only valid until Close + mismatchErr := fmt.Errorf("value mismatch for key %x: badger=%q pebble=%q: %w", key, badgerVal, pebbleVal, + storage.ErrDataMismatch) + _ = closer.Close() + return mismatchErr + } + _ = closer.Close() + } + return nil +} + +// validateData performs validation based on the configured validation mode +func validateData(badgerDB *badger.DB, pebbleDB *pebble.DB, cfg MigrationConfig) error { + switch cfg.ValidationMode { + case PartialValidation: + return validateMinMaxKeyConsistency(badgerDB, pebbleDB, cfg.ReaderShardPrefixBytes) + case FullValidation: + return validateAllKeys(badgerDB, pebbleDB) + default: + return fmt.Errorf("unknown validation mode: %s", cfg.ValidationMode) + } +} + +// validateAllKeys performs a full validation by comparing all keys between Badger and Pebble +func validateAllKeys(badgerDB *badger.DB, pebbleDB *pebble.DB) error { + // Use the same prefix sharding as the migration (1 byte here; could be made configurable) + const prefixBytes = 1 + prefixes := GeneratePrefixes(prefixBytes) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eg, ctx := errgroup.WithContext(ctx) + + lg := util.LogProgress( + log.Logger, + util.DefaultLogProgressConfig("validating migrated keys", len(prefixes)), + ) + + for _, prefix := range prefixes { + curPrefix := prefix // capture range variable + + eg.Go(func() error { + defer lg(1) + start := time.Now() + + // each worker only processes one prefix, so the workers themselves don't log progress; + // the surrounding goroutine logs it via the deferred lg(1) + noopLogging := func(int) {} + + // Channels for key-value pairs from Badger and Pebble + kvChanBadger := make(chan KVPairs, 1) + kvChanPebble := make(chan KVPairs, 1) + + // Start Badger reader worker + badgerErrCh := make(chan error, 1) + + // By wrapping a single prefix in a channel, the badger worker and the pebble worker can work on the same prefix. + go func() { + err := readerWorker(ctx, noopLogging, badgerDB, singlePrefixChan(curPrefix), kvChanBadger, batchSize) + close(kvChanBadger) + badgerErrCh <- err + }() + + // Start Pebble reader worker + pebbleErrCh := make(chan error, 1) + go func() { + err := pebbleReaderWorker(ctx, noopLogging, pebbleDB, singlePrefixChan(curPrefix), kvChanPebble, batchSize) + close(kvChanPebble) + pebbleErrCh <- err + }() + + // Compare outputs + err := compareKeyValuePairsFromChannels(ctx, kvChanBadger, kvChanPebble) + + // Wait for workers to finish and check for errors + badgerErr := <-badgerErrCh + pebbleErr := <-pebbleErrCh + + if badgerErr != nil { + return fmt.Errorf("badger reader error for prefix %x: %w", curPrefix, badgerErr) + } + if pebbleErr != nil { + return fmt.Errorf("pebble reader error for prefix %x: %w", curPrefix, pebbleErr) + } + if err != nil { + return fmt.Errorf("comparison error for prefix %x: %w", curPrefix, err) + } + + log.Info().Str("prefix", fmt.Sprintf("%x", curPrefix)). + Msgf("successfully validated prefix in %s", time.Since(start)) + return nil + }) + } + + return eg.Wait() +} + +// singlePrefixChan returns a channel that yields a single prefix and then closes. +// Usage: This function is used in validateAllKeys when launching reader workers (e.g., readerWorker and pebbleReaderWorker) +// for the same prefix. +func singlePrefixChan(prefix []byte) <-chan []byte { + ch := make(chan []byte, 1) + ch <- prefix + close(ch) + return ch +} + +// compareKeyValuePairsFromChannels compares the key-value pairs from both channels, +// returning an error if any pair differs or if ctx is cancelled. +func compareKeyValuePairsFromChannels(ctx context.Context, kvChanBadger <-chan KVPairs, kvChanPebble <-chan KVPairs) error { + var ( + kvBadger, kvPebble KVPairs + okBadger, okPebble bool + ) + + for { + // Read from both channels + select { + case <-ctx.Done(): + return fmt.Errorf("context cancelled while reading from badger: %w", ctx.Err()) + case kvBadger, okBadger = <-kvChanBadger: + if !okBadger { + kvBadger = KVPairs{} + } + } + + select { + case <-ctx.Done(): + return fmt.Errorf("context cancelled while reading from pebble: %w", ctx.Err()) + case kvPebble, okPebble = <-kvChanPebble: + if !okPebble { + kvPebble = KVPairs{} + } + } + + // If both channels are closed, we're done + if !okBadger && !okPebble { + break + } + + // Handle case where Badger channel is closed, Pebble channel is not. + if !okBadger && okPebble { + if len(kvPebble.Pairs) > 0 { + return fmt.Errorf("key %x exists in pebble but not in badger", kvPebble.Pairs[0].Key) + } + return fmt.Errorf("badger channel closed while pebble channel delivered an empty batch") + } + + // Handle case where Pebble channel is closed, Badger channel is not. + if okBadger && !okPebble { + if len(kvBadger.Pairs) > 0 { + return fmt.Errorf("key %x exists in badger but not in pebble", kvBadger.Pairs[0].Key) + } + return fmt.Errorf("pebble channel closed while badger channel delivered an empty batch") + } + + // Both channels are open, compare prefixes + if !bytes.Equal(kvBadger.Prefix, kvPebble.Prefix) { + return fmt.Errorf("prefix mismatch: badger=%x, pebble=%x", kvBadger.Prefix, kvPebble.Prefix) + } + + // Compare key-value pairs + i, j := 0, 0 + for i < len(kvBadger.Pairs) && j < len(kvPebble.Pairs) { + pairBadger := kvBadger.Pairs[i] + pairPebble := kvPebble.Pairs[j] + + cmp := bytes.Compare(pairBadger.Key, pairPebble.Key) + if cmp < 0 { + return fmt.Errorf("key %x exists in badger but not in pebble", pairBadger.Key) + } + if cmp > 0 { + return fmt.Errorf("key %x exists in pebble but not in badger", pairPebble.Key) + } + + // Keys are equal, compare values + if !bytes.Equal(pairBadger.Value, pairPebble.Value) { + return fmt.Errorf("value mismatch for key %x: badger=%x, pebble=%x", + pairBadger.Key, pairBadger.Value, pairPebble.Value) + } + + i++ + j++ + } + + // Check if there are remaining pairs in either channel + if i < len(kvBadger.Pairs) { + return fmt.Errorf("key %x exists in badger but not in pebble", kvBadger.Pairs[i].Key) + } + if j < len(kvPebble.Pairs) { + return fmt.Errorf("key %x exists in pebble but not in badger", kvPebble.Pairs[j].Key) + } + } + + return nil +}
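Since validateData is unexported, mode selection happens inside the migration package (the runner above wires it in from MigrationConfig). A minimal same-package sketch of choosing between the two modes (illustrative only, not part of the diff; badgerDB and pebbleDB are assumed to be open):

// Illustrative sketch — not part of this change.
cfg := DefaultMigrationConfig
cfg.ValidationMode = FullValidation // or PartialValidation for min/max-per-prefix sampling
if err := validateData(badgerDB, pebbleDB, cfg); err != nil {
	// partial-validation mismatches wrap storage.ErrDataMismatch, so callers can match on it
	if errors.Is(err, storage.ErrDataMismatch) {
		log.Error().Err(err).Msg("migrated data does not match the source")
	}
	return err
}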
diff --git a/storage/migration/validation_test.go b/storage/migration/validation_test.go new file mode 100644 index 00000000000..f7abf578586 --- /dev/null +++ b/storage/migration/validation_test.go @@ -0,0 +1,143 @@ +package migration + +import ( + "context" + "sort" + "testing" + + "github.com/dgraph-io/badger/v2" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/utils/unittest" +) + +func TestSampleValidationKeysByPrefix(t *testing.T) { + unittest.RunWithBadgerDB(t, func(db *badger.DB) { + // Insert test keys + testKeys := []string{ + "\x01\x02keyA", // group: 0x01, 0x02 + "\x01\x02keyB", + "\x01\x03keyC", // group: 0x01, 0x03 + "\x02\x00keyD", // group: 0x02, 0x00 + "\x02\x00keyE", // group: 0x02, 0x00 + "\x02\x00keyF", // group: 0x02, 0x00 + "\xff\xfflast", + } + require.NoError(t, db.Update(func(txn *badger.Txn) error { + for _, k := range testKeys { + err := txn.Set([]byte(k), []byte("val_"+k)) + require.NoError(t, err) + } + return nil + })) + + // Run key collection + keys, err := sampleValidationKeysByPrefix(db, 2) + require.NoError(t, err) + + // Convert to string for easier comparison + var keyStrs []string + for _, k := range keys { + keyStrs = append(keyStrs, string(k)) + } + sort.Strings(keyStrs) + + // Expected keys are min and max for each 2-byte prefix group + expected := []string{ + "\x01\x02keyA", "\x01\x02keyB", // same group has both min and max + "\x01\x03keyC", // only one key in this group + "\x02\x00keyD", // min key of 0x02,0x00 + "\x02\x00keyF", // max key of 0x02,0x00 + "\xff\xfflast", // last key in this prefix + } + sort.Strings(expected) + require.ElementsMatch(t, expected, keyStrs) + }) +} + +func TestCompareKeyValuePairsFromChannels(t *testing.T) { + type testCase struct { + name string + badgerKVs []KVPairs + pebbleKVs []KVPairs + expectErr string // substring to match in error, or empty for no error + } + + prefix := []byte("pfx") + key1 := []byte("key1") + val1 := []byte("val1") + key2 := []byte("key2") + val2 := []byte("val2") + val2diff := []byte("DIFF") + + tests := []testCase{ + { + name: "matching pairs", + badgerKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2}}}}, + pebbleKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2}}}}, + expectErr: "", + }, + { + name: "value mismatch", + badgerKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2}}}}, + pebbleKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2diff}}}}, + expectErr: "value mismatch for key", + }, + { + name: "key missing in pebble", + badgerKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2}}}}, + pebbleKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}}}}, + expectErr: "key 6b657932 exists in badger but not in pebble", + }, + { + name: "key missing in badger", + badgerKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}}}}, + pebbleKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}, {Key: key2, Value: val2}}}}, + expectErr: "key 6b657932 exists in pebble but not in badger", + }, + { + name: "prefix mismatch", + badgerKVs: []KVPairs{{Prefix: []byte("pfx1"), Pairs: []KVPair{{Key: key1, Value: val1}}}}, + pebbleKVs: []KVPairs{{Prefix: []byte("pfx2"), Pairs: []KVPair{{Key: key1, Value: val1}}}}, + expectErr: "prefix mismatch", + }, + { + name: "context cancelled", + badgerKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}}}}, + pebbleKVs: []KVPairs{{Prefix: prefix, Pairs: []KVPair{{Key: key1, Value: val1}}}}, + expectErr: "context cancelled", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + badgerCh := make(chan KVPairs, len(tc.badgerKVs)) + pebbleCh := make(chan KVPairs, len(tc.pebbleKVs)) + + for _, kv := range tc.badgerKVs { + badgerCh <- kv + } + close(badgerCh) + for _, kv := range tc.pebbleKVs { + pebbleCh <- kv + } + close(pebbleCh) + + if tc.name == "context cancelled" { + // Cancel context before running + cancel() + } + + err := compareKeyValuePairsFromChannels(ctx, badgerCh, pebbleCh) + if tc.expectErr == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectErr) + } + }) + } +} diff --git a/storage/mock/batch.go b/storage/mock/batch.go new file mode 100644 index 00000000000..682bd456095 --- /dev/null +++ b/storage/mock/batch.go @@ -0,0 +1,143 @@ +// Code generated by mockery. DO NOT EDIT.
+ +package mock + +import ( + storage "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// Batch is an autogenerated mock type for the Batch type +type Batch struct { + mock.Mock +} + +// AddCallback provides a mock function with given fields: _a0 +func (_m *Batch) AddCallback(_a0 func(error)) { + _m.Called(_a0) +} + +// Close provides a mock function with no fields +func (_m *Batch) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Commit provides a mock function with no fields +func (_m *Batch) Commit() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Commit") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GlobalReader provides a mock function with no fields +func (_m *Batch) GlobalReader() storage.Reader { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GlobalReader") + } + + var r0 storage.Reader + if rf, ok := ret.Get(0).(func() storage.Reader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.Reader) + } + } + + return r0 +} + +// ScopedValue provides a mock function with given fields: key +func (_m *Batch) ScopedValue(key string) (any, bool) { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for ScopedValue") + } + + var r0 any + var r1 bool + if rf, ok := ret.Get(0).(func(string) (any, bool)); ok { + return rf(key) + } + if rf, ok := ret.Get(0).(func(string) any); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(any) + } + } + + if rf, ok := ret.Get(1).(func(string) bool); ok { + r1 = rf(key) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// SetScopedValue provides a mock function with given fields: key, value +func (_m *Batch) SetScopedValue(key string, value any) { + _m.Called(key, value) +} + +// Writer provides a mock function with no fields +func (_m *Batch) Writer() storage.Writer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Writer") + } + + var r0 storage.Writer + if rf, ok := ret.Get(0).(func() storage.Writer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.Writer) + } + } + + return r0 +} + +// NewBatch creates a new instance of Batch. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBatch(t interface { + mock.TestingT + Cleanup(func()) +}) *Batch { + mock := &Batch{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/batch_storage.go b/storage/mock/batch_storage.go index 356832a3131..778593db273 100644 --- a/storage/mock/batch_storage.go +++ b/storage/mock/batch_storage.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -12,10 +12,14 @@ type BatchStorage struct { mock.Mock } -// Flush provides a mock function with given fields: +// Flush provides a mock function with no fields func (_m *BatchStorage) Flush() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Flush") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -26,10 +30,14 @@ func (_m *BatchStorage) Flush() error { return r0 } -// GetWriter provides a mock function with given fields: +// GetWriter provides a mock function with no fields func (_m *BatchStorage) GetWriter() *badger.WriteBatch { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetWriter") + } + var r0 *badger.WriteBatch if rf, ok := ret.Get(0).(func() *badger.WriteBatch); ok { r0 = rf() @@ -47,13 +55,12 @@ func (_m *BatchStorage) OnSucceed(callback func()) { _m.Called(callback) } -type mockConstructorTestingTNewBatchStorage interface { +// NewBatchStorage creates a new instance of BatchStorage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBatchStorage(t interface { mock.TestingT Cleanup(func()) -} - -// NewBatchStorage creates a new instance of BatchStorage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBatchStorage(t mockConstructorTestingTNewBatchStorage) *BatchStorage { +}) *BatchStorage { mock := &BatchStorage{} mock.Mock.Test(t) diff --git a/storage/mock/beacon_private_keys.go b/storage/mock/beacon_private_keys.go deleted file mode 100644 index 9dc1ec2fe69..00000000000 --- a/storage/mock/beacon_private_keys.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mock - -import ( - encodable "github.com/onflow/flow-go/model/encodable" - mock "github.com/stretchr/testify/mock" -) - -// BeaconPrivateKeys is an autogenerated mock type for the BeaconPrivateKeys type -type BeaconPrivateKeys struct { - mock.Mock -} - -// InsertMyBeaconPrivateKey provides a mock function with given fields: epochCounter, key -func (_m *BeaconPrivateKeys) InsertMyBeaconPrivateKey(epochCounter uint64, key *encodable.RandomBeaconPrivKey) error { - ret := _m.Called(epochCounter, key) - - var r0 error - if rf, ok := ret.Get(0).(func(uint64, *encodable.RandomBeaconPrivKey) error); ok { - r0 = rf(epochCounter, key) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// RetrieveMyBeaconPrivateKey provides a mock function with given fields: epochCounter -func (_m *BeaconPrivateKeys) RetrieveMyBeaconPrivateKey(epochCounter uint64) (*encodable.RandomBeaconPrivKey, error) { - ret := _m.Called(epochCounter) - - var r0 *encodable.RandomBeaconPrivKey - if rf, ok := ret.Get(0).(func(uint64) *encodable.RandomBeaconPrivKey); ok { - r0 = rf(epochCounter) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*encodable.RandomBeaconPrivKey) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(epochCounter) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/storage/mock/blocks.go b/storage/mock/blocks.go index cc5326e4f11..15e799ccff2 100644 --- a/storage/mock/blocks.go +++ b/storage/mock/blocks.go @@ -1,12 +1,14 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // Blocks is an autogenerated mock type for the Blocks type @@ -14,10 +16,32 @@ type Blocks struct { mock.Mock } +// BatchStore provides a mock function with given fields: lctx, rw, proposal +func (_m *Blocks) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, proposal *flow.Proposal) error { + ret := _m.Called(lctx, rw, proposal) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, *flow.Proposal) error); ok { + r0 = rf(lctx, rw, proposal) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByCollectionID provides a mock function with given fields: collID func (_m *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) { ret := _m.Called(collID) + if len(ret) == 0 { + panic("no return value specified for ByCollectionID") + } + var r0 *flow.Block var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Block, error)); ok { @@ -44,6 +68,10 @@ func (_m *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) { func (_m *Blocks) ByHeight(height uint64) (*flow.Block, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for ByHeight") + } + var r0 *flow.Block var r1 error if rf, ok := ret.Get(0).(func(uint64) (*flow.Block, error)); ok { @@ -70,6 +98,10 @@ func (_m *Blocks) ByHeight(height uint64) (*flow.Block, error) { func (_m *Blocks) ByID(blockID flow.Identifier) (*flow.Block, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByID") + } + var r0 *flow.Block var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Block, error)); ok { @@ -92,23 +124,29 @@ func (_m *Blocks) ByID(blockID flow.Identifier) (*flow.Block, error) { return r0, r1 } -// GetLastFullBlockHeight provides a mock function with given fields: -func (_m *Blocks) GetLastFullBlockHeight() (uint64, error) { - ret := _m.Called() +// ByView provides a mock function with given fields: view +func (_m *Blocks) ByView(view uint64) (*flow.Block, error) { + ret := _m.Called(view) + + if len(ret) == 0 { + panic("no return value specified for ByView") + } - var r0 uint64 + var r0 *flow.Block var r1 error - if rf, ok := ret.Get(0).(func() (uint64, error)); ok { - return rf() + if rf, ok := ret.Get(0).(func(uint64) (*flow.Block, error)); ok { + return rf(view) } - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(uint64) *flow.Block); ok { + r0 = rf(view) } else { - r0 = ret.Get(0).(uint64) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Block) + } } - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(view) } else { r1 = ret.Error(1) } @@ -116,10 +154,14 @@ func (_m *Blocks) GetLastFullBlockHeight() (uint64, error) { return r0, r1 } -// IndexBlockForCollections provides a mock function with given fields: blockID, collIDs -func (_m *Blocks) IndexBlockForCollections(blockID flow.Identifier, collIDs []flow.Identifier) error { +// IndexBlockContainingCollectionGuarantees provides a mock function with given fields: blockID, collIDs +func (_m *Blocks) IndexBlockContainingCollectionGuarantees(blockID flow.Identifier, 
collIDs []flow.Identifier) error { ret := _m.Called(blockID, collIDs) + if len(ret) == 0 { + panic("no return value specified for IndexBlockContainingCollectionGuarantees") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.Identifier) error); ok { r0 = rf(blockID, collIDs) @@ -130,71 +172,102 @@ func (_m *Blocks) IndexBlockForCollections(blockID flow.Identifier, collIDs []fl return r0 } -// InsertLastFullBlockHeightIfNotExists provides a mock function with given fields: height -func (_m *Blocks) InsertLastFullBlockHeightIfNotExists(height uint64) error { +// ProposalByHeight provides a mock function with given fields: height +func (_m *Blocks) ProposalByHeight(height uint64) (*flow.Proposal, error) { ret := _m.Called(height) - var r0 error - if rf, ok := ret.Get(0).(func(uint64) error); ok { + if len(ret) == 0 { + panic("no return value specified for ProposalByHeight") + } + + var r0 *flow.Proposal + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (*flow.Proposal, error)); ok { + return rf(height) + } + if rf, ok := ret.Get(0).(func(uint64) *flow.Proposal); ok { r0 = rf(height) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Proposal) + } } - return r0 -} - -// Store provides a mock function with given fields: block -func (_m *Blocks) Store(block *flow.Block) error { - ret := _m.Called(block) - - var r0 error - if rf, ok := ret.Get(0).(func(*flow.Block) error); ok { - r0 = rf(block) + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(height) } else { - r0 = ret.Error(0) + r1 = ret.Error(1) } - return r0 + return r0, r1 } -// StoreTx provides a mock function with given fields: block -func (_m *Blocks) StoreTx(block *flow.Block) func(*transaction.Tx) error { - ret := _m.Called(block) +// ProposalByID provides a mock function with given fields: blockID +func (_m *Blocks) ProposalByID(blockID flow.Identifier) (*flow.Proposal, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for ProposalByID") + } - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(*flow.Block) func(*transaction.Tx) error); ok { - r0 = rf(block) + var r0 *flow.Proposal + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Proposal, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.Proposal); ok { + r0 = rf(blockID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) + r0 = ret.Get(0).(*flow.Proposal) } } - return r0 + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// UpdateLastFullBlockHeight provides a mock function with given fields: height -func (_m *Blocks) UpdateLastFullBlockHeight(height uint64) error { - ret := _m.Called(height) +// ProposalByView provides a mock function with given fields: view +func (_m *Blocks) ProposalByView(view uint64) (*flow.Proposal, error) { + ret := _m.Called(view) - var r0 error - if rf, ok := ret.Get(0).(func(uint64) error); ok { - r0 = rf(height) + if len(ret) == 0 { + panic("no return value specified for ProposalByView") + } + + var r0 *flow.Proposal + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (*flow.Proposal, error)); ok { + return rf(view) + } + if rf, ok := ret.Get(0).(func(uint64) *flow.Proposal); ok { + r0 = rf(view) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Proposal) + } } - return r0 -} + if rf, ok := ret.Get(1).(func(uint64) 
error); ok { + r1 = rf(view) + } else { + r1 = ret.Error(1) + } -type mockConstructorTestingTNewBlocks interface { - mock.TestingT - Cleanup(func()) + return r0, r1 } // NewBlocks creates a new instance of Blocks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewBlocks(t mockConstructorTestingTNewBlocks) *Blocks { +// The first argument is typically a *testing.T value. +func NewBlocks(t interface { + mock.TestingT + Cleanup(func()) +}) *Blocks { mock := &Blocks{} mock.Mock.Test(t) diff --git a/storage/mock/chunk_data_packs.go b/storage/mock/chunk_data_packs.go index 66205d7c099..653eba59cd5 100644 --- a/storage/mock/chunk_data_packs.go +++ b/storage/mock/chunk_data_packs.go @@ -1,9 +1,11 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" @@ -15,26 +17,16 @@ type ChunkDataPacks struct { } // BatchRemove provides a mock function with given fields: chunkID, batch -func (_m *ChunkDataPacks) BatchRemove(chunkID flow.Identifier, batch storage.BatchStorage) error { +func (_m *ChunkDataPacks) BatchRemove(chunkID flow.Identifier, batch storage.ReaderBatchWriter) error { ret := _m.Called(chunkID, batch) - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, storage.BatchStorage) error); ok { - r0 = rf(chunkID, batch) - } else { - r0 = ret.Error(0) + if len(ret) == 0 { + panic("no return value specified for BatchRemove") } - return r0 -} - -// BatchStore provides a mock function with given fields: c, batch -func (_m *ChunkDataPacks) BatchStore(c *flow.ChunkDataPack, batch storage.BatchStorage) error { - ret := _m.Called(c, batch) - var r0 error - if rf, ok := ret.Get(0).(func(*flow.ChunkDataPack, storage.BatchStorage) error); ok { - r0 = rf(c, batch) + if rf, ok := ret.Get(0).(func(flow.Identifier, storage.ReaderBatchWriter) error); ok { + r0 = rf(chunkID, batch) } else { r0 = ret.Error(0) } @@ -46,6 +38,10 @@ func (_m *ChunkDataPacks) BatchStore(c *flow.ChunkDataPack, batch storage.BatchS func (_m *ChunkDataPacks) ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error) { ret := _m.Called(chunkID) + if len(ret) == 0 { + panic("no return value specified for ByChunkID") + } + var r0 *flow.ChunkDataPack var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ChunkDataPack, error)); ok { @@ -68,13 +64,17 @@ func (_m *ChunkDataPacks) ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPac return r0, r1 } -// Store provides a mock function with given fields: c -func (_m *ChunkDataPacks) Store(c *flow.ChunkDataPack) error { - ret := _m.Called(c) +// Remove provides a mock function with given fields: cs +func (_m *ChunkDataPacks) Remove(cs []flow.Identifier) error { + ret := _m.Called(cs) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } var r0 error - if rf, ok := ret.Get(0).(func(*flow.ChunkDataPack) error); ok { - r0 = rf(c) + if rf, ok := ret.Get(0).(func([]flow.Identifier) error); ok { + r0 = rf(cs) } else { r0 = ret.Error(0) } @@ -82,13 +82,30 @@ func (_m *ChunkDataPacks) Store(c *flow.ChunkDataPack) error { return r0 } -type mockConstructorTestingTNewChunkDataPacks interface { - mock.TestingT - Cleanup(func()) +// StoreByChunkID provides a mock function with given fields: lctx, cs +func (_m *ChunkDataPacks) StoreByChunkID(lctx lockctx.Proof, cs 
[]*flow.ChunkDataPack) error { + ret := _m.Called(lctx, cs) + + if len(ret) == 0 { + panic("no return value specified for StoreByChunkID") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, []*flow.ChunkDataPack) error); ok { + r0 = rf(lctx, cs) + } else { + r0 = ret.Error(0) + } + + return r0 } // NewChunkDataPacks creates a new instance of ChunkDataPacks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChunkDataPacks(t mockConstructorTestingTNewChunkDataPacks) *ChunkDataPacks { +// The first argument is typically a *testing.T value. +func NewChunkDataPacks(t interface { + mock.TestingT + Cleanup(func()) +}) *ChunkDataPacks { mock := &ChunkDataPacks{} mock.Mock.Test(t) diff --git a/storage/mock/chunks_queue.go b/storage/mock/chunks_queue.go index e2c37661554..d181f271266 100644 --- a/storage/mock/chunks_queue.go +++ b/storage/mock/chunks_queue.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type ChunksQueue struct { func (_m *ChunksQueue) AtIndex(index uint64) (*chunks.Locator, error) { ret := _m.Called(index) + if len(ret) == 0 { + panic("no return value specified for AtIndex") + } + var r0 *chunks.Locator var r1 error if rf, ok := ret.Get(0).(func(uint64) (*chunks.Locator, error)); ok { @@ -38,10 +42,14 @@ func (_m *ChunksQueue) AtIndex(index uint64) (*chunks.Locator, error) { return r0, r1 } -// LatestIndex provides a mock function with given fields: +// LatestIndex provides a mock function with no fields func (_m *ChunksQueue) LatestIndex() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LatestIndex") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -66,6 +74,10 @@ func (_m *ChunksQueue) LatestIndex() (uint64, error) { func (_m *ChunksQueue) StoreChunkLocator(locator *chunks.Locator) (bool, error) { ret := _m.Called(locator) + if len(ret) == 0 { + panic("no return value specified for StoreChunkLocator") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(*chunks.Locator) (bool, error)); ok { @@ -86,13 +98,12 @@ func (_m *ChunksQueue) StoreChunkLocator(locator *chunks.Locator) (bool, error) return r0, r1 } -type mockConstructorTestingTNewChunksQueue interface { +// NewChunksQueue creates a new instance of ChunksQueue. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChunksQueue(t interface { mock.TestingT Cleanup(func()) -} - -// NewChunksQueue creates a new instance of ChunksQueue. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewChunksQueue(t mockConstructorTestingTNewChunksQueue) *ChunksQueue { +}) *ChunksQueue { mock := &ChunksQueue{} mock.Mock.Test(t) diff --git a/storage/mock/cluster_blocks.go b/storage/mock/cluster_blocks.go index ad4787f5128..12ef80c78a3 100644 --- a/storage/mock/cluster_blocks.go +++ b/storage/mock/cluster_blocks.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -14,20 +14,24 @@ type ClusterBlocks struct { mock.Mock } -// ByHeight provides a mock function with given fields: height -func (_m *ClusterBlocks) ByHeight(height uint64) (*cluster.Block, error) { +// ProposalByHeight provides a mock function with given fields: height +func (_m *ClusterBlocks) ProposalByHeight(height uint64) (*cluster.Proposal, error) { ret := _m.Called(height) - var r0 *cluster.Block + if len(ret) == 0 { + panic("no return value specified for ProposalByHeight") + } + + var r0 *cluster.Proposal var r1 error - if rf, ok := ret.Get(0).(func(uint64) (*cluster.Block, error)); ok { + if rf, ok := ret.Get(0).(func(uint64) (*cluster.Proposal, error)); ok { return rf(height) } - if rf, ok := ret.Get(0).(func(uint64) *cluster.Block); ok { + if rf, ok := ret.Get(0).(func(uint64) *cluster.Proposal); ok { r0 = rf(height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*cluster.Block) + r0 = ret.Get(0).(*cluster.Proposal) } } @@ -40,20 +44,24 @@ func (_m *ClusterBlocks) ByHeight(height uint64) (*cluster.Block, error) { return r0, r1 } -// ByID provides a mock function with given fields: blockID -func (_m *ClusterBlocks) ByID(blockID flow.Identifier) (*cluster.Block, error) { +// ProposalByID provides a mock function with given fields: blockID +func (_m *ClusterBlocks) ProposalByID(blockID flow.Identifier) (*cluster.Proposal, error) { ret := _m.Called(blockID) - var r0 *cluster.Block + if len(ret) == 0 { + panic("no return value specified for ProposalByID") + } + + var r0 *cluster.Proposal var r1 error - if rf, ok := ret.Get(0).(func(flow.Identifier) (*cluster.Block, error)); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) (*cluster.Proposal, error)); ok { return rf(blockID) } - if rf, ok := ret.Get(0).(func(flow.Identifier) *cluster.Block); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier) *cluster.Proposal); ok { r0 = rf(blockID) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*cluster.Block) + r0 = ret.Get(0).(*cluster.Proposal) } } @@ -66,27 +74,12 @@ func (_m *ClusterBlocks) ByID(blockID flow.Identifier) (*cluster.Block, error) { return r0, r1 } -// Store provides a mock function with given fields: block -func (_m *ClusterBlocks) Store(block *cluster.Block) error { - ret := _m.Called(block) - - var r0 error - if rf, ok := ret.Get(0).(func(*cluster.Block) error); ok { - r0 = rf(block) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewClusterBlocks interface { +// NewClusterBlocks creates a new instance of ClusterBlocks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClusterBlocks(t interface { mock.TestingT Cleanup(func()) -} - -// NewClusterBlocks creates a new instance of ClusterBlocks. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewClusterBlocks(t mockConstructorTestingTNewClusterBlocks) *ClusterBlocks { +}) *ClusterBlocks { mock := &ClusterBlocks{} mock.Mock.Test(t) diff --git a/storage/mock/cluster_payloads.go b/storage/mock/cluster_payloads.go index e4e1d00616b..d8db251339d 100644 --- a/storage/mock/cluster_payloads.go +++ b/storage/mock/cluster_payloads.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -18,6 +18,10 @@ type ClusterPayloads struct { func (_m *ClusterPayloads) ByBlockID(blockID flow.Identifier) (*cluster.Payload, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 *cluster.Payload var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*cluster.Payload, error)); ok { @@ -40,27 +44,12 @@ func (_m *ClusterPayloads) ByBlockID(blockID flow.Identifier) (*cluster.Payload, return r0, r1 } -// Store provides a mock function with given fields: blockID, payload -func (_m *ClusterPayloads) Store(blockID flow.Identifier, payload *cluster.Payload) error { - ret := _m.Called(blockID, payload) - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, *cluster.Payload) error); ok { - r0 = rf(blockID, payload) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewClusterPayloads interface { +// NewClusterPayloads creates a new instance of ClusterPayloads. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClusterPayloads(t interface { mock.TestingT Cleanup(func()) -} - -// NewClusterPayloads creates a new instance of ClusterPayloads. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewClusterPayloads(t mockConstructorTestingTNewClusterPayloads) *ClusterPayloads { +}) *ClusterPayloads { mock := &ClusterPayloads{} mock.Mock.Test(t) diff --git a/storage/mock/collections.go b/storage/mock/collections.go index 2927d8a27ec..592ba5fba26 100644 --- a/storage/mock/collections.go +++ b/storage/mock/collections.go @@ -1,10 +1,14 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" ) // Collections is an autogenerated mock type for the Collections type @@ -12,10 +16,44 @@ type Collections struct { mock.Mock } +// BatchStoreAndIndexByTransaction provides a mock function with given fields: lctx, collection, batch +func (_m *Collections) BatchStoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection, batch storage.ReaderBatchWriter) (*flow.LightCollection, error) { + ret := _m.Called(lctx, collection, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchStoreAndIndexByTransaction") + } + + var r0 *flow.LightCollection + var r1 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, *flow.Collection, storage.ReaderBatchWriter) (*flow.LightCollection, error)); ok { + return rf(lctx, collection, batch) + } + if rf, ok := ret.Get(0).(func(lockctx.Proof, *flow.Collection, storage.ReaderBatchWriter) *flow.LightCollection); ok { + r0 = rf(lctx, collection, batch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.LightCollection) + } + } + + if rf, ok := ret.Get(1).(func(lockctx.Proof, *flow.Collection, storage.ReaderBatchWriter) error); ok { + r1 = rf(lctx, collection, batch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // ByID provides a mock function with given fields: collID func (_m *Collections) ByID(collID flow.Identifier) (*flow.Collection, error) { ret := _m.Called(collID) + if len(ret) == 0 { + panic("no return value specified for ByID") + } + var r0 *flow.Collection var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Collection, error)); ok { @@ -42,6 +80,10 @@ func (_m *Collections) ByID(collID flow.Identifier) (*flow.Collection, error) { func (_m *Collections) LightByID(collID flow.Identifier) (*flow.LightCollection, error) { ret := _m.Called(collID) + if len(ret) == 0 { + panic("no return value specified for LightByID") + } + var r0 *flow.LightCollection var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.LightCollection, error)); ok { @@ -68,6 +110,10 @@ func (_m *Collections) LightByID(collID flow.Identifier) (*flow.LightCollection, func (_m *Collections) LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error) { ret := _m.Called(txID) + if len(ret) == 0 { + panic("no return value specified for LightByTransactionID") + } + var r0 *flow.LightCollection var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.LightCollection, error)); ok { @@ -94,6 +140,10 @@ func (_m *Collections) LightByTransactionID(txID flow.Identifier) (*flow.LightCo func (_m *Collections) Remove(collID flow.Identifier) error { ret := _m.Called(collID) + if len(ret) == 0 { + panic("no return value specified for Remove") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Identifier) error); ok { r0 = rf(collID) @@ -105,54 +155,71 @@ func (_m *Collections) Remove(collID flow.Identifier) error { } // Store provides a mock function with given fields: collection -func (_m *Collections) Store(collection *flow.Collection) error { +func (_m *Collections) Store(collection *flow.Collection) (*flow.LightCollection, error) { ret := _m.Called(collection) - var r0 error - if rf, ok := ret.Get(0).(func(*flow.Collection) error); ok { + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 *flow.LightCollection + var r1 error + if rf, ok := 
ret.Get(0).(func(*flow.Collection) (*flow.LightCollection, error)); ok { + return rf(collection) + } + if rf, ok := ret.Get(0).(func(*flow.Collection) *flow.LightCollection); ok { r0 = rf(collection) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.LightCollection) + } } - return r0 -} - -// StoreLight provides a mock function with given fields: collection -func (_m *Collections) StoreLight(collection *flow.LightCollection) error { - ret := _m.Called(collection) - - var r0 error - if rf, ok := ret.Get(0).(func(*flow.LightCollection) error); ok { - r0 = rf(collection) + if rf, ok := ret.Get(1).(func(*flow.Collection) error); ok { + r1 = rf(collection) } else { - r0 = ret.Error(0) + r1 = ret.Error(1) } - return r0 + return r0, r1 } -// StoreLightAndIndexByTransaction provides a mock function with given fields: collection -func (_m *Collections) StoreLightAndIndexByTransaction(collection *flow.LightCollection) error { - ret := _m.Called(collection) +// StoreAndIndexByTransaction provides a mock function with given fields: lctx, collection +func (_m *Collections) StoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection) (*flow.LightCollection, error) { + ret := _m.Called(lctx, collection) - var r0 error - if rf, ok := ret.Get(0).(func(*flow.LightCollection) error); ok { - r0 = rf(collection) + if len(ret) == 0 { + panic("no return value specified for StoreAndIndexByTransaction") + } + + var r0 *flow.LightCollection + var r1 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, *flow.Collection) (*flow.LightCollection, error)); ok { + return rf(lctx, collection) + } + if rf, ok := ret.Get(0).(func(lockctx.Proof, *flow.Collection) *flow.LightCollection); ok { + r0 = rf(lctx, collection) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.LightCollection) + } } - return r0 -} + if rf, ok := ret.Get(1).(func(lockctx.Proof, *flow.Collection) error); ok { + r1 = rf(lctx, collection) + } else { + r1 = ret.Error(1) + } -type mockConstructorTestingTNewCollections interface { - mock.TestingT - Cleanup(func()) + return r0, r1 } // NewCollections creates a new instance of Collections. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCollections(t mockConstructorTestingTNewCollections) *Collections { +// The first argument is typically a *testing.T value. +func NewCollections(t interface { + mock.TestingT + Cleanup(func()) +}) *Collections { mock := &Collections{} mock.Mock.Test(t) diff --git a/storage/mock/collections_reader.go b/storage/mock/collections_reader.go new file mode 100644 index 00000000000..3a8e071d6bc --- /dev/null +++ b/storage/mock/collections_reader.go @@ -0,0 +1,117 @@ +// Code generated by mockery. DO NOT EDIT. 
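Collections.Store above changes shape from returning a bare error to returning (*flow.LightCollection, error), while StoreLight and StoreLightAndIndexByTransaction give way to lock-context variants. Stubs for Store therefore need a second return value; a sketch with placeholder fixture values:

package example

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/onflow/flow-go/model/flow"
	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestCollectionsStoreMock(t *testing.T) {
	collections := storagemock.NewCollections(t)

	light := &flow.LightCollection{} // placeholder light collection
	// Old: On("Store", mock.Anything).Return(nil)
	collections.On("Store", mock.Anything).Return(light, nil)

	got, err := collections.Store(&flow.Collection{})
	if err != nil || got != light {
		t.Fatalf("unexpected result: %v %v", got, err)
	}
}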
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// CollectionsReader is an autogenerated mock type for the CollectionsReader type +type CollectionsReader struct { + mock.Mock +} + +// ByID provides a mock function with given fields: collID +func (_m *CollectionsReader) ByID(collID flow.Identifier) (*flow.Collection, error) { + ret := _m.Called(collID) + + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 *flow.Collection + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Collection, error)); ok { + return rf(collID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.Collection); ok { + r0 = rf(collID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Collection) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(collID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LightByID provides a mock function with given fields: collID +func (_m *CollectionsReader) LightByID(collID flow.Identifier) (*flow.LightCollection, error) { + ret := _m.Called(collID) + + if len(ret) == 0 { + panic("no return value specified for LightByID") + } + + var r0 *flow.LightCollection + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.LightCollection, error)); ok { + return rf(collID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.LightCollection); ok { + r0 = rf(collID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.LightCollection) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(collID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LightByTransactionID provides a mock function with given fields: txID +func (_m *CollectionsReader) LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error) { + ret := _m.Called(txID) + + if len(ret) == 0 { + panic("no return value specified for LightByTransactionID") + } + + var r0 *flow.LightCollection + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.LightCollection, error)); ok { + return rf(txID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.LightCollection); ok { + r0 = rf(txID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.LightCollection) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(txID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewCollectionsReader creates a new instance of CollectionsReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCollectionsReader(t interface { + mock.TestingT + Cleanup(func()) +}) *CollectionsReader { + mock := &CollectionsReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/commits.go b/storage/mock/commits.go index a3adc0979ab..5d86ac4803b 100644 --- a/storage/mock/commits.go +++ b/storage/mock/commits.go @@ -1,9 +1,11 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" @@ -15,11 +17,15 @@ type Commits struct { } // BatchRemoveByBlockID provides a mock function with given fields: blockID, batch -func (_m *Commits) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { +func (_m *Commits) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error { ret := _m.Called(blockID, batch) + if len(ret) == 0 { + panic("no return value specified for BatchRemoveByBlockID") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier, storage.ReaderBatchWriter) error); ok { r0 = rf(blockID, batch) } else { r0 = ret.Error(0) @@ -28,13 +34,17 @@ func (_m *Commits) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.B return r0 } -// BatchStore provides a mock function with given fields: blockID, commit, batch -func (_m *Commits) BatchStore(blockID flow.Identifier, commit flow.StateCommitment, batch storage.BatchStorage) error { - ret := _m.Called(blockID, commit, batch) +// BatchStore provides a mock function with given fields: lctx, blockID, commit, batch +func (_m *Commits) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, commit flow.StateCommitment, batch storage.ReaderBatchWriter) error { + ret := _m.Called(lctx, blockID, commit, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.StateCommitment, storage.BatchStorage) error); ok { - r0 = rf(blockID, commit, batch) + if rf, ok := ret.Get(0).(func(lockctx.Proof, flow.Identifier, flow.StateCommitment, storage.ReaderBatchWriter) error); ok { + r0 = rf(lctx, blockID, commit, batch) } else { r0 = ret.Error(0) } @@ -46,6 +56,10 @@ func (_m *Commits) BatchStore(blockID flow.Identifier, commit flow.StateCommitme func (_m *Commits) ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 flow.StateCommitment var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.StateCommitment, error)); ok { @@ -68,27 +82,12 @@ func (_m *Commits) ByBlockID(blockID flow.Identifier) (flow.StateCommitment, err return r0, r1 } -// Store provides a mock function with given fields: blockID, commit -func (_m *Commits) Store(blockID flow.Identifier, commit flow.StateCommitment) error { - ret := _m.Called(blockID, commit) - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.StateCommitment) error); ok { - r0 = rf(blockID, commit) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewCommits interface { +// NewCommits creates a new instance of Commits. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCommits(t interface { mock.TestingT Cleanup(func()) -} - -// NewCommits creates a new instance of Commits. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewCommits(t mockConstructorTestingTNewCommits) *Commits { +}) *Commits { mock := &Commits{} mock.Mock.Test(t) diff --git a/storage/mock/commits_reader.go b/storage/mock/commits_reader.go new file mode 100644 index 00000000000..4e46f3d959b --- /dev/null +++ b/storage/mock/commits_reader.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// CommitsReader is an autogenerated mock type for the CommitsReader type +type CommitsReader struct { + mock.Mock +} + +// ByBlockID provides a mock function with given fields: blockID +func (_m *CommitsReader) ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 flow.StateCommitment + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.StateCommitment, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) flow.StateCommitment); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.StateCommitment) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewCommitsReader creates a new instance of CommitsReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewCommitsReader(t interface { + mock.TestingT + Cleanup(func()) +}) *CommitsReader { + mock := &CommitsReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/computation_result_upload_status.go b/storage/mock/computation_result_upload_status.go index 11b772c9e80..3bd31ddb438 100644 --- a/storage/mock/computation_result_upload_status.go +++ b/storage/mock/computation_result_upload_status.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
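The Commits mock above reflects two cross-cutting storage changes: batches are now storage.ReaderBatchWriter instead of storage.BatchStorage, and BatchStore takes a leading lockctx.Proof. When stubbing, the extra arguments are usually matched loosely; a sketch using mock.Anything throughout (all values are placeholders):

package example

import (
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/onflow/flow-go/model/flow"
	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestCommitsBatchStoreMock(t *testing.T) {
	commits := storagemock.NewCommits(t)

	// Four arguments now: lock proof, block ID, commit, batch writer.
	commits.On("BatchStore", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
		Return(nil)

	var blockID flow.Identifier
	var commit flow.StateCommitment
	if err := commits.BatchStore(nil, blockID, commit, nil); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}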
package mock @@ -16,6 +16,10 @@ type ComputationResultUploadStatus struct { func (_m *ComputationResultUploadStatus) ByID(blockID flow.Identifier) (bool, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByID") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (bool, error)); ok { @@ -40,6 +44,10 @@ func (_m *ComputationResultUploadStatus) ByID(blockID flow.Identifier) (bool, er func (_m *ComputationResultUploadStatus) GetIDsByUploadStatus(targetUploadStatus bool) ([]flow.Identifier, error) { ret := _m.Called(targetUploadStatus) + if len(ret) == 0 { + panic("no return value specified for GetIDsByUploadStatus") + } + var r0 []flow.Identifier var r1 error if rf, ok := ret.Get(0).(func(bool) ([]flow.Identifier, error)); ok { @@ -66,6 +74,10 @@ func (_m *ComputationResultUploadStatus) GetIDsByUploadStatus(targetUploadStatus func (_m *ComputationResultUploadStatus) Remove(blockID flow.Identifier) error { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for Remove") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Identifier) error); ok { r0 = rf(blockID) @@ -80,6 +92,10 @@ func (_m *ComputationResultUploadStatus) Remove(blockID flow.Identifier) error { func (_m *ComputationResultUploadStatus) Upsert(blockID flow.Identifier, wasUploadCompleted bool) error { ret := _m.Called(blockID, wasUploadCompleted) + if len(ret) == 0 { + panic("no return value specified for Upsert") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Identifier, bool) error); ok { r0 = rf(blockID, wasUploadCompleted) @@ -90,13 +106,12 @@ func (_m *ComputationResultUploadStatus) Upsert(blockID flow.Identifier, wasUplo return r0 } -type mockConstructorTestingTNewComputationResultUploadStatus interface { +// NewComputationResultUploadStatus creates a new instance of ComputationResultUploadStatus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewComputationResultUploadStatus(t interface { mock.TestingT Cleanup(func()) -} - -// NewComputationResultUploadStatus creates a new instance of ComputationResultUploadStatus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewComputationResultUploadStatus(t mockConstructorTestingTNewComputationResultUploadStatus) *ComputationResultUploadStatus { +}) *ComputationResultUploadStatus { mock := &ComputationResultUploadStatus{} mock.Mock.Test(t) diff --git a/storage/mock/consumer_progress.go b/storage/mock/consumer_progress.go index 9410bc76ea4..baa6a85888b 100644 --- a/storage/mock/consumer_progress.go +++ b/storage/mock/consumer_progress.go @@ -1,21 +1,28 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock -import mock "github.com/stretchr/testify/mock" +import ( + storage "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) // ConsumerProgress is an autogenerated mock type for the ConsumerProgress type type ConsumerProgress struct { mock.Mock } -// InitProcessedIndex provides a mock function with given fields: defaultIndex -func (_m *ConsumerProgress) InitProcessedIndex(defaultIndex uint64) error { - ret := _m.Called(defaultIndex) +// BatchSetProcessedIndex provides a mock function with given fields: processed, batch +func (_m *ConsumerProgress) BatchSetProcessedIndex(processed uint64, batch storage.ReaderBatchWriter) error { + ret := _m.Called(processed, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchSetProcessedIndex") + } var r0 error - if rf, ok := ret.Get(0).(func(uint64) error); ok { - r0 = rf(defaultIndex) + if rf, ok := ret.Get(0).(func(uint64, storage.ReaderBatchWriter) error); ok { + r0 = rf(processed, batch) } else { r0 = ret.Error(0) } @@ -23,10 +30,14 @@ func (_m *ConsumerProgress) InitProcessedIndex(defaultIndex uint64) error { return r0 } -// ProcessedIndex provides a mock function with given fields: +// ProcessedIndex provides a mock function with no fields func (_m *ConsumerProgress) ProcessedIndex() (uint64, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ProcessedIndex") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func() (uint64, error)); ok { @@ -51,6 +62,10 @@ func (_m *ConsumerProgress) ProcessedIndex() (uint64, error) { func (_m *ConsumerProgress) SetProcessedIndex(processed uint64) error { ret := _m.Called(processed) + if len(ret) == 0 { + panic("no return value specified for SetProcessedIndex") + } + var r0 error if rf, ok := ret.Get(0).(func(uint64) error); ok { r0 = rf(processed) @@ -61,13 +76,12 @@ func (_m *ConsumerProgress) SetProcessedIndex(processed uint64) error { return r0 } -type mockConstructorTestingTNewConsumerProgress interface { +// NewConsumerProgress creates a new instance of ConsumerProgress. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConsumerProgress(t interface { mock.TestingT Cleanup(func()) -} - -// NewConsumerProgress creates a new instance of ConsumerProgress. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewConsumerProgress(t mockConstructorTestingTNewConsumerProgress) *ConsumerProgress { +}) *ConsumerProgress { mock := &ConsumerProgress{} mock.Mock.Test(t) diff --git a/storage/mock/consumer_progress_initializer.go b/storage/mock/consumer_progress_initializer.go new file mode 100644 index 00000000000..c10c5fa778d --- /dev/null +++ b/storage/mock/consumer_progress_initializer.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. 
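ConsumerProgress above loses InitProcessedIndex; initialization moves to the new ConsumerProgressInitializer type, whose Initialize returns a ready-to-use storage.ConsumerProgress. A sketch of wiring the two mocks together (index values are placeholders):

package example

import (
	"testing"

	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestConsumerProgressInitMock(t *testing.T) {
	progress := storagemock.NewConsumerProgress(t)
	progress.On("ProcessedIndex").Return(uint64(0), nil)

	initializer := storagemock.NewConsumerProgressInitializer(t)
	// Old: progress.On("InitProcessedIndex", uint64(0)).Return(nil)
	initializer.On("Initialize", uint64(0)).Return(progress, nil)

	cp, err := initializer.Initialize(0)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if idx, _ := cp.ProcessedIndex(); idx != 0 {
		t.Fatalf("unexpected index: %d", idx)
	}
}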
+ +package mock + +import ( + storage "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// ConsumerProgressInitializer is an autogenerated mock type for the ConsumerProgressInitializer type +type ConsumerProgressInitializer struct { + mock.Mock +} + +// Initialize provides a mock function with given fields: defaultIndex +func (_m *ConsumerProgressInitializer) Initialize(defaultIndex uint64) (storage.ConsumerProgress, error) { + ret := _m.Called(defaultIndex) + + if len(ret) == 0 { + panic("no return value specified for Initialize") + } + + var r0 storage.ConsumerProgress + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (storage.ConsumerProgress, error)); ok { + return rf(defaultIndex) + } + if rf, ok := ret.Get(0).(func(uint64) storage.ConsumerProgress); ok { + r0 = rf(defaultIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.ConsumerProgress) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(defaultIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewConsumerProgressInitializer creates a new instance of ConsumerProgressInitializer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConsumerProgressInitializer(t interface { + mock.TestingT + Cleanup(func()) +}) *ConsumerProgressInitializer { + mock := &ConsumerProgressInitializer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/db.go b/storage/mock/db.go new file mode 100644 index 00000000000..28f8b82b536 --- /dev/null +++ b/storage/mock/db.go @@ -0,0 +1,103 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + storage "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// DB is an autogenerated mock type for the DB type +type DB struct { + mock.Mock +} + +// Close provides a mock function with no fields +func (_m *DB) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewBatch provides a mock function with no fields +func (_m *DB) NewBatch() storage.Batch { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for NewBatch") + } + + var r0 storage.Batch + if rf, ok := ret.Get(0).(func() storage.Batch); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.Batch) + } + } + + return r0 +} + +// Reader provides a mock function with no fields +func (_m *DB) Reader() storage.Reader { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Reader") + } + + var r0 storage.Reader + if rf, ok := ret.Get(0).(func() storage.Reader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.Reader) + } + } + + return r0 +} + +// WithReaderBatchWriter provides a mock function with given fields: _a0 +func (_m *DB) WithReaderBatchWriter(_a0 func(storage.ReaderBatchWriter) error) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for WithReaderBatchWriter") + } + + var r0 error + if rf, ok := ret.Get(0).(func(func(storage.ReaderBatchWriter) error) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewDB creates a new instance of DB. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDB(t interface { + mock.TestingT + Cleanup(func()) +}) *DB { + mock := &DB{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/dkg_state.go b/storage/mock/dkg_state.go index e9092a66dd9..52c07058ea2 100644 --- a/storage/mock/dkg_state.go +++ b/storage/mock/dkg_state.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( - crypto "github.com/onflow/flow-go/crypto" + crypto "github.com/onflow/crypto" flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" @@ -14,43 +14,41 @@ type DKGState struct { mock.Mock } -// GetDKGEndState provides a mock function with given fields: epochCounter -func (_m *DKGState) GetDKGEndState(epochCounter uint64) (flow.DKGEndState, error) { - ret := _m.Called(epochCounter) +// CommitMyBeaconPrivateKey provides a mock function with given fields: epochCounter, commit +func (_m *DKGState) CommitMyBeaconPrivateKey(epochCounter uint64, commit *flow.EpochCommit) error { + ret := _m.Called(epochCounter, commit) - var r0 flow.DKGEndState - var r1 error - if rf, ok := ret.Get(0).(func(uint64) (flow.DKGEndState, error)); ok { - return rf(epochCounter) - } - if rf, ok := ret.Get(0).(func(uint64) flow.DKGEndState); ok { - r0 = rf(epochCounter) - } else { - r0 = ret.Get(0).(flow.DKGEndState) + if len(ret) == 0 { + panic("no return value specified for CommitMyBeaconPrivateKey") } - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(epochCounter) + var r0 error + if rf, ok := ret.Get(0).(func(uint64, *flow.EpochCommit) error); ok { + r0 = rf(epochCounter, commit) } else { - r1 = ret.Error(1) + r0 = ret.Error(0) } - return r0, r1 + return r0 } -// GetDKGStarted provides a mock function with given fields: epochCounter -func (_m *DKGState) GetDKGStarted(epochCounter uint64) (bool, error) { +// GetDKGState provides a mock function with given fields: epochCounter +func (_m *DKGState) GetDKGState(epochCounter uint64) (flow.DKGState, error) { ret := _m.Called(epochCounter) - var r0 bool + if len(ret) == 0 { + panic("no return value specified for GetDKGState") + } + + var r0 flow.DKGState var r1 error - if rf, ok := ret.Get(0).(func(uint64) (bool, error)); ok { + if rf, ok := ret.Get(0).(func(uint64) (flow.DKGState, error)); ok { return rf(epochCounter) } - if rf, ok := ret.Get(0).(func(uint64) bool); ok { + if rf, ok := ret.Get(0).(func(uint64) flow.DKGState); ok { r0 = rf(epochCounter) } else { - r0 = ret.Get(0).(bool) + r0 = ret.Get(0).(flow.DKGState) } if rf, ok := ret.Get(1).(func(uint64) error); ok { @@ -66,6 +64,10 @@ func (_m *DKGState) GetDKGStarted(epochCounter uint64) (bool, error) { func (_m *DKGState) InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error { ret := _m.Called(epochCounter, key) + if len(ret) == 0 { + panic("no return value specified for InsertMyBeaconPrivateKey") + } + var r0 error if rf, ok := ret.Get(0).(func(uint64, crypto.PrivateKey) error); ok { r0 = rf(epochCounter, key) @@ -76,13 +78,46 @@ func (_m *DKGState) InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.Pri return r0 } +// IsDKGStarted provides a mock function with given fields: epochCounter +func (_m *DKGState) IsDKGStarted(epochCounter uint64) (bool, error) { + ret := _m.Called(epochCounter) + + if len(ret) == 0 { + 
panic("no return value specified for IsDKGStarted") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (bool, error)); ok { + return rf(epochCounter) + } + if rf, ok := ret.Get(0).(func(uint64) bool); ok { + r0 = rf(epochCounter) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(epochCounter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // RetrieveMyBeaconPrivateKey provides a mock function with given fields: epochCounter -func (_m *DKGState) RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error) { +func (_m *DKGState) RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, bool, error) { ret := _m.Called(epochCounter) + if len(ret) == 0 { + panic("no return value specified for RetrieveMyBeaconPrivateKey") + } + var r0 crypto.PrivateKey - var r1 error - if rf, ok := ret.Get(0).(func(uint64) (crypto.PrivateKey, error)); ok { + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(uint64) (crypto.PrivateKey, bool, error)); ok { return rf(epochCounter) } if rf, ok := ret.Get(0).(func(uint64) crypto.PrivateKey); ok { @@ -93,22 +128,32 @@ func (_m *DKGState) RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.Priv } } - if rf, ok := ret.Get(1).(func(uint64) error); ok { + if rf, ok := ret.Get(1).(func(uint64) bool); ok { r1 = rf(epochCounter) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(bool) } - return r0, r1 + if rf, ok := ret.Get(2).(func(uint64) error); ok { + r2 = rf(epochCounter) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } -// SetDKGEndState provides a mock function with given fields: epochCounter, endState -func (_m *DKGState) SetDKGEndState(epochCounter uint64, endState flow.DKGEndState) error { - ret := _m.Called(epochCounter, endState) +// SetDKGState provides a mock function with given fields: epochCounter, newState +func (_m *DKGState) SetDKGState(epochCounter uint64, newState flow.DKGState) error { + ret := _m.Called(epochCounter, newState) + + if len(ret) == 0 { + panic("no return value specified for SetDKGState") + } var r0 error - if rf, ok := ret.Get(0).(func(uint64, flow.DKGEndState) error); ok { - r0 = rf(epochCounter, endState) + if rf, ok := ret.Get(0).(func(uint64, flow.DKGState) error); ok { + r0 = rf(epochCounter, newState) } else { r0 = ret.Error(0) } @@ -116,27 +161,42 @@ func (_m *DKGState) SetDKGEndState(epochCounter uint64, endState flow.DKGEndStat return r0 } -// SetDKGStarted provides a mock function with given fields: epochCounter -func (_m *DKGState) SetDKGStarted(epochCounter uint64) error { +// UnsafeRetrieveMyBeaconPrivateKey provides a mock function with given fields: epochCounter +func (_m *DKGState) UnsafeRetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error) { ret := _m.Called(epochCounter) - var r0 error - if rf, ok := ret.Get(0).(func(uint64) error); ok { + if len(ret) == 0 { + panic("no return value specified for UnsafeRetrieveMyBeaconPrivateKey") + } + + var r0 crypto.PrivateKey + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (crypto.PrivateKey, error)); ok { + return rf(epochCounter) + } + if rf, ok := ret.Get(0).(func(uint64) crypto.PrivateKey); ok { r0 = rf(epochCounter) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.PrivateKey) + } } - return r0 -} + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(epochCounter) + } else { + r1 = ret.Error(1) + } -type mockConstructorTestingTNewDKGState interface { - mock.TestingT - 
Cleanup(func()) + return r0, r1 } // NewDKGState creates a new instance of DKGState. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewDKGState(t mockConstructorTestingTNewDKGState) *DKGState { +// The first argument is typically a *testing.T value. +func NewDKGState(t interface { + mock.TestingT + Cleanup(func()) +}) *DKGState { mock := &DKGState{} mock.Mock.Test(t) diff --git a/storage/mock/dkg_state_reader.go b/storage/mock/dkg_state_reader.go new file mode 100644 index 00000000000..622b50470d0 --- /dev/null +++ b/storage/mock/dkg_state_reader.go @@ -0,0 +1,152 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + crypto "github.com/onflow/crypto" + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// DKGStateReader is an autogenerated mock type for the DKGStateReader type +type DKGStateReader struct { + mock.Mock +} + +// GetDKGState provides a mock function with given fields: epochCounter +func (_m *DKGStateReader) GetDKGState(epochCounter uint64) (flow.DKGState, error) { + ret := _m.Called(epochCounter) + + if len(ret) == 0 { + panic("no return value specified for GetDKGState") + } + + var r0 flow.DKGState + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (flow.DKGState, error)); ok { + return rf(epochCounter) + } + if rf, ok := ret.Get(0).(func(uint64) flow.DKGState); ok { + r0 = rf(epochCounter) + } else { + r0 = ret.Get(0).(flow.DKGState) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(epochCounter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsDKGStarted provides a mock function with given fields: epochCounter +func (_m *DKGStateReader) IsDKGStarted(epochCounter uint64) (bool, error) { + ret := _m.Called(epochCounter) + + if len(ret) == 0 { + panic("no return value specified for IsDKGStarted") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (bool, error)); ok { + return rf(epochCounter) + } + if rf, ok := ret.Get(0).(func(uint64) bool); ok { + r0 = rf(epochCounter) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(epochCounter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RetrieveMyBeaconPrivateKey provides a mock function with given fields: epochCounter +func (_m *DKGStateReader) RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, bool, error) { + ret := _m.Called(epochCounter) + + if len(ret) == 0 { + panic("no return value specified for RetrieveMyBeaconPrivateKey") + } + + var r0 crypto.PrivateKey + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(uint64) (crypto.PrivateKey, bool, error)); ok { + return rf(epochCounter) + } + if rf, ok := ret.Get(0).(func(uint64) crypto.PrivateKey); ok { + r0 = rf(epochCounter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.PrivateKey) + } + } + + if rf, ok := ret.Get(1).(func(uint64) bool); ok { + r1 = rf(epochCounter) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(uint64) error); ok { + r2 = rf(epochCounter) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// UnsafeRetrieveMyBeaconPrivateKey provides a mock function with given fields: epochCounter +func (_m *DKGStateReader) UnsafeRetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error) { + ret := _m.Called(epochCounter) + + if len(ret) == 0 { + panic("no return value specified for UnsafeRetrieveMyBeaconPrivateKey") + } + + 
var r0 crypto.PrivateKey + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (crypto.PrivateKey, error)); ok { + return rf(epochCounter) + } + if rf, ok := ret.Get(0).(func(uint64) crypto.PrivateKey); ok { + r0 = rf(epochCounter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.PrivateKey) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(epochCounter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewDKGStateReader creates a new instance of DKGStateReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDKGStateReader(t interface { + mock.TestingT + Cleanup(func()) +}) *DKGStateReader { + mock := &DKGStateReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/epoch_commits.go b/storage/mock/epoch_commits.go index 33ebd5d8486..369b93f1e11 100644 --- a/storage/mock/epoch_commits.go +++ b/storage/mock/epoch_commits.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -6,7 +6,7 @@ import ( flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // EpochCommits is an autogenerated mock type for the EpochCommits type @@ -14,10 +14,32 @@ type EpochCommits struct { mock.Mock } +// BatchStore provides a mock function with given fields: rw, commit +func (_m *EpochCommits) BatchStore(rw storage.ReaderBatchWriter, commit *flow.EpochCommit) error { + ret := _m.Called(rw, commit) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(storage.ReaderBatchWriter, *flow.EpochCommit) error); ok { + r0 = rf(rw, commit) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByID provides a mock function with given fields: _a0 func (_m *EpochCommits) ByID(_a0 flow.Identifier) (*flow.EpochCommit, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ByID") + } + var r0 *flow.EpochCommit var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.EpochCommit, error)); ok { @@ -40,29 +62,12 @@ func (_m *EpochCommits) ByID(_a0 flow.Identifier) (*flow.EpochCommit, error) { return r0, r1 } -// StoreTx provides a mock function with given fields: commit -func (_m *EpochCommits) StoreTx(commit *flow.EpochCommit) func(*transaction.Tx) error { - ret := _m.Called(commit) - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(*flow.EpochCommit) func(*transaction.Tx) error); ok { - r0 = rf(commit) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - -type mockConstructorTestingTNewEpochCommits interface { +// NewEpochCommits creates a new instance of EpochCommits. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEpochCommits(t interface { mock.TestingT Cleanup(func()) -} - -// NewEpochCommits creates a new instance of EpochCommits. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
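The DKGState/DKGStateReader surface above consolidates GetDKGStarted and GetDKGEndState into IsDKGStarted and GetDKGState, and RetrieveMyBeaconPrivateKey now returns a (key, safe, error) triple, so stubs must supply three values. A sketch, assuming storage.ErrNotFound as the not-found sentinel and a placeholder epoch counter:

package example

import (
	"errors"
	"testing"

	"github.com/onflow/flow-go/storage"
	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestDKGStateReaderMock(t *testing.T) {
	dkg := storagemock.NewDKGStateReader(t)

	// Three return values now: key, "safe to use" flag, error.
	dkg.On("RetrieveMyBeaconPrivateKey", uint64(5)).
		Return(nil, false, storage.ErrNotFound)

	key, safe, err := dkg.RetrieveMyBeaconPrivateKey(5)
	if key != nil || safe || !errors.Is(err, storage.ErrNotFound) {
		t.Fatalf("unexpected result: %v %v %v", key, safe, err)
	}
}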
-func NewEpochCommits(t mockConstructorTestingTNewEpochCommits) *EpochCommits { +}) *EpochCommits { mock := &EpochCommits{} mock.Mock.Test(t) diff --git a/storage/mock/epoch_protocol_state_entries.go b/storage/mock/epoch_protocol_state_entries.go new file mode 100644 index 00000000000..9a29646b158 --- /dev/null +++ b/storage/mock/epoch_protocol_state_entries.go @@ -0,0 +1,127 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + lockctx "github.com/jordanschalm/lockctx" + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" +) + +// EpochProtocolStateEntries is an autogenerated mock type for the EpochProtocolStateEntries type +type EpochProtocolStateEntries struct { + mock.Mock +} + +// BatchIndex provides a mock function with given fields: lctx, rw, blockID, epochProtocolStateID +func (_m *EpochProtocolStateEntries) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, epochProtocolStateID flow.Identifier) error { + ret := _m.Called(lctx, rw, blockID, epochProtocolStateID) + + if len(ret) == 0 { + panic("no return value specified for BatchIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, flow.Identifier) error); ok { + r0 = rf(lctx, rw, blockID, epochProtocolStateID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BatchStore provides a mock function with given fields: w, epochProtocolStateID, epochProtocolStateEntry +func (_m *EpochProtocolStateEntries) BatchStore(w storage.Writer, epochProtocolStateID flow.Identifier, epochProtocolStateEntry *flow.MinEpochStateEntry) error { + ret := _m.Called(w, epochProtocolStateID, epochProtocolStateEntry) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(storage.Writer, flow.Identifier, *flow.MinEpochStateEntry) error); ok { + r0 = rf(w, epochProtocolStateID, epochProtocolStateEntry) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ByBlockID provides a mock function with given fields: blockID +func (_m *EpochProtocolStateEntries) ByBlockID(blockID flow.Identifier) (*flow.RichEpochStateEntry, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 *flow.RichEpochStateEntry + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.RichEpochStateEntry, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.RichEpochStateEntry); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.RichEpochStateEntry) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByID provides a mock function with given fields: id +func (_m *EpochProtocolStateEntries) ByID(id flow.Identifier) (*flow.RichEpochStateEntry, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 *flow.RichEpochStateEntry + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.RichEpochStateEntry, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.RichEpochStateEntry); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.RichEpochStateEntry) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 
= rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEpochProtocolStateEntries creates a new instance of EpochProtocolStateEntries. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEpochProtocolStateEntries(t interface { + mock.TestingT + Cleanup(func()) +}) *EpochProtocolStateEntries { + mock := &EpochProtocolStateEntries{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/epoch_recovery_my_beacon_key.go b/storage/mock/epoch_recovery_my_beacon_key.go new file mode 100644 index 00000000000..8fb2c5066ac --- /dev/null +++ b/storage/mock/epoch_recovery_my_beacon_key.go @@ -0,0 +1,170 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + crypto "github.com/onflow/crypto" + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// EpochRecoveryMyBeaconKey is an autogenerated mock type for the EpochRecoveryMyBeaconKey type +type EpochRecoveryMyBeaconKey struct { + mock.Mock +} + +// GetDKGState provides a mock function with given fields: epochCounter +func (_m *EpochRecoveryMyBeaconKey) GetDKGState(epochCounter uint64) (flow.DKGState, error) { + ret := _m.Called(epochCounter) + + if len(ret) == 0 { + panic("no return value specified for GetDKGState") + } + + var r0 flow.DKGState + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (flow.DKGState, error)); ok { + return rf(epochCounter) + } + if rf, ok := ret.Get(0).(func(uint64) flow.DKGState); ok { + r0 = rf(epochCounter) + } else { + r0 = ret.Get(0).(flow.DKGState) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(epochCounter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsDKGStarted provides a mock function with given fields: epochCounter +func (_m *EpochRecoveryMyBeaconKey) IsDKGStarted(epochCounter uint64) (bool, error) { + ret := _m.Called(epochCounter) + + if len(ret) == 0 { + panic("no return value specified for IsDKGStarted") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (bool, error)); ok { + return rf(epochCounter) + } + if rf, ok := ret.Get(0).(func(uint64) bool); ok { + r0 = rf(epochCounter) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(epochCounter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RetrieveMyBeaconPrivateKey provides a mock function with given fields: epochCounter +func (_m *EpochRecoveryMyBeaconKey) RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, bool, error) { + ret := _m.Called(epochCounter) + + if len(ret) == 0 { + panic("no return value specified for RetrieveMyBeaconPrivateKey") + } + + var r0 crypto.PrivateKey + var r1 bool + var r2 error + if rf, ok := ret.Get(0).(func(uint64) (crypto.PrivateKey, bool, error)); ok { + return rf(epochCounter) + } + if rf, ok := ret.Get(0).(func(uint64) crypto.PrivateKey); ok { + r0 = rf(epochCounter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.PrivateKey) + } + } + + if rf, ok := ret.Get(1).(func(uint64) bool); ok { + r1 = rf(epochCounter) + } else { + r1 = ret.Get(1).(bool) + } + + if rf, ok := ret.Get(2).(func(uint64) error); ok { + r2 = rf(epochCounter) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// UnsafeRetrieveMyBeaconPrivateKey provides a mock function with given fields: epochCounter +func (_m 
*EpochRecoveryMyBeaconKey) UnsafeRetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error) { + ret := _m.Called(epochCounter) + + if len(ret) == 0 { + panic("no return value specified for UnsafeRetrieveMyBeaconPrivateKey") + } + + var r0 crypto.PrivateKey + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (crypto.PrivateKey, error)); ok { + return rf(epochCounter) + } + if rf, ok := ret.Get(0).(func(uint64) crypto.PrivateKey); ok { + r0 = rf(epochCounter) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(crypto.PrivateKey) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(epochCounter) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpsertMyBeaconPrivateKey provides a mock function with given fields: epochCounter, key, commit +func (_m *EpochRecoveryMyBeaconKey) UpsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey, commit *flow.EpochCommit) error { + ret := _m.Called(epochCounter, key, commit) + + if len(ret) == 0 { + panic("no return value specified for UpsertMyBeaconPrivateKey") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64, crypto.PrivateKey, *flow.EpochCommit) error); ok { + r0 = rf(epochCounter, key, commit) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewEpochRecoveryMyBeaconKey creates a new instance of EpochRecoveryMyBeaconKey. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEpochRecoveryMyBeaconKey(t interface { + mock.TestingT + Cleanup(func()) +}) *EpochRecoveryMyBeaconKey { + mock := &EpochRecoveryMyBeaconKey{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/epoch_setups.go b/storage/mock/epoch_setups.go index 0b7386c1af6..a0aece572c3 100644 --- a/storage/mock/epoch_setups.go +++ b/storage/mock/epoch_setups.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -6,7 +6,7 @@ import ( flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // EpochSetups is an autogenerated mock type for the EpochSetups type @@ -14,10 +14,32 @@ type EpochSetups struct { mock.Mock } +// BatchStore provides a mock function with given fields: rw, setup +func (_m *EpochSetups) BatchStore(rw storage.ReaderBatchWriter, setup *flow.EpochSetup) error { + ret := _m.Called(rw, setup) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(storage.ReaderBatchWriter, *flow.EpochSetup) error); ok { + r0 = rf(rw, setup) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByID provides a mock function with given fields: _a0 func (_m *EpochSetups) ByID(_a0 flow.Identifier) (*flow.EpochSetup, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ByID") + } + var r0 *flow.EpochSetup var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.EpochSetup, error)); ok { @@ -40,29 +62,12 @@ func (_m *EpochSetups) ByID(_a0 flow.Identifier) (*flow.EpochSetup, error) { return r0, r1 } -// StoreTx provides a mock function with given fields: _a0 -func (_m *EpochSetups) StoreTx(_a0 *flow.EpochSetup) func(*transaction.Tx) error { - ret := _m.Called(_a0) - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(*flow.EpochSetup) func(*transaction.Tx) error); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - -type mockConstructorTestingTNewEpochSetups interface { +// NewEpochSetups creates a new instance of EpochSetups. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEpochSetups(t interface { mock.TestingT Cleanup(func()) -} - -// NewEpochSetups creates a new instance of EpochSetups. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEpochSetups(t mockConstructorTestingTNewEpochSetups) *EpochSetups { +}) *EpochSetups { mock := &EpochSetups{} mock.Mock.Test(t) diff --git a/storage/mock/epoch_states.go b/storage/mock/epoch_states.go deleted file mode 100644 index 2106bd07076..00000000000 --- a/storage/mock/epoch_states.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package mock - -import ( - badger "github.com/dgraph-io/badger/v2" - flow "github.com/onflow/flow-go/model/flow" - - mock "github.com/stretchr/testify/mock" -) - -// EpochStates is an autogenerated mock type for the EpochStates type -type EpochStates struct { - mock.Mock -} - -// ByBlockID provides a mock function with given fields: _a0 -func (_m *EpochStates) ByBlockID(_a0 flow.Identifier) (*flow.EpochStatus, error) { - ret := _m.Called(_a0) - - var r0 *flow.EpochStatus - if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.EpochStatus); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.EpochStatus) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// StoreTx provides a mock function with given fields: blockID, state -func (_m *EpochStates) StoreTx(blockID flow.Identifier, state *flow.EpochStatus) func(*badger.Txn) error { - ret := _m.Called(blockID, state) - - var r0 func(*badger.Txn) error - if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.EpochStatus) func(*badger.Txn) error); ok { - r0 = rf(blockID, state) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*badger.Txn) error) - } - } - - return r0 -} diff --git a/storage/mock/epoch_statuses.go b/storage/mock/epoch_statuses.go deleted file mode 100644 index e21c7f1617f..00000000000 --- a/storage/mock/epoch_statuses.go +++ /dev/null @@ -1,72 +0,0 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. - -package mock - -import ( - flow "github.com/onflow/flow-go/model/flow" - mock "github.com/stretchr/testify/mock" - - transaction "github.com/onflow/flow-go/storage/badger/transaction" -) - -// EpochStatuses is an autogenerated mock type for the EpochStatuses type -type EpochStatuses struct { - mock.Mock -} - -// ByBlockID provides a mock function with given fields: _a0 -func (_m *EpochStatuses) ByBlockID(_a0 flow.Identifier) (*flow.EpochStatus, error) { - ret := _m.Called(_a0) - - var r0 *flow.EpochStatus - var r1 error - if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.EpochStatus, error)); ok { - return rf(_a0) - } - if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.EpochStatus); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*flow.EpochStatus) - } - } - - if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// StoreTx provides a mock function with given fields: blockID, state -func (_m *EpochStatuses) StoreTx(blockID flow.Identifier, state *flow.EpochStatus) func(*transaction.Tx) error { - ret := _m.Called(blockID, state) - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.EpochStatus) func(*transaction.Tx) error); ok { - r0 = rf(blockID, state) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - -type mockConstructorTestingTNewEpochStatuses interface { - mock.TestingT - Cleanup(func()) -} - -// NewEpochStatuses creates a new instance of EpochStatuses. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewEpochStatuses(t mockConstructorTestingTNewEpochStatuses) *EpochStatuses { - mock := &EpochStatuses{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/storage/mock/events.go b/storage/mock/events.go index 8e5470e2248..a23564f1e08 100644 --- a/storage/mock/events.go +++ b/storage/mock/events.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -15,11 +15,15 @@ type Events struct { } // BatchRemoveByBlockID provides a mock function with given fields: blockID, batch -func (_m *Events) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { +func (_m *Events) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error { ret := _m.Called(blockID, batch) + if len(ret) == 0 { + panic("no return value specified for BatchRemoveByBlockID") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier, storage.ReaderBatchWriter) error); ok { r0 = rf(blockID, batch) } else { r0 = ret.Error(0) @@ -29,11 +33,15 @@ func (_m *Events) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.Ba } // BatchStore provides a mock function with given fields: blockID, events, batch -func (_m *Events) BatchStore(blockID flow.Identifier, events []flow.EventsList, batch storage.BatchStorage) error { +func (_m *Events) BatchStore(blockID flow.Identifier, events []flow.EventsList, batch storage.ReaderBatchWriter) error { ret := _m.Called(blockID, events, batch) + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.EventsList, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.EventsList, storage.ReaderBatchWriter) error); ok { r0 = rf(blockID, events, batch) } else { r0 = ret.Error(0) @@ -46,6 +54,10 @@ func (_m *Events) BatchStore(blockID flow.Identifier, events []flow.EventsList, func (_m *Events) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 []flow.Event var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.Event, error)); ok { @@ -72,6 +84,10 @@ func (_m *Events) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) { func (_m *Events) ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error) { ret := _m.Called(blockID, eventType) + if len(ret) == 0 { + panic("no return value specified for ByBlockIDEventType") + } + var r0 []flow.Event var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, flow.EventType) ([]flow.Event, error)); ok { @@ -98,6 +114,10 @@ func (_m *Events) ByBlockIDEventType(blockID flow.Identifier, eventType flow.Eve func (_m *Events) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) ([]flow.Event, error) { ret := _m.Called(blockID, transactionID) + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionID") + } + var r0 []flow.Event var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) ([]flow.Event, error)); ok { @@ -124,6 +144,10 @@ func (_m *Events) ByBlockIDTransactionID(blockID flow.Identifier, transactionID func (_m *Events) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) ([]flow.Event, error) { ret := 
_m.Called(blockID, txIndex) + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionIndex") + } + var r0 []flow.Event var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) ([]flow.Event, error)); ok { @@ -146,13 +170,30 @@ func (_m *Events) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uin return r0, r1 } -type mockConstructorTestingTNewEvents interface { - mock.TestingT - Cleanup(func()) +// Store provides a mock function with given fields: blockID, blockEvents +func (_m *Events) Store(blockID flow.Identifier, blockEvents []flow.EventsList) error { + ret := _m.Called(blockID, blockEvents) + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.EventsList) error); ok { + r0 = rf(blockID, blockEvents) + } else { + r0 = ret.Error(0) + } + + return r0 } // NewEvents creates a new instance of Events. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewEvents(t mockConstructorTestingTNewEvents) *Events { +// The first argument is typically a *testing.T value. +func NewEvents(t interface { + mock.TestingT + Cleanup(func()) +}) *Events { mock := &Events{} mock.Mock.Test(t) diff --git a/storage/mock/events_reader.go b/storage/mock/events_reader.go new file mode 100644 index 00000000000..31459d361ae --- /dev/null +++ b/storage/mock/events_reader.go @@ -0,0 +1,147 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// EventsReader is an autogenerated mock type for the EventsReader type +type EventsReader struct { + mock.Mock +} + +// ByBlockID provides a mock function with given fields: blockID +func (_m *EventsReader) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 []flow.Event + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.Event, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.Event); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.Event) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDEventType provides a mock function with given fields: blockID, eventType +func (_m *EventsReader) ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error) { + ret := _m.Called(blockID, eventType) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDEventType") + } + + var r0 []flow.Event + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.EventType) ([]flow.Event, error)); ok { + return rf(blockID, eventType) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.EventType) []flow.Event); ok { + r0 = rf(blockID, eventType) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.Event) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, flow.EventType) error); ok { + r1 = rf(blockID, eventType) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionID provides a mock function with given fields: blockID, transactionID +func (_m *EventsReader) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) ([]flow.Event, error) { + 
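The EventsReader mock that follows is driven through ordinary testify expectations, and its constructor wires `t.Cleanup` so unmet expectations fail the test automatically. A minimal sketch, assuming the `storage/mock` package is imported under the alias `storagemock`; the test name, block ID, and event value are illustrative:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestEventsReaderStub(t *testing.T) {
	// The constructor registers AssertExpectations via t.Cleanup.
	events := storagemock.NewEventsReader(t)

	blockID := flow.Identifier{0x01} // illustrative ID
	expected := []flow.Event{{Type: "flow.AccountCreated"}}

	// Stub the read path exactly once; a second call fails the test.
	events.On("ByBlockID", blockID).Return(expected, nil).Once()

	got, err := events.ByBlockID(blockID)
	require.NoError(t, err)
	require.Equal(t, expected, got)
}
```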
ret := _m.Called(blockID, transactionID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionID") + } + + var r0 []flow.Event + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) ([]flow.Event, error)); ok { + return rf(blockID, transactionID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) []flow.Event); ok { + r0 = rf(blockID, transactionID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.Event) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, flow.Identifier) error); ok { + r1 = rf(blockID, transactionID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionIndex provides a mock function with given fields: blockID, txIndex +func (_m *EventsReader) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) ([]flow.Event, error) { + ret := _m.Called(blockID, txIndex) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionIndex") + } + + var r0 []flow.Event + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) ([]flow.Event, error)); ok { + return rf(blockID, txIndex) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) []flow.Event); ok { + r0 = rf(blockID, txIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.Event) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, uint32) error); ok { + r1 = rf(blockID, txIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEventsReader creates a new instance of EventsReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEventsReader(t interface { + mock.TestingT + Cleanup(func()) +}) *EventsReader { + mock := &EventsReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/execution_fork_evidence.go b/storage/mock/execution_fork_evidence.go new file mode 100644 index 00000000000..f523fdbc98a --- /dev/null +++ b/storage/mock/execution_fork_evidence.go @@ -0,0 +1,77 @@ +// Code generated by mockery. DO NOT EDIT. 
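A behavioral note on the regenerated mocks, including the ExecutionForkEvidence mock below: a matched expectation with no configured return now panics with "no return value specified for <Method>" instead of silently handing back zero values. A sketch of both sides of that guard, with `mock.Anything` standing in for the `lockctx.Proof` argument so the test needn't construct one:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestExecutionForkEvidenceGuard(t *testing.T) {
	evidence := storagemock.NewExecutionForkEvidence(t)

	// An expectation without .Return(...) now trips the generated guard.
	evidence.On("Retrieve")
	require.PanicsWithValue(t,
		"no return value specified for Retrieve",
		func() { _, _ = evidence.Retrieve() })

	// Properly stubbed calls work as before; mock.Anything matches both
	// the lockctx.Proof and the seals slice, including nil values.
	evidence.On("StoreIfNotExists", mock.Anything, mock.Anything).Return(nil)
	require.NoError(t, evidence.StoreIfNotExists(nil, nil))
}
```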
+ +package mock + +import ( + lockctx "github.com/jordanschalm/lockctx" + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" +) + +// ExecutionForkEvidence is an autogenerated mock type for the ExecutionForkEvidence type +type ExecutionForkEvidence struct { + mock.Mock +} + +// Retrieve provides a mock function with no fields +func (_m *ExecutionForkEvidence) Retrieve() ([]*flow.IncorporatedResultSeal, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Retrieve") + } + + var r0 []*flow.IncorporatedResultSeal + var r1 error + if rf, ok := ret.Get(0).(func() ([]*flow.IncorporatedResultSeal, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []*flow.IncorporatedResultSeal); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*flow.IncorporatedResultSeal) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StoreIfNotExists provides a mock function with given fields: lctx, conflictingSeals +func (_m *ExecutionForkEvidence) StoreIfNotExists(lctx lockctx.Proof, conflictingSeals []*flow.IncorporatedResultSeal) error { + ret := _m.Called(lctx, conflictingSeals) + + if len(ret) == 0 { + panic("no return value specified for StoreIfNotExists") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, []*flow.IncorporatedResultSeal) error); ok { + r0 = rf(lctx, conflictingSeals) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewExecutionForkEvidence creates a new instance of ExecutionForkEvidence. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionForkEvidence(t interface { + mock.TestingT + Cleanup(func()) +}) *ExecutionForkEvidence { + mock := &ExecutionForkEvidence{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/execution_receipts.go b/storage/mock/execution_receipts.go index b1c0d1fd6de..a8c4931a0cf 100644 --- a/storage/mock/execution_receipts.go +++ b/storage/mock/execution_receipts.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
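Throughout this change, batch-mutating methods move from `storage.BatchStorage` to `storage.ReaderBatchWriter`. Stubs written with argument matchers rather than concrete batch values survive that signature change untouched; a minimal sketch, passing a nil batch as a placeholder since the stub never inspects it:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestExecutionReceiptsBatchStore(t *testing.T) {
	receipts := storagemock.NewExecutionReceipts(t)

	// mock.Anything matches any receipt and any ReaderBatchWriter
	// implementation (including nil), keeping the stub signature-agnostic.
	receipts.On("BatchStore", mock.Anything, mock.Anything).Return(nil)

	err := receipts.BatchStore(&flow.ExecutionReceipt{}, nil)
	require.NoError(t, err)
}
```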
package mock @@ -15,11 +15,15 @@ type ExecutionReceipts struct { } // BatchStore provides a mock function with given fields: receipt, batch -func (_m *ExecutionReceipts) BatchStore(receipt *flow.ExecutionReceipt, batch storage.BatchStorage) error { +func (_m *ExecutionReceipts) BatchStore(receipt *flow.ExecutionReceipt, batch storage.ReaderBatchWriter) error { ret := _m.Called(receipt, batch) + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + var r0 error - if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt, storage.ReaderBatchWriter) error); ok { r0 = rf(receipt, batch) } else { r0 = ret.Error(0) @@ -32,6 +36,10 @@ func (_m *ExecutionReceipts) BatchStore(receipt *flow.ExecutionReceipt, batch st func (_m *ExecutionReceipts) ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 flow.ExecutionReceiptList var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (flow.ExecutionReceiptList, error)); ok { @@ -58,6 +66,10 @@ func (_m *ExecutionReceipts) ByBlockID(blockID flow.Identifier) (flow.ExecutionR func (_m *ExecutionReceipts) ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error) { ret := _m.Called(receiptID) + if len(ret) == 0 { + panic("no return value specified for ByID") + } + var r0 *flow.ExecutionReceipt var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ExecutionReceipt, error)); ok { @@ -84,6 +96,10 @@ func (_m *ExecutionReceipts) ByID(receiptID flow.Identifier) (*flow.ExecutionRec func (_m *ExecutionReceipts) Store(receipt *flow.ExecutionReceipt) error { ret := _m.Called(receipt) + if len(ret) == 0 { + panic("no return value specified for Store") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt) error); ok { r0 = rf(receipt) @@ -94,13 +110,12 @@ func (_m *ExecutionReceipts) Store(receipt *flow.ExecutionReceipt) error { return r0 } -type mockConstructorTestingTNewExecutionReceipts interface { +// NewExecutionReceipts creates a new instance of ExecutionReceipts. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionReceipts(t interface { mock.TestingT Cleanup(func()) -} - -// NewExecutionReceipts creates a new instance of ExecutionReceipts. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewExecutionReceipts(t mockConstructorTestingTNewExecutionReceipts) *ExecutionReceipts { +}) *ExecutionReceipts { mock := &ExecutionReceipts{} mock.Mock.Test(t) diff --git a/storage/mock/execution_results.go b/storage/mock/execution_results.go index c9ad6b09035..07702c1db3c 100644 --- a/storage/mock/execution_results.go +++ b/storage/mock/execution_results.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -7,8 +7,6 @@ import ( mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" - - transaction "github.com/onflow/flow-go/storage/badger/transaction" ) // ExecutionResults is an autogenerated mock type for the ExecutionResults type @@ -17,11 +15,15 @@ type ExecutionResults struct { } // BatchIndex provides a mock function with given fields: blockID, resultID, batch -func (_m *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch storage.BatchStorage) error { +func (_m *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error { ret := _m.Called(blockID, resultID, batch) + if len(ret) == 0 { + panic("no return value specified for BatchIndex") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier, storage.ReaderBatchWriter) error); ok { r0 = rf(blockID, resultID, batch) } else { r0 = ret.Error(0) @@ -31,11 +33,15 @@ func (_m *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Id } // BatchRemoveIndexByBlockID provides a mock function with given fields: blockID, batch -func (_m *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { +func (_m *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error { ret := _m.Called(blockID, batch) + if len(ret) == 0 { + panic("no return value specified for BatchRemoveIndexByBlockID") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier, storage.ReaderBatchWriter) error); ok { r0 = rf(blockID, batch) } else { r0 = ret.Error(0) @@ -45,11 +51,15 @@ func (_m *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, b } // BatchStore provides a mock function with given fields: result, batch -func (_m *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storage.BatchStorage) error { +func (_m *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storage.ReaderBatchWriter) error { ret := _m.Called(result, batch) + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + var r0 error - if rf, ok := ret.Get(0).(func(*flow.ExecutionResult, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(*flow.ExecutionResult, storage.ReaderBatchWriter) error); ok { r0 = rf(result, batch) } else { r0 = ret.Error(0) @@ -62,6 +72,10 @@ func (_m *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch stora func (_m *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 *flow.ExecutionResult var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ExecutionResult, error)); ok { @@ -88,6 +102,10 @@ func (_m *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionR func (_m *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) { ret := _m.Called(resultID) + if len(ret) == 0 { + panic("no return value specified for ByID") + } + var r0 *flow.ExecutionResult var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ExecutionResult, error)); ok { @@ -110,26 +128,14 @@ func (_m *ExecutionResults) ByID(resultID flow.Identifier) 
(*flow.ExecutionResul return r0, r1 } -// ByIDTx provides a mock function with given fields: resultID -func (_m *ExecutionResults) ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error) { - ret := _m.Called(resultID) - - var r0 func(*transaction.Tx) (*flow.ExecutionResult, error) - if rf, ok := ret.Get(0).(func(flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error)); ok { - r0 = rf(resultID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) (*flow.ExecutionResult, error)) - } - } - - return r0 -} - // ForceIndex provides a mock function with given fields: blockID, resultID func (_m *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error { ret := _m.Called(blockID, resultID) + if len(ret) == 0 { + panic("no return value specified for ForceIndex") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) error); ok { r0 = rf(blockID, resultID) @@ -144,6 +150,10 @@ func (_m *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Id func (_m *ExecutionResults) Index(blockID flow.Identifier, resultID flow.Identifier) error { ret := _m.Called(blockID, resultID) + if len(ret) == 0 { + panic("no return value specified for Index") + } + var r0 error if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) error); ok { r0 = rf(blockID, resultID) @@ -158,6 +168,10 @@ func (_m *ExecutionResults) Index(blockID flow.Identifier, resultID flow.Identif func (_m *ExecutionResults) Store(result *flow.ExecutionResult) error { ret := _m.Called(result) + if len(ret) == 0 { + panic("no return value specified for Store") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.ExecutionResult) error); ok { r0 = rf(result) @@ -168,13 +182,12 @@ func (_m *ExecutionResults) Store(result *flow.ExecutionResult) error { return r0 } -type mockConstructorTestingTNewExecutionResults interface { +// NewExecutionResults creates a new instance of ExecutionResults. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionResults(t interface { mock.TestingT Cleanup(func()) -} - -// NewExecutionResults creates a new instance of ExecutionResults. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewExecutionResults(t mockConstructorTestingTNewExecutionResults) *ExecutionResults { +}) *ExecutionResults { mock := &ExecutionResults{} mock.Mock.Test(t) diff --git a/storage/mock/execution_results_reader.go b/storage/mock/execution_results_reader.go new file mode 100644 index 00000000000..b812a794d1d --- /dev/null +++ b/storage/mock/execution_results_reader.go @@ -0,0 +1,87 @@ +// Code generated by mockery. DO NOT EDIT. 
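Read-only mocks such as the ExecutionResultsReader below are convenient for exercising not-found branches. A sketch, assuming the `storage.ErrNotFound` sentinel from `github.com/onflow/flow-go/storage`; note that a nil first return is safe because the generated code only type-asserts `ret.Get(0)` when it is non-nil:

```go
package example_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestExecutionResultsReaderNotFound(t *testing.T) {
	results := storagemock.NewExecutionResultsReader(t)

	unknown := flow.Identifier{0xff} // illustrative ID
	results.On("ByID", unknown).Return(nil, storage.ErrNotFound)

	_, err := results.ByID(unknown)
	require.True(t, errors.Is(err, storage.ErrNotFound))
}
```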
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// ExecutionResultsReader is an autogenerated mock type for the ExecutionResultsReader type +type ExecutionResultsReader struct { + mock.Mock +} + +// ByBlockID provides a mock function with given fields: blockID +func (_m *ExecutionResultsReader) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 *flow.ExecutionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ExecutionResult, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.ExecutionResult); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.ExecutionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByID provides a mock function with given fields: resultID +func (_m *ExecutionResultsReader) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) { + ret := _m.Called(resultID) + + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 *flow.ExecutionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ExecutionResult, error)); ok { + return rf(resultID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.ExecutionResult); ok { + r0 = rf(resultID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.ExecutionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(resultID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewExecutionResultsReader creates a new instance of ExecutionResultsReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewExecutionResultsReader(t interface { + mock.TestingT + Cleanup(func()) +}) *ExecutionResultsReader { + mock := &ExecutionResultsReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/guarantees.go b/storage/mock/guarantees.go index 4ea09b69fad..93c59eae208 100644 --- a/storage/mock/guarantees.go +++ b/storage/mock/guarantees.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -16,6 +16,10 @@ type Guarantees struct { func (_m *Guarantees) ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error) { ret := _m.Called(collID) + if len(ret) == 0 { + panic("no return value specified for ByCollectionID") + } + var r0 *flow.CollectionGuarantee var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.CollectionGuarantee, error)); ok { @@ -38,27 +42,42 @@ func (_m *Guarantees) ByCollectionID(collID flow.Identifier) (*flow.CollectionGu return r0, r1 } -// Store provides a mock function with given fields: guarantee -func (_m *Guarantees) Store(guarantee *flow.CollectionGuarantee) error { - ret := _m.Called(guarantee) +// ByID provides a mock function with given fields: guaranteeID +func (_m *Guarantees) ByID(guaranteeID flow.Identifier) (*flow.CollectionGuarantee, error) { + ret := _m.Called(guaranteeID) + + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 *flow.CollectionGuarantee + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.CollectionGuarantee, error)); ok { + return rf(guaranteeID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.CollectionGuarantee); ok { + r0 = rf(guaranteeID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.CollectionGuarantee) + } + } - var r0 error - if rf, ok := ret.Get(0).(func(*flow.CollectionGuarantee) error); ok { - r0 = rf(guarantee) + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(guaranteeID) } else { - r0 = ret.Error(0) + r1 = ret.Error(1) } - return r0 + return r0, r1 } -type mockConstructorTestingTNewGuarantees interface { +// NewGuarantees creates a new instance of Guarantees. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewGuarantees(t interface { mock.TestingT Cleanup(func()) -} - -// NewGuarantees creates a new instance of Guarantees. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewGuarantees(t mockConstructorTestingTNewGuarantees) *Guarantees { +}) *Guarantees { mock := &Guarantees{} mock.Mock.Test(t) diff --git a/storage/mock/headers.go b/storage/mock/headers.go index f130a452946..f179da8186e 100644 --- a/storage/mock/headers.go +++ b/storage/mock/headers.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -16,6 +16,10 @@ type Headers struct { func (_m *Headers) BlockIDByHeight(height uint64) (flow.Identifier, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for BlockIDByHeight") + } + var r0 flow.Identifier var r1 error if rf, ok := ret.Get(0).(func(uint64) (flow.Identifier, error)); ok { @@ -42,6 +46,10 @@ func (_m *Headers) BlockIDByHeight(height uint64) (flow.Identifier, error) { func (_m *Headers) ByBlockID(blockID flow.Identifier) (*flow.Header, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 *flow.Header var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Header, error)); ok { @@ -68,6 +76,10 @@ func (_m *Headers) ByBlockID(blockID flow.Identifier) (*flow.Header, error) { func (_m *Headers) ByHeight(height uint64) (*flow.Header, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for ByHeight") + } + var r0 *flow.Header var r1 error if rf, ok := ret.Get(0).(func(uint64) (*flow.Header, error)); ok { @@ -94,6 +106,10 @@ func (_m *Headers) ByHeight(height uint64) (*flow.Header, error) { func (_m *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error) { ret := _m.Called(parentID) + if len(ret) == 0 { + panic("no return value specified for ByParentID") + } + var r0 []*flow.Header var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) ([]*flow.Header, error)); ok { @@ -116,10 +132,44 @@ func (_m *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error) return r0, r1 } +// ByView provides a mock function with given fields: view +func (_m *Headers) ByView(view uint64) (*flow.Header, error) { + ret := _m.Called(view) + + if len(ret) == 0 { + panic("no return value specified for ByView") + } + + var r0 *flow.Header + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (*flow.Header, error)); ok { + return rf(view) + } + if rf, ok := ret.Get(0).(func(uint64) *flow.Header); ok { + r0 = rf(view) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.Header) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(view) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Exists provides a mock function with given fields: blockID func (_m *Headers) Exists(blockID flow.Identifier) (bool, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for Exists") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (bool, error)); ok { @@ -140,27 +190,42 @@ func (_m *Headers) Exists(blockID flow.Identifier) (bool, error) { return r0, r1 } -// Store provides a mock function with given fields: header -func (_m *Headers) Store(header *flow.Header) error { - ret := _m.Called(header) +// ProposalByBlockID provides a mock function with given fields: blockID +func (_m *Headers) ProposalByBlockID(blockID flow.Identifier) (*flow.ProposalHeader, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for ProposalByBlockID") + } + + var r0 *flow.ProposalHeader + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ProposalHeader, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.ProposalHeader); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.ProposalHeader) + } + } - var r0 error - if rf, ok := ret.Get(0).(func(*flow.Header) error); ok { - r0 = rf(header) + if rf, ok := 
ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) } else { - r0 = ret.Error(0) + r1 = ret.Error(1) } - return r0 + return r0, r1 } -type mockConstructorTestingTNewHeaders interface { +// NewHeaders creates a new instance of Headers. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHeaders(t interface { mock.TestingT Cleanup(func()) -} - -// NewHeaders creates a new instance of Headers. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewHeaders(t mockConstructorTestingTNewHeaders) *Headers { +}) *Headers { mock := &Headers{} mock.Mock.Test(t) diff --git a/storage/mock/height_index.go b/storage/mock/height_index.go new file mode 100644 index 00000000000..0b25bf331dc --- /dev/null +++ b/storage/mock/height_index.go @@ -0,0 +1,98 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// HeightIndex is an autogenerated mock type for the HeightIndex type +type HeightIndex struct { + mock.Mock +} + +// FirstHeight provides a mock function with no fields +func (_m *HeightIndex) FirstHeight() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FirstHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestHeight provides a mock function with no fields +func (_m *HeightIndex) LatestHeight() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestHeight") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetLatestHeight provides a mock function with given fields: height +func (_m *HeightIndex) SetLatestHeight(height uint64) error { + ret := _m.Called(height) + + if len(ret) == 0 { + panic("no return value specified for SetLatestHeight") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64) error); ok { + r0 = rf(height) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewHeightIndex creates a new instance of HeightIndex. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHeightIndex(t interface { + mock.TestingT + Cleanup(func()) +}) *HeightIndex { + mock := &HeightIndex{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/index.go b/storage/mock/index.go index d0d2472e181..382c0f8f434 100644 --- a/storage/mock/index.go +++ b/storage/mock/index.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -16,6 +16,10 @@ type Index struct { func (_m *Index) ByBlockID(blockID flow.Identifier) (*flow.Index, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 *flow.Index var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Index, error)); ok { @@ -38,27 +42,12 @@ func (_m *Index) ByBlockID(blockID flow.Identifier) (*flow.Index, error) { return r0, r1 } -// Store provides a mock function with given fields: blockID, index -func (_m *Index) Store(blockID flow.Identifier, index *flow.Index) error { - ret := _m.Called(blockID, index) - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.Index) error); ok { - r0 = rf(blockID, index) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewIndex interface { +// NewIndex creates a new instance of Index. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIndex(t interface { mock.TestingT Cleanup(func()) -} - -// NewIndex creates a new instance of Index. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewIndex(t mockConstructorTestingTNewIndex) *Index { +}) *Index { mock := &Index{} mock.Mock.Test(t) diff --git a/storage/mock/iter_item.go b/storage/mock/iter_item.go new file mode 100644 index 00000000000..6a2e8220d76 --- /dev/null +++ b/storage/mock/iter_item.go @@ -0,0 +1,82 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// IterItem is an autogenerated mock type for the IterItem type +type IterItem struct { + mock.Mock +} + +// Key provides a mock function with no fields +func (_m *IterItem) Key() []byte { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Key") + } + + var r0 []byte + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// KeyCopy provides a mock function with given fields: dst +func (_m *IterItem) KeyCopy(dst []byte) []byte { + ret := _m.Called(dst) + + if len(ret) == 0 { + panic("no return value specified for KeyCopy") + } + + var r0 []byte + if rf, ok := ret.Get(0).(func([]byte) []byte); ok { + r0 = rf(dst) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + return r0 +} + +// Value provides a mock function with given fields: _a0 +func (_m *IterItem) Value(_a0 func([]byte) error) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Value") + } + + var r0 error + if rf, ok := ret.Get(0).(func(func([]byte) error) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewIterItem creates a new instance of IterItem. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIterItem(t interface { + mock.TestingT + Cleanup(func()) +}) *IterItem { + mock := &IterItem{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/iterator.go b/storage/mock/iterator.go new file mode 100644 index 00000000000..5a27dc28063 --- /dev/null +++ b/storage/mock/iterator.go @@ -0,0 +1,106 @@ +// Code generated by mockery. DO NOT EDIT. 
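The new IterItem mock above and the Iterator mock below can script a whole `First/Valid/Next` traversal using sequenced `.Once()` expectations, which testify consumes in declaration order. A sketch of a single-entry iteration; the key bytes are illustrative:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestIteratorSingleEntry(t *testing.T) {
	item := storagemock.NewIterItem(t)
	item.On("Key").Return([]byte("k1")) // illustrative key

	it := storagemock.NewIterator(t)
	// Sequenced expectations: valid exactly once, then exhausted.
	it.On("First").Return(true).Once()
	it.On("Valid").Return(true).Once()
	it.On("IterItem").Return(item).Once()
	it.On("Next").Return().Once()
	it.On("Valid").Return(false).Once()
	it.On("Close").Return(nil).Once()

	var keys [][]byte
	for it.First(); it.Valid(); it.Next() {
		keys = append(keys, it.IterItem().Key())
	}
	require.NoError(t, it.Close())
	require.Equal(t, [][]byte{[]byte("k1")}, keys)
}
```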
+ +package mock + +import ( + storage "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// Iterator is an autogenerated mock type for the Iterator type +type Iterator struct { + mock.Mock +} + +// Close provides a mock function with no fields +func (_m *Iterator) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// First provides a mock function with no fields +func (_m *Iterator) First() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for First") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// IterItem provides a mock function with no fields +func (_m *Iterator) IterItem() storage.IterItem { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IterItem") + } + + var r0 storage.IterItem + if rf, ok := ret.Get(0).(func() storage.IterItem); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.IterItem) + } + } + + return r0 +} + +// Next provides a mock function with no fields +func (_m *Iterator) Next() { + _m.Called() +} + +// Valid provides a mock function with no fields +func (_m *Iterator) Valid() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Valid") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NewIterator creates a new instance of Iterator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIterator(t interface { + mock.TestingT + Cleanup(func()) +}) *Iterator { + mock := &Iterator{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/job.go b/storage/mock/job.go deleted file mode 100644 index dabff1d99ac..00000000000 --- a/storage/mock/job.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mock - -import ( - module "github.com/onflow/flow-go/module" - mock "github.com/stretchr/testify/mock" -) - -// Job is an autogenerated mock type for the Job type -type Job struct { - mock.Mock -} - -// ID provides a mock function with given fields: -func (_m *Job) ID() module.JobID { - ret := _m.Called() - - var r0 module.JobID - if rf, ok := ret.Get(0).(func() module.JobID); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(module.JobID) - } - - return r0 -} diff --git a/storage/mock/latest_persisted_sealed_result.go b/storage/mock/latest_persisted_sealed_result.go new file mode 100644 index 00000000000..6481b95e3e3 --- /dev/null +++ b/storage/mock/latest_persisted_sealed_result.go @@ -0,0 +1,77 @@ +// Code generated by mockery. DO NOT EDIT. 
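Unusually for a storage mock, `Latest` on the LatestPersistedSealedResult mock below returns a `(flow.Identifier, uint64)` pair rather than `(value, error)`, so both stubbed positions are plain values. A sketch with illustrative values; the explicit `uint64` conversion matters because the generated code type-asserts `ret.Get(1).(uint64)`:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestLatestPersistedSealedResult(t *testing.T) {
	latest := storagemock.NewLatestPersistedSealedResult(t)

	resultID := flow.Identifier{0xaa} // illustrative ID
	// uint64(42) must be explicitly typed: an untyped 42 would be stored
	// as int and fail the generated ret.Get(1).(uint64) assertion.
	latest.On("Latest").Return(resultID, uint64(42))

	id, height := latest.Latest()
	require.Equal(t, resultID, id)
	require.Equal(t, uint64(42), height)
}
```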
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" +) + +// LatestPersistedSealedResult is an autogenerated mock type for the LatestPersistedSealedResult type +type LatestPersistedSealedResult struct { + mock.Mock +} + +// BatchSet provides a mock function with given fields: resultID, height, batch +func (_m *LatestPersistedSealedResult) BatchSet(resultID flow.Identifier, height uint64, batch storage.ReaderBatchWriter) error { + ret := _m.Called(resultID, height, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchSet") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Identifier, uint64, storage.ReaderBatchWriter) error); ok { + r0 = rf(resultID, height, batch) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Latest provides a mock function with no fields +func (_m *LatestPersistedSealedResult) Latest() (flow.Identifier, uint64) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Latest") + } + + var r0 flow.Identifier + var r1 uint64 + if rf, ok := ret.Get(0).(func() (flow.Identifier, uint64)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() flow.Identifier); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.Identifier) + } + } + + if rf, ok := ret.Get(1).(func() uint64); ok { + r1 = rf() + } else { + r1 = ret.Get(1).(uint64) + } + + return r0, r1 +} + +// NewLatestPersistedSealedResult creates a new instance of LatestPersistedSealedResult. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLatestPersistedSealedResult(t interface { + mock.TestingT + Cleanup(func()) +}) *LatestPersistedSealedResult { + mock := &LatestPersistedSealedResult{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/ledger.go b/storage/mock/ledger.go index 6d5bee1a697..5ca49f03591 100644 --- a/storage/mock/ledger.go +++ b/storage/mock/ledger.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -12,10 +12,14 @@ type Ledger struct { mock.Mock } -// EmptyStateCommitment provides a mock function with given fields: +// EmptyStateCommitment provides a mock function with no fields func (_m *Ledger) EmptyStateCommitment() flow.StateCommitment { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for EmptyStateCommitment") + } + var r0 flow.StateCommitment if rf, ok := ret.Get(0).(func() flow.StateCommitment); ok { r0 = rf() @@ -29,19 +33,23 @@ func (_m *Ledger) EmptyStateCommitment() flow.StateCommitment { } // GetRegisters provides a mock function with given fields: registerIDs, stateCommitment -func (_m *Ledger) GetRegisters(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment) ([][]byte, error) { +func (_m *Ledger) GetRegisters(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment) ([]flow.RegisterValue, error) { ret := _m.Called(registerIDs, stateCommitment) - var r0 [][]byte + if len(ret) == 0 { + panic("no return value specified for GetRegisters") + } + + var r0 []flow.RegisterValue var r1 error - if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment) ([][]byte, error)); ok { + if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment) ([]flow.RegisterValue, error)); ok { return rf(registerIDs, stateCommitment) } - if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment) [][]byte); ok { + if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment) []flow.RegisterValue); ok { r0 = rf(registerIDs, stateCommitment) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([][]byte) + r0 = ret.Get(0).([]flow.RegisterValue) } } @@ -55,28 +63,32 @@ func (_m *Ledger) GetRegisters(registerIDs []flow.RegisterID, stateCommitment fl } // GetRegistersWithProof provides a mock function with given fields: registerIDs, stateCommitment -func (_m *Ledger) GetRegistersWithProof(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment) ([][]byte, [][]byte, error) { +func (_m *Ledger) GetRegistersWithProof(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment) ([]flow.RegisterValue, []flow.StorageProof, error) { ret := _m.Called(registerIDs, stateCommitment) - var r0 [][]byte - var r1 [][]byte + if len(ret) == 0 { + panic("no return value specified for GetRegistersWithProof") + } + + var r0 []flow.RegisterValue + var r1 []flow.StorageProof var r2 error - if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment) ([][]byte, [][]byte, error)); ok { + if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment) ([]flow.RegisterValue, []flow.StorageProof, error)); ok { return rf(registerIDs, stateCommitment) } - if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment) [][]byte); ok { + if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment) []flow.RegisterValue); ok { r0 = rf(registerIDs, stateCommitment) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([][]byte) + r0 = ret.Get(0).([]flow.RegisterValue) } } - if rf, ok := ret.Get(1).(func([]flow.RegisterID, flow.StateCommitment) [][]byte); ok { + if rf, ok := ret.Get(1).(func([]flow.RegisterID, flow.StateCommitment) []flow.StorageProof); ok { r1 = rf(registerIDs, stateCommitment) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).([][]byte) + r1 = ret.Get(1).([]flow.StorageProof) } } @@ -90,15 +102,19 @@ func (_m *Ledger) GetRegistersWithProof(registerIDs []flow.RegisterID, stateComm } // UpdateRegisters provides a mock function with given fields: registerIDs, 
values, stateCommitment -func (_m *Ledger) UpdateRegisters(registerIDs []flow.RegisterID, values [][]byte, stateCommitment flow.StateCommitment) (flow.StateCommitment, error) { +func (_m *Ledger) UpdateRegisters(registerIDs []flow.RegisterID, values []flow.RegisterValue, stateCommitment flow.StateCommitment) (flow.StateCommitment, error) { ret := _m.Called(registerIDs, values, stateCommitment) + if len(ret) == 0 { + panic("no return value specified for UpdateRegisters") + } + var r0 flow.StateCommitment var r1 error - if rf, ok := ret.Get(0).(func([]flow.RegisterID, [][]byte, flow.StateCommitment) (flow.StateCommitment, error)); ok { + if rf, ok := ret.Get(0).(func([]flow.RegisterID, []flow.RegisterValue, flow.StateCommitment) (flow.StateCommitment, error)); ok { return rf(registerIDs, values, stateCommitment) } - if rf, ok := ret.Get(0).(func([]flow.RegisterID, [][]byte, flow.StateCommitment) flow.StateCommitment); ok { + if rf, ok := ret.Get(0).(func([]flow.RegisterID, []flow.RegisterValue, flow.StateCommitment) flow.StateCommitment); ok { r0 = rf(registerIDs, values, stateCommitment) } else { if ret.Get(0) != nil { @@ -106,7 +122,7 @@ func (_m *Ledger) UpdateRegisters(registerIDs []flow.RegisterID, values [][]byte } } - if rf, ok := ret.Get(1).(func([]flow.RegisterID, [][]byte, flow.StateCommitment) error); ok { + if rf, ok := ret.Get(1).(func([]flow.RegisterID, []flow.RegisterValue, flow.StateCommitment) error); ok { r1 = rf(registerIDs, values, stateCommitment) } else { r1 = ret.Error(1) @@ -116,16 +132,20 @@ func (_m *Ledger) UpdateRegisters(registerIDs []flow.RegisterID, values [][]byte } // UpdateRegistersWithProof provides a mock function with given fields: registerIDs, values, stateCommitment -func (_m *Ledger) UpdateRegistersWithProof(registerIDs []flow.RegisterID, values [][]byte, stateCommitment flow.StateCommitment) (flow.StateCommitment, [][]byte, error) { +func (_m *Ledger) UpdateRegistersWithProof(registerIDs []flow.RegisterID, values []flow.RegisterValue, stateCommitment flow.StateCommitment) (flow.StateCommitment, []flow.StorageProof, error) { ret := _m.Called(registerIDs, values, stateCommitment) + if len(ret) == 0 { + panic("no return value specified for UpdateRegistersWithProof") + } + var r0 flow.StateCommitment - var r1 [][]byte + var r1 []flow.StorageProof var r2 error - if rf, ok := ret.Get(0).(func([]flow.RegisterID, [][]byte, flow.StateCommitment) (flow.StateCommitment, [][]byte, error)); ok { + if rf, ok := ret.Get(0).(func([]flow.RegisterID, []flow.RegisterValue, flow.StateCommitment) (flow.StateCommitment, []flow.StorageProof, error)); ok { return rf(registerIDs, values, stateCommitment) } - if rf, ok := ret.Get(0).(func([]flow.RegisterID, [][]byte, flow.StateCommitment) flow.StateCommitment); ok { + if rf, ok := ret.Get(0).(func([]flow.RegisterID, []flow.RegisterValue, flow.StateCommitment) flow.StateCommitment); ok { r0 = rf(registerIDs, values, stateCommitment) } else { if ret.Get(0) != nil { @@ -133,15 +153,15 @@ func (_m *Ledger) UpdateRegistersWithProof(registerIDs []flow.RegisterID, values } } - if rf, ok := ret.Get(1).(func([]flow.RegisterID, [][]byte, flow.StateCommitment) [][]byte); ok { + if rf, ok := ret.Get(1).(func([]flow.RegisterID, []flow.RegisterValue, flow.StateCommitment) []flow.StorageProof); ok { r1 = rf(registerIDs, values, stateCommitment) } else { if ret.Get(1) != nil { - r1 = ret.Get(1).([][]byte) + r1 = ret.Get(1).([]flow.StorageProof) } } - if rf, ok := ret.Get(2).(func([]flow.RegisterID, [][]byte, flow.StateCommitment) error); ok { 
+ if rf, ok := ret.Get(2).(func([]flow.RegisterID, []flow.RegisterValue, flow.StateCommitment) error); ok { r2 = rf(registerIDs, values, stateCommitment) } else { r2 = ret.Error(2) @@ -150,13 +170,12 @@ func (_m *Ledger) UpdateRegistersWithProof(registerIDs []flow.RegisterID, values return r0, r1, r2 } -type mockConstructorTestingTNewLedger interface { +// NewLedger creates a new instance of Ledger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLedger(t interface { mock.TestingT Cleanup(func()) -} - -// NewLedger creates a new instance of Ledger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewLedger(t mockConstructorTestingTNewLedger) *Ledger { +}) *Ledger { mock := &Ledger{} mock.Mock.Test(t) diff --git a/storage/mock/ledger_verifier.go b/storage/mock/ledger_verifier.go index 9a823e5fa0e..c92d8efb92a 100644 --- a/storage/mock/ledger_verifier.go +++ b/storage/mock/ledger_verifier.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -13,21 +13,25 @@ type LedgerVerifier struct { } // VerifyRegistersProof provides a mock function with given fields: registerIDs, stateCommitment, values, proof -func (_m *LedgerVerifier) VerifyRegistersProof(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment, values [][]byte, proof [][]byte) (bool, error) { +func (_m *LedgerVerifier) VerifyRegistersProof(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment, values []flow.RegisterValue, proof []flow.StorageProof) (bool, error) { ret := _m.Called(registerIDs, stateCommitment, values, proof) + if len(ret) == 0 { + panic("no return value specified for VerifyRegistersProof") + } + var r0 bool var r1 error - if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment, [][]byte, [][]byte) (bool, error)); ok { + if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment, []flow.RegisterValue, []flow.StorageProof) (bool, error)); ok { return rf(registerIDs, stateCommitment, values, proof) } - if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment, [][]byte, [][]byte) bool); ok { + if rf, ok := ret.Get(0).(func([]flow.RegisterID, flow.StateCommitment, []flow.RegisterValue, []flow.StorageProof) bool); ok { r0 = rf(registerIDs, stateCommitment, values, proof) } else { r0 = ret.Get(0).(bool) } - if rf, ok := ret.Get(1).(func([]flow.RegisterID, flow.StateCommitment, [][]byte, [][]byte) error); ok { + if rf, ok := ret.Get(1).(func([]flow.RegisterID, flow.StateCommitment, []flow.RegisterValue, []flow.StorageProof) error); ok { r1 = rf(registerIDs, stateCommitment, values, proof) } else { r1 = ret.Error(1) @@ -36,13 +40,12 @@ func (_m *LedgerVerifier) VerifyRegistersProof(registerIDs []flow.RegisterID, st return r0, r1 } -type mockConstructorTestingTNewLedgerVerifier interface { +// NewLedgerVerifier creates a new instance of LedgerVerifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLedgerVerifier(t interface { mock.TestingT Cleanup(func()) -} - -// NewLedgerVerifier creates a new instance of LedgerVerifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewLedgerVerifier(t mockConstructorTestingTNewLedgerVerifier) *LedgerVerifier { +}) *LedgerVerifier { mock := &LedgerVerifier{} mock.Mock.Test(t) diff --git a/storage/mock/light_transaction_results.go b/storage/mock/light_transaction_results.go new file mode 100644 index 00000000000..6e6b277acb8 --- /dev/null +++ b/storage/mock/light_transaction_results.go @@ -0,0 +1,155 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" +) + +// LightTransactionResults is an autogenerated mock type for the LightTransactionResults type +type LightTransactionResults struct { + mock.Mock +} + +// BatchStore provides a mock function with given fields: blockID, transactionResults, rw +func (_m *LightTransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, rw storage.ReaderBatchWriter) error { + ret := _m.Called(blockID, transactionResults, rw) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.LightTransactionResult, storage.ReaderBatchWriter) error); ok { + r0 = rf(blockID, transactionResults, rw) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BatchStoreBadger provides a mock function with given fields: blockID, transactionResults, batch +func (_m *LightTransactionResults) BatchStoreBadger(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, batch storage.BatchStorage) error { + ret := _m.Called(blockID, transactionResults, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchStoreBadger") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.LightTransactionResult, storage.BatchStorage) error); ok { + r0 = rf(blockID, transactionResults, batch) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ByBlockID provides a mock function with given fields: id +func (_m *LightTransactionResults) ByBlockID(id flow.Identifier) ([]flow.LightTransactionResult, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 []flow.LightTransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.LightTransactionResult, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.LightTransactionResult); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.LightTransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionID provides a mock function with given fields: blockID, transactionID +func (_m *LightTransactionResults) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.LightTransactionResult, error) { + ret := _m.Called(blockID, transactionID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionID") + } + + var r0 *flow.LightTransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) (*flow.LightTransactionResult, error)); ok { + return rf(blockID, transactionID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) *flow.LightTransactionResult); ok { + r0 = rf(blockID, transactionID) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*flow.LightTransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, flow.Identifier) error); ok { + r1 = rf(blockID, transactionID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionIndex provides a mock function with given fields: blockID, txIndex +func (_m *LightTransactionResults) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.LightTransactionResult, error) { + ret := _m.Called(blockID, txIndex) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionIndex") + } + + var r0 *flow.LightTransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) (*flow.LightTransactionResult, error)); ok { + return rf(blockID, txIndex) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) *flow.LightTransactionResult); ok { + r0 = rf(blockID, txIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.LightTransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, uint32) error); ok { + r1 = rf(blockID, txIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewLightTransactionResults creates a new instance of LightTransactionResults. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLightTransactionResults(t interface { + mock.TestingT + Cleanup(func()) +}) *LightTransactionResults { + mock := &LightTransactionResults{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/light_transaction_results_reader.go b/storage/mock/light_transaction_results_reader.go new file mode 100644 index 00000000000..a93151f12d2 --- /dev/null +++ b/storage/mock/light_transaction_results_reader.go @@ -0,0 +1,117 @@ +// Code generated by mockery. DO NOT EDIT. 
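Since these mocks are regenerated wholesale, a cheap guard against drift is a compile-time assertion that each mock still satisfies the storage interface it was generated from. A sketch, assuming the reader/writer interfaces live in `github.com/onflow/flow-go/storage` under the names the generator references:

```go
package example

import (
	"github.com/onflow/flow-go/storage"
	storagemock "github.com/onflow/flow-go/storage/mock"
)

// Compile-time checks: a regeneration that drifts from its interface
// fails the build here rather than at test runtime.
var (
	_ storage.LightTransactionResults       = (*storagemock.LightTransactionResults)(nil)
	_ storage.LightTransactionResultsReader = (*storagemock.LightTransactionResultsReader)(nil)
)
```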
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// LightTransactionResultsReader is an autogenerated mock type for the LightTransactionResultsReader type +type LightTransactionResultsReader struct { + mock.Mock +} + +// ByBlockID provides a mock function with given fields: id +func (_m *LightTransactionResultsReader) ByBlockID(id flow.Identifier) ([]flow.LightTransactionResult, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 []flow.LightTransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.LightTransactionResult, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.LightTransactionResult); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.LightTransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionID provides a mock function with given fields: blockID, transactionID +func (_m *LightTransactionResultsReader) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.LightTransactionResult, error) { + ret := _m.Called(blockID, transactionID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionID") + } + + var r0 *flow.LightTransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) (*flow.LightTransactionResult, error)); ok { + return rf(blockID, transactionID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) *flow.LightTransactionResult); ok { + r0 = rf(blockID, transactionID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.LightTransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, flow.Identifier) error); ok { + r1 = rf(blockID, transactionID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionIndex provides a mock function with given fields: blockID, txIndex +func (_m *LightTransactionResultsReader) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.LightTransactionResult, error) { + ret := _m.Called(blockID, txIndex) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionIndex") + } + + var r0 *flow.LightTransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) (*flow.LightTransactionResult, error)); ok { + return rf(blockID, txIndex) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) *flow.LightTransactionResult); ok { + r0 = rf(blockID, txIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.LightTransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, uint32) error); ok { + r1 = rf(blockID, txIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewLightTransactionResultsReader creates a new instance of LightTransactionResultsReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewLightTransactionResultsReader(t interface { + mock.TestingT + Cleanup(func()) +}) *LightTransactionResultsReader { + mock := &LightTransactionResultsReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/lock_manager.go b/storage/mock/lock_manager.go new file mode 100644 index 00000000000..e1ea9a1c2e9 --- /dev/null +++ b/storage/mock/lock_manager.go @@ -0,0 +1,47 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + lockctx "github.com/jordanschalm/lockctx" + mock "github.com/stretchr/testify/mock" +) + +// LockManager is an autogenerated mock type for the LockManager type +type LockManager struct { + mock.Mock +} + +// NewContext provides a mock function with no fields +func (_m *LockManager) NewContext() lockctx.Context { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for NewContext") + } + + var r0 lockctx.Context + if rf, ok := ret.Get(0).(func() lockctx.Context); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(lockctx.Context) + } + } + + return r0 +} + +// NewLockManager creates a new instance of LockManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLockManager(t interface { + mock.TestingT + Cleanup(func()) +}) *LockManager { + mock := &LockManager{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/my_execution_receipts.go b/storage/mock/my_execution_receipts.go index 6ebba2fb4b5..00fa6a07707 100644 --- a/storage/mock/my_execution_receipts.go +++ b/storage/mock/my_execution_receipts.go @@ -1,9 +1,11 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" storage "github.com/onflow/flow-go/storage" @@ -15,11 +17,15 @@ type MyExecutionReceipts struct { } // BatchRemoveIndexByBlockID provides a mock function with given fields: blockID, batch -func (_m *MyExecutionReceipts) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { +func (_m *MyExecutionReceipts) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error { ret := _m.Called(blockID, batch) + if len(ret) == 0 { + panic("no return value specified for BatchRemoveIndexByBlockID") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier, storage.ReaderBatchWriter) error); ok { r0 = rf(blockID, batch) } else { r0 = ret.Error(0) @@ -28,13 +34,17 @@ func (_m *MyExecutionReceipts) BatchRemoveIndexByBlockID(blockID flow.Identifier return r0 } -// BatchStoreMyReceipt provides a mock function with given fields: receipt, batch -func (_m *MyExecutionReceipts) BatchStoreMyReceipt(receipt *flow.ExecutionReceipt, batch storage.BatchStorage) error { - ret := _m.Called(receipt, batch) +// BatchStoreMyReceipt provides a mock function with given fields: lctx, receipt, batch +func (_m *MyExecutionReceipts) BatchStoreMyReceipt(lctx lockctx.Proof, receipt *flow.ExecutionReceipt, batch storage.ReaderBatchWriter) error { + ret := _m.Called(lctx, receipt, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchStoreMyReceipt") + } var r0 error - if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt, storage.BatchStorage) error); ok { - r0 = rf(receipt, batch) + if rf, ok := ret.Get(0).(func(lockctx.Proof, *flow.ExecutionReceipt, storage.ReaderBatchWriter) error); ok { + r0 = rf(lctx, receipt, batch) } else { r0 = ret.Error(0) } @@ -46,6 +56,10 @@ func (_m *MyExecutionReceipts) BatchStoreMyReceipt(receipt *flow.ExecutionReceip func (_m *MyExecutionReceipts) MyReceipt(blockID flow.Identifier) (*flow.ExecutionReceipt, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for MyReceipt") + } + var r0 *flow.ExecutionReceipt var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ExecutionReceipt, error)); ok { @@ -68,27 +82,12 @@ func (_m *MyExecutionReceipts) MyReceipt(blockID flow.Identifier) (*flow.Executi return r0, r1 } -// StoreMyReceipt provides a mock function with given fields: receipt -func (_m *MyExecutionReceipts) StoreMyReceipt(receipt *flow.ExecutionReceipt) error { - ret := _m.Called(receipt) - - var r0 error - if rf, ok := ret.Get(0).(func(*flow.ExecutionReceipt) error); ok { - r0 = rf(receipt) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewMyExecutionReceipts interface { +// NewMyExecutionReceipts creates a new instance of MyExecutionReceipts. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMyExecutionReceipts(t interface { mock.TestingT Cleanup(func()) -} - -// NewMyExecutionReceipts creates a new instance of MyExecutionReceipts. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewMyExecutionReceipts(t mockConstructorTestingTNewMyExecutionReceipts) *MyExecutionReceipts { +}) *MyExecutionReceipts { mock := &MyExecutionReceipts{} mock.Mock.Test(t) diff --git a/storage/mock/node_disallow_list.go b/storage/mock/node_disallow_list.go new file mode 100644 index 00000000000..47b2c7ff04e --- /dev/null +++ b/storage/mock/node_disallow_list.go @@ -0,0 +1,63 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// NodeDisallowList is an autogenerated mock type for the NodeDisallowList type +type NodeDisallowList struct { + mock.Mock +} + +// Retrieve provides a mock function with given fields: disallowList +func (_m *NodeDisallowList) Retrieve(disallowList *map[flow.Identifier]struct{}) error { + ret := _m.Called(disallowList) + + if len(ret) == 0 { + panic("no return value specified for Retrieve") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*map[flow.Identifier]struct{}) error); ok { + r0 = rf(disallowList) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Store provides a mock function with given fields: disallowList +func (_m *NodeDisallowList) Store(disallowList map[flow.Identifier]struct{}) error { + ret := _m.Called(disallowList) + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 error + if rf, ok := ret.Get(0).(func(map[flow.Identifier]struct{}) error); ok { + r0 = rf(disallowList) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewNodeDisallowList creates a new instance of NodeDisallowList. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNodeDisallowList(t interface { + mock.TestingT + Cleanup(func()) +}) *NodeDisallowList { + mock := &NodeDisallowList{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/payloads.go b/storage/mock/payloads.go index 8da3720c709..9dd42c8cf79 100644 --- a/storage/mock/payloads.go +++ b/storage/mock/payloads.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type Payloads struct { func (_m *Payloads) ByBlockID(blockID flow.Identifier) (*flow.Payload, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 *flow.Payload var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Payload, error)); ok { @@ -38,27 +42,12 @@ func (_m *Payloads) ByBlockID(blockID flow.Identifier) (*flow.Payload, error) { return r0, r1 } -// Store provides a mock function with given fields: blockID, payload -func (_m *Payloads) Store(blockID flow.Identifier, payload *flow.Payload) error { - ret := _m.Called(blockID, payload) - - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, *flow.Payload) error); ok { - r0 = rf(blockID, payload) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -type mockConstructorTestingTNewPayloads interface { +// NewPayloads creates a new instance of Payloads. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewPayloads(t interface { mock.TestingT Cleanup(func()) -} - -// NewPayloads creates a new instance of Payloads. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewPayloads(t mockConstructorTestingTNewPayloads) *Payloads { +}) *Payloads { mock := &Payloads{} mock.Mock.Test(t) diff --git a/storage/mock/protocol_kv_store.go b/storage/mock/protocol_kv_store.go new file mode 100644 index 00000000000..130bbf0be0a --- /dev/null +++ b/storage/mock/protocol_kv_store.go @@ -0,0 +1,127 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + lockctx "github.com/jordanschalm/lockctx" + flow "github.com/onflow/flow-go/model/flow" + + mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" +) + +// ProtocolKVStore is an autogenerated mock type for the ProtocolKVStore type +type ProtocolKVStore struct { + mock.Mock +} + +// BatchIndex provides a mock function with given fields: lctx, rw, blockID, stateID +func (_m *ProtocolKVStore) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error { + ret := _m.Called(lctx, rw, blockID, stateID) + + if len(ret) == 0 { + panic("no return value specified for BatchIndex") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, flow.Identifier) error); ok { + r0 = rf(lctx, rw, blockID, stateID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BatchStore provides a mock function with given fields: rw, stateID, data +func (_m *ProtocolKVStore) BatchStore(rw storage.ReaderBatchWriter, stateID flow.Identifier, data *flow.PSKeyValueStoreData) error { + ret := _m.Called(rw, stateID, data) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(storage.ReaderBatchWriter, flow.Identifier, *flow.PSKeyValueStoreData) error); ok { + r0 = rf(rw, stateID, data) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ByBlockID provides a mock function with given fields: blockID +func (_m *ProtocolKVStore) ByBlockID(blockID flow.Identifier) (*flow.PSKeyValueStoreData, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 *flow.PSKeyValueStoreData + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.PSKeyValueStoreData, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.PSKeyValueStoreData); ok { + r0 = rf(blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.PSKeyValueStoreData) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByID provides a mock function with given fields: id +func (_m *ProtocolKVStore) ByID(id flow.Identifier) (*flow.PSKeyValueStoreData, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 *flow.PSKeyValueStoreData + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.PSKeyValueStoreData, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.PSKeyValueStoreData); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.PSKeyValueStoreData) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewProtocolKVStore creates a new instance of ProtocolKVStore. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProtocolKVStore(t interface { + mock.TestingT + Cleanup(func()) +}) *ProtocolKVStore { + mock := &ProtocolKVStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/quorum_certificates.go b/storage/mock/quorum_certificates.go index 980836dbce2..b4bd5177b2e 100644 --- a/storage/mock/quorum_certificates.go +++ b/storage/mock/quorum_certificates.go @@ -1,12 +1,14 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" - transaction "github.com/onflow/flow-go/storage/badger/transaction" + storage "github.com/onflow/flow-go/storage" ) // QuorumCertificates is an autogenerated mock type for the QuorumCertificates type @@ -14,10 +16,32 @@ type QuorumCertificates struct { mock.Mock } +// BatchStore provides a mock function with given fields: _a0, _a1, _a2 +func (_m *QuorumCertificates) BatchStore(_a0 lockctx.Proof, _a1 storage.ReaderBatchWriter, _a2 *flow.QuorumCertificate) error { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(lockctx.Proof, storage.ReaderBatchWriter, *flow.QuorumCertificate) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByBlockID provides a mock function with given fields: blockID func (_m *QuorumCertificates) ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 *flow.QuorumCertificate var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.QuorumCertificate, error)); ok { @@ -40,29 +64,12 @@ func (_m *QuorumCertificates) ByBlockID(blockID flow.Identifier) (*flow.QuorumCe return r0, r1 } -// StoreTx provides a mock function with given fields: qc -func (_m *QuorumCertificates) StoreTx(qc *flow.QuorumCertificate) func(*transaction.Tx) error { - ret := _m.Called(qc) - - var r0 func(*transaction.Tx) error - if rf, ok := ret.Get(0).(func(*flow.QuorumCertificate) func(*transaction.Tx) error); ok { - r0 = rf(qc) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(func(*transaction.Tx) error) - } - } - - return r0 -} - -type mockConstructorTestingTNewQuorumCertificates interface { +// NewQuorumCertificates creates a new instance of QuorumCertificates. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewQuorumCertificates(t interface { mock.TestingT Cleanup(func()) -} - -// NewQuorumCertificates creates a new instance of QuorumCertificates. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewQuorumCertificates(t mockConstructorTestingTNewQuorumCertificates) *QuorumCertificates { +}) *QuorumCertificates { mock := &QuorumCertificates{} mock.Mock.Test(t) diff --git a/storage/mock/reader.go b/storage/mock/reader.go new file mode 100644 index 00000000000..b3fab6abdb8 --- /dev/null +++ b/storage/mock/reader.go @@ -0,0 +1,118 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + io "io" + + storage "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// Reader is an autogenerated mock type for the Reader type +type Reader struct { + mock.Mock +} + +// Get provides a mock function with given fields: key +func (_m *Reader) Get(key []byte) ([]byte, io.Closer, error) { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 []byte + var r1 io.Closer + var r2 error + if rf, ok := ret.Get(0).(func([]byte) ([]byte, io.Closer, error)); ok { + return rf(key) + } + if rf, ok := ret.Get(0).(func([]byte) []byte); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func([]byte) io.Closer); ok { + r1 = rf(key) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(io.Closer) + } + } + + if rf, ok := ret.Get(2).(func([]byte) error); ok { + r2 = rf(key) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewIter provides a mock function with given fields: startPrefix, endPrefix, ops +func (_m *Reader) NewIter(startPrefix []byte, endPrefix []byte, ops storage.IteratorOption) (storage.Iterator, error) { + ret := _m.Called(startPrefix, endPrefix, ops) + + if len(ret) == 0 { + panic("no return value specified for NewIter") + } + + var r0 storage.Iterator + var r1 error + if rf, ok := ret.Get(0).(func([]byte, []byte, storage.IteratorOption) (storage.Iterator, error)); ok { + return rf(startPrefix, endPrefix, ops) + } + if rf, ok := ret.Get(0).(func([]byte, []byte, storage.IteratorOption) storage.Iterator); ok { + r0 = rf(startPrefix, endPrefix, ops) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.Iterator) + } + } + + if rf, ok := ret.Get(1).(func([]byte, []byte, storage.IteratorOption) error); ok { + r1 = rf(startPrefix, endPrefix, ops) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSeeker provides a mock function with no fields +func (_m *Reader) NewSeeker() storage.Seeker { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for NewSeeker") + } + + var r0 storage.Seeker + if rf, ok := ret.Get(0).(func() storage.Seeker); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.Seeker) + } + } + + return r0 +} + +// NewReader creates a new instance of Reader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReader(t interface { + mock.TestingT + Cleanup(func()) +}) *Reader { + mock := &Reader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/reader_batch_writer.go b/storage/mock/reader_batch_writer.go new file mode 100644 index 00000000000..7141a9a9893 --- /dev/null +++ b/storage/mock/reader_batch_writer.go @@ -0,0 +1,107 @@ +// Code generated by mockery. DO NOT EDIT. 
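// [Editor's sketch for the Reader mock completed above — not part of this diff or of
// the file that follows.] Reader.Get returns three values, so the stub must supply all
// of them; io.NopCloser stands in for the closer a real database reader would hand
// back. Key and value bytes are assumptions chosen only to show the shape:
package mock_test

import (
	"bytes"
	"io"
	"testing"

	"github.com/stretchr/testify/require"

	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestReaderGet(t *testing.T) {
	r := storagemock.NewReader(t)

	key, val := []byte("key"), []byte("value")
	// io.NopCloser satisfies the io.Closer slot of the three-value return
	r.On("Get", key).Return(val, io.NopCloser(bytes.NewReader(nil)), nil)

	got, closer, err := r.Get(key)
	require.NoError(t, err)
	require.Equal(t, val, got)
	require.NoError(t, closer.Close())
}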
+ +package mock + +import ( + storage "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// ReaderBatchWriter is an autogenerated mock type for the ReaderBatchWriter type +type ReaderBatchWriter struct { + mock.Mock +} + +// AddCallback provides a mock function with given fields: _a0 +func (_m *ReaderBatchWriter) AddCallback(_a0 func(error)) { + _m.Called(_a0) +} + +// GlobalReader provides a mock function with no fields +func (_m *ReaderBatchWriter) GlobalReader() storage.Reader { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GlobalReader") + } + + var r0 storage.Reader + if rf, ok := ret.Get(0).(func() storage.Reader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.Reader) + } + } + + return r0 +} + +// ScopedValue provides a mock function with given fields: key +func (_m *ReaderBatchWriter) ScopedValue(key string) (any, bool) { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for ScopedValue") + } + + var r0 any + var r1 bool + if rf, ok := ret.Get(0).(func(string) (any, bool)); ok { + return rf(key) + } + if rf, ok := ret.Get(0).(func(string) any); ok { + r0 = rf(key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(any) + } + } + + if rf, ok := ret.Get(1).(func(string) bool); ok { + r1 = rf(key) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 +} + +// SetScopedValue provides a mock function with given fields: key, value +func (_m *ReaderBatchWriter) SetScopedValue(key string, value any) { + _m.Called(key, value) +} + +// Writer provides a mock function with no fields +func (_m *ReaderBatchWriter) Writer() storage.Writer { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Writer") + } + + var r0 storage.Writer + if rf, ok := ret.Get(0).(func() storage.Writer); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(storage.Writer) + } + } + + return r0 +} + +// NewReaderBatchWriter creates a new instance of ReaderBatchWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReaderBatchWriter(t interface { + mock.TestingT + Cleanup(func()) +}) *ReaderBatchWriter { + mock := &ReaderBatchWriter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/register_index.go b/storage/mock/register_index.go new file mode 100644 index 00000000000..83be7904666 --- /dev/null +++ b/storage/mock/register_index.go @@ -0,0 +1,111 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// RegisterIndex is an autogenerated mock type for the RegisterIndex type +type RegisterIndex struct { + mock.Mock +} + +// FirstHeight provides a mock function with no fields +func (_m *RegisterIndex) FirstHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FirstHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// Get provides a mock function with given fields: ID, height +func (_m *RegisterIndex) Get(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) { + ret := _m.Called(ID, height) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 flow.RegisterValue + var r1 error + if rf, ok := ret.Get(0).(func(flow.RegisterID, uint64) (flow.RegisterValue, error)); ok { + return rf(ID, height) + } + if rf, ok := ret.Get(0).(func(flow.RegisterID, uint64) flow.RegisterValue); ok { + r0 = rf(ID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.RegisterValue) + } + } + + if rf, ok := ret.Get(1).(func(flow.RegisterID, uint64) error); ok { + r1 = rf(ID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestHeight provides a mock function with no fields +func (_m *RegisterIndex) LatestHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// Store provides a mock function with given fields: entries, height +func (_m *RegisterIndex) Store(entries flow.RegisterEntries, height uint64) error { + ret := _m.Called(entries, height) + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.RegisterEntries, uint64) error); ok { + r0 = rf(entries, height) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewRegisterIndex creates a new instance of RegisterIndex. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRegisterIndex(t interface { + mock.TestingT + Cleanup(func()) +}) *RegisterIndex { + mock := &RegisterIndex{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/register_index_reader.go b/storage/mock/register_index_reader.go new file mode 100644 index 00000000000..8a056bd5943 --- /dev/null +++ b/storage/mock/register_index_reader.go @@ -0,0 +1,93 @@ +// Code generated by mockery. DO NOT EDIT. 
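// [Editor's sketch for the RegisterIndex mock completed above — not part of this diff
// or of the file that follows.] Pinning the mock to a single register/height pair; the
// register ID, height, and value are assumed, chosen only to show the expectation shape:
package mock_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestRegisterIndexGet(t *testing.T) {
	idx := storagemock.NewRegisterIndex(t)

	regID := flow.RegisterID{Owner: "owner", Key: "key"}
	idx.On("LatestHeight").Return(uint64(42))
	idx.On("Get", regID, uint64(42)).Return(flow.RegisterValue("v"), nil)

	val, err := idx.Get(regID, idx.LatestHeight())
	require.NoError(t, err)
	require.Equal(t, flow.RegisterValue("v"), val)
}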
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// RegisterIndexReader is an autogenerated mock type for the RegisterIndexReader type +type RegisterIndexReader struct { + mock.Mock +} + +// FirstHeight provides a mock function with no fields +func (_m *RegisterIndexReader) FirstHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FirstHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// Get provides a mock function with given fields: ID, height +func (_m *RegisterIndexReader) Get(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) { + ret := _m.Called(ID, height) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 flow.RegisterValue + var r1 error + if rf, ok := ret.Get(0).(func(flow.RegisterID, uint64) (flow.RegisterValue, error)); ok { + return rf(ID, height) + } + if rf, ok := ret.Get(0).(func(flow.RegisterID, uint64) flow.RegisterValue); ok { + r0 = rf(ID, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(flow.RegisterValue) + } + } + + if rf, ok := ret.Get(1).(func(flow.RegisterID, uint64) error); ok { + r1 = rf(ID, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LatestHeight provides a mock function with no fields +func (_m *RegisterIndexReader) LatestHeight() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LatestHeight") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// NewRegisterIndexReader creates a new instance of RegisterIndexReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewRegisterIndexReader(t interface { + mock.TestingT + Cleanup(func()) +}) *RegisterIndexReader { + mock := &RegisterIndexReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/result_approvals.go b/storage/mock/result_approvals.go index 9084f2dabbb..a050d410707 100644 --- a/storage/mock/result_approvals.go +++ b/storage/mock/result_approvals.go @@ -1,9 +1,11 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock import ( + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" ) @@ -16,6 +18,10 @@ type ResultApprovals struct { func (_m *ResultApprovals) ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error) { ret := _m.Called(resultID, chunkIndex) + if len(ret) == 0 { + panic("no return value specified for ByChunk") + } + var r0 *flow.ResultApproval var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, uint64) (*flow.ResultApproval, error)); ok { @@ -42,6 +48,10 @@ func (_m *ResultApprovals) ByChunk(resultID flow.Identifier, chunkIndex uint64) func (_m *ResultApprovals) ByID(approvalID flow.Identifier) (*flow.ResultApproval, error) { ret := _m.Called(approvalID) + if len(ret) == 0 { + panic("no return value specified for ByID") + } + var r0 *flow.ResultApproval var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.ResultApproval, error)); ok { @@ -64,41 +74,32 @@ func (_m *ResultApprovals) ByID(approvalID flow.Identifier) (*flow.ResultApprova return r0, r1 } -// Index provides a mock function with given fields: resultID, chunkIndex, approvalID -func (_m *ResultApprovals) Index(resultID flow.Identifier, chunkIndex uint64, approvalID flow.Identifier) error { - ret := _m.Called(resultID, chunkIndex, approvalID) +// StoreMyApproval provides a mock function with given fields: approval +func (_m *ResultApprovals) StoreMyApproval(approval *flow.ResultApproval) func(lockctx.Proof) error { + ret := _m.Called(approval) - var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, uint64, flow.Identifier) error); ok { - r0 = rf(resultID, chunkIndex, approvalID) - } else { - r0 = ret.Error(0) + if len(ret) == 0 { + panic("no return value specified for StoreMyApproval") } - return r0 -} - -// Store provides a mock function with given fields: result -func (_m *ResultApprovals) Store(result *flow.ResultApproval) error { - ret := _m.Called(result) - - var r0 error - if rf, ok := ret.Get(0).(func(*flow.ResultApproval) error); ok { - r0 = rf(result) + var r0 func(lockctx.Proof) error + if rf, ok := ret.Get(0).(func(*flow.ResultApproval) func(lockctx.Proof) error); ok { + r0 = rf(approval) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(func(lockctx.Proof) error) + } } return r0 } -type mockConstructorTestingTNewResultApprovals interface { +// NewResultApprovals creates a new instance of ResultApprovals. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewResultApprovals(t interface { mock.TestingT Cleanup(func()) -} - -// NewResultApprovals creates a new instance of ResultApprovals. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewResultApprovals(t mockConstructorTestingTNewResultApprovals) *ResultApprovals { +}) *ResultApprovals { mock := &ResultApprovals{} mock.Mock.Test(t) diff --git a/storage/mock/safe_beacon_keys.go b/storage/mock/safe_beacon_keys.go index 5d4ff0b511b..9e0afae0cb7 100644 --- a/storage/mock/safe_beacon_keys.go +++ b/storage/mock/safe_beacon_keys.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
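// [Editor's sketch for the ResultApprovals mock changed above — not part of this diff
// or of the file that follows.] StoreMyApproval now returns a deferred operation to be
// executed while holding a lock proof, so a test stubs it with a closure. The no-op
// closure and nil arguments are assumptions for brevity:
package mock_test

import (
	"testing"

	"github.com/jordanschalm/lockctx"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestStoreMyApprovalDeferred(t *testing.T) {
	approvals := storagemock.NewResultApprovals(t)

	// the stubbed return value is the deferred func(lockctx.Proof) error itself
	approvals.On("StoreMyApproval", mock.Anything).
		Return(func(lockctx.Proof) error { return nil })

	deferred := approvals.StoreMyApproval(nil) // approval value elided for brevity
	require.NoError(t, deferred(nil))          // production code would pass a real lock proof
}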
package mock import ( - crypto "github.com/onflow/flow-go/crypto" + crypto "github.com/onflow/crypto" mock "github.com/stretchr/testify/mock" ) @@ -16,6 +16,10 @@ type SafeBeaconKeys struct { func (_m *SafeBeaconKeys) RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, bool, error) { ret := _m.Called(epochCounter) + if len(ret) == 0 { + panic("no return value specified for RetrieveMyBeaconPrivateKey") + } + var r0 crypto.PrivateKey var r1 bool var r2 error @@ -45,13 +49,12 @@ func (_m *SafeBeaconKeys) RetrieveMyBeaconPrivateKey(epochCounter uint64) (crypt return r0, r1, r2 } -type mockConstructorTestingTNewSafeBeaconKeys interface { +// NewSafeBeaconKeys creates a new instance of SafeBeaconKeys. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSafeBeaconKeys(t interface { mock.TestingT Cleanup(func()) -} - -// NewSafeBeaconKeys creates a new instance of SafeBeaconKeys. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewSafeBeaconKeys(t mockConstructorTestingTNewSafeBeaconKeys) *SafeBeaconKeys { +}) *SafeBeaconKeys { mock := &SafeBeaconKeys{} mock.Mock.Test(t) diff --git a/storage/mock/seals.go b/storage/mock/seals.go index 0c26f7b6737..7968bb2996c 100644 --- a/storage/mock/seals.go +++ b/storage/mock/seals.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type Seals struct { func (_m *Seals) ByID(sealID flow.Identifier) (*flow.Seal, error) { ret := _m.Called(sealID) + if len(ret) == 0 { + panic("no return value specified for ByID") + } + var r0 *flow.Seal var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Seal, error)); ok { @@ -42,6 +46,10 @@ func (_m *Seals) ByID(sealID flow.Identifier) (*flow.Seal, error) { func (_m *Seals) FinalizedSealForBlock(blockID flow.Identifier) (*flow.Seal, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for FinalizedSealForBlock") + } + var r0 *flow.Seal var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Seal, error)); ok { @@ -68,6 +76,10 @@ func (_m *Seals) FinalizedSealForBlock(blockID flow.Identifier) (*flow.Seal, err func (_m *Seals) HighestInFork(blockID flow.Identifier) (*flow.Seal, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for HighestInFork") + } + var r0 *flow.Seal var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.Seal, error)); ok { @@ -94,6 +106,10 @@ func (_m *Seals) HighestInFork(blockID flow.Identifier) (*flow.Seal, error) { func (_m *Seals) Store(seal *flow.Seal) error { ret := _m.Called(seal) + if len(ret) == 0 { + panic("no return value specified for Store") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.Seal) error); ok { r0 = rf(seal) @@ -104,13 +120,12 @@ func (_m *Seals) Store(seal *flow.Seal) error { return r0 } -type mockConstructorTestingTNewSeals interface { +// NewSeals creates a new instance of Seals. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSeals(t interface { mock.TestingT Cleanup(func()) -} - -// NewSeals creates a new instance of Seals. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewSeals(t mockConstructorTestingTNewSeals) *Seals { +}) *Seals { mock := &Seals{} mock.Mock.Test(t) diff --git a/storage/mock/seeker.go b/storage/mock/seeker.go new file mode 100644 index 00000000000..837dd401cb0 --- /dev/null +++ b/storage/mock/seeker.go @@ -0,0 +1,54 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import mock "github.com/stretchr/testify/mock" + +// Seeker is an autogenerated mock type for the Seeker type +type Seeker struct { + mock.Mock +} + +// SeekLE provides a mock function with given fields: startPrefix, key +func (_m *Seeker) SeekLE(startPrefix []byte, key []byte) ([]byte, error) { + ret := _m.Called(startPrefix, key) + + if len(ret) == 0 { + panic("no return value specified for SeekLE") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func([]byte, []byte) ([]byte, error)); ok { + return rf(startPrefix, key) + } + if rf, ok := ret.Get(0).(func([]byte, []byte) []byte); ok { + r0 = rf(startPrefix, key) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func([]byte, []byte) error); ok { + r1 = rf(startPrefix, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSeeker creates a new instance of Seeker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSeeker(t interface { + mock.TestingT + Cleanup(func()) +}) *Seeker { + mock := &Seeker{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/service_events.go b/storage/mock/service_events.go index e065d969b23..fc2fc46db5c 100644 --- a/storage/mock/service_events.go +++ b/storage/mock/service_events.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package mock @@ -15,11 +15,15 @@ type ServiceEvents struct { } // BatchRemoveByBlockID provides a mock function with given fields: blockID, batch -func (_m *ServiceEvents) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.BatchStorage) error { +func (_m *ServiceEvents) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error { ret := _m.Called(blockID, batch) + if len(ret) == 0 { + panic("no return value specified for BatchRemoveByBlockID") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier, storage.ReaderBatchWriter) error); ok { r0 = rf(blockID, batch) } else { r0 = ret.Error(0) @@ -29,11 +33,15 @@ func (_m *ServiceEvents) BatchRemoveByBlockID(blockID flow.Identifier, batch sto } // BatchStore provides a mock function with given fields: blockID, events, batch -func (_m *ServiceEvents) BatchStore(blockID flow.Identifier, events []flow.Event, batch storage.BatchStorage) error { +func (_m *ServiceEvents) BatchStore(blockID flow.Identifier, events []flow.Event, batch storage.ReaderBatchWriter) error { ret := _m.Called(blockID, events, batch) + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.Event, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.Event, storage.ReaderBatchWriter) error); ok { r0 = rf(blockID, events, batch) } else { r0 = ret.Error(0) @@ -46,6 +54,10 @@ func (_m *ServiceEvents) BatchStore(blockID flow.Identifier, events []flow.Event func (_m *ServiceEvents) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) { ret := _m.Called(blockID) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 []flow.Event var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.Event, error)); ok { @@ -68,13 +80,12 @@ func (_m *ServiceEvents) ByBlockID(blockID flow.Identifier) ([]flow.Event, error return r0, r1 } -type mockConstructorTestingTNewServiceEvents interface { +// NewServiceEvents creates a new instance of ServiceEvents. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewServiceEvents(t interface { mock.TestingT Cleanup(func()) -} - -// NewServiceEvents creates a new instance of ServiceEvents. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewServiceEvents(t mockConstructorTestingTNewServiceEvents) *ServiceEvents { +}) *ServiceEvents { mock := &ServiceEvents{} mock.Mock.Test(t) diff --git a/storage/mock/storage.go b/storage/mock/storage.go deleted file mode 100644 index 95e66e0e896..00000000000 --- a/storage/mock/storage.go +++ /dev/null @@ -1,136 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package mock - -import mock "github.com/stretchr/testify/mock" - -// Storage is an autogenerated mock type for the Storage type -type Storage struct { - mock.Mock -} - -// GetRegisters provides a mock function with given fields: registerIDs, stateCommitment -func (_m *Storage) GetRegisters(registerIDs [][]byte, stateCommitment []byte) ([][]byte, error) { - ret := _m.Called(registerIDs, stateCommitment) - - var r0 [][]byte - if rf, ok := ret.Get(0).(func([][]byte, []byte) [][]byte); ok { - r0 = rf(registerIDs, stateCommitment) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([][]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([][]byte, []byte) error); ok { - r1 = rf(registerIDs, stateCommitment) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetRegistersWithProof provides a mock function with given fields: registerIDs, stateCommitment -func (_m *Storage) GetRegistersWithProof(registerIDs [][]byte, stateCommitment []byte) ([][]byte, [][]byte, error) { - ret := _m.Called(registerIDs, stateCommitment) - - var r0 [][]byte - if rf, ok := ret.Get(0).(func([][]byte, []byte) [][]byte); ok { - r0 = rf(registerIDs, stateCommitment) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([][]byte) - } - } - - var r1 [][]byte - if rf, ok := ret.Get(1).(func([][]byte, []byte) [][]byte); ok { - r1 = rf(registerIDs, stateCommitment) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).([][]byte) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func([][]byte, []byte) error); ok { - r2 = rf(registerIDs, stateCommitment) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// LatestStateCommitment provides a mock function with given fields: -func (_m *Storage) LatestStateCommitment() []byte { - ret := _m.Called() - - var r0 []byte - if rf, ok := ret.Get(0).(func() []byte); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - return r0 -} - -// UpdateRegisters provides a mock function with given fields: registerIDs, values -func (_m *Storage) UpdateRegisters(registerIDs [][]byte, values [][]byte) ([]byte, error) { - ret := _m.Called(registerIDs, values) - - var r0 []byte - if rf, ok := ret.Get(0).(func([][]byte, [][]byte) []byte); ok { - r0 = rf(registerIDs, values) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([][]byte, [][]byte) error); ok { - r1 = rf(registerIDs, values) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// UpdateRegistersWithProof provides a mock function with given fields: registerIDs, values -func (_m *Storage) UpdateRegistersWithProof(registerIDs [][]byte, values [][]byte) ([]byte, [][]byte, error) { - ret := _m.Called(registerIDs, values) - - var r0 []byte - if rf, ok := ret.Get(0).(func([][]byte, [][]byte) []byte); ok { - r0 = rf(registerIDs, values) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 [][]byte - if rf, ok := ret.Get(1).(func([][]byte, [][]byte) [][]byte); ok { - r1 = rf(registerIDs, values) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).([][]byte) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func([][]byte, [][]byte) error); ok { - r2 = rf(registerIDs, values) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} diff --git a/storage/mock/transaction.go b/storage/mock/transaction.go index 97a4de1493c..ea5a27c0c96 100644 --- a/storage/mock/transaction.go +++ b/storage/mock/transaction.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. 
DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -13,6 +13,10 @@ type Transaction struct { func (_m *Transaction) Set(key []byte, val []byte) error { ret := _m.Called(key, val) + if len(ret) == 0 { + panic("no return value specified for Set") + } + var r0 error if rf, ok := ret.Get(0).(func([]byte, []byte) error); ok { r0 = rf(key, val) @@ -23,13 +27,12 @@ func (_m *Transaction) Set(key []byte, val []byte) error { return r0 } -type mockConstructorTestingTNewTransaction interface { +// NewTransaction creates a new instance of Transaction. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransaction(t interface { mock.TestingT Cleanup(func()) -} - -// NewTransaction creates a new instance of Transaction. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTransaction(t mockConstructorTestingTNewTransaction) *Transaction { +}) *Transaction { mock := &Transaction{} mock.Mock.Test(t) diff --git a/storage/mock/transaction_result_error_messages.go b/storage/mock/transaction_result_error_messages.go new file mode 100644 index 00000000000..3c1075fe3ac --- /dev/null +++ b/storage/mock/transaction_result_error_messages.go @@ -0,0 +1,183 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" +) + +// TransactionResultErrorMessages is an autogenerated mock type for the TransactionResultErrorMessages type +type TransactionResultErrorMessages struct { + mock.Mock +} + +// BatchStore provides a mock function with given fields: blockID, transactionResultErrorMessages, batch +func (_m *TransactionResultErrorMessages) BatchStore(blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage, batch storage.ReaderBatchWriter) error { + ret := _m.Called(blockID, transactionResultErrorMessages, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.TransactionResultErrorMessage, storage.ReaderBatchWriter) error); ok { + r0 = rf(blockID, transactionResultErrorMessages, batch) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ByBlockID provides a mock function with given fields: id +func (_m *TransactionResultErrorMessages) ByBlockID(id flow.Identifier) ([]flow.TransactionResultErrorMessage, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 []flow.TransactionResultErrorMessage + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.TransactionResultErrorMessage, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.TransactionResultErrorMessage); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.TransactionResultErrorMessage) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionID provides a mock function with given fields: blockID, transactionID +func (_m *TransactionResultErrorMessages) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResultErrorMessage, error) { + ret := _m.Called(blockID, 
transactionID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionID") + } + + var r0 *flow.TransactionResultErrorMessage + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) (*flow.TransactionResultErrorMessage, error)); ok { + return rf(blockID, transactionID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) *flow.TransactionResultErrorMessage); ok { + r0 = rf(blockID, transactionID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionResultErrorMessage) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, flow.Identifier) error); ok { + r1 = rf(blockID, transactionID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionIndex provides a mock function with given fields: blockID, txIndex +func (_m *TransactionResultErrorMessages) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResultErrorMessage, error) { + ret := _m.Called(blockID, txIndex) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionIndex") + } + + var r0 *flow.TransactionResultErrorMessage + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) (*flow.TransactionResultErrorMessage, error)); ok { + return rf(blockID, txIndex) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) *flow.TransactionResultErrorMessage); ok { + r0 = rf(blockID, txIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionResultErrorMessage) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, uint32) error); ok { + r1 = rf(blockID, txIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Exists provides a mock function with given fields: blockID +func (_m *TransactionResultErrorMessages) Exists(blockID flow.Identifier) (bool, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for Exists") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (bool, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { + r0 = rf(blockID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Store provides a mock function with given fields: blockID, transactionResultErrorMessages +func (_m *TransactionResultErrorMessages) Store(blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage) error { + ret := _m.Called(blockID, transactionResultErrorMessages) + + if len(ret) == 0 { + panic("no return value specified for Store") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.TransactionResultErrorMessage) error); ok { + r0 = rf(blockID, transactionResultErrorMessages) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTransactionResultErrorMessages creates a new instance of TransactionResultErrorMessages. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewTransactionResultErrorMessages(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionResultErrorMessages { + mock := &TransactionResultErrorMessages{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/transaction_result_error_messages_reader.go b/storage/mock/transaction_result_error_messages_reader.go new file mode 100644 index 00000000000..c4005009fb2 --- /dev/null +++ b/storage/mock/transaction_result_error_messages_reader.go @@ -0,0 +1,145 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// TransactionResultErrorMessagesReader is an autogenerated mock type for the TransactionResultErrorMessagesReader type +type TransactionResultErrorMessagesReader struct { + mock.Mock +} + +// ByBlockID provides a mock function with given fields: id +func (_m *TransactionResultErrorMessagesReader) ByBlockID(id flow.Identifier) ([]flow.TransactionResultErrorMessage, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 []flow.TransactionResultErrorMessage + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.TransactionResultErrorMessage, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.TransactionResultErrorMessage); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.TransactionResultErrorMessage) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionID provides a mock function with given fields: blockID, transactionID +func (_m *TransactionResultErrorMessagesReader) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResultErrorMessage, error) { + ret := _m.Called(blockID, transactionID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionID") + } + + var r0 *flow.TransactionResultErrorMessage + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) (*flow.TransactionResultErrorMessage, error)); ok { + return rf(blockID, transactionID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) *flow.TransactionResultErrorMessage); ok { + r0 = rf(blockID, transactionID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionResultErrorMessage) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, flow.Identifier) error); ok { + r1 = rf(blockID, transactionID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionIndex provides a mock function with given fields: blockID, txIndex +func (_m *TransactionResultErrorMessagesReader) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResultErrorMessage, error) { + ret := _m.Called(blockID, txIndex) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionIndex") + } + + var r0 *flow.TransactionResultErrorMessage + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) (*flow.TransactionResultErrorMessage, error)); ok { + return rf(blockID, txIndex) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) *flow.TransactionResultErrorMessage); ok { + r0 = rf(blockID, txIndex) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*flow.TransactionResultErrorMessage) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, uint32) error); ok { + r1 = rf(blockID, txIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Exists provides a mock function with given fields: blockID +func (_m *TransactionResultErrorMessagesReader) Exists(blockID flow.Identifier) (bool, error) { + ret := _m.Called(blockID) + + if len(ret) == 0 { + panic("no return value specified for Exists") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (bool, error)); ok { + return rf(blockID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) bool); ok { + r0 = rf(blockID) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTransactionResultErrorMessagesReader creates a new instance of TransactionResultErrorMessagesReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionResultErrorMessagesReader(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionResultErrorMessagesReader { + mock := &TransactionResultErrorMessagesReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/transaction_results.go b/storage/mock/transaction_results.go index 33b975ff007..d94b4881ce7 100644 --- a/storage/mock/transaction_results.go +++ b/storage/mock/transaction_results.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -14,12 +14,34 @@ type TransactionResults struct { mock.Mock } +// BatchRemoveByBlockID provides a mock function with given fields: id, batch +func (_m *TransactionResults) BatchRemoveByBlockID(id flow.Identifier, batch storage.ReaderBatchWriter) error { + ret := _m.Called(id, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchRemoveByBlockID") + } + + var r0 error + if rf, ok := ret.Get(0).(func(flow.Identifier, storage.ReaderBatchWriter) error); ok { + r0 = rf(id, batch) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // BatchStore provides a mock function with given fields: blockID, transactionResults, batch -func (_m *TransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.BatchStorage) error { +func (_m *TransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.ReaderBatchWriter) error { ret := _m.Called(blockID, transactionResults, batch) + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + var r0 error - if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.TransactionResult, storage.BatchStorage) error); ok { + if rf, ok := ret.Get(0).(func(flow.Identifier, []flow.TransactionResult, storage.ReaderBatchWriter) error); ok { r0 = rf(blockID, transactionResults, batch) } else { r0 = ret.Error(0) @@ -32,6 +54,10 @@ func (_m *TransactionResults) BatchStore(blockID flow.Identifier, transactionRes func (_m *TransactionResults) ByBlockID(id flow.Identifier) ([]flow.TransactionResult, error) { ret := _m.Called(id) + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + var r0 []flow.TransactionResult var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) 
([]flow.TransactionResult, error)); ok { @@ -58,6 +84,10 @@ func (_m *TransactionResults) ByBlockID(id flow.Identifier) ([]flow.TransactionR func (_m *TransactionResults) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResult, error) { ret := _m.Called(blockID, transactionID) + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionID") + } + var r0 *flow.TransactionResult var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) (*flow.TransactionResult, error)); ok { @@ -84,6 +114,10 @@ func (_m *TransactionResults) ByBlockIDTransactionID(blockID flow.Identifier, tr func (_m *TransactionResults) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResult, error) { ret := _m.Called(blockID, txIndex) + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionIndex") + } + var r0 *flow.TransactionResult var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) (*flow.TransactionResult, error)); ok { @@ -106,13 +140,12 @@ func (_m *TransactionResults) ByBlockIDTransactionIndex(blockID flow.Identifier, return r0, r1 } -type mockConstructorTestingTNewTransactionResults interface { +// NewTransactionResults creates a new instance of TransactionResults. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionResults(t interface { mock.TestingT Cleanup(func()) -} - -// NewTransactionResults creates a new instance of TransactionResults. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTransactionResults(t mockConstructorTestingTNewTransactionResults) *TransactionResults { +}) *TransactionResults { mock := &TransactionResults{} mock.Mock.Test(t) diff --git a/storage/mock/transaction_results_reader.go b/storage/mock/transaction_results_reader.go new file mode 100644 index 00000000000..52f2b0f1ae1 --- /dev/null +++ b/storage/mock/transaction_results_reader.go @@ -0,0 +1,117 @@ +// Code generated by mockery. DO NOT EDIT. 
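// [Editor's sketch for the TransactionResults mock changed above — not part of this
// diff or of the file that follows.] The BatchStore signatures now take a
// storage.ReaderBatchWriter instead of the old BatchStorage, so a test can pass the
// ReaderBatchWriter mock from this same package as the batch argument:
package mock_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	storagemock "github.com/onflow/flow-go/storage/mock"
)

func TestBatchStoreResults(t *testing.T) {
	results := storagemock.NewTransactionResults(t)
	batch := storagemock.NewReaderBatchWriter(t) // stands in for a real pebble/badger batch

	// the same batch pointer is matched by identity; the results slice is left open
	results.On("BatchStore", flow.ZeroID, mock.Anything, batch).Return(nil)

	require.NoError(t, results.BatchStore(flow.ZeroID, nil, batch))
}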
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// TransactionResultsReader is an autogenerated mock type for the TransactionResultsReader type +type TransactionResultsReader struct { + mock.Mock +} + +// ByBlockID provides a mock function with given fields: id +func (_m *TransactionResultsReader) ByBlockID(id flow.Identifier) ([]flow.TransactionResult, error) { + ret := _m.Called(id) + + if len(ret) == 0 { + panic("no return value specified for ByBlockID") + } + + var r0 []flow.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) ([]flow.TransactionResult, error)); ok { + return rf(id) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) []flow.TransactionResult); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]flow.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionID provides a mock function with given fields: blockID, transactionID +func (_m *TransactionResultsReader) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResult, error) { + ret := _m.Called(blockID, transactionID) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionID") + } + + var r0 *flow.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) (*flow.TransactionResult, error)); ok { + return rf(blockID, transactionID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, flow.Identifier) *flow.TransactionResult); ok { + r0 = rf(blockID, transactionID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, flow.Identifier) error); ok { + r1 = rf(blockID, transactionID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ByBlockIDTransactionIndex provides a mock function with given fields: blockID, txIndex +func (_m *TransactionResultsReader) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResult, error) { + ret := _m.Called(blockID, txIndex) + + if len(ret) == 0 { + panic("no return value specified for ByBlockIDTransactionIndex") + } + + var r0 *flow.TransactionResult + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) (*flow.TransactionResult, error)); ok { + return rf(blockID, txIndex) + } + if rf, ok := ret.Get(0).(func(flow.Identifier, uint32) *flow.TransactionResult); ok { + r0 = rf(blockID, txIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionResult) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier, uint32) error); ok { + r1 = rf(blockID, txIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTransactionResultsReader creates a new instance of TransactionResultsReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewTransactionResultsReader(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionResultsReader { + mock := &TransactionResultsReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/transactions.go b/storage/mock/transactions.go index b15c922be60..c45143dcc6f 100644 --- a/storage/mock/transactions.go +++ b/storage/mock/transactions.go @@ -1,10 +1,12 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock import ( flow "github.com/onflow/flow-go/model/flow" mock "github.com/stretchr/testify/mock" + + storage "github.com/onflow/flow-go/storage" ) // Transactions is an autogenerated mock type for the Transactions type @@ -12,10 +14,32 @@ type Transactions struct { mock.Mock } +// BatchStore provides a mock function with given fields: tx, batch +func (_m *Transactions) BatchStore(tx *flow.TransactionBody, batch storage.ReaderBatchWriter) error { + ret := _m.Called(tx, batch) + + if len(ret) == 0 { + panic("no return value specified for BatchStore") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*flow.TransactionBody, storage.ReaderBatchWriter) error); ok { + r0 = rf(tx, batch) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ByID provides a mock function with given fields: txID func (_m *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, error) { ret := _m.Called(txID) + if len(ret) == 0 { + panic("no return value specified for ByID") + } + var r0 *flow.TransactionBody var r1 error if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.TransactionBody, error)); ok { @@ -42,6 +66,10 @@ func (_m *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, error func (_m *Transactions) Store(tx *flow.TransactionBody) error { ret := _m.Called(tx) + if len(ret) == 0 { + panic("no return value specified for Store") + } + var r0 error if rf, ok := ret.Get(0).(func(*flow.TransactionBody) error); ok { r0 = rf(tx) @@ -52,13 +80,12 @@ func (_m *Transactions) Store(tx *flow.TransactionBody) error { return r0 } -type mockConstructorTestingTNewTransactions interface { +// NewTransactions creates a new instance of Transactions. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactions(t interface { mock.TestingT Cleanup(func()) -} - -// NewTransactions creates a new instance of Transactions. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewTransactions(t mockConstructorTestingTNewTransactions) *Transactions { +}) *Transactions { mock := &Transactions{} mock.Mock.Test(t) diff --git a/storage/mock/transactions_reader.go b/storage/mock/transactions_reader.go new file mode 100644 index 00000000000..d14c0ef05e3 --- /dev/null +++ b/storage/mock/transactions_reader.go @@ -0,0 +1,57 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock + +import ( + flow "github.com/onflow/flow-go/model/flow" + mock "github.com/stretchr/testify/mock" +) + +// TransactionsReader is an autogenerated mock type for the TransactionsReader type +type TransactionsReader struct { + mock.Mock +} + +// ByID provides a mock function with given fields: txID +func (_m *TransactionsReader) ByID(txID flow.Identifier) (*flow.TransactionBody, error) { + ret := _m.Called(txID) + + if len(ret) == 0 { + panic("no return value specified for ByID") + } + + var r0 *flow.TransactionBody + var r1 error + if rf, ok := ret.Get(0).(func(flow.Identifier) (*flow.TransactionBody, error)); ok { + return rf(txID) + } + if rf, ok := ret.Get(0).(func(flow.Identifier) *flow.TransactionBody); ok { + r0 = rf(txID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flow.TransactionBody) + } + } + + if rf, ok := ret.Get(1).(func(flow.Identifier) error); ok { + r1 = rf(txID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewTransactionsReader creates a new instance of TransactionsReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTransactionsReader(t interface { + mock.TestingT + Cleanup(func()) +}) *TransactionsReader { + mock := &TransactionsReader{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mock/verifier.go b/storage/mock/verifier.go deleted file mode 100644 index 485eed29409..00000000000 --- a/storage/mock/verifier.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mock - -import mock "github.com/stretchr/testify/mock" - -// Verifier is an autogenerated mock type for the Verifier type -type Verifier struct { - mock.Mock -} - -// VerifyRegistersProof provides a mock function with given fields: registerIDs, stateCommitment, values, proof -func (_m *Verifier) VerifyRegistersProof(registerIDs [][]byte, stateCommitment []byte, values [][]byte, proof [][]byte) (bool, error) { - ret := _m.Called(registerIDs, stateCommitment, values, proof) - - var r0 bool - if rf, ok := ret.Get(0).(func([][]byte, []byte, [][]byte, [][]byte) bool); ok { - r0 = rf(registerIDs, stateCommitment, values, proof) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func([][]byte, []byte, [][]byte, [][]byte) error); ok { - r1 = rf(registerIDs, stateCommitment, values, proof) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/storage/mock/version_beacons.go b/storage/mock/version_beacons.go index dd06ce17dd2..7219f130a6c 100644 --- a/storage/mock/version_beacons.go +++ b/storage/mock/version_beacons.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.21.4. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package mock @@ -16,6 +16,10 @@ type VersionBeacons struct { func (_m *VersionBeacons) Highest(belowOrEqualTo uint64) (*flow.SealedVersionBeacon, error) { ret := _m.Called(belowOrEqualTo) + if len(ret) == 0 { + panic("no return value specified for Highest") + } + var r0 *flow.SealedVersionBeacon var r1 error if rf, ok := ret.Get(0).(func(uint64) (*flow.SealedVersionBeacon, error)); ok { @@ -38,13 +42,12 @@ func (_m *VersionBeacons) Highest(belowOrEqualTo uint64) (*flow.SealedVersionBea return r0, r1 } -type mockConstructorTestingTNewVersionBeacons interface { +// NewVersionBeacons creates a new instance of VersionBeacons. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVersionBeacons(t interface { mock.TestingT Cleanup(func()) -} - -// NewVersionBeacons creates a new instance of VersionBeacons. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewVersionBeacons(t mockConstructorTestingTNewVersionBeacons) *VersionBeacons { +}) *VersionBeacons { mock := &VersionBeacons{} mock.Mock.Test(t) diff --git a/storage/mock/views.go b/storage/mock/views.go deleted file mode 100644 index 54b9af071c0..00000000000 --- a/storage/mock/views.go +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mock - -import mock "github.com/stretchr/testify/mock" - -// Views is an autogenerated mock type for the Views type -type Views struct { - mock.Mock -} - -// Retrieve provides a mock function with given fields: action -func (_m *Views) Retrieve(action uint8) (uint64, error) { - ret := _m.Called(action) - - var r0 uint64 - if rf, ok := ret.Get(0).(func(uint8) uint64); ok { - r0 = rf(action) - } else { - r0 = ret.Get(0).(uint64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(uint8) error); ok { - r1 = rf(action) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Store provides a mock function with given fields: action, view -func (_m *Views) Store(action uint8, view uint64) error { - ret := _m.Called(action, view) - - var r0 error - if rf, ok := ret.Get(0).(func(uint8, uint64) error); ok { - r0 = rf(action, view) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/storage/mock/writer.go b/storage/mock/writer.go new file mode 100644 index 00000000000..55cf24ad9e8 --- /dev/null +++ b/storage/mock/writer.go @@ -0,0 +1,81 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock + +import ( + storage "github.com/onflow/flow-go/storage" + mock "github.com/stretchr/testify/mock" +) + +// Writer is an autogenerated mock type for the Writer type +type Writer struct { + mock.Mock +} + +// Delete provides a mock function with given fields: key +func (_m *Writer) Delete(key []byte) error { + ret := _m.Called(key) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]byte) error); ok { + r0 = rf(key) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteByRange provides a mock function with given fields: globalReader, startPrefix, endPrefix +func (_m *Writer) DeleteByRange(globalReader storage.Reader, startPrefix []byte, endPrefix []byte) error { + ret := _m.Called(globalReader, startPrefix, endPrefix) + + if len(ret) == 0 { + panic("no return value specified for DeleteByRange") + } + + var r0 error + if rf, ok := ret.Get(0).(func(storage.Reader, []byte, []byte) error); ok { + r0 = rf(globalReader, startPrefix, endPrefix) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Set provides a mock function with given fields: k, v +func (_m *Writer) Set(k []byte, v []byte) error { + ret := _m.Called(k, v) + + if len(ret) == 0 { + panic("no return value specified for Set") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]byte, []byte) error); ok { + r0 = rf(k, v) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewWriter creates a new instance of Writer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewWriter(t interface { + mock.TestingT + Cleanup(func()) +}) *Writer { + mock := &Writer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/storage/mocks/storage.go b/storage/mocks/storage.go index e8b1281377a..3bbe715f4ea 100644 --- a/storage/mocks/storage.go +++ b/storage/mocks/storage.go @@ -8,9 +8,9 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" + lockctx "github.com/jordanschalm/lockctx" flow "github.com/onflow/flow-go/model/flow" storage "github.com/onflow/flow-go/storage" - transaction "github.com/onflow/flow-go/storage/badger/transaction" ) // MockBlocks is a mock of Blocks interface. @@ -36,6 +36,20 @@ func (m *MockBlocks) EXPECT() *MockBlocksMockRecorder { return m.recorder } +// BatchStore mocks base method. +func (m *MockBlocks) BatchStore(arg0 lockctx.Proof, arg1 storage.ReaderBatchWriter, arg2 *flow.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchStore", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// BatchStore indicates an expected call of BatchStore. +func (mr *MockBlocksMockRecorder) BatchStore(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchStore", reflect.TypeOf((*MockBlocks)(nil).BatchStore), arg0, arg1, arg2) +} + // ByCollectionID mocks base method. func (m *MockBlocks) ByCollectionID(arg0 flow.Identifier) (*flow.Block, error) { m.ctrl.T.Helper() @@ -81,21 +95,6 @@ func (mr *MockBlocksMockRecorder) ByID(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByID", reflect.TypeOf((*MockBlocks)(nil).ByID), arg0) } -// GetLastFullBlockHeight mocks base method. -func (m *MockBlocks) GetLastFullBlockHeight() (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLastFullBlockHeight") - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetLastFullBlockHeight indicates an expected call of GetLastFullBlockHeight. -func (mr *MockBlocksMockRecorder) GetLastFullBlockHeight() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastFullBlockHeight", reflect.TypeOf((*MockBlocks)(nil).GetLastFullBlockHeight)) -} - // IndexBlockForCollections mocks base method. func (m *MockBlocks) IndexBlockForCollections(arg0 flow.Identifier, arg1 []flow.Identifier) error { m.ctrl.T.Helper() @@ -110,62 +109,6 @@ func (mr *MockBlocksMockRecorder) IndexBlockForCollections(arg0, arg1 interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IndexBlockForCollections", reflect.TypeOf((*MockBlocks)(nil).IndexBlockForCollections), arg0, arg1) } -// InsertLastFullBlockHeightIfNotExists mocks base method. -func (m *MockBlocks) InsertLastFullBlockHeightIfNotExists(arg0 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "InsertLastFullBlockHeightIfNotExists", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// InsertLastFullBlockHeightIfNotExists indicates an expected call of InsertLastFullBlockHeightIfNotExists. -func (mr *MockBlocksMockRecorder) InsertLastFullBlockHeightIfNotExists(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertLastFullBlockHeightIfNotExists", reflect.TypeOf((*MockBlocks)(nil).InsertLastFullBlockHeightIfNotExists), arg0) -} - -// Store mocks base method. 
-func (m *MockBlocks) Store(arg0 *flow.Block) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Store", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Store indicates an expected call of Store. -func (mr *MockBlocksMockRecorder) Store(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockBlocks)(nil).Store), arg0) -} - -// StoreTx mocks base method. -func (m *MockBlocks) StoreTx(arg0 *flow.Block) func(*transaction.Tx) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StoreTx", arg0) - ret0, _ := ret[0].(func(*transaction.Tx) error) - return ret0 -} - -// StoreTx indicates an expected call of StoreTx. -func (mr *MockBlocksMockRecorder) StoreTx(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreTx", reflect.TypeOf((*MockBlocks)(nil).StoreTx), arg0) -} - -// UpdateLastFullBlockHeight mocks base method. -func (m *MockBlocks) UpdateLastFullBlockHeight(arg0 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateLastFullBlockHeight", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateLastFullBlockHeight indicates an expected call of UpdateLastFullBlockHeight. -func (mr *MockBlocksMockRecorder) UpdateLastFullBlockHeight(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateLastFullBlockHeight", reflect.TypeOf((*MockBlocks)(nil).UpdateLastFullBlockHeight), arg0) -} - // MockHeaders is a mock of Headers interface. type MockHeaders struct { ctrl *gomock.Controller @@ -264,20 +207,6 @@ func (mr *MockHeadersMockRecorder) Exists(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exists", reflect.TypeOf((*MockHeaders)(nil).Exists), arg0) } -// Store mocks base method. -func (m *MockHeaders) Store(arg0 *flow.Header) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Store", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Store indicates an expected call of Store. -func (mr *MockHeadersMockRecorder) Store(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockHeaders)(nil).Store), arg0) -} - // MockPayloads is a mock of Payloads interface. type MockPayloads struct { ctrl *gomock.Controller @@ -316,20 +245,6 @@ func (mr *MockPayloadsMockRecorder) ByBlockID(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByBlockID", reflect.TypeOf((*MockPayloads)(nil).ByBlockID), arg0) } -// Store mocks base method. -func (m *MockPayloads) Store(arg0 flow.Identifier, arg1 *flow.Payload) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Store", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Store indicates an expected call of Store. -func (mr *MockPayloadsMockRecorder) Store(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockPayloads)(nil).Store), arg0, arg1) -} - // MockCollections is a mock of Collections interface. type MockCollections struct { ctrl *gomock.Controller @@ -353,6 +268,21 @@ func (m *MockCollections) EXPECT() *MockCollectionsMockRecorder { return m.recorder } +// BatchStoreAndIndexByTransaction mocks base method. 
+func (m *MockCollections) BatchStoreAndIndexByTransaction(arg0 lockctx.Proof, arg1 *flow.Collection, arg2 storage.ReaderBatchWriter) (flow.LightCollection, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchStoreAndIndexByTransaction", arg0, arg1, arg2) + ret0, _ := ret[0].(flow.LightCollection) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BatchStoreAndIndexByTransaction indicates an expected call of BatchStoreAndIndexByTransaction. +func (mr *MockCollectionsMockRecorder) BatchStoreAndIndexByTransaction(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchStoreAndIndexByTransaction", reflect.TypeOf((*MockCollections)(nil).BatchStoreAndIndexByTransaction), arg0, arg1, arg2) +} + // ByID mocks base method. func (m *MockCollections) ByID(arg0 flow.Identifier) (*flow.Collection, error) { m.ctrl.T.Helper() @@ -413,11 +343,12 @@ func (mr *MockCollectionsMockRecorder) Remove(arg0 interface{}) *gomock.Call { } // Store mocks base method. -func (m *MockCollections) Store(arg0 *flow.Collection) error { +func (m *MockCollections) Store(arg0 *flow.Collection) (flow.LightCollection, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Store", arg0) - ret0, _ := ret[0].(error) - return ret0 + ret0, _ := ret[0].(flow.LightCollection) + ret1, _ := ret[1].(error) + return ret0, ret1 } // Store indicates an expected call of Store. @@ -426,32 +357,19 @@ func (mr *MockCollectionsMockRecorder) Store(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockCollections)(nil).Store), arg0) } -// StoreLight mocks base method. -func (m *MockCollections) StoreLight(arg0 *flow.LightCollection) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StoreLight", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// StoreLight indicates an expected call of StoreLight. -func (mr *MockCollectionsMockRecorder) StoreLight(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreLight", reflect.TypeOf((*MockCollections)(nil).StoreLight), arg0) -} - -// StoreLightAndIndexByTransaction mocks base method. -func (m *MockCollections) StoreLightAndIndexByTransaction(arg0 *flow.LightCollection) error { +// StoreAndIndexByTransaction mocks base method. +func (m *MockCollections) StoreAndIndexByTransaction(arg0 lockctx.Proof, arg1 *flow.Collection) (flow.LightCollection, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StoreLightAndIndexByTransaction", arg0) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "StoreAndIndexByTransaction", arg0, arg1) + ret0, _ := ret[0].(flow.LightCollection) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// StoreLightAndIndexByTransaction indicates an expected call of StoreLightAndIndexByTransaction. -func (mr *MockCollectionsMockRecorder) StoreLightAndIndexByTransaction(arg0 interface{}) *gomock.Call { +// StoreAndIndexByTransaction indicates an expected call of StoreAndIndexByTransaction. 
+func (mr *MockCollectionsMockRecorder) StoreAndIndexByTransaction(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreLightAndIndexByTransaction", reflect.TypeOf((*MockCollections)(nil).StoreLightAndIndexByTransaction), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreAndIndexByTransaction", reflect.TypeOf((*MockCollections)(nil).StoreAndIndexByTransaction), arg0, arg1) } // MockCommits is a mock of Commits interface. @@ -478,7 +396,7 @@ func (m *MockCommits) EXPECT() *MockCommitsMockRecorder { } // BatchRemoveByBlockID mocks base method. -func (m *MockCommits) BatchRemoveByBlockID(arg0 flow.Identifier, arg1 storage.BatchStorage) error { +func (m *MockCommits) BatchRemoveByBlockID(arg0 flow.Identifier, arg1 storage.ReaderBatchWriter) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BatchRemoveByBlockID", arg0, arg1) ret0, _ := ret[0].(error) @@ -492,17 +410,17 @@ func (mr *MockCommitsMockRecorder) BatchRemoveByBlockID(arg0, arg1 interface{}) } // BatchStore mocks base method. -func (m *MockCommits) BatchStore(arg0 flow.Identifier, arg1 flow.StateCommitment, arg2 storage.BatchStorage) error { +func (m *MockCommits) BatchStore(arg0 lockctx.Proof, arg1 flow.Identifier, arg2 flow.StateCommitment, arg3 storage.ReaderBatchWriter) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchStore", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "BatchStore", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // BatchStore indicates an expected call of BatchStore. -func (mr *MockCommitsMockRecorder) BatchStore(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockCommitsMockRecorder) BatchStore(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchStore", reflect.TypeOf((*MockCommits)(nil).BatchStore), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchStore", reflect.TypeOf((*MockCommits)(nil).BatchStore), arg0, arg1, arg2, arg3) } // ByBlockID mocks base method. @@ -520,20 +438,6 @@ func (mr *MockCommitsMockRecorder) ByBlockID(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByBlockID", reflect.TypeOf((*MockCommits)(nil).ByBlockID), arg0) } -// Store mocks base method. -func (m *MockCommits) Store(arg0 flow.Identifier, arg1 flow.StateCommitment) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Store", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Store indicates an expected call of Store. -func (mr *MockCommitsMockRecorder) Store(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockCommits)(nil).Store), arg0, arg1) -} - // MockEvents is a mock of Events interface. type MockEvents struct { ctrl *gomock.Controller @@ -558,7 +462,7 @@ func (m *MockEvents) EXPECT() *MockEventsMockRecorder { } // BatchRemoveByBlockID mocks base method. -func (m *MockEvents) BatchRemoveByBlockID(arg0 flow.Identifier, arg1 storage.BatchStorage) error { +func (m *MockEvents) BatchRemoveByBlockID(arg0 flow.Identifier, arg1 storage.ReaderBatchWriter) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BatchRemoveByBlockID", arg0, arg1) ret0, _ := ret[0].(error) @@ -572,7 +476,7 @@ func (mr *MockEventsMockRecorder) BatchRemoveByBlockID(arg0, arg1 interface{}) * } // BatchStore mocks base method. 
-func (m *MockEvents) BatchStore(arg0 flow.Identifier, arg1 []flow.EventsList, arg2 storage.BatchStorage) error { +func (m *MockEvents) BatchStore(arg0 flow.Identifier, arg1 []flow.EventsList, arg2 storage.ReaderBatchWriter) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BatchStore", arg0, arg1, arg2) ret0, _ := ret[0].(error) @@ -645,6 +549,20 @@ func (mr *MockEventsMockRecorder) ByBlockIDTransactionIndex(arg0, arg1 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ByBlockIDTransactionIndex", reflect.TypeOf((*MockEvents)(nil).ByBlockIDTransactionIndex), arg0, arg1) } +// Store mocks base method. +func (m *MockEvents) Store(arg0 flow.Identifier, arg1 []flow.EventsList) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Store", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Store indicates an expected call of Store. +func (mr *MockEventsMockRecorder) Store(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockEvents)(nil).Store), arg0, arg1) +} + // MockServiceEvents is a mock of ServiceEvents interface. type MockServiceEvents struct { ctrl *gomock.Controller @@ -669,7 +587,7 @@ func (m *MockServiceEvents) EXPECT() *MockServiceEventsMockRecorder { } // BatchRemoveByBlockID mocks base method. -func (m *MockServiceEvents) BatchRemoveByBlockID(arg0 flow.Identifier, arg1 storage.BatchStorage) error { +func (m *MockServiceEvents) BatchRemoveByBlockID(arg0 flow.Identifier, arg1 storage.ReaderBatchWriter) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BatchRemoveByBlockID", arg0, arg1) ret0, _ := ret[0].(error) @@ -683,7 +601,7 @@ func (mr *MockServiceEventsMockRecorder) BatchRemoveByBlockID(arg0, arg1 interfa } // BatchStore mocks base method. -func (m *MockServiceEvents) BatchStore(arg0 flow.Identifier, arg1 []flow.Event, arg2 storage.BatchStorage) error { +func (m *MockServiceEvents) BatchStore(arg0 flow.Identifier, arg1 []flow.Event, arg2 storage.ReaderBatchWriter) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BatchStore", arg0, arg1, arg2) ret0, _ := ret[0].(error) @@ -734,8 +652,22 @@ func (m *MockTransactionResults) EXPECT() *MockTransactionResultsMockRecorder { return m.recorder } +// BatchRemoveByBlockID mocks base method. +func (m *MockTransactionResults) BatchRemoveByBlockID(arg0 flow.Identifier, arg1 storage.ReaderBatchWriter) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchRemoveByBlockID", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// BatchRemoveByBlockID indicates an expected call of BatchRemoveByBlockID. +func (mr *MockTransactionResultsMockRecorder) BatchRemoveByBlockID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchRemoveByBlockID", reflect.TypeOf((*MockTransactionResults)(nil).BatchRemoveByBlockID), arg0, arg1) +} + // BatchStore mocks base method. 
-func (m *MockTransactionResults) BatchStore(arg0 flow.Identifier, arg1 []flow.TransactionResult, arg2 storage.BatchStorage) error {
+func (m *MockTransactionResults) BatchStore(arg0 flow.Identifier, arg1 []flow.TransactionResult, arg2 storage.ReaderBatchWriter) error {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "BatchStore", arg0, arg1, arg2)
 	ret0, _ := ret[0].(error)
diff --git a/storage/node_disallow_list.go b/storage/node_disallow_list.go
new file mode 100644
index 00000000000..0c4cb77cb65
--- /dev/null
+++ b/storage/node_disallow_list.go
@@ -0,0 +1,17 @@
+package storage
+
+import "github.com/onflow/flow-go/model/flow"
+
+// NodeDisallowList represents persistent storage for the node disallow list.
+type NodeDisallowList interface {
+	// Store writes the given disallowList to the database.
+	// To avoid legacy entries in the database, we purge
+	// the entire database entry if disallowList is empty.
+	// No errors are expected during normal operations.
+	Store(disallowList map[flow.Identifier]struct{}) error
+
+	// Retrieve reads the set of disallowed nodes from the database.
+	// No error is returned if no database entry exists.
+	// No errors are expected during normal operations.
+	Retrieve(disallowList *map[flow.Identifier]struct{}) error
+}
diff --git a/storage/operation/Documentation-Guidelines.md b/storage/operation/Documentation-Guidelines.md
new file mode 100644
index 00000000000..396d0476ba7
--- /dev/null
+++ b/storage/operation/Documentation-Guidelines.md
@@ -0,0 +1,212 @@
+# Guidelines for documenting low-level primitives for persistent storage
+
+The folder `storage/operation` contains low-level primitives for persistent storage and retrieval of data structures from a database.
+We accept that these functions have to be used _carefully_ by engineers who are knowledgeable about the
+safety limitations of these functions, so as to avoid data corruption. In order to facilitate correct usage, we need to diligently document
+which aspects have to be paid attention to when calling these functions.
+
+Proceed as follows:
+1. Look at one file in `storage/operation` at a time (skip test files for now).
+2. Go over the functions contained in the file one by one and for each function decide whether it is for writing or reading data.
+3. For each function, provide concise yet precise documentation covering
+   - what this function is for
+   - the assumptions this function makes about its inputs
+   - what has to be paid attention to when calling this function
+   - expected error returns during normal operations
+   - follow our godocs policy `docs/agents/GoDocs.md`
+
+Guidelines:
+- Tune your documentation on a case-by-case basis to reflect the function's specific details.
+- Avoid overly generic documentation.
+- Stick to a uniform framing and wording.
+- Be very concise and precise.
+- Analyze the implementation to make the correct statements!
+- Double-check your work.
+
+## High level structure
+
+On the highest level, there are functions for storing data and other functions for retrieving data. The naming indicates which class
+a function belongs to, though there is no absolutely uniform convention. For example, some functions for loading data start with `Retrieve`,
+while others start with `Lookup`, and additional names might be used as well. So pay close attention to the naming of the function.
+
+Conceptually, we have data structures that contain certain fields. Furthermore, most data structures we deal with provide the functionality
+to compute a cryptographic hash of their contents, which is typically referred to as "ID". We store data as key-value pairs, where the key takes one of two forms:
+(i) Frequently, the key is the cryptographic hash of the data structure itself.
+(ii) Frequently, we break up the storage of compound objects, storing their sub-data structures individually. For example, a block contains the payload, the payload contains Seals. Frequently, we create mappings from the ID of the high-level data structure (e.g. block ID) to the IDs of the lower-level objects it contains (e.g. Seals). For example, `operation.IndexPayloadSeals`.
+
+(i) and (ii) are fundamentally different: In case (i) the key is derived from the value in a collision-resistant manner (via cryptographic hash).
+Meaning, if we change the value, the key should also change. Hence, unchecked overwrites pose no risk of data corruption, because for the same key,
+we expect the same value. In comparison, for case (ii) we derive the key from the _context_ of the value. Note that the Flow protocol mandates that
+for a previously persisted key, the data is never changed to a different value. Changing data could cause the node to publish inconsistent data and
+to be slashed, or the protocol to be compromised as a whole. In many cases, the caller has to be cautious about avoiding usages causing data
+corruption. This is because we don't want to implement override protections in all low-level storage functions of type (ii) for performance
+reasons. Rather, we delegate the responsibility for cohesive checks to the caller, which must be clearly documented.
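+
+To make the distinction concrete, here is a minimal sketch contrasting the two key schemes. The prefix constants and the `MakePrefix` helper are the ones used in the examples below; the surrounding variables are illustrative only:
+
+```golang
+// Type (i): the key is derived from the value itself via a collision-resistant hash.
+key := MakePrefix(codeCollection, collection.ID())
+
+// Type (ii): the key is derived from the value's context, here the ID of the containing block.
+key = MakePrefix(codePayloadSeals, blockID)
+```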
+
+### Functions for reading data
+
+When generating documentation for functions that read data, carefully differentiate between functions of type (i) and (ii).
+
+#### Type (i) functions for reading data
+
+As an example for functions of type (i), consider `operation.RetrieveSeal`:
+```golang
+// RetrieveSeal retrieves [flow.Seal] by its ID.
+// Expected error returns during normal operations:
+//   - [storage.ErrNotFound] if no seal with the specified `sealID` is known.
+func RetrieveSeal(r storage.Reader, sealID flow.Identifier, seal *flow.Seal) error
+```
+* We document the struct type that is retrieved, here flow.Seal. Be mindful whether we are retrieving an individual struct or a slice.
+* We document that the key we look up is the struct's own ID.
+* We document the "Expected errors during normal operations:" (use this phrase)
+  - in all cases, this will be the error storage.ErrNotFound, followed by a short description that no object with the specified ID is known.
+
+#### Type (ii) functions for reading data
+
+As an example for functions of type (ii), consider `operation.LookupPayloadSeals`:
+```golang
+// LookupPayloadSeals retrieves the list of Seals that were included in the payload
+// of the specified block. For every known block, this index should be populated (at or above the root block).
+// Expected error returns during normal operations:
+//   - [storage.ErrNotFound] if `blockID` does not refer to a known block
+func LookupPayloadSeals(r storage.Reader, blockID flow.Identifier, sealIDs *[]flow.Identifier) error
+```
+* We document the struct type that is retrieved, here a list of Seals. Be mindful whether we are retrieving an individual struct or a slice.
+* Document that the lookup key is the ID of the block containing the data structure. You can use our standard shorthand in this case, and just write that we are looking up X by Y containing X.
+* We state if the index is populated for every known struct (which is typically the case). Consult the places in the code where the corresponding index is written to determine when the index is populated.
+* We document the "Expected errors during normal operations" (use this phrase). Typically, the error explanation is that no struct Y containing X is known. Typical caller-side handling is sketched below.
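+
+For illustration, a hedged sketch of how a caller would treat the expected error of a type (ii) lookup (the surrounding variables are illustrative only):
+
+```golang
+var sealIDs []flow.Identifier
+err := operation.LookupPayloadSeals(r, blockID, &sealIDs)
+if errors.Is(err, storage.ErrNotFound) {
+	// expected during normal operations: blockID does not refer to a known block
+} else if err != nil {
+	// any other error is an unexpected exception
+}
+```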
+
+### Functions for writing data
+
+When generating documentation for functions that write data, carefully differentiate between functions of type (i) and (ii).
+For type (i), you need to carefully differentiate two sub-cases (i.a) and (i.b). Analogously, for type (ii),
+you need to carefully differentiate two sub-cases (ii.a) and (ii.b).
+
+#### Type (i.a) functions for writing data
+
+As an example for functions of type (i.a), consider `operation.UpsertCollection`:
+```golang
+// UpsertCollection inserts a light collection into the storage, keyed by its ID.
+//
+// If the collection already exists, it will be overwritten. Note that here, the key (collection ID) is derived
+// from the value (collection) via a collision-resistant hash function. Hence, unchecked overwrites pose no risk
+// of data corruption, because for the same key, we expect the same value.
+//
+// No error returns are expected during normal operation.
+func UpsertCollection(w storage.Writer, collection *flow.LightCollection) error {
+	return UpsertByKey(w, MakePrefix(codeCollection, collection.ID()), collection)
+}
+```
+Analyze the implementation! Here, the method itself computes the ID (i.e. cryptographic hash).
+In this case, the function contains internal protections against the caller accidentally corrupting data.
+Only functions that store the struct by its own ID _and_ contain internal safeguards against accidentally corrupting data are of type (i.a)!
+
+* We document the struct type that is stored, here a light collection. Be mindful whether we are storing an individual struct or a slice.
+* We state whether the method will overwrite existing data, and then explain why this is safe.
+* We state which errors are expected during normal operations (here none).
+
+#### Type (i.b) functions for writing data
+
+As an example for functions of type (i.b), consider `operation.InsertSeal`:
+
+```golang
+// InsertSeal inserts a [flow.Seal] into the database, keyed by its ID.
+//
+// CAUTION: The caller must ensure sealID is a collision-resistant hash of the provided seal!
+// This method silently overrides existing data, which is safe only if for the same key, we
+// always write the same value.
+//
+// No error returns are expected during normal operation.
+func InsertSeal(w storage.Writer, sealID flow.Identifier, seal *flow.Seal) error {
+	return UpsertByKey(w, MakePrefix(codeSeal, sealID), seal)
+}
+```
+Analyze the implementation! Here, the method itself receives the ID (i.e. the cryptographic hash) of the object it is storing as an input. In this case, the function requires the caller to precompute the ID of the struct and provide it as an input. Only functions that store the struct by its own ID _but_ require the caller to provide this ID are of type (i.b)!
+
+* We document the struct type that is stored, here flow.Seal. Be mindful whether we are storing an individual struct or a slice.
+* We document the key which we use (here "its ID").
+* With a "CAUTION" statement, we document the requirement that the caller must ensure that the key is a collision-resistant hash of the provided data struct (see the sketch below).
+* We state which errors are expected during normal operations (here none).
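+
+A minimal caller-side sketch for type (i.b): deriving the key from the value right at the call site keeps the collision-resistance guarantee intact (error handling is illustrative only):
+
+```golang
+// safe: sealID is computed from the seal itself
+if err := operation.InsertSeal(w, seal.ID(), seal); err != nil {
+	return fmt.Errorf("could not insert seal: %w", err)
+}
+```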
+
+#### Type (ii.a) functions for writing data
+
+As an example for functions of type (ii.a), consider `operation.IndexStateCommitment`:
+
+```golang
+// IndexStateCommitment indexes a state commitment by the block ID whose execution results in that state.
+// The function ensures data integrity by first checking if a commitment already exists for the given block
+// and rejecting overwrites with different values. This function is idempotent, i.e. repeated calls with the
+// *initially* indexed value are no-ops.
+//
+// CAUTION:
+//   - The check that no value is already stored and the subsequent write must together be atomic to prevent data corruption.
+//     The caller must acquire the [storage.LockInsertOwnReceipt] and hold it until the database write has been committed.
+//
+// Expected error returns during normal operations:
+//   - [storage.ErrDataMismatch] if a *different* state commitment is already indexed for the same block ID
+func IndexStateCommitment(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, commit flow.StateCommitment) error {
+	if !lctx.HoldsLock(storage.LockInsertOwnReceipt) {
+		return fmt.Errorf("cannot index state commitment without holding lock %s", storage.LockInsertOwnReceipt)
+	}
+
+	var existingCommit flow.StateCommitment
+	err := LookupStateCommitment(rw.GlobalReader(), blockID, &existingCommit) // on happy path, i.e. nothing stored yet, we expect `storage.ErrNotFound`
+	if err == nil { // Value for this key already exists! Need to check for data mismatch:
+		if existingCommit == commit {
+			return nil // The commit already exists, no need to index again
+		}
+		return fmt.Errorf("commit for block %v already exists with different value, (existing: %v, new: %v), %w", blockID, existingCommit, commit, storage.ErrDataMismatch)
+	} else if !errors.Is(err, storage.ErrNotFound) {
+		return fmt.Errorf("could not check existing state commitment: %w", err)
+	}
+
+	return UpsertByKey(rw.Writer(), MakePrefix(codeCommit, blockID), commit)
+}
+```
+Analyze the implementation! Only functions that internally implement safeguards against overwriting a key-value pair with _different_ data for the same key are of type (ii.a)!
+
+* We document the struct type that is stored, here `flow.StateCommitment`. If applicable, we also document additional key-value pairs that are persisted as part of this function (here, none). Analyze the implementation.
+* We concisely document by which means the implementation ensures data integrity. For functions of type (ii.a), we typically just attempt to read the value for the respective key. You may adapt the explanation from this example to reflect the specifics of the implementation. Note that the behaviour might be different if a value has previously been stored. Analyze the implementation.
+* With a "CAUTION" statement, we concisely document the requirement that the read for the data integrity check and the subsequent write must happen atomically. This requires synchronization, and hence locking. We document which locks are required to be held by the caller (a caller-side sketch follows this list).
+* Analyze the implementation to decide whether additional cautionary statements are required to reduce the probability of accidental bugs.
+* We state which errors are expected during normal operations (here `storage.ErrDataMismatch`) and the condition under which they occur. Analyze the implementation to make the correct statements!
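+
+For illustration, a hedged caller-side sketch (the `lockManager`, `db`, and surrounding variables are illustrative only; what matters is that the lock is held until the batch has been committed):
+
+```golang
+lctx := lockManager.NewContext()
+defer lctx.Release()
+if err := lctx.AcquireLock(storage.LockInsertOwnReceipt); err != nil {
+	return err
+}
+// on success, the batch is committed before WithReaderBatchWriter returns,
+// so the lock is held for both the integrity check and the write
+return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+	return operation.IndexStateCommitment(lctx, rw, blockID, commit)
+})
+```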
+
+
+#### Type (ii.b) functions for writing data
+
+As an example for functions of type (ii.b), consider `operation.IndexPayloadSeals`:
+
+```golang
+// IndexPayloadSeals indexes the given Seal IDs by the block ID.
+//
+// CAUTION:
+//   - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed.
+//   - OVERWRITES existing data (potential for data corruption):
+//     This method silently overrides existing data without any sanity checks whether data for the same key already exists.
+//     Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different
+//     value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be
+//     compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof
+//     serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere
+//     ATOMICALLY with this write operation.
+//
+// No error returns are expected during normal operation.
+func IndexPayloadSeals(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, sealIDs []flow.Identifier) error {
+	if !lctx.HoldsLock(storage.LockInsertBlock) {
+		return fmt.Errorf("cannot index seal for blockID %v without holding lock %s",
+			blockID, storage.LockInsertBlock)
+	}
+	return UpsertByKey(w, MakePrefix(codePayloadSeals, blockID), sealIDs)
+}
+```
+
+Analyze the implementation! Only functions that delegate the check whether an entry with the specified key already exists to the caller are of type (ii.b)!
+
+* We document the struct type that is stored, here "the given Seal". If applicable, we also document additional key-value pairs that are persisted as part of this function (here none). Analyze the implementation.
+* With a "CAUTION" statement, we document that the caller must provide protections against accidental overrides (see the sketch at the end of this document). Typically, those protections require reads happening in one atomic operation with the writes. To perform those reads atomically with the writes, the caller is intended to hold the specified locks and only release them after the database writes have been committed.
+  - The first bullet point in the CAUTION statement specifies which locks the caller must hold and that those locks are to be held until the writes have been committed.
+  - The second bullet point in the CAUTION statement emphasizes that the caller must provide protections against accidental overrides with different data. You may copy the wording of the second bullet point. It is generic enough, so it should apply in the majority of cases.
+* We state which errors are expected during normal operations (here none) and the condition under which they occur. Analyze the implementation to make the correct statements!
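+
+To close, a hedged sketch of the caller-side deduplication pattern for type (ii.b). Lock acquisition is elided and the surrounding variables are illustrative; the point is that the existence check and the write happen in one batch, while the caller holds [storage.LockInsertBlock] until the batch has been committed:
+
+```golang
+var existing []flow.Identifier
+err := operation.LookupPayloadSeals(rw.GlobalReader(), blockID, &existing)
+if err == nil {
+	return nil // already indexed; the protocol forbids changing the value
+}
+if !errors.Is(err, storage.ErrNotFound) {
+	return fmt.Errorf("could not check for existing index: %w", err)
+}
+return operation.IndexPayloadSeals(lctx, rw.Writer(), blockID, sealIDs)
+```
+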
diff --git a/storage/operation/approvals.go b/storage/operation/approvals.go
new file mode 100644
index 00000000000..08903484a34
--- /dev/null
+++ b/storage/operation/approvals.go
@@ -0,0 +1,90 @@
+package operation
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/storage"
+)
+
+// RetrieveResultApproval retrieves an approval by its ID.
+// Expected error returns during normal operations:
+//   - `storage.ErrNotFound` if no Approval with the given ID has been stored
+func RetrieveResultApproval(r storage.Reader, approvalID flow.Identifier, approval *flow.ResultApproval) error {
+	return RetrieveByKey(r, MakePrefix(codeResultApproval, approvalID), approval)
+}
+
+// InsertAndIndexResultApproval atomically performs the following storage operations:
+//  1. Store the ResultApproval by its ID (in this step, accidental overwrites with inconsistent values
+//     are prevented by using a collision-resistant hash to derive the key from the value)
+//  2. Index the approval by the executed chunk, specifically the key pair (ExecutionResultID, chunk index).
+//     - first, we ensure that no _different_ approval has already been indexed for the same key pair
+//     - only if the prior check succeeds, we write the index to the database
+//
+// CAUTION:
+//   - In general, the Flow protocol requires multiple approvals for the same chunk from different
+//     verification nodes. In other words, there are multiple different approvals for the same chunk.
+//     Therefore, this index Executed Chunk ➜ ResultApproval ID is *only safe* to be used by
+//     Verification Nodes for tracking their own approvals (for the same ExecutionResult, a Verifier
+//     will always produce the same approval).
+//   - In order to make sure only one approval is indexed for the chunk, _all calls_ to
+//     `InsertAndIndexResultApproval` must be synchronized by the higher-level logic. The lockctx.Proof
+//     proves that the higher-level logic holds the lock while inserting the approval, after having
+//     checked that no conflicting approval is already indexed.
+//
+// Expected error returns during normal operations:
+//   - `storage.ErrDataMismatch` if a *different* approval for the same key pair (ExecutionResultID, chunk index) is already indexed
+func InsertAndIndexResultApproval(approval *flow.ResultApproval) func(lctx lockctx.Proof, rw storage.ReaderBatchWriter) error {
+	approvalID := approval.ID()
+	resultID := approval.Body.ExecutionResultID
+	chunkIndex := approval.Body.ChunkIndex
+
+	// the following functors allow encoding to be done before acquiring the lock
+	inserting := Upserting(MakePrefix(codeResultApproval, approvalID), approval)
+	indexing := Upserting(MakePrefix(codeIndexResultApprovalByChunk, resultID, chunkIndex), approvalID)
+
+	return func(lctx lockctx.Proof, rw storage.ReaderBatchWriter) error {
+		if !lctx.HoldsLock(storage.LockIndexResultApproval) {
+			return fmt.Errorf("missing lock for index result approval for result: %v", resultID)
+		}
+
+		var storedApprovalID flow.Identifier
+		err := LookupResultApproval(rw.GlobalReader(), resultID, chunkIndex, &storedApprovalID)
+		if err == nil {
+			if storedApprovalID != approvalID {
+				return fmt.Errorf("attempting to store conflicting approval (result: %v, chunk index: %d): storing: %v, stored: %v. %w",
+					resultID, chunkIndex, approvalID, storedApprovalID, storage.ErrDataMismatch)
+			}
+			return nil // already stored and indexed
+		}
+		if !errors.Is(err, storage.ErrNotFound) { // `storage.ErrNotFound` is expected, as this indicates that no approval is indexed yet; anything else is an exception
+			return fmt.Errorf("could not lookup result approval ID: %w", irrecoverable.NewException(err))
+		}
+
+		err = inserting(rw.Writer())
+		if err != nil {
+			return fmt.Errorf("could not store result approval: %w", err)
+		}
+
+		err = indexing(rw.Writer())
+		if err != nil {
+			return fmt.Errorf("could not index result approval: %w", err)
+		}
+
+		return nil
+	}
+}
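+
+// Example usage (illustrative sketch, not part of the storage API): a Verification Node
+// persists its own approval by preparing the functor first (encoding happens outside the
+// lock) and invoking it while holding the lock until the batch has been committed.
+// `lockManager` and `db` are illustrative names:
+//
+//	store := InsertAndIndexResultApproval(approval) // encode before locking
+//	lctx := lockManager.NewContext()
+//	defer lctx.Release()
+//	if err := lctx.AcquireLock(storage.LockIndexResultApproval); err != nil {
+//		return err
+//	}
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		return store(lctx, rw)
+//	})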
+// LookupResultApproval finds a ResultApproval by result ID and chunk index.
+// Expected error returns during normal operations:
+//   - `storage.ErrNotFound` if no Approval for the given key (resultID, chunkIndex) has been stored
+//
+// NOTE that the Flow protocol requires multiple approvals for the same chunk from different verification
+// nodes. In other words, there are multiple different approvals for the same chunk. Therefore, the index
+// Executed Chunk ➜ ResultApproval ID (queried here) is *only safe* to be used by Verification Nodes
+// for tracking their own approvals (for the same ExecutionResult, a Verifier will always produce the same approval).
+func LookupResultApproval(r storage.Reader, resultID flow.Identifier, chunkIndex uint64, approvalID *flow.Identifier) error {
+	return RetrieveByKey(r, MakePrefix(codeIndexResultApprovalByChunk, resultID, chunkIndex), approvalID)
+}
diff --git a/storage/operation/badgerimpl/dbstore.go b/storage/operation/badgerimpl/dbstore.go
new file mode 100644
index 00000000000..6f1376596b4
--- /dev/null
+++ b/storage/operation/badgerimpl/dbstore.go
@@ -0,0 +1,34 @@
+package badgerimpl
+
+import (
+	"github.com/dgraph-io/badger/v2"
+
+	"github.com/onflow/flow-go/storage"
+)
+
+// ToDB wraps the given badger database handle as a generic storage.DB.
+func ToDB(db *badger.DB) storage.DB {
+	return &dbStore{db: db}
+}
+
+type dbStore struct {
+	db *badger.DB
+}
+
+var _ (storage.DB) = (*dbStore)(nil)
+
+func (b *dbStore) Reader() storage.Reader {
+	return dbReader{db: b.db}
+}
+
+func (b *dbStore) WithReaderBatchWriter(fn func(storage.ReaderBatchWriter) error) error {
+	return WithReaderBatchWriter(b.db, fn)
+}
+
+func (b *dbStore) NewBatch() storage.Batch {
+	return NewReaderBatchWriter(b.db)
+}
+
+// Close closes the underlying badger database.
+// No errors are expected during normal operation.
+func (b *dbStore) Close() error {
+	return b.db.Close()
+}
diff --git a/storage/operation/badgerimpl/iterator.go b/storage/operation/badgerimpl/iterator.go
new file mode 100644
index 00000000000..2b0e83af371
--- /dev/null
+++ b/storage/operation/badgerimpl/iterator.go
@@ -0,0 +1,97 @@
+package badgerimpl
+
+import (
+	"bytes"
+
+	"github.com/dgraph-io/badger/v2"
+
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+type badgerIterator struct {
+	tx            *badger.Txn
+	iter          *badger.Iterator
+	lowerBound    []byte
+	upperBound    []byte
+	hasUpperBound bool // whether there's an upper bound
+}
+
+var _ storage.Iterator = (*badgerIterator)(nil)
+
+func newBadgerIterator(db *badger.DB, startPrefix, endPrefix []byte, ops storage.IteratorOption) *badgerIterator {
+	options := badger.DefaultIteratorOptions
+	if ops.BadgerIterateKeyOnly {
+		options.PrefetchValues = false
+	}
+	options.Prefix = operation.CommonPrefix(startPrefix, endPrefix)
+
+	tx := db.NewTransaction(false)
+	iter := tx.NewIterator(options)
+
+	lowerBound, upperBound, hasUpperBound := storage.StartEndPrefixToLowerUpperBound(startPrefix, endPrefix)
+
+	return &badgerIterator{
+		tx:            tx,
+		iter:          iter,
+		lowerBound:    lowerBound,
+		upperBound:    upperBound,
+		hasUpperBound: hasUpperBound,
+	}
+}
+
+// First seeks to the smallest key greater than or equal to the given key.
+func (i *badgerIterator) First() bool {
+	i.iter.Seek(i.lowerBound)
+	return i.Valid()
+}
+
+// Valid returns whether the iterator is positioned at a valid key-value pair.
+func (i *badgerIterator) Valid() bool {
+	// Note: we didn't specify the iteration range with the badger IteratorOptions,
+	// because the IteratorOptions only allow us to specify a single prefix, whereas
+	// we need to specify a range of prefixes. So we have to manually check the bounds here.
+	// The First() method, which calls Seek(i.lowerBound), ensures the iteration starts from
+	// the lowerBound; the upper bound is checked here by first checking whether we have
+	// reached the end of the iteration, and then checking whether the key is within the upper bound.
+
+	// check if we have reached the end of the iteration
+	if !i.iter.Valid() {
+		return false
+	}
+
+	// if there is no upper bound, the position is always valid
+	if !i.hasUpperBound {
+		return true
+	}
+
+	// check if the key is within the upper bound (exclusive)
+	key := i.iter.Item().Key()
+	// note: for the boundary case,
+	// upperBound is the exclusive upper bound and should not be included in the iteration,
+	// so if key == upperBound, the position is invalid and we return false.
+	valid := bytes.Compare(key, i.upperBound) < 0
+	return valid
+}
+
+// Next advances the iterator to the next key-value pair.
+func (i *badgerIterator) Next() {
+	i.iter.Next()
+}
+
+// IterItem returns the current key-value pair, or nil if done.
+func (i *badgerIterator) IterItem() storage.IterItem {
+	return i.iter.Item()
+}
+
+var _ storage.IterItem = (*badger.Item)(nil)
+
+// Close closes the iterator and discards the transaction.
+// The iterator must be closed, otherwise it causes memory leaks;
+// in particular, Transaction.Discard must be called.
+// No errors are expected during normal operation.
+func (i *badgerIterator) Close() error {
+	i.iter.Close()
+	i.tx.Discard()
+	return nil
+}
diff --git a/storage/operation/badgerimpl/reader.go b/storage/operation/badgerimpl/reader.go
new file mode 100644
index 00000000000..e08afde8998
--- /dev/null
+++ b/storage/operation/badgerimpl/reader.go
@@ -0,0 +1,74 @@
+package badgerimpl
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/dgraph-io/badger/v2"
+
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/noop"
+)
+
+type dbReader struct {
+	db *badger.DB
+}
+
+var _ storage.Reader = (*dbReader)(nil)
+
+// Get gets the value for the given key. It returns storage.ErrNotFound if the DB
+// does not contain the key. All other errors are exceptions.
+//
+// The caller should not modify the contents of the returned slice, but it is
+// safe to modify the contents of the argument after Get returns. The
+// returned slice will remain valid until the returned Closer is closed.
+// When err == nil, the caller MUST call closer.Close() or a memory leak will occur.
+func (b dbReader) Get(key []byte) ([]byte, io.Closer, error) {
+	tx := b.db.NewTransaction(false)
+	defer tx.Discard()
+
+	item, err := tx.Get(key)
+	if err != nil {
+		if errors.Is(err, badger.ErrKeyNotFound) {
+			return nil, noop.Closer{}, storage.ErrNotFound
+		}
+		return nil, noop.Closer{}, irrecoverable.NewExceptionf("could not load data: %w", err)
+	}
+
+	value, err := item.ValueCopy(nil)
+	if err != nil {
+		return nil, noop.Closer{}, irrecoverable.NewExceptionf("could not load value: %w", err)
+	}
+
+	return value, noop.Closer{}, nil
+}
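+
+// Example usage (illustrative sketch): the closer must be closed once the caller
+// is done with the returned value; `reader` and `process` are illustrative names.
+//
+//	value, closer, err := reader.Get(key)
+//	if err != nil {
+//		return err // storage.ErrNotFound is expected if the key is absent
+//	}
+//	defer closer.Close()
+//	process(value)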
+// Specifically, all keys that meet ANY of the following conditions are included in the iteration:
+// - have a prefix equal to startPrefix OR
+// - have a prefix equal to endPrefix OR
+// - have a prefix that is lexicographically between startPrefix and endPrefix
+//
+// It returns an error if the startPrefix key is greater than the endPrefix key.
+// No other errors are expected during normal operation.
+func (b dbReader) NewIter(startPrefix, endPrefix []byte, ops storage.IteratorOption) (storage.Iterator, error) {
+	if bytes.Compare(startPrefix, endPrefix) > 0 {
+		return nil, fmt.Errorf("startPrefix key must be less than or equal to endPrefix key")
+	}
+
+	return newBadgerIterator(b.db, startPrefix, endPrefix, ops), nil
+}
+
+// NewSeeker returns a new Seeker.
+func (b dbReader) NewSeeker() storage.Seeker {
+	return newBadgerSeeker(b.db)
+}
+
+// ToReader is a helper function to convert a *badger.DB to a Reader
+func ToReader(db *badger.DB) storage.Reader {
+	return dbReader{db}
+}
diff --git a/storage/operation/badgerimpl/seeker.go b/storage/operation/badgerimpl/seeker.go
new file mode 100644
index 00000000000..e6259ffd32e
--- /dev/null
+++ b/storage/operation/badgerimpl/seeker.go
@@ -0,0 +1,61 @@
+package badgerimpl
+
+import (
+	"bytes"
+	"errors"
+
+	"github.com/dgraph-io/badger/v2"
+
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+type badgerSeeker struct {
+	db *badger.DB
+}
+
+var _ storage.Seeker = (*badgerSeeker)(nil)
+
+func newBadgerSeeker(db *badger.DB) *badgerSeeker {
+	return &badgerSeeker{db: db}
+}
+
+// SeekLE (seek less than or equal) returns the largest key in lexicographical
+// order within inclusive range of [startPrefix, key].
+// This function returns an error if the specified key is less than startPrefix.
+// This function returns storage.ErrNotFound if a key that matches
+// the specified criteria is not found.
+func (i *badgerSeeker) SeekLE(startPrefix, key []byte) ([]byte, error) {
+	if bytes.Compare(key, startPrefix) < 0 {
+		return nil, errors.New("key must be greater than or equal to startPrefix key")
+	}
+
+	options := badger.DefaultIteratorOptions
+	options.PrefetchValues = false
+	options.Reverse = true
+	options.Prefix = operation.CommonPrefix(startPrefix, key)
+
+	tx := i.db.NewTransaction(false)
+	iter := tx.NewIterator(options)
+	defer func() {
+		iter.Close()
+		tx.Discard()
+	}()
+
+	// Seek positions the iterator at the given key, or at the largest key less than
+	// the given key, because we are iterating in reverse.
+	iter.Seek(key)
+
+	// Check if we reached the end of the iteration.
+	if !iter.Valid() {
+		return nil, storage.ErrNotFound
+	}
+
+	item := iter.Item()
+
+	// Check if the returned key is less than startPrefix.
+	if bytes.Compare(item.Key(), startPrefix) < 0 {
+		return nil, storage.ErrNotFound
+	}
+
+	return item.KeyCopy(nil), nil
+}
diff --git a/storage/operation/badgerimpl/writer.go b/storage/operation/badgerimpl/writer.go
new file mode 100644
index 00000000000..3d5c48bd43d
--- /dev/null
+++ b/storage/operation/badgerimpl/writer.go
@@ -0,0 +1,223 @@
+package badgerimpl
+
+import (
+	"fmt"
+	"slices"
+
+	"github.com/dgraph-io/badger/v2"
+
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+// ReaderBatchWriter is for reading and writing to a storage backend.
+// It is useful for performing a related sequence of reads and writes, after which you would like
+// to modify some non-database state if the sequence completed successfully (via AddCallback).
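+// A minimal usage sketch (illustrative only; `cache`, `blockID`, `block` and `makeKey`
+// are hypothetical placeholders, not part of this package):
+//
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		rw.AddCallback(func(err error) {
+//			if err == nil {
+//				cache.Insert(blockID, block) // update in-memory state only after a successful commit
+//			}
+//		})
+//		return operation.UpsertByKey(rw.Writer(), makeKey(blockID), block)
+//	})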
+// If you are not using AddCallback, avoid using ReaderBatchWriter: use Reader and Writer directly.
+// ReaderBatchWriter is not safe for concurrent use.
+type ReaderBatchWriter struct {
+	globalReader storage.Reader
+	batch        *badger.WriteBatch
+
+	// for executing callbacks after the batch has been flushed, such as updating caches
+	callbacks *operation.Callbacks
+
+	// values contains the values for this batch.
+	// The values map is set using SetScopedValue(key, value) and retrieved using ScopedValue(key).
+	// Initialization of the values map is deferred until it is needed, because
+	// ReaderBatchWriter is created frequently to update the database, but
+	// this values map is used infrequently to save data for batch operations.
+	// For example, store.TransactionResults.BatchRemoveByBlockID() saves batch
+	// removed block IDs in values map, and retrieves the batch removed block
+	// IDs in OnCommitSucceed() callback. This allows locking just once,
+	// instead of locking TransactionResults cache for every removed block ID.
+	values map[string]any
+}
+
+var _ storage.ReaderBatchWriter = (*ReaderBatchWriter)(nil)
+var _ storage.Batch = (*ReaderBatchWriter)(nil)
+
+// GlobalReader returns a database-backed reader which reads the latest committed global database state ("read-committed isolation").
+// This reader will not read un-committed writes written to ReaderBatchWriter.Writer until the write batch is committed.
+// This reader may observe different values for the same key on subsequent reads.
+func (b *ReaderBatchWriter) GlobalReader() storage.Reader {
+	return b.globalReader
+}
+
+// Writer returns a writer associated with a batch of writes. The batch is pending until it is committed.
+// When we `Write` into the batch, that write operation is added to the pending batch, but not committed.
+// The commit operation is atomic w.r.t. the batch; either all writes are applied to the database, or no writes are.
+// Note:
+// - The writer cannot be used concurrently for writing.
+func (b *ReaderBatchWriter) Writer() storage.Writer {
+	return b
+}
+
+// BadgerWriteBatch returns the badger write batch
+func (b *ReaderBatchWriter) BadgerWriteBatch() *badger.WriteBatch {
+	return b.batch
+}
+
+// AddCallback adds a callback to execute after the batch has been flushed,
+// regardless of whether the batch update succeeded or failed.
+// The error parameter is the error returned by the batch update.
+func (b *ReaderBatchWriter) AddCallback(callback func(error)) {
+	b.callbacks.AddCallback(callback)
+}
+
+// Commit flushes the batch to the database.
+// No errors expected during normal operation
+func (b *ReaderBatchWriter) Commit() error {
+	err := b.batch.Flush()
+
+	b.callbacks.NotifyCallbacks(err)
+
+	return err
+}
+
+// Close releases memory of the batch and no error is returned.
+// This can be called as a defer statement immediately after creating Batch
+// to reduce risk of unbounded memory consumption.
+// No errors are expected during normal operation.
+func (b *ReaderBatchWriter) Close() error {
+	// BadgerDB v2 docs for WriteBatch.Cancel():
+	//
+	// "Cancel function must be called if there's a chance that Flush might not get
+	// called. If neither Flush or Cancel is called, the transaction oracle would
+	// never get a chance to clear out the row commit timestamp map, thus causing an
+	// unbounded memory consumption. Typically, you can call Cancel as a defer
+	// statement right after NewWriteBatch is called.
+	//
+	// Note that any committed writes would still go through despite calling Cancel."
+
+	b.batch.Cancel()
+	return nil
+}
+
+func WithReaderBatchWriter(db *badger.DB, fn func(storage.ReaderBatchWriter) error) error {
+	batch := NewReaderBatchWriter(db)
+	defer batch.Close() // Release memory
+
+	err := fn(batch)
+	if err != nil {
+		// fn might use a lock to ensure concurrent safety while reading and writing data,
+		// and the lock is usually released by a callback.
+		// In other words, fn might hold a lock to be released by a callback, so
+		// we need to notify the callbacks for the locks to be released before
+		// returning the error.
+		batch.callbacks.NotifyCallbacks(err)
+		return err
+	}
+
+	return batch.Commit()
+}
+
+func NewReaderBatchWriter(db *badger.DB) *ReaderBatchWriter {
+	return &ReaderBatchWriter{
+		globalReader: ToReader(db),
+		batch:        db.NewWriteBatch(),
+		callbacks:    operation.NewCallbacks(),
+	}
+}
+
+var _ storage.Writer = (*ReaderBatchWriter)(nil)
+
+// Set sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+//
+// It is safe to modify the contents of the arguments after Set returns.
+// No errors expected during normal operation
+func (b *ReaderBatchWriter) Set(key, value []byte) error {
+	// BadgerDB v2 docs for WriteBatch.Set() say:
+	//
+	// "Set is equivalent of Txn.Set()."
+	//
+	// BadgerDB v2 docs for Txn.Set() say:
+	//
+	// "Set adds a key-value pair to the database.
+	// ...
+	// The current transaction keeps a reference to the key and val byte slice
+	// arguments. Users must not modify key and val until the end of the transaction."
+
+	// Make copies of the given key and value because:
+	// - ReaderBatchWriter.Set() (this function) promises that it is safe to modify
+	//   key and value after Set returns, while
+	// - BadgerDB's WriteBatch.Set() says users must not modify key and value
+	//   until the end of the transaction.
+	keyCopy := slices.Clone(key)
+	valueCopy := slices.Clone(value)
+
+	return b.batch.Set(keyCopy, valueCopy)
+}
+
+// Delete deletes the value for the given key. Deletes are blind and will
+// succeed even if the given key does not exist.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+// No errors expected during normal operation
+func (b *ReaderBatchWriter) Delete(key []byte) error {
+	// BadgerDB v2 docs for WriteBatch.Delete() say:
+	//
+	// "Delete is equivalent of Txn.Delete."
+	//
+	// BadgerDB v2 docs for Txn.Delete() say:
+	//
+	// "Delete deletes a key.
+	// ...
+	// The current transaction keeps a reference to the key byte slice argument.
+	// Users must not modify the key until the end of the transaction."
+
+	// Make a copy of the given key because:
+	// - ReaderBatchWriter.Delete() (this function) promises that it is safe to modify
+	//   key after Delete returns, while
+	// - BadgerDB's WriteBatch.Delete() says users must not modify key until the end of the transaction.
+	keyCopy := slices.Clone(key)
+
+	return b.batch.Delete(keyCopy)
+}
+
+// DeleteByRange removes all keys with a prefix that falls within the
+// range [start, end], both inclusive.
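+// For example (illustrative, using single-byte prefixes): with startPrefix 0x10 and
+// endPrefix 0x12, the keys 0x10, 0x1000, 0x11ff and 0x12ff are all deleted, while
+// 0x0fff and 0x1300 are untouched.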
+// It returns an error if endPrefix < startPrefix.
+// No other errors are expected during normal operation.
+func (b *ReaderBatchWriter) DeleteByRange(globalReader storage.Reader, startPrefix, endPrefix []byte) error {
+	err := operation.IterateKeysByPrefixRange(globalReader, startPrefix, endPrefix, func(key []byte) error {
+		err := b.batch.Delete(key)
+		if err != nil {
+			return fmt.Errorf("could not add key to delete batch (%v): %w", key, err)
+		}
+		return nil
+	})
+
+	if err != nil {
+		return fmt.Errorf("could not find keys by range to be deleted: %w", err)
+	}
+	return nil
+}
+
+// SetScopedValue stores the given value by the given key in this batch.
+// A stored value can be retrieved by the same key via ScopedValue().
+func (b *ReaderBatchWriter) SetScopedValue(key string, value any) {
+	// Creation of b.values is deferred until needed, so b.values can be nil here.
+	// Deleting an element from a nil b.values (map[string]any) is a no-op.
+	// Inserting an element into b.values requires initializing b.values first.
+
+	if value == nil {
+		delete(b.values, key)
+		return
+	}
+	if b.values == nil {
+		b.values = make(map[string]any)
+	}
+	b.values[key] = value
+}
+
+// ScopedValue returns the value associated with this batch for the given key and true if key exists,
+// or nil and false if key doesn't exist.
+func (b *ReaderBatchWriter) ScopedValue(key string) (any, bool) {
+	// Creation of b.values is deferred until needed, so b.values can be nil here.
+	// Accessing nil b.values (map[string]any) always returns (nil, false).
+
+	v, exists := b.values[key]
+	return v, exists
+}
diff --git a/storage/operation/badgerimpl/writer_test.go b/storage/operation/badgerimpl/writer_test.go
new file mode 100644
index 00000000000..c534d3a9116
--- /dev/null
+++ b/storage/operation/badgerimpl/writer_test.go
@@ -0,0 +1,47 @@
+package badgerimpl_test
+
+import (
+	"testing"
+
+	"github.com/dgraph-io/badger/v2"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestBadgerSetArgumentsNotSafeToModify verifies that WriteBatch.Set()
+// arguments are not safe to be modified before the batch is committed.
+func TestBadgerSetArgumentsNotSafeToModify(t *testing.T) {
+	unittest.RunWithBadgerDB(t, func(db *badger.DB) {
+		b := db.NewWriteBatch()
+
+		k := []byte{0x01}
+		v := []byte{0x01, 0x02, 0x03}
+
+		// Insert k and v to batch.
+		err := b.Set(k, v)
+		require.NoError(t, err)
+
+		// Modify k and v before commit.
+		k[0] = 0x02
+		v[0] = 0x04
+
+		// Commit pending writes.
+		err = b.Flush()
+		require.NoError(t, err)
+
+		tx := db.NewTransaction(false)
+
+		// Retrieving the value by the original key returns ErrKeyNotFound.
+		_, err = tx.Get([]byte{0x01})
+		require.ErrorIs(t, err, badger.ErrKeyNotFound)
+
+		// Retrieving the value by the modified key returns the modified value.
+		item, err := tx.Get([]byte{0x02})
+		require.NoError(t, err)
+
+		retrievedValue, err := item.ValueCopy(nil)
+		require.NoError(t, err)
+		require.Equal(t, v, retrievedValue)
+	})
+}
diff --git a/storage/operation/callbacks.go b/storage/operation/callbacks.go
new file mode 100644
index 00000000000..b44ce8bd87c
--- /dev/null
+++ b/storage/operation/callbacks.go
@@ -0,0 +1,26 @@
+package operation
+
+// Callbacks represents a collection of callbacks to be executed.
+// Callbacks is not safe for concurrent use.
+// Since Callbacks is only used in ReaderBatchWriter, which
+// is not safe for concurrent use either, there is no need to add
+// locking overhead to Callbacks.
+type Callbacks struct { + callbacks []func(error) +} + +func NewCallbacks() *Callbacks { + return &Callbacks{ + callbacks: nil, // lazy initialization + } +} + +func (b *Callbacks) AddCallback(callback func(error)) { + b.callbacks = append(b.callbacks, callback) +} + +func (b *Callbacks) NotifyCallbacks(err error) { + for _, callback := range b.callbacks { + callback(err) + } +} diff --git a/storage/operation/callbacks_test.go b/storage/operation/callbacks_test.go new file mode 100644 index 00000000000..3e34f822461 --- /dev/null +++ b/storage/operation/callbacks_test.go @@ -0,0 +1,32 @@ +package operation + +import "testing" + +func TestCallback(t *testing.T) { + cb := NewCallbacks() + var called bool + cb.AddCallback(func(err error) { + called = true + }) + cb.NotifyCallbacks(nil) + if !called { + t.Error("Callback was not called") + } +} + +func TestCallbackConcurrency(t *testing.T) { + cb := NewCallbacks() + var called bool + cb.AddCallback(func(err error) { + called = true + }) + done := make(chan struct{}) + go func() { + cb.NotifyCallbacks(nil) + close(done) + }() + <-done + if !called { + t.Error("Callback was not called") + } +} diff --git a/storage/operation/children.go b/storage/operation/children.go new file mode 100644 index 00000000000..6d1f757d176 --- /dev/null +++ b/storage/operation/children.go @@ -0,0 +1,108 @@ +package operation + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// IndexNewBlock populates the parent-child index for block, by adding the given blockID to the set of children of its parent. +// +// CAUTION: +// - This function should only be used for KNOWN BLOCKs (neither existence of the block nor its parent is verified here) +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// +// Expected error returns during normal operations: +// - [storage.ErrAlreadyExists] if the blockID is already indexed as a child of the parent +func IndexNewBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, parentID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + + return indexBlockByParent(rw, blockID, parentID) +} + +// IndexNewClusterBlock populates the parent-child index for cluster blocks, aka collections, by adding the given +// blockID to the set of children of its parent. +// +// CAUTION: +// - This function should only be used for KNOWN BLOCKs (neither existence of the block nor its parent is verified here) +// - The caller must acquire the [storage.LockInsertOrFinalizeClusterBlock] and hold it until the database write has been committed. +// +// Expected error returns during normal operations: +// - [storage.ErrAlreadyExists] if the blockID is already indexed as a child of the parent +func IndexNewClusterBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, parentID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertOrFinalizeClusterBlock) + } + + return indexBlockByParent(rw, blockID, parentID) +} + +// indexBlockByParent is the internal function that implements the indexing logic for both regular blocks and cluster blocks. 
+// The caller must ensure the required locks are held to prevent concurrent writes to the [codeBlockChildren] key space.
+func indexBlockByParent(rw storage.ReaderBatchWriter, blockID flow.Identifier, parentID flow.Identifier) error {
+	// By convention, the parentID being [flow.ZeroID] means that the block is a root block that has no parent.
+	// This is the case for genesis blocks and cluster root blocks. In this case, we don't need to index anything.
+	if parentID == flow.ZeroID {
+		return nil
+	}
+
+	// If the parent block is not zero, depending on whether the parent block has
+	// children or not, we will either update the index or insert the index.
+	var childrenIDs flow.IdentifierList
+	err := RetrieveBlockChildren(rw.GlobalReader(), parentID, &childrenIDs)
+	if err != nil {
+		if !errors.Is(err, storage.ErrNotFound) {
+			return fmt.Errorf("could not look up block children: %w", err)
+		}
+	}
+
+	// check we don't add a duplicate
+	for _, dupID := range childrenIDs {
+		if blockID == dupID {
+			return storage.ErrAlreadyExists
+		}
+	}
+
+	// adding the new block to be another child of the parent
+	childrenIDs = append(childrenIDs, blockID)
+
+	// saving the index
+	err = UpsertByKey(rw.Writer(), MakePrefix(codeBlockChildren, parentID), childrenIDs)
+	if err != nil {
+		return fmt.Errorf("could not update children index: %w", err)
+	}
+
+	return nil
+}
+
+// RetrieveBlockChildren retrieves the list of child block IDs for the specified parent block.
+//
+// Expected error returns during normal operations:
+// - [storage.ErrNotFound] if the block has no children
+//
+// Note that this means either the block does not exist, or the block exists but has no children;
+// the caller has to check whether the block exists by other means if needed.
+func RetrieveBlockChildren(r storage.Reader, blockID flow.Identifier, childrenIDs *flow.IdentifierList) error {
+	err := RetrieveByKey(r, MakePrefix(codeBlockChildren, blockID), childrenIDs)
+	if err != nil {
+		// When indexing a new block, we don't create an index for the block if it has no children,
+		// so we can't distinguish between a block that doesn't exist and a block that exists but has no children.
+		// If the block doesn't have a children index yet, it means it has no children.
+		if errors.Is(err, storage.ErrNotFound) {
+			return fmt.Errorf("the block has no children, but it might also be the case that the block does not exist: %w", err)
+		}
+
+		return fmt.Errorf("could not retrieve block children: %w", err)
+	}
+
+	if len(*childrenIDs) == 0 {
+		return fmt.Errorf("the block has no children: %w", storage.ErrNotFound)
+	}
+
+	return nil
+}
diff --git a/storage/operation/children_test.go b/storage/operation/children_test.go
new file mode 100644
index 00000000000..8a5c6f419c1
--- /dev/null
+++ b/storage/operation/children_test.go
@@ -0,0 +1,277 @@
+package operation_test
+
+import (
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestOperationPair is a pair of an indexing operation and the corresponding lock to be held during the operation.
+// The name is an auxiliary identifier, for debugging only.
+type TestOperationPair struct {
+	name      string
+	indexFunc func(lockctx.Proof, storage.ReaderBatchWriter, flow.Identifier, flow.Identifier) error
+	lockType  string
+}
+
+// getTestOperationPairs returns a `TestOperationPair` for indexing main consensus blocks
+// and one `TestOperationPair` for indexing collector blocks (aka collections).
+func getTestOperationPairs() []TestOperationPair {
+	return []TestOperationPair{
+		{
+			name:      "IndexNewBlock",
+			indexFunc: operation.IndexNewBlock,
+			lockType:  storage.LockInsertBlock,
+		},
+		{
+			name:      "IndexNewClusterBlock",
+			indexFunc: operation.IndexNewClusterBlock,
+			lockType:  storage.LockInsertOrFinalizeClusterBlock,
+		},
+	}
+}
+
+// After indexing a block by its parent, we should be able to retrieve the child block by the parentID.
+func TestIndexAndLookupChild(t *testing.T) {
+	for _, opPair := range getTestOperationPairs() {
+		t.Run(opPair.name, func(t *testing.T) {
+			dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+				lockManager := storage.NewTestingLockManager()
+				nonExist := unittest.IdentifierFixture()
+
+				// retrieving children of a non-existent block should return storage.ErrNotFound
+				var retrievedIDs flow.IdentifierList
+				err := operation.RetrieveBlockChildren(db.Reader(), nonExist, &retrievedIDs)
+				require.ErrorIs(t, err, storage.ErrNotFound)
+
+				parentID := unittest.IdentifierFixture()
+				childID := unittest.IdentifierFixture()
+
+				err = unittest.WithLock(t, lockManager, opPair.lockType, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return opPair.indexFunc(lctx, rw, childID, parentID)
+					})
+				})
+				require.NoError(t, err)
+
+				// retrieve child
+				require.NoError(t, operation.RetrieveBlockChildren(db.Reader(), parentID, &retrievedIDs))
+
+				// retrieved child should be the stored child
+				require.Equal(t, flow.IdentifierList{childID}, retrievedIDs)
+
+				err = operation.RetrieveBlockChildren(db.Reader(), childID, &retrievedIDs)
+				// verify new block has no children index (returning storage.ErrNotFound)
+				require.ErrorIs(t, err, storage.ErrNotFound)
+
+				// verify indexing again would hit storage.ErrAlreadyExists error
+				err = unittest.WithLock(t, lockManager, opPair.lockType, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return opPair.indexFunc(lctx, rw, childID, parentID)
+					})
+				})
+				require.ErrorIs(t, err, storage.ErrAlreadyExists)
+			})
+		})
+	}
+}
+
+// Indexing multiple children of the same parent; all of them should be retrievable.
+func TestIndexWithMultiChildrenRetrieve(t *testing.T) {
+	for _, opPair := range getTestOperationPairs() {
+		t.Run(opPair.name, func(t *testing.T) {
+			dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+				lockManager := storage.NewTestingLockManager()
+
+				parentID := unittest.IdentifierFixture()
+				child1ID := unittest.IdentifierFixture()
+				child2ID := unittest.IdentifierFixture()
+
+				// index the first child
+				err := unittest.WithLock(t, lockManager, opPair.lockType, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return opPair.indexFunc(lctx, rw, child1ID, parentID)
+					})
+				})
+				require.NoError(t, err)
+
+				// index the second child
+				err = unittest.WithLock(t, lockManager, opPair.lockType, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return opPair.indexFunc(lctx, rw, child2ID, parentID)
+					})
+				})
+				require.NoError(t, err)
+
+				var retrievedIDs flow.IdentifierList
+				err = operation.RetrieveBlockChildren(db.Reader(), parentID, &retrievedIDs)
+				require.NoError(t, err)
+
+				require.ElementsMatch(t, flow.IdentifierList{child1ID, child2ID}, retrievedIDs)
+			})
+		})
+	}
+}
+
+// Indexing the same child with different parents should not error.
+func TestIndexAgainWithDifferentParentShouldNotError(t *testing.T) {
+	for _, opPair := range getTestOperationPairs() {
+		t.Run(opPair.name, func(t *testing.T) {
+			dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+				lockManager := storage.NewTestingLockManager()
+
+				child := unittest.IdentifierFixture()
+				parent1 := unittest.IdentifierFixture()
+				parent2 := unittest.IdentifierFixture()
+
+				// index with the first parent
+				err := unittest.WithLock(t, lockManager, opPair.lockType, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return opPair.indexFunc(lctx, rw, child, parent1)
+					})
+				})
+				require.NoError(t, err)
+
+				// index with a different parent
+				err = unittest.WithLock(t, lockManager, opPair.lockType, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return opPair.indexFunc(lctx, rw, child, parent2)
+					})
+				})
+				require.NoError(t, err)
+
+				var retrievedIDs flow.IdentifierList
+				err = operation.RetrieveBlockChildren(db.Reader(), parent1, &retrievedIDs)
+				require.NoError(t, err)
+
+				require.ElementsMatch(t, flow.IdentifierList{child}, retrievedIDs)
+
+				err = operation.RetrieveBlockChildren(db.Reader(), parent2, &retrievedIDs)
+				require.NoError(t, err)
+				require.ElementsMatch(t, flow.IdentifierList{child}, retrievedIDs)
+			})
+		})
+	}
+}
+
+// If the parent is flow.ZeroID, we don't index the block.
+func TestIndexZeroParent(t *testing.T) {
+	for _, opPair := range getTestOperationPairs() {
+		t.Run(opPair.name, func(t *testing.T) {
+			dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+				lockManager := storage.NewTestingLockManager()
+
+				childID := unittest.IdentifierFixture()
+
+				err := unittest.WithLock(t, lockManager, opPair.lockType, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return opPair.indexFunc(lctx, rw, childID, flow.ZeroID)
+					})
+				})
+				require.NoError(t, err)
+
+				// zero id should have no children
+				var retrievedIDs flow.IdentifierList
+				err = operation.RetrieveBlockChildren(db.Reader(), flow.ZeroID, &retrievedIDs)
+				require.Error(t, err)
+				require.ErrorIs(t, err, storage.ErrNotFound)
+			})
+		})
+	}
+}
+
+// Looking up block children only returns direct children, even if grandchildren exist.
+func TestDirectChildren(t *testing.T) {
+	for _, opPair := range getTestOperationPairs() {
+		t.Run(opPair.name, func(t *testing.T) {
+			dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+				lockManager := storage.NewTestingLockManager()
+
+				// We emulate a fork of blocks: b1 ← b2 ← b3 ← b4
+				b1 := unittest.IdentifierFixture()
+				b2 := unittest.IdentifierFixture()
+				b3 := unittest.IdentifierFixture()
+				b4 := unittest.IdentifierFixture()
+
+				err := unittest.WithLock(t, lockManager, opPair.lockType, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return opPair.indexFunc(lctx, rw, b2, b1)
+					})
+				})
+				require.NoError(t, err)
+
+				err = unittest.WithLock(t, lockManager, opPair.lockType, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return opPair.indexFunc(lctx, rw, b3, b2)
+					})
+				})
+				
require.NoError(t, err) + + err = unittest.WithLock(t, lockManager, opPair.lockType, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return opPair.indexFunc(lctx, rw, b4, b3) + }) + }) + require.NoError(t, err) + + // check the children of the first block + var retrievedIDs flow.IdentifierList + + err = operation.RetrieveBlockChildren(db.Reader(), b1, &retrievedIDs) + require.NoError(t, err) + require.Equal(t, flow.IdentifierList{b2}, retrievedIDs) + + err = operation.RetrieveBlockChildren(db.Reader(), b2, &retrievedIDs) + require.NoError(t, err) + require.Equal(t, flow.IdentifierList{b3}, retrievedIDs) + + err = operation.RetrieveBlockChildren(db.Reader(), b3, &retrievedIDs) + require.NoError(t, err) + require.Equal(t, flow.IdentifierList{b4}, retrievedIDs) + + err = operation.RetrieveBlockChildren(db.Reader(), b4, &retrievedIDs) + // verify b4 has no children index (not indexed yet) + require.ErrorIs(t, err, storage.ErrNotFound) + }) + }) + } +} + +// TestChildrenWrongLockIsRejected verifies that operations fail when called with the wrong lock type. +// This ensures that IndexNewBlock requires LockInsertBlock and IndexNewClusterBlock requires LockInsertOrFinalizeClusterBlock. +func TestChildrenWrongLockIsRejected(t *testing.T) { + for _, opPair := range getTestOperationPairs() { + t.Run(opPair.name, func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + + parentID := unittest.IdentifierFixture() + childID := unittest.IdentifierFixture() + + // Use the wrong lock type for each operation + var wrongLockType string + if opPair.lockType == storage.LockInsertBlock { + // For IndexNewBlock, use the cluster block lock (wrong) + wrongLockType = storage.LockInsertOrFinalizeClusterBlock + } else { + // For IndexNewClusterBlock, use the regular block lock (wrong) + wrongLockType = storage.LockInsertBlock + } + + err := unittest.WithLock(t, lockManager, wrongLockType, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return opPair.indexFunc(lctx, rw, childID, parentID) + }) + }) + require.Error(t, err) + }) + }) + } +} diff --git a/storage/operation/chunk_data_packs.go b/storage/operation/chunk_data_packs.go new file mode 100644 index 00000000000..6502ab49f59 --- /dev/null +++ b/storage/operation/chunk_data_packs.go @@ -0,0 +1,59 @@ +package operation + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertChunkDataPack inserts a [storage.StoredChunkDataPack] into the database, keyed by its chunk ID. +// The function ensures data integrity by first checking if a chunk data pack already exists for the given +// chunk ID and rejecting overwrites with different values. This function is idempotent, i.e. repeated calls +// with the *initially* stored value are no-ops. +// +// CAUTION: +// - Confirming that no value is already stored and the subsequent write must be atomic to prevent data corruption. +// The caller must acquire the [storage.LockInsertChunkDataPack] and hold it until the database write has been committed. 
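+//
+// Illustrative usage sketch (`lctx`, `db` and `storedChunkDataPack` are placeholders
+// assumed to be provided by the caller; the lock must be acquired via the node's lock
+// manager and held until the batch commit below returns):
+//
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		return InsertChunkDataPack(lctx, rw, storedChunkDataPack)
+//	})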
+//
+// Expected error returns during normal operations:
+// - [storage.ErrDataMismatch] if a *different* chunk data pack is already stored for the same chunk ID
+func InsertChunkDataPack(lctx lockctx.Proof, rw storage.ReaderBatchWriter, c *storage.StoredChunkDataPack) error {
+	if !lctx.HoldsLock(storage.LockInsertChunkDataPack) {
+		return fmt.Errorf("InsertChunkDataPack requires lock: %s", storage.LockInsertChunkDataPack)
+	}
+
+	key := MakePrefix(codeChunkDataPack, c.ChunkID)
+
+	var existing storage.StoredChunkDataPack
+	err := RetrieveByKey(rw.GlobalReader(), key, &existing)
+	if err == nil {
+		err := c.Equals(existing)
+		if err != nil {
+			return fmt.Errorf("attempting to store conflicting chunk data pack (chunk ID: %v): storing: %+v, stored: %+v, err: %s. %w",
+				c.ChunkID, c, &existing, err, storage.ErrDataMismatch)
+		}
+		return nil // already stored, nothing to do
+	}
+
+	if !errors.Is(err, storage.ErrNotFound) {
+		return fmt.Errorf("checking for existing chunk data pack (chunk ID: %v): %w", c.ChunkID, err)
+	}
+
+	return UpsertByKey(rw.Writer(), key, c)
+}
+
+// RetrieveChunkDataPack retrieves a chunk data pack by chunk ID.
+// It returns [storage.ErrNotFound] if no chunk data pack with the given chunk ID exists.
func RetrieveChunkDataPack(r storage.Reader, chunkID flow.Identifier, c *storage.StoredChunkDataPack) error {
+	return RetrieveByKey(r, MakePrefix(codeChunkDataPack, chunkID), c)
+}
+
+// RemoveChunkDataPack removes the chunk data pack with the given chunk ID.
+// Any errors are exceptions.
+func RemoveChunkDataPack(w storage.Writer, chunkID flow.Identifier) error {
+	return RemoveByKey(w, MakePrefix(codeChunkDataPack, chunkID))
+}
diff --git a/storage/operation/chunk_data_packs_test.go b/storage/operation/chunk_data_packs_test.go
new file mode 100644
index 00000000000..b2843154a2c
--- /dev/null
+++ b/storage/operation/chunk_data_packs_test.go
@@ -0,0 +1,58 @@
+package operation_test
+
+import (
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestChunkDataPack(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		collectionID := unittest.IdentifierFixture()
+		expected := &storage.StoredChunkDataPack{
+			ChunkID:      unittest.IdentifierFixture(),
+			StartState:   unittest.StateCommitmentFixture(),
+			Proof:        []byte{'p'},
+			CollectionID: collectionID,
+		}
+
+		t.Run("Retrieve non-existent", func(t *testing.T) {
+			var actual storage.StoredChunkDataPack
+			err := operation.RetrieveChunkDataPack(db.Reader(), expected.ChunkID, &actual)
+			assert.Error(t, err)
+		})
+
+		t.Run("Save", func(t *testing.T) {
+			require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error {
+				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return operation.InsertChunkDataPack(lctx, rw, expected)
+				})
+			}))
+
+			var actual storage.StoredChunkDataPack
+			err := operation.RetrieveChunkDataPack(db.Reader(), expected.ChunkID, &actual)
+			assert.NoError(t, err)
+
+			assert.Equal(t, *expected, actual)
+		})
+
+		t.Run("Remove", func(t *testing.T) {
+			err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.RemoveChunkDataPack(rw.Writer(), expected.ChunkID)
+			})
+			require.NoError(t, err)
+
+			var actual storage.StoredChunkDataPack
+			err = operation.RetrieveChunkDataPack(db.Reader(), expected.ChunkID, &actual)
+			assert.Error(t, err)
+		})
+	})
+}
diff --git a/storage/operation/chunk_locators.go b/storage/operation/chunk_locators.go
new file mode 100644
index 00000000000..5e2e29973e6
--- /dev/null
+++ b/storage/operation/chunk_locators.go
@@ -0,0 +1,19 @@
+package operation
+
+import (
+	"github.com/onflow/flow-go/model/chunks"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// InsertChunkLocator inserts (or overwrites) a chunk locator, keyed by its ID.
+// No errors are expected during normal operation.
+func InsertChunkLocator(w storage.Writer, locator *chunks.Locator) error {
+	return UpsertByKey(w, MakePrefix(codeChunk, locator.ID()), locator)
+}
+
+// RetrieveChunkLocator retrieves a chunk locator by its ID.
+// It returns [storage.ErrNotFound] if no locator with the given ID exists.
+func RetrieveChunkLocator(r storage.Reader, locatorID flow.Identifier, locator *chunks.Locator) error {
+	return RetrieveByKey(r, MakePrefix(codeChunk, locatorID), locator)
+}
+
+// ExistChunkLocator returns true if a chunk locator with the given ID exists.
+// No errors are expected during normal operation.
+func ExistChunkLocator(r storage.Reader, locatorID flow.Identifier) (bool, error) {
+	return KeyExists(r, MakePrefix(codeChunk, locatorID))
+}
diff --git a/storage/operation/cluster.go b/storage/operation/cluster.go
new file mode 100644
index 00000000000..bf8765c322f
--- /dev/null
+++ b/storage/operation/cluster.go
@@ -0,0 +1,472 @@
+package operation
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/cluster"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// This file implements storage functions for chain state book-keeping of
+// collection node cluster consensus. In contrast to the corresponding functions
+// for regular consensus, these functions include the cluster ID in order to
+// support storing multiple chains, for example during epoch switchover.
+
+// IndexClusterBlockHeight indexes a cluster block ID by the cluster ID and block height.
+// The function ensures data integrity by first checking if a block ID already exists for the given
+// cluster and height, and rejecting overwrites with different values. This function is idempotent,
+// i.e. repeated calls with the *initially* indexed value are no-ops.
+//
+// CAUTION:
+// - Confirming that no value is already stored and the subsequent write must be atomic to prevent data corruption.
+// The caller must acquire the [storage.LockInsertOrFinalizeClusterBlock] and hold it until the database write has been committed.
+//
+// Expected error returns during normal operations:
+// - [storage.ErrDataMismatch] if a *different* block ID is already indexed for the same cluster and height
+func IndexClusterBlockHeight(lctx lockctx.Proof, rw storage.ReaderBatchWriter, clusterID flow.ChainID, height uint64, blockID flow.Identifier) error {
+	if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) {
+		return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock)
+	}
+
+	key := MakePrefix(codeFinalizedCluster, clusterID, height)
+	var existing flow.Identifier
+	err := RetrieveByKey(rw.GlobalReader(), key, &existing)
+	if err == nil {
+		if existing != blockID {
+			return fmt.Errorf("cluster block height already indexed with different block ID: %s vs %s: %w", existing, blockID, storage.ErrDataMismatch)
+		}
+		return nil // for the specified height, the finalized block is already set to `blockID`
+	}
+	// We do NOT want to continue with the WRITE UNLESS `storage.ErrNotFound` was received when checking for existing data.
+ if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to check existing cluster block height index: %w", err) + } + + return UpsertByKey(rw.Writer(), key, blockID) +} + +// LookupClusterBlockHeight retrieves the ID of a finalized cluster block at the given height produced by the specified cluster. +// Note that only finalized cluster blocks are indexed by height to guarantee uniqueness. +// +// Expected error returns during normal operations: +// - [storage.ErrNotFound] if no finalized block from the specified cluster is known at the given height +func LookupClusterBlockHeight(r storage.Reader, clusterID flow.ChainID, height uint64, blockID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeFinalizedCluster, clusterID, height), blockID) +} + +// BootstrapClusterFinalizedHeight initializes the latest finalized cluster block height for the given cluster. +// +// CAUTION: +// - This function is intended to be called during bootstrapping only. It expects that the height of the latest +// known finalized cluster block has not yet been persisted. +// - Confirming that no value is already stored and the subsequent write must be atomic to prevent data corruption. +// Therefore, the caller must acquire the [storage.LockInsertOrFinalizeClusterBlock] and hold it until the database +// write has been committed. +// +// No error returns expected during normal operations. +func BootstrapClusterFinalizedHeight(lctx lockctx.Proof, rw storage.ReaderBatchWriter, clusterID flow.ChainID, number uint64) error { + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock) + } + + key := MakePrefix(codeClusterHeight, clusterID) + + var existing uint64 + err := RetrieveByKey(rw.GlobalReader(), key, &existing) + if err == nil { + return fmt.Errorf("finalized height for cluster %v already initialized to %d", clusterID, existing) + } + + // We do NOT want to continue with the WRITE UNLESS `storage.ErrNotFound` was received when checking for existing data. + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to check existing finalized height: %w", err) + } + + return UpsertByKey(rw.Writer(), key, number) +} + +// UpdateClusterFinalizedHeight updates (overwrites!) the latest finalized cluster block height for the given cluster. +// +// CAUTION: +// - This function is intended for normal operations after bootstrapping. It expects that the height of the +// latest known finalized cluster block has already been persisted. This function guarantees that the height is updated +// sequentially, i.e. the new height is equal to the old height plus one. Otherwise, an exception is returned. +// - Reading the current height value, checking that it increases sequentially, and writing the new value must happen in one +// atomic operation to prevent data corruption. Hence, the caller must acquire [storage.LockInsertOrFinalizeClusterBlock] +// and hold it until the database write has been committed. +// +// No error returns expected during normal operations. 
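+// For example, if the currently stored finalized height is 41, the only accepted value for
+// `latestFinalizedHeight` is 42; any other input yields an exception.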
+func UpdateClusterFinalizedHeight(lctx lockctx.Proof, rw storage.ReaderBatchWriter, clusterID flow.ChainID, latestFinalizedHeight uint64) error {
+	if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) {
+		return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock)
+	}
+
+	key := MakePrefix(codeClusterHeight, clusterID)
+
+	var existing uint64
+	err := RetrieveByKey(rw.GlobalReader(), key, &existing)
+	if err != nil {
+		return fmt.Errorf("failed to check existing finalized height: %w", err)
+	}
+
+	if existing+1 != latestFinalizedHeight {
+		return fmt.Errorf("finalization isn't sequential: existing %d, new %d", existing, latestFinalizedHeight)
+	}
+
+	return UpsertByKey(rw.Writer(), key, latestFinalizedHeight)
+}
+
+// RetrieveClusterFinalizedHeight retrieves the latest finalized cluster block height of the given cluster.
+// For collector nodes in the specified cluster, this value should always exist (after bootstrapping).
+// However, other nodes outside the cluster typically do not track the latest finalized heights for the
+// different collector clusters.
+//
+// Expected error returns during normal operations:
+// - [storage.ErrNotFound] if the latest finalized height for the specified cluster is not present in the database
+func RetrieveClusterFinalizedHeight(r storage.Reader, clusterID flow.ChainID, height *uint64) error {
+	return RetrieveByKey(r, MakePrefix(codeClusterHeight, clusterID), height)
+}
+
+// IndexReferenceBlockByClusterBlock indexes the reference block ID for the given
+// cluster block ID. While each cluster block specifies a reference block in its
+// payload, we maintain this additional lookup for performance reasons.
+func IndexReferenceBlockByClusterBlock(lctx lockctx.Proof, w storage.Writer, clusterBlockID, refID flow.Identifier) error {
+	if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) {
+		return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock)
+	}
+
+	// We only need to check that the lock is held; there is no need to check whether the value
+	// is already stored, because the duplication check is done when storing the header, which
+	// happens in the same batch update while holding the same lock.
+
+	return UpsertByKey(w, MakePrefix(codeClusterBlockToRefBlock, clusterBlockID), refID)
+}
+
+// LookupReferenceBlockByClusterBlock looks up the reference block ID for the given
+// cluster block ID. While each cluster block specifies a reference block in its
+// payload, we maintain this additional lookup for performance reasons.
+func LookupReferenceBlockByClusterBlock(r storage.Reader, clusterBlockID flow.Identifier, refID *flow.Identifier) error {
+	return RetrieveByKey(r, MakePrefix(codeClusterBlockToRefBlock, clusterBlockID), refID)
+}
+
+// IndexClusterBlockByReferenceHeight indexes a cluster block ID by its reference
+// block height. The cluster block ID is included in the key for more efficient
+// traversal. Only finalized cluster blocks should be included in this index.
+// The key looks like: <prefix 0:1><ref_height 1:9><cluster_block_id 9:41>
+func IndexClusterBlockByReferenceHeight(lctx lockctx.Proof, w storage.Writer, refHeight uint64, clusterBlockID flow.Identifier) error {
+	// Why is this lock necessary?
+	// A single reference height can correspond to multiple cluster blocks. While we are finalizing blocks,
+	// we may also be concurrently extending cluster blocks. This leads to simultaneous updates and reads
+	// on keys sharing the same prefix. To prevent race conditions during these concurrent reads and writes,
+	// synchronization is required when accessing these keys.
+	if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) {
+		return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock)
+	}
+	return UpsertByKey(w, MakePrefix(codeRefHeightToClusterBlock, refHeight, clusterBlockID), nil)
+}
+
+// LookupClusterBlocksByReferenceHeightRange traverses the ref_height->cluster_block
+// index and returns any finalized cluster blocks which have a reference block with
+// height in the given range. This is used to avoid including duplicate transactions
+// when building or validating a new collection.
+func LookupClusterBlocksByReferenceHeightRange(lctx lockctx.Proof, r storage.Reader, start, end uint64, clusterBlockIDs *[]flow.Identifier) error {
+	// Why is this lock necessary?
+	// A single reference height can correspond to multiple cluster blocks. While we are finalizing blocks,
+	// we may also be concurrently extending cluster blocks. This leads to simultaneous updates and reads
+	// on keys sharing the same prefix. To prevent race conditions during these concurrent reads and writes,
+	// synchronization is required when accessing these keys.
+	if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) {
+		return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock)
+	}
+	startPrefix := MakePrefix(codeRefHeightToClusterBlock, start)
+	endPrefix := MakePrefix(codeRefHeightToClusterBlock, end)
+	prefixLen := len(startPrefix)
+	checkFunc := func(key []byte) error {
+		clusterBlockIDBytes := key[prefixLen:]
+		var clusterBlockID flow.Identifier
+		copy(clusterBlockID[:], clusterBlockIDBytes)
+		*clusterBlockIDs = append(*clusterBlockIDs, clusterBlockID)
+
+		// the info we need is stored in the key, never process the value
+		return nil
+	}
+
+	return IterateKeysByPrefixRange(r, startPrefix, endPrefix, checkFunc)
+}
+
+// This file implements storage functions for blocks in cluster consensus.
+
+// InsertClusterBlock inserts a cluster consensus block, updating all associated indexes.
+//
+// CAUTION:
+// - The caller must acquire the lock [storage.LockInsertOrFinalizeClusterBlock] and hold it
+// until the database write has been committed. This lock allows `InsertClusterBlock` to verify
+// that this block has not yet been indexed. In order to protect against accidental mutation
+// of existing data, this read and subsequent writes must be performed as one atomic operation.
+// Hence, the requirement to hold the lock until the write is committed.
+//
+// We return [storage.ErrAlreadyExists] if the block has already been persisted before, i.e. we only
+// insert a block once. This error allows the caller to detect duplicate inserts.
+// No other errors are expected during normal operation.
+func InsertClusterBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, proposal *cluster.Proposal) error {
+	// We need to enforce that each cluster block is inserted and indexed exactly once (no overwriting allowed):
+	// 1. We check that the lock [storage.LockInsertOrFinalizeClusterBlock] for cluster block insertion is held.
+	// 2. When calling `operation.InsertHeader`, we append the storage operations for inserting the header to the
+	// provided write batch. Note that `operation.InsertHeader` checks whether the header already exists,
+	// returning [storage.ErrAlreadyExists] if so.
+	// 3. We append all other storage indexing operations to the same write batch, without additional existence
+	// checks. This is safe, because this is the only place where these indexes are created, and we always
+	// store the block header first alongside the indices in one atomic batch. Hence, since we know from step 2
+	// that the header did not exist before, we also know that none of the other indexes existed before either.
+	// 4. We require that the caller holds the lock until the write batch has been committed.
+	// Thereby, we guarantee that no other thread can write data about the same block concurrently.
+	// When these constraints are met, we know that no overwrites occurred because `InsertHeader`
+	// guarantees that the key `blockID` has not been used before.
+	if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { // 1. check lock
+		return fmt.Errorf("missing required lock: %s", storage.LockInsertOrFinalizeClusterBlock)
+	}
+
+	// Here the key `blockID` is derived from the `block` via a collision-resistant hash function.
+	// Hence, two different blocks having the same key is practically impossible.
+	blockID := proposal.Block.ID()
+	// 2. Store the block header; errors with [storage.ErrAlreadyExists] if some entry for `blockID` already exists
+	err := InsertHeader(lctx, rw, blockID, proposal.Block.ToHeader())
+	if err != nil {
+		return fmt.Errorf("could not insert cluster block header: %w", err)
+	}
+
+	// insert the proposer's signature; without further overwrite checks (see above for explanation)
+	err = InsertProposalSignature(lctx, rw.Writer(), blockID, &proposal.ProposerSigData)
+	if err != nil {
+		return fmt.Errorf("could not insert proposer signature: %w", err)
+	}
+
+	// insert the block payload
+	err = InsertClusterPayload(lctx, rw, blockID, &proposal.Block.Payload)
+	if err != nil {
+		return fmt.Errorf("could not insert cluster block payload: %w", err)
+	}
+
+	// index the child block for recovery; without further overwrite checks (see above for explanation)
+	err = IndexNewClusterBlock(lctx, rw, blockID, proposal.Block.ParentID)
+	if err != nil {
+		return fmt.Errorf("could not index new cluster block: %w", err)
+	}
+	return nil
+}
+
+// RetrieveClusterBlock retrieves a cluster consensus block by block ID.
+func RetrieveClusterBlock(r storage.Reader, blockID flow.Identifier, block *cluster.Block) error {
+	// retrieve the block header
+	var header flow.Header
+	err := RetrieveHeader(r, blockID, &header)
+	if err != nil {
+		return fmt.Errorf("could not retrieve cluster block header: %w", err)
+	}
+
+	// retrieve payload
+	var payload cluster.Payload
+	err = RetrieveClusterPayload(r, blockID, &payload)
+	if err != nil {
+		return fmt.Errorf("could not retrieve cluster block payload: %w", err)
+	}
+
+	// overwrite block
+	newBlock, err := cluster.NewBlock(
+		cluster.UntrustedBlock{
+			HeaderBody: header.HeaderBody,
+			Payload:    payload,
+		},
+	)
+	if err != nil {
+		return fmt.Errorf("could not build cluster block: %w", err)
+	}
+	*block = *newBlock
+
+	return nil
+}
+
+// RetrieveLatestFinalizedClusterHeader retrieves the latest finalized cluster block header from the specified cluster.
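+// Expected error returns during normal operations:
+// - [storage.ErrNotFound] if no finalized height is indexed for the specified cluster
+// (see RetrieveClusterFinalizedHeight), which is typical for nodes outside that cluster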
+func RetrieveLatestFinalizedClusterHeader(r storage.Reader, clusterID flow.ChainID, final *flow.Header) error { + var latestFinalizedHeight uint64 + err := RetrieveClusterFinalizedHeight(r, clusterID, &latestFinalizedHeight) + if err != nil { + return fmt.Errorf("could not retrieve latest finalized cluster block height: %w", err) + } + + var finalID flow.Identifier + err = LookupClusterBlockHeight(r, clusterID, latestFinalizedHeight, &finalID) + if err != nil { + return fmt.Errorf("could not retrieve ID of latest finalized cluster block: %w", err) + } + + err = RetrieveHeader(r, finalID, final) + if err != nil { + return fmt.Errorf("could not retrieve header of latest finalized cluster block: %w", err) + } + return nil +} + +// FinalizeClusterBlock finalizes a block in cluster consensus. +func FinalizeClusterBlock(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertOrFinalizeClusterBlock) + } + + r := rw.GlobalReader() + // retrieve the header to check the parent + var header flow.Header + err := RetrieveHeader(r, blockID, &header) + if err != nil { + return fmt.Errorf("could not retrieve header: %w", err) + } + + // get the chain ID, which determines which cluster state to query + clusterID := header.ChainID + + // retrieve the latest finalized cluster block height + var latestFinalizedHeight uint64 + err = RetrieveClusterFinalizedHeight(r, clusterID, &latestFinalizedHeight) + if err != nil { + return fmt.Errorf("could not retrieve boundary: %w", err) + } + + // retrieve the ID of the latest finalized cluster block + var latestFinalizedBlockID flow.Identifier + err = LookupClusterBlockHeight(r, clusterID, latestFinalizedHeight, &latestFinalizedBlockID) + if err != nil { + return fmt.Errorf("could not retrieve head: %w", err) + } + + // sanity check: the previously latest finalized block is the parent of the block we are now finalizing + if header.ParentID != latestFinalizedBlockID { + return fmt.Errorf("can't finalize non-child of chain head") + } + + // index the block by its height + err = IndexClusterBlockHeight(lctx, rw, clusterID, header.Height, blockID) + if err != nil { + return fmt.Errorf("could not index cluster block height: %w", err) + } + + // update the finalized boundary + err = UpdateClusterFinalizedHeight(lctx, rw, clusterID, header.Height) + if err != nil { + return fmt.Errorf("could not update finalized boundary: %w", err) + } + + // NOTE: we don't want to prune forks that have become invalid here, so + // that we can keep validating entities and generating slashing + // challenges for some time - the pruning should happen some place else + // after a certain delay of blocks + + return nil +} + +// InsertClusterPayload inserts the payload for a cluster block. It inserts +// both the collection and all constituent transactions, allowing duplicates. 
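+//
+// Expected error returns during normal operations:
+// - [storage.ErrAlreadyExists] if a collection payload has already been indexed for the given cluster block ID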
+func InsertClusterPayload(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, payload *cluster.Payload) error {
+	if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) {
+		return fmt.Errorf("missing required lock: %s", storage.LockInsertOrFinalizeClusterBlock)
+	}
+
+	var txIDs []flow.Identifier
+	err := LookupCollectionPayload(rw.GlobalReader(), blockID, &txIDs)
+	if err == nil {
+		return fmt.Errorf("collection payload already exists for block %s: %w", blockID, storage.ErrAlreadyExists)
+	}
+	if !errors.Is(err, storage.ErrNotFound) {
+		return fmt.Errorf("unexpected error while attempting to retrieve collection payload: %w", err)
+	}
+
+	// STEP 1: persist the collection and constituent transactions.
+	// A cluster payload essentially represents a single collection (batch of transactions) plus some auxiliary
+	// information. Storing the collection on its own allows us to also retrieve it independently of the cluster
+	// block's payload. We expect repeated requests to persist the same collection data here, because it is valid
+	// to propose the same collection in two competing forks. However, we don't have to worry about repeated calls,
+	// because collections and transactions are keyed by their respective content hashes. So a different value
+	// should produce a different key, making accidental overwrites with inconsistent values impossible.
+	// Here, we persist a reduced representation of the collection, only listing the constituent transactions by their hashes.
+	light := payload.Collection.Light()
+	writer := rw.Writer()
+	err = UpsertCollection(writer, light) // collection is keyed by content hash, hence no overwrite protection is needed
+	if err != nil {
+		return fmt.Errorf("could not insert payload collection: %w", err)
+	}
+
+	// persist constituent transactions:
+	for _, colTx := range payload.Collection.Transactions {
+		err = UpsertTransaction(writer, colTx.ID(), colTx) // as transactions are keyed by content hash, no overwrite protection is needed
+		if err != nil {
+			return fmt.Errorf("could not insert payload transaction: %w", err)
+		}
+	}
+
+	// STEP 2: for the cluster block ID, index the constituent transactions plus the auxiliary data from the payload.
+	// Caution: Here we use the cluster block's ID as key, which is *not* uniquely determined by the indexed data.
+	// Hence, we must ensure that we are not accidentally overwriting existing data (in case of a bug in the calling
+	// code) with different values. This is ensured by the initial check confirming that the collection payload
+	// has not yet been indexed (and the assumption that `IndexReferenceBlockByClusterBlock` is called nowhere else).
+	txIDs = light.Transactions
+	err = IndexCollectionPayload(lctx, writer, blockID, txIDs)
+	if err != nil {
+		return fmt.Errorf("could not index collection: %w", err)
+	}
+
+	// insert the reference block ID
+	err = IndexReferenceBlockByClusterBlock(lctx, writer, blockID, payload.ReferenceBlockID)
+	if err != nil {
+		return fmt.Errorf("could not insert reference block ID: %w", err)
+	}
+
+	return nil
+}
+
+// RetrieveClusterPayload retrieves a cluster consensus block payload by block ID.
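+// Expected error returns during normal operations:
+// - [storage.ErrNotFound] if no payload is indexed for the given block ID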
+func RetrieveClusterPayload(r storage.Reader, blockID flow.Identifier, payload *cluster.Payload) error { + // lookup the reference block ID + var refID flow.Identifier + err := LookupReferenceBlockByClusterBlock(r, blockID, &refID) + if err != nil { + return fmt.Errorf("could not retrieve reference block ID: %w", err) + } + + // lookup collection transaction IDs + var txIDs []flow.Identifier + err = LookupCollectionPayload(r, blockID, &txIDs) + if err != nil { + return fmt.Errorf("could not look up collection payload: %w", err) + } + + colTransactions := make([]*flow.TransactionBody, 0, len(txIDs)) + // retrieve individual transactions + for _, txID := range txIDs { + var nextTx flow.TransactionBody + err = RetrieveTransaction(r, txID, &nextTx) + if err != nil { + return fmt.Errorf("could not retrieve transaction: %w", err) + } + colTransactions = append(colTransactions, &nextTx) + } + + collection, err := flow.NewCollection(flow.UntrustedCollection{Transactions: colTransactions}) + if err != nil { + return fmt.Errorf("could not build the collection from the transactions: %w", err) + } + newPayload, err := cluster.NewPayload( + cluster.UntrustedPayload{ + ReferenceBlockID: refID, + Collection: *collection, + }, + ) + if err != nil { + return fmt.Errorf("could not build the payload: %w", err) + } + *payload = *newPayload + + return nil +} diff --git a/storage/operation/cluster_test.go b/storage/operation/cluster_test.go new file mode 100644 index 00000000000..9cf4839b2af --- /dev/null +++ b/storage/operation/cluster_test.go @@ -0,0 +1,609 @@ +package operation_test + +import ( + "math/rand" + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/cluster" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestClusterHeights(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + var ( + clusterID flow.ChainID = "cluster" + height uint64 = 42 + expected = unittest.IdentifierFixture() + err error + ) + + t.Run("retrieve non-existent", func(t *testing.T) { + var actual flow.Identifier + err = operation.LookupClusterBlockHeight(db.Reader(), clusterID, height, &actual) + t.Log(err) + assert.ErrorIs(t, err, storage.ErrNotFound) + }) + + t.Run("insert/retrieve", func(t *testing.T) { + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw, clusterID, height, expected) + }) + }) + require.NoError(t, err) + + var actual flow.Identifier + err = operation.LookupClusterBlockHeight(db.Reader(), clusterID, height, &actual) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + }) + + t.Run("data mismatch error", func(t *testing.T) { + // Use a different cluster ID and height to avoid conflicts with other tests + testClusterID := flow.ChainID("test-cluster") + testHeight := uint64(999) + + // First index a block ID for the cluster and height + firstBlockID := unittest.IdentifierFixture() + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw 
storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw, testClusterID, testHeight, firstBlockID) + }) + }) + require.NoError(t, err) + + // Try to index a different block ID for the same cluster and height + differentBlockID := unittest.IdentifierFixture() + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw, testClusterID, testHeight, differentBlockID) + }) + }) + + require.Error(t, err) + assert.ErrorIs(t, err, storage.ErrDataMismatch) + }) + + t.Run("multiple chain IDs", func(t *testing.T) { + // use different cluster ID but same block height + // - we first index *all* three blocks from different clusters for the same height + // - then we retrieve *all* three block IDs in a second step + // First writing all three is important to detect bugs, where the logic ignores the cluster ID + // and only memorizes the latest block stored for a given height (irrespective of cluster ID). + clusterBlockIDs := unittest.IdentifierListFixture(3) + clusterIDs := []flow.ChainID{"cluster-0", "cluster-1", "cluster-2"} + var actual flow.Identifier + for i := 0; i < len(clusterBlockIDs); i++ { + err = operation.LookupClusterBlockHeight(db.Reader(), clusterIDs[i], height, &actual) + assert.ErrorIs(t, err, storage.ErrNotFound) + + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw, clusterIDs[i], height, clusterBlockIDs[i]) + }) + }) + require.NoError(t, err) + } + for i := 0; i < len(clusterBlockIDs); i++ { + err = operation.LookupClusterBlockHeight(db.Reader(), clusterIDs[i], height, &actual) + assert.NoError(t, err) + assert.Equal(t, clusterBlockIDs[i], actual) + } + }) + }) +} + +// Test_RetrieveClusterFinalizedHeight verifies proper retrieval of the latest finalized cluster block height. 
+func Test_RetrieveClusterFinalizedHeight(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + var ( + clusterID flow.ChainID = "cluster" + err error + ) + + t.Run("retrieve non-existent", func(t *testing.T) { + var actual uint64 + err = operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterID, &actual) + t.Log(err) + assert.ErrorIs(t, err, storage.ErrNotFound) + }) + + t.Run("insert/retrieve", func(t *testing.T) { + + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.BootstrapClusterFinalizedHeight(lctx, rw, clusterID, 20) + }) + }) + require.NoError(t, err) + + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpdateClusterFinalizedHeight(lctx, rw, clusterID, 21) + }) + }) + require.NoError(t, err) + + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpdateClusterFinalizedHeight(lctx, rw, clusterID, 22) + }) + }) + require.NoError(t, err) + + var actual uint64 + err = operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterID, &actual) + assert.NoError(t, err) + assert.Equal(t, uint64(22), actual) + }) + + t.Run("multiple chain IDs", func(t *testing.T) { + // persist latest finalized cluster block height for three different collector clusters + // - we first index *all* three latest finalized block heights from different clusters + // - then we retrieve all three latest finalized block heights in a second step + // First writing all three is important to detect bugs, where the logic ignores the cluster ID + // and only memorizes the last value stored (irrespective of cluster ID). 
+ clusterFinalizedHeights := []uint64{117, 11, 791} + clusterIDs := []flow.ChainID{"cluster-0", "cluster-1", "cluster-2"} + var actual uint64 + for i := 0; i < len(clusterFinalizedHeights); i++ { + err = operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterIDs[i], &actual) + assert.ErrorIs(t, err, storage.ErrNotFound) + + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.BootstrapClusterFinalizedHeight(lctx, rw, clusterIDs[i], clusterFinalizedHeights[i]) + }) + }) + require.NoError(t, err) + } + for i := 0; i < len(clusterFinalizedHeights); i++ { + err = operation.RetrieveClusterFinalizedHeight(db.Reader(), clusterIDs[i], &actual) + assert.NoError(t, err) + assert.Equal(t, clusterFinalizedHeights[i], actual) + } + }) + + t.Run("update to non-sequential finalized height returns error", func(t *testing.T) { + // Use a different cluster ID to avoid conflicts with other tests + testClusterID := flow.ChainID("test-cluster-non-sequential") + + // First bootstrap a cluster with height 20 + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.BootstrapClusterFinalizedHeight(lctx, rw, testClusterID, 20) + }) + }) + require.NoError(t, err) + + // Try to update to a non-sequential height (should fail) + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpdateClusterFinalizedHeight(lctx, rw, testClusterID, 25) // Should be 21, not 25 + }) + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "finalization isn't sequential") + }) + + t.Run("bootstrap on non-empty key returns error", func(t *testing.T) { + // Use a different cluster ID to avoid conflicts with other tests + testClusterID := flow.ChainID("test-cluster-bootstrap-error") + + // First bootstrap a cluster with height 30 + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.BootstrapClusterFinalizedHeight(lctx, rw, testClusterID, 30) + }) + }) + require.NoError(t, err) + + // Try to bootstrap again (should fail) + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.BootstrapClusterFinalizedHeight(lctx, rw, testClusterID, 35) + }) + }) + require.Error(t, err) + assert.Contains(t, err.Error(), "finalized height for cluster") + assert.Contains(t, err.Error(), "already initialized") + }) + }) +} + +func TestClusterBlockByReferenceHeight(t *testing.T) { + t.Run("should be able to index cluster block by reference height", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + id := unittest.IdentifierFixture() + height := rand.Uint64() + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return 
operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), height, id) + }) + }) + require.NoError(t, err) + + var retrieved []flow.Identifier + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), height, height, &retrieved) + }) + require.NoError(t, err) + require.Len(t, retrieved, 1) + assert.Equal(t, id, retrieved[0]) + }) + }) + + t.Run("should be able to index multiple cluster blocks at same reference height", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + ids := unittest.IdentifierListFixture(10) + height := rand.Uint64() + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + for _, id := range ids { + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), height, id) + }) + if err != nil { + return err + } + } + + return nil + }) + require.NoError(t, err) + + var retrieved []flow.Identifier + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), height, height, &retrieved) + }) + assert.NoError(t, err) + assert.Len(t, retrieved, len(ids)) + assert.ElementsMatch(t, ids, retrieved) + }) + }) + + t.Run("should be able to lookup cluster blocks across height range", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + ids := unittest.IdentifierListFixture(100) + nextHeight := rand.Uint64() + // keep track of height range + minHeight, maxHeight := nextHeight, nextHeight + // keep track of which ids are indexed at each nextHeight + lookup := make(map[uint64][]flow.Identifier) + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + for i := 0; i < len(ids); i++ { + // randomly adjust the nextHeight, increasing on average + r := rand.Intn(100) + if r < 20 { + nextHeight -= 1 // 20% probability + } else if r < 40 { + // 20% probability: nextHeight stays the same + } else if r < 80 { + nextHeight += 1 // 40% probability + } else { + nextHeight += 2 // 20% probability + } + + lookup[nextHeight] = append(lookup[nextHeight], ids[i]) + if nextHeight < minHeight { + minHeight = nextHeight + } + if nextHeight > maxHeight { + maxHeight = nextHeight + } + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockByReferenceHeight(lctx, rw.Writer(), nextHeight, ids[i]) + }) + assert.NoError(t, err) + } + return nil + }) + require.NoError(t, err) + + // determine which ids we expect to be retrieved for a given height range + idsInHeightRange := func(min, max uint64) []flow.Identifier { + var idsForHeight []flow.Identifier + for height, id := range lookup { + if min <= height && height <= max { + idsForHeight = append(idsForHeight, id...) 
+ } + } + return idsForHeight + } + + // Test cases are described as follows: + // {---} represents the queried height range + // [---] represents the indexed height range + // [{ means the left endpoint of both ranges are the same + // {-[ means the left endpoint of the queried range is strictly less than the indexed range + t.Run("{-}--[-]", func(t *testing.T) { + var retrieved []flow.Identifier + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), minHeight-100, minHeight-1, &retrieved) + }) + require.NoError(t, err) + assert.Len(t, retrieved, 0) + }) + + t.Run("{-[--}-]", func(t *testing.T) { + var retrieved []flow.Identifier + min := minHeight - 100 + max := minHeight + (maxHeight-minHeight)/2 + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), min, max, &retrieved) + }) + require.NoError(t, err) + + expected := idsInHeightRange(min, max) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("{[--}--]", func(t *testing.T) { + var retrieved []flow.Identifier + min := minHeight + max := minHeight + (maxHeight-minHeight)/2 + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), min, max, &retrieved) + + }) + require.NoError(t, err) + expected := idsInHeightRange(min, max) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("[-{--}-]", func(t *testing.T) { + var retrieved []flow.Identifier + min := minHeight + 1 + max := maxHeight - 1 + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), min, max, &retrieved) + }) + require.NoError(t, err) + expected := idsInHeightRange(min, max) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("[{----}]", func(t *testing.T) { + var retrieved []flow.Identifier + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), minHeight, maxHeight, &retrieved) + }) + require.NoError(t, err) + expected := idsInHeightRange(minHeight, maxHeight) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("[--{--}]", func(t *testing.T) { + var retrieved []flow.Identifier + min := minHeight + (maxHeight-minHeight)/2 + max := maxHeight + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), min, max, &retrieved) + }) + require.NoError(t, err) + expected := idsInHeightRange(min, max) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, 
expected, retrieved) + }) + t.Run("[-{--]-}", func(t *testing.T) { + var retrieved []flow.Identifier + min := minHeight + (maxHeight-minHeight)/2 + max := maxHeight + 100 + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), min, max, &retrieved) + }) + require.NoError(t, err) + expected := idsInHeightRange(min, max) + assert.NotEmpty(t, expected, "test assumption broken") + assert.Len(t, retrieved, len(expected)) + assert.ElementsMatch(t, expected, retrieved) + }) + t.Run("[-]--{-}", func(t *testing.T) { + var retrieved []flow.Identifier + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, db.Reader(), maxHeight+1, maxHeight+100, &retrieved) + }) + require.NoError(t, err) + assert.Len(t, retrieved, 0) + }) + }) + }) +} + +// expected average case # of blocks to lookup on Mainnet +func BenchmarkLookupClusterBlocksByReferenceHeightRange_1200(b *testing.B) { + benchmarkLookupClusterBlocksByReferenceHeightRange(b, 1200) +} + +// 5x average case on Mainnet +func BenchmarkLookupClusterBlocksByReferenceHeightRange_6_000(b *testing.B) { + benchmarkLookupClusterBlocksByReferenceHeightRange(b, 6_000) +} + +func BenchmarkLookupClusterBlocksByReferenceHeightRange_100_000(b *testing.B) { + benchmarkLookupClusterBlocksByReferenceHeightRange(b, 100_000) +} + +func benchmarkLookupClusterBlocksByReferenceHeightRange(b *testing.B, n int) { + lockManager := storage.NewTestingLockManager() + dbtest.BenchWithStorages(b, func(b *testing.B, r storage.Reader, wr dbtest.WithWriter) { + for i := 0; i < n; i++ { + err := unittest.WithLock(b, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return wr(func(w storage.Writer) error { + return operation.IndexClusterBlockByReferenceHeight(lctx, w, rand.Uint64()%1000, unittest.IdentifierFixture()) + }) + }) + require.NoError(b, err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var blockIDs []flow.Identifier + err := unittest.WithLock(b, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return operation.LookupClusterBlocksByReferenceHeightRange(lctx, r, 0, 1000, &blockIDs) + }) + require.NoError(b, err) + } + }) +} + +func TestInsertRetrieveClusterBlock(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + block := unittest.ClusterBlockFixture() + + lockManager := storage.NewTestingLockManager() + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(block)) + }) + }) + require.NoError(t, err) + + var retrieved cluster.Block + err = operation.RetrieveClusterBlock(db.Reader(), block.ID(), &retrieved) + require.NoError(t, err) + + require.Equal(t, *block, retrieved) + }) +} + +func TestFinalizeClusterBlock(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + parent := unittest.ClusterBlockFixture() + block := unittest.ClusterBlockFixture( + unittest.ClusterBlock.WithParent(parent), + ) + + lockManager := storage.NewTestingLockManager() + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) 
error {
+			require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(parent))
+			}))
+
+			// index parent as latest finalized block (manually writing respective indexes like in bootstrapping to skip transitive consistency checks)
+			require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.IndexClusterBlockHeight(lctx, rw, block.ChainID, parent.Height, parent.ID())
+			}))
+			require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.BootstrapClusterFinalizedHeight(lctx, rw, block.ChainID, parent.Height)
+			}))
+
+			// Insert new block and verify `FinalizeClusterBlock` procedure accepts it
+			require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(block))
+			}))
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.FinalizeClusterBlock(lctx, rw, block.ID())
+			})
+		})
+		require.NoError(t, err)
+
+		// verify that the new block has been properly indexed as the latest finalized
+		var latestFinalizedHeight uint64
+		err = operation.RetrieveClusterFinalizedHeight(db.Reader(), block.ChainID, &latestFinalizedHeight)
+		require.NoError(t, err)
+		require.Equal(t, block.Height, latestFinalizedHeight)
+
+		var headID flow.Identifier
+		err = operation.LookupClusterBlockHeight(db.Reader(), block.ChainID, latestFinalizedHeight, &headID)
+		require.NoError(t, err)
+		require.Equal(t, block.ID(), headID)
+	})
+}
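The test above wires up the finalized state by hand. As a condensed reference, the same bootstrap sequence expressed as a helper (an illustrative sketch: `bootstrapFinalized` is hypothetical and assumes `lctx` holds storage.LockInsertOrFinalizeClusterBlock):

// bootstrapFinalized is a hypothetical helper condensing the manual bootstrap
// sequence from the test above: index the root block by height, then record
// that height as the latest finalized boundary.
func bootstrapFinalized(lctx lockctx.Proof, rw storage.ReaderBatchWriter, root *cluster.Block) error {
	err := operation.IndexClusterBlockHeight(lctx, rw, root.ChainID, root.Height, root.ID())
	if err != nil {
		return fmt.Errorf("could not index root cluster block height: %w", err)
	}
	return operation.BootstrapClusterFinalizedHeight(lctx, rw, root.ChainID, root.Height)
}

+
+// TestDisconnectedFinalizedBlock verifies that finalization logic rejects finalizing a block whose parent is not the latest finalized block.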
+func TestDisconnectedFinalizedBlock(t *testing.T) { + lockManager := storage.NewTestingLockManager() + + t.Run("finalizing C should fail because B is not yet finalized", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + _, _, blockC, _ := constructState(t, db, lctx) + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.FinalizeClusterBlock(lctx, rw, blockC.ID()) + }) + }) + require.Error(t, err) + require.NotErrorIs(t, err, storage.ErrAlreadyExists) + }) + }) + + t.Run("finalizing B and then C should succeed", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + _, blockB, blockC, _ := constructState(t, db, lctx) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.FinalizeClusterBlock(lctx, rw, blockB.ID()) + })) + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.FinalizeClusterBlock(lctx, rw, blockC.ID()) + }) + }) + require.NoError(t, err) + }) + }) + + t.Run("finalizing B and then D should fail, because B is not the parent of D", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + _, blockB, _, blockD := constructState(t, db, lctx) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.FinalizeClusterBlock(lctx, rw, blockB.ID()) + })) + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.FinalizeClusterBlock(lctx, rw, blockD.ID()) + }) + + }) + require.Error(t, err) + require.NotErrorIs(t, err, storage.ErrAlreadyExists) + }) + }) + +} + +// `constructState` initializes a stub of the following collector chain state: +// +// A ← B ← C +// ↖ D +func constructState(t *testing.T, db storage.DB, lctx lockctx.Proof) (blockA, blockB, blockC, blockD *cluster.Block) { + blockA = unittest.ClusterBlockFixture() // Create block A as the root + blockB = unittest.ClusterBlockFixture(unittest.ClusterBlock.WithParent(blockA)) // Create block B as a child of A + blockC = unittest.ClusterBlockFixture(unittest.ClusterBlock.WithParent(blockB)) // Create block C as a child of B + blockD = unittest.ClusterBlockFixture(unittest.ClusterBlock.WithParent(blockA)) // Create block D as a child of A (creating a fork) + + // Store all blocks + for _, b := range []*cluster.Block{blockA, blockB, blockC, blockD} { + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterBlock(lctx, rw, unittest.ClusterProposalFromBlock(b)) + })) + } + + // index `blockA` as latest finalized block (manually writing respective indexes like in bootstrapping to skip transitive consistency checks) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw, blockA.ChainID, blockA.Height, blockA.ID()) + })) + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.BootstrapClusterFinalizedHeight(lctx, rw, blockA.ChainID, blockA.Height) + })) + + return blockA, blockB, blockC, 
blockD
+}
diff --git a/storage/operation/codec.go b/storage/operation/codec.go
new file mode 100644
index 00000000000..43dc4c37f7a
--- /dev/null
+++ b/storage/operation/codec.go
@@ -0,0 +1,34 @@
+package operation
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// EncodeKeyPart encodes a value to be used as a part of a key to be stored in storage.
+func EncodeKeyPart(v interface{}) []byte {
+	switch i := v.(type) {
+	case uint8:
+		return []byte{i}
+	case uint32:
+		b := make([]byte, 4)
+		binary.BigEndian.PutUint32(b, i)
+		return b
+	case uint64:
+		b := make([]byte, 8)
+		binary.BigEndian.PutUint64(b, i)
+		return b
+	case string:
+		return []byte(i)
+	case flow.Role:
+		return []byte{byte(i)}
+	case flow.Identifier:
+		return i[:]
+	case flow.ChainID:
+		return []byte(i)
+	default:
+		panic(fmt.Sprintf("unsupported type to convert (%T)", v))
+	}
+}
diff --git a/storage/operation/collections.go b/storage/operation/collections.go
new file mode 100644
index 00000000000..b2d20c9f953
--- /dev/null
+++ b/storage/operation/collections.go
@@ -0,0 +1,130 @@
+package operation
+
+import (
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// NOTE: These insert light collections, which only contain references
+// to the constituent transactions. They do not modify transactions contained
+// by the collections.
+
+// UpsertCollection inserts a [flow.LightCollection] into the storage, keyed by its ID.
+//
+// If the collection already exists, it will be overwritten. Note that here, the key (collection ID) is derived
+// from the value (collection) via a collision-resistant hash function. Hence, unchecked overwrites pose no risk
+// of data corruption, because for the same key, we expect the same value.
+//
+// No errors are expected during normal operation.
+func UpsertCollection(w storage.Writer, collection *flow.LightCollection) error {
+	return UpsertByKey(w, MakePrefix(codeCollection, collection.ID()), collection)
+}
+
+// RetrieveCollection retrieves a [flow.LightCollection] by its ID.
+//
+// Expected errors during normal operations:
+//   - [storage.ErrNotFound] if no collection with the specified ID is known.
+func RetrieveCollection(r storage.Reader, collID flow.Identifier, collection *flow.LightCollection) error {
+	return RetrieveByKey(r, MakePrefix(codeCollection, collID), collection)
+}
+
+// RemoveCollection removes a collection from the storage.
+// CAUTION: this is for recovery purposes only, and should not be used during normal operations!
+// It returns nil if the collection does not exist.
+// No errors are expected during normal operation.
+func RemoveCollection(w storage.Writer, collID flow.Identifier) error {
+	return RemoveByKey(w, MakePrefix(codeCollection, collID))
+}
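Because collection keys are content hashes, writers need no overwrite protection. A store-then-read round trip, as an illustrative sketch (the helper `saveCollection` is hypothetical; it lives in the same package context as the functions above):

// saveCollection is a hypothetical helper: it persists a light collection and
// reads it back by its content-derived ID. Repeated upserts of the same
// collection are harmless no-ops, because the key determines the value.
func saveCollection(db storage.DB, collection *flow.LightCollection) error {
	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return UpsertCollection(rw.Writer(), collection)
	})
	if err != nil {
		return fmt.Errorf("could not upsert collection: %w", err)
	}

	var stored flow.LightCollection
	return RetrieveCollection(db.Reader(), collection.ID(), &stored)
}
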
+// IndexCollectionPayload populates the map from a cluster block ID to the batch of transactions it contains.
+//
+// CAUTION:
+//   - The caller must acquire the [storage.LockInsertOrFinalizeClusterBlock] and hold it until the database write has been
+//     committed.
+//   - OVERWRITES existing data (potential for data corruption):
+//     This method silently overrides existing data without any sanity checks whether data for the same key already exists.
+//     Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different
+//     value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be
+//     compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof
+//     serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere
+//     ATOMICALLY with this write operation.
+//
+// No errors are expected during normal operation.
+func IndexCollectionPayload(lctx lockctx.Proof, w storage.Writer, clusterBlockID flow.Identifier, txIDs []flow.Identifier) error {
+	if !lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) {
+		return fmt.Errorf("missing lock: %v", storage.LockInsertOrFinalizeClusterBlock)
+	}
+	return UpsertByKey(w, MakePrefix(codeIndexCollection, clusterBlockID), txIDs)
+}
+
+// LookupCollectionPayload retrieves the list of transaction IDs that constitute the payload of the specified cluster block.
+// For every known cluster block, this index should be populated.
+//
+// Expected errors during normal operations:
+//   - [storage.ErrNotFound] if `clusterBlockID` does not refer to a known cluster block
+func LookupCollectionPayload(r storage.Reader, clusterBlockID flow.Identifier, txIDs *[]flow.Identifier) error {
+	return RetrieveByKey(r, MakePrefix(codeIndexCollection, clusterBlockID), txIDs)
+}
+
+// RemoveCollectionPayloadIndices removes the index from a cluster block ID to the transaction IDs constituting its payload.
+// CAUTION: this is for recovery purposes only, and should not be used during normal operations!
+// It returns nil if the index does not exist.
+// No errors are expected during normal operation.
+func RemoveCollectionPayloadIndices(w storage.Writer, clusterBlockID flow.Identifier) error {
+	return RemoveByKey(w, MakePrefix(codeIndexCollection, clusterBlockID))
+}
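The deduplication contract described above is the caller's responsibility. A hypothetical sketch of what that looks like in practice (it assumes the standard library `errors` package is imported; this mirrors what InsertClusterPayload does internally):

// indexPayloadOnce is a hypothetical sketch of the deduplication contract:
// while holding the lock, confirm that no payload is indexed yet for this
// cluster block, then write within the same batch.
func indexPayloadOnce(lctx lockctx.Proof, rw storage.ReaderBatchWriter, clusterBlockID flow.Identifier, txIDs []flow.Identifier) error {
	var existing []flow.Identifier
	err := LookupCollectionPayload(rw.GlobalReader(), clusterBlockID, &existing)
	if err == nil { // a value is already present: reject repeated indexing
		return fmt.Errorf("payload for cluster block %v already indexed: %w", clusterBlockID, storage.ErrAlreadyExists)
	}
	if !errors.Is(err, storage.ErrNotFound) {
		return fmt.Errorf("could not check for existing payload: %w", err)
	}
	return IndexCollectionPayload(lctx, rw.Writer(), clusterBlockID, txIDs)
}

+
+// IndexCollectionByTransaction indexes the given collection ID, keyed by the transaction ID.
+//
+// CAUTION:
+//   - The caller must acquire the [storage.LockInsertCollection] and hold it until the database write has been committed.
+//   - OVERWRITES existing data (potential for data corruption):
+//     This method silently overrides existing data without any sanity checks whether data for the same key already exists.
+//     Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different
+//     value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be
+//     compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof
+//     serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere
+//     ATOMICALLY with this write operation.
+//
+// WARNING, this index is NOT BFT in its current form:
+// Honest clusters ensure a transaction can only belong to one collection. However, in rare
+// cases, the collector clusters can exceed byzantine thresholds -- making it possible to
+// produce multiple finalized collections (aka guaranteed collections) containing the same
+// transaction repeatedly.
+// TODO: eventually we need to handle Byzantine clusters
+//
+// No errors are expected during normal operation.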
+func IndexCollectionByTransaction(lctx lockctx.Proof, w storage.Writer, txID flow.Identifier, collectionID flow.Identifier) error {
+	if !lctx.HoldsLock(storage.LockInsertCollection) {
+		return fmt.Errorf("missing lock: %v", storage.LockInsertCollection)
+	}
+
+	return UpsertByKey(w, MakePrefix(codeIndexCollectionByTransaction, txID), collectionID)
+}
+
+// LookupCollectionByTransaction retrieves the collection ID for the collection that contains the specified transaction.
+// For every known transaction, this index should be populated.
+//
+// WARNING, this index is NOT BFT in its current form:
+// Honest clusters ensure a transaction can only belong to one collection. However, in rare
+// cases, the collector clusters can exceed byzantine thresholds -- making it possible to
+// produce multiple finalized collections (aka guaranteed collections) containing the same
+// transaction repeatedly.
+//
+// Expected errors during normal operations:
+//   - [storage.ErrNotFound] if `txID` does not refer to a known transaction
+func LookupCollectionByTransaction(r storage.Reader, txID flow.Identifier, collectionID *flow.Identifier) error {
+	return RetrieveByKey(r, MakePrefix(codeIndexCollectionByTransaction, txID), collectionID)
+}
+
+// RemoveCollectionTransactionIndices removes an entry in the index from transaction ID to collection containing the transaction.
+// CAUTION: this is for recovery purposes only, and should not be used during normal operations!
+// It returns nil if the index entry does not exist.
+// No errors are expected during normal operation.
+func RemoveCollectionTransactionIndices(w storage.Writer, txID flow.Identifier) error {
+	return RemoveByKey(w, MakePrefix(codeIndexCollectionByTransaction, txID))
+}
diff --git a/storage/operation/collections_test.go b/storage/operation/collections_test.go
new file mode 100644
index 00000000000..53f2ff46523
--- /dev/null
+++ b/storage/operation/collections_test.go
@@ -0,0 +1,97 @@
+package operation_test
+
+import (
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestCollections(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		expected := unittest.CollectionFixture(2).Light()
+		lockManager := storage.NewTestingLockManager()
+
+		t.Run("Retrieve nonexistent", func(t *testing.T) {
+			var actual flow.LightCollection
+			err := operation.RetrieveCollection(db.Reader(), expected.ID(), &actual)
+			assert.Error(t, err)
+			
assert.ErrorIs(t, err, storage.ErrNotFound) + + // Remove again should not error + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.RemoveCollection(rw.Writer(), expected.ID()) + }) + require.NoError(t, err) + }) + + t.Run("Index and lookup", func(t *testing.T) { + expected := unittest.CollectionFixture(1).Light() + blockID := unittest.IdentifierFixture() + + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := operation.UpsertCollection(rw.Writer(), expected) + assert.NoError(t, err) + return operation.IndexCollectionPayload(lctx, rw.Writer(), blockID, expected.Transactions) + }) + }) + require.NoError(t, err) + + actual := new(flow.LightCollection) + err = operation.LookupCollectionPayload(db.Reader(), blockID, &actual.Transactions) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + }) + + t.Run("Index and lookup by transaction ID", func(t *testing.T) { + expected := unittest.IdentifierFixture() + transactionID := unittest.IdentifierFixture() + actual := flow.Identifier{} + + err := unittest.WithLock(t, lockManager, storage.LockInsertCollection, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexCollectionByTransaction(lctx, rw.Writer(), transactionID, expected) + }) + }) + require.NoError(t, err) + + err = operation.LookupCollectionByTransaction(db.Reader(), transactionID, &actual) + assert.NoError(t, err) + + assert.Equal(t, expected, actual) + }) + }) +} diff --git a/storage/operation/commits.go b/storage/operation/commits.go new file mode 100644 index 00000000000..448a8543b6f --- /dev/null +++ b/storage/operation/commits.go @@ -0,0 +1,56 @@ +package operation + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// IndexStateCommitment indexes a state commitment by the block ID whose execution results in that state. +// The function ensures data integrity by first checking if a commitment already exists for the given block +// and rejecting overwrites with different values. This function is idempotent, i.e. repeated calls with the +// *initially* indexed value are no-ops. +// +// CAUTION: +// - Confirming that no value is already stored and the subsequent write must be atomic to prevent data corruption. +// The caller must acquire the [storage.LockInsertOwnReceipt] and hold it until the database write has been committed. +// +// Expected error returns during normal operations: +// - [storage.ErrDataMismatch] if a *different* state commitment is already indexed for the same block ID +func IndexStateCommitment(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, commit flow.StateCommitment) error { + if !lctx.HoldsLock(storage.LockInsertOwnReceipt) { + return fmt.Errorf("cannot index state commitment without holding lock %s", storage.LockInsertOwnReceipt) + } + + var existingCommit flow.StateCommitment + err := LookupStateCommitment(rw.GlobalReader(), blockID, &existingCommit) // on happy path, i.e. nothing stored yet, we expect `storage.ErrNotFound` + if err == nil { // Value for this key already exists! 
Need to check for data mismatch:
+		if existingCommit == commit {
+			return nil // The commit already exists, no need to index again
+		}
+		return fmt.Errorf("commit for block %v already exists with different value, (existing: %v, new: %v), %w", blockID, existingCommit, commit, storage.ErrDataMismatch)
+	} else if !errors.Is(err, storage.ErrNotFound) {
+		return fmt.Errorf("could not check existing state commitment: %w", err)
+	}
+
+	return UpsertByKey(rw.Writer(), MakePrefix(codeCommit, blockID), commit)
+}
+
+// LookupStateCommitment retrieves a state commitment by the block ID whose execution results in that state.
+// Expected error returns during normal operations:
+//   - [storage.ErrNotFound] if no state commitment is indexed for the specified block ID
+func LookupStateCommitment(r storage.Reader, blockID flow.Identifier, commit *flow.StateCommitment) error {
+	return RetrieveByKey(r, MakePrefix(codeCommit, blockID), commit)
+}
+
+// RemoveStateCommitment removes the state commitment by block ID.
+// CAUTION: this is for recovery purposes only, and should not be used during normal operations!
+// It returns nil if no state commitment for the given blockID was previously indexed.
+// No errors are expected during normal operation.
+func RemoveStateCommitment(w storage.Writer, blockID flow.Identifier) error {
+	return RemoveByKey(w, MakePrefix(codeCommit, blockID))
+}
diff --git a/storage/operation/commits_test.go b/storage/operation/commits_test.go
new file mode 100644
index 00000000000..90545dde3ca
--- /dev/null
+++ b/storage/operation/commits_test.go
@@ -0,0 +1,34 @@
+package operation_test
+
+import (
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestStateCommitments(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		expected := unittest.StateCommitmentFixture()
+		id := unittest.IdentifierFixture()
+
+		err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.IndexStateCommitment(lctx, rw, id, expected)
+			})
+		})
+		require.NoError(t, err)
+
+		var actual flow.StateCommitment
+		err = operation.LookupStateCommitment(db.Reader(), id, &actual)
+		require.NoError(t, err)
+		require.Equal(t, expected, actual)
+	})
+}
diff --git a/storage/operation/computation_result.go b/storage/operation/computation_result.go
new file mode 100644
index 00000000000..fae8a241fcb
--- /dev/null
+++ b/storage/operation/computation_result.go
@@ -0,0 +1,44 @@
+package operation
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// UpsertComputationResultUploadStatus upserts the upload status of the ComputationResult for the given block ID.
+func UpsertComputationResultUploadStatus(w storage.Writer, blockID flow.Identifier,
+	wasUploadCompleted bool) error {
+	return UpsertByKey(w, MakePrefix(codeComputationResults, blockID), wasUploadCompleted)
+}
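The upload status is a simple per-block boolean: it starts as false when execution completes and flips to true once the result has been uploaded. A hypothetical in-package sketch of how a consumer could derive the upload backlog from it, using the prefix traversal defined further below:

// pendingUploads is a hypothetical helper: it lists all blocks whose
// ComputationResult has not been uploaded yet (status == false), using
// GetBlockIDsByStatus defined below.
func pendingUploads(r storage.Reader) ([]flow.Identifier, error) {
	var pending []flow.Identifier
	err := GetBlockIDsByStatus(r, &pending, false)
	if err != nil {
		return nil, fmt.Errorf("could not list pending uploads: %w", err)
	}
	return pending, nil
}

+
+// RemoveComputationResultUploadStatus removes the upload status of the ComputationResult for the given block ID.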
+func RemoveComputationResultUploadStatus(
+	w storage.Writer,
+	blockID flow.Identifier) error {
+	return RemoveByKey(w, MakePrefix(codeComputationResults, blockID))
+}
+
+// GetComputationResultUploadStatus returns the stored upload status of the ComputationResult for the given block ID.
+func GetComputationResultUploadStatus(r storage.Reader, blockID flow.Identifier,
+	wasUploadCompleted *bool) error {
+	return RetrieveByKey(r, MakePrefix(codeComputationResults, blockID), wasUploadCompleted)
+}
+
+// GetBlockIDsByStatus returns the IDs of all blocks whose stored ComputationResult upload status matches targetUploadStatus.
+func GetBlockIDsByStatus(r storage.Reader, blockIDs *[]flow.Identifier,
+	targetUploadStatus bool) error {
+	iterationFunc := func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) {
+		var wasUploadCompleted bool
+		err = getValue(&wasUploadCompleted)
+		if err != nil {
+			return true, err
+		}
+
+		if wasUploadCompleted == targetUploadStatus {
+			*blockIDs = append(*blockIDs, flow.HashToID(keyCopy[1:]))
+		}
+		return false, nil
+	}
+
+	return TraverseByPrefix(r, MakePrefix(codeComputationResults), iterationFunc, storage.DefaultIteratorOptions())
+}
diff --git a/storage/operation/computation_result_test.go b/storage/operation/computation_result_test.go
new file mode 100644
index 00000000000..752dab102b5
--- /dev/null
+++ b/storage/operation/computation_result_test.go
@@ -0,0 +1,114 @@
+package operation_test
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/engine/execution"
+	"github.com/onflow/flow-go/engine/execution/testutil"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+)
+
+func TestUpsertAndRetrieveComputationResultUpdateStatus(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		expected := testutil.ComputationResultFixture(t)
+		expectedId := expected.ExecutableBlock.BlockID()
+
+		t.Run("Upsert ComputationResult", func(t *testing.T) {
+			// first upsert as false
+			testUploadStatusVal := false
+
+			err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.UpsertComputationResultUploadStatus(rw.Writer(), expectedId, testUploadStatusVal)
+			})
+			require.NoError(t, err)
+
+			var actualUploadStatus bool
+			err = operation.GetComputationResultUploadStatus(db.Reader(), expectedId, &actualUploadStatus)
+			require.NoError(t, err)
+
+			assert.Equal(t, testUploadStatusVal, actualUploadStatus)
+
+			// upsert to true
+			testUploadStatusVal = true
+			err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.UpsertComputationResultUploadStatus(rw.Writer(), expectedId, testUploadStatusVal)
+			})
+			require.NoError(t, err)
+
+			// check if value is updated
+			err = operation.GetComputationResultUploadStatus(db.Reader(), expectedId, &actualUploadStatus)
+			require.NoError(t, err)
+
+			assert.Equal(t, testUploadStatusVal, actualUploadStatus)
+		})
+	})
+}
+
+func TestRemoveComputationResultUploadStatus(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		expected := testutil.ComputationResultFixture(t)
+		expectedId := expected.ExecutableBlock.BlockID()
+
+		t.Run("Remove ComputationResult", func(t *testing.T) {
+			testUploadStatusVal := true
+
+			err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.UpsertComputationResultUploadStatus(rw.Writer(), expectedId, testUploadStatusVal)
+			
}) + require.NoError(t, err) + + var actualUploadStatus bool + err = operation.GetComputationResultUploadStatus(db.Reader(), expectedId, &actualUploadStatus) + require.NoError(t, err) + + assert.Equal(t, testUploadStatusVal, actualUploadStatus) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.RemoveComputationResultUploadStatus(rw.Writer(), expectedId) + }) + require.NoError(t, err) + + err = operation.GetComputationResultUploadStatus(db.Reader(), expectedId, &actualUploadStatus) + assert.NotNil(t, err) + }) + }) +} + +func TestListComputationResults(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + expected := [...]*execution.ComputationResult{ + testutil.ComputationResultFixture(t), + testutil.ComputationResultFixture(t), + } + t.Run("List all ComputationResult with status True", func(t *testing.T) { + expectedIDs := make(map[string]bool, 0) + // Store a list of ComputationResult instances first + for _, cr := range expected { + expectedId := cr.ExecutableBlock.BlockID() + expectedIDs[expectedId.String()] = true + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertComputationResultUploadStatus(rw.Writer(), expectedId, true) + }) + require.NoError(t, err) + } + + // Get the list of IDs of stored ComputationResult + crIDs := make([]flow.Identifier, 0) + err := operation.GetBlockIDsByStatus(db.Reader(), &crIDs, true) + require.NoError(t, err) + crIDsStrMap := make(map[string]bool, 0) + for _, crID := range crIDs { + crIDsStrMap[crID.String()] = true + } + + assert.True(t, reflect.DeepEqual(crIDsStrMap, expectedIDs)) + }) + }) +} diff --git a/storage/operation/consume_progress.go b/storage/operation/consume_progress.go new file mode 100644 index 00000000000..177f9a79f30 --- /dev/null +++ b/storage/operation/consume_progress.go @@ -0,0 +1,15 @@ +package operation + +import ( + "github.com/onflow/flow-go/storage" +) + +// RetrieveProcessedIndex returns the processed index for a job consumer +func RetrieveProcessedIndex(r storage.Reader, jobName string, processed *uint64) error { + return RetrieveByKey(r, MakePrefix(codeJobConsumerProcessed, jobName), processed) +} + +// SetProcessedIndex updates the processed index for a job consumer with given index +func SetProcessedIndex(w storage.Writer, jobName string, processed uint64) error { + return UpsertByKey(w, MakePrefix(codeJobConsumerProcessed, jobName), processed) +} diff --git a/storage/operation/dbtest/helper.go b/storage/operation/dbtest/helper.go new file mode 100644 index 00000000000..81992a5af10 --- /dev/null +++ b/storage/operation/dbtest/helper.go @@ -0,0 +1,164 @@ +package dbtest + +import ( + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/utils/unittest" +) + +// helper types and functions +type WithWriter func(func(storage.Writer) error) error + +func RunWithStorages(t *testing.T, fn func(*testing.T, storage.Reader, WithWriter)) { + RunWithBadger(t, fn) + RunWithPebble(t, fn) +} + +func RunWithDB(t *testing.T, fn func(*testing.T, storage.DB)) { + t.Run("BadgerStorage", func(t *testing.T) { + unittest.RunWithBadgerDB(t, func(db *badger.DB) { + fn(t, badgerimpl.ToDB(db)) + }) + }) + + t.Run("PebbleStorage", func(t 
*testing.T) {
+		unittest.RunWithPebbleDB(t, func(db *pebble.DB) {
+			fn(t, pebbleimpl.ToDB(db))
+		})
+	})
+}
+
+// RunFuncsWithNewDBHandle runs provided functions with
+// new database handles of the same underlying database.
+// Each provided function will receive a new (different) DB handle.
+// This can be used to test database persistence.
+func RunFuncsWithNewDBHandle(t *testing.T, fn ...func(*testing.T, storage.DB)) {
+	t.Run("BadgerStorage", func(t *testing.T) {
+		RunFuncsWithNewBadgerDBHandle(t, fn...)
+	})
+
+	t.Run("PebbleStorage", func(t *testing.T) {
+		RunFuncsWithNewPebbleDBHandle(t, fn...)
+	})
+}
+
+func RunWithBadger(t *testing.T, fn func(*testing.T, storage.Reader, WithWriter)) {
+	t.Run("BadgerStorage", func(t *testing.T) {
+		unittest.RunWithBadgerDB(t, runWithBadger(func(r storage.Reader, wr WithWriter) {
+			fn(t, r, wr)
+		}))
+	})
+}
+
+func RunWithPebble(t *testing.T, fn func(*testing.T, storage.Reader, WithWriter)) {
+	t.Run("PebbleStorage", func(t *testing.T) {
+		unittest.RunWithPebbleDB(t, runWithPebble(func(r storage.Reader, wr WithWriter) {
+			fn(t, r, wr)
+		}))
+	})
}
+
+func RunWithPebbleDB(t *testing.T, opts *pebble.Options, fn func(*testing.T, storage.Reader, WithWriter, string, *pebble.DB)) {
+	t.Run("PebbleStorage", func(t *testing.T) {
+		unittest.RunWithTempDir(t, func(dir string) {
+			db, err := pebble.Open(dir, opts)
+			require.NoError(t, err)
+			defer func() {
+				require.NoError(t, db.Close())
+			}()
+
+			runWithPebble(func(r storage.Reader, w WithWriter) {
+				fn(t, r, w, dir, db)
+			})(db)
+		})
+	})
+}
+
+func BenchWithStorages(t *testing.B, fn func(*testing.B, storage.Reader, WithWriter)) {
+	t.Run("BadgerStorage", func(t *testing.B) {
+		unittest.RunWithBadgerDB(t, runWithBadger(func(r storage.Reader, wr WithWriter) {
+			fn(t, r, wr)
+		}))
+	})
+
+	t.Run("PebbleStorage", func(t *testing.B) {
+		unittest.RunWithPebbleDB(t, runWithPebble(func(r storage.Reader, wr WithWriter) {
+			fn(t, r, wr)
+		}))
+	})
+}
+
+func runWithBadger(fn func(storage.Reader, WithWriter)) func(*badger.DB) {
+	return func(db *badger.DB) {
+		withWriter := func(writing func(storage.Writer) error) error {
+			return badgerimpl.ToDB(db).WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return writing(rw.Writer())
+			})
+		}
+
+		reader := badgerimpl.ToReader(db)
+		fn(reader, withWriter)
+	}
+}
+
+func runWithPebble(fn func(storage.Reader, WithWriter)) func(*pebble.DB) {
+	return func(db *pebble.DB) {
+		withWriter := func(writing func(storage.Writer) error) error {
+			return pebbleimpl.ToDB(db).WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return writing(rw.Writer())
+			})
+		}
+
+		reader := pebbleimpl.ToReader(db)
+		fn(reader, withWriter)
+	}
+}
+
+// RunFuncsWithNewBadgerDBHandle runs provided functions with
+// new BadgerDB handles of the same underlying database.
+// Each provided function will receive a new (different) DB handle.
+// This can be used to test database persistence.
+func RunFuncsWithNewBadgerDBHandle(t *testing.T, fs ...func(*testing.T, storage.DB)) {
+	unittest.RunWithTempDir(t, func(dir string) {
+		// Run provided functions with new DB handles of the same underlying database.
+		for _, f := range fs {
+			// Open BadgerDB
+			db := unittest.BadgerDB(t, dir)
+
+			// Run provided function
+			f(t, badgerimpl.ToDB(db))
+
+			// Close BadgerDB
+			assert.NoError(t, db.Close())
+		}
+	})
+}
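For reference, a sketch of how a package-external test (in the style of the operation_test files above) would use these helpers to exercise both backends. `TestProcessedIndexRoundTrip` and the job name "example-job" are hypothetical; the reads and writes use the consumer-progress operations defined earlier in this changeset.

// TestProcessedIndexRoundTrip is a hypothetical test: RunWithDB executes the
// body once against Badger and once against Pebble.
func TestProcessedIndexRoundTrip(t *testing.T) {
	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
		// write the processed index for a made-up job name
		err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
			return operation.SetProcessedIndex(rw.Writer(), "example-job", 42)
		})
		require.NoError(t, err)

		// read it back through a plain reader
		var processed uint64
		require.NoError(t, operation.RetrieveProcessedIndex(db.Reader(), "example-job", &processed))
		require.Equal(t, uint64(42), processed)
	})
}

+
+// RunFuncsWithNewPebbleDBHandle runs provided functions with
+// new Pebble handles of the same underlying database.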
+// Each provided function will receive a new (different) DB handle.
+// This can be used to test database persistence.
+func RunFuncsWithNewPebbleDBHandle(t *testing.T, fs ...func(*testing.T, storage.DB)) {
+	unittest.RunWithTempDir(t, func(dir string) {
+		// Run each provided function with a new DB handle of the same underlying database.
+		for _, f := range fs {
+			// Open Pebble
+			db, err := pebble.Open(dir, &pebble.Options{})
+			require.NoError(t, err)
+
+			// Call provided function
+			f(t, pebbleimpl.ToDB(db))
+
+			// Close Pebble
+			assert.NoError(t, db.Close())
+		}
+	})
+}
diff --git a/storage/operation/epoch.go b/storage/operation/epoch.go
new file mode 100644
index 00000000000..1735fe2afce
--- /dev/null
+++ b/storage/operation/epoch.go
@@ -0,0 +1,22 @@
+package operation
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+func InsertEpochSetup(w storage.Writer, eventID flow.Identifier, event *flow.EpochSetup) error {
+	return UpsertByKey(w, MakePrefix(codeEpochSetup, eventID), event)
+}
+
+func RetrieveEpochSetup(r storage.Reader, eventID flow.Identifier, event *flow.EpochSetup) error {
+	return RetrieveByKey(r, MakePrefix(codeEpochSetup, eventID), event)
+}
+
+func InsertEpochCommit(w storage.Writer, eventID flow.Identifier, event *flow.EpochCommit) error {
+	return UpsertByKey(w, MakePrefix(codeEpochCommit, eventID), event)
+}
+
+func RetrieveEpochCommit(r storage.Reader, eventID flow.Identifier, event *flow.EpochCommit) error {
+	return RetrieveByKey(r, MakePrefix(codeEpochCommit, eventID), event)
+}
diff --git a/storage/operation/epoch_protocol_state.go b/storage/operation/epoch_protocol_state.go
new file mode 100644
index 00000000000..a8cbe27a985
--- /dev/null
+++ b/storage/operation/epoch_protocol_state.go
@@ -0,0 +1,49 @@
+package operation
+
+import (
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// InsertEpochProtocolState inserts an epoch protocol state entry by ID.
+// Error returns:
+//   - generic error in case of unexpected failure from the database layer or encoding failure.
+func InsertEpochProtocolState(w storage.Writer, entryID flow.Identifier, entry *flow.MinEpochStateEntry) error {
+	return UpsertByKey(w, MakePrefix(codeEpochProtocolState, entryID), entry)
+}
+
+// RetrieveEpochProtocolState retrieves an epoch protocol state entry by ID.
+// Error returns:
+//   - storage.ErrNotFound if the key does not exist in the database
+func RetrieveEpochProtocolState(r storage.Reader, entryID flow.Identifier, entry *flow.MinEpochStateEntry) error {
+	return RetrieveByKey(r, MakePrefix(codeEpochProtocolState, entryID), entry)
+}
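The entry itself is content-addressed (keyed by its own ID), while the by-block index defined next requires the block-insertion lock. A hypothetical in-package sketch combining the two steps within one batch:

// storeAndIndexEpochState is a hypothetical helper: it persists a
// content-addressed epoch protocol state entry (no lock required) and then
// indexes it under the block that introduced it (lock required).
func storeAndIndexEpochState(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, entry *flow.MinEpochStateEntry) error {
	entryID := entry.ID()
	err := InsertEpochProtocolState(rw.Writer(), entryID, entry)
	if err != nil {
		return fmt.Errorf("could not insert epoch protocol state entry: %w", err)
	}
	return IndexEpochProtocolState(lctx, rw.Writer(), blockID, entryID)
}

+
+// IndexEpochProtocolState indexes an epoch protocol state entry by block ID.
+//
+// CAUTION:
+//   - The caller must acquire the lock [storage.LockInsertBlock] and hold it until the database write has been committed.
+//   - OVERWRITES existing data (potential for data corruption):
+//     The lock proof serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done
+//     elsewhere, ATOMICALLY with this write operation. Currently, this is done by operation.InsertHeader, which checks
+//     that the blockID is new; therefore any data indexed by this blockID is new as well.
+//
+// No error returns are expected during normal operation.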
+func IndexEpochProtocolState(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + + return UpsertByKey(w, MakePrefix(codeEpochProtocolStateByBlockID, blockID), epochProtocolStateEntryID) +} + +// LookupEpochProtocolState finds an epoch protocol state entry ID by block ID. +// Error returns: +// - storage.ErrNotFound if the key does not exist in the database +func LookupEpochProtocolState(r storage.Reader, blockID flow.Identifier, epochProtocolStateEntryID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeEpochProtocolStateByBlockID, blockID), epochProtocolStateEntryID) +} diff --git a/storage/operation/epoch_protocol_state_test.go b/storage/operation/epoch_protocol_state_test.go new file mode 100644 index 00000000000..44b5611e1fc --- /dev/null +++ b/storage/operation/epoch_protocol_state_test.go @@ -0,0 +1,101 @@ +package operation_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestInsertEpochProtocolState verifies that basic database operations on EpochProtocolState work as expected. +func TestInsertEpochProtocolState(t *testing.T) { + lockManager := storage.NewTestingLockManager() + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + expected := unittest.EpochStateFixture().MinEpochStateEntry + + epochProtocolStateEntryID := expected.ID() + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertEpochProtocolState(rw.Writer(), epochProtocolStateEntryID, expected) + }) + require.NoError(t, err) + + var actual flow.MinEpochStateEntry + err = operation.RetrieveEpochProtocolState(db.Reader(), epochProtocolStateEntryID, &actual) + require.NoError(t, err) + + assert.Equal(t, expected, &actual) + + blockID := unittest.IdentifierFixture() + require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexEpochProtocolState(lctx, rw.Writer(), blockID, epochProtocolStateEntryID) + }) + })) + + var actualProtocolStateID flow.Identifier + err = operation.LookupEpochProtocolState(db.Reader(), blockID, &actualProtocolStateID) + require.NoError(t, err) + + assert.Equal(t, epochProtocolStateEntryID, actualProtocolStateID) + }) +} + +// TestIndexEpochProtocolStateLockValidation tests that IndexEpochProtocolState properly validates lock requirements.
+func TestIndexEpochProtocolStateLockValidation(t *testing.T) { + lockManager := storage.NewTestingLockManager() + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + blockID := unittest.IdentifierFixture() + epochProtocolStateEntryID := unittest.IdentifierFixture() + + t.Run("should error when no lock is held", func(t *testing.T) { + // Create a context without any locks + lctx := lockManager.NewContext() + defer lctx.Release() + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexEpochProtocolState(lctx, rw.Writer(), blockID, epochProtocolStateEntryID) + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "missing required lock") + assert.Contains(t, err.Error(), storage.LockInsertBlock) + }) + + t.Run("should error when different lock is held", func(t *testing.T) { + // Acquire a different lock (not LockInsertBlock) + err := unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexEpochProtocolState(lctx, rw.Writer(), blockID, epochProtocolStateEntryID) + }) + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "missing required lock") + assert.Contains(t, err.Error(), storage.LockInsertBlock) + }) + + t.Run("should succeed when correct lock is held", func(t *testing.T) { + // This test case verifies that the function works correctly when the proper lock is held + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexEpochProtocolState(lctx, rw.Writer(), blockID, epochProtocolStateEntryID) + }) + }) + + require.NoError(t, err) + + // Verify the indexing worked by looking up the entry + var actualProtocolStateID flow.Identifier + err = operation.LookupEpochProtocolState(db.Reader(), blockID, &actualProtocolStateID) + require.NoError(t, err) + assert.Equal(t, epochProtocolStateEntryID, actualProtocolStateID) + }) + }) +} diff --git a/storage/operation/events.go b/storage/operation/events.go new file mode 100644 index 00000000000..f07467fe6db --- /dev/null +++ b/storage/operation/events.go @@ -0,0 +1,75 @@ +package operation + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +func eventPrefix(prefix byte, blockID flow.Identifier, event flow.Event) []byte { + return MakePrefix(prefix, blockID, event.TransactionID, event.TransactionIndex, event.EventIndex) +} + +func InsertEvent(w storage.Writer, blockID flow.Identifier, event flow.Event) error { + return UpsertByKey(w, eventPrefix(codeEvent, blockID, event), event) +} + +func InsertServiceEvent(w storage.Writer, blockID flow.Identifier, event flow.Event) error { + return UpsertByKey(w, eventPrefix(codeServiceEvent, blockID, event), event) +} + +func RetrieveEvents(r storage.Reader, blockID flow.Identifier, transactionID flow.Identifier, events *[]flow.Event) error { + iterationFunc := eventIterationFunc(events) + return TraverseByPrefix(r, MakePrefix(codeEvent, blockID, transactionID), iterationFunc, storage.DefaultIteratorOptions()) +} + +func LookupEventsByBlockID(r storage.Reader, blockID flow.Identifier, events *[]flow.Event) error { + iterationFunc := eventIterationFunc(events) + return TraverseByPrefix(r, MakePrefix(codeEvent, blockID), iterationFunc, storage.DefaultIteratorOptions()) +} + +func LookupServiceEventsByBlockID(r 
storage.Reader, blockID flow.Identifier, events *[]flow.Event) error { + iterationFunc := eventIterationFunc(events) + return TraverseByPrefix(r, MakePrefix(codeServiceEvent, blockID), iterationFunc, storage.DefaultIteratorOptions()) +} + +func LookupEventsByBlockIDEventType(r storage.Reader, blockID flow.Identifier, eventType flow.EventType, events *[]flow.Event) error { + iterationFunc := eventFilterIterationFunc(events, eventType) + return TraverseByPrefix(r, MakePrefix(codeEvent, blockID), iterationFunc, storage.DefaultIteratorOptions()) +} + +func RemoveServiceEventsByBlockID(r storage.Reader, w storage.Writer, blockID flow.Identifier) error { + return RemoveByKeyPrefix(r, w, MakePrefix(codeServiceEvent, blockID)) +} + +func RemoveEventsByBlockID(r storage.Reader, w storage.Writer, blockID flow.Identifier) error { + return RemoveByKeyPrefix(r, w, MakePrefix(codeEvent, blockID)) +} + +// eventIterationFunc returns an iteration function which appends all events found during traversal to the given slice +func eventIterationFunc(events *[]flow.Event) IterationFunc { + return func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { + var event flow.Event + err = getValue(&event) + if err != nil { + return true, err + } + *events = append(*events, event) + return false, nil + } +} + +// eventFilterIterationFunc returns an iteration function which appends only events of the given event type to the given slice +func eventFilterIterationFunc(events *[]flow.Event, eventType flow.EventType) IterationFunc { + return func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { + var event flow.Event + err = getValue(&event) + if err != nil { + return true, err + } + // filter out all events not of type eventType + if event.Type == eventType { + *events = append(*events, event) + } + return false, nil + } +} diff --git a/storage/operation/events_test.go b/storage/operation/events_test.go new file mode 100644 index 00000000000..272ccea1410 --- /dev/null +++ b/storage/operation/events_test.go @@ -0,0 +1,137 @@ +package operation_test + +import ( + "bytes" + "testing" + + "golang.org/x/exp/slices" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestRetrieveEventByBlockIDTxID tests event insertion, event retrieval by block id, block id and transaction id, +// and block id and event type +func TestRetrieveEventByBlockIDTxID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + + // create block ids, transaction ids and event types slices + blockIDs := []flow.Identifier{flow.HashToID([]byte{0x01}), flow.HashToID([]byte{0x02})} + txIDs := []flow.Identifier{flow.HashToID([]byte{0x11}), flow.HashToID([]byte{0x12})} + eTypes := []flow.EventType{flow.EventAccountCreated, flow.EventAccountUpdated} + + // create map of block id to event, tx id to event and event type to event + blockMap := make(map[string][]flow.Event) + txMap := make(map[string][]flow.Event) + typeMap := make(map[string][]flow.Event) + + // initialize the maps and the db + for _, b := range blockIDs { + + bEvents := make([]flow.Event, 0) + + // all blocks share the same transactions + for i, tx := range txIDs { + + tEvents := make([]flow.Event, 0) + + // create one event for each possible event type + for j, etype := range eTypes { + + eEvents :=
make([]flow.Event, 0) + + event := unittest.EventFixture( + unittest.Event.WithEventType(etype), + unittest.Event.WithTransactionIndex(uint32(i)), + unittest.Event.WithEventIndex(uint32(j)), + unittest.Event.WithTransactionID(tx), + ) + + // insert event into the db + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertEvent(rw.Writer(), b, event) + }) + require.NoError(t, err) + + // update event arrays in the maps + bEvents = append(bEvents, event) + tEvents = append(tEvents, event) + eEvents = append(eEvents, event) + + key := b.String() + "_" + string(etype) + if _, ok := typeMap[key]; ok { + typeMap[key] = append(typeMap[key], eEvents...) + } else { + typeMap[key] = eEvents + } + } + txMap[b.String()+"_"+tx.String()] = tEvents + } + blockMap[b.String()] = bEvents + } + + assertFunc := func(err error, expected []flow.Event, actual []flow.Event) { + require.NoError(t, err) + sortEvent(expected) + sortEvent(actual) + require.Equal(t, expected, actual) + } + + t.Run("retrieve events by Block ID", func(t *testing.T) { + for _, b := range blockIDs { + var actualEvents = make([]flow.Event, 0) + + // lookup events by block id + err := operation.LookupEventsByBlockID(db.Reader(), b, &actualEvents) + + expectedEvents := blockMap[b.String()] + assertFunc(err, expectedEvents, actualEvents) + } + }) + + t.Run("retrieve events by block ID and transaction ID", func(t *testing.T) { + for _, b := range blockIDs { + for _, tid := range txIDs { + var actualEvents = make([]flow.Event, 0) + + // lookup events by block id and transaction id + err := operation.RetrieveEvents(db.Reader(), b, tid, &actualEvents) + + expectedEvents := txMap[b.String()+"_"+tid.String()] + assertFunc(err, expectedEvents, actualEvents) + } + } + }) + + t.Run("retrieve events by block ID and event type", func(t *testing.T) { + for _, b := range blockIDs { + for _, et := range eTypes { + var actualEvents = make([]flow.Event, 0) + + // lookup events by block id and event type + err := operation.LookupEventsByBlockIDEventType(db.Reader(), b, et, &actualEvents) + + expectedEvents := typeMap[b.String()+"_"+string(et)] + assertFunc(err, expectedEvents, actualEvents) + } + } + }) + }) +} + +// Event retrieval does not guarantee any order. +// Hence, we sort the events for comparing the expected and actual events. +func sortEvent(events []flow.Event) { + slices.SortFunc(events, func(i, j flow.Event) int { + tComp := bytes.Compare(i.TransactionID[:], j.TransactionID[:]) + if tComp != 0 { + return tComp + } + return int(i.EventIndex) - int(j.EventIndex) + }) +} diff --git a/storage/operation/executed.go b/storage/operation/executed.go new file mode 100644 index 00000000000..f8f2c1d5d17 --- /dev/null +++ b/storage/operation/executed.go @@ -0,0 +1,44 @@ +package operation + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// UpdateExecutedBlock updates the pointer to the Execution Node's OWN highest executed block. We +// overwrite the block ID of the most recently executed block, regardless of whether this block may +// later be orphaned or is already orphaned. +// +// ## Usage Context +// - The stored "last executed block" may reference a block on a fork that is later orphaned. +// - This is acceptable and expected: the index is intended for reporting execution metrics and +// for optimizing the loading of unexecuted blocks on node startup.
+// - On startup, the Execution Node may use the latest executed block as a hint on where to +// restart the execution. It MUST traverse from the last executed block in the direction of decreasing +// height. It will eventually reach a block with a finalized seal. From this block, the Execution +// Node should restart its execution and cover _all_ descendants (that are not orphaned). Thereby, +// we guarantee that even if the stored block is on a fork, we eventually also cover all blocks +// that are finalized, as well as the most recent, still unfinalized blocks. +// - If the block referenced as "highest executed block" is not on the canonical chain, the Execution +// Node may (re-)execute some blocks unnecessarily, but this does not affect correctness. +// +// ## Limitations & Edge Cases +// - The value is not guaranteed to be on the finalized chain. +// - Forks of arbitrary length may occur; the stored block may be on any such fork. +// +// ## Correct Usage +// - Use for metrics (e.g., reporting latest executed block height). +// - Use for optimizing block execution on startup (as a performance hint). +// +// ## Incorrect Usage +// - Do not use as a source of truth for canonical chain state. +// - Do not disregard blocks with lower heights as not needing execution. +// +// See project documentation in `engine/execution/ingestion/loader/unexecuted_loader.go` for details on startup traversal logic. +func UpdateExecutedBlock(w storage.Writer, blockID flow.Identifier) error { + return UpsertByKey(w, MakePrefix(codeExecutedBlock), blockID) +} + +func RetrieveExecutedBlock(r storage.Reader, blockID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeExecutedBlock), blockID) +} diff --git a/storage/operation/execution_fork_evidence.go b/storage/operation/execution_fork_evidence.go new file mode 100644 index 00000000000..bcaa8b6fdac --- /dev/null +++ b/storage/operation/execution_fork_evidence.go @@ -0,0 +1,55 @@ +package operation + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// NOTE: the following functions have the same functionality as +// the corresponding BadgerDB-specific implementations in +// badger/operation/payload.go + +// HasExecutionForkEvidence checks if a conflicting seals record exists in the database. +// No errors are expected during normal operations. +func HasExecutionForkEvidence(r storage.Reader) (bool, error) { + return KeyExists(r, MakePrefix(codeExecutionFork)) +} + +// RetrieveExecutionForkEvidence reads conflicting seals from the database. +// It returns `storage.ErrNotFound` error if no database record is present. +func RetrieveExecutionForkEvidence(r storage.Reader, conflictingSeals *[]*flow.IncorporatedResultSeal) error { + return RetrieveByKey(r, MakePrefix(codeExecutionFork), conflictingSeals) +} + +// RemoveExecutionForkEvidence deletes the conflicting seals record from the database. +// No errors are expected during normal operations. +func RemoveExecutionForkEvidence(w storage.Writer) error { + return RemoveByKey(w, MakePrefix(codeExecutionFork)) +} + +// InsertExecutionForkEvidence upserts conflicting seals to the database. +// If a record already exists, it is NOT overwritten; the new record is ignored. +// The caller must hold the [storage.LockInsertExecutionForkEvidence] lock. +// No errors are expected during normal operations.
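+// +// Illustrative usage sketch (assumes lockManager, db, and conflictingSeals exist in the caller's scope): +// +// lctx := lockManager.NewContext() +// defer lctx.Release() +// if err := lctx.AcquireLock(storage.LockInsertExecutionForkEvidence); err != nil { return err } +// return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { +// return InsertExecutionForkEvidence(lctx, rw, conflictingSeals) +// })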
+func InsertExecutionForkEvidence(lctx lockctx.Proof, rw storage.ReaderBatchWriter, conflictingSeals []*flow.IncorporatedResultSeal) error { + if !lctx.HoldsLock(storage.LockInsertExecutionForkEvidence) { + return fmt.Errorf("InsertExecutionForkEvidence requires LockInsertExecutionForkEvidence to be held") + } + key := MakePrefix(codeExecutionFork) + exist, err := KeyExists(rw.GlobalReader(), key) + if err != nil { + return fmt.Errorf("failed to check if execution fork evidence exists: %w", err) + } + + if exist { + // Some evidence about execution fork already stored; + // We only keep the first evidence => nothing more to do + return nil + } + + return UpsertByKey(rw.Writer(), key, conflictingSeals) +} diff --git a/storage/operation/execution_fork_evidence_test.go b/storage/operation/execution_fork_evidence_test.go new file mode 100644 index 00000000000..5d0b91d8e14 --- /dev/null +++ b/storage/operation/execution_fork_evidence_test.go @@ -0,0 +1,64 @@ +package operation_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" + + "github.com/stretchr/testify/require" +) + +func Test_ExecutionForkEvidenceOperations(t *testing.T) { + t.Run("Retrieving non-existing evidence should return 'storage.ErrNotFound'", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + var conflictingSeals []*flow.IncorporatedResultSeal + err := operation.RetrieveExecutionForkEvidence(db.Reader(), &conflictingSeals) + require.ErrorIs(t, err, storage.ErrNotFound) + + exists, err := operation.HasExecutionForkEvidence(db.Reader()) + require.NoError(t, err) + require.False(t, exists) + }) + }) + + t.Run("Write evidence and retrieve", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + exists, err := operation.HasExecutionForkEvidence(db.Reader()) + require.NoError(t, err) + require.False(t, exists) + + block := unittest.BlockFixture() + + conflictingSeals := make([]*flow.IncorporatedResultSeal, 2) + for i := range len(conflictingSeals) { + conflictingSeals[i] = unittest.IncorporatedResultSeal.Fixture( + unittest.IncorporatedResultSeal.WithResult( + unittest.ExecutionResultFixture( + unittest.WithBlock(block)))) + } + + err = unittest.WithLock(t, lockManager, storage.LockInsertExecutionForkEvidence, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertExecutionForkEvidence(lctx, rw, conflictingSeals) + }) + }) + require.NoError(t, err) + + var b []*flow.IncorporatedResultSeal + err = operation.RetrieveExecutionForkEvidence(db.Reader(), &b) + require.NoError(t, err) + require.Equal(t, conflictingSeals, b) + + exists, err = operation.HasExecutionForkEvidence(db.Reader()) + require.NoError(t, err) + require.True(t, exists) + }) + }) +} diff --git a/storage/operation/guarantees.go b/storage/operation/guarantees.go new file mode 100644 index 00000000000..c518d394b36 --- /dev/null +++ b/storage/operation/guarantees.go @@ -0,0 +1,101 @@ +package operation + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertGuarantee inserts a collection guarantee by ID. 
+// +// CAUTION: The caller must ensure guaranteeID is a collision-resistant hash of the provided +// guarantee! This method silently overrides existing data, which is safe only if, for the same +// key, we always write the same value. +// +// No errors expected during normal operations. +func InsertGuarantee(w storage.Writer, guaranteeID flow.Identifier, guarantee *flow.CollectionGuarantee) error { + return UpsertByKey(w, MakePrefix(codeGuarantee, guaranteeID), guarantee) +} + +// IndexGuarantee inserts a [flow.CollectionGuarantee] into the database, keyed by the collection ID. +// +// Expected errors during normal operations: +// - [storage.ErrDataMismatch] if a different [flow.CollectionGuarantee] has already been indexed for the given collection ID. +// - All other errors have to be treated as unexpected failures from the database layer. +func IndexGuarantee(lctx lockctx.Proof, rw storage.ReaderBatchWriter, collectionID flow.Identifier, guaranteeID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index guarantee for collectionID %v without holding lock %s", + collectionID, storage.LockInsertBlock) + } + + var storedGuaranteeID flow.Identifier + err := LookupGuarantee(rw.GlobalReader(), collectionID, &storedGuaranteeID) + if err == nil { + if storedGuaranteeID != guaranteeID { + return fmt.Errorf("new guarantee %x did not match already stored guarantee %x, for collection %x: %w", + guaranteeID, storedGuaranteeID, collectionID, storage.ErrDataMismatch) + } + return nil + } + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to retrieve existing guarantee for collection %x: %w", collectionID, err) + } + + return UpsertByKey(rw.Writer(), MakePrefix(codeGuaranteeByCollectionID, collectionID), guaranteeID) +} + +// RetrieveGuarantee retrieves a [flow.CollectionGuarantee] by the collection ID. +// For every collection that has been guaranteed, this data should be populated. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `collID` does not refer to a known guaranteed collection +// - All other errors have to be treated as unexpected failures from the database layer. +func RetrieveGuarantee(r storage.Reader, collID flow.Identifier, guarantee *flow.CollectionGuarantee) error { + return RetrieveByKey(r, MakePrefix(codeGuarantee, collID), guarantee) +} + +// LookupGuarantee finds the collection guarantee ID by collection ID. +// Error returns: +// - [storage.ErrNotFound] if the key does not exist in the database +// - All other errors have to be treated as unexpected failures from the database layer. +func LookupGuarantee(r storage.Reader, collectionID flow.Identifier, guaranteeID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeGuaranteeByCollectionID, collectionID), guaranteeID) +} + +// IndexPayloadGuarantees indexes the list of collection guarantees that were included in the specified block, +// keyed by the block ID. It produces a mapping from block ID to the list of collection guarantees contained in +// the block's payload. The collection guarantees are represented by their respective IDs. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists.
+// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors expected during normal operations. +func IndexPayloadGuarantees(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, guarIDs []flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index guarantee for blockID %v without holding lock %s", + blockID, storage.LockInsertBlock) + } + + return UpsertByKey(w, MakePrefix(codePayloadGuarantees, blockID), guarIDs) +} + +// LookupPayloadGuarantees retrieves the list of guarantee IDs that were included in the payload +// of the specified block. For every known block (at or above the root block height), this index should be populated. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block +// - All other errors have to be treated as unexpected failures from the database layer. +func LookupPayloadGuarantees(r storage.Reader, blockID flow.Identifier, guarIDs *[]flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codePayloadGuarantees, blockID), guarIDs) +} diff --git a/storage/operation/guarantees_test.go b/storage/operation/guarantees_test.go new file mode 100644 index 00000000000..bc483196e89 --- /dev/null +++ b/storage/operation/guarantees_test.go @@ -0,0 +1,163 @@ +package operation_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/onflow/crypto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestGuaranteeInsertRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + g := unittest.CollectionGuaranteeFixture() + + lockManager := storage.NewTestingLockManager() + + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertGuarantee(rw.Writer(), g.ID(), g) + }) + }) + require.NoError(t, err) + + var retrieved flow.CollectionGuarantee + err = operation.RetrieveGuarantee(db.Reader(), g.ID(), &retrieved) + require.NoError(t, err) + + assert.Equal(t, g, &retrieved) + }) +} + +func TestIndexGuaranteedCollectionByBlockHashInsertRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + blockID := flow.Identifier{0x10} + collID1 := flow.Identifier{0x01} + collID2 := flow.Identifier{0x02} + guarantees := []*flow.CollectionGuarantee{ + {CollectionID: collID1, Signature: crypto.Signature{0x10}}, + {CollectionID: collID2, Signature: crypto.Signature{0x20}}, + } + expected := flow.GetIDs(guarantees) + + lockManager := storage.NewTestingLockManager() + + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) 
error { + for _, guarantee := range guarantees { + if err := operation.InsertGuarantee(rw.Writer(), guarantee.ID(), guarantee); err != nil { + return err + } + } + return operation.IndexPayloadGuarantees(lctx, rw.Writer(), blockID, expected) + }) + }) + require.NoError(t, err) + var actual []flow.Identifier + err = operation.LookupPayloadGuarantees(db.Reader(), blockID, &actual) + require.NoError(t, err) + + assert.Equal(t, []flow.Identifier(expected), actual) + }) +} + +func TestIndexGuaranteedCollectionByBlockHashMultipleBlocks(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + blockID1 := flow.Identifier{0x10} + blockID2 := flow.Identifier{0x20} + collID1 := flow.Identifier{0x01} + collID2 := flow.Identifier{0x02} + collID3 := flow.Identifier{0x03} + collID4 := flow.Identifier{0x04} + set1 := []*flow.CollectionGuarantee{ + {CollectionID: collID1, Signature: crypto.Signature{0x1}}, + } + set2 := []*flow.CollectionGuarantee{ + {CollectionID: collID2, Signature: crypto.Signature{0x2}}, + {CollectionID: collID3, Signature: crypto.Signature{0x3}}, + {CollectionID: collID4, Signature: crypto.Signature{0x1}}, + } + ids1 := flow.GetIDs(set1) + ids2 := flow.GetIDs(set2) + + // insert block 1 + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for _, guarantee := range set1 { + if err := operation.InsertGuarantee(rw.Writer(), guarantee.ID(), guarantee); err != nil { + return err + } + } + return operation.IndexPayloadGuarantees(lctx, rw.Writer(), blockID1, ids1) + }) + }) + require.NoError(t, err) + + // insert block 2 + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for _, guarantee := range set2 { + if err := operation.InsertGuarantee(rw.Writer(), guarantee.ID(), guarantee); err != nil { + return err + } + } + return operation.IndexPayloadGuarantees(lctx, rw.Writer(), blockID2, ids2) + }) + }) + require.NoError(t, err) + + t.Run("should retrieve collections for block", func(t *testing.T) { + var actual1 []flow.Identifier + err := operation.LookupPayloadGuarantees(db.Reader(), blockID1, &actual1) + assert.NoError(t, err) + assert.ElementsMatch(t, []flow.Identifier(ids1), actual1) + + // get block 2 + var actual2 []flow.Identifier + err = operation.LookupPayloadGuarantees(db.Reader(), blockID2, &actual2) + assert.NoError(t, err) + assert.Equal(t, []flow.Identifier(ids2), actual2) + }) + }) +} + +func TestIndexGuaranteeDataMismatch(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + collectionID := flow.Identifier{0x01} + guaranteeID1 := flow.Identifier{0x10} + guaranteeID2 := flow.Identifier{0x20} + + lockManager := storage.NewTestingLockManager() + + // First, index a guarantee for the collection + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexGuarantee(lctx, rw, collectionID, guaranteeID1) + }) + }) + require.NoError(t, err) + + // Now try to index a different guarantee ID for the same collection + // This should return storage.ErrDataMismatch + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw 
storage.ReaderBatchWriter) error { + return operation.IndexGuarantee(lctx, rw, collectionID, guaranteeID2) + }) + }) + require.ErrorIs(t, err, storage.ErrDataMismatch) + + // Verify that the original guarantee ID is still stored + var retrievedGuaranteeID flow.Identifier + err = operation.LookupGuarantee(db.Reader(), collectionID, &retrievedGuaranteeID) + require.NoError(t, err) + assert.Equal(t, guaranteeID1, retrievedGuaranteeID) + }) +} diff --git a/storage/operation/headers.go b/storage/operation/headers.go new file mode 100644 index 00000000000..5036c08aa8f --- /dev/null +++ b/storage/operation/headers.go @@ -0,0 +1,172 @@ +package operation + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertHeader inserts a block header into the database. +// +// CAUTION: +// - The caller must ensure that headerID is a collision-resistant hash of the provided header! +// Otherwise, data corruption may occur. +// - The caller must acquire one (but not both) of the following locks and hold it until the database +// write has been committed: either [storage.LockInsertBlock] or [storage.LockInsertOrFinalizeClusterBlock]. +// +// It returns [storage.ErrAlreadyExists] if the header already exists, i.e. we only insert a new header once. +// This error allows the caller to detect duplicate inserts. If the header is stored along with other parts +// of the block in the same batch, similar duplication checks can be skipped for storing other parts of the block. +// No other errors are expected during normal operation. +func InsertHeader(lctx lockctx.Proof, rw storage.ReaderBatchWriter, headerID flow.Identifier, header *flow.Header) error { + held := lctx.HoldsLock(storage.LockInsertBlock) || lctx.HoldsLock(storage.LockInsertOrFinalizeClusterBlock) + if !held { + return fmt.Errorf("missing required lock: %s or %s", storage.LockInsertBlock, storage.LockInsertOrFinalizeClusterBlock) + } + + key := MakePrefix(codeHeader, headerID) + exist, err := KeyExists(rw.GlobalReader(), key) + if err != nil { + return err + } + if exist { + return fmt.Errorf("header already exists: %w", storage.ErrAlreadyExists) + } + + return UpsertByKey(rw.Writer(), key, header) +} + +// RetrieveHeader retrieves the header of the block with the specified ID. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no block with the specified `blockID` is known. +// - generic error in case of unexpected failure from the database layer +func RetrieveHeader(r storage.Reader, blockID flow.Identifier, header *flow.Header) error { + return RetrieveByKey(r, MakePrefix(codeHeader, blockID), header) +} + +// IndexFinalizedBlockByHeight indexes a block by its height. It must ONLY be called on FINALIZED BLOCKS. +// +// CAUTION: The caller must acquire the [storage.LockFinalizeBlock] and hold it until the database +// write has been committed. +// +// This function guarantees that the index is only inserted once for each height. We return +// [storage.ErrAlreadyExists] if an entry for the given height already exists in the database. +// No other errors are expected during normal operation. 
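+// +// Sketch of the expected call pattern (illustrative only; lockManager, db, header, and blockID are assumed to exist): +// +// lctx := lockManager.NewContext() +// defer lctx.Release() +// if err := lctx.AcquireLock(storage.LockFinalizeBlock); err != nil { return err } +// return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { +// return IndexFinalizedBlockByHeight(lctx, rw, header.Height, blockID) +// })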
+func IndexFinalizedBlockByHeight(lctx lockctx.Proof, rw storage.ReaderBatchWriter, height uint64, blockID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockFinalizeBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockFinalizeBlock) + } + + var existingID flow.Identifier + key := MakePrefix(codeHeightToBlock, height) + err := RetrieveByKey(rw.GlobalReader(), key, &existingID) + if err == nil { + return fmt.Errorf("block ID already exists for height %d with existing ID %v, cannot reindex with blockID %v: %w", + height, existingID, blockID, storage.ErrAlreadyExists) + } + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to check existing block ID for height %d: %w", height, err) + } + + return UpsertByKey(rw.Writer(), key, blockID) +} + +// IndexCertifiedBlockByView indexes a CERTIFIED block by its view. +// HotStuff guarantees that there is at most one certified block per view. Note that this does not hold +// for uncertified proposals, as a byzantine leader might produce multiple proposals for the same view. +// Hence, only certified blocks (i.e. blocks that have received a QC) can be indexed! +// +// CAUTION: The caller must acquire the [storage.LockInsertBlock] and hold it until the database write +// has been committed. +// +// Returns [storage.ErrAlreadyExists] if a block ID has already been indexed for this view. +// No other errors are expected during normal operation. +func IndexCertifiedBlockByView(lctx lockctx.Proof, rw storage.ReaderBatchWriter, view uint64, blockID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + + var existingID flow.Identifier + key := MakePrefix(codeCertifiedBlockByView, view) + err := RetrieveByKey(rw.GlobalReader(), key, &existingID) + if err == nil { + return fmt.Errorf("block ID already exists for view %d with existing ID %v, cannot reindex with blockID %v: %w", + view, existingID, blockID, storage.ErrAlreadyExists) + } + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to check existing block ID for view %d: %w", view, err) + } + + return UpsertByKey(rw.Writer(), key, blockID) +} + +// LookupBlockHeight retrieves the ID of the finalized block at the given height. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no finalized block for the specified height is known. +func LookupBlockHeight(r storage.Reader, height uint64, blockID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeHeightToBlock, height), blockID) +} + +// LookupCertifiedBlockByView retrieves the certified block by view. (Certified blocks are blocks that have received a QC.) +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no certified block for the specified view is known. +func LookupCertifiedBlockByView(r storage.Reader, view uint64, blockID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeCertifiedBlockByView, view), blockID) +} + +// BlockExists checks whether the block exists in the database. +// No errors are expected during normal operation. +func BlockExists(r storage.Reader, blockID flow.Identifier) (bool, error) { + return KeyExists(r, MakePrefix(codeHeader, blockID)) +} + +// IndexBlockContainingCollectionGuarantee produces a mapping from the ID of a [flow.CollectionGuarantee] to the block ID containing this guarantee. +// +// CAUTION: +// - The caller must acquire the lock ??? and hold it until the database write has been committed.
+// TODO: USE LOCK, we want to protect this mapping from accidental overwrites (because the key is not derived from the value via a collision-resistant hash) +// - A collection can be included in multiple *unfinalized* blocks. However, the implementation +// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY +// *and* only in the ABSENCE of BYZANTINE collector CLUSTERS (which the mature protocol must tolerate). +// Hence, this function should be treated as a temporary solution, which requires generalization +// (one-to-many mapping) for soft finality and the mature protocol. +// +// Expected errors during normal operations: +// TODO: return [storage.ErrAlreadyExists] or [storage.ErrDataMismatch] +func IndexBlockContainingCollectionGuarantee(w storage.Writer, collID flow.Identifier, blockID flow.Identifier) error { + return UpsertByKey(w, MakePrefix(codeCollectionBlock, collID), blockID) +} + +// LookupBlockContainingCollectionGuarantee retrieves the block containing the [flow.CollectionGuarantee] with the given ID. +// +// CAUTION: A collection can be included in multiple *unfinalized* blocks. However, the implementation +// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY +// *and* only in the ABSENCE of BYZANTINE collector CLUSTERS (which the mature protocol must tolerate). +// Hence, this function should be treated as a temporary solution, which requires generalization +// (one-to-many mapping) for soft finality and the mature protocol. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no block is known that contains the specified collection ID. +func LookupBlockContainingCollectionGuarantee(r storage.Reader, collID flow.Identifier, blockID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeCollectionBlock, collID), blockID) +} + +// FindHeaders iterates through all headers, calling `filter` on each, and adding +// them to the `found` slice if `filter` returned true +func FindHeaders(r storage.Reader, filter func(header *flow.Header) bool, found *[]flow.Header) error { + return TraverseByPrefix(r, MakePrefix(codeHeader), func(key []byte, getValue func(destVal any) error) (bail bool, err error) { + var h flow.Header + err = getValue(&h) + if err != nil { + return true, err + } + if filter(&h) { + *found = append(*found, h) + } + return false, nil + }, storage.DefaultIteratorOptions()) +} diff --git a/storage/operation/headers_test.go b/storage/operation/headers_test.go new file mode 100644 index 00000000000..9ec7b8b8644 --- /dev/null +++ b/storage/operation/headers_test.go @@ -0,0 +1,112 @@ +package operation_test + +import ( + "testing" + "time" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestHeaderInsertCheckRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + expected := &flow.Header{ + HeaderBody: flow.HeaderBody{ + View: 1337, + Timestamp: uint64(time.Now().UnixMilli()), + ParentID: flow.Identifier{0x11}, + ParentVoterIndices: []byte{0x44}, + ParentVoterSigData: []byte{0x88}, + ProposerID: flow.Identifier{0x33}, + }, + PayloadHash: flow.Identifier{0x22}, + } + blockID := expected.ID() + + 
lockManager := storage.NewTestingLockManager() + + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertHeader(lctx, rw, expected.ID(), expected) + }) + }) + require.NoError(t, err) + + var actual flow.Header + err = operation.RetrieveHeader(db.Reader(), blockID, &actual) + require.NoError(t, err) + + assert.Equal(t, *expected, actual) + }) +} + +func TestHeaderIDIndexByCollectionID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + + headerID := unittest.IdentifierFixture() + collectionGuaranteeID := unittest.IdentifierFixture() + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexBlockContainingCollectionGuarantee(rw.Writer(), collectionGuaranteeID, headerID) + }) + require.NoError(t, err) + + actualID := &flow.Identifier{} + err = operation.LookupBlockContainingCollectionGuarantee(db.Reader(), collectionGuaranteeID, actualID) + require.NoError(t, err) + assert.Equal(t, headerID, *actualID) + }) +} + +func TestBlockHeightIndexLookup(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + + height := uint64(1337) + expected := flow.Identifier{0x01, 0x02, 0x03} + + lockManager := storage.NewTestingLockManager() + + err := unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, height, expected) + }) + }) + require.NoError(t, err) + + var actual flow.Identifier + err = operation.LookupBlockHeight(db.Reader(), height, &actual) + require.NoError(t, err) + + assert.Equal(t, expected, actual) + }) +} + +func TestBlockViewIndexLookup(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + + view := uint64(1337) + expected := flow.Identifier{0x01, 0x02, 0x03} + + lockManager := storage.NewTestingLockManager() + + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexCertifiedBlockByView(lctx, rw, view, expected) + }) + }) + require.NoError(t, err) + + var actual flow.Identifier + err = operation.LookupCertifiedBlockByView(db.Reader(), view, &actual) + require.NoError(t, err) + + assert.Equal(t, expected, actual) + }) +} diff --git a/storage/operation/heights.go b/storage/operation/heights.go new file mode 100644 index 00000000000..f502a088ba3 --- /dev/null +++ b/storage/operation/heights.go @@ -0,0 +1,80 @@ +package operation + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/storage" +) + +// UpsertFinalizedHeight upserts the finalized height index, overwriting the current value. +// Updates to this index must strictly increase the finalized height. +// To enforce this, the caller must check the current finalized height while holding [storage.LockFinalizeBlock]. 
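+// +// A sketch of the intended check-then-write pattern, while holding [storage.LockFinalizeBlock] via lctx (illustrative; lctx, db, and newHeight are assumed to exist): +// +// var current uint64 +// err := RetrieveFinalizedHeight(db.Reader(), &current) // storage.ErrNotFound is expected on the very first write +// if err == nil && newHeight <= current { +// return fmt.Errorf("finalized height must strictly increase: %d <= %d", newHeight, current) +// } +// return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { +// return UpsertFinalizedHeight(lctx, rw.Writer(), newHeight) +// })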
+func UpsertFinalizedHeight(lctx lockctx.Proof, w storage.Writer, height uint64) error { + if !lctx.HoldsLock(storage.LockFinalizeBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockFinalizeBlock) + } + return UpsertByKey(w, MakePrefix(codeFinalizedHeight), height) +} + +func RetrieveFinalizedHeight(r storage.Reader, height *uint64) error { + return RetrieveByKey(r, MakePrefix(codeFinalizedHeight), height) +} + +// UpsertSealedHeight upserts the latest sealed height, OVERWRITING the current value. +// Updates to this index must strictly increase the sealed height. +// To enforce this, the caller must check the current sealed height while holding [storage.LockFinalizeBlock]. +func UpsertSealedHeight(lctx lockctx.Proof, w storage.Writer, height uint64) error { + if !lctx.HoldsLock(storage.LockFinalizeBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockFinalizeBlock) + } + return UpsertByKey(w, MakePrefix(codeSealedHeight), height) +} + +func RetrieveSealedHeight(r storage.Reader, height *uint64) error { + return RetrieveByKey(r, MakePrefix(codeSealedHeight), height) +} + +// InsertEpochFirstHeight inserts the height of the first block in the given epoch. +// The first block of an epoch E is the finalized block with view >= E.FirstView. +// Although we don't store the final height of an epoch, it can be inferred from this index. +// The caller must hold [storage.LockFinalizeBlock]. This function enforces that each index is written exactly once. +// Returns [storage.ErrAlreadyExists] if the height has already been indexed. +func InsertEpochFirstHeight(lctx lockctx.Proof, rw storage.ReaderBatchWriter, epoch, height uint64) error { + if !lctx.HoldsLock(storage.LockFinalizeBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockFinalizeBlock) + } + + var existingHeight uint64 + err := RetrieveEpochFirstHeight(rw.GlobalReader(), epoch, &existingHeight) + if err == nil { + return storage.ErrAlreadyExists + } + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to check existing epoch first height: %w", err) + } + + return UpsertByKey(rw.Writer(), MakePrefix(codeEpochFirstHeight, epoch), height) +} + +// RetrieveEpochFirstHeight retrieves the height of the first block in the given epoch. +// This operation does not require any locks, because the first height of an epoch does not change once set. +// Returns [storage.ErrNotFound] if the first block of the epoch has not yet been finalized. +func RetrieveEpochFirstHeight(r storage.Reader, epoch uint64, height *uint64) error { + return RetrieveByKey(r, MakePrefix(codeEpochFirstHeight, epoch), height) +} + +// RetrieveEpochLastHeight retrieves the height of the last block in the given epoch. +// This operation does not require any locks, because the first height of an epoch does not change once set. +// It is a more readable, but equivalent, alternative to querying RetrieveEpochFirstHeight for epoch+1 when interested in the last height of an epoch. +// Returns [storage.ErrNotFound] if the first block of the next epoch has not yet been finalized, i.e. the last height of the given epoch is not yet known.
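+// +// For example, if epoch 5 has first height 1000 and epoch 6 has first height 1500, +// then the last height of epoch 5 is 1499 (epoch 6's first height minus one).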
+func RetrieveEpochLastHeight(r storage.Reader, epoch uint64, height *uint64) error { + var nextEpochFirstHeight uint64 + if err := RetrieveByKey(r, MakePrefix(codeEpochFirstHeight, epoch+1), &nextEpochFirstHeight); err != nil { + return err + } + *height = nextEpochFirstHeight - 1 + return nil +} diff --git a/storage/operation/heights_test.go b/storage/operation/heights_test.go new file mode 100644 index 00000000000..8b29f603028 --- /dev/null +++ b/storage/operation/heights_test.go @@ -0,0 +1,120 @@ +package operation_test + +import ( + "math/rand" + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestFinalizedInsertUpdateRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + + height := uint64(1337) + err := unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertFinalizedHeight(lctx, rw.Writer(), height) + }) + }) + require.NoError(t, err) + + var retrieved uint64 + err = operation.RetrieveFinalizedHeight(db.Reader(), &retrieved) + require.NoError(t, err) + + assert.Equal(t, retrieved, height) + + height = 9999 + err = unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertFinalizedHeight(lctx, rw.Writer(), height) + }) + }) + require.NoError(t, err) + + err = operation.RetrieveFinalizedHeight(db.Reader(), &retrieved) + require.NoError(t, err) + + assert.Equal(t, retrieved, height) + }) +} + +func TestSealedInsertUpdateRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + + height := uint64(1337) + err := unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertSealedHeight(lctx, rw.Writer(), height) + }) + }) + require.NoError(t, err) + + var retrieved uint64 + err = operation.RetrieveSealedHeight(db.Reader(), &retrieved) + require.NoError(t, err) + + assert.Equal(t, retrieved, height) + + height = 9999 + err = unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertSealedHeight(lctx, rw.Writer(), height) + }) + }) + require.NoError(t, err) + + err = operation.RetrieveSealedHeight(db.Reader(), &retrieved) + require.NoError(t, err) + + assert.Equal(t, retrieved, height) + }) +} + +func TestEpochFirstBlockIndex_InsertRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + + height := rand.Uint64() + epoch := rand.Uint64() + + // retrieve when empty errors + var retrieved uint64 + err := operation.RetrieveEpochFirstHeight(db.Reader(), epoch, &retrieved) + require.ErrorIs(t, err, storage.ErrNotFound) + + // can insert + err = unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + 
return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertEpochFirstHeight(lctx, rw, epoch, height) + }) + }) + require.NoError(t, err) + + // can retrieve + err = operation.RetrieveEpochFirstHeight(db.Reader(), epoch, &retrieved) + require.NoError(t, err) + assert.Equal(t, retrieved, height) + + // retrieve non-existent key errors + err = operation.RetrieveEpochFirstHeight(db.Reader(), epoch+1, &retrieved) + require.ErrorIs(t, err, storage.ErrNotFound) + + err = unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + // insert existent key errors + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertEpochFirstHeight(lctx, rw, epoch, height) + }) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + }) +} diff --git a/storage/operation/index.go b/storage/operation/index.go new file mode 100644 index 00000000000..22b6a6fc247 --- /dev/null +++ b/storage/operation/index.go @@ -0,0 +1,94 @@ +package operation + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertIndex persists the given index keyed by the block ID. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors expected during normal operations. +func InsertIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, index *flow.Index) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + + // The following database operations are all indexing data by block ID. If used correctly, + // we don't need to check here if the data is already stored, because the same check should have + // been done when storing the block header, which is in the same batch update and holding the same lock. + // If there is no header stored for the block ID, it means no index data for the same block ID + // was stored either; as long as the same lock is held, the data is guaranteed to be consistent.
+ w := rw.Writer() + err := IndexPayloadGuarantees(lctx, w, blockID, index.GuaranteeIDs) + if err != nil { + return fmt.Errorf("could not store guarantee index: %w", err) + } + err = IndexPayloadSeals(lctx, w, blockID, index.SealIDs) + if err != nil { + return fmt.Errorf("could not store seal index: %w", err) + } + err = IndexPayloadReceipts(lctx, w, blockID, index.ReceiptIDs) + if err != nil { + return fmt.Errorf("could not store receipts index: %w", err) + } + err = IndexPayloadResults(lctx, w, blockID, index.ResultIDs) + if err != nil { + return fmt.Errorf("could not store results index: %w", err) + } + err = IndexPayloadProtocolStateID(lctx, w, blockID, index.ProtocolStateID) + if err != nil { + return fmt.Errorf("could not store protocol state id: %w", err) + } + return nil +} + +func RetrieveIndex(r storage.Reader, blockID flow.Identifier, index *flow.Index) error { + var guaranteeIDs []flow.Identifier + err := LookupPayloadGuarantees(r, blockID, &guaranteeIDs) + if err != nil { + return fmt.Errorf("could not retrieve guarantee index: %w", err) + } + var sealIDs []flow.Identifier + err = LookupPayloadSeals(r, blockID, &sealIDs) + if err != nil { + return fmt.Errorf("could not retrieve seal index: %w", err) + } + var receiptIDs []flow.Identifier + err = LookupPayloadReceipts(r, blockID, &receiptIDs) + if err != nil { + return fmt.Errorf("could not retrieve receipts index: %w", err) + } + var resultsIDs []flow.Identifier + err = LookupPayloadResults(r, blockID, &resultsIDs) + if err != nil { + return fmt.Errorf("could not retrieve results index: %w", err) + } + var stateID flow.Identifier + err = LookupPayloadProtocolStateID(r, blockID, &stateID) + if err != nil { + return fmt.Errorf("could not retrieve protocol state id: %w", err) + } + + *index = flow.Index{ + GuaranteeIDs: guaranteeIDs, + SealIDs: sealIDs, + ReceiptIDs: receiptIDs, + ResultIDs: resultsIDs, + ProtocolStateID: stateID, + } + return nil +} diff --git a/storage/operation/index_test.go b/storage/operation/index_test.go new file mode 100644 index 00000000000..3978161bf7d --- /dev/null +++ b/storage/operation/index_test.go @@ -0,0 +1,35 @@ +package operation_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestInsertRetrieveIndex(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + blockID := unittest.IdentifierFixture() + index := unittest.IndexFixture() + + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertIndex(lctx, rw, blockID, index) + }) + }) + require.NoError(t, err) + + var retrieved flow.Index + err = operation.RetrieveIndex(db.Reader(), blockID, &retrieved) + require.NoError(t, err) + + require.Equal(t, index, &retrieved) + }) +} diff --git a/storage/operation/instance_params.go b/storage/operation/instance_params.go new file mode 100644 index 00000000000..9fb67289d79 --- /dev/null +++ b/storage/operation/instance_params.go @@ -0,0 +1,43 @@ +package operation + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) 
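+ +// Illustrative bootstrapping sketch (assumes the caller has built params via datastore.NewVersionedInstanceParams and obtains a lockctx.Context from the node's lock manager; names are hypothetical): +// +// lctx := lockManager.NewContext() +// defer lctx.Release() +// if err := lctx.AcquireLock(storage.LockBootstrapping); err != nil { return err } +// return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { +// return InsertInstanceParams(lctx, rw, *params) +// })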
+ +// InsertInstanceParams stores the consolidated instance params under a single key. +// +// CAUTION: +// - This function is intended to be called exactly once during bootstrapping. +// Overwrites are prevented by an explicit existence check; if data is already present, an error is returned. +// - To guarantee atomicity of existence-check plus database write, we require the caller to acquire +// the [storage.LockBootstrapping] lock and hold it until the database write has been committed. +// +// Expected errors during normal operations: +// - [storage.ErrAlreadyExists] if instance params have already been stored. +func InsertInstanceParams(lctx lockctx.Proof, rw storage.ReaderBatchWriter, params flow.VersionedInstanceParams) error { + if !lctx.HoldsLock(storage.LockBootstrapping) { + return fmt.Errorf("missing required lock: %s", storage.LockBootstrapping) + } + key := MakePrefix(codeInstanceParams) + exist, err := KeyExists(rw.GlobalReader(), key) + if err != nil { + return err + } + if exist { + return fmt.Errorf("instance params are already stored: %w", storage.ErrAlreadyExists) + } + return UpsertByKey(rw.Writer(), key, params) +} + +// RetrieveInstanceParams retrieves the consolidated instance params from storage. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if the key does not exist (not bootstrapped). +func RetrieveInstanceParams(r storage.Reader, params *flow.VersionedInstanceParams) error { + return RetrieveByKey(r, MakePrefix(codeInstanceParams), params) +} diff --git a/storage/operation/instance_params_test.go b/storage/operation/instance_params_test.go new file mode 100644 index 00000000000..26b0c88fdd2 --- /dev/null +++ b/storage/operation/instance_params_test.go @@ -0,0 +1,100 @@ +package operation_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol/datastore" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestInstanceParams_InsertRetrieve verifies that InstanceParams can be +// correctly stored and retrieved from the database as a single encoded +// structure. +// Test cases: +// 1. InstanceParams can be inserted and retrieved successfully. +// 2. Overwrite attempts return storage.ErrAlreadyExists and do not change the +// persisted value. +// 3. Writes without holding LockBootstrapping are denied.
+func TestInstanceParams_InsertRetrieve(t *testing.T) { + lockManager := storage.NewTestingLockManager() + enc, err := datastore.NewVersionedInstanceParams( + datastore.DefaultInstanceParamsVersion, + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + ) + require.NoError(t, err) + + t.Run("happy path: insert and retrieve", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockBootstrapping)) + defer lctx.Release() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertInstanceParams(lctx, rw, *enc) + }) + require.NoError(t, err) + + var actual flow.VersionedInstanceParams + err = operation.RetrieveInstanceParams(db.Reader(), &actual) + require.NoError(t, err) + require.Equal(t, enc, &actual) + }) + }) + + t.Run("overwrite returns ErrAlreadyExists", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lctx := lockManager.NewContext() + require.NoError(t, lctx.AcquireLock(storage.LockBootstrapping)) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertInstanceParams(lctx, rw, *enc) + }) + require.NoError(t, err) + lctx.Release() + + // try to overwrite with different params + enc2, err := datastore.NewVersionedInstanceParams( + datastore.DefaultInstanceParamsVersion, + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + unittest.IdentifierFixture(), + ) + require.NoError(t, err) + + lctx2 := lockManager.NewContext() + require.NoError(t, lctx2.AcquireLock(storage.LockBootstrapping)) + defer lctx2.Release() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertInstanceParams(lctx2, rw, *enc2) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + + // DB must still contain original value + var check flow.VersionedInstanceParams + err = operation.RetrieveInstanceParams(db.Reader(), &check) + require.NoError(t, err) + require.Equal(t, enc, &check) + }) + }) + + t.Run("insert without required lock", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lctx := lockManager.NewContext() + defer lctx.Release() + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertInstanceParams(lctx, rw, *enc) + }) + require.ErrorContains(t, err, storage.LockBootstrapping) + }) + }) +} diff --git a/storage/operation/interactions.go b/storage/operation/interactions.go new file mode 100644 index 00000000000..04c63635419 --- /dev/null +++ b/storage/operation/interactions.go @@ -0,0 +1,26 @@ +package operation + +import ( + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +func InsertExecutionStateInteractions( + w storage.Writer, + blockID flow.Identifier, + executionSnapshots []*snapshot.ExecutionSnapshot, +) error { + return UpsertByKey(w, + MakePrefix(codeExecutionStateInteractions, blockID), + executionSnapshots) +} + +func RetrieveExecutionStateInteractions( + r storage.Reader, + blockID flow.Identifier, + executionSnapshots *[]*snapshot.ExecutionSnapshot, +) error { + return RetrieveByKey(r, + MakePrefix(codeExecutionStateInteractions, blockID), executionSnapshots) +} diff --git a/storage/operation/interactions_test.go b/storage/operation/interactions_test.go new file mode 100644 index 00000000000..e8865fc161d --- 
/dev/null +++ b/storage/operation/interactions_test.go @@ -0,0 +1,64 @@ +package operation_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/storage/snapshot" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestStateInteractionsInsertCheckRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + + id1 := flow.NewRegisterID( + flow.BytesToAddress([]byte("\x89krg\u007fBN\x1d\xf5\xfb\xb8r\xbc4\xbd\x98ռ\xf1\xd0twU\xbf\x16N\xb4?,\xa0&;")), + "") + id2 := flow.NewRegisterID(flow.BytesToAddress([]byte{2}), "") + id3 := flow.NewRegisterID(flow.BytesToAddress([]byte{3}), "") + + executionSnapshot := &snapshot.ExecutionSnapshot{ + ReadSet: map[flow.RegisterID]struct{}{ + id2: {}, + id3: {}, + }, + WriteSet: map[flow.RegisterID]flow.RegisterValue{ + id1: []byte("zażółć gęślą jaźń"), + id2: []byte("c"), + }, + } + + interactions := []*snapshot.ExecutionSnapshot{ + executionSnapshot, + {}, + } + + blockID := unittest.IdentifierFixture() + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertExecutionStateInteractions(rw.Writer(), blockID, interactions) + }) + require.NoError(t, err) + + var readInteractions []*snapshot.ExecutionSnapshot + + err = operation.RetrieveExecutionStateInteractions(db.Reader(), blockID, &readInteractions) + require.NoError(t, err) + + assert.Equal(t, interactions, readInteractions) + assert.Equal( + t, + executionSnapshot.WriteSet, + readInteractions[0].WriteSet) + assert.Equal( + t, + executionSnapshot.ReadSet, + readInteractions[0].ReadSet) + }) +} diff --git a/storage/operation/iterate_bench_test.go b/storage/operation/iterate_bench_test.go new file mode 100644 index 00000000000..29d0047495e --- /dev/null +++ b/storage/operation/iterate_bench_test.go @@ -0,0 +1,109 @@ +package operation_test + +import ( + "fmt" + "testing" + + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/merr" + + "github.com/stretchr/testify/require" + "github.com/vmihailenco/msgpack/v4" +) + +func BenchmarkFindHighestAtOrBelowByPrefixUsingIterator(t *testing.B) { + dbtest.BenchWithStorages(t, func(t *testing.B, r storage.Reader, withWriter dbtest.WithWriter) { + const count = 500 + const step = 2 + + prefix := byte(67) + for i := range count { + k := operation.MakePrefix(prefix, uint64(i)*step) + e := Entity{ID: uint64(i * 2)} + require.NoError(t, withWriter(operation.Upsert(k, e))) + } + + t.ResetTimer() + + height := uint64(500) + for i := 0; i < t.N; i++ { + var entity Entity + require.NoError(t, findHighestAtOrBelowByPrefixUsingIterator(r, []byte{prefix}, height, &entity)) + } + }) +} + +func BenchmarkFindHighestAtOrBelowByPrefixUsingSeeker(t *testing.B) { + dbtest.BenchWithStorages(t, func(t *testing.B, r storage.Reader, withWriter dbtest.WithWriter) { + const count = 500 + const step = 2 + + prefix := byte(67) + for i := range count { + k := operation.MakePrefix(prefix, uint64(i)*step) + e := Entity{ID: uint64(i * 2)} + require.NoError(t, withWriter(operation.Upsert(k, e))) + } + + t.ResetTimer() + + height := uint64(500) + for i := 0; i < t.N; i++ { + var entity 
Entity + require.NoError(t, findHighestAtOrBelowByPrefixUsingSeeker(r, []byte{prefix}, height, &entity)) + } + }) +} + +// findHighestAtOrBelowByPrefixUsingIterator is the original operation.FindHighestAtOrBelowByPrefix(). +func findHighestAtOrBelowByPrefixUsingIterator(r storage.Reader, prefix []byte, height uint64, entity interface{}) (errToReturn error) { + if len(prefix) == 0 { + return fmt.Errorf("prefix must not be empty") + } + + key := append(prefix, operation.EncodeKeyPart(height)...) + it, err := r.NewIter(prefix, key, storage.DefaultIteratorOptions()) + if err != nil { + return fmt.Errorf("can not create iterator: %w", err) + } + defer func() { + errToReturn = merr.CloseAndMergeError(it, errToReturn) + }() + + var highestKey []byte + + // find highest value below the given height + for it.First(); it.Valid(); it.Next() { + // copy the key to avoid the underlying slices of the key + // being modified by the Next() call + highestKey = it.IterItem().KeyCopy(highestKey) + } + + if len(highestKey) == 0 { + return storage.ErrNotFound + } + + // read the value of the highest key + val, closer, err := r.Get(highestKey) + if err != nil { + return err + } + + defer func() { + errToReturn = merr.CloseAndMergeError(closer, errToReturn) + }() + + err = msgpack.Unmarshal(val, entity) + if err != nil { + return irrecoverable.NewExceptionf("failed to decode value: %w", err) + } + + return nil +} + +func findHighestAtOrBelowByPrefixUsingSeeker(r storage.Reader, prefix []byte, height uint64, entity interface{}) (errToReturn error) { + return operation.FindHighestAtOrBelowByPrefix(r, prefix, height, entity) +} diff --git a/storage/operation/jobs.go b/storage/operation/jobs.go new file mode 100644 index 00000000000..75b88ec0d55 --- /dev/null +++ b/storage/operation/jobs.go @@ -0,0 +1,24 @@ +package operation + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +func RetrieveJobLatestIndex(r storage.Reader, queue string, index *uint64) error { + return RetrieveByKey(r, MakePrefix(codeJobQueuePointer, queue), index) +} + +func SetJobLatestIndex(w storage.Writer, queue string, index uint64) error { + return UpsertByKey(w, MakePrefix(codeJobQueuePointer, queue), index) +} + +// RetrieveJobAtIndex returns the entity at the given index +func RetrieveJobAtIndex(r storage.Reader, queue string, index uint64, entity *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeJobQueue, queue, index), entity) +} + +// InsertJobAtIndex insert an entity ID at the given index +func InsertJobAtIndex(w storage.Writer, queue string, index uint64, entity flow.Identifier) error { + return UpsertByKey(w, MakePrefix(codeJobQueue, queue, index), entity) +} diff --git a/storage/operation/multi_dbstore.go b/storage/operation/multi_dbstore.go new file mode 100644 index 00000000000..1e161cdf4da --- /dev/null +++ b/storage/operation/multi_dbstore.go @@ -0,0 +1,51 @@ +package operation + +import ( + "github.com/hashicorp/go-multierror" + + "github.com/onflow/flow-go/storage" +) + +type multiDBStore struct { + rwStore storage.DB // primary read and write store + r storage.DB // secondary read store +} + +var _ (storage.DB) = (*multiDBStore)(nil) + +// NewMultiDBStore returns a DB store that consists of a primary +// read-and-write store, and a secondary read-only store. 
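+//
+// Composition sketch (assumes the pebbleimpl/badgerimpl converters exercised by
+// the tests in this package):
+//
+//	primary := pebbleimpl.ToDB(pdb)  // receives all writes; consulted first on reads
+//	legacy := badgerimpl.ToDB(bdb)   // read-only fallback for pre-existing data
+//	db := NewMultiDBStore(primary, legacy)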
+func NewMultiDBStore(rwStore storage.DB, rStore storage.DB) storage.DB { + return &multiDBStore{ + rwStore: rwStore, + r: rStore, + } +} + +func (b *multiDBStore) Reader() storage.Reader { + return NewMultiReader( + b.rwStore.Reader(), + b.r.Reader(), + ) +} + +func (b *multiDBStore) WithReaderBatchWriter(fn func(storage.ReaderBatchWriter) error) error { + return b.rwStore.WithReaderBatchWriter(fn) +} + +func (b *multiDBStore) NewBatch() storage.Batch { + return b.rwStore.NewBatch() +} + +func (b *multiDBStore) Close() error { + var result *multierror.Error + + if err := b.rwStore.Close(); err != nil { + result = multierror.Append(result, err) + } + if err := b.r.Close(); err != nil { + result = multierror.Append(result, err) + } + + return result.ErrorOrNil() +} diff --git a/storage/operation/multi_iterator.go b/storage/operation/multi_iterator.go new file mode 100644 index 00000000000..20aa46f69c1 --- /dev/null +++ b/storage/operation/multi_iterator.go @@ -0,0 +1,100 @@ +package operation + +import ( + "errors" + + "github.com/onflow/flow-go/storage" +) + +// multiIterator represents a logical concatenation of multiple iterators +// in sequence. multiIterator iterates items in the first iterator, and then +// iterates items in the second iterator, etc. +// Invariants: +// - len(iterators) is > 0 +// - cur is in [0, len(iterators)-1] +type multiIterator struct { + iterators []storage.Iterator + cur int // index of current iterator +} + +var _ storage.Iterator = (*multiIterator)(nil) + +// NewMultiIterator returns an Iterator that is a logical concatenation of +// multiple iterators in the provided sequence. The returned iterator +// iterates items in the first iterator, and then iterates items in +// the second iterator, etc. +// NewMultiIterator panics if 0 iterators are provided. +func NewMultiIterator(iterators ...storage.Iterator) (storage.Iterator, error) { + if len(iterators) == 0 { + panic("failed to create multiIterator: need at least one iterator") + } + if len(iterators) == 1 { + return iterators[0], nil + } + return &multiIterator{iterators: iterators}, nil +} + +// First positions the iterator at the first key-value pair and returns true if such a pair exists. +func (mi *multiIterator) First() bool { + for i, iterator := range mi.iterators { + mi.cur = i + + valid := iterator.First() + if valid { + return true + } + } + + return false +} + +// Valid returns whether the iterator is positioned at a valid key-value pair. +func (mi *multiIterator) Valid() bool { + return mi.iterators[mi.cur].Valid() +} + +// Next advances the iterator to the next key-value pair. +func (mi *multiIterator) Next() { + // Move to next item in the current iterator. + mi.iterators[mi.cur].Next() + + // Return if next item is valid or end of last iterator is reached. + if mi.Valid() || mi.isLast() { + return + } + + // Move to next iterator. + mi.cur++ + + for i := mi.cur; i < len(mi.iterators); i++ { + mi.cur = i + + valid := mi.iterators[mi.cur].First() + if valid { + return + } + } +} + +// IterItem returns the current key-value pair, or nil if done. +func (mi *multiIterator) IterItem() storage.IterItem { + return mi.iterators[mi.cur].IterItem() +} + +// Close closes the iterator. +// Iterator must be closed, otherwise it causes memory leaks. +// No errors are expected during normal operation. +func (mi *multiIterator) Close() error { + var errs []error + for _, iterator := range mi.iterators { + err := iterator.Close() + if err != nil { + errs = append(errs, err) + } + } + return errors.Join(errs...)
+} + +func (mi *multiIterator) isLast() bool { + return mi.cur == len(mi.iterators)-1 +} diff --git a/storage/operation/multi_iterator_test.go b/storage/operation/multi_iterator_test.go new file mode 100644 index 00000000000..db553541fc3 --- /dev/null +++ b/storage/operation/multi_iterator_test.go @@ -0,0 +1,179 @@ +package operation_test + +import ( + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestMultiIterator(t *testing.T) { + unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { + prefix := byte(0x01) + + const lowBound = 0x00 + const highBound = 0xff + const keyCount = highBound - lowBound + 1 + + keys := make([][]byte, 0, keyCount) + values := make([][]byte, 0, keyCount) + for i := lowBound; i < highBound+1; i++ { + keys = append(keys, operation.MakePrefix(prefix, byte(i))) + values = append(values, []byte{byte(i)}) + } + + // Store first half of key-value pairs in BadgerDB + err := badgerimpl.ToDB(bdb).WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { + for i := 0; i < len(keys)/2; i++ { + err := rbw.Writer().Set(keys[i], values[i]) + if err != nil { + return err + } + } + return nil + }) + require.NoError(t, err) + + // Store second half of key-value pairs in Pebble + err = pebbleimpl.ToDB(pdb).WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { + for i := len(keys) / 2; i < len(keys); i++ { + err := rbw.Writer().Set(keys[i], values[i]) + if err != nil { + return err + } + } + return nil + }) + require.NoError(t, err) + + preader := pebbleimpl.ToDB(pdb).Reader() + + breader := badgerimpl.ToDB(bdb).Reader() + + reader := operation.NewMultiReader(preader, breader) + + t.Run("not found, less than lower bound", func(t *testing.T) { + startPrefix := []byte{0x00} + endPrefix := []byte{0x00, 0xff} + + it, err := reader.NewIter(startPrefix, endPrefix, storage.DefaultIteratorOptions()) + require.NoError(t, err) + + defer it.Close() + + require.False(t, it.First()) + require.False(t, it.Valid()) + + it.Next() + require.False(t, it.Valid()) + }) + + t.Run("not found, higher than upper bound", func(t *testing.T) { + startPrefix := []byte{0x02} + endPrefix := []byte{0x02, 0xff} + + it, err := reader.NewIter(startPrefix, endPrefix, storage.DefaultIteratorOptions()) + require.NoError(t, err) + + defer it.Close() + + require.False(t, it.First()) + require.False(t, it.Valid()) + + it.Next() + require.False(t, it.Valid()) + }) + + t.Run("found in second db", func(t *testing.T) { + startPrefix := []byte{0x01} + endPrefix := []byte{0x01, 0x0f} + expectedCount := 16 + + it, err := reader.NewIter(startPrefix, endPrefix, storage.DefaultIteratorOptions()) + require.NoError(t, err) + + defer it.Close() + + i := 0 + for it.First(); it.Valid(); it.Next() { + item := it.IterItem() + + require.Equal(t, keys[i], item.Key()) + + err = item.Value(func(val []byte) error { + require.Equal(t, values[i], val) + return nil + }) + require.NoError(t, err) + + i++ + } + require.Equal(t, expectedCount, i) + }) + + t.Run("found in first db", func(t *testing.T) { + startPrefix := []byte{0x01, 0xf0} + endPrefix := []byte{0x01, 0xff} + expectedCount := 16 + + it, err := reader.NewIter(startPrefix, endPrefix, 
storage.DefaultIteratorOptions()) + require.NoError(t, err) + + defer it.Close() + + count := 0 + i := len(keys) - expectedCount + for it.First(); it.Valid(); it.Next() { + item := it.IterItem() + + require.Equal(t, keys[i], item.Key()) + + err = item.Value(func(val []byte) error { + require.Equal(t, values[i], val) + return nil + }) + require.NoError(t, err) + + i++ + count++ + } + require.Equal(t, expectedCount, count) + }) + + t.Run("found in both db", func(t *testing.T) { + startPrefix := []byte{0x01, 0x0f} + endPrefix := []byte{0x01, 0xf0} + expectedCount := len(keys) - 15 - 15 + + it, err := reader.NewIter(startPrefix, endPrefix, storage.DefaultIteratorOptions()) + require.NoError(t, err) + + defer it.Close() + + count := 0 + i := 15 + for it.First(); it.Valid(); it.Next() { + item := it.IterItem() + + require.Equal(t, keys[i], item.Key()) + + err = item.Value(func(val []byte) error { + require.Equal(t, values[i], val) + return nil + }) + require.NoError(t, err) + + i++ + count++ + } + require.Equal(t, expectedCount, count) + }) + }) +} diff --git a/storage/operation/multi_reader.go b/storage/operation/multi_reader.go new file mode 100644 index 00000000000..335b294f888 --- /dev/null +++ b/storage/operation/multi_reader.go @@ -0,0 +1,93 @@ +package operation + +import ( + "errors" + "io" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/noop" +) + +type multiReader struct { + readers []storage.Reader +} + +var _ storage.Reader = (*multiReader)(nil) + +// NewMultiReader returns a Reader that consists of multiple readers +// in the provided order. Readers are read sequentially until +// - a reader succeeds or +// - a reader returns an error that is not ErrNotFound +// If all readers return ErrNotFound, Reader.Get will return ErrNotFound. +// NewMultiReader panics if 0 readers are provided. +func NewMultiReader(readers ...storage.Reader) storage.Reader { + if len(readers) == 0 { + panic("failed to create multiReader: need at least one reader") + } + if len(readers) == 1 { + return readers[0] + } + return &multiReader{readers: readers} +} + +// Get gets the value for the given key from one of the readers. +// Readers are read sequentially until +// - a reader succeeds or +// - a reader returns an error that is not ErrNotFound +// If all readers return ErrNotFound, Get will return ErrNotFound. +// Other errors are exceptions. +// +// The caller should not modify the contents of the returned slice, but it is +// safe to modify the contents of the argument after Get returns. The +// returned slice will remain valid until the returned Closer is closed. +// when err == nil, the caller MUST call closer.Close() or a memory leak will occur. +func (b *multiReader) Get(key []byte) (value []byte, closer io.Closer, err error) { + for _, r := range b.readers { + value, closer, err = r.Get(key) + if err == nil || !errors.Is(err, storage.ErrNotFound) { + return + } + } + return nil, noop.Closer{}, storage.ErrNotFound +} + +// NewIter returns a new Iterator for the given key prefix range [startPrefix, endPrefix], both inclusive. +// Specifically, all keys that meet ANY of the following conditions are included in the iteration: +// - have a prefix equal to startPrefix OR +// - have a prefix equal to the endPrefix OR +// - have a prefix that is lexicographically between startPrefix and endPrefix +// +// Returned new iterator consists of multiple iterators in reverse order from underlying readers. +// For example, the first iterator is created from the last underlying reader. 
+// This is to ensure that legacy databases are iterated first to preserve key orders. +// +// NewIter returns error if the startPrefix key is greater than the endPrefix key. +// No errors are expected during normal operation. +func (b *multiReader) NewIter(startPrefix, endPrefix []byte, ops storage.IteratorOption) (storage.Iterator, error) { + // Create iterators from readers in reverse order + // because we want to iterate legacy databases first + // to preserve key orders. + iterators := make([]storage.Iterator, len(b.readers)) + for i, r := range b.readers { + iterator, err := r.NewIter(startPrefix, endPrefix, ops) + if err != nil { + return nil, err + } + iterators[len(b.readers)-1-i] = iterator + } + + return NewMultiIterator(iterators...) +} + +// NewSeeker returns a new Seeker. +// +// Returned new Seeker consists of multiple seekers in reverse order from underlying readers. +// For example, the first seeker is created from the last underlying reader. +func (b *multiReader) NewSeeker() storage.Seeker { + seekers := make([]storage.Seeker, len(b.readers)) + for i, r := range b.readers { + seekers[len(b.readers)-1-i] = r.NewSeeker() + } + + return NewMultiSeeker(seekers...) +} diff --git a/storage/operation/multi_reader_test.go b/storage/operation/multi_reader_test.go new file mode 100644 index 00000000000..dbbbabf26c5 --- /dev/null +++ b/storage/operation/multi_reader_test.go @@ -0,0 +1,89 @@ +package operation_test + +import ( + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestMultiReader(t *testing.T) { + unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { + key1 := []byte{0x01, 0x01} + value1 := []byte{0x01} + + key2 := []byte{0x01, 0x02} + value2 := []byte{0x02} + + key3 := []byte{0x01, 0x03} + value3a := []byte{0x03, 0x00} + value3b := []byte{0x03, 0x01} + + notFoundKey := []byte{0x01, 0x04} + + // Store {key1, value1} and {key3, value3a} in Pebble + err := pebbleimpl.ToDB(pdb).WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { + err := rbw.Writer().Set(key1, value1) + if err != nil { + return err + } + return rbw.Writer().Set(key3, value3a) + }) + require.NoError(t, err) + + // Store {key2, value2} and {key3, value3b} in BadgerDB + err = badgerimpl.ToDB(bdb).WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { + err := rbw.Writer().Set(key2, value2) + if err != nil { + return err + } + return rbw.Writer().Set(key3, value3b) + }) + require.NoError(t, err) + + preader := pebbleimpl.ToDB(pdb).Reader() + + breader := badgerimpl.ToDB(bdb).Reader() + + reader := operation.NewMultiReader(preader, breader) + + t.Run("not found", func(t *testing.T) { + value, closer, err := reader.Get(notFoundKey) + require.Equal(t, 0, len(value)) + require.ErrorIs(t, err, storage.ErrNotFound) + + closer.Close() + }) + + t.Run("in first db", func(t *testing.T) { + value, closer, err := reader.Get(key1) + require.Equal(t, value1, value) + require.NoError(t, err) + + closer.Close() + }) + + t.Run("in second db", func(t *testing.T) { + value, closer, err := reader.Get(key2) + require.Equal(t, value2, value) + require.NoError(t, err) + + closer.Close() + }) + + t.Run("in both db", func(t *testing.T) { + value, closer, 
err := reader.Get(key3) + require.Equal(t, value3a, value) + require.NoError(t, err) + + closer.Close() + }) + }) +} diff --git a/storage/operation/multi_seeker.go b/storage/operation/multi_seeker.go new file mode 100644 index 00000000000..d246490eb56 --- /dev/null +++ b/storage/operation/multi_seeker.go @@ -0,0 +1,49 @@ +package operation + +import ( + "bytes" + "errors" + + "github.com/onflow/flow-go/storage" +) + +type multiSeeker struct { + seekers []storage.Seeker +} + +var _ storage.Seeker = (*multiSeeker)(nil) + +// NewMultiSeeker returns a Seeker that consists of multiple seekers +// in the provided order. +// NewMultiSeeker panics if 0 seekers are provided. +func NewMultiSeeker(seekers ...storage.Seeker) storage.Seeker { + if len(seekers) == 0 { + panic("failed to create multiSeeker: need at least one seeker") + } + if len(seekers) == 1 { + return seekers[0] + } + return &multiSeeker{seekers: seekers} +} + +// SeekLE (seek less than or equal) returns the largest key in lexicographical +// order within inclusive range of [startPrefix, key]. +// This function returns an error if specified key is less than startPrefix. +// This function returns storage.ErrNotFound if a key that matches +// the specified criteria is not found. +func (b *multiSeeker) SeekLE(startPrefix, key []byte) ([]byte, error) { + if bytes.Compare(key, startPrefix) < 0 { + return nil, errors.New("key must be greater than or equal to startPrefix key") + } + + // Seek less than or equal from the last seeker first. + for i := len(b.seekers) - 1; i >= 0; i-- { + seeker := b.seekers[i] + key, err := seeker.SeekLE(startPrefix, key) + if err == nil || !errors.Is(err, storage.ErrNotFound) { + return key, err + } + } + + return nil, storage.ErrNotFound +} diff --git a/storage/operation/multi_seeker_test.go b/storage/operation/multi_seeker_test.go new file mode 100644 index 00000000000..d37ccda5bcb --- /dev/null +++ b/storage/operation/multi_seeker_test.go @@ -0,0 +1,125 @@ +package operation_test + +import ( + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/badgerimpl" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestMultiSeeker(t *testing.T) { + unittest.RunWithBadgerDBAndPebbleDB(t, func(bdb *badger.DB, pdb *pebble.DB) { + // Insert the keys into the storage + codePrefix := byte(1) + badgerDBKeyParts := []uint64{1, 5} + pebbleKeyParts := []uint64{9} + + // Store keys in BadgerDB + err := badgerimpl.ToDB(bdb).WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { + for _, keyPart := range badgerDBKeyParts { + key := operation.MakePrefix(codePrefix, keyPart) + err := rbw.Writer().Set(key, []byte{0x01}) + if err != nil { + return err + } + } + return nil + }) + require.NoError(t, err) + + // Store keys in Pebble + err = pebbleimpl.ToDB(pdb).WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { + for _, keyPart := range pebbleKeyParts { + key := operation.MakePrefix(codePrefix, keyPart) + err := rbw.Writer().Set(key, []byte{0x01}) + if err != nil { + return err + } + } + return nil + }) + require.NoError(t, err) + + preader := pebbleimpl.ToDB(pdb).Reader() + + breader := badgerimpl.ToDB(bdb).Reader() + + r := operation.NewMultiReader(preader, breader) + + t.Run("key below start prefix", func(t *testing.T) { + seeker := 
r.NewSeeker() + + key := operation.MakePrefix(codePrefix, uint64(4)) + startPrefix := operation.MakePrefix(codePrefix, uint64(5)) + + _, err = seeker.SeekLE(startPrefix, key) + require.Error(t, err) + }) + + t.Run("has key below startPrefix", func(t *testing.T) { + seeker := r.NewSeeker() + + startPrefix := operation.MakePrefix(codePrefix, uint64(6)) + + // Key 5 exists, but it is below startPrefix, so nil is returned. + key := operation.MakePrefix(codePrefix, uint64(6)) + foundKey, err := seeker.SeekLE(startPrefix, key) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, foundKey) + }) + + t.Run("seek key in first db (Pebble)", func(t *testing.T) { + seeker := r.NewSeeker() + + startPrefix := operation.MakePrefix(codePrefix) + + // Seeking 9 and 10 return 9. + for _, keyPart := range []uint64{9, 10} { + key := operation.MakePrefix(codePrefix, keyPart) + expectedKey := operation.MakePrefix(codePrefix, uint64(9)) + foundKey, err := seeker.SeekLE(startPrefix, key) + require.NoError(t, err) + require.Equal(t, expectedKey, foundKey) + } + }) + + t.Run("seek key in second db (BadgerDB)", func(t *testing.T) { + seeker := r.NewSeeker() + + startPrefix := operation.MakePrefix(codePrefix) + + // Seeking [5, 8] returns 5. + for _, keyPart := range []uint64{5, 6, 7, 8} { + key := operation.MakePrefix(codePrefix, keyPart) + expectedKey := operation.MakePrefix(codePrefix, uint64(5)) + foundKey, err := seeker.SeekLE(startPrefix, key) + require.NoError(t, err) + require.Equal(t, expectedKey, foundKey) + } + + // Seeking [1, 4] returns 1. + for _, keyPart := range []uint64{1, 2, 3, 4} { + key := operation.MakePrefix(codePrefix, keyPart) + expectedKey := operation.MakePrefix(codePrefix, uint64(1)) + foundKey, err := seeker.SeekLE(startPrefix, key) + require.NoError(t, err) + require.Equal(t, expectedKey, foundKey) + } + + // Seeking 0 returns nil. + for _, keyPart := range []uint64{0} { + key := operation.MakePrefix(codePrefix, keyPart) + foundKey, err := seeker.SeekLE(startPrefix, key) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, foundKey) + } + }) + }) +} diff --git a/storage/operation/node_disallow_list.go b/storage/operation/node_disallow_list.go new file mode 100644 index 00000000000..540d7a01337 --- /dev/null +++ b/storage/operation/node_disallow_list.go @@ -0,0 +1,38 @@ +package operation + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// PurgeNodeDisallowList removes the set of disallowed node IDs from the database. +// If no corresponding entry exists, this function is a no-op. +// No errors are expected during normal operations. +// +// TODO: TEMPORARY manual override for adding node IDs to list of ejected nodes, applies to networking layer only +func PurgeNodeDisallowList(w storage.Writer) error { + err := RemoveByKey(w, MakePrefix(disallowedNodeIDs)) + if err != nil { + return fmt.Errorf("unexpected error while purging disallow list: %w", err) + } + return nil +} + +// PersistNodeDisallowList writes the set of disallowed node IDs into the database. +// If an entry already exists, it is overwritten; otherwise a new entry is created. +// No errors are expected during normal operations.
+ +// +// TODO: TEMPORARY manual override for adding node IDs to list of ejected nodes, applies to networking layer only +func PersistNodeDisallowList(w storage.Writer, disallowList map[flow.Identifier]struct{}) error { + return UpsertByKey(w, MakePrefix(disallowedNodeIDs), disallowList) +} + +// RetrieveNodeDisallowList reads the set of disallowed node IDs from the database. +// Returns `storage.ErrNotFound` error in case no respective database entry is present. +// +// TODO: TEMPORARY manual override for adding node IDs to list of ejected nodes, applies to networking layer only +func RetrieveNodeDisallowList(r storage.Reader, disallowList *map[flow.Identifier]struct{}) error { + return RetrieveByKey(r, MakePrefix(disallowedNodeIDs), disallowList) +} diff --git a/storage/operation/node_disallow_list_test.go b/storage/operation/node_disallow_list_test.go new file mode 100644 index 00000000000..0265f34e92c --- /dev/null +++ b/storage/operation/node_disallow_list_test.go @@ -0,0 +1,109 @@ +package operation_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +// Test_PersistNodeDisallowlist tests the operations: +// - PersistNodeDisallowList(disallowList map[flow.Identifier]struct{}) +// - RetrieveNodeDisallowList(disallowList *map[flow.Identifier]struct{}) +// - PurgeNodeDisallowList() +func Test_PersistNodeDisallowlist(t *testing.T) { + t.Run("Retrieving non-existing disallowlist should return 'storage.ErrNotFound'", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + var disallowList map[flow.Identifier]struct{} + err := operation.RetrieveNodeDisallowList(db.Reader(), &disallowList) + require.ErrorIs(t, err, storage.ErrNotFound) + }) + }) + + t.Run("Persisting and reading disallowlist", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + disallowList := unittest.IdentifierListFixture(8).Lookup() + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.PersistNodeDisallowList(rw.Writer(), disallowList) + }) + require.NoError(t, err) + + var b map[flow.Identifier]struct{} + err = operation.RetrieveNodeDisallowList(db.Reader(), &b) + require.NoError(t, err) + require.Equal(t, disallowList, b) + }) + }) + + t.Run("Overwrite disallowlist", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + disallowList1 := unittest.IdentifierListFixture(8).Lookup() + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.PersistNodeDisallowList(rw.Writer(), disallowList1) + }) + require.NoError(t, err) + + disallowList2 := unittest.IdentifierListFixture(8).Lookup() + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.PersistNodeDisallowList(rw.Writer(), disallowList2) + }) + require.NoError(t, err) + + var b map[flow.Identifier]struct{} + err = operation.RetrieveNodeDisallowList(db.Reader(), &b) + require.NoError(t, err) + require.Equal(t, disallowList2, b) + }) + }) + + t.Run("Write & Purge & Write disallowlist", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + disallowList1 := unittest.IdentifierListFixture(8).Lookup() + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return
operation.PersistNodeDisallowList(rw.Writer(), disallowList1) + }) + require.NoError(t, err) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.PurgeNodeDisallowList(rw.Writer()) + }) + require.NoError(t, err) + + var b map[flow.Identifier]struct{} + err = operation.RetrieveNodeDisallowList(db.Reader(), &b) + require.ErrorIs(t, err, storage.ErrNotFound) + + disallowList2 := unittest.IdentifierListFixture(8).Lookup() + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.PersistNodeDisallowList(rw.Writer(), disallowList2) + }) + require.NoError(t, err) + + err = operation.RetrieveNodeDisallowList(db.Reader(), &b) + require.NoError(t, err) + require.Equal(t, disallowList2, b) + }) + }) + + t.Run("Purge non-existing disallowlist", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + var b map[flow.Identifier]struct{} + + err := operation.RetrieveNodeDisallowList(db.Reader(), &b) + require.ErrorIs(t, err, storage.ErrNotFound) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.PurgeNodeDisallowList(rw.Writer()) + }) + require.NoError(t, err) + + err = operation.RetrieveNodeDisallowList(db.Reader(), &b) + require.ErrorIs(t, err, storage.ErrNotFound) + }) + }) +} diff --git a/storage/operation/payload.go b/storage/operation/payload.go new file mode 100644 index 00000000000..e7f10bbdd7a --- /dev/null +++ b/storage/operation/payload.go @@ -0,0 +1,219 @@ +package operation + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertSeal inserts a [flow.Seal] into the database, keyed by its ID. +// +// CAUTION: The caller must ensure sealID is a collision-resistant hash of the provided seal! +// This method silently overrides existing data, which is safe only if for the same key, we +// always write the same value. +// +// No errors are expected during normal operation. +func InsertSeal(w storage.Writer, sealID flow.Identifier, seal *flow.Seal) error { + return UpsertByKey(w, MakePrefix(codeSeal, sealID), seal) +} + +// RetrieveSeal retrieves the [flow.Seal] by its ID. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no seal with the specified `sealID` is known. +func RetrieveSeal(r storage.Reader, sealID flow.Identifier, seal *flow.Seal) error { + return RetrieveByKey(r, MakePrefix(codeSeal, sealID), seal) +} + +// IndexPayloadSeals indexes the list of Seals that were included in the specified block by the block ID. +// It produces a mapping from block ID to the list of seals contained in the block's payload. +// The seals are represented by their respective IDs. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption.
The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexPayloadSeals(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, sealIDs []flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index seals for blockID %v without holding lock %s", + blockID, storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codePayloadSeals, blockID), sealIDs) +} + +// LookupPayloadSeals retrieves the list of Seals that were included in the payload +// of the specified block. For every known block (at or above the root block height), +// this index should be populated. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block +func LookupPayloadSeals(r storage.Reader, blockID flow.Identifier, sealIDs *[]flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codePayloadSeals, blockID), sealIDs) +} + +// IndexPayloadReceipts indexes the list of Execution Receipts that were included in the specified block by the block ID. +// It produces a mapping from block ID to the list of Receipts contained in the block's payload. +// Execution Receipts are represented by their respective IDs. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexPayloadReceipts(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, receiptIDs []flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index receipts for blockID %v without holding lock %s", + blockID, storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codePayloadReceipts, blockID), receiptIDs) +} + +// IndexPayloadResults indexes the list of Execution Results that were included in the specified block by the block ID. +// It produces a mapping from block ID to the list of Results contained in the block's payload. +// Execution Results are represented by their respective IDs. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole.
This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexPayloadResults(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, resultIDs []flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index results for blockID %v without holding lock %s", + blockID, storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codePayloadResults, blockID), resultIDs) +} + +// IndexPayloadProtocolStateID indexes the given Protocol State ID by the block ID. +// The Protocol State ID represents the configuration, which the block proposes to become active *after* the +// block's certification. Every block states the ID of the Protocol State it proposes as part of the payload. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexPayloadProtocolStateID(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, stateID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("cannot index protocol state ID for blockID %v without holding lock %s", + blockID, storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codePayloadProtocolStateID, blockID), stateID) +} + +// LookupPayloadProtocolStateID retrieves the Protocol State ID for the specified block. +// The Protocol State ID represents the configuration, which the block proposes to become active *after* +// the block's certification. For every known block (at or above the root block height), the protocol +// state at the end of the block should be specified in the payload, and hence be indexed. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block +func LookupPayloadProtocolStateID(r storage.Reader, blockID flow.Identifier, stateID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codePayloadProtocolStateID, blockID), stateID) +} + +// LookupPayloadReceipts retrieves the list of Execution Receipts that were included in the payload of the +// specified block. For every known block (at or above the root block height), this index should be populated. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block.
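+//
+// Retrieval sketch (assumes `r` is a storage.Reader and `blockID` refers to a known block):
+//
+//	var receiptIDs []flow.Identifier
+//	err := LookupPayloadReceipts(r, blockID, &receiptIDs)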
+ +func LookupPayloadReceipts(r storage.Reader, blockID flow.Identifier, receiptIDs *[]flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codePayloadReceipts, blockID), receiptIDs) +} + +// LookupPayloadResults retrieves the list of Execution Results that were included in the payload of the +// specified block. For every known block (at or above the root block height), this index should be populated. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block +func LookupPayloadResults(r storage.Reader, blockID flow.Identifier, resultIDs *[]flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codePayloadResults, blockID), resultIDs) +} + +// IndexLatestSealAtBlock persists the highest seal that was included in the fork with head blockID. +// Frequently, this is the highest seal included in the block's payload. However, if there are no seals in +// this block, sealID should reference the highest seal in blockID's ancestors. +// +// CAUTION: +// - The caller must acquire the [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexLatestSealAtBlock(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, sealID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + return UpsertByKey(w, MakePrefix(codeBlockIDToLatestSealID, blockID), sealID) +} + +// LookupLatestSealAtBlock finds the highest seal that was included in the fork up to (and including) blockID. +// Frequently, this is the highest seal included in the block's payload. However, if there are no seals in +// this block, sealID should reference the highest seal in blockID's ancestors. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if the specified block is unknown +func LookupLatestSealAtBlock(r storage.Reader, blockID flow.Identifier, sealID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeBlockIDToLatestSealID, blockID), sealID) +} + +// IndexFinalizedSealByBlockID indexes the _finalized_ seal by the sealed block ID. +// Example: A <- B <- C(SealA) +// when block C is finalized, we create the index `A.ID->SealA.ID` +// +// CAUTION: +// - The caller must acquire the [storage.LockFinalizeBlock] and hold it until the database write has been committed. +// TODO: add lock proof as input and check for holding the lock in the implementation +// - OVERWRITES existing data (potential for data corruption): +// This method silently overrides existing data without any sanity checks whether data for the same key already exists. +// Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different +// value.
Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be +// compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The lock proof +// serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY with this write operation. +// +// No errors are expected during normal operation. +func IndexFinalizedSealByBlockID(w storage.Writer, sealedBlockID flow.Identifier, sealID flow.Identifier) error { + return UpsertByKey(w, MakePrefix(codeBlockIDToFinalizedSeal, sealedBlockID), sealID) +} + +// LookupBySealedBlockID returns the finalized seal for the specified FINALIZED block ID. +// In order for a block to have a seal in a finalized block, it must itself be finalized. Hence, +// this function only works for finalized blocks. However, note that there might be finalized blocks +// for which no seal exists (or the block containing the seal might not yet be finalized). +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no seal for the specified block is known. +func LookupBySealedBlockID(r storage.Reader, blockID flow.Identifier, sealID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeBlockIDToFinalizedSeal, blockID), sealID) +} diff --git a/storage/operation/payload_test.go b/storage/operation/payload_test.go new file mode 100644 index 00000000000..7b1e03adf69 --- /dev/null +++ b/storage/operation/payload_test.go @@ -0,0 +1,66 @@ +package operation_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestSealInsertCheckRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + expected := unittest.Seal.Fixture() + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertSeal(rw.Writer(), expected.ID(), expected) + }) + require.NoError(t, err) + + var actual flow.Seal + err = operation.RetrieveSeal(db.Reader(), expected.ID(), &actual) + require.NoError(t, err) + + assert.Equal(t, expected, &actual) + }) +} + +func TestSealIndexAndLookup(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + seal1 := unittest.Seal.Fixture() + seal2 := unittest.Seal.Fixture() + + seals := []*flow.Seal{seal1, seal2} + blockID := flow.MakeID([]byte{0x42}) + expected := []flow.Identifier(flow.GetIDs(seals)) + + lockManager := storage.NewTestingLockManager() + + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for _, seal := range seals { + if err := operation.InsertSeal(rw.Writer(), seal.ID(), seal); err != nil { + return err + } + } + if err := operation.IndexPayloadSeals(lctx, rw.Writer(), blockID, expected); err != nil { + return err + } + return nil + }) + }) + require.NoError(t, err) + + var actual []flow.Identifier + err = operation.LookupPayloadSeals(db.Reader(), blockID, &actual) + require.NoError(t, err) + + assert.Equal(t, expected, actual) + }) +} diff --git a/storage/operation/pebbleimpl/dbstore.go b/storage/operation/pebbleimpl/dbstore.go new file mode 100644 index
00000000000..f9891d135f4 --- /dev/null +++ b/storage/operation/pebbleimpl/dbstore.go @@ -0,0 +1,32 @@ +package pebbleimpl + +import ( + "github.com/cockroachdb/pebble/v2" + + "github.com/onflow/flow-go/storage" +) + +func ToDB(db *pebble.DB) storage.DB { + return &dbStore{db: db} +} + +type dbStore struct { + db *pebble.DB +} + +func (b *dbStore) Reader() storage.Reader { + return dbReader{db: b.db} +} + +func (b *dbStore) WithReaderBatchWriter(fn func(storage.ReaderBatchWriter) error) error { + return WithReaderBatchWriter(b.db, fn) +} + +func (b *dbStore) NewBatch() storage.Batch { + return NewReaderBatchWriter(b.db) +} + +// No errors are expected during normal operation. +func (b *dbStore) Close() error { + return b.db.Close() +} diff --git a/storage/operation/pebbleimpl/iterator.go b/storage/operation/pebbleimpl/iterator.go new file mode 100644 index 00000000000..269de850681 --- /dev/null +++ b/storage/operation/pebbleimpl/iterator.go @@ -0,0 +1,68 @@ +package pebbleimpl + +import ( + "fmt" + + "github.com/cockroachdb/pebble/v2" + + "github.com/onflow/flow-go/storage" +) + +type pebbleIterator struct { + *pebble.Iterator +} + +var _ storage.Iterator = (*pebbleIterator)(nil) + +func newPebbleIterator(reader pebble.Reader, startPrefix, endPrefix []byte, ops storage.IteratorOption) (*pebbleIterator, error) { + lowerBound, upperBound, hasUpperBound := storage.StartEndPrefixToLowerUpperBound(startPrefix, endPrefix) + + options := pebble.IterOptions{ + LowerBound: lowerBound, + UpperBound: upperBound, + } + + // setting UpperBound to nil if there is no upper bound + if !hasUpperBound { + options.UpperBound = nil + } + + iter, err := reader.NewIter(&options) + if err != nil { + return nil, fmt.Errorf("can not create iterator: %w", err) + } + + return &pebbleIterator{ + iter, + }, nil +} + +// IterItem returns the current key-value pair, or nil if done. +func (i *pebbleIterator) IterItem() storage.IterItem { + return pebbleIterItem{i.Iterator} +} + +// Next advances the iterator to the next key-value pair. +func (i *pebbleIterator) Next() { + i.Iterator.Next() +} + +type pebbleIterItem struct { + *pebble.Iterator +} + +var _ storage.IterItem = (*pebbleIterItem)(nil) + +// KeyCopy returns a copy of the key of the item, writing it to dst slice. +func (i pebbleIterItem) KeyCopy(dst []byte) []byte { + return append(dst[:0], i.Key()...) +} + +func (i pebbleIterItem) Value(fn func([]byte) error) error { + val, err := i.ValueAndErr() + if err != nil { + return err + } + + return fn(val) +} diff --git a/storage/operation/pebbleimpl/reader.go b/storage/operation/pebbleimpl/reader.go new file mode 100644 index 00000000000..a1a2c122cf5 --- /dev/null +++ b/storage/operation/pebbleimpl/reader.go @@ -0,0 +1,69 @@ +package pebbleimpl + +import ( + "bytes" + "errors" + "fmt" + "io" + + "github.com/cockroachdb/pebble/v2" + + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/noop" +) + +type dbReader struct { + db *pebble.DB +} + +var _ storage.Reader = (*dbReader)(nil) + +// Get gets the value for the given key. It returns ErrNotFound if the DB +// does not contain the key. +// other errors are exceptions +// +// The caller should not modify the contents of the returned slice, but it is +// safe to modify the contents of the argument after Get returns. The +// returned slice will remain valid until the returned Closer is closed. +// when err == nil, the caller MUST call closer.Close() or a memory leak will occur.
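+//
+// Typical call shape (sketch):
+//
+//	value, closer, err := r.Get(key)
+//	if err != nil {
+//		return err
+//	}
+//	defer closer.Close()
+//	// use value; it remains valid until closer.Close() runs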
+func (b dbReader) Get(key []byte) ([]byte, io.Closer, error) {
+	value, closer, err := b.db.Get(key)
+
+	if err != nil {
+		if errors.Is(err, pebble.ErrNotFound) {
+			return nil, noop.Closer{}, storage.ErrNotFound
+		}
+
+		// exception while checking for the key
+		return nil, noop.Closer{}, irrecoverable.NewExceptionf("could not load data: %w", err)
+	}
+
+	return value, closer, nil
+}
+
+// NewIter returns a new Iterator for the given key prefix range [startPrefix, endPrefix], both inclusive.
+// Specifically, all keys that meet ANY of the following conditions are included in the iteration:
+// - have a prefix equal to startPrefix OR
+// - have a prefix equal to the endPrefix OR
+// - have a prefix that is lexicographically between startPrefix and endPrefix
+//
+// It returns an error if the startPrefix key is greater than the endPrefix key.
+// No other errors are expected during normal operation.
+func (b dbReader) NewIter(startPrefix, endPrefix []byte, ops storage.IteratorOption) (storage.Iterator, error) {
+	if bytes.Compare(startPrefix, endPrefix) > 0 {
+		return nil, fmt.Errorf("startPrefix key must be less than or equal to endPrefix key")
+	}
+
+	return newPebbleIterator(b.db, startPrefix, endPrefix, ops)
+}
+
+// NewSeeker returns a new Seeker.
+func (b dbReader) NewSeeker() storage.Seeker {
+	return newPebbleSeeker(b.db)
+}
+
+// ToReader is a helper function to convert a *pebble.DB to a Reader
+func ToReader(db *pebble.DB) storage.Reader {
+	return dbReader{db}
+}
diff --git a/storage/operation/pebbleimpl/seeker.go b/storage/operation/pebbleimpl/seeker.go
new file mode 100644
index 00000000000..ba3f8ca2813
--- /dev/null
+++ b/storage/operation/pebbleimpl/seeker.go
@@ -0,0 +1,75 @@
+package pebbleimpl
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"slices"
+
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/merr"
+
+	"github.com/cockroachdb/pebble/v2"
+)
+
+type pebbleSeeker struct {
+	reader pebble.Reader
+}
+
+var _ storage.Seeker = (*pebbleSeeker)(nil)
+
+func newPebbleSeeker(reader pebble.Reader) *pebbleSeeker {
+	return &pebbleSeeker{
+		reader: reader,
+	}
+}
+
+// SeekLE (seek less than or equal) returns the largest key in lexicographical
+// order within the inclusive range [startPrefix, key].
+// This function returns an error if the specified key is less than startPrefix.
+// This function returns storage.ErrNotFound if no key that matches
+// the specified criteria is found.
+func (i *pebbleSeeker) SeekLE(startPrefix, key []byte) (_ []byte, errToReturn error) {
+
+	if bytes.Compare(key, startPrefix) < 0 {
+		return nil, errors.New("key must be greater than or equal to startPrefix key")
+	}
+
+	lowerBound, upperBound, hasUpperBound := storage.StartEndPrefixToLowerUpperBound(startPrefix, key)
+
+	options := pebble.IterOptions{
+		LowerBound: lowerBound,
+		UpperBound: upperBound,
+	}
+
+	// Setting UpperBound to nil if there is no upper bound
+	if !hasUpperBound {
+		options.UpperBound = nil
+	}
+
+	iter, err := i.reader.NewIter(&options)
+	if err != nil {
+		return nil, fmt.Errorf("can not create iterator: %w", err)
+	}
+	defer func() {
+		errToReturn = merr.CloseAndMergeError(iter, errToReturn)
+	}()
+
+	// Seek given key if present.
+
+	valid := iter.SeekGE(key)
+	if valid {
+		if bytes.Equal(iter.Key(), key) {
+			return slices.Clone(key), nil
+		}
+	}
+
+	// Seek largest key less than the given key.
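+	// At this point the exact key is known to be absent from the range, so the answer
+	// (if any) must be strictly smaller than `key`. SeekLT positions the iterator on the
+	// largest key less than `key`; the iterator's LowerBound guarantees that the result
+	// still lies within [startPrefix, key].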
+
+	valid = iter.SeekLT(key)
+	if !valid {
+		return nil, storage.ErrNotFound
+	}
+
+	return slices.Clone(iter.Key()), nil
+}
diff --git a/storage/operation/pebbleimpl/writer.go b/storage/operation/pebbleimpl/writer.go
new file mode 100644
index 00000000000..a65321b418a
--- /dev/null
+++ b/storage/operation/pebbleimpl/writer.go
@@ -0,0 +1,188 @@
+package pebbleimpl
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/cockroachdb/pebble/v2"
+
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+// ReaderBatchWriter is for reading and writing to a storage backend.
+// It is useful for performing a related sequence of reads and writes, after which you would like
+// to modify some non-database state if the sequence completed successfully (via AddCallback).
+// If you are not using AddCallback, avoid using ReaderBatchWriter: use Reader and Writer directly.
+// ReaderBatchWriter is not safe for concurrent use.
+type ReaderBatchWriter struct {
+	globalReader storage.Reader
+	batch        *pebble.Batch
+
+	// for executing callbacks after the batch has been flushed, such as updating caches
+	callbacks *operation.Callbacks
+
+	// values contains the values for this batch.
+	// The values map is set using SetScopedValue(key, value) and retrieved using ScopedValue(key).
+	// Initialization of the values map is deferred until it is needed, because
+	// ReaderBatchWriter is created frequently to update the database, but
+	// this values map is used infrequently to save data for batch operations.
+	// For example, store.TransactionResults.BatchRemoveByBlockID() saves batch
+	// removed block IDs in the values map, and retrieves the batch removed block
+	// IDs in the OnCommitSucceed() callback. This allows locking just once,
+	// instead of locking the TransactionResults cache for every removed block ID.
+	values map[string]any
+}
+
+var _ storage.ReaderBatchWriter = (*ReaderBatchWriter)(nil)
+var _ storage.Batch = (*ReaderBatchWriter)(nil)
+
+// GlobalReader returns a database-backed reader which reads the latest committed global database state ("read-committed isolation").
+// This reader will not read writes written to ReaderBatchWriter.Writer until the write batch is committed.
+// This reader may observe different values for the same key on subsequent reads.
+func (b *ReaderBatchWriter) GlobalReader() storage.Reader {
+	return b.globalReader
+}
+
+// Writer returns a writer associated with a batch of writes. The batch is pending until it is committed.
+// When we `Write` into the batch, that write operation is added to the pending batch, but not committed.
+// The commit operation is atomic w.r.t. the batch; either all writes are applied to the database, or no writes are.
+// Note:
+// - The writer cannot be used concurrently for writing.
+func (b *ReaderBatchWriter) Writer() storage.Writer {
+	return b
+}
+
+func (b *ReaderBatchWriter) PebbleWriterBatch() *pebble.Batch {
+	return b.batch
+}
+
+// AddCallback adds a callback to execute after the batch has been flushed,
+// regardless of whether the batch update succeeded or failed.
+// The error parameter is the error returned by the batch update.
+func (b *ReaderBatchWriter) AddCallback(callback func(error)) {
+	b.callbacks.AddCallback(callback)
+}
+
+// Commit flushes the batch to the database.
+// Commit may be called at most once per Batch.
+// ReaderBatchWriter can't be reused after Commit() is called.
+// No errors are expected during normal operation.
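+//
+// Illustrative lifecycle (a sketch, not part of this change; in most cases prefer
+// WithReaderBatchWriter below, which handles Close, callbacks, and Commit):
+//
+//	batch := NewReaderBatchWriter(db)
+//	defer batch.Close()
+//	if err := batch.Writer().Set(key, value); err != nil {
+//		return err
+//	}
+//	return batch.Commit()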
+func (b *ReaderBatchWriter) Commit() error {
+	err := b.batch.Commit(pebble.Sync)
+
+	b.callbacks.NotifyCallbacks(err)
+
+	return err
+}
+
+// Close releases memory of the batch and no error is returned.
+// Close must be called exactly once per batch.
+// This can be called as a defer statement immediately after creating the Batch
+// to reduce the risk of unbounded memory consumption.
+// No errors are expected during normal operation.
+func (b *ReaderBatchWriter) Close() error {
+	// Pebble v2 docs for Batch.Close():
+	//
+	// "Close closes the batch without committing it."
+
+	return b.batch.Close()
+}
+
+func WithReaderBatchWriter(db *pebble.DB, fn func(storage.ReaderBatchWriter) error) error {
+	batch := NewReaderBatchWriter(db)
+	defer batch.Close() // Release batch resource
+
+	err := fn(batch)
+	if err != nil {
+		// fn might hold a lock to ensure concurrent safety while reading and writing data;
+		// such a lock is usually released by a callback. In other words, fn might hold a
+		// lock that is only released by a callback, so we need to notify the callbacks
+		// before returning the error, to ensure the locks are released.
+		batch.callbacks.NotifyCallbacks(err)
+		return err
+	}
+
+	return batch.Commit()
+}
+
+func NewReaderBatchWriter(db *pebble.DB) *ReaderBatchWriter {
+	return &ReaderBatchWriter{
+		globalReader: ToReader(db),
+		batch:        db.NewBatch(),
+		callbacks:    operation.NewCallbacks(),
+	}
+}
+
+var _ storage.Writer = (*ReaderBatchWriter)(nil)
+
+// Set sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+//
+// It is safe to modify the contents of the arguments after Set returns.
+// No errors are expected during normal operation.
+func (b *ReaderBatchWriter) Set(key, value []byte) error {
+	return b.batch.Set(key, value, pebble.Sync)
+}
+
+// Delete deletes the value for the given key. Deletes are blind; they will
+// succeed even if the given key does not exist.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+// No errors are expected during normal operation.
+func (b *ReaderBatchWriter) Delete(key []byte) error {
+	return b.batch.Delete(key, pebble.Sync)
+}
+
+// DeleteByRange removes all keys with a prefix that falls within the
+// range [start, end], both inclusive.
+// It returns an error if endPrefix < startPrefix.
+// No other errors are expected during normal operation.
+func (b *ReaderBatchWriter) DeleteByRange(globalReader storage.Reader, startPrefix, endPrefix []byte) error {
+	if bytes.Compare(startPrefix, endPrefix) > 0 {
+		return fmt.Errorf("startPrefix key must be less than or equal to endPrefix key")
+	}
+
+	// DeleteRange takes a range with start (inclusive) and end (exclusive).
+	// Therefore, we need to increment the endPrefix to make it inclusive.
+	start, end, hasUpperBound := storage.StartEndPrefixToLowerUpperBound(startPrefix, endPrefix)
+	if hasUpperBound {
+		return b.batch.DeleteRange(start, end, pebble.Sync)
+	}
+
+	return operation.IterateKeysByPrefixRange(globalReader, startPrefix, endPrefix, func(key []byte) error {
+		err := b.batch.Delete(key, pebble.Sync)
+		if err != nil {
+			return fmt.Errorf("could not add key to delete batch (%v): %w", key, err)
+		}
+		return nil
+	})
+}
+
+// SetScopedValue stores the given value by the given key in this batch.
+// A stored value can be retrieved by the same key via ScopedValue().
+func (b *ReaderBatchWriter) SetScopedValue(key string, value any) {
+	// Creation of b.values is deferred until needed, so b.values can be nil here.
+	// Deleting an element from a nil b.values (map[string]any) is a no-op.
+	// Inserting an element into b.values requires initializing b.values first.
+
+	if value == nil {
+		delete(b.values, key)
+		return
+	}
+	if b.values == nil {
+		b.values = make(map[string]any)
+	}
+	b.values[key] = value
+}
+
+// ScopedValue returns the value associated with this batch for the given key and true if the key exists,
+// or nil and false if the key doesn't exist.
+func (b *ReaderBatchWriter) ScopedValue(key string) (any, bool) {
+	// Creation of b.values is deferred until needed, so b.values can be nil here.
+	// Accessing a nil b.values (map[string]any) always returns (nil, false).
+
+	v, exists := b.values[key]
+	return v, exists
+}
diff --git a/storage/operation/prefix.go b/storage/operation/prefix.go
new file mode 100644
index 00000000000..40d6b2aeb6a
--- /dev/null
+++ b/storage/operation/prefix.go
@@ -0,0 +1,187 @@
+//nolint:golint,unused
+package operation
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+const (
+
+	// codes for special database markers
+	_ = 1 // DEPRECATED: previously used for badger to denote the max length of the storage codes in units of bytes
+	_ = 2 // DEPRECATED: previously used to differentiate the protocol database from the secrets database; now the former is pebble and the latter is badger
+
+	// codes for views with special meaning
+	codeSafetyData = 10 // safety data for hotstuff state
+	codeLivenessData = 11 // liveness data for hotstuff state
+
+	// codes for fields associated with the root state
+	codeInstanceParams = 13 // instance parameters which are constant throughout the lifetime of a node (finalized root, sealed root, spork root)
+	_ = 14 // DEPRECATED: 14 was used for ProtocolVersion before the versioned Protocol State
+	_ = 15 // DEPRECATED: 15 was used to save the finalization safety threshold
+	_ = 16 // DEPRECATED: 16 was used for root spork height
+	_ = 17 // DEPRECATED: 17 was used for the root spork block ID
+
+	// code for heights with special meaning
+	codeFinalizedHeight = 20 // latest finalized block height
+	codeSealedHeight = 21 // latest sealed block height
+	codeClusterHeight = 22 // latest finalized height on cluster
+	codeExecutedBlock = 23 // latest executed block with max height
+	_ = 24 // DEPRECATED: 24 was used for the height of the highest finalized block contained in the root snapshot
+	codeLastCompleteBlockHeight = 25 // the height of the last block for which all collections were received
+	codeEpochFirstHeight = 26 // the height of the first block in a given epoch
+	_ = 27 // DEPRECATED: 27 was used for the height of the highest sealed block contained in the root snapshot
+
+	// codes for single entity storage
+	codeHeader = 30
+	_ = 31 // DEPRECATED: 31 was used for identities before epochs
+	codeGuarantee = 32
+	codeSeal = 33
+	codeTransaction = 34
+	codeCollection = 35
+	codeExecutionResult = 36
+	codeResultApproval = 37
+	codeChunk = 38
+	codeExecutionReceiptMeta = 39 // NOTE: prior to Mainnet25, this erroneously had the same value as codeExecutionResult (36)
+
+	// codes for indexing multiple identifiers by identifier -- to be continued with 80+
+	codeHeightToBlock = 40 // index mapping height to block ID
+	codeBlockIDToLatestSealID = 41 // index mapping a block to its last payload seal
+	codeClusterBlockToRefBlock = 42 // index cluster block ID to reference block ID
+	codeRefHeightToClusterBlock = 43 // index reference block height to cluster block IDs
+	codeBlockIDToFinalizedSeal = 44 // index _finalized_ seal by sealed block ID
+	codeBlockIDToQuorumCertificate = 45 // index of quorum certificates by block ID
+	codeEpochProtocolStateByBlockID = 46 // index of epoch protocol state entry by block ID
+	codeProtocolKVStoreByBlockID = 47 // index of protocol KV store entry by block ID
+	codeCertifiedBlockByView = 48 // index mapping view to ID of certified block (guaranteed by HotStuff that there is at most one per view)
+
+	// codes for indexing multiple identifiers by identifier
+	codeBlockChildren = 50 // index mapping block ID to children blocks
+	_ = 51 // DEPRECATED: 51 was used for identity indexes before epochs
+	codePayloadGuarantees = 52 // index mapping block ID to payload guarantees
+	codePayloadSeals = 53 // index mapping block ID to payload seals
+	codeCollectionBlock = 54 // index mapping collection ID to block ID
+	codeOwnBlockReceipt = 55 // index mapping block ID to execution receipt ID for execution nodes
+	_ = 56 // DEPRECATED: 56 was used for block->epoch status prior to Dynamic Protocol State in Mainnet25
+	codePayloadReceipts = 57 // index mapping block ID to payload receipts
+	codePayloadResults = 58 // index mapping block ID to payload results
+	codeAllBlockReceipts = 59 // index mapping of blockID to multiple receipts
+	codePayloadProtocolStateID = 60 // index mapping block ID to payload protocol state ID
+
+	// codes related to protocol level information
+	codeEpochSetup = 61 // EpochSetup service event, keyed by ID
+	codeEpochCommit = 62 // EpochCommit service event, keyed by ID
+	_ = 63 // USED BY SECRETS DATABASE: BeaconPrivateKey, keyed by epoch counter
+	_ = 64 // DEPRECATED: flag that the DKG for an epoch has been started
+	_ = 65 // DEPRECATED: flag that the DKG for an epoch has ended (stores end state)
+	_ = 66 // USED BY SECRETS DATABASE: current state of Recoverable Random Beacon State Machine for given epoch
+	codeVersionBeacon = 67 // flag for storing version beacons
+	codeEpochProtocolState = 68
+	codeProtocolKVStore = 69
+
+	// code for ComputationResult upload status storage
+	// NOTE: for now only the GCP uploader is supported. When another uploader (e.g. AWS) needs to
+	// be supported, we will need to define a new code.
+	codeComputationResults = 66
+
+	// job queue consumers and producers
+	codeJobConsumerProcessed = 70
+	codeJobQueue = 71
+	codeJobQueuePointer = 72
+
+	// codes for indexing multiple identifiers by identifier -- continued from 40-49
+	codeBlockIDToProposalSignature = 80 // index of proposer signatures by block ID
+	codeGuaranteeByCollectionID = 81 // index of collection guarantee by collection ID
+
+	// legacy codes (should be cleaned up)
+	codeChunkDataPack = 100
+	codeCommit = 101
+	codeEvent = 102
+	codeExecutionStateInteractions = 103
+	codeTransactionResult = 104
+	codeFinalizedCluster = 105
+	codeServiceEvent = 106
+	codeTransactionResultIndex = 107
+	codeLightTransactionResult = 108
+	codeLightTransactionResultIndex = 109
+	codeTransactionResultErrorMessage = 110
+	codeTransactionResultErrorMessageIndex = 111
+	codeIndexCollection = 200
+	codeIndexExecutionResultByBlock = 202
+	codeIndexCollectionByTransaction = 203
+	codeIndexResultApprovalByChunk = 204
+
+	// TEMPORARY codes
+	disallowedNodeIDs = 205 // manual override for adding node IDs to list of ejected nodes, applies to networking layer only
+
+	// internal failure information that should be preserved across restarts
+	codeExecutionFork = 254
+	codeEpochEmergencyFallbackTriggered = 255
+)
+
+func MakePrefix(code byte, keys ...any) []byte {
+	length := 1
+	for _, key := range keys {
+		length += prefixKeyPartLength(key)
+	}
+
+	prefix := make([]byte, 1, length)
+	prefix[0] = code
+	for _, key := range keys {
+		prefix = AppendPrefixKeyPart(prefix, key)
+	}
+	return prefix
+}
+
+// AppendPrefixKeyPart appends v in binary prefix format to buf.
+// NOTE: this function needs to be in sync with prefixKeyPartLength.
+func AppendPrefixKeyPart(buf []byte, v any) []byte {
+	switch i := v.(type) {
+	case uint8:
+		return append(buf, i)
+	case uint32:
+		var b [4]byte
+		binary.BigEndian.PutUint32(b[:], i)
+		return append(buf, b[:]...)
+	case uint64:
+		var b [8]byte
+		binary.BigEndian.PutUint64(b[:], i)
+		return append(buf, b[:]...)
+	case string:
+		return append(buf, []byte(i)...)
+	case flow.Role:
+		return append(buf, byte(i))
+	case flow.Identifier:
+		return append(buf, i[:]...)
+	case flow.ChainID:
+		return append(buf, []byte(i)...)
+	default:
+		panic(fmt.Sprintf("unsupported type to convert (%T)", v))
+	}
+}
+
+// prefixKeyPartLength returns the length of v in binary prefix format.
+// NOTE: this function needs to be in sync with AppendPrefixKeyPart.
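+//
+// For example (illustrative): MakePrefix(codeHeader, blockID) with blockID being a
+// flow.Identifier (32 bytes) yields a key of length 1+32 = 33 bytes, while
+// MakePrefix(codeFinalizedCluster, chainID, height) yields 1+len(chainID)+8 bytes.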
+func prefixKeyPartLength(v any) int { + switch i := v.(type) { + case uint8: + return 1 + case uint32: + return 4 + case uint64: + return 8 + case string: + return len(i) + case flow.Role: + return 1 + case flow.Identifier: + return len(i) + case flow.ChainID: + return len(i) + default: + panic(fmt.Sprintf("unsupported type to convert (%T)", v)) + } +} diff --git a/storage/operation/prefix_bench_test.go b/storage/operation/prefix_bench_test.go new file mode 100644 index 00000000000..92bde52aeb6 --- /dev/null +++ b/storage/operation/prefix_bench_test.go @@ -0,0 +1,24 @@ +package operation + +import ( + "testing" + + "github.com/onflow/flow-go/model/flow" +) + +func BenchmarkMakePrefixWithID(b *testing.B) { + var id flow.Identifier + + for range b.N { + _ = MakePrefix(codeHeader, id) + } +} + +func BenchmarkMakePrefixWithChainIDAndNum(b *testing.B) { + chainID := flow.ChainID("flow-emulator") + num := uint64(42) + + for range b.N { + _ = MakePrefix(codeFinalizedCluster, chainID, num) + } +} diff --git a/storage/operation/prefix_test.go b/storage/operation/prefix_test.go new file mode 100644 index 00000000000..b93b292ee15 --- /dev/null +++ b/storage/operation/prefix_test.go @@ -0,0 +1,37 @@ +package operation + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/flow" +) + +func TestMakePrefix(t *testing.T) { + + code := byte(0x01) + + u := uint64(1337) + expected := []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x39} + actual := MakePrefix(code, u) + + assert.Equal(t, expected, actual) + + r := flow.Role(2) + expected = []byte{0x01, 0x02} + actual = MakePrefix(code, r) + + assert.Equal(t, expected, actual) + + id := flow.Identifier{0x05, 0x06, 0x07} + expected = []byte{0x01, + 0x05, 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + } + actual = MakePrefix(code, id) + + assert.Equal(t, expected, actual) +} diff --git a/storage/operation/proposal_signatures.go b/storage/operation/proposal_signatures.go new file mode 100644 index 00000000000..a3ca35a3cdc --- /dev/null +++ b/storage/operation/proposal_signatures.go @@ -0,0 +1,36 @@ +package operation + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InsertProposalSignature inserts a proposal signature by block ID. +// +// CAUTION: +// - The caller must acquire either the lock [storage.LockInsertBlock] or [storage.LockInsertOrFinalizeClusterBlock] (but not both) +// and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// The lock proof serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK +// is done elsewhere ATOMICALLY with this write operation. It is intended that this function is called only for new +// blocks, i.e. no signature was previously persisted for it. +// +// No errors are expected during normal operation. 
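+//
+// Illustrative usage (a sketch mirroring this package's tests; `lockManager`, `db`,
+// `blockID`, and `sig` are assumed, and AcquireLock is assumed to be lockctx's
+// acquisition method):
+//
+//	lctx := lockManager.NewContext()
+//	defer lctx.Release()
+//	if err := lctx.AcquireLock(storage.LockInsertBlock); err != nil {
+//		return err
+//	}
+//	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		return InsertProposalSignature(lctx, rw.Writer(), blockID, &sig)
+//	})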
+func InsertProposalSignature(lctx lockctx.Proof, w storage.Writer, blockID flow.Identifier, sig *[]byte) error {
+	held, errStr := storage.HeldOneLock(lctx, storage.LockInsertBlock, storage.LockInsertOrFinalizeClusterBlock)
+	if !held {
+		return fmt.Errorf("%s", errStr)
+	}
+
+	return UpsertByKey(w, MakePrefix(codeBlockIDToProposalSignature, blockID), sig)
+}
+
+// RetrieveProposalSignature retrieves a proposal signature by blockID.
+// Returns storage.ErrNotFound if no proposal signature is stored for the block.
+func RetrieveProposalSignature(r storage.Reader, blockID flow.Identifier, sig *[]byte) error {
+	return RetrieveByKey(r, MakePrefix(codeBlockIDToProposalSignature, blockID), sig)
+}
diff --git a/storage/operation/protocol_kv_store.go b/storage/operation/protocol_kv_store.go
new file mode 100644
index 00000000000..528d727ade4
--- /dev/null
+++ b/storage/operation/protocol_kv_store.go
@@ -0,0 +1,62 @@
+package operation
+
+import (
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/storage"
+)
+
+// InsertProtocolKVStore inserts a protocol KV store snapshot, keyed by its ID.
+// This function does not require the caller to hold any lock, but the caller must
+// ensure that protocolKVStoreID is the hash of the given kvStore. This currently holds;
+// see makeVersionedModelID in state/protocol/protocol_state/kvstore/models.go.
+// No errors are expected during normal operations.
+func InsertProtocolKVStore(rw storage.ReaderBatchWriter, protocolKVStoreID flow.Identifier, kvStore *flow.PSKeyValueStoreData) error {
+	return UpsertByKey(rw.Writer(), MakePrefix(codeProtocolKVStore, protocolKVStoreID), kvStore)
+}
+
+// RetrieveProtocolKVStore retrieves a protocol KV store by ID.
+// Expected error returns during normal operations:
+// - [storage.ErrNotFound] if no protocol KV store with the given ID exists
+func RetrieveProtocolKVStore(r storage.Reader, protocolKVStoreID flow.Identifier, kvStore *flow.PSKeyValueStoreData) error {
+	return RetrieveByKey(r, MakePrefix(codeProtocolKVStore, protocolKVStoreID), kvStore)
+}
+
+// IndexProtocolKVStore indexes a protocol KV store by block ID.
+//
+// CAUTION:
+// - The caller must acquire the lock [storage.LockInsertBlock] and hold it until the database write has been committed.
+// - OVERWRITES existing data (potential for data corruption):
+// The lock proof serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere
+// ATOMICALLY with this write operation. Currently, this is done by operation.InsertHeader, which performs a check
+// to ensure the blockID is new; therefore any data indexed by this blockID is new as well.
+// +// Expected error returns during normal operations: +// - [storage.ErrAlreadyExists] if a KV store for the given blockID has already been indexed +func IndexProtocolKVStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, protocolKVStoreID flow.Identifier) error { + if !lctx.HoldsLock(storage.LockInsertBlock) { + return fmt.Errorf("missing required lock: %s", storage.LockInsertBlock) + } + + key := MakePrefix(codeProtocolKVStoreByBlockID, blockID) + exists, err := KeyExists(rw.GlobalReader(), key) + if err != nil { + return fmt.Errorf("could not check if kv-store snapshot with block id (%x) exists: %w", blockID[:], irrecoverable.NewException(err)) + } + if exists { + return fmt.Errorf("a kv-store snapshot for block id (%x) already exists: %w", blockID[:], storage.ErrAlreadyExists) + } + + return UpsertByKey(rw.Writer(), key, protocolKVStoreID) +} + +// LookupProtocolKVStore finds protocol KV store ID by block ID. +// Expected error returns during normal operations: +// - [storage.ErrNotFound] if the given ID does not correspond to any known block +func LookupProtocolKVStore(r storage.Reader, blockID flow.Identifier, protocolKVStoreID *flow.Identifier) error { + return RetrieveByKey(r, MakePrefix(codeProtocolKVStoreByBlockID, blockID), protocolKVStoreID) +} diff --git a/storage/operation/protocol_kv_store_test.go b/storage/operation/protocol_kv_store_test.go new file mode 100644 index 00000000000..e4b8d4e46c2 --- /dev/null +++ b/storage/operation/protocol_kv_store_test.go @@ -0,0 +1,134 @@ +package operation_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestInsertProtocolKVStore tests if basic store and index operations on ProtocolKVStore work as expected. +func TestInsertProtocolKVStore(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + expected := &flow.PSKeyValueStoreData{ + Version: 2, + Data: unittest.RandomBytes(32), + } + + kvStoreStateID := unittest.IdentifierFixture() + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertProtocolKVStore(rw, kvStoreStateID, expected) + }) + require.NoError(t, err) + + var actual flow.PSKeyValueStoreData + err = operation.RetrieveProtocolKVStore(db.Reader(), kvStoreStateID, &actual) + require.NoError(t, err) + + assert.Equal(t, expected, &actual) + + blockID := unittest.IdentifierFixture() + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexProtocolKVStore(lctx, rw, blockID, kvStoreStateID) + }) + }) + require.NoError(t, err) + + var actualProtocolKVStoreID flow.Identifier + err = operation.LookupProtocolKVStore(db.Reader(), blockID, &actualProtocolKVStoreID) + require.NoError(t, err) + + assert.Equal(t, kvStoreStateID, actualProtocolKVStoreID) + }) +} + +// TestIndexProtocolKVStore_ErrAlreadyExists tests that IndexProtocolKVStore returns ErrAlreadyExists +// when attempting to index a protocol KV store for a block ID that already has an index. 
+func TestIndexProtocolKVStore_ErrAlreadyExists(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + expected := &flow.PSKeyValueStoreData{ + Version: 2, + Data: unittest.RandomBytes(32), + } + + kvStoreStateID := unittest.IdentifierFixture() + blockID := unittest.IdentifierFixture() + + // Insert the protocol KV store first + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertProtocolKVStore(rw, kvStoreStateID, expected) + }) + require.NoError(t, err) + + // First indexing should succeed + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexProtocolKVStore(lctx, rw, blockID, kvStoreStateID) + }) + }) + require.NoError(t, err) + + // Second indexing with same block ID should fail with ErrAlreadyExists + differentKVStoreID := unittest.IdentifierFixture() + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexProtocolKVStore(lctx, rw, blockID, differentKVStoreID) + }) + }) + require.Error(t, err) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + + // Verify original index is still there and unchanged + var actualProtocolKVStoreID flow.Identifier + err = operation.LookupProtocolKVStore(db.Reader(), blockID, &actualProtocolKVStoreID) + require.NoError(t, err) + assert.Equal(t, kvStoreStateID, actualProtocolKVStoreID) + }) +} + +// TestIndexProtocolKVStore_MissingLock tests that IndexProtocolKVStore requires LockInsertBlock. +func TestIndexProtocolKVStore_MissingLock(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + kvStoreStateID := unittest.IdentifierFixture() + blockID := unittest.IdentifierFixture() + + // Attempt to index without holding the required lock + lctx := lockManager.NewContext() + defer lctx.Release() + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexProtocolKVStore(lctx, rw, blockID, kvStoreStateID) + }) + require.Error(t, err) + require.Contains(t, err.Error(), storage.LockInsertBlock) + }) +} + +// TestIndexProtocolKVStore_WrongLock tests that IndexProtocolKVStore fails when holding wrong locks. 
+func TestIndexProtocolKVStore_WrongLock(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		kvStoreStateID := unittest.IdentifierFixture()
+		blockID := unittest.IdentifierFixture()
+
+		// Test with LockFinalizeBlock (wrong lock)
+		err := unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.IndexProtocolKVStore(lctx, rw, blockID, kvStoreStateID)
+			})
+		})
+		require.Error(t, err)
+		require.Contains(t, err.Error(), storage.LockInsertBlock)
+	})
+}
diff --git a/storage/operation/qcs.go b/storage/operation/qcs.go
new file mode 100644
index 00000000000..00f1cabe7b8
--- /dev/null
+++ b/storage/operation/qcs.go
@@ -0,0 +1,51 @@
+package operation
+
+import (
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// InsertQuorumCertificate atomically performs the following storage operations for the given QuorumCertificate [QC]:
+//  1. Check if a QC certifying the same block is already stored.
+//  2. Only if no QC exists for the block, append the storage operations for indexing the QC by the block ID it certifies.
+//
+// CAUTION:
+// - For the same block, different QCs can easily be constructed by selecting different sub-sets
+// of the received votes. In most cases, it is only important that a block has been certified,
+// but it is irrelevant who specifically contributed to the QC. Therefore, we only store the first QC.
+// - In order to make sure only one QC is stored per block, _all calls_ to
+// `InsertQuorumCertificate` must be synchronized by the higher-level logic. Currently, we use the
+// lockctx.Proof to prove that the higher-level logic is holding [storage.LockInsertBlock] when
+// inserting the QC after checking that no QC is already stored.
+//
+// Expected error returns:
+// - [storage.ErrAlreadyExists] if any QuorumCertificate certifying the same block already exists
+func InsertQuorumCertificate(lctx lockctx.Proof, rw storage.ReaderBatchWriter, qc *flow.QuorumCertificate) error {
+	if !lctx.HoldsLock(storage.LockInsertBlock) {
+		return fmt.Errorf("cannot insert quorum certificate without holding lock %s", storage.LockInsertBlock)
+	}
+
+	key := MakePrefix(codeBlockIDToQuorumCertificate, qc.BlockID)
+	exist, err := KeyExists(rw.GlobalReader(), key)
+	if err != nil {
+		return fmt.Errorf("failed to check if quorum certificate exists for block %s: %w", qc.BlockID, err)
+	}
+	if exist {
+		return fmt.Errorf("quorum certificate for block %s already exists: %w", qc.BlockID, storage.ErrAlreadyExists)
+	}
+
+	return UpsertByKey(rw.Writer(), key, qc)
+}
+
+// RetrieveQuorumCertificate retrieves the QuorumCertificate for the specified block.
+// For every block that has been certified, this index should be populated.
+//
+// Expected errors during normal operations:
+// - [storage.ErrNotFound] if `blockID` does not refer to a certified block
+func RetrieveQuorumCertificate(r storage.Reader, blockID flow.Identifier, qc *flow.QuorumCertificate) error {
+	return RetrieveByKey(r, MakePrefix(codeBlockIDToQuorumCertificate, blockID), qc)
+}
diff --git a/storage/operation/qcs_test.go b/storage/operation/qcs_test.go
new file mode 100644
index 00000000000..c2d1ddd3ced
--- /dev/null
+++ b/storage/operation/qcs_test.go
@@ -0,0 +1,53 @@
+package operation_test
+
+import (
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestInsertQuorumCertificate(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		expected := unittest.QuorumCertificateFixture()
+		lockManager := storage.NewTestingLockManager()
+
+		err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.InsertQuorumCertificate(lctx, rw, expected)
+			})
+		})
+		require.NoError(t, err)
+
+		// While still holding the lock, get the value; this verifies that reads are not blocked by acquired locks
+		var actual flow.QuorumCertificate
+		err = operation.RetrieveQuorumCertificate(db.Reader(), expected.BlockID, &actual)
+		require.NoError(t, err)
+		assert.Equal(t, expected, &actual)
+
+		// create a different QC for the same block
+		different := unittest.QuorumCertificateFixture()
+		different.BlockID = expected.BlockID
+
+		// verify that overwriting the prior QC fails with `storage.ErrAlreadyExists`
+		err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.InsertQuorumCertificate(lctx, rw, different)
+			})
+		})
+		require.Error(t, err)
+		require.ErrorIs(t, err, storage.ErrAlreadyExists)
+
+		// verify that the original QC is still there
+		err = operation.RetrieveQuorumCertificate(db.Reader(), expected.BlockID, &actual)
+		require.NoError(t, err)
+		assert.Equal(t, expected, &actual)
+	})
+}
diff --git a/storage/operation/reads.go b/storage/operation/reads.go
new file mode 100644
index 00000000000..9bf9c60197d
--- /dev/null
+++ b/storage/operation/reads.go
@@ -0,0 +1,215 @@
+package operation
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"slices"
+
+	"github.com/vmihailenco/msgpack/v4"
+
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/merr"
+)
+
+// IterationFunc is a callback function that will be called on each key-value pair during the iteration.
+// The key is copied and passed to the function, so the key can be modified or retained after iteration.
+// The `getValue` function can be called to retrieve the value of the current key and decode it into the destVal object.
+// The caller can return (true, nil) to stop the iteration early.
+type IterationFunc func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error)
+
+// IterateKeysByPrefixRange will iterate over all entries in the database, where the key starts with a prefix in
We require that startPrefix <= endPrefix (otherwise this +// function errors). On every such key, the `check` function is called. If `check` errors, iteration is aborted. +// In other words, error returned by the iteration functions will be propagated to the caller. +// No errors expected during normal operations. +func IterateKeysByPrefixRange(r storage.Reader, startPrefix []byte, endPrefix []byte, check func(key []byte) error) error { + iterFunc := func(key []byte, getValue func(destVal any) error) (bail bool, err error) { + err = check(key) + if err != nil { + return true, err + } + return false, nil + } + return IterateKeys(r, startPrefix, endPrefix, iterFunc, storage.IteratorOption{BadgerIterateKeyOnly: true}) +} + +// IterateKeys will iterate over all entries in the database, where the key starts with a prefixes in +// the range [startPrefix, endPrefix] (both inclusive). +// No errors expected during normal operations. +func IterateKeys(r storage.Reader, startPrefix []byte, endPrefix []byte, iterFunc IterationFunc, opt storage.IteratorOption) (errToReturn error) { + if len(startPrefix) == 0 { + return fmt.Errorf("startPrefix prefix is empty") + } + + if len(endPrefix) == 0 { + return fmt.Errorf("endPrefix prefix is empty") + } + + // Reverse iteration is not supported by pebble + if bytes.Compare(startPrefix, endPrefix) > 0 { + return fmt.Errorf("startPrefix key must be less than or equal to endPrefix key") + } + + it, err := r.NewIter(startPrefix, endPrefix, opt) + if err != nil { + return fmt.Errorf("can not create iterator: %w", err) + } + defer func() { + errToReturn = merr.CloseAndMergeError(it, errToReturn) + }() + + for it.First(); it.Valid(); it.Next() { + item := it.IterItem() + key := item.Key() + + keyCopy := make([]byte, len(key)) + + // The underlying database may re-use and modify the backing memory of the returned key. + // Tor safety we proactively make a copy before passing the key to the upper layer. + copy(keyCopy, key) + + // check if we should process the item at all + bail, err := iterFunc(keyCopy, func(destVal any) error { + return item.Value(func(val []byte) error { + return msgpack.Unmarshal(val, destVal) + }) + }) + if err != nil { + return err + } + if bail { + return nil + } + } + + return nil +} + +// TraverseByPrefix will iterate over all keys with the given prefix +// error returned by the iteration functions will be propagated to the caller. +// No other errors are expected during normal operation. +func TraverseByPrefix(r storage.Reader, prefix []byte, iterFunc IterationFunc, opt storage.IteratorOption) error { + return IterateKeys(r, prefix, prefix, iterFunc, opt) +} + +// KeyOnlyIterateFunc returns an IterationFunc that only iterates over keys +func KeyOnlyIterateFunc(fn func(key []byte) error) IterationFunc { + return func(key []byte, _ func(destVal any) error) (bail bool, err error) { + err = fn(key) + if err != nil { + return true, err + } + return false, nil + } +} + +// KeyExists returns true if a key exists in the database. +// When this returned function is executed (and only then), it will write into the `keyExists` whether +// the key exists. +// No errors are expected during normal operation. 
+func KeyExists(r storage.Reader, key []byte) (exist bool, errToReturn error) {
+	_, closer, err := r.Get(key)
+	if err != nil {
+		// the key does not exist in the database
+		if errors.Is(err, storage.ErrNotFound) {
+			return false, nil
+		}
+		// exception while checking for the key
+		return false, irrecoverable.NewExceptionf("could not load data: %w", err)
+	}
+	defer func() {
+		errToReturn = merr.CloseAndMergeError(closer, errToReturn)
+	}()
+
+	// the key does exist in the database
+	return true, nil
+}
+
+// RetrieveByKey will retrieve the binary data under the given key from the database
+// and decode it into the given entity. The provided entity needs to be a
+// pointer to an initialized entity of the correct type.
+// Error returns:
+// - [storage.ErrNotFound] if the key does not exist in the database
+// - generic error in case of unexpected failure from the database layer, or failure
+// to decode an existing database value
+func RetrieveByKey(r storage.Reader, key []byte, entity any) (errToReturn error) {
+	val, closer, err := r.Get(key)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		errToReturn = merr.CloseAndMergeError(closer, errToReturn)
+	}()
+
+	err = msgpack.Unmarshal(val, entity)
+	if err != nil {
+		return irrecoverable.NewExceptionf("could not decode entity: %w", err)
+	}
+	return nil
+}
+
+// FindHighestAtOrBelowByPrefix is for database entries that are indexed by block height. It is suitable to search
+// keys with the format `prefix` + `height` (where "+" denotes concatenation of binary strings). The height
+// is encoded as Big-Endian (entries with numerically smaller height have lexicographically smaller key).
+// The function finds the *highest* key with the given prefix and height equal to or below the given height.
+func FindHighestAtOrBelowByPrefix(r storage.Reader, prefix []byte, height uint64, entity any) (errToReturn error) {
+	if len(prefix) == 0 {
+		return fmt.Errorf("prefix must not be empty")
+	}
+
+	key := append(prefix, EncodeKeyPart(height)...)
+
+	seeker := r.NewSeeker()
+
+	// Seek the highest key equal to or below the given key within the [prefix, key] range
+	highestKey, err := seeker.SeekLE(prefix, key)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return storage.ErrNotFound
+		}
+		return fmt.Errorf("can not seek height %d: %w", height, err)
+	}
+
+	// read the value of the highest key
+	val, closer, err := r.Get(highestKey)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		errToReturn = merr.CloseAndMergeError(closer, errToReturn)
+	}()
+
+	err = msgpack.Unmarshal(val, entity)
+	if err != nil {
+		return irrecoverable.NewExceptionf("failed to decode value: %w", err)
+	}
+
+	return nil
+}
+
+// CommonPrefix returns the common prefix of startPrefix and endPrefix.
+// The common prefix is used to narrow down the SSTables that
+// BadgerDB's iterator picks up.
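+//
+// For example: CommonPrefix([]byte{0x10, 0x20}, []byte{0x10, 0x21}) returns []byte{0x10},
+// while CommonPrefix([]byte{0x01}, []byte{0x02}) returns nil.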
+func CommonPrefix(startPrefix, endPrefix []byte) []byte {
+	commonPrefixMaxLength := min(
+		len(startPrefix),
+		len(endPrefix),
+	)
+
+	commonPrefixLength := commonPrefixMaxLength
+	for i := range commonPrefixMaxLength {
+		if startPrefix[i] != endPrefix[i] {
+			commonPrefixLength = i
+			break
+		}
+	}
+
+	if commonPrefixLength == 0 {
+		return nil
+	}
+	return slices.Clone(startPrefix[:commonPrefixLength])
+}
diff --git a/storage/operation/reads_bench_test.go b/storage/operation/reads_bench_test.go
new file mode 100644
index 00000000000..7ad5a520be9
--- /dev/null
+++ b/storage/operation/reads_bench_test.go
@@ -0,0 +1,63 @@
+package operation_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+)
+
+func BenchmarkRetrieve(t *testing.B) {
+	dbtest.BenchWithStorages(t, func(t *testing.B, r storage.Reader, withWriter dbtest.WithWriter) {
+		e := Entity{ID: 1337}
+		require.NoError(t, withWriter(operation.Upsert(e.Key(), e)))
+
+		t.ResetTimer()
+
+		for i := 0; i < t.N; i++ {
+			var readBack Entity
+			require.NoError(t, operation.RetrieveByKey(r, e.Key(), &readBack))
+		}
+	})
+}
+
+func BenchmarkNonExist(t *testing.B) {
+	dbtest.BenchWithStorages(t, func(t *testing.B, r storage.Reader, withWriter dbtest.WithWriter) {
+		for i := 0; i < t.N; i++ {
+			e := Entity{ID: uint64(i)}
+			require.NoError(t, withWriter(operation.Upsert(e.Key(), e)))
+		}
+
+		t.ResetTimer()
+		nonExist := Entity{ID: uint64(t.N + 1)}
+		for i := 0; i < t.N; i++ {
+			_, err := operation.KeyExists(r, nonExist.Key())
+			require.NoError(t, err)
+		}
+	})
+}
+
+func BenchmarkIterate(t *testing.B) {
+	dbtest.BenchWithStorages(t, func(t *testing.B, r storage.Reader, withWriter dbtest.WithWriter) {
+		prefix1 := []byte("prefix-1")
+		prefix2 := []byte("prefix-2")
+		for i := 0; i < t.N; i++ {
+			e := Entity{ID: uint64(i)}
+			key1 := append(prefix1, e.Key()...)
+			key2 := append(prefix2, e.Key()...)
+
+			require.NoError(t, withWriter(operation.Upsert(key1, e)))
+			require.NoError(t, withWriter(operation.Upsert(key2, e)))
+		}
+
+		t.ResetTimer()
+		var found [][]byte
+		require.NoError(t, operation.IterateKeysByPrefixRange(r, prefix1, prefix2, func(key []byte) error {
+			found = append(found, key)
+			return nil
+		}), "should iterate forward without error")
+	})
}
diff --git a/storage/operation/reads_functors.go b/storage/operation/reads_functors.go
new file mode 100644
index 00000000000..dace2e9ec02
--- /dev/null
+++ b/storage/operation/reads_functors.go
@@ -0,0 +1,44 @@
+package operation
+
+import "github.com/onflow/flow-go/storage"
+
+// Leo: This package includes deprecated functions that wrap the operation of reading from the database.
+// They are needed because the original badger implementation was also written in the same wrapped-function manner,
+// since badger requires reads to be done in a transaction, which is stateful.
+// Using these deprecated functions minimizes the changes during the refactor and makes them easier to review.
+// The simplified implementations are in the reads.go file and are encouraged to be used instead.
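+//
+// For example, the following two calls are equivalent (illustrative sketch; `r`, `key`,
+// and `entity` are assumed):
+//
+//	err := Retrieve(key, &entity)(r)     // deprecated functor style
+//	err = RetrieveByKey(r, key, &entity) // preferred direct style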
+
+func Iterate(startPrefix []byte, endPrefix []byte, check func(key []byte) error) func(storage.Reader) error {
+	return func(r storage.Reader) error {
+		return IterateKeysByPrefixRange(r, startPrefix, endPrefix, check)
+	}
+}
+
+func Traverse(prefix []byte, iterFunc IterationFunc, opt storage.IteratorOption) func(storage.Reader) error {
+	return func(r storage.Reader) error {
+		return TraverseByPrefix(r, prefix, iterFunc, opt)
+	}
+}
+
+func Retrieve(key []byte, entity interface{}) func(storage.Reader) error {
+	return func(r storage.Reader) error {
+		return RetrieveByKey(r, key, entity)
+	}
+}
+
+func Exists(key []byte, keyExists *bool) func(storage.Reader) error {
+	return func(r storage.Reader) error {
+		exists, err := KeyExists(r, key)
+		if err != nil {
+			return err
+		}
+		*keyExists = exists
+		return nil
+	}
+}
+
+func FindHighestAtOrBelow(prefix []byte, height uint64, entity interface{}) func(storage.Reader) error {
+	return func(r storage.Reader) error {
+		return FindHighestAtOrBelowByPrefix(r, prefix, height, entity)
+	}
+}
diff --git a/storage/operation/reads_test.go b/storage/operation/reads_test.go
new file mode 100644
index 00000000000..78a4d51024e
--- /dev/null
+++ b/storage/operation/reads_test.go
@@ -0,0 +1,432 @@
+package operation_test
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+)
+
+func TestFirst(t *testing.T) {
+	dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+
+		// Create a range of keys around the prefix start/end values
+		keys := [][]byte{
+			{0x10, 0x00},
+			{0x10, 0xff},
+		}
+
+		// Insert the keys into the storage
+		require.NoError(t, withWriter(func(writer storage.Writer) error {
+			for _, key := range keys {
+				value := []byte{0x00} // value doesn't matter; only keys are checked
+				err := operation.Upsert(key, value)(writer)
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		}))
+
+		iter, err := r.NewIter([]byte{0x20}, []byte{0x30}, storage.DefaultIteratorOptions())
+		require.NoError(t, err)
+
+		// Check that First returns false when no key falls within the iteration range
+		require.False(t, iter.First())
+		require.NoError(t, iter.Close())
+
+		iter, err = r.NewIter([]byte{0x10}, []byte{0x30}, storage.DefaultIteratorOptions())
+		require.NoError(t, err)
+
+		// Check that First returns true when at least one key falls within the iteration range
+		require.True(t, iter.First())
+		require.NoError(t, iter.Close())
+	})
+}
+
+func TestIterateKeysByPrefixRange(t *testing.T) {
+	dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+		// Define the prefix range
+		prefixStart := []byte{0x10}
+		prefixEnd := []byte{0x20}
+
+		// Create a range of keys around the prefix start/end values
+		keys := [][]byte{
+			// before start -> not included in range
+			{0x09, 0xff},
+			// within the start prefix -> included in range
+			{0x10, 0x00},
+			{0x10, 0xff},
+			// between start and end -> included in range
+			{0x15, 0x00},
+			{0x1A, 0xff},
+			// within the end prefix -> included in range
+			{0x20, 0x00},
+			{0x20, 0xff},
+			// after end -> not included in range
+			{0x21, 0x00},
+		}
+
+		// The first and last keys are outside the prefix range, so we omit them
+		// from keysInRange, which is the set of keys we expect in the iteration
+		keysInRange := keys[1 : len(keys)-1]
+
+		// Insert the keys into the storage
+		require.NoError(t, withWriter(func(writer storage.Writer) error {
+			for _, key := range keys {
+				value := []byte{0x00} // value doesn't matter; only keys are checked
+				err := operation.Upsert(key, value)(writer)
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		}))
+
+		// Forward iteration and check boundaries
+		var found [][]byte
+		require.NoError(t, operation.IterateKeysByPrefixRange(r, prefixStart, prefixEnd, func(key []byte) error {
+			found = append(found, key)
+			return nil
+		}), "should iterate forward without error")
+		require.ElementsMatch(t, keysInRange, found, "forward iteration should return the correct keys in range")
+	})
+}
+
+// Verify that when keys are prefixed by two prefixes, we can iterate with either the first prefix or the second prefix.
+func TestIterateHierachicalPrefixes(t *testing.T) {
+	dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+		keys := [][]byte{
+			{0x09, 0x00, 0x00},
+			{0x09, 0x00, 0xff},
+			{0x09, 0x19, 0xff},
+			{0x09, 0xff, 0x00},
+			{0x09, 0xff, 0xff},
+			{0x10, 0x00, 0x00},
+			{0x10, 0x00, 0xff},
+			{0x10, 0x19, 0x00},
+			{0x10, 0x19, 0xff},
+			{0x10, 0x20, 0x00},
+			{0x10, 0x20, 0xff},
+			{0x10, 0x21, 0x00},
+			{0x10, 0x21, 0xff},
+			{0x10, 0x22, 0x00},
+			{0x10, 0x22, 0xff},
+			{0x10, 0xff, 0x00},
+			{0x10, 0xff, 0xff},
+			{0x11, 0x00, 0x00},
+			{0x11, 0x00, 0xff},
+			{0x11, 0xff, 0x00},
+			{0x11, 0xff, 0xff},
+			{0x12, 0x00, 0x00},
+			{0x12, 0x00, 0xff},
+			{0x12, 0xff, 0x00},
+			{0x12, 0xff, 0xff},
+		}
+
+		// Insert the keys and values into storage
+		require.NoError(t, withWriter(func(writer storage.Writer) error {
+			for _, key := range keys {
+				err := operation.Upsert(key, []byte{1})(writer)
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		}))
+
+		// Test iteration with range of first prefixes (0x10 to 0x11)
+		firstPrefixRangeExpected := [][]byte{
+			{0x10, 0x00, 0x00},
+			{0x10, 0x00, 0xff},
+			{0x10, 0x19, 0x00},
+			{0x10, 0x19, 0xff},
+			{0x10, 0x20, 0x00},
+			{0x10, 0x20, 0xff},
+			{0x10, 0x21, 0x00},
+			{0x10, 0x21, 0xff},
+			{0x10, 0x22, 0x00},
+			{0x10, 0x22, 0xff},
+			{0x10, 0xff, 0x00},
+			{0x10, 0xff, 0xff},
+			{0x11, 0x00, 0x00},
+			{0x11, 0x00, 0xff},
+			{0x11, 0xff, 0x00},
+			{0x11, 0xff, 0xff},
+		}
+		firstPrefixRangeActual := make([][]byte, 0)
+		err := operation.IterateKeysByPrefixRange(r, []byte{0x10}, []byte{0x11}, func(key []byte) error {
+			firstPrefixRangeActual = append(firstPrefixRangeActual, key)
+			return nil
+		})
+		require.NoError(t, err, "iterate with range of first prefixes should not return an error")
+		require.Equal(t, firstPrefixRangeExpected, firstPrefixRangeActual, "iterated values for range of first prefixes should match expected values")
+
+		// Test iteration with range of second prefixes (0x1020 to 0x1021)
+		secondPrefixRangeActual := make([][]byte, 0)
+		secondPrefixRangeExpected := [][]byte{
+			{0x10, 0x20, 0x00},
+			{0x10, 0x20, 0xff},
+			{0x10, 0x21, 0x00},
+			{0x10, 0x21, 0xff},
+		}
+		err = operation.IterateKeysByPrefixRange(r, []byte{0x10, 0x20}, []byte{0x10, 0x21}, func(key []byte) error {
+			secondPrefixRangeActual = append(secondPrefixRangeActual, key)
+			return nil
+		})
+		require.NoError(t, err, "iterate with range of second prefixes should not return an error")
+		require.Equal(t, secondPrefixRangeExpected, secondPrefixRangeActual, "iterated values for range of second prefixes should match expected values")
+	})
+}
+
+// TestIterationBoundary tests that when the prefixEnd is all 1s, the iteration should include the last key
+func TestIterationBoundary(t *testing.T) {
+	dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+		// Define the prefix range
+		prefixStart := []byte{0x01}
+		prefixEnd := []byte{0xff}
+
+		// Create a range of keys around the prefix start/end values
+		keys := [][]byte{
+			{0x00},
+			{0x00, 0x00},
+			{0x00, 0xff},
+			{0x01},
+			{0x01, 0x00},
+			{0x01, 0xff},
+			{0x02},
+			{0xff},
+			{0xff, 0x00},
+			{0xff, 0xff},
+		}
+
+		expectedKeys := [][]byte{
+			{0x01},
+			{0x01, 0x00},
+			{0x01, 0xff},
+			{0x02},
+			{0xff},
+			{0xff, 0x00},
+			{0xff, 0xff},
+		}
+
+		// Insert the keys into the storage
+		require.NoError(t, withWriter(func(writer storage.Writer) error {
+			for _, key := range keys {
+				value := []byte{0x00} // value doesn't matter; only keys are checked
+				err := operation.Upsert(key, value)(writer)
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		}))
+
+		// Forward iteration and check boundaries
+		var found [][]byte
+		require.NoError(t, operation.IterateKeysByPrefixRange(r, prefixStart, prefixEnd, func(key []byte) error {
+			found = append(found, key)
+			return nil
+		}), "should iterate forward without error")
+		require.ElementsMatch(t, expectedKeys, found, "forward iteration should return the correct keys in range")
+	})
+}
+
+func TestTraverse(t *testing.T) {
+	dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+		keyVals := map[[2]byte]uint64{
+			{0x41, 0xff}: 3,
+			{0x42, 0x00}: 11,
+			{0xff}:       13,
+			{0x42, 0x56}: 17,
+			{0x00}:       19,
+			{0x42, 0xff}: 23,
+			{0x43, 0x00}: 33,
+		}
+		expected := []uint64{11, 23}
+
+		// Insert the keys and values into storage
+		require.NoError(t, withWriter(func(writer storage.Writer) error {
+			for key, val := range keyVals {
+				err := operation.Upsert(key[:], val)(writer)
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		}))
+
+		actual := make([]uint64, 0, len(keyVals))
+
+		// Define the iteration logic
+		iterationFunc := func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) {
+			// Skip the key {0x42, 0x56}
+			if bytes.Equal(keyCopy, []byte{0x42, 0x56}) {
+				return false, nil
+			}
+			var val uint64
+			err = getValue(&val)
+			if err != nil {
+				return true, err
+			}
+			actual = append(actual, val)
+			return false, nil
+		}
+
+		// Traverse the keys starting with prefix {0x42}
+		err := operation.TraverseByPrefix(r, []byte{0x42}, iterationFunc, storage.DefaultIteratorOptions())
+		require.NoError(t, err, "traverse should not return an error")
+
+		// Assert that the actual values match the expected values
+		require.Equal(t, expected, actual, "traversed values should match expected values")
+	})
+}
+
+// Verify traversing a subset of keys with keys-only traversal
+func TestTraverseKeyOnly(t *testing.T) {
+	dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+		keys := [][]byte{
+			// outside the traversed prefix -> not included
+			{0x04, 0x33},
+			{0x09, 0xff},
+			// within the traversed prefix {0x10} -> included
+			{0x10, 0x00},
+			{0x10, 0xff},
+			// outside the traversed prefix -> not included
+			{0x11, 0x00},
+			{0x1A, 0xff},
+		}
+		expected := [][]byte{
+			{0x10, 0x00},
+			{0x10, 0xff},
+		}
+
+		// Insert the keys and values into storage
+		require.NoError(t, withWriter(func(writer storage.Writer) error {
+			for _, key := range keys {
+				err := operation.Upsert(key, []byte{1})(writer)
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		}))
+
+		actual := make([][]byte, 0)
+
+		// Traverse the keys starting with prefix {0x10}
+		err := operation.TraverseByPrefix(r, []byte{0x10}, func(key []byte, getValue func(destVal any) error) (bail bool, err error) {
+			actual = append(actual, key)
+ return false, nil + }, storage.DefaultIteratorOptions()) + require.NoError(t, err, "traverse should not return an error") + + // Assert that the actual values match the expected values + require.Equal(t, expected, actual, "traversed values should match expected values") + }) +} + +func TestFindHighestAtOrBelow(t *testing.T) { + // Helper function to insert an entity into the storage + insertEntity := func(writer storage.Writer, prefix []byte, height uint64, entity Entity) error { + key := append(prefix, operation.EncodeKeyPart(height)...) + return operation.Upsert(key, entity)(writer) + } + + // Entities to be inserted + entities := []struct { + height uint64 + entity Entity + }{ + {5, Entity{ID: 41}}, + {10, Entity{ID: 42}}, + {15, Entity{ID: 43}}, + } + + // Run test with multiple storage backends + dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) { + prefix := []byte("test_prefix") + + // Insert entities into the storage + require.NoError(t, withWriter(func(writer storage.Writer) error { + for _, e := range entities { + if err := insertEntity(writer, prefix, e.height, e.entity); err != nil { + return err + } + } + return nil + })) + + // Declare entity to store the results of FindHighestAtOrBelow + var entity Entity + + // Test cases + tests := []struct { + name string + height uint64 + expectedValue uint64 + expectError bool + expectedErrMsg string + }{ + {"target first height exists", 5, 41, false, ""}, + {"target height exists", 10, 42, false, ""}, + {"target height above", 11, 42, false, ""}, + {"target height above highest", 20, 43, false, ""}, + {"target height below lowest", 4, 0, true, storage.ErrNotFound.Error()}, + {"empty prefix", 5, 0, true, "prefix must not be empty"}, + } + + // Execute test cases + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + prefixToUse := prefix + + if tt.name == "empty prefix" { + prefixToUse = []byte{} + } + + err := operation.FindHighestAtOrBelowByPrefix( + r, + prefixToUse, + tt.height, + &entity) + + if tt.expectError { + require.Error(t, err, fmt.Sprintf("expected error but got nil, entity: %v", entity)) + require.Contains(t, err.Error(), tt.expectedErrMsg) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedValue, entity.ID) + } + }) + } + }) +} + +func TestCommonPrefix(t *testing.T) { + testCases := []struct { + name string + startPrefix []byte + endPrefix []byte + expectedCommonPrefix []byte + }{ + {name: "both nil", expectedCommonPrefix: []byte(nil)}, + {name: "startPrefix nil", endPrefix: []byte{0x00, 0x01}, expectedCommonPrefix: []byte(nil)}, + {name: "identical", startPrefix: []byte{0x00, 0x01}, endPrefix: []byte{0x00, 0x01}, expectedCommonPrefix: []byte{0x00, 0x01}}, + {name: "substring", startPrefix: []byte{0x00}, endPrefix: []byte{0x00, 0x01}, expectedCommonPrefix: []byte{0x00}}, + {name: "has common prefix", startPrefix: []byte{0x00, 0x01, 0x02}, endPrefix: []byte{0x00, 0x01, 0x03}, expectedCommonPrefix: []byte{0x00, 0x01}}, + {name: "no common prefix", startPrefix: []byte{0x00, 0x01}, endPrefix: []byte{0x02, 0x01}, expectedCommonPrefix: []byte(nil)}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + commonPrefix := operation.CommonPrefix(tc.startPrefix, tc.endPrefix) + require.Equal(t, tc.expectedCommonPrefix, commonPrefix) + }) + } +} diff --git a/storage/operation/receipts.go b/storage/operation/receipts.go new file mode 100644 index 00000000000..832a86544b4 --- /dev/null +++ b/storage/operation/receipts.go @@ -0,0 +1,92 @@ 
+package operation
+
+import (
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/storage"
+)
+
+// InsertExecutionReceiptStub inserts a [flow.ExecutionReceiptStub] into the database, keyed by its ID.
+//
+// CAUTION: The caller must ensure receiptID is a collision-resistant hash of the provided
+// [flow.ExecutionReceiptStub]! This method silently overwrites existing data, which is safe only if,
+// for the same key, we always write the same value.
+func InsertExecutionReceiptStub(w storage.Writer, receiptID flow.Identifier, stub *flow.ExecutionReceiptStub) error {
+    return UpsertByKey(w, MakePrefix(codeExecutionReceiptMeta, receiptID), stub)
+}
+
+// RetrieveExecutionReceiptStub retrieves a [flow.ExecutionReceiptStub] by its ID.
+//
+// Expected errors during normal operations:
+// - [storage.ErrNotFound] if no receipt stub with the specified ID is known.
+func RetrieveExecutionReceiptStub(r storage.Reader, receiptID flow.Identifier, stub *flow.ExecutionReceiptStub) error {
+    return RetrieveByKey(r, MakePrefix(codeExecutionReceiptMeta, receiptID), stub)
+}
+
+// IndexOwnExecutionReceipt indexes the Execution Node's OWN execution receipt by the executed block ID.
+//
+// CAUTION:
+// - OVERWRITES existing data (potential for data corruption):
+//   This method silently overwrites existing data without any sanity checks whether data for the same key already exists.
+//   Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different
+//   value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be
+//   compromised as a whole. This method does not contain any safeguards to prevent such data corruption. The caller
+//   is responsible for ensuring that the DEDUPLICATION CHECK is done elsewhere ATOMICALLY with this write operation.
+//
+// No errors are expected during normal operation.
+func IndexOwnExecutionReceipt(w storage.Writer, blockID flow.Identifier, receiptID flow.Identifier) error {
+    return UpsertByKey(w, MakePrefix(codeOwnBlockReceipt, blockID), receiptID)
+}
+
+// LookupOwnExecutionReceipt retrieves the Execution Node's OWN execution receipt ID for the specified block.
+// Intended for Execution Node only. For every block executed by this node, this index should be populated.
+//
+// Expected errors during normal operations:
+// - [storage.ErrNotFound] if `blockID` does not refer to a block executed by this node
+func LookupOwnExecutionReceipt(r storage.Reader, blockID flow.Identifier, receiptID *flow.Identifier) error {
+    return RetrieveByKey(r, MakePrefix(codeOwnBlockReceipt, blockID), receiptID)
+}
+
+// RemoveOwnExecutionReceipt removes the Execution Node's OWN execution receipt index for the given block ID.
+// CAUTION: this is for recovery purposes only, and should not be used during normal operations!
+// It returns nil if no such index entry exists.
+//
+// No errors are expected during normal operation.
+func RemoveOwnExecutionReceipt(w storage.Writer, blockID flow.Identifier) error {
+    return RemoveByKey(w, MakePrefix(codeOwnBlockReceipt, blockID))
+}
+
+// IndexExecutionReceipts adds the given execution receipts to the set of all known receipts for the
+// given block. It produces a mapping from block ID to the set of all known receipts for that block.
+// One block could have multiple receipts, even if they are from the same executor.
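+//
+// For illustration (a sketch of the key layout used below): the receipt ID is embedded
+// in the key itself, which is what makes repeated writes of the same (blockID, receiptID)
+// pair harmless:
+//
+//	key   = codeAllBlockReceipts || blockID || receiptID
+//	value = receiptID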
+// +// This method is idempotent, and can be called repeatedly with the same block ID and receipt ID, +// without the risk of data corruption. +// +// No errors are expected during normal operation. +func IndexExecutionReceipts(w storage.Writer, blockID, receiptID flow.Identifier) error { + return UpsertByKey(w, MakePrefix(codeAllBlockReceipts, blockID, receiptID), receiptID) +} + +// LookupExecutionReceipts retrieves the set of all execution receipts for the specified block. +// For every known block (at or above the root block height), this index should be populated +// with all known receipts for that block. +// +// Expected errors during normal operations: +// - [storage.ErrNotFound] if `blockID` does not refer to a known block +func LookupExecutionReceipts(r storage.Reader, blockID flow.Identifier, receiptIDs *[]flow.Identifier) error { + iterationFunc := receiptIterationFunc(receiptIDs) + return TraverseByPrefix(r, MakePrefix(codeAllBlockReceipts, blockID), iterationFunc, storage.DefaultIteratorOptions()) +} + +// receiptIterationFunc returns an iteration function which collects all receipt IDs found during traversal. +func receiptIterationFunc(receiptIDs *[]flow.Identifier) IterationFunc { + return func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) { + var receiptID flow.Identifier + err = getValue(&receiptID) + if err != nil { + return true, err + } + *receiptIDs = append(*receiptIDs, receiptID) + return false, nil + } +} diff --git a/storage/operation/receipts_test.go b/storage/operation/receipts_test.go new file mode 100644 index 00000000000..0bdf92d663f --- /dev/null +++ b/storage/operation/receipts_test.go @@ -0,0 +1,72 @@ +package operation_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestReceipts_InsertRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + receipt := unittest.ExecutionReceiptFixture() + expected := receipt.Stub() + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertExecutionReceiptStub(rw.Writer(), receipt.ID(), expected) + }) + require.Nil(t, err) + + var actual flow.ExecutionReceiptStub + err = operation.RetrieveExecutionReceiptStub(db.Reader(), receipt.ID(), &actual) + require.Nil(t, err) + + assert.Equal(t, expected, &actual) + }) +} + +func TestReceipts_Index(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + receipt := unittest.ExecutionReceiptFixture() + expected := receipt.ID() + blockID := receipt.ExecutionResult.BlockID + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexOwnExecutionReceipt(rw.Writer(), blockID, expected) + }) + require.Nil(t, err) + + var actual flow.Identifier + err = operation.LookupOwnExecutionReceipt(db.Reader(), blockID, &actual) + require.Nil(t, err) + + assert.Equal(t, expected, actual) + }) +} + +func TestReceipts_MultiIndex(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + expected := []flow.Identifier{unittest.IdentifierFixture(), unittest.IdentifierFixture()} + blockID := unittest.IdentifierFixture() + + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for _, id := range 
expected {
+                err := operation.IndexExecutionReceipts(rw.Writer(), blockID, id)
+                require.Nil(t, err)
+            }
+            return nil
+        }))
+
+        var actual []flow.Identifier
+        err := operation.LookupExecutionReceipts(db.Reader(), blockID, &actual)
+        require.Nil(t, err)
+
+        assert.ElementsMatch(t, expected, actual)
+    })
+}
diff --git a/storage/operation/results.go b/storage/operation/results.go
new file mode 100644
index 00000000000..29937653968
--- /dev/null
+++ b/storage/operation/results.go
@@ -0,0 +1,63 @@
+package operation
+
+import (
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/storage"
+)
+
+// InsertExecutionResult inserts a [flow.ExecutionResult] into the storage, keyed by its ID.
+//
+// If the result already exists, it will be overwritten. Note that here, the key (result ID) is derived
+// from the value (result) via a collision-resistant hash function. Hence, unchecked overwrites pose no risk
+// of data corruption, because for the same key, we expect the same value.
+//
+// No errors are expected during normal operation.
+func InsertExecutionResult(w storage.Writer, result *flow.ExecutionResult) error {
+    return UpsertByKey(w, MakePrefix(codeExecutionResult, result.ID()), result)
+}
+
+// RetrieveExecutionResult retrieves an Execution Result by its ID.
+// Expected errors during normal operations:
+// - [storage.ErrNotFound] if no result with the specified `resultID` is known.
+func RetrieveExecutionResult(r storage.Reader, resultID flow.Identifier, result *flow.ExecutionResult) error {
+    return RetrieveByKey(r, MakePrefix(codeExecutionResult, resultID), result)
+}
+
+// IndexExecutionResult indexes the Execution Node's OWN Execution Result by the executed block's ID.
+//
+// CAUTION:
+// - OVERWRITES existing data (potential for data corruption):
+//   This method silently overwrites existing data without any sanity checks whether data for the same key already exists.
+//   Note that the Flow protocol mandates that for a previously persisted key, the data is never changed to a different
+//   value. Changing data could cause the node to publish inconsistent data and to be slashed, or the protocol to be
+//   compromised as a whole. This method does not contain any safeguards to prevent such data corruption.
+//
+// TODO: USE LOCK, we want to protect this mapping from accidental overwrites (because the key is not derived from the value via a collision-resistant hash)
+//
+// No errors are expected during normal operation.
+func IndexExecutionResult(w storage.Writer, blockID flow.Identifier, resultID flow.Identifier) error {
+    return UpsertByKey(w, MakePrefix(codeIndexExecutionResultByBlock, blockID), resultID)
+}
+
+// LookupExecutionResult retrieves the Execution Node's OWN Execution Result ID for the specified block.
+// Intended for Execution Node only. For every block executed by this node, this index should be populated.
+//
+// Expected errors during normal operations:
+// - [storage.ErrNotFound] if `blockID` does not refer to a block executed by this node
+func LookupExecutionResult(r storage.Reader, blockID flow.Identifier, resultID *flow.Identifier) error {
+    return RetrieveByKey(r, MakePrefix(codeIndexExecutionResultByBlock, blockID), resultID)
+}
+
+// ExistExecutionResult checks if the execution node has its OWN Execution Result for the specified block.
+// No errors are expected during normal operation.
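+//
+// A minimal usage sketch (assuming a storage.Reader `r` and a block ID `blockID`):
+//
+//	exists, err := ExistExecutionResult(r, blockID)
+//	if err != nil {
+//		return err
+//	}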
+func ExistExecutionResult(r storage.Reader, blockID flow.Identifier) (bool, error) {
+    return KeyExists(r, MakePrefix(codeIndexExecutionResultByBlock, blockID))
+}
+
+// RemoveExecutionResultIndex removes the Execution Node's OWN Execution Result index for the given blockID.
+// CAUTION: this is for recovery purposes only, and should not be used during normal operations!
+// It returns nil if no such index entry exists.
+// No errors are expected during normal operation.
+func RemoveExecutionResultIndex(w storage.Writer, blockID flow.Identifier) error {
+    return RemoveByKey(w, MakePrefix(codeIndexExecutionResultByBlock, blockID))
+}
diff --git a/storage/operation/results_test.go b/storage/operation/results_test.go
new file mode 100644
index 00000000000..a2f59425b1c
--- /dev/null
+++ b/storage/operation/results_test.go
@@ -0,0 +1,31 @@
+package operation_test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/storage"
+    "github.com/onflow/flow-go/storage/operation"
+    "github.com/onflow/flow-go/storage/operation/dbtest"
+    "github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestResults_InsertRetrieve(t *testing.T) {
+    dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+        expected := unittest.ExecutionResultFixture()
+
+        err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+            return operation.InsertExecutionResult(rw.Writer(), expected)
+        })
+        require.Nil(t, err)
+
+        var actual flow.ExecutionResult
+        err = operation.RetrieveExecutionResult(db.Reader(), expected.ID(), &actual)
+        require.Nil(t, err)
+
+        assert.Equal(t, expected, &actual)
+    })
+}
diff --git a/storage/operation/seeker_test.go b/storage/operation/seeker_test.go
new file mode 100644
index 00000000000..0f924a37f4a
--- /dev/null
+++ b/storage/operation/seeker_test.go
@@ -0,0 +1,96 @@
+package operation_test
+
+import (
+    "testing"
+
+    "github.com/onflow/flow-go/storage"
+    "github.com/onflow/flow-go/storage/operation"
+    "github.com/onflow/flow-go/storage/operation/dbtest"
+
+    "github.com/stretchr/testify/require"
+)
+
+func TestSeekLE(t *testing.T) {
+    dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+
+        // Insert the keys into the storage
+        codePrefix := byte(1)
+        keyParts := []uint64{1, 5, 9}
+
+        require.NoError(t, withWriter(func(writer storage.Writer) error {
+            for _, keyPart := range keyParts {
+                key := operation.MakePrefix(codePrefix, keyPart)
+                value := []byte{0x00} // the value is ignored; only keys matter for this test
+
+                err := operation.Upsert(key, value)(writer)
+                if err != nil {
+                    return err
+                }
+            }
+            return nil
+        }))
+
+        t.Run("key below start prefix", func(t *testing.T) {
+            seeker := r.NewSeeker()
+
+            key := operation.MakePrefix(codePrefix, uint64(4))
+            startPrefix := operation.MakePrefix(codePrefix, uint64(5))
+
+            _, err := seeker.SeekLE(startPrefix, key)
+            require.Error(t, err)
+        })
+
+        t.Run("seek key inside range", func(t *testing.T) {
+            seeker := r.NewSeeker()
+
+            startPrefix := operation.MakePrefix(codePrefix)
+
+            // Seeking 9 or 10 returns 9.
+            for _, keyPart := range []uint64{9, 10} {
+                key := operation.MakePrefix(codePrefix, keyPart)
+                expectedKey := operation.MakePrefix(codePrefix, uint64(9))
+                foundKey, err := seeker.SeekLE(startPrefix, key)
+                require.NoError(t, err)
+                require.Equal(t, expectedKey, foundKey)
+            }
+
+            // Seeking [5, 8] returns 5.
+ for _, keyPart := range []uint64{5, 6, 7, 8} { + key := operation.MakePrefix(codePrefix, keyPart) + expectedKey := operation.MakePrefix(codePrefix, uint64(5)) + foundKey, err := seeker.SeekLE(startPrefix, key) + require.NoError(t, err) + require.Equal(t, expectedKey, foundKey) + } + + // Seeking [1, 4] returns 1. + for _, keyPart := range []uint64{1, 2, 3, 4} { + key := operation.MakePrefix(codePrefix, keyPart) + expectedKey := operation.MakePrefix(codePrefix, uint64(1)) + foundKey, err := seeker.SeekLE(startPrefix, key) + require.NoError(t, err) + require.Equal(t, expectedKey, foundKey) + } + + // Seeking 0 returns nil. + for _, keyPart := range []uint64{0} { + key := operation.MakePrefix(codePrefix, keyPart) + foundKey, err := seeker.SeekLE(startPrefix, key) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, foundKey) + } + }) + + t.Run("has key below startPrefix", func(t *testing.T) { + seeker := r.NewSeeker() + + startPrefix := operation.MakePrefix(codePrefix, uint64(6)) + + // Key 5 exists, but it is below startPrefix, so nil is returned. + key := operation.MakePrefix(codePrefix, uint64(6)) + foundKey, err := seeker.SeekLE(startPrefix, key) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, foundKey) + }) + }) +} diff --git a/storage/operation/stats.go b/storage/operation/stats.go new file mode 100644 index 00000000000..e72b35b3e22 --- /dev/null +++ b/storage/operation/stats.go @@ -0,0 +1,202 @@ +package operation + +import ( + "context" + "encoding/json" + "fmt" + "math" + "sort" + "sync" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/module/util" + "github.com/onflow/flow-go/storage" +) + +// Stats holds statistics for a single prefix group. +type Stats struct { + Count int `json:"count"` + MinSize int `json:"min_size"` + MaxSize int `json:"max_size"` + TotalSize int `json:"total_size"` + AverageSize float64 `json:"avg_size"` +} + +// SummarizeKeysByFirstByteConcurrent iterates over all prefixes [0x00..0xFF] in parallel +// using nWorker goroutines. Each worker handles one prefix at a time until all are processed. +// +// The storage.Reader must be able to create multiple iterators concurrently. +func SummarizeKeysByFirstByteConcurrent(log zerolog.Logger, r storage.Reader, nWorker int) (map[byte]Stats, error) { + // We'll have at most 256 possible prefixes (0x00..0xFF). + // Create tasks (one per prefix), a results channel, and a wait group. + taskChan := make(chan byte, 256) + resultChan := make(chan struct { + prefix byte + stats Stats + err error + }, 256) + + var wg sync.WaitGroup + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start nWorker goroutines. + for i := 0; i < nWorker; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-ctx.Done(): + return // Stop immediately on cancellation + case prefix, ok := <-taskChan: + if !ok { + return // Stop if taskChan is closed + } + + st, err := processPrefix(r, prefix) + resultChan <- struct { + prefix byte + stats Stats + err error + }{ + prefix: prefix, + stats: st, + err: err, + } + } + } + }() + } + + progress := util.LogProgress(log, + util.DefaultLogProgressConfig( + "Summarizing keys by first byte", + 256, + )) + + // Send all prefixes [0..255] to taskChan. + for p := 0; p < 256; p++ { + taskChan <- byte(p) + } + close(taskChan) + + // Once all workers finish, close the result channel. + go func() { + wg.Wait() + close(resultChan) + }() + + // Gather results. We'll accumulate them in a map[prefix]Stats. 
+    finalStats := make(map[byte]Stats, 256)
+
+    var err error
+    // On the first error, cancel the remaining workers and return that error.
+    for res := range resultChan {
+        if res.err != nil {
+            cancel() // Cancel running goroutines
+            err = res.err
+            break
+        }
+        finalStats[res.prefix] = res.stats
+        log.Info().
+            Int("prefix", int(res.prefix)).
+            Int("total", res.stats.TotalSize).
+            Int("count", res.stats.Count).
+            Int("min", res.stats.MinSize).
+            Int("max", res.stats.MaxSize).
+            Msg("Processed prefix")
+        progress(1) // log the progress
+    }
+
+    if err != nil {
+        return nil, err
+    }
+    return finalStats, nil
+}
+
+// processPrefix does the actual iteration and statistic calculation for a single prefix.
+// It returns the Stats for that prefix, or an error if iteration fails.
+func processPrefix(r storage.Reader, prefix byte) (Stats, error) {
+    var s Stats
+    // We use MinSize = math.MaxInt as a sentinel so the first real size will become the new minimum.
+    s.MinSize = math.MaxInt
+
+    // Iterator range is [prefix, prefix] (inclusive).
+    start, end := []byte{prefix}, []byte{prefix}
+    it, err := r.NewIter(start, end, storage.IteratorOption{BadgerIterateKeyOnly: true})
+    if err != nil {
+        return s, fmt.Errorf("failed to create iterator for prefix 0x%X: %w", prefix, err)
+    }
+    defer it.Close()
+
+    for it.First(); it.Valid(); it.Next() {
+        item := it.IterItem()
+
+        // item.Value(...) invokes the given callback with the stored value, whose size we measure.
+        err := item.Value(func(val []byte) error {
+            size := len(val)
+            s.Count++
+            s.TotalSize += size
+            if size < s.MinSize {
+                s.MinSize = size
+            }
+            if size > s.MaxSize {
+                s.MaxSize = size
+            }
+            return nil
+        })
+
+        if err != nil {
+            return s, fmt.Errorf("failed to process value for prefix %v: %w", int(prefix), err)
+        }
+    }
+
+    // If we found no keys for this prefix, reset MinSize to 0 to avoid confusion.
+    if s.Count == 0 {
+        s.MinSize = 0
+    } else {
+        // Compute average size.
+        s.AverageSize = float64(s.TotalSize) / float64(s.Count)
+    }
+
+    return s, nil
+}
+
+// PrintStats logs the statistics for each prefix, sorted by total size in ascending order.
+// Each entry includes the prefix (as an integer), along with count, min, max, total, and average sizes.
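+//
+// A typical pairing with SummarizeKeysByFirstByteConcurrent above (a sketch, assuming a
+// zerolog.Logger `log` and a storage.Reader `r`):
+//
+//	stats, err := SummarizeKeysByFirstByteConcurrent(log, r, 8)
+//	if err != nil {
+//		return err
+//	}
+//	PrintStats(log, stats)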
+func PrintStats(log zerolog.Logger, stats map[byte]Stats) { + if len(stats) == 0 { + log.Info().Msg("No stats to print (map is empty).") + return + } + + // Convert map to a slice of key-value pairs + statList := make([]struct { + Prefix int `json:"prefix"` + Stats Stats `json:"stats"` + }, 0, len(stats)) + + for p, s := range stats { + statList = append(statList, struct { + Prefix int `json:"prefix"` + Stats Stats `json:"stats"` + }{Prefix: int(p), Stats: s}) + } + + // Sort by TotalSize in ascending order + sort.Slice(statList, func(i, j int) bool { + return statList[i].Stats.TotalSize < statList[j].Stats.TotalSize + }) + + // Convert sorted stats to JSON + jsonData, err := json.MarshalIndent(statList, "", " ") + if err != nil { + log.Error().Err(err).Msg("Failed to marshal stats to JSON") + return + } + + // Log the JSON + log.Info().RawJSON("stats", jsonData).Msg("Sorted prefix stats") +} diff --git a/storage/operation/stats_test.go b/storage/operation/stats_test.go new file mode 100644 index 00000000000..ff05671b1c3 --- /dev/null +++ b/storage/operation/stats_test.go @@ -0,0 +1,76 @@ +package operation_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestSummarizeKeysByFirstByteConcurrent(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // insert random events + b := unittest.IdentifierFixture() + events := unittest.EventsFixture(30) + for _, evt := range events { + err := operation.InsertEvent(rw.Writer(), b, evt) + if err != nil { + return err + } + } + + // insert 100 chunk data packs + for i := 0; i < 100; i++ { + collectionID := unittest.IdentifierFixture() + cdp := &storage.StoredChunkDataPack{ + ChunkID: unittest.IdentifierFixture(), + StartState: unittest.StateCommitmentFixture(), + Proof: []byte{'p'}, + CollectionID: collectionID, + } + require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error { + return operation.InsertChunkDataPack(lctx, rw, cdp) + })) + } + + // insert 20 results + for i := 0; i < 20; i++ { + result := unittest.ExecutionResultFixture() + err := operation.InsertExecutionResult(rw.Writer(), result) + if err != nil { + return err + } + } + + return nil + }) + require.NoError(t, err) + + // summarize keys by first byte + stats, err := operation.SummarizeKeysByFirstByteConcurrent(unittest.Logger(), db.Reader(), 10) + require.NoError(t, err) + + // print + operation.PrintStats(unittest.Logger(), stats) + + for i := 0; i < 256; i++ { + count := 0 + if i == 102 { // events + count = 30 + } else if i == 100 { // CDP + count = 100 + } else if i == 36 { // results + count = 20 + } + require.Equal(t, count, stats[byte(i)].Count, "byte %d", i) + } + }) +} diff --git a/storage/operation/transaction_results.go b/storage/operation/transaction_results.go new file mode 100644 index 00000000000..a97197a5cde --- /dev/null +++ b/storage/operation/transaction_results.go @@ -0,0 +1,151 @@ +package operation + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +func InsertTransactionResult(w storage.Writer, blockID flow.Identifier, transactionResult 
*flow.TransactionResult) error {
+    return UpsertByKey(w, MakePrefix(codeTransactionResult, blockID, transactionResult.TransactionID), transactionResult)
+}
+
+// IndexTransactionResult indexes a transaction result by block ID and the transaction's index within the block.
+func IndexTransactionResult(w storage.Writer, blockID flow.Identifier, txIndex uint32, transactionResult *flow.TransactionResult) error {
+    return UpsertByKey(w, MakePrefix(codeTransactionResultIndex, blockID, txIndex), transactionResult)
+}
+
+// RetrieveTransactionResult retrieves a transaction result by block ID and transaction ID.
+func RetrieveTransactionResult(r storage.Reader, blockID flow.Identifier, transactionID flow.Identifier, transactionResult *flow.TransactionResult) error {
+    return RetrieveByKey(r, MakePrefix(codeTransactionResult, blockID, transactionID), transactionResult)
+}
+
+// RetrieveTransactionResultByIndex retrieves a transaction result by block ID and transaction index.
+func RetrieveTransactionResultByIndex(r storage.Reader, blockID flow.Identifier, txIndex uint32, transactionResult *flow.TransactionResult) error {
+    return RetrieveByKey(r, MakePrefix(codeTransactionResultIndex, blockID, txIndex), transactionResult)
+}
+
+// LookupTransactionResultsByBlockIDUsingIndex retrieves all transaction results for a block, using the
+// tx_index index. This correctly handles blocks that contain duplicate transactions.
+func LookupTransactionResultsByBlockIDUsingIndex(r storage.Reader, blockID flow.Identifier, txResults *[]flow.TransactionResult) error {
+
+    txErrIterFunc := func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) {
+        var val flow.TransactionResult
+        err = getValue(&val)
+        if err != nil {
+            return true, err
+        }
+        *txResults = append(*txResults, val)
+        return false, nil
+    }
+
+    return TraverseByPrefix(r, MakePrefix(codeTransactionResultIndex, blockID), txErrIterFunc, storage.DefaultIteratorOptions())
+}
+
+// RemoveTransactionResultsByBlockID removes the transaction results for the given blockID.
+func RemoveTransactionResultsByBlockID(r storage.Reader, w storage.Writer, blockID flow.Identifier) error {
+    prefix := MakePrefix(codeTransactionResult, blockID)
+    err := RemoveByKeyPrefix(r, w, prefix)
+    if err != nil {
+        return fmt.Errorf("could not remove transaction results for block %v: %w", blockID, err)
+    }
+
+    return nil
+}
+
+// BatchRemoveTransactionResultsByBlockID removes transaction results for the given blockID in a provided batch.
+// No errors are expected during normal operation, but it may return a generic error
+// if the underlying database fails to process the request.
+func BatchRemoveTransactionResultsByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error {
+    prefix := MakePrefix(codeTransactionResult, blockID)
+    err := RemoveByKeyPrefix(batch.GlobalReader(), batch.Writer(), prefix)
+    if err != nil {
+        return fmt.Errorf("could not remove transaction results for block %v: %w", blockID, err)
+    }
+
+    return nil
+}
+
+// Deprecated: use BatchInsertLightTransactionResult instead.
+func InsertLightTransactionResult(w storage.Writer, blockID flow.Identifier, transactionResult *flow.LightTransactionResult) error {
+    return UpsertByKey(w, MakePrefix(codeLightTransactionResult, blockID, transactionResult.TransactionID), transactionResult)
+}
+
+func BatchInsertLightTransactionResult(w storage.Writer, blockID flow.Identifier, transactionResult *flow.LightTransactionResult) error {
+    return UpsertByKey(w, MakePrefix(codeLightTransactionResult, blockID, transactionResult.TransactionID), transactionResult)
+}
+
+func BatchIndexLightTransactionResult(w storage.Writer, blockID flow.Identifier, txIndex uint32, transactionResult *flow.LightTransactionResult) error {
+    return UpsertByKey(w, MakePrefix(codeLightTransactionResultIndex, blockID, txIndex), transactionResult)
+}
+
+func RetrieveLightTransactionResult(r storage.Reader, blockID flow.Identifier, transactionID flow.Identifier, transactionResult *flow.LightTransactionResult) error {
+    return RetrieveByKey(r, MakePrefix(codeLightTransactionResult, blockID, transactionID), transactionResult)
+}
+
+func RetrieveLightTransactionResultByIndex(r storage.Reader, blockID flow.Identifier, txIndex uint32, transactionResult *flow.LightTransactionResult) error {
+    return RetrieveByKey(r, MakePrefix(codeLightTransactionResultIndex, blockID, txIndex), transactionResult)
+}
+
+// LookupLightTransactionResultsByBlockIDUsingIndex retrieves all light transaction results for a block, using the
+// tx_index index. This correctly handles blocks that contain duplicate transactions.
+func LookupLightTransactionResultsByBlockIDUsingIndex(r storage.Reader, blockID flow.Identifier, txResults *[]flow.LightTransactionResult) error {
+
+    txErrIterFunc := func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) {
+        var val flow.LightTransactionResult
+        err = getValue(&val)
+        if err != nil {
+            return true, err
+        }
+        *txResults = append(*txResults, val)
+        return false, nil
+    }
+
+    return TraverseByPrefix(r, MakePrefix(codeLightTransactionResultIndex, blockID), txErrIterFunc, storage.DefaultIteratorOptions())
+}
+
+// BatchInsertTransactionResultErrorMessage inserts a transaction result error message by block ID and transaction ID
+// into the database using a batch write.
+func BatchInsertTransactionResultErrorMessage(w storage.Writer, blockID flow.Identifier, transactionResultErrorMessage *flow.TransactionResultErrorMessage) error {
+    return UpsertByKey(w, MakePrefix(codeTransactionResultErrorMessage, blockID, transactionResultErrorMessage.TransactionID), transactionResultErrorMessage)
+}
+
+// BatchIndexTransactionResultErrorMessage indexes a transaction result error message by index within the block using a
+// batch write.
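+//
+// Sketch of the intended store-then-index pairing within one batch (here `rw` is an
+// assumed storage.ReaderBatchWriter and `msg` a flow.TransactionResultErrorMessage):
+//
+//	w := rw.Writer()
+//	if err := BatchInsertTransactionResultErrorMessage(w, blockID, &msg); err != nil {
+//		return err
+//	}
+//	if err := BatchIndexTransactionResultErrorMessage(w, blockID, &msg); err != nil {
+//		return err
+//	}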
+func BatchIndexTransactionResultErrorMessage(w storage.Writer, blockID flow.Identifier, transactionResultErrorMessage *flow.TransactionResultErrorMessage) error {
+    return UpsertByKey(w, MakePrefix(codeTransactionResultErrorMessageIndex, blockID, transactionResultErrorMessage.Index), transactionResultErrorMessage)
+}
+
+// RetrieveTransactionResultErrorMessage retrieves a transaction result error message by block ID and transaction ID.
+func RetrieveTransactionResultErrorMessage(r storage.Reader, blockID flow.Identifier, transactionID flow.Identifier, transactionResultErrorMessage *flow.TransactionResultErrorMessage) error {
+    return RetrieveByKey(r, MakePrefix(codeTransactionResultErrorMessage, blockID, transactionID), transactionResultErrorMessage)
+}
+
+// RetrieveTransactionResultErrorMessageByIndex retrieves a transaction result error message by block ID and index.
+func RetrieveTransactionResultErrorMessageByIndex(r storage.Reader, blockID flow.Identifier, txIndex uint32, transactionResultErrorMessage *flow.TransactionResultErrorMessage) error {
+    return RetrieveByKey(r, MakePrefix(codeTransactionResultErrorMessageIndex, blockID, txIndex), transactionResultErrorMessage)
+}
+
+// TransactionResultErrorMessagesExists checks whether any transaction result error messages exist in the
+// database for the given block.
+func TransactionResultErrorMessagesExists(r storage.Reader, blockID flow.Identifier, messagesExist *bool) error {
+    exists, err := KeyExists(r, MakePrefix(codeTransactionResultErrorMessageIndex, blockID))
+    if err != nil {
+        return err
+    }
+    *messagesExist = exists
+    return nil
+}
+
+// LookupTransactionResultErrorMessagesByBlockIDUsingIndex retrieves all transaction result error messages
+// for a block, using the tx_index index. This correctly handles blocks that contain duplicate transactions.
+func LookupTransactionResultErrorMessagesByBlockIDUsingIndex(r storage.Reader, blockID flow.Identifier, txResultErrorMessages *[]flow.TransactionResultErrorMessage) error {
+    txErrIterFunc := func(keyCopy []byte, getValue func(destVal any) error) (bail bool, err error) {
+        var val flow.TransactionResultErrorMessage
+        err = getValue(&val)
+        if err != nil {
+            return true, err
+        }
+        *txResultErrorMessages = append(*txResultErrorMessages, val)
+        return false, nil
+    }
+
+    return TraverseByPrefix(r, MakePrefix(codeTransactionResultErrorMessageIndex, blockID), txErrIterFunc, storage.DefaultIteratorOptions())
+}
diff --git a/storage/operation/transactions.go b/storage/operation/transactions.go
new file mode 100644
index 00000000000..171c51f8797
--- /dev/null
+++ b/storage/operation/transactions.go
@@ -0,0 +1,22 @@
+package operation
+
+import (
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/storage"
+)
+
+// UpsertTransaction inserts a transaction, keyed by its fingerprint (the transaction ID).
+// It overwrites any existing transaction, which is safe because a transaction is uniquely
+// identified by its ID, a collision-resistant hash of the transaction body.
+func UpsertTransaction(w storage.Writer, txID flow.Identifier, tx *flow.TransactionBody) error {
+    return UpsertByKey(w, MakePrefix(codeTransaction, txID), tx)
+}
+
+// RetrieveTransaction retrieves a transaction by fingerprint.
+func RetrieveTransaction(r storage.Reader, txID flow.Identifier, tx *flow.TransactionBody) error {
+    return RetrieveByKey(r, MakePrefix(codeTransaction, txID), tx)
+}
+
+// RemoveTransaction removes a transaction by ID.
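+//
+// Usage sketch (assuming a storage.DB handle `db`, as exercised in transactions_test.go below):
+//
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		return RemoveTransaction(rw.Writer(), txID)
+//	})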
+func RemoveTransaction(w storage.Writer, txID flow.Identifier) error {
+    return RemoveByKey(w, MakePrefix(codeTransaction, txID))
+}
diff --git a/storage/operation/transactions_test.go b/storage/operation/transactions_test.go
new file mode 100644
index 00000000000..8986d8aab70
--- /dev/null
+++ b/storage/operation/transactions_test.go
@@ -0,0 +1,54 @@
+package operation_test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/storage"
+    "github.com/onflow/flow-go/storage/operation"
+    "github.com/onflow/flow-go/storage/operation/dbtest"
+    "github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestTransactions tests storage, deletion and retrieval of transactions
+func TestTransactions(t *testing.T) {
+    dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+        // store a transaction
+        expected := unittest.TransactionBodyFixture()
+        err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+            return operation.UpsertTransaction(rw.Writer(), expected.ID(), &expected)
+        })
+        require.NoError(t, err)
+
+        // verify it can be retrieved
+        var actual flow.TransactionBody
+        err = operation.RetrieveTransaction(db.Reader(), expected.ID(), &actual)
+        require.NoError(t, err)
+        assert.Equal(t, expected, actual)
+
+        // retrieving a non-existent transaction returns ErrNotFound
+        err = operation.RetrieveTransaction(db.Reader(), unittest.IdentifierFixture(), &actual)
+        require.Error(t, err)
+        require.ErrorIs(t, err, storage.ErrNotFound)
+
+        // delete the transaction
+        err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+            return operation.RemoveTransaction(rw.Writer(), expected.ID())
+        })
+        require.NoError(t, err)
+
+        // verify it has been deleted
+        err = operation.RetrieveTransaction(db.Reader(), expected.ID(), &actual)
+        require.Error(t, err)
+        require.ErrorIs(t, err, storage.ErrNotFound)
+
+        // deleting a non-existent transaction is a no-op
+        err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+            return operation.RemoveTransaction(rw.Writer(), unittest.IdentifierFixture())
+        })
+        require.NoError(t, err)
+    })
+}
diff --git a/storage/operation/version_beacon.go b/storage/operation/version_beacon.go
new file mode 100644
index 00000000000..814a226bc7d
--- /dev/null
+++ b/storage/operation/version_beacon.go
@@ -0,0 +1,35 @@
+package operation
+
+import (
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/storage"
+)
+
+// IndexVersionBeaconByHeight stores a sealed version beacon indexed by
+// flow.SealedVersionBeacon.SealHeight.
+//
+// No errors are expected during normal operation.
+func IndexVersionBeaconByHeight(
+    w storage.Writer,
+    beacon *flow.SealedVersionBeacon,
+) error {
+    return UpsertByKey(w, MakePrefix(codeVersionBeacon, beacon.SealHeight), beacon)
+}
+
+// LookupLastVersionBeaconByHeight finds the highest sealed flow.VersionBeacon no higher
+// than maxHeight.
+//
+// Returns storage.ErrNotFound if no version beacon exists at or below
+// the given height.
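+//
+// For example (sketch): to find the beacon governing some height `h`, given a storage.Reader `r`:
+//
+//	var vb flow.SealedVersionBeacon
+//	err := LookupLastVersionBeaconByHeight(r, h, &vb)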
+func LookupLastVersionBeaconByHeight(
+    r storage.Reader,
+    maxHeight uint64,
+    versionBeacon *flow.SealedVersionBeacon,
+) error {
+    return FindHighestAtOrBelowByPrefix(
+        r,
+        MakePrefix(codeVersionBeacon),
+        maxHeight,
+        versionBeacon,
+    )
+}
diff --git a/storage/operation/version_beacon_test.go b/storage/operation/version_beacon_test.go
new file mode 100644
index 00000000000..a36cae30484
--- /dev/null
+++ b/storage/operation/version_beacon_test.go
@@ -0,0 +1,111 @@
+package operation_test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/require"
+
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/storage"
+    "github.com/onflow/flow-go/storage/operation"
+    "github.com/onflow/flow-go/storage/operation/dbtest"
+    "github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestVersionBeacons_IndexAndLookup verifies indexing sealed version beacons by seal
+// height and looking up the latest beacon at or below a given height.
+func TestVersionBeacons_IndexAndLookup(t *testing.T) {
+    dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+        height1 := uint64(21)
+        height2 := uint64(37)
+        height3 := uint64(55)
+        vb1 := flow.SealedVersionBeacon{
+            VersionBeacon: unittest.VersionBeaconFixture(
+                unittest.WithBoundaries(
+                    flow.VersionBoundary{
+                        Version:     "1.0.0",
+                        BlockHeight: height1 + 5,
+                    },
+                ),
+            ),
+            SealHeight: height1,
+        }
+        vb2 := flow.SealedVersionBeacon{
+            VersionBeacon: unittest.VersionBeaconFixture(
+                unittest.WithBoundaries(
+                    flow.VersionBoundary{
+                        Version:     "1.1.0",
+                        BlockHeight: height2 + 5,
+                    },
+                ),
+            ),
+            SealHeight: height2,
+        }
+        vb3 := flow.SealedVersionBeacon{
+            VersionBeacon: unittest.VersionBeaconFixture(
+                unittest.WithBoundaries(
+                    flow.VersionBoundary{
+                        Version:     "2.0.0",
+                        BlockHeight: height3 + 5,
+                    },
+                ),
+            ),
+            SealHeight: height3,
+        }
+
+        // index 3 version beacons at different heights
+        require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+            return operation.IndexVersionBeaconByHeight(rw.Writer(), &vb1)
+        }))
+
+        require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+            return operation.IndexVersionBeaconByHeight(rw.Writer(), &vb2)
+        }))
+
+        require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+            return operation.IndexVersionBeaconByHeight(rw.Writer(), &vb3)
+        }))
+
+        // index version beacon 2 again to make sure we tolerate duplicates;
+        // it is possible for two or more events of the same type to come from the same height
+        require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+            return operation.IndexVersionBeaconByHeight(rw.Writer(), &vb2)
+        }))
+
+        t.Run("retrieve exact height match", func(t *testing.T) {
+            var actualVB flow.SealedVersionBeacon
+            err := operation.LookupLastVersionBeaconByHeight(db.Reader(), height1, &actualVB)
+            require.NoError(t, err)
+            require.Equal(t, vb1, actualVB)
+
+            err = operation.LookupLastVersionBeaconByHeight(db.Reader(), height2, &actualVB)
+            require.NoError(t, err)
+            require.Equal(t, vb2, actualVB)
+
+            err = operation.LookupLastVersionBeaconByHeight(db.Reader(), height3, &actualVB)
+            require.NoError(t, err)
+            require.Equal(t, vb3, actualVB)
+        })
+
+        t.Run("finds highest but not higher than given", func(t *testing.T) {
+            var actualVB flow.SealedVersionBeacon
+
+            err := operation.LookupLastVersionBeaconByHeight(db.Reader(), height3-1, &actualVB)
+            require.NoError(t, err)
+            require.Equal(t, vb2, actualVB)
+        })
+
+        t.Run("finds highest", func(t *testing.T) {
+            var actualVB flow.SealedVersionBeacon
+
+            err := operation.LookupLastVersionBeaconByHeight(db.Reader(), height3+1, &actualVB)
+            require.NoError(t, err)
+            require.Equal(t, vb3,
actualVB)
+        })
+
+        t.Run("height below lowest entry returns nothing", func(t *testing.T) {
+            var actualVB flow.SealedVersionBeacon
+
+            err := operation.LookupLastVersionBeaconByHeight(db.Reader(), height1-1, &actualVB)
+            require.ErrorIs(t, err, storage.ErrNotFound)
+        })
+    })
+}
diff --git a/storage/operation/views.go b/storage/operation/views.go
new file mode 100644
index 00000000000..0e18daec20a
--- /dev/null
+++ b/storage/operation/views.go
@@ -0,0 +1,55 @@
+package operation
+
+import (
+    "fmt"
+
+    "github.com/jordanschalm/lockctx"
+
+    "github.com/onflow/flow-go/consensus/hotstuff"
+    "github.com/onflow/flow-go/model/flow"
+    "github.com/onflow/flow-go/storage"
+)
+
+// UpsertSafetyData inserts or updates the given safety data for this node.
+// Intended for consensus participants only (consensus and collector nodes).
+// Here, `chainID` specifies the consensus instance in which the node participates.
+//
+// No errors are expected during normal operation.
+func UpsertSafetyData(lctx lockctx.Proof, rw storage.ReaderBatchWriter, chainID flow.ChainID, safetyData *hotstuff.SafetyData) error {
+    if !lctx.HoldsLock(storage.LockInsertSafetyData) {
+        return fmt.Errorf("missing required lock: storage.LockInsertSafetyData")
+    }
+
+    return UpsertByKey(rw.Writer(), MakePrefix(codeSafetyData, chainID), safetyData)
+}
+
+// RetrieveSafetyData retrieves the safety data for this node.
+// Intended for consensus participants only (consensus and collector nodes).
+// Here, `chainID` specifies the consensus instance in which the node participates.
+// For consensus and collector nodes, this value should always exist (for the correct chainID).
+// No errors are expected during normal operation.
+func RetrieveSafetyData(r storage.Reader, chainID flow.ChainID, safetyData *hotstuff.SafetyData) error {
+    return RetrieveByKey(r, MakePrefix(codeSafetyData, chainID), safetyData)
+}
+
+// UpsertLivenessData inserts or updates the given liveness data for this node.
+// Intended for consensus participants only (consensus and collector nodes).
+// Here, `chainID` specifies the consensus instance in which the node participates.
+//
+// No errors are expected during normal operation.
+func UpsertLivenessData(lctx lockctx.Proof, rw storage.ReaderBatchWriter, chainID flow.ChainID, livenessData *hotstuff.LivenessData) error {
+    if !lctx.HoldsLock(storage.LockInsertLivenessData) {
+        return fmt.Errorf("missing required lock: storage.LockInsertLivenessData")
+    }
+
+    return UpsertByKey(rw.Writer(), MakePrefix(codeLivenessData, chainID), livenessData)
+}
+
+// RetrieveLivenessData retrieves the liveness data for this node.
+// Intended for consensus participants only (consensus and collector nodes).
+// Here, `chainID` specifies the consensus instance in which the node participates.
+// For consensus and collector nodes, this value should always exist (for the correct chainID).
+// No errors are expected during normal operation.
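+//
+// Sketch of the expected write/read pattern (lock acquisition via lockctx assumed; `lctx`,
+// `rw`, `r`, and `chainID` are assumed to be in scope):
+//
+//	// while holding storage.LockInsertLivenessData:
+//	err := UpsertLivenessData(lctx, rw, chainID, &liveness)
+//	// reads take no lock proof:
+//	var got hotstuff.LivenessData
+//	err = RetrieveLivenessData(r, chainID, &got)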
+func RetrieveLivenessData(r storage.Reader, chainID flow.ChainID, livenessData *hotstuff.LivenessData) error { + return RetrieveByKey(r, MakePrefix(codeLivenessData, chainID), livenessData) +} diff --git a/storage/operation/writes.go b/storage/operation/writes.go new file mode 100644 index 00000000000..920cc232d3d --- /dev/null +++ b/storage/operation/writes.go @@ -0,0 +1,82 @@ +package operation + +import ( + "bytes" + "fmt" + + "github.com/vmihailenco/msgpack/v4" + + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/storage" +) + +// UpsertByKey will encode the given entity using msgpack and will insert the resulting +// binary data under the provided key. +// If the key already exists, the value will be overwritten. +// Error returns: +// - generic error in case of unexpected failure from the database layer or +// encoding failure. +func UpsertByKey(w storage.Writer, key []byte, val interface{}) error { + value, err := msgpack.Marshal(val) + if err != nil { + return irrecoverable.NewExceptionf("failed to encode value: %w", err) + } + + err = w.Set(key, value) + if err != nil { + return irrecoverable.NewExceptionf("failed to store data: %w", err) + } + + return nil +} + +// Upserting returns a functor, whose execution will append the given key-value-pair to the provided +// storage writer (typically a pending batch of database writes). +func Upserting(key []byte, val interface{}) func(storage.Writer) error { + value, err := msgpack.Marshal(val) + return func(w storage.Writer) error { + if err != nil { + return irrecoverable.NewExceptionf("failed to encode value: %w", err) + } + + err = w.Set(key, value) + if err != nil { + return irrecoverable.NewExceptionf("failed to store data: %w", err) + } + + return nil + } +} + +// RemoveByKey removes the entity with the given key, if it exists. If it doesn't +// exist, this is a no-op. +// Error returns: +// * generic error in case of unexpected database error +func RemoveByKey(w storage.Writer, key []byte) error { + err := w.Delete(key) + if err != nil { + return irrecoverable.NewExceptionf("could not delete item: %w", err) + } + return nil +} + +// RemoveByKeyPrefix removes all keys with the given prefix +// Error returns: +// * generic error in case of unexpected database error +func RemoveByKeyPrefix(reader storage.Reader, w storage.Writer, prefix []byte) error { + return RemoveByKeyRange(reader, w, prefix, prefix) +} + +// RemoveByKeyRange removes all keys with a prefix that falls within the range [start, end], both inclusive. 
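+// For example (illustrative, mirroring TestRemoveByRange in writes_test.go): with
+// startPrefix = []byte{0x10} and endPrefix = []byte{0x12}, keys such as {0x10, 0x50},
+// {0x11} and {0x12, 0xff} are removed, while {0x13} is retained.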
+// It returns an error if endPrefix < startPrefix;
+// no other errors are expected during normal operation.
+func RemoveByKeyRange(reader storage.Reader, w storage.Writer, startPrefix []byte, endPrefix []byte) error {
+    if bytes.Compare(startPrefix, endPrefix) > 0 {
+        return fmt.Errorf("startPrefix key must be less than or equal to endPrefix key")
+    }
+    err := w.DeleteByRange(reader, startPrefix, endPrefix)
+    if err != nil {
+        return irrecoverable.NewExceptionf("could not delete item: %w", err)
+    }
+    return nil
+}
diff --git a/storage/operation/writes_bench_test.go b/storage/operation/writes_bench_test.go
new file mode 100644
index 00000000000..4c569d397f0
--- /dev/null
+++ b/storage/operation/writes_bench_test.go
@@ -0,0 +1,48 @@
+package operation_test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/require"
+
+    "github.com/onflow/flow-go/storage"
+    "github.com/onflow/flow-go/storage/operation"
+    "github.com/onflow/flow-go/storage/operation/dbtest"
+)
+
+func BenchmarkUpsert(t *testing.B) {
+    dbtest.BenchWithStorages(t, func(t *testing.B, r storage.Reader, withWriter dbtest.WithWriter) {
+        for i := 0; i < t.N; i++ {
+            e := Entity{ID: uint64(i)}
+            require.NoError(t, withWriter(operation.Upsert(e.Key(), e)))
+        }
+    })
+}
+
+func BenchmarkRemove(t *testing.B) {
+    dbtest.BenchWithStorages(t, func(t *testing.B, r storage.Reader, withWriter dbtest.WithWriter) {
+        n := t.N
+        for i := 0; i < n; i++ {
+            e := Entity{ID: uint64(i)}
+            require.NoError(t, withWriter(operation.Upsert(e.Key(), e)))
+        }
+        t.ResetTimer()
+        for i := 0; i < n; i++ {
+            e := Entity{ID: uint64(i)}
+            require.NoError(t, withWriter(operation.Remove(e.Key())))
+        }
+    })
+}
+
+func BenchmarkRemoveByPrefix(t *testing.B) {
+    dbtest.BenchWithStorages(t, func(t *testing.B, r storage.Reader, withWriter dbtest.WithWriter) {
+        prefix := []byte("prefix")
+        for i := 0; i < t.N; i++ {
+            e := Entity{ID: uint64(i)}
+            key := append(prefix, e.Key()...)
+            require.NoError(t, withWriter(operation.Upsert(key, e)))
+        }
+        t.ResetTimer()
+        require.NoError(t, withWriter(operation.RemoveByPrefix(r, prefix)))
+    })
+}
diff --git a/storage/operation/writes_functors.go b/storage/operation/writes_functors.go
new file mode 100644
index 00000000000..1ee182d040b
--- /dev/null
+++ b/storage/operation/writes_functors.go
@@ -0,0 +1,33 @@
+package operation
+
+import "github.com/onflow/flow-go/storage"
+
+// NOTE: This file contains deprecated functions that wrap database write operations.
+// They exist because the original Badger implementation was written in the same
+// wrapped-function style, since Badger requires writes to be done within a (stateful)
+// transaction. Using these deprecated wrappers minimizes churn during the refactor and
+// makes the changes easier to review.
+// The simplified implementations live in writes.go and are encouraged for new code.
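+//
+// For illustration, the functor style composes with a batch writer like this
+// (a sketch, assuming a storage.DB handle `db`):
+//
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		return operation.Upsert(key, val)(rw.Writer())
+//	})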
+ +func Upsert(key []byte, val interface{}) func(storage.Writer) error { + return func(w storage.Writer) error { + return UpsertByKey(w, key, val) + } +} + +func Remove(key []byte) func(storage.Writer) error { + return func(w storage.Writer) error { + return RemoveByKey(w, key) + } +} + +func RemoveByPrefix(reader storage.Reader, key []byte) func(storage.Writer) error { + return func(w storage.Writer) error { + return RemoveByKeyPrefix(reader, w, key) + } +} + +func RemoveByRange(reader storage.Reader, startPrefix []byte, endPrefix []byte) func(storage.Writer) error { + return func(w storage.Writer) error { + return RemoveByKeyRange(reader, w, startPrefix, endPrefix) + } +} diff --git a/storage/operation/writes_test.go b/storage/operation/writes_test.go new file mode 100644 index 00000000000..f7307007888 --- /dev/null +++ b/storage/operation/writes_test.go @@ -0,0 +1,688 @@ +package operation_test + +import ( + "encoding/binary" + "errors" + "fmt" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestReadWrite(t *testing.T) { + dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) { + e := Entity{ID: 1337} + + // Test read nothing should return not found + var item Entity + err := operation.RetrieveByKey(r, e.Key(), &item) + require.True(t, errors.Is(err, storage.ErrNotFound), "expected not found error") + + require.NoError(t, withWriter(operation.Upsert(e.Key(), e))) + + var readBack Entity + require.NoError(t, operation.RetrieveByKey(r, e.Key(), &readBack)) + require.Equal(t, e, readBack, "expected retrieved value to match written value") + + // Test write again should overwrite + newEntity := Entity{ID: 42} + require.NoError(t, withWriter(operation.Upsert(e.Key(), newEntity))) + + require.NoError(t, operation.RetrieveByKey(r, e.Key(), &readBack)) + require.Equal(t, newEntity, readBack, "expected overwritten value to be retrieved") + + // Test write should not overwrite a different key + anotherEntity := Entity{ID: 84} + require.NoError(t, withWriter(operation.Upsert(anotherEntity.Key(), anotherEntity))) + + var anotherReadBack Entity + require.NoError(t, operation.RetrieveByKey(r, anotherEntity.Key(), &anotherReadBack)) + require.Equal(t, anotherEntity, anotherReadBack, "expected different key to return different value") + }) +} + +func TestReadWriteMalformed(t *testing.T) { + dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) { + e := Entity{ID: 1337} + ue := UnencodeableEntity(e) + + // Test write should return encoding error + require.NoError(t, withWriter(func(writer storage.Writer) error { + err := operation.Upsert(e.Key(), ue)(writer) + require.Contains(t, err.Error(), errCantEncode.Error(), "expected encoding error") + return nil + })) + + // Test read should return decoding error + var exists bool + var err error + exists, err = operation.KeyExists(r, e.Key()) + require.NoError(t, err) + require.False(t, exists, "expected key to not exist") + }) +} + +// Verify multiple entities can be removed in one batch update +func TestBatchWrite(t *testing.T) { + dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) { + // Define multiple entities for 
batch insertion
+        entities := []Entity{
+            {ID: 1337},
+            {ID: 42},
+            {ID: 84},
+        }
+
+        // Batch write: insert multiple entities in a single transaction
+        require.NoError(t, withWriter(func(writer storage.Writer) error {
+            for _, e := range entities {
+                if err := operation.Upsert(e.Key(), e)(writer); err != nil {
+                    return err
+                }
+            }
+            return nil
+        }))
+
+        // Verify that each entity can be read back
+        for _, e := range entities {
+            var readBack Entity
+            require.NoError(t, operation.RetrieveByKey(r, e.Key(), &readBack))
+            require.Equal(t, e, readBack, "expected retrieved value to match written value for entity ID %d", e.ID)
+        }
+
+        // Batch update: remove multiple entities in a single transaction
+        require.NoError(t, withWriter(func(writer storage.Writer) error {
+            for _, e := range entities {
+                if err := operation.Remove(e.Key())(writer); err != nil {
+                    return err
+                }
+            }
+            return nil
+        }))
+
+        // Verify that each entity has been removed
+        for _, e := range entities {
+            var readBack Entity
+            err := operation.RetrieveByKey(r, e.Key(), &readBack)
+            require.True(t, errors.Is(err, storage.ErrNotFound), "expected not found error for entity ID %d after removal", e.ID)
+        }
+    })
+}
+
+func TestBatchWriteArgumentCanBeModified(t *testing.T) {
+    dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+        b := db.NewBatch()
+        defer b.Close()
+
+        k := []byte{0x01}
+        v := []byte{0x02}
+
+        // Insert k and v into batch.
+        err := b.Writer().Set(k, v)
+        require.NoError(t, err)
+
+        // Modify k and v.
+        k[0]++
+        v[0]++
+
+        // Commit batch.
+        err = b.Commit()
+        require.NoError(t, err)
+
+        // Retrieve value with original key.
+        retrievedValue, closer, err := db.Reader().Get([]byte{0x01})
+        defer closer.Close()
+        require.NoError(t, err)
+        require.Equal(t, []byte{0x02}, retrievedValue)
+    })
+}
+
+func TestBatchDeleteArgumentCanBeModified(t *testing.T) {
+    dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+        {
+            b := db.NewBatch()
+            defer b.Close()
+
+            k := []byte{0x01}
+            v := []byte{0x02}
+
+            // Insert k and v into batch.
+            err := b.Writer().Set(k, v)
+            require.NoError(t, err)
+
+            // Commit batch.
+            err = b.Commit()
+            require.NoError(t, err)
+        }
+        {
+            // Create batch to remove records.
+            b := db.NewBatch()
+            defer b.Close()
+
+            k := []byte{0x01}
+
+            // Delete record.
+            err := b.Writer().Delete(k)
+            require.NoError(t, err)
+
+            // Modify k
+            k[0]++
+
+            // Commit batch.
+            err = b.Commit()
+            require.NoError(t, err)
+        }
+        {
+            // Retrieve value with original key
+            retrievedValue, closer, err := db.Reader().Get([]byte{0x01})
+            defer closer.Close()
+            require.ErrorIs(t, err, storage.ErrNotFound)
+            require.Nil(t, retrievedValue)
+        }
+    })
+}
+
+func TestRemove(t *testing.T) {
+    dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+        e := Entity{ID: 1337}
+
+        var exists bool
+        var err error
+        exists, err = operation.KeyExists(r, e.Key())
+        require.NoError(t, err)
+        require.False(t, exists, "expected key to not exist")
+
+        // Test delete nothing should return OK
+        require.NoError(t, withWriter(operation.Remove(e.Key())))
+
+        // Test write, delete, then read should return not found
+        require.NoError(t, withWriter(operation.Upsert(e.Key(), e)))
+
+        exists, err = operation.KeyExists(r, e.Key())
+        require.NoError(t, err)
+        require.True(t, exists, "expected key to exist")
+
+        require.NoError(t, withWriter(operation.Remove(e.Key())))
+
+        var item Entity
+        err = operation.RetrieveByKey(r, e.Key(), &item)
+        require.True(t, errors.Is(err, storage.ErrNotFound), "expected not found error after delete")
+    })
+}
+
+func TestRemoveDiskUsage(t *testing.T) {
+    const count = 10000
+
+    opts := &pebble.Options{
+        MemTableSize: 64 << 20, // required for rotating WAL
+    }
+
+    dbtest.RunWithPebbleDB(t, opts, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter, dir string, db *pebble.DB) {
+        prefix := []byte{1}
+        endPrefix := []byte{2}
+        getKey := func(c *flow.ChunkDataPack) []byte {
+            return append(prefix, c.ChunkID[:]...)
+        }
+
+        items := make([]*flow.ChunkDataPack, count)
+        for i := 0; i < count; i++ {
+            chunkID := unittest.IdentifierFixture()
+            chunkDataPack := unittest.ChunkDataPackFixture(chunkID)
+            items[i] = chunkDataPack
+        }
+
+        // 1. Insert 10000 entities.
+        require.NoError(t, withWriter(func(writer storage.Writer) error {
+            for i := 0; i < count; i++ {
+                if err := operation.Upsert(getKey(items[i]), items[i])(writer); err != nil {
+                    return err
+                }
+            }
+            return nil
+        }))
+
+        // 2. Flush and compact to get a stable state.
+        require.NoError(t, db.Flush())
+        require.NoError(t, db.Compact(prefix, endPrefix, true))
+
+        // 3. Get sizeBefore.
+        sizeBefore := getFolderSize(t, dir)
+        t.Logf("Size after initial write and compact: %d", sizeBefore)
+
+        // 4. Remove all entities
+        require.NoError(t, withWriter(func(writer storage.Writer) error {
+            for i := 0; i < count; i++ {
+                if err := operation.Remove(getKey(items[i]))(writer); err != nil {
+                    return err
+                }
+            }
+            return nil
+        }))
+
+        // 5. Flush and compact again.
+        require.NoError(t, db.Flush())
+        require.NoError(t, db.Compact(prefix, endPrefix, true))
+
+        // 6. Verify the disk usage is reduced.
+        require.Eventually(t, func() bool {
+            sizeAfter := getFolderSize(t, dir)
+            t.Logf("Size after delete and compact: %d", sizeAfter)
+            return sizeAfter < sizeBefore
+        }, 30*time.Second, 200*time.Millisecond,
+            "expected disk usage to be reduced after compaction. before: %d, after: %d",
+            sizeBefore, getFolderSize(t, dir))
+    })
+}
+
+func TestConcurrentWrite(t *testing.T) {
+    dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+        var wg sync.WaitGroup
+        numWrites := 10 // number of concurrent writes
+
+        for i := 0; i < numWrites; i++ {
+            wg.Add(1)
+            go func(i int) {
+                defer wg.Done()
+                e := Entity{ID: uint64(i)}
+
+                // Simulate a concurrent write to a different key
+                require.NoError(t, withWriter(operation.Upsert(e.Key(), e)))
+
+                var readBack Entity
+                require.NoError(t, operation.RetrieveByKey(r, e.Key(), &readBack))
+                require.Equal(t, e, readBack, "expected retrieved value to match written value for key %d", i)
+            }(i)
+        }
+
+        wg.Wait() // Wait for all goroutines to finish
+    })
+}
+
+func TestConcurrentRemove(t *testing.T) {
+    dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+        var wg sync.WaitGroup
+        numDeletes := 10 // number of concurrent deletions
+
+        // First, insert entities to be deleted concurrently
+        for i := 0; i < numDeletes; i++ {
+            e := Entity{ID: uint64(i)}
+            require.NoError(t, withWriter(operation.Upsert(e.Key(), e)))
+        }
+
+        // Now, perform concurrent deletes
+        for i := 0; i < numDeletes; i++ {
+            wg.Add(1)
+            go func(i int) {
+                defer wg.Done()
+                e := Entity{ID: uint64(i)}
+
+                // Simulate a concurrent delete
+                require.NoError(t, withWriter(operation.Remove(e.Key())))
+
+                // Check that the item is no longer retrievable
+                var item Entity
+                err := operation.RetrieveByKey(r, e.Key(), &item)
+                require.True(t, errors.Is(err, storage.ErrNotFound), "expected not found error after delete for key %d", i)
+            }(i)
+        }
+
+        wg.Wait() // Wait for all goroutines to finish
+    })
+}
+
+func TestRemoveByPrefix(t *testing.T) {
+    dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) {
+
+        // Define the prefix
+        prefix := []byte{0x10}
+
+        // Create a range of keys around the boundaries of the prefix
+        keys := [][]byte{
+            // before the prefix -> not included in range
+            {0x09, 0xff},
+            // within the prefix -> included in range
+            {0x10, 0x00},
+            {0x10, 0x50},
+            {0x10, 0xff},
+            // after the prefix -> not included in range
+            {0x11, 0x00},
+            {0x1A, 0xff},
+        }
+
+        // Keys expected to be in the prefix range
+        includeStart, includeEnd := 1, 3
+
+        // Insert the keys into the storage
+        require.NoError(t, withWriter(func(writer storage.Writer) error {
+            for _, key := range keys {
+                value := []byte{0x00} // the value is ignored; only keys matter for this test
+                err := operation.Upsert(key, value)(writer)
+                if err != nil {
+                    return err
+                }
+            }
+            return nil
+        }))
+
+        // Remove the keys in the prefix range
+        require.NoError(t, withWriter(operation.RemoveByPrefix(r, prefix)))
+
+        // Verify that the keys in the prefix range have been removed
+        for i, key := range keys {
+            var exists bool
+            var err error
+            exists, err = operation.KeyExists(r, key)
+            require.NoError(t, err)
+            t.Logf("key %x exists: %t", key, exists)
+
+            deleted := includeStart <= i && i <= includeEnd
+
+            // An item that was not deleted must exist
+            require.Equal(t, !deleted, exists,
+                "expected key %x to be %s", key, map[bool]string{true: "deleted", false: "not deleted"}[deleted])
+        }
+
+        // Verify that after the removal, traversing the removed prefix finds nothing
+        removedKeys := make([]string, 0)
+        err := operation.TraverseByPrefix(r, prefix, func(key []byte, getValue func(destVal any) error) (bail bool, err error) {
+            removedKeys = append(removedKeys, fmt.Sprintf("%x", key))
+            return false, nil
+        },
storage.DefaultIteratorOptions()) + require.NoError(t, err) + require.Len(t, removedKeys, 0, "expected no entries to be found when traversing the removed prefix") + + // Verify that after the removal, Iterate over all keys should only return keys outside the prefix range + expected := [][]byte{ + {0x09, 0xff}, + {0x11, 0x00}, + {0x1A, 0xff}, + } + + actual := make([][]byte, 0) + err = operation.IterateKeysByPrefixRange(r, []byte{keys[0][0]}, storage.PrefixUpperBound(keys[len(keys)-1]), func(key []byte) error { + actual = append(actual, key) + return nil + }) + require.NoError(t, err) + require.Equal(t, expected, actual, "expected keys to match expected values") + }) +} + +func TestRemoveByRange(t *testing.T) { + dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) { + + startPrefix, endPrefix := []byte{0x10}, []byte{0x12} + // Create a range of keys around the boundaries of the prefix + keys := [][]byte{ + {0x09, 0xff}, + // within the range + {0x10, 0x00}, + {0x10, 0x50}, + {0x10, 0xff}, + {0x11}, + {0x12}, + {0x12, 0x00}, + {0x12, 0xff}, + // after end -> not included in range + {0x13}, + {0x1A, 0xff}, + } + + // Keys expected to be in the prefix range + includeStart, includeEnd := 1, 7 + + // Insert the keys into the storage + require.NoError(t, withWriter(func(writer storage.Writer) error { + for _, key := range keys { + value := []byte{0x00} // value are skipped, doesn't matter + err := operation.Upsert(key, value)(writer) + if err != nil { + return err + } + } + return nil + })) + + // Remove the keys in the prefix range + require.NoError(t, withWriter(operation.RemoveByRange(r, startPrefix, endPrefix))) + + // Verify that the keys in the prefix range have been removed + for i, key := range keys { + var exists bool + var err error + exists, err = operation.KeyExists(r, key) + require.NoError(t, err) + t.Logf("key %x exists: %t", key, exists) + + deleted := includeStart <= i && i <= includeEnd + + // An item that was not deleted must exist + require.Equal(t, !deleted, exists, + "expected key %x to be %s", key, map[bool]string{true: "deleted", false: "not deleted"}) + } + }) +} + +func TestRemoveFrom(t *testing.T) { + dbtest.RunWithStorages(t, func(t *testing.T, r storage.Reader, withWriter dbtest.WithWriter) { + + // Define the prefix + prefix := []byte{0xff} + + // Create a range of keys around the boundaries of the prefix + keys := [][]byte{ + {0x10, 0x00}, + {0xff}, + {0xff, 0x00}, + {0xff, 0xff}, + } + + // Keys expected to be in the prefix range + includeStart, includeEnd := 1, 3 + + // Insert the keys into the storage + require.NoError(t, withWriter(func(writer storage.Writer) error { + for _, key := range keys { + value := []byte{0x00} // value are skipped, doesn't matter + err := operation.Upsert(key, value)(writer) + if err != nil { + return err + } + } + return nil + })) + + // Remove the keys in the prefix range + require.NoError(t, withWriter(operation.RemoveByPrefix(r, prefix))) + + // Verify that the keys in the prefix range have been removed + for i, key := range keys { + var exists bool + var err error + exists, err = operation.KeyExists(r, key) + require.NoError(t, err) + t.Logf("key %x exists: %t", key, exists) + + deleted := includeStart <= i && i <= includeEnd + + // An item that was not deleted must exist + require.Equal(t, !deleted, exists, + fmt.Errorf("a key %x should be deleted (%v), but actually exists (%v)", key, deleted, exists)) + } + }) +} + +type Entity struct { + ID uint64 +} + +func (e Entity) Key() []byte { + 
byteSlice := make([]byte, 8) // uint64 is 8 bytes + binary.BigEndian.PutUint64(byteSlice, e.ID) + return byteSlice +} + +type UnencodeableEntity Entity + +var errCantEncode = fmt.Errorf("encoding not supported") +var errCantDecode = fmt.Errorf("decoding not supported") + +func (a UnencodeableEntity) MarshalJSON() ([]byte, error) { + return nil, errCantEncode +} + +func (a *UnencodeableEntity) UnmarshalJSON(b []byte) error { + return errCantDecode +} + +func (a UnencodeableEntity) MarshalMsgpack() ([]byte, error) { + return nil, errCantEncode +} + +func (a UnencodeableEntity) UnmarshalMsgpack(b []byte) error { + return errCantDecode +} + +func getFolderSize(t testing.TB, dir string) int64 { + var size int64 + require.NoError(t, filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + info, err := d.Info() + if err != nil { + fmt.Printf("warning: could not get file info for %s: %v\n", path, err) + return nil + } + + // Add the file size to total + size += info.Size() + } + return nil + })) + + return size +} + +func TestBatchValue(t *testing.T) { + const key = "key1" + + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + t.Run("no data", func(t *testing.T) { + const expectedCallbackInvocationCount = 2 + callbackInvocationCount := 0 + + err := db.WithReaderBatchWriter(func(b storage.ReaderBatchWriter) error { + callbackFunc := func(error) { + callbackInvocationCount++ + + value, exists := b.ScopedValue(key) + require.Nil(t, value) + require.False(t, exists) + } + + for range expectedCallbackInvocationCount { + b.AddCallback(callbackFunc) + } + + k := []byte{0x01} + v := []byte{0x02} + + // Insert k and v into batch. + err := b.Writer().Set(k, v) + require.NoError(t, err) + + return nil + }) + + require.NoError(t, err) + require.Equal(t, expectedCallbackInvocationCount, callbackInvocationCount) + }) + + t.Run("store data multiple times", func(t *testing.T) { + const expectedCallbackInvocationCount = 2 + callbackInvocationCount := 0 + + err := db.WithReaderBatchWriter(func(b storage.ReaderBatchWriter) error { + b.SetScopedValue(key, []string{"value1", "value2"}) + + b.SetScopedValue(key, []string{"value2", "value3"}) + + callbackFunc := func(error) { + callbackInvocationCount++ + + data, exists := b.ScopedValue(key) + require.Equal(t, []string{"value2", "value3"}, data.([]string)) + require.True(t, exists) + } + + for range expectedCallbackInvocationCount { + b.AddCallback(callbackFunc) + } + + k := []byte{0x01} + v := []byte{0x02} + + // Insert k and v into batch. + err := b.Writer().Set(k, v) + require.NoError(t, err) + + return nil + }) + + require.NoError(t, err) + require.Equal(t, expectedCallbackInvocationCount, callbackInvocationCount) + }) + + t.Run("store and remove data", func(t *testing.T) { + const expectedCallbackInvocationCount = 2 + callbackInvocationCount := 0 + + err := db.WithReaderBatchWriter(func(b storage.ReaderBatchWriter) error { + b.SetScopedValue(key, []string{"value1", "value2"}) + + callbackFunc := func(error) { + callbackInvocationCount++ + + data, exists := b.ScopedValue(key) + if callbackInvocationCount == 1 { + require.Equal(t, []string{"value1", "value2"}, data.([]string)) + require.True(t, exists) + + b.SetScopedValue(key, nil) + } else { + require.Nil(t, data) + require.False(t, exists) + } + } + + for range expectedCallbackInvocationCount { + b.AddCallback(callbackFunc) + } + + k := []byte{0x01} + v := []byte{0x02} + + // Insert k and v into batch. 
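Entity.Key above encodes the ID as fixed-width big-endian bytes. That choice is deliberate: with a fixed width and big-endian order, the lexicographic byte order of keys matches the numeric order of the IDs, which is what the prefix and range tests in this file rely on. A small self-contained illustration (the values are arbitrary):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	a := make([]byte, 8)
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(a, 42)   // 0x000000000000002a
	binary.BigEndian.PutUint64(b, 1337) // 0x0000000000000539
	// Fixed-width big-endian keys sort the same way the numbers do.
	fmt.Println(bytes.Compare(a, b) < 0) // prints: true
}
```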
+			err := b.Writer().Set(k, v)
+			require.NoError(t, err)
+
+			return nil
+		})
+
+		require.NoError(t, err)
+		require.Equal(t, expectedCallbackInvocationCount, callbackInvocationCount)
+		})
+	})
+}
diff --git a/storage/operations.go b/storage/operations.go
new file mode 100644
index 00000000000..0950bfc00aa
--- /dev/null
+++ b/storage/operations.go
@@ -0,0 +1,277 @@
+package storage
+
+import (
+	"io"
+)
+
+// Iterator is an interface for iterating over key-value pairs in a storage backend.
+// A common usage is:
+//
+//	defer it.Close()
+//
+//	for it.First(); it.Valid(); it.Next() {
+//		item := it.IterItem()
+//	}
+type Iterator interface {
+	// First seeks to the smallest key in the iteration range.
+	// It must be called before calling Valid, Next, IterItem, or Close,
+	// because the badger implementation relies on it to move the iteration
+	// cursor to the first key in the range.
+	// It returns true if the iterator is pointing to a valid key-value pair
+	// after the call, and false otherwise.
+	First() bool
+
+	// Valid returns whether the iterator is positioned at a valid key-value pair.
+	// If Valid returns false, the iterator is done and must be closed.
+	Valid() bool
+
+	// Next advances the iterator to the next key-value pair.
+	// The next position might be invalid, so call Valid() before reading.
+	Next()
+
+	// IterItem returns the current key-value pair, or nil if Valid returns false.
+	// Always call Valid() before calling IterItem.
+	// Note: the returned item is only valid until Next() is called.
+	IterItem() IterItem
+
+	// Close closes the iterator. The iterator must be closed; otherwise it leaks memory.
+	// No errors are expected during normal operation.
+	Close() error
+}
+
+// IterItem is an interface for accessing the key-value pair at an iterator's current position.
+type IterItem interface {
+	// Key returns the key of the current key-value pair.
+	// The key is only valid until the Iterator.Next() method is called;
+	// if you need it beyond that, use KeyCopy.
+	Key() []byte
+
+	// KeyCopy returns a copy of the key of the item, writing it into the dst slice.
+	// If dst is nil, or its capacity is insufficient, a new slice is allocated and
+	// returned.
+	KeyCopy(dst []byte) []byte
+
+	// Value returns the value of the current key-value pair.
+	// It takes a function in order to follow badgerDB's API pattern.
+	// No errors are expected during normal operation.
+	Value(func(val []byte) error) error
+}
+
+type IteratorOption struct {
+	BadgerIterateKeyOnly bool // default false
+}
+
+// TODO: convert into a var
+func DefaultIteratorOptions() IteratorOption {
+	return IteratorOption{
+		// only needed for badger. ignored by pebble
+		BadgerIterateKeyOnly: false,
+	}
+}
+
+// Seeker is an interface for seeking a key within a range.
+type Seeker interface {
+	// SeekLE (seek less than or equal) returns the largest key in lexicographical
+	// order within the inclusive range [startPrefix, key].
+	// It returns an error if the specified key is less than startPrefix.
+	// It returns storage.ErrNotFound if no key matches the specified criteria.
+	SeekLE(startPrefix, key []byte) ([]byte, error)
+}
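To make the First/Valid/Next contract above concrete, here is a hedged sketch that drains an iterator over a prefix range; it assumes only the Reader, Iterator, and IterItem interfaces defined in this file:

```go
// Sketch: draining an iterator per the contract documented above.
func collectKeys(r Reader, startPrefix, endPrefix []byte) ([][]byte, error) {
	it, err := r.NewIter(startPrefix, endPrefix, DefaultIteratorOptions())
	if err != nil {
		return nil, err
	}
	defer it.Close() // iterators must always be closed

	var keys [][]byte
	// First must be called before Valid, Next, or IterItem.
	for it.First(); it.Valid(); it.Next() {
		item := it.IterItem()
		// Key() is only valid until Next(), so take a copy.
		keys = append(keys, item.KeyCopy(nil))
	}
	return keys, nil
}
```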
+
+type Reader interface {
+	// Get gets the value for the given key. It returns ErrNotFound if the DB
+	// does not contain the key; other errors are exceptions.
+	//
+	// The caller should not modify the contents of the returned slice, but it is
+	// safe to modify the contents of the `key` argument after Get returns. The
+	// returned slice will remain valid until the returned Closer is closed.
+	// When err == nil, the caller MUST call closer.Close() or a memory leak will occur.
+	Get(key []byte) (value []byte, closer io.Closer, err error)
+
+	// NewIter returns a new Iterator for the given key prefix range [startPrefix, endPrefix], both inclusive.
+	// We require that startPrefix ≤ endPrefix (otherwise this function errors).
+	// Specifically, all keys that meet ANY of the following conditions are included in the iteration:
+	//   - have a prefix equal to startPrefix OR
+	//   - have a prefix equal to the endPrefix OR
+	//   - have a prefix that is lexicographically between startPrefix and endPrefix
+	NewIter(startPrefix, endPrefix []byte, ops IteratorOption) (Iterator, error)
+
+	// NewSeeker returns a new Seeker.
+	NewSeeker() Seeker
+}
+
+// Writer is an interface for batch writing to a storage backend.
+// One Writer instance cannot be used concurrently by multiple goroutines.
+type Writer interface {
+	// Set sets the value for the given key. It overwrites any previous value
+	// for that key; a DB is not a multi-map.
+	//
+	// It is safe to modify the contents of the arguments after Set returns.
+	// No errors are expected during normal operation.
+	Set(k, v []byte) error
+
+	// Delete deletes the value for the given key. Deletes are blind: they
+	// succeed even if the given key does not exist.
+	//
+	// It is safe to modify the contents of the arguments after Delete returns.
+	// No errors are expected during normal operation.
+	Delete(key []byte) error
+
+	// DeleteByRange removes all keys with a prefix that falls within the
+	// range [start, end], both inclusive.
+	// No errors are expected during normal operation.
+	DeleteByRange(globalReader Reader, startPrefix, endPrefix []byte) error
+}
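The Reader and Writer halves come together in the ReaderBatchWriter defined next. As a hedged sketch of the intended usage (the counter encoding is illustrative, not part of this diff; it assumes `encoding/binary`, `errors`, and this package's ErrNotFound sentinel), an atomic read-modify-write looks roughly like this:

```go
// Sketch: atomic read-modify-write via DB.WithReaderBatchWriter.
func incrementCounter(db DB, key []byte) error {
	return db.WithReaderBatchWriter(func(rw ReaderBatchWriter) error {
		// GlobalReader sees only the latest committed state, not this
		// batch's pending writes.
		var n uint64
		val, closer, err := rw.GlobalReader().Get(key)
		if err == nil {
			n = binary.BigEndian.Uint64(val)
			closer.Close()
		} else if !errors.Is(err, ErrNotFound) {
			return err // exception: abort; the batch is not committed
		}

		buf := make([]byte, 8)
		binary.BigEndian.PutUint64(buf, n+1)
		if err := rw.Writer().Set(key, buf); err != nil {
			return err
		}

		// Runs after the batch is flushed, with the commit error (nil on success).
		rw.AddCallback(func(err error) {
			if err == nil {
				// e.g. update an in-memory cache here
			}
		})
		return nil
	})
}
```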
+
+// ReaderBatchWriter is an interface for reading and writing to a storage backend.
+// It is useful for performing a related sequence of reads and writes, after which you would like
+// to modify some non-database state if the sequence completed successfully (via AddCallback).
+// If you are not using AddCallback, avoid using ReaderBatchWriter: use Reader and Writer directly.
+// ReaderBatchWriter is not safe for concurrent use.
+type ReaderBatchWriter interface {
+	// GlobalReader returns a database-backed reader which reads the latest committed global database state ("read-committed isolation").
+	// This reader will not read writes written to ReaderBatchWriter.Writer until the write batch is committed.
+	// This reader may observe different values for the same key on subsequent reads.
+	GlobalReader() Reader
+
+	// Writer returns a writer associated with a batch of writes. The batch is pending until it is committed.
+	// When we `Write` into the batch, that write operation is added to the pending batch, but not committed.
+	// The commit operation is atomic w.r.t. the batch; either all writes are applied to the database, or none are.
+	// Note:
+	//   - The writer cannot be used concurrently for writing.
+	Writer() Writer
+
+	// AddCallback adds a callback to execute after the batch has been flushed,
+	// regardless of whether the batch update succeeded or failed.
+	// The error parameter is the error returned by the batch update.
+	AddCallback(func(error))
+
+	// SetScopedValue stores the given value by the given key in this batch.
+	// The value can be retrieved by the same key via ScopedValue(key).
+	//
+	// Saving data in the ReaderBatchWriter can be useful when a store's operation
+	// is called repeatedly with the same ReaderBatchWriter and different data
+	// (e.g., block IDs). Aggregating the data (e.g., block IDs) within
+	// the same ReaderBatchWriter lets the store's operation perform a single
+	// batched follow-up efficiently when the commit succeeds.
+	//
+	// For example, TransactionResults.BatchRemoveByBlockID() receives a
+	// ReaderBatchWriter and a block ID to remove the given block from the
+	// database and the memory cache. TransactionResults.BatchRemoveByBlockID()
+	// can be called repeatedly with the same ReaderBatchWriter and different
+	// block IDs to remove multiple blocks. By saving all removed block IDs
+	// with the same ReaderBatchWriter, TransactionResults.BatchRemoveByBlockID()
+	// retrieves all block IDs and removes the cached blocks by locking just once
+	// in an OnCommitSucceed() callback, instead of locking the TransactionResults cache
+	// for every removed block ID.
+	SetScopedValue(key string, value any)
+
+	// ScopedValue returns the value associated with this batch for the given key
+	// and true if the key exists, or nil and false if the key doesn't exist.
+	ScopedValue(key string) (any, bool)
+}
+
+// DB is an interface for a database store that provides a reader and a writer.
+type DB interface {
+	// Reader returns a database-backed reader which reads the latest
+	// committed global database state.
+	Reader() Reader
+
+	// WithReaderBatchWriter creates a batch writer and allows the caller to perform
+	// atomic batch updates to the database.
+	// Any error returned is considered fatal and the batch is not committed.
+	WithReaderBatchWriter(func(ReaderBatchWriter) error) error
+
+	// NewBatch creates a new batch for writing.
+	NewBatch() Batch
+
+	// Close closes the database and releases all resources.
+	// No errors are expected during normal operation.
+	Close() error
+}
+
+// Batch is an interface for a batch of writes to a storage backend.
+// The batch is pending until it is committed. Useful for dynamically adding writes to the batch.
+type Batch interface {
+	ReaderBatchWriter
+
+	// Commit applies the batched updates to the database.
+	// Commit may be called at most once per Batch.
+	// No errors are expected during normal operation.
+	Commit() error
+
+	// Close releases the memory held by the batch.
+	// Close must be called exactly once per Batch.
+	// It can be called in a defer statement immediately after creating the Batch
+	// to reduce the risk of unbounded memory consumption.
+	// No errors are expected during normal operation.
+	Close() error
+}
+
+// OnlyWriter is an adapter to convert a function that takes a Writer
+// to a function that takes a ReaderBatchWriter.
+func OnlyWriter(fn func(Writer) error) func(ReaderBatchWriter) error {
+	return func(rw ReaderBatchWriter) error {
+		return fn(rw.Writer())
+	}
+}
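Here is a hedged sketch of the aggregation pattern the SetScopedValue documentation describes; the scoped-value key, `makeKey`, and `evictFromCache` are illustrative stand-ins, not APIs from this diff, and `flow.Identifier` is assumed imported:

```go
// Sketch of the SetScopedValue aggregation pattern described above.
const removedBlocksKey = "removed-block-ids" // illustrative scoped-value key

func batchRemoveByBlockID(blockID flow.Identifier, rw ReaderBatchWriter) error {
	// Aggregate this call's block ID with those from earlier calls
	// that share the same batch.
	var ids []flow.Identifier
	if v, ok := rw.ScopedValue(removedBlocksKey); ok {
		ids = v.([]flow.Identifier)
	}
	ids = append(ids, blockID)
	rw.SetScopedValue(removedBlocksKey, ids)

	if len(ids) == 1 {
		// Register the cache flush once per batch; the callback reads the
		// final aggregated slice when the commit succeeds.
		OnCommitSucceed(rw, func() {
			v, _ := rw.ScopedValue(removedBlocksKey)
			evictFromCache(v.([]flow.Identifier)) // hypothetical cache eviction
		})
	}

	return rw.Writer().Delete(makeKey(blockID)) // makeKey is hypothetical
}
```

This mirrors the TestBatchValue cases earlier in the diff, which confirm that callbacks can still read (and rewrite) scoped values when they run.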
+
+// OnCommitSucceed adds a callback to execute after the batch has been successfully committed.
+//
+// Context on why we don't add this method to the ReaderBatchWriter interface:
+// every implementation of ReaderBatchWriter would then have to provide AddSuccessCallback,
+// which can be derived for free from the AddCallback method.
+// Prefer OnCommitSucceed over using AddCallback directly: with AddCallback it is easy to
+// write `if err != nil` out of habit (the usual golang error-handling idiom) when
+// `if err == nil` is what is intended here.
+func OnCommitSucceed(b ReaderBatchWriter, onSuccessFn func()) {
+	b.AddCallback(func(err error) {
+		if err == nil {
+			onSuccessFn()
+		}
+	})
+}
+
+// StartEndPrefixToLowerUpperBound returns the lower and upper bounds for a range of keys
+// specified by the start and end prefixes; the bounds are used for key iteration.
+// The returned lowerBound specifies the smallest key to iterate and is inclusive.
+// The returned upperBound specifies the largest key to iterate and is exclusive (not inclusive).
+// The returned hasUpperBound specifies whether an upper bound exists.
+// In order to match all keys prefixed with `endPrefix`, we increment the bytes of `endPrefix` by 1.
+// For instance, to iterate keys between "hello" and "world",
+// we use "hello" as the lower bound and "worle" as the upper bound, so that "world", "world1",
+// and "worldffff...ffff" are all included.
+// In the case that endPrefix is all 1s, such as []byte{0xff, 0xff, ...}, there is no upper bound,
+// and the function returns (startPrefix, nil, false).
+func StartEndPrefixToLowerUpperBound(startPrefix, endPrefix []byte) (lowerBound, upperBound []byte, hasUpperBound bool) {
+	// if endPrefix is all 1s, such as []byte{0xff, 0xff, ...}, there is no upper bound,
+	// so we return startPrefix as the lower bound, nil as the upper bound, and false for hasUpperBound
+	upperBound = PrefixUpperBound(endPrefix)
+	if upperBound == nil {
+		return startPrefix, nil, false
+	}
+
+	return startPrefix, upperBound, true
+}
+
+// PrefixUpperBound returns a key K such that all possible keys beginning with the input prefix
+// sort lower than K according to the byte-wise lexicographic key ordering.
+// This is used to define an upper bound for iteration when we want to iterate over
+// all keys beginning with a given prefix.
+// See https://pkg.go.dev/github.com/cockroachdb/pebble#example-Iterator-PrefixIteration
+// When the prefix is all 1s, such as []byte{0xff} or []byte{0xff, 0xff}, there is no upper bound;
+// the function returns nil in this case.
+func PrefixUpperBound(prefix []byte) []byte {
+	end := make([]byte, len(prefix))
+	copy(end, prefix)
+	for i := len(end) - 1; i >= 0; i-- {
+		// increment the byte by 1
+		end[i] = end[i] + 1
+		if end[i] != 0 {
+			return end[:i+1]
+		}
+	}
+	return nil // no upper bound
+}
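A quick worked example of the two bound helpers above; the printed values follow directly from the code (the function name is illustrative, `fmt` assumed imported):

```go
// Sketch: concrete values produced by the bound helpers.
func demoBounds() {
	fmt.Printf("%x\n", PrefixUpperBound([]byte{0x10}))       // prints: 11
	fmt.Printf("%x\n", PrefixUpperBound([]byte{0x10, 0xff})) // prints: 11 (the 0xff carries into the higher byte)
	fmt.Println(PrefixUpperBound([]byte{0xff, 0xff}) == nil) // prints: true (no finite upper bound)

	lower, upper, ok := StartEndPrefixToLowerUpperBound([]byte("hello"), []byte("world"))
	fmt.Printf("%s %s %v\n", lower, upper, ok) // prints: hello worle true
}
```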
diff --git a/storage/payloads.go b/storage/payloads.go
index d9926a966f9..fbb9c2d7883 100644
--- a/storage/payloads.go
+++ b/storage/payloads.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package storage
 
 import (
@@ -9,9 +7,6 @@ import (
 // Payloads represents persistent storage for payloads.
 type Payloads interface {
 
-	// Store will store a payload and index its contents.
-	Store(blockID flow.Identifier, payload *flow.Payload) error
-
 	// ByBlockID returns the payload with the given hash. It is available for
 	// finalized and ambiguous blocks.
 	ByBlockID(blockID flow.Identifier) (*flow.Payload, error)
diff --git a/storage/pebble/batch.go b/storage/pebble/batch.go
new file mode 100644
index 00000000000..2bba07cb9b0
--- /dev/null
+++ b/storage/pebble/batch.go
@@ -0,0 +1,59 @@
+package pebble
+
+import (
+	"sync"
+
+	"github.com/cockroachdb/pebble/v2"
+)
+
+// TODO: unused?
+type Batch struct {
+	writer *pebble.Batch
+
+	lock      sync.RWMutex
+	callbacks []func()
+}
+
+func NewBatch(db *pebble.DB) *Batch {
+	batch := db.NewBatch()
+	return &Batch{
+		writer:    batch,
+		callbacks: make([]func(), 0),
+	}
+}
+
+func (b *Batch) GetWriter() *pebble.Batch {
+	return b.writer
+}
+
+// OnSucceed adds a callback to execute after the batch has
+// been successfully flushed.
+// This is useful for implementing caches, where we only want to cache
+// after the batch has been successfully flushed.
+func (b *Batch) OnSucceed(callback func()) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+	b.callbacks = append(b.callbacks, callback)
+}
+
+// Flush commits the underlying pebble batch and, in addition,
+// calls the callbacks added by OnSucceed.
+// Any error is an exception.
+func (b *Batch) Flush() error {
+	err := b.writer.Commit(nil)
+	if err != nil {
+		return err
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+	for _, callback := range b.callbacks {
+		callback()
+	}
+	return nil
+}
+
+func (b *Batch) Close() error {
+	return b.writer.Close()
+}
diff --git a/storage/pebble/bootstrap.go b/storage/pebble/bootstrap.go
new file mode 100644
index 00000000000..e70b1fec389
--- /dev/null
+++ b/storage/pebble/bootstrap.go
@@ -0,0 +1,170 @@
+package pebble
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"path/filepath"
+	"time"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/rs/zerolog"
+	"go.uber.org/atomic"
+	"golang.org/x/sync/errgroup"
+
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/convert"
+	"github.com/onflow/flow-go/ledger/complete/wal"
+)
+
+// ErrAlreadyBootstrapped is the sentinel error for an already bootstrapped pebble instance
+var ErrAlreadyBootstrapped = errors.New("found latest key set on pebble instance, DB is already bootstrapped")
+
+type RegisterBootstrap struct {
+	log                zerolog.Logger
+	db                 *pebble.DB
+	checkpointDir      string
+	checkpointFileName string
+	leafNodeChan       chan *wal.LeafNode
+	rootHeight         uint64
+	rootHash           ledger.RootHash
+	registerCount      *atomic.Uint64
+}
+
+// NewRegisterBootstrap creates the bootstrap object for reading checkpoint data and the height tracker in pebble.
+// This object must be initialized and RegisterBootstrap.IndexCheckpointFile must be run to have the pebble db instance
+// in the correct state to initialize a Registers store.
+func NewRegisterBootstrap(
+	db *pebble.DB,
+	checkpointFile string,
+	rootHeight uint64,
+	rootHash ledger.RootHash,
+	log zerolog.Logger,
+) (*RegisterBootstrap, error) {
+	// check for pre-populated heights, fail if it is populated
+	// i.e.
the IndexCheckpointFile function has already run for the db in this directory + isBootstrapped, err := IsBootstrapped(db) + if err != nil { + return nil, err + } + if isBootstrapped { + // key detected, attempt to run bootstrap on corrupt or already bootstrapped data + return nil, ErrAlreadyBootstrapped + } + + checkpointDir, checkpointFileName := filepath.Split(checkpointFile) + return &RegisterBootstrap{ + log: log.With().Str("module", "register_bootstrap").Logger(), + db: db, + checkpointDir: checkpointDir, + checkpointFileName: checkpointFileName, + leafNodeChan: make(chan *wal.LeafNode, checkpointLeafNodeBufSize), + rootHeight: rootHeight, + rootHash: rootHash, + registerCount: atomic.NewUint64(0), + }, nil +} + +func (b *RegisterBootstrap) batchIndexRegisters(leafNodes []*wal.LeafNode) error { + batch := b.db.NewBatch() + defer batch.Close() + + b.log.Trace().Int("batch_size", len(leafNodes)).Msg("indexing batch of leaf nodes") + for _, register := range leafNodes { + payload := register.Payload + key, err := payload.Key() + if err != nil { + return fmt.Errorf("could not get key from register payload: %w", err) + } + + registerID, err := convert.LedgerKeyToRegisterID(key) + if err != nil { + return fmt.Errorf("could not get register ID from key: %w", err) + } + + encoded := newLookupKey(b.rootHeight, registerID).Bytes() + err = batch.Set(encoded, payload.Value(), nil) + if err != nil { + return fmt.Errorf("failed to set key: %w", err) + } + } + + err := batch.Commit(pebble.Sync) + if err != nil { + return fmt.Errorf("failed to commit batch: %w", err) + } + + b.registerCount.Add(uint64(len(leafNodes))) + + return nil +} + +// indexCheckpointFileWorker asynchronously indexes register entries in b.checkpointDir +// with wal.OpenAndReadLeafNodesFromCheckpointV6 +func (b *RegisterBootstrap) indexCheckpointFileWorker(ctx context.Context) error { + b.log.Debug().Msg("started checkpoint index worker") + + // collect leaf nodes to batch index until the channel is closed + batch := make([]*wal.LeafNode, 0, pebbleBootstrapRegisterBatchLen) + for leafNode := range b.leafNodeChan { + select { + case <-ctx.Done(): + return nil + default: + batch = append(batch, leafNode) + if len(batch) >= pebbleBootstrapRegisterBatchLen { + err := b.batchIndexRegisters(batch) + if err != nil { + return fmt.Errorf("unable to index registers to pebble in batch: %w", err) + } + batch = make([]*wal.LeafNode, 0, pebbleBootstrapRegisterBatchLen) + } + } + } + + // index the remaining registers if didn't reach a batch length. 
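Stepping back from the worker internals for a moment: the end-to-end use of RegisterBootstrap that this diff implies is a single construct-then-index call. A hedged sketch, using only the APIs shown in this diff (the worker count of 10 matches the tests later in the diff; the function name is illustrative):

```go
// Sketch: end-to-end register bootstrap using the APIs in this diff.
func bootstrapRegisters(dir, checkpointFile string, rootHeight uint64, rootHash ledger.RootHash, log zerolog.Logger) error {
	db, err := OpenRegisterPebbleDB(log, dir)
	if err != nil {
		return fmt.Errorf("could not open register db: %w", err)
	}
	defer db.Close()

	b, err := NewRegisterBootstrap(db, checkpointFile, rootHeight, rootHash, log)
	if err != nil {
		return err // includes ErrAlreadyBootstrapped for an already indexed db
	}

	// 10 workers matches the tests in this diff; tune for the environment.
	return b.IndexCheckpointFile(context.Background(), 10)
}
```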
+ err := b.batchIndexRegisters(batch) + if err != nil { + return fmt.Errorf("unable to index remaining registers to pebble: %w", err) + } + return nil +} + +// IndexCheckpointFile indexes the checkpoint file in the Dir provided +func (b *RegisterBootstrap) IndexCheckpointFile(ctx context.Context, workerCount int) error { + cct, cancel := context.WithCancel(ctx) + defer cancel() + + g, gCtx := errgroup.WithContext(cct) + + start := time.Now() + b.log.Info().Msgf("indexing registers from checkpoint with %v worker", workerCount) + for i := 0; i < workerCount; i++ { + g.Go(func() error { + return b.indexCheckpointFileWorker(gCtx) + }) + } + + err := wal.OpenAndReadLeafNodesFromCheckpointV6(b.leafNodeChan, b.checkpointDir, b.checkpointFileName, b.rootHash, b.log) + if err != nil { + return fmt.Errorf("error reading leaf node: %w", err) + } + + if err = g.Wait(); err != nil { + return fmt.Errorf("failed to index checkpoint file: %w", err) + } + + err = initHeights(b.db, b.rootHeight) + if err != nil { + return fmt.Errorf("could not index latest height: %w", err) + } + + b.log.Info(). + Uint64("root_height", b.rootHeight). + Uint64("register_count", b.registerCount.Load()). + // note: not using Dur() since default units are ms and this duration is long + Str("duration", fmt.Sprintf("%v", time.Since(start))). + Msg("checkpoint indexing complete") + + return nil +} diff --git a/storage/pebble/bootstrap_test.go b/storage/pebble/bootstrap_test.go new file mode 100644 index 00000000000..4225b5e5d47 --- /dev/null +++ b/storage/pebble/bootstrap_test.go @@ -0,0 +1,259 @@ +package pebble + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "os" + "path" + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/ledger/common/testutils" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" + "github.com/onflow/flow-go/ledger/complete/wal" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +const defaultRegisterValue = byte('v') + +func TestRegisterBootstrap_NewBootstrap(t *testing.T) { + t.Parallel() + unittest.RunWithTempDir(t, func(dir string) { + rootHeight := uint64(1) + rootHash := ledger.RootHash(unittest.StateCommitmentFixture()) + log := zerolog.New(io.Discard) + p, err := OpenRegisterPebbleDB(log, dir) + require.NoError(t, err) + // set heights + require.NoError(t, initHeights(p, rootHeight)) + // errors if FirstHeight or LastHeight are populated + _, err = NewRegisterBootstrap(p, dir, rootHeight, rootHash, log) + require.ErrorIs(t, err, ErrAlreadyBootstrapped) + }) +} + +func TestRegisterBootstrap_IndexCheckpointFile_Happy(t *testing.T) { + t.Parallel() + log := zerolog.New(io.Discard) + rootHeight := uint64(10000) + unittest.RunWithTempDir(t, func(dir string) { + tries, registerIDs := simpleTrieWithValidRegisterIDs(t) + // exclude the empty trie + rootHash := tries[0].RootHash() + fileName := "simple-checkpoint" + require.NoErrorf(t, wal.StoreCheckpointV6Concurrently(tries, dir, fileName, log), "fail to store checkpoint") + checkpointFile := path.Join(dir, fileName) + pb, dbDir := createPebbleForTest(t) + + bootstrap, err := NewRegisterBootstrap(pb, checkpointFile, rootHeight, rootHash, log) + require.NoError(t, err) + err = bootstrap.IndexCheckpointFile(context.Background(), workerCount) + require.NoError(t, err) + + // create registers instance and check values + reg, 
err := NewRegisters(pb, PruningDisabled) + require.NoError(t, err) + + require.Equal(t, reg.LatestHeight(), rootHeight) + require.Equal(t, reg.FirstHeight(), rootHeight) + + for _, register := range registerIDs { + val, err := reg.Get(*register, rootHeight) + require.NoError(t, err) + require.Equal(t, val, []byte{defaultRegisterValue}) + } + + require.NoError(t, pb.Close()) + require.NoError(t, os.RemoveAll(dbDir)) + }) +} + +func TestRegisterBootstrap_IndexCheckpointFile_Empty(t *testing.T) { + t.Parallel() + log := zerolog.New(io.Discard) + rootHeight := uint64(10000) + unittest.RunWithTempDir(t, func(dir string) { + tries := []*trie.MTrie{trie.NewEmptyMTrie()} + rootHash := tries[0].RootHash() + fileName := "empty-checkpoint" + require.NoErrorf(t, wal.StoreCheckpointV6Concurrently(tries, dir, fileName, log), "fail to store checkpoint") + checkpointFile := path.Join(dir, fileName) + pb, dbDir := createPebbleForTest(t) + + bootstrap, err := NewRegisterBootstrap(pb, checkpointFile, rootHeight, rootHash, log) + require.NoError(t, err) + err = bootstrap.IndexCheckpointFile(context.Background(), workerCount) + require.NoError(t, err) + + // create registers instance and check values + reg, err := NewRegisters(pb, PruningDisabled) + require.NoError(t, err) + + require.Equal(t, reg.LatestHeight(), rootHeight) + require.Equal(t, reg.FirstHeight(), rootHeight) + + require.NoError(t, pb.Close()) + require.NoError(t, os.RemoveAll(dbDir)) + }) +} + +func TestRegisterBootstrap_IndexCheckpointFile_FormatIssue(t *testing.T) { + t.Parallel() + pa1 := testutils.PathByUint8(0) + pa2 := testutils.PathByUint8(1) + rootHeight := uint64(666) + pl1 := testutils.LightPayload8('A', 'A') + pl2 := testutils.LightPayload('B', 'B') + paths := []ledger.Path{pa1, pa2} + payloads := []ledger.Payload{*pl1, *pl2} + emptyTrie := trie.NewEmptyMTrie() + trieWithInvalidEntry, _, err := trie.NewTrieWithUpdatedRegisters(emptyTrie, paths, payloads, true) + require.NoError(t, err) + rootHash := trieWithInvalidEntry.RootHash() + log := zerolog.New(io.Discard) + + unittest.RunWithTempDir(t, func(dir string) { + fileName := "invalid-checkpoint" + require.NoErrorf(t, wal.StoreCheckpointV6Concurrently([]*trie.MTrie{trieWithInvalidEntry}, dir, fileName, log), + "fail to store checkpoint") + checkpointFile := path.Join(dir, fileName) + pb, dbDir := createPebbleForTest(t) + + bootstrap, err := NewRegisterBootstrap(pb, checkpointFile, rootHeight, rootHash, log) + require.NoError(t, err) + err = bootstrap.IndexCheckpointFile(context.Background(), workerCount) + require.ErrorContains(t, err, "unexpected ledger key format") + require.NoError(t, pb.Close()) + require.NoError(t, os.RemoveAll(dbDir)) + }) + +} + +func TestRegisterBootstrap_IndexCheckpointFile_CorruptedCheckpointFile(t *testing.T) { + t.Parallel() + rootHeight := uint64(666) + log := zerolog.New(io.Discard) + unittest.RunWithTempDir(t, func(dir string) { + tries, _ := largeTrieWithValidRegisterIDs(t) + rootHash := tries[0].RootHash() + checkpointFileName := "large-checkpoint-incomplete" + require.NoErrorf(t, wal.StoreCheckpointV6Concurrently(tries, dir, checkpointFileName, log), "fail to store checkpoint") + // delete 2nd part of the file (2nd subtrie) + fileToDelete := path.Join(dir, fmt.Sprintf("%v.%03d", checkpointFileName, 2)) + err := os.RemoveAll(fileToDelete) + require.NoError(t, err) + pb, dbDir := createPebbleForTest(t) + bootstrap, err := NewRegisterBootstrap(pb, checkpointFileName, rootHeight, rootHash, log) + require.NoError(t, err) + err = 
bootstrap.IndexCheckpointFile(context.Background(), workerCount) + require.ErrorIs(t, err, os.ErrNotExist) + require.NoError(t, os.RemoveAll(dbDir)) + }) +} + +func TestRegisterBootstrap_IndexCheckpointFile_MultipleBatch(t *testing.T) { + t.Parallel() + log := zerolog.New(io.Discard) + rootHeight := uint64(10000) + unittest.RunWithTempDir(t, func(dir string) { + tries, registerIDs := largeTrieWithValidRegisterIDs(t) + rootHash := tries[0].RootHash() + fileName := "large-checkpoint" + require.NoErrorf(t, wal.StoreCheckpointV6Concurrently(tries, dir, fileName, log), "fail to store checkpoint") + checkpointFile := path.Join(dir, fileName) + pb, dbDir := createPebbleForTest(t) + bootstrap, err := NewRegisterBootstrap(pb, checkpointFile, rootHeight, rootHash, log) + require.NoError(t, err) + err = bootstrap.IndexCheckpointFile(context.Background(), workerCount) + require.NoError(t, err) + + // create registers instance and check values + reg, err := NewRegisters(pb, PruningDisabled) + require.NoError(t, err) + + require.Equal(t, reg.LatestHeight(), rootHeight) + require.Equal(t, reg.FirstHeight(), rootHeight) + + for _, register := range registerIDs { + val, err := reg.Get(*register, rootHeight) + require.NoError(t, err) + require.Equal(t, val, []byte{defaultRegisterValue}) + } + + require.NoError(t, pb.Close()) + require.NoError(t, os.RemoveAll(dbDir)) + }) + +} + +func simpleTrieWithValidRegisterIDs(t *testing.T) ([]*trie.MTrie, []*flow.RegisterID) { + return trieWithValidRegisterIDs(t, 2) +} + +const workerCount = 10 + +func largeTrieWithValidRegisterIDs(t *testing.T) ([]*trie.MTrie, []*flow.RegisterID) { + // large enough trie so every worker should have something to index + largeTrieSize := 2 * pebbleBootstrapRegisterBatchLen * workerCount + return trieWithValidRegisterIDs(t, uint16(largeTrieSize)) +} + +func trieWithValidRegisterIDs(t *testing.T, n uint16) ([]*trie.MTrie, []*flow.RegisterID) { + emptyTrie := trie.NewEmptyMTrie() + resultRegisterIDs := make([]*flow.RegisterID, 0, n) + paths := randomRegisterPaths(n) + payloads := randomRegisterPayloads(n) + for _, payload := range payloads { + key, err := payload.Key() + require.NoError(t, err) + regID, err := convert.LedgerKeyToRegisterID(key) + require.NoError(t, err) + resultRegisterIDs = append(resultRegisterIDs, ®ID) + } + populatedTrie, depth, err := trie.NewTrieWithUpdatedRegisters(emptyTrie, paths, payloads, true) + // make sure it has at least 1 leaf node + require.GreaterOrEqual(t, depth, uint16(1)) + require.NoError(t, err) + resultTries := []*trie.MTrie{populatedTrie} + return resultTries, resultRegisterIDs +} + +func randomRegisterPayloads(n uint16) []ledger.Payload { + p := make([]ledger.Payload, 0, n) + for i := uint16(0); i < n; i++ { + o := make([]byte, 0, 8) + o = binary.BigEndian.AppendUint16(o, n) + k := ledger.Key{KeyParts: []ledger.KeyPart{ + {Type: ledger.KeyPartOwner, Value: o}, + {Type: ledger.KeyPartKey, Value: o}, + }} + // values are always 'v' for ease of testing/checking + v := ledger.Value{defaultRegisterValue} + pl := ledger.NewPayload(k, v) + p = append(p, *pl) + } + return p +} + +func randomRegisterPaths(n uint16) []ledger.Path { + p := make([]ledger.Path, 0, n) + for i := uint16(0); i < n; i++ { + p = append(p, testutils.PathByUint16(i)) + } + return p +} + +func createPebbleForTest(t *testing.T) (*pebble.DB, string) { + dbDir := unittest.TempPebblePath(t) + pb, err := OpenRegisterPebbleDB(unittest.Logger(), dbDir) + require.NoError(t, err) + return pb, dbDir +} diff --git a/storage/pebble/cache.go 
b/storage/pebble/cache.go
new file mode 100644
index 00000000000..e40fc8a69f2
--- /dev/null
+++ b/storage/pebble/cache.go
@@ -0,0 +1,167 @@
+package pebble
+
+import (
+	"errors"
+	"fmt"
+
+	lru "github.com/hashicorp/golang-lru/v2"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/storage"
+)
+
+const DefaultCacheSize = uint(10_000)
+
+type CacheType int
+
+const (
+	CacheTypeLRU CacheType = iota + 1
+	CacheTypeTwoQueue
+)
+
+func ParseCacheType(s string) (CacheType, error) {
+	switch s {
+	case CacheTypeLRU.String():
+		return CacheTypeLRU, nil
+	case CacheTypeTwoQueue.String():
+		return CacheTypeTwoQueue, nil
+	default:
+		return 0, errors.New("invalid cache type")
+	}
+}
+
+func (m CacheType) String() string {
+	switch m {
+	case CacheTypeLRU:
+		return "lru"
+	case CacheTypeTwoQueue:
+		return "2q"
+	default:
+		return ""
+	}
+}
+
+type CacheBackend interface {
+	Get(key string) (value flow.RegisterValue, ok bool)
+	Add(key string, value flow.RegisterValue)
+	Contains(key string) bool
+	Len() int
+	Remove(key string)
+}
+
+// wrapped is a wrapper around lru.Cache to implement CacheBackend.
+// It is needed because the standard lru cache implementation provides additional features that
+// the 2Q cache does not. This standardizes the interface to allow swapping between the types.
+type wrapped struct {
+	cache *lru.Cache[string, flow.RegisterValue]
+}
+
+func (c *wrapped) Get(key string) (value flow.RegisterValue, ok bool) {
+	return c.cache.Get(key)
+}
+func (c *wrapped) Add(key string, value flow.RegisterValue) {
+	_ = c.cache.Add(key, value)
+}
+func (c *wrapped) Contains(key string) bool {
+	return c.cache.Contains(key)
+}
+func (c *wrapped) Len() int {
+	return c.cache.Len()
+}
+func (c *wrapped) Remove(key string) {
+	_ = c.cache.Remove(key)
+}
+
+type ReadCache struct {
+	metrics  module.CacheMetrics
+	resource string
+	cache    CacheBackend
+	retrieve func(key string) (flow.RegisterValue, error)
+}
+
+func newReadCache(
+	collector module.CacheMetrics,
+	resourceName string,
+	cacheType CacheType,
+	cacheSize uint,
+	retrieve func(key string) (flow.RegisterValue, error),
+) (*ReadCache, error) {
+	cache, err := getCache(cacheType, int(cacheSize))
+	if err != nil {
+		return nil, fmt.Errorf("could not create cache: %w", err)
+	}
+
+	c := ReadCache{
+		metrics:  collector,
+		resource: resourceName,
+		cache:    cache,
+		retrieve: retrieve,
+	}
+	c.metrics.CacheEntries(c.resource, uint(c.cache.Len()))
+
+	return &c, nil
+}
+
+func getCache(cacheType CacheType, size int) (CacheBackend, error) {
+	switch cacheType {
+	case CacheTypeLRU:
+		cache, err := lru.New[string, flow.RegisterValue](size)
+		if err != nil {
+			return nil, err
+		}
+		return &wrapped{cache: cache}, nil
+	case CacheTypeTwoQueue:
+		return lru.New2Q[string, flow.RegisterValue](size)
+	default:
+		return nil, fmt.Errorf("unknown cache type: %d", cacheType)
+	}
+}
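As a hedged sketch of how this cache is meant to be wired up: the retrieve function translates the backend's not-found error into this package's `storage.ErrNotFound` sentinel, which Get (below) relies on to cache negative lookups. The function name and resource label here are illustrative:

```go
// Sketch: wiring a ReadCache in front of a raw pebble lookup.
func newRegisterReadCache(db *pebble.DB, metrics module.CacheMetrics) (*ReadCache, error) {
	retrieve := func(key string) (flow.RegisterValue, error) {
		val, closer, err := db.Get([]byte(key))
		if err != nil {
			if errors.Is(err, pebble.ErrNotFound) {
				return nil, storage.ErrNotFound // sentinel the cache expects
			}
			return nil, err
		}
		defer closer.Close()
		// val is only valid until closer.Close(), so copy it out.
		out := make(flow.RegisterValue, len(val))
		copy(out, val)
		return out, nil
	}
	return newReadCache(metrics, "registers", CacheTypeTwoQueue, DefaultCacheSize, retrieve)
}
```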
+
+// IsCached returns true if the key exists in the cache.
+// It DOES NOT check whether the key exists in the underlying data store.
+func (c *ReadCache) IsCached(key string) bool {
+	return c.cache.Contains(key)
+}
+
+// Get will try to retrieve the resource from the cache first, and then from the
+// injected retrieve function. During normal operations, the following error returns are expected:
+//   - `storage.ErrNotFound` if the key is unknown.
+func (c *ReadCache) Get(key string) (flow.RegisterValue, error) {
+	resource, cached := c.cache.Get(key)
+	if cached {
+		c.metrics.CacheHit(c.resource)
+		if resource == nil {
+			return nil, storage.ErrNotFound
+		}
+		return resource, nil
+	}
+
+	// get it from the database
+	resource, err := c.retrieve(key)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			c.cache.Add(key, nil)
+			c.metrics.CacheEntries(c.resource, uint(c.cache.Len()))
+			c.metrics.CacheNotFound(c.resource)
+		}
+		return nil, fmt.Errorf("could not retrieve resource: %w", err)
+	}
+
+	c.metrics.CacheMiss(c.resource)
+
+	c.cache.Add(key, resource)
+	c.metrics.CacheEntries(c.resource, uint(c.cache.Len()))
+
+	return resource, nil
+}
+
+func (c *ReadCache) Remove(key string) {
+	c.cache.Remove(key)
+}
+
+// Insert will add a resource directly to the cache with the given ID.
+func (c *ReadCache) Insert(key string, resource flow.RegisterValue) {
+	c.cache.Add(key, resource)
+	c.metrics.CacheEntries(c.resource, uint(c.cache.Len()))
+}
diff --git a/storage/pebble/config.go b/storage/pebble/config.go
new file mode 100644
index 00000000000..06e8497d50d
--- /dev/null
+++ b/storage/pebble/config.go
@@ -0,0 +1,65 @@
+package pebble
+
+import (
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/cockroachdb/pebble/v2/bloom"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/storage/util"
+)
+
+// DefaultPebbleOptions returns an optimized set of pebble options.
+// This is mostly copied from pebble's nightly performance benchmark.
+func DefaultPebbleOptions(logger zerolog.Logger, cache *pebble.Cache, comparer *pebble.Comparer) *pebble.Options {
+	opts := &pebble.Options{
+		Cache:              cache,
+		Comparer:           comparer,
+		FormatMajorVersion: pebble.FormatVirtualSSTables,
+
+		// Soft and hard limits on the read amplification of L0, respectively.
+		L0CompactionThreshold: 2,
+		L0StopWritesThreshold: 1000,
+
+		// When the maximum number of bytes for a level is exceeded, compaction is requested.
+		LBaseMaxBytes: 64 << 20, // 64 MB
+		Levels:        make([]pebble.LevelOptions, 7),
+		MaxOpenFiles:  16384,
+
+		// Writes are stopped when the sum of the queued memtable sizes exceeds MemTableStopWritesThreshold*MemTableSize.
+		MemTableSize:                64 << 20,
+		MemTableStopWritesThreshold: 4,
+
+		// The default is 1.
+		MaxConcurrentCompactions: func() int { return 4 },
+		Logger:                   util.NewLogger(logger),
+	}
+
+	for i := 0; i < len(opts.Levels); i++ {
+		l := &opts.Levels[i]
+		// The default is 4KiB (uncompressed), which is too small
+		// for good performance (esp. on striped storage).
+		l.BlockSize = 32 << 10       // 32 KB
+		l.IndexBlockSize = 256 << 10 // 256 KB
+
+		// The bloom filter speeds up our SeekPrefixGE by skipping
+		// sstables that do not contain the prefix.
+		l.FilterPolicy = bloom.FilterPolicy(MinLookupKeyLen)
+		l.FilterType = pebble.TableFilter
+
+		if i > 0 {
+			// L0 starts at 2MiB, each level is 2x the previous.
+			l.TargetFileSize = opts.Levels[i-1].TargetFileSize * 2
+		}
+		l.EnsureDefaults()
+	}
+
+	// TODO(rbtz): benchmark with and without bloom filters on L6
+	// opts.Levels[6].FilterPolicy = nil
+
+	// Splitting sstables during flush allows increased compaction flexibility and concurrency when those
+	// tables are compacted to lower levels.
+ opts.FlushSplitBytes = opts.Levels[0].TargetFileSize + opts.EnsureDefaults() + + return opts +} diff --git a/storage/pebble/constants.go b/storage/pebble/constants.go new file mode 100644 index 00000000000..9805b37f90b --- /dev/null +++ b/storage/pebble/constants.go @@ -0,0 +1,39 @@ +package pebble + +import "github.com/onflow/flow-go/storage/pebble/registers" + +const ( + // checkpointLeafNodeBufSize is the batch size of leaf nodes being read from the checkpoint file, + // for use by wal.OpenAndReadLeafNodesFromCheckpointV6 + checkpointLeafNodeBufSize = 1000 + + // pebbleBootstrapRegisterBatchLen is the batch size of converted register values to be written to pebble by the + // register bootstrap process + pebbleBootstrapRegisterBatchLen = 1000 + + // placeHolderHeight is an element of the height lookup keys of length HeightSuffixLen + // 10 bits per key yields a filter with <1% false positive rate. + placeHolderHeight = uint64(0) + + // MinLookupKeyLen defines the minimum length for a valid lookup key + // + // Lookup keys use the following format: + // [code] [owner] / [key] / [height] + // Where: + // - code: 1 byte indicating the type of data stored + // - owner: optional variable length field + // - key: optional variable length field + // - height: 8 bytes representing the block height (uint64) + // - separator: '/' is used to separate variable length fields (required 2) + // + // Therefore the minimum key would be 3 bytes + # of bytes for height + // [code] / / [height] + MinLookupKeyLen = 3 + registers.HeightSuffixLen + + // prefixes + // codeRegister starting at 2, 1 and 0 reserved for DB specific constants + codeRegister byte = 2 + // codeFirstBlockHeight and codeLatestBlockHeight are keys for the range of block heights in the register store + codeFirstBlockHeight byte = 3 + codeLatestBlockHeight byte = 4 +) diff --git a/storage/pebble/lookup.go b/storage/pebble/lookup.go new file mode 100644 index 00000000000..d0059abf2c2 --- /dev/null +++ b/storage/pebble/lookup.go @@ -0,0 +1,128 @@ +package pebble + +import ( + "bytes" + "encoding/binary" + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage/pebble/registers" +) + +// latestHeightKey is a special case of a lookupKey +// with keyLatestBlockHeight as key, no owner and a placeholder height of 0. +// This is to ensure SeekPrefixGE in pebble does not break +var latestHeightKey = binary.BigEndian.AppendUint64( + []byte{codeLatestBlockHeight, byte('/'), byte('/')}, placeHolderHeight) + +// firstHeightKey is a special case of a lookupKey +// with keyFirstBlockHeight as key, no owner and a placeholder height of 0. +// This is to ensure SeekPrefixGE in pebble does not break +var firstHeightKey = binary.BigEndian.AppendUint64( + []byte{codeFirstBlockHeight, byte('/'), byte('/')}, placeHolderHeight) + +// lookupKey is the encoded format of the storage key for looking up register value +type lookupKey struct { + encoded []byte +} + +// newLookupKey takes a height and registerID, returns the key for storing the register value in storage +func newLookupKey(height uint64, reg flow.RegisterID) *lookupKey { + key := lookupKey{ + // 1 byte gaps for db prefix and '/' separators + encoded: make([]byte, 0, MinLookupKeyLen+len(reg.Owner)+len(reg.Key)), + } + + // append DB prefix + key.encoded = append(key.encoded, codeRegister) + + // The lookup key used to find most recent value for a register. 
+	//
+	// The "<owner>/<key>" part is the register key, which is used as a prefix to filter and iterate
+	// through updated values at different heights, and to find the most recent updated value at or below
+	// a certain height.
+	key.encoded = append(key.encoded, []byte(reg.Owner)...)
+	key.encoded = append(key.encoded, '/')
+	key.encoded = append(key.encoded, []byte(reg.Key)...)
+	key.encoded = append(key.encoded, '/')
+
+	// Encode the height as its one's complement (all bits flipped) in big-endian byte order.
+	//
+	// Registers are a sparse dataset stored with a single entry per update. To find the value at a particular
+	// height, we need to scan across the entries to find the highest height that is less than or equal
+	// to the target height.
+	// The register lookup relies on pebble's forward prefix scan (SeekPrefixGE), so we store the
+	// height's one's complement to effectively reverse-sort the entries by height. This allows us to
+	// use a bitwise forward scan to find the next most recent entry.
+	onesComplement := ^height
+	key.encoded = binary.BigEndian.AppendUint64(key.encoded, onesComplement)
+
+	return &key
+}
+
+// lookupKeyToRegisterID takes a lookup key and decodes it into a height and a RegisterID.
+func lookupKeyToRegisterID(lookupKey []byte) (uint64, flow.RegisterID, error) {
+	if len(lookupKey) < MinLookupKeyLen {
+		return 0, flow.RegisterID{}, fmt.Errorf("invalid lookup key format: expected >= %d bytes, got %d bytes",
+			MinLookupKeyLen, len(lookupKey))
+	}
+
+	// check and exclude the db prefix
+	prefix := lookupKey[0]
+	if prefix != codeRegister {
+		return 0, flow.RegisterID{}, fmt.Errorf("incorrect prefix %d for register lookup key, expected %d",
+			prefix, codeRegister)
+	}
+	lookupKey = lookupKey[1:]
+
+	// Find the first slash to split the lookup key and decode the owner.
+	firstSlash := bytes.IndexByte(lookupKey, '/')
+	if firstSlash == -1 {
+		return 0, flow.RegisterID{}, fmt.Errorf("invalid lookup key format: cannot find first slash")
+	}
+
+	owner := string(lookupKey[:firstSlash])
+
+	// Find the last slash to split off the encoded height.
+	lastSlashPos := bytes.LastIndexByte(lookupKey, '/')
+	if lastSlashPos == firstSlash {
+		return 0, flow.RegisterID{}, fmt.Errorf("invalid lookup key format: expected 2 separators, got 1 separator")
+	}
+	encodedHeightPos := lastSlashPos + 1
+	if len(lookupKey)-encodedHeightPos != registers.HeightSuffixLen {
+		return 0, flow.RegisterID{},
+			fmt.Errorf("invalid lookup key format: expected %d bytes of encoded height, got %d bytes",
+				registers.HeightSuffixLen, len(lookupKey)-encodedHeightPos)
+	}
+
+	// Decode the height by flipping the one's complement back.
+	heightBytes := lookupKey[encodedHeightPos:]
+
+	onesComplement := binary.BigEndian.Uint64(heightBytes)
+	height := ^onesComplement
+
+	// Decode the remaining bytes into the key.
+	keyBytes := lookupKey[firstSlash+1 : lastSlashPos]
+	key := string(keyBytes)
+
+	regID := flow.RegisterID{Owner: owner, Key: key}
+
+	return height, regID, nil
+}
+
+// Bytes returns the encoded lookup key.
+func (h lookupKey) Bytes() []byte {
+	return h.encoded
+}
+
+// String returns the encoded lookup key as a string.
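It is worth pinning down what the encoding above actually produces. A hedged sketch of an encode-decode round trip through the helpers above (the function name is illustrative, `fmt` assumed imported; heights are stored as their one's complement, so higher heights sort first under SeekPrefixGE):

```go
// Sketch: encode-decode round trip for a register lookup key.
func demoLookupKey() {
	key := newLookupKey(12, flow.RegisterID{Owner: "own", Key: "k"})
	// Layout: [codeRegister] "own" '/' "k" '/' ^12 as 8 big-endian bytes.
	fmt.Printf("%x\n", key.Bytes())

	height, reg, err := lookupKeyToRegisterID(key.Bytes())
	fmt.Println(height, reg.Owner, reg.Key, err) // prints: 12 own k <nil>
}
```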
+func (h lookupKey) String() string { + return string(h.encoded) +} + +// encodedUint64 encodes uint64 for storing as a pebble payload +func encodedUint64(height uint64) []byte { + payload := make([]byte, 0, 8) + return binary.BigEndian.AppendUint64(payload, height) +} diff --git a/storage/pebble/lookup_test.go b/storage/pebble/lookup_test.go new file mode 100644 index 00000000000..383fd995490 --- /dev/null +++ b/storage/pebble/lookup_test.go @@ -0,0 +1,108 @@ +package pebble + +import ( + "encoding/binary" + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" +) + +// Test_lookupKey_Bytes tests the lookup key encoding. +func Test_lookupKey_Bytes(t *testing.T) { + t.Parallel() + + expectedHeight := uint64(777) + key := newLookupKey(expectedHeight, flow.RegisterID{Owner: "owner", Key: "key"}) + + // Test prefix + require.Equal(t, byte(codeRegister), key.Bytes()[0]) + + // Test encoded Owner and Key + require.Equal(t, []byte("owner/key/"), key.Bytes()[1:11]) + + // Test encoded height + actualHeight := binary.BigEndian.Uint64(key.Bytes()[11:]) + require.Equal(t, math.MaxUint64-actualHeight, expectedHeight) + + // Test everything together + resultLookupKey := []byte{codeRegister} + resultLookupKey = append(resultLookupKey, []byte("owner/key/\xff\xff\xff\xff\xff\xff\xfc\xf6")...) + require.Equal(t, resultLookupKey, key.Bytes()) + + decodedHeight, decodedReg, err := lookupKeyToRegisterID(key.encoded) + require.NoError(t, err) + + require.Equal(t, expectedHeight, decodedHeight) + require.Equal(t, "owner", decodedReg.Owner) + require.Equal(t, "key", decodedReg.Key) +} + +func Test_decodeKey_Bytes(t *testing.T) { + height := uint64(10) + + cases := []struct { + owner string + key string + }{ + {owner: "owneraddress", key: "public/storage/hasslash-in-key"}, + {owner: "owneraddress", key: ""}, + {owner: "", key: "somekey"}, + {owner: "", key: ""}, + } + + for _, c := range cases { + owner, key := c.owner, c.key + + lookupKey := newLookupKey(height, flow.RegisterID{Owner: owner, Key: key}) + decodedHeight, decodedReg, err := lookupKeyToRegisterID(lookupKey.Bytes()) + require.NoError(t, err) + + require.Equal(t, height, decodedHeight) + require.Equal(t, owner, decodedReg.Owner) + require.Equal(t, key, decodedReg.Key) + } +} + +func Test_decodeKey_fail(t *testing.T) { + var err error + // less than min length (10) + _, _, err = lookupKeyToRegisterID([]byte{codeRegister, 1, 2, 3, 4, 5, 6, 7, 8, 9}) + require.Contains(t, err.Error(), "bytes") + + // missing slash + _, _, err = lookupKeyToRegisterID([]byte{codeRegister, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + require.Contains(t, err.Error(), "slash") + + // missing second slash + _, _, err = lookupKeyToRegisterID([]byte{codeRegister, 1, 2, 3, '/', 5, 6, 7, 8, 9, 10}) + require.Contains(t, err.Error(), "separator") + + // invalid height + _, _, err = lookupKeyToRegisterID([]byte{codeRegister, 1, 2, 3, '/', 5, 6, 7, 8, '/', 10}) + require.Contains(t, err.Error(), "height") + + // invalid height + _, _, err = lookupKeyToRegisterID([]byte{codeRegister, 1, 2, 3, '/', 5, '/', 7, 8, 9, 10}) + require.Contains(t, err.Error(), "height") + + // invalid height + _, _, err = lookupKeyToRegisterID([]byte{codeRegister, 1, 2, 3, '/', 5, '/', 7, 8, 9, 10, 11, 12, 13}) + require.Contains(t, err.Error(), "height") + + // valid height + _, _, err = lookupKeyToRegisterID([]byte{codeRegister, 1, 2, 3, '/', 5, '/', 7, 8, 9, 10, 11, 12, 13, 14}) + require.NoError(t, err) +} + +func Test_prefix_error(t *testing.T) { + correctKey := 
newLookupKey(uint64(0), flow.RegisterID{Owner: "owner", Key: "key"})
+	incorrectKey := firstHeightKey
+	_, _, err := lookupKeyToRegisterID(correctKey.Bytes())
+	require.NoError(t, err)
+
+	_, _, err = lookupKeyToRegisterID(incorrectKey)
+	require.ErrorContains(t, err, "incorrect prefix")
+}
diff --git a/storage/pebble/open.go b/storage/pebble/open.go
new file mode 100644
index 00000000000..05fefc3d246
--- /dev/null
+++ b/storage/pebble/open.go
@@ -0,0 +1,201 @@
+package pebble
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/hashicorp/go-multierror"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/module/util"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/pebble/registers"
+)
+
+const DefaultPebbleCacheSize = 1 << 20
+
+// NewBootstrappedRegistersWithPath initializes a new Registers instance with a pebble db.
+// If the database is not initialized, it closes the database and returns storage.ErrNotBootstrapped.
+func NewBootstrappedRegistersWithPath(logger zerolog.Logger, dir string) (*Registers, *pebble.DB, error) {
+	db, err := OpenRegisterPebbleDB(logger, dir)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to initialize pebble db: %w", err)
+	}
+	registers, err := NewRegisters(db, PruningDisabled)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotBootstrapped) {
+			// closing the db if not bootstrapped
+			dbErr := db.Close()
+			if dbErr != nil {
+				err = multierror.Append(err, fmt.Errorf("failed to close db: %w", dbErr))
+			}
+		}
+		return nil, nil, fmt.Errorf("failed to initialize registers: %w", err)
+	}
+	return registers, db, nil
+}
+
+// OpenRegisterPebbleDB opens the database.
+// The difference from openDefaultPebbleDB is that it uses
+// a customized comparer (NewMVCCComparer), which is needed to
+// implement finding register values at any given height using
+// pebble's SeekPrefixGE function.
+func OpenRegisterPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) {
+	cache := pebble.NewCache(DefaultPebbleCacheSize)
+	defer cache.Unref()
+	// currently pebble is only used for registers
+	opts := DefaultPebbleOptions(logger, cache, registers.NewMVCCComparer())
+	db, err := pebble.Open(dir, opts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open db: %w", err)
+	}
+
+	return db, nil
+}
+
+// openDefaultPebbleDB opens a pebble database using default options,
+// such as the cache size and comparer.
+func openDefaultPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) {
+	cache := pebble.NewCache(DefaultPebbleCacheSize)
+	defer cache.Unref()
+	opts := DefaultPebbleOptions(logger, cache, pebble.DefaultComparer)
+	db, err := pebble.Open(dir, opts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open db: %w", err)
+	}
+
+	return db, nil
+}
+
+// ShouldOpenDefaultPebbleDB returns an error if the pebble DB is not bootstrapped in this folder;
+// if it is bootstrapped, it opens the pebble DB.
+func ShouldOpenDefaultPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) {
+	ok, err := IsPebbleFolder(dir)
+	if err != nil || !ok {
+		return nil, fmt.Errorf("pebble db is not initialized: %w", err)
+	}
+
+	return SafeOpen(logger, dir)
+}
+
+// SafeOpen opens a pebble database at the given directory.
+// It opens the database only if the directory:
+//  1. does not exist, in which case it creates the directory,
+//  2. is empty, or
+//  3.
diff --git a/storage/pebble/open.go b/storage/pebble/open.go
new file mode 100644
index 00000000000..05fefc3d246
--- /dev/null
+++ b/storage/pebble/open.go
@@ -0,0 +1,201 @@
+package pebble
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/hashicorp/go-multierror"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/module/util"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/pebble/registers"
+)
+
+const DefaultPebbleCacheSize = 1 << 20
+
+// NewBootstrappedRegistersWithPath initializes a new Registers instance with a pebble db.
+// If the database is not bootstrapped, it closes the database and returns storage.ErrNotBootstrapped.
+func NewBootstrappedRegistersWithPath(logger zerolog.Logger, dir string) (*Registers, *pebble.DB, error) {
+	db, err := OpenRegisterPebbleDB(logger, dir)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to initialize pebble db: %w", err)
+	}
+	registers, err := NewRegisters(db, PruningDisabled)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotBootstrapped) {
+			// closing the db if not bootstrapped
+			dbErr := db.Close()
+			if dbErr != nil {
+				err = multierror.Append(err, fmt.Errorf("failed to close db: %w", dbErr))
+			}
+		}
+		return nil, nil, fmt.Errorf("failed to initialize registers: %w", err)
+	}
+	return registers, db, nil
+}
+
+// OpenRegisterPebbleDB opens the database.
+// Unlike openDefaultPebbleDB, it uses a customized comparer (NewMVCCComparer), which is
+// needed to find register values at any given height using pebble's SeekPrefixGE function.
+func OpenRegisterPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) {
+	cache := pebble.NewCache(DefaultPebbleCacheSize)
+	defer cache.Unref()
+	// currently pebble is only used for registers
+	opts := DefaultPebbleOptions(logger, cache, registers.NewMVCCComparer())
+	db, err := pebble.Open(dir, opts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open db: %w", err)
+	}
+
+	return db, nil
+}
+
+// openDefaultPebbleDB opens a pebble database using default options,
+// such as cache size and comparer
+func openDefaultPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) {
+	cache := pebble.NewCache(DefaultPebbleCacheSize)
+	defer cache.Unref()
+	opts := DefaultPebbleOptions(logger, cache, pebble.DefaultComparer)
+	db, err := pebble.Open(dir, opts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open db: %w", err)
+	}
+
+	return db, nil
+}
+
+// ShouldOpenDefaultPebbleDB returns an error if no bootstrapped pebble db exists at the
+// given folder; otherwise it opens the pebble db.
+func ShouldOpenDefaultPebbleDB(logger zerolog.Logger, dir string) (*pebble.DB, error) {
+	ok, err := IsPebbleFolder(dir)
+	if err != nil || !ok {
+		return nil, fmt.Errorf("pebble db is not initialized: %w", err)
+	}
+
+	return SafeOpen(logger, dir)
+}
+
+// SafeOpen opens a pebble database at the given directory. It opens the database only if
+// the directory:
+// 1. does not exist, in which case the directory is created
+// 2. is empty
+// 3. was opened as a pebble database before, in which case it contains all required pebble files
+// It returns an error if the directory is non-empty but missing required pebble files.
+// In particular, a badger folder is rejected, because it is missing pebble files.
+func SafeOpen(logger zerolog.Logger, dataDir string) (*pebble.DB, error) {
+	ok, err := util.IsEmptyOrNotExists(dataDir)
+	if err != nil {
+		return nil, fmt.Errorf("error checking if folder is empty or does not exist: %w", err)
+	}
+
+	// if the folder is empty or does not exist, then it can be used as a Pebble folder
+	if ok {
+		return openDefaultPebbleDB(logger, dataDir)
+	}
+
+	// note, a badger folder does not have a MANIFEST-* file, so this will return an error
+	// and prevent opening a badger folder as a pebble folder
+	ok, err = folderHaveAllPebbleFiles(dataDir)
+	if err != nil || !ok {
+		return nil, fmt.Errorf("folder %s is not a valid pebble folder: %w", dataDir, err)
+	}
+
+	return openDefaultPebbleDB(logger, dataDir)
+}
+
+// IsPebbleFolder checks if the given folder contains a valid Pebble DB.
+// It returns an error if the folder does not exist, is not a directory, or is missing
+// required files, and nil if the folder contains a valid Pebble DB.
+func IsPebbleFolder(folderPath string) (bool, error) {
+	// Check if the folder exists
+	info, err := os.Stat(folderPath)
+	if os.IsNotExist(err) {
+		return false, fmt.Errorf("directory does not exist: %s", folderPath)
+	}
+	if err != nil {
+		return false, fmt.Errorf("failed to stat directory: %w", err)
+	}
+	if !info.IsDir() {
+		return false, fmt.Errorf("not a directory: %s", folderPath)
+	}
+
+	return folderHaveAllPebbleFiles(folderPath)
+}
+
+func folderHaveAllPebbleFiles(folderPath string) (bool, error) {
+	// Look for Pebble-specific files
+	requiredFiles := []string{"MANIFEST-*"}
+	for _, pattern := range requiredFiles {
+		matches, err := filepath.Glob(filepath.Join(folderPath, pattern))
+		if err != nil {
+			return false, fmt.Errorf("error checking for files: %w", err)
+		}
+		if len(matches) == 0 {
+			return false, fmt.Errorf("missing required file: %s", pattern)
+		}
+	}
+
+	return true, nil
+}
+
+// ReadHeightsFromBootstrappedDB reads the first and latest height from a bootstrapped register db.
+// If the register db is not bootstrapped, it returns storage.ErrNotBootstrapped.
+// If the register db is corrupted, it returns an error.
+func ReadHeightsFromBootstrappedDB(db *pebble.DB) (firstHeight uint64, latestHeight uint64, err error) {
+	// check the height keys; if the db is bootstrapped, both heights will have been set
firstHeight, err = firstStoredHeight(db)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			return 0, 0, fmt.Errorf("unable to initialize register storage, first height not found in db: %w", storage.ErrNotBootstrapped)
+		}
+		// this means that the DB is either in a corrupted state or has not been initialized
+		return 0, 0, fmt.Errorf("unable to initialize register storage, first height unavailable in db: %w", err)
+	}
+	latestHeight, err = latestStoredHeight(db)
+	if err != nil {
+		// the first height was found but the latest height was not, which means the DB is in a corrupted state
+		return 0, 0, fmt.Errorf("unable to initialize register storage, latest height unavailable in db: %w", err)
+	}
+	return firstHeight, latestHeight, nil
+}
+
+// IsBootstrapped returns true if the db is bootstrapped and false otherwise.
+// It returns an error if the db is corrupted or another exception occurs.
+func IsBootstrapped(db *pebble.DB) (bool, error) {
+	_, err1 := firstStoredHeight(db)
+	_, err2 := latestStoredHeight(db)
+
+	if err1 == nil && err2 == nil {
+		return true, nil
+	}
+
+	if errors.Is(err1, storage.ErrNotFound) && errors.Is(err2, storage.ErrNotFound) {
+		return false, nil
+	}
+
+	return false, fmt.Errorf("unable to check if db is bootstrapped %v: %w", err1, err2)
+}
+
+func initHeights(db *pebble.DB, firstHeight uint64) error {
+	batch := db.NewBatch()
+	defer batch.Close()
+	// update heights atomically to prevent one getting populated without the other
+	err := batch.Set(firstHeightKey, encodedUint64(firstHeight), nil)
+	if err != nil {
+		return fmt.Errorf("unable to add first height to batch: %w", err)
+	}
+	err = batch.Set(latestHeightKey, encodedUint64(firstHeight), nil)
+	if err != nil {
+		return fmt.Errorf("unable to add latest height to batch: %w", err)
+	}
+	err = batch.Commit(pebble.Sync)
+	if err != nil {
+		return fmt.Errorf("unable to index first and latest heights: %w", err)
+	}
+	return nil
+}
diff --git a/storage/pebble/open_test.go b/storage/pebble/open_test.go
new file mode 100644
index 00000000000..2a19c506d75
--- /dev/null
+++ b/storage/pebble/open_test.go
@@ -0,0 +1,154 @@
+package pebble
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestIsBootstrapped(t *testing.T) {
+	t.Parallel()
+	unittest.RunWithTempDir(t, func(dir string) {
+		logger := unittest.Logger()
+		db, err := OpenRegisterPebbleDB(logger, dir)
+		require.NoError(t, err)
+		bootstrapped, err := IsBootstrapped(db)
+		require.NoError(t, err)
+		require.False(t, bootstrapped)
+		require.NoError(t, db.Close())
+	})
+}
+
+func TestReadHeightsFromBootstrappedDB(t *testing.T) {
+	t.Parallel()
+	unittest.RunWithTempDir(t, func(dir string) {
+		logger := unittest.Logger()
+		db, err := OpenRegisterPebbleDB(logger, dir)
+		require.NoError(t, err)
+
+		// init with first height
+		firstHeight := uint64(10)
+		require.NoError(t, initHeights(db, firstHeight))
+
+		bootstrapped, err := IsBootstrapped(db)
+		require.NoError(t, err)
+
+		require.True(t, bootstrapped)
+		require.NoError(t, db.Close())
+
+		// reopen the db
+		registers, db, err := NewBootstrappedRegistersWithPath(logger, dir)
+		require.NoError(t, err)
+
+		require.Equal(t, firstHeight, registers.FirstHeight())
+		require.Equal(t, firstHeight, registers.LatestHeight())
+
+		require.NoError(t, db.Close())
+	})
+}
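+
+// For orientation, a hedged sketch of the bootstrap sequence exercised by the tests above
+// and below (illustrative; initHeights is a package-local helper):
+//
+//	db, _ := OpenRegisterPebbleDB(logger, dir)
+//	_ = initHeights(db, 10) // writes firstHeightKey and latestHeightKey atomically
+//	registers, _ := NewRegisters(db, PruningDisabled)
+//	registers.FirstHeight()  // 10
+//	registers.LatestHeight() // 10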
+func TestNewBootstrappedRegistersWithPath(t *testing.T) {
+	t.Parallel()
+	unittest.RunWithTempDir(t, func(dir string) {
+		logger := unittest.Logger()
+		_, db, err := NewBootstrappedRegistersWithPath(logger, dir)
+		require.ErrorIs(t, err, storage.ErrNotBootstrapped)
+
+		// verify the db is closed
+		require.True(t, db == nil)
+
+		// bootstrap the db
+		// init with first height
+		db2, err := OpenRegisterPebbleDB(logger, dir)
+		require.NoError(t, err)
+		firstHeight := uint64(10)
+		require.NoError(t, initHeights(db2, firstHeight))
+
+		registers, err := NewRegisters(db2, PruningDisabled)
+		require.NoError(t, err)
+		require.Equal(t, firstHeight, registers.FirstHeight())
+		require.Equal(t, firstHeight, registers.LatestHeight())
+
+		require.NoError(t, db2.Close())
+	})
+}
+
+func TestSafeOpen(t *testing.T) {
+	t.Parallel()
+	unittest.RunWithTempDir(t, func(dir string) {
+		logger := unittest.Logger()
+		// create an empty folder
+		pebbleDB, err := SafeOpen(logger, dir)
+		require.NoError(t, err)
+		require.NoError(t, pebbleDB.Close())
+
+		// can be opened again
+		db, err := SafeOpen(logger, dir)
+		require.NoError(t, err)
+		require.NoError(t, db.Close())
+	})
+}
+
+func TestSafeOpenFailIfDirIsUsedByBadgerDB(t *testing.T) {
+	t.Parallel()
+	unittest.RunWithTempDir(t, func(dir string) {
+		logger := unittest.Logger()
+		// create a badger db
+		badgerDB := unittest.BadgerDB(t, dir)
+		require.NoError(t, badgerDB.Close())
+
+		_, err := SafeOpen(logger, dir)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "is not a valid pebble folder")
+	})
+}
+
+func TestShouldOpenDefaultPebbleDB(t *testing.T) {
+	t.Parallel()
+	unittest.RunWithTempDir(t, func(dir string) {
+		logger := unittest.Logger()
+		// verify an error is returned if the directory does not exist
+		_, err := ShouldOpenDefaultPebbleDB(logger, dir+"/not-exist")
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "not initialized")
+
+		// verify an error is returned if the directory exists but is not bootstrapped
+		_, err = ShouldOpenDefaultPebbleDB(logger, dir)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "not initialized")
+
+		// bootstrap the db
+		db, err := SafeOpen(logger, dir)
+		require.NoError(t, err)
+		require.NoError(t, initHeights(db, uint64(10)))
+		require.NoError(t, db.Close())
+
+		// verify no error is returned when the db is bootstrapped
+		db, err = ShouldOpenDefaultPebbleDB(logger, dir)
+		require.NoError(t, err)
+
+		h, err := latestStoredHeight(db)
+		require.NoError(t, err)
+		require.Equal(t, uint64(10), h)
+		require.NoError(t, db.Close())
+	})
+}
+
+func TestShouldOpenDefaultPebbleDBFailWhenOpeningBadgerDBDir(t *testing.T) {
+	t.Parallel()
+	unittest.RunWithTempDir(t, func(dir string) {
+		logger := unittest.Logger()
+		// create a badger db
+		badgerDB := unittest.BadgerDB(t, dir)
+		require.NoError(t, badgerDB.Close())
+
+		_, err := ShouldOpenDefaultPebbleDB(logger, dir)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "pebble db is not initialized")
+	})
+}
diff --git a/storage/pebble/registers.go b/storage/pebble/registers.go
new file mode 100644
index 00000000000..aa08a19c77d
--- /dev/null
+++ b/storage/pebble/registers.go
@@ -0,0 +1,210 @@
+package pebble
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/pkg/errors"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+// Registers implements pebble-backed storage for registers, given a pebble instance
+// with the root block and root height populated.
+type Registers struct {
+	db           *pebble.DB
+	firstHeight  uint64
+	latestHeight *atomic.Uint64
+	pruneThreshold uint64
+}
+
+// PruningDisabled represents the absence of a pruning threshold.
+const PruningDisabled = math.MaxUint64
+
+var _ storage.RegisterIndex = (*Registers)(nil)
+
+// NewRegisters takes a populated pebble instance with LatestHeight and FirstHeight set.
+// It returns storage.ErrNotBootstrapped if those two keys are unavailable, as that implies
+// an uninitialized state, and another error if the database is in a corrupted state.
+func NewRegisters(db *pebble.DB, pruneThreshold uint64) (*Registers, error) {
+	// read the first and latest heights; both will have been set if the db is bootstrapped
+	firstHeight, latestHeight, err := ReadHeightsFromBootstrappedDB(db)
+	if err != nil {
+		// err is storage.ErrNotBootstrapped if the heights are missing,
+		// and indicates a corrupted state otherwise
+		return nil, fmt.Errorf("unable to initialize register storage: %w", err)
+	}
+
+	// If no pruning threshold is provided, disable pruning.
+	if pruneThreshold == 0 {
+		pruneThreshold = PruningDisabled
+	}
+
+	// All registers between firstHeight and lastHeight have been indexed
+	return &Registers{
+		db:             db,
+		firstHeight:    firstHeight,
+		latestHeight:   atomic.NewUint64(latestHeight),
+		pruneThreshold: pruneThreshold,
+	}, nil
+}
+
+// Get returns the most recently updated payload for the given RegisterID.
+// "most recent" means the latest update at or below the given height.
+//
+// For example, if there are 2 values stored for register A at height 6 and 11, then
+// GetPayload(13, A) would return the value at height 11.
+//
+// - storage.ErrNotFound if no register values are found
+// - storage.ErrHeightNotIndexed if the requested height is out of the range of stored heights
+func (s *Registers) Get(
+	reg flow.RegisterID,
+	height uint64,
+) (flow.RegisterValue, error) {
+	latestHeight := s.LatestHeight()
+	if height > latestHeight {
+		return nil, fmt.Errorf("height %d not indexed, latestHeight: %d, %w", height, latestHeight, storage.ErrHeightNotIndexed)
+	}
+
+	firstHeight := s.calculateFirstHeight(latestHeight)
+	if height < firstHeight {
+		return nil, fmt.Errorf("height %d not indexed, indexed range: [%d-%d], %w", height, firstHeight, latestHeight, storage.ErrHeightNotIndexed)
+	}
+	key := newLookupKey(height, reg)
+	return s.lookupRegister(key.Bytes())
+}
+
+func (s *Registers) lookupRegister(key []byte) (flow.RegisterValue, error) {
+	iter, err := s.db.NewIter(&pebble.IterOptions{
+		UseL6Filters: true,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	defer iter.Close()
+
+	ok := iter.SeekPrefixGE(key)
+	if !ok {
+		// no such register found
+		return nil, storage.ErrNotFound
+	}
+
+	binaryValue, err := iter.ValueAndErr()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get value: %w", err)
+	}
+	// preventing caller from modifying the iterator's value slices
+	valueCopy := make([]byte, len(binaryValue))
+	copy(valueCopy, binaryValue)
+
+	return valueCopy, nil
+}
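+
+// A sketch of the versioned-read semantics implemented by Get/lookupRegister above
+// (illustrative; A stands for a flow.RegisterID updated at heights 6 and 11 only):
+//
+//	v, _ := s.Get(A, 13)  // height-11 value: the most recent update at or below 13
+//	v, _ = s.Get(A, 8)    // height-6 value
+//	_, err := s.Get(A, 5) // storage.ErrNotFound: no update at or below height 5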
+// Store sets the given entries in a batch.
+// This function is expected to be called with one batch per height, sequentially. Under normal
+// conditions, it should be called with the value of height set to LatestHeight + 1.
+// CAUTION: This function is not safe for concurrent use.
+func (s *Registers) Store(
+	entries flow.RegisterEntries,
+	height uint64,
+) error {
+	latestHeight := s.latestHeight.Load()
+	// This check handles a special case for the execution node.
+	// Upon restart, it may be in a state where registers are indexed in pebble for the latest
+	// height, but the remaining execution data in badger is not, so we skip the indexing step
+	// without throwing an error.
+	if height == latestHeight {
+		// already updated
+		return nil
+	}
+
+	nextHeight := latestHeight + 1
+	if height != nextHeight {
+		return fmt.Errorf("must store registers with the next height %v, but got %v", nextHeight, height)
+	}
+	batch := s.db.NewBatch()
+	defer batch.Close()
+
+	for _, entry := range entries {
+		encoded := newLookupKey(height, entry.Key).Bytes()
+
+		err := batch.Set(encoded, entry.Value, nil)
+		if err != nil {
+			return fmt.Errorf("failed to set key: %w", err)
+		}
+	}
+	// increment height and commit
+	err := batch.Set(latestHeightKey, encodedUint64(height), nil)
+	if err != nil {
+		return fmt.Errorf("failed to update latest height %d: %w", height, err)
+	}
+	err = batch.Commit(pebble.Sync)
+	if err != nil {
+		return fmt.Errorf("failed to commit batch: %w", err)
+	}
+
+	s.latestHeight.Store(height)
+
+	return nil
+}
+
+// LatestHeight returns the latest height of complete registers available.
+func (s *Registers) LatestHeight() uint64 {
+	return s.latestHeight.Load()
+}
+
+// FirstHeight returns the first indexed height found in the store, typically the root
+// block height for the spork.
+func (s *Registers) FirstHeight() uint64 {
+	return s.calculateFirstHeight(s.LatestHeight())
+}
+
+// calculateFirstHeight calculates the first indexed height that is stored in the register index, based on the
+// latest height and the configured pruning threshold. If the latest height is below the pruning threshold, the
+// first indexed height will be the same as the initial height when the store was initialized. If the pruning
+// threshold has been exceeded, the first indexed height is adjusted accordingly.
+//
+// Parameters:
+//   - latestHeight: the most recent height of complete registers available.
+//
+// Returns:
+//   - The first indexed height, either as the initialized height or adjusted for pruning.
+func (s *Registers) calculateFirstHeight(latestHeight uint64) uint64 {
+	if latestHeight < s.pruneThreshold {
+		return s.firstHeight
+	}
+
+	pruneHeight := latestHeight - s.pruneThreshold
+	if pruneHeight < s.firstHeight {
+		return s.firstHeight
+	}
+
+	return pruneHeight
+}
+
+func firstStoredHeight(db *pebble.DB) (uint64, error) {
+	return heightLookup(db, firstHeightKey)
+}
+
+func latestStoredHeight(db *pebble.DB) (uint64, error) {
+	return heightLookup(db, latestHeightKey)
+}
+
+func heightLookup(db *pebble.DB, key []byte) (uint64, error) {
+	res, closer, err := db.Get(key)
+	if err != nil {
+		return 0, convertNotFoundError(err)
+	}
+	defer closer.Close()
+	return binary.BigEndian.Uint64(res), nil
+}
+
+// convert pebble NotFound error to storage NotFound error
+func convertNotFoundError(err error) error {
+	if errors.Is(err, pebble.ErrNotFound) {
+		return storage.ErrNotFound
+	}
+	return err
+}
diff --git a/storage/pebble/registers/comparer.go b/storage/pebble/registers/comparer.go
new file mode 100644
index 00000000000..973bd9fe8ab
--- /dev/null
+++ b/storage/pebble/registers/comparer.go
@@ -0,0 +1,22 @@
+package registers
+
+import "github.com/cockroachdb/pebble/v2"
+
+const (
+	// Size of the block height encoded in the key.
+	HeightSuffixLen = 8
+)
+
+// NewMVCCComparer creates a new comparer with a
+// custom Split function that separates the height from the rest of the key.
+//
+// This is needed for SeekPrefixGE to work.
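+//
+// A sketch of the Split behavior (illustrative key): every version of a register shares
+// the prefix before the trailing 8 height bytes, so prefix iteration scans only one
+// register's version history:
+//
+//	split := NewMVCCComparer().Split([]byte("owner/key/12345678"))
+//	// split == 10: "owner/key/" is the prefix; "12345678" stands in for the 8 height bytes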
+func NewMVCCComparer() *pebble.Comparer {
+	comparer := *pebble.DefaultComparer
+	comparer.Split = func(a []byte) int {
+		return len(a) - HeightSuffixLen
+	}
+	comparer.Name = "flow.MVCCComparer"
+
+	return &comparer
+}
diff --git a/storage/pebble/registers/comparer_test.go b/storage/pebble/registers/comparer_test.go
new file mode 100644
index 00000000000..9781ad6b745
--- /dev/null
+++ b/storage/pebble/registers/comparer_test.go
@@ -0,0 +1,36 @@
+package registers
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func Test_NewMVCCComparer_Split(t *testing.T) {
+	t.Parallel()
+
+	comparer := NewMVCCComparer()
+
+	tests := []struct {
+		name string
+		arg  []byte
+		want int
+	}{
+		{name: "nil", arg: nil, want: -HeightSuffixLen},
+		{name: "empty", arg: []byte(""), want: -HeightSuffixLen},
+		{name: "edge0", arg: []byte("1234567"), want: -1},
+		{name: "edge1", arg: []byte("12345678"), want: 0},
+		{name: "edge2", arg: []byte("123456789"), want: 1},
+		{name: "split", arg: []byte("1234567890"), want: 2},
+	}
+
+	for _, tt := range tests {
+		tt := tt
+
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+
+			require.Equal(t, tt.want, comparer.Split(tt.arg))
+		})
+	}
+}
diff --git a/storage/pebble/registers_cache.go b/storage/pebble/registers_cache.go
new file mode 100644
index 00000000000..89c31aeefe9
--- /dev/null
+++ b/storage/pebble/registers_cache.go
@@ -0,0 +1,62 @@
+package pebble
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/storage"
+)
+
+const (
+	registerResourceName = "registers"
+)
+
+type RegistersCache struct {
+	*Registers
+	cache *ReadCache
+}
+
+var _ storage.RegisterIndex = (*RegistersCache)(nil)
+
+// NewRegistersCache wraps a read cache around Get requests to an underlying Registers object.
+func NewRegistersCache(registers *Registers, cacheType CacheType, size uint, metrics module.CacheMetrics) (*RegistersCache, error) {
+	if size == 0 {
+		return nil, errors.New("cache size cannot be 0")
+	}
+
+	cache, err := newReadCache(
+		metrics,
+		registerResourceName,
+		cacheType,
+		size,
+		func(key string) (flow.RegisterValue, error) {
+			return registers.lookupRegister([]byte(key))
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("could not create cache: %w", err)
+	}
+
+	return &RegistersCache{
+		Registers: registers,
+		cache:     cache,
+	}, nil
+}
+
+// Get returns the most recently updated payload for the given RegisterID.
+// "most recent" means the latest update at or below the given height.
+//
+// For example, if there are 2 values stored for register A at height 6 and 11, then
+// GetPayload(13, A) would return the value at height 11.
+//
+// - storage.ErrNotFound if no register values are found
+// - storage.ErrHeightNotIndexed if the requested height is out of the range of stored heights
+func (c *RegistersCache) Get(
+	reg flow.RegisterID,
+	height uint64,
+) (flow.RegisterValue, error) {
+	return c.cache.Get(newLookupKey(height, reg).String())
+}
diff --git a/storage/pebble/registers_test.go b/storage/pebble/registers_test.go
new file mode 100644
index 00000000000..58899e72084
--- /dev/null
+++ b/storage/pebble/registers_test.go
@@ -0,0 +1,379 @@
+package pebble
+
+import (
+	"bytes"
+	"fmt"
+	"math/rand"
+	"os"
+	"path"
+	"strconv"
+	"testing"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/pebble/registers"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestRegisters_Initialize tests that NewRegisters fails on a blank database that has not
+// been bootstrapped with first and latest heights.
+func TestRegisters_Initialize(t *testing.T) {
+	t.Parallel()
+	p, dir := unittest.TempPebbleDBWithOpts(t, nil)
+	// fail on blank database without FirstHeight and LastHeight set
+	_, err := NewRegisters(p, PruningDisabled)
+	require.Error(t, err)
+	// verify the error type
+	require.ErrorIs(t, err, storage.ErrNotBootstrapped)
+	err = os.RemoveAll(dir)
+	require.NoError(t, err)
+}
+
+// TestRegisters_Get tests the expected Get function behavior on a single height
+func TestRegisters_Get(t *testing.T) {
+	t.Parallel()
+	height1 := uint64(1)
+	RunWithRegistersStorageAtHeight1(t, func(r *Registers) {
+		// invalid keys return correct error type
+		invalidKey := flow.RegisterID{Owner: "invalid", Key: "invalid"}
+		_, err := r.Get(invalidKey, height1)
+		require.ErrorIs(t, err, storage.ErrNotFound)
+
+		// insert new data
+		height2 := uint64(2)
+		key1 := flow.RegisterID{Owner: "owner", Key: "key1"}
+		expectedValue1 := []byte("value1")
+		entries := flow.RegisterEntries{
+			{Key: key1, Value: expectedValue1},
+		}
+
+		err = r.Store(entries, height2)
+		require.NoError(t, err)
+
+		// happy path
+		value1, err := r.Get(key1, height2)
+		require.NoError(t, err)
+		require.Equal(t, expectedValue1, value1)
+
+		// out of range
+		beforeFirstHeight := uint64(0)
+		_, err = r.Get(key1, beforeFirstHeight)
+		require.ErrorIs(t, err, storage.ErrHeightNotIndexed)
+		afterLatestHeight := uint64(3)
+		_, err = r.Get(key1, afterLatestHeight)
+		require.ErrorIs(t, err, storage.ErrHeightNotIndexed)
+	})
+}
+
+// TestRegisters_Store tests the expected Store behavior on a single height
+func TestRegisters_Store(t *testing.T) {
+	t.Parallel()
+	RunWithRegistersStorageAtHeight1(t, func(r *Registers) {
+		// insert new data
+		key1 := flow.RegisterID{Owner: "owner", Key: "key1"}
+		expectedValue1 := []byte("value1")
+		entries := flow.RegisterEntries{
+			{Key: key1, Value: expectedValue1},
+		}
+		height2 := uint64(2)
+		err := r.Store(entries, height2)
+		require.NoError(t, err)
+
+		// idempotent at same height
+		err = r.Store(entries, height2)
+		require.NoError(t, err)
+
+		// out of range
+		height4 := uint64(4)
+		err = r.Store(entries, height4)
+		require.Error(t, err)
+
+		height1 := uint64(1)
+		err = r.Store(entries, height1)
+		require.Error(t, err)
+	})
+}
+
+// TestRegisters_Heights tests that FirstHeight and LatestHeight are reported correctly
+// as new entries are stored
+func TestRegisters_Heights(t *testing.T) {
+	t.Parallel()
+	RunWithRegistersStorageAtHeight1(t, func(r *Registers) {
+		// first and latest heights are the same
+		firstHeight := r.FirstHeight()
+		latestHeight := r.LatestHeight()
+		require.Equal(t, firstHeight, latestHeight)
+		// insert new data
+		key1 := flow.RegisterID{Owner: "owner", Key: "key1"}
+		expectedValue1 := []byte("value1")
+		entries := flow.RegisterEntries{
+			{Key: key1, Value: expectedValue1},
+		}
+		height2 := uint64(2)
+		err := r.Store(entries, height2)
+		require.NoError(t, err)
+
+		firstHeight2 := r.FirstHeight()
+		latestHeight2 := r.LatestHeight()
+
+		// new latest height
+		require.Equal(t, latestHeight2, height2)
+
+		// same first height
+		require.Equal(t, firstHeight, firstHeight2)
+	})
+}
+
+// TestRegisters_Store_RoundTrip tests the round trip of a payload storage.
+func TestRegisters_Store_RoundTrip(t *testing.T) {
+	t.Parallel()
+	minHeight := uint64(2)
+	RunWithRegistersStorageAtInitialHeights(t, minHeight, minHeight, func(r *Registers) {
+		key1 := flow.RegisterID{Owner: "owner", Key: "key1"}
+		expectedValue1 := []byte("value1")
+		entries := flow.RegisterEntries{
+			{Key: key1, Value: expectedValue1},
+		}
+		testHeight := minHeight + 1
+		// happy path
+		err := r.Store(entries, testHeight)
+		require.NoError(t, err)
+
+		// lookup with exact height returns the correct value
+		value1, err := r.Get(key1, testHeight)
+		require.NoError(t, err)
+		require.Equal(t, expectedValue1, value1)
+
+		value11, err := r.Get(key1, testHeight)
+		require.NoError(t, err)
+		require.Equal(t, expectedValue1, value11)
+	})
+}
+
+// TestRegisters_Store_Versioning tests the scan functionality for the most recent value
+func TestRegisters_Store_Versioning(t *testing.T) {
+	t.Parallel()
+	RunWithRegistersStorageAtHeight1(t, func(r *Registers) {
+		// key1 is a proper prefix of key11, and we save key11 first.
+		// key11 must be invisible to the prefix scan for key1.
+		key11 := flow.RegisterID{Owner: "owner", Key: "key11"}
+		expectedValue11 := []byte("value11")
+
+		key1 := flow.RegisterID{Owner: "owner", Key: "key1"}
+		expectedValue1 := []byte("value1")
+		entries1 := flow.RegisterEntries{
+			{Key: key1, Value: expectedValue1},
+			{Key: key11, Value: expectedValue11},
+		}
+
+		height2 := uint64(2)
+
+		// check increment in height after Store()
+		err := r.Store(entries1, height2)
+		require.NoError(t, err)
+
+		// Add new version of key1.
+		height3 := uint64(3)
+		expectedValue1ge3 := []byte("value1ge3")
+		entries3 := flow.RegisterEntries{
+			{Key: key1, Value: expectedValue1ge3},
+		}
+
+		// check increment in height after Store()
+		err = r.Store(entries3, height3)
+		require.NoError(t, err)
+		updatedHeight := r.LatestHeight()
+		require.Equal(t, updatedHeight, height3)
+
+		// test old version at previous height
+		value1, err := r.Get(key1, height2)
+		require.NoError(t, err)
+		require.Equal(t, expectedValue1, value1)
+
+		// test new version at new height
+		value1, err = r.Get(key1, height3)
+		require.NoError(t, err)
+		require.Equal(t, expectedValue1ge3, value1)
+
+		// test unchanged key at incremented height
+		value11, err := r.Get(key11, height3)
+		require.NoError(t, err)
+		require.Equal(t, expectedValue11, value11)
+
+		// make sure the key is unavailable at height 1
+		_, err = r.Get(key1, uint64(1))
+		require.ErrorIs(t, err, storage.ErrNotFound)
+	})
+}
+
+// TestRegisters_GetAndStoreEmptyOwner tests the behavior of storing and retrieving registers with
+// an empty owner value, which is used for global state variables.
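+//
+// Note (a clarifying sketch, not part of the original test): "" and
+// flow.EmptyAddress.Hex() ("0000000000000000") are distinct owners, and therefore
+// distinct register IDs:
+//
+//	empty := flow.RegisterID{Owner: "", Key: "uuid"}
+//	zero := flow.RegisterID{Owner: flow.EmptyAddress.Hex(), Key: "uuid"}
+//	// a value stored under `empty` is not readable under `zero`, and vice versa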
+func TestRegisters_GetAndStoreEmptyOwner(t *testing.T) {
+	t.Parallel()
+	height := uint64(2)
+	emptyOwnerKey := flow.RegisterID{Owner: "", Key: "uuid"}
+	zeroOwnerKey := flow.RegisterID{Owner: flow.EmptyAddress.Hex(), Key: "uuid"}
+	expectedValue := []byte("first value")
+	otherValue := []byte("other value")
+
+	t.Run("empty owner", func(t *testing.T) {
+		RunWithRegistersStorageAtInitialHeights(t, 1, 1, func(r *Registers) {
+			// First, only set the empty Owner key, and make sure the empty value is available,
+			// and the zero value returns an error
+			entries := flow.RegisterEntries{
+				{Key: emptyOwnerKey, Value: expectedValue},
+			}
+
+			err := r.Store(entries, height)
+			require.NoError(t, err)
+
+			actual, err := r.Get(emptyOwnerKey, height)
+			assert.NoError(t, err)
+			assert.Equal(t, expectedValue, actual)
+
+			actual, err = r.Get(zeroOwnerKey, height)
+			assert.Error(t, err)
+			assert.Nil(t, actual)
+
+			// Next, add the zero value, and make sure it is returned
+			entries = flow.RegisterEntries{
+				{Key: zeroOwnerKey, Value: otherValue},
+			}
+
+			err = r.Store(entries, height+1)
+			require.NoError(t, err)
+
+			actual, err = r.Get(zeroOwnerKey, height+1)
+			assert.NoError(t, err)
+			assert.Equal(t, otherValue, actual)
+		})
+	})
+
+	t.Run("zero owner", func(t *testing.T) {
+		RunWithRegistersStorageAtInitialHeights(t, 1, 1, func(r *Registers) {
+			// First, only set the zero Owner key, and make sure the zero value is available,
+			// and the empty value returns an error
+			entries := flow.RegisterEntries{
+				{Key: zeroOwnerKey, Value: expectedValue},
+			}
+
+			err := r.Store(entries, height)
+			require.NoError(t, err)
+
+			actual, err := r.Get(zeroOwnerKey, height)
+			assert.NoError(t, err)
+			assert.Equal(t, expectedValue, actual)
+
+			actual, err = r.Get(emptyOwnerKey, height)
+			assert.Error(t, err)
+			assert.Nil(t, actual)
+
+			// Next, add the empty value, and make sure it is returned
+			entries = flow.RegisterEntries{
+				{Key: emptyOwnerKey, Value: otherValue},
+			}
+
+			err = r.Store(entries, height+1)
+			require.NoError(t, err)
+
+			actual, err = r.Get(emptyOwnerKey, height+1)
+			assert.NoError(t, err)
+			assert.Equal(t, otherValue, actual)
+		})
+	})
+}
+
+// Benchmark_PayloadStorage benchmarks the Store method.
+func Benchmark_PayloadStorage(b *testing.B) {
+	cache := pebble.NewCache(32 << 20)
+	defer cache.Unref()
+	opts := DefaultPebbleOptions(unittest.Logger(), cache, registers.NewMVCCComparer())
+
+	dbpath := path.Join(b.TempDir(), "benchmark1.db")
+	db, err := pebble.Open(dbpath, opts)
+	require.NoError(b, err)
+	s, err := NewRegisters(db, PruningDisabled)
+	require.NoError(b, err)
+	require.NotNil(b, s)
+
+	owner := unittest.RandomAddressFixture()
+	batchSizeKey := flow.NewRegisterID(owner, "size")
+	const maxBatchSize = 1024
+	var totalBatchSize int
+
+	keyForBatchSize := func(i int) flow.RegisterID {
+		return flow.NewRegisterID(owner, strconv.Itoa(i))
+	}
+	valueForHeightAndKey := func(i, j int) []byte {
+		return []byte(fmt.Sprintf("%d-%d", i, j))
+	}
+	b.ResetTimer()
+
+	// Write a random number of entries in each batch.
+ for i := 0; i < b.N; i++ { + b.StopTimer() + batchSize := rand.Intn(maxBatchSize) + 1 + totalBatchSize += batchSize + entries := make(flow.RegisterEntries, 1, batchSize) + entries[0] = flow.RegisterEntry{ + Key: batchSizeKey, + Value: []byte(fmt.Sprintf("%d", batchSize)), + } + for j := 1; j < batchSize; j++ { + entries = append(entries, flow.RegisterEntry{ + Key: keyForBatchSize(j), + Value: valueForHeightAndKey(i, j), + }) + } + b.StartTimer() + + err = s.Store(entries, uint64(i)) + require.NoError(b, err) + } + + b.StopTimer() + + // verify written batches + for i := 0; i < b.N; i++ { + // get number of batches written for height + batchSizeBytes, err := s.Get(batchSizeKey, uint64(i)) + require.NoError(b, err) + batchSize, err := strconv.Atoi(string(batchSizeBytes)) + require.NoError(b, err) + + // verify that all entries can be read with correct values + for j := 1; j < batchSize; j++ { + value, err := s.Get(keyForBatchSize(j), uint64(i)) + require.NoError(b, err) + require.Equal(b, valueForHeightAndKey(i, j), value) + } + + // verify that the rest of the batches either do not exist or have a previous height + for j := batchSize; j < maxBatchSize+1; j++ { + value, err := s.Get(keyForBatchSize(j), uint64(i)) + require.NoError(b, err) + + if len(value) > 0 { + ij := bytes.Split(value, []byte("-")) + + // verify that we've got a value for a previous height + height, err := strconv.Atoi(string(ij[0])) + require.NoError(b, err) + require.Lessf(b, height, i, "height: %d, j: %d", height, j) + + // verify that we've got a value corresponding to the index + index, err := strconv.Atoi(string(ij[1])) + require.NoError(b, err) + require.Equal(b, index, j) + } + } + } +} + +func RunWithRegistersStorageAtHeight1(tb testing.TB, f func(r *Registers)) { + defaultHeight := uint64(1) + RunWithRegistersStorageAtInitialHeights(tb, defaultHeight, defaultHeight, f) +} diff --git a/storage/pebble/testutil.go b/storage/pebble/testutil.go new file mode 100644 index 00000000000..6bce658bd44 --- /dev/null +++ b/storage/pebble/testutil.go @@ -0,0 +1,32 @@ +package pebble + +import ( + "testing" + + "github.com/cockroachdb/pebble/v2" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/utils/unittest" +) + +func RunWithRegistersStorageAtInitialHeights(tb testing.TB, first uint64, latest uint64, f func(r *Registers)) { + unittest.RunWithTempDir(tb, func(dir string) { + db := NewBootstrappedRegistersWithPathForTest(tb, dir, first, latest) + r, err := NewRegisters(db, PruningDisabled) + require.NoError(tb, err) + + f(r) + + require.NoError(tb, db.Close()) + }) +} + +func NewBootstrappedRegistersWithPathForTest(tb testing.TB, dir string, first, latest uint64) *pebble.DB { + db, err := OpenRegisterPebbleDB(unittest.Logger(), dir) + require.NoError(tb, err) + + // insert initial heights to pebble + require.NoError(tb, db.Set(firstHeightKey, encodedUint64(first), nil)) + require.NoError(tb, db.Set(latestHeightKey, encodedUint64(latest), nil)) + return db +} diff --git a/storage/protocol_kv_store.go b/storage/protocol_kv_store.go new file mode 100644 index 00000000000..a87194111dd --- /dev/null +++ b/storage/protocol_kv_store.go @@ -0,0 +1,64 @@ +package storage + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" +) + +// ProtocolKVStore persists different snapshots of key-value stores [KV-stores]. At this level, the API +// deals with versioned data blobs, each representing a Snapshot of the Protocol State. 
The *current*
+// implementation allows retrieving snapshots from the database (e.g. to answer external API calls) even
+// for legacy protocol states whose versions are no longer supported. However, this _may_ change in the
+// future, such that only versioned snapshots that are supported by the current software can be retrieved.
+// TODO maybe rename to `ProtocolStateSnapshots` (?) because at this low level, we are not exposing the
+// KV-store, it is just an encoded data blob
+type ProtocolKVStore interface {
+	// BatchStore persists the KV-store snapshot in the database using the given ID as key.
+	// BatchStore is idempotent, i.e. it accepts repeated calls with the same pairs of (stateID, kvStore).
+	// Here, the ID is expected to be a collision-resistant hash of the snapshot (including the
+	// ProtocolStateVersion).
+	//
+	// No error is expected during normal operations.
+	BatchStore(rw ReaderBatchWriter, stateID flow.Identifier, data *flow.PSKeyValueStoreData) error
+
+	// BatchIndex appends the following operation to the provided write batch:
+	// we extend the map from `blockID` to `stateID`, where `blockID` references the
+	// block that _proposes_ the updated key-value store.
+	// BatchIndex is idempotent, i.e. it accepts repeated calls with the same pairs of (blockID, stateID).
+	// Per protocol convention, the block references the `stateID`. As the `blockID` is a collision-resistant hash,
+	// for the same `blockID`, BatchIndex will reject changing the data.
+	// Protocol convention:
+	//   - Consider block B, whose ingestion might potentially lead to an updated KV store. For example,
+	//     the KV store changes if we seal some execution results emitting specific service events.
+	//   - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store.
+	//   - IMPORTANT: The updated state requires confirmation by a QC and will only become active at the
+	//     child block, _after_ validating the QC.
+	//
+	// CAUTION: To prevent data corruption, we need to guarantee atomicity of the existence-check and the
+	// subsequent database write. Hence, we require the caller to acquire [storage.LockInsertBlock] and hold
+	// it until the database write has been committed.
+	//
+	// Expected error returns during normal operations:
+	//   - [storage.ErrAlreadyExists] if a KV store for the given blockID has already been indexed
+	BatchIndex(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error
+
+	// ByID retrieves the KV store snapshot with the given ID.
+	// Expected errors during normal operations:
+	//   - storage.ErrNotFound if no snapshot with the given Identifier is known.
+	ByID(id flow.Identifier) (*flow.PSKeyValueStoreData, error)
+
+	// ByBlockID retrieves the kv-store snapshot that the block with the given ID proposes.
+	// CAUTION: this store snapshot requires confirmation by a QC and will only become active at the child block,
+	// _after_ validating the QC. Protocol convention:
+	//   - Consider block B, whose ingestion might potentially lead to an updated KV store state.
+	//     For example, the state changes if we seal some execution results emitting specific service events.
+	//   - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. As value,
+	//     the hash of the resulting state at the end of processing B is to be used.
+	//   - CAUTION: The updated state requires confirmation by a QC and will only become active at the child block,
+	//     _after_ validating the QC.
+	//
+	// Expected errors during normal operations:
+	//   - storage.ErrNotFound if no snapshot has been indexed for the given block.
+	ByBlockID(blockID flow.Identifier) (*flow.PSKeyValueStoreData, error)
+}
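+
+// A hedged usage sketch for the write path (the names `lockManager`, `db`, `kvStore`,
+// `blockID`, `stateID`, and `data` are assumptions for illustration; per the BatchIndex
+// contract, storage.LockInsertBlock must be held from the existence check through the commit):
+//
+//	lctx := lockManager.NewContext()
+//	defer lctx.Release()
+//	if err := lctx.AcquireLock(LockInsertBlock); err != nil {
+//		return err
+//	}
+//	return db.WithReaderBatchWriter(func(rw ReaderBatchWriter) error {
+//		if err := kvStore.BatchStore(rw, stateID, data); err != nil {
+//			return err
+//		}
+//		return kvStore.BatchIndex(lctx, rw, blockID, stateID)
+//	})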
diff --git a/storage/qcs.go b/storage/qcs.go
index fab51e125ea..7a276d2f426 100644
--- a/storage/qcs.go
+++ b/storage/qcs.go
@@ -1,8 +1,9 @@
 package storage
 
 import (
+	"github.com/jordanschalm/lockctx"
+
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/storage/badger/transaction"
 )
 
 // QuorumCertificates represents storage for Quorum Certificates.
@@ -11,9 +12,16 @@ import (
 // In the example below, `QC_1` is indexed by `Block_1.ID()`
 // Block_1 <- Block_2(QC_1)
 type QuorumCertificates interface {
-	// StoreTx stores a Quorum Certificate as part of database transaction QC is indexed by QC.BlockID.
-	// * storage.ErrAlreadyExists if any QC for blockID is already stored
-	StoreTx(qc *flow.QuorumCertificate) func(*transaction.Tx) error
+	// BatchStore stores a Quorum Certificate as part of a database batch update. The QC is indexed by QC.BlockID.
+	//
+	// Note: For the same block, different QCs can easily be constructed by selecting different sub-sets of the received votes
+	// (provided more than the minimal number of consensus participants voted, which is typically the case). In most cases, it
+	// is only important that a block has been certified, but irrelevant who specifically contributed to the QC. Therefore, we
+	// only store the first QC.
+	//
+	// If *any* quorum certificate for QC.BlockID has already been stored, a `storage.ErrAlreadyExists` is returned (typically benign).
+	BatchStore(lockctx.Proof, ReaderBatchWriter, *flow.QuorumCertificate) error
+
 	// ByBlockID returns QC that certifies block referred by blockID.
 	// * storage.ErrNotFound if no QC for blockID doesn't exist.
 	ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error)
diff --git a/storage/receipts.go b/storage/receipts.go
index 1aa95c6368c..14c6a9d1148 100644
--- a/storage/receipts.go
+++ b/storage/receipts.go
@@ -1,8 +1,8 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package storage
 
 import (
+	"github.com/jordanschalm/lockctx"
+
 	"github.com/onflow/flow-go/model/flow"
 )
 
@@ -16,13 +16,15 @@ type ExecutionReceipts interface {
 	Store(receipt *flow.ExecutionReceipt) error
 
 	// BatchStore stores an execution receipt inside given batch
-	BatchStore(receipt *flow.ExecutionReceipt, batch BatchStorage) error
+	BatchStore(receipt *flow.ExecutionReceipt, batch ReaderBatchWriter) error
 
 	// ByID retrieves an execution receipt by its ID.
 	ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error)
 
 	// ByBlockID retrieves all known execution receipts for the given block
 	// (from any Execution Node).
+	//
+	// No errors are expected during normal operations.
 	ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error)
 }
 
@@ -30,23 +32,20 @@ type ExecutionReceipts interface {
 // them. Instead, it includes the "My" in the method name in order to highlight the notion
 // of "MY execution receipt", from the viewpoint of an individual Execution Node.
 type MyExecutionReceipts interface {
-	// StoreMyReceipt stores the receipt and marks it as mine (trusted). My
-	// receipts are indexed by the block whose result they compute. Currently,
-	// we only support indexing a _single_ receipt per block. Attempting to
-	// store conflicting receipts for the same block will error.
-	StoreMyReceipt(receipt *flow.ExecutionReceipt) error
-
-	// BatchStoreMyReceipt stores blockID-to-my-receipt index entry keyed by blockID in a provided batch.
-	// No errors are expected during normal operation
+	//
 	// If entity fails marshalling, the error is wrapped in a generic error and returned.
-	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
-	BatchStoreMyReceipt(receipt *flow.ExecutionReceipt, batch BatchStorage) error
+	// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
+	//
+	// Expected error returns during *normal* operations:
+	//   - `storage.ErrDataMismatch` if a *different* receipt has already been indexed for the same block
+	BatchStoreMyReceipt(lctx lockctx.Proof, receipt *flow.ExecutionReceipt, batch ReaderBatchWriter) error
 
 	// MyReceipt retrieves my receipt for the given block.
 	MyReceipt(blockID flow.Identifier) (*flow.ExecutionReceipt, error)
 
 	// BatchRemoveIndexByBlockID removes blockID-to-my-execution-receipt index entry keyed by a blockID in a provided batch
 	// No errors are expected during normal operation, even if no entries are matched.
-	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
-	BatchRemoveIndexByBlockID(blockID flow.Identifier, batch BatchStorage) error
+	// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
+	BatchRemoveIndexByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error
 }
diff --git a/storage/registers.go b/storage/registers.go
new file mode 100644
index 00000000000..1eb56fcef4a
--- /dev/null
+++ b/storage/registers.go
@@ -0,0 +1,37 @@
+package storage
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// RegisterIndexReader defines readonly methods for the register index.
+type RegisterIndexReader interface {
+	// Get returns the register value for the given register ID at the given block height.
+	//
+	// If the register was not updated exactly at the given height, the value at the highest
+	// indexed height at or below the given height is returned.
+	// Expected errors:
+	//   - storage.ErrHeightNotIndexed if the given height was not indexed yet or is lower than the first indexed height.
+	//   - storage.ErrNotFound if the given height is indexed, but the register does not exist.
+	Get(ID flow.RegisterID, height uint64) (flow.RegisterValue, error)
+
+	// LatestHeight returns the latest indexed height.
+	LatestHeight() uint64
+
+	// FirstHeight returns the height at which indexing started, i.e. the first indexed height found in the store.
+	FirstHeight() uint64
+}
+
+// RegisterIndex defines methods for the register index.
+type RegisterIndex interface {
+	RegisterIndexReader
+
+	// Store stores a batch of register entries at the provided block height.
+	//
+	// The provided height must either be one higher than the current height, or the same as the
+	// current height to ensure idempotency; otherwise an error is returned. If the height is not
+	// within those bounds, there is either a bug or state corruption.
+	//
+	// No errors are expected during normal operation.
+	Store(entries flow.RegisterEntries, height uint64) error
+}
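+
+// A sketch of the expected call pattern (illustrative; `idx` is any RegisterIndex):
+//
+//	h := idx.LatestHeight()
+//	_ = idx.Store(entries, h+1) // next height: stored
+//	_ = idx.Store(entries, h+1) // same height again: accepted for idempotency
+//	_ = idx.Store(entries, h+3) // gap above the latest height: error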
diff --git a/storage/results.go b/storage/results.go
index 39fd4d810e1..a943866370e 100644
--- a/storage/results.go
+++ b/storage/results.go
@@ -1,25 +1,25 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package storage
 
 import (
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/storage/badger/transaction"
 )
 
+type ExecutionResultsReader interface {
+	// ByID retrieves an execution result by its ID. Returns `ErrNotFound` if `resultID` is unknown.
+	ByID(resultID flow.Identifier) (*flow.ExecutionResult, error)
+
+	// ByBlockID retrieves an execution result by block ID.
+	ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error)
+}
+
 type ExecutionResults interface {
+	ExecutionResultsReader
 
 	// Store stores an execution result.
 	Store(result *flow.ExecutionResult) error
 
 	// BatchStore stores an execution result in a given batch
-	BatchStore(result *flow.ExecutionResult, batch BatchStorage) error
-
-	// ByID retrieves an execution result by its ID.
-	ByID(resultID flow.Identifier) (*flow.ExecutionResult, error)
-
-	// ByIDTx retrieves an execution result by its ID in the context of the given transaction
-	ByIDTx(resultID flow.Identifier) func(*transaction.Tx) (*flow.ExecutionResult, error)
+	BatchStore(result *flow.ExecutionResult, batch ReaderBatchWriter) error
 
 	// Index indexes an execution result by block ID.
 	Index(blockID flow.Identifier, resultID flow.Identifier) error
@@ -28,13 +28,10 @@ type ExecutionResults interface {
 	ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error
 
 	// BatchIndex indexes an execution result by block ID in a given batch
-	BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch BatchStorage) error
-
-	// ByBlockID retrieves an execution result by block ID.
-	ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error)
+	BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch ReaderBatchWriter) error
 
 	// BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch.
 	// No errors are expected during normal operation, even if no entries are matched.
 	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
-	BatchRemoveIndexByBlockID(blockID flow.Identifier, batch BatchStorage) error
+	BatchRemoveIndexByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error
 }
diff --git a/storage/seals.go b/storage/seals.go
index c394098d30d..43fb783ea72 100644
--- a/storage/seals.go
+++ b/storage/seals.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package storage
 
 import (
diff --git a/storage/store/approvals.go b/storage/store/approvals.go
new file mode 100644
index 00000000000..e53ccd7e8b8
--- /dev/null
+++ b/storage/store/approvals.go
@@ -0,0 +1,107 @@
+package store
+
+import (
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+// ResultApprovals implements persistent storage for result approvals.
+//
+// CAUTION: suitable only for _Verification Nodes_ for persisting their _own_ approvals!
+//   - In general, the Flow protocol requires multiple approvals for the same chunk from different
+//     verification nodes. In other words, there are multiple different approvals for the same chunk.
+// - Internally, ResultApprovals populates an index from Executed Chunk ➜ ResultApproval. This is +// *only safe* for Verification Nodes when tracking their own approvals (for the same ExecutionResult, +// a Verifier will always produce the same approval) +type ResultApprovals struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.ResultApproval] + lockManager lockctx.Manager +} + +var _ storage.ResultApprovals = (*ResultApprovals)(nil) + +func NewResultApprovals(collector module.CacheMetrics, db storage.DB, lockManager lockctx.Manager) *ResultApprovals { + retrieve := func(r storage.Reader, approvalID flow.Identifier) (*flow.ResultApproval, error) { + var approval flow.ResultApproval + err := operation.RetrieveResultApproval(r, approvalID, &approval) + return &approval, err + } + + return &ResultApprovals{ + lockManager: lockManager, + db: db, + cache: newCache(collector, metrics.ResourceResultApprovals, + withLimit[flow.Identifier, *flow.ResultApproval](flow.DefaultTransactionExpiry+100), + withRetrieve(retrieve)), + } +} + +// StoreMyApproval returns a functor, whose execution +// - will store the given ResultApproval +// - and index it by result ID and chunk index. +// - requires storage.LockIndexResultApproval lock to be held by the caller +// +// The functor's expected error returns during normal operation are: +// - `storage.ErrDataMismatch` if a *different* approval for the same key pair (ExecutionResultID, chunk index) is already indexed +// +// CAUTION: the Flow protocol requires multiple approvals for the same chunk from different verification +// nodes. In other words, there are multiple different approvals for the same chunk. Therefore, the index +// Executed Chunk ➜ ResultApproval ID (populated here) is *only safe* to be used by Verification Nodes +// for tracking their own approvals. +// +// For the same ExecutionResult, a Verifier will always produce the same approval. Therefore, this operation +// is idempotent, i.e. repeated calls with the *same inputs* are equivalent to just calling the method once; +// still the method succeeds on each call. However, when attempting to index *different* ResultApproval IDs +// for the same key (resultID, chunkIndex) this method returns an exception, as this should never happen for +// a correct Verification Node indexing its own approvals. +// It returns a functor so that some computation (such as computing approval ID) can be done +// before acquiring the lock. 
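+//
+// A hedged usage sketch (mirrors the tests; `lockManager` is assumed to be a lockctx.Manager):
+//
+//	storing := approvals.StoreMyApproval(approval) // cheap pre-computation, no lock needed
+//	lctx := lockManager.NewContext()
+//	defer lctx.Release()
+//	if err := lctx.AcquireLock(storage.LockIndexResultApproval); err != nil { ... }
+//	err := storing(lctx) // performs the database write while the lock is held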
+func (r *ResultApprovals) StoreMyApproval(approval *flow.ResultApproval) func(lctx lockctx.Proof) error { + // pre-compute the approval ID and encoded data to be stored + // db operation is deferred until the returned function is called + storing := operation.InsertAndIndexResultApproval(approval) + + return func(lctx lockctx.Proof) error { + if !lctx.HoldsLock(storage.LockIndexResultApproval) { + return fmt.Errorf("missing lock for index result approval") + } + + return r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + storage.OnCommitSucceed(rw, func() { + // the success callback is called after the lock is released, so + // the id computation here would not increase the lock contention + r.cache.Insert(approval.ID(), approval) + }) + return storing(lctx, rw) + }) + } +} + +// ByID retrieves a ResultApproval by its ID +func (r *ResultApprovals) ByID(approvalID flow.Identifier) (*flow.ResultApproval, error) { + val, err := r.cache.Get(r.db.Reader(), approvalID) + if err != nil { + return nil, err + } + return val, nil +} + +// ByChunk retrieves a ResultApproval by result ID and chunk index. The +// ResultApprovals store is only used within a verification node, where it is +// assumed that there is never more than one approval per chunk. +func (r *ResultApprovals) ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error) { + var approvalID flow.Identifier + err := operation.LookupResultApproval(r.db.Reader(), resultID, chunkIndex, &approvalID) + if err != nil { + return nil, fmt.Errorf("could not lookup result approval ID: %w", err) + } + return r.ByID(approvalID) +} diff --git a/storage/store/approvals_test.go b/storage/store/approvals_test.go new file mode 100644 index 00000000000..1eac3a164fb --- /dev/null +++ b/storage/store/approvals_test.go @@ -0,0 +1,160 @@ +package store_test + +import ( + "errors" + "sync" + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestApprovalStoreAndRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + lockManager := storage.NewTestingLockManager() + store := store.NewResultApprovals(metrics, db, lockManager) + + // create the deferred database operation to store `approval`; we deliberately + // do this outside of the lock to confirm that the lock is not required for + // creating the operation -- only for executing the storage write further below + approval := unittest.ResultApprovalFixture() + storing := store.StoreMyApproval(approval) + err := unittest.WithLock(t, lockManager, storage.LockIndexResultApproval, func(lctx lockctx.Context) error { + return storing(lctx) + }) + require.NoError(t, err) + + // retrieve entire approval by its ID + byID, err := store.ByID(approval.ID()) + require.NoError(t, err) + require.Equal(t, approval, byID) + + // retrieve approval by pair (executed result ID, chunk index) + byChunk, err := store.ByChunk(approval.Body.ExecutionResultID, approval.Body.ChunkIndex) + require.NoError(t, err) + require.Equal(t, approval, byChunk) + }) +} + +func TestApprovalStoreTwice(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + lockManager 
:= storage.NewTestingLockManager()
+		store := store.NewResultApprovals(metrics, db, lockManager)
+
+		// create the deferred database operation to store `approval`; we deliberately
+		// do this outside of the lock to confirm that the lock is not required for
+		// creating the operation -- only for executing the storage write further below
+		approval := unittest.ResultApprovalFixture()
+		storing := store.StoreMyApproval(approval)
+		err := unittest.WithLock(t, lockManager, storage.LockIndexResultApproval, func(lctx lockctx.Context) error {
+			return storing(lctx)
+		})
+		require.NoError(t, err)
+
+		err = unittest.WithLock(t, lockManager, storage.LockIndexResultApproval, func(lctx lockctx.Context) error {
+			return storing(lctx) // repeated storage of the same approval should be a no-op
+		})
+		require.NoError(t, err)
+	})
+}
+
+func TestApprovalStoreTwoDifferentApprovalsShouldFail(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		lockManager := storage.NewTestingLockManager()
+		store := store.NewResultApprovals(metrics, db, lockManager)
+
+		approval1, approval2 := twoApprovalsForTheSameResult(t)
+
+		storing := store.StoreMyApproval(approval1)
+		err := unittest.WithLock(t, lockManager, storage.LockIndexResultApproval, func(lctx lockctx.Context) error {
+			return storing(lctx)
+		})
+		require.NoError(t, err)
+
+		// we can store a different approval, but we can't index a different
+		// approval for the same chunk.
+		storing2 := store.StoreMyApproval(approval2)
+		err = unittest.WithLock(t, lockManager, storage.LockIndexResultApproval, func(lctx lockctx.Context) error {
+			return storing2(lctx)
+		})
+		require.ErrorIs(t, err, storage.ErrDataMismatch)
+	})
+}
+
+// verify that storing and indexing two conflicting approvals concurrently is impossible;
+// we expect that one operation succeeds and the other one fails
+func TestApprovalStoreTwoDifferentApprovalsConcurrently(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		lockManager := storage.NewTestingLockManager()
+		store := store.NewResultApprovals(metrics, db, lockManager)
+
+		approval1, approval2 := twoApprovalsForTheSameResult(t)
+
+		var startSignal sync.WaitGroup // goroutines attempting store operations wait for this signal to start concurrently
+		startSignal.Add(1)             // expecting one signal from the main thread to start both goroutines
+		var doneSignal sync.WaitGroup  // the main thread waits on this for the goroutines attempting store operations to finish
+		doneSignal.Add(2)              // expecting two goroutines to signal completion
+
+		var firstIndexErr, secondIndexErr error
+
+		// First goroutine stores and indexes the first approval.
+		go func() {
+			storing := store.StoreMyApproval(approval1)
+
+			startSignal.Wait()
+			err := unittest.WithLock(t, lockManager, storage.LockIndexResultApproval, func(lctx lockctx.Context) error {
+				firstIndexErr = storing(lctx)
+				return nil
+			})
+			require.NoError(t, err)
+			doneSignal.Done()
+		}()
+
+		// Second goroutine stores and tries to index the second approval for the same chunk.
+		go func() {
+			storing := store.StoreMyApproval(approval2)
+
+			startSignal.Wait()
+			err := unittest.WithLock(t, lockManager, storage.LockIndexResultApproval, func(lctx lockctx.Context) error {
+				secondIndexErr = storing(lctx)
+				return nil
+			})
+			require.NoError(t, err)
+			doneSignal.Done()
+		}()
+
+		startSignal.Done() // start both goroutines
+		doneSignal.Wait()  // wait for both goroutines to finish
+
+		// Check that one of the Index operations succeeded and the other failed
+		if firstIndexErr == nil {
+			require.Error(t, secondIndexErr)
+			require.True(t, errors.Is(secondIndexErr, storage.ErrDataMismatch))
+		} else {
+			require.NoError(t, secondIndexErr)
+			require.True(t, errors.Is(firstIndexErr, storage.ErrDataMismatch))
+		}
+	})
+}
+
+func twoApprovalsForTheSameResult(t *testing.T) (*flow.ResultApproval, *flow.ResultApproval) {
+	approval1 := unittest.ResultApprovalFixture()
+	approval2 := unittest.ResultApprovalFixture()
+	// we have two entirely different approvals; now modify the second to reference the same result and chunk as the first
+	approval2.Body.ChunkIndex = approval1.Body.ChunkIndex
+	approval2.Body.ExecutionResultID = approval1.Body.ExecutionResultID
+	// sanity check: make sure the two approvals are different
+	require.NotEqual(t, approval1.ID(), approval2.ID(), "expected two different approvals, but got the same ID")
+	return approval1, approval2
+}
diff --git a/storage/store/blocks.go b/storage/store/blocks.go
new file mode 100644
index 00000000000..c34a0cc53ea
--- /dev/null
+++ b/storage/store/blocks.go
@@ -0,0 +1,238 @@
+package store
+
+import (
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+// Blocks implements simple block storage on top of a storage.DB.
+type Blocks struct {
+	db       storage.DB
+	headers  *Headers
+	payloads *Payloads
+}
+
+var _ storage.Blocks = (*Blocks)(nil)
+
+// NewBlocks creates a Blocks storage backed by the given database and the headers and
+// payloads storages.
+func NewBlocks(db storage.DB, headers *Headers, payloads *Payloads) *Blocks {
+	b := &Blocks{
+		db:       db,
+		headers:  headers,
+		payloads: payloads,
+	}
+	return b
+}
+
+// BatchStore stores a valid block in a batch.
+//
+// Expected errors during normal operations:
+//   - [storage.ErrAlreadyExists] if some block with the same ID has already been stored
+func (b *Blocks) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, proposal *flow.Proposal) error {
+	blockID := proposal.Block.ID()
+	err := b.headers.storeTx(lctx, rw, blockID, proposal.Block.ToHeader(), proposal.ProposerSigData)
+	if err != nil {
+		return fmt.Errorf("could not store header %v: %w", blockID, err)
+	}
+	err = b.payloads.storeTx(lctx, rw, blockID, &proposal.Block.Payload)
+	if err != nil {
+		return fmt.Errorf("could not store payload: %w", err)
+	}
+	return nil
+}
+
+// retrieve returns the block with the given hash. It is available for
+// finalized and pending blocks.
+// Expected errors during normal operations:
+// - storage.ErrNotFound if no block is found
+func (b *Blocks) retrieve(blockID flow.Identifier) (*flow.Block, error) {
+	header, err := b.headers.retrieveTx(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve header: %w", err)
+	}
+	payload, err := b.payloads.retrieveTx(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve payload: %w", err)
+	}
+	untrustedBlock := flow.UntrustedBlock{
+		HeaderBody: header.HeaderBody,
+		Payload:    *payload,
+	}
+	var block *flow.Block
+	if header.ContainsParentQC() {
+		block, err = flow.NewBlock(untrustedBlock)
+		if err != nil {
+			return nil, fmt.Errorf("could not construct block: %w", err)
+		}
+	} else {
+		block, err = flow.NewRootBlock(untrustedBlock)
+		if err != nil {
+			return nil, fmt.Errorf("could not construct root block: %w", err)
+		}
+	}
+	return block, nil
+}
+
+// retrieveProposal returns the proposal with the given block ID.
+// It is available for finalized and pending blocks.
+// Expected errors during normal operations:
+// - storage.ErrNotFound if no block is found
+func (b *Blocks) retrieveProposal(blockID flow.Identifier) (*flow.Proposal, error) {
+	block, err := b.retrieve(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve block: %w", err)
+	}
+	sig, err := b.headers.sigs.retrieveTx(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve proposer signature: %w", err)
+	}
+
+	untrustedProposal := flow.UntrustedProposal{
+		Block:           *block,
+		ProposerSigData: sig,
+	}
+	var proposal *flow.Proposal
+	if block.ContainsParentQC() {
+		proposal, err = flow.NewProposal(untrustedProposal)
+		if err != nil {
+			return nil, fmt.Errorf("could not construct proposal: %w", err)
+		}
+	} else {
+		proposal, err = flow.NewRootProposal(untrustedProposal)
+		if err != nil {
+			return nil, fmt.Errorf("could not construct root proposal: %w", err)
+		}
+	}
+
+	return proposal, nil
+}
+
+// ByID returns the block with the given ID. It is available for all incorporated blocks (validated blocks
+// that have been appended to any of the known forks) no matter whether the block has been finalized or not.
+//
+// Error returns:
+// - storage.ErrNotFound if no block with the corresponding ID was found
+// - generic error in case of unexpected failure from the database layer, or failure
+// to decode an existing database value
+func (b *Blocks) ByID(blockID flow.Identifier) (*flow.Block, error) {
+	return b.retrieve(blockID)
+}
+
+// ProposalByID returns the block with the given ID, along with the proposer's signature on it.
+// It is available for all incorporated blocks (validated blocks that have been appended to any
+// of the known forks) no matter whether the block has been finalized or not.
+//
+// Error returns:
+// - storage.ErrNotFound if no block with the corresponding ID was found
+// - generic error in case of unexpected failure from the database layer, or failure
+// to decode an existing database value
+func (b *Blocks) ProposalByID(blockID flow.Identifier) (*flow.Proposal, error) {
+	return b.retrieveProposal(blockID)
+}
+
+// ByView returns the block with the given view. It is only available for certified blocks.
+// Certified blocks are blocks that have received a QC. HotStuff guarantees that for each view,
+// at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique
+// even for non-finalized blocks.
+// Expected errors during normal operations:
+// - `storage.ErrNotFound` if no certified block is known at the given view.
+func (b *Blocks) ByView(view uint64) (*flow.Block, error) {
+	blockID, err := b.headers.BlockIDByView(view)
+	if err != nil {
+		return nil, err
+	}
+	return b.ByID(blockID)
+}
+
+// ProposalByView returns the block proposal with the given view. It is only available for certified blocks.
+//
+// Expected errors during normal operations:
+// - `storage.ErrNotFound` if no certified block is known at the given view.
+func (b *Blocks) ProposalByView(view uint64) (*flow.Proposal, error) {
+	blockID, err := b.headers.BlockIDByView(view)
+	if err != nil {
+		return nil, err
+	}
+	return b.retrieveProposal(blockID)
+}
+
+// ByHeight returns the block at the given height. It is only available
+// for finalized blocks.
+//
+// Error returns:
+// - storage.ErrNotFound if no block for the corresponding height was found
+// - generic error in case of unexpected failure from the database layer, or failure
+// to decode an existing database value
+func (b *Blocks) ByHeight(height uint64) (*flow.Block, error) {
+	blockID, err := b.headers.retrieveIdByHeightTx(height)
+	if err != nil {
+		return nil, err
+	}
+	return b.retrieve(blockID)
+}
+
+// ProposalByHeight returns the block at the given height, along with the proposer's
+// signature on it. It is only available for finalized blocks.
+//
+// Error returns:
+// - storage.ErrNotFound if no block proposal for the corresponding height was found
+// - generic error in case of unexpected failure from the database layer, or failure
+// to decode an existing database value
+func (b *Blocks) ProposalByHeight(height uint64) (*flow.Proposal, error) {
+	blockID, err := b.headers.retrieveIdByHeightTx(height)
+	if err != nil {
+		return nil, err
+	}
+	return b.retrieveProposal(blockID)
+}
+
+// ByCollectionID returns the finalized block containing the [flow.CollectionGuarantee] with the given collection ID.
+// This method is only available for collections included in finalized blocks.
+// While consensus nodes verify that collections are not repeated within the same fork,
+// each different fork can contain a recent collection once. Therefore, we must wait for
+// finality.
+// CAUTION: this method is not backed by a cache and therefore comparatively slow!
+//
+// Error returns:
+// - storage.ErrNotFound if the collection ID was not found
+// - generic error in case of unexpected failure from the database layer, or failure
+// to decode an existing database value
+func (b *Blocks) ByCollectionID(collID flow.Identifier) (*flow.Block, error) {
+	guarantee, err := b.payloads.guarantees.ByCollectionID(collID)
+	if err != nil {
+		return nil, fmt.Errorf("could not look up guarantee: %w", err)
+	}
+	var blockID flow.Identifier
+	err = operation.LookupBlockContainingCollectionGuarantee(b.db.Reader(), guarantee.ID(), &blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not look up block: %w", err)
+	}
+	return b.ByID(blockID)
+}
+
+// IndexBlockContainingCollectionGuarantees populates an index `guaranteeID->blockID` for each guarantee
+// which appears in the block.
+// CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation
+// assumes a one-to-one map from guarantee ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY
+// *and* only in the absence of byzantine collector clusters (which the mature protocol must tolerate).
+// Hence, this function should be treated as a temporary solution, which requires generalization
+// (one-to-many mapping) for soft finality and the mature protocol.
+//
+// Error returns:
+// - generic error in case of unexpected failure from the database layer or encoding failure.
+func (b *Blocks) IndexBlockContainingCollectionGuarantees(blockID flow.Identifier, guaranteeIDs []flow.Identifier) error {
+	return b.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		for _, guaranteeID := range guaranteeIDs {
+			err := operation.IndexBlockContainingCollectionGuarantee(rw.Writer(), guaranteeID, blockID)
+			if err != nil {
+				return fmt.Errorf("could not index collection block (%x): %w", guaranteeID, err)
+			}
+		}
+		return nil
+	})
+}
diff --git a/storage/store/blocks_test.go b/storage/store/blocks_test.go
new file mode 100644
index 00000000000..f88ceeb977b
--- /dev/null
+++ b/storage/store/blocks_test.go
@@ -0,0 +1,161 @@
+package store_test
+
+import (
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestBlockStoreAndRetrieve(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		cacheMetrics := &metrics.NoopCollector{}
+		// verify that after storing a block, we are able to retrieve it
+		blocks := store.InitAll(cacheMetrics, db).Blocks
+		block := unittest.FullBlockFixture()
+		prop := unittest.ProposalFromBlock(block)
+
+		err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx, rw, prop)
+			})
+		})
+		require.NoError(t, err)
+
+		retrieved, err := blocks.ByID(block.ID())
+		require.NoError(t, err)
+		require.Equal(t, *block, *retrieved)
+
+		// repeated storage of the same block should return storage.ErrAlreadyExists
+		err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx2 lockctx.Context) error {
+			err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx2, rw, prop)
+			})
+			require.ErrorIs(t, err, storage.ErrAlreadyExists)
+			return nil
+		})
+		require.NoError(t, err)
+
+		// verify that after a restart, the block stored in the database is the same
+		// as the original
+		blocksAfterRestart := store.InitAll(cacheMetrics, db).Blocks
+		receivedAfterRestart, err := blocksAfterRestart.ByID(block.ID())
+		require.NoError(t, err)
+		require.Equal(t, *block, *receivedAfterRestart)
+	})
+}
+
+func TestBlockIndexByHeightAndRetrieve(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		cacheMetrics := &metrics.NoopCollector{}
+		blocks := store.InitAll(cacheMetrics, db).Blocks
+		block := unittest.FullBlockFixture()
+		prop := unittest.ProposalFromBlock(block)
+
+		// First store the block
+		err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx, rw, prop)
+			})
+		})
+		require.NoError(t, err)
+
+		// Now index the block by height (requires LockFinalizeBlock)
+		err = unittest.WithLock(t,
lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, block.Height, block.ID()) + }) + }) + require.NoError(t, err) + + // Verify we can retrieve the block by height + retrievedByHeight, err := blocks.ByHeight(block.Height) + require.NoError(t, err) + require.Equal(t, *block, *retrievedByHeight) + + // Verify we can retrieve the proposal by height + retrievedProposalByHeight, err := blocks.ProposalByHeight(block.Height) + require.NoError(t, err) + require.Equal(t, *prop, *retrievedProposalByHeight) + + // Test that indexing the same height again returns ErrAlreadyExists + err = unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexFinalizedBlockByHeight(lctx, rw, block.Height, block.ID()) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + return nil + }) + require.NoError(t, err) + + // Test that retrieving by non-existent height returns ErrNotFound + _, err = blocks.ByHeight(block.Height + 1000) + require.ErrorIs(t, err, storage.ErrNotFound) + + // Verify after a restart, the block indexed by height is still retrievable + blocksAfterRestart := store.InitAll(cacheMetrics, db).Blocks + receivedAfterRestart, err := blocksAfterRestart.ByHeight(block.Height) + require.NoError(t, err) + require.Equal(t, *block, *receivedAfterRestart) + }) +} + +func TestBlockIndexByViewAndRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + cacheMetrics := &metrics.NoopCollector{} + blocks := store.InitAll(cacheMetrics, db).Blocks + block := unittest.FullBlockFixture() + prop := unittest.ProposalFromBlock(block) + + // First store the block and index by view + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := blocks.BatchStore(lctx, rw, prop) + if err != nil { + return err + } + // Now index the block by view (requires LockInsertBlock) + return operation.IndexCertifiedBlockByView(lctx, rw, block.View, block.ID()) + }) + }) + require.NoError(t, err) + + // Verify we can retrieve the block by view + retrievedByView, err := blocks.ByView(block.View) + require.NoError(t, err) + require.Equal(t, *block, *retrievedByView) + + // Verify we can retrieve the proposal by view + retrievedProposalByView, err := blocks.ProposalByView(block.View) + require.NoError(t, err) + require.Equal(t, *prop, *retrievedProposalByView) + + // Test that indexing the same view again returns ErrAlreadyExists + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexCertifiedBlockByView(lctx, rw, block.View, block.ID()) + }) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + + // Test that retrieving by non-existent view returns ErrNotFound + _, err = blocks.ByView(block.View + 1000) + require.ErrorIs(t, err, storage.ErrNotFound) + + // Verify after a restart, the block indexed by view is still retrievable + blocksAfterRestart := store.InitAll(cacheMetrics, db).Blocks + receivedAfterRestart, err := blocksAfterRestart.ByView(block.View) + require.NoError(t, err) + 
require.Equal(t, *block, *receivedAfterRestart)
+	})
+}
diff --git a/storage/store/cache.go b/storage/store/cache.go
new file mode 100644
index 00000000000..73e343fda3e
--- /dev/null
+++ b/storage/store/cache.go
@@ -0,0 +1,201 @@
+package store
+
+import (
+	"errors"
+	"fmt"
+
+	lru "github.com/fxamacker/golang-lru/v2"
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/storage"
+)
+
+type lruCache[K comparable, V any] interface {
+	Add(key K, value V) bool
+	Get(key K) (value V, ok bool)
+	Contains(key K) (ok bool)
+	Remove(key K) bool
+	Len() int
+}
+
+func withLimit[K comparable, V any](limit uint) func(*Cache[K, V]) {
+	return func(c *Cache[K, V]) {
+		c.limit = limit
+	}
+}
+
+type storeFunc[K comparable, V any] func(rw storage.ReaderBatchWriter, key K, val V) error
+
+type storeWithLockFunc[K comparable, V any] func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, key K, val V) error
+
+const DefaultCacheSize = uint(1000)
+
+func withStore[K comparable, V any](store storeFunc[K, V]) func(*Cache[K, V]) {
+	return func(c *Cache[K, V]) {
+		c.store = store
+	}
+}
+
+func withStoreWithLock[K comparable, V any](store storeWithLockFunc[K, V]) func(*Cache[K, V]) {
+	return func(c *Cache[K, V]) {
+		c.storeWithLock = store
+	}
+}
+
+func noStore[K comparable, V any](_ storage.ReaderBatchWriter, _ K, _ V) error {
+	return fmt.Errorf("no store function for cache put available")
+}
+
+func noStoreWithLock[K comparable, V any](_ lockctx.Proof, _ storage.ReaderBatchWriter, _ K, _ V) error {
+	return fmt.Errorf("no store function for cache put with lock available")
+}
+
+func noopStore[K comparable, V any](_ storage.ReaderBatchWriter, _ K, _ V) error {
+	return nil
+}
+
+type removeFunc[K comparable] func(storage.ReaderBatchWriter, K) error
+
+func withRemove[K comparable, V any](remove removeFunc[K]) func(*Cache[K, V]) {
+	return func(c *Cache[K, V]) {
+		c.remove = remove
+	}
+}
+
+func noRemove[K comparable](_ storage.ReaderBatchWriter, _ K) error {
+	return fmt.Errorf("no remove function for cache remove available")
+}
+
+type retrieveFunc[K comparable, V any] func(r storage.Reader, key K) (V, error)
+
+func withRetrieve[K comparable, V any](retrieve retrieveFunc[K, V]) func(*Cache[K, V]) {
+	return func(c *Cache[K, V]) {
+		c.retrieve = retrieve
+	}
+}
+
+func noRetrieve[K comparable, V any](_ storage.Reader, _ K) (V, error) {
+	var nullV V
+	return nullV, fmt.Errorf("no retrieve function for cache get available")
+}
+
+type Cache[K comparable, V any] struct {
+	metrics       module.CacheMetrics
+	limit         uint
+	store         storeFunc[K, V]
+	storeWithLock storeWithLockFunc[K, V]
+	retrieve      retrieveFunc[K, V]
+	remove        removeFunc[K]
+	resource      string
+	cache         lruCache[K, V]
+}
+
+func newCache[K comparable, V any](collector module.CacheMetrics, resourceName string, options ...func(*Cache[K, V])) *Cache[K, V] {
+	c := Cache[K, V]{
+		metrics:       collector,
+		limit:         DefaultCacheSize,
+		store:         noStore[K, V],
+		storeWithLock: noStoreWithLock[K, V],
+		retrieve:      noRetrieve[K, V],
+		remove:        noRemove[K],
+		resource:      resourceName,
+	}
+	for _, option := range options {
+		option(&c)
+	}
+	c.cache, _ = lru.New[K, V](int(c.limit))
+	c.metrics.CacheEntries(c.resource, uint(c.cache.Len()))
+	return &c
+}
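+
+// Illustrative sketch of a typical read-through / write-through configuration of
+// this generic cache; the store and retrieve closures here are assumptions for
+// illustration, standing in for concrete operation.* helpers:
+//
+//	c := newCache[flow.Identifier, []byte](collector, "example",
+//		withLimit[flow.Identifier, []byte](100),
+//		withStore(func(rw storage.ReaderBatchWriter, key flow.Identifier, val []byte) error {
+//			return operation.UpsertByKey(rw.Writer(), key[:], val) // write-through
+//		}),
+//		withRetrieve(func(r storage.Reader, key flow.Identifier) ([]byte, error) {
+//			var val []byte
+//			err := operation.RetrieveByKey(r, key[:], &val) // read-through on cache miss
+//			return val, err
+//		}),
+//	)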
+
+// IsCached returns true if the key exists in the cache.
+// It DOES NOT check whether the key exists in the underlying data store.
+func (c *Cache[K, V]) IsCached(key K) bool {
+	return c.cache.Contains(key)
+}
+
+// Get will try to retrieve the resource from the cache first, and on a cache miss
+// from the injected retrieve function, reading through to the underlying store.
+// During normal operations, the following error returns are expected:
+// - `storage.ErrNotFound` if key is unknown.
+func (c *Cache[K, V]) Get(r storage.Reader, key K) (V, error) {
+	// check if we have it in the cache
+	resource, cached := c.cache.Get(key)
+	if cached {
+		c.metrics.CacheHit(c.resource)
+		return resource, nil
+	}
+
+	// get it from the database
+	resource, err := c.retrieve(r, key)
+	if err != nil {
+		if errors.Is(err, storage.ErrNotFound) {
+			c.metrics.CacheNotFound(c.resource)
+		}
+		var nullV V
+		return nullV, fmt.Errorf("could not retrieve resource: %w", err)
+	}
+
+	c.metrics.CacheMiss(c.resource)
+
+	// cache the resource and eject least recently used one if we reached limit
+	evicted := c.cache.Add(key, resource)
+	if !evicted {
+		c.metrics.CacheEntries(c.resource, uint(c.cache.Len()))
+	}
+
+	return resource, nil
+}
+
+// Remove drops the key from the cache, if present; the underlying data store is not modified.
+func (c *Cache[K, V]) Remove(key K) {
+	c.cache.Remove(key)
+}
+
+// Insert will add a resource directly to the cache with the given key
+func (c *Cache[K, V]) Insert(key K, resource V) {
+	// cache the resource and eject least recently used one if we reached limit
+	evicted := c.cache.Add(key, resource)
+	if !evicted {
+		c.metrics.CacheEntries(c.resource, uint(c.cache.Len()))
+	}
+}
+
+// PutTx persists the resource to the underlying database as part of the given batch write,
+// and inserts it into the cache once the batch has been successfully committed.
+func (c *Cache[K, V]) PutTx(rw storage.ReaderBatchWriter, key K, resource V) error {
+	storage.OnCommitSucceed(rw, func() {
+		c.Insert(key, resource)
+	})
+
+	err := c.store(rw, key, resource)
+	if err != nil {
+		return fmt.Errorf("could not store resource: %w", err)
+	}
+
+	return nil
+}
+
+// PutWithLockTx is the lock-protected variant of PutTx: it persists the resource via the
+// configured storeWithLock function and caches it once the batch has been successfully committed.
+func (c *Cache[K, V]) PutWithLockTx(lctx lockctx.Proof, rw storage.ReaderBatchWriter, key K, resource V) error {
+	storage.OnCommitSucceed(rw, func() {
+		c.Insert(key, resource)
+	})
+
+	err := c.storeWithLock(lctx, rw, key, resource)
+	if err != nil {
+		return fmt.Errorf("could not store resource: %w", err)
+	}
+
+	return nil
+}
+
+// RemoveTx removes the resource from the underlying database as part of the given batch write,
+// and evicts it from the cache once the batch has been successfully committed.
+func (c *Cache[K, V]) RemoveTx(rw storage.ReaderBatchWriter, key K) error {
+	storage.OnCommitSucceed(rw, func() {
+		c.Remove(key)
+	})
+
+	err := c.remove(rw, key)
+	if err != nil {
+		return fmt.Errorf("could not remove resource: %w", err)
+	}
+
+	return nil
+}
diff --git a/storage/store/cache_test.go b/storage/store/cache_test.go
new file mode 100644
index 00000000000..228c5392f43
--- /dev/null
+++ b/storage/store/cache_test.go
@@ -0,0 +1,364 @@
+package store
+
+import (
+	"fmt"
+	"strconv"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+type cache[K comparable, V any] interface {
+	IsCached(K) bool
+	Get(storage.Reader, K) (V, error)
+	Insert(K, V)
+	Remove(K)
+	PutTx(storage.ReaderBatchWriter, K, V) error
+}
+
+func mustNewGroupCache[G comparable, K comparable, V any](
+	t testing.TB,
+	collector module.CacheMetrics,
+	resourceName string,
+	groupFromKey func(K) G,
+	options ...func(*Cache[K, V]),
+) *GroupCache[G, K, V] {
+	cache, err := newGroupCache(collector, resourceName, groupFromKey, options...)
+	require.NoError(t, err)
+	return cache
+}
+
+// TestCacheExists tests checking the existence of items in the cache.
+func TestCacheExists(t *testing.T) {
+	caches := []cache[flow.Identifier, any]{
+		newCache[flow.Identifier, any](
+			metrics.NewNoopCollector(),
+			"test",
+		),
+		mustNewGroupCache[string, flow.Identifier, any](
+			t,
+			metrics.NewNoopCollector(),
+			"test",
+			func(id flow.Identifier) string { return strconv.Itoa(int(id[0])) },
+		),
+	}
+
+	for _, cache := range caches {
+		testCacheExists(t, cache)
+	}
+}
+
+func testCacheExists(t *testing.T, cache cache[flow.Identifier, any]) {
+	t.Run("non-existent", func(t *testing.T) {
+		key := unittest.IdentifierFixture()
+		exists := cache.IsCached(key)
+		assert.False(t, exists)
+	})
+
+	t.Run("existent", func(t *testing.T) {
+		key := unittest.IdentifierFixture()
+		cache.Insert(key, unittest.RandomBytes(128))
+
+		exists := cache.IsCached(key)
+		assert.True(t, exists)
+	})
+
+	t.Run("removed", func(t *testing.T) {
+		key := unittest.IdentifierFixture()
+		// insert, then remove the item
+		cache.Insert(key, unittest.RandomBytes(128))
+		cache.Remove(key)
+
+		exists := cache.IsCached(key)
+		assert.False(t, exists)
+	})
+}
+
+// Test that a stored item is cached, and that on a cache hit
+// the retrieve function is not invoked again
+func TestCacheCachedHit(t *testing.T) {
+	retrieved := atomic.NewUint64(0)
+
+	store := func(rw storage.ReaderBatchWriter, key flow.Identifier, val []byte) error {
+		return operation.UpsertByKey(rw.Writer(), key[:], val)
+	}
+	retrieve := func(r storage.Reader, key flow.Identifier) ([]byte, error) {
+		retrieved.Inc()
+		var val []byte
+		err := operation.RetrieveByKey(r, key[:], &val)
+		if err != nil {
+			return nil, err
+		}
+		return val, nil
+	}
+
+	caches := []cache[flow.Identifier, []byte]{
+		newCache(
+			metrics.NewNoopCollector(),
+			"test",
+			withStore(store),
+			withRetrieve(retrieve),
+		),
+
+		mustNewGroupCache(
+			t,
+			metrics.NewNoopCollector(),
+			"test",
+			func(id flow.Identifier) string { return strconv.Itoa(int(id[0])) },
+			withStore(store),
+			withRetrieve(retrieve),
+		),
+	}
+
+	for _, cache := range caches {
+		testCacheCachedHit(t, cache, retrieved)
+	}
+}
+
+func testCacheCachedHit(t *testing.T, cache cache[flow.Identifier, []byte], retrieved *atomic.Uint64) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		retrieved.Store(0)
+
+		key := unittest.IdentifierFixture()
+		val := unittest.RandomBytes(128)
+
+		// storing the item will cache it
+		require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return cache.PutTx(rw, key, val)
+		}))
+
+		// retrieving the stored item should hit the cache; no db op is performed
+		cached, err := cache.Get(db.Reader(), key)
+		require.NoError(t, err)
+		require.Equal(t, val, cached)
+		require.Equal(t, uint64(0), retrieved.Load()) // no db op
+
+		// removing the cached item
+		cache.Remove(key)
+
+		// Get the same item: the cache misses and the value is retrieved from the db, so a db op is performed
+		cached, err = cache.Get(db.Reader(), key)
+		require.NoError(t, err)
+		require.Equal(t, val, cached)
+		require.Equal(t, uint64(1), retrieved.Load()) // hit db
+
+		// Get the same item again; this time the cache is hit
+		_, err = cache.Get(db.Reader(), key)
+		require.NoError(t, err)
+		require.Equal(t, uint64(1), retrieved.Load()) // cache hit
+
+		// Querying another key misses the cache and hits the db
+		_, err = cache.Get(db.Reader(), unittest.IdentifierFixture())
+		require.ErrorIs(t, err, storage.ErrNotFound)
+	})
+}
+
+// Test that storage.ErrNotFound is returned when the item is missing,
+// and that the not-found result is not cached
+func
TestCacheNotFoundReturned(t *testing.T) {
+	retrieved := atomic.NewUint64(0)
+	retrieve := func(storage.Reader, flow.Identifier) ([]byte, error) {
+		retrieved.Inc()
+		return nil, storage.ErrNotFound
+	}
+
+	caches := []cache[flow.Identifier, []byte]{
+		newCache(
+			metrics.NewNoopCollector(),
+			"test",
+			withRetrieve(retrieve),
+		),
+
+		mustNewGroupCache(
+			t,
+			metrics.NewNoopCollector(),
+			"test",
+			func(id flow.Identifier) string { return strconv.Itoa(int(id[0])) },
+			withRetrieve(retrieve),
+		),
+	}
+
+	for _, cache := range caches {
+		testCacheNotFoundReturned(t, cache, retrieved)
+	}
+}
+
+func testCacheNotFoundReturned(t *testing.T, cache cache[flow.Identifier, []byte], retrieved *atomic.Uint64) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		retrieved.Store(0)
+
+		// Create a random identifier to use as a key
+		notExist := unittest.IdentifierFixture()
+
+		// Try to get the non-existent item from the cache
+		// Assert that the error is storage.ErrNotFound
+		_, err := cache.Get(db.Reader(), notExist)
+		require.ErrorIs(t, err, storage.ErrNotFound)
+
+		// Get the item again; the not-found result was not cached, so the db is queried a second time
+		_, err = cache.Get(db.Reader(), notExist)
+		require.ErrorIs(t, err, storage.ErrNotFound)
+		require.Equal(t, uint64(2), retrieved.Load()) // retrieved from DB 2 times.
+	})
+}
+
+var errStoreException = fmt.Errorf("storing exception")
+
+// Test that when the store function returns an exception, the key-value pair is not cached.
+func TestCacheExceptionNotCached(t *testing.T) {
+	stored, retrieved := atomic.NewUint64(0), atomic.NewUint64(0)
+
+	store := func(storage.ReaderBatchWriter, flow.Identifier, []byte) error {
+		stored.Inc()
+		return errStoreException
+	}
+
+	retrieve := func(r storage.Reader, key flow.Identifier) ([]byte, error) {
+		retrieved.Inc()
+		var val []byte
+		err := operation.RetrieveByKey(r, key[:], &val)
+		if err != nil {
+			return nil, err
+		}
+		return val, nil
+	}
+
+	caches := []cache[flow.Identifier, []byte]{
+		newCache(
+			metrics.NewNoopCollector(),
+			"test",
+			withStore(store),
+			withRetrieve(retrieve),
+		),
+
+		mustNewGroupCache(
+			t,
+			metrics.NewNoopCollector(),
+			"test",
+			func(id flow.Identifier) string { return strconv.Itoa(int(id[0])) },
+			withStore(store),
+			withRetrieve(retrieve),
+		),
+	}
+
+	for _, cache := range caches {
+		testCacheExceptionNotCached(t, cache)
+	}
+}
+
+func testCacheExceptionNotCached(t *testing.T, cache cache[flow.Identifier, []byte]) {
+
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+
+		key := unittest.IdentifierFixture()
+		val := unittest.RandomBytes(128)
+
+		// store returns exception err
+		err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return cache.PutTx(rw, key, val)
+		})
+
+		require.ErrorIs(t, err, errStoreException)
+
+		// assert key value is not cached
+		_, err = cache.Get(db.Reader(), key)
+		require.ErrorIs(t, err, storage.ErrNotFound)
+	})
+}
+
+func BenchmarkCacheRemove(b *testing.B) {
+	const txCountPerBlock = 5
+
+	benchmarks := []struct {
+		name        string
+		cacheSize   int
+		removeCount int
+	}{
+		{name: "cache size 1,000, remove count 25", cacheSize: 1_000, removeCount: 25},
+		{name: "cache size 2,000, remove count 25", cacheSize: 2_000, removeCount: 25},
+		{name: "cache size 3,000, remove count 25", cacheSize: 3_000, removeCount: 25},
+		{name: "cache size 4,000, remove count 25", cacheSize: 4_000, removeCount: 25},
+		{name: "cache size 5,000, remove count 25", cacheSize: 5_000, removeCount: 25},
+		{name: "cache size 6,000, remove count 25", cacheSize: 6_000, removeCount: 25},
{name: "cache size 7,000, remove count 25", cacheSize: 7_000, removeCount: 25}, + {name: "cache size 8,000, remove count 25", cacheSize: 8_000, removeCount: 25}, + {name: "cache size 9,000, remove count 25", cacheSize: 9_000, removeCount: 25}, + {name: "cache size 10,000, remove count 25", cacheSize: 10_000, removeCount: 25}, + {name: "cache size 20,000, remove count 25", cacheSize: 20_000, removeCount: 25}, + {name: "cache size 10,000, remove count 5,000", cacheSize: 10_000, removeCount: 5_000}, + } + + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + blockCount := bm.cacheSize/txCountPerBlock + 1 + + blockIDs := make([]flow.Identifier, blockCount) + for i := range len(blockIDs) { + blockIDs[i] = unittest.IdentifierFixture() + } + + txIDs := make([]flow.Identifier, blockCount*txCountPerBlock) + for i := range len(txIDs) { + txIDs[i] = unittest.IdentifierFixture() + } + + removeIDs := make([]TwoIdentifier, 0, bm.removeCount) + + blockIDIndex := len(blockIDs) - 1 + txIDIndex := len(txIDs) - 1 + for len(removeIDs) < bm.removeCount { + blockID := blockIDs[blockIDIndex] + blockIDIndex-- + + for range txCountPerBlock { + var key TwoIdentifier + n := copy(key[:], blockID[:]) + copy(key[n:], txIDs[txIDIndex][:]) + removeIDs = append(removeIDs, key) + + txIDIndex-- + } + } + + b.ResetTimer() + + for range b.N { + b.StopTimer() + + cache := newCache( + metrics.NewNoopCollector(), + metrics.ResourceTransactionResults, + withLimit[TwoIdentifier, struct{}](uint(bm.cacheSize)), + withStore(noopStore[TwoIdentifier, struct{}]), + withRetrieve(noRetrieve[TwoIdentifier, struct{}]), + ) + + for i, blockID := range blockIDs { + for _, txID := range txIDs[i*txCountPerBlock : (i+1)*txCountPerBlock] { + var key TwoIdentifier + n := copy(key[:], blockID[:]) + copy(key[n:], txID[:]) + + cache.Insert(key, struct{}{}) + } + } + + b.StartTimer() + + for _, id := range removeIDs { + cache.Remove(id) + } + } + }) + } +} diff --git a/storage/store/chunk_data_packs.go b/storage/store/chunk_data_packs.go new file mode 100644 index 00000000000..0568dd8f5c9 --- /dev/null +++ b/storage/store/chunk_data_packs.go @@ -0,0 +1,130 @@ +package store + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +type ChunkDataPacks struct { + db storage.DB + collections storage.Collections + byChunkIDCache *Cache[flow.Identifier, *storage.StoredChunkDataPack] +} + +var _ storage.ChunkDataPacks = (*ChunkDataPacks)(nil) + +func NewChunkDataPacks(collector module.CacheMetrics, db storage.DB, collections storage.Collections, byChunkIDCacheSize uint) *ChunkDataPacks { + + storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, key flow.Identifier, val *storage.StoredChunkDataPack) error { + return operation.InsertChunkDataPack(lctx, rw, val) + } + + retrieve := func(r storage.Reader, key flow.Identifier) (*storage.StoredChunkDataPack, error) { + var c storage.StoredChunkDataPack + err := operation.RetrieveChunkDataPack(r, key, &c) + return &c, err + } + + cache := newCache(collector, metrics.ResourceChunkDataPack, + withLimit[flow.Identifier, *storage.StoredChunkDataPack](byChunkIDCacheSize), + withStoreWithLock(storeWithLock), + withRetrieve(retrieve), + ) + + ch := ChunkDataPacks{ + db: db, + byChunkIDCache: cache, + collections: collections, + } + return &ch +} + +// Remove removes 
multiple chunk data packs, keyed by their chunk IDs, in one batch.
+// No errors are expected during normal operation, even if no entries are matched.
+func (ch *ChunkDataPacks) Remove(chunkIDs []flow.Identifier) error {
+	return ch.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		for _, c := range chunkIDs {
+			err := ch.BatchRemove(c, rw)
+			if err != nil {
+				return fmt.Errorf("cannot remove chunk data pack: %w", err)
+			}
+		}
+
+		return nil
+	})
+}
+
+// StoreByChunkID stores multiple chunk data packs, keyed by their chunk IDs, in one batch.
+// No errors are expected during normal operation, but a generic error may be returned
+// in case of an unexpected database failure.
+func (ch *ChunkDataPacks) StoreByChunkID(lctx lockctx.Proof, cs []*flow.ChunkDataPack) error {
+	return ch.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		for _, c := range cs {
+			sc := storage.ToStoredChunkDataPack(c)
+			err := ch.byChunkIDCache.PutWithLockTx(lctx, rw, sc.ChunkID, sc)
+			if err != nil {
+				return err
+			}
+		}
+
+		return nil
+	})
+}
+
+// BatchRemove removes the chunk data pack with the given chunk ID in the provided batch.
+// No errors are expected during normal operation, even if no entries are matched.
+// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
+func (ch *ChunkDataPacks) BatchRemove(chunkID flow.Identifier, rw storage.ReaderBatchWriter) error {
+	storage.OnCommitSucceed(rw, func() {
+		ch.byChunkIDCache.Remove(chunkID)
+	})
+	return operation.RemoveChunkDataPack(rw.Writer(), chunkID)
+}
+
+// ByChunkID returns the chunk data pack for the given chunk ID. For non-system chunks,
+// the embedded collection is re-populated from the Collections store.
+// Expected errors during normal operation:
+// - `storage.ErrNotFound` if no chunk data pack is stored for the given chunk ID
+func (ch *ChunkDataPacks) ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error) {
+	schdp, err := ch.byChunkID(chunkID)
+	if err != nil {
+		return nil, err
+	}
+
+	chdp := &flow.ChunkDataPack{
+		ChunkID:           schdp.ChunkID,
+		StartState:        schdp.StartState,
+		Proof:             schdp.Proof,
+		ExecutionDataRoot: schdp.ExecutionDataRoot,
+	}
+
+	if !schdp.SystemChunk {
+		collection, err := ch.collections.ByID(schdp.CollectionID)
+		if err != nil {
+			return nil, fmt.Errorf("could not retrieve collection (id: %x) for stored chunk data pack: %w", schdp.CollectionID, err)
+		}
+
+		chdp.Collection = collection
+	}
+
+	return chdp, nil
+}
+
+func (ch *ChunkDataPacks) byChunkID(chunkID flow.Identifier) (*storage.StoredChunkDataPack, error) {
+	schdp, err := ch.retrieveCHDP(chunkID)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve stored chunk data pack: %w", err)
+	}
+
+	return schdp, nil
+}
+
+func (ch *ChunkDataPacks) retrieveCHDP(chunkID flow.Identifier) (*storage.StoredChunkDataPack, error) {
+	val, err := ch.byChunkIDCache.Get(ch.db.Reader(), chunkID)
+	if err != nil {
+		return nil, err
+	}
+	return val, nil
+}
diff --git a/storage/store/chunk_data_packs_test.go b/storage/store/chunk_data_packs_test.go
new file mode 100644
index 00000000000..acc5f77cb20
--- /dev/null
+++ b/storage/store/chunk_data_packs_test.go
@@ -0,0 +1,132 @@
+package store_test
+
+import (
+	"testing"
+
+	"github.com/cockroachdb/pebble/v2"
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation/pebbleimpl"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
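+// The tests below drive StoreByChunkID while holding LockInsertChunkDataPack. For
+// orientation, a hedged sketch of the call pattern outside of tests (the
+// lock-manager wiring shown here is assumed, not prescribed):
+//
+//	lctx := lockManager.NewContext()
+//	if err := lctx.AcquireLock(storage.LockInsertChunkDataPack); err != nil {
+//		return err
+//	}
+//	defer lctx.Release()
+//	return chunkDataPackStore.StoreByChunkID(lctx, cs) // idempotent for identical re-inserts
+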
+// TestChunkDataPacks_Store evaluates correct storage and retrieval of chunk data packs in the storage.
+// It also evaluates that re-inserting is idempotent.
+func TestChunkDataPacks_Store(t *testing.T) {
+	WithChunkDataPacks(t, 100, func(t *testing.T, chunkDataPacks []*flow.ChunkDataPack, chunkDataPackStore *store.ChunkDataPacks, _ *pebble.DB, lockManager storage.LockManager) {
+		require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error {
+			require.NoError(t, chunkDataPackStore.StoreByChunkID(lctx, chunkDataPacks))
+			return chunkDataPackStore.StoreByChunkID(lctx, chunkDataPacks)
+		}))
+	})
+}
+
+func TestChunkDataPack_Remove(t *testing.T) {
+	unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) {
+		lockManager := storage.NewTestingLockManager()
+		db := pebbleimpl.ToDB(pdb)
+		transactions := store.NewTransactions(&metrics.NoopCollector{}, db)
+		collections := store.NewCollections(db, transactions)
+		// keep the cache size at 1 to make sure that entries are written and read from storage itself.
+		chunkDataPackStore := store.NewChunkDataPacks(&metrics.NoopCollector{}, db, collections, 1)
+
+		chunkDataPacks := unittest.ChunkDataPacksFixture(10)
+		for _, chunkDataPack := range chunkDataPacks {
+			// store collection in Collections storage (which ChunkDataPacks store uses internally)
+			_, err := collections.Store(chunkDataPack.Collection)
+			require.NoError(t, err)
+		}
+
+		chunkIDs := make([]flow.Identifier, 0, len(chunkDataPacks))
+		for _, chunk := range chunkDataPacks {
+			chunkIDs = append(chunkIDs, chunk.ChunkID)
+		}
+
+		require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error {
+			return chunkDataPackStore.StoreByChunkID(lctx, chunkDataPacks)
+		}))
+		require.NoError(t, chunkDataPackStore.Remove(chunkIDs))
+
+		// verify it has been removed
+		_, err := chunkDataPackStore.ByChunkID(chunkIDs[0])
+		assert.ErrorIs(t, err, storage.ErrNotFound)
+
+		// Removing again should not error
+		require.NoError(t, chunkDataPackStore.Remove(chunkIDs))
+	})
+}
+
+// TestChunkDataPacks_MissingItem evaluates that querying a missing item returns a storage.ErrNotFound error.
+func TestChunkDataPacks_MissingItem(t *testing.T) {
+	unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) {
+		db := pebbleimpl.ToDB(pdb)
+		transactions := store.NewTransactions(&metrics.NoopCollector{}, db)
+		collections := store.NewCollections(db, transactions)
+		store1 := store.NewChunkDataPacks(&metrics.NoopCollector{}, db, collections, 1)
+
+		// attempt to get a chunk data pack for a non-existent chunk ID
+		_, err := store1.ByChunkID(unittest.IdentifierFixture())
+		assert.ErrorIs(t, err, storage.ErrNotFound)
+	})
+}
+
+// TestChunkDataPacks_StoreTwice evaluates that storing the same chunk data pack twice
+// does not result in an error.
+func TestChunkDataPacks_StoreTwice(t *testing.T) {
+	WithChunkDataPacks(t, 2, func(t *testing.T, chunkDataPacks []*flow.ChunkDataPack, chunkDataPackStore *store.ChunkDataPacks, pdb *pebble.DB, lockManager storage.LockManager) {
+		db := pebbleimpl.ToDB(pdb)
+		transactions := store.NewTransactions(&metrics.NoopCollector{}, db)
+		collections := store.NewCollections(db, transactions)
+		store1 := store.NewChunkDataPacks(&metrics.NoopCollector{}, db, collections, 1)
+		require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertChunkDataPack, func(lctx lockctx.Context) error {
+			require.NoError(t, store1.StoreByChunkID(lctx, chunkDataPacks))
+
+			// sanity-check first that chunk data packs are stored, before attempting to store them again.
+			for _, c := range chunkDataPacks {
+				c2, err := store1.ByChunkID(c.ChunkID)
+				require.NoError(t, err)
+				require.Equal(t, c, c2)
+			}
+
+			return store1.StoreByChunkID(lctx, chunkDataPacks)
+		}))
+	})
+}
+
+// WithChunkDataPacks is a test helper that generates the specified number of chunk data packs, stores them using storeFunc, and
+// then evaluates whether they are successfully retrieved from storage.
+func WithChunkDataPacks(t *testing.T, chunks int, storeFunc func(*testing.T, []*flow.ChunkDataPack, *store.ChunkDataPacks, *pebble.DB, storage.LockManager)) {
+	unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) {
+		lockManager := storage.NewTestingLockManager()
+		db := pebbleimpl.ToDB(pdb)
+		transactions := store.NewTransactions(&metrics.NoopCollector{}, db)
+		collections := store.NewCollections(db, transactions)
+		// keep the cache size at 1 to make sure that entries are written and read from storage itself.
+		store1 := store.NewChunkDataPacks(&metrics.NoopCollector{}, db, collections, 1)
+
+		chunkDataPacks := unittest.ChunkDataPacksFixture(chunks)
+		for _, chunkDataPack := range chunkDataPacks {
+			// store collection in Collections storage (which ChunkDataPacks store uses internally)
+			_, err := collections.Store(chunkDataPack.Collection)
+			require.NoError(t, err)
+		}
+
+		// store chunk data packs using the provided store function.
+		storeFunc(t, chunkDataPacks, store1, pdb, lockManager)
+
+		// stored chunk data packs should be retrieved successfully.
+		for _, expected := range chunkDataPacks {
+			actual, err := store1.ByChunkID(expected.ChunkID)
+			require.NoError(t, err)
+
+			assert.Equal(t, expected, actual)
+		}
+	})
+}
diff --git a/storage/store/chunks_queue.go b/storage/store/chunks_queue.go
new file mode 100644
index 00000000000..33941b58c35
--- /dev/null
+++ b/storage/store/chunks_queue.go
@@ -0,0 +1,179 @@
+package store
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/onflow/flow-go/model/chunks"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+// ChunksQueue stores a queue of chunk locators that are assigned to me for verification.
+// Job consumers can read the locators as jobs from the queue by index.
+// Chunk locators stored in this queue are unique.
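+// For orientation, a hedged usage sketch (the producer/consumer wiring is assumed):
+//
+//	q := NewChunkQueue(collector, db)
+//	inited, err := q.Init(0)               // idempotent: returns (false, nil) if already initialized
+//	isNew, err := q.StoreChunkLocator(loc) // returns (false, nil) for a duplicate locator
+//	latest, err := q.LatestIndex()
+//	job, err := q.AtIndex(latest)          // job consumers read locators by index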
+type ChunksQueue struct {
+	db                storage.DB
+	chunkLocatorCache *Cache[uint64, *chunks.Locator] // cache for chunk locators, indexed by job index
+	// TODO(7355): lockctx
+	storing *sync.Mutex
+}
+
+const JobQueueChunksQueue = "JobQueueChunksQueue"
+const DefaultChunkQueuesCacheSize = uint(1000)
+
+func newChunkLocatorCache(collector module.CacheMetrics) *Cache[uint64, *chunks.Locator] {
+	store := func(rw storage.ReaderBatchWriter, index uint64, locator *chunks.Locator) error {
+		// make sure the chunk locator is unique
+		err := operation.InsertChunkLocator(rw.Writer(), locator)
+		if err != nil {
+			return fmt.Errorf("failed to insert chunk locator: %w", err)
+		}
+
+		err = operation.InsertJobAtIndex(rw.Writer(), JobQueueChunksQueue, index, locator.ID())
+		if err != nil {
+			return fmt.Errorf("failed to set job index for chunk locator queue at index %v: %w", index, err)
+		}
+
+		return nil
+	}
+
+	retrieve := func(r storage.Reader, index uint64) (*chunks.Locator, error) {
+		var locatorID flow.Identifier
+		err := operation.RetrieveJobAtIndex(r, JobQueueChunksQueue, index, &locatorID)
+		if err != nil {
+			return nil, fmt.Errorf("could not retrieve chunk locator in queue: %w", err)
+		}
+
+		var locator chunks.Locator
+		err = operation.RetrieveChunkLocator(r, locatorID, &locator)
+		if err != nil {
+			return nil, fmt.Errorf("could not retrieve locator for chunk id %v: %w", locatorID, err)
+		}
+
+		return &locator, nil
+	}
+	return newCache(collector, metrics.ResourceChunkLocators,
+		withLimit[uint64, *chunks.Locator](DefaultChunkQueuesCacheSize),
+		withStore(store),
+		withRetrieve(retrieve))
+}
+
+// NewChunkQueue creates a chunk locator queue backed by the given database.
+func NewChunkQueue(collector module.CacheMetrics, db storage.DB) *ChunksQueue {
+	return &ChunksQueue{
+		db:                db,
+		chunkLocatorCache: newChunkLocatorCache(collector),
+		storing:           &sync.Mutex{},
+	}
+}
+
+// Init initializes the chunk queue's latest index with the given default index.
+// It returns (false, nil) if the chunk queue is already initialized.
+// It returns (true, nil) if the chunk queue is successfully initialized.
+func (q *ChunksQueue) Init(defaultIndex uint64) (bool, error) {
+	q.storing.Lock()
+	defer q.storing.Unlock()
+
+	_, err := q.LatestIndex()
+	if err == nil {
+		// the chunk queue is already initialized
+		return false, nil
+	}
+
+	if !errors.Is(err, storage.ErrNotFound) {
+		return false, fmt.Errorf("could not get latest index: %w", err)
+	}
+
+	// the latest index was not found, so we initialize it with the default index;
+	// in this case, double-check that no chunk locator exists at the default index
+	_, err = q.AtIndex(defaultIndex)
+	if err == nil {
+		return false, fmt.Errorf("chunk locator already exists at default index %v", defaultIndex)
+	}
+	if !errors.Is(err, storage.ErrNotFound) {
+		return false, fmt.Errorf("could not check chunk locator at default index %v: %w", defaultIndex, err)
+	}
+
+	// set the default index as the latest index
+	err = q.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		return operation.SetJobLatestIndex(rw.Writer(), JobQueueChunksQueue, defaultIndex)
+	})
+
+	if err != nil {
+		return false, fmt.Errorf("could not init chunk locator queue with default index %v: %w", defaultIndex, err)
+	}
+
+	return true, nil
+}
+
+// StoreChunkLocator stores a new chunk locator that is assigned to me in the job queue.
+// It returns true if the locator was new, and false if it was a duplicate.
+func (q *ChunksQueue) StoreChunkLocator(locator *chunks.Locator) (bool, error) { + // storing chunk locator requires reading the latest index and updating it, + // so we need to lock the storing operation + q.storing.Lock() + defer q.storing.Unlock() + + // read the latest index + latest, err := q.LatestIndex() + if err != nil { + return false, err + } + + // make sure the chunk locator is unique + exists, err := operation.ExistChunkLocator(q.db.Reader(), locator.ID()) + if err != nil { + return false, fmt.Errorf("failed to check chunk locator existence: %w", err) + } + + // if the locator already exists, return false + if exists { + return false, nil + } + + // insert to the next index + next := latest + 1 + + err = q.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // store and cache the chunk locator + err := q.chunkLocatorCache.PutTx(rw, next, locator) + if err != nil { + return fmt.Errorf("failed to store and cache chunk locator: %w", err) + } + + // update the next index as the latest index + err = operation.SetJobLatestIndex(rw.Writer(), JobQueueChunksQueue, next) + if err != nil { + return fmt.Errorf("failed to update latest index %v: %w", next, err) + } + + return nil + }) + + if err != nil { + return false, fmt.Errorf("failed to store chunk locator: %w", err) + } + return true, nil +} + +// LatestIndex returns the index of the latest chunk locator stored in the queue. +func (q *ChunksQueue) LatestIndex() (uint64, error) { + var latest uint64 + err := operation.RetrieveJobLatestIndex(q.db.Reader(), JobQueueChunksQueue, &latest) + if err != nil { + return 0, fmt.Errorf("could not retrieve latest index for chunks queue: %w", err) + } + return latest, nil +} + +// AtIndex returns the chunk locator stored at the given index in the queue. +func (q *ChunksQueue) AtIndex(index uint64) (*chunks.Locator, error) { + return q.chunkLocatorCache.Get(q.db.Reader(), index) +} diff --git a/storage/store/chunks_queue_test.go b/storage/store/chunks_queue_test.go new file mode 100644 index 00000000000..f13b1304657 --- /dev/null +++ b/storage/store/chunks_queue_test.go @@ -0,0 +1,171 @@ +package store + +import ( + "errors" + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/chunks" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +// 1. should be able to read after store +// 2. should be able to read the latest index after store +// 3. should return false if a duplicate chunk is stored +// 4. should return true if a new chunk is stored +// 5. should return an increased index when a chunk is stored +// 6. storing 100 chunks concurrent should return last index as 100 +// 7. should not be able to read with wrong index +// 8. should return init index after init +// 9. 
storing chunk and updating the latest index should be atomic +func TestChunksQueue(t *testing.T) { + t.Run("store and read", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + q := NewChunkQueue(metrics.NewNoopCollector(), db) + initialized, err := q.Init(0) + require.NoError(t, err) + require.True(t, initialized) + + locators := unittest.ChunkLocatorListFixture(1) + locator := locators[0] + + // Store the locator + stored, err := q.StoreChunkLocator(locator) + require.NoError(t, err) + require.True(t, stored) + + // Read the locator + retrieved, err := q.AtIndex(1) + require.NoError(t, err) + require.Equal(t, locator, retrieved) + }) + }) + + t.Run("latest index after store", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + q := NewChunkQueue(metrics.NewNoopCollector(), db) + _, err := q.Init(0) + require.NoError(t, err) + + locators := unittest.ChunkLocatorListFixture(1) + locator := locators[0] + + _, err = q.StoreChunkLocator(locator) + require.NoError(t, err) + + latest, err := q.LatestIndex() + require.NoError(t, err) + require.Equal(t, uint64(1), latest) + }) + }) + + t.Run("duplicate chunk storage", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + q := NewChunkQueue(metrics.NewNoopCollector(), db) + _, err := q.Init(0) + require.NoError(t, err) + + locators := unittest.ChunkLocatorListFixture(1) + locator := locators[0] + + stored, err := q.StoreChunkLocator(locator) + require.NoError(t, err) + require.True(t, stored) + + stored, err = q.StoreChunkLocator(locator) + require.NoError(t, err) + require.False(t, stored) + }) + }) + + t.Run("increasing index", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + q := NewChunkQueue(metrics.NewNoopCollector(), db) + _, err := q.Init(0) + require.NoError(t, err) + + locators := unittest.ChunkLocatorListFixture(2) + locator1, locator2 := locators[0], locators[1] + + _, err = q.StoreChunkLocator(locator1) + require.NoError(t, err) + + _, err = q.StoreChunkLocator(locator2) + require.NoError(t, err) + + latest, err := q.LatestIndex() + require.NoError(t, err) + require.Equal(t, uint64(2), latest) + }) + }) + + t.Run("concurrent storage", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + q := NewChunkQueue(metrics.NewNoopCollector(), db) + _, err := q.Init(0) + require.NoError(t, err) + + locators := unittest.ChunkLocatorListFixture(100) + + var wg sync.WaitGroup + wg.Add(len(locators)) + + for _, locator := range locators { + go func(loc *chunks.Locator) { + defer wg.Done() + _, err := q.StoreChunkLocator(loc) + require.NoError(t, err) + }(locator) + } + + wg.Wait() + + latest, err := q.LatestIndex() + require.NoError(t, err) + require.Equal(t, uint64(len(locators)), latest) + + for _, locator := range locators { + var stored chunks.Locator + err = operation.RetrieveChunkLocator(db.Reader(), locator.ID(), &stored) + require.NoError(t, err) + require.Equal(t, *locator, stored) + } + }) + }) + + t.Run("read with wrong index", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + q := NewChunkQueue(metrics.NewNoopCollector(), db) + + _, err := q.AtIndex(1) + require.Error(t, err) + require.True(t, errors.Is(err, storage.ErrNotFound)) + }) + }) + + t.Run("init index", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + q := NewChunkQueue(metrics.NewNoopCollector(), db) + defaultIndex := uint64(10) + + initialized, err := q.Init(defaultIndex) 
+			require.NoError(t, err)
+			require.True(t, initialized)
+
+			latest, err := q.LatestIndex()
+			require.NoError(t, err)
+			require.Equal(t, defaultIndex, latest)
+
+			// Trying to init again should return false
+			initialized, err = q.Init(defaultIndex)
+			require.NoError(t, err)
+			require.False(t, initialized)
+		})
+	})
+}
diff --git a/storage/store/cluster_blocks.go b/storage/store/cluster_blocks.go
new file mode 100644
index 00000000000..5592a5fd238
--- /dev/null
+++ b/storage/store/cluster_blocks.go
@@ -0,0 +1,90 @@
+package store
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/cluster"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+// ClusterBlocks implements a simple block storage around a key-value database.
+type ClusterBlocks struct {
+	db       storage.DB
+	chainID  flow.ChainID
+	headers  *Headers
+	payloads *ClusterPayloads
+}
+
+var _ storage.ClusterBlocks = (*ClusterBlocks)(nil)
+
+// NewClusterBlocks constructs a ClusterBlocks store for the cluster with the given chain ID.
+func NewClusterBlocks(db storage.DB, chainID flow.ChainID, headers *Headers, payloads *ClusterPayloads) *ClusterBlocks {
+	b := &ClusterBlocks{
+		db:       db,
+		chainID:  chainID,
+		headers:  headers,
+		payloads: payloads,
+	}
+	return b
+}
+
+// ProposalByID returns the cluster block proposal with the given block ID.
+// Expected errors during normal operations:
+// - `storage.ErrNotFound` if no cluster block with the given ID is known
+func (b *ClusterBlocks) ProposalByID(blockID flow.Identifier) (*cluster.Proposal, error) {
+	header, err := b.headers.ByBlockID(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not get header: %w", err)
+	}
+	payload, err := b.payloads.ByBlockID(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve payload: %w", err)
+	}
+	sig, err := b.headers.sigs.ByBlockID(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve proposer signature: %w", err)
+	}
+	untrustedBlock := cluster.UntrustedBlock{
+		HeaderBody: header.HeaderBody,
+		Payload:    *payload,
+	}
+	var block *cluster.Block
+	if header.ContainsParentQC() {
+		block, err = cluster.NewBlock(untrustedBlock)
+		if err != nil {
+			return nil, fmt.Errorf("could not build cluster block: %w", err)
+		}
+
+	} else {
+		block, err = cluster.NewRootBlock(untrustedBlock)
+		if err != nil {
+			return nil, fmt.Errorf("could not build cluster root block: %w", err)
+		}
+	}
+
+	untrustedProposal := cluster.UntrustedProposal{
+		Block:           *block,
+		ProposerSigData: sig,
+	}
+	var proposal *cluster.Proposal
+	if header.ContainsParentQC() {
+		proposal, err = cluster.NewProposal(untrustedProposal)
+		if err != nil {
+			return nil, fmt.Errorf("could not build cluster proposal: %w", err)
+		}
+	} else {
+		proposal, err = cluster.NewRootProposal(untrustedProposal)
+		if err != nil {
+			return nil, fmt.Errorf("could not build root cluster proposal: %w", err)
+		}
+	}
+
+	return proposal, nil
+}
+
+// ProposalByHeight returns the cluster block proposal at the given finalized height.
+// Expected errors during normal operations:
+// - `storage.ErrNotFound` if no finalized cluster block at the given height is known
+func (b *ClusterBlocks) ProposalByHeight(height uint64) (*cluster.Proposal, error) {
+	var blockID flow.Identifier
+	err := operation.LookupClusterBlockHeight(b.db.Reader(), b.chainID, height, &blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not look up block: %w", err)
+	}
+	return b.ProposalByID(blockID)
+}
diff --git a/storage/store/cluster_blocks_test.go b/storage/store/cluster_blocks_test.go
new file mode 100644
index 00000000000..1f9377348d3
--- /dev/null
+++ b/storage/store/cluster_blocks_test.go
@@ -0,0 +1,80 @@
+package store
+
+import (
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
"github.com/onflow/flow-go/utils/unittest" +) + +// TestClusterBlocks tests inserting and querying a chain of cluster blocks. +func TestClusterBlocks(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + chain := unittest.ClusterBlockFixtures(5) + parent, blocks := chain[0], chain[1:] + + // add parent and mark its height as the latest finalized block + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.IndexClusterBlockHeight(lctx, rw, parent.ChainID, parent.Height, parent.ID()) + }) + require.NoError(t, err) + + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.BootstrapClusterFinalizedHeight(lctx, rw, parent.ChainID, parent.Height) + }) + }) + require.NoError(t, err) + + // store chain of descending blocks + for _, block := range blocks { + // InsertClusterBlock only needs LockInsertOrFinalizeClusterBlock + err := unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx2 lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterBlock(lctx2, rw, unittest.ClusterProposalFromBlock(block)) + }) + }) + require.NoError(t, err) + + // FinalizeClusterBlock only needs LockInsertOrFinalizeClusterBlock + err = unittest.WithLock(t, lockManager, storage.LockInsertOrFinalizeClusterBlock, func(lctx2 lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.FinalizeClusterBlock(lctx2, rw, block.ID()) + }) + }) + require.NoError(t, err) + } + + clusterBlocks := NewClusterBlocks( + db, + blocks[0].ChainID, + NewHeaders(metrics.NewNoopCollector(), db), + NewClusterPayloads(metrics.NewNoopCollector(), db), + ) + + t.Run("ByHeight", func(t *testing.T) { + // check if the block can be retrieved by height + for _, block := range blocks { + retrievedBlock, err := clusterBlocks.ProposalByHeight(block.Height) + require.NoError(t, err) + require.Equal(t, block.ID(), retrievedBlock.Block.ID()) + } + }) + + t.Run("ByID", func(t *testing.T) { + // check if the block can be retrieved by ID + for _, block := range blocks { + retrievedBlock, err := clusterBlocks.ProposalByID(block.ID()) + require.NoError(t, err) + require.Equal(t, block.ID(), retrievedBlock.Block.ID()) + } + }) + }) +} diff --git a/storage/store/cluster_payloads.go b/storage/store/cluster_payloads.go new file mode 100644 index 00000000000..035e91ffe92 --- /dev/null +++ b/storage/store/cluster_payloads.go @@ -0,0 +1,45 @@ +package store + +import ( + "fmt" + + "github.com/onflow/flow-go/model/cluster" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// ClusterPayloads implements storage of block payloads for collection node +// cluster consensus. 
+type ClusterPayloads struct { + db storage.DB + cache *Cache[flow.Identifier, *cluster.Payload] +} + +var _ storage.ClusterPayloads = (*ClusterPayloads)(nil) + +func NewClusterPayloads(cacheMetrics module.CacheMetrics, db storage.DB) *ClusterPayloads { + retrieve := func(r storage.Reader, blockID flow.Identifier) (*cluster.Payload, error) { + var payload cluster.Payload + err := operation.RetrieveClusterPayload(r, blockID, &payload) + return &payload, err + } + + cp := &ClusterPayloads{ + db: db, + cache: newCache[flow.Identifier, *cluster.Payload](cacheMetrics, metrics.ResourceClusterPayload, + withLimit[flow.Identifier, *cluster.Payload](flow.DefaultTransactionExpiry*4), + withRetrieve(retrieve)), + } + return cp +} + +func (cp *ClusterPayloads) ByBlockID(blockID flow.Identifier) (*cluster.Payload, error) { + val, err := cp.cache.Get(cp.db.Reader(), blockID) + if err != nil { + return nil, fmt.Errorf("failed to retrieve cluster block payload: %w", err) + } + return val, nil +} diff --git a/storage/store/cluster_payloads_test.go b/storage/store/cluster_payloads_test.go new file mode 100644 index 00000000000..aeece4c8af1 --- /dev/null +++ b/storage/store/cluster_payloads_test.go @@ -0,0 +1,50 @@ +package store_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestStoreRetrieveClusterPayload(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + payloads := store.NewClusterPayloads(metrics, db) + + blockID := unittest.IdentifierFixture() + expected := unittest.ClusterPayloadFixture(5) + + // store payload + manager := storage.NewTestingLockManager() + err := unittest.WithLock(t, manager, storage.LockInsertOrFinalizeClusterBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.InsertClusterPayload(lctx, rw, blockID, expected) + }) + }) + require.NoError(t, err) + + // fetch payload + payload, err := payloads.ByBlockID(blockID) + require.NoError(t, err) + require.Equal(t, expected, payload) + }) +} + +func TestClusterPayloadRetrieveWithoutStore(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + payloads := store.NewClusterPayloads(metrics, db) + + _, err := payloads.ByBlockID(unittest.IdentifierFixture()) // attempt to retrieve block for random ID + assert.ErrorIs(t, err, storage.ErrNotFound) + }) +} diff --git a/storage/store/collections.go b/storage/store/collections.go new file mode 100644 index 00000000000..14f19e1ae0a --- /dev/null +++ b/storage/store/collections.go @@ -0,0 +1,274 @@ +package store + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/utils/logging" +) + +type Collections struct { + db storage.DB + transactions *Transactions + + // TODO: Add caching -- this might be relatively frequently queried within the AN; + // likely predominantly with requests about recent transactions. 
+ // Note that we already have caching for transactions.
+}
+
+var _ storage.Collections = (*Collections)(nil)
+
+func NewCollections(db storage.DB, transactions *Transactions) *Collections {
+
+ c := &Collections{
+ db: db,
+ transactions: transactions,
+ }
+ return c
+}
+
+// Store stores a collection in the database.
+// Any errors returned are exceptions.
+func (c *Collections) Store(collection *flow.Collection) (*flow.LightCollection, error) {
+ light := collection.Light()
+ err := c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+ err := operation.UpsertCollection(rw.Writer(), light)
+ if err != nil {
+ return fmt.Errorf("could not insert collection: %w", err)
+ }
+
+ for _, tx := range collection.Transactions {
+ err = c.transactions.storeTx(rw, tx)
+ if err != nil {
+ return fmt.Errorf("could not insert transaction: %w", err)
+ }
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ return light, nil
+}
+
+// ByID returns the collection with the given ID, including all
+// transactions within the collection.
+//
+// Expected errors during normal operation:
+// - `storage.ErrNotFound` if no collection with the given ID was found.
+func (c *Collections) ByID(colID flow.Identifier) (*flow.Collection, error) {
+ var (
+ light flow.LightCollection
+ txs []*flow.TransactionBody
+ )
+
+ err := operation.RetrieveCollection(c.db.Reader(), colID, &light)
+ if err != nil {
+ return nil, fmt.Errorf("could not retrieve collection: %w", err)
+ }
+
+ txs = make([]*flow.TransactionBody, 0, len(light.Transactions))
+ for _, txID := range light.Transactions {
+ tx, err := c.transactions.ByID(txID)
+ if err != nil {
+ return nil, fmt.Errorf("could not retrieve transaction %v: %w", txID, err)
+ }
+
+ txs = append(txs, tx)
+ }
+
+ collection, err := flow.NewCollection(flow.UntrustedCollection{Transactions: txs})
+ if err != nil {
+ return nil, fmt.Errorf("could not construct collection: %w", err)
+ }
+
+ return collection, nil
+}
+
+// LightByID returns a reduced representation of the collection with the given ID.
+// The reduced collection references the constituent transactions by their hashes.
+//
+// Expected errors during normal operation:
+// - `storage.ErrNotFound` if no light collection was found.
+func (c *Collections) LightByID(colID flow.Identifier) (*flow.LightCollection, error) {
+ var collection flow.LightCollection
+
+ err := operation.RetrieveCollection(c.db.Reader(), colID, &collection)
+ if err != nil {
+ return nil, fmt.Errorf("could not retrieve collection: %w", err)
+ }
+
+ return &collection, nil
+}
+
+// Remove removes a collection from the database, including all constituent transactions and
+// indices inserted by Store.
+// Remove does not error if the collection does not exist.
+// Note: this method should only be called for collections included in blocks below the sealed height.
+// No errors are expected during normal operation. 
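+//
+// Illustrative call site (hypothetical, not part of this change; `collections`
+// and `colID` are assumed to exist):
+//
+//	// prune a collection once its containing block is below the sealed height
+//	if err := collections.Remove(colID); err != nil {
+//		return fmt.Errorf("could not prune collection %v: %w", colID, err)
+//	}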
+func (c *Collections) Remove(colID flow.Identifier) error {
+ col, err := c.LightByID(colID)
+ if err != nil {
+ if errors.Is(err, storage.ErrNotFound) {
+ // already removed
+ return nil
+ }
+
+ return fmt.Errorf("could not retrieve collection: %w", err)
+ }
+
+ err = c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+ // remove transaction indices
+ for _, txID := range col.Transactions {
+ err = operation.RemoveCollectionTransactionIndices(rw.Writer(), txID)
+ if err != nil {
+ return fmt.Errorf("could not remove collection transaction indices: %w", err)
+ }
+
+ err = c.transactions.RemoveBatch(rw, txID)
+ if err != nil {
+ return fmt.Errorf("could not remove transaction: %w", err)
+ }
+ }
+
+ // remove the collection
+ return operation.RemoveCollection(rw.Writer(), colID)
+ })
+ if err != nil {
+ return fmt.Errorf("could not remove collection: %w", err)
+ }
+ return nil
+}
+
+// BatchStoreAndIndexByTransaction stores a collection and indexes it by transaction ID within a batch.
+//
+// CAUTION: current approach is NOT BFT and needs to be revised in the future.
+// Honest clusters ensure a transaction can only belong to one collection. However, in rare
+// cases, the collector clusters can exceed byzantine thresholds -- making it possible to
+// produce multiple finalized collections (aka guaranteed collections) containing the same
+// transaction repeatedly.
+// TODO: eventually we need to handle Byzantine clusters
+//
+// No errors are expected during normal operations.
+func (c *Collections) BatchStoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection, rw storage.ReaderBatchWriter) (*flow.LightCollection, error) {
+ // - This lock ensures there is no race condition when indexing a collection by transaction ID.
+ // - The access node uses this index to report the transaction status. It does so by first
+ // finding the collection for a given transaction ID, then finding the block containing the
+ // collection, and finally determining the status of that block.
+ // - Since a transaction can belong to multiple collections, overwriting the collection ID
+ // previously indexed under the same transaction ID would cause the access node to return a
+ // different collection for the same transaction, making the reported transaction status
+ // inconsistent.
+ // - Therefore, we need to check whether the transaction is already indexed by a collection, and
+ // to make sure there is no dirty read, we need to use a lock to protect the indexing operation.
+ // - Note: this approach works because this is the only place where [operation.IndexCollectionByTransaction]
+ // is used in the code base to index collections by transaction.
+ if !lctx.HoldsLock(storage.LockInsertCollection) {
+ return nil, fmt.Errorf("missing lock: %v", storage.LockInsertCollection)
+ }
+
+ light := collection.Light()
+ collectionID := light.ID()
+
+ err := operation.UpsertCollection(rw.Writer(), light)
+ if err != nil {
+ return nil, fmt.Errorf("could not insert collection: %w", err)
+ }
+
+ for _, txID := range light.Transactions {
+ var differentColTxIsIn flow.Identifier
+ // The following is not BFT, because we can't handle the case where a transaction is included
+ // in multiple collections. As long as we only have significantly less than 1/3 byzantine
+ // collectors in the overall population (across all clusters) this should not happen. 
+ // TODO: For now we log a warning, but eventually we need to handle Byzantine clusters
+ err := operation.LookupCollectionByTransaction(rw.GlobalReader(), txID, &differentColTxIsIn)
+ if err == nil {
+ if collectionID != differentColTxIsIn {
+ log.Error().
+ Str(logging.KeySuspicious, "true").
+ Hex("transaction hash", txID[:]).
+ Hex("previously persisted collection containing transaction", differentColTxIsIn[:]).
+ Hex("newly encountered collection containing transaction", collectionID[:]).
+ Msg("sanity check failed: transaction contained in multiple collections -- this is a symptom of a byzantine collector cluster (or a bug)")
+ }
+ continue
+ }
+ err = operation.IndexCollectionByTransaction(lctx, rw.Writer(), txID, collectionID)
+ if err != nil {
+ return nil, fmt.Errorf("could not insert transaction ID: %w", err)
+ }
+ }
+
+ // Store individual transactions
+ for _, tx := range collection.Transactions {
+ err = c.transactions.storeTx(rw, tx)
+ if err != nil {
+ return nil, fmt.Errorf("could not insert transaction: %w", err)
+ }
+ }
+
+ return light, nil
+}
+
+// StoreAndIndexByTransaction stores a collection and indexes it by transaction ID.
+// It is concurrency-safe.
+//
+// CAUTION: current approach is NOT BFT and needs to be revised in the future.
+// Honest clusters ensure a transaction can only belong to one collection. However, in rare
+// cases, the collector clusters can exceed byzantine thresholds -- making it possible to
+// produce multiple finalized collections (aka guaranteed collections) containing the same
+// transaction repeatedly.
+// TODO: eventually we need to handle Byzantine clusters
+//
+// No errors are expected during normal operation.
+func (c *Collections) StoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection) (*flow.LightCollection, error) {
+ // - This lock ensures there is no race condition when indexing a collection by transaction ID.
+ // - The access node uses this index to report the transaction status. It does so by first
+ // finding the collection for a given transaction ID, then finding the block containing the
+ // collection, and finally determining the status of that block.
+ // - Since a transaction can belong to multiple collections, overwriting the collection ID
+ // previously indexed under the same transaction ID would cause the access node to return a
+ // different collection for the same transaction, making the reported transaction status
+ // inconsistent.
+ // - Therefore, we need to check whether the transaction is already indexed by a collection, and
+ // to make sure there is no dirty read, we need to use a lock to protect the indexing operation.
+ // - Note: this approach works because this is the only place where [operation.IndexCollectionByTransaction]
+ // is used in the code base to index collections by transaction.
+ var light *flow.LightCollection
+ err := c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+ var err error
+ light, err = c.BatchStoreAndIndexByTransaction(lctx, collection, rw)
+ return err
+ })
+ return light, err
+}
+
+// LightByTransactionID returns a reduced representation of the collection
+// holding the given transaction ID. The reduced collection references the
+// constituent transactions by their hashes.
+//
+// Expected errors during normal operation:
+// - `storage.ErrNotFound` if no light collection was found. 
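+//
+// Illustrative lookup (hypothetical caller; `collections` and `txID` are assumed
+// to exist):
+//
+//	light, err := collections.LightByTransactionID(txID)
+//	if errors.Is(err, storage.ErrNotFound) {
+//		// the transaction is not yet included in any known collection
+//	}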
+func (c *Collections) LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error) {
+ collID := &flow.Identifier{}
+ err := operation.LookupCollectionByTransaction(c.db.Reader(), txID, collID)
+ if err != nil {
+ return nil, fmt.Errorf("could not retrieve collection id: %w", err)
+ }
+
+ var collection flow.LightCollection
+ err = operation.RetrieveCollection(c.db.Reader(), *collID, &collection)
+ if err != nil {
+ return nil, fmt.Errorf("could not retrieve collection: %w", err)
+ }
+
+ return &collection, nil
+}
diff --git a/storage/store/collections_test.go b/storage/store/collections_test.go
new file mode 100644
index 00000000000..8ab091c67b4
--- /dev/null
+++ b/storage/store/collections_test.go
@@ -0,0 +1,189 @@
+package store_test
+
+import (
+ "errors"
+ "sync"
+ "testing"
+
+ "github.com/jordanschalm/lockctx"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/flow-go/module/metrics"
+ "github.com/onflow/flow-go/storage"
+ "github.com/onflow/flow-go/storage/operation/dbtest"
+ "github.com/onflow/flow-go/storage/store"
+ "github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestCollections(t *testing.T) {
+ dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+ lockManager := storage.NewTestingLockManager()
+
+ metrics := metrics.NewNoopCollector()
+ transactions := store.NewTransactions(metrics, db)
+ collections := store.NewCollections(db, transactions)
+
+ // create a collection with three transactions
+ expected := unittest.CollectionFixture(3)
+
+ // store and index the collection while holding the collection-insert lock
+ err := unittest.WithLock(t, lockManager, storage.LockInsertCollection, func(lctx lockctx.Context) error {
+ // store the collection and the transaction index
+ _, err := collections.StoreAndIndexByTransaction(lctx, &expected)
+ return err
+ })
+ require.NoError(t, err)
+
+ // retrieve the light collection by collection id
+ actual, err := collections.LightByID(expected.ID())
+ require.NoError(t, err)
+
+ // check if the light collection was indeed persisted
+ expectedLight := expected.Light()
+ assert.Equal(t, expectedLight, actual)
+
+ expectedID := expected.ID()
+
+ // retrieve the light collection ID by each of its transaction IDs
+ for _, tx := range expected.Transactions {
+ collLight, err := collections.LightByTransactionID(tx.ID())
+ // check that the collection id can indeed be retrieved by transaction id
+ require.NoError(t, err)
+ actualID := collLight.ID()
+ assert.Equal(t, expectedID, actualID)
+ }
+
+ // remove the collection
+ require.NoError(t, collections.Remove(expected.ID()))
+
+ // check that the collection was indeed removed
+ _, err = collections.LightByID(expected.ID())
+ assert.Error(t, err)
+ assert.True(t, errors.Is(err, storage.ErrNotFound))
+
+ // check that the collection was indeed removed from the transaction index
+ for _, tx := range expected.Transactions {
+ _, err = collections.LightByTransactionID(tx.ID())
+ assert.Error(t, err)
+ assert.ErrorIs(t, err, storage.ErrNotFound)
+
+ _, err = transactions.ByID(tx.ID())
+ assert.Error(t, err)
+ assert.ErrorIs(t, err, storage.ErrNotFound)
+ }
+ })
+}
+
+// verify that if a tx belongs to multiple collections, the first collection
+// indexed for the tx is the one that remains indexed in storage
+func TestCollections_IndexDuplicateTx(t *testing.T) {
+ dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+ lockManager := storage.NewTestingLockManager()
+ metrics := metrics.NewNoopCollector()
+ transactions := store.NewTransactions(metrics, db) 
+ collections := store.NewCollections(db, transactions)
+
+ // create two collections which share 1 transaction (dupTx)
+ col1 := unittest.CollectionFixture(2)
+ col2 := unittest.CollectionFixture(1)
+ dupTx := col1.Transactions[0] // the duplicated transaction
+ col2Tx := col2.Transactions[0] // transaction that's only in col2
+ col2.Transactions = append(col2.Transactions, dupTx)
+
+ // store and index both collections under the same lock context
+ err := unittest.WithLock(t, lockManager, storage.LockInsertCollection, func(lctx lockctx.Context) error {
+ // insert col1
+ _, err := collections.StoreAndIndexByTransaction(lctx, &col1)
+ require.NoError(t, err)
+
+ // insert col2
+ _, err = collections.StoreAndIndexByTransaction(lctx, &col2)
+ return err
+ })
+ require.NoError(t, err)
+
+ // should be able to retrieve col2 by ID
+ gotLightByCol2ID, err := collections.LightByID(col2.ID())
+ require.NoError(t, err)
+ col2Light := col2.Light()
+ assert.Equal(t, col2Light, gotLightByCol2ID)
+
+ // should be able to retrieve col2 by the transaction which only appears in col2
+ _, err = collections.LightByTransactionID(col2Tx.ID())
+ require.NoError(t, err)
+
+ // col1 should be indexed by the shared transaction,
+ // since col1 is the first collection to be indexed by the shared transaction (dupTx)
+ gotLightByDupTxID, err := collections.LightByTransactionID(dupTx.ID())
+ require.NoError(t, err)
+ col1Light := col1.Light()
+ assert.Equal(t, col1Light, gotLightByDupTxID)
+ })
+}
+
+// verify that when StoreAndIndexByTransaction is called concurrently with the same tx and
+// different collections, all calls succeed and one of the collections ends up indexed by the tx
+func TestCollections_ConcurrentIndexByTx(t *testing.T) {
+ dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+ lockManager := storage.NewTestingLockManager()
+ metrics := metrics.NewNoopCollector()
+ transactions := store.NewTransactions(metrics, db)
+ collections := store.NewCollections(db, transactions)
+
+ // number of collections each writer goroutine stores
+ const numCollections = 100
+
+ // Create collections sharing the same transaction
+ col1 := unittest.CollectionFixture(1)
+ col2 := unittest.CollectionFixture(1)
+ sharedTx := col1.Transactions[0] // The shared transaction
+ col2.Transactions[0] = sharedTx
+
+ var wg sync.WaitGroup
+ errChan := make(chan error, 2*numCollections)
+
+ // first writer: store collections sharing sharedTx
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < numCollections; i++ {
+ col := unittest.CollectionFixture(1)
+ col.Transactions[0] = sharedTx // Ensure it shares the same transaction
+ err := unittest.WithLock(t, lockManager, storage.LockInsertCollection, func(lctx lockctx.Context) error {
+ _, err := collections.StoreAndIndexByTransaction(lctx, &col)
+ return err
+ })
+ errChan <- err
+ }
+ }()
+
+ // second writer: store collections sharing sharedTx
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < numCollections; i++ {
+ col := unittest.CollectionFixture(1)
+ col.Transactions[0] = sharedTx // Ensure it shares the same transaction
+ err := unittest.WithLock(t, lockManager, storage.LockInsertCollection, func(lctx lockctx.Context) error {
+ _, err := collections.StoreAndIndexByTransaction(lctx, &col)
+ return err
+ })
+ errChan <- err
+ }
+ }()
+
+ wg.Wait()
+ close(errChan)
+
+ // Ensure all operations succeeded
+ for err := range errChan {
+ require.NoError(t, err)
+ }
+
+ // Verify that one of the collections is indexed by the shared transaction
+ indexedCollection, err := collections.LightByTransactionID(sharedTx.ID())
+ require.NoError(t, 
err) + assert.True(t, indexedCollection.ID() == col1.ID() || indexedCollection.ID() == col2.ID(), "Expected one of the collections to be indexed") + }) +} diff --git a/storage/store/commits.go b/storage/store/commits.go new file mode 100644 index 00000000000..19ca4e1a78b --- /dev/null +++ b/storage/store/commits.go @@ -0,0 +1,76 @@ +package store + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +type Commits struct { + db storage.DB + cache *Cache[flow.Identifier, flow.StateCommitment] +} + +var _ storage.Commits = (*Commits)(nil) + +func NewCommits(collector module.CacheMetrics, db storage.DB) *Commits { + + retrieve := func(r storage.Reader, blockID flow.Identifier) (flow.StateCommitment, error) { + var commit flow.StateCommitment + err := operation.LookupStateCommitment(r, blockID, &commit) + return commit, err + } + + remove := func(rw storage.ReaderBatchWriter, blockID flow.Identifier) error { + return operation.RemoveStateCommitment(rw.Writer(), blockID) + } + + c := &Commits{ + db: db, + cache: newCache(collector, metrics.ResourceCommit, + withLimit[flow.Identifier, flow.StateCommitment](1000), + withRetrieve(retrieve), + withRemove[flow.Identifier, flow.StateCommitment](remove), + ), + } + + return c +} + +func (c *Commits) retrieveTx(r storage.Reader, blockID flow.Identifier) (flow.StateCommitment, error) { + val, err := c.cache.Get(r, blockID) + if err != nil { + return flow.DummyStateCommitment, err + } + return val, nil +} + +// BatchStore stores Commit keyed by blockID in provided batch +// No errors are expected during normal operation. +// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. +func (c *Commits) BatchStore(lctx lockctx.Proof, blockID flow.Identifier, commit flow.StateCommitment, rw storage.ReaderBatchWriter) error { + // we can't cache while using batches, as it's unknown at this point when, and if + // the batch will be committed. Cache will be populated on read however. + return operation.IndexStateCommitment(lctx, rw, blockID, commit) +} + +func (c *Commits) ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error) { + return c.retrieveTx(c.db.Reader(), blockID) +} + +func (c *Commits) RemoveByBlockID(blockID flow.Identifier) error { + return c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return c.BatchRemoveByBlockID(blockID, rw) + }) +} + +// BatchRemoveByBlockID removes Commit keyed by blockID in provided batch +// No errors are expected during normal operation, even if no entries are matched. +// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
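+//
+// Illustrative batch usage (mirrors what RemoveByBlockID above does internally;
+// `db`, `commits`, and `blockID` are assumed to exist):
+//
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		return commits.BatchRemoveByBlockID(blockID, rw)
+//	})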
+func (c *Commits) BatchRemoveByBlockID(blockID flow.Identifier, rw storage.ReaderBatchWriter) error {
+ return c.cache.RemoveTx(rw, blockID)
+}
diff --git a/storage/store/commits_test.go b/storage/store/commits_test.go
new file mode 100644
index 00000000000..4fcc00f37e5
--- /dev/null
+++ b/storage/store/commits_test.go
@@ -0,0 +1,84 @@
+package store_test
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/jordanschalm/lockctx"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/flow-go/module/metrics"
+ "github.com/onflow/flow-go/storage"
+ "github.com/onflow/flow-go/utils/unittest"
+
+ "github.com/onflow/flow-go/storage/operation/dbtest"
+ "github.com/onflow/flow-go/storage/store"
+)
+
+// TestCommitsStoreAndRetrieve tests that a commit can be stored, retrieved, and stored again without an error
+func TestCommitsStoreAndRetrieve(t *testing.T) {
+ dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+ lockManager := storage.NewTestingLockManager()
+ metrics := metrics.NewNoopCollector()
+ store1 := store.NewCommits(metrics, db)
+
+ // attempt to get a non-existent commit
+ _, err := store1.ByBlockID(unittest.IdentifierFixture())
+ assert.ErrorIs(t, err, storage.ErrNotFound)
+
+ // store a commit in db
+ blockID := unittest.IdentifierFixture()
+ expected := unittest.StateCommitmentFixture()
+ err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+ return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+ return store1.BatchStore(lctx, blockID, expected, rw)
+ })
+ })
+ require.NoError(t, err)
+
+ // retrieve the commit by ID
+ actual, err := store1.ByBlockID(blockID)
+ require.NoError(t, err)
+ assert.Equal(t, expected, actual)
+
+ // re-insert the commit - should be idempotent
+ err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+ return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+ return store1.BatchStore(lctx, blockID, expected, rw)
+ })
+ })
+ require.NoError(t, err)
+ })
+}
+
+func TestCommitStoreAndRemove(t *testing.T) {
+ dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+ lockManager := storage.NewTestingLockManager()
+ metrics := metrics.NewNoopCollector()
+ store := store.NewCommits(metrics, db)
+
+ // Create and store a commit
+ blockID := unittest.IdentifierFixture()
+ expected := unittest.StateCommitmentFixture()
+ err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+ return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+ return store.BatchStore(lctx, blockID, expected, rw)
+ })
+ })
+ require.NoError(t, err)
+
+ // Ensure it exists
+ commit, err := store.ByBlockID(blockID)
+ require.NoError(t, err)
+ assert.Equal(t, expected, commit)
+
+ // Remove it
+ err = store.RemoveByBlockID(blockID)
+ require.NoError(t, err)
+
+ // Ensure it no longer exists
+ _, err = store.ByBlockID(blockID)
+ assert.True(t, errors.Is(err, storage.ErrNotFound))
+ })
+}
diff --git a/storage/store/computation_result.go b/storage/store/computation_result.go
new file mode 100644
index 00000000000..82457244a12
--- /dev/null
+++ b/storage/store/computation_result.go
@@ -0,0 +1,46 @@
+package store
+
+import (
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/storage"
+ "github.com/onflow/flow-go/storage/operation"
+)
+
+type ComputationResultUploadStatus struct {
+ db storage.DB
+}
+
+func 
NewComputationResultUploadStatus(db storage.DB) *ComputationResultUploadStatus { + return &ComputationResultUploadStatus{ + db: db, + } +} + +func (c *ComputationResultUploadStatus) Upsert(blockID flow.Identifier, + wasUploadCompleted bool) error { + return c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.UpsertComputationResultUploadStatus(rw.Writer(), blockID, wasUploadCompleted) + }) +} + +func (c *ComputationResultUploadStatus) GetIDsByUploadStatus(targetUploadStatus bool) ([]flow.Identifier, error) { + ids := make([]flow.Identifier, 0) + err := operation.GetBlockIDsByStatus(c.db.Reader(), &ids, targetUploadStatus) + return ids, err +} + +func (c *ComputationResultUploadStatus) ByID(computationResultID flow.Identifier) (bool, error) { + var ret bool + err := operation.GetComputationResultUploadStatus(c.db.Reader(), computationResultID, &ret) + if err != nil { + return false, err + } + + return ret, nil +} + +func (c *ComputationResultUploadStatus) Remove(computationResultID flow.Identifier) error { + return c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.RemoveComputationResultUploadStatus(rw.Writer(), computationResultID) + }) +} diff --git a/storage/store/computation_result_test.go b/storage/store/computation_result_test.go new file mode 100644 index 00000000000..af8ab154194 --- /dev/null +++ b/storage/store/computation_result_test.go @@ -0,0 +1,108 @@ +package store_test + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/engine/execution" + "github.com/onflow/flow-go/engine/execution/testutil" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" +) + +func TestUpsertAndRetrieveComputationResult(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + expected := testutil.ComputationResultFixture(t) + crStorage := store.NewComputationResultUploadStatus(db) + crId := expected.ExecutableBlock.BlockID() + + // True case - upsert + testUploadStatus := true + err := crStorage.Upsert(crId, testUploadStatus) + require.NoError(t, err) + + actualUploadStatus, err := crStorage.ByID(crId) + require.NoError(t, err) + + assert.Equal(t, testUploadStatus, actualUploadStatus) + + // False case - update + testUploadStatus = false + err = crStorage.Upsert(crId, testUploadStatus) + require.NoError(t, err) + + actualUploadStatus, err = crStorage.ByID(crId) + require.NoError(t, err) + + assert.Equal(t, testUploadStatus, actualUploadStatus) + }) +} + +func TestRemoveComputationResults(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + t.Run("Remove ComputationResult", func(t *testing.T) { + expected := testutil.ComputationResultFixture(t) + crId := expected.ExecutableBlock.BlockID() + crStorage := store.NewComputationResultUploadStatus(db) + + testUploadStatus := true + err := crStorage.Upsert(crId, testUploadStatus) + require.NoError(t, err) + + _, err = crStorage.ByID(crId) + require.NoError(t, err) + + err = crStorage.Remove(crId) + require.NoError(t, err) + + _, err = crStorage.ByID(crId) + assert.Error(t, err) + }) + }) +} + +func TestListComputationResults(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + t.Run("List all ComputationResult with given status", func(t *testing.T) { + expected := [...]*execution.ComputationResult{ + testutil.ComputationResultFixture(t), + 
testutil.ComputationResultFixture(t), + } + crStorage := store.NewComputationResultUploadStatus(db) + + // Store a list of ComputationResult instances first + expectedIDs := make(map[string]bool, 0) + for _, cr := range expected { + crId := cr.ExecutableBlock.BlockID() + expectedIDs[crId.String()] = true + err := crStorage.Upsert(crId, true) + require.NoError(t, err) + } + // Add in entries with non-targeted status + unexpected := [...]*execution.ComputationResult{ + testutil.ComputationResultFixture(t), + testutil.ComputationResultFixture(t), + } + for _, cr := range unexpected { + crId := cr.ExecutableBlock.BlockID() + err := crStorage.Upsert(crId, false) + require.NoError(t, err) + } + + // Get the list of IDs for stored instances + crIDs, err := crStorage.GetIDsByUploadStatus(true) + require.NoError(t, err) + + crIDsStrMap := make(map[string]bool, 0) + for _, crID := range crIDs { + crIDsStrMap[crID.String()] = true + } + + assert.True(t, reflect.DeepEqual(crIDsStrMap, expectedIDs)) + }) + }) +} diff --git a/storage/store/consumer_progress.go b/storage/store/consumer_progress.go new file mode 100644 index 00000000000..f8bc131fb2c --- /dev/null +++ b/storage/store/consumer_progress.go @@ -0,0 +1,100 @@ +package store + +import ( + "errors" + "fmt" + "sync" + + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// ConsumerProgressInitializer is a helper to initialize the consumer progress index in storage +// It prevents the consumer from being used before initialization +type ConsumerProgressInitializer struct { + // TODO(7355): lockctx + initing sync.Mutex + progress *consumerProgress +} + +var _ storage.ConsumerProgressInitializer = (*ConsumerProgressInitializer)(nil) + +func NewConsumerProgress(db storage.DB, consumer string) *ConsumerProgressInitializer { + progress := newConsumerProgress(db, consumer) + return &ConsumerProgressInitializer{ + progress: progress, + } +} + +func (cpi *ConsumerProgressInitializer) Initialize(defaultIndex uint64) (storage.ConsumerProgress, error) { + // making sure only one process is initializing at any time. 
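+ // Note: the mutex below only serializes concurrent Initialize calls within this
+ // process; the check-then-set against storage is not atomic across processes
+ // (see the TODO(7355) above regarding lockctx).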
+ cpi.initing.Lock()
+ defer cpi.initing.Unlock()
+
+ _, err := cpi.progress.ProcessedIndex()
+ if err != nil {
+ // anything other than ErrNotFound is an unexpected exception
+ if !errors.Is(err, storage.ErrNotFound) {
+ return nil, fmt.Errorf("could not retrieve processed index: %w", err)
+ }
+
+ // not yet initialized: initialize with the default index
+ err = cpi.progress.SetProcessedIndex(defaultIndex)
+ if err != nil {
+ return nil, fmt.Errorf("could not set processed index: %w", err)
+ }
+ }
+
+ return cpi.progress, nil
+}
+
+type consumerProgress struct {
+ db storage.DB
+ consumer string // to distinguish the consumer progress between different consumers
+}
+
+var _ storage.ConsumerProgress = (*consumerProgress)(nil)
+
+func newConsumerProgress(db storage.DB, consumer string) *consumerProgress {
+ return &consumerProgress{
+ db: db,
+ consumer: consumer,
+ }
+}
+
+// ProcessedIndex returns the processed index for the consumer
+// No errors are expected during normal operation
+func (cp *consumerProgress) ProcessedIndex() (uint64, error) {
+ var processed uint64
+ err := operation.RetrieveProcessedIndex(cp.db.Reader(), cp.consumer, &processed)
+ if err != nil {
+ return 0, fmt.Errorf("failed to retrieve processed index: %w", err)
+ }
+ return processed, nil
+}
+
+// SetProcessedIndex updates the processed index for the consumer
+// The caller must use ConsumerProgressInitializer to initialize the progress index in storage
+// No errors are expected during normal operation
+func (cp *consumerProgress) SetProcessedIndex(processed uint64) error {
+ err := cp.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+ return operation.SetProcessedIndex(rw.Writer(), cp.consumer, processed)
+ })
+ if err != nil {
+ return fmt.Errorf("could not update processed index: %w", err)
+ }
+
+ return nil
+}
+
+// BatchSetProcessedIndex updates the processed index for the consumer within a batch operation
+// The caller must use ConsumerProgressInitializer to initialize the progress index in storage
+// No errors are expected during normal operation
+func (cp *consumerProgress) BatchSetProcessedIndex(processed uint64, batch storage.ReaderBatchWriter) error {
+ err := operation.SetProcessedIndex(batch.Writer(), cp.consumer, processed)
+ if err != nil {
+ return fmt.Errorf("could not add processed index update to batch: %w", err)
+ }
+
+ return nil
+}
diff --git a/storage/store/consumer_progress_test.go b/storage/store/consumer_progress_test.go
new file mode 100644
index 00000000000..fa5171959b5
--- /dev/null
+++ b/storage/store/consumer_progress_test.go
@@ -0,0 +1,83 @@
+package store
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/flow-go/storage"
+ "github.com/onflow/flow-go/storage/operation/dbtest"
+)
+
+func TestConsumerProgressInitializer(t *testing.T) {
+ dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+ t.Run("Initialize with default index", func(t *testing.T) {
+ cpi := NewConsumerProgress(db, "test_consumer1")
+ progress, err := cpi.Initialize(100)
+ require.NoError(t, err)
+
+ index, err := progress.ProcessedIndex()
+ require.NoError(t, err)
+ assert.Equal(t, uint64(100), index)
+ })
+
+ t.Run("Initialize when already initialized", func(t *testing.T) {
+ cpi := NewConsumerProgress(db, "test_consumer2")
+
+ // First initialization
+ _, err := cpi.Initialize(100)
+ require.NoError(t, err)
+
+ // Second initialization with different index
+ progress, err := cpi.Initialize(200)
+ require.NoError(t, err)
+
+ // Should still return the original index
+ index, 
err := progress.ProcessedIndex() + require.NoError(t, err) + assert.Equal(t, uint64(100), index) + }) + + t.Run("SetProcessedIndex and ProcessedIndex", func(t *testing.T) { + cpi := NewConsumerProgress(db, "test_consumer3") + progress, err := cpi.Initialize(100) + require.NoError(t, err) + + err = progress.SetProcessedIndex(150) + require.NoError(t, err) + + index, err := progress.ProcessedIndex() + require.NoError(t, err) + assert.Equal(t, uint64(150), index) + }) + }) +} + +func TestConsumerProgressBatchSet(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + t.Run("BatchSetProcessedIndex and ProcessedIndex", func(t *testing.T) { + cpi := NewConsumerProgress(db, "test_consumer") + progress, err := cpi.Initialize(100) + require.NoError(t, err) + + err = db.WithReaderBatchWriter(func(r storage.ReaderBatchWriter) error { + err := progress.BatchSetProcessedIndex(150, r) + require.NoError(t, err) + + // Verify the index is not set until batch is committed + index, err := progress.ProcessedIndex() + require.NoError(t, err) + assert.Equal(t, uint64(100), index) + + return nil + }) + require.NoError(t, err) + + // Verify the index was updated after batch commit + index, err := progress.ProcessedIndex() + require.NoError(t, err) + assert.Equal(t, uint64(150), index) + }) + }) +} diff --git a/storage/store/epoch_commits.go b/storage/store/epoch_commits.go new file mode 100644 index 00000000000..bb7256bf767 --- /dev/null +++ b/storage/store/epoch_commits.go @@ -0,0 +1,58 @@ +package store + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +type EpochCommits struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.EpochCommit] +} + +func NewEpochCommits(collector module.CacheMetrics, db storage.DB) *EpochCommits { + + store := func(rw storage.ReaderBatchWriter, id flow.Identifier, commit *flow.EpochCommit) error { + return operation.InsertEpochCommit(rw.Writer(), id, commit) + } + + retrieve := func(r storage.Reader, id flow.Identifier) (*flow.EpochCommit, error) { + var commit flow.EpochCommit + err := operation.RetrieveEpochCommit(r, id, &commit) + return &commit, err + } + + ec := &EpochCommits{ + db: db, + cache: newCache[flow.Identifier, *flow.EpochCommit](collector, metrics.ResourceEpochCommit, + withLimit[flow.Identifier, *flow.EpochCommit](4*flow.DefaultTransactionExpiry), + withStore(store), + withRetrieve(retrieve)), + } + + return ec +} + +func (ec *EpochCommits) BatchStore(rw storage.ReaderBatchWriter, commit *flow.EpochCommit) error { + return ec.cache.PutTx(rw, commit.ID(), commit) +} + +func (ec *EpochCommits) retrieveTx(commitID flow.Identifier) (*flow.EpochCommit, error) { + val, err := ec.cache.Get(ec.db.Reader(), commitID) + if err != nil { + return nil, fmt.Errorf("could not retrieve EpochCommit event with id %x: %w", commitID, err) + } + return val, nil +} + +// ByID will return the EpochCommit event by its ID. 
+// Error returns:
+// * storage.ErrNotFound if no EpochCommit with the ID exists
+func (ec *EpochCommits) ByID(commitID flow.Identifier) (*flow.EpochCommit, error) {
+ return ec.retrieveTx(commitID)
+}
diff --git a/storage/store/epoch_commits_test.go b/storage/store/epoch_commits_test.go
new file mode 100644
index 00000000000..eddb2e72904
--- /dev/null
+++ b/storage/store/epoch_commits_test.go
@@ -0,0 +1,44 @@
+package store_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/flow-go/module/metrics"
+ "github.com/onflow/flow-go/storage"
+ "github.com/onflow/flow-go/storage/operation/dbtest"
+ "github.com/onflow/flow-go/storage/store"
+ "github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestEpochCommitStoreAndRetrieve tests that a commit can be stored, retrieved, and stored again without an error
+func TestEpochCommitStoreAndRetrieve(t *testing.T) {
+ dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+ metrics := metrics.NewNoopCollector()
+ s := store.NewEpochCommits(metrics, db)
+
+ // attempt to get a non-existent commit
+ _, err := s.ByID(unittest.IdentifierFixture())
+ assert.ErrorIs(t, err, storage.ErrNotFound)
+
+ // store a commit in db
+ expected := unittest.EpochCommitFixture()
+ err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+ return s.BatchStore(rw, expected)
+ })
+ require.NoError(t, err)
+
+ // retrieve the commit by ID
+ actual, err := s.ByID(expected.ID())
+ require.NoError(t, err)
+ assert.Equal(t, expected, actual)
+
+ // test storing same epoch commit
+ err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+ return s.BatchStore(rw, expected)
+ })
+ require.NoError(t, err)
+ })
+}
diff --git a/storage/store/epoch_protocol_state.go b/storage/store/epoch_protocol_state.go
new file mode 100644
index 00000000000..5944b1af344
--- /dev/null
+++ b/storage/store/epoch_protocol_state.go
@@ -0,0 +1,257 @@
+package store
+
+import (
+ "fmt"
+
+ "github.com/jordanschalm/lockctx"
+
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/module"
+ "github.com/onflow/flow-go/module/irrecoverable"
+ "github.com/onflow/flow-go/module/metrics"
+ "github.com/onflow/flow-go/storage"
+ "github.com/onflow/flow-go/storage/operation"
+)
+
+// DefaultEpochProtocolStateCacheSize is the default size for primary epoch protocol state entry cache.
+// Minimally, we have 3 entries per epoch (one on epoch Switchover, one on receiving the Epoch Setup and one when seeing the Epoch Commit event).
+// Let's be generous and assume we have 20 different epoch state entries per epoch.
+var DefaultEpochProtocolStateCacheSize uint = 20
+
+// DefaultProtocolStateIndexCacheSize is the default value for secondary byBlockIdCache.
+// We want to be able to cover a broad interval of views without cache misses, so we use a bigger value.
+var DefaultProtocolStateIndexCacheSize uint = 1000
+
+// EpochProtocolStateEntries implements a persistent, fork-aware storage for the Epoch-related
+// sub-states of the overall Protocol State (KV Store). It uses an embedded cache
+// which is populated on first retrieval to speed up access to frequently used epoch sub-state.
+type EpochProtocolStateEntries struct {
+ db storage.DB
+
+ // cache is essentially an in-memory map from `MinEpochStateEntry.ID()` -> `RichEpochStateEntry`
+ // We do _not_ populate this cache, which holds the RichEpochStateEntries, on store. 
This is because
+ // (i) we don't have the RichEpochStateEntry on store readily available and
+ // (ii) new RichEpochStateEntries are really rare throughout an epoch, so the total cost of populating
+ // the cache becomes negligible over several views.
+ // In the future, we might want to populate the cache on store, if we want to maintain frequently-changing
+ // information in the protocol state, like the latest sealed block. This should be a smaller amount of work,
+ // because the `MinEpochStateEntry` is generated by `StateMutator.Build()`. The `StateMutator` should already
+ // have the needed Epoch Setup and Commit events, since it starts with a RichEpochStateEntry for the parent
+ // state and consumes Epoch Setup and Epoch Commit events. Though, we leave this optimization for later.
+ //
+ // `cache` only holds the distinct state entries. On the happy path, we expect something like 3 entries per epoch.
+ // On the optimal happy path we have 3 entries per epoch: one entry on epoch Switchover, one on receiving the Epoch Setup
+ // and one when seeing the Epoch Commit event. Let's be generous and assume we have 20 different state entries per epoch.
+ // Beyond that, we are certainly leaving the domain of normal operations that we optimize for. Therefore, a cache size of
+ // roughly 100 is a reasonable balance between performance and memory consumption.
+ cache *Cache[flow.Identifier, *flow.RichEpochStateEntry]
+
+ // byBlockIdCache is essentially an in-memory map from `Block.ID()` -> `MinEpochStateEntry.ID()`. The full
+ // flow.RichEpochStateEntry can be retrieved from the `cache` above.
+ // We populate the `byBlockIdCache` on store, because a new entry is added for every block and we probably also
+ // query the Protocol state for every block. So argument (ii) from above does not apply here. Furthermore,
+ // argument (i) from above also does not apply, because we already have the state entry's ID on store,
+ // so populating the cache is easy.
+ //
+ // `byBlockIdCache` will contain an entry for every block. We want to be able to cover a broad interval of views
+ // without cache misses, so a cache size of roughly 1000 entries is reasonable.
+ byBlockIdCache *Cache[flow.Identifier, flow.Identifier]
+}
+
+var _ storage.EpochProtocolStateEntries = (*EpochProtocolStateEntries)(nil)
+
+// NewEpochProtocolStateEntries creates an EpochProtocolStateEntries instance, which stores a subset of the
+// state stored by the Dynamic Protocol State.
+// It supports storing, caching and retrieving by ID or the additionally indexed block ID. 
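+//
+// Typical construction (illustrative; matches the defaults used by the tests in
+// this change, with `collector`, `epochSetups`, `epochCommits`, and `db` assumed):
+//
+//	entries := NewEpochProtocolStateEntries(collector, epochSetups, epochCommits, db,
+//		DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize)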
+func NewEpochProtocolStateEntries(collector module.CacheMetrics, + epochSetups storage.EpochSetups, + epochCommits storage.EpochCommits, + db storage.DB, + stateCacheSize uint, + stateByBlockIDCacheSize uint, +) *EpochProtocolStateEntries { + retrieveByEntryID := func(r storage.Reader, epochProtocolStateEntryID flow.Identifier) (*flow.RichEpochStateEntry, error) { + var entry flow.MinEpochStateEntry + err := operation.RetrieveEpochProtocolState(r, epochProtocolStateEntryID, &entry) + if err != nil { + return nil, err + } + result, err := newRichEpochProtocolStateEntry(&entry, epochSetups, epochCommits) + if err != nil { + return nil, fmt.Errorf("could not create RichEpochStateEntry: %w", err) + } + return result, nil + } + + storeByBlockID := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) error { + // CAUTION: requires [storage.LockInsertBlock] and for the caller to ensure that they are not overwriting existing data. + err := operation.IndexEpochProtocolState(lctx, rw.Writer(), blockID, epochProtocolStateEntryID) + if err != nil { + return fmt.Errorf("could not index EpochProtocolState for block (%x): %w", blockID[:], err) + } + return nil + } + + retrieveByBlockID := func(r storage.Reader, blockID flow.Identifier) (flow.Identifier, error) { + var entryID flow.Identifier + err := operation.LookupEpochProtocolState(r, blockID, &entryID) + if err != nil { + return flow.ZeroID, fmt.Errorf("could not lookup epoch protocol state entry ID for block (%x): %w", blockID[:], err) + } + return entryID, nil + } + + return &EpochProtocolStateEntries{ + db: db, + cache: newCache(collector, metrics.ResourceProtocolState, + withLimit[flow.Identifier, *flow.RichEpochStateEntry](stateCacheSize), + withStore(noopStore[flow.Identifier, *flow.RichEpochStateEntry]), + withRetrieve(retrieveByEntryID)), + byBlockIdCache: newCache(collector, metrics.ResourceProtocolStateByBlockID, + withLimit[flow.Identifier, flow.Identifier](stateByBlockIDCacheSize), + withStoreWithLock(storeByBlockID), + withRetrieve(retrieveByBlockID)), + } +} + +// BatchStore persists the given epoch protocol state entry as part of a DB batch. Per convention, the identities in +// the flow.MinEpochStateEntry must be in canonical order for the current and next epoch (if present), otherwise an +// exception is returned. +// +// CAUTION: The caller must ensure `epochProtocolStateID` is a collision-resistant hash of the provided +// `epochProtocolStateEntry`! This method silently overrides existing data, which is safe only if for the same +// key, we always write the same value. +// +// No errors are expected during normal operation. +func (s *EpochProtocolStateEntries) BatchStore(w storage.Writer, epochProtocolStateEntryID flow.Identifier, epochStateEntry *flow.MinEpochStateEntry) error { + // sanity checks: + if !epochStateEntry.CurrentEpoch.ActiveIdentities.Sorted(flow.IdentifierCanonical) { + return fmt.Errorf("sanity check failed: identities are not sorted") + } + if epochStateEntry.NextEpoch != nil && !epochStateEntry.NextEpoch.ActiveIdentities.Sorted(flow.IdentifierCanonical) { + return fmt.Errorf("sanity check failed: next epoch identities are not sorted") + } + + // happy path: add storage operation of the state entry to the batch + return operation.InsertEpochProtocolState(w, epochProtocolStateEntryID, epochStateEntry) +} + +// BatchIndex persists the specific map entry in the node's database. 
+// In a nutshell, we want to maintain a map from `blockID` to `epochStateEntry`, where `blockID` references the +// block that _proposes_ the referenced epoch protocol state entry. +// Protocol convention: +// - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example, +// the protocol state changes if we seal some execution results emitting service events. +// - For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value, +// the hash of the resulting protocol state at the end of processing B is to be used. +// - CAUTION: The protocol state requires confirmation by a QC and will only become active at the child block, +// _after_ validating the QC. +// +// No errors are expected during normal operation. +func (s *EpochProtocolStateEntries) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, epochProtocolStateEntryID flow.Identifier) error { + return s.byBlockIdCache.PutWithLockTx(lctx, rw, blockID, epochProtocolStateEntryID) +} + +// ByID returns the epoch protocol state entry by its ID. +// Expected errors during normal operations: +// - storage.ErrNotFound if no protocol state with the given Identifier is known. +func (s *EpochProtocolStateEntries) ByID(epochProtocolStateEntryID flow.Identifier) (*flow.RichEpochStateEntry, error) { + return s.cache.Get(s.db.Reader(), epochProtocolStateEntryID) +} + +// ByBlockID retrieves the epoch protocol state entry that the block with the given ID proposes. +// CAUTION: this protocol state requires confirmation by a QC and will only become active at the child block, +// _after_ validating the QC. Protocol convention: +// - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example, +// the protocol state changes if we seal some execution results emitting service events. +// - For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value, +// the hash of the resulting protocol state at the end of processing B is to be used. +// - CAUTION: The protocol state requires confirmation by a QC and will only become active at the child block, +// _after_ validating the QC. +// +// Expected errors during normal operations: +// - storage.ErrNotFound if no state entry has been indexed for the given block. +func (s *EpochProtocolStateEntries) ByBlockID(blockID flow.Identifier) (*flow.RichEpochStateEntry, error) { + epochProtocolStateEntryID, err := s.byBlockIdCache.Get(s.db.Reader(), blockID) + if err != nil { + return nil, fmt.Errorf("could not lookup epoch protocol state ID for block (%x): %w", blockID[:], err) + } + return s.cache.Get(s.db.Reader(), epochProtocolStateEntryID) +} + +// newRichEpochProtocolStateEntry constructs a RichEpochStateEntry from an epoch sub-state entry. +// It queries and fills in epoch setups and commits for previous and current epochs and possibly next epoch. +// No errors are expected during normal operation. 
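+// It is invoked on cache misses in retrieveByEntryID (see NewEpochProtocolStateEntries above).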
+func newRichEpochProtocolStateEntry( + minEpochStateEntry *flow.MinEpochStateEntry, + setups storage.EpochSetups, + commits storage.EpochCommits, +) (*flow.RichEpochStateEntry, error) { + var ( + previousEpochSetup *flow.EpochSetup + previousEpochCommit *flow.EpochCommit + nextEpochSetup *flow.EpochSetup + nextEpochCommit *flow.EpochCommit + err error + ) + // query and fill in epoch setups and commits for previous and current epochs + if minEpochStateEntry.PreviousEpoch != nil { + previousEpochSetup, err = setups.ByID(minEpochStateEntry.PreviousEpoch.SetupID) + if err != nil { + return nil, fmt.Errorf("could not retrieve previous epoch setup: %w", err) + } + previousEpochCommit, err = commits.ByID(minEpochStateEntry.PreviousEpoch.CommitID) + if err != nil { + return nil, fmt.Errorf("could not retrieve previous epoch commit: %w", err) + } + } + + currentEpochSetup, err := setups.ByID(minEpochStateEntry.CurrentEpoch.SetupID) + if err != nil { + return nil, fmt.Errorf("could not retrieve current epoch setup: %w", err) + } + currentEpochCommit, err := commits.ByID(minEpochStateEntry.CurrentEpoch.CommitID) + if err != nil { + return nil, fmt.Errorf("could not retrieve current epoch commit: %w", err) + } + + // if next epoch has been set up, fill in data for it as well + nextEpoch := minEpochStateEntry.NextEpoch + if nextEpoch != nil { + nextEpochSetup, err = setups.ByID(nextEpoch.SetupID) + if err != nil { + return nil, fmt.Errorf("could not retrieve next epoch's setup event: %w", err) + } + if nextEpoch.CommitID != flow.ZeroID { + nextEpochCommit, err = commits.ByID(nextEpoch.CommitID) + if err != nil { + return nil, fmt.Errorf("could not retrieve next epoch's commit event: %w", err) + } + } + } + + epochStateEntry, err := flow.NewEpochStateEntry( + flow.UntrustedEpochStateEntry{ + MinEpochStateEntry: minEpochStateEntry, + PreviousEpochSetup: previousEpochSetup, + PreviousEpochCommit: previousEpochCommit, + CurrentEpochSetup: currentEpochSetup, + CurrentEpochCommit: currentEpochCommit, + NextEpochSetup: nextEpochSetup, + NextEpochCommit: nextEpochCommit, + }, + ) + if err != nil { + // observing an error here would be an indication of severe data corruption or bug in our code since + // all data should be available and correctly structured at this point. + return nil, irrecoverable.NewExceptionf("critical failure while instantiating EpochStateEntry: %w", err) + } + + result, err := flow.NewRichEpochStateEntry(epochStateEntry) + if err != nil { + // observing an error here would be an indication of severe data corruption or bug in our code since + // all data should be available and correctly structured at this point. 
+ return nil, irrecoverable.NewExceptionf("critical failure while constructing RichEpochStateEntry from EpochStateEntry: %w", err)
+ }
+ return result, nil
+}
diff --git a/storage/store/epoch_protocol_state_test.go b/storage/store/epoch_protocol_state_test.go
new file mode 100644
index 00000000000..512ea499e5d
--- /dev/null
+++ b/storage/store/epoch_protocol_state_test.go
@@ -0,0 +1,283 @@
+package store
+
+import (
+ "testing"
+
+ "github.com/jordanschalm/lockctx"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/onflow/flow-go/model/flow"
+ "github.com/onflow/flow-go/model/flow/mapfunc"
+ "github.com/onflow/flow-go/module/metrics"
+ "github.com/onflow/flow-go/storage"
+ "github.com/onflow/flow-go/storage/operation/dbtest"
+ "github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestProtocolStateStorage tests that the protocol state is stored, retrieved, and indexed correctly
+func TestProtocolStateStorage(t *testing.T) {
+ lockManager := storage.NewTestingLockManager()
+ dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+ metrics := metrics.NewNoopCollector()
+
+ setups := NewEpochSetups(metrics, db)
+ commits := NewEpochCommits(metrics, db)
+ s := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize)
+
+ expected := unittest.EpochStateFixture(unittest.WithNextEpochProtocolState())
+ protocolStateID := expected.ID()
+ blockID := unittest.IdentifierFixture()
+
+ // store protocol state and auxiliary info
+ require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+ return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+ // store epoch events to be able to retrieve them later
+ err := setups.BatchStore(rw, expected.PreviousEpochSetup)
+ require.NoError(t, err)
+ err = setups.BatchStore(rw, expected.CurrentEpochSetup)
+ require.NoError(t, err)
+ err = setups.BatchStore(rw, expected.NextEpochSetup)
+ require.NoError(t, err)
+ err = commits.BatchStore(rw, expected.PreviousEpochCommit)
+ require.NoError(t, err)
+ err = commits.BatchStore(rw, expected.CurrentEpochCommit)
+ require.NoError(t, err)
+ err = commits.BatchStore(rw, expected.NextEpochCommit)
+ require.NoError(t, err)
+
+ err = s.BatchStore(rw.Writer(), protocolStateID, expected.MinEpochStateEntry)
+ require.NoError(t, err)
+ return s.BatchIndex(lctx, rw, blockID, protocolStateID)
+ })
+ }))
+
+ // fetch protocol state
+ actual, err := s.ByID(protocolStateID)
+ require.NoError(t, err)
+ require.Equal(t, expected, actual)
+
+ assertRichProtocolStateValidity(t, actual)
+
+ // fetch protocol state by block ID
+ actualByBlockID, err := s.ByBlockID(blockID)
+ require.NoError(t, err)
+ require.Equal(t, expected, actualByBlockID)
+
+ assertRichProtocolStateValidity(t, actualByBlockID)
+ })
+}
+
+// TestProtocolStateStoreInvalidProtocolState tests that storing protocol state which has unsorted identities fails for
+// current and next epoch protocol states. 
+// TestProtocolStateStoreInvalidProtocolState tests that storing a protocol state which has unsorted identities fails for
+// current and next epoch protocol states.
+func TestProtocolStateStoreInvalidProtocolState(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		setups := NewEpochSetups(metrics, db)
+		commits := NewEpochCommits(metrics, db)
+		s := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize)
+		invalid := unittest.EpochStateFixture().MinEpochStateEntry
+		// swap first and second elements to break canonical order
+		invalid.CurrentEpoch.ActiveIdentities[0], invalid.CurrentEpoch.ActiveIdentities[1] = invalid.CurrentEpoch.ActiveIdentities[1], invalid.CurrentEpoch.ActiveIdentities[0]
+
+		err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return s.BatchStore(rw.Writer(), invalid.ID(), invalid)
+		})
+		require.Error(t, err)
+
+		invalid = unittest.EpochStateFixture(unittest.WithNextEpochProtocolState()).MinEpochStateEntry
+		// swap first and second elements to break canonical order
+		invalid.NextEpoch.ActiveIdentities[0], invalid.NextEpoch.ActiveIdentities[1] = invalid.NextEpoch.ActiveIdentities[1], invalid.NextEpoch.ActiveIdentities[0]
+
+		err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return s.BatchStore(rw.Writer(), invalid.ID(), invalid)
+		})
+		require.Error(t, err)
+	})
+}
+
+// TestProtocolStateMergeParticipants tests that merging participants between epochs works correctly. We always take participants
+// from the current epoch and additionally add participants from the previous epoch if they are not present in the current epoch.
+// If the same participant is in both the previous and current epochs, it should appear only once in the merged list, and its dynamic portion has to come from the current epoch.
+func TestProtocolStateMergeParticipants(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+
+		setups := NewEpochSetups(metrics, db)
+		commits := NewEpochCommits(metrics, db)
+		s := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize)
+
+		stateEntry := unittest.EpochStateFixture()
+		// change the address of a participant in the current epoch, so we can distinguish it from the one in the previous epoch
+		// when performing the assertion.
+ newAddress := "123" + nodeID := stateEntry.CurrentEpochSetup.Participants[1].NodeID + stateEntry.CurrentEpochSetup.Participants[1].Address = newAddress + stateEntry.CurrentEpoch.SetupID = stateEntry.CurrentEpochSetup.ID() + identity, _ := stateEntry.CurrentEpochIdentityTable.ByNodeID(nodeID) + identity.Address = newAddress + protocolStateID := stateEntry.ID() + + // store protocol state and auxiliary info + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // store epoch events to be able to retrieve them later + err := setups.BatchStore(rw, stateEntry.PreviousEpochSetup) + require.NoError(t, err) + err = setups.BatchStore(rw, stateEntry.CurrentEpochSetup) + require.NoError(t, err) + err = commits.BatchStore(rw, stateEntry.PreviousEpochCommit) + require.NoError(t, err) + err = commits.BatchStore(rw, stateEntry.CurrentEpochCommit) + require.NoError(t, err) + + return s.BatchStore(rw.Writer(), protocolStateID, stateEntry.MinEpochStateEntry) + }) + require.NoError(t, err) + + // fetch protocol state + actual, err := s.ByID(protocolStateID) + require.NoError(t, err) + require.Equal(t, stateEntry, actual) + + assertRichProtocolStateValidity(t, actual) + identity, ok := actual.CurrentEpochIdentityTable.ByNodeID(nodeID) + require.True(t, ok) + require.Equal(t, newAddress, identity.Address) + }) +} + +// TestProtocolStateRootSnapshot tests that storing and retrieving root protocol state (in case of bootstrap) works as expected. +// Specifically, this means that no prior epoch exists (situation after a spork) from the perspective of the freshly-sporked network. +func TestProtocolStateRootSnapshot(t *testing.T) { + lockManager := storage.NewTestingLockManager() + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + + setups := NewEpochSetups(metrics, db) + commits := NewEpochCommits(metrics, db) + s := NewEpochProtocolStateEntries(metrics, setups, commits, db, DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize) + expected := unittest.RootEpochProtocolStateFixture() + + protocolStateID := expected.ID() + blockID := unittest.IdentifierFixture() + + // store protocol state and auxiliary info + require.NoError(t, unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // store epoch events to be able to retrieve them later + err := setups.BatchStore(rw, expected.CurrentEpochSetup) + require.NoError(t, err) + err = commits.BatchStore(rw, expected.CurrentEpochCommit) + require.NoError(t, err) + + err = s.BatchStore(rw.Writer(), protocolStateID, expected.MinEpochStateEntry) + require.NoError(t, err) + return s.BatchIndex(lctx, rw, blockID, protocolStateID) + }) + })) + + // fetch protocol state + actual, err := s.ByID(protocolStateID) + require.NoError(t, err) + require.Equal(t, expected, actual) + + assertRichProtocolStateValidity(t, actual) + + // fetch protocol state by block ID + actualByBlockID, err := s.ByBlockID(blockID) + require.NoError(t, err) + require.Equal(t, expected, actualByBlockID) + + assertRichProtocolStateValidity(t, actualByBlockID) + }) +} + +// assertRichProtocolStateValidity checks if RichProtocolState holds its invariant and is correctly populated by storage layer. +func assertRichProtocolStateValidity(t *testing.T, state *flow.RichEpochStateEntry) { + // invariants: + // - CurrentEpochSetup and CurrentEpochCommit are for the same epoch. Never nil. 
+	// - CurrentEpochSetup and CurrentEpochCommit IDs match respective commitments in the `MinEpochStateEntry`.
+	assert.Equal(t, state.CurrentEpochSetup.Counter, state.CurrentEpochCommit.Counter, "current epoch setup and commit should be for the same epoch")
+	assert.Equal(t, state.CurrentEpochSetup.ID(), state.MinEpochStateEntry.CurrentEpoch.SetupID, "epoch setup should be for correct event ID")
+	assert.Equal(t, state.CurrentEpochCommit.ID(), state.MinEpochStateEntry.CurrentEpoch.CommitID, "epoch commit should be for correct event ID")
+
+	var (
+		previousEpochParticipants flow.IdentityList
+		err                       error
+	)
+	// invariant: PreviousEpochSetup and PreviousEpochCommit should be present if respective ID is not zero.
+	if state.PreviousEpoch != nil {
+		// invariant: PreviousEpochSetup and PreviousEpochCommit are for the same epoch. Never nil.
+		assert.Equal(t, state.PreviousEpochSetup.Counter+1, state.CurrentEpochSetup.Counter, "current epoch (%d) should be following right after previous epoch (%d)", state.CurrentEpochSetup.Counter, state.PreviousEpochSetup.Counter)
+		assert.Equal(t, state.PreviousEpochSetup.Counter, state.PreviousEpochCommit.Counter, "previous epoch setup and commit should be for the same epoch")
+
+		// invariant: PreviousEpochSetup and PreviousEpochCommit IDs match the respective commitments in the `MinEpochStateEntry`. Never nil.
+		assert.Equal(t, state.PreviousEpochSetup.ID(), state.MinEpochStateEntry.PreviousEpoch.SetupID, "epoch setup should be for correct event ID")
+		assert.Equal(t, state.PreviousEpochCommit.ID(), state.MinEpochStateEntry.PreviousEpoch.CommitID, "epoch commit should be for correct event ID")
+
+		// invariant: ComposeFullIdentities ensures that we can build full identities of previous epoch's active participants. This step also confirms that the
+		// previous epoch's `Participants` [IdentitySkeletons] and `ActiveIdentities` [DynamicIdentity properties] list the same nodes in canonical ordering.
+		previousEpochParticipants, err = flow.ComposeFullIdentities(
+			state.PreviousEpochSetup.Participants,
+			state.PreviousEpoch.ActiveIdentities,
+			flow.EpochParticipationStatusActive,
+		)
+		assert.NoError(t, err, "should be able to reconstruct previous epoch active participants")
+		// Function `ComposeFullIdentities` verified that `Participants` and `ActiveIdentities` have identical ordering w.r.t. nodeID.
+		// By construction, `previousEpochParticipants` lists the full Identities in the same ordering as `Participants` and
+		// `ActiveIdentities`. By confirming that `previousEpochParticipants` follows canonical ordering, we can conclude that
+		// also `Participants` and `ActiveIdentities` are canonically ordered.
+		require.True(t, previousEpochParticipants.Sorted(flow.Canonical[flow.Identity]), "participants in previous epoch's setup event are not in canonical order")
+	}
+
+	// invariant: ComposeFullIdentities ensures that we can build full identities of current epoch's *active* participants. This step also confirms that the
+	// current epoch's `Participants` [IdentitySkeletons] and `ActiveIdentities` [DynamicIdentity properties] list the same nodes in canonical ordering.
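+	// For intuition: `ComposeFullIdentities` is understood to zip its two inputs pairwise,
+	//
+	//	full[i] = Participants[i] (IdentitySkeleton) + ActiveIdentities[i] (dynamic properties) + given status,
+	//
+	// erroring if the lists differ in length or disagree on any node ID; this is
+	// what lets it double as the ordering/consistency check described above.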
+ participantsFromCurrentEpochSetup, err := flow.ComposeFullIdentities( + state.CurrentEpochSetup.Participants, + state.CurrentEpoch.ActiveIdentities, + flow.EpochParticipationStatusActive, + ) + assert.NoError(t, err, "should be able to reconstruct current epoch active participants") + require.True(t, participantsFromCurrentEpochSetup.Sorted(flow.Canonical[flow.Identity]), "participants in current epoch's setup event are not in canonical order") + + // invariants for `CurrentEpochIdentityTable`: + // - full identity table containing *active* nodes for the current epoch + weight-zero identities of adjacent epoch + // - Identities are sorted in canonical order. Without duplicates. Never nil. + var allIdentities, participantsFromNextEpochSetup flow.IdentityList + if state.NextEpoch != nil { + // setup/commit phase + // invariant: ComposeFullIdentities ensures that we can build full identities of next epoch's *active* participants. This step also confirms that the + // next epoch's `Participants` [IdentitySkeletons] and `ActiveIdentities` [DynamicIdentity properties] list the same nodes in canonical ordering. + participantsFromNextEpochSetup, err = flow.ComposeFullIdentities( + state.NextEpochSetup.Participants, + state.NextEpoch.ActiveIdentities, + flow.EpochParticipationStatusActive, + ) + assert.NoError(t, err, "should be able to reconstruct next epoch active participants") + allIdentities = participantsFromCurrentEpochSetup.Union(participantsFromNextEpochSetup.Copy().Map(mapfunc.WithEpochParticipationStatus(flow.EpochParticipationStatusJoining))) + } else { + // staking phase + allIdentities = participantsFromCurrentEpochSetup.Union(previousEpochParticipants.Copy().Map(mapfunc.WithEpochParticipationStatus(flow.EpochParticipationStatusLeaving))) + } + assert.Equal(t, allIdentities, state.CurrentEpochIdentityTable, "identities should be a full identity table for the current epoch, without duplicates") + require.True(t, allIdentities.Sorted(flow.Canonical[flow.Identity]), "current epoch's identity table is not in canonical order") + + // check next epoch; only applicable during setup/commit phase + if state.NextEpoch == nil { // during staking phase, next epoch is not yet specified; hence there is nothing else to check + return + } + + // invariants: + // - NextEpochSetup and NextEpochCommit are for the same epoch. Never nil. + // - NextEpochSetup and NextEpochCommit IDs match respective commitments in the `MinEpochStateEntry`. + assert.Equal(t, state.CurrentEpochSetup.Counter+1, state.NextEpochSetup.Counter, "next epoch (%d) should be following right after current epoch (%d)", state.NextEpochSetup.Counter, state.CurrentEpochSetup.Counter) + assert.Equal(t, state.NextEpochSetup.Counter, state.NextEpochCommit.Counter, "next epoch setup and commit should be for the same epoch") + assert.Equal(t, state.NextEpochSetup.ID(), state.NextEpoch.SetupID, "epoch setup should be for correct event ID") + assert.Equal(t, state.NextEpochCommit.ID(), state.NextEpoch.CommitID, "epoch commit should be for correct event ID") + + // invariants for `NextEpochIdentityTable`: + // - full identity table containing *active* nodes for next epoch + weight-zero identities of current epoch + // - Identities are sorted in canonical order. Without duplicates. Never nil. 
+ allIdentities = participantsFromNextEpochSetup.Union(participantsFromCurrentEpochSetup.Copy().Map(mapfunc.WithEpochParticipationStatus(flow.EpochParticipationStatusLeaving))) + assert.Equal(t, allIdentities, state.NextEpochIdentityTable, "identities should be a full identity table for the next epoch, without duplicates") +} diff --git a/storage/store/epoch_setups.go b/storage/store/epoch_setups.go new file mode 100644 index 00000000000..56048c58d43 --- /dev/null +++ b/storage/store/epoch_setups.go @@ -0,0 +1,60 @@ +package store + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +type EpochSetups struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.EpochSetup] +} + +// NewEpochSetups instantiates a new EpochSetups storage. +func NewEpochSetups(collector module.CacheMetrics, db storage.DB) *EpochSetups { + + store := func(rw storage.ReaderBatchWriter, id flow.Identifier, setup *flow.EpochSetup) error { + return operation.InsertEpochSetup(rw.Writer(), id, setup) + } + + retrieve := func(r storage.Reader, id flow.Identifier) (*flow.EpochSetup, error) { + var setup flow.EpochSetup + err := operation.RetrieveEpochSetup(r, id, &setup) + return &setup, err + } + + es := &EpochSetups{ + db: db, + cache: newCache[flow.Identifier, *flow.EpochSetup](collector, metrics.ResourceEpochSetup, + withLimit[flow.Identifier, *flow.EpochSetup](4*flow.DefaultTransactionExpiry), + withStore(store), + withRetrieve(retrieve)), + } + + return es +} + +// No errors are expected during normal operation. +func (es *EpochSetups) BatchStore(rw storage.ReaderBatchWriter, setup *flow.EpochSetup) error { + return es.cache.PutTx(rw, setup.ID(), setup) +} + +func (es *EpochSetups) retrieveTx(setupID flow.Identifier) (*flow.EpochSetup, error) { + val, err := es.cache.Get(es.db.Reader(), setupID) + if err != nil { + return nil, fmt.Errorf("could not retrieve EpochSetup event with id %x: %w", setupID, err) + } + return val, nil +} + +// ByID will return the EpochSetup event by its ID. 
+// Error returns:
+// * storage.ErrNotFound if no EpochSetup with the ID exists
+func (es *EpochSetups) ByID(setupID flow.Identifier) (*flow.EpochSetup, error) {
+	return es.retrieveTx(setupID)
+}
diff --git a/storage/store/epoch_setups_test.go b/storage/store/epoch_setups_test.go
new file mode 100644
index 00000000000..d6f1548e6ea
--- /dev/null
+++ b/storage/store/epoch_setups_test.go
@@ -0,0 +1,45 @@
+package store_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/unittest"
+
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/storage/store"
+)
+
+// TestEpochSetupStoreAndRetrieve tests that a setup can be stored, retrieved, and attempted to be stored again without an error
+func TestEpochSetupStoreAndRetrieve(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		s := store.NewEpochSetups(metrics, db)
+
+		// attempt to get a setup that doesn't exist
+		_, err := s.ByID(unittest.IdentifierFixture())
+		assert.ErrorIs(t, err, storage.ErrNotFound)
+
+		// store a setup in db
+		expected := unittest.EpochSetupFixture()
+		err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return s.BatchStore(rw, expected)
+		})
+		require.NoError(t, err)
+
+		// retrieve the setup by ID
+		actual, err := s.ByID(expected.ID())
+		require.NoError(t, err)
+		assert.Equal(t, expected, actual)
+
+		// test storing same epoch setup
+		err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return s.BatchStore(rw, expected)
+		})
+		require.NoError(t, err)
+	})
+}
diff --git a/storage/store/events.go b/storage/store/events.go
new file mode 100644
index 00000000000..dc6283e8d02
--- /dev/null
+++ b/storage/store/events.go
@@ -0,0 +1,221 @@
+package store
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+type Events struct {
+	db    storage.DB
+	cache *Cache[flow.Identifier, []flow.Event]
+}
+
+var _ storage.Events = (*Events)(nil)
+
+func NewEvents(collector module.CacheMetrics, db storage.DB) *Events {
+	retrieve := func(r storage.Reader, blockID flow.Identifier) ([]flow.Event, error) {
+		var events []flow.Event
+		err := operation.LookupEventsByBlockID(r, blockID, &events)
+		return events, err
+	}
+
+	remove := func(rw storage.ReaderBatchWriter, blockID flow.Identifier) error {
+		return operation.RemoveEventsByBlockID(rw.GlobalReader(), rw.Writer(), blockID)
+	}
+
+	return &Events{
+		db: db,
+		cache: newCache(collector, metrics.ResourceEvents,
+			withStore(noopStore[flow.Identifier, []flow.Event]),
+			withRetrieve(retrieve),
+			withRemove[flow.Identifier, []flow.Event](remove),
+		)}
+}
+
+// BatchStore stores events keyed by a blockID in the provided batch.
+// No errors are expected during normal operation, but it may return a generic error
+// if Badger fails to process the request.
+func (e *Events) BatchStore(blockID flow.Identifier, blockEvents []flow.EventsList, batch storage.ReaderBatchWriter) error {
+	writer := batch.Writer()
+
+	// pre-allocating and indexing the slice is faster than appending
+	sliceSize := 0
+	for _, b := range blockEvents {
+		sliceSize += len(b)
+	}
+
+	combinedEvents := make([]flow.Event, sliceSize)
+
+	eventIndex := 0
+
+	for _,
events := range blockEvents { + for _, event := range events { + err := operation.InsertEvent(writer, blockID, event) + if err != nil { + return fmt.Errorf("cannot batch insert event: %w", err) + } + combinedEvents[eventIndex] = event + eventIndex++ + } + } + + callback := func() { + e.cache.Insert(blockID, combinedEvents) + } + storage.OnCommitSucceed(batch, callback) + return nil +} + +// Store will store events for the given block ID +func (e *Events) Store(blockID flow.Identifier, blockEvents []flow.EventsList) error { + return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return e.BatchStore(blockID, blockEvents, rw) + }) +} + +// ByBlockID returns the events for the given block ID +// Note: This method will return an empty slice and no error if no entries for the blockID are found +func (e *Events) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) { + val, err := e.cache.Get(e.db.Reader(), blockID) + if err != nil { + return nil, err + } + return val, nil +} + +// ByBlockIDTransactionID returns the events for the given block ID and transaction ID +// Note: This method will return an empty slice and no error if no entries for the blockID are found +func (e *Events) ByBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) ([]flow.Event, error) { + events, err := e.ByBlockID(blockID) + if err != nil { + return nil, err + } + + var matched []flow.Event + for _, event := range events { + if event.TransactionID == txID { + matched = append(matched, event) + } + } + return matched, nil +} + +// ByBlockIDTransactionIndex returns the events for the given block ID and transaction index +// Note: This method will return an empty slice and no error if no entries for the blockID are found +func (e *Events) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) ([]flow.Event, error) { + events, err := e.ByBlockID(blockID) + if err != nil { + return nil, err + } + + var matched []flow.Event + for _, event := range events { + if event.TransactionIndex == txIndex { + matched = append(matched, event) + } + } + return matched, nil +} + +// ByBlockIDEventType returns the events for the given block ID and event type +// Note: This method will return an empty slice and no error if no entries for the blockID are found +func (e *Events) ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error) { + events, err := e.ByBlockID(blockID) + if err != nil { + return nil, err + } + + var matched []flow.Event + for _, event := range events { + if event.Type == eventType { + matched = append(matched, event) + } + } + return matched, nil +} + +// RemoveByBlockID removes events by block ID +func (e *Events) RemoveByBlockID(blockID flow.Identifier) error { + return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return e.BatchRemoveByBlockID(blockID, rw) + }) +} + +// BatchRemoveByBlockID removes events keyed by a blockID in provided batch +// No errors are expected during normal operation, even if no entries are matched. +// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
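As a usage sketch of the `Events` API defined in this file (hedged: the function name is purely illustrative, and the fixture helpers and noop metrics collector are borrowed from the package's tests):

```go
// Illustrative only: store one block's events in a batch, then query them.
func eventsUsageSketch(t *testing.T, db storage.DB) {
	events := store.NewEvents(metrics.NewNoopCollector(), db)

	blockID := unittest.IdentifierFixture()
	evt := unittest.EventFixture(unittest.Event.WithEventType(flow.EventAccountCreated))
	lists := []flow.EventsList{{evt}}

	// All writes go into one batch; the internal cache is only populated by
	// an OnCommitSucceed callback once the batch has durably committed.
	require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return events.BatchStore(blockID, lists, rw)
	}))

	// The query methods fetch the per-block slice and filter it in memory.
	matched, err := events.ByBlockIDEventType(blockID, flow.EventAccountCreated)
	require.NoError(t, err)
	require.Len(t, matched, 1)
}
```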
+func (e *Events) BatchRemoveByBlockID(blockID flow.Identifier, rw storage.ReaderBatchWriter) error {
+	return e.cache.RemoveTx(rw, blockID)
+}
+
+type ServiceEvents struct {
+	db    storage.DB
+	cache *Cache[flow.Identifier, []flow.Event]
+}
+
+func NewServiceEvents(collector module.CacheMetrics, db storage.DB) *ServiceEvents {
+	retrieve := func(r storage.Reader, blockID flow.Identifier) ([]flow.Event, error) {
+		var events []flow.Event
+		err := operation.LookupServiceEventsByBlockID(r, blockID, &events)
+		return events, err
+	}
+
+	remove := func(rw storage.ReaderBatchWriter, blockID flow.Identifier) error {
+		return operation.RemoveServiceEventsByBlockID(rw.GlobalReader(), rw.Writer(), blockID)
+	}
+
+	return &ServiceEvents{
+		db: db,
+		cache: newCache(collector, metrics.ResourceEvents,
+			withStore(noopStore[flow.Identifier, []flow.Event]),
+			withRetrieve(retrieve),
+			withRemove[flow.Identifier, []flow.Event](remove),
+		)}
+}
+
+// BatchStore stores service events keyed by a blockID in the provided batch.
+// No errors are expected during normal operation.
+// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
+func (e *ServiceEvents) BatchStore(blockID flow.Identifier, events []flow.Event, rw storage.ReaderBatchWriter) error {
+	writer := rw.Writer()
+	for _, event := range events {
+		err := operation.InsertServiceEvent(writer, blockID, event)
+		if err != nil {
+			return fmt.Errorf("cannot batch insert service event: %w", err)
+		}
+	}
+
+	callback := func() {
+		e.cache.Insert(blockID, events)
+	}
+	storage.OnCommitSucceed(rw, callback)
+	return nil
+}
+
+// ByBlockID returns the service events for the given block ID
+func (e *ServiceEvents) ByBlockID(blockID flow.Identifier) ([]flow.Event, error) {
+	val, err := e.cache.Get(e.db.Reader(), blockID)
+	if err != nil {
+		return nil, err
+	}
+	return val, nil
+}
+
+// RemoveByBlockID removes service events by block ID
+func (e *ServiceEvents) RemoveByBlockID(blockID flow.Identifier) error {
+	return e.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		return operation.RemoveServiceEventsByBlockID(rw.GlobalReader(), rw.Writer(), blockID)
+	})
+}
+
+// BatchRemoveByBlockID removes service events keyed by a blockID in the provided batch.
+// No errors are expected during normal operation, even if no entries are matched.
+// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
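Both batch-store methods in this file follow the same commit-callback pattern; distilled here as a hedged sketch over the file's own types, not additional production code:

```go
// The cache is populated only after the batch durably commits, so a failed
// or discarded batch can never leave entries cached for unpersisted data.
func batchStoreWithCacheOnCommit(
	rw storage.ReaderBatchWriter,
	cache *Cache[flow.Identifier, []flow.Event],
	blockID flow.Identifier,
	events []flow.Event,
) error {
	for _, event := range events {
		if err := operation.InsertServiceEvent(rw.Writer(), blockID, event); err != nil {
			return fmt.Errorf("cannot batch insert service event: %w", err)
		}
	}
	storage.OnCommitSucceed(rw, func() {
		cache.Insert(blockID, events)
	})
	return nil
}
```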
+func (e *ServiceEvents) BatchRemoveByBlockID(blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return e.cache.RemoveTx(rw, blockID) +} diff --git a/storage/store/events_test.go b/storage/store/events_test.go new file mode 100644 index 00000000000..3db86249e8d --- /dev/null +++ b/storage/store/events_test.go @@ -0,0 +1,192 @@ +package store_test + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestEventStoreRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + events := store.NewEvents(metrics, db) + + blockID := unittest.IdentifierFixture() + tx1ID := unittest.IdentifierFixture() + tx2ID := unittest.IdentifierFixture() + evt1_1 := unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithTransactionIndex(0), + unittest.Event.WithEventIndex(0), + unittest.Event.WithTransactionID(tx1ID), + ) + evt1_2 := unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountCreated), + unittest.Event.WithTransactionIndex(1), + unittest.Event.WithEventIndex(1), + unittest.Event.WithTransactionID(tx2ID), + ) + + evt2_1 := unittest.EventFixture( + unittest.Event.WithEventType(flow.EventAccountUpdated), + unittest.Event.WithTransactionIndex(2), + unittest.Event.WithEventIndex(2), + unittest.Event.WithTransactionID(tx2ID), + ) + + expected := []flow.EventsList{ + {evt1_1, evt1_2}, + {evt2_1}, + } + + require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + // store event + return events.BatchStore(blockID, expected, rw) + })) + + // retrieve by blockID + actual, err := events.ByBlockID(blockID) + require.NoError(t, err) + require.Len(t, actual, 3) + require.Contains(t, actual, evt1_1) + require.Contains(t, actual, evt1_2) + require.Contains(t, actual, evt2_1) + + // retrieve by blockID and event type + actual, err = events.ByBlockIDEventType(blockID, flow.EventAccountCreated) + require.NoError(t, err) + require.Len(t, actual, 2) + require.Contains(t, actual, evt1_1) + require.Contains(t, actual, evt1_2) + + actual, err = events.ByBlockIDEventType(blockID, flow.EventAccountUpdated) + require.NoError(t, err) + require.Len(t, actual, 1) + require.Contains(t, actual, evt2_1) + + evts := systemcontracts.ServiceEventsForChain(flow.Emulator) + + actual, err = events.ByBlockIDEventType(blockID, evts.EpochSetup.EventType()) + require.NoError(t, err) + require.Len(t, actual, 0) + + // retrieve by blockID and transaction id + actual, err = events.ByBlockIDTransactionID(blockID, tx1ID) + require.NoError(t, err) + require.Len(t, actual, 1) + require.Contains(t, actual, evt1_1) + + // retrieve by blockID and transaction index + actual, err = events.ByBlockIDTransactionIndex(blockID, 1) + require.NoError(t, err) + require.Len(t, actual, 1) + require.Contains(t, actual, evt1_2) + + // test loading from database + + newStore := store.NewEvents(metrics, db) + actual, err = newStore.ByBlockID(blockID) + require.NoError(t, err) + require.Len(t, actual, 3) + require.Contains(t, actual, evt1_1) + require.Contains(t, actual, evt1_2) + require.Contains(t, actual, evt2_1) + }) +} + +func 
TestEventRetrieveWithoutStore(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		events := store.NewEvents(metrics, db)
+
+		blockID := unittest.IdentifierFixture()
+		txID := unittest.IdentifierFixture()
+		txIndex := rand.Uint32()
+
+		// retrieve by blockID
+		evts, err := events.ByBlockID(blockID)
+		require.NoError(t, err)
+		require.True(t, len(evts) == 0)
+
+		// retrieve by blockID and event type
+		evts, err = events.ByBlockIDEventType(blockID, flow.EventAccountCreated)
+		require.NoError(t, err)
+		require.True(t, len(evts) == 0)
+
+		// retrieve by blockID and transaction id
+		evts, err = events.ByBlockIDTransactionID(blockID, txID)
+		require.NoError(t, err)
+		require.True(t, len(evts) == 0)
+
+		// retrieve by blockID and transaction index
+		evts, err = events.ByBlockIDTransactionIndex(blockID, txIndex)
+		require.NoError(t, err)
+		require.True(t, len(evts) == 0)
+	})
+}
+
+func TestEventStoreAndRemove(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		store := store.NewEvents(metrics, db)
+
+		// Create and store an event
+		blockID := unittest.IdentifierFixture()
+		tx1ID := unittest.IdentifierFixture()
+		tx2ID := unittest.IdentifierFixture()
+		evt1_1 := unittest.EventFixture(
+			unittest.Event.WithEventType(flow.EventAccountCreated),
+			unittest.Event.WithTransactionIndex(0),
+			unittest.Event.WithEventIndex(0),
+			unittest.Event.WithTransactionID(tx1ID),
+		)
+		evt1_2 := unittest.EventFixture(
+			unittest.Event.WithEventType(flow.EventAccountCreated),
+			unittest.Event.WithTransactionIndex(1),
+			unittest.Event.WithEventIndex(1),
+			unittest.Event.WithTransactionID(tx2ID),
+		)
+
+		evt2_1 := unittest.EventFixture(
+			unittest.Event.WithEventType(flow.EventAccountUpdated),
+			unittest.Event.WithTransactionIndex(2),
+			unittest.Event.WithEventIndex(2),
+			unittest.Event.WithTransactionID(tx2ID),
+		)
+
+		expected := []flow.EventsList{
+			{evt1_1, evt1_2},
+			{evt2_1},
+		}
+
+		err := store.Store(blockID, expected)
+		require.NoError(t, err)
+
+		// Ensure it exists
+		event, err := store.ByBlockID(blockID)
+		require.NoError(t, err)
+		require.Len(t, event, 3)
+		require.Contains(t, event, evt1_1)
+		require.Contains(t, event, evt1_2)
+		require.Contains(t, event, evt2_1)
+
+		// Remove it
+		err = store.RemoveByBlockID(blockID)
+		require.NoError(t, err)
+
+		// Ensure it no longer exists
+		event, err = store.ByBlockID(blockID)
+		require.NoError(t, err)
+		require.Len(t, event, 0)
+	})
+}
diff --git a/storage/store/execution_fork_evidence.go b/storage/store/execution_fork_evidence.go
new file mode 100644
index 00000000000..a1369d5e35a
--- /dev/null
+++ b/storage/store/execution_fork_evidence.go
@@ -0,0 +1,49 @@
+package store
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+// ExecutionForkEvidence represents persistent storage for execution fork evidence.
+type ExecutionForkEvidence struct {
+	db storage.DB
+}
+
+var _ storage.ExecutionForkEvidence = (*ExecutionForkEvidence)(nil)
+
+// NewExecutionForkEvidence creates a new ExecutionForkEvidence store.
+func NewExecutionForkEvidence(db storage.DB) *ExecutionForkEvidence {
+	return &ExecutionForkEvidence{db: db}
+}
+
+// StoreIfNotExists stores the given conflictingSeals to the database.
+// This is a no-op if there is already a record in the database with the same key.
+// The caller must hold the [storage.LockInsertExecutionForkEvidence] lock.
+// No errors are expected during normal operations.
+func (efe *ExecutionForkEvidence) StoreIfNotExists(lctx lockctx.Proof, conflictingSeals []*flow.IncorporatedResultSeal) error {
+	return efe.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		return operation.InsertExecutionForkEvidence(lctx, rw, conflictingSeals)
+	})
+}
+
+// Retrieve reads conflicting seals from the database.
+// No error is returned if the database record doesn't exist.
+// No errors are expected during normal operations.
+func (efe *ExecutionForkEvidence) Retrieve() ([]*flow.IncorporatedResultSeal, error) {
+	var conflictingSeals []*flow.IncorporatedResultSeal
+	err := operation.RetrieveExecutionForkEvidence(efe.db.Reader(), &conflictingSeals)
+	if errors.Is(err, storage.ErrNotFound) {
+		return nil, nil // No evidence in the database.
+	}
+	if err != nil {
+		return nil, fmt.Errorf("failed to load evidence of whether or not an execution fork occurred: %w", err)
+	}
+	return conflictingSeals, nil
+}
diff --git a/storage/store/execution_fork_evidence_test.go b/storage/store/execution_fork_evidence_test.go
new file mode 100644
index 00000000000..2e87bb706c0
--- /dev/null
+++ b/storage/store/execution_fork_evidence_test.go
@@ -0,0 +1,102 @@
+package store_test
+
+import (
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestExecutionForkEvidenceStoreAndRetrieve(t *testing.T) {
+	t.Run("Retrieving non-existing evidence should return no error", func(t *testing.T) {
+		dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+			evidenceStore := store.NewExecutionForkEvidence(db)
+
+			conflictingSeals, err := evidenceStore.Retrieve()
+			require.NoError(t, err)
+			require.Equal(t, 0, len(conflictingSeals))
+		})
+	})
+
+	t.Run("Store and read evidence", func(t *testing.T) {
+		dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+			lockManager := storage.NewTestingLockManager()
+			block := unittest.BlockFixture()
+
+			conflictingSeals := make([]*flow.IncorporatedResultSeal, 2)
+			for i := range len(conflictingSeals) {
+				conflictingSeals[i] = unittest.IncorporatedResultSeal.Fixture(
+					unittest.IncorporatedResultSeal.WithResult(
+						unittest.ExecutionResultFixture(
+							unittest.WithBlock(block))))
+			}
+
+			evidenceStore := store.NewExecutionForkEvidence(db)
+
+			err := unittest.WithLock(t, lockManager, storage.LockInsertExecutionForkEvidence, func(lctx lockctx.Context) error {
+				return evidenceStore.StoreIfNotExists(lctx, conflictingSeals)
+			})
+			require.NoError(t, err)
+
+			retrievedConflictingSeals, err := evidenceStore.Retrieve()
+			require.NoError(t, err)
+			require.Equal(t, conflictingSeals, retrievedConflictingSeals)
+		})
+	})
+
+	t.Run("Don't overwrite evidence", func(t *testing.T) {
+		dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+			lockManager := storage.NewTestingLockManager()
+			block := unittest.BlockFixture()
+
+			conflictingSeals := make([]*flow.IncorporatedResultSeal, 2)
+			for i := range len(conflictingSeals) {
+				conflictingSeals[i] = unittest.IncorporatedResultSeal.Fixture(
+					unittest.IncorporatedResultSeal.WithResult(
+						unittest.ExecutionResultFixture(
+							unittest.WithBlock(block))))
+			}
+
+			conflictingSeals2 := make([]*flow.IncorporatedResultSeal, 2)
+			for i :=
range len(conflictingSeals2) {
+				conflictingSeals2[i] = unittest.IncorporatedResultSeal.Fixture(
+					unittest.IncorporatedResultSeal.WithResult(
+						unittest.ExecutionResultFixture(
+							unittest.WithBlock(block))))
+			}
+
+			evidenceStore := store.NewExecutionForkEvidence(db)
+
+			// Store and read evidence.
+			{
+				err := unittest.WithLock(t, lockManager, storage.LockInsertExecutionForkEvidence, func(lctx lockctx.Context) error {
+					return evidenceStore.StoreIfNotExists(lctx, conflictingSeals)
+				})
+				require.NoError(t, err)
+
+				retrievedConflictingSeals, err := evidenceStore.Retrieve()
+				require.NoError(t, err)
+				require.Equal(t, conflictingSeals, retrievedConflictingSeals)
+			}
+
+			// Overwriting existing evidence is a no-op.
+			{
+				err := unittest.WithLock(t, lockManager, storage.LockInsertExecutionForkEvidence, func(lctx lockctx.Context) error {
+					return evidenceStore.StoreIfNotExists(lctx, conflictingSeals2)
+				})
+				require.NoError(t, err)
+
+				retrievedConflictingSeals, err := evidenceStore.Retrieve()
+				require.NoError(t, err)
+				require.Equal(t, conflictingSeals, retrievedConflictingSeals)
+				require.NotEqual(t, conflictingSeals2, retrievedConflictingSeals)
+			}
+		})
+	})
+}
diff --git a/storage/store/group_cache.go b/storage/store/group_cache.go
new file mode 100644
index 00000000000..8ca0279498e
--- /dev/null
+++ b/storage/store/group_cache.go
@@ -0,0 +1,59 @@
+package store
+
+import (
+	lru "github.com/fxamacker/golang-lru/v2"
+
+	"github.com/onflow/flow-go/module"
+)
+
+// GroupCache extends the Cache with a primary index on K and a secondary index on G,
+// which can be used to remove multiple cached items efficiently.
+// A common use case of GroupCache is to cache data by a concatenated key
+// (block ID and transaction ID) for faster retrieval, and
+// to remove cached items by the first key (block ID).
+// G is often a prefix of K, which can be useful, but G can be anything
+// comparable; it doesn't need to be a prefix.
+type GroupCache[G comparable, K comparable, V any] struct {
+	Cache[K, V]
+}
+
+func newGroupCache[G comparable, K comparable, V any](
+	collector module.CacheMetrics,
+	resourceName string,
+	groupFromKey func(K) G,
+	options ...func(*Cache[K, V]),
+) (*GroupCache[G, K, V], error) {
+	c := Cache[K, V]{
+		metrics:       collector,
+		limit:         1000,
+		store:         noStore[K, V],
+		storeWithLock: noStoreWithLock[K, V],
+		retrieve:      noRetrieve[K, V],
+		remove:        noRemove[K],
+		resource:      resourceName,
+	}
+	for _, option := range options {
+		option(&c)
+	}
+	var err error
+	c.cache, err = lru.NewGroupCache[G, K, V](int(c.limit), groupFromKey)
+	if err != nil {
+		return nil, err
+	}
+	c.metrics.CacheEntries(c.resource, uint(c.cache.Len()))
+	return &GroupCache[G, K, V]{
+		Cache: c,
+	}, nil
+}
+
+// RemoveGroup removes all cached items associated with the given group.
+func (c *GroupCache[G, K, V]) RemoveGroup(group G) int {
+	return c.cache.(*lru.GroupCache[G, K, V]).RemoveGroup(group)
+}
+
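To make grouped removal concrete, here is a hedged sketch in the style of the tests below, assuming this package's test helpers (`mustNewGroupCache`, the noop store/retrieve functors, and `FirstIDFromTwoIdentifier`):

```go
// Illustrative only: keys concatenate (blockID, txID); the group function
// extracts the leading block ID, so one call evicts a whole block's entries.
func groupCacheUsageSketch(t *testing.T) {
	cache := mustNewGroupCache(
		t,
		metrics.NewNoopCollector(),
		"example",
		FirstIDFromTwoIdentifier, // group = the leading block ID of each key
		withStore(noopStore[TwoIdentifier, []byte]),
		withRetrieve(noRetrieve[TwoIdentifier, []byte]),
	)

	blockID := unittest.IdentifierFixture()
	txID := unittest.IdentifierFixture()

	var key TwoIdentifier
	n := copy(key[:], blockID[:])
	copy(key[n:], txID[:])
	cache.Insert(key, []byte("payload"))

	// Evict every cached entry for this block in a single locked pass.
	removed := cache.RemoveGroup(blockID)
	require.Equal(t, 1, removed)
}
```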
+// RemoveGroups removes all cached items associated with the given groups.
+// RemoveGroups should be used when removing multiple groups, to reduce
+// the number of times the cache is locked.
+func (c *GroupCache[G, K, V]) RemoveGroups(groups []G) int {
+	return c.cache.(*lru.GroupCache[G, K, V]).RemoveGroups(groups)
+}
diff --git a/storage/store/group_cache_test.go b/storage/store/group_cache_test.go
new file mode 100644
index 00000000000..809e3c2b37f
--- /dev/null
+++ b/storage/store/group_cache_test.go
@@ -0,0 +1,219 @@
+package store
+
+import (
+	"bytes"
+	"crypto/rand"
+	"slices"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestGroupCacheRemoveGroups(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		store := func(rw storage.ReaderBatchWriter, key TwoIdentifier, val []byte) error {
+			return operation.UpsertByKey(rw.Writer(), key[:], val)
+		}
+
+		retrieve := func(r storage.Reader, key TwoIdentifier) ([]byte, error) {
+			var val []byte
+			err := operation.RetrieveByKey(r, key[:], &val)
+			if err != nil {
+				return nil, err
+			}
+			return val, nil
+		}
+
+		cache := mustNewGroupCache(
+			t,
+			metrics.NewNoopCollector(),
+			"test",
+			FirstIDFromTwoIdentifier, // cache records are grouped by first identifier of keys
+			withStore(store),
+			withRetrieve(retrieve),
+		)
+
+		const groupCount = 5
+		const itemCountPerGroup = 5
+
+		groupIDs := make([]flow.Identifier, 0, groupCount)
+		keyValuePairs := make(map[TwoIdentifier][]byte)
+		for range groupCount {
+			groupID := unittest.IdentifierFixture()
+
+			groupIDs = append(groupIDs, groupID)
+
+			for range itemCountPerGroup {
+				var itemID TwoIdentifier
+				n := copy(itemID[:], groupID[:])
+				_, _ = rand.Read(itemID[n:])
+
+				val := unittest.RandomBytes(128)
+
+				keyValuePairs[itemID] = val
+			}
+		}
+
+		// Store items in DB and cache
+		require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			for key, val := range keyValuePairs {
+				err := cache.PutTx(rw, key, val)
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		}))
+
+		// Retrieve stored items from cache
+		for key, val := range keyValuePairs {
+			cached, err := cache.Get(db.Reader(), key)
+			require.NoError(t, err)
+			require.Equal(t, val, cached)
+		}
+
+		// Remove items in the first group in DB.
+		groupIDToRemove := groupIDs[0]
+		groupIDs = groupIDs[1:]
+
+		err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return operation.RemoveByKeyPrefix(rw.GlobalReader(), rw.Writer(), groupIDToRemove[:])
+		})
+		require.NoError(t, err)
+
+		// Remove cached items in the first group
+		removedCount := cache.RemoveGroup(groupIDToRemove)
+		require.Equal(t, itemCountPerGroup, removedCount)
+
+		// Retrieve removed and stored items
+		for key, val := range keyValuePairs {
+			isRemovedItem := bytes.Equal(key[:flow.IdentifierLen], groupIDToRemove[:])
+
+			cached, err := cache.Get(db.Reader(), key)
+			if isRemovedItem {
+				require.ErrorIs(t, err, storage.ErrNotFound)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, val, cached)
+			}
+		}
+
+		// Remove group that is already removed
+		removedCount = cache.RemoveGroup(groupIDToRemove)
+		require.Equal(t, 0, removedCount)
+
+		// Remove all groups in DB except the last group.
+		groupIDsToRemove := groupIDs[:len(groupIDs)-1]
+		groupIDs = groupIDs[len(groupIDs)-1:]
+
+		err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			for _, groupID := range groupIDsToRemove {
+				err := operation.RemoveByKeyPrefix(rw.GlobalReader(), rw.Writer(), groupID[:])
+				if err != nil {
+					return err
+				}
+			}
+			return err
+		})
+		require.NoError(t, err)
+
+		// Remove all groups in cache except the last group.
+		removedCount = cache.RemoveGroups(groupIDsToRemove)
+		require.Equal(t, itemCountPerGroup*len(groupIDsToRemove), removedCount)
+
+		// Retrieve removed and stored items
+		for key, val := range keyValuePairs {
+			isRemovedItem := !slices.Contains(groupIDs, flow.Identifier(key[:flow.IdentifierLen]))
+
+			cached, err := cache.Get(db.Reader(), key)
+			if isRemovedItem {
+				require.ErrorIs(t, err, storage.ErrNotFound)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, val, cached)
+			}
+		}
+	})
+}
+
+func BenchmarkCacheRemoveGroup(b *testing.B) {
+	const txCountPerBlock = 5
+
+	benchmarks := []struct {
+		name        string
+		cacheSize   int
+		removeCount int
+	}{
+		{name: "cache size 1,000, remove count 25", cacheSize: 1_000, removeCount: 25},
+		{name: "cache size 2,000, remove count 25", cacheSize: 2_000, removeCount: 25},
+		{name: "cache size 3,000, remove count 25", cacheSize: 3_000, removeCount: 25},
+		{name: "cache size 4,000, remove count 25", cacheSize: 4_000, removeCount: 25},
+		{name: "cache size 5,000, remove count 25", cacheSize: 5_000, removeCount: 25},
+		{name: "cache size 6,000, remove count 25", cacheSize: 6_000, removeCount: 25},
+		{name: "cache size 7,000, remove count 25", cacheSize: 7_000, removeCount: 25},
+		{name: "cache size 8,000, remove count 25", cacheSize: 8_000, removeCount: 25},
+		{name: "cache size 9,000, remove count 25", cacheSize: 9_000, removeCount: 25},
+		{name: "cache size 10,000, remove count 25", cacheSize: 10_000, removeCount: 25},
+		{name: "cache size 20,000, remove count 25", cacheSize: 20_000, removeCount: 25},
+		{name: "cache size 10,000, remove count 5,000", cacheSize: 10_000, removeCount: 5_000},
+	}
+
+	for _, bm := range benchmarks {
+		b.Run(bm.name, func(b *testing.B) {
+			blockCount := bm.cacheSize/txCountPerBlock + 1
+
+			blockIDs := make([]flow.Identifier, blockCount)
+			for i := range len(blockIDs) {
+				blockIDs[i] = unittest.IdentifierFixture()
+			}
+
+			txIDs := make([]flow.Identifier, blockCount*txCountPerBlock)
+			for i := range len(txIDs) {
+				txIDs[i] = unittest.IdentifierFixture()
+			}
+
+			prefixCount := bm.removeCount / txCountPerBlock
+			var removePrefixes []flow.Identifier
+			for blockIDIndex := len(blockIDs) - 1; len(removePrefixes) < prefixCount; blockIDIndex-- {
+				blockID := blockIDs[blockIDIndex]
+				removePrefixes = append(removePrefixes, blockID)
+			}
+
+			b.ResetTimer()
+
+			for range b.N {
+				b.StopTimer()
+
+				cache := mustNewGroupCache(
+					b,
+					metrics.NewNoopCollector(),
+					metrics.ResourceTransactionResults,
+					func(key TwoIdentifier) flow.Identifier { return flow.Identifier(key[:flow.IdentifierLen]) },
+					withLimit[TwoIdentifier, struct{}](uint(bm.cacheSize)),
+					withStore(noopStore[TwoIdentifier, struct{}]),
+					withRetrieve(noRetrieve[TwoIdentifier, struct{}]),
+				)
+
+				for i, blockID := range blockIDs {
+					for _, txID := range txIDs[i*txCountPerBlock : (i+1)*txCountPerBlock] {
+						var key TwoIdentifier
+						n := copy(key[:], blockID[:])
+						copy(key[n:], txID[:])
+						cache.Insert(key, struct{}{})
+					}
+				}
+
+				b.StartTimer()
+
+				cache.RemoveGroups(removePrefixes)
+			}
+		})
+	}
+}
diff --git a/storage/store/guarantees.go
b/storage/store/guarantees.go new file mode 100644 index 00000000000..45e5b5eab85 --- /dev/null +++ b/storage/store/guarantees.go @@ -0,0 +1,120 @@ +package store + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// Guarantees implements persistent storage for collection guarantees. +type Guarantees struct { + db storage.DB + // cache is essentially an in-memory map from `CollectionGuarantee.ID()` -> `CollectionGuarantee` + cache *Cache[flow.Identifier, *flow.CollectionGuarantee] + + // byCollectionIdCache is essentially an in-memory map from `CollectionGuarantee.CollectionID` -> `CollectionGuarantee.ID()`. + // The full flow.CollectionGuarantee can be retrieved from the `cache` above. + byCollectionIdCache *Cache[flow.Identifier, flow.Identifier] +} + +var _ storage.Guarantees = (*Guarantees)(nil) + +// NewGuarantees creates a Guarantees instance, which stores collection guarantees. +// It supports storing, caching and retrieving by guaranteeID or the additionally indexed collection ID. +func NewGuarantees( + collector module.CacheMetrics, + db storage.DB, + cacheSize uint, + byCollectionIDCacheSize uint, +) *Guarantees { + + storeByGuaranteeIDWithLock := func(rw storage.ReaderBatchWriter, guaranteeID flow.Identifier, guarantee *flow.CollectionGuarantee) error { + return operation.InsertGuarantee(rw.Writer(), guaranteeID, guarantee) + } + + retrieveByGuaranteeID := func(r storage.Reader, guaranteeID flow.Identifier) (*flow.CollectionGuarantee, error) { + var guarantee flow.CollectionGuarantee + err := operation.RetrieveGuarantee(r, guaranteeID, &guarantee) + return &guarantee, err + } + + // While a collection guarantee can only be present once in the finalized chain, + // across different consensus forks we may encounter the same guarantee multiple times. + // On the happy path there is a 1:1 correspondence between CollectionGuarantees and Collections. + // However, the finalization status of guarantees is not yet verified by consensus nodes, + // nor is the possibility of byzantine collection nodes dealt with, so we check here that + // there are no conflicting guarantees for the same collection. 
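+	// To illustrate the intended semantics (hedged summary; the authoritative
+	// behavior is implemented by operation.IndexGuarantee):
+	//   - no index entry for collID yet                -> write index, return nil
+	//   - entry exists, same guarantee ID (idempotent) -> no-op, return nil
+	//   - entry exists, different guarantee ID         -> storage.ErrDataMismatch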
+ indexByCollectionID := operation.IndexGuarantee + + lookupByCollectionID := func(r storage.Reader, collID flow.Identifier) (flow.Identifier, error) { + var guaranteeID flow.Identifier + err := operation.LookupGuarantee(r, collID, &guaranteeID) + if err != nil { + return flow.ZeroID, fmt.Errorf("could not lookup guarantee ID for collection (%x): %w", collID[:], err) + } + return guaranteeID, nil + } + + g := &Guarantees{ + db: db, + cache: newCache(collector, metrics.ResourceGuarantee, + withLimit[flow.Identifier, *flow.CollectionGuarantee](cacheSize), + withStore(storeByGuaranteeIDWithLock), + withRetrieve(retrieveByGuaranteeID)), + byCollectionIdCache: newCache[flow.Identifier, flow.Identifier](collector, metrics.ResourceGuaranteeByCollectionID, + withLimit[flow.Identifier, flow.Identifier](byCollectionIDCacheSize), + withStoreWithLock(indexByCollectionID), + withRetrieve(lookupByCollectionID)), + } + + return g +} + +func (g *Guarantees) storeTx(lctx lockctx.Proof, rw storage.ReaderBatchWriter, guarantee *flow.CollectionGuarantee) error { + guaranteeID := guarantee.ID() + err := g.cache.PutTx(rw, guaranteeID, guarantee) + if err != nil { + return err + } + + err = g.byCollectionIdCache.PutWithLockTx(lctx, rw, guarantee.CollectionID, guaranteeID) + if err != nil { + return fmt.Errorf("could not index guarantee %x under collection %x: %w", + guaranteeID, guarantee.CollectionID[:], err) + } + + return nil +} + +func (g *Guarantees) retrieveTx(guaranteeID flow.Identifier) (*flow.CollectionGuarantee, error) { + val, err := g.cache.Get(g.db.Reader(), guaranteeID) + if err != nil { + return nil, err + } + return val, nil +} + +// ByID returns the [flow.CollectionGuarantee] by its ID. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no collection guarantee with the given Identifier is known. +func (g *Guarantees) ByID(guaranteeID flow.Identifier) (*flow.CollectionGuarantee, error) { + return g.retrieveTx(guaranteeID) +} + +// ByCollectionID retrieves the collection guarantee by collection ID. +// Expected errors during normal operations: +// - [storage.ErrNotFound] if no collection guarantee has been indexed for the given collection ID. +func (g *Guarantees) ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error) { + guaranteeID, err := g.byCollectionIdCache.Get(g.db.Reader(), collID) + if err != nil { + return nil, fmt.Errorf("could not lookup collection guarantee ID for collection (%x): %w", collID[:], err) + } + + return g.retrieveTx(guaranteeID) +} diff --git a/storage/store/guarantees_test.go b/storage/store/guarantees_test.go new file mode 100644 index 00000000000..de27ab3f5ab --- /dev/null +++ b/storage/store/guarantees_test.go @@ -0,0 +1,167 @@ +package store_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestGuaranteeStoreRetrieve tests storing and retrieving collection guarantees. +// Generally, collection guarantees are persisted as part of storing a block proposal -- we follow that approach here. 
+// We test the following:
+// - retrieving an unknown guarantee returns [storage.ErrNotFound]
+// - storing a guarantee as part of a block proposal and then retrieving it by its ID
+// - repeated storage of the same block returns [storage.ErrAlreadyExists]
+//   and the collection guarantee can still be retrieved
+// - storing a different block holding the same guarantee also works
+func TestGuaranteeStoreRetrieve(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		metrics := metrics.NewNoopCollector()
+		all := store.InitAll(metrics, db)
+		blocks := all.Blocks
+		guarantees := all.Guarantees
+
+		s := store.NewGuarantees(metrics, db, 1000, 1000)
+
+		// make block with a collection guarantee:
+		guarantee1 := unittest.CollectionGuaranteeFixture()
+		block := unittest.BlockWithGuaranteesFixture([]*flow.CollectionGuarantee{guarantee1})
+		proposal := unittest.ProposalFromBlock(block)
+
+		// attempt to retrieve (still) unknown guarantee
+		_, err := s.ByCollectionID(guarantee1.CollectionID)
+		require.ErrorIs(t, err, storage.ErrNotFound)
+
+		// store guarantee
+		err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx, rw, proposal)
+			})
+		})
+		require.NoError(t, err)
+
+		// retrieve the guarantee by the ID of the collection
+		actual, err := guarantees.ByCollectionID(guarantee1.CollectionID)
+		require.NoError(t, err)
+		require.Equal(t, guarantee1, actual)
+
+		// Repeated storage of the same block should return [storage.ErrAlreadyExists].
+		// Yet, the guarantee can still be retrieved.
+		err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx2 lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx2, rw, proposal)
+			})
+		})
+		require.ErrorIs(t, err, storage.ErrAlreadyExists)
+		actual, err = guarantees.ByCollectionID(guarantee1.CollectionID)
+		require.NoError(t, err)
+		require.Equal(t, guarantee1, actual)
+
+		// OK to store a different block holding the _same_ guarantee (this is possible across forks).
+ guarantee2 := unittest.CollectionGuaranteeFixture() + block2 := unittest.BlockWithGuaranteesFixture([]*flow.CollectionGuarantee{guarantee2, guarantee1}) + proposal2 := unittest.ProposalFromBlock(block2) + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx3 lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx3, rw, proposal2) + }) + }) + require.NoError(t, err) + // retrieving guarantee 1 (contained in both blocks) still works + actual, err = guarantees.ByCollectionID(guarantee1.CollectionID) + require.NoError(t, err) + require.Equal(t, guarantee1, actual) + // retrieving guarantee 2 (contained only in the second block): + actual, err = guarantees.ByCollectionID(guarantee2.CollectionID) + require.NoError(t, err) + require.Equal(t, guarantee2, actual) + }) +} + +// Storing the same guarantee should be idempotent +func TestStoreDuplicateGuarantee(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + all := store.InitAll(metrics, db) + blocks := all.Blocks + store1 := all.Guarantees + expected := unittest.CollectionGuaranteeFixture() + block := unittest.BlockWithGuaranteesFixture([]*flow.CollectionGuarantee{expected}) + proposal := unittest.ProposalFromBlock(block) + + // store guarantee + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, proposal) + }) + }) + require.NoError(t, err) + + // storage of the same guarantee should be idempotent + block2 := unittest.BlockWithGuaranteesFixture([]*flow.CollectionGuarantee{expected}) + proposal2 := unittest.ProposalFromBlock(block2) + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx2 lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx2, rw, proposal2) + }) + }) + require.NoError(t, err) + + actual, err := store1.ByID(expected.ID()) + require.NoError(t, err) + require.Equal(t, expected, actual) + actual, err = store1.ByCollectionID(expected.CollectionID) + require.NoError(t, err) + require.Equal(t, expected, actual) + }) +} + +// Storing a different guarantee for the same collection should return [storage.ErrDataMismatch] +func TestStoreConflictingGuarantee(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + all := store.InitAll(metrics, db) + blocks := all.Blocks + store1 := all.Guarantees + expected := unittest.CollectionGuaranteeFixture() + block := unittest.BlockWithGuaranteesFixture([]*flow.CollectionGuarantee{expected}) + proposal := unittest.ProposalFromBlock(block) + + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, proposal) + }) + }) + require.NoError(t, err) + + // a differing guarantee for the same collection is potentially byzantine and should return [storage.ErrDataMismatch] + conflicting := *expected + conflicting.SignerIndices = []byte{99} + block2 := unittest.BlockWithGuaranteesFixture([]*flow.CollectionGuarantee{&conflicting}) + proposal2 := unittest.ProposalFromBlock(block2) + 
err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx, rw, proposal2)
+			})
+		})
+		require.ErrorIs(t, err, storage.ErrDataMismatch)
+
+		actual, err := store1.ByID(expected.ID())
+		require.NoError(t, err)
+		require.Equal(t, expected, actual)
+		actual, err = store1.ByCollectionID(expected.CollectionID)
+		require.NoError(t, err)
+		require.Equal(t, expected, actual)
+	})
+}
diff --git a/storage/store/headers.go b/storage/store/headers.go
new file mode 100644
index 00000000000..496cc7b7ec7
--- /dev/null
+++ b/storage/store/headers.go
@@ -0,0 +1,274 @@
+package store
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+// Headers implements persistent storage for block headers, backed by a DB.
+type Headers struct {
+	db storage.DB
+	// cache is essentially an in-memory map from `Block.ID()` -> `Header`
+	cache       *Cache[flow.Identifier, *flow.Header]
+	heightCache *Cache[uint64, flow.Identifier]
+	viewCache   *Cache[uint64, flow.Identifier]
+	sigs        *proposalSignatures
+}
+
+var _ storage.Headers = (*Headers)(nil)
+
+// NewHeaders creates a Headers instance, which stores block headers.
+// It supports storing, caching, and retrieving headers by block ID; finalized headers are additionally indexed by height.
+func NewHeaders(collector module.CacheMetrics, db storage.DB) *Headers {
+	storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, header *flow.Header) error {
+		return operation.InsertHeader(lctx, rw, blockID, header)
+	}
+
+	retrieve := func(r storage.Reader, blockID flow.Identifier) (*flow.Header, error) {
+		var header flow.Header
+		err := operation.RetrieveHeader(r, blockID, &header)
+		return &header, err
+	}
+
+	retrieveHeight := func(r storage.Reader, height uint64) (flow.Identifier, error) {
+		var id flow.Identifier
+		err := operation.LookupBlockHeight(r, height, &id)
+		return id, err
+	}
+
+	retrieveView := func(r storage.Reader, view uint64) (flow.Identifier, error) {
+		var id flow.Identifier
+		err := operation.LookupCertifiedBlockByView(r, view, &id)
+		return id, err
+	}
+
+	h := &Headers{
+		db: db,
+		cache: newCache(collector, metrics.ResourceHeader,
+			withLimit[flow.Identifier, *flow.Header](4*flow.DefaultTransactionExpiry),
+			withStoreWithLock(storeWithLock),
+			withRetrieve(retrieve)),
+
+		heightCache: newCache(collector, metrics.ResourceFinalizedHeight,
+			withLimit[uint64, flow.Identifier](4*flow.DefaultTransactionExpiry),
+			withRetrieve(retrieveHeight)),
+
+		viewCache: newCache(collector, metrics.ResourceCertifiedView,
+			withLimit[uint64, flow.Identifier](4*flow.DefaultTransactionExpiry),
+			withRetrieve(retrieveView)),
+
+		sigs: newProposalSignatures(collector, db),
+	}
+
+	return h
+}
+
+func (h *Headers) storeTx(
+	lctx lockctx.Proof,
+	rw storage.ReaderBatchWriter,
+	blockID flow.Identifier,
+	header *flow.Header,
+	proposalSig []byte,
+) error {
+	err := h.cache.PutWithLockTx(lctx, rw, blockID, header)
+	if err != nil {
+		return err
+	}
+
+	return h.sigs.storeTx(lctx, rw, blockID, proposalSig)
+}
+
+func (h *Headers) retrieveTx(blockID flow.Identifier) (*flow.Header, error) {
+	val, err := h.cache.Get(h.db.Reader(), blockID)
+	if err != nil {
+		return nil, err
+	}
+	return val, nil
+}
+
+func (h *Headers) retrieveProposalTx(blockID flow.Identifier) (*flow.ProposalHeader, error) {
+	header, err := h.cache.Get(h.db.Reader(), blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve header: %w", err)
+	}
+	sig, err := h.sigs.retrieveTx(blockID)
+	if err != nil {
+		return nil, fmt.Errorf("could not retrieve proposer signature for id %x: %w", blockID, err)
+	}
+	return &flow.ProposalHeader{Header: header, ProposerSigData: sig}, nil
+}
+
+// retrieveIdByHeightTx returns the ID of the finalized block at the given height.
+// It returns [storage.ErrNotFound] for unknown heights.
+func (h *Headers) retrieveIdByHeightTx(height uint64) (flow.Identifier, error) {
+	blockID, err := h.heightCache.Get(h.db.Reader(), height)
+	if err != nil {
+		return flow.ZeroID, fmt.Errorf("failed to retrieve block ID for height %d: %w", height, err)
+	}
+	return blockID, nil
+}
+
+// ByBlockID returns the header with the given ID. It is available for finalized blocks and those pending finalization.
+// Error returns:
+//   - [storage.ErrNotFound] if no block header with the given ID exists
+func (h *Headers) ByBlockID(blockID flow.Identifier) (*flow.Header, error) {
+	return h.retrieveTx(blockID)
+}
+
+// ProposalByBlockID returns the header with the given ID, along with the corresponding proposer signature.
+// It is available for finalized blocks and those pending finalization.
+// Error returns:
+//   - [storage.ErrNotFound] if no block header or proposer signature with the given blockID exists
+func (h *Headers) ProposalByBlockID(blockID flow.Identifier) (*flow.ProposalHeader, error) {
+	return h.retrieveProposalTx(blockID)
+}
+
+// ByHeight returns the header of the finalized block at the given height. It is only available for finalized blocks.
+// Error returns:
+//   - [storage.ErrNotFound] if no finalized block is known at the given height
+func (h *Headers) ByHeight(height uint64) (*flow.Header, error) {
+	blockID, err := h.retrieveIdByHeightTx(height)
+	if err != nil {
+		return nil, err
+	}
+	return h.retrieveTx(blockID)
+}
+
+// ByView returns the header of the block certified at the given view. It is only available for certified blocks.
+// Certified blocks are blocks that have received a QC. HotStuff guarantees that for each view,
+// at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique
+// even for non-finalized blocks.
+//
+// Expected errors during normal operations:
+//   - [storage.ErrNotFound] if no certified block is known at the given view.
+func (h *Headers) ByView(view uint64) (*flow.Header, error) {
+	blockID, err := h.viewCache.Get(h.db.Reader(), view)
+	if err != nil {
+		return nil, err
+	}
+	return h.retrieveTx(blockID)
+}
+
+// Exists returns true if a header with the given ID has been stored.
+// No errors are expected during normal operation.
+func (h *Headers) Exists(blockID flow.Identifier) (bool, error) {
+	// if the block is in the cache, return true
+	if ok := h.cache.IsCached(blockID); ok {
+		return ok, nil
+	}
+	// otherwise, check the underlying database
+	exists, err := operation.BlockExists(h.db.Reader(), blockID)
+	if err != nil {
+		return false, fmt.Errorf("could not check existence: %w", err)
+	}
+	return exists, nil
+}
+
+// BlockIDByHeight returns the block ID that is finalized at the given height. It is an optimized
+// version of `ByHeight` that skips retrieving the block.
+//
+// Expected errors during normal operations:
+//   - [storage.ErrNotFound] if no finalized block is known at the given height
+func (h *Headers) BlockIDByHeight(height uint64) (flow.Identifier, error) {
+	blockID, err := h.retrieveIdByHeightTx(height)
+	if err != nil {
+		return flow.ZeroID, fmt.Errorf("could not lookup block id by height %d: %w", height, err)
+	}
+	return blockID, nil
+}
+
+// ByParentID finds all children for the given parent block. The returned headers
+// might be unfinalized; if there is more than one, at least one of them has to
+// be unfinalized.
+// CAUTION: this method is not backed by a cache and therefore comparatively slow!
+//
+// Expected error returns during normal operations:
+//   - [storage.ErrNotFound] if no block with the given parentID is known
+func (h *Headers) ByParentID(parentID flow.Identifier) ([]*flow.Header, error) {
+	var blockIDs flow.IdentifierList
+	err := operation.RetrieveBlockChildren(h.db.Reader(), parentID, &blockIDs)
+	if err != nil {
+		// if a not-found error is returned, there are two possible reasons:
+		// 1. the parent block does not exist, in which case we should return a not-found error
+		// 2. the parent block exists but has no children, in which case we should return an empty list
+		if errors.Is(err, storage.ErrNotFound) {
+			exists, err := h.Exists(parentID)
+			if err != nil {
+				return nil, fmt.Errorf("could not check existence of parent %x: %w", parentID, err)
+			}
+			if !exists {
+				return nil, fmt.Errorf("cannot retrieve children of unknown block %x: %w", parentID, storage.ErrNotFound)
+			}
+			// parent exists but has no children
+			return []*flow.Header{}, nil
+		}
+		return nil, fmt.Errorf("could not look up children: %w", err)
+	}
+	headers := make([]*flow.Header, 0, len(blockIDs))
+	for _, blockID := range blockIDs {
+		header, err := h.ByBlockID(blockID)
+		if err != nil {
+			return nil, fmt.Errorf("could not retrieve child (%x): %w", blockID, err)
+		}
+		headers = append(headers, header)
+	}
+	return headers, nil
+}
+
+// BlockIDByView returns the block ID that is certified at the given view. It is an optimized
+// version of `ByView` that skips retrieving the block.
+//
+// Expected errors during normal operations:
+//   - [storage.ErrNotFound] if no certified block is known at the given view.
+//
+// NOTE: this method is not available until next spork (mainnet27) or a migration that builds the index.
+func (h *Headers) BlockIDByView(view uint64) (flow.Identifier, error) {
+	blockID, err := h.viewCache.Get(h.db.Reader(), view)
+	if err != nil {
+		return flow.ZeroID, fmt.Errorf("could not lookup block id by view %d: %w", view, err)
+	}
+	return blockID, nil
+}
+
+// FindHeaders returns all stored headers for which the given filter returns true.
+func (h *Headers) FindHeaders(filter func(header *flow.Header) bool) ([]flow.Header, error) {
+	blocks := make([]flow.Header, 0, 1)
+	err := operation.FindHeaders(h.db.Reader(), filter, &blocks)
+	return blocks, err
+}
+
+// RollbackExecutedBlock updates the latest-executed-block pointer to the given header.
+// Intended to be used by Execution Nodes only, to roll back the executed block height.
+// This method is NOT CONCURRENCY SAFE; the caller must ensure it is invoked from a
+// single thread at a time.
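+//
+// A minimal usage sketch (hypothetical; `headers` and `target` are placeholder names
+// for an initialized Headers instance and a previously executed header):
+//
+//	if err := headers.RollbackExecutedBlock(target); err != nil {
+//		// the target height must be strictly below the currently executed height
+//		return fmt.Errorf("could not roll back executed block: %w", err)
+//	}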
+func (h *Headers) RollbackExecutedBlock(header *flow.Header) error {
+	var blockID flow.Identifier
+	err := operation.RetrieveExecutedBlock(h.db.Reader(), &blockID)
+	if err != nil {
+		return fmt.Errorf("cannot lookup executed block: %w", err)
+	}
+
+	var highest flow.Header
+	err = operation.RetrieveHeader(h.db.Reader(), blockID, &highest)
+	if err != nil {
+		return fmt.Errorf("cannot retrieve executed header: %w", err)
+	}
+
+	// only rollback if the given height is below the current executed height
+	if header.Height >= highest.Height {
+		return fmt.Errorf("cannot rollback: expected target height %v to be lower than the highest executed height %v, but it is not",
+			header.Height, highest.Height,
+		)
+	}
+
+	return h.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		err = operation.UpdateExecutedBlock(rw.Writer(), header.ID())
+		if err != nil {
+			return fmt.Errorf("cannot update highest executed block: %w", err)
+		}
+
+		return nil
+	})
+}
diff --git a/storage/store/headers_test.go b/storage/store/headers_test.go
new file mode 100644
index 00000000000..bc4f8ceb12f
--- /dev/null
+++ b/storage/store/headers_test.go
@@ -0,0 +1,228 @@
+package store_test
+
+import (
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage/operation"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/storage/store"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestHeaderStoreRetrieve(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		metrics := metrics.NewNoopCollector()
+		all := store.InitAll(metrics, db)
+		headers := all.Headers
+		blocks := all.Blocks
+
+		proposal := unittest.ProposalFixture()
+		block := proposal.Block
+
+		// store block which will also store header
+		err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx, rw, proposal)
+			})
+		})
+		require.NoError(t, err)
+
+		// index the block ID by its finalized height
+		err = unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx2 lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.IndexFinalizedBlockByHeight(lctx2, rw, block.Height, block.ID())
+			})
+		})
+		require.NoError(t, err)
+
+		// retrieve header by height
+		actual, err := headers.ByHeight(block.Height)
+		require.NoError(t, err)
+		require.Equal(t, block.ToHeader(), actual)
+	})
+}
+
+func TestHeaderIndexByViewAndRetrieve(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		metrics := metrics.NewNoopCollector()
+		all := store.InitAll(metrics, db)
+		headers := all.Headers
+		blocks := all.Blocks
+
+		proposal := unittest.ProposalFixture()
+		block := proposal.Block
+
+		// store block which will also store header
+		err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx, rw, proposal)
+			})
+		})
+		require.NoError(t, err)
+
+		err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				// index the certified block by view
+				return operation.IndexCertifiedBlockByView(lctx, rw, block.View, block.ID())
+			})
+		})
+		require.NoError(t, err)
+
+		// retrieve header by view
+		actual, err := headers.ByView(block.View)
+		require.NoError(t, err)
+		require.Equal(t, block.ToHeader(), actual)
+	})
+}
+
+func TestHeaderRetrieveWithoutStore(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		headers := store.NewHeaders(metrics, db)
+
+		header := unittest.BlockHeaderFixture()
+
+		// retrieving the header by height should error, since nothing was stored at this height
+		_, err := headers.ByHeight(header.Height)
+		require.ErrorIs(t, err, storage.ErrNotFound)
+	})
+}
+
+// TestHeadersByParentID tests method [Headers.ByParentID] for:
+//  1. a known parent with no children should return an empty list;
+//  2. a known parent with 3 children should return the headers of those children;
+//  3. an unknown parent should return [storage.ErrNotFound].
+func TestHeadersByParentID(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		metrics := metrics.NewNoopCollector()
+		all := store.InitAll(metrics, db)
+		headers := all.Headers
+		blocks := all.Blocks
+
+		// Create a parent block
+		parentProposal := unittest.ProposalFixture()
+		parentBlock := parentProposal.Block
+
+		// Store parent block
+		err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return blocks.BatchStore(lctx, rw, parentProposal)
+			})
+		})
+		require.NoError(t, err)
+
+		// Test case 1: Parent with no children should return empty list
+		children, err := headers.ByParentID(parentBlock.ID())
+		require.NoError(t, err)
+		require.Empty(t, children)
+
+		// Test case 2: Parent with 3 children
+		var childProposals []*flow.Proposal
+		for i := 0; i < 3; i++ {
+			childProposal := unittest.ProposalFromBlock(unittest.BlockWithParentFixture(parentBlock.ToHeader()))
+			childProposals = append(childProposals, childProposal)
+
+			err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					// Store the block
+					err := blocks.BatchStore(lctx, rw, childProposal)
+					if err != nil {
+						return err
+					}
+					// Index the parent-child relationship
+					return operation.IndexNewBlock(lctx, rw, childProposal.Block.ID(), childProposal.Block.ParentID)
+				})
+			})
+			require.NoError(t, err)
+		}
+
+		// confirm correct behaviour for test case 2: we should retrieve the headers of the 3 children
+		children, err = headers.ByParentID(parentBlock.ID())
+		require.NoError(t, err)
+		require.ElementsMatch(t,
+			children,
+			[]*flow.Header{childProposals[0].Block.ToHeader(), childProposals[1].Block.ToHeader(), childProposals[2].Block.ToHeader()})
+
+		// Test case 3: Non-existent parent should return ErrNotFound
+		nonExistentParent := unittest.IdentifierFixture()
+		_, err = headers.ByParentID(nonExistentParent)
+		require.ErrorIs(t, err, storage.ErrNotFound)
+	})
+}
+
+// TestHeadersByParentIDChainStructure tests method [Headers.ByParentID] for a tree of blocks with
+// deeper ancestry (children and grandchildren).
+// Specifically, we use the following fork structure, with blocks denoted in square brackets:
+//
+//	           ↙ [grandchild1]
+//	[parent] ← [child]
+//	           ↖ [grandchild2]
+func TestHeadersByParentIDChainStructure(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		metrics := metrics.NewNoopCollector()
+		all := store.InitAll(metrics, db)
+		headers := all.Headers
+		blocks := all.Blocks
+
+		// Create chain structure: parent -> child -> grandchild1, grandchild2
+		parent := unittest.BlockFixture()
+		child := unittest.BlockWithParentFixture(parent.ToHeader())
+		grandchild1 := unittest.BlockWithParentFixture(child.ToHeader())
+		grandchild2 := unittest.BlockWithParentFixture(child.ToHeader())
+
+		// Store all blocks
+		for _, b := range []*flow.Block{parent, child, grandchild1, grandchild2} {
+			proposal := unittest.ProposalFromBlock(b)
+
+			err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					// Store the block
+					err := blocks.BatchStore(lctx, rw, proposal)
+					if err != nil {
+						return err
+					}
+					// Index the parent-child relationship (skip for root block)
+					if proposal.Block.ParentID != flow.ZeroID {
+						return operation.IndexNewBlock(lctx, rw, proposal.Block.ID(), proposal.Block.ParentID)
+					}
+					return nil
+				})
+			})
+			require.NoError(t, err)
+		}
+
+		// Test that parent only returns its direct child (child)
+		children, err := headers.ByParentID(parent.ID())
+		require.NoError(t, err)
+		require.Len(t, children, 1)
+		require.Equal(t, child.ToHeader(), children[0])
+
+		// Test that child returns its direct children (grandchild1, grandchild2)
+		grandchildren, err := headers.ByParentID(child.ID())
+		require.NoError(t, err)
+		require.ElementsMatch(t, grandchildren,
+			[]*flow.Header{grandchild1.ToHeader(), grandchild2.ToHeader()})
+
+		// Test that grandchildren have no children
+		children, err = headers.ByParentID(grandchild1.ID())
+		require.NoError(t, err)
+		require.Empty(t, children)
+
+		children, err = headers.ByParentID(grandchild2.ID())
+		require.NoError(t, err)
+		require.Empty(t, children)
+	})
+}
diff --git a/storage/store/index.go b/storage/store/index.go
new file mode 100644
index 00000000000..9e92261929a
--- /dev/null
+++ b/storage/store/index.go
@@ -0,0 +1,51 @@
+package store
+
+import (
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+// Index implements a simple storage for block payload indices (flow.Index) on top of
+// the database. A payload index lists the IDs of the guarantees, seals, receipts, and
+// results contained in a block's payload.
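+//
+// A minimal usage sketch (hypothetical; `index` denotes an instance created via NewIndex):
+//
+//	idx, err := index.ByBlockID(blockID) // expected to return storage.ErrNotFound for unindexed blocks
+//	if err != nil {
+//		return fmt.Errorf("could not retrieve payload index: %w", err)
+//	}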
+type Index struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.Index] +} + +func NewIndex(collector module.CacheMetrics, db storage.DB) *Index { + storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, index *flow.Index) error { + return operation.InsertIndex(lctx, rw, blockID, index) + } + + retrieve := func(r storage.Reader, blockID flow.Identifier) (*flow.Index, error) { + var index flow.Index + err := operation.RetrieveIndex(r, blockID, &index) + return &index, err + } + + p := &Index{ + db: db, + cache: newCache(collector, metrics.ResourceIndex, + withLimit[flow.Identifier, *flow.Index](flow.DefaultTransactionExpiry+100), + withStoreWithLock(storeWithLock), + withRetrieve(retrieve)), + } + + return p +} + +func (i *Index) storeTx(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, index *flow.Index) error { + return i.cache.PutWithLockTx(lctx, rw, blockID, index) +} + +func (i *Index) ByBlockID(blockID flow.Identifier) (*flow.Index, error) { + val, err := i.cache.Get(i.db.Reader(), blockID) + if err != nil { + return nil, err + } + return val, nil +} diff --git a/storage/store/init.go b/storage/store/init.go new file mode 100644 index 00000000000..a4ec067d16c --- /dev/null +++ b/storage/store/init.go @@ -0,0 +1,72 @@ +package store + +import ( + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/storage" +) + +type All struct { + Headers *Headers + Guarantees *Guarantees + Seals *Seals + Index *Index + Payloads *Payloads + Blocks *Blocks + QuorumCertificates *QuorumCertificates + Results *ExecutionResults + Receipts *ExecutionReceipts + Commits *Commits + + EpochSetups *EpochSetups + EpochCommits *EpochCommits + EpochProtocolStateEntries *EpochProtocolStateEntries + ProtocolKVStore *ProtocolKVStore + VersionBeacons *VersionBeacons + + Transactions *Transactions + Collections *Collections +} + +func InitAll(metrics module.CacheMetrics, db storage.DB) *All { + headers := NewHeaders(metrics, db) + guarantees := NewGuarantees(metrics, db, DefaultCacheSize, DefaultCacheSize) + seals := NewSeals(metrics, db) + index := NewIndex(metrics, db) + results := NewExecutionResults(metrics, db) + receipts := NewExecutionReceipts(metrics, db, results, DefaultCacheSize) + payloads := NewPayloads(db, index, guarantees, seals, receipts, results) + blocks := NewBlocks(db, headers, payloads) + qcs := NewQuorumCertificates(metrics, db, DefaultCacheSize) + commits := NewCommits(metrics, db) + + setups := NewEpochSetups(metrics, db) + epochCommits := NewEpochCommits(metrics, db) + epochProtocolStateEntries := NewEpochProtocolStateEntries(metrics, setups, epochCommits, db, + DefaultEpochProtocolStateCacheSize, DefaultProtocolStateIndexCacheSize) + protocolKVStore := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + versionBeacons := NewVersionBeacons(db) + + transactions := NewTransactions(metrics, db) + collections := NewCollections(db, transactions) + + return &All{ + Headers: headers, + Guarantees: guarantees, + Seals: seals, + Index: index, + Payloads: payloads, + Blocks: blocks, + QuorumCertificates: qcs, + Results: results, + Receipts: receipts, + Commits: commits, + EpochCommits: epochCommits, + EpochSetups: setups, + EpochProtocolStateEntries: epochProtocolStateEntries, + ProtocolKVStore: protocolKVStore, + VersionBeacons: versionBeacons, + + Transactions: transactions, + Collections: collections, + } +} diff --git 
a/storage/store/latest_persisted_sealed_result.go b/storage/store/latest_persisted_sealed_result.go
new file mode 100644
index 00000000000..bcb8957ef63
--- /dev/null
+++ b/storage/store/latest_persisted_sealed_result.go
@@ -0,0 +1,106 @@
+package store
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/storage"
+)
+
+var _ storage.LatestPersistedSealedResult = (*LatestPersistedSealedResult)(nil)
+
+// LatestPersistedSealedResult tracks the most recently persisted sealed execution result processed
+// by the Access ingestion engine.
+type LatestPersistedSealedResult struct {
+	// resultID is the execution result ID of the most recently persisted sealed result.
+	resultID flow.Identifier
+
+	// height is the height of the most recently persisted sealed result's block.
+	// This is the value stored in the consumer progress index.
+	height uint64
+
+	// progress is the consumer progress instance.
+	progress storage.ConsumerProgress
+
+	// batchMu is used to prevent concurrent batch updates to the persisted height.
+	// The critical section is fairly large, so use a separate mutex from the cached values.
+	batchMu sync.Mutex
+
+	// cacheMu is used to protect access to resultID and height.
+	cacheMu sync.RWMutex
+}
+
+// NewLatestPersistedSealedResult creates a new LatestPersistedSealedResult instance.
+//
+// No errors are expected during normal operation.
+func NewLatestPersistedSealedResult(
+	progress storage.ConsumerProgress,
+	headers storage.Headers,
+	results storage.ExecutionResults,
+) (*LatestPersistedSealedResult, error) {
+	// load the height and resultID of the latest persisted sealed result
+	height, err := progress.ProcessedIndex()
+	if err != nil {
+		return nil, fmt.Errorf("could not get processed index: %w", err)
+	}
+
+	header, err := headers.ByHeight(height)
+	if err != nil {
+		return nil, fmt.Errorf("could not get header: %w", err)
+	}
+
+	// Note: the result-to-block relationship is indexed by the Access ingestion engine when a
+	// result is sealed.
+	result, err := results.ByBlockID(header.ID())
+	if err != nil {
+		return nil, fmt.Errorf("could not get result: %w", err)
+	}
+
+	return &LatestPersistedSealedResult{
+		resultID: result.ID(),
+		height:   height,
+		progress: progress,
+	}, nil
+}
+
+// Latest returns the ID and height of the latest persisted sealed result.
+func (l *LatestPersistedSealedResult) Latest() (flow.Identifier, uint64) {
+	l.cacheMu.RLock()
+	defer l.cacheMu.RUnlock()
+	return l.resultID, l.height
+}
+
+// BatchSet updates the latest persisted sealed result in a batch operation.
+// The resultID and height are added to the provided batch, and the local data is updated only after
+// the batch is successfully committed.
+//
+// No errors are expected during normal operation.
+func (l *LatestPersistedSealedResult) BatchSet(resultID flow.Identifier, height uint64, batch storage.ReaderBatchWriter) error {
+	// there are 2 mutexes used here:
+	// - batchMu is used to prevent concurrent batch updates to the persisted height. Since this
+	//   is a global variable, we need to ensure that only a single batch is in progress at a time.
+	// - cacheMu is used to protect access to the cached resultID and height values. This is an
+	//   optimization to avoid readers having to block during the batch operations, since they
+	//   can have arbitrarily long setup times.
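+	//
+	// batchMu is acquired here and released only inside the commit callback below
+	// (which runs on success or failure), so at most one batch update can be in
+	// flight at any point in time.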
+ l.batchMu.Lock() + + batch.AddCallback(func(err error) { + defer l.batchMu.Unlock() + if err != nil { + return + } + + l.cacheMu.Lock() + defer l.cacheMu.Unlock() + + l.resultID = resultID + l.height = height + }) + + if err := l.progress.BatchSetProcessedIndex(height, batch); err != nil { + return fmt.Errorf("could not add processed index update to batch: %w", err) + } + + return nil +} diff --git a/storage/store/latest_persisted_sealed_result_test.go b/storage/store/latest_persisted_sealed_result_test.go new file mode 100644 index 00000000000..3a1f35ca82a --- /dev/null +++ b/storage/store/latest_persisted_sealed_result_test.go @@ -0,0 +1,259 @@ +package store + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/cockroachdb/pebble/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/storage/operation/pebbleimpl" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestNewLatestPersistedSealedResult tests the initialization of LatestPersistedSealedResult. +// It verifies that: +// - The ConsumerProgress is properly stored +// - All fields are correctly initialized on success +func TestNewLatestPersistedSealedResult(t *testing.T) { + initialHeight := uint64(100) + missingHeaderHeight := initialHeight + 1 + missingResultHeight := initialHeight + 2 + + initialHeader, initialResult, mockHeaders, mockResults := getHeadersResults(t, initialHeight) + + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + + t.Run("successful initialization", func(t *testing.T) { + initializer := NewConsumerProgress(db, "test_consumer1") + progress, err := initializer.Initialize(initialHeight) + require.NoError(t, err) + + latest, err := NewLatestPersistedSealedResult(progress, mockHeaders, mockResults) + require.NoError(t, err) + + require.NotNil(t, latest) + + actualResultID, actualHeight := latest.Latest() + + assert.Equal(t, initialResult.ID(), actualResultID) + assert.Equal(t, initialHeader.Height, actualHeight) + }) + + t.Run("processed index error", func(t *testing.T) { + expectedErr := fmt.Errorf("processed index error") + + mockCP := storagemock.NewConsumerProgress(t) + mockCP.On("ProcessedIndex").Return(uint64(0), expectedErr) + + latest, err := NewLatestPersistedSealedResult(mockCP, nil, nil) + + assert.ErrorIs(t, err, expectedErr) + require.Nil(t, latest) + }) + + t.Run("header lookup error", func(t *testing.T) { + expectedErr := fmt.Errorf("header lookup error") + + initializer := NewConsumerProgress(db, "test_consumer2") + progress, err := initializer.Initialize(missingHeaderHeight) + require.NoError(t, err) + + mockHeaders.On("ByHeight", missingHeaderHeight).Return(nil, expectedErr) + + latest, err := NewLatestPersistedSealedResult(progress, mockHeaders, nil) + + assert.ErrorIs(t, err, expectedErr) + require.Nil(t, latest) + }) + + t.Run("result lookup error", func(t *testing.T) { + expectedErr := fmt.Errorf("result lookup error") + + initializer := NewConsumerProgress(db, "test_consumer3") + progress, err := initializer.Initialize(missingResultHeight) + require.NoError(t, err) + + header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(missingResultHeight)) + + mockHeaders.On("ByHeight", missingResultHeight).Return(header, nil) + mockResults.On("ByBlockID", header.ID()).Return(nil, expectedErr) + + latest, err := NewLatestPersistedSealedResult(progress, 
mockHeaders, mockResults) + + assert.ErrorIs(t, err, expectedErr) + require.Nil(t, latest) + }) + }) + +} + +// TestLatestPersistedSealedResult_BatchSet tests the batch update functionality. +// It verifies that: +// - Updates are atomic - either all state is updated or none +// - The callback mechanism works correctly for both success and failure cases +// - State is not updated if BatchSetProcessedIndex fails +// - State is only updated after the batch callback indicates success +func TestLatestPersistedSealedResult_BatchSet(t *testing.T) { + initialHeader, initialResult, mockHeaders, mockResults := getHeadersResults(t, 100) + + newResultID := unittest.IdentifierFixture() + newHeight := uint64(200) + + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + + t.Run("successful batch update", func(t *testing.T) { + initializer := NewConsumerProgress(db, "test_consumer1") + progress, err := initializer.Initialize(initialHeader.Height) + require.NoError(t, err) + + latest, err := NewLatestPersistedSealedResult(progress, mockHeaders, mockResults) + require.NoError(t, err) + + batch := db.NewBatch() + defer batch.Close() + + done := make(chan struct{}) + batch.AddCallback(func(err error) { + require.NoError(t, err) + close(done) + }) + + err = latest.BatchSet(newResultID, newHeight, batch) + require.NoError(t, err) + + err = batch.Commit() + require.NoError(t, err) + + unittest.RequireCloseBefore(t, done, 100*time.Millisecond, "callback not called") + + actualResultID, actualHeight := latest.Latest() + + assert.Equal(t, newResultID, actualResultID) + assert.Equal(t, newHeight, actualHeight) + }) + }) + + t.Run("batch update error during BatchSetProcessedIndex", func(t *testing.T) { + expectedErr := fmt.Errorf("could not set processed index") + + var callbackCalled sync.WaitGroup + callbackCalled.Add(1) + + mockBatch := storagemock.NewReaderBatchWriter(t) + mockBatch.On("AddCallback", mock.AnythingOfType("func(error)")).Run(func(args mock.Arguments) { + callback := args.Get(0).(func(error)) + callback(expectedErr) + callbackCalled.Done() + }) + + mockCP := storagemock.NewConsumerProgress(t) + mockCP.On("ProcessedIndex").Return(initialHeader.Height, nil) + mockCP.On("BatchSetProcessedIndex", newHeight, mockBatch).Return(expectedErr) + + latest, err := NewLatestPersistedSealedResult(mockCP, mockHeaders, mockResults) + require.NoError(t, err) + + err = latest.BatchSet(newResultID, newHeight, mockBatch) + assert.ErrorIs(t, err, expectedErr) + + callbackCalled.Wait() + + actualResultID, actualHeight := latest.Latest() + + assert.Equal(t, initialResult.ID(), actualResultID) + assert.Equal(t, initialHeader.Height, actualHeight) + }) +} + +// TestLatestPersistedSealedResult_ConcurrentAccess tests the thread safety of the implementation. 
+// It verifies that: +// - Multiple concurrent reads are safe +// - Concurrent reads and writes are properly synchronized +// - No data races occur under heavy concurrent load +// - The state remains consistent during concurrent operations +func TestLatestPersistedSealedResult_ConcurrentAccess(t *testing.T) { + unittest.RunWithPebbleDB(t, func(pdb *pebble.DB) { + db := pebbleimpl.ToDB(pdb) + + initialHeader, initialResult, mockHeaders, mockResults := getHeadersResults(t, 100) + + initializer := NewConsumerProgress(db, "test_consumer") + progress, err := initializer.Initialize(initialHeader.Height) + require.NoError(t, err) + + latest, err := NewLatestPersistedSealedResult(progress, mockHeaders, mockResults) + require.NoError(t, err) + + t.Run("concurrent reads", func(t *testing.T) { + var wg sync.WaitGroup + numGoroutines := 1000 + + for range numGoroutines { + wg.Add(1) + go func() { + defer wg.Done() + + actualResultID, actualHeight := latest.Latest() + + assert.Equal(t, initialResult.ID(), actualResultID) + assert.Equal(t, initialHeader.Height, actualHeight) + }() + } + + wg.Wait() + }) + + t.Run("concurrent read/write", func(t *testing.T) { + var wg sync.WaitGroup + numGoroutines := 1000 + + for i := 0; i < numGoroutines; i++ { + wg.Add(2) + go func(i int) { + defer wg.Done() + + batch := db.NewBatch() + defer batch.Close() + + newResultID := unittest.IdentifierFixture() + newHeight := uint64(200 + i) + err := latest.BatchSet(newResultID, newHeight, batch) + require.NoError(t, err) + + err = batch.Commit() + require.NoError(t, err) + }(i) + go func() { + defer wg.Done() + _, _ = latest.Latest() + }() + } + + wg.Wait() + }) + }) +} + +func getHeadersResults(t *testing.T, initialHeight uint64) (*flow.Header, *flow.ExecutionResult, *storagemock.Headers, *storagemock.ExecutionResults) { + header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(initialHeight)) + result := unittest.ExecutionResultFixture(func(result *flow.ExecutionResult) { + result.BlockID = header.ID() + }) + + mockHeaders := storagemock.NewHeaders(t) + mockHeaders.On("ByHeight", initialHeight).Return(header, nil).Maybe() + + mockResults := storagemock.NewExecutionResults(t) + mockResults.On("ByBlockID", result.BlockID).Return(result, nil).Maybe() + + return header, result, mockHeaders, mockResults +} diff --git a/storage/store/light_transaction_results.go b/storage/store/light_transaction_results.go new file mode 100644 index 00000000000..dd8cab8e29a --- /dev/null +++ b/storage/store/light_transaction_results.go @@ -0,0 +1,137 @@ +package store + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +var _ storage.LightTransactionResults = (*LightTransactionResults)(nil) + +type LightTransactionResults struct { + db storage.DB + cache *Cache[TwoIdentifier, flow.LightTransactionResult] // Key: blockID + txID + indexCache *Cache[IdentifierAndUint32, flow.LightTransactionResult] // Key: blockID + txIndex + blockCache *Cache[flow.Identifier, []flow.LightTransactionResult] // Key: blockID +} + +func NewLightTransactionResults(collector module.CacheMetrics, db storage.DB, transactionResultsCacheSize uint) *LightTransactionResults { + retrieve := func(r storage.Reader, key TwoIdentifier) (flow.LightTransactionResult, error) { + blockID, txID := KeyToBlockIDTransactionID(key) + + var txResult flow.LightTransactionResult + err := 
operation.RetrieveLightTransactionResult(r, blockID, txID, &txResult) + if err != nil { + return flow.LightTransactionResult{}, err + } + return txResult, nil + } + + retrieveIndex := func(r storage.Reader, key IdentifierAndUint32) (flow.LightTransactionResult, error) { + blockID, txIndex := KeyToBlockIDIndex(key) + + var txResult flow.LightTransactionResult + err := operation.RetrieveLightTransactionResultByIndex(r, blockID, txIndex, &txResult) + if err != nil { + return flow.LightTransactionResult{}, err + } + return txResult, nil + } + + retrieveForBlock := func(r storage.Reader, blockID flow.Identifier) ([]flow.LightTransactionResult, error) { + var txResults []flow.LightTransactionResult + err := operation.LookupLightTransactionResultsByBlockIDUsingIndex(r, blockID, &txResults) + if err != nil { + return nil, err + } + return txResults, nil + } + + return &LightTransactionResults{ + db: db, + cache: newCache(collector, metrics.ResourceTransactionResults, + withLimit[TwoIdentifier, flow.LightTransactionResult](transactionResultsCacheSize), + withStore(noopStore[TwoIdentifier, flow.LightTransactionResult]), + withRetrieve(retrieve), + ), + indexCache: newCache(collector, metrics.ResourceTransactionResultIndices, + withLimit[IdentifierAndUint32, flow.LightTransactionResult](transactionResultsCacheSize), + withStore(noopStore[IdentifierAndUint32, flow.LightTransactionResult]), + withRetrieve(retrieveIndex), + ), + blockCache: newCache(collector, metrics.ResourceTransactionResultIndices, + withLimit[flow.Identifier, []flow.LightTransactionResult](transactionResultsCacheSize), + withStore(noopStore[flow.Identifier, []flow.LightTransactionResult]), + withRetrieve(retrieveForBlock), + ), + } +} + +func (tr *LightTransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, rw storage.ReaderBatchWriter) error { + w := rw.Writer() + + for i, result := range transactionResults { + err := operation.BatchInsertLightTransactionResult(w, blockID, &result) + if err != nil { + return fmt.Errorf("cannot batch insert tx result: %w", err) + } + + err = operation.BatchIndexLightTransactionResult(w, blockID, uint32(i), &result) + if err != nil { + return fmt.Errorf("cannot batch index tx result: %w", err) + } + } + + storage.OnCommitSucceed(rw, func() { + for i, result := range transactionResults { + key := KeyFromBlockIDTransactionID(blockID, result.TransactionID) + // cache for each transaction, so that it's faster to retrieve + tr.cache.Insert(key, result) + + index := uint32(i) + + keyIndex := KeyFromBlockIDIndex(blockID, index) + tr.indexCache.Insert(keyIndex, result) + } + + tr.blockCache.Insert(blockID, transactionResults) + }) + return nil +} + +func (tr *LightTransactionResults) BatchStoreBadger(blockID flow.Identifier, transactionResults []flow.LightTransactionResult, batch storage.BatchStorage) error { + panic("LightTransactionResults BatchStoreBadger not implemented") +} + +// ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID +func (tr *LightTransactionResults) ByBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) (*flow.LightTransactionResult, error) { + key := KeyFromBlockIDTransactionID(blockID, txID) + transactionResult, err := tr.cache.Get(tr.db.Reader(), key) + if err != nil { + return nil, err + } + return &transactionResult, nil +} + +// ByBlockIDTransactionIndex returns the transaction result for the given blockID and transaction index +func (tr *LightTransactionResults) 
ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.LightTransactionResult, error) {
+	key := KeyFromBlockIDIndex(blockID, txIndex)
+	transactionResult, err := tr.indexCache.Get(tr.db.Reader(), key)
+	if err != nil {
+		return nil, err
+	}
+	return &transactionResult, nil
+}
+
+// ByBlockID gets all transaction results for a block, ordered by transaction index
+func (tr *LightTransactionResults) ByBlockID(blockID flow.Identifier) ([]flow.LightTransactionResult, error) {
+	transactionResults, err := tr.blockCache.Get(tr.db.Reader(), blockID)
+	if err != nil {
+		return nil, err
+	}
+	return transactionResults, nil
+}
diff --git a/storage/store/light_transaction_results_test.go b/storage/store/light_transaction_results_test.go
new file mode 100644
index 00000000000..c3ea965ab72
--- /dev/null
+++ b/storage/store/light_transaction_results_test.go
@@ -0,0 +1,110 @@
+package store_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/exp/rand"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation/dbtest"
+	"github.com/onflow/flow-go/storage/store"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestBatchStoringLightTransactionResults(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		store1 := store.NewLightTransactionResults(metrics, db, 1000)
+
+		blockID := unittest.IdentifierFixture()
+		txResults := getLightTransactionResultsFixture(10)
+
+		t.Run("batch store results", func(t *testing.T) {
+			require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return store1.BatchStore(blockID, txResults, rw)
+			}))
+
+			// add results for a different block to validate they are not included in lookups
+			require.NoError(t, db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return store1.BatchStore(unittest.IdentifierFixture(), getLightTransactionResultsFixture(2), rw)
+			}))
+		})
+
+		t.Run("read results with cache", func(t *testing.T) {
+			for _, txResult := range txResults {
+				actual, err := store1.ByBlockIDTransactionID(blockID, txResult.TransactionID)
+				require.NoError(t, err)
+				assert.Equal(t, txResult, *actual)
+			}
+		})
+
+		newStore := store.NewLightTransactionResults(metrics, db, 1000)
+		t.Run("read results without cache", func(t *testing.T) {
+			// test loading from database (without cache)
+			// create a new instance using the same db so it has an empty cache
+			for _, txResult := range txResults {
+				actual, err := newStore.ByBlockIDTransactionID(blockID, txResult.TransactionID)
+				require.NoError(t, err)
+				assert.Equal(t, txResult, *actual)
+			}
+		})
+
+		t.Run("cached and non-cached results are equal", func(t *testing.T) {
+			// check retrieving by index from both cache and db
+			for i := len(txResults) - 1; i >= 0; i-- {
+				actual, err := store1.ByBlockIDTransactionIndex(blockID, uint32(i))
+				require.NoError(t, err)
+				assert.Equal(t, txResults[i], *actual)
+
+				actual, err = newStore.ByBlockIDTransactionIndex(blockID, uint32(i))
+				require.NoError(t, err)
+				assert.Equal(t, txResults[i], *actual)
+			}
+		})
+
+		t.Run("read all results for block", func(t *testing.T) {
+			actuals, err := store1.ByBlockID(blockID)
+			require.NoError(t, err)
+
+			assert.Equal(t, len(txResults), len(actuals))
+			for i := range txResults {
+				assert.Equal(t, txResults[i], actuals[i])
+			}
+		})
+	})
+}
+
+func TestReadingNotStoredLightTransactionResults(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		store1 := store.NewLightTransactionResults(metrics, db, 1000)
+
+		blockID := unittest.IdentifierFixture()
+		txID := unittest.IdentifierFixture()
+		txIndex := rand.Uint32()
+
+		_, err := store1.ByBlockIDTransactionID(blockID, txID)
+		assert.ErrorIs(t, err, storage.ErrNotFound)
+
+		_, err = store1.ByBlockIDTransactionIndex(blockID, txIndex)
+		assert.ErrorIs(t, err, storage.ErrNotFound)
+	})
+}
+
+func getLightTransactionResultsFixture(n int) []flow.LightTransactionResult {
+	txResults := make([]flow.LightTransactionResult, 0, n)
+	for i := 0; i < n; i++ {
+		expected := flow.LightTransactionResult{
+			TransactionID:   unittest.IdentifierFixture(),
+			Failed:          i%2 == 0,
+			ComputationUsed: unittest.Uint64InRange(1, 1000),
+		}
+		txResults = append(txResults, expected)
+	}
+	return txResults
+}
diff --git a/storage/store/my_receipts.go b/storage/store/my_receipts.go
new file mode 100644
index 00000000000..02c79cbbb56
--- /dev/null
+++ b/storage/store/my_receipts.go
@@ -0,0 +1,125 @@
+package store
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/jordanschalm/lockctx"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/irrecoverable"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+// MyExecutionReceipts holds and indexes Execution Receipts.
+// It is implemented as a wrapper around the generic storage.ExecutionReceipts.
+// The wrapper adds the ability to store and index *my* execution receipt, i.e. the
+// receipt produced from the viewpoint of an individual Execution Node.
+type MyExecutionReceipts struct {
+	genericReceipts storage.ExecutionReceipts
+	db              storage.DB
+	cache           *Cache[flow.Identifier, *flow.ExecutionReceipt]
+}
+
+// NewMyExecutionReceipts creates an instance of MyExecutionReceipts, a wrapper around
+// the generic storage.ExecutionReceipts.
+// It's useful for execution nodes to keep track of produced execution receipts.
+func NewMyExecutionReceipts(collector module.CacheMetrics, db storage.DB, receipts storage.ExecutionReceipts) *MyExecutionReceipts {
+	retrieve := func(r storage.Reader, blockID flow.Identifier) (*flow.ExecutionReceipt, error) {
+		var receiptID flow.Identifier
+		err := operation.LookupOwnExecutionReceipt(r, blockID, &receiptID)
+		if err != nil {
+			return nil, fmt.Errorf("could not lookup receipt ID: %w", err)
+		}
+		receipt, err := receipts.ByID(receiptID)
+		if err != nil {
+			return nil, err
+		}
+		return receipt, nil
+	}
+
+	remove := func(rw storage.ReaderBatchWriter, blockID flow.Identifier) error {
+		return operation.RemoveOwnExecutionReceipt(rw.Writer(), blockID)
+	}
+
+	return &MyExecutionReceipts{
+		genericReceipts: receipts,
+		db:              db,
+		cache: newCache(collector, metrics.ResourceMyReceipt,
+			withLimit[flow.Identifier, *flow.ExecutionReceipt](flow.DefaultTransactionExpiry+100),
+			withRetrieve(retrieve),
+			withRemove[flow.Identifier, *flow.ExecutionReceipt](remove),
+		),
+	}
+}
+
+// myReceipt retrieves my receipt for the given block ID, using the cache where possible.
+func (m *MyExecutionReceipts) myReceipt(blockID flow.Identifier) (*flow.ExecutionReceipt, error) {
+	return m.cache.Get(m.db.Reader(), blockID) // served from the cache if present, otherwise read from the DB
+}
+
+// BatchStoreMyReceipt stores the blockID-to-my-receipt index entry, keyed by blockID, in the provided batch.
+// +// If entity fails marshalling, the error is wrapped in a generic error and returned. +// If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. +// +// Expected error returns during *normal* operations: +// - `storage.ErrDataMismatch` if a *different* receipt has already been indexed for the same block +func (m *MyExecutionReceipts) BatchStoreMyReceipt(lctx lockctx.Proof, receipt *flow.ExecutionReceipt, rw storage.ReaderBatchWriter) error { + receiptID := receipt.ID() + blockID := receipt.ExecutionResult.BlockID + + if lctx == nil || !lctx.HoldsLock(storage.LockInsertOwnReceipt) { + return fmt.Errorf("cannot store my receipt, missing lock %v", storage.LockInsertOwnReceipt) + } + + // add DB operation to batch for storing receipt (execution deferred until batch is committed) + err := m.genericReceipts.BatchStore(receipt, rw) + if err != nil { + return err + } + + // dd DB operation to batch for indexing receipt as one of my own (execution deferred until batch is committed) + var savedReceiptID flow.Identifier + err = operation.LookupOwnExecutionReceipt(rw.GlobalReader(), blockID, &savedReceiptID) + if err == nil { + if savedReceiptID == receiptID { + return nil // no-op we are storing *same* receipt + } + return fmt.Errorf("indexing my receipt %v failed: different receipt %v for the same block %v is already indexed: %w", receiptID, savedReceiptID, blockID, storage.ErrDataMismatch) + } + if !errors.Is(err, storage.ErrNotFound) { // `storage.ErrNotFound` is expected, as this indicates that no receipt is indexed yet; anything else is an exception + return irrecoverable.NewException(err) + } + err = operation.IndexOwnExecutionReceipt(rw.Writer(), blockID, receiptID) + if err != nil { + return err + } + + // TODO: ideally, adding the receipt to the cache on success, should be done by the cache itself + storage.OnCommitSucceed(rw, func() { + m.cache.Insert(blockID, receipt) + }) + return nil +} + +// MyReceipt retrieves my receipt for the given block. +// Returns storage.ErrNotFound if no receipt was persisted for the block. +func (m *MyExecutionReceipts) MyReceipt(blockID flow.Identifier) (*flow.ExecutionReceipt, error) { + return m.myReceipt(blockID) +} + +func (m *MyExecutionReceipts) RemoveIndexByBlockID(blockID flow.Identifier) error { + return m.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return m.BatchRemoveIndexByBlockID(blockID, rw) + }) +} + +// BatchRemoveIndexByBlockID removes blockID-to-my-execution-receipt index entry keyed by a blockID in a provided batch +// No errors are expected during normal operation, even if no entries are matched. +// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned. 
+func (m *MyExecutionReceipts) BatchRemoveIndexByBlockID(blockID flow.Identifier, rw storage.ReaderBatchWriter) error { + return m.cache.RemoveTx(rw, blockID) +} diff --git a/storage/store/my_receipts_test.go b/storage/store/my_receipts_test.go new file mode 100644 index 00000000000..62d1e6e0286 --- /dev/null +++ b/storage/store/my_receipts_test.go @@ -0,0 +1,262 @@ +package store_test + +import ( + "errors" + "sync" + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestMyExecutionReceiptsStorage(t *testing.T) { + withStore := func(t *testing.T, f func(storage.MyExecutionReceipts, storage.ExecutionResults, storage.ExecutionReceipts, storage.DB, lockctx.Manager)) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + results := store.NewExecutionResults(metrics, db) + receipts := store.NewExecutionReceipts(metrics, db, results, 100) + myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) + + f(myReceipts, results, receipts, db, lockManager) + }) + } + + t.Run("myReceipts store and retrieve from different storage layers", func(t *testing.T) { + withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) { + block := unittest.BlockFixture() + receipt1 := unittest.ReceiptForBlockFixture(block) + + // STEP 1: Store receipt + err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw) + }) + }) + require.NoError(t, err) + + // STEP 2: Retrieve from different storage layers + // MyExecutionReceipts delegates the storage of the receipt to the more generic storage.ExecutionReceipts and storage.ExecutionResults, + // which is also used by the consensus follower to store execution receipts & results that are incorporated into blocks. + // After storing my receipts, we check that the result and receipt can also be retrieved from the lower-level generic storage layers. 
+			actual, err := myReceipts.MyReceipt(block.ID())
+			require.NoError(t, err)
+			require.Equal(t, receipt1, actual)
+
+			actualReceipt, err := receipts.ByID(receipt1.ID()) // generic receipts storage
+			require.NoError(t, err)
+			require.Equal(t, receipt1, actualReceipt)
+
+			actualResult, err := results.ByID(receipt1.ExecutionResult.ID()) // generic results storage
+			require.NoError(t, err)
+			require.Equal(t, receipt1.ExecutionResult, *actualResult)
+		})
+	})
+
+	t.Run("myReceipts store identical receipt for the same block", func(t *testing.T) {
+		withStore(t, func(myReceipts storage.MyExecutionReceipts, _ storage.ExecutionResults, _ storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) {
+			block := unittest.BlockFixture()
+			receipt1 := unittest.ReceiptForBlockFixture(block)
+
+			err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
+				})
+			})
+			require.NoError(t, err)
+
+			err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
+				})
+			})
+			require.NoError(t, err)
+		})
+	})
+
+	t.Run("store different receipt for same block should fail", func(t *testing.T) {
+		withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) {
+			block := unittest.BlockFixture()
+
+			executor1 := unittest.IdentifierFixture()
+			executor2 := unittest.IdentifierFixture()
+
+			receipt1 := unittest.ReceiptForBlockExecutorFixture(block, executor1)
+			receipt2 := unittest.ReceiptForBlockExecutorFixture(block, executor2)
+
+			err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
+				})
+			})
+			require.NoError(t, err)
+
+			err = unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return myReceipts.BatchStoreMyReceipt(lctx, receipt2, rw)
+				})
+			})
+			require.Error(t, err)
+			require.ErrorIs(t, err, storage.ErrDataMismatch)
+		})
+	})
+
+	t.Run("concurrent store different receipt for same block should fail", func(t *testing.T) {
+		withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) {
+			block := unittest.BlockFixture()
+
+			executor1 := unittest.IdentifierFixture()
+			executor2 := unittest.IdentifierFixture()
+
+			receipt1 := unittest.ReceiptForBlockExecutorFixture(block, executor1)
+			receipt2 := unittest.ReceiptForBlockExecutorFixture(block, executor2)
+
+			var startSignal sync.WaitGroup // goroutines attempting store operations will wait for this signal to start concurrently
+			startSignal.Add(1)             // expecting one signal from the main thread to start both goroutines
+			var doneSignal sync.WaitGroup  // the main thread will wait on this for both goroutines to finish
+			doneSignal.Add(2)              // expecting two goroutines to signal finish
+			errChan := make(chan error, 2)
+
+			go func() {
+				startSignal.Wait()
+				err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
+					})
+				})
+				errChan <- err
+				doneSignal.Done()
+			}()
+
+			go func() {
+				startSignal.Wait()
+				err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+					return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+						return myReceipts.BatchStoreMyReceipt(lctx, receipt2, rw)
+					})
+				})
+				errChan <- err
+				doneSignal.Done()
+			}()
+
+			startSignal.Done() // start both goroutines
+			doneSignal.Wait()  // wait for both goroutines to finish
+			close(errChan)
+
+			// Check that one of the Index operations succeeded and the other failed
+			var errCount int
+			for err := range errChan {
+				if err != nil {
+					errCount++
+					require.Contains(t, err.Error(), "different receipt")
+				}
+			}
+			require.Equal(t, 1, errCount, "Exactly one of the operations should fail")
+		})
+	})
+
+	t.Run("concurrent store of 10 different receipts for different blocks should succeed", func(t *testing.T) {
+		withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) {
+			var startSignal sync.WaitGroup // goroutines attempting store operations will wait for this signal to start concurrently
+			startSignal.Add(1)             // expecting one signal from the main thread to start all goroutines
+			var doneSignal sync.WaitGroup  // the main thread will wait on this for goroutines attempting store operations to finish
+			errChan := make(chan error, 10)
+
+			// Store receipts concurrently
+			for i := 0; i < 10; i++ {
+				doneSignal.Add(1)
+				go func(i int) {
+					block := unittest.BlockFixture() // Each iteration gets a new block
+					executor := unittest.IdentifierFixture()
+					receipt := unittest.ReceiptForBlockExecutorFixture(block, executor)
+
+					startSignal.Wait()
+					err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+						return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+							return myReceipts.BatchStoreMyReceipt(lctx, receipt, rw)
+						})
+					})
+					errChan <- err
+					require.NoError(t, err)
+					doneSignal.Done()
+				}(i)
+			}
+
+			startSignal.Done() // start all goroutines
+			doneSignal.Wait()  // wait for all goroutines to finish
+			close(errChan)
+
+			// Verify all succeeded
+			for err := range errChan {
+				require.NoError(t, err, "All receipts should be stored successfully")
+			}
+		})
+	})
+
+	t.Run("store and remove", func(t *testing.T) {
+		withStore(t, func(myReceipts storage.MyExecutionReceipts, results storage.ExecutionResults, receipts storage.ExecutionReceipts, db storage.DB, lockManager lockctx.Manager) {
+			block := unittest.BlockFixture()
+			receipt1 := unittest.ReceiptForBlockFixture(block)
+
+			err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
+				return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+					return myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw)
+				})
+			})
+			require.NoError(t, err)
+
+			actual, err := myReceipts.MyReceipt(block.ID())
+			require.NoError(t, err)
+
+			require.Equal(t, receipt1, actual)
+
+			// Check after storing my receipts, the result and receipt are stored
+			actualReceipt, err := receipts.ByID(receipt1.ID())
+			require.NoError(t, err)
+			require.Equal(t, receipt1, actualReceipt)
+
+			actualResult, err :=
results.ByID(receipt1.ExecutionResult.ID()) + require.NoError(t, err) + require.Equal(t, receipt1.ExecutionResult, *actualResult) + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return myReceipts.BatchRemoveIndexByBlockID(block.ID(), rw) + }) + require.NoError(t, err) + + _, err = myReceipts.MyReceipt(block.ID()) + require.True(t, errors.Is(err, storage.ErrNotFound)) + }) + }) +} + +func TestMyExecutionReceiptsStorageMultipleStoreInSameBatch(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + results := store.NewExecutionResults(metrics, db) + receipts := store.NewExecutionReceipts(metrics, db, results, 100) + myReceipts := store.NewMyExecutionReceipts(metrics, db, receipts) + + block := unittest.BlockFixture() + receipt1 := unittest.ReceiptForBlockFixture(block) + receipt2 := unittest.ReceiptForBlockFixture(block) + + err := unittest.WithLock(t, lockManager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := myReceipts.BatchStoreMyReceipt(lctx, receipt1, rw) + if err != nil { + return err + } + return myReceipts.BatchStoreMyReceipt(lctx, receipt2, rw) + }) + }) + require.NoError(t, err) + }) +} diff --git a/storage/store/node_disallow_list.go b/storage/store/node_disallow_list.go new file mode 100644 index 00000000000..e7c909c3186 --- /dev/null +++ b/storage/store/node_disallow_list.go @@ -0,0 +1,43 @@ +package store + +import ( + "errors" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +type NodeDisallowList struct { + db storage.DB +} + +var _ storage.NodeDisallowList = (*NodeDisallowList)(nil) + +func NewNodeDisallowList(db storage.DB) *NodeDisallowList { + return &NodeDisallowList{db: db} +} + +// Store writes the given disallowList to the database. +// To avoid legacy entries in the database, we purge +// the entire database entry if disallowList is empty. +// No errors are expected during normal operations. +func (dl *NodeDisallowList) Store(disallowList map[flow.Identifier]struct{}) error { + return dl.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + if len(disallowList) == 0 { + return operation.PurgeNodeDisallowList(rw.Writer()) + } + return operation.PersistNodeDisallowList(rw.Writer(), disallowList) + }) +} + +// Retrieve reads the set of disallowed nodes from the database. +// No error is returned if no database entry exists. +// No errors are expected during normal operations. 
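+//
+// A minimal usage sketch (hypothetical; `nodeDisallowList` denotes an instance created
+// via NewNodeDisallowList):
+//
+//	var disallowList map[flow.Identifier]struct{}
+//	if err := nodeDisallowList.Retrieve(&disallowList); err != nil {
+//		return fmt.Errorf("could not read disallow list: %w", err)
+//	}
+//	// disallowList remains empty if no database entry exists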
+func (dl *NodeDisallowList) Retrieve(disallowList *map[flow.Identifier]struct{}) error { + err := operation.RetrieveNodeDisallowList(dl.db.Reader(), disallowList) + if err != nil && !errors.Is(err, storage.ErrNotFound) { + return err + } + return nil +} diff --git a/storage/store/node_disallow_list_test.go b/storage/store/node_disallow_list_test.go new file mode 100644 index 00000000000..ecacc01d49a --- /dev/null +++ b/storage/store/node_disallow_list_test.go @@ -0,0 +1,104 @@ +package store_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestNodeDisallowedListStoreAndRetrieve(t *testing.T) { + t.Run("Retrieving non-existing disallowlist should return no error", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + nodeDisallowListStore := store.NewNodeDisallowList(db) + + var disallowList map[flow.Identifier]struct{} + err := nodeDisallowListStore.Retrieve(&disallowList) + require.NoError(t, err) + require.Equal(t, 0, len(disallowList)) + }) + }) + + t.Run("Storing and reading disallowlist", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + nodeDisallowListStore := store.NewNodeDisallowList(db) + + disallowList := unittest.IdentifierListFixture(8).Lookup() + err := nodeDisallowListStore.Store(disallowList) + require.NoError(t, err) + + var b map[flow.Identifier]struct{} + err = nodeDisallowListStore.Retrieve(&b) + require.NoError(t, err) + require.Equal(t, disallowList, b) + }) + }) + + t.Run("Overwrite disallowlist", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + nodeDisallowListStore := store.NewNodeDisallowList(db) + + disallowList1 := unittest.IdentifierListFixture(8).Lookup() + err := nodeDisallowListStore.Store(disallowList1) + require.NoError(t, err) + + disallowList2 := unittest.IdentifierListFixture(8).Lookup() + err = nodeDisallowListStore.Store(disallowList2) + require.NoError(t, err) + + var b map[flow.Identifier]struct{} + err = nodeDisallowListStore.Retrieve(&b) + require.NoError(t, err) + require.Equal(t, disallowList2, b) + }) + }) + + t.Run("Write & Purge & Write disallowlist", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + nodeDisallowListStore := store.NewNodeDisallowList(db) + + disallowList1 := unittest.IdentifierListFixture(8).Lookup() + err := nodeDisallowListStore.Store(disallowList1) + require.NoError(t, err) + + err = nodeDisallowListStore.Store(nil) + require.NoError(t, err) + + var b map[flow.Identifier]struct{} + err = nodeDisallowListStore.Retrieve(&b) + require.NoError(t, err) + require.Equal(t, 0, len(b)) + + disallowList2 := unittest.IdentifierListFixture(8).Lookup() + err = nodeDisallowListStore.Store(disallowList2) + require.NoError(t, err) + + err = nodeDisallowListStore.Retrieve(&b) + require.NoError(t, err) + require.Equal(t, disallowList2, b) + }) + }) + + t.Run("Purge non-existing disallowlist", func(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + nodeDisallowListStore := store.NewNodeDisallowList(db) + + var b map[flow.Identifier]struct{} + err := nodeDisallowListStore.Retrieve(&b) + require.NoError(t, err) + require.Equal(t, 0, len(b)) + + err = nodeDisallowListStore.Store(nil) + require.NoError(t, err) + + err =
nodeDisallowListStore.Retrieve(&b) + require.NoError(t, err) + require.Equal(t, 0, len(b)) + }) + }) +} diff --git a/storage/store/payloads.go b/storage/store/payloads.go new file mode 100644 index 00000000000..fc5562d7f36 --- /dev/null +++ b/storage/store/payloads.go @@ -0,0 +1,162 @@ +package store + +import ( + "errors" + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +type Payloads struct { + db storage.DB + index *Index + guarantees *Guarantees + seals *Seals + receipts *ExecutionReceipts + results *ExecutionResults +} + +func NewPayloads(db storage.DB, index *Index, guarantees *Guarantees, seals *Seals, receipts *ExecutionReceipts, + results *ExecutionResults) *Payloads { + + p := &Payloads{ + db: db, + index: index, + guarantees: guarantees, + seals: seals, + receipts: receipts, + results: results, + } + + return p +} + +// storeTx stores the payload and its components in the database. +// Every receipt in the payload must reference a known result, i.e. one that is either already stored +// in the database or is going to be stored as part of the same batch. +func (p *Payloads) storeTx(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, payload *flow.Payload) error { + // For correct payloads, each referenced execution result is part of the payload or already stored + // in storage. If an execution result is present in neither of those places, we error: + resultsByID := payload.Results.Lookup() + fullReceipts := make([]*flow.ExecutionReceipt, 0, len(payload.Receipts)) + var err error + for _, meta := range payload.Receipts { + result, ok := resultsByID[meta.ResultID] + if !ok { + result, err = p.results.ByID(meta.ResultID) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("invalid payload referencing unknown execution result %v, err: %w", meta.ResultID, err) + } + return err + } + } + executionReceipt, err := flow.ExecutionReceiptFromStub(*meta, *result) + if err != nil { + return fmt.Errorf("could not create execution receipt from stub: %w", err) + } + + fullReceipts = append(fullReceipts, executionReceipt) + } + + // make sure all payload guarantees are stored + for _, guarantee := range payload.Guarantees { + err := p.guarantees.storeTx(lctx, rw, guarantee) + if err != nil { + return fmt.Errorf("could not store guarantee: %w", err) + } + } + + // make sure all payload seals are stored + for _, seal := range payload.Seals { + err := p.seals.storeTx(rw, seal) + if err != nil { + return fmt.Errorf("could not store seal: %w", err) + } + } + + // store all payload receipts + for _, receipt := range fullReceipts { + err := p.receipts.storeTx(rw, receipt) + if err != nil { + return fmt.Errorf("could not store receipt: %w", err) + } + } + + // store the index + err = p.index.storeTx(lctx, rw, blockID, payload.Index()) + if err != nil { + return fmt.Errorf("could not store index: %w", err) + } + + return nil +} + +func (p *Payloads) retrieveTx(blockID flow.Identifier) (*flow.Payload, error) { + // retrieve the index + idx, err := p.index.ByBlockID(blockID) + if err != nil { + return nil, fmt.Errorf("could not retrieve index: %w", err) + } + + // retrieve guarantees + guarantees := make([]*flow.CollectionGuarantee, 0, len(idx.GuaranteeIDs)) + for _, collID := range idx.GuaranteeIDs { + guarantee, err := p.guarantees.retrieveTx(collID) + if err != nil { + return nil, fmt.Errorf("could not retrieve guarantee (%x): %w", collID, err) + }
guarantees = append(guarantees, guarantee) + } + + // retrieve seals + seals := make([]*flow.Seal, 0, len(idx.SealIDs)) + for _, sealID := range idx.SealIDs { + seal, err := p.seals.retrieveTx(sealID) + if err != nil { + return nil, fmt.Errorf("could not retrieve seal (%x): %w", sealID, err) + } + seals = append(seals, seal) + } + + // retrieve receipts + receipts := make([]*flow.ExecutionReceiptStub, 0, len(idx.ReceiptIDs)) + for _, recID := range idx.ReceiptIDs { + receipt, err := p.receipts.byID(recID) + if err != nil { + return nil, fmt.Errorf("could not retrieve receipt %x: %w", recID, err) + } + receipts = append(receipts, receipt.Stub()) + } + + // retrieve results + results := make([]*flow.ExecutionResult, 0, len(idx.ResultIDs)) + for _, resID := range idx.ResultIDs { + result, err := p.results.byID(resID) + if err != nil { + return nil, fmt.Errorf("could not retrieve result %x: %w", resID, err) + } + results = append(results, result) + } + payload, err := flow.NewPayload( + flow.UntrustedPayload{ + Seals: seals, + Guarantees: guarantees, + Receipts: receipts, + Results: results, + ProtocolStateID: idx.ProtocolStateID, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not build the payload: %w", err) + } + + return payload, nil +} + +func (p *Payloads) ByBlockID(blockID flow.Identifier) (*flow.Payload, error) { + return p.retrieveTx(blockID) +} diff --git a/storage/store/payloads_test.go b/storage/store/payloads_test.go new file mode 100644 index 00000000000..604ff48b9cc --- /dev/null +++ b/storage/store/payloads_test.go @@ -0,0 +1,61 @@ +package store_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestPayloadStoreRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + + all := store.InitAll(metrics, db) + payloads := all.Payloads + blocks := all.Blocks + + expected := unittest.PayloadFixture(unittest.WithAllTheFixins) + block := unittest.BlockWithParentAndPayload(unittest.BlockHeaderWithHeight(10), expected) + proposal := unittest.ProposalFromBlock(block) + require.Equal(t, expected, block.Payload) + blockID := block.ID() + + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return blocks.BatchStore(lctx, rw, proposal) + }) + }) + require.NoError(t, err) + + // fetch payload + payload, err := payloads.ByBlockID(blockID) + require.NoError(t, err) + require.Equal(t, expected, *payload) + }) +} + +func TestPayloadRetrieveWithoutStore(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + + index := store.NewIndex(metrics, db) + seals := store.NewSeals(metrics, db) + guarantees := store.NewGuarantees(metrics, db, store.DefaultCacheSize, store.DefaultCacheSize) + results := store.NewExecutionResults(metrics, db) + receipts := store.NewExecutionReceipts(metrics, db, results, store.DefaultCacheSize) + s := store.NewPayloads(db, index, guarantees, seals, receipts, results) + + blockID := unittest.IdentifierFixture() + + _, err := s.ByBlockID(blockID) + require.ErrorIs(t, err,
storage.ErrNotFound) + }) +} diff --git a/storage/store/proposal_signatures.go b/storage/store/proposal_signatures.go new file mode 100644 index 00000000000..39636e428b6 --- /dev/null +++ b/storage/store/proposal_signatures.go @@ -0,0 +1,71 @@ +package store + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// proposalSignatures implements a proposal signature storage around a storage DB. +// The proposer's signature is only transiently important conceptually (until the block obtains a QC), +// but our current business logic validates it even in cases where it is not strictly necessary. +// For simplicity, we require it to be stored for all blocks; however, it is stored separately to +// make it easier to remove in the future if/when we update the syncing and block ingestion logic. +type proposalSignatures struct { + db storage.DB + cache *Cache[flow.Identifier, []byte] +} + +// newProposalSignatures creates a proposalSignatures instance, which is a database of block proposal signatures +// that supports storing, caching and retrieving by block ID. +func newProposalSignatures(collector module.CacheMetrics, db storage.DB) *proposalSignatures { + store := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, sig []byte) error { + return operation.InsertProposalSignature(lctx, rw.Writer(), blockID, &sig) + } + + retrieve := func(r storage.Reader, blockID flow.Identifier) ([]byte, error) { + var sig []byte + err := operation.RetrieveProposalSignature(r, blockID, &sig) + return sig, err + } + + return &proposalSignatures{ + db: db, + cache: newCache(collector, metrics.ResourceProposalSignature, + withLimit[flow.Identifier, []byte](4*flow.DefaultTransactionExpiry), + withStoreWithLock(store), + withRetrieve(retrieve)), + } +} + +// storeTx persists the given `sig` as the proposer's signature for the specified block. +// +// CAUTION: +// - The caller must acquire either the lock [storage.LockInsertBlock] or [storage.LockInsertOrFinalizeClusterBlock], +// but not both, and hold the lock until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// The lock proof serves as a reminder that the CALLER is responsible for ensuring that the DEDUPLICATION CHECK is done elsewhere, +// ATOMICALLY within this write operation. Currently this is done by operation.InsertHeader, which checks +// that the blockID is new; therefore any data indexed by this blockID is new as well. +// +// No errors are expected during normal operations. +func (h *proposalSignatures) storeTx(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, sig []byte) error { + return h.cache.PutWithLockTx(lctx, rw, blockID, sig) +} + +func (h *proposalSignatures) retrieveTx(blockID flow.Identifier) ([]byte, error) { + return h.cache.Get(h.db.Reader(), blockID) +} + +// ByBlockID returns the proposer signature for the specified block. +// Currently, we store the proposer signature for all blocks, even though this is only strictly +// necessary for blocks that do not have a QC yet. However, this might change in the future.
+// Expected errors during normal operations: +// - [storage.ErrNotFound] if no block with the specified ID is known +func (h *proposalSignatures) ByBlockID(blockID flow.Identifier) ([]byte, error) { + return h.retrieveTx(blockID) +} diff --git a/storage/store/protocol_kv_store.go b/storage/store/protocol_kv_store.go new file mode 100644 index 00000000000..b1213931c2a --- /dev/null +++ b/storage/store/protocol_kv_store.go @@ -0,0 +1,143 @@ +package store + +import ( + "fmt" + + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// DefaultProtocolKVStoreCacheSize is the default size for primary protocol KV store cache. +// KV store is rarely updated, so we will have a limited number of unique snapshots. +// Let's be generous and assume we have 10 different KV stores used at the same time. +var DefaultProtocolKVStoreCacheSize uint = 10 + +// DefaultProtocolKVStoreByBlockIDCacheSize is the default value for secondary index `byBlockIdCache`. +// We want to be able to cover a broad interval of views without cache misses, so we use a bigger value. +// Generally, many blocks will reference the same KV store snapshot. +var DefaultProtocolKVStoreByBlockIDCacheSize uint = 1000 + +// ProtocolKVStore implements persistent storage for storing KV store snapshots. +type ProtocolKVStore struct { + db storage.DB + + // cache holds versioned binary blobs representing snapshots of key-value stores. We use the kv-store's + // ID as key for retrieving the versioned binary snapshot of the kv-store. Consumers must know how to + // deal with the binary representation. `cache` only holds the distinct snapshots. On the happy path, + // we expect single-digit number of unique snapshots within an epoch. + cache *Cache[flow.Identifier, *flow.PSKeyValueStoreData] + + // byBlockIdCache is essentially an in-memory map from `Block.ID()` -> `KeyValueStore.ID()`. The full + // kv-store snapshot can be retrieved from the `cache` above. + // `byBlockIdCache` will contain an entry for every block. We want to be able to cover a broad interval of views + // without cache misses, so a cache size of roughly 1000 entries is reasonable. + byBlockIdCache *Cache[flow.Identifier, flow.Identifier] +} + +var _ storage.ProtocolKVStore = (*ProtocolKVStore)(nil) + +// NewProtocolKVStore creates a ProtocolKVStore instance, which is a database holding KV store snapshots. +// It supports storing, caching and retrieving by ID or the additionally indexed block ID. 
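+// +// A minimal construction sketch, assuming a module.CacheMetrics collector and a storage.DB handle, using the default cache sizes defined above: +// +// kvStore := NewProtocolKVStore(collector, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize)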
+func NewProtocolKVStore(collector module.CacheMetrics, + db storage.DB, + kvStoreCacheSize uint, + kvStoreByBlockIDCacheSize uint, +) *ProtocolKVStore { + retrieveByStateID := func(r storage.Reader, stateID flow.Identifier) (*flow.PSKeyValueStoreData, error) { + var kvStore flow.PSKeyValueStoreData + err := operation.RetrieveProtocolKVStore(r, stateID, &kvStore) + if err != nil { + return nil, fmt.Errorf("could not get kv snapshot by id (%x): %w", stateID, err) + } + return &kvStore, nil + } + + retrieveByBlockID := func(r storage.Reader, blockID flow.Identifier) (flow.Identifier, error) { + var stateID flow.Identifier + err := operation.LookupProtocolKVStore(r, blockID, &stateID) + if err != nil { + return flow.ZeroID, fmt.Errorf("could not lookup protocol state ID for block (%x): %w", blockID[:], err) + } + return stateID, nil + } + + return &ProtocolKVStore{ + db: db, + cache: newCache(collector, metrics.ResourceProtocolKVStore, + withLimit[flow.Identifier, *flow.PSKeyValueStoreData](kvStoreCacheSize), + withStore(operation.InsertProtocolKVStore), + withRetrieve(retrieveByStateID)), + byBlockIdCache: newCache(collector, metrics.ResourceProtocolKVStoreByBlockID, + withLimit[flow.Identifier, flow.Identifier](kvStoreByBlockIDCacheSize), + withStoreWithLock(operation.IndexProtocolKVStore), + withRetrieve(retrieveByBlockID)), + } +} + +// BatchStore persists the KV-store snapshot in the database using the given ID as key. +// BatchStore is idempotent, i.e. it accepts repeated calls with the same pairs of (stateID, kvStore). +// Here, the ID is expected to be a collision-resistant hash of the snapshot (including the +// ProtocolStateVersion). Hence, for the same ID, BatchStore will reject changing the data. +// +// No error is expected during normal operations. +func (s *ProtocolKVStore) BatchStore(rw storage.ReaderBatchWriter, stateID flow.Identifier, data *flow.PSKeyValueStoreData) error { + return s.cache.PutTx(rw, stateID, data) +} + +// BatchIndex persists the specific map entry in the node's database. +// In a nutshell, we want to maintain a map from `blockID` to `stateID`, where `blockID` references the +// block that _proposes_ the referenced KV store snapshot. +// Protocol convention: +// - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example, +// the protocol state changes if we seal some execution results emitting service events. +// - For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value, +// the hash of the resulting protocol state at the end of processing B is to be used. +// - IMPORTANT: The protocol state requires confirmation by a QC and will only become active at the child block, +// _after_ validating the QC. +// +// CAUTION: +// - The caller must acquire the lock [storage.LockInsertBlock] and hold it until the database write has been committed. +// - OVERWRITES existing data (potential for data corruption): +// The lock proof serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done elsewhere +// ATOMICALLY within this write operation. Currently it's done by operation.InsertHeader where it performs a check +// to ensure the blockID is new, therefore any data indexed by this blockID is new as well. +// +// Expected errors during normal operations: +// - [storage.ErrAlreadyExists] if a KV store for the given blockID has already been indexed.
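+// +// A minimal usage sketch, storing and indexing a snapshot atomically in one batch while holding [storage.LockInsertBlock] (mirroring the test below): +// +// err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { +// if err := s.BatchStore(rw, stateID, data); err != nil { +// return err +// } +// return s.BatchIndex(lctx, rw, blockID, stateID) +// })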
+func (s *ProtocolKVStore) BatchIndex(lctx lockctx.Proof, rw storage.ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error { + return s.byBlockIdCache.PutWithLockTx(lctx, rw, blockID, stateID) +} + +// ByID retrieves the KV store snapshot with the given state ID. +// Expected errors during normal operations: +// - storage.ErrNotFound if no snapshot with the given Identifier is known. +func (s *ProtocolKVStore) ByID(stateID flow.Identifier) (*flow.PSKeyValueStoreData, error) { + return s.cache.Get(s.db.Reader(), stateID) +} + +// ByBlockID retrieves the kv-store snapshot that the block with the given ID proposes. +// CAUTION: this store snapshot requires confirmation by a QC and will only become active at the child block, +// _after_ validating the QC. Protocol convention: +// - Consider block B, whose ingestion might potentially lead to an updated KV store state. +// For example, the state changes if we seal some execution results emitting specific service events. +// - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. As value, +// the hash of the resulting state at the end of processing B is to be used. +// - CAUTION: The updated state requires confirmation by a QC and will only become active at the child block, +// _after_ validating the QC. +// +// Expected errors during normal operations: +// - storage.ErrNotFound if no snapshot has been indexed for the given block. +func (s *ProtocolKVStore) ByBlockID(blockID flow.Identifier) (*flow.PSKeyValueStoreData, error) { + stateID, err := s.byBlockIdCache.Get(s.db.Reader(), blockID) + if err != nil { + return nil, fmt.Errorf("could not lookup protocol state ID for block (%x): %w", blockID[:], err) + } + return s.cache.Get(s.db.Reader(), stateID) +} diff --git a/storage/store/protocol_kv_store_test.go b/storage/store/protocol_kv_store_test.go new file mode 100644 index 00000000000..eeaefafe912 --- /dev/null +++ b/storage/store/protocol_kv_store_test.go @@ -0,0 +1,150 @@ +package store + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestKeyValueStoreStorage tests that the KV store is stored, retrieved and indexed correctly +func TestKeyValueStoreStorage(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + + expected := &flow.PSKeyValueStoreData{ + Version: 2, + Data: unittest.RandomBytes(32), + } + stateID := unittest.IdentifierFixture() + blockID := unittest.IdentifierFixture() + + // store protocol state and auxiliary info + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + err := store.BatchStore(rw, stateID, expected) + if err != nil { + return err + } + return store.BatchIndex(lctx, rw, blockID, stateID) + }) + }) + require.NoError(t, err) + + // fetch protocol state by its own ID + actual, err := store.ByID(stateID) + require.NoError(t, err) + assert.Equal(t, expected, actual) + + // fetch protocol
state index by the block ID + actualByBlockID, err := store.ByBlockID(blockID) + require.NoError(t, err) + assert.Equal(t, expected, actualByBlockID) + }) +} + +// TestProtocolKVStore_StoreTx tests that BatchStore handles storage requests correctly. +// Since BatchStore is now idempotent and doesn't return errors for duplicate data, +// we test that it can be called multiple times without issues. +func TestProtocolKVStore_StoreTx(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + + stateID := unittest.IdentifierFixture() + expected := &flow.PSKeyValueStoreData{ + Version: 2, + Data: unittest.RandomBytes(32), + } + + // Store initial data + err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchStore(rw, stateID, expected) + }) + require.NoError(t, err) + + // Store same data again - should succeed (idempotent) + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchStore(rw, stateID, expected) + }) + require.NoError(t, err) + + // Verify the data can still be retrieved + actual, err := store.ByID(stateID) + require.NoError(t, err) + assert.Equal(t, expected, actual) + }) +} + +// TestProtocolKVStore_IndexTx tests that BatchIndex handles storage requests correctly when a snapshot for +// the given block ID has already been indexed: +// - if the KV-store ID is exactly the same as the one already indexed, `BatchIndex` returns `storage.ErrAlreadyExists` +// - if we request to index a different ID, `storage.ErrAlreadyExists` is returned as well. +func TestProtocolKVStore_IndexTx(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + + stateID := unittest.IdentifierFixture() + blockID := unittest.IdentifierFixture() + + // Index initial data + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchIndex(lctx, rw, blockID, stateID) + }) + }) + require.NoError(t, err) + + // Index same data again - should error with storage.ErrAlreadyExists + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchIndex(lctx, rw, blockID, stateID) + }) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + + // Attempt to index different stateID with same blockID + differentStateID := unittest.IdentifierFixture() + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchIndex(lctx, rw, blockID, differentStateID) + }) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + }) +} + +// TestProtocolKVStore_ByBlockID tests that ByBlockID returns an error if no snapshot has been indexed for the given block.
+func TestProtocolKVStore_ByBlockID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + + blockID := unittest.IdentifierFixture() + _, err := store.ByBlockID(blockID) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} + +// TestProtocolKVStore_ByID tests that ByID returns an error if no snapshot with the given Identifier is known. +func TestProtocolKVStore_ByID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store := NewProtocolKVStore(metrics, db, DefaultProtocolKVStoreCacheSize, DefaultProtocolKVStoreByBlockIDCacheSize) + + stateID := unittest.IdentifierFixture() + _, err := store.ByID(stateID) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} diff --git a/storage/store/qcs.go b/storage/store/qcs.go new file mode 100644 index 00000000000..f5a9e57a9fd --- /dev/null +++ b/storage/store/qcs.go @@ -0,0 +1,63 @@ +package store + +import ( + "github.com/jordanschalm/lockctx" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// QuorumCertificates implements persistent storage for quorum certificates. +type QuorumCertificates struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.QuorumCertificate] +} + +var _ storage.QuorumCertificates = (*QuorumCertificates)(nil) + +// NewQuorumCertificates creates a QuorumCertificates instance, which is a database of quorum certificates +// that supports storing, caching and retrieving by block ID. +func NewQuorumCertificates(collector module.CacheMetrics, db storage.DB, cacheSize uint) *QuorumCertificates { + storeWithLock := func(lctx lockctx.Proof, rw storage.ReaderBatchWriter, _ flow.Identifier, qc *flow.QuorumCertificate) error { + return operation.InsertQuorumCertificate(lctx, rw, qc) + } + + retrieve := func(r storage.Reader, blockID flow.Identifier) (*flow.QuorumCertificate, error) { + var qc flow.QuorumCertificate + err := operation.RetrieveQuorumCertificate(r, blockID, &qc) + return &qc, err + } + + return &QuorumCertificates{ + db: db, + cache: newCache(collector, metrics.ResourceQC, + withLimit[flow.Identifier, *flow.QuorumCertificate](cacheSize), + withStoreWithLock(storeWithLock), + withRetrieve(retrieve)), + } +} + +// BatchStore stores a Quorum Certificate as part of database batch update. QC is indexed by QC.BlockID. +// +// Note: For the same block, different QCs can easily be constructed by selecting different sub-sets of the received votes +// (provided more than the minimal number of consensus participants voted, which is typically the case). In most cases, it +// is only important that a block has been certified, but irrelevant who specifically contributed to the QC. Therefore, we +// only store the first QC. +// +// If *any* quorum certificate for QC.BlockID has already been stored, a `storage.ErrAlreadyExists` is returned (typically benign). +func (q *QuorumCertificates) BatchStore(lctx lockctx.Proof, rw storage.ReaderBatchWriter, qc *flow.QuorumCertificate) error { + return q.cache.PutWithLockTx(lctx, rw, qc.BlockID, qc) +} + +// ByBlockID returns the QC that certifies the block referred to by blockID. +// Expected errors during normal operations: +// * storage.ErrNotFound if no QC for blockID exists.
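+// +// A minimal retrieval sketch, assuming blockID identifies a block of interest: +// +// qc, err := q.ByBlockID(blockID) +// if errors.Is(err, storage.ErrNotFound) { +// // no QC for this block has been stored yet +// }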
+func (q *QuorumCertificates) ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error) { + val, err := q.cache.Get(q.db.Reader(), blockID) + if err != nil { + return nil, err + } + return val, nil +} diff --git a/storage/store/qcs_test.go b/storage/store/qcs_test.go new file mode 100644 index 00000000000..d7060cd1d7d --- /dev/null +++ b/storage/store/qcs_test.go @@ -0,0 +1,110 @@ +package store_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestQuorumCertificates_StoreTx tests storing and retrieving of a QC. +func TestQuorumCertificates_StoreTx(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + store := store.NewQuorumCertificates(metrics, db, 10) + qc := unittest.QuorumCertificateFixture() + + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchStore(lctx, rw, qc) + }) + }) + require.NoError(t, err) + + actual, err := store.ByBlockID(qc.BlockID) + require.NoError(t, err) + + require.Equal(t, qc, actual) + }) +} + +// TestQuorumCertificates_LockEnforced verifies that storing a QC requires holding the +// storage.LockInsertBlock lock. If the lock is not held, `BatchStore` should error. +func TestQuorumCertificates_LockEnforced(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + store := store.NewQuorumCertificates(metrics, db, 10) + qc := unittest.QuorumCertificateFixture() + + // acquire wrong lock and attempt to store QC: should error + err := unittest.WithLock(t, lockManager, storage.LockFinalizeBlock, func(lctx lockctx.Context) error { // INCORRECT LOCK + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.BatchStore(lctx, rw, qc) + }) + }) + // Handle the error outside of db.WithReaderBatchWriter; otherwise, the batch writer would + // consider that no error happened and would commit the batch, which would execute + // the callbacks that store the QC in the cache, causing the following + // ByBlockID reads to return dirty data. + require.Error(t, err) + + // qc should not be stored, so ByBlockID should return `storage.ErrNotFound` + _, err = store.ByBlockID(qc.BlockID) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} + +// TestQuorumCertificates_StoreTx_OtherQC checks that storing another QC for the same blockID results in +// `storage.ErrAlreadyExists` and that the already stored value is not overwritten.
+func TestQuorumCertificates_StoreTx_OtherQC(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + lockManager := storage.NewTestingLockManager() + metrics := metrics.NewNoopCollector() + s := store.NewQuorumCertificates(metrics, db, 10) + qc := unittest.QuorumCertificateFixture() + otherQC := unittest.QuorumCertificateFixture(func(otherQC *flow.QuorumCertificate) { + otherQC.View = qc.View + otherQC.BlockID = qc.BlockID + }) + + err := unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return s.BatchStore(lctx, rw, qc) + }) + }) + require.NoError(t, err) + + err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error { + return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return s.BatchStore(lctx, rw, otherQC) + }) + }) + require.ErrorIs(t, err, storage.ErrAlreadyExists) + + actual, err := s.ByBlockID(otherQC.BlockID) + require.NoError(t, err) + + require.Equal(t, qc, actual) + }) +} + +// TestQuorumCertificates_ByBlockID tests that ByBlockID returns the correct sentinel error if no QC for the given block ID has been found +func TestQuorumCertificates_ByBlockID(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store := store.NewQuorumCertificates(metrics, db, 10) + + actual, err := store.ByBlockID(unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrNotFound) + require.Nil(t, actual) + }) +} diff --git a/storage/store/receipts.go b/storage/store/receipts.go new file mode 100644 index 00000000000..9f76d4d653c --- /dev/null +++ b/storage/store/receipts.go @@ -0,0 +1,118 @@ +package store + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// ExecutionReceipts implements storage for execution receipts. +type ExecutionReceipts struct { + db storage.DB + results storage.ExecutionResults + cache *Cache[flow.Identifier, *flow.ExecutionReceipt] +} + +// NewExecutionReceipts creates an ExecutionReceipts instance, which is a database of receipts that +// supports storing and indexing receipts by receipt ID and block ID.
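+// +// A minimal construction sketch, assuming a metrics collector, a storage.DB handle and a cache size of 100 (mirroring the tests below): +// +// results := NewExecutionResults(metrics, db) +// receipts := NewExecutionReceipts(metrics, db, results, 100)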
+func NewExecutionReceipts(collector module.CacheMetrics, db storage.DB, results storage.ExecutionResults, cacheSize uint) *ExecutionReceipts { + store := func(rw storage.ReaderBatchWriter, _ flow.Identifier, receipt *flow.ExecutionReceipt) error { + receiptID := receipt.ID() + + err := results.BatchStore(&receipt.ExecutionResult, rw) + if err != nil { + return fmt.Errorf("could not store result: %w", err) + } + err = operation.InsertExecutionReceiptStub(rw.Writer(), receiptID, receipt.Stub()) + if err != nil { + return fmt.Errorf("could not store receipt metadata: %w", err) + } + err = operation.IndexExecutionReceipts(rw.Writer(), receipt.ExecutionResult.BlockID, receiptID) + if err != nil { + return fmt.Errorf("could not index receipt by the block it computes: %w", err) + } + return nil + } + + retrieve := func(r storage.Reader, receiptID flow.Identifier) (*flow.ExecutionReceipt, error) { + var meta flow.ExecutionReceiptStub + err := operation.RetrieveExecutionReceiptStub(r, receiptID, &meta) + if err != nil { + return nil, fmt.Errorf("could not retrieve receipt meta: %w", err) + } + result, err := results.ByID(meta.ResultID) + if err != nil { + return nil, fmt.Errorf("could not retrieve result: %w", err) + } + executionReceipt, err := flow.ExecutionReceiptFromStub(meta, *result) + if err != nil { + return nil, fmt.Errorf("could not create execution receipt from stub: %w", err) + } + return executionReceipt, nil + } + + return &ExecutionReceipts{ + db: db, + results: results, + cache: newCache(collector, metrics.ResourceReceipt, + withLimit[flow.Identifier, *flow.ExecutionReceipt](cacheSize), + withStore(store), + withRetrieve(retrieve)), + } +} + +// storeTx assembles the operations to store an arbitrary receipt. +func (r *ExecutionReceipts) storeTx(rw storage.ReaderBatchWriter, receipt *flow.ExecutionReceipt) error { + return r.cache.PutTx(rw, receipt.ID(), receipt) +} + +func (r *ExecutionReceipts) byID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error) { + val, err := r.cache.Get(r.db.Reader(), receiptID) + if err != nil { + return nil, err + } + return val, nil +} + +func (r *ExecutionReceipts) byBlockID(blockID flow.Identifier) ([]*flow.ExecutionReceipt, error) { + var receiptIDs []flow.Identifier + err := operation.LookupExecutionReceipts(r.db.Reader(), blockID, &receiptIDs) + if err != nil { + return nil, fmt.Errorf("could not find receipt index for block: %w", err) + } + + var receipts []*flow.ExecutionReceipt + for _, id := range receiptIDs { + receipt, err := r.byID(id) + if err != nil { + return nil, fmt.Errorf("could not find receipt with id %v: %w", id, err) + } + receipts = append(receipts, receipt) + } + return receipts, nil +} + +func (r *ExecutionReceipts) Store(receipt *flow.ExecutionReceipt) error { + return r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return r.storeTx(rw, receipt) + }) +} + +func (r *ExecutionReceipts) BatchStore(receipt *flow.ExecutionReceipt, rw storage.ReaderBatchWriter) error { + return r.storeTx(rw, receipt) +} + +func (r *ExecutionReceipts) ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error) { + return r.byID(receiptID) +} + +// ByBlockID retrieves the list of execution receipts from storage. +// +// No errors are expected during normal operations.
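+// +// A minimal usage sketch; an unknown blockID yields an empty list without error (see the "get empty" test below): +// +// receipts, err := r.ByBlockID(blockID)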
+func (r *ExecutionReceipts) ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error) { + return r.byBlockID(blockID) +} diff --git a/storage/store/receipts_test.go b/storage/store/receipts_test.go new file mode 100644 index 00000000000..1c8ba7b1b1f --- /dev/null +++ b/storage/store/receipts_test.go @@ -0,0 +1,147 @@ +package store_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestExecutionReceiptsStorage(t *testing.T) { + withStore := func(t *testing.T, f func(store1 *store.ExecutionReceipts)) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + results := store.NewExecutionResults(metrics, db) + store1 := store.NewExecutionReceipts(metrics, db, results, 100) + f(store1) + }) + } + + t.Run("get empty", func(t *testing.T) { + withStore(t, func(store1 *store.ExecutionReceipts) { + block := unittest.BlockFixture() + receipts, err := store1.ByBlockID(block.ID()) + require.NoError(t, err) + require.Equal(t, 0, len(receipts)) + }) + }) + + t.Run("store one get one", func(t *testing.T) { + withStore(t, func(store1 *store.ExecutionReceipts) { + block := unittest.BlockFixture() + receipt1 := unittest.ReceiptForBlockFixture(block) + + err := store1.Store(receipt1) + require.NoError(t, err) + + actual, err := store1.ByID(receipt1.ID()) + require.NoError(t, err) + + require.Equal(t, receipt1, actual) + + receipts, err := store1.ByBlockID(block.ID()) + require.NoError(t, err) + + require.Equal(t, flow.ExecutionReceiptList{receipt1}, receipts) + }) + }) + + t.Run("store two for the same block", func(t *testing.T) { + withStore(t, func(store1 *store.ExecutionReceipts) { + block := unittest.BlockFixture() + + executor1 := unittest.IdentifierFixture() + executor2 := unittest.IdentifierFixture() + + receipt1 := unittest.ReceiptForBlockExecutorFixture(block, executor1) + receipt2 := unittest.ReceiptForBlockExecutorFixture(block, executor2) + + err := store1.Store(receipt1) + require.NoError(t, err) + + err = store1.Store(receipt2) + require.NoError(t, err) + + receipts, err := store1.ByBlockID(block.ID()) + require.NoError(t, err) + + require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1, receipt2}, receipts) + }) + }) + + t.Run("store two for different blocks", func(t *testing.T) { + withStore(t, func(store1 *store.ExecutionReceipts) { + block1 := unittest.BlockFixture() + block2 := unittest.BlockFixture() + + executor1 := unittest.IdentifierFixture() + executor2 := unittest.IdentifierFixture() + + receipt1 := unittest.ReceiptForBlockExecutorFixture(block1, executor1) + receipt2 := unittest.ReceiptForBlockExecutorFixture(block2, executor2) + + err := store1.Store(receipt1) + require.NoError(t, err) + + err = store1.Store(receipt2) + require.NoError(t, err) + + receipts1, err := store1.ByBlockID(block1.ID()) + require.NoError(t, err) + + receipts2, err := store1.ByBlockID(block2.ID()) + require.NoError(t, err) + + require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1}, receipts1) + require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt2}, receipts2) + }) + }) + + t.Run("indexing duplicated receipts should be ok", func(t *testing.T) { + withStore(t, func(store1 *store.ExecutionReceipts) { + block1 := unittest.BlockFixture() + 
executor1 := unittest.IdentifierFixture() + receipt1 := unittest.ReceiptForBlockExecutorFixture(block1, executor1) + + err := store1.Store(receipt1) + require.NoError(t, err) + + err = store1.Store(receipt1) + require.NoError(t, err) + + receipts, err := store1.ByBlockID(block1.ID()) + require.NoError(t, err) + + require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1}, receipts) + }) + }) + + t.Run("indexing receipt from the same executor for same block should succeed", func(t *testing.T) { + withStore(t, func(store1 *store.ExecutionReceipts) { + block1 := unittest.BlockFixture() + + executor1 := unittest.IdentifierFixture() + + receipt1 := unittest.ReceiptForBlockExecutorFixture(block1, executor1) + receipt2 := unittest.ReceiptForBlockExecutorFixture(block1, executor1) + + err := store1.Store(receipt1) + require.NoError(t, err) + + err = store1.Store(receipt2) + require.NoError(t, err) + + receipts, err := store1.ByBlockID(block1.ID()) + require.NoError(t, err) + + require.ElementsMatch(t, []*flow.ExecutionReceipt{receipt1, receipt2}, receipts) + }) + }) +} diff --git a/storage/store/results.go b/storage/store/results.go new file mode 100644 index 00000000000..45c269f5a7f --- /dev/null +++ b/storage/store/results.go @@ -0,0 +1,160 @@ +package store + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// ExecutionResults implements persistent storage for execution results. +type ExecutionResults struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.ExecutionResult] +} + +var _ storage.ExecutionResults = (*ExecutionResults)(nil) + +func NewExecutionResults(collector module.CacheMetrics, db storage.DB) *ExecutionResults { + + store := func(rw storage.ReaderBatchWriter, _ flow.Identifier, result *flow.ExecutionResult) error { + return operation.InsertExecutionResult(rw.Writer(), result) + } + + retrieve := func(r storage.Reader, resultID flow.Identifier) (*flow.ExecutionResult, error) { + var result flow.ExecutionResult + err := operation.RetrieveExecutionResult(r, resultID, &result) + return &result, err + } + + res := &ExecutionResults{ + db: db, + cache: newCache(collector, metrics.ResourceResult, + withLimit[flow.Identifier, *flow.ExecutionResult](flow.DefaultTransactionExpiry+100), + withStore(store), + withRetrieve(retrieve)), + } + + return res +} + +func (r *ExecutionResults) store(rw storage.ReaderBatchWriter, result *flow.ExecutionResult) error { + return r.cache.PutTx(rw, result.ID(), result) +} + +func (r *ExecutionResults) byID(resultID flow.Identifier) (*flow.ExecutionResult, error) { + val, err := r.cache.Get(r.db.Reader(), resultID) + if err != nil { + return nil, err + } + return val, nil +} + +func (r *ExecutionResults) byBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { + var resultID flow.Identifier + err := operation.LookupExecutionResult(r.db.Reader(), blockID, &resultID) + if err != nil { + return nil, fmt.Errorf("could not lookup execution result ID: %w", err) + } + return r.byID(resultID) +} + +func (r *ExecutionResults) index(w storage.Writer, blockID, resultID flow.Identifier, force bool) error { + if !force { + // when not forcing the index, check if the result is already indexed + exist, err := operation.ExistExecutionResult(r.db.Reader(), blockID) + if err != nil { + return fmt.Errorf("could not check if execution result exists: %w", err) + } + + // if 
the result is already indexed, check if the stored result is the same + if exist { + var storedResultID flow.Identifier + err = operation.LookupExecutionResult(r.db.Reader(), blockID, &storedResultID) + if err != nil { + return fmt.Errorf("could not lookup execution result ID: %w", err) + } + + if storedResultID != resultID { + return fmt.Errorf("storing result that is different from the already stored one for block: %v, storing result: %v, stored result: %v. %w", + blockID, resultID, storedResultID, storage.ErrDataMismatch) + } + + // if the result is the same, we don't need to index it again + return nil + } + + // if the result is not indexed, we can index it + } + + err := operation.IndexExecutionResult(w, blockID, resultID) + if err != nil { + return fmt.Errorf("could not index execution result: %w", err) + } + + return nil +} + +func (r *ExecutionResults) Store(result *flow.ExecutionResult) error { + return r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return r.store(rw, result) + }) +} + +func (r *ExecutionResults) BatchStore(result *flow.ExecutionResult, batch storage.ReaderBatchWriter) error { + return r.store(batch, result) +} + +func (r *ExecutionResults) BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch storage.ReaderBatchWriter) error { + return operation.IndexExecutionResult(batch.Writer(), blockID, resultID) +} + +func (r *ExecutionResults) ByID(resultID flow.Identifier) (*flow.ExecutionResult, error) { + return r.byID(resultID) +} + +// Index indexes an execution result by block ID. +// Note: this method is not concurrency-safe, because it checks whether a different result is already indexed +// for the same blockID, and if so, it returns an error. +// The caller needs to ensure that there is no concurrent call to this method with the same blockID. +func (r *ExecutionResults) Index(blockID flow.Identifier, resultID flow.Identifier) error { + err := r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return r.index(rw.Writer(), blockID, resultID, false) + }) + + if err != nil { + return fmt.Errorf("could not index execution result: %w", err) + } + return nil +} + +func (r *ExecutionResults) ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error { + err := r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return r.index(rw.Writer(), blockID, resultID, true) + }) + + if err != nil { + return fmt.Errorf("could not index execution result: %w", err) + } + return nil +} + +func (r *ExecutionResults) ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error) { + return r.byBlockID(blockID) +} + +func (r *ExecutionResults) RemoveIndexByBlockID(blockID flow.Identifier) error { + return r.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return operation.RemoveExecutionResultIndex(rw.Writer(), blockID) + }) +} + +// BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch. +// No errors are expected during normal operation, even if no entries are matched. +// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
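+// +// A minimal usage sketch, removing the index as part of a larger batch: +// +// err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { +// return r.BatchRemoveIndexByBlockID(blockID, rw) +// })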
+func (r *ExecutionResults) BatchRemoveIndexByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error { + return operation.RemoveExecutionResultIndex(batch.Writer(), blockID) +} diff --git a/storage/store/results_test.go b/storage/store/results_test.go new file mode 100644 index 00000000000..34f63e1f885 --- /dev/null +++ b/storage/store/results_test.go @@ -0,0 +1,138 @@ +package store_test + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestResultStoreAndRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store1 := store.NewExecutionResults(metrics, db) + + result := unittest.ExecutionResultFixture() + blockID := unittest.IdentifierFixture() + err := store1.Store(result) + require.NoError(t, err) + + err = store1.Index(blockID, result.ID()) + require.NoError(t, err) + + actual, err := store1.ByBlockID(blockID) + require.NoError(t, err) + + require.Equal(t, result, actual) + }) +} + +func TestResultStoreTwice(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store1 := store.NewExecutionResults(metrics, db) + + result := unittest.ExecutionResultFixture() + blockID := unittest.IdentifierFixture() + err := store1.Store(result) + require.NoError(t, err) + + err = store1.Index(blockID, result.ID()) + require.NoError(t, err) + + err = store1.Store(result) + require.NoError(t, err) + + err = store1.Index(blockID, result.ID()) + require.NoError(t, err) + }) +} + +func TestResultBatchStoreTwice(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store1 := store.NewExecutionResults(metrics, db) + + result := unittest.ExecutionResultFixture() + blockID := unittest.IdentifierFixture() + + require.NoError(t, db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { + err := store1.BatchStore(result, batch) + require.NoError(t, err) + + err = store1.BatchIndex(blockID, result.ID(), batch) + require.NoError(t, err) + return nil + })) + + require.NoError(t, db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error { + err := store1.BatchStore(result, batch) + require.NoError(t, err) + + err = store1.BatchIndex(blockID, result.ID(), batch) + require.NoError(t, err) + + return nil + })) + }) +} + +func TestResultStoreTwoDifferentResultsShouldFail(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store1 := store.NewExecutionResults(metrics, db) + + result1 := unittest.ExecutionResultFixture() + result2 := unittest.ExecutionResultFixture() + blockID := unittest.IdentifierFixture() + err := store1.Store(result1) + require.NoError(t, err) + + err = store1.Index(blockID, result1.ID()) + require.NoError(t, err) + + // we can store a different result, but we can't index + // a different result for that block, because that would mean + // one block has two different results.
+ err = store1.Store(result2) + require.NoError(t, err) + + err = store1.Index(blockID, result2.ID()) + require.Error(t, err) + require.True(t, errors.Is(err, storage.ErrDataMismatch)) + }) +} + +func TestResultStoreForceIndexOverridesMapping(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store1 := store.NewExecutionResults(metrics, db) + + result1 := unittest.ExecutionResultFixture() + result2 := unittest.ExecutionResultFixture() + blockID := unittest.IdentifierFixture() + err := store1.Store(result1) + require.NoError(t, err) + err = store1.Index(blockID, result1.ID()) + require.NoError(t, err) + + err = store1.Store(result2) + require.NoError(t, err) + + // force index + err = store1.ForceIndex(blockID, result2.ID()) + require.NoError(t, err) + + // retrieve index to make sure it points to second ER now + byBlockID, err := store1.ByBlockID(blockID) + + require.Equal(t, result2, byBlockID) + require.NoError(t, err) + }) +} diff --git a/storage/store/seals.go b/storage/store/seals.go new file mode 100644 index 00000000000..884e17d68aa --- /dev/null +++ b/storage/store/seals.go @@ -0,0 +1,86 @@ +package store + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +type Seals struct { + db storage.DB + cache *Cache[flow.Identifier, *flow.Seal] +} + +func NewSeals(collector module.CacheMetrics, db storage.DB) *Seals { + + store := func(rw storage.ReaderBatchWriter, sealID flow.Identifier, seal *flow.Seal) error { + return operation.InsertSeal(rw.Writer(), sealID, seal) + } + + retrieve := func(r storage.Reader, sealID flow.Identifier) (*flow.Seal, error) { + var seal flow.Seal + err := operation.RetrieveSeal(r, sealID, &seal) + return &seal, err + } + + s := &Seals{ + db: db, + cache: newCache[flow.Identifier, *flow.Seal](collector, metrics.ResourceSeal, + withLimit[flow.Identifier, *flow.Seal](flow.DefaultTransactionExpiry+100), + withStore(store), + withRetrieve(retrieve)), + } + + return s +} + +func (s *Seals) storeTx(rw storage.ReaderBatchWriter, seal *flow.Seal) error { + return s.cache.PutTx(rw, seal.ID(), seal) +} + +func (s *Seals) retrieveTx(sealID flow.Identifier) (*flow.Seal, error) { + val, err := s.cache.Get(s.db.Reader(), sealID) + if err != nil { + return nil, err + } + return val, err +} + +func (s *Seals) Store(seal *flow.Seal) error { + return s.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return s.storeTx(rw, seal) + }) +} + +func (s *Seals) ByID(sealID flow.Identifier) (*flow.Seal, error) { + return s.retrieveTx(sealID) +} + +// HighestInFork retrieves the highest seal that was included in the +// fork up to (and including) blockID. This method should return a seal +// for any block known to the node. Returns storage.ErrNotFound if +// blockID is unknown. +func (s *Seals) HighestInFork(blockID flow.Identifier) (*flow.Seal, error) { + var sealID flow.Identifier + err := operation.LookupLatestSealAtBlock(s.db.Reader(), blockID, &sealID) + if err != nil { + return nil, fmt.Errorf("failed to retrieve seal for fork with head %x: %w", blockID, err) + } + return s.ByID(sealID) +} + +// FinalizedSealForBlock returns the seal for the given block, only if that seal +// has been included in a finalized block. +// Returns storage.ErrNotFound if the block is unknown or unsealed. 
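+// +// A minimal retrieval sketch contrasting the two lookups: +// +// latest, err := s.HighestInFork(headID) // highest seal included in the fork with head headID +// sealed, err := s.FinalizedSealForBlock(blockID) // seal for blockID, once included in a finalized block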
+func (s *Seals) FinalizedSealForBlock(blockID flow.Identifier) (*flow.Seal, error) { + var sealID flow.Identifier + err := operation.LookupBySealedBlockID(s.db.Reader(), blockID, &sealID) + if err != nil { + return nil, fmt.Errorf("failed to retrieve seal for block %x: %w", blockID, err) + } + return s.ByID(sealID) +} diff --git a/storage/store/seals_test.go b/storage/store/seals_test.go new file mode 100644 index 00000000000..02f79fdece2 --- /dev/null +++ b/storage/store/seals_test.go @@ -0,0 +1,107 @@ +package store_test + +import ( + "testing" + + "github.com/jordanschalm/lockctx" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestRetrieveWithoutStore(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + s := store.NewSeals(metrics, db) + + _, err := s.ByID(unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrNotFound) + + _, err = s.HighestInFork(unittest.IdentifierFixture()) + require.ErrorIs(t, err, storage.ErrNotFound) + }) +} + +// TestSealStoreRetrieve verifies that a seal can be stored and retrieved by its ID +func TestSealStoreRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + s := store.NewSeals(metrics, db) + + expected := unittest.Seal.Fixture() + // store seal + err := s.Store(expected) + require.NoError(t, err) + + // retrieve seal + seal, err := s.ByID(expected.ID()) + require.NoError(t, err) + require.Equal(t, expected, seal) + }) +} + +// TestSealIndexAndRetrieve verifies that: +// - for a block, we can store (aka index) the latest sealed block along this fork. +// +// Note: indexing the seal for a block is currently implemented only through a direct +// database operation. The Seals store itself only supports retrieving the latest sealed block.
+func TestSealIndexAndRetrieve(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		lockManager := storage.NewTestingLockManager()
+		metrics := metrics.NewNoopCollector()
+		s := store.NewSeals(metrics, db)
+
+		expectedSeal := unittest.Seal.Fixture()
+		blockID := unittest.IdentifierFixture()
+
+		// store the seal first
+		err := s.Store(expectedSeal)
+		require.NoError(t, err)
+
+		// index the seal ID for the highest sealed block in this fork
+		err = unittest.WithLock(t, lockManager, storage.LockInsertBlock, func(lctx lockctx.Context) error {
+			return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+				return operation.IndexLatestSealAtBlock(lctx, rw.Writer(), blockID, expectedSeal.ID())
+			})
+		})
+		require.NoError(t, err)
+
+		// retrieve latest seal
+		seal, err := s.HighestInFork(blockID)
+		require.NoError(t, err)
+		require.Equal(t, expectedSeal, seal)
+	})
+}
+
+// TestSealedBlockIndexAndRetrieve checks that, after indexing a seal by its sealed
+// block ID, it can be retrieved by that block ID
+func TestSealedBlockIndexAndRetrieve(t *testing.T) {
+	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
+		metrics := metrics.NewNoopCollector()
+		s := store.NewSeals(metrics, db)
+
+		expectedSeal := unittest.Seal.Fixture()
+		blockID := unittest.IdentifierFixture()
+		expectedSeal.BlockID = blockID
+
+		// store the seal first
+		err := s.Store(expectedSeal)
+		require.NoError(t, err)
+
+		// index the finalized seal by its sealed block ID
+		err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+			return operation.IndexFinalizedSealByBlockID(rw.Writer(), expectedSeal.BlockID, expectedSeal.ID())
+		})
+		require.NoError(t, err)
+
+		// retrieve the seal by the sealed block ID
+		seal, err := s.FinalizedSealForBlock(blockID)
+		require.NoError(t, err)
+		require.Equal(t, expectedSeal, seal)
+	})
+}
diff --git a/storage/store/transaction_result_error_messages.go b/storage/store/transaction_result_error_messages.go
new file mode 100644
index 00000000000..6f315216aea
--- /dev/null
+++ b/storage/store/transaction_result_error_messages.go
@@ -0,0 +1,173 @@
+package store
+
+import (
+	"fmt"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/storage/operation"
+)
+
+var _ storage.TransactionResultErrorMessages = (*TransactionResultErrorMessages)(nil)
+
+type TransactionResultErrorMessages struct {
+	db         storage.DB
+	cache      *Cache[TwoIdentifier, flow.TransactionResultErrorMessage]        // Key: blockID + txID
+	indexCache *Cache[IdentifierAndUint32, flow.TransactionResultErrorMessage]  // Key: blockID + txIndex
+	blockCache *Cache[flow.Identifier, []flow.TransactionResultErrorMessage]    // Key: blockID
+}
+
+func NewTransactionResultErrorMessages(collector module.CacheMetrics, db storage.DB, transactionResultsCacheSize uint) *TransactionResultErrorMessages {
+	retrieve := func(r storage.Reader, key TwoIdentifier) (flow.TransactionResultErrorMessage, error) {
+		blockID, txID := KeyToBlockIDTransactionID(key)
+
+		var txResultErrMsg flow.TransactionResultErrorMessage
+		err := operation.RetrieveTransactionResultErrorMessage(r, blockID, txID, &txResultErrMsg)
+		if err != nil {
+			return flow.TransactionResultErrorMessage{}, err
+		}
+		return txResultErrMsg, nil
+	}
+
+	retrieveIndex := func(r storage.Reader, key IdentifierAndUint32) (flow.TransactionResultErrorMessage, error) {
+		blockID, txIndex := KeyToBlockIDIndex(key)
+
+		var txResultErrMsg flow.TransactionResultErrorMessage
+		err := operation.RetrieveTransactionResultErrorMessageByIndex(r, blockID, txIndex, &txResultErrMsg)
+		if err != nil {
+			return flow.TransactionResultErrorMessage{}, err
+		}
+		return txResultErrMsg, nil
+	}
+
+	retrieveForBlock := func(r storage.Reader, blockID flow.Identifier) ([]flow.TransactionResultErrorMessage, error) {
+		var txResultErrMsg []flow.TransactionResultErrorMessage
+		err := operation.LookupTransactionResultErrorMessagesByBlockIDUsingIndex(r, blockID, &txResultErrMsg)
+		if err != nil {
+			return nil, err
+		}
+		return txResultErrMsg, nil
+	}
+
+	return &TransactionResultErrorMessages{
+		db: db,
+		cache: newCache(collector, metrics.ResourceTransactionResultErrorMessages,
+			withLimit[TwoIdentifier, flow.TransactionResultErrorMessage](transactionResultsCacheSize),
+			withStore(noopStore[TwoIdentifier, flow.TransactionResultErrorMessage]),
+			withRetrieve(retrieve),
+		),
+		indexCache: newCache(collector, metrics.ResourceTransactionResultErrorMessagesIndices,
+			withLimit[IdentifierAndUint32, flow.TransactionResultErrorMessage](transactionResultsCacheSize),
+			withStore(noopStore[IdentifierAndUint32, flow.TransactionResultErrorMessage]),
+			withRetrieve(retrieveIndex),
+		),
+		blockCache: newCache(collector, metrics.ResourceTransactionResultErrorMessagesIndices,
+			withLimit[flow.Identifier, []flow.TransactionResultErrorMessage](transactionResultsCacheSize),
+			withStore(noopStore[flow.Identifier, []flow.TransactionResultErrorMessage]),
+			withRetrieve(retrieveForBlock),
+		),
+	}
+}
+
+// Store will store transaction result error messages for the given block ID.
+//
+// No errors are expected during normal operation.
+func (t *TransactionResultErrorMessages) Store(blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage) error {
+	return t.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		return t.BatchStore(blockID, transactionResultErrorMessages, rw)
+	})
+}
+
+// Exists returns true if transaction result error messages for the given ID have been stored.
+//
+// No errors are expected during normal operation.
+func (t *TransactionResultErrorMessages) Exists(blockID flow.Identifier) (bool, error) {
+	// if the block is in the cache, return true
+	if ok := t.blockCache.IsCached(blockID); ok {
+		return ok, nil
+	}
+
+	// otherwise, check the underlying database
+	var exists bool
+	err := operation.TransactionResultErrorMessagesExists(t.db.Reader(), blockID, &exists)
+	if err != nil {
+		return false, fmt.Errorf("could not check existence: %w", err)
+	}
+	return exists, nil
+}
+
+// BatchStore inserts the transaction result error messages for the given block
+// into a write batch.
+//
+// No errors are expected during normal operation.
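+//
+// Usage sketch (hedged, mirroring Store above):
+//
+//	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+//		return t.BatchStore(blockID, txResultErrMsgs, rw)
+//	})
+//
+// The caches are populated only after the batch commits successfully
+// (see storage.OnCommitSucceed below).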
+func (t *TransactionResultErrorMessages) BatchStore( + blockID flow.Identifier, + transactionResultErrorMessages []flow.TransactionResultErrorMessage, + batch storage.ReaderBatchWriter, +) error { + writer := batch.Writer() + for _, result := range transactionResultErrorMessages { + err := operation.BatchInsertTransactionResultErrorMessage(writer, blockID, &result) + if err != nil { + return fmt.Errorf("cannot batch insert tx result error message: %w", err) + } + + err = operation.BatchIndexTransactionResultErrorMessage(writer, blockID, &result) + if err != nil { + return fmt.Errorf("cannot batch index tx result error message: %w", err) + } + } + + storage.OnCommitSucceed(batch, func() { + for _, result := range transactionResultErrorMessages { + key := KeyFromBlockIDTransactionID(blockID, result.TransactionID) + // cache for each transaction, so that it's faster to retrieve + t.cache.Insert(key, result) + + keyIndex := KeyFromBlockIDIndex(blockID, result.Index) + t.indexCache.Insert(keyIndex, result) + } + + t.blockCache.Insert(blockID, transactionResultErrorMessages) + }) + return nil +} + +// ByBlockIDTransactionID returns the transaction result error message for the given block ID and transaction ID +// +// Expected errors during normal operation: +// - `storage.ErrNotFound` if no transaction error message is known at given block and transaction id. +func (t *TransactionResultErrorMessages) ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResultErrorMessage, error) { + key := KeyFromBlockIDTransactionID(blockID, transactionID) + transactionResultErrorMessage, err := t.cache.Get(t.db.Reader(), key) + if err != nil { + return nil, err + } + return &transactionResultErrorMessage, nil +} + +// ByBlockIDTransactionIndex returns the transaction result error message for the given blockID and transaction index +// +// Expected errors during normal operation: +// - `storage.ErrNotFound` if no transaction error message is known at given block and transaction index. +func (t *TransactionResultErrorMessages) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResultErrorMessage, error) { + key := KeyFromBlockIDIndex(blockID, txIndex) + transactionResultErrorMessage, err := t.indexCache.Get(t.db.Reader(), key) + if err != nil { + return nil, err + } + return &transactionResultErrorMessage, nil +} + +// ByBlockID gets all transaction result error messages for a block, ordered by transaction index. +// Note: This method will return an empty slice both if the block is not indexed yet and if the block does not have any errors. +// +// No errors are expected during normal operations. 
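+//
+// Example (hedged sketch):
+//
+//	msgs, err := t.ByBlockID(blockID)
+//	if err == nil && len(msgs) == 0 {
+//		// block is not indexed yet, or it has no failed transactions
+//	}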
+func (t *TransactionResultErrorMessages) ByBlockID(blockID flow.Identifier) ([]flow.TransactionResultErrorMessage, error) { + transactionResultErrorMessages, err := t.blockCache.Get(t.db.Reader(), blockID) + if err != nil { + return nil, err + } + return transactionResultErrorMessages, nil +} diff --git a/storage/store/transaction_result_error_messages_test.go b/storage/store/transaction_result_error_messages_test.go new file mode 100644 index 00000000000..02238f0138c --- /dev/null +++ b/storage/store/transaction_result_error_messages_test.go @@ -0,0 +1,110 @@ +package store_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/exp/rand" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" + + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" +) + +func TestStoringTransactionResultErrorMessages(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store1 := store.NewTransactionResultErrorMessages(metrics, db, 1000) + + blockID := unittest.IdentifierFixture() + + // test db Exists by block id + exists, err := store1.Exists(blockID) + require.NoError(t, err) + require.False(t, exists) + + // check retrieving by ByBlockID + messages, err := store1.ByBlockID(blockID) + require.NoError(t, err) + require.Nil(t, messages) + + txErrorMessages := make([]flow.TransactionResultErrorMessage, 0) + for i := 0; i < 10; i++ { + expected := flow.TransactionResultErrorMessage{ + TransactionID: unittest.IdentifierFixture(), + ErrorMessage: fmt.Sprintf("a runtime error %d", i), + ExecutorID: unittest.IdentifierFixture(), + Index: rand.Uint32(), + } + txErrorMessages = append(txErrorMessages, expected) + } + err = store1.Store(blockID, txErrorMessages) + require.NoError(t, err) + + // test db Exists by block id + exists, err = store1.Exists(blockID) + require.NoError(t, err) + require.True(t, exists) + + // check retrieving by ByBlockIDTransactionID + for _, txErrorMessage := range txErrorMessages { + actual, err := store1.ByBlockIDTransactionID(blockID, txErrorMessage.TransactionID) + require.NoError(t, err) + assert.Equal(t, txErrorMessage, *actual) + } + + // check retrieving by ByBlockIDTransactionIndex + for _, txErrorMessage := range txErrorMessages { + actual, err := store1.ByBlockIDTransactionIndex(blockID, txErrorMessage.Index) + require.NoError(t, err) + assert.Equal(t, txErrorMessage, *actual) + } + + // check retrieving by ByBlockID + actual, err := store1.ByBlockID(blockID) + require.NoError(t, err) + assert.Equal(t, txErrorMessages, actual) + + // test loading from database + newStore := store.NewTransactionResultErrorMessages(metrics, db, 1000) + for _, txErrorMessage := range txErrorMessages { + actual, err := newStore.ByBlockIDTransactionID(blockID, txErrorMessage.TransactionID) + require.NoError(t, err) + assert.Equal(t, txErrorMessage, *actual) + } + + // check retrieving by index from both cache and db + for i, txErrorMessage := range txErrorMessages { + actual, err := store1.ByBlockIDTransactionIndex(blockID, txErrorMessage.Index) + require.NoError(t, err) + assert.Equal(t, txErrorMessages[i], *actual) + + actual, err = newStore.ByBlockIDTransactionIndex(blockID, txErrorMessage.Index) + require.NoError(t, err) + assert.Equal(t, txErrorMessages[i], *actual) + } + }) +} + +func 
TestReadingNotStoreTransactionResultErrorMessage(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store1 := store.NewTransactionResultErrorMessages(metrics, db, 1000) + + blockID := unittest.IdentifierFixture() + txID := unittest.IdentifierFixture() + txIndex := rand.Uint32() + + _, err := store1.ByBlockIDTransactionID(blockID, txID) + assert.ErrorIs(t, err, storage.ErrNotFound) + + _, err = store1.ByBlockIDTransactionIndex(blockID, txIndex) + assert.ErrorIs(t, err, storage.ErrNotFound) + }) +} diff --git a/storage/store/transaction_results.go b/storage/store/transaction_results.go new file mode 100644 index 00000000000..a870fe65ba8 --- /dev/null +++ b/storage/store/transaction_results.go @@ -0,0 +1,252 @@ +package store + +import ( + "encoding/binary" + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +var _ storage.TransactionResults = (*TransactionResults)(nil) + +type TwoIdentifier [flow.IdentifierLen * 2]byte +type IdentifierAndUint32 [flow.IdentifierLen + 4]byte + +type TransactionResults struct { + db storage.DB + cache *GroupCache[flow.Identifier, TwoIdentifier, flow.TransactionResult] // Key: blockID + txID + indexCache *GroupCache[flow.Identifier, IdentifierAndUint32, flow.TransactionResult] // Key: blockID + txIndex + blockCache *Cache[flow.Identifier, []flow.TransactionResult] // Key: blockID +} + +func KeyFromBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) TwoIdentifier { + var key TwoIdentifier + n := copy(key[:], blockID[:]) + copy(key[n:], txID[:]) + return key +} + +func KeyFromBlockIDIndex(blockID flow.Identifier, txIndex uint32) IdentifierAndUint32 { + var key IdentifierAndUint32 + n := copy(key[:], blockID[:]) + binary.BigEndian.PutUint32(key[n:], txIndex) + return key +} + +func KeyToBlockIDTransactionID(key TwoIdentifier) (flow.Identifier, flow.Identifier) { + blockID := flow.Identifier(key[:flow.IdentifierLen]) + transactionID := flow.Identifier(key[flow.IdentifierLen:]) + return blockID, transactionID +} + +func KeyToBlockIDIndex(key IdentifierAndUint32) (flow.Identifier, uint32) { + blockID := flow.Identifier(key[:flow.IdentifierLen]) + txIndex := binary.BigEndian.Uint32(key[flow.IdentifierLen:]) + return blockID, txIndex +} + +func FirstIDFromTwoIdentifier(key TwoIdentifier) flow.Identifier { + return flow.Identifier(key[:flow.IdentifierLen]) +} + +func IDFromIdentifierAndUint32(key IdentifierAndUint32) flow.Identifier { + return flow.Identifier(key[:flow.IdentifierLen]) +} + +func NewTransactionResults(collector module.CacheMetrics, db storage.DB, transactionResultsCacheSize uint) (*TransactionResults, error) { + retrieve := func(r storage.Reader, key TwoIdentifier) (flow.TransactionResult, error) { + blockID, txID := KeyToBlockIDTransactionID(key) + + var txResult flow.TransactionResult + err := operation.RetrieveTransactionResult(r, blockID, txID, &txResult) + if err != nil { + return flow.TransactionResult{}, err + } + return txResult, nil + } + + retrieveIndex := func(r storage.Reader, key IdentifierAndUint32) (flow.TransactionResult, error) { + blockID, txIndex := KeyToBlockIDIndex(key) + + var txResult flow.TransactionResult + err := operation.RetrieveTransactionResultByIndex(r, blockID, txIndex, &txResult) + if err != nil { + return flow.TransactionResult{}, err + } + return txResult, nil + } + + 
retrieveForBlock := func(r storage.Reader, blockID flow.Identifier) ([]flow.TransactionResult, error) { + var txResults []flow.TransactionResult + err := operation.LookupTransactionResultsByBlockIDUsingIndex(r, blockID, &txResults) + if err != nil { + return nil, err + } + return txResults, nil + } + + cache, err := newGroupCache( + collector, + metrics.ResourceTransactionResults, + FirstIDFromTwoIdentifier, + withLimit[TwoIdentifier, flow.TransactionResult](transactionResultsCacheSize), + withStore(noopStore[TwoIdentifier, flow.TransactionResult]), + withRetrieve(retrieve), + ) + if err != nil { + return nil, fmt.Errorf("failed to create transaction results group cache: %w", err) + } + + indexCache, err := newGroupCache( + collector, + metrics.ResourceTransactionResultIndices, + IDFromIdentifierAndUint32, + withLimit[IdentifierAndUint32, flow.TransactionResult](transactionResultsCacheSize), + withStore(noopStore[IdentifierAndUint32, flow.TransactionResult]), + withRetrieve(retrieveIndex), + ) + if err != nil { + return nil, fmt.Errorf("failed to create transaction index group cache: %w", err) + } + + return &TransactionResults{ + db: db, + cache: cache, + indexCache: indexCache, + blockCache: newCache( + collector, + metrics.ResourceTransactionResultIndices, + withLimit[flow.Identifier, []flow.TransactionResult](transactionResultsCacheSize), + withStore(noopStore[flow.Identifier, []flow.TransactionResult]), + withRetrieve(retrieveForBlock), + ), + }, nil +} + +// BatchStore will store the transaction results for the given block ID in a batch +func (tr *TransactionResults) BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch storage.ReaderBatchWriter) error { + w := batch.Writer() + + for i, result := range transactionResults { + err := operation.InsertTransactionResult(w, blockID, &result) + if err != nil { + return fmt.Errorf("cannot batch insert tx result: %w", err) + } + + err = operation.IndexTransactionResult(w, blockID, uint32(i), &result) + if err != nil { + return fmt.Errorf("cannot batch index tx result: %w", err) + } + } + + storage.OnCommitSucceed(batch, func() { + for i, result := range transactionResults { + key := KeyFromBlockIDTransactionID(blockID, result.TransactionID) + // cache for each transaction, so that it's faster to retrieve + tr.cache.Insert(key, result) + + index := uint32(i) + + keyIndex := KeyFromBlockIDIndex(blockID, index) + tr.indexCache.Insert(keyIndex, result) + } + + tr.blockCache.Insert(blockID, transactionResults) + }) + return nil +} + +// ByBlockIDTransactionID returns the runtime transaction result for the given block ID and transaction ID +func (tr *TransactionResults) ByBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) (*flow.TransactionResult, error) { + key := KeyFromBlockIDTransactionID(blockID, txID) + transactionResult, err := tr.cache.Get(tr.db.Reader(), key) + if err != nil { + return nil, err + } + return &transactionResult, nil +} + +// ByBlockIDTransactionIndex returns the runtime transaction result for the given block ID and transaction index +func (tr *TransactionResults) ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResult, error) { + key := KeyFromBlockIDIndex(blockID, txIndex) + transactionResult, err := tr.indexCache.Get(tr.db.Reader(), key) + if err != nil { + return nil, err + } + return &transactionResult, nil +} + +// ByBlockID gets all transaction results for a block, ordered by transaction index +func (tr *TransactionResults) 
ByBlockID(blockID flow.Identifier) ([]flow.TransactionResult, error) { + transactionResults, err := tr.blockCache.Get(tr.db.Reader(), blockID) + if err != nil { + return nil, err + } + return transactionResults, nil +} + +// RemoveByBlockID removes transaction results by block ID +func (tr *TransactionResults) RemoveByBlockID(blockID flow.Identifier) error { + return tr.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return tr.BatchRemoveByBlockID(blockID, rw) + }) +} + +// BatchRemoveByBlockID batch removes transaction results by block ID. +func (tr *TransactionResults) BatchRemoveByBlockID(blockID flow.Identifier, batch storage.ReaderBatchWriter) error { + const batchDataKey = "TransactionResults.BatchRemoveByBlockID" + + // BatchRemoveByBlockID() receives ReaderBatchWriter and block ID to + // remove the given block from the database and memory cache. + // BatchRemoveByBlockID() can be called repeatedly with the same + // ReaderBatchWriter and different block IDs to remove multiple blocks. + // + // To avoid locking TransactionResults cache for every removed block ID, + // this function: + // - saves and aggregates the received blockID in the ReaderBatchWriter's scope data, + // - in the OnCommitSucceed callback, retrieves all saved block IDs and + // removes all cached blocks by locking the cache just once + // After cache removal, the scoped block IDs in ReaderBatchWriter are removed. + + storage.OnCommitSucceed(batch, func() { + batchData, _ := batch.ScopedValue(batchDataKey) + + if batchData != nil { + batch.SetScopedValue(batchDataKey, nil) + + blockIDs := batchData.(map[flow.Identifier]struct{}) + + if len(blockIDs) > 0 { + blockIDsInSlice := make([]flow.Identifier, 0, len(blockIDs)) + for id := range blockIDs { + blockIDsInSlice = append(blockIDsInSlice, id) + } + + tr.cache.RemoveGroups(blockIDsInSlice) + } + } + }) + + saveBlockIDInBatchData(batch, batchDataKey, blockID) + + return operation.BatchRemoveTransactionResultsByBlockID(blockID, batch) +} + +func saveBlockIDInBatchData(batch storage.ReaderBatchWriter, batchDataKey string, blockID flow.Identifier) { + var blockIDs map[flow.Identifier]struct{} + + batchValue, _ := batch.ScopedValue(batchDataKey) + if batchValue == nil { + blockIDs = make(map[flow.Identifier]struct{}) + } else { + blockIDs = batchValue.(map[flow.Identifier]struct{}) + } + + blockIDs[blockID] = struct{}{} + + batch.SetScopedValue(batchDataKey, blockIDs) +} diff --git a/storage/store/transaction_results_test.go b/storage/store/transaction_results_test.go new file mode 100644 index 00000000000..aa082c7b2b7 --- /dev/null +++ b/storage/store/transaction_results_test.go @@ -0,0 +1,259 @@ +package store_test + +import ( + "errors" + "fmt" + mathRand "math/rand" + "slices" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/exp/rand" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/utils/unittest" + + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" +) + +func TestBatchStoringTransactionResults(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + st, err := store.NewTransactionResults(metrics, db, 1000) + require.NoError(t, err) + + blockID := unittest.IdentifierFixture() + txResults := make([]flow.TransactionResult, 0) + for i := 0; i < 10; i++ { + txID := 
unittest.IdentifierFixture() + expected := flow.TransactionResult{ + TransactionID: txID, + ErrorMessage: fmt.Sprintf("a runtime error %d", i), + } + txResults = append(txResults, expected) + } + writeBatch := db.NewBatch() + defer writeBatch.Close() + + err = st.BatchStore(blockID, txResults, writeBatch) + require.NoError(t, err) + + err = writeBatch.Commit() + require.NoError(t, err) + + for _, txResult := range txResults { + actual, err := st.ByBlockIDTransactionID(blockID, txResult.TransactionID) + require.NoError(t, err) + assert.Equal(t, txResult, *actual) + } + + // test loading from database + newst, err := store.NewTransactionResults(metrics, db, 1000) + require.NoError(t, err) + for _, txResult := range txResults { + actual, err := newst.ByBlockIDTransactionID(blockID, txResult.TransactionID) + require.NoError(t, err) + assert.Equal(t, txResult, *actual) + } + + // check retrieving by index from both cache and db + for i := len(txResults) - 1; i >= 0; i-- { + actual, err := st.ByBlockIDTransactionIndex(blockID, uint32(i)) + require.NoError(t, err) + assert.Equal(t, txResults[i], *actual) + + actual, err = newst.ByBlockIDTransactionIndex(blockID, uint32(i)) + require.NoError(t, err) + assert.Equal(t, txResults[i], *actual) + } + }) +} + +func TestBatchStoreAndBatchRemoveTransactionResults(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + const blockCount = 10 + const txCountPerBlock = 10 + + metrics := metrics.NewNoopCollector() + st, err := store.NewTransactionResults(metrics, db, 1000) + require.NoError(t, err) + + blockIDs := make([]flow.Identifier, blockCount) + txResults := make(map[flow.Identifier][]flow.TransactionResult) + for i := range blockCount { + blockID := unittest.IdentifierFixture() + blockIDs[i] = blockID + + for j := range txCountPerBlock { + txID := unittest.IdentifierFixture() + expected := flow.TransactionResult{ + TransactionID: txID, + ErrorMessage: fmt.Sprintf("a runtime error %d", j), + } + txResults[blockID] = append(txResults[blockID], expected) + } + } + + // Store transaction results of multiple blocks + err = db.WithReaderBatchWriter(func(rbw storage.ReaderBatchWriter) error { + for _, blockID := range blockIDs { + err := st.BatchStore(blockID, txResults[blockID], rbw) + if err != nil { + return err + } + } + return nil + }) + require.NoError(t, err) + + // Retrieve transaction results + for blockID, txResult := range txResults { + for _, result := range txResult { + actual, err := st.ByBlockIDTransactionID(blockID, result.TransactionID) + require.NoError(t, err) + assert.Equal(t, result, *actual) + } + } + + // Remove 2 blocks of transaction results + removeBlockIDs := blockIDs[:2] + + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + for _, blockID := range removeBlockIDs { + err := st.BatchRemoveByBlockID(blockID, rw) + if err != nil { + return err + } + } + return nil + }) + require.NoError(t, err) + + // Retrieve transaction results + for blockID, txResult := range txResults { + removedBlock := slices.Contains(removeBlockIDs, blockID) + + for _, result := range txResult { + actual, err := st.ByBlockIDTransactionID(blockID, result.TransactionID) + if removedBlock { + require.True(t, errors.Is(err, storage.ErrNotFound)) + } else { + require.NoError(t, err) + assert.Equal(t, result, *actual) + } + } + } + }) +} + +func TestReadingNotstTransaction(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + st, err := 
store.NewTransactionResults(metrics, db, 1000) + require.NoError(t, err) + + blockID := unittest.IdentifierFixture() + txID := unittest.IdentifierFixture() + txIndex := rand.Uint32() + + _, err = st.ByBlockIDTransactionID(blockID, txID) + assert.ErrorIs(t, err, storage.ErrNotFound) + + _, err = st.ByBlockIDTransactionIndex(blockID, txIndex) + assert.ErrorIs(t, err, storage.ErrNotFound) + }) +} + +func TestKeyConversion(t *testing.T) { + blockID := unittest.IdentifierFixture() + txID := unittest.IdentifierFixture() + key := store.KeyFromBlockIDTransactionID(blockID, txID) + bID, tID := store.KeyToBlockIDTransactionID(key) + require.Equal(t, blockID, bID) + require.Equal(t, txID, tID) +} + +func TestIndexKeyConversion(t *testing.T) { + blockID := unittest.IdentifierFixture() + txIndex := mathRand.Uint32() + key := store.KeyFromBlockIDIndex(blockID, txIndex) + bID, tID := store.KeyToBlockIDIndex(key) + require.Equal(t, blockID, bID) + require.Equal(t, txIndex, tID) +} + +func BenchmarkTransactionResultCacheKey(b *testing.B) { + b.Run("new: create cache key", func(b *testing.B) { + blockID := unittest.IdentifierFixture() + txID := unittest.IdentifierFixture() + + var key store.TwoIdentifier + for range b.N { + key = store.KeyFromBlockIDTransactionID(blockID, txID) + } + _ = key + }) + + b.Run("old: create cache key", func(b *testing.B) { + blockID := unittest.IdentifierFixture() + txID := unittest.IdentifierFixture() + + var key string + for range b.N { + key = DeprecatedKeyFromBlockIDTransactionID(blockID, txID) + } + _ = key + }) + + b.Run("new: parse cache key", func(b *testing.B) { + blockID := unittest.IdentifierFixture() + txID := unittest.IdentifierFixture() + key := store.KeyFromBlockIDTransactionID(blockID, txID) + + var id1, id2 flow.Identifier + for range b.N { + id1, id2 = store.KeyToBlockIDTransactionID(key) + } + _ = id1 + _ = id2 + }) + + b.Run("old: parse cache key", func(b *testing.B) { + blockID := unittest.IdentifierFixture() + txID := unittest.IdentifierFixture() + key := DeprecatedKeyFromBlockIDTransactionID(blockID, txID) + + var id1, id2 flow.Identifier + for range b.N { + id1, id2, _ = DeprecatedKeyToBlockIDTransactionID(key) + } + _ = id1 + _ = id2 + }) +} + +// This deprecated function is for benchmark purpose. +func DeprecatedKeyFromBlockIDTransactionID(blockID flow.Identifier, txID flow.Identifier) string { + return fmt.Sprintf("%x%x", blockID, txID) +} + +// This deprecated function is for benchmark purpose. +func DeprecatedKeyToBlockIDTransactionID(key string) (flow.Identifier, flow.Identifier, error) { + blockIDStr := key[:64] + txIDStr := key[64:] + blockID, err := flow.HexStringToIdentifier(blockIDStr) + if err != nil { + return flow.ZeroID, flow.ZeroID, fmt.Errorf("could not get block ID: %w", err) + } + + txID, err := flow.HexStringToIdentifier(txIDStr) + if err != nil { + return flow.ZeroID, flow.ZeroID, fmt.Errorf("could not get transaction id: %w", err) + } + + return blockID, txID, nil +} diff --git a/storage/store/transactions.go b/storage/store/transactions.go new file mode 100644 index 00000000000..f2357ca33ab --- /dev/null +++ b/storage/store/transactions.go @@ -0,0 +1,77 @@ +package store + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +// Transactions ... 
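+// Transactions implements persistent storage for transaction bodies. Reads are
+// served from an in-memory cache keyed by transaction ID, falling back to the
+// database on a cache miss.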
+type Transactions struct {
+	db    storage.DB
+	cache *Cache[flow.Identifier, *flow.TransactionBody]
+}
+
+var _ storage.Transactions = (*Transactions)(nil)
+
+// NewTransactions creates a Transactions store backed by the given database and
+// fronted by an in-memory cache.
+func NewTransactions(cacheMetrics module.CacheMetrics, db storage.DB) *Transactions {
+	store := func(rw storage.ReaderBatchWriter, txID flow.Identifier, flowTX *flow.TransactionBody) error {
+		return operation.UpsertTransaction(rw.Writer(), txID, flowTX)
+	}
+
+	retrieve := func(r storage.Reader, txID flow.Identifier) (*flow.TransactionBody, error) {
+		var flowTx flow.TransactionBody
+		err := operation.RetrieveTransaction(r, txID, &flowTx)
+		return &flowTx, err
+	}
+
+	remove := func(rw storage.ReaderBatchWriter, txID flow.Identifier) error {
+		return operation.RemoveTransaction(rw.Writer(), txID)
+	}
+
+	t := &Transactions{
+		db: db,
+		cache: newCache(cacheMetrics, metrics.ResourceTransaction,
+			withLimit[flow.Identifier, *flow.TransactionBody](flow.DefaultTransactionExpiry+100),
+			withStore(store),
+			withRemove[flow.Identifier, *flow.TransactionBody](remove),
+			withRetrieve(retrieve),
+		),
+	}
+
+	return t
+}
+
+func (t *Transactions) Store(flowTx *flow.TransactionBody) error {
+	return t.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
+		return t.storeTx(rw, flowTx)
+	})
+}
+
+func (t *Transactions) storeTx(rw storage.ReaderBatchWriter, flowTx *flow.TransactionBody) error {
+	return t.cache.PutTx(rw, flowTx.ID(), flowTx)
+}
+
+func (t *Transactions) ByID(txID flow.Identifier) (*flow.TransactionBody, error) {
+	return t.cache.Get(t.db.Reader(), txID)
+}
+
+// RemoveBatch removes a transaction by fingerprint.
+func (t *Transactions) RemoveBatch(rw storage.ReaderBatchWriter, txID flow.Identifier) error {
+	return t.cache.RemoveTx(rw, txID)
+}
+
+// BatchStore stores a transaction within a batch operation.
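+// Store is the single-write convenience wrapper around the same write path.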
+// No errors are expected during normal operations +func (t *Transactions) BatchStore(tx *flow.TransactionBody, batch storage.ReaderBatchWriter) error { + if err := t.storeTx(batch, tx); err != nil { + return fmt.Errorf("cannot batch insert transaction: %w", err) + } + + return nil +} diff --git a/storage/store/transactions_test.go b/storage/store/transactions_test.go new file mode 100644 index 00000000000..011ceb49444 --- /dev/null +++ b/storage/store/transactions_test.go @@ -0,0 +1,74 @@ +package store_test + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation/dbtest" + "github.com/onflow/flow-go/storage/store" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestTransactionStoreRetrieve(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store := store.NewTransactions(metrics, db) + + // store a transaction in db + expected := unittest.TransactionFixture() + err := store.Store(&expected) + require.NoError(t, err) + + // retrieve the transaction by ID + actual, err := store.ByID(expected.ID()) + require.NoError(t, err) + assert.Equal(t, &expected, actual) + + // re-insert the transaction - should be idempotent + err = store.Store(&expected) + require.NoError(t, err) + }) +} + +func TestTransactionRetrieveWithoutStore(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store := store.NewTransactions(metrics, db) + + // attempt to get a invalid transaction + _, err := store.ByID(unittest.IdentifierFixture()) + assert.True(t, errors.Is(err, storage.ErrNotFound)) + }) +} + +func TestTransactionRemove(t *testing.T) { + dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) { + metrics := metrics.NewNoopCollector() + store := store.NewTransactions(metrics, db) + + // Create and store a transaction + expected := unittest.TransactionFixture() + err := store.Store(&expected) + require.NoError(t, err) + + // Ensure it exists + tx, err := store.ByID(expected.ID()) + require.NoError(t, err) + assert.Equal(t, &expected, tx) + + // Remove it + err = db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error { + return store.RemoveBatch(rw, expected.ID()) + }) + require.NoError(t, err) + + // Ensure it no longer exists + _, err = store.ByID(expected.ID()) + assert.True(t, errors.Is(err, storage.ErrNotFound)) + }) +} diff --git a/storage/store/version_beacon.go b/storage/store/version_beacon.go new file mode 100644 index 00000000000..eb1b68c9b8f --- /dev/null +++ b/storage/store/version_beacon.go @@ -0,0 +1,38 @@ +package store + +import ( + "errors" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" + "github.com/onflow/flow-go/storage/operation" +) + +type VersionBeacons struct { + db storage.DB +} + +var _ storage.VersionBeacons = (*VersionBeacons)(nil) + +func NewVersionBeacons(db storage.DB) *VersionBeacons { + res := &VersionBeacons{ + db: db, + } + + return res +} + +func (r *VersionBeacons) Highest( + belowOrEqualTo uint64, +) (*flow.SealedVersionBeacon, error) { + var beacon flow.SealedVersionBeacon + + err := operation.LookupLastVersionBeaconByHeight(r.db.Reader(), belowOrEqualTo, &beacon) + if err != nil { + if errors.Is(err, storage.ErrNotFound) { + return nil, nil + } + return nil, err + } + return &beacon, nil +} diff --git 
a/storage/transaction_result_error_messages.go b/storage/transaction_result_error_messages.go new file mode 100644 index 00000000000..a573bbae8a2 --- /dev/null +++ b/storage/transaction_result_error_messages.go @@ -0,0 +1,44 @@ +package storage + +import "github.com/onflow/flow-go/model/flow" + +// TransactionResultErrorMessagesReader represents persistent storage read operations for transaction result error messages +type TransactionResultErrorMessagesReader interface { + // Exists returns true if transaction result error messages for the given ID have been stored. + // + // No errors are expected during normal operation. + Exists(blockID flow.Identifier) (bool, error) + + // ByBlockIDTransactionID returns the transaction result error message for the given block ID and transaction ID. + // + // Expected errors during normal operation: + // - `storage.ErrNotFound` if no transaction error message is known at given block and transaction id. + ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResultErrorMessage, error) + + // ByBlockIDTransactionIndex returns the transaction result error message for the given blockID and transaction index. + // + // Expected errors during normal operation: + // - `storage.ErrNotFound` if no transaction error message is known at given block and transaction index. + ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResultErrorMessage, error) + + // ByBlockID gets all transaction result error messages for a block, ordered by transaction index. + // Note: This method will return an empty slice both if the block is not indexed yet and if the block does not have any errors. + // + // No errors are expected during normal operations. + ByBlockID(id flow.Identifier) ([]flow.TransactionResultErrorMessage, error) +} + +// TransactionResultErrorMessages represents persistent storage for transaction result error messages +type TransactionResultErrorMessages interface { + TransactionResultErrorMessagesReader + + // Store will store transaction result error messages for the given block ID. + // + // No errors are expected during normal operation. + Store(blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage) error + + // BatchStore inserts a batch of transaction result error messages into a batch + // + // No errors are expected during normal operation. 
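+	//
+	// Store is expected to wrap BatchStore in a single write batch, as done by
+	// the implementation in storage/store.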
+ BatchStore(blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage, batch ReaderBatchWriter) error +} diff --git a/storage/transaction_results.go b/storage/transaction_results.go index b2843702292..9c7ecdbe1db 100644 --- a/storage/transaction_results.go +++ b/storage/transaction_results.go @@ -2,12 +2,7 @@ package storage import "github.com/onflow/flow-go/model/flow" -// TransactionResults represents persistent storage for transaction result -type TransactionResults interface { - - // BatchStore inserts a batch of transaction result into a batch - BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch BatchStorage) error - +type TransactionResultsReader interface { // ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResult, error) @@ -17,3 +12,14 @@ type TransactionResults interface { // ByBlockID gets all transaction results for a block, ordered by transaction index ByBlockID(id flow.Identifier) ([]flow.TransactionResult, error) } + +// TransactionResults represents persistent storage for transaction result +type TransactionResults interface { + TransactionResultsReader + + // BatchStore inserts a batch of transaction result into a batch + BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch ReaderBatchWriter) error + + // RemoveByBlockID removes all transaction results for a block + BatchRemoveByBlockID(id flow.Identifier, batch ReaderBatchWriter) error +} diff --git a/storage/transactions.go b/storage/transactions.go index 2a6ab7a88a8..19783098480 100644 --- a/storage/transactions.go +++ b/storage/transactions.go @@ -4,12 +4,23 @@ import ( "github.com/onflow/flow-go/model/flow" ) +// TransactionsReader represents persistent storage read operations for transactions. +type TransactionsReader interface { + // ByID returns the transaction for the given fingerprint. + // Expected errors during normal operation: + // - `storage.ErrNotFound` if transaction is not found. + ByID(txID flow.Identifier) (*flow.TransactionBody, error) +} + // Transactions represents persistent storage for transactions. type Transactions interface { + TransactionsReader // Store inserts the transaction, keyed by fingerprint. Duplicate transaction insertion is ignored + // No errors are expected during normal operation. Store(tx *flow.TransactionBody) error - // ByID returns the transaction for the given fingerprint. - ByID(txID flow.Identifier) (*flow.TransactionBody, error) + // BatchStore stores transaction within a batch operation. + // No errors are expected during normal operation. + BatchStore(tx *flow.TransactionBody, batch ReaderBatchWriter) error } diff --git a/storage/util/logger.go b/storage/util/logger.go index 7c44637f33c..6a3cd914e97 100644 --- a/storage/util/logger.go +++ b/storage/util/logger.go @@ -1,6 +1,8 @@ package util import ( + "github.com/cockroachdb/pebble/v2" + "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog" ) @@ -9,12 +11,19 @@ type Logger struct { log zerolog.Logger } +var _ pebble.Logger = (*Logger)(nil) +var _ badger.Logger = (*Logger)(nil) + func NewLogger(logger zerolog.Logger) *Logger { return &Logger{ - log: logger.With().Str("component", "badger").Logger(), + log: logger, } } +func (l *Logger) Fatalf(msg string, args ...interface{}) { + l.log.Fatal().Msgf(msg, args...) 
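+	// note: zerolog's Fatal event calls os.Exit(1) after the message is written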
+} + func (l *Logger) Errorf(msg string, args ...interface{}) { l.log.Error().Msgf(msg, args...) } diff --git a/storage/util/testing.go b/storage/util/testing.go index 89e7e523364..830e27df882 100644 --- a/storage/util/testing.go +++ b/storage/util/testing.go @@ -5,20 +5,9 @@ import ( "path/filepath" "testing" - "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/require" - - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/storage" - bstorage "github.com/onflow/flow-go/storage/badger" ) -func StorageLayer(_ testing.TB, db *badger.DB) *storage.All { - metrics := metrics.NewNoopCollector() - all := bstorage.InitAll(metrics, db) - return all -} - func CreateFiles(t *testing.T, dir string, names ...string) { for _, name := range names { file, err := os.Create(filepath.Join(dir, name)) diff --git a/storage/version_beacon.go b/storage/version_beacon.go index 0fca248b085..2a57c944aa4 100644 --- a/storage/version_beacon.go +++ b/storage/version_beacon.go @@ -7,7 +7,7 @@ type VersionBeacons interface { // Highest finds the highest flow.SealedVersionBeacon but no higher than // belowOrEqualTo - // Returns storage.ErrNotFound if no version beacon exists at or below the - // given height. + // Returns nil if no version beacon has been sealed below or equal to the + // input height. Highest(belowOrEqualTo uint64) (*flow.SealedVersionBeacon, error) } diff --git a/tools/move-checkpoint.sh b/tools/move-checkpoint.sh new file mode 100755 index 00000000000..1d69aeff72e --- /dev/null +++ b/tools/move-checkpoint.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +# Exit immediately if a command exits with a non-zero status +set -e + +# Check if exactly two arguments are provided +usage() { + echo "Description: move checkpoint files from one directory to another" + echo "Usage: $0 source_file destination_file [--run]" + echo "Example: $0 /var/flow/from-folder/checkpoint.000010 /var/flow/to-folder/root.checkpoint [--run]" + echo "The above command will preview the checkpoint files to be moved including its 17 subfiles to the destination folder and rename them." + echo "Preview mode is default. Use --run to actually move the files." + exit 1 +} + +# Check if at least two arguments are provided +if [ "$#" -lt 2 ] || [ "$#" -gt 3 ]; then + usage +fi + +# Assign arguments to variables +source_file_pattern=$1 +destination_file_base=$2 +run_mode=false + +# Check for run mode +if [ "$#" -eq 3 ] && [ "$3" == "--run" ]; then + run_mode=true +elif [ "$#" -eq 3 ]; then + usage +fi + +# Extract the basename from the source file pattern +source_base=$(basename "$source_file_pattern") + +# Extract the directory and base name from the destination file base +source_directory=$(dirname "$source_file_pattern") +destination_directory=$(dirname "$destination_file_base") +destination_base=$(basename "$destination_file_base") + +# Create the destination directory if it doesn't exist +mkdir -p "$destination_directory" + +# Define the expected files +expected_files=( + "$source_base" + "$source_base.000" + "$source_base.001" + "$source_base.002" + "$source_base.003" + "$source_base.004" + "$source_base.005" + "$source_base.006" + "$source_base.007" + "$source_base.008" + "$source_base.009" + "$source_base.010" + "$source_base.011" + "$source_base.012" + "$source_base.013" + "$source_base.014" + "$source_base.015" + "$source_base.016" +) + +# Check if all expected files are present +missing_files=() +for expected_file in "${expected_files[@]}"; do + full_expected_file="$source_directory/$expected_file" + if [ ! 
-f "$full_expected_file" ]; then + missing_files+=("$full_expected_file") + fi +done + +if [ "${#missing_files[@]}" -ne 0 ]; then + echo "Error: The following expected files are missing:" + for file in "${missing_files[@]}"; do + echo " $file" + done + exit 1 +fi + +# Loop through the expected files and preview/move them to the destination directory +for file in "${expected_files[@]}"; do + full_source_file="$source_directory/$file" + if [ -f "$full_source_file" ]; then + # Get the file extension (if any) + extension="${file#$source_base}" + # Construct the destination file name + destination_file="$destination_directory/$destination_base$extension" + # Preview or move the file + if [ "$run_mode" = true ]; then + echo "Moving: $(realpath "$full_source_file") -> $(realpath "$destination_file")" + mv "$full_source_file" "$destination_file" + else + echo "Preview: $(realpath "$full_source_file") -> $(realpath "$destination_file")" + fi + fi +done + + +if [ "$run_mode" = true ]; then + echo "Checkpoint files have been moved successfully." +else + echo "Preview complete. No files have been moved. add --run flag to move the files." +fi diff --git a/tools/structwrite/go.mod b/tools/structwrite/go.mod new file mode 100644 index 00000000000..b5165189ee5 --- /dev/null +++ b/tools/structwrite/go.mod @@ -0,0 +1,17 @@ +module github.com/onflow/flow-go/tools/structwrite + +go 1.25.0 + +require ( + github.com/golangci/plugin-module-register v0.1.1 + github.com/stretchr/testify v1.10.0 + golang.org/x/tools v0.32.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/sync v0.13.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/tools/structwrite/go.sum b/tools/structwrite/go.sum new file mode 100644 index 00000000000..97329b82569 --- /dev/null +++ b/tools/structwrite/go.sum @@ -0,0 +1,20 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= +github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tools/structwrite/structwrite.go b/tools/structwrite/structwrite.go new file mode 100644 index 00000000000..0e66f53d635 --- /dev/null +++ b/tools/structwrite/structwrite.go @@ -0,0 +1,336 @@ +package structwrite + +import ( + "go/ast" + "go/token" + "go/types" + "regexp" + "strings" + "sync" + + "github.com/golangci/plugin-module-register/register" + "golang.org/x/tools/go/analysis" +) + +func init() { + register.Plugin("structwrite", New) +} + +// Settings defines the configuration schema for the plugin. +type Settings struct { + // ConstructorRegex is a regex pattern (optional) to identify allowed constructor function names. + ConstructorRegex string `json:"constructorRegex"` +} + +// PluginStructWrite implements the LinterPlugin interface for the structwrite linter. +// This linter prevents mutations and non-empty construction of struct types marked as immutable. +// Mutation and construction of these types are only allowed in constructor functions. +// Re-assignment of a variable with a type marked immutable is allowed. +// +// x := NewImmutableType(1) +// x.SomeField = 2 // not allowed +// x = NewImmutableType(2) // allowed +// +// A struct type is marked as immutable by adding a directive comment of the form: `//structwrite:immutable .*`. +// The directive comment must appear in the godoc for the type being marked immutable. +// +// See handleAssignStmt and handleCompositeLit in this file for examples of what operations are allowed. +// See also the Go files under ./testdata, which represent the test cases for the linter. +// +// This linter does not guarantee that structs marked immutable cannot be mutated, but it does +// warn for the majority of possible mutation situations. Below are a list of scenarios which will +// mutate a mutation-protected struct without the linter noticing: +// +// 1. Reflection (for example passing a pointer to a struct type into json.Unmarshal) +// 2. Use of unsafe.Pointer +// 3. Re-assignment after reference escape. +// +// Example of (3.): +// +// type Y struct { +// B int +// } +// func NewY(b int) Y { return Y{B: b} } +// type X struct { +// Y *Y +// } +// func NewX(y *Y) X { return X{Y:y} } +// +// y := NewY(1) +// x := NewX(&y) +// // x.Y.B == 1 +// y = NewY(2) +// // x.Y.B == 2: x has been mutated due to the shared reference +type PluginStructWrite struct { + // Set of mutation-protected types, stored as fully qualified type names + mutationProtected map[string]struct{} + // Regex of constructor function names, where mutation is allowed. + constructorRegex *regexp.Regexp + + mu sync.RWMutex +} + +// New creates a new instance of the PluginStructWrite plugin. 
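+//
+// Example configuration (a hedged sketch of a .golangci.yml fragment; the exact
+// schema comes from golangci-lint's module plugin system, and the values shown
+// are illustrative):
+//
+//	linters-settings:
+//	  custom:
+//	    structwrite:
+//	      type: module
+//	      settings:
+//	        constructorRegex: '^New.*'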
+func New(cfg any) (register.LinterPlugin, error) { + s, err := register.DecodeSettings[Settings](cfg) + if err != nil { + return nil, err + } + + // Default to New* + if s.ConstructorRegex == "" { + s.ConstructorRegex = "^New.*" + } + re, err := regexp.Compile(s.ConstructorRegex) + if err != nil { + return nil, err + } + + return &PluginStructWrite{ + mutationProtected: make(map[string]struct{}), + constructorRegex: re, + }, nil +} + +func (p *PluginStructWrite) BuildAnalyzers() ([]*analysis.Analyzer, error) { + a := &analysis.Analyzer{ + Name: "structwrite", + Doc: "flags writes to specified struct fields or construction of structs outside constructor functions", + Run: p.run, + } + return []*analysis.Analyzer{a}, nil +} + +func (p *PluginStructWrite) GetLoadMode() string { + return register.LoadModeTypesInfo +} + +// run is the main analysis function. +func (p *PluginStructWrite) run(pass *analysis.Pass) (interface{}, error) { + p.gatherMutationProtectedTypes(pass) + + for _, file := range pass.Files { + ast.Inspect(file, func(n ast.Node) bool { + switch node := n.(type) { + case *ast.AssignStmt: + p.handleAssignStmt(node, pass, file) + case *ast.CompositeLit: + p.handleCompositeLit(node, pass, file) + } + return true + }) + } + return nil, nil +} + +// gatherMutationProtectedTypes populates the set of mutation-protected types before the linter runs. +// A struct type is marked as immutable by adding a directive comment of the form: `//structwrite:immutable .*`. +// The directive comment must appear in the godoc for the type being marked immutable. +func (p *PluginStructWrite) gatherMutationProtectedTypes(pass *analysis.Pass) { + for _, file := range pass.Files { + for _, decl := range file.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.TYPE { + continue + } + + for _, spec := range genDecl.Specs { + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + + if genDecl.Doc != nil { + for _, comment := range genDecl.Doc.List { + if strings.HasPrefix(comment.Text, "//structwrite:immutable") { + typeObj := pass.TypesInfo.Defs[typeSpec.Name] + if named, ok := typeObj.Type().(*types.Named); ok { + fullyQualified := named.String() + p.setMutationProtected(fullyQualified) + } + break + } + } + } + } + } + } +} + +// handleAssignStmt checks for disallowed writes to tracked struct fields in assignments. +// It handles pointer and literal types, and writes to fields promoted through embedding. +// +// In the examples below, suppose A is mutation-protected and B is not mutation-protected. +// Suppose C is a struct which embeds A, and which is not mutation-protected. +// +// type A struct { +// FieldA int +// } +// type C struct { +// A +// FieldC int +// } +// +// You can only write to fields of non-mutation-protected structs: +// +// A.FieldA = 1 // not allowed +// B.SomeField = 1 // allowed +// +// When a mutation-protected struct is embedded, you can write to fields of the outer struct. +// You cannot write to fields defined on the inner struct that are promoted through embedding. 
+// +// C.FieldC = 1 // allowed +// C.FieldA = 1 // not allowed +func (p *PluginStructWrite) handleAssignStmt(assign *ast.AssignStmt, pass *analysis.Pass, file *ast.File) { + for i, lhs := range assign.Lhs { + // we are only concerned about assignments + selExpr, ok := lhs.(*ast.SelectorExpr) + if !ok { + continue + } + + named, found := p.containsTrackedStruct(selExpr, pass) + if !found { + continue + } + + funcDecl := findEnclosingFunc(file, assign.Pos()) + if funcDecl == nil || !p.constructorRegex.MatchString(funcDecl.Name.Name) { + pass.Reportf(assign.Lhs[i].Pos(), "write to %s field outside constructor: func=%s, named=%s", + named.Obj().Name(), funcNameOrEmpty(funcDecl), named.String()) + } + } +} + +// handleCompositeLit checks for disallowed literal construction of mutation-protected structs. +// +// In the examples below, suppose A is mutation-protected and B is not mutation-protected. +// In general, construction of empty instances of mutation-protected structs is allowed: +// +// x := new(A) // allowed +// var x A // allowed +// +// However, for simplicity, literal construction is disallowed even when no fields are specified: +// +// x := A{} // not allowed +// +// Additional examples: +// +// x := A{SomeField: 1} // not allowed +// x := B{} // allowed +// x := B{SomeField: 1} // allowed +// x := B{SomeField: A{}} // not allowed +func (p *PluginStructWrite) handleCompositeLit(lit *ast.CompositeLit, pass *analysis.Pass, file *ast.File) { + typ := pass.TypesInfo.Types[lit].Type + if typ == nil { + return + } + typ = deref(typ) + + named, ok := typ.(*types.Named) + if !ok { + return + } + + fullyQualified := named.String() + if !p.isMutationProtected(fullyQualified) { + return + } + + funcDecl := findEnclosingFunc(file, lit.Pos()) + if funcDecl == nil || !p.constructorRegex.MatchString(funcDecl.Name.Name) { + pass.Reportf(lit.Pos(), "construction of %s outside constructor", named.Obj().Name()) + } +} + +// isMutationProtected checks whether a fully qualified type is configured to be mutation-protected. +func (p *PluginStructWrite) isMutationProtected(fullyQualifiedTypeName string) bool { + p.mu.RLock() + defer p.mu.RUnlock() + _, ok := p.mutationProtected[fullyQualifiedTypeName] + return ok +} + +// setMutationProtected sets a fully qualified type as mutation-protected. +func (p *PluginStructWrite) setMutationProtected(fullyQualifiedTypeName string) { + p.mu.Lock() + defer p.mu.Unlock() + p.mutationProtected[fullyQualifiedTypeName] = struct{}{} +} + +// containsTrackedStruct checks whether the field accessed via selector expression belongs to a tracked struct, +// either directly or via embedding. +func (p *PluginStructWrite) containsTrackedStruct(selExpr *ast.SelectorExpr, pass *analysis.Pass) (*types.Named, bool) { + // Handle promoted fields (embedding) + if sel := pass.TypesInfo.Selections[selExpr]; sel != nil && sel.Kind() == types.FieldVal { + // Traverse the selector’s index path and detect writes *inside* a + // mutation‑protected type (including promoted/embedded sub‑fields). 
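+		//
+		// For example, in `c.FieldA = 1` where FieldA is promoted from an embedded
+		// mutation-protected type, the index path visits the protected type before
+		// the field, so the write is flagged; assigning the protected value itself
+		// (the last step of the path) is allowed.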
+// containsTrackedStruct checks whether the field accessed via selector expression belongs to a tracked struct,
+// either directly or via embedding.
+func (p *PluginStructWrite) containsTrackedStruct(selExpr *ast.SelectorExpr, pass *analysis.Pass) (*types.Named, bool) {
+	// Handle promoted fields (embedding)
+	if sel := pass.TypesInfo.Selections[selExpr]; sel != nil && sel.Kind() == types.FieldVal {
+		// Traverse the selector's index path and detect writes *inside* a
+		// mutation-protected type (including promoted/embedded sub-fields).
+		indices := sel.Index()
+		typ := sel.Recv()
+		for i, idx := range indices {
+			structType, ok := deref(typ).Underlying().(*types.Struct)
+			if !ok || idx >= structType.NumFields() {
+				break // not a struct or invalid index: stop walking
+			}
+			field := structType.Field(idx)
+			typ = field.Type() // advance to the field's type for the next hop
+
+			if named, ok := deref(typ).(*types.Named); ok && p.isMutationProtected(named.String()) {
+				// If there are more indices *after* the protected type, the write is
+				// targeting one of its sub-fields (or a promoted field), so forbid it.
+				if i < len(indices)-1 {
+					return named, true // "write to NonWritable field outside constructor"
+				}
+				// Otherwise (i == last index) we are assigning the protected value
+				// itself (e.g. w.NonWritableField = ...), which is allowed.
+			}
+		}
+	}
+
+	// Fallback: direct access (non-promoted)
+	tv, ok := pass.TypesInfo.Types[selExpr.X]
+	if !ok {
+		return nil, false
+	}
+
+	typ := deref(tv.Type)
+	named, ok := typ.(*types.Named)
+	if !ok {
+		return nil, false
+	}
+	fullyQualified := named.String()
+	if p.isMutationProtected(fullyQualified) {
+		return named, true
+	}
+
+	return nil, false
+}
+
+// findEnclosingFunc returns the enclosing function declaration for a given position.
+func findEnclosingFunc(file *ast.File, pos token.Pos) *ast.FuncDecl {
+	for _, decl := range file.Decls {
+		if fn, ok := decl.(*ast.FuncDecl); ok && fn.Body != nil {
+			if fn.Body.Pos() <= pos && pos <= fn.Body.End() {
+				return fn
+			}
+		}
+	}
+	return nil
+}
+
+// deref removes pointer indirection from a type if it is a pointer.
+// Otherwise returns the input unchanged.
+func deref(t types.Type) types.Type {
+	if ptr, ok := t.(*types.Pointer); ok {
+		return ptr.Elem()
+	}
+	return t
+}
+
+// funcNameOrEmpty returns the function name or a fallback if nil.
+func funcNameOrEmpty(fn *ast.FuncDecl) string {
+	if fn != nil {
+		return fn.Name.Name
+	}
+	return "(unknown)"
+}
diff --git a/tools/structwrite/structwrite_test.go b/tools/structwrite/structwrite_test.go
new file mode 100644
index 00000000000..7229e665af1
--- /dev/null
+++ b/tools/structwrite/structwrite_test.go
@@ -0,0 +1,35 @@
+package structwrite
+
+import (
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	"github.com/golangci/plugin-module-register/register"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/tools/go/analysis/analysistest"
+)
+
+func TestPlugin(t *testing.T) {
+	newPlugin, err := register.GetPlugin("structwrite")
+	require.NoError(t, err)
+
+	plugin, err := newPlugin(Settings{})
+	require.NoError(t, err)
+
+	analyzers, err := plugin.BuildAnalyzers()
+	require.NoError(t, err)
+
+	analysistest.Run(t, testdataDir(t), analyzers[0], "github.com/username/linttestmodule")
+}
+
+func testdataDir(t *testing.T) string {
+	t.Helper()
+
+	_, testFilename, _, ok := runtime.Caller(1)
+	if !ok {
+		require.Fail(t, "unable to get current test filename")
+	}
+
+	return filepath.Join(filepath.Dir(testFilename), "testdata")
+}
diff --git a/tools/structwrite/testdata/src/github.com/username/linttestmodule/structwrite.go b/tools/structwrite/testdata/src/github.com/username/linttestmodule/structwrite.go
new file mode 100644
index 00000000000..ff0a9477cd3
--- /dev/null
+++ b/tools/structwrite/testdata/src/github.com/username/linttestmodule/structwrite.go
@@ -0,0 +1,216 @@
+// Package linttestmodule is a collection of code representing test cases for the linter.
+// See package golang.org/x/tools/go/analysis/analysistest for details.
+package linttestmodule
+
+// NonWritable is configured for linting.
+// +//structwrite:immutable +type NonWritable struct { + A int +} + +func NewNonWritable() *NonWritable { + return &NonWritable{ + A: 1, + } +} + +func NotConstructorButContainsNewString() *NonWritable { + return &NonWritable{A: 1} // want "construction of NonWritable outside constructor" +} + +func (nw *NonWritable) SetA() { + nw.A = 1 // want "write to NonWritable field outside constructor" +} + +func NonWritableConstructLiteral() { + nw := NonWritable{} // want "construction of NonWritable outside constructor" + nw = NonWritable{A: 1} // want "construction of NonWritable outside constructor" + nwp := &NonWritable{} // want "construction of NonWritable outside constructor" + nwp = &NonWritable{A: 1} // want "construction of NonWritable outside constructor" + nwp = new(NonWritable) + var nwnw NonWritable + _ = nw + _ = nwp + _ = nwnw +} + +func NonWritableSetALiteral() { + nw := NewNonWritable() + nw.A = 1 // want "write to NonWritable field outside constructor" + (*nw).A = 1 // want "write to NonWritable field outside constructor" + NewNonWritable().A = 1 // want "write to NonWritable field outside constructor" +} + +func NonWritableSetADoublePtr() { + nw := NewNonWritable() + nwp := &nw + (*nwp).A = 1 // want "write to NonWritable field outside constructor" +} + +func NonWritableSetWithinSlice() { + ptr := []*NonWritable{NewNonWritable()} + ptr[0].A = 1 // want "write to NonWritable field outside constructor" + lit := []NonWritable{*NewNonWritable()} + lit[0].A = 1 // want "write to NonWritable field outside constructor" +} + +func NonWritableSetWithinArray() { + ptr := [1]*NonWritable{NewNonWritable()} + ptr[0].A = 1 // want "write to NonWritable field outside constructor" + lit := [1]NonWritable{*NewNonWritable()} + lit[0].A = 1 // want "write to NonWritable field outside constructor" +} + +func NonWritableSetWithinMap() { + ptr := map[int]*NonWritable{1: NewNonWritable()} + ptr[1].A = 1 // want "write to NonWritable field outside constructor" + // writing to a non-pointer value in a map is a compile error +} + +func NonWritableSetWithinAnonymousFunc() { + nw := NewNonWritable() + fn := func() { + nw.A = 1 // want "write to NonWritable field outside constructor" + } + fn() +} + +// Writable is not configured for linting. +type Writable struct { + A int +} + +func NewWritable() Writable { + return Writable{ + A: 1, + } +} + +func (w Writable) SetA() { + w.A = 1 +} + +// EmbedsNonWritable is not configured for linting, but embeds a type that is. 
+type EmbedsNonWritable struct {
+	NonWritable
+	B int
+}
+
+func NewEmbedsNonWritable() EmbedsNonWritable {
+	return EmbedsNonWritable{
+		NonWritable: *NewNonWritable(),
+	}
+}
+
+func (w *EmbedsNonWritable) SetA() {
+	// disallowed because A is promoted by embedding from a mutation-protected type
+	w.A = 1 // want "write to NonWritable field outside constructor"
+}
+
+func (w *EmbedsNonWritable) SetB() {
+	// allowed because B is not promoted from the embedded mutation-protected type
+	w.B = 1
+}
+
+func EmbedsNonWritableConstructLiteral() {
+	nw := EmbedsNonWritable{}
+	nw = EmbedsNonWritable{B: 2}
+	nw = EmbedsNonWritable{NonWritable: NonWritable{A: 1}}       // want "construction of NonWritable outside constructor"
+	nw = EmbedsNonWritable{B: 2, NonWritable: NonWritable{A: 1}} // want "construction of NonWritable outside constructor"
+	nwp := &EmbedsNonWritable{}
+	nwp = &EmbedsNonWritable{B: 2}
+	nwp = &EmbedsNonWritable{NonWritable: NonWritable{A: 1}} // want "construction of NonWritable outside constructor"
+	_ = nw
+	_ = nwp
+}
+
+// ContainsNonWritableField is not configured for linting, but has a field which is mutation-protected.
+type ContainsNonWritableField struct {
+	NonWritableField NonWritable
+	B                int
+}
+
+func (w *ContainsNonWritableField) SetA() {
+	// disallowed because we are writing to a field of the mutation-protected type
+	w.NonWritableField.A = 1 // want "write to NonWritable field outside constructor"
+}
+
+// SetNonWritableField sets a field which has a mutation-protected type, but is a field
+// of a non-mutation-protected type. This is allowed.
+func (w *ContainsNonWritableField) SetNonWritableField() {
+	// allowed because we are not mutating the mutation-protected type (we are mutating ContainsNonWritableField)
+	w.NonWritableField = *NewNonWritable()
+}
+
+func (w *ContainsNonWritableField) SetB() {
+	// allowed because B is an ordinary field of the non-mutation-protected outer type
+	w.B = 1
+}
+
+func ContainsNonWritableFieldConstructLiteral() {
+	nw := ContainsNonWritableField{}
+	nw = ContainsNonWritableField{B: 2}
+	nw = ContainsNonWritableField{NonWritableField: NonWritable{A: 1}}       // want "construction of NonWritable outside constructor"
+	nw = ContainsNonWritableField{B: 2, NonWritableField: NonWritable{A: 1}} // want "construction of NonWritable outside constructor"
+	nwp := &ContainsNonWritableField{}
+	nwp = &ContainsNonWritableField{B: 2}
+	nwp = &ContainsNonWritableField{NonWritableField: NonWritable{A: 1}} // want "construction of NonWritable outside constructor"
+	_ = nw
+	_ = nwp
+}
+
+// ContainsDeeplyNestedNonWritableField is not configured for linting, but has a (deeply nested) field which is mutation-protected.
+type ContainsDeeplyNestedNonWritableField struct {
+	DeeplyNestedNonWritableField struct {
+		L1 struct {
+			NonWritableField NonWritable
+		}
+	}
+	B int
+}
+
+// SetNonWritableField sets a field which has a mutation-protected type, but is a field
+// of a non-mutation-protected type. This is allowed.
+func (w *ContainsDeeplyNestedNonWritableField) SetNonWritableField() {
+	// allowed because we are not mutating the mutation-protected type (we are mutating ContainsDeeplyNestedNonWritableField)
+	w.DeeplyNestedNonWritableField.L1.NonWritableField = *NewNonWritable()
+	// can set the middle nested layer for the same reason
+	w.DeeplyNestedNonWritableField.L1 = struct {
+		NonWritableField NonWritable
+	}{
+		NonWritableField: *NewNonWritable(),
+	}
+}
+
+func (w *ContainsDeeplyNestedNonWritableField) SetB() {
+	// allowed because B is an ordinary field of the non-mutation-protected outer type
+	w.B = 1
+}
+
+func ContainsDeeplyNestedNonWritableFieldConstructLiteral() {
+	nw := ContainsDeeplyNestedNonWritableField{}
+	nw = ContainsDeeplyNestedNonWritableField{B: 2}
+	nw = ContainsDeeplyNestedNonWritableField{
+		DeeplyNestedNonWritableField: struct {
+			L1 struct{ NonWritableField NonWritable }
+		}{L1: struct {
+			NonWritableField NonWritable
+		}{
+			NonWritableField: NonWritable{A: 1}, // want "construction of NonWritable outside constructor"
+		}},
+	}
+	nwp := &ContainsDeeplyNestedNonWritableField{}
+	nwp = &ContainsDeeplyNestedNonWritableField{B: 2}
+	nwp = &ContainsDeeplyNestedNonWritableField{
+		DeeplyNestedNonWritableField: struct {
+			L1 struct{ NonWritableField NonWritable }
+		}{L1: struct {
+			NonWritableField NonWritable
+		}{
+			NonWritableField: NonWritable{A: 1}, // want "construction of NonWritable outside constructor"
+		}},
+	}
+	_ = nw
+	_ = nwp
+}
diff --git a/tools/structwrite/testdata/src/github.com/username/linttestmodule/subpkg/subpkg.go b/tools/structwrite/testdata/src/github.com/username/linttestmodule/subpkg/subpkg.go
new file mode 100644
index 00000000000..0a1d784d467
--- /dev/null
+++ b/tools/structwrite/testdata/src/github.com/username/linttestmodule/subpkg/subpkg.go
@@ -0,0 +1,37 @@
+package subpkg
+
+// NonWritable has the same type name as NonWritable in the root package.
+// Only the root package is analyzed by the test; this type exists to check
+// that types are tracked by fully qualified name.
+//
+//structwrite:immutable
+type NonWritable struct {
+	A int
+}
+
+func NewNonWritable() NonWritable {
+	return NonWritable{
+		A: 1,
+	}
+}
+
+func (nw *NonWritable) SetA() {
+	nw.A = 1
+}
+
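Both packages deliberately declare a type named NonWritable; the linter tells them apart because isMutationProtected keys on *types.Named.String(), which includes the full package path. A standalone sketch of that behavior, constructing the types by hand rather than type-checking real source:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	root := types.NewPackage("github.com/username/linttestmodule", "linttestmodule")
	sub := types.NewPackage("github.com/username/linttestmodule/subpkg", "subpkg")
	for _, pkg := range []*types.Package{root, sub} {
		obj := types.NewTypeName(token.NoPos, pkg, "NonWritable", nil)
		named := types.NewNamed(obj, types.NewStruct(nil, nil), nil)
		fmt.Println(named.String())
	}
	// Output:
	// github.com/username/linttestmodule.NonWritable
	// github.com/username/linttestmodule/subpkg.NonWritable
}
```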
+// NonWritableInSubpackage is configured for linting.
+//
+//structwrite:immutable
+type NonWritableInSubpackage struct {
+	A int
+}
+
+func NewNonWritableInSubpackage() NonWritableInSubpackage {
+	return NonWritableInSubpackage{
+		A: 1,
+	}
+}
+
+func (nw *NonWritableInSubpackage) SetA() {
+	nw.A = 1 // want "write to NonWritableInSubpackage field outside constructor"
+}
diff --git a/tools/test_matrix_generator/default-test-matrix-config.json b/tools/test_matrix_generator/default-test-matrix-config.json
new file mode 100644
index 00000000000..a62ac6aec5a
--- /dev/null
+++ b/tools/test_matrix_generator/default-test-matrix-config.json
@@ -0,0 +1,33 @@
+{
+  "includeOthers": true,
+  "packages": [
+    {"name": "admin"},
+    {"name": "cmd"},
+    {"name": "consensus"},
+    {"name": "fvm"},
+    {"name": "ledger"},
+    {"name": "state"},
+    {"name": "storage"},
+    {"name": "utils"},
+    {"name": "engine", "runner": "buildjet-4vcpu-ubuntu-2004", "subpackages": [
+      {"name": "engine/access"},
+      {"name": "engine/collection"},
+      {"name": "engine/common"},
+      {"name": "engine/consensus"},
+      {"name": "engine/execution/computation"},
+      {"name": "engine/execution"},
+      {"name": "engine/verification"},
+      {"name": "engine/execution/ingestion", "runner": "buildjet-8vcpu-ubuntu-2004"}
+    ]},
+    {"name": "module", "runner": "buildjet-4vcpu-ubuntu-2004", "subpackages": [{"name": "module/dkg"}]},
+    {"name": "network", "subpackages": [
+      {"name": "network/alsp"},
+      {"name": "network/p2p/connection"},
+      {"name": "network/p2p/scoring"},
+      {"name": "network/p2p", "runner": "buildjet-16vcpu-ubuntu-2004"},
+      {"name": "network/test/cohort1", "runner": "buildjet-16vcpu-ubuntu-2004"},
+      {"name": "network/test/cohort2", "runner": "buildjet-4vcpu-ubuntu-2004"},
+      {"name": "network/p2p/node", "runner": "buildjet-4vcpu-ubuntu-2004"}
+    ]}
+  ]
+}
diff --git a/tools/test_matrix_generator/insecure-module-test-matrix-config.json b/tools/test_matrix_generator/insecure-module-test-matrix-config.json
new file mode 100644
index 00000000000..59e7aa6ecb0
--- /dev/null
+++ b/tools/test_matrix_generator/insecure-module-test-matrix-config.json
@@ -0,0 +1,10 @@
+{
+  "packagesPath": "./insecure",
+  "includeOthers": false,
+  "packages": [
+    {"name": "insecure", "runner": "buildjet-4vcpu-ubuntu-2004", "subpackages": [
+      {"name": "insecure/integration/functional/test/gossipsub/rpc_inspector", "runner": "buildjet-8vcpu-ubuntu-2004"},
+      {"name": "insecure/integration/functional/test/gossipsub/scoring", "runner": "buildjet-8vcpu-ubuntu-2004"}
+    ]}
+  ]
+}
diff --git a/tools/test_matrix_generator/integration-module-test-matrix-config.json b/tools/test_matrix_generator/integration-module-test-matrix-config.json
new file mode 100644
index 00000000000..379ee6ab64e
--- /dev/null
+++ b/tools/test_matrix_generator/integration-module-test-matrix-config.json
@@ -0,0 +1,9 @@
+{
+  "packagesPath": "./integration",
+  "includeOthers": false,
+  "packages": [{
+    "name": "integration",
+    "runner": "buildjet-4vcpu-ubuntu-2004",
+    "exclude": ["integration/tests"]
+  }]
+}
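To make the schema of these JSON configs concrete, here is a small illustrative program that decodes the integration config shown above; the struct definitions simply mirror the ones in matrix.go below:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors of the generator's config structs (see matrix.go below).
type subpackage struct {
	Name   string `json:"name"`
	Runner string `json:"runner,omitempty"`
}

type flowGoPackage struct {
	Name        string        `json:"name"`
	Runner      string        `json:"runner,omitempty"`
	Exclude     []string      `json:"exclude,omitempty"`
	Subpackages []*subpackage `json:"subpackages,omitempty"`
}

type config struct {
	PackagesPath  string           `json:"packagesPath,omitempty"`
	IncludeOthers bool             `json:"includeOthers,omitempty"`
	Packages      []*flowGoPackage `json:"packages"`
}

func main() {
	raw := `{"packagesPath": "./integration", "includeOthers": false,
	         "packages": [{"name": "integration", "runner": "buildjet-4vcpu-ubuntu-2004", "exclude": ["integration/tests"]}]}`
	var cfg config
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("path=%s others=%v first=%s runner=%s exclude=%v\n",
		cfg.PackagesPath, cfg.IncludeOthers, cfg.Packages[0].Name, cfg.Packages[0].Runner, cfg.Packages[0].Exclude)
}
```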
diff --git a/tools/test_matrix_generator/matrix.go b/tools/test_matrix_generator/matrix.go
new file mode 100644
index 00000000000..1fea6492ef5
--- /dev/null
+++ b/tools/test_matrix_generator/matrix.go
@@ -0,0 +1,230 @@
+package main
+
+import (
+	"bytes"
+	_ "embed"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/spf13/pflag"
+	"golang.org/x/tools/go/packages"
+)
+
+var (
+	//go:embed default-test-matrix-config.json
+	defaultTestMatrixConfig string
+
+	//go:embed insecure-module-test-matrix-config.json
+	insecureModuleTestMatrixConfig string
+
+	//go:embed integration-module-test-matrix-config.json
+	integrationModuleTestMatrixConfig string
+
+	matrixConfigFile string
+)
+
+const (
+	flowPackagePrefix = "github.com/onflow/flow-go/"
+	ciMatrixName      = "dynamicMatrix"
+	defaultCIRunner   = "ubuntu-latest"
+)
+
+// flowGoPackage is the configuration for a package to be tested.
+type flowGoPackage struct {
+	// Name is the name of the package where the tests are located.
+	Name string `json:"name"`
+	// Runner is the runner used for the top-level GitHub Actions job that runs all the tests in the parent package.
+	Runner string `json:"runner,omitempty"`
+	// Exclude is a list of packages to exclude from the top-level parent package test matrix.
+	Exclude []string `json:"exclude,omitempty"`
+	// Subpackages is a list of subpackages of the parent package that should be run in their own GitHub Actions job.
+	Subpackages []*subpackage `json:"subpackages,omitempty"`
+}
+
+// subpackage is the configuration for a subpackage.
+type subpackage struct {
+	Name   string `json:"name"`
+	Runner string `json:"runner,omitempty"`
+}
+
+// config is the test matrix configuration for a package.
+type config struct {
+	// PackagesPath is the directory to load packages from.
+	PackagesPath string `json:"packagesPath,omitempty"`
+	// IncludeOthers, when set to true, puts all packages under the packages path that are not covered by an explicit entry into a test matrix that runs in a job called "others".
+	IncludeOthers bool `json:"includeOthers,omitempty"`
+	// Packages holds the configurations for all packages that tests should be run from.
+	Packages []*flowGoPackage `json:"packages"`
+}
+
+// testMatrix represents a single GitHub Actions test matrix combination that consists of a name and a list of flow-go packages associated with that name.
+type testMatrix struct {
+	Name     string `json:"name"`
+	Packages string `json:"packages"`
+	Runner   string `json:"runner"`
+}
+
+// newTestMatrix returns a new testMatrix; if runner is the empty string, the runner defaults to defaultCIRunner.
+func newTestMatrix(name, runner, pkgs string) *testMatrix {
+	t := &testMatrix{
+		Name:     name,
+		Packages: pkgs,
+		Runner:   runner,
+	}
+
+	if t.Runner == "" {
+		t.Runner = defaultCIRunner
+	}
+
+	return t
+}
+
+// main generates the list of packages to test that will be passed to GitHub Actions.
+func main() {
+	pflag.Parse()
+
+	var configFile string
+	switch matrixConfigFile {
+	case "insecure":
+		configFile = insecureModuleTestMatrixConfig
+	case "integration":
+		configFile = integrationModuleTestMatrixConfig
+	default:
+		configFile = defaultTestMatrixConfig
+	}
+
+	packageConfig := loadPackagesConfig(configFile)
+
+	testMatrices := buildTestMatrices(packageConfig, listAllFlowPackages)
+	printCIString(testMatrices)
+}
+
+// printCIString encodes the test matrices and prints the json string to stdout. The CI runner will read this json string
+// and make the data available for our github workflows.
+func printCIString(testMatrices []*testMatrix) {
+	// generate JSON output that will be read in by CI matrix
+	// can't use json.MarshalIndent because fromJSON() in CI can't read JSON with any spaces
+	b, err := json.Marshal(testMatrices)
+	if err != nil {
+		panic(fmt.Errorf("failed to marshal test matrices json: %w", err))
+	}
+	// this string will be read by CI to generate groups of tests to run in separate CI jobs
+	testMatrixStr := "::set-output name=" + ciMatrixName + "::" + string(b)
+	// very important to add newline character at the end of the compacted JSON - otherwise fromJSON() in CI will throw unmarshalling error
+	fmt.Println(testMatrixStr)
+}
+
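printCIString is easiest to understand from its output. The sketch below (matrix entries invented for illustration) emits the same single compact line that the workflow's fromJSON() consumes; note that ::set-output is the legacy GitHub Actions output mechanism this tooling relies on:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Same shape as the generator's testMatrix type.
type testMatrix struct {
	Name     string `json:"name"`
	Packages string `json:"packages"`
	Runner   string `json:"runner"`
}

func main() {
	matrices := []*testMatrix{
		{Name: "admin", Packages: "github.com/onflow/flow-go/admin ", Runner: "ubuntu-latest"},
		{Name: "network/p2p", Packages: "github.com/onflow/flow-go/network/p2p ", Runner: "buildjet-16vcpu-ubuntu-2004"},
	}
	b, err := json.Marshal(matrices)
	if err != nil {
		panic(err)
	}
	// Prints one line, e.g.:
	// ::set-output name=dynamicMatrix::[{"name":"admin","packages":"...","runner":"ubuntu-latest"},...]
	fmt.Println("::set-output name=dynamicMatrix::" + string(b))
}
```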
+// buildTestMatrices builds the test matrices.
+func buildTestMatrices(packageConfig *config, flowPackages func(dir string) []*packages.Package) []*testMatrix {
+	testMatrices := make([]*testMatrix, 0)
+	seenPaths := make(map[string]struct{})
+	seenPath := func(p string) {
+		seenPaths[p] = struct{}{}
+	}
+	seen := func(p string) bool {
+		_, seen := seenPaths[p]
+		return seen
+	}
+
+	for _, topLevelPkg := range packageConfig.Packages {
+		allPackages := flowPackages(topLevelPkg.Name)
+		// first build a test matrix for each of the subpackages and mark all matched package paths as seen
+		subPkgMatrices := processSubpackages(topLevelPkg.Subpackages, allPackages, seenPath)
+		testMatrices = append(testMatrices, subPkgMatrices...)
+		// now build the top-level test matrix
+		topLevelTestMatrix := processTopLevelPackage(topLevelPkg, allPackages, seenPath, seen)
+		testMatrices = append(testMatrices, topLevelTestMatrix)
+	}
+
+	// any packages left out of the explicit Packages field will be run together as "others" from the config PackagesPath
+	if packageConfig.IncludeOthers {
+		allPkgs := flowPackages(packageConfig.PackagesPath)
+		if othersTestMatrix := buildOthersTestMatrix(allPkgs, seen); othersTestMatrix != nil {
+			testMatrices = append(testMatrices, othersTestMatrix)
+		}
+	}
+	return testMatrices
+}
+
+// processSubpackages creates a test matrix for each of the subpackages provided.
+func processSubpackages(subPkgs []*subpackage, allPkgs []*packages.Package, seenPath func(p string)) []*testMatrix {
+	testMatrices := make([]*testMatrix, 0)
+	for _, subPkg := range subPkgs {
+		pkgPath := fullGoPackagePath(subPkg.Name)
+		// this is the list of packages that will be used with the go test command
+		var testPkgStrBuilder strings.Builder
+		for _, p := range allPkgs {
+			if strings.HasPrefix(p.PkgPath, pkgPath) {
+				testPkgStrBuilder.WriteString(fmt.Sprintf("%s ", p.PkgPath))
+				seenPath(p.PkgPath)
+			}
+		}
+		testMatrices = append(testMatrices, newTestMatrix(subPkg.Name, subPkg.Runner, testPkgStrBuilder.String()))
+	}
+	return testMatrices
+}
+
+// processTopLevelPackage creates a test matrix for the top-level package, excluding any packages from the exclude list.
+func processTopLevelPackage(pkg *flowGoPackage, allPkgs []*packages.Package, seenPath func(p string), seen func(p string) bool) *testMatrix {
+	var topLevelTestPkgStrBuilder strings.Builder
+	for _, p := range allPkgs {
+		if !seen(p.PkgPath) {
+			includePkg := true
+			for _, exclude := range pkg.Exclude {
+				if strings.HasPrefix(p.PkgPath, fullGoPackagePath(exclude)) {
+					includePkg = false
+				}
+			}
+
+			if includePkg && strings.HasPrefix(p.PkgPath, fullGoPackagePath(pkg.Name)) {
+				topLevelTestPkgStrBuilder.WriteString(fmt.Sprintf("%s ", p.PkgPath))
+				seenPath(p.PkgPath)
+			}
+		}
+	}
+	return newTestMatrix(pkg.Name, pkg.Runner, topLevelTestPkgStrBuilder.String())
+}
+
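Both helpers above match packages by plain import-path prefix, so a subpackage entry also claims every package nested beneath it. A quick illustration with paths borrowed from the tests later in this diff:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	const prefix = "github.com/onflow/flow-go/network/p2p/node"
	for _, pkg := range []string{
		"github.com/onflow/flow-go/network/p2p/node",          // claimed by the entry itself
		"github.com/onflow/flow-go/network/p2p/node/internal", // claimed: nested under the entry
		"github.com/onflow/flow-go/network/p2p/scoring",       // not claimed
	} {
		fmt.Println(pkg, strings.HasPrefix(pkg, prefix))
	}
}
```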
+// buildOthersTestMatrix builds the "others" test matrix, which includes all packages under a path that are not explicitly set in the packages list of a config.
+func buildOthersTestMatrix(allPkgs []*packages.Package, seen func(p string) bool) *testMatrix {
+	var othersTestPkgStrBuilder strings.Builder
+	for _, otherPkg := range allPkgs {
+		if !seen(otherPkg.PkgPath) {
+			othersTestPkgStrBuilder.WriteString(fmt.Sprintf("%s ", otherPkg.PkgPath))
+		}
+	}
+
+	if othersTestPkgStrBuilder.Len() > 0 {
+		return newTestMatrix("others", "", othersTestPkgStrBuilder.String())
+	}
+
+	return nil
+}
+
+func listAllFlowPackages(dir string) []*packages.Package {
+	flowPackages, err := packages.Load(&packages.Config{Dir: dir}, "./...")
+	if err != nil {
+		panic(err)
+	}
+	return flowPackages
+}
+
+func loadPackagesConfig(configFile string) *config {
+	var packageConfig config
+	buf := bytes.NewBufferString(configFile)
+	err := json.NewDecoder(buf).Decode(&packageConfig)
+	if err != nil {
+		panic(fmt.Errorf("failed to decode package config json: %w (config: %s)", err, configFile))
+	}
+	return &packageConfig
+}
+
+func fullGoPackagePath(pkg string) string {
+	return fmt.Sprintf("%s%s", flowPackagePrefix, pkg)
+}
+
+func init() {
+	// Add flags to the FlagSet
+	pflag.StringVarP(&matrixConfigFile, "config", "c", "", "the config file used to generate the test matrix")
+}
diff --git a/tools/test_matrix_generator/matrix_test.go b/tools/test_matrix_generator/matrix_test.go
new file mode 100644
index 00000000000..a155b541f62
--- /dev/null
+++ b/tools/test_matrix_generator/matrix_test.go
@@ -0,0 +1,161 @@
+package main
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"golang.org/x/tools/go/packages"
+)
+
+// TestLoadPackagesConfig ensures packages config json loads as expected.
+func TestLoadPackagesConfig(t *testing.T) {
+	configFile := `{"packagesPath": ".", "includeOthers": true, "packages": [{"name": "testPackage"}]}`
+	config := loadPackagesConfig(configFile)
+	if config.PackagesPath != "." || !config.IncludeOthers || len(config.Packages) != 1 {
+		t.Errorf("loadPackagesConfig failed for valid input")
+	}
+
+	invalidConfigFile := "invalidJSON"
+	defer func() {
+		if recover() == nil {
+			t.Errorf("loadPackagesConfig did not panic for invalid JSON input")
+		}
+	}()
+	loadPackagesConfig(invalidConfigFile)
+}
+
+// TestBuildMatrices ensures test matrices are built from config json as expected.
+func TestBuildMatrices(t *testing.T) {
+	t.Run("top level package only default runner", func(t *testing.T) {
+		name := "counter"
+		configFile := fmt.Sprintf(`{"packagesPath": ".", "includeOthers": true, "packages": [{"name": "%s"}]}`, name)
+		allPackages := goPackageFixture("counter/count", "counter/print/int", "counter/log")
+		cfg := loadPackagesConfig(configFile)
+		matrices := buildTestMatrices(cfg, func(dir string) []*packages.Package {
+			return allPackages
+		})
+		require.Equal(t, name, matrices[0].Name)
+		require.Equal(t, defaultCIRunner, matrices[0].Runner)
+		require.Equal(t, fmt.Sprintf("%s %s %s ", allPackages[0].PkgPath, allPackages[1].PkgPath, allPackages[2].PkgPath), matrices[0].Packages)
+		fmt.Println(matrices[0].Name, matrices[0].Runner, matrices[0].Packages)
+	})
+	t.Run("top level package only override runner", func(t *testing.T) {
+		name := "counter"
+		runner := "buildjet-4vcpu-ubuntu-2204"
+		configFile := fmt.Sprintf(`{"packagesPath": ".", "packages": [{"name": "%s", "runner": "%s"}]}`, name, runner)
+		allPackages := goPackageFixture("counter/count", "counter/print/int", "counter/log")
+		cfg := loadPackagesConfig(configFile)
+		matrices := buildTestMatrices(cfg, func(dir string) []*packages.Package {
+			return allPackages
+		})
+		require.Equal(t, name, matrices[0].Name)
+		require.Equal(t, runner, matrices[0].Runner)
+		require.Equal(t, fmt.Sprintf("%s %s %s ", allPackages[0].PkgPath, allPackages[1].PkgPath, allPackages[2].PkgPath), matrices[0].Packages)
+	})
+	t.Run("top level package with sub packages include others", func(t *testing.T) {
+		topLevelPkgName := "network"
+		subPkg1 := "network/p2p/node"
+		subPkg2 := "module/chunks"
+		subPkg3 := "crypto/hash"
+		subPkg4 := "model/bootstrap"
+		subPkg1Runner := "buildjet-4vcpu-ubuntu-2204"
+		configFile := fmt.Sprintf(`
+		{"packagesPath": ".", "includeOthers": true, "packages": [{"name": "%s", "subpackages": [{"name": "%s", "runner": "%s"}, {"name": "%s"}, {"name": "%s"}, {"name": "%s"}]}]}`,
+			topLevelPkgName, subPkg1, subPkg1Runner, subPkg2, subPkg3, subPkg4)
+		allPackages := goPackageFixture(
+			"network",
+			"network/alsp",
+			"network/cache",
+			"network/channels",
+			"network/p2p/node",
+			"network/p2p/node/internal",
+			"module",
+			"module/chunks/chunky",
+			"crypto/hash",
+			"crypto/random",
+			"crypto/hash/ecc",
+			"model/bootstrap",
+			"model/bootstrap/info",
+			"model",
+		)
+		cfg := loadPackagesConfig(configFile)
+		matrices := buildTestMatrices(cfg, func(dir string) []*packages.Package {
+			return allPackages
+		})
+		require.Len(t, matrices, 6)
+		for _, matrix := range matrices {
+			switch matrix.Name {
+			case topLevelPkgName:
+				require.Equal(t, defaultCIRunner, matrix.Runner)
+				require.Equal(t, fmt.Sprintf("%s %s %s %s ", allPackages[0].PkgPath, allPackages[1].PkgPath, allPackages[2].PkgPath, allPackages[3].PkgPath), matrix.Packages)
+			case subPkg1:
+				require.Equal(t, subPkg1Runner, matrix.Runner)
+				require.Equal(t, fmt.Sprintf("%s %s ", allPackages[4].PkgPath, allPackages[5].PkgPath), matrix.Packages)
+			case subPkg2:
+				require.Equal(t, defaultCIRunner, matrix.Runner)
+				require.Equal(t, fmt.Sprintf("%s ", allPackages[7].PkgPath), matrix.Packages)
+			case subPkg3:
+				require.Equal(t, defaultCIRunner, matrix.Runner)
+				require.Equal(t, fmt.Sprintf("%s %s ", allPackages[8].PkgPath, allPackages[10].PkgPath), matrix.Packages)
+			case subPkg4:
+				require.Equal(t, defaultCIRunner, matrix.Runner)
+				require.Equal(t, fmt.Sprintf("%s %s ", allPackages[11].PkgPath, allPackages[12].PkgPath), matrix.Packages)
+			case "others":
+				require.Equal(t, defaultCIRunner, matrix.Runner)
+				require.Equal(t, fmt.Sprintf("%s %s %s ", allPackages[6].PkgPath, allPackages[9].PkgPath, allPackages[13].PkgPath), matrix.Packages)
+			default:
+				require.Fail(t, fmt.Sprintf("unexpected matrix name: %s", matrix.Name))
+			}
+		}
+	})
+	t.Run("top level package with sub packages and exclude", func(t *testing.T) {
+		topLevelPkgName := "network"
+		subPkg1 := "network/p2p/node"
+		subPkg1Runner := "buildjet-4vcpu-ubuntu-2204"
+		configFile := fmt.Sprintf(`
+		{"packagesPath": ".", "packages": [{"name": "%s", "exclude": ["network/alsp"], "subpackages": [{"name": "%s", "runner": "%s"}]}]}`,
+			topLevelPkgName, subPkg1, subPkg1Runner)
+		allPackages := goPackageFixture(
+			"network",
+			"network/alsp",
+			"network/cache",
+			"network/channels",
+			"network/p2p/node",
+			"network/p2p/node/internal",
+			"module",
+			"module/chunks/chunky",
+			"crypto/hash",
+			"crypto/random",
+			"crypto/hash/ecc",
+			"model/bootstrap",
+			"model/bootstrap/info",
+			"model",
+		)
+		cfg := loadPackagesConfig(configFile)
+		matrices := buildTestMatrices(cfg, func(dir string) []*packages.Package {
+			return allPackages
+		})
+		require.Len(t, matrices, 2)
+		for _, matrix := range matrices {
+			switch matrix.Name {
+			case topLevelPkgName:
+				require.Equal(t, defaultCIRunner, matrix.Runner)
+				require.Equal(t, fmt.Sprintf("%s %s %s ", allPackages[0].PkgPath, allPackages[2].PkgPath, allPackages[3].PkgPath), matrix.Packages)
+			case subPkg1:
+				require.Equal(t, subPkg1Runner, matrix.Runner)
+				require.Equal(t, fmt.Sprintf("%s %s ", allPackages[4].PkgPath, allPackages[5].PkgPath), matrix.Packages)
+			default:
+				require.Fail(t, fmt.Sprintf("unexpected matrix name: %s", matrix.Name))
+			}
+		}
+	})
+}
+
+func goPackageFixture(pkgs ...string) []*packages.Package {
+	goPkgs := make([]*packages.Package, len(pkgs))
+	for i, pkg := range pkgs {
+		goPkgs[i] = &packages.Package{PkgPath: fullGoPackagePath(pkg)}
+	}
+	return goPkgs
+}
diff --git a/tools/test_monitor/common/testdata/test_data.go b/tools/test_monitor/common/testdata/test_data.go
index 728184f5b63..f324427b702 100644
--- a/tools/test_monitor/common/testdata/test_data.go
+++ b/tools/test_monitor/common/testdata/test_data.go
@@ -9,7 +9,7 @@ import (
 const COMMIT_DATE = "2021-09-21T18:06:25-07:00"
 const COMMIT_SHA = "46baf6c6be29af9c040bc14195e195848598bbae"
 const JOB_STARTED = "2021-09-21T21:06:25-07:00"
-const CRYPTO_HASH_PACKAGE = "github.com/onflow/flow-go/crypto/hash"
+const CRYPTO_HASH_PACKAGE = "github.com/onflow/crypto/hash"
 const RUN_ID = "12345"
 
 // Level1TestData is used by tests to store what the expected test result should be and what the raw
@@ -225,7 +225,7 @@ func GetTestData_Level1_1Count1FailRestPass() common.Level1Summary {
 	}
 }
 
-// GetTestData_Level1_1CountAllPass represents a level 1 summary (as exptected output from level 1 parser)
+// GetTestData_Level1_1CountAllPass represents a level 1 summary (as expected output from level 1 parser)
 // with multiple passed tests, count=1.
 func GetTestData_Level1_1CountAllPass() common.Level1Summary {
 	return []common.Level1TestResult{
diff --git a/tools/test_monitor/level1/process_summary1_results_test.go b/tools/test_monitor/level1/process_summary1_results_test.go
index c64f8442995..6e7b12f0551 100644
--- a/tools/test_monitor/level1/process_summary1_results_test.go
+++ b/tools/test_monitor/level1/process_summary1_results_test.go
@@ -33,19 +33,19 @@ func TestGenerateLevel1Summary_Struct(t *testing.T) {
 			RawJSONTestRunFile:    "test-result-crypto-hash-1-count-skip-pass.json",
 		},
 
-		// raw results generated with: go test -json -count 1 --tags relic ./utils/unittest/...
+ // raw results generated with: go test -json -count 1 ./utils/unittest/... "2 count all pass": { ExpectedLevel1Summary: testdata.GetTestData_Level1_2CountPass(), RawJSONTestRunFile: "test-result-crypto-hash-2-count-pass.json", }, - // raw results generated with: go test -json -count 1 --tags relic ./utils/unittest/... + // raw results generated with: go test -json -count 1 ./utils/unittest/... "10 count all pass": { ExpectedLevel1Summary: testdata.GetTestData_Level1_10CountPass(), RawJSONTestRunFile: "test-result-crypto-hash-10-count-pass.json", }, - // raw results generated with: go test -json -count 1 --tags relic ./utils/unittest/... + // raw results generated with: go test -json -count 1 ./utils/unittest/... "10 count some failures": { ExpectedLevel1Summary: testdata.GetTestData_Level1_10CountSomeFailures(), RawJSONTestRunFile: "test-result-crypto-hash-10-count-fail.json", @@ -54,14 +54,14 @@ func TestGenerateLevel1Summary_Struct(t *testing.T) { // no result tests - tests below don't generate pass/fail result due to `go test` bug // with using `fmt.printf("log message")` without newline `\n` - // raw results generated with: go test -v -tags relic -count=1 -json ./model/encodable/. -test.run TestEncodableRandomBeaconPrivKeyMsgPack + // raw results generated with: go test -v -count=1 -json ./model/encodable/. -test.run TestEncodableRandomBeaconPrivKeyMsgPack // this is a single unit test that produces a no result "1 count single no result test": { ExpectedLevel1Summary: testdata.GetTestData_Level1_1CountSingleExceptionTest(), RawJSONTestRunFile: "test-result-exception-single-1-count-pass.json", }, - //raw results generated with: go test -v -tags relic -count=5 -json ./model/encodable/. -test.run TestEncodableRandomBeaconPrivKeyMsgPack + //raw results generated with: go test -v -count=5 -json ./model/encodable/. -test.run TestEncodableRandomBeaconPrivKeyMsgPack //multiple no result tests in a row "5 no result tests in a row": { ExpectedLevel1Summary: testdata.GetTestData_Level1_5CountSingleExceptionTest(), @@ -74,7 +74,7 @@ func TestGenerateLevel1Summary_Struct(t *testing.T) { RawJSONTestRunFile: "test-result-exception-single-5-count-4-nil-1-normal-pass.json", }, - // raw results generated with: go test -v -tags relic -count=3 -json ./model/encodable/. + // raw results generated with: go test -v -count=3 -json ./model/encodable/. // group of unit tests with a single no result test "3 count no result test with normal tests": { ExpectedLevel1Summary: testdata.GetTestData_Leve1_3CountExceptionWithNormalTests(), diff --git a/tools/test_monitor/level2/process_summary2_results.go b/tools/test_monitor/level2/process_summary2_results.go index a8491ac5853..8d40c2cb711 100644 --- a/tools/test_monitor/level2/process_summary2_results.go +++ b/tools/test_monitor/level2/process_summary2_results.go @@ -107,8 +107,8 @@ func saveExceptionMessage(testResult common.Level1TestResult) { // for each failed / exception test, we want to save the raw output message as a text file // there could be multiple failures / exceptions of the same test, so we want to save each failed / exception message in a separate text file // each test with failures / exceptions will have a uniquely named (based on test name and package) subdirectory where failed / exception messages are saved -// e.g. "failures/TestSanitySha3_256+github.com-onflow-flow-go-crypto-hash" will store failed messages text files -// from test TestSanitySha3_256 from the "github.com/onflow/flow-go/crypto/hash" package +// e.g. 
"failures/TestSanitySha3_256+github.com-onflow-crypto-hash" will store failed messages text files +// from test TestSanitySha3_256 from the "github.com/onflow/crypto/hash" package // failure and exception messages are saved in a similar way so this helper function // handles saving both types of messages func saveMessageHelper(testResult common.Level1TestResult, messagesDir string, messageFileStem string) { diff --git a/tools/test_monitor/level2/process_summary2_results_test.go b/tools/test_monitor/level2/process_summary2_results_test.go index cd5e981c4d2..df9c731a172 100644 --- a/tools/test_monitor/level2/process_summary2_results_test.go +++ b/tools/test_monitor/level2/process_summary2_results_test.go @@ -104,11 +104,11 @@ const actualExceptionMessagesPath = "./exceptions" func deleteMessagesDir(t *testing.T) { // delete failure test dir that stores failure messages err := os.RemoveAll(actualFailureMessagesPath) - require.Nil(t, err) + require.NoError(t, err) // delete exceptions test dir that stores exception messages err = os.RemoveAll(actualExceptionMessagesPath) - require.Nil(t, err) + require.NoError(t, err) } func checkLevel2Summary(t *testing.T, actualLevel2Summary common.Level2Summary, testData testdata.Level2TestData) { @@ -119,10 +119,10 @@ func checkLevel2Summary(t *testing.T, actualLevel2Summary common.Level2Summary, // read in expected summary level 2 var expectedLevel2Summary common.Level2Summary expectedLevel2SummaryJsonBytes, err := os.ReadFile(expectedOutputTestDataPath) - require.Nil(t, err) + require.NoError(t, err) require.NotEmpty(t, expectedLevel2SummaryJsonBytes) err = json.Unmarshal(expectedLevel2SummaryJsonBytes, &expectedLevel2Summary) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, len(expectedLevel2Summary.TestResultsMap), len(actualLevel2Summary.TestResultsMap)) @@ -180,11 +180,11 @@ func checkMessagesHelper(t *testing.T, expectedMessagesPath string, actualMessag // count expected failure / exception directories (1 directory/test) expectedMessageDirs, err := os.ReadDir(expectedMessagesPath) - require.Nil(t, err) + require.NoError(t, err) // count actual failure / exception directories actualMessageDirs, err := os.ReadDir(actualMessagesPath) - require.Nil(t, err) + require.NoError(t, err) // expected test summary has at least 1 failure / exception require.Equal(t, len(expectedMessageDirs), len(actualMessageDirs)) @@ -198,10 +198,10 @@ func checkMessagesHelper(t *testing.T, expectedMessagesPath string, actualMessag // under each sub-directory, there should be 1 or more text files (failure1.txt/exception1.txt, failure2.txt/exception2.txt, etc) // that holds the raw failure / exception message for that test expectedMessagesDirFiles, err := os.ReadDir(filepath.Join(expectedMessagesPath, expectedMessageDir.Name())) - require.Nil(t, err) + require.NoError(t, err) actualMessageDirFiles, err := os.ReadDir(filepath.Join(actualMessagesPath, actualMessageDirs[expectedMessageDirIndex].Name())) - require.Nil(t, err) + require.NoError(t, err) // make sure there are the expected number of failed / exception text files in the sub-directory require.Equal(t, len(expectedMessagesDirFiles), len(actualMessageDirFiles)) @@ -215,11 +215,11 @@ func checkMessagesHelper(t *testing.T, expectedMessagesPath string, actualMessag for expectedMessageFileIndex, expectedMessageFileDirEntry := range expectedMessagesDirFiles { expectedMessageFilePath := filepath.Join(expectedMessagesPath, expectedMessageDir.Name(), expectedMessageFileDirEntry.Name()) expectedMessageFileBytes, err 
:= os.ReadFile(expectedMessageFilePath) - require.Nil(t, err) + require.NoError(t, err) actualMessageFilePath := filepath.Join(actualMessagesPath, actualMessageDirs[expectedMessageDirIndex].Name(), actualMessageDirFiles[expectedMessageFileIndex].Name()) actualMessageFileBytes, err := os.ReadFile(actualMessageFilePath) - require.Nil(t, err) + require.NoError(t, err) // read expected and actual text files as bytes and compare them all at once require.Equal(t, expectedMessageFileBytes, actualMessageFileBytes) diff --git a/tools/test_monitor/level3/process_summary3_results_test.go b/tools/test_monitor/level3/process_summary3_results_test.go index 9849efa2a7b..feb19a7a4be 100644 --- a/tools/test_monitor/level3/process_summary3_results_test.go +++ b/tools/test_monitor/level3/process_summary3_results_test.go @@ -45,10 +45,10 @@ func runGenerateLevel3Summary(t *testing.T, testDir string) { // read in expected summary level 3 var expectedTestSummary3 common.Level3Summary expectedTestSummary3JsonBytes, err := os.ReadFile(expectedOutputTestDataPath) - require.Nil(t, err) + require.NoError(t, err) require.NotEmpty(t, expectedTestSummary3JsonBytes) err = json.Unmarshal(expectedTestSummary3JsonBytes, &expectedTestSummary3) - require.Nil(t, err) + require.NoError(t, err) // check all details of test summary level 2 between expected and actual diff --git a/tools/test_monitor/run-tests.sh b/tools/test_monitor/run-tests.sh deleted file mode 100755 index 0cbf1383b19..00000000000 --- a/tools/test_monitor/run-tests.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash - -# This script runs the tests in the category specified by the TEST_CATEGORY environment variable. -# Echo / logging statements send output to standard error to separate that output from test result output -# (which sends output to standard output) which needs to be parsed. - -set -e -shopt -s extglob - -echo "test category (run-tests):" $TEST_CATEGORY>&2 - -# run tests and process results - -if [[ $TEST_CATEGORY =~ integration-(bft|ghost|mvp|network|epochs|access|collection|consensus|execution|verification)$ ]] -then - echo "killing and removing orphaned containers from previous run">&2 - # kill and remove orphaned containers from previous run - containers=$(docker ps -a -q) - - if [ ! 
-z "$containers" ] - then - docker rm -f $containers > /dev/null - fi - - echo "preparing $TEST_CATEGORY tests">&2 - make crypto_setup_gopath - make docker-build-flow docker-build-flow-corrupt - echo "running $TEST_CATEGORY tests">&2 - make -C integration -s ${BASH_REMATCH[1]}-tests > test-output -else - case $TEST_CATEGORY in - unit) - echo "preparing unit tests">&2 - make install-tools - make verify-mocks - echo "running unit tests">&2 - make -s unittest-main > test-output - ;; - unit-crypto) - echo "preparing crypto unit tests">&2 - make -C crypto setup - echo "running crypto unit tests">&2 - make -C crypto -s unittest > test-output - ;; - unit-insecure) - echo "preparing insecure unit tests">&2 - make install-tools - echo "running insecure unit tests">&2 - make -C insecure -s test > test-output - ;; - unit-integration) - echo "preparing integration unit tests">&2 - make install-tools - echo "running integration unit tests">&2 - make -C integration -s test > test-output - ;; - *) - echo "unrecognized test category (run-tests):" $TEST_CATEGORY>&2 - exit 1 - ;; - esac -fi - diff --git a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-fail.json b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-fail.json index c9f23234e69..04cda7b0b10 100644 --- a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-fail.json +++ b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-fail.json @@ -1,55 +1,55 @@ -{"Time":"2021-09-24T11:51:27.764908-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-24T11:51:27.767125-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-24T11:51:27.767166-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" hash_test.go:21: \n"} -{"Time":"2021-09-24T11:51:27.76718-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \tError Trace:\thash_test.go:21\n"} -{"Time":"2021-09-24T11:51:27.767187-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \tError: \tNot equal: \n"} -{"Time":"2021-09-24T11:51:27.767194-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \texpected: hash.Hash{0x36, 0xf0, 0x28, 0x58, 0xb, 0xb0, 0x2c, 0xc8, 0x27, 0x2a, 0x9a, 0x2, 0xf, 0x42, 0x0, 0xe3, 0x46, 0xe2, 0x76, 0xae, 0x66, 0x4e, 0x45, 0xee, 0x80, 0x74, 0x55, 0x74, 0xe2, 0xf5, 0xab, 0x81}\n"} -{"Time":"2021-09-24T11:51:27.767226-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \tactual : hash.Hash{0x36, 0xf0, 0x28, 0x58, 0xb, 0xb0, 0x2c, 0xc8, 0x27, 0x2a, 0x9a, 0x2, 0xf, 0x42, 0x0, 0xe3, 0x46, 0xe2, 0x76, 0xae, 0x66, 0x4e, 0x45, 0xee, 0x80, 0x74, 0x55, 0x74, 0xe2, 0xf5, 0xab, 0x80}\n"} -{"Time":"2021-09-24T11:51:27.767234-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t\n"} -{"Time":"2021-09-24T11:51:27.76724-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \tDiff:\n"} 
-{"Time":"2021-09-24T11:51:27.76745-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-24T11:51:27.767484-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t+++ Actual\n"} -{"Time":"2021-09-24T11:51:27.767497-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-24T11:51:27.76751-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t-(hash.Hash) (len=32) 0x36f028580bb02cc8272a9a020f4200e346e276ae664e45ee80745574e2f5ab81\n"} -{"Time":"2021-09-24T11:51:27.767522-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t+(hash.Hash) (len=32) 0x36f028580bb02cc8272a9a020f4200e346e276ae664e45ee80745574e2f5ab80\n"} -{"Time":"2021-09-24T11:51:27.767533-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t \n"} -{"Time":"2021-09-24T11:51:27.767544-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":" \tTest: \tTestSanitySha3_256\n"} -{"Time":"2021-09-24T11:51:27.76759-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- FAIL: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-24T11:51:27.767602-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-24T11:51:27.767632-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-24T11:51:27.767642-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-24T11:51:27.767654-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-24T11:51:27.767663-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-24T11:51:27.767672-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-24T11:51:27.767681-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-24T11:51:27.767692-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-24T11:51:27.767702-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-24T11:51:27.767712-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-24T11:51:27.767721-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-24T11:51:27.767732-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} 
-{"Time":"2021-09-24T11:51:27.767743-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-24T11:51:27.767752-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-24T11:51:27.767791-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-24T11:51:27.767816-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-24T11:51:27.767824-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-24T11:51:27.767831-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-24T11:51:27.767837-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-24T11:51:27.767843-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632498687765218000\n"} -{"Time":"2021-09-24T11:51:27.767866-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-24T11:51:27.767875-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-24T11:51:27.767884-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-24T11:51:27.7679-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-24T11:51:27.767909-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632498687765661000\n"} -{"Time":"2021-09-24T11:51:27.767965-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-24T11:51:27.767986-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-24T11:51:27.875316-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-24T11:51:27.875342-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-24T11:51:27.99617-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} -{"Time":"2021-09-24T11:51:27.996209-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.11s)\n"} -{"Time":"2021-09-24T11:51:27.99622-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.11} -{"Time":"2021-09-24T11:51:27.996229-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} 
-{"Time":"2021-09-24T11:51:27.996233-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-24T11:51:27.996236-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.23} -{"Time":"2021-09-24T11:51:27.996241-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"FAIL\n"} -{"Time":"2021-09-24T11:51:27.997332-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"FAIL\tgithub.com/onflow/flow-go/crypto/hash\t0.483s\n"} -{"Time":"2021-09-24T11:51:27.997383-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Elapsed":0.483} \ No newline at end of file +{"Time":"2021-09-24T11:51:27.764908-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-24T11:51:27.767125-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-24T11:51:27.767166-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" hash_test.go:21: \n"} +{"Time":"2021-09-24T11:51:27.76718-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \tError Trace:\thash_test.go:21\n"} +{"Time":"2021-09-24T11:51:27.767187-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \tError: \tNot equal: \n"} +{"Time":"2021-09-24T11:51:27.767194-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \texpected: hash.Hash{0x36, 0xf0, 0x28, 0x58, 0xb, 0xb0, 0x2c, 0xc8, 0x27, 0x2a, 0x9a, 0x2, 0xf, 0x42, 0x0, 0xe3, 0x46, 0xe2, 0x76, 0xae, 0x66, 0x4e, 0x45, 0xee, 0x80, 0x74, 0x55, 0x74, 0xe2, 0xf5, 0xab, 0x81}\n"} +{"Time":"2021-09-24T11:51:27.767226-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \tactual : hash.Hash{0x36, 0xf0, 0x28, 0x58, 0xb, 0xb0, 0x2c, 0xc8, 0x27, 0x2a, 0x9a, 0x2, 0xf, 0x42, 0x0, 0xe3, 0x46, 0xe2, 0x76, 0xae, 0x66, 0x4e, 0x45, 0xee, 0x80, 0x74, 0x55, 0x74, 0xe2, 0xf5, 0xab, 0x80}\n"} +{"Time":"2021-09-24T11:51:27.767234-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t\n"} +{"Time":"2021-09-24T11:51:27.76724-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-24T11:51:27.76745-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t--- Expected\n"} +{"Time":"2021-09-24T11:51:27.767484-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t+++ Actual\n"} +{"Time":"2021-09-24T11:51:27.767497-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} +{"Time":"2021-09-24T11:51:27.76751-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t-(hash.Hash) (len=32) 0x36f028580bb02cc8272a9a020f4200e346e276ae664e45ee80745574e2f5ab81\n"} +{"Time":"2021-09-24T11:51:27.767522-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t+(hash.Hash) (len=32) 
0x36f028580bb02cc8272a9a020f4200e346e276ae664e45ee80745574e2f5ab80\n"} +{"Time":"2021-09-24T11:51:27.767533-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \t \t \n"} +{"Time":"2021-09-24T11:51:27.767544-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":" \tTest: \tTestSanitySha3_256\n"} +{"Time":"2021-09-24T11:51:27.76759-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- FAIL: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-24T11:51:27.767602-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-24T11:51:27.767632-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-24T11:51:27.767642-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-24T11:51:27.767654-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-24T11:51:27.767663-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-24T11:51:27.767672-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-24T11:51:27.767681-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-24T11:51:27.767692-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-24T11:51:27.767702-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-24T11:51:27.767712-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-24T11:51:27.767721-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-24T11:51:27.767732-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-24T11:51:27.767743-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-24T11:51:27.767752-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-24T11:51:27.767791-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-24T11:51:27.767816-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-24T11:51:27.767824-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-24T11:51:27.767831-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-24T11:51:27.767837-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} 
+{"Time":"2021-09-24T11:51:27.767843-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632498687765218000\n"} +{"Time":"2021-09-24T11:51:27.767866-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-24T11:51:27.767875-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-24T11:51:27.767884-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-24T11:51:27.7679-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-24T11:51:27.767909-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632498687765661000\n"} +{"Time":"2021-09-24T11:51:27.767965-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-24T11:51:27.767986-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-24T11:51:27.875316-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-24T11:51:27.875342-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-24T11:51:27.99617-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} +{"Time":"2021-09-24T11:51:27.996209-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.11s)\n"} +{"Time":"2021-09-24T11:51:27.99622-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.11} +{"Time":"2021-09-24T11:51:27.996229-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-24T11:51:27.996233-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-24T11:51:27.996236-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.23} +{"Time":"2021-09-24T11:51:27.996241-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"FAIL\n"} +{"Time":"2021-09-24T11:51:27.997332-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"FAIL\tgithub.com/onflow/crypto/hash\t0.483s\n"} +{"Time":"2021-09-24T11:51:27.997383-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Elapsed":0.483} \ No newline at end of file diff --git a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-pass.json b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-pass.json index 4fe74ee9c9d..386e6b733ae 100644 --- a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-pass.json +++ b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-pass.json @@ -1,41 +1,41 @@ -{"Time":"2021-09-24T11:27:29.121674-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} 
-{"Time":"2021-09-24T11:27:29.121944-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-24T11:27:29.121969-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-24T11:27:29.121973-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-24T11:27:29.121993-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-24T11:27:29.121996-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-24T11:27:29.122-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-24T11:27:29.122003-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-24T11:27:29.122006-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-24T11:27:29.122126-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-24T11:27:29.122143-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-24T11:27:29.122155-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-24T11:27:29.122161-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-24T11:27:29.122168-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-24T11:27:29.122175-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-24T11:27:29.122184-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-24T11:27:29.122189-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-24T11:27:29.122194-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-24T11:27:29.122205-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-24T11:27:29.122212-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-24T11:27:29.122216-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-24T11:27:29.122273-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} 
-{"Time":"2021-09-24T11:27:29.122293-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632497249121800000\n"} -{"Time":"2021-09-24T11:27:29.122332-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-24T11:27:29.122339-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-24T11:27:29.122344-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-24T11:27:29.122351-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-24T11:27:29.122663-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632497249122032000\n"} -{"Time":"2021-09-24T11:27:29.122678-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-24T11:27:29.122724-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-24T11:27:29.223618-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-24T11:27:29.223646-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-24T11:27:29.347325-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} -{"Time":"2021-09-24T11:27:29.347372-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-24T11:27:29.347376-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-24T11:27:29.347401-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-24T11:27:29.347405-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-24T11:27:29.347408-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.23} -{"Time":"2021-09-24T11:27:29.347412-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"PASS\n"} -{"Time":"2021-09-24T11:27:29.348371-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"ok \tgithub.com/onflow/flow-go/crypto/hash\t0.349s\n"} -{"Time":"2021-09-24T11:27:29.348417-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Elapsed":0.349} \ No newline at end of file +{"Time":"2021-09-24T11:27:29.121674-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-24T11:27:29.121944-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} 
+{"Time":"2021-09-24T11:27:29.121969-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-24T11:27:29.121973-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-24T11:27:29.121993-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-24T11:27:29.121996-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-24T11:27:29.122-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-24T11:27:29.122003-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-24T11:27:29.122006-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-24T11:27:29.122126-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-24T11:27:29.122143-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-24T11:27:29.122155-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-24T11:27:29.122161-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-24T11:27:29.122168-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-24T11:27:29.122175-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-24T11:27:29.122184-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-24T11:27:29.122189-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-24T11:27:29.122194-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-24T11:27:29.122205-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-24T11:27:29.122212-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-24T11:27:29.122216-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-24T11:27:29.122273-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-24T11:27:29.122293-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632497249121800000\n"} +{"Time":"2021-09-24T11:27:29.122332-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} 
+{"Time":"2021-09-24T11:27:29.122339-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-24T11:27:29.122344-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-24T11:27:29.122351-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-24T11:27:29.122663-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632497249122032000\n"} +{"Time":"2021-09-24T11:27:29.122678-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-24T11:27:29.122724-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-24T11:27:29.223618-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-24T11:27:29.223646-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-24T11:27:29.347325-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} +{"Time":"2021-09-24T11:27:29.347372-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-24T11:27:29.347376-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-24T11:27:29.347401-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-24T11:27:29.347405-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-24T11:27:29.347408-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.23} +{"Time":"2021-09-24T11:27:29.347412-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"PASS\n"} +{"Time":"2021-09-24T11:27:29.348371-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"ok \tgithub.com/onflow/crypto/hash\t0.349s\n"} +{"Time":"2021-09-24T11:27:29.348417-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Elapsed":0.349} \ No newline at end of file diff --git a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-skip-pass.json b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-skip-pass.json index 0236cfc342b..5f27a6040b7 100644 --- a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-skip-pass.json +++ b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-1-count-skip-pass.json @@ -1,42 +1,42 @@ -{"Time":"2021-10-06T07:11:37.589443-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-10-06T07:11:37.58988-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-10-06T07:11:37.589928-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} 
-{"Time":"2021-10-06T07:11:37.589942-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-10-06T07:11:37.589967-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-10-06T07:11:37.589977-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-10-06T07:11:37.589987-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-10-06T07:11:37.589999-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-10-06T07:11:37.590008-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-10-06T07:11:37.590017-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-10-06T07:11:37.590029-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:36: SKIP [TEST_TODO]: skip for testing\n"} -{"Time":"2021-10-06T07:11:37.590039-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- SKIP: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-10-06T07:11:37.590049-04:00","Action":"skip","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-10-06T07:11:37.590061-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-10-06T07:11:37.590073-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-10-06T07:11:37.590087-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-10-06T07:11:37.5901-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-10-06T07:11:37.590114-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-10-06T07:11:37.590137-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-10-06T07:11:37.590159-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-10-06T07:11:37.590169-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-10-06T07:11:37.590178-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-10-06T07:11:37.590186-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-10-06T07:11:37.5902-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:96: SKIP [TEST_TODO]: skip for testing\n"} 
-{"Time":"2021-10-06T07:11:37.590209-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- SKIP: TestHashersAPI (0.00s)\n"} -{"Time":"2021-10-06T07:11:37.590246-04:00","Action":"skip","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-10-06T07:11:37.590253-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-10-06T07:11:37.590261-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-10-06T07:11:37.590268-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:160: math rand seed is 1633518697589650000\n"} -{"Time":"2021-10-06T07:11:37.590283-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-10-06T07:11:37.590289-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-10-06T07:11:37.701578-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-10-06T07:11:37.701615-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-10-06T07:11:37.828418-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.24s)\n"} -{"Time":"2021-10-06T07:11:37.828497-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.11s)\n"} -{"Time":"2021-10-06T07:11:37.828512-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.11} -{"Time":"2021-10-06T07:11:37.82853-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.13s)\n"} -{"Time":"2021-10-06T07:11:37.828545-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.13} -{"Time":"2021-10-06T07:11:37.828557-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.24} -{"Time":"2021-10-06T07:11:37.828563-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"PASS\n"} -{"Time":"2021-10-06T07:11:37.829672-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"ok \tgithub.com/onflow/flow-go/crypto/hash\t0.446s\n"} -{"Time":"2021-10-06T07:11:37.829738-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Elapsed":0.446} +{"Time":"2021-10-06T07:11:37.589443-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-10-06T07:11:37.58988-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-10-06T07:11:37.589928-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-10-06T07:11:37.589942-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} 
+{"Time":"2021-10-06T07:11:37.589967-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-10-06T07:11:37.589977-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-10-06T07:11:37.589987-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-10-06T07:11:37.589999-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-10-06T07:11:37.590008-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-10-06T07:11:37.590017-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-10-06T07:11:37.590029-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:36: SKIP [TEST_TODO]: skip for testing\n"} +{"Time":"2021-10-06T07:11:37.590039-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- SKIP: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-10-06T07:11:37.590049-04:00","Action":"skip","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-10-06T07:11:37.590061-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-10-06T07:11:37.590073-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-10-06T07:11:37.590087-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-10-06T07:11:37.5901-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-10-06T07:11:37.590114-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-10-06T07:11:37.590137-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-10-06T07:11:37.590159-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-10-06T07:11:37.590169-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-10-06T07:11:37.590178-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-10-06T07:11:37.590186-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-10-06T07:11:37.5902-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:96: SKIP [TEST_TODO]: skip for testing\n"} +{"Time":"2021-10-06T07:11:37.590209-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- SKIP: TestHashersAPI (0.00s)\n"} +{"Time":"2021-10-06T07:11:37.590246-04:00","Action":"skip","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} 
+{"Time":"2021-10-06T07:11:37.590253-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-10-06T07:11:37.590261-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-10-06T07:11:37.590268-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:160: math rand seed is 1633518697589650000\n"} +{"Time":"2021-10-06T07:11:37.590283-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-10-06T07:11:37.590289-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-10-06T07:11:37.701578-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-10-06T07:11:37.701615-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-10-06T07:11:37.828418-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.24s)\n"} +{"Time":"2021-10-06T07:11:37.828497-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.11s)\n"} +{"Time":"2021-10-06T07:11:37.828512-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.11} +{"Time":"2021-10-06T07:11:37.82853-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.13s)\n"} +{"Time":"2021-10-06T07:11:37.828545-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.13} +{"Time":"2021-10-06T07:11:37.828557-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.24} +{"Time":"2021-10-06T07:11:37.828563-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"PASS\n"} +{"Time":"2021-10-06T07:11:37.829672-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"ok \tgithub.com/onflow/crypto/hash\t0.446s\n"} +{"Time":"2021-10-06T07:11:37.829738-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Elapsed":0.446} diff --git a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-10-count-fail.json b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-10-count-fail.json index 7c1203f336c..a594c74fb0c 100644 --- a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-10-count-fail.json +++ b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-10-count-fail.json @@ -1,523 +1,523 @@ -{"Time":"2021-09-27T06:48:02.184192-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:48:02.184694-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:48:02.18473-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.184738-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} 
-{"Time":"2021-09-27T06:48:02.184759-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:48:02.184765-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:48:02.184772-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.184778-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:48:02.184784-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:48:02.184913-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:02.184947-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} -{"Time":"2021-09-27T06:48:02.184971-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} -{"Time":"2021-09-27T06:48:02.184993-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} -{"Time":"2021-09-27T06:48:02.185002-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} -{"Time":"2021-09-27T06:48:02.185025-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} -{"Time":"2021-09-27T06:48:02.185037-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} -{"Time":"2021-09-27T06:48:02.185046-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} -{"Time":"2021-09-27T06:48:02.185055-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-27T06:48:02.185063-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} -{"Time":"2021-09-27T06:48:02.185101-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-27T06:48:02.185122-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} -{"Time":"2021-09-27T06:48:02.185145-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 
0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} -{"Time":"2021-09-27T06:48:02.185155-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} -{"Time":"2021-09-27T06:48:02.185164-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:02.185182-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.185192-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:48:02.185199-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:48:02.185208-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:48:02.185218-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.185239-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:48:02.185245-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:48:02.185261-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:48:02.185271-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.18528-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:48:02.185289-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:48:02.185297-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:48:02.185307-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739682184421000\n"} -{"Time":"2021-09-27T06:48:02.185335-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.185346-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:48:02.185354-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:48:02.185376-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:48:02.185386-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739682184858000\n"} -{"Time":"2021-09-27T06:48:02.185396-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} 
-{"Time":"2021-09-27T06:48:02.185404-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:48:02.294081-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:48:02.294138-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:48:02.415091-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} -{"Time":"2021-09-27T06:48:02.415117-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.11s)\n"} -{"Time":"2021-09-27T06:48:02.415121-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.11} -{"Time":"2021-09-27T06:48:02.415206-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:48:02.41522-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:48:02.415224-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.23} -{"Time":"2021-09-27T06:48:02.415227-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:48:02.41523-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:48:02.415234-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.415237-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:48:02.415322-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:48:02.415329-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:48:02.415335-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.415344-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:48:02.415349-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:48:02.415354-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:02.415361-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} -{"Time":"2021-09-27T06:48:02.415364-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} 
-{"Time":"2021-09-27T06:48:02.415368-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} -{"Time":"2021-09-27T06:48:02.415371-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} -{"Time":"2021-09-27T06:48:02.415406-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} -{"Time":"2021-09-27T06:48:02.41542-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} -{"Time":"2021-09-27T06:48:02.415436-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} -{"Time":"2021-09-27T06:48:02.415442-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-27T06:48:02.415454-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} -{"Time":"2021-09-27T06:48:02.415467-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-27T06:48:02.415476-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} -{"Time":"2021-09-27T06:48:02.415481-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} -{"Time":"2021-09-27T06:48:02.415487-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} -{"Time":"2021-09-27T06:48:02.415492-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:02.415498-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.415504-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:48:02.415507-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:48:02.41551-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:48:02.415514-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} 
-{"Time":"2021-09-27T06:48:02.415517-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:48:02.41552-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:48:02.415548-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:48:02.415557-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.415563-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:48:02.415568-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:48:02.415573-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:48:02.415578-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739682415309000\n"} -{"Time":"2021-09-27T06:48:02.415609-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.415656-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:48:02.415675-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:48:02.41568-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:48:02.415697-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739682415616000\n"} -{"Time":"2021-09-27T06:48:02.415702-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:48:02.415706-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:48:02.51693-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:48:02.516956-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:48:02.636991-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:48:02.637017-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:48:02.637022-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:48:02.637027-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} 
-{"Time":"2021-09-27T06:48:02.637031-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:48:02.637034-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:48:02.637037-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:48:02.637059-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:48:02.637075-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.637095-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:48:02.6371-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:48:02.637103-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:48:02.637117-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.637177-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:48:02.637187-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:48:02.63719-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:02.637196-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} -{"Time":"2021-09-27T06:48:02.6372-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} -{"Time":"2021-09-27T06:48:02.637203-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} -{"Time":"2021-09-27T06:48:02.637231-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} -{"Time":"2021-09-27T06:48:02.637241-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} -{"Time":"2021-09-27T06:48:02.637248-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} -{"Time":"2021-09-27T06:48:02.637265-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} 
-{"Time":"2021-09-27T06:48:02.63729-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-27T06:48:02.637294-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} -{"Time":"2021-09-27T06:48:02.637298-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-27T06:48:02.637301-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} -{"Time":"2021-09-27T06:48:02.637305-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} -{"Time":"2021-09-27T06:48:02.637309-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} -{"Time":"2021-09-27T06:48:02.637334-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:02.63734-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.637343-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:48:02.637346-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:48:02.637352-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:48:02.637358-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.637376-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:48:02.63738-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:48:02.637383-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:48:02.637386-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.63739-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:48:02.637392-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:48:02.637404-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:48:02.637408-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739682637108000\n"} 
-{"Time":"2021-09-27T06:48:02.637414-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.637417-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:48:02.63742-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:48:02.637423-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:48:02.637426-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739682637311000\n"} -{"Time":"2021-09-27T06:48:02.637466-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:48:02.637491-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:48:02.736502-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:48:02.736566-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:48:02.857319-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:48:02.85738-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:48:02.857386-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:48:02.8574-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:48:02.857404-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:48:02.857408-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:48:02.857413-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:48:02.857417-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:48:02.857426-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.85743-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:48:02.857433-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:48:02.857437-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} 
-{"Time":"2021-09-27T06:48:02.857441-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.857541-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:48:02.857552-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:48:02.857557-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:02.857565-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} -{"Time":"2021-09-27T06:48:02.857568-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} -{"Time":"2021-09-27T06:48:02.857572-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} -{"Time":"2021-09-27T06:48:02.857588-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} -{"Time":"2021-09-27T06:48:02.857594-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} -{"Time":"2021-09-27T06:48:02.857613-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} -{"Time":"2021-09-27T06:48:02.857617-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} -{"Time":"2021-09-27T06:48:02.85762-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-27T06:48:02.857624-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} -{"Time":"2021-09-27T06:48:02.857639-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-27T06:48:02.857644-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} -{"Time":"2021-09-27T06:48:02.857647-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} -{"Time":"2021-09-27T06:48:02.857651-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} 
-{"Time":"2021-09-27T06:48:02.85766-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:02.857665-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.857668-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:48:02.857671-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:48:02.857674-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:48:02.857678-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.857699-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:48:02.857744-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:48:02.85775-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:48:02.857757-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.857782-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:48:02.857789-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:48:02.857793-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:48:02.857796-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739682857435000\n"} -{"Time":"2021-09-27T06:48:02.857838-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:48:02.857843-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:48:02.857846-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:48:02.857849-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:48:02.857852-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739682857668000\n"} -{"Time":"2021-09-27T06:48:02.857855-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:48:02.857859-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} 
-{"Time":"2021-09-27T06:48:02.956791-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:48:02.956849-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:48:03.076829-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:48:03.076855-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:48:03.076859-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:48:03.076866-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:48:03.076871-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:48:03.076876-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:48:03.07688-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:48:03.077032-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:48:03.077046-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.077053-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:48:03.077056-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:48:03.077059-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:48:03.077063-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.077123-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:48:03.07713-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:48:03.077133-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:03.077137-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} -{"Time":"2021-09-27T06:48:03.07714-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} -{"Time":"2021-09-27T06:48:03.077143-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} 
-{"Time":"2021-09-27T06:48:03.077156-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} -{"Time":"2021-09-27T06:48:03.07716-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} -{"Time":"2021-09-27T06:48:03.077172-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} -{"Time":"2021-09-27T06:48:03.077195-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} -{"Time":"2021-09-27T06:48:03.077199-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-27T06:48:03.077203-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} -{"Time":"2021-09-27T06:48:03.077207-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-27T06:48:03.077212-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} -{"Time":"2021-09-27T06:48:03.077224-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} -{"Time":"2021-09-27T06:48:03.077229-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} -{"Time":"2021-09-27T06:48:03.077232-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:03.077236-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.077239-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:48:03.077273-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:48:03.077286-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:48:03.077294-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.077299-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} 
-{"Time":"2021-09-27T06:48:03.077304-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:48:03.077308-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:48:03.077313-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.077326-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:48:03.077338-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:48:03.077344-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:48:03.077349-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739683077064000\n"} -{"Time":"2021-09-27T06:48:03.077357-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.07736-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:48:03.077363-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:48:03.077366-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:48:03.07737-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739683077268000\n"} -{"Time":"2021-09-27T06:48:03.077808-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:48:03.077826-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:48:03.176088-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:48:03.176175-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:48:03.297398-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:48:03.297426-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:48:03.297431-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:48:03.297439-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:48:03.297442-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} 
-{"Time":"2021-09-27T06:48:03.297445-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:48:03.297448-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:48:03.297467-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:48:03.297477-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.297482-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:48:03.297485-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:48:03.297488-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:48:03.297491-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.297591-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:48:03.297618-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:48:03.297624-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:03.297632-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} -{"Time":"2021-09-27T06:48:03.297635-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} -{"Time":"2021-09-27T06:48:03.297639-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} -{"Time":"2021-09-27T06:48:03.297642-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} -{"Time":"2021-09-27T06:48:03.297646-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} -{"Time":"2021-09-27T06:48:03.297678-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} -{"Time":"2021-09-27T06:48:03.297687-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} -{"Time":"2021-09-27T06:48:03.297704-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-27T06:48:03.297709-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} -{"Time":"2021-09-27T06:48:03.297714-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-27T06:48:03.297738-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} -{"Time":"2021-09-27T06:48:03.297751-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} -{"Time":"2021-09-27T06:48:03.297755-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} -{"Time":"2021-09-27T06:48:03.297759-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:03.297763-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.297767-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:48:03.29777-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:48:03.297773-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:48:03.297776-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.297795-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:48:03.2978-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:48:03.297802-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:48:03.297806-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.29781-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:48:03.297813-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:48:03.297816-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:48:03.297819-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739683297507000\n"} -{"Time":"2021-09-27T06:48:03.297825-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"}
-{"Time":"2021-09-27T06:48:03.297833-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:48:03.297837-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:48:03.29784-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:48:03.297843-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739683297711000\n"} -{"Time":"2021-09-27T06:48:03.297847-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:48:03.297855-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:48:03.39759-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:48:03.397615-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:48:03.518378-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:48:03.518404-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:48:03.518408-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:48:03.518413-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:48:03.518417-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:48:03.51842-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:48:03.518423-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:48:03.518426-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:48:03.518429-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.518432-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:48:03.518453-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:48:03.518456-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:48:03.51846-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} 
-{"Time":"2021-09-27T06:48:03.518629-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:48:03.518642-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:48:03.518648-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:03.518658-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} -{"Time":"2021-09-27T06:48:03.518683-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} -{"Time":"2021-09-27T06:48:03.518689-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} -{"Time":"2021-09-27T06:48:03.518693-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} -{"Time":"2021-09-27T06:48:03.518697-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} -{"Time":"2021-09-27T06:48:03.518702-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} -{"Time":"2021-09-27T06:48:03.518705-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} -{"Time":"2021-09-27T06:48:03.518708-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-27T06:48:03.518711-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} -{"Time":"2021-09-27T06:48:03.518747-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-27T06:48:03.51876-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} -{"Time":"2021-09-27T06:48:03.518772-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} -{"Time":"2021-09-27T06:48:03.518778-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} -{"Time":"2021-09-27T06:48:03.51879-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} 
-{"Time":"2021-09-27T06:48:03.518811-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.518816-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:48:03.51882-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:48:03.518822-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:48:03.518826-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.518835-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:48:03.518838-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:48:03.51884-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:48:03.518844-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.518847-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:48:03.518849-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:48:03.518852-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:48:03.518881-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739683518492000\n"} -{"Time":"2021-09-27T06:48:03.518905-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.518912-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:48:03.518917-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:48:03.518922-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:48:03.518927-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739683518781000\n"} -{"Time":"2021-09-27T06:48:03.518935-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:48:03.51894-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:48:03.618338-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} 
-{"Time":"2021-09-27T06:48:03.618361-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:48:03.740052-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:48:03.740148-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:48:03.740153-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:48:03.740158-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:48:03.740161-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:48:03.740164-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:48:03.740167-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:48:03.740292-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:48:03.740318-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.740325-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:48:03.74033-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:48:03.740335-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:48:03.740341-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.740417-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:48:03.740429-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:48:03.740434-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:03.74045-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} -{"Time":"2021-09-27T06:48:03.740455-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} -{"Time":"2021-09-27T06:48:03.740461-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} -{"Time":"2021-09-27T06:48:03.740465-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} -{"Time":"2021-09-27T06:48:03.740471-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} -{"Time":"2021-09-27T06:48:03.740505-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} -{"Time":"2021-09-27T06:48:03.740515-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} -{"Time":"2021-09-27T06:48:03.74052-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-27T06:48:03.740524-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} -{"Time":"2021-09-27T06:48:03.740536-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-27T06:48:03.74054-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} -{"Time":"2021-09-27T06:48:03.740544-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} -{"Time":"2021-09-27T06:48:03.740547-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} -{"Time":"2021-09-27T06:48:03.74055-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:03.740592-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.740606-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:48:03.740611-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:48:03.74062-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:48:03.740682-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.740696-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:48:03.7407-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:48:03.740704-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"}
-{"Time":"2021-09-27T06:48:03.740728-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.74074-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:48:03.740752-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:48:03.740757-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:48:03.740764-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739683740724000\n"} -{"Time":"2021-09-27T06:48:03.740982-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.740995-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:48:03.74102-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:48:03.741028-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:48:03.741037-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739683740970000\n"} -{"Time":"2021-09-27T06:48:03.741063-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:48:03.741078-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:48:03.858998-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:48:03.859054-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:48:03.979881-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.24s)\n"} -{"Time":"2021-09-27T06:48:03.97994-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.12s)\n"} -{"Time":"2021-09-27T06:48:03.979945-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.12} -{"Time":"2021-09-27T06:48:03.97995-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:48:03.979953-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:48:03.979956-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.24} -{"Time":"2021-09-27T06:48:03.979959-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} 
-{"Time":"2021-09-27T06:48:03.979975-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:48:03.979988-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.979992-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:48:03.979995-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:48:03.979998-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:48:03.980002-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.980104-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:48:03.980129-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:48:03.980135-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:03.980142-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} -{"Time":"2021-09-27T06:48:03.980152-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} -{"Time":"2021-09-27T06:48:03.980156-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} -{"Time":"2021-09-27T06:48:03.980159-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} -{"Time":"2021-09-27T06:48:03.980163-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} -{"Time":"2021-09-27T06:48:03.980167-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} -{"Time":"2021-09-27T06:48:03.980192-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} -{"Time":"2021-09-27T06:48:03.980205-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-27T06:48:03.980211-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} 
-{"Time":"2021-09-27T06:48:03.980216-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-27T06:48:03.980221-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} -{"Time":"2021-09-27T06:48:03.980244-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} -{"Time":"2021-09-27T06:48:03.980257-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} -{"Time":"2021-09-27T06:48:03.980261-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:03.980266-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.980271-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:48:03.980274-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:48:03.980277-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:48:03.98028-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.980284-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:48:03.980286-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:48:03.980307-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:48:03.980317-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.980323-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:48:03.98033-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:48:03.980335-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:48:03.98034-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739683980033000\n"} -{"Time":"2021-09-27T06:48:03.980388-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:48:03.980399-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} 
-{"Time":"2021-09-27T06:48:03.980403-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:48:03.980408-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:48:03.980411-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739683980266000\n"} -{"Time":"2021-09-27T06:48:03.980416-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:48:03.98042-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:48:04.080285-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:48:04.080313-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:48:04.200352-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:48:04.20038-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:48:04.200385-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:48:04.20039-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:48:04.200393-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:48:04.200397-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:48:04.200401-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:48:04.200405-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:48:04.200422-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:48:04.200426-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:48:04.200429-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:48:04.200433-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:48:04.200437-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:04.200554-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} 
-{"Time":"2021-09-27T06:48:04.200568-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:48:04.200572-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:04.200579-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} -{"Time":"2021-09-27T06:48:04.200583-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} -{"Time":"2021-09-27T06:48:04.200586-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} -{"Time":"2021-09-27T06:48:04.20059-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} -{"Time":"2021-09-27T06:48:04.200596-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} -{"Time":"2021-09-27T06:48:04.200609-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} -{"Time":"2021-09-27T06:48:04.200613-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} -{"Time":"2021-09-27T06:48:04.200616-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} -{"Time":"2021-09-27T06:48:04.20062-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} -{"Time":"2021-09-27T06:48:04.200634-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} -{"Time":"2021-09-27T06:48:04.200651-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} -{"Time":"2021-09-27T06:48:04.200657-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} -{"Time":"2021-09-27T06:48:04.200662-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} -{"Time":"2021-09-27T06:48:04.200666-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} -{"Time":"2021-09-27T06:48:04.200671-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} 
-{"Time":"2021-09-27T06:48:04.200677-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:48:04.200682-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:48:04.200687-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:48:04.200695-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:48:04.200698-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:48:04.200701-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:48:04.200704-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:48:04.200707-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:48:04.20071-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:48:04.200713-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:48:04.200716-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:48:04.200722-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739684200452000\n"} -{"Time":"2021-09-27T06:48:04.200737-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:48:04.200741-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:48:04.200745-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:48:04.20075-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:48:04.200755-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739684200658000\n"} -{"Time":"2021-09-27T06:48:04.20076-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:48:04.200769-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:48:04.301997-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:48:04.302032-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} 
-{"Time":"2021-09-27T06:48:04.425552-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:48:04.425588-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:48:04.425594-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:48:04.425599-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:48:04.425602-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:48:04.425605-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:48:04.425609-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"FAIL\n"} -{"Time":"2021-09-27T06:48:04.426907-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"FAIL\tgithub.com/onflow/flow-go/crypto/hash\t2.445s\n"} -{"Time":"2021-09-27T06:48:04.42693-04:00","Action":"fail","Package":"github.com/onflow/flow-go/crypto/hash","Elapsed":2.445} \ No newline at end of file +{"Time":"2021-09-27T06:48:02.184192-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:48:02.184694-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:48:02.18473-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.184738-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:48:02.184759-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:48:02.184765-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:48:02.184772-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.184778-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:48:02.184784-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:48:02.184913-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:02.184947-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} +{"Time":"2021-09-27T06:48:02.184971-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} +{"Time":"2021-09-27T06:48:02.184993-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} 
+{"Time":"2021-09-27T06:48:02.185002-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} +{"Time":"2021-09-27T06:48:02.185025-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} +{"Time":"2021-09-27T06:48:02.185037-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} +{"Time":"2021-09-27T06:48:02.185046-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-27T06:48:02.185055-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} +{"Time":"2021-09-27T06:48:02.185063-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} +{"Time":"2021-09-27T06:48:02.185101-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} +{"Time":"2021-09-27T06:48:02.185122-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} +{"Time":"2021-09-27T06:48:02.185145-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} +{"Time":"2021-09-27T06:48:02.185155-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} +{"Time":"2021-09-27T06:48:02.185164-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:02.185182-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.185192-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:48:02.185199-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:48:02.185208-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:48:02.185218-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.185239-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:48:02.185245-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} 
+{"Time":"2021-09-27T06:48:02.185261-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:48:02.185271-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.18528-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:48:02.185289-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:48:02.185297-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:48:02.185307-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739682184421000\n"} +{"Time":"2021-09-27T06:48:02.185335-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.185346-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:48:02.185354-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:48:02.185376-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:48:02.185386-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739682184858000\n"} +{"Time":"2021-09-27T06:48:02.185396-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:48:02.185404-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:48:02.294081-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:48:02.294138-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:48:02.415091-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} +{"Time":"2021-09-27T06:48:02.415117-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.11s)\n"} +{"Time":"2021-09-27T06:48:02.415121-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.11} +{"Time":"2021-09-27T06:48:02.415206-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:48:02.41522-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:48:02.415224-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.23} +{"Time":"2021-09-27T06:48:02.415227-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} 
+{"Time":"2021-09-27T06:48:02.41523-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:48:02.415234-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.415237-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:48:02.415322-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:48:02.415329-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:48:02.415335-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.415344-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:48:02.415349-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:48:02.415354-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:02.415361-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} +{"Time":"2021-09-27T06:48:02.415364-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} +{"Time":"2021-09-27T06:48:02.415368-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} +{"Time":"2021-09-27T06:48:02.415371-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} +{"Time":"2021-09-27T06:48:02.415406-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} +{"Time":"2021-09-27T06:48:02.41542-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} +{"Time":"2021-09-27T06:48:02.415436-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-27T06:48:02.415442-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} +{"Time":"2021-09-27T06:48:02.415454-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} +{"Time":"2021-09-27T06:48:02.415467-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} 
+{"Time":"2021-09-27T06:48:02.415476-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} +{"Time":"2021-09-27T06:48:02.415481-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} +{"Time":"2021-09-27T06:48:02.415487-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} +{"Time":"2021-09-27T06:48:02.415492-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:02.415498-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.415504-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:48:02.415507-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:48:02.41551-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:48:02.415514-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.415517-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:48:02.41552-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:48:02.415548-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:48:02.415557-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.415563-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:48:02.415568-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:48:02.415573-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:48:02.415578-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739682415309000\n"} +{"Time":"2021-09-27T06:48:02.415609-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.415656-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:48:02.415675-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:48:02.41568-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} 
+{"Time":"2021-09-27T06:48:02.415697-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739682415616000\n"} +{"Time":"2021-09-27T06:48:02.415702-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:48:02.415706-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:48:02.51693-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:48:02.516956-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:48:02.636991-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:48:02.637017-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:48:02.637022-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:48:02.637027-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:48:02.637031-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:48:02.637034-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:48:02.637037-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:48:02.637059-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:48:02.637075-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.637095-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:48:02.6371-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:48:02.637103-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:48:02.637117-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.637177-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:48:02.637187-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:48:02.63719-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:02.637196-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} 
+{"Time":"2021-09-27T06:48:02.6372-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} +{"Time":"2021-09-27T06:48:02.637203-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} +{"Time":"2021-09-27T06:48:02.637231-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} +{"Time":"2021-09-27T06:48:02.637241-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} +{"Time":"2021-09-27T06:48:02.637248-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} +{"Time":"2021-09-27T06:48:02.637265-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-27T06:48:02.63729-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} +{"Time":"2021-09-27T06:48:02.637294-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} +{"Time":"2021-09-27T06:48:02.637298-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} +{"Time":"2021-09-27T06:48:02.637301-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} +{"Time":"2021-09-27T06:48:02.637305-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} +{"Time":"2021-09-27T06:48:02.637309-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} +{"Time":"2021-09-27T06:48:02.637334-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:02.63734-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.637343-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:48:02.637346-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:48:02.637352-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:48:02.637358-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} 
+{"Time":"2021-09-27T06:48:02.637376-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:48:02.63738-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:48:02.637383-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:48:02.637386-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.63739-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:48:02.637392-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:48:02.637404-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:48:02.637408-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739682637108000\n"} +{"Time":"2021-09-27T06:48:02.637414-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.637417-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:48:02.63742-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:48:02.637423-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:48:02.637426-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739682637311000\n"} +{"Time":"2021-09-27T06:48:02.637466-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:48:02.637491-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:48:02.736502-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:48:02.736566-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:48:02.857319-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:48:02.85738-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:48:02.857386-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:48:02.8574-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:48:02.857404-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} 
+{"Time":"2021-09-27T06:48:02.857408-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:48:02.857413-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:48:02.857417-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:48:02.857426-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.85743-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:48:02.857433-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:48:02.857437-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:48:02.857441-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.857541-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:48:02.857552-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:48:02.857557-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:02.857565-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} +{"Time":"2021-09-27T06:48:02.857568-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} +{"Time":"2021-09-27T06:48:02.857572-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} +{"Time":"2021-09-27T06:48:02.857588-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} +{"Time":"2021-09-27T06:48:02.857594-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} +{"Time":"2021-09-27T06:48:02.857613-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} +{"Time":"2021-09-27T06:48:02.857617-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-27T06:48:02.85762-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} +{"Time":"2021-09-27T06:48:02.857624-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t 
\t+++ Actual\n"} +{"Time":"2021-09-27T06:48:02.857639-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} +{"Time":"2021-09-27T06:48:02.857644-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} +{"Time":"2021-09-27T06:48:02.857647-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} +{"Time":"2021-09-27T06:48:02.857651-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} +{"Time":"2021-09-27T06:48:02.85766-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:02.857665-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.857668-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:48:02.857671-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:48:02.857674-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:48:02.857678-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.857699-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:48:02.857744-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:48:02.85775-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:48:02.857757-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.857782-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:48:02.857789-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:48:02.857793-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:48:02.857796-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739682857435000\n"} +{"Time":"2021-09-27T06:48:02.857838-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:48:02.857843-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:48:02.857846-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} 
+{"Time":"2021-09-27T06:48:02.857849-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:48:02.857852-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739682857668000\n"} +{"Time":"2021-09-27T06:48:02.857855-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:48:02.857859-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:48:02.956791-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:48:02.956849-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:48:03.076829-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:48:03.076855-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:48:03.076859-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:48:03.076866-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:48:03.076871-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:48:03.076876-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:48:03.07688-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:48:03.077032-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:48:03.077046-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.077053-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:48:03.077056-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:48:03.077059-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:48:03.077063-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.077123-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:48:03.07713-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:48:03.077133-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} 
+{"Time":"2021-09-27T06:48:03.077137-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} +{"Time":"2021-09-27T06:48:03.07714-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} +{"Time":"2021-09-27T06:48:03.077143-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} +{"Time":"2021-09-27T06:48:03.077156-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} +{"Time":"2021-09-27T06:48:03.07716-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} +{"Time":"2021-09-27T06:48:03.077172-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} +{"Time":"2021-09-27T06:48:03.077195-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-27T06:48:03.077199-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} +{"Time":"2021-09-27T06:48:03.077203-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} +{"Time":"2021-09-27T06:48:03.077207-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} +{"Time":"2021-09-27T06:48:03.077212-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} +{"Time":"2021-09-27T06:48:03.077224-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} +{"Time":"2021-09-27T06:48:03.077229-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} +{"Time":"2021-09-27T06:48:03.077232-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:03.077236-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.077239-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:48:03.077273-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:48:03.077286-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} 
+{"Time":"2021-09-27T06:48:03.077294-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.077299-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:48:03.077304-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:48:03.077308-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:48:03.077313-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.077326-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:48:03.077338-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:48:03.077344-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:48:03.077349-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739683077064000\n"} +{"Time":"2021-09-27T06:48:03.077357-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.07736-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:48:03.077363-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:48:03.077366-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:48:03.07737-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739683077268000\n"} +{"Time":"2021-09-27T06:48:03.077808-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:48:03.077826-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:48:03.176088-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:48:03.176175-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:48:03.297398-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:48:03.297426-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:48:03.297431-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:48:03.297439-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} 
+{"Time":"2021-09-27T06:48:03.297442-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:48:03.297445-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:48:03.297448-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:48:03.297467-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:48:03.297477-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.297482-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:48:03.297485-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:48:03.297488-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:48:03.297491-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.297591-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:48:03.297618-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:48:03.297624-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:03.297632-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} +{"Time":"2021-09-27T06:48:03.297635-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} +{"Time":"2021-09-27T06:48:03.297639-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} +{"Time":"2021-09-27T06:48:03.297642-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} +{"Time":"2021-09-27T06:48:03.297646-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} +{"Time":"2021-09-27T06:48:03.297678-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} +{"Time":"2021-09-27T06:48:03.297687-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-27T06:48:03.297704-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} 
+{"Time":"2021-09-27T06:48:03.297709-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} +{"Time":"2021-09-27T06:48:03.297714-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} +{"Time":"2021-09-27T06:48:03.297738-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} +{"Time":"2021-09-27T06:48:03.297751-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} +{"Time":"2021-09-27T06:48:03.297755-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} +{"Time":"2021-09-27T06:48:03.297759-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:03.297763-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.297767-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:48:03.29777-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:48:03.297773-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:48:03.297776-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.297795-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:48:03.2978-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:48:03.297802-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:48:03.297806-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.29781-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:48:03.297813-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:48:03.297816-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:48:03.297819-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739683297507000\n"} +{"Time":"2021-09-27T06:48:03.297825-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.297833-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} 
+{"Time":"2021-09-27T06:48:03.297837-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:48:03.29784-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:48:03.297843-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739683297711000\n"} +{"Time":"2021-09-27T06:48:03.297847-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:48:03.297855-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:48:03.39759-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:48:03.397615-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:48:03.518378-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:48:03.518404-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:48:03.518408-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:48:03.518413-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:48:03.518417-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:48:03.51842-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:48:03.518423-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:48:03.518426-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:48:03.518429-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.518432-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:48:03.518453-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:48:03.518456-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:48:03.51846-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.518629-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:48:03.518642-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:48:03.518648-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN 
TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:03.518658-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} +{"Time":"2021-09-27T06:48:03.518683-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} +{"Time":"2021-09-27T06:48:03.518689-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} +{"Time":"2021-09-27T06:48:03.518693-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} +{"Time":"2021-09-27T06:48:03.518697-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} +{"Time":"2021-09-27T06:48:03.518702-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} +{"Time":"2021-09-27T06:48:03.518705-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-27T06:48:03.518708-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} +{"Time":"2021-09-27T06:48:03.518711-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} +{"Time":"2021-09-27T06:48:03.518747-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} +{"Time":"2021-09-27T06:48:03.51876-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} +{"Time":"2021-09-27T06:48:03.518772-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} +{"Time":"2021-09-27T06:48:03.518778-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} +{"Time":"2021-09-27T06:48:03.51879-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:03.518811-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.518816-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:48:03.51882-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:48:03.518822-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} 
+{"Time":"2021-09-27T06:48:03.518826-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.518835-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:48:03.518838-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:48:03.51884-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:48:03.518844-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.518847-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:48:03.518849-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:48:03.518852-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:48:03.518881-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739683518492000\n"} +{"Time":"2021-09-27T06:48:03.518905-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.518912-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:48:03.518917-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:48:03.518922-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:48:03.518927-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739683518781000\n"} +{"Time":"2021-09-27T06:48:03.518935-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:48:03.51894-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:48:03.618338-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:48:03.618361-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:48:03.740052-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:48:03.740148-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:48:03.740153-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:48:03.740158-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} 
+{"Time":"2021-09-27T06:48:03.740161-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:48:03.740164-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:48:03.740167-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:48:03.740292-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:48:03.740318-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.740325-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:48:03.74033-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:48:03.740335-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:48:03.740341-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.740417-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:48:03.740429-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:48:03.740434-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:03.74045-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} +{"Time":"2021-09-27T06:48:03.740455-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} +{"Time":"2021-09-27T06:48:03.740461-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} +{"Time":"2021-09-27T06:48:03.740465-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} +{"Time":"2021-09-27T06:48:03.740471-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} +{"Time":"2021-09-27T06:48:03.740505-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} +{"Time":"2021-09-27T06:48:03.740515-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-27T06:48:03.74052-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} 
+{"Time":"2021-09-27T06:48:03.740524-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} +{"Time":"2021-09-27T06:48:03.740536-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} +{"Time":"2021-09-27T06:48:03.74054-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} +{"Time":"2021-09-27T06:48:03.740544-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} +{"Time":"2021-09-27T06:48:03.740547-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} +{"Time":"2021-09-27T06:48:03.74055-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:03.740592-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.740606-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:48:03.740611-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:48:03.74062-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:48:03.740682-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.740696-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:48:03.7407-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:48:03.740704-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:48:03.740728-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.74074-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:48:03.740752-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:48:03.740757-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:48:03.740764-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739683740724000\n"} +{"Time":"2021-09-27T06:48:03.740982-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.740995-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} 
+{"Time":"2021-09-27T06:48:03.74102-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:48:03.741028-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:48:03.741037-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739683740970000\n"} +{"Time":"2021-09-27T06:48:03.741063-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:48:03.741078-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:48:03.858998-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:48:03.859054-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:48:03.979881-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.24s)\n"} +{"Time":"2021-09-27T06:48:03.97994-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.12s)\n"} +{"Time":"2021-09-27T06:48:03.979945-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.12} +{"Time":"2021-09-27T06:48:03.97995-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:48:03.979953-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:48:03.979956-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.24} +{"Time":"2021-09-27T06:48:03.979959-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:48:03.979975-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:48:03.979988-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.979992-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:48:03.979995-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:48:03.979998-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:48:03.980002-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.980104-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:48:03.980129-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:48:03.980135-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN 
TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:03.980142-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} +{"Time":"2021-09-27T06:48:03.980152-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} +{"Time":"2021-09-27T06:48:03.980156-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} +{"Time":"2021-09-27T06:48:03.980159-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} +{"Time":"2021-09-27T06:48:03.980163-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} +{"Time":"2021-09-27T06:48:03.980167-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} +{"Time":"2021-09-27T06:48:03.980192-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-27T06:48:03.980205-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} +{"Time":"2021-09-27T06:48:03.980211-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} +{"Time":"2021-09-27T06:48:03.980216-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} +{"Time":"2021-09-27T06:48:03.980221-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} +{"Time":"2021-09-27T06:48:03.980244-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} +{"Time":"2021-09-27T06:48:03.980257-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} +{"Time":"2021-09-27T06:48:03.980261-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:03.980266-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.980271-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:48:03.980274-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:48:03.980277-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} 
+{"Time":"2021-09-27T06:48:03.98028-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.980284-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:48:03.980286-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:48:03.980307-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:48:03.980317-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.980323-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:48:03.98033-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:48:03.980335-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:48:03.98034-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739683980033000\n"} +{"Time":"2021-09-27T06:48:03.980388-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:48:03.980399-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:48:03.980403-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:48:03.980408-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:48:03.980411-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739683980266000\n"} +{"Time":"2021-09-27T06:48:03.980416-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:48:03.98042-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:48:04.080285-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:48:04.080313-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:48:04.200352-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:48:04.20038-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:48:04.200385-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:48:04.20039-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} 
+{"Time":"2021-09-27T06:48:04.200393-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:48:04.200397-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:48:04.200401-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:48:04.200405-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:48:04.200422-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:04.200426-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:48:04.200429-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:48:04.200433-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:48:04.200437-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:04.200554-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:48:04.200568-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:48:04.200572-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:04.200579-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" hash_test.go:41: \n"} +{"Time":"2021-09-27T06:48:04.200583-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError Trace:\thash_test.go:41\n"} +{"Time":"2021-09-27T06:48:04.200586-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tError: \tNot equal: \n"} +{"Time":"2021-09-27T06:48:04.20059-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \texpected: hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x9}\n"} +{"Time":"2021-09-27T06:48:04.200596-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tactual : hash.Hash{0x9f, 0x86, 0xd0, 0x81, 0x88, 0x4c, 0x7d, 0x65, 0x9a, 0x2f, 0xea, 0xa0, 0xc5, 0x5a, 0xd0, 0x15, 0xa3, 0xbf, 0x4f, 0x1b, 0x2b, 0xb, 0x82, 0x2c, 0xd1, 0x5d, 0x6c, 0x15, 0xb0, 0xf0, 0xa, 0x8}\n"} +{"Time":"2021-09-27T06:48:04.200609-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t\n"} +{"Time":"2021-09-27T06:48:04.200613-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \tDiff:\n"} +{"Time":"2021-09-27T06:48:04.200616-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t--- Expected\n"} 
+{"Time":"2021-09-27T06:48:04.20062-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+++ Actual\n"} +{"Time":"2021-09-27T06:48:04.200634-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t@@ -1,2 +1,2 @@\n"} +{"Time":"2021-09-27T06:48:04.200651-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t-(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a09\n"} +{"Time":"2021-09-27T06:48:04.200657-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t+(hash.Hash) (len=32) 0x9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08\n"} +{"Time":"2021-09-27T06:48:04.200662-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \t \t \n"} +{"Time":"2021-09-27T06:48:04.200666-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":" \tTest: \tTestSanitySha2_256\n"} +{"Time":"2021-09-27T06:48:04.200671-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- FAIL: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:48:04.200677-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:48:04.200682-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:48:04.200687-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:48:04.200695-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:48:04.200698-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:48:04.200701-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:48:04.200704-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:48:04.200707-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:48:04.20071-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:48:04.200713-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:48:04.200716-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:48:04.200722-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739684200452000\n"} +{"Time":"2021-09-27T06:48:04.200737-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:48:04.200741-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} 
+{"Time":"2021-09-27T06:48:04.200745-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:48:04.20075-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:48:04.200755-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739684200658000\n"} +{"Time":"2021-09-27T06:48:04.20076-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:48:04.200769-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:48:04.301997-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:48:04.302032-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:48:04.425552-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:48:04.425588-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:48:04.425594-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:48:04.425599-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:48:04.425602-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:48:04.425605-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:48:04.425609-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"FAIL\n"} +{"Time":"2021-09-27T06:48:04.426907-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"FAIL\tgithub.com/onflow/crypto/hash\t2.445s\n"} +{"Time":"2021-09-27T06:48:04.42693-04:00","Action":"fail","Package":"github.com/onflow/crypto/hash","Elapsed":2.445} \ No newline at end of file diff --git a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-10-count-pass.json b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-10-count-pass.json index 4b1a17de6d5..72aab10fe2b 100644 --- a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-10-count-pass.json +++ b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-10-count-pass.json @@ -1,383 +1,383 @@ -{"Time":"2021-09-27T06:45:52.470262-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:45:52.470682-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:45:52.470718-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.470724-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} 
-{"Time":"2021-09-27T06:45:52.470741-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:45:52.470745-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:45:52.470751-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.470756-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:45:52.47076-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:45:52.470876-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:45:52.470895-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.470901-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:45:52.470907-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:45:52.470911-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:45:52.470917-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.47093-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:45:52.470935-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:45:52.470943-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:45:52.470949-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.470953-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:45:52.470957-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:45:52.470981-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:45:52.470992-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739552470379000\n"} -{"Time":"2021-09-27T06:45:52.471024-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.471029-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} 
-{"Time":"2021-09-27T06:45:52.471034-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:45:52.471042-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:45:52.471046-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739552470723000\n"} -{"Time":"2021-09-27T06:45:52.471051-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:45:52.471055-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:45:52.575573-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:45:52.575605-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:45:52.696742-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} -{"Time":"2021-09-27T06:45:52.696768-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:45:52.696773-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:45:52.696784-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:45:52.696787-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:45:52.69679-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.23} -{"Time":"2021-09-27T06:45:52.696793-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:45:52.696796-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:45:52.6968-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.696803-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:45:52.696806-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:45:52.696809-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:45:52.697192-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.697219-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} 
-{"Time":"2021-09-27T06:45:52.697226-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:45:52.697243-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:45:52.697259-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.697266-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:45:52.697276-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:45:52.697281-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:45:52.697286-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.69729-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:45:52.697295-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:45:52.6973-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:45:52.697305-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.697309-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:45:52.697314-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:45:52.697318-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:45:52.697322-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739552696815000\n"} -{"Time":"2021-09-27T06:45:52.697325-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.697328-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:45:52.697331-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:45:52.697335-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:45:52.697348-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739552697024000\n"} -{"Time":"2021-09-27T06:45:52.697359-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} 
-{"Time":"2021-09-27T06:45:52.697362-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:45:52.796063-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:45:52.79612-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:45:52.917391-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:45:52.917417-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:45:52.917421-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:45:52.917427-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:45:52.917431-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:45:52.917436-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:45:52.917441-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:45:52.917446-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:45:52.917449-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.917452-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:45:52.917455-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:45:52.917458-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:45:52.91757-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.917587-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:45:52.917591-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:45:52.917595-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:45:52.917599-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.917602-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} 
-{"Time":"2021-09-27T06:45:52.917605-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:45:52.917608-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:45:52.917611-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.91762-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:45:52.917624-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:45:52.917631-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:45:52.917635-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.917647-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:45:52.91765-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:45:52.917653-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:45:52.917658-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739552917474000\n"} -{"Time":"2021-09-27T06:45:52.917717-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:45:52.917729-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:45:52.917733-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:45:52.917737-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:45:52.917742-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739552917708000\n"} -{"Time":"2021-09-27T06:45:52.917753-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:45:52.917756-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:45:53.018094-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:45:53.018182-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:45:53.140382-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} 
-{"Time":"2021-09-27T06:45:53.140409-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:45:53.140413-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:45:53.140425-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:45:53.140428-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:45:53.140431-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:45:53.140434-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:45:53.140447-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:45:53.140452-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.140455-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:45:53.140458-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:45:53.14046-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:45:53.140464-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.140578-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:45:53.140591-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:45:53.140599-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:45:53.14062-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.140635-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:45:53.140641-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:45:53.140646-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:45:53.140651-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.140656-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} 
-{"Time":"2021-09-27T06:45:53.14066-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:45:53.140689-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:45:53.1407-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.140705-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:45:53.14071-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:45:53.140718-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:45:53.140724-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739553140451000\n"} -{"Time":"2021-09-27T06:45:53.140754-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.140765-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:45:53.140768-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:45:53.140772-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:45:53.140775-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739553140702000\n"} -{"Time":"2021-09-27T06:45:53.140779-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:45:53.141538-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:45:53.240746-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:45:53.24077-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:45:53.362197-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:45:53.362225-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:45:53.362229-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:45:53.362234-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:45:53.362237-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} 
-{"Time":"2021-09-27T06:45:53.362241-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:45:53.362244-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:45:53.362278-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:45:53.362284-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.362287-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:45:53.36229-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:45:53.362293-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:45:53.362298-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.362414-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:45:53.362433-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:45:53.362446-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:45:53.362463-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.362483-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:45:53.362487-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:45:53.36249-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:45:53.362494-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.362498-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:45:53.362502-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:45:53.362505-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:45:53.362508-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.36252-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} 
-{"Time":"2021-09-27T06:45:53.362523-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:45:53.362526-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:45:53.362529-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739553362249000\n"} -{"Time":"2021-09-27T06:45:53.362534-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.36255-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:45:53.362555-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:45:53.362558-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:45:53.362561-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739553362497000\n"} -{"Time":"2021-09-27T06:45:53.362565-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:45:53.362568-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:45:53.484859-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:45:53.484885-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:45:53.605156-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.24s)\n"} -{"Time":"2021-09-27T06:45:53.605222-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.12s)\n"} -{"Time":"2021-09-27T06:45:53.605251-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.12} -{"Time":"2021-09-27T06:45:53.605259-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:45:53.605264-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:45:53.605267-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.24} -{"Time":"2021-09-27T06:45:53.605271-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:45:53.605274-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:45:53.605379-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} 
-{"Time":"2021-09-27T06:45:53.605388-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:45:53.605392-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:45:53.605395-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:45:53.605399-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.605402-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:45:53.605416-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:45:53.60542-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:45:53.605423-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.605426-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:45:53.605429-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:45:53.605432-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:45:53.605444-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.605447-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:45:53.60545-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:45:53.605458-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:45:53.605463-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.605473-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:45:53.605475-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:45:53.605478-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:45:53.605482-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739553605325000\n"} -{"Time":"2021-09-27T06:45:53.605642-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} 
-{"Time":"2021-09-27T06:45:53.605657-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:45:53.605663-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:45:53.605668-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:45:53.605673-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739553605582000\n"} -{"Time":"2021-09-27T06:45:53.605678-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:45:53.605682-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:45:53.704399-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:45:53.704425-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:45:53.826472-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:45:53.826499-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:45:53.826504-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:45:53.82652-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:45:53.826523-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:45:53.826527-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:45:53.826531-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:45:53.826534-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:45:53.826537-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.82654-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:45:53.826546-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:45:53.826549-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:45:53.826677-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} 
-{"Time":"2021-09-27T06:45:53.826691-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:45:53.826711-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:45:53.826715-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:45:53.826721-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.826724-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:45:53.826727-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:45:53.82673-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:45:53.826733-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.826737-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:45:53.82674-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:45:53.826752-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:45:53.82676-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.826763-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:45:53.826766-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:45:53.826768-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:45:53.826774-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739553826502000\n"} -{"Time":"2021-09-27T06:45:53.826778-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:45:53.826781-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:45:53.826783-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:45:53.826786-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:45:53.826789-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739553826733000\n"} 
-{"Time":"2021-09-27T06:45:53.826807-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:45:53.82681-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:45:53.92865-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:45:53.928683-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:45:54.054194-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} -{"Time":"2021-09-27T06:45:54.054225-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:45:54.054231-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:45:54.054238-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.13s)\n"} -{"Time":"2021-09-27T06:45:54.054251-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.13} -{"Time":"2021-09-27T06:45:54.054272-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.23} -{"Time":"2021-09-27T06:45:54.054276-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:45:54.054279-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:45:54.05429-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.054299-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:45:54.054302-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:45:54.054364-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:45:54.054376-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.05438-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:45:54.054383-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:45:54.054386-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:45:54.054389-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} 
-{"Time":"2021-09-27T06:45:54.054421-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:45:54.054426-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:45:54.054429-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:45:54.054435-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.054438-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:45:54.054441-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:45:54.054444-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:45:54.054447-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.05445-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:45:54.054453-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:45:54.054464-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:45:54.054468-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739554054239000\n"} -{"Time":"2021-09-27T06:45:54.054481-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.054485-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:45:54.054488-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:45:54.054491-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:45:54.054495-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739554054464000\n"} -{"Time":"2021-09-27T06:45:54.054499-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:45:54.054501-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:45:54.15541-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:45:54.155492-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} 
-{"Time":"2021-09-27T06:45:54.27999-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} -{"Time":"2021-09-27T06:45:54.280024-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:45:54.280032-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:45:54.280037-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:45:54.280041-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:45:54.280045-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.23} -{"Time":"2021-09-27T06:45:54.280049-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:45:54.280086-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:45:54.280103-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.28011-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:45:54.280113-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:45:54.280126-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:45:54.280131-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.280204-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:45:54.280214-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:45:54.280239-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:45:54.280248-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.280252-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:45:54.280256-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:45:54.280259-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:45:54.280262-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} 
-{"Time":"2021-09-27T06:45:54.280265-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:45:54.280268-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:45:54.280271-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:45:54.280282-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.280301-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:45:54.280306-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:45:54.280308-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:45:54.280311-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739554280043000\n"} -{"Time":"2021-09-27T06:45:54.280334-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.280338-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:45:54.280341-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:45:54.280344-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:45:54.280348-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739554280256000\n"} -{"Time":"2021-09-27T06:45:54.280351-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:45:54.280354-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:45:54.380565-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:45:54.380592-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:45:54.500675-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:45:54.500703-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:45:54.50071-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:45:54.500717-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} 
-{"Time":"2021-09-27T06:45:54.500721-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:45:54.500739-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:45:54.500742-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-09-27T06:45:54.500745-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-09-27T06:45:54.500748-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.500751-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-09-27T06:45:54.500754-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-09-27T06:45:54.500862-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-09-27T06:45:54.500873-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.500896-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-09-27T06:45:54.500913-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-09-27T06:45:54.500917-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-09-27T06:45:54.500923-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.500926-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-09-27T06:45:54.500929-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-09-27T06:45:54.500932-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-09-27T06:45:54.500935-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.500957-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-09-27T06:45:54.500961-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-09-27T06:45:54.500964-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-09-27T06:45:54.500968-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} 
-{"Time":"2021-09-27T06:45:54.500971-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-09-27T06:45:54.500974-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-09-27T06:45:54.500977-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-09-27T06:45:54.50098-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739554500707000\n"} -{"Time":"2021-09-27T06:45:54.500988-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-09-27T06:45:54.501007-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-09-27T06:45:54.501011-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-09-27T06:45:54.501014-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-09-27T06:45:54.501017-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739554500935000\n"} -{"Time":"2021-09-27T06:45:54.501023-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-09-27T06:45:54.501026-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-09-27T06:45:54.59914-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-09-27T06:45:54.599167-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-09-27T06:45:54.719353-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-09-27T06:45:54.719385-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-09-27T06:45:54.71939-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-09-27T06:45:54.719396-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-09-27T06:45:54.719399-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-09-27T06:45:54.719402-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-09-27T06:45:54.719405-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"PASS\n"} -{"Time":"2021-09-27T06:45:54.720685-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"ok \tgithub.com/onflow/flow-go/crypto/hash\t2.541s\n"} -{"Time":"2021-09-27T06:45:54.720757-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Elapsed":2.541} \ No 
newline at end of file +{"Time":"2021-09-27T06:45:52.470262-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:45:52.470682-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:45:52.470718-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.470724-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:45:52.470741-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:45:52.470745-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:45:52.470751-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.470756-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:45:52.47076-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:45:52.470876-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:45:52.470895-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.470901-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:45:52.470907-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:45:52.470911-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:45:52.470917-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.47093-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:45:52.470935-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:45:52.470943-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:45:52.470949-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.470953-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:45:52.470957-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:45:52.470981-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} 
+{"Time":"2021-09-27T06:45:52.470992-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739552470379000\n"} +{"Time":"2021-09-27T06:45:52.471024-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.471029-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:45:52.471034-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:45:52.471042-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:45:52.471046-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739552470723000\n"} +{"Time":"2021-09-27T06:45:52.471051-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:45:52.471055-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:45:52.575573-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:45:52.575605-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:45:52.696742-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} +{"Time":"2021-09-27T06:45:52.696768-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:45:52.696773-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:45:52.696784-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:45:52.696787-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:45:52.69679-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.23} +{"Time":"2021-09-27T06:45:52.696793-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:45:52.696796-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:45:52.6968-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.696803-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:45:52.696806-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:45:52.696809-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} 
+{"Time":"2021-09-27T06:45:52.697192-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.697219-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:45:52.697226-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:45:52.697243-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:45:52.697259-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.697266-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:45:52.697276-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:45:52.697281-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:45:52.697286-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.69729-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:45:52.697295-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:45:52.6973-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:45:52.697305-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.697309-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:45:52.697314-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:45:52.697318-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:45:52.697322-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739552696815000\n"} +{"Time":"2021-09-27T06:45:52.697325-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.697328-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:45:52.697331-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:45:52.697335-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:45:52.697348-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739552697024000\n"} 
+{"Time":"2021-09-27T06:45:52.697359-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:45:52.697362-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:45:52.796063-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:45:52.79612-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:45:52.917391-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:45:52.917417-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:45:52.917421-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:45:52.917427-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:45:52.917431-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:45:52.917436-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:45:52.917441-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:45:52.917446-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:45:52.917449-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.917452-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:45:52.917455-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:45:52.917458-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:45:52.91757-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.917587-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:45:52.917591-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:45:52.917595-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:45:52.917599-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.917602-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} 
+{"Time":"2021-09-27T06:45:52.917605-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:45:52.917608-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:45:52.917611-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.91762-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:45:52.917624-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:45:52.917631-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:45:52.917635-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.917647-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:45:52.91765-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:45:52.917653-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:45:52.917658-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739552917474000\n"} +{"Time":"2021-09-27T06:45:52.917717-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:45:52.917729-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:45:52.917733-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:45:52.917737-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:45:52.917742-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739552917708000\n"} +{"Time":"2021-09-27T06:45:52.917753-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:45:52.917756-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:45:53.018094-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:45:53.018182-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:45:53.140382-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:45:53.140409-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} 
+{"Time":"2021-09-27T06:45:53.140413-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:45:53.140425-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:45:53.140428-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:45:53.140431-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:45:53.140434-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:45:53.140447-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:45:53.140452-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.140455-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:45:53.140458-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:45:53.14046-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:45:53.140464-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.140578-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:45:53.140591-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:45:53.140599-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:45:53.14062-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.140635-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:45:53.140641-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:45:53.140646-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:45:53.140651-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.140656-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:45:53.14066-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:45:53.140689-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} 
+{"Time":"2021-09-27T06:45:53.1407-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.140705-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:45:53.14071-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:45:53.140718-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:45:53.140724-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739553140451000\n"} +{"Time":"2021-09-27T06:45:53.140754-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.140765-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:45:53.140768-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:45:53.140772-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:45:53.140775-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739553140702000\n"} +{"Time":"2021-09-27T06:45:53.140779-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:45:53.141538-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:45:53.240746-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:45:53.24077-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:45:53.362197-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:45:53.362225-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:45:53.362229-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:45:53.362234-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:45:53.362237-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:45:53.362241-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:45:53.362244-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:45:53.362278-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} 
+{"Time":"2021-09-27T06:45:53.362284-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.362287-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:45:53.36229-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:45:53.362293-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:45:53.362298-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.362414-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:45:53.362433-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:45:53.362446-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:45:53.362463-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.362483-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:45:53.362487-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:45:53.36249-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:45:53.362494-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.362498-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:45:53.362502-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:45:53.362505-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:45:53.362508-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.36252-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:45:53.362523-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:45:53.362526-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:45:53.362529-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739553362249000\n"} +{"Time":"2021-09-27T06:45:53.362534-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} 
+{"Time":"2021-09-27T06:45:53.36255-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:45:53.362555-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:45:53.362558-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:45:53.362561-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739553362497000\n"} +{"Time":"2021-09-27T06:45:53.362565-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:45:53.362568-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:45:53.484859-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:45:53.484885-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:45:53.605156-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.24s)\n"} +{"Time":"2021-09-27T06:45:53.605222-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.12s)\n"} +{"Time":"2021-09-27T06:45:53.605251-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.12} +{"Time":"2021-09-27T06:45:53.605259-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:45:53.605264-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:45:53.605267-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.24} +{"Time":"2021-09-27T06:45:53.605271-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:45:53.605274-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:45:53.605379-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.605388-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:45:53.605392-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:45:53.605395-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:45:53.605399-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.605402-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:45:53.605416-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} 
+{"Time":"2021-09-27T06:45:53.60542-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:45:53.605423-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.605426-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:45:53.605429-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:45:53.605432-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:45:53.605444-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.605447-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:45:53.60545-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:45:53.605458-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:45:53.605463-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.605473-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:45:53.605475-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:45:53.605478-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:45:53.605482-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739553605325000\n"} +{"Time":"2021-09-27T06:45:53.605642-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.605657-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:45:53.605663-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:45:53.605668-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:45:53.605673-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739553605582000\n"} +{"Time":"2021-09-27T06:45:53.605678-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:45:53.605682-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:45:53.704399-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} 
+{"Time":"2021-09-27T06:45:53.704425-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:45:53.826472-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:45:53.826499-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:45:53.826504-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:45:53.82652-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:45:53.826523-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:45:53.826527-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:45:53.826531-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:45:53.826534-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:45:53.826537-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.82654-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:45:53.826546-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:45:53.826549-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:45:53.826677-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.826691-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:45:53.826711-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:45:53.826715-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:45:53.826721-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.826724-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:45:53.826727-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:45:53.82673-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:45:53.826733-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} 
+{"Time":"2021-09-27T06:45:53.826737-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:45:53.82674-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:45:53.826752-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:45:53.82676-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.826763-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:45:53.826766-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:45:53.826768-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:45:53.826774-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739553826502000\n"} +{"Time":"2021-09-27T06:45:53.826778-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:45:53.826781-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:45:53.826783-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:45:53.826786-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:45:53.826789-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739553826733000\n"} +{"Time":"2021-09-27T06:45:53.826807-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:45:53.82681-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:45:53.92865-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:45:53.928683-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:45:54.054194-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} +{"Time":"2021-09-27T06:45:54.054225-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:45:54.054231-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:45:54.054238-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.13s)\n"} +{"Time":"2021-09-27T06:45:54.054251-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.13} 
+{"Time":"2021-09-27T06:45:54.054272-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.23} +{"Time":"2021-09-27T06:45:54.054276-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:45:54.054279-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:45:54.05429-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.054299-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:45:54.054302-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:45:54.054364-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:45:54.054376-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.05438-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:45:54.054383-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:45:54.054386-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:45:54.054389-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.054421-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:45:54.054426-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:45:54.054429-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:45:54.054435-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.054438-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:45:54.054441-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:45:54.054444-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:45:54.054447-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.05445-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:45:54.054453-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:45:54.054464-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"}
+{"Time":"2021-09-27T06:45:54.054468-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739554054239000\n"} +{"Time":"2021-09-27T06:45:54.054481-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.054485-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:45:54.054488-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:45:54.054491-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:45:54.054495-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739554054464000\n"} +{"Time":"2021-09-27T06:45:54.054499-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:45:54.054501-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:45:54.15541-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:45:54.155492-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:45:54.27999-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} +{"Time":"2021-09-27T06:45:54.280024-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:45:54.280032-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:45:54.280037-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:45:54.280041-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:45:54.280045-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.23} +{"Time":"2021-09-27T06:45:54.280049-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:45:54.280086-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:45:54.280103-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.28011-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:45:54.280113-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:45:54.280126-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"}
+{"Time":"2021-09-27T06:45:54.280131-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.280204-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:45:54.280214-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:45:54.280239-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:45:54.280248-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.280252-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-09-27T06:45:54.280256-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:45:54.280259-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:45:54.280262-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.280265-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:45:54.280268-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:45:54.280271-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:45:54.280282-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.280301-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:45:54.280306-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:45:54.280308-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:45:54.280311-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739554280043000\n"} +{"Time":"2021-09-27T06:45:54.280334-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.280338-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:45:54.280341-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:45:54.280344-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:45:54.280348-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739554280256000\n"} 
+{"Time":"2021-09-27T06:45:54.280351-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:45:54.280354-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:45:54.380565-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:45:54.380592-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:45:54.500675-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:45:54.500703-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-09-27T06:45:54.50071-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:45:54.500717-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:45:54.500721-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:45:54.500739-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:45:54.500742-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-09-27T06:45:54.500745-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-09-27T06:45:54.500748-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.500751-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-09-27T06:45:54.500754-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-09-27T06:45:54.500862-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-09-27T06:45:54.500873-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.500896-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-09-27T06:45:54.500913-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-09-27T06:45:54.500917-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-09-27T06:45:54.500923-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.500926-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} 
+{"Time":"2021-09-27T06:45:54.500929-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-09-27T06:45:54.500932-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-09-27T06:45:54.500935-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.500957-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-09-27T06:45:54.500961-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-09-27T06:45:54.500964-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-09-27T06:45:54.500968-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.500971-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-09-27T06:45:54.500974-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-09-27T06:45:54.500977-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-09-27T06:45:54.50098-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1632739554500707000\n"} +{"Time":"2021-09-27T06:45:54.500988-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-09-27T06:45:54.501007-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-09-27T06:45:54.501011-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-09-27T06:45:54.501014-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-09-27T06:45:54.501017-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1632739554500935000\n"} +{"Time":"2021-09-27T06:45:54.501023-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-09-27T06:45:54.501026-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-09-27T06:45:54.59914-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-09-27T06:45:54.599167-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-09-27T06:45:54.719353-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-09-27T06:45:54.719385-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} 
+{"Time":"2021-09-27T06:45:54.71939-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-09-27T06:45:54.719396-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-09-27T06:45:54.719399-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-09-27T06:45:54.719402-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-09-27T06:45:54.719405-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"PASS\n"} +{"Time":"2021-09-27T06:45:54.720685-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"ok \tgithub.com/onflow/crypto/hash\t2.541s\n"} +{"Time":"2021-09-27T06:45:54.720757-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Elapsed":2.541} \ No newline at end of file diff --git a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-2-count-pass.json b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-2-count-pass.json index e4001bb454a..50582a7c37e 100644 --- a/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-2-count-pass.json +++ b/tools/test_monitor/testdata/summary1/raw/test-result-crypto-hash-2-count-pass.json @@ -1,79 +1,79 @@ -{"Time":"2021-10-04T10:34:10.203071-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-10-04T10:34:10.203659-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-10-04T10:34:10.203707-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.203717-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-10-04T10:34:10.203809-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-10-04T10:34:10.203817-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-10-04T10:34:10.203824-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.20383-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-10-04T10:34:10.203837-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-10-04T10:34:10.203843-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-10-04T10:34:10.20385-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.203858-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} 
-{"Time":"2021-10-04T10:34:10.203865-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-10-04T10:34:10.203871-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-10-04T10:34:10.203878-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.203884-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} -{"Time":"2021-10-04T10:34:10.203891-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-10-04T10:34:10.203897-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-10-04T10:34:10.203904-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.20391-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-10-04T10:34:10.203916-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-10-04T10:34:10.203923-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-10-04T10:34:10.203933-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1633358050203144000\n"} -{"Time":"2021-10-04T10:34:10.203947-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.203954-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-10-04T10:34:10.203977-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-10-04T10:34:10.203987-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-10-04T10:34:10.203991-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1633358050203374000\n"} -{"Time":"2021-10-04T10:34:10.203999-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-10-04T10:34:10.204004-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-10-04T10:34:10.306005-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-10-04T10:34:10.306046-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-10-04T10:34:10.430169-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} 
-{"Time":"2021-10-04T10:34:10.430224-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-10-04T10:34:10.430234-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-10-04T10:34:10.430278-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-10-04T10:34:10.430284-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} -{"Time":"2021-10-04T10:34:10.430289-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.23} -{"Time":"2021-10-04T10:34:10.430295-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256"} -{"Time":"2021-10-04T10:34:10.430299-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} -{"Time":"2021-10-04T10:34:10.430305-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.430309-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} -{"Time":"2021-10-04T10:34:10.430314-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384"} -{"Time":"2021-10-04T10:34:10.430318-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} -{"Time":"2021-10-04T10:34:10.430337-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.430343-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} -{"Time":"2021-10-04T10:34:10.430347-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256"} -{"Time":"2021-10-04T10:34:10.430351-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} -{"Time":"2021-10-04T10:34:10.430356-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.430362-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} -{"Time":"2021-10-04T10:34:10.430384-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384"} -{"Time":"2021-10-04T10:34:10.430389-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} -{"Time":"2021-10-04T10:34:10.430395-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.4304-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} 
-{"Time":"2021-10-04T10:34:10.430404-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128"} -{"Time":"2021-10-04T10:34:10.430408-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} -{"Time":"2021-10-04T10:34:10.430413-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.430418-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} -{"Time":"2021-10-04T10:34:10.430423-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI"} -{"Time":"2021-10-04T10:34:10.430427-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} -{"Time":"2021-10-04T10:34:10.430432-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1633358050430256000\n"} -{"Time":"2021-10-04T10:34:10.430469-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} -{"Time":"2021-10-04T10:34:10.430483-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestHashersAPI","Elapsed":0} -{"Time":"2021-10-04T10:34:10.430488-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3"} -{"Time":"2021-10-04T10:34:10.430493-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} -{"Time":"2021-10-04T10:34:10.430499-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1633358050430467000\n"} -{"Time":"2021-10-04T10:34:10.430504-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256"} -{"Time":"2021-10-04T10:34:10.430509-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} -{"Time":"2021-10-04T10:34:10.530207-04:00","Action":"run","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384"} -{"Time":"2021-10-04T10:34:10.530243-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} -{"Time":"2021-10-04T10:34:10.654891-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} -{"Time":"2021-10-04T10:34:10.654967-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} -{"Time":"2021-10-04T10:34:10.654975-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} -{"Time":"2021-10-04T10:34:10.654986-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} -{"Time":"2021-10-04T10:34:10.654991-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} 
-{"Time":"2021-10-04T10:34:10.655024-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Test":"TestSha3","Elapsed":0.22} -{"Time":"2021-10-04T10:34:10.65503-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"PASS\n"} -{"Time":"2021-10-04T10:34:10.656297-04:00","Action":"output","Package":"github.com/onflow/flow-go/crypto/hash","Output":"ok \tgithub.com/onflow/flow-go/crypto/hash\t0.642s\n"} -{"Time":"2021-10-04T10:34:10.656366-04:00","Action":"pass","Package":"github.com/onflow/flow-go/crypto/hash","Elapsed":0.643} +{"Time":"2021-10-04T10:34:10.203071-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-10-04T10:34:10.203659-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-10-04T10:34:10.203707-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"} +{"Time":"2021-10-04T10:34:10.203717-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-10-04T10:34:10.203809-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-10-04T10:34:10.203817-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-10-04T10:34:10.203824-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-10-04T10:34:10.20383-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-10-04T10:34:10.203837-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-10-04T10:34:10.203843-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-10-04T10:34:10.20385-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-10-04T10:34:10.203858-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-10-04T10:34:10.203865-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-10-04T10:34:10.203871-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-10-04T10:34:10.203878-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-10-04T10:34:10.203884-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-10-04T10:34:10.203891-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-10-04T10:34:10.203897-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-10-04T10:34:10.203904-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"}
+{"Time":"2021-10-04T10:34:10.20391-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-10-04T10:34:10.203916-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-10-04T10:34:10.203923-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-10-04T10:34:10.203933-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1633358050203144000\n"} +{"Time":"2021-10-04T10:34:10.203947-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-10-04T10:34:10.203954-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} +{"Time":"2021-10-04T10:34:10.203977-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-10-04T10:34:10.203987-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-10-04T10:34:10.203991-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1633358050203374000\n"} +{"Time":"2021-10-04T10:34:10.203999-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-10-04T10:34:10.204004-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-10-04T10:34:10.306005-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-10-04T10:34:10.306046-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-10-04T10:34:10.430169-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.23s)\n"} +{"Time":"2021-10-04T10:34:10.430224-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-10-04T10:34:10.430234-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-10-04T10:34:10.430278-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-10-04T10:34:10.430284-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-10-04T10:34:10.430289-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.23} +{"Time":"2021-10-04T10:34:10.430295-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256"} +{"Time":"2021-10-04T10:34:10.430299-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"=== RUN TestSanitySha3_256\n"} +{"Time":"2021-10-04T10:34:10.430305-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Output":"--- PASS: TestSanitySha3_256 (0.00s)\n"}
+{"Time":"2021-10-04T10:34:10.430309-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_256","Elapsed":0} +{"Time":"2021-10-04T10:34:10.430314-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384"} +{"Time":"2021-10-04T10:34:10.430318-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"=== RUN TestSanitySha3_384\n"} +{"Time":"2021-10-04T10:34:10.430337-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Output":"--- PASS: TestSanitySha3_384 (0.00s)\n"} +{"Time":"2021-10-04T10:34:10.430343-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha3_384","Elapsed":0} +{"Time":"2021-10-04T10:34:10.430347-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256"} +{"Time":"2021-10-04T10:34:10.430351-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"=== RUN TestSanitySha2_256\n"} +{"Time":"2021-10-04T10:34:10.430356-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Output":"--- PASS: TestSanitySha2_256 (0.00s)\n"} +{"Time":"2021-10-04T10:34:10.430362-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_256","Elapsed":0} +{"Time":"2021-10-04T10:34:10.430384-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384"} +{"Time":"2021-10-04T10:34:10.430389-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"=== RUN TestSanitySha2_384\n"} +{"Time":"2021-10-04T10:34:10.430395-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Output":"--- PASS: TestSanitySha2_384 (0.00s)\n"} +{"Time":"2021-10-04T10:34:10.4304-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanitySha2_384","Elapsed":0} +{"Time":"2021-10-04T10:34:10.430404-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128"} +{"Time":"2021-10-04T10:34:10.430408-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"=== RUN TestSanityKmac128\n"} +{"Time":"2021-10-04T10:34:10.430413-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Output":"--- PASS: TestSanityKmac128 (0.00s)\n"} +{"Time":"2021-10-04T10:34:10.430418-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSanityKmac128","Elapsed":0} +{"Time":"2021-10-04T10:34:10.430423-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI"} +{"Time":"2021-10-04T10:34:10.430427-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"=== RUN TestHashersAPI\n"} +{"Time":"2021-10-04T10:34:10.430432-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":" hash_test.go:114: math rand seed is 1633358050430256000\n"} +{"Time":"2021-10-04T10:34:10.430469-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Output":"--- PASS: TestHashersAPI (0.00s)\n"} +{"Time":"2021-10-04T10:34:10.430483-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestHashersAPI","Elapsed":0} 
+{"Time":"2021-10-04T10:34:10.430488-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3"} +{"Time":"2021-10-04T10:34:10.430493-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"=== RUN TestSha3\n"} +{"Time":"2021-10-04T10:34:10.430499-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":" hash_test.go:158: math rand seed is 1633358050430467000\n"} +{"Time":"2021-10-04T10:34:10.430504-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256"} +{"Time":"2021-10-04T10:34:10.430509-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":"=== RUN TestSha3/SHA3_256\n"} +{"Time":"2021-10-04T10:34:10.530207-04:00","Action":"run","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384"} +{"Time":"2021-10-04T10:34:10.530243-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":"=== RUN TestSha3/SHA3_384\n"} +{"Time":"2021-10-04T10:34:10.654891-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Output":"--- PASS: TestSha3 (0.22s)\n"} +{"Time":"2021-10-04T10:34:10.654967-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Output":" --- PASS: TestSha3/SHA3_256 (0.10s)\n"} +{"Time":"2021-10-04T10:34:10.654975-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_256","Elapsed":0.1} +{"Time":"2021-10-04T10:34:10.654986-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Output":" --- PASS: TestSha3/SHA3_384 (0.12s)\n"} +{"Time":"2021-10-04T10:34:10.654991-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3/SHA3_384","Elapsed":0.12} +{"Time":"2021-10-04T10:34:10.655024-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Test":"TestSha3","Elapsed":0.22} +{"Time":"2021-10-04T10:34:10.65503-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"PASS\n"} +{"Time":"2021-10-04T10:34:10.656297-04:00","Action":"output","Package":"github.com/onflow/crypto/hash","Output":"ok \tgithub.com/onflow/crypto/hash\t0.642s\n"} +{"Time":"2021-10-04T10:34:10.656366-04:00","Action":"pass","Package":"github.com/onflow/crypto/hash","Elapsed":0.643} diff --git a/tools/test_monitor/testdata/summary2/test1-1package-1failure/expected-output/failures/TestSanitySha3_256+github.com-onflow-flow-go-crypto-hash/failure1.txt b/tools/test_monitor/testdata/summary2/test1-1package-1failure/expected-output/failures/TestSanitySha3_256+github.com-onflow-crypto-hash/failure1.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test1-1package-1failure/expected-output/failures/TestSanitySha3_256+github.com-onflow-flow-go-crypto-hash/failure1.txt rename to tools/test_monitor/testdata/summary2/test1-1package-1failure/expected-output/failures/TestSanitySha3_256+github.com-onflow-crypto-hash/failure1.txt diff --git a/tools/test_monitor/testdata/summary2/test1-1package-1failure/expected-output/test1-1package-1failure.json b/tools/test_monitor/testdata/summary2/test1-1package-1failure/expected-output/test1-1package-1failure.json index bd477393a56..00aad44afd2 100644 --- a/tools/test_monitor/testdata/summary2/test1-1package-1failure/expected-output/test1-1package-1failure.json +++ 
b/tools/test_monitor/testdata/summary2/test1-1package-1failure/expected-output/test1-1package-1failure.json @@ -1,8 +1,8 @@ { "tests": { - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256": { + "github.com/onflow/crypto/hash/TestSanitySha3_256": { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 0, "failed": 1, @@ -14,9 +14,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_384": { + "github.com/onflow/crypto/hash/TestSanitySha3_384": { "test": "TestSanitySha3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -28,9 +28,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_256": { + "github.com/onflow/crypto/hash/TestSanitySha2_256": { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -42,9 +42,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_384": { + "github.com/onflow/crypto/hash/TestSanitySha2_384": { "test": "TestSanitySha2_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -56,9 +56,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanityKmac128": { + "github.com/onflow/crypto/hash/TestSanityKmac128": { "test": "TestSanityKmac128", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -70,9 +70,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestHashersAPI": { + "github.com/onflow/crypto/hash/TestHashersAPI": { "test": "TestHashersAPI", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -84,9 +84,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3": { + "github.com/onflow/crypto/hash/TestSha3": { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -98,9 +98,9 @@ 0.23 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_256": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_256": { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -112,9 +112,9 @@ 0.11 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_384": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_384": { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, diff --git a/tools/test_monitor/testdata/summary2/test1-1package-1failure/input/test-result-crypto-hash-1-count-fail.json b/tools/test_monitor/testdata/summary2/test1-1package-1failure/input/test-result-crypto-hash-1-count-fail.json index 9ebb56c98b8..25746e1fe7a 100644 --- a/tools/test_monitor/testdata/summary2/test1-1package-1failure/input/test-result-crypto-hash-1-count-fail.json +++ b/tools/test_monitor/testdata/summary2/test1-1package-1failure/input/test-result-crypto-hash-1-count-fail.json @@ -3,7 +3,7 @@ { "json": { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "output": [ 
{"item": "=== RUN TestSanitySha3_256\n"}, {"item": " hash_test.go:21: \n"}, @@ -29,7 +29,7 @@ { "json": { "test": "TestSanitySha3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "output": [ {"item": "=== RUN TestSanitySha3_384\n"}, {"item": "--- PASS: TestSanitySha3_384 (0.00s)\n"} @@ -41,7 +41,7 @@ { "json": { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "output": [ {"item": "=== RUN TestSanitySha2_256\n"}, {"item": "--- PASS: TestSanitySha2_256 (0.00s)\n"} @@ -53,7 +53,7 @@ { "json": { "test": "TestSanitySha2_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "output": [ {"item": "=== RUN TestSanitySha2_384\n"}, {"item": "--- PASS: TestSanitySha2_384 (0.00s)\n"} @@ -65,7 +65,7 @@ { "json": { "test": "TestSanityKmac128", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "output": [ {"item": "=== RUN TestSanityKmac128\n"}, {"item": "--- PASS: TestSanityKmac128 (0.00s)\n"} @@ -77,7 +77,7 @@ { "json": { "test": "TestHashersAPI", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "output": [ {"item": "=== RUN TestHashersAPI\n"}, {"item": " hash_test.go:114: math rand seed is 1632498687765218000\n"}, @@ -90,7 +90,7 @@ { "json": { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "output": [ {"item": "=== RUN TestSha3\n"}, {"item": " hash_test.go:158: math rand seed is 1632498687765661000\n"}, @@ -103,7 +103,7 @@ { "json": { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "output": [ {"item": "=== RUN TestSha3/SHA3_256\n"}, {"item": " --- PASS: TestSha3/SHA3_256 (0.11s)\n"} @@ -115,7 +115,7 @@ { "json": { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "output": [ {"item": "=== RUN TestSha3/SHA3_384\n"}, {"item": " --- PASS: TestSha3/SHA3_384 (0.12s)\n"} diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure1.txt b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure1.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure1.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure1.txt diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure10.txt b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure10.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure10.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure10.txt diff --git 
a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure2.txt b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure2.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure2.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure2.txt diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure3.txt b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure3.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure3.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure3.txt diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure4.txt b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure4.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure4.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure4.txt diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure5.txt b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure5.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure5.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure5.txt diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure6.txt b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure6.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure6.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure6.txt diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure7.txt 
b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure7.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure7.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure7.txt diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure8.txt b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure8.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure8.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure8.txt diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure9.txt b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure9.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure9.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure9.txt diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha3_256+github.com-onflow-flow-go-crypto-hash/failure1.txt b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha3_256+github.com-onflow-crypto-hash/failure1.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha3_256+github.com-onflow-flow-go-crypto-hash/failure1.txt rename to tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/failures/TestSanitySha3_256+github.com-onflow-crypto-hash/failure1.txt diff --git a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/test4-multi-failures.json b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/test4-multi-failures.json index 29b56d45a53..0b358ac4099 100644 --- a/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/test4-multi-failures.json +++ b/tools/test_monitor/testdata/summary2/test4-multi-failures/expected-output/test4-multi-failures.json @@ -1,8 +1,8 @@ { "tests": { - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256": { + "github.com/onflow/crypto/hash/TestSanitySha3_256": { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 24, "failed": 1, @@ -38,9 +38,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_384": { + "github.com/onflow/crypto/hash/TestSanitySha3_384": { "test": "TestSanitySha3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ 
-76,9 +76,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_256": { + "github.com/onflow/crypto/hash/TestSanitySha2_256": { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 15, "failed": 10, @@ -114,9 +114,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_384": { + "github.com/onflow/crypto/hash/TestSanitySha2_384": { "test": "TestSanitySha2_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -152,9 +152,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanityKmac128": { + "github.com/onflow/crypto/hash/TestSanityKmac128": { "test": "TestSanityKmac128", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -190,9 +190,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestHashersAPI": { + "github.com/onflow/crypto/hash/TestHashersAPI": { "test": "TestHashersAPI", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 24, "failed": 0, @@ -227,9 +227,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3": { + "github.com/onflow/crypto/hash/TestSha3": { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -265,9 +265,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_256": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_256": { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -303,9 +303,9 @@ 0.1 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_384": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_384": { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure1.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure1.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure1.txt rename to tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure1.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure10.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure10.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure10.txt rename to 
tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure10.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure2.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure2.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure2.txt rename to tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure2.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure3.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure3.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure3.txt rename to tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure3.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure4.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure4.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure4.txt rename to tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure4.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure5.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure5.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure5.txt rename to tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure5.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure6.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure6.txt similarity index 100% rename from 
tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure6.txt rename to tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure6.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure7.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure7.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure7.txt rename to tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure7.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure8.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure8.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure8.txt rename to tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure8.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure9.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure9.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-flow-go-crypto-hash/failure9.txt rename to tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha2_256+github.com-onflow-crypto-hash/failure9.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha3_256+github.com-onflow-flow-go-crypto-hash/failure1.txt b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha3_256+github.com-onflow-crypto-hash/failure1.txt similarity index 100% rename from tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha3_256+github.com-onflow-flow-go-crypto-hash/failure1.txt rename to tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/failures/TestSanitySha3_256+github.com-onflow-crypto-hash/failure1.txt diff --git a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/test5-multi-failures-multi-exceptions.json b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/test5-multi-failures-multi-exceptions.json index 040e31b8abc..b3ef9ce1823 100644 
--- a/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/test5-multi-failures-multi-exceptions.json +++ b/tools/test_monitor/testdata/summary2/test5-multi-failures-multi-exceptions/expected-output/test5-multi-failures-multi-exceptions.json @@ -1,8 +1,8 @@ { "tests": { - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256": { + "github.com/onflow/crypto/hash/TestSanitySha3_256": { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 24, "failed": 1, @@ -38,9 +38,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_384": { + "github.com/onflow/crypto/hash/TestSanitySha3_384": { "test": "TestSanitySha3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -76,9 +76,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_256": { + "github.com/onflow/crypto/hash/TestSanitySha2_256": { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 10, @@ -113,9 +113,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_384": { + "github.com/onflow/crypto/hash/TestSanitySha2_384": { "test": "TestSanitySha2_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -151,9 +151,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanityKmac128": { + "github.com/onflow/crypto/hash/TestSanityKmac128": { "test": "TestSanityKmac128", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -189,9 +189,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestHashersAPI": { + "github.com/onflow/crypto/hash/TestHashersAPI": { "test": "TestHashersAPI", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 24, "failed": 0, @@ -226,9 +226,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3": { + "github.com/onflow/crypto/hash/TestSha3": { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -264,9 +264,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_256": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_256": { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -302,9 +302,9 @@ 0.1 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_384": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_384": { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, diff --git a/tools/test_monitor/testdata/summary3/test1-1package-1failure/expected-output/test1-1package-1failure.json b/tools/test_monitor/testdata/summary3/test1-1package-1failure/expected-output/test1-1package-1failure.json index abb5ac0b893..51b4e4d10f1 100644 --- a/tools/test_monitor/testdata/summary3/test1-1package-1failure/expected-output/test1-1package-1failure.json +++ 
b/tools/test_monitor/testdata/summary3/test1-1package-1failure/expected-output/test1-1package-1failure.json @@ -3,7 +3,7 @@ "most_failures": [ { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 0, "failed": 1, @@ -20,7 +20,7 @@ "longest_running": [ { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -34,7 +34,7 @@ }, { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -48,7 +48,7 @@ }, { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, diff --git a/tools/test_monitor/testdata/summary3/test1-1package-1failure/input/test1-1package-1failure.json b/tools/test_monitor/testdata/summary3/test1-1package-1failure/input/test1-1package-1failure.json index bd477393a56..00aad44afd2 100644 --- a/tools/test_monitor/testdata/summary3/test1-1package-1failure/input/test1-1package-1failure.json +++ b/tools/test_monitor/testdata/summary3/test1-1package-1failure/input/test1-1package-1failure.json @@ -1,8 +1,8 @@ { "tests": { - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256": { + "github.com/onflow/crypto/hash/TestSanitySha3_256": { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 0, "failed": 1, @@ -14,9 +14,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_384": { + "github.com/onflow/crypto/hash/TestSanitySha3_384": { "test": "TestSanitySha3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -28,9 +28,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_256": { + "github.com/onflow/crypto/hash/TestSanitySha2_256": { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -42,9 +42,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_384": { + "github.com/onflow/crypto/hash/TestSanitySha2_384": { "test": "TestSanitySha2_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -56,9 +56,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanityKmac128": { + "github.com/onflow/crypto/hash/TestSanityKmac128": { "test": "TestSanityKmac128", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -70,9 +70,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestHashersAPI": { + "github.com/onflow/crypto/hash/TestHashersAPI": { "test": "TestHashersAPI", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -84,9 +84,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3": { + "github.com/onflow/crypto/hash/TestSha3": { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -98,9 +98,9 @@ 0.23 ] }, - 
"github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_256": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_256": { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, @@ -112,9 +112,9 @@ 0.11 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_384": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_384": { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 1, "passed": 1, "failed": 0, diff --git a/tools/test_monitor/testdata/summary3/test4-multi-failures/expected-output/test4-multi-failures.json b/tools/test_monitor/testdata/summary3/test4-multi-failures/expected-output/test4-multi-failures.json index 49a32f08d29..fee741cc3ea 100644 --- a/tools/test_monitor/testdata/summary3/test4-multi-failures/expected-output/test4-multi-failures.json +++ b/tools/test_monitor/testdata/summary3/test4-multi-failures/expected-output/test4-multi-failures.json @@ -3,7 +3,7 @@ "most_failures": [ { "test": "TestSanitySha3_256_4", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 19, @@ -40,7 +40,7 @@ }, { "test": "TestSanitySha3_256_2", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 11, @@ -77,7 +77,7 @@ }, { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 10, @@ -114,7 +114,7 @@ }, { "test": "TestSanitySha3_256_3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 9, @@ -151,7 +151,7 @@ }, { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 24, "failed": 1, @@ -192,7 +192,7 @@ "longest_running": [ { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -230,7 +230,7 @@ }, { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -268,7 +268,7 @@ }, { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, diff --git a/tools/test_monitor/testdata/summary3/test4-multi-failures/input/test4-multi-failures.json b/tools/test_monitor/testdata/summary3/test4-multi-failures/input/test4-multi-failures.json index f61be9b8c2d..ce2de3a7f6a 100644 --- a/tools/test_monitor/testdata/summary3/test4-multi-failures/input/test4-multi-failures.json +++ b/tools/test_monitor/testdata/summary3/test4-multi-failures/input/test4-multi-failures.json @@ -1,8 +1,8 @@ { "tests": { - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_256": { + "github.com/onflow/crypto/hash/TestSanitySha2_256": { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 24, "failed": 1, @@ -38,9 +38,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_384": { + "github.com/onflow/crypto/hash/TestSanitySha3_384": { "test": "TestSanitySha3_384", 
- "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -76,9 +76,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256": { + "github.com/onflow/crypto/hash/TestSanitySha3_256": { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 10, @@ -113,9 +113,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_384": { + "github.com/onflow/crypto/hash/TestSanitySha2_384": { "test": "TestSanitySha2_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -151,9 +151,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256_2": { + "github.com/onflow/crypto/hash/TestSanitySha3_256_2": { "test": "TestSanitySha3_256_2", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 11, @@ -188,9 +188,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanityKmac128": { + "github.com/onflow/crypto/hash/TestSanityKmac128": { "test": "TestSanityKmac128", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -226,9 +226,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestHashersAPI": { + "github.com/onflow/crypto/hash/TestHashersAPI": { "test": "TestHashersAPI", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 24, "failed": 0, @@ -263,9 +263,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3": { + "github.com/onflow/crypto/hash/TestSha3": { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -301,9 +301,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256_3": { + "github.com/onflow/crypto/hash/TestSanitySha3_256_3": { "test": "TestSanitySha3_256_3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 9, @@ -338,9 +338,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_256": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_256": { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -376,9 +376,9 @@ 0.1 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256_4": { + "github.com/onflow/crypto/hash/TestSanitySha3_256_4": { "test": "TestSanitySha3_256_4", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 19, @@ -413,9 +413,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_384": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_384": { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, diff --git a/tools/test_monitor/testdata/summary3/test5-multi-durations/expected-output/test5-multi-durations.json b/tools/test_monitor/testdata/summary3/test5-multi-durations/expected-output/test5-multi-durations.json index 7336f71d3d8..86ad9d9f88f 100644 --- 
a/tools/test_monitor/testdata/summary3/test5-multi-durations/expected-output/test5-multi-durations.json +++ b/tools/test_monitor/testdata/summary3/test5-multi-durations/expected-output/test5-multi-durations.json @@ -18,7 +18,7 @@ "most_failures": [ { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 10, @@ -55,7 +55,7 @@ }, { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 24, "failed": 1, @@ -96,7 +96,7 @@ "longest_running": [ { "test": "TestSha3_2", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -134,7 +134,7 @@ }, { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -172,7 +172,7 @@ }, { "test": "TestSha3_3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -210,7 +210,7 @@ }, { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -248,7 +248,7 @@ }, { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -286,7 +286,7 @@ }, { "test": "TestSha3_4", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, diff --git a/tools/test_monitor/testdata/summary3/test5-multi-durations/input/test5-multi-durations.json b/tools/test_monitor/testdata/summary3/test5-multi-durations/input/test5-multi-durations.json index efc8872446e..e73536877c9 100644 --- a/tools/test_monitor/testdata/summary3/test5-multi-durations/input/test5-multi-durations.json +++ b/tools/test_monitor/testdata/summary3/test5-multi-durations/input/test5-multi-durations.json @@ -1,8 +1,8 @@ { "tests": { - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256": { + "github.com/onflow/crypto/hash/TestSanitySha3_256": { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 24, "failed": 1, @@ -38,9 +38,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_384": { + "github.com/onflow/crypto/hash/TestSanitySha3_384": { "test": "TestSanitySha3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -76,9 +76,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_256": { + "github.com/onflow/crypto/hash/TestSanitySha2_256": { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 10, @@ -113,9 +113,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_384": { + "github.com/onflow/crypto/hash/TestSanitySha2_384": { "test": "TestSanitySha2_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -151,9 +151,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanityKmac128": { + 
"github.com/onflow/crypto/hash/TestSanityKmac128": { "test": "TestSanityKmac128", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -189,9 +189,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestHashersAPI": { + "github.com/onflow/crypto/hash/TestHashersAPI": { "test": "TestHashersAPI", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 24, "failed": 0, @@ -226,9 +226,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3": { + "github.com/onflow/crypto/hash/TestSha3": { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -264,9 +264,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_256": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_256": { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -302,9 +302,9 @@ 0.1 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_384": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_384": { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -516,9 +516,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_4": { + "github.com/onflow/crypto/hash/TestSha3_4": { "test": "TestSha3_4", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -600,9 +600,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_2": { + "github.com/onflow/crypto/hash/TestSha3_2": { "test": "TestSha3_2", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -638,9 +638,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_3": { + "github.com/onflow/crypto/hash/TestSha3_3": { "test": "TestSha3_3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, diff --git a/tools/test_monitor/testdata/summary3/test6-multi-failures-cap/expected-output/test6-multi-failures-cap.json b/tools/test_monitor/testdata/summary3/test6-multi-failures-cap/expected-output/test6-multi-failures-cap.json index 5db7f8f8015..2b1cf11a24e 100644 --- a/tools/test_monitor/testdata/summary3/test6-multi-failures-cap/expected-output/test6-multi-failures-cap.json +++ b/tools/test_monitor/testdata/summary3/test6-multi-failures-cap/expected-output/test6-multi-failures-cap.json @@ -3,7 +3,7 @@ "most_failures": [ { "test": "TestSanitySha3_256_6", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 12, "failed": 21, @@ -40,7 +40,7 @@ }, { "test": "TestSanitySha3_256_5", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 13, "failed": 20, @@ -77,7 +77,7 @@ }, { "test": "TestSanitySha3_256_4", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 19, @@ -114,7 +114,7 @@ }, { "test": "TestSanitySha3_256_7", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": 
"github.com/onflow/crypto/hash", "runs": 24, "passed": 12, "failed": 17, @@ -151,7 +151,7 @@ }, { "test": "TestSanitySha3_256_2", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 11, @@ -191,7 +191,7 @@ "longest_running": [ { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -229,7 +229,7 @@ }, { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -267,7 +267,7 @@ }, { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, diff --git a/tools/test_monitor/testdata/summary3/test6-multi-failures-cap/input/test6-multi-failures-cap.json b/tools/test_monitor/testdata/summary3/test6-multi-failures-cap/input/test6-multi-failures-cap.json index 69d0aedbe34..fddf7ef19ad 100644 --- a/tools/test_monitor/testdata/summary3/test6-multi-failures-cap/input/test6-multi-failures-cap.json +++ b/tools/test_monitor/testdata/summary3/test6-multi-failures-cap/input/test6-multi-failures-cap.json @@ -1,8 +1,8 @@ { "tests": { - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_256": { + "github.com/onflow/crypto/hash/TestSanitySha2_256": { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 24, "failed": 1, @@ -38,9 +38,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_384": { + "github.com/onflow/crypto/hash/TestSanitySha3_384": { "test": "TestSanitySha3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -76,9 +76,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256": { + "github.com/onflow/crypto/hash/TestSanitySha3_256": { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 10, @@ -113,9 +113,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_384": { + "github.com/onflow/crypto/hash/TestSanitySha2_384": { "test": "TestSanitySha2_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -151,9 +151,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256_2": { + "github.com/onflow/crypto/hash/TestSanitySha3_256_2": { "test": "TestSanitySha3_256_2", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 11, @@ -188,9 +188,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanityKmac128": { + "github.com/onflow/crypto/hash/TestSanityKmac128": { "test": "TestSanityKmac128", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -226,9 +226,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestHashersAPI": { + "github.com/onflow/crypto/hash/TestHashersAPI": { "test": "TestHashersAPI", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 24, "failed": 0, @@ -263,9 +263,9 @@ 0 ] }, - 
"github.com/onflow/flow-go/crypto/hash/TestSha3": { + "github.com/onflow/crypto/hash/TestSha3": { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -301,9 +301,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256_3": { + "github.com/onflow/crypto/hash/TestSanitySha3_256_3": { "test": "TestSanitySha3_256_3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 9, @@ -338,9 +338,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_256": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_256": { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -376,9 +376,9 @@ 0.1 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256_4": { + "github.com/onflow/crypto/hash/TestSanitySha3_256_4": { "test": "TestSanitySha3_256_4", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 19, @@ -413,9 +413,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256_5": { + "github.com/onflow/crypto/hash/TestSanitySha3_256_5": { "test": "TestSanitySha3_256_5", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 13, "failed": 20, @@ -450,9 +450,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256_6": { + "github.com/onflow/crypto/hash/TestSanitySha3_256_6": { "test": "TestSanitySha3_256_6", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 12, "failed": 21, @@ -487,9 +487,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256_7": { + "github.com/onflow/crypto/hash/TestSanitySha3_256_7": { "test": "TestSanitySha3_256_7", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 12, "failed": 17, @@ -524,9 +524,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_384": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_384": { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, diff --git a/tools/test_monitor/testdata/summary3/test7-multi-durations-cap/expected-output/test7-multi-durations-cap.json b/tools/test_monitor/testdata/summary3/test7-multi-durations-cap/expected-output/test7-multi-durations-cap.json index 00fffd33996..78832c110ff 100644 --- a/tools/test_monitor/testdata/summary3/test7-multi-durations-cap/expected-output/test7-multi-durations-cap.json +++ b/tools/test_monitor/testdata/summary3/test7-multi-durations-cap/expected-output/test7-multi-durations-cap.json @@ -18,7 +18,7 @@ "most_failures": [ { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 10, @@ -55,7 +55,7 @@ }, { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 24, "failed": 1, @@ -96,7 +96,7 @@ "longest_running": [ { "test": "TestSha3_8", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": 
"github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -134,7 +134,7 @@ }, { "test": "TestSha3_9", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -172,7 +172,7 @@ }, { "test": "TestSha3_10", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -210,7 +210,7 @@ }, { "test": "TestSha3_7", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -248,7 +248,7 @@ }, { "test": "TestSha3_5", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, diff --git a/tools/test_monitor/testdata/summary3/test7-multi-durations-cap/input/test7-multi-durations-cap.json b/tools/test_monitor/testdata/summary3/test7-multi-durations-cap/input/test7-multi-durations-cap.json index 0763288eadf..5e3ee5f7c51 100644 --- a/tools/test_monitor/testdata/summary3/test7-multi-durations-cap/input/test7-multi-durations-cap.json +++ b/tools/test_monitor/testdata/summary3/test7-multi-durations-cap/input/test7-multi-durations-cap.json @@ -1,8 +1,8 @@ { "tests": { - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_256": { + "github.com/onflow/crypto/hash/TestSanitySha3_256": { "test": "TestSanitySha3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 24, "failed": 1, @@ -38,9 +38,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha3_384": { + "github.com/onflow/crypto/hash/TestSanitySha3_384": { "test": "TestSanitySha3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -76,9 +76,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_256": { + "github.com/onflow/crypto/hash/TestSanitySha2_256": { "test": "TestSanitySha2_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 14, "failed": 10, @@ -113,9 +113,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanitySha2_384": { + "github.com/onflow/crypto/hash/TestSanitySha2_384": { "test": "TestSanitySha2_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -151,9 +151,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSanityKmac128": { + "github.com/onflow/crypto/hash/TestSanityKmac128": { "test": "TestSanityKmac128", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -189,9 +189,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestHashersAPI": { + "github.com/onflow/crypto/hash/TestHashersAPI": { "test": "TestHashersAPI", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 24, "passed": 24, "failed": 0, @@ -226,9 +226,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3": { + "github.com/onflow/crypto/hash/TestSha3": { "test": "TestSha3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -264,9 +264,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_256": { + 
"github.com/onflow/crypto/hash/TestSha3/SHA3_256": { "test": "TestSha3/SHA3_256", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -302,9 +302,9 @@ 0.1 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3/SHA3_384": { + "github.com/onflow/crypto/hash/TestSha3/SHA3_384": { "test": "TestSha3/SHA3_384", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -516,9 +516,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_4": { + "github.com/onflow/crypto/hash/TestSha3_4": { "test": "TestSha3_4", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -600,9 +600,9 @@ 0 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_2": { + "github.com/onflow/crypto/hash/TestSha3_2": { "test": "TestSha3_2", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -638,9 +638,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_3": { + "github.com/onflow/crypto/hash/TestSha3_3": { "test": "TestSha3_3", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -676,9 +676,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_5": { + "github.com/onflow/crypto/hash/TestSha3_5": { "test": "TestSha3_5", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -714,9 +714,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_6": { + "github.com/onflow/crypto/hash/TestSha3_6": { "test": "TestSha3_6", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -752,9 +752,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_7": { + "github.com/onflow/crypto/hash/TestSha3_7": { "test": "TestSha3_7", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -790,9 +790,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_8": { + "github.com/onflow/crypto/hash/TestSha3_8": { "test": "TestSha3_8", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -828,9 +828,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_9": { + "github.com/onflow/crypto/hash/TestSha3_9": { "test": "TestSha3_9", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -866,9 +866,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_10": { + "github.com/onflow/crypto/hash/TestSha3_10": { "test": "TestSha3_10", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -904,9 +904,9 @@ 0.22 ] }, - "github.com/onflow/flow-go/crypto/hash/TestSha3_11": { + "github.com/onflow/crypto/hash/TestSha3_11": { "test": "TestSha3_11", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, @@ -942,9 +942,9 @@ 0.22 ] }, - 
"github.com/onflow/flow-go/crypto/hash/TestSha3_12": { + "github.com/onflow/crypto/hash/TestSha3_12": { "test": "TestSha3_12", - "package": "github.com/onflow/flow-go/crypto/hash", + "package": "github.com/onflow/crypto/hash", "runs": 25, "passed": 25, "failed": 0, diff --git a/utils/binstat/binstat.go b/utils/binstat/binstat.go index a77e5800035..e6e26ee2afa 100644 --- a/utils/binstat/binstat.go +++ b/utils/binstat/binstat.go @@ -63,8 +63,12 @@ uint64_t gettid() { return syscall(SYS_gettid); } #include <stdint.h> #include <pthread.h> uint64_t gettid() { uint64_t tid; pthread_threadid_np(NULL, &tid); return tid; } +#elif defined(_WIN32) +#include <stdint.h> +#include <windows.h> +uint64_t gettid() { return (uint64_t)GetCurrentThreadId(); } #else -# error "Unknown platform; __linux__ or __APPLE__ supported" +# error "Unknown platform; __linux__ or __APPLE__ or _WIN32 expected" #endif */ import "C" @@ -238,7 +242,7 @@ func init() { } t2 := runtimeNanoAsTimeDuration() - if t2 <= t1 { + if t2 < t1 { panic(fmt.Sprintf("ERROR: BINSTAT: INTERNAL: t1=%d but t2=%d\n", t1, t2)) } } diff --git a/utils/binstat/binstat_external_test.go b/utils/binstat/binstat_external_test.go index 9ffa7b23065..10f8b911ff9 100644 --- a/utils/binstat/binstat_external_test.go +++ b/utils/binstat/binstat_external_test.go @@ -28,7 +28,7 @@ import ( * 5. Strip "time" field from JSON log line output for shorter read, and * 6. Show the amount of code coverage from the tests. * - * pushd utils/binstat ; go fmt ./*.go ; golangci-lint run && go test -v -vv -coverprofile=coverage.txt -covermode=atomic --tags relic ./... | perl -lane 's~\\n~\n~g; s~"time".*?,~~g; print;' ; go tool cover -func=coverage.txt ; popd + * pushd utils/binstat ; go fmt ./*.go ; golangci-lint run && go test -v -vv -coverprofile=coverage.txt -covermode=atomic ./... | perl -lane 's~\\n~\n~g; s~"time".*?,~~g; print;' ; go tool cover -func=coverage.txt ; popd */ /* diff --git a/utils/concurrentmap/concurrent_map.go b/utils/concurrentmap/concurrent_map.go new file mode 100644 index 00000000000..148c3741428 --- /dev/null +++ b/utils/concurrentmap/concurrent_map.go @@ -0,0 +1,72 @@ +package concurrentmap + +import "sync" + +// Map is a thread-safe map. +type Map[K comparable, V any] struct { + mu sync.RWMutex + m map[K]V +} + +// New returns a new Map with the given types +func New[K comparable, V any]() *Map[K, V] { + return &Map[K, V]{ + m: make(map[K]V), + } +} + +// Add adds a key-value pair to the map +func (p *Map[K, V]) Add(key K, value V) { + p.mu.Lock() + defer p.mu.Unlock() + p.m[key] = value +} + +// Remove removes a key-value pair from the map +func (p *Map[K, V]) Remove(key K) { + p.mu.Lock() + defer p.mu.Unlock() + delete(p.m, key) +} + +// Has returns true if the map contains the given key +func (p *Map[K, V]) Has(key K) bool { + p.mu.RLock() + defer p.mu.RUnlock() + _, ok := p.m[key] + return ok +} + +// Get returns the value for the given key and a boolean indicating if the key was found +func (p *Map[K, V]) Get(key K) (V, bool) { + p.mu.RLock() + defer p.mu.RUnlock() + value, ok := p.m[key] + return value, ok +} + +// ForEach iterates over the map and calls the given function for each key-value pair. +// If the function returns an error, the iteration is stopped and the error is returned. +func (p *Map[K, V]) ForEach(fn func(k K, v V) error) error { + p.mu.RLock() + defer p.mu.RUnlock() + for k, v := range p.m { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Size returns the size of the map. 
+func (p *Map[K, V]) Size() int {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return len(p.m)
+}
+
+// Clear removes all key-value pairs from the map.
+func (p *Map[K, V]) Clear() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	clear(p.m)
+}
diff --git a/utils/debug/README.md b/utils/debug/README.md
index c7591241d10..6bdc182bba0 100644
--- a/utils/debug/README.md
+++ b/utils/debug/README.md
@@ -1,37 +1,49 @@
-
 ## Remote Debugger
 
-Remote debugger provides utils needed to run transactions and scripts against live network data. It uses GRPC endpoints on an execution nodes to fetch registers and block info when running a transaction. This is mostly provided for debugging purpose and should not be used for production level operations.
-If you use the caching method you can run the transaction once and use the cached values to run transaction in debugging mode.
+The remote debugger allows running transactions and scripts against existing network data.
+
+It uses APIs to fetch registers and block info, for example the register value API of the execution nodes,
+or the execution data API of the access nodes.
+
+This is mostly provided for debugging purposes and should not be used for production-level operations.
+
+Optionally, the debugger can write the registers fetched during execution to a cache,
+and read them back from that cache on later runs.
+
+Use the `ExecutionNodeStorageSnapshot` to fetch the registers and block info from the execution node (live/recent data).
-
-### sample code
+Use the `ExecutionDataStorageSnapshot` to fetch the execution data from the access node (recent/historic data).
+
+### Sample Code
 
 ```GO
 package debug_test
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"testing"
 
+	"github.com/onflow/flow/protobuf/go/flow/access"
+	"github.com/onflow/flow/protobuf/go/flow/execution"
+	"github.com/onflow/flow/protobuf/go/flow/executiondata"
 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/require"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
 
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/utils/debug"
)
 
-func TestDebugger_RunTransaction(t *testing.T) {
-
-	grpcAddress := "localhost:3600"
-	chain := flow.Emulator.Chain()
-	debugger := debug.NewRemoteDebugger(grpcAddress, chain, zerolog.New(os.Stdout).With().Logger())
+func getTransaction(chain flow.Chain) *flow.TransactionBody {
 
 	const scriptTemplate = `
 	import FlowServiceAccount from 0x%s
 	transaction() {
-		prepare(signer: AuthAccount) {
+		prepare(signer: &Account) {
 			log(signer.balance)
 		}
 	}
@@ -39,38 +51,100 @@ func TestDebugger_RunTransaction(t *testing.T) {
 	script := []byte(fmt.Sprintf(scriptTemplate, chain.ServiceAddress()))
 
 	txBody := flow.NewTransactionBody().
-		SetGasLimit(9999).
-		SetScript([]byte(script)).
+		SetComputeLimit(9999).
+		SetScript(script).
 		SetPayer(chain.ServiceAddress()).
 		SetProposalKey(chain.ServiceAddress(), 0, 0)
 	txBody.Authorizers = []flow.Address{chain.ServiceAddress()}
 
-	// Run at the latest blockID
-	txErr, err := debugger.RunTransaction(txBody)
-	require.NoError(t, txErr)
+	return txBody
+}
+
+func TestDebugger_RunTransactionAgainstExecutionNodeAtBlockID(t *testing.T) {
+
+	host := "execution-001.mainnet26.nodes.onflow.org:9000"
+
+	conn, err := grpc.NewClient(
+		host,
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+	)
 	require.NoError(t, err)
+	defer conn.Close()
+
+	executionClient := execution.NewExecutionAPIClient(conn)
 
-	// Run with blockID (use the file cache)
-	blockId, err := flow.HexStringToIdentifier("3a8281395e2c1aaa3b8643d148594b19e2acb477611a8e0cab8a55c46c40b563")
+	blockID, err := flow.HexStringToIdentifier("e68a9a1fe849d1be80e4c5e414f53e3b59a170b88785e0b22be077ae9c3bbd29")
 	require.NoError(t, err)
-	txErr, err = debugger.RunTransactionAtBlockID(txBody, blockId, "")
+
+	header, err := debug.GetExecutionAPIBlockHeader(executionClient, context.Background(), blockID)
+	require.NoError(t, err)
+
+	snapshot, err := debug.NewExecutionNodeStorageSnapshot(executionClient, nil, blockID)
+	require.NoError(t, err)
+
+	defer snapshot.Close()
+
+	chain := flow.Mainnet.Chain()
+	logger := zerolog.New(os.Stdout).With().Logger()
+	debugger := debug.NewRemoteDebugger(chain, logger)
+
+	txBody := getTransaction(chain)
+
+	_, txErr, err := debugger.RunTransaction(txBody, snapshot, header)
 	require.NoError(t, txErr)
 	require.NoError(t, err)
+}
+
+func TestDebugger_RunTransactionAgainstAccessNodeAtBlockIDWithFileCache(t *testing.T) {
+
+	host := "access.mainnet.nodes.onflow.org:9000"
+
+	conn, err := grpc.NewClient(
+		host,
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+	)
+	require.NoError(t, err)
+	defer conn.Close()
+
+	executionDataClient := executiondata.NewExecutionDataAPIClient(conn)
+
+	var blockHeight uint64 = 100_000_000
+
+	blockID, err := flow.HexStringToIdentifier("e68a9a1fe849d1be80e4c5e414f53e3b59a170b88785e0b22be077ae9c3bbd29")
+	require.NoError(t, err)
+
+	accessClient := access.NewAccessAPIClient(conn)
+	header, err := debug.GetAccessAPIBlockHeader(
+		accessClient,
+		context.Background(),
+		blockID,
+	)
+	require.NoError(t, err)
 
 	testCacheFile := "test.cache"
 	defer os.Remove(testCacheFile)
-	// the first run would cache the results
-	txErr, err = debugger.RunTransactionAtBlockID(txBody, blockId, testCacheFile)
+
+	cache := debug.NewFileRegisterCache(testCacheFile)
+
+	snapshot, err := debug.NewExecutionDataStorageSnapshot(executionDataClient, cache, blockHeight)
+	require.NoError(t, err)
+
+	defer snapshot.Close()
+
+	chain := flow.Mainnet.Chain()
+	logger := zerolog.New(os.Stdout).With().Logger()
+	debugger := debug.NewRemoteDebugger(chain, logger)
+
+	txBody := getTransaction(chain)
+
+	// the first run will cache the results
+	_, txErr, err := debugger.RunTransaction(txBody, snapshot, header)
 	require.NoError(t, txErr)
 	require.NoError(t, err)
 
-	// second one should only use the cache
-	// make blockId invalid so if it endsup looking up by id it should fail
-	blockId = flow.Identifier{}
-	txErr, err = debugger.RunTransactionAtBlockID(txBody, blockId, testCacheFile)
+	// the second run should only use the cache.
+	_, txErr, err = debugger.RunTransaction(txBody, snapshot, header)
 	require.NoError(t, txErr)
 	require.NoError(t, err)
 }
-
-```
\ No newline at end of file
+```
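The README samples above only exercise `RunTransaction`; the refactored debugger also exposes `RunScript` with the same snapshot/header pattern (see the `remoteDebugger.go` changes later in this patch). Below is a minimal sketch, not part of the patch itself: the helper name `runSampleScript` and the trivial Cadence script are illustrative, and a `debugger`, `snapshot`, and `header` are assumed to be constructed exactly as in the samples above.

```go
// runSampleScript is a hypothetical helper showing the RunScript call shape:
// a non-nil process error means execution could not run at all, while a
// script error means the Cadence script itself failed.
func runSampleScript(
	debugger *debug.RemoteDebugger,
	snapshot debug.StorageSnapshot,
	header *flow.Header,
) (cadence.Value, error) {
	code := []byte(`access(all) fun main(): UFix64 { return 0.0 }`)

	value, scriptErr, processErr := debugger.RunScript(code, nil, snapshot, header)
	if processErr != nil {
		return nil, processErr
	}
	if scriptErr != nil {
		return nil, scriptErr
	}
	return value, nil
}
```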
diff --git a/utils/debug/api.go b/utils/debug/api.go
new file mode 100644
index 00000000000..9e6ea7bfca1
--- /dev/null
+++ b/utils/debug/api.go
@@ -0,0 +1,51 @@
+package debug
+
+import (
+	"context"
+
+	"github.com/onflow/flow/protobuf/go/flow/access"
+	"github.com/onflow/flow/protobuf/go/flow/execution"
+
+	rpcConvert "github.com/onflow/flow-go/engine/common/rpc/convert"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+func GetExecutionAPIBlockHeader(
+	client execution.ExecutionAPIClient,
+	ctx context.Context,
+	blockID flow.Identifier,
+) (
+	*flow.Header,
+	error,
+) {
+	req := &execution.GetBlockHeaderByIDRequest{
+		Id: blockID[:],
+	}
+
+	resp, err := client.GetBlockHeaderByID(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	return rpcConvert.MessageToBlockHeader(resp.Block)
+}
+
+func GetAccessAPIBlockHeader(
+	client access.AccessAPIClient,
+	ctx context.Context,
+	blockID flow.Identifier,
+) (
+	*flow.Header,
+	error,
+) {
+	req := &access.GetBlockHeaderByIDRequest{
+		Id: blockID[:],
+	}
+
+	resp, err := client.GetBlockHeaderByID(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	return rpcConvert.MessageToBlockHeader(resp.Block)
+}
diff --git a/utils/debug/registerCache.go b/utils/debug/registerCache.go
index 086be61b250..29befd1a7fc 100644
--- a/utils/debug/registerCache.go
+++ b/utils/debug/registerCache.go
@@ -11,40 +11,44 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 )
 
-type registerCache interface {
+type RegisterCache interface {
 	Get(owner, key string) (value []byte, found bool)
 	Set(owner, key string, value []byte)
 	Persist() error
 }
 
-type memRegisterCache struct {
+type InMemoryRegisterCache struct {
 	data map[string]flow.RegisterValue
 }
 
-func newMemRegisterCache() *memRegisterCache {
-	return &memRegisterCache{data: make(map[string]flow.RegisterValue)}
+var _ RegisterCache = &InMemoryRegisterCache{}
+
+func NewInMemoryRegisterCache() *InMemoryRegisterCache {
+	return &InMemoryRegisterCache{data: make(map[string]flow.RegisterValue)}
 }
 
-func (c *memRegisterCache) Get(owner, key string) ([]byte, bool) {
+func (c *InMemoryRegisterCache) Get(owner, key string) ([]byte, bool) {
 	v, found := c.data[owner+"~"+key]
 	return v, found
 }
 
-func (c *memRegisterCache) Set(owner, key string, value []byte) {
+func (c *InMemoryRegisterCache) Set(owner, key string, value []byte) {
 	c.data[owner+"~"+key] = value
 }
 
-func (c *memRegisterCache) Persist() error {
+func (c *InMemoryRegisterCache) Persist() error {
 	// No-op
 	return nil
 }
 
-type fileRegisterCache struct {
+type FileRegisterCache struct {
 	filePath string
 	data     map[string]flow.RegisterEntry
 }
 
-func newFileRegisterCache(filePath string) *fileRegisterCache {
-	cache := &fileRegisterCache{filePath: filePath}
+var _ RegisterCache = &FileRegisterCache{}
+
+func NewFileRegisterCache(filePath string) *FileRegisterCache {
+	cache := &FileRegisterCache{filePath: filePath}
 
 	data := make(map[string]flow.RegisterEntry)
 
 	if _, err := os.Stat(filePath); err == nil {
@@ -86,7 +90,7 @@ func newFileRegisterCache(filePath string) *fileRegisterCache {
 	return cache
 }
 
-func (f *fileRegisterCache) Get(owner, key string) ([]byte, bool) {
+func (f *FileRegisterCache) Get(owner, key string) ([]byte, bool) {
 	v, found := f.data[owner+"~"+key]
 	if found {
 		return v.Value, found
@@ -94,18 +98,18 @@ func (f *fileRegisterCache) Get(owner, key string) ([]byte, bool) {
 	return nil, found
 }
 
-func (f 
*fileRegisterCache) Set(owner, key string, value []byte) { +func (f *FileRegisterCache) Set(owner, key string, value []byte) { valueCopy := make([]byte, len(value)) copy(valueCopy, value) - fmt.Println(hex.EncodeToString([]byte(owner)), hex.EncodeToString([]byte(key)), len(value)) + ownerAddr := flow.BytesToAddress([]byte(owner)) + fmt.Println(ownerAddr.Hex(), hex.EncodeToString([]byte(key)), len(value)) f.data[owner+"~"+key] = flow.RegisterEntry{ - Key: flow.NewRegisterID(hex.EncodeToString([]byte(owner)), - hex.EncodeToString([]byte(key))), + Key: flow.NewRegisterID(ownerAddr, hex.EncodeToString([]byte(key))), Value: flow.RegisterValue(valueCopy), } } -func (c *fileRegisterCache) Persist() error { +func (c *FileRegisterCache) Persist() error { f, err := os.OpenFile(c.filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) if err != nil { return err diff --git a/utils/debug/remoteDebugger.go b/utils/debug/remoteDebugger.go index 86c8292588a..71dfddc2972 100644 --- a/utils/debug/remoteDebugger.go +++ b/utils/debug/remoteDebugger.go @@ -5,132 +5,96 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" ) type RemoteDebugger struct { - vm fvm.VM - ctx fvm.Context - grpcAddress string + vm fvm.VM + ctx fvm.Context } -// Warning : make sure you use the proper flow-go version, same version as the network you are collecting registers -// from, otherwise the execution might differ from the way runs on the network -func NewRemoteDebugger(grpcAddress string, +// NewRemoteDebugger creates a new remote debugger. +// NOTE: Make sure to use the same version of flow-go as the network +// you are collecting registers from, otherwise the execution might differ +// from the way it runs on the network +func NewRemoteDebugger( chain flow.Chain, - logger zerolog.Logger) *RemoteDebugger { + logger zerolog.Logger, + options ...fvm.Option, +) *RemoteDebugger { vm := fvm.NewVirtualMachine() // no signature processor here // TODO Maybe we add fee-deduction step as well + ctx := fvm.NewContext( - fvm.WithLogger(logger), - fvm.WithChain(chain), - fvm.WithAuthorizationChecksEnabled(false), + append( + []fvm.Option{ + fvm.WithLogger(logger), + fvm.WithChain(chain), + fvm.WithAuthorizationChecksEnabled(false), + fvm.WithEVMEnabled(true), + }, + options..., + )..., ) return &RemoteDebugger{ - ctx: ctx, - vm: vm, - grpcAddress: grpcAddress, + ctx: ctx, + vm: vm, } } -// RunTransaction runs the transaction given the latest sealed block data +// RunTransaction runs the transaction using the given storage snapshot. func (d *RemoteDebugger) RunTransaction( txBody *flow.TransactionBody, + snapshot StorageSnapshot, + blockHeader *flow.Header, ) ( + resultSnapshot *snapshot.ExecutionSnapshot, txErr error, processError error, ) { - snapshot := NewRemoteStorageSnapshot(d.grpcAddress) - defer snapshot.Close() - blockCtx := fvm.NewContextFromParent( d.ctx, - fvm.WithBlockHeader(d.ctx.BlockHeader)) - tx := fvm.Transaction(txBody, 0) - _, output, err := d.vm.Run(blockCtx, tx, snapshot) - if err != nil { - return nil, err - } - return output.Err, nil -} - -// RunTransaction runs the transaction and tries to collect the registers at -// the given blockID note that it would be very likely that block is far in the -// past and you can't find the trie to read the registers from. 
-// if regCachePath is empty, the register values won't be cached -func (d *RemoteDebugger) RunTransactionAtBlockID( - txBody *flow.TransactionBody, - blockID flow.Identifier, - regCachePath string, -) ( - txErr error, - processError error, -) { - snapshot := NewRemoteStorageSnapshot(d.grpcAddress, WithBlockID(blockID)) - defer snapshot.Close() + fvm.WithBlockHeader(blockHeader)) - blockCtx := fvm.NewContextFromParent( - d.ctx, - fvm.WithBlockHeader(d.ctx.BlockHeader)) - if len(regCachePath) > 0 { - snapshot.Cache = newFileRegisterCache(regCachePath) - } tx := fvm.Transaction(txBody, 0) - _, output, err := d.vm.Run(blockCtx, tx, snapshot) - if err != nil { - return nil, err - } - err = snapshot.Cache.Persist() + + var ( + output fvm.ProcedureOutput + err error + ) + resultSnapshot, output, err = d.vm.Run(blockCtx, tx, snapshot) if err != nil { - return nil, err + return resultSnapshot, nil, err } - return output.Err, nil + return resultSnapshot, output.Err, nil } +// RunScript runs the script using the given storage snapshot. func (d *RemoteDebugger) RunScript( code []byte, arguments [][]byte, + snapshot StorageSnapshot, + blockHeader *flow.Header, ) ( value cadence.Value, scriptError error, processError error, ) { - snapshot := NewRemoteStorageSnapshot(d.grpcAddress) - defer snapshot.Close() - scriptCtx := fvm.NewContextFromParent( d.ctx, - fvm.WithBlockHeader(d.ctx.BlockHeader)) - script := fvm.Script(code).WithArguments(arguments...) - _, output, err := d.vm.Run(scriptCtx, script, snapshot) - if err != nil { - return nil, nil, err - } - return output.Value, output.Err, nil -} - -func (d *RemoteDebugger) RunScriptAtBlockID( - code []byte, - arguments [][]byte, - blockID flow.Identifier, -) ( - value cadence.Value, - scriptError error, - processError error, -) { - snapshot := NewRemoteStorageSnapshot(d.grpcAddress, WithBlockID(blockID)) - defer snapshot.Close() + fvm.WithBlockHeader(blockHeader), + ) - scriptCtx := fvm.NewContextFromParent( - d.ctx, - fvm.WithBlockHeader(d.ctx.BlockHeader)) script := fvm.Script(code).WithArguments(arguments...) + _, output, err := d.vm.Run(scriptCtx, script, snapshot) if err != nil { return nil, nil, err } + return output.Value, output.Err, nil } diff --git a/utils/debug/remoteView.go b/utils/debug/remoteView.go index 5951a555ac0..fba6dccf46a 100644 --- a/utils/debug/remoteView.go +++ b/utils/debug/remoteView.go @@ -3,164 +3,154 @@ package debug import ( "context" + "github.com/onflow/flow/protobuf/go/flow/entities" "github.com/onflow/flow/protobuf/go/flow/execution" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" + "github.com/onflow/flow/protobuf/go/flow/executiondata" + "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" ) -// RemoteStorageSnapshot provides a storage snapshot connected to a live -// execution node to read the registers. 
-type RemoteStorageSnapshot struct { - Cache registerCache - BlockID []byte - BlockHeader *flow.Header - connection *grpc.ClientConn - executionAPIclient execution.ExecutionAPIClient +type StorageSnapshot interface { + snapshot.StorageSnapshot } -// A RemoteStorageSnapshotOption sets a configuration parameter for the remote -// snapshot -type RemoteStorageSnapshotOption func(*RemoteStorageSnapshot) *RemoteStorageSnapshot - -// WithFileCache sets the output path to store -// register values so can be fetched from a file cache -// it loads the values from the cache upon object construction -func WithCache(cache registerCache) RemoteStorageSnapshotOption { - return func(snapshot *RemoteStorageSnapshot) *RemoteStorageSnapshot { - snapshot.Cache = cache - return snapshot - } -} - -// WithBlockID sets the blockID for the remote snapshot, if not used -// remote snapshot will use the latest sealed block -func WithBlockID(blockID flow.Identifier) RemoteStorageSnapshotOption { - return func(snapshot *RemoteStorageSnapshot) *RemoteStorageSnapshot { - snapshot.BlockID = blockID[:] - var err error - snapshot.BlockHeader, err = snapshot.getBlockHeader(blockID) - if err != nil { - panic(err) - } - return snapshot - } +// ExecutionNodeStorageSnapshot provides a storage snapshot connected +// to an execution node to read the registers. +type ExecutionNodeStorageSnapshot struct { + Client execution.ExecutionAPIClient + Cache RegisterCache + BlockID flow.Identifier } -func NewRemoteStorageSnapshot( - grpcAddress string, - opts ...RemoteStorageSnapshotOption, -) *RemoteStorageSnapshot { - conn, err := grpc.Dial( - grpcAddress, - grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - panic(err) - } - - snapshot := &RemoteStorageSnapshot{ - connection: conn, - executionAPIclient: execution.NewExecutionAPIClient(conn), - Cache: newMemRegisterCache(), - } +var _ StorageSnapshot = &ExecutionNodeStorageSnapshot{} - snapshot.BlockID, snapshot.BlockHeader, err = snapshot.getLatestBlockID() - if err != nil { - panic(err) +func NewExecutionNodeStorageSnapshot( + client execution.ExecutionAPIClient, + cache RegisterCache, + blockID flow.Identifier, +) ( + *ExecutionNodeStorageSnapshot, + error, +) { + if cache == nil { + cache = NewInMemoryRegisterCache() } - for _, applyOption := range opts { - snapshot = applyOption(snapshot) - } - return snapshot + return &ExecutionNodeStorageSnapshot{ + Client: client, + Cache: cache, + BlockID: blockID, + }, nil } -func (snapshot *RemoteStorageSnapshot) Close() error { - return snapshot.connection.Close() +func (snapshot *ExecutionNodeStorageSnapshot) Close() error { + return snapshot.Cache.Persist() } -func (snapshot *RemoteStorageSnapshot) getLatestBlockID() ( - []byte, - *flow.Header, +func (snapshot *ExecutionNodeStorageSnapshot) Get( + id flow.RegisterID, +) ( + flow.RegisterValue, error, ) { - req := &execution.GetLatestBlockHeaderRequest{ - IsSealed: true, + // first, check the cache + value, found := snapshot.Cache.Get(id.Owner, id.Key) + if found { + return value, nil + } + + // if the register is not cached, fetch it from the execution node + req := &execution.GetRegisterAtBlockIDRequest{ + BlockId: snapshot.BlockID[:], + RegisterOwner: []byte(id.Owner), + RegisterKey: []byte(id.Key), } - resp, err := snapshot.executionAPIclient.GetLatestBlockHeader( + // TODO use a proper context for timeouts + resp, err := snapshot.Client.GetRegisterAtBlockID( context.Background(), - req) + req, + ) if err != nil { - return nil, nil, err + return nil, err } - // 
TODO set chainID and parentID - header := &flow.Header{ - Height: resp.Block.Height, - Timestamp: resp.Block.Timestamp.AsTime(), - } + // append register to the cache + snapshot.Cache.Set(id.Owner, id.Key, resp.Value) - return resp.Block.Id, header, nil + return resp.Value, nil } -func (snapshot *RemoteStorageSnapshot) getBlockHeader( - blockID flow.Identifier, +// ExecutionDataStorageSnapshot provides a storage snapshot connected +// to an access node to read the registers (via its execution data API). +type ExecutionDataStorageSnapshot struct { + Client executiondata.ExecutionDataAPIClient + Cache RegisterCache + BlockHeight uint64 +} + +var _ StorageSnapshot = &ExecutionDataStorageSnapshot{} + +func NewExecutionDataStorageSnapshot( + client executiondata.ExecutionDataAPIClient, + cache RegisterCache, + blockHeight uint64, ) ( - *flow.Header, + *ExecutionDataStorageSnapshot, error, ) { - req := &execution.GetBlockHeaderByIDRequest{ - Id: blockID[:], + if cache == nil { + cache = NewInMemoryRegisterCache() } - resp, err := snapshot.executionAPIclient.GetBlockHeaderByID( - context.Background(), - req) - if err != nil { - return nil, err - } - - // TODO set chainID and parentID - header := &flow.Header{ - Height: resp.Block.Height, - Timestamp: resp.Block.Timestamp.AsTime(), - } + return &ExecutionDataStorageSnapshot{ + Client: client, + Cache: cache, + BlockHeight: blockHeight, + }, nil +} - return header, nil +func (snapshot *ExecutionDataStorageSnapshot) Close() error { + return snapshot.Cache.Persist() } -func (snapshot *RemoteStorageSnapshot) Get( +func (snapshot *ExecutionDataStorageSnapshot) Get( id flow.RegisterID, ) ( flow.RegisterValue, error, ) { - // then check the read cache + // first, check the cache value, found := snapshot.Cache.Get(id.Owner, id.Key) if found { return value, nil } - // last use the grpc api the - req := &execution.GetRegisterAtBlockIDRequest{ - BlockId: []byte(snapshot.BlockID), - RegisterOwner: []byte(id.Owner), - RegisterKey: []byte(id.Key), + // if the register is not cached, fetch it from the execution data API + req := &executiondata.GetRegisterValuesRequest{ + BlockHeight: snapshot.BlockHeight, + RegisterIds: []*entities.RegisterID{ + { + Owner: []byte(id.Owner), + Key: []byte(id.Key), + }, + }, } // TODO use a proper context for timeouts - resp, err := snapshot.executionAPIclient.GetRegisterAtBlockID( + resp, err := snapshot.Client.GetRegisterValues( context.Background(), - req) + req, + ) if err != nil { return nil, err } - snapshot.Cache.Set(id.Owner, id.Key, resp.Value) + value = resp.Values[0] - // append value to the file cache + // append register to the cache + snapshot.Cache.Set(id.Owner, id.Key, value) - return resp.Value, nil + return value, nil } diff --git a/utils/dsl/dsl.go b/utils/dsl/dsl.go index ccf9c648be0..928d0e7ad9c 100644 --- a/utils/dsl/dsl.go +++ b/utils/dsl/dsl.go @@ -13,12 +13,15 @@ type CadenceCode interface { } type Transaction struct { - Import Import + Imports Imports Content CadenceCode } func (t Transaction) ToCadence() string { - return fmt.Sprintf("%s \n transaction { %s }", t.Import.ToCadence(), t.Content.ToCadence()) + return fmt.Sprintf(` + %s + transaction { %s } + `, t.Imports.ToCadence(), t.Content.ToCadence()) } type Prepare struct { @@ -26,22 +29,25 @@ type Prepare struct { } func (p Prepare) ToCadence() string { - return fmt.Sprintf("prepare(signer: AuthAccount) { %s }", p.Content.ToCadence()) + return fmt.Sprintf("prepare(signer: auth(Storage, Capabilities, Contracts) &Account) { %s }", 
p.Content.ToCadence()) } type Contract struct { + Imports Imports Name string Members []CadenceCode } func (c Contract) ToCadence() string { - memberStrings := make([]string, len(c.Members)) for i, member := range c.Members { memberStrings[i] = member.ToCadence() } - return fmt.Sprintf("access(all) contract %s { %s }", c.Name, strings.Join(memberStrings, "\n")) + return fmt.Sprintf(` + %s + access(all) contract %s { %s } + `, c.Imports.ToCadence(), c.Name, strings.Join(memberStrings, "\n")) } type Resource struct { @@ -68,17 +74,35 @@ func (i Import) ToCadence() string { return "" } -type UpdateAccountCode struct { - Code string - Name string +type Imports []Import + +func (i Imports) ToCadence() string { + imports := "" + for _, imp := range i { + imports += imp.ToCadence() + } + return imports } -func (u UpdateAccountCode) ToCadence() string { +type SetAccountCode struct { + Code string + Name string + Update bool +} + +func (u SetAccountCode) ToCadence() string { bytes := []byte(u.Code) hexCode := hex.EncodeToString(bytes) + if u.Update { + return fmt.Sprintf(` + let code = "%s" + signer.contracts.update(name: "%s", code: code.decodeHex()) + `, hexCode, u.Name) + } + return fmt.Sprintf(` let code = "%s" signer.contracts.add(name: "%s", code: code.decodeHex()) @@ -92,7 +116,7 @@ type Main struct { } func (m Main) ToCadence() string { - return fmt.Sprintf("%s \npub fun main(): %s { %s }", m.Import.ToCadence(), m.ReturnType, m.Code) + return fmt.Sprintf("%s \naccess(all) fun main(): %s { %s }", m.Import.ToCadence(), m.ReturnType, m.Code) } type Code string diff --git a/utils/grpcutils/grpc.go b/utils/grpcutils/grpc.go index 3167b025dff..76d56b10b1b 100644 --- a/utils/grpcutils/grpc.go +++ b/utils/grpcutils/grpc.go @@ -8,14 +8,13 @@ import ( lcrypto "github.com/libp2p/go-libp2p/core/crypto" libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls" + "github.com/onflow/crypto" - "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/network/p2p/keyutils" ) -// DefaultMaxMsgSize use 20MB as the default message size limit. -// grpc library default is 4MB -const DefaultMaxMsgSize = 1024 * 1024 * 20 +// NoCompressor use when no specific compressor name provided, which effectively means no compression. +const NoCompressor = "" // CertificateConfig is used to configure an Certificate type CertificateConfig struct { @@ -107,6 +106,7 @@ func DefaultClientTLSConfig(publicKey crypto.PublicKey) (*tls.Config, error) { config := &tls.Config{ MinVersion: tls.VersionTLS13, // This is not insecure here. We will verify the cert chain ourselves. 
+ // nolint InsecureSkipVerify: true, ClientAuth: tls.RequireAnyClientCert, } @@ -136,7 +136,7 @@ func verifyPeerCertificateFunc(expectedPublicKey crypto.PublicKey) (func(rawCert for i := 0; i < len(rawCerts); i++ { cert, err := x509.ParseCertificate(rawCerts[i]) if err != nil { - return newServerAuthError(err.Error()) + return newServerAuthError("failed to parse certificate: %s", err.Error()) } chain[i] = cert } @@ -145,7 +145,7 @@ func verifyPeerCertificateFunc(expectedPublicKey crypto.PublicKey) (func(rawCert // extension, extract the remote's public key and finally verifies the signature included in the certificate actualLibP2PKey, err := libp2ptls.PubKeyFromCertChain(chain) if err != nil { - return newServerAuthError(err.Error()) + return newServerAuthError("could not convert certificate to libp2p public key: %s", err.Error()) } // verify that the public key received is the one that is expected @@ -165,7 +165,7 @@ func verifyPeerCertificateFunc(expectedPublicKey crypto.PublicKey) (func(rawCert func libP2PKeyToHexString(key lcrypto.PubKey) (string, *ServerAuthError) { keyRaw, err := key.Raw() if err != nil { - return "", newServerAuthError(err.Error()) + return "", newServerAuthError("could not convert public key to hex string: %s", err.Error()) } return hex.EncodeToString(keyRaw), nil } diff --git a/utils/grpcutils/grpc_e2e_test.go b/utils/grpcutils/grpc_e2e_test.go new file mode 100644 index 00000000000..8287a0c936d --- /dev/null +++ b/utils/grpcutils/grpc_e2e_test.go @@ -0,0 +1,84 @@ +package grpcutils_test + +import ( + "context" + "net" + "testing" + "time" + + "github.com/onflow/crypto" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/onflow/flow-go/utils/grpcutils" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestTLSConnection tests a simple gRPC connection using our default client and server TLS configs +func TestTLSConnection(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + serverCreds, serverCert := generateServerCredentials(t) + clientCreds := generateClientCredentials(t, serverCert) + + serverURL, serverDone := runServer(t, ctx, serverCreds) + + conn, err := grpc.NewClient(serverURL, grpc.WithTransportCredentials(clientCreds)) + require.NoError(t, err) + defer conn.Close() + + healthClient := grpc_health_v1.NewHealthClient(conn) + + resp, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) + require.NoError(t, err) + + require.Equal(t, grpc_health_v1.HealthCheckResponse_SERVING, resp.GetStatus()) + + cancel() + unittest.RequireCloseBefore(t, serverDone, time.Second, "server did not stop on time") +} + +func generateServerCredentials(t *testing.T) (credentials.TransportCredentials, crypto.PublicKey) { + netPriv := unittest.NetworkingPrivKeyFixture() + + x509Certificate, err := grpcutils.X509Certificate(netPriv) + require.NoError(t, err) + + tlsConfig := grpcutils.DefaultServerTLSConfig(x509Certificate) + return credentials.NewTLS(tlsConfig), netPriv.PublicKey() +} + +func generateClientCredentials(t *testing.T, serverCert crypto.PublicKey) credentials.TransportCredentials { + clientTLSConfig, err := grpcutils.DefaultClientTLSConfig(serverCert) + require.NoError(t, err) + + return credentials.NewTLS(clientTLSConfig) +} + +func runServer(t *testing.T, ctx context.Context, creds credentials.TransportCredentials) (string, <-chan struct{}) { + grpcServer := 
grpc.NewServer(grpc.Creds(creds))
+
+	healthServer := health.NewServer()
+	healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_SERVING)
+	grpc_health_v1.RegisterHealthServer(grpcServer, healthServer)
+
+	lis, err := net.Listen("tcp", "127.0.0.1:0") // OS-assigned free port
+	require.NoError(t, err)
+
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		err := grpcServer.Serve(lis)
+		require.NoError(t, err)
+	}()
+
+	go func() {
+		<-ctx.Done()
+		grpcServer.GracefulStop()
+	}()
+
+	return lis.Addr().String(), done
+}
diff --git a/utils/logging/consts.go b/utils/logging/consts.go
index 31cfef3078a..de4ed2c84bc 100644
--- a/utils/logging/consts.go
+++ b/utils/logging/consts.go
@@ -1,5 +1,27 @@
 package logging
 
-// KeySuspicious is a logging label that is used to flag the log event as suspicious behavior
-// This is used to add an easily searchable label to the log event
-const KeySuspicious = "suspicious"
+const (
+	// KeySuspicious is a logging label. It flags that the log event informs about suspected BYZANTINE
+	// behaviour observed by this node. Adding such labels is beneficial for easily searching log events
+	// regarding misbehaving nodes.
+	// Axiomatically, each node considers its own operator as well as Flow's governance committee as trusted.
+	// Hence, potential problems with inputs (mostly configurations) from these sources are *not* considered
+	// byzantine. To flag inputs from these sources, please use `logging.KeyPotentialConfigurationProblem`.
+	KeySuspicious = "suspicious"
+
+	// KeyPotentialConfigurationProblem is a logging label. It flags that the log event informs about a
+	// suspected configuration problem detected by this node. Adding such labels is beneficial for easily
+	// searching log events regarding potential configuration problems.
+	KeyPotentialConfigurationProblem = "potential-configuration-problem"
+
+	// KeyNetworkingSecurity is a logging label that is used to flag the log event as a networking security issue.
+	// This is used to add an easily searchable label to the log events.
+	KeyNetworkingSecurity = "networking-security"
+
+	// KeyProtocolViolation is a logging label that is used to flag the log event as a byzantine protocol violation.
+	// This is used to add an easily searchable label to the log events.
+	KeyProtocolViolation = "byzantine-protocol-violation"
+
+	// KeyLoad is a logging label that is used to flag the log event as a load issue.
+	KeyLoad = "load"
+)
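For context, a hedged sketch of how such a label is typically attached: the label is just a searchable log field, so any zerolog field type works. The `logger` and `originID` values below are assumed to be in scope and are not part of this patch.

```go
// Sketch: flagging an event as suspected byzantine behaviour so it can later
// be found by searching for the `suspicious` key. Assumes `logger` is a
// zerolog.Logger and `originID` is a flow.Identifier.
logger.Warn().
	Bool(logging.KeySuspicious, true).
	Hex("origin_id", originID[:]).
	Msg("received block proposal with invalid QC")
```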
diff --git a/utils/logging/identifier.go b/utils/logging/identifier.go
index 1cac5cd522c..4df1ca9af3e 100644
--- a/utils/logging/identifier.go
+++ b/utils/logging/identifier.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package logging
 
 import (
diff --git a/utils/logging/json.go b/utils/logging/json.go
index bcf4e49c285..3c9e50908a7 100644
--- a/utils/logging/json.go
+++ b/utils/logging/json.go
@@ -1,5 +1,3 @@
-// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED
-
 package logging
 
 import (
diff --git a/utils/merr/closer.go b/utils/merr/closer.go
new file mode 100644
index 00000000000..bb59c143b47
--- /dev/null
+++ b/utils/merr/closer.go
@@ -0,0 +1,31 @@
+package merr
+
+import (
+	"io"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+// CloseAndMergeError closes the closable and merges the close error with the given err into a multierror.
+// Note: when using this function in a deferred call, don't use it as below:
+//
+//	func XXX() (
+//		err error,
+//	) {
+//		defer func() {
+//			// bad, because the definition of err might get overwritten by another deferred function
+//			err = CloseAndMergeError(closable, err)
+//		}()
+//
+// Better to use it as below:
+//
+//	func XXX() (
+//		errToReturn error,
+//	) {
+//		defer func() {
+//			// good, because the error to be returned is only updated here, and is guaranteed to be returned
+//			errToReturn = CloseAndMergeError(closable, errToReturn)
+//		}()
+func CloseAndMergeError(closable io.Closer, err error) error {
+	return multierror.Append(err, closable.Close()).ErrorOrNil()
+}
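A short usage sketch of the pattern the doc comment recommends (not part of this patch; `readAll` is an illustrative name):

```go
// readAll demonstrates the recommended defer pattern: the named return value
// errToReturn is only assigned in the deferred call, so a failure from Close
// is merged into the result instead of being silently dropped.
func readAll(path string) (data []byte, errToReturn error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer func() {
		errToReturn = merr.CloseAndMergeError(f, errToReturn)
	}()

	return io.ReadAll(f)
}
```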
diff --git a/utils/merr/closer_test.go b/utils/merr/closer_test.go
new file mode 100644
index 00000000000..ee546eb7518
--- /dev/null
+++ b/utils/merr/closer_test.go
@@ -0,0 +1,62 @@
+package merr
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/storage"
+)
+
+// mockCloser is a mock implementation of io.Closer
+type mockCloser struct {
+	closeError error
+}
+
+// Close is a mock implementation of io.Closer.Close
+func (c *mockCloser) Close() error {
+	return c.closeError
+}
+
+func TestCloseAndMergeError(t *testing.T) {
+	// Create a mock closer
+	closer := &mockCloser{}
+
+	// Test case 1: no error
+	err := CloseAndMergeError(closer, nil)
+	require.Nil(t, err)
+
+	// Test case 2: only original error
+	err = CloseAndMergeError(closer, errors.New("original error"))
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "original error")
+
+	// Test case 3: only close error
+	closer.closeError = errors.New("close error")
+	err = CloseAndMergeError(closer, nil)
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "close error")
+
+	// Test case 4: both original error and close error
+	err = CloseAndMergeError(closer, errors.New("original error"))
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "original error")
+	require.Contains(t, err.Error(), "close error")
+
+	// Test case 5: original error is storage.ErrNotFound
+	err = CloseAndMergeError(closer, fmt.Errorf("not found error: %w", storage.ErrNotFound))
+	require.Error(t, err)
+	require.ErrorIs(t, err, storage.ErrNotFound)
+
+	// Test case 6: close error is storage.ErrNotFound
+	closer.closeError = fmt.Errorf("not found error: %w", storage.ErrNotFound)
+	err = CloseAndMergeError(closer, nil)
+	require.ErrorIs(t, err, storage.ErrNotFound)
+
+	// Test case 7: error check works with multierror
+	closer.closeError = fmt.Errorf("exception")
+	err = CloseAndMergeError(closer, fmt.Errorf("not found error: %w", storage.ErrNotFound))
+	require.ErrorIs(t, err, storage.ErrNotFound)
+}
diff --git a/utils/noop/closer.go b/utils/noop/closer.go
new file mode 100644
index 00000000000..994038362de
--- /dev/null
+++ b/utils/noop/closer.go
@@ -0,0 +1,9 @@
+package noop
+
+import "io"
+
+type Closer struct{}
+
+var _ io.Closer = (*Closer)(nil)
+
+func (Closer) Close() error { return nil }
diff --git a/utils/rand/rand.go b/utils/rand/rand.go
index c589ae67868..929f944b391 100644
--- a/utils/rand/rand.go
+++ b/utils/rand/rand.go
@@ -1,18 +1,28 @@
+// Package rand is a wrapper around `crypto/rand` that uses the system RNG underneath
+// to extract secure entropy.
+//
+// It implements useful tools that are not exported by the `crypto/rand` package.
+// This package should be used instead of `math/rand` for any use-case requiring
+// secure randomness. It provides similar APIs to the ones provided by `math/rand`.
+// This package does not implement any deterministic RNG (Pseudo-RNG) based on
+// user input seeds. For the deterministic use-cases please use `github.com/onflow/crypto/random`.
+//
+// Functions in this package may return an error if the underlying system implementation fails
+// to read new random values. When that happens, this package considers it an irrecoverable exception.
 package rand
 
 import (
 	"crypto/rand"
+	"encoding/base64"
 	"encoding/binary"
 	"fmt"
 )
 
-// This package is a wrppaer around true RNG crypto/rand.
-// It implements useful tools using the true RNG and that
-// are not exported by the crypto/rand package.
-// This package does not implement any determinstic RNG (Pseudo RNG)
-// unlike the package flow-go/crypto/random.
-
-// returns a random uint64
+// Uint64 returns a random uint64.
+//
+// It returns:
+// - (0, exception) if crypto/rand fails to provide entropy which is likely a result of a system error.
+// - (random, nil) otherwise
 func Uint64() (uint64, error) {
 	// allocate a new memory at each call. Another possibility
 	// is to use a global variable but that would make the package non thread safe
@@ -24,8 +34,13 @@ func Uint64() (uint64, error) {
 	return r, nil
 }
 
-// returns a random uint64 strictly less than n
-// errors if n==0
+// Uint64n returns a random uint64 strictly less than `n`.
+// `n` has to be a strictly positive integer.
+//
+// It returns:
+// - (0, exception) if `n==0`
+// - (0, exception) if crypto/rand fails to provide entropy which is likely a result of a system error.
+// - (random, nil) otherwise
 func Uint64n(n uint64) (uint64, error) {
 	if n == 0 {
 		return 0, fmt.Errorf("n should be strictly positive, got %d", n)
@@ -66,29 +81,49 @@ func Uint64n(n uint64) (uint64, error) {
 	return random, nil
 }
 
-// returns a random uint32
+// Uint32 returns a random uint32.
+//
+// It returns:
+// - (0, exception) if crypto/rand fails to provide entropy which is likely a result of a system error.
+// - (random, nil) otherwise
 func Uint32() (uint32, error) {
 	// for 64-bits machines, doing 64 bits operations and then casting
 	// should be faster than dealing with 32 bits operations
 	r, err := Uint64()
+	// 64 bits are sampled but only 32 bits are used. This does not affect the uniformity of the output
+	// assuming that the 64-bits distribution is uniform
 	return uint32(r), err
 }
 
-// returns a random uint32 strictly less than n
-// errors if n==0
+// Uint32n returns a random uint32 strictly less than `n`.
+//
+// It returns:
+// - (0, exception) if `n==0`
+// - (0, exception) if crypto/rand fails to provide entropy which is likely a result of a system error.
+// - (random, nil) otherwise
 func Uint32n(n uint32) (uint32, error) {
 	r, err := Uint64n(uint64(n))
-	return uint32(r), err
+	return uint32(r), err // `r` is less than `n` and necessarily fits in 32 bits
 }
 
-// returns a random uint
+// Uint returns a random uint.
+//
+// It returns:
+// - (0, exception) if crypto/rand fails to provide entropy which is likely a result of a system error.
+// - (random, nil) otherwise
 func Uint() (uint, error) {
 	r, err := Uint64()
 	return uint(r), err
 }
 
-// returns a random uint strictly less than n
-// errors if n==0
+// Uintn returns a random uint strictly less than `n`.
+// `n` has to be a strictly positive integer.
+//
+// It returns:
+// - (0, exception) if `n==0`
+// - (0, exception) if crypto/rand fails to provide entropy which is likely a result of a system error.
+// - (random, nil) otherwise
 func Uintn(n uint) (uint, error) {
 	r, err := Uint64n(uint64(n))
 	return uint(r), err
@@ -99,22 +134,29 @@ func Uintn(n uint) (uint, error) {
 // It is not deterministic.
 //
 // It implements Fisher-Yates Shuffle using crypto/rand as a source of randoms.
+// It uses O(1) space and O(n) time
 //
-// O(1) space and O(n) time
+// It returns:
+// - (exception) if crypto/rand fails to provide entropy which is likely a result of a system error.
+// - (nil) otherwise
 func Shuffle(n uint, swap func(i, j uint)) error {
 	return Samples(n, n, swap)
 }
 
-// Samples picks randomly m elements out of n elemnts in a data structure
+// Samples picks randomly `m` elements out of `n` elements in a data structure
 // and places them in random order at indices [0,m-1],
 // the swapping being implemented in place. The data structure is defined
-// by the `swap` function.
-// Sampling is not deterministic.
+// by the `swap` function itself.
+// Sampling is not deterministic, like the other functions of the package.
 //
-// It implements the first (m) elements of Fisher-Yates Shuffle using
-// crypto/rand as a source of randoms.
+// It implements the first `m` elements of Fisher-Yates Shuffle using
+// crypto/rand as a source of randoms. `m` has to be less than or equal to `n`.
+// It uses O(1) space and O(m) time
 //
-// O(1) space and O(m) time
+// It returns:
+// - (exception) if `n < m`
+// - (exception) if crypto/rand fails to provide entropy which is likely a result of a system error.
+// - (nil) otherwise
 func Samples(n uint, m uint, swap func(i, j uint)) error {
 	if n < m {
 		return fmt.Errorf("sample size (%d) cannot be larger than entire population (%d)", m, n)
@@ -128,3 +170,34 @@ func Samples(n uint, m uint, swap func(i, j uint)) error {
 	}
 	return nil
 }
+
+// GenerateRandomString generates a cryptographically secure random string of size n.
+// n must be > 0
+func GenerateRandomString(length int) (string, error) {
+	if length <= 0 {
+		return "", fmt.Errorf("length should be greater than 0, got %d", length)
+	}
+
+	// The base64 encoding uses 64 different characters to represent data in
+	// strings, which makes it possible to represent 6 bits of data with each
+	// character (as 2^6 is 64). This means that every 3 bytes (24 bits) of
+	// input data will be represented by 4 characters (4 * 6 bits) in the
+	// base64 encoding. Consequently, base64 encoding increases the size of
+	// the data by approximately 1/3 compared to the original input data.
+	//
+	// 1. (n+3) / 4 - This calculates how many groups of 4 characters are needed
+	//    in the base64 encoded output to represent at least 'n' characters.
+	//    The +3 ensures rounding up, as integer division truncates the result.
+	//
+	// 2. ... * 3 - Each group of 4 base64 characters represents 3 bytes
+	//    of input data. This multiplication calculates the number of bytes
+	//    needed to produce the required length of the base64 string.
+	byteSlice := make([]byte, (length+3)/4*3)
+	_, err := rand.Read(byteSlice)
+	if err != nil {
+		return "", fmt.Errorf("failed to generate random string: %w", err)
+	}
+
+	encodedString := base64.URLEncoding.EncodeToString(byteSlice)
+	return encodedString[:length], nil
+}
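A quick check of the sizing arithmetic in `GenerateRandomString`: for `length = 32`, `(32+3)/4 = 8` groups of 4 output characters require `8*3 = 24` input bytes, which base64-encode to exactly 32 characters. The sketch below (not part of this patch; the test name is illustrative, and testify's `require` is assumed as used elsewhere in this diff) generalizes that bound:

```go
// TestRandomStringSizing verifies that the (length+3)/4*3 byte allocation
// always yields at least `length` base64 characters to slice from.
func TestRandomStringSizing(t *testing.T) {
	for _, length := range []int{1, 10, 32, 100} {
		n := (length + 3) / 4 * 3                   // bytes allocated by GenerateRandomString
		encoded := base64.URLEncoding.EncodedLen(n) // characters produced by encoding
		require.GreaterOrEqual(t, encoded, length)
	}
}
```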
diff --git a/utils/rand/rand_test.go b/utils/rand/rand_test.go
index 14f00559d62..35fe273a2e1 100644
--- a/utils/rand/rand_test.go
+++ b/utils/rand/rand_test.go
@@ -1,49 +1,16 @@
 package rand
 
 import (
-	"fmt"
 	"math"
 	mrand "math/rand"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"gonum.org/v1/gonum/stat"
 
-	_ "github.com/onflow/flow-go/crypto/random"
+	"github.com/onflow/crypto/random"
 )
 
-// TODO: these functions are copied from flow-go/crypto/rand
-// Once the new flow-go/crypto/ module version is tagged, flow-go would upgrade
-// to the new version and import these functions
-func BasicDistributionTest(t *testing.T, n uint64, classWidth uint64, randf func() (uint64, error)) {
-	// sample size should ideally be a high number multiple of `n`
-	// but if `n` is too small, we could use a small sample size so that the test
-	// isn't too slow
-	sampleSize := 1000 * n
-	if n < 100 {
-		sampleSize = (80000 / n) * n // highest multiple of n less than 80000
-	}
-	distribution := make([]float64, n)
-	// populate the distribution
-	for i := uint64(0); i < sampleSize; i++ {
-		r, err := randf()
-		require.NoError(t, err)
-		if n*classWidth != 0 {
-			require.Less(t, r, n*classWidth)
-		}
-		distribution[r/classWidth] += 1.0
-	}
-	EvaluateDistributionUniformity(t, distribution)
-}
-
-func EvaluateDistributionUniformity(t *testing.T, distribution []float64) {
-	tolerance := 0.05
-	stdev := stat.StdDev(distribution, nil)
-	mean := stat.Mean(distribution, nil)
-	assert.Greater(t, tolerance*mean, stdev, fmt.Sprintf("basic randomness test failed: n: %d, stdev: %v, mean: %v", len(distribution), stdev, mean))
-}
-
 func TestRandomIntegers(t *testing.T) {
 	t.Run("basic uniformity", func(t *testing.T) {
@@ -56,7 +23,7 @@ func TestRandomIntegers(t *testing.T) {
 			r, err := Uint()
 			return uint64(r), err
 		}
-		BasicDistributionTest(t, uint64(n), uint64(classWidth), uintf)
+		random.BasicDistributionTest(t, uint64(n), uint64(classWidth), uintf)
 	})
 
 	t.Run("Uint64", func(t *testing.T) {
@@ -64,7 +31,7 @@ func TestRandomIntegers(t *testing.T) {
 		// n is a random power of 2 (from 2 to 2^10)
 		n := 1 << (1 + mrand.Intn(10))
 		classWidth := (math.MaxUint64 / uint64(n)) + 1
-		BasicDistributionTest(t, uint64(n), uint64(classWidth), Uint64)
+		random.BasicDistributionTest(t, uint64(n), uint64(classWidth), Uint64)
 	})
 
 	t.Run("Uint32", func(t *testing.T) {
@@ -76,7 +43,7 @@ func TestRandomIntegers(t *testing.T) {
 			r, err := Uint32()
 			return uint64(r), err
 		}
-		BasicDistributionTest(t, uint64(n), uint64(classWidth), uintf)
+		random.BasicDistributionTest(t, uint64(n), uint64(classWidth), uintf)
 	})
 
 	t.Run("Uintn", func(t *testing.T) {
@@ -86,7 +53,7 @@ func TestRandomIntegers(t *testing.T) {
 			return uint64(r), err
 		}
 		// classWidth is 1 since `n` is small
-		BasicDistributionTest(t, uint64(n), uint64(1), uintf)
+
random.BasicDistributionTest(t, uint64(n), uint64(1), uintf) }) t.Run("Uint64n", func(t *testing.T) { @@ -95,7 +62,7 @@ func TestRandomIntegers(t *testing.T) { return Uint64n(uint64(n)) } // classWidth is 1 since `n` is small - BasicDistributionTest(t, uint64(n), uint64(1), uintf) + random.BasicDistributionTest(t, uint64(n), uint64(1), uintf) }) t.Run("Uint32n", func(t *testing.T) { @@ -105,7 +72,7 @@ func TestRandomIntegers(t *testing.T) { return uint64(r), err } // classWidth is 1 since `n` is small - BasicDistributionTest(t, uint64(n), uint64(1), uintf) + random.BasicDistributionTest(t, uint64(n), uint64(1), uintf) }) }) @@ -169,7 +136,7 @@ func TestShuffle(t *testing.T) { } // if the shuffle is uniform, the test element // should end up uniformly in all positions of the slice - EvaluateDistributionUniformity(t, distribution) + random.EvaluateDistributionUniformity(t, distribution) }) t.Run("shuffle a same permutation", func(t *testing.T) { @@ -182,7 +149,7 @@ func TestShuffle(t *testing.T) { } // if the shuffle is uniform, the test element // should end up uniformly in all positions of the slice - EvaluateDistributionUniformity(t, distribution) + random.EvaluateDistributionUniformity(t, distribution) }) }) @@ -232,10 +199,10 @@ func TestSamples(t *testing.T) { } // if the sampling is uniform, all elements // should end up being sampled an equivalent number of times - EvaluateDistributionUniformity(t, samplingDistribution) + random.EvaluateDistributionUniformity(t, samplingDistribution) // if the sampling is uniform, the test element // should end up uniformly in all positions of the sample slice - EvaluateDistributionUniformity(t, orderingDistribution) + random.EvaluateDistributionUniformity(t, orderingDistribution) }) t.Run("zero edge cases", func(t *testing.T) { @@ -257,3 +224,13 @@ func TestSamples(t *testing.T) { assert.Equal(t, constant, fullSlice) }) } + +func TestRandomString(t *testing.T) { + t.Run("basic random string", func(t *testing.T) { + length := 32 + str, err := GenerateRandomString(length) + require.NoError(t, err) + t.Logf("string: %s", str) + require.Equal(t, length, len(str)) + }) +} diff --git a/utils/slices/slices.go b/utils/slices/slices.go index d2333f2d5aa..a8ac7982467 100644 --- a/utils/slices/slices.go +++ b/utils/slices/slices.go @@ -1,6 +1,10 @@ package slices -import "sort" +import ( + "sort" + + "golang.org/x/exp/constraints" +) // Concat concatenates multiple []byte into one []byte with efficient one-time allocation. func Concat(slices [][]byte) []byte { @@ -28,15 +32,25 @@ func EnsureByteSliceSize(b []byte, length int) []byte { return stateBytes } -// MakeRange returns a slice of int from [min, max] -func MakeRange(min, max int) []int { - a := make([]int, max-min+1) +// MakeRange returns a slice of numbers [min, max). +// The range includes min and excludes max. +func MakeRange[T constraints.Integer](min, max T) []T { + a := make([]T, max-min) for i := range a { - a[i] = min + i + a[i] = min + T(i) } return a } +// Fill constructs a slice of type T with length n. The slice is then filled with input "val". +func Fill[T any](val T, n int) []T { + arr := make([]T, n) + for i := 0; i < n; i++ { + arr[i] = val + } + return arr +} + // AreStringSlicesEqual returns true if the two string slices are equal. 
func AreStringSlicesEqual(a, b []string) bool { if len(a) != len(b) { diff --git a/utils/test_matrix/test_matrix.go b/utils/test_matrix/test_matrix.go deleted file mode 100644 index faa8d0bc075..00000000000 --- a/utils/test_matrix/test_matrix.go +++ /dev/null @@ -1,130 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - "strings" - - "golang.org/x/tools/go/packages" -) - -const flowPackagePrefix = "github.com/onflow/flow-go/" -const ciMatrixName = "dynamicMatrix" - -// testMatrix represents a single GitHub Actions test matrix combination that consists of a name and a list of flow-go packages associated with that name. -type testMatrix struct { - Name string `json:"name"` - Packages string `json:"packages"` -} - -// Generates a list of packages to test that will be passed to GitHub Actions -func main() { - if len(os.Args) == 1 { - fmt.Fprintln(os.Stderr, "must have at least 1 package listed") - return - } - - allFlowPackages := listAllFlowPackages() - - targetPackages, seenPackages := listTargetPackages(os.Args[1:], allFlowPackages) - - otherPackages := listOtherPackages(allFlowPackages, seenPackages) - - testMatrix := generateTestMatrix(targetPackages, otherPackages) - - // generate JSON output that will be read in by CI matrix - // can't use json.MarshalIndent because fromJSON() in CI can’t read JSON with any spaces - testMatrixBytes, err := json.Marshal(testMatrix) - if err != nil { - panic(err) - } - - // this string will be read by CI to generate groups of tests to run in separate CI jobs - testMatrixStr := "::set-output name=" + ciMatrixName + "::" + string(testMatrixBytes) - - // very important to add newline character at the end of the compacted JSON - otherwise fromJSON() in CI will throw unmarshalling error - fmt.Println(testMatrixStr) -} - -func generateTestMatrix(targetPackages map[string][]string, otherPackages []string) []testMatrix { - - var testMatrices []testMatrix - - for names := range targetPackages { - targetTestMatrix := testMatrix{ - Name: names, - Packages: strings.Join(targetPackages[names], " "), - } - testMatrices = append(testMatrices, targetTestMatrix) - } - - // add the other packages after all target packages added - otherTestMatrix := testMatrix{ - Name: "others", - Packages: strings.Join(otherPackages, " "), - } - - testMatrices = append(testMatrices, otherTestMatrix) - - return testMatrices -} - -// listTargetPackages returns a map-list of target packages to run as separate CI jobs, based on a list of target package prefixes. -// It also returns a list of the "seen" packages that can then be used to extract the remaining packages to run (in a separate CI job). -func listTargetPackages(targetPackagePrefixes []string, allFlowPackages []string) (map[string][]string, map[string]string) { - targetPackages := make(map[string][]string) - - // Stores list of packages already seen / allocated to other lists. Needed for the last package which will - // have all the leftover packages that weren't allocated to a separate list (CI job). - // It's a map, not a list, to make it easier to check if a package was seen or not. 
- seenPackages := make(map[string]string) - - // iterate over the target packages to run as separate CI jobs - for _, targetPackagePrefix := range targetPackagePrefixes { - var targetPackage []string - - // go through all packages to see which ones to pull out - for _, allPackage := range allFlowPackages { - if strings.HasPrefix(allPackage, flowPackagePrefix+targetPackagePrefix) { - targetPackage = append(targetPackage, allPackage) - seenPackages[allPackage] = allPackage - } - } - if len(targetPackage) == 0 { - panic("no packages exist with prefix " + targetPackagePrefix) - } - targetPackages[targetPackagePrefix] = targetPackage - } - return targetPackages, seenPackages -} - -// listOtherPackages compiles the remaining packages that don't match any of the target packages. -func listOtherPackages(allFlowPackages []string, seenPackages map[string]string) []string { - var otherPackages []string - - for _, allFlowPackage := range allFlowPackages { - _, seen := seenPackages[allFlowPackage] - if !seen { - otherPackages = append(otherPackages, allFlowPackage) - } - } - - if len(otherPackages) == 0 { - panic("other packages list can't be 0") - } - return otherPackages -} - -func listAllFlowPackages() []string { - flowPackages, err := packages.Load(&packages.Config{}, "./...") - - if err != nil { - panic(err) - } - var flowPackagesStr []string - for _, p := range flowPackages { - flowPackagesStr = append(flowPackagesStr, p.PkgPath) - } - return flowPackagesStr -} diff --git a/utils/test_matrix/test_matrix_test.go b/utils/test_matrix/test_matrix_test.go deleted file mode 100644 index 8e5d2e6a976..00000000000 --- a/utils/test_matrix/test_matrix_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package main - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -// Can't have a const []string so resorting to using a test helper function. 
-func getAllFlowPackages() []string { - return []string{ - flowPackagePrefix + "abc", - flowPackagePrefix + "abc/def", - flowPackagePrefix + "abc/def/ghi", - flowPackagePrefix + "def", - flowPackagePrefix + "def/abc", - flowPackagePrefix + "ghi", - flowPackagePrefix + "jkl", - flowPackagePrefix + "mno/abc", - flowPackagePrefix + "pqr", - flowPackagePrefix + "stu", - flowPackagePrefix + "vwx", - flowPackagePrefix + "vwx/ghi", - flowPackagePrefix + "yz", - } -} - -func TestListTargetPackages(t *testing.T) { - targetPackages, seenPackages := listTargetPackages([]string{"abc", "ghi"}, getAllFlowPackages()) - require.Equal(t, 2, len(targetPackages)) - require.Equal(t, 4, len(seenPackages)) - - // there should be 3 packages that start with "abc" - require.Equal(t, 3, len(targetPackages["abc"])) - require.Contains(t, targetPackages["abc"], flowPackagePrefix+"abc") - require.Contains(t, targetPackages["abc"], flowPackagePrefix+"abc/def") - require.Contains(t, targetPackages["abc"], flowPackagePrefix+"abc/def/ghi") - - // there should be 1 package that starts with "ghi" - require.Equal(t, 1, len(targetPackages["ghi"])) - require.Contains(t, targetPackages["ghi"], flowPackagePrefix+"ghi") - - require.Contains(t, seenPackages, flowPackagePrefix+"abc") - require.Contains(t, seenPackages, flowPackagePrefix+"abc/def") - require.Contains(t, seenPackages, flowPackagePrefix+"abc/def/ghi") - require.Contains(t, seenPackages, flowPackagePrefix+"ghi") -} - -func TestListOtherPackages(t *testing.T) { - var seenPackages = make(map[string]string) - seenPackages[flowPackagePrefix+"abc"] = flowPackagePrefix + "abc" - seenPackages[flowPackagePrefix+"ghi"] = flowPackagePrefix + "ghi" - seenPackages[flowPackagePrefix+"mno/abc"] = flowPackagePrefix + "mno/abc" - seenPackages[flowPackagePrefix+"stu"] = flowPackagePrefix + "stu" - - otherPackages := listOtherPackages(getAllFlowPackages(), seenPackages) - - require.Equal(t, 9, len(otherPackages)) - - require.Contains(t, otherPackages, flowPackagePrefix+"abc/def") - require.Contains(t, otherPackages, flowPackagePrefix+"abc/def/ghi") - require.Contains(t, otherPackages, flowPackagePrefix+"def") - require.Contains(t, otherPackages, flowPackagePrefix+"def/abc") - require.Contains(t, otherPackages, flowPackagePrefix+"jkl") - require.Contains(t, otherPackages, flowPackagePrefix+"pqr") - require.Contains(t, otherPackages, flowPackagePrefix+"vwx") - require.Contains(t, otherPackages, flowPackagePrefix+"vwx/ghi") - require.Contains(t, otherPackages, flowPackagePrefix+"yz") -} - -func TestGenerateTestMatrix(t *testing.T) { - targetPackages, seenPackages := listTargetPackages([]string{"abc", "ghi"}, getAllFlowPackages()) - require.Equal(t, 2, len(targetPackages)) - require.Equal(t, 4, len(seenPackages)) - - otherPackages := listOtherPackages(getAllFlowPackages(), seenPackages) - - matrix := generateTestMatrix(targetPackages, otherPackages) - - // should be 3 groups in test matrix: abc, ghi, others - require.Equal(t, 3, len(matrix)) - - require.Contains(t, matrix, testMatrix{ - Name: "abc", - Packages: "github.com/onflow/flow-go/abc github.com/onflow/flow-go/abc/def github.com/onflow/flow-go/abc/def/ghi"}, - ) - require.Contains(t, matrix, testMatrix{ - Name: "ghi", - Packages: "github.com/onflow/flow-go/ghi"}, - ) - require.Contains(t, matrix, testMatrix{ - Name: "others", - Packages: "github.com/onflow/flow-go/def github.com/onflow/flow-go/def/abc github.com/onflow/flow-go/jkl github.com/onflow/flow-go/mno/abc github.com/onflow/flow-go/pqr github.com/onflow/flow-go/stu 
github.com/onflow/flow-go/vwx github.com/onflow/flow-go/vwx/ghi github.com/onflow/flow-go/yz"}, - ) -} diff --git a/utils/unittest/block.go b/utils/unittest/block.go new file mode 100644 index 00000000000..10b334229de --- /dev/null +++ b/utils/unittest/block.go @@ -0,0 +1,100 @@ +package unittest + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" +) + +var Block blockFactory + +type blockFactory struct{} + +// BlockFixture initializes and returns a new *flow.Block instance. +func BlockFixture(opts ...func(*flow.Block)) *flow.Block { + header := BlockHeaderFixture() + block := BlockWithParentFixture(header) + for _, opt := range opts { + opt(block) + } + return block +} + +func (f *blockFactory) WithParent(parentID flow.Identifier, parentView uint64, parentHeight uint64) func(*flow.Block) { + return func(block *flow.Block) { + block.ParentID = parentID + block.ParentView = parentView + block.Height = parentHeight + 1 + block.View = parentView + 1 + } +} + +func (f *blockFactory) WithView(view uint64) func(*flow.Block) { + return func(block *flow.Block) { + block.View = view + } +} + +func (f *blockFactory) WithParentView(view uint64) func(*flow.Block) { + return func(block *flow.Block) { + block.ParentView = view + } +} + +func (f *blockFactory) WithHeight(height uint64) func(*flow.Block) { + return func(block *flow.Block) { + block.Height = height + } +} + +func (f *blockFactory) WithPayload(payload flow.Payload) func(*flow.Block) { + return func(b *flow.Block) { + b.Payload = payload + } +} + +func (f *blockFactory) WithProposerID(proposerID flow.Identifier) func(*flow.Block) { + return func(b *flow.Block) { + b.ProposerID = proposerID + } +} + +func (f *blockFactory) WithLastViewTC(lastViewTC *flow.TimeoutCertificate) func(*flow.Block) { + return func(block *flow.Block) { + block.LastViewTC = lastViewTC + } +} + +func (f *blockFactory) Genesis(chainID flow.ChainID) *flow.Block { + // create the raw content for the genesis block + payload := flow.Payload{ + ProtocolStateID: IdentifierFixture(), + } + + // create the headerBody + headerBody, err := flow.NewRootHeaderBody( + flow.UntrustedHeaderBody{ + ChainID: chainID, + ParentID: flow.ZeroID, + Height: 0, + Timestamp: uint64(flow.GenesisTime.UnixMilli()), + View: 0, + }, + ) + if err != nil { + panic(fmt.Errorf("failed to create root header body: %w", err)) + } + + // combine to block + block, err := flow.NewRootBlock( + flow.UntrustedBlock{ + HeaderBody: *headerBody, + Payload: payload, + }, + ) + if err != nil { + panic(fmt.Errorf("failed to create root block: %w", err)) + } + + return block +} diff --git a/utils/unittest/bytes.go b/utils/unittest/bytes.go new file mode 100644 index 00000000000..96238cc7ae0 --- /dev/null +++ b/utils/unittest/bytes.go @@ -0,0 +1,20 @@ +package unittest + +import ( + "crypto/rand" + "testing" + + "github.com/stretchr/testify/require" +) + +// RandomByteSlice is a test helper that generates a cryptographically secure random byte slice of size n. 
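The new utils/unittest/block.go above introduces an option-based block factory. A sketch of the intended call pattern from a test, using only names visible in the diff; the surrounding test itself is hypothetical:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/utils/unittest"
)

func TestChildBlockFixtureSketch(t *testing.T) {
	parent := unittest.BlockFixture()
	// WithParent wires ParentID and ParentView, and advances Height and View by one.
	child := unittest.BlockFixture(
		unittest.Block.WithParent(parent.ID(), parent.View, parent.Height),
	)
	require.Equal(t, parent.Height+1, child.Height)
	require.Equal(t, parent.ID(), child.ParentID)
}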
+func RandomByteSlice(t *testing.T, n int) []byte { + require.Greater(t, n, 0, "size should be positive") + + byteSlice := make([]byte, n) + n, err := rand.Read(byteSlice) + require.NoErrorf(t, err, "failed to generate random byte slice of size %d", n) + require.Equalf(t, n, len(byteSlice), "failed to generate random byte slice of size %d", n) + + return byteSlice +} diff --git a/utils/unittest/chain_suite.go b/utils/unittest/chain_suite.go index bd7b97fe52b..061c06829cb 100644 --- a/utils/unittest/chain_suite.go +++ b/utils/unittest/chain_suite.go @@ -4,12 +4,14 @@ import ( "fmt" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" mempool "github.com/onflow/flow-go/module/mempool/mock" module "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/state" realproto "github.com/onflow/flow-go/state/protocol" protocol "github.com/onflow/flow-go/state/protocol/mock" storerr "github.com/onflow/flow-go/storage" @@ -28,7 +30,7 @@ type BaseChainSuite struct { Approvers flow.IdentityList // BLOCKS - RootBlock flow.Block + RootBlock *flow.Block LatestSealedBlock flow.Block LatestFinalizedBlock *flow.Block UnfinalizedBlock flow.Block @@ -40,6 +42,9 @@ type BaseChainSuite struct { SealedSnapshot *protocol.Snapshot FinalSnapshot *protocol.Snapshot + KVStoreReader *protocol.KVStoreReader + ProtocolStateVersion uint64 + // MEMPOOLS and STORAGE which are injected into Matching Engine // mock storage.ExecutionReceipts: backed by in-memory map PersistedReceipts ReceiptsDB *storage.ExecutionReceipts @@ -71,7 +76,7 @@ func (bc *BaseChainSuite) SetupChain() { // ~~~~~~~~~~~~~~~~~~~~~~~~~~ SETUP IDENTITIES ~~~~~~~~~~~~~~~~~~~~~~~~~~ // - // asign node Identities + // assign node Identities con := IdentityFixture(WithRole(flow.RoleConsensus)) exe := IdentityFixture(WithRole(flow.RoleExecution)) ver := IdentityFixture(WithRole(flow.RoleVerification)) @@ -88,19 +93,19 @@ func (bc *BaseChainSuite) SetupChain() { // assign 4 nodes to the verification role bc.Approvers = IdentityListFixture(4, WithRole(flow.RoleVerification)) for _, verifier := range bc.Approvers { - bc.Identities[verifier.ID()] = verifier + bc.Identities[verifier.NodeID] = verifier } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SETUP BLOCKS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // RootBlock <- LatestSealedBlock <- LatestFinalizedBlock <- UnfinalizedBlock bc.RootBlock = BlockFixture() - bc.LatestSealedBlock = *BlockWithParentFixture(bc.RootBlock.Header) - latestFinalizedBlock := BlockWithParentFixture(bc.LatestSealedBlock.Header) + bc.LatestSealedBlock = *BlockWithParentFixture(bc.RootBlock.ToHeader()) + latestFinalizedBlock := BlockWithParentFixture(bc.LatestSealedBlock.ToHeader()) bc.LatestFinalizedBlock = latestFinalizedBlock - bc.UnfinalizedBlock = *BlockWithParentFixture(bc.LatestFinalizedBlock.Header) + bc.UnfinalizedBlock = *BlockWithParentFixture(bc.LatestFinalizedBlock.ToHeader()) bc.Blocks = make(map[flow.Identifier]*flow.Block) - bc.Blocks[bc.RootBlock.ID()] = &bc.RootBlock + bc.Blocks[bc.RootBlock.ID()] = bc.RootBlock bc.Blocks[bc.LatestSealedBlock.ID()] = &bc.LatestSealedBlock bc.Blocks[bc.LatestFinalizedBlock.ID()] = bc.LatestFinalizedBlock bc.Blocks[bc.UnfinalizedBlock.ID()] = &bc.UnfinalizedBlock @@ -108,6 +113,10 @@ func (bc *BaseChainSuite) SetupChain() { // ~~~~~~~~~~~~~~~~~~~~~~~~ SETUP PROTOCOL STATE ~~~~~~~~~~~~~~~~~~~~~~~~ // bc.State = &protocol.State{} + bc.KVStoreReader = 
&protocol.KVStoreReader{} + bc.ProtocolStateVersion = 2 // default to latest version + bc.KVStoreReader.On("GetProtocolStateVersion").Return(func() uint64 { return bc.ProtocolStateVersion }) + // define the protocol state snapshot of the latest finalized block bc.State.On("Final").Return( func() realproto.Snapshot { @@ -118,10 +127,11 @@ func (bc *BaseChainSuite) SetupChain() { bc.FinalSnapshot = &protocol.Snapshot{} bc.FinalSnapshot.On("Head").Return( func() *flow.Header { - return bc.LatestFinalizedBlock.Header + return bc.LatestFinalizedBlock.ToHeader() }, nil, ) + bc.FinalSnapshot.On("ProtocolState").Return(bc.KVStoreReader, nil) bc.FinalSnapshot.On("SealedResult").Return( func() *flow.ExecutionResult { blockID := bc.LatestFinalizedBlock.ID() @@ -165,16 +175,17 @@ func (bc *BaseChainSuite) SetupChain() { nil, ) bc.SealedSnapshot = &protocol.Snapshot{} + bc.SealedSnapshot.On("ProtocolState").Return(bc.KVStoreReader, nil) bc.SealedSnapshot.On("Head").Return( func() *flow.Header { - return bc.LatestSealedBlock.Header + return bc.LatestSealedBlock.ToHeader() }, nil, ) findBlockByHeight := func(blocks map[flow.Identifier]*flow.Block, height uint64) (*flow.Block, bool) { for _, block := range blocks { - if block.Header.Height == height { + if block.Height == height { return block, true } } @@ -188,7 +199,9 @@ func (bc *BaseChainSuite) SetupChain() { if !found { return StateSnapshotForUnknownBlock() } - return StateSnapshotForKnownBlock(block.Header, bc.Identities) + snapshot := StateSnapshotForKnownBlock(block.ToHeader(), bc.Identities) + snapshot.On("ProtocolState").Return(bc.KVStoreReader, nil) + return snapshot }, ) @@ -199,13 +212,14 @@ func (bc *BaseChainSuite) SetupChain() { snapshot := &protocol.Snapshot{} snapshot.On("Head").Return( func() *flow.Header { - return block.Header + return block.ToHeader() }, nil, ) + snapshot.On("ProtocolState").Return(bc.KVStoreReader, nil) return snapshot } - panic(fmt.Sprintf("unknown height: %v, final: %v, sealed: %v", height, bc.LatestFinalizedBlock.Header.Height, bc.LatestSealedBlock.Header.Height)) + panic(fmt.Sprintf("unknown height: %v, final: %v, sealed: %v", height, bc.LatestFinalizedBlock.Height, bc.LatestSealedBlock.Height)) }, ) @@ -246,7 +260,7 @@ func (bc *BaseChainSuite) SetupChain() { if !found { return nil } - return block.Header + return block.ToHeader() }, func(blockID flow.Identifier) error { _, found := bc.Blocks[blockID] @@ -256,18 +270,25 @@ func (bc *BaseChainSuite) SetupChain() { return nil }, ) + bc.HeadersDB.On("Exists", mock.Anything).Return( + func(blockID flow.Identifier) bool { + _, found := bc.Blocks[blockID] + return found + }, + func(blockID flow.Identifier) error { return nil }, + ) bc.HeadersDB.On("ByHeight", mock.Anything).Return( func(blockHeight uint64) *flow.Header { for _, b := range bc.Blocks { - if b.Header.Height == blockHeight { - return b.Header + if b.Height == blockHeight { + return b.ToHeader() } } return nil }, func(blockHeight uint64) error { for _, b := range bc.Blocks { - if b.Header.Height == blockHeight { + if b.Height == blockHeight { return nil } } @@ -283,25 +304,19 @@ func (bc *BaseChainSuite) SetupChain() { if !found { return nil } - if block.Payload == nil { - return nil - } return block.Payload.Index() }, func(blockID flow.Identifier) error { - block, found := bc.Blocks[blockID] + _, found := bc.Blocks[blockID] if !found { return storerr.ErrNotFound } - if block.Payload == nil { - return storerr.ErrNotFound - } return nil }, ) bc.SealsIndex = make(map[flow.Identifier]*flow.Seal) - firtSeal 
:= Seal.Fixture(Seal.WithBlock(bc.LatestSealedBlock.Header), + firtSeal := Seal.Fixture(Seal.WithBlock(bc.LatestSealedBlock.ToHeader()), Seal.WithResult(bc.LatestExecutionResult)) for id, block := range bc.Blocks { if id != bc.RootBlock.ID() { @@ -316,19 +331,13 @@ func (bc *BaseChainSuite) SetupChain() { if !found { return nil } - if block.Payload == nil { - return nil - } - return block.Payload + return &block.Payload }, func(blockID flow.Identifier) error { - block, found := bc.Blocks[blockID] + _, found := bc.Blocks[blockID] if !found { return storerr.ErrNotFound } - if block.Payload == nil { - return storerr.ErrNotFound - } return nil }, ) @@ -366,7 +375,7 @@ func (bc *BaseChainSuite) SetupChain() { bc.SealsPL = &mempool.IncorporatedResultSeals{} bc.SealsPL.On("Size").Return(uint(0)).Maybe() // only for metrics bc.SealsPL.On("Limit").Return(uint(1000)).Maybe() - bc.SealsPL.On("ByID", mock.Anything).Return( + bc.SealsPL.On("Get", mock.Anything).Return( func(sealID flow.Identifier) *flow.IncorporatedResultSeal { return bc.PendingSeals[sealID] }, @@ -392,13 +401,13 @@ func (bc *BaseChainSuite) SetupChain() { func StateSnapshotForUnknownBlock() *protocol.Snapshot { snapshot := &protocol.Snapshot{} snapshot.On("Identity", mock.Anything).Return( - nil, storerr.ErrNotFound, + nil, state.ErrUnknownSnapshotReference, ) snapshot.On("Identities", mock.Anything).Return( - nil, storerr.ErrNotFound, + nil, state.ErrUnknownSnapshotReference, ) snapshot.On("Head", mock.Anything).Return( - nil, storerr.ErrNotFound, + nil, state.ErrUnknownSnapshotReference, ) return snapshot } @@ -418,7 +427,7 @@ func StateSnapshotForKnownBlock(block *flow.Header, identities map[flow.Identifi }, ) snapshot.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { + func(selector flow.IdentityFilter[flow.Identity]) flow.IdentityList { var idts flow.IdentityList for _, i := range identities { if selector(i) { @@ -427,7 +436,7 @@ func StateSnapshotForKnownBlock(block *flow.Header, identities map[flow.Identifi } return idts }, - func(selector flow.IdentityFilter) error { + func(selector flow.IdentityFilter[flow.Identity]) error { return nil }, ) @@ -444,13 +453,6 @@ func ApprovalFor(result *flow.ExecutionResult, chunkIdx uint64, approverID flow. 
) } -func EntityWithID(expectedID flow.Identifier) interface{} { - return mock.MatchedBy( - func(entity flow.Entity) bool { - return expectedID == entity.ID() - }) -} - // subgraphFixture represents a subgraph of the blockchain: // // Result -----------------------------------> Block @@ -473,7 +475,7 @@ type subgraphFixture struct { Approvals map[uint64]map[flow.Identifier]*flow.ResultApproval // chunkIndex -> Verifier Node ID -> Approval } -// Generates a valid subgraph: +// ValidSubgraphFixture generates a valid subgraph: // let // - R1 be a result which pertains to blockA // - R2 be R1's previous result, @@ -484,13 +486,17 @@ type subgraphFixture struct { // blockA.ParentID == blockB.ID func (bc *BaseChainSuite) ValidSubgraphFixture() subgraphFixture { // BLOCKS: <- previousBlock <- block - parentBlock := BlockFixture() - parentBlock.SetPayload(PayloadFixture(WithGuarantees(CollectionGuaranteesFixture(12)...))) - block := BlockWithParentFixture(parentBlock.Header) - block.SetPayload(PayloadFixture(WithGuarantees(CollectionGuaranteesFixture(12)...))) + parentBlock := BlockFixture( + Block.WithPayload(PayloadFixture( + WithGuarantees(CollectionGuaranteesFixture(12)...))), + ) + block := BlockWithParentAndPayload( + parentBlock.ToHeader(), + PayloadFixture(WithGuarantees(CollectionGuaranteesFixture(12)...)), + ) // RESULTS for Blocks: - previousResult := ExecutionResultFixture(WithBlock(&parentBlock)) + previousResult := ExecutionResultFixture(WithBlock(parentBlock)) result := ExecutionResultFixture( WithBlock(block), WithPreviousResult(*previousResult), @@ -500,12 +506,13 @@ func (bc *BaseChainSuite) ValidSubgraphFixture() subgraphFixture { incorporatedResult := IncorporatedResult.Fixture(IncorporatedResult.WithResult(result)) // assign each chunk to 50% of validation Nodes and generate respective approvals - assignment := chunks.NewAssignment() + assignmentBuilder := chunks.NewAssignmentBuilder() assignedVerifiersPerChunk := uint(len(bc.Approvers) / 2) approvals := make(map[uint64]map[flow.Identifier]*flow.ResultApproval) for _, chunk := range incorporatedResult.Result.Chunks { - assignedVerifiers := bc.Approvers.Sample(assignedVerifiersPerChunk) - assignment.Add(chunk, assignedVerifiers.NodeIDs()) + assignedVerifiers, err := bc.Approvers.Sample(assignedVerifiersPerChunk) + require.NoError(bc.T(), err) + require.NoError(bc.T(), assignmentBuilder.Add(chunk.Index, assignedVerifiers.NodeIDs())) // generate approvals chunkApprovals := make(map[flow.Identifier]*flow.ResultApproval) @@ -517,11 +524,11 @@ func (bc *BaseChainSuite) ValidSubgraphFixture() subgraphFixture { return subgraphFixture{ Block: block, - ParentBlock: &parentBlock, + ParentBlock: parentBlock, Result: result, PreviousResult: previousResult, IncorporatedResult: incorporatedResult, - Assignment: assignment, + Assignment: assignmentBuilder.Build(), Approvals: approvals, } } @@ -529,7 +536,7 @@ func (bc *BaseChainSuite) ValidSubgraphFixture() subgraphFixture { func (bc *BaseChainSuite) Extend(block *flow.Block) { blockID := block.ID() bc.Blocks[blockID] = block - if seal, ok := bc.SealsIndex[block.Header.ParentID]; ok { + if seal, ok := bc.SealsIndex[block.ParentID]; ok { bc.SealsIndex[block.ID()] = seal } @@ -539,12 +546,13 @@ func (bc *BaseChainSuite) Extend(block *flow.Block) { IncorporatedResult.WithIncorporatedBlockID(blockID)) // assign each chunk to 50% of validation Nodes and generate respective approvals - assignment := chunks.NewAssignment() + assignmentBuilder := chunks.NewAssignmentBuilder() 
assignedVerifiersPerChunk := uint(len(bc.Approvers) / 2) approvals := make(map[uint64]map[flow.Identifier]*flow.ResultApproval) for _, chunk := range incorporatedResult.Result.Chunks { - assignedVerifiers := bc.Approvers.Sample(assignedVerifiersPerChunk) - assignment.Add(chunk, assignedVerifiers.NodeIDs()) + assignedVerifiers, err := bc.Approvers.Sample(assignedVerifiersPerChunk) + require.NoError(bc.T(), err) + require.NoError(bc.T(), assignmentBuilder.Add(chunk.Index, assignedVerifiers.NodeIDs())) // generate approvals chunkApprovals := make(map[flow.Identifier]*flow.ResultApproval) @@ -553,6 +561,7 @@ func (bc *BaseChainSuite) Extend(block *flow.Block) { } approvals[chunk.Index] = chunkApprovals } + assignment := assignmentBuilder.Build() bc.Assigner.On("Assign", incorporatedResult.Result, incorporatedResult.IncorporatedBlockID).Return(assignment, nil).Maybe() bc.Assignments[incorporatedResult.Result.ID()] = assignment bc.PersistedResults[result.ID()] = result @@ -562,7 +571,7 @@ func (bc *BaseChainSuite) Extend(block *flow.Block) { } } -// addSubgraphFixtureToMempools adds add entities in subgraph to mempools and persistent storage mocks +// AddSubgraphFixtureToMempools adds entities in subgraph to mempools and persistent storage mocks func (bc *BaseChainSuite) AddSubgraphFixtureToMempools(subgraph subgraphFixture) { bc.Blocks[subgraph.ParentBlock.ID()] = subgraph.ParentBlock bc.Blocks[subgraph.Block.ID()] = subgraph.Block diff --git a/utils/unittest/cluster.go b/utils/unittest/cluster.go index 80d8627342c..d36237a5f3c 100644 --- a/utils/unittest/cluster.go +++ b/utils/unittest/cluster.go @@ -2,17 +2,17 @@ package unittest import ( "fmt" - "sort" + + "golang.org/x/exp/slices" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/flow/order" ) // TransactionForCluster generates a transaction that will be assigned to the // target cluster ID. -func TransactionForCluster(clusters flow.ClusterList, target flow.IdentityList) flow.TransactionBody { +func TransactionForCluster(clusters flow.ClusterList, target flow.IdentitySkeletonList) flow.TransactionBody { tx := TransactionBodyFixture() return AlterTransactionForCluster(tx, clusters, target, func(*flow.TransactionBody) {}) } @@ -22,7 +22,7 @@ func TransactionForCluster(clusters flow.ClusterList, target flow.IdentityList) // // The `after` function is run after each modification to allow for any content // dependent changes to the transaction (eg. signing it). -func AlterTransactionForCluster(tx flow.TransactionBody, clusters flow.ClusterList, target flow.IdentityList, after func(tx *flow.TransactionBody)) flow.TransactionBody { +func AlterTransactionForCluster(tx flow.TransactionBody, clusters flow.ClusterList, target flow.IdentitySkeletonList, after func(tx *flow.TransactionBody)) flow.TransactionBody { // Bound to avoid infinite loop in case the routing algorithm is broken for i := 0; i < 10000; i++ { @@ -46,14 +46,12 @@ func AlterTransactionForCluster(tx flow.TransactionBody, clusters flow.ClusterLi // ClusterAssignment creates an assignment list with n clusters and with nodes // evenly distributed among clusters. 
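The chain_suite.go hunks above migrate from the mutable chunks.NewAssignment() to an AssignmentBuilder: Add is now keyed by the chunk index rather than the chunk itself and returns an error, IdentityList.Sample also returns an error, and the finished Assignment is produced by Build(). A condensed sketch of the new pattern, assuming only the identifiers visible in the diff:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/chunks"
	"github.com/onflow/flow-go/model/flow"
)

func buildAssignmentSketch(t *testing.T, result *flow.ExecutionResult, approvers flow.IdentityList) {
	builder := chunks.NewAssignmentBuilder()
	for _, chunk := range result.Chunks {
		// Sample half of the approvers for each chunk, as the suite above does.
		verifiers, err := approvers.Sample(uint(len(approvers) / 2))
		require.NoError(t, err)
		// Add is keyed by the chunk index and returns an error.
		require.NoError(t, builder.Add(chunk.Index, verifiers.NodeIDs()))
	}
	assignment := builder.Build()
	_ = assignment // handed to the assigner mock, as in the suite above
}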
-func ClusterAssignment(n uint, nodes flow.IdentityList) flow.AssignmentList { +func ClusterAssignment(n uint, nodes flow.IdentitySkeletonList) flow.AssignmentList { - collectors := nodes.Filter(filter.HasRole(flow.RoleCollection)) + collectors := nodes.Filter(filter.HasRole[flow.IdentitySkeleton](flow.RoleCollection)) // order, so the same list results in the same - sort.Slice(collectors, func(i, j int) bool { - return order.Canonical(collectors[i], collectors[j]) - }) + slices.SortFunc(collectors, flow.Canonical[flow.IdentitySkeleton]) assignments := make(flow.AssignmentList, n) for i, collector := range collectors { @@ -64,12 +62,18 @@ func ClusterAssignment(n uint, nodes flow.IdentityList) flow.AssignmentList { return assignments } -func ClusterList(n uint, nodes flow.IdentityList) flow.ClusterList { +func ClusterList(n uint, nodes flow.IdentitySkeletonList) flow.ClusterList { assignments := ClusterAssignment(n, nodes) - clusters, err := factory.NewClusterList(assignments, nodes.Filter(filter.HasRole(flow.RoleCollection))) + clusters, err := factory.NewClusterList(assignments, nodes.Filter(filter.HasRole[flow.IdentitySkeleton](flow.RoleCollection))) if err != nil { panic(err) } return clusters } + +// CollectionFromTransactions creates a new collection from the list of transactions. +func CollectionFromTransactions(transactions ...*flow.TransactionBody) flow.Collection { + txs := append(([]*flow.TransactionBody)(nil), transactions...) // copy slice to avoid mutation + return flow.Collection{Transactions: txs} +} diff --git a/utils/unittest/cluster_block.go b/utils/unittest/cluster_block.go new file mode 100644 index 00000000000..55e2206a64b --- /dev/null +++ b/utils/unittest/cluster_block.go @@ -0,0 +1,89 @@ +package unittest + +import ( + "fmt" + "time" + + "github.com/onflow/flow-go/model/cluster" + "github.com/onflow/flow-go/model/flow" +) + +var ClusterBlock clusterBlockFactory + +type clusterBlockFactory struct{} + +func ClusterBlockFixture(opts ...func(*cluster.Block)) *cluster.Block { + block := &cluster.Block{ + HeaderBody: HeaderBodyFixture(), + Payload: *ClusterPayloadFixture(3), + } + for _, opt := range opts { + opt(block) + } + return block +} + +func (f *clusterBlockFactory) WithParent(parent *cluster.Block) func(*cluster.Block) { + return func(block *cluster.Block) { + block.Height = parent.Height + 1 + block.View = parent.View + 1 + block.ChainID = parent.ChainID + block.Timestamp = uint64(time.Now().UnixMilli()) + block.ParentID = parent.ID() + block.ParentView = parent.View + } +} + +func (f *clusterBlockFactory) WithHeight(height uint64) func(*cluster.Block) { + return func(block *cluster.Block) { + block.Height = height + } +} + +func (f *clusterBlockFactory) WithChainID(chainID flow.ChainID) func(*cluster.Block) { + return func(block *cluster.Block) { + block.ChainID = chainID + } +} + +func (f *clusterBlockFactory) WithProposerID(proposerID flow.Identifier) func(*cluster.Block) { + return func(block *cluster.Block) { + block.ProposerID = proposerID + } +} + +func (f *clusterBlockFactory) WithPayload(payload cluster.Payload) func(*cluster.Block) { + return func(b *cluster.Block) { + b.Payload = payload + } +} + +func (f *clusterBlockFactory) Genesis() (*cluster.Block, error) { + headerBody, err := flow.NewRootHeaderBody(flow.UntrustedHeaderBody{ + View: 0, + ChainID: "cluster", + Timestamp: uint64(flow.GenesisTime.UnixMilli()), + ParentID: flow.ZeroID, + }) + if err != nil { + return nil, err + } + + payload, err := cluster.NewRootPayload( + 
cluster.UntrustedPayload(*cluster.NewEmptyPayload(flow.ZeroID)),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create root cluster payload: %w", err)
+	}
+
+	block, err := cluster.NewRootBlock(
+		cluster.UntrustedBlock{
+			HeaderBody: *headerBody,
+			Payload:    *payload,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create root cluster block: %w", err)
+	}
+	return block, nil
+}
diff --git a/utils/unittest/cluster_state_checker.go b/utils/unittest/cluster_state_checker.go
index 2c0426a16c9..8b7a8ec6a92 100644
--- a/utils/unittest/cluster_state_checker.go
+++ b/utils/unittest/cluster_state_checker.go
@@ -63,7 +63,7 @@ func (checker *ClusterStateChecker) Assert(t *testing.T) {
 	// start at the state head
 	head, err := checker.state.Final().Head()
-	assert.Nil(t, err)
+	assert.NoError(t, err)
 
 	// track properties of the state we will later compare against expectations
 	var (
@@ -75,10 +75,10 @@ func (checker *ClusterStateChecker) Assert(t *testing.T) {
 	// walk the chain state from head to genesis
 	for head.Height > 0 {
 		collection, err := checker.state.AtBlockID(head.ID()).Collection()
-		assert.Nil(t, err)
+		assert.NoError(t, err)
 
 		head, err = checker.state.AtBlockID(head.ParentID).Head()
-		assert.Nil(t, err)
+		assert.NoError(t, err)
 
 		if collection.Len() == 0 {
 			continue
diff --git a/utils/unittest/encoding.go b/utils/unittest/encoding.go
new file mode 100644
index 00000000000..b2ab5fd0fa3
--- /dev/null
+++ b/utils/unittest/encoding.go
@@ -0,0 +1,29 @@
+package unittest
+
+import (
+	"testing"
+
+	"github.com/fxamacker/cbor/v2"
+	"github.com/stretchr/testify/require"
+)
+
+// EncodeDecodeDifferentVersions emulates the situation where a peer running software version A receives
+// a message from a sender running software version B, where the format of the message may have been upgraded between
+// the different software versions. This method works irrespective of whether version A or B is the older/newer version
+// (also allowing that both versions are the same; in this degenerate edge case the old and new format would be the same).
+//
+// This function works by encoding src using CBOR, then decoding the result into dst.
+// Compatible fields as defined by CBOR will be copied into dst; incompatible fields
+// may be omitted.
+func EncodeDecodeDifferentVersions(t *testing.T, src, dst any) {
+	bz, err := cbor.Marshal(src)
+	require.NoError(t, err)
+	err = cbor.Unmarshal(bz, dst)
+	require.NoError(t, err)
+}
+
+// PtrTo returns a pointer to the input. Useful for concisely constructing
+// a reference-typed argument to a function or similar.
+func PtrTo[T any](target T) *T {
+	return &target
+}
diff --git a/utils/unittest/entity.go b/utils/unittest/entity.go
new file mode 100644
index 00000000000..b18a5810c67
--- /dev/null
+++ b/utils/unittest/entity.go
@@ -0,0 +1,370 @@
+package unittest
+
+import (
+	"fmt"
+	"math/rand"
+	"reflect"
+	"testing"
+
+	"github.com/onflow/crypto"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// RequireEntityNonMalleable is RequireNonMalleable with the constraint that models implement [flow.IDEntity]
+// and the content hash function is the ID() function.
+// Non-malleability is a required property for any entity that implements the [flow.IDEntity] interface.
+// This is especially important for entities that contain signatures and are transmitted over the network.
+// ID is used by the protocol to ensure entity integrity when transmitted over the network.
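To make the cross-version behavior of EncodeDecodeDifferentVersions above concrete, here is a hypothetical round trip between two message versions; the message types are illustrative and not part of the codebase:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/utils/unittest"
)

type messageV1 struct {
	Height uint64
}

type messageV2 struct {
	Height uint64
	Note   string // field added in the newer software version
}

func TestCrossVersionDecodeSketch(t *testing.T) {
	old := messageV1{Height: 42}
	var upgraded messageV2
	// The shared Height field survives the CBOR round trip; the new Note field
	// is absent from the encoding and stays zero-valued.
	unittest.EncodeDecodeDifferentVersions(t, old, &upgraded)
	require.Equal(t, uint64(42), upgraded.Height)
	require.Empty(t, upgraded.Note)
}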
ID must therefore be a binding cryptographic commitment to an entity. +// This function consumes the entity and modifies its fields randomly to ensure that the ID changes after each modification. +// Generally speaking each type that implements [flow.IDEntity] method should be tested with this function. +func RequireEntityNonMalleable(t *testing.T, entity flow.IDEntity, ops ...MalleabilityCheckerOpt) { + err := NewMalleabilityChecker(ops...).CheckEntity(entity) + require.NoError(t, err) +} + +// RequireNonMalleable is a sanity check that the model is not malleable with respect to a content hash over the model (hashModel). +// Non-malleability in this sense means that it is computationally hard to build a different model with the same hash. +// Hence, changing *any* field of a non-malleable model should change the hash, which we check here. +// Note that this is sanity check of non-malleability and that passing this test does not guarantee non-malleability. +// ATTENTION: We put only one requirement for data types, that is all fields have to be exported so we can modify them. +func RequireNonMalleable(t *testing.T, model any, hashModel func() flow.Identifier, ops ...MalleabilityCheckerOpt) { + err := NewMalleabilityChecker(ops...).Check(model, hashModel) + require.NoError(t, err) +} + +// MalleabilityChecker is a customizable checker to test whether an entity is malleable. If a structure implements [flow.IDEntity] +// interface, *any* change to the data structure has to change the ID of the entity as well. +// The MalleabilityChecker performs a recursive check of all fields of the entity and ensures that changing any field will change +// the ID of the entity. By default, the MalleabilityChecker uses pre-defined generators for each basic golang type, which return +// a random value, to modify the entity's field values. However, the MalleabilityChecker can be customized, by providing custom +// generators for specific types or specific fields. +// +// The caller can provide a loosely instantiated entity struct, which serves as a template for further modification. +// If checker encounters a field that is nil, it will insert a randomized instance into field and continue the check. +// If checker encounters a nil/empty slice or map, it will create a new instance of the slice/map, insert a value and continue the check. +// In rare cases, a type may have a different ID computation depending on whether a field is nil. +// In such cases, we can use the WithPinnedField option to skip malleability checks on this field. +// +// This checker heavily relies on generation of random values for the fields based on their type. All types are split into three categories: +// 1. structures, primitives, slices, arrays, maps (generateRandomReflectValue) +// 2. interfaces (generateInterfaceFlowValue) +// +// Checker knows how to deal with each of the categories and generate random values for them. +// There are two ways to handle types not natively recognized by the MalleabilityChecker: +// 1. User can provide a custom type generator for the type using WithTypeGenerator option. +// 2. User can provide a custom generator for the field using WithFieldGenerator option. +// +// It is recommended to use the first option if type is used in multiple places and general enough. +// Matching by type (instead of field name) is less selective, by covering all fields of the given type. +// Field generator is very useful for cases where the field is context-sensitive, and we cannot generate a completely random value. 
+type MalleabilityChecker struct { + typeGenerator map[reflect.Type]func() reflect.Value + fieldGenerator map[string]func() reflect.Value + pinnedFields map[string]struct{} +} + +// MalleabilityCheckerOpt is a functional option for the MalleabilityChecker which allows to modify behavior of the checker. +type MalleabilityCheckerOpt func(*MalleabilityChecker) + +// WithTypeGenerator allows to override the default behavior of the checker for the given type, meaning if a field of the given type +// is encountered, the MalleabilityChecker will use the provided generator instead of a random value. +// An example usage would be: +// +// type BarType struct { +// Baz string +// } +// type FooType struct { +// Bar []BarType +// } +// +// ... +// +// WithTypeGenerator(func() BarType { return randomBar()}) +// +// ATTENTION: In order for the MalleabilityChecker to work properly, two calls of the generator should produce two different values. +func WithTypeGenerator[T any](generator func() T) MalleabilityCheckerOpt { + return func(mc *MalleabilityChecker) { + mc.typeGenerator[reflect.TypeOf((*T)(nil)).Elem()] = func() reflect.Value { + return reflect.ValueOf(generator()) + } + } +} + +// WithPinnedField allows to skip malleability checks for the given field. If a field with given path is encountered, the MalleabilityChecker +// will skip the check for this field. Pinning is mutually exclusive with field generators, meaning if a field is pinned, the checker will +// not overwrite the field with a random value, even if a field generator is provided. This is useful when the ID computation varies depending +// on specific values of some field (typically used for temporary downwards compatibility - new fields are added as temporary optional). +// An example usage would be: +// +// type BarType struct { +// Baz string +// } +// type FooType struct { +// Bar BarType +// } +// +// ... +// +// WithPinnedField("Bar.Baz") +func WithPinnedField(field string) MalleabilityCheckerOpt { + return func(mc *MalleabilityChecker) { + mc.pinnedFields[field] = struct{}{} + } +} + +// WithFieldGenerator allows to override the default behavior of the checker for the given field, meaning if a field with given path +// is encountered, the MalleabilityChecker will use the provided generator instead of a random value. +// An example usage would be: +// +// type BarType struct { +// Baz string +// } +// type FooType struct { +// Bar BarType +// } +// +// ... +// +// WithFieldGenerator("Bar.Baz", func() string { return randomString()}) +func WithFieldGenerator[T any](field string, generator func() T) MalleabilityCheckerOpt { + return func(mc *MalleabilityChecker) { + mc.fieldGenerator[field] = func() reflect.Value { + return reflect.ValueOf(generator()) + } + } +} + +// NewMalleabilityChecker creates a new instance of the MalleabilityChecker with the given options. +func NewMalleabilityChecker(ops ...MalleabilityCheckerOpt) *MalleabilityChecker { + checker := &MalleabilityChecker{ + pinnedFields: make(map[string]struct{}), + typeGenerator: make(map[reflect.Type]func() reflect.Value), + fieldGenerator: make(map[string]func() reflect.Value), + } + + for _, op := range ops { + op(checker) + } + + return checker +} + +// CheckEntity is Check with the constraint that models implement [flow.IDEntity] and the content hash function is the ID() function. +// It returns an error if the entity is malleable, otherwise it returns nil. +// No errors are expected during normal operations. 
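Before the implementation details below, a minimal end-to-end sketch of the intended usage: define a type whose ID() commits to every field and hand a pointer to the checker. The entity type here is hypothetical:

package example

import (
	"testing"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

// sealedHeight is a hypothetical entity whose ID commits to all of its fields.
type sealedHeight struct {
	BlockID flow.Identifier
	Height  uint64
}

func (s *sealedHeight) ID() flow.Identifier {
	return flow.MakeID(s)
}

func TestSealedHeightNonMalleableSketch(t *testing.T) {
	// Passes: mutating either field changes flow.MakeID(s).
	unittest.RequireEntityNonMalleable(t, &sealedHeight{
		BlockID: unittest.IdentifierFixture(),
		Height:  7,
	})
}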
+func (mc *MalleabilityChecker) CheckEntity(entity flow.IDEntity) error { + if entity == nil { + return fmt.Errorf("entity is nil") + } + return mc.Check(entity, entity.ID) +} + +// Check is a method that performs the malleability check on the input model. +// The caller provides a loosely instantiated model, which serves as a template for further modification. +// The malleability check is recursively applied to all fields of the model. +// If one of the fields is nil or an empty slice/map, the checker will create a new instance of the field and continue the check. +// In rare cases, a type may have a different ID computation depending on whether a field is nil, in such case, we can use field pinning to +// prevent the checker from changing the field. +// It returns an error if the model is malleable, otherwise it returns nil. +// No errors are expected during normal operations. +func (mc *MalleabilityChecker) Check(model any, hashModel func() flow.Identifier) error { + v := reflect.ValueOf(model) + if !v.IsValid() { + return fmt.Errorf("input is not a valid entity") + } + if v.Kind() != reflect.Ptr { + // If it is not a pointer type, we may not be able to set fields to test malleability, since the model may not be addressable + return fmt.Errorf("entity is not a pointer type (try checking a reference to it), entity: %v %v", v.Kind(), v.Type()) + } + if v.IsNil() { + return fmt.Errorf("entity is nil, nothing to check") + } + v = v.Elem() + if err := mc.isModelMalleable(v, nil, "", hashModel); err != nil { + return err + } + return mc.checkExpectations() +} + +// checkExpectations checks if all pre-configured options were used during the check. +// This includes checking if all pinned fields were used and if all field generators were used. +// Pins and field generators are mutually exclusive, and consumed once per field. +// An error is returned in case checker has been misconfigured. +func (mc *MalleabilityChecker) checkExpectations() error { + for field := range mc.pinnedFields { + return fmt.Errorf("field %s is pinned, but wasn't used, checker misconfigured", field) + } + for field := range mc.fieldGenerator { + return fmt.Errorf("field %s has a generator, but wasn't used, checker misconfigured", field) + } + return nil +} + +// isModelMalleable is a helper function to recursively check fields of a model for malleability. +// This function is called recursively for each field of the input model and checks if the model is malleable by comparing its hash +// before and after changing the field value. +// Arguments: +// - modelOrField: value to check - at the top-level of the recursion, it is the overall model we are checking; +// for all recursive calls it is fields and sub-fields of the model. +// - structField: optional metadata about the field, it is present only for values which are fields of a struct. +// - parentFieldPath: previously accumulated field path which leads to the current field. +// - hashModel: function to get a hash of the whole model. +// +// This function returns error if the entity is malleable, otherwise it returns nil. +func (mc *MalleabilityChecker) isModelMalleable(modelOrField reflect.Value, structField *reflect.StructField, parentFieldPath string, hashModel func() flow.Identifier) error { + var fullFieldPath string + // if we are dealing with a field of a struct, we need to build a full field path and use that for custom options lookup. 
+ if structField != nil { + fullFieldPath = buildFieldPath(parentFieldPath, structField.Name) + // pinning has priority over field generators, if the field is pinned, we skip the check for this field. + // if we have both pin and field generator, we will never use the field generator but will fail to meet the expectations, after running the check. + if _, ok := mc.pinnedFields[fullFieldPath]; ok { + // make sure we consume the pin so we can check if all pins were used. + delete(mc.pinnedFields, fullFieldPath) + return nil + } + } + + if modelOrField.Kind() == reflect.Ptr { + if modelOrField.IsNil() { + modelOrField.Set(reflect.New(modelOrField.Type().Elem())) + } + modelOrField = modelOrField.Elem() + } + tType := modelOrField.Type() + + // if we have a field generator for the field, we use it to generate a random value for the field instead of using the default flow. + if generator, ok := mc.fieldGenerator[fullFieldPath]; ok { + // make sure we consume the field generator so we can check if all field generators were used. + delete(mc.fieldGenerator, fullFieldPath) + originalHash := hashModel() + modelOrField.Set(generator()) + newHash := hashModel() + if originalHash != newHash { + return nil + } + return fmt.Errorf("hash did not change after changing %s value", fullFieldPath) + } + + if modelOrField.Kind() == reflect.Struct { + // any time we encounter a structure we need to go through all fields and check if the entity is malleable in recursive manner. + for i := 0; i < modelOrField.NumField(); i++ { + field := modelOrField.Field(i) + if !field.CanSet() { + return fmt.Errorf("field %s is not settable, try providing a field generator for field %s", tType.Field(i).Name, fullFieldPath) + } + + nextField := tType.Field(i) + if err := mc.isModelMalleable(field, &nextField, fullFieldPath, hashModel); err != nil { + return fmt.Errorf("field %s is malleable: %w", tType.Field(i).Name, err) + } + } + return nil + } else { + // when dealing with non-composite type we can generate random values for it and check if ID has changed. + origID := hashModel() + err := mc.generateRandomReflectValue(modelOrField) + if err != nil { + return fmt.Errorf("failed to generate random value for %s: %w", fullFieldPath, err) + } + newID := hashModel() + if origID != newID { + return nil + } + return fmt.Errorf("hash did not change after changing %s value", fullFieldPath) + } +} + +// buildFieldPath is a helper function to build a full field path. +func buildFieldPath(fieldPath string, fieldName string) string { + if fieldPath == "" { + return fieldName + } + return fieldPath + "." + fieldName +} + +// generateRandomReflectValue uses reflection to switch on the field type and generate a random value for it. +// This function mutates the input [reflect.Value]. If it cannot mutate the input, an error is returned and the malleability check should be considered failed. +// If a type generator is provided for the field type, it will be used to generate a random value for the field. +// No errors are expected during normal operations. 
+func (mc *MalleabilityChecker) generateRandomReflectValue(field reflect.Value) error { + if generator, ok := mc.typeGenerator[field.Type()]; ok { + field.Set(generator()) + return nil + } + + switch field.Kind() { + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + field.SetUint(^field.Uint()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + field.SetInt(^field.Int()) + case reflect.String: + field.SetString(fmt.Sprintf("random_%d", rand.Intn(100000))) + case reflect.Float64, reflect.Float32: + field.SetFloat(field.Float() + rand.Float64()*10) + case reflect.Bool: + field.SetBool(!field.Bool()) + case reflect.Slice: + if field.Len() == 0 { + field.Set(reflect.MakeSlice(field.Type(), 1, 1)) + } + return mc.generateRandomReflectValue(field.Index(rand.Intn(field.Len()))) + case reflect.Array: + index := rand.Intn(field.Len()) + return mc.generateRandomReflectValue(field.Index(index)) + case reflect.Map: + mapKeys := field.MapKeys() + var key reflect.Value + if len(mapKeys) == 0 { + field.Set(reflect.MakeMap(field.Type())) + key = reflect.New(field.Type().Key()).Elem() + if err := mc.generateRandomReflectValue(key); err != nil { + return err + } + } else { + index := rand.Intn(len(mapKeys)) + key = mapKeys[index] + } + val := reflect.New(field.Type().Elem()).Elem() + if err := mc.generateRandomReflectValue(val); err != nil { + return err + } + field.SetMapIndex(key, val) + case reflect.Ptr: + if field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + return mc.generateRandomReflectValue(field.Elem()) // modify underlying value + case reflect.Struct: + // if we are dealing with a struct, we need to go through all fields and generate random values for them + // if the field is another struct, we will deal with it recursively. + // at the end of the recursion, we must encounter a primitive type, which we can generate a random value for, otherwise an error is returned. + for i := 0; i < field.NumField(); i++ { + structField := field.Field(i) + err := mc.generateRandomReflectValue(structField) + if err != nil { + return fmt.Errorf("cannot generate random value for struct field: %s", field.Type().String()) + } + } + case reflect.Interface: + generatedValue := reflect.ValueOf(generateInterfaceFlowValue(field)) // it's always a pointer + if !generatedValue.IsValid() { + return fmt.Errorf("cannot generate random value for interface: %s", field.Type().String()) + } + field.Set(generatedValue) + default: + return fmt.Errorf("cannot generate random value, unsupported type: %s", field.Kind().String()) + } + return nil +} + +// generateInterfaceFlowValue generates a random value for the field of the struct that is an interface. +// This can be extended for types that are broadly used in the code base. 
+func generateInterfaceFlowValue(field reflect.Value) any { + if field.Type().Implements(reflect.TypeOf((*crypto.PublicKey)(nil)).Elem()) { + return KeyFixture(crypto.ECDSAP256).PublicKey() + } + return nil +} diff --git a/utils/unittest/entity_test.go b/utils/unittest/entity_test.go new file mode 100644 index 00000000000..5199237829a --- /dev/null +++ b/utils/unittest/entity_test.go @@ -0,0 +1,381 @@ +package unittest + +import ( + "testing" + + clone "github.com/huandu/go-clone/generic" + "github.com/onflow/crypto" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" + + "github.com/onflow/flow-go/model/flow" +) + +type IntList []uint32 + +func (l IntList) ID() flow.Identifier { + return flow.MakeID(l) +} + +// StructWithNilFields allows testing all cases of nil-able fields: struct pointer, slice, map. +// The default behaviour is if any nil-able fields is nil or size 0, the malleability checker fails. +type StructWithNilFields struct { + Identities flow.IdentitySkeletonList + Index map[flow.Identifier]uint32 + QC *flow.QuorumCertificate +} + +func (e *StructWithNilFields) ID() flow.Identifier { + type pair struct { + ID flow.Identifier + Index uint32 + } + var pairs []pair + for id, index := range e.Index { + pairs = append(pairs, pair{id, index}) + } + slices.SortFunc(pairs, func(a, b pair) int { + return flow.IdentifierCanonical(a.ID, b.ID) + }) + return flow.MakeID(struct { + Identities flow.IdentitySkeletonList + Index []pair + QcID flow.Identifier + }{ + Identities: e.Identities, + Index: pairs, + QcID: e.QC.ID(), + }) +} + +// StructWithNotSettableFlowField will always fail malleability checking because it contains a private (non-settable) field +type StructWithNotSettableFlowField struct { + field flow.IdentitySkeleton +} + +func (e *StructWithNotSettableFlowField) ID() flow.Identifier { + return flow.MakeID(e) +} + +// MalleableEntityStruct is a struct that is malleable because its ID method does not cover all of its fields. +type MalleableEntityStruct struct { + Identities flow.IdentitySkeletonList + QC *flow.QuorumCertificate + Signature crypto.Signature +} + +// ID returns the hash of the entity in a way that does not cover all of its fields. +func (e *MalleableEntityStruct) ID() flow.Identifier { + return flow.MakeID(struct { + Identities flow.IdentitySkeletonList + QcID flow.Identifier + }{ + Identities: e.Identities, + QcID: e.QC.ID(), + }) +} + +// StructWithOptionalField is a struct that has an optional field. This is a rare case but it happens that we need to include +// a field that is optional for backward compatibility reasons. In such cases the ID method might behave differently depending +// on the presence of the optional field. Checker should be able to handle a case where the optional field is nil and when it is not. +// To accomplish this, we are using a special struct tag otherwise the checker would fail to detect the optional field as it requires +// that all fields are non-empty/non-nil. +type StructWithOptionalField struct { + Identifier flow.Identifier + RequiredField uint32 + OptionalField *uint32 +} + +// ID returns the hash of the entity depending on the presence of the optional field. 
+func (e *StructWithOptionalField) ID() flow.Identifier { + if e.OptionalField == nil { + return flow.MakeID(struct { + Identifier flow.Identifier + RequiredField uint32 + }{ + Identifier: e.Identifier, + RequiredField: e.RequiredField, + }) + } else { + return flow.MakeID(struct { + RequiredField uint32 + OptionalField uint32 + Identifier flow.Identifier + }{ + Identifier: e.Identifier, + OptionalField: *e.OptionalField, + RequiredField: e.RequiredField, + }) + } +} + +// TestRequireEntityNonMalleable tests the behavior of MalleabilityChecker with different types of entities ensuring +// it correctly handles the supported types and returns an error when the entity is malleable, or it cannot perform the check. +func TestRequireEntityNonMalleable(t *testing.T) { + t.Run("type alias", func(t *testing.T) { + list := &IntList{1, 2, 3} + RequireEntityNonMalleable(t, list) + }) + t.Run("embedded-struct", func(t *testing.T) { + RequireEntityNonMalleable(t, &StructWithNilFields{ + Identities: IdentityListFixture(2).ToSkeleton(), + Index: map[flow.Identifier]uint32{IdentifierFixture(): 0}, + QC: QuorumCertificateFixture(), + }) + }) + t.Run("embedded-struct-with-nil-value", func(t *testing.T) { + t.Run("nil-slice", func(t *testing.T) { + RequireEntityNonMalleable(t, &StructWithNilFields{ + Identities: nil, + Index: map[flow.Identifier]uint32{IdentifierFixture(): 0}, + QC: QuorumCertificateFixture(), + }) + }) + t.Run("empty-slice", func(t *testing.T) { + RequireEntityNonMalleable(t, &StructWithNilFields{ + Identities: make(flow.IdentitySkeletonList, 0), + Index: map[flow.Identifier]uint32{IdentifierFixture(): 0}, + QC: QuorumCertificateFixture(), + }) + }) + t.Run("nil-map", func(t *testing.T) { + RequireEntityNonMalleable(t, &StructWithNilFields{ + Identities: IdentityListFixture(5).ToSkeleton(), + Index: nil, + QC: QuorumCertificateFixture(), + }) + }) + t.Run("empty-map", func(t *testing.T) { + RequireEntityNonMalleable(t, &StructWithNilFields{ + Identities: IdentityListFixture(5).ToSkeleton(), + Index: map[flow.Identifier]uint32{}, + QC: QuorumCertificateFixture(), + }) + }) + t.Run("nil-ptr", func(t *testing.T) { + RequireEntityNonMalleable(t, &StructWithNilFields{ + Identities: IdentityListFixture(5).ToSkeleton(), + Index: map[flow.Identifier]uint32{IdentifierFixture(): 0}, + QC: nil, + }) + }) + }) + t.Run("nil-entity", func(t *testing.T) { + err := NewMalleabilityChecker().CheckEntity(nil) + require.Error(t, err) + require.ErrorContains(t, err, "entity is nil") + }) + t.Run("unsupported-field", func(t *testing.T) { + err := NewMalleabilityChecker().CheckEntity(&StructWithNotSettableFlowField{ + field: IdentityFixture().IdentitySkeleton, + }) + require.Error(t, err) + require.ErrorContains(t, err, "not settable") + }) + t.Run("malleable-entity", func(t *testing.T) { + err := NewMalleabilityChecker().CheckEntity(&MalleableEntityStruct{ + Identities: IdentityListFixture(2).ToSkeleton(), + QC: QuorumCertificateFixture(), + Signature: SignatureFixture(), + }) + require.Error(t, err) + require.ErrorContains(t, err, "Signature is malleable") + }) + t.Run("struct-with-optional-field", func(t *testing.T) { + t.Run("without-optional-field", func(t *testing.T) { + err := NewMalleabilityChecker().CheckEntity(&StructWithOptionalField{ + Identifier: IdentifierFixture(), + RequiredField: 42, + OptionalField: nil, + }) + require.NoError(t, err) + }) + t.Run("with-optional-field", func(t *testing.T) { + v := &StructWithOptionalField{ + Identifier: IdentifierFixture(), + RequiredField: 42, + OptionalField: 
new(uint32),
+			}
+			*v.OptionalField = 13
+			err := NewMalleabilityChecker().CheckEntity(v)
+			require.NoError(t, err)
+		})
+	})
+}
+
+// EnterViewEvidence is a utility struct for testing the malleability checker when multiple levels of nested structs are involved.
+type EnterViewEvidence struct {
+	QC *flow.QuorumCertificate
+	TC *flow.TimeoutCertificate
+}
+
+// StructWithPinning is a struct specifically designed to test the pinning feature of the malleability checker.
+type StructWithPinning struct {
+	Version  uint32
+	Evidence *EnterViewEvidence
+}
+
+// ID returns the hash of the entity depending on the value of the Version field.
+// Depending on the value of the Version field, the ID method includes or excludes the TC field in the
+// hash calculation and, in some cases, requires that it is nil or non-nil.
+// This is a contrived example to demonstrate the pinning feature of the malleability checker.
+func (e *StructWithPinning) ID() flow.Identifier {
+	if e.Version == 1 {
+		if e.Evidence.TC != nil {
+			panic("TC should not be set for version 1")
+		}
+		return flow.MakeID(struct {
+			Version uint32
+			QcID    flow.Identifier
+		}{
+			Version: e.Version,
+			QcID:    e.Evidence.QC.ID(),
+		})
+	} else if e.Version == 2 {
+		if e.Evidence.QC == nil || e.Evidence.TC == nil {
+			panic("QC and TC should be set for version 2")
+		}
+		return flow.MakeID(struct {
+			Version uint32
+			QcID    flow.Identifier
+			TcID    flow.Identifier
+		}{
+			Version: e.Version,
+			QcID:    e.Evidence.QC.ID(),
+			TcID:    e.Evidence.TC.ID(),
+		})
+	} else {
+		panic("unsupported version")
+	}
+}
+
+// TestMalleabilityChecker_PinField tests the behavior of MalleabilityChecker when pinning is required.
+// This structure is implemented in a way that the ID method behaves differently depending on the value
+// of the Version field: it includes or excludes the TC field in the hash calculation and, in some cases,
+// requires that it is nil or non-nil. This means we need to use pinning; otherwise the checker would
+// generate random values for these fields.
+func TestMalleabilityChecker_PinField(t *testing.T) {
+	t.Run("v1", func(t *testing.T) {
+		checker := NewMalleabilityChecker(WithPinnedField("Version"), WithPinnedField("Evidence.TC"))
+		err := checker.CheckEntity(&StructWithPinning{
+			Version: 1,
+			Evidence: &EnterViewEvidence{
+				QC: QuorumCertificateFixture(),
+				TC: nil,
+			},
+		})
+		require.NoError(t, err)
+	})
+	t.Run("v2", func(t *testing.T) {
+		checker := NewMalleabilityChecker(WithPinnedField("Version"))
+		err := checker.CheckEntity(&StructWithPinning{
+			Version: 2,
+			Evidence: &EnterViewEvidence{
+				QC: QuorumCertificateFixture(),
+				TC: &flow.TimeoutCertificate{
+					View:          0,
+					NewestQCViews: nil,
+					NewestQC:      nil,
+					SignerIndices: nil,
+					SigData:       nil,
+				},
+			},
+		})
+		require.NoError(t, err)
+	})
+}
+
+// StructWithComplexType is a struct that contains a slice of a complex type.
+type StructWithComplexType struct {
+	Version   uint32
+	Evidences []*EnterViewEvidence
+}
+
+func (e *StructWithComplexType) ID() flow.Identifier {
+	return flow.MakeID(e)
+}
+
+// TestMalleabilityChecker_Generators tests the behavior of MalleabilityChecker when using field and type generators.
+// In this test we ensure that the checker uses the generator and that the generated values are set on the entity being checked.
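+// A hedged usage sketch (options as defined in this package; `entity`, `evidences`
+// and `evidence` are illustrative placeholders):
+//
+//	RequireEntityNonMalleable(t, entity,
+//		WithFieldGenerator("Evidences", func() []*EnterViewEvidence { return evidences }),
+//		WithTypeGenerator(func() EnterViewEvidence { return evidence }),
+//	)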
+func TestMalleabilityChecker_Generators(t *testing.T) { + t.Run("no-generator", func(t *testing.T) { + original := &StructWithComplexType{ + Version: 0, + Evidences: nil, + } + cpy := clone.Clone(original) + RequireEntityNonMalleable(t, cpy) + require.NotEqual(t, original.Version, cpy.Version) + require.NotElementsMatch(t, original.Evidences, cpy.Evidences) + }) + t.Run("field-generator", func(t *testing.T) { + original := &StructWithComplexType{ + Version: 0, + Evidences: nil, + } + generated := []*EnterViewEvidence{ + { + QC: QuorumCertificateFixture(), + TC: nil, + }, + } + RequireEntityNonMalleable(t, original, WithFieldGenerator("Evidences", func() []*EnterViewEvidence { + return generated + })) + require.Equal(t, generated, original.Evidences) + }) + t.Run("type-generator", func(t *testing.T) { + generated := EnterViewEvidence{ + QC: QuorumCertificateFixture(), + TC: nil, + } + original := &StructWithComplexType{ + Version: 0, + Evidences: nil, + } + RequireEntityNonMalleable(t, original, WithTypeGenerator(func() EnterViewEvidence { + return generated + }), + ) + require.Equal(t, generated, *original.Evidences[0]) + }) +} + +// PartialHashStruct represents a model which includes a signature field attesting to the rest of the model. +// Hash returns a hash over PartialHashStruct excluding the Signature field, and the Signature would sign the Hash. +// ID returns a hash over the entire PartialHashStruct. +// PartialHashStruct is malleable with respect to the Hash method, but non-malleable with respect to the ID method. +// Although the Hash method is malleable, we still want to be able to verify that it is non-malleable with respect +// to all fields other than the Signature field. +type PartialHashStruct struct { + Data []byte + Signature crypto.Signature +} + +func (e *PartialHashStruct) Hash() flow.Identifier { + return flow.MakeID(struct { + Data []byte + }{ + Data: e.Data, + }) +} + +func (e *PartialHashStruct) ID() flow.Identifier { + return flow.MakeID(e) +} + +// TestMalleabilityChecker_PartialHash tests a partial hash malleability check. See PartialHashStruct for details. +func TestMalleabilityChecker_PartialHash(t *testing.T) { + model := &PartialHashStruct{ + Data: SeedFixture(32), + Signature: SignatureFixture(), + } + // the entity check passes + err := NewMalleabilityChecker().CheckEntity(model) + require.NoError(t, err) + // the default Hash check fails + err = NewMalleabilityChecker().Check(model, model.Hash) + require.Error(t, err) + require.ErrorContains(t, err, "Signature is malleable") + // the Hash check omitting the Signature field passes + err = NewMalleabilityChecker(WithPinnedField("Signature")).Check(model, model.Hash) + require.NoError(t, err) +} diff --git a/utils/unittest/epoch_builder.go b/utils/unittest/epoch_builder.go index 321522f582a..76ff23b4d46 100644 --- a/utils/unittest/epoch_builder.go +++ b/utils/unittest/epoch_builder.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage/deferred" ) // EpochHeights is a structure caching the results of building an epoch with @@ -70,27 +71,29 @@ func (epoch EpochHeights) CommittedRange() []uint64 { // EpochBuilder is a testing utility for building epochs into chain state. 
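+// A typical usage sketch (a hedged illustration; `mutableState` and `followerState`
+// stand in for whatever protocol state the test has already set up):
+//
+//	builder := NewEpochBuilder(t, mutableState, followerState)
+//	builder.BuildEpoch().CompleteEpoch()
+//	heights, ok := builder.EpochHeights(1)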
type EpochBuilder struct { - t *testing.T - states []protocol.FollowerState - blocksByID map[flow.Identifier]*flow.Block - blocks []*flow.Block - built map[uint64]*EpochHeights - setupOpts []func(*flow.EpochSetup) // options to apply to the EpochSetup event - commitOpts []func(*flow.EpochCommit) // options to apply to the EpochCommit event + t *testing.T + mutableProtocolState protocol.MutableProtocolState + states []protocol.FollowerState + blocksByID map[flow.Identifier]*flow.Block + blocks []*flow.Block + built map[uint64]*EpochHeights + setupOpts []func(*flow.EpochSetup) // options to apply to the EpochSetup event + commitOpts []func(*flow.EpochCommit) // options to apply to the EpochCommit event } // NewEpochBuilder returns a new EpochBuilder which will build epochs using the // given states. At least one state must be provided. If more than one are // provided they must have the same initial state. -func NewEpochBuilder(t *testing.T, states ...protocol.FollowerState) *EpochBuilder { +func NewEpochBuilder(t *testing.T, mutator protocol.MutableProtocolState, states ...protocol.FollowerState) *EpochBuilder { require.True(t, len(states) >= 1, "must provide at least one state") builder := &EpochBuilder{ - t: t, - states: states, - blocksByID: make(map[flow.Identifier]*flow.Block), - blocks: make([]*flow.Block, 0), - built: make(map[uint64]*EpochHeights), + t: t, + mutableProtocolState: mutator, + states: states, + blocksByID: make(map[flow.Identifier]*flow.Block), + blocks: make([]*flow.Block, 0), + built: make(map[uint64]*EpochHeights), } return builder } @@ -154,31 +157,30 @@ func (builder *EpochBuilder) EpochHeights(counter uint64) (*EpochHeights, bool) // about the heights of blocks in the BUILT epoch (epoch N). These can be // queried with EpochHeights. func (builder *EpochBuilder) BuildEpoch() *EpochBuilder { - state := builder.states[0] + finalSnap := state.Final() // prepare default values for the service events based on the current state - identities, err := state.Final().Identities(filter.Any) - require.Nil(builder.t, err) - epoch := state.Final().Epochs().Current() - counter, err := epoch.Counter() - require.Nil(builder.t, err) - finalView, err := epoch.FinalView() - require.Nil(builder.t, err) + identities, err := finalSnap.Identities(filter.Any) + require.NoError(builder.t, err) + epoch, err := finalSnap.Epochs().Current() + require.NoError(builder.t, err) + counter := epoch.Counter() + finalView := epoch.FinalView() // retrieve block A - A, err := state.Final().Head() - require.Nil(builder.t, err) + A, err := finalSnap.Head() + require.NoError(builder.t, err) // check that block A satisfies initial condition - phase, err := state.Final().Phase() - require.Nil(builder.t, err) + phase, err := finalSnap.EpochPhase() + require.NoError(builder.t, err) require.Equal(builder.t, flow.EpochPhaseStaking, phase) // Define receipts and seals for block B payload. They will be nil if A is // the root block var receiptA *flow.ExecutionReceipt - var prevReceipts []*flow.ExecutionReceiptMeta + var prevReceipts []*flow.ExecutionReceiptStub var prevResults []*flow.ExecutionResult var sealsForPrev []*flow.Seal @@ -187,8 +189,8 @@ func (builder *EpochBuilder) BuildEpoch() *EpochBuilder { // A is not the root block. B will contain a receipt for A, and a seal // for the receipt contained in A. 
receiptA = ReceiptForBlockFixture(aBlock) - prevReceipts = []*flow.ExecutionReceiptMeta{ - receiptA.Meta(), + prevReceipts = []*flow.ExecutionReceiptStub{ + receiptA.Stub(), } prevResults = []*flow.ExecutionResult{ &receiptA.ExecutionResult, @@ -201,7 +203,7 @@ func (builder *EpochBuilder) BuildEpoch() *EpochBuilder { // defaults for the EpochSetup event setupDefaults := []func(*flow.EpochSetup){ - WithParticipants(identities), + WithParticipants(identities.ToSkeleton()), SetupWithCounter(counter + 1), WithFirstView(finalView + 1), WithFinalView(finalView + 1_000_000), @@ -209,13 +211,14 @@ func (builder *EpochBuilder) BuildEpoch() *EpochBuilder { setup := EpochSetupFixture(append(setupDefaults, builder.setupOpts...)...) // build block B, sealing up to and including block A - B := BlockWithParentFixture(A) - B.SetPayload(flow.Payload{ - Receipts: prevReceipts, - Results: prevResults, - Seals: sealsForPrev, - }) - + B := BlockWithParentAndPayload( + A, + flow.Payload{ + Receipts: prevReceipts, + Results: prevResults, + Seals: sealsForPrev, + }, + ) builder.addBlock(B) // create a receipt for block B, to be included in block C @@ -225,33 +228,37 @@ func (builder *EpochBuilder) BuildEpoch() *EpochBuilder { // insert block C with a receipt for block B, and a seal for the receipt in // block B if there was one - C := BlockWithParentFixture(B.Header) var sealsForA []*flow.Seal if receiptA != nil { sealsForA = []*flow.Seal{ Seal.Fixture(Seal.WithResult(&receiptA.ExecutionResult)), } } - C.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receiptB.Meta()}, - Results: []*flow.ExecutionResult{&receiptB.ExecutionResult}, - Seals: sealsForA, - }) + C := BlockWithParentAndPayload( + B.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptB.Stub()}, + Results: []*flow.ExecutionResult{&receiptB.ExecutionResult}, + Seals: sealsForA, + }, + ) builder.addBlock(C) // create a receipt for block C, to be included in block D receiptC := ReceiptForBlockFixture(C) // build block D // D contains a seal for block B and a receipt for block C - D := BlockWithParentFixture(C.Header) sealForB := Seal.Fixture( Seal.WithResult(&receiptB.ExecutionResult), ) - D.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receiptC.Meta()}, - Results: []*flow.ExecutionResult{&receiptC.ExecutionResult}, - Seals: []*flow.Seal{sealForB}, - }) + D := BlockWithParentAndPayload( + C.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptC.Stub()}, + Results: []*flow.ExecutionResult{&receiptC.ExecutionResult}, + Seals: []*flow.Seal{sealForB}, + }, + ) builder.addBlock(D) // defaults for the EpochCommit event @@ -269,15 +276,17 @@ func (builder *EpochBuilder) BuildEpoch() *EpochBuilder { // build block E // E contains a seal for C and a receipt for D - E := BlockWithParentFixture(D.Header) sealForC := Seal.Fixture( Seal.WithResult(&receiptC.ExecutionResult), ) - E.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receiptD.Meta()}, - Results: []*flow.ExecutionResult{&receiptD.ExecutionResult}, - Seals: []*flow.Seal{sealForC}, - }) + E := BlockWithParentAndPayload( + D.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptD.Stub()}, + Results: []*flow.ExecutionResult{&receiptD.ExecutionResult}, + Seals: []*flow.Seal{sealForC}, + }, + ) builder.addBlock(E) // create receipt for block E receiptE := ReceiptForBlockFixture(E) @@ -285,24 +294,26 @@ func (builder *EpochBuilder) BuildEpoch() *EpochBuilder { // build block F // F contains a 
seal for block D and the EpochCommit event, as well as a // receipt for block E - F := BlockWithParentFixture(E.Header) sealForD := Seal.Fixture( Seal.WithResult(&receiptD.ExecutionResult), ) - F.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receiptE.Meta()}, - Results: []*flow.ExecutionResult{&receiptE.ExecutionResult}, - Seals: []*flow.Seal{sealForD}, - }) + F := BlockWithParentAndPayload( + E.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptE.Stub()}, + Results: []*flow.ExecutionResult{&receiptE.ExecutionResult}, + Seals: []*flow.Seal{sealForD}, + }, + ) builder.addBlock(F) // cache information about the built epoch builder.built[counter] = &EpochHeights{ Counter: counter, Staking: A.Height, - Setup: D.Header.Height, - Committed: F.Header.Height, - CommittedFinal: F.Header.Height, + Setup: D.Height, + Committed: F.Height, + CommittedFinal: F.Height, } return builder @@ -312,63 +323,58 @@ func (builder *EpochBuilder) BuildEpoch() *EpochBuilder { // epoch. We must be in the Committed phase to call CompleteEpoch. Once the epoch // has been capped off, we can build the next epoch with BuildEpoch. func (builder *EpochBuilder) CompleteEpoch() *EpochBuilder { - state := builder.states[0] + finalSnap := state.Final() - phase, err := state.Final().Phase() - require.Nil(builder.t, err) + phase, err := finalSnap.EpochPhase() + require.NoError(builder.t, err) require.Equal(builder.t, flow.EpochPhaseCommitted, phase) - finalView, err := state.Final().Epochs().Current().FinalView() - require.Nil(builder.t, err) + currentEpoch, err := finalSnap.Epochs().Current() + require.NoError(builder.t, err) + finalView := currentEpoch.FinalView() - final, err := state.Final().Head() - require.Nil(builder.t, err) + final, err := finalSnap.Head() + require.NoError(builder.t, err) finalBlock, ok := builder.blocksByID[final.ID()] require.True(builder.t, ok) // A is the first block of the next epoch (see diagram in BuildEpoch) - A := BlockWithParentFixture(final) - // first view is not necessarily exactly final view of previous epoch - A.Header.View = finalView + (rand.Uint64() % 4) + 1 finalReceipt := ReceiptForBlockFixture(finalBlock) - A.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{ - finalReceipt.Meta(), - }, - Results: []*flow.ExecutionResult{ - &finalReceipt.ExecutionResult, - }, - Seals: []*flow.Seal{ - Seal.Fixture( - Seal.WithResult(finalBlock.Payload.Results[0]), - ), - }, - }) + A := BlockWithParentAndPayload( + final, + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{ + finalReceipt.Stub(), + }, + Results: []*flow.ExecutionResult{ + &finalReceipt.ExecutionResult, + }, + Seals: []*flow.Seal{ + Seal.Fixture( + Seal.WithResult(finalBlock.Payload.Results[0]), + ), + }, + }) + // first view is not necessarily exactly final view of previous epoch + A.View = finalView + (rand.Uint64() % 4) + 1 builder.addBlock(A) return builder } -// BuildBlocks builds empty blocks on top of the finalized state. It is used -// to build epochs that are not the minimum possible length, which is the -// default result from chaining BuildEpoch and CompleteEpoch. 
-func (builder *EpochBuilder) BuildBlocks(n uint) { - head, err := builder.states[0].Final().Head() - require.NoError(builder.t, err) - for i := uint(0); i < n; i++ { - next := BlockWithParentFixture(head) - builder.addBlock(next) - head = next.Header - } -} - // addBlock adds the given block to the state by: extending the state, -// finalizing the block, marking the block as valid, and caching the block. +// finalizing the block, and caching the block. func (builder *EpochBuilder) addBlock(block *flow.Block) { + dbUpdates := deferred.NewDeferredBlockPersist() + updatedStateId, err := builder.mutableProtocolState.EvolveState(dbUpdates, block.ParentID, block.View, block.Payload.Seals) + require.NoError(builder.t, err) + require.False(builder.t, dbUpdates.IsEmpty()) + + block.Payload.ProtocolStateID = updatedStateId blockID := block.ID() for _, state := range builder.states { - err := state.ExtendCertified(context.Background(), block, CertifyBlock(block.Header)) + err = state.ExtendCertified(context.Background(), NewCertifiedBlock(block)) require.NoError(builder.t, err) err = state.Finalize(context.Background(), blockID) @@ -392,7 +398,6 @@ func (builder *EpochBuilder) AddBlocksWithSeals(n int, counter uint64) *EpochBui receiptB := ReceiptForBlockFixture(b) - block := BlockWithParentFixture(b.Header) seal := Seal.Fixture( Seal.WithResult(b.Payload.Results[0]), ) @@ -401,13 +406,16 @@ func (builder *EpochBuilder) AddBlocksWithSeals(n int, counter uint64) *EpochBui WithReceipts(receiptB), WithSeals(seal), ) - block.SetPayload(payload) + block := BlockWithParentAndPayload( + b.ToHeader(), + payload, + ) builder.addBlock(block) // update cache information about the built epoch // we have extended the commit phase - builder.built[counter].CommittedFinal = block.Header.Height + builder.built[counter].CommittedFinal = block.Height } return builder diff --git a/utils/unittest/events.go b/utils/unittest/events.go new file mode 100644 index 00000000000..1f236470c1f --- /dev/null +++ b/utils/unittest/events.go @@ -0,0 +1,269 @@ +package unittest + +import ( + "encoding/hex" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/ccf" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/onflow/cadence/stdlib" + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/model/flow" +) + +// EventGeneratorOption configures an Events generator. +type EventGeneratorOption func(*Events) + +// Events generates mock Flow events with incremental count and optional encoding. +type Events struct { + count uint32 + ids *Identifiers + encoding entities.EventEncodingVersion +} + +// NewEventGenerator creates a new Events generator. +func NewEventGenerator(opts ...EventGeneratorOption) *Events { + g := &Events{ + count: 1, + ids: IdentifierGenerator(), + encoding: entities.EventEncodingVersion_CCF_V0, + } + + for _, opt := range opts { + opt(g) + } + + return g +} + +// New creates a new flow.Event. 
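+// A usage sketch (generator and options as defined in this file):
+//
+//	g := NewEventGenerator(EventGenerator.WithEncoding(entities.EventEncodingVersion_CCF_V0))
+//	ev := g.New(Event.WithEventIndex(0))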
+func (e *Events) New(opts ...EventOption) flow.Event { + address, err := common.BytesToAddress(RandomAddressFixture().Bytes()) + if err != nil { + panic(fmt.Sprintf("unexpected error while creating random address: %s", err)) + } + + location := common.NewAddressLocation(nil, address, "TestContract") + identifier := fmt.Sprintf("TestContract.FooEvent%d", e.count) + typeID := location.TypeID(nil, identifier) + + event := &flow.Event{ + Type: flow.EventType(typeID), + TransactionID: e.ids.New(), + TransactionIndex: e.count, + EventIndex: e.count, + Payload: e.createNewEventPayload(location, identifier), + } + + for _, opt := range opts { + opt(event) + } + e.count++ + + return *event +} + +func (e *Events) createNewEventPayload(location common.AddressLocation, identifier string) []byte { + testEventType := cadence.NewEventType( + location, + identifier, + []cadence.Field{ + { + Identifier: "a", + Type: cadence.IntType, + }, + { + Identifier: "b", + Type: cadence.StringType, + }, + }, + nil, + ) + + randomString := cadence.String(hex.EncodeToString(RandomBytes(100))) + + testEvent := cadence.NewEvent( + []cadence.Value{ + cadence.NewInt(int(e.count)), + randomString, + }).WithType(testEventType) + + var payload []byte + var err error + switch e.encoding { + case entities.EventEncodingVersion_CCF_V0: + payload, err = ccf.Encode(testEvent) + if err != nil { + panic(fmt.Sprintf("unexpected error while ccf encoding events: %s", err)) + } + case entities.EventEncodingVersion_JSON_CDC_V0: + payload, err = jsoncdc.Encode(testEvent) + if err != nil { + panic(fmt.Sprintf("unexpected error while json encoding events: %s", err)) + } + } + + return payload +} + +var EventGenerator eventGeneratorFactory + +type eventGeneratorFactory struct{} + +// WithEncoding sets event encoding (CCF or JSON). +func (e *eventGeneratorFactory) WithEncoding(encoding entities.EventEncodingVersion) EventGeneratorOption { + return func(g *Events) { + g.encoding = encoding + } +} + +// GetEventsWithEncoding generates a specified number of events with a given encoding version. +func (e *eventGeneratorFactory) GetEventsWithEncoding(n int, version entities.EventEncodingVersion) []flow.Event { + eventGenerator := NewEventGenerator(EventGenerator.WithEncoding(version)) + events := make([]flow.Event, 0, n) + for i := 0; i < n; i++ { + events = append(events, eventGenerator.New()) + } + return events +} + +// GenerateAccountCreateEvent returns a mock account creation event. +func (e *eventGeneratorFactory) GenerateAccountCreateEvent(t *testing.T, address flow.Address) flow.Event { + cadenceEvent := cadence.NewEvent( + []cadence.Value{ + cadence.NewAddress(address), + }). + WithType(cadence.NewEventType( + stdlib.FlowLocation{}, + "AccountCreated", + []cadence.Field{ + { + Identifier: "address", + Type: cadence.AddressType, + }, + }, + nil, + )) + + payload, err := ccf.Encode(cadenceEvent) + require.NoError(t, err) + + return flow.Event{ + Type: flow.EventType(cadenceEvent.EventType.Location.TypeID(nil, cadenceEvent.EventType.QualifiedIdentifier)), + TransactionID: IdentifierFixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: payload, + } +} + +// GenerateAccountContractEvent returns a mock account contract event. 
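+// Sketch of typical use (the qualified identifier below is illustrative only):
+//
+//	ev := EventGenerator.GenerateAccountContractEvent(t, "ContractAdded", AddressFixture())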
+func (e *eventGeneratorFactory) GenerateAccountContractEvent(t *testing.T, qualifiedIdentifier string, address flow.Address) flow.Event {
+	contractName, err := cadence.NewString("EventContract")
+	require.NoError(t, err)
+
+	cadenceEvent := cadence.NewEvent(
+		[]cadence.Value{
+			cadence.NewAddress(address),
+			cadence.NewArray(
+				BytesToCdcUInt8(RandomBytes(32)),
+			).WithType(cadence.NewConstantSizedArrayType(32, cadence.UInt8Type)),
+			contractName,
+		}).
+		WithType(cadence.NewEventType(
+			stdlib.FlowLocation{},
+			qualifiedIdentifier,
+			[]cadence.Field{
+				{
+					Identifier: "address",
+					Type:       cadence.AddressType,
+				},
+				{
+					Identifier: "codeHash",
+					Type:       cadence.NewConstantSizedArrayType(32, cadence.UInt8Type),
+				},
+				{
+					Identifier: "contract",
+					Type:       cadence.StringType,
+				},
+			},
+			nil,
+		))
+
+	payload, err := ccf.Encode(cadenceEvent)
+	require.NoError(t, err)
+
+	return flow.Event{
+		Type:             flow.EventType(cadenceEvent.EventType.Location.TypeID(nil, cadenceEvent.EventType.QualifiedIdentifier)),
+		TransactionID:    IdentifierFixture(),
+		TransactionIndex: 0,
+		EventIndex:       0,
+		Payload:          payload,
+	}
+}
+
+var Event eventFactory
+
+type eventFactory struct{}
+
+// EventOption configures a flow.Event's fields.
+type EventOption func(*flow.Event)
+
+func (f *eventFactory) WithEventType(eventType flow.EventType) EventOption {
+	return func(e *flow.Event) {
+		e.Type = eventType
+	}
+}
+
+func (f *eventFactory) WithTransactionIndex(transactionIndex uint32) EventOption {
+	return func(e *flow.Event) {
+		e.TransactionIndex = transactionIndex
+	}
+}
+
+func (f *eventFactory) WithEventIndex(eventIndex uint32) EventOption {
+	return func(e *flow.Event) {
+		e.EventIndex = eventIndex
+	}
+}
+
+func (f *eventFactory) WithTransactionID(txID flow.Identifier) EventOption {
+	return func(e *flow.Event) {
+		e.TransactionID = txID
+	}
+}
+
+func (f *eventFactory) WithPayload(payload []byte) EventOption {
+	return func(e *flow.Event) {
+		e.Payload = payload
+	}
+}
+
+// EventFixture returns a single event.
+func EventFixture(
+	opts ...EventOption,
+) flow.Event {
+	g := NewEventGenerator(EventGenerator.WithEncoding(entities.EventEncodingVersion_CCF_V0))
+	return g.New(opts...)
+} + +func EventsFixture( + n int, +) []flow.Event { + events := make([]flow.Event, n) + g := NewEventGenerator(EventGenerator.WithEncoding(entities.EventEncodingVersion_CCF_V0)) + for i := 0; i < n; i++ { + events[i] = g.New( + Event.WithTransactionIndex(0), + Event.WithEventIndex(uint32(i)), + ) + } + + return events +} diff --git a/utils/unittest/execution_state.go b/utils/unittest/execution_state.go index 048ac1e1d94..3a82fedaf46 100644 --- a/utils/unittest/execution_state.go +++ b/utils/unittest/execution_state.go @@ -4,10 +4,9 @@ import ( "encoding/hex" "fmt" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" - "github.com/onflow/cadence" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" "github.com/onflow/flow-go/model/flow" ) @@ -24,7 +23,7 @@ const ServiceAccountPrivateKeySignAlgo = crypto.ECDSAP256 const ServiceAccountPrivateKeyHashAlgo = hash.SHA2_256 // Pre-calculated state commitment with root account with the above private key -const GenesisStateCommitmentHex = "1fa4f0ccd3b991627d2c95a7aca3294fbe7407f82711b55f6863dc03c970ce08" +const GenesisStateCommitmentHex = "b8bb1b56c944902e5e0877735e2e08779a3a6385c714169473ea723a47caba83" var GenesisStateCommitment flow.StateCommitment @@ -68,3 +67,30 @@ func init() { // fvm.AccountKeyWeightThreshold here ServiceAccountPublicKey = ServiceAccountPrivateKey.PublicKey(1000) } + +// this is done by printing the state commitment in TestBootstrapLedger test with different chain ID +func GenesisStateCommitmentByChainID(chainID flow.ChainID) flow.StateCommitment { + commitString := genesisCommitHexByChainID(chainID) + bytes, err := hex.DecodeString(commitString) + if err != nil { + panic("error while hex decoding hardcoded state commitment") + } + commit, err := flow.ToStateCommitment(bytes) + if err != nil { + panic("genesis state commitment size is invalid") + } + return commit +} + +func genesisCommitHexByChainID(chainID flow.ChainID) string { + if chainID == flow.Mainnet { + return GenesisStateCommitmentHex + } + if chainID == flow.Testnet { + return "cff935133d5591fb4984d82f748b99475cd6e135f40a25e2f5084035935aac7f" + } + if chainID == flow.Sandboxnet { + return "e1c08b17f9e5896f03fe28dd37ca396c19b26628161506924fbf785834646ea1" + } + return "713eab04e1dea71710e79dc53ba94b1e8cbc63e7c03a9a75f24acd4a28c6a655" +} diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 65111cb6c37..d944d454fb6 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -9,19 +9,20 @@ import ( "testing" "time" - blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/require" - "github.com/onflow/cadence" + "github.com/onflow/crypto" + "github.com/onflow/crypto/hash" + "github.com/stretchr/testify/require" sdk "github.com/onflow/flow-go-sdk" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/rest/util" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/bitutils" @@ -33,7 +34,7 @@ import ( "github.com/onflow/flow-go/model/encoding" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/model/flow/order" + 
"github.com/onflow/flow-go/model/flow/mapfunc" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/model/verification" "github.com/onflow/flow-go/module" @@ -41,17 +42,19 @@ import ( "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/module/signature" "github.com/onflow/flow-go/module/updatable_configs" - "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/message" + p2pconfig "github.com/onflow/flow-go/network/p2p/config" "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/state/protocol/protocol_state" + "github.com/onflow/flow-go/state/protocol/protocol_state/kvstore" "github.com/onflow/flow-go/utils/dsl" ) const ( - DefaultSeedFixtureLength = 64 - DefaultAddress = "localhost:0" + DefaultAddress = "localhost:0" ) // returns a deterministic math/rand PRG that can be used for deterministic randomness in tests only. @@ -72,9 +75,13 @@ func AddressFixture() flow.Address { } func RandomAddressFixture() flow.Address { + return RandomAddressFixtureForChain(flow.Testnet) +} + +func RandomAddressFixtureForChain(chainID flow.ChainID) flow.Address { // we use a 32-bit index - since the linear address generator uses 45 bits, // this won't error - addr, err := flow.Testnet.Chain().AddressAtIndex(uint64(rand.Uint32())) + addr, err := chainID.Chain().AddressAtIndex(uint64(rand.Uint32())) if err != nil { panic(err) } @@ -114,10 +121,11 @@ func InvalidFormatSignature() flow.TransactionSignature { func TransactionSignatureFixture() flow.TransactionSignature { sigLen := crypto.SignatureLenECDSAP256 s := flow.TransactionSignature{ - Address: AddressFixture(), - SignerIndex: 0, - Signature: SeedFixture(sigLen), - KeyIndex: 1, + Address: AddressFixture(), + SignerIndex: 0, + Signature: SeedFixture(sigLen), + KeyIndex: 1, + ExtensionData: []byte{}, } // make sure the ECDSA signature passes the format check s.Signature[sigLen/2] = 0 @@ -184,53 +192,117 @@ func AccountFixture() (*flow.Account, error) { }, nil } -func BlockFixture() flow.Block { - header := BlockHeaderFixture() - return *BlockWithParentFixture(header) +func ChainBlockFixtureWithRoot(root *flow.Header, n int) []*flow.Block { + bs := make([]*flow.Block, 0, n) + parent := root + for i := 0; i < n; i++ { + b := BlockWithParentFixture(parent) + bs = append(bs, b) + parent = b.ToHeader() + } + return bs } -func FullBlockFixture() flow.Block { - block := BlockFixture() - payload := block.Payload - payload.Seals = Seal.Fixtures(10) - payload.Results = []*flow.ExecutionResult{ - ExecutionResultFixture(), - ExecutionResultFixture(), - } - payload.Receipts = []*flow.ExecutionReceiptMeta{ - ExecutionReceiptFixture(WithResult(payload.Results[0])).Meta(), - ExecutionReceiptFixture(WithResult(payload.Results[1])).Meta(), +func RechainBlocks(blocks []*flow.Block) { + if len(blocks) == 0 { + return } - header := block.Header - header.PayloadHash = payload.Hash() + parent := blocks[0] + + for _, block := range blocks[1:] { + block.ParentID = parent.ID() + parent = block + } +} - return flow.Block{ - Header: header, - Payload: payload, +func FullBlockFixture() *flow.Block { + b := BlockFixture() + return &flow.Block{ + HeaderBody: b.HeaderBody, + Payload: PayloadFixture(WithAllTheFixins), } } func BlockFixtures(number int) []*flow.Block { blocks := make([]*flow.Block, 0, number) - for ; number > 0; number-- { + for range number { block := 
BlockFixture() - blocks = append(blocks, &block) + blocks = append(blocks, block) } return blocks } -func ProposalFixture() *messages.BlockProposal { - block := BlockFixture() - return ProposalFromBlock(&block) +func ProposalFixtures(number int) []*flow.Proposal { + proposals := make([]*flow.Proposal, 0, number) + for range number { + proposal := ProposalFixture() + proposals = append(proposals, proposal) + } + return proposals +} + +func ProposalFixture() *flow.Proposal { + return ProposalFromBlock(BlockFixture()) } -func ProposalFromBlock(block *flow.Block) *messages.BlockProposal { - return messages.NewBlockProposal(block) +func BlockResponseFixture(count int) *flow.BlockResponse { + blocks := make([]flow.Proposal, count) + for i := 0; i < count; i++ { + blocks[i] = *ProposalFixture() + } + return &flow.BlockResponse{ + Nonce: rand.Uint64(), + Blocks: blocks, + } } -func ClusterProposalFromBlock(block *cluster.Block) *messages.ClusterBlockProposal { - return messages.NewClusterBlockProposal(block) +func ClusterProposalFixture() *cluster.Proposal { + return ClusterProposalFromBlock(ClusterBlockFixture()) +} + +func ClusterBlockResponseFixture(count int) *cluster.BlockResponse { + blocks := make([]cluster.Proposal, count) + for i := 0; i < count; i++ { + blocks[i] = *ClusterProposalFixture() + } + return &cluster.BlockResponse{ + Nonce: rand.Uint64(), + Blocks: blocks, + } +} + +func ProposalHeaderFromHeader(header *flow.Header) *flow.ProposalHeader { + return &flow.ProposalHeader{ + Header: header, + ProposerSigData: SignatureFixture(), + } +} + +func ProposalFromBlock(block *flow.Block) *flow.Proposal { + return &flow.Proposal{ + Block: *block, + ProposerSigData: SignatureFixture(), + } +} + +func ClusterProposalFromBlock(block *cluster.Block) *cluster.Proposal { + return &cluster.Proposal{ + Block: *block, + ProposerSigData: SignatureFixture(), + } +} + +func BlockchainFixture(length int) []*flow.Block { + blocks := make([]*flow.Block, length) + + genesis := BlockFixture() + blocks[0] = genesis + for i := 1; i < length; i++ { + blocks[i] = BlockWithParentFixture(blocks[i-1].ToHeader()) + } + + return blocks } // AsSlashable returns the input message T, wrapped as a flow.Slashable instance with a random origin ID. @@ -242,14 +314,18 @@ func AsSlashable[T any](msg T) flow.Slashable[T] { return slashable } -func ReceiptAndSealForBlock(block *flow.Block) (*flow.ExecutionReceipt, *flow.Seal) { +// ReceiptAndSealForBlock returns a receipt with service events and a seal for them for a given block. 
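+// Sketch of typical use (the variadic service events are optional):
+//
+//	receipt, seal := ReceiptAndSealForBlock(block, serviceEvent)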
+func ReceiptAndSealForBlock(block *flow.Block, serviceEvents ...flow.ServiceEvent) (*flow.ExecutionReceipt, *flow.Seal) { receipt := ReceiptForBlockFixture(block) - seal := Seal.Fixture(Seal.WithBlock(block.Header), Seal.WithResult(&receipt.ExecutionResult)) + receipt.ExecutionResult.ServiceEvents = serviceEvents + seal := Seal.Fixture(Seal.WithBlock(block.ToHeader()), Seal.WithResult(&receipt.ExecutionResult)) return receipt, seal } func PayloadFixture(options ...func(*flow.Payload)) flow.Payload { - payload := flow.EmptyPayload() + payload := flow.Payload{ + ProtocolStateID: IdentifierFixture(), + } for _, option := range options { option(&payload) } @@ -264,10 +340,14 @@ func WithAllTheFixins(payload *flow.Payload) { payload.Seals = Seal.Fixtures(3) payload.Guarantees = CollectionGuaranteesFixture(4) for i := 0; i < 10; i++ { - receipt := ExecutionReceiptFixture() - payload.Receipts = flow.ExecutionReceiptMetaList{receipt.Meta()} - payload.Results = flow.ExecutionResultList{&receipt.ExecutionResult} + receipt := ExecutionReceiptFixture( + WithResult(ExecutionResultFixture(WithServiceEvents(3))), + WithSpocks(SignaturesFixture(3)), + ) + payload.Receipts = append(payload.Receipts, receipt.Stub()) + payload.Results = append(payload.Results, &receipt.ExecutionResult) } + payload.ProtocolStateID = IdentifierFixture() } func WithSeals(seals ...*flow.Seal) func(*flow.Payload) { @@ -285,17 +365,23 @@ func WithGuarantees(guarantees ...*flow.CollectionGuarantee) func(*flow.Payload) func WithReceipts(receipts ...*flow.ExecutionReceipt) func(*flow.Payload) { return func(payload *flow.Payload) { for _, receipt := range receipts { - payload.Receipts = append(payload.Receipts, receipt.Meta()) + payload.Receipts = append(payload.Receipts, receipt.Stub()) payload.Results = append(payload.Results, &receipt.ExecutionResult) } } } +func WithProtocolStateID(stateID flow.Identifier) func(payload *flow.Payload) { + return func(payload *flow.Payload) { + payload.ProtocolStateID = stateID + } +} + // WithReceiptsAndNoResults will add receipt to payload only func WithReceiptsAndNoResults(receipts ...*flow.ExecutionReceipt) func(*flow.Payload) { return func(payload *flow.Payload) { for _, receipt := range receipts { - payload.Receipts = append(payload.Receipts, receipt.Meta()) + payload.Receipts = append(payload.Receipts, receipt.Stub()) } } } @@ -310,22 +396,109 @@ func WithExecutionResults(results ...*flow.ExecutionResult) func(*flow.Payload) } func BlockWithParentFixture(parent *flow.Header) *flow.Block { - payload := PayloadFixture() - header := BlockHeaderWithParentFixture(parent) - header.PayloadHash = payload.Hash() + return BlockWithParentAndPayload(parent, PayloadFixture()) +} + +// BlockWithParentAndPayload creates a new block that is valid +// with respect to the given parent block and with given payload. +func BlockWithParentAndPayload(parent *flow.Header, payload flow.Payload) *flow.Block { + return &flow.Block{ + HeaderBody: HeaderBodyWithParentFixture(parent), + Payload: payload, + } +} + +// BlockWithParentAndUniqueView creates a child block of the given parent. +// We provide a set of views that are _not_ allowed to be used for the new block. A typical usage +// scenario is to create blocks of different forks, without accidentally creating two blocks with +// the same view. +// CAUTION: +// - modifies the set `forbiddenViews` by adding the view of the newly created block. +// - To generate the child's view, we randomly select a small increment and add it to the +// parent's view. 
If the set of views covers all possible increments, this function will panic.
+func BlockWithParentAndUniqueView(parent *flow.Header, forbiddenViews map[uint64]struct{}) *flow.Block {
+	var block *flow.Block
+	counter := 0
+	for {
+		block = BlockWithParentFixture(parent)
+		if _, hasForbiddenView := forbiddenViews[block.View]; !hasForbiddenView {
+			break
+		}
+		counter += 1
+		if counter > 20 {
+			panic(fmt.Sprintf("BlockWithParentAndUniqueView failed to generate child despite %d attempts", counter))
+		}
+	}
+	// block has a view that is not forbidden:
+	forbiddenViews[block.View] = struct{}{} // add the block's view to `forbiddenViews` to prevent future re-usage
+	return block
+}
+
+// BlockWithParentAndPayloadAndUniqueView creates a child block of the given parent, where the block
+// to be constructed will contain the given payload.
+// We provide a set of views that are _not_ allowed to be used for the new block. A typical usage
+// scenario is to create blocks of different forks, without accidentally creating two blocks with
+// the same view.
+// CAUTION:
+// - modifies the set `forbiddenViews` by adding the view of the newly created block.
+// - To generate the child's view, we randomly select a small increment and add it to the
+// parent's view. If the set of views covers all possible increments, this function will panic.
+func BlockWithParentAndPayloadAndUniqueView(parent *flow.Header, payload flow.Payload, forbiddenViews map[uint64]struct{}) *flow.Block {
+	var block *flow.Block
+	counter := 0
+	for {
+		block = BlockWithParentAndPayload(parent, payload)
+		if _, hasForbiddenView := forbiddenViews[block.View]; !hasForbiddenView {
+			break
+		}
+		counter += 1
+		if counter > 20 {
+			panic(fmt.Sprintf("BlockWithParentAndPayloadAndUniqueView failed to generate child despite %d attempts", counter))
+		}
+	}
+	// block has a view that is not forbidden:
+	forbiddenViews[block.View] = struct{}{} // add the block's view to `forbiddenViews` to prevent future re-usage
+	return block
+}
+
+func BlockWithParentProtocolState(parent *flow.Block) *flow.Block {
 	return &flow.Block{
-		Header:  header,
-		Payload: &payload,
+		HeaderBody: HeaderBodyWithParentFixture(parent.ToHeader()),
+		Payload:    PayloadFixture(WithProtocolStateID(parent.Payload.ProtocolStateID)),
+	}
+}
+
+// BlockWithParentProtocolStateAndUniqueView creates a child block of the given parent, such that
+// the child's protocol state is the same as the parent's.
+// We provide a set of views that are _not_ allowed to be used for the new block. A typical usage
+// scenario is to create blocks of different forks, without accidentally creating two blocks with
+// the same view.
+// CAUTION:
+// - modifies the set `forbiddenViews` by adding the view of the newly created block.
+// - To generate the child's view, we randomly select a small increment and add it to the
+// parent's view.
If the set of views covers all possible increments, this function will panic +func BlockWithParentProtocolStateAndUniqueView(parent *flow.Block, forbiddenViews map[uint64]struct{}) *flow.Block { + var block *flow.Block + counter := 0 + for { + block = BlockWithParentProtocolState(parent) + if _, hasForbiddenView := forbiddenViews[block.View]; !hasForbiddenView { + break + } + counter += 1 + if counter > 20 { + panic(fmt.Sprintf("BlockWithParentProtocolStateAndUniqueView failed to generate child despite %d attempts", counter)) + } } + // block has a view that is not forbidden: + forbiddenViews[block.View] = struct{}{} // add the block's view to `forbiddenViews` to prevent future re-usage + return block } func BlockWithGuaranteesFixture(guarantees []*flow.CollectionGuarantee) *flow.Block { - payload := PayloadFixture(WithGuarantees(guarantees...)) - header := BlockHeaderFixture() - header.PayloadHash = payload.Hash() return &flow.Block{ - Header: header, - Payload: &payload, + HeaderBody: HeaderBodyFixture(), + Payload: PayloadFixture(WithGuarantees(guarantees...)), } } @@ -342,28 +515,25 @@ func BlockWithParentAndProposerFixture( t *testing.T, parent *flow.Header, proposer flow.Identifier, -) flow.Block { +) *flow.Block { block := BlockWithParentFixture(parent) indices, err := signature.EncodeSignersToIndices( []flow.Identifier{proposer}, []flow.Identifier{proposer}) require.NoError(t, err) - block.Header.ProposerID = proposer - block.Header.ParentVoterIndices = indices - if block.Header.LastViewTC != nil { - block.Header.LastViewTC.SignerIndices = indices - block.Header.LastViewTC.NewestQC.SignerIndices = indices + block.ProposerID = proposer + block.ParentVoterIndices = indices + if block.LastViewTC != nil { + block.LastViewTC.SignerIndices = indices + block.LastViewTC.NewestQC.SignerIndices = indices } - return *block + return block } func BlockWithParentAndSeals(parent *flow.Header, seals []*flow.Header) *flow.Block { - block := BlockWithParentFixture(parent) - payload := flow.Payload{ - Guarantees: nil, - } + payload := flow.Payload{} if len(seals) > 0 { payload.Seals = make([]*flow.Seal, len(seals)) @@ -373,17 +543,10 @@ func BlockWithParentAndSeals(parent *flow.Header, seals []*flow.Header) *flow.Bl ) } } - - block.SetPayload(payload) - return block -} - -func GenesisFixture() *flow.Block { - genesis := flow.Genesis(flow.Emulator) - return genesis + return BlockWithParentAndPayload(parent, payload) } -func WithHeaderHeight(height uint64) func(header *flow.Header) { +func WithHeaderHeight(height uint64) func(*flow.Header) { return func(header *flow.Header) { header.Height = height } @@ -395,14 +558,16 @@ func HeaderWithView(view uint64) func(*flow.Header) { } } -func BlockHeaderFixture(opts ...func(header *flow.Header)) *flow.Header { +func HeaderBodyFixture(opts ...func(header flow.HeaderBody)) flow.HeaderBody { height := 1 + uint64(rand.Uint32()) // avoiding edge case of height = 0 (genesis block) view := height + uint64(rand.Intn(1000)) - header := BlockHeaderWithParentFixture(&flow.Header{ - ChainID: flow.Emulator, - ParentID: IdentifierFixture(), - Height: height, - View: view, + header := HeaderBodyWithParentFixture(&flow.Header{ + HeaderBody: flow.HeaderBody{ + ChainID: flow.Emulator, + ParentID: IdentifierFixture(), + Height: height, + View: view, + }, }) for _, opt := range opts { @@ -412,10 +577,26 @@ func BlockHeaderFixture(opts ...func(header *flow.Header)) *flow.Header { return header } -func CidFixture() cid.Cid { - data := make([]byte, 1024) - _, _ = rand.Read(data) - 
return blocks.NewBlock(data).Cid() +func BlockHeaderFixture(opts ...func(header *flow.Header)) *flow.Header { + height := 1 + uint64(rand.Uint32()) // avoiding edge case of height = 0 (genesis block) + view := height + uint64(rand.Intn(1000)) + header := BlockHeaderWithParentFixture(&flow.Header{ + HeaderBody: flow.HeaderBody{ + ChainID: flow.Emulator, + ParentID: IdentifierFixture(), + Height: height, + View: view, + ParentVoterIndices: SignerIndicesFixture(4), + ParentVoterSigData: QCSigDataFixture(), + ProposerID: IdentifierFixture(), + }, + }) + + for _, opt := range opts { + opt(header) + } + + return header } func BlockHeaderFixtureOnChain( @@ -425,10 +606,12 @@ func BlockHeaderFixtureOnChain( height := 1 + uint64(rand.Uint32()) // avoiding edge case of height = 0 (genesis block) view := height + uint64(rand.Intn(1000)) header := BlockHeaderWithParentFixture(&flow.Header{ - ChainID: chainID, - ParentID: IdentifierFixture(), - Height: height, - View: view, + HeaderBody: flow.HeaderBody{ + ChainID: chainID, + ParentID: IdentifierFixture(), + Height: height, + View: view, + }, }) for _, opt := range opts { @@ -439,6 +622,13 @@ func BlockHeaderFixtureOnChain( } func BlockHeaderWithParentFixture(parent *flow.Header) *flow.Header { + return &flow.Header{ + HeaderBody: HeaderBodyWithParentFixture(parent), + PayloadHash: IdentifierFixture(), + } +} + +func HeaderBodyWithParentFixture(parent *flow.Header) flow.HeaderBody { height := parent.Height + 1 view := parent.View + 1 + uint64(rand.Intn(10)) // Intn returns [0, n) var lastViewTC *flow.TimeoutCertificate @@ -454,65 +644,55 @@ func BlockHeaderWithParentFixture(parent *flow.Header) *flow.Header { SigData: SignatureFixture(), } } - return &flow.Header{ + return flow.HeaderBody{ ChainID: parent.ChainID, ParentID: parent.ID(), Height: height, - PayloadHash: IdentifierFixture(), - Timestamp: time.Now().UTC(), + Timestamp: uint64(time.Now().UnixMilli()), View: view, ParentView: parent.View, ParentVoterIndices: SignerIndicesFixture(4), ParentVoterSigData: QCSigDataFixture(), ProposerID: IdentifierFixture(), - ProposerSigData: SignatureFixture(), LastViewTC: lastViewTC, } } -func ClusterPayloadFixture(n int) *cluster.Payload { - transactions := make([]*flow.TransactionBody, n) - for i := 0; i < n; i++ { - tx := TransactionBodyFixture() - transactions[i] = &tx - } - payload := cluster.PayloadFromTransactions(flow.ZeroID, transactions...) - return &payload +func BlockHeaderWithHeight(height uint64) *flow.Header { + return BlockHeaderFixture(WithHeaderHeight(height)) } -func ClusterBlockFixture() cluster.Block { - - payload := ClusterPayloadFixture(3) - header := BlockHeaderFixture() - header.PayloadHash = payload.Hash() +func BlockHeaderWithParentWithSoRFixture(parent *flow.Header, source []byte) *flow.Header { + headerBody := HeaderBodyWithParentFixture(parent) + headerBody.ParentVoterSigData = QCSigDataWithSoRFixture(source) - return cluster.Block{ - Header: header, - Payload: payload, + return &flow.Header{ + HeaderBody: headerBody, + PayloadHash: IdentifierFixture(), } } -// ClusterBlockWithParent creates a new cluster consensus block that is valid -// with respect to the given parent block. 
-func ClusterBlockWithParent(parent *cluster.Block) cluster.Block { +func ClusterPayloadFixture(transactionsCount int) *cluster.Payload { + return &cluster.Payload{ + ReferenceBlockID: IdentifierFixture(), + Collection: CollectionFixture(transactionsCount), + } +} - payload := ClusterPayloadFixture(3) +func ClusterBlockFixtures(n int) []*cluster.Block { + clusterBlocks := make([]*cluster.Block, 0, n) - header := BlockHeaderFixture() - header.Height = parent.Header.Height + 1 - header.View = parent.Header.View + 1 - header.ChainID = parent.Header.ChainID - header.Timestamp = time.Now() - header.ParentID = parent.ID() - header.ParentView = parent.Header.View - header.PayloadHash = payload.Hash() + parent := ClusterBlockFixture() - block := cluster.Block{ - Header: header, - Payload: payload, + for i := 0; i < n; i++ { + block := ClusterBlockFixture( + ClusterBlock.WithParent(parent), + ) + clusterBlocks = append(clusterBlocks, block) + parent = block } - return block + return clusterBlocks } func WithCollRef(refID flow.Identifier) func(*flow.CollectionGuarantee) { @@ -527,11 +707,22 @@ func WithCollection(collection *flow.Collection) func(guarantee *flow.Collection } } +func AddCollectionsToBlock(block *flow.Block, collections []*flow.Collection) { + gs := make([]*flow.CollectionGuarantee, 0, len(collections)) + for _, collection := range collections { + gs = append(gs, &flow.CollectionGuarantee{CollectionID: collection.ID()}) + } + + block.Payload.Guarantees = gs +} + func CollectionGuaranteeFixture(options ...func(*flow.CollectionGuarantee)) *flow.CollectionGuarantee { guarantee := &flow.CollectionGuarantee{ - CollectionID: IdentifierFixture(), - SignerIndices: RandomBytes(16), - Signature: SignatureFixture(), + CollectionID: IdentifierFixture(), + ReferenceBlockID: IdentifierFixture(), + ClusterChainID: flow.ChainID("cluster-1-00000000"), + SignerIndices: RandomBytes(16), + Signature: SignatureFixture(), } for _, option := range options { option(guarantee) @@ -581,10 +772,9 @@ func CollectionListFixture(n int, options ...func(*flow.Collection)) []*flow.Col func CollectionFixture(n int, options ...func(*flow.Collection)) flow.Collection { transactions := make([]*flow.TransactionBody, 0, n) - for i := 0; i < n; i++ { tx := TransactionFixture() - transactions = append(transactions, &tx.TransactionBody) + transactions = append(transactions, &tx) } col := flow.Collection{Transactions: transactions} @@ -609,7 +799,9 @@ func CompleteCollectionFixture() *entity.CompleteCollection { ReferenceBlockID: FixedReferenceBlockID(), SignerIndices: SignerIndicesFixture(1), }, - Transactions: []*flow.TransactionBody{&txBody}, + Collection: &flow.Collection{ + Transactions: []*flow.TransactionBody{&txBody}, + }, } } @@ -621,7 +813,9 @@ func CompleteCollectionFromTransactions(txs []*flow.TransactionBody) *entity.Com ReferenceBlockID: IdentifierFixture(), SignerIndices: SignerIndicesFixture(3), }, - Transactions: txs, + Collection: &flow.Collection{ + Transactions: txs, + }, } } @@ -629,7 +823,6 @@ func ExecutableBlockFixture( collectionsSignerIDs [][]flow.Identifier, startState *flow.StateCommitment, ) *entity.ExecutableBlock { - header := BlockHeaderFixture() return ExecutableBlockFixtureWithParent(collectionsSignerIDs, header, startState) } @@ -650,8 +843,6 @@ func ExecutableBlockFixtureWithParent( completeCollections[completeCollection.Guarantee.CollectionID] = completeCollection } - block.Header.PayloadHash = block.Payload.Hash() - executableBlock := &entity.ExecutableBlock{ Block: block, 
CompleteCollections: completeCollections, @@ -676,14 +867,12 @@ func ExecutableBlockFromTransactions( completeCollections[cc.Guarantee.CollectionID] = cc } - block.Header.PayloadHash = block.Payload.Hash() - executableBlock := &entity.ExecutableBlock{ Block: &block, CompleteCollections: completeCollections, } // Preload the id - executableBlock.ID() + executableBlock.BlockID() return executableBlock } @@ -699,12 +888,30 @@ func WithResult(result *flow.ExecutionResult) func(*flow.ExecutionReceipt) { } } +func WithSpocks(spocks []crypto.Signature) func(*flow.ExecutionReceipt) { + return func(receipt *flow.ExecutionReceipt) { + receipt.Spocks = spocks + } +} + func ExecutionReceiptFixture(opts ...func(*flow.ExecutionReceipt)) *flow.ExecutionReceipt { receipt := &flow.ExecutionReceipt{ - ExecutorID: IdentifierFixture(), - ExecutionResult: *ExecutionResultFixture(), - Spocks: nil, - ExecutorSignature: SignatureFixture(), + UnsignedExecutionReceipt: *UnsignedExecutionReceiptFixture(), + ExecutorSignature: SignatureFixture(), + } + + for _, apply := range opts { + apply(receipt) + } + + return receipt +} + +func UnsignedExecutionReceiptFixture(opts ...func(*flow.UnsignedExecutionReceipt)) *flow.UnsignedExecutionReceipt { + receipt := &flow.UnsignedExecutionReceipt{ + ExecutorID: IdentifierFixture(), + ExecutionResult: *ExecutionResultFixture(), + Spocks: SignaturesFixture(1), } for _, apply := range opts { @@ -750,26 +957,28 @@ func WithPreviousResult(prevResult flow.ExecutionResult) func(*flow.ExecutionRes } } +func WithPreviousResultID(previousResultID flow.Identifier) func(*flow.ExecutionResult) { + return func(result *flow.ExecutionResult) { + result.PreviousResultID = previousResultID + } +} + func WithBlock(block *flow.Block) func(*flow.ExecutionResult) { chunks := 1 // tailing chunk is always system chunk - var previousResultID flow.Identifier - if block.Payload != nil { - chunks += len(block.Payload.Guarantees) - } + chunks += len(block.Payload.Guarantees) blockID := block.ID() return func(result *flow.ExecutionResult) { startState := result.Chunks[0].StartState // retain previous start state in case it was user-defined result.BlockID = blockID - result.Chunks = ChunkListFixture(uint(chunks), blockID) - result.Chunks[0].StartState = startState // set start state to value before update - result.PreviousResultID = previousResultID + result.Chunks = ChunkListFixture(uint(chunks), blockID, startState) + result.PreviousResultID = IdentifierFixture() } } func WithChunks(n uint) func(*flow.ExecutionResult) { return func(result *flow.ExecutionResult) { - result.Chunks = ChunkListFixture(n, result.BlockID) + result.Chunks = ChunkListFixture(n, result.BlockID, StateCommitmentFixture()) } } @@ -794,9 +1003,20 @@ func WithExecutionResultBlockID(blockID flow.Identifier) func(*flow.ExecutionRes } } +func WithFinalState(commit flow.StateCommitment) func(*flow.ExecutionResult) { + return func(result *flow.ExecutionResult) { + result.Chunks[len(result.Chunks)-1].EndState = commit + } +} + func WithServiceEvents(n int) func(result *flow.ExecutionResult) { return func(result *flow.ExecutionResult) { result.ServiceEvents = ServiceEventsFixture(n) + // randomly assign service events to chunks + for i := 0; i < n; i++ { + chunkIndex := rand.Intn(result.Chunks.Len()) + result.Chunks[chunkIndex].ServiceEventCount++ + } } } @@ -824,11 +1044,11 @@ func ServiceEventsFixture(n int) flow.ServiceEventList { } func ExecutionResultFixture(opts ...func(*flow.ExecutionResult)) *flow.ExecutionResult { - blockID := 
IdentifierFixture() + executedBlockID := IdentifierFixture() result := &flow.ExecutionResult{ PreviousResultID: IdentifierFixture(), - BlockID: IdentifierFixture(), - Chunks: ChunkListFixture(2, blockID), + BlockID: executedBlockID, + Chunks: ChunkListFixture(2, executedBlockID, StateCommitmentFixture()), ExecutionDataID: IdentifierFixture(), } @@ -881,7 +1101,7 @@ func ResultApprovalFixture(opts ...func(*flow.ResultApproval)) *flow.ResultAppro Attestation: attestation, ApproverID: IdentifierFixture(), AttestationSignature: SignatureFixture(), - Spock: nil, + Spock: SignatureFixture(), }, VerifierSignature: SignatureFixture(), } @@ -937,7 +1157,7 @@ func IdentifierFixture() flow.Identifier { func SignerIndicesFixture(n int) []byte { indices := bitutils.MakeBitVector(10) for i := 0; i < n; i++ { - bitutils.SetBit(indices, 1) + bitutils.SetBit(indices, i) } return indices } @@ -957,16 +1177,17 @@ func WithRole(role flow.Role) func(*flow.Identity) { } } -// WithWeight sets the weight on an identity fixture. -func WithWeight(weight uint64) func(*flow.Identity) { +// WithInitialWeight sets the initial weight on an identity fixture. +func WithInitialWeight(weight uint64) func(*flow.Identity) { return func(identity *flow.Identity) { - identity.Weight = weight + identity.InitialWeight = weight } } -func WithEjected(ejected bool) func(*flow.Identity) { +// WithParticipationStatus sets the epoch participation status on an identity fixture. +func WithParticipationStatus(status flow.EpochParticipationStatus) func(*flow.Identity) { return func(identity *flow.Identity) { - identity.Ejected = ejected + identity.EpochParticipationStatus = status } } @@ -1001,13 +1222,33 @@ func NodeConfigFixture(opts ...func(*flow.Identity)) bootstrap.NodeConfig { return bootstrap.NodeConfig{ Role: identity.Role, Address: identity.Address, - Weight: identity.Weight, + Weight: identity.InitialWeight, } } func NodeInfoFixture(opts ...func(*flow.Identity)) bootstrap.NodeInfo { - opts = append(opts, WithKeys) - return bootstrap.NodeInfoFromIdentity(IdentityFixture(opts...)) + nodes := NodeInfosFixture(1, opts...) + return nodes[0] +} + +// NodeInfoFromIdentity converts an identity to a public NodeInfo +// WARNING: the function replaces the staking key from the identity by a freshly generated one. +func NodeInfoFromIdentity(identity *flow.Identity) bootstrap.NodeInfo { + stakingSK := StakingPrivKeyFixture() + stakingPoP, err := crypto.BLSGeneratePOP(stakingSK) + if err != nil { + panic(err.Error()) + } + + return bootstrap.NewPublicNodeInfo( + identity.NodeID, + identity.Role, + identity.Address, + identity.InitialWeight, + identity.NetworkPubKey, + stakingSK.PublicKey(), + stakingPoP, + ) } func NodeInfosFixture(n int, opts ...func(*flow.Identity)) []bootstrap.NodeInfo { @@ -1015,16 +1256,26 @@ func NodeInfosFixture(n int, opts ...func(*flow.Identity)) []bootstrap.NodeInfo il := IdentityListFixture(n, opts...) nodeInfos := make([]bootstrap.NodeInfo, 0, n) for _, identity := range il { - nodeInfos = append(nodeInfos, bootstrap.NodeInfoFromIdentity(identity)) + nodeInfos = append(nodeInfos, NodeInfoFromIdentity(identity)) } return nodeInfos } +func PrivateNodeInfoFixture(opts ...func(*flow.Identity)) bootstrap.NodeInfo { + return PrivateNodeInfosFixture(1, opts...)[0] +} + func PrivateNodeInfosFixture(n int, opts ...func(*flow.Identity)) []bootstrap.NodeInfo { - il := IdentityListFixture(n, opts...) 
- nodeInfos := make([]bootstrap.NodeInfo, 0, n) + return PrivateNodeInfosFromIdentityList(IdentityListFixture(n, opts...)) +} + +func PrivateNodeInfosFromIdentityList(il flow.IdentityList) []bootstrap.NodeInfo { + nodeInfos := make([]bootstrap.NodeInfo, 0, len(il)) for _, identity := range il { - nodeInfo := bootstrap.PrivateNodeInfoFromIdentity(identity, KeyFixture(crypto.ECDSAP256), KeyFixture(crypto.BLSBLS12381)) + nodeInfo, err := bootstrap.PrivateNodeInfoFromIdentity(identity, KeyFixture(crypto.ECDSAP256), KeyFixture(crypto.BLSBLS12381)) + if err != nil { + panic(err.Error()) + } nodeInfos = append(nodeInfos, nodeInfo) } return nodeInfos @@ -1035,11 +1286,16 @@ func IdentityFixture(opts ...func(*flow.Identity)) *flow.Identity { nodeID := IdentifierFixture() stakingKey := StakingPrivKeyByIdentifier(nodeID) identity := flow.Identity{ - NodeID: nodeID, - Address: fmt.Sprintf("address-%x", nodeID[0:7]), - Role: flow.RoleConsensus, - Weight: 1000, - StakingPubKey: stakingKey.PublicKey(), + IdentitySkeleton: flow.IdentitySkeleton{ + NodeID: nodeID, + Address: fmt.Sprintf("address-%x", nodeID[0:7]), + Role: flow.RoleConsensus, + InitialWeight: 1000, + StakingPubKey: stakingKey.PublicKey(), + }, + DynamicIdentity: flow.DynamicIdentity{ + EpochParticipationStatus: flow.EpochParticipationStatusActive, + }, } for _, apply := range opts { apply(&identity) @@ -1087,14 +1343,14 @@ func WithRandomPublicKeys() func(*flow.Identity) { } } -// WithAllRoles can be used used to ensure an IdentityList fixtures contains +// WithAllRoles can be used to ensure an IdentityList fixtures contains // all the roles required for a valid genesis block. func WithAllRoles() func(*flow.Identity) { return WithAllRolesExcept() } -// Same as above, but omitting a certain role for cases where we are manually -// setting up nodes or a particular role. +// WithAllRolesExcept is used to ensure an IdentityList fixture contains all roles +// except omitting a certain role, for cases where we are manually setting up nodes. func WithAllRolesExcept(except ...flow.Role) func(*flow.Identity) { i := 0 roles := flow.Roles() @@ -1151,22 +1407,54 @@ func IdentityListFixture(n int, opts ...func(*flow.Identity)) flow.IdentityList return identities } +// DynamicIdentityEntryFixture returns the DynamicIdentityEntry object. The dynamic identity entry +// can be customized (ie. set Ejected). +func DynamicIdentityEntryFixture(opts ...func(*flow.DynamicIdentityEntry)) *flow.DynamicIdentityEntry { + dynamicIdentityEntry := &flow.DynamicIdentityEntry{ + NodeID: IdentifierFixture(), + Ejected: false, + } + + for _, opt := range opts { + opt(dynamicIdentityEntry) + } + + return dynamicIdentityEntry +} + +// DynamicIdentityEntryListFixture returns a list of DynamicIdentityEntry objects. +func DynamicIdentityEntryListFixture(n int, opts ...func(*flow.DynamicIdentityEntry)) flow.DynamicIdentityEntryList { + list := make(flow.DynamicIdentityEntryList, n) + for i := 0; i < n; i++ { + list[i] = DynamicIdentityEntryFixture(opts...) 
+ } + return list +} + func WithChunkStartState(startState flow.StateCommitment) func(chunk *flow.Chunk) { return func(chunk *flow.Chunk) { chunk.StartState = startState } } +func WithServiceEventCount(count uint16) func(*flow.Chunk) { + return func(chunk *flow.Chunk) { + chunk.ServiceEventCount = count + } +} + func ChunkFixture( blockID flow.Identifier, collectionIndex uint, + startState flow.StateCommitment, opts ...func(*flow.Chunk), ) *flow.Chunk { chunk := &flow.Chunk{ ChunkBody: flow.ChunkBody{ CollectionIndex: collectionIndex, - StartState: StateCommitmentFixture(), + StartState: startState, EventCollection: IdentifierFixture(), + ServiceEventCount: 0, TotalComputationUsed: 4200, NumberOfTransactions: 42, BlockID: blockID, @@ -1182,12 +1470,13 @@ func ChunkFixture( return chunk } -func ChunkListFixture(n uint, blockID flow.Identifier) flow.ChunkList { +func ChunkListFixture(n uint, blockID flow.Identifier, startState flow.StateCommitment, opts ...func(*flow.Chunk)) flow.ChunkList { chunks := make([]*flow.Chunk, 0, n) for i := uint64(0); i < uint64(n); i++ { - chunk := ChunkFixture(blockID, uint(i)) + chunk := ChunkFixture(blockID, uint(i), startState, opts...) chunk.Index = i chunks = append(chunks, chunk) + startState = chunk.EndState } return chunks } @@ -1250,8 +1539,7 @@ func ChunkStatusListFixture( return statuses } -func QCSigDataFixture() []byte { - packer := hotstuff.SigDataPacker{} +func qcSignatureDataFixture() hotstuff.SignatureData { sigType := RandomBytes(5) for i := range sigType { sigType[i] = sigType[i] % 2 @@ -1262,6 +1550,20 @@ func QCSigDataFixture() []byte { AggregatedRandomBeaconSig: SignatureFixture(), ReconstructedRandomBeaconSig: SignatureFixture(), } + return sigData +} + +func QCSigDataFixture() []byte { + packer := hotstuff.SigDataPacker{} + sigData := qcSignatureDataFixture() + encoded, _ := packer.Encode(&sigData) + return encoded +} + +func QCSigDataWithSoRFixture(sor []byte) []byte { + packer := hotstuff.SigDataPacker{} + sigData := qcSignatureDataFixture() + sigData.ReconstructedRandomBeaconSig = sor encoded, _ := packer.Encode(&sigData) return encoded } @@ -1280,17 +1582,22 @@ func SignaturesFixture(n int) []crypto.Signature { return sigs } -func TransactionFixture(n ...func(t *flow.Transaction)) flow.Transaction { - tx := flow.Transaction{TransactionBody: TransactionBodyFixture()} - if len(n) > 0 { - n[0](&tx) +func RandomSourcesFixture(n int) [][]byte { + var sigs [][]byte + for i := 0; i < n; i++ { + sigs = append(sigs, SignatureFixture()) } - return tx + return sigs } +func TransactionFixture(opts ...func(*flow.TransactionBody)) flow.TransactionBody { + return TransactionBodyFixture(opts...) 
+} + +// DEPRECATED: please use TransactionFixture instead func TransactionBodyFixture(opts ...func(*flow.TransactionBody)) flow.TransactionBody { tb := flow.TransactionBody{ - Script: []byte("pub fun main() {}"), + Script: []byte("access(all) fun main() {}"), ReferenceBlockID: IdentifierFixture(), GasLimit: 10, ProposalKey: ProposalKeyFixture(), @@ -1329,39 +1636,50 @@ func WithReferenceBlock(id flow.Identifier) func(tx *flow.TransactionBody) { func TransactionDSLFixture(chain flow.Chain) dsl.Transaction { return dsl.Transaction{ - Import: dsl.Import{Address: sdk.Address(chain.ServiceAddress())}, + Imports: dsl.Imports{ + dsl.Import{ + Address: sdk.Address(chain.ServiceAddress()), + Names: []string{"FlowTransactionScheduler"}, + }, + }, Content: dsl.Prepare{ Content: dsl.Code(` - pub fun main() {} + access(all) fun main() {} `), }, } } +// RegisterIDFixture returns a RegisterID with a fixed key and owner +func RegisterIDFixture() flow.RegisterID { + return flow.NewRegisterID(RandomAddressFixture(), "key") +} + // VerifiableChunkDataFixture returns a complete verifiable chunk with an // execution receipt referencing the block/collections. -func VerifiableChunkDataFixture(chunkIndex uint64) *verification.VerifiableChunkData { +func VerifiableChunkDataFixture(chunkIndex uint64, opts ...func(*flow.HeaderBody)) (*verification.VerifiableChunkData, *flow.Block) { - guarantees := make([]*flow.CollectionGuarantee, 0) + guarantees := make([]*flow.CollectionGuarantee, 0, chunkIndex+1) var col flow.Collection for i := 0; i <= int(chunkIndex); i++ { col = CollectionFixture(1) - guarantee := col.Guarantee() - guarantees = append(guarantees, &guarantee) + guarantees = append(guarantees, &flow.CollectionGuarantee{CollectionID: col.ID()}) } payload := flow.Payload{ Guarantees: guarantees, Seals: nil, } - header := BlockHeaderFixture() - header.PayloadHash = payload.Hash() + headerBody := HeaderBodyFixture() + for _, opt := range opts { + opt(&headerBody) + } - block := flow.Block{ - Header: header, - Payload: &payload, + block := &flow.Block{ + HeaderBody: headerBody, + Payload: payload, } chunks := make([]*flow.Chunk, 0) @@ -1381,8 +1699,9 @@ func VerifiableChunkDataFixture(chunkIndex uint64) *verification.VerifiableChunk } result := flow.ExecutionResult{ - BlockID: block.ID(), - Chunks: chunks, + PreviousResultID: IdentifierFixture(), + BlockID: block.ID(), + Chunks: chunks, } // computes chunk end state @@ -1396,13 +1715,17 @@ func VerifiableChunkDataFixture(chunkIndex uint64) *verification.VerifiableChunk endState = result.Chunks[index+1].StartState } + chunkDataPack := ChunkDataPackFixture(chunk.ID(), func(c *flow.ChunkDataPack) { + c.Collection = &col + }) + return &verification.VerifiableChunkData{ Chunk: &chunk, - Header: block.Header, + Header: block.ToHeader(), Result: &result, - ChunkDataPack: ChunkDataPackFixture(result.ID()), + ChunkDataPack: chunkDataPack, EndState: endState, - } + }, block } // ChunkDataResponseMsgFixture creates a chunk data response message with a single-transaction collection, and random chunk ID. 
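Note: `VerifiableChunkDataFixture` now returns the generated block alongside the verifiable chunk data, so callers no longer need to reconstruct it from the header. A minimal usage sketch (not part of the patch), assuming the `utils/unittest` package and testify's `require`, both used elsewhere in this repository:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/utils/unittest"
)

func TestVerifiableChunkDataFixture(t *testing.T) {
	// The second return value is the block referenced by the chunk's execution result.
	vcd, block := unittest.VerifiableChunkDataFixture(0)
	require.Equal(t, block.ID(), vcd.Result.BlockID)
}
```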
@@ -1412,7 +1735,7 @@ func ChunkDataResponseMsgFixture( opts ...func(*messages.ChunkDataResponse), ) *messages.ChunkDataResponse { cdp := &messages.ChunkDataResponse{ - ChunkDataPack: *ChunkDataPackFixture(chunkID), + ChunkDataPack: flow.UntrustedChunkDataPack(*ChunkDataPackFixture(chunkID)), Nonce: rand.Uint64(), } @@ -1429,8 +1752,7 @@ func WithApproximateSize(bytes uint64) func(*messages.ChunkDataResponse) { // 1 tx fixture is approximately 350 bytes txCount := bytes / 350 collection := CollectionFixture(int(txCount) + 1) - pack := ChunkDataPackFixture(request.ChunkDataPack.ChunkID, WithChunkDataPackCollection(&collection)) - request.ChunkDataPack = *pack + request.ChunkDataPack = flow.UntrustedChunkDataPack(*ChunkDataPackFixture(request.ChunkDataPack.ChunkID, WithChunkDataPackCollection(&collection))) } } @@ -1490,37 +1812,46 @@ func WithChunkID(chunkID flow.Identifier) func(*verification.ChunkDataPackReques // Use options to customize the request. func ChunkDataPackRequestFixture(opts ...func(*verification.ChunkDataPackRequest)) *verification. ChunkDataPackRequest { - req := &verification.ChunkDataPackRequest{ - Locator: chunks.Locator{ - ResultID: IdentifierFixture(), - Index: 0, - }, - ChunkDataPackRequestInfo: verification.ChunkDataPackRequestInfo{ - ChunkID: IdentifierFixture(), - Height: 0, - Agrees: IdentifierListFixture(1), - Disagrees: IdentifierListFixture(1), - }, + Locator: *ChunkLocatorFixture(IdentifierFixture(), 0), + ChunkDataPackRequestInfo: *ChunkDataPackRequestInfoFixture(), } for _, opt := range opts { opt(req) } + // Ensure Targets reflects current Agrees and Disagrees + req.Targets = makeTargets(req.Agrees, req.Disagrees) + + return req +} + +func ChunkDataPackRequestInfoFixture() *verification.ChunkDataPackRequestInfo { + agrees := IdentifierListFixture(1) + disagrees := IdentifierListFixture(1) + + return &verification.ChunkDataPackRequestInfo{ + ChunkID: IdentifierFixture(), + Height: 0, + Agrees: agrees, + Disagrees: disagrees, + Targets: makeTargets(agrees, disagrees), + } +} + +// makeTargets returns a combined IdentityList for the given agrees and disagrees. +func makeTargets(agrees, disagrees flow.IdentifierList) flow.IdentityList { // creates identity fixtures for target ids as union of agrees and disagrees // TODO: remove this inner fixture once we have filter for identifier list. targets := flow.IdentityList{} - for _, id := range req.Agrees { - targets = append(targets, IdentityFixture(WithNodeID(id), WithRole(flow.RoleExecution))) + for _, id := range append(agrees, disagrees...) 
{ + targets = append(targets, IdentityFixture( + WithNodeID(id), + WithRole(flow.RoleExecution), + )) } - for _, id := range req.Disagrees { - targets = append(targets, IdentityFixture(WithNodeID(id), WithRole(flow.RoleExecution))) - } - - req.Targets = targets - - return req + return targets } func WithChunkDataPackCollection(collection *flow.Collection) func(*flow.ChunkDataPack) { @@ -1545,6 +1876,10 @@ func ChunkDataPackFixture( StartState: StateCommitmentFixture(), Proof: []byte{'p'}, Collection: &coll, + ExecutionDataRoot: flow.BlockExecutionDataRoot{ + BlockID: IdentifierFixture(), + ChunkExecutionDataIDs: []cid.Cid{flow.IdToCid(IdentifierFixture())}, + }, } for _, opt := range opts { @@ -1566,6 +1901,15 @@ func ChunkDataPacksFixture( return chunkDataPacks } +func ChunkDataPacksFixtureAndResult() ([]*flow.ChunkDataPack, *flow.ExecutionResult) { + result := ExecutionResultFixture() + cdps := make([]*flow.ChunkDataPack, 0, len(result.Chunks)) + for _, c := range result.Chunks { + cdps = append(cdps, ChunkDataPackFixture(c.ID())) + } + return cdps, result +} + // SeedFixture returns a random []byte with length n func SeedFixture(n int) []byte { var seed = make([]byte, n) @@ -1586,46 +1930,25 @@ func SeedFixtures(m int, n int) [][]byte { func BlockEventsFixture( header *flow.Header, n int, - types ...flow.EventType, ) flow.BlockEvents { - if len(types) == 0 { - types = []flow.EventType{"A.0x1.Foo.Bar", "A.0x2.Zoo.Moo", "A.0x3.Goo.Hoo"} - } - - events := make([]flow.Event, n) - for i := 0; i < n; i++ { - events[i] = EventFixture(types[i%len(types)], 0, uint32(i), IdentifierFixture(), 0) - } - return flow.BlockEvents{ BlockID: header.ID(), BlockHeight: header.Height, - BlockTimestamp: header.Timestamp, - Events: events, + BlockTimestamp: time.UnixMilli(int64(header.Timestamp)).UTC(), + Events: EventsFixture(n), } } -// EventFixture returns an event -func EventFixture( - eType flow.EventType, - transactionIndex uint32, - eventIndex uint32, - txID flow.Identifier, - _ int, -) flow.Event { - return flow.Event{ - Type: eType, - TransactionIndex: transactionIndex, - EventIndex: eventIndex, - Payload: []byte{}, - TransactionID: txID, - } +func EventTypeFixture(chainID flow.ChainID) flow.EventType { + eventType := fmt.Sprintf("A.%s.TestContract.TestEvent1", RandomAddressFixtureForChain(chainID)) + return flow.EventType(eventType) } func EmulatorRootKey() (*flow.AccountPrivateKey, error) { // TODO seems this key literal doesn't decode anymore - emulatorRootKey, err := crypto.DecodePrivateKey(crypto.ECDSAP256, []byte("f87db87930770201010420ae2cc975dcbdd0ebc56f268b1d8a95834c2955970aea27042d35ec9f298b9e5aa00a06082a8648ce3d030107a1440342000417f5a527137785d2d773fee84b4c7ee40266a1dd1f36ddd46ecf25db6df6a499459629174de83256f2a44ebd4325b9def67d523b755a8926218c4efb7904f8ce0203")) + emulatorRootKey, err := crypto.DecodePrivateKey(crypto.ECDSAP256, + []byte("f87db87930770201010420ae2cc975dcbdd0ebc56f268b1d8a95834c2955970aea27042d35ec9f298b9e5aa00a06082a8648ce3d030107a1440342000417f5a527137785d2d773fee84b4c7ee40266a1dd1f36ddd46ecf25db6df6a499459629174de83256f2a44ebd4325b9def67d523b755a8926218c4efb7904f8ce0203")) if err != nil { return nil, err } @@ -1770,6 +2093,23 @@ func CertifyBlock(header *flow.Header) *flow.QuorumCertificate { return qc } +func CertifiedByChild(block *flow.Block, child *flow.Block) *flow.CertifiedBlock { + return &flow.CertifiedBlock{ + Proposal: &flow.Proposal{Block: *block, ProposerSigData: SignatureFixture()}, + CertifyingQC: child.ParentQC(), + } +} + +func NewCertifiedBlock(block 
*flow.Block) *flow.CertifiedBlock { + return &flow.CertifiedBlock{ + Proposal: &flow.Proposal{ + Block: *block, + ProposerSigData: SignatureFixture(), + }, + CertifyingQC: CertifyBlock(block.ToHeader()), + } +} + func QuorumCertificatesFixtures( n uint, opts ...func(*flow.QuorumCertificate), @@ -1859,10 +2199,16 @@ func VoteWithBeaconSig() func(*hotstuff.Vote) { } } -func WithParticipants(participants flow.IdentityList) func(*flow.EpochSetup) { +func WithParticipants(participants flow.IdentitySkeletonList) func(*flow.EpochSetup) { + return func(setup *flow.EpochSetup) { + setup.Participants = participants.Sort(flow.Canonical[flow.IdentitySkeleton]) + setup.Assignments = ClusterAssignment(1, participants.ToSkeleton()) + } +} + +func WithAssignments(assignments flow.AssignmentList) func(*flow.EpochSetup) { return func(setup *flow.EpochSetup) { - setup.Participants = participants.Sort(order.Canonical) - setup.Assignments = ClusterAssignment(1, participants) + setup.Assignments = assignments } } @@ -1893,11 +2239,13 @@ func EpochSetupFixture(opts ...func(setup *flow.EpochSetup)) *flow.EpochSetup { Counter: uint64(rand.Uint32()), FirstView: uint64(0), FinalView: uint64(rand.Uint32() + 1000), - Participants: participants.Sort(order.Canonical), + Participants: participants.Sort(flow.Canonical[flow.Identity]).ToSkeleton(), RandomSource: SeedFixture(flow.EpochSetupRandomSourceLength), DKGPhase1FinalView: 100, DKGPhase2FinalView: 200, DKGPhase3FinalView: 300, + TargetDuration: 60 * 60, + TargetEndTime: uint64(time.Now().Add(time.Hour).Unix()), } for _, apply := range opts { apply(setup) @@ -1908,59 +2256,58 @@ func EpochSetupFixture(opts ...func(setup *flow.EpochSetup)) *flow.EpochSetup { return setup } -func EpochStatusFixture() *flow.EpochStatus { - return &flow.EpochStatus{ - PreviousEpoch: flow.EventIDs{ - SetupID: IdentifierFixture(), - CommitID: IdentifierFixture(), - }, - CurrentEpoch: flow.EventIDs{ - SetupID: IdentifierFixture(), - CommitID: IdentifierFixture(), - }, - NextEpoch: flow.EventIDs{ - SetupID: IdentifierFixture(), - CommitID: IdentifierFixture(), - }, +// EpochRecoverFixture creates a valid EpochRecover with default properties for testing. +// The default properties for setup part can be overwritten with optional parameter functions. +// Commit part will be adjusted accordingly. 
+func EpochRecoverFixture(opts ...func(setup *flow.EpochSetup)) *flow.EpochRecover { + setup := EpochSetupFixture() + for _, apply := range opts { + apply(setup) + } + + commit := EpochCommitFixture( + CommitWithCounter(setup.Counter), + WithDKGFromParticipants(setup.Participants), + WithClusterQCsFromAssignments(setup.Assignments), + ) + + return &flow.EpochRecover{ + EpochSetup: *setup, + EpochCommit: *commit, } } func IndexFixture() *flow.Index { return &flow.Index{ - CollectionIDs: IdentifierListFixture(5), - SealIDs: IdentifierListFixture(5), - ReceiptIDs: IdentifierListFixture(5), + GuaranteeIDs: IdentifierListFixture(5), + SealIDs: IdentifierListFixture(5), + ReceiptIDs: IdentifierListFixture(5), } } -func WithDKGFromParticipants(participants flow.IdentityList) func(*flow.EpochCommit) { - count := len(participants.Filter(filter.IsValidDKGParticipant)) +func WithDKGFromParticipants(participants flow.IdentitySkeletonList) func(*flow.EpochCommit) { + dkgParticipants := participants.Filter(filter.IsConsensusCommitteeMember).Sort(flow.Canonical[flow.IdentitySkeleton]) return func(commit *flow.EpochCommit) { - commit.DKGParticipantKeys = PublicKeysFixture(count, crypto.BLSBLS12381) + commit.DKGParticipantKeys = nil + commit.DKGIndexMap = make(flow.DKGIndexMap) + for index, nodeID := range dkgParticipants.NodeIDs() { + commit.DKGParticipantKeys = append(commit.DKGParticipantKeys, KeyFixture(crypto.BLSBLS12381).PublicKey()) + commit.DKGIndexMap[nodeID] = index + } } } -func WithClusterQCsFromAssignments(assignments flow.AssignmentList) func(*flow.EpochCommit) { - qcs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(assignments)) - for _, assignment := range assignments { - qcWithSignerIndex := QuorumCertificateWithSignerIDsFixture() - qcWithSignerIndex.SignerIDs = assignment - qcs = append(qcs, qcWithSignerIndex) - } +func WithClusterQCs(qcs []flow.ClusterQCVoteData) func(*flow.EpochCommit) { return func(commit *flow.EpochCommit) { - commit.ClusterQCs = flow.ClusterQCVoteDatasFromQCs(qcs) + commit.ClusterQCs = qcs } } -func DKGParticipantLookup(participants flow.IdentityList) map[flow.Identifier]flow.DKGParticipant { - lookup := make(map[flow.Identifier]flow.DKGParticipant) - for i, node := range participants.Filter(filter.HasRole(flow.RoleConsensus)) { - lookup[node.NodeID] = flow.DKGParticipant{ - Index: uint(i), - KeyShare: KeyFixture(crypto.BLSBLS12381).PublicKey(), - } +func WithClusterQCsFromAssignments(assignments flow.AssignmentList) func(*flow.EpochCommit) { + qcs := QuorumCertificatesFromAssignments(assignments) + return func(commit *flow.EpochCommit) { + commit.ClusterQCs = flow.ClusterQCVoteDatasFromQCs(qcs) } - return lookup } func CommitWithCounter(counter uint64) func(*flow.EpochCommit) { @@ -2012,32 +2359,76 @@ func VersionBeaconFixture(options ...func(*flow.VersionBeacon)) *flow.VersionBea return versionTable } +func ProtocolStateVersionUpgradeFixture() *flow.ProtocolStateVersionUpgrade { + return &flow.ProtocolStateVersionUpgrade{ + NewProtocolStateVersion: rand.Uint64(), + ActiveView: rand.Uint64(), + } +} + // BootstrapFixture generates all the artifacts necessary to bootstrap the // protocol state. func BootstrapFixture( participants flow.IdentityList, opts ...func(*flow.Block), ) (*flow.Block, *flow.ExecutionResult, *flow.Seal) { + return BootstrapFixtureWithChainID(participants, flow.Emulator, opts...) 
+} - root := GenesisFixture() +func BootstrapFixtureWithChainID( + participants flow.IdentityList, + chainID flow.ChainID, + opts ...func(*flow.Block), +) (*flow.Block, *flow.ExecutionResult, *flow.Seal) { + root := Block.Genesis(chainID) for _, apply := range opts { apply(root) } counter := uint64(1) setup := EpochSetupFixture( - WithParticipants(participants), + WithParticipants(participants.ToSkeleton()), SetupWithCounter(counter), - WithFirstView(root.Header.View), - WithFinalView(root.Header.View+1000), + WithFirstView(root.View), + WithFinalView(root.View+100_000), ) commit := EpochCommitFixture( CommitWithCounter(counter), WithClusterQCsFromAssignments(setup.Assignments), - WithDKGFromParticipants(participants), + WithDKGFromParticipants(participants.ToSkeleton()), ) - result := BootstrapExecutionResultFixture(root, GenesisStateCommitment) + return BootstrapFixtureWithSetupAndCommit(root.HeaderBody, setup, commit) +} + +// BootstrapFixtureWithSetupAndCommit generates all the artifacts necessary to bootstrap the +// protocol state using the provided epoch setup and commit. +func BootstrapFixtureWithSetupAndCommit( + header flow.HeaderBody, + setup *flow.EpochSetup, + commit *flow.EpochCommit, +) (*flow.Block, *flow.ExecutionResult, *flow.Seal) { + safetyParams, err := protocol.DefaultEpochSafetyParams(header.ChainID) + if err != nil { + panic(err) + } + rootEpochState, err := inmem.EpochProtocolStateFromServiceEvents(setup, commit) + if err != nil { + panic(err) + } + rootProtocolState, err := kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, rootEpochState.ID()) + if err != nil { + panic(err) + } + + root := &flow.Block{ + HeaderBody: header, + Payload: flow.Payload{ProtocolStateID: rootProtocolState.ID()}, + } + + stateCommit := GenesisStateCommitmentByChainID(header.ChainID) + + result := BootstrapExecutionResultFixture(root, stateCommit) result.ServiceEvents = []flow.ServiceEvent{ setup.ServiceEvent(), commit.ServiceEvent(), @@ -2054,9 +2445,17 @@ func RootSnapshotFixture( participants flow.IdentityList, opts ...func(*flow.Block), ) *inmem.Snapshot { - block, result, seal := BootstrapFixture(participants.Sort(order.Canonical), opts...) + return RootSnapshotFixtureWithChainID(participants, flow.Emulator, opts...) +} + +func RootSnapshotFixtureWithChainID( + participants flow.IdentityList, + chainID flow.ChainID, + opts ...func(*flow.Block), +) *inmem.Snapshot { + block, result, seal := BootstrapFixtureWithChainID(participants.Sort(flow.Canonical[flow.Identity]), chainID, opts...) qc := QuorumCertificateFixture(QCWithRootBlockID(block.ID())) - root, err := inmem.SnapshotFromBootstrapState(block, result, seal, qc) + root, err := SnapshotFromBootstrapState(block, result, seal, qc) if err != nil { panic(err) } @@ -2068,7 +2467,10 @@ func SnapshotClusterByIndex( clusterIndex uint, ) (protocol.Cluster, error) { epochs := snapshot.Epochs() - epoch := epochs.Current() + epoch, err := epochs.Current() + if err != nil { + return nil, err + } cluster, err := epoch.Cluster(clusterIndex) if err != nil { return nil, err @@ -2088,7 +2490,7 @@ func ChainFixture(nonGenesisCount int) ( genesis, result, seal := BootstrapFixture(participants) chain = append(chain, genesis) - children := ChainFixtureFrom(nonGenesisCount, genesis.Header) + children := ChainFixtureFrom(nonGenesisCount, genesis.ToHeader()) chain = append(chain, children...) 
return chain, result, seal } @@ -2101,12 +2503,21 @@ func ChainFixtureFrom(count int, parent *flow.Header) []*flow.Block { for i := 0; i < count; i++ { block := BlockWithParentFixture(parent) blocks = append(blocks, block) - parent = block.Header + parent = block.ToHeader() } return blocks } +// ProposalChainFixtureFrom creates a chain of blocks and wraps each one in a Proposal. +func ProposalChainFixtureFrom(count int, parent *flow.Header) []*flow.Proposal { + proposals := make([]*flow.Proposal, 0, count) + for _, block := range ChainFixtureFrom(count, parent) { + proposals = append(proposals, ProposalFromBlock(block)) + } + return proposals +} + func ReceiptChainFor( blocks []*flow.Block, result0 *flow.ExecutionResult, @@ -2136,11 +2547,10 @@ func ReconnectBlocksAndReceipts(blocks []*flow.Block, receipts []*flow.Execution b := blocks[i] p := i - 1 prev := blocks[p] - if prev.Header.Height+1 != b.Header.Height { - panic(fmt.Sprintf("height has gap when connecting blocks: expect %v, but got %v", prev.Header.Height+1, b.Header.Height)) + if prev.Height+1 != b.Height { + panic(fmt.Sprintf("height has gap when connecting blocks: expect %v, but got %v", prev.Height+1, b.Height)) } - b.Header.ParentID = prev.ID() - b.Header.PayloadHash = b.Payload.Hash() + b.ParentID = prev.ID() receipts[i].ExecutionResult.BlockID = b.ID() prevReceipt := receipts[p] receipts[i].ExecutionResult.PreviousResultID = prevReceipt.ExecutionResult.ID() @@ -2177,9 +2587,9 @@ func DKGBroadcastMessageFixture() *messages.BroadcastDKGMessage { } } -// PrivateKeyFixture returns a random private key with specified signature algorithm and seed length -func PrivateKeyFixture(algo crypto.SigningAlgorithm, seedLength int) crypto.PrivateKey { - sk, err := crypto.GeneratePrivateKey(algo, SeedFixture(seedLength)) +// PrivateKeyFixture returns a random private key with specified signature algorithm +func PrivateKeyFixture(algo crypto.SigningAlgorithm) crypto.PrivateKey { + sk, err := crypto.GeneratePrivateKey(algo, SeedFixture(crypto.KeyGenSeedMinLen)) if err != nil { panic(err) } @@ -2193,8 +2603,9 @@ func PrivateKeyFixtureByIdentifier( seedLength int, id flow.Identifier, ) crypto.PrivateKey { - seed := append(id[:], id[:]...) 
- sk, err := crypto.GeneratePrivateKey(algo, seed[:seedLength]) + seed := make([]byte, seedLength) + copy(seed, id[:]) + sk, err := crypto.GeneratePrivateKey(algo, seed) if err != nil { panic(err) } @@ -2207,18 +2618,18 @@ func StakingPrivKeyByIdentifier(id flow.Identifier) crypto.PrivateKey { // NetworkingPrivKeyFixture returns random ECDSAP256 private key func NetworkingPrivKeyFixture() crypto.PrivateKey { - return PrivateKeyFixture(crypto.ECDSAP256, crypto.KeyGenSeedMinLen) + return PrivateKeyFixture(crypto.ECDSAP256) } // StakingPrivKeyFixture returns a random BLS12381 private keyf func StakingPrivKeyFixture() crypto.PrivateKey { - return PrivateKeyFixture(crypto.BLSBLS12381, crypto.KeyGenSeedMinLen) + return PrivateKeyFixture(crypto.BLSBLS12381) } func NodeMachineAccountInfoFixture() bootstrap.NodeMachineAccountInfo { return bootstrap.NodeMachineAccountInfo{ Address: RandomAddressFixture().String(), - EncodedPrivateKey: PrivateKeyFixture(crypto.ECDSAP256, DefaultSeedFixtureLength).Encode(), + EncodedPrivateKey: PrivateKeyFixture(crypto.ECDSAP256).Encode(), HashAlgorithm: bootstrap.DefaultMachineAccountHashAlgo, SigningAlgorithm: bootstrap.DefaultMachineAccountSignAlgo, KeyIndex: bootstrap.DefaultMachineAccountKeyIndex, @@ -2239,7 +2650,7 @@ func MachineAccountFixture(t *testing.T) ( Balance: uint64(bal), Keys: []*sdk.AccountKey{ { - Index: int(info.KeyIndex), + Index: info.KeyIndex, PublicKey: info.MustPrivateKey().PublicKey(), SigAlgo: info.SigningAlgorithm, HashAlgo: info.HashAlgorithm, @@ -2262,6 +2673,18 @@ func TransactionResultsFixture(n int) []flow.TransactionResult { return results } +func LightTransactionResultsFixture(n int) []flow.LightTransactionResult { + results := make([]flow.LightTransactionResult, 0, n) + for i := 0; i < n; i++ { + results = append(results, flow.LightTransactionResult{ + TransactionID: IdentifierFixture(), + Failed: i%2 == 0, + ComputationUsed: Uint64InRange(1, 10_000), + }) + } + return results +} + func AllowAllPeerFilter() func(peer.ID) error { return func(_ peer.ID) error { return nil @@ -2323,7 +2746,7 @@ func GetFlowProtocolEventID( ) flow.Identifier { payload, err := NetworkCodec().Encode(event) require.NoError(t, err) - eventIDHash, err := network.EventId(channel, payload) + eventIDHash, err := message.EventId(channel, payload) require.NoError(t, err) return flow.HashToID(eventIDHash) } @@ -2373,19 +2796,35 @@ func WithChunkEvents(events flow.EventsList) func(*execution_data.ChunkExecution } } +func WithTrieUpdate(trieUpdate *ledger.TrieUpdate) func(*execution_data.ChunkExecutionData) { + return func(conf *execution_data.ChunkExecutionData) { + conf.TrieUpdate = trieUpdate + } +} + func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*execution_data.ChunkExecutionData)) *execution_data.ChunkExecutionData { - collection := CollectionFixture(1) + collection := CollectionFixture(5) + results := make([]flow.LightTransactionResult, len(collection.Transactions)) + for i, tx := range collection.Transactions { + results[i] = flow.LightTransactionResult{ + TransactionID: tx.ID(), + Failed: false, + ComputationUsed: uint64(i * 100), + } + } + ced := &execution_data.ChunkExecutionData{ - Collection: &collection, - Events: flow.EventsList{}, - TrieUpdate: testutils.TrieUpdateFixture(1, 1, 8), + Collection: &collection, + Events: nil, + TrieUpdate: testutils.TrieUpdateFixture(2, 1, 8), + TransactionResults: results, } for _, opt := range opts { opt(ced) } - if minSize <= 1 { + if minSize <= 1 || ced.TrieUpdate == nil { return ced } @@ 
-2398,7 +2837,7 @@ func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*executio } v := make([]byte, size) - _, err := rand.Read(v) + _, err := crand.Read(v) require.NoError(t, err) k, err := ced.TrieUpdate.Payloads[0].Key() @@ -2408,3 +2847,557 @@ func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*executio size *= 2 } } + +func WithTxResultErrorMessageTxID(id flow.Identifier) func(txResErrMsg *flow.TransactionResultErrorMessage) { + return func(txResErrMsg *flow.TransactionResultErrorMessage) { + txResErrMsg.TransactionID = id + } +} + +func WithTxResultErrorMessageIndex(index uint32) func(txResErrMsg *flow.TransactionResultErrorMessage) { + return func(txResErrMsg *flow.TransactionResultErrorMessage) { + txResErrMsg.Index = index + } +} + +func WithTxResultErrorMessageTxMsg(message string) func(txResErrMsg *flow.TransactionResultErrorMessage) { + return func(txResErrMsg *flow.TransactionResultErrorMessage) { + txResErrMsg.ErrorMessage = message + } +} + +func WithTxResultErrorMessageExecutorID(id flow.Identifier) func(txResErrMsg *flow.TransactionResultErrorMessage) { + return func(txResErrMsg *flow.TransactionResultErrorMessage) { + txResErrMsg.ExecutorID = id + } +} + +// TransactionResultErrorMessageFixture creates a fixture tx result error message with random generated tx ID and executor ID for test purpose. +func TransactionResultErrorMessageFixture(opts ...func(*flow.TransactionResultErrorMessage)) flow.TransactionResultErrorMessage { + txResErrMsg := flow.TransactionResultErrorMessage{ + TransactionID: IdentifierFixture(), + Index: 0, + ErrorMessage: "transaction result error", + ExecutorID: IdentifierFixture(), + } + + for _, opt := range opts { + opt(&txResErrMsg) + } + + return txResErrMsg +} + +// TransactionResultErrorMessagesFixture creates a fixture collection of tx result error messages with n elements. +func TransactionResultErrorMessagesFixture(n int) []flow.TransactionResultErrorMessage { + txResErrMsgs := make([]flow.TransactionResultErrorMessage, 0, n) + executorID := IdentifierFixture() + + for i := 0; i < n; i++ { + txResErrMsgs = append(txResErrMsgs, TransactionResultErrorMessageFixture( + WithTxResultErrorMessageIndex(uint32(i)), + WithTxResultErrorMessageTxMsg(fmt.Sprintf("transaction result error %d", i)), + WithTxResultErrorMessageExecutorID(executorID), + )) + } + return txResErrMsgs +} + +// RootEpochProtocolStateFixture creates a fixture with correctly structured Epoch sub-state. +// The epoch substate is part of the overall protocol state (KV store). +// This can be useful for testing bootstrap when there is no previous epoch. 
+func RootEpochProtocolStateFixture() *flow.RichEpochStateEntry { + currentEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { + setup.Counter = 1 + }) + currentEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = currentEpochSetup.Counter + }) + + allIdentities := make(flow.IdentityList, 0, len(currentEpochSetup.Participants)) + for _, identity := range currentEpochSetup.Participants { + allIdentities = append(allIdentities, &flow.Identity{ + IdentitySkeleton: *identity, + DynamicIdentity: flow.DynamicIdentity{ + EpochParticipationStatus: flow.EpochParticipationStatusActive, + }, + }) + } + return &flow.RichEpochStateEntry{ + EpochStateEntry: &flow.EpochStateEntry{ + MinEpochStateEntry: &flow.MinEpochStateEntry{ + PreviousEpoch: nil, + CurrentEpoch: flow.EpochStateContainer{ + SetupID: currentEpochSetup.ID(), + CommitID: currentEpochCommit.ID(), + ActiveIdentities: flow.DynamicIdentityEntryListFromIdentities(allIdentities), + }, + EpochFallbackTriggered: false, + NextEpoch: nil, + }, + PreviousEpochSetup: nil, + PreviousEpochCommit: nil, + CurrentEpochSetup: currentEpochSetup, + CurrentEpochCommit: currentEpochCommit, + NextEpochSetup: nil, + NextEpochCommit: nil, + }, + CurrentEpochIdentityTable: allIdentities, + NextEpochIdentityTable: flow.IdentityList{}, + } +} + +// EpochStateFixture creates a fixture with correctly structured data. The returned Identity Table +// represents the common situation during the staking phase of Epoch N+1: +// - we are currently in Epoch N +// - previous epoch N-1 is known (specifically EpochSetup and EpochCommit events) +// - network is currently in the staking phase to setup the next epoch, hence no service +// events for the next epoch exist +// +// In particular, the following consistency requirements hold: +// - Epoch setup and commit counters are set to match. +// - Identities are constructed from setup events. +// - Identities are sorted in canonical order. 
+func EpochStateFixture(options ...func(*flow.RichEpochStateEntry)) *flow.RichEpochStateEntry { + prevEpochSetup := EpochSetupFixture() + prevEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = prevEpochSetup.Counter + }) + currentEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { + setup.Counter = prevEpochSetup.Counter + 1 + // reuse same participant for current epoch + sameParticipant := *prevEpochSetup.Participants[1] + setup.Participants = append(setup.Participants, &sameParticipant) + setup.Participants = setup.Participants.Sort(flow.Canonical[flow.IdentitySkeleton]) + }) + currentEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = currentEpochSetup.Counter + }) + + buildDefaultIdentities := func(setup *flow.EpochSetup) flow.IdentityList { + epochIdentities := make(flow.IdentityList, 0, len(setup.Participants)) + for _, identity := range setup.Participants { + epochIdentities = append(epochIdentities, &flow.Identity{ + IdentitySkeleton: *identity, + DynamicIdentity: flow.DynamicIdentity{ + EpochParticipationStatus: flow.EpochParticipationStatusActive, + }, + }) + } + return epochIdentities.Sort(flow.Canonical[flow.Identity]) + } + + prevEpochIdentities := buildDefaultIdentities(prevEpochSetup) + currentEpochIdentities := buildDefaultIdentities(currentEpochSetup) + allIdentities := currentEpochIdentities.Union( + prevEpochIdentities.Map(mapfunc.WithEpochParticipationStatus(flow.EpochParticipationStatusLeaving))) + + entry := &flow.RichEpochStateEntry{ + EpochStateEntry: &flow.EpochStateEntry{ + MinEpochStateEntry: &flow.MinEpochStateEntry{ + CurrentEpoch: flow.EpochStateContainer{ + SetupID: currentEpochSetup.ID(), + CommitID: currentEpochCommit.ID(), + ActiveIdentities: flow.DynamicIdentityEntryListFromIdentities(currentEpochIdentities), + }, + PreviousEpoch: &flow.EpochStateContainer{ + SetupID: prevEpochSetup.ID(), + CommitID: prevEpochCommit.ID(), + ActiveIdentities: flow.DynamicIdentityEntryListFromIdentities(prevEpochIdentities), + }, + EpochFallbackTriggered: false, + NextEpoch: nil, + }, + PreviousEpochSetup: prevEpochSetup, + PreviousEpochCommit: prevEpochCommit, + CurrentEpochSetup: currentEpochSetup, + CurrentEpochCommit: currentEpochCommit, + NextEpochSetup: nil, + NextEpochCommit: nil, + }, + CurrentEpochIdentityTable: allIdentities, + NextEpochIdentityTable: flow.IdentityList{}, + } + + for _, option := range options { + option(entry) + } + + return entry +} + +// WithNextEpochProtocolState creates a fixture with correctly structured data for next epoch. +// The resulting Identity Table represents the common situation during the epoch commit phase for Epoch N+1: +// - We are currently in Epoch N. +// - The previous epoch N-1 is known (specifically EpochSetup and EpochCommit events). +// - The network has completed the epoch setup phase, i.e. published the EpochSetup and EpochCommit events for epoch N+1. 
+func WithNextEpochProtocolState() func(entry *flow.RichEpochStateEntry) { + return func(entry *flow.RichEpochStateEntry) { + nextEpochSetup := EpochSetupFixture(func(setup *flow.EpochSetup) { + setup.Counter = entry.CurrentEpochSetup.Counter + 1 + setup.FirstView = entry.CurrentEpochSetup.FinalView + 1 + setup.FinalView = setup.FirstView + 1000 + // reuse same participant for current epoch + sameParticipant := *entry.CurrentEpochSetup.Participants[1] + setup.Participants[1] = &sameParticipant + setup.Participants = setup.Participants.Sort(flow.Canonical[flow.IdentitySkeleton]) + }) + nextEpochCommit := EpochCommitFixture(func(commit *flow.EpochCommit) { + commit.Counter = nextEpochSetup.Counter + }) + + nextEpochParticipants := make(flow.IdentityList, 0, len(nextEpochSetup.Participants)) + for _, identity := range nextEpochSetup.Participants { + nextEpochParticipants = append(nextEpochParticipants, &flow.Identity{ + IdentitySkeleton: *identity, + DynamicIdentity: flow.DynamicIdentity{ + EpochParticipationStatus: flow.EpochParticipationStatusActive, + }, + }) + } + nextEpochParticipants = nextEpochParticipants.Sort(flow.Canonical[flow.Identity]) + + currentEpochParticipants := entry.CurrentEpochIdentityTable.Filter(func(identity *flow.Identity) bool { + _, found := entry.CurrentEpochSetup.Participants.ByNodeID(identity.NodeID) + return found + }).Sort(flow.Canonical[flow.Identity]) + + entry.CurrentEpochIdentityTable = currentEpochParticipants.Union( + nextEpochParticipants.Map(mapfunc.WithEpochParticipationStatus(flow.EpochParticipationStatusJoining))) + entry.NextEpochIdentityTable = nextEpochParticipants.Union( + currentEpochParticipants.Map(mapfunc.WithEpochParticipationStatus(flow.EpochParticipationStatusLeaving))) + + entry.NextEpoch = &flow.EpochStateContainer{ + SetupID: nextEpochSetup.ID(), + CommitID: nextEpochCommit.ID(), + ActiveIdentities: flow.DynamicIdentityEntryListFromIdentities(nextEpochParticipants), + } + entry.NextEpochSetup = nextEpochSetup + entry.NextEpochCommit = nextEpochCommit + } +} + +// WithValidDKG updated protocol state with correctly structured data for DKG. +func WithValidDKG() func(*flow.RichEpochStateEntry) { + return func(entry *flow.RichEpochStateEntry) { + commit := entry.CurrentEpochCommit + dkgParticipants := entry.CurrentEpochSetup.Participants.Filter(filter.IsConsensusCommitteeMember).Sort(flow.Canonical[flow.IdentitySkeleton]) + commit.DKGParticipantKeys = nil + commit.DKGIndexMap = make(flow.DKGIndexMap) + for index, nodeID := range dkgParticipants.NodeIDs() { + commit.DKGParticipantKeys = append(commit.DKGParticipantKeys, KeyFixture(crypto.BLSBLS12381).PublicKey()) + commit.DKGIndexMap[nodeID] = index + } + // update CommitID according to new CurrentEpochCommit object + entry.MinEpochStateEntry.CurrentEpoch.CommitID = entry.CurrentEpochCommit.ID() + } +} + +// EpochProtocolStateEntryFixture returns a flow.MinEpochStateEntry fixture. +// - PreviousEpoch is always nil +// - tentativePhase defines what service events should be defined for the NextEpoch +// - efmTriggered defines whether the EpochFallbackTriggered flag should be set. 
+func EpochProtocolStateEntryFixture(tentativePhase flow.EpochPhase, efmTriggered bool) flow.MinEpochStateEntry { + identities := DynamicIdentityEntryListFixture(5) + entry := flow.MinEpochStateEntry{ + EpochFallbackTriggered: efmTriggered, + PreviousEpoch: nil, + CurrentEpoch: flow.EpochStateContainer{ + SetupID: IdentifierFixture(), + CommitID: IdentifierFixture(), + ActiveIdentities: identities, + }, + NextEpoch: nil, + } + + switch tentativePhase { + case flow.EpochPhaseStaking: + break + case flow.EpochPhaseSetup: + entry.NextEpoch = &flow.EpochStateContainer{ + SetupID: IdentifierFixture(), + CommitID: flow.ZeroID, + ActiveIdentities: identities, + } + case flow.EpochPhaseCommitted: + entry.NextEpoch = &flow.EpochStateContainer{ + SetupID: IdentifierFixture(), + CommitID: IdentifierFixture(), + ActiveIdentities: identities, + } + default: + panic("unexpected input phase: " + tentativePhase.String()) + } + return entry +} + +func CreateSendTxHttpPayload(tx flow.TransactionBody) map[string]interface{} { + tx.Arguments = [][]uint8{} // fix how fixture creates nil values + auth := make([]string, len(tx.Authorizers)) + for i, a := range tx.Authorizers { + auth[i] = a.String() + } + + return map[string]interface{}{ + "script": util.ToBase64(tx.Script), + "arguments": tx.Arguments, + "reference_block_id": tx.ReferenceBlockID.String(), + "gas_limit": fmt.Sprintf("%d", tx.GasLimit), + "payer": tx.Payer.String(), + "proposal_key": map[string]interface{}{ + "address": tx.ProposalKey.Address.String(), + "key_index": fmt.Sprintf("%d", tx.ProposalKey.KeyIndex), + "sequence_number": fmt.Sprintf("%d", tx.ProposalKey.SequenceNumber), + }, + "authorizers": auth, + "payload_signatures": []map[string]interface{}{{ + "address": tx.PayloadSignatures[0].Address.String(), + "key_index": fmt.Sprintf("%d", tx.PayloadSignatures[0].KeyIndex), + "signature": util.ToBase64(tx.PayloadSignatures[0].Signature), + }}, + "envelope_signatures": []map[string]interface{}{{ + "address": tx.EnvelopeSignatures[0].Address.String(), + "key_index": fmt.Sprintf("%d", tx.EnvelopeSignatures[0].KeyIndex), + "signature": util.ToBase64(tx.EnvelopeSignatures[0].Signature), + }}, + } +} + +// P2PRPCGraftFixtures returns n number of control message rpc Graft fixtures. +func P2PRPCGraftFixtures(topics ...string) []*pubsub_pb.ControlGraft { + n := len(topics) + grafts := make([]*pubsub_pb.ControlGraft, n) + for i := 0; i < n; i++ { + grafts[i] = P2PRPCGraftFixture(&topics[i]) + } + return grafts +} + +// P2PRPCGraftFixture returns a control message rpc Graft fixture. +func P2PRPCGraftFixture(topic *string) *pubsub_pb.ControlGraft { + return &pubsub_pb.ControlGraft{ + TopicID: topic, + } +} + +// P2PRPCPruneFixtures returns n number of control message rpc Prune fixtures. +func P2PRPCPruneFixtures(topics ...string) []*pubsub_pb.ControlPrune { + n := len(topics) + prunes := make([]*pubsub_pb.ControlPrune, n) + for i := 0; i < n; i++ { + prunes[i] = P2PRPCPruneFixture(&topics[i]) + } + return prunes +} + +// P2PRPCPruneFixture returns a control message rpc Prune fixture. +func P2PRPCPruneFixture(topic *string) *pubsub_pb.ControlPrune { + return &pubsub_pb.ControlPrune{ + TopicID: topic, + } +} + +// P2PRPCIHaveFixtures returns n number of control message where n = len(topics) rpc iHave fixtures with m number of message ids each. 
+func P2PRPCIHaveFixtures(m int, topics ...string) []*pubsub_pb.ControlIHave { + n := len(topics) + ihaves := make([]*pubsub_pb.ControlIHave, n) + for i := 0; i < n; i++ { + ihaves[i] = P2PRPCIHaveFixture(&topics[i], IdentifierListFixture(m).Strings()...) + } + return ihaves +} + +// P2PRPCIHaveFixture returns a control message rpc iHave fixture. +func P2PRPCIHaveFixture(topic *string, messageIds ...string) *pubsub_pb.ControlIHave { + return &pubsub_pb.ControlIHave{ + TopicID: topic, + MessageIDs: messageIds, + } +} + +// P2PRPCIWantFixtures returns n number of control message rpc iWant fixtures with m number of message ids each. +func P2PRPCIWantFixtures(n, m int) []*pubsub_pb.ControlIWant { + iwants := make([]*pubsub_pb.ControlIWant, n) + for i := 0; i < n; i++ { + iwants[i] = P2PRPCIWantFixture(IdentifierListFixture(m).Strings()...) + } + return iwants +} + +// P2PRPCIWantFixture returns a control message rpc iWant fixture. +func P2PRPCIWantFixture(messageIds ...string) *pubsub_pb.ControlIWant { + return &pubsub_pb.ControlIWant{ + MessageIDs: messageIds, + } +} + +type RPCFixtureOpt func(rpc *pubsub.RPC) + +// WithGrafts sets the grafts on the rpc control message. +func WithGrafts(grafts ...*pubsub_pb.ControlGraft) RPCFixtureOpt { + return func(rpc *pubsub.RPC) { + rpc.Control.Graft = grafts + } +} + +// WithPrunes sets the prunes on the rpc control message. +func WithPrunes(prunes ...*pubsub_pb.ControlPrune) RPCFixtureOpt { + return func(rpc *pubsub.RPC) { + rpc.Control.Prune = prunes + } +} + +// WithIHaves sets the iHaves on the rpc control message. +func WithIHaves(iHaves ...*pubsub_pb.ControlIHave) RPCFixtureOpt { + return func(rpc *pubsub.RPC) { + rpc.Control.Ihave = iHaves + } +} + +// WithIWants sets the iWants on the rpc control message. +func WithIWants(iWants ...*pubsub_pb.ControlIWant) RPCFixtureOpt { + return func(rpc *pubsub.RPC) { + rpc.Control.Iwant = iWants + } +} + +func WithPubsubMessages(msgs ...*pubsub_pb.Message) RPCFixtureOpt { + return func(rpc *pubsub.RPC) { + rpc.Publish = msgs + } +} + +// P2PRPCFixture returns a pubsub RPC fixture. Currently, this fixture only sets the ControlMessage field. +func P2PRPCFixture(opts ...RPCFixtureOpt) *pubsub.RPC { + rpc := &pubsub.RPC{ + RPC: pubsub_pb.RPC{ + Control: &pubsub_pb.ControlMessage{}, + }, + } + + for _, opt := range opts { + opt(rpc) + } + + return rpc +} + +func WithFrom(pid peer.ID) func(*pubsub_pb.Message) { + return func(msg *pubsub_pb.Message) { + msg.From = []byte(pid) + } +} + +// GossipSubMessageFixture returns a gossip sub message fixture for the specified topic. +func GossipSubMessageFixture(s string, opts ...func(*pubsub_pb.Message)) *pubsub_pb.Message { + pb := &pubsub_pb.Message{ + From: RandomBytes(32), + Data: RandomBytes(32), + Seqno: RandomBytes(10), + Topic: &s, + Signature: RandomBytes(100), + Key: RandomBytes(32), + } + + for _, opt := range opts { + opt(pb) + } + + return pb +} + +// GossipSubMessageFixtures returns a list of gossipsub message fixtures. +func GossipSubMessageFixtures(n int, topic string, opts ...func(*pubsub_pb.Message)) []*pubsub_pb.Message { + msgs := make([]*pubsub_pb.Message, n) + for i := 0; i < n; i++ { + msgs[i] = GossipSubMessageFixture(topic, opts...) + } + return msgs +} + +// LibP2PResourceLimitOverrideFixture returns a random resource limit override for testing. +// The values are random integers between 0 and 1000 and are not guaranteed to be valid limits. +// Returns: +// - p2pconfig.ResourceManagerOverrideLimit: a random resource limit override.
+func LibP2PResourceLimitOverrideFixture() p2pconfig.ResourceManagerOverrideLimit { + return p2pconfig.ResourceManagerOverrideLimit{ + StreamsInbound: rand.Intn(1000), + StreamsOutbound: rand.Intn(1000), + ConnectionsInbound: rand.Intn(1000), + ConnectionsOutbound: rand.Intn(1000), + FD: rand.Intn(1000), + Memory: rand.Intn(1000), + } +} + +func RegisterEntryFixture() flow.RegisterEntry { + val := make([]byte, 4) + _, _ = crand.Read(val) + return flow.RegisterEntry{ + Key: flow.RegisterID{ + Owner: "owner", + Key: "key1", + }, + Value: val, + } +} + +func MakeOwnerReg(key string, value string) flow.RegisterEntry { + return flow.RegisterEntry{ + Key: flow.RegisterID{ + Owner: "owner", + Key: key, + }, + Value: []byte(value), + } +} + +// ViewBasedActivatorFixture returns a ViewBasedActivator with randomly generated Data and ActivationView. +func ViewBasedActivatorFixture() *protocol.ViewBasedActivator[uint64] { + return &protocol.ViewBasedActivator[uint64]{ + Data: rand.Uint64(), + ActivationView: rand.Uint64(), + } +} + +// EpochExtensionFixture returns a randomly generated EpochExtension object. +func EpochExtensionFixture() flow.EpochExtension { + firstView := rand.Uint64() + + return flow.EpochExtension{ + FirstView: firstView, + FinalView: firstView + uint64(rand.Intn(10)+1), + } +} + +// EpochStateContainerFixture returns a randomly generated EpochStateContainer object. +func EpochStateContainerFixture() *flow.EpochStateContainer { + return &flow.EpochStateContainer{ + SetupID: IdentifierFixture(), + CommitID: IdentifierFixture(), + ActiveIdentities: DynamicIdentityEntryListFixture(5), + EpochExtensions: []flow.EpochExtension{EpochExtensionFixture()}, + } +} + +func EpochSetupRandomSourceFixture() []byte { + source := make([]byte, flow.EpochSetupRandomSourceLength) + _, err := rand.Read(source) + if err != nil { + panic(err) + } + return source +} + +// SnapshotFromBootstrapState generates a protocol.Snapshot representing a +// root bootstrap state. This is used to bootstrap the protocol state for +// genesis or post-spork states. +func SnapshotFromBootstrapState(root *flow.Block, result *flow.ExecutionResult, seal *flow.Seal, qc *flow.QuorumCertificate) (*inmem.Snapshot, error) { + safetyParams, err := protocol.DefaultEpochSafetyParams(root.ChainID) + if err != nil { + return nil, fmt.Errorf("could not get default epoch commit safety threshold: %w", err) + } + return inmem.SnapshotFromBootstrapStateWithParams(root, result, seal, qc, func(epochStateID flow.Identifier) (protocol_state.KVStoreAPI, error) { + return kvstore.NewDefaultKVStore(safetyParams.FinalizationSafetyThreshold, safetyParams.EpochExtensionViewCount, epochStateID) + }) +} diff --git a/utils/unittest/fixtures/README.md b/utils/unittest/fixtures/README.md new file mode 100644 index 00000000000..f540cf99251 --- /dev/null +++ b/utils/unittest/fixtures/README.md @@ -0,0 +1,1304 @@ +# Flow Go Fixtures Module + +A context-aware test fixture generation system for Flow Go that provides deterministic, reproducible +test data with shared randomness across all generators. This module replaces the standalone fixture +functions with a comprehensive suite of generator objects. + +## Table of Contents + +1. [Overview](#overview) +2. [Reproducibility](#reproducibility) +3. [Concurrency](#concurrency) +4. [Module Structure](#module-structure) +5. [Quick Start](#quick-start) +6. [Core Concepts](#core-concepts) +7. [Generator Suite](#generator-suite) +8. [Available Generators](#available-generators) +9. 
[Migration Guide](#migration-guide) +10. [Testing](#testing) +11. [Architecture](#architecture) + +## Overview + +The fixtures module replaces standalone fixture functions with a suite of context-aware generator objects that: + +- **Support deterministic results**: All generators use the same RNG, which allows for reproducible deterministic data using a static seed. +- **Complete Objects**: All generators produce complete and realistic model objects. +- **Provide context awareness**: Generators can create related data that makes sense together. +- **Enable easy extension**: A simple and consistent API makes extending or adding new generators straightforward. +- **Improve test reproducibility**: By reusing the same seed from a failed test, you can reproduce exactly the same inputs. + +## Reproducibility + +This suite is designed to allow producing complete data types with reproducible random data. This is +critical for certain test scenarios like testing hash functions or data serializers. In most cases, +deterministic data is not strictly required. + +However, it is very useful to be able to replay a failed test that used random data. Imagine the +scenario where a test failed in CI due to some corner-case bug with data handling. Since the data +was randomly generated, it's extremely difficult to reverse engineer the inputs that caused the failure. +With deterministic test fixtures, we could grab the random seed used by the test from the logs, +then rerun the test locally with the exact same test data. + +This does require that some extra care is taken while designing the tests, especially any tests that +use multiple goroutines, or any tests that run subtests with `Parallel()`. See [Concurrency](#concurrency). + +## Concurrency + +The generator suite does support concurrent usage, but it is discouraged to ensure that tests remain +reproducible. Any time the main suite or any generator is used within different goroutines or +parallel subtests, the specific order in which fixtures are produced will vary depending on the Go +scheduler. The order in which a fixture is generated determines the random data provided by the PRNG, thus if +fixtures are produced concurrently, their output will not be deterministic! + +### Best Practice + +To support using concurrency within your tests AND get deterministic random fixtures, you need to +follow these best practices: + +1. Always use a single `GeneratorSuite` per test. This ensures you can replay the test individually, +and allows for tests to be run in parallel without losing support for deterministic fixtures. +2. Always generate all fixture data before executing any concurrent logic. +3. **Never** generate any fixtures outside of the test's main goroutine. + +It is fine to share a `GeneratorSuite` between subtests so long as they are **not** marked `Parallel()`. +It is also fine to use a `GeneratorSuite` within a test suite since suite tests do not support parallelism.
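As a concrete illustration, here is a minimal sketch of this pattern. It assumes the usual test imports (`testing`, `fmt`, testify's `require`, and the `flow` model package) and that `Headers().Fixture()` yields a `*flow.Header`:

```go
func TestWithParallelSubtests(t *testing.T) {
	suite := fixtures.NewGeneratorSuite(fixtures.WithSeed(12345))

	// Generate all fixture data up front, on the test's main goroutine,
	// so the PRNG is consumed in a deterministic order.
	headers := make([]*flow.Header, 5)
	for i := range headers {
		headers[i] = suite.Headers().Fixture()
	}

	// Concurrency is safe now: no fixtures are generated inside the subtests.
	for i, header := range headers {
		header := header // capture loop variable for the parallel closure
		t.Run(fmt.Sprintf("header_%d", i), func(t *testing.T) {
			t.Parallel()
			require.NotEqual(t, flow.ZeroID, header.ID())
		})
	}
}
```

Because every fixture is generated before `t.Parallel()` is called, the PRNG consumption order, and therefore the generated data, is identical on every run with the same seed.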
+ +## Module Structure + +The fixtures module is organized as follows: + +``` +utils/unittest/fixtures/ +├── README.md # This documentation +├── generators_test.go # Test suite +├── generator.go # Core GeneratorSuite and options +├── util.go # Utility functions +├── random.go # RandomGenerator implementation +├── *Core Data Types* +├── identifier.go # Identifier generator +├── signature.go # Signature generator +├── address.go # Address generator +├── time.go # Time generator +├── *Block Components* +├── block_header.go # Block header generator +├── block.go # Block generator +├── payload.go # Block payload generator +├── seal.go # Block seal generator +├── *Consensus Components* +├── quorum_certificate.go # Quorum certificate generator +├── timeout_certificate.go # Timeout certificate generator +├── signer_indices.go # Signer indices generator +├── *Execution Components* +├── execution_result.go # Execution result generator +├── execution_receipt.go # Execution receipt generator +├── chunk.go # Execution chunk generator +├── chunk_execution_data.go # Chunk execution data generator +├── block_execution_data.go # Block execution data generator +├── *Transaction Components* +├── transaction.go # Transaction generator +├── collection.go # Collection generator +├── collection_guarantee.go # Collection guarantee generator +├── transaction_result.go # Transaction result generator +├── transaction_signature.go # Transaction signature generator +├── transaction_error_messages.go # Transaction error messages generator +├── proposal_key.go # Proposal key generator +├── *Events* +├── event.go # Event generator +├── event_type.go # Event type generator +├── service_event.go # Service event generator +├── service_event_epoch_setup.go # Epoch setup service event generator +├── service_event_epoch_commit.go # Epoch commit service event generator +├── service_event_epoch_recover.go # Epoch recover service event generator +├── service_event_version_beacon.go # Version beacon service event generator +├── service_event_protocol_state_version_upgrade.go # Protocol state version upgrade generator +├── service_event_set_epoch_extension_view_count.go # Set epoch extension view count generator +├── service_event_eject_node.go # Eject node service event generator +├── *Ledger Components* +├── trie_update.go # Trie update generator +├── ledger_path.go # Ledger path generator +├── ledger_payload.go # Ledger payload generator +├── ledger_value.go # Ledger value generator +├── register_entry.go # Register entry generator +├── state_commitment.go # State commitment generator +├── *Cryptographic Components* +├── crypto.go # Cryptographic generator +├── aggregated_signature.go # Aggregated signature generator +└── *Identity Components* +└── identity.go # Identity generator +``` + +## Quick Start + +### Import and Create Suite + +```go +import ( + "github.com/onflow/flow-go/utils/unittest/fixtures" +) + +// Create suite with random seed +suite := fixtures.NewGeneratorSuite(t) + +// Create suite with specific seed for deterministic reproducible results +suite := fixtures.NewGeneratorSuite(fixtures.WithSeed(12345)) +``` + +### Basic Usage Examples + +```go +// Generate a block header +header := suite.Headers().Fixture() + +// Generate an identifier +id := suite.Identifiers().Fixture() + +// Generate a collection with 3 transactions +collection := suite.Collections().Fixture(3) + +// Generate events for a transaction +txID := suite.Identifiers().Fixture() +events := suite.Events().ForTransaction(txID, 0, 3) +``` + +## Core 
Concepts
+
+### Generator Suite
+The `GeneratorSuite` is the central object that:
+- Manages a shared random number generator
+- Provides access to specialized generators
+- Ensures consistent randomness across all generators
+
+### Options Pattern
+Most generators use an options pattern for configuration. This is an example using the default
+`Header` options factory:
+```go
+// Example: Configure a block header
+header := suite.Headers().Fixture(
+	Header.WithHeight(100),
+	Header.WithView(200),
+)
+```
+
+Alternatively, you can use the generator itself as the factory:
+```go
+// Example: Configure a block header
+headerGen := suite.Headers()
+header := headerGen.Fixture(
+	headerGen.WithHeight(100),
+	headerGen.WithView(200),
+)
+```
+
+Using typed options allows us to namespace the options by type, avoiding name conflicts for similar
+options.
+
+### Error Handling
+Generators that can produce an error check it internally using `fixtures.NoError(err)`, which panics
+if an error is encountered.
+```go
+// No error handling needed - failures will panic
+result := suite.TransactionResults().Fixture()
+```
+
+## Generator Suite
+
+### Constructor Options
+
+```go
+// No options: Use random seed
+suite := fixtures.NewGeneratorSuite(t)
+
+// WithSeed(seed int64): Set specific seed for deterministic results
+suite := fixtures.NewGeneratorSuite(fixtures.WithSeed(42))
+```
+
+### Core Methods
+
+```go
+// Access the random generator
+random := suite.Random()
+
+// Generate random bytes
+bytes := random.RandomBytes(32)
+
+// Generate random string
+str := random.RandomString(16)
+```
+
+### Available Generators
+
+| Generator | Method | Purpose |
+|-----------|--------|---------|
+| **Core Data Types** | | |
+| Block Headers | `Headers()` | Generate block headers with proper relationships |
+| Blocks | `Blocks()` | Generate complete blocks with headers and payloads |
+| Identifiers | `Identifiers()` | Generate flow identifiers |
+| Signatures | `Signatures()` | Generate cryptographic signatures |
+| Addresses | `Addresses()` | Generate flow addresses |
+| Time | `Time()` | Generate [time.Time] values |
+| **Consensus Components** | | |
+| Quorum Certificates | `QuorumCertificates()` | Generate quorum certificates |
+| Quorum Certificates with Signer IDs | `QuorumCertificatesWithSignerIDs()` | Generate quorum certificates with signer IDs |
+| Timeout Certificates | `TimeoutCertificates()` | Generate timeout certificates |
+| Signer Indices | `SignerIndices()` | Generate signer index arrays |
+| **Execution Components** | | |
+| Execution Results | `ExecutionResults()` | Generate execution results |
+| Execution Receipts | `ExecutionReceipts()` | Generate execution receipts |
+| Chunks | `Chunks()` | Generate execution chunks |
+| Chunk Execution Data | `ChunkExecutionDatas()` | Generate chunk execution data |
+| Block Execution Data | `BlockExecutionDatas()` | Generate block execution data |
+| Block Execution Data Entities | `BlockExecutionDataEntities()` | Generate block execution data entities |
+| **Transaction Components** | | |
+| Transactions | `Transactions()` | Generate transaction bodies |
+| Collections | `Collections()` | Generate collections of transactions |
+| Collection Guarantees | `CollectionGuarantees()` | Generate collection guarantees |
+| Transaction Results | `TransactionResults()` | Generate transaction results |
+| Light Transaction Results | `LightTransactionResults()` | Generate light transaction results |
+| Transaction Error Messages | `TransactionErrorMessages()` | Generate
transaction error messages | +| Transaction Signatures | `TransactionSignatures()` | Generate transaction signatures | +| Proposal Keys | `ProposalKeys()` | Generate proposal keys | +| **Block Components** | | | +| Payloads | `Payloads()` | Generate block payloads | +| Seals | `Seals()` | Generate block seals | +| **Events** | | | +| Events | `Events()` | Generate events with encoding support | +| Event Types | `EventTypes()` | Generate event types | +| Service Events | `ServiceEvents()` | Generate service events | +| Epoch Setups | `EpochSetups()` | Generate epoch setup service events | +| Epoch Commits | `EpochCommits()` | Generate epoch commit service events | +| Epoch Recovers | `EpochRecovers()` | Generate epoch recover service events | +| Version Beacons | `VersionBeacons()` | Generate version beacon service events | +| Protocol State Version Upgrades | `ProtocolStateVersionUpgrades()` | Generate protocol state version upgrade service events | +| Set Epoch Extension View Counts | `SetEpochExtensionViewCounts()` | Generate set epoch extension view count service events | +| Eject Nodes | `EjectNodes()` | Generate eject node service events | +| **Ledger Components** | | | +| Trie Updates | `TrieUpdates()` | Generate ledger trie updates | +| Ledger Paths | `LedgerPaths()` | Generate ledger paths | +| Ledger Payloads | `LedgerPayloads()` | Generate ledger payloads | +| Ledger Values | `LedgerValues()` | Generate ledger values | +| Register Entries | `RegisterEntries()` | Generate register entries | +| State Commitments | `StateCommitments()` | Generate state commitments | +| **Cryptographic Components** | | | +| Crypto | `Crypto()` | Generate cryptographic keys and signatures | +| Aggregated Signatures | `AggregatedSignatures()` | Generate aggregated signatures | +| **Identity Components** | | | +| Identities | `Identities()` | Generate flow identities with cryptographic keys | + +## Generator Documentation + +### Random Generator + +The RandomGenerator provides consistent random value generation for all other generators. It exposes all methods from `*rand.Rand` and provides additional convenience methods for common use cases. 
+ +**Core Methods:** +- `RandomBytes(n int)`: Generates n random bytes +- `RandomString(length uint)`: Generates a random string of specified length +- `Uint32()`: Generates a random uint32 +- `Uint64()`: Generates a random uint64 +- `Int31()`: Generates a random int32 +- `Int63()`: Generates a random int64 +- `Intn(n int)`: Generates a random int in the range [0, n) + +**Unsigned Integer Methods (n > 0 required):** +- `Uintn(n uint)`: Generates a random uint strictly less than n +- `Uint32n(n uint32)`: Generates a random uint32 strictly less than n +- `Uint64n(n uint64)`: Generates a random uint64 strictly less than n + +**Range Methods (positive ranges only):** +- `IntInRange(min, max int)`: Generates a random int in the inclusive range [min, max] +- `Int32InRange(min, max int32)`: Generates a random int32 in the inclusive range [min, max] +- `Int64InRange(min, max int64)`: Generates a random int64 in the inclusive range [min, max] +- `UintInRange(min, max uint)`: Generates a random uint in the inclusive range [min, max] +- `Uint32InRange(min, max uint32)`: Generates a random uint32 in the inclusive range [min, max] +- `Uint64InRange(min, max uint64)`: Generates a random uint64 in the inclusive range [min, max] + +**Generic Functions:** +- `InclusiveRange[T](g *RandomGenerator, min, max T)`: Generic function for generating random numbers in inclusive ranges +- `RandomElement[T](g *RandomGenerator, slice []T)`: Selects a random element from a slice + +**Constraints:** +- All `*n` methods require n > 0 (will panic if n = 0) +- All range methods only support positive ranges (will panic with negative ranges) +- The RandomGenerator exposes all methods from `*rand.Rand` for additional functionality + +```go +random := suite.Random() + +// Generate random bytes +bytes := random.RandomBytes(32) + +// Generate random string +str := random.RandomString(16) + +// Generate random unsigned integers less than n +uintVal := random.Uintn(100) +uint32Val := random.Uint32n(100) +uint64Val := random.Uint64n(100) + +// Generate random numbers in inclusive ranges (positive ranges only) +intInRange := random.IntInRange(1, 50) +int32InRange := random.Int32InRange(1, 25) +int64InRange := random.Int64InRange(1, 100) +uintInRange := random.UintInRange(10, 90) +uint32InRange := random.Uint32InRange(5, 95) +uint64InRange := random.Uint64InRange(1, 1000) + +// Use generic InclusiveRange function +numInRange := InclusiveRange(random, 1, 100) +numInRangeWithType := InclusiveRange[uint32](random, 1, 100) + +// Select random element from slice +slice := []string{"apple", "banana", "cherry", "date"} +randomElement := RandomElement(random, slice) + +// random also exposes all methods from *rand.Rand +val := random.Uint32() +val64 := random.Uint64() +int32Val := random.Int31() +int64Val := random.Int63() +intVal := random.Intn(100) +``` + +### Block Header Generator + +Generates block headers with proper field relationships and chain-specific defaults. Uses an options pattern for configuration. 
+
+**Options:**
+- `WithHeight(height uint64)`: Sets the height of the block header
+- `WithView(view uint64)`: Sets the view of the block header
+- `WithChainID(chainID flow.ChainID)`: Sets the chain ID of the block header
+- `WithParent(parent *flow.Header)`: Sets the parent header (ignores height, view, and chainID if set)
+- `WithParentAndSoR(parent *flow.Header, source []byte)`: Sets the parent and source of randomness
+
+**Methods:**
+- `Fixture(opts ...HeaderOption)`: Generates a block header with optional configuration
+
+```go
+headerGen := suite.Headers()
+
+// Basic header
+header := headerGen.Fixture()
+
+// Header with specific height
+header := headerGen.Fixture(
+	headerGen.WithHeight(100),
+)
+
+// Header with specific view
+header := headerGen.Fixture(
+	headerGen.WithView(200),
+)
+
+// Header for specific chain
+header := headerGen.Fixture(
+	headerGen.WithChainID(flow.Testnet),
+)
+
+// Header with parent
+parent := headerGen.Fixture()
+child := headerGen.Fixture(
+	headerGen.WithParent(parent),
+)
+
+// Header with parent and source of randomness
+parent := headerGen.Fixture()
+source := suite.Random().RandomBytes(32)
+child := headerGen.Fixture(
+	headerGen.WithParentAndSoR(parent, source),
+)
+```
+
+### Block Generator
+
+Generates complete blocks with headers and payloads using an options pattern for configuration.
+
+**Options:**
+- `WithHeight(height uint64)`: Sets the height of the block header
+- `WithView(view uint64)`: Sets the view of the block header
+- `WithChainID(chainID flow.ChainID)`: Sets the chain ID of the block header
+- `WithParent(parentID, parentView, parentHeight)`: Sets parent block information
+- `WithParentHeader(parent *flow.Header)`: Sets parent header and derives child values
+- `WithProposerID(proposerID flow.Identifier)`: Sets the block proposer ID
+- `WithLastViewTC(lastViewTC *flow.TimeoutCertificate)`: Sets the timeout certificate
+- `WithTimestamp(timestamp uint64)`: Sets the block timestamp
+- `WithPayload(payload *flow.Payload)`: Sets the block payload
+- `WithHeaderBody(headerBody *flow.HeaderBody)`: Sets the header body
+
+**Methods:**
+- `Fixture(opts ...BlockOption)`: Generates a block with optional configuration
+- `List(n int, opts ...BlockOption)`: Generates a chain of n blocks
+- `Genesis(opts ...BlockOption)`: Generates a genesis block
+
+```go
+blockGen := suite.Blocks()
+
+// Basic block
+block := blockGen.Fixture()
+
+// Block with specific height and view
+block := blockGen.Fixture(
+	Block.WithHeight(100),
+	Block.WithView(200),
+)
+
+// Block chain with parent-child relationships
+blocks := blockGen.List(5) // Creates chain of 5 blocks
+
+// Genesis block
+genesis := blockGen.Genesis(Block.WithChainID(flow.Testnet))
+```
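+
+`List` builds each subsequent block on the previous one, so the results form a connected chain.
+The following is a minimal sketch of how that property could be checked, assuming a test context
+`t` and the `github.com/stretchr/testify/assert` package (`ID()` and `ParentID` are the block's
+identifier method and embedded header field):
+
+```go
+// Sketch: each block generated by List() should reference its predecessor.
+blocks := suite.Blocks().List(3)
+for i := 1; i < len(blocks); i++ {
+	assert.Equal(t, blocks[i-1].ID(), blocks[i].ParentID)
+}
+```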
+
+### Primitive Generators
+
+#### Identifier Generator
+```go
+idGen := suite.Identifiers()
+
+// Single identifier
+id := idGen.Fixture()
+
+// List of identifiers
+ids := idGen.List(5)
+```
+
+#### Signature Generator
+```go
+sigGen := suite.Signatures()
+
+// Single signature
+sig := sigGen.Fixture()
+
+// List of signatures
+sigs := sigGen.List(3)
+```
+
+#### Address Generator
+```go
+addrGen := suite.Addresses()
+
+// Default random address on default chain (Testnet)
+addr := addrGen.Fixture()
+
+// Address for specific chain
+addr := addrGen.Fixture(addrGen.WithChainID(flow.Testnet))
+
+// Service account address
+addr := addrGen.Fixture(addrGen.ServiceAddress())
+
+// Invalid address
+invalidAddr := CorruptAddress(addrGen.Fixture(), flow.Testnet)
+```
+
+#### Signer Indices Generator
+```go
+indicesGen := suite.SignerIndices()
+
+// Generate indices with total validators and signer count
+indices := indicesGen.Fixture(indicesGen.WithSignerCount(10, 4))
+
+// Generate indices at specific positions
+indices := indicesGen.Fixture(indicesGen.WithIndices([]int{0, 2, 4}))
+
+// Generate list of indices
+indicesList := indicesGen.List(3, indicesGen.WithSignerCount(10, 2))
+```
+
+### Consensus Generators
+
+#### Quorum Certificate Generator
+```go
+qcGen := suite.QuorumCertificates()
+
+// Basic quorum certificate
+qc := qcGen.Fixture()
+
+// With specific view and block ID
+qc := qcGen.Fixture(
+	qcGen.WithView(100),
+	qcGen.WithBlockID(blockID),
+)
+
+// With root block ID (sets view to 0)
+qc := qcGen.Fixture(
+	qcGen.WithRootBlockID(blockID),
+)
+
+// Certifies specific block
+qc := qcGen.Fixture(
+	qcGen.CertifiesBlock(header),
+)
+
+// With signer indices and source
+qc := qcGen.Fixture(
+	qcGen.WithSignerIndices(signerIndices),
+	qcGen.WithRandomnessSource(source),
+)
+
+// List of certificates
+qcList := qcGen.List(3)
+```
+
+### Execution Data Generators
+
+#### Chunk Execution Data Generator
+```go
+cedGen := suite.ChunkExecutionDatas()
+
+// Basic chunk execution data
+ced := cedGen.Fixture()
+
+// With minimum size
+ced := cedGen.Fixture(
+	cedGen.WithMinSize(100),
+)
+
+// List of chunk execution data
+cedList := cedGen.List(3)
+```
+
+#### Block Execution Data Generator
+```go
+bedGen := suite.BlockExecutionDatas()
+
+// Basic block execution data
+bed := bedGen.Fixture()
+
+// With specific block ID
+bed := bedGen.Fixture(
+	bedGen.WithBlockID(blockID),
+)
+
+// With specific chunk execution datas
+chunks := suite.ChunkExecutionDatas().List(3)
+bed := bedGen.Fixture(
+	bedGen.WithChunkExecutionDatas(chunks...),
+)
+
+// List of block execution data
+bedList := bedGen.List(2)
+```
+
+#### Execution Result Generator
+```go
+erGen := suite.ExecutionResults()
+
+// Basic execution result
+result := erGen.Fixture()
+
+// With specific block ID
+result := erGen.Fixture(
+	erGen.WithBlockID(blockID),
+)
+
+// With specific chunks
+chunks := suite.Chunks().List(3)
+result := erGen.Fixture(
+	erGen.WithChunks(chunks...),
+)
+
+// List of execution results
+results := erGen.List(2)
+```
+
+#### Execution Receipt Generator
+```go
+receiptGen := suite.ExecutionReceipts()
+
+// Basic execution receipt
+receipt := receiptGen.Fixture()
+
+// With specific executor ID
+receipt := receiptGen.Fixture(
+	receiptGen.WithExecutorID(executorID),
+)
+
+// With specific execution result
+result := suite.ExecutionResults().Fixture()
+receipt := receiptGen.Fixture(
+	receiptGen.WithExecutionResult(result),
+)
+
+// List of execution receipts
+receipts := receiptGen.List(3)
+```
+
+#### Chunk Generator
+```go
+chunkGen := suite.Chunks()
+
+// Basic chunk
+chunk := chunkGen.Fixture()
+
+// With specific chunk index
+chunk := chunkGen.Fixture(
+	chunkGen.WithIndex(2),
+)
+
+// List of chunks
+chunks := chunkGen.List(4)
+```
+
+#### Block Execution Data Entity Generator
+```go
+bedEntityGen := suite.BlockExecutionDataEntities()
+
+// Basic block execution data entity
+entity := bedEntityGen.Fixture()
+
+// With specific block ID
+entity := bedEntityGen.Fixture(
+	bedEntityGen.WithBlockID(blockID),
+)
+
+// List of entities
+entityList := bedEntityGen.List(2)
+```
+
+### Transaction Generators
+
+#### Transaction Generator
+```go
+txGen := suite.Transactions()
+
+// Basic transaction body
+tx := txGen.Fixture()
+
+// With custom gas limit
+tx := txGen.Fixture(
txGen.WithGasLimit(100), +) + +// List of transactions +txList := txGen.List(3) +``` + +#### Full Transaction Generator +```go +fullTxGen := suite.FullTransactions() + +// Complete transaction (with TransactionBody embedded) +tx := fullTxGen.Fixture() + +// List of complete transactions +txList := fullTxGen.List(2) +``` + +#### Collection Generator +```go +colGen := suite.Collections() + +// Collection with 1 transaction (default) +col := colGen.Fixture() + +// Collection with specific transaction count +col := colGen.Fixture(colGen.WithTxCount(3)) + +// Collection with specific transactions +transactions := suite.Transactions().List(2) +col := colGen.Fixture(colGen.WithTransactions(transactions)) + +// List of collections each with 3 transactions +colList := colGen.List(2, colGen.WithTxCount(3)) +``` + +### Ledger Generators + +#### Trie Update Generator +```go +trieGen := suite.TrieUpdates() + +// Basic trie update +trie := trieGen.Fixture() + +// With specific number of paths +trie := trieGen.Fixture( + trieGen.WithNumPaths(5), +) + +// List of trie updates +trieList := trieGen.List(2) +``` + +### Transaction Result Generators + +#### Transaction Result Generator +```go +trGen := suite.TransactionResults() + +// Basic transaction result +tr := trGen.Fixture() + +// With custom error message +tr := trGen.Fixture( + trGen.WithErrorMessage("custom error"), +) + +// List of results +trList := trGen.List(2) +``` + +#### Light Transaction Result Generator +```go +ltrGen := suite.LightTransactionResults() + +// Basic light transaction result +ltr := ltrGen.Fixture() + +// With failed status +ltr := ltrGen.Fixture( + ltrGen.WithFailed(true), +) + +// List of light results +ltrList := ltrGen.List(2) +``` + +#### Transaction Error Message Generator +```go +txErrMsgGen := suite.TransactionErrorMessages() + +// Basic transaction error message +txErrMsg := txErrMsgGen.Fixture() + +// With specific transaction ID +txErrMsg := txErrMsgGen.Fixture( + txErrMsgGen.WithTransactionID(txID), +) + +// With specific index +txErrMsg := txErrMsgGen.Fixture( + txErrMsgGen.WithIndex(42), +) + +// With specific error message +txErrMsg := txErrMsgGen.Fixture( + txErrMsgGen.WithErrorMessage("custom error"), +) + +// With specific executor ID +txErrMsg := txErrMsgGen.Fixture( + txErrMsgGen.WithExecutorID(executorID), +) + +// Generate error messages for failed transaction results +txResults := suite.LightTransactionResults().List(5, LightTransactionResult.WithFailed(true)) +txErrMsgs := txErrMsgGen.ForTransactionResults(txResults) + +// List of error messages +txErrMsgList := txErrMsgGen.List(2) +``` + +### Transaction Component Generators + +#### Transaction Signature Generator +```go +tsGen := suite.TransactionSignatures() + +// Basic transaction signature +ts := tsGen.Fixture() + +// With custom signer index +ts := tsGen.Fixture( + tsGen.WithSignerIndex(5), +) + +// List of signatures +tsList := tsGen.List(2) +``` + +#### Proposal Key Generator +```go +pkGen := suite.ProposalKeys() + +// Basic proposal key +pk := pkGen.Fixture() + +// With custom sequence number +pk := pkGen.Fixture( + pkGen.WithSequenceNumber(100), +) + +// List of proposal keys +pkList := pkGen.List(2) +``` + +### Event Generators + +#### Event Type Generator +```go +eventTypeGen := suite.EventTypes() + +// Basic event type +eventType := eventTypeGen.Fixture() + +// With custom event name +eventType := eventTypeGen.Fixture( + eventTypeGen.WithEventName("CustomEvent"), +) + +// With custom contract name +eventType := eventTypeGen.Fixture( + 
eventTypeGen.WithContractName("CustomContract"), +) + +// With specific address +address := suite.Addresses().Fixture() +eventType := eventTypeGen.Fixture( + eventTypeGen.WithAddress(address), +) +``` + +#### Event Generator +```go +eventGen := suite.Events() + +// Basic event with default CCF encoding +event := eventGen.Fixture() + +// With specific encoding +eventWithCCF := eventGen.Fixture( + eventGen.WithEncoding(entities.EventEncodingVersion_CCF_V0), +) +eventWithJSON := eventGen.Fixture( + eventGen.WithEncoding(entities.EventEncodingVersion_JSON_CDC_V0), +) + +// With custom type and encoding +event := eventGen.Fixture( + eventGen.WithEventType("A.0x1.Test.Event"), + eventGen.WithEncoding(entities.EventEncodingVersion_JSON_CDC_V0), +) + +// Events for specific transaction +txID := suite.Identifiers().Fixture() +txEvents := eventGen.ForTransaction(txID, 0, 3) + +// Events for multiple transactions +txIDs := suite.Identifiers().List(2) +allEvents := eventGen.ForTransactions(txIDs, 2) +``` + +#### Service Event Generators + +Service events represent important protocol-level events in Flow. The suite provides generators for all types of service events. + +**Service Event Generator** +```go +serviceEventGen := suite.ServiceEvents() + +// Basic service event (random type) +serviceEvent := serviceEventGen.Fixture() + +// List of service events +serviceEvents := serviceEventGen.List(3) +``` + +**Specific Service Event Types** +```go +// Epoch Setup events +epochSetup := suite.EpochSetups().Fixture() + +// Epoch Commit events +epochCommit := suite.EpochCommits().Fixture() + +// Epoch Recover events +epochRecover := suite.EpochRecovers().Fixture() + +// Version Beacon events +versionBeacon := suite.VersionBeacons().Fixture() + +// Protocol State Version Upgrade events +protocolUpgrade := suite.ProtocolStateVersionUpgrades().Fixture() + +// Set Epoch Extension View Count events +setExtension := suite.SetEpochExtensionViewCounts().Fixture() + +// Eject Node events +ejectNode := suite.EjectNodes().Fixture() +``` + +### Identity Generator + +Generates Flow identities with cryptographic keys and metadata. + +```go +identityGen := suite.Identities() + +// Basic identity +identity := identityGen.Fixture() + +// Identity with specific role +identity := identityGen.Fixture(identityGen.WithRole(flow.RoleConsensus)) + +// List of identities +identities := identityGen.List(4) +``` + +### Time Generator + +Generates `time.Time` values with various options for time-based testing scenarios. 
+
+**Options:**
+- `WithBaseTime(baseTime time.Time)`: Sets the base time for generation
+- `WithOffset(offset time.Duration)`: Sets a specific offset from the base time
+- `WithOffsetRandom(max time.Duration)`: Sets a random offset from the base time (0 to max)
+- `WithTimezone(tz *time.Location)`: Sets the timezone for time generation
+
+**Methods:**
+- `Fixture(opts ...func(*timeConfig))`: Generates a time.Time value with optional configuration
+
+```go
+timeGen := suite.Time()
+
+// Basic time fixture
+time1 := timeGen.Fixture()
+
+// Time based on the current time
+now := time.Now()
+time2 := timeGen.Fixture(timeGen.WithBaseTime(now))
+
+// Time with specific base time
+baseTime := time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC)
+time3 := timeGen.Fixture(timeGen.WithBaseTime(baseTime))
+
+// Time with specific offset
+time4 := timeGen.Fixture(timeGen.WithOffset(time.Hour))
+
+// Time with random offset
+time5 := timeGen.Fixture(timeGen.WithOffsetRandom(24*time.Hour))
+
+// Time in a specific timezone
+time6 := timeGen.Fixture(timeGen.WithTimezone(time.FixedZone("EST", -5*3600)))
+
+// Time with multiple options
+time7 := timeGen.Fixture(
+	timeGen.WithBaseTime(baseTime),
+	timeGen.WithOffset(time.Hour),
+	timeGen.WithTimezone(time.FixedZone("EST", -5*3600)),
+)
+```
+
+### Ledger Path Generator
+
+Generates ledger paths with consistent randomness and deduplication.
+
+**Options:**
+- `WithCount(count int)`: Sets the number of paths to generate
+
+**Methods:**
+- `Fixture(opts ...func(*pathConfig))`: Generates a single ledger path
+- `List(n int, opts ...func(*pathConfig))`: Generates a list of ledger paths
+
+```go
+pathGen := suite.LedgerPaths()
+
+// Basic path fixture
+path1 := pathGen.Fixture()
+
+// List of paths
+paths := pathGen.List(3)
+```
+
+### Ledger Payload Generator
+
+Generates ledger payloads with consistent randomness and configurable sizes.
+
+**Options:**
+- `WithSize(minSize, maxSize int)`: Sets the payload size range
+- `WithValue(value ledger.Value)`: Sets the value for the payload
+
+**Methods:**
+- `Fixture(opts ...func(*payloadConfig))`: Generates a single ledger payload
+- `List(n int, opts ...func(*payloadConfig))`: Generates a list of ledger payloads
+
+```go
+payloadGen := suite.LedgerPayloads()
+
+// Basic payload fixture
+payload1 := payloadGen.Fixture()
+
+// Payload with specific size
+payload2 := payloadGen.Fixture(payloadGen.WithSize(4, 16))
+
+// Payload with specific value
+value := suite.LedgerValues().Fixture()
+payload3 := payloadGen.Fixture(payloadGen.WithValue(value))
+
+// List of payloads
+payloads := payloadGen.List(3)
+```
+
+### Ledger Value Generator
+
+Generates ledger values with consistent randomness and configurable sizes.
+
+**Options:**
+- `WithSize(minSize, maxSize int)`: Sets the value size range [minSize, maxSize)
+
+**Methods:**
+- `Fixture(opts ...func(*valueConfig))`: Generates a single ledger value
+- `List(n int, opts ...func(*valueConfig))`: Generates a list of ledger values
+
+```go
+valueGen := suite.LedgerValues()
+
+// Basic value fixture
+value1 := valueGen.Fixture()
+
+// Value with specific size
+value2 := valueGen.Fixture(valueGen.WithSize(4, 16))
+
+// List of values
+values := valueGen.List(3)
+
+// List of values with specific size
+largeValues := valueGen.List(2, valueGen.WithSize(8, 32))
+```
+
+### Register Entry Generator
+
+Generates register entries with consistent randomness.
Register entries combine a register key with a register value. + +**Options:** +- `WithKey(key flow.RegisterID)`: Sets the register key +- `WithValue(value flow.RegisterValue)`: Sets the register value +- `WithPayload(payload *ledger.Payload)`: Converts a ledger payload to a register entry + +**Methods:** +- `Fixture(opts ...RegisterEntryOption)`: Generates a single register entry +- `List(n int, opts ...RegisterEntryOption)`: Generates a list of register entries + +```go +registerEntryGen := suite.RegisterEntries() + +// Basic register entry +entry := registerEntryGen.Fixture() + +// Register entry with specific key +entry := registerEntryGen.Fixture(RegisterEntry.WithKey(key)) + +// Register entry with specific value +value := []byte("some value") +entry := registerEntryGen.Fixture(RegisterEntry.WithValue(value)) + +// Register entry from ledger payload +payload := suite.LedgerPayloads().Fixture() +entry := registerEntryGen.Fixture(RegisterEntry.WithPayload(payload)) + +// List of register entries +entries := registerEntryGen.List(3) +``` + +## Migration Guide + +### From Standalone Fixtures + +The new fixtures module replaces standalone fixture functions. Here's how to migrate: + +**Old Way:** +```go +import "github.com/onflow/flow-go/utils/unittest" + +// Standalone functions +header := unittest.BlockHeaderFixture() +header := unittest.BlockHeaderFixtureOnChain(flow.Testnet) +header := unittest.BlockHeaderWithParentFixture(parent) +``` + +**New Way:** +```go +import "github.com/onflow/flow-go/utils/unittest/fixtures" + +// Create suite once per test +suite := fixtures.NewGeneratorSuite(t) // or fixtures.WithSeed(12345) + +// Use suite generators +headerGen := suite.Headers() +header := headerGen.Fixture() +header := headerGen.Fixture(headerGen.WithChainID(flow.Testnet)) +header := headerGen.Fixture(headerGen.WithParent(parent)) +``` + +### Benefits of Migration + +1. **Deterministic Results**: Explicit seed control for reproducible tests +2. **Shared Randomness**: All generators use the same RNG for consistency +3. **Context Awareness**: Generators create related data that makes sense together +4. **Better Error Handling**: Proper error handling with `testing.TB` +5. 
**Easier Extension**: Simple to add new generator types
+
+## Testing
+
+The fixtures module includes comprehensive tests:
+
+```bash
+# Run all fixture tests
+go test -v ./utils/unittest/fixtures
+
+# Run specific test
+go test -v ./utils/unittest/fixtures -run TestGeneratorSuite
+
+# Run deterministic tests
+go test -v ./utils/unittest/fixtures -run TestGeneratorSuiteDeterministic
+```
+
+### Test Coverage
+
+- Deterministic results with the same seed
+- Proper relationships between generated objects
+- Correct field values and constraints
+- Random seed generation when seed is not specified
+- All generator options and methods
+
+## Architecture
+
+### File Organization
+
+The fixtures module is organized into focused files:
+
+| Category | File | Purpose |
+|----------|------|---------|
+| **Core** | `generator.go` | Core `GeneratorSuite` and options |
+| | `random.go` | Random value generation utilities |
+| | `util.go` | Utility functions |
+| | `generators_test.go` | Comprehensive tests |
+| **Core Data Types** | `identifier.go` | Identifier generation |
+| | `signature.go` | Signature generation |
+| | `address.go` | Address generation |
+| | `time.go` | Time generation |
+| **Block Components** | `block_header.go` | Block header generation |
+| | `block.go` | Complete block generation |
+| | `payload.go` | Block payload generation |
+| | `seal.go` | Block seal generation |
+| **Consensus** | `quorum_certificate.go` | Quorum certificate generation |
+| | `timeout_certificate.go` | Timeout certificate generation |
+| | `signer_indices.go` | Signer indices generation |
+| **Execution** | `execution_result.go` | Execution result generation |
+| | `execution_receipt.go` | Execution receipt generation |
+| | `chunk.go` | Execution chunk generation |
+| | `chunk_execution_data.go` | Chunk execution data generation |
+| | `block_execution_data.go` | Block execution data generation |
+| **Transactions** | `transaction.go` | Transaction generation |
+| | `collection.go` | Collection generation |
+| | `collection_guarantee.go` | Collection guarantee generation |
+| | `transaction_result.go` | Transaction result generation |
+| | `transaction_signature.go` | Transaction signature generation |
+| | `proposal_key.go` | Proposal key generation |
+| **Events** | `event.go` | Event generation |
+| | `event_type.go` | Event type generation |
+| | `service_event.go` | Service event generation |
+| | `service_event_*.go` | Specific service event type generators |
+| **Ledger** | `trie_update.go` | Trie update generation |
+| | `ledger_path.go` | Ledger path generation |
+| | `ledger_payload.go` | Ledger payload generation |
+| | `ledger_value.go` | Ledger value generation |
+| | `state_commitment.go` | State commitment generation |
+| **Cryptographic** | `crypto.go` | Cryptographic key generation |
+| | `aggregated_signature.go` | Aggregated signature generation |
+| **Identity** | `identity.go` | Identity generation |
+
+### Design Principles
+
+1. **Single Responsibility**: Each generator focuses on one data type
+2. **Dependency Injection**: Generators store instances of other generators they need
+3. **Options Pattern**: Flexible configuration through options
+4. **Error Safety**: All errors returned during fixture construction must be checked using `NoError()`. Important constraints on the inputs must also be checked using `Assert()`
+5. **Deterministic**: All generators use the same `rand.Rand`, ensuring consistent and deterministic values when the seed is set.
+6.
**Consistent**: Easy to add new generators following established patterns. + +### Adding New Generators + +To add a new generator: + +1. **Create a new file** (e.g., `my_type.go`) +2. **Define the options factory and options** +3. **Define the generator struct with dependencies** +4. **Implement the Fixture and List methods** +5. **Add getter method to GeneratorSuite** +6. **Add tests and update documentation** + +Example: +```go +// In my_type.go +package fixtures + +import "github.com/onflow/flow-go/model/flow" + +// MyType is the default options factory for [flow.MyType] generation. +var MyType myTypeFactory + +type myTypeFactory struct{} + +type MyTypeOption func(*MyTypeGenerator, *flow.MyType) + +// WithField is an option that sets the `Field` of the my type. +func (f myTypeFactory) WithField(field string) MyTypeOption { + return func(g *MyTypeGenerator, myType *flow.MyType) { + myType.Field = field + } +} + +// MyTypeGenerator generates my types with consistent randomness. +type MyTypeGenerator struct { + myTypeFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + // Add other generator dependencies as needed +} + +func NewMyTypeGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, +) *MyTypeGenerator { + return &MyTypeGenerator{ + random: random, + identifiers: identifiers, + } +} + +// Fixture generates a [flow.MyType] with random data based on the provided options. +func (g *MyTypeGenerator) Fixture(opts ...MyTypeOption) *flow.MyType { + myType := &flow.MyType{ + ID: g.identifiers.Fixture(), + Field: g.random.RandomString(10), + // Set other fields with random values + } + + for _, opt := range opts { + opt(g, myType) + } + + return myType +} + +// List generates a list of [flow.MyType] with random data. +func (g *MyTypeGenerator) List(n int, opts ...MyTypeOption) []*flow.MyType { + items := make([]*flow.MyType, n) + for i := range n { + items[i] = g.Fixture(opts...) + } + return items +} +``` + +```go +// In generator.go +func (g *GeneratorSuite) MyTypes() *MyTypeGenerator { + return NewMyTypeGenerator( + g.Random(), + g.Identifiers(), + // Pass other dependencies as needed + ) +} +``` + +**Key Implementation Patterns:** + +1. **Factory Pattern**: Each generator has a global factory variable (e.g., `MyType`) that provides typed options +2. **Options Pattern**: Each option is a function that takes the generator and modifies the object being built +3. **Constructor Injection**: Generators receive their dependencies through `NewXxxGenerator()` constructors +4. **Consistent API**: All generators implement `Fixture(opts...)` and `List(n int, opts...)` methods +5. **Error Handling**: Use `fixtures.NoError(err)` for any operations that might fail +6. **Embedding**: Generator structs embed their factory for direct access to options methods \ No newline at end of file diff --git a/utils/unittest/fixtures/address.go b/utils/unittest/fixtures/address.go new file mode 100644 index 00000000000..3b2684cb278 --- /dev/null +++ b/utils/unittest/fixtures/address.go @@ -0,0 +1,108 @@ +package fixtures + +import ( + sdk "github.com/onflow/flow-go-sdk" + + "github.com/onflow/flow-go/model/flow" +) + +// maxIndex is the maximum index for the linear code address generator. +const maxIndex = 1<<45 - 1 + +// Address is the default options factory for [flow.Address] generation. +var Address addressFactory + +type addressFactory struct{} + +type AddressOption func(*AddressGenerator, *addressConfig) + +// addressConfig holds the configuration for address generation. 
+type addressConfig struct {
+	chainID flow.ChainID
+	index   uint64
+}
+
+// WithChainID is an option that generates a [flow.Address] for the specified chain.
+func (f addressFactory) WithChainID(chainID flow.ChainID) AddressOption {
+	return func(g *AddressGenerator, config *addressConfig) {
+		config.chainID = chainID
+	}
+}
+
+// ServiceAddress is an option that generates the service account [flow.Address] for the configured chain.
+func (f addressFactory) ServiceAddress() AddressOption {
+	return func(g *AddressGenerator, config *addressConfig) {
+		config.index = 1
+	}
+}
+
+// WithIndex is an option that sets the index for the address.
+func (f addressFactory) WithIndex(index uint64) AddressOption {
+	return func(g *AddressGenerator, config *addressConfig) {
+		config.index = index
+	}
+}
+
+// AddressGenerator generates [flow.Address] with consistent randomness.
+type AddressGenerator struct {
+	addressFactory
+
+	random *RandomGenerator
+
+	chainID flow.ChainID
+}
+
+func NewAddressGenerator(
+	random *RandomGenerator,
+	chainID flow.ChainID,
+) *AddressGenerator {
+	return &AddressGenerator{
+		random:  random,
+		chainID: chainID,
+	}
+}
+
+// Fixture generates a random [flow.Address] with the provided options.
+// Defaults to the chain ID specified in the generator suite.
+func (g *AddressGenerator) Fixture(opts ...AddressOption) flow.Address {
+	config := &addressConfig{
+		chainID: g.chainID,
+		index:   g.random.Uint64InRange(1, maxIndex),
+	}
+
+	for _, opt := range opts {
+		opt(g, config)
+	}
+
+	Assertf(config.index <= maxIndex, "index must be at most %d, got %d", maxIndex, config.index)
+
+	addr, err := config.chainID.Chain().AddressAtIndex(config.index)
+	NoError(err)
+
+	return addr
+}
+
+// List returns a list of [flow.Address] with the provided options.
+func (g *AddressGenerator) List(n int, opts ...AddressOption) []flow.Address {
+	addresses := make([]flow.Address, n)
+	for i := range n {
+		addresses[i] = g.Fixture(opts...)
+	}
+	return addresses
+}
+
+// ToSDKAddress converts a [flow.Address] to a [sdk.Address].
+func ToSDKAddress(addr flow.Address) sdk.Address {
+	var sdkAddr sdk.Address
+	copy(sdkAddr[:], addr[:])
+	return sdkAddr
+}
+
+// CorruptAddress corrupts the first byte of the address and checks that the address is invalid.
+func CorruptAddress(addr flow.Address, chainID flow.ChainID) flow.Address {
+	addr[0] ^= 1
+	// This should only fail if the provided address was already invalid for the chain. Explicitly
+	// check so the returned address is guaranteed to be invalid.
+	Assert(!chainID.Chain().IsValid(addr), "corrupted address is valid!")
+	return addr
+}
diff --git a/utils/unittest/fixtures/aggregated_signature.go b/utils/unittest/fixtures/aggregated_signature.go
new file mode 100644
index 00000000000..6d082182d9c
--- /dev/null
+++ b/utils/unittest/fixtures/aggregated_signature.go
@@ -0,0 +1,76 @@
+package fixtures
+
+import (
+	"github.com/onflow/crypto"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// AggregatedSignature is the default options factory for [flow.AggregatedSignature] generation.
+var AggregatedSignature aggregatedSignatureFactory
+
+type aggregatedSignatureFactory struct{}
+
+type AggregatedSignatureOption func(*AggregatedSignatureGenerator, *flow.AggregatedSignature)
+
+// WithVerifierSignatures is an option that sets the `VerifierSignatures` of the aggregated signature.
+func (f aggregatedSignatureFactory) WithVerifierSignatures(sigs ...crypto.Signature) AggregatedSignatureOption { + return func(g *AggregatedSignatureGenerator, aggSig *flow.AggregatedSignature) { + aggSig.VerifierSignatures = sigs + } +} + +// WithSignerIDs is an option that sets the `SignerIDs` of the aggregated signature. +func (f aggregatedSignatureFactory) WithSignerIDs(signerIDs flow.IdentifierList) AggregatedSignatureOption { + return func(g *AggregatedSignatureGenerator, aggSig *flow.AggregatedSignature) { + aggSig.SignerIDs = signerIDs + } +} + +// AggregatedSignatureGenerator generates aggregated signatures with consistent randomness. +type AggregatedSignatureGenerator struct { + aggregatedSignatureFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + signatures *SignatureGenerator +} + +// NewAggregatedSignatureGenerator creates a new AggregatedSignatureGenerator. +func NewAggregatedSignatureGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + signatures *SignatureGenerator, +) *AggregatedSignatureGenerator { + return &AggregatedSignatureGenerator{ + random: random, + identifiers: identifiers, + signatures: signatures, + } +} + +// Fixture generates a [flow.AggregatedSignature] with random data based on the provided options. +func (g *AggregatedSignatureGenerator) Fixture(opts ...AggregatedSignatureOption) flow.AggregatedSignature { + numSigners := g.random.IntInRange(2, 5) + aggSig := flow.AggregatedSignature{ + VerifierSignatures: g.signatures.List(numSigners), + SignerIDs: g.identifiers.List(numSigners), + } + + for _, opt := range opts { + opt(g, &aggSig) + } + + Assertf(len(aggSig.VerifierSignatures) == len(aggSig.SignerIDs), "verifier signatures and signer IDs must have the same length") + + return aggSig +} + +// List generates a list of [flow.AggregatedSignature]. +func (g *AggregatedSignatureGenerator) List(n int, opts ...AggregatedSignatureOption) []flow.AggregatedSignature { + signatures := make([]flow.AggregatedSignature, n) + for i := range n { + signatures[i] = g.Fixture(opts...) + } + return signatures +} diff --git a/utils/unittest/fixtures/block.go b/utils/unittest/fixtures/block.go new file mode 100644 index 00000000000..2c8384b414c --- /dev/null +++ b/utils/unittest/fixtures/block.go @@ -0,0 +1,248 @@ +package fixtures + +import "github.com/onflow/flow-go/model/flow" + +// Block is the default options factory for [flow.Block] generation. +var Block blockFactory + +type blockFactory struct{} + +type BlockOption func(*BlockGenerator, *blockConfig) + +type blockConfig struct { + headerOpts []HeaderOption + + headerBody *flow.HeaderBody + payload *flow.Payload +} + +// WithHeight is an option that sets the `Height` of the block's header. +func (f blockFactory) WithHeight(height uint64) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, Header.WithHeight(height)) + + } +} + +// WithView is an option that sets the `View` of the block's header. +func (f blockFactory) WithView(view uint64) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, + Header.WithView(view), + ) + } +} + +// WithChainID is an option that sets the `ChainID` of the block's header. 
+func (f blockFactory) WithChainID(chainID flow.ChainID) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, + Header.WithChainID(chainID), + ) + } +} + +// WithParent is an option that sets the `ParentID`, `ParentView`, and `Height` of the block's header based +// on the provided fields. `Height` is set to parent's `Height` + 1. +func (f blockFactory) WithParent(parentID flow.Identifier, parentView uint64, parentHeight uint64) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, + Header.WithParent(parentID, parentView, parentHeight), + ) + } +} + +// WithParentView is an option that sets the `ParentView` of the block's header. +func (f blockFactory) WithParentView(view uint64) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, + Header.WithParentView(view), + ) + } +} + +// WithParentHeader is an option that sets the following fields of the block's header based on the +// provided parent header: +// - `View` +// - `Height` +// - `ChainID` +// - `Timestamp` +// - `ParentID` +// - `ParentView` +// +// If you want a specific value for any of these fields, you should add the appropriate option +// after this option. +func (f blockFactory) WithParentHeader(parent *flow.Header) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, + Header.WithParentHeader(parent), + ) + } +} + +// WithProposerID is an option that sets the `ProposerID` of the block's header. +func (f blockFactory) WithProposerID(proposerID flow.Identifier) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, + Header.WithProposerID(proposerID), + ) + } +} + +// WithLastViewTC is an option that sets the `LastViewTC` of the block's header. +func (f blockFactory) WithLastViewTC(lastViewTC *flow.TimeoutCertificate) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, + Header.WithLastViewTC(lastViewTC), + ) + } +} + +// WithTimestamp is an option that sets the `Timestamp` of the block's header. +func (f blockFactory) WithTimestamp(timestamp uint64) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, + Header.WithTimestamp(timestamp), + ) + } +} + +// WithParentVoterIndices is an option that sets the `ParentVoterIndices` of the block's header. +func (f blockFactory) WithParentVoterIndices(indices []byte) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, + Header.WithParentVoterIndices(indices), + ) + } +} + +// WithParentVoterSigData is an option that sets the `ParentVoterSigData` of the block's header. +func (f blockFactory) WithParentVoterSigData(data []byte) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerOpts = append(config.headerOpts, + Header.WithParentVoterSigData(data), + ) + } +} + +// WithHeaderBody is an option that sets the `HeaderBody` of the block. +func (f blockFactory) WithHeaderBody(headerBody *flow.HeaderBody) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.headerBody = headerBody + } +} + +// WithPayload is an option that sets the `Payload` of the block. 
+func (f blockFactory) WithPayload(payload *flow.Payload) BlockOption { + return func(g *BlockGenerator, config *blockConfig) { + config.payload = payload + } +} + +// BlockGenerator generates blocks with consistent randomness. +type BlockGenerator struct { + blockFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + headers *HeaderGenerator + payloads *PayloadGenerator + + chainID flow.ChainID +} + +func NewBlockGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + headers *HeaderGenerator, + payloads *PayloadGenerator, + chainID flow.ChainID, +) *BlockGenerator { + return &BlockGenerator{ + random: random, + identifiers: identifiers, + headers: headers, + payloads: payloads, + chainID: chainID, + } +} + +// Fixture generates a [flow.Block] with random data based on the provided options. +func (g *BlockGenerator) Fixture(opts ...BlockOption) *flow.Block { + config := &blockConfig{ + headerOpts: []HeaderOption{Header.WithChainID(g.chainID)}, + } + + for _, opt := range opts { + opt(g, config) + } + + if config.headerBody == nil { + header := g.headers.Fixture(config.headerOpts...) + config.headerBody = &header.HeaderBody + } + if config.payload == nil { + config.payload = g.payloads.Fixture() + } + + return &flow.Block{ + HeaderBody: *config.headerBody, + Payload: *config.payload, + } +} + +// List generates a chain of [flow.Block] objects. The first block is generated with the given options, +// and the subsequent blocks are generated using only the WithParentHeader option, specifying the +// previous block as the parent. +func (g *BlockGenerator) List(n int, opts ...BlockOption) []*flow.Block { + blocks := make([]*flow.Block, 0, n) + blocks = append(blocks, g.Fixture(opts...)) + + for i := 1; i < n; i++ { + // give a 50% chance that the view is not ParentView + 1 + view := blocks[i-1].View + 1 + if g.random.Bool() { + view += g.random.Uint64InRange(1, 10) + } + + parent := blocks[i-1].ToHeader() + blocks = append(blocks, g.Fixture( + Block.WithParentHeader(parent), + Block.WithView(view), + )) + } + return blocks +} + +// Genesis instantiates a genesis block. This block has view and height equal to zero. However, +// conceptually spork root blocks are functionally equivalent to genesis blocks. We have decided that +// in the long term, the protocol must support spork root blocks with height _and_ view larger than zero. +// The only options that are used are the chainID and the protocol state ID. +func (g *BlockGenerator) Genesis(opts ...BlockOption) *flow.Block { + config := &blockConfig{ + headerOpts: []HeaderOption{Header.WithChainID(g.chainID)}, + payload: &flow.Payload{ + ProtocolStateID: g.identifiers.Fixture(), + }, + } + + // allow overriding the chainID and the protocol state ID + for _, opt := range opts { + opt(g, config) + } + + header := g.headers.Genesis(config.headerOpts...) 
+ if config.payload == nil { + config.payload = &flow.Payload{ + ProtocolStateID: g.identifiers.Fixture(), + } + } + + block, err := flow.NewRootBlock(flow.UntrustedBlock{ + HeaderBody: header.HeaderBody, + Payload: *config.payload, + }) + NoError(err) + + return block +} diff --git a/utils/unittest/fixtures/block_execution_data.go b/utils/unittest/fixtures/block_execution_data.go new file mode 100644 index 00000000000..ff285160e38 --- /dev/null +++ b/utils/unittest/fixtures/block_execution_data.go @@ -0,0 +1,102 @@ +package fixtures + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" +) + +// BlockExecutionData is the default options factory for [execution_data.BlockExecutionData] generation. +var BlockExecutionData blockExecutionDataFactory + +type blockExecutionDataFactory struct{} + +type BlockExecutionDataOption func(*BlockExecutionDataGenerator, *execution_data.BlockExecutionData) + +// WithBlockID is an option that sets the BlockID for the block execution data. +func (f blockExecutionDataFactory) WithBlockID(blockID flow.Identifier) BlockExecutionDataOption { + return func(g *BlockExecutionDataGenerator, blockExecutionData *execution_data.BlockExecutionData) { + blockExecutionData.BlockID = blockID + } +} + +// WithChunkExecutionDatas is an option that sets the ChunkExecutionDatas for the block execution data. +func (f blockExecutionDataFactory) WithChunkExecutionDatas(chunks ...*execution_data.ChunkExecutionData) BlockExecutionDataOption { + return func(g *BlockExecutionDataGenerator, blockExecutionData *execution_data.BlockExecutionData) { + blockExecutionData.ChunkExecutionDatas = chunks + } +} + +// BlockExecutionDataGenerator generates block execution data with consistent randomness. +type BlockExecutionDataGenerator struct { + blockExecutionDataFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + chunkExecutionDatas *ChunkExecutionDataGenerator +} + +func NewBlockExecutionDataGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + chunkExecutionDatas *ChunkExecutionDataGenerator, +) *BlockExecutionDataGenerator { + return &BlockExecutionDataGenerator{ + random: random, + identifiers: identifiers, + chunkExecutionDatas: chunkExecutionDatas, + } +} + +// Fixture generates a [execution_data.BlockExecutionData] with random data based on the provided options. +func (g *BlockExecutionDataGenerator) Fixture(opts ...BlockExecutionDataOption) *execution_data.BlockExecutionData { + blockExecutionData := &execution_data.BlockExecutionData{ + BlockID: g.identifiers.Fixture(), + } + + for _, opt := range opts { + opt(g, blockExecutionData) + } + + if len(blockExecutionData.ChunkExecutionDatas) == 0 { + blockExecutionData.ChunkExecutionDatas = g.chunkExecutionDatas.List(g.random.IntInRange(1, 4)) + } + + return blockExecutionData +} + +// List generates a list of [execution_data.BlockExecutionData]. +func (g *BlockExecutionDataGenerator) List(n int, opts ...BlockExecutionDataOption) []*execution_data.BlockExecutionData { + list := make([]*execution_data.BlockExecutionData, n) + for i := range n { + list[i] = g.Fixture(opts...) + } + return list +} + +// BlockExecutionDataEntityGenerator generates [execution_data.BlockExecutionDataEntity] with consistent randomness. 
+type BlockExecutionDataEntityGenerator struct { + *BlockExecutionDataGenerator +} + +func NewBlockExecutionDataEntityGenerator( + blockExecutionDatas *BlockExecutionDataGenerator, +) *BlockExecutionDataEntityGenerator { + return &BlockExecutionDataEntityGenerator{ + BlockExecutionDataGenerator: blockExecutionDatas, + } +} + +// Fixture generates a [execution_data.BlockExecutionDataEntity] with random data based on the provided options. +func (g *BlockExecutionDataEntityGenerator) Fixture(opts ...BlockExecutionDataOption) *execution_data.BlockExecutionDataEntity { + execData := g.BlockExecutionDataGenerator.Fixture(opts...) + return execution_data.NewBlockExecutionDataEntity(g.identifiers.Fixture(), execData) +} + +// List generates a list of [execution_data.BlockExecutionDataEntity]. +func (g *BlockExecutionDataEntityGenerator) List(n int, opts ...BlockExecutionDataOption) []*execution_data.BlockExecutionDataEntity { + list := make([]*execution_data.BlockExecutionDataEntity, n) + for i := range n { + list[i] = g.Fixture(opts...) + } + return list +} diff --git a/utils/unittest/fixtures/chunk.go b/utils/unittest/fixtures/chunk.go new file mode 100644 index 00000000000..a321554a0e7 --- /dev/null +++ b/utils/unittest/fixtures/chunk.go @@ -0,0 +1,137 @@ +package fixtures + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// Chunk is the default options factory for [flow.Chunk] generation. +var Chunk chunkFactory + +type chunkFactory struct{} + +type ChunkOption func(*ChunkGenerator, *flow.Chunk) + +// WithIndex is an option that sets the `Index` of the chunk. +func (f chunkFactory) WithIndex(index uint64) ChunkOption { + return func(g *ChunkGenerator, chunk *flow.Chunk) { + chunk.Index = index + } +} + +// WithBlockID is an option that sets the `BlockID` of the chunk. +func (f chunkFactory) WithBlockID(blockID flow.Identifier) ChunkOption { + return func(g *ChunkGenerator, chunk *flow.Chunk) { + chunk.BlockID = blockID + } +} + +// WithCollectionIndex is an option that sets the `CollectionIndex` of the chunk. +func (f chunkFactory) WithCollectionIndex(collectionIndex uint) ChunkOption { + return func(g *ChunkGenerator, chunk *flow.Chunk) { + chunk.CollectionIndex = collectionIndex + } +} + +// WithStartState is an option that sets the `StartState` of the chunk. +func (f chunkFactory) WithStartState(startState flow.StateCommitment) ChunkOption { + return func(g *ChunkGenerator, chunk *flow.Chunk) { + chunk.StartState = startState + } +} + +// WithEndState is an option that sets the `EndState` of the chunk. +func (f chunkFactory) WithEndState(endState flow.StateCommitment) ChunkOption { + return func(g *ChunkGenerator, chunk *flow.Chunk) { + chunk.EndState = endState + } +} + +// WithEventCollection is an option that sets the `EventCollection` of the chunk. +func (f chunkFactory) WithEventCollection(eventCollection flow.Identifier) ChunkOption { + return func(g *ChunkGenerator, chunk *flow.Chunk) { + chunk.EventCollection = eventCollection + } +} + +// WithNumberOfTransactions is an option that sets the `NumberOfTransactions` of the chunk. +func (f chunkFactory) WithNumberOfTransactions(numTxs uint64) ChunkOption { + return func(g *ChunkGenerator, chunk *flow.Chunk) { + chunk.NumberOfTransactions = numTxs + } +} + +// WithTotalComputationUsed is an option that sets the `TotalComputationUsed` of the chunk. 
+func (f chunkFactory) WithTotalComputationUsed(computation uint64) ChunkOption { + return func(g *ChunkGenerator, chunk *flow.Chunk) { + chunk.TotalComputationUsed = computation + } +} + +// WithServiceEventCount is an option that sets the `ServiceEventCount` of the chunk. +func (f chunkFactory) WithServiceEventCount(count uint16) ChunkOption { + return func(g *ChunkGenerator, chunk *flow.Chunk) { + chunk.ServiceEventCount = count + } +} + +// ChunkGenerator generates chunks with consistent randomness. +type ChunkGenerator struct { + chunkFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + stateCommitments *StateCommitmentGenerator +} + +// NewChunkGenerator creates a new ChunkGenerator. +func NewChunkGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + stateCommitments *StateCommitmentGenerator, +) *ChunkGenerator { + return &ChunkGenerator{ + random: random, + identifiers: identifiers, + stateCommitments: stateCommitments, + } +} + +// Fixture generates a [flow.Chunk] with random data based on the provided options. +func (g *ChunkGenerator) Fixture(opts ...ChunkOption) *flow.Chunk { + chunk := &flow.Chunk{ + ChunkBody: flow.ChunkBody{ + CollectionIndex: g.random.Uintn(10), // TODO: should CollectionIndex == Index? + StartState: g.stateCommitments.Fixture(), + EventCollection: g.identifiers.Fixture(), + ServiceEventCount: g.random.Uint16n(10), + BlockID: g.identifiers.Fixture(), + TotalComputationUsed: g.random.Uint64InRange(1, 9999), + NumberOfTransactions: g.random.Uint64InRange(1, 100), + }, + Index: g.random.Uint64(), + EndState: g.stateCommitments.Fixture(), + } + + for _, opt := range opts { + opt(g, chunk) + } + + return chunk +} + +// List generates a list of [flow.Chunk]. +func (g *ChunkGenerator) List(n int, opts ...ChunkOption) []*flow.Chunk { + chunks := make([]*flow.Chunk, n) + startState := g.stateCommitments.Fixture() + for i := range n { + endState := g.stateCommitments.Fixture() + chunks[i] = g.Fixture(append(opts, + Chunk.WithIndex(uint64(i)), + Chunk.WithCollectionIndex(uint(i)), + Chunk.WithStartState(startState), + Chunk.WithEndState(endState), + )...) + startState = endState + } + return chunks +} diff --git a/utils/unittest/fixtures/chunk_execution_data.go b/utils/unittest/fixtures/chunk_execution_data.go new file mode 100644 index 00000000000..e628a17a482 --- /dev/null +++ b/utils/unittest/fixtures/chunk_execution_data.go @@ -0,0 +1,144 @@ +package fixtures + +import ( + "bytes" + + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" +) + +// ChunkExecutionData is the default options factory for [execution_data.ChunkExecutionData] generation. +var ChunkExecutionData chunkExecutionDataFactory + +type chunkExecutionDataFactory struct{} + +type ChunkExecutionDataOption func(*ChunkExecutionDataGenerator, *execution_data.ChunkExecutionData) + +// WithCollection is an option that sets the collection for the chunk execution data. +func (f chunkExecutionDataFactory) WithCollection(collection *flow.Collection) ChunkExecutionDataOption { + return func(g *ChunkExecutionDataGenerator, ced *execution_data.ChunkExecutionData) { + ced.Collection = collection + } +} + +// WithEvents is an option that sets the events for the chunk execution data. 
+func (f chunkExecutionDataFactory) WithEvents(events flow.EventsList) ChunkExecutionDataOption { + return func(g *ChunkExecutionDataGenerator, ced *execution_data.ChunkExecutionData) { + ced.Events = events + } +} + +// WithTrieUpdate is an option that sets the trie update for the chunk execution data. +func (f chunkExecutionDataFactory) WithTrieUpdate(trieUpdate *ledger.TrieUpdate) ChunkExecutionDataOption { + return func(g *ChunkExecutionDataGenerator, ced *execution_data.ChunkExecutionData) { + ced.TrieUpdate = trieUpdate + } +} + +// WithTransactionResults is an option that sets the transaction results for the chunk execution data. +func (f chunkExecutionDataFactory) WithTransactionResults(results ...flow.LightTransactionResult) ChunkExecutionDataOption { + return func(g *ChunkExecutionDataGenerator, ced *execution_data.ChunkExecutionData) { + ced.TransactionResults = results + } +} + +// WithMinSize is an option that sets the minimum size for the chunk execution data. +func (f chunkExecutionDataFactory) WithMinSize(minSize int) ChunkExecutionDataOption { + return func(g *ChunkExecutionDataGenerator, ced *execution_data.ChunkExecutionData) { + if minSize > 0 && ced.TrieUpdate != nil { + g.ensureMinSize(ced, minSize) + } + } +} + +// ChunkExecutionDataGenerator generates chunk execution data with consistent randomness. +type ChunkExecutionDataGenerator struct { + chunkExecutionDataFactory + + random *RandomGenerator + collections *CollectionGenerator + lightTxResults *LightTransactionResultGenerator + events *EventGenerator + trieUpdates *TrieUpdateGenerator +} + +func NewChunkExecutionDataGenerator( + random *RandomGenerator, + collections *CollectionGenerator, + lightTxResults *LightTransactionResultGenerator, + events *EventGenerator, + trieUpdates *TrieUpdateGenerator, +) *ChunkExecutionDataGenerator { + return &ChunkExecutionDataGenerator{ + random: random, + collections: collections, + lightTxResults: lightTxResults, + events: events, + trieUpdates: trieUpdates, + } +} + +// Fixture generates a [execution_data.ChunkExecutionData] with random data based on the provided options. +func (g *ChunkExecutionDataGenerator) Fixture(opts ...ChunkExecutionDataOption) *execution_data.ChunkExecutionData { + ced := &execution_data.ChunkExecutionData{ + Collection: g.collections.Fixture(Collection.WithTxCount(5)), + TrieUpdate: g.trieUpdates.Fixture(), + } + + for _, opt := range opts { + opt(g, ced) + } + + if len(ced.TransactionResults) == 0 { + ced.TransactionResults = make([]flow.LightTransactionResult, len(ced.Collection.Transactions)) + for i, tx := range ced.Collection.Transactions { + ced.TransactionResults[i] = g.lightTxResults.Fixture(LightTransactionResult.WithTransactionID(tx.ID())) + } + } + + if len(ced.Events) == 0 { + for txIndex, result := range ced.TransactionResults { + events := g.events.List(5, + Event.WithTransactionID(result.TransactionID), + Event.WithTransactionIndex(uint32(txIndex)), + ) + ced.Events = append(ced.Events, events...) + } + ced.Events = AdjustEventsMetadata(ced.Events) + } + + return ced +} + +// List generates a list of [execution_data.ChunkExecutionData]. +func (g *ChunkExecutionDataGenerator) List(n int, opts ...ChunkExecutionDataOption) []*execution_data.ChunkExecutionData { + list := make([]*execution_data.ChunkExecutionData, n) + for i := range n { + list[i] = g.Fixture(opts...) 
+	}
+	return list
+}
+
+// ensureMinSize grows the first payload of the trie update until the serialized
+// chunk execution data reaches minSize bytes.
+func (g *ChunkExecutionDataGenerator) ensureMinSize(ced *execution_data.ChunkExecutionData, minSize int) {
+	size := 1
+	for {
+		buf := &bytes.Buffer{}
+		err := execution_data.DefaultSerializer.Serialize(buf, ced)
+		NoError(err)
+
+		if buf.Len() >= minSize {
+			return
+		}
+
+		k, err := ced.TrieUpdate.Payloads[0].Key()
+		NoError(err)
+
+		v := g.random.RandomBytes(size)
+
+		ced.TrieUpdate.Payloads[0] = ledger.NewPayload(k, v)
+		size *= 2
+	}
+}
diff --git a/utils/unittest/fixtures/collection.go b/utils/unittest/fixtures/collection.go
new file mode 100644
index 00000000000..388dc4b9b98
--- /dev/null
+++ b/utils/unittest/fixtures/collection.go
@@ -0,0 +1,65 @@
+package fixtures
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// Collection is the default options factory for [flow.Collection] generation.
+var Collection collectionFactory
+
+type collectionFactory struct{}
+
+type CollectionOption func(*CollectionGenerator, *flow.Collection)
+
+// WithTxCount is an option that sets the number of transactions in the collection.
+func (f collectionFactory) WithTxCount(count int) CollectionOption {
+	return func(g *CollectionGenerator, collection *flow.Collection) {
+		collection.Transactions = g.transactions.List(count)
+	}
+}
+
+// WithTransactions is an option that sets the transactions for the collection.
+func (f collectionFactory) WithTransactions(transactions ...*flow.TransactionBody) CollectionOption {
+	return func(g *CollectionGenerator, collection *flow.Collection) {
+		collection.Transactions = transactions
+	}
+}
+
+// CollectionGenerator generates collections with consistent randomness.
+type CollectionGenerator struct {
+	collectionFactory
+
+	transactions *TransactionGenerator
+}
+
+// NewCollectionGenerator creates a new CollectionGenerator.
+func NewCollectionGenerator(
+	transactions *TransactionGenerator,
+) *CollectionGenerator {
+	return &CollectionGenerator{
+		transactions: transactions,
+	}
+}
+
+// Fixture generates a [flow.Collection] with random data based on the provided options.
+func (g *CollectionGenerator) Fixture(opts ...CollectionOption) *flow.Collection {
+	collection := &flow.Collection{}
+
+	for _, opt := range opts {
+		opt(g, collection)
+	}
+
+	if len(collection.Transactions) == 0 {
+		collection.Transactions = g.transactions.List(1)
+	}
+
+	return collection
+}
+
+// List generates a list of [flow.Collection].
+func (g *CollectionGenerator) List(n int, opts ...CollectionOption) []*flow.Collection {
+	list := make([]*flow.Collection, n)
+	for i := range n {
+		list[i] = g.Fixture(opts...)
+	}
+	return list
+}
diff --git a/utils/unittest/fixtures/collection_guarantee.go b/utils/unittest/fixtures/collection_guarantee.go
new file mode 100644
index 00000000000..091d5fd85b9
--- /dev/null
+++ b/utils/unittest/fixtures/collection_guarantee.go
@@ -0,0 +1,104 @@
+package fixtures
+
+import (
+	"github.com/onflow/crypto"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/cluster"
+)
+
+// Guarantee is the default options factory for [flow.CollectionGuarantee] generation.
+var Guarantee collectionGuaranteeFactory
+
+type collectionGuaranteeFactory struct{}
+
+type CollectionGuaranteeOption func(*CollectionGuaranteeGenerator, *flow.CollectionGuarantee)
+
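+// Example usage (an illustrative sketch; the option values are arbitrary):
+//
+//	g := NewGeneratorSuite()
+//	guarantee := g.Guarantees().Fixture(
+//		Guarantee.WithReferenceBlockID(g.Identifiers().Fixture()),
+//	)
+
+// WithCollectionID is an option that sets the `CollectionID` of the collection guarantee.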
+func (f collectionGuaranteeFactory) WithCollectionID(collectionID flow.Identifier) CollectionGuaranteeOption { + return func(g *CollectionGuaranteeGenerator, guarantee *flow.CollectionGuarantee) { + guarantee.CollectionID = collectionID + } +} + +// WithReferenceBlockID is an option that sets the `ReferenceBlockID` of the collection guarantee. +func (f collectionGuaranteeFactory) WithReferenceBlockID(blockID flow.Identifier) CollectionGuaranteeOption { + return func(g *CollectionGuaranteeGenerator, guarantee *flow.CollectionGuarantee) { + guarantee.ReferenceBlockID = blockID + } +} + +// WithClusterChainID is an option that sets the `ClusterChainID` of the collection guarantee. +func (f collectionGuaranteeFactory) WithClusterChainID(clusterChainID flow.ChainID) CollectionGuaranteeOption { + return func(g *CollectionGuaranteeGenerator, guarantee *flow.CollectionGuarantee) { + guarantee.ClusterChainID = clusterChainID + } +} + +// WithSignerIndices is an option that sets the `SignerIndices` of the collection guarantee. +func (f collectionGuaranteeFactory) WithSignerIndices(signerIndices []byte) CollectionGuaranteeOption { + return func(g *CollectionGuaranteeGenerator, guarantee *flow.CollectionGuarantee) { + guarantee.SignerIndices = signerIndices + } +} + +// WithSignature is an option that sets the `Signature` of the collection guarantee. +func (f collectionGuaranteeFactory) WithSignature(signature crypto.Signature) CollectionGuaranteeOption { + return func(g *CollectionGuaranteeGenerator, guarantee *flow.CollectionGuarantee) { + guarantee.Signature = signature + } +} + +// CollectionGuaranteeGenerator generates collection guarantees with consistent randomness. +type CollectionGuaranteeGenerator struct { + collectionGuaranteeFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + signatures *SignatureGenerator + signerIndices *SignerIndicesGenerator + + chainID flow.ChainID +} + +func NewCollectionGuaranteeGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + signatures *SignatureGenerator, + signerIndices *SignerIndicesGenerator, + chainID flow.ChainID, +) *CollectionGuaranteeGenerator { + return &CollectionGuaranteeGenerator{ + random: random, + identifiers: identifiers, + signatures: signatures, + signerIndices: signerIndices, + chainID: chainID, + } +} + +// Fixture generates a [flow.CollectionGuarantee] with random data based on the provided options. +func (g *CollectionGuaranteeGenerator) Fixture(opts ...CollectionGuaranteeOption) *flow.CollectionGuarantee { + guarantee := &flow.CollectionGuarantee{ + CollectionID: g.identifiers.Fixture(), + ReferenceBlockID: g.identifiers.Fixture(), + ClusterChainID: cluster.CanonicalClusterID(g.random.Uint64(), g.identifiers.List(1)), + SignerIndices: g.signerIndices.Fixture(), + Signature: g.signatures.Fixture(), + } + + for _, opt := range opts { + opt(g, guarantee) + } + + return guarantee +} + +// List generates a list of [flow.CollectionGuarantee] with random data. 
+func (g *CollectionGuaranteeGenerator) List(n int, opts ...CollectionGuaranteeOption) []*flow.CollectionGuarantee {
+	guarantees := make([]*flow.CollectionGuarantee, 0, n)
+	for i := 0; i < n; i++ {
+		guarantees = append(guarantees, g.Fixture(opts...))
+	}
+	return guarantees
+}
diff --git a/utils/unittest/fixtures/crypto.go b/utils/unittest/fixtures/crypto.go
new file mode 100644
index 00000000000..7c05d5141ca
--- /dev/null
+++ b/utils/unittest/fixtures/crypto.go
@@ -0,0 +1,70 @@
+package fixtures
+
+import "github.com/onflow/crypto"
+
+// PrivateKey is the default options factory for [crypto.PrivateKey] generation.
+var PrivateKey privateKeyFactory
+
+type privateKeyFactory struct{}
+
+type PrivateKeyOption func(*CryptoGenerator, *privateKeyConfig)
+
+type privateKeyConfig struct {
+	seed []byte
+}
+
+// WithSeed is an option that sets the seed of the private key.
+func (f privateKeyFactory) WithSeed(seed []byte) PrivateKeyOption {
+	return func(g *CryptoGenerator, config *privateKeyConfig) {
+		config.seed = seed
+	}
+}
+
+// CryptoGenerator generates cryptographic keys with consistent randomness.
+type CryptoGenerator struct {
+	privateKeyFactory
+
+	random *RandomGenerator
+}
+
+// NewCryptoGenerator creates a new CryptoGenerator.
+func NewCryptoGenerator(random *RandomGenerator) *CryptoGenerator {
+	return &CryptoGenerator{
+		random: random,
+	}
+}
+
+// PrivateKey generates a [crypto.PrivateKey].
+func (g *CryptoGenerator) PrivateKey(algo crypto.SigningAlgorithm, opts ...PrivateKeyOption) crypto.PrivateKey {
+	config := &privateKeyConfig{}
+
+	for _, opt := range opts {
+		opt(g, config)
+	}
+
+	if len(config.seed) == 0 {
+		config.seed = g.random.RandomBytes(crypto.KeyGenSeedMinLen)
+	}
+
+	Assertf(len(config.seed) == crypto.KeyGenSeedMinLen, "seed must be %d bytes, got %d", crypto.KeyGenSeedMinLen, len(config.seed))
+
+	pk, err := crypto.GeneratePrivateKey(algo, config.seed)
+	NoError(err)
+	return pk
+}
+
+// StakingPrivateKey generates a staking [crypto.PrivateKey] using the crypto.BLSBLS12381 algorithm.
+func (g *CryptoGenerator) StakingPrivateKey(opts ...PrivateKeyOption) crypto.PrivateKey {
+	return g.PrivateKey(crypto.BLSBLS12381, opts...)
+}
+
+// NetworkingPrivateKey generates a networking [crypto.PrivateKey] using the crypto.ECDSAP256 algorithm.
+func (g *CryptoGenerator) NetworkingPrivateKey(opts ...PrivateKeyOption) crypto.PrivateKey {
+	return g.PrivateKey(crypto.ECDSAP256, opts...)
+}
+
+// PublicKeys generates a list of [crypto.PublicKey].
+func (g *CryptoGenerator) PublicKeys(n int, algo crypto.SigningAlgorithm, opts ...PrivateKeyOption) []crypto.PublicKey {
+	keys := make([]crypto.PublicKey, n)
+	for i := range n {
+		keys[i] = g.PrivateKey(algo, opts...).PublicKey()
+	}
+	return keys
+}
diff --git a/utils/unittest/fixtures/event.go b/utils/unittest/fixtures/event.go
new file mode 100644
index 00000000000..37071abf85c
--- /dev/null
+++ b/utils/unittest/fixtures/event.go
@@ -0,0 +1,325 @@
+package fixtures
+
+import (
+	"fmt"
+
+	"github.com/onflow/cadence"
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/encoding/ccf"
+	jsoncdc "github.com/onflow/cadence/encoding/json"
+	"github.com/onflow/cadence/stdlib"
+	"github.com/onflow/flow/protobuf/go/flow/entities"
+
+	"github.com/onflow/flow-go/model/events"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// Event is the default options factory for [flow.Event] generation.
+var Event eventFactory
+
+type eventFactory struct{}
+
+type EventOption func(*EventGenerator, *eventConfig)
+
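+// Example usage (an illustrative sketch; the encoding option is arbitrary and
+// defaults to CCF when omitted):
+//
+//	g := NewGeneratorSuite()
+//	event := g.Events().Fixture(
+//		Event.WithEncoding(entities.EventEncodingVersion_JSON_CDC_V0),
+//	)
+
+// eventConfig holds the configuration for event generation.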
+type eventConfig struct { + event flow.Event + encoding entities.EventEncodingVersion +} + +// WithEventType is an option that sets the event type for the event. +func (f eventFactory) WithEventType(eventType flow.EventType) EventOption { + return func(g *EventGenerator, config *eventConfig) { + config.event.Type = eventType + } +} + +// WithTransactionID is an option that sets the transaction ID for the event. +func (f eventFactory) WithTransactionID(transactionID flow.Identifier) EventOption { + return func(g *EventGenerator, config *eventConfig) { + config.event.TransactionID = transactionID + } +} + +// WithTransactionIndex is an option that sets the transaction index for the event. +func (f eventFactory) WithTransactionIndex(transactionIndex uint32) EventOption { + return func(g *EventGenerator, config *eventConfig) { + config.event.TransactionIndex = transactionIndex + } +} + +// WithEventIndex is an option that sets the event index for the event. +func (f eventFactory) WithEventIndex(eventIndex uint32) EventOption { + return func(g *EventGenerator, config *eventConfig) { + config.event.EventIndex = eventIndex + } +} + +// WithPayload is an option that sets the payload for the event. +// Note: if payload is provided, it must already be in the desired encoding. +func (f eventFactory) WithPayload(payload []byte) EventOption { + return func(g *EventGenerator, config *eventConfig) { + config.event.Payload = payload + } +} + +// WithEncoding is an option that sets the encoding for the event payload. +func (f eventFactory) WithEncoding(encoding entities.EventEncodingVersion) EventOption { + return func(g *EventGenerator, config *eventConfig) { + config.encoding = encoding + } +} + +// EventGenerator generates events with consistent randomness. +type EventGenerator struct { + eventFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + eventTypes *EventTypeGenerator + addresses *AddressGenerator +} + +func NewEventGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + eventTypes *EventTypeGenerator, + addresses *AddressGenerator, +) *EventGenerator { + return &EventGenerator{ + random: random, + identifiers: identifiers, + eventTypes: eventTypes, + addresses: addresses, + } +} + +// Fixture generates a [flow.Event] with random data based on the provided options. +func (g *EventGenerator) Fixture(opts ...EventOption) flow.Event { + config := &eventConfig{ + event: flow.Event{ + Type: g.eventTypes.Fixture(), + TransactionID: g.identifiers.Fixture(), + TransactionIndex: 0, + EventIndex: 0, + Payload: nil, // Will be generated based on encoding + }, + encoding: entities.EventEncodingVersion_CCF_V0, // Default to CCF + } + + for _, opt := range opts { + opt(g, config) + } + + // Generate payload if not provided + if config.event.Payload == nil { + config.event.Payload = g.generateEncodedPayload(config.event.Type, config.encoding) + } + + return config.event +} + +// List generates a list of [flow.Event]. +func (g *EventGenerator) List(n int, opts ...EventOption) []flow.Event { + list := make([]flow.Event, n) + for i := range n { + // For lists, we want sequential indices + list[i] = g.Fixture(append(opts, + Event.WithTransactionIndex(uint32(i)), + Event.WithEventIndex(uint32(i)), + )...) + } + // ensure event/transaction indexes are sequential + list = AdjustEventsMetadata(list) + return list +} + +// ForTransaction generates a list of [flow.Event] for a specific transaction. 
+func (g *EventGenerator) ForTransaction(transactionID flow.Identifier, transactionIndex uint32, eventCount int, opts ...EventOption) []flow.Event { + events := make([]flow.Event, eventCount) + for i := range eventCount { + eventOpts := append(opts, + Event.WithTransactionID(transactionID), + Event.WithTransactionIndex(transactionIndex), + Event.WithEventIndex(uint32(i)), + ) + events[i] = g.Fixture(eventOpts...) + } + return events +} + +// ForTransactions generates a list of [flow.Event] for multiple transactions. +func (g *EventGenerator) ForTransactions(transactionIDs []flow.Identifier, eventsPerTransaction int, opts ...EventOption) []flow.Event { + var allEvents []flow.Event + for i, txID := range transactionIDs { + txEvents := g.ForTransaction(txID, uint32(i), eventsPerTransaction, opts...) + allEvents = append(allEvents, txEvents...) + } + return allEvents +} + +// Helper methods for generating random values + +// generateEncodedPayload generates a properly encoded event payload based on the specified encoding. +func (g *EventGenerator) generateEncodedPayload(eventType flow.EventType, encoding entities.EventEncodingVersion) []byte { + testEvent := g.generateCadenceEvent(eventType) + + switch encoding { + case entities.EventEncodingVersion_CCF_V0: + payload, err := ccf.Encode(testEvent) + NoError(err) + return payload + + case entities.EventEncodingVersion_JSON_CDC_V0: + payload, err := jsoncdc.Encode(testEvent) + NoError(err) + return payload + + default: + // Fallback to random bytes for unknown encoding + return g.random.RandomBytes(10) + } +} + +// generateCadenceEvent generates a cadence event fixture from a flow event type. +func (g *EventGenerator) generateCadenceEvent(eventType flow.EventType) cadence.Event { + parsed, err := events.ParseEvent(eventType) + NoError(err) + + var fields []cadence.Field + var values []cadence.Value + var cadenceEventType *cadence.EventType + + if parsed.Type == events.ProtocolEventType { + fields, values = g.generateProtocolEventData(parsed.Name) + cadenceEventType = cadence.NewEventType( + stdlib.FlowLocation{}, + parsed.Name, + fields, + nil, + ) + } else { + address, err := common.BytesToAddress(flow.HexToAddress(parsed.Address).Bytes()) + NoError(err) + + fields, values = g.generateGenericEventData() + cadenceEventType = cadence.NewEventType( + common.NewAddressLocation(nil, address, parsed.ContractName), + fmt.Sprintf("%s.%s", parsed.ContractName, parsed.Name), + fields, + nil, + ) + } + + return cadence.NewEvent(values).WithType(cadenceEventType) +} + +// generateGenericEventData generates generic event data for a cadence event. +func (g *EventGenerator) generateGenericEventData() (fields []cadence.Field, values []cadence.Value) { + testString, err := cadence.NewString(g.random.RandomString(10)) + NoError(err) + + fields = []cadence.Field{ + { + Identifier: "value", + Type: cadence.IntType, + }, + { + Identifier: "message", + Type: cadence.StringType, + }, + } + values = []cadence.Value{ + cadence.NewInt(g.random.IntInRange(1, 100)), + testString, + } + + return fields, values +} + +// generateProtocolEventData generates protocol event data for a cadence event. 
+func (g *EventGenerator) generateProtocolEventData(eventName string) ([]cadence.Field, []cadence.Value) { + switch eventName { + case "AccountCreated": + address, err := common.BytesToAddress(g.addresses.Fixture().Bytes()) + NoError(err) + + fields := []cadence.Field{ + { + Identifier: "address", + Type: cadence.AddressType, + }, + } + values := []cadence.Value{ + cadence.NewAddress(address), + } + + return fields, values + + case "AccountContractAdded": + address, err := common.BytesToAddress(g.addresses.Fixture().Bytes()) + NoError(err) + + contractName, err := cadence.NewString("EventContract") + NoError(err) + + codeHash := unittest.BytesToCdcUInt8(g.random.RandomBytes(32)) + + fields := []cadence.Field{ + { + Identifier: "address", + Type: cadence.AddressType, + }, + { + Identifier: "codeHash", + Type: cadence.NewConstantSizedArrayType(32, cadence.UInt8Type), + }, + { + Identifier: "contract", + Type: cadence.StringType, + }, + } + values := []cadence.Value{ + cadence.NewAddress(address), + cadence.NewArray(codeHash).WithType(cadence.NewConstantSizedArrayType(32, cadence.UInt8Type)), + contractName, + } + + return fields, values + + default: + // If you get this error, you will need to add support for the new event type using the correct fields and values. + panic(fmt.Sprintf("unsupported protocol event type: flow.%s", eventName)) + } +} + +// AdjustEventsMetadata adjusts the event and transaction indexes to be sequential. +// The following changes are made: +// - Transaction Index is updated to match the actual transactions +// - Event Index is updated to be sequential and reset for each transaction +func AdjustEventsMetadata(events []flow.Event) []flow.Event { + if len(events) == 0 { + return events + } + + lastTxID := events[0].TransactionID + txIndex := uint32(0) + eventIndex := uint32(0) + + output := make([]flow.Event, len(events)) + for i, event := range events { + if event.TransactionID != lastTxID { + lastTxID = event.TransactionID + txIndex++ + eventIndex = 0 + } + + event.EventIndex = eventIndex + event.TransactionIndex = txIndex + eventIndex++ + + output[i] = event + } + return output +} diff --git a/utils/unittest/fixtures/event_test.go b/utils/unittest/fixtures/event_test.go new file mode 100644 index 00000000000..05e6810b912 --- /dev/null +++ b/utils/unittest/fixtures/event_test.go @@ -0,0 +1,145 @@ +package fixtures + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" +) + +func TestAdjustEventsMetadata(t *testing.T) { + t.Parallel() + + t.Run("empty events list", func(t *testing.T) { + result := AdjustEventsMetadata([]flow.Event{}) + assert.Empty(t, result) + }) + + t.Run("single event", func(t *testing.T) { + suite := NewGeneratorSuite() + eventGen := suite.Events() + identifier := suite.Identifiers() + + txID := identifier.Fixture() + event := eventGen.Fixture( + Event.WithTransactionID(txID), + Event.WithEventIndex(999), + Event.WithTransactionIndex(999), + ) + + result := AdjustEventsMetadata([]flow.Event{event}) + require.Len(t, result, 1) + + assert.Equal(t, uint32(0), result[0].EventIndex) + assert.Equal(t, uint32(0), result[0].TransactionIndex) + + // unchanged + assert.Equal(t, txID, result[0].TransactionID) + assert.Equal(t, event.Type, result[0].Type) + assert.Equal(t, event.Payload, result[0].Payload) + }) + + t.Run("multiple events from same transaction", func(t *testing.T) { + suite := NewGeneratorSuite() + eventGen := suite.Events() + identifier := 
suite.Identifiers() + + txID := identifier.Fixture() + events := []flow.Event{ + eventGen.Fixture( + Event.WithTransactionID(txID), + Event.WithTransactionIndex(999), + Event.WithEventIndex(999), + ), + eventGen.Fixture( + Event.WithTransactionID(txID), + Event.WithTransactionIndex(888), + Event.WithEventIndex(888), + ), + eventGen.Fixture( + Event.WithTransactionID(txID), + Event.WithTransactionIndex(777), + Event.WithEventIndex(777), + ), + } + + result := AdjustEventsMetadata(events) + require.Len(t, result, 3) + + for i, event := range result { + assert.Equal(t, txID, event.TransactionID) + assert.Equal(t, uint32(0), event.TransactionIndex) + assert.Equal(t, uint32(i), event.EventIndex) + } + }) + + t.Run("multiple events from different transactions", func(t *testing.T) { + suite := NewGeneratorSuite() + eventGen := suite.Events() + identifier := suite.Identifiers() + random := suite.Random() + + txID0 := identifier.Fixture() + txID1 := identifier.Fixture() + txID2 := identifier.Fixture() + + type eventConfig struct { + transactionID flow.Identifier + transactionIndex uint32 + eventIndex uint32 + } + + expected := []eventConfig{ + { + transactionID: txID0, + transactionIndex: 0, + eventIndex: 0, + }, + { + transactionID: txID0, + transactionIndex: 0, + eventIndex: 1, + }, + { + transactionID: txID1, + transactionIndex: 1, + eventIndex: 0, + }, + { + transactionID: txID2, + transactionIndex: 2, + eventIndex: 0, + }, + { + transactionID: txID2, + transactionIndex: 2, + eventIndex: 1, + }, + { + transactionID: txID2, + transactionIndex: 2, + eventIndex: 2, + }, + } + + events := make([]flow.Event, len(expected)) + for i, event := range expected { + events[i] = eventGen.Fixture( + Event.WithTransactionID(event.transactionID), + Event.WithTransactionIndex(random.Uint32()), + Event.WithEventIndex(random.Uint32()), + ) + } + + result := AdjustEventsMetadata(events) + require.Len(t, result, len(expected)) + + for i, event := range result { + assert.Equal(t, expected[i].transactionID, event.TransactionID) + assert.Equal(t, expected[i].transactionIndex, event.TransactionIndex) + assert.Equal(t, expected[i].eventIndex, event.EventIndex) + } + }) +} diff --git a/utils/unittest/fixtures/event_type.go b/utils/unittest/fixtures/event_type.go new file mode 100644 index 00000000000..542f46e7e47 --- /dev/null +++ b/utils/unittest/fixtures/event_type.go @@ -0,0 +1,156 @@ +package fixtures + +import ( + "fmt" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/stdlib" + + "github.com/onflow/flow-go/model/events" + "github.com/onflow/flow-go/model/flow" +) + +const ( + protocolEventName = "flow" +) + +var ( + sampleContractNames = []string{"TestContract", "MyContract", "EventContract", "SampleContract", "DemoContract"} + sampleEventNames = []string{"TestEvent", "MyEvent", "SampleEvent", "DemoEvent", "Created", "Updated", "Deleted"} +) + +// EventType is the default options factory for [flow.EventType] generation. +var EventType eventTypeFactory + +type eventTypeFactory struct{} + +type EventTypeOption func(*EventTypeGenerator, *eventTypeConfig) + +// EventTypeGenerator generates event types with consistent randomness. 
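+//
+// Generated event types follow the canonical Flow formats (illustrative values):
+//
+//	A.1654653399040a61.TestContract.TestEvent (contract event)
+//	flow.AccountCreated                       (protocol event)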
+type EventTypeGenerator struct { + eventTypeFactory + + random *RandomGenerator + addresses *AddressGenerator +} + +func NewEventTypeGenerator( + random *RandomGenerator, + addresses *AddressGenerator, +) *EventTypeGenerator { + return &EventTypeGenerator{ + random: random, + addresses: addresses, + } +} + +// eventTypeConfig holds the configuration for event type generation. +type eventTypeConfig struct { + address flow.Address + contractName string + eventName string +} + +// WithAddress is an option that sets the address for the event type. +func (f eventTypeFactory) WithAddress(address flow.Address) EventTypeOption { + return func(g *EventTypeGenerator, config *eventTypeConfig) { + config.address = address + } +} + +// WithContractName is an option that sets the contract name for the event type. +func (f eventTypeFactory) WithContractName(contractName string) EventTypeOption { + return func(g *EventTypeGenerator, config *eventTypeConfig) { + config.contractName = contractName + } +} + +// WithEventName is an option that sets the event name for the event type. +func (f eventTypeFactory) WithEventName(eventName string) EventTypeOption { + return func(g *EventTypeGenerator, config *eventTypeConfig) { + config.eventName = eventName + } +} + +// Fixture generates a [flow.EventType] with random data based on the provided options. +func (g *EventTypeGenerator) Fixture(opts ...EventTypeOption) flow.EventType { + config := &eventTypeConfig{ + address: g.addresses.Fixture(), + contractName: g.generateContractName(), + eventName: g.generateEventName(), + } + + for _, opt := range opts { + opt(g, config) + } + + if config.contractName == protocolEventName { + return flow.EventType(fmt.Sprintf("%s.%s", protocolEventName, config.eventName)) + } + + return flow.EventType(fmt.Sprintf("A.%s.%s.%s", config.address, config.contractName, config.eventName)) +} + +// List generates a list of [flow.EventType]. +func (g *EventTypeGenerator) List(n int, opts ...EventTypeOption) []flow.EventType { + types := make([]flow.EventType, n) + for i := range n { + types[i] = g.Fixture(opts...) + } + return types +} + +// generateContractName generates a random contract name. +func (g *EventTypeGenerator) generateContractName() string { + return RandomElement(g.random, sampleContractNames) +} + +// generateEventName generates a random event name. +func (g *EventTypeGenerator) generateEventName() string { + return RandomElement(g.random, sampleEventNames) +} + +// ToCadenceEventType converts a flow.EventType to a cadence.EventType. 
+func ToCadenceEventType(eventType flow.EventType) *cadence.EventType { + parsed, err := events.ParseEvent(eventType) + NoError(err) + + // TODO: add support for actual protocol event fields + if parsed.Type == events.ProtocolEventType { + return cadence.NewEventType( + stdlib.FlowLocation{}, + parsed.Name, + []cadence.Field{ + { + Identifier: "value", + Type: cadence.IntType, + }, + { + Identifier: "message", + Type: cadence.StringType, + }, + }, + nil, + ) + } + + address, err := common.BytesToAddress(flow.HexToAddress(parsed.Address).Bytes()) + NoError(err) + + return cadence.NewEventType( + common.NewAddressLocation(nil, address, parsed.ContractName), + fmt.Sprintf("%s.%s", parsed.ContractName, parsed.Name), + []cadence.Field{ + { + Identifier: "value", + Type: cadence.IntType, + }, + { + Identifier: "message", + Type: cadence.StringType, + }, + }, + nil, + ) +} diff --git a/utils/unittest/fixtures/execution_receipt.go b/utils/unittest/fixtures/execution_receipt.go new file mode 100644 index 00000000000..5cd7079323d --- /dev/null +++ b/utils/unittest/fixtures/execution_receipt.go @@ -0,0 +1,178 @@ +package fixtures + +import ( + "github.com/onflow/crypto" + + "github.com/onflow/flow-go/model/flow" +) + +// ExecutionReceipt is the default options factory for [flow.ExecutionReceipt] generation. +var ExecutionReceipt executionReceiptFactory + +type executionReceiptFactory struct{} + +type ExecutionReceiptOption func(*ExecutionReceiptGenerator, *flow.ExecutionReceipt) + +// WithExecutorID is an option that sets the `ExecutorID` of the execution receipt. +func (f executionReceiptFactory) WithExecutorID(executorID flow.Identifier) ExecutionReceiptOption { + return func(g *ExecutionReceiptGenerator, receipt *flow.ExecutionReceipt) { + receipt.ExecutorID = executorID + } +} + +// WithExecutionResult is an option that sets the `ExecutionResult` of the execution receipt. +func (f executionReceiptFactory) WithExecutionResult(result flow.ExecutionResult) ExecutionReceiptOption { + return func(g *ExecutionReceiptGenerator, receipt *flow.ExecutionReceipt) { + receipt.ExecutionResult = result + } +} + +// WithSpocks is an option that sets the `Spocks` of the execution receipt. +func (f executionReceiptFactory) WithSpocks(spocks ...crypto.Signature) ExecutionReceiptOption { + return func(g *ExecutionReceiptGenerator, receipt *flow.ExecutionReceipt) { + receipt.Spocks = spocks + } +} + +// WithExecutorSignature is an option that sets the `ExecutorSignature` of the execution receipt. +func (f executionReceiptFactory) WithExecutorSignature(executorSignature crypto.Signature) ExecutionReceiptOption { + return func(g *ExecutionReceiptGenerator, receipt *flow.ExecutionReceipt) { + receipt.ExecutorSignature = executorSignature + } +} + +// ExecutionReceiptGenerator generates execution receipts with consistent randomness. +type ExecutionReceiptGenerator struct { + executionReceiptFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + executionResults *ExecutionResultGenerator + signatures *SignatureGenerator +} + +// NewExecutionReceiptGenerator creates a new ExecutionReceiptGenerator. 
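+//
+// Callers typically obtain one through [GeneratorSuite.ExecutionReceipts] rather
+// than constructing it directly (an illustrative sketch):
+//
+//	receipt := NewGeneratorSuite().ExecutionReceipts().Fixture()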
+func NewExecutionReceiptGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + executionResults *ExecutionResultGenerator, + signatures *SignatureGenerator, +) *ExecutionReceiptGenerator { + return &ExecutionReceiptGenerator{ + random: random, + identifiers: identifiers, + executionResults: executionResults, + signatures: signatures, + } +} + +// Fixture generates a [flow.ExecutionReceipt] with random data based on the provided options. +func (g *ExecutionReceiptGenerator) Fixture(opts ...ExecutionReceiptOption) *flow.ExecutionReceipt { + receipt := &flow.ExecutionReceipt{ + UnsignedExecutionReceipt: flow.UnsignedExecutionReceipt{ + ExecutorID: g.identifiers.Fixture(), + ExecutionResult: *g.executionResults.Fixture(), + Spocks: g.signatures.List(g.random.IntInRange(1, 5)), + }, + ExecutorSignature: g.signatures.Fixture(), + } + + for _, opt := range opts { + opt(g, receipt) + } + + return receipt +} + +// List generates a list of [flow.ExecutionReceipt]. +func (g *ExecutionReceiptGenerator) List(n int, opts ...ExecutionReceiptOption) []*flow.ExecutionReceipt { + receipts := make([]*flow.ExecutionReceipt, n) + for i := range n { + receipts[i] = g.Fixture(opts...) + } + return receipts +} + +// ExecutionReceiptStub is the default options factory for [flow.ExecutionReceiptStub] generation. +var ExecutionReceiptStub executionReceiptStubFactory + +type executionReceiptStubFactory struct{} + +type ExecutionReceiptStubOption func(*ExecutionReceiptStubGenerator, *flow.ExecutionReceiptStub) + +// WithExecutorID is an option that sets the `ExecutorID` of the execution receipt stub. +func (f executionReceiptStubFactory) WithExecutorID(executorID flow.Identifier) ExecutionReceiptStubOption { + return func(g *ExecutionReceiptStubGenerator, stub *flow.ExecutionReceiptStub) { + stub.ExecutorID = executorID + } +} + +// WithResultID is an option that sets the `ResultID` of the execution receipt stub. +func (f executionReceiptStubFactory) WithResultID(resultID flow.Identifier) ExecutionReceiptStubOption { + return func(g *ExecutionReceiptStubGenerator, stub *flow.ExecutionReceiptStub) { + stub.ResultID = resultID + } +} + +// WithSpocks is an option that sets the `Spocks` of the execution receipt stub. +func (f executionReceiptStubFactory) WithSpocks(spocks ...crypto.Signature) ExecutionReceiptStubOption { + return func(g *ExecutionReceiptStubGenerator, stub *flow.ExecutionReceiptStub) { + stub.Spocks = spocks + } +} + +// WithExecutorSignature is an option that sets the `ExecutorSignature` of the execution receipt stub. +func (f executionReceiptStubFactory) WithExecutorSignature(executorSignature crypto.Signature) ExecutionReceiptStubOption { + return func(g *ExecutionReceiptStubGenerator, stub *flow.ExecutionReceiptStub) { + stub.ExecutorSignature = executorSignature + } +} + +// ExecutionReceiptStubGenerator generates execution receipt stubs with consistent randomness. +type ExecutionReceiptStubGenerator struct { + executionReceiptStubFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + signatures *SignatureGenerator +} + +// NewExecutionReceiptStubGenerator creates a new ExecutionReceiptStubGenerator. 
+func NewExecutionReceiptStubGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + signatures *SignatureGenerator, +) *ExecutionReceiptStubGenerator { + return &ExecutionReceiptStubGenerator{ + random: random, + identifiers: identifiers, + signatures: signatures, + } +} + +// Fixture generates a [flow.ExecutionReceiptStub] with random data based on the provided options. +func (g *ExecutionReceiptStubGenerator) Fixture(opts ...ExecutionReceiptStubOption) *flow.ExecutionReceiptStub { + stub := &flow.ExecutionReceiptStub{ + UnsignedExecutionReceiptStub: flow.UnsignedExecutionReceiptStub{ + ExecutorID: g.identifiers.Fixture(), + ResultID: g.identifiers.Fixture(), + Spocks: g.signatures.List(g.random.IntInRange(1, 5)), + }, + ExecutorSignature: g.signatures.Fixture(), + } + + for _, opt := range opts { + opt(g, stub) + } + + return stub +} + +// List generates a list of [flow.ExecutionReceiptStub]. +func (g *ExecutionReceiptStubGenerator) List(n int, opts ...ExecutionReceiptStubOption) []*flow.ExecutionReceiptStub { + stubs := make([]*flow.ExecutionReceiptStub, n) + for i := range n { + stubs[i] = g.Fixture(opts...) + } + return stubs +} diff --git a/utils/unittest/fixtures/execution_result.go b/utils/unittest/fixtures/execution_result.go new file mode 100644 index 00000000000..a63f4327373 --- /dev/null +++ b/utils/unittest/fixtures/execution_result.go @@ -0,0 +1,140 @@ +package fixtures + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// ExecutionResult is the default options factory for [flow.ExecutionResult] generation. +var ExecutionResult executionResultFactory + +type executionResultFactory struct{} + +type ExecutionResultOption func(*ExecutionResultGenerator, *flow.ExecutionResult) + +// WithPreviousResultID is an option that sets the `PreviousResultID` of the execution result. +func (f executionResultFactory) WithPreviousResultID(previousResultID flow.Identifier) ExecutionResultOption { + return func(g *ExecutionResultGenerator, result *flow.ExecutionResult) { + result.PreviousResultID = previousResultID + } +} + +// WithBlockID is an option that sets the `BlockID` of the execution result. +func (f executionResultFactory) WithBlockID(blockID flow.Identifier) ExecutionResultOption { + return func(g *ExecutionResultGenerator, result *flow.ExecutionResult) { + result.BlockID = blockID + for _, chunk := range result.Chunks { + chunk.BlockID = blockID + } + } +} + +// WithChunks is an option that sets the `Chunks` of the execution result. +func (f executionResultFactory) WithChunks(chunks flow.ChunkList) ExecutionResultOption { + return func(g *ExecutionResultGenerator, result *flow.ExecutionResult) { + result.Chunks = chunks + } +} + +// WithServiceEvents is an option that sets the `ServiceEvents` of the execution result. +func (f executionResultFactory) WithServiceEvents(serviceEvents flow.ServiceEventList) ExecutionResultOption { + return func(g *ExecutionResultGenerator, result *flow.ExecutionResult) { + result.ServiceEvents = serviceEvents + } +} + +// WithExecutionDataID is an option that sets the `ExecutionDataID` of the execution result. 
+func (f executionResultFactory) WithExecutionDataID(executionDataID flow.Identifier) ExecutionResultOption { + return func(g *ExecutionResultGenerator, result *flow.ExecutionResult) { + result.ExecutionDataID = executionDataID + } +} + +// WithBlock is an option that sets the `BlockID` of the execution result and all configured `Chunks` +func (f executionResultFactory) WithBlock(block *flow.Block) ExecutionResultOption { + return func(g *ExecutionResultGenerator, result *flow.ExecutionResult) { + result.BlockID = block.ID() + for _, chunk := range result.Chunks { + chunk.BlockID = block.ID() + } + } +} + +// WithPreviousResult is an option that sets the `PreviousResultID` of the execution result, and +// adjusts the `StartState` of the first chunk to match the final state of the previous result. +func (f executionResultFactory) WithPreviousResult(previousResult *flow.ExecutionResult) ExecutionResultOption { + return func(g *ExecutionResultGenerator, result *flow.ExecutionResult) { + result.PreviousResultID = previousResult.ID() + finalState, err := previousResult.FinalStateCommitment() + NoError(err) + result.Chunks[0].StartState = finalState + } +} + +// WithFinalState is an option that sets the `EndState` of the last chunk. +func (f executionResultFactory) WithFinalState(finalState flow.StateCommitment) ExecutionResultOption { + return func(g *ExecutionResultGenerator, result *flow.ExecutionResult) { + result.Chunks[len(result.Chunks)-1].EndState = finalState + } +} + +// ExecutionResultGenerator generates execution results with consistent randomness. +type ExecutionResultGenerator struct { + executionResultFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + chunks *ChunkGenerator + serviceEvents *ServiceEventGenerator + stateCommitments *StateCommitmentGenerator +} + +// NewExecutionResultGenerator creates a new ExecutionResultGenerator. +func NewExecutionResultGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + chunks *ChunkGenerator, + serviceEvents *ServiceEventGenerator, + stateCommitments *StateCommitmentGenerator, +) *ExecutionResultGenerator { + return &ExecutionResultGenerator{ + random: random, + identifiers: identifiers, + chunks: chunks, + serviceEvents: serviceEvents, + stateCommitments: stateCommitments, + } +} + +// Fixture generates a [flow.ExecutionResult] with random data based on the provided options. +func (g *ExecutionResultGenerator) Fixture(opts ...ExecutionResultOption) *flow.ExecutionResult { + blockID := g.identifiers.Fixture() + chunks := g.chunks.List(g.random.IntInRange(1, 4), Chunk.WithBlockID(blockID)) + serviceEventCount := 0 + for _, chunk := range chunks { + serviceEventCount += int(chunk.ServiceEventCount) + } + result := &flow.ExecutionResult{ + PreviousResultID: g.identifiers.Fixture(), + BlockID: blockID, + Chunks: chunks, + ServiceEvents: g.serviceEvents.List(serviceEventCount), + ExecutionDataID: g.identifiers.Fixture(), + } + + for _, opt := range opts { + opt(g, result) + } + + Assertf(len(result.Chunks) > 0, "there must be at least one chunk") + + return result +} + +// List generates a list of [flow.ExecutionResult]. +func (g *ExecutionResultGenerator) List(n int, opts ...ExecutionResultOption) []*flow.ExecutionResult { + list := make([]*flow.ExecutionResult, n) + for i := range n { + list[i] = g.Fixture(opts...) 
+ } + return list +} diff --git a/utils/unittest/fixtures/generator.go b/utils/unittest/fixtures/generator.go new file mode 100644 index 00000000000..696de62cd7c --- /dev/null +++ b/utils/unittest/fixtures/generator.go @@ -0,0 +1,398 @@ +package fixtures + +import ( + "math/rand" + "time" + + "github.com/onflow/flow-go/model/flow" +) + +// GeneratorSuite provides a context-aware generator system for creating test fixtures. +// It manages a shared random number generator and provides access to specialized generators. +type GeneratorSuite struct { + rng *rand.Rand + + chainID flow.ChainID +} + +// GeneratorSuiteOption defines an option for configuring a GeneratorSuite. +type GeneratorSuiteOption func(*generatorSuiteConfig) + +type generatorSuiteConfig struct { + seed int64 + chainID flow.ChainID +} + +// WithSeed sets the random seed used for the random number generator used by all generators. +// Specifying a seed makes the fixture data deterministic. +func WithSeed(seed int64) GeneratorSuiteOption { + return func(config *generatorSuiteConfig) { + config.seed = seed + } +} + +// WithChainID sets the chain ID that's used as the default for all generators. +func WithChainID(chainID flow.ChainID) GeneratorSuiteOption { + return func(config *generatorSuiteConfig) { + config.chainID = chainID + } +} + +// NewGeneratorSuite creates a new generator suite with optional configuration. +// If no seed is specified, a random seed is generated. +// If no chain ID is specified, the default chain ID is [flow.Emulator]. +func NewGeneratorSuite(opts ...GeneratorSuiteOption) *GeneratorSuite { + config := &generatorSuiteConfig{ + chainID: flow.Emulator, + seed: time.Now().UnixNano(), // default random seed + } + + for _, opt := range opts { + opt(config) + } + + return &GeneratorSuite{ + chainID: config.chainID, + rng: rand.New(rand.NewSource(config.seed)), + } +} + +// ChainID returns the default chain ID used by all generators. +func (g *GeneratorSuite) ChainID() flow.ChainID { + return g.chainID +} + +// Random returns the shared random generator. +func (g *GeneratorSuite) Random() *RandomGenerator { + return NewRandomGenerator(g.rng) +} + +// Headers returns a generator for [flow.Header]. +func (g *GeneratorSuite) Headers() *HeaderGenerator { + return NewBlockHeaderGenerator( + g.Random(), + g.Identifiers(), + g.Signatures(), + g.SignerIndices(), + g.QuorumCertificates(), + g.Time(), + g.chainID, + ) +} + +// Blocks returns a generator for [flow.Block]. +func (g *GeneratorSuite) Blocks() *BlockGenerator { + return NewBlockGenerator( + g.Random(), + g.Identifiers(), + g.Headers(), + g.Payloads(), + g.chainID, + ) +} + +// Payloads returns a generator for [flow.Payload]. +func (g *GeneratorSuite) Payloads() *PayloadGenerator { + return NewPayloadGenerator( + g.Random(), + g.Identifiers(), + g.Guarantees(), + g.Seals(), + g.ExecutionReceiptStubs(), + g.ExecutionResults(), + ) +} + +// Seals returns a generator for [flow.Seal]. +func (g *GeneratorSuite) Seals() *SealGenerator { + return NewSealGenerator( + g.Random(), + g.Identifiers(), + g.StateCommitments(), + g.AggregatedSignatures(), + ) +} + +// Guarantees returns a generator for [flow.CollectionGuarantee]. +func (g *GeneratorSuite) Guarantees() *CollectionGuaranteeGenerator { + return NewCollectionGuaranteeGenerator( + g.Random(), + g.Identifiers(), + g.Signatures(), + g.SignerIndices(), + g.chainID, + ) +} + +// ExecutionReceiptStubs returns a generator for [flow.ExecutionReceiptStub]. 
+func (g *GeneratorSuite) ExecutionReceiptStubs() *ExecutionReceiptStubGenerator {
+	return NewExecutionReceiptStubGenerator(
+		g.Random(),
+		g.Identifiers(),
+		g.Signatures(),
+	)
+}
+
+// Identifiers returns a shared generator for [flow.Identifier].
+func (g *GeneratorSuite) Identifiers() *IdentifierGenerator {
+	return NewIdentifierGenerator(g.Random())
+}
+
+// Signatures returns a shared generator for [crypto.Signature].
+func (g *GeneratorSuite) Signatures() *SignatureGenerator {
+	return NewSignatureGenerator(g.Random())
+}
+
+// Addresses returns a shared generator for [flow.Address].
+func (g *GeneratorSuite) Addresses() *AddressGenerator {
+	return NewAddressGenerator(g.Random(), g.chainID)
+}
+
+// SignerIndices returns a generator for [flow.SignerIndices].
+func (g *GeneratorSuite) SignerIndices() *SignerIndicesGenerator {
+	return NewSignerIndicesGenerator(g.Random())
+}
+
+// QuorumCertificates returns a generator for [flow.QuorumCertificate].
+func (g *GeneratorSuite) QuorumCertificates() *QuorumCertificateGenerator {
+	return NewQuorumCertificateGenerator(
+		g.Random(),
+		g.Identifiers(),
+		g.SignerIndices(),
+		g.Signatures(),
+	)
+}
+
+// QuorumCertificatesWithSignerIDs returns a generator for [flow.QuorumCertificateWithSignerIDs].
+func (g *GeneratorSuite) QuorumCertificatesWithSignerIDs() *QuorumCertificateWithSignerIDsGenerator {
+	return NewQuorumCertificateWithSignerIDsGenerator(
+		g.Random(),
+		g.Identifiers(),
+		g.QuorumCertificates(),
+	)
+}
+
+// Transactions returns a generator for [flow.TransactionBody].
+func (g *GeneratorSuite) Transactions() *TransactionGenerator {
+	return NewTransactionGenerator(
+		g.Identifiers(),
+		g.ProposalKeys(),
+		g.Addresses(),
+		g.TransactionSignatures(),
+	)
+}
+
+// Collections returns a generator for [flow.Collection].
+func (g *GeneratorSuite) Collections() *CollectionGenerator {
+	return NewCollectionGenerator(g.Transactions())
+}
+
+// TransactionResults returns a generator for [flow.TransactionResult].
+func (g *GeneratorSuite) TransactionResults() *TransactionResultGenerator {
+	return NewTransactionResultGenerator(g.Random(), g.Identifiers())
+}
+
+// LightTransactionResults returns a generator for [flow.LightTransactionResult].
+func (g *GeneratorSuite) LightTransactionResults() *LightTransactionResultGenerator {
+	return NewLightTransactionResultGenerator(g.TransactionResults())
+}
+
+// TransactionSignatures returns a generator for [flow.TransactionSignature].
+func (g *GeneratorSuite) TransactionSignatures() *TransactionSignatureGenerator {
+	return NewTransactionSignatureGenerator(g.Random(), g.Addresses())
+}
+
+// ProposalKeys returns a generator for [flow.ProposalKey].
+func (g *GeneratorSuite) ProposalKeys() *ProposalKeyGenerator {
+	return NewProposalKeyGenerator(g.Addresses())
+}
+
+// Events returns a generator for [flow.Event].
+func (g *GeneratorSuite) Events() *EventGenerator {
+	return NewEventGenerator(
+		g.Random(),
+		g.Identifiers(),
+		g.EventTypes(),
+		g.Addresses(),
+	)
+}
+
+// EventTypes returns a generator for [flow.EventType].
+func (g *GeneratorSuite) EventTypes() *EventTypeGenerator {
+	return NewEventTypeGenerator(g.Random(), g.Addresses())
+}
+
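+// Example usage (an illustrative sketch): two suites created with the same seed
+// produce identical fixtures for the same sequence of calls.
+//
+//	g1 := NewGeneratorSuite(WithSeed(42))
+//	g2 := NewGeneratorSuite(WithSeed(42))
+//	// g1.Headers().Fixture() equals g2.Headers().Fixture()
+
+// ChunkExecutionDatas returns a generator for [execution_data.ChunkExecutionData].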
+func (g *GeneratorSuite) ChunkExecutionDatas() *ChunkExecutionDataGenerator {
+	return NewChunkExecutionDataGenerator(
+		g.Random(),
+		g.Collections(),
+		g.LightTransactionResults(),
+		g.Events(),
+		g.TrieUpdates(),
+	)
+}
+
+// BlockExecutionDatas returns a generator for [execution_data.BlockExecutionData].
+func (g *GeneratorSuite) BlockExecutionDatas() *BlockExecutionDataGenerator {
+	return NewBlockExecutionDataGenerator(
+		g.Random(),
+		g.Identifiers(),
+		g.ChunkExecutionDatas(),
+	)
+}
+
+// BlockExecutionDataEntities returns a generator for [execution_data.BlockExecutionDataEntity].
+func (g *GeneratorSuite) BlockExecutionDataEntities() *BlockExecutionDataEntityGenerator {
+	return NewBlockExecutionDataEntityGenerator(g.BlockExecutionDatas())
+}
+
+// TrieUpdates returns a generator for [ledger.TrieUpdate].
+func (g *GeneratorSuite) TrieUpdates() *TrieUpdateGenerator {
+	return NewTrieUpdateGenerator(
+		g.Random(),
+		g.LedgerPaths(),
+		g.LedgerPayloads(),
+	)
+}
+
+// LedgerPaths returns a generator for [ledger.Path].
+func (g *GeneratorSuite) LedgerPaths() *LedgerPathGenerator {
+	return NewLedgerPathGenerator(g.Random())
+}
+
+// LedgerPayloads returns a generator for [ledger.Payload].
+func (g *GeneratorSuite) LedgerPayloads() *LedgerPayloadGenerator {
+	return NewLedgerPayloadGenerator(g.Random(), g.Addresses(), g.LedgerValues())
+}
+
+// LedgerValues returns a generator for [ledger.Value].
+func (g *GeneratorSuite) LedgerValues() *LedgerValueGenerator {
+	return NewLedgerValueGenerator(g.Random())
+}
+
+// Time returns a generator for [time.Time].
+func (g *GeneratorSuite) Time() *TimeGenerator {
+	return NewTimeGenerator(g.Random())
+}
+
+// Identities returns a generator for [flow.Identity].
+func (g *GeneratorSuite) Identities() *IdentityGenerator {
+	return NewIdentityGenerator(g.Random(), g.Crypto(), g.Identifiers(), g.Addresses())
+}
+
+// Crypto returns a generator for cryptographic keys.
+func (g *GeneratorSuite) Crypto() *CryptoGenerator {
+	return NewCryptoGenerator(g.Random())
+}
+
+// StateCommitments returns a generator for [flow.StateCommitment].
+func (g *GeneratorSuite) StateCommitments() *StateCommitmentGenerator {
+	return NewStateCommitmentGenerator(g.Random())
+}
+
+// AggregatedSignatures returns a generator for [flow.AggregatedSignature].
+func (g *GeneratorSuite) AggregatedSignatures() *AggregatedSignatureGenerator {
+	return NewAggregatedSignatureGenerator(g.Random(), g.Identifiers(), g.Signatures())
+}
+
+// TimeoutCertificates returns a generator for [flow.TimeoutCertificate].
+func (g *GeneratorSuite) TimeoutCertificates() *TimeoutCertificateGenerator {
+	return NewTimeoutCertificateGenerator(g.Random(), g.QuorumCertificates(), g.Signatures(), g.SignerIndices())
+}
+
+// ExecutionResults returns a generator for [flow.ExecutionResult].
+func (g *GeneratorSuite) ExecutionResults() *ExecutionResultGenerator {
+	return NewExecutionResultGenerator(
+		g.Random(),
+		g.Identifiers(),
+		g.Chunks(),
+		g.ServiceEvents(),
+		g.StateCommitments(),
+	)
+}
+
+// ExecutionReceipts returns a generator for [flow.ExecutionReceipt].
+func (g *GeneratorSuite) ExecutionReceipts() *ExecutionReceiptGenerator {
+	return NewExecutionReceiptGenerator(
+		g.Random(),
+		g.Identifiers(),
+		g.ExecutionResults(),
+		g.Signatures(),
+	)
+}
+
+// Chunks returns a generator for [flow.Chunk].
+func (g *GeneratorSuite) Chunks() *ChunkGenerator {
+	return NewChunkGenerator(
+		g.Random(),
+		g.Identifiers(),
+		g.StateCommitments(),
+	)
+}
+
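+// Example usage (an illustrative sketch; shows chaining result fixtures so that
+// state commitments line up across consecutive results):
+//
+//	g := NewGeneratorSuite()
+//	parent := g.ExecutionResults().Fixture()
+//	child := g.ExecutionResults().Fixture(
+//		ExecutionResult.WithPreviousResult(parent),
+//	)
+
+// ServiceEvents returns a generator for [flow.ServiceEvent].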
+func (g *GeneratorSuite) ServiceEvents() *ServiceEventGenerator { + return NewServiceEventGenerator( + g.Random(), + g.EpochSetups(), + g.EpochCommits(), + g.EpochRecovers(), + g.VersionBeacons(), + g.ProtocolStateVersionUpgrades(), + g.SetEpochExtensionViewCounts(), + g.EjectNodes(), + ) +} + +// VersionBeacons returns a generator for [flow.VersionBeacon]. +func (g *GeneratorSuite) VersionBeacons() *VersionBeaconGenerator { + return NewVersionBeaconGenerator(g.Random()) +} + +// ProtocolStateVersionUpgrades returns a generator for [flow.ProtocolStateVersionUpgrade]. +func (g *GeneratorSuite) ProtocolStateVersionUpgrades() *ProtocolStateVersionUpgradeGenerator { + return NewProtocolStateVersionUpgradeGenerator(g.Random()) +} + +// SetEpochExtensionViewCounts returns a generator for [flow.SetEpochExtensionViewCount]. +func (g *GeneratorSuite) SetEpochExtensionViewCounts() *SetEpochExtensionViewCountGenerator { + return NewSetEpochExtensionViewCountGenerator(g.Random()) +} + +// EjectNodes returns a generator for [flow.EjectNode]. +func (g *GeneratorSuite) EjectNodes() *EjectNodeGenerator { + return NewEjectNodeGenerator(g.Identifiers()) +} + +// EpochSetups returns a generator for [flow.EpochSetup]. +func (g *GeneratorSuite) EpochSetups() *EpochSetupGenerator { + return NewEpochSetupGenerator(g.Random(), g.Time(), g.Identities()) +} + +// EpochCommits returns a generator for [flow.EpochCommit]. +func (g *GeneratorSuite) EpochCommits() *EpochCommitGenerator { + return NewEpochCommitGenerator( + g.Random(), + g.Crypto(), + g.Identifiers(), + g.QuorumCertificatesWithSignerIDs(), + ) +} + +// EpochRecovers returns a generator for [flow.EpochRecover]. +func (g *GeneratorSuite) EpochRecovers() *EpochRecoverGenerator { + return NewEpochRecoverGenerator( + g.Random(), + g.EpochSetups(), + g.EpochCommits(), + ) +} + +// RegisterEntries returns a generator for [flow.RegisterEntry]. +func (g *GeneratorSuite) RegisterEntries() *RegisterEntryGenerator { + return NewRegisterEntryGenerator(g.Random(), g.LedgerPayloads()) +} + +// TransactionErrorMessages returns a generator for [flow.TransactionResultErrorMessage]. 
+func (g *GeneratorSuite) TransactionErrorMessages() *TransactionErrorMessageGenerator { + return NewTransactionErrorMessageGenerator(g.Random(), g.Identifiers()) +} diff --git a/utils/unittest/fixtures/generators_test.go b/utils/unittest/fixtures/generators_test.go new file mode 100644 index 00000000000..af78905d136 --- /dev/null +++ b/utils/unittest/fixtures/generators_test.go @@ -0,0 +1,925 @@ +package fixtures + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/onflow/crypto" + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/ledger/common/hash" + "github.com/onflow/flow-go/model/flow" +) + +func TestGeneratorSuiteRandomSeed(t *testing.T) { + // Test with random seed (no seed specified) + suite1 := NewGeneratorSuite() + suite2 := NewGeneratorSuite() + + // generated values should be different + header := suite1.Headers().Fixture() + header2 := suite2.Headers().Fixture() + assert.NotEqual(t, header, header2) +} + +func TestGeneratorsDeterminism(t *testing.T) { + // Test all generators + tests := []struct { + name string + fixture func(a, b *GeneratorSuite) (any, any) + list func(a, b *GeneratorSuite, n int) (any, any) + sanity func(t *testing.T, suite *GeneratorSuite) + }{ + // All generators have both Fixture and List methods + { + name: "BlockHeaders", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Headers().Fixture(), b.Headers().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Headers().List(n), b.Headers().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + // Test basic block header generation + header1 := suite.Headers().Fixture() + require.NotNil(t, header1) + assert.Equal(t, flow.Emulator, header1.ChainID) + assert.Greater(t, header1.Height, uint64(0)) + assert.Greater(t, header1.View, uint64(0)) + + // Test with specific height + header2 := suite.Headers().Fixture(Header.WithHeight(100)) + assert.Equal(t, uint64(100), header2.Height) + + // Test with parent details + parent := suite.Headers().Fixture() + child1 := suite.Headers().Fixture(Header.WithParent(parent.ID(), parent.View, parent.Height)) + assert.Equal(t, parent.Height+1, child1.Height) + assert.Equal(t, parent.ID(), child1.ParentID) + assert.Equal(t, parent.ChainID, child1.ChainID) + assert.Less(t, parent.View, child1.View) + + // Test with parent header + child2 := suite.Headers().Fixture(Header.WithParentHeader(parent)) + assert.Equal(t, parent.Height+1, child2.Height) + assert.Equal(t, parent.ID(), child2.ParentID) + assert.Equal(t, parent.ChainID, child2.ChainID) + assert.Less(t, parent.View, child2.View) + + // Test on specific chain + header3 := suite.Headers().Fixture(Header.WithChainID(flow.Testnet)) + assert.Equal(t, flow.Testnet, header3.ChainID) + }, + }, + { + name: "Time", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Time().Fixture(), b.Time().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Time().List(n), b.Time().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + // Test basic time generation + time1 := suite.Time().Fixture() + require.NotNil(t, time1) + assert.Greater(t, time1.Unix(), int64(0)) + + // Test default is random + time2 := suite.Time().Fixture() + assert.NotEqual(t, time1, time2) + }, + }, + { + name: "Identifiers", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Identifiers().Fixture(), b.Identifiers().Fixture() + }, + list: func(a, b 
*GeneratorSuite, n int) (any, any) { + return a.Identifiers().List(n), b.Identifiers().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + identifier := suite.Identifiers().Fixture() + assert.NotEmpty(t, identifier) + assert.NotEqual(t, flow.ZeroID, identifier) + }, + }, + { + name: "Signatures", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Signatures().Fixture(), b.Signatures().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Signatures().List(n), b.Signatures().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + signature := suite.Signatures().Fixture() + assert.NotEmpty(t, signature) + assert.Len(t, signature, crypto.SignatureLenBLSBLS12381) + }, + }, + { + name: "Addresses", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Addresses().Fixture(), b.Addresses().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Addresses().List(n), b.Addresses().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + addr := suite.Addresses().Fixture() + assert.True(t, suite.ChainID().Chain().IsValid(addr)) + }, + }, + { + name: "SignerIndices", + fixture: func(a, b *GeneratorSuite) (any, any) { + // use a larger total to avoid accidental collisions + opts := []SignerIndicesOption{SignerIndices.WithSignerCount(1000, 5)} + return a.SignerIndices().Fixture(opts...), b.SignerIndices().Fixture(opts...) + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + // use a larger total to avoid accidental collisions + opts := []SignerIndicesOption{SignerIndices.WithSignerCount(1000, 5)} + return a.SignerIndices().List(n, opts...), b.SignerIndices().List(n, opts...) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + indices := suite.SignerIndices().Fixture() + assert.NotEmpty(t, indices) + }, + }, + { + name: "QuorumCertificates", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.QuorumCertificates().Fixture(), b.QuorumCertificates().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.QuorumCertificates().List(n), b.QuorumCertificates().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + qc := suite.QuorumCertificates().Fixture() + assert.NotEmpty(t, qc) + + qc2 := suite.QuorumCertificates().Fixture(QuorumCertificate.WithView(100)) + assert.Equal(t, uint64(100), qc2.View) + }, + }, + { + name: "ChunkExecutionDatas", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.ChunkExecutionDatas().Fixture(), b.ChunkExecutionDatas().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.ChunkExecutionDatas().List(n), b.ChunkExecutionDatas().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + ced := suite.ChunkExecutionDatas().Fixture() + assert.NotEmpty(t, ced) + }, + }, + { + name: "BlockExecutionDatas", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.BlockExecutionDatas().Fixture(), b.BlockExecutionDatas().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.BlockExecutionDatas().List(n), b.BlockExecutionDatas().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + bed := suite.BlockExecutionDatas().Fixture() + assert.NotEmpty(t, bed) + }, + }, + { + name: "BlockExecutionDataEntities", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.BlockExecutionDataEntities().Fixture(), b.BlockExecutionDataEntities().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return 
a.BlockExecutionDataEntities().List(n), b.BlockExecutionDataEntities().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + bedEntity := suite.BlockExecutionDataEntities().Fixture() + assert.NotEmpty(t, bedEntity) + }, + }, + { + name: "Transactions", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Transactions().Fixture(), b.Transactions().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Transactions().List(n), b.Transactions().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + tx := suite.Transactions().Fixture() + assert.NotEmpty(t, tx) + }, + }, + { + name: "Collections", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Collections().Fixture(), b.Collections().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Collections().List(n), b.Collections().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + col := suite.Collections().Fixture() + assert.NotEmpty(t, col) + + col2 := suite.Collections().Fixture(Collection.WithTxCount(10)) + assert.Len(t, col2.Transactions, 10) + }, + }, + { + name: "TrieUpdates", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.TrieUpdates().Fixture(), b.TrieUpdates().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.TrieUpdates().List(n), b.TrieUpdates().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + trie := suite.TrieUpdates().Fixture() + assert.NotEmpty(t, trie) + }, + }, + { + name: "TransactionResults", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.TransactionResults().Fixture(), b.TransactionResults().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.TransactionResults().List(n), b.TransactionResults().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + tr := suite.TransactionResults().Fixture() + assert.NotEmpty(t, tr) + + tr2 := suite.TransactionResults().Fixture(TransactionResult.WithErrorMessage("custom error")) + assert.Equal(t, "custom error", tr2.ErrorMessage) + }, + }, + { + name: "LightTransactionResults", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.LightTransactionResults().Fixture(), b.LightTransactionResults().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.LightTransactionResults().List(n), b.LightTransactionResults().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + ltr := suite.LightTransactionResults().Fixture() + assert.NotEmpty(t, ltr) + + ltr2 := suite.LightTransactionResults().Fixture(LightTransactionResult.WithFailed(true)) + assert.True(t, ltr2.Failed) + }, + }, + { + name: "TransactionSignatures", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.TransactionSignatures().Fixture(), b.TransactionSignatures().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.TransactionSignatures().List(n), b.TransactionSignatures().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + ts := suite.TransactionSignatures().Fixture() + assert.NotEmpty(t, ts) + }, + }, + { + name: "ProposalKeys", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.ProposalKeys().Fixture(), b.ProposalKeys().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.ProposalKeys().List(n), b.ProposalKeys().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + pk := suite.ProposalKeys().Fixture() + assert.NotEmpty(t, pk) + }, + }, + { + 
name: "Events", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Events().Fixture(), b.Events().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Events().List(n), b.Events().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + event := suite.Events().Fixture() + assert.NotEmpty(t, event) + + eventWithCCF := suite.Events().Fixture(Event.WithEncoding(entities.EventEncodingVersion_CCF_V0)) + assert.NotEmpty(t, eventWithCCF.Payload) + + eventWithJSON := suite.Events().Fixture(Event.WithEncoding(entities.EventEncodingVersion_JSON_CDC_V0)) + assert.NotEmpty(t, eventWithJSON.Payload) + + // Test events for transaction + txID := suite.Identifiers().Fixture() + txEvents := suite.Events().ForTransaction(txID, 0, 3) + assert.Len(t, txEvents, 3) + for i, event := range txEvents { + assert.Equal(t, txID, event.TransactionID) + assert.Equal(t, uint32(0), event.TransactionIndex) + assert.Equal(t, uint32(i), event.EventIndex) + } + }, + }, + { + name: "EventTypes", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.EventTypes().Fixture(), b.EventTypes().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.EventTypes().List(n), b.EventTypes().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + eventType1 := suite.EventTypes().Fixture() + assert.NotEmpty(t, eventType1) + + eventType2 := suite.EventTypes().Fixture(EventType.WithEventName("CustomEvent")) + assert.Contains(t, string(eventType2), "CustomEvent") + + eventType3 := suite.EventTypes().Fixture(EventType.WithContractName("CustomContract")) + assert.Contains(t, string(eventType3), "CustomContract") + + addr := suite.Addresses().Fixture() + eventType4 := suite.EventTypes().Fixture(EventType.WithAddress(addr)) + assert.Contains(t, string(eventType4), addr.String()) + }, + }, + { + name: "LedgerPaths", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.LedgerPaths().Fixture(), b.LedgerPaths().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.LedgerPaths().List(n), b.LedgerPaths().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + lp := suite.LedgerPayloads().Fixture() + assert.NotEmpty(t, lp) + }, + }, + { + name: "LedgerPayloads", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.LedgerPayloads().Fixture(), b.LedgerPayloads().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.LedgerPayloads().List(n), b.LedgerPayloads().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + lp := suite.LedgerPayloads().Fixture() + assert.NotEmpty(t, lp) + }, + }, + { + name: "LedgerValues", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.LedgerValues().Fixture(), b.LedgerValues().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.LedgerValues().List(n), b.LedgerValues().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + lv := suite.LedgerValues().Fixture() + assert.NotEmpty(t, lv) + }, + }, + { + name: "Identities", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Identities().Fixture(), b.Identities().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Identities().List(n), b.Identities().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + identity := suite.Identities().Fixture() + assert.NotEmpty(t, identity.NodeID) + assert.NotEmpty(t, identity.Address) + assert.NotNil(t, identity.StakingPubKey) + assert.NotNil(t, 
identity.NetworkPubKey) + + // Test with specific role + consensus := suite.Identities().Fixture(Identity.WithRole(flow.RoleConsensus)) + assert.Equal(t, flow.RoleConsensus, consensus.Role) + + // Test with all roles + identities := suite.Identities().List(10, Identity.WithAllRoles()) + rolesSeen := make(map[flow.Role]bool) + for _, id := range identities { + rolesSeen[id.Role] = true + } + assert.Equal(t, len(rolesSeen), len(flow.Roles())) // Should see multiple roles + }, + }, + { + name: "StateCommitments", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.StateCommitments().Fixture(), b.StateCommitments().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.StateCommitments().List(n), b.StateCommitments().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + sc := suite.StateCommitments().Fixture() + assert.Len(t, sc, 32) + assert.NotEqual(t, flow.DummyStateCommitment, sc) + + // Test empty state + empty := suite.StateCommitments().Fixture(StateCommitment.WithEmptyState()) + assert.Equal(t, flow.EmptyStateCommitment, empty) + + // Test special state + hash, err := hash.ToHash(suite.Random().RandomBytes(32)) + require.NoError(t, err) + actual := suite.StateCommitments().Fixture(StateCommitment.WithHash(hash)) + assert.Equal(t, flow.StateCommitment(hash), actual) + }, + }, + { + name: "AggregatedSignatures", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.AggregatedSignatures().Fixture(), b.AggregatedSignatures().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.AggregatedSignatures().List(n), b.AggregatedSignatures().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + as := suite.AggregatedSignatures().Fixture() + assert.NotEmpty(t, as.VerifierSignatures) + assert.NotEmpty(t, as.SignerIDs) + }, + }, + { + name: "TimeoutCertificates", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.TimeoutCertificates().Fixture(), b.TimeoutCertificates().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.TimeoutCertificates().List(n), b.TimeoutCertificates().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + tc := suite.TimeoutCertificates().Fixture() + assert.Greater(t, tc.View, uint64(0)) + assert.NotEmpty(t, tc.NewestQCViews) + assert.NotEmpty(t, tc.SignerIndices) + }, + }, + { + name: "ExecutionResults", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.ExecutionResults().Fixture(), b.ExecutionResults().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.ExecutionResults().List(n), b.ExecutionResults().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + er := suite.ExecutionResults().Fixture() + assert.NotEmpty(t, er.PreviousResultID) + assert.NotEmpty(t, er.BlockID) + assert.NotEmpty(t, er.Chunks) + }, + }, + { + name: "ExecutionReceipts", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.ExecutionReceipts().Fixture(), b.ExecutionReceipts().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.ExecutionReceipts().List(n), b.ExecutionReceipts().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + er := suite.ExecutionReceipts().Fixture() + assert.NotEmpty(t, er.ExecutorID) + assert.NotNil(t, er.ExecutionResult) + assert.NotEmpty(t, er.Spocks) + assert.NotEmpty(t, er.ExecutorSignature) + }, + }, + { + name: "Chunks", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Chunks().Fixture(), 
b.Chunks().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Chunks().List(n), b.Chunks().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + chunk := suite.Chunks().Fixture() + assert.NotEmpty(t, chunk.StartState) + assert.NotEmpty(t, chunk.EventCollection) + assert.NotEmpty(t, chunk.BlockID) + }, + }, + { + name: "ServiceEvents", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.ServiceEvents().Fixture(), b.ServiceEvents().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.ServiceEvents().List(n), b.ServiceEvents().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + se := suite.ServiceEvents().Fixture() + assert.NotEmpty(t, se.Type) + assert.NotNil(t, se.Event) + + // Test with specific type + setup := suite.ServiceEvents().Fixture(ServiceEvent.WithType(flow.ServiceEventSetup)) + assert.Equal(t, flow.ServiceEventSetup, setup.Type) + assert.IsType(t, &flow.EpochSetup{}, setup.Event) + }, + }, + { + name: "VersionBeacons", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.VersionBeacons().Fixture(), b.VersionBeacons().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.VersionBeacons().List(n), b.VersionBeacons().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + vb := suite.VersionBeacons().Fixture() + assert.NotEmpty(t, vb.VersionBoundaries) + }, + }, + { + name: "ProtocolStateVersionUpgrades", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.ProtocolStateVersionUpgrades().Fixture(), b.ProtocolStateVersionUpgrades().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.ProtocolStateVersionUpgrades().List(n), b.ProtocolStateVersionUpgrades().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + psvu := suite.ProtocolStateVersionUpgrades().Fixture() + assert.Greater(t, psvu.NewProtocolStateVersion, uint64(0)) + }, + }, + { + name: "SetEpochExtensionViewCounts", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.SetEpochExtensionViewCounts().Fixture(), b.SetEpochExtensionViewCounts().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.SetEpochExtensionViewCounts().List(n), b.SetEpochExtensionViewCounts().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + seevc := suite.SetEpochExtensionViewCounts().Fixture() + assert.Greater(t, seevc.Value, uint64(0)) + }, + }, + { + name: "EjectNodes", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.EjectNodes().Fixture(), b.EjectNodes().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.EjectNodes().List(n), b.EjectNodes().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + en := suite.EjectNodes().Fixture() + assert.NotEmpty(t, en.NodeID) + }, + }, + { + name: "EpochSetups", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.EpochSetups().Fixture(), b.EpochSetups().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.EpochSetups().List(n), b.EpochSetups().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + es := suite.EpochSetups().Fixture() + assert.Greater(t, es.Counter, uint64(0)) + assert.NotEmpty(t, es.Participants) + assert.NotEmpty(t, es.Assignments) + assert.Greater(t, es.FinalView, es.FirstView) + }, + }, + { + name: "EpochCommits", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.EpochCommits().Fixture(), b.EpochCommits().Fixture() + }, + 
list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.EpochCommits().List(n), b.EpochCommits().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + ec := suite.EpochCommits().Fixture() + assert.Greater(t, ec.Counter, uint64(0)) + assert.NotEmpty(t, ec.ClusterQCs) + assert.NotNil(t, ec.DKGGroupKey) + assert.NotEmpty(t, ec.DKGParticipantKeys) + }, + }, + { + name: "EpochRecovers", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.EpochRecovers().Fixture(), b.EpochRecovers().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.EpochRecovers().List(n), b.EpochRecovers().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + er := suite.EpochRecovers().Fixture() + assert.Equal(t, er.EpochSetup.Counter, er.EpochCommit.Counter) + assert.NotEmpty(t, er.EpochSetup.Participants) + assert.NotEmpty(t, er.EpochCommit.ClusterQCs) + }, + }, + { + name: "QuorumCertificatesWithSignerIDs", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.QuorumCertificatesWithSignerIDs().Fixture(), b.QuorumCertificatesWithSignerIDs().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.QuorumCertificatesWithSignerIDs().List(n), b.QuorumCertificatesWithSignerIDs().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + qc := suite.QuorumCertificatesWithSignerIDs().Fixture() + assert.Greater(t, qc.View, uint64(0)) + assert.NotEmpty(t, qc.BlockID) + assert.NotEmpty(t, qc.SignerIDs) + }, + }, + { + name: "Blocks", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Blocks().Fixture(), b.Blocks().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Blocks().List(n), b.Blocks().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + block := suite.Blocks().Fixture() + assert.NotNil(t, block) + assert.NotNil(t, block.Payload) + assert.Equal(t, flow.Emulator, block.ChainID) + assert.Greater(t, block.Height, uint64(0)) + assert.Greater(t, block.View, uint64(0)) + + // Test with specific height + block2 := suite.Blocks().Fixture(Block.WithHeight(100)) + assert.Equal(t, uint64(100), block2.Height) + + // Test with specific view + block3 := suite.Blocks().Fixture(Block.WithView(200)) + assert.Equal(t, uint64(200), block3.View) + + // Test with specific chain ID + block4 := suite.Blocks().Fixture(Block.WithChainID(flow.Testnet)) + assert.Equal(t, flow.Testnet, block4.ChainID) + }, + }, + { + name: "Payloads", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Payloads().Fixture(), b.Payloads().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Payloads().List(n), b.Payloads().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + payload := suite.Payloads().Fixture() + assert.NotNil(t, payload) + + // Test with specific seals + seal := suite.Seals().Fixture() + payload2 := suite.Payloads().Fixture(Payload.WithSeals(seal)) + assert.Len(t, payload2.Seals, 1) + assert.Equal(t, seal, payload2.Seals[0]) + + // Test with specific guarantees + guarantee := suite.Guarantees().Fixture() + payload3 := suite.Payloads().Fixture(Payload.WithGuarantees(guarantee)) + assert.Len(t, payload3.Guarantees, 1) + assert.Equal(t, guarantee, payload3.Guarantees[0]) + }, + }, + { + name: "Seals", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Seals().Fixture(), b.Seals().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Seals().List(n), b.Seals().List(n) + }, + sanity: func(t 
*testing.T, suite *GeneratorSuite) { + seal := suite.Seals().Fixture() + assert.NotNil(t, seal) + assert.NotEmpty(t, seal.BlockID) + assert.NotEmpty(t, seal.ResultID) + assert.NotEmpty(t, seal.FinalState) + + // Test with specific block ID + blockID := suite.Identifiers().Fixture() + seal2 := suite.Seals().Fixture(Seal.WithBlockID(blockID)) + assert.Equal(t, blockID, seal2.BlockID) + + // Test with specific result ID + resultID := suite.Identifiers().Fixture() + seal3 := suite.Seals().Fixture(Seal.WithResultID(resultID)) + assert.Equal(t, resultID, seal3.ResultID) + + // Test with specific final state + finalState := suite.StateCommitments().Fixture() + seal4 := suite.Seals().Fixture(Seal.WithFinalState(finalState)) + assert.Equal(t, finalState, seal4.FinalState) + }, + }, + { + name: "CollectionGuarantees", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Guarantees().Fixture(), b.Guarantees().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.Guarantees().List(n), b.Guarantees().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + guarantee := suite.Guarantees().Fixture() + assert.NotNil(t, guarantee) + assert.NotEmpty(t, guarantee.CollectionID) + assert.NotEmpty(t, guarantee.ReferenceBlockID) + assert.NotEmpty(t, guarantee.SignerIndices) + assert.NotEmpty(t, guarantee.Signature) + + // Test with specific collection ID + collectionID := suite.Identifiers().Fixture() + guarantee2 := suite.Guarantees().Fixture(Guarantee.WithCollectionID(collectionID)) + assert.Equal(t, collectionID, guarantee2.CollectionID) + + // Test with specific reference block ID + refBlockID := suite.Identifiers().Fixture() + guarantee3 := suite.Guarantees().Fixture(Guarantee.WithReferenceBlockID(refBlockID)) + assert.Equal(t, refBlockID, guarantee3.ReferenceBlockID) + }, + }, + { + name: "ExecutionReceiptStubs", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.ExecutionReceiptStubs().Fixture(), b.ExecutionReceiptStubs().Fixture() + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + return a.ExecutionReceiptStubs().List(n), b.ExecutionReceiptStubs().List(n) + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + stub := suite.ExecutionReceiptStubs().Fixture() + assert.NotNil(t, stub) + assert.NotEmpty(t, stub.ExecutorID) + assert.NotEmpty(t, stub.ResultID) + assert.NotEmpty(t, stub.Spocks) + assert.NotEmpty(t, stub.ExecutorSignature) + + // Test with specific executor ID + executorID := suite.Identifiers().Fixture() + stub2 := suite.ExecutionReceiptStubs().Fixture(ExecutionReceiptStub.WithExecutorID(executorID)) + assert.Equal(t, executorID, stub2.ExecutorID) + + // Test with specific result ID + resultID := suite.Identifiers().Fixture() + stub3 := suite.ExecutionReceiptStubs().Fixture(ExecutionReceiptStub.WithResultID(resultID)) + assert.Equal(t, resultID, stub3.ResultID) + }, + }, + { + name: "Crypto", + fixture: func(a, b *GeneratorSuite) (any, any) { + return a.Crypto().PrivateKey(crypto.ECDSAP256), b.Crypto().PrivateKey(crypto.ECDSAP256) + }, + list: func(a, b *GeneratorSuite, n int) (any, any) { + keys1 := make([]crypto.PrivateKey, n) + keys2 := make([]crypto.PrivateKey, n) + for i := range n { + keys1[i] = a.Crypto().PrivateKey(crypto.BLSBLS12381) + keys2[i] = b.Crypto().PrivateKey(crypto.BLSBLS12381) + } + return keys1, keys2 + }, + sanity: func(t *testing.T, suite *GeneratorSuite) { + // Test BLS key generation + blsKey := suite.Crypto().PrivateKey(crypto.BLSBLS12381) + assert.NotNil(t, blsKey) + assert.Equal(t, 
crypto.BLSBLS12381, blsKey.Algorithm())

				// Test ECDSA key generation
				ecdsaKey := suite.Crypto().PrivateKey(crypto.ECDSAP256)
				assert.NotNil(t, ecdsaKey)
				assert.Equal(t, crypto.ECDSAP256, ecdsaKey.Algorithm())

				// Test with custom seed
				seed := suite.Random().RandomBytes(crypto.KeyGenSeedMinLen)
				seededKey := suite.Crypto().PrivateKey(crypto.BLSBLS12381, PrivateKey.WithSeed(seed))
				assert.NotNil(t, seededKey)
			},
		},
		{
			name: "RegisterEntries",
			fixture: func(a, b *GeneratorSuite) (any, any) {
				return a.RegisterEntries().Fixture(), b.RegisterEntries().Fixture()
			},
			list: func(a, b *GeneratorSuite, n int) (any, any) {
				return a.RegisterEntries().List(n), b.RegisterEntries().List(n)
			},
			sanity: func(t *testing.T, suite *GeneratorSuite) {
				// Test basic register entry generation
				entry := suite.RegisterEntries().Fixture()
				assert.NotEmpty(t, entry.Key)
				assert.NotEmpty(t, entry.Value)
				// Test with payload
				payload := suite.LedgerPayloads().Fixture()
				entry2 := suite.RegisterEntries().Fixture(RegisterEntry.WithPayload(payload))
				assert.NotEmpty(t, entry2.Key)
				assert.NotEmpty(t, entry2.Value)
			},
		},
		{
			name: "TransactionErrorMessages",
			fixture: func(a, b *GeneratorSuite) (any, any) {
				return a.TransactionErrorMessages().Fixture(), b.TransactionErrorMessages().Fixture()
			},
			list: func(a, b *GeneratorSuite, n int) (any, any) {
				return a.TransactionErrorMessages().List(n), b.TransactionErrorMessages().List(n)
			},
			sanity: func(t *testing.T, suite *GeneratorSuite) {
				// Test basic transaction error message generation
				txErrMsg := suite.TransactionErrorMessages().Fixture()
				assert.NotEmpty(t, txErrMsg.TransactionID)
				assert.NotEmpty(t, txErrMsg.ErrorMessage)
				assert.NotEmpty(t, txErrMsg.ExecutorID)

				// Test ForTransactionResults helper
				txResults := suite.LightTransactionResults().List(5,
					LightTransactionResult.WithFailed(true),
				)
				txErrMsgs := suite.TransactionErrorMessages().ForTransactionResults(txResults)
				assert.Len(t, txErrMsgs, 5)
				for i, txErrMsg := range txErrMsgs {
					assert.Equal(t, txResults[i].TransactionID, txErrMsg.TransactionID)
					assert.Equal(t, uint32(i), txErrMsg.Index)
				}
			},
		},
	}

	suite1 := NewGeneratorSuite(WithSeed(42))
	suite2 := NewGeneratorSuite(WithSeed(42))

	// IMPORTANT: these tests must not be run in parallel, or they will receive non-deterministic
	// random data and fail.

	// Run all deterministic tests first to ensure that both generators are at the same
	// point in their random streams.
+ for _, tt := range tests { + t.Run(fmt.Sprintf("Deterministic %s Fixture", tt.name), func(t *testing.T) { + fixture1, fixture2 := tt.fixture(suite1, suite2) + assert.Equal(t, fixture1, fixture2) + }) + + t.Run(fmt.Sprintf("Deterministic %s List", tt.name), func(t *testing.T) { + count := 3 + list1, list2 := tt.list(suite1, suite2, count) + assert.Len(t, list1, count) + assert.Len(t, list2, count) + assert.Equal(t, list1, list2) + }) + } + + suite3 := NewGeneratorSuite() + suite4 := NewGeneratorSuite() + + for _, tt := range tests { + t.Run(fmt.Sprintf("Non-Deterministic %s Fixture", tt.name), func(t *testing.T) { + fixture1, fixture2 := tt.fixture(suite3, suite4) + assert.NotEqual(t, fixture1, fixture2) + }) + + t.Run(fmt.Sprintf("Non-Deterministic %s List", tt.name), func(t *testing.T) { + count := 3 + list1, list2 := tt.list(suite3, suite4, count) + assert.Len(t, list1, count) + assert.Len(t, list2, count) + assert.NotEqual(t, list1, list2) + }) + } + + for _, tt := range tests { + t.Run(fmt.Sprintf("Sanity Check %s", tt.name), func(t *testing.T) { + tt.sanity(t, suite3) + }) + } +} diff --git a/utils/unittest/fixtures/header.go b/utils/unittest/fixtures/header.go new file mode 100644 index 00000000000..f6315f7db3c --- /dev/null +++ b/utils/unittest/fixtures/header.go @@ -0,0 +1,279 @@ +package fixtures + +import ( + "math" + + "github.com/onflow/flow-go/model/flow" +) + +// Header is the default options factory for [flow.Header] generation. +var Header headerFactory + +type headerFactory struct{} + +type HeaderOption func(*HeaderGenerator, *flow.Header) + +// WithHeight is an option that sets the `Height` of the block header. +func (f headerFactory) WithHeight(height uint64) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.Height = height + } +} + +// WithView is an option that sets the `View` of the block header. +func (f headerFactory) WithView(view uint64) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.View = view + } +} + +// WithChainID is an option that sets the `ChainID` of the block header. +func (f headerFactory) WithChainID(chainID flow.ChainID) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.ChainID = chainID + } +} + +// WithParent is an option that sets the `ParentID`, `ParentView`, and `Height` of the block header based +// on the provided fields. `Height` is set to parent's `Height` + 1. +func (f headerFactory) WithParent(parentID flow.Identifier, parentView uint64, parentHeight uint64) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.ParentID = parentID + header.ParentView = parentView + header.Height = parentHeight + 1 + } +} + +// WithParentView is an option that sets the `ParentView` of the block header. +func (f headerFactory) WithParentView(view uint64) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.ParentView = view + } +} + +// WithParentHeader is an option that sets the following fields of the block header based on the +// provided parent header: +// - `View` +// - `Height` +// - `ChainID` +// - `Timestamp` +// - `ParentID` +// - `ParentView` +// +// If you want a specific value for any of these fields, you should add the appropriate option +// after this option. 
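+//
+// Example usage (an illustrative sketch; assumes a GeneratorSuite g created with NewGeneratorSuite):
+//
+//	parent := g.Headers().Fixture()
+//	child := g.Headers().Fixture(
+//		Header.WithParentHeader(parent),
+//		Header.WithView(parent.View+2), // overrides the view derived from the parent
+//	)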
+func (f headerFactory) WithParentHeader(parent *flow.Header) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.View = parent.View + 1 + header.Height = parent.Height + 1 + header.ChainID = parent.ChainID + header.Timestamp = g.random.Uint64InRange(parent.Timestamp+1, parent.Timestamp+1000) + header.ParentID = parent.ID() + header.ParentView = parent.View + } +} + +// WithProposerID is an option that sets the `ProposerID` of the block header. +func (f headerFactory) WithProposerID(proposerID flow.Identifier) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.ProposerID = proposerID + } +} + +// WithLastViewTC is an option that sets the `LastViewTC` of the block header. +func (f headerFactory) WithLastViewTC(lastViewTC *flow.TimeoutCertificate) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.LastViewTC = lastViewTC + } +} + +// WithPayloadHash is an option that sets the `PayloadHash` of the block header. +func (f headerFactory) WithPayloadHash(hash flow.Identifier) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.PayloadHash = hash + } +} + +// WithTimestamp is an option that sets the `Timestamp` of the block header. +func (f headerFactory) WithTimestamp(timestamp uint64) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.Timestamp = timestamp + } +} + +// WithParentVoterIndices is an option that sets the `ParentVoterIndices` of the block header. +func (f headerFactory) WithParentVoterIndices(indices []byte) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.ParentVoterIndices = indices + } +} + +// WithParentVoterSigData is an option that sets the `ParentVoterSigData` of the block header. +func (f headerFactory) WithParentVoterSigData(data []byte) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.ParentVoterSigData = data + } +} + +// WithSourceOfRandomness is an option that sets the `ParentVoterSigData` of the block header based on +// the provided source of randomness. +func (f headerFactory) WithSourceOfRandomness(source []byte) HeaderOption { + return func(g *HeaderGenerator, header *flow.Header) { + header.ParentVoterSigData = g.quorumCerts.QCSigDataWithSoR(source) + } +} + +// HeaderGenerator generates block headers with consistent randomness. +type HeaderGenerator struct { + headerFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + signatures *SignatureGenerator + signerIndices *SignerIndicesGenerator + quorumCerts *QuorumCertificateGenerator + timeGen *TimeGenerator + + chainID flow.ChainID +} + +func NewBlockHeaderGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + signatures *SignatureGenerator, + signerIndices *SignerIndicesGenerator, + quorumCerts *QuorumCertificateGenerator, + timeGen *TimeGenerator, + chainID flow.ChainID, +) *HeaderGenerator { + return &HeaderGenerator{ + random: random, + identifiers: identifiers, + signatures: signatures, + signerIndices: signerIndices, + quorumCerts: quorumCerts, + timeGen: timeGen, + chainID: chainID, + } +} + +// Fixture generates a [flow.Header] with random data based on the provided options. 
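+//
+// Example usage (an illustrative sketch; assumes a GeneratorSuite g, mirroring generators_test.go):
+//
+//	header := g.Headers().Fixture(
+//		Header.WithChainID(flow.Testnet),
+//		Header.WithHeight(100),
+//	)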
+func (g *HeaderGenerator) Fixture(opts ...HeaderOption) *flow.Header {
+	height := g.random.Uint64InRange(1, math.MaxUint32) // avoiding edge case of height = 0 (genesis block)
+	view := g.random.Uint64InRange(height, height+1000)
+
+	header := &flow.Header{
+		HeaderBody: flow.HeaderBody{
+			ChainID:            g.chainID,
+			ParentID:           g.identifiers.Fixture(),
+			Height:             height,
+			Timestamp:          uint64(g.timeGen.Fixture().UnixMilli()),
+			View:               view,
+			ParentView:         view - 1,
+			ParentVoterIndices: g.signerIndices.Fixture(),
+			ParentVoterSigData: g.signatures.Fixture(),
+			ProposerID:         g.identifiers.Fixture(),
+			LastViewTC:         nil, // default no TC
+		},
+		PayloadHash: g.identifiers.Fixture(),
+	}
+
+	for _, opt := range opts {
+		opt(g, header)
+	}
+
+	if header.View != header.ParentView+1 && header.LastViewTC == nil {
+		newestQC := g.quorumCerts.Fixture(QuorumCertificate.WithView(header.ParentView))
+		header.LastViewTC = &flow.TimeoutCertificate{
+			View:          header.View - 1, // the TC is for the view immediately preceding this block's view
+			NewestQCViews: []uint64{newestQC.View},
+			NewestQC:      newestQC,
+			SignerIndices: g.signerIndices.Fixture(),
+			SigData:       g.signatures.Fixture(),
+		}
+	}
+
+	// View must be strictly greater than ParentView. Since we are generating default values for each
+	// and allowing the caller to independently update them, we need to do some extra bookkeeping to
+	// ensure that the values remain consistent after applying the options. Since the values start
+	// in a consistent state, if they are now inconsistent, there are 3 possible cases:
+	//   1. View was updated and ParentView was not
+	//      -> adjust ParentView to align with the user set value
+	//   2. View was not updated and ParentView was updated
+	//      -> adjust View to align with the user set value
+	//   3. Both were updated
+	//      -> do nothing since the user specifically configured it this way
+	if header.View <= header.ParentView {
+		if header.View != view && header.ParentView == view-1 { // case 1
+			header.ParentView = header.View - 1
+		}
+		if header.View == view && header.ParentView != view-1 { // case 2
+			header.View = header.ParentView + 1
+		}
+	}
+
+	// sanity checks
+	Assertf(header.View > header.ParentView,
+		"view must be strictly greater than parent view: %d > %d", header.View, header.ParentView)
+
+	Assertf(header.Height > 0 || (header.Height == 0 && header.View == 0),
+		"height and view must either both be greater than 0 or both be 0 (genesis): (height: %d, view: %d)",
+		header.Height, header.View)
+
+	Assertf(header.LastViewTC != nil || header.View == header.ParentView+1,
+		"last view TC must be present if view is not equal to parent view + 1: (view: %d, parent view: %d)",
+		header.View, header.ParentView)
+
+	return header
+}
+
+// Genesis instantiates a genesis block header. This block has view and height equal to zero.
+// Conceptually, spork root blocks are functionally equivalent to genesis blocks, and we have decided
+// that, in the long term, the protocol must support spork root blocks with height _and_ view larger than zero.
+// The only option that is honored is WithChainID; all other options are ignored.
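+//
+// Example usage (an illustrative sketch; assumes a GeneratorSuite g):
+//
+//	root := g.Headers().Genesis(Header.WithChainID(flow.Testnet)) // height and view are both 0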
+func (g *HeaderGenerator) Genesis(opts ...HeaderOption) *flow.Header { + config := &flow.Header{ + HeaderBody: flow.HeaderBody{ + ChainID: g.chainID, + }, + } + + // allow overriding the chainID + for _, opt := range opts { + opt(g, config) + } + + header, err := flow.NewRootHeader(flow.UntrustedHeader{ + HeaderBody: flow.HeaderBody{ + ChainID: config.ChainID, + ParentID: flow.ZeroID, + Height: 0, + Timestamp: uint64(flow.GenesisTime.UnixMilli()), + View: 0, + }, + PayloadHash: flow.NewEmptyPayload().Hash(), + }) + NoError(err) + return header +} + +// List generates a chain of [flow.Header]. The first header is generated with the given options, +// and the subsequent headers are generated using the previous header as the parent. +func (g *HeaderGenerator) List(n int, opts ...HeaderOption) []*flow.Header { + headers := make([]*flow.Header, 0, n) + headers = append(headers, g.Fixture(opts...)) + + for i := 1; i < n; i++ { + // give a 50% chance that the view is not ParentView + 1 + view := headers[i-1].View + 1 + if g.random.Bool() { + view += g.random.Uint64InRange(1, 10) + } + + headers = append(headers, g.Fixture( + Header.WithParentHeader(headers[i-1]), + Header.WithView(view), + )) + } + return headers +} diff --git a/utils/unittest/fixtures/identifier.go b/utils/unittest/fixtures/identifier.go new file mode 100644 index 00000000000..6c93e6047a3 --- /dev/null +++ b/utils/unittest/fixtures/identifier.go @@ -0,0 +1,54 @@ +package fixtures + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// Identifier is the default options factory for [flow.Identifier] generation. +var Identifier identifierFactory + +type identifierFactory struct{} + +type IdentifierOption func(*IdentifierGenerator, *identifierConfig) + +// identifierConfig holds the configuration for identifier generation. +type identifierConfig struct { + // Currently no special options needed, but maintaining pattern consistency +} + +// IdentifierGenerator generates identifiers with consistent randomness. +type IdentifierGenerator struct { + identifierFactory //nolint:unused + + random *RandomGenerator +} + +func NewIdentifierGenerator( + random *RandomGenerator, +) *IdentifierGenerator { + return &IdentifierGenerator{ + random: random, + } +} + +// Fixture generates a random [flow.Identifier]. +func (g *IdentifierGenerator) Fixture(opts ...IdentifierOption) flow.Identifier { + config := &identifierConfig{} + + for _, opt := range opts { + opt(g, config) + } + + id, err := flow.ByteSliceToId(g.random.RandomBytes(flow.IdentifierLen)) + NoError(err) + return id +} + +// List generates a list of random [flow.Identifier]. +func (g *IdentifierGenerator) List(n int, opts ...IdentifierOption) flow.IdentifierList { + list := make([]flow.Identifier, n) + for i := range n { + list[i] = g.Fixture(opts...) + } + return list +} diff --git a/utils/unittest/fixtures/identity.go b/utils/unittest/fixtures/identity.go new file mode 100644 index 00000000000..20c43151ec6 --- /dev/null +++ b/utils/unittest/fixtures/identity.go @@ -0,0 +1,162 @@ +package fixtures + +import ( + "github.com/onflow/crypto" + + "github.com/onflow/flow-go/model/flow" +) + +// Identity is the default options factory for [flow.Identity] generation. +var Identity identityFactory + +type identityFactory struct{} + +type IdentityOption func(*IdentityGenerator, *flow.Identity) + +// WithNodeID is an option that sets the `NodeID` of the identity. 
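+//
+// Example usage (an illustrative sketch; assumes a GeneratorSuite g):
+//
+//	nodeID := g.Identifiers().Fixture()
+//	identity := g.Identities().Fixture(Identity.WithNodeID(nodeID))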
+func (f identityFactory) WithNodeID(nodeID flow.Identifier) IdentityOption { + return func(g *IdentityGenerator, identity *flow.Identity) { + identity.NodeID = nodeID + } +} + +// WithRole is an option that sets the `Role` of the identity. +func (f identityFactory) WithRole(role flow.Role) IdentityOption { + return func(g *IdentityGenerator, identity *flow.Identity) { + identity.Role = role + } +} + +// WithAddress is an option that sets the `Address` of the identity. +func (f identityFactory) WithAddress(address string) IdentityOption { + return func(g *IdentityGenerator, identity *flow.Identity) { + identity.Address = address + } +} + +// WithInitialWeight is an option that sets the `InitialWeight` of the identity. +func (f identityFactory) WithInitialWeight(weight uint64) IdentityOption { + return func(g *IdentityGenerator, identity *flow.Identity) { + identity.InitialWeight = weight + } +} + +// WithStakingPubKey is an option that sets the `StakingPubKey` of the identity. +func (f identityFactory) WithStakingPubKey(key crypto.PublicKey) IdentityOption { + return func(g *IdentityGenerator, identity *flow.Identity) { + identity.StakingPubKey = key + } +} + +// WithNetworkPubKey is an option that sets the `NetworkPubKey` of the identity. +func (f identityFactory) WithNetworkPubKey(key crypto.PublicKey) IdentityOption { + return func(g *IdentityGenerator, identity *flow.Identity) { + identity.NetworkPubKey = key + } +} + +// WithEpochParticipationStatus is an option that sets the `EpochParticipationStatus` of the identity. +func (f identityFactory) WithEpochParticipationStatus(status flow.EpochParticipationStatus) IdentityOption { + return func(g *IdentityGenerator, identity *flow.Identity) { + identity.EpochParticipationStatus = status + } +} + +// WithAllRoles is an option that sets the `Role` of the identity. When used with `List()`, it will +// set successive identities to different roles, cycling through all roles. +func (f identityFactory) WithAllRoles() IdentityOption { + return f.WithAllRolesExcept() +} + +// WithAllRolesExcept is an option that sets the `Role` of the identity. When used with `List()`, it will +// set successive identities to different roles, cycling through all roles except the ones specified. +func (f identityFactory) WithAllRolesExcept(except ...flow.Role) IdentityOption { + omitRoles := make(map[flow.Role]struct{}) + for _, role := range except { + omitRoles[role] = struct{}{} + } + + // create a set of roles without the omitted roles + roles := make(flow.RoleList, 0) + for _, role := range flow.Roles() { + if _, ok := omitRoles[role]; !ok { + roles = append(roles, role) + } + } + + i := 0 + return func(g *IdentityGenerator, identity *flow.Identity) { + identity.Role = roles[i%len(roles)] + i++ + } +} + +// IdentityGenerator generates identities with consistent randomness. +type IdentityGenerator struct { + identityFactory + + random *RandomGenerator + cryptoGen *CryptoGenerator + identifiers *IdentifierGenerator + addresses *AddressGenerator +} + +// NewIdentityGenerator creates a new IdentityGenerator. +func NewIdentityGenerator( + random *RandomGenerator, + cryptoGen *CryptoGenerator, + identifiers *IdentifierGenerator, + addresses *AddressGenerator, +) *IdentityGenerator { + return &IdentityGenerator{ + random: random, + cryptoGen: cryptoGen, + identifiers: identifiers, + addresses: addresses, + } +} + +// Fixture generates a [flow.Identity] with random data based on the provided options. 
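+//
+// Example usage (an illustrative sketch; assumes a GeneratorSuite g, mirroring generators_test.go):
+//
+//	consensus := g.Identities().Fixture(Identity.WithRole(flow.RoleConsensus))
+//	committee := g.Identities().List(10, Identity.WithAllRoles())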
+func (g *IdentityGenerator) Fixture(opts ...IdentityOption) *flow.Identity {
+	statuses := []flow.EpochParticipationStatus{
+		flow.EpochParticipationStatusActive,
+		flow.EpochParticipationStatusLeaving,
+		flow.EpochParticipationStatusJoining,
+		// omit ejected status
+	}
+
+	// 50% chance of being 1000 weight
+	weight := uint64(1000)
+	if g.random.Bool() {
+		weight = g.random.Uint64InRange(1, 999)
+	}
+
+	identity := &flow.Identity{
+		IdentitySkeleton: flow.IdentitySkeleton{
+			NodeID:        g.identifiers.Fixture(),
+			Address:       g.addresses.Fixture().String(),
+			Role:          RandomElement(g.random, flow.Roles()),
+			InitialWeight: weight,
+			StakingPubKey: g.cryptoGen.StakingPrivateKey().PublicKey(),
+			NetworkPubKey: g.cryptoGen.NetworkingPrivateKey().PublicKey(),
+		},
+		DynamicIdentity: flow.DynamicIdentity{
+			EpochParticipationStatus: RandomElement(g.random, statuses),
+		},
+	}
+
+	for _, opt := range opts {
+		opt(g, identity)
+	}
+
+	return identity
+}
+
+// List generates a list of [flow.Identity].
+func (g *IdentityGenerator) List(n int, opts ...IdentityOption) flow.IdentityList {
+	identities := make(flow.IdentityList, n)
+	for i := range n {
+		identities[i] = g.Fixture(opts...)
+	}
+	return identities
+}
diff --git a/utils/unittest/fixtures/identity_test.go b/utils/unittest/fixtures/identity_test.go
new file mode 100644
index 00000000000..97f5e308d8d
--- /dev/null
+++ b/utils/unittest/fixtures/identity_test.go
@@ -0,0 +1,67 @@
+package fixtures
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// TestList_WithAllRoles tests that the WithAllRoles and WithAllRolesExcept methods work as expected.
+func TestList_WithAllRoles(t *testing.T) {
+	suite := NewGeneratorSuite()
+
+	g := suite.Identities()
+
+	t.Run("Fixture: WithAllRoles uses first role", func(t *testing.T) {
+		identity := g.Fixture(g.WithAllRoles())
+
+		expected := flow.Roles()[0]
+		assert.Equal(t, expected, identity.Role)
+	})
+
+	t.Run("Fixture: WithAllRolesExcept skips omitted role", func(t *testing.T) {
+		identity := g.Fixture(g.WithAllRolesExcept(flow.RoleCollection))
+
+		expected := flow.Roles()[1] // index 0 is collection
+		assert.Equal(t, expected, identity.Role)
+	})
+
+	t.Run("List: WithAllRoles returns all roles", func(t *testing.T) {
+		identities := g.List(5, g.WithAllRoles())
+		require.Len(t, identities, len(flow.Roles()))
+
+		for i, role := range flow.Roles() {
+			require.Equal(t, role, identities[i].Role)
+		}
+	})
+
+	t.Run("List: WithAllRolesExcept returns all roles except the ones specified", func(t *testing.T) {
+		identities := g.List(4, g.WithAllRolesExcept(flow.RoleCollection))
+		require.Len(t, identities, len(flow.Roles())-1)
+
+		i := 0
+		for _, role := range flow.Roles() {
+			if role != flow.RoleCollection {
+				assert.Equal(t, role, identities[i].Role)
+				i++
+			}
+		}
+	})
+
+	t.Run("List: WithAllRoles cycles through roles when more than 5 are requested", func(t *testing.T) {
+		expected := make([]flow.Role, 0, 20)
+		for len(expected) < 18 {
+			expected = append(expected, flow.Roles()...)
+		}
+
+		identities := g.List(18, g.WithAllRoles())
+		require.Len(t, identities, 18)
+
+		for i, role := range expected[:18] {
+			assert.Equal(t, role, identities[i].Role)
+		}
+	})
+}
diff --git a/utils/unittest/fixtures/ledger_path.go b/utils/unittest/fixtures/ledger_path.go
new file mode 100644
index 00000000000..a6bc5761478
--- /dev/null
+++ b/utils/unittest/fixtures/ledger_path.go
@@ -0,0 +1,64 @@
+package fixtures
+
+import (
+	"github.com/onflow/flow-go/ledger"
+)
+
+// LedgerPath is the default options factory for [ledger.Path] generation.
+var LedgerPath ledgerPathFactory
+
+type ledgerPathFactory struct{}
+
+type LedgerPathOption func(*LedgerPathGenerator, *ledgerPathConfig)
+
+// ledgerPathConfig holds the configuration for ledger path generation.
+type ledgerPathConfig struct {
+	// Currently no special options needed, but maintaining pattern consistency
+}
+
+// LedgerPathGenerator generates ledger paths with consistent randomness.
+type LedgerPathGenerator struct {
+	ledgerPathFactory //nolint:unused
+
+	random *RandomGenerator
+}
+
+func NewLedgerPathGenerator(
+	random *RandomGenerator,
+) *LedgerPathGenerator {
+	return &LedgerPathGenerator{
+		random: random,
+	}
+}
+
+// Fixture generates a single random [ledger.Path].
+func (g *LedgerPathGenerator) Fixture(opts ...LedgerPathOption) ledger.Path {
+	config := &ledgerPathConfig{}
+
+	for _, opt := range opts {
+		opt(g, config)
+	}
+
+	var path ledger.Path
+	pathData := g.random.RandomBytes(ledger.PathLen)
+	copy(path[:], pathData)
+	return path
+}
+
+// List generates a list of unique random [ledger.Path].
+func (g *LedgerPathGenerator) List(n int, opts ...LedgerPathOption) []ledger.Path {
+	paths := make([]ledger.Path, 0, n)
+	alreadySelectedPaths := make(map[ledger.Path]bool)
+	i := 0
+	for i < n {
+		path := g.Fixture(opts...)
+
+		// deduplicate
+		if _, found := alreadySelectedPaths[path]; !found {
+			paths = append(paths, path)
+			alreadySelectedPaths[path] = true
+			i++
+		}
+	}
+	return paths
+}
diff --git a/utils/unittest/fixtures/ledger_payload.go b/utils/unittest/fixtures/ledger_payload.go
new file mode 100644
index 00000000000..584ae034b8f
--- /dev/null
+++ b/utils/unittest/fixtures/ledger_payload.go
@@ -0,0 +1,96 @@
+package fixtures
+
+import (
+	"github.com/onflow/flow-go/ledger"
+)
+
+// LedgerPayload is the default options factory for [ledger.Payload] generation.
+var LedgerPayload ledgerPayloadFactory
+
+type ledgerPayloadFactory struct{}
+
+type LedgerPayloadOption func(*LedgerPayloadGenerator, *payloadConfig)
+
+// payloadConfig holds the configuration for payload generation.
+type payloadConfig struct {
+	minSize int
+	maxSize int
+	value   ledger.Value
+}
+
+// WithSize is an option that sets the size range (in bytes) used for the payload's key part and default value.
+func (f ledgerPayloadFactory) WithSize(minSize, maxSize int) LedgerPayloadOption {
+	return func(g *LedgerPayloadGenerator, config *payloadConfig) {
+		config.minSize = minSize
+		config.maxSize = maxSize
+	}
+}
+
+// WithValue is an option that sets the value for the payload.
+func (f ledgerPayloadFactory) WithValue(value ledger.Value) LedgerPayloadOption {
+	return func(g *LedgerPayloadGenerator, config *payloadConfig) {
+		config.value = value
+	}
+}
+
+// LedgerPayloadGenerator generates ledger payloads with consistent randomness.
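+//
+// Example usage (an illustrative sketch; assumes a GeneratorSuite g):
+//
+//	payload := g.LedgerPayloads().Fixture(LedgerPayload.WithSize(4, 16))
+//	fixedValue := g.LedgerPayloads().Fixture(LedgerPayload.WithValue(g.LedgerValues().Fixture()))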
+type LedgerPayloadGenerator struct { + ledgerPayloadFactory + + random *RandomGenerator + addresses *AddressGenerator + ledgerValues *LedgerValueGenerator +} + +func NewLedgerPayloadGenerator( + random *RandomGenerator, + addresses *AddressGenerator, + ledgerValues *LedgerValueGenerator, +) *LedgerPayloadGenerator { + return &LedgerPayloadGenerator{ + random: random, + addresses: addresses, + ledgerValues: ledgerValues, + } +} + +// Fixture generates a single random [ledger.Payload]. +func (g *LedgerPayloadGenerator) Fixture(opts ...LedgerPayloadOption) *ledger.Payload { + config := &payloadConfig{ + minSize: 1, + maxSize: 8, + } + + for _, opt := range opts { + opt(g, config) + } + + Assert(config.minSize <= config.maxSize, "minSize must be less than or equal to maxSize") + + if config.value == nil { + config.value = g.ledgerValues.Fixture(LedgerValue.WithSize(config.minSize, config.maxSize)) + } + + return g.generatePayload(config.minSize, config.maxSize, config.value) +} + +// List generates a list of random [ledger.Payload]. +func (g *LedgerPayloadGenerator) List(n int, opts ...LedgerPayloadOption) []*ledger.Payload { + res := make([]*ledger.Payload, n) + for i := range n { + res[i] = g.Fixture(opts...) + } + return res +} + +// generatePayload returns a random [ledger.Payload]. +func (g *LedgerPayloadGenerator) generatePayload(minByteSize int, maxByteSize int, value ledger.Value) *ledger.Payload { + keyByteSize := g.random.IntInRange(minByteSize, maxByteSize) + parts := []ledger.KeyPart{ + ledger.NewKeyPart(ledger.KeyPartOwner, g.addresses.Fixture().Bytes()), + ledger.NewKeyPart(ledger.KeyPartKey, g.random.RandomBytes(keyByteSize)), + } + key := ledger.NewKey(parts) + + return ledger.NewPayload(key, value) +} diff --git a/utils/unittest/fixtures/ledger_value.go b/utils/unittest/fixtures/ledger_value.go new file mode 100644 index 00000000000..56d8d266db5 --- /dev/null +++ b/utils/unittest/fixtures/ledger_value.go @@ -0,0 +1,75 @@ +package fixtures + +import ( + "github.com/onflow/flow-go/ledger" +) + +// LedgerValue is the default options factory for [ledger.Value] generation. +var LedgerValue ledgerValueFactory + +type ledgerValueFactory struct{} + +type LedgerValueOption func(*LedgerValueGenerator, *valueConfig) + +// valueConfig holds the configuration for value generation. +type valueConfig struct { + minSize int + maxSize int +} + +// WithSize is an option that sets the value size range [minSize, maxSize). +func (f ledgerValueFactory) WithSize(minSize, maxSize int) LedgerValueOption { + return func(g *LedgerValueGenerator, config *valueConfig) { + config.minSize = minSize + config.maxSize = maxSize + } +} + +// LedgerValueGenerator generates ledger values with consistent randomness. +type LedgerValueGenerator struct { + ledgerValueFactory + + random *RandomGenerator +} + +func NewLedgerValueGenerator( + random *RandomGenerator, +) *LedgerValueGenerator { + return &LedgerValueGenerator{ + random: random, + } +} + +// Fixture generates a single random [ledger.Value]. +func (g *LedgerValueGenerator) Fixture(opts ...LedgerValueOption) ledger.Value { + config := &valueConfig{ + minSize: 1, + maxSize: 8, + } + + for _, opt := range opts { + opt(g, config) + } + + Assert(config.minSize <= config.maxSize, "minSize must be less than or equal to maxSize") + + return g.generateValue(config.minSize, config.maxSize) +} + +// List generates a list of random [ledger.Value]. 
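+//
+// Example usage (an illustrative sketch; assumes a GeneratorSuite g; sizes are drawn from [minSize, maxSize)):
+//
+//	values := g.LedgerValues().List(3, LedgerValue.WithSize(8, 32))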
+func (g *LedgerValueGenerator) List(n int, opts ...LedgerValueOption) []ledger.Value {
+	values := make([]ledger.Value, n)
+	for i := range n {
+		values[i] = g.Fixture(opts...)
+	}
+	return values
+}
+
+// generateValue returns a random [ledger.Value] with variable size (minByteSize <= size < maxByteSize).
+func (g *LedgerValueGenerator) generateValue(minByteSize, maxByteSize int) ledger.Value {
+	byteSize := maxByteSize
+	if minByteSize < maxByteSize {
+		byteSize = g.random.IntInRange(minByteSize, maxByteSize)
+	}
+	return g.random.RandomBytes(byteSize)
+}
diff --git a/utils/unittest/fixtures/payload.go b/utils/unittest/fixtures/payload.go
new file mode 100644
index 00000000000..3fa7ca617ce
--- /dev/null
+++ b/utils/unittest/fixtures/payload.go
@@ -0,0 +1,121 @@
+package fixtures
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// Payload is the default options factory for [flow.Payload] generation.
+var Payload payloadFactory
+
+type payloadFactory struct{}
+
+type PayloadOption func(*PayloadGenerator, *flow.Payload)
+
+// WithSeals is an option that sets the `Seals` of the payload.
+func (f payloadFactory) WithSeals(seals ...*flow.Seal) PayloadOption {
+	return func(g *PayloadGenerator, payload *flow.Payload) {
+		payload.Seals = seals
+	}
+}
+
+// WithGuarantees is an option that sets the `Guarantees` of the payload.
+func (f payloadFactory) WithGuarantees(guarantees ...*flow.CollectionGuarantee) PayloadOption {
+	return func(g *PayloadGenerator, payload *flow.Payload) {
+		payload.Guarantees = guarantees
+	}
+}
+
+// WithReceipts is an option that sets the `Receipts` and `Results` of the payload by adding all
+// receipts and their `ExecutionResults` to the payload.
+//
+// To add only receipts, use `WithReceiptStubs` instead.
+// e.g.
+//
+//	Payload.WithReceiptStubs(
+//		flow.ExecutionReceiptList(receipts).Stubs()...,
+//	)
+func (f payloadFactory) WithReceipts(receipts ...*flow.ExecutionReceipt) PayloadOption {
+	return func(g *PayloadGenerator, payload *flow.Payload) {
+		for _, receipt := range receipts {
+			payload.Receipts = append(payload.Receipts, receipt.Stub())
+			payload.Results = append(payload.Results, &receipt.ExecutionResult)
+		}
+	}
+}
+
+// WithReceiptStubs is an option that sets the `Receipts` of the payload.
+func (f payloadFactory) WithReceiptStubs(receipts ...*flow.ExecutionReceiptStub) PayloadOption {
+	return func(g *PayloadGenerator, payload *flow.Payload) {
+		payload.Receipts = receipts
+	}
+}
+
+// WithResults is an option that sets the `Results` of the payload.
+func (f payloadFactory) WithResults(results flow.ExecutionResultList) PayloadOption {
+	return func(g *PayloadGenerator, payload *flow.Payload) {
+		payload.Results = results
+	}
+}
+
+// WithProtocolStateID is an option that sets the `ProtocolStateID` of the payload.
+func (f payloadFactory) WithProtocolStateID(protocolStateID flow.Identifier) PayloadOption {
+	return func(g *PayloadGenerator, payload *flow.Payload) {
+		payload.ProtocolStateID = protocolStateID
+	}
+}
+
+// PayloadGenerator generates block payloads with consistent randomness.
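+//
+// Example usage (an illustrative sketch; assumes a GeneratorSuite g):
+//
+//	receipt := g.ExecutionReceipts().Fixture()
+//	payload := g.Payloads().Fixture(
+//		Payload.WithReceipts(receipt), // adds both the receipt stub and its result
+//	)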
+type PayloadGenerator struct {
+	payloadFactory
+
+	random                *RandomGenerator
+	identifiers           *IdentifierGenerator
+	guarantees            *CollectionGuaranteeGenerator
+	seals                 *SealGenerator
+	executionReceiptStubs *ExecutionReceiptStubGenerator
+	executionResults      *ExecutionResultGenerator
+}
+
+func NewPayloadGenerator(
+	random *RandomGenerator,
+	identifiers *IdentifierGenerator,
+	guarantees *CollectionGuaranteeGenerator,
+	seals *SealGenerator,
+	executionReceiptStubs *ExecutionReceiptStubGenerator,
+	executionResults *ExecutionResultGenerator,
+) *PayloadGenerator {
+	return &PayloadGenerator{
+		random:                random,
+		identifiers:           identifiers,
+		guarantees:            guarantees,
+		seals:                 seals,
+		executionReceiptStubs: executionReceiptStubs,
+		executionResults:      executionResults,
+	}
+}
+
+// Fixture generates a [flow.Payload] with random data based on the provided options.
+func (g *PayloadGenerator) Fixture(opts ...PayloadOption) *flow.Payload {
+	payload := &flow.Payload{
+		Guarantees:      g.guarantees.List(g.random.IntInRange(0, 4)),
+		Seals:           g.seals.List(g.random.IntInRange(0, 3)),
+		Receipts:        g.executionReceiptStubs.List(g.random.IntInRange(0, 10)),
+		Results:         g.executionResults.List(g.random.IntInRange(0, 10)),
+		ProtocolStateID: g.identifiers.Fixture(),
+	}
+
+	for _, opt := range opts {
+		opt(g, payload)
+	}
+
+	return payload
+}
+
+// List generates a list of [flow.Payload].
+func (g *PayloadGenerator) List(n int, opts ...PayloadOption) []*flow.Payload {
+	payloads := make([]*flow.Payload, n)
+	for i := range n {
+		payloads[i] = g.Fixture(opts...)
+	}
+	return payloads
+}
diff --git a/utils/unittest/fixtures/proposal_key.go b/utils/unittest/fixtures/proposal_key.go
new file mode 100644
index 00000000000..ac820e55fe2
--- /dev/null
+++ b/utils/unittest/fixtures/proposal_key.go
@@ -0,0 +1,72 @@
+package fixtures
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// ProposalKey is the default options factory for [flow.ProposalKey] generation.
+var ProposalKey proposalKeyFactory
+
+type proposalKeyFactory struct{}
+
+type ProposalKeyOption func(*ProposalKeyGenerator, *flow.ProposalKey)
+
+// WithAddress is an option that sets the address for the proposal key.
+func (f proposalKeyFactory) WithAddress(address flow.Address) ProposalKeyOption {
+	return func(g *ProposalKeyGenerator, key *flow.ProposalKey) {
+		key.Address = address
+	}
+}
+
+// WithKeyIndex is an option that sets the key index for the proposal key.
+func (f proposalKeyFactory) WithKeyIndex(keyIndex uint32) ProposalKeyOption {
+	return func(g *ProposalKeyGenerator, key *flow.ProposalKey) {
+		key.KeyIndex = keyIndex
+	}
+}
+
+// WithSequenceNumber is an option that sets the sequence number for the proposal key.
+func (f proposalKeyFactory) WithSequenceNumber(sequenceNumber uint64) ProposalKeyOption {
+	return func(g *ProposalKeyGenerator, key *flow.ProposalKey) {
+		key.SequenceNumber = sequenceNumber
+	}
+}
+
+// ProposalKeyGenerator generates proposal keys with consistent randomness.
+type ProposalKeyGenerator struct {
+	proposalKeyFactory
+
+	addresses *AddressGenerator
+}
+
+func NewProposalKeyGenerator(
+	addresses *AddressGenerator,
+) *ProposalKeyGenerator {
+	return &ProposalKeyGenerator{
+		addresses: addresses,
+	}
+}
+
+// Fixture generates a [flow.ProposalKey] with random data based on the provided options.
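+//
+// Example usage (an illustrative sketch; assumes a GeneratorSuite g):
+//
+//	key := g.ProposalKeys().Fixture(
+//		ProposalKey.WithKeyIndex(0),
+//		ProposalKey.WithSequenceNumber(42),
+//	)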
+func (g *ProposalKeyGenerator) Fixture(opts ...ProposalKeyOption) flow.ProposalKey {
+	key := flow.ProposalKey{
+		Address:        g.addresses.Fixture(),
+		KeyIndex:       1,
+		SequenceNumber: 0,
+	}
+
+	for _, opt := range opts {
+		opt(g, &key)
+	}
+
+	return key
+}
+
+// List generates a list of [flow.ProposalKey].
+func (g *ProposalKeyGenerator) List(n int, opts ...ProposalKeyOption) []flow.ProposalKey {
+	list := make([]flow.ProposalKey, n)
+	for i := range n {
+		list[i] = g.Fixture(opts...)
+	}
+	return list
+}
diff --git a/utils/unittest/fixtures/quorum_certificate.go b/utils/unittest/fixtures/quorum_certificate.go
new file mode 100644
index 00000000000..6a37c52647b
--- /dev/null
+++ b/utils/unittest/fixtures/quorum_certificate.go
@@ -0,0 +1,218 @@
+package fixtures
+
+import (
+	hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// QuorumCertificate is the default options factory for [flow.QuorumCertificate] generation.
+var QuorumCertificate quorumCertificateFactory
+
+type quorumCertificateFactory struct{}
+
+type QuorumCertificateOption func(*QuorumCertificateGenerator, *flow.QuorumCertificate)
+
+// WithView is an option that sets the view of the quorum certificate.
+func (f quorumCertificateFactory) WithView(view uint64) QuorumCertificateOption {
+	return func(g *QuorumCertificateGenerator, qc *flow.QuorumCertificate) {
+		qc.View = view
+	}
+}
+
+// WithRootBlockID is an option that sets the block ID of the quorum certificate and resets its
+// view to 0 (the view used for root QCs in these fixtures).
+func (f quorumCertificateFactory) WithRootBlockID(blockID flow.Identifier) QuorumCertificateOption {
+	return func(g *QuorumCertificateGenerator, qc *flow.QuorumCertificate) {
+		qc.BlockID = blockID
+		qc.View = 0
+	}
+}
+
+// WithBlockID is an option that sets the block ID of the quorum certificate.
+func (f quorumCertificateFactory) WithBlockID(blockID flow.Identifier) QuorumCertificateOption {
+	return func(g *QuorumCertificateGenerator, qc *flow.QuorumCertificate) {
+		qc.BlockID = blockID
+	}
+}
+
+// CertifiesBlock is an option that sets the block ID and view of the quorum certificate to match
+// the provided header.
+func (f quorumCertificateFactory) CertifiesBlock(header *flow.Header) QuorumCertificateOption {
+	return func(g *QuorumCertificateGenerator, qc *flow.QuorumCertificate) {
+		qc.View = header.View
+		qc.BlockID = header.ID()
+	}
+}
+
+// WithSignerIndices is an option that sets the signer indices of the quorum certificate.
+func (f quorumCertificateFactory) WithSignerIndices(signerIndices []byte) QuorumCertificateOption {
+	return func(g *QuorumCertificateGenerator, qc *flow.QuorumCertificate) {
+		qc.SignerIndices = signerIndices
+	}
+}
+
+// WithRandomnessSource is an option that sets the source of randomness for the quorum certificate.
+func (f quorumCertificateFactory) WithRandomnessSource(source []byte) QuorumCertificateOption {
+	return func(g *QuorumCertificateGenerator, qc *flow.QuorumCertificate) {
+		qc.SigData = g.QCSigDataWithSoR(source)
+	}
+}
+
+// QuorumCertificateGenerator generates quorum certificates with consistent randomness.
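+//
+// A typical usage sketch (illustrative; `header` is assumed to be an existing [flow.Header] fixture):
+//
+//	qc := g.Fixture(QuorumCertificate.CertifiesBlock(header))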
+type QuorumCertificateGenerator struct {
+	quorumCertificateFactory
+
+	random        *RandomGenerator
+	identifiers   *IdentifierGenerator
+	signerIndices *SignerIndicesGenerator
+	signatures    *SignatureGenerator
+}
+
+func NewQuorumCertificateGenerator(
+	random *RandomGenerator,
+	identifiers *IdentifierGenerator,
+	signerIndices *SignerIndicesGenerator,
+	signatures *SignatureGenerator,
+) *QuorumCertificateGenerator {
+	return &QuorumCertificateGenerator{
+		random:        random,
+		identifiers:   identifiers,
+		signerIndices: signerIndices,
+		signatures:    signatures,
+	}
+}
+
+// Fixture generates a [flow.QuorumCertificate] with random data based on the provided options.
+func (g *QuorumCertificateGenerator) Fixture(opts ...QuorumCertificateOption) *flow.QuorumCertificate {
+	qc := &flow.QuorumCertificate{
+		View:          uint64(g.random.Uint32()),
+		BlockID:       g.identifiers.Fixture(),
+		SignerIndices: g.signerIndices.Fixture(SignerIndices.WithSignerCount(10, 3)),
+		SigData:       g.QCSigDataWithSoR(nil),
+	}
+
+	for _, opt := range opts {
+		opt(g, qc)
+	}
+
+	return qc
+}
+
+// List generates a list of [flow.QuorumCertificate].
+func (g *QuorumCertificateGenerator) List(n int, opts ...QuorumCertificateOption) []*flow.QuorumCertificate {
+	list := make([]*flow.QuorumCertificate, n)
+	for i := range n {
+		list[i] = g.Fixture(opts...)
+	}
+	return list
+}
+
+// QCSigDataWithSoR generates encoded signature data for a [flow.QuorumCertificate], embedding the
+// given source of randomness (SoR). If source is empty, a random source of randomness is generated.
+func (g *QuorumCertificateGenerator) QCSigDataWithSoR(source []byte) []byte {
+	packer := hotstuff.SigDataPacker{}
+	sigData := g.qcRawSignatureData(source)
+	encoded, err := packer.Encode(&sigData)
+	NoError(err)
+	return encoded
+}
+
+// qcRawSignatureData generates the raw (unencoded) signature data for a [flow.QuorumCertificate].
+func (g *QuorumCertificateGenerator) qcRawSignatureData(source []byte) hotstuff.SignatureData {
+	sigType := g.random.RandomBytes(5)
+	for i := range sigType {
+		// constrain each entry to 0 or 1, the two valid signature types
+		sigType[i] = sigType[i] % 2
+	}
+	sigData := hotstuff.SignatureData{
+		SigType:                      sigType,
+		AggregatedStakingSig:         g.signatures.Fixture(),
+		AggregatedRandomBeaconSig:    g.signatures.Fixture(),
+		ReconstructedRandomBeaconSig: source,
+	}
+	if len(sigData.ReconstructedRandomBeaconSig) == 0 {
+		sigData.ReconstructedRandomBeaconSig = g.signatures.Fixture()
+	}
+	return sigData
+}
+
+// QuorumCertificateWithSignerIDs is the default options factory for
+// [flow.QuorumCertificateWithSignerIDs] generation.
+var QuorumCertificateWithSignerIDs quorumCertificateWithSignerIDsFactory
+
+type quorumCertificateWithSignerIDsFactory struct{}
+
+type QuorumCertificateWithSignerIDsOption func(*QuorumCertificateWithSignerIDsGenerator, *flow.QuorumCertificateWithSignerIDs)
+
+// WithView is an option that sets the `View` of the quorum certificate with signer IDs.
+func (f quorumCertificateWithSignerIDsFactory) WithView(view uint64) QuorumCertificateWithSignerIDsOption {
+	return func(g *QuorumCertificateWithSignerIDsGenerator, qc *flow.QuorumCertificateWithSignerIDs) {
+		qc.View = view
+	}
+}
+
+// WithSignerIDs is an option that sets the `SignerIDs` of the quorum certificate with signer IDs.
+func (f quorumCertificateWithSignerIDsFactory) WithSignerIDs(signerIDs flow.IdentifierList) QuorumCertificateWithSignerIDsOption {
+	return func(g *QuorumCertificateWithSignerIDsGenerator, qc *flow.QuorumCertificateWithSignerIDs) {
+		qc.SignerIDs = signerIDs
+	}
+}
+
+// WithBlockID is an option that sets the `BlockID` of the quorum certificate with signer IDs.
+func (f quorumCertificateWithSignerIDsFactory) WithBlockID(blockID flow.Identifier) QuorumCertificateWithSignerIDsOption {
+	return func(g *QuorumCertificateWithSignerIDsGenerator, qc *flow.QuorumCertificateWithSignerIDs) {
+		qc.BlockID = blockID
+	}
+}
+
+// WithSigData is an option that sets the `SigData` of the quorum certificate with signer IDs.
+func (f quorumCertificateWithSignerIDsFactory) WithSigData(sigData []byte) QuorumCertificateWithSignerIDsOption {
+	return func(g *QuorumCertificateWithSignerIDsGenerator, qc *flow.QuorumCertificateWithSignerIDs) {
+		qc.SigData = sigData
+	}
+}
+
+// QuorumCertificateWithSignerIDsGenerator generates [flow.QuorumCertificateWithSignerIDs] with
+// consistent randomness.
+type QuorumCertificateWithSignerIDsGenerator struct {
+	quorumCertificateWithSignerIDsFactory
+
+	random      *RandomGenerator
+	identifiers *IdentifierGenerator
+	quorumCerts *QuorumCertificateGenerator
+}
+
+func NewQuorumCertificateWithSignerIDsGenerator(
+	random *RandomGenerator,
+	identifiers *IdentifierGenerator,
+	quorumCerts *QuorumCertificateGenerator,
+) *QuorumCertificateWithSignerIDsGenerator {
+	return &QuorumCertificateWithSignerIDsGenerator{
+		random:      random,
+		identifiers: identifiers,
+		quorumCerts: quorumCerts,
+	}
+}
+
+// Fixture generates a [flow.QuorumCertificateWithSignerIDs] with random data based on the provided options.
+func (g *QuorumCertificateWithSignerIDsGenerator) Fixture(opts ...QuorumCertificateWithSignerIDsOption) *flow.QuorumCertificateWithSignerIDs {
+	qc := &flow.QuorumCertificateWithSignerIDs{
+		View:      uint64(g.random.Uint32()),
+		BlockID:   g.identifiers.Fixture(),
+		SignerIDs: g.identifiers.List(10),
+		SigData:   g.quorumCerts.QCSigDataWithSoR(nil),
+	}
+
+	for _, opt := range opts {
+		opt(g, qc)
+	}
+
+	return qc
+}
+
+// List generates a list of [flow.QuorumCertificateWithSignerIDs].
+func (g *QuorumCertificateWithSignerIDsGenerator) List(n int, opts ...QuorumCertificateWithSignerIDsOption) []*flow.QuorumCertificateWithSignerIDs {
+	list := make([]*flow.QuorumCertificateWithSignerIDs, n)
+	for i := range n {
+		list[i] = g.Fixture(opts...)
+	}
+	return list
+}
diff --git a/utils/unittest/fixtures/random.go b/utils/unittest/fixtures/random.go
new file mode 100644
index 00000000000..64afd7f6f1b
--- /dev/null
+++ b/utils/unittest/fixtures/random.go
@@ -0,0 +1,126 @@
+package fixtures
+
+import (
+	"math/rand"
+)
+
+const (
+	// charset is the set of characters used to generate random strings.
+	charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+)
+
+// RandomGenerator provides random value generation with consistent randomness.
+// Exposes all methods from *rand.Rand, as well as additional helper methods to reduce
+// boilerplate for common cases such as bounded integers, byte slices, and strings.
+type RandomGenerator struct {
+	*rand.Rand
+}
+
+func NewRandomGenerator(rng *rand.Rand) *RandomGenerator {
+	return &RandomGenerator{
+		Rand: rng,
+	}
+}
+
+// RandomBytes generates n random bytes.
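+//
+// For example (an illustrative sketch, showing that a fixed seed yields deterministic output):
+//
+//	g := NewRandomGenerator(rand.New(rand.NewSource(42)))
+//	salt := g.RandomBytes(16) // same 16 bytes on every run with this seed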
+func (g *RandomGenerator) RandomBytes(n int) []byte {
+	bytes := make([]byte, n)
+	read, err := g.Read(bytes)
+	NoError(err)
+	Assertf(read == n, "expected to read %d bytes, got %d", n, read)
+
+	return bytes
+}
+
+// RandomString generates a random string of the specified length.
+func (g *RandomGenerator) RandomString(length uint) string {
+	result := make([]byte, length)
+	for i := range length {
+		result[i] = charset[g.Intn(len(charset))]
+	}
+	return string(result)
+}
+
+// Uint64n generates a random uint64 strictly less than `n`.
+// Uses rand.Int63n to generate a random int64 and then casts it to uint64. n MUST be > 0.
+func (g *RandomGenerator) Uint64n(n uint64) uint64 {
+	return uint64(g.Int63n(int64(n)))
+}
+
+// Uint32n generates a random uint32 strictly less than `n`.
+// Uses rand.Int31n to generate a random int32 and then casts it to uint32. n MUST be > 0.
+func (g *RandomGenerator) Uint32n(n uint32) uint32 {
+	return uint32(g.Int31n(int32(n)))
+}
+
+// Uint16n generates a random uint16 strictly less than `n`.
+// Uses rand.Int31n to generate a random int32 and then casts it to uint16. n MUST be > 0.
+func (g *RandomGenerator) Uint16n(n uint16) uint16 {
+	return uint16(g.Int31n(int32(n)))
+}
+
+// Uintn generates a random uint strictly less than `n`.
+// Uses rand.Int63n to generate a random int64 and then casts it to uint. n MUST be > 0.
+func (g *RandomGenerator) Uintn(n uint) uint {
+	return uint(g.Int63n(int64(n)))
+}
+
+// Uint64InRange generates a random uint64 in the inclusive range [min, max].
+// `max` must be greater than or equal to `min`, otherwise the method panics.
+func (g *RandomGenerator) Uint64InRange(min, max uint64) uint64 {
+	return InclusiveRange(g, min, max)
+}
+
+// Uint32InRange generates a random uint32 in the inclusive range [min, max].
+// `max` must be greater than or equal to `min`, otherwise the method panics.
+func (g *RandomGenerator) Uint32InRange(min, max uint32) uint32 {
+	return InclusiveRange(g, min, max)
+}
+
+// UintInRange generates a random uint in the inclusive range [min, max].
+// `max` must be greater than or equal to `min`, otherwise the method panics.
+func (g *RandomGenerator) UintInRange(min, max uint) uint {
+	return InclusiveRange(g, min, max)
+}
+
+// Int64InRange generates a random int64 in the inclusive range [min, max].
+// `min` and `max` MUST be non-negative, and `max` must be greater than or equal to `min`,
+// otherwise the method panics.
+func (g *RandomGenerator) Int64InRange(min, max int64) int64 {
+	return InclusiveRange(g, min, max)
+}
+
+// Int32InRange generates a random int32 in the inclusive range [min, max].
+// `min` and `max` MUST be non-negative, and `max` must be greater than or equal to `min`,
+// otherwise the method panics.
+func (g *RandomGenerator) Int32InRange(min, max int32) int32 {
+	return InclusiveRange(g, min, max)
+}
+
+// IntInRange generates a random int in the inclusive range [min, max].
+// `min` and `max` MUST be non-negative, and `max` must be greater than or equal to `min`,
+// otherwise the method panics.
+func (g *RandomGenerator) IntInRange(min, max int) int {
+	return InclusiveRange(g, min, max)
+}
+
+// Bool generates a random bool.
+func (g *RandomGenerator) Bool() bool {
+	return g.Intn(2) == 0
+}
+
+// InclusiveRange generates a random number of type T in the inclusive range [min, max].
+// `min` and `max` MUST be non-negative, and `max` must be greater than or equal to `min`,
+// otherwise the function panics.
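+//
+// For example (an illustrative sketch; `g` is assumed to be a seeded *RandomGenerator):
+//
+//	v := InclusiveRange(g, uint64(10), uint64(20)) // v is in [10, 20]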
+func InclusiveRange[T ~uint64 | ~uint32 | ~uint | ~int64 | ~int32 | ~int](g *RandomGenerator, min, max T) T { + return min + T(g.Intn(int(max)+1-int(min))) +} + +// RandomElement selects a random element from the provided slice. +// Returns the zero value of T if the slice is empty. +func RandomElement[T any](g *RandomGenerator, slice []T) T { + if len(slice) == 0 { + var zero T + return zero + } + return slice[g.Intn(len(slice))] +} diff --git a/utils/unittest/fixtures/random_test.go b/utils/unittest/fixtures/random_test.go new file mode 100644 index 00000000000..c439085476c --- /dev/null +++ b/utils/unittest/fixtures/random_test.go @@ -0,0 +1,229 @@ +package fixtures + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRandomGenerator(t *testing.T) { + // Test with explicit seed for deterministic results + rng := rand.New(rand.NewSource(12345)) + random := NewRandomGenerator(rng) + + // Test RandomBytes + bytes := random.RandomBytes(32) + require.Len(t, bytes, 32) + assert.NotEqual(t, make([]byte, 32), bytes) // Should not be all zeros + + // Test RandomString + str := random.RandomString(16) + require.Len(t, str, 16) + assert.NotEmpty(t, str) + + // Test Uintn + uintVal := random.Uintn(100) + assert.GreaterOrEqual(t, uintVal, uint(0)) + assert.Less(t, uintVal, uint(100)) + + // Test Uint32n + uint32Val := random.Uint32n(100) + assert.GreaterOrEqual(t, uint32Val, uint32(0)) + assert.Less(t, uint32Val, uint32(100)) + + // Test Uint64n + uint64Val := random.Uint64n(100) + assert.GreaterOrEqual(t, uint64Val, uint64(0)) + assert.Less(t, uint64Val, uint64(100)) + + // Test IntInRange (positive ranges only) + intInRange := random.IntInRange(1, 50) + assert.GreaterOrEqual(t, intInRange, 1) + assert.LessOrEqual(t, intInRange, 50) + + // Test Int32InRange (positive ranges only) + int32InRange := random.Int32InRange(1, 25) + assert.GreaterOrEqual(t, int32InRange, int32(1)) + assert.LessOrEqual(t, int32InRange, int32(25)) + + // Test Int64InRange (positive ranges only) + int64InRange := random.Int64InRange(1, 100) + assert.GreaterOrEqual(t, int64InRange, int64(1)) + assert.LessOrEqual(t, int64InRange, int64(100)) + + // Test UintInRange + uintInRange := random.UintInRange(10, 90) + assert.GreaterOrEqual(t, uintInRange, uint(10)) + assert.LessOrEqual(t, uintInRange, uint(90)) + + // Test Uint32InRange + uint32InRange := random.Uint32InRange(5, 95) + assert.GreaterOrEqual(t, uint32InRange, uint32(5)) + assert.LessOrEqual(t, uint32InRange, uint32(95)) + + // Test Uint64InRange + uint64InRange := random.Uint64InRange(1, 1000) + assert.GreaterOrEqual(t, uint64InRange, uint64(1)) + assert.LessOrEqual(t, uint64InRange, uint64(1000)) +} + +func TestInclusiveRange(t *testing.T) { + rng := rand.New(rand.NewSource(12345)) + random := NewRandomGenerator(rng) + + // Test with different types (positive ranges only) + tests := []struct { + name string + min any + max any + }{ + {"int", 1, 100}, + {"int32", int32(1), int32(100)}, + {"int64", int64(1), int64(100)}, + {"uint", uint(1), uint(100)}, + {"uint32", uint32(1), uint32(100)}, + {"uint64", uint64(1), uint64(100)}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + switch min := tt.min.(type) { + case int: + max := tt.max.(int) + result := InclusiveRange(random, min, max) + assert.GreaterOrEqual(t, result, min) + assert.LessOrEqual(t, result, max) + case int32: + max := tt.max.(int32) + result := InclusiveRange(random, min, max) + assert.GreaterOrEqual(t, 
result, min) + assert.LessOrEqual(t, result, max) + case int64: + max := tt.max.(int64) + result := InclusiveRange(random, min, max) + assert.GreaterOrEqual(t, result, min) + assert.LessOrEqual(t, result, max) + case uint: + max := tt.max.(uint) + result := InclusiveRange(random, min, max) + assert.GreaterOrEqual(t, result, min) + assert.LessOrEqual(t, result, max) + case uint32: + max := tt.max.(uint32) + result := InclusiveRange(random, min, max) + assert.GreaterOrEqual(t, result, min) + assert.LessOrEqual(t, result, max) + case uint64: + max := tt.max.(uint64) + result := InclusiveRange(random, min, max) + assert.GreaterOrEqual(t, result, min) + assert.LessOrEqual(t, result, max) + } + }) + } +} + +func TestRandomElement(t *testing.T) { + rng := rand.New(rand.NewSource(12345)) + random := NewRandomGenerator(rng) + + // Test with string slice + stringSlice := []string{"apple", "banana", "cherry", "date"} + element := RandomElement(random, stringSlice) + assert.Contains(t, stringSlice, element) + + // Test with int slice + intSlice := []int{1, 2, 3, 4, 5} + intElement := RandomElement(random, intSlice) + assert.Contains(t, intSlice, intElement) + + // Test with empty slice + emptySlice := []string{} + emptyElement := RandomElement(random, emptySlice) + assert.Empty(t, emptyElement) + + // Test with single element slice + singleSlice := []string{"only"} + singleElement := RandomElement(random, singleSlice) + assert.Equal(t, "only", singleElement) +} + +func TestRandomGeneratorDeterministic(t *testing.T) { + // Test that generators produce same results with same seed + rng1 := rand.New(rand.NewSource(42)) + rng2 := rand.New(rand.NewSource(42)) + random1 := NewRandomGenerator(rng1) + random2 := NewRandomGenerator(rng2) + + const max = 100_000_000 + + // Test all methods produce same results + assert.Equal(t, random1.RandomBytes(32), random2.RandomBytes(32)) + assert.Equal(t, random1.RandomString(32), random2.RandomString(32)) + assert.Equal(t, random1.Uintn(max), random2.Uintn(max)) + assert.Equal(t, random1.Uint32n(max), random2.Uint32n(max)) + assert.Equal(t, random1.Uint64n(max), random2.Uint64n(max)) + assert.Equal(t, random1.IntInRange(1, max), random2.IntInRange(1, max)) + assert.Equal(t, random1.Int32InRange(1, max), random2.Int32InRange(1, max)) + assert.Equal(t, random1.Int64InRange(1, max), random2.Int64InRange(1, max)) + assert.Equal(t, random1.UintInRange(1, max), random2.UintInRange(1, max)) + assert.Equal(t, random1.Uint32InRange(1, max), random2.Uint32InRange(1, max)) + assert.Equal(t, random1.Uint64InRange(1, max), random2.Uint64InRange(1, max)) + + // Test InclusiveRange + assert.Equal(t, InclusiveRange(random1, 1, max), InclusiveRange(random2, 1, max)) + assert.Equal(t, InclusiveRange(random1, uint32(1), max), InclusiveRange(random2, uint32(1), max)) + + // Test RandomElement + slice := []string{"apple", "banana", "cherry"} + assert.Equal(t, RandomElement(random1, slice), RandomElement(random2, slice)) +} + +func TestRandomGeneratorDifferentSeeds(t *testing.T) { + // Test that generators produce different results with different seeds + rng1 := rand.New(rand.NewSource(42)) + rng2 := rand.New(rand.NewSource(123)) + random1 := NewRandomGenerator(rng1) + random2 := NewRandomGenerator(rng2) + + // Test that results are different (very high probability) + assert.NotEqual(t, random1.RandomBytes(32), random2.RandomBytes(32)) + assert.NotEqual(t, random1.RandomString(32), random2.RandomString(32)) + assert.NotEqual(t, random1.Uintn(100_000_000), random2.Uintn(100_000_000)) + 
+	assert.NotEqual(t, random1.Uint32n(100_000_000), random2.Uint32n(100_000_000))
+	assert.NotEqual(t, random1.Uint64n(100_000_000), random2.Uint64n(100_000_000))
+}
+
+func TestRandomGeneratorEdgeCases(t *testing.T) {
+	rng := rand.New(rand.NewSource(12345))
+	random := NewRandomGenerator(rng)
+
+	// Test with single value range (positive only, as per current implementation)
+	singleInt := random.IntInRange(5, 5)
+	assert.Equal(t, 5, singleInt)
+
+	singleUint := random.UintInRange(10, 10)
+	assert.Equal(t, uint(10), singleUint)
+
+	// Test with positive ranges only (as per current implementation constraints)
+	positiveRange := random.IntInRange(1, 10)
+	assert.GreaterOrEqual(t, positiveRange, 1)
+	assert.LessOrEqual(t, positiveRange, 10)
+
+	// Test with positive range for int32
+	positiveRange32 := random.Int32InRange(5, 15)
+	assert.GreaterOrEqual(t, positiveRange32, int32(5))
+	assert.LessOrEqual(t, positiveRange32, int32(15))
+
+	// Test with positive range for int64
+	positiveRange64 := random.Int64InRange(10, 50)
+	assert.GreaterOrEqual(t, positiveRange64, int64(10))
+	assert.LessOrEqual(t, positiveRange64, int64(50))
+
+	// Test with positive range for unsigned types
+	positiveUintRange := random.UintInRange(5, 25)
+	assert.GreaterOrEqual(t, positiveUintRange, uint(5))
+	assert.LessOrEqual(t, positiveUintRange, uint(25))
+}
diff --git a/utils/unittest/fixtures/register_entry.go b/utils/unittest/fixtures/register_entry.go
new file mode 100644
index 00000000000..c082c1bf112
--- /dev/null
+++ b/utils/unittest/fixtures/register_entry.go
@@ -0,0 +1,80 @@
+package fixtures
+
+import (
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/convert"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// RegisterEntry is the default options factory for [flow.RegisterEntry] generation.
+var RegisterEntry registerEntryFactory
+
+type registerEntryFactory struct{}
+
+type RegisterEntryOption func(*RegisterEntryGenerator, *flow.RegisterEntry)
+
+// WithKey is an option that sets the `Key` of the register entry.
+func (f registerEntryFactory) WithKey(key flow.RegisterID) RegisterEntryOption {
+	return func(g *RegisterEntryGenerator, entry *flow.RegisterEntry) {
+		entry.Key = key
+	}
+}
+
+// WithValue is an option that sets the `Value` of the register entry.
+func (f registerEntryFactory) WithValue(value flow.RegisterValue) RegisterEntryOption {
+	return func(g *RegisterEntryGenerator, entry *flow.RegisterEntry) {
+		entry.Value = value
+	}
+}
+
+// WithPayload is an option that sets both the `Key` and `Value` of the register entry by
+// converting the provided [ledger.Payload] into a register.
+func (f registerEntryFactory) WithPayload(payload *ledger.Payload) RegisterEntryOption {
+	return func(g *RegisterEntryGenerator, entry *flow.RegisterEntry) {
+		key, value, err := convert.PayloadToRegister(payload)
+		NoError(err)
+
+		entry.Key = key
+		entry.Value = value
+	}
+}
+
+// RegisterEntryGenerator generates register entries with consistent randomness.
+type RegisterEntryGenerator struct {
+	registerEntryFactory
+
+	random         *RandomGenerator
+	ledgerPayloads *LedgerPayloadGenerator
+}
+
+func NewRegisterEntryGenerator(
+	random *RandomGenerator,
+	ledgerPayloads *LedgerPayloadGenerator,
+) *RegisterEntryGenerator {
+	return &RegisterEntryGenerator{
+		random:         random,
+		ledgerPayloads: ledgerPayloads,
+	}
+}
+
+// Fixture generates a [flow.RegisterEntry] with random data based on the provided options.
+func (g *RegisterEntryGenerator) Fixture(opts ...RegisterEntryOption) flow.RegisterEntry {
+	payload := g.ledgerPayloads.Fixture()
+	key, value, err := convert.PayloadToRegister(payload)
+	NoError(err)
+
+	entry := flow.RegisterEntry{
+		Key:   key,
+		Value: value,
+	}
+
+	for _, opt := range opts {
+		opt(g, &entry)
+	}
+
+	return entry
+}
+
+// List generates a list of [flow.RegisterEntry].
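+// For example (an illustrative sketch; all entries share the fixed value while keys stay random):
+//
+//	entries := g.List(5, RegisterEntry.WithValue([]byte("value")))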
+func (g *RegisterEntryGenerator) List(n int, opts ...RegisterEntryOption) []flow.RegisterEntry { + entries := make([]flow.RegisterEntry, n) + for i := range n { + entries[i] = g.Fixture(opts...) + } + return entries +} diff --git a/utils/unittest/fixtures/seal.go b/utils/unittest/fixtures/seal.go new file mode 100644 index 00000000000..cfeb797a4e2 --- /dev/null +++ b/utils/unittest/fixtures/seal.go @@ -0,0 +1,89 @@ +package fixtures + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// Seal is the default options factory for [flow.Seal] generation. +var Seal sealFactory + +type sealFactory struct{} + +type SealOption func(*SealGenerator, *flow.Seal) + +// WithBlockID is an option that sets the `BlockID` of the seal. +func (f sealFactory) WithBlockID(blockID flow.Identifier) SealOption { + return func(g *SealGenerator, seal *flow.Seal) { + seal.BlockID = blockID + } +} + +// WithResultID is an option that sets the `ResultID` of the seal. +func (f sealFactory) WithResultID(resultID flow.Identifier) SealOption { + return func(g *SealGenerator, seal *flow.Seal) { + seal.ResultID = resultID + } +} + +// WithFinalState is an option that sets the `FinalState` of the seal. +func (f sealFactory) WithFinalState(finalState flow.StateCommitment) SealOption { + return func(g *SealGenerator, seal *flow.Seal) { + seal.FinalState = finalState + } +} + +// WithAggregatedApprovalSigs is an option that sets the `AggregatedApprovalSigs` of the seal. +func (f sealFactory) WithAggregatedApprovalSigs(sigs ...flow.AggregatedSignature) SealOption { + return func(g *SealGenerator, seal *flow.Seal) { + seal.AggregatedApprovalSigs = sigs + } +} + +// SealGenerator generates seals with consistent randomness. +type SealGenerator struct { + sealFactory + + random *RandomGenerator + identifiers *IdentifierGenerator + stateCommitments *StateCommitmentGenerator + aggregatedSignatures *AggregatedSignatureGenerator +} + +func NewSealGenerator( + random *RandomGenerator, + identifiers *IdentifierGenerator, + stateCommitments *StateCommitmentGenerator, + aggregatedSignatures *AggregatedSignatureGenerator, +) *SealGenerator { + return &SealGenerator{ + random: random, + identifiers: identifiers, + stateCommitments: stateCommitments, + aggregatedSignatures: aggregatedSignatures, + } +} + +// Fixture generates a [flow.Seal] with random data based on the provided options. +func (g *SealGenerator) Fixture(opts ...SealOption) *flow.Seal { + seal := &flow.Seal{ + BlockID: g.identifiers.Fixture(), + ResultID: g.identifiers.Fixture(), + FinalState: g.stateCommitments.Fixture(), + AggregatedApprovalSigs: g.aggregatedSignatures.List(g.random.IntInRange(1, 4)), + } + + for _, opt := range opts { + opt(g, seal) + } + + return seal +} + +// List generates a list of [flow.Seal] with random data. +func (g *SealGenerator) List(n int, opts ...SealOption) []*flow.Seal { + seals := make([]*flow.Seal, 0, n) + for range n { + seals = append(seals, g.Fixture(opts...)) + } + return seals +} diff --git a/utils/unittest/fixtures/service_event.go b/utils/unittest/fixtures/service_event.go new file mode 100644 index 00000000000..1bda29e3a05 --- /dev/null +++ b/utils/unittest/fixtures/service_event.go @@ -0,0 +1,120 @@ +package fixtures + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// ServiceEvent is the default options factory for [flow.ServiceEvent] generation. 
+var ServiceEvent serviceEventFactory
+
+type serviceEventFactory struct{}
+
+type ServiceEventOption func(*ServiceEventGenerator, *flow.ServiceEvent)
+
+// WithType is an option that sets the `Type` of the service event.
+func (f serviceEventFactory) WithType(eventType flow.ServiceEventType) ServiceEventOption {
+	return func(g *ServiceEventGenerator, event *flow.ServiceEvent) {
+		event.Type = eventType
+	}
+}
+
+// WithEvent is an option that sets the `Event` data of the service event.
+// When provided, Fixture uses this data as-is instead of generating event data for the type.
+func (f serviceEventFactory) WithEvent(eventData any) ServiceEventOption {
+	return func(g *ServiceEventGenerator, event *flow.ServiceEvent) {
+		event.Event = eventData
+	}
+}
+
+// ServiceEventGenerator generates service events with consistent randomness.
+type ServiceEventGenerator struct {
+	serviceEventFactory
+
+	random                       *RandomGenerator
+	epochSetups                  *EpochSetupGenerator
+	epochCommits                 *EpochCommitGenerator
+	epochRecovers                *EpochRecoverGenerator
+	versionBeacons               *VersionBeaconGenerator
+	protocolStateVersionUpgrades *ProtocolStateVersionUpgradeGenerator
+	setEpochExtensionViewCounts  *SetEpochExtensionViewCountGenerator
+	ejectNodes                   *EjectNodeGenerator
+}
+
+// NewServiceEventGenerator creates a new ServiceEventGenerator.
+func NewServiceEventGenerator(
+	random *RandomGenerator,
+	epochSetups *EpochSetupGenerator,
+	epochCommits *EpochCommitGenerator,
+	epochRecovers *EpochRecoverGenerator,
+	versionBeacons *VersionBeaconGenerator,
+	protocolStateVersionUpgrades *ProtocolStateVersionUpgradeGenerator,
+	setEpochExtensionViewCounts *SetEpochExtensionViewCountGenerator,
+	ejectNodes *EjectNodeGenerator,
+) *ServiceEventGenerator {
+	return &ServiceEventGenerator{
+		random:                       random,
+		epochSetups:                  epochSetups,
+		epochCommits:                 epochCommits,
+		epochRecovers:                epochRecovers,
+		versionBeacons:               versionBeacons,
+		protocolStateVersionUpgrades: protocolStateVersionUpgrades,
+		setEpochExtensionViewCounts:  setEpochExtensionViewCounts,
+		ejectNodes:                   ejectNodes,
+	}
+}
+
+// Fixture generates a [flow.ServiceEvent] with random data based on the provided options.
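+//
+// For example, to force a specific event type (illustrative):
+//
+//	event := g.Fixture(ServiceEvent.WithType(flow.ServiceEventSetup))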
+func (g *ServiceEventGenerator) Fixture(opts ...ServiceEventOption) flow.ServiceEvent {
+	// Define available service event types
+	eventTypes := []flow.ServiceEventType{
+		flow.ServiceEventSetup,
+		flow.ServiceEventCommit,
+		flow.ServiceEventRecover,
+		flow.ServiceEventVersionBeacon,
+		flow.ServiceEventProtocolStateVersionUpgrade,
+		flow.ServiceEventSetEpochExtensionViewCount,
+		flow.ServiceEventEjectNode,
+	}
+
+	// Pick a random event type
+	eventType := RandomElement(g.random, eventTypes)
+
+	event := flow.ServiceEvent{
+		Type: eventType,
+	}
+
+	// Apply options first (they may change the type or provide the event data)
+	for _, opt := range opts {
+		opt(g, &event)
+	}
+
+	// Generate event data based on the final type using the specific generators, unless an
+	// option (e.g. WithEvent) already provided the data.
+	if event.Event == nil {
+		switch event.Type {
+		case flow.ServiceEventSetup:
+			event.Event = g.epochSetups.Fixture()
+		case flow.ServiceEventCommit:
+			event.Event = g.epochCommits.Fixture()
+		case flow.ServiceEventRecover:
+			event.Event = g.epochRecovers.Fixture()
+		case flow.ServiceEventVersionBeacon:
+			event.Event = g.versionBeacons.Fixture()
+		case flow.ServiceEventProtocolStateVersionUpgrade:
+			event.Event = g.protocolStateVersionUpgrades.Fixture()
+		case flow.ServiceEventSetEpochExtensionViewCount:
+			event.Event = g.setEpochExtensionViewCounts.Fixture()
+		case flow.ServiceEventEjectNode:
+			event.Event = g.ejectNodes.Fixture()
+		default:
+			Assertf(false, "unexpected service event type: %s", event.Type)
+		}
+	}
+
+	return event
+}
+
+// List generates a list of [flow.ServiceEvent].
+func (g *ServiceEventGenerator) List(n int, opts ...ServiceEventOption) []flow.ServiceEvent {
+	events := make([]flow.ServiceEvent, n)
+	for i := range n {
+		events[i] = g.Fixture(opts...)
+	}
+	return events
+}
diff --git a/utils/unittest/fixtures/service_event_eject_node.go b/utils/unittest/fixtures/service_event_eject_node.go
new file mode 100644
index 00000000000..8a217e13c72
--- /dev/null
+++ b/utils/unittest/fixtures/service_event_eject_node.go
@@ -0,0 +1,55 @@
+package fixtures
+
+import "github.com/onflow/flow-go/model/flow"
+
+// EjectNode is the default options factory for [flow.EjectNode] generation.
+var EjectNode ejectNodeFactory
+
+type ejectNodeFactory struct{}
+
+type EjectNodeOption func(*EjectNodeGenerator, *flow.EjectNode)
+
+// WithNodeID is an option that sets the `NodeID` of the node to be ejected.
+func (f ejectNodeFactory) WithNodeID(nodeID flow.Identifier) EjectNodeOption {
+	return func(g *EjectNodeGenerator, eject *flow.EjectNode) {
+		eject.NodeID = nodeID
+	}
+}
+
+// EjectNodeGenerator generates node ejection events with consistent randomness.
+type EjectNodeGenerator struct {
+	ejectNodeFactory
+
+	identifiers *IdentifierGenerator
+}
+
+// NewEjectNodeGenerator creates a new EjectNodeGenerator.
+func NewEjectNodeGenerator(
+	identifiers *IdentifierGenerator,
+) *EjectNodeGenerator {
+	return &EjectNodeGenerator{
+		identifiers: identifiers,
+	}
+}
+
+// Fixture generates a [flow.EjectNode] with random data based on the provided options.
+func (g *EjectNodeGenerator) Fixture(opts ...EjectNodeOption) *flow.EjectNode {
+	eject := &flow.EjectNode{
+		NodeID: g.identifiers.Fixture(),
+	}
+
+	for _, opt := range opts {
+		opt(g, eject)
+	}
+
+	return eject
+}
+
+// List generates a list of [flow.EjectNode].
+func (g *EjectNodeGenerator) List(n int, opts ...EjectNodeOption) []*flow.EjectNode {
+	ejects := make([]*flow.EjectNode, n)
+	for i := range n {
+		ejects[i] = g.Fixture(opts...)
+	}
+	return ejects
+}
diff --git a/utils/unittest/fixtures/service_event_epoch_commit.go b/utils/unittest/fixtures/service_event_epoch_commit.go
new file mode 100644
index 00000000000..59b0a941d7c
--- /dev/null
+++ b/utils/unittest/fixtures/service_event_epoch_commit.go
@@ -0,0 +1,106 @@
+package fixtures
+
+import (
+	"github.com/onflow/crypto"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/filter"
+)
+
+// EpochCommit is the default options factory for [flow.EpochCommit] generation.
+var EpochCommit epochCommitFactory
+
+type epochCommitFactory struct{}
+
+type EpochCommitOption func(*EpochCommitGenerator, *flow.EpochCommit)
+
+// WithCounter is an option that sets the `Counter` of the epoch commit.
+func (f epochCommitFactory) WithCounter(counter uint64) EpochCommitOption {
+	return func(g *EpochCommitGenerator, commit *flow.EpochCommit) {
+		commit.Counter = counter
+	}
+}
+
+// WithClusterQCs is an option that sets the `ClusterQCs` of the epoch commit.
+func (f epochCommitFactory) WithClusterQCs(qcs ...flow.ClusterQCVoteData) EpochCommitOption {
+	return func(g *EpochCommitGenerator, commit *flow.EpochCommit) {
+		commit.ClusterQCs = qcs
+	}
+}
+
+// WithClusterQCsFromAssignments is an option that sets the `ClusterQCs` of the epoch commit from
+// the assignments, generating one QC per cluster.
+func (f epochCommitFactory) WithClusterQCsFromAssignments(assignments flow.AssignmentList) EpochCommitOption {
+	return func(g *EpochCommitGenerator, commit *flow.EpochCommit) {
+		qcs := make([]*flow.QuorumCertificateWithSignerIDs, 0, len(assignments))
+		for _, nodes := range assignments {
+			qc := g.quorumCerts.Fixture(QuorumCertificateWithSignerIDs.WithSignerIDs(nodes))
+			qcs = append(qcs, qc)
+		}
+		commit.ClusterQCs = flow.ClusterQCVoteDatasFromQCs(qcs)
+	}
+}
+
+// WithDKGFromParticipants is an option that sets the `DKGIndexMap` and `DKGParticipantKeys` of the
+// epoch commit from the consensus-committee members among the participants.
+func (f epochCommitFactory) WithDKGFromParticipants(participants flow.IdentitySkeletonList) EpochCommitOption {
+	return func(g *EpochCommitGenerator, commit *flow.EpochCommit) {
+		dkgParticipants := participants.Filter(filter.IsConsensusCommitteeMember).Sort(flow.Canonical[flow.IdentitySkeleton])
+		commit.DKGIndexMap = make(flow.DKGIndexMap)
+		commit.DKGParticipantKeys = g.cryptoGen.PublicKeys(len(dkgParticipants), crypto.BLSBLS12381)
+		for index, nodeID := range dkgParticipants.NodeIDs() {
+			commit.DKGIndexMap[nodeID] = index
+		}
+	}
+}
+
+// EpochCommitGenerator generates epoch commit events with consistent randomness.
+type EpochCommitGenerator struct {
+	epochCommitFactory
+
+	random      *RandomGenerator
+	cryptoGen   *CryptoGenerator
+	identifiers *IdentifierGenerator
+	quorumCerts *QuorumCertificateWithSignerIDsGenerator
+}
+
+// NewEpochCommitGenerator creates a new EpochCommitGenerator.
+func NewEpochCommitGenerator(
+	random *RandomGenerator,
+	cryptoGen *CryptoGenerator,
+	identifiers *IdentifierGenerator,
+	quorumCerts *QuorumCertificateWithSignerIDsGenerator,
+) *EpochCommitGenerator {
+	return &EpochCommitGenerator{
+		random:      random,
+		cryptoGen:   cryptoGen,
+		identifiers: identifiers,
+		quorumCerts: quorumCerts,
+	}
+}
+
+// Fixture generates a [flow.EpochCommit] with random data based on the provided options.
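+//
+// For example (an illustrative sketch; `setup` is assumed to be an existing [flow.EpochSetup]
+// fixture, mirroring how EpochRecoverGenerator wires the two events together):
+//
+//	commit := g.Fixture(
+//		EpochCommit.WithCounter(setup.Counter),
+//		EpochCommit.WithDKGFromParticipants(setup.Participants),
+//		EpochCommit.WithClusterQCsFromAssignments(setup.Assignments),
+//	)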
+func (g *EpochCommitGenerator) Fixture(opts ...EpochCommitOption) *flow.EpochCommit {
+	commit := &flow.EpochCommit{
+		Counter:            uint64(g.random.Uint32()),
+		ClusterQCs:         flow.ClusterQCVoteDatasFromQCs(g.quorumCerts.List(1)),
+		DKGGroupKey:        g.cryptoGen.PrivateKey(crypto.BLSBLS12381).PublicKey(),
+		DKGParticipantKeys: g.cryptoGen.PublicKeys(2, crypto.BLSBLS12381),
+	}
+
+	for _, opt := range opts {
+		opt(g, commit)
+	}
+
+	return commit
+}
+
+// List generates a list of [flow.EpochCommit].
+func (g *EpochCommitGenerator) List(n int, opts ...EpochCommitOption) []*flow.EpochCommit {
+	commits := make([]*flow.EpochCommit, n)
+	for i := range n {
+		commits[i] = g.Fixture(opts...)
+	}
+	return commits
+}
diff --git a/utils/unittest/fixtures/service_event_epoch_recover.go b/utils/unittest/fixtures/service_event_epoch_recover.go
new file mode 100644
index 00000000000..b1ea59b1aa7
--- /dev/null
+++ b/utils/unittest/fixtures/service_event_epoch_recover.go
@@ -0,0 +1,79 @@
+package fixtures
+
+import "github.com/onflow/flow-go/model/flow"
+
+// EpochRecover is the default options factory for [flow.EpochRecover] generation.
+var EpochRecover epochRecoverFactory
+
+type epochRecoverFactory struct{}
+
+type EpochRecoverOption func(*EpochRecoverGenerator, *flow.EpochRecover)
+
+// WithEpochSetup is an option that sets the `EpochSetup` of the recover event.
+func (f epochRecoverFactory) WithEpochSetup(setup flow.EpochSetup) EpochRecoverOption {
+	return func(g *EpochRecoverGenerator, ev *flow.EpochRecover) {
+		ev.EpochSetup = setup
+	}
+}
+
+// WithEpochCommit is an option that sets the `EpochCommit` of the recover event.
+func (f epochRecoverFactory) WithEpochCommit(commit flow.EpochCommit) EpochRecoverOption {
+	return func(g *EpochRecoverGenerator, ev *flow.EpochRecover) {
+		ev.EpochCommit = commit
+	}
+}
+
+// EpochRecoverGenerator generates epoch recovery events with consistent randomness.
+type EpochRecoverGenerator struct {
+	epochRecoverFactory
+
+	random       *RandomGenerator
+	epochSetups  *EpochSetupGenerator
+	epochCommits *EpochCommitGenerator
+}
+
+// NewEpochRecoverGenerator creates a new EpochRecoverGenerator.
+func NewEpochRecoverGenerator(
+	random *RandomGenerator,
+	epochSetups *EpochSetupGenerator,
+	epochCommits *EpochCommitGenerator,
+) *EpochRecoverGenerator {
+	return &EpochRecoverGenerator{
+		random:       random,
+		epochSetups:  epochSetups,
+		epochCommits: epochCommits,
+	}
+}
+
+// Fixture generates a [flow.EpochRecover] with random data based on the provided options.
+func (g *EpochRecoverGenerator) Fixture(opts ...EpochRecoverOption) *flow.EpochRecover {
+	counter := g.random.Uint64InRange(1, 1000)
+
+	// Generate compatible EpochSetup and EpochCommit with the same counter
+	setup := g.epochSetups.Fixture(EpochSetup.WithCounter(counter))
+	commit := g.epochCommits.Fixture(
+		EpochCommit.WithCounter(counter),
+		EpochCommit.WithDKGFromParticipants(setup.Participants),
+		EpochCommit.WithClusterQCsFromAssignments(setup.Assignments),
+	)
+
+	ev := &flow.EpochRecover{
+		EpochSetup:  *setup,
+		EpochCommit: *commit,
+	}
+
+	for _, opt := range opts {
+		opt(g, ev)
+	}
+
+	return ev
+}
+
+// List generates a list of [flow.EpochRecover].
+func (g *EpochRecoverGenerator) List(n int, opts ...EpochRecoverOption) []*flow.EpochRecover {
+	recovers := make([]*flow.EpochRecover, n)
+	for i := range n {
+		recovers[i] = g.Fixture(opts...)
+	}
+	return recovers
+}
diff --git a/utils/unittest/fixtures/service_event_epoch_setup.go b/utils/unittest/fixtures/service_event_epoch_setup.go
new file mode 100644
index 00000000000..127dac376ee
--- /dev/null
+++ b/utils/unittest/fixtures/service_event_epoch_setup.go
@@ -0,0 +1,144 @@
+package fixtures
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// EpochSetup is the default options factory for [flow.EpochSetup] generation.
+var EpochSetup epochSetupFactory
+
+type epochSetupFactory struct{}
+
+type EpochSetupOption func(*EpochSetupGenerator, *flow.EpochSetup)
+
+// WithCounter is an option that sets the `Counter` of the epoch setup.
+func (f epochSetupFactory) WithCounter(counter uint64) EpochSetupOption {
+	return func(g *EpochSetupGenerator, setup *flow.EpochSetup) {
+		setup.Counter = counter
+	}
+}
+
+// WithFirstView is an option that sets the `FirstView` of the epoch setup.
+func (f epochSetupFactory) WithFirstView(view uint64) EpochSetupOption {
+	return func(g *EpochSetupGenerator, setup *flow.EpochSetup) {
+		setup.FirstView = view
+	}
+}
+
+// WithFinalView is an option that sets the `FinalView` of the epoch setup.
+func (f epochSetupFactory) WithFinalView(view uint64) EpochSetupOption {
+	return func(g *EpochSetupGenerator, setup *flow.EpochSetup) {
+		setup.FinalView = view
+	}
+}
+
+// WithParticipants is an option that sets the `Participants` of the epoch setup and derives a
+// single-cluster `Assignments` from them.
+func (f epochSetupFactory) WithParticipants(participants flow.IdentitySkeletonList) EpochSetupOption {
+	return func(g *EpochSetupGenerator, setup *flow.EpochSetup) {
+		setup.Participants = participants
+		setup.Assignments = unittest.ClusterAssignment(1, participants)
+	}
+}
+
+// WithRandomSource is an option that sets the `RandomSource` of the epoch setup.
+func (f epochSetupFactory) WithRandomSource(source []byte) EpochSetupOption {
+	return func(g *EpochSetupGenerator, setup *flow.EpochSetup) {
+		setup.RandomSource = source
+	}
+}
+
+// WithDKGFinalViews is an option that sets the `DKGPhase1FinalView`, `DKGPhase2FinalView`, and
+// `DKGPhase3FinalView` of the epoch setup.
+func (f epochSetupFactory) WithDKGFinalViews(phase1, phase2, phase3 uint64) EpochSetupOption {
+	return func(g *EpochSetupGenerator, setup *flow.EpochSetup) {
+		setup.DKGPhase1FinalView = phase1
+		setup.DKGPhase2FinalView = phase2
+		setup.DKGPhase3FinalView = phase3
+	}
+}
+
+// WithTargetDuration is an option that sets the `TargetDuration` of the epoch setup.
+func (f epochSetupFactory) WithTargetDuration(duration uint64) EpochSetupOption {
+	return func(g *EpochSetupGenerator, setup *flow.EpochSetup) {
+		setup.TargetDuration = duration
+	}
+}
+
+// WithTargetEndTime is an option that sets the `TargetEndTime` of the epoch setup.
+func (f epochSetupFactory) WithTargetEndTime(endTime uint64) EpochSetupOption {
+	return func(g *EpochSetupGenerator, setup *flow.EpochSetup) {
+		setup.TargetEndTime = endTime
+	}
+}
+
+// WithAssignments is an option that sets the `Assignments` of the epoch setup.
+func (f epochSetupFactory) WithAssignments(assignments flow.AssignmentList) EpochSetupOption {
+	return func(g *EpochSetupGenerator, setup *flow.EpochSetup) {
+		setup.Assignments = assignments
+	}
+}
+
+// EpochSetupGenerator generates epoch setup events with consistent randomness.
+type EpochSetupGenerator struct {
+	epochSetupFactory
+
+	random     *RandomGenerator
+	timeGen    *TimeGenerator
+	identities *IdentityGenerator
+}
+
+// NewEpochSetupGenerator creates a new EpochSetupGenerator.
+func NewEpochSetupGenerator(random *RandomGenerator, timeGen *TimeGenerator, identities *IdentityGenerator) *EpochSetupGenerator {
+	return &EpochSetupGenerator{
+		random:     random,
+		timeGen:    timeGen,
+		identities: identities,
+	}
+}
+
+// Fixture generates a [flow.EpochSetup] with random data based on the provided options.
+func (g *EpochSetupGenerator) Fixture(opts ...EpochSetupOption) *flow.EpochSetup {
+	baseView := uint64(g.random.Uint32())
+	finalView := uint64(g.random.Uint32())
+	// ensure the epoch spans at least 1000 views (this also covers finalView < baseView)
+	if finalView < baseView+1000 {
+		finalView = baseView + 1000
+	}
+
+	participants := g.identities.List(5, g.identities.WithAllRoles())
+	participants = participants.Sort(flow.Canonical[flow.Identity])
+
+	setup := &flow.EpochSetup{
+		Counter:            uint64(g.random.Uint32()),
+		FirstView:          baseView,
+		FinalView:          finalView,
+		DKGPhase1FinalView: baseView + 100,
+		DKGPhase2FinalView: baseView + 200,
+		DKGPhase3FinalView: baseView + 300,
+		TargetDuration:     60 * 60,
+		TargetEndTime:      uint64(g.timeGen.Fixture().UnixMilli()),
+		Participants:       participants.ToSkeleton(),
+		RandomSource:       g.random.RandomBytes(flow.EpochSetupRandomSourceLength),
+	}
+
+	for _, opt := range opts {
+		opt(g, setup)
+	}
+
+	if setup.Assignments == nil {
+		setup.Assignments = unittest.ClusterAssignment(1, setup.Participants)
+	}
+
+	return setup
+}
+
+// List generates a list of [flow.EpochSetup].
+func (g *EpochSetupGenerator) List(n int, opts ...EpochSetupOption) []*flow.EpochSetup {
+	setups := make([]*flow.EpochSetup, n)
+	for i := range n {
+		setups[i] = g.Fixture(opts...)
+	}
+	return setups
+}
diff --git a/utils/unittest/fixtures/service_event_protocol_state_version_upgrade.go b/utils/unittest/fixtures/service_event_protocol_state_version_upgrade.go
new file mode 100644
index 00000000000..d5411c17760
--- /dev/null
+++ b/utils/unittest/fixtures/service_event_protocol_state_version_upgrade.go
@@ -0,0 +1,61 @@
+package fixtures
+
+import "github.com/onflow/flow-go/model/flow"
+
+// ProtocolStateVersionUpgrade is the default options factory for [flow.ProtocolStateVersionUpgrade] generation.
+var ProtocolStateVersionUpgrade protocolStateVersionUpgradeFactory
+
+type protocolStateVersionUpgradeFactory struct{}
+
+type ProtocolStateVersionUpgradeOption func(*ProtocolStateVersionUpgradeGenerator, *flow.ProtocolStateVersionUpgrade)
+
+// WithNewProtocolStateVersion is an option that sets the `NewProtocolStateVersion` of the upgrade.
+func (f protocolStateVersionUpgradeFactory) WithNewProtocolStateVersion(version uint64) ProtocolStateVersionUpgradeOption {
+	return func(g *ProtocolStateVersionUpgradeGenerator, upgrade *flow.ProtocolStateVersionUpgrade) {
+		upgrade.NewProtocolStateVersion = version
+	}
+}
+
+// WithActiveView is an option that sets the `ActiveView` of the upgrade.
+func (f protocolStateVersionUpgradeFactory) WithActiveView(view uint64) ProtocolStateVersionUpgradeOption {
+	return func(g *ProtocolStateVersionUpgradeGenerator, upgrade *flow.ProtocolStateVersionUpgrade) {
+		upgrade.ActiveView = view
+	}
+}
+
+// ProtocolStateVersionUpgradeGenerator generates protocol state version upgrades with consistent randomness.
+type ProtocolStateVersionUpgradeGenerator struct {
+	protocolStateVersionUpgradeFactory
+
+	random *RandomGenerator
+}
+
+// NewProtocolStateVersionUpgradeGenerator creates a new ProtocolStateVersionUpgradeGenerator.
+func NewProtocolStateVersionUpgradeGenerator(random *RandomGenerator) *ProtocolStateVersionUpgradeGenerator { + return &ProtocolStateVersionUpgradeGenerator{ + random: random, + } +} + +// Fixture generates a [flow.ProtocolStateVersionUpgrade] with random data based on the provided options. +func (g *ProtocolStateVersionUpgradeGenerator) Fixture(opts ...ProtocolStateVersionUpgradeOption) *flow.ProtocolStateVersionUpgrade { + upgrade := &flow.ProtocolStateVersionUpgrade{ + NewProtocolStateVersion: g.random.Uint64(), + ActiveView: g.random.Uint64(), + } + + for _, opt := range opts { + opt(g, upgrade) + } + + return upgrade +} + +// List generates a list of [flow.ProtocolStateVersionUpgrade]. +func (g *ProtocolStateVersionUpgradeGenerator) List(n int, opts ...ProtocolStateVersionUpgradeOption) []*flow.ProtocolStateVersionUpgrade { + upgrades := make([]*flow.ProtocolStateVersionUpgrade, n) + for i := range n { + upgrades[i] = g.Fixture(opts...) + } + return upgrades +} diff --git a/utils/unittest/fixtures/service_event_set_epoch_extension_view_count.go b/utils/unittest/fixtures/service_event_set_epoch_extension_view_count.go new file mode 100644 index 00000000000..f4dda8cbdd3 --- /dev/null +++ b/utils/unittest/fixtures/service_event_set_epoch_extension_view_count.go @@ -0,0 +1,53 @@ +package fixtures + +import "github.com/onflow/flow-go/model/flow" + +// SetEpochExtensionViewCount is the default options factory for [flow.SetEpochExtensionViewCount] generation. +var SetEpochExtensionViewCount setEpochExtensionViewCountFactory + +type setEpochExtensionViewCountFactory struct{} + +type SetEpochExtensionViewCountOption func(*SetEpochExtensionViewCountGenerator, *flow.SetEpochExtensionViewCount) + +// WithValue is an option that sets the `Value` of the extension view count. +func (f setEpochExtensionViewCountFactory) WithValue(value uint64) SetEpochExtensionViewCountOption { + return func(g *SetEpochExtensionViewCountGenerator, extension *flow.SetEpochExtensionViewCount) { + extension.Value = value + } +} + +// SetEpochExtensionViewCountGenerator generates epoch extension view count events with consistent randomness. +type SetEpochExtensionViewCountGenerator struct { + setEpochExtensionViewCountFactory + + random *RandomGenerator +} + +// NewSetEpochExtensionViewCountGenerator creates a new SetEpochExtensionViewCountGenerator. +func NewSetEpochExtensionViewCountGenerator(random *RandomGenerator) *SetEpochExtensionViewCountGenerator { + return &SetEpochExtensionViewCountGenerator{ + random: random, + } +} + +// Fixture generates a [flow.SetEpochExtensionViewCount] with random data based on the provided options. +func (g *SetEpochExtensionViewCountGenerator) Fixture(opts ...SetEpochExtensionViewCountOption) *flow.SetEpochExtensionViewCount { + extension := &flow.SetEpochExtensionViewCount{ + Value: g.random.Uint64InRange(200, 10000), + } + + for _, opt := range opts { + opt(g, extension) + } + + return extension +} + +// List generates a list of [flow.SetEpochExtensionViewCount]. +func (g *SetEpochExtensionViewCountGenerator) List(n int, opts ...SetEpochExtensionViewCountOption) []*flow.SetEpochExtensionViewCount { + extensions := make([]*flow.SetEpochExtensionViewCount, n) + for i := range n { + extensions[i] = g.Fixture(opts...) 
+ } + return extensions +} diff --git a/utils/unittest/fixtures/service_event_version_beacon.go b/utils/unittest/fixtures/service_event_version_beacon.go new file mode 100644 index 00000000000..7cc4a2640da --- /dev/null +++ b/utils/unittest/fixtures/service_event_version_beacon.go @@ -0,0 +1,98 @@ +package fixtures + +import ( + "fmt" + + "github.com/coreos/go-semver/semver" + + "github.com/onflow/flow-go/model/flow" +) + +// VersionBeacon is the default options factory for [flow.VersionBeacon] generation. +var VersionBeacon versionBeaconFactory + +type versionBeaconFactory struct{} + +type VersionBeaconOption func(*VersionBeaconGenerator, *flow.VersionBeacon) + +// WithSequence is an option that sets the `Sequence` of the version beacon. +func (f versionBeaconFactory) WithSequence(sequence uint64) VersionBeaconOption { + return func(g *VersionBeaconGenerator, vb *flow.VersionBeacon) { + vb.Sequence = sequence + } +} + +// WithBoundaries is an option that sets the `VersionBoundaries` of the version beacon. +func (f versionBeaconFactory) WithBoundaries(boundaries ...flow.VersionBoundary) VersionBeaconOption { + return func(g *VersionBeaconGenerator, vb *flow.VersionBeacon) { + vb.VersionBoundaries = boundaries + } +} + +// VersionBeaconGenerator generates version beacons with consistent randomness. +type VersionBeaconGenerator struct { + versionBeaconFactory + + random *RandomGenerator +} + +// NewVersionBeaconGenerator creates a new VersionBeaconGenerator. +func NewVersionBeaconGenerator(random *RandomGenerator) *VersionBeaconGenerator { + return &VersionBeaconGenerator{ + random: random, + } +} + +// Fixture generates a [flow.VersionBeacon] with random data based on the provided options. +func (g *VersionBeaconGenerator) Fixture(opts ...VersionBeaconOption) *flow.VersionBeacon { + numBoundaries := g.random.IntInRange(1, 4) + + height := g.random.Uint64InRange(0, 1000) + version := g.generateRandomVersion() + + boundaries := make([]flow.VersionBoundary, numBoundaries) + for i := range numBoundaries { + boundaries[i] = flow.VersionBoundary{ + Version: version.String(), + BlockHeight: height, + } + + // increment the version so boundaries are always increasing + if i%3 == 0 { + version.BumpMajor() + } else if i%2 == 0 { + version.BumpMinor() + } else { + version.BumpPatch() + } + height += g.random.Uint64InRange(1000, 100_000) + } + + vb := &flow.VersionBeacon{ + VersionBoundaries: boundaries, + Sequence: uint64(g.random.Uint32()), + } + + for _, opt := range opts { + opt(g, vb) + } + + return vb +} + +// List generates a list of [flow.VersionBeacon]. +func (g *VersionBeaconGenerator) List(n int, opts ...VersionBeaconOption) []*flow.VersionBeacon { + beacons := make([]*flow.VersionBeacon, n) + for i := range n { + beacons[i] = g.Fixture(opts...) + } + return beacons +} + +// generateRandomVersion creates a random semver version string. +func (g *VersionBeaconGenerator) generateRandomVersion() *semver.Version { + major := g.random.IntInRange(0, 5) + minor := g.random.IntInRange(0, 20) + patch := g.random.IntInRange(0, 50) + return semver.New(fmt.Sprintf("%d.%d.%d", major, minor, patch)) +} diff --git a/utils/unittest/fixtures/signature.go b/utils/unittest/fixtures/signature.go new file mode 100644 index 00000000000..444639d942c --- /dev/null +++ b/utils/unittest/fixtures/signature.go @@ -0,0 +1,52 @@ +package fixtures + +import ( + "github.com/onflow/crypto" +) + +// Signature is the default options factory for [crypto.Signature] generation. 
+var Signature signatureFactory + +type signatureFactory struct{} + +type SignatureOption func(*SignatureGenerator, *signatureConfig) + +// SignatureGenerator generates signatures with consistent randomness. +type SignatureGenerator struct { + signatureFactory //nolint:unused + + random *RandomGenerator +} + +func NewSignatureGenerator( + random *RandomGenerator, +) *SignatureGenerator { + return &SignatureGenerator{ + random: random, + } +} + +// signatureConfig holds the configuration for signature generation. +type signatureConfig struct { + // Currently no special options needed, but maintaining pattern consistency +} + +// Fixture generates a random [crypto.Signature]. +func (g *SignatureGenerator) Fixture(opts ...SignatureOption) crypto.Signature { + config := &signatureConfig{} + + for _, opt := range opts { + opt(g, config) + } + + return g.random.RandomBytes(crypto.SignatureLenBLSBLS12381) +} + +// List generates a list of random [crypto.Signature]. +func (g *SignatureGenerator) List(n int, opts ...SignatureOption) []crypto.Signature { + sigs := make([]crypto.Signature, n) + for i := range n { + sigs[i] = g.Fixture(opts...) + } + return sigs +} diff --git a/utils/unittest/fixtures/signer_indices.go b/utils/unittest/fixtures/signer_indices.go new file mode 100644 index 00000000000..7e08d868a8c --- /dev/null +++ b/utils/unittest/fixtures/signer_indices.go @@ -0,0 +1,108 @@ +package fixtures + +import ( + "github.com/onflow/flow-go/ledger/common/bitutils" +) + +// SignerIndices is the default options factory for SignerIndices generation. +var SignerIndices signerIndicesFactory + +type signerIndicesFactory struct{} + +type SignerIndicesOption func(*SignerIndicesGenerator, *signerIndicesConfig) + +// signerIndicesConfig holds the configuration for signer indices generation. +type signerIndicesConfig struct { + authorizedSigners int + contributingSigners int + indices []int +} + +// WithSignerCount is an option that sets the total number of indices and the number of signers. +func (f signerIndicesFactory) WithSignerCount(authorizedSigners, contributingSigners int) SignerIndicesOption { + return func(g *SignerIndicesGenerator, config *signerIndicesConfig) { + config.authorizedSigners = authorizedSigners + config.contributingSigners = contributingSigners + } +} + +// WithIndices is an option that sets the total number of indices and specific indices for signers. +// Note: passing an empty slice is valid and will be treated as all indices are not set. +func (f signerIndicesFactory) WithIndices(authorizedSigners int, indices []int) SignerIndicesOption { + return func(g *SignerIndicesGenerator, config *signerIndicesConfig) { + config.authorizedSigners = authorizedSigners + config.indices = indices + } +} + +// SignerIndicesGenerator generates signer indices with consistent randomness. +type SignerIndicesGenerator struct { + signerIndicesFactory + + random *RandomGenerator +} + +func NewSignerIndicesGenerator(random *RandomGenerator) *SignerIndicesGenerator { + return &SignerIndicesGenerator{ + random: random, + } +} + +// Fixture generates signer indices with random data based on the provided options. +// Uses default 10-bit vector size and count of 3 signers. 
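+//
+// For example (an illustrative sketch; 3 random bits are set out of 10 authorized signers,
+// mirroring the default used by the quorum certificate generator):
+//
+//	indices := g.Fixture(SignerIndices.WithSignerCount(10, 3))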
+func (g *SignerIndicesGenerator) Fixture(opts ...SignerIndicesOption) []byte { + config := &signerIndicesConfig{ + authorizedSigners: 10, + contributingSigners: 3, + } + + for _, opt := range opts { + opt(g, config) + } + + Assert(config.authorizedSigners > 0, "authorizedSigners must be greater than 0") + Assert(config.contributingSigners >= 0, "contributingSigners must be greater than or equal to 0") + Assert(config.contributingSigners <= config.authorizedSigners, "contributingSigners must be less than or equal to authorizedSigners") + + indices := bitutils.MakeBitVector(config.authorizedSigners) + + if config.indices != nil { + for _, i := range config.indices { + Assert(i >= 0 && i < config.authorizedSigners, "index must be within the total number of indices") + bitutils.SetBit(indices, i) + } + return indices + } + + // special case to avoid looping when all indices are set + if config.contributingSigners == config.authorizedSigners { + for i := range config.authorizedSigners { + bitutils.SetBit(indices, i) + } + return indices + } + + // choose `contributingSigners` random indices from the total + count := 0 + for count < config.contributingSigners { + index := g.random.Intn(config.authorizedSigners) + + // only count unset bits to ensure that we set the correct number of unique indices + if bitutils.ReadBit(indices, index) == 0 { + bitutils.SetBit(indices, index) + count++ + } + } + + return indices +} + +// List generates a list of signer indices. +// Uses default 10-bit vector size and count of 3 signers. +func (g *SignerIndicesGenerator) List(n int, opts ...SignerIndicesOption) [][]byte { + list := make([][]byte, n) + for i := range n { + list[i] = g.Fixture(opts...) + } + return list +} diff --git a/utils/unittest/fixtures/state_commitment.go b/utils/unittest/fixtures/state_commitment.go new file mode 100644 index 00000000000..6a81ffe9c67 --- /dev/null +++ b/utils/unittest/fixtures/state_commitment.go @@ -0,0 +1,64 @@ +package fixtures + +import ( + "github.com/onflow/flow-go/ledger/common/hash" + "github.com/onflow/flow-go/model/flow" +) + +// StateCommitment is the default options factory for [flow.StateCommitment] generation. +var StateCommitment stateCommitmentFactory + +type stateCommitmentFactory struct{} + +type StateCommitmentOption func(*StateCommitmentGenerator, *flow.StateCommitment) + +// WithHash is an option that sets the hash of the state commitment. +func (f stateCommitmentFactory) WithHash(h hash.Hash) StateCommitmentOption { + return func(g *StateCommitmentGenerator, sc *flow.StateCommitment) { + *sc = flow.StateCommitment(h) + } +} + +// WithEmptyState is an option that sets the state commitment to empty. +func (f stateCommitmentFactory) WithEmptyState() StateCommitmentOption { + return func(g *StateCommitmentGenerator, sc *flow.StateCommitment) { + *sc = flow.EmptyStateCommitment + } +} + +// StateCommitmentGenerator generates state commitments with consistent randomness. +type StateCommitmentGenerator struct { + stateCommitmentFactory + random *RandomGenerator +} + +// NewStateCommitmentGenerator creates a new StateCommitmentGenerator. +func NewStateCommitmentGenerator(random *RandomGenerator) *StateCommitmentGenerator { + return &StateCommitmentGenerator{ + random: random, + } +} + +// Fixture generates a [flow.StateCommitment] with random data based on the provided options. 
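+//
+// For example (illustrative):
+//
+//	sc := g.Fixture(StateCommitment.WithEmptyState()) // yields flow.EmptyStateCommitment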
+func (g *StateCommitmentGenerator) Fixture(opts ...StateCommitmentOption) flow.StateCommitment {
+	var sc flow.StateCommitment
+
+	// Generate a random 32-byte hash
+	randomBytes := g.random.RandomBytes(32)
+	copy(sc[:], randomBytes)
+
+	for _, opt := range opts {
+		opt(g, &sc)
+	}
+
+	return sc
+}
+
+// List generates a list of [flow.StateCommitment].
+func (g *StateCommitmentGenerator) List(n int, opts ...StateCommitmentOption) []flow.StateCommitment {
+	commitments := make([]flow.StateCommitment, n)
+	for i := range n {
+		commitments[i] = g.Fixture(opts...)
+	}
+	return commitments
+}
diff --git a/utils/unittest/fixtures/time.go b/utils/unittest/fixtures/time.go
new file mode 100644
index 00000000000..ca56d5deeaf
--- /dev/null
+++ b/utils/unittest/fixtures/time.go
@@ -0,0 +1,105 @@
+package fixtures
+
+import (
+	"time"
+)
+
+const (
+	defaultOffset = int64(10 * 365 * 24 * time.Hour) // 10 years
+)
+
+// Time is the default options factory for [time.Time] generation.
+var Time timeFactory
+
+type timeFactory struct{}
+
+type TimeOption func(*TimeGenerator, *timeConfig)
+
+// timeConfig holds the configuration for time generation.
+type timeConfig struct {
+	baseTime time.Time
+	offset   time.Duration
+	timezone *time.Location
+}
+
+// WithBaseTime is an option that sets the time produced by the generator.
+// This will be the exact time. If you want to include a random jitter, use the
+// WithOffsetRandom option in combination with this option.
+func (f timeFactory) WithBaseTime(baseTime time.Time) TimeOption {
+	return func(g *TimeGenerator, config *timeConfig) {
+		config.baseTime = baseTime
+		config.offset = 0
+	}
+}
+
+// WithTimezone is an option that sets the timezone for time generation.
+func (f timeFactory) WithTimezone(tz *time.Location) TimeOption {
+	return func(g *TimeGenerator, config *timeConfig) {
+		config.timezone = tz
+	}
+}
+
+// WithOffset is an option that sets the offset from the base time.
+// This sets the exact offset of the base time.
+func (f timeFactory) WithOffset(offset time.Duration) TimeOption {
+	return func(g *TimeGenerator, config *timeConfig) {
+		config.offset = offset
+	}
+}
+
+// WithOffsetRandom is an option that sets a random offset from the base time in the range [0, max).
+func (f timeFactory) WithOffsetRandom(max time.Duration) TimeOption {
+	return func(g *TimeGenerator, config *timeConfig) {
+		config.offset = time.Duration(g.random.Intn(int(max)))
+	}
+}
+
+// TimeGenerator generates [time.Time] values with consistent randomness.
+type TimeGenerator struct {
+	timeFactory
+
+	random *RandomGenerator
+}
+
+func NewTimeGenerator(
+	random *RandomGenerator,
+) *TimeGenerator {
+	return &TimeGenerator{
+		random: random,
+	}
+}
+
+// Fixture generates a [time.Time] value.
+// Uses default base time of 2020-07-14T16:00:00Z and timezone of UTC.
+// The default offset is within 10 years of 2020-07-14T16:00:00Z.
+func (g *TimeGenerator) Fixture(opts ...TimeOption) time.Time {
+	defaultBaseTime := time.Date(2020, 7, 14, 16, 0, 0, 0, time.UTC) // 2020-07-14T16:00:00Z
+	randomOffset := time.Duration(g.random.Int63n(defaultOffset))
+	config := &timeConfig{
+		baseTime: defaultBaseTime,
+		offset:   randomOffset,
+		timezone: time.UTC,
+	}
+
+	for _, opt := range opts {
+		opt(g, config)
+	}
+
+	// Apply timezone if it differs from the base time's location
+	if config.timezone != config.baseTime.Location() {
+		config.baseTime = config.baseTime.In(config.timezone)
+	}
+
+	return config.baseTime.Add(config.offset)
+}
+
+// List generates a list of [time.Time] values.
+// Uses default base time of 2020-07-14T16:00:00Z and timezone of UTC. +// The default offset is within 10 years of 2020-07-14T16:00:00Z. +func (g *TimeGenerator) List(n int, opts ...TimeOption) []time.Time { + times := make([]time.Time, n) + for i := range n { + times[i] = g.Fixture(opts...) + } + return times +} diff --git a/utils/unittest/fixtures/timeout_certificate.go b/utils/unittest/fixtures/timeout_certificate.go new file mode 100644 index 00000000000..c9594c3862e --- /dev/null +++ b/utils/unittest/fixtures/timeout_certificate.go @@ -0,0 +1,111 @@ +package fixtures + +import ( + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/slices" +) + +// TimeoutCertificate is the default options factory for [flow.TimeoutCertificate] generation. +var TimeoutCertificate timeoutCertificateFactory + +type timeoutCertificateFactory struct{} + +type TimeoutCertificateOption func(*TimeoutCertificateGenerator, *flow.TimeoutCertificate) + +// WithView is an option that sets the `View` of the timeout certificate. +func (f timeoutCertificateFactory) WithView(view uint64) TimeoutCertificateOption { + return func(g *TimeoutCertificateGenerator, tc *flow.TimeoutCertificate) { + tc.View = view + } +} + +// WithNewestQC is an option that sets the `NewestQC` of the timeout certificate. +func (f timeoutCertificateFactory) WithNewestQC(qc *flow.QuorumCertificate) TimeoutCertificateOption { + return func(g *TimeoutCertificateGenerator, tc *flow.TimeoutCertificate) { + tc.NewestQC = qc + } +} + +// WithNewestQCViews is an option that sets the `NewestQCViews` of the timeout certificate. +func (f timeoutCertificateFactory) WithNewestQCViews(views []uint64) TimeoutCertificateOption { + return func(g *TimeoutCertificateGenerator, tc *flow.TimeoutCertificate) { + tc.NewestQCViews = views + } +} + +// WithSignerIndices is an option that sets the `SignerIndices` of the timeout certificate. +func (f timeoutCertificateFactory) WithSignerIndices(indices []byte) TimeoutCertificateOption { + return func(g *TimeoutCertificateGenerator, tc *flow.TimeoutCertificate) { + tc.SignerIndices = indices + } +} + +// WithSigData is an option that sets the `SigData` of the timeout certificate. +func (f timeoutCertificateFactory) WithSigData(sigData []byte) TimeoutCertificateOption { + return func(g *TimeoutCertificateGenerator, tc *flow.TimeoutCertificate) { + tc.SigData = sigData + } +} + +// TimeoutCertificateGenerator generates timeout certificates with consistent randomness. +type TimeoutCertificateGenerator struct { + timeoutCertificateFactory + + random *RandomGenerator + quorumCerts *QuorumCertificateGenerator + signatures *SignatureGenerator + signerIndices *SignerIndicesGenerator +} + +// NewTimeoutCertificateGenerator creates a new TimeoutCertificateGenerator. +func NewTimeoutCertificateGenerator( + random *RandomGenerator, + quorumCerts *QuorumCertificateGenerator, + signatures *SignatureGenerator, + signerIndices *SignerIndicesGenerator, +) *TimeoutCertificateGenerator { + return &TimeoutCertificateGenerator{ + random: random, + quorumCerts: quorumCerts, + signatures: signatures, + signerIndices: signerIndices, + } +} + +// Fixture generates a [flow.TimeoutCertificate] with random data based on the provided options. 
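+//
+// Example usage (a sketch; `g` is a TimeoutCertificateGenerator):
+//
+//	tc := g.Fixture(TimeoutCertificate.WithView(100)) // TC for view 100; remaining fields stay randomized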
+func (g *TimeoutCertificateGenerator) Fixture(opts ...TimeoutCertificateOption) *flow.TimeoutCertificate {
+	// use a view of at least 1, so the QC below can be generated for view-1 without underflowing
+	view := uint64(1 + g.random.Uint32())
+
+	contributingSignerCount := g.random.IntInRange(3, 7)
+	signerIndices := g.signerIndices.Fixture(SignerIndices.WithSignerCount(10, contributingSignerCount))
+
+	newestQC := g.quorumCerts.Fixture(QuorumCertificate.WithView(view - 1))
+
+	// newestQCViews should have an entry for each contributing signer; use the same view for each
+	newestQCViews := slices.Fill(newestQC.View, contributingSignerCount)
+
+	// assemble the timeout certificate as a struct literal; the fields above are
+	// chosen to be mutually consistent (no constructor validation is applied here)
+	tc := &flow.TimeoutCertificate{
+		View:          view,
+		NewestQCViews: newestQCViews,
+		NewestQC:      newestQC,
+		SignerIndices: signerIndices,
+		SigData:       g.signatures.Fixture(),
+	}
+
+	for _, opt := range opts {
+		opt(g, tc)
+	}
+
+	return tc
+}
+
+// List generates a list of [flow.TimeoutCertificate].
+func (g *TimeoutCertificateGenerator) List(n int, opts ...TimeoutCertificateOption) []*flow.TimeoutCertificate {
+	certificates := make([]*flow.TimeoutCertificate, n)
+	for i := range n {
+		certificates[i] = g.Fixture(opts...)
+	}
+	return certificates
+}
diff --git a/utils/unittest/fixtures/transaction.go b/utils/unittest/fixtures/transaction.go
new file mode 100644
index 00000000000..6015f2c7424
--- /dev/null
+++ b/utils/unittest/fixtures/transaction.go
@@ -0,0 +1,125 @@
+package fixtures
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// Transaction is the default options factory for [flow.TransactionBody] generation.
+var Transaction transactionFactory
+
+type transactionFactory struct{}
+
+type TransactionOption func(*TransactionGenerator, *flow.TransactionBody)
+
+// WithScript is an option that sets the script for the transaction.
+func (f transactionFactory) WithScript(script []byte) TransactionOption {
+	return func(g *TransactionGenerator, tx *flow.TransactionBody) {
+		tx.Script = script
+	}
+}
+
+// WithReferenceBlockID is an option that sets the reference block ID for the transaction.
+func (f transactionFactory) WithReferenceBlockID(blockID flow.Identifier) TransactionOption {
+	return func(g *TransactionGenerator, tx *flow.TransactionBody) {
+		tx.ReferenceBlockID = blockID
+	}
+}
+
+// WithGasLimit is an option that sets the gas limit for the transaction.
+func (f transactionFactory) WithGasLimit(gasLimit uint64) TransactionOption {
+	return func(g *TransactionGenerator, tx *flow.TransactionBody) {
+		tx.GasLimit = gasLimit
+	}
+}
+
+// WithProposalKey is an option that sets the proposal key for the transaction.
+func (f transactionFactory) WithProposalKey(proposalKey flow.ProposalKey) TransactionOption {
+	return func(g *TransactionGenerator, tx *flow.TransactionBody) {
+		tx.ProposalKey = proposalKey
+	}
+}
+
+// WithPayer is an option that sets the payer for the transaction.
+func (f transactionFactory) WithPayer(payer flow.Address) TransactionOption {
+	return func(g *TransactionGenerator, tx *flow.TransactionBody) {
+		tx.Payer = payer
+	}
+}
+
+// WithAuthorizers is an option that sets the authorizers for the transaction.
+func (f transactionFactory) WithAuthorizers(authorizers ...flow.Address) TransactionOption {
+	return func(g *TransactionGenerator, tx *flow.TransactionBody) {
+		tx.Authorizers = authorizers
+	}
+}
+
+// WithEnvelopeSignatures is an option that sets the envelope signatures for the transaction.
+func (f transactionFactory) WithEnvelopeSignatures(signatures ...flow.TransactionSignature) TransactionOption { + return func(g *TransactionGenerator, tx *flow.TransactionBody) { + tx.EnvelopeSignatures = signatures + } +} + +// TransactionGenerator generates transactions with consistent randomness. +type TransactionGenerator struct { + transactionFactory + + identifiers *IdentifierGenerator + proposalKeys *ProposalKeyGenerator + addresses *AddressGenerator + transactionSigs *TransactionSignatureGenerator +} + +func NewTransactionGenerator( + identifiers *IdentifierGenerator, + proposalKeys *ProposalKeyGenerator, + addresses *AddressGenerator, + transactionSigs *TransactionSignatureGenerator, +) *TransactionGenerator { + return &TransactionGenerator{ + identifiers: identifiers, + proposalKeys: proposalKeys, + addresses: addresses, + transactionSigs: transactionSigs, + } +} + +// Fixture generates a [flow.TransactionBody] with random data based on the provided options. +func (g *TransactionGenerator) Fixture(opts ...TransactionOption) *flow.TransactionBody { + tx := &flow.TransactionBody{ + ReferenceBlockID: g.identifiers.Fixture(), + Script: []byte("access(all) fun main() {}"), + GasLimit: 10, + ProposalKey: g.proposalKeys.Fixture(), + Payer: g.addresses.Fixture(), + Authorizers: []flow.Address{g.addresses.Fixture()}, + } + + for _, opt := range opts { + opt(g, tx) + } + + if len(tx.EnvelopeSignatures) == 0 { + // use the proposer to sign the envelope. this ensures the metadata is consistent, + // allowing the transaction to be properly serialized and deserialized over protobuf. + // if the signature does not match the proposer, the signer index resolved during + // deserialization will be incorrect. + tx.EnvelopeSignatures = []flow.TransactionSignature{ + g.transactionSigs.Fixture( + TransactionSignature.WithAddress(tx.ProposalKey.Address), + TransactionSignature.WithSignerIndex(0), // proposer should be index 0 + ), + } + } + + return tx +} + +// List generates a list of [flow.TransactionBody]. +func (g *TransactionGenerator) List(n int, opts ...TransactionOption) []*flow.TransactionBody { + list := make([]*flow.TransactionBody, n) + for i := range n { + list[i] = g.Fixture(opts...) + } + return list +} diff --git a/utils/unittest/fixtures/transaction_error_messages.go b/utils/unittest/fixtures/transaction_error_messages.go new file mode 100644 index 00000000000..0c7030060f7 --- /dev/null +++ b/utils/unittest/fixtures/transaction_error_messages.go @@ -0,0 +1,97 @@ +package fixtures + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" +) + +// TransactionErrorMessage is the default options factory for [flow.TransactionResultErrorMessage] generation. +var TransactionErrorMessage transactionErrorMessageFactory + +type transactionErrorMessageFactory struct{} + +type TransactionErrorMessageOption func(*TransactionErrorMessageGenerator, *flow.TransactionResultErrorMessage) + +// WithTransactionID is an option that sets the transaction ID for the transaction result error message. +func (f transactionErrorMessageFactory) WithTransactionID(transactionID flow.Identifier) TransactionErrorMessageOption { + return func(g *TransactionErrorMessageGenerator, err *flow.TransactionResultErrorMessage) { + err.TransactionID = transactionID + } +} + +// WithIndex is an option that sets the index for the transaction result error message. 
+func (f transactionErrorMessageFactory) WithIndex(index uint32) TransactionErrorMessageOption {
+	return func(g *TransactionErrorMessageGenerator, err *flow.TransactionResultErrorMessage) {
+		err.Index = index
+	}
+}
+
+// WithErrorMessage is an option that sets the error message for the transaction result error message.
+func (f transactionErrorMessageFactory) WithErrorMessage(errorMessage string) TransactionErrorMessageOption {
+	return func(g *TransactionErrorMessageGenerator, err *flow.TransactionResultErrorMessage) {
+		err.ErrorMessage = errorMessage
+	}
+}
+
+// WithExecutorID is an option that sets the executor ID for the transaction result error message.
+func (f transactionErrorMessageFactory) WithExecutorID(executorID flow.Identifier) TransactionErrorMessageOption {
+	return func(g *TransactionErrorMessageGenerator, err *flow.TransactionResultErrorMessage) {
+		err.ExecutorID = executorID
+	}
+}
+
+// TransactionErrorMessageGenerator generates transaction result error messages with consistent randomness.
+type TransactionErrorMessageGenerator struct {
+	transactionErrorMessageFactory
+
+	random      *RandomGenerator
+	identifiers *IdentifierGenerator
+}
+
+func NewTransactionErrorMessageGenerator(
+	random *RandomGenerator,
+	identifiers *IdentifierGenerator,
+) *TransactionErrorMessageGenerator {
+	return &TransactionErrorMessageGenerator{
+		random:      random,
+		identifiers: identifiers,
+	}
+}
+
+// Fixture generates a [flow.TransactionResultErrorMessage] with random data based on the provided options.
+func (g *TransactionErrorMessageGenerator) Fixture(opts ...TransactionErrorMessageOption) flow.TransactionResultErrorMessage {
+	transactionID := g.identifiers.Fixture()
+	txErrMsg := flow.TransactionResultErrorMessage{
+		TransactionID: transactionID,
+		Index:         g.random.Uint32InRange(0, 100),
+		ErrorMessage:  fmt.Sprintf("transaction error for %s", transactionID),
+		ExecutorID:    g.identifiers.Fixture(),
+	}
+
+	for _, opt := range opts {
+		opt(g, &txErrMsg)
+	}
+
+	return txErrMsg
+}
+
+// List generates a list of [flow.TransactionResultErrorMessage].
+func (g *TransactionErrorMessageGenerator) List(n int, opts ...TransactionErrorMessageOption) []flow.TransactionResultErrorMessage {
+	list := make([]flow.TransactionResultErrorMessage, n)
+	for i := range n {
+		list[i] = g.Fixture(opts...)
+	}
+	return list
+}
+
+// ForTransactionResults generates a [flow.TransactionResultErrorMessage] for each failed result
+// in the provided list of [flow.LightTransactionResult].
+func (g *TransactionErrorMessageGenerator) ForTransactionResults(transactionResults []flow.LightTransactionResult) []flow.TransactionResultErrorMessage {
+	txErrMsgs := make([]flow.TransactionResultErrorMessage, 0)
+	for i, result := range transactionResults {
+		if result.Failed {
+			txErrMsgs = append(txErrMsgs, g.Fixture(
+				g.WithTransactionID(result.TransactionID),
+				g.WithIndex(uint32(i)),
+				g.WithErrorMessage(fmt.Sprintf("transaction error for %s", result.TransactionID)),
+			))
+		}
+	}
+	return txErrMsgs
+}
diff --git a/utils/unittest/fixtures/transaction_result.go b/utils/unittest/fixtures/transaction_result.go
new file mode 100644
index 00000000000..8db06f402bf
--- /dev/null
+++ b/utils/unittest/fixtures/transaction_result.go
@@ -0,0 +1,139 @@
+package fixtures
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// TransactionResult is the default options factory for [flow.TransactionResult] generation.
+var TransactionResult transactionResultFactory
+
+type transactionResultFactory struct{}
+
+type TransactionResultOption func(*TransactionResultGenerator, *flow.TransactionResult)
+
+// WithTransactionID is an option that sets the transaction ID for the transaction result.
+func (f transactionResultFactory) WithTransactionID(transactionID flow.Identifier) TransactionResultOption {
+	return func(g *TransactionResultGenerator, result *flow.TransactionResult) {
+		result.TransactionID = transactionID
+	}
+}
+
+// WithErrorMessage is an option that sets the error message for the transaction result.
+func (f transactionResultFactory) WithErrorMessage(errorMessage string) TransactionResultOption {
+	return func(g *TransactionResultGenerator, result *flow.TransactionResult) {
+		result.ErrorMessage = errorMessage
+	}
+}
+
+// WithComputationUsed is an option that sets the computation used for the transaction result.
+func (f transactionResultFactory) WithComputationUsed(computationUsed uint64) TransactionResultOption {
+	return func(g *TransactionResultGenerator, result *flow.TransactionResult) {
+		result.ComputationUsed = computationUsed
+	}
+}
+
+// TransactionResultGenerator generates transaction results with consistent randomness.
+type TransactionResultGenerator struct {
+	transactionResultFactory
+
+	random      *RandomGenerator
+	identifiers *IdentifierGenerator
+}
+
+func NewTransactionResultGenerator(
+	random *RandomGenerator,
+	identifiers *IdentifierGenerator,
+) *TransactionResultGenerator {
+	return &TransactionResultGenerator{
+		random:      random,
+		identifiers: identifiers,
+	}
+}
+
+// Fixture generates a [flow.TransactionResult] with random data based on the provided options.
+func (g *TransactionResultGenerator) Fixture(opts ...TransactionResultOption) flow.TransactionResult {
+	result := flow.TransactionResult{
+		TransactionID:   g.identifiers.Fixture(),
+		ComputationUsed: g.random.Uint64InRange(1, 9999),
+		ErrorMessage:    "",
+	}
+
+	for _, opt := range opts {
+		opt(g, &result)
+	}
+
+	return result
+}
+
+// List generates a list of [flow.TransactionResult].
+func (g *TransactionResultGenerator) List(n int, opts ...TransactionResultOption) []flow.TransactionResult {
+	list := make([]flow.TransactionResult, n)
+	for i := range n {
+		list[i] = g.Fixture(opts...)
+	}
+	return list
+}
+
+// LightTransactionResult is the default options factory for [flow.LightTransactionResult] generation.
+var LightTransactionResult lightTransactionResultFactory
+
+type lightTransactionResultFactory struct{}
+
+type LightTransactionResultOption func(*LightTransactionResultGenerator, *flow.LightTransactionResult)
+
+// WithTransactionID is an option that sets the transaction ID for the transaction result.
+func (f lightTransactionResultFactory) WithTransactionID(transactionID flow.Identifier) LightTransactionResultOption {
+	return func(g *LightTransactionResultGenerator, result *flow.LightTransactionResult) {
+		result.TransactionID = transactionID
+	}
+}
+
+// WithFailed is an option that sets the failed status for the transaction result.
+func (f lightTransactionResultFactory) WithFailed(failed bool) LightTransactionResultOption {
+	return func(g *LightTransactionResultGenerator, result *flow.LightTransactionResult) {
+		result.Failed = failed
+	}
+}
+
+// WithComputationUsed is an option that sets the computation used for the transaction result.
+func (f lightTransactionResultFactory) WithComputationUsed(computationUsed uint64) LightTransactionResultOption {
+	return func(g *LightTransactionResultGenerator, result *flow.LightTransactionResult) {
+		result.ComputationUsed = computationUsed
+	}
+}
+
+// LightTransactionResultGenerator generates light transaction results with consistent randomness.
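+//
+// Example usage (a sketch; `g` is a LightTransactionResultGenerator):
+//
+//	failed := g.Fixture(LightTransactionResult.WithFailed(true))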
+type LightTransactionResultGenerator struct { + *TransactionResultGenerator +} + +func NewLightTransactionResultGenerator( + txResults *TransactionResultGenerator, +) *LightTransactionResultGenerator { + return &LightTransactionResultGenerator{ + TransactionResultGenerator: txResults, + } +} + +// Fixture generates a [flow.LightTransactionResult] with random data based on the provided options. +func (g *LightTransactionResultGenerator) Fixture(opts ...LightTransactionResultOption) flow.LightTransactionResult { + result := flow.LightTransactionResult{ + TransactionID: g.identifiers.Fixture(), + ComputationUsed: g.random.Uint64InRange(1, 9999), + Failed: false, + } + + for _, opt := range opts { + opt(g, &result) + } + + return result +} + +// List generates a list of [flow.LightTransactionResult]. +func (g *LightTransactionResultGenerator) List(n int, opts ...LightTransactionResultOption) []flow.LightTransactionResult { + list := make([]flow.LightTransactionResult, n) + for i := range n { + list[i] = g.Fixture(opts...) + } + return list +} diff --git a/utils/unittest/fixtures/transaction_signature.go b/utils/unittest/fixtures/transaction_signature.go new file mode 100644 index 00000000000..145c1bfbf7c --- /dev/null +++ b/utils/unittest/fixtures/transaction_signature.go @@ -0,0 +1,99 @@ +package fixtures + +import ( + "github.com/onflow/crypto" + + "github.com/onflow/flow-go/model/flow" +) + +// TransactionSignature is the default options factory for [flow.TransactionSignature] generation. +var TransactionSignature transactionSignatureFactory + +type transactionSignatureFactory struct{} + +type TransactionSignatureOption func(*TransactionSignatureGenerator, *flow.TransactionSignature) + +// WithAddress is an option that sets the address for the transaction signature. +func (f transactionSignatureFactory) WithAddress(address flow.Address) TransactionSignatureOption { + return func(g *TransactionSignatureGenerator, signature *flow.TransactionSignature) { + signature.Address = address + } +} + +// WithSignerIndex is an option that sets the signer index for the transaction signature. +func (f transactionSignatureFactory) WithSignerIndex(signerIndex int) TransactionSignatureOption { + return func(g *TransactionSignatureGenerator, signature *flow.TransactionSignature) { + signature.SignerIndex = signerIndex + } +} + +// WithSignature is an option that sets the signature for the transaction signature. +func (f transactionSignatureFactory) WithSignature(sig crypto.Signature) TransactionSignatureOption { + return func(g *TransactionSignatureGenerator, signature *flow.TransactionSignature) { + signature.Signature = sig + } +} + +// WithKeyIndex is an option that sets the key index for the transaction signature. +func (f transactionSignatureFactory) WithKeyIndex(keyIndex uint32) TransactionSignatureOption { + return func(g *TransactionSignatureGenerator, signature *flow.TransactionSignature) { + signature.KeyIndex = keyIndex + } +} + +// TransactionSignatureGenerator generates transaction signatures with consistent randomness. +type TransactionSignatureGenerator struct { + transactionSignatureFactory + + random *RandomGenerator + addresses *AddressGenerator +} + +func NewTransactionSignatureGenerator( + random *RandomGenerator, + addresses *AddressGenerator, +) *TransactionSignatureGenerator { + return &TransactionSignatureGenerator{ + random: random, + addresses: addresses, + } +} + +// Fixture generates a [flow.TransactionSignature] with random data based on the provided options. 
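+//
+// Example usage (a sketch; `g` is a TransactionSignatureGenerator and `addr` is an existing flow.Address):
+//
+//	sig := g.Fixture(
+//		TransactionSignature.WithAddress(addr),
+//		TransactionSignature.WithKeyIndex(0),
+//	)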
+func (g *TransactionSignatureGenerator) Fixture(opts ...TransactionSignatureOption) flow.TransactionSignature {
+	signature := flow.TransactionSignature{
+		Address:     g.addresses.Fixture(),
+		SignerIndex: 0,
+		Signature:   g.generateValidSignature(),
+		KeyIndex:    1,
+	}
+
+	for _, opt := range opts {
+		opt(g, &signature)
+	}
+
+	return signature
+}
+
+// List generates a list of [flow.TransactionSignature].
+func (g *TransactionSignatureGenerator) List(n int, opts ...TransactionSignatureOption) []flow.TransactionSignature {
+	list := make([]flow.TransactionSignature, n)
+	for i := range n {
+		list[i] = g.Fixture(opts...)
+	}
+	return list
+}
+
+// generateValidSignature generates random bytes with the length and shape of an
+// ECDSA P-256 signature, adjusted so they pass the signature format checks.
+// The result is NOT a cryptographically valid signature.
+func (g *TransactionSignatureGenerator) generateValidSignature() crypto.Signature {
+	sigLen := crypto.SignatureLenECDSAP256
+	signature := g.random.RandomBytes(sigLen)
+
+	// Make sure the ECDSA signature passes the format check
+	signature[sigLen/2] = 0
+	signature[0] = 0
+	signature[sigLen/2-1] |= 1
+	signature[sigLen-1] |= 1
+
+	return signature
+}
diff --git a/utils/unittest/fixtures/trie_update.go b/utils/unittest/fixtures/trie_update.go
new file mode 100644
index 00000000000..36812b6b2a4
--- /dev/null
+++ b/utils/unittest/fixtures/trie_update.go
@@ -0,0 +1,117 @@
+package fixtures
+
+import (
+	"github.com/onflow/flow-go/ledger"
+	"github.com/onflow/flow-go/ledger/common/testutils"
+)
+
+// TrieUpdate is the default options factory for [ledger.TrieUpdate] generation.
+var TrieUpdate trieUpdateFactory
+
+type trieUpdateFactory struct{}
+
+type TrieUpdateOption func(*TrieUpdateGenerator, *trieUpdateConfig)
+
+// trieUpdateConfig holds the configuration for trie update generation.
+type trieUpdateConfig struct {
+	trieUpdate *ledger.TrieUpdate
+	numPaths   int
+	minSize    int
+	maxSize    int
+}
+
+// WithRootHash is an option that sets the root hash for the trie update.
+func (f trieUpdateFactory) WithRootHash(rootHash ledger.RootHash) TrieUpdateOption {
+	return func(g *TrieUpdateGenerator, config *trieUpdateConfig) {
+		config.trieUpdate.RootHash = rootHash
+	}
+}
+
+// WithPaths is an option that sets the paths for the trie update.
+func (f trieUpdateFactory) WithPaths(paths ...ledger.Path) TrieUpdateOption {
+	return func(g *TrieUpdateGenerator, config *trieUpdateConfig) {
+		config.trieUpdate.Paths = paths
+	}
+}
+
+// WithPayloads is an option that sets the payloads for the trie update.
+func (f trieUpdateFactory) WithPayloads(payloads ...*ledger.Payload) TrieUpdateOption {
+	return func(g *TrieUpdateGenerator, config *trieUpdateConfig) {
+		config.trieUpdate.Payloads = payloads
+	}
+}
+
+// WithNumPaths is an option that sets the number of paths for the trie update.
+func (f trieUpdateFactory) WithNumPaths(numPaths int) TrieUpdateOption {
+	return func(g *TrieUpdateGenerator, config *trieUpdateConfig) {
+		config.numPaths = numPaths
+	}
+}
+
+// WithPayloadSize is an option that sets the payload size range for the trie update.
+func (f trieUpdateFactory) WithPayloadSize(minSize, maxSize int) TrieUpdateOption {
+	return func(g *TrieUpdateGenerator, config *trieUpdateConfig) {
+		config.minSize = minSize
+		config.maxSize = maxSize
+	}
+}
+
+// TrieUpdateGenerator generates trie updates with consistent randomness.
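+//
+// Example usage (a sketch; `g` is a TrieUpdateGenerator):
+//
+//	update := g.Fixture(TrieUpdate.WithNumPaths(4), TrieUpdate.WithPayloadSize(8, 32))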
+type TrieUpdateGenerator struct {
+	trieUpdateFactory
+
+	random         *RandomGenerator
+	ledgerPaths    *LedgerPathGenerator
+	ledgerPayloads *LedgerPayloadGenerator
+}
+
+func NewTrieUpdateGenerator(
+	random *RandomGenerator,
+	ledgerPaths *LedgerPathGenerator,
+	ledgerPayloads *LedgerPayloadGenerator,
+) *TrieUpdateGenerator {
+	return &TrieUpdateGenerator{
+		random:         random,
+		ledgerPaths:    ledgerPaths,
+		ledgerPayloads: ledgerPayloads,
+	}
+}
+
+// Fixture generates a [ledger.TrieUpdate] with random data based on the provided options.
+func (g *TrieUpdateGenerator) Fixture(opts ...TrieUpdateOption) *ledger.TrieUpdate {
+	config := &trieUpdateConfig{
+		trieUpdate: &ledger.TrieUpdate{
+			RootHash: testutils.RootHashFixture(),
+			Paths:    nil,
+			Payloads: nil,
+		},
+		numPaths: 2,
+		minSize:  1,
+		maxSize:  8,
+	}
+
+	for _, opt := range opts {
+		opt(g, config)
+	}
+
+	// Generate paths and payloads if not provided
+	if config.trieUpdate.Paths == nil {
+		config.trieUpdate.Paths = g.ledgerPaths.List(config.numPaths)
+	}
+	if config.trieUpdate.Payloads == nil {
+		config.trieUpdate.Payloads = g.ledgerPayloads.List(config.numPaths, LedgerPayload.WithSize(config.minSize, config.maxSize))
+	}
+
+	Assertf(len(config.trieUpdate.Paths) == len(config.trieUpdate.Payloads), "paths and payloads must have the same length")
+
+	return config.trieUpdate
+}
+
+// List generates a list of [ledger.TrieUpdate].
+func (g *TrieUpdateGenerator) List(n int, opts ...TrieUpdateOption) []*ledger.TrieUpdate {
+	list := make([]*ledger.TrieUpdate, n)
+	for i := range n {
+		list[i] = g.Fixture(opts...)
+	}
+	return list
+}
diff --git a/utils/unittest/fixtures/util.go b/utils/unittest/fixtures/util.go
new file mode 100644
index 00000000000..e471912bbc2
--- /dev/null
+++ b/utils/unittest/fixtures/util.go
@@ -0,0 +1,22 @@
+package fixtures
+
+import "fmt"
+
+// NoError panics if the error is not nil.
+func NoError(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
+
+// Assert panics with the provided message if the conditional is false.
+func Assert(conditional bool, msg string) {
+	if !conditional {
+		panic(msg)
+	}
+}
+
+// Assertf panics with the provided formatted message if the conditional is false.
+func Assertf(conditional bool, msg string, args ...any) {
+	Assert(conditional, fmt.Sprintf(msg, args...))
+}
diff --git a/utils/unittest/fvm.go b/utils/unittest/fvm.go
index 51e3229cdbc..d21818da571 100644
--- a/utils/unittest/fvm.go
+++ b/utils/unittest/fvm.go
@@ -5,31 +5,14 @@ import (
 
 	"github.com/stretchr/testify/require"
 
-	"github.com/onflow/flow-go/fvm/systemcontracts"
 	"github.com/onflow/flow-go/model/flow"
 )
 
-func IsServiceEvent(event flow.Event, chainID flow.ChainID) bool {
-	serviceEvents, _ := systemcontracts.ServiceEventsForChain(chainID)
-	for _, serviceEvent := range serviceEvents.All() {
-		if serviceEvent.EventType() == event.Type {
-			return true
-		}
-	}
-	return false
-}
-
 // EnsureEventsIndexSeq checks if values of given event index sequence are monotonically increasing.
 func EnsureEventsIndexSeq(t *testing.T, events []flow.Event, chainID flow.ChainID) {
 	expectedEventIndex := uint32(0)
 	for _, event := range events {
 		require.Equal(t, expectedEventIndex, event.EventIndex)
-		if IsServiceEvent(event, chainID) {
-			// TODO: we will need to address the double counting issue for service events.
- // https://github.com/onflow/flow-go/issues/3393 - expectedEventIndex += 2 - } else { - expectedEventIndex++ - } + expectedEventIndex++ } } diff --git a/utils/unittest/generator/events.go b/utils/unittest/generator/events.go deleted file mode 100644 index 117f3834007..00000000000 --- a/utils/unittest/generator/events.go +++ /dev/null @@ -1,71 +0,0 @@ -package generator - -import ( - "fmt" - - "github.com/onflow/cadence" - encoding "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime/common" - - "github.com/onflow/flow-go/model/flow" -) - -type Events struct { - count uint32 - ids *Identifiers -} - -func EventGenerator() *Events { - return &Events{ - count: 1, - ids: IdentifierGenerator(), - } -} - -func (g *Events) New() flow.Event { - location := common.StringLocation("test") - identifier := fmt.Sprintf("FooEvent%d", g.count) - typeID := location.TypeID(nil, identifier) - - testEventType := &cadence.EventType{ - Location: location, - QualifiedIdentifier: identifier, - Fields: []cadence.Field{ - { - Identifier: "a", - Type: cadence.IntType{}, - }, - { - Identifier: "b", - Type: cadence.StringType{}, - }, - }, - } - - fooString, err := cadence.NewString("foo") - if err != nil { - panic(fmt.Sprintf("unexpected error while creating cadence string: %s", err)) - } - - testEvent := cadence.NewEvent( - []cadence.Value{ - cadence.NewInt(int(g.count)), - fooString, - }).WithType(testEventType) - - payload, err := encoding.Encode(testEvent) - if err != nil { - panic(fmt.Sprintf("unexpected error while encoding events: %s", err)) - } - event := flow.Event{ - Type: flow.EventType(typeID), - TransactionID: g.ids.New(), - TransactionIndex: g.count, - EventIndex: g.count, - Payload: payload, - } - - g.count++ - - return event -} diff --git a/utils/unittest/generator/identifiers.go b/utils/unittest/generator/identifiers.go deleted file mode 100644 index 0a3a7e30a98..00000000000 --- a/utils/unittest/generator/identifiers.go +++ /dev/null @@ -1,26 +0,0 @@ -package generator - -import "github.com/onflow/flow-go/model/flow" - -type Identifiers struct { - count int -} - -func IdentifierGenerator() *Identifiers { - return &Identifiers{1} -} - -func newIdentifier(count int) flow.Identifier { - var id flow.Identifier - for i := range id { - id[i] = uint8(count) - } - - return id -} - -func (g *Identifiers) New() flow.Identifier { - id := newIdentifier(g.count + 1) - g.count++ - return id -} diff --git a/utils/unittest/identifiers.go b/utils/unittest/identifiers.go new file mode 100644 index 00000000000..896aa0cc120 --- /dev/null +++ b/utils/unittest/identifiers.go @@ -0,0 +1,26 @@ +package unittest + +import "github.com/onflow/flow-go/model/flow" + +type Identifiers struct { + count int +} + +func IdentifierGenerator() *Identifiers { + return &Identifiers{1} +} + +func newIdentifier(count int) flow.Identifier { + var id flow.Identifier + for i := range id { + id[i] = uint8(count) + } + + return id +} + +func (g *Identifiers) New() flow.Identifier { + id := newIdentifier(g.count + 1) + g.count++ + return id +} diff --git a/utils/unittest/identities.go b/utils/unittest/identities.go index ab396e3b7b9..88e239b9d26 100644 --- a/utils/unittest/identities.go +++ b/utils/unittest/identities.go @@ -12,7 +12,7 @@ func CreateNParticipantsWithMyRole(myRole flow.Role, otherRoles ...flow.Role) ( // participants := IdentityFixture(myRole) participants := make(flow.IdentityList, 0) myIdentity := IdentityFixture(WithRole(myRole)) - myID := myIdentity.ID() + myNodeID := myIdentity.NodeID participants = 
append(participants, myIdentity)
 	for _, role := range otherRoles {
 		id := IdentityFixture(WithRole(role))
@@ -23,8 +23,8 @@ func CreateNParticipantsWithMyRole(myRole flow.Role, otherRoles ...flow.Role) (
 	me := &module.Local{}
 	me.On("NodeID").Return(
 		func() flow.Identifier {
-			return myID
+			return myNodeID
 		},
 	)
-	return participants, myID, me
+	return participants, myNodeID, me
 }
diff --git a/utils/unittest/incorporated_results.go b/utils/unittest/incorporated_results.go
index e61ebf1fb56..ade11768f88 100644
--- a/utils/unittest/incorporated_results.go
+++ b/utils/unittest/incorporated_results.go
@@ -7,9 +7,9 @@ var IncorporatedResult incorporatedResultFactory
 type incorporatedResultFactory struct{}
 
 func (f *incorporatedResultFactory) Fixture(opts ...func(*flow.IncorporatedResult)) *flow.IncorporatedResult {
-	result := ExecutionResultFixture()
-	incorporatedBlockID := IdentifierFixture()
-	ir := flow.NewIncorporatedResult(incorporatedBlockID, result)
+	ir, _ := flow.NewIncorporatedResult(flow.UntrustedIncorporatedResult{
+		IncorporatedBlockID: IdentifierFixture(),
+		Result:              ExecutionResultFixture()})
 
 	for _, apply := range opts {
 		apply(ir)
diff --git a/utils/unittest/keys.go b/utils/unittest/keys.go
index 6dac2b3f703..adf300948e6 100644
--- a/utils/unittest/keys.go
+++ b/utils/unittest/keys.go
@@ -3,7 +3,7 @@ package unittest
 import (
 	"encoding/hex"
 
-	"github.com/onflow/flow-go/crypto"
+	"github.com/onflow/crypto"
 
 	"github.com/onflow/flow-go/model/encodable"
 )
diff --git a/utils/unittest/locks.go b/utils/unittest/locks.go
new file mode 100644
index 00000000000..a7387bdd31b
--- /dev/null
+++ b/utils/unittest/locks.go
@@ -0,0 +1,71 @@
+package unittest
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/jordanschalm/lockctx"
+)
+
+// WithLock creates a lock context from the given manager, acquires the given lock, then executes the function `fn`.
+// Error returns:
+// - Errors produced by the injected function `fn` are directly propagated to the caller.
+// - Errors during lock acquisition are wrapped in a StorageLockAcquisitionError.
+func WithLock(t testing.TB, manager lockctx.Manager, lockID string, fn func(lctx lockctx.Context) error) error {
+	t.Helper()
+
+	lctx := manager.NewContext()
+	defer lctx.Release()
+	err := lctx.AcquireLock(lockID)
+	if err != nil {
+		return NewStorageLockAcquisitionErrorf("failed to acquire lock %s: %w", lockID, err)
+	}
+
+	return fn(lctx)
+}
+
+// StorageLockAcquisitionError indicates that acquiring a storage lock failed.
+type StorageLockAcquisitionError struct {
+	err error
+}
+
+func NewStorageLockAcquisitionErrorf(msg string, args ...interface{}) error {
+	return StorageLockAcquisitionError{
+		err: fmt.Errorf(msg, args...),
+	}
+}
+
+func (e StorageLockAcquisitionError) Unwrap() error {
+	return e.err
+}
+
+func (e StorageLockAcquisitionError) Error() string {
+	return e.err.Error()
+}
+
+func IsStorageLockAcquisitionError(err error) bool {
+	var targetErr StorageLockAcquisitionError
+	return errors.As(err, &targetErr)
+}
+
+// WithLocks creates a lock context from the given manager, acquires the given locks, then executes the function `fn`.
+// Failures to acquire a lock and errors from `fn` are returned to the caller rather than failing the test directly.
+// Error returns:
+// - Errors produced by the injected function `fn` are directly propagated to the caller.
+// - Errors during lock acquisition are wrapped in a StorageLockAcquisitionError.
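+//
+// Example usage (a sketch; `manager` is an existing lockctx.Manager, the lock IDs are illustrative,
+// and insertBlock is a hypothetical storage operation guarded by both locks):
+//
+//	err := WithLocks(t, manager, []string{"lockA", "lockB"}, func(lctx lockctx.Context) error {
+//		return insertBlock(lctx, block)
+//	})
+//	require.NoError(t, err)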
+func WithLocks(t testing.TB, manager lockctx.Manager, lockIDs []string, fn func(lctx lockctx.Context) error) error {
+	t.Helper()
+
+	lctx := manager.NewContext()
+	defer lctx.Release()
+
+	for _, lockID := range lockIDs {
+		err := lctx.AcquireLock(lockID)
+		if err != nil {
+			return NewStorageLockAcquisitionErrorf("failed to acquire lock %s: %w", lockID, err)
+		}
+	}
+
+	return fn(lctx)
+}
diff --git a/utils/unittest/logging.go b/utils/unittest/logging.go
index ee9dd762b77..507607af822 100644
--- a/utils/unittest/logging.go
+++ b/utils/unittest/logging.go
@@ -30,7 +30,7 @@ func Logger() zerolog.Logger {
 		writer = os.Stderr
 	}
 
-	return LoggerWithWriterAndLevel(writer, zerolog.DebugLevel)
+	return LoggerWithWriterAndLevel(writer, zerolog.InfoLevel)
 }
 
 func LoggerWithWriterAndLevel(writer io.Writer, level zerolog.Level) zerolog.Logger {
@@ -41,7 +41,7 @@ func LoggerWithWriterAndLevel(writer io.Writer, level zerolog.Level) zerolog.Log
 	return log
 }
 
-// go:noinline
+//go:noinline
 func LoggerForTest(t *testing.T, level zerolog.Level) zerolog.Logger {
 	_, file, _, ok := runtime.Caller(1)
 	if !ok {
diff --git a/utils/unittest/matchers.go b/utils/unittest/matchers.go
new file mode 100644
index 00000000000..3caaabd3c6b
--- /dev/null
+++ b/utils/unittest/matchers.go
@@ -0,0 +1,18 @@
+package unittest
+
+import (
+	"github.com/stretchr/testify/mock"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+)
+
+// MatchEpochExtension matches an epoch extension argument passed to a mocked component.
+func MatchEpochExtension(finalView, extensionLen uint64) any {
+	return mock.MatchedBy(func(extension flow.EpochExtension) bool {
+		return extension.FirstView == finalView+1 && extension.FinalView == finalView+extensionLen
+	})
+}
+
+// MatchInvalidServiceEventError matches an error that is an InvalidServiceEventError.
+var MatchInvalidServiceEventError = mock.MatchedBy(func(err error) bool { return protocol.IsInvalidServiceEventError(err) })
diff --git a/utils/unittest/math.go b/utils/unittest/math.go
new file mode 100644
index 00000000000..eb44762247d
--- /dev/null
+++ b/utils/unittest/math.go
@@ -0,0 +1,61 @@
+package unittest
+
+import (
+	"math"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// RequireNumericallyClose is a wrapper around require.True that checks whether two floats
+// are numerically close. This is useful when comparing floats that are the result of a
+// computation, for example when comparing the result of a computation with a constant.
+// The two values are considered close when their relative difference is at most epsilon:
+//
+//	|a - b| / max(|a|, |b|) <= epsilon
+//
+// If either value is zero, the absolute value of the other is compared against epsilon instead.
+//
+// Example:
+//
+//	RequireNumericallyClose(t, 1.0, 1.1, 0.1)  // passes since |1.0-1.1| / 1.1 ≈ 0.091 <= 0.1
+//	RequireNumericallyClose(t, 1.0, 1.1, 0.01) // fails since |1.0-1.1| / 1.1 ≈ 0.091 > 0.01
+//	RequireNumericallyClose(t, 1.0, 1.1, 0.11) // passes since |1.0-1.1| / 1.1 ≈ 0.091 <= 0.11
+//
+// Args:
+//
+//	t: the testing.TB instance
+//	a: the first float
+//	b: the second float
+//	epsilon: the maximum allowed relative difference
+func RequireNumericallyClose(t testing.TB, a, b float64, epsilon float64, msgAndArgs ...interface{}) {
+	require.True(t, AreNumericallyClose(a, b, epsilon), msgAndArgs...)
+}
+
+// AreNumericallyClose returns true if the two floats are within epsilon of each other.
+// The two values are considered close when their relative difference is at most epsilon:
+//
+//	|a - b| / max(|a|, |b|) <= epsilon
+//
+// If either value is zero, the absolute value of the other is compared against epsilon instead.
+//
+// Example:
+//
+//	AreNumericallyClose(1.0, 1.1, 0.1)  // true since |1.0-1.1| / 1.1 ≈ 0.091 <= 0.1
+//	AreNumericallyClose(1.0, 1.1, 0.01) // false since |1.0-1.1| / 1.1 ≈ 0.091 > 0.01
+//	AreNumericallyClose(1.0, 1.1, 0.11) // true since |1.0-1.1| / 1.1 ≈ 0.091 <= 0.11
+//
+// Args:
+//	a: the first float
+//	b: the second float
+//	epsilon: the maximum allowed relative difference
+// Returns:
+//	true if the two floats are within epsilon of each other
+//	false otherwise
+func AreNumericallyClose(a, b float64, epsilon float64) bool {
+	if a == float64(0) {
+		return math.Abs(b) <= epsilon
+	}
+	if b == float64(0) {
+		return math.Abs(a) <= epsilon
+	}
+
+	numerator := math.Abs(a - b)
+	denominator := math.Max(math.Abs(a), math.Abs(b))
+	return numerator/denominator <= epsilon
+}
diff --git a/utils/unittest/math_test.go b/utils/unittest/math_test.go
new file mode 100644
index 00000000000..bfbc4547d91
--- /dev/null
+++ b/utils/unittest/math_test.go
@@ -0,0 +1,36 @@
+package unittest_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+func TestAreNumericallyClose(t *testing.T) {
+	tests := []struct {
+		name     string
+		a        float64
+		b        float64
+		epsilon  float64
+		expected bool
+	}{
+		{"close enough under epsilon", 1.0, 1.1, 0.1, true},
+		{"not close under epsilon", 1.0, 1.1, 0.01, false},
+		{"equal values", 2.0, 2.0, 0.1, true},
+		{"zero epsilon with equal values", 2.0, 2.0, 0.0, true},
+		{"zero epsilon with different values", 2.0, 2.1, 0.0, false},
+		{"first value zero", 0, 0.1, 0.1, true},
+		{"both values zero", 0, 0, 0.1, true},
+		{"negative values close enough", -1.0, -1.1, 0.1, true},
+		{"negative values not close enough", -1.0, -1.2, 0.1, false},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			actual := unittest.AreNumericallyClose(tt.a, tt.b, tt.epsilon)
+			require.Equal(t, tt.expected, actual, "test failed: %s", tt.name)
+		})
+	}
+}
diff --git a/utils/unittest/mockEntity.go b/utils/unittest/mockEntity.go
deleted file mode 100644
index 60dfca14ac7..00000000000
--- a/utils/unittest/mockEntity.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package unittest
-
-import (
-	"github.com/onflow/flow-go/model/flow"
-)
-
-// MockEntity implements a bare minimum entity for sake of test.
-type MockEntity struct { - Identifier flow.Identifier -} - -func (m MockEntity) ID() flow.Identifier { - return m.Identifier -} - -func (m MockEntity) Checksum() flow.Identifier { - return m.Identifier -} - -func EntityListFixture(n uint) []*MockEntity { - list := make([]*MockEntity, 0, n) - - for i := uint(0); i < n; i++ { - list = append(list, &MockEntity{ - Identifier: IdentifierFixture(), - }) - } - - return list -} - -func MockEntityFixture() *MockEntity { - return &MockEntity{Identifier: IdentifierFixture()} -} - -func MockEntityListFixture(count int) []*MockEntity { - entities := make([]*MockEntity, 0, count) - for i := 0; i < count; i++ { - entities = append(entities, MockEntityFixture()) - } - return entities -} diff --git a/utils/unittest/mocks/closer.go b/utils/unittest/mocks/closer.go new file mode 100644 index 00000000000..21961f0f982 --- /dev/null +++ b/utils/unittest/mocks/closer.go @@ -0,0 +1,5 @@ +package mocks + +type MockCloser struct{} + +func (mc *MockCloser) Close() error { return nil } diff --git a/utils/unittest/mocks/epoch_query.go b/utils/unittest/mocks/epoch_query.go index a624a655dd7..19117efd892 100644 --- a/utils/unittest/mocks/epoch_query.go +++ b/utils/unittest/mocks/epoch_query.go @@ -1,80 +1,134 @@ package mocks import ( + "fmt" "sync" "testing" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/state/protocol/invalid" ) -// EpochQuery implements protocol.EpochQuery for testing purposes. +// EpochQuery implements [protocol.EpochQuery] for testing purposes. // Safe for concurrent use by multiple goroutines. type EpochQuery struct { t *testing.T mu sync.RWMutex - counter uint64 // represents the current epoch - byCounter map[uint64]protocol.Epoch // all epochs + counter uint64 // represents the current epoch + committed map[uint64]protocol.CommittedEpoch // all committed epochs, by their respective epoch counter + tentative map[uint64]protocol.TentativeEpoch // only for the next epoch (counter+1) if uncommitted } -func NewEpochQuery(t *testing.T, counter uint64, epochs ...protocol.Epoch) *EpochQuery { +var _ protocol.EpochQuery = (*EpochQuery)(nil) + +func NewEpochQuery(t *testing.T, counter uint64, epochs ...protocol.CommittedEpoch) *EpochQuery { mock := &EpochQuery{ t: t, counter: counter, - byCounter: make(map[uint64]protocol.Epoch), + committed: make(map[uint64]protocol.CommittedEpoch), + tentative: make(map[uint64]protocol.TentativeEpoch), } for _, epoch := range epochs { - mock.Add(epoch) + mock.AddCommitted(epoch) } return mock } -func (mock *EpochQuery) Current() protocol.Epoch { +func (mock *EpochQuery) Current() (protocol.CommittedEpoch, error) { + mock.mu.RLock() + defer mock.mu.RUnlock() + epoch, exists := mock.committed[mock.counter] + if !exists { + return nil, fmt.Errorf("EpochQuery mock has no entry for current epoch - likely a test is not properly set up") + } + return epoch, nil +} + +func (mock *EpochQuery) NextUnsafe() (protocol.TentativeEpoch, error) { mock.mu.RLock() defer mock.mu.RUnlock() - return mock.byCounter[mock.counter] + // NextUnsafe should only return a tentative epoch when we have no committed epoch for the next counter. + // If we have a committed epoch (are implicitly in EpochPhaseCommitted) or no tentative epoch, return an error. + // Note that in tests we do not require that a committed epoch be added as a tentative epoch first. 
+ _, exists := mock.committed[mock.counter+1] + if exists { + return nil, protocol.ErrNextEpochAlreadyCommitted + } + epoch, exists := mock.tentative[mock.counter+1] + if !exists { + return nil, protocol.ErrNextEpochNotSetup + } + return epoch, nil } -func (mock *EpochQuery) Next() protocol.Epoch { +func (mock *EpochQuery) NextCommitted() (protocol.CommittedEpoch, error) { mock.mu.RLock() defer mock.mu.RUnlock() - epoch, exists := mock.byCounter[mock.counter+1] + epoch, exists := mock.committed[mock.counter+1] if !exists { - return invalid.NewEpoch(protocol.ErrNextEpochNotSetup) + return nil, protocol.ErrNextEpochNotCommitted } - return epoch + return epoch, nil } -func (mock *EpochQuery) Previous() protocol.Epoch { +func (mock *EpochQuery) Previous() (protocol.CommittedEpoch, error) { mock.mu.RLock() defer mock.mu.RUnlock() - epoch, exists := mock.byCounter[mock.counter-1] + epoch, exists := mock.committed[mock.counter-1] if !exists { - return invalid.NewEpoch(protocol.ErrNoPreviousEpoch) + return nil, protocol.ErrNoPreviousEpoch } - return epoch + return epoch, nil } -func (mock *EpochQuery) ByCounter(counter uint64) protocol.Epoch { +// Phase returns a phase consistent with the current epoch state. +func (mock *EpochQuery) Phase() flow.EpochPhase { mock.mu.RLock() defer mock.mu.RUnlock() - return mock.byCounter[counter] + _, exists := mock.committed[mock.counter+1] + if exists { + return flow.EpochPhaseCommitted + } + _, exists = mock.tentative[mock.counter+1] + if exists { + return flow.EpochPhaseSetup + } + return flow.EpochPhaseStaking +} + +func (mock *EpochQuery) ByCounter(counter uint64) protocol.CommittedEpoch { + mock.mu.RLock() + defer mock.mu.RUnlock() + return mock.committed[counter] } +// Transition increments the counter indicating which epoch is the "current epoch". +// It is assumed that an epoch corresponding to the current epoch counter exists; +// otherwise this mock is in a state that is illegal according to protocol rules. func (mock *EpochQuery) Transition() { mock.mu.Lock() defer mock.mu.Unlock() mock.counter++ } -func (mock *EpochQuery) Add(epoch protocol.Epoch) { +// AddCommitted adds the given Committed Epoch to this EpochQuery implementation, so its +// information can be retrieved by the business logic via the [protocol.EpochQuery] API. +func (mock *EpochQuery) AddCommitted(epoch protocol.CommittedEpoch) { + mock.mu.Lock() + defer mock.mu.Unlock() + mock.committed[epoch.Counter()] = epoch +} + +// AddTentative adds the given Tentative Epoch to this EpochQuery implementation, so its +// information can be retrieved by the business logic via the [protocol.EpochQuery] API. +func (mock *EpochQuery) AddTentative(epoch protocol.TentativeEpoch) { mock.mu.Lock() defer mock.mu.Unlock() - counter, err := epoch.Counter() - require.NoError(mock.t, err, "cannot add epoch with invalid counter") - mock.byCounter[counter] = epoch + counter := epoch.Counter() + require.Equal(mock.t, mock.counter+1, counter, "may only add tentative next epoch with current counter + 1") + mock.tentative[counter] = epoch } diff --git a/utils/unittest/mocks/mock_getters.go b/utils/unittest/mocks/mock_getters.go new file mode 100644 index 00000000000..c36d53c75f0 --- /dev/null +++ b/utils/unittest/mocks/mock_getters.go @@ -0,0 +1,55 @@ +package mocks + +import "github.com/onflow/flow-go/storage" + +// StorageMapGetter implements a simple generic getter function for mock storage methods. +// This is useful to avoid duplicating boilerplate code for mock storage methods. 
+// +// Example: +// Instead of the following code: +// +// results.On("ByID", mock.AnythingOfType("flow.Identifier")).Return( +// func(resultID flow.Identifier) (*flow.ExecutionResult, error) { +// if result, ok := s.resultMap[resultID]; ok { +// return result, nil +// } +// return nil, storage.ErrNotFound +// }, +// ) +// +// Use this: +// +// results.On("ByID", mock.AnythingOfType("flow.Identifier")).Return( +// mocks.StorageMapGetter(s.resultMap), +// ) +func StorageMapGetter[K comparable, V any](m map[K]V) func(key K) (V, error) { + return func(key K) (V, error) { + if val, ok := m[key]; ok { + return val, nil + } + return *new(V), storage.ErrNotFound + } +} + +// ConvertStorageOutput maps the output type from a getter function to a different type. +// This is useful to avoid maintaining multiple maps for the same data. +// +// Example usage: +// +// blockMap := map[uint64]*flow.Block{} +// +// headers.On("BlockIDByHeight", mock.AnythingOfType("uint64")).Return( +// mocks.ConvertStorageOutput( +// mocks.StorageMapGetter(s.blockMap), +// func(block *flow.Block) flow.Identifier { return block.ID() }, +// ), +// ) +func ConvertStorageOutput[K comparable, V any, R any](fn func(key K) (V, error), mapper func(V) R) func(key K) (R, error) { + return func(key K) (R, error) { + v, err := fn(key) + if err != nil { + return *new(R), err + } + return mapper(v), err + } +} diff --git a/utils/unittest/mocks/protocol_state.go b/utils/unittest/mocks/protocol_state.go index c2fa3421c13..0549ebd2bfa 100644 --- a/utils/unittest/mocks/protocol_state.go +++ b/utils/unittest/mocks/protocol_state.go @@ -25,11 +25,14 @@ type ProtocolState struct { children map[flow.Identifier][]flow.Identifier heights map[uint64]*flow.Block finalized uint64 + sealed uint64 root *flow.Block result *flow.ExecutionResult seal *flow.Seal } +var _ protocol.State = (*ProtocolState)(nil) + func NewProtocolState() *ProtocolState { return &ProtocolState{ blocks: make(map[flow.Identifier]*flow.Block), @@ -42,36 +45,40 @@ type Params struct { state *ProtocolState } -func (p *Params) ChainID() (flow.ChainID, error) { - return p.state.root.Header.ChainID, nil +func (p *Params) SporkRootBlock() *flow.Block { + return p.state.root } -func (p *Params) SporkID() (flow.Identifier, error) { - return flow.ZeroID, fmt.Errorf("not implemented") +func (p *Params) ChainID() flow.ChainID { + return p.state.root.ChainID } -func (p *Params) SporkRootBlockHeight() (uint64, error) { - return 0, fmt.Errorf("not implemented") +func (p *Params) SporkID() flow.Identifier { + return flow.ZeroID } -func (p *Params) ProtocolVersion() (uint, error) { - return 0, fmt.Errorf("not implemented") +func (p *Params) SporkRootBlockHeight() uint64 { + return 0 } -func (p *Params) EpochCommitSafetyThreshold() (uint64, error) { - return 0, fmt.Errorf("not implemented") +func (p *Params) SporkRootBlockView() uint64 { + return 0 } func (p *Params) EpochFallbackTriggered() (bool, error) { return false, fmt.Errorf("not implemented") } -func (p *Params) Root() (*flow.Header, error) { - return p.state.root.Header, nil +func (p *Params) FinalizedRoot() *flow.Header { + return p.state.root.ToHeader() +} + +func (p *Params) SealedRoot() *flow.Header { + return p.FinalizedRoot() } -func (p *Params) Seal() (*flow.Seal, error) { - return nil, fmt.Errorf("not implemented") +func (p *Params) Seal() *flow.Seal { + return nil } func (ps *ProtocolState) Params() protocol.Params { @@ -87,7 +94,7 @@ func (ps *ProtocolState) AtBlockID(blockID flow.Identifier) protocol.Snapshot { snapshot := 
new(protocolmock.Snapshot) block, ok := ps.blocks[blockID] if ok { - snapshot.On("Head").Return(block.Header, nil) + snapshot.On("Head").Return(block.ToHeader(), nil) } else { snapshot.On("Head").Return(nil, storage.ErrNotFound) } @@ -101,7 +108,13 @@ func (ps *ProtocolState) AtHeight(height uint64) protocol.Snapshot { snapshot := new(protocolmock.Snapshot) block, ok := ps.heights[height] if ok { - snapshot.On("Head").Return(block.Header, nil) + snapshot.On("Head").Return(block.ToHeader(), nil) + mocked := snapshot.On("Descendants") + mocked.RunFn = func(args mock.Arguments) { + pendings := pending(ps, block.ID()) + mocked.ReturnArguments = mock.Arguments{pendings, nil} + } + } else { snapshot.On("Head").Return(nil, storage.ErrNotFound) } @@ -118,7 +131,7 @@ func (ps *ProtocolState) Final() protocol.Snapshot { } snapshot := new(protocolmock.Snapshot) - snapshot.On("Head").Return(final.Header, nil) + snapshot.On("Head").Return(final.ToHeader(), nil) finalID := final.ID() mocked := snapshot.On("Descendants") mocked.RunFn = func(args mock.Arguments) { @@ -130,6 +143,20 @@ func (ps *ProtocolState) Final() protocol.Snapshot { return snapshot } +func (ps *ProtocolState) Sealed() protocol.Snapshot { + ps.Lock() + defer ps.Unlock() + + sealed, ok := ps.heights[ps.sealed] + if !ok { + return nil + } + + snapshot := new(protocolmock.Snapshot) + snapshot.On("Head").Return(sealed.ToHeader(), nil) + return snapshot +} + func pending(ps *ProtocolState, blockID flow.Identifier) []flow.Identifier { var pendingIDs []flow.Identifier pendingIDs, ok := ps.children[blockID] @@ -158,8 +185,8 @@ func (m *ProtocolState) Bootstrap(root *flow.Block, result *flow.ExecutionResult m.root = root m.result = result m.seal = seal - m.heights[root.Header.Height] = root - m.finalized = root.Header.Height + m.heights[root.Height] = root + m.finalized = root.Height return nil } @@ -172,20 +199,20 @@ func (m *ProtocolState) Extend(block *flow.Block) error { return storage.ErrAlreadyExists } - if _, ok := m.blocks[block.Header.ParentID]; !ok { - return fmt.Errorf("could not retrieve parent") + if _, ok := m.blocks[block.ParentID]; !ok { + return fmt.Errorf("could not retrieve parent %v", block.ParentID) } m.blocks[id] = block // index children - children, ok := m.children[block.Header.ParentID] + children, ok := m.children[block.ParentID] if !ok { children = make([]flow.Identifier, 0) } children = append(children, id) - m.children[block.Header.ParentID] = children + m.children[block.ParentID] = children return nil } @@ -199,22 +226,43 @@ func (m *ProtocolState) Finalize(blockID flow.Identifier) error { return fmt.Errorf("could not retrieve final header") } - if block.Header.Height <= m.finalized { + if block.Height <= m.finalized { return fmt.Errorf("could not finalize old blocks") } // update heights cur := block - for height := cur.Header.Height; height > m.finalized; height-- { - parent, ok := m.blocks[cur.Header.ParentID] + for height := cur.Height; height > m.finalized; height-- { + parent, ok := m.blocks[cur.ParentID] if !ok { - return fmt.Errorf("parent does not exist for block at height: %v, parentID: %v", cur.Header.Height, cur.Header.ParentID) + return fmt.Errorf("parent does not exist for block at height: %v, parentID: %v", cur.Height, cur.ParentID) } m.heights[height] = cur cur = parent } - m.finalized = block.Header.Height + m.finalized = block.Height + + return nil +} + +func (m *ProtocolState) MakeSeal(blockID flow.Identifier) error { + m.Lock() + defer m.Unlock() + + block, ok := m.blocks[blockID] + if !ok { + 
return fmt.Errorf("could not retrieve final header") + } + + if block.Height <= m.sealed { + return fmt.Errorf("could not seal old blocks") + } + + if block.Height >= m.finalized { + return fmt.Errorf("incorrect sealed height sealed %v, finalized %v", block.Height, m.finalized) + } + m.sealed = block.Height return nil } diff --git a/utils/unittest/network/conduit.go b/utils/unittest/network/conduit.go index 5ce87ee1de6..e4a60acc155 100644 --- a/utils/unittest/network/conduit.go +++ b/utils/unittest/network/conduit.go @@ -4,7 +4,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" ) type Conduit struct { diff --git a/utils/unittest/network/network.go b/utils/unittest/network/network.go index 369e014f52a..324d3c82050 100644 --- a/utils/unittest/network/network.go +++ b/utils/unittest/network/network.go @@ -8,7 +8,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" - "github.com/onflow/flow-go/network/mocknetwork" + mocknetwork "github.com/onflow/flow-go/network/mock" ) type EngineProcessFunc func(channels.Channel, flow.Identifier, interface{}) error @@ -18,20 +18,20 @@ type PublishFunc func(channels.Channel, interface{}, ...flow.Identifier) error // Network represents a mock network. The implementation is not concurrency-safe. type Network struct { - mocknetwork.Network + mocknetwork.EngineRegistry conduits map[channels.Channel]*Conduit engines map[channels.Channel]network.MessageProcessor publishFunc PublishFunc } -var _ network.Network = (*Network)(nil) +var _ network.EngineRegistry = (*Network)(nil) // NewNetwork returns a new mock network. func NewNetwork() *Network { return &Network{ - Network: mocknetwork.Network{}, - conduits: make(map[channels.Channel]*Conduit), - engines: make(map[channels.Channel]network.MessageProcessor), + EngineRegistry: mocknetwork.EngineRegistry{}, + conduits: make(map[channels.Channel]*Conduit), + engines: make(map[channels.Channel]network.MessageProcessor), } } diff --git a/utils/unittest/protected_map.go b/utils/unittest/protected_map.go deleted file mode 100644 index f0b4a65ad92..00000000000 --- a/utils/unittest/protected_map.go +++ /dev/null @@ -1,59 +0,0 @@ -package unittest - -import "sync" - -// ProtectedMap is a thread-safe map. -type ProtectedMap[K comparable, V any] struct { - mu sync.RWMutex - m map[K]V -} - -// NewProtectedMap returns a new ProtectedMap with the given types -func NewProtectedMap[K comparable, V any]() *ProtectedMap[K, V] { - return &ProtectedMap[K, V]{ - m: make(map[K]V), - } -} - -// Add adds a key-value pair to the map -func (p *ProtectedMap[K, V]) Add(key K, value V) { - p.mu.Lock() - defer p.mu.Unlock() - p.m[key] = value -} - -// Remove removes a key-value pair from the map -func (p *ProtectedMap[K, V]) Remove(key K) { - p.mu.Lock() - defer p.mu.Unlock() - delete(p.m, key) -} - -// Has returns true if the map contains the given key -func (p *ProtectedMap[K, V]) Has(key K) bool { - p.mu.RLock() - defer p.mu.RUnlock() - _, ok := p.m[key] - return ok -} - -// Get returns the value for the given key and a boolean indicating if the key was found -func (p *ProtectedMap[K, V]) Get(key K) (V, bool) { - p.mu.RLock() - defer p.mu.RUnlock() - value, ok := p.m[key] - return value, ok -} - -// ForEach iterates over the map and calls the given function for each key-value pair. 
-// If the function returns an error, the iteration is stopped and the error is returned. -func (p *ProtectedMap[K, V]) ForEach(fn func(k K, v V) error) error { - p.mu.RLock() - defer p.mu.RUnlock() - for k, v := range p.m { - if err := fn(k, v); err != nil { - return err - } - } - return nil -} diff --git a/utils/unittest/protocol_state.go b/utils/unittest/protocol_state.go index f5dbcb88073..027e8276f55 100644 --- a/utils/unittest/protocol_state.go +++ b/utils/unittest/protocol_state.go @@ -10,26 +10,27 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage/deferred" ) // FinalizedProtocolStateWithParticipants returns a protocol state with finalized participants func FinalizedProtocolStateWithParticipants(participants flow.IdentityList) ( *flow.Block, *mockprotocol.Snapshot, *mockprotocol.State, *mockprotocol.Snapshot) { sealed := BlockFixture() - block := BlockWithParentFixture(sealed.Header) - head := block.Header + block := BlockWithParentFixture(sealed.ToHeader()) + head := block.ToHeader() // set up protocol snapshot mock snapshot := &mockprotocol.Snapshot{} snapshot.On("Identities", mock.Anything).Return( - func(filter flow.IdentityFilter) flow.IdentityList { + func(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { return participants.Filter(filter) }, nil, ) snapshot.On("Identity", mock.Anything).Return(func(id flow.Identifier) *flow.Identity { for _, n := range participants { - if n.ID() == id { + if n.NodeID == id { return n } } @@ -45,7 +46,7 @@ func FinalizedProtocolStateWithParticipants(participants flow.IdentityList) ( sealedSnapshot := &mockprotocol.Snapshot{} sealedSnapshot.On("Head").Return( func() *flow.Header { - return sealed.Header + return sealed.ToHeader() }, nil, ) @@ -73,29 +74,43 @@ func FinalizedProtocolStateWithParticipants(participants flow.IdentityList) ( // a receipt for the block (BR), the second (BS) containing a seal for the block. // B <- BR(Result_B) <- BS(Seal_B) // Returns the two generated blocks. 
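
// A minimal sketch (not part of this change) of how the updated fixture above
// might be consumed after the move to generic identity filters. It assumes
// unittest.IdentityListFixture is available in this package; treat any name
// not shown in this diff as an assumption.
package unittest_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestFinalizedProtocolStateFixture(t *testing.T) {
	participants := unittest.IdentityListFixture(4) // assumed fixture helper
	block, snapshot, _, _ := unittest.FinalizedProtocolStateWithParticipants(participants)

	// Head is now derived via block.ToHeader() rather than block.Header.
	head, err := snapshot.Head()
	require.NoError(t, err)
	require.Equal(t, block.ToHeader().ID(), head.ID())

	// Identities takes a flow.IdentityFilter[flow.Identity]; a literal filter
	// avoids depending on helpers not shown in this diff.
	identities, err := snapshot.Identities(func(*flow.Identity) bool { return true })
	require.NoError(t, err)
	require.Len(t, identities, len(participants))
}
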
-func SealBlock(t *testing.T, st protocol.ParticipantState, block *flow.Block, receipt *flow.ExecutionReceipt, seal *flow.Seal) (br *flow.Header, bs *flow.Header) { +func SealBlock(t *testing.T, st protocol.ParticipantState, mutableProtocolState protocol.MutableProtocolState, block *flow.Block, receipt *flow.ExecutionReceipt, seal *flow.Seal) (br *flow.Block, bs *flow.Block) { + block2 := BlockWithParentAndPayload( + block.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receipt.Stub()}, + Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, + ProtocolStateID: block.Payload.ProtocolStateID, + }, + ) + err := st.Extend(context.Background(), ProposalFromBlock(block2)) + require.NoError(t, err) - block2 := BlockWithParentFixture(block.Header) - block2.SetPayload(flow.Payload{ - Receipts: []*flow.ExecutionReceiptMeta{receipt.Meta()}, - Results: []*flow.ExecutionResult{&receipt.ExecutionResult}, - }) - err := st.Extend(context.Background(), block2) + seals := []*flow.Seal{seal} + + dbUpdates := deferred.NewDeferredBlockPersist() + block3View := block2.View + 1 + updatedStateId, err := mutableProtocolState.EvolveState(dbUpdates, block2.ID(), block3View, seals) require.NoError(t, err) + require.False(t, dbUpdates.IsEmpty()) - block3 := BlockWithParentFixture(block2.Header) - block3.SetPayload(flow.Payload{ - Seals: []*flow.Seal{seal}, - }) - err = st.Extend(context.Background(), block3) + block3 := BlockFixture( + Block.WithParent(block2.ID(), block2.View, block2.Height), + Block.WithPayload( + flow.Payload{ + Seals: seals, + ProtocolStateID: updatedStateId, + }), + ) + err = st.Extend(context.Background(), ProposalFromBlock(block3)) require.NoError(t, err) - return block2.Header, block3.Header + return block2, block3 } // InsertAndFinalize inserts, then finalizes, the input block. func InsertAndFinalize(t *testing.T, st protocol.ParticipantState, block *flow.Block) { - err := st.Extend(context.Background(), block) + err := st.Extend(context.Background(), ProposalFromBlock(block)) require.NoError(t, err) err = st.Finalize(context.Background(), block.ID()) require.NoError(t, err) diff --git a/utils/unittest/service_events_fixtures.go b/utils/unittest/service_events_fixtures.go index 7888fe0a494..aa3f78a92cf 100644 --- a/utils/unittest/service_events_fixtures.go +++ b/utils/unittest/service_events_fixtures.go @@ -1,30 +1,28 @@ package unittest import ( - "github.com/onflow/flow-go/crypto" + "encoding/hex" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/encoding/ccf" + "github.com/onflow/crypto" + "github.com/onflow/flow-go/fvm/systemcontracts" "github.com/onflow/flow-go/model/flow" ) // This file contains service event fixtures for testing purposes. -// The Cadence form is represented by JSON-CDC-encoded string variables. // EpochSetupFixtureByChainID returns an EpochSetup service event as a Cadence event // representation and as a protocol model representation. 
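
// A sketch of a caller migrating to the new SealBlock signature above: the
// mutable protocol state is now threaded through so EvolveState can compute
// the child block's ProtocolStateID, and full blocks are returned instead of
// bare headers. unittest.ReceiptAndSealForBlock is assumed to exist as a
// fixture helper in this package; treat the snippet as illustrative only.
package unittest_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/state/protocol"
	"github.com/onflow/flow-go/utils/unittest"
)

func sealAndFinalize(t *testing.T, st protocol.ParticipantState, mps protocol.MutableProtocolState, block *flow.Block) {
	receipt, seal := unittest.ReceiptAndSealForBlock(block) // assumed helper

	// SealBlock extends the state with BR (carrying the receipt) and BS
	// (carrying the seal); both blocks are extended but not yet finalized.
	br, bs := unittest.SealBlock(t, st, mps, block, receipt, seal)

	// BS is a child of BR, so finalizing them in order is valid.
	require.NoError(t, st.Finalize(context.Background(), br.ID()))
	require.NoError(t, st.Finalize(context.Background(), bs.ID()))
}
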
func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetup) { - events, err := systemcontracts.ServiceEventsForChain(chain) - if err != nil { - panic(err) - } - - event := EventFixture(events.EpochSetup.EventType(), 1, 1, IdentifierFixture(), 0) - event.Payload = []byte(EpochSetupFixtureJSON) - - // randomSource is [0,0,...,1,2,3,4] - randomSource := make([]uint8, flow.EpochSetupRandomSourceLength) - for i := 0; i < 4; i++ { - randomSource[flow.EpochSetupRandomSourceLength-1-i] = uint8(4 - i) - } + events := systemcontracts.ServiceEventsForChain(chain) + randomSource := EpochSetupRandomSourceFixture() + event := EventFixture( + Event.WithEventType(events.EpochSetup.EventType()), + Event.WithPayload(EpochSetupFixtureCCF(randomSource)), + ) expected := &flow.EpochSetup{ Counter: 1, @@ -34,6 +32,8 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu DKGPhase2FinalView: 160, DKGPhase3FinalView: 170, RandomSource: randomSource, + TargetDuration: 200, + TargetEndTime: 2000000000, Assignments: flow.AssignmentList{ { flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), @@ -44,14 +44,14 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), }, }, - Participants: flow.IdentityList{ + Participants: flow.IdentitySkeletonList{ { Role: flow.RoleCollection, NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), Address: "1.flow.com", NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, + InitialWeight: 100, }, { Role: flow.RoleCollection, @@ -59,7 +59,7 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu Address: "2.flow.com", NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, + InitialWeight: 100, }, { Role: flow.RoleCollection, @@ -67,7 +67,7 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu Address: "3.flow.com", NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, + InitialWeight: 100, }, { Role: flow.RoleCollection, @@ -75,7 +75,7 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu Address: "4.flow.com", NetworkPubKey: 
MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), - Weight: 100, + InitialWeight: 100, }, { Role: flow.RoleConsensus, @@ -83,7 +83,7 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu Address: "11.flow.com", NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47"), StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f"), - Weight: 100, + InitialWeight: 100, }, { Role: flow.RoleExecution, @@ -91,7 +91,7 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu Address: "21.flow.com", NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e"), StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83"), - Weight: 100, + InitialWeight: 100, }, { Role: flow.RoleVerification, @@ -99,7 +99,7 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu Address: "31.flow.com", NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae"), StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7"), - Weight: 100, + InitialWeight: 100, }, }, } @@ -107,17 +107,15 @@ func EpochSetupFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochSetu return event, expected } -// EpochCommitFixture returns an EpochCommit service event as a Cadence event +// EpochCommitFixtureByChainID returns an EpochCommit service event as a Cadence event // representation and as a protocol model representation. 
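
// A sketch of how the CCF-encoded fixture above might be exercised in a test.
// convert.ServiceEvent from model/convert is assumed to decode the event
// payload into the corresponding protocol model; this is illustrative, not a
// definitive part of the change.
package unittest_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/model/convert"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/unittest"
)

func TestEpochSetupFixtureDecodes(t *testing.T) {
	chainID := flow.Testnet
	event, expected := unittest.EpochSetupFixtureByChainID(chainID)

	serviceEvent, err := convert.ServiceEvent(chainID, event)
	require.NoError(t, err)

	// The decoded event should match the expected model, including the new
	// TargetDuration/TargetEndTime fields and the IdentitySkeletonList.
	setup, ok := serviceEvent.Event.(*flow.EpochSetup)
	require.True(t, ok)
	require.Equal(t, expected, setup)
}
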
func EpochCommitFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochCommit) { + events := systemcontracts.ServiceEventsForChain(chain) - events, err := systemcontracts.ServiceEventsForChain(chain) - if err != nil { - panic(err) - } - - event := EventFixture(events.EpochCommit.EventType(), 1, 1, IdentifierFixture(), 0) - event.Payload = []byte(EpochCommitFixtureJSON) + event := EventFixture( + Event.WithEventType(events.EpochCommit.EventType()), + Event.WithPayload(EpochCommitFixtureCCF), + ) expected := &flow.EpochCommit{ Counter: 1, @@ -141,28 +139,151 @@ func EpochCommitFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochCom DKGParticipantKeys: []crypto.PublicKey{ MustDecodePublicKeyHex(crypto.BLSBLS12381, "87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488"), }, + DKGIndexMap: flow.DKGIndexMap{ + flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000011"): 0, + }, } return event, expected } -// VersionBeaconFixtureByChainID returns a VersionTable service event as a Cadence event +// EpochRecoverFixtureByChainID returns an EpochRecover service event as a Cadence event // representation and as a protocol model representation. -func VersionBeaconFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.VersionBeacon) { +func EpochRecoverFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.EpochRecover) { + events := systemcontracts.ServiceEventsForChain(chain) + randomSource := EpochSetupRandomSourceFixture() - events, err := systemcontracts.ServiceEventsForChain(chain) - if err != nil { - panic(err) + event := EventFixture( + Event.WithEventType(events.EpochRecover.EventType()), + Event.WithPayload(EpochRecoverFixtureCCF(randomSource)), + ) + + expected := &flow.EpochRecover{ + EpochSetup: flow.EpochSetup{ + Counter: 1, + FirstView: 100, + FinalView: 200, + DKGPhase1FinalView: 150, + DKGPhase2FinalView: 160, + DKGPhase3FinalView: 170, + RandomSource: randomSource, + TargetDuration: 200, + TargetEndTime: 2000000000, + Assignments: flow.AssignmentList{ + { + flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), + flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), + }, + { + flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), + flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), + }, + }, + Participants: flow.IdentitySkeletonList{ + { + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), + Address: "1.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + { + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), + Address: "2.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, 
"378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + { + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), + Address: "3.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + { + Role: flow.RoleCollection, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), + Address: "4.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + InitialWeight: 100, + }, + { + Role: flow.RoleConsensus, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000011"), + Address: "11.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f"), + InitialWeight: 100, + }, + { + Role: flow.RoleExecution, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000021"), + Address: "21.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83"), + InitialWeight: 100, + }, + { + Role: flow.RoleVerification, + NodeID: flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000031"), + Address: "31.flow.com", + NetworkPubKey: MustDecodePublicKeyHex(crypto.ECDSAP256, "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae"), + StakingPubKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, 
"b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7"), + InitialWeight: 100, + }, + }, + }, + EpochCommit: flow.EpochCommit{ + Counter: 1, + ClusterQCs: []flow.ClusterQCVoteData{ + { + VoterIDs: []flow.Identifier{ + flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001"), + flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002"), + }, + SigData: MustDecodeSignatureHex("b072ed22ed305acd44818a6c836e09b4e844eebde6a4fdbf5cec983e2872b86c8b0f6c34c0777bf52e385ab7c45dc55d"), + }, + { + VoterIDs: []flow.Identifier{ + flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000003"), + flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000004"), + }, + SigData: MustDecodeSignatureHex("899e266a543e1b3a564f68b22f7be571f2e944ec30fadc4b39e2d5f526ba044c0f3cb2648f8334fc216fa3360a0418b2"), + }, + }, + DKGGroupKey: MustDecodePublicKeyHex(crypto.BLSBLS12381, "8c588266db5f5cda629e83f8aa04ae9413593fac19e4865d06d291c9d14fbdd9bdb86a7a12f9ef8590c79cb635e3163315d193087e9336092987150d0cd2b14ac6365f7dc93eec573752108b8c12368abb65f0652d9f644e5aed611c37926950"), + DKGParticipantKeys: []crypto.PublicKey{ + MustDecodePublicKeyHex(crypto.BLSBLS12381, "87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488"), + }, + DKGIndexMap: flow.DKGIndexMap{ + flow.MustHexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000011"): 0, + }, + }, } - event := EventFixture(events.VersionBeacon.EventType(), 1, 1, IdentifierFixture(), 0) - event.Payload = []byte(VersionBeaconFixtureJSON) + return event, expected +} + +// VersionBeaconFixtureByChainID returns a VersionTable service event as a Cadence event +// representation and as a protocol model representation. 
+func VersionBeaconFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.VersionBeacon) { + events := systemcontracts.ServiceEventsForChain(chain) + + event := EventFixture( + Event.WithEventType(events.VersionBeacon.EventType()), + Event.WithPayload(VersionBeaconFixtureCCF), + ) expected := &flow.VersionBeacon{ VersionBoundaries: []flow.VersionBoundary{ { BlockHeight: 44, - Version: "2.13.7", + Version: "2.13.7-test", }, }, Sequence: 5, @@ -171,1169 +292,1223 @@ func VersionBeaconFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.Versio return event, expected } -var EpochSetupFixtureJSON = ` -{ - "type": "Event", - "value": { - "id": "A.01cf0e2f2f715450.FlowEpoch.EpochSetup", - "fields": [ - { - "name": "counter", - "value": { - "type": "UInt64", - "value": "1" - } - }, - { - "name": "nodeInfo", - "value": { - "type": "Array", - "value": [ - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000001" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "1" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "1.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000002" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "1" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "2.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": 
"af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000003" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "1" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "3.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000004" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "1" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "4.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc" - } - }, - { - "name": 
"stakingKey", - "value": { - "type": "String", - "value": "af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000011" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "2" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "11.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000021" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "3" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "21.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": 
"d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo", - "fields": [ - { - "name": "id", - "value": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000031" - } - }, - { - "name": "role", - "value": { - "type": "UInt8", - "value": "4" - } - }, - { - "name": "networkingAddress", - "value": { - "type": "String", - "value": "31.flow.com" - } - }, - { - "name": "networkingKey", - "value": { - "type": "String", - "value": "697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae" - } - }, - { - "name": "stakingKey", - "value": { - "type": "String", - "value": "b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7" - } - }, - { - "name": "tokensStaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensCommitted", - "value": { - "type": "UFix64", - "value": "1350000.00000000" - } - }, - { - "name": "tokensUnstaking", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensUnstaked", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "tokensRewarded", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "delegators", - "value": { - "type": "Array", - "value": [] - } - }, - { - "name": "delegatorIDCounter", - "value": { - "type": "UInt32", - "value": "0" - } - }, - { - "name": "tokensRequestedToUnstake", - "value": { - "type": "UFix64", - "value": "0.00000000" - } - }, - { - "name": "initialWeight", - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - } - ] - } - }, - { - "name": "firstView", - "value": { - "type": "UInt64", - "value": "100" - } - }, - { - "name": "finalView", - "value": { - "type": "UInt64", - "value": "200" - } - }, - { - "name": "collectorClusters", - "value": { - "type": "Array", - "value": [ - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowClusterQC.Cluster", - "fields": [ - { - "name": "index", - "value": { - "type": "UInt16", - "value": "0" - } - }, - { - 
"name": "nodeWeights", - "value": { - "type": "Dictionary", - "value": [ - { - "key": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000001" - }, - "value": { - "type": "UInt64", - "value": "100" - } - }, - { - "key": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000002" - }, - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "name": "totalWeight", - "value": { - "type": "UInt64", - "value": "100" - } - }, - { - "name": "votes", - "value": { - "type": "Array", - "value": [] - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowClusterQC.Cluster", - "fields": [ - { - "name": "index", - "value": { - "type": "UInt16", - "value": "1" - } - }, - { - "name": "nodeWeights", - "value": { - "type": "Dictionary", - "value": [ - { - "key": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000003" - }, - "value": { - "type": "UInt64", - "value": "100" - } - }, - { - "key": { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000004" - }, - "value": { - "type": "UInt64", - "value": "100" - } - } - ] - } - }, - { - "name": "totalWeight", - "value": { - "type": "UInt64", - "value": "0" - } - }, - { - "name": "votes", - "value": { - "type": "Array", - "value": [] - } - } - ] - } - } - ] - } - }, - { - "name": "randomSource", - "value": { - "type": "String", - "value": "01020304" - } - }, - { - "name": "DKGPhase1FinalView", - "value": { - "type": "UInt64", - "value": "150" - } - }, - { - "name": "DKGPhase2FinalView", - "value": { - "type": "UInt64", - "value": "160" - } - }, - { - "name": "DKGPhase3FinalView", - "value": { - "type": "UInt64", - "value": "170" - } - } - ] - } +func ProtocolStateVersionUpgradeFixtureByChainID(chain flow.ChainID) (flow.Event, *flow.ProtocolStateVersionUpgrade) { + events := systemcontracts.ServiceEventsForChain(chain) + + event := EventFixture( + Event.WithEventType(events.ProtocolStateVersionUpgrade.EventType()), + Event.WithPayload(ProtocolStateVersionUpgradeFixtureCCF), + ) + + expected := &flow.ProtocolStateVersionUpgrade{ + NewProtocolStateVersion: 1, + ActiveView: 1000, + } + + return event, expected +} + +func createEpochSetupEvent(randomSourceHex string) cadence.Event { + return cadence.NewEvent([]cadence.Value{ + // counter + cadence.NewUInt64(1), + + // nodeInfo + createEpochNodes(), + + // firstView + cadence.NewUInt64(100), + + // finalView + cadence.NewUInt64(200), + + // collectorClusters + createEpochCollectors(), + + // randomSource + cadence.String(randomSourceHex), + + // DKGPhase1FinalView + cadence.UInt64(150), + + // DKGPhase2FinalView + cadence.UInt64(160), + + // DKGPhase3FinalView + cadence.UInt64(170), + + // targetDuration + cadence.UInt64(200), + + // targetEndTime + cadence.UInt64(2000000000), + }).WithType(newFlowEpochEpochSetupEventType()) +} + +func createEpochNodes() cadence.Array { + + nodeInfoType := newFlowIDTableStakingNodeInfoStructType() + + nodeInfo1 := cadence.NewStruct([]cadence.Value{ + // id + cadence.String("0000000000000000000000000000000000000000000000000000000000000001"), + + // role + cadence.UInt8(1), + + // networkingAddress + cadence.String("1.flow.com"), + + // networkingKey + cadence.String("378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + + // stakingKey + 
cadence.String("af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + + // tokensStaked + ufix64FromString("0.00000000"), + + // tokensCommitted + ufix64FromString("1350000.00000000"), + + // tokensUnstaking + ufix64FromString("0.00000000"), + + // tokensUnstaked + ufix64FromString("0.00000000"), + + // tokensRewarded + ufix64FromString("0.00000000"), + + // delegators + cadence.NewArray([]cadence.Value{}).WithType(cadence.NewVariableSizedArrayType(cadence.UInt32Type)), + + // delegatorIDCounter + cadence.UInt32(0), + + // tokensRequestedToUnstake + ufix64FromString("0.00000000"), + + // initialWeight + cadence.UInt64(100), + }).WithType(nodeInfoType) + + nodeInfo2 := cadence.NewStruct([]cadence.Value{ + // id + cadence.String("0000000000000000000000000000000000000000000000000000000000000002"), + + // role + cadence.UInt8(1), + + // networkingAddress + cadence.String("2.flow.com"), + + // networkingKey + cadence.String("378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + + // stakingKey + cadence.String("af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + + // tokensStaked + ufix64FromString("0.00000000"), + + // tokensCommitted + ufix64FromString("1350000.00000000"), + + // tokensUnstaking + ufix64FromString("0.00000000"), + + // tokensUnstaked + ufix64FromString("0.00000000"), + + // tokensRewarded + ufix64FromString("0.00000000"), + + // delegators + cadence.NewArray([]cadence.Value{}).WithType(cadence.NewVariableSizedArrayType(cadence.UInt32Type)), + + // delegatorIDCounter + cadence.UInt32(0), + + // tokensRequestedToUnstake + ufix64FromString("0.00000000"), + + // initialWeight + cadence.UInt64(100), + }).WithType(nodeInfoType) + + nodeInfo3 := cadence.NewStruct([]cadence.Value{ + // id + cadence.String("0000000000000000000000000000000000000000000000000000000000000003"), + + // role + cadence.UInt8(1), + + // networkingAddress + cadence.String("3.flow.com"), + + // networkingKey + cadence.String("378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + + // stakingKey + cadence.String("af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + + // tokensStaked + ufix64FromString("0.00000000"), + + // tokensCommitted + ufix64FromString("1350000.00000000"), + + // tokensUnstaking + ufix64FromString("0.00000000"), + + // tokensUnstaked + ufix64FromString("0.00000000"), + + // tokensRewarded + ufix64FromString("0.00000000"), + + // delegators + cadence.NewArray([]cadence.Value{}).WithType(cadence.NewVariableSizedArrayType(cadence.UInt32Type)), + + // delegatorIDCounter + cadence.UInt32(0), + + // tokensRequestedToUnstake + ufix64FromString("0.00000000"), + + // initialWeight + cadence.UInt64(100), + }).WithType(nodeInfoType) + + nodeInfo4 := cadence.NewStruct([]cadence.Value{ + // id + cadence.String("0000000000000000000000000000000000000000000000000000000000000004"), + + // role + cadence.UInt8(1), + + // networkingAddress + cadence.String("4.flow.com"), + + // networkingKey + 
cadence.String("378dbf45d85c614feb10d8bd4f78f4b6ef8eec7d987b937e123255444657fb3da031f232a507e323df3a6f6b8f50339c51d188e80c0e7a92420945cc6ca893fc"), + + // stakingKey + cadence.String("af4aade26d76bb2ab15dcc89adcef82a51f6f04b3cb5f4555214b40ec89813c7a5f95776ea4fe449de48166d0bbc59b919b7eabebaac9614cf6f9461fac257765415f4d8ef1376a2365ec9960121888ea5383d88a140c24c29962b0a14e4e4e7"), + + // tokensStaked + ufix64FromString("0.00000000"), + + // tokensCommitted + ufix64FromString("1350000.00000000"), + + // tokensUnstaking + ufix64FromString("0.00000000"), + + // tokensUnstaked + ufix64FromString("0.00000000"), + + // tokensRewarded + ufix64FromString("0.00000000"), + + // delegators + cadence.NewArray([]cadence.Value{}).WithType(cadence.NewVariableSizedArrayType(cadence.UInt32Type)), + + // delegatorIDCounter + cadence.UInt32(0), + + // tokensRequestedToUnstake + ufix64FromString("0.00000000"), + + // initialWeight + cadence.UInt64(100), + }).WithType(nodeInfoType) + + nodeInfo5 := cadence.NewStruct([]cadence.Value{ + // id + cadence.String("0000000000000000000000000000000000000000000000000000000000000011"), + + // role + cadence.UInt8(2), + + // networkingAddress + cadence.String("11.flow.com"), + + // networkingKey + cadence.String("cfdfe8e4362c8f79d11772cb7277ab16e5033a63e8dd5d34caf1b041b77e5b2d63c2072260949ccf8907486e4cfc733c8c42ca0e4e208f30470b0d950856cd47"), + + // stakingKey + cadence.String("8207559cd7136af378bba53a8f0196dee3849a3ab02897c1995c3e3f6ca0c4a776c3ae869d1ddbb473090054be2400ad06d7910aa2c5d1780220fdf3765a3c1764bce10c6fe66a5a2be51a422e878518bd750424bb56b8a0ecf0f8ad2057e83f"), + + // tokensStaked + ufix64FromString("0.00000000"), + + // tokensCommitted + ufix64FromString("1350000.00000000"), + + // tokensUnstaking + ufix64FromString("0.00000000"), + + // tokensUnstaked + ufix64FromString("0.00000000"), + + // tokensRewarded + ufix64FromString("0.00000000"), + + // delegators + cadence.NewArray([]cadence.Value{}).WithType(cadence.NewVariableSizedArrayType(cadence.UInt32Type)), + + // delegatorIDCounter + cadence.UInt32(0), + + // tokensRequestedToUnstake + ufix64FromString("0.00000000"), + + // initialWeight + cadence.UInt64(100), + }).WithType(nodeInfoType) + + nodeInfo6 := cadence.NewStruct([]cadence.Value{ + // id + cadence.String("0000000000000000000000000000000000000000000000000000000000000021"), + + // role + cadence.UInt8(3), + + // networkingAddress + cadence.String("21.flow.com"), + + // networkingKey + cadence.String("d64318ba0dbf68f3788fc81c41d507c5822bf53154530673127c66f50fe4469ccf1a054a868a9f88506a8999f2386d86fcd2b901779718cba4fb53c2da258f9e"), + + // stakingKey + cadence.String("880b162b7ec138b36af401d07868cb08d25746d905395edbb4625bdf105d4bb2b2f4b0f4ae273a296a6efefa7ce9ccb914e39947ce0e83745125cab05d62516076ff0173ed472d3791ccef937597c9ea12381d76f547a092a4981d77ff3fba83"), + + // tokensStaked + ufix64FromString("0.00000000"), + + // tokensCommitted + ufix64FromString("1350000.00000000"), + + // tokensUnstaking + ufix64FromString("0.00000000"), + + // tokensUnstaked + ufix64FromString("0.00000000"), + + // tokensRewarded + ufix64FromString("0.00000000"), + + // delegators + cadence.NewArray([]cadence.Value{}).WithType(cadence.NewVariableSizedArrayType(cadence.UInt32Type)), + + // delegatorIDCounter + cadence.UInt32(0), + + // tokensRequestedToUnstake + ufix64FromString("0.00000000"), + + // initialWeight + cadence.UInt64(100), + }).WithType(nodeInfoType) + + nodeInfo7 := cadence.NewStruct([]cadence.Value{ + // id + 
cadence.String("0000000000000000000000000000000000000000000000000000000000000031"), + + // role + cadence.UInt8(4), + + // networkingAddress + cadence.String("31.flow.com"), + + // networkingKey + cadence.String("697241208dcc9142b6f53064adc8ff1c95760c68beb2ba083c1d005d40181fd7a1b113274e0163c053a3addd47cd528ec6a1f190cf465aac87c415feaae011ae"), + + // stakingKey + cadence.String("b1f97d0a06020eca97352e1adde72270ee713c7daf58da7e74bf72235321048b4841bdfc28227964bf18e371e266e32107d238358848bcc5d0977a0db4bda0b4c33d3874ff991e595e0f537c7b87b4ddce92038ebc7b295c9ea20a1492302aa7"), + + // tokensStaked + ufix64FromString("0.00000000"), + + // tokensCommitted + ufix64FromString("1350000.00000000"), + + // tokensUnstaking + ufix64FromString("0.00000000"), + + // tokensUnstaked + ufix64FromString("0.00000000"), + + // tokensRewarded + ufix64FromString("0.00000000"), + + // delegators + cadence.NewArray([]cadence.Value{}).WithType(cadence.NewVariableSizedArrayType(cadence.UInt32Type)), + + // delegatorIDCounter + cadence.UInt32(0), + + // tokensRequestedToUnstake + ufix64FromString("0.00000000"), + + // initialWeight + cadence.UInt64(100), + }).WithType(nodeInfoType) + + return cadence.NewArray([]cadence.Value{ + nodeInfo1, + nodeInfo2, + nodeInfo3, + nodeInfo4, + nodeInfo5, + nodeInfo6, + nodeInfo7, + }).WithType(cadence.NewVariableSizedArrayType(nodeInfoType)) +} + +func createEpochCollectors() cadence.Array { + + clusterType := NewFlowClusterQCClusterStructType() + + voteType := newFlowClusterQCVoteStructType() + + cluster1 := cadence.NewStruct([]cadence.Value{ + // index + cadence.NewUInt16(0), + + // nodeWeights + cadence.NewDictionary([]cadence.KeyValuePair{ + { + Key: cadence.String("0000000000000000000000000000000000000000000000000000000000000001"), + Value: cadence.UInt64(100), + }, + { + Key: cadence.String("0000000000000000000000000000000000000000000000000000000000000002"), + Value: cadence.UInt64(100), + }, + }).WithType(cadence.NewMeteredDictionaryType(nil, cadence.StringType, cadence.UInt64Type)), + + // totalWeight + cadence.NewUInt64(100), + + // generatedVotes + cadence.NewDictionary(nil).WithType(cadence.NewDictionaryType(cadence.StringType, voteType)), + + // uniqueVoteMessageTotalWeights + cadence.NewDictionary(nil).WithType(cadence.NewDictionaryType(cadence.StringType, cadence.UInt64Type)), + }).WithType(clusterType) + + cluster2 := cadence.NewStruct([]cadence.Value{ + // index + cadence.NewUInt16(1), + + // nodeWeights + cadence.NewDictionary([]cadence.KeyValuePair{ + { + Key: cadence.String("0000000000000000000000000000000000000000000000000000000000000003"), + Value: cadence.UInt64(100), + }, + { + Key: cadence.String("0000000000000000000000000000000000000000000000000000000000000004"), + Value: cadence.UInt64(100), + }, + }).WithType(cadence.NewMeteredDictionaryType(nil, cadence.StringType, cadence.UInt64Type)), + + // totalWeight + cadence.NewUInt64(0), + + // generatedVotes + cadence.NewDictionary(nil).WithType(cadence.NewDictionaryType(cadence.StringType, voteType)), + + // uniqueVoteMessageTotalWeights + cadence.NewDictionary(nil).WithType(cadence.NewDictionaryType(cadence.StringType, cadence.UInt64Type)), + }).WithType(clusterType) + + return cadence.NewArray([]cadence.Value{ + cluster1, + cluster2, + }).WithType(cadence.NewVariableSizedArrayType(clusterType)) +} + +func createEpochCommitEvent() cadence.Event { + + clusterQCType := newFlowClusterQCClusterQCStructType() + + cluster1 := cadence.NewStruct([]cadence.Value{ + // index + cadence.UInt16(0), + + // voteSignatures + 
cadence.NewArray([]cadence.Value{ + cadence.String("a39cd1e1bf7e2fb0609b7388ce5215a6a4c01eef2aee86e1a007faa28a6b2a3dc876e11bb97cdb26c3846231d2d01e4d"), + cadence.String("91673ad9c717d396c9a0953617733c128049ac1a639653d4002ab245b121df1939430e313bcbfd06948f6a281f6bf853"), + }).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + + // voteMessage + cadence.String("irrelevant_for_these_purposes"), + + // voterIDs + cadence.NewArray([]cadence.Value{ + cadence.String("0000000000000000000000000000000000000000000000000000000000000001"), + cadence.String("0000000000000000000000000000000000000000000000000000000000000002"), + }).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + }).WithType(clusterQCType) + + cluster2 := cadence.NewStruct([]cadence.Value{ + // index + cadence.UInt16(1), + + // voteSignatures + cadence.NewArray([]cadence.Value{ + cadence.String("b2bff159971852ed63e72c37991e62c94822e52d4fdcd7bf29aaf9fb178b1c5b4ce20dd9594e029f3574cb29533b857a"), + cadence.String("9931562f0248c9195758da3de4fb92f24fa734cbc20c0cb80280163560e0e0348f843ac89ecbd3732e335940c1e8dccb"), + }).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + + // voteMessage + cadence.String("irrelevant_for_these_purposes"), + + // voterIDs + cadence.NewArray([]cadence.Value{ + cadence.String("0000000000000000000000000000000000000000000000000000000000000003"), + cadence.String("0000000000000000000000000000000000000000000000000000000000000004"), + }).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + }).WithType(clusterQCType) + + return cadence.NewEvent([]cadence.Value{ + // counter + cadence.NewUInt64(1), + + // clusterQCs + cadence.NewArray([]cadence.Value{ + cluster1, + cluster2, + }).WithType(cadence.NewVariableSizedArrayType(clusterQCType)), + + // dkgGroupKey + cadence.String("8c588266db5f5cda629e83f8aa04ae9413593fac19e4865d06d291c9d14fbdd9bdb86a7a12f9ef8590c79cb635e3163315d193087e9336092987150d0cd2b14ac6365f7dc93eec573752108b8c12368abb65f0652d9f644e5aed611c37926950"), + + // dkgPubKeys + cadence.NewArray([]cadence.Value{ + cadence.String("87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488"), + }).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + + // dkgIdMapping + cadence.NewDictionary([]cadence.KeyValuePair{ + { + Key: cadence.String("0000000000000000000000000000000000000000000000000000000000000011"), + Value: cadence.NewInt(0), + }, + }).WithType(cadence.NewDictionaryType(cadence.StringType, cadence.IntType)), + }).WithType(newFlowEpochEpochCommitEventType()) +} + +func createEpochRecoverEvent(randomSourceHex string) cadence.Event { + + clusterQCVoteDataType := newFlowClusterQCClusterQCVoteDataStructType() + + cluster1 := cadence.NewStruct([]cadence.Value{ + // aggregatedSignature + cadence.String("b072ed22ed305acd44818a6c836e09b4e844eebde6a4fdbf5cec983e2872b86c8b0f6c34c0777bf52e385ab7c45dc55d"), + // Node IDs of signers + cadence.NewArray([]cadence.Value{ + cadence.String("0000000000000000000000000000000000000000000000000000000000000001"), + cadence.String("0000000000000000000000000000000000000000000000000000000000000002"), + }).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + }).WithType(clusterQCVoteDataType) + + cluster2 := cadence.NewStruct([]cadence.Value{ + // aggregatedSignature + 
cadence.String("899e266a543e1b3a564f68b22f7be571f2e944ec30fadc4b39e2d5f526ba044c0f3cb2648f8334fc216fa3360a0418b2"), + // Node IDs of signers + cadence.NewArray([]cadence.Value{ + cadence.String("0000000000000000000000000000000000000000000000000000000000000003"), + cadence.String("0000000000000000000000000000000000000000000000000000000000000004"), + }).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + }).WithType(clusterQCVoteDataType) + + return cadence.NewEvent([]cadence.Value{ + // counter + cadence.NewUInt64(1), + + // nodeInfo + createEpochNodes(), + + // firstView + cadence.NewUInt64(100), + + // finalView + cadence.NewUInt64(200), + + // collectorClusters + cadence.NewArray([]cadence.Value{ + // cluster 1 + cadence.NewArray([]cadence.Value{ + cadence.String("0000000000000000000000000000000000000000000000000000000000000001"), + cadence.String("0000000000000000000000000000000000000000000000000000000000000002"), + }).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + // cluster 2 + cadence.NewArray([]cadence.Value{ + cadence.String("0000000000000000000000000000000000000000000000000000000000000003"), + cadence.String("0000000000000000000000000000000000000000000000000000000000000004"), + }).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + }).WithType(cadence.NewVariableSizedArrayType(cadence.NewVariableSizedArrayType(cadence.StringType))), + + // randomSource + cadence.String(randomSourceHex), + + // DKGPhase1FinalView + cadence.UInt64(150), + + // DKGPhase2FinalView + cadence.UInt64(160), + + // DKGPhase3FinalView + cadence.UInt64(170), + + // targetDuration + cadence.UInt64(200), + + // targetEndTime + cadence.UInt64(2000000000), + + // clusterQCs + cadence.NewArray([]cadence.Value{ + // cluster 1 + cluster1, + // cluster 2 + cluster2, + }).WithType(cadence.NewVariableSizedArrayType(clusterQCVoteDataType)), + + // dkgGroupKey + cadence.String("8c588266db5f5cda629e83f8aa04ae9413593fac19e4865d06d291c9d14fbdd9bdb86a7a12f9ef8590c79cb635e3163315d193087e9336092987150d0cd2b14ac6365f7dc93eec573752108b8c12368abb65f0652d9f644e5aed611c37926950"), + + // dkgPubKeys + cadence.NewArray([]cadence.Value{ + cadence.String("87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488"), + }).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + + // dkgIdMapping + cadence.NewDictionary([]cadence.KeyValuePair{ + { + Key: cadence.String("0000000000000000000000000000000000000000000000000000000000000011"), + Value: cadence.NewInt(0), + }, + }).WithType(cadence.NewDictionaryType(cadence.StringType, cadence.IntType)), + }).WithType(newFlowEpochEpochRecoverEventType()) +} + +func createVersionBeaconEvent() cadence.Event { + versionBoundaryType := NewNodeVersionBeaconVersionBoundaryStructType() + + semverType := NewNodeVersionBeaconSemverStructType() + + semver := cadence.NewStruct([]cadence.Value{ + // major + cadence.UInt8(2), + + // minor + cadence.UInt8(13), + + // patch + cadence.UInt8(7), + + // preRelease + cadence.NewOptional(cadence.String("test")), + }).WithType(semverType) + + versionBoundary := cadence.NewStruct([]cadence.Value{ + // blockHeight + cadence.UInt64(44), + + // version + semver, + }).WithType(versionBoundaryType) + + return cadence.NewEvent([]cadence.Value{ + // versionBoundaries + cadence.NewArray([]cadence.Value{ + versionBoundary, + 
}).WithType(cadence.NewVariableSizedArrayType(versionBoundaryType)), + + // sequence + cadence.UInt64(5), + }).WithType(NewNodeVersionBeaconVersionBeaconEventType()) +} + +func createProtocolStateVersionUpgradeEvent() cadence.Event { + newVersion := cadence.NewUInt64(1) + activeView := cadence.NewUInt64(1000) + + return cadence.NewEvent([]cadence.Value{ + newVersion, + activeView, + }).WithType(NewProtocolStateVersionUpgradeEventType()) +} + +func newFlowClusterQCVoteStructType() cadence.Type { + + // A.01cf0e2f2f715450.FlowClusterQC.Vote + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowClusterQC") + + return cadence.NewStructType( + location, + "FlowClusterQC.Vote", + []cadence.Field{ + { + Identifier: "nodeID", + Type: cadence.StringType, + }, + { + Identifier: "signature", + Type: cadence.NewOptionalType(cadence.StringType), + }, + { + Identifier: "message", + Type: cadence.NewOptionalType(cadence.StringType), + }, + { + Identifier: "clusterIndex", + Type: cadence.UInt16Type, + }, + { + Identifier: "weight", + Type: cadence.UInt64Type, + }, + }, + nil, + ) +} + +func newFlowIDTableStakingNodeInfoStructType() *cadence.StructType { + + // A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowIDTableStaking") + + return cadence.NewStructType( + location, + "FlowIDTableStaking.NodeInfo", + []cadence.Field{ + { + Identifier: "id", + Type: cadence.StringType, + }, + { + Identifier: "role", + Type: cadence.UInt8Type, + }, + { + Identifier: "networkingAddress", + Type: cadence.StringType, + }, + { + Identifier: "networkingKey", + Type: cadence.StringType, + }, + { + Identifier: "stakingKey", + Type: cadence.StringType, + }, + { + Identifier: "tokensStaked", + Type: cadence.UFix64Type, + }, + { + Identifier: "tokensCommitted", + Type: cadence.UFix64Type, + }, + { + Identifier: "tokensUnstaking", + Type: cadence.UFix64Type, + }, + { + Identifier: "tokensUnstaked", + Type: cadence.UFix64Type, + }, + { + Identifier: "tokensRewarded", + Type: cadence.UFix64Type, + }, + { + Identifier: "delegators", + Type: cadence.NewVariableSizedArrayType(cadence.UInt32Type), + }, + { + Identifier: "delegatorIDCounter", + Type: cadence.UInt32Type, + }, + { + Identifier: "tokensRequestedToUnstake", + Type: cadence.UFix64Type, + }, + { + Identifier: "initialWeight", + Type: cadence.UInt64Type, + }, + }, + nil, + ) +} + +func newFlowEpochEpochSetupEventType() *cadence.EventType { + + // A.01cf0e2f2f715450.FlowEpoch.EpochSetup + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowEpoch") + + return cadence.NewEventType( + location, + "FlowEpoch.EpochSetup", + []cadence.Field{ + { + Identifier: "counter", + Type: cadence.UInt64Type, + }, + { + Identifier: "nodeInfo", + Type: cadence.NewVariableSizedArrayType(newFlowIDTableStakingNodeInfoStructType()), + }, + { + Identifier: "firstView", + Type: cadence.UInt64Type, + }, + { + Identifier: "finalView", + Type: cadence.UInt64Type, + }, + { + Identifier: "collectorClusters", + Type: cadence.NewVariableSizedArrayType(NewFlowClusterQCClusterStructType()), + }, + { + Identifier: "randomSource", + Type: cadence.StringType, + }, + { + Identifier: "DKGPhase1FinalView", + Type: cadence.UInt64Type, + }, + { + Identifier: "DKGPhase2FinalView", + Type: cadence.UInt64Type, + }, + { + Identifier: "DKGPhase3FinalView", + Type: cadence.UInt64Type, + }, 
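+ // the remaining two fields are the epoch timing targets: targetDuration in seconds, targetEndTime as a Unix timestamp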
+ { + Identifier: "targetDuration", + Type: cadence.UInt64Type, + }, + { + Identifier: "targetEndTime", + Type: cadence.UInt64Type, + }, + }, + nil, + ) +} + +func newFlowEpochEpochCommitEventType() *cadence.EventType { + + // A.01cf0e2f2f715450.FlowEpoch.EpochCommit + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowEpoch") + + return cadence.NewEventType( + location, + "FlowEpoch.EpochCommit", + []cadence.Field{ + { + Identifier: "counter", + Type: cadence.UInt64Type, + }, + { + Identifier: "clusterQCs", + Type: cadence.NewVariableSizedArrayType(newFlowClusterQCClusterQCStructType()), + }, + { + Identifier: "dkgGroupKey", + Type: cadence.StringType, + }, + { + Identifier: "dkgPubKeys", + Type: cadence.NewVariableSizedArrayType(cadence.StringType), + }, + { + Identifier: "dkgIdMapping", + Type: cadence.NewDictionaryType(cadence.StringType, cadence.IntType), + }, + }, + nil, + ) +} + +func newFlowEpochEpochRecoverEventType() *cadence.EventType { + + // A.01cf0e2f2f715450.FlowEpoch.EpochRecover + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowEpoch") + + return cadence.NewEventType( + location, + "FlowEpoch.EpochRecover", + []cadence.Field{ + { + Identifier: "counter", + Type: cadence.UInt64Type, + }, + { + Identifier: "nodeInfo", + Type: cadence.NewVariableSizedArrayType(newFlowIDTableStakingNodeInfoStructType()), + }, + { + Identifier: "firstView", + Type: cadence.UInt64Type, + }, + { + Identifier: "finalView", + Type: cadence.UInt64Type, + }, + { + Identifier: "clusterAssignments", + Type: cadence.NewVariableSizedArrayType(cadence.NewVariableSizedArrayType(cadence.StringType)), + }, + { + Identifier: "randomSource", + Type: cadence.StringType, + }, + { + Identifier: "DKGPhase1FinalView", + Type: cadence.UInt64Type, + }, + { + Identifier: "DKGPhase2FinalView", + Type: cadence.UInt64Type, + }, + { + Identifier: "DKGPhase3FinalView", + Type: cadence.UInt64Type, + }, + { + Identifier: "targetDuration", + Type: cadence.UInt64Type, + }, + { + Identifier: "targetEndTime", + Type: cadence.UInt64Type, + }, + { + Identifier: "clusterQCVoteData", + Type: cadence.NewVariableSizedArrayType(newFlowClusterQCClusterQCVoteDataStructType()), + }, + { + Identifier: "dkgGroupKey", + Type: cadence.StringType, + }, + { + Identifier: "dkgPubKeys", + Type: cadence.NewVariableSizedArrayType(cadence.StringType), + }, + { + Identifier: "dkgIdMapping", + Type: cadence.NewDictionaryType(cadence.StringType, cadence.IntType), + }, + }, + nil, + ) +} + +func newFlowClusterQCClusterQCStructType() *cadence.StructType { + + // A.01cf0e2f2f715450.FlowClusterQC.ClusterQC" + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowClusterQC") + + return cadence.NewStructType( + location, + "FlowClusterQC.ClusterQC", + []cadence.Field{ + { + Identifier: "index", + Type: cadence.UInt16Type, + }, + { + Identifier: "voteSignatures", + Type: cadence.NewVariableSizedArrayType(cadence.StringType), + }, + { + Identifier: "voteMessage", + Type: cadence.StringType, + }, + { + Identifier: "voterIDs", + Type: cadence.NewVariableSizedArrayType(cadence.StringType), + }, + }, + nil, + ) +} + +func newFlowClusterQCClusterQCVoteDataStructType() *cadence.StructType { + + // A.01cf0e2f2f715450.FlowClusterQC.ClusterQCVoteData" + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, 
"ClusterQCVoteData") + + return cadence.NewStructType( + location, + "FlowClusterQC.ClusterQCVoteData", + []cadence.Field{ + { + Identifier: "aggregatedSignature", + Type: cadence.StringType, + }, + { + Identifier: "voterIDs", + Type: cadence.NewVariableSizedArrayType(cadence.StringType), + }, + }, + nil, + ) +} + +func NewNodeVersionBeaconVersionBeaconEventType() *cadence.EventType { + + // A.01cf0e2f2f715450.NodeVersionBeacon.VersionBeacon + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "NodeVersionBeacon") + + return cadence.NewEventType( + location, + "NodeVersionBeacon.VersionBeacon", + []cadence.Field{ + { + Identifier: "versionBoundaries", + Type: cadence.NewVariableSizedArrayType(NewNodeVersionBeaconVersionBoundaryStructType()), + }, + { + Identifier: "sequence", + Type: cadence.UInt64Type, + }, + }, + nil, + ) +} + +func NewNodeVersionBeaconVersionBoundaryStructType() *cadence.StructType { + + // A.01cf0e2f2f715450.NodeVersionBeacon.VersionBoundary + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "NodeVersionBeacon") + + return cadence.NewStructType( + location, + "NodeVersionBeacon.VersionBoundary", + []cadence.Field{ + { + Identifier: "blockHeight", + Type: cadence.UInt64Type, + }, + { + Identifier: "version", + Type: NewNodeVersionBeaconSemverStructType(), + }, + }, + nil, + ) +} + +func NewNodeVersionBeaconSemverStructType() *cadence.StructType { + + // A.01cf0e2f2f715450.NodeVersionBeacon.Semver + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "NodeVersionBeacon") + + return cadence.NewStructType( + location, + "NodeVersionBeacon.Semver", + []cadence.Field{ + { + Identifier: "major", + Type: cadence.UInt8Type, + }, + { + Identifier: "minor", + Type: cadence.UInt8Type, + }, + { + Identifier: "patch", + Type: cadence.UInt8Type, + }, + { + Identifier: "preRelease", + Type: cadence.NewOptionalType(cadence.StringType), + }, + }, + nil, + ) +} + +func NewProtocolStateVersionUpgradeEventType() *cadence.EventType { + + // A.01cf0e2f2f715450.NodeVersionBeacon.ProtocolStateVersionUpgrade + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "ProtocolStateVersionUpgrade") + + return cadence.NewEventType( + location, + "NodeVersionBeacon.ProtocolStateVersionUpgrade", + []cadence.Field{ + { + Identifier: "newProtocolVersion", + Type: cadence.UInt64Type, + }, + { + Identifier: "activeView", + Type: cadence.UInt64Type, + }, + }, + nil, + ) +} + +func ufix64FromString(s string) cadence.UFix64 { + f, err := cadence.NewUFix64(s) + if err != nil { + panic(err) + } + return f +} + +func EpochSetupFixtureCCF(randomSource []byte) []byte { + randomSourceHex := hex.EncodeToString(randomSource) + b, err := ccf.Encode(createEpochSetupEvent(randomSourceHex)) + if err != nil { + panic(err) + } + _, err = ccf.Decode(nil, b) + if err != nil { + panic(err) + } + return b +} + +func EpochSetupCCFWithNonHexRandomSource() []byte { + // randomSource of correct length but made of non hex characters + randomSource := "ZZ" + for len(randomSource) != 2*flow.EpochSetupRandomSourceLength { + randomSource = randomSource + "aa" + } + + event := createEpochSetupEvent(randomSource) + + b, err := ccf.Encode(event) + if err != nil { + panic(err) + } + _, err = ccf.Decode(nil, b) + if err != nil { + panic(err) + } + return b +} + +var EpochCommitFixtureCCF = func() []byte { + b, 
err := ccf.Encode(createEpochCommitEvent()) + if err != nil { + panic(err) + } + _, err = ccf.Decode(nil, b) + if err != nil { + panic(err) + } + return b +}() + +func EpochRecoverFixtureCCF(randomSource []byte) []byte { + randomSourceHex := hex.EncodeToString(randomSource) + b, err := ccf.Encode(createEpochRecoverEvent(randomSourceHex)) + if err != nil { + panic(err) + } + _, err = ccf.Decode(nil, b) + if err != nil { + panic(err) + } + return b +} + +var VersionBeaconFixtureCCF = func() []byte { + b, err := ccf.Encode(createVersionBeaconEvent()) + if err != nil { + panic(err) + } + _, err = ccf.Decode(nil, b) + if err != nil { + panic(err) + } + return b +}() + +var ProtocolStateVersionUpgradeFixtureCCF = func() []byte { + b, err := ccf.Encode(createProtocolStateVersionUpgradeEvent()) + if err != nil { + panic(err) + } + _, err = ccf.Decode(nil, b) + if err != nil { + panic(err) + } + return b +}() + +func NewFlowClusterQCClusterStructType() *cadence.StructType { + + // A.01cf0e2f2f715450.FlowClusterQC.Cluster + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowClusterQC") + + return cadence.NewStructType( + location, + "FlowClusterQC.Cluster", + []cadence.Field{ + { + Identifier: "index", + Type: cadence.UInt16Type, + }, + { + Identifier: "nodeWeights", + Type: cadence.NewDictionaryType(cadence.StringType, cadence.UInt64Type), + }, + { + Identifier: "totalWeight", + Type: cadence.UInt64Type, + }, + { + Identifier: "generatedVotes", + Type: cadence.NewDictionaryType(cadence.StringType, newFlowClusterQCVoteStructType()), + }, + { + Identifier: "uniqueVoteMessageTotalWeights", + Type: cadence.NewDictionaryType(cadence.StringType, cadence.UInt64Type), + }, + }, + nil, + ) } -` - -var EpochCommitFixtureJSON = ` -{ - "type": "Event", - "value": { - "id": "A.01cf0e2f2f715450.FlowEpoch.EpochCommitted", - "fields": [ - { - "name": "counter", - "value": { - "type": "UInt64", - "value": "1" - } - }, - { - "name": "clusterQCs", - "value": { - "type": "Array", - "value": [ - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowClusterQC.ClusterQC", - "fields": [ - { - "name": "index", - "value": { - "type": "UInt16", - "value": "0" - } - }, - { - "name": "voteSignatures", - "value": { - "type": "Array", - "value": [ - { - "type": "String", - "value": "a39cd1e1bf7e2fb0609b7388ce5215a6a4c01eef2aee86e1a007faa28a6b2a3dc876e11bb97cdb26c3846231d2d01e4d" - }, - { - "type": "String", - "value": "91673ad9c717d396c9a0953617733c128049ac1a639653d4002ab245b121df1939430e313bcbfd06948f6a281f6bf853" - } - ] - } - }, - { - "name": "voteMessage", - "value": { - "type": "String", - "value": "irrelevant_for_these_purposes" - } - }, - { - "name": "voterIDs", - "value": { - "type": "Array", - "value": [ - { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000001" - }, - { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000002" - } - ] - } - } - ] - } - }, - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.FlowClusterQC.ClusterQC", - "fields": [ - { - "name": "index", - "value": { - "type": "UInt16", - "value": "1" - } - }, - { - "name": "voteSignatures", - "value": { - "type": "Array", - "value": [ - { - "type": "String", - "value": "b2bff159971852ed63e72c37991e62c94822e52d4fdcd7bf29aaf9fb178b1c5b4ce20dd9594e029f3574cb29533b857a" - }, - { - "type": "String", - "value": 
"9931562f0248c9195758da3de4fb92f24fa734cbc20c0cb80280163560e0e0348f843ac89ecbd3732e335940c1e8dccb" - } - ] - } - }, - { - "name": "voteMessage", - "value": { - "type": "String", - "value": "irrelevant_for_these_purposes" - } - }, - { - "name": "voterIDs", - "value": { - "type": "Array", - "value": [ - { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000003" - }, - { - "type": "String", - "value": "0000000000000000000000000000000000000000000000000000000000000004" - } - ] - } - } - ] - } - } - ] - } - }, - { - "name": "dkgPubKeys", - "value": { - "type": "Array", - "value": [ - { - "type": "String", - "value": "8c588266db5f5cda629e83f8aa04ae9413593fac19e4865d06d291c9d14fbdd9bdb86a7a12f9ef8590c79cb635e3163315d193087e9336092987150d0cd2b14ac6365f7dc93eec573752108b8c12368abb65f0652d9f644e5aed611c37926950" - }, - { - "type": "String", - "value": "87a339e4e5c74f089da20a33f515d8c8f4464ab53ede5a74aa2432cd1ae66d522da0c122249ee176cd747ddc83ca81090498389384201614caf51eac392c1c0a916dfdcfbbdf7363f9552b6468434add3d3f6dc91a92bbe3ee368b59b7828488" - } - ] - } - } - ] - } -}` - -var VersionBeaconFixtureJSON = `{ - "type": "Event", - "value": { - "id": "A.01cf0e2f2f715450.NodeVersionBeacon.VersionBeacon", - "fields": [ - { - "value": { - "type": "Array", - "value": [ - { - "type": "Struct", - "value": { - "id": "A.01cf0e2f2f715450.NodeVersionBeacon.VersionBoundary", - "fields": [ - { - "name": "blockHeight", - "value": { - "type": "UInt64", - "value": "44" - } - }, - { - "name": "version", - "value": { - "type": "String", - "value": { - "id": "A.01cf0e2f2f715450.NodeVersionBeacon.Semver", - "fields": [ - { - "value": { - "value": "2", - "type": "UInt8" - }, - "name": "major" - }, - { - "value": { - "value": "13", - "type": "UInt8" - }, - "name": "minor" - }, - { - "value": { - "value": "7", - "type": "UInt8" - }, - "name": "patch" - }, - { - "value": { - "value": { - "value": "", - "type": "String" - }, - "type": "Optional" - }, - "name": "preRelease" - } - ] - }, - "type": "Struct" - }, - "name": "version" - } - ] - }, - "type": "Struct" - } - ], - "type": "Array" - }, - "name": "versionBoundaries" - }, - { - "value": { - "value": "5", - "type": "UInt64" - }, - "name": "sequence" - } - ] - }, - "type": "Event" -}` diff --git a/utils/unittest/staker.go b/utils/unittest/staker.go deleted file mode 100644 index 6305bc224a2..00000000000 --- a/utils/unittest/staker.go +++ /dev/null @@ -1,19 +0,0 @@ -package unittest - -import ( - "github.com/onflow/flow-go/model/flow" -) - -type FixedStaker struct { - Staked bool -} - -func NewFixedStaker(initial bool) *FixedStaker { - return &FixedStaker{ - Staked: initial, - } -} - -func (f *FixedStaker) AmIStakedAt(_ flow.Identifier) bool { - return f.Staked -} diff --git a/utils/unittest/unittest.go b/utils/unittest/unittest.go index 459a4db0e16..e74c3ff37a5 100644 --- a/utils/unittest/unittest.go +++ b/utils/unittest/unittest.go @@ -6,14 +6,18 @@ import ( "math/rand" "os" "os/exec" + "path" "regexp" + "strconv" "strings" "sync" "testing" "time" + "github.com/cockroachdb/pebble/v2" "github.com/dgraph-io/badger/v2" - "github.com/rs/zerolog" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/onflow/crypto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,7 +26,7 @@ import ( "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network" cborcodec "github.com/onflow/flow-go/network/codec/cbor" - "github.com/onflow/flow-go/network/slashing" + 
"github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/topology" ) @@ -315,6 +319,16 @@ func RunWithTempDir(t testing.TB, f func(string)) { f(dbDir) } +func RunWithTempDirs(t testing.TB, f func(string, string)) { + dbDir := TempDir(t) + dbDir2 := TempDir(t) + defer func() { + require.NoError(t, os.RemoveAll(dbDir)) + require.NoError(t, os.RemoveAll(dbDir2)) + }() + f(dbDir, dbDir2) +} + func badgerDB(t testing.TB, dir string, create func(badger.Options) (*badger.DB, error)) *badger.DB { opts := badger. DefaultOptions(dir). @@ -364,6 +378,82 @@ func TempBadgerDB(t testing.TB) (*badger.DB, string) { return db, dir } +func TempPebbleDB(t testing.TB) (*pebble.DB, string) { + dir := TempDir(t) + return PebbleDB(t, dir), dir +} + +func TempPebblePath(t *testing.T) string { + return path.Join(TempDir(t), "pebble"+strconv.Itoa(rand.Int())+".db") +} + +func TempPebbleDBWithOpts(t testing.TB, opts *pebble.Options) (*pebble.DB, string) { + // create random path string for parallelization + dbpath := path.Join(TempDir(t), "pebble"+strconv.Itoa(rand.Int())+".db") + db, err := pebble.Open(dbpath, opts) + require.NoError(t, err) + return db, dbpath +} + +func RunWithPebbleDB(t testing.TB, f func(*pebble.DB)) { + RunWithTempDir(t, func(dir string) { + db, err := pebble.Open(dir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) + require.NoError(t, err) + defer func() { + assert.NoError(t, db.Close()) + }() + f(db) + }) +} + +func RunWithBadgerDBAndPebbleDB(t testing.TB, f func(*badger.DB, *pebble.DB)) { + RunWithTempDir(t, func(dir string) { + bdb := BadgerDB(t, dir) + defer func() { + assert.NoError(t, bdb.Close()) + }() + + pdb := PebbleDB(t, dir) + defer func() { + assert.NoError(t, pdb.Close()) + }() + + f(bdb, pdb) + }) +} + +func PebbleDB(t testing.TB, dir string) *pebble.DB { + db, err := pebble.Open(dir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) + require.NoError(t, err) + return db +} + +func TypedPebbleDB(t testing.TB, dir string, create func(string, *pebble.Options) (*pebble.DB, error)) *pebble.DB { + db, err := create(dir, &pebble.Options{ + FormatMajorVersion: pebble.FormatNewest, + }) + require.NoError(t, err) + return db +} + +func RunWithTypedPebbleDB( + t testing.TB, + create func(string, *pebble.Options) (*pebble.DB, error), + f func(*pebble.DB)) { + RunWithTempDir(t, func(dir string) { + db, err := create(dir, &pebble.Options{}) + require.NoError(t, err) + defer func() { + assert.NoError(t, db.Close()) + }() + f(db) + }) +} + func Concurrently(n int, f func(int)) { var wg sync.WaitGroup for i := 0; i < n; i++ { @@ -376,9 +466,15 @@ func Concurrently(n int, f func(int)) { wg.Wait() } -// AssertEqualBlocksLenAndOrder asserts that both a segment of blocks have the same len and blocks are in the same order -func AssertEqualBlocksLenAndOrder(t *testing.T, expectedBlocks, actualSegmentBlocks []*flow.Block) { - assert.Equal(t, flow.GetIDs(expectedBlocks), flow.GetIDs(actualSegmentBlocks)) +// AssertEqualBlockSequences is given a sequence of Blocks and a sequence of Proposals. It asserts that +// both sequences are of the same length, and that each proposal is for the block at the same index (via block hash). +// Used as a convenience function for Sealing Segment tests due to differences with nils vs empty slices. 
+func AssertEqualBlockSequences(t *testing.T, blocks []*flow.Block, proposals []*flow.Proposal) { + assert.Equal(t, len(blocks), len(proposals), "block and proposal sequences have different lengths (%d vs %d)", len(blocks), len(proposals)) + for i, block := range blocks { + proposal := proposals[i] + assert.Equal(t, block.ID(), proposal.Block.ID(), "block and proposal at index %d do not match", i) + } } // NetworkCodec returns cbor codec. @@ -437,7 +533,43 @@ func GenerateRandomStringWithLen(commentLen uint) string { return string(bytes) } -// NetworkSlashingViolationsConsumer returns a slashing violations consumer for network middleware -func NetworkSlashingViolationsConsumer(logger zerolog.Logger, metrics module.NetworkSecurityMetrics) slashing.ViolationsConsumer { - return slashing.NewSlashingViolationsConsumer(logger, metrics) +// PeerIdFixture creates a random and unique peer ID (libp2p node ID). +func PeerIdFixture(tb testing.TB) peer.ID { + peerID, err := peerIDFixture() + require.NoError(tb, err) + return peerID +} + +func peerIDFixture() (peer.ID, error) { + key, err := generateNetworkingKey(IdentifierFixture()) + if err != nil { + return "", err + } + pubKey, err := keyutils.LibP2PPublicKeyFromFlow(key.PublicKey()) + if err != nil { + return "", err + } + + peerID, err := peer.IDFromPublicKey(pubKey) + if err != nil { + return "", err + } + + return peerID, nil +} + +// generateNetworkingKey generates a Flow ECDSA key using the given seed +func generateNetworkingKey(s flow.Identifier) (crypto.PrivateKey, error) { + seed := make([]byte, crypto.KeyGenSeedMinLen) + copy(seed, s[:]) + return crypto.GeneratePrivateKey(crypto.ECDSASecp256k1, seed) +} + +// PeerIdFixtures creates random and unique peer IDs (libp2p node IDs). +func PeerIdFixtures(t *testing.T, n int) []peer.ID { + peerIDs := make([]peer.ID, n) + for i := 0; i < n; i++ { + peerIDs[i] = PeerIdFixture(t) + } + return peerIDs } diff --git a/utils/unittest/unittest_test.go b/utils/unittest/unittest_test.go index 25953644be6..297670e59e0 100644 --- a/utils/unittest/unittest_test.go +++ b/utils/unittest/unittest_test.go @@ -6,7 +6,7 @@ import ( "testing" ) -// TestCrashTest_ErrorMessage tests that CrashTest() can check a function that crashed without any messages. +// TestCrashTest_NoMessage tests that CrashTest() can check a function that crashed without any messages. 
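+// (CrashTest re-runs the test binary in a subprocess, so the crash cannot take down the main test process.)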
func TestCrashTest_NoMessage(t *testing.T) { f := func(t *testing.T) { crash_NoMessage() diff --git a/network/internal/testutils/updatable_provider.go b/utils/unittest/updatable_provider.go similarity index 95% rename from network/internal/testutils/updatable_provider.go rename to utils/unittest/updatable_provider.go index 014ae696b99..a2b4c5d3dc1 100644 --- a/network/internal/testutils/updatable_provider.go +++ b/utils/unittest/updatable_provider.go @@ -1,4 +1,4 @@ -package testutils +package unittest import ( "sync" @@ -29,7 +29,7 @@ func (p *UpdatableIDProvider) SetIdentities(identities flow.IdentityList) { p.identities = identities } -func (p *UpdatableIDProvider) Identities(filter flow.IdentityFilter) flow.IdentityList { +func (p *UpdatableIDProvider) Identities(filter flow.IdentityFilter[flow.Identity]) flow.IdentityList { p.mu.RLock() defer p.mu.RUnlock() return p.identities.Filter(filter) @@ -46,7 +46,7 @@ func (p *UpdatableIDProvider) ByNodeID(flowID flow.Identifier) (*flow.Identity, defer p.mu.RUnlock() for _, v := range p.identities { - if v.ID() == flowID { + if v.NodeID == flowID { return v, true } } diff --git a/utils/unittest/util.go b/utils/unittest/util.go new file mode 100644 index 00000000000..b9f3b48fcf9 --- /dev/null +++ b/utils/unittest/util.go @@ -0,0 +1,27 @@ +package unittest + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// MockEntity is a simple struct used for testing mempools constrained to Identifier-typed keys. +// The Identifier field is the key used to store the entity in the mempool. +// The Nonce field can be used to simulate modifying the entity value stored in the mempool, +// for example via `Adjust`/`AdjustWithInit` methods. +// This allows for controlled testing of how mempools handle entity storage and updates. +type MockEntity struct { + Identifier flow.Identifier + Nonce uint64 +} + +// EntityListFixture returns a list of n MockEntity fixtures with random identifiers. +func EntityListFixture(n uint) []*MockEntity { + list := make([]*MockEntity, 0, n) + for range n { + list = append(list, MockEntityFixture()) + } + return list +} + +// MockEntityFixture returns a MockEntity with a random identifier. +func MockEntityFixture() *MockEntity { + return &MockEntity{Identifier: IdentifierFixture()} +} diff --git a/utils/unittest/utils.go b/utils/unittest/utils.go new file mode 100644 index 00000000000..e76b7e63f30 --- /dev/null +++ b/utils/unittest/utils.go @@ -0,0 +1,53 @@ +package unittest + +import ( + "encoding/json" + "testing" + + "github.com/onflow/cadence" + jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// VerifyCdcArguments verifies that the actual slice of Go values matches the expected slice of Cadence values. +func VerifyCdcArguments(t *testing.T, expected []cadence.Value, actual []interface{}) { + for index, arg := range actual { + // marshal to bytes + bz, err := json.Marshal(arg) + require.NoError(t, err) + + // parse cadence value + decoded, err := jsoncdc.Decode(nil, bz) + require.NoError(t, err) + + assert.Equal(t, expected[index], decoded) + } +} + +// InterfaceToCdcValues decodes jsoncdc-encoded values from interface{} to cadence.Value. 
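+// It fails the test immediately on any marshal or decode error.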
+func InterfaceToCdcValues(t *testing.T, vals []interface{}) []cadence.Value { + decoded := make([]cadence.Value, len(vals)) + for index, val := range vals { + // marshal to bytes + bz, err := json.Marshal(val) + require.NoError(t, err) + + // parse cadence value + cdcVal, err := jsoncdc.Decode(nil, bz) + require.NoError(t, err) + + decoded[index] = cdcVal + } + + return decoded +} + +// BytesToCdcUInt8 converts a Go []byte to a Cadence []UInt8 with equal content. +func BytesToCdcUInt8(data []byte) []cadence.Value { + ret := make([]cadence.Value, len(data)) + for i, v := range data { + ret[i] = cadence.UInt8(v) + } + return ret +} diff --git a/utils/unittest/version_beacon.go b/utils/unittest/version_beacon.go new file mode 100644 index 00000000000..caa3174ce36 --- /dev/null +++ b/utils/unittest/version_beacon.go @@ -0,0 +1,69 @@ +package unittest + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" +) + +// AddVersionBeacon adds a sequence of blocks carrying the given VersionBeacon, so that the +// service event takes effect in the Flow protocol. +// This means the execution result in which the event was emitted is sealed, and the seal is +// finalized by a valid block. +// This assumes the state is bootstrapped with a root block, as it does NOT produce +// results for the final block of the state: +// Root <- A <- B(result(A(VB))) <- C(seal(B)) +func AddVersionBeacon(t *testing.T, beacon *flow.VersionBeacon, state protocol.FollowerState) { + final, err := state.Final().Head() + require.NoError(t, err) + + protocolState, err := state.Final().ProtocolState() + require.NoError(t, err) + protocolStateID := protocolState.ID() + + A := BlockWithParentAndPayload( + final, + PayloadFixture(WithProtocolStateID(protocolStateID)), + ) + addToState(t, state, A, true) + + receiptA := ReceiptForBlockFixture(A) + receiptA.ExecutionResult.ServiceEvents = []flow.ServiceEvent{beacon.ServiceEvent()} + + B := BlockWithParentAndPayload( + A.ToHeader(), + flow.Payload{ + Receipts: []*flow.ExecutionReceiptStub{receiptA.Stub()}, + Results: []*flow.ExecutionResult{&receiptA.ExecutionResult}, + ProtocolStateID: protocolStateID, + }, + ) + addToState(t, state, B, true) + + sealsForB := []*flow.Seal{ + Seal.Fixture(Seal.WithResult(&receiptA.ExecutionResult)), + } + + C := BlockWithParentAndPayload( + B.ToHeader(), + flow.Payload{ + Seals: sealsForB, + ProtocolStateID: protocolStateID, + }, + ) + addToState(t, state, C, true) +} + +// addToState extends the protocol state with the given certified block and optionally finalizes it. +func addToState(t *testing.T, state protocol.FollowerState, block *flow.Block, finalize bool) { + err := state.ExtendCertified(context.Background(), NewCertifiedBlock(block)) + require.NoError(t, err) + + if finalize { + err = state.Finalize(context.Background(), block.ID()) + require.NoError(t, err) + } +}
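For illustration only (not part of the diff above): a minimal sketch of how a test might consume one of the CCF fixtures defined in this change. The test name and the external test package are hypothetical; the fixture variable, the ccf decoder, and the expected type ID follow the added code.

package unittest_test

import (
	"testing"

	"github.com/onflow/cadence"
	"github.com/onflow/cadence/encoding/ccf"
	"github.com/stretchr/testify/require"

	"github.com/onflow/flow-go/utils/unittest"
)

// TestEpochCommitFixtureRoundTrip (hypothetical) decodes the package-level
// EpochCommitFixtureCCF fixture and checks the event's type ID.
func TestEpochCommitFixtureRoundTrip(t *testing.T) {
	// The fixture is CCF-encoded at package initialization; decode it back.
	val, err := ccf.Decode(nil, unittest.EpochCommitFixtureCCF)
	require.NoError(t, err)

	// The decoded value is the cadence.Event built by createEpochCommitEvent.
	event, ok := val.(cadence.Event)
	require.True(t, ok)

	// The type ID combines the address location and the qualified identifier.
	require.Equal(t, "A.01cf0e2f2f715450.FlowEpoch.EpochCommit", event.EventType.ID())
}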